From 1c79356b52d46aa6b508fb032f5ae709b1f2897b Mon Sep 17 00:00:00 2001 From: Apple Date: Fri, 8 Aug 2003 20:38:03 +0000 Subject: [PATCH] xnu-123.5.tar.gz --- APPLE_LICENSE | 372 + EXTERNAL_HEADERS/ar.h | 88 + EXTERNAL_HEADERS/architecture/byte_order.h | 536 + EXTERNAL_HEADERS/architecture/i386/asm_help.h | 316 + .../architecture/i386/byte_order.h | 148 + EXTERNAL_HEADERS/architecture/i386/cpu.h | 68 + EXTERNAL_HEADERS/architecture/i386/desc.h | 144 + EXTERNAL_HEADERS/architecture/i386/fpu.h | 152 + EXTERNAL_HEADERS/architecture/i386/frame.h | 125 + EXTERNAL_HEADERS/architecture/i386/io.h | 34 + EXTERNAL_HEADERS/architecture/i386/reg_help.h | 104 + EXTERNAL_HEADERS/architecture/i386/sel.h | 52 + EXTERNAL_HEADERS/architecture/i386/table.h | 92 + EXTERNAL_HEADERS/architecture/i386/tss.h | 114 + EXTERNAL_HEADERS/architecture/ppc/asm_help.h | 450 + .../architecture/ppc/basic_regs.h | 300 + .../architecture/ppc/byte_order.h | 179 + EXTERNAL_HEADERS/architecture/ppc/cframe.h | 46 + EXTERNAL_HEADERS/architecture/ppc/fp_regs.h | 147 + .../architecture/ppc/macro_help.h | 58 + .../architecture/ppc/pseudo_inst.h | 414 + EXTERNAL_HEADERS/architecture/ppc/reg_help.h | 224 + EXTERNAL_HEADERS/bsd/i386/ansi.h | 103 + EXTERNAL_HEADERS/bsd/i386/limits.h | 116 + EXTERNAL_HEADERS/bsd/ppc/ansi.h | 103 + EXTERNAL_HEADERS/bsd/ppc/limits.h | 118 + EXTERNAL_HEADERS/mach-o/fat.h | 59 + EXTERNAL_HEADERS/mach-o/kld.h | 82 + EXTERNAL_HEADERS/mach-o/loader.h | 723 ++ EXTERNAL_HEADERS/mach-o/nlist.h | 193 + EXTERNAL_HEADERS/machine/limits.h | 124 + EXTERNAL_HEADERS/stdarg.h | 195 + EXTERNAL_HEADERS/stdint.h | 186 + Makefile | 50 + PB.project | 15 + README | 122 + SETUP/seed_objroot | 133 + SETUP/setup.csh | 4 + SETUP/setup.sh | 4 + bsd/Makefile | 79 + bsd/conf/MASTER | 243 + bsd/conf/MASTER.i386 | 91 + bsd/conf/MASTER.ppc | 87 + bsd/conf/Makefile | 62 + bsd/conf/Makefile.i386 | 8 + bsd/conf/Makefile.ppc | 8 + bsd/conf/Makefile.template | 105 + bsd/conf/compat_hdrs | 80 + bsd/conf/compat_hdrs.awk 
| 22 + bsd/conf/copyright | 6 + bsd/conf/files | 572 + bsd/conf/files.i386 | 20 + bsd/conf/files.ppc | 21 + bsd/conf/machine.awk | 18 + bsd/conf/param.c | 134 + bsd/conf/tools/Makefile | 36 + bsd/conf/tools/doconf/Makefile | 49 + bsd/conf/tools/doconf/doconf.csh | 313 + bsd/conf/tools/newvers/Makefile | 49 + bsd/conf/tools/newvers/newvers.csh | 33 + bsd/conf/version.major | 1 + bsd/conf/version.minor | 1 + bsd/conf/version.variant | 0 bsd/crypto/Makefile | 46 + bsd/crypto/blowfish/Makefile | 37 + bsd/crypto/blowfish/bf_cbc.c | 148 + bsd/crypto/blowfish/bf_cbc_m.c | 339 + bsd/crypto/blowfish/bf_enc.c | 140 + bsd/crypto/blowfish/bf_locl.h | 215 + bsd/crypto/blowfish/bf_pi.h | 325 + bsd/crypto/blowfish/bf_skey.c | 120 + bsd/crypto/blowfish/blowfish.h | 122 + bsd/crypto/cast128/Makefile | 37 + bsd/crypto/cast128/cast128.c | 871 ++ bsd/crypto/cast128/cast128.h | 60 + bsd/crypto/cast128/cast128_cbc.c | 217 + bsd/crypto/cast128/cast128_subkey.h | 89 + bsd/crypto/des/Makefile | 37 + bsd/crypto/des/des.h | 278 + bsd/crypto/des/des_3cbc.c | 244 + bsd/crypto/des/des_cbc.c | 326 + bsd/crypto/des/des_ecb.c | 229 + bsd/crypto/des/des_locl.h | 339 + bsd/crypto/des/des_setkey.c | 236 + bsd/crypto/des/podd.h | 64 + bsd/crypto/des/sk.h | 193 + bsd/crypto/des/spr.h | 193 + bsd/crypto/rc5/Makefile | 37 + bsd/crypto/rc5/rc5.c | 216 + bsd/crypto/rc5/rc5.h | 84 + bsd/crypto/rc5/rc5_cbc.c | 209 + bsd/crypto/sha1.c | 273 + bsd/crypto/sha1.h | 69 + bsd/dev/Makefile | 37 + bsd/dev/busvar.h | 44 + bsd/dev/disk.h | 164 + bsd/dev/disk_label.h | 107 + bsd/dev/i386/conf.c | 325 + bsd/dev/i386/cons.c | 240 + bsd/dev/i386/cons.h | 57 + bsd/dev/i386/kern_machdep.c | 170 + bsd/dev/i386/km.c | 361 + bsd/dev/i386/lock_stubs.c | 26 + bsd/dev/i386/mem.c | 210 + bsd/dev/i386/memmove.c | 52 + bsd/dev/i386/pci_device.h | 124 + bsd/dev/i386/pio.h | 242 + bsd/dev/i386/sel.h | 46 + bsd/dev/i386/sel_inline.h | 139 + bsd/dev/i386/stubs.c | 137 + bsd/dev/i386/table_inline.h | 61 + bsd/dev/i386/unix_signal.c | 313 
+ bsd/dev/i386/unix_startup.c | 156 + bsd/dev/kmreg_com.h | 104 + bsd/dev/ldd.h | 49 + bsd/dev/ppc/conf.c | 302 + bsd/dev/ppc/cons.c | 222 + bsd/dev/ppc/cons.h | 57 + bsd/dev/ppc/ffs.c | 53 + bsd/dev/ppc/ffs.s | 64 + bsd/dev/ppc/kern_machdep.c | 154 + bsd/dev/ppc/km.c | 361 + bsd/dev/ppc/machdep.c | 101 + bsd/dev/ppc/mem.c | 211 + bsd/dev/ppc/memmove.c | 46 + bsd/dev/ppc/nvram.c | 111 + bsd/dev/ppc/ppc_init.c | 274 + bsd/dev/ppc/stubs.c | 168 + bsd/dev/ppc/systemcalls.c | 168 + bsd/dev/ppc/unix_signal.c | 294 + bsd/dev/ppc/unix_startup.c | 151 + bsd/dev/ppc/xsumas.s | 249 + bsd/hfs/MacOSStubs.c | 612 + bsd/hfs/Makefile | 43 + bsd/hfs/hfs.h | 786 ++ bsd/hfs/hfs_btreeio.c | 354 + bsd/hfs/hfs_dbg.h | 337 + bsd/hfs/hfs_encodings.c | 632 + bsd/hfs/hfs_encodings.h | 55 + bsd/hfs/hfs_endian.c | 517 + bsd/hfs/hfs_endian.h | 78 + bsd/hfs/hfs_format.h | 605 + bsd/hfs/hfs_link.c | 273 + bsd/hfs/hfs_lockf.c | 707 ++ bsd/hfs/hfs_lockf.h | 107 + bsd/hfs/hfs_lookup.c | 892 ++ bsd/hfs/hfs_macos_defs.h | 1658 +++ bsd/hfs/hfs_mount.h | 56 + bsd/hfs/hfs_readwrite.c | 1831 +++ bsd/hfs/hfs_search.c | 830 ++ bsd/hfs/hfs_vfsops.c | 1883 +++ bsd/hfs/hfs_vfsutils.c | 3713 ++++++ bsd/hfs/hfs_vhash.c | 400 + bsd/hfs/hfs_vnodeops.c | 5081 ++++++++ bsd/hfs/hfscommon/BTree/BTree.c | 1828 +++ bsd/hfs/hfscommon/BTree/BTreeAllocate.c | 588 + bsd/hfs/hfscommon/BTree/BTreeMiscOps.c | 646 + bsd/hfs/hfscommon/BTree/BTreeNodeOps.c | 1218 ++ bsd/hfs/hfscommon/BTree/BTreeTreeOps.c | 1211 ++ bsd/hfs/hfscommon/Catalog/Catalog.c | 1856 +++ bsd/hfs/hfscommon/Catalog/CatalogIterators.c | 756 ++ bsd/hfs/hfscommon/Catalog/CatalogUtilities.c | 889 ++ bsd/hfs/hfscommon/Catalog/FileIDsServices.c | 756 ++ bsd/hfs/hfscommon/Makefile | 27 + bsd/hfs/hfscommon/Misc/BTreeWrapper.c | 484 + bsd/hfs/hfscommon/Misc/FileExtentMapping.c | 2102 ++++ bsd/hfs/hfscommon/Misc/GenericMRUCache.c | 333 + bsd/hfs/hfscommon/Misc/VolumeAllocation.c | 1758 +++ bsd/hfs/hfscommon/Misc/VolumeRequests.c | 491 + 
.../hfscommon/Unicode/UCStringCompareData.h | 313 + bsd/hfs/hfscommon/Unicode/UnicodeWrappers.c | 477 + bsd/hfs/hfscommon/headers/BTreesInternal.h | 333 + bsd/hfs/hfscommon/headers/BTreesPrivate.h | 492 + bsd/hfs/hfscommon/headers/CatalogPrivate.h | 223 + bsd/hfs/hfscommon/headers/FileMgrInternal.h | 913 ++ .../hfscommon/headers/HFSInstrumentation.h | 140 + .../hfscommon/headers/HFSUnicodeWrappers.h | 121 + bsd/hfs/hfscommon/headers/Makefile | 38 + bsd/i386/Makefile | 27 + bsd/i386/cpu.h | 32 + bsd/i386/disklabel.h | 35 + bsd/i386/endian.h | 122 + bsd/i386/exec.h | 111 + bsd/i386/label_t.h | 40 + bsd/i386/param.h | 163 + bsd/i386/profile.h | 47 + bsd/i386/psl.h | 54 + bsd/i386/ptrace.h | 61 + bsd/i386/reboot.h | 61 + bsd/i386/reg.h | 56 + bsd/i386/setjmp.h | 54 + bsd/i386/signal.h | 64 + bsd/i386/spl.h | 55 + bsd/i386/table.h | 33 + bsd/i386/types.h | 83 + bsd/i386/user.h | 29 + bsd/i386/vmparam.h | 67 + bsd/if/ppc/if_en.c | 1129 ++ bsd/if/ppc/if_en.h | 60 + bsd/if/ppc/mace.c | 258 + bsd/if/ppc/mace.h | 368 + bsd/include/Makefile | 44 + bsd/include/ar.h | 88 + bsd/include/arpa/Makefile | 37 + bsd/include/arpa/ftp.h | 130 + bsd/include/arpa/inet.h | 74 + bsd/include/arpa/nameser.h | 448 + bsd/include/arpa/nameser_compat.h | 194 + bsd/include/arpa/telnet.h | 341 + bsd/include/arpa/tftp.h | 101 + bsd/include/bitstring.h | 164 + bsd/include/c.h | 82 + bsd/include/ctype.h | 168 + bsd/include/db.h | 239 + bsd/include/dirent.h | 123 + bsd/include/disktab.h | 82 + bsd/include/err.h | 81 + bsd/include/errno.h | 23 + bsd/include/fcntl.h | 22 + bsd/include/fnmatch.h | 74 + bsd/include/fsproperties.h | 49 + bsd/include/fstab.h | 100 + bsd/include/fts.h | 149 + bsd/include/glob.h | 111 + bsd/include/grp.h | 90 + bsd/include/kvm.h | 90 + bsd/include/limits.h | 108 + bsd/include/locale.h | 101 + bsd/include/math.h | 208 + bsd/include/memory.h | 57 + bsd/include/mpool.h | 156 + bsd/include/ndbm.h | 98 + bsd/include/netdb.h | 258 + bsd/include/nlist.h | 106 + bsd/include/paths.h | 
100 + bsd/include/protocols/Makefile | 37 + bsd/include/protocols/dumprestore.h | 133 + bsd/include/protocols/routed.h | 125 + bsd/include/protocols/rwhod.h | 89 + bsd/include/protocols/talkd.h | 135 + bsd/include/protocols/timed.h | 120 + bsd/include/pwd.h | 126 + bsd/include/ranlib.h | 100 + bsd/include/regex.h | 127 + bsd/include/regexp.h | 90 + bsd/include/resolv.h.kame | 356 + bsd/include/rune.h | 87 + bsd/include/runetype.h | 124 + bsd/include/semaphore.h | 30 + bsd/include/setjmp.h | 27 + bsd/include/sgtty.h | 60 + bsd/include/signal.h | 102 + bsd/include/stab.h | 88 + bsd/include/stddef.h | 86 + bsd/include/stdio.h | 401 + bsd/include/stdlib.h | 189 + bsd/include/string.h | 114 + bsd/include/strings.h | 57 + bsd/include/struct.h | 72 + bsd/include/sysexits.h | 139 + bsd/include/syslog.h | 23 + bsd/include/tar.h | 94 + bsd/include/termios.h | 23 + bsd/include/time.h | 127 + bsd/include/ttyent.h | 91 + bsd/include/tzfile.h | 172 + bsd/include/unistd.h | 233 + bsd/include/util.h | 107 + bsd/include/utime.h | 71 + bsd/include/utmp.h | 86 + bsd/include/vis.h | 105 + bsd/isofs/Makefile | 34 + bsd/isofs/cd9660/Makefile | 37 + bsd/isofs/cd9660/TODO | 47 + bsd/isofs/cd9660/TODO.hibler | 9 + bsd/isofs/cd9660/cd9660_bmap.c | 217 + bsd/isofs/cd9660/cd9660_lookup.c | 576 + bsd/isofs/cd9660/cd9660_mount.h | 74 + bsd/isofs/cd9660/cd9660_node.c | 522 + bsd/isofs/cd9660/cd9660_node.h | 192 + bsd/isofs/cd9660/cd9660_rrip.c | 720 ++ bsd/isofs/cd9660/cd9660_rrip.h | 163 + bsd/isofs/cd9660/cd9660_util.c | 927 ++ bsd/isofs/cd9660/cd9660_vfsops.c | 1253 ++ bsd/isofs/cd9660/cd9660_vnops.c | 1382 +++ bsd/isofs/cd9660/iso.h | 393 + bsd/isofs/cd9660/iso_rrip.h | 106 + bsd/kern/ast.h | 49 + bsd/kern/bsd_init.c | 782 ++ bsd/kern/bsd_stubs.c | 256 + bsd/kern/init_sysent.c | 754 ++ bsd/kern/kdebug.c | 976 ++ bsd/kern/kern_acct.c | 338 + bsd/kern/kern_clock.c | 526 + bsd/kern/kern_core.c | 380 + bsd/kern/kern_descrip.c | 1318 ++ bsd/kern/kern_event.c | 214 + bsd/kern/kern_exec.c | 908 ++ 
bsd/kern/kern_exit.c | 721 ++ bsd/kern/kern_fork.c | 416 + bsd/kern/kern_ktrace.c | 501 + bsd/kern/kern_lock.c | 477 + bsd/kern/kern_malloc.c | 367 + bsd/kern/kern_mib.c | 92 + bsd/kern/kern_mman.c | 1163 ++ bsd/kern/kern_newsysctl.c | 1159 ++ bsd/kern/kern_pcsamples.c | 361 + bsd/kern/kern_physio.c | 299 + bsd/kern/kern_proc.c | 424 + bsd/kern/kern_prot.c | 707 ++ bsd/kern/kern_resource.c | 543 + bsd/kern/kern_shutdown.c | 287 + bsd/kern/kern_sig.c | 1834 +++ bsd/kern/kern_subr.c | 297 + bsd/kern/kern_symfile.c | 293 + bsd/kern/kern_synch.c | 363 + bsd/kern/kern_sysctl.c | 1213 ++ bsd/kern/kern_time.c | 538 + bsd/kern/kern_xxx.c | 212 + bsd/kern/mach_fat.c | 169 + bsd/kern/mach_header.c | 422 + bsd/kern/mach_header.h | 68 + bsd/kern/mach_loader.c | 1098 ++ bsd/kern/mach_loader.h | 70 + bsd/kern/mach_process.c | 328 + bsd/kern/md5c.c | 342 + bsd/kern/parallel.c | 117 + bsd/kern/parallel.h | 60 + bsd/kern/posix_sem.c | 931 ++ bsd/kern/posix_shm.c | 874 ++ bsd/kern/preload.h | 50 + bsd/kern/qsort.c | 196 + bsd/kern/spl.c | 161 + bsd/kern/subr_disk.c | 246 + bsd/kern/subr_log.c | 344 + bsd/kern/subr_prf.c | 902 ++ bsd/kern/subr_prof.c | 422 + bsd/kern/subr_xxx.c | 187 + bsd/kern/sys_domain.c | 38 + bsd/kern/sys_generic.c | 1266 ++ bsd/kern/sys_socket.c | 387 + bsd/kern/syscalls.c | 382 + bsd/kern/sysctl_init.c | 551 + bsd/kern/sysv_ipc.c | 235 + bsd/kern/sysv_msg.c | 1049 ++ bsd/kern/sysv_sem.c | 996 ++ bsd/kern/sysv_shm.c | 670 + bsd/kern/tty.c | 2553 ++++ bsd/kern/tty_compat.c | 570 + bsd/kern/tty_conf.c | 242 + bsd/kern/tty_pty.c | 945 ++ bsd/kern/tty_subr.c | 569 + bsd/kern/tty_tb.c | 376 + bsd/kern/tty_tty.c | 246 + bsd/kern/ubc_subr.c | 1155 ++ bsd/kern/uipc_domain.c | 446 + bsd/kern/uipc_mbuf.c | 1286 ++ bsd/kern/uipc_mbuf2.c | 448 + bsd/kern/uipc_proto.c | 120 + bsd/kern/uipc_socket.c | 2081 ++++ bsd/kern/uipc_socket2.c | 1266 ++ bsd/kern/uipc_syscalls.c | 1974 +++ bsd/kern/uipc_usrreq.c | 1231 ++ bsd/libkern/Makefile | 25 + bsd/libkern/bcd.c | 56 + 
bsd/libkern/bcmp.c | 86 + bsd/libkern/inet_ntoa.c | 70 + bsd/libkern/libkern.h | 130 + bsd/libkern/locc.c | 71 + bsd/libkern/random.c | 103 + bsd/libkern/rindex.c | 79 + bsd/libkern/scanc.c | 71 + bsd/libkern/skpc.c | 71 + bsd/libkern/strtol.c | 255 + bsd/machine/Makefile | 29 + bsd/machine/ansi.h | 42 + bsd/machine/byte_order.h | 31 + bsd/machine/cons.h | 35 + bsd/machine/cpu.h | 36 + bsd/machine/disklabel.h | 35 + bsd/machine/endian.h | 38 + bsd/machine/exec.h | 38 + bsd/machine/label_t.h | 35 + bsd/machine/param.h | 38 + bsd/machine/proc.h | 38 + bsd/machine/profile.h | 42 + bsd/machine/psl.h | 35 + bsd/machine/ptrace.h | 38 + bsd/machine/reboot.h | 35 + bsd/machine/reg.h | 35 + bsd/machine/setjmp.h | 42 + bsd/machine/signal.h | 35 + bsd/machine/spl.h | 35 + bsd/machine/table.h | 35 + bsd/machine/trap.h | 38 + bsd/machine/types.h | 38 + bsd/machine/unix_traps.h | 35 + bsd/machine/user.h | 35 + bsd/machine/vmparam.h | 35 + bsd/miscfs/Makefile | 40 + bsd/miscfs/deadfs/dead_vnops.c | 464 + bsd/miscfs/devfs/Makefile | 41 + bsd/miscfs/devfs/README | 118 + bsd/miscfs/devfs/devfs.h | 111 + bsd/miscfs/devfs/devfs_proto.h | 45 + bsd/miscfs/devfs/devfs_tree.c | 1192 ++ bsd/miscfs/devfs/devfs_vfsops.c | 426 + bsd/miscfs/devfs/devfs_vnops.c | 1606 +++ bsd/miscfs/devfs/devfsdefs.h | 282 + bsd/miscfs/devfs/index.html | 22 + bsd/miscfs/devfs/reproto.sh | 42 + bsd/miscfs/fdesc/Makefile | 40 + bsd/miscfs/fdesc/fdesc.h | 103 + bsd/miscfs/fdesc/fdesc_vfsops.c | 272 + bsd/miscfs/fdesc/fdesc_vnops.c | 990 ++ bsd/miscfs/fifofs/fifo.h | 109 + bsd/miscfs/fifofs/fifo_vnops.c | 572 + bsd/miscfs/kernfs/kernfs.h | 79 + bsd/miscfs/kernfs/kernfs_vfsops.c | 338 + bsd/miscfs/kernfs/kernfs_vnops.c | 785 ++ bsd/miscfs/nullfs/null.h | 96 + bsd/miscfs/nullfs/null_subr.c | 302 + bsd/miscfs/nullfs/null_vfsops.c | 388 + bsd/miscfs/nullfs/null_vnops.c | 669 + bsd/miscfs/portal/portal.h | 92 + bsd/miscfs/portal/portal_vfsops.c | 302 + bsd/miscfs/portal/portal_vnops.c | 792 ++ bsd/miscfs/procfs/procfs.h 
| 202 + bsd/miscfs/procfs/procfs_ctl.c | 315 + bsd/miscfs/procfs/procfs_fpregs.c | 124 + bsd/miscfs/procfs/procfs_mem.c | 322 + bsd/miscfs/procfs/procfs_note.c | 93 + bsd/miscfs/procfs/procfs_regs.c | 124 + bsd/miscfs/procfs/procfs_status.c | 165 + bsd/miscfs/procfs/procfs_subr.c | 338 + bsd/miscfs/procfs/procfs_vfsops.c | 251 + bsd/miscfs/procfs/procfs_vnops.c | 918 ++ bsd/miscfs/specfs/Makefile | 37 + bsd/miscfs/specfs/spec_vnops.c | 840 ++ bsd/miscfs/specfs/specdev.h | 163 + bsd/miscfs/synthfs/synthfs.h | 231 + bsd/miscfs/synthfs/synthfs_util.c | 344 + bsd/miscfs/synthfs/synthfs_vfsops.c | 583 + bsd/miscfs/synthfs/synthfs_vnops.c | 1844 +++ bsd/miscfs/umapfs/umap.h | 113 + bsd/miscfs/umapfs/umap_subr.c | 470 + bsd/miscfs/umapfs/umap_vfsops.c | 438 + bsd/miscfs/umapfs/umap_vnops.c | 520 + bsd/miscfs/union/Makefile | 37 + bsd/miscfs/union/union.h | 151 + bsd/miscfs/union/union_subr.c | 1119 ++ bsd/miscfs/union/union_vfsops.c | 524 + bsd/miscfs/union/union_vnops.c | 1829 +++ bsd/miscfs/volfs/volfs.h | 189 + bsd/miscfs/volfs/volfs_vfsops.c | 481 + bsd/miscfs/volfs/volfs_vnops.c | 1149 ++ bsd/net/Makefile | 42 + bsd/net/bpf.c | 1426 +++ bsd/net/bpf.h | 263 + bsd/net/bpf_compat.h | 70 + bsd/net/bpf_filter.c | 579 + bsd/net/bpfdesc.h | 128 + bsd/net/bridge.c | 679 + bsd/net/bridge.h | 168 + bsd/net/bsd_comp.c | 1133 ++ bsd/net/dlil.c | 1997 +++ bsd/net/dlil.h | 325 + bsd/net/dlil_ethersubr.c | 1227 ++ bsd/net/dlil_pvt.h | 46 + bsd/net/ether_at_pr_module.c | 469 + bsd/net/ether_if_module.c | 618 + bsd/net/ether_inet6_pr_module.c | 423 + bsd/net/ether_inet_pr_module.c | 468 + bsd/net/etherdefs.h | 75 + bsd/net/ethernet.h | 123 + bsd/net/hostcache.c | 269 + bsd/net/hostcache.h | 115 + bsd/net/if.c | 1291 ++ bsd/net/if.h | 277 + bsd/net/if_arp.h | 156 + bsd/net/if_atm.h | 167 + bsd/net/if_atmsubr.c | 652 + bsd/net/if_blue.c | 677 + bsd/net/if_blue.h | 117 + bsd/net/if_disc.c | 223 + bsd/net/if_dl.h | 106 + bsd/net/if_dummy.c | 367 + bsd/net/if_ether.c | 21 + 
bsd/net/if_ethersubr.c | 474 + bsd/net/if_faith.c | 467 + bsd/net/if_fddisubr.c | 653 + bsd/net/if_gif.c | 694 ++ bsd/net/if_gif.h | 103 + bsd/net/if_llc.h | 165 + bsd/net/if_loop.c | 614 + bsd/net/if_media.c | 504 + bsd/net/if_media.h | 379 + bsd/net/if_mib.c | 174 + bsd/net/if_mib.h | 190 + bsd/net/if_ppp.h | 157 + bsd/net/if_pppvar.h | 131 + bsd/net/if_sl.c | 897 ++ bsd/net/if_slvar.h | 109 + bsd/net/if_sppp.h | 195 + bsd/net/if_spppsubr.c | 4295 +++++++ bsd/net/if_tun.c | 765 ++ bsd/net/if_tun.h | 63 + bsd/net/if_tunvar.h | 70 + bsd/net/if_types.h | 126 + bsd/net/if_var.h | 474 + bsd/net/if_vlan.c | 587 + bsd/net/if_vlan_var.h | 104 + bsd/net/kext_net.c | 193 + bsd/net/kext_net.h | 200 + bsd/net/ndrv.c | 831 ++ bsd/net/ndrv.h | 79 + bsd/net/net_osdep.c | 106 + bsd/net/net_osdep.h | 169 + bsd/net/netisr.c | 133 + bsd/net/netisr.h | 89 + bsd/net/pfkeyv2.h | 454 + bsd/net/ppp_comp.h | 185 + bsd/net/ppp_deflate.c | 699 ++ bsd/net/ppp_defs.h | 175 + bsd/net/radix.c | 1045 ++ bsd/net/radix.h | 190 + bsd/net/raw_cb.c | 165 + bsd/net/raw_cb.h | 96 + bsd/net/raw_usrreq.c | 326 + bsd/net/route.c | 1088 ++ bsd/net/route.h | 321 + bsd/net/rtsock.c | 1095 ++ bsd/net/rtsock_mip.c | 70 + bsd/net/slcompress.c | 623 + bsd/net/slcompress.h | 184 + bsd/net/slip.h | 82 + bsd/net/tokendefs.h | 152 + bsd/net/tokensr.h | 372 + bsd/net/zlib.c | 5383 ++++++++ bsd/net/zlib.h | 1013 ++ bsd/netat/Makefile | 45 + bsd/netat/adsp.c | 398 + bsd/netat/adsp.h | 704 ++ bsd/netat/adsp_CLDeny.c | 94 + bsd/netat/adsp_CLListen.c | 102 + bsd/netat/adsp_Close.c | 531 + bsd/netat/adsp_Control.c | 546 + bsd/netat/adsp_Init.c | 174 + bsd/netat/adsp_InitGlobals.c | 79 + bsd/netat/adsp_NewCID.c | 83 + bsd/netat/adsp_Open.c | 296 + bsd/netat/adsp_Options.c | 99 + bsd/netat/adsp_Packet.c | 836 ++ bsd/netat/adsp_Read.c | 419 + bsd/netat/adsp_RxAttn.c | 211 + bsd/netat/adsp_RxData.c | 391 + bsd/netat/adsp_Status.c | 150 + bsd/netat/adsp_Timer.c | 211 + bsd/netat/adsp_TimerElem.c | 193 + bsd/netat/adsp_Write.c 
| 242 + bsd/netat/adsp_attention.c | 133 + bsd/netat/adsp_internal.h | 357 + bsd/netat/adsp_misc.c | 140 + bsd/netat/adsp_reset.c | 243 + bsd/netat/adsp_stream.c | 655 + bsd/netat/appletalk.h | 295 + bsd/netat/asp.h | 216 + bsd/netat/asp_proto.c | 2298 ++++ bsd/netat/at.c | 681 + bsd/netat/at_aarp.h | 183 + bsd/netat/at_config.h | 75 + bsd/netat/at_ddp_brt.h | 82 + bsd/netat/at_pat.h | 64 + bsd/netat/at_pcb.c | 245 + bsd/netat/at_pcb.h | 118 + bsd/netat/at_proto.c | 104 + bsd/netat/at_snmp.h | 214 + bsd/netat/at_var.h | 279 + bsd/netat/atalk.exp | 9 + bsd/netat/atalk.imp | 9 + bsd/netat/atp.h | 455 + bsd/netat/atp_alloc.c | 195 + bsd/netat/atp_misc.c | 344 + bsd/netat/atp_open.c | 261 + bsd/netat/atp_read.c | 560 + bsd/netat/atp_write.c | 1893 +++ bsd/netat/aurp.h | 289 + bsd/netat/aurp_aurpd.c | 448 + bsd/netat/aurp_cfg.c | 100 + bsd/netat/aurp_gdata.c | 61 + bsd/netat/aurp_misc.c | 211 + bsd/netat/aurp_open.c | 248 + bsd/netat/aurp_rd.c | 117 + bsd/netat/aurp_ri.c | 866 ++ bsd/netat/aurp_rx.c | 208 + bsd/netat/aurp_tickle.c | 159 + bsd/netat/aurp_tx.c | 141 + bsd/netat/aurp_zi.c | 613 + bsd/netat/ddp.c | 1406 +++ bsd/netat/ddp.h | 186 + bsd/netat/ddp.save | 903 ++ bsd/netat/ddp_aarp.c | 984 ++ bsd/netat/ddp_aep.c | 112 + bsd/netat/ddp_brt.c | 188 + bsd/netat/ddp_lap.c | 1695 +++ bsd/netat/ddp_nbp.c | 1565 +++ bsd/netat/ddp_proto.c | 167 + bsd/netat/ddp_r_rtmp.c | 1629 +++ bsd/netat/ddp_r_zip.c | 1962 +++ bsd/netat/ddp_rtmp.c | 346 + bsd/netat/ddp_rtmptable.c | 1145 ++ bsd/netat/ddp_sip.c | 181 + bsd/netat/ddp_usrreq.c | 351 + bsd/netat/debug.h | 262 + bsd/netat/drv_dep.c | 350 + bsd/netat/ep.h | 40 + bsd/netat/lap.h | 91 + bsd/netat/nbp.h | 146 + bsd/netat/pap.h | 127 + bsd/netat/routing_tables.h | 214 + bsd/netat/rtmp.h | 64 + bsd/netat/sys_dep.c | 336 + bsd/netat/sys_glue.c | 1244 ++ bsd/netat/sysglue.h | 190 + bsd/netat/zip.h | 92 + bsd/netccitt/Makefile | 38 + bsd/netccitt/ccitt_proto.c | 122 + bsd/netccitt/dll.h | 104 + bsd/netccitt/hd_debug.c | 233 + 
bsd/netccitt/hd_input.c | 690 ++ bsd/netccitt/hd_output.c | 268 + bsd/netccitt/hd_subr.c | 415 + bsd/netccitt/hd_timer.c | 168 + bsd/netccitt/hd_var.h | 128 + bsd/netccitt/hdlc.h | 177 + bsd/netccitt/if_x25subr.c | 820 ++ bsd/netccitt/llc_input.c | 489 + bsd/netccitt/llc_output.c | 325 + bsd/netccitt/llc_subr.c | 2379 ++++ bsd/netccitt/llc_timer.c | 201 + bsd/netccitt/llc_var.h | 680 + bsd/netccitt/pk.h | 228 + bsd/netccitt/pk_acct.c | 166 + bsd/netccitt/pk_debug.c | 163 + bsd/netccitt/pk_input.c | 1145 ++ bsd/netccitt/pk_llcsubr.c | 392 + bsd/netccitt/pk_output.c | 237 + bsd/netccitt/pk_subr.c | 1214 ++ bsd/netccitt/pk_timer.c | 147 + bsd/netccitt/pk_usrreq.c | 626 + bsd/netccitt/pk_var.h | 252 + bsd/netccitt/x25.h | 178 + bsd/netccitt/x25_sockaddr.h | 178 + bsd/netccitt/x25acct.h | 91 + bsd/netccitt/x25err.h | 85 + bsd/netinet/Makefile | 44 + bsd/netinet/bootp.h | 127 + bsd/netinet/fil.c | 1344 ++ bsd/netinet/icmp6.h | 725 ++ bsd/netinet/icmp_var.h | 109 + bsd/netinet/if_atm.c | 300 + bsd/netinet/if_atm.h | 67 + bsd/netinet/if_ether.c | 747 ++ bsd/netinet/if_ether.h | 192 + bsd/netinet/if_fddi.h | 107 + bsd/netinet/if_tun.h | 70 + bsd/netinet/igmp.c | 512 + bsd/netinet/igmp.h | 116 + bsd/netinet/igmp_var.h | 132 + bsd/netinet/in.c | 1170 ++ bsd/netinet/in.h | 516 + bsd/netinet/in_bootp.c | 640 + bsd/netinet/in_cksum.c | 224 + bsd/netinet/in_gif.c | 506 + bsd/netinet/in_gif.h | 68 + bsd/netinet/in_hostcache.c | 177 + bsd/netinet/in_hostcache.h | 103 + bsd/netinet/in_pcb.c | 1369 ++ bsd/netinet/in_pcb.h | 373 + bsd/netinet/in_proto.c | 310 + bsd/netinet/in_rmx.c | 447 + bsd/netinet/in_systm.h | 82 + bsd/netinet/in_var.h | 293 + bsd/netinet/ip.h | 209 + bsd/netinet/ip6.h | 427 + bsd/netinet/ip_auth.c | 527 + bsd/netinet/ip_auth.h | 85 + bsd/netinet/ip_compat.h | 750 ++ bsd/netinet/ip_divert.c | 483 + bsd/netinet/ip_dummynet.c | 658 + bsd/netinet/ip_dummynet.h | 135 + bsd/netinet/ip_ecn.c | 167 + bsd/netinet/ip_ecn.h | 68 + bsd/netinet/ip_encap.c | 531 + 
bsd/netinet/ip_encap.h | 84 + bsd/netinet/ip_fil.h | 546 + bsd/netinet/ip_flow.c | 355 + bsd/netinet/ip_flow.h | 77 + bsd/netinet/ip_frag.c | 487 + bsd/netinet/ip_frag.h | 79 + bsd/netinet/ip_ftp_pxy.c | 280 + bsd/netinet/ip_fw.c | 1408 +++ bsd/netinet/ip_fw.h | 231 + bsd/netinet/ip_icmp.c | 869 ++ bsd/netinet/ip_icmp.h | 212 + bsd/netinet/ip_input.c | 1896 +++ bsd/netinet/ip_log.c | 503 + bsd/netinet/ip_mroute.c | 2320 ++++ bsd/netinet/ip_mroute.h | 288 + bsd/netinet/ip_nat.c | 1393 +++ bsd/netinet/ip_nat.h | 201 + bsd/netinet/ip_output.c | 1872 +++ bsd/netinet/ip_proxy.c | 362 + bsd/netinet/ip_proxy.h | 113 + bsd/netinet/ip_state.c | 821 ++ bsd/netinet/ip_state.h | 164 + bsd/netinet/ip_var.h | 211 + bsd/netinet/ipl.h | 37 + bsd/netinet/raw_ip.c | 754 ++ bsd/netinet/tcp.h | 153 + bsd/netinet/tcp_debug.c | 219 + bsd/netinet/tcp_debug.h | 97 + bsd/netinet/tcp_fsm.h | 126 + bsd/netinet/tcp_input.c | 2941 +++++ bsd/netinet/tcp_output.c | 934 ++ bsd/netinet/tcp_seq.h | 122 + bsd/netinet/tcp_subr.c | 1261 ++ bsd/netinet/tcp_timer.c | 549 + bsd/netinet/tcp_timer.h | 157 + bsd/netinet/tcp_usrreq.c | 1136 ++ bsd/netinet/tcp_var.h | 486 + bsd/netinet/tcpip.h | 178 + bsd/netinet/udp.h | 71 + bsd/netinet/udp_usrreq.c | 966 ++ bsd/netinet/udp_var.h | 145 + bsd/netinet6/Makefile | 41 + bsd/netinet6/ah.h | 106 + bsd/netinet6/ah_core.c | 1132 ++ bsd/netinet6/ah_input.c | 916 ++ bsd/netinet6/ah_output.c | 558 + bsd/netinet6/dest6.c | 154 + bsd/netinet6/esp.h | 106 + bsd/netinet6/esp_core.c | 1271 ++ bsd/netinet6/esp_input.c | 790 ++ bsd/netinet6/esp_output.c | 671 + bsd/netinet6/frag6.c | 710 ++ bsd/netinet6/icmp6.c | 2524 ++++ bsd/netinet6/icmp6.h | 4 + bsd/netinet6/in6.c | 2636 ++++ bsd/netinet6/in6.h | 710 ++ bsd/netinet6/in6_cksum.c | 320 + bsd/netinet6/in6_gif.c | 459 + bsd/netinet6/in6_gif.h | 45 + bsd/netinet6/in6_ifattach.c | 862 ++ bsd/netinet6/in6_ifattach.h | 50 + bsd/netinet6/in6_pcb.c | 1182 ++ bsd/netinet6/in6_pcb.h | 106 + bsd/netinet6/in6_prefix.c | 1226 ++ 
bsd/netinet6/in6_prefix.h | 85 + bsd/netinet6/in6_proto.c | 626 + bsd/netinet6/in6_rmx.c | 518 + bsd/netinet6/in6_src.c | 495 + bsd/netinet6/in6_var.h | 671 + bsd/netinet6/ip6.h | 4 + bsd/netinet6/ip6_forward.c | 524 + bsd/netinet6/ip6_fw.c | 1264 ++ bsd/netinet6/ip6_fw.h | 219 + bsd/netinet6/ip6_input.c | 1937 +++ bsd/netinet6/ip6_mroute.c | 1896 +++ bsd/netinet6/ip6_mroute.h | 249 + bsd/netinet6/ip6_output.c | 3248 +++++ bsd/netinet6/ip6_var.h | 368 + bsd/netinet6/ip6protosw.h | 137 + bsd/netinet6/ipcomp.h | 69 + bsd/netinet6/ipcomp_core.c | 311 + bsd/netinet6/ipcomp_input.c | 394 + bsd/netinet6/ipcomp_output.c | 425 + bsd/netinet6/ipsec.c | 3668 ++++++ bsd/netinet6/ipsec.h | 436 + bsd/netinet6/mip6.c | 3156 +++++ bsd/netinet6/mip6.h | 861 ++ bsd/netinet6/mip6_common.h | 141 + bsd/netinet6/mip6_ha.c | 1190 ++ bsd/netinet6/mip6_hooks.c | 428 + bsd/netinet6/mip6_io.c | 1507 +++ bsd/netinet6/mip6_md.c | 1293 ++ bsd/netinet6/mip6_mn.c | 3106 +++++ bsd/netinet6/mld6.c | 494 + bsd/netinet6/mld6_var.h | 50 + bsd/netinet6/natpt_defs.h | 319 + bsd/netinet6/natpt_dispatch.c | 718 ++ bsd/netinet6/natpt_list.c | 54 + bsd/netinet6/natpt_list.h | 254 + bsd/netinet6/natpt_log.c | 170 + bsd/netinet6/natpt_log.h | 129 + bsd/netinet6/natpt_rule.c | 497 + bsd/netinet6/natpt_soctl.h | 91 + bsd/netinet6/natpt_trans.c | 1612 +++ bsd/netinet6/natpt_tslot.c | 1043 ++ bsd/netinet6/natpt_usrreq.c | 581 + bsd/netinet6/natpt_var.h | 112 + bsd/netinet6/nd6.c | 2065 ++++ bsd/netinet6/nd6.h | 349 + bsd/netinet6/nd6_nbr.c | 1372 ++ bsd/netinet6/nd6_rtr.c | 1725 +++ bsd/netinet6/pim6.h | 68 + bsd/netinet6/pim6_var.h | 70 + bsd/netinet6/raw_ip6.c | 695 ++ bsd/netinet6/route6.c | 193 + bsd/netinet6/udp6.h | 38 + bsd/netinet6/udp6_usrreq.c | 861 ++ bsd/netinet6/udp6_var.h | 78 + bsd/netiso/Makefile | 42 + bsd/netiso/argo_debug.h | 313 + bsd/netiso/clnl.h | 85 + bsd/netiso/clnp.h | 482 + bsd/netiso/clnp_debug.c | 279 + bsd/netiso/clnp_er.c | 394 + bsd/netiso/clnp_frag.c | 878 ++ 
bsd/netiso/clnp_input.c | 570 + bsd/netiso/clnp_options.c | 551 + bsd/netiso/clnp_output.c | 580 + bsd/netiso/clnp_raw.c | 372 + bsd/netiso/clnp_stat.h | 122 + bsd/netiso/clnp_subr.c | 677 + bsd/netiso/clnp_timer.c | 199 + bsd/netiso/cltp_usrreq.c | 426 + bsd/netiso/cltp_var.h | 75 + bsd/netiso/cons.h | 111 + bsd/netiso/cons_pcb.h | 212 + bsd/netiso/eonvar.h | 191 + bsd/netiso/esis.c | 1084 ++ bsd/netiso/esis.h | 152 + bsd/netiso/idrp_usrreq.c | 198 + bsd/netiso/if_cons.c | 978 ++ bsd/netiso/if_eon.c | 627 + bsd/netiso/iso.c | 938 ++ bsd/netiso/iso.h | 214 + bsd/netiso/iso_chksum.c | 379 + bsd/netiso/iso_errno.h | 297 + bsd/netiso/iso_pcb.c | 635 + bsd/netiso/iso_pcb.h | 132 + bsd/netiso/iso_proto.c | 216 + bsd/netiso/iso_snpac.c | 756 ++ bsd/netiso/iso_snpac.h | 133 + bsd/netiso/iso_var.h | 155 + bsd/netiso/tp_astring.c | 95 + bsd/netiso/tp_clnp.h | 112 + bsd/netiso/tp_cons.c | 326 + bsd/netiso/tp_driver.c | 1020 ++ bsd/netiso/tp_emit.c | 1015 ++ bsd/netiso/tp_events.h | 103 + bsd/netiso/tp_inet.c | 707 ++ bsd/netiso/tp_input.c | 1642 +++ bsd/netiso/tp_ip.h | 109 + bsd/netiso/tp_iso.c | 712 ++ bsd/netiso/tp_meas.c | 145 + bsd/netiso/tp_meas.h | 115 + bsd/netiso/tp_output.c | 730 ++ bsd/netiso/tp_param.h | 385 + bsd/netiso/tp_pcb.c | 1018 ++ bsd/netiso/tp_pcb.h | 374 + bsd/netiso/tp_seq.h | 142 + bsd/netiso/tp_stat.h | 301 + bsd/netiso/tp_states.h | 32 + bsd/netiso/tp_subr.c | 967 ++ bsd/netiso/tp_subr2.c | 898 ++ bsd/netiso/tp_timer.c | 397 + bsd/netiso/tp_timer.h | 111 + bsd/netiso/tp_tpdu.h | 314 + bsd/netiso/tp_trace.c | 193 + bsd/netiso/tp_trace.h | 215 + bsd/netiso/tp_user.h | 180 + bsd/netiso/tp_usrreq.c | 774 ++ bsd/netiso/tuba_subr.c | 369 + bsd/netiso/tuba_table.c | 173 + bsd/netiso/tuba_table.h | 80 + bsd/netiso/tuba_usrreq.c | 333 + bsd/netkey/Makefile | 37 + bsd/netkey/key.c | 6899 +++++++++++ bsd/netkey/key.h | 82 + bsd/netkey/key_debug.c | 733 ++ bsd/netkey/key_debug.h | 95 + bsd/netkey/key_var.h | 133 + bsd/netkey/keydb.c | 229 + bsd/netkey/keydb.h 
| 164 + bsd/netkey/keysock.c | 770 ++ bsd/netkey/keysock.h | 86 + bsd/netkey/keyv2.h | 4 + bsd/netns/Makefile | 38 + bsd/netns/idp.h | 67 + bsd/netns/idp_usrreq.c | 587 + bsd/netns/idp_var.h | 70 + bsd/netns/ns.c | 390 + bsd/netns/ns.h | 172 + bsd/netns/ns_cksum.c | 225 + bsd/netns/ns_error.c | 344 + bsd/netns/ns_error.h | 111 + bsd/netns/ns_if.h | 103 + bsd/netns/ns_input.c | 506 + bsd/netns/ns_ip.c | 461 + bsd/netns/ns_output.c | 181 + bsd/netns/ns_pcb.c | 384 + bsd/netns/ns_pcb.h | 101 + bsd/netns/ns_proto.c | 123 + bsd/netns/sp.h | 73 + bsd/netns/spidp.h | 83 + bsd/netns/spp_debug.c | 191 + bsd/netns/spp_debug.h | 80 + bsd/netns/spp_timer.h | 143 + bsd/netns/spp_usrreq.c | 1827 +++ bsd/netns/spp_var.h | 236 + bsd/nfs/Makefile | 39 + bsd/nfs/krpc.h | 61 + bsd/nfs/krpc_subr.c | 461 + bsd/nfs/nfs.h | 841 ++ bsd/nfs/nfs_bio.c | 1300 ++ bsd/nfs/nfs_boot.c | 984 ++ bsd/nfs/nfs_node.c | 451 + bsd/nfs/nfs_nqlease.c | 1323 ++ bsd/nfs/nfs_serv.c | 3543 ++++++ bsd/nfs/nfs_socket.c | 2628 ++++ bsd/nfs/nfs_srvcache.c | 364 + bsd/nfs/nfs_subs.c | 2192 ++++ bsd/nfs/nfs_syscalls.c | 1302 ++ bsd/nfs/nfs_vfsops.c | 1194 ++ bsd/nfs/nfs_vnops.c | 4632 +++++++ bsd/nfs/nfsdiskless.h | 140 + bsd/nfs/nfsm_subs.h | 484 + bsd/nfs/nfsmount.h | 121 + bsd/nfs/nfsnode.h | 215 + bsd/nfs/nfsproto.h | 475 + bsd/nfs/nfsrtt.h | 126 + bsd/nfs/nfsrvcache.h | 111 + bsd/nfs/nqnfs.h | 240 + bsd/nfs/rpcv2.h | 165 + bsd/nfs/xdr_subs.h | 116 + bsd/ppc/Makefile | 26 + bsd/ppc/cpu.h | 34 + bsd/ppc/disklabel.h | 43 + bsd/ppc/endian.h | 118 + bsd/ppc/exec.h | 97 + bsd/ppc/label_t.h | 38 + bsd/ppc/param.h | 138 + bsd/ppc/profile.h | 51 + bsd/ppc/psl.h | 37 + bsd/ppc/ptrace.h | 61 + bsd/ppc/reboot.h | 57 + bsd/ppc/reg.h | 36 + bsd/ppc/setjmp.h | 77 + bsd/ppc/signal.h | 92 + bsd/ppc/spl.h | 55 + bsd/ppc/table.h | 33 + bsd/ppc/types.h | 85 + bsd/ppc/user.h | 30 + bsd/ppc/vmparam.h | 60 + bsd/sys/Makefile | 49 + bsd/sys/acct.h | 104 + bsd/sys/attr.h | 245 + bsd/sys/buf.h | 260 + bsd/sys/callout.h | 78 + 
bsd/sys/cdefs.h | 165 + bsd/sys/cdio.h | 177 + bsd/sys/chio.h | 109 + bsd/sys/clist.h | 73 + bsd/sys/conf.h | 262 + bsd/sys/dir.h | 87 + bsd/sys/dirent.h | 104 + bsd/sys/disklabel.h | 362 + bsd/sys/disktab.h | 97 + bsd/sys/dkbad.h | 95 + bsd/sys/dkstat.h | 91 + bsd/sys/dmap.h | 82 + bsd/sys/domain.h | 104 + bsd/sys/errno.h | 215 + bsd/sys/ev.h | 75 + bsd/sys/exec.h | 94 + bsd/sys/fcntl.h | 295 + bsd/sys/file.h | 114 + bsd/sys/filedesc.h | 125 + bsd/sys/filio.h | 77 + bsd/sys/fsctl.h | 80 + bsd/sys/gmon.h | 225 + bsd/sys/ioccom.h | 91 + bsd/sys/ioctl.h | 107 + bsd/sys/ioctl_compat.h | 191 + bsd/sys/ipc.h | 120 + bsd/sys/kdebug.h | 358 + bsd/sys/kern_event.h | 117 + bsd/sys/kernel.h | 95 + bsd/sys/ktrace.h | 183 + bsd/sys/linker_set.h | 100 + bsd/sys/loadable_fs.h | 125 + bsd/sys/lock.h | 220 + bsd/sys/lockf.h | 107 + bsd/sys/mach_swapon.h | 46 + bsd/sys/malloc.h | 312 + bsd/sys/mbuf.h | 507 + bsd/sys/md5.h | 50 + bsd/sys/mman.h | 170 + bsd/sys/mount.h | 376 + bsd/sys/msg.h | 178 + bsd/sys/msgbuf.h | 72 + bsd/sys/mtio.h | 163 + bsd/sys/namei.h | 227 + bsd/sys/netport.h | 47 + bsd/sys/param.h | 251 + bsd/sys/paths.h | 33 + bsd/sys/poll.h | 97 + bsd/sys/proc.h | 375 + bsd/sys/protosw.h | 378 + bsd/sys/ptrace.h | 90 + bsd/sys/queue.h | 550 + bsd/sys/reboot.h | 120 + bsd/sys/resource.h | 147 + bsd/sys/resourcevar.h | 117 + bsd/sys/select.h | 78 + bsd/sys/sem.h | 207 + bsd/sys/semaphore.h | 55 + bsd/sys/shm.h | 121 + bsd/sys/signal.h | 227 + bsd/sys/signalvar.h | 223 + bsd/sys/socket.h | 458 + bsd/sys/socketvar.h | 450 + bsd/sys/sockio.h | 129 + bsd/sys/stat.h | 233 + bsd/sys/subr_prf.h | 82 + bsd/sys/syscall.h | 296 + bsd/sys/sysctl.h | 646 + bsd/sys/syslimits.h | 87 + bsd/sys/syslog.h | 303 + bsd/sys/systm.h | 231 + bsd/sys/table.h | 124 + bsd/sys/termios.h | 305 + bsd/sys/time.h | 179 + bsd/sys/timeb.h | 74 + bsd/sys/times.h | 87 + bsd/sys/tprintf.h | 67 + bsd/sys/trace.h | 143 + bsd/sys/tty.h | 344 + bsd/sys/ttychars.h | 86 + bsd/sys/ttycom.h | 163 + 
bsd/sys/ttydefaults.h | 118 + bsd/sys/ttydev.h | 84 + bsd/sys/types.h | 215 + bsd/sys/ubc.h | 145 + bsd/sys/ucred.h | 90 + bsd/sys/uio.h | 111 + bsd/sys/un.h | 88 + bsd/sys/unistd.h | 163 + bsd/sys/unpcb.h | 135 + bsd/sys/user.h | 138 + bsd/sys/utfconv.h | 46 + bsd/sys/utsname.h | 80 + bsd/sys/ux_exception.h | 89 + bsd/sys/vadvise.h | 77 + bsd/sys/vcmd.h | 69 + bsd/sys/version.h | 78 + bsd/sys/vlimit.h | 76 + bsd/sys/vm.h | 102 + bsd/sys/vmmeter.h | 136 + bsd/sys/vmparam.h | 42 + bsd/sys/vnode.h | 496 + bsd/sys/vnode_if.h | 1339 ++ bsd/sys/vstat.h | 70 + bsd/sys/wait.h | 183 + bsd/ufs/Makefile | 36 + bsd/ufs/ffs/Makefile | 37 + bsd/ufs/ffs/ffs_alloc.c | 1648 +++ bsd/ufs/ffs/ffs_balloc.c | 675 + bsd/ufs/ffs/ffs_extern.h | 146 + bsd/ufs/ffs/ffs_inode.c | 588 + bsd/ufs/ffs/ffs_subr.c | 300 + bsd/ufs/ffs/ffs_tables.c | 158 + bsd/ufs/ffs/ffs_vfsops.c | 1284 ++ bsd/ufs/ffs/ffs_vnops.c | 424 + bsd/ufs/ffs/fs.h | 558 + bsd/ufs/mfs/mfs_extern.h | 82 + bsd/ufs/mfs/mfs_vfsops.c | 340 + bsd/ufs/mfs/mfs_vnops.c | 372 + bsd/ufs/mfs/mfsiom.h | 59 + bsd/ufs/mfs/mfsnode.h | 108 + bsd/ufs/ufs/Makefile | 37 + bsd/ufs/ufs/dinode.h | 151 + bsd/ufs/ufs/dir.h | 181 + bsd/ufs/ufs/inode.h | 186 + bsd/ufs/ufs/lockf.h | 109 + bsd/ufs/ufs/quota.h | 242 + bsd/ufs/ufs/ufs_bmap.c | 682 + bsd/ufs/ufs/ufs_byte_order.c | 401 + bsd/ufs/ufs/ufs_byte_order.h | 67 + bsd/ufs/ufs/ufs_extern.h | 163 + bsd/ufs/ufs/ufs_ihash.c | 173 + bsd/ufs/ufs/ufs_inode.c | 167 + bsd/ufs/ufs/ufs_lockf.c | 707 ++ bsd/ufs/ufs/ufs_lookup.c | 1115 ++ bsd/ufs/ufs/ufs_quota.c | 943 ++ bsd/ufs/ufs/ufs_readwrite.c | 680 + bsd/ufs/ufs/ufs_vfsops.c | 245 + bsd/ufs/ufs/ufs_vnops.c | 2241 ++++ bsd/ufs/ufs/ufsmount.h | 135 + bsd/uxkern/ux_exception.c | 316 + bsd/vfs/Makefile | 37 + bsd/vfs/vfs_bio.c | 2111 ++++ bsd/vfs/vfs_cache.c | 369 + bsd/vfs/vfs_cluster.c | 2643 ++++ bsd/vfs/vfs_conf.c | 308 + bsd/vfs/vfs_init.c | 338 + bsd/vfs/vfs_lookup.c | 751 ++ bsd/vfs/vfs_subr.c | 2699 ++++ bsd/vfs/vfs_support.c | 1332 ++ 
bsd/vfs/vfs_support.h | 232 + bsd/vfs/vfs_syscalls.c | 3575 ++++++ bsd/vfs/vfs_utfconv.c | 416 + bsd/vfs/vfs_vnops.c | 520 + bsd/vfs/vnode_if.c | 1138 ++ bsd/vfs/vnode_if.sh | 379 + bsd/vfs/vnode_if.src | 715 ++ bsd/vm/Makefile | 35 + bsd/vm/dp_backing_file.c | 313 + bsd/vm/vm_pageout.h | 32 + bsd/vm/vm_pager.h | 63 + bsd/vm/vm_unix.c | 757 ++ bsd/vm/vnode_pager.c | 351 + bsd/vm/vnode_pager.h | 136 + .../ata/drvApplePIIXATA/AppleATAPIIX.cpp | 1037 ++ .../ata/drvApplePIIXATA/AppleATAPIIX.h | 130 + .../ata/drvApplePIIXATA/AppleATAPIIXRegs.h | 275 + .../ata/drvApplePIIXATA/AppleATAPIIXTiming.h | 142 + .../drvAppleUltra33ATA/AppleUltra33ATA.cpp | 747 ++ .../ata/drvAppleUltra33ATA/AppleUltra33ATA.h | 101 + .../drvAppleUltra33ATA/AppleUltra33ATARegs.h | 141 + .../drvAppleUltra66ATA/AppleUltra66ATA.cpp | 749 ++ .../ata/drvAppleUltra66ATA/AppleUltra66ATA.h | 123 + .../drvAppleADBDevices/AppleADBButtons.cpp | 244 + .../drvAppleADBDevices/AppleADBButtons.h | 78 + .../drvAppleADBDevices/AppleADBDisplay.cpp | 368 + .../drvAppleADBDevices/AppleADBDisplay.h | 104 + .../drvAppleADBDevices/AppleADBKeyboard.cpp | 415 + .../drvAppleADBDevices/AppleADBKeyboard.h | 131 + .../drvAppleADBDevices/AppleADBMouse.cpp | 454 + .../drvAppleADBDevices/AppleADBMouse.h | 94 + .../drvAppleADBDevices/IOADBDevice.cpp | 181 + .../drvApplePS2Keyboard/ApplePS2Keyboard.cpp | 1091 ++ .../drvApplePS2Keyboard/ApplePS2Keyboard.h | 89 + .../drvApplePS2Mouse/ApplePS2Mouse.cpp | 518 + .../drvApplePS2Mouse/ApplePS2Mouse.h | 85 + iokit/Drivers/network/AppleBPF/AppleBPF.cpp | 120 + iokit/Drivers/network/AppleBPF/AppleBPF.h | 49 + iokit/Drivers/network/AppleBPF/bpf.c | 1290 ++ iokit/Drivers/network/AppleBPF/bpf_filter.c | 563 + .../Drivers/network/drvIntel82557/i82557.cpp | 820 ++ iokit/Drivers/network/drvIntel82557/i82557.h | 311 + .../Drivers/network/drvIntel82557/i82557HW.h | 608 + .../network/drvIntel82557/i82557Inline.h | 112 + .../network/drvIntel82557/i82557PHY.cpp | 628 + 
.../Drivers/network/drvIntel82557/i82557PHY.h | 199 + .../network/drvIntel82557/i82557Private.cpp | 1692 +++ .../network/drvIntel82557/i82557eeprom.cpp | 182 + .../network/drvIntel82557/i82557eeprom.h | 211 + .../Drivers/network/drvMaceEnet/MaceEnet.cpp | 918 ++ iokit/Drivers/network/drvMaceEnet/MaceEnet.h | 245 + .../network/drvMaceEnet/MaceEnetHW.cpp | 46 + .../network/drvMaceEnet/MaceEnetPrivate.cpp | 1793 +++ .../network/drvMaceEnet/MaceEnetPrivate.h | 43 + .../network/drvMaceEnet/MaceEnetRegisters.h | 265 + iokit/Drivers/network/drvPPCBMac/BMacEnet.cpp | 1199 ++ iokit/Drivers/network/drvPPCBMac/BMacEnet.h | 272 + .../Drivers/network/drvPPCBMac/BMacEnetHW.cpp | 135 + .../network/drvPPCBMac/BMacEnetMII.cpp | 386 + .../Drivers/network/drvPPCBMac/BMacEnetMII.h | 171 + .../network/drvPPCBMac/BMacEnetPrivate.cpp | 1958 +++ .../network/drvPPCBMac/BMacEnetPrivate.h | 49 + .../network/drvPPCBMac/BMacEnetRegisters.h | 212 + iokit/Drivers/network/drvPPCUniN/UniNEnet.cpp | 1071 ++ iokit/Drivers/network/drvPPCUniN/UniNEnet.h | 314 + .../Drivers/network/drvPPCUniN/UniNEnetHW.cpp | 72 + .../network/drvPPCUniN/UniNEnetMII.cpp | 259 + .../Drivers/network/drvPPCUniN/UniNEnetMII.h | 231 + .../network/drvPPCUniN/UniNEnetPrivate.cpp | 1701 +++ .../network/drvPPCUniN/UniNEnetPrivate.h | 39 + .../network/drvPPCUniN/UniNEnetRegisters.h | 529 + .../network/drvPPCUniN/UniNPowerSaver.cpp | 378 + .../pci/drvApplePCI/AppleGracklePCI.cpp | 356 + .../Drivers/pci/drvApplePCI/AppleGracklePCI.h | 94 + .../Drivers/pci/drvApplePCI/AppleI386PCI.cpp | 563 + iokit/Drivers/pci/drvApplePCI/AppleI386PCI.h | 106 + .../pci/drvApplePCI/AppleMacRiscPCI.cpp | 800 ++ .../Drivers/pci/drvApplePCI/AppleMacRiscPCI.h | 180 + .../platform/drvAppleCuda/AppleCuda.cpp | 1381 +++ .../Drivers/platform/drvAppleCuda/AppleCuda.h | 190 + .../platform/drvAppleCuda/AppleCudaCommands.h | 207 + .../platform/drvAppleCuda/AppleCudaHW.h | 178 + .../platform/drvAppleCuda/AppleVIA6522.h | 297 + 
.../drvAppleCuda/IOCudaADBController.cpp | 243 + .../drvAppleCuda/IOCudaADBController.h | 56 + .../platform/drvAppleGossamerPE/Gossamer.cpp | 266 + .../platform/drvAppleGossamerPE/Gossamer.h | 69 + .../drvAppleGossamerPE/GossamerCPU.cpp | 496 + .../platform/drvAppleGossamerPE/GossamerCPU.h | 70 + .../drvAppleGrandCentral/GrandCentral.cpp | 283 + .../drvAppleGrandCentral/GrandCentral.h | 89 + .../drvAppleI386Generic/AppleI386CPU.cpp | 140 + .../drvAppleI386Generic/AppleI386CPU.h | 67 + .../AppleI386PlatformExpert.cpp | 170 + .../AppleI386PlatformExpert.h | 57 + .../AppleIntelClassicPIC.h | 163 + .../drvAppleIntelClassicPIC/PIC8259.cpp | 319 + .../drvAppleIntelClock/AppleIntelClock.h | 38 + .../drvAppleIntelClock/IntelClock.cpp | 47 + .../platform/drvAppleMacIO/AppleMacIO.cpp | 282 + .../Drivers/platform/drvAppleNMI/AppleNMI.cpp | 161 + .../platform/drvAppleNVRAM/AppleNVRAM.cpp | 145 + .../platform/drvAppleNVRAM/AppleNVRAM.h | 49 + .../Drivers/platform/drvAppleOHare/OHare.cpp | 303 + iokit/Drivers/platform/drvAppleOHare/OHare.h | 96 + .../drvApplePMU/IOPMUADBController.cpp | 383 + .../platform/drvApplePMU/IOPMUADBController.h | 111 + .../ApplePS2Controller.cpp | 1080 ++ .../ApplePS2Controller.h | 233 + .../ApplePS2KeyboardDevice.cpp | 97 + .../ApplePS2MouseDevice.cpp | 97 + .../drvApplePlatformExpert/AppleCPU.cpp | 106 + .../drvApplePlatformExpert/AppleCPU.h | 51 + .../ApplePlatformExpert.cpp | 157 + .../drvApplePowerExpressPE/PowerExpress.cpp | 90 + .../drvApplePowerExpressPE/PowerExpress.h | 49 + .../drvApplePowerStarPE/PowerStar.cpp | 122 + .../platform/drvApplePowerStarPE/PowerStar.h | 48 + .../drvApplePowerSurgePE/PowerSurge.cpp | 50 + .../drvApplePowerSurgePE/PowerSurge.h | 44 + .../drvAppleRootDomain/RootDomain.cpp | 450 + .../platform/drvAppleRootDomain/RootDomain.h | 64 + .../RootDomainUserClient.cpp | 125 + .../drvAppleRootDomain/RootDomainUserClient.h | 65 + .../scsi/drvSymbios8xx/Sym8xxClient.cpp | 450 + .../scsi/drvSymbios8xx/Sym8xxController.h | 143 + 
.../scsi/drvSymbios8xx/Sym8xxExecute.cpp | 1372 ++ .../Drivers/scsi/drvSymbios8xx/Sym8xxInit.cpp | 523 + .../scsi/drvSymbios8xx/Sym8xxInterface.h | 174 + .../Drivers/scsi/drvSymbios8xx/Sym8xxMisc.cpp | 73 + iokit/Drivers/scsi/drvSymbios8xx/Sym8xxRegs.h | 556 + iokit/Drivers/scsi/drvSymbios8xx/Sym8xxSRB.h | 67 + .../Drivers/scsi/drvSymbios8xx/Sym8xxScript.h | 474 + .../scsi/drvSymbios8xx/Sym8xxScript.lis | 993 ++ .../scsi/drvSymbios8xx/Sym8xxScript.ss | 882 ++ iokit/Drivers/scsi/drvSymbios8xx/nasmpb | Bin 0 -> 344008 bytes .../GenericInterruptController.cpp | 200 + .../GenericInterruptController.h | 78 + iokit/Families/IOADBBus/IOADBBus.cpp | 37 + iokit/Families/IOADBBus/IOADBBusPriv.h | 153 + iokit/Families/IOADBBus/IOADBController.cpp | 802 ++ .../IOADBBus/IOADBControllerUserClient.cpp | 125 + .../IOADBBus/IOADBControllerUserClient.h | 68 + .../Families/IOATAHDDrive/IOATAHDCommand.cpp | 612 + iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp | 584 + .../Families/IOATAHDDrive/IOATAHDDriveNub.cpp | 245 + iokit/Families/IOATAHDDrive/IOATAHDPower.cpp | 590 + .../IOATAPICDDrive/IOATAPICDCommand.cpp | 196 + .../IOATAPICDDrive/IOATAPICDDrive.cpp | 513 + .../IOATAPICDDrive/IOATAPICDDriveNub.cpp | 362 + .../IOATAPIDVDDrive/IOATAPIDVDCommand.cpp | 146 + .../IOATAPIDVDDrive/IOATAPIDVDDrive.cpp | 529 + .../IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp | 397 + .../IOATAPIHDDrive/IOATAPIHDCommand.cpp | 325 + .../IOATAPIHDDrive/IOATAPIHDDrive.cpp | 556 + .../IOATAPIHDDrive/IOATAPIHDDriveNub.cpp | 247 + .../IOATAStandard/ATAQueueHelpers.cpp | 152 + .../IOATAStandard/IOATAStandardCommand.cpp | 343 + .../IOATAStandard/IOATAStandardController.cpp | 956 ++ .../IOATAStandard/IOATAStandardData.cpp | 119 + .../IOATAStandard/IOATAStandardDevice.cpp | 2370 ++++ .../IOATAStandard/IOATAStandardDriver.cpp | 569 + .../IOATAStandard/IOATAStandardDriverDma.cpp | 381 + .../IOATAStandard/IOATAStandardDriverPio.cpp | 501 + .../IOBlockStorage/IOBlockStorageDevice.cpp | 74 + .../IOCDBlockStorageDevice.cpp | 57 
+ .../Families/IOCDStorage/IOCDAudioControl.cpp | 116 + .../IOCDAudioControlUserClient.cpp | 324 + .../IOCDStorage/IOCDBlockStorageDriver.cpp | 607 + iokit/Families/IOCDStorage/IOCDMedia.cpp | 320 + .../IOCDStorage/IOCDPartitionScheme.cpp | 800 ++ .../IODVDBlockStorageDevice.cpp | 72 + .../IODVDStorage/IODVDBlockStorageDriver.cpp | 158 + iokit/Families/IODVDStorage/IODVDMedia.cpp | 168 + iokit/Families/IOGraphics/ADBTest.m | 70 + iokit/Families/IOGraphics/AppleDDCDisplay.cpp | 311 + .../IOGraphics/AppleG3SeriesDisplay.cpp | 453 + iokit/Families/IOGraphics/DDCInfo.m | 80 + iokit/Families/IOGraphics/IOAccelerator.cpp | 29 + .../Families/IOGraphics/IOBootFramebuffer.cpp | 230 + iokit/Families/IOGraphics/IOBootFramebuffer.h | 80 + iokit/Families/IOGraphics/IOCursorBlits.h | 425 + iokit/Families/IOGraphics/IODisplay.cpp | 518 + .../Families/IOGraphics/IODisplayWrangler.cpp | 720 ++ iokit/Families/IOGraphics/IODisplayWrangler.h | 99 + iokit/Families/IOGraphics/IOFramebuffer.cpp | 1847 +++ .../IOGraphics/IOFramebufferReallyPrivate.h | 39 + .../IOGraphics/IOFramebufferUserClient.cpp | 801 ++ .../IOGraphics/IOFramebufferUserClient.h | 173 + .../Families/IOGraphics/IOGraphicsDevice.cpp | 39 + .../IOHIDDescriptorParser/HIDCheckReport.c | 113 + .../HIDCountDescriptorItems.c | 238 + .../IOHIDDescriptorParser/HIDGetButtonCaps.c | 493 + .../IOHIDDescriptorParser/HIDGetButtons.c | 165 + .../HIDGetButtonsOnPage.c | 213 + .../IOHIDDescriptorParser/HIDGetCaps.c | 257 + .../HIDGetCollectionNodes.c | 121 + .../IOHIDDescriptorParser/HIDGetData.c | 133 + .../HIDGetNextButtonInfo.c | 151 + .../HIDGetNextUsageValueInfo.c | 153 + .../HIDGetReportLength.c | 122 + .../IOHIDDescriptorParser/HIDGetUsageValue.c | 270 + .../HIDGetUsageValueArray.c | 178 + .../IOHIDDescriptorParser/HIDGetValueCaps.c | 543 + .../IOHIDDescriptorParser/HIDHasUsage.c | 189 + .../IOHIDDescriptorParser/HIDInitReport.c | 224 + .../HIDIsButtonOrValue.c | 167 + .../IOHIDDescriptorParser/HIDLib.h | 132 + 
.../HIDMaxUsageListLength.c | 111 + .../IOHIDDescriptorParser/HIDNextItem.c | 169 + .../HIDOpenCloseDescriptor.c | 195 + .../HIDParseDescriptor.c | 173 + .../HIDPostProcessRIValue.c | 112 + .../IOHIDDescriptorParser/HIDPriv.h | 390 + .../HIDProcessCollection.c | 141 + .../HIDProcessGlobalItem.c | 195 + .../HIDProcessLocalItem.c | 295 + .../HIDProcessMainItem.c | 95 + .../HIDProcessReportItem.c | 213 + .../IOHIDDescriptorParser/HIDPutData.c | 166 + .../HIDScaleUsageValue.c | 198 + .../IOHIDDescriptorParser/HIDSetButtons.c | 244 + .../HIDSetScaledUsageValue.c | 155 + .../IOHIDDescriptorParser/HIDSetUsageValue.c | 156 + .../HIDSetUsageValueArray.c | 175 + .../HIDUsageAndPageFromIndex.c | 140 + .../IOHIDDescriptorParser/HIDUsageInRange.c | 97 + .../HIDUsageListDifference.c | 122 + .../IOHIDDescriptorParser/MacTypes.h | 31 + .../IOHIDDescriptorParser/PoolAlloc.c | 41 + iokit/Families/IOHIDSystem/IOHIDIO.cpp | 55 + iokit/Families/IOHIDSystem/IOHIDSystem.cpp | 2643 ++++ .../Families/IOHIDSystem/IOHIDUserClient.cpp | 246 + iokit/Families/IOHIDSystem/IOHIDUserClient.h | 102 + iokit/Families/IOHIDSystem/IOHIDevice.cpp | 82 + iokit/Families/IOHIDSystem/IOHIKeyboard.cpp | 579 + .../IOHIDSystem/IOHIKeyboardMapper.cpp | 768 ++ iokit/Families/IOHIDSystem/IOHIPointing.cpp | 790 ++ iokit/Families/IOHIDSystem/IOHITablet.cpp | 107 + .../IOHIDSystem/IOHITabletPointer.cpp | 69 + iokit/Families/IONDRVSupport/IONDRV.cpp | 322 + iokit/Families/IONDRVSupport/IONDRV.h | 190 + .../IONDRVSupport/IONDRVFramebuffer.cpp | 1965 +++ .../IONDRVSupport/IONDRVLibraries.cpp | 1776 +++ iokit/Families/IONDRVSupport/IOPEFInternals.c | 2108 ++++ iokit/Families/IONDRVSupport/IOPEFInternals.h | 931 ++ iokit/Families/IONDRVSupport/IOPEFLibraries.h | 62 + iokit/Families/IONDRVSupport/IOPEFLoader.c | 600 + iokit/Families/IONDRVSupport/IOPEFLoader.h | 150 + iokit/Families/IONVRAM/IONVRAMController.cpp | 51 + .../IONetworking/IOEthernetController.cpp | 416 + .../IONetworking/IOEthernetInterface.cpp | 922 ++ 
.../IONetworking/IOKernelDebugger.cpp | 688 ++ .../IONetworking/IOMbufMemoryCursor.cpp | 576 + iokit/Families/IONetworking/IOMbufQueue.h | 180 + .../IONetworking/IONetworkController.cpp | 2159 ++++ iokit/Families/IONetworking/IONetworkData.cpp | 597 + .../IONetworking/IONetworkInterface.cpp | 1531 +++ .../Families/IONetworking/IONetworkMedium.cpp | 383 + .../Families/IONetworking/IONetworkStack.cpp | 1124 ++ .../IONetworking/IONetworkUserClient.cpp | 274 + iokit/Families/IONetworking/IOOutputQueue.cpp | 899 ++ iokit/Families/IONetworking/IOPacketQueue.cpp | 267 + iokit/Families/IOPCIBus/IOPCIBridge.cpp | 1187 ++ iokit/Families/IOPCIBus/IOPCIDevice.cpp | 473 + iokit/Families/IOPCIBus/IOPCIDeviceI386.cpp | 104 + iokit/Families/IOPCIBus/IOPCIDevicePPC.cpp | 130 + .../Families/IOSCSICDDrive/IOSCSICDDrive.cpp | 746 ++ .../IOSCSICDDrive/IOSCSICDDriveNub.cpp | 250 + .../IOSCSIDVDDrive/IOSCSIDVDDrive.cpp | 493 + .../IOSCSIDVDDrive/IOSCSIDVDDriveNub.cpp | 264 + iokit/Families/IOSCSIHDDrive/IOBasicSCSI.cpp | 1331 ++ .../Families/IOSCSIHDDrive/IOSCSIHDDrive.cpp | 899 ++ .../IOSCSIHDDrive/IOSCSIHDDriveNub.cpp | 184 + .../IOSCSIParallel/IOSCSIParallelCommand.cpp | 429 + .../IOSCSIParallelController.cpp | 1079 ++ .../IOSCSIParallel/IOSCSIParallelDevice.cpp | 2156 ++++ .../Families/IOSCSIParallel/queueHelpers.cpp | 152 + .../IOStorage/IOApplePartitionScheme.cpp | 747 ++ .../IOStorage/IOBlockStorageDriver.cpp | 2338 ++++ .../IOStorage/IOFDiskPartitionScheme.cpp | 566 + iokit/Families/IOStorage/IOMedia.cpp | 792 ++ iokit/Families/IOStorage/IOMediaBSDClient.cpp | 2125 ++++ .../IOStorage/IONeXTPartitionScheme.cpp | 627 + .../Families/IOStorage/IOPartitionScheme.cpp | 418 + iokit/Families/IOStorage/IOStorage.cpp | 194 + .../IOSystemManagement/IOWatchDogTimer.cpp | 112 + iokit/IOKit/IOBSD.h | 39 + iokit/IOKit/IOBufferMemoryDescriptor.h | 224 + iokit/IOKit/IOCPU.h | 149 + iokit/IOKit/IOCatalogue.h | 268 + iokit/IOKit/IOCommand.h | 80 + iokit/IOKit/IOCommandGate.h | 209 + 
iokit/IOKit/IOCommandPool.h | 231 + iokit/IOKit/IOCommandQueue.h | 91 + iokit/IOKit/IOConditionLock.h | 65 + iokit/IOKit/IODataQueue.h | 135 + iokit/IOKit/IODataQueueShared.h | 67 + iokit/IOKit/IODeviceMemory.h | 91 + iokit/IOKit/IODeviceTreeSupport.h | 118 + iokit/IOKit/IOEventSource.h | 239 + iokit/IOKit/IOFilterInterruptEventSource.h | 148 + iokit/IOKit/IOInterruptController.h | 144 + iokit/IOKit/IOInterruptEventSource.h | 194 + iokit/IOKit/IOInterrupts.h | 52 + iokit/IOKit/IOKitDebug.h | 93 + iokit/IOKit/IOKitKeys.h | 97 + iokit/IOKit/IOKitServer.h | 116 + iokit/IOKit/IOLib.h | 297 + iokit/IOKit/IOLocks.h | 336 + iokit/IOKit/IOMemoryCursor.h | 451 + iokit/IOKit/IOMemoryDescriptor.h | 690 ++ iokit/IOKit/IOMessage.h | 55 + iokit/IOKit/IOMultiMemoryDescriptor.h | 164 + iokit/IOKit/IONVRAM.h | 131 + iokit/IOKit/IONotifier.h | 66 + iokit/IOKit/IOPlatformExpert.h | 274 + iokit/IOKit/IORangeAllocator.h | 156 + iokit/IOKit/IORegistryEntry.h | 814 ++ iokit/IOKit/IOReturn.h | 119 + iokit/IOKit/IOService.h | 1758 +++ iokit/IOKit/IOServicePM.h | 299 + iokit/IOKit/IOSharedLock.h | 80 + iokit/IOKit/IOSyncer.h | 53 + iokit/IOKit/IOTimeStamp.h | 149 + iokit/IOKit/IOTimerEventSource.h | 217 + iokit/IOKit/IOTypes.h | 229 + iokit/IOKit/IOUserClient.h | 195 + iokit/IOKit/IOWorkLoop.h | 201 + iokit/IOKit/Makefile | 59 + iokit/IOKit/OSMessageNotification.h | 107 + iokit/IOKit/adb/IOADBBus.h | 145 + iokit/IOKit/adb/IOADBController.h | 113 + iokit/IOKit/adb/IOADBDevice.h | 55 + iokit/IOKit/adb/IOADBLib.h | 43 + iokit/IOKit/adb/Makefile | 36 + iokit/IOKit/adb/adb.h | 25 + iokit/IOKit/assert.h | 43 + iokit/IOKit/ata/IOATACommand_Reference.h | 500 + iokit/IOKit/ata/IOATAController_Reference.h | 410 + iokit/IOKit/ata/IOATADeviceInterface.h | 38 + iokit/IOKit/ata/IOATADevice_Reference.h | 408 + iokit/IOKit/ata/IOATADriver_Reference.h | 223 + iokit/IOKit/ata/IOATAStandardInterface.h | 39 + iokit/IOKit/ata/Makefile | 37 + iokit/IOKit/ata/ata-device/ATACommand.h | 157 + 
iokit/IOKit/ata/ata-device/ATADevice.h | 96 + iokit/IOKit/ata/ata-device/ATAPublic.h | 441 + iokit/IOKit/ata/ata-device/IOATACommand.h | 137 + iokit/IOKit/ata/ata-device/IOATADevice.h | 107 + iokit/IOKit/ata/ata-device/Makefile | 36 + .../ata/ata-standard/ATAStandardController.h | 69 + .../ata/ata-standard/ATAStandardPrivate.h | 78 + .../ata/ata-standard/ATAStandardTarget.h | 67 + .../ata/ata-standard/IOATAStandardCommand.h | 211 + .../ata-standard/IOATAStandardController.h | 200 + .../ata/ata-standard/IOATAStandardDevice.h | 269 + .../ata/ata-standard/IOATAStandardDriver.h | 124 + iokit/IOKit/ata/ata-standard/Makefile | 36 + iokit/IOKit/cdb/CDBCommand.h | 63 + iokit/IOKit/cdb/CDBPublic.h | 33 + iokit/IOKit/cdb/IOCDBCommand.h | 101 + iokit/IOKit/cdb/IOCDBDevice.h | 69 + iokit/IOKit/cdb/IOCDBInterface.h | 48 + iokit/IOKit/cdb/Makefile | 36 + iokit/IOKit/graphics/IOAccelClientConnect.h | 44 + iokit/IOKit/graphics/IOAccelSurfaceConnect.h | 114 + iokit/IOKit/graphics/IOAccelTypes.h | 68 + iokit/IOKit/graphics/IOAccelerator.h | 35 + iokit/IOKit/graphics/IODisplay.h | 170 + iokit/IOKit/graphics/IOFramebuffer.h | 455 + iokit/IOKit/graphics/IOFramebufferPrivate.h | 151 + iokit/IOKit/graphics/IOFramebufferShared.h | 145 + iokit/IOKit/graphics/IOGraphicsDevice.h | 53 + iokit/IOKit/graphics/IOGraphicsEngine.h | 51 + iokit/IOKit/graphics/IOGraphicsTypes.h | 397 + iokit/IOKit/graphics/Makefile | 39 + iokit/IOKit/hidsystem/IOHIDDescriptorParser.h | 932 ++ iokit/IOKit/hidsystem/IOHIDParameter.h | 209 + iokit/IOKit/hidsystem/IOHIDShared.h | 282 + iokit/IOKit/hidsystem/IOHIDSystem.h | 498 + iokit/IOKit/hidsystem/IOHIDTypes.h | 170 + iokit/IOKit/hidsystem/IOHIDUsageTables.h | 1017 ++ iokit/IOKit/hidsystem/IOHIDevice.h | 63 + iokit/IOKit/hidsystem/IOHIKeyboard.h | 187 + iokit/IOKit/hidsystem/IOHIKeyboardMapper.h | 123 + iokit/IOKit/hidsystem/IOHIPointing.h | 137 + iokit/IOKit/hidsystem/IOHITablet.h | 78 + iokit/IOKit/hidsystem/IOHITabletPointer.h | 55 + 
iokit/IOKit/hidsystem/IOLLEvent.h | 351 + iokit/IOKit/hidsystem/Makefile | 35 + iokit/IOKit/hidsystem/ev_keymap.h | 143 + iokit/IOKit/hidsystem/ev_private.h | 141 + iokit/IOKit/i386/IOSharedLockImp.h | 128 + iokit/IOKit/i386/Makefile | 35 + iokit/IOKit/machine/IOSharedLockImp.h | 30 + iokit/IOKit/machine/Makefile | 35 + iokit/IOKit/ndrvsupport/IOMacOSTypes.h | 394 + iokit/IOKit/ndrvsupport/IOMacOSVideo.h | 1236 ++ iokit/IOKit/ndrvsupport/IONDRVFramebuffer.h | 304 + iokit/IOKit/ndrvsupport/IONDRVSupport.h | 91 + iokit/IOKit/ndrvsupport/Makefile | 34 + iokit/IOKit/network/IOBasicOutputQueue.h | 301 + iokit/IOKit/network/IOEthernetController.h | 447 + iokit/IOKit/network/IOEthernetInterface.h | 296 + iokit/IOKit/network/IOEthernetStats.h | 163 + iokit/IOKit/network/IOGatedOutputQueue.h | 141 + iokit/IOKit/network/IOKernelDebugger.h | 304 + iokit/IOKit/network/IOMbufMemoryCursor.h | 438 + iokit/IOKit/network/IONetworkController.h | 1375 +++ iokit/IOKit/network/IONetworkData.h | 462 + iokit/IOKit/network/IONetworkInterface.h | 933 ++ iokit/IOKit/network/IONetworkLib.h | 118 + iokit/IOKit/network/IONetworkMedium.h | 411 + iokit/IOKit/network/IONetworkStack.h | 144 + iokit/IOKit/network/IONetworkStats.h | 94 + iokit/IOKit/network/IONetworkUserClient.h | 208 + iokit/IOKit/network/IOOutputQueue.h | 257 + iokit/IOKit/network/IOPacketQueue.h | 271 + iokit/IOKit/network/Makefile | 44 + iokit/IOKit/nvram/IONVRAMController.h | 38 + iokit/IOKit/nvram/Makefile | 34 + iokit/IOKit/pci/IOAGPDevice.h | 191 + iokit/IOKit/pci/IOPCIBridge.h | 271 + iokit/IOKit/pci/IOPCIDevice.h | 489 + iokit/IOKit/pci/Makefile | 34 + iokit/IOKit/platform/AppleMacIO.h | 73 + iokit/IOKit/platform/AppleMacIODevice.h | 54 + iokit/IOKit/platform/AppleNMI.h | 70 + iokit/IOKit/platform/ApplePlatformExpert.h | 85 + iokit/IOKit/platform/Makefile | 34 + iokit/IOKit/power/IOPwrController.h | 35 + iokit/IOKit/power/Makefile | 34 + iokit/IOKit/ppc/IODBDMA.h | 362 + iokit/IOKit/ppc/IOSharedLockImp.h | 138 + 
iokit/IOKit/ppc/Makefile | 34 + iokit/IOKit/ps2/ApplePS2Device.h | 300 + iokit/IOKit/ps2/ApplePS2KeyboardDevice.h | 54 + iokit/IOKit/ps2/ApplePS2MouseDevice.h | 54 + iokit/IOKit/ps2/Makefile | 34 + iokit/IOKit/pwr_mgt/IOPM.h | 269 + iokit/IOKit/pwr_mgt/IOPMLibDefs.h | 30 + iokit/IOKit/pwr_mgt/IOPMPowerSource.h | 87 + iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h | 52 + iokit/IOKit/pwr_mgt/IOPMchangeNoteList.h | 134 + iokit/IOKit/pwr_mgt/IOPMinformee.h | 44 + iokit/IOKit/pwr_mgt/IOPMinformeeList.h | 54 + iokit/IOKit/pwr_mgt/IOPMlog.h | 68 + iokit/IOKit/pwr_mgt/IOPMpmChild.h | 35 + iokit/IOKit/pwr_mgt/IOPMpowerState.h | 41 + iokit/IOKit/pwr_mgt/IOPowerConnection.h | 82 + iokit/IOKit/pwr_mgt/Makefile | 39 + iokit/IOKit/pwr_mgt/RootDomain.h | 91 + iokit/IOKit/rtc/IORTCController.h | 40 + iokit/IOKit/rtc/Makefile | 34 + iokit/IOKit/scsi/IOSCSICommand_Reference.h | 538 + iokit/IOKit/scsi/IOSCSIController_Reference.h | 475 + iokit/IOKit/scsi/IOSCSIDeviceInterface.h | 38 + iokit/IOKit/scsi/IOSCSIDevice_Reference.h | 401 + iokit/IOKit/scsi/IOSCSIParallelInterface.h | 38 + iokit/IOKit/scsi/Makefile | 36 + iokit/IOKit/scsi/scsi-device/IOSCSICommand.h | 132 + iokit/IOKit/scsi/scsi-device/IOSCSIDevice.h | 100 + iokit/IOKit/scsi/scsi-device/Makefile | 36 + iokit/IOKit/scsi/scsi-device/SCSICommand.h | 89 + iokit/IOKit/scsi/scsi-device/SCSIDevice.h | 120 + iokit/IOKit/scsi/scsi-device/SCSIPublic.h | 291 + .../scsi-parallel/IOSCSIParallelCommand.h | 206 + .../scsi-parallel/IOSCSIParallelController.h | 180 + .../scsi/scsi-parallel/IOSCSIParallelDevice.h | 246 + iokit/IOKit/scsi/scsi-parallel/Makefile | 36 + .../scsi-parallel/SCSIParallelController.h | 95 + .../scsi/scsi-parallel/SCSIParallelTarget.h | 106 + iokit/IOKit/storage/IOApplePartitionScheme.h | 245 + iokit/IOKit/storage/IOBlockStorageDevice.h | 411 + iokit/IOKit/storage/IOBlockStorageDriver.h | 1320 ++ iokit/IOKit/storage/IOCDAudioControl.h | 184 + .../storage/IOCDAudioControlUserClient.h | 159 + 
iokit/IOKit/storage/IOCDBlockStorageDevice.h | 105 + iokit/IOKit/storage/IOCDBlockStorageDriver.h | 189 + iokit/IOKit/storage/IOCDMedia.h | 283 + iokit/IOKit/storage/IOCDPartitionScheme.h | 196 + iokit/IOKit/storage/IOCDTypes.h | 226 + iokit/IOKit/storage/IODVDBlockStorageDevice.h | 99 + iokit/IOKit/storage/IODVDBlockStorageDriver.h | 193 + iokit/IOKit/storage/IODVDMedia.h | 162 + iokit/IOKit/storage/IODVDTypes.h | 58 + iokit/IOKit/storage/IOFDiskPartitionScheme.h | 239 + iokit/IOKit/storage/IOMedia.h | 527 + iokit/IOKit/storage/IOMediaBSDClient.h | 136 + iokit/IOKit/storage/IONeXTPartitionScheme.h | 235 + iokit/IOKit/storage/IOPartitionScheme.h | 276 + iokit/IOKit/storage/IOStorage.h | 390 + iokit/IOKit/storage/Makefile | 49 + iokit/IOKit/storage/ata/IOATAHDDrive.h | 470 + iokit/IOKit/storage/ata/IOATAHDDriveNub.h | 89 + iokit/IOKit/storage/ata/IOATAPICDDrive.h | 194 + iokit/IOKit/storage/ata/IOATAPICDDriveNub.h | 119 + iokit/IOKit/storage/ata/IOATAPIDVDDrive.h | 265 + iokit/IOKit/storage/ata/IOATAPIDVDDriveNub.h | 135 + iokit/IOKit/storage/ata/IOATAPIHDDrive.h | 232 + iokit/IOKit/storage/ata/IOATAPIHDDriveNub.h | 87 + iokit/IOKit/storage/ata/Makefile | 34 + iokit/IOKit/storage/scsi/IOBasicSCSI.h | 1207 ++ iokit/IOKit/storage/scsi/IOSCSICDDrive.h | 410 + iokit/IOKit/storage/scsi/IOSCSICDDriveNub.h | 106 + iokit/IOKit/storage/scsi/IOSCSIDVDDrive.h | 239 + iokit/IOKit/storage/scsi/IOSCSIDVDDriveNub.h | 111 + iokit/IOKit/storage/scsi/IOSCSIHDDrive.h | 784 ++ iokit/IOKit/storage/scsi/IOSCSIHDDriveNub.h | 76 + iokit/IOKit/storage/scsi/Makefile | 34 + iokit/IOKit/system.h | 91 + .../IOKit/system_management/IOWatchDogTimer.h | 49 + iokit/IOKit/system_management/Makefile | 34 + iokit/Kernel/IOBufferMemoryDescriptor.cpp | 425 + iokit/Kernel/IOCPU.cpp | 446 + iokit/Kernel/IOCatalogue.cpp | 877 ++ iokit/Kernel/IOCommand.cpp | 51 + iokit/Kernel/IOCommandGate.cpp | 167 + iokit/Kernel/IOCommandPool.cpp | 202 + iokit/Kernel/IOCommandQueue.cpp | 268 + 
iokit/Kernel/IOConditionLock.cpp | 199 + iokit/Kernel/IODataQueue.cpp | 213 + iokit/Kernel/IODeviceMemory.cpp | 80 + iokit/Kernel/IODeviceTreeSupport.cpp | 1056 ++ iokit/Kernel/IOEventSource.cpp | 109 + iokit/Kernel/IOFilterInterruptEventSource.cpp | 187 + iokit/Kernel/IOInterruptController.cpp | 653 + iokit/Kernel/IOInterruptEventSource.cpp | 246 + iokit/Kernel/IOKitDebug.cpp | 155 + iokit/Kernel/IOLib.c | 607 + iokit/Kernel/IOLocks.cpp | 223 + iokit/Kernel/IOMemoryCursor.cpp | 328 + iokit/Kernel/IOMemoryDescriptor.cpp | 1826 +++ iokit/Kernel/IOMultiMemoryDescriptor.cpp | 368 + iokit/Kernel/IONVRAM.cpp | 1349 ++ iokit/Kernel/IOPMPowerSource.cpp | 166 + iokit/Kernel/IOPMPowerSourceList.cpp | 152 + iokit/Kernel/IOPMchangeNoteList.cpp | 219 + iokit/Kernel/IOPMinformee.cpp | 51 + iokit/Kernel/IOPMinformeeList.cpp | 168 + iokit/Kernel/IOPMpmChild.cpp | 36 + iokit/Kernel/IOPlatformExpert.cpp | 1059 ++ iokit/Kernel/IOPowerConnection.cpp | 93 + iokit/Kernel/IORangeAllocator.cpp | 369 + iokit/Kernel/IORegistryEntry.cpp | 1999 +++ iokit/Kernel/IOService.cpp | 3865 ++++++ iokit/Kernel/IOServicePM.cpp | 3642 ++++++ iokit/Kernel/IOServicePrivate.h | 190 + iokit/Kernel/IOStartIOKit.cpp | 180 + iokit/Kernel/IOStringFuncs.c | 293 + iokit/Kernel/IOSyncer.cpp | 116 + iokit/Kernel/IOTimerEventSource.cpp | 234 + iokit/Kernel/IOUserClient.cpp | 2616 ++++ iokit/Kernel/IOWorkLoop.cpp | 430 + iokit/Kernel/PMmisc.cpp | 63 + iokit/Kernel/i386/IOAsmSupport.s | 39 + iokit/Kernel/i386/IOSharedLock.s | 23 + iokit/Kernel/ppc/IOAsmSupport.s | 114 + iokit/Kernel/ppc/IODBDMA.cpp | 155 + iokit/Kernel/ppc/IOSharedLock.s | 23 + iokit/Kernel/printPlist | 80 + iokit/KernelConfigTables.cpp | 740 ++ iokit/Makefile | 30 + iokit/Tests/TestCollections.cpp | 941 ++ iokit/Tests/TestContainers.cpp | 470 + iokit/Tests/TestDevice.cpp | 178 + iokit/Tests/Tests.cpp | 159 + iokit/Tests/Tests.h | 99 + iokit/User/Makefile | 38 + iokit/User/Makefile.user | 41 + iokit/bsddev/IOBSDConsole.cpp | 216 + 
iokit/bsddev/IOBSDConsole.h | 67 + iokit/bsddev/IOKitBSDInit.cpp | 466 + iokit/bsddev/IOKitBSDInit.h | 32 + iokit/conf/MASTER | 58 + iokit/conf/MASTER.i386 | 15 + iokit/conf/MASTER.ppc | 17 + iokit/conf/Makefile | 63 + iokit/conf/Makefile.i386 | 8 + iokit/conf/Makefile.ppc | 8 + iokit/conf/Makefile.template | 112 + iokit/conf/copyright | 6 + iokit/conf/files | 233 + iokit/conf/files.i386 | 72 + iokit/conf/files.ppc | 107 + iokit/conf/tools/Makefile | 36 + iokit/conf/tools/doconf/Makefile | 49 + iokit/conf/tools/doconf/doconf.csh | 313 + iokit/conf/tools/newvers/Makefile | 49 + iokit/conf/tools/newvers/newvers.csh | 34 + iokit/conf/version.major | 1 + iokit/conf/version.minor | 1 + iokit/conf/version.variant | 0 iokit/include/DeviceTree.h | 205 + iokit/include/Makefile | 30 + .../architecture/i386/kernBootStruct.h | 22 + iokit/include/architecture/i386/pio.h | 246 + iokit/include/assert.h | 24 + iokit/include/bsddev/EventShmemLock.h | 22 + iokit/include/bsddev/Makefile | 41 + iokit/include/bsddev/ev_keymap.h | 23 + iokit/include/bsddev/ev_types.h | 23 + iokit/include/bsddev/event.h | 23 + iokit/include/bsddev/evio.h | 23 + iokit/include/bsddev/evsio.h | 29 + iokit/include/bsddev/i386/EventShmemLock.h | 23 + iokit/include/bsddev/i386/Makefile | 39 + iokit/include/bsddev/i386/event.h | 23 + iokit/include/bsddev/i386/evio.h | 23 + iokit/include/bsddev/i386/evsio.h | 23 + iokit/include/bsddev/machine/EventShmemLock.h | 35 + iokit/include/bsddev/machine/Makefile | 39 + iokit/include/bsddev/machine/event.h | 35 + iokit/include/bsddev/machine/evio.h | 35 + iokit/include/bsddev/machine/evsio.h | 35 + iokit/include/bsddev/ppc/EventShmemLock.h | 23 + iokit/include/bsddev/ppc/Makefile | 39 + iokit/include/bsddev/ppc/event.h | 23 + iokit/include/bsddev/ppc/evio.h | 23 + iokit/include/bsddev/ppc/evsio.h | 23 + iokit/include/drivers/Makefile | 36 + iokit/include/drivers/event_status_driver.h | 151 + iokit/include/mach/mach.h | 21 + iokit/mach-o/mach_header.h | 73 + 
libkern/Makefile | 25 + libkern/c++/OSArray.cpp | 393 + libkern/c++/OSBoolean.cpp | 111 + libkern/c++/OSCPPDebug.cpp | 46 + libkern/c++/OSCollection.cpp | 47 + libkern/c++/OSCollectionIterator.cpp | 131 + libkern/c++/OSData.cpp | 407 + libkern/c++/OSDictionary.cpp | 570 + libkern/c++/OSIterator.cpp | 36 + libkern/c++/OSMetaClass.cpp | 794 ++ libkern/c++/OSNumber.cpp | 165 + libkern/c++/OSObject.cpp | 157 + libkern/c++/OSOrderedSet.cpp | 342 + libkern/c++/OSRuntime.cpp | 343 + libkern/c++/OSSerialize.cpp | 249 + libkern/c++/OSSet.cpp | 323 + libkern/c++/OSString.cpp | 273 + libkern/c++/OSSymbol.cpp | 521 + libkern/c++/OSUnserialize.cpp | 1614 +++ libkern/c++/OSUnserialize.y | 659 + libkern/c++/OSUnserializeXML.cpp | 2040 +++ libkern/c++/OSUnserializeXML.y | 1082 ++ .../Tests/TestSerialization/CustomInfo.xml | 15 + libkern/c++/Tests/TestSerialization/Makefile | 43 + .../TestSerialization/Makefile.postamble | 100 + .../Tests/TestSerialization/Makefile.preamble | 137 + .../c++/Tests/TestSerialization/PB.project | 17 + .../PBUserInfo/PBUserInfo_root.plist | 1 + .../test1.kmodproj/CustomInfo.xml | 24 + .../TestSerialization/test1.kmodproj/Makefile | 49 + .../test1.kmodproj/Makefile.postamble | 100 + .../test1.kmodproj/Makefile.preamble | 137 + .../test1.kmodproj/PB.project | 25 + .../test1.kmodproj/test1_main.cpp | 101 + .../test1.kmodproj/test1_main.h | 21 + .../test2.kmodproj/CustomInfo.xml | 24 + .../TestSerialization/test2.kmodproj/Makefile | 47 + .../test2.kmodproj/Makefile.postamble | 100 + .../test2.kmodproj/Makefile.preamble | 137 + .../test2.kmodproj/PB.project | 24 + .../test2.kmodproj/test2_main.cpp | 164 + libkern/conf/MASTER | 55 + libkern/conf/MASTER.i386 | 11 + libkern/conf/MASTER.ppc | 17 + libkern/conf/Makefile | 63 + libkern/conf/Makefile.i386 | 8 + libkern/conf/Makefile.ppc | 8 + libkern/conf/Makefile.template | 109 + libkern/conf/copyright | 6 + libkern/conf/files | 29 + libkern/conf/files.i386 | 1 + libkern/conf/files.ppc | 2 + 
libkern/conf/tools/Makefile | 36 + libkern/conf/tools/doconf/Makefile | 49 + libkern/conf/tools/doconf/doconf.csh | 313 + libkern/conf/tools/newvers/Makefile | 49 + libkern/conf/tools/newvers/newvers.csh | 34 + libkern/conf/version.major | 1 + libkern/conf/version.minor | 1 + libkern/conf/version.variant | 0 libkern/gen/OSAtomicOperations.c | 336 + libkern/i386/OSAtomic.c | 47 + libkern/i386/OSAtomic.s | 48 + libkern/kmod/Makefile | 37 + libkern/kmod/Makefile.kmod | 63 + libkern/kmod/README | 28 + libkern/kmod/c_start.c | 50 + libkern/kmod/c_stop.c | 50 + libkern/kmod/cplus_start.c | 58 + libkern/kmod/cplus_stop.c | 58 + libkern/libkern/Makefile | 43 + libkern/libkern/OSAtomic.h | 259 + libkern/libkern/OSBase.h | 45 + libkern/libkern/OSByteOrder.h | 778 ++ libkern/libkern/OSReturn.h | 75 + libkern/libkern/OSTypes.h | 86 + libkern/libkern/c++/Makefile | 53 + libkern/libkern/c++/OSArray.h | 262 + libkern/libkern/c++/OSBoolean.h | 127 + libkern/libkern/c++/OSCPPDebug.h | 41 + libkern/libkern/c++/OSCollection.h | 143 + libkern/libkern/c++/OSCollectionIterator.h | 89 + libkern/libkern/c++/OSContainers.h | 43 + libkern/libkern/c++/OSData.h | 262 + libkern/libkern/c++/OSDictionary.h | 322 + libkern/libkern/c++/OSIterator.h | 69 + libkern/libkern/c++/OSLib.h | 53 + libkern/libkern/c++/OSMetaClass.h | 559 + libkern/libkern/c++/OSNumber.h | 175 + libkern/libkern/c++/OSObject.h | 164 + libkern/libkern/c++/OSOrderedSet.h | 260 + libkern/libkern/c++/OSSerialize.h | 115 + libkern/libkern/c++/OSSet.h | 265 + libkern/libkern/c++/OSString.h | 183 + libkern/libkern/c++/OSSymbol.h | 125 + libkern/libkern/c++/OSUnserialize.h | 41 + libkern/libkern/i386/Makefile | 35 + libkern/libkern/i386/OSByteOrder.h | 209 + libkern/libkern/machine/Makefile | 35 + libkern/libkern/machine/OSByteOrder.h | 223 + libkern/libkern/ppc/Makefile | 35 + libkern/libkern/ppc/OSByteOrder.h | 207 + libkern/mach-o/loader.h | 722 ++ libkern/mach-o/mach_header.h | 73 + libkern/ppc/OSAtomic.s | 107 + libsa/Makefile 
| 30 + libsa/bootstrap.cpp | 103 + libsa/bsearch.c | 101 + libsa/catalogue.cpp | 1438 +++ libsa/conf/MASTER | 55 + libsa/conf/MASTER.i386 | 11 + libsa/conf/MASTER.ppc | 17 + libsa/conf/Makefile | 63 + libsa/conf/Makefile.i386 | 8 + libsa/conf/Makefile.ppc | 8 + libsa/conf/Makefile.template | 112 + libsa/conf/copyright | 6 + libsa/conf/files | 17 + libsa/conf/files.i386 | 1 + libsa/conf/files.ppc | 2 + libsa/conf/tools/Makefile | 36 + libsa/conf/tools/doconf/Makefile | 49 + libsa/conf/tools/doconf/doconf.csh | 313 + libsa/conf/tools/newvers/Makefile | 49 + libsa/conf/tools/newvers/newvers.csh | 34 + libsa/conf/version.major | 1 + libsa/conf/version.minor | 1 + libsa/conf/version.variant | 0 libsa/i386/setjmp.s | 94 + libsa/kmod.cpp | 1240 ++ libsa/libsa/Makefile | 30 + libsa/libsa/catalogue.h | 5 + libsa/libsa/i386/Makefile | 32 + libsa/libsa/i386/setjmp.h | 61 + libsa/libsa/kmod.h | 11 + libsa/libsa/mach/Makefile | 32 + libsa/libsa/mach/mach.h | 12 + libsa/libsa/malloc.h | 42 + libsa/libsa/mkext.h | 55 + libsa/libsa/ppc/Makefile | 32 + libsa/libsa/ppc/setjmp.h | 49 + libsa/libsa/setjmp.h | 42 + libsa/libsa/stdlib.h | 45 + libsa/libsa/unistd.h | 8 + libsa/libsa/vers_rsrc.h | 29 + libsa/mach.c | 41 + libsa/mach_loader.h | 70 + libsa/malloc.c | 593 + libsa/malloc_debug_stuff | 294 + libsa/malloc_unused | 76 + libsa/misc.c | 47 + libsa/mkext.c | 368 + libsa/ppc/setjmp.s | 191 + libsa/printPlist | 80 + libsa/sort.c | 205 + libsa/vers_rsrc.c | 429 + makedefs/MakeInc.cmd | 24 + makedefs/MakeInc.def | 262 + makedefs/MakeInc.dir | 455 + makedefs/MakeInc.rule | 490 + osfmk/.gdbinit | 1152 ++ osfmk/Makefile | 62 + .../UserNotification/KUNCUserNotifications.c | 368 + .../UserNotification/KUNCUserNotifications.h | 209 + osfmk/UserNotification/Makefile | 91 + osfmk/UserNotification/UNDReply.defs | 39 + osfmk/UserNotification/UNDRequest.defs | 113 + osfmk/UserNotification/UNDTypes.defs | 60 + osfmk/UserNotification/UNDTypes.h | 66 + osfmk/conf/MASTER | 198 + 
osfmk/conf/MASTER.i386 | 103 + osfmk/conf/MASTER.ppc | 65 + osfmk/conf/Makefile | 68 + osfmk/conf/Makefile.i386 | 10 + osfmk/conf/Makefile.ppc | 23 + osfmk/conf/Makefile.template | 127 + osfmk/conf/copyright | 3 + osfmk/conf/copyright.cmu | 25 + osfmk/conf/copyright.osf | 49 + osfmk/conf/files | 260 + osfmk/conf/files.i386 | 96 + osfmk/conf/files.ppc | 101 + osfmk/conf/kernelversion.major | 1 + osfmk/conf/kernelversion.minor | 1 + osfmk/conf/kernelversion.variant | 1 + osfmk/conf/tools/Makefile | 38 + osfmk/conf/tools/doconf/Makefile | 49 + osfmk/conf/tools/doconf/doconf.csh | 313 + osfmk/conf/tools/kernel_newvers/Makefile | 49 + .../tools/kernel_newvers/kernel_newvers.csh | 39 + osfmk/conf/tools/newvers/Makefile | 49 + osfmk/conf/tools/newvers/newvers.csh | 33 + osfmk/conf/version.major | 1 + osfmk/conf/version.minor | 1 + osfmk/conf/version.variant | 1 + osfmk/ddb/Makefile | 29 + osfmk/ddb/db_access.c | 168 + osfmk/ddb/db_access.h | 112 + osfmk/ddb/db_aout.c | 955 ++ osfmk/ddb/db_aout.h | 89 + osfmk/ddb/db_break.c | 997 ++ osfmk/ddb/db_break.h | 238 + osfmk/ddb/db_coff.h | 106 + osfmk/ddb/db_command.c | 713 ++ osfmk/ddb/db_command.h | 181 + osfmk/ddb/db_cond.c | 267 + osfmk/ddb/db_cond.h | 66 + osfmk/ddb/db_examine.c | 935 ++ osfmk/ddb/db_examine.h | 118 + osfmk/ddb/db_expr.c | 573 + osfmk/ddb/db_expr.h | 66 + osfmk/ddb/db_ext_symtab.c | 148 + osfmk/ddb/db_input.c | 822 ++ osfmk/ddb/db_input.h | 61 + osfmk/ddb/db_lex.c | 571 + osfmk/ddb/db_lex.h | 216 + osfmk/ddb/db_macro.c | 298 + osfmk/ddb/db_macro.h | 71 + osfmk/ddb/db_output.c | 348 + osfmk/ddb/db_output.h | 146 + osfmk/ddb/db_print.c | 1061 ++ osfmk/ddb/db_print.h | 207 + osfmk/ddb/db_run.c | 533 + osfmk/ddb/db_run.h | 110 + osfmk/ddb/db_sym.c | 1806 +++ osfmk/ddb/db_sym.h | 488 + osfmk/ddb/db_task_thread.c | 418 + osfmk/ddb/db_task_thread.h | 178 + osfmk/ddb/db_trap.c | 142 + osfmk/ddb/db_trap.h | 76 + osfmk/ddb/db_variables.c | 819 ++ osfmk/ddb/db_variables.h | 250 + osfmk/ddb/db_watch.c | 445 + 
osfmk/ddb/db_watch.h | 162 + osfmk/ddb/db_write_cmd.c | 209 + osfmk/ddb/db_write_cmd.h | 61 + osfmk/ddb/makedis.c | 2383 ++++ osfmk/ddb/nlist.h | 135 + osfmk/ddb/orig/db_print.c | 1380 +++ osfmk/ddb/stab.h | 147 + osfmk/ddb/tr.c | 393 + osfmk/ddb/tr.h | 197 + osfmk/default_pager/Makefile | 138 + osfmk/default_pager/Makefile.template | 43 + osfmk/default_pager/default_pager.c | 976 ++ osfmk/default_pager/default_pager_alerts.defs | 43 + osfmk/default_pager/default_pager_internal.h | 773 ++ osfmk/default_pager/default_pager_object.defs | 137 + osfmk/default_pager/default_pager_types.defs | 113 + osfmk/default_pager/default_pager_types.h | 96 + osfmk/default_pager/diag.h | 102 + osfmk/default_pager/dp_backing_store.c | 3658 ++++++ osfmk/default_pager/dp_memory_object.c | 1447 +++ osfmk/device/Makefile | 57 + osfmk/device/device.defs | 444 + osfmk/device/device_init.c | 87 + osfmk/device/device_port.h | 67 + osfmk/device/device_types.defs | 99 + osfmk/device/device_types.h | 109 + osfmk/device/iokit_rpc.c | 402 + osfmk/device/subrs.c | 353 + osfmk/i386/AT386/asm_startup.h | 269 + osfmk/i386/AT386/autoconf.c | 24 + osfmk/i386/AT386/bbclock.c | 305 + osfmk/i386/AT386/bbclock_entries.h | 31 + osfmk/i386/AT386/conf.c | 82 + osfmk/i386/AT386/config.h | 43 + osfmk/i386/AT386/cram.h | 157 + osfmk/i386/AT386/himem.c | 300 + osfmk/i386/AT386/himem.h | 130 + osfmk/i386/AT386/iso_scan_font.h | 302 + osfmk/i386/AT386/kernBootStruct.h | 183 + osfmk/i386/AT386/machdep.mk | 57 + osfmk/i386/AT386/misc_protos.h | 71 + osfmk/i386/AT386/model_dep.c | 667 + osfmk/i386/AT386/mp/boot.h | 104 + osfmk/i386/AT386/mp/mp.c | 183 + osfmk/i386/AT386/mp/mp.h | 227 + osfmk/i386/AT386/mp/mp_events.h | 40 + osfmk/i386/AT386/mp/mp_v1_1.c | 204 + osfmk/i386/AT386/mp/mp_v1_1.h | 146 + osfmk/i386/AT386/mp/slave_boot.s | 212 + osfmk/i386/AT386/physmem_entries.h | 83 + osfmk/i386/AT386/rtc.h | 226 + osfmk/i386/AT386/video_console.c | 1940 +++ osfmk/i386/AT386/video_console.h | 56 + osfmk/i386/Makefile | 31 
+ osfmk/i386/_setjmp.s | 90 + osfmk/i386/apic.h | 125 + osfmk/i386/arch_types.h | 40 + osfmk/i386/asm.h | 289 + osfmk/i386/ast.h | 68 + osfmk/i386/ast_check.c | 55 + osfmk/i386/ast_types.h | 63 + osfmk/i386/bcopy.s | 111 + osfmk/i386/bsd_i386.c | 471 + osfmk/i386/bzero.s | 104 + osfmk/i386/cpu.c | 70 + osfmk/i386/cpu_data.h | 183 + osfmk/i386/cpu_number.h | 121 + osfmk/i386/cpuid.c | 409 + osfmk/i386/cpuid.h | 164 + osfmk/i386/cswitch.s | 198 + osfmk/i386/db_disasm.c | 1820 +++ osfmk/i386/db_gcc_aout.c | 681 + osfmk/i386/db_interface.c | 1043 ++ osfmk/i386/db_machdep.h | 190 + osfmk/i386/db_trace.c | 817 ++ osfmk/i386/eflags.h | 85 + osfmk/i386/endian.h | 79 + osfmk/i386/exec.h | 95 + osfmk/i386/flipc_page.h | 35 + osfmk/i386/fpu.c | 763 ++ osfmk/i386/fpu.h | 152 + osfmk/i386/gcc.s | 66 + osfmk/i386/gdb_defs.h | 61 + osfmk/i386/gdt.c | 124 + osfmk/i386/genassym.c | 295 + osfmk/i386/hardclock.c | 296 + osfmk/i386/hardclock_entries.h | 50 + osfmk/i386/hi_res_clock.h | 41 + osfmk/i386/hi_res_clock_map.c | 59 + osfmk/i386/hw_lock_types.h | 92 + osfmk/i386/i386_lock.s | 814 ++ osfmk/i386/idt.s | 409 + osfmk/i386/intel_read_fault.h | 39 + osfmk/i386/io_emulate.c | 140 + osfmk/i386/io_emulate.h | 69 + osfmk/i386/io_map.c | 85 + osfmk/i386/io_map_entries.h | 29 + osfmk/i386/io_port.h | 76 + osfmk/i386/iopb.c | 662 + osfmk/i386/iopb.h | 87 + osfmk/i386/iopb_entries.h | 44 + osfmk/i386/ipl.h | 116 + osfmk/i386/ktss.c | 131 + osfmk/i386/ldt.c | 85 + osfmk/i386/lock.h | 342 + osfmk/i386/locore.s | 3294 +++++ osfmk/i386/loose_ends.c | 126 + osfmk/i386/mach_param.h | 58 + osfmk/i386/machdep_call.c | 90 + osfmk/i386/machdep_call.h | 41 + osfmk/i386/machine_routines.c | 148 + osfmk/i386/machine_routines.h | 106 + osfmk/i386/machine_routines_asm.s | 41 + osfmk/i386/machine_rpc.h | 43 + osfmk/i386/machlimits.h | 71 + osfmk/i386/machparam.h | 57 + osfmk/i386/misc_protos.h | 67 + osfmk/i386/mp_desc.c | 314 + osfmk/i386/mp_desc.h | 114 + osfmk/i386/net_filter.c | 1554 +++ 
osfmk/i386/ntoh.h | 68 + osfmk/i386/ntoh.s | 113 + osfmk/i386/pcb.c | 1473 +++ osfmk/i386/phys.c | 169 + osfmk/i386/pic.h | 200 + osfmk/i386/pio.h | 160 + osfmk/i386/pit.h | 126 + osfmk/i386/pmap.c | 2962 +++++ osfmk/i386/pmap.h | 517 + osfmk/i386/proc_reg.h | 253 + osfmk/i386/read_fault.c | 261 + osfmk/i386/rtclock.c | 1071 ++ osfmk/i386/rtclock_entries.h | 46 + osfmk/i386/sched_param.h | 61 + osfmk/i386/seg.h | 205 + osfmk/i386/setjmp.h | 63 + osfmk/i386/setjmp.s | 21 + osfmk/i386/stab.h | 94 + osfmk/i386/start.s | 622 + osfmk/i386/task.h | 56 + osfmk/i386/thread.h | 154 + osfmk/i386/thread_act.h | 171 + osfmk/i386/timer.h | 64 + osfmk/i386/trap.c | 1296 ++ osfmk/i386/trap.h | 117 + osfmk/i386/tss.h | 93 + osfmk/i386/user_ldt.c | 432 + osfmk/i386/user_ldt.h | 85 + osfmk/i386/vm_tuning.h | 61 + osfmk/i386/xpr.h | 73 + osfmk/ipc/Makefile | 25 + osfmk/ipc/ipc_entry.c | 982 ++ osfmk/ipc/ipc_entry.h | 196 + osfmk/ipc/ipc_hash.c | 682 + osfmk/ipc/ipc_hash.h | 132 + osfmk/ipc/ipc_init.c | 225 + osfmk/ipc/ipc_init.h | 124 + osfmk/ipc/ipc_kmsg.c | 3091 +++++ osfmk/ipc/ipc_kmsg.h | 416 + osfmk/ipc/ipc_machdep.h | 93 + osfmk/ipc/ipc_mqueue.c | 1004 ++ osfmk/ipc/ipc_mqueue.h | 184 + osfmk/ipc/ipc_notify.c | 408 + osfmk/ipc/ipc_notify.h | 94 + osfmk/ipc/ipc_object.c | 1066 ++ osfmk/ipc/ipc_object.h | 344 + osfmk/ipc/ipc_port.c | 1931 +++ osfmk/ipc/ipc_port.h | 443 + osfmk/ipc/ipc_print.h | 102 + osfmk/ipc/ipc_pset.c | 361 + osfmk/ipc/ipc_pset.h | 143 + osfmk/ipc/ipc_right.c | 2150 ++++ osfmk/ipc/ipc_right.h | 209 + osfmk/ipc/ipc_space.c | 390 + osfmk/ipc/ipc_space.h | 201 + osfmk/ipc/ipc_splay.c | 994 ++ osfmk/ipc/ipc_splay.h | 138 + osfmk/ipc/ipc_table.c | 297 + osfmk/ipc/ipc_table.h | 235 + osfmk/ipc/ipc_types.h | 68 + osfmk/ipc/mach_debug.c | 580 + osfmk/ipc/mach_msg.c | 1933 +++ osfmk/ipc/mach_port.c | 1790 +++ osfmk/ipc/mig_log.c | 119 + osfmk/ipc/port.h | 86 + osfmk/kdp/kdp.c | 474 + osfmk/kdp/kdp.h | 43 + osfmk/kdp/kdp_en_debugger.h | 37 + osfmk/kdp/kdp_internal.h | 
132 + osfmk/kdp/kdp_private.h | 112 + osfmk/kdp/kdp_protocol.h | 388 + osfmk/kdp/kdp_udp.c | 577 + osfmk/kdp/kdp_udp.h | 120 + osfmk/kdp/ml/i386/kdp_machdep.c | 440 + osfmk/kdp/ml/i386/kdp_vm.c | 51 + osfmk/kdp/ml/ppc/kdp_asm.s | 83 + osfmk/kdp/ml/ppc/kdp_machdep.c | 526 + osfmk/kdp/ml/ppc/kdp_misc.s | 81 + osfmk/kdp/ml/ppc/kdp_vm.c | 224 + osfmk/kdp/pe/POWERMAC/kdp_mace.c | 672 + osfmk/kdp/pe/POWERMAC/kdp_mace.h | 389 + osfmk/kern/Makefile | 50 + osfmk/kern/assert.h | 86 + osfmk/kern/ast.c | 301 + osfmk/kern/ast.h | 192 + osfmk/kern/bits.c | 129 + osfmk/kern/bsd_kern.c | 503 + osfmk/kern/call_entry.h | 67 + osfmk/kern/clock.c | 867 ++ osfmk/kern/clock.h | 273 + osfmk/kern/counters.c | 183 + osfmk/kern/counters.h | 213 + osfmk/kern/cpu_data.c | 50 + osfmk/kern/cpu_data.h | 63 + osfmk/kern/cpu_number.h | 64 + osfmk/kern/debug.c | 211 + osfmk/kern/debug.h | 69 + osfmk/kern/etap.c | 1866 +++ osfmk/kern/etap_macros.h | 440 + osfmk/kern/etap_map.c | 174 + osfmk/kern/etap_map.h | 84 + osfmk/kern/etap_options.h | 102 + osfmk/kern/etap_pool.c | 224 + osfmk/kern/etap_pool.h | 107 + osfmk/kern/exception.c | 317 + osfmk/kern/exception.h | 50 + osfmk/kern/host.c | 679 + osfmk/kern/host.h | 96 + osfmk/kern/host_statistics.h | 48 + osfmk/kern/ipc_clock.c | 191 + osfmk/kern/ipc_host.c | 861 ++ osfmk/kern/ipc_host.h | 159 + osfmk/kern/ipc_kobject.c | 764 ++ osfmk/kern/ipc_kobject.h | 154 + osfmk/kern/ipc_mig.c | 476 + osfmk/kern/ipc_mig.h | 128 + osfmk/kern/ipc_subsystem.c | 178 + osfmk/kern/ipc_subsystem.h | 82 + osfmk/kern/ipc_sync.c | 138 + osfmk/kern/ipc_sync.h | 42 + osfmk/kern/ipc_tt.c | 1741 +++ osfmk/kern/ipc_tt.h | 169 + osfmk/kern/kalloc.c | 619 + osfmk/kern/kalloc.h | 88 + osfmk/kern/kern_print.h | 66 + osfmk/kern/kern_types.h | 97 + osfmk/kern/kmod.c | 799 ++ osfmk/kern/ledger.c | 439 + osfmk/kern/ledger.h | 65 + osfmk/kern/lock.c | 2181 ++++ osfmk/kern/lock.h | 275 + osfmk/kern/lock_mon.c | 415 + osfmk/kern/mach_clock.c | 264 + osfmk/kern/mach_factor.c | 168 + 
osfmk/kern/mach_param.h | 146 + osfmk/kern/machine.c | 506 + osfmk/kern/machine.h | 79 + osfmk/kern/macro_help.h | 129 + osfmk/kern/misc_protos.h | 188 + osfmk/kern/mk_sp.c | 1280 ++ osfmk/kern/mk_sp.h | 131 + osfmk/kern/mk_timer.c | 295 + osfmk/kern/mk_timer.h | 60 + osfmk/kern/norma_protos.h | 110 + osfmk/kern/printf.c | 657 + osfmk/kern/priority.c | 222 + osfmk/kern/processor.c | 1122 ++ osfmk/kern/processor.h | 266 + osfmk/kern/profile.c | 490 + osfmk/kern/profile.h | 190 + osfmk/kern/queue.c | 225 + osfmk/kern/queue.h | 618 + osfmk/kern/sched.h | 266 + osfmk/kern/sched_prim.c | 2678 ++++ osfmk/kern/sched_prim.h | 370 + osfmk/kern/sf.c | 43 + osfmk/kern/sf.h | 330 + osfmk/kern/simple_lock.h | 329 + osfmk/kern/simple_lock_types.h | 275 + osfmk/kern/spl.c | 64 + osfmk/kern/spl.h | 37 + osfmk/kern/sscanf.c | 93 + osfmk/kern/startup.c | 372 + osfmk/kern/startup.h | 82 + osfmk/kern/subsystem.c | 492 + osfmk/kern/subsystem.h | 92 + osfmk/kern/sync_lock.c | 862 ++ osfmk/kern/sync_lock.h | 144 + osfmk/kern/sync_sema.c | 976 ++ osfmk/kern/sync_sema.h | 65 + osfmk/kern/syscall_emulation.c | 526 + osfmk/kern/syscall_emulation.h | 96 + osfmk/kern/syscall_subr.c | 246 + osfmk/kern/syscall_subr.h | 68 + osfmk/kern/syscall_sw.c | 246 + osfmk/kern/syscall_sw.h | 87 + osfmk/kern/task.c | 1768 +++ osfmk/kern/task.h | 290 + osfmk/kern/task_policy.c | 104 + osfmk/kern/task_swap.c | 1471 +++ osfmk/kern/task_swap.h | 129 + osfmk/kern/template.mk | 68 + osfmk/kern/thread.c | 1999 +++ osfmk/kern/thread.h | 634 + osfmk/kern/thread_act.c | 2319 ++++ osfmk/kern/thread_act.h | 514 + osfmk/kern/thread_call.c | 1186 ++ osfmk/kern/thread_call.h | 131 + osfmk/kern/thread_policy.c | 318 + osfmk/kern/thread_pool.c | 290 + osfmk/kern/thread_pool.h | 162 + osfmk/kern/thread_swap.c | 225 + osfmk/kern/thread_swap.h | 195 + osfmk/kern/time_out.h | 90 + osfmk/kern/timer.c | 591 + osfmk/kern/timer.h | 228 + osfmk/kern/timer_call.c | 323 + osfmk/kern/timer_call.h | 82 + osfmk/kern/wait_queue.c | 1093 
++ osfmk/kern/wait_queue.h | 261 + osfmk/kern/xpr.c | 462 + osfmk/kern/xpr.h | 240 + osfmk/kern/zalloc.c | 1801 +++ osfmk/kern/zalloc.h | 197 + osfmk/libsa/Makefile | 24 + osfmk/libsa/ctype.h | 65 + osfmk/libsa/errno.h | 84 + osfmk/libsa/float.h | 45 + osfmk/libsa/i386/float.h | 48 + osfmk/libsa/i386/math.h | 54 + osfmk/libsa/i386/stdarg.h | 66 + osfmk/libsa/i386/types.h | 70 + osfmk/libsa/i386/va_list.h | 79 + osfmk/libsa/ieeefloat.h | 113 + osfmk/libsa/machine/stdarg.h | 35 + osfmk/libsa/machine/stdarg_apple.h | 35 + osfmk/libsa/machine/types.h | 35 + osfmk/libsa/machine/va_list.h | 35 + osfmk/libsa/math.h | 102 + osfmk/libsa/ppc/float.h | 49 + osfmk/libsa/ppc/math.h | 55 + osfmk/libsa/ppc/stdarg.h | 25 + osfmk/libsa/ppc/stdarg_apple.h | 195 + osfmk/libsa/ppc/types.h | 65 + osfmk/libsa/ppc/va_list.h | 126 + osfmk/libsa/stdarg.h | 43 + osfmk/libsa/stdio.h | 70 + osfmk/libsa/stdlib.h | 76 + osfmk/libsa/string.h | 92 + osfmk/libsa/sys/timers.h | 63 + osfmk/libsa/types.h | 90 + osfmk/libsa/va_list.h | 43 + osfmk/mach-o/loader.h | 722 ++ osfmk/mach-o/mach_header.c | 550 + osfmk/mach-o/mach_header.h | 73 + osfmk/mach/AT386/machdep.mk | 35 + osfmk/mach/Makefile | 309 + osfmk/mach/Makefile.template | 174 + osfmk/mach/alert.h | 82 + osfmk/mach/boolean.h | 147 + osfmk/mach/boot_info.h | 253 + osfmk/mach/bootstrap.defs | 79 + osfmk/mach/clock.defs | 73 + osfmk/mach/clock_priv.defs | 62 + osfmk/mach/clock_reply.defs | 48 + osfmk/mach/clock_types.defs | 65 + osfmk/mach/clock_types.h | 124 + osfmk/mach/error.h | 192 + osfmk/mach/etap.h | 276 + osfmk/mach/etap_events.h | 339 + osfmk/mach/events_info.h | 88 + osfmk/mach/exc.defs | 112 + osfmk/mach/exception.h | 65 + osfmk/mach/exception_types.h | 157 + osfmk/mach/flipc_cb.h | 1214 ++ osfmk/mach/flipc_debug.h | 242 + osfmk/mach/flipc_device.h | 98 + osfmk/mach/flipc_locks.h | 174 + osfmk/mach/flipc_types.h | 254 + osfmk/mach/host_info.h | 350 + osfmk/mach/host_priv.defs | 343 + osfmk/mach/host_reboot.h | 48 + 
osfmk/mach/host_security.defs | 92 + osfmk/mach/i386/Makefile | 57 + osfmk/mach/i386/boolean.h | 112 + osfmk/mach/i386/exception.h | 202 + osfmk/mach/i386/flipc_dep.h | 137 + osfmk/mach/i386/fp_reg.h | 184 + osfmk/mach/i386/kern_return.h | 118 + osfmk/mach/i386/mach_i386.defs | 155 + osfmk/mach/i386/mach_i386_types.h | 111 + osfmk/mach/i386/machine_types.defs | 84 + osfmk/mach/i386/ndr_def.h | 69 + osfmk/mach/i386/processor_info.h | 32 + osfmk/mach/i386/rpc.h | 51 + osfmk/mach/i386/syscall_sw.h | 168 + osfmk/mach/i386/thread_state.h | 57 + osfmk/mach/i386/thread_status.h | 380 + osfmk/mach/i386/vm_param.h | 208 + osfmk/mach/i386/vm_types.h | 175 + osfmk/mach/kern_return.h | 316 + osfmk/mach/kmod.h | 157 + osfmk/mach/ledger.defs | 72 + osfmk/mach/lock_set.defs | 72 + osfmk/mach/mach.h | 41 + osfmk/mach/mach_host.defs | 235 + osfmk/mach/mach_interface.h | 60 + osfmk/mach/mach_ioctl.h | 117 + osfmk/mach/mach_norma.defs | 253 + osfmk/mach/mach_param.h | 143 + osfmk/mach/mach_port.defs | 456 + osfmk/mach/mach_syscalls.h | 46 + osfmk/mach/mach_time.h | 54 + osfmk/mach/mach_traps.h | 138 + osfmk/mach/mach_types.defs | 470 + osfmk/mach/mach_types.h | 223 + osfmk/mach/machine.h | 282 + osfmk/mach/machine/Makefile | 28 + osfmk/mach/machine/asm.h | 35 + osfmk/mach/machine/boolean.h | 35 + osfmk/mach/machine/exception.h | 35 + osfmk/mach/machine/kern_return.h | 35 + osfmk/mach/machine/machine_types.defs | 35 + osfmk/mach/machine/ndr_def.h | 35 + osfmk/mach/machine/processor_info.h | 35 + osfmk/mach/machine/rpc.h | 35 + osfmk/mach/machine/syscall_sw.h | 35 + osfmk/mach/machine/thread_state.h | 35 + osfmk/mach/machine/thread_status.h | 35 + osfmk/mach/machine/vm_param.h | 35 + osfmk/mach/machine/vm_types.h | 35 + osfmk/mach/memory_object.defs | 241 + osfmk/mach/memory_object.h | 221 + osfmk/mach/memory_object_control.defs | 219 + osfmk/mach/memory_object_default.defs | 113 + osfmk/mach/memory_object_name.defs | 74 + osfmk/mach/memory_object_types.h | 575 + osfmk/mach/message.h | 
590 + osfmk/mach/mig.h | 55 + osfmk/mach/mig_errors.h | 124 + osfmk/mach/mig_log.h | 110 + osfmk/mach/mk_timer.h | 60 + osfmk/mach/mk_traps.h | 34 + osfmk/mach/msg_type.h | 117 + osfmk/mach/ndr.h | 90 + osfmk/mach/norma_special_ports.h | 110 + osfmk/mach/norma_task.defs | 48 + osfmk/mach/notify.defs | 115 + osfmk/mach/notify.h | 207 + osfmk/mach/policy.h | 227 + osfmk/mach/port.h | 275 + osfmk/mach/ppc/Makefile | 35 + osfmk/mach/ppc/boolean.h | 131 + osfmk/mach/ppc/exception.h | 114 + osfmk/mach/ppc/kern_return.h | 137 + osfmk/mach/ppc/machine_types.defs | 99 + osfmk/mach/ppc/ndr_def.h | 64 + osfmk/mach/ppc/processor_info.h | 170 + osfmk/mach/ppc/rpc.h | 61 + osfmk/mach/ppc/syscall_sw.h | 74 + osfmk/mach/ppc/thread_state.h | 35 + osfmk/mach/ppc/thread_status.h | 226 + osfmk/mach/ppc/vm_param.h | 51 + osfmk/mach/ppc/vm_types.h | 196 + osfmk/mach/processor.defs | 120 + osfmk/mach/processor_info.h | 162 + osfmk/mach/processor_set.defs | 154 + osfmk/mach/prof.defs | 131 + osfmk/mach/prof_types.h | 85 + osfmk/mach/rpc.h | 424 + osfmk/mach/semaphore.defs | 75 + osfmk/mach/semaphore.h | 89 + osfmk/mach/shared_memory_server.h | 224 + osfmk/mach/std_types.defs | 124 + osfmk/mach/std_types.h | 69 + osfmk/mach/sync.defs | 137 + osfmk/mach/sync_policy.h | 35 + osfmk/mach/syscall_sw.h | 107 + osfmk/mach/task.defs | 379 + osfmk/mach/task_info.h | 238 + osfmk/mach/task_ledger.h | 74 + osfmk/mach/task_policy.h | 127 + osfmk/mach/task_special_ports.h | 185 + osfmk/mach/thread_act.defs | 313 + osfmk/mach/thread_info.h | 221 + osfmk/mach/thread_policy.h | 167 + osfmk/mach/thread_special_ports.h | 146 + osfmk/mach/thread_status.h | 171 + osfmk/mach/thread_switch.h | 75 + osfmk/mach/time_value.h | 168 + osfmk/mach/upl.defs | 112 + osfmk/mach/vm_attributes.h | 146 + osfmk/mach/vm_behavior.h | 90 + osfmk/mach/vm_inherit.h | 133 + osfmk/mach/vm_map.defs | 426 + osfmk/mach/vm_param.h | 151 + osfmk/mach/vm_prot.h | 187 + osfmk/mach/vm_region.h | 263 + osfmk/mach/vm_statistics.h | 229 + 
osfmk/mach/vm_sync.h | 102 + osfmk/mach/vm_types.h | 60 + osfmk/mach_debug/Makefile | 31 + osfmk/mach_debug/hash_info.h | 113 + osfmk/mach_debug/ipc_info.h | 162 + osfmk/mach_debug/mach_debug.h | 44 + osfmk/mach_debug/mach_debug_types.defs | 194 + osfmk/mach_debug/mach_debug_types.h | 67 + osfmk/mach_debug/page_info.h | 100 + osfmk/mach_debug/template.mk | 59 + osfmk/mach_debug/vm_info.h | 128 + osfmk/mach_debug/zone_info.h | 143 + osfmk/machine/Makefile | 47 + osfmk/machine/asm.h | 35 + osfmk/machine/ast.h | 35 + osfmk/machine/ast_types.h | 35 + osfmk/machine/cpu_data.h | 35 + osfmk/machine/cpu_number.h | 35 + osfmk/machine/db_machdep.h | 35 + osfmk/machine/disk.h | 35 + osfmk/machine/endian.h | 35 + osfmk/machine/gdb_defs.h | 35 + osfmk/machine/hw_lock_types.h | 35 + osfmk/machine/io_map_entries.h | 35 + osfmk/machine/iobus.h | 35 + osfmk/machine/kgdb_defs.h | 35 + osfmk/machine/kgdb_setjmp.h | 35 + osfmk/machine/lock.h | 35 + osfmk/machine/mach_param.h | 35 + osfmk/machine/machine_routines.h | 35 + osfmk/machine/machine_rpc.h | 35 + osfmk/machine/machlimits.h | 35 + osfmk/machine/machparam.h | 35 + osfmk/machine/pmap.h | 35 + osfmk/machine/sched_param.h | 35 + osfmk/machine/setjmp.h | 35 + osfmk/machine/spl.h | 35 + osfmk/machine/task.h | 35 + osfmk/machine/thread.h | 35 + osfmk/machine/thread_act.h | 35 + osfmk/machine/timer.h | 35 + osfmk/machine/trap.h | 35 + osfmk/machine/vm_tuning.h | 35 + osfmk/machine/xpr.h | 35 + osfmk/ppc/AltiAssist.s | 93 + osfmk/ppc/Diagnostics.c | 164 + osfmk/ppc/Diagnostics.h | 87 + osfmk/ppc/Emulate.s | 65 + osfmk/ppc/Firmware.h | 141 + osfmk/ppc/Firmware.s | 2335 ++++ osfmk/ppc/FirmwareC.c | 281 + osfmk/ppc/FirmwareCalls.h | 93 + osfmk/ppc/MPinterfaces.s | 455 + osfmk/ppc/Makefile | 36 + osfmk/ppc/POWERMAC/dbdma.c | 148 + osfmk/ppc/POWERMAC/dbdma.h | 22 + osfmk/ppc/POWERMAC/mp/MPPlugIn.h | 346 + osfmk/ppc/POWERMAC/mp/MP_2p.s | 2409 ++++ osfmk/ppc/POWERMAC/mp/mp.c | 32 + osfmk/ppc/POWERMAC/mp/mp.h | 47 + 
osfmk/ppc/POWERMAC/scc_8530.h | 422 + osfmk/ppc/POWERMAC/serial_io.c | 632 + osfmk/ppc/POWERMAC/serial_io.h | 152 + osfmk/ppc/POWERMAC/video_console.c | 2223 ++++ osfmk/ppc/POWERMAC/video_console.h | 117 + osfmk/ppc/POWERMAC/video_console_entries.h | 97 + osfmk/ppc/POWERMAC/video_scroll.s | 139 + osfmk/ppc/PPCcalls.c | 42 + osfmk/ppc/PPCcalls.h | 61 + osfmk/ppc/Performance.h | 39 + osfmk/ppc/Performance.s | 119 + osfmk/ppc/PseudoKernel.c | 390 + osfmk/ppc/PseudoKernel.h | 89 + osfmk/ppc/_setjmp.s | 188 + osfmk/ppc/aligned_data.s | 237 + osfmk/ppc/alignment.c | 1039 ++ osfmk/ppc/asm.h | 570 + osfmk/ppc/ast.h | 37 + osfmk/ppc/ast_types.h | 35 + osfmk/ppc/atomic_switch.h | 124 + osfmk/ppc/atomic_switch.s | 251 + osfmk/ppc/bat_init.c | 318 + osfmk/ppc/bcopy.s | 526 + osfmk/ppc/bits.s | 105 + osfmk/ppc/boot.h | 22 + osfmk/ppc/bsd_asm.s | 129 + osfmk/ppc/bsd_ppc.c | 274 + osfmk/ppc/bzero.s | 171 + osfmk/ppc/cache.s | 280 + osfmk/ppc/clock.h | 52 + osfmk/ppc/conf.c | 81 + osfmk/ppc/console_feed.c | 260 + osfmk/ppc/console_feed_entries.h | 42 + osfmk/ppc/cpu.c | 801 ++ osfmk/ppc/cpu_data.h | 50 + osfmk/ppc/cpu_number.h | 30 + osfmk/ppc/cswtch.s | 2638 ++++ osfmk/ppc/db_asm.s | 147 + osfmk/ppc/db_disasm.c | 231 + osfmk/ppc/db_interface.c | 671 + osfmk/ppc/db_low_trace.c | 661 + osfmk/ppc/db_low_trace.h | 74 + osfmk/ppc/db_machdep.h | 198 + osfmk/ppc/db_trace.c | 831 ++ osfmk/ppc/endian.h | 87 + osfmk/ppc/exception.h | 642 + osfmk/ppc/fpu_protos.h | 35 + osfmk/ppc/genassym.c | 939 ++ osfmk/ppc/hardclock_entries.h | 31 + osfmk/ppc/hexfont.h | 295 + osfmk/ppc/hw_counters.h | 80 + osfmk/ppc/hw_exception.s | 1842 +++ osfmk/ppc/hw_lock.s | 1557 +++ osfmk/ppc/hw_lock_types.h | 93 + osfmk/ppc/hw_vm.s | 3161 +++++ osfmk/ppc/interrupt.c | 167 + osfmk/ppc/io_map.c | 95 + osfmk/ppc/io_map_entries.h | 34 + osfmk/ppc/iso_font.h | 302 + osfmk/ppc/lock.h | 73 + osfmk/ppc/low_trace.h | 75 + osfmk/ppc/lowmem_vectors.s | 2459 ++++ osfmk/ppc/mach_param.h | 57 + osfmk/ppc/machine_cpu.h | 48 + 
osfmk/ppc/machine_routines.c | 376 + osfmk/ppc/machine_routines.h | 188 + osfmk/ppc/machine_routines_asm.s | 1123 ++ osfmk/ppc/machine_rpc.h | 45 + osfmk/ppc/machlimits.h | 86 + osfmk/ppc/machparam.h | 80 + osfmk/ppc/mappings.c | 1899 +++ osfmk/ppc/mappings.h | 187 + osfmk/ppc/mem.c | 91 + osfmk/ppc/mem.h | 56 + osfmk/ppc/misc.c | 259 + osfmk/ppc/misc_asm.s | 281 + osfmk/ppc/misc_protos.h | 111 + osfmk/ppc/model_dep.c | 645 + osfmk/ppc/movc.s | 622 + osfmk/ppc/mp.h | 38 + osfmk/ppc/net_filter.c | 750 ++ osfmk/ppc/new_screen.h | 42 + osfmk/ppc/notify_interrupt.c | 209 + osfmk/ppc/pcb.c | 951 ++ osfmk/ppc/pmap.c | 2251 ++++ osfmk/ppc/pmap.h | 170 + osfmk/ppc/pmap_internals.h | 125 + osfmk/ppc/ppc_disasm.i | 220 + osfmk/ppc/ppc_init.c | 325 + osfmk/ppc/ppc_vm_init.c | 343 + osfmk/ppc/proc_reg.h | 744 ++ osfmk/ppc/rtclock.c | 1082 ++ osfmk/ppc/savearea.c | 256 + osfmk/ppc/savearea.h | 43 + osfmk/ppc/savearea_asm.s | 450 + osfmk/ppc/sched_param.h | 64 + osfmk/ppc/screen.h | 319 + osfmk/ppc/screen_switch.h | 135 + osfmk/ppc/serial_console.c | 326 + osfmk/ppc/serial_defs.h | 77 + osfmk/ppc/setjmp.h | 51 + osfmk/ppc/start.s | 832 ++ osfmk/ppc/status.c | 988 ++ osfmk/ppc/stubs.c | 47 + osfmk/ppc/task.h | 57 + osfmk/ppc/testjump.c | 74 + osfmk/ppc/thread.h | 64 + osfmk/ppc/thread_act.h | 135 + osfmk/ppc/trap.c | 756 ++ osfmk/ppc/trap.h | 98 + osfmk/ppc/vm_tuning.h | 29 + osfmk/ppc/vmachmon.c | 1032 ++ osfmk/ppc/vmachmon.h | 215 + osfmk/ppc/vmachmon_asm.s | 714 ++ osfmk/ppc/xpr.h | 30 + osfmk/profiling/Makefile | 51 + osfmk/profiling/i386/Makefile | 24 + osfmk/profiling/i386/profile-asm.s | 1449 +++ osfmk/profiling/i386/profile-md.c | 1192 ++ osfmk/profiling/i386/profile-md.h | 390 + osfmk/profiling/machine/Makefile | 25 + osfmk/profiling/machine/profile-md.h | 35 + osfmk/profiling/ppc/Makefile | 25 + osfmk/profiling/ppc/profile-md.h | 138 + osfmk/profiling/profile-internal.h | 368 + osfmk/profiling/profile-kgmon.c | 384 + osfmk/profiling/profile-mk.c | 239 + 
osfmk/profiling/profile-mk.h | 60 + osfmk/sys/ioctl.h | 118 + osfmk/sys/scsi.h | 43 + osfmk/sys/sdi.h | 499 + osfmk/sys/sdi_edt.h | 43 + osfmk/sys/syslog.h | 197 + osfmk/sys/time.h | 114 + osfmk/sys/tm.h | 102 + osfmk/sys/types.h | 165 + osfmk/sys/varargs.h | 235 + osfmk/sys/version.h | 135 + osfmk/vm/Makefile | 28 + osfmk/vm/bsd_vm.c | 791 ++ osfmk/vm/cpm.h | 61 + osfmk/vm/memory_object.c | 1736 +++ osfmk/vm/memory_object.h | 101 + osfmk/vm/pmap.h | 367 + osfmk/vm/vm_debug.c | 648 + osfmk/vm/vm_debug.h | 44 + osfmk/vm/vm_external.c | 279 + osfmk/vm/vm_external.h | 139 + osfmk/vm/vm_fault.c | 3552 ++++++ osfmk/vm/vm_fault.h | 142 + osfmk/vm/vm_init.c | 118 + osfmk/vm/vm_init.h | 33 + osfmk/vm/vm_kern.c | 975 ++ osfmk/vm/vm_kern.h | 167 + osfmk/vm/vm_map.c | 10299 ++++++++++++++++ osfmk/vm/vm_map.h | 1013 ++ osfmk/vm/vm_object.c | 4513 +++++++ osfmk/vm/vm_object.h | 549 + osfmk/vm/vm_page.h | 445 + osfmk/vm/vm_pageout.c | 3537 ++++++ osfmk/vm/vm_pageout.h | 158 + osfmk/vm/vm_print.h | 66 + osfmk/vm/vm_resident.c | 2374 ++++ osfmk/vm/vm_shared_memory_server.c | 742 ++ osfmk/vm/vm_user.c | 2711 ++++ pexpert/Makefile | 40 + pexpert/conf/MASTER | 88 + pexpert/conf/MASTER.i386 | 15 + pexpert/conf/MASTER.ppc | 17 + pexpert/conf/Makefile | 63 + pexpert/conf/Makefile.i386 | 8 + pexpert/conf/Makefile.ppc | 8 + pexpert/conf/Makefile.template | 111 + pexpert/conf/copyright | 6 + pexpert/conf/files | 14 + pexpert/conf/files.i386 | 15 + pexpert/conf/files.ppc | 7 + pexpert/conf/tools/Makefile | 36 + pexpert/conf/tools/doconf/Makefile | 49 + pexpert/conf/tools/doconf/doconf.csh | 313 + pexpert/conf/tools/newvers/Makefile | 49 + pexpert/conf/tools/newvers/newvers.csh | 33 + pexpert/conf/version.major | 1 + pexpert/conf/version.minor | 1 + pexpert/conf/version.variant | 0 pexpert/gen/bootargs.c | 187 + pexpert/gen/device_tree.c | 468 + pexpert/gen/pe_gen.c | 57 + pexpert/i386/fakePPCDeviceTree.c | 149 + pexpert/i386/fakePPCDeviceTree.h | 60 + pexpert/i386/fakePPCStructs.h | 59 + 
pexpert/i386/kd.c | 1026 ++ pexpert/i386/kd.h | 841 ++ pexpert/i386/kdasm.s | 179 + pexpert/i386/pe_bootargs.c | 30 + pexpert/i386/pe_identify_machine.c | 57 + pexpert/i386/pe_init.c | 292 + pexpert/i386/pe_interrupt.c | 69 + pexpert/i386/pe_kprintf.c | 64 + pexpert/i386/pe_misc.s | 42 + pexpert/i386/pe_spl.c | 78 + pexpert/i386/text_console.c | 363 + pexpert/i386/video_console.h | 56 + pexpert/pexpert/Makefile | 42 + pexpert/pexpert/boot.h | 27 + pexpert/pexpert/device_tree.h | 242 + pexpert/pexpert/i386/Makefile | 27 + pexpert/pexpert/i386/boot.h | 236 + pexpert/pexpert/i386/fb_entries.h | 34 + pexpert/pexpert/i386/kd_entries.h | 103 + pexpert/pexpert/i386/kdsoft.h | 266 + pexpert/pexpert/i386/protos.h | 75 + pexpert/pexpert/machine/Makefile | 26 + pexpert/pexpert/machine/boot.h | 36 + pexpert/pexpert/machine/protos.h | 36 + pexpert/pexpert/pe_images.h | 330 + pexpert/pexpert/pexpert.h | 220 + pexpert/pexpert/ppc/Makefile | 28 + pexpert/pexpert/ppc/boot.h | 77 + pexpert/pexpert/ppc/dbdma.h | 176 + pexpert/pexpert/ppc/interrupts.h | 30 + pexpert/pexpert/ppc/powermac.h | 57 + pexpert/pexpert/ppc/protos.h | 211 + pexpert/pexpert/protos.h | 100 + pexpert/ppc/pe_bootargs.c | 30 + pexpert/ppc/pe_clock_speed.c | 164 + pexpert/ppc/pe_clock_speed_asm.s | 110 + pexpert/ppc/pe_identify_machine.c | 155 + pexpert/ppc/pe_init.c | 287 + pexpert/ppc/pe_kprintf.c | 128 + pexpert/ppc/pe_misc.s | 46 + 2759 files changed, 959794 insertions(+) create mode 100644 APPLE_LICENSE create mode 100644 EXTERNAL_HEADERS/ar.h create mode 100644 EXTERNAL_HEADERS/architecture/byte_order.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/asm_help.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/byte_order.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/cpu.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/desc.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/fpu.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/frame.h create mode 100644 
EXTERNAL_HEADERS/architecture/i386/io.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/reg_help.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/sel.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/table.h create mode 100644 EXTERNAL_HEADERS/architecture/i386/tss.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/asm_help.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/basic_regs.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/byte_order.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/cframe.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/fp_regs.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/macro_help.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/pseudo_inst.h create mode 100644 EXTERNAL_HEADERS/architecture/ppc/reg_help.h create mode 100644 EXTERNAL_HEADERS/bsd/i386/ansi.h create mode 100644 EXTERNAL_HEADERS/bsd/i386/limits.h create mode 100644 EXTERNAL_HEADERS/bsd/ppc/ansi.h create mode 100644 EXTERNAL_HEADERS/bsd/ppc/limits.h create mode 100644 EXTERNAL_HEADERS/mach-o/fat.h create mode 100644 EXTERNAL_HEADERS/mach-o/kld.h create mode 100644 EXTERNAL_HEADERS/mach-o/loader.h create mode 100644 EXTERNAL_HEADERS/mach-o/nlist.h create mode 100644 EXTERNAL_HEADERS/machine/limits.h create mode 100644 EXTERNAL_HEADERS/stdarg.h create mode 100644 EXTERNAL_HEADERS/stdint.h create mode 100644 Makefile create mode 100644 PB.project create mode 100644 README create mode 100755 SETUP/seed_objroot create mode 100644 SETUP/setup.csh create mode 100644 SETUP/setup.sh create mode 100644 bsd/Makefile create mode 100644 bsd/conf/MASTER create mode 100644 bsd/conf/MASTER.i386 create mode 100644 bsd/conf/MASTER.ppc create mode 100644 bsd/conf/Makefile create mode 100644 bsd/conf/Makefile.i386 create mode 100644 bsd/conf/Makefile.ppc create mode 100644 bsd/conf/Makefile.template create mode 100644 bsd/conf/compat_hdrs create mode 100644 bsd/conf/compat_hdrs.awk create mode 100644 bsd/conf/copyright create mode 
100644 bsd/conf/files create mode 100644 bsd/conf/files.i386 create mode 100644 bsd/conf/files.ppc create mode 100644 bsd/conf/machine.awk create mode 100644 bsd/conf/param.c create mode 100644 bsd/conf/tools/Makefile create mode 100644 bsd/conf/tools/doconf/Makefile create mode 100755 bsd/conf/tools/doconf/doconf.csh create mode 100644 bsd/conf/tools/newvers/Makefile create mode 100644 bsd/conf/tools/newvers/newvers.csh create mode 100644 bsd/conf/version.major create mode 100644 bsd/conf/version.minor create mode 100644 bsd/conf/version.variant create mode 100644 bsd/crypto/Makefile create mode 100644 bsd/crypto/blowfish/Makefile create mode 100644 bsd/crypto/blowfish/bf_cbc.c create mode 100644 bsd/crypto/blowfish/bf_cbc_m.c create mode 100644 bsd/crypto/blowfish/bf_enc.c create mode 100644 bsd/crypto/blowfish/bf_locl.h create mode 100644 bsd/crypto/blowfish/bf_pi.h create mode 100644 bsd/crypto/blowfish/bf_skey.c create mode 100644 bsd/crypto/blowfish/blowfish.h create mode 100644 bsd/crypto/cast128/Makefile create mode 100644 bsd/crypto/cast128/cast128.c create mode 100644 bsd/crypto/cast128/cast128.h create mode 100644 bsd/crypto/cast128/cast128_cbc.c create mode 100644 bsd/crypto/cast128/cast128_subkey.h create mode 100644 bsd/crypto/des/Makefile create mode 100644 bsd/crypto/des/des.h create mode 100644 bsd/crypto/des/des_3cbc.c create mode 100644 bsd/crypto/des/des_cbc.c create mode 100644 bsd/crypto/des/des_ecb.c create mode 100644 bsd/crypto/des/des_locl.h create mode 100644 bsd/crypto/des/des_setkey.c create mode 100644 bsd/crypto/des/podd.h create mode 100644 bsd/crypto/des/sk.h create mode 100644 bsd/crypto/des/spr.h create mode 100644 bsd/crypto/rc5/Makefile create mode 100644 bsd/crypto/rc5/rc5.c create mode 100644 bsd/crypto/rc5/rc5.h create mode 100644 bsd/crypto/rc5/rc5_cbc.c create mode 100644 bsd/crypto/sha1.c create mode 100644 bsd/crypto/sha1.h create mode 100644 bsd/dev/Makefile create mode 100644 bsd/dev/busvar.h create mode 100644 
bsd/dev/disk.h create mode 100644 bsd/dev/disk_label.h create mode 100644 bsd/dev/i386/conf.c create mode 100644 bsd/dev/i386/cons.c create mode 100644 bsd/dev/i386/cons.h create mode 100644 bsd/dev/i386/kern_machdep.c create mode 100644 bsd/dev/i386/km.c create mode 100644 bsd/dev/i386/lock_stubs.c create mode 100644 bsd/dev/i386/mem.c create mode 100644 bsd/dev/i386/memmove.c create mode 100644 bsd/dev/i386/pci_device.h create mode 100644 bsd/dev/i386/pio.h create mode 100644 bsd/dev/i386/sel.h create mode 100644 bsd/dev/i386/sel_inline.h create mode 100644 bsd/dev/i386/stubs.c create mode 100644 bsd/dev/i386/table_inline.h create mode 100644 bsd/dev/i386/unix_signal.c create mode 100644 bsd/dev/i386/unix_startup.c create mode 100644 bsd/dev/kmreg_com.h create mode 100644 bsd/dev/ldd.h create mode 100644 bsd/dev/ppc/conf.c create mode 100644 bsd/dev/ppc/cons.c create mode 100644 bsd/dev/ppc/cons.h create mode 100644 bsd/dev/ppc/ffs.c create mode 100644 bsd/dev/ppc/ffs.s create mode 100644 bsd/dev/ppc/kern_machdep.c create mode 100644 bsd/dev/ppc/km.c create mode 100644 bsd/dev/ppc/machdep.c create mode 100644 bsd/dev/ppc/mem.c create mode 100644 bsd/dev/ppc/memmove.c create mode 100644 bsd/dev/ppc/nvram.c create mode 100644 bsd/dev/ppc/ppc_init.c create mode 100644 bsd/dev/ppc/stubs.c create mode 100644 bsd/dev/ppc/systemcalls.c create mode 100644 bsd/dev/ppc/unix_signal.c create mode 100644 bsd/dev/ppc/unix_startup.c create mode 100644 bsd/dev/ppc/xsumas.s create mode 100644 bsd/hfs/MacOSStubs.c create mode 100644 bsd/hfs/Makefile create mode 100644 bsd/hfs/hfs.h create mode 100644 bsd/hfs/hfs_btreeio.c create mode 100644 bsd/hfs/hfs_dbg.h create mode 100644 bsd/hfs/hfs_encodings.c create mode 100644 bsd/hfs/hfs_encodings.h create mode 100644 bsd/hfs/hfs_endian.c create mode 100644 bsd/hfs/hfs_endian.h create mode 100644 bsd/hfs/hfs_format.h create mode 100644 bsd/hfs/hfs_link.c create mode 100644 bsd/hfs/hfs_lockf.c create mode 100644 bsd/hfs/hfs_lockf.h create 
mode 100644 bsd/hfs/hfs_lookup.c create mode 100644 bsd/hfs/hfs_macos_defs.h create mode 100644 bsd/hfs/hfs_mount.h create mode 100644 bsd/hfs/hfs_readwrite.c create mode 100644 bsd/hfs/hfs_search.c create mode 100644 bsd/hfs/hfs_vfsops.c create mode 100644 bsd/hfs/hfs_vfsutils.c create mode 100644 bsd/hfs/hfs_vhash.c create mode 100644 bsd/hfs/hfs_vnodeops.c create mode 100644 bsd/hfs/hfscommon/BTree/BTree.c create mode 100644 bsd/hfs/hfscommon/BTree/BTreeAllocate.c create mode 100644 bsd/hfs/hfscommon/BTree/BTreeMiscOps.c create mode 100644 bsd/hfs/hfscommon/BTree/BTreeNodeOps.c create mode 100644 bsd/hfs/hfscommon/BTree/BTreeTreeOps.c create mode 100644 bsd/hfs/hfscommon/Catalog/Catalog.c create mode 100644 bsd/hfs/hfscommon/Catalog/CatalogIterators.c create mode 100644 bsd/hfs/hfscommon/Catalog/CatalogUtilities.c create mode 100644 bsd/hfs/hfscommon/Catalog/FileIDsServices.c create mode 100644 bsd/hfs/hfscommon/Makefile create mode 100644 bsd/hfs/hfscommon/Misc/BTreeWrapper.c create mode 100644 bsd/hfs/hfscommon/Misc/FileExtentMapping.c create mode 100644 bsd/hfs/hfscommon/Misc/GenericMRUCache.c create mode 100644 bsd/hfs/hfscommon/Misc/VolumeAllocation.c create mode 100644 bsd/hfs/hfscommon/Misc/VolumeRequests.c create mode 100644 bsd/hfs/hfscommon/Unicode/UCStringCompareData.h create mode 100644 bsd/hfs/hfscommon/Unicode/UnicodeWrappers.c create mode 100644 bsd/hfs/hfscommon/headers/BTreesInternal.h create mode 100644 bsd/hfs/hfscommon/headers/BTreesPrivate.h create mode 100644 bsd/hfs/hfscommon/headers/CatalogPrivate.h create mode 100644 bsd/hfs/hfscommon/headers/FileMgrInternal.h create mode 100644 bsd/hfs/hfscommon/headers/HFSInstrumentation.h create mode 100644 bsd/hfs/hfscommon/headers/HFSUnicodeWrappers.h create mode 100644 bsd/hfs/hfscommon/headers/Makefile create mode 100644 bsd/i386/Makefile create mode 100644 bsd/i386/cpu.h create mode 100644 bsd/i386/disklabel.h create mode 100644 bsd/i386/endian.h create mode 100644 bsd/i386/exec.h create mode 
100644 bsd/i386/label_t.h create mode 100644 bsd/i386/param.h create mode 100644 bsd/i386/profile.h create mode 100644 bsd/i386/psl.h create mode 100644 bsd/i386/ptrace.h create mode 100644 bsd/i386/reboot.h create mode 100644 bsd/i386/reg.h create mode 100644 bsd/i386/setjmp.h create mode 100644 bsd/i386/signal.h create mode 100644 bsd/i386/spl.h create mode 100644 bsd/i386/table.h create mode 100644 bsd/i386/types.h create mode 100644 bsd/i386/user.h create mode 100644 bsd/i386/vmparam.h create mode 100644 bsd/if/ppc/if_en.c create mode 100644 bsd/if/ppc/if_en.h create mode 100644 bsd/if/ppc/mace.c create mode 100644 bsd/if/ppc/mace.h create mode 100644 bsd/include/Makefile create mode 100644 bsd/include/ar.h create mode 100644 bsd/include/arpa/Makefile create mode 100644 bsd/include/arpa/ftp.h create mode 100644 bsd/include/arpa/inet.h create mode 100644 bsd/include/arpa/nameser.h create mode 100644 bsd/include/arpa/nameser_compat.h create mode 100644 bsd/include/arpa/telnet.h create mode 100644 bsd/include/arpa/tftp.h create mode 100644 bsd/include/bitstring.h create mode 100644 bsd/include/c.h create mode 100644 bsd/include/ctype.h create mode 100644 bsd/include/db.h create mode 100644 bsd/include/dirent.h create mode 100644 bsd/include/disktab.h create mode 100644 bsd/include/err.h create mode 100644 bsd/include/errno.h create mode 100644 bsd/include/fcntl.h create mode 100644 bsd/include/fnmatch.h create mode 100644 bsd/include/fsproperties.h create mode 100644 bsd/include/fstab.h create mode 100644 bsd/include/fts.h create mode 100644 bsd/include/glob.h create mode 100644 bsd/include/grp.h create mode 100644 bsd/include/kvm.h create mode 100644 bsd/include/limits.h create mode 100644 bsd/include/locale.h create mode 100644 bsd/include/math.h create mode 100644 bsd/include/memory.h create mode 100644 bsd/include/mpool.h create mode 100644 bsd/include/ndbm.h create mode 100644 bsd/include/netdb.h create mode 100644 bsd/include/nlist.h create mode 100644 
bsd/include/paths.h create mode 100644 bsd/include/protocols/Makefile create mode 100644 bsd/include/protocols/dumprestore.h create mode 100644 bsd/include/protocols/routed.h create mode 100644 bsd/include/protocols/rwhod.h create mode 100644 bsd/include/protocols/talkd.h create mode 100644 bsd/include/protocols/timed.h create mode 100644 bsd/include/pwd.h create mode 100644 bsd/include/ranlib.h create mode 100644 bsd/include/regex.h create mode 100644 bsd/include/regexp.h create mode 100644 bsd/include/resolv.h.kame create mode 100644 bsd/include/rune.h create mode 100644 bsd/include/runetype.h create mode 100644 bsd/include/semaphore.h create mode 100644 bsd/include/setjmp.h create mode 100644 bsd/include/sgtty.h create mode 100644 bsd/include/signal.h create mode 100644 bsd/include/stab.h create mode 100644 bsd/include/stddef.h create mode 100644 bsd/include/stdio.h create mode 100644 bsd/include/stdlib.h create mode 100644 bsd/include/string.h create mode 100644 bsd/include/strings.h create mode 100644 bsd/include/struct.h create mode 100644 bsd/include/sysexits.h create mode 100644 bsd/include/syslog.h create mode 100644 bsd/include/tar.h create mode 100644 bsd/include/termios.h create mode 100644 bsd/include/time.h create mode 100644 bsd/include/ttyent.h create mode 100644 bsd/include/tzfile.h create mode 100644 bsd/include/unistd.h create mode 100644 bsd/include/util.h create mode 100644 bsd/include/utime.h create mode 100644 bsd/include/utmp.h create mode 100644 bsd/include/vis.h create mode 100644 bsd/isofs/Makefile create mode 100644 bsd/isofs/cd9660/Makefile create mode 100644 bsd/isofs/cd9660/TODO create mode 100644 bsd/isofs/cd9660/TODO.hibler create mode 100644 bsd/isofs/cd9660/cd9660_bmap.c create mode 100644 bsd/isofs/cd9660/cd9660_lookup.c create mode 100644 bsd/isofs/cd9660/cd9660_mount.h create mode 100644 bsd/isofs/cd9660/cd9660_node.c create mode 100644 bsd/isofs/cd9660/cd9660_node.h create mode 100644 bsd/isofs/cd9660/cd9660_rrip.c create mode 
100644 bsd/isofs/cd9660/cd9660_rrip.h create mode 100644 bsd/isofs/cd9660/cd9660_util.c create mode 100644 bsd/isofs/cd9660/cd9660_vfsops.c create mode 100644 bsd/isofs/cd9660/cd9660_vnops.c create mode 100644 bsd/isofs/cd9660/iso.h create mode 100644 bsd/isofs/cd9660/iso_rrip.h create mode 100644 bsd/kern/ast.h create mode 100644 bsd/kern/bsd_init.c create mode 100644 bsd/kern/bsd_stubs.c create mode 100644 bsd/kern/init_sysent.c create mode 100644 bsd/kern/kdebug.c create mode 100644 bsd/kern/kern_acct.c create mode 100644 bsd/kern/kern_clock.c create mode 100644 bsd/kern/kern_core.c create mode 100644 bsd/kern/kern_descrip.c create mode 100644 bsd/kern/kern_event.c create mode 100644 bsd/kern/kern_exec.c create mode 100644 bsd/kern/kern_exit.c create mode 100644 bsd/kern/kern_fork.c create mode 100644 bsd/kern/kern_ktrace.c create mode 100644 bsd/kern/kern_lock.c create mode 100644 bsd/kern/kern_malloc.c create mode 100644 bsd/kern/kern_mib.c create mode 100644 bsd/kern/kern_mman.c create mode 100644 bsd/kern/kern_newsysctl.c create mode 100644 bsd/kern/kern_pcsamples.c create mode 100644 bsd/kern/kern_physio.c create mode 100644 bsd/kern/kern_proc.c create mode 100644 bsd/kern/kern_prot.c create mode 100644 bsd/kern/kern_resource.c create mode 100644 bsd/kern/kern_shutdown.c create mode 100644 bsd/kern/kern_sig.c create mode 100644 bsd/kern/kern_subr.c create mode 100644 bsd/kern/kern_symfile.c create mode 100644 bsd/kern/kern_synch.c create mode 100644 bsd/kern/kern_sysctl.c create mode 100644 bsd/kern/kern_time.c create mode 100644 bsd/kern/kern_xxx.c create mode 100644 bsd/kern/mach_fat.c create mode 100644 bsd/kern/mach_header.c create mode 100644 bsd/kern/mach_header.h create mode 100644 bsd/kern/mach_loader.c create mode 100644 bsd/kern/mach_loader.h create mode 100644 bsd/kern/mach_process.c create mode 100644 bsd/kern/md5c.c create mode 100644 bsd/kern/parallel.c create mode 100644 bsd/kern/parallel.h create mode 100644 bsd/kern/posix_sem.c create mode 
100644 bsd/kern/posix_shm.c create mode 100644 bsd/kern/preload.h create mode 100644 bsd/kern/qsort.c create mode 100644 bsd/kern/spl.c create mode 100644 bsd/kern/subr_disk.c create mode 100644 bsd/kern/subr_log.c create mode 100644 bsd/kern/subr_prf.c create mode 100644 bsd/kern/subr_prof.c create mode 100644 bsd/kern/subr_xxx.c create mode 100644 bsd/kern/sys_domain.c create mode 100644 bsd/kern/sys_generic.c create mode 100644 bsd/kern/sys_socket.c create mode 100644 bsd/kern/syscalls.c create mode 100644 bsd/kern/sysctl_init.c create mode 100644 bsd/kern/sysv_ipc.c create mode 100644 bsd/kern/sysv_msg.c create mode 100644 bsd/kern/sysv_sem.c create mode 100644 bsd/kern/sysv_shm.c create mode 100644 bsd/kern/tty.c create mode 100644 bsd/kern/tty_compat.c create mode 100644 bsd/kern/tty_conf.c create mode 100644 bsd/kern/tty_pty.c create mode 100644 bsd/kern/tty_subr.c create mode 100644 bsd/kern/tty_tb.c create mode 100644 bsd/kern/tty_tty.c create mode 100644 bsd/kern/ubc_subr.c create mode 100644 bsd/kern/uipc_domain.c create mode 100644 bsd/kern/uipc_mbuf.c create mode 100644 bsd/kern/uipc_mbuf2.c create mode 100644 bsd/kern/uipc_proto.c create mode 100644 bsd/kern/uipc_socket.c create mode 100644 bsd/kern/uipc_socket2.c create mode 100644 bsd/kern/uipc_syscalls.c create mode 100644 bsd/kern/uipc_usrreq.c create mode 100644 bsd/libkern/Makefile create mode 100644 bsd/libkern/bcd.c create mode 100644 bsd/libkern/bcmp.c create mode 100644 bsd/libkern/inet_ntoa.c create mode 100644 bsd/libkern/libkern.h create mode 100644 bsd/libkern/locc.c create mode 100644 bsd/libkern/random.c create mode 100644 bsd/libkern/rindex.c create mode 100644 bsd/libkern/scanc.c create mode 100644 bsd/libkern/skpc.c create mode 100644 bsd/libkern/strtol.c create mode 100644 bsd/machine/Makefile create mode 100644 bsd/machine/ansi.h create mode 100644 bsd/machine/byte_order.h create mode 100644 bsd/machine/cons.h create mode 100644 bsd/machine/cpu.h create mode 100644 
bsd/machine/disklabel.h create mode 100644 bsd/machine/endian.h create mode 100644 bsd/machine/exec.h create mode 100644 bsd/machine/label_t.h create mode 100644 bsd/machine/param.h create mode 100644 bsd/machine/proc.h create mode 100644 bsd/machine/profile.h create mode 100644 bsd/machine/psl.h create mode 100644 bsd/machine/ptrace.h create mode 100644 bsd/machine/reboot.h create mode 100644 bsd/machine/reg.h create mode 100644 bsd/machine/setjmp.h create mode 100644 bsd/machine/signal.h create mode 100644 bsd/machine/spl.h create mode 100644 bsd/machine/table.h create mode 100644 bsd/machine/trap.h create mode 100644 bsd/machine/types.h create mode 100644 bsd/machine/unix_traps.h create mode 100644 bsd/machine/user.h create mode 100644 bsd/machine/vmparam.h create mode 100644 bsd/miscfs/Makefile create mode 100644 bsd/miscfs/deadfs/dead_vnops.c create mode 100644 bsd/miscfs/devfs/Makefile create mode 100644 bsd/miscfs/devfs/README create mode 100644 bsd/miscfs/devfs/devfs.h create mode 100644 bsd/miscfs/devfs/devfs_proto.h create mode 100644 bsd/miscfs/devfs/devfs_tree.c create mode 100644 bsd/miscfs/devfs/devfs_vfsops.c create mode 100644 bsd/miscfs/devfs/devfs_vnops.c create mode 100644 bsd/miscfs/devfs/devfsdefs.h create mode 100644 bsd/miscfs/devfs/index.html create mode 100644 bsd/miscfs/devfs/reproto.sh create mode 100644 bsd/miscfs/fdesc/Makefile create mode 100644 bsd/miscfs/fdesc/fdesc.h create mode 100644 bsd/miscfs/fdesc/fdesc_vfsops.c create mode 100644 bsd/miscfs/fdesc/fdesc_vnops.c create mode 100644 bsd/miscfs/fifofs/fifo.h create mode 100644 bsd/miscfs/fifofs/fifo_vnops.c create mode 100644 bsd/miscfs/kernfs/kernfs.h create mode 100644 bsd/miscfs/kernfs/kernfs_vfsops.c create mode 100644 bsd/miscfs/kernfs/kernfs_vnops.c create mode 100644 bsd/miscfs/nullfs/null.h create mode 100644 bsd/miscfs/nullfs/null_subr.c create mode 100644 bsd/miscfs/nullfs/null_vfsops.c create mode 100644 bsd/miscfs/nullfs/null_vnops.c create mode 100644 
bsd/miscfs/portal/portal.h create mode 100644 bsd/miscfs/portal/portal_vfsops.c create mode 100644 bsd/miscfs/portal/portal_vnops.c create mode 100644 bsd/miscfs/procfs/procfs.h create mode 100644 bsd/miscfs/procfs/procfs_ctl.c create mode 100644 bsd/miscfs/procfs/procfs_fpregs.c create mode 100644 bsd/miscfs/procfs/procfs_mem.c create mode 100644 bsd/miscfs/procfs/procfs_note.c create mode 100644 bsd/miscfs/procfs/procfs_regs.c create mode 100644 bsd/miscfs/procfs/procfs_status.c create mode 100644 bsd/miscfs/procfs/procfs_subr.c create mode 100644 bsd/miscfs/procfs/procfs_vfsops.c create mode 100644 bsd/miscfs/procfs/procfs_vnops.c create mode 100644 bsd/miscfs/specfs/Makefile create mode 100644 bsd/miscfs/specfs/spec_vnops.c create mode 100644 bsd/miscfs/specfs/specdev.h create mode 100644 bsd/miscfs/synthfs/synthfs.h create mode 100644 bsd/miscfs/synthfs/synthfs_util.c create mode 100644 bsd/miscfs/synthfs/synthfs_vfsops.c create mode 100644 bsd/miscfs/synthfs/synthfs_vnops.c create mode 100644 bsd/miscfs/umapfs/umap.h create mode 100644 bsd/miscfs/umapfs/umap_subr.c create mode 100644 bsd/miscfs/umapfs/umap_vfsops.c create mode 100644 bsd/miscfs/umapfs/umap_vnops.c create mode 100644 bsd/miscfs/union/Makefile create mode 100644 bsd/miscfs/union/union.h create mode 100644 bsd/miscfs/union/union_subr.c create mode 100644 bsd/miscfs/union/union_vfsops.c create mode 100644 bsd/miscfs/union/union_vnops.c create mode 100644 bsd/miscfs/volfs/volfs.h create mode 100644 bsd/miscfs/volfs/volfs_vfsops.c create mode 100644 bsd/miscfs/volfs/volfs_vnops.c create mode 100644 bsd/net/Makefile create mode 100644 bsd/net/bpf.c create mode 100644 bsd/net/bpf.h create mode 100644 bsd/net/bpf_compat.h create mode 100644 bsd/net/bpf_filter.c create mode 100644 bsd/net/bpfdesc.h create mode 100644 bsd/net/bridge.c create mode 100644 bsd/net/bridge.h create mode 100644 bsd/net/bsd_comp.c create mode 100644 bsd/net/dlil.c create mode 100644 bsd/net/dlil.h create mode 100644 
bsd/net/dlil_ethersubr.c create mode 100644 bsd/net/dlil_pvt.h create mode 100644 bsd/net/ether_at_pr_module.c create mode 100644 bsd/net/ether_if_module.c create mode 100644 bsd/net/ether_inet6_pr_module.c create mode 100644 bsd/net/ether_inet_pr_module.c create mode 100644 bsd/net/etherdefs.h create mode 100644 bsd/net/ethernet.h create mode 100644 bsd/net/hostcache.c create mode 100644 bsd/net/hostcache.h create mode 100644 bsd/net/if.c create mode 100644 bsd/net/if.h create mode 100644 bsd/net/if_arp.h create mode 100644 bsd/net/if_atm.h create mode 100644 bsd/net/if_atmsubr.c create mode 100644 bsd/net/if_blue.c create mode 100644 bsd/net/if_blue.h create mode 100644 bsd/net/if_disc.c create mode 100644 bsd/net/if_dl.h create mode 100644 bsd/net/if_dummy.c create mode 100644 bsd/net/if_ether.c create mode 100644 bsd/net/if_ethersubr.c create mode 100644 bsd/net/if_faith.c create mode 100644 bsd/net/if_fddisubr.c create mode 100644 bsd/net/if_gif.c create mode 100644 bsd/net/if_gif.h create mode 100644 bsd/net/if_llc.h create mode 100644 bsd/net/if_loop.c create mode 100644 bsd/net/if_media.c create mode 100644 bsd/net/if_media.h create mode 100644 bsd/net/if_mib.c create mode 100644 bsd/net/if_mib.h create mode 100644 bsd/net/if_ppp.h create mode 100644 bsd/net/if_pppvar.h create mode 100644 bsd/net/if_sl.c create mode 100644 bsd/net/if_slvar.h create mode 100644 bsd/net/if_sppp.h create mode 100644 bsd/net/if_spppsubr.c create mode 100644 bsd/net/if_tun.c create mode 100644 bsd/net/if_tun.h create mode 100644 bsd/net/if_tunvar.h create mode 100644 bsd/net/if_types.h create mode 100644 bsd/net/if_var.h create mode 100644 bsd/net/if_vlan.c create mode 100644 bsd/net/if_vlan_var.h create mode 100644 bsd/net/kext_net.c create mode 100644 bsd/net/kext_net.h create mode 100644 bsd/net/ndrv.c create mode 100644 bsd/net/ndrv.h create mode 100644 bsd/net/net_osdep.c create mode 100644 bsd/net/net_osdep.h create mode 100644 bsd/net/netisr.c create mode 100644 
bsd/net/netisr.h create mode 100644 bsd/net/pfkeyv2.h create mode 100644 bsd/net/ppp_comp.h create mode 100644 bsd/net/ppp_deflate.c create mode 100644 bsd/net/ppp_defs.h create mode 100644 bsd/net/radix.c create mode 100644 bsd/net/radix.h create mode 100644 bsd/net/raw_cb.c create mode 100644 bsd/net/raw_cb.h create mode 100644 bsd/net/raw_usrreq.c create mode 100644 bsd/net/route.c create mode 100644 bsd/net/route.h create mode 100644 bsd/net/rtsock.c create mode 100644 bsd/net/rtsock_mip.c create mode 100644 bsd/net/slcompress.c create mode 100644 bsd/net/slcompress.h create mode 100644 bsd/net/slip.h create mode 100644 bsd/net/tokendefs.h create mode 100644 bsd/net/tokensr.h create mode 100644 bsd/net/zlib.c create mode 100644 bsd/net/zlib.h create mode 100644 bsd/netat/Makefile create mode 100644 bsd/netat/adsp.c create mode 100644 bsd/netat/adsp.h create mode 100644 bsd/netat/adsp_CLDeny.c create mode 100644 bsd/netat/adsp_CLListen.c create mode 100644 bsd/netat/adsp_Close.c create mode 100644 bsd/netat/adsp_Control.c create mode 100644 bsd/netat/adsp_Init.c create mode 100644 bsd/netat/adsp_InitGlobals.c create mode 100644 bsd/netat/adsp_NewCID.c create mode 100644 bsd/netat/adsp_Open.c create mode 100644 bsd/netat/adsp_Options.c create mode 100644 bsd/netat/adsp_Packet.c create mode 100644 bsd/netat/adsp_Read.c create mode 100644 bsd/netat/adsp_RxAttn.c create mode 100644 bsd/netat/adsp_RxData.c create mode 100644 bsd/netat/adsp_Status.c create mode 100644 bsd/netat/adsp_Timer.c create mode 100644 bsd/netat/adsp_TimerElem.c create mode 100644 bsd/netat/adsp_Write.c create mode 100644 bsd/netat/adsp_attention.c create mode 100644 bsd/netat/adsp_internal.h create mode 100644 bsd/netat/adsp_misc.c create mode 100644 bsd/netat/adsp_reset.c create mode 100644 bsd/netat/adsp_stream.c create mode 100644 bsd/netat/appletalk.h create mode 100644 bsd/netat/asp.h create mode 100644 bsd/netat/asp_proto.c create mode 100644 bsd/netat/at.c create mode 100644 
bsd/netat/at_aarp.h create mode 100644 bsd/netat/at_config.h create mode 100644 bsd/netat/at_ddp_brt.h create mode 100644 bsd/netat/at_pat.h create mode 100644 bsd/netat/at_pcb.c create mode 100644 bsd/netat/at_pcb.h create mode 100644 bsd/netat/at_proto.c create mode 100644 bsd/netat/at_snmp.h create mode 100644 bsd/netat/at_var.h create mode 100644 bsd/netat/atalk.exp create mode 100644 bsd/netat/atalk.imp create mode 100644 bsd/netat/atp.h create mode 100644 bsd/netat/atp_alloc.c create mode 100644 bsd/netat/atp_misc.c create mode 100644 bsd/netat/atp_open.c create mode 100644 bsd/netat/atp_read.c create mode 100644 bsd/netat/atp_write.c create mode 100644 bsd/netat/aurp.h create mode 100644 bsd/netat/aurp_aurpd.c create mode 100644 bsd/netat/aurp_cfg.c create mode 100644 bsd/netat/aurp_gdata.c create mode 100644 bsd/netat/aurp_misc.c create mode 100644 bsd/netat/aurp_open.c create mode 100644 bsd/netat/aurp_rd.c create mode 100644 bsd/netat/aurp_ri.c create mode 100644 bsd/netat/aurp_rx.c create mode 100644 bsd/netat/aurp_tickle.c create mode 100644 bsd/netat/aurp_tx.c create mode 100644 bsd/netat/aurp_zi.c create mode 100644 bsd/netat/ddp.c create mode 100644 bsd/netat/ddp.h create mode 100644 bsd/netat/ddp.save create mode 100644 bsd/netat/ddp_aarp.c create mode 100644 bsd/netat/ddp_aep.c create mode 100644 bsd/netat/ddp_brt.c create mode 100644 bsd/netat/ddp_lap.c create mode 100644 bsd/netat/ddp_nbp.c create mode 100644 bsd/netat/ddp_proto.c create mode 100644 bsd/netat/ddp_r_rtmp.c create mode 100644 bsd/netat/ddp_r_zip.c create mode 100644 bsd/netat/ddp_rtmp.c create mode 100644 bsd/netat/ddp_rtmptable.c create mode 100644 bsd/netat/ddp_sip.c create mode 100644 bsd/netat/ddp_usrreq.c create mode 100644 bsd/netat/debug.h create mode 100644 bsd/netat/drv_dep.c create mode 100644 bsd/netat/ep.h create mode 100644 bsd/netat/lap.h create mode 100644 bsd/netat/nbp.h create mode 100644 bsd/netat/pap.h create mode 100644 bsd/netat/routing_tables.h create mode 
100644 bsd/netat/rtmp.h create mode 100644 bsd/netat/sys_dep.c create mode 100644 bsd/netat/sys_glue.c create mode 100644 bsd/netat/sysglue.h create mode 100644 bsd/netat/zip.h create mode 100644 bsd/netccitt/Makefile create mode 100644 bsd/netccitt/ccitt_proto.c create mode 100644 bsd/netccitt/dll.h create mode 100644 bsd/netccitt/hd_debug.c create mode 100644 bsd/netccitt/hd_input.c create mode 100644 bsd/netccitt/hd_output.c create mode 100644 bsd/netccitt/hd_subr.c create mode 100644 bsd/netccitt/hd_timer.c create mode 100644 bsd/netccitt/hd_var.h create mode 100644 bsd/netccitt/hdlc.h create mode 100644 bsd/netccitt/if_x25subr.c create mode 100644 bsd/netccitt/llc_input.c create mode 100644 bsd/netccitt/llc_output.c create mode 100644 bsd/netccitt/llc_subr.c create mode 100644 bsd/netccitt/llc_timer.c create mode 100644 bsd/netccitt/llc_var.h create mode 100644 bsd/netccitt/pk.h create mode 100644 bsd/netccitt/pk_acct.c create mode 100644 bsd/netccitt/pk_debug.c create mode 100644 bsd/netccitt/pk_input.c create mode 100644 bsd/netccitt/pk_llcsubr.c create mode 100644 bsd/netccitt/pk_output.c create mode 100644 bsd/netccitt/pk_subr.c create mode 100644 bsd/netccitt/pk_timer.c create mode 100644 bsd/netccitt/pk_usrreq.c create mode 100644 bsd/netccitt/pk_var.h create mode 100644 bsd/netccitt/x25.h create mode 100644 bsd/netccitt/x25_sockaddr.h create mode 100644 bsd/netccitt/x25acct.h create mode 100644 bsd/netccitt/x25err.h create mode 100644 bsd/netinet/Makefile create mode 100644 bsd/netinet/bootp.h create mode 100644 bsd/netinet/fil.c create mode 100644 bsd/netinet/icmp6.h create mode 100644 bsd/netinet/icmp_var.h create mode 100644 bsd/netinet/if_atm.c create mode 100644 bsd/netinet/if_atm.h create mode 100644 bsd/netinet/if_ether.c create mode 100644 bsd/netinet/if_ether.h create mode 100644 bsd/netinet/if_fddi.h create mode 100644 bsd/netinet/if_tun.h create mode 100644 bsd/netinet/igmp.c create mode 100644 bsd/netinet/igmp.h create mode 100644 
bsd/netinet/igmp_var.h create mode 100644 bsd/netinet/in.c create mode 100644 bsd/netinet/in.h create mode 100644 bsd/netinet/in_bootp.c create mode 100644 bsd/netinet/in_cksum.c create mode 100644 bsd/netinet/in_gif.c create mode 100644 bsd/netinet/in_gif.h create mode 100644 bsd/netinet/in_hostcache.c create mode 100644 bsd/netinet/in_hostcache.h create mode 100644 bsd/netinet/in_pcb.c create mode 100644 bsd/netinet/in_pcb.h create mode 100644 bsd/netinet/in_proto.c create mode 100644 bsd/netinet/in_rmx.c create mode 100644 bsd/netinet/in_systm.h create mode 100644 bsd/netinet/in_var.h create mode 100644 bsd/netinet/ip.h create mode 100644 bsd/netinet/ip6.h create mode 100644 bsd/netinet/ip_auth.c create mode 100644 bsd/netinet/ip_auth.h create mode 100644 bsd/netinet/ip_compat.h create mode 100644 bsd/netinet/ip_divert.c create mode 100644 bsd/netinet/ip_dummynet.c create mode 100644 bsd/netinet/ip_dummynet.h create mode 100644 bsd/netinet/ip_ecn.c create mode 100644 bsd/netinet/ip_ecn.h create mode 100644 bsd/netinet/ip_encap.c create mode 100644 bsd/netinet/ip_encap.h create mode 100644 bsd/netinet/ip_fil.h create mode 100644 bsd/netinet/ip_flow.c create mode 100644 bsd/netinet/ip_flow.h create mode 100644 bsd/netinet/ip_frag.c create mode 100644 bsd/netinet/ip_frag.h create mode 100644 bsd/netinet/ip_ftp_pxy.c create mode 100644 bsd/netinet/ip_fw.c create mode 100644 bsd/netinet/ip_fw.h create mode 100644 bsd/netinet/ip_icmp.c create mode 100644 bsd/netinet/ip_icmp.h create mode 100644 bsd/netinet/ip_input.c create mode 100644 bsd/netinet/ip_log.c create mode 100644 bsd/netinet/ip_mroute.c create mode 100644 bsd/netinet/ip_mroute.h create mode 100644 bsd/netinet/ip_nat.c create mode 100644 bsd/netinet/ip_nat.h create mode 100644 bsd/netinet/ip_output.c create mode 100644 bsd/netinet/ip_proxy.c create mode 100644 bsd/netinet/ip_proxy.h create mode 100644 bsd/netinet/ip_state.c create mode 100644 bsd/netinet/ip_state.h create mode 100644 bsd/netinet/ip_var.h 
create mode 100644 bsd/netinet/ipl.h create mode 100644 bsd/netinet/raw_ip.c create mode 100644 bsd/netinet/tcp.h create mode 100644 bsd/netinet/tcp_debug.c create mode 100644 bsd/netinet/tcp_debug.h create mode 100644 bsd/netinet/tcp_fsm.h create mode 100644 bsd/netinet/tcp_input.c create mode 100644 bsd/netinet/tcp_output.c create mode 100644 bsd/netinet/tcp_seq.h create mode 100644 bsd/netinet/tcp_subr.c create mode 100644 bsd/netinet/tcp_timer.c create mode 100644 bsd/netinet/tcp_timer.h create mode 100644 bsd/netinet/tcp_usrreq.c create mode 100644 bsd/netinet/tcp_var.h create mode 100644 bsd/netinet/tcpip.h create mode 100644 bsd/netinet/udp.h create mode 100644 bsd/netinet/udp_usrreq.c create mode 100644 bsd/netinet/udp_var.h create mode 100644 bsd/netinet6/Makefile create mode 100644 bsd/netinet6/ah.h create mode 100644 bsd/netinet6/ah_core.c create mode 100644 bsd/netinet6/ah_input.c create mode 100644 bsd/netinet6/ah_output.c create mode 100644 bsd/netinet6/dest6.c create mode 100644 bsd/netinet6/esp.h create mode 100644 bsd/netinet6/esp_core.c create mode 100644 bsd/netinet6/esp_input.c create mode 100644 bsd/netinet6/esp_output.c create mode 100644 bsd/netinet6/frag6.c create mode 100644 bsd/netinet6/icmp6.c create mode 100644 bsd/netinet6/icmp6.h create mode 100644 bsd/netinet6/in6.c create mode 100644 bsd/netinet6/in6.h create mode 100644 bsd/netinet6/in6_cksum.c create mode 100644 bsd/netinet6/in6_gif.c create mode 100644 bsd/netinet6/in6_gif.h create mode 100644 bsd/netinet6/in6_ifattach.c create mode 100644 bsd/netinet6/in6_ifattach.h create mode 100644 bsd/netinet6/in6_pcb.c create mode 100644 bsd/netinet6/in6_pcb.h create mode 100644 bsd/netinet6/in6_prefix.c create mode 100644 bsd/netinet6/in6_prefix.h create mode 100644 bsd/netinet6/in6_proto.c create mode 100644 bsd/netinet6/in6_rmx.c create mode 100644 bsd/netinet6/in6_src.c create mode 100644 bsd/netinet6/in6_var.h create mode 100644 bsd/netinet6/ip6.h create mode 100644 
bsd/netinet6/ip6_forward.c create mode 100644 bsd/netinet6/ip6_fw.c create mode 100644 bsd/netinet6/ip6_fw.h create mode 100644 bsd/netinet6/ip6_input.c create mode 100644 bsd/netinet6/ip6_mroute.c create mode 100644 bsd/netinet6/ip6_mroute.h create mode 100644 bsd/netinet6/ip6_output.c create mode 100644 bsd/netinet6/ip6_var.h create mode 100644 bsd/netinet6/ip6protosw.h create mode 100644 bsd/netinet6/ipcomp.h create mode 100644 bsd/netinet6/ipcomp_core.c create mode 100644 bsd/netinet6/ipcomp_input.c create mode 100644 bsd/netinet6/ipcomp_output.c create mode 100644 bsd/netinet6/ipsec.c create mode 100644 bsd/netinet6/ipsec.h create mode 100644 bsd/netinet6/mip6.c create mode 100644 bsd/netinet6/mip6.h create mode 100644 bsd/netinet6/mip6_common.h create mode 100644 bsd/netinet6/mip6_ha.c create mode 100644 bsd/netinet6/mip6_hooks.c create mode 100644 bsd/netinet6/mip6_io.c create mode 100644 bsd/netinet6/mip6_md.c create mode 100644 bsd/netinet6/mip6_mn.c create mode 100644 bsd/netinet6/mld6.c create mode 100644 bsd/netinet6/mld6_var.h create mode 100644 bsd/netinet6/natpt_defs.h create mode 100644 bsd/netinet6/natpt_dispatch.c create mode 100644 bsd/netinet6/natpt_list.c create mode 100644 bsd/netinet6/natpt_list.h create mode 100644 bsd/netinet6/natpt_log.c create mode 100644 bsd/netinet6/natpt_log.h create mode 100644 bsd/netinet6/natpt_rule.c create mode 100644 bsd/netinet6/natpt_soctl.h create mode 100644 bsd/netinet6/natpt_trans.c create mode 100644 bsd/netinet6/natpt_tslot.c create mode 100644 bsd/netinet6/natpt_usrreq.c create mode 100644 bsd/netinet6/natpt_var.h create mode 100644 bsd/netinet6/nd6.c create mode 100644 bsd/netinet6/nd6.h create mode 100644 bsd/netinet6/nd6_nbr.c create mode 100644 bsd/netinet6/nd6_rtr.c create mode 100644 bsd/netinet6/pim6.h create mode 100644 bsd/netinet6/pim6_var.h create mode 100644 bsd/netinet6/raw_ip6.c create mode 100644 bsd/netinet6/route6.c create mode 100644 bsd/netinet6/udp6.h create mode 100644 
bsd/netinet6/udp6_usrreq.c create mode 100644 bsd/netinet6/udp6_var.h create mode 100644 bsd/netiso/Makefile create mode 100644 bsd/netiso/argo_debug.h create mode 100644 bsd/netiso/clnl.h create mode 100644 bsd/netiso/clnp.h create mode 100644 bsd/netiso/clnp_debug.c create mode 100644 bsd/netiso/clnp_er.c create mode 100644 bsd/netiso/clnp_frag.c create mode 100644 bsd/netiso/clnp_input.c create mode 100644 bsd/netiso/clnp_options.c create mode 100644 bsd/netiso/clnp_output.c create mode 100644 bsd/netiso/clnp_raw.c create mode 100644 bsd/netiso/clnp_stat.h create mode 100644 bsd/netiso/clnp_subr.c create mode 100644 bsd/netiso/clnp_timer.c create mode 100644 bsd/netiso/cltp_usrreq.c create mode 100644 bsd/netiso/cltp_var.h create mode 100644 bsd/netiso/cons.h create mode 100644 bsd/netiso/cons_pcb.h create mode 100644 bsd/netiso/eonvar.h create mode 100644 bsd/netiso/esis.c create mode 100644 bsd/netiso/esis.h create mode 100644 bsd/netiso/idrp_usrreq.c create mode 100644 bsd/netiso/if_cons.c create mode 100644 bsd/netiso/if_eon.c create mode 100644 bsd/netiso/iso.c create mode 100644 bsd/netiso/iso.h create mode 100644 bsd/netiso/iso_chksum.c create mode 100644 bsd/netiso/iso_errno.h create mode 100644 bsd/netiso/iso_pcb.c create mode 100644 bsd/netiso/iso_pcb.h create mode 100644 bsd/netiso/iso_proto.c create mode 100644 bsd/netiso/iso_snpac.c create mode 100644 bsd/netiso/iso_snpac.h create mode 100644 bsd/netiso/iso_var.h create mode 100644 bsd/netiso/tp_astring.c create mode 100644 bsd/netiso/tp_clnp.h create mode 100644 bsd/netiso/tp_cons.c create mode 100644 bsd/netiso/tp_driver.c create mode 100644 bsd/netiso/tp_emit.c create mode 100644 bsd/netiso/tp_events.h create mode 100644 bsd/netiso/tp_inet.c create mode 100644 bsd/netiso/tp_input.c create mode 100644 bsd/netiso/tp_ip.h create mode 100644 bsd/netiso/tp_iso.c create mode 100644 bsd/netiso/tp_meas.c create mode 100644 bsd/netiso/tp_meas.h create mode 100644 bsd/netiso/tp_output.c create mode 100644 
bsd/netiso/tp_param.h create mode 100644 bsd/netiso/tp_pcb.c create mode 100644 bsd/netiso/tp_pcb.h create mode 100644 bsd/netiso/tp_seq.h create mode 100644 bsd/netiso/tp_stat.h create mode 100644 bsd/netiso/tp_states.h create mode 100644 bsd/netiso/tp_subr.c create mode 100644 bsd/netiso/tp_subr2.c create mode 100644 bsd/netiso/tp_timer.c create mode 100644 bsd/netiso/tp_timer.h create mode 100644 bsd/netiso/tp_tpdu.h create mode 100644 bsd/netiso/tp_trace.c create mode 100644 bsd/netiso/tp_trace.h create mode 100644 bsd/netiso/tp_user.h create mode 100644 bsd/netiso/tp_usrreq.c create mode 100644 bsd/netiso/tuba_subr.c create mode 100644 bsd/netiso/tuba_table.c create mode 100644 bsd/netiso/tuba_table.h create mode 100644 bsd/netiso/tuba_usrreq.c create mode 100644 bsd/netkey/Makefile create mode 100644 bsd/netkey/key.c create mode 100644 bsd/netkey/key.h create mode 100644 bsd/netkey/key_debug.c create mode 100644 bsd/netkey/key_debug.h create mode 100644 bsd/netkey/key_var.h create mode 100644 bsd/netkey/keydb.c create mode 100644 bsd/netkey/keydb.h create mode 100644 bsd/netkey/keysock.c create mode 100644 bsd/netkey/keysock.h create mode 100644 bsd/netkey/keyv2.h create mode 100644 bsd/netns/Makefile create mode 100644 bsd/netns/idp.h create mode 100644 bsd/netns/idp_usrreq.c create mode 100644 bsd/netns/idp_var.h create mode 100644 bsd/netns/ns.c create mode 100644 bsd/netns/ns.h create mode 100644 bsd/netns/ns_cksum.c create mode 100644 bsd/netns/ns_error.c create mode 100644 bsd/netns/ns_error.h create mode 100644 bsd/netns/ns_if.h create mode 100644 bsd/netns/ns_input.c create mode 100644 bsd/netns/ns_ip.c create mode 100644 bsd/netns/ns_output.c create mode 100644 bsd/netns/ns_pcb.c create mode 100644 bsd/netns/ns_pcb.h create mode 100644 bsd/netns/ns_proto.c create mode 100644 bsd/netns/sp.h create mode 100644 bsd/netns/spidp.h create mode 100644 bsd/netns/spp_debug.c create mode 100644 bsd/netns/spp_debug.h create mode 100644 bsd/netns/spp_timer.h 
create mode 100644 bsd/netns/spp_usrreq.c create mode 100644 bsd/netns/spp_var.h create mode 100644 bsd/nfs/Makefile create mode 100644 bsd/nfs/krpc.h create mode 100644 bsd/nfs/krpc_subr.c create mode 100644 bsd/nfs/nfs.h create mode 100644 bsd/nfs/nfs_bio.c create mode 100644 bsd/nfs/nfs_boot.c create mode 100644 bsd/nfs/nfs_node.c create mode 100644 bsd/nfs/nfs_nqlease.c create mode 100644 bsd/nfs/nfs_serv.c create mode 100644 bsd/nfs/nfs_socket.c create mode 100644 bsd/nfs/nfs_srvcache.c create mode 100644 bsd/nfs/nfs_subs.c create mode 100644 bsd/nfs/nfs_syscalls.c create mode 100644 bsd/nfs/nfs_vfsops.c create mode 100644 bsd/nfs/nfs_vnops.c create mode 100644 bsd/nfs/nfsdiskless.h create mode 100644 bsd/nfs/nfsm_subs.h create mode 100644 bsd/nfs/nfsmount.h create mode 100644 bsd/nfs/nfsnode.h create mode 100644 bsd/nfs/nfsproto.h create mode 100644 bsd/nfs/nfsrtt.h create mode 100644 bsd/nfs/nfsrvcache.h create mode 100644 bsd/nfs/nqnfs.h create mode 100644 bsd/nfs/rpcv2.h create mode 100644 bsd/nfs/xdr_subs.h create mode 100644 bsd/ppc/Makefile create mode 100644 bsd/ppc/cpu.h create mode 100644 bsd/ppc/disklabel.h create mode 100644 bsd/ppc/endian.h create mode 100644 bsd/ppc/exec.h create mode 100644 bsd/ppc/label_t.h create mode 100644 bsd/ppc/param.h create mode 100644 bsd/ppc/profile.h create mode 100644 bsd/ppc/psl.h create mode 100644 bsd/ppc/ptrace.h create mode 100644 bsd/ppc/reboot.h create mode 100644 bsd/ppc/reg.h create mode 100644 bsd/ppc/setjmp.h create mode 100644 bsd/ppc/signal.h create mode 100644 bsd/ppc/spl.h create mode 100644 bsd/ppc/table.h create mode 100644 bsd/ppc/types.h create mode 100644 bsd/ppc/user.h create mode 100644 bsd/ppc/vmparam.h create mode 100644 bsd/sys/Makefile create mode 100644 bsd/sys/acct.h create mode 100644 bsd/sys/attr.h create mode 100644 bsd/sys/buf.h create mode 100644 bsd/sys/callout.h create mode 100644 bsd/sys/cdefs.h create mode 100644 bsd/sys/cdio.h create mode 100644 bsd/sys/chio.h create mode 100644 
bsd/sys/clist.h create mode 100644 bsd/sys/conf.h create mode 100644 bsd/sys/dir.h create mode 100644 bsd/sys/dirent.h create mode 100644 bsd/sys/disklabel.h create mode 100644 bsd/sys/disktab.h create mode 100644 bsd/sys/dkbad.h create mode 100644 bsd/sys/dkstat.h create mode 100644 bsd/sys/dmap.h create mode 100644 bsd/sys/domain.h create mode 100644 bsd/sys/errno.h create mode 100644 bsd/sys/ev.h create mode 100644 bsd/sys/exec.h create mode 100644 bsd/sys/fcntl.h create mode 100644 bsd/sys/file.h create mode 100644 bsd/sys/filedesc.h create mode 100644 bsd/sys/filio.h create mode 100644 bsd/sys/fsctl.h create mode 100644 bsd/sys/gmon.h create mode 100644 bsd/sys/ioccom.h create mode 100644 bsd/sys/ioctl.h create mode 100644 bsd/sys/ioctl_compat.h create mode 100644 bsd/sys/ipc.h create mode 100644 bsd/sys/kdebug.h create mode 100644 bsd/sys/kern_event.h create mode 100644 bsd/sys/kernel.h create mode 100644 bsd/sys/ktrace.h create mode 100644 bsd/sys/linker_set.h create mode 100644 bsd/sys/loadable_fs.h create mode 100644 bsd/sys/lock.h create mode 100644 bsd/sys/lockf.h create mode 100644 bsd/sys/mach_swapon.h create mode 100644 bsd/sys/malloc.h create mode 100644 bsd/sys/mbuf.h create mode 100644 bsd/sys/md5.h create mode 100644 bsd/sys/mman.h create mode 100644 bsd/sys/mount.h create mode 100644 bsd/sys/msg.h create mode 100644 bsd/sys/msgbuf.h create mode 100644 bsd/sys/mtio.h create mode 100644 bsd/sys/namei.h create mode 100644 bsd/sys/netport.h create mode 100644 bsd/sys/param.h create mode 100644 bsd/sys/paths.h create mode 100644 bsd/sys/poll.h create mode 100644 bsd/sys/proc.h create mode 100644 bsd/sys/protosw.h create mode 100644 bsd/sys/ptrace.h create mode 100644 bsd/sys/queue.h create mode 100644 bsd/sys/reboot.h create mode 100644 bsd/sys/resource.h create mode 100644 bsd/sys/resourcevar.h create mode 100644 bsd/sys/select.h create mode 100644 bsd/sys/sem.h create mode 100644 bsd/sys/semaphore.h create mode 100644 bsd/sys/shm.h create mode 
100644 bsd/sys/signal.h create mode 100644 bsd/sys/signalvar.h create mode 100644 bsd/sys/socket.h create mode 100644 bsd/sys/socketvar.h create mode 100644 bsd/sys/sockio.h create mode 100644 bsd/sys/stat.h create mode 100644 bsd/sys/subr_prf.h create mode 100644 bsd/sys/syscall.h create mode 100644 bsd/sys/sysctl.h create mode 100644 bsd/sys/syslimits.h create mode 100644 bsd/sys/syslog.h create mode 100644 bsd/sys/systm.h create mode 100644 bsd/sys/table.h create mode 100644 bsd/sys/termios.h create mode 100644 bsd/sys/time.h create mode 100644 bsd/sys/timeb.h create mode 100644 bsd/sys/times.h create mode 100644 bsd/sys/tprintf.h create mode 100644 bsd/sys/trace.h create mode 100644 bsd/sys/tty.h create mode 100644 bsd/sys/ttychars.h create mode 100644 bsd/sys/ttycom.h create mode 100644 bsd/sys/ttydefaults.h create mode 100644 bsd/sys/ttydev.h create mode 100644 bsd/sys/types.h create mode 100644 bsd/sys/ubc.h create mode 100644 bsd/sys/ucred.h create mode 100644 bsd/sys/uio.h create mode 100644 bsd/sys/un.h create mode 100644 bsd/sys/unistd.h create mode 100644 bsd/sys/unpcb.h create mode 100644 bsd/sys/user.h create mode 100644 bsd/sys/utfconv.h create mode 100644 bsd/sys/utsname.h create mode 100644 bsd/sys/ux_exception.h create mode 100644 bsd/sys/vadvise.h create mode 100644 bsd/sys/vcmd.h create mode 100644 bsd/sys/version.h create mode 100644 bsd/sys/vlimit.h create mode 100644 bsd/sys/vm.h create mode 100644 bsd/sys/vmmeter.h create mode 100644 bsd/sys/vmparam.h create mode 100644 bsd/sys/vnode.h create mode 100644 bsd/sys/vnode_if.h create mode 100644 bsd/sys/vstat.h create mode 100644 bsd/sys/wait.h create mode 100644 bsd/ufs/Makefile create mode 100644 bsd/ufs/ffs/Makefile create mode 100644 bsd/ufs/ffs/ffs_alloc.c create mode 100644 bsd/ufs/ffs/ffs_balloc.c create mode 100644 bsd/ufs/ffs/ffs_extern.h create mode 100644 bsd/ufs/ffs/ffs_inode.c create mode 100644 bsd/ufs/ffs/ffs_subr.c create mode 100644 bsd/ufs/ffs/ffs_tables.c create mode 100644 
bsd/ufs/ffs/ffs_vfsops.c create mode 100644 bsd/ufs/ffs/ffs_vnops.c create mode 100644 bsd/ufs/ffs/fs.h create mode 100644 bsd/ufs/mfs/mfs_extern.h create mode 100644 bsd/ufs/mfs/mfs_vfsops.c create mode 100644 bsd/ufs/mfs/mfs_vnops.c create mode 100644 bsd/ufs/mfs/mfsiom.h create mode 100644 bsd/ufs/mfs/mfsnode.h create mode 100644 bsd/ufs/ufs/Makefile create mode 100644 bsd/ufs/ufs/dinode.h create mode 100644 bsd/ufs/ufs/dir.h create mode 100644 bsd/ufs/ufs/inode.h create mode 100644 bsd/ufs/ufs/lockf.h create mode 100644 bsd/ufs/ufs/quota.h create mode 100644 bsd/ufs/ufs/ufs_bmap.c create mode 100644 bsd/ufs/ufs/ufs_byte_order.c create mode 100644 bsd/ufs/ufs/ufs_byte_order.h create mode 100644 bsd/ufs/ufs/ufs_extern.h create mode 100644 bsd/ufs/ufs/ufs_ihash.c create mode 100644 bsd/ufs/ufs/ufs_inode.c create mode 100644 bsd/ufs/ufs/ufs_lockf.c create mode 100644 bsd/ufs/ufs/ufs_lookup.c create mode 100644 bsd/ufs/ufs/ufs_quota.c create mode 100644 bsd/ufs/ufs/ufs_readwrite.c create mode 100644 bsd/ufs/ufs/ufs_vfsops.c create mode 100644 bsd/ufs/ufs/ufs_vnops.c create mode 100644 bsd/ufs/ufs/ufsmount.h create mode 100644 bsd/uxkern/ux_exception.c create mode 100644 bsd/vfs/Makefile create mode 100644 bsd/vfs/vfs_bio.c create mode 100644 bsd/vfs/vfs_cache.c create mode 100644 bsd/vfs/vfs_cluster.c create mode 100644 bsd/vfs/vfs_conf.c create mode 100644 bsd/vfs/vfs_init.c create mode 100644 bsd/vfs/vfs_lookup.c create mode 100644 bsd/vfs/vfs_subr.c create mode 100644 bsd/vfs/vfs_support.c create mode 100644 bsd/vfs/vfs_support.h create mode 100644 bsd/vfs/vfs_syscalls.c create mode 100644 bsd/vfs/vfs_utfconv.c create mode 100644 bsd/vfs/vfs_vnops.c create mode 100644 bsd/vfs/vnode_if.c create mode 100644 bsd/vfs/vnode_if.sh create mode 100644 bsd/vfs/vnode_if.src create mode 100644 bsd/vm/Makefile create mode 100644 bsd/vm/dp_backing_file.c create mode 100644 bsd/vm/vm_pageout.h create mode 100644 bsd/vm/vm_pager.h create mode 100644 bsd/vm/vm_unix.c create mode 
100644 bsd/vm/vnode_pager.c create mode 100644 bsd/vm/vnode_pager.h create mode 100644 iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.cpp create mode 100644 iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.h create mode 100644 iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXRegs.h create mode 100644 iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXTiming.h create mode 100644 iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.cpp create mode 100644 iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.h create mode 100644 iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATARegs.h create mode 100644 iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.cpp create mode 100644 iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.h create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.cpp create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.h create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.cpp create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.h create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.cpp create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.h create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.cpp create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.h create mode 100644 iokit/Drivers/hidsystem/drvAppleADBDevices/IOADBDevice.cpp create mode 100644 iokit/Drivers/hidsystem/drvApplePS2Keyboard/ApplePS2Keyboard.cpp create mode 100644 iokit/Drivers/hidsystem/drvApplePS2Keyboard/ApplePS2Keyboard.h create mode 100644 iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.cpp create mode 100644 iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.h create mode 100644 iokit/Drivers/network/AppleBPF/AppleBPF.cpp create mode 100644 iokit/Drivers/network/AppleBPF/AppleBPF.h create mode 100644 iokit/Drivers/network/AppleBPF/bpf.c create mode 100644 
iokit/Drivers/network/AppleBPF/bpf_filter.c create mode 100644 iokit/Drivers/network/drvIntel82557/i82557.cpp create mode 100644 iokit/Drivers/network/drvIntel82557/i82557.h create mode 100644 iokit/Drivers/network/drvIntel82557/i82557HW.h create mode 100644 iokit/Drivers/network/drvIntel82557/i82557Inline.h create mode 100644 iokit/Drivers/network/drvIntel82557/i82557PHY.cpp create mode 100644 iokit/Drivers/network/drvIntel82557/i82557PHY.h create mode 100644 iokit/Drivers/network/drvIntel82557/i82557Private.cpp create mode 100644 iokit/Drivers/network/drvIntel82557/i82557eeprom.cpp create mode 100644 iokit/Drivers/network/drvIntel82557/i82557eeprom.h create mode 100644 iokit/Drivers/network/drvMaceEnet/MaceEnet.cpp create mode 100644 iokit/Drivers/network/drvMaceEnet/MaceEnet.h create mode 100644 iokit/Drivers/network/drvMaceEnet/MaceEnetHW.cpp create mode 100644 iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.cpp create mode 100644 iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.h create mode 100644 iokit/Drivers/network/drvMaceEnet/MaceEnetRegisters.h create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnet.cpp create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnet.h create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnetHW.cpp create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnetMII.cpp create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnetMII.h create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.cpp create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.h create mode 100644 iokit/Drivers/network/drvPPCBMac/BMacEnetRegisters.h create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnet.cpp create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnet.h create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnetHW.cpp create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnetMII.cpp create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnetMII.h create mode 100644 
iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.cpp create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.h create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNEnetRegisters.h create mode 100644 iokit/Drivers/network/drvPPCUniN/UniNPowerSaver.cpp create mode 100644 iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.cpp create mode 100644 iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.h create mode 100644 iokit/Drivers/pci/drvApplePCI/AppleI386PCI.cpp create mode 100644 iokit/Drivers/pci/drvApplePCI/AppleI386PCI.h create mode 100644 iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.cpp create mode 100644 iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.h create mode 100644 iokit/Drivers/platform/drvAppleCuda/AppleCuda.cpp create mode 100644 iokit/Drivers/platform/drvAppleCuda/AppleCuda.h create mode 100644 iokit/Drivers/platform/drvAppleCuda/AppleCudaCommands.h create mode 100644 iokit/Drivers/platform/drvAppleCuda/AppleCudaHW.h create mode 100644 iokit/Drivers/platform/drvAppleCuda/AppleVIA6522.h create mode 100644 iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.cpp create mode 100644 iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.h create mode 100644 iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.cpp create mode 100644 iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.h create mode 100644 iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.cpp create mode 100644 iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.h create mode 100644 iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.cpp create mode 100644 iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.h create mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp create mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h create mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp create mode 100644 iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h create mode 100644 
iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h create mode 100644 iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp create mode 100644 iokit/Drivers/platform/drvAppleIntelClock/AppleIntelClock.h create mode 100644 iokit/Drivers/platform/drvAppleIntelClock/IntelClock.cpp create mode 100644 iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp create mode 100644 iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp create mode 100644 iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.cpp create mode 100644 iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.h create mode 100644 iokit/Drivers/platform/drvAppleOHare/OHare.cpp create mode 100644 iokit/Drivers/platform/drvAppleOHare/OHare.h create mode 100644 iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp create mode 100644 iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h create mode 100644 iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.cpp create mode 100644 iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.h create mode 100644 iokit/Drivers/platform/drvApplePS2Controller/ApplePS2KeyboardDevice.cpp create mode 100644 iokit/Drivers/platform/drvApplePS2Controller/ApplePS2MouseDevice.cpp create mode 100644 iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.cpp create mode 100644 iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.h create mode 100644 iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp create mode 100644 iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.cpp create mode 100644 iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.h create mode 100644 iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.cpp create mode 100644 iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.h create mode 100644 iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.cpp create mode 100644 iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.h create mode 100644 iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp 
create mode 100644 iokit/Drivers/platform/drvAppleRootDomain/RootDomain.h create mode 100644 iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp create mode 100644 iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxClient.cpp create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxController.h create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxExecute.cpp create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInit.cpp create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInterface.h create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxMisc.cpp create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxRegs.h create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxSRB.h create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.h create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.lis create mode 100644 iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.ss create mode 100755 iokit/Drivers/scsi/drvSymbios8xx/nasmpb create mode 100644 iokit/Examples/drvGenericInterruptController/GenericInterruptController.cpp create mode 100644 iokit/Examples/drvGenericInterruptController/GenericInterruptController.h create mode 100644 iokit/Families/IOADBBus/IOADBBus.cpp create mode 100644 iokit/Families/IOADBBus/IOADBBusPriv.h create mode 100644 iokit/Families/IOADBBus/IOADBController.cpp create mode 100644 iokit/Families/IOADBBus/IOADBControllerUserClient.cpp create mode 100644 iokit/Families/IOADBBus/IOADBControllerUserClient.h create mode 100644 iokit/Families/IOATAHDDrive/IOATAHDCommand.cpp create mode 100644 iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp create mode 100644 iokit/Families/IOATAHDDrive/IOATAHDDriveNub.cpp create mode 100644 iokit/Families/IOATAHDDrive/IOATAHDPower.cpp create mode 100644 iokit/Families/IOATAPICDDrive/IOATAPICDCommand.cpp create mode 100644 iokit/Families/IOATAPICDDrive/IOATAPICDDrive.cpp create mode 100644 
iokit/Families/IOATAPICDDrive/IOATAPICDDriveNub.cpp create mode 100644 iokit/Families/IOATAPIDVDDrive/IOATAPIDVDCommand.cpp create mode 100644 iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDrive.cpp create mode 100644 iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp create mode 100644 iokit/Families/IOATAPIHDDrive/IOATAPIHDCommand.cpp create mode 100644 iokit/Families/IOATAPIHDDrive/IOATAPIHDDrive.cpp create mode 100644 iokit/Families/IOATAPIHDDrive/IOATAPIHDDriveNub.cpp create mode 100644 iokit/Families/IOATAStandard/ATAQueueHelpers.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardCommand.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardController.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardData.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardDevice.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardDriver.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardDriverDma.cpp create mode 100644 iokit/Families/IOATAStandard/IOATAStandardDriverPio.cpp create mode 100644 iokit/Families/IOBlockStorage/IOBlockStorageDevice.cpp create mode 100644 iokit/Families/IOCDBlockStorage/IOCDBlockStorageDevice.cpp create mode 100644 iokit/Families/IOCDStorage/IOCDAudioControl.cpp create mode 100644 iokit/Families/IOCDStorage/IOCDAudioControlUserClient.cpp create mode 100644 iokit/Families/IOCDStorage/IOCDBlockStorageDriver.cpp create mode 100644 iokit/Families/IOCDStorage/IOCDMedia.cpp create mode 100644 iokit/Families/IOCDStorage/IOCDPartitionScheme.cpp create mode 100644 iokit/Families/IODVDBlockStorage/IODVDBlockStorageDevice.cpp create mode 100644 iokit/Families/IODVDStorage/IODVDBlockStorageDriver.cpp create mode 100644 iokit/Families/IODVDStorage/IODVDMedia.cpp create mode 100644 iokit/Families/IOGraphics/ADBTest.m create mode 100644 iokit/Families/IOGraphics/AppleDDCDisplay.cpp create mode 100644 iokit/Families/IOGraphics/AppleG3SeriesDisplay.cpp create mode 100644 
iokit/Families/IOGraphics/DDCInfo.m create mode 100644 iokit/Families/IOGraphics/IOAccelerator.cpp create mode 100644 iokit/Families/IOGraphics/IOBootFramebuffer.cpp create mode 100644 iokit/Families/IOGraphics/IOBootFramebuffer.h create mode 100644 iokit/Families/IOGraphics/IOCursorBlits.h create mode 100644 iokit/Families/IOGraphics/IODisplay.cpp create mode 100644 iokit/Families/IOGraphics/IODisplayWrangler.cpp create mode 100644 iokit/Families/IOGraphics/IODisplayWrangler.h create mode 100644 iokit/Families/IOGraphics/IOFramebuffer.cpp create mode 100644 iokit/Families/IOGraphics/IOFramebufferReallyPrivate.h create mode 100644 iokit/Families/IOGraphics/IOFramebufferUserClient.cpp create mode 100644 iokit/Families/IOGraphics/IOFramebufferUserClient.h create mode 100644 iokit/Families/IOGraphics/IOGraphicsDevice.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCheckReport.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCountDescriptorItems.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonCaps.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtons.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonsOnPage.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCaps.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCollectionNodes.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetData.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextButtonInfo.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextUsageValueInfo.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetReportLength.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValue.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValueArray.c create mode 100644 
iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetValueCaps.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDHasUsage.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDInitReport.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDIsButtonOrValue.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDLib.h create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDMaxUsageListLength.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDNextItem.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDOpenCloseDescriptor.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDParseDescriptor.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPostProcessRIValue.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPriv.h create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessCollection.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessGlobalItem.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessLocalItem.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessMainItem.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessReportItem.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPutData.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDScaleUsageValue.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetButtons.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetScaledUsageValue.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValue.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValueArray.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageAndPageFromIndex.c 
create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageInRange.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageListDifference.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/MacTypes.h create mode 100644 iokit/Families/IOHIDSystem/IOHIDDescriptorParser/PoolAlloc.c create mode 100644 iokit/Families/IOHIDSystem/IOHIDIO.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIDSystem.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIDUserClient.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIDUserClient.h create mode 100644 iokit/Families/IOHIDSystem/IOHIDevice.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIKeyboard.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIKeyboardMapper.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHIPointing.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHITablet.cpp create mode 100644 iokit/Families/IOHIDSystem/IOHITabletPointer.cpp create mode 100644 iokit/Families/IONDRVSupport/IONDRV.cpp create mode 100644 iokit/Families/IONDRVSupport/IONDRV.h create mode 100644 iokit/Families/IONDRVSupport/IONDRVFramebuffer.cpp create mode 100644 iokit/Families/IONDRVSupport/IONDRVLibraries.cpp create mode 100644 iokit/Families/IONDRVSupport/IOPEFInternals.c create mode 100644 iokit/Families/IONDRVSupport/IOPEFInternals.h create mode 100644 iokit/Families/IONDRVSupport/IOPEFLibraries.h create mode 100644 iokit/Families/IONDRVSupport/IOPEFLoader.c create mode 100644 iokit/Families/IONDRVSupport/IOPEFLoader.h create mode 100644 iokit/Families/IONVRAM/IONVRAMController.cpp create mode 100644 iokit/Families/IONetworking/IOEthernetController.cpp create mode 100644 iokit/Families/IONetworking/IOEthernetInterface.cpp create mode 100644 iokit/Families/IONetworking/IOKernelDebugger.cpp create mode 100644 iokit/Families/IONetworking/IOMbufMemoryCursor.cpp create mode 100644 iokit/Families/IONetworking/IOMbufQueue.h create mode 100644 
iokit/Families/IONetworking/IONetworkController.cpp create mode 100644 iokit/Families/IONetworking/IONetworkData.cpp create mode 100644 iokit/Families/IONetworking/IONetworkInterface.cpp create mode 100644 iokit/Families/IONetworking/IONetworkMedium.cpp create mode 100644 iokit/Families/IONetworking/IONetworkStack.cpp create mode 100644 iokit/Families/IONetworking/IONetworkUserClient.cpp create mode 100644 iokit/Families/IONetworking/IOOutputQueue.cpp create mode 100644 iokit/Families/IONetworking/IOPacketQueue.cpp create mode 100644 iokit/Families/IOPCIBus/IOPCIBridge.cpp create mode 100644 iokit/Families/IOPCIBus/IOPCIDevice.cpp create mode 100644 iokit/Families/IOPCIBus/IOPCIDeviceI386.cpp create mode 100644 iokit/Families/IOPCIBus/IOPCIDevicePPC.cpp create mode 100644 iokit/Families/IOSCSICDDrive/IOSCSICDDrive.cpp create mode 100644 iokit/Families/IOSCSICDDrive/IOSCSICDDriveNub.cpp create mode 100644 iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDrive.cpp create mode 100644 iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDriveNub.cpp create mode 100644 iokit/Families/IOSCSIHDDrive/IOBasicSCSI.cpp create mode 100644 iokit/Families/IOSCSIHDDrive/IOSCSIHDDrive.cpp create mode 100644 iokit/Families/IOSCSIHDDrive/IOSCSIHDDriveNub.cpp create mode 100644 iokit/Families/IOSCSIParallel/IOSCSIParallelCommand.cpp create mode 100644 iokit/Families/IOSCSIParallel/IOSCSIParallelController.cpp create mode 100644 iokit/Families/IOSCSIParallel/IOSCSIParallelDevice.cpp create mode 100644 iokit/Families/IOSCSIParallel/queueHelpers.cpp create mode 100644 iokit/Families/IOStorage/IOApplePartitionScheme.cpp create mode 100644 iokit/Families/IOStorage/IOBlockStorageDriver.cpp create mode 100644 iokit/Families/IOStorage/IOFDiskPartitionScheme.cpp create mode 100644 iokit/Families/IOStorage/IOMedia.cpp create mode 100644 iokit/Families/IOStorage/IOMediaBSDClient.cpp create mode 100644 iokit/Families/IOStorage/IONeXTPartitionScheme.cpp create mode 100644 iokit/Families/IOStorage/IOPartitionScheme.cpp 
create mode 100644 iokit/Families/IOStorage/IOStorage.cpp create mode 100644 iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp create mode 100644 iokit/IOKit/IOBSD.h create mode 100644 iokit/IOKit/IOBufferMemoryDescriptor.h create mode 100644 iokit/IOKit/IOCPU.h create mode 100644 iokit/IOKit/IOCatalogue.h create mode 100644 iokit/IOKit/IOCommand.h create mode 100644 iokit/IOKit/IOCommandGate.h create mode 100644 iokit/IOKit/IOCommandPool.h create mode 100644 iokit/IOKit/IOCommandQueue.h create mode 100644 iokit/IOKit/IOConditionLock.h create mode 100644 iokit/IOKit/IODataQueue.h create mode 100644 iokit/IOKit/IODataQueueShared.h create mode 100644 iokit/IOKit/IODeviceMemory.h create mode 100644 iokit/IOKit/IODeviceTreeSupport.h create mode 100644 iokit/IOKit/IOEventSource.h create mode 100644 iokit/IOKit/IOFilterInterruptEventSource.h create mode 100644 iokit/IOKit/IOInterruptController.h create mode 100644 iokit/IOKit/IOInterruptEventSource.h create mode 100644 iokit/IOKit/IOInterrupts.h create mode 100644 iokit/IOKit/IOKitDebug.h create mode 100644 iokit/IOKit/IOKitKeys.h create mode 100644 iokit/IOKit/IOKitServer.h create mode 100644 iokit/IOKit/IOLib.h create mode 100644 iokit/IOKit/IOLocks.h create mode 100644 iokit/IOKit/IOMemoryCursor.h create mode 100644 iokit/IOKit/IOMemoryDescriptor.h create mode 100644 iokit/IOKit/IOMessage.h create mode 100644 iokit/IOKit/IOMultiMemoryDescriptor.h create mode 100644 iokit/IOKit/IONVRAM.h create mode 100644 iokit/IOKit/IONotifier.h create mode 100644 iokit/IOKit/IOPlatformExpert.h create mode 100644 iokit/IOKit/IORangeAllocator.h create mode 100644 iokit/IOKit/IORegistryEntry.h create mode 100644 iokit/IOKit/IOReturn.h create mode 100644 iokit/IOKit/IOService.h create mode 100644 iokit/IOKit/IOServicePM.h create mode 100644 iokit/IOKit/IOSharedLock.h create mode 100644 iokit/IOKit/IOSyncer.h create mode 100644 iokit/IOKit/IOTimeStamp.h create mode 100644 iokit/IOKit/IOTimerEventSource.h create mode 100644 
iokit/IOKit/IOTypes.h create mode 100644 iokit/IOKit/IOUserClient.h create mode 100644 iokit/IOKit/IOWorkLoop.h create mode 100644 iokit/IOKit/Makefile create mode 100644 iokit/IOKit/OSMessageNotification.h create mode 100644 iokit/IOKit/adb/IOADBBus.h create mode 100644 iokit/IOKit/adb/IOADBController.h create mode 100644 iokit/IOKit/adb/IOADBDevice.h create mode 100644 iokit/IOKit/adb/IOADBLib.h create mode 100644 iokit/IOKit/adb/Makefile create mode 100644 iokit/IOKit/adb/adb.h create mode 100644 iokit/IOKit/assert.h create mode 100644 iokit/IOKit/ata/IOATACommand_Reference.h create mode 100644 iokit/IOKit/ata/IOATAController_Reference.h create mode 100644 iokit/IOKit/ata/IOATADeviceInterface.h create mode 100644 iokit/IOKit/ata/IOATADevice_Reference.h create mode 100644 iokit/IOKit/ata/IOATADriver_Reference.h create mode 100644 iokit/IOKit/ata/IOATAStandardInterface.h create mode 100644 iokit/IOKit/ata/Makefile create mode 100644 iokit/IOKit/ata/ata-device/ATACommand.h create mode 100644 iokit/IOKit/ata/ata-device/ATADevice.h create mode 100644 iokit/IOKit/ata/ata-device/ATAPublic.h create mode 100644 iokit/IOKit/ata/ata-device/IOATACommand.h create mode 100644 iokit/IOKit/ata/ata-device/IOATADevice.h create mode 100644 iokit/IOKit/ata/ata-device/Makefile create mode 100644 iokit/IOKit/ata/ata-standard/ATAStandardController.h create mode 100644 iokit/IOKit/ata/ata-standard/ATAStandardPrivate.h create mode 100644 iokit/IOKit/ata/ata-standard/ATAStandardTarget.h create mode 100644 iokit/IOKit/ata/ata-standard/IOATAStandardCommand.h create mode 100644 iokit/IOKit/ata/ata-standard/IOATAStandardController.h create mode 100644 iokit/IOKit/ata/ata-standard/IOATAStandardDevice.h create mode 100644 iokit/IOKit/ata/ata-standard/IOATAStandardDriver.h create mode 100644 iokit/IOKit/ata/ata-standard/Makefile create mode 100644 iokit/IOKit/cdb/CDBCommand.h create mode 100644 iokit/IOKit/cdb/CDBPublic.h create mode 100644 iokit/IOKit/cdb/IOCDBCommand.h create mode 100644 
iokit/IOKit/cdb/IOCDBDevice.h create mode 100644 iokit/IOKit/cdb/IOCDBInterface.h create mode 100644 iokit/IOKit/cdb/Makefile create mode 100644 iokit/IOKit/graphics/IOAccelClientConnect.h create mode 100644 iokit/IOKit/graphics/IOAccelSurfaceConnect.h create mode 100644 iokit/IOKit/graphics/IOAccelTypes.h create mode 100644 iokit/IOKit/graphics/IOAccelerator.h create mode 100644 iokit/IOKit/graphics/IODisplay.h create mode 100644 iokit/IOKit/graphics/IOFramebuffer.h create mode 100644 iokit/IOKit/graphics/IOFramebufferPrivate.h create mode 100644 iokit/IOKit/graphics/IOFramebufferShared.h create mode 100644 iokit/IOKit/graphics/IOGraphicsDevice.h create mode 100644 iokit/IOKit/graphics/IOGraphicsEngine.h create mode 100644 iokit/IOKit/graphics/IOGraphicsTypes.h create mode 100644 iokit/IOKit/graphics/Makefile create mode 100644 iokit/IOKit/hidsystem/IOHIDDescriptorParser.h create mode 100644 iokit/IOKit/hidsystem/IOHIDParameter.h create mode 100644 iokit/IOKit/hidsystem/IOHIDShared.h create mode 100644 iokit/IOKit/hidsystem/IOHIDSystem.h create mode 100644 iokit/IOKit/hidsystem/IOHIDTypes.h create mode 100644 iokit/IOKit/hidsystem/IOHIDUsageTables.h create mode 100644 iokit/IOKit/hidsystem/IOHIDevice.h create mode 100644 iokit/IOKit/hidsystem/IOHIKeyboard.h create mode 100644 iokit/IOKit/hidsystem/IOHIKeyboardMapper.h create mode 100644 iokit/IOKit/hidsystem/IOHIPointing.h create mode 100644 iokit/IOKit/hidsystem/IOHITablet.h create mode 100644 iokit/IOKit/hidsystem/IOHITabletPointer.h create mode 100644 iokit/IOKit/hidsystem/IOLLEvent.h create mode 100644 iokit/IOKit/hidsystem/Makefile create mode 100644 iokit/IOKit/hidsystem/ev_keymap.h create mode 100644 iokit/IOKit/hidsystem/ev_private.h create mode 100644 iokit/IOKit/i386/IOSharedLockImp.h create mode 100644 iokit/IOKit/i386/Makefile create mode 100644 iokit/IOKit/machine/IOSharedLockImp.h create mode 100644 iokit/IOKit/machine/Makefile create mode 100644 iokit/IOKit/ndrvsupport/IOMacOSTypes.h create mode 
100644 iokit/IOKit/ndrvsupport/IOMacOSVideo.h create mode 100644 iokit/IOKit/ndrvsupport/IONDRVFramebuffer.h create mode 100644 iokit/IOKit/ndrvsupport/IONDRVSupport.h create mode 100644 iokit/IOKit/ndrvsupport/Makefile create mode 100644 iokit/IOKit/network/IOBasicOutputQueue.h create mode 100644 iokit/IOKit/network/IOEthernetController.h create mode 100644 iokit/IOKit/network/IOEthernetInterface.h create mode 100644 iokit/IOKit/network/IOEthernetStats.h create mode 100644 iokit/IOKit/network/IOGatedOutputQueue.h create mode 100644 iokit/IOKit/network/IOKernelDebugger.h create mode 100644 iokit/IOKit/network/IOMbufMemoryCursor.h create mode 100644 iokit/IOKit/network/IONetworkController.h create mode 100644 iokit/IOKit/network/IONetworkData.h create mode 100644 iokit/IOKit/network/IONetworkInterface.h create mode 100644 iokit/IOKit/network/IONetworkLib.h create mode 100644 iokit/IOKit/network/IONetworkMedium.h create mode 100644 iokit/IOKit/network/IONetworkStack.h create mode 100644 iokit/IOKit/network/IONetworkStats.h create mode 100644 iokit/IOKit/network/IONetworkUserClient.h create mode 100644 iokit/IOKit/network/IOOutputQueue.h create mode 100644 iokit/IOKit/network/IOPacketQueue.h create mode 100644 iokit/IOKit/network/Makefile create mode 100644 iokit/IOKit/nvram/IONVRAMController.h create mode 100644 iokit/IOKit/nvram/Makefile create mode 100644 iokit/IOKit/pci/IOAGPDevice.h create mode 100644 iokit/IOKit/pci/IOPCIBridge.h create mode 100644 iokit/IOKit/pci/IOPCIDevice.h create mode 100644 iokit/IOKit/pci/Makefile create mode 100644 iokit/IOKit/platform/AppleMacIO.h create mode 100644 iokit/IOKit/platform/AppleMacIODevice.h create mode 100644 iokit/IOKit/platform/AppleNMI.h create mode 100644 iokit/IOKit/platform/ApplePlatformExpert.h create mode 100644 iokit/IOKit/platform/Makefile create mode 100644 iokit/IOKit/power/IOPwrController.h create mode 100644 iokit/IOKit/power/Makefile create mode 100644 iokit/IOKit/ppc/IODBDMA.h create mode 100644 
iokit/IOKit/ppc/IOSharedLockImp.h create mode 100644 iokit/IOKit/ppc/Makefile create mode 100644 iokit/IOKit/ps2/ApplePS2Device.h create mode 100644 iokit/IOKit/ps2/ApplePS2KeyboardDevice.h create mode 100644 iokit/IOKit/ps2/ApplePS2MouseDevice.h create mode 100644 iokit/IOKit/ps2/Makefile create mode 100644 iokit/IOKit/pwr_mgt/IOPM.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMLibDefs.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMPowerSource.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMchangeNoteList.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMinformee.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMinformeeList.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMlog.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMpmChild.h create mode 100644 iokit/IOKit/pwr_mgt/IOPMpowerState.h create mode 100644 iokit/IOKit/pwr_mgt/IOPowerConnection.h create mode 100644 iokit/IOKit/pwr_mgt/Makefile create mode 100644 iokit/IOKit/pwr_mgt/RootDomain.h create mode 100644 iokit/IOKit/rtc/IORTCController.h create mode 100644 iokit/IOKit/rtc/Makefile create mode 100644 iokit/IOKit/scsi/IOSCSICommand_Reference.h create mode 100644 iokit/IOKit/scsi/IOSCSIController_Reference.h create mode 100644 iokit/IOKit/scsi/IOSCSIDeviceInterface.h create mode 100644 iokit/IOKit/scsi/IOSCSIDevice_Reference.h create mode 100644 iokit/IOKit/scsi/IOSCSIParallelInterface.h create mode 100644 iokit/IOKit/scsi/Makefile create mode 100644 iokit/IOKit/scsi/scsi-device/IOSCSICommand.h create mode 100644 iokit/IOKit/scsi/scsi-device/IOSCSIDevice.h create mode 100644 iokit/IOKit/scsi/scsi-device/Makefile create mode 100644 iokit/IOKit/scsi/scsi-device/SCSICommand.h create mode 100644 iokit/IOKit/scsi/scsi-device/SCSIDevice.h create mode 100644 iokit/IOKit/scsi/scsi-device/SCSIPublic.h create mode 100644 iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelCommand.h create mode 100644 iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelController.h create mode 100644 
iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelDevice.h create mode 100644 iokit/IOKit/scsi/scsi-parallel/Makefile create mode 100644 iokit/IOKit/scsi/scsi-parallel/SCSIParallelController.h create mode 100644 iokit/IOKit/scsi/scsi-parallel/SCSIParallelTarget.h create mode 100644 iokit/IOKit/storage/IOApplePartitionScheme.h create mode 100644 iokit/IOKit/storage/IOBlockStorageDevice.h create mode 100644 iokit/IOKit/storage/IOBlockStorageDriver.h create mode 100644 iokit/IOKit/storage/IOCDAudioControl.h create mode 100644 iokit/IOKit/storage/IOCDAudioControlUserClient.h create mode 100644 iokit/IOKit/storage/IOCDBlockStorageDevice.h create mode 100644 iokit/IOKit/storage/IOCDBlockStorageDriver.h create mode 100644 iokit/IOKit/storage/IOCDMedia.h create mode 100644 iokit/IOKit/storage/IOCDPartitionScheme.h create mode 100644 iokit/IOKit/storage/IOCDTypes.h create mode 100644 iokit/IOKit/storage/IODVDBlockStorageDevice.h create mode 100644 iokit/IOKit/storage/IODVDBlockStorageDriver.h create mode 100644 iokit/IOKit/storage/IODVDMedia.h create mode 100644 iokit/IOKit/storage/IODVDTypes.h create mode 100644 iokit/IOKit/storage/IOFDiskPartitionScheme.h create mode 100644 iokit/IOKit/storage/IOMedia.h create mode 100644 iokit/IOKit/storage/IOMediaBSDClient.h create mode 100644 iokit/IOKit/storage/IONeXTPartitionScheme.h create mode 100644 iokit/IOKit/storage/IOPartitionScheme.h create mode 100644 iokit/IOKit/storage/IOStorage.h create mode 100644 iokit/IOKit/storage/Makefile create mode 100644 iokit/IOKit/storage/ata/IOATAHDDrive.h create mode 100644 iokit/IOKit/storage/ata/IOATAHDDriveNub.h create mode 100644 iokit/IOKit/storage/ata/IOATAPICDDrive.h create mode 100644 iokit/IOKit/storage/ata/IOATAPICDDriveNub.h create mode 100644 iokit/IOKit/storage/ata/IOATAPIDVDDrive.h create mode 100644 iokit/IOKit/storage/ata/IOATAPIDVDDriveNub.h create mode 100644 iokit/IOKit/storage/ata/IOATAPIHDDrive.h create mode 100644 iokit/IOKit/storage/ata/IOATAPIHDDriveNub.h create mode 100644 
iokit/IOKit/storage/ata/Makefile create mode 100644 iokit/IOKit/storage/scsi/IOBasicSCSI.h create mode 100644 iokit/IOKit/storage/scsi/IOSCSICDDrive.h create mode 100644 iokit/IOKit/storage/scsi/IOSCSICDDriveNub.h create mode 100644 iokit/IOKit/storage/scsi/IOSCSIDVDDrive.h create mode 100644 iokit/IOKit/storage/scsi/IOSCSIDVDDriveNub.h create mode 100644 iokit/IOKit/storage/scsi/IOSCSIHDDrive.h create mode 100644 iokit/IOKit/storage/scsi/IOSCSIHDDriveNub.h create mode 100644 iokit/IOKit/storage/scsi/Makefile create mode 100644 iokit/IOKit/system.h create mode 100644 iokit/IOKit/system_management/IOWatchDogTimer.h create mode 100644 iokit/IOKit/system_management/Makefile create mode 100644 iokit/Kernel/IOBufferMemoryDescriptor.cpp create mode 100644 iokit/Kernel/IOCPU.cpp create mode 100644 iokit/Kernel/IOCatalogue.cpp create mode 100644 iokit/Kernel/IOCommand.cpp create mode 100644 iokit/Kernel/IOCommandGate.cpp create mode 100644 iokit/Kernel/IOCommandPool.cpp create mode 100644 iokit/Kernel/IOCommandQueue.cpp create mode 100644 iokit/Kernel/IOConditionLock.cpp create mode 100644 iokit/Kernel/IODataQueue.cpp create mode 100644 iokit/Kernel/IODeviceMemory.cpp create mode 100644 iokit/Kernel/IODeviceTreeSupport.cpp create mode 100644 iokit/Kernel/IOEventSource.cpp create mode 100644 iokit/Kernel/IOFilterInterruptEventSource.cpp create mode 100644 iokit/Kernel/IOInterruptController.cpp create mode 100644 iokit/Kernel/IOInterruptEventSource.cpp create mode 100644 iokit/Kernel/IOKitDebug.cpp create mode 100644 iokit/Kernel/IOLib.c create mode 100644 iokit/Kernel/IOLocks.cpp create mode 100644 iokit/Kernel/IOMemoryCursor.cpp create mode 100644 iokit/Kernel/IOMemoryDescriptor.cpp create mode 100644 iokit/Kernel/IOMultiMemoryDescriptor.cpp create mode 100644 iokit/Kernel/IONVRAM.cpp create mode 100644 iokit/Kernel/IOPMPowerSource.cpp create mode 100644 iokit/Kernel/IOPMPowerSourceList.cpp create mode 100644 iokit/Kernel/IOPMchangeNoteList.cpp create mode 100644 
iokit/Kernel/IOPMinformee.cpp create mode 100644 iokit/Kernel/IOPMinformeeList.cpp create mode 100644 iokit/Kernel/IOPMpmChild.cpp create mode 100644 iokit/Kernel/IOPlatformExpert.cpp create mode 100644 iokit/Kernel/IOPowerConnection.cpp create mode 100644 iokit/Kernel/IORangeAllocator.cpp create mode 100644 iokit/Kernel/IORegistryEntry.cpp create mode 100644 iokit/Kernel/IOService.cpp create mode 100644 iokit/Kernel/IOServicePM.cpp create mode 100644 iokit/Kernel/IOServicePrivate.h create mode 100644 iokit/Kernel/IOStartIOKit.cpp create mode 100644 iokit/Kernel/IOStringFuncs.c create mode 100644 iokit/Kernel/IOSyncer.cpp create mode 100644 iokit/Kernel/IOTimerEventSource.cpp create mode 100644 iokit/Kernel/IOUserClient.cpp create mode 100644 iokit/Kernel/IOWorkLoop.cpp create mode 100644 iokit/Kernel/PMmisc.cpp create mode 100644 iokit/Kernel/i386/IOAsmSupport.s create mode 100644 iokit/Kernel/i386/IOSharedLock.s create mode 100644 iokit/Kernel/ppc/IOAsmSupport.s create mode 100644 iokit/Kernel/ppc/IODBDMA.cpp create mode 100644 iokit/Kernel/ppc/IOSharedLock.s create mode 100644 iokit/Kernel/printPlist create mode 100644 iokit/KernelConfigTables.cpp create mode 100644 iokit/Makefile create mode 100644 iokit/Tests/TestCollections.cpp create mode 100644 iokit/Tests/TestContainers.cpp create mode 100644 iokit/Tests/TestDevice.cpp create mode 100644 iokit/Tests/Tests.cpp create mode 100644 iokit/Tests/Tests.h create mode 100644 iokit/User/Makefile create mode 100644 iokit/User/Makefile.user create mode 100644 iokit/bsddev/IOBSDConsole.cpp create mode 100644 iokit/bsddev/IOBSDConsole.h create mode 100644 iokit/bsddev/IOKitBSDInit.cpp create mode 100644 iokit/bsddev/IOKitBSDInit.h create mode 100644 iokit/conf/MASTER create mode 100644 iokit/conf/MASTER.i386 create mode 100644 iokit/conf/MASTER.ppc create mode 100644 iokit/conf/Makefile create mode 100644 iokit/conf/Makefile.i386 create mode 100644 iokit/conf/Makefile.ppc create mode 100644 iokit/conf/Makefile.template 
create mode 100644 iokit/conf/copyright create mode 100644 iokit/conf/files create mode 100644 iokit/conf/files.i386 create mode 100644 iokit/conf/files.ppc create mode 100644 iokit/conf/tools/Makefile create mode 100644 iokit/conf/tools/doconf/Makefile create mode 100755 iokit/conf/tools/doconf/doconf.csh create mode 100644 iokit/conf/tools/newvers/Makefile create mode 100644 iokit/conf/tools/newvers/newvers.csh create mode 100644 iokit/conf/version.major create mode 100644 iokit/conf/version.minor create mode 100644 iokit/conf/version.variant create mode 100644 iokit/include/DeviceTree.h create mode 100644 iokit/include/Makefile create mode 100644 iokit/include/architecture/i386/kernBootStruct.h create mode 100644 iokit/include/architecture/i386/pio.h create mode 100644 iokit/include/assert.h create mode 100644 iokit/include/bsddev/EventShmemLock.h create mode 100644 iokit/include/bsddev/Makefile create mode 100644 iokit/include/bsddev/ev_keymap.h create mode 100644 iokit/include/bsddev/ev_types.h create mode 100644 iokit/include/bsddev/event.h create mode 100644 iokit/include/bsddev/evio.h create mode 100644 iokit/include/bsddev/evsio.h create mode 100644 iokit/include/bsddev/i386/EventShmemLock.h create mode 100644 iokit/include/bsddev/i386/Makefile create mode 100644 iokit/include/bsddev/i386/event.h create mode 100644 iokit/include/bsddev/i386/evio.h create mode 100644 iokit/include/bsddev/i386/evsio.h create mode 100644 iokit/include/bsddev/machine/EventShmemLock.h create mode 100644 iokit/include/bsddev/machine/Makefile create mode 100644 iokit/include/bsddev/machine/event.h create mode 100644 iokit/include/bsddev/machine/evio.h create mode 100644 iokit/include/bsddev/machine/evsio.h create mode 100644 iokit/include/bsddev/ppc/EventShmemLock.h create mode 100644 iokit/include/bsddev/ppc/Makefile create mode 100644 iokit/include/bsddev/ppc/event.h create mode 100644 iokit/include/bsddev/ppc/evio.h create mode 100644 iokit/include/bsddev/ppc/evsio.h create 
mode 100644 iokit/include/drivers/Makefile create mode 100644 iokit/include/drivers/event_status_driver.h create mode 100644 iokit/include/mach/mach.h create mode 100644 iokit/mach-o/mach_header.h create mode 100644 libkern/Makefile create mode 100644 libkern/c++/OSArray.cpp create mode 100644 libkern/c++/OSBoolean.cpp create mode 100644 libkern/c++/OSCPPDebug.cpp create mode 100644 libkern/c++/OSCollection.cpp create mode 100644 libkern/c++/OSCollectionIterator.cpp create mode 100644 libkern/c++/OSData.cpp create mode 100644 libkern/c++/OSDictionary.cpp create mode 100644 libkern/c++/OSIterator.cpp create mode 100644 libkern/c++/OSMetaClass.cpp create mode 100644 libkern/c++/OSNumber.cpp create mode 100644 libkern/c++/OSObject.cpp create mode 100644 libkern/c++/OSOrderedSet.cpp create mode 100644 libkern/c++/OSRuntime.cpp create mode 100644 libkern/c++/OSSerialize.cpp create mode 100644 libkern/c++/OSSet.cpp create mode 100644 libkern/c++/OSString.cpp create mode 100644 libkern/c++/OSSymbol.cpp create mode 100644 libkern/c++/OSUnserialize.cpp create mode 100644 libkern/c++/OSUnserialize.y create mode 100644 libkern/c++/OSUnserializeXML.cpp create mode 100644 libkern/c++/OSUnserializeXML.y create mode 100644 libkern/c++/Tests/TestSerialization/CustomInfo.xml create mode 100644 libkern/c++/Tests/TestSerialization/Makefile create mode 100644 libkern/c++/Tests/TestSerialization/Makefile.postamble create mode 100644 libkern/c++/Tests/TestSerialization/Makefile.preamble create mode 100644 libkern/c++/Tests/TestSerialization/PB.project create mode 100644 libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist create mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml create mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile create mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble create mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble create mode 
100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project create mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.cpp create mode 100644 libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h create mode 100755 libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml create mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile create mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble create mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble create mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project create mode 100644 libkern/c++/Tests/TestSerialization/test2.kmodproj/test2_main.cpp create mode 100644 libkern/conf/MASTER create mode 100644 libkern/conf/MASTER.i386 create mode 100644 libkern/conf/MASTER.ppc create mode 100644 libkern/conf/Makefile create mode 100644 libkern/conf/Makefile.i386 create mode 100644 libkern/conf/Makefile.ppc create mode 100644 libkern/conf/Makefile.template create mode 100644 libkern/conf/copyright create mode 100644 libkern/conf/files create mode 100644 libkern/conf/files.i386 create mode 100644 libkern/conf/files.ppc create mode 100644 libkern/conf/tools/Makefile create mode 100644 libkern/conf/tools/doconf/Makefile create mode 100755 libkern/conf/tools/doconf/doconf.csh create mode 100644 libkern/conf/tools/newvers/Makefile create mode 100644 libkern/conf/tools/newvers/newvers.csh create mode 100644 libkern/conf/version.major create mode 100644 libkern/conf/version.minor create mode 100644 libkern/conf/version.variant create mode 100644 libkern/gen/OSAtomicOperations.c create mode 100644 libkern/i386/OSAtomic.c create mode 100644 libkern/i386/OSAtomic.s create mode 100644 libkern/kmod/Makefile create mode 100644 libkern/kmod/Makefile.kmod create mode 100644 libkern/kmod/README create mode 100644 libkern/kmod/c_start.c create mode 100644 libkern/kmod/c_stop.c create mode 100644 
libkern/kmod/cplus_start.c create mode 100644 libkern/kmod/cplus_stop.c create mode 100644 libkern/libkern/Makefile create mode 100644 libkern/libkern/OSAtomic.h create mode 100644 libkern/libkern/OSBase.h create mode 100644 libkern/libkern/OSByteOrder.h create mode 100644 libkern/libkern/OSReturn.h create mode 100644 libkern/libkern/OSTypes.h create mode 100644 libkern/libkern/c++/Makefile create mode 100644 libkern/libkern/c++/OSArray.h create mode 100644 libkern/libkern/c++/OSBoolean.h create mode 100644 libkern/libkern/c++/OSCPPDebug.h create mode 100644 libkern/libkern/c++/OSCollection.h create mode 100644 libkern/libkern/c++/OSCollectionIterator.h create mode 100644 libkern/libkern/c++/OSContainers.h create mode 100644 libkern/libkern/c++/OSData.h create mode 100644 libkern/libkern/c++/OSDictionary.h create mode 100644 libkern/libkern/c++/OSIterator.h create mode 100644 libkern/libkern/c++/OSLib.h create mode 100644 libkern/libkern/c++/OSMetaClass.h create mode 100644 libkern/libkern/c++/OSNumber.h create mode 100644 libkern/libkern/c++/OSObject.h create mode 100644 libkern/libkern/c++/OSOrderedSet.h create mode 100644 libkern/libkern/c++/OSSerialize.h create mode 100644 libkern/libkern/c++/OSSet.h create mode 100644 libkern/libkern/c++/OSString.h create mode 100644 libkern/libkern/c++/OSSymbol.h create mode 100644 libkern/libkern/c++/OSUnserialize.h create mode 100644 libkern/libkern/i386/Makefile create mode 100644 libkern/libkern/i386/OSByteOrder.h create mode 100644 libkern/libkern/machine/Makefile create mode 100644 libkern/libkern/machine/OSByteOrder.h create mode 100644 libkern/libkern/ppc/Makefile create mode 100644 libkern/libkern/ppc/OSByteOrder.h create mode 100644 libkern/mach-o/loader.h create mode 100644 libkern/mach-o/mach_header.h create mode 100644 libkern/ppc/OSAtomic.s create mode 100644 libsa/Makefile create mode 100644 libsa/bootstrap.cpp create mode 100644 libsa/bsearch.c create mode 100644 libsa/catalogue.cpp create mode 100644 
libsa/conf/MASTER create mode 100644 libsa/conf/MASTER.i386 create mode 100644 libsa/conf/MASTER.ppc create mode 100644 libsa/conf/Makefile create mode 100644 libsa/conf/Makefile.i386 create mode 100644 libsa/conf/Makefile.ppc create mode 100644 libsa/conf/Makefile.template create mode 100644 libsa/conf/copyright create mode 100644 libsa/conf/files create mode 100644 libsa/conf/files.i386 create mode 100644 libsa/conf/files.ppc create mode 100644 libsa/conf/tools/Makefile create mode 100644 libsa/conf/tools/doconf/Makefile create mode 100755 libsa/conf/tools/doconf/doconf.csh create mode 100644 libsa/conf/tools/newvers/Makefile create mode 100644 libsa/conf/tools/newvers/newvers.csh create mode 100644 libsa/conf/version.major create mode 100644 libsa/conf/version.minor create mode 100644 libsa/conf/version.variant create mode 100644 libsa/i386/setjmp.s create mode 100644 libsa/kmod.cpp create mode 100644 libsa/libsa/Makefile create mode 100644 libsa/libsa/catalogue.h create mode 100644 libsa/libsa/i386/Makefile create mode 100644 libsa/libsa/i386/setjmp.h create mode 100644 libsa/libsa/kmod.h create mode 100644 libsa/libsa/mach/Makefile create mode 100644 libsa/libsa/mach/mach.h create mode 100644 libsa/libsa/malloc.h create mode 100644 libsa/libsa/mkext.h create mode 100644 libsa/libsa/ppc/Makefile create mode 100644 libsa/libsa/ppc/setjmp.h create mode 100644 libsa/libsa/setjmp.h create mode 100644 libsa/libsa/stdlib.h create mode 100644 libsa/libsa/unistd.h create mode 100644 libsa/libsa/vers_rsrc.h create mode 100644 libsa/mach.c create mode 100644 libsa/mach_loader.h create mode 100644 libsa/malloc.c create mode 100644 libsa/malloc_debug_stuff create mode 100644 libsa/malloc_unused create mode 100644 libsa/misc.c create mode 100644 libsa/mkext.c create mode 100644 libsa/ppc/setjmp.s create mode 100644 libsa/printPlist create mode 100644 libsa/sort.c create mode 100644 libsa/vers_rsrc.c create mode 100644 makedefs/MakeInc.cmd create mode 100644 
makedefs/MakeInc.def create mode 100644 makedefs/MakeInc.dir create mode 100644 makedefs/MakeInc.rule create mode 100644 osfmk/.gdbinit create mode 100644 osfmk/Makefile create mode 100644 osfmk/UserNotification/KUNCUserNotifications.c create mode 100644 osfmk/UserNotification/KUNCUserNotifications.h create mode 100644 osfmk/UserNotification/Makefile create mode 100644 osfmk/UserNotification/UNDReply.defs create mode 100644 osfmk/UserNotification/UNDRequest.defs create mode 100644 osfmk/UserNotification/UNDTypes.defs create mode 100644 osfmk/UserNotification/UNDTypes.h create mode 100644 osfmk/conf/MASTER create mode 100644 osfmk/conf/MASTER.i386 create mode 100644 osfmk/conf/MASTER.ppc create mode 100644 osfmk/conf/Makefile create mode 100644 osfmk/conf/Makefile.i386 create mode 100644 osfmk/conf/Makefile.ppc create mode 100644 osfmk/conf/Makefile.template create mode 100644 osfmk/conf/copyright create mode 100644 osfmk/conf/copyright.cmu create mode 100644 osfmk/conf/copyright.osf create mode 100644 osfmk/conf/files create mode 100644 osfmk/conf/files.i386 create mode 100644 osfmk/conf/files.ppc create mode 100644 osfmk/conf/kernelversion.major create mode 100644 osfmk/conf/kernelversion.minor create mode 100644 osfmk/conf/kernelversion.variant create mode 100644 osfmk/conf/tools/Makefile create mode 100644 osfmk/conf/tools/doconf/Makefile create mode 100755 osfmk/conf/tools/doconf/doconf.csh create mode 100644 osfmk/conf/tools/kernel_newvers/Makefile create mode 100644 osfmk/conf/tools/kernel_newvers/kernel_newvers.csh create mode 100644 osfmk/conf/tools/newvers/Makefile create mode 100644 osfmk/conf/tools/newvers/newvers.csh create mode 100644 osfmk/conf/version.major create mode 100644 osfmk/conf/version.minor create mode 100644 osfmk/conf/version.variant create mode 100644 osfmk/ddb/Makefile create mode 100644 osfmk/ddb/db_access.c create mode 100644 osfmk/ddb/db_access.h create mode 100644 osfmk/ddb/db_aout.c create mode 100644 osfmk/ddb/db_aout.h create 
mode 100644 osfmk/ddb/db_break.c create mode 100644 osfmk/ddb/db_break.h create mode 100644 osfmk/ddb/db_coff.h create mode 100644 osfmk/ddb/db_command.c create mode 100644 osfmk/ddb/db_command.h create mode 100644 osfmk/ddb/db_cond.c create mode 100644 osfmk/ddb/db_cond.h create mode 100644 osfmk/ddb/db_examine.c create mode 100644 osfmk/ddb/db_examine.h create mode 100644 osfmk/ddb/db_expr.c create mode 100644 osfmk/ddb/db_expr.h create mode 100644 osfmk/ddb/db_ext_symtab.c create mode 100644 osfmk/ddb/db_input.c create mode 100644 osfmk/ddb/db_input.h create mode 100644 osfmk/ddb/db_lex.c create mode 100644 osfmk/ddb/db_lex.h create mode 100644 osfmk/ddb/db_macro.c create mode 100644 osfmk/ddb/db_macro.h create mode 100644 osfmk/ddb/db_output.c create mode 100644 osfmk/ddb/db_output.h create mode 100644 osfmk/ddb/db_print.c create mode 100644 osfmk/ddb/db_print.h create mode 100644 osfmk/ddb/db_run.c create mode 100644 osfmk/ddb/db_run.h create mode 100644 osfmk/ddb/db_sym.c create mode 100644 osfmk/ddb/db_sym.h create mode 100644 osfmk/ddb/db_task_thread.c create mode 100644 osfmk/ddb/db_task_thread.h create mode 100644 osfmk/ddb/db_trap.c create mode 100644 osfmk/ddb/db_trap.h create mode 100644 osfmk/ddb/db_variables.c create mode 100644 osfmk/ddb/db_variables.h create mode 100644 osfmk/ddb/db_watch.c create mode 100644 osfmk/ddb/db_watch.h create mode 100644 osfmk/ddb/db_write_cmd.c create mode 100644 osfmk/ddb/db_write_cmd.h create mode 100644 osfmk/ddb/makedis.c create mode 100644 osfmk/ddb/nlist.h create mode 100644 osfmk/ddb/orig/db_print.c create mode 100644 osfmk/ddb/stab.h create mode 100644 osfmk/ddb/tr.c create mode 100644 osfmk/ddb/tr.h create mode 100644 osfmk/default_pager/Makefile create mode 100644 osfmk/default_pager/Makefile.template create mode 100644 osfmk/default_pager/default_pager.c create mode 100644 osfmk/default_pager/default_pager_alerts.defs create mode 100644 osfmk/default_pager/default_pager_internal.h create mode 100644 
osfmk/default_pager/default_pager_object.defs create mode 100644 osfmk/default_pager/default_pager_types.defs create mode 100644 osfmk/default_pager/default_pager_types.h create mode 100644 osfmk/default_pager/diag.h create mode 100644 osfmk/default_pager/dp_backing_store.c create mode 100644 osfmk/default_pager/dp_memory_object.c create mode 100644 osfmk/device/Makefile create mode 100644 osfmk/device/device.defs create mode 100644 osfmk/device/device_init.c create mode 100644 osfmk/device/device_port.h create mode 100644 osfmk/device/device_types.defs create mode 100644 osfmk/device/device_types.h create mode 100644 osfmk/device/iokit_rpc.c create mode 100644 osfmk/device/subrs.c create mode 100644 osfmk/i386/AT386/asm_startup.h create mode 100644 osfmk/i386/AT386/autoconf.c create mode 100644 osfmk/i386/AT386/bbclock.c create mode 100644 osfmk/i386/AT386/bbclock_entries.h create mode 100644 osfmk/i386/AT386/conf.c create mode 100644 osfmk/i386/AT386/config.h create mode 100644 osfmk/i386/AT386/cram.h create mode 100644 osfmk/i386/AT386/himem.c create mode 100644 osfmk/i386/AT386/himem.h create mode 100644 osfmk/i386/AT386/iso_scan_font.h create mode 100644 osfmk/i386/AT386/kernBootStruct.h create mode 100644 osfmk/i386/AT386/machdep.mk create mode 100644 osfmk/i386/AT386/misc_protos.h create mode 100644 osfmk/i386/AT386/model_dep.c create mode 100644 osfmk/i386/AT386/mp/boot.h create mode 100644 osfmk/i386/AT386/mp/mp.c create mode 100644 osfmk/i386/AT386/mp/mp.h create mode 100644 osfmk/i386/AT386/mp/mp_events.h create mode 100644 osfmk/i386/AT386/mp/mp_v1_1.c create mode 100644 osfmk/i386/AT386/mp/mp_v1_1.h create mode 100644 osfmk/i386/AT386/mp/slave_boot.s create mode 100644 osfmk/i386/AT386/physmem_entries.h create mode 100644 osfmk/i386/AT386/rtc.h create mode 100644 osfmk/i386/AT386/video_console.c create mode 100644 osfmk/i386/AT386/video_console.h create mode 100644 osfmk/i386/Makefile create mode 100644 osfmk/i386/_setjmp.s create mode 100644 
osfmk/i386/apic.h create mode 100644 osfmk/i386/arch_types.h create mode 100644 osfmk/i386/asm.h create mode 100644 osfmk/i386/ast.h create mode 100644 osfmk/i386/ast_check.c create mode 100644 osfmk/i386/ast_types.h create mode 100644 osfmk/i386/bcopy.s create mode 100644 osfmk/i386/bsd_i386.c create mode 100644 osfmk/i386/bzero.s create mode 100644 osfmk/i386/cpu.c create mode 100644 osfmk/i386/cpu_data.h create mode 100644 osfmk/i386/cpu_number.h create mode 100644 osfmk/i386/cpuid.c create mode 100644 osfmk/i386/cpuid.h create mode 100644 osfmk/i386/cswitch.s create mode 100644 osfmk/i386/db_disasm.c create mode 100644 osfmk/i386/db_gcc_aout.c create mode 100644 osfmk/i386/db_interface.c create mode 100644 osfmk/i386/db_machdep.h create mode 100644 osfmk/i386/db_trace.c create mode 100644 osfmk/i386/eflags.h create mode 100644 osfmk/i386/endian.h create mode 100644 osfmk/i386/exec.h create mode 100644 osfmk/i386/flipc_page.h create mode 100644 osfmk/i386/fpu.c create mode 100644 osfmk/i386/fpu.h create mode 100644 osfmk/i386/gcc.s create mode 100644 osfmk/i386/gdb_defs.h create mode 100644 osfmk/i386/gdt.c create mode 100644 osfmk/i386/genassym.c create mode 100644 osfmk/i386/hardclock.c create mode 100644 osfmk/i386/hardclock_entries.h create mode 100644 osfmk/i386/hi_res_clock.h create mode 100644 osfmk/i386/hi_res_clock_map.c create mode 100644 osfmk/i386/hw_lock_types.h create mode 100644 osfmk/i386/i386_lock.s create mode 100644 osfmk/i386/idt.s create mode 100644 osfmk/i386/intel_read_fault.h create mode 100644 osfmk/i386/io_emulate.c create mode 100644 osfmk/i386/io_emulate.h create mode 100644 osfmk/i386/io_map.c create mode 100644 osfmk/i386/io_map_entries.h create mode 100644 osfmk/i386/io_port.h create mode 100644 osfmk/i386/iopb.c create mode 100644 osfmk/i386/iopb.h create mode 100644 osfmk/i386/iopb_entries.h create mode 100644 osfmk/i386/ipl.h create mode 100644 osfmk/i386/ktss.c create mode 100644 osfmk/i386/ldt.c create mode 100644 
osfmk/i386/lock.h create mode 100644 osfmk/i386/locore.s create mode 100644 osfmk/i386/loose_ends.c create mode 100644 osfmk/i386/mach_param.h create mode 100644 osfmk/i386/machdep_call.c create mode 100644 osfmk/i386/machdep_call.h create mode 100644 osfmk/i386/machine_routines.c create mode 100644 osfmk/i386/machine_routines.h create mode 100644 osfmk/i386/machine_routines_asm.s create mode 100644 osfmk/i386/machine_rpc.h create mode 100644 osfmk/i386/machlimits.h create mode 100644 osfmk/i386/machparam.h create mode 100644 osfmk/i386/misc_protos.h create mode 100644 osfmk/i386/mp_desc.c create mode 100644 osfmk/i386/mp_desc.h create mode 100644 osfmk/i386/net_filter.c create mode 100644 osfmk/i386/ntoh.h create mode 100644 osfmk/i386/ntoh.s create mode 100644 osfmk/i386/pcb.c create mode 100644 osfmk/i386/phys.c create mode 100644 osfmk/i386/pic.h create mode 100644 osfmk/i386/pio.h create mode 100644 osfmk/i386/pit.h create mode 100644 osfmk/i386/pmap.c create mode 100644 osfmk/i386/pmap.h create mode 100644 osfmk/i386/proc_reg.h create mode 100644 osfmk/i386/read_fault.c create mode 100644 osfmk/i386/rtclock.c create mode 100644 osfmk/i386/rtclock_entries.h create mode 100644 osfmk/i386/sched_param.h create mode 100644 osfmk/i386/seg.h create mode 100644 osfmk/i386/setjmp.h create mode 100644 osfmk/i386/setjmp.s create mode 100644 osfmk/i386/stab.h create mode 100644 osfmk/i386/start.s create mode 100644 osfmk/i386/task.h create mode 100644 osfmk/i386/thread.h create mode 100644 osfmk/i386/thread_act.h create mode 100644 osfmk/i386/timer.h create mode 100644 osfmk/i386/trap.c create mode 100644 osfmk/i386/trap.h create mode 100644 osfmk/i386/tss.h create mode 100644 osfmk/i386/user_ldt.c create mode 100644 osfmk/i386/user_ldt.h create mode 100644 osfmk/i386/vm_tuning.h create mode 100644 osfmk/i386/xpr.h create mode 100644 osfmk/ipc/Makefile create mode 100644 osfmk/ipc/ipc_entry.c create mode 100644 osfmk/ipc/ipc_entry.h create mode 100644 
osfmk/ipc/ipc_hash.c create mode 100644 osfmk/ipc/ipc_hash.h create mode 100644 osfmk/ipc/ipc_init.c create mode 100644 osfmk/ipc/ipc_init.h create mode 100644 osfmk/ipc/ipc_kmsg.c create mode 100644 osfmk/ipc/ipc_kmsg.h create mode 100644 osfmk/ipc/ipc_machdep.h create mode 100644 osfmk/ipc/ipc_mqueue.c create mode 100644 osfmk/ipc/ipc_mqueue.h create mode 100644 osfmk/ipc/ipc_notify.c create mode 100644 osfmk/ipc/ipc_notify.h create mode 100644 osfmk/ipc/ipc_object.c create mode 100644 osfmk/ipc/ipc_object.h create mode 100644 osfmk/ipc/ipc_port.c create mode 100644 osfmk/ipc/ipc_port.h create mode 100644 osfmk/ipc/ipc_print.h create mode 100644 osfmk/ipc/ipc_pset.c create mode 100644 osfmk/ipc/ipc_pset.h create mode 100644 osfmk/ipc/ipc_right.c create mode 100644 osfmk/ipc/ipc_right.h create mode 100644 osfmk/ipc/ipc_space.c create mode 100644 osfmk/ipc/ipc_space.h create mode 100644 osfmk/ipc/ipc_splay.c create mode 100644 osfmk/ipc/ipc_splay.h create mode 100644 osfmk/ipc/ipc_table.c create mode 100644 osfmk/ipc/ipc_table.h create mode 100644 osfmk/ipc/ipc_types.h create mode 100644 osfmk/ipc/mach_debug.c create mode 100644 osfmk/ipc/mach_msg.c create mode 100644 osfmk/ipc/mach_port.c create mode 100644 osfmk/ipc/mig_log.c create mode 100644 osfmk/ipc/port.h create mode 100644 osfmk/kdp/kdp.c create mode 100644 osfmk/kdp/kdp.h create mode 100644 osfmk/kdp/kdp_en_debugger.h create mode 100644 osfmk/kdp/kdp_internal.h create mode 100644 osfmk/kdp/kdp_private.h create mode 100644 osfmk/kdp/kdp_protocol.h create mode 100644 osfmk/kdp/kdp_udp.c create mode 100644 osfmk/kdp/kdp_udp.h create mode 100644 osfmk/kdp/ml/i386/kdp_machdep.c create mode 100644 osfmk/kdp/ml/i386/kdp_vm.c create mode 100644 osfmk/kdp/ml/ppc/kdp_asm.s create mode 100644 osfmk/kdp/ml/ppc/kdp_machdep.c create mode 100644 osfmk/kdp/ml/ppc/kdp_misc.s create mode 100644 osfmk/kdp/ml/ppc/kdp_vm.c create mode 100644 osfmk/kdp/pe/POWERMAC/kdp_mace.c create mode 100644 osfmk/kdp/pe/POWERMAC/kdp_mace.h 
create mode 100644 osfmk/kern/Makefile create mode 100644 osfmk/kern/assert.h create mode 100644 osfmk/kern/ast.c create mode 100644 osfmk/kern/ast.h create mode 100644 osfmk/kern/bits.c create mode 100644 osfmk/kern/bsd_kern.c create mode 100644 osfmk/kern/call_entry.h create mode 100644 osfmk/kern/clock.c create mode 100644 osfmk/kern/clock.h create mode 100644 osfmk/kern/counters.c create mode 100644 osfmk/kern/counters.h create mode 100644 osfmk/kern/cpu_data.c create mode 100644 osfmk/kern/cpu_data.h create mode 100644 osfmk/kern/cpu_number.h create mode 100644 osfmk/kern/debug.c create mode 100644 osfmk/kern/debug.h create mode 100644 osfmk/kern/etap.c create mode 100644 osfmk/kern/etap_macros.h create mode 100644 osfmk/kern/etap_map.c create mode 100644 osfmk/kern/etap_map.h create mode 100644 osfmk/kern/etap_options.h create mode 100644 osfmk/kern/etap_pool.c create mode 100644 osfmk/kern/etap_pool.h create mode 100644 osfmk/kern/exception.c create mode 100644 osfmk/kern/exception.h create mode 100644 osfmk/kern/host.c create mode 100644 osfmk/kern/host.h create mode 100644 osfmk/kern/host_statistics.h create mode 100644 osfmk/kern/ipc_clock.c create mode 100644 osfmk/kern/ipc_host.c create mode 100644 osfmk/kern/ipc_host.h create mode 100644 osfmk/kern/ipc_kobject.c create mode 100644 osfmk/kern/ipc_kobject.h create mode 100644 osfmk/kern/ipc_mig.c create mode 100644 osfmk/kern/ipc_mig.h create mode 100644 osfmk/kern/ipc_subsystem.c create mode 100644 osfmk/kern/ipc_subsystem.h create mode 100644 osfmk/kern/ipc_sync.c create mode 100644 osfmk/kern/ipc_sync.h create mode 100644 osfmk/kern/ipc_tt.c create mode 100644 osfmk/kern/ipc_tt.h create mode 100644 osfmk/kern/kalloc.c create mode 100644 osfmk/kern/kalloc.h create mode 100644 osfmk/kern/kern_print.h create mode 100644 osfmk/kern/kern_types.h create mode 100644 osfmk/kern/kmod.c create mode 100644 osfmk/kern/ledger.c create mode 100644 osfmk/kern/ledger.h create mode 100644 osfmk/kern/lock.c create mode 
100644 osfmk/kern/lock.h create mode 100644 osfmk/kern/lock_mon.c create mode 100644 osfmk/kern/mach_clock.c create mode 100644 osfmk/kern/mach_factor.c create mode 100644 osfmk/kern/mach_param.h create mode 100644 osfmk/kern/machine.c create mode 100644 osfmk/kern/machine.h create mode 100644 osfmk/kern/macro_help.h create mode 100644 osfmk/kern/misc_protos.h create mode 100644 osfmk/kern/mk_sp.c create mode 100644 osfmk/kern/mk_sp.h create mode 100644 osfmk/kern/mk_timer.c create mode 100644 osfmk/kern/mk_timer.h create mode 100644 osfmk/kern/norma_protos.h create mode 100644 osfmk/kern/printf.c create mode 100644 osfmk/kern/priority.c create mode 100644 osfmk/kern/processor.c create mode 100644 osfmk/kern/processor.h create mode 100644 osfmk/kern/profile.c create mode 100644 osfmk/kern/profile.h create mode 100644 osfmk/kern/queue.c create mode 100644 osfmk/kern/queue.h create mode 100644 osfmk/kern/sched.h create mode 100644 osfmk/kern/sched_prim.c create mode 100644 osfmk/kern/sched_prim.h create mode 100644 osfmk/kern/sf.c create mode 100644 osfmk/kern/sf.h create mode 100644 osfmk/kern/simple_lock.h create mode 100644 osfmk/kern/simple_lock_types.h create mode 100644 osfmk/kern/spl.c create mode 100644 osfmk/kern/spl.h create mode 100644 osfmk/kern/sscanf.c create mode 100644 osfmk/kern/startup.c create mode 100644 osfmk/kern/startup.h create mode 100644 osfmk/kern/subsystem.c create mode 100644 osfmk/kern/subsystem.h create mode 100644 osfmk/kern/sync_lock.c create mode 100644 osfmk/kern/sync_lock.h create mode 100644 osfmk/kern/sync_sema.c create mode 100644 osfmk/kern/sync_sema.h create mode 100644 osfmk/kern/syscall_emulation.c create mode 100644 osfmk/kern/syscall_emulation.h create mode 100644 osfmk/kern/syscall_subr.c create mode 100644 osfmk/kern/syscall_subr.h create mode 100644 osfmk/kern/syscall_sw.c create mode 100644 osfmk/kern/syscall_sw.h create mode 100644 osfmk/kern/task.c create mode 100644 osfmk/kern/task.h create mode 100644 
osfmk/kern/task_policy.c create mode 100644 osfmk/kern/task_swap.c create mode 100644 osfmk/kern/task_swap.h create mode 100644 osfmk/kern/template.mk create mode 100644 osfmk/kern/thread.c create mode 100644 osfmk/kern/thread.h create mode 100644 osfmk/kern/thread_act.c create mode 100644 osfmk/kern/thread_act.h create mode 100644 osfmk/kern/thread_call.c create mode 100644 osfmk/kern/thread_call.h create mode 100644 osfmk/kern/thread_policy.c create mode 100644 osfmk/kern/thread_pool.c create mode 100644 osfmk/kern/thread_pool.h create mode 100644 osfmk/kern/thread_swap.c create mode 100644 osfmk/kern/thread_swap.h create mode 100644 osfmk/kern/time_out.h create mode 100644 osfmk/kern/timer.c create mode 100644 osfmk/kern/timer.h create mode 100644 osfmk/kern/timer_call.c create mode 100644 osfmk/kern/timer_call.h create mode 100644 osfmk/kern/wait_queue.c create mode 100644 osfmk/kern/wait_queue.h create mode 100644 osfmk/kern/xpr.c create mode 100644 osfmk/kern/xpr.h create mode 100644 osfmk/kern/zalloc.c create mode 100644 osfmk/kern/zalloc.h create mode 100644 osfmk/libsa/Makefile create mode 100644 osfmk/libsa/ctype.h create mode 100644 osfmk/libsa/errno.h create mode 100644 osfmk/libsa/float.h create mode 100644 osfmk/libsa/i386/float.h create mode 100644 osfmk/libsa/i386/math.h create mode 100644 osfmk/libsa/i386/stdarg.h create mode 100644 osfmk/libsa/i386/types.h create mode 100644 osfmk/libsa/i386/va_list.h create mode 100644 osfmk/libsa/ieeefloat.h create mode 100644 osfmk/libsa/machine/stdarg.h create mode 100644 osfmk/libsa/machine/stdarg_apple.h create mode 100644 osfmk/libsa/machine/types.h create mode 100644 osfmk/libsa/machine/va_list.h create mode 100644 osfmk/libsa/math.h create mode 100644 osfmk/libsa/ppc/float.h create mode 100644 osfmk/libsa/ppc/math.h create mode 100644 osfmk/libsa/ppc/stdarg.h create mode 100644 osfmk/libsa/ppc/stdarg_apple.h create mode 100644 osfmk/libsa/ppc/types.h create mode 100644 osfmk/libsa/ppc/va_list.h create 
mode 100644 osfmk/libsa/stdarg.h create mode 100644 osfmk/libsa/stdio.h create mode 100644 osfmk/libsa/stdlib.h create mode 100644 osfmk/libsa/string.h create mode 100644 osfmk/libsa/sys/timers.h create mode 100644 osfmk/libsa/types.h create mode 100644 osfmk/libsa/va_list.h create mode 100644 osfmk/mach-o/loader.h create mode 100644 osfmk/mach-o/mach_header.c create mode 100644 osfmk/mach-o/mach_header.h create mode 100644 osfmk/mach/AT386/machdep.mk create mode 100644 osfmk/mach/Makefile create mode 100644 osfmk/mach/Makefile.template create mode 100644 osfmk/mach/alert.h create mode 100644 osfmk/mach/boolean.h create mode 100644 osfmk/mach/boot_info.h create mode 100644 osfmk/mach/bootstrap.defs create mode 100644 osfmk/mach/clock.defs create mode 100644 osfmk/mach/clock_priv.defs create mode 100644 osfmk/mach/clock_reply.defs create mode 100644 osfmk/mach/clock_types.defs create mode 100644 osfmk/mach/clock_types.h create mode 100644 osfmk/mach/error.h create mode 100644 osfmk/mach/etap.h create mode 100644 osfmk/mach/etap_events.h create mode 100644 osfmk/mach/events_info.h create mode 100644 osfmk/mach/exc.defs create mode 100644 osfmk/mach/exception.h create mode 100644 osfmk/mach/exception_types.h create mode 100644 osfmk/mach/flipc_cb.h create mode 100644 osfmk/mach/flipc_debug.h create mode 100644 osfmk/mach/flipc_device.h create mode 100644 osfmk/mach/flipc_locks.h create mode 100644 osfmk/mach/flipc_types.h create mode 100644 osfmk/mach/host_info.h create mode 100644 osfmk/mach/host_priv.defs create mode 100644 osfmk/mach/host_reboot.h create mode 100644 osfmk/mach/host_security.defs create mode 100644 osfmk/mach/i386/Makefile create mode 100644 osfmk/mach/i386/boolean.h create mode 100644 osfmk/mach/i386/exception.h create mode 100644 osfmk/mach/i386/flipc_dep.h create mode 100644 osfmk/mach/i386/fp_reg.h create mode 100644 osfmk/mach/i386/kern_return.h create mode 100644 osfmk/mach/i386/mach_i386.defs create mode 100644 
osfmk/mach/i386/mach_i386_types.h create mode 100644 osfmk/mach/i386/machine_types.defs create mode 100644 osfmk/mach/i386/ndr_def.h create mode 100644 osfmk/mach/i386/processor_info.h create mode 100644 osfmk/mach/i386/rpc.h create mode 100644 osfmk/mach/i386/syscall_sw.h create mode 100644 osfmk/mach/i386/thread_state.h create mode 100644 osfmk/mach/i386/thread_status.h create mode 100644 osfmk/mach/i386/vm_param.h create mode 100644 osfmk/mach/i386/vm_types.h create mode 100644 osfmk/mach/kern_return.h create mode 100644 osfmk/mach/kmod.h create mode 100644 osfmk/mach/ledger.defs create mode 100644 osfmk/mach/lock_set.defs create mode 100644 osfmk/mach/mach.h create mode 100644 osfmk/mach/mach_host.defs create mode 100644 osfmk/mach/mach_interface.h create mode 100644 osfmk/mach/mach_ioctl.h create mode 100644 osfmk/mach/mach_norma.defs create mode 100644 osfmk/mach/mach_param.h create mode 100644 osfmk/mach/mach_port.defs create mode 100644 osfmk/mach/mach_syscalls.h create mode 100644 osfmk/mach/mach_time.h create mode 100644 osfmk/mach/mach_traps.h create mode 100644 osfmk/mach/mach_types.defs create mode 100644 osfmk/mach/mach_types.h create mode 100644 osfmk/mach/machine.h create mode 100644 osfmk/mach/machine/Makefile create mode 100644 osfmk/mach/machine/asm.h create mode 100644 osfmk/mach/machine/boolean.h create mode 100644 osfmk/mach/machine/exception.h create mode 100644 osfmk/mach/machine/kern_return.h create mode 100644 osfmk/mach/machine/machine_types.defs create mode 100644 osfmk/mach/machine/ndr_def.h create mode 100644 osfmk/mach/machine/processor_info.h create mode 100644 osfmk/mach/machine/rpc.h create mode 100644 osfmk/mach/machine/syscall_sw.h create mode 100644 osfmk/mach/machine/thread_state.h create mode 100644 osfmk/mach/machine/thread_status.h create mode 100644 osfmk/mach/machine/vm_param.h create mode 100644 osfmk/mach/machine/vm_types.h create mode 100644 osfmk/mach/memory_object.defs create mode 100644 osfmk/mach/memory_object.h 
create mode 100644 osfmk/mach/memory_object_control.defs create mode 100644 osfmk/mach/memory_object_default.defs create mode 100644 osfmk/mach/memory_object_name.defs create mode 100644 osfmk/mach/memory_object_types.h create mode 100644 osfmk/mach/message.h create mode 100644 osfmk/mach/mig.h create mode 100644 osfmk/mach/mig_errors.h create mode 100644 osfmk/mach/mig_log.h create mode 100644 osfmk/mach/mk_timer.h create mode 100644 osfmk/mach/mk_traps.h create mode 100644 osfmk/mach/msg_type.h create mode 100644 osfmk/mach/ndr.h create mode 100644 osfmk/mach/norma_special_ports.h create mode 100644 osfmk/mach/norma_task.defs create mode 100644 osfmk/mach/notify.defs create mode 100644 osfmk/mach/notify.h create mode 100644 osfmk/mach/policy.h create mode 100644 osfmk/mach/port.h create mode 100644 osfmk/mach/ppc/Makefile create mode 100644 osfmk/mach/ppc/boolean.h create mode 100644 osfmk/mach/ppc/exception.h create mode 100644 osfmk/mach/ppc/kern_return.h create mode 100644 osfmk/mach/ppc/machine_types.defs create mode 100644 osfmk/mach/ppc/ndr_def.h create mode 100644 osfmk/mach/ppc/processor_info.h create mode 100644 osfmk/mach/ppc/rpc.h create mode 100644 osfmk/mach/ppc/syscall_sw.h create mode 100644 osfmk/mach/ppc/thread_state.h create mode 100644 osfmk/mach/ppc/thread_status.h create mode 100644 osfmk/mach/ppc/vm_param.h create mode 100644 osfmk/mach/ppc/vm_types.h create mode 100644 osfmk/mach/processor.defs create mode 100644 osfmk/mach/processor_info.h create mode 100644 osfmk/mach/processor_set.defs create mode 100644 osfmk/mach/prof.defs create mode 100644 osfmk/mach/prof_types.h create mode 100644 osfmk/mach/rpc.h create mode 100644 osfmk/mach/semaphore.defs create mode 100644 osfmk/mach/semaphore.h create mode 100644 osfmk/mach/shared_memory_server.h create mode 100644 osfmk/mach/std_types.defs create mode 100644 osfmk/mach/std_types.h create mode 100644 osfmk/mach/sync.defs create mode 100644 osfmk/mach/sync_policy.h create mode 100644 
osfmk/mach/syscall_sw.h create mode 100644 osfmk/mach/task.defs create mode 100644 osfmk/mach/task_info.h create mode 100644 osfmk/mach/task_ledger.h create mode 100644 osfmk/mach/task_policy.h create mode 100644 osfmk/mach/task_special_ports.h create mode 100644 osfmk/mach/thread_act.defs create mode 100644 osfmk/mach/thread_info.h create mode 100644 osfmk/mach/thread_policy.h create mode 100644 osfmk/mach/thread_special_ports.h create mode 100644 osfmk/mach/thread_status.h create mode 100644 osfmk/mach/thread_switch.h create mode 100644 osfmk/mach/time_value.h create mode 100644 osfmk/mach/upl.defs create mode 100644 osfmk/mach/vm_attributes.h create mode 100644 osfmk/mach/vm_behavior.h create mode 100644 osfmk/mach/vm_inherit.h create mode 100644 osfmk/mach/vm_map.defs create mode 100644 osfmk/mach/vm_param.h create mode 100644 osfmk/mach/vm_prot.h create mode 100644 osfmk/mach/vm_region.h create mode 100644 osfmk/mach/vm_statistics.h create mode 100644 osfmk/mach/vm_sync.h create mode 100644 osfmk/mach/vm_types.h create mode 100644 osfmk/mach_debug/Makefile create mode 100644 osfmk/mach_debug/hash_info.h create mode 100644 osfmk/mach_debug/ipc_info.h create mode 100644 osfmk/mach_debug/mach_debug.h create mode 100644 osfmk/mach_debug/mach_debug_types.defs create mode 100644 osfmk/mach_debug/mach_debug_types.h create mode 100644 osfmk/mach_debug/page_info.h create mode 100644 osfmk/mach_debug/template.mk create mode 100644 osfmk/mach_debug/vm_info.h create mode 100644 osfmk/mach_debug/zone_info.h create mode 100644 osfmk/machine/Makefile create mode 100644 osfmk/machine/asm.h create mode 100644 osfmk/machine/ast.h create mode 100644 osfmk/machine/ast_types.h create mode 100644 osfmk/machine/cpu_data.h create mode 100644 osfmk/machine/cpu_number.h create mode 100644 osfmk/machine/db_machdep.h create mode 100644 osfmk/machine/disk.h create mode 100644 osfmk/machine/endian.h create mode 100644 osfmk/machine/gdb_defs.h create mode 100644 
osfmk/machine/hw_lock_types.h create mode 100644 osfmk/machine/io_map_entries.h create mode 100644 osfmk/machine/iobus.h create mode 100644 osfmk/machine/kgdb_defs.h create mode 100644 osfmk/machine/kgdb_setjmp.h create mode 100644 osfmk/machine/lock.h create mode 100644 osfmk/machine/mach_param.h create mode 100644 osfmk/machine/machine_routines.h create mode 100644 osfmk/machine/machine_rpc.h create mode 100644 osfmk/machine/machlimits.h create mode 100644 osfmk/machine/machparam.h create mode 100644 osfmk/machine/pmap.h create mode 100644 osfmk/machine/sched_param.h create mode 100644 osfmk/machine/setjmp.h create mode 100644 osfmk/machine/spl.h create mode 100644 osfmk/machine/task.h create mode 100644 osfmk/machine/thread.h create mode 100644 osfmk/machine/thread_act.h create mode 100644 osfmk/machine/timer.h create mode 100644 osfmk/machine/trap.h create mode 100644 osfmk/machine/vm_tuning.h create mode 100644 osfmk/machine/xpr.h create mode 100644 osfmk/ppc/AltiAssist.s create mode 100644 osfmk/ppc/Diagnostics.c create mode 100644 osfmk/ppc/Diagnostics.h create mode 100644 osfmk/ppc/Emulate.s create mode 100644 osfmk/ppc/Firmware.h create mode 100644 osfmk/ppc/Firmware.s create mode 100644 osfmk/ppc/FirmwareC.c create mode 100644 osfmk/ppc/FirmwareCalls.h create mode 100644 osfmk/ppc/MPinterfaces.s create mode 100644 osfmk/ppc/Makefile create mode 100644 osfmk/ppc/POWERMAC/dbdma.c create mode 100644 osfmk/ppc/POWERMAC/dbdma.h create mode 100644 osfmk/ppc/POWERMAC/mp/MPPlugIn.h create mode 100644 osfmk/ppc/POWERMAC/mp/MP_2p.s create mode 100644 osfmk/ppc/POWERMAC/mp/mp.c create mode 100644 osfmk/ppc/POWERMAC/mp/mp.h create mode 100644 osfmk/ppc/POWERMAC/scc_8530.h create mode 100644 osfmk/ppc/POWERMAC/serial_io.c create mode 100644 osfmk/ppc/POWERMAC/serial_io.h create mode 100644 osfmk/ppc/POWERMAC/video_console.c create mode 100644 osfmk/ppc/POWERMAC/video_console.h create mode 100644 osfmk/ppc/POWERMAC/video_console_entries.h create mode 100644 
osfmk/ppc/POWERMAC/video_scroll.s create mode 100644 osfmk/ppc/PPCcalls.c create mode 100644 osfmk/ppc/PPCcalls.h create mode 100644 osfmk/ppc/Performance.h create mode 100644 osfmk/ppc/Performance.s create mode 100644 osfmk/ppc/PseudoKernel.c create mode 100644 osfmk/ppc/PseudoKernel.h create mode 100644 osfmk/ppc/_setjmp.s create mode 100644 osfmk/ppc/aligned_data.s create mode 100644 osfmk/ppc/alignment.c create mode 100644 osfmk/ppc/asm.h create mode 100644 osfmk/ppc/ast.h create mode 100644 osfmk/ppc/ast_types.h create mode 100644 osfmk/ppc/atomic_switch.h create mode 100644 osfmk/ppc/atomic_switch.s create mode 100644 osfmk/ppc/bat_init.c create mode 100644 osfmk/ppc/bcopy.s create mode 100644 osfmk/ppc/bits.s create mode 100644 osfmk/ppc/boot.h create mode 100644 osfmk/ppc/bsd_asm.s create mode 100644 osfmk/ppc/bsd_ppc.c create mode 100644 osfmk/ppc/bzero.s create mode 100644 osfmk/ppc/cache.s create mode 100644 osfmk/ppc/clock.h create mode 100644 osfmk/ppc/conf.c create mode 100644 osfmk/ppc/console_feed.c create mode 100644 osfmk/ppc/console_feed_entries.h create mode 100644 osfmk/ppc/cpu.c create mode 100644 osfmk/ppc/cpu_data.h create mode 100644 osfmk/ppc/cpu_number.h create mode 100644 osfmk/ppc/cswtch.s create mode 100644 osfmk/ppc/db_asm.s create mode 100644 osfmk/ppc/db_disasm.c create mode 100644 osfmk/ppc/db_interface.c create mode 100644 osfmk/ppc/db_low_trace.c create mode 100644 osfmk/ppc/db_low_trace.h create mode 100644 osfmk/ppc/db_machdep.h create mode 100644 osfmk/ppc/db_trace.c create mode 100644 osfmk/ppc/endian.h create mode 100644 osfmk/ppc/exception.h create mode 100644 osfmk/ppc/fpu_protos.h create mode 100644 osfmk/ppc/genassym.c create mode 100644 osfmk/ppc/hardclock_entries.h create mode 100644 osfmk/ppc/hexfont.h create mode 100644 osfmk/ppc/hw_counters.h create mode 100644 osfmk/ppc/hw_exception.s create mode 100644 osfmk/ppc/hw_lock.s create mode 100644 osfmk/ppc/hw_lock_types.h create mode 100644 osfmk/ppc/hw_vm.s create mode 
100644 osfmk/ppc/interrupt.c create mode 100644 osfmk/ppc/io_map.c create mode 100644 osfmk/ppc/io_map_entries.h create mode 100644 osfmk/ppc/iso_font.h create mode 100644 osfmk/ppc/lock.h create mode 100644 osfmk/ppc/low_trace.h create mode 100644 osfmk/ppc/lowmem_vectors.s create mode 100644 osfmk/ppc/mach_param.h create mode 100644 osfmk/ppc/machine_cpu.h create mode 100644 osfmk/ppc/machine_routines.c create mode 100644 osfmk/ppc/machine_routines.h create mode 100644 osfmk/ppc/machine_routines_asm.s create mode 100644 osfmk/ppc/machine_rpc.h create mode 100644 osfmk/ppc/machlimits.h create mode 100644 osfmk/ppc/machparam.h create mode 100644 osfmk/ppc/mappings.c create mode 100644 osfmk/ppc/mappings.h create mode 100644 osfmk/ppc/mem.c create mode 100644 osfmk/ppc/mem.h create mode 100644 osfmk/ppc/misc.c create mode 100644 osfmk/ppc/misc_asm.s create mode 100644 osfmk/ppc/misc_protos.h create mode 100644 osfmk/ppc/model_dep.c create mode 100644 osfmk/ppc/movc.s create mode 100644 osfmk/ppc/mp.h create mode 100644 osfmk/ppc/net_filter.c create mode 100644 osfmk/ppc/new_screen.h create mode 100644 osfmk/ppc/notify_interrupt.c create mode 100644 osfmk/ppc/pcb.c create mode 100644 osfmk/ppc/pmap.c create mode 100644 osfmk/ppc/pmap.h create mode 100644 osfmk/ppc/pmap_internals.h create mode 100644 osfmk/ppc/ppc_disasm.i create mode 100644 osfmk/ppc/ppc_init.c create mode 100644 osfmk/ppc/ppc_vm_init.c create mode 100644 osfmk/ppc/proc_reg.h create mode 100644 osfmk/ppc/rtclock.c create mode 100644 osfmk/ppc/savearea.c create mode 100644 osfmk/ppc/savearea.h create mode 100644 osfmk/ppc/savearea_asm.s create mode 100644 osfmk/ppc/sched_param.h create mode 100644 osfmk/ppc/screen.h create mode 100644 osfmk/ppc/screen_switch.h create mode 100644 osfmk/ppc/serial_console.c create mode 100644 osfmk/ppc/serial_defs.h create mode 100644 osfmk/ppc/setjmp.h create mode 100644 osfmk/ppc/start.s create mode 100644 osfmk/ppc/status.c create mode 100644 osfmk/ppc/stubs.c create 
mode 100644 osfmk/ppc/task.h create mode 100644 osfmk/ppc/testjump.c create mode 100644 osfmk/ppc/thread.h create mode 100644 osfmk/ppc/thread_act.h create mode 100644 osfmk/ppc/trap.c create mode 100644 osfmk/ppc/trap.h create mode 100644 osfmk/ppc/vm_tuning.h create mode 100644 osfmk/ppc/vmachmon.c create mode 100644 osfmk/ppc/vmachmon.h create mode 100644 osfmk/ppc/vmachmon_asm.s create mode 100644 osfmk/ppc/xpr.h create mode 100644 osfmk/profiling/Makefile create mode 100644 osfmk/profiling/i386/Makefile create mode 100644 osfmk/profiling/i386/profile-asm.s create mode 100644 osfmk/profiling/i386/profile-md.c create mode 100644 osfmk/profiling/i386/profile-md.h create mode 100644 osfmk/profiling/machine/Makefile create mode 100644 osfmk/profiling/machine/profile-md.h create mode 100644 osfmk/profiling/ppc/Makefile create mode 100644 osfmk/profiling/ppc/profile-md.h create mode 100644 osfmk/profiling/profile-internal.h create mode 100644 osfmk/profiling/profile-kgmon.c create mode 100644 osfmk/profiling/profile-mk.c create mode 100644 osfmk/profiling/profile-mk.h create mode 100644 osfmk/sys/ioctl.h create mode 100644 osfmk/sys/scsi.h create mode 100644 osfmk/sys/sdi.h create mode 100644 osfmk/sys/sdi_edt.h create mode 100644 osfmk/sys/syslog.h create mode 100644 osfmk/sys/time.h create mode 100644 osfmk/sys/tm.h create mode 100644 osfmk/sys/types.h create mode 100644 osfmk/sys/varargs.h create mode 100644 osfmk/sys/version.h create mode 100644 osfmk/vm/Makefile create mode 100644 osfmk/vm/bsd_vm.c create mode 100644 osfmk/vm/cpm.h create mode 100644 osfmk/vm/memory_object.c create mode 100644 osfmk/vm/memory_object.h create mode 100644 osfmk/vm/pmap.h create mode 100644 osfmk/vm/vm_debug.c create mode 100644 osfmk/vm/vm_debug.h create mode 100644 osfmk/vm/vm_external.c create mode 100644 osfmk/vm/vm_external.h create mode 100644 osfmk/vm/vm_fault.c create mode 100644 osfmk/vm/vm_fault.h create mode 100644 osfmk/vm/vm_init.c create mode 100644 osfmk/vm/vm_init.h 
create mode 100644 osfmk/vm/vm_kern.c create mode 100644 osfmk/vm/vm_kern.h create mode 100644 osfmk/vm/vm_map.c create mode 100644 osfmk/vm/vm_map.h create mode 100644 osfmk/vm/vm_object.c create mode 100644 osfmk/vm/vm_object.h create mode 100644 osfmk/vm/vm_page.h create mode 100644 osfmk/vm/vm_pageout.c create mode 100644 osfmk/vm/vm_pageout.h create mode 100644 osfmk/vm/vm_print.h create mode 100644 osfmk/vm/vm_resident.c create mode 100644 osfmk/vm/vm_shared_memory_server.c create mode 100644 osfmk/vm/vm_user.c create mode 100644 pexpert/Makefile create mode 100644 pexpert/conf/MASTER create mode 100644 pexpert/conf/MASTER.i386 create mode 100644 pexpert/conf/MASTER.ppc create mode 100644 pexpert/conf/Makefile create mode 100644 pexpert/conf/Makefile.i386 create mode 100644 pexpert/conf/Makefile.ppc create mode 100644 pexpert/conf/Makefile.template create mode 100644 pexpert/conf/copyright create mode 100644 pexpert/conf/files create mode 100644 pexpert/conf/files.i386 create mode 100644 pexpert/conf/files.ppc create mode 100644 pexpert/conf/tools/Makefile create mode 100644 pexpert/conf/tools/doconf/Makefile create mode 100755 pexpert/conf/tools/doconf/doconf.csh create mode 100644 pexpert/conf/tools/newvers/Makefile create mode 100644 pexpert/conf/tools/newvers/newvers.csh create mode 100644 pexpert/conf/version.major create mode 100644 pexpert/conf/version.minor create mode 100644 pexpert/conf/version.variant create mode 100644 pexpert/gen/bootargs.c create mode 100644 pexpert/gen/device_tree.c create mode 100644 pexpert/gen/pe_gen.c create mode 100644 pexpert/i386/fakePPCDeviceTree.c create mode 100644 pexpert/i386/fakePPCDeviceTree.h create mode 100644 pexpert/i386/fakePPCStructs.h create mode 100644 pexpert/i386/kd.c create mode 100644 pexpert/i386/kd.h create mode 100644 pexpert/i386/kdasm.s create mode 100644 pexpert/i386/pe_bootargs.c create mode 100644 pexpert/i386/pe_identify_machine.c create mode 100644 pexpert/i386/pe_init.c create mode 100644 
pexpert/i386/pe_interrupt.c create mode 100644 pexpert/i386/pe_kprintf.c create mode 100644 pexpert/i386/pe_misc.s create mode 100644 pexpert/i386/pe_spl.c create mode 100644 pexpert/i386/text_console.c create mode 100644 pexpert/i386/video_console.h create mode 100644 pexpert/pexpert/Makefile create mode 100644 pexpert/pexpert/boot.h create mode 100644 pexpert/pexpert/device_tree.h create mode 100644 pexpert/pexpert/i386/Makefile create mode 100644 pexpert/pexpert/i386/boot.h create mode 100644 pexpert/pexpert/i386/fb_entries.h create mode 100644 pexpert/pexpert/i386/kd_entries.h create mode 100644 pexpert/pexpert/i386/kdsoft.h create mode 100644 pexpert/pexpert/i386/protos.h create mode 100644 pexpert/pexpert/machine/Makefile create mode 100644 pexpert/pexpert/machine/boot.h create mode 100644 pexpert/pexpert/machine/protos.h create mode 100644 pexpert/pexpert/pe_images.h create mode 100644 pexpert/pexpert/pexpert.h create mode 100644 pexpert/pexpert/ppc/Makefile create mode 100644 pexpert/pexpert/ppc/boot.h create mode 100644 pexpert/pexpert/ppc/dbdma.h create mode 100644 pexpert/pexpert/ppc/interrupts.h create mode 100644 pexpert/pexpert/ppc/powermac.h create mode 100644 pexpert/pexpert/ppc/protos.h create mode 100644 pexpert/pexpert/protos.h create mode 100644 pexpert/ppc/pe_bootargs.c create mode 100644 pexpert/ppc/pe_clock_speed.c create mode 100644 pexpert/ppc/pe_clock_speed_asm.s create mode 100644 pexpert/ppc/pe_identify_machine.c create mode 100644 pexpert/ppc/pe_init.c create mode 100644 pexpert/ppc/pe_kprintf.c create mode 100644 pexpert/ppc/pe_misc.s diff --git a/APPLE_LICENSE b/APPLE_LICENSE new file mode 100644 index 000000000..a0a84169d --- /dev/null +++ b/APPLE_LICENSE @@ -0,0 +1,372 @@ +APPLE PUBLIC SOURCE LICENSE +Version 1.1 - April 19,1999 + +Please read this License carefully before downloading this software. +By downloading and using this software, you are agreeing to be bound +by the terms of this License. 
If you do not or cannot agree to the +terms of this License, please do not download or use the software. + +1. General; Definitions. This License applies to any program or other +work which Apple Computer, Inc. ("Apple") publicly announces as +subject to this Apple Public Source License and which contains a +notice placed by Apple identifying such program or work as "Original +Code" and stating that it is subject to the terms of this Apple Public +Source License version 1.1 (or subsequent version thereof), as it may +be revised from time to time by Apple ("License"). As used in this +License: + +1.1 "Affected Original Code" means only those specific portions of +Original Code that allegedly infringe upon any party's intellectual +property rights or are otherwise the subject of a claim of +infringement. + +1.2 "Applicable Patent Rights" mean: (a) in the case where Apple is +the grantor of rights, (i) claims of patents that are now or hereafter +acquired, owned by or assigned to Apple and (ii) that cover subject +matter contained in the Original Code, but only to the extent +necessary to use, reproduce and/or distribute the Original Code +without infringement; and (b) in the case where You are the grantor of +rights, (i) claims of patents that are now or hereafter acquired, +owned by or assigned to You and (ii) that cover subject matter in Your +Modifications, taken alone or in combination with Original Code. + +1.3 "Covered Code" means the Original Code, Modifications, the +combination of Original Code and any Modifications, and/or any +respective portions thereof. + +1.4 "Deploy" means to use, sublicense or distribute Covered Code other +than for Your internal research and development (R&D), and includes +without limitation, any and all internal use or distribution of +Covered Code within Your business or organization except for R&D use, +as well as direct or indirect sublicensing or distribution of Covered +Code by You to any third party in any form or manner. 
+ +1.5 "Larger Work" means a work which combines Covered Code or portions +thereof with code not governed by the terms of this License. + +1.6 "Modifications" mean any addition to, deletion from, and/or change +to, the substance and/or structure of Covered Code. When code is +released as a series of files, a Modification is: (a) any addition to +or deletion from the contents of a file containing Covered Code; +and/or (b) any new file or other representation of computer program +statements that contains any part of Covered Code. + +1.7 "Original Code" means (a) the Source Code of a program or other +work as originally made available by Apple under this License, +including the Source Code of any updates or upgrades to such programs +or works made available by Apple under this License, and that has been +expressly identified by Apple as such in the header file(s) of such +work; and (b) the object code compiled from such Source Code and +originally made available by Apple under this License. + +1.8 "Source Code" means the human readable form of a program or other +work that is suitable for making modifications to it, including all +modules it contains, plus any associated interface definition files, +scripts used to control compilation and installation of an executable +(object code). + +1.9 "You" or "Your" means an individual or a legal entity exercising +rights under this License. For legal entities, "You" or "Your" +includes any entity which controls, is controlled by, or is under +common control with, You, where "control" means (a) the power, direct +or indirect, to cause the direction or management of such entity, +whether by contract or otherwise, or (b) ownership of fifty percent +(50%) or more of the outstanding shares or beneficial ownership of +such entity. + +2. Permitted Uses; Conditions & Restrictions. 
Subject to the terms +and conditions of this License, Apple hereby grants You, effective on +the date You accept this License and download the Original Code, a +world-wide, royalty-free, non- exclusive license, to the extent of +Apple's Applicable Patent Rights and copyrights covering the Original +Code, to do the following: + +2.1 You may use, copy, modify and distribute Original Code, with or +without Modifications, solely for Your internal research and +development, provided that You must in each instance: + +(a) retain and reproduce in all copies of Original Code the copyright +and other proprietary notices and disclaimers of Apple as they appear +in the Original Code, and keep intact all notices in the Original Code +that refer to this License; + +(b) include a copy of this License with every copy of Source Code of +Covered Code and documentation You distribute, and You may not offer +or impose any terms on such Source Code that alter or restrict this +License or the recipients' rights hereunder, except as permitted under +Section 6; and + +(c) completely and accurately document all Modifications that you have +made and the date of each such Modification, designate the version of +the Original Code you used, prominently include a file carrying such +information with the Modifications, and duplicate the notice in +Exhibit A in each file of the Source Code of all such Modifications. + +2.2 You may Deploy Covered Code, provided that You must in each + instance: + +(a) satisfy all the conditions of Section 2.1 with respect to the +Source Code of the Covered Code; + +(b) make all Your Deployed Modifications publicly available in Source +Code form via electronic distribution (e.g. download from a web site) +under the terms of this License and subject to the license grants set +forth in Section 3 below, and any additional terms You may choose to +offer under Section 6. 
You must continue to make the Source Code of +Your Deployed Modifications available for as long as you Deploy the +Covered Code or twelve (12) months from the date of initial +Deployment, whichever is longer; + +(c) if You Deploy Covered Code containing Modifications made by You, +inform others of how to obtain those Modifications by filling out and +submitting the information found at +http://www.apple.com/publicsource/modifications.html, if available; +and + +(d) if You Deploy Covered Code in object code, executable form only, +include a prominent notice, in the code itself as well as in related +documentation, stating that Source Code of the Covered Code is +available under the terms of this License with information on how and +where to obtain such Source Code. + +3. Your Grants. In consideration of, and as a condition to, the +licenses granted to You under this License: + +(a) You hereby grant to Apple and all third parties a non-exclusive, +royalty-free license, under Your Applicable Patent Rights and other +intellectual property rights owned or controlled by You, to use, +reproduce, modify, distribute and Deploy Your Modifications of the +same scope and extent as Apple's licenses under Sections 2.1 and 2.2; +and + +(b) You hereby grant to Apple and its subsidiaries a non-exclusive, +worldwide, royalty-free, perpetual and irrevocable license, under Your +Applicable Patent Rights and other intellectual property rights owned +or controlled by You, to use, reproduce, execute, compile, display, +perform, modify or have modified (for Apple and/or its subsidiaries), +sublicense and distribute Your Modifications, in any form, through +multiple tiers of distribution. + +4. Larger Works. You may create a Larger Work by combining Covered +Code with other code not governed by the terms of this License and +distribute the Larger Work as a single product. 
In each such +instance, You must make sure the requirements of this License are +fulfilled for the Covered Code or any portion thereof. + +5. Limitations on Patent License. Except as expressly stated in +Section 2, no other patent rights, express or implied, are granted by +Apple herein. Modifications and/or Larger Works may require +additional patent licenses from Apple which Apple may grant in its +sole discretion. + +6. Additional Terms. You may choose to offer, and to charge a fee +for, warranty, support, indemnity or liability obligations and/or +other rights consistent with the scope of the license granted herein +("Additional Terms") to one or more recipients of Covered +Code. However, You may do so only on Your own behalf and as Your sole +responsibility, and not on behalf of Apple. You must obtain the +recipient's agreement that any such Additional Terms are offered by +You alone, and You hereby agree to indemnify, defend and hold Apple +harmless for any liability incurred by or claims asserted against +Apple by reason of any such Additional Terms. + +7. Versions of the License. Apple may publish revised and/or new +versions of this License from time to time. Each version will be +given a distinguishing version number. Once Original Code has been +published under a particular version of this License, You may continue +to use it under the terms of that version. You may also choose to use +such Original Code under the terms of any subsequent version of this +License published by Apple. No one other than Apple has the right to +modify the terms applicable to Covered Code created under this +License. + +8. NO WARRANTY OR SUPPORT. The Original Code may contain in whole or +in part pre-release, untested, or not fully tested works. The +Original Code may contain errors that could cause failures or loss of +data, and may be incomplete or contain inaccuracies. 
You expressly +acknowledge and agree that use of the Original Code, or any portion +thereof, is at Your sole and entire risk. THE ORIGINAL CODE IS +PROVIDED "AS IS" AND WITHOUT WARRANTY, UPGRADES OR SUPPORT OF ANY KIND +AND APPLE AND APPLE'S LICENSOR(S) (FOR THE PURPOSES OF SECTIONS 8 AND +9, APPLE AND APPLE'S LICENSOR(S) ARE COLLECTIVELY REFERRED TO AS +"APPLE") EXPRESSLY DISCLAIM ALL WARRANTIES AND/OR CONDITIONS, EXPRESS +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +AND/OR CONDITIONS OF MERCHANTABILITY OR SATISFACTORY QUALITY AND +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY +RIGHTS. APPLE DOES NOT WARRANT THAT THE FUNCTIONS CONTAINED IN THE +ORIGINAL CODE WILL MEET YOUR REQUIREMENTS, OR THAT THE OPERATION OF +THE ORIGINAL CODE WILL BE UNINTERRUPTED OR ERROR- FREE, OR THAT +DEFECTS IN THE ORIGINAL CODE WILL BE CORRECTED. NO ORAL OR WRITTEN +INFORMATION OR ADVICE GIVEN BY APPLE OR AN APPLE AUTHORIZED +REPRESENTATIVE SHALL CREATE A WARRANTY OR IN ANY WAY INCREASE THE +SCOPE OF THIS WARRANTY. You acknowledge that the Original Code is not +intended for use in the operation of nuclear facilities, aircraft +navigation, communication systems, or air traffic control machines in +which case the failure of the Original Code could lead to death, +personal injury, or severe physical or environmental damage. + +9. Liability. + +9.1 Infringement. 
If any portion of, or functionality implemented by, +the Original Code becomes the subject of a claim of infringement, +Apple may, at its option: (a) attempt to procure the rights necessary +for Apple and You to continue using the Affected Original Code; (b) +modify the Affected Original Code so that it is no longer infringing; +or (c) suspend Your rights to use, reproduce, modify, sublicense and +distribute the Affected Original Code until a final determination of +the claim is made by a court or governmental administrative agency of +competent jurisdiction and Apple lifts the suspension as set forth +below. Such suspension of rights will be effective immediately upon +Apple's posting of a notice to such effect on the Apple web site that +is used for implementation of this License. Upon such final +determination being made, if Apple is legally able, without the +payment of a fee or royalty, to resume use, reproduction, +modification, sublicensing and distribution of the Affected Original +Code, Apple will lift the suspension of rights to the Affected +Original Code by posting a notice to such effect on the Apple web site +that is used for implementation of this License. If Apple suspends +Your rights to Affected Original Code, nothing in this License shall +be construed to restrict You, at Your option and subject to applicable +law, from replacing the Affected Original Code with non-infringing +code or independently negotiating for necessary rights from such third +party. + +9.2 LIMITATION OF LIABILITY. 
UNDER NO CIRCUMSTANCES SHALL APPLE BE +LIABLE FOR ANY INCIDENTAL, SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES +ARISING OUT OF OR RELATING TO THIS LICENSE OR YOUR USE OR INABILITY TO +USE THE ORIGINAL CODE, OR ANY PORTION THEREOF, WHETHER UNDER A THEORY +OF CONTRACT, WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCTS LIABILITY +OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES AND NOTWITHSTANDING THE FAILURE OF ESSENTIAL PURPOSE OF +ANY REMEDY. In no event shall Apple's total liability to You for all +damages under this License exceed the amount of fifty dollars +($50.00). + +10. Trademarks. This License does not grant any rights to use the +trademarks or trade names "Apple", "Apple Computer", "Mac OS X", "Mac +OS X Server" or any other trademarks or trade names belonging to Apple +(collectively "Apple Marks") and no Apple Marks may be used to endorse +or promote products derived from the Original Code other than as +permitted by and in strict compliance at all times with Apple's third +party trademark usage guidelines which are posted at +http://www.apple.com/legal/guidelinesfor3rdparties.html. + +11. Ownership. Apple retains all rights, title and interest in and to +the Original Code and any Modifications made by or on behalf of Apple +("Apple Modifications"), and such Apple Modifications will not be +automatically subject to this License. Apple may, at its sole +discretion, choose to license such Apple Modifications under this +License, or on different terms from those contained in this License or +may choose not to license them at all. Apple's development, use, +reproduction, modification, sublicensing and distribution of Covered +Code will not be subject to this License. + +12. Termination. + +12.1 Termination. 
This License and the rights granted hereunder will + terminate: + +(a) automatically without notice from Apple if You fail to comply with +any term(s) of this License and fail to cure such breach within 30 +days of becoming aware of such breach; (b) immediately in the event of +the circumstances described in Section 13.5(b); or (c) automatically +without notice from Apple if You, at any time during the term of this +License, commence an action for patent infringement against Apple. + +12.2 Effect of Termination. Upon termination, You agree to +immediately stop any further use, reproduction, modification, +sublicensing and distribution of the Covered Code and to destroy all +copies of the Covered Code that are in your possession or control. +All sublicenses to the Covered Code which have been properly granted +prior to termination shall survive any termination of this License. +Provisions which, by their nature, should remain in effect beyond the +termination of this License shall survive, including but not limited +to Sections 3, 5, 8, 9, 10, 11, 12.2 and 13. Neither party will be +liable to the other for compensation, indemnity or damages of any sort +solely as a result of terminating this License in accordance with its +terms, and termination of this License will be without prejudice to +any other right or remedy of either party. + +13. Miscellaneous. + +13.1 Government End Users. The Covered Code is a "commercial item" as +defined in FAR 2.101. Government software and technical data rights +in the Covered Code include only those rights customarily provided to +the public as defined in this License. This customary commercial +license in technical data and software is provided in accordance with +FAR 12.211 (Technical Data) and 12.212 (Computer Software) and, for +Department of Defense purchases, DFAR 252.227-7015 (Technical Data -- +Commercial Items) and 227.7202-3 (Rights in Commercial Computer +Software or Computer Software Documentation). Accordingly, all U.S. 
+Government End Users acquire Covered Code with only those rights set +forth herein. + +13.2 Relationship of Parties. This License will not be construed as +creating an agency, partnership, joint venture or any other form of +legal association between You and Apple, and You will not represent to +the contrary, whether expressly, by implication, appearance or +otherwise. + +13.3 Independent Development. Nothing in this License will impair +Apple's right to acquire, license, develop, have others develop for +it, market and/or distribute technology or products that perform the +same or similar functions as, or otherwise compete with, +Modifications, Larger Works, technology or products that You may +develop, produce, market or distribute. + +13.4 Waiver; Construction. Failure by Apple to enforce any provision +of this License will not be deemed a waiver of future enforcement of +that or any other provision. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +will not apply to this License. + +13.5 Severability. (a) If for any reason a court of competent +jurisdiction finds any provision of this License, or portion thereof, +to be unenforceable, that provision of the License will be enforced to +the maximum extent permissible so as to effect the economic benefits +and intent of the parties, and the remainder of this License will +continue in full force and effect. (b) Notwithstanding the foregoing, +if applicable law prohibits or restricts You from fully and/or +specifically complying with Sections 2 and/or 3 or prevents the +enforceability of either of those Sections, this License will +immediately terminate and You must immediately discontinue any use of +the Covered Code and destroy all copies of it that are in your +possession or control. + +13.6 Dispute Resolution. 
Any litigation or other dispute resolution +between You and Apple relating to this License shall take place in the +Northern District of California, and You and Apple hereby consent to +the personal jurisdiction of, and venue in, the state and federal +courts within that District with respect to this License. The +application of the United Nations Convention on Contracts for the +International Sale of Goods is expressly excluded. + +13.7 Entire Agreement; Governing Law. This License constitutes the +entire agreement between the parties with respect to the subject +matter hereof. This License shall be governed by the laws of the +United States and the State of California, except that body of +California law concerning conflicts of law. + +Where You are located in the province of Quebec, Canada, the following +clause applies: The parties hereby confirm that they have requested +that this License and all related documents be drafted in English. Les +parties ont exige que le present contrat et tous les documents +connexes soient rediges en anglais. + +EXHIBIT A. + +"Portions Copyright (c) 1999-2000 Apple Computer, Inc. All Rights +Reserved. This file contains Original Code and/or Modifications of +Original Code as defined in and that are subject to the Apple Public +Source License Version 1.1 (the "License"). You may not use this file +except in compliance with the License. Please obtain a copy of the +License at http://www.apple.com/publicsource and read it before using +this file. + +The Original Code and all software distributed under the License are +distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE OR NON- INFRINGEMENT. Please see the +License for the specific language governing rights and limitations +under the License." 
diff --git a/EXTERNAL_HEADERS/ar.h b/EXTERNAL_HEADERS/ar.h new file mode 100644 index 000000000..def1c4320 --- /dev/null +++ b/EXTERNAL_HEADERS/ar.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * This code is derived from software contributed to Berkeley by + * Hugh Smith at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ar.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _AR_H_ +#define _AR_H_ + +/* Pre-4BSD archives had these magic numbers in them. 
*/ +#define OARMAG1 0177555 +#define OARMAG2 0177545 + +#define ARMAG "!<arch>\n" /* ar "magic number" */ +#define SARMAG 8 /* strlen(ARMAG); */ + +#define AR_EFMT1 "#1/" /* extended format #1 */ + +struct ar_hdr { + char ar_name[16]; /* name */ + char ar_date[12]; /* modification time */ + char ar_uid[6]; /* user id */ + char ar_gid[6]; /* group id */ + char ar_mode[8]; /* octal file permissions */ + char ar_size[10]; /* size in bytes */ +#define ARFMAG "`\n" + char ar_fmag[2]; /* consistency check */ +}; + +#endif /* !_AR_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/byte_order.h b/EXTERNAL_HEADERS/architecture/byte_order.h new file mode 100644 index 000000000..b39c272cb --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/byte_order.h @@ -0,0 +1,536 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Byte ordering conversion. + * + * HISTORY + * + * 20 October 1992 ? at NeXT + * Added #ifdef wrapper to prevent multiple inclusions of this file. + * + * 8 October 1992 ? at NeXT + * Converted to NXxxx versions. Condensed history. 
+ * + * 18 May 1992 ? at NeXT + * Created. + */ + +#ifndef _ARCHITECTURE_BYTE_ORDER_H_ +#define _ARCHITECTURE_BYTE_ORDER_H_ + +typedef unsigned long NXSwappedFloat; +typedef unsigned long long NXSwappedDouble; + +#if defined (__ppc__) +#include "architecture/ppc/byte_order.h" +#elif defined (__i386__) +#include "architecture/i386/byte_order.h" +#else +#error architecture not supported +#endif + +/* + * Identify the byte order + * of the current host. + */ + +enum NXByteOrder { + NX_UnknownByteOrder, + NX_LittleEndian, + NX_BigEndian +}; + +static __inline__ +enum NXByteOrder +NXHostByteOrder(void) +{ + unsigned int _x; + + _x = (NX_BigEndian << 24) | NX_LittleEndian; + + return ((enum NXByteOrder)*((unsigned char *)&_x)); +} + +/* + * The predicated versions + * are defined here in terms + * of the unpredicated ones. + */ + +#if __BIG_ENDIAN__ + +static __inline__ +unsigned short +NXSwapBigShortToHost( + unsigned short x +) +{ + return (x); +} + +static __inline__ +unsigned int +NXSwapBigIntToHost( + unsigned int x +) +{ + return (x); +} + +static __inline__ +unsigned long +NXSwapBigLongToHost( + unsigned long x +) +{ + return (x); +} + +static __inline__ +unsigned long long +NXSwapBigLongLongToHost( + unsigned long long x +) +{ + return (x); +} + +#ifndef KERNEL + +static __inline__ +double +NXSwapBigDoubleToHost( + NXSwappedDouble x +) +{ + return NXConvertSwappedDoubleToHost(x); +} + +static __inline__ +float +NXSwapBigFloatToHost( + NXSwappedFloat x +) +{ + return NXConvertSwappedFloatToHost(x); +} + +#endif /* KERNEL */ + +static __inline__ +unsigned short +NXSwapHostShortToBig( + unsigned short x +) +{ + return (x); +} + +static __inline__ +unsigned int +NXSwapHostIntToBig( + unsigned int x +) +{ + return (x); +} + +static __inline__ +unsigned long +NXSwapHostLongToBig( + unsigned long x +) +{ + return (x); +} + +static __inline__ +unsigned long long +NXSwapHostLongLongToBig( + unsigned long long x +) +{ + return (x); +} + +#ifndef KERNEL + +static __inline__ 
+NXSwappedDouble +NXSwapHostDoubleToBig( + double x +) +{ + return NXConvertHostDoubleToSwapped(x); +} + +static __inline__ +NXSwappedFloat +NXSwapHostFloatToBig( + float x +) +{ + return NXConvertHostFloatToSwapped(x); +} + +#endif /* KERNEL */ + +static __inline__ +unsigned short +NXSwapLittleShortToHost( + unsigned short x +) +{ + return (NXSwapShort(x)); +} + +static __inline__ +unsigned int +NXSwapLittleIntToHost( + unsigned int x +) +{ + return (NXSwapInt(x)); +} + +static __inline__ +unsigned long +NXSwapLittleLongToHost( + unsigned long x +) +{ + return (NXSwapLong(x)); +} + +static __inline__ +unsigned long long +NXSwapLittleLongLongToHost( + unsigned long long x +) +{ + return (NXSwapLongLong(x)); +} + +#ifndef KERNEL + +static __inline__ +double +NXSwapLittleDoubleToHost( + NXSwappedDouble x +) +{ + return NXConvertSwappedDoubleToHost(NXSwapDouble(x)); +} + +static __inline__ +float +NXSwapLittleFloatToHost( + NXSwappedFloat x +) +{ + return NXConvertSwappedFloatToHost(NXSwapFloat(x)); +} + +#endif /* KERNEL */ + +static __inline__ +unsigned short +NXSwapHostShortToLittle( + unsigned short x +) +{ + return (NXSwapShort(x)); +} + +static __inline__ +unsigned int +NXSwapHostIntToLittle( + unsigned int x +) +{ + return (NXSwapInt(x)); +} + +static __inline__ +unsigned long +NXSwapHostLongToLittle( + unsigned long x +) +{ + return (NXSwapLong(x)); +} + +static __inline__ +unsigned long long +NXSwapHostLongLongToLittle( + unsigned long long x +) +{ + return (NXSwapLongLong(x)); +} + +#ifndef KERNEL + +static __inline__ +NXSwappedDouble +NXSwapHostDoubleToLittle( + double x +) +{ + return NXSwapDouble(NXConvertHostDoubleToSwapped(x)); +} + +static __inline__ +NXSwappedFloat +NXSwapHostFloatToLittle( + float x +) +{ + return NXSwapFloat(NXConvertHostFloatToSwapped(x)); +} + +#endif /* KERNEL */ +#endif /*__BIG_ENDIAN__ */ + +#if __LITTLE_ENDIAN__ + +static __inline__ +unsigned short +NXSwapBigShortToHost( + unsigned short x +) +{ + return (NXSwapShort(x)); +} + 
+static __inline__ +unsigned int +NXSwapBigIntToHost( + unsigned int x +) +{ + return (NXSwapInt(x)); +} + +static __inline__ +unsigned long +NXSwapBigLongToHost( + unsigned long x +) +{ + return (NXSwapLong(x)); +} + +static __inline__ +unsigned long long +NXSwapBigLongLongToHost( + unsigned long long x +) +{ + return (NXSwapLongLong(x)); +} + +static __inline__ +double +NXSwapBigDoubleToHost( + NXSwappedDouble x +) +{ + return NXConvertSwappedDoubleToHost(NXSwapDouble(x)); +} + +static __inline__ +float +NXSwapBigFloatToHost( + NXSwappedFloat x +) +{ + return NXConvertSwappedFloatToHost(NXSwapFloat(x)); +} + +static __inline__ +unsigned short +NXSwapHostShortToBig( + unsigned short x +) +{ + return (NXSwapShort(x)); +} + +static __inline__ +unsigned int +NXSwapHostIntToBig( + unsigned int x +) +{ + return (NXSwapInt(x)); +} + +static __inline__ +unsigned long +NXSwapHostLongToBig( + unsigned long x +) +{ + return (NXSwapLong(x)); +} + +static __inline__ +unsigned long long +NXSwapHostLongLongToBig( + unsigned long long x +) +{ + return (NXSwapLongLong(x)); +} + +static __inline__ +NXSwappedDouble +NXSwapHostDoubleToBig( + double x +) +{ + return (NXSwapDouble(NXConvertHostDoubleToSwapped(x))); +} + +static __inline__ +NXSwappedFloat +NXSwapHostFloatToBig( + float x +) +{ + return (NXSwapFloat(NXConvertHostFloatToSwapped(x))); +} + +static __inline__ +unsigned short +NXSwapLittleShortToHost( + unsigned short x +) +{ + return (x); +} + +static __inline__ +unsigned int +NXSwapLittleIntToHost( + unsigned int x +) +{ + return (x); +} + +static __inline__ +unsigned long +NXSwapLittleLongToHost( + unsigned long x +) +{ + return (x); +} + +static __inline__ +unsigned long long +NXSwapLittleLongLongToHost( + unsigned long long x +) +{ + return (x); +} + +static __inline__ +double +NXSwapLittleDoubleToHost( + NXSwappedDouble x +) +{ + return NXConvertSwappedDoubleToHost(x); +} + +static __inline__ +float +NXSwapLittleFloatToHost( + NXSwappedFloat x +) +{ + return 
NXConvertSwappedFloatToHost(x); +} + +static __inline__ +unsigned short +NXSwapHostShortToLittle( + unsigned short x +) +{ + return (x); +} + +static __inline__ +unsigned int +NXSwapHostIntToLittle( + unsigned int x +) +{ + return (x); +} + +static __inline__ +unsigned long +NXSwapHostLongToLittle( + unsigned long x +) +{ + return (x); +} + +static __inline__ +unsigned long long +NXSwapHostLongLongToLittle( + unsigned long long x +) +{ + return (x); +} + +static __inline__ +NXSwappedDouble +NXSwapHostDoubleToLittle( + double x +) +{ + return NXConvertHostDoubleToSwapped(x); +} + +static __inline__ +NXSwappedFloat +NXSwapHostFloatToLittle( + float x +) +{ + return NXConvertHostFloatToSwapped(x); +} + +#endif /* __LITTLE_ENDIAN__ */ + +#endif /* _ARCHITECTURE_BYTE_ORDER_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/i386/asm_help.h b/EXTERNAL_HEADERS/architecture/i386/asm_help.h new file mode 100644 index 000000000..0da8f189c --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/asm_help.h @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. + * + * File: architecture/i386/asm_help.h + * Author: Mike DeMoney, NeXT Computer, Inc. + * Modified for i386 by: Bruce Martin, NeXT Computer, Inc. + * + * This header file defines macros useful when writing assembly code + * for the Intel i386 family processors. + * + * HISTORY + * 10-Mar-92 Bruce Martin (bmartin@next.com) + * Adapted to i386 + * 23-Jan-91 Mike DeMoney (mike@next.com) + * Created. + */ + +#ifndef _ARCH_I386_ASM_HELP_H_ +#define _ARCH_I386_ASM_HELP_H_ + +#include <architecture/i386/reg_help.h> + + +#ifdef __ASSEMBLER__ + +#define ALIGN \ + .align 2, 0x90 + +#define ROUND_TO_STACK(len) \ + (((len) + STACK_INCR - 1) / STACK_INCR * STACK_INCR) + +#ifdef notdef +#define CALL_MCOUNT \ + pushl %ebp ;\ + movl %esp, %ebp ;\ + .data ;\ + 1: .long 0 ;\ + .text ;\ + lea 9b,%edx ;\ + call mcount ;\ + popl %ebp ; +#else +#define CALL_MCOUNT +#endif + +/* + * Prologue for functions that may call other functions. Saves + * registers and sets up a C frame. + */ +#define NESTED_FUNCTION_PROLOGUE(localvarsize) \ + .set __framesize,ROUND_TO_STACK(localvarsize) ;\ + .set __nested_function, 1 ;\ + CALL_MCOUNT \ + .if __framesize ;\ + pushl %ebp ;\ + movl %esp, %ebp ;\ + subl $__framesize, %esp ;\ + .endif ;\ + pushl %edi ;\ + pushl %esi ;\ + pushl %ebx + +/* + * Prologue for functions that do not call other functions. Does not + * save registers (this is the functions responsibility). Does set + * up a C frame. + */ +#define LEAF_FUNCTION_PROLOGUE(localvarsize) \ + .set __framesize,ROUND_TO_STACK(localvarsize) ;\ + .set __nested_function, 0 ;\ + CALL_MCOUNT \ + .if __framesize ;\ + pushl %ebp ;\ + movl %esp, %ebp ;\ + subl $__framesize, %esp ;\ + .endif + +/* + * Prologue for any function. + * + * We assume that all Leaf functions will be responsible for saving any + * local registers they clobber. 
+ */ +#define FUNCTION_EPILOGUE \ + .if __nested_function ;\ + popl %ebx ;\ + popl %esi ;\ + popl %edi ;\ + .endif ;\ + .if __framesize ;\ + movl %ebp, %esp ;\ + popl %ebp ;\ + .endif ;\ + ret + + +/* + * Macros for declaring procedures + * + * Use of these macros allows ctags to have a predictable way + * to find various types of declarations. They also simplify + * inserting appropriate symbol table information. + * + * NOTE: these simple stubs will be replaced with more + * complicated versions once we know what the linker and gdb + * will require as far as register use masks and frame declarations. + * These macros may also be ifdef'ed in the future to contain profiling + * code. + * + */ + +/* + * TEXT -- declare start of text segment + */ +#define TEXT \ + .text + +/* + * DATA -- declare start of data segment + */ +#define DATA \ + .data + +/* + * LEAF -- declare global leaf procedure + * NOTE: Control SHOULD NOT FLOW into a LEAF! A LEAF should only + * be jumped to. (A leaf may do an align.) Use a LABEL() if you + * need control to flow into the label. + */ +#define LEAF(name, localvarsize) \ + .globl name ;\ + ALIGN ;\ +name: ;\ + LEAF_FUNCTION_PROLOGUE(localvarsize) + +/* + * X_LEAF -- declare alternate global label for leaf + */ +#define X_LEAF(name, value) \ + .globl name ;\ + .set name,value + +/* + * P_LEAF -- declare private leaf procedure + */ +#define P_LEAF(name, localvarsize) \ + ALIGN ;\ +name: ;\ + LEAF_FUNCTION_PROLOGUE(localvarsize) + +/* + * LABEL -- declare a global code label + * MUST be used (rather than LEAF, NESTED, etc) if control + * "flows into" the label. 
+ */ +#define LABEL(name) \ + .globl name ;\ +name: + +/* + * NESTED -- declare procedure that invokes other procedures + */ +#define NESTED(name, localvarsize) \ + .globl name ;\ + ALIGN ;\ +name: ;\ + NESTED_FUNCTION_PROLOGUE(localvarsize) + +/* + * X_NESTED -- declare alternate global label for nested proc + */ +#define X_NESTED(name, value) \ + .globl name ;\ + .set name,value + +/* + * P_NESTED -- declare private nested procedure + */ +#define P_NESTED(name, localvarsize) \ + ALIGN ;\ +name: ;\ + NESTED_FUNCTION_PROLOGUE(localvarsize) + +/* + * END -- mark end of procedure + */ +#define END(name) \ + FUNCTION_EPILOGUE + + +/* + * Storage definition macros + * The main purpose of these is to allow an easy handle for ctags + */ + +/* + * IMPORT -- import symbol + */ +#define IMPORT(name) \ + .reference name + +/* + * ABS -- declare global absolute symbol + */ +#define ABS(name, value) \ + .globl name ;\ + .set name,value + +/* + * P_ABS -- declare private absolute symbol + */ +#define P_ABS(name, value) \ + .set name,value + +/* + * EXPORT -- declare global label for data + */ +#define EXPORT(name) \ + .globl name ;\ +name: + +/* + * BSS -- declare global zero'ed storage + */ +#define BSS(name,size) \ + .comm name,size + + +/* + * P_BSS -- declare private zero'ed storage + */ +#define P_BSS(name,size) \ + .lcomm name,size + +/* + * dynamic/PIC macros for routines which reference external symbols + */ + +#if defined(__DYNAMIC__) +#define PICIFY(var) \ + call 1f ; \ +1: ; \ + popl %edx ; \ + movl L ## var ## $non_lazy_ptr-1b(%edx),%edx + +#define CALL_EXTERN_AGAIN(func) \ + PICIFY(func) ; \ + call %edx + +#define NON_LAZY_STUB(var) \ +.non_lazy_symbol_pointer ; \ +L ## var ## $non_lazy_ptr: ; \ +.indirect_symbol var ; \ +.long 0 ; \ +.text + +#define CALL_EXTERN(func) \ + CALL_EXTERN_AGAIN(func) ; \ + NON_LAZY_STUB(func) + +#define BRANCH_EXTERN(func) \ + PICIFY(func) ; \ + jmp %edx ; \ + NON_LAZY_STUB(func) + +#define PUSH_EXTERN(var) \ + PICIFY(var) ; \ + movl 
(%edx),%edx ; \ + pushl %edx ; \ + NON_LAZY_STUB(var) + +#define REG_TO_EXTERN(reg, var) \ + PICIFY(var) ; \ + movl reg, (%edx) ; \ + NON_LAZY_STUB(var) + +#define EXTERN_TO_REG(var, reg) \ + call 1f ; \ +1: ; \ + popl %edx ; \ + movl L ## var ##$non_lazy_ptr-1b(%edx),reg ; \ + NON_LAZY_STUB(var) + + +#else +#define BRANCH_EXTERN(func) jmp func +#define PUSH_EXTERN(var) pushl var +#define CALL_EXTERN(func) call func +#define CALL_EXTERN_AGAIN(func) call func +#define REG_TO_EXTERN(reg, var) movl reg, var +#define EXTERN_TO_REG(var, reg) movl $ ## var, reg +#endif + +#endif /* __ASSEMBLER__ */ + +#endif /* _ARCH_I386_ASM_HELP_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/i386/byte_order.h b/EXTERNAL_HEADERS/architecture/i386/byte_order.h new file mode 100644 index 000000000..6b1bb5ed9 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/byte_order.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Byte ordering conversion (for i386). + * + * HISTORY + * + * 8 October 1992 ? 
at NeXT + * Converted to NXxxx versions. Condensed history. + * + * 18 May 1992 ? at NeXT + * Created. + */ + +static __inline__ +unsigned short +NXSwapShort( + unsigned short inv +) +{ + register unsigned short value = inv; + + __asm__ volatile( "xchgb %h1, %b1" : "=q" (value) : "0" (value)); + + return (value); +} + +static __inline__ +unsigned long +NXSwapInt( + unsigned long inv +) +{ + register unsigned int outv = inv; + + __asm__ volatile( "bswap %0" : "=r" (outv) : "0" (outv)); + + return (outv); +} + +static __inline__ +unsigned long +NXSwapLong( + unsigned long inv +) +{ + unsigned long outv; + + __asm__ volatile( + "bswap %0" + + : "=r" (outv) + : "0" (inv)); + + return (outv); +} + +static __inline__ +unsigned long long +NXSwapLongLong( + unsigned long long inv +) +{ + union llconv { + unsigned long long ull; + unsigned long ul[2]; + } *inp, outv; + + inp = (union llconv *)&inv; + + outv.ul[0] = NXSwapLong(inp->ul[1]); + outv.ul[1] = NXSwapLong(inp->ul[0]); + + return (outv.ull); +} + +static __inline__ NXSwappedFloat +NXConvertHostFloatToSwapped(float x) +{ + union fconv { + float number; + NXSwappedFloat sf; + }; + return ((union fconv *)&x)->sf; +} + +static __inline__ float +NXConvertSwappedFloatToHost(NXSwappedFloat x) +{ + union fconv { + float number; + NXSwappedFloat sf; + }; + return ((union fconv *)&x)->number; +} + +static __inline__ NXSwappedDouble +NXConvertHostDoubleToSwapped(double x) +{ + union dconv { + double number; + NXSwappedDouble sd; + }; + return ((union dconv *)&x)->sd; +} + +static __inline__ double +NXConvertSwappedDoubleToHost(NXSwappedDouble x) +{ + union dconv { + double number; + NXSwappedDouble sd; + }; + return ((union dconv *)&x)->number; +} + +static __inline__ NXSwappedFloat +NXSwapFloat(NXSwappedFloat x) +{ + return NXSwapLong(x); +} + +static __inline__ NXSwappedDouble +NXSwapDouble(NXSwappedDouble x) +{ + return NXSwapLongLong(x); +} diff --git a/EXTERNAL_HEADERS/architecture/i386/cpu.h 
b/EXTERNAL_HEADERS/architecture/i386/cpu.h new file mode 100644 index 000000000..5db7029a3 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/cpu.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Special processor registers. + * + * HISTORY + * + * 5 April 1992 ? at NeXT + * Created. + */ + +/* + * Control register 0 + */ + +typedef struct _cr0 { + unsigned int pe :1, + mp :1, + em :1, + ts :1, + :1, + ne :1, + :10, + wp :1, + :1, + am :1, + :10, + nw :1, + cd :1, + pg :1; +} cr0_t; + +/* + * Debugging register 6 + */ + +typedef struct _dr6 { + unsigned int b0 :1, + b1 :1, + b2 :1, + b3 :1, + :9, + bd :1, + bs :1, + bt :1, + :16; +} dr6_t; diff --git a/EXTERNAL_HEADERS/architecture/i386/desc.h b/EXTERNAL_HEADERS/architecture/i386/desc.h new file mode 100644 index 000000000..4d3c62cac --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/desc.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Segment descriptors. + * + * HISTORY + * + * 29 March 1992 ? at NeXT + * Created. + */ + +/* + * Code segment descriptor. + */ + +typedef struct code_desc { + unsigned short limit00; + unsigned short base00; + unsigned char base16; + unsigned char type :5, +#define DESC_CODE_EXEC 0x18 +#define DESC_CODE_READ 0x1a + dpl :2, + present :1; + unsigned char limit16 :4, + :2, + opsz :1, +#define DESC_CODE_16B 0 +#define DESC_CODE_32B 1 + granular:1; +#define DESC_GRAN_BYTE 0 +#define DESC_GRAN_PAGE 1 + unsigned char base24; +} code_desc_t; + +/* + * Data segment descriptor. + */ + +typedef struct data_desc { + unsigned short limit00; + unsigned short base00; + unsigned char base16; + unsigned char type :5, +#define DESC_DATA_RONLY 0x10 +#define DESC_DATA_WRITE 0x12 + dpl :2, + present :1; + unsigned char limit16 :4, + :2, + stksz :1, +#define DESC_DATA_16B 0 +#define DESC_DATA_32B 1 + granular:1; + unsigned char base24; +} data_desc_t; + +/* + * LDT segment descriptor. 
+ */ + +typedef struct ldt_desc { + unsigned short limit00; + unsigned short base00; + unsigned char base16; + unsigned char type :5, +#define DESC_LDT 0x02 + :2, + present :1; + unsigned char limit16 :4, + :3, + granular:1; + unsigned char base24; +} ldt_desc_t; + +#include + +/* + * Call gate descriptor. + */ + +typedef struct call_gate { + unsigned short offset00; + sel_t seg; + unsigned int argcnt :5, + :3, + type :5, +#define DESC_CALL_GATE 0x0c + dpl :2, + present :1, + offset16:16; +} call_gate_t; + +/* + * Trap gate descriptor. + */ + +typedef struct trap_gate { + unsigned short offset00; + sel_t seg; + unsigned int :8, + type :5, +#define DESC_TRAP_GATE 0x0f + dpl :2, + present :1, + offset16:16; +} trap_gate_t; + + +/* + * Interrupt gate descriptor. + */ + +typedef struct intr_gate { + unsigned short offset00; + sel_t seg; + unsigned int :8, + type :5, +#define DESC_INTR_GATE 0x0e + dpl :2, + present :1, + offset16:16; +} intr_gate_t; diff --git a/EXTERNAL_HEADERS/architecture/i386/fpu.h b/EXTERNAL_HEADERS/architecture/i386/fpu.h new file mode 100644 index 000000000..a353a12b1 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/fpu.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Floating Point unit. + * + * HISTORY + * + * 5 October 1992 ? at NeXT + * Added names to previously unamed fields in the mantissa. + * + * 5 April 1992 ? at NeXT + * Created. + */ + +/* + * Data register. + */ + +typedef struct fp_data_reg { + unsigned short mant; + unsigned short mant1 :16, + mant2 :16, + mant3 :16; + unsigned short exp :15, + sign :1; +} fp_data_reg_t; + +/* + * Data register stack. + */ + +typedef struct fp_stack { + fp_data_reg_t ST[8]; +} fp_stack_t; + +/* + * Register stack tag word. + */ + +typedef struct fp_tag { + unsigned short tag0 :2, + tag1 :2, + tag2 :2, + tag3 :2, + tag4 :2, + tag5 :2, + tag6 :2, + tag7 :2; +#define FP_TAG_VALID 0 +#define FP_TAG_ZERO 1 +#define FP_TAG_SPEC 2 +#define FP_TAG_EMPTY 3 +} fp_tag_t; + +/* + * Status word. + */ + +typedef struct fp_status { + unsigned short invalid :1, + denorm :1, + zdiv :1, + ovrfl :1, + undfl :1, + precis :1, + stkflt :1, + errsumm :1, + c0 :1, + c1 :1, + c2 :1, + tos :3, + c3 :1, + busy :1; +} fp_status_t; + +/* + * Control word. + */ + +typedef struct fp_control { + unsigned short invalid :1, + denorm :1, + zdiv :1, + ovrfl :1, + undfl :1, + precis :1, + :2, + pc :2, +#define FP_PREC_24B 0 +#define FP_PREC_53B 2 +#define FP_PREC_64B 3 + rc :2, +#define FP_RND_NEAR 0 +#define FP_RND_DOWN 1 +#define FP_RND_UP 2 +#define FP_CHOP 3 + /*inf*/ :1, + :3; +} fp_control_t; + +#include + +/* + * Floating point 'environment' + * used by FSTENV/FLDENV instructions. 
+ */ + +typedef struct fp_env { + fp_control_t control; + unsigned short :16; + fp_status_t status; + unsigned short :16; + fp_tag_t tag; + unsigned short :16; + unsigned int ip; + sel_t cs; + unsigned short opcode; + unsigned int dp; + sel_t ds; + unsigned short :16; +} fp_env_t; + +/* + * Floating point state + * used by FSAVE/FRSTOR instructions. + */ + +typedef struct fp_state { + fp_env_t environ; + fp_stack_t stack; +} fp_state_t; diff --git a/EXTERNAL_HEADERS/architecture/i386/frame.h b/EXTERNAL_HEADERS/architecture/i386/frame.h new file mode 100644 index 000000000..ec5604667 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/frame.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Processor exception frame. + * + * HISTORY + * + * 31 August 1992 ? at NeXT + * Added v86 mode stuff. + * + * 8 June 1992 ? at NeXT + * Changed name of write field in err_code_t + * which collided with write() in shlib. + * + * 30 March 1992 ? at NeXT + * Created. 
+ */ + +/* + * Format of the error code + * generated by the hardware + * for certain exceptions. + */ + +typedef union err_code { + struct err_code_normal { + unsigned int ext :1, + tbl :2, +#define ERR_GDT 0 +#define ERR_IDT 1 +#define ERR_LDT 2 + index :13, + :16; + } normal; + struct err_code_pgfault { + unsigned int prot :1, + wrtflt :1, + user :1, + :29; + } pgfault; +} err_code_t; + +#include + +/* + * The actual hardware exception frame + * is variable in size. An error code is + * only pushed for certain exceptions. + * Previous stack information is only + * pushed for exceptions that cause a + * change in privilege level. The dpl + * field of the saved CS selector can be + * used to determine whether this is the + * case. If the interrupted task was + * executing in v86 mode, then the data + * segment registers are also present in + * the exception frame (in addition to + * previous stack information). This + * case can be determined by examining + * eflags. + */ + +typedef struct except_frame { + err_code_t err; + unsigned int eip; + sel_t cs; + unsigned int :0; + unsigned int eflags; + unsigned int esp; + sel_t ss; + unsigned int :0; + unsigned short v_es; + unsigned int :0; + unsigned short v_ds; + unsigned int :0; + unsigned short v_fs; + unsigned int :0; + unsigned short v_gs; + unsigned int :0; +} except_frame_t; + +/* + * Values in eflags. 
+ */ + +#ifndef EFL_CF /* FIXME */ +#define EFL_CF 0x00001 +#define EFL_PF 0x00004 +#define EFL_AF 0x00010 +#define EFL_ZF 0x00040 +#define EFL_SF 0x00080 +#define EFL_TF 0x00100 +#define EFL_IF 0x00200 +#define EFL_DF 0x00400 +#define EFL_OF 0x00800 +#define EFL_IOPL 0x03000 +#define EFL_NT 0x04000 +#define EFL_RF 0x10000 +#define EFL_VM 0x20000 +#define EFL_AC 0x40000 +#endif + +#define EFL_CLR 0xfff88028 +#define EFL_SET 0x00000002 diff --git a/EXTERNAL_HEADERS/architecture/i386/io.h b/EXTERNAL_HEADERS/architecture/i386/io.h new file mode 100644 index 000000000..2fe56c292 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/io.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel 386 Family: IO space defines. + * + * HISTORY + * + * 11 August 1992 ? at NeXT + * Created. 
+ */ + +typedef unsigned short io_addr_t; +typedef unsigned short io_len_t; diff --git a/EXTERNAL_HEADERS/architecture/i386/reg_help.h b/EXTERNAL_HEADERS/architecture/i386/reg_help.h new file mode 100644 index 000000000..89dc49a08 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/reg_help.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. + * + * File: architecture/i386/reg_help.h + * Author: Mike DeMoney, NeXT Computer, Inc. + * Modified for i386 by: Bruce Martin, NeXT Computer, Inc. + * + * This header file defines cpp macros useful for defining + * machine register and doing machine-level operations. + * + * HISTORY + * 10-Mar-92 Bruce Martin (bmartin@next.com) + * Adapted to i386 + * 23-Jan-91 Mike DeMoney (mike@next.com) + * Created. 
+ */ + +#ifndef _ARCH_I386_REG_HELP_H_ +#define _ARCH_I386_REG_HELP_H_ + +/* Bitfield definition aid */ +#define BITS_WIDTH(msb, lsb) ((msb)-(lsb)+1) +#define BIT_WIDTH(pos) (1) /* mostly to record the position */ + +/* Mask creation */ +#define MKMASK(width, offset) (((unsigned)-1)>>(32-(width))<<(offset)) +#define BITSMASK(msb, lsb) MKMASK(BITS_WIDTH(msb, lsb), lsb & 0x1f) +#define BITMASK(pos) MKMASK(BIT_WIDTH(pos), pos & 0x1f) + +/* Register addresses */ +#if __ASSEMBLER__ +# define REG_ADDR(type, addr) (addr) +#else /* __ASSEMBLER__ */ +# define REG_ADDR(type, addr) (*(volatile type *)(addr)) +#endif /* __ASSEMBLER__ */ + +/* Cast a register to be an unsigned */ +#define CONTENTS(foo) (*(unsigned *) &(foo)) + +/* Stack pointer must always be a multiple of 4 */ +#define STACK_INCR 4 +#define ROUND_FRAME(x) ((((unsigned)(x)) + STACK_INCR - 1) & ~(STACK_INCR-1)) + +/* STRINGIFY -- perform all possible substitutions, then stringify */ +#define __STR(x) #x /* just a helper macro */ +#define STRINGIFY(x) __STR(x) + +/* + * REG_PAIR_DEF -- define a register pair + * Register pairs are appropriately aligned to allow access via + * ld.d and st.d. + * + * Usage: + * struct foo { + * REG_PAIR_DEF( + * bar_t *, barp, + * afu_t, afu + * ); + * }; + * + * Access to individual entries of the pair is via the REG_PAIR + * macro (below). + */ +#define REG_PAIR_DEF(type0, name0, type1, name1) \ + struct { \ + type0 name0 __attribute__(( aligned(8) )); \ + type1 name1; \ + } name0##_##name1 + +/* + * REG_PAIR -- Macro to define names for accessing individual registers + * of register pairs. 
+ * + * Usage: + * arg0 is first element of pair + * arg1 is second element of pair + * arg2 is desired element of pair + * eg: + * #define foo_barp REG_PAIR(barp, afu, afu) + */ +#define REG_PAIR(name0, name1, the_name) \ + name0##_##name1.the_name + +#endif /* _ARCH_I386_REG_HELP_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/i386/sel.h b/EXTERNAL_HEADERS/architecture/i386/sel.h new file mode 100644 index 000000000..8588273e5 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/sel.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Segment selector. + * + * HISTORY + * + * 29 March 1992 ? at NeXT + * Created. + */ + +/* + * Segment selector. 
 */

#ifndef __XNU_ARCH_I386_SEL_H
#define __XNU_ARCH_I386_SEL_H

/*
 * Segment selector: requestor privilege level, table indicator
 * (GDT vs. LDT), and a 13-bit descriptor table index.
 */
typedef struct sel {
    unsigned short	rpl	:2,	/* requestor privilege level */
#define KERN_PRIV	0
#define USER_PRIV	3
			ti	:1,	/* table indicator */
#define SEL_GDT		0
#define SEL_LDT		1
			index	:13;	/* descriptor table index */
} sel_t;

/* All-zero selector (compound literal): RPL 0, GDT, index 0. */
#define NULL_SEL	((sel_t) { 0, 0, 0 } )

#endif /* __XNU_ARCH_I386_SEL_H */
/* diff metadata:
 * diff --git a/EXTERNAL_HEADERS/architecture/i386/table.h
 * b/EXTERNAL_HEADERS/architecture/i386/table.h new file mode 100644
 * index 000000000..cba52e414 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/table.h
 * @@ -0,0 +1,92 @@
 */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1992 NeXT Computer, Inc.
 *
 * Intel386 Family:	Descriptor tables.
 *
 * HISTORY
 *
 * 30 March 1992 ? at NeXT
 *	Created.
 */

/* NOTE(review): the header names after these #include directives were
 * lost in extraction; table.h references code_desc_t/tss_desc_t etc.,
 * so these presumably were <architecture/i386/desc.h> and
 * <architecture/i386/tss.h> — TODO confirm against the original tree. */
#include
#include

/*
 * A totally generic descriptor
 * table entry.
+ */ + +typedef union dt_entry { + code_desc_t code; + data_desc_t data; + ldt_desc_t ldt; + tss_desc_t task_state; + call_gate_t call_gate; + trap_gate_t trap_gate; + intr_gate_t intr_gate; + task_gate_t task_gate; +} dt_entry_t; + +#define DESC_TBL_MAX 8192 + +/* + * Global descriptor table. + */ + +typedef union gdt_entry { + code_desc_t code; + data_desc_t data; + ldt_desc_t ldt; + call_gate_t call_gate; + task_gate_t task_gate; + tss_desc_t task_state; +} gdt_entry_t; + +typedef gdt_entry_t gdt_t; + +/* + * Interrupt descriptor table. + */ + +typedef union idt_entry { + trap_gate_t trap_gate; + intr_gate_t intr_gate; + task_gate_t task_gate; +} idt_entry_t; + +typedef idt_entry_t idt_t; + +/* + * Local descriptor table. + */ + +typedef union ldt_entry { + code_desc_t code; + data_desc_t data; + call_gate_t call_gate; + task_gate_t task_gate; +} ldt_entry_t; + +typedef ldt_entry_t ldt_t; diff --git a/EXTERNAL_HEADERS/architecture/i386/tss.h b/EXTERNAL_HEADERS/architecture/i386/tss.h new file mode 100644 index 000000000..68c902900 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/i386/tss.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Task State Segment. + * + * HISTORY + * + * 29 March 1992 ? at NeXT + * Created. + */ + +#include + +/* + * Task State segment. + */ + +typedef struct tss { + sel_t oldtss; + unsigned int :0; + unsigned int esp0; + sel_t ss0; + unsigned int :0; + unsigned int esp1; + sel_t ss1; + unsigned int :0; + unsigned int esp2; + sel_t ss2; + unsigned int :0; + unsigned int cr3; + unsigned int eip; + unsigned int eflags; + unsigned int eax; + unsigned int ecx; + unsigned int edx; + unsigned int ebx; + unsigned int esp; + unsigned int ebp; + unsigned int esi; + unsigned int edi; + sel_t es; + unsigned int :0; + sel_t cs; + unsigned int :0; + sel_t ss; + unsigned int :0; + sel_t ds; + unsigned int :0; + sel_t fs; + unsigned int :0; + sel_t gs; + unsigned int :0; + sel_t ldt; + unsigned int :0; + unsigned int t :1, + :15, + io_bmap :16; +} tss_t; + +#define TSS_SIZE(n) (sizeof (struct tss) + (n)) + +/* + * Task State segment descriptor. + */ + +typedef struct tss_desc { + unsigned short limit00; + unsigned short base00; + unsigned char base16; + unsigned char type :5, +#define DESC_TSS 0x09 + dpl :2, + present :1; + unsigned char limit16 :4, + :3, + granular:1; + unsigned char base24; +} tss_desc_t; + +/* + * Task gate descriptor. + */ + +typedef struct task_gate { + unsigned short :16; + sel_t tss; + unsigned int :8, + type :5, +#define DESC_TASK_GATE 0x05 + dpl :2, + present :1, + :0; +} task_gate_t; diff --git a/EXTERNAL_HEADERS/architecture/ppc/asm_help.h b/EXTERNAL_HEADERS/architecture/ppc/asm_help.h new file mode 100644 index 000000000..9d8181e21 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/asm_help.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1996 NeXT Software, Inc. All rights reserved. + * + * File: architecture/ppc/asm_help.h + * Author: Mike DeMoney, NeXT Software, Inc. + * + * This header file defines macros useful when writing assembly code + * for the PowerPC processors. + * r12 is used as the tmp register / PICIFY base. + * + * HISTORY + * 20-May-97 Umesh Vaishampayan (umeshv@apple.com) + * Implemented Dynamic / PIC macros. + * + * 28-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com) + * added ".align" directive to various macros to avoid alignment + * faults. Moved Register Usage #defines to reg_help.h as that's + * where they should have been in the first place. + * Added Dynamic / PIC macroes for routines which refernce external + * symbols. Not implemented fully as yet. + * + * 05-Nov-92 Mike DeMoney (mike@next.com) + * Created. + */ + +#ifndef _ARCH_PPC_ASM_HELP_H_ +#define _ARCH_PPC_ASM_HELP_H_ + +#include + +#ifdef __ASSEMBLER__ +/* + * ppc stack frames look like this after procedure prolog has + * been executed: + * + * Higher address: + * ......... 
+ * +-------------------------------+ + * | caller's LR | + * +-------------------------------+ + * | caller's CR | + * +-------------------------------+ + * Caller's SP->| caller's caller's sp | ^^ Caller's Frame ^^ + * +===============================+ vv Called Rtn Frame vv + * | Save Area for | FPF 31 + * .......... + * | Caller's FPF's | FPF n + * +-------------------------------+ + * | Save Area for | GRF 31 + * .......... + * | Caller's GRF's | GRF n + * +-------------------------------+ + * | alignment pad | + * ............ + * | (if necessary) | + * +-------------------------------+ + * | Local | + * ........ + * | Variables | + * +-------------------------------+ + * SP + X -> | aN for FUTURE call | + * +-------------------------------+ + * .......... + * +-------------------------------+ + * SP + 28 -> | a1 for FUTURE call | + * +-------------------------------+ + * SP + 24 -> | a0 for FUTURE call | + * +-------------------------------+ + * SP + 20 -> | caller's TOC | + * +-------------------------------+ + * SP + 16 -> | reserved | + * +-------------------------------+ + * SP + 12 -> | reserved | + * +-------------------------------+ + * SP + 8 -> | LR callee-save for FUTURE call| + * +-------------------------------+ + * SP + 4 -> | CR callee-save for FUTURE call| + * +-------------------------------+ + * SP -> | caller's sp | + * +===============================+ + * Lower address: + * + * NOTE: All state with the exception of LR and CR are saved in the + * called routines frame. LR and CR are saved in the CALLER'S FRAME. + * + * ALSO NOTE: Args to the called routine are found in the caller's frame. + */ + +/* + * ARG(n) -- stack offset to n'th argument + * + * NOTE CAREFULLY! These macros start numbering arguments at 1 (NOT 0) + * The first argument is ARG(1). + * + * ALSO NOTE: This stack offset is only valid if using routine + * DOES NOT alter SP. 
+ * + */ +#define ARG(n) ((((n) - 1) * 4) + 24) + +/* + * Macros for building stack frame according to C calling conventions. + * lr, cr, and sp are saved. + * + * NOTE WELL: localvarsize is in bytes, maxargsout is a count of words, + * grfsaved and fpfsaved is a count of registers. BE SURE TO COUNT + * BOTH FP (r31) AND sN REGISTERS IN THE COUNT OF GRF REGISTERS SAVED! + * This will be TWO more than the N of the highest sN register you + * save: s2 implies you are saving s2, s1, s0, and fp => grfsaved + * should be 4! + * + * FURTHER NOTE: These macros do NOT SAVE GRF or FPF registers. User + * must do that. GRF sN regs should be saved via + * stmw sN,SAVED_GRF_S(N)(sp) + * where N is the highest numbered s* register to be saved. E.g. if + * s0, s1, and s2 are to be saved use: + * stmw s2,SAVED_GRF_S(2)(sp) + * Note that this also saves fp. + * An individual saved grf can be loaded via: + * lwz s2,SAVED_GRF_S(2)(sp) + * Analogous stuff works for fpf's. + * + * NOTE: these simple routines will be replaced with more complicated + * ones once we know what the linker and gdb will require as for as + * register use masks and frame declarations. + * + * Warning: ROUND_TO_STACK is only to be used in assembly language; + * for C usage, use ROUND_FRAME() in reg_help.h. + */ +#define ROUND_TO_STACK(len) \ + (((len) + STACK_INCR - 1) / STACK_INCR * STACK_INCR) + +#define BUILD_FRAME(localvarsize, maxargsout, grfsaved, fpfsaved) \ + .set __argoutsize, ROUND_TO_STACK((maxargsout) * 4) @\ + .if __argoutsize < 32 @\ + .set __argoutsize,32 @\ + .endif @\ + .set __framesize, ROUND_TO_STACK( \ + 24 + __argoutsize + (localvarsize) \ + + 4*(grfsaved) + 8*(fpfsaved)) @\ + .set __grfbase,(__framesize - 4*(grfsaved) - 8*(fpfsaved)) @\ + .set __fpfbase,(__framesize - 8*(fpfsaved)) @\ + mflr r0 @\ + mfcr r12 @\ + stw r0,8(sp) @\ + stw r12,4(sp) @\ + stwu r1,-__framesize(r1) + +/* + * Macros for referencing data in stack frame. + * + * NOTE WELL: ARG's and VAR's start at 1, NOT 0. Why ??? 
(FIXME) + */ +#define LOCAL_VAR(n) (((n)-1)*4 + __argoutsize + 24) +#define SAVED_GRF_S(n) (__grfbase + ((grfsaved) - (n) - 2) * 4) +#define SAVED_FRF_FS(n) (__fpfbase + ((fpfsaved) - (n) - 1) * 4) +#define ARG_IN(n) (ARG(n) + __framesize) +#define ARG_OUT(n) (ARG(n) + 0) +#define SAVED_FP (__grfbase + ((grfsaved) - 1) * 4) +#define SAVED_LR (__framesize + 8) +#define SAVED_CR (__framesize + 4) + +/* + * Macros for unwinding stack frame. + * NOTE: GRF's and FPF's are NOT RESTORED. User must do this before + * using this macro. + */ +#define RETURN \ + .if __framesize @\ + lwz32 r0,r1,SAVED_LR @\ + lwz32 r12,r1,SAVED_CR @\ + addic sp,r1,__framesize @\ + mtlr r0 @\ + mtcrf 0xff,r12 @\ + blr @\ + .else @\ + blr @\ + .endif + + +/* + * Macros for declaring procedures + * + * Use of these macros allows ctags to have a predictable way + * to find various types of declarations. They also simplify + * inserting appropriate symbol table information. + * + * NOTE: these simple stubs will be replaced with more + * complicated versions once we know what the linker and gdb + * will require as far as register use masks and frame declarations. + * These macros may also be ifdef'ed in the future to contain profiling + * code. + * + * FIXME: Document what makes a leaf a LEAF and a handler a HANDLER. + * (E.g. leaf's have return pc in lr, NESTED's have rpc in offset off + * sp, handlers have rpc in exception frame which is found via exception + * link, etc etc.) + */ + +/* + * TEXT -- declare start of text segment + */ +#define TEXT \ + .text @\ + .align 2 + +/* + * LEAF -- declare global leaf procedure + * NOTE: Control SHOULD NOT FLOW into a LEAF! A LEAF should only + * be jumped to. (A leaf may do an align.) Use a LABEL() if you + * need control to flow into the label. 
+ */ +#define LEAF(name) \ + .align 2 @\ + .globl name @\ +name: @\ + .set __framesize,0 + +/* + * X_LEAF -- declare alternate global label for leaf + */ +#define X_LEAF(name, value) \ + .globl name @\ + .set name,value + +/* + * P_LEAF -- declare private leaf procedure + */ +#define P_LEAF(name) \ + .align 2 @\ +name: @\ + .set __framesize,0 + +/* + * LABEL -- declare a global code label + * MUST be used (rather than LEAF, NESTED, etc) if control + * "flows into" the label. + */ +#define LABEL(name) \ + .align 2 @\ + .globl name @\ +name: + +/* + * NESTED -- declare procedure that invokes other procedures + */ +#define NESTED(name, localvarsize, maxargsout, grfsaved, fpfsaved)\ + .align 2 @\ + .globl name @\ +name: @\ + BUILD_FRAME(localvarsize, maxargsout, grfsaved, fpfsaved) + +/* + * X_NESTED -- declare alternate global label for nested proc + */ +#define X_NESTED(name, value) \ + .globl name @\ + .set name,value + +/* + * P_NESTED -- declare private nested procedure + */ +#define P_NESTED(name, localvarsize, maxargsout, grfsaved, fpfsaved)\ + .align 2 @\ +name: @\ + BUILD_FRAME(locavarsize, maxargsout, grfsaved, fpfsaved) + +/* + * HANDLER -- declare procedure with exception frame rather than + * standard C frame + */ +#define HANDLER(name) \ + .align 2 @\ + .globl name @\ +name: + +/* + * X_HANDLER -- declare alternate name for exception handler + * (Should appear immediately before a HANDLER declaration or + * another X_HANDLER declaration) + */ +#define X_HANDLER(name) \ + .align 2 @\ + .globl name @\ +name: + +/* + * P_HANDLER -- declare private handler + */ +#define P_HANDLER(name) \ + .align 2 @\ +name: + +/* + * END -- mark end of procedure + * FIXME: Unimplemented for now. 
+ */ +#define END(name) + +/* + * BL -- call procedure (relative) + */ +#define BL(name) \ + bl name + +/* + * Storage definition macros + * The main purpose of these is to allow an easy handle for ctags + */ + +/* + * IMPORT -- import symbol + */ +#define IMPORT(name) \ + .reference name + +/* + * ABS -- declare global absolute symbol + */ +#define ABS(name, value) \ + .globl name @\ + .set name,value + +/* + * P_ABS -- declare private absolute symbol + */ +#define P_ABS(name, value) \ + .set name,value + +/* + * EXPORT -- declare global label for data + */ +#define EXPORT(name) \ + .align 2 @\ + .globl name @\ +name: + +/* + * BSS -- declare global zero'ed storage + */ +#define BSS(name,size) \ + .comm name,size + + +/* + * P_BSS -- declare private zero'ed storage + */ +#define P_BSS(name,size) \ + .lcomm name,size + +/* + * dynamic/PIC macros for routines which reference external symbols + */ +#if defined(__DYNAMIC__) +#define PICIFY_REG r12 + +/* Assume that the lr is saved before calling any of these macros */ +/* using PICIFY() */ + +#define PICIFY(var) \ + mflr r0 @\ + bl 1f @\ +1: mflr PICIFY_REG @\ + mtlr r0 @\ + addis PICIFY_REG, PICIFY_REG, ha16(L ## var ## $non_lazy_ptr - 1b) @\ + lwz PICIFY_REG, lo16(L ## var ## $non_lazy_ptr - 1b)(PICIFY_REG) + +#define CALL_EXTERN_AGAIN(var) \ + PICIFY(var) @\ + mtctr PICIFY_REG @\ + mflr r0 @\ + stw r0,8(r1) @\ + stwu r1,-56(r1) @\ + bctrl @\ + addic r1,r1,56 @\ + lwz r0,8(r1) @\ + mtlr r0 + +#define NON_LAZY_STUB(var) \ + .non_lazy_symbol_pointer @\ + .align 2 @\ +L ## var ## $non_lazy_ptr: @\ + .indirect_symbol var @\ + .long 0 @\ + .text @\ + .align 2 + +#define BRANCH_EXTERN(var) \ + PICIFY(var) @\ + mtctr PICIFY_REG @\ + bctr @\ + NON_LAZY_STUB(var) + +#define CALL_EXTERN(var) \ + CALL_EXTERN_AGAIN(var) @\ + NON_LAZY_STUB(var) + +#define REG_TO_EXTERN(reg, var) \ + PICIFY(var) @\ + stw reg, 0(PICIFY_REG) @\ + NON_LAZY_STUB(var) + +#define EXTERN_TO_REG(reg, var) \ + PICIFY(var) @\ + lwz reg, 0(PICIFY_REG) @\ + 
NON_LAZY_STUB(var) + +#else /* ! __DYNAMIC__ */ +#define TMP_REG r12 +#define BRANCH_EXTERN(var) \ + b var + +#define CALL_EXTERN(var) \ + bl var + +#define CALL_EXTERN_AGAIN(var) \ + CALL_EXTERN(var) + +#define REG_TO_EXTERN(reg, var) \ + lis TMP_REG, ha16(var) @\ + stw reg, lo16(var)(TMP_REG) + +#define EXTERN_TO_REG(reg, var) \ + lis reg, ha16(var) @\ + lwz reg, lo16(var)(reg) + +#endif /* __DYNAMIC__ */ + +#endif /* __ASSEMBLER__ */ +#endif /* _ARCH_PPC_ASM_HELP_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/ppc/basic_regs.h b/EXTERNAL_HEADERS/architecture/ppc/basic_regs.h new file mode 100644 index 000000000..f902cb084 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/basic_regs.h @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1996 NeXT Software, Inc. All rights reserved. + * + * File: architecture/ppc/basic_regs.h + * Author: Doug Mitchell, NeXT Software, Inc. + * + * Basic ppc registers. + * + * HISTORY + * 22-May-97 Umesh Vaishampayan (umeshv@apple.com) + Updated to match MPCFPE32B/AD 1/97 REV. 
1
+ * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com)
+ * Ported from m98k.
+ * 05-Nov-92 Doug Mitchell at NeXT
+ * Created.
+ */
+
+#ifndef _ARCH_PPC_BASIC_REGS_H_
+#define _ARCH_PPC_BASIC_REGS_H_
+
+#include <architecture/ppc/reg_help.h>
+#include <architecture/ppc/macro_help.h>
+
+#if !defined(__ASSEMBLER__)
+
+/*
+ * Number of General Purpose registers.
+ */
+#define PPC_NGP_REGS 32
+
+/*
+ * Common half-word used in Machine State Register and in
+ * various exception frames. Defined as a macro because the compiler
+ * will align a struct to a word boundary when used inside another struct.
+ */
+#define MSR_BITS \
+ unsigned ee:BIT_WIDTH(15), /* external intr enable */ \
+ pr:BIT_WIDTH(14), /* problem state */ \
+ fp:BIT_WIDTH(13), /* floating point avail */ \
+ me:BIT_WIDTH(12), /* machine check enable */ \
+ fe0:BIT_WIDTH(11), /* fp exception mode 0 */ \
+ se:BIT_WIDTH(10), /* single step enable */ \
+ be:BIT_WIDTH(9), /* branch trace enable */ \
+ fe1:BIT_WIDTH(8), /* fp exception mode 1 */ \
+ rsvd1:BIT_WIDTH(7), /* reserved */ \
+ ip:BIT_WIDTH(6), /* interrupt prefix */ \
+ ir:BIT_WIDTH(5), /* instruction relocate */ \
+ dr:BIT_WIDTH(4), /* data relocate */ \
+ rsvd2:BITS_WIDTH(3,2), /* reserved */ \
+ ri:BIT_WIDTH(1), /* recoverable exception */ \
+ le:BIT_WIDTH(0) /* Little-endian mode */
+
+/*
+ * Machine state register.
+ * Read and written via get_msr() and set_msr() inlines, below.
+ */ +typedef struct { + unsigned rsvd3:BITS_WIDTH(31,19), // reserved + pow:BIT_WIDTH(18), // Power management enable + rsvd0: BIT_WIDTH(17), // reserved + ile: BIT_WIDTH(16); // exception little endian + + MSR_BITS; // see above +} msr_t; + +/* + * Data Storage Interrupt Status Register (DSISR) + */ +typedef struct { + unsigned dse:BIT_WIDTH(31); // direct-store error + unsigned tnf:BIT_WIDTH(30); // translation not found + unsigned :BITS_WIDTH(29,28); + unsigned pe:BIT_WIDTH(27); // protection error + unsigned dsr:BIT_WIDTH(26); // lwarx/stwcx to direct-store + unsigned rw:BIT_WIDTH(25); // 1 => store, 0 => load + unsigned :BITS_WIDTH(24,23); + unsigned dab:BIT_WIDTH(22); // data address bkpt (601) + unsigned ssf:BIT_WIDTH(21); // seg table search failed + unsigned :BITS_WIDTH(20,0); +} dsisr_t; + +/* + * Instruction Storage Interrupt Status Register (really SRR1) + */ +typedef struct { + unsigned :BIT_WIDTH(31); + unsigned tnf:BIT_WIDTH(30); // translation not found + unsigned :BIT_WIDTH(29); + unsigned dse:BIT_WIDTH(28); // direct-store fetch error + unsigned pe:BIT_WIDTH(27); // protection error + unsigned :BITS_WIDTH(26,22); + unsigned ssf:BIT_WIDTH(21); // seg table search failed + unsigned :BITS_WIDTH(20,16); + MSR_BITS; +} isisr_t; + +/* + * Alignment Interrupt Status Register (really DSISR) + * NOTE: bit numbers in field *names* are in IBM'ese (0 is MSB). + * FIXME: Yuck!!! Double Yuck!!! 
+ */ +typedef struct { + unsigned :BITS_WIDTH(31,20); + unsigned ds3031:BITS_WIDTH(19,18);// bits 30:31 if DS form + unsigned :BIT_WIDTH(17); + unsigned x2930:BITS_WIDTH(16,15); // bits 29:30 if X form + unsigned x25:BIT_WIDTH(14); // bit 25 if X form or + // bit 5 if D or DS form + unsigned x2124:BITS_WIDTH(13,10); // bits 21:24 if X form or + // bits 1:4 if D or DS form + unsigned all615:BITS_WIDTH(9,0); // bits 6:15 of instr + MSR_BITS; +} aisr_t; + +/* + * Program Interrupt Status Register (really SRR1) + */ +typedef struct { + unsigned :BITS_WIDTH(31,21); + unsigned fpee:BIT_WIDTH(20); // floating pt enable exception + unsigned ill:BIT_WIDTH(19); // illegal instruction + unsigned priv:BIT_WIDTH(18); // privileged instruction + unsigned trap:BIT_WIDTH(17); // trap program interrupt + unsigned subseq:BIT_WIDTH(16); // 1 => SRR0 points to + // subsequent instruction + MSR_BITS; +} pisr_t; + +/* + * Condition register. May not be useful in C, let's see... + */ +typedef struct { + unsigned lt:BIT_WIDTH(31), // negative + gt:BIT_WIDTH(30), // positive + eq:BIT_WIDTH(29), // equal to zero + so:BIT_WIDTH(28), // summary overflow + fx:BIT_WIDTH(27), // floating point exception + fex:BIT_WIDTH(26), // fp enabled exception + vx:BIT_WIDTH(25), // fp invalid operation + // exception + ox:BIT_WIDTH(24), // fp overflow exception + rsvd:BITS_WIDTH(23,0); // reserved +} cr_t; + +/* + * Abstract values representing fe0:fe1. + * See get_fp_exc_mode(), below. + */ +typedef enum { + FEM_IGNORE_EXCEP, // ignore exceptions + FEM_IMPR_NONREC, // imprecise nonrecoverable + FEM_IMPR_RECOV, // imprecise recoverable + FEM_PRECISE +} fp_exc_mode_t; + + +/* + * Special purpose registers. + */ + +/* + * Processor version register (special purpose register pvr). 
+ */
+typedef struct {
+ unsigned version:BITS_WIDTH(31,16),
+ revision:BITS_WIDTH(15,0);
+} pvr_t;
+
+/*
+ * Fixed point exception register (special purpose register xer)
+ */
+typedef struct {
+ unsigned so:BIT_WIDTH(31), // summary overflow
+ ov:BIT_WIDTH(30), // overflow
+ ca:BIT_WIDTH(29), // carry
+ rsvd1:BITS_WIDTH(28,7), // reserved
+ byte_count:BITS_WIDTH(6,0);
+} xer_t;
+
+/*
+ * Inlines and macros to manipulate the above registers.
+ */
+
+/*
+ * Get/set machine state register.
+ */
+static __inline__ msr_t
+get_msr()
+{
+ msr_t __msr_tmp;
+ __asm__ volatile ("mfmsr %0 /* mfmsr */" : "=r" (__msr_tmp));
+ return __msr_tmp;
+}
+
+static __inline__ void
+set_msr(msr_t msr)
+{
+ __asm__ volatile ("mtmsr %0 /* mtmsr */ " : : "r" (msr));
+}
+
+/*
+ * Determine current fp_exc_mode_t given prog_mode.
+ */
+static __inline__ fp_exc_mode_t
+get_fp_exc_mode(msr_t pmr)
+{
+ if(pmr.fe0)
+ return pmr.fe1 ? FEM_PRECISE : FEM_IMPR_RECOV;
+ else
+ return pmr.fe1 ? FEM_IMPR_NONREC : FEM_IGNORE_EXCEP;
+}
+
+/*
+ * Software definitions for special purpose registers.
+ * The same register is used as per_cpu data pointer and
+ * vector base register. This requires that the vector
+ * table be the first item in the per_cpu table.
+ */
+#define SR_EXCEPTION_TMP_LR sprg0
+#define SR_EXCEPTION_TMP_CR sprg1
+#define SR_EXCEPTION_TMP_AT sprg2
+#define SR_PER_CPU_DATA sprg3
+#define SR_VBR sprg3
+
+/*
+ * Get/set special purpose registers.
+ *
+ * GET_SPR - get SPR by name.
+ *
+ * Example usage:
+ *
+ * {
+ * xer_t some_xer;
+ *
+ * some_xer = GET_SPR(xer_t, xer);
+ * ...
+ * }
+ *
+ * This is a strange one. We're creating a list of C expressions within
+ * a set of curlies; the last expression ("__spr_tmp;") is the return value
+ * of the statement created by the curlies.
+ * + */ + +#define GET_SPR(type, spr) \ +({ \ + unsigned __spr_tmp; \ + __asm__ volatile ("mfspr %0, " STRINGIFY(spr) : "=r" (__spr_tmp)); \ + *(type *)&__spr_tmp; \ +}) + +/* + * Example usage of SET_SPR: + * + * { + * xer_t some_xer; + * + * ...set up some_xer... + * SET_SPR(xer, some_xer); + * } + */ +#define SET_SPR(spr, val) \ +MACRO_BEGIN \ + __typeof__ (val) __spr_tmp = (val); \ + __asm__ volatile ("mtspr "STRINGIFY(spr) ", %0" : : "r" (__spr_tmp)); \ +MACRO_END + +/* + * Fully synchronize instruction stream. + */ +static __inline__ void +ppc_sync() +{ + __asm__ volatile ("sync /* sync */" : : ); +} + +#endif /* ! __ASSEMBLER__ */ + +#endif /* _ARCH_PPC_BASIC_REGS_H_ */ + diff --git a/EXTERNAL_HEADERS/architecture/ppc/byte_order.h b/EXTERNAL_HEADERS/architecture/ppc/byte_order.h new file mode 100644 index 000000000..77927e97a --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/byte_order.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. + * + * Byte ordering conversion (for ppc). 
+ * + * HISTORY + * + * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com) + * Ported from m98k. + * + * 8 October 1992 ? at NeXT + * Converted to NXxxx versions. Condensed history. + * + * 28 August 1992 Bruce Martin @NeXT + * Created. + */ + +static __inline__ +unsigned short +NXSwapShort( + unsigned short inv +) +{ + union sconv { + unsigned short us; + unsigned char uc[2]; + } *inp, outv; + + inp = (union sconv *)&inv; + + outv.uc[0] = inp->uc[1]; + outv.uc[1] = inp->uc[0]; + + return (outv.us); +} + +static __inline__ +unsigned int +NXSwapInt( + unsigned int inv +) +{ + union iconv { + unsigned int ui; + unsigned char uc[4]; + } *inp, outv; + + inp = (union iconv *)&inv; + + outv.uc[0] = inp->uc[3]; + outv.uc[1] = inp->uc[2]; + outv.uc[2] = inp->uc[1]; + outv.uc[3] = inp->uc[0]; + + return (outv.ui); +} + +static __inline__ +unsigned long +NXSwapLong( + unsigned long inv +) +{ + union lconv { + unsigned long ul; + unsigned char uc[4]; + } *inp, outv; + + inp = (union lconv *)&inv; + + outv.uc[0] = inp->uc[3]; + outv.uc[1] = inp->uc[2]; + outv.uc[2] = inp->uc[1]; + outv.uc[3] = inp->uc[0]; + + return (outv.ul); +} + +static __inline__ +unsigned long long +NXSwapLongLong( + unsigned long long inv +) +{ + union llconv { + unsigned long long ull; + unsigned char uc[8]; + } *inp, outv; + + inp = (union llconv *)&inv; + + outv.uc[0] = inp->uc[7]; + outv.uc[1] = inp->uc[6]; + outv.uc[2] = inp->uc[5]; + outv.uc[3] = inp->uc[4]; + outv.uc[4] = inp->uc[3]; + outv.uc[5] = inp->uc[2]; + outv.uc[6] = inp->uc[1]; + outv.uc[7] = inp->uc[0]; + + return (outv.ull); +} + +#ifndef KERNEL + +static __inline__ NXSwappedFloat +NXConvertHostFloatToSwapped(float x) +{ + union fconv { + float number; + NXSwappedFloat sf; + }; + return ((union fconv *)&x)->sf; +} + +static __inline__ float +NXConvertSwappedFloatToHost(NXSwappedFloat x) +{ + union fconv { + float number; + NXSwappedFloat sf; + }; + return ((union fconv *)&x)->number; +} + +static __inline__ NXSwappedDouble 
+NXConvertHostDoubleToSwapped(double x) +{ + union dconv { + double number; + NXSwappedDouble sd; + }; + return ((union dconv *)&x)->sd; +} + +static __inline__ double +NXConvertSwappedDoubleToHost(NXSwappedDouble x) +{ + union dconv { + double number; + NXSwappedDouble sd; + }; + return ((union dconv *)&x)->number; +} + +static __inline__ NXSwappedFloat +NXSwapFloat(NXSwappedFloat x) +{ + return NXSwapLong(x); +} + +static __inline__ NXSwappedDouble +NXSwapDouble(NXSwappedDouble x) +{ + return NXSwapLongLong(x); +} + +#endif /* ! KERNEL */ diff --git a/EXTERNAL_HEADERS/architecture/ppc/cframe.h b/EXTERNAL_HEADERS/architecture/ppc/cframe.h new file mode 100644 index 000000000..80a08ab6c --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/cframe.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Software, Inc. All rights reserved. + * + * File: architecture/ppc/cframe.h + * Author: Mike DeMoney, NeXT Software, Inc. + * + * This include file defines C calling sequence defines + * for ppc port. 
+ *
+ * HISTORY
+ * 20-May-97 Umesh Vaishampayan (umeshv@apple.com)
+ * Added C_RED_ZONE.
+ * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com)
+ * Ported from m98k.
+ * 11-June-91 Mike DeMoney (mike@next.com)
+ * Created.
+ */
+
+#ifndef _ARCH_PPC_CFRAME_H_
+#define _ARCH_PPC_CFRAME_H_
+
+#define C_ARGSAVE_LEN 32 /* at least 32 bytes of arg save */
+#define C_STACK_ALIGN 16 /* stack must be 16 byte aligned */
+#define C_RED_ZONE 224 /* 224 bytes to skip over saved registers */
+
+#endif /* _ARCH_PPC_CFRAME_H_ */
diff --git a/EXTERNAL_HEADERS/architecture/ppc/fp_regs.h b/EXTERNAL_HEADERS/architecture/ppc/fp_regs.h
new file mode 100644
index 000000000..51b78d404
--- /dev/null
+++ b/EXTERNAL_HEADERS/architecture/ppc/fp_regs.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/* Copyright (c) 1996 NeXT Software, Inc. All rights reserved.
+ *
+ * File: architecture/ppc/fp_regs.h
+ * Author: Doug Mitchell, NeXT Software, Inc.
+ *
+ * ppc floating point registers.
+ *
+ * HISTORY
+ * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com)
+ * Ported from m98k.
+ * 05-Nov-92 Doug Mitchell at NeXT + * Created. + */ + +#ifndef _ARCH_PPC_FP_REGS_H_ +#define _ARCH_PPC_FP_REGS_H_ + +#include + +#if !defined(__ASSEMBLER__) +/* + * Floating point status and control register. + * + * This struct is aligned to an 8-byte boundary because 64-bit + * load/store instructions (lfd/stfd) are used to access it. The + * FPSCR can only be read/written through other FP registers. + */ +typedef struct { + unsigned unused[1] __attribute__(( aligned(8) )); + unsigned fx:BIT_WIDTH(31), // exception summary + fex:BIT_WIDTH(30), // enabled exception summary + vx:BIT_WIDTH(29), // invalid op exception + // summary + ox:BIT_WIDTH(28), // overflow exception + ux:BIT_WIDTH(27), // underflow exception + zx:BIT_WIDTH(26), // divide by zero exception + xx:BIT_WIDTH(25), // inexact exception + vx_snan:BIT_WIDTH(24), // not a number exception + vx_isi:BIT_WIDTH(23), // exception + vx_idi:BIT_WIDTH(22), // exception + vx_zdz:BIT_WIDTH(21), // exception + vx_imz:BIT_WIDTH(20), // exception + vx_xvc:BIT_WIDTH(19), // exception + fr:BIT_WIDTH(18), // fraction rounded + fi:BIT_WIDTH(17), // fraction inexact + class:BIT_WIDTH(16), // class descriptor + fl:BIT_WIDTH(15), // negative + fg:BIT_WIDTH(14), // positive + fe:BIT_WIDTH(13), // equal or zero + fu:BIT_WIDTH(12), // not a number + rsvd1:BIT_WIDTH(11), // reserved + vx_soft:BIT_WIDTH(10), // software request exception + rsvd2:BIT_WIDTH(9), // reserved + vx_cvi:BIT_WIDTH(8), // invalid integer convert + // exception + ve:BIT_WIDTH(7), // invalid op exception enable + oe:BIT_WIDTH(6), // overflow exception enable + ue:BIT_WIDTH(5), // underflow exception enable + ze:BIT_WIDTH(4), // divide by zero exception + // enable + xe:BIT_WIDTH(3), // inexact exception enable + ni:BIT_WIDTH(2), // non-IEEE exception enable + rn:BITS_WIDTH(1,0); // rounding control +} ppc_fp_scr_t; + +/* + * Values for fp_scr_t.rn (rounding control). 
+ */ +typedef enum { + RN_NEAREST = 0, + RN_TOWARD_ZERO = 1, + RN_TOWARD_PLUS = 2, + RN_TOWARD_MINUS = 3 +} ppc_fp_rn_t; + +/* + * ppc_fpf_t -- data types that MAY be in floating point register file + * Actual data types supported is implementation dependent + */ +typedef union { + float f; // 32 bit IEEE single + double d; // 64 bit IEEE double + + /* + * Insure compiler aligns struct appropriately + */ + unsigned x[2] __attribute__(( aligned(8) )); +} ppc_fpf_t; + +/* + * Number of FP registers. + */ +#define PPC_NFP_REGS 32 + +/* + * Read/write FPSCR. + * FIXME - these don't work, you need to go thru a fp register. + */ +typedef union { + double __dbl; + ppc_fp_scr_t __scr; +} __fp_un_t; + +static __inline__ ppc_fp_scr_t +get_fp_scr() +{ + __fp_un_t __fp_un; + + __asm__ volatile ("mffs. %0 /* mffs */" \ + : "=f" (__fp_un.__dbl)); + return (__fp_un.__scr); +} + +static __inline__ void +set_fp_scr(ppc_fp_scr_t fp_scr) +{ + __fp_un_t __fp_un; + + __fp_un.__scr = fp_scr; + __asm__ volatile ("mtfsf 0xff, %0; /* mtfsf */ " \ + : : "f" (__fp_un.__dbl)); +} + +#endif /* ! __ASSEMBLER__ */ + +#endif /* _ARCH_PPC_FP_REGS_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/ppc/macro_help.h b/EXTERNAL_HEADERS/architecture/ppc/macro_help.h new file mode 100644 index 000000000..17152ca39 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/macro_help.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * Copyright (c) 1988 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + * + * File: architecture/ppc/macro_help.h + * + * Provide help in making lint-free macro routines + * + * HISTORY + * + * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com) + * Created from m98k version. + */ + +#ifndef _ARCH_PPC_MACRO_HELP_H_ +#define _ARCH_PPC_MACRO_HELP_H_ + +#ifndef MACRO_BEGIN +# define MACRO_BEGIN do { +#endif /* MACRO_BEGIN */ + +#ifndef MACRO_END +# define MACRO_END } while (0) +#endif /* MACRO_END */ + +#ifndef MACRO_RETURN +# define MACRO_RETURN if (1) return +#endif /* MACRO_RETURN */ + +#endif /* _ARCH_PPC_MACRO_HELP_H_ */ + diff --git a/EXTERNAL_HEADERS/architecture/ppc/pseudo_inst.h b/EXTERNAL_HEADERS/architecture/ppc/pseudo_inst.h new file mode 100644 index 000000000..c2514014e --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/pseudo_inst.h @@ -0,0 +1,414 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1996 NeXT Software, Inc. All rights reserved. + * + * File: architecture/ppc/pseudo_inst.h + * Author: Mike DeMoney + * + * This header file defines assembler pseudo-instruction macros for + * for the ppc. + * + * NOTE: This is obviously only useful to include in assembly + * code source. + * + * ALSO NOTE: These macros don't attempt to be 64-bit compatable + * + * HISTORY + * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com) + * Ported from m98k. + * 05-Nov-92 Mike DeMoney (mike@next.com) + * Created. + */ + +#ifndef _ARCH_PPC_PSEUDO_INST_H_ +#define _ARCH_PPC_PSEUDO_INST_H_ + +#include +#include + +#ifdef __ASSEMBLER__ + +/* + * Pseudo instruction definitions + */ + +/* + * Macro package initialization + */ + .set __no_at,0 /* allow at by default */ + +/* + * .at_off -- disable use of at by macros + * .at_on -- enable use of at by macros + */ +.macro .at_off + .set __no_at,1 +.endmacro + +.macro .at_on + .set __no_at,0 +.endmacro + +/* + * li32 rD,IMMED + * + * Load 32-bit immediate into rD + * FIXME: Need a way to undefine built-in macro for this. 
+ */ +.macro li32 // li32 rD,immed +.if $n != 2 + .abort "invalid operands of li32" +.endif +.abs __is_abs,$1 +.if !__is_abs + addis $0,0,hi16($1) + ori $0,$0,lo16($1) +.elseif $1 == 0 + addi $0,0,0 +.elseif ($1 & 0xffff) == 0 + addis $0,0,hi16($1) +.elseif ($1 & 0xffff8000) == 0 + addi $0,0,$1 +.elseif ($1 & 0xffff8000) == 0xffff8000 + addi $0,0,$1 +.else + addis $0,0,hi16($1) + ori $0,$0,lo16($1) +.endif +.endmacro + + +/* + * andi32. rD,rS1,IMMED + * + * Perform "andi." with (possibly) 32-bit immediate + */ +.macro andi32. // andi32. rD,rS1,IMMED +.if $n != 3 + .abort "invalid operands of andi." +.endif + .set __used_at,0 +.abs __is_abs,$2 +.if !__is_abs + .set __used_at,1 + li32 at,$2 + and. $0,$1,at +.elseif ($2 & 0xffff0000) == 0 + andi. $0,$1,$2 +.elseif ($2 & 0xffff) == 0 + andis. $0,$1,hi16($2) +.else + .set __used_at,1 + li32 at,$2 + and. $0,$1,at +.endif +.if __no_at & __used_at + .abort "Macro uses at while .no_at in effect" +.endif +.endmacro + +/* + * ori32 rD,rS1,IMMED + * + * Perform "ori" with (possibly) 32-bit immediate + */ +.macro ori32 // ori32 rD,rS1,IMMED +.if $n != 3 + .abort "invalid operands of ori" +.endif +.abs __is_abs,$2 +.if !__is_abs + oris $0,$1,hi16($2) + ori $0,$1,lo16($2) +.elseif ($2 & 0xffff0000) == 0 + ori $0,$1,$2 +.elseif ($2 & 0xffff) == 0 + oris $0,$1,hi16($2) +.else + oris $0,$1,hi16($2) + ori $0,$1,lo16($2) +.endif +.endmacro + +/* + * xori32 rD,rS1,IMMED + * + * Perform "xor" with (possibly) 32-bit immediate + */ +.macro xori32 // xori32 rD,rS1,IMMED +.if $n != 3 + .abort "invalid operands of xori" +.endif +.abs __is_abs,$2 +.if !__is_abs + xoris $0,$1,hi16($2) + xori $0,$1,lo16($2) +.elseif ($2 & 0xffff0000) == 0 + xori $0,$1,$2 +.elseif ($2 & 0xffff) == 0 + xoris $0,$1,hi16($2) +.else + xoris $0,$1,hi16($2) + xori $0,$1,lo16($2) +.endif +.endmacro + + +/* + * MEMREF_INST -- macros to memory referencing instructions + * "capable" of dealing with 32 bit offsets. 
+ * + * NOTE: Because the assembler doesn't have any mechanism for easily + * parsing the d(rS) syntax of register-displacement form instructions, + * these instructions do NOT mirror the normal memory reference + * instructions. The following "transformation" is used: + * lbz rD,d(rS) + * becomes: + * lbz32 rD,rS,d + * I.e.: "32" is appended to the instruction name and the base register + * and displacement become the 2'nd and 3'rd comma-separated operands. + * + * The forms: + * lbz32 rD,d + * and: + * lbz32 rD,rS + * are also recognized and the missing operand is assumed 0. + * + * ALSO NOTE: r0 or zt should never be used as rS in these instructions. + * Use "0" as rS in this case. + */ +#define MEMREF_INST(op) \ +.macro op ## 32 @\ +.set __used_at,0 @\ +.if $n == 3 @\ + .greg __is_greg,$1 @\ + .abs __is_abs,$2 @\ + .if __is_abs @\ + .if ($2 & 0xffff8000) == 0 @\ + op $0,$2($1) @\ + .elseif ($2 & 0xffff8000) == 0xffff8000 @\ + op $0,$2($1) @\ + .else @\ + .if !__is_greg @\ + .set __used_at,1 @\ + lis at,ha16($2) @\ + op $0,lo16($2)(at) @\ + .else @\ + .set __used_at,1 @\ + lis at,ha16($2) @\ + add at,at,$1 @\ + op $0,lo16($2)(at) @\ + .endif @\ + .endif @\ + .else @\ + .if !__is_greg @\ + .set __used_at,1 @\ + lis at,ha16($2) @\ + op $0,lo16($2)(at) @\ + .else @\ + .set __used_at,1 @\ + lis at,ha16($2) @\ + add at,at,$1 @\ + op $0,lo16($2)(at) @\ + .endif @\ + .endif @\ +.elseif $n == 2 @\ + .greg __is_greg,$1 @\ + .if !__is_greg @\ + .abs __is_abs,$1 @\ + .if __is_abs @\ + .if ($1 & 0xffff8000) == 0 @\ + op $0,$1(0) @\ + .elseif ($1 & 0xffff8000) == 0xffff8000 @\ + op $0,$1(0) @\ + .else @\ + .set __used_at,1 @\ + lis at,ha16($1) @\ + op $0,lo16($1)(at) @\ + .endif @\ + .else @\ + .set __used_at,1 @\ + lis at,ha16($1) @\ + op $0,lo16($1)(at) @\ + .endif @\ + .else @\ + op $0,0($1) @\ + .endif @\ +.else @\ + .abort "Invalid operands of " #op "32" @\ +.endif @\ +.if __no_at & __used_at @\ + .abort "Macro uses at while .no_at in effect" @\ +.endif @\ +.endmacro + 
+MEMREF_INST(lbz) +MEMREF_INST(lhz) +MEMREF_INST(lha) +MEMREF_INST(lwz) +MEMREF_INST(lwa) +MEMREF_INST(ld) + +MEMREF_INST(stb) +MEMREF_INST(sth) +MEMREF_INST(stw) +MEMREF_INST(std) + +MEMREF_INST(lmw) +MEMREF_INST(lmd) +MEMREF_INST(stmw) +MEMREF_INST(stmd) + +/* + * ARITH_INST -- define 32-bit immediate forms of arithmetic + * instructions + * + * E.g. addi32 rD,rS,IMMED + */ +#define ARITH_INST(op, op3, sf) \ +.macro op ## 32 ## sf @\ +.if $n != 3 @\ + .abort "invalid operands to " #op "32" @\ +.endif @\ +.abs __is_abs,$2 @\ +.if __is_abs @\ + .if ($2 & 0xffff8000) == 0 @\ + op##sf $0,$1,$2 @\ + .elseif ($2 & 0xffff8000) == 0xffff8000 @\ + op##sf $0,$1,$2 @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$2 @\ + op3##sf $0,$1,at @\ + .endif @\ +.elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ +.else @\ + li32 at,$2 @\ + op3##sf $0,$1,at @\ +.endif @\ +.endmacro + +ARITH_INST(addi, add, ) +ARITH_INST(subi, sub, ) +ARITH_INST(addic, addc, ) +ARITH_INST(subic, subc, ) +ARITH_INST(addic, addc, .) +ARITH_INST(subic, subc, .) +ARITH_INST(mulli, mull, ) + +/* + * CMPEX_INST -- define 32-bit immediate forms of extended compare + * instructions + * + * E.g. 
cmpwi32 cr3,rS,IMMED + * cmpwi32 rS,IMMED + */ +#define CMPEX_INST(op, op3) \ +.macro op ## 32 @\ +.if $n == 3 @\ + .abs __is_abs,$2 @\ + .if __is_abs @\ + .if ($2 & 0xffff8000) == 0 @\ + op $0,$1,$2 @\ + .elseif ($2 & 0xffff8000) == 0xffff8000 @\ + op $0,$1,$2 @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$2 @\ + op3 $0,$1,at @\ + .endif @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$2 @\ + op3 $0,$1,at @\ + .endif @\ +.elseif $n == 2 @\ + .abs __is_abs,$1 @\ + .if __is_abs @\ + .if ($1 & 0xffff8000) == 0 @\ + op $0,$1 @\ + .elseif ($1 & 0xffff8000) == 0xffff8000 @\ + op $0,$1 @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$1 @\ + op3 $0,at @\ + .endif @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$1 @\ + op3 $0,at @\ + .endif @\ +.else @\ + .abort "invalid operands to " #op "32" @\ +.endif @\ +.endmacro + +CMPEX_INST(cmpdi, cmpd) +CMPEX_INST(cmpwi, cmpw) +CMPEX_INST(cmpldi, cmpld) +CMPEX_INST(cmplwi, cmplw) + +/* + * CMP_INST -- define 32-bit immediate forms of standard compare + * instructions + * + * E.g. 
cmpi32 cr3,0,rS,IMMED + */ +#define CMP_INST(op, op3) \ +.macro op ## 32 @\ +.if $n == 4 @\ + .abs __is_abs,$3 @\ + .if __is_abs @\ + .if ($3 & 0xffff8000) == 0 @\ + op $0,$1,$2,$3 @\ + .elseif ($3 & 0xffff8000) == 0xffff8000 @\ + op $0,$1,$2,$3 @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$3 @\ + op3 $0,$1,$2,at @\ + .endif @\ + .elseif __no_at @\ + .abort "Macro uses at while .no_at in effect" @\ + .else @\ + li32 at,$3 @\ + op3 $0,$1,$2,at @\ + .endif @\ +.else @\ + .abort "invalid operands to " #op "32" @\ +.endif @\ +.endmacro + +CMP_INST(cmpi, cmp) +CMP_INST(cmpli, cmpl) + +#endif /* __ASSEMBLER__ */ + +#endif /* _ARCH_PPC_PSEUDO_INST_H_ */ diff --git a/EXTERNAL_HEADERS/architecture/ppc/reg_help.h b/EXTERNAL_HEADERS/architecture/ppc/reg_help.h new file mode 100644 index 000000000..d9d1b5ec9 --- /dev/null +++ b/EXTERNAL_HEADERS/architecture/ppc/reg_help.h @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1996 NeXT Software, Inc. All rights reserved. 
+ * + * File: architecture/ppc/reg_help.h + * Author: Doug Mitchell, NeXT Computer, Inc. + * + * m98k-specific macros and inlines for defining machine registers. + * + * HISTORY + * 05-Nov-92 Doug Mitchell at NeXT + * Created. + * + * 29-Dec-96 Umesh Vaishampayan (umeshv@NeXT.com) + * Ported from m98k. Removed dependency on nrw directory. + * Merged code from architecture/nrw/reg_help.h. + * Moved Register Usage #defines from asm_help.h in here. + */ + +#ifndef _ARCH_PPC_REG_HELP_H_ +#define _ARCH_PPC_REG_HELP_H_ + +#if defined(__ASSEMBLER__) +/* + * GRF Register Usage Aliases + */ +#define zt r0 // architecturally 0 for mem refs only! + // real reg other inst, caller-saved +#define sp r1 // stack pointer, callee-saved +#define toc r2 // tbl of contents, callee-saved +#define a0 r3 // arg 0, return value 0, caller saved +#define a1 r4 // arg 1, return value 1, caller saved +#define a2 r5 // .... +#define a3 r6 +#define a4 r7 +#define a5 r8 +#define a6 r9 +#define a7 r10 // arg 7, return value 7, caller saved +#define ep r11 // environment ptr, caller saved +#define at r12 // assembler temp, caller saved +#define s17 r13 // callee-saved 17 +#define s16 r14 +#define s15 r15 +#define s14 r16 +#define s13 r17 +#define s12 r18 +#define s11 r19 +#define s10 r20 +#define s9 r21 +#define s8 r22 +#define s7 r23 +#define s6 r24 +#define s5 r25 +#define s4 r26 +#define s3 r27 +#define s2 r28 +#define s1 r29 // .... +#define s0 r30 // callee-saved 0 +#define fp r31 // frame-pointer, callee-saved + +/* + * Conversion of GRF aliases to register numbers + */ +#define GRF_ZT 0 // architecturally 0 for mem refs only! + // real reg other inst, caller-saved +#define GRF_SP 1 // stack pointer, callee-saved +#define GRF_TOC 2 // tbl of contents, callee-saved +#define GRF_A0 3 // arg 0, return value 0, caller saved +#define GRF_A1 4 // arg 1, return value 1, caller saved +#define GRF_A2 5 // .... 
+#define GRF_A3 6 +#define GRF_A4 7 +#define GRF_A5 8 +#define GRF_A6 9 +#define GRF_A7 10 // arg 7, return value 7, caller saved +#define GRF_EP 11 // environment ptr, caller saved +#define GRF_AT 12 // assembler temp, caller saved +#define GRF_S17 13 // callee-saved 17 +#define GRF_S16 14 +#define GRF_S15 15 +#define GRF_S14 16 +#define GRF_S13 17 +#define GRF_S12 18 +#define GRF_S11 19 +#define GRF_S10 20 +#define GRF_S9 21 +#define GRF_S8 22 +#define GRF_S7 23 +#define GRF_S6 24 +#define GRF_S5 25 +#define GRF_S4 26 +#define GRF_S3 27 +#define GRF_S2 28 +#define GRF_S1 29 // .... +#define GRF_S0 30 // callee-saved 0 +#define GRF_FP 31 // frame pointer, callee-saved + +/* + * FPF Register names + */ +#define ft0 f0 // scratch reg, caller-saved +#define fa0 f1 // fp arg 0, return 0, caller-saved +#define fa1 f2 // fp arg 1, caller-saved +#define fa2 f3 // fp arg 2, caller-saved +#define fa3 f4 +#define fa4 f5 +#define fa5 f6 +#define fa6 f7 +#define fa7 f8 +#define fa8 f9 +#define fa9 f10 +#define fa10 f11 +#define fa11 f12 +#define fa12 f13 // fp arg 12, caller-saved +#define fs17 f14 // callee-saved 17 +#define fs16 f15 +#define fs15 f16 +#define fs14 f17 +#define fs13 f18 +#define fs12 f19 +#define fs11 f20 +#define fs10 f21 +#define fs9 f22 +#define fs8 f23 +#define fs7 f24 +#define fs6 f25 +#define fs5 f26 +#define fs4 f27 +#define fs3 f28 +#define fs2 f29 +#define fs1 f30 +#define fs0 f31 // callee-saved 0 + +/* + * Conversion of FPF aliases to register numbers + */ +#define FPF_FT0 0 // scratch reg, caller-saved +#define FPF_FA0 1 // fp arg 0, return 0, caller-saved +#define FPF_FA1 2 // fp arg 1, caller-saved +#define FPF_FA2 3 // fp arg 2, caller-saved +#define FPF_FA3 4 +#define FPF_FA4 5 +#define FPF_FA5 6 +#define FPF_FA6 7 +#define FPF_FA7 8 +#define FPF_FA8 9 +#define FPF_FA9 10 +#define FPF_FA10 11 +#define FPF_FA11 12 +#define FPF_FA12 13 // fp arg 12, caller-saved +#define FPF_FS17 14 // callee-saved 17 +#define FPF_FS16 15 +#define FPF_FS15 16 
+#define FPF_FS14 17 +#define FPF_FS13 18 +#define FPF_FS12 19 +#define FPF_FS11 20 +#define FPF_FS10 21 +#define FPF_FS9 22 +#define FPF_FS8 23 +#define FPF_FS7 24 +#define FPF_FS6 25 +#define FPF_FS5 26 +#define FPF_FS4 27 +#define FPF_FS3 28 +#define FPF_FS2 29 +#define FPF_FS1 30 +#define FPF_FS0 31 // callee-saved 0 + +#endif /* __ASSEMBLER__ */ + + +/* Bitfield definition aid */ +#define BITS_WIDTH(msb, lsb) ((msb)-(lsb)+1) +#define BIT_WIDTH(pos) (1) /* mostly to record the position */ + +/* Mask creation */ +#define MKMASK(width, offset) (((unsigned)-1)>>(32-(width))<<(offset)) +#define BITSMASK(msb, lsb) MKMASK(BITS_WIDTH(msb, lsb), lsb & 0x1f) +#define BITMASK(pos) MKMASK(BIT_WIDTH(pos), pos & 0x1f) + +/* Register addresses */ +#if __ASSEMBLER__ +# define REG_ADDR(type, addr) (addr) +#else /* ! __ASSEMBLER__ */ +# define REG_ADDR(type, addr) (*(volatile type *)(addr)) +#endif /* __ASSEMBLER__ */ + +/* Cast a register to be an unsigned */ +/* CAUTION : non naturally aligned foo can result into alignment traps + * use at own risk. + */ +#define CONTENTS(foo) (*(unsigned *) &(foo)) + +/* STRINGIFY -- perform all possible substitutions, then stringify */ +#define __STR(x) #x /* just a helper macro */ +#define STRINGIFY(x) __STR(x) + +/* + * Stack pointer must always be a multiple of 16 + */ +#define STACK_INCR 16 +#define ROUND_FRAME(x) ((((unsigned)(x)) + STACK_INCR - 1) & ~(STACK_INCR-1)) + +#endif /* _ARCH_PPC_REG_HELP_H_ */ diff --git a/EXTERNAL_HEADERS/bsd/i386/ansi.h b/EXTERNAL_HEADERS/bsd/i386/ansi.h new file mode 100644 index 000000000..5689939b2 --- /dev/null +++ b/EXTERNAL_HEADERS/bsd/i386/ansi.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ansi.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _ANSI_H_ +#define _ANSI_H_ + +/* + * Types which are fundamental to the implementation and may appear in + * more than one standard header are defined here. Standard headers + * then use: + * #ifdef _BSD_SIZE_T_ + * typedef _BSD_SIZE_T_ size_t; + * #undef _BSD_SIZE_T_ + * #endif + */ +#define _BSD_CLOCK_T_ unsigned long /* clock() */ +#if defined(__GNUC__) && defined(__PTRDIFF_TYPE__) && defined(__SIZE_TYPE__) +#define _BSD_PTRDIFF_T_ __PTRDIFF_TYPE__ /* ptr1 - ptr2 */ +#define _BSD_SIZE_T_ __SIZE_TYPE__ /* sizeof() */ +#else +#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */ +#define _BSD_SIZE_T_ unsigned long /* sizeof() */ +#endif /* __GNUC__ */ +#define _BSD_SSIZE_T_ int /* byte count or error */ +#define _BSD_TIME_T_ long /* time() */ +#define _BSD_VA_LIST_ void * /* va_list */ + +/* + * Runes (wchar_t) is declared to be an ``int'' instead of the more natural + * ``unsigned long'' or ``long''. Two things are happening here. It is not + * unsigned so that EOF (-1) can be naturally assigned to it and used. Also, + * it looks like 10646 will be a 31 bit standard. This means that if your + * ints cannot hold 32 bits, you will be in trouble. The reason an int was + * chosen over a long is that the is*() and to*() routines take ints (says + * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you + * lose a bit of ANSI conformance, but your programs will still work. 
+ * + * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t + * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T remains + * defined for ctype.h. + */ +#if defined(__GNUC__) && defined(__WCHAR_TYPE__) +#define _BSD_WCHAR_T_ __WCHAR_TYPE__ /* wchar_t */ +#define _BSD_RUNE_T_ __WCHAR_TYPE__ /* rune_t */ +#else +#define _BSD_WCHAR_T_ int /* wchar_t */ +#define _BSD_RUNE_T_ int /* rune_t */ +#endif /* __GNUC__ */ + +#endif /* _ANSI_H_ */ diff --git a/EXTERNAL_HEADERS/bsd/i386/limits.h b/EXTERNAL_HEADERS/bsd/i386/limits.h new file mode 100644 index 000000000..64eecd5a2 --- /dev/null +++ b/EXTERNAL_HEADERS/bsd/i386/limits.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)limits.h 8.3 (Berkeley) 1/4/94 + */ +/* + * HISTORY + * + * 10-July-97 Umesh Vaishampayan (umeshv@apple.com) + * Avoid multiple includes. 
+ */ + +#ifndef _I386_LIMITS_H_ +#define _I386_LIMITS_H_ + +#define CHAR_BIT 8 /* number of bits in a char */ +#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */ + + +#define CLK_TCK 100 /* ticks per second */ + +/* + * According to ANSI (section 2.2.4.2), the values below must be usable by + * #if preprocessing directives. Additionally, the expression must have the + * same type as would an expression that is an object of the corresponding + * type converted according to the integral promotions. The subtraction for + * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an + * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2). + * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values + * are written as hex so that GCC will be quiet about large integer constants. + */ +#define SCHAR_MAX 127 /* max value for a signed char */ +#define SCHAR_MIN (-128) /* min value for a signed char */ + +#define UCHAR_MAX 255 /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ + +#define USHRT_MAX 65535 /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ + +#define UINT_MAX 0xffffffff /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ + +#define ULONG_MAX 0xffffffff /* max value for an unsigned long */ +#define LONG_MAX 2147483647 /* max value for a long */ +#define LONG_MIN (-2147483647-1) /* min value for a long */ + +#if !defined(_ANSI_SOURCE) +#define SSIZE_MAX INT_MAX /* max value for a ssize_t */ + +#if !defined(_POSIX_SOURCE) +#define SIZE_T_MAX UINT_MAX /* max value for a size_t */ + +/* GCC requires that quad constants be written as expressions. 
*/ +#define UQUAD_MAX ((u_quad_t)0-1) /* max value for a uquad_t */ + /* max value for a quad_t */ +#define QUAD_MAX ((quad_t)(UQUAD_MAX >> 1)) +#define QUAD_MIN (-QUAD_MAX-1) /* min value for a quad_t */ + +#endif /* !_POSIX_SOURCE */ +#endif /* !_ANSI_SOURCE */ + +#endif /* _I386_LIMITS_H_ */ diff --git a/EXTERNAL_HEADERS/bsd/ppc/ansi.h b/EXTERNAL_HEADERS/bsd/ppc/ansi.h new file mode 100644 index 000000000..8e1d219a4 --- /dev/null +++ b/EXTERNAL_HEADERS/bsd/ppc/ansi.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ansi.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _ANSI_H_ +#define _ANSI_H_ + +/* + * Types which are fundamental to the implementation and may appear in + * more than one standard header are defined here. 
Standard headers + * then use: + * #ifdef _BSD_SIZE_T_ + * typedef _BSD_SIZE_T_ size_t; + * #undef _BSD_SIZE_T_ + * #endif + */ +#define _BSD_CLOCK_T_ unsigned long /* clock() */ +#if defined(__GNUC__) && defined(__PTRDIFF_TYPE__) && defined(__SIZE_TYPE__) +#define _BSD_PTRDIFF_T_ __PTRDIFF_TYPE__ /* ptr1 - ptr2 */ +#define _BSD_SIZE_T_ __SIZE_TYPE__ /* sizeof() */ +#else +#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */ +#define _BSD_SIZE_T_ unsigned long /* sizeof() */ +#endif /* __GNUC__ */ +#define _BSD_SSIZE_T_ int /* byte count or error */ +#define _BSD_TIME_T_ long /* time() */ +#define _BSD_VA_LIST_ char * /* va_list */ + +/* + * Runes (wchar_t) is declared to be an ``int'' instead of the more natural + * ``unsigned long'' or ``long''. Two things are happening here. It is not + * unsigned so that EOF (-1) can be naturally assigned to it and used. Also, + * it looks like 10646 will be a 31 bit standard. This means that if your + * ints cannot hold 32 bits, you will be in trouble. The reason an int was + * chosen over a long is that the is*() and to*() routines take ints (says + * ANSI C), but they use _RUNE_T_ instead of int. By changing it here, you + * lose a bit of ANSI conformance, but your programs will still work. + * + * Note that _WCHAR_T_ and _RUNE_T_ must be of the same type. When wchar_t + * and rune_t are typedef'd, _WCHAR_T_ will be undef'd, but _RUNE_T remains + * defined for ctype.h. + */ +#if defined(__GNUC__) && defined(__WCHAR_TYPE__) +#define _BSD_WCHAR_T_ __WCHAR_TYPE__ /* wchar_t */ +#define _BSD_RUNE_T_ __WCHAR_TYPE__ /* rune_t */ +#else +#define _BSD_WCHAR_T_ int /* wchar_t */ +#define _BSD_RUNE_T_ int /* rune_t */ +#endif /* __GNUC__ */ + +#endif /* _ANSI_H_ */ diff --git a/EXTERNAL_HEADERS/bsd/ppc/limits.h b/EXTERNAL_HEADERS/bsd/ppc/limits.h new file mode 100644 index 000000000..a5bc0b5df --- /dev/null +++ b/EXTERNAL_HEADERS/bsd/ppc/limits.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)limits.h 8.3 (Berkeley) 1/4/94 + */ +/* + * HISTORY + * + * 10-July-97 Umesh Vaishampayan (umeshv@apple.com) + * Fixed conflicts with float.h. Avoid multiple includes. + */ + +#ifndef _PPC_LIMITS_H_ +#define _PPC_LIMITS_H_ + +#define CHAR_BIT 8 /* number of bits in a char */ +#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */ + +#define CLK_TCK 100 /* ticks per second */ + +/* + * According to ANSI (section 2.2.4.2), the values below must be usable by + * #if preprocessing directives. Additionally, the expression must have the + * same type as would an expression that is an object of the corresponding + * type converted according to the integral promotions. The subtraction for + * INT_MIN and LONG_MIN is so the value is not unsigned; 2147483648 is an + * unsigned int for 32-bit two's complement ANSI compilers (section 3.1.3.2). + * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values + * are written as hex so that GCC will be quiet about large integer constants. 
+ */ +#define SCHAR_MAX 127 /* max value for a signed char */ +#define SCHAR_MIN (-128) /* min value for a signed char */ + +#define UCHAR_MAX 255 /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ + +#define USHRT_MAX 65535 /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ + +#define UINT_MAX 0xffffffff /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ + +#define ULONG_MAX 0xffffffff /* max value for an unsigned long */ +#define LONG_MAX 2147483647 /* max value for a long */ +#define LONG_MIN (-2147483647-1) /* min value for a long */ + +#if !defined(_ANSI_SOURCE) +#define SSIZE_MAX INT_MAX /* max value for a ssize_t */ + +#if !defined(_POSIX_SOURCE) && !defined(_XOPEN_SOURCE) +#define SIZE_T_MAX UINT_MAX /* max value for a size_t */ + +#define UQUAD_MAX 0xffffffffffffffffULL /* max unsigned quad */ +#define QUAD_MAX 0x7fffffffffffffffLL /* max signed quad */ +#define QUAD_MIN (-0x7fffffffffffffffLL-1) /* min signed quad */ + +#endif /* !_POSIX_SOURCE && !_XOPEN_SOURCE */ +#endif /* !_ANSI_SOURCE */ + +#if (!defined(_ANSI_SOURCE)&&!defined(_POSIX_SOURCE)) || defined(_XOPEN_SOURCE) +#define LONG_BIT 32 +#define WORD_BIT 32 +#endif /* (!(_ANSI_SOURCE) && !(_POSIX_SOURCE)) || (_XOPEN_SOURCE) */ + +#endif /* _PPC_LIMITS_H_ */ diff --git a/EXTERNAL_HEADERS/mach-o/fat.h b/EXTERNAL_HEADERS/mach-o/fat.h new file mode 100644 index 000000000..3558e4acf --- /dev/null +++ b/EXTERNAL_HEADERS/mach-o/fat.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This header file describes the structures of the file format for "fat" + * architecture specific file (wrapper design). At the begining of the file + * there is one fat_header structure followed by a number of fat_arch + * structures. For each architecture in the file, specified by a pair of + * cputype and cpusubtype, the fat_header describes the file offset, file + * size and alignment in the file of the architecture specific member. + * The padded bytes in the file to place each member on it's specific alignment + * are defined to be read as zeros and can be left as "holes" if the file system + * can support them as long as they read as zeros. + * + * All structures defined here are always written and read to/from disk + * in big-endian order. + */ + +/* + * is needed here for the cpu_type_t and cpu_subtype_t types + * and contains the constants for the possible values of these types. 
+ */ +#include +#include + +#define FAT_MAGIC 0xcafebabe +#define FAT_CIGAM NXSwapLong(FAT_MAGIC) + +struct fat_header { + unsigned long magic; /* FAT_MAGIC */ + unsigned long nfat_arch; /* number of structs that follow */ +}; + +struct fat_arch { + cpu_type_t cputype; /* cpu specifier (int) */ + cpu_subtype_t cpusubtype; /* machine specifier (int) */ + unsigned long offset; /* file offset to this object file */ + unsigned long size; /* size of this object file */ + unsigned long align; /* alignment as a power of 2 */ +}; + diff --git a/EXTERNAL_HEADERS/mach-o/kld.h b/EXTERNAL_HEADERS/mach-o/kld.h new file mode 100644 index 000000000..4a63602cf --- /dev/null +++ b/EXTERNAL_HEADERS/mach-o/kld.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _MACHO_KLD_H_ +#define _MACHO_KLD_H_ + +#include +#include + +/* + * These API's are in libkld. Both kmodload(8) and /mach_kernel should + * link with -lkld and then ld(1) will expand -lkld to libkld.dylib or + * libkld.a depending on if -dynamic or -static is in effect. 
+ */ + +/* + * Note that you must supply the following function for error reporting when + * using any of the functions listed here. + */ +extern void kld_error_vprintf(const char *format, va_list ap); + +/* + * These two are only in libkld.dylib for use by kmodload(8) (user code compiled + * with the default -dynamic). + */ +#ifdef __DYNAMIC__ +__private_extern__ long kld_load_basefile( + const char *base_filename); + +/* Note: this takes only one object file name */ +__private_extern__ long kld_load( + struct mach_header **header_addr, + const char *object_filename, + const char *output_filename); +#endif /* __DYNAMIC__ */ + +/* + * These two are only in libkld.a for use by /mach_kernel (kernel code compiled with + * -static). + */ +#ifdef __STATIC__ +/* Note: this api does not write an output file */ +__private_extern__ long kld_load_from_memory( + struct mach_header **header_addr, + const char *object_name, + char *object_addr, + long object_size); +#endif /* __STATIC__ */ + +__private_extern__ long kld_unload_all( + long deallocate_sets); + +__private_extern__ long kld_lookup( + const char *symbol_name, + unsigned long *value); + +__private_extern__ long kld_forget_symbol( + const char *symbol_name); + +__private_extern__ void kld_address_func( + unsigned long (*func)(unsigned long size, unsigned long headers_size)); + +#endif /* _MACHO_KLD_H_ */ diff --git a/EXTERNAL_HEADERS/mach-o/loader.h b/EXTERNAL_HEADERS/mach-o/loader.h new file mode 100644 index 000000000..4bd6bf4d6 --- /dev/null +++ b/EXTERNAL_HEADERS/mach-o/loader.h @@ -0,0 +1,723 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHO_LOADER_H_ +#define _MACHO_LOADER_H_ + +/* + * This file describes the format of mach object files. + */ + +/* + * is needed here for the cpu_type_t and cpu_subtype_t types + * and contains the constants for the possible values of these types. + */ +#include + +/* + * is needed here for the vm_prot_t type and contains the + * constants that are or'ed together for the possible values of this type. + */ +#include + +/* + * is expected to define the flavors of the thread + * states and the structures of those flavors for each machine. + */ +#include +#include + +/* + * The mach header appears at the very beginning of the object file. + */ +struct mach_header { + unsigned long magic; /* mach magic number identifier */ + cpu_type_t cputype; /* cpu specifier */ + cpu_subtype_t cpusubtype; /* machine specifier */ + unsigned long filetype; /* type of file */ + unsigned long ncmds; /* number of load commands */ + unsigned long sizeofcmds; /* the size of all the load commands */ + unsigned long flags; /* flags */ +}; + +/* Constant for the magic field of the mach_header */ +#define MH_MAGIC 0xfeedface /* the mach magic number */ +#define MH_CIGAM NXSwapInt(MH_MAGIC) + +/* + * The layout of the file depends on the filetype. 
For all but the MH_OBJECT + * file type the segments are padded out and aligned on a segment alignment + * boundary for efficient demand pageing. The MH_EXECUTE, MH_FVMLIB, MH_DYLIB, + * MH_DYLINKER and MH_BUNDLE file types also have the headers included as part + * of their first segment. + * + * The file type MH_OBJECT is a compact format intended as output of the + * assembler and input (and possibly output) of the link editor (the .o + * format). All sections are in one unnamed segment with no segment padding. + * This format is used as an executable format when the file is so small the + * segment padding greatly increases it's size. + * + * The file type MH_PRELOAD is an executable format intended for things that + * not executed under the kernel (proms, stand alones, kernels, etc). The + * format can be executed under the kernel but may demand paged it and not + * preload it before execution. + * + * A core file is in MH_CORE format and can be any in an arbritray legal + * Mach-O file. 
+ * + * Constants for the filetype field of the mach_header + */ +#define MH_OBJECT 0x1 /* relocatable object file */ +#define MH_EXECUTE 0x2 /* demand paged executable file */ +#define MH_FVMLIB 0x3 /* fixed VM shared library file */ +#define MH_CORE 0x4 /* core file */ +#define MH_PRELOAD 0x5 /* preloaded executable file */ +#define MH_DYLIB 0x6 /* dynamicly bound shared library file*/ +#define MH_DYLINKER 0x7 /* dynamic link editor */ +#define MH_BUNDLE 0x8 /* dynamicly bound bundle file */ + +/* Constants for the flags field of the mach_header */ +#define MH_NOUNDEFS 0x1 /* the object file has no undefined + references, can be executed */ +#define MH_INCRLINK 0x2 /* the object file is the output of an + incremental link against a base file + and can't be link edited again */ +#define MH_DYLDLINK 0x4 /* the object file is input for the + dynamic linker and can't be staticly + link edited again */ +#define MH_BINDATLOAD 0x8 /* the object file's undefined + references are bound by the dynamic + linker when loaded. */ +#define MH_PREBOUND 0x10 /* the file has it's dynamic undefined + references prebound. */ + +/* + * The load commands directly follow the mach_header. The total size of all + * of the commands is given by the sizeofcmds field in the mach_header. All + * load commands must have as their first two fields cmd and cmdsize. The cmd + * field is filled in with a constant for that command type. Each command type + * has a structure specifically for it. The cmdsize field is the size in bytes + * of the particular load command structure plus anything that follows it that + * is a part of the load command (i.e. section structures, strings, etc.). To + * advance to the next load command the cmdsize can be added to the offset or + * pointer of the current load command. The cmdsize MUST be a multiple of + * sizeof(long) (this is forever the maximum alignment of any load commands). + * The padded bytes must be zero. 
All tables in the object file must also + * follow these rules so the file can be memory mapped. Otherwise the pointers + * to these tables will not work well or at all on some machines. With all + * padding zeroed like objects will compare byte for byte. + */ +struct load_command { + unsigned long cmd; /* type of load command */ + unsigned long cmdsize; /* total size of command in bytes */ +}; + +/* Constants for the cmd field of all load commands, the type */ +#define LC_SEGMENT 0x1 /* segment of this file to be mapped */ +#define LC_SYMTAB 0x2 /* link-edit stab symbol table info */ +#define LC_SYMSEG 0x3 /* link-edit gdb symbol table info (obsolete) */ +#define LC_THREAD 0x4 /* thread */ +#define LC_UNIXTHREAD 0x5 /* unix thread (includes a stack) */ +#define LC_LOADFVMLIB 0x6 /* load a specified fixed VM shared library */ +#define LC_IDFVMLIB 0x7 /* fixed VM shared library identification */ +#define LC_IDENT 0x8 /* object identification info (obsolete) */ +#define LC_FVMFILE 0x9 /* fixed VM file inclusion (internal use) */ +#define LC_PREPAGE 0xa /* prepage command (internal use) */ +#define LC_DYSYMTAB 0xb /* dynamic link-edit symbol table info */ +#define LC_LOAD_DYLIB 0xc /* load a dynamicly linked shared library */ +#define LC_ID_DYLIB 0xd /* dynamicly linked shared lib identification */ +#define LC_LOAD_DYLINKER 0xe /* load a dynamic linker */ +#define LC_ID_DYLINKER 0xf /* dynamic linker identification */ +#define LC_PREBOUND_DYLIB 0x10 /* modules prebound for a dynamicly */ + /* linked shared library */ + +/* + * A variable length string in a load command is represented by an lc_str + * union. The strings are stored just after the load command structure and + * the offset is from the start of the load command structure. The size + * of the string is reflected in the cmdsize field of the load command. + * Once again any padded bytes to bring the cmdsize field to a multiple + * of sizeof(long) must be zero. 
+ */ +union lc_str { + unsigned long offset; /* offset to the string */ + char *ptr; /* pointer to the string */ +}; + +/* + * The segment load command indicates that a part of this file is to be + * mapped into the task's address space. The size of this segment in memory, + * vmsize, maybe equal to or larger than the amount to map from this file, + * filesize. The file is mapped starting at fileoff to the beginning of + * the segment in memory, vmaddr. The rest of the memory of the segment, + * if any, is allocated zero fill on demand. The segment's maximum virtual + * memory protection and initial virtual memory protection are specified + * by the maxprot and initprot fields. If the segment has sections then the + * section structures directly follow the segment command and their size is + * reflected in cmdsize. + */ +struct segment_command { + unsigned long cmd; /* LC_SEGMENT */ + unsigned long cmdsize; /* includes sizeof section structs */ + char segname[16]; /* segment name */ + unsigned long vmaddr; /* memory address of this segment */ + unsigned long vmsize; /* memory size of this segment */ + unsigned long fileoff; /* file offset of this segment */ + unsigned long filesize; /* amount to map from the file */ + vm_prot_t maxprot; /* maximum VM protection */ + vm_prot_t initprot; /* initial VM protection */ + unsigned long nsects; /* number of sections in segment */ + unsigned long flags; /* flags */ +}; + +/* Constants for the flags field of the segment_command */ +#define SG_HIGHVM 0x1 /* the file contents for this segment is for + the high part of the VM space, the low part + is zero filled (for stacks in core files) */ +#define SG_FVMLIB 0x2 /* this segment is the VM that is allocated by + a fixed VM library, for overlap checking in + the link editor */ +#define SG_NORELOC 0x4 /* this segment has nothing that was relocated + in it and nothing relocated to it, that is + it maybe safely replaced without relocation*/ + +/* + * A segment is made up of zero or 
more sections. Non-MH_OBJECT files have
+ * all of their segments with the proper sections in each, and padded to the
+ * specified segment alignment when produced by the link editor. The first
+ * segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header
+ * and load commands of the object file before its first section. The zero
+ * fill sections are always last in their segment (in all formats). This
+ * allows the zeroed segment padding to be mapped into memory where zero fill
+ * sections might be.
+ *
+ * The MH_OBJECT format has all of its sections in one segment for
+ * compactness. There is no padding to a specified segment boundary and the
+ * mach_header and load commands are not part of the segment.
+ *
+ * Sections with the same section name, sectname, going into the same segment,
+ * segname, are combined by the link editor. The resulting section is aligned
+ * to the maximum alignment of the combined sections and is the new section's
+ * alignment. The combined sections are aligned to their original alignment in
+ * the combined section. Any padded bytes to get the specified alignment are
+ * zeroed.
+ *
+ * The format of the relocation entries referenced by the reloff and nreloc
+ * fields of the section structure for mach object files is described in the
+ * header file <reloc.h>.
+ */ +struct section { + char sectname[16]; /* name of this section */ + char segname[16]; /* segment this section goes in */ + unsigned long addr; /* memory address of this section */ + unsigned long size; /* size in bytes of this section */ + unsigned long offset; /* file offset of this section */ + unsigned long align; /* section alignment (power of 2) */ + unsigned long reloff; /* file offset of relocation entries */ + unsigned long nreloc; /* number of relocation entries */ + unsigned long flags; /* flags (section type and attributes)*/ + unsigned long reserved1; /* reserved */ + unsigned long reserved2; /* reserved */ +}; + +/* + * The flags field of a section structure is separated into two parts a section + * type and section attributes. The section types are mutually exclusive (it + * can only have one type) but the section attributes are not (it may have more + * than one attribute). + */ +#define SECTION_TYPE 0x000000ff /* 256 section types */ +#define SECTION_ATTRIBUTES 0xffffff00 /* 24 section attributes */ + +/* Constants for the type of a section */ +#define S_REGULAR 0x0 /* regular section */ +#define S_ZEROFILL 0x1 /* zero fill on demand section */ +#define S_CSTRING_LITERALS 0x2 /* section with only literal C strings*/ +#define S_4BYTE_LITERALS 0x3 /* section with only 4 byte literals */ +#define S_8BYTE_LITERALS 0x4 /* section with only 8 byte literals */ +#define S_LITERAL_POINTERS 0x5 /* section with only pointers to */ + /* literals */ +/* + * For the two types of symbol pointers sections and the symbol stubs section + * they have indirect symbol table entries. For each of the entries in the + * section the indirect symbol table entries, in corresponding order in the + * indirect symbol table, start at the index stored in the reserved1 field + * of the section structure. 
Since the indirect symbol table entries + * correspond to the entries in the section the number of indirect symbol table + * entries is inferred from the size of the section divided by the size of the + * entries in the section. For symbol pointers sections the size of the entries + * in the section is 4 bytes and for symbol stubs sections the byte size of the + * stubs is stored in the reserved2 field of the section structure. + */ +#define S_NON_LAZY_SYMBOL_POINTERS 0x6 /* section with only non-lazy + symbol pointers */ +#define S_LAZY_SYMBOL_POINTERS 0x7 /* section with only lazy symbol + pointers */ +#define S_SYMBOL_STUBS 0x8 /* section with only symbol + stubs, byte size of stub in + the reserved2 field */ +#define S_MOD_INIT_FUNC_POINTERS 0x9 /* section with only function + pointers for initialization*/ +/* + * Constants for the section attributes part of the flags field of a section + * structure. + */ +#define SECTION_ATTRIBUTES_USR 0xff000000 /* User setable attributes */ +#define S_ATTR_PURE_INSTRUCTIONS 0x80000000 /* section contains only true + machine instructions */ +#define SECTION_ATTRIBUTES_SYS 0x00ffff00 /* system setable attributes */ +#define S_ATTR_SOME_INSTRUCTIONS 0x00000400 /* section contains some + machine instructions */ +#define S_ATTR_EXT_RELOC 0x00000200 /* section has external + relocation entries */ +#define S_ATTR_LOC_RELOC 0x00000100 /* section has local + relocation entries */ + + +/* + * The names of segments and sections in them are mostly meaningless to the + * link-editor. But there are few things to support traditional UNIX + * executables that require the link-editor and assembler to use some names + * agreed upon by convention. + * + * The initial protection of the "__TEXT" segment has write protection turned + * off (not writeable). + * + * The link-editor will allocate common symbols at the end of the "__common" + * section in the "__DATA" segment. It will create the section and segment + * if needed. 
+ */ + +/* The currently known segment names and the section names in those segments */ + +#define SEG_PAGEZERO "__PAGEZERO" /* the pagezero segment which has no */ + /* protections and catches NULL */ + /* references for MH_EXECUTE files */ + + +#define SEG_TEXT "__TEXT" /* the tradition UNIX text segment */ +#define SECT_TEXT "__text" /* the real text part of the text */ + /* section no headers, and no padding */ +#define SECT_FVMLIB_INIT0 "__fvmlib_init0" /* the fvmlib initialization */ + /* section */ +#define SECT_FVMLIB_INIT1 "__fvmlib_init1" /* the section following the */ + /* fvmlib initialization */ + /* section */ + +#define SEG_DATA "__DATA" /* the tradition UNIX data segment */ +#define SECT_DATA "__data" /* the real initialized data section */ + /* no padding, no bss overlap */ +#define SECT_BSS "__bss" /* the real uninitialized data section*/ + /* no padding */ +#define SECT_COMMON "__common" /* the section common symbols are */ + /* allocated in by the link editor */ + +#define SEG_OBJC "__OBJC" /* objective-C runtime segment */ +#define SECT_OBJC_SYMBOLS "__symbol_table" /* symbol table */ +#define SECT_OBJC_MODULES "__module_info" /* module information */ +#define SECT_OBJC_STRINGS "__selector_strs" /* string table */ +#define SECT_OBJC_REFS "__selector_refs" /* string table */ + +#define SEG_ICON "__ICON" /* the NeXT icon segment */ +#define SECT_ICON_HEADER "__header" /* the icon headers */ +#define SECT_ICON_TIFF "__tiff" /* the icons in tiff format */ + +#define SEG_LINKEDIT "__LINKEDIT" /* the segment containing all structs */ + /* created and maintained by the link */ + /* editor. Created with -seglinkedit */ + /* option to ld(1) for MH_EXECUTE and */ + /* FVMLIB file types only */ + +#define SEG_UNIXSTACK "__UNIXSTACK" /* the unix stack segment */ + +/* + * Fixed virtual memory shared libraries are identified by two things. The + * target pathname (the name of the library as found for execution), and the + * minor version number. 
The address of where the headers are loaded is in + * header_addr. + */ +struct fvmlib { + union lc_str name; /* library's target pathname */ + unsigned long minor_version; /* library's minor version number */ + unsigned long header_addr; /* library's header address */ +}; + +/* + * A fixed virtual shared library (filetype == MH_FVMLIB in the mach header) + * contains a fvmlib_command (cmd == LC_IDFVMLIB) to identify the library. + * An object that uses a fixed virtual shared library also contains a + * fvmlib_command (cmd == LC_LOADFVMLIB) for each library it uses. + */ +struct fvmlib_command { + unsigned long cmd; /* LC_IDFVMLIB or LC_LOADFVMLIB */ + unsigned long cmdsize; /* includes pathname string */ + struct fvmlib fvmlib; /* the library identification */ +}; + +/* + * Dynamicly linked shared libraries are identified by two things. The + * pathname (the name of the library as found for execution), and the + * compatibility version number. The pathname must match and the compatibility + * number in the user of the library must be greater than or equal to the + * library being used. The time stamp is used to record the time a library was + * built and copied into user so it can be use to determined if the library used + * at runtime is exactly the same as used to built the program. + */ +struct dylib { + union lc_str name; /* library's path name */ + unsigned long timestamp; /* library's build time stamp */ + unsigned long current_version; /* library's current version number */ + unsigned long compatibility_version;/* library's compatibility vers number*/ +}; + +/* + * A dynamicly linked shared library (filetype == MH_DYLIB in the mach header) + * contains a dylib_command (cmd == LC_ID_DYLIB) to identify the library. + * An object that uses a dynamicly linked shared library also contains a + * dylib_command (cmd == LC_LOAD_DYLIB) for each library it uses. 
+ */ +struct dylib_command { + unsigned long cmd; /* LC_ID_DYLIB or LC_LOAD_DYLIB */ + unsigned long cmdsize; /* includes pathname string */ + struct dylib dylib; /* the library identification */ +}; + +/* + * A program (filetype == MH_EXECUTE) or bundle (filetype == MH_BUNDLE) that is + * prebound to it's dynamic libraries has one of these for each library that + * the static linker used in prebinding. It contains a bit vector for the + * modules in the library. The bits indicate which modules are bound (1) and + * which are not (0) from the library. The bit for module 0 is the low bit + * of the first byte. So the bit for the Nth module is: + * (linked_modules[N/8] >> N%8) & 1 + */ +struct prebound_dylib_command { + unsigned long cmd; /* LC_PREBOUND_DYLIB */ + unsigned long cmdsize; /* includes strings */ + union lc_str name; /* library's path name */ + unsigned long nmodules; /* number of modules in library */ + union lc_str linked_modules; /* bit vector of linked modules */ +}; + +/* + * A program that uses a dynamic linker contains a dylinker_command to identify + * the name of the dynamic linker (LC_LOAD_DYLINKER). And a dynamic linker + * contains a dylinker_command to identify the dynamic linker (LC_ID_DYLINKER). + * A file can have at most one of these. + */ +struct dylinker_command { + unsigned long cmd; /* LC_ID_DYLINKER or LC_LOAD_DYLINKER */ + unsigned long cmdsize; /* includes pathname string */ + union lc_str name; /* dynamic linker's path name */ +}; + +/* + * Thread commands contain machine-specific data structures suitable for + * use in the thread state primitives. The machine specific data structures + * follow the struct thread_command as follows. + * Each flavor of machine specific data structure is preceded by an unsigned + * long constant for the flavor of that data structure, an unsigned long + * that is the count of longs of the size of the state data structure and then + * the state data structure follows. 
This triple may be repeated for many
+ * flavors. The constants for the flavors, counts and state data structure
+ * definitions are expected to be in the header file <machine/thread_status.h>.
+ * These machine specific data structures sizes must be multiples of
+ * sizeof(long). The cmdsize reflects the total size of the thread_command
+ * and all of the sizes of the constants for the flavors, counts and state
+ * data structures.
+ *
+ * For executable objects that are unix processes there will be one
+ * thread_command (cmd == LC_UNIXTHREAD) created for it by the link-editor.
+ * This is the same as a LC_THREAD, except that a stack is automatically
+ * created (based on the shell's limit for the stack size). Command arguments
+ * and environment variables are copied onto that stack.
+ */
+struct thread_command {
+	unsigned long	cmd;		/* LC_THREAD or LC_UNIXTHREAD */
+	unsigned long	cmdsize;	/* total size of this command */
+	/* unsigned long flavor		   flavor of thread state */
+	/* unsigned long count		   count of longs in thread state */
+	/* struct XXX_thread_state state   thread state for this flavor */
+	/* ... */
+};
+
+/*
+ * The symtab_command contains the offsets and sizes of the link-edit 4.3BSD
+ * "stab" style symbol table information as described in the header files
+ * <nlist.h> and <stab.h>.
+ */
+struct symtab_command {
+	unsigned long	cmd;		/* LC_SYMTAB */
+	unsigned long	cmdsize;	/* sizeof(struct symtab_command) */
+	unsigned long	symoff;		/* symbol table offset */
+	unsigned long	nsyms;		/* number of symbol table entries */
+	unsigned long	stroff;		/* string table offset */
+	unsigned long	strsize;	/* string table size in bytes */
+};
+
+/*
+ * This is the second set of the symbolic information which is used to support
+ * the data structures for the dynamicly link editor.
+ *
+ * The original set of symbolic information in the symtab_command which contains
+ * the symbol and string tables must also be present when this load command is
+ * present. 
When this load command is present the symbol table is organized + * into three groups of symbols: + * local symbols (static and debugging symbols) - grouped by module + * defined external symbols - grouped by module (sorted by name if not lib) + * undefined external symbols (sorted by name) + * In this load command there are offsets and counts to each of the three groups + * of symbols. + * + * This load command contains a the offsets and sizes of the following new + * symbolic information tables: + * table of contents + * module table + * reference symbol table + * indirect symbol table + * The first three tables above (the table of contents, module table and + * reference symbol table) are only present if the file is a dynamicly linked + * shared library. For executable and object modules, which are files + * containing only one module, the information that would be in these three + * tables is determined as follows: + * table of contents - the defined external symbols are sorted by name + * module table - the file contains only one module so everything in the + * file is part of the module. + * reference symbol table - is the defined and undefined external symbols + * + * For dynamicly linked shared library files this load command also contains + * offsets and sizes to the pool of relocation entries for all sections + * separated into two groups: + * external relocation entries + * local relocation entries + * For executable and object modules the relocation entries continue to hang + * off the section structures. 
+ */ +struct dysymtab_command { + unsigned long cmd; /* LC_DYSYMTAB */ + unsigned long cmdsize; /* sizeof(struct dysymtab_command) */ + + /* + * The symbols indicated by symoff and nsyms of the LC_SYMTAB load command + * are grouped into the following three groups: + * local symbols (further grouped by the module they are from) + * defined external symbols (further grouped by the module they are from) + * undefined symbols + * + * The local symbols are used only for debugging. The dynamic binding + * process may have to use them to indicate to the debugger the local + * symbols for a module that is being bound. + * + * The last two groups are used by the dynamic binding process to do the + * binding (indirectly through the module table and the reference symbol + * table when this is a dynamicly linked shared library file). + */ + unsigned long ilocalsym; /* index to local symbols */ + unsigned long nlocalsym; /* number of local symbols */ + + unsigned long iextdefsym; /* index to externally defined symbols */ + unsigned long nextdefsym; /* number of externally defined symbols */ + + unsigned long iundefsym; /* index to undefined symbols */ + unsigned long nundefsym; /* number of undefined symbols */ + + /* + * For the for the dynamic binding process to find which module a symbol + * is defined in the table of contents is used (analogous to the ranlib + * structure in an archive) which maps defined external symbols to modules + * they are defined in. This exists only in a dynamicly linked shared + * library file. For executable and object modules the defined external + * symbols are sorted by name and is use as the table of contents. + */ + unsigned long tocoff; /* file offset to table of contents */ + unsigned long ntoc; /* number of entries in table of contents */ + + /* + * To support dynamic binding of "modules" (whole object files) the symbol + * table must reflect the modules that the file was created from. 
This is + * done by having a module table that has indexes and counts into the merged + * tables for each module. The module structure that these two entries + * refer to is described below. This exists only in a dynamicly linked + * shared library file. For executable and object modules the file only + * contains one module so everything in the file belongs to the module. + */ + unsigned long modtaboff; /* file offset to module table */ + unsigned long nmodtab; /* number of module table entries */ + + /* + * To support dynamic module binding the module structure for each module + * indicates the external references (defined and undefined) each module + * makes. For each module there is an offset and a count into the + * reference symbol table for the symbols that the module references. + * This exists only in a dynamicly linked shared library file. For + * executable and object modules the defined external symbols and the + * undefined external symbols indicates the external references. + */ + unsigned long extrefsymoff; /* offset to referenced symbol table */ + unsigned long nextrefsyms; /* number of referenced symbol table entries */ + + /* + * The sections that contain "symbol pointers" and "routine stubs" have + * indexes and (implied counts based on the size of the section and fixed + * size of the entry) into the "indirect symbol" table for each pointer + * and stub. For every section of these two types the index into the + * indirect symbol table is stored in the section header in the field + * reserved1. An indirect symbol table entry is simply a 32bit index into + * the symbol table to the symbol that the pointer or stub is referring to. + * The indirect symbol table is ordered to match the entries in the section. 
+ */ + unsigned long indirectsymoff; /* file offset to the indirect symbol table */ + unsigned long nindirectsyms; /* number of indirect symbol table entries */ + + /* + * To support relocating an individual module in a library file quickly the + * external relocation entries for each module in the library need to be + * accessed efficiently. Since the relocation entries can't be accessed + * through the section headers for a library file they are separated into + * groups of local and external entries further grouped by module. In this + * case the presents of this load command who's extreloff, nextrel, + * locreloff and nlocrel fields are non-zero indicates that the relocation + * entries of non-merged sections are not referenced through the section + * structures (and the reloff and nreloc fields in the section headers are + * set to zero). + * + * Since the relocation entries are not accessed through the section headers + * this requires the r_address field to be something other than a section + * offset to identify the item to be relocated. In this case r_address is + * set to the offset from the vmaddr of the first LC_SEGMENT command. + * + * The relocation entries are grouped by module and the module table + * entries have indexes and counts into them for the group of external + * relocation entries for that the module. + * + * For sections that are merged across modules there must not be any + * remaining external relocation entries for them (for merged sections + * remaining relocation entries must be local). + */ + unsigned long extreloff; /* offset to external relocation entries */ + unsigned long nextrel; /* number of external relocation entries */ + + /* + * All the local relocation entries are grouped together (they are not + * grouped by their module since they are only used if the object is moved + * from it staticly link edited address). 
+ */ + unsigned long locreloff; /* offset to local relocation entries */ + unsigned long nlocrel; /* number of local relocation entries */ + +}; + +/* + * An indirect symbol table entry is simply a 32bit index into the symbol table + * to the symbol that the pointer or stub is refering to. Unless it is for a + * non-lazy symbol pointer section for a defined symbol which strip(1) as + * removed. In which case it has the value INDIRECT_SYMBOL_LOCAL. If the + * symbol was also absolute INDIRECT_SYMBOL_ABS is or'ed with that. + */ +#define INDIRECT_SYMBOL_LOCAL 0x80000000 +#define INDIRECT_SYMBOL_ABS 0x40000000 + + +/* a table of contents entry */ +struct dylib_table_of_contents { + unsigned long symbol_index; /* the defined external symbol + (index into the symbol table) */ + unsigned long module_index; /* index into the module table this symbol + is defined in */ +}; + +/* a module table entry */ +struct dylib_module { + unsigned long module_name; /* the module name (index into string table) */ + + unsigned long iextdefsym; /* index into externally defined symbols */ + unsigned long nextdefsym; /* number of externally defined symbols */ + unsigned long irefsym; /* index into reference symbol table */ + unsigned long nrefsym; /* number of reference symbol table entries */ + unsigned long ilocalsym; /* index into symbols for local symbols */ + unsigned long nlocalsym; /* number of local symbols */ + + unsigned long iextrel; /* index into external relocation entries */ + unsigned long nextrel; /* number of external relocation entries */ + + unsigned long iinit; /* index into the init section */ + unsigned long ninit; /* number of init section entries */ + + unsigned long /* for this module address of the start of */ + objc_module_info_addr; /* the (__OBJC,__module_info) section */ + unsigned long /* for this module size of */ + objc_module_info_size; /* the (__OBJC,__module_info) section */ +}; + +/* + * The entries in the reference symbol table are used when loading 
the module
+ * (both by the static and dynamic link editors) and if the module is unloaded
+ * or replaced. Therefore all external symbols (defined and undefined) are
+ * listed in the module's reference table. The flags describe the type of
+ * reference that is being made. The constants for the flags are defined in
+ * <mach-o/nlist.h> as they are also used for symbol table entries.
+ */
+struct dylib_reference {
+    unsigned long isym:24,	/* index into the symbol table */
+		  flags:8;	/* flags to indicate the type of reference */
+};
+
+/*
+ * The symseg_command contains the offset and size of the GNU style
+ * symbol table information as described in the header file <symseg.h>.
+ * The symbol roots of the symbol segments must also be aligned properly
+ * in the file. So the requirement of keeping the offsets aligned to a
+ * multiple of a sizeof(long) translates to the length field of the symbol
+ * roots also being a multiple of a long. Also the padding must again be
+ * zeroed. (THIS IS OBSOLETE and no longer supported).
+ */
+struct symseg_command {
+	unsigned long	cmd;		/* LC_SYMSEG */
+	unsigned long	cmdsize;	/* sizeof(struct symseg_command) */
+	unsigned long	offset;		/* symbol segment offset */
+	unsigned long	size;		/* symbol segment size in bytes */
+};
+
+/*
+ * The ident_command contains a free format string table following the
+ * ident_command structure. The strings are null terminated and the size of
+ * the command is padded out with zero bytes to a multiple of sizeof(long).
+ * (THIS IS OBSOLETE and no longer supported).
+ */
+struct ident_command {
+	unsigned long cmd;		/* LC_IDENT */
+	unsigned long cmdsize;	/* strings that follow this command */
+};
+
+/*
+ * The fvmfile_command contains a reference to a file to be loaded at the
+ * specified virtual address. (Presently, this command is reserved for NeXT
+ * internal use. The kernel ignores this command when loading a program into
+ * memory).
+ */ +struct fvmfile_command { + unsigned long cmd; /* LC_FVMFILE */ + unsigned long cmdsize; /* includes pathname string */ + union lc_str name; /* files pathname */ + unsigned long header_addr; /* files virtual address */ +}; + +#endif _MACHO_LOADER_H_ diff --git a/EXTERNAL_HEADERS/mach-o/nlist.h b/EXTERNAL_HEADERS/mach-o/nlist.h new file mode 100644 index 000000000..91763d098 --- /dev/null +++ b/EXTERNAL_HEADERS/mach-o/nlist.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHO_NLIST_H_ +#define _MACHO_NLIST_H_ +/* $NetBSD: nlist.h,v 1.5 1994/10/26 00:56:11 cgd Exp $ */ + +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nlist.h 8.2 (Berkeley) 1/21/94 + */ + +/* + * Format of a symbol table entry of a Mach-O file. Modified from the BSD + * format. 
The modifications from the original format were changing n_other + * (an unused field) to n_sect and the addition of the N_SECT type. These + * modifications are required to support symbols in an arbitrary number of + * sections not just the three sections (text, data and bss) in a BSD file. + */ +struct nlist { + union { + char *n_name; /* for use when in-core */ + long n_strx; /* index into the string table */ + } n_un; + unsigned char n_type; /* type flag, see below */ + unsigned char n_sect; /* section number or NO_SECT */ + short n_desc; /* see <mach-o/stab.h> */ + unsigned long n_value; /* value of this symbol (or stab offset) */ +}; + +/* + * Symbols with an index into the string table of zero (n_un.n_strx == 0) are + * defined to have a null, "", name. Therefore all string indexes to non null + * names must not have a zero string index. This is a bit of historical information + * that has never been well documented. + */ + +/* + * The n_type field really contains three fields: + * unsigned char N_STAB:3, + * N_PEXT:1, + * N_TYPE:3, + * N_EXT:1; + * which are used via the following masks. + */ +#define N_STAB 0xe0 /* if any of these bits set, a symbolic debugging entry */ +#define N_PEXT 0x10 /* private external symbol bit */ +#define N_TYPE 0x0e /* mask for the type bits */ +#define N_EXT 0x01 /* external symbol bit, set for external symbols */ + +/* + * Only symbolic debugging entries have some of the N_STAB bits set and if any + * of these bits are set then it is a symbolic debugging entry (a stab). In + * which case then the values of the n_type field (the entire field) are given + * in <mach-o/stab.h> + */ + +/* + * Values for N_TYPE bits of the n_type field. 
+ */ +#define N_UNDF 0x0 /* undefined, n_sect == NO_SECT */ +#define N_ABS 0x2 /* absolute, n_sect == NO_SECT */ +#define N_SECT 0xe /* defined in section number n_sect */ +#define N_PBUD 0xc /* prebound undefined (defined in a dylib) */ +#define N_INDR 0xa /* indirect */ + +/* + * If the type is N_INDR then the symbol is defined to be the same as another + * symbol. In this case the n_value field is an index into the string table + * of the other symbol's name. When the other symbol is defined then they both + * take on the defined type and value. + */ + +/* + * If the type is N_SECT then the n_sect field contains an ordinal of the + * section the symbol is defined in. The sections are numbered from 1 and + * refer to sections in order they appear in the load commands for the file + * they are in. This means the same ordinal may very well refer to different + * sections in different files. + * + * The n_value field for all symbol table entries (including N_STAB's) gets + * updated by the link editor based on the value of its n_sect field and where + * the section n_sect references gets relocated. If the value of the n_sect + * field is NO_SECT then its n_value field is not changed by the link editor. + */ +#define NO_SECT 0 /* symbol is not in any section */ +#define MAX_SECT 255 /* 1 thru 255 inclusive */ + +/* + * Common symbols are represented by undefined (N_UNDF) external (N_EXT) types + * whose values (n_value) are non-zero. In which case the value of the n_value + * field is the size (in bytes) of the common symbol. The n_sect field is set + * to NO_SECT. + */ + +/* + * To support the lazy binding of undefined symbols in the dynamic link-editor, + * the undefined symbols in the symbol table (the nlist structures) are marked + * with the indication if the undefined reference is a lazy reference or + * non-lazy reference. If both a non-lazy reference and a lazy reference is + * made to the same symbol the non-lazy reference takes precedence. 
A reference + * is lazy only when all references to that symbol are made through a symbol + * pointer in a lazy symbol pointer section. + * + * The implementation of marking nlist structures in the symbol table for + * undefined symbols will be to use some of the bits of the n_desc field as a + * reference type. The mask REFERENCE_TYPE will be applied to the n_desc field + * of an nlist structure for an undefined symbol to determine the type of + * undefined reference (lazy or non-lazy). + * + * The constants for the REFERENCE FLAGS are propagated to the reference table + * in a shared library file. In that case the constant for a defined symbol, + * REFERENCE_FLAG_DEFINED, is also used. + */ +/* Reference type bits of the n_desc field of undefined symbols */ +#define REFERENCE_TYPE 0xf +/* types of references */ +#define REFERENCE_FLAG_UNDEFINED_NON_LAZY 0 +#define REFERENCE_FLAG_UNDEFINED_LAZY 1 +#define REFERENCE_FLAG_DEFINED 2 +#define REFERENCE_FLAG_PRIVATE_DEFINED 3 +#define REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY 4 +#define REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY 5 + +/* + * To simplify stripping of objects that are used with the dynamic link + * editor, the static link editor marks the symbols defined in an object that are + * referenced by a dynamically bound object (dynamic shared libraries, bundles). + * With this marking strip knows not to strip these symbols. + */ +#define REFERENCED_DYNAMICALLY 0x0010 + +/* + * The non-reference type bits of the n_desc field for global symbols are + * reserved for the dynamic link editor. All of these bits must start out + * zero in the object file. + */ +#define N_DESC_DISCARDED 0x8000 /* symbol is discarded */ + +#endif /* _MACHO_NLIST_H_ */ diff --git a/EXTERNAL_HEADERS/machine/limits.h b/EXTERNAL_HEADERS/machine/limits.h new file mode 100644 index 000000000..2c5eac77a --- /dev/null +++ b/EXTERNAL_HEADERS/machine/limits.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if !defined (_LIMITS_H___) && !defined (_MACH_MACHLIMITS_H_) +#if defined (__ppc__) +#include +#elif defined (__i386__) +#include +#else +#error architecture not supported +#endif +#undef MB_LEN_MAX +#endif +#ifndef _LIMITS_H___ +#ifndef _MACH_MACHLIMITS_H_ + +/* _MACH_MACHLIMITS_H_ is used on OSF/1. */ +#define _LIMITS_H___ +#define _MACH_MACHLIMITS_H_ + +/* Number of bits in a `char'. */ +#undef CHAR_BIT +#define CHAR_BIT 8 + +/* Maximum length of a multibyte character. */ +#ifndef MB_LEN_MAX +#define MB_LEN_MAX 1 +#endif + +/* Minimum and maximum values a `signed char' can hold. */ +#undef SCHAR_MIN +#define SCHAR_MIN (-128) +#undef SCHAR_MAX +#define SCHAR_MAX 127 + +/* Maximum value an `unsigned char' can hold. (Minimum is 0). */ +#undef UCHAR_MAX +#define UCHAR_MAX 255 + +/* Minimum and maximum values a `char' can hold. */ +#ifdef __CHAR_UNSIGNED__ +#undef CHAR_MIN +#define CHAR_MIN 0 +#undef CHAR_MAX +#define CHAR_MAX 255 +#else +#undef CHAR_MIN +#define CHAR_MIN (-128) +#undef CHAR_MAX +#define CHAR_MAX 127 +#endif + +/* Minimum and maximum values a `signed short int' can hold. 
*/ +#undef SHRT_MIN +#define SHRT_MIN (-32768) +#undef SHRT_MAX +#define SHRT_MAX 32767 + +/* Maximum value an `unsigned short int' can hold. (Minimum is 0). */ +#undef USHRT_MAX +#define USHRT_MAX 65535 + +/* Minimum and maximum values a `signed int' can hold. */ +#ifndef __INT_MAX__ +#define __INT_MAX__ 2147483647 +#endif +#undef INT_MIN +#define INT_MIN (-INT_MAX-1) +#undef INT_MAX +#define INT_MAX __INT_MAX__ + +/* Maximum value an `unsigned int' can hold. (Minimum is 0). */ +#undef UINT_MAX +#define UINT_MAX (INT_MAX * 2U + 1) + +/* Minimum and maximum values a `signed long int' can hold. + (Same as `int'). */ +#ifndef __LONG_MAX__ +#define __LONG_MAX__ 2147483647L +#endif +#undef LONG_MIN +#define LONG_MIN (-LONG_MAX-1) +#undef LONG_MAX +#define LONG_MAX __LONG_MAX__ + +/* Maximum value an `unsigned long int' can hold. (Minimum is 0). */ +#undef ULONG_MAX +#define ULONG_MAX (LONG_MAX * 2UL + 1) + +#if defined (__GNU_LIBRARY__) ? defined (__USE_GNU) : !defined (__STRICT_ANSI__) +/* Minimum and maximum values a `signed long long int' can hold. */ +#ifndef __LONG_LONG_MAX__ +#define __LONG_LONG_MAX__ 9223372036854775807LL +#endif +#undef LONG_LONG_MIN +#define LONG_LONG_MIN (-LONG_LONG_MAX-1) +#undef LONG_LONG_MAX +#define LONG_LONG_MAX __LONG_LONG_MAX__ + +/* Maximum value an `unsigned long long int' can hold. (Minimum is 0). */ +#undef ULONG_LONG_MAX +#define ULONG_LONG_MAX (LONG_LONG_MAX * 2ULL + 1) +#endif + +#endif /* _MACH_MACHLIMITS_H_ */ +#endif /* _LIMITS_H___ */ diff --git a/EXTERNAL_HEADERS/stdarg.h b/EXTERNAL_HEADERS/stdarg.h new file mode 100644 index 000000000..dc927b294 --- /dev/null +++ b/EXTERNAL_HEADERS/stdarg.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* stdarg.h for GNU. + Note that the type used in va_arg is supposed to match the + actual type **after default promotions**. + Thus, va_arg (..., short) is not valid. */ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +#ifdef __clipper__ +#include +#else +#ifdef __m88k__ +#include +#else +#ifdef __i860__ +#include +#else +#ifdef __hppa__ +#include +#else +#ifdef __mips__ +#include +#else +#ifdef __sparc__ +#include +#else +#ifdef __i960__ +#include +#else +#ifdef __alpha__ +#include +#else +#if defined (__H8300__) || defined (__H8300H__) +#include +#else +#if defined (__PPC__) && defined (_CALL_SYSV) +#include +#else + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX) || defined(__NetBSD__) || (defined(__APPLE__) && defined(__ppc__)) +typedef char *__gnuc_va_list; +#else +typedef void *__gnuc_va_list; +#endif +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. */ +#ifdef _STDARG_H + +/* Amount of space required in an argument list for an arg of type TYPE. + TYPE may alternatively be an expression whose type is used. 
*/ + +#if defined(sysV68) +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (short) - 1) / sizeof (short)) * sizeof (short)) +#else +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int)) +#endif + +#define va_start(AP, LASTARG) \ + (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG))) + +#undef va_end +void va_end (__gnuc_va_list); /* Defined in libgcc.a */ +#define va_end(AP) ((void)0) + +/* We cast to void * and then to TYPE * because this avoids + a warning about increasing the alignment requirement. */ + +#if defined (__arm__) || defined (__i386__) || defined (__i860__) || defined (__ns32000__) || defined (__vax__) +/* This is for little-endian machines; small args are padded upward. */ +#define va_arg(AP, TYPE) \ + (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \ + *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))) +#else /* big-endian */ +/* This is for big-endian machines; small args are padded downward. */ +#define va_arg(AP, TYPE) \ + (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \ + *((TYPE *) (void *) ((char *) (AP) \ + - ((sizeof (TYPE) < __va_rounded_size (char) \ + ? sizeof (TYPE) : __va_rounded_size (TYPE)))))) +#endif /* big-endian */ +#endif /* _STDARG_H */ + +#endif /* not powerpc with V.4 calling sequence */ +#endif /* not h8300 */ +#endif /* not alpha */ +#endif /* not i960 */ +#endif /* not sparc */ +#endif /* not mips */ +#endif /* not hppa */ +#endif /* not i860 */ +#endif /* not m88k */ +#endif /* not clipper */ + +#ifdef _STDARG_H +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. 
*/ + +#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */ +#undef _VA_LIST +#endif + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#ifdef __svr4__ +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. */ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#endif /* _VA_LIST_ */ +#else /* not __svr4__ */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. + (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. 
*/ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +typedef __gnuc_va_list va_list; +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/EXTERNAL_HEADERS/stdint.h b/EXTERNAL_HEADERS/stdint.h new file mode 100644 index 000000000..98fd43852 --- /dev/null +++ b/EXTERNAL_HEADERS/stdint.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2000,2001 Apple Computer, Inc. All rights reserved. + * + * We build on rather than in order to + * minimize the global namespace pollution (i.e., we'd like to define + * *only* those identifiers that the C standard mandates should be + * defined by ). Using means that (at + * least as of January 2001) all of the extra macros that do get + * #defined by #include'ing are in the implementor's + * namespace ("_[A-Z].*" or "__.*"). + * + * The reason that we do #include the relevant ...types.h instead of + * creating several "competing" typedefs is to make header collisions + * less likely during the transition to C99. + * + * Caveat: There are still five extra typedef's defined by doing it + * this way: "u_int{8,16,32,64}_t" and "register_t". Might be + * fixable via pre- and post- #defines, but probably not worth it. 
+ */ + +#ifndef _STDINT_H_ +#define _STDINT_H_ + +#include <machine/types.h> + +/* from ISO/IEC 9899:1999 spec */ + +/* 7.18.1.1 Exact-width integer types */ + /* int8_t is defined in <machine/types.h> */ + /* int16_t is defined in <machine/types.h> */ + /* int32_t is defined in <machine/types.h> */ + /* int64_t is defined in <machine/types.h> */ +typedef u_int8_t uint8_t; /* u_int8_t is defined in <machine/types.h> */ +typedef u_int16_t uint16_t; /* u_int16_t is defined in <machine/types.h> */ +typedef u_int32_t uint32_t; /* u_int32_t is defined in <machine/types.h> */ +typedef u_int64_t uint64_t; /* u_int64_t is defined in <machine/types.h> */ + + +/* 7.18.1.2 Minimum-width integer types */ +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + + +/* 7.18.1.3 Fastest-width integer types */ +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + + +/* 7.18.1.4 Integer types capable of holding object pointers */ + /* intptr_t is defined in <machine/types.h> */ + /* uintptr_t is defined in <machine/types.h> */ + + +/* 7.18.1.5 Greatest-width integer types */ +typedef long long intmax_t; +typedef unsigned long long uintmax_t; + + +/* "C++ implementations should define these macros only when + * __STDC_LIMIT_MACROS is defined before <stdint.h> is included." + * In other words, if C++, then __STDC_LIMIT_MACROS enables the + * macros below. (Note that there also exists a different enabling + * macro (__STDC_CONSTANT_MACROS) for the last few, below.) + */ +#if (! defined(__cplusplus)) || defined(__STDC_LIMIT_MACROS) + + +/* 7.18.2 Limits of specified-width integer types: + * These #defines specify the minimum and maximum limits + * of each of the types declared above. 
+ */ + + +/* 7.18.2.1 Limits of exact-width integer types */ +#define INT8_MIN -128 +#define INT16_MIN -32768 +#define INT32_MIN -2147483648 +#define INT64_MIN -9223372036854775808LL + +#define INT8_MAX +127 +#define INT16_MAX +32767 +#define INT32_MAX +2147483647 +#define INT64_MAX +9223372036854775807LL + +#define UINT8_MAX 255 +#define UINT16_MAX 65535 +#define UINT32_MAX 4294967295U +#define UINT64_MAX 18446744073709551615ULL + +/* 7.18.2.2 Limits of minimum-width integer types */ +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST64_MIN INT64_MIN + +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MAX INT64_MAX + +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +/* 7.18.2.3 Limits of fastest minimum-width integer types */ +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST64_MIN INT64_MIN + +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MAX INT64_MAX + +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +/* 7.18.2.4 Limits of integer types capable of holding object pointers */ + +#define INTPTR_MIN INT32_MIN +#define INTPTR_MAX INT32_MAX + +#define UINTPTR_MAX UINT32_MAX + +/* 7.18.2.5 Limits of greatest-width integer types */ +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX + +#define UINTMAX_MAX UINT64_MAX + +/* 7.18.3 "Other" */ +#define PTRDIFF_MIN INT32_MIN +#define PTRDIFF_MAX INT32_MAX + +/* We have no sig_atomic_t yet, so no SIG_ATOMIC_{MIN,MAX}. + Should end up being {-127,127} or {0,255} ... or bigger. + My bet would be on one of {U}INT32_{MIN,MAX}. 
*/ + +#define SIZE_MAX UINT32_MAX + +#define WCHAR_MAX INT32_MAX + +/* We have no wint_t yet, so no WINT_{MIN,MAX}. + Should end up being {U}INT32_{MIN,MAX}, depending. */ + + +#endif /* if C++, then __STDC_LIMIT_MACROS enables the above macros */ + +/* "C++ implementations should define these macros only when + * __STDC_CONSTANT_MACROS is defined before is included." + */ +#if (! defined(__cplusplus)) || defined(__STDC_CONSTANT_MACROS) + +/* 7.18.4 Macros for integer constants */ +#define INT8_C(v) ((int8_t)v) +#define INT16_C(v) ((int16_t)v) +#define INT32_C(v) (v ## L) +#define INT64_C(v) (v ## LL) + +#define UINT8_C(v) ((uint8_t)v) +#define UINT16_C(v) ((uint16_t)v) +#define UINT32_C(v) (v ## UL) +#define UINT64_C(v) (v ## ULL) + +#define INTMAX_C(v) (v ## LL) +#define UINTMAX_C(v) (v ## ULL) + +#endif /* if C++, then __STDC_CONSTANT_MACROS enables the above macros */ + +#endif /* _STDINT_H_ */ diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..00fd1ce02 --- /dev/null +++ b/Makefile @@ -0,0 +1,50 @@ +ifndef VERSDIR +export VERSDIR=$(shell /bin/pwd) +endif + +export MakeInc_cmd=${VERSDIR}/makedefs/MakeInc.cmd +export MakeInc_def=${VERSDIR}/makedefs/MakeInc.def +export MakeInc_rule=${VERSDIR}/makedefs/MakeInc.rule +export MakeInc_dir=${VERSDIR}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +ALL_SUBDIRS = \ + iokit \ + osfmk \ + bsd \ + pexpert \ + libkern \ + libsa + +INSTINC_SUBDIRS = $(ALL_SUBDIRS) + +INSTINC_SUBDIRS_PPC = $(INSTINC_SUBDIRS) + +INSTINC_SUBDIRS_I386 = $(INSTINC_SUBDIRS) + +EXPINC_SUBDIRS = $(ALL_SUBDIRS) + +EXPINC_SUBDIRS_PPC = $(EXPINC_SUBDIRS) + +EXPINC_SUBDIRS_I386 = $(EXPINC_SUBDIRS) + +COMP_SUBDIRS = $(ALL_SUBDIRS) + +INST_SUBDIRS = \ + libkern \ + libsa \ + iokit \ + osfmk \ + bsd + +INSTALL_FILE_LIST= \ + mach_kernel + +INSTALL_FILE_DIR= \ + / + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/PB.project b/PB.project new file mode 100644 index 000000000..28014d5c7 --- /dev/null 
+++ b/PB.project @@ -0,0 +1,15 @@ +{ + FILESTABLE = {}; + LANGUAGE = English; + NEXTSTEP_BUILDTOOL = /bin/gnumake; + NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; + NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; + PDO_UNIX_BUILDTOOL = /bin/gnumake; + PDO_UNIX_JAVA_COMPILER = "$(JDKBINDIR)/javac"; + PDO_UNIX_OBJCPLUS_COMPILER = "$(NEXTDEV_BIN)/gcc"; + PROJECTNAME = xnu; + PROJECTTYPE = Legacy; + PROJECTVERSION = 2.8; + WINDOWS_JAVA_COMPILER = "$(JDKBINDIR)/javac.exe"; + WINDOWS_OBJCPLUS_COMPILER = "$(DEVDIR)/gcc"; +} diff --git a/README b/README new file mode 100644 index 000000000..690dd7893 --- /dev/null +++ b/README @@ -0,0 +1,122 @@ +How to build XNU: + +1) Setup your environment: + + Create and go to your sandbox directory + + $ cd + + Extract the xnu project from cvs: + + $ cvs co -r xnu + + where must be replaced by the matching xnu tag for + the xnu project level. + + Go to the top directory in your XNU project. + + $ cd /xnu + + If you are using a sh-style shell, run the following command: + $ . SETUP/setup.sh + + If you are using a csh-style shell, run the following command: + % source SETUP/setup.csh + + This will define the following environmental variables: + SRCROOT, OBJROOT, DSTROOT, SYMROOT + +2) Export the Component Header Files + + From the top directory, run: + + $ make exporthdrs + + This exports the component header files in the $OBJROOT/EXPORT_HDRS + directory. + +3) Build all the Components + + From the top directory. run: + + $ make all + + This builds all the components for all architectures defined in + ARCH_CONFIGS and for all kernel configurations defined in KERNEL_CONFIGS. + By default, ARCH_CONFIGS contains one architecture, the build machine + architecture, and KERNEL_CONFIGS is set to build for RELEASE. + This will also create a bootable image, mach_kernel, and a kernel binary + with symbols, mach_kernel.sys. 
+ + Example: + $(OBJROOT)/RELEASE_PPC/osfmk/RELEASE/osfmk.o: pre-linked object for osfmk component + $(OBJROOT)/RELEASE_PPC/mach_kernel: bootable image + +4) Building a Component + + From a component top directory: + + $ make all + + This builds a component for all architectures defined in ARCH_CONFIGS + and for all kernel configurations defined in KERNEL_CONFIGS. + By default, ARCH_CONFIGS contains one architecture, the build machine + architecture, and KERNEL_CONFIGS is set to build for RELEASE . + + WARNING: If a component header file has been modified, you will have to do + the above procedures 3 and 4. + + Example: + $(OBJROOT)/RELEASE_PPC/osfmk/RELEASE/osfmk.o: pre-linked object for osfmk component + + From the component top directory: + + $ make mach_kernel + + This includes your component in the bootable image, mach_kernel, and + in the kernel binary with symbols, mach_kernel.sys. + +5) Building DEBUG + + Define KERNEL_CONFIGS to DEBUG in your environment or when running a + make command. Then, apply procedures 4, 5 + + $ make KERNEL_CONFIGS=DEBUG all + + or + + $ export KERNEL_CONFIGS=DEBUG + $ make all + + Example: + $(OBJROOT)/DEBUG_PPC/osfmk/DEBUG/osfmk.o: pre-linked object for osfmk component + $(OBJROOT)/DEBUG_PPC/mach_kernel: bootable image + +6) Building fat + + Define ARCH_CONFIGS in your environment or when running a make command. + Apply procedures 3, 4, 5 + + $ make ARCH_CONFIGS="PPC I386" exporthdrs all + + or + + $ export ARCH_CONFIGS="PPC I386" + $ make exporthdrs all + +7) Build check before integration + + From the top directory, run: + + $ ~rc/bin/buildit . 
-arch ppc -arch i386 -noinstallsrc -nosum + +8) Creating tags and cscope + + Set up your build environment as per instructions in 2a + + From the top directory, run: + + $ make tags # this will build ctags and etags + + $ make cscope # this will build cscope database + diff --git a/SETUP/seed_objroot b/SETUP/seed_objroot new file mode 100755 index 000000000..6773e70e4 --- /dev/null +++ b/SETUP/seed_objroot @@ -0,0 +1,133 @@ +#!/bin/sh + +if [ ! $OBJROOT ] +then + echo "OBJROOT not defined" + exit 1 +fi + +if [ ! $PREBUILT_OBJROOT ] +then + PREBUILT_OBJROOT=/Prebuilt/$1/xnu/BUILD/obj +fi + +if [ ! -e $PREBUILT_OBJROOT ] +then + echo "$PREBUILT_OBJROOT doesn't exist" + exit 1 +else +if [ $# = 2 -a ! -e $PREBUILT_OBJROOT/$2 ] +then + echo "$PREBUILT_OBJROOT/$2 doesn't exist" + exit 1 +fi +if [ -e $PREBUILT_OBJROOT/BUILDING_SEED_OBJROOT ] +then + echo "Building $PREBUILT_OBJROOT, try later" + exit 1 +fi +fi + +cd $PREBUILT_OBJROOT + +if [ $# = 1 ] +then + +if [ ! -e $OBJROOT ] +then +mkdir -p $OBJROOT +echo "Copying $PREBUILT_OBJROOT in $OBJROOT" +pax -rw . $OBJROOT +else +echo "Remove $OBJROOT before calling seed_objroot" +exit 1 +fi + +else + +if [ ! -e $OBJROOT/$2 ] +then +mkdir -p $OBJROOT/$2 +echo "Copying $PREBUILT_OBJROOT/$2 in $OBJROOT/$2" +pax -rw $2 $OBJROOT +RELEASE_OBJ=`echo $2 | sed 's/DEBUG/RELEASE/'` +if [ $1 != $RELEASE_OBJ -a ! -e $OBJROOT/$RELEASE_OBJ ] +then +mkdir -p $OBJROOT/$RELEASE_OBJ +echo "Copying $PREBUILT_OBJROOT/$RELEASE_OBJ in $OBJROOT/$RELEASE_OBJ" +pax -rw $RELEASE_OBJ $OBJROOT +fi + +else +echo "remove $OBJROOT/$2 before calling seed_objroot" +exit 1 +fi + +fi + +if [ ! 
-e $OBJROOT/EXPORT_HDRS ] +then +echo "Copying $PREBUILT_OBJROOT/EXPORT_HDRS in $OBJROOT/EXPORT_HDRS" +mkdir -p $OBJROOT/EXPORT_HDRS +pax -rw EXPORT_HDRS $OBJROOT +fi + +cd $OBJROOT +if [ -e RELEASE_PPC/osfmk/RELEASE/config.RELEASE_PPC ] +then +PREV_OBJROOT=`grep objectdir RELEASE_PPC/osfmk/RELEASE/config.RELEASE_PPC | cut -f 2 -d\" | + sed 's|/RELEASE_PPC/osfmk/RELEASE||'` +fi +if [ -z $PREV_OBJROOT -a -e DEBUG_PPC/osfmk/DEBUG/config.DEBUG_PPC ] +then + PREV_OBJROOT=`grep objectdir DEBUG_PPC/osfmk/DEBUG/config.DEBUG_PPC | cut -f 2 -d\" | + sed 's|/DEBUG_PPC/osfmk/DEBUG||'` +fi +if [ -z $PREV_OBJROOT -a -e RELEASE_I386/osfmk/RELEASE/config.RELEASE_I386 ] +then + PREV_OBJROOT=`grep objectdir RELEASE_I386/osfmk/RELEASE/config.RELEASE_I386 | cut -f 2 -d\" | + sed 's|/RELEASE_I386/osfmk/RELEASE||'` +fi +if [ -z $PREV_OBJROOT -a -e DEBUG_I386/osfmk/DEBUG/config.DEBUG_I386 ] +then + PREV_OBJROOT=`grep objectdir DEBUG_I386/osfmk/DEBUG/config.DEBUG_I386 | cut -f 2 -d\" | + sed 's|/DEBUG_I386/osfmk/DEBUG||'` +fi +if [ -z $PREV_OBJROOT ] +then + echo "PREV_OBJROOT not found" + exit 1 +fi + +if [ -e RELEASE_PPC/osfmk/RELEASE/config.RELEASE_PPC ] +then +PREV_SRCROOT=`grep sourcedir RELEASE_PPC/osfmk/RELEASE/config.RELEASE_PPC | cut -f 2 -d\"` +fi +if [ -z $PREV_SRCROOT -a -e DEBUG_PPC/osfmk/DEBUG/config.DEBUG_PPC ] +then + PREV_SRCROOT=`grep sourcedir DEBUG_PPC/osfmk/DEBUG/config.DEBUG_PPC | cut -f 2 -d\"` +fi +if [ -z $PREV_SRCROOT -a -e RELEASE_I386/osfmk/RELEASE/config.RELEASE_I386 ] +then +PREV_SRCROOT=`grep sourcedir RELEASE_I386/osfmk/RELEASE/config.RELEASE_I386 | cut -f 2 -d\"` +fi +if [ -z $PREV_SRCROOT -a -e DEBUG_I386/osfmk/DEBUG/config.DEBUG_I386 ] +then + PREV_SRCROOT=`grep sourcedir DEBUG_I386/osfmk/DEBUG/config.DEBUG_I386 | cut -f 2 -d\"` +fi +if [ -z $PREV_SRCROOT ] +then + echo "PREV_SRCROOT not found" + exit 1 +fi + +echo "s|$PREV_OBJROOT|$OBJROOT|" > prebuild.sed +echo "s|$PREV_SRCROOT|$SRCROOT|" >>prebuild.sed + +for i in `find . 
-name Makedep -print` +do +sed -f prebuild.sed $i > $i.tmp +rm $i +mv $i.tmp $i +done +rm -f `find $OBJROOT -name Makefile -print` prebuild.sed diff --git a/SETUP/setup.csh b/SETUP/setup.csh new file mode 100644 index 000000000..3d489f4ca --- /dev/null +++ b/SETUP/setup.csh @@ -0,0 +1,4 @@ +setenv SRCROOT `pwd` +setenv OBJROOT $SRCROOT/BUILD/obj +setenv DSTROOT $SRCROOT/BUILD/dst +setenv SYMROOT $SRCROOT/BUILD/sym diff --git a/SETUP/setup.sh b/SETUP/setup.sh new file mode 100644 index 000000000..7924785c7 --- /dev/null +++ b/SETUP/setup.sh @@ -0,0 +1,4 @@ +export SRCROOT=$(pwd) +export OBJROOT=$SRCROOT/BUILD/obj +export DSTROOT=$SRCROOT/BUILD/dst +export SYMROOT=$SRCROOT/BUILD/sym diff --git a/bsd/Makefile b/bsd/Makefile new file mode 100644 index 000000000..4c16828e0 --- /dev/null +++ b/bsd/Makefile @@ -0,0 +1,79 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + crypto \ + dev \ + hfs \ + include \ + isofs \ + libkern \ + machine \ + miscfs \ + net \ + netat \ + netccitt \ + netinet \ + netinet6 \ + netiso \ + netkey \ + netns \ + nfs \ + sys \ + ufs \ + vfs + +INSTINC_SUBDIRS_PPC = \ + ppc + +INSTINC_SUBDIRS_I386 = \ + i386 + +EXPINC_SUBDIRS = \ + crypto \ + dev \ + hfs \ + include \ + isofs \ + libkern \ + machine \ + miscfs \ + net \ + netat \ + netccitt \ + netinet \ + netinet6 \ + netiso \ + netkey \ + netns \ + nfs \ + sys \ + ufs \ + vfs \ + vm + +EXPINC_SUBDIRS_PPC = \ + ppc + +EXPINC_SUBDIRS_I386 = \ + i386 + +SETUP_SUBDIRS = \ + conf + +COMP_SUBDIRS = \ + conf + +INST_SUBDIRS = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/conf/MASTER b/bsd/conf/MASTER new file mode 100644 index 000000000..52e02e133 --- /dev/null +++ b/bsd/conf/MASTER @@ -0,0 +1,243 @@ +# +# Mach Operating System +# 
Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +####################################################################### +# +# Master machine independent configuration file. +# +# Specific configuration files are created based on this and +# the machine specific master file using the doconf script. +# +# Any changes to the master configuration files will affect all +# other configuration files based upon it. +# +####################################################################### +# +# To build a configuration, execute "doconf ." +# Configurations are specified in the "Configurations:" section +# of the MASTER and MASTER.* files as follows: +# +# = [ ... ] +# +# Lines in the MASTER and MASTER.* files are selected based on +# the attribute selector list, found in a comment at the end of +# the line. This is a list of attributes separated by commas. +# The "!" operator selects the line if none of the attributes are +# specified. +# +# For example: +# +# selects a line if "foo" or "bar" are specified. +# selects a line if neither "foo" nor "bar" is +# specified. +# +# Lines with no attributes specified are selected for all +# configurations. 
+# +####################################################################### +# +# STANDARD CONFIGURATION OPTIONS (select any combination) +# +# debug = extra kernel level debugging support +# mach = Mach support +# +# EXPERIMENTAL CONFIGURATION OPTIONS (select any combination, carefully) +# +# nbc = no buffer cache support +# simple = non-rollover clock support +# timing = precision timing support +# host = host resource control support +# fixpri = fixed priority threads +# +# MULTI-PROCESSOR CONFIGURATION (select at most one) +# +# multi16 = enable 16 multi-processors +# multi32 = enable 32 multi-processors +# multi48 = enable 48 multi-processors +# +# SYSTEM SIZE CONFIGURATION (select exactly one) +# +# xlarge = extra large scale system configuration +# large = large scale system configuration +# medium = medium scale system configuration +# small = small scale system configuration +# xsmall = extra small scale system configuration +# bsmall = special extra small scale system configuration for +# (e.g. for boot floppies) +# +####################################################################### +# +# Standard Mach Research Configurations: +# -------- ---- -------- --------------- +# +# These are the default configurations that can be used by most sites. +# They are used internally by the Mach project. 
+# +# MACH = [mach multi16 medium debug] +# +####################################################################### +# +ident NeXT + +# obsolete timezone spec +options TIMEZONE=0, PST=0 + +options QUOTA # # +options INET +options NEW_VM_CODE # # +options OLD_VM_CODE # # +options HW_AST # Hardware ast support # +options HW_FOOTPRINT # Cache footprint support # +options MACH # Standard Mach features # +options MACH_ASSERT # Compile in assertions # +options MACH_COMPAT # Vendor syscall compatibility # +options MACH_COUNTERS # counters # +options MACH_DEBUG # IPC debugging interface # +options MACH_FASTLINK # Fast symbolic links +options MACH_FIXPRI # Fixed priority threads # +options MACH_HOST # Mach host (resource alloc.) # +options MACH_IPC_COMPAT # Enable old IPC interface # +options MACH_IPC_DEBUG # Enable IPC debugging calls # +options MACH_IPC_TEST # Testing code/printfs # +options MACH_LDEBUG # Sanity-check simple locking # +options MACH_NP # Mach IPC support # +options MACH_NBC # No buffer cache # +options REV_ENDIAN_FS # Reverse Endian FS # +options MACH_NET # Fast network access # +options MACH_XP # external pager support # +options NORMA_IPC # NORMA IPC support # +options NORMA_DEVICE # NORMA unified device names # +options NORMA_VM # NORMA virtual memory support # +options NORMA_TASK # NORMA task support # +options NORMA_ETHER # NORMA across ethernet # +options SIMPLE_CLOCK # don't assume fixed tick # +options STAT_TIME # Use statistical timing # +options XPR_DEBUG # kernel tracing # +options KDEBUG # kernel tracing # +options DDM_DEBUG # driverkit-style tracing # +options MACH_OLD_VM_COPY # Old vm_copy technology # +options NO_DIRECT_RPC # for untyped mig servers # +options IPX # ipx stack # +options EON # # +options ISO # ISO stack # +options LLC # 802.2 support # +options LOOP # loopback support # +options MROUTING # multicast routing # +options NS # Netware # +options PPP # PPP # +options ROUTING # routing # +options TPIP # # +options TUN # # +options 
VLAN # # +options IPDIVERT # Divert sockets (for NAT) # +options IPFIREWALL # IP Firewalling (used by NAT) # +#options IPFIREWALL_VERBOSE # # +options IPFIREWALL_FORWARD #Transparent proxy # +options IPFIREWALL_DEFAULT_TO_ACCEPT # allow everything by default # +options IPFIREWALL_KEXT # Kernel extension # +options MULTICAST # Internet Protocol Class-D $ + + +# +# 4.4 general kernel +# +options COMPAT_43 # 4.3 BSD compatibility # +options DIAGNOSTIC # diagnostics # +options KTRACE # ktrace support # + +# +# 4.4 filesystems +# +options FFS # Fast Filesystem Support # +options HFS # HFS/HFS+ support # +options HFS_HARDLINKS # HFS+ hardlink support # +options FIFO # fifo support # +options PORTAL # portal_fs support # +options UNION # union_fs support # +options FDESC # fdesc_fs support # +options CD9660 # ISO 9660 CD-ROM support # +options VOLFS # volfs support # +options DEVFS # devfs support # +options SYNTHFS # synthfs support # + +# +# NFS support +# +options NFSCLIENT # Be an NFS client # +options NFSSERVER # Be an NFS server # + +# +# AppleTalk Support +# +options NETAT # AppleTalk support # +options AURP_SUPPORT # AppleTalk Update Routing # +# +# Machine Independent Apple Features +# +options KERNSERV # kernel_server intarface # +options MALLOCDEBUG # kernel malloc debugging # +options DRIVERKIT # driverkit support # +options KERNOBJC # Objective-C support # +options OBJCTEST # Objc internal test # +options KERNEL_STACK # MI kernel stack support # +profile # build a profiling kernel # + +# +# IPv6 (Kame Stable 20000425) Support +# +options "INET6" # kernel IPv6 Support # +options MAPPED_ADDR_ENABLED # enable IPv4, on IPv6 socket # +options IPSEC # IP security # +options IPSEC_ESP # IP security # +options "IPV6FIREWALL" # IPv6 Firewall Feature # +options "IPV6FIREWALL_DEFAULT_TO_ACCEPT" #IPv6 Firewall Feature # +options "IPV6FIREWALL_VERBOSE" #IPv6 Firewall Feature # +options NATPT # KAME/IPv6 NAT feature # +pseudo-device gif 2 # +pseudo-device dummy 2 # 
+pseudo-device faith 1 # + +makeoptions LIBDRIVER = "libDriver_kern.o" # +makeoptions LIBOBJC = "libkobjc.o" # + +maxusers 64 # +maxusers 50 # +maxusers 32 # +maxusers 16 # +maxusers 8 # +maxusers 2 # + +# +# Multi-processor selection +# +pseudo-device cpus 64 # +pseudo-device cpus 32 # +pseudo-device cpus 16 # +pseudo-device cpus 2 # +pseudo-device cpus 1 # + +# +# Ethernet (ARP) +# +pseudo-device ether +# +# Network loopback device +# +pseudo-device loop +# +# UCB pseudo terminal service +# +pseudo-device pty 32 init pty_init +# +# packet filter device +# +pseudo-device bpfilter 4 init bpf_init + +# +# shim to "linux" mach disk drivers (mach drivers must also be turned on) +# +# now using iokit disk shim, this is code is obsolete +#pseudo-device diskshim diff --git a/bsd/conf/MASTER.i386 b/bsd/conf/MASTER.i386 new file mode 100644 index 000000000..7bcd2f6c1 --- /dev/null +++ b/bsd/conf/MASTER.i386 @@ -0,0 +1,91 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +###################################################################### +# +# Master Apple configuration file (see the master machine independent +# configuration file for a description of the file format). 
+# +###################################################################### +# +# Apple (PSEUDO-)DEVICES (select any combination) +# ex = Excelan EXOS 202 Ethernet interface +# ip = Interphase V/SMD 3200 disk controller +# od = Canon OMD-1 Optical Disk +# rd = RAM disk +# sd = SCSI disk +# sg = Generic SCSI Device +# st = SCSI tape +# fd = Floppy Disk +# en = Integrated Ethernet controller +# dsp = DSP560001 digital signal processor +# iplmeas = ipl time measurement +# nextp = NeXT Laser Printer +# sound = sound I/O +# vol = removable volume support device +# venip = virtual Ethernet/IP network interface +# zs = Serial device +# +# MULTIPROCESSOR SUPPORT (select exactly one) +# multi = support 4 processors +# uni = supports single processor +# +# SPECIAL CHARACTERISTICS (select any combination) +# gdb = GNU kernel debugger +# posix_kern = POSIX support +# +# CPU TYPE (select exactly one) +# NeXT = FIXME +# +###################################################################### +# +# Standard Apple Research Configurations: +# -------- ----- -------- --------------- +# +# RELEASE = [intel pc mach medium event vol pst gdb kernobjc libdriver fixpri simple_clock mdebug kernserv driverkit uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 devfs revfs hfs mrouting ipdivert ipfirewall] +# PROFILE = [RELEASE profile] +# DEBUG = [intel pc mach medium event vol pst gdb kernobjc libdriver_g fixpri debug simple_clock mdebug kernserv driverkit xpr_debug uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs hfs devfs mach_assert mrouting ipdivert ipfirewall] +# +###################################################################### +# +machine "i386" # +cpu "i386" # + +makeoptions CCONFIGFLAGS = "-g -O3 -fno-omit-frame-pointer" # +makeoptions CCONFIGFLAGS = "-O3" # +makeoptions RELOC = "00100000" # +makeoptions SYMADDR = "00780000" # + +options GDB # GNU kernel debugger # +options 
DEBUG # general debugging code # +options SHOW_SPACE # print size of structures # +options EVENTMETER # event meter support # +options FP_EMUL # floating point emulation # +#options PC_SUPPORT # virtual PC support # +options UXPR # user-level XPR package # +config mach_kernel swap generic # + +options EVENT # + +# +# Ipl measurement system +# +pseudo-device iplmeas # + +# +# NFS measurement system +# +pseudo-device nfsmeas # + +# +# Removable Volume support +# +pseudo-device vol # + +# +# Point-to-Point Protocol support +# +pseudo-device ppp 2 # diff --git a/bsd/conf/MASTER.ppc b/bsd/conf/MASTER.ppc new file mode 100644 index 000000000..ddcf51100 --- /dev/null +++ b/bsd/conf/MASTER.ppc @@ -0,0 +1,87 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +###################################################################### +# +# Master Apple configuration file (see the master machine independent +# configuration file for a description of the file format). 
+# +###################################################################### +# +# Apple (PSEUDO-)DEVICES (select any combination) +# ex = Excelan EXOS 202 Ethernet interface +# ip = Interphase V/SMD 3200 disk controller +# od = Canon OMD-1 Optical Disk +# rd = RAM disk +# sd = SCSI disk +# sg = Generic SCSI Device +# st = SCSI tape +# fd = Floppy Disk +# en = Integrated Ethernet controller +# dsp = DSP560001 digital signal processor +# iplmeas = ipl time measurement +# nextp = NeXT Laser Printer +# sound = sound I/O +# vol = removable volume support device +# venip = virtual Ethernet/IP network interface +# zs = Serial device +# +# MULTIPROCESSOR SUPPORT (select exactly one) +# multi = support 4 processors +# uni = supports single processor +# +# SPECIAL CHARACTERISTICS (select any combination) +# gdb = GNU kernel debugger +# posix_kern = POSIX support +# +# CPU TYPE (select exactly one) +# NeXT = FIXME +# +###################################################################### +# +# Standard Apple Research Configurations: +# -------- ----- -------- --------------- +# +# RELEASE = [ppc mach medium vol pst gdb simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs noprofiling hfs volfs devfs synthfs netat mrouting ipdivert ipfirewall] +# RELEASE_TRACE = [RELEASE kdebug] +# PROFILE = [ppc mach medium vol pst gdb debug simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs profile hfs volfs devfs synthfs netat mrouting ipdivert ipfirewall] +# DEBUG = [ppc mach medium vol pst gdb debug simple_clock kernstack nfsclient nfsserver quota fifo fdesc union ffs cd9660 compat_43 revfs profiling hfs volfs devfs synthfs netat mrouting mach_assert ipdivert ipfirewall] +# DEBUG_TRACE = [DEBUG kdebug] +# +###################################################################### +# +machine "ppc" # +cpu "ppc" # + +options GDB # GNU kernel debugger # +options DEBUG # general debugging code # +options SHOW_SPACE # 
print size of structures # +options EVENTMETER # event meter support # +options FP_EMUL # floating point emulation # +options UXPR # user-level XPR package # +config mach_kernel swap generic # + +options EVENT # + +# +# Ipl measurement system +# +pseudo-device iplmeas # + +# +# NFS measurement system +# +pseudo-device nfsmeas # + +# +# Removable Volume support +# +pseudo-device vol # + +# +# Point-to-Point Protocol support +# +pseudo-device ppp 2 # diff --git a/bsd/conf/Makefile b/bsd/conf/Makefile new file mode 100644 index 000000000..5a0b12a67 --- /dev/null +++ b/bsd/conf/Makefile @@ -0,0 +1,62 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + tools + +COMP_SUBDIRS = + +INST_SUBDIRS = + +ifndef BSD_KERNEL_CONFIG +export BSD_KERNEL_CONFIG = $(KERNEL_CONFIG) +endif + +COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: + make build_setup + +$(COMPOBJROOT)/$(BSD_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ + $(SOURCE)/MASTER.$(arch_config) \ + $(SOURCE)/Makefile.template \ + $(SOURCE)/Makefile.$(arch_config) \ + $(SOURCE)/files \ + $(SOURCE)/files.$(arch_config) \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf + @echo "Running doconf for $(BSD_KERNEL_CONFIG)"; + (doconf_target=$(addsuffix /conf, $(TARGET)); \ + echo $${doconf_target};\ + $(MKDIR) $${doconf_target}; \ + cd $${doconf_target}; \ + rm -f $(notdir $?); \ + cp $? 
$${doconf_target}; \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(arch_config) -d $(TARGET)/$(BSD_KERNEL_CONFIG) $(BSD_KERNEL_CONFIG); \ + ); + +.ORDER: $(COMPOBJROOT)/$(BSD_KERNEL_CONFIG)/Makefile + +do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ + $(COMPOBJROOT)/$(BSD_KERNEL_CONFIG)/Makefile + +do_all: do_setup_conf + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(BSD_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + (cd $(COMPOBJROOT)/$(BSD_KERNEL_CONFIG); \ + next_source=$(subst conf/,,$(SOURCE)); \ + ${MAKE} MAKEFILES=$(TARGET)/$(BSD_KERNEL_CONFIG)/Makefile \ + SOURCE=$${next_source} \ + TARGET=$(TARGET) \ + build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(BSD_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/bsd/conf/Makefile.i386 b/bsd/conf/Makefile.i386 new file mode 100644 index 000000000..2f6232c14 --- /dev/null +++ b/bsd/conf/Makefile.i386 @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for i386 +###################################################################### + +###################################################################### +#END Machine dependent Makefile fragment for i386 +###################################################################### + diff --git a/bsd/conf/Makefile.ppc b/bsd/conf/Makefile.ppc new file mode 100644 index 000000000..7786ccbd6 --- /dev/null +++ b/bsd/conf/Makefile.ppc @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for ppc +###################################################################### + +###################################################################### +#END Machine dependent Makefile fragment for ppc +###################################################################### 
+ diff --git a/bsd/conf/Makefile.template b/bsd/conf/Makefile.template new file mode 100644 index 000000000..adcab81bd --- /dev/null +++ b/bsd/conf/Makefile.template @@ -0,0 +1,105 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# Export IDENT for sub-makefiles +# +export IDENT + +# +# XXX: INCFLAGS +# +INCFLAGS_MAKEFILE= -I$(SOURCE) -I$(SOURCE)include -I$(SOURCE)netat -I$(SOURCE)netat/h -I$(SOURCE)netat/at -I$(SOURCE_DIR) + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +# +# XXX: CFLAGS +# +CFLAGS+= -imacros meta_features.h -DARCH_PRIVATE -DKERNEL -DDRIVER_PRIVATE -D_KERNEL_BUILD -DKERNEL_BUILD -DMACH_KERNEL -DBSD_BUILD -DNCPUS=1 -Wno-four-char-constants -fpascal-strings -D__APPLE__ -I. + +# +# Directories for mig generated files +# +COMP_SUBDIRS = + +# +# Make sure we don't remove this by accident if interrupted at the wrong +# time. +# +.PRECIOUS: Makefile + +VERSION_FILES= \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.major \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.minor \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.variant + +COPYRIGHT_FILES = \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright + +# +# Theses macros are filled in by the config program depending on the +# current configuration. The MACHDEP macro is replaced by the +# contents of the machine dependent makefile template and the others +# are replaced by the corresponding symbol definitions for the +# configuration. 
+# + +%OBJS + +%CFILES + +%MFILES + +%SFILES + +%BFILES + +%ORDERED +%MACHDEP + +# +# OBJSDEPS is the set of files (defined in the machine dependent +# template if necessary) which all objects depend on (such as an +# in-line assembler expansion filter) +# +${OBJS}: ${OBJSDEPS} + + +%LOAD + +LDOBJS = $(OBJS) + +$(COMPONENT).o: $(LDOBJS) + @echo "[ creating $(COMPONENT).o ]" + $(RM) $(RMFLAGS) vers.c + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} + ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c + @echo [ updating $(COMPONENT).o ${BSD_KERNEL_CONFIG} ] + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o + +do_depend: do_all + ${MD} -u Makedep -f -d `ls *.d`; + +do_all: $(COMPONENT).o + +do_build_all: do_depend + +%RULES + +-include Makedep + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/bsd/conf/compat_hdrs b/bsd/conf/compat_hdrs new file mode 100644 index 000000000..9229d66db --- /dev/null +++ b/bsd/conf/compat_hdrs @@ -0,0 +1,80 @@ +DELETED sys linedesc.h +DELETED sys mach_extra.h +kernserv kern kalloc.h +kernserv kern lock.h +kernserv kern queue.h +kernserv kern sched_prim.h +kernserv sys printf.h +kernserv next loadable_fs.h +mach kern exc.defs +mach kern exc.h +mach kern mach.defs +mach kern mach_host.defs +mach kern mach_host.h +mach kern mach_interface.h +mach kern mach_param.h +mach kern mach_traps.h +mach kern mach_types.defs +mach kern mach_types.h +mach kern std_types.defs +mach kern std_types.h +mach kern syscall_sw.h +mach sys boolean.h +mach sys exception.h +mach sys features.h +mach sys host_info.h +mach sys kern_return.h +mach sys machine.h +mach sys message.h +mach sys mig_errors.h +mach sys msg_type.h +mach sys notify.h +mach sys policy.h +mach sys port.h +mach sys processor_info.h +mach sys task_info.h +mach sys task_special_ports.h +mach sys thread_info.h +mach sys thread_special_ports.h +mach sys thread_status.h COMPATMACHINE +mach sys thread_switch.h +mach 
sys time_stamp.h DELETED +mach sys time_value.h +mach vm memory_object.h +mach vm vm_inherit.h +mach vm vm_param.h COMPATMACHINE +mach vm vm_prot.h +mach vm vm_statistics.h +mach_debug kern ipc_statistics.h DELETED +mach_debug kern mach_debug.defs DELETED +mach_debug kern mach_debug.h DELETED +mach_debug kern mach_debug_types.defs DELETED +mach_debug kern mach_debug_types.h DELETED +mach_debug kern zone_info.h DELETED +bsd/dev nextdev ldd.h +bsd/dev nextdev npio.h +bsd/dev nextdev scsireg.h +bsd/dev nextdev fd_extern.h +bsd/dev nextdev disk.h +bsd/dev nextdev zsreg.h zsio.h +bsd/machine next cpu.h +bsd/machine next machparam.h +bsd/machine next param.h +bsd/machine next psl.h +bsd/machine next reg.h +bsd/machine next signal.h +bsd/machine next spl.h +bsd/machine next table.h +bsd/machine next user.h +bsd/machine next vmparam.h +bsd/sys kern mach_swapon.h +kernserv next printf.h +kernserv next us_timer.h +mach/machine next boolean.h +mach/machine next exception.h +mach/machine next kern_return.h +mach/machine next syscall_sw.h +mach/machine next thread_status.h COMPATMACHINE +mach/machine next time_stamp.h DELETED +mach/machine next vm_param.h COMPATMACHINE +mach/machine next vm_types.h diff --git a/bsd/conf/compat_hdrs.awk b/bsd/conf/compat_hdrs.awk new file mode 100644 index 000000000..643e7dc1e --- /dev/null +++ b/bsd/conf/compat_hdrs.awk @@ -0,0 +1,22 @@ +BEGIN { + hdr = "#warning Compatibility header file imported, use <%s/%s>\n" \ + "#import\t<%s/%s>\n" +} +/^#/ { # skip comments in data file + continue; +} +/COMPATMACHINE/ { + ofile = sprintf("compat/%s/%s", $2, $3); + printf("#import\t\n", $3) > ofile + printf(hdr, $1, $3, $1, $3) > ofile; + continue; +} +/DELETED/ { + ofile = sprintf("compat/%s/%s", $2, $3); + printf("#error This file has been removed\n") > ofile; + continue; +} +{ + ofile = sprintf("compat/%s/%s", $2, $3); + printf(hdr, $1, $NF, $1, $NF) > ofile; +} diff --git a/bsd/conf/copyright b/bsd/conf/copyright new file mode 100644 index 
000000000..8930fb873 --- /dev/null +++ b/bsd/conf/copyright @@ -0,0 +1,6 @@ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ diff --git a/bsd/conf/files b/bsd/conf/files new file mode 100644 index 000000000..fc46bf79f --- /dev/null +++ b/bsd/conf/files @@ -0,0 +1,572 @@ +# +# Mach Operating System +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# N.B. "kern/lock.c" is listed as "optional cpus" so that config will +# create an "cpus.h" file. +# +OPTIONS/hw_ast optional hw_ast +OPTIONS/hw_footprint optional hw_footprint +OPTIONS/kernserv optional kernserv +OPTIONS/mach_assert optional mach_assert +OPTIONS/mach_compat optional mach_compat +OPTIONS/mach_counters optional mach_counters +OPTIONS/mach_debug optional mach_debug +OPTIONS/mach_fastlink optional mach_fastlink +OPTIONS/mach_fixpri optional mach_fixpri +OPTIONS/mach_host optional mach_host +OPTIONS/mach_ipc_compat optional mach_ipc_compat +OPTIONS/mach_ipc_debug optional mach_ipc_debug +OPTIONS/mach_ipc_test optional mach_ipc_test +OPTIONS/mach_kdb optional mach_kdb +OPTIONS/mach_ldebug optional mach_ldebug +OPTIONS/mach_load optional mach_load +OPTIONS/mach_machine_routines optional mach_machine_routines +OPTIONS/rev_endian_fs optional rev_endian_fs +OPTIONS/mach_net optional mach_net +OPTIONS/mach_np optional mach_np +OPTIONS/mach_old_vm_copy optional mach_old_vm_copy +OPTIONS/mach_pagemap optional mach_pagemap +OPTIONS/mach_sctimes optional mach_sctimes +OPTIONS/mach_vm_debug optional mach_vm_debug +OPTIONS/mach_xp optional mach_xp +OPTIONS/mach_xp_fpd optional mach_xp_fpd +OPTIONS/quota optional quota +OPTIONS/simple_clock optional simple_clock +OPTIONS/stat_time optional stat_time +OPTIONS/xpr_debug optional 
xpr_debug +OPTIONS/kdebug optional kdebug +OPTIONS/nfsclient optional nfsclient +OPTIONS/nfsserver optional nfsserver +OPTIONS/driverkit optional driverkit +OPTIONS/mallocdebug optional mallocdebug +OPTIONS/kernobjc optional kernobjc +OPTIONS/kernremote optional kernremote +OPTIONS/uxpr optional uxpr +OPTIONS/kernel_stack optional kernel_stack +OPTIONS/norma_ipc optional norma_ipc +OPTIONS/norma_device optional norma_device +OPTIONS/norma_vm optional norma_vm +OPTIONS/norma_task optional norma_task +OPTIONS/norma_ether optional norma_ether +OPTIONS/new_vm_code optional new_vm_code +OPTIONS/old_vm_code optional old_vm_code +OPTIONS/compat_43 optional compat_43 +OPTIONS/diagnostic optional diagnostic +OPTIONS/ktrace optional ktrace +OPTIONS/profiling optional profiling + +# +# Network options +# +OPTIONS/multicast optional multicast +OPTIONS/mrouting optional mrouting +OPTIONS/routing optional routing +OPTIONS/eon optional eon +OPTIONS/tpip optional tpip +OPTIONS/ns optional ns +OPTIONS/iso optional iso +OPTIONS/tuba optional tuba +OPTIONS/ccitt optional ccitt +OPTIONS/hdlc optional hdlc +OPTIONS/llc optional llc +OPTIONS/gateway optional gateway +OPTIONS/ipx optional ipx +OPTIONS/tun optional tun +OPTIONS/vlan optional vlan +OPTIONS/bpfilter optional bpfilter +OPTIONS/sl optional sl +OPTIONS/ppp optional ppp +OPTIONS/sppp optional sppp +OPTIONS/ppp_deflate optional ppp_deflate +OPTIONS/disc optional disc +OPTIONS/ether optional ether +OPTIONS/fddi optional fddi + +OPTIONS/ipfilter optional ipfilter +OPTIONS/ipdivert optional ipdivert +OPTIONS/dummynet optional dummynet +OPTIONS/ipfirewall optional ipfirewall +OPTIONS/tcpdebug optional tcpdebug +OPTIONS/bridge optional bridge +OPTIONS/faith optional faith +OPTIONS/gif optional gif +OPTIONS/netat optional netat + +# +# Filesystem options +# +OPTIONS/ffs optional ffs +OPTIONS/hfs optional hfs +OPTIONS/hfs_hardlinks optional hfs +OPTIONS/mfs optional mfs +OPTIONS/fdesc optional fdesc +OPTIONS/fifo optional fifo 
+OPTIONS/kernfs optional kernfs +OPTIONS/nullfs optional nullfs +OPTIONS/portal optional portal +OPTIONS/procfs optional procfs +OPTIONS/umapfs optional umapfs +OPTIONS/union optional union +OPTIONS/cd9660 optional cd9660 +OPTIONS/volfs optional volfs +OPTIONS/devfs optional devfs +OPTIONS/synthfs optional synthfs + +bsd/libkern/random.c standard +bsd/libkern/scanc.c standard +bsd/libkern/skpc.c standard +bsd/libkern/inet_ntoa.c standard +bsd/libkern/bcd.c standard + +bsd/vfs/vfs_bio.c standard +bsd/vfs/vfs_cache.c standard +bsd/vfs/vfs_cluster.c standard +bsd/vfs/vfs_conf.c standard +bsd/vfs/vfs_init.c standard +bsd/vfs/vfs_lookup.c standard +bsd/vfs/vfs_subr.c standard +bsd/vfs/vfs_syscalls.c standard +bsd/vfs/vfs_support.c standard +bsd/vfs/vfs_utfconv.c standard +bsd/vfs/vfs_vnops.c standard +bsd/vfs/vnode_if.c standard + +bsd/miscfs/deadfs/dead_vnops.c standard +bsd/miscfs/fdesc/fdesc_vfsops.c optional fdesc +bsd/miscfs/fdesc/fdesc_vnops.c optional fdesc +bsd/miscfs/fifofs/fifo_vnops.c optional fifo +bsd/miscfs/kernfs/kernfs_vfsops.c optional kernfs +bsd/miscfs/kernfs/kernfs_vnops.c optional kernfs +bsd/miscfs/nullfs/null_subr.c optional nullfs +bsd/miscfs/nullfs/null_vfsops.c optional nullfs +bsd/miscfs/nullfs/null_vnops.c optional nullfs +bsd/miscfs/portal/portal_vfsops.c optional portal +bsd/miscfs/portal/portal_vnops.c optional portal +bsd/miscfs/procfs/procfs_subr.c optional procfs +bsd/miscfs/procfs/procfs_vnops.c optional procfs +bsd/miscfs/procfs/procfs_vfsops.c optional procfs +bsd/miscfs/procfs/procfs_note.c optional procfs +bsd/miscfs/procfs/procfs_mem.c optional procfs +bsd/miscfs/procfs/procfs_ctl.c optional procfs +bsd/miscfs/procfs/procfs_status.c optional procfs +bsd/miscfs/procfs/procfs_regs.c optional procfs +bsd/miscfs/procfs/procfs_fpregs.c optional procfs +bsd/miscfs/specfs/spec_vnops.c standard +bsd/miscfs/umapfs/umap_subr.c optional umapfs +bsd/miscfs/umapfs/umap_vfsops.c optional umapfs +bsd/miscfs/umapfs/umap_vnops.c optional umapfs 
+bsd/miscfs/union/union_subr.c optional union +bsd/miscfs/union/union_vfsops.c optional union +bsd/miscfs/union/union_vnops.c optional union + +bsd/miscfs/volfs/volfs_vfsops.c optional volfs +bsd/miscfs/volfs/volfs_vnops.c optional volfs + +bsd/miscfs/devfs/devfs_tree.c optional devfs +bsd/miscfs/devfs/devfs_vnops.c optional devfs +bsd/miscfs/devfs/devfs_vfsops.c optional devfs + +bsd/miscfs/synthfs/synthfs_vfsops.c optional synthfs +bsd/miscfs/synthfs/synthfs_vnops.c optional synthfs +bsd/miscfs/synthfs/synthfs_util.c optional synthfs + +bsd/isofs/cd9660/cd9660_bmap.c optional cd9660 +bsd/isofs/cd9660/cd9660_lookup.c optional cd9660 +bsd/isofs/cd9660/cd9660_node.c optional cd9660 +bsd/isofs/cd9660/cd9660_rrip.c optional cd9660 +bsd/isofs/cd9660/cd9660_util.c optional cd9660 +bsd/isofs/cd9660/cd9660_vfsops.c optional cd9660 +bsd/isofs/cd9660/cd9660_vnops.c optional cd9660 + +bsd/net/if_spppsubr.c optional sppp +bsd/net/slcompress.c optional i4bipr +bsd/net/bpf.c optional bpfilter +bsd/net/bpf_filter.c optional bpfilter +bsd/net/bridge.c optional bridge +bsd/net/bsd_comp.c optional ppp_bsdcomp +bsd/net/if.c standard +bsd/net/if_atmsubr.c optional atm +bsd/net/if_disc.c optional disc +bsd/net/dlil.c standard +bsd/net/ether_if_module.c optional ether +bsd/net/ether_at_pr_module.c optional ether +bsd/net/ether_inet_pr_module.c optional ether +bsd/net/ether_inet6_pr_module.c optional ether inet6 +bsd/net/if_ethersubr.c optional ether +bsd/net/if_fddisubr.c optional fddi +bsd/net/if_loop.c optional loop +bsd/net/if_media.c standard +bsd/net/if_mib.c standard +bsd/net/if_sl.c optional sl +bsd/net/if_spppsubr.c optional sppp +bsd/net/if_tun.c optional tun +bsd/net/if_vlan.c optional vlan +bsd/net/kext_net.c standard +bsd/net/ndrv.c standard +bsd/net/ppp_deflate.c optional ppp_deflate +bsd/net/radix.c standard +bsd/net/raw_cb.c standard +bsd/net/raw_usrreq.c standard +bsd/net/route.c standard +bsd/net/rtsock.c standard +bsd/net/slcompress.c optional ppp 
+bsd/net/slcompress.c optional sl +bsd/net/zlib.c optional ppp_deflate +bsd/net/netisr.c standard +bsd/net/zlib.c optional ipsec +bsd/net/if_dummy.c optional dummy +bsd/net/if_gif.c optional gif +#bsd/net/radish.c standard +bsd/net/if_faith.c optional faith +bsd/net/net_osdep.c optional ipsec +bsd/net/net_osdep.c optional inet6 + + +bsd/netccitt//ccitt_proto.c optional ccitt +bsd/netccitt/hd_debug.c optional hdlc +bsd/netccitt/hd_input.c optional hdlc +bsd/netccitt/hd_output.c optional hdlc +bsd/netccitt/hd_subr.c optional hdlc +bsd/netccitt/hd_timer.c optional hdlc +bsd/netccitt/if_x25subr.c optional ccitt +bsd/netccitt/llc_input.c optional llc +bsd/netccitt/llc_output.c optional llc +bsd/netccitt/llc_subr.c optional llc +bsd/netccitt/llc_timer.c optional llc +bsd/netccitt/pk_acct.c optional ccitt +bsd/netccitt/pk_debug.c optional ccitt +bsd/netccitt/pk_input.c optional ccitt +bsd/netccitt/pk_llcsubr.c optional llc hdlc +bsd/netccitt/pk_output.c optional ccitt +bsd/netccitt/pk_subr.c optional ccitt +bsd/netccitt/pk_timer.c optional ccitt +bsd/netccitt/pk_usrreq.c optional ccitt + +bsd/netinet/fil.c optional ipfilter inet +bsd/netinet/if_atm.c optional atm +bsd/netinet/if_ether.c optional ether +bsd/netinet/igmp.c standard +bsd/netinet/in.c standard +bsd/netinet/in_bootp.c standard +bsd/netinet/in_pcb.c standard +bsd/netinet/in_proto.c standard +bsd/netinet/in_rmx.c standard +bsd/netinet/ip_auth.c optional ipfilter inet +bsd/netinet/ip_divert.c optional ipdivert +bsd/netinet/ip_dummynet.c optional dummynet +bsd/netinet/ip_fil.c optional ipfilter inet +bsd/netinet/ip_flow.c standard +bsd/netinet/ip_frag.c optional ipfilter inet +bsd/netinet/ip_fw.c optional ipfirewall +bsd/netinet/ip_icmp.c standard +bsd/netinet/ip_input.c standard +bsd/netinet/ip_log.c optional ipfilter inet +bsd/netinet/ip_mroute.c standard +bsd/netinet/ip_nat.c optional ipfilter inet +bsd/netinet/ip_output.c standard +bsd/netinet/ip_proxy.c optional ipfilter inet +bsd/netinet/ip_state.c optional 
ipfilter inet +bsd/netinet/mlf_ipl.c optional ipfilter inet +bsd/netinet/raw_ip.c standard +bsd/netinet/tcp_debug.c optional tcpdebug +bsd/netinet/tcp_input.c standard +bsd/netinet/tcp_output.c standard +bsd/netinet/tcp_subr.c standard +bsd/netinet/tcp_timer.c standard +bsd/netinet/tcp_usrreq.c standard +bsd/netinet/udp_usrreq.c standard +bsd/netinet/in_gif.c optional gif inet +bsd/netinet/ip_ecn.c optional inet inet6 +bsd/netinet/ip_ecn.c optional inet ipsec +bsd/netinet/ip_encap.c optional inet inet6 +bsd/netinet/ip_encap.c optional inet ipsec + +bsd/netinet6/in6.c optional inet6 +bsd/netinet6/in6_gif.c optional gif inet6 +bsd/netinet6/in6_ifattach.c optional inet6 +bsd/netinet6/in6_cksum.c optional inet6 +bsd/netinet6/in6_pcb.c optional inet6 +bsd/netinet6/in6_src.c optional inet6 +#bsd/netinet6/in6_pcbsubr.c optional inet6 +bsd/netinet6/in6_proto.c optional inet6 +bsd/netinet6/in6_rmx.c optional inet6 +bsd/netinet6/in6_prefix.c optional inet6 +bsd/netinet6/dest6.c optional inet6 +bsd/netinet6/frag6.c optional inet6 +bsd/netinet6/icmp6.c optional inet6 +bsd/netinet6/ip6_input.c optional inet6 +bsd/netinet6/ip6_forward.c optional inet6 +bsd/netinet6/ip6_mroute.c optional inet6 +bsd/netinet6/ip6_output.c optional inet6 +bsd/netinet6/route6.c optional inet6 +bsd/netinet6/mld6.c optional inet6 +bsd/netinet6/nd6.c optional inet6 +bsd/netinet6/nd6_nbr.c optional inet6 +bsd/netinet6/nd6_rtr.c optional inet6 +bsd/netinet6/raw_ip6.c optional inet6 +bsd/netinet6/udp6_usrreq.c optional inet6 +bsd/netinet6/ah_core.c optional ipsec +bsd/netinet6/esp_core.c optional ipsec +bsd/netinet6/ipsec.c optional ipsec +bsd/netinet6/ah_output.c optional ipsec +bsd/netinet6/ah_input.c optional ipsec +bsd/netinet6/esp_output.c optional ipsec +bsd/netinet6/esp_input.c optional ipsec +bsd/netinet6/ipcomp_core.c optional ipsec +bsd/netinet6/ipcomp_input.c optional ipsec +bsd/netinet6/ipcomp_output.c optional ipsec +bsd/netinet6/ip6_fw.c optional ipv6firewall +bsd/netinet6/natpt_dispatch.c 
optional inet6 natpt +bsd/netinet6/natpt_list.c optional inet6 natpt +bsd/netinet6/natpt_log.c optional inet6 natpt +bsd/netinet6/natpt_rule.c optional inet6 natpt +bsd/netinet6/natpt_trans.c optional inet6 natpt +bsd/netinet6/natpt_tslot.c optional inet6 natpt +bsd/netinet6/natpt_usrreq.c optional inet6 natpt + +bsd/netkey/key.c optional ipsec +bsd/netkey/key_debug.c optional ipsec +bsd/netkey/keysock.c optional ipsec +bsd/netkey/keydb.c optional ipsec + +bsd/kern/md5c.c optional inet6 +bsd/kern/md5c.c optional ipsec +#bsd/crypto/md5.c optional ipsec +#bsd/crypto/hmac_md5.c optional ipsec +bsd/crypto/sha1.c optional ipsec +bsd/crypto/des/des_cbc.c optional ipsec +bsd/crypto/des/des_ecb.c optional ipsec +bsd/crypto/des/des_setkey.c optional ipsec +bsd/crypto/des/des_3cbc.c optional ipsec +bsd/crypto/blowfish/bf_cbc.c optional ipsec +bsd/crypto/blowfish/bf_cbc_m.c optional ipsec +bsd/crypto/blowfish/bf_enc.c optional ipsec +bsd/crypto/blowfish/bf_skey.c optional ipsec +bsd/crypto/cast128/cast128.c optional ipsec +bsd/crypto/cast128/cast128_cbc.c optional ipsec +bsd/crypto/rc5/rc5.c optional ipsec +bsd/crypto/rc5/rc5_cbc.c optional ipsec + + +#bsd/netpm/pm_aTT.c optional pm +#bsd/netpm/pm_ams.c optional pm +#bsd/netpm/pm_dispatch.c optional pm +#bsd/netpm/pm_filter.c optional pm +#bsd/netpm/pm_list.c optional pm +#bsd/netpm/pm_log.c optional pm +#bsd/netpm/pm_nat.c optional pm +#bsd/netpm/pm_pmd.c optional pm +#bsd/netpm/pm_route.c optional pm +#bsd/netpm/pm_usrreq.c optional pm + + + +bsd/netns/idp_usrreq.c optional ns +bsd/netns/ns.c optional ns +bsd/netns/ns_error.c optional ns +bsd/netns/ns_input.c optional ns +bsd/netns/ns_ip.c optional ns +bsd/netns/ns_output.c optional ns +bsd/netns/ns_pcb.c optional ns +bsd/netns/ns_proto.c optional ns +bsd/netns/spp_debug.c optional ns +bsd/netns/spp_usrreq.c optional ns + +bsd/netat/at.c optional netat +bsd/netat/at_proto.c optional netat +bsd/netat/at_pcb.c optional netat +bsd/netat/ddp_usrreq.c optional netat 
+bsd/netat/atp_alloc.c optional netat +bsd/netat/atp_misc.c optional netat +bsd/netat/atp_open.c optional netat +bsd/netat/atp_read.c optional netat +bsd/netat/atp_write.c optional netat +bsd/netat/asp_proto.c optional netat +bsd/netat/ddp_aep.c optional netat +bsd/netat/ddp.c optional netat +bsd/netat/ddp_brt.c optional netat +bsd/netat/ddp_proto.c optional netat +bsd/netat/ddp_nbp.c optional netat +bsd/netat/ddp_rtmp.c optional netat +bsd/netat/ddp_sip.c optional netat +bsd/netat/ddp_rtmptable.c optional netat +bsd/netat/ddp_r_rtmp.c optional netat +bsd/netat/ddp_r_zip.c optional netat +bsd/netat/ddp_aarp.c optional netat +bsd/netat/ddp_lap.c optional netat +bsd/netat/adsp_CLDeny.c optional netat +bsd/netat/adsp_Read.c optional netat +bsd/netat/adsp_Timer.c optional netat +bsd/netat/adsp_attention.c optional netat +bsd/netat/adsp_CLListen.c optional netat +bsd/netat/adsp_NewCID.c optional netat +bsd/netat/adsp_TimerElem.c optional netat +bsd/netat/adsp_reset.c optional netat +bsd/netat/adsp_Close.c optional netat +bsd/netat/adsp_Open.c optional netat +bsd/netat/adsp_RxAttn.c optional netat +bsd/netat/adsp_Write.c optional netat +bsd/netat/adsp_Control.c optional netat +bsd/netat/adsp_RxData.c optional netat +bsd/netat/adsp.c optional netat +bsd/netat/adsp_Init.c optional netat +bsd/netat/adsp_Options.c optional netat +bsd/netat/adsp_stream.c optional netat +bsd/netat/adsp_InitGlobals.c optional netat +bsd/netat/adsp_Packet.c optional netat +bsd/netat/adsp_Status.c optional netat +bsd/netat/adsp_misc.c optional netat +bsd/netat/sys_glue.c optional netat +bsd/netat/sys_dep.c optional netat +bsd/netat/drv_dep.c optional netat +bsd/netat/aurp_aurpd.c optional netat +bsd/netat/aurp_cfg.c optional netat +bsd/netat/aurp_gdata.c optional netat +bsd/netat/aurp_misc.c optional netat +bsd/netat/aurp_open.c optional netat +bsd/netat/aurp_rd.c optional netat +bsd/netat/aurp_ri.c optional netat +bsd/netat/aurp_rx.c optional netat +bsd/netat/aurp_tickle.c optional netat 
+bsd/netat/aurp_tx.c optional netat +bsd/netat/aurp_zi.c optional netat + +bsd/nfs/krpc_subr.c optional nfsclient +bsd/nfs/nfs_bio.c optional nfsclient +bsd/nfs/nfs_boot.c optional nfsclient +bsd/nfs/nfs_node.c optional nfsclient +bsd/nfs/nfs_nqlease.c optional nfsclient nfsserver +bsd/nfs/nfs_serv.c optional nfsserver +bsd/nfs/nfs_socket.c optional nfsclient nfsserver +bsd/nfs/nfs_srvcache.c optional nfsserver +bsd/nfs/nfs_subs.c optional nfsclient nfsserver +bsd/nfs/nfs_syscalls.c optional nfsclient nfsserver +bsd/nfs/nfs_vfsops.c optional nfsclient +bsd/nfs/nfs_vnops.c optional nfsclient + +bsd/ufs/ffs/ffs_alloc.c standard +bsd/ufs/ffs/ffs_balloc.c standard +bsd/ufs/ffs/ffs_inode.c standard +bsd/ufs/ffs/ffs_subr.c standard +bsd/ufs/ffs/ffs_tables.c standard +bsd/ufs/ffs/ffs_vfsops.c standard +bsd/ufs/ffs/ffs_vnops.c standard +bsd/ufs/mfs/mfs_vfsops.c optional mfs +bsd/ufs/mfs/mfs_vnops.c optional mfs +bsd/ufs/ufs/ufs_bmap.c standard +bsd/ufs/ufs/ufs_byte_order.c optional rev_endian_fs +bsd/ufs/ufs/ufs_ihash.c standard +bsd/ufs/ufs/ufs_inode.c standard +bsd/ufs/ufs/ufs_lockf.c standard +bsd/ufs/ufs/ufs_lookup.c standard +bsd/ufs/ufs/ufs_quota.c optional quota +bsd/ufs/ufs/ufs_vfsops.c standard +bsd/ufs/ufs/ufs_vnops.c standard + +bsd/hfs/hfs_btreeio.c optional hfs +bsd/hfs/hfs_encodings.c optional hfs +bsd/hfs/hfs_endian.c optional hfs +bsd/hfs/hfs_link.c optional hfs +bsd/hfs/hfs_lockf.c optional hfs +bsd/hfs/hfs_lookup.c optional hfs +bsd/hfs/hfs_readwrite.c optional hfs +bsd/hfs/hfs_search.c optional hfs +bsd/hfs/hfs_vfsops.c optional hfs +bsd/hfs/hfs_vfsutils.c optional hfs +bsd/hfs/hfs_vhash.c optional hfs +bsd/hfs/hfs_vnodeops.c optional hfs +bsd/hfs/MacOSStubs.c optional hfs +bsd/hfs/hfscommon/BTree/BTree.c optional hfs +bsd/hfs/hfscommon/BTree/BTreeAllocate.c optional hfs +bsd/hfs/hfscommon/BTree/BTreeMiscOps.c optional hfs +bsd/hfs/hfscommon/BTree/BTreeNodeOps.c optional hfs +bsd/hfs/hfscommon/BTree/BTreeTreeOps.c optional hfs 
+bsd/hfs/hfscommon/Catalog/Catalog.c optional hfs +bsd/hfs/hfscommon/Catalog/CatalogIterators.c optional hfs +bsd/hfs/hfscommon/Catalog/CatalogUtilities.c optional hfs +bsd/hfs/hfscommon/Catalog/FileIDsServices.c optional hfs +bsd/hfs/hfscommon/Misc/BTreeWrapper.c optional hfs +bsd/hfs/hfscommon/Misc/FileExtentMapping.c optional hfs +bsd/hfs/hfscommon/Misc/GenericMRUCache.c optional hfs +bsd/hfs/hfscommon/Misc/VolumeAllocation.c optional hfs +bsd/hfs/hfscommon/Misc/VolumeRequests.c optional hfs +bsd/hfs/hfscommon/Unicode/UnicodeWrappers.c optional hfs + +bsd/kern/bsd_init.c standard +bsd/kern/init_sysent.c standard +bsd/kern/kdebug.c standard +bsd/kern/kern_acct.c standard +bsd/kern/kern_clock.c standard +bsd/kern/kern_core.c standard +bsd/kern/kern_symfile.c standard +bsd/kern/kern_descrip.c standard +bsd/kern/kern_event.c standard +bsd/kern/kern_exec.c standard +bsd/kern/kern_exit.c standard +bsd/kern/kern_fork.c standard +bsd/kern/kern_ktrace.c optional ktrace +bsd/kern/kern_lock.c optional cpus +bsd/kern/kern_malloc.c standard +bsd/kern/kern_mman.c standard +bsd/kern/kern_physio.c standard +bsd/kern/kern_proc.c standard +bsd/kern/kern_prot.c standard +bsd/kern/kern_resource.c standard +bsd/kern/kern_shutdown.c standard +bsd/kern/kern_sig.c standard +bsd/kern/kern_subr.c standard +bsd/kern/kern_synch.c standard +bsd/kern/kern_sysctl.c standard +bsd/kern/kern_newsysctl.c standard +bsd/kern/kern_mib.c standard +bsd/kern/sysctl_init.c standard +bsd/kern/kern_time.c standard +bsd/kern/kern_xxx.c standard +bsd/kern/mach_process.c standard +bsd/kern/kern_pcsamples.c standard +bsd/kern/spl.c standard +bsd/kern/subr_log.c standard +bsd/kern/subr_prf.c standard +bsd/kern/subr_prof.c standard +bsd/kern/subr_xxx.c standard +bsd/kern/sys_generic.c standard +bsd/kern/sys_socket.c standard +bsd/kern/sys_domain.c standard +bsd/kern/syscalls.c standard +bsd/kern/tty.c standard +bsd/kern/tty_compat.c optional compat_43 +bsd/kern/tty_conf.c standard +bsd/kern/tty_pty.c optional 
pty +bsd/kern/tty_subr.c standard +bsd/kern/tty_tty.c standard +bsd/kern/ubc_subr.c standard +bsd/kern/uipc_domain.c standard +bsd/kern/uipc_mbuf.c standard +bsd/kern/uipc_mbuf2.c optional inet6 +bsd/kern/uipc_mbuf2.c optional ipsec +bsd/kern/uipc_proto.c standard +bsd/kern/uipc_socket.c standard +bsd/kern/uipc_socket2.c standard +bsd/kern/uipc_syscalls.c standard +bsd/kern/uipc_usrreq.c standard +bsd/kern/sysv_ipc.c standard +bsd/kern/sysv_shm.c standard +bsd/kern/mach_fat.c standard +bsd/kern/mach_header.c standard +bsd/kern/mach_loader.c standard +bsd/kern/posix_sem.c standard +bsd/kern/posix_shm.c standard + +bsd/vm/vnode_pager.c standard +bsd/vm/vm_unix.c standard +bsd/vm/dp_backing_file.c standard + +bsd/uxkern/ux_exception.c standard + +bsd/conf/param.c standard +./ioconf.c standard diff --git a/bsd/conf/files.i386 b/bsd/conf/files.i386 new file mode 100644 index 000000000..80015fc5f --- /dev/null +++ b/bsd/conf/files.i386 @@ -0,0 +1,20 @@ +OPTIONS/show_space optional show_space +OPTIONS/gdb optional gdb +OPTIONS/iplmeas optional iplmeas + + +bsd/dev/i386/conf.c standard +bsd/dev/i386/cons.c standard +bsd/dev/i386/mem.c standard +bsd/dev/i386/km.c standard +bsd/dev/i386/kern_machdep.c standard +bsd/dev/i386/memmove.c standard +bsd/dev/i386/stubs.c standard +bsd/dev/i386/lock_stubs.c standard +bsd/dev/i386/unix_signal.c standard +bsd/dev/i386/unix_startup.c standard + + +bsd/kern/bsd_stubs.c standard +bsd/netinet/in_cksum.c standard + diff --git a/bsd/conf/files.ppc b/bsd/conf/files.ppc new file mode 100644 index 000000000..9b4564717 --- /dev/null +++ b/bsd/conf/files.ppc @@ -0,0 +1,21 @@ +OPTIONS/show_space optional show_space +OPTIONS/gdb optional gdb +OPTIONS/iplmeas optional iplmeas + +bsd/netinet/in_cksum.c standard + +bsd/dev/ppc/conf.c standard +bsd/dev/ppc/cons.c standard +bsd/dev/ppc/mem.c standard +bsd/dev/ppc/unix_startup.c standard +bsd/dev/ppc/unix_signal.c standard +bsd/dev/ppc/ffs.s standard +bsd/dev/ppc/memmove.c standard 
+bsd/dev/ppc/machdep.c standard +bsd/dev/ppc/kern_machdep.c standard +bsd/dev/ppc/stubs.c standard +bsd/dev/ppc/km.c standard +bsd/dev/ppc/xsumas.s standard + +bsd/kern/bsd_stubs.c standard + diff --git a/bsd/conf/machine.awk b/bsd/conf/machine.awk new file mode 100644 index 000000000..3b487628d --- /dev/null +++ b/bsd/conf/machine.awk @@ -0,0 +1,18 @@ +BEGIN { + hdr = "#if\tm68k\n" \ + "#import \n" \ + "#endif\tm68k\n" \ + "#if\tm88k\n" \ + "#import \n" \ + "#endif\tm88k\n" + hdr = "#import \n" +} +/\.h$/ { + ofile = sprintf("%s/%s", loc, $1); + printf(hdr, dir, $1, dir, $1) > ofile; + continue; +} + +{ + dir = $1; loc = $2; +} diff --git a/bsd/conf/param.c b/bsd/conf/param.c new file mode 100644 index 000000000..2cb4f2d29 --- /dev/null +++ b/bsd/conf/param.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. 
+ * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)param.c 8.3 (Berkeley) 8/20/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct timezone tz = { TIMEZONE, PST }; + +#define NPROC (20 + 16 * MAXUSERS) +int maxproc = NPROC; +int nprocs = 0; /* XXX */ + +#define NTEXT (80 + NPROC / 8) /* actually the object cache */ +#define NVNODE (NPROC + NTEXT + 300) +int desiredvnodes = NVNODE + 350; + +#define MAXFILES (OPEN_MAX + 2048) +int maxfiles = MAXFILES; + +unsigned int ncallout = 16 + 2*NPROC; +int nmbclusters = NMBCLUSTERS; +int nport = NPROC / 2; + +#define MAXSOCKETS NMBCLUSTERS +int maxsockets = MAXSOCKETS; + +#define SHMMAXPGS 1024 /* XXX until we have more kmap space */ + +#ifndef SHMMAX +#define SHMMAX (SHMMAXPGS * 4096) +#endif +#ifndef SHMMIN +#define SHMMIN 1 +#endif +#ifndef SHMMNI +#define SHMMNI 32 /* <= SHMMMNI in shm.h */ +#endif +#ifndef SHMSEG +#define SHMSEG 8 +#endif +#ifndef SHMALL +#define SHMALL (SHMMAXPGS) +#endif + +struct shminfo shminfo = { + SHMMAX, + SHMMIN, + SHMMNI, + SHMSEG, + SHMALL +}; + +/* + * These have to be allocated somewhere; allocating + * them here forces loader errors if this file is omitted + * (if they've been externed everywhere else; hah!). 
+ */ +struct callout *callout; +struct cblock *cfree; +struct cblock *cfreelist = 0; +int cfreecount = 0; +struct buf *buf; +struct domain *domains; diff --git a/bsd/conf/tools/Makefile b/bsd/conf/tools/Makefile new file mode 100644 index 000000000..9df86ce8c --- /dev/null +++ b/bsd/conf/tools/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + doconf \ + newvers + +COMP_SUBDIRS = \ + doconf \ + newvers + +INST_SUBDIRS = \ + + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/conf/tools/doconf/Makefile b/bsd/conf/tools/doconf/Makefile new file mode 100644 index 000000000..2bf0b7a10 --- /dev/null +++ b/bsd/conf/tools/doconf/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)doconf + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) 
$(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/conf/tools/doconf/doconf.csh b/bsd/conf/tools/doconf/doconf.csh new file mode 100755 index 000000000..43388c11c --- /dev/null +++ b/bsd/conf/tools/doconf/doconf.csh @@ -0,0 +1,313 @@ +#!/bin/csh -f +set path = ($path .) +###################################################################### +# HISTORY +# 1-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University +# Added "-verbose" switch, so this script produces no output +# in the normal case. +# +# 10-Oct-87 Mike Accetta (mja) at Carnegie-Mellon University +# Flushed cmu_*.h and spin_locks.h +# [ V5.1(XF18) ] +# +# 6-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# Use MASTER.local and MASTER..local for generation of +# configuration files in addition to MASTER and MASTER.. +# +# 25-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Removed use of obsolete wb_*.h files when building the feature +# list; modified to save the previous configuration file and +# display the differences between it and the new file. +# [ V5.1(F8) ] +# +# 25-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# If there is no /etc/machine just print out a message telling +# user to use the -cpu option. I thought this script was supposed +# to work even without a /etc/machine, but it doesn't... and this +# is the easiest way out. 
+# +# 13-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Added "romp_fpa.h" file to extra features for the RT. +# [ V5.1(F7) ] +# +# 11-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to maintain the appropriate configuration features file +# in the "machine" directory whenever the corresponding +# configuration is generated. This replaces the old mechanism of +# storing this directly in the file since it was +# machine dependent and also precluded building programs for more +# than one configuration from the same set of sources. +# [ V5.1(F6) ] +# +# 21-Feb-87 Mike Accetta (mja) at Carnegie-Mellon University +# Fixed to require wired-in cpu type names for only those +# machines where the kernel name differs from that provided by +# /etc/machine (i.e. IBMRT => ca and SUN => sun3); updated to +# permit configuration descriptions in both machine indepedent +# and dependent master configuration files so that attributes can +# be grouped accordingly. +# [ V5.1(F3) ] +# +# 17-Jan-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to work from any directory at the same level as +# "conf"; generate configuration from both MASTER and +# MASTER. files; added -cpu switch. +# [ V5.1(F1) ] +# +# 18-Aug-86 Mike Accetta (mja) at Carnegie-Mellon University +# Added -make switch and changed meaning of -config; upgraded to +# allow multiple attributes per configuration and to define +# configurations in terms of these attributes within MASTER. +# +# 14-Apr-83 Mike Accetta (mja) at Carnegie-Mellon University +# Added -config switch to only run /etc/config without +# "make depend" and "make". 
+# +###################################################################### + +set prog=$0 +set prog=$prog:t +set nonomatch +set OBJDIR=../BUILD +if ("`/usr/bin/uname`" == "Rhapsody" ) then +set CONFIG_DIR=/usr/local/bin +else +set CONFIG_DIR=/usr/bin +endif + +unset domake +unset doconfig +unset beverbose +unset MACHINE +unset profile + +while ($#argv >= 1) + if ("$argv[1]" =~ -*) then + switch ("$argv[1]") + case "-c": + case "-config": + set doconfig + breaksw + case "-m": + case "-make": + set domake + breaksw + case "-cpu": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set MACHINE="$argv[2]" + shift + breaksw + case "-d": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set OBJDIR="$argv[2]" + shift + breaksw + case "-verbose": + set beverbose + breaksw + case "-p": + case "-profile": + set profile + breaksw + default: + echo "${prog}: ${argv[1]}: unknown switch" + exit 1 + breaksw + endsw + shift + else + break + endif +end + +if ($#argv == 0) set argv=(GENERIC) + +if (! $?MACHINE) then + if (-d /NextApps) then + set MACHINE=`hostinfo | awk '/MC680x0/ { printf("m68k") } /MC880x0/ { printf("m88k") }'` + endif +endif + +if (! $?MACHINE) then + if (-f /etc/machine) then + set MACHINE="`/etc/machine`" + else + echo "${prog}: no /etc/machine, specify machine type with -cpu" + echo "${prog}: e.g. 
${prog} -cpu VAX CONFIGURATION" + exit 1 + endif +endif + +set FEATURES_EXTRA= + +switch ("$MACHINE") + case IBMRT: + set cpu=ca + set ID=RT + set FEATURES_EXTRA="romp_dualcall.h romp_fpa.h" + breaksw + case SUN: + set cpu=sun3 + set ID=SUN3 + breaksw + default: + set cpu=`echo $MACHINE | tr A-Z a-z` + set ID=`echo $MACHINE | tr a-z A-Z` + breaksw +endsw +set FEATURES=../h/features.h +set FEATURES_H=(cs_*.h mach_*.h net_*.h\ + cputypes.h cpus.h vice.h\ + $FEATURES_EXTRA) +set MASTER_DIR=../conf +set MASTER = ${MASTER_DIR}/MASTER +set MASTER_CPU=${MASTER}.${cpu} + +set MASTER_LOCAL = ${MASTER}.local +set MASTER_CPU_LOCAL = ${MASTER_CPU}.local +if (! -f $MASTER_LOCAL) set MASTER_LOCAL = "" +if (! -f $MASTER_CPU_LOCAL) set MASTER_CPU_LOCAL = "" + +if (! -d $OBJDIR) then + echo "[ creating $OBJDIR ]" + mkdir -p $OBJDIR +endif + +foreach SYS ($argv) + set SYSID=${SYS}_${ID} + set SYSCONF=$OBJDIR/config.$SYSID + set BLDDIR=$OBJDIR + if ($?beverbose) then + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + endif + echo +$SYS \ + | \ + cat $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL - \ + $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL \ + | \ + sed -n \ + -e "/^+/{" \ + -e "s;[-+];#&;gp" \ + -e 't loop' \ + -e ': loop' \ + -e 'n' \ + -e '/^#/b loop' \ + -e '/^$/b loop' \ + -e 's;^\([^#]*\).*#[ ]*<\(.*\)>[ ]*$;\2#\1;' \ + -e 't not' \ + -e 's;\([^#]*\).*;#\1;' \ + -e 't not' \ + -e ': not' \ + -e 's;[ ]*$;;' \ + -e 's;^\!\(.*\);\1#\!;' \ + -e 'p' \ + -e 't loop' \ + -e 'b loop' \ + -e '}' \ + -e "/^[^#]/d" \ + -e 's; ; ;g' \ + -e "s;^# *\([^ ]*\)[ ]*=[ ]*\[\(.*\)\].*;\1#\2;p" \ + | \ + awk '-F#' '\ +part == 0 && $1 != "" {\ + m[$1]=m[$1] " " $2;\ + next;\ +}\ +part == 0 && $1 == "" {\ + for (i=NF;i>1;i--){\ + s=substr($i,2);\ + c[++na]=substr($i,1,1);\ + a[na]=s;\ + }\ + while (na > 0){\ + s=a[na];\ + d=c[na--];\ + if (m[s] == "") {\ + f[s]=d;\ + } else {\ + nx=split(m[s],x," ");\ + for (j=nx;j>0;j--) {\ + z=x[j];\ + a[++na]=z;\ + 
c[na]=d;\ + }\ + }\ + }\ + part=1;\ + next;\ +}\ +part != 0 {\ + if ($1 != "") {\ + n=split($1,x,",");\ + ok=0;\ + for (i=1;i<=n;i++) {\ + if (f[x[i]] == "+") {\ + ok=1;\ + }\ + }\ + if (NF > 2 && ok == 0 || NF <= 2 && ok != 0) {\ + print $2; \ + }\ + } else { \ + print $2; \ + }\ +}\ +' >$SYSCONF.new + if (-z $SYSCONF.new) then + echo "${prog}: ${$SYSID}: no such configuration in $MASTER_DIR/MASTER{,.$cpu}" + rm -f $SYSCONF.new + endif + if (! -d $BLDDIR) then + echo "[ creating $BLDDIR ]" + mkdir -p $BLDDIR + endif +# +# These paths are used by config. +# +# "builddir" is the name of the directory where kernel binaries +# are put. It is a single path element, never absolute, and is +# always relative to "objectdir". "builddir" is used by config +# solely to determine where to put files created by "config" (e.g. +# the created Makefile and *.h's.) +# +# "objectdir" is the name of the directory which will hold "builddir". +# It is a path; if relative, it is relative to the current directory +# where config is run. It's sole use is to be prepended to "builddir" +# to indicate where config-created files are to be placed (see above). +# +# "sourcedir" is the location of the sources used to build the kernel. +# It is a path; if relative, it is relative to the directory specified +# by the concatenation of "objectdir" and "builddir" (i.e. where the +# kernel binaries are put). 
+# + echo 'builddir "."' >> $SYSCONF.new + set OBJRELDIR=`relpath $OBJROOT $OBJDIR` + echo 'objectdir "'$OBJROOT'/'$OBJRELDIR'"' >> $SYSCONF.new + set SRCDIR=`dirname $SOURCE` + echo 'sourcedir "'$SRCROOT'"' >> $SYSCONF.new + if (-f $SYSCONF) then + diff $SYSCONF $SYSCONF.new + rm -f $SYSCONF.old + mv $SYSCONF $SYSCONF.old + endif + rm -f $SYSCONF + mv $SYSCONF.new $SYSCONF + if ($?doconfig) then + echo "[ configuring $SYSID ]" + if ($?profile) then + $CONFIG_DIR/config -c $MASTER_DIR -p $SYSCONF + else + $CONFIG_DIR/config -c $MASTER_DIR $SYSCONF + endif + endif + if ($?domake) then + echo "[ making $SYSID ]" + (cd $BLDDIR; make) + endif +end diff --git a/bsd/conf/tools/newvers/Makefile b/bsd/conf/tools/newvers/Makefile new file mode 100644 index 000000000..73603c753 --- /dev/null +++ b/bsd/conf/tools/newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)newvers + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) 
] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/conf/tools/newvers/newvers.csh b/bsd/conf/tools/newvers/newvers.csh new file mode 100644 index 000000000..75324d3bc --- /dev/null +++ b/bsd/conf/tools/newvers/newvers.csh @@ -0,0 +1,33 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +v="${major}.${minor}" d=`pwd` h="rcbuilder" t=`date` w=`whoami` +if [ -z "$d" -o -z "$h" -o -z "$t" ]; then + exit 1 +fi +CONFIG=`expr "$d" : '.*/\([^/]*\)$'` +d=`expr "$d" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int ${COMPONENT}_version_major = ${major};" ; + /bin/echo "int ${COMPONENT}_version_minor = ${minor};" ; + /bin/echo "char ${COMPONENT}_version_variant[] = \"${variant}\";" ; + /bin/echo "char ${COMPONENT}_version[] = \"BSD Component Version ${v}:\\n${t}; $w($h):$d\\n\";" ; + /bin/echo "char ${COMPONENT}_osrelease[] = \"${major}.${minor}\";" ; + /bin/echo "char ${COMPONENT}_ostype[] = \"BSD\";" ; +) > vers.c +if [ -s vers.suffix -o ! 
-f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/bsd/conf/version.major b/bsd/conf/version.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/bsd/conf/version.major @@ -0,0 +1 @@ +1 diff --git a/bsd/conf/version.minor b/bsd/conf/version.minor new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/bsd/conf/version.minor @@ -0,0 +1 @@ +0 diff --git a/bsd/conf/version.variant b/bsd/conf/version.variant new file mode 100644 index 000000000..e69de29bb diff --git a/bsd/crypto/Makefile b/bsd/crypto/Makefile new file mode 100644 index 000000000..d191e76d1 --- /dev/null +++ b/bsd/crypto/Makefile @@ -0,0 +1,46 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + cast128 \ + des \ + blowfish \ + rc5 + + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + cast128 \ + des \ + blowfish \ + rc5 + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + sha1.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = crypto + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = crypto + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/crypto/blowfish/Makefile b/bsd/crypto/blowfish/Makefile new file mode 100644 index 000000000..73b43dc7f --- /dev/null +++ b/bsd/crypto/blowfish/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + 
+EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + bf_locl.h bf_pi.h blowfish.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = crypto + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = crypto + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/crypto/blowfish/bf_cbc.c b/bsd/crypto/blowfish/bf_cbc.c new file mode 100644 index 000000000..e382fa993 --- /dev/null +++ b/bsd/crypto/blowfish/bf_cbc.c @@ -0,0 +1,148 @@ +/* crypto/bf/bf_cbc.c */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +#include +#include + +void BF_cbc_encrypt(in, out, length, ks, iv, encrypt) +unsigned char *in; +unsigned char *out; +long length; +BF_KEY *ks; +unsigned char *iv; +int encrypt; + { + register BF_LONG tin0,tin1; + register BF_LONG tout0,tout1,xor0,xor1; + register long l=length; + BF_LONG tin[2]; + + if (encrypt) + { + n2l(iv,tout0); + n2l(iv,tout1); + iv-=8; + for (l-=8; l>=0; l-=8) + { + n2l(in,tin0); + n2l(in,tin1); + tin0^=tout0; + tin1^=tout1; + tin[0]=tin0; + tin[1]=tin1; + BF_encrypt(tin,ks,BF_ENCRYPT); + tout0=tin[0]; + tout1=tin[1]; + l2n(tout0,out); + l2n(tout1,out); + } + if (l != -8) + { + n2ln(in,tin0,tin1,l+8); + tin0^=tout0; + tin1^=tout1; + tin[0]=tin0; + tin[1]=tin1; + BF_encrypt(tin,ks,BF_ENCRYPT); + tout0=tin[0]; + tout1=tin[1]; + l2n(tout0,out); + l2n(tout1,out); + } + l2n(tout0,iv); + l2n(tout1,iv); + } + else + { + n2l(iv,xor0); + n2l(iv,xor1); + iv-=8; + for (l-=8; l>=0; l-=8) + { + n2l(in,tin0); + n2l(in,tin1); + tin[0]=tin0; + tin[1]=tin1; + BF_encrypt(tin,ks,BF_DECRYPT); + tout0=tin[0]^xor0; + tout1=tin[1]^xor1; + l2n(tout0,out); + l2n(tout1,out); + xor0=tin0; + xor1=tin1; + } + if (l != -8) + { + n2l(in,tin0); + n2l(in,tin1); + tin[0]=tin0; + tin[1]=tin1; + BF_encrypt(tin,ks,BF_DECRYPT); + tout0=tin[0]^xor0; + tout1=tin[1]^xor1; + l2nn(tout0,tout1,out,l+8); + xor0=tin0; + xor1=tin1; + } + l2n(xor0,iv); + l2n(xor1,iv); + } + tin0=tin1=tout0=tout1=xor0=xor1=0; + tin[0]=tin[1]=0; + } + diff --git a/bsd/crypto/blowfish/bf_cbc_m.c b/bsd/crypto/blowfish/bf_cbc_m.c new file mode 100644 index 000000000..c7a56d904 --- /dev/null +++ b/bsd/crypto/blowfish/bf_cbc_m.c @@ -0,0 +1,339 @@ +/* + * heavily modified to accept mbuf, by Jun-ichiro itojun Itoh + * , 1997. + */ +/* crypto/bf/bf_cbc.c */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. 
+ * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +#include +#include +#include +#include + +#include +#include + +#define panic(x) {printf(x); return;} + +void BF_cbc_encrypt_m(m0, skip, length, key, iv, mode) + struct mbuf *m0; + int skip; + int length; + BF_KEY *key; + unsigned char *iv; + int mode; +{ + u_int8_t inbuf[8], outbuf[8]; + struct mbuf *m; + size_t off; + register BF_LONG tin0, tin1; + register BF_LONG tout0, tout1; + BF_LONG tin[2]; + + /* sanity checks */ + if (m0->m_pkthdr.len < skip) { + printf("mbuf length < skip\n"); + return; + } + if (m0->m_pkthdr.len < length) { + printf("mbuf length < encrypt length\n"); + return; + } + if (m0->m_pkthdr.len < skip + length) { + printf("mbuf length < skip + encrypt length\n"); + return; + } + if (length % 8) { + printf("length is not multiple of 8\n"); + return; + } + + m = m0; + off = 0; + + /* skip over the header */ + while (skip) { + if (!m) + panic("mbuf chain?\n"); + if (m->m_len <= skip) { + skip -= m->m_len; + m = m->m_next; + off = 0; + } else { + off = skip; + skip = 0; + } + } + + /* initialize */ + tin0 = tin1 = tout0 = tout1 = 0; + tin[0] = tin[1] = 0; + + if (mode == BF_ENCRYPT) { + u_int8_t *in, *out; + + n2l(iv, tout0); + n2l(iv, tout1); + + while (0 < length) { + if (!m) + panic("mbuf chain?\n"); + + /* + * copy the source into input buffer. + * don't update off or m, since we need to use them * later. + */ + if (off + 8 <= m->m_len) + bcopy(mtod(m, u_int8_t *) + off, &inbuf[0], 8); + else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *in; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + in = &inbuf[0]; + while (in - &inbuf[0] < 8) { + if (!p) + panic("mbuf chain?\n"); + + *in++ = *p++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! 
n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + } + + in = &inbuf[0]; + out = &outbuf[0]; + n2l(in, tin0); + n2l(in, tin1); + + tin0 ^= tout0; tin[0] = tin0; + tin1 ^= tout1; tin[1] = tin1; + BF_encrypt(tin, key, BF_ENCRYPT); + tout0 = tin[0]; l2n(tout0, out); + tout1 = tin[1]; l2n(tout1, out); + + /* + * copy the output buffer into the result. + * need to update off and m. + */ + if (off + 8 < m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + off += 8; + } else if (off + 8 == m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + do { + m = m->m_next; + } while (m && ! m->m_len); + off = 0; + } else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *out; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + out = &outbuf[0]; + while (out - &outbuf[0] < 8) { + if (!p) + panic("mbuf chain?"); + *p++ = *out++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + + m = n; + off = noff; + } + + length -= 8; + } + } else if (mode == BF_DECRYPT) { + register BF_LONG xor0, xor1; + u_int8_t *in, *out; + + xor0 = xor1 = 0; + n2l(iv, xor0); + n2l(iv, xor1); + + while (0 < length) { + if (!m) + panic("mbuf chain?\n"); + + /* + * copy the source into input buffer. + * don't update off or m, since we need to use them * later. + */ + if (off + 8 <= m->m_len) + bcopy(mtod(m, u_int8_t *) + off, &inbuf[0], 8); + else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *in; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + in = &inbuf[0]; + while (in - &inbuf[0] < 8) { + if (!p) + panic("mbuf chain?\n"); + *in++ = *p++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! 
n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + } + + in = &inbuf[0]; + out = &outbuf[0]; + n2l(in, tin0); tin[0] = tin0; + n2l(in, tin1); tin[1] = tin1; + BF_encrypt(tin, key, BF_DECRYPT); + tout0 = tin[0] ^ xor0; + tout1 = tin[1] ^ xor1; + l2n(tout0, out); + l2n(tout1, out); + xor0 = tin0; + xor1 = tin1; + + + /* + * copy the output buffer into the result. + * need to update off and m. + */ + if (off + 8 < m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + off += 8; + } else if (off + 8 == m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + do { + m = m->m_next; + } while (m && ! m->m_len); + off = 0; + } else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *out; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + out = &outbuf[0]; + while (out - &outbuf[0] < 8) { + if (!p) + panic("mbuf chain?\n"); + *p++ = *out++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + + m = n; + off = noff; + } + + length -= 8; + } + } +} diff --git a/bsd/crypto/blowfish/bf_enc.c b/bsd/crypto/blowfish/bf_enc.c new file mode 100644 index 000000000..41d302a51 --- /dev/null +++ b/bsd/crypto/blowfish/bf_enc.c @@ -0,0 +1,140 @@ +/* crypto/bf/bf_enc.c */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. 
The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#include +#include + +/* Blowfish as implemented from 'Blowfish: Springer-Verlag paper' + * (From LECTURE NOTES IN COIMPUTER SCIENCE 809, FAST SOFTWARE ENCRYPTION, + * CAMBRIDGE SECURITY WORKSHOP, CAMBRIDGE, U.K., DECEMBER 9-11, 1993) + */ + +#if (BF_ROUNDS != 16) && (BF_ROUNDS != 20) +If you set BF_ROUNDS to some value other than 16 or 20, you will have +to modify the code. 
+#endif + +void BF_encrypt(data,key,encrypt) +BF_LONG *data; +BF_KEY *key; +int encrypt; + { + register BF_LONG l,r,*p,*s; + + p=key->P; + s= &(key->S[0]); + l=data[0]; + r=data[1]; + + if (encrypt) + { + l^=p[0]; + BF_ENC(r,l,s,p[ 1]); + BF_ENC(l,r,s,p[ 2]); + BF_ENC(r,l,s,p[ 3]); + BF_ENC(l,r,s,p[ 4]); + BF_ENC(r,l,s,p[ 5]); + BF_ENC(l,r,s,p[ 6]); + BF_ENC(r,l,s,p[ 7]); + BF_ENC(l,r,s,p[ 8]); + BF_ENC(r,l,s,p[ 9]); + BF_ENC(l,r,s,p[10]); + BF_ENC(r,l,s,p[11]); + BF_ENC(l,r,s,p[12]); + BF_ENC(r,l,s,p[13]); + BF_ENC(l,r,s,p[14]); + BF_ENC(r,l,s,p[15]); + BF_ENC(l,r,s,p[16]); +#if BF_ROUNDS == 20 + BF_ENC(r,l,s,p[17]); + BF_ENC(l,r,s,p[18]); + BF_ENC(r,l,s,p[19]); + BF_ENC(l,r,s,p[20]); +#endif + r^=p[BF_ROUNDS+1]; + } + else + { + l^=p[BF_ROUNDS+1]; +#if BF_ROUNDS == 20 + BF_ENC(r,l,s,p[20]); + BF_ENC(l,r,s,p[19]); + BF_ENC(r,l,s,p[18]); + BF_ENC(l,r,s,p[17]); +#endif + BF_ENC(r,l,s,p[16]); + BF_ENC(l,r,s,p[15]); + BF_ENC(r,l,s,p[14]); + BF_ENC(l,r,s,p[13]); + BF_ENC(r,l,s,p[12]); + BF_ENC(l,r,s,p[11]); + BF_ENC(r,l,s,p[10]); + BF_ENC(l,r,s,p[ 9]); + BF_ENC(r,l,s,p[ 8]); + BF_ENC(l,r,s,p[ 7]); + BF_ENC(r,l,s,p[ 6]); + BF_ENC(l,r,s,p[ 5]); + BF_ENC(r,l,s,p[ 4]); + BF_ENC(l,r,s,p[ 3]); + BF_ENC(r,l,s,p[ 2]); + BF_ENC(l,r,s,p[ 1]); + r^=p[0]; + } + data[1]=l&0xffffffff; + data[0]=r&0xffffffff; + } diff --git a/bsd/crypto/blowfish/bf_locl.h b/bsd/crypto/blowfish/bf_locl.h new file mode 100644 index 000000000..fbf8d9814 --- /dev/null +++ b/bsd/crypto/blowfish/bf_locl.h @@ -0,0 +1,215 @@ +/* crypto/bf/bf_local.h */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. 
The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. 
If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + * + * Always modify bf_locl.org since bf_locl.h is automatically generated from + * it during SSLeay configuration. 
+ * + * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + */ + +#undef c2l +#define c2l(c,l) (l =((unsigned long)(*((c)++))) , \ + l|=((unsigned long)(*((c)++)))<< 8L, \ + l|=((unsigned long)(*((c)++)))<<16L, \ + l|=((unsigned long)(*((c)++)))<<24L) + +/* NOTE - c is not incremented as per c2l */ +#undef c2ln +#define c2ln(c,l1,l2,n) { \ + c+=n; \ + l1=l2=0; \ + switch (n) { \ + case 8: l2 =((unsigned long)(*(--(c))))<<24L; \ + case 7: l2|=((unsigned long)(*(--(c))))<<16L; \ + case 6: l2|=((unsigned long)(*(--(c))))<< 8L; \ + case 5: l2|=((unsigned long)(*(--(c)))); \ + case 4: l1 =((unsigned long)(*(--(c))))<<24L; \ + case 3: l1|=((unsigned long)(*(--(c))))<<16L; \ + case 2: l1|=((unsigned long)(*(--(c))))<< 8L; \ + case 1: l1|=((unsigned long)(*(--(c)))); \ + } \ + } + +#undef l2c +#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16L)&0xff), \ + *((c)++)=(unsigned char)(((l)>>24L)&0xff)) + +/* NOTE - c is not incremented as per l2c */ +#undef l2cn +#define l2cn(l1,l2,c,n) { \ + c+=n; \ + switch (n) { \ + case 8: *(--(c))=(unsigned char)(((l2)>>24L)&0xff); \ + case 7: *(--(c))=(unsigned char)(((l2)>>16L)&0xff); \ + case 6: *(--(c))=(unsigned char)(((l2)>> 8L)&0xff); \ + case 5: *(--(c))=(unsigned char)(((l2) )&0xff); \ + case 4: *(--(c))=(unsigned char)(((l1)>>24L)&0xff); \ + case 3: *(--(c))=(unsigned char)(((l1)>>16L)&0xff); \ + case 2: *(--(c))=(unsigned char)(((l1)>> 8L)&0xff); \ + case 1: *(--(c))=(unsigned char)(((l1) )&0xff); \ + } \ + } + +/* NOTE - c is not incremented as per n2l */ +#define n2ln(c,l1,l2,n) { \ + c+=n; \ + l1=l2=0; \ + switch (n) { \ + case 8: l2 =((unsigned long)(*(--(c)))) ; \ + case 7: l2|=((unsigned long)(*(--(c))))<< 8; \ + case 6: l2|=((unsigned long)(*(--(c))))<<16; \ + case 5: l2|=((unsigned long)(*(--(c))))<<24; \ + case 4: l1 =((unsigned long)(*(--(c)))) ; \ + case 3: l1|=((unsigned long)(*(--(c))))<< 8; \ + case 2: 
l1|=((unsigned long)(*(--(c))))<<16; \ + case 1: l1|=((unsigned long)(*(--(c))))<<24; \ + } \ + } + +/* NOTE - c is not incremented as per l2n */ +#define l2nn(l1,l2,c,n) { \ + c+=n; \ + switch (n) { \ + case 8: *(--(c))=(unsigned char)(((l2) )&0xff); \ + case 7: *(--(c))=(unsigned char)(((l2)>> 8)&0xff); \ + case 6: *(--(c))=(unsigned char)(((l2)>>16)&0xff); \ + case 5: *(--(c))=(unsigned char)(((l2)>>24)&0xff); \ + case 4: *(--(c))=(unsigned char)(((l1) )&0xff); \ + case 3: *(--(c))=(unsigned char)(((l1)>> 8)&0xff); \ + case 2: *(--(c))=(unsigned char)(((l1)>>16)&0xff); \ + case 1: *(--(c))=(unsigned char)(((l1)>>24)&0xff); \ + } \ + } + +#undef n2l +#define n2l(c,l) (l =((unsigned long)(*((c)++)))<<24L, \ + l|=((unsigned long)(*((c)++)))<<16L, \ + l|=((unsigned long)(*((c)++)))<< 8L, \ + l|=((unsigned long)(*((c)++)))) + +#undef l2n +#define l2n(l,c) (*((c)++)=(unsigned char)(((l)>>24L)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16L)&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \ + *((c)++)=(unsigned char)(((l) )&0xff)) + +/* This is actually a big endian algorithm, the most significate byte + * is used to lookup array 0 */ + +/* use BF_PTR2 for intel boxes, + * BF_PTR for sparc and MIPS/SGI + * use nothing for Alpha and HP. 
+ */ +#if !defined(BF_PTR) && !defined(BF_PTR2) +#undef BF_PTR +#endif + +#define BF_M 0x3fc +#define BF_0 22L +#define BF_1 14L +#define BF_2 6L +#define BF_3 2L /* left shift */ + +#if defined(BF_PTR2) + +/* This is basically a special pentium verson */ +#define BF_ENC(LL,R,S,P) \ + { \ + BF_LONG t,u,v; \ + u=R>>BF_0; \ + v=R>>BF_1; \ + u&=BF_M; \ + v&=BF_M; \ + t= *(BF_LONG *)((unsigned char *)&(S[ 0])+u); \ + u=R>>BF_2; \ + t+= *(BF_LONG *)((unsigned char *)&(S[256])+v); \ + v=R<>BF_0)&BF_M))+ \ + *(BF_LONG *)((unsigned char *)&(S[256])+((R>>BF_1)&BF_M)))^ \ + *(BF_LONG *)((unsigned char *)&(S[512])+((R>>BF_2)&BF_M)))+ \ + *(BF_LONG *)((unsigned char *)&(S[768])+((R<>24L) ] + \ + S[0x0100+((R>>16L)&0xff)])^ \ + S[0x0200+((R>> 8L)&0xff)])+ \ + S[0x0300+((R )&0xff)])&0xffffffff; +#endif diff --git a/bsd/crypto/blowfish/bf_pi.h b/bsd/crypto/blowfish/bf_pi.h new file mode 100644 index 000000000..560dd1ee8 --- /dev/null +++ b/bsd/crypto/blowfish/bf_pi.h @@ -0,0 +1,325 @@ +/* crypto/bf/bf_pi.h */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. 
+ * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +static BF_KEY bf_init= { + { + 0x243f6a88L, 0x85a308d3L, 0x13198a2eL, 0x03707344L, + 0xa4093822L, 0x299f31d0L, 0x082efa98L, 0xec4e6c89L, + 0x452821e6L, 0x38d01377L, 0xbe5466cfL, 0x34e90c6cL, + 0xc0ac29b7L, 0xc97c50ddL, 0x3f84d5b5L, 0xb5470917L, + 0x9216d5d9L, 0x8979fb1b + },{ + 0xd1310ba6L, 0x98dfb5acL, 0x2ffd72dbL, 0xd01adfb7L, + 0xb8e1afedL, 0x6a267e96L, 0xba7c9045L, 0xf12c7f99L, + 0x24a19947L, 0xb3916cf7L, 0x0801f2e2L, 0x858efc16L, + 0x636920d8L, 0x71574e69L, 0xa458fea3L, 0xf4933d7eL, + 0x0d95748fL, 0x728eb658L, 0x718bcd58L, 0x82154aeeL, + 0x7b54a41dL, 0xc25a59b5L, 0x9c30d539L, 0x2af26013L, + 0xc5d1b023L, 0x286085f0L, 0xca417918L, 0xb8db38efL, + 0x8e79dcb0L, 0x603a180eL, 0x6c9e0e8bL, 0xb01e8a3eL, + 0xd71577c1L, 0xbd314b27L, 0x78af2fdaL, 0x55605c60L, + 0xe65525f3L, 0xaa55ab94L, 0x57489862L, 0x63e81440L, + 0x55ca396aL, 0x2aab10b6L, 0xb4cc5c34L, 0x1141e8ceL, + 0xa15486afL, 0x7c72e993L, 0xb3ee1411L, 0x636fbc2aL, + 0x2ba9c55dL, 0x741831f6L, 0xce5c3e16L, 0x9b87931eL, + 0xafd6ba33L, 0x6c24cf5cL, 0x7a325381L, 0x28958677L, + 0x3b8f4898L, 0x6b4bb9afL, 0xc4bfe81bL, 0x66282193L, + 0x61d809ccL, 0xfb21a991L, 0x487cac60L, 0x5dec8032L, + 0xef845d5dL, 0xe98575b1L, 0xdc262302L, 0xeb651b88L, + 0x23893e81L, 0xd396acc5L, 0x0f6d6ff3L, 0x83f44239L, + 0x2e0b4482L, 0xa4842004L, 0x69c8f04aL, 0x9e1f9b5eL, + 0x21c66842L, 0xf6e96c9aL, 0x670c9c61L, 0xabd388f0L, + 0x6a51a0d2L, 0xd8542f68L, 0x960fa728L, 0xab5133a3L, + 0x6eef0b6cL, 0x137a3be4L, 0xba3bf050L, 0x7efb2a98L, + 0xa1f1651dL, 0x39af0176L, 0x66ca593eL, 0x82430e88L, + 0x8cee8619L, 0x456f9fb4L, 0x7d84a5c3L, 0x3b8b5ebeL, + 0xe06f75d8L, 0x85c12073L, 0x401a449fL, 0x56c16aa6L, + 0x4ed3aa62L, 0x363f7706L, 0x1bfedf72L, 0x429b023dL, + 0x37d0d724L, 0xd00a1248L, 0xdb0fead3L, 0x49f1c09bL, 
+ 0x075372c9L, 0x80991b7bL, 0x25d479d8L, 0xf6e8def7L, + 0xe3fe501aL, 0xb6794c3bL, 0x976ce0bdL, 0x04c006baL, + 0xc1a94fb6L, 0x409f60c4L, 0x5e5c9ec2L, 0x196a2463L, + 0x68fb6fafL, 0x3e6c53b5L, 0x1339b2ebL, 0x3b52ec6fL, + 0x6dfc511fL, 0x9b30952cL, 0xcc814544L, 0xaf5ebd09L, + 0xbee3d004L, 0xde334afdL, 0x660f2807L, 0x192e4bb3L, + 0xc0cba857L, 0x45c8740fL, 0xd20b5f39L, 0xb9d3fbdbL, + 0x5579c0bdL, 0x1a60320aL, 0xd6a100c6L, 0x402c7279L, + 0x679f25feL, 0xfb1fa3ccL, 0x8ea5e9f8L, 0xdb3222f8L, + 0x3c7516dfL, 0xfd616b15L, 0x2f501ec8L, 0xad0552abL, + 0x323db5faL, 0xfd238760L, 0x53317b48L, 0x3e00df82L, + 0x9e5c57bbL, 0xca6f8ca0L, 0x1a87562eL, 0xdf1769dbL, + 0xd542a8f6L, 0x287effc3L, 0xac6732c6L, 0x8c4f5573L, + 0x695b27b0L, 0xbbca58c8L, 0xe1ffa35dL, 0xb8f011a0L, + 0x10fa3d98L, 0xfd2183b8L, 0x4afcb56cL, 0x2dd1d35bL, + 0x9a53e479L, 0xb6f84565L, 0xd28e49bcL, 0x4bfb9790L, + 0xe1ddf2daL, 0xa4cb7e33L, 0x62fb1341L, 0xcee4c6e8L, + 0xef20cadaL, 0x36774c01L, 0xd07e9efeL, 0x2bf11fb4L, + 0x95dbda4dL, 0xae909198L, 0xeaad8e71L, 0x6b93d5a0L, + 0xd08ed1d0L, 0xafc725e0L, 0x8e3c5b2fL, 0x8e7594b7L, + 0x8ff6e2fbL, 0xf2122b64L, 0x8888b812L, 0x900df01cL, + 0x4fad5ea0L, 0x688fc31cL, 0xd1cff191L, 0xb3a8c1adL, + 0x2f2f2218L, 0xbe0e1777L, 0xea752dfeL, 0x8b021fa1L, + 0xe5a0cc0fL, 0xb56f74e8L, 0x18acf3d6L, 0xce89e299L, + 0xb4a84fe0L, 0xfd13e0b7L, 0x7cc43b81L, 0xd2ada8d9L, + 0x165fa266L, 0x80957705L, 0x93cc7314L, 0x211a1477L, + 0xe6ad2065L, 0x77b5fa86L, 0xc75442f5L, 0xfb9d35cfL, + 0xebcdaf0cL, 0x7b3e89a0L, 0xd6411bd3L, 0xae1e7e49L, + 0x00250e2dL, 0x2071b35eL, 0x226800bbL, 0x57b8e0afL, + 0x2464369bL, 0xf009b91eL, 0x5563911dL, 0x59dfa6aaL, + 0x78c14389L, 0xd95a537fL, 0x207d5ba2L, 0x02e5b9c5L, + 0x83260376L, 0x6295cfa9L, 0x11c81968L, 0x4e734a41L, + 0xb3472dcaL, 0x7b14a94aL, 0x1b510052L, 0x9a532915L, + 0xd60f573fL, 0xbc9bc6e4L, 0x2b60a476L, 0x81e67400L, + 0x08ba6fb5L, 0x571be91fL, 0xf296ec6bL, 0x2a0dd915L, + 0xb6636521L, 0xe7b9f9b6L, 0xff34052eL, 0xc5855664L, + 0x53b02d5dL, 0xa99f8fa1L, 0x08ba4799L, 0x6e85076aL, + 
0x4b7a70e9L, 0xb5b32944L, 0xdb75092eL, 0xc4192623L, + 0xad6ea6b0L, 0x49a7df7dL, 0x9cee60b8L, 0x8fedb266L, + 0xecaa8c71L, 0x699a17ffL, 0x5664526cL, 0xc2b19ee1L, + 0x193602a5L, 0x75094c29L, 0xa0591340L, 0xe4183a3eL, + 0x3f54989aL, 0x5b429d65L, 0x6b8fe4d6L, 0x99f73fd6L, + 0xa1d29c07L, 0xefe830f5L, 0x4d2d38e6L, 0xf0255dc1L, + 0x4cdd2086L, 0x8470eb26L, 0x6382e9c6L, 0x021ecc5eL, + 0x09686b3fL, 0x3ebaefc9L, 0x3c971814L, 0x6b6a70a1L, + 0x687f3584L, 0x52a0e286L, 0xb79c5305L, 0xaa500737L, + 0x3e07841cL, 0x7fdeae5cL, 0x8e7d44ecL, 0x5716f2b8L, + 0xb03ada37L, 0xf0500c0dL, 0xf01c1f04L, 0x0200b3ffL, + 0xae0cf51aL, 0x3cb574b2L, 0x25837a58L, 0xdc0921bdL, + 0xd19113f9L, 0x7ca92ff6L, 0x94324773L, 0x22f54701L, + 0x3ae5e581L, 0x37c2dadcL, 0xc8b57634L, 0x9af3dda7L, + 0xa9446146L, 0x0fd0030eL, 0xecc8c73eL, 0xa4751e41L, + 0xe238cd99L, 0x3bea0e2fL, 0x3280bba1L, 0x183eb331L, + 0x4e548b38L, 0x4f6db908L, 0x6f420d03L, 0xf60a04bfL, + 0x2cb81290L, 0x24977c79L, 0x5679b072L, 0xbcaf89afL, + 0xde9a771fL, 0xd9930810L, 0xb38bae12L, 0xdccf3f2eL, + 0x5512721fL, 0x2e6b7124L, 0x501adde6L, 0x9f84cd87L, + 0x7a584718L, 0x7408da17L, 0xbc9f9abcL, 0xe94b7d8cL, + 0xec7aec3aL, 0xdb851dfaL, 0x63094366L, 0xc464c3d2L, + 0xef1c1847L, 0x3215d908L, 0xdd433b37L, 0x24c2ba16L, + 0x12a14d43L, 0x2a65c451L, 0x50940002L, 0x133ae4ddL, + 0x71dff89eL, 0x10314e55L, 0x81ac77d6L, 0x5f11199bL, + 0x043556f1L, 0xd7a3c76bL, 0x3c11183bL, 0x5924a509L, + 0xf28fe6edL, 0x97f1fbfaL, 0x9ebabf2cL, 0x1e153c6eL, + 0x86e34570L, 0xeae96fb1L, 0x860e5e0aL, 0x5a3e2ab3L, + 0x771fe71cL, 0x4e3d06faL, 0x2965dcb9L, 0x99e71d0fL, + 0x803e89d6L, 0x5266c825L, 0x2e4cc978L, 0x9c10b36aL, + 0xc6150ebaL, 0x94e2ea78L, 0xa5fc3c53L, 0x1e0a2df4L, + 0xf2f74ea7L, 0x361d2b3dL, 0x1939260fL, 0x19c27960L, + 0x5223a708L, 0xf71312b6L, 0xebadfe6eL, 0xeac31f66L, + 0xe3bc4595L, 0xa67bc883L, 0xb17f37d1L, 0x018cff28L, + 0xc332ddefL, 0xbe6c5aa5L, 0x65582185L, 0x68ab9802L, + 0xeecea50fL, 0xdb2f953bL, 0x2aef7dadL, 0x5b6e2f84L, + 0x1521b628L, 0x29076170L, 0xecdd4775L, 0x619f1510L, + 
0x13cca830L, 0xeb61bd96L, 0x0334fe1eL, 0xaa0363cfL, + 0xb5735c90L, 0x4c70a239L, 0xd59e9e0bL, 0xcbaade14L, + 0xeecc86bcL, 0x60622ca7L, 0x9cab5cabL, 0xb2f3846eL, + 0x648b1eafL, 0x19bdf0caL, 0xa02369b9L, 0x655abb50L, + 0x40685a32L, 0x3c2ab4b3L, 0x319ee9d5L, 0xc021b8f7L, + 0x9b540b19L, 0x875fa099L, 0x95f7997eL, 0x623d7da8L, + 0xf837889aL, 0x97e32d77L, 0x11ed935fL, 0x16681281L, + 0x0e358829L, 0xc7e61fd6L, 0x96dedfa1L, 0x7858ba99L, + 0x57f584a5L, 0x1b227263L, 0x9b83c3ffL, 0x1ac24696L, + 0xcdb30aebL, 0x532e3054L, 0x8fd948e4L, 0x6dbc3128L, + 0x58ebf2efL, 0x34c6ffeaL, 0xfe28ed61L, 0xee7c3c73L, + 0x5d4a14d9L, 0xe864b7e3L, 0x42105d14L, 0x203e13e0L, + 0x45eee2b6L, 0xa3aaabeaL, 0xdb6c4f15L, 0xfacb4fd0L, + 0xc742f442L, 0xef6abbb5L, 0x654f3b1dL, 0x41cd2105L, + 0xd81e799eL, 0x86854dc7L, 0xe44b476aL, 0x3d816250L, + 0xcf62a1f2L, 0x5b8d2646L, 0xfc8883a0L, 0xc1c7b6a3L, + 0x7f1524c3L, 0x69cb7492L, 0x47848a0bL, 0x5692b285L, + 0x095bbf00L, 0xad19489dL, 0x1462b174L, 0x23820e00L, + 0x58428d2aL, 0x0c55f5eaL, 0x1dadf43eL, 0x233f7061L, + 0x3372f092L, 0x8d937e41L, 0xd65fecf1L, 0x6c223bdbL, + 0x7cde3759L, 0xcbee7460L, 0x4085f2a7L, 0xce77326eL, + 0xa6078084L, 0x19f8509eL, 0xe8efd855L, 0x61d99735L, + 0xa969a7aaL, 0xc50c06c2L, 0x5a04abfcL, 0x800bcadcL, + 0x9e447a2eL, 0xc3453484L, 0xfdd56705L, 0x0e1e9ec9L, + 0xdb73dbd3L, 0x105588cdL, 0x675fda79L, 0xe3674340L, + 0xc5c43465L, 0x713e38d8L, 0x3d28f89eL, 0xf16dff20L, + 0x153e21e7L, 0x8fb03d4aL, 0xe6e39f2bL, 0xdb83adf7L, + 0xe93d5a68L, 0x948140f7L, 0xf64c261cL, 0x94692934L, + 0x411520f7L, 0x7602d4f7L, 0xbcf46b2eL, 0xd4a20068L, + 0xd4082471L, 0x3320f46aL, 0x43b7d4b7L, 0x500061afL, + 0x1e39f62eL, 0x97244546L, 0x14214f74L, 0xbf8b8840L, + 0x4d95fc1dL, 0x96b591afL, 0x70f4ddd3L, 0x66a02f45L, + 0xbfbc09ecL, 0x03bd9785L, 0x7fac6dd0L, 0x31cb8504L, + 0x96eb27b3L, 0x55fd3941L, 0xda2547e6L, 0xabca0a9aL, + 0x28507825L, 0x530429f4L, 0x0a2c86daL, 0xe9b66dfbL, + 0x68dc1462L, 0xd7486900L, 0x680ec0a4L, 0x27a18deeL, + 0x4f3ffea2L, 0xe887ad8cL, 0xb58ce006L, 0x7af4d6b6L, + 
0xaace1e7cL, 0xd3375fecL, 0xce78a399L, 0x406b2a42L, + 0x20fe9e35L, 0xd9f385b9L, 0xee39d7abL, 0x3b124e8bL, + 0x1dc9faf7L, 0x4b6d1856L, 0x26a36631L, 0xeae397b2L, + 0x3a6efa74L, 0xdd5b4332L, 0x6841e7f7L, 0xca7820fbL, + 0xfb0af54eL, 0xd8feb397L, 0x454056acL, 0xba489527L, + 0x55533a3aL, 0x20838d87L, 0xfe6ba9b7L, 0xd096954bL, + 0x55a867bcL, 0xa1159a58L, 0xcca92963L, 0x99e1db33L, + 0xa62a4a56L, 0x3f3125f9L, 0x5ef47e1cL, 0x9029317cL, + 0xfdf8e802L, 0x04272f70L, 0x80bb155cL, 0x05282ce3L, + 0x95c11548L, 0xe4c66d22L, 0x48c1133fL, 0xc70f86dcL, + 0x07f9c9eeL, 0x41041f0fL, 0x404779a4L, 0x5d886e17L, + 0x325f51ebL, 0xd59bc0d1L, 0xf2bcc18fL, 0x41113564L, + 0x257b7834L, 0x602a9c60L, 0xdff8e8a3L, 0x1f636c1bL, + 0x0e12b4c2L, 0x02e1329eL, 0xaf664fd1L, 0xcad18115L, + 0x6b2395e0L, 0x333e92e1L, 0x3b240b62L, 0xeebeb922L, + 0x85b2a20eL, 0xe6ba0d99L, 0xde720c8cL, 0x2da2f728L, + 0xd0127845L, 0x95b794fdL, 0x647d0862L, 0xe7ccf5f0L, + 0x5449a36fL, 0x877d48faL, 0xc39dfd27L, 0xf33e8d1eL, + 0x0a476341L, 0x992eff74L, 0x3a6f6eabL, 0xf4f8fd37L, + 0xa812dc60L, 0xa1ebddf8L, 0x991be14cL, 0xdb6e6b0dL, + 0xc67b5510L, 0x6d672c37L, 0x2765d43bL, 0xdcd0e804L, + 0xf1290dc7L, 0xcc00ffa3L, 0xb5390f92L, 0x690fed0bL, + 0x667b9ffbL, 0xcedb7d9cL, 0xa091cf0bL, 0xd9155ea3L, + 0xbb132f88L, 0x515bad24L, 0x7b9479bfL, 0x763bd6ebL, + 0x37392eb3L, 0xcc115979L, 0x8026e297L, 0xf42e312dL, + 0x6842ada7L, 0xc66a2b3bL, 0x12754cccL, 0x782ef11cL, + 0x6a124237L, 0xb79251e7L, 0x06a1bbe6L, 0x4bfb6350L, + 0x1a6b1018L, 0x11caedfaL, 0x3d25bdd8L, 0xe2e1c3c9L, + 0x44421659L, 0x0a121386L, 0xd90cec6eL, 0xd5abea2aL, + 0x64af674eL, 0xda86a85fL, 0xbebfe988L, 0x64e4c3feL, + 0x9dbc8057L, 0xf0f7c086L, 0x60787bf8L, 0x6003604dL, + 0xd1fd8346L, 0xf6381fb0L, 0x7745ae04L, 0xd736fcccL, + 0x83426b33L, 0xf01eab71L, 0xb0804187L, 0x3c005e5fL, + 0x77a057beL, 0xbde8ae24L, 0x55464299L, 0xbf582e61L, + 0x4e58f48fL, 0xf2ddfda2L, 0xf474ef38L, 0x8789bdc2L, + 0x5366f9c3L, 0xc8b38e74L, 0xb475f255L, 0x46fcd9b9L, + 0x7aeb2661L, 0x8b1ddf84L, 0x846a0e79L, 0x915f95e2L, + 
0x466e598eL, 0x20b45770L, 0x8cd55591L, 0xc902de4cL, + 0xb90bace1L, 0xbb8205d0L, 0x11a86248L, 0x7574a99eL, + 0xb77f19b6L, 0xe0a9dc09L, 0x662d09a1L, 0xc4324633L, + 0xe85a1f02L, 0x09f0be8cL, 0x4a99a025L, 0x1d6efe10L, + 0x1ab93d1dL, 0x0ba5a4dfL, 0xa186f20fL, 0x2868f169L, + 0xdcb7da83L, 0x573906feL, 0xa1e2ce9bL, 0x4fcd7f52L, + 0x50115e01L, 0xa70683faL, 0xa002b5c4L, 0x0de6d027L, + 0x9af88c27L, 0x773f8641L, 0xc3604c06L, 0x61a806b5L, + 0xf0177a28L, 0xc0f586e0L, 0x006058aaL, 0x30dc7d62L, + 0x11e69ed7L, 0x2338ea63L, 0x53c2dd94L, 0xc2c21634L, + 0xbbcbee56L, 0x90bcb6deL, 0xebfc7da1L, 0xce591d76L, + 0x6f05e409L, 0x4b7c0188L, 0x39720a3dL, 0x7c927c24L, + 0x86e3725fL, 0x724d9db9L, 0x1ac15bb4L, 0xd39eb8fcL, + 0xed545578L, 0x08fca5b5L, 0xd83d7cd3L, 0x4dad0fc4L, + 0x1e50ef5eL, 0xb161e6f8L, 0xa28514d9L, 0x6c51133cL, + 0x6fd5c7e7L, 0x56e14ec4L, 0x362abfceL, 0xddc6c837L, + 0xd79a3234L, 0x92638212L, 0x670efa8eL, 0x406000e0L, + 0x3a39ce37L, 0xd3faf5cfL, 0xabc27737L, 0x5ac52d1bL, + 0x5cb0679eL, 0x4fa33742L, 0xd3822740L, 0x99bc9bbeL, + 0xd5118e9dL, 0xbf0f7315L, 0xd62d1c7eL, 0xc700c47bL, + 0xb78c1b6bL, 0x21a19045L, 0xb26eb1beL, 0x6a366eb4L, + 0x5748ab2fL, 0xbc946e79L, 0xc6a376d2L, 0x6549c2c8L, + 0x530ff8eeL, 0x468dde7dL, 0xd5730a1dL, 0x4cd04dc6L, + 0x2939bbdbL, 0xa9ba4650L, 0xac9526e8L, 0xbe5ee304L, + 0xa1fad5f0L, 0x6a2d519aL, 0x63ef8ce2L, 0x9a86ee22L, + 0xc089c2b8L, 0x43242ef6L, 0xa51e03aaL, 0x9cf2d0a4L, + 0x83c061baL, 0x9be96a4dL, 0x8fe51550L, 0xba645bd6L, + 0x2826a2f9L, 0xa73a3ae1L, 0x4ba99586L, 0xef5562e9L, + 0xc72fefd3L, 0xf752f7daL, 0x3f046f69L, 0x77fa0a59L, + 0x80e4a915L, 0x87b08601L, 0x9b09e6adL, 0x3b3ee593L, + 0xe990fd5aL, 0x9e34d797L, 0x2cf0b7d9L, 0x022b8b51L, + 0x96d5ac3aL, 0x017da67dL, 0xd1cf3ed6L, 0x7c7d2d28L, + 0x1f9f25cfL, 0xadf2b89bL, 0x5ad6b472L, 0x5a88f54cL, + 0xe029ac71L, 0xe019a5e6L, 0x47b0acfdL, 0xed93fa9bL, + 0xe8d3c48dL, 0x283b57ccL, 0xf8d56629L, 0x79132e28L, + 0x785f0191L, 0xed756055L, 0xf7960e44L, 0xe3d35e8cL, + 0x15056dd4L, 0x88f46dbaL, 0x03a16125L, 0x0564f0bdL, + 
0xc3eb9e15L, 0x3c9057a2L, 0x97271aecL, 0xa93a072aL, + 0x1b3f6d9bL, 0x1e6321f5L, 0xf59c66fbL, 0x26dcf319L, + 0x7533d928L, 0xb155fdf5L, 0x03563482L, 0x8aba3cbbL, + 0x28517711L, 0xc20ad9f8L, 0xabcc5167L, 0xccad925fL, + 0x4de81751L, 0x3830dc8eL, 0x379d5862L, 0x9320f991L, + 0xea7a90c2L, 0xfb3e7bceL, 0x5121ce64L, 0x774fbe32L, + 0xa8b6e37eL, 0xc3293d46L, 0x48de5369L, 0x6413e680L, + 0xa2ae0810L, 0xdd6db224L, 0x69852dfdL, 0x09072166L, + 0xb39a460aL, 0x6445c0ddL, 0x586cdecfL, 0x1c20c8aeL, + 0x5bbef7ddL, 0x1b588d40L, 0xccd2017fL, 0x6bb4e3bbL, + 0xdda26a7eL, 0x3a59ff45L, 0x3e350a44L, 0xbcb4cdd5L, + 0x72eacea8L, 0xfa6484bbL, 0x8d6612aeL, 0xbf3c6f47L, + 0xd29be463L, 0x542f5d9eL, 0xaec2771bL, 0xf64e6370L, + 0x740e0d8dL, 0xe75b1357L, 0xf8721671L, 0xaf537d5dL, + 0x4040cb08L, 0x4eb4e2ccL, 0x34d2466aL, 0x0115af84L, + 0xe1b00428L, 0x95983a1dL, 0x06b89fb4L, 0xce6ea048L, + 0x6f3f3b82L, 0x3520ab82L, 0x011a1d4bL, 0x277227f8L, + 0x611560b1L, 0xe7933fdcL, 0xbb3a792bL, 0x344525bdL, + 0xa08839e1L, 0x51ce794bL, 0x2f32c9b7L, 0xa01fbac9L, + 0xe01cc87eL, 0xbcc7d1f6L, 0xcf0111c3L, 0xa1e8aac7L, + 0x1a908749L, 0xd44fbd9aL, 0xd0dadecbL, 0xd50ada38L, + 0x0339c32aL, 0xc6913667L, 0x8df9317cL, 0xe0b12b4fL, + 0xf79e59b7L, 0x43f5bb3aL, 0xf2d519ffL, 0x27d9459cL, + 0xbf97222cL, 0x15e6fc2aL, 0x0f91fc71L, 0x9b941525L, + 0xfae59361L, 0xceb69cebL, 0xc2a86459L, 0x12baa8d1L, + 0xb6c1075eL, 0xe3056a0cL, 0x10d25065L, 0xcb03a442L, + 0xe0ec6e0eL, 0x1698db3bL, 0x4c98a0beL, 0x3278e964L, + 0x9f1f9532L, 0xe0d392dfL, 0xd3a0342bL, 0x8971f21eL, + 0x1b0a7441L, 0x4ba3348cL, 0xc5be7120L, 0xc37632d8L, + 0xdf359f8dL, 0x9b992f2eL, 0xe60b6f47L, 0x0fe3f11dL, + 0xe54cda54L, 0x1edad891L, 0xce6279cfL, 0xcd3e7e6fL, + 0x1618b166L, 0xfd2c1d05L, 0x848fd2c5L, 0xf6fb2299L, + 0xf523f357L, 0xa6327623L, 0x93a83531L, 0x56cccd02L, + 0xacf08162L, 0x5a75ebb5L, 0x6e163697L, 0x88d273ccL, + 0xde966292L, 0x81b949d0L, 0x4c50901bL, 0x71c65614L, + 0xe6c6c7bdL, 0x327a140aL, 0x45e1d006L, 0xc3f27b9aL, + 0xc9aa53fdL, 0x62a80f00L, 0xbb25bfe2L, 0x35bdd2f6L, + 
0x71126905L, 0xb2040222L, 0xb6cbcf7cL, 0xcd769c2bL, + 0x53113ec0L, 0x1640e3d3L, 0x38abbd60L, 0x2547adf0L, + 0xba38209cL, 0xf746ce76L, 0x77afa1c5L, 0x20756060L, + 0x85cbfe4eL, 0x8ae88dd8L, 0x7aaaf9b0L, 0x4cf9aa7eL, + 0x1948c25cL, 0x02fb8a8cL, 0x01c36ae4L, 0xd6ebe1f9L, + 0x90d4f869L, 0xa65cdea0L, 0x3f09252dL, 0xc208e69fL, + 0xb74e6132L, 0xce77e25bL, 0x578fdfe3L, 0x3ac372e6L, + } + }; + diff --git a/bsd/crypto/blowfish/bf_skey.c b/bsd/crypto/blowfish/bf_skey.c new file mode 100644 index 000000000..4515b8454 --- /dev/null +++ b/bsd/crypto/blowfish/bf_skey.c @@ -0,0 +1,120 @@ +/* crypto/bf/bf_skey.c */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +#include +#include +#include +#include +#include +#include + +void BF_set_key(key,len,data) +BF_KEY *key; +int len; +unsigned char *data; + { + int i; + BF_LONG *p,ri,in[2]; + unsigned char *d,*end; + + + memcpy((char *)key,(char *)&bf_init,sizeof(BF_KEY)); + p=key->P; + + if (len > ((BF_ROUNDS+2)*4)) len=(BF_ROUNDS+2)*4; + + d=data; + end= &(data[len]); + for (i=0; i<(BF_ROUNDS+2); i++) + { + ri= *(d++); + if (d >= end) d=data; + + ri<<=8; + ri|= *(d++); + if (d >= end) d=data; + + ri<<=8; + ri|= *(d++); + if (d >= end) d=data; + + ri<<=8; + ri|= *(d++); + if (d >= end) d=data; + + p[i]^=ri; + } + + in[0]=0L; + in[1]=0L; + for (i=0; i<(BF_ROUNDS+2); i+=2) + { + BF_encrypt(in,key,BF_ENCRYPT); + p[i ]=in[0]; + p[i+1]=in[1]; + } + + p=key->S; + for (i=0; i<4*256; i+=2) + { + BF_encrypt(in,key,BF_ENCRYPT); + p[i ]=in[0]; + p[i+1]=in[1]; + } + } + diff --git a/bsd/crypto/blowfish/blowfish.h b/bsd/crypto/blowfish/blowfish.h new file mode 100644 index 000000000..0e5b989e4 --- /dev/null +++ b/bsd/crypto/blowfish/blowfish.h @@ -0,0 +1,122 @@ +/* crypto/bf/blowfish.h */ +/* Copyright (C) 1995-1997 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@mincom.oz.au). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. 
+ * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@mincom.oz.au)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@mincom.oz.au)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#ifndef HEADER_BLOWFISH_H +#define HEADER_BLOWFISH_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define BF_ENCRYPT 1 +#define BF_DECRYPT 0 + +/* If you make this 'unsigned int' the pointer variants will work on + * the Alpha, otherwise they will not. 
Strangly using the '8 byte' + * BF_LONG and the default 'non-pointer' inner loop is the best configuration + * for the Alpha */ +#define BF_LONG unsigned long + +#define BF_ROUNDS 16 +#define BF_BLOCK 8 + +typedef struct bf_key_st + { + BF_LONG P[BF_ROUNDS+2]; + BF_LONG S[4*256]; + } BF_KEY; + +#ifndef NOPROTO + +void BF_set_key(BF_KEY *key, int len, unsigned char *data); +void BF_ecb_encrypt(unsigned char *in,unsigned char *out,BF_KEY *key, + int encrypt); +void BF_encrypt(BF_LONG *data,BF_KEY *key,int encrypt); +void BF_cbc_encrypt(unsigned char *in, unsigned char *out, long length, + BF_KEY *ks, unsigned char *iv, int encrypt); +void BF_cfb64_encrypt(unsigned char *in, unsigned char *out, long length, + BF_KEY *schedule, unsigned char *ivec, int *num, int encrypt); +void BF_ofb64_encrypt(unsigned char *in, unsigned char *out, long length, + BF_KEY *schedule, unsigned char *ivec, int *num); +char *BF_options(void); + +/* added by itojun */ +struct mbuf; +void BF_cbc_encrypt_m(struct mbuf *, int, int, BF_KEY *, + unsigned char *, int); + +#else + +void BF_set_key(); +void BF_ecb_encrypt(); +void BF_encrypt(); +void BF_cbc_encrypt(); +void BF_cfb64_encrypt(); +void BF_ofb64_encrypt(); +char *BF_options(); + +/* added by itojun */ +void BF_cbc_encrypt_m(); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/bsd/crypto/cast128/Makefile b/bsd/crypto/cast128/Makefile new file mode 100644 index 000000000..98869f4e0 --- /dev/null +++ b/bsd/crypto/cast128/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + cast128.h cast128_subkey.h + +INSTALL_MI_LIST = 
${DATAFILES} + +INSTALL_MI_DIR = crypto + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = crypto + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/crypto/cast128/cast128.c b/bsd/crypto/cast128/cast128.c new file mode 100644 index 000000000..0d9661f47 --- /dev/null +++ b/bsd/crypto/cast128/cast128.c @@ -0,0 +1,871 @@ +/* + * heavily modified by Tomomi Suzuki + */ +/* + * The CAST-128 Encryption Algorithm (RFC 2144) + * + * original implementation + * 1997/08/21 + */ +/* + * Copyright (C) 1997 Hideo "Sir MANMOS" Morishita + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY Hideo "Sir MaNMOS" Morishita ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Hideo "Sir MaNMOS" Morishita BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + + +static u_int32_t S1[]; +static u_int32_t S2[]; +static u_int32_t S3[]; +static u_int32_t S4[]; +static u_int32_t S5[]; +static u_int32_t S6[]; +static u_int32_t S7[]; +static u_int32_t S8[]; + + +/* + * Step 1 + */ +void set_cast128_subkey(u_int32_t *subkey, u_int8_t *key) +{ + u_int32_t buf[8]; /* for x0x1x2x3, x4x5x6x7 ..., z0z1z2z3, ... */ + + buf[0] = (key[ 0] << 24) | (key[ 1] << 16) | (key[ 2] << 8) + | key[ 3]; + buf[1] = (key[ 4] << 24) | (key[ 5] << 16) | (key[ 6] << 8) + | key[ 7]; + buf[2] = (key[ 8] << 24) | (key[ 9] << 16) | (key[10] << 8) + | key[11]; + buf[3] = (key[12] << 24) | (key[13] << 16) | (key[14] << 8) + | key[15]; + + /* masking subkey */ + z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]; + z4z5z6z7 = x8x9xAxB ^ S5[z0] ^ S6[z2] ^ S7[z1] ^ S8[z3] ^ S8[xA]; + z8z9zAzB = xCxDxExF ^ S5[z7] ^ S6[z6] ^ S7[z5] ^ S8[z4] ^ S5[x9]; + zCzDzEzF = x4x5x6x7 ^ S5[zA] ^ S6[z9] ^ S7[zB] ^ S8[z8] ^ S6[xB]; + subkey[0] = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]; + subkey[1] = S5[zA] ^ S6[zB] ^ S7[z5] ^ S8[z4] ^ S6[z6]; + subkey[2] = S5[zC] ^ S6[zD] ^ S7[z3] ^ S8[z2] ^ S7[z9]; + subkey[3] = S5[zE] ^ S6[zF] ^ S7[z1] ^ S8[z0] ^ S8[zC]; + + x0x1x2x3 = z8z9zAzB ^ S5[z5] ^ S6[z7] ^ S7[z4] ^ S8[z6] ^ S7[z0]; + x4x5x6x7 = z0z1z2z3 ^ S5[x0] ^ S6[x2] ^ S7[x1] ^ S8[x3] ^ S8[z2]; + x8x9xAxB = z4z5z6z7 ^ S5[x7] ^ S6[x6] ^ S7[x5] ^ S8[x4] ^ S5[z1]; + xCxDxExF = zCzDzEzF ^ S5[xA] ^ S6[x9] ^ S7[xB] ^ S8[x8] ^ S6[z3]; + subkey[4] = S5[x3] ^ S6[x2] ^ S7[xC] ^ S8[xD] ^ S5[x8]; + subkey[5] = S5[x1] ^ S6[x0] ^ S7[xE] ^ S8[xF] ^ S6[xD]; + subkey[6] = S5[x7] ^ S6[x6] ^ S7[x8] ^ S8[x9] ^ S7[x3]; + subkey[7] = S5[x5] ^ S6[x4] ^ S7[xA] ^ S8[xB] ^ S8[x7]; + + z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]; + z4z5z6z7 = x8x9xAxB ^ S5[z0] ^ S6[z2] ^ S7[z1] ^ S8[z3] ^ S8[xA]; + z8z9zAzB = xCxDxExF ^ S5[z7] ^ S6[z6] ^ S7[z5] ^ S8[z4] ^ S5[x9]; + zCzDzEzF = x4x5x6x7 ^ S5[zA] ^ S6[z9] ^ S7[zB] ^ S8[z8] ^ 
S6[xB]; + subkey[8] = S5[z3] ^ S6[z2] ^ S7[zC] ^ S8[zD] ^ S5[z9]; + subkey[9] = S5[z1] ^ S6[z0] ^ S7[zE] ^ S8[zF] ^ S6[zC]; + subkey[10] = S5[z7] ^ S6[z6] ^ S7[z8] ^ S8[z9] ^ S7[z2]; + subkey[11] = S5[z5] ^ S6[z4] ^ S7[zA] ^ S8[zB] ^ S8[z6]; + + x0x1x2x3 = z8z9zAzB ^ S5[z5] ^ S6[z7] ^ S7[z4] ^ S8[z6] ^ S7[z0]; + x4x5x6x7 = z0z1z2z3 ^ S5[x0] ^ S6[x2] ^ S7[x1] ^ S8[x3] ^ S8[z2]; + x8x9xAxB = z4z5z6z7 ^ S5[x7] ^ S6[x6] ^ S7[x5] ^ S8[x4] ^ S5[z1]; + xCxDxExF = zCzDzEzF ^ S5[xA] ^ S6[x9] ^ S7[xB] ^ S8[x8] ^ S6[z3]; + subkey[12] = S5[x8] ^ S6[x9] ^ S7[x7] ^ S8[x6] ^ S5[x3]; + subkey[13] = S5[xA] ^ S6[xB] ^ S7[x5] ^ S8[x4] ^ S6[x7]; + subkey[14] = S5[xC] ^ S6[xD] ^ S7[x3] ^ S8[x2] ^ S7[x8]; + subkey[15] = S5[xE] ^ S6[xF] ^ S7[x1] ^ S8[x0] ^ S8[xD]; + + /* rotate subkey (least significast 5 bits) */ + z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]; + z4z5z6z7 = x8x9xAxB ^ S5[z0] ^ S6[z2] ^ S7[z1] ^ S8[z3] ^ S8[xA]; + z8z9zAzB = xCxDxExF ^ S5[z7] ^ S6[z6] ^ S7[z5] ^ S8[z4] ^ S5[x9]; + zCzDzEzF = x4x5x6x7 ^ S5[zA] ^ S6[z9] ^ S7[zB] ^ S8[z8] ^ S6[xB]; + subkey[16] = (S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]) & 0x1f; + subkey[17] = (S5[zA] ^ S6[zB] ^ S7[z5] ^ S8[z4] ^ S6[z6]) & 0x1f; + subkey[18] = (S5[zC] ^ S6[zD] ^ S7[z3] ^ S8[z2] ^ S7[z9]) & 0x1f; + subkey[19] = (S5[zE] ^ S6[zF] ^ S7[z1] ^ S8[z0] ^ S8[zC]) & 0x1f; + + x0x1x2x3 = z8z9zAzB ^ S5[z5] ^ S6[z7] ^ S7[z4] ^ S8[z6] ^ S7[z0]; + x4x5x6x7 = z0z1z2z3 ^ S5[x0] ^ S6[x2] ^ S7[x1] ^ S8[x3] ^ S8[z2]; + x8x9xAxB = z4z5z6z7 ^ S5[x7] ^ S6[x6] ^ S7[x5] ^ S8[x4] ^ S5[z1]; + xCxDxExF = zCzDzEzF ^ S5[xA] ^ S6[x9] ^ S7[xB] ^ S8[x8] ^ S6[z3]; + subkey[20] = (S5[x3] ^ S6[x2] ^ S7[xC] ^ S8[xD] ^ S5[x8]) & 0x1f; + subkey[21] = (S5[x1] ^ S6[x0] ^ S7[xE] ^ S8[xF] ^ S6[xD]) & 0x1f; + subkey[22] = (S5[x7] ^ S6[x6] ^ S7[x8] ^ S8[x9] ^ S7[x3]) & 0x1f; + subkey[23] = (S5[x5] ^ S6[x4] ^ S7[xA] ^ S8[xB] ^ S8[x7]) & 0x1f; + + z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]; + z4z5z6z7 = x8x9xAxB ^ S5[z0] ^ 
S6[z2] ^ S7[z1] ^ S8[z3] ^ S8[xA]; + z8z9zAzB = xCxDxExF ^ S5[z7] ^ S6[z6] ^ S7[z5] ^ S8[z4] ^ S5[x9]; + zCzDzEzF = x4x5x6x7 ^ S5[zA] ^ S6[z9] ^ S7[zB] ^ S8[z8] ^ S6[xB]; + subkey[24] = (S5[z3] ^ S6[z2] ^ S7[zC] ^ S8[zD] ^ S5[z9]) & 0x1f; + subkey[25] = (S5[z1] ^ S6[z0] ^ S7[zE] ^ S8[zF] ^ S6[zC]) & 0x1f; + subkey[26] = (S5[z7] ^ S6[z6] ^ S7[z8] ^ S8[z9] ^ S7[z2]) & 0x1f; + subkey[27] = (S5[z5] ^ S6[z4] ^ S7[zA] ^ S8[zB] ^ S8[z6]) & 0x1f; + + x0x1x2x3 = z8z9zAzB ^ S5[z5] ^ S6[z7] ^ S7[z4] ^ S8[z6] ^ S7[z0]; + x4x5x6x7 = z0z1z2z3 ^ S5[x0] ^ S6[x2] ^ S7[x1] ^ S8[x3] ^ S8[z2]; + x8x9xAxB = z4z5z6z7 ^ S5[x7] ^ S6[x6] ^ S7[x5] ^ S8[x4] ^ S5[z1]; + xCxDxExF = zCzDzEzF ^ S5[xA] ^ S6[x9] ^ S7[xB] ^ S8[x8] ^ S6[z3]; + subkey[28] = (S5[x8] ^ S6[x9] ^ S7[x7] ^ S8[x6] ^ S5[x3]) & 0x1f; + subkey[29] = (S5[xA] ^ S6[xB] ^ S7[x5] ^ S8[x4] ^ S6[x7]) & 0x1f; + subkey[30] = (S5[xC] ^ S6[xD] ^ S7[x3] ^ S8[x2] ^ S7[x8]) & 0x1f; + subkey[31] = (S5[xE] ^ S6[xF] ^ S7[x1] ^ S8[x0] ^ S8[xD]) & 0x1f; +} + + +#define CAST128_TYPE1(rc, d, km, kr) { \ + u_int32_t x = circular_leftshift(((km)+(d)), (kr)); \ + (rc) = ((S1[byte0(x)] ^ S2[byte1(x)]) - S3[byte2(x)]) + S4[byte3(x)]; \ +} + +#define CAST128_TYPE2(rc, d, km, kr) { \ + u_int32_t x = circular_leftshift(((km)^(d)), (kr)); \ + (rc) = ((S1[byte0(x)] - S2[byte1(x)]) + S3[byte2(x)]) ^ S4[byte3(x)]; \ +} + +#define CAST128_TYPE3(rc, d, km, kr) { \ + u_int32_t x = circular_leftshift(((km)-(d)), (kr)); \ + (rc) = ((S1[byte0(x)] + S2[byte1(x)]) ^ S3[byte2(x)]) - S4[byte3(x)]; \ +} + + +void cast128_encrypt_round16(u_int8_t *c, const u_int8_t *m, + u_int32_t *subkey) +{ + u_int32_t l; /* left 32bit */ + u_int32_t r; /* right 32bit */ + u_int32_t br; /* backup right 32bit */ + u_int32_t rc; /* result code of CAST128_TYPE?() */ + u_int32_t *km, *kr; + + /* Step 2 */ + l = (m[0] << 24) | (m[1] << 16) | (m[2] << 8) | m[3]; + r = (m[4] << 24) | (m[5] << 16) | (m[6] << 8) | m[7]; + + /* Step 3 */ + km = subkey; + kr = subkey + 16; + + br = r; 
CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; + + /* Step 4 */ + c[0] = (r >> 24) & 0xff; + c[1] = (r >> 16) & 0xff; + c[2] = (r >> 8) & 0xff; + c[3] = r & 0xff; + c[4] = (l >> 24) & 0xff; + c[5] = (l >> 16) & 0xff; + c[6] = (l >> 8) & 0xff; + c[7] = l & 0xff; +} + + +void cast128_decrypt_round16(u_int8_t *m, const u_int8_t *c, + u_int32_t *subkey) +{ + u_int32_t l; /* left 32bit */ + u_int32_t r; /* right 32bit */ + u_int32_t bl; /* backup left 32bit */ + u_int32_t rc; /* result code of CAST128_TYPE?() */ + u_int32_t *km, *kr; + + /* Step 2 */ + r = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3]; + l = (c[4] << 24) | (c[5] << 16) | (c[6] << 8) | c[7]; + + /* Step 3 */ + km = subkey + 15; + kr = subkey + 31; + + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; 
CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; + + /* Step 4 */ + m[0] = (l >> 24) & 0xff; + m[1] = (l >> 16) & 0xff; + m[2] = (l >> 8) & 0xff; + m[3] = l & 0xff; + m[4] = (r >> 24) & 0xff; + m[5] = (r >> 16) & 0xff; + m[6] = (r >> 8) & 0xff; + m[7] = r & 0xff; +} + + +void cast128_encrypt_round12(u_int8_t *c, const u_int8_t *m, + u_int32_t *subkey) +{ + u_int32_t l; /* left 32bit */ + u_int32_t r; /* right 32bit */ + u_int32_t br; /* backup right 32bit */ + u_int32_t rc; /* result code of CAST128_TYPE?() */ + u_int32_t *km, *kr; + + /* Step 2 */ + l = (m[0] << 24) | (m[1] << 16) | (m[2] << 8) | m[3]; + r = (m[4] << 24) | (m[5] << 16) | (m[6] << 8) | m[7]; + + /* Step 3 */ + km = subkey; + kr = subkey + 16; + + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; 
CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE1(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE2(rc, r, *km, *kr); r = l ^ rc; l = br; km++; kr++; + br = r; CAST128_TYPE3(rc, r, *km, *kr); r = l ^ rc; l = br; + + /* Step 4 */ + c[0] = (r >> 24) & 0xff; + c[1] = (r >> 16) & 0xff; + c[2] = (r >> 8) & 0xff; + c[3] = r & 0xff; + c[4] = (l >> 24) & 0xff; + c[5] = (l >> 16) & 0xff; + c[6] = (l >> 8) & 0xff; + c[7] = l & 0xff; +} + + +void cast128_decrypt_round12(u_int8_t *m, const u_int8_t *c, + u_int32_t *subkey) +{ + u_int32_t l; /* left 32bit */ + u_int32_t r; /* right 32bit */ + u_int32_t bl; /* backup left 32bit */ + u_int32_t rc; /* result code of CAST128_TYPE?() */ + u_int32_t *km, *kr; + + /* Step 2 */ + r = (c[0] << 24) | (c[1] << 16) | (c[2] << 8) | c[3]; + l = (c[4] << 24) | (c[5] << 16) | (c[6] << 8) | c[7]; + + /* Step 3 */ + km = subkey + 11; + kr = subkey + 27; + + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE3(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; 
CAST128_TYPE2(rc, l, *km, *kr); l = r ^ rc; r = bl; km--; kr--; + bl = l; CAST128_TYPE1(rc, l, *km, *kr); l = r ^ rc; r = bl; + + /* Step 4 */ + m[0] = (l >> 24) & 0xff; + m[1] = (l >> 16) & 0xff; + m[2] = (l >> 8) & 0xff; + m[3] = l & 0xff; + m[4] = (r >> 24) & 0xff; + m[5] = (r >> 16) & 0xff; + m[6] = (r >> 8) & 0xff; + m[7] = r & 0xff; +} + + +static u_int32_t S1[] = { + 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, + 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, + 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, + 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, + 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, + 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, + 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, + 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, + 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, + 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, + 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, + 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, + 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, + 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, + 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, + 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, + 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, + 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, + 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, + 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, + 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, + 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, + 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, + 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, + 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, + 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, + 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, + 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, + 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, + 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, + 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, + 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, + 0x548300d0, 
0x00322a3c, 0xbf64cddf, 0xba57a68e, + 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, + 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, + 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, + 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, + 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, + 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, + 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, + 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, + 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, + 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, + 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, + 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, + 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, + 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, + 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, + 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, + 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, + 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, + 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, + 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, + 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, + 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, + 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, + 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, + 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, + 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, + 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, + 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, + 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, + 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, + 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, +}; + +static u_int32_t S2[] = { + 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, + 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, + 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, + 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, + 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, + 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, + 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, + 0x01420ddb, 0xe4e7ef5b, 
0x25a1ff41, 0xe180f806, + 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, + 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, + 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, + 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, + 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, + 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, + 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, + 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, + 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, + 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, + 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, + 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, + 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, + 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, + 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, + 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, + 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, + 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, + 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, + 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, + 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, + 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, + 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, + 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, + 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, + 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, + 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, + 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, + 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, + 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, + 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, + 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, + 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, + 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, + 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, + 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, + 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, + 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, + 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, + 0x088a1bc8, 0xbec0c560, 
0x61a3c9e8, 0xbca8f54d, + 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, + 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, + 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, + 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, + 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, + 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, + 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, + 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, + 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, + 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, + 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, + 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, + 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, + 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, + 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, + 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, +}; + +static u_int32_t S3[] = { + 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, + 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, + 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, + 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, + 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, + 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, + 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, + 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, + 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, + 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, + 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, + 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, + 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, + 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, + 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, + 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, + 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, + 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, + 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, + 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, + 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, + 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, + 0x23efe941, 0xa903f12e, 0x60270df2, 
0x0276e4b6, + 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, + 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, + 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, + 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, + 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, + 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, + 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, + 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, + 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, + 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, + 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, + 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, + 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, + 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, + 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, + 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, + 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, + 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, + 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, + 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, + 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, + 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, + 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, + 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, + 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, + 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, + 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, + 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, + 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, + 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, + 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, + 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, + 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, + 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, + 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, + 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, + 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, + 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, + 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, + 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 
0x83323ec5, + 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, +}; + +static u_int32_t S4[] = { + 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, + 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, + 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, + 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, + 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, + 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, + 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, + 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, + 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, + 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, + 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, + 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, + 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, + 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, + 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, + 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, + 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, + 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, + 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, + 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, + 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, + 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, + 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, + 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, + 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, + 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, + 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, + 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, + 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, + 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, + 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, + 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, + 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, + 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, + 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, + 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, + 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, + 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, + 
0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, + 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, + 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, + 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, + 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, + 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, + 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, + 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, + 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, + 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, + 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, + 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, + 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, + 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, + 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, + 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, + 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, + 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, + 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, + 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, + 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, + 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, + 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, + 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, + 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, + 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, +}; + +static u_int32_t S5[] = { + 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, + 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, + 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, + 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, + 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, + 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, + 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, + 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, + 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, + 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, + 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, + 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, + 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, + 0x0c05372a, 
0x578535f2, 0x2261be02, 0xd642a0c9, + 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, + 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, + 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, + 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, + 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, + 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, + 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, + 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, + 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, + 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, + 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, + 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, + 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, + 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, + 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, + 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, + 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, + 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, + 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, + 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, + 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, + 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, + 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, + 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, + 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, + 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, + 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, + 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, + 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, + 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, + 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, + 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, + 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, + 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, + 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, + 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, + 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, + 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, + 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, + 0x8c2302bf, 
0xaaf47556, 0x5f46b02a, 0x2b092801, + 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, + 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, + 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, + 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, + 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, + 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, + 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, + 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, + 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, + 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, +}; + +static u_int32_t S6[] = { + 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, + 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, + 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, + 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, + 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, + 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, + 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, + 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, + 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, + 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, + 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, + 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, + 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, + 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, + 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, + 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, + 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, + 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, + 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, + 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, + 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, + 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, + 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, + 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, + 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, + 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, + 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, + 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, + 0x4ec75b95, 0x24f2c3c0, 
0x42d15d99, 0xcd0d7fa0, + 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f, + 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, + 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, + 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, + 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, + 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, + 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, + 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, + 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, + 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, + 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, + 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, + 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, + 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, + 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, + 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, + 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, + 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, + 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, + 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, + 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, + 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, + 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, + 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, + 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, + 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, + 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, + 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, + 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, + 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, + 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, + 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, + 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, + 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, + 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, +}; + +static u_int32_t S7[] = { + 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, + 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, + 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, + 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 
0xb28707de, + 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, + 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, + 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, + 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, + 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, + 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, + 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, + 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, + 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, + 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, + 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, + 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, + 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, + 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, + 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, + 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, + 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, + 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, + 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, + 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, + 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, + 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, + 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, + 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, + 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, + 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, + 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, + 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, + 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, + 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, + 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, + 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, + 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, + 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566, + 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, + 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, + 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, + 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, + 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, + 0x93d29a22, 0xe32dbf9a, 0x058745b9, 
0x3453dc1e, + 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, + 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, + 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, + 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, + 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, + 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, + 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, + 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, + 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, + 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, + 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, + 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, + 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, + 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, + 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, + 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, + 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, + 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, + 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, + 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, +}; + +static u_int32_t S8[] = { + 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, + 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, + 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, + 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, + 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, + 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, + 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, + 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, + 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, + 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, + 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, + 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, + 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, + 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, + 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, + 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, + 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, + 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, + 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, + 
0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, + 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, + 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, + 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, + 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, + 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, + 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, + 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, + 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, + 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, + 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, + 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, + 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, + 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, + 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, + 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, + 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, + 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, + 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, + 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, + 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, + 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, + 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, + 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, + 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, + 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, + 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4, + 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, + 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, + 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, + 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, + 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, + 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, + 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, + 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, + 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, + 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, + 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, + 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, + 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, + 
0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, + 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, + 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, + 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, + 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, +}; + diff --git a/bsd/crypto/cast128/cast128.h b/bsd/crypto/cast128/cast128.h new file mode 100644 index 000000000..d05af15ba --- /dev/null +++ b/bsd/crypto/cast128/cast128.h @@ -0,0 +1,60 @@ +/* + * heavily modified by Tomomi Suzuki + */ +/* + * The CAST-128 Encryption Algorithm (RFC 2144) + * + * original implementation + * 1997/08/21 + */ +/* + * Copyright (C) 1997 Hideo "Sir MANMOS" Morishita + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY Hideo "Sir MaNMOS" Morishita ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL Hideo "Sir MaNMOS" Morishita BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef RFC2144_CAST_128_H +#define RFC2144_CAST_128_H + +#include +#include + + +#define CAST128_ENCRYPT 1 +#define CAST128_DECRYPT 0 + + +extern void set_cast128_subkey __P((u_int32_t *, u_int8_t *)); +extern void cast128_encrypt_round16 __P((u_int8_t *, const u_int8_t *, + u_int32_t *)); +extern void cast128_decrypt_round16 __P((u_int8_t *, const u_int8_t *, + u_int32_t *)); +extern void cast128_encrypt_round12 __P((u_int8_t *, const u_int8_t *, + u_int32_t *)); +extern void cast128_decrypt_round12 __P((u_int8_t *, const u_int8_t *, + u_int32_t *)); +extern void cast128_cbc_process __P((struct mbuf *, size_t, size_t, + u_int32_t *, u_int8_t *, size_t, int)); + +#endif + diff --git a/bsd/crypto/cast128/cast128_cbc.c b/bsd/crypto/cast128/cast128_cbc.c new file mode 100644 index 000000000..e2d2f6c1c --- /dev/null +++ b/bsd/crypto/cast128/cast128_cbc.c @@ -0,0 +1,217 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * based on sys/crypto/des/des_cbc.c, rewrote by Tomomi Suzuki
+ */
+
+/*
+ * NOTE(review): the #include targets below were lost in extraction
+ * (bare "#include" lines); presumably sys/param.h, sys/systm.h,
+ * sys/mbuf.h and crypto/cast128/cast128.h -- confirm against upstream.
+ */
+#include
+#include
+#include
+#include
+
+
+/*
+ * cast128_cbc_process -- CBC-mode CAST-128 over an mbuf chain, in place.
+ *
+ * m0:     packet; the first `skip' bytes are left untouched, the next
+ *         `length' bytes (must be a multiple of 8) are transformed.
+ * subkey: expanded key schedule.
+ * iv:     8-byte initialization vector; updated on the decrypt path only
+ *         (encryption chains through the local outbuf instead).
+ * keylen: key length in BYTES; <= 10 (80 bits) selects the 12-round
+ *         variant, otherwise the 16-round variant.
+ * mode:   CAST128_ENCRYPT or CAST128_DECRYPT.
+ *
+ * On malformed arguments the function logs via printf() and returns
+ * without touching the packet; an mbuf chain shorter than advertised
+ * panics.
+ */
+void
+cast128_cbc_process(m0, skip, length, subkey, iv, keylen, mode)
+	struct mbuf *m0;
+	size_t skip;
+	size_t length;
+	u_int32_t *subkey;
+	u_int8_t *iv;
+	size_t keylen;
+	int mode;
+{
+	struct mbuf *m;
+	u_int8_t inbuf[8], outbuf[8];
+	size_t off;
+
+	/* sanity check */
+	if (m0->m_pkthdr.len < skip) {
+		printf("cast128_cbc_process: mbuf length < skip\n");
+		return;
+	}
+	if (m0->m_pkthdr.len < length) {
+		printf("cast128_cbc_process: mbuf length < encrypt length\n");
+		return;
+	}
+	if (m0->m_pkthdr.len < skip + length) {
+		printf("cast128_cbc_process: "
+			"mbuf length < skip + encrypt length\n");
+		return;
+	}
+	if (length % 8) {
+		printf("cast128_cbc_process: length is not multiple of 8\n");
+		return;
+	}
+
+	m = m0;
+	off = 0;
+
+	/* skip over the header: advance (m, off) past the first `skip' bytes */
+	while (skip) {
+		if (!m)
+			panic("cast128_cbc_process: mbuf chain?\n");
+		if (m->m_len <= skip) {
+			skip -= m->m_len;
+			m = m->m_next;
+			off = 0;
+		} else {
+			off = skip;
+			skip = 0;
+		}
+	}
+
+	/* copy iv into outbuf for XOR (encrypt) */
+	bcopy(iv, outbuf, 8);
+
+	/*
+	 * encrypt/decrypt packet, one 8-byte block per iteration
+	 */
+	while (length > 0) {
+		int i;
+
+		if (!m)
+			panic("cast128_cbc_process: mbuf chain?\n");
+
+		/*
+		 * copy the source into input buffer.
+		 * don't update off or m, since we need to use them
+		 * later.
+		 */
+		if (off + 8 <= m->m_len)
+			bcopy(mtod(m, u_int8_t *)+off, inbuf, 8);
+		else {
+			/* block straddles mbufs: gather byte-by-byte,
+			 * skipping zero-length mbufs */
+			struct mbuf *n;
+			size_t noff;
+			u_int8_t *p, *in;
+
+			n = m;
+			noff = off;
+			p = mtod(n, u_int8_t *) + noff;
+
+			in = inbuf;
+			while (in - inbuf < 8) {
+				if (!p) {
+					panic("cast128_cbc_process: "
+						"mbuf chain?\n");
+				}
+				*in++ = *p++;
+				noff++;
+				if (noff < n->m_len)
+					continue;
+				do {
+					n = n->m_next;
+				} while (n && !n->m_len);
+				noff = 0;
+				if (n)
+					p = mtod(n, u_int8_t *);
+				else
+					p = NULL;
+			}
+		}
+
+		/* encrypt/decrypt */
+		switch (mode) {
+		case CAST128_ENCRYPT:
+			/* XOR with previous ciphertext (or iv on the
+			 * first block), still held in outbuf */
+			for (i = 0; i < 8; i++)
+				inbuf[i] ^= outbuf[i];
+
+			/* encrypt */
+			if (keylen <= 80/8)
+				cast128_encrypt_round12(outbuf, inbuf, subkey);
+			else
+				cast128_encrypt_round16(outbuf, inbuf, subkey);
+			break;
+
+		case CAST128_DECRYPT:
+			/* decrypt */
+			if (keylen <= 80/8)
+				cast128_decrypt_round12(outbuf, inbuf, subkey);
+			else
+				cast128_decrypt_round16(outbuf, inbuf, subkey);
+
+			/* XOR */
+			for (i = 0; i < 8; i++)
+				outbuf[i] ^= iv[i];
+
+			/* copy inbuf into iv for next XOR */
+			bcopy(inbuf, iv, 8);
+			break;
+		}
+
+		/*
+		 * copy the output buffer into the result.
+		 * need to update off and m.
+		 */
+		if (off + 8 < m->m_len) {
+			bcopy(outbuf, mtod(m, u_int8_t *) + off, 8);
+			off += 8;
+		} else if (off + 8 == m->m_len) {
+			bcopy(outbuf, mtod(m, u_int8_t *) + off, 8);
+			do {
+				m = m->m_next;
+			} while (m && !m->m_len);
+			off = 0;
+		} else {
+			/* block straddles mbufs: scatter byte-by-byte,
+			 * mirroring the gather loop above */
+			struct mbuf *n;
+			size_t noff;
+			u_int8_t *p, *out;
+
+			n = m;
+			noff = off;
+			p = mtod(n, u_int8_t *) + noff;
+
+			out = outbuf;
+			while (out - outbuf < 8) {
+				if (!p) {
+					panic("cast128_cbc_process: "
+						"mbuf chain?\n");
+				}
+				*p++ = *out++;
+				noff++;
+				if (noff < n->m_len)
+					continue;
+				do {
+					n = n->m_next;
+				} while (n && !n->m_len);
+				noff = 0;
+				if (n)
+					p = mtod(n, u_int8_t *);
+				else
+					p = NULL;
+			}
+
+			m = n;
+			off = noff;
+		}
+
+		length -= 8;
+	}
+}
+
diff --git a/bsd/crypto/cast128/cast128_subkey.h b/bsd/crypto/cast128/cast128_subkey.h
new file mode 100644
index 000000000..5f52be30a
--- /dev/null
+++ b/bsd/crypto/cast128/cast128_subkey.h
@@ -0,0 +1,89 @@
+/*
+ * heavily modified by Tomomi Suzuki
+ */
+/*
+ * The CAST-128 Encryption Algorithm (RFC 2144)
+ *
+ * original implementation
+ *	1997/08/21
+ */
+/*
+ * Copyright (C) 1997 Hideo "Sir MANMOS" Morishita
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Hideo "Sir MaNMOS" Morishita ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. 
IN NO EVENT SHALL Hideo "Sir MaNMOS" Morishita BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef RFC2144_CAST_128_SUBKEY_H +#define RFC2144_CAST_128_SUBKEY_H + +#define x0x1x2x3 buf[0] +#define x4x5x6x7 buf[1] +#define x8x9xAxB buf[2] +#define xCxDxExF buf[3] +#define z0z1z2z3 buf[4] +#define z4z5z6z7 buf[5] +#define z8z9zAzB buf[6] +#define zCzDzEzF buf[7] + +#define byte0(x) (((x) >> 24)) +#define byte1(x) (((x) >> 16) & 0xff) +#define byte2(x) (((x) >> 8) & 0xff) +#define byte3(x) (((x)) & 0xff) + +#define x0 byte0(buf[0]) +#define x1 byte1(buf[0]) +#define x2 byte2(buf[0]) +#define x3 byte3(buf[0]) +#define x4 byte0(buf[1]) +#define x5 byte1(buf[1]) +#define x6 byte2(buf[1]) +#define x7 byte3(buf[1]) +#define x8 byte0(buf[2]) +#define x9 byte1(buf[2]) +#define xA byte2(buf[2]) +#define xB byte3(buf[2]) +#define xC byte0(buf[3]) +#define xD byte1(buf[3]) +#define xE byte2(buf[3]) +#define xF byte3(buf[3]) +#define z0 byte0(buf[4]) +#define z1 byte1(buf[4]) +#define z2 byte2(buf[4]) +#define z3 byte3(buf[4]) +#define z4 byte0(buf[5]) +#define z5 byte1(buf[5]) +#define z6 byte2(buf[5]) +#define z7 byte3(buf[5]) +#define z8 byte0(buf[6]) +#define z9 byte1(buf[6]) +#define zA byte2(buf[6]) +#define zB byte3(buf[6]) +#define zC byte0(buf[7]) +#define zD byte1(buf[7]) +#define zE byte2(buf[7]) +#define zF byte3(buf[7]) + +#define circular_leftshift(x, y) ( ((x) << (y)) | ((x) >> (32-(y))) ) + +#endif + diff --git a/bsd/crypto/des/Makefile b/bsd/crypto/des/Makefile new file mode 100644 index 000000000..42d04844a --- 
/dev/null +++ b/bsd/crypto/des/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + des.h des_locl.h podd.h sk.h spr.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = crypto + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = crypto + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/crypto/des/des.h b/bsd/crypto/des/des.h new file mode 100644 index 000000000..144a61e5e --- /dev/null +++ b/bsd/crypto/des/des.h @@ -0,0 +1,278 @@ +/* lib/des/des.h */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +#ifndef HEADER_DES_H +#define HEADER_DES_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* If this is set to 'unsigned int' on a DEC Alpha, this gives about a + * %20 speed up (longs are 8 bytes, int's are 4). 
*/ +#ifndef DES_LONG +#define DES_LONG unsigned long +#endif + +typedef unsigned char des_cblock[8]; +typedef struct des_ks_struct + { + union { + des_cblock _; + /* make sure things are correct size on machines with + * 8 byte longs */ + DES_LONG pad[2]; + } ks; +#undef _ +#define _ ks._ + } des_key_schedule[16]; + +#define DES_KEY_SZ (sizeof(des_cblock)) +#define DES_SCHEDULE_SZ (sizeof(des_key_schedule)) + +#define DES_ENCRYPT 1 +#define DES_DECRYPT 0 + +#define DES_CBC_MODE 0 +#define DES_PCBC_MODE 1 + +#define des_ecb2_encrypt(i,o,k1,k2,e) \ + des_ecb3_encrypt((i),(o),(k1),(k2),(k1),(e)) + +#define des_ede2_cbc_encrypt(i,o,l,k1,k2,iv,e) \ + des_ede3_cbc_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(e)) + +#define des_ede2_cfb64_encrypt(i,o,l,k1,k2,iv,n,e) \ + des_ede3_cfb64_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(n),(e)) + +#define des_ede2_ofb64_encrypt(i,o,l,k1,k2,iv,n) \ + des_ede3_ofb64_encrypt((i),(o),(l),(k1),(k2),(k1),(iv),(n)) + +#define C_Block des_cblock +#define Key_schedule des_key_schedule +#ifdef KERBEROS +#define ENCRYPT DES_ENCRYPT +#define DECRYPT DES_DECRYPT +#endif +#define KEY_SZ DES_KEY_SZ +#define string_to_key des_string_to_key +#define read_pw_string des_read_pw_string +#define random_key des_random_key +#define pcbc_encrypt des_pcbc_encrypt +#define set_key des_set_key +#define key_sched des_key_sched +#define ecb_encrypt des_ecb_encrypt +#define cbc_encrypt des_cbc_encrypt +#define ncbc_encrypt des_ncbc_encrypt +#define xcbc_encrypt des_xcbc_encrypt +#define cbc_cksum des_cbc_cksum +#define quad_cksum des_quad_cksum + +/* For compatibility with the MIT lib - eay 20/05/92 */ +typedef des_key_schedule bit_64; +#define des_fixup_key_parity des_set_odd_parity +#define des_check_key_parity check_parity + +extern int des_check_key; /* defaults to false */ +extern int des_rw_mode; /* defaults to DES_PCBC_MODE */ + +/* The next line is used to disable full ANSI prototypes, if your + * compiler has problems with the prototypes, make sure this line 
always + * evaluates to true :-) */ +#if defined(MSDOS) || defined(__STDC__) +#undef NOPROTO +#endif +#ifndef NOPROTO +char *des_options(void); +void des_ecb3_encrypt(des_cblock *input,des_cblock *output, + des_key_schedule ks1,des_key_schedule ks2, + des_key_schedule ks3, int enc); +DES_LONG des_cbc_cksum(des_cblock *input,des_cblock *output, + long length,des_key_schedule schedule,des_cblock *ivec); +/* +void des_cbc_encrypt(des_cblock *input,des_cblock *output,long length, + des_key_schedule schedule,des_cblock *ivec,int enc); +*/ +void des_cbc_encrypt(struct mbuf *, size_t, size_t, + des_key_schedule schedule,des_cblock *ivec, int enc); +void des_ncbc_encrypt(des_cblock *input,des_cblock *output,long length, + des_key_schedule schedule,des_cblock *ivec,int enc); +void des_xcbc_encrypt(des_cblock *input,des_cblock *output,long length, + des_key_schedule schedule,des_cblock *ivec, + des_cblock *inw,des_cblock *outw,int enc); +void des_3cbc_encrypt(des_cblock *input,des_cblock *output,long length, + des_key_schedule sk1,des_key_schedule sk2, + des_cblock *ivec1,des_cblock *ivec2,int enc); +extern void des_3cbc_process(struct mbuf *, size_t, size_t, + des_key_schedule *schedule, des_cblock *ivec, int mode); +void des_cfb_encrypt(unsigned char *in,unsigned char *out,int numbits, + long length,des_key_schedule schedule,des_cblock *ivec,int enc); +void des_ecb_encrypt(des_cblock *input,des_cblock *output, + des_key_schedule ks,int enc); +void des_encrypt(DES_LONG *data,des_key_schedule ks, int enc); +void des_encrypt2(DES_LONG *data,des_key_schedule ks, int enc); +void des_ede3_cbc_encrypt(des_cblock *input, des_cblock *output, + long length, des_key_schedule ks1, des_key_schedule ks2, + des_key_schedule ks3, des_cblock *ivec, int enc); +void des_ede3_cfb64_encrypt(unsigned char *in, unsigned char *out, + long length, des_key_schedule ks1, des_key_schedule ks2, + des_key_schedule ks3, des_cblock *ivec, int *num, int encrypt); +void des_ede3_ofb64_encrypt(unsigned char 
*in, unsigned char *out, + long length, des_key_schedule ks1, des_key_schedule ks2, + des_key_schedule ks3, des_cblock *ivec, int *num); + +int des_enc_read(int fd,char *buf,int len,des_key_schedule sched, + des_cblock *iv); +int des_enc_write(int fd,char *buf,int len,des_key_schedule sched, + des_cblock *iv); +#ifdef PERL5 +char *des_crypt(const char *buf,const char *salt); +#else +/* some stupid compilers complain because I have declared char instead + * of const char */ +#if 1 +char *crypt(const char *buf,const char *salt); +#else +char *crypt(); +#endif +#endif +void des_ofb_encrypt(unsigned char *in,unsigned char *out, + int numbits,long length,des_key_schedule schedule,des_cblock *ivec); +void des_pcbc_encrypt(des_cblock *input,des_cblock *output,long length, + des_key_schedule schedule,des_cblock *ivec,int enc); +DES_LONG des_quad_cksum(des_cblock *input,des_cblock *output, + long length,int out_count,des_cblock *seed); +void des_random_seed(des_cblock key); +void des_random_key(des_cblock ret); +int des_read_password(des_cblock *key,char *prompt,int verify); +int des_read_2passwords(des_cblock *key1,des_cblock *key2, + char *prompt,int verify); +int des_read_pw_string(char *buf,int length,char *prompt,int verify); +void des_set_odd_parity(des_cblock *key); +int des_is_weak_key(des_cblock *key); +int des_set_key(des_cblock *key,des_key_schedule schedule); +int des_key_sched(des_cblock *key,des_key_schedule schedule); +void des_string_to_key(char *str,des_cblock *key); +void des_string_to_2keys(char *str,des_cblock *key1,des_cblock *key2); +void des_cfb64_encrypt(unsigned char *in, unsigned char *out, long length, + des_key_schedule schedule, des_cblock *ivec, int *num, int enc); +void des_ofb64_encrypt(unsigned char *in, unsigned char *out, long length, + des_key_schedule schedule, des_cblock *ivec, int *num); + +/* Extra functions from Mark Murray */ +/* +void des_cblock_print_file(des_cblock *cb, FILE *fp); +*/ +/* The following functions are not in the 
normal unix build or the + * SSLeay build. When using the SSLeay build, use RAND_seed() + * and RAND_bytes() instead. */ +int des_new_random_key(des_cblock *key); +void des_init_random_number_generator(des_cblock *key); +void des_set_random_generator_seed(des_cblock *key); +void des_set_sequence_number(des_cblock new_sequence_number); +void des_generate_random_block(des_cblock *block); + +#else + +char *des_options(); +void des_ecb3_encrypt(); +DES_LONG des_cbc_cksum(); +void des_cbc_encrypt(); +void des_ncbc_encrypt(); +void des_xcbc_encrypt(); +void des_3cbc_encrypt(); +void des_cfb_encrypt(); +void des_ede3_cfb64_encrypt(); +void des_ede3_ofb64_encrypt(); +void des_ecb_encrypt(); +void des_encrypt(); +void des_encrypt2(); +void des_ede3_cbc_encrypt(); +int des_enc_read(); +int des_enc_write(); +#ifdef PERL5 +char *des_crypt(); +#else +char *crypt(); +#endif +void des_ofb_encrypt(); +void des_pcbc_encrypt(); +DES_LONG des_quad_cksum(); +void des_random_seed(); +void des_random_key(); +int des_read_password(); +int des_read_2passwords(); +int des_read_pw_string(); +void des_set_odd_parity(); +int des_is_weak_key(); +int des_set_key(); +int des_key_sched(); +void des_string_to_key(); +void des_string_to_2keys(); +void des_cfb64_encrypt(); +void des_ofb64_encrypt(); + +/* Extra functions from Mark Murray */ +void des_cblock_print_file(); +/* The following functions are not in the normal unix build or the + * SSLeay build. When using the SSLeay build, use RAND_seed() + * and RAND_bytes() instead. 
*/ +#ifdef FreeBSD +int des_new_random_key(); +void des_init_random_number_generator(); +void des_set_random_generator_seed(); +void des_set_sequence_number(); +void des_generate_random_block(); +#endif + +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/bsd/crypto/des/des_3cbc.c b/bsd/crypto/des/des_3cbc.c new file mode 100644 index 000000000..9d63a63de --- /dev/null +++ b/bsd/crypto/des/des_3cbc.c @@ -0,0 +1,244 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * based on sys/crypto/des/des_cbc.c, rewrote by Tomomi Suzuki + */ +#include + + +void des_3cbc_process(m0, skip, length, schedule, ivec, mode) + struct mbuf *m0; + size_t skip; + size_t length; + des_key_schedule *schedule; + des_cblock (*ivec); + int mode; +{ + u_int8_t inbuf[8], outbuf[8]; + struct mbuf *m; + size_t off; + DES_LONG tin0, tin1; + DES_LONG tout0, tout1; + DES_LONG tin[2]; + DES_LONG xor0 = 0, xor1 = 0; + u_int8_t *iv; + u_int8_t *in, *out; + + /* sanity check */ + if (m0->m_pkthdr.len < skip) { + printf("des_3cbc_process: mbuf length < skip\n"); + return; + } + if (m0->m_pkthdr.len < length) { + printf("des_3cbc_process: mbuf length < encrypt length\n"); + return; + } + if (m0->m_pkthdr.len < skip + length) { + printf("des_3cbc_process: mbuf length < " + "skip + encrypt length\n"); + return; + } + if (length % 8) { + printf("des_3cbc_process: length(%lu) is not multiple of 8\n", + (u_long)length); + return; + } + + m = m0; + off = 0; + + /* skip over the header */ + while (skip) { + if (!m) + panic("des_3cbc_process: mbuf chain?\n"); + if (m->m_len <= skip) { + skip -= m->m_len; + m = m->m_next; + off = 0; + } else { + off = skip; + skip = 0; + } + } + + /* initialize */ + tin0 = tin1 = tout0 = tout1 = 0; + tin[0] = tin[1] = 0; + + switch (mode) { + case DES_ENCRYPT: + iv = (u_int8_t *)ivec; + c2l(iv, tout0); + c2l(iv, tout1); + break; + case DES_DECRYPT: + xor0 = xor1 = 0; + iv = (u_int8_t *)ivec; + c2l(iv, 
xor0); + c2l(iv, xor1); + break; + } + + /* + * encrypt/decrypt packet + */ + while (length > 0) { + if (!m) + panic("des_3cbc_process: mbuf chain?\n"); + + /* + * copy the source into input buffer. + * don't update off or m, since we need to use them + * later. + */ + if (off + 8 <= m->m_len) + bcopy(mtod(m, u_int8_t *) + off, &inbuf[0], 8); + else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *in; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + in = &inbuf[0]; + while (in - &inbuf[0] < 8) { + if (!p) { + panic("des_3cbc_process: " + "mbuf chain?\n"); + } + *in++ = *p++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && !n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + } + + /* encrypt/decrypt */ + switch (mode) { + case DES_ENCRYPT: + in = &inbuf[0]; + out = &outbuf[0]; + c2l(in, tin0); + c2l(in, tin1); + + /* XOR */ + tin0 ^= tout0; tin[0] = tin0; + tin1 ^= tout1; tin[1] = tin1; + + des_encrypt((DES_LONG *)tin, schedule[0], DES_ENCRYPT); + des_encrypt((DES_LONG *)tin, schedule[1], DES_DECRYPT); + des_encrypt((DES_LONG *)tin, schedule[2], DES_ENCRYPT); + + tout0 = tin[0]; l2c(tout0, out); + tout1 = tin[1]; l2c(tout1, out); + break; + case DES_DECRYPT: + in = &inbuf[0]; + out = &outbuf[0]; + c2l(in, tin0); tin[0] = tin0; + c2l(in, tin1); tin[1] = tin1; + + des_encrypt((DES_LONG *)tin, schedule[2], DES_DECRYPT); + des_encrypt((DES_LONG *)tin, schedule[1], DES_ENCRYPT); + des_encrypt((DES_LONG *)tin, schedule[0], DES_DECRYPT); + + /* XOR */ + tout0 = tin[0] ^ xor0; + tout1 = tin[1] ^ xor1; + l2c(tout0, out); + l2c(tout1, out); + + /* for next iv */ + xor0 = tin0; + xor1 = tin1; + break; + } + + /* + * copy the output buffer int the result. + * need to update off and m. 
+ */ + if (off + 8 < m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + off += 8; + } else if (off + 8 == m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + do { + m = m->m_next; + } while (m && !m->m_len); + off = 0; + } else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *out; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + out = &outbuf[0]; + while (out - &outbuf[0] < 8) { + if (!p) { + panic("des_3cbc_process: " + "mbuf chain?\n"); + } + *p++ = *out++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && !n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + + m = n; + off = noff; + } + + length -= 8; + } +} + diff --git a/bsd/crypto/des/des_cbc.c b/bsd/crypto/des/des_cbc.c new file mode 100644 index 000000000..657cf4bd9 --- /dev/null +++ b/bsd/crypto/des/des_cbc.c @@ -0,0 +1,326 @@ +/* + * heavily modified by Yoshifumi Nishida . + * then, completely rewrote by Jun-ichiro itojun Itoh , + * 1997. + */ +/* crypto/des/cbc_enc.c */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +#include + +#define panic(x) {printf(x); return;} + +void des_cbc_encrypt(m0, skip, length, schedule, ivec, mode) + struct mbuf *m0; + size_t skip; + size_t length; + des_key_schedule schedule; + des_cblock (*ivec); + int mode; +{ + u_int8_t inbuf[8], outbuf[8]; + struct mbuf *m; + size_t off; + register DES_LONG tin0, tin1; + register DES_LONG tout0, tout1; + DES_LONG tin[2]; + u_int8_t *iv; + + /* sanity checks */ + if (m0->m_pkthdr.len < skip) { + printf("mbuf length < skip\n"); + return; + } + if (m0->m_pkthdr.len < length) { + printf("mbuf length < encrypt length\n"); + return; + } + if (m0->m_pkthdr.len < skip + length) { + printf("mbuf length < skip + encrypt length\n"); + return; + } + if (length % 8) { + printf("length is not multiple of 8\n"); + return; + } + + m = m0; + off = 0; + + /* skip over the header */ + while (skip) { + if (!m) + panic("mbuf chain?\n"); + if (m->m_len <= skip) { + skip -= m->m_len; + m = m->m_next; + off = 0; + } else { + off = skip; + skip = 0; + } + } + + /* initialize */ + tin0 = tin1 = tout0 = tout1 = 0; + tin[0] = tin[1] = 0; + + if (mode == DES_ENCRYPT) { + u_int8_t *in, *out; + + iv = (u_int8_t *)ivec; + c2l(iv, tout0); + c2l(iv, tout1); + + while (0 < length) { + if (!m) + panic("mbuf chain?\n"); + + /* + * copy the source into input buffer. + * don't update off or m, since we need to use them * later. + */ + if (off + 8 <= m->m_len) + bcopy(mtod(m, u_int8_t *) + off, &inbuf[0], 8); + else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *in; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + in = &inbuf[0]; + while (in - &inbuf[0] < 8) { + if (!p) + panic("mbuf chain?\n"); + + *in++ = *p++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! 
n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + } + + in = &inbuf[0]; + out = &outbuf[0]; + c2l(in, tin0); + c2l(in, tin1); + + tin0 ^= tout0; tin[0] = tin0; + tin1 ^= tout1; tin[1] = tin1; + des_encrypt((DES_LONG *)tin, schedule, DES_ENCRYPT); + tout0 = tin[0]; l2c(tout0, out); + tout1 = tin[1]; l2c(tout1, out); + + /* + * copy the output buffer into the result. + * need to update off and m. + */ + if (off + 8 < m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + off += 8; + } else if (off + 8 == m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + do { + m = m->m_next; + } while (m && ! m->m_len); + off = 0; + } else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *out; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + out = &outbuf[0]; + while (out - &outbuf[0] < 8) { + if (!p) + panic("mbuf chain?"); + *p++ = *out++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + + m = n; + off = noff; + } + + length -= 8; + } + } else if (mode == DES_DECRYPT) { + register DES_LONG xor0, xor1; + u_int8_t *in, *out; + + xor0 = xor1 = 0; + iv = (u_int8_t *)ivec; + c2l(iv, xor0); + c2l(iv, xor1); + + while (0 < length) { + if (!m) + panic("mbuf chain?\n"); + + /* + * copy the source into input buffer. + * don't update off or m, since we need to use them * later. + */ + if (off + 8 <= m->m_len) + bcopy(mtod(m, u_int8_t *) + off, &inbuf[0], 8); + else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *in; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + in = &inbuf[0]; + while (in - &inbuf[0] < 8) { + if (!p) + panic("mbuf chain?\n"); + *in++ = *p++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! 
n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + } + + in = &inbuf[0]; + out = &outbuf[0]; + c2l(in, tin0); tin[0] = tin0; + c2l(in, tin1); tin[1] = tin1; + des_encrypt((DES_LONG *)tin, schedule, DES_DECRYPT); + tout0 = tin[0] ^ xor0; + tout1 = tin[1] ^ xor1; + l2c(tout0, out); + l2c(tout1, out); + xor0 = tin0; + xor1 = tin1; + + + /* + * copy the output buffer into the result. + * need to update off and m. + */ + if (off + 8 < m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + off += 8; + } else if (off + 8 == m->m_len) { + bcopy(&outbuf[0], mtod(m, u_int8_t *) + off, 8); + do { + m = m->m_next; + } while (m && ! m->m_len); + off = 0; + } else { + struct mbuf *n; + size_t noff; + u_int8_t *p; + u_int8_t *out; + + n = m; + noff = off; + p = mtod(n, u_int8_t *) + noff; + + out = &outbuf[0]; + while (out - &outbuf[0] < 8) { + if (!p) + panic("mbuf chain?\n"); + *p++ = *out++; + noff++; + if (noff < n->m_len) + continue; + do { + n = n->m_next; + } while (n && ! n->m_len); + noff = 0; + if (n) + p = mtod(n, u_int8_t *) + noff; + else + p = NULL; + } + + m = n; + off = noff; + } + + length -= 8; + } + } +} diff --git a/bsd/crypto/des/des_ecb.c b/bsd/crypto/des/des_ecb.c new file mode 100644 index 000000000..b0c239ac8 --- /dev/null +++ b/bsd/crypto/des/des_ecb.c @@ -0,0 +1,229 @@ +/* crypto/des/ecb_enc.c */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. 
If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +#include +#include + +char *libdes_version="libdes v 3.24 - 20-Apr-1996 - eay"; +char *DES_version="DES part of SSLeay 0.6.4 30-Aug-1996"; + +char *des_options() + { +#ifdef DES_PTR + if (sizeof(DES_LONG) != sizeof(long)) + return("des(ptr,int)"); + else + return("des(ptr,long)"); +#else + if (sizeof(DES_LONG) != sizeof(long)) + return("des(idx,int)"); + else + return("des(idx,long)"); +#endif + } + + +void des_ecb_encrypt(input, output, ks, encrypt) +des_cblock (*input); +des_cblock (*output); +des_key_schedule ks; +int encrypt; + { + register DES_LONG l; + register unsigned char *in,*out; + DES_LONG ll[2]; + + in=(unsigned char *)input; + out=(unsigned char *)output; + c2l(in,l); ll[0]=l; + c2l(in,l); ll[1]=l; + des_encrypt(ll,ks,encrypt); + l=ll[0]; l2c(l,out); + l=ll[1]; l2c(l,out); + l=ll[0]=ll[1]=0; + } + +void des_encrypt(data, ks, encrypt) +DES_LONG *data; +des_key_schedule ks; +int encrypt; + { + register DES_LONG l,r,t,u; +#ifdef DES_PTR + register unsigned char *des_SP=(unsigned char *)des_SPtrans; +#endif +#ifdef undef + union fudge { + DES_LONG l; + unsigned short s[2]; + unsigned char c[4]; + } U,T; +#endif + register int i; + register DES_LONG *s; + + u=data[0]; + r=data[1]; + + IP(u,r); + /* Things have been modified so that the initial rotate is + * done outside the loop. This required the + * des_SPtrans values in sp.h to be rotated 1 bit to the right. + * One perl script later and things have a 5% speed up on a sparc2. + * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> + * for pointing this out. 
*/ + l=(r<<1)|(r>>31); + r=(u<<1)|(u>>31); + + /* clear the top bits on machines with 8byte longs */ + l&=0xffffffffL; + r&=0xffffffffL; + + s=(DES_LONG *)ks; + /* I don't know if it is worth the effort of loop unrolling the + * inner loop + */ + if (encrypt) + { + for (i=0; i<32; i+=8) + { + D_ENCRYPT(l,r,i+0); /* 1 */ + D_ENCRYPT(r,l,i+2); /* 2 */ + D_ENCRYPT(l,r,i+4); /* 3 */ + D_ENCRYPT(r,l,i+6); /* 4 */ + } + } + else + { + for (i=30; i>0; i-=8) + { + D_ENCRYPT(l,r,i-0); /* 16 */ + D_ENCRYPT(r,l,i-2); /* 15 */ + D_ENCRYPT(l,r,i-4); /* 14 */ + D_ENCRYPT(r,l,i-6); /* 13 */ + } + } + l=(l>>1)|(l<<31); + r=(r>>1)|(r<<31); + /* clear the top bits on machines with 8byte longs */ + l&=0xffffffffL; + r&=0xffffffffL; + + FP(r,l); + data[0]=l; + data[1]=r; + l=r=t=u=0; + } + +void des_encrypt2(data, ks, encrypt) +DES_LONG *data; +des_key_schedule ks; +int encrypt; + { + register DES_LONG l,r,t,u; +#ifdef DES_PTR + register unsigned char *des_SP=(unsigned char *)des_SPtrans; +#endif +#ifdef undef + union fudge { + DES_LONG l; + unsigned short s[2]; + unsigned char c[4]; + } U,T; +#endif + register int i; + register DES_LONG *s; + + u=data[0]; + r=data[1]; + + /* Things have been modified so that the initial rotate is + * done outside the loop. This required the + * des_SPtrans values in sp.h to be rotated 1 bit to the right. + * One perl script later and things have a 5% speed up on a sparc2. + * Thanks to Richard Outerbridge <71755.204@CompuServe.COM> + * for pointing this out. 
*/ + l=(r<<1)|(r>>31); + r=(u<<1)|(u>>31); + + /* clear the top bits on machines with 8byte longs */ + l&=0xffffffffL; + r&=0xffffffffL; + + s=(DES_LONG *)ks; + /* I don't know if it is worth the effort of loop unrolling the + * inner loop */ + if (encrypt) + { + for (i=0; i<32; i+=8) + { + D_ENCRYPT(l,r,i+0); /* 1 */ + D_ENCRYPT(r,l,i+2); /* 2 */ + D_ENCRYPT(l,r,i+4); /* 3 */ + D_ENCRYPT(r,l,i+6); /* 4 */ + } + } + else + { + for (i=30; i>0; i-=8) + { + D_ENCRYPT(l,r,i-0); /* 16 */ + D_ENCRYPT(r,l,i-2); /* 15 */ + D_ENCRYPT(l,r,i-4); /* 14 */ + D_ENCRYPT(r,l,i-6); /* 13 */ + } + } + l=(l>>1)|(l<<31); + r=(r>>1)|(r<<31); + /* clear the top bits on machines with 8byte longs */ + l&=0xffffffffL; + r&=0xffffffffL; + + data[0]=l; + data[1]=r; + l=r=t=u=0; + } diff --git a/bsd/crypto/des/des_locl.h b/bsd/crypto/des/des_locl.h new file mode 100644 index 000000000..8f33ab7a1 --- /dev/null +++ b/bsd/crypto/des/des_locl.h @@ -0,0 +1,339 @@ +/* lib/des/des_locl.h */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ +/* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + * + * Always modify des_locl.org since des_locl.h is automatically generated from + * it during SSLeay configuration. 
+ * + * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING + */ + +#include +#include +#include +#include + +#ifndef HEADER_DES_LOCL_H +#define HEADER_DES_LOCL_H + +#if defined(WIN32) || defined(WIN16) +#ifndef MSDOS +#define MSDOS +#endif +#endif + +/* +#include +#include +#ifndef MSDOS +#include +#endif +*/ +#include + +/* the following is tweaked from a config script, that is why it is a + * protected undef/define */ +#ifndef DES_PTR +#undef DES_PTR +#endif + +#ifdef MSDOS /* Visual C++ 2.1 (Windows NT/95) */ +#include +#include +#include +#include +#ifndef RAND +#define RAND +#endif +#undef NOPROTO +#endif + +#if !defined(KERNEL) && (defined(__STDC__) || defined(VMS) || defined(M_XENIX) || defined(MSDOS)) +#include +#endif + +#ifndef RAND +#define RAND +#endif + +#ifdef linux +#undef RAND +#endif + +#ifdef MSDOS +#define getpid() 2 +#define RAND +#undef NOPROTO +#endif + +#if defined(NOCONST) +#define const +#endif + +#ifdef __STDC__ +#undef NOPROTO +#endif + +#ifdef RAND +#define srandom(s) srand(s) +#define random rand +#endif + +#define ITERATIONS 16 +#define HALF_ITERATIONS 8 + +/* used in des_read and des_write */ +#define MAXWRITE (1024*16) +#define BSIZE (MAXWRITE+4) + +#define c2l(c,l) (l =((DES_LONG)(*((c)++))) , \ + l|=((DES_LONG)(*((c)++)))<< 8L, \ + l|=((DES_LONG)(*((c)++)))<<16L, \ + l|=((DES_LONG)(*((c)++)))<<24L) + +/* NOTE - c is not incremented as per c2l */ +#define c2ln(c,l1,l2,n) { \ + c+=n; \ + l1=l2=0; \ + switch (n) { \ + case 8: l2 =((DES_LONG)(*(--(c))))<<24L; \ + case 7: l2|=((DES_LONG)(*(--(c))))<<16L; \ + case 6: l2|=((DES_LONG)(*(--(c))))<< 8L; \ + case 5: l2|=((DES_LONG)(*(--(c)))); \ + case 4: l1 =((DES_LONG)(*(--(c))))<<24L; \ + case 3: l1|=((DES_LONG)(*(--(c))))<<16L; \ + case 2: l1|=((DES_LONG)(*(--(c))))<< 8L; \ + case 1: l1|=((DES_LONG)(*(--(c)))); \ + } \ + } + +#define l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \ + *((c)++)=(unsigned 
char)(((l)>>16L)&0xff), \ + *((c)++)=(unsigned char)(((l)>>24L)&0xff)) + +/* replacements for htonl and ntohl since I have no idea what to do + * when faced with machines with 8 byte longs. */ +#define HDRSIZE 4 + +#define n2l(c,l) (l =((DES_LONG)(*((c)++)))<<24L, \ + l|=((DES_LONG)(*((c)++)))<<16L, \ + l|=((DES_LONG)(*((c)++)))<< 8L, \ + l|=((DES_LONG)(*((c)++)))) + +#define l2n(l,c) (*((c)++)=(unsigned char)(((l)>>24L)&0xff), \ + *((c)++)=(unsigned char)(((l)>>16L)&0xff), \ + *((c)++)=(unsigned char)(((l)>> 8L)&0xff), \ + *((c)++)=(unsigned char)(((l) )&0xff)) + +/* NOTE - c is not incremented as per l2c */ +#define l2cn(l1,l2,c,n) { \ + c+=n; \ + switch (n) { \ + case 8: *(--(c))=(unsigned char)(((l2)>>24L)&0xff); \ + case 7: *(--(c))=(unsigned char)(((l2)>>16L)&0xff); \ + case 6: *(--(c))=(unsigned char)(((l2)>> 8L)&0xff); \ + case 5: *(--(c))=(unsigned char)(((l2) )&0xff); \ + case 4: *(--(c))=(unsigned char)(((l1)>>24L)&0xff); \ + case 3: *(--(c))=(unsigned char)(((l1)>>16L)&0xff); \ + case 2: *(--(c))=(unsigned char)(((l1)>> 8L)&0xff); \ + case 1: *(--(c))=(unsigned char)(((l1) )&0xff); \ + } \ + } + +#if defined(WIN32) +#define ROTATE(a,n) (_lrotr(a,n)) +#else +#define ROTATE(a,n) (((a)>>(n))+((a)<<(32-(n)))) +#endif + +/* The changes to this macro may help or hinder, depending on the + * compiler and the achitecture. gcc2 always seems to do well :-). + * Inspired by Dana How + * DO NOT use the alternative version on machines with 8 byte longs. 
+ * It does not seem to work on the Alpha, even when DES_LONG is 4 + * bytes, probably an issue of accessing non-word aligned objects :-( */ +#ifdef DES_PTR + +#define D_ENCRYPT(L,R,S) { \ + u=((R^s[S ])<<2); \ + t= R^s[S+1]; \ + t=ROTATE(t,2); \ + L^= (\ + *(DES_LONG *)((unsigned char *)des_SP+0x100+((t )&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP+0x300+((t>> 8)&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP+0x500+((t>>16)&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP+0x700+((t>>24)&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP +((u )&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP+0x200+((u>> 8)&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP+0x400+((u>>16)&0xfc))+ \ + *(DES_LONG *)((unsigned char *)des_SP+0x600+((u>>24)&0xfc))); } +#else /* original version */ +#ifdef undef +#define D_ENCRYPT(L,R,S) \ + U.l=R^s[S+1]; \ + T.s[0]=((U.s[0]>>4)|(U.s[1]<<12))&0x3f3f; \ + T.s[1]=((U.s[1]>>4)|(U.s[0]<<12))&0x3f3f; \ + U.l=(R^s[S ])&0x3f3f3f3fL; \ + L^= des_SPtrans[1][(T.c[0])]| \ + des_SPtrans[3][(T.c[1])]| \ + des_SPtrans[5][(T.c[2])]| \ + des_SPtrans[7][(T.c[3])]| \ + des_SPtrans[0][(U.c[0])]| \ + des_SPtrans[2][(U.c[1])]| \ + des_SPtrans[4][(U.c[2])]| \ + des_SPtrans[6][(U.c[3])]; +#else +#define D_ENCRYPT(Q,R,S) {\ + u=(R^s[S ]); \ + t=R^s[S+1]; \ + t=ROTATE(t,4); \ + Q^= des_SPtrans[1][(t )&0x3f]| \ + des_SPtrans[3][(t>> 8L)&0x3f]| \ + des_SPtrans[5][(t>>16L)&0x3f]| \ + des_SPtrans[7][(t>>24L)&0x3f]| \ + des_SPtrans[0][(u )&0x3f]| \ + des_SPtrans[2][(u>> 8L)&0x3f]| \ + des_SPtrans[4][(u>>16L)&0x3f]| \ + des_SPtrans[6][(u>>24L)&0x3f]; } +#endif +#endif + + /* IP and FP + * The problem is more of a geometric problem that random bit fiddling. 
+ 0 1 2 3 4 5 6 7 62 54 46 38 30 22 14 6 + 8 9 10 11 12 13 14 15 60 52 44 36 28 20 12 4 + 16 17 18 19 20 21 22 23 58 50 42 34 26 18 10 2 + 24 25 26 27 28 29 30 31 to 56 48 40 32 24 16 8 0 + + 32 33 34 35 36 37 38 39 63 55 47 39 31 23 15 7 + 40 41 42 43 44 45 46 47 61 53 45 37 29 21 13 5 + 48 49 50 51 52 53 54 55 59 51 43 35 27 19 11 3 + 56 57 58 59 60 61 62 63 57 49 41 33 25 17 9 1 + + The output has been subject to swaps of the form + 0 1 -> 3 1 but the odd and even bits have been put into + 2 3 2 0 + different words. The main trick is to remember that + t=((l>>size)^r)&(mask); + r^=t; + l^=(t<>(n))^(b))&(m)),\ + (b)^=(t),\ + (a)^=((t)<<(n))) + +#define IP(l,r) \ + { \ + register DES_LONG tt; \ + PERM_OP(r,l,tt, 4,0x0f0f0f0fL); \ + PERM_OP(l,r,tt,16,0x0000ffffL); \ + PERM_OP(r,l,tt, 2,0x33333333L); \ + PERM_OP(l,r,tt, 8,0x00ff00ffL); \ + PERM_OP(r,l,tt, 1,0x55555555L); \ + } + +#define FP(l,r) \ + { \ + register DES_LONG tt; \ + PERM_OP(l,r,tt, 1,0x55555555L); \ + PERM_OP(r,l,tt, 8,0x00ff00ffL); \ + PERM_OP(l,r,tt, 2,0x33333333L); \ + PERM_OP(r,l,tt,16,0x0000ffffL); \ + PERM_OP(l,r,tt, 4,0x0f0f0f0fL); \ + } +#endif + + +/* +#define mbuf2char(i_mbuf, i_index, in) \ + { \ + register int i; \ + struct mbuf *m; \ + char *buf; \ + m = i_mbuf; \ + for (i = 0; i < 8; i ++){ \ + if (i_index + i == m->m_len){ \ + m = m->m_next; \ + } \ + buf = mtod(m, char *); \ + in[i] = *(buf + i); \ + } + + +#define char2mbuf(o_mbuf, o_index, out) \ + { \ + register int i; \ + struct mbuf *m; \ + char *buf; \ + m = o_mbuf; \ + for (i = 0; i < 8; i ++){ \ + if (i_index + i == m->m_len){ \ + m = m->m_next; \ + } \ + buf = mtod(m, char *); \ + *(buf + i) = out[i]; \ + } +*/ + diff --git a/bsd/crypto/des/des_setkey.c b/bsd/crypto/des/des_setkey.c new file mode 100644 index 000000000..4656c960f --- /dev/null +++ b/bsd/crypto/des/des_setkey.c @@ -0,0 +1,236 @@ +/* crypto/des/set_key.c */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. 
+ * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +/* set_key.c v 1.4 eay 24/9/91 + * 1.4 Speed up by 400% :-) + * 1.3 added register declarations. + * 1.2 unrolled make_key_sched a bit more + * 1.1 added norm_expand_bits + * 1.0 First working version + */ +#include +#include +#include + +#ifndef NOPROTO +static int check_parity(des_cblock (*key)); +#else +static int check_parity(); +#endif + +int des_check_key=0; + +void des_set_odd_parity(key) +des_cblock (*key); + { + int i; + + for (i=0; i>(n))^(b))&(m)),\ + * (b)^=(t),\ + * (a)=((a)^((t)<<(n)))) + */ + +#define HPERM_OP(a,t,n,m) ((t)=((((a)<<(16-(n)))^(a))&(m)),\ + (a)=(a)^(t)^(t>>(16-(n)))) + +/* return 0 if key parity is odd (correct), + * return -1 if key parity error, + * return -2 if illegal weak key. 
+ */ +int des_set_key(key, schedule) +des_cblock (*key); +des_key_schedule schedule; + { + static int shifts2[16]={0,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0}; + register DES_LONG c,d,t,s; + register unsigned char *in; + register DES_LONG *k; + register int i; + + if (des_check_key) + { + if (!check_parity(key)) + return(-1); + + if (des_is_weak_key(key)) + return(-2); + } + + k=(DES_LONG *)schedule; + in=(unsigned char *)key; + + c2l(in,c); + c2l(in,d); + + /* do PC1 in 60 simple operations */ +/* PERM_OP(d,c,t,4,0x0f0f0f0fL); + HPERM_OP(c,t,-2, 0xcccc0000L); + HPERM_OP(c,t,-1, 0xaaaa0000L); + HPERM_OP(c,t, 8, 0x00ff0000L); + HPERM_OP(c,t,-1, 0xaaaa0000L); + HPERM_OP(d,t,-8, 0xff000000L); + HPERM_OP(d,t, 8, 0x00ff0000L); + HPERM_OP(d,t, 2, 0x33330000L); + d=((d&0x00aa00aaL)<<7L)|((d&0x55005500L)>>7L)|(d&0xaa55aa55L); + d=(d>>8)|((c&0xf0000000L)>>4); + c&=0x0fffffffL; */ + + /* I now do it in 47 simple operations :-) + * Thanks to John Fletcher (john_fletcher@lccmail.ocf.llnl.gov) + * for the inspiration. 
:-) */ + PERM_OP (d,c,t,4,0x0f0f0f0fL); + HPERM_OP(c,t,-2,0xcccc0000L); + HPERM_OP(d,t,-2,0xcccc0000L); + PERM_OP (d,c,t,1,0x55555555L); + PERM_OP (c,d,t,8,0x00ff00ffL); + PERM_OP (d,c,t,1,0x55555555L); + d= (((d&0x000000ffL)<<16L)| (d&0x0000ff00L) | + ((d&0x00ff0000L)>>16L)|((c&0xf0000000L)>>4L)); + c&=0x0fffffffL; + + for (i=0; i>2L)|(c<<26L)); d=((d>>2L)|(d<<26L)); } + else + { c=((c>>1L)|(c<<27L)); d=((d>>1L)|(d<<27L)); } + c&=0x0fffffffL; + d&=0x0fffffffL; + /* could be a few less shifts but I am to lazy at this + * point in time to investigate */ + s= des_skb[0][ (c )&0x3f ]| + des_skb[1][((c>> 6)&0x03)|((c>> 7L)&0x3c)]| + des_skb[2][((c>>13)&0x0f)|((c>>14L)&0x30)]| + des_skb[3][((c>>20)&0x01)|((c>>21L)&0x06) | + ((c>>22L)&0x38)]; + t= des_skb[4][ (d )&0x3f ]| + des_skb[5][((d>> 7L)&0x03)|((d>> 8L)&0x3c)]| + des_skb[6][ (d>>15L)&0x3f ]| + des_skb[7][((d>>21L)&0x0f)|((d>>22L)&0x30)]; + + /* table contained 0213 4657 */ + *(k++)=((t<<16L)|(s&0x0000ffffL))&0xffffffffL; + s= ((s>>16L)|(t&0xffff0000L)); + + s=(s<<4L)|(s>>28L); + *(k++)=s&0xffffffffL; + } + return(0); + } + +int des_key_sched(key, schedule) +des_cblock (*key); +des_key_schedule schedule; + { + return(des_set_key(key,schedule)); + } diff --git a/bsd/crypto/des/podd.h b/bsd/crypto/des/podd.h new file mode 100644 index 000000000..5093bc6bb --- /dev/null +++ b/bsd/crypto/des/podd.h @@ -0,0 +1,64 @@ +/* crypto/des/podd.h */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. 
If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +static const unsigned char odd_parity[256]={ + 1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14, + 16, 16, 19, 19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31, + 32, 32, 35, 35, 37, 37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47, + 49, 49, 50, 50, 52, 52, 55, 55, 56, 56, 59, 59, 61, 61, 62, 62, + 64, 64, 67, 67, 69, 69, 70, 70, 73, 73, 74, 74, 76, 76, 79, 79, + 81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91, 91, 93, 93, 94, 94, + 97, 97, 98, 98,100,100,103,103,104,104,107,107,109,109,110,110, +112,112,115,115,117,117,118,118,121,121,122,122,124,124,127,127, +128,128,131,131,133,133,134,134,137,137,138,138,140,140,143,143, +145,145,146,146,148,148,151,151,152,152,155,155,157,157,158,158, +161,161,162,162,164,164,167,167,168,168,171,171,173,173,174,174, +176,176,179,179,181,181,182,182,185,185,186,186,188,188,191,191, +193,193,194,194,196,196,199,199,200,200,203,203,205,205,206,206, +208,208,211,211,213,213,214,214,217,217,218,218,220,220,223,223, +224,224,227,227,229,229,230,230,233,233,234,234,236,236,239,239, +241,241,242,242,244,244,247,247,248,248,251,251,253,253,254,254}; diff --git a/bsd/crypto/des/sk.h b/bsd/crypto/des/sk.h new file mode 100644 index 000000000..db354ce9d --- /dev/null +++ b/bsd/crypto/des/sk.h @@ -0,0 +1,193 @@ +/* crypto/des/sk.h */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. 
+ * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
+ */ + +static const DES_LONG des_skb[8][64]={ +{ +/* for C bits (numbered as per FIPS 46) 1 2 3 4 5 6 */ +0x00000000L,0x00000010L,0x20000000L,0x20000010L, +0x00010000L,0x00010010L,0x20010000L,0x20010010L, +0x00000800L,0x00000810L,0x20000800L,0x20000810L, +0x00010800L,0x00010810L,0x20010800L,0x20010810L, +0x00000020L,0x00000030L,0x20000020L,0x20000030L, +0x00010020L,0x00010030L,0x20010020L,0x20010030L, +0x00000820L,0x00000830L,0x20000820L,0x20000830L, +0x00010820L,0x00010830L,0x20010820L,0x20010830L, +0x00080000L,0x00080010L,0x20080000L,0x20080010L, +0x00090000L,0x00090010L,0x20090000L,0x20090010L, +0x00080800L,0x00080810L,0x20080800L,0x20080810L, +0x00090800L,0x00090810L,0x20090800L,0x20090810L, +0x00080020L,0x00080030L,0x20080020L,0x20080030L, +0x00090020L,0x00090030L,0x20090020L,0x20090030L, +0x00080820L,0x00080830L,0x20080820L,0x20080830L, +0x00090820L,0x00090830L,0x20090820L,0x20090830L, +},{ +/* for C bits (numbered as per FIPS 46) 7 8 10 11 12 13 */ +0x00000000L,0x02000000L,0x00002000L,0x02002000L, +0x00200000L,0x02200000L,0x00202000L,0x02202000L, +0x00000004L,0x02000004L,0x00002004L,0x02002004L, +0x00200004L,0x02200004L,0x00202004L,0x02202004L, +0x00000400L,0x02000400L,0x00002400L,0x02002400L, +0x00200400L,0x02200400L,0x00202400L,0x02202400L, +0x00000404L,0x02000404L,0x00002404L,0x02002404L, +0x00200404L,0x02200404L,0x00202404L,0x02202404L, +0x10000000L,0x12000000L,0x10002000L,0x12002000L, +0x10200000L,0x12200000L,0x10202000L,0x12202000L, +0x10000004L,0x12000004L,0x10002004L,0x12002004L, +0x10200004L,0x12200004L,0x10202004L,0x12202004L, +0x10000400L,0x12000400L,0x10002400L,0x12002400L, +0x10200400L,0x12200400L,0x10202400L,0x12202400L, +0x10000404L,0x12000404L,0x10002404L,0x12002404L, +0x10200404L,0x12200404L,0x10202404L,0x12202404L, +},{ +/* for C bits (numbered as per FIPS 46) 14 15 16 17 19 20 */ +0x00000000L,0x00000001L,0x00040000L,0x00040001L, +0x01000000L,0x01000001L,0x01040000L,0x01040001L, +0x00000002L,0x00000003L,0x00040002L,0x00040003L, 
+0x01000002L,0x01000003L,0x01040002L,0x01040003L, +0x00000200L,0x00000201L,0x00040200L,0x00040201L, +0x01000200L,0x01000201L,0x01040200L,0x01040201L, +0x00000202L,0x00000203L,0x00040202L,0x00040203L, +0x01000202L,0x01000203L,0x01040202L,0x01040203L, +0x08000000L,0x08000001L,0x08040000L,0x08040001L, +0x09000000L,0x09000001L,0x09040000L,0x09040001L, +0x08000002L,0x08000003L,0x08040002L,0x08040003L, +0x09000002L,0x09000003L,0x09040002L,0x09040003L, +0x08000200L,0x08000201L,0x08040200L,0x08040201L, +0x09000200L,0x09000201L,0x09040200L,0x09040201L, +0x08000202L,0x08000203L,0x08040202L,0x08040203L, +0x09000202L,0x09000203L,0x09040202L,0x09040203L, +},{ +/* for C bits (numbered as per FIPS 46) 21 23 24 26 27 28 */ +0x00000000L,0x00100000L,0x00000100L,0x00100100L, +0x00000008L,0x00100008L,0x00000108L,0x00100108L, +0x00001000L,0x00101000L,0x00001100L,0x00101100L, +0x00001008L,0x00101008L,0x00001108L,0x00101108L, +0x04000000L,0x04100000L,0x04000100L,0x04100100L, +0x04000008L,0x04100008L,0x04000108L,0x04100108L, +0x04001000L,0x04101000L,0x04001100L,0x04101100L, +0x04001008L,0x04101008L,0x04001108L,0x04101108L, +0x00020000L,0x00120000L,0x00020100L,0x00120100L, +0x00020008L,0x00120008L,0x00020108L,0x00120108L, +0x00021000L,0x00121000L,0x00021100L,0x00121100L, +0x00021008L,0x00121008L,0x00021108L,0x00121108L, +0x04020000L,0x04120000L,0x04020100L,0x04120100L, +0x04020008L,0x04120008L,0x04020108L,0x04120108L, +0x04021000L,0x04121000L,0x04021100L,0x04121100L, +0x04021008L,0x04121008L,0x04021108L,0x04121108L, +},{ +/* for D bits (numbered as per FIPS 46) 1 2 3 4 5 6 */ +0x00000000L,0x10000000L,0x00010000L,0x10010000L, +0x00000004L,0x10000004L,0x00010004L,0x10010004L, +0x20000000L,0x30000000L,0x20010000L,0x30010000L, +0x20000004L,0x30000004L,0x20010004L,0x30010004L, +0x00100000L,0x10100000L,0x00110000L,0x10110000L, +0x00100004L,0x10100004L,0x00110004L,0x10110004L, +0x20100000L,0x30100000L,0x20110000L,0x30110000L, +0x20100004L,0x30100004L,0x20110004L,0x30110004L, 
+0x00001000L,0x10001000L,0x00011000L,0x10011000L, +0x00001004L,0x10001004L,0x00011004L,0x10011004L, +0x20001000L,0x30001000L,0x20011000L,0x30011000L, +0x20001004L,0x30001004L,0x20011004L,0x30011004L, +0x00101000L,0x10101000L,0x00111000L,0x10111000L, +0x00101004L,0x10101004L,0x00111004L,0x10111004L, +0x20101000L,0x30101000L,0x20111000L,0x30111000L, +0x20101004L,0x30101004L,0x20111004L,0x30111004L, +},{ +/* for D bits (numbered as per FIPS 46) 8 9 11 12 13 14 */ +0x00000000L,0x08000000L,0x00000008L,0x08000008L, +0x00000400L,0x08000400L,0x00000408L,0x08000408L, +0x00020000L,0x08020000L,0x00020008L,0x08020008L, +0x00020400L,0x08020400L,0x00020408L,0x08020408L, +0x00000001L,0x08000001L,0x00000009L,0x08000009L, +0x00000401L,0x08000401L,0x00000409L,0x08000409L, +0x00020001L,0x08020001L,0x00020009L,0x08020009L, +0x00020401L,0x08020401L,0x00020409L,0x08020409L, +0x02000000L,0x0A000000L,0x02000008L,0x0A000008L, +0x02000400L,0x0A000400L,0x02000408L,0x0A000408L, +0x02020000L,0x0A020000L,0x02020008L,0x0A020008L, +0x02020400L,0x0A020400L,0x02020408L,0x0A020408L, +0x02000001L,0x0A000001L,0x02000009L,0x0A000009L, +0x02000401L,0x0A000401L,0x02000409L,0x0A000409L, +0x02020001L,0x0A020001L,0x02020009L,0x0A020009L, +0x02020401L,0x0A020401L,0x02020409L,0x0A020409L, +},{ +/* for D bits (numbered as per FIPS 46) 16 17 18 19 20 21 */ +0x00000000L,0x00000100L,0x00080000L,0x00080100L, +0x01000000L,0x01000100L,0x01080000L,0x01080100L, +0x00000010L,0x00000110L,0x00080010L,0x00080110L, +0x01000010L,0x01000110L,0x01080010L,0x01080110L, +0x00200000L,0x00200100L,0x00280000L,0x00280100L, +0x01200000L,0x01200100L,0x01280000L,0x01280100L, +0x00200010L,0x00200110L,0x00280010L,0x00280110L, +0x01200010L,0x01200110L,0x01280010L,0x01280110L, +0x00000200L,0x00000300L,0x00080200L,0x00080300L, +0x01000200L,0x01000300L,0x01080200L,0x01080300L, +0x00000210L,0x00000310L,0x00080210L,0x00080310L, +0x01000210L,0x01000310L,0x01080210L,0x01080310L, +0x00200200L,0x00200300L,0x00280200L,0x00280300L, 
+0x01200200L,0x01200300L,0x01280200L,0x01280300L, +0x00200210L,0x00200310L,0x00280210L,0x00280310L, +0x01200210L,0x01200310L,0x01280210L,0x01280310L, +},{ +/* for D bits (numbered as per FIPS 46) 22 23 24 25 27 28 */ +0x00000000L,0x04000000L,0x00040000L,0x04040000L, +0x00000002L,0x04000002L,0x00040002L,0x04040002L, +0x00002000L,0x04002000L,0x00042000L,0x04042000L, +0x00002002L,0x04002002L,0x00042002L,0x04042002L, +0x00000020L,0x04000020L,0x00040020L,0x04040020L, +0x00000022L,0x04000022L,0x00040022L,0x04040022L, +0x00002020L,0x04002020L,0x00042020L,0x04042020L, +0x00002022L,0x04002022L,0x00042022L,0x04042022L, +0x00000800L,0x04000800L,0x00040800L,0x04040800L, +0x00000802L,0x04000802L,0x00040802L,0x04040802L, +0x00002800L,0x04002800L,0x00042800L,0x04042800L, +0x00002802L,0x04002802L,0x00042802L,0x04042802L, +0x00000820L,0x04000820L,0x00040820L,0x04040820L, +0x00000822L,0x04000822L,0x00040822L,0x04040822L, +0x00002820L,0x04002820L,0x00042820L,0x04042820L, +0x00002822L,0x04002822L,0x00042822L,0x04042822L, +}}; diff --git a/bsd/crypto/des/spr.h b/bsd/crypto/des/spr.h new file mode 100644 index 000000000..ebe9007f2 --- /dev/null +++ b/bsd/crypto/des/spr.h @@ -0,0 +1,193 @@ +/* crypto/des/spr.h */ +/* Copyright (C) 1995-1996 Eric Young (eay@mincom.oz.au) + * All rights reserved. + * + * This file is part of an SSL implementation written + * by Eric Young (eay@mincom.oz.au). + * The implementation was written so as to conform with Netscapes SSL + * specification. This library and applications are + * FREE FOR COMMERCIAL AND NON-COMMERCIAL USE + * as long as the following conditions are aheared to. + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. If this code is used in a product, + * Eric Young should be given attribution as the author of the parts used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Eric Young (eay@mincom.oz.au) + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] 
Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _RFC2040_RC5_H_ +#define _RFC2040_RC5_H_ + +#include +#include +#include + +/* + * if RC5_WORD change, W also may be changed. 
+ */ +typedef u_int32_t RC5_WORD; + +#define W (32) +#define WW (W / 8) +#define ROT_MASK (W - 1) +#define BB ((2 * W) / 8) + +#define SHLL(x, s) ((RC5_WORD)((x) << ((s)&ROT_MASK))) +#define SHLR(x, s, w) ((RC5_WORD)((x) >> ((w)-((s)&ROT_MASK)))) +#define SHRL(x, s, w) ((RC5_WORD)((x) << ((w)-((s)&ROT_MASK)))) +#define SHRR(x, s) ((RC5_WORD)((x) >> ((s)&ROT_MASK))) + +#define ROTL(x, s, w) ((RC5_WORD)(SHLL((x), (s))|SHLR((x), (s), (w)))) +#define ROTR(x, s, w) ((RC5_WORD)(SHRL((x), (s), (w))|SHRR((x), (s)))) + +#define P16 0xb7e1 +#define Q16 0x9e37 +#define P32 0xb7e15163 +#define Q32 0x9e3779b9 +#define P64 0xb7e151628aed2a6b +#define Q64 0x9e3779b97f4a7c15 + +#if W == 16 +#define Pw P16 +#define Qw Q16 +#elif W == 32 +#define Pw P32 +#define Qw Q32 +#elif W == 64 +#define Pw P64 +#define Qw Q64 +#endif + +#define RC5_ENCRYPT 1 +#define RC5_DECRYPT 0 + +extern void set_rc5_expandkey __P((RC5_WORD *, u_int8_t *, size_t, int)); +extern void rc5_encrypt_round16 __P((u_int8_t *, const u_int8_t *, + const RC5_WORD *)); +extern void rc5_decrypt_round16 __P((u_int8_t *, const u_int8_t *, + const RC5_WORD *)); +extern void rc5_cbc_process __P((struct mbuf *, size_t, size_t, RC5_WORD *, + u_int8_t *, int)); + +#endif diff --git a/bsd/crypto/rc5/rc5_cbc.c b/bsd/crypto/rc5/rc5_cbc.c new file mode 100644 index 000000000..c3d094d24 --- /dev/null +++ b/bsd/crypto/rc5/rc5_cbc.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ +/* + * FIPS pub 180-1: Secure Hash Algorithm (SHA-1) + * based on: http://csrc.nist.gov/fips/fip180-1.txt + * implemented by Jun-ichiro itojun Itoh + */ + +#ifndef _NETINET6_SHA1_H_ +#define _NETINET6_SHA1_H_ + +struct sha1_ctxt { + union { + u_int8_t b8[20]; + u_int32_t b32[5]; + } h; + union { + u_int8_t b8[8]; + u_int64_t b64[1]; + } c; + union { + u_int8_t b8[64]; + u_int32_t b32[16]; + } m; + u_int8_t count; +}; + +#if defined(KERNEL) || defined(_KERNEL) +extern void sha1_init __P((struct sha1_ctxt *)); +extern void sha1_pad __P((struct sha1_ctxt *)); +extern void sha1_loop __P((struct sha1_ctxt *, const caddr_t, size_t)); +extern void sha1_result __P((struct sha1_ctxt *, caddr_t)); + +/* compatibilty with other SHA1 source codes */ +typedef struct sha1_ctxt SHA1_CTX; +#define SHA1Init(x) sha1_init((x)) +#define SHA1Update(x, y, z) sha1_loop((x), (y), (z)) +#define SHA1Final(x, y) sha1_result((y), (x)) +#endif + +#define SHA1_RESULTLEN (160/8) + +#endif /*_NETINET6_SHA1_H_*/ diff --git a/bsd/dev/Makefile b/bsd/dev/Makefile new file mode 100644 index 000000000..8e8cf9321 --- /dev/null +++ b/bsd/dev/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + disk.h disk_label.h kmreg_com.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = dev + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = dev + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/dev/busvar.h b/bsd/dev/busvar.h new file mode 100644 index 000000000..e6508ebed --- /dev/null +++ b/bsd/dev/busvar.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987 Next, Inc. + * + * HISTORY + * 23-Jan-93 Doug Mitchell at NeXT + * Broke out machine-independent portion. + */ + +#ifdef DRIVER_PRIVATE + +#ifndef _BUSVAR_ +#define _BUSVAR_ + +/* pseudo device initialization routine support */ +struct pseudo_init { + int ps_count; + int (*ps_func)(); +}; +extern struct pseudo_init pseudo_inits[]; + +#endif /* _BUSVAR_ */ + +#endif /* DRIVER_PRIVATE */ diff --git a/bsd/dev/disk.h b/bsd/dev/disk.h new file mode 100644 index 000000000..ef8b7504e --- /dev/null +++ b/bsd/dev/disk.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)disk.h 1.0 08/29/87 (c) 1987 NeXT */ + +/* + * HISTORY + * 28-Mar-92 Doug Mitchell + * Moved disk_label struct to . + * + * 22-May-91 Gregg Kellogg (gk) at NeXT + * Split out public interface. + * + * 20-Jul-90 Doug Mitchell + * Added DKIOCSFORMAT, DKIOCGFORMAT + * + * 16-Apr-90 Doug Mitchell at NeXT + * Added DKIOCPANELPRT. + * + * 25-Mar-90 John Seamons (jks) at NeXT + * Removed obsolete DKIOCNOTIFY and DKIOCINSERT. + * + * 23-Mar-90 Doug Mitchell + * Added DKIOCEJECT. + * + * 14-Feb-90 Doug Mitchell at NeXT + * Added DKIOCMNOTIFY. + * + * 16-Mar-88 John Seamons (jks) at NeXT + * Cleaned up to support standard disk label definitions. + * + * 24-Feb-88 Mike DeMoney (mike) at NeXT + * Added defines for dl_bootfile and dl_boot0_blkno. + * Reduced NBAD to allow for these entries in disktab. + * + * 29-Aug-87 John Seamons (jks) at NeXT + * Created. + * + */ + +#ifndef _BSD_DEV_DISK_ +#define _BSD_DEV_DISK_ + +#include +#include +#include +#include +#include +#include + +#define DR_CMDSIZE 32 +#define DR_ERRSIZE 32 + +struct disk_req { + int dr_bcount; /* byte count for data transfers */ + caddr_t dr_addr; /* memory addr for data transfers */ + struct timeval dr_exec_time; /* execution time of operation */ + + /* + * interpretation of cmdblk and errblk is driver specific. 
+ */ + char dr_cmdblk[DR_CMDSIZE]; + char dr_errblk[DR_ERRSIZE]; +}; + +struct sdc_wire { + vm_offset_t start, end; + boolean_t new_pageable; +}; + + +#define BAD_BLK_OFF 4 /* offset of bad blk tbl from label */ +#define NBAD_BLK (12 * 1024 / sizeof (int)) + +struct bad_block { /* bad block table, sized to be 12KB */ + int bad_blk[NBAD_BLK]; +}; + +/* + * sector bitmap states (2 bits per sector) + */ +#define SB_UNTESTED 0 /* must be zero */ +#define SB_BAD 1 +#define SB_WRITTEN 2 +#define SB_ERASED 3 + +struct drive_info { /* info about drive hardware */ + char di_name[MAXDNMLEN]; /* drive type name */ + int di_label_blkno[NLABELS];/* label loc'ns in DEVICE SECTORS */ + int di_devblklen; /* device sector size */ + int di_maxbcount; /* max bytes per transfer request */ +}; + +#define DS_STATSIZE 32 + +struct disk_stats { + int s_ecccnt; /* avg ECC corrections per sector */ + int s_maxecc; /* max ECC corrections observed */ + + /* + * interpretation of s_stats is driver specific + */ + char s_stats[DS_STATSIZE]; +}; + +struct drive_location { + char location[ 128 ]; +}; + +#define DKIOCGLABEL _IOR('d', 0,struct disk_label) /* read label */ +#define DKIOCSLABEL _IOW('d', 1,struct disk_label) /* write label */ +#define DKIOCGBITMAP _IO('d', 2) /* read bitmap */ +#define DKIOCSBITMAP _IO('d', 3) /* write bitmap */ +#define DKIOCREQ _IOWR('d', 4, struct disk_req) /* cmd request */ +#define DKIOCINFO _IOR('d', 5, struct drive_info) /* get drive info */ +#define DKIOCZSTATS _IO('d',7) /* zero statistics */ +#define DKIOCGSTATS _IO('d', 8) /* get statistics */ +#define DKIOCRESET _IO('d', 9) /* reset disk */ +#define DKIOCGFLAGS _IOR('d', 11, int) /* get driver flags */ +#define DKIOCSFLAGS _IOW('d', 12, int) /* set driver flags */ +#define DKIOCSDCWIRE _IOW('d', 14, struct sdc_wire) /* sdc wire memory */ +#define DKIOCSDCLOCK _IO('d', 15) /* sdc lock */ +#define DKIOCSDCUNLOCK _IO('d', 16) /* sdc unlock */ +#define DKIOCGFREEVOL _IOR('d', 17, int) /* get free volume # */ 
+#define DKIOCGBBT _IO('d', 18) /* read bad blk tbl */ +#define DKIOCSBBT _IO('d', 19) /* write bad blk tbl */ +#define DKIOCMNOTIFY _IOW('d', 20, int) /* message on insert */ +#define DKIOCEJECT _IO('d', 21) /* eject disk */ +#define DKIOCPANELPRT _IOW('d', 22, int) /* register Panel */ + /* Request port */ +#define DKIOCSFORMAT _IOW('d', 23, int) /* set 'Formatted' flag */ +#define DKIOCGFORMAT _IOR('d', 23, int) /* get 'Formatted' flag */ +#define DKIOCBLKSIZE _IOR('d', 24, int) /* device sector size */ +#define DKIOCNUMBLKS _IOR('d', 25, int) /* number of sectors */ +#define DKIOCCHECKINSERT _IO('d',26) /* manually poll removable */ + /* media drive */ +#define DKIOCCANCELAUTOMOUNT _IOW('d',27, dev_t) /* cancel automount request */ +#define DKIOCGLOCATION _IOR('d',28, struct drive_location) /* arch dependent location descrip */ +#define DKIOCSETBLOCKSIZE _IOW('d', 24, int) /* set media's preferred sector size */ +#define DKIOCGETBLOCKSIZE DKIOCBLKSIZE /* get media's preferred sector size */ +#define DKIOCGETBLOCKCOUNT DKIOCNUMBLKS /* get media's sector count */ +#define DKIOCGETLOCATION DKIOCGLOCATION /* get media's location description */ +#define DKIOCISFORMATTED DKIOCGFORMAT /* is media formatted? */ +#define DKIOCISWRITABLE _IOR('d', 29, int) /* is media writable? */ +#endif /* _BSD_DEV_DISK_ */ + diff --git a/bsd/dev/disk_label.h b/bsd/dev/disk_label.h new file mode 100644 index 000000000..f177d1ffb --- /dev/null +++ b/bsd/dev/disk_label.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 by NeXT Computer, Inc. + * + * File: bsd/dev/disk_label.h - NeXT disk label definition. + * + * HISTORY + * 28-Mar-92 Doug Mitchell at NeXT + * Split out from . + */ + +#ifndef _BSD_DEV_DISK_LABEL_ +#define _BSD_DEV_DISK_LABEL_ + +#include + +#define NLABELS 4 /* # of labels on a disk */ +#define MAXLBLLEN 24 /* dl_label[] size */ +#define NBAD 1670 /* sized to make label ~= 8KB */ + +/* + * if dl_version >= DL_V3 then the bad block table is relocated + * to a structure separate from the disk label. + */ +typedef union { + unsigned short DL_v3_checksum; + int DL_bad[NBAD]; /* block number that is bad */ +} dl_un_t; + +typedef struct disk_label { + int dl_version; // label version number + int dl_label_blkno; // block # where this label is + int dl_size; // size of media area (sectors) + char dl_label[MAXLBLLEN]; // media label + unsigned dl_flags; // flags (see DL_xxx, below) + unsigned dl_tag; // volume tag + struct disktab dl_dt; // common info in disktab + dl_un_t dl_un; + unsigned short dl_checksum; // ones complement checksum + + /* add things here so dl_checksum stays in a fixed place */ +} disk_label_t; + +/* + * Known label versions. 
+ */ +#define DL_V1 0x4e655854 /* version #1: "NeXT" */ +#define DL_V2 0x646c5632 /* version #2: "dlV2" */ +#define DL_V3 0x646c5633 /* version #3: "dlV3" */ +#define DL_VERSION DL_V3 /* default version */ + + +/* + * dl_flags values + */ +#define DL_UNINIT 0x80000000 /* label is uninitialized */ + +/* + * Aliases for disktab fields + */ +#define dl_name dl_dt.d_name +#define dl_type dl_dt.d_type +#define dl_part dl_dt.d_partitions +#define dl_front dl_dt.d_front +#define dl_back dl_dt.d_back +#define dl_ngroups dl_dt.d_ngroups +#define dl_ag_size dl_dt.d_ag_size +#define dl_ag_alts dl_dt.d_ag_alts +#define dl_ag_off dl_dt.d_ag_off +#define dl_secsize dl_dt.d_secsize +#define dl_ncyl dl_dt.d_ncylinders +#define dl_nsect dl_dt.d_nsectors +#define dl_ntrack dl_dt.d_ntracks +#define dl_rpm dl_dt.d_rpm +#define dl_bootfile dl_dt.d_bootfile +#define dl_boot0_blkno dl_dt.d_boot0_blkno +#define dl_hostname dl_dt.d_hostname +#define dl_rootpartition dl_dt.d_rootpartition +#define dl_rwpartition dl_dt.d_rwpartition + +/* + * Other aliases + */ +#define dl_v3_checksum dl_un.DL_v3_checksum +#define dl_bad dl_un.DL_bad + +#endif /* _BSD_DEV_DISK_LABEL_ */ + diff --git a/bsd/dev/i386/conf.c b/bsd/dev/i386/conf.c new file mode 100644 index 000000000..dfc84d209 --- /dev/null +++ b/bsd/dev/i386/conf.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 by Apple Computer, Inc., all rights reserved + * Copyright (c) 1993 NeXT Computer, Inc. + * + * UNIX Device switch tables. + * + * HISTORY + * + * 30 July 1997 Umesh Vaishampayan (umeshv@apple.com) + * enabled file descriptor pseudo-device. + * 18 June 1993 ? at NeXT + * Cleaned up a lot of stuff in this file. + */ + +#include +#include +#include +#include +#include +#include + + +extern int nulldev(); + +struct bdevsw bdevsw[] = +{ + /* + * For block devices, every other block of 8 slots is + * reserved to NeXT. The other slots are available for + * the user. This way we can both add new entries without + * running into each other. Be sure to fill in NeXT's + * 8 reserved slots when you jump over us -- we'll do the + * same for you. 
+ */ + + /* 0 - 7 are reserved to NeXT */ + + NO_BDEVICE, /* 0*/ + NO_BDEVICE, /* 1*/ + NO_BDEVICE, /* 2*/ + NO_BDEVICE, /* 3*/ + NO_BDEVICE, /* 4*/ + NO_BDEVICE, /* 5*/ + NO_BDEVICE, /* 6*/ + NO_BDEVICE, /* 7*/ + + /* 8 - 15 are reserved to the user */ + NO_BDEVICE, /* 8*/ + NO_BDEVICE, /* 9*/ + NO_BDEVICE, /*10*/ + NO_BDEVICE, /*11*/ + NO_BDEVICE, /*12*/ + NO_BDEVICE, /*13*/ + NO_BDEVICE, /*14*/ + NO_BDEVICE, /*15*/ + + /* 16 - 23 are reserved to NeXT */ + NO_BDEVICE, /*16*/ + NO_BDEVICE, /*17*/ + NO_BDEVICE, /*18*/ + NO_BDEVICE, /*18*/ + NO_BDEVICE, /*20*/ + NO_BDEVICE, /*21*/ + NO_BDEVICE, /*22*/ + NO_BDEVICE, /*23*/ +}; + +int nblkdev = sizeof (bdevsw) / sizeof (bdevsw[0]); + +extern struct tty *km_tty[]; +extern int cnopen(),cnclose(),cnread(),cnwrite(),cnioctl(), + cnselect(),cngetc(), cnputc(dev_t dev, char c); +extern int kmopen(),kmclose(),kmread(),kmwrite(),kmioctl(), + kmgetc(), kmputc(dev_t dev, char c); +extern int sgopen(),sgclose(), sgioctl(); + +#if NVOL > 0 +extern int volopen(),volclose(),volioctl(); +#else +#define volopen eno_opcl +#define volclose eno_opcl +#define volioctl eno_ioctl +#endif + +extern int cttyopen(), cttyread(), cttywrite(), cttyioctl(), cttyselect(); + +extern int mmread(),mmwrite(); +#define mmselect seltrue +#define mmmmap eno_mmap + +#include +#if NPTY > 0 +extern struct tty *pt_tty[]; +extern int ptsopen(),ptsclose(),ptsread(),ptswrite(),ptsstop(),ptsputc(); +extern int ptcopen(),ptcclose(),ptcread(),ptcwrite(),ptcselect(), + ptyioctl(); +#else +#define ptsopen eno_opcl +#define ptsclose eno_opcl +#define ptsread eno_rdwrt +#define ptswrite eno_rdwrt +#define ptsstop nulldev +#define ptsputc nulldev + +#define ptcopen eno_opcl +#define ptcclose eno_opcl +#define ptcread eno_rdwrt +#define ptcwrite eno_rdwrt +#define ptcselect eno_select +#define ptyioctl eno_ioctl +#endif + +extern int logopen(),logclose(),logread(),logioctl(),logselect(); +extern int fdesc_open(), fdesc_read(), fdesc_write(), + fdesc_ioctl(), 
fdesc_select(); + +extern int seltrue(); + +struct cdevsw cdevsw[] = +{ + /* + * For character devices, every other block of 16 slots is + * reserved to NeXT. The other slots are available for + * the user. This way we can both add new entries without + * running into each other. Be sure to fill in NeXT's + * 16 reserved slots when you jump over us -- we'll do the + * same for you. + */ + + /* 0 - 15 are reserved to NeXT */ + + { + cnopen, cnclose, cnread, cnwrite, /* 0*/ + cnioctl, nulldev, nulldev, 0, cnselect, + eno_mmap, eno_strat, cngetc, cnputc, D_TTY + }, + NO_CDEVICE, /* 1*/ + { + cttyopen, nulldev, cttyread, cttywrite, /* 2*/ + cttyioctl, nulldev, nulldev, 0, cttyselect, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TTY + }, + { + nulldev, nulldev, mmread, mmwrite, /* 3*/ + eno_ioctl, nulldev, nulldev, 0, mmselect, + mmmmap, eno_strat, eno_getc, eno_putc, 0 + }, + { + ptsopen, ptsclose, ptsread, ptswrite, /* 4*/ + ptyioctl, ptsstop, nulldev, pt_tty, ttselect, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TTY + }, + { + ptcopen, ptcclose, ptcread, ptcwrite, /* 5*/ + ptyioctl, nulldev, nulldev, 0, ptcselect, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TTY + }, + { + logopen, logclose, logread, eno_rdwrt, /* 6*/ + logioctl, eno_stop, nulldev, 0, logselect, + eno_mmap, eno_strat, eno_getc, eno_putc, 0 + }, + NO_CDEVICE, /* 7*/ + NO_CDEVICE, /* 8*/ + NO_CDEVICE, /* 9*/ + NO_CDEVICE, /*10*/ + NO_CDEVICE, /*11*/ + { + kmopen, kmclose, kmread, kmwrite, /*12*/ + kmioctl, nulldev, nulldev, km_tty, ttselect, + eno_mmap, eno_strat, kmgetc, kmputc, 0 + }, + NO_CDEVICE, /*13*/ + NO_CDEVICE, /*14*/ + NO_CDEVICE, /*15*/ + + /* 16 - 31 are reserved to the user */ + NO_CDEVICE, /*16*/ + NO_CDEVICE, /*17*/ + NO_CDEVICE, /*18*/ + NO_CDEVICE, /*19*/ + NO_CDEVICE, /*20*/ + NO_CDEVICE, /*21*/ + NO_CDEVICE, /*22*/ + NO_CDEVICE, /*23*/ + NO_CDEVICE, /*24*/ + NO_CDEVICE, /*25*/ + NO_CDEVICE, /*26*/ + NO_CDEVICE, /*27*/ + NO_CDEVICE, /*28*/ + NO_CDEVICE, /*29*/ + NO_CDEVICE, /*30*/ + 
NO_CDEVICE, /*31*/ + + /* 32 - 47 are reserved to NeXT */ + { + fdesc_open, eno_opcl, fdesc_read, fdesc_write, /*32*/ + fdesc_ioctl, eno_stop, eno_reset, 0, fdesc_select, + eno_mmap, eno_strat, eno_getc, eno_putc, 0 + }, +#if 1 + NO_CDEVICE, +#else + { + sgopen, sgclose, eno_rdwrt, eno_rdwrt, /*33*/ + sgioctl, eno_stop, eno_reset, 0, eno_select, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TAPE + }, +#endif + NO_CDEVICE, /*34*/ + NO_CDEVICE, /*35*/ + NO_CDEVICE, /*36*/ + NO_CDEVICE, /*37*/ + NO_CDEVICE, /*38*/ + NO_CDEVICE, /*39*/ + NO_CDEVICE, /*40*/ + NO_CDEVICE, /*41*/ + { + volopen, volclose, eno_rdwrt, eno_rdwrt, /*42*/ + volioctl, eno_stop, eno_reset, 0, seltrue, + eno_mmap, eno_strat, eno_getc, eno_putc, 0 + }, +}; +int nchrdev = sizeof (cdevsw) / sizeof (cdevsw[0]); + + +#include /* for VCHR and VBLK */ +/* + * return true if a disk + */ +int +isdisk(dev, type) + dev_t dev; + int type; +{ + switch (major(dev)) { + case 1: /* fd: floppy */ + case 6: /* sd: scsi disk */ + case 3: /* ide: */ + if (type == VBLK) + return(1); + break; + case 14: /* sd: scsi disk */ + case 41: /* fd: floppy */ + if (type == VCHR) + return(1); + break; + } + return(0); +} + +static int chrtoblktab[] = { + /* CHR*/ /* BLK*/ /* CHR*/ /* BLK*/ + /* 0 */ NODEV, /* 1 */ NODEV, + /* 2 */ NODEV, /* 3 */ NODEV, + /* 4 */ NODEV, /* 5 */ NODEV, + /* 6 */ NODEV, /* 7 */ NODEV, + /* 8 */ NODEV, /* 9 */ NODEV, + /* 10 */ NODEV, /* 11 */ NODEV, + /* 12 */ NODEV, /* 13 */ NODEV, + /* 14 */ 6, /* 15 */ NODEV, + /* 16 */ NODEV, /* 17 */ NODEV, + /* 18 */ NODEV, /* 19 */ NODEV, + /* 20 */ NODEV, /* 21 */ NODEV, + /* 22 */ NODEV, /* 23 */ NODEV, + /* 24 */ NODEV, /* 25 */ NODEV, + /* 26 */ NODEV, /* 27 */ NODEV, + /* 28 */ NODEV, /* 29 */ NODEV, + /* 30 */ NODEV, /* 31 */ NODEV, + /* 32 */ NODEV, /* 33 */ NODEV, + /* 34 */ NODEV, /* 35 */ NODEV, + /* 36 */ NODEV, /* 37 */ NODEV, + /* 38 */ NODEV, /* 39 */ NODEV, + /* 40 */ NODEV, /* 41 */ 1, + /* 42 */ NODEV, /* 43 */ NODEV, + /* 44 */ NODEV, +}; + 
+/* + * convert chr dev to blk dev + */ +dev_t +chrtoblk(dev) + dev_t dev; +{ + int blkmaj; + + if (major(dev) >= nchrdev) + return(NODEV); + blkmaj = chrtoblktab[major(dev)]; + if (blkmaj == NODEV) + return(NODEV); + return(makedev(blkmaj, minor(dev))); +} + +/* + * Returns true if dev is /dev/mem or /dev/kmem. + */ +int iskmemdev(dev) + dev_t dev; +{ + + return (major(dev) == 3 && minor(dev) < 2); +} diff --git a/bsd/dev/i386/cons.c b/bsd/dev/i386/cons.c new file mode 100644 index 000000000..5c56556a6 --- /dev/null +++ b/bsd/dev/i386/cons.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987, 1988 NeXT, Inc. + * + * HISTORY + * 7-Jan-93 Mac Gillon (mgillon) at NeXT + * Integrated POSIX support + * + * 12-Aug-87 John Seamons (jks) at NeXT + * Ported to NeXT. + */ + +/* + * Indirect driver for console. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +struct tty cons; +struct tty *constty; /* current console device */ + +/*ARGSUSED*/ +int +cnopen(dev, flag, devtype, pp) + dev_t dev; + int flag, devtype; + struct proc *pp; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_open)(device, flag, devtype, pp)); +} + +/*ARGSUSED*/ +int +cnclose(dev, flag, mode, pp) + dev_t dev; + int flag, mode; + struct proc *pp; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_close)(device, flag, mode, pp)); +} + +/*ARGSUSED*/ +int +cnread(dev, uio, ioflag) + dev_t dev; + struct uio *uio; + int ioflag; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_read)(device, uio, ioflag)); +} + +/*ARGSUSED*/ +int +cnwrite(dev, uio, ioflag) + dev_t dev; + struct uio *uio; + int ioflag; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_write)(device, uio, ioflag)); +} + +/*ARGSUSED*/ +int +cnioctl(dev, cmd, addr, flag, p) + dev_t dev; + int cmd; + caddr_t addr; + int flag; + struct proc *p; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + /* + * Superuser can always use this to wrest control of console + * output from the "virtual" console. 
+ */ + if (cmd == TIOCCONS && constty) { + int error = suser(p->p_ucred, (u_short *) NULL); + if (error) + return (error); + constty = NULL; + return (0); + } + return ((*cdevsw[major(device)].d_ioctl)(device, cmd, addr, flag, p)); +} + +/*ARGSUSED*/ +int +cnselect(dev, flag, p) + dev_t dev; + int flag; + struct proc *p; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_select)(device, flag, p)); +} + +#if 0 /* FIXME - using OSFMK console driver for the moment */ +int +cngetc() +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_getc)(device)); +} + +/*ARGSUSED*/ +int +cnputc(c) + char c; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_putc)(device, c)); +} +#endif + +#if NCPUS > 1 +slave_cnenable() +{ + /* FIXME: what to do here? */ +} +#endif NCPUS > 1 + +#if 0 +void +kprintf( const char *format, ...) +{ + /* on PPC this outputs to the serial line */ + /* nop on intel ... umeshv@apple.com */ + +} +#endif + +/* + * Write message to console; create an alert panel if no text-type window + * currently exists. Caller must call alert_done() when finished. + * The height and width arguments are not used; they are provided for + * compatibility with the 68k version of alert(). 
+ */ +int +alert( + int width, + int height, + const char *title, + const char *msg, + int p1, + int p2, + int p3, + int p4, + int p5, + int p6, + int p7, + int p8) +{ + char smsg[200]; + + sprintf(smsg, msg, p1, p2, p3, p4, p5, p6, p7, p8); +#if FIXME /* [ */ + /* DoAlert(title, smsg); */ +#else + printf("%s\n",smsg); +#endif /* FIXME ] */ + + return 0; +} + +int +alert_done() +{ + /* DoRestore(); */ + return 0; +} + diff --git a/bsd/dev/i386/cons.h b/bsd/dev/i386/cons.h new file mode 100644 index 000000000..00d91a155 --- /dev/null +++ b/bsd/dev/i386/cons.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987 NeXT, Inc. 
+ */ + +struct consdev { + char *cn_name; /* name of device in dev_name_list */ + int (*cn_probe)(); /* probe hardware and fill in consdev info */ + int (*cn_init)(); /* turn on as console */ + int (*cn_getc)(); /* kernel getchar interface */ + int (*cn_putc)(); /* kernel putchar interface */ + struct tty *cn_tp; /* tty structure for console device */ + dev_t cn_dev; /* major/minor of device */ + short cn_pri; /* pecking order; the higher the better */ +}; + +/* values for cn_pri - reflect our policy for console selection */ +#define CN_DEAD 0 /* device doesn't exist */ +#define CN_NORMAL 1 /* device exists but is nothing special */ +#define CN_INTERNAL 2 /* "internal" bit-mapped display */ +#define CN_REMOTE 3 /* serial interface with remote bit set */ + +/* XXX */ +#define CONSMAJOR 0 + +#ifdef KERNEL + +#include + +extern struct consdev constab[]; +extern struct consdev *cn_tab; +extern struct tty *cn_tty; + +extern struct tty cons; +extern struct tty *constty; /* current console device */ +#endif + diff --git a/bsd/dev/i386/kern_machdep.c b/bsd/dev/i386/kern_machdep.c new file mode 100644 index 000000000..0c3684b58 --- /dev/null +++ b/bsd/dev/i386/kern_machdep.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1990, NeXT, Inc. + * + * File: next/kern_machdep.c + * Author: John Seamons + * + * Machine-specific kernel routines. + * + * 8-Dec-91 Peter King (king) at NeXT + * Added grade_cpu_subtype(). + * FIXME: Do we want to merge this with check_cpu_subtype()? + * + * 5-Mar-90 John Seamons (jks) at NeXT + * Created. + */ + +#include +#include +#include + +check_cpu_subtype (cpu_subtype) + cpu_subtype_t cpu_subtype; +{ + struct machine_slot *ms = &machine_slot[cpu_number()]; + + switch (ms->cpu_subtype) { + case CPU_SUBTYPE_386: + if (cpu_subtype == CPU_SUBTYPE_386) + return (TRUE); + break; + + case CPU_SUBTYPE_486: + case CPU_SUBTYPE_486SX: + if ( cpu_subtype == CPU_SUBTYPE_486 || + cpu_subtype == CPU_SUBTYPE_486SX || + cpu_subtype == CPU_SUBTYPE_386 ) + return (TRUE); + break; + + case CPU_SUBTYPE_586: + if ( cpu_subtype == CPU_SUBTYPE_586 || + cpu_subtype == CPU_SUBTYPE_486 || + cpu_subtype == CPU_SUBTYPE_486SX || + cpu_subtype == CPU_SUBTYPE_386 ) + return (TRUE); + break; + + default: + if ( CPU_SUBTYPE_INTEL_MODEL(cpu_subtype) == + CPU_SUBTYPE_INTEL_MODEL_ALL) { + if ( CPU_SUBTYPE_INTEL_FAMILY(ms->cpu_subtype) >= + CPU_SUBTYPE_INTEL_FAMILY(cpu_subtype)) + return (TRUE); + } + else { + if ( ms->cpu_subtype == cpu_subtype) + return (TRUE); + } + break; + } + + return (FALSE); +} + +/********************************************************************** + * Routine: grade_cpu_subtype() + * + * Function: Return a relative preference for cpu_subtypes in fat + 
* executable files. The higher the grade, the higher the + * preference. A grade of 0 means not acceptable. + **********************************************************************/ +grade_cpu_subtype (cpu_subtype) + cpu_subtype_t cpu_subtype; +{ + struct machine_slot *ms = &machine_slot[cpu_number()]; + + switch (ms->cpu_subtype) { + case CPU_SUBTYPE_386: + switch (cpu_subtype) { + case CPU_SUBTYPE_386: + return 1; + default: + return 0; + } + + case CPU_SUBTYPE_486: + switch (cpu_subtype) { + case CPU_SUBTYPE_386: + return 1; + + case CPU_SUBTYPE_486SX: + return 2; + + case CPU_SUBTYPE_486: + return 3; + + default: + return 0; + } + + case CPU_SUBTYPE_486SX: + switch (cpu_subtype) { + case CPU_SUBTYPE_386: + return 1; + + case CPU_SUBTYPE_486: + return 2; + + case CPU_SUBTYPE_486SX: + return 3; + + default: + return 0; + } + + case CPU_SUBTYPE_586: + switch (cpu_subtype) { + case CPU_SUBTYPE_386: + return 1; + + case CPU_SUBTYPE_486SX: + return 2; + + case CPU_SUBTYPE_486: + return 3; + + case CPU_SUBTYPE_586: + return 4; + + default: + return 0; + } + + default: + if ( CPU_SUBTYPE_INTEL_MODEL(cpu_subtype) == + CPU_SUBTYPE_INTEL_MODEL_ALL) { + if ( CPU_SUBTYPE_INTEL_FAMILY(ms->cpu_subtype) >= + CPU_SUBTYPE_INTEL_FAMILY(cpu_subtype)) + return CPU_SUBTYPE_INTEL_FAMILY_MAX - + CPU_SUBTYPE_INTEL_FAMILY(ms->cpu_subtype) - + CPU_SUBTYPE_INTEL_FAMILY(cpu_subtype); + } + else { + if ( ms->cpu_subtype == cpu_subtype) + return CPU_SUBTYPE_INTEL_FAMILY_MAX + 1; + } + return 0; + } +} diff --git a/bsd/dev/i386/km.c b/bsd/dev/i386/km.c new file mode 100644 index 000000000..ac1054724 --- /dev/null +++ b/bsd/dev/i386/km.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * km.m - kernel keyboard/monitor module, procedural interface. + * + * HISTORY + */ + +#include +#include + +#include +#include +#include +#include +#include /* for kmopen */ +#include +#include /* for kmopen */ +#include +#include +#include +#include + +/* + * 'Global' variables, shared only by this file and conf.c. + */ +extern struct tty cons; +struct tty *km_tty[1] = { &cons }; + +/* + * this works early on, after initialize_screen() but before autoconf (and thus + * before we have a kmDevice). + */ +int disableConsoleOutput; + +/* + * 'Global' variables, shared only by this file and kmDevice.m. + */ +int initialized = 0; + +static int kmoutput(struct tty *tp); +static void kmtimeout(struct tty *tp); +static void kmstart(struct tty *tp); + +extern void KeyboardOpen(void); + +int kminit() +{ + cons.t_dev = makedev(12, 0); + initialized = 1; +} +/* + * cdevsw interface to km driver. 
+ */ +int +kmopen( + dev_t dev, + int flag, + int devtype, + struct proc *pp) +{ + int rtn; + int unit; + struct tty *tp; + struct winsize *wp; + int ret; + + unit = minor(dev); + if(unit >= 1) + return (ENXIO); + + tp = (struct tty *)&cons; + tp->t_oproc = kmstart; + tp->t_param = NULL; + tp->t_dev = dev; + + if ( !(tp->t_state & TS_ISOPEN) ) { + tp->t_iflag = TTYDEF_IFLAG; + tp->t_oflag = TTYDEF_OFLAG; + tp->t_cflag = (CREAD | CS8 | CLOCAL); + tp->t_lflag = TTYDEF_LFLAG; + tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; + termioschars(&tp->t_termios); + ttsetwater(tp); + } else if ((tp->t_state & TS_XCLUDE) && pp->p_ucred->cr_uid != 0) + return EBUSY; + + tp->t_state |= TS_CARR_ON; /* lie and say carrier exists and is on. */ + ret = ((*linesw[tp->t_line].l_open)(dev, tp)); + { + PE_Video video; + wp = &tp->t_winsize; + /* Magic numbers. These are CHARWIDTH and CHARHEIGHT + * from pexpert/i386/video_console.c + */ + wp->ws_xpixel = 8; + wp->ws_ypixel = 16; + + if (flag & O_POPUP) + PE_initialize_console(0, kPETextScreen); + + bzero(&video, sizeof(video)); + PE_current_console(&video); + if( video.v_width != 0 && video.v_height != 0 ) { + wp->ws_col = video.v_width / wp->ws_xpixel; + wp->ws_row = video.v_height / wp->ws_ypixel; + } else { + wp->ws_col = 100; + wp->ws_row = 36; + } + } + return ret; +} + +int +kmclose( + dev_t dev, + int flag, + int mode, + struct proc *p) +{ + + struct tty *tp; + + tp = &cons; + (*linesw[tp->t_line].l_close)(tp,flag); + ttyclose(tp); + return (0); +} + +int +kmread( + dev_t dev, + struct uio *uio, + int ioflag) +{ + register struct tty *tp; + + tp = &cons; + return ((*linesw[tp->t_line].l_read)(tp, uio, ioflag)); +} + +int +kmwrite( + dev_t dev, + struct uio *uio, + int ioflag) +{ + register struct tty *tp; + + tp = &cons; + return ((*linesw[tp->t_line].l_write)(tp, uio, ioflag)); +} + +int +kmioctl( + dev_t dev, + int cmd, + caddr_t data, + int flag, + struct proc *p) +{ + int error; + struct tty *tp = &cons; + struct winsize *wp; + + 
switch (cmd) { + + + + case KMIOCSIZE: + wp = (struct winsize *)data; + *wp = tp->t_winsize; + return 0; + + case TIOCSWINSZ: + /* Prevent changing of console size -- + * this ensures that login doesn't revert to the + * termcap-defined size + */ + return EINVAL; + + /* Bodge in the CLOCAL flag as the km device is always local */ + case TIOCSETA: + case TIOCSETAW: + case TIOCSETAF: { + register struct termios *t = (struct termios *)data; + t->c_cflag |= CLOCAL; + /* No Break */ + } + default: + error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); + if (error >= 0) { + return error; + } + error = ttioctl (tp, cmd, data, flag, p); + if (error >= 0) { + return error; + } + else { + return ENOTTY; + } + } +} + +int +kmputc( + int c) +{ + + if( disableConsoleOutput) + return( 0); + + if(!initialized) + return( 0); + + if(c == '\n') + cnputc('\r'); + + cnputc(c); + + return 0; +} + +int +kmgetc( + dev_t dev) +{ + int c; + + c= cngetc(); + + if (c == '\r') { + c = '\n'; + } + cnputc(c); + return c; +} + +int +kmgetc_silent( + dev_t dev) +{ + int c; + + c= cngetc(); + if (c == '\r') { + c = '\n'; + } + return c; +} + +/* + * Callouts from linesw. + */ + +#define KM_LOWAT_DELAY ((ns_time_t)1000) + +static void +kmstart( + struct tty *tp) +{ + extern int hz; + if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) + goto out; + if (tp->t_outq.c_cc == 0) + goto out; + tp->t_state |= TS_BUSY; + if (tp->t_outq.c_cc > tp->t_lowat) { + /* + * Start immediately. + */ + kmoutput(tp); + } + else { + /* + * Wait a bit... + */ +#if 0 + /* FIXME */ + timeout(kmtimeout, tp, hz); +#else + kmoutput(tp); +#endif + } +out: + ttwwakeup(tp); +} + +static void +kmtimeout( struct tty *tp) +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + kmoutput(tp); + (void) thread_funnel_set(kernel_flock, FALSE); + + +} +static int +kmoutput( + struct tty *tp) +{ + /* + * FIXME - to be grokked...copied from m68k km.c. 
+ */ + char buf[80]; + char *cp; + int cc = -1; + extern int hz; + + + while (tp->t_outq.c_cc > 0) { + cc = ndqb(&tp->t_outq, 0); + if (cc == 0) + break; + cc = min(cc, sizeof buf); + (void) q_to_b(&tp->t_outq, buf, cc); + for (cp = buf; cp < &buf[cc]; cp++) { + kmputc(*cp & 0x7f); + } + } + if (tp->t_outq.c_cc > 0) { + timeout(kmtimeout, tp, hz); + } + tp->t_state &= ~TS_BUSY; + ttwwakeup(tp); + + return 0; +} +cons_cinput(char ch) +{ + struct tty *tp = &cons; + boolean_t funnel_state; + + + (*linesw[tp->t_line].l_rint) (ch, tp); + +} + diff --git a/bsd/dev/i386/lock_stubs.c b/bsd/dev/i386/lock_stubs.c new file mode 100644 index 000000000..2b207335a --- /dev/null +++ b/bsd/dev/i386/lock_stubs.c @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if 0 +#define _KERNEL +#define DEFINE_SIMPLE_LOCK_PRIMS +#include +#endif diff --git a/bsd/dev/i386/mem.c b/bsd/dev/i386/mem.c new file mode 100644 index 000000000..5c3422a64 --- /dev/null +++ b/bsd/dev/i386/mem.c @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department, and code derived from software contributed to + * Berkeley by William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: Utah $Hdr: mem.c 1.13 89/10/08$ + * @(#)mem.c 8.1 (Berkeley) 6/11/93 + */ + +#include + +/* + * Memory special file + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include /* for kernel_map */ + +static caddr_t devzerobuf; + +mmread(dev, uio) + dev_t dev; + struct uio *uio; +{ + + return (mmrw(dev, uio, UIO_READ)); +} + +mmwrite(dev, uio) + dev_t dev; + struct uio *uio; +{ + + return (mmrw(dev, uio, UIO_WRITE)); +} + +mmrw(dev, uio, rw) + dev_t dev; + struct uio *uio; + enum uio_rw rw; +{ + register int o; + register u_int c, v; + register struct iovec *iov; + int error = 0; + vm_offset_t where; + int spl; + vm_size_t size; + extern boolean_t kernacc(off_t, size_t ); + + while (uio->uio_resid > 0 && error == 0) { + iov = uio->uio_iov; + if (iov->iov_len == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + if (uio->uio_iovcnt < 0) + panic("mmrw"); + continue; + } + switch (minor(dev)) { + + /* minor device 0 is physical memory */ + case 0: + v = trunc_page(uio->uio_offset); + if (uio->uio_offset >= mem_size) + goto fault; + + size= PAGE_SIZE; + if (kmem_alloc(kernel_map, &where, size) + != KERN_SUCCESS) { + goto fault; + } + o = uio->uio_offset - v; + c = min(PAGE_SIZE - o, (u_int)iov->iov_len); + error = uiomove((caddr_t) (where + o), c, uio); + kmem_free(kernel_map, where, PAGE_SIZE); + continue; + + /* minor device 1 is kernel memory */ + case 1: + /* Do some sanity checking */ + if (((vm_address_t)uio->uio_offset >= VM_MAX_KERNEL_ADDRESS) || + ((vm_address_t)uio->uio_offset <= VM_MIN_KERNEL_ADDRESS)) + goto fault; + c = iov->iov_len; + if (!kernacc(uio->uio_offset, c)) + goto fault; + error = uiomove((caddr_t)uio->uio_offset, (int)c, uio); + continue; + + /* minor device 2 is EOF/RATHOLE */ + case 2: + if (rw == UIO_READ) + return (0); + c = iov->iov_len; + break; + case 3: + if(devzerobuf == NULL) { + MALLOC(devzerobuf, caddr_t,PAGE_SIZE, M_TEMP, M_WAITOK); + bzero(devzerobuf, PAGE_SIZE); 
+ } + if(uio->uio_rw == UIO_WRITE) { + c = iov->iov_len; + break; + } + c = min(iov->iov_len, PAGE_SIZE); + error = uiomove(devzerobuf, (int)c, uio); + continue; + default: + goto fault; + break; + } + + if (error) + break; + iov->iov_base += c; + iov->iov_len -= c; + uio->uio_offset += c; + uio->uio_resid -= c; + } + return (error); +fault: + return (EFAULT); +} + + +boolean_t +kernacc( + off_t start, + size_t len +) +{ + off_t base; + off_t end; + + base = trunc_page(start); + end = start + len; + + while (base < end) { + if(kvtophys((vm_offset_t)base) == NULL) + return(FALSE); + base += page_size; + } + + return (TRUE); +} diff --git a/bsd/dev/i386/memmove.c b/bsd/dev/i386/memmove.c new file mode 100644 index 000000000..12b0e2070 --- /dev/null +++ b/bsd/dev/i386/memmove.c @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991,1993 NeXT Computer, Inc. All rights reserved. + * + * File: machdep/ppc/libc/memmove.c + * History: + * + * Fixed sleep integration problem. 
sleep was not properly + * handling thread states of THREAD_INTERRUPTED and + * THREAD_MUST_TERMINATE, so callers of sleep were getting + * confused and many times looping. This fixes the (in)famous + * unkillable gdb problem, the PB (and other processes) don't + * terminate, and more. Removed debugging kprintf left in + * bcopy code + * + */ + + + +#if 0 +void *memcpy(void *dst, const void *src, unsigned int ulen) +{ + bcopy(src, dst, ulen); + return dst; +} +#endif /* 0 */ +void *memmove(void *dst, const void *src, unsigned int ulen) +{ + bcopy(src, dst, ulen); + return dst; +} + + diff --git a/bsd/dev/i386/pci_device.h b/bsd/dev/i386/pci_device.h new file mode 100644 index 000000000..92f6a0905 --- /dev/null +++ b/bsd/dev/i386/pci_device.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:20:44 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:18:50 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.2 1995/12/15 10:52:14 bernadat + * Split dev and vendor ids. + * [95/11/15 bernadat] + * + * Revision 1.1.6.1 1995/02/23 17:22:27 alanl + * Taken from DIPC2_SHARED + * [1995/01/03 19:09:31 alanl] + * + * Revision 1.1.2.1 1994/10/11 18:24:42 rwd + * Created. + * [1994/10/11 18:15:31 rwd] + * + * $EndLog$ + */ +/* + * Taken from + * + * Copyright (c) 1994 Wolfgang Stanglmeier, Koeln, Germany + * + */ + +#ifndef __PCI_DEVICE_H__ +#define __PCI_DEVICE_H__ + +/*------------------------------------------------------------ + * + * Per driver structure. + * + *------------------------------------------------------------ +*/ + +typedef unsigned short pci_vendor_id_t; +typedef unsigned short pci_dev_id_t; + +typedef union { + unsigned long cfg1; + struct { + unsigned char enable; + unsigned char forward; + unsigned short port; + } cfg2; + } pcici_t; + +struct pci_driver { + int (*probe )(pcici_t pci_ident); /* test whether device + is present */ + int (*attach)(pcici_t pci_ident); /* setup driver for a + device */ + pci_vendor_id_t vendor_id; /* vendor pci id */ + pci_dev_id_t device_id; /* device pci id */ + char *name; /* device name */ + char *vendor; /* device long name */ + void (*intr)(int); /* interupt handler */ +}; + +/*----------------------------------------------------------- + * + * Per device structure. + * + * It is initialized by the config utility and should live in + * "ioconf.c". At the moment there is only one field. + * + * This is a first attempt to include the pci bus to 386bsd. + * So this structure may grow .. 
+ * + *----------------------------------------------------------- +*/ + +struct pci_device { + struct pci_driver * pd_driver; +}; + +/*----------------------------------------------------------- + * + * This functions may be used by drivers to map devices + * to virtual and physical addresses. The va and pa + * addresses are "in/out" parameters. If they are 0 + * on entry, the mapping function assigns an address. + * + *----------------------------------------------------------- +*/ + +int pci_map_mem(pcici_t tag, + unsigned long entry, + vm_offset_t *va, + vm_offset_t *pa); +#endif /*__PCI_DEVICE_H__*/ diff --git a/bsd/dev/i386/pio.h b/bsd/dev/i386/pio.h new file mode 100644 index 000000000..66992563d --- /dev/null +++ b/bsd/dev/i386/pio.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:20:45 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:18:50 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.2 1996/07/31 09:46:36 paire + * Merged with nmk20b7_shared (1.1.11.2 -> 1.1.11.1) + * [96/06/10 paire] + * + * Revision 1.1.11.2 1996/06/13 12:38:25 bernadat + * Do not use inline macros when MACH_ASSERT is configured. + * [96/05/24 bernadat] + * + * Revision 1.1.11.1 1996/05/14 13:50:23 paire + * Added new linl and loutl __inline__. + * Added conditional compilation for [l]{in|oub}[bwl]() __inline__. + * [95/11/24 paire] + * + * Revision 1.1.8.1 1994/09/23 02:00:28 ezf + * change marker to not FREE + * [1994/09/22 21:25:52 ezf] + * + * Revision 1.1.4.5 1993/08/09 19:40:41 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:45:57 dswartz] + * + * Revision 1.1.4.4 1993/06/11 15:17:37 jeffc + * CR9176 - ANSI C violations: inb/outb macros must be changed from + * ({ ... }) to inline functions, with proper type definitions. Callers + * must pass proper types to these functions: 386 I/O port addresses + * are unsigned shorts (not pointers). + * [1993/06/10 14:26:10 jeffc] + * + * Revision 1.1.4.3 1993/06/07 22:09:28 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:00:26 jeffc] + * + * Revision 1.1.4.2 1993/06/04 15:28:45 jeffc + * CR9176 - ANSI problems - + * Added casts to get macros to take caddr_t as an I/O space address. 
+ * [1993/06/04 13:45:55 jeffc] + * + * Revision 1.1 1992/09/30 02:25:51 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/05/14 16:14:20 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:13:56 mrt + * Changed to new Mach copyright + * [91/02/01 17:37:08 mrt] + * + * Revision 2.3 90/12/20 16:36:37 jeffreyh + * changes for __STDC__ + * [90/12/07 jeffreyh] + * + * Revision 2.2 90/11/26 14:48:41 rvb + * Pulled from 2.5 + * [90/11/22 10:09:38 rvb] + * + * [90/08/14 mg32] + * + * Now we know how types are factor in. + * Cleaned up a bunch: eliminated ({ for output and flushed unused + * output variables. + * [90/08/14 rvb] + * + * This is how its done in gcc: + * Created. + * [90/03/26 rvb] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +#ifndef I386_PIO_H +#define I386_PIO_H + +typedef unsigned short i386_ioport_t; + +/* read a longword */ +extern unsigned long inl( + i386_ioport_t port); +/* read a shortword */ +extern unsigned short inw( + i386_ioport_t port); +/* read a byte */ +extern unsigned char inb( + i386_ioport_t port); +/* write a longword */ +extern void outl( + i386_ioport_t port, + unsigned long datum); +/* write a word */ +extern void outw( + i386_ioport_t port, + unsigned short datum); +/* write a longword */ +extern void outb( + i386_ioport_t port, + unsigned char datum); + +/* input an array of longwords */ +extern void linl( + i386_ioport_t port, + int * data, + int count); +/* output an array of longwords */ +extern void loutl( + i386_ioport_t port, + int * data, + int count); + +/* input an array of words */ +extern void linw( + i386_ioport_t port, + int * data, + int count); +/* output an array of words */ +extern void loutw( + i386_ioport_t port, + int * data, + int count); + +/* input an array of bytes */ +extern void linb( + i386_ioport_t port, + char * data, + int count); +/* output an array of bytes */ +extern void loutb( + i386_ioport_t port, + char * data, + int count); + +extern __inline__ unsigned long inl( + i386_ioport_t port) +{ + unsigned long datum; + __asm__ volatile("inl %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ unsigned short inw( + i386_ioport_t port) +{ + unsigned short datum; + __asm__ volatile(".byte 0x66; inl %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ unsigned char inb( + i386_ioport_t port) +{ + unsigned char datum; + __asm__ volatile("inb %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ void outl( + i386_ioport_t port, + unsigned long datum) +{ + __asm__ volatile("outl %0, %1" : : "a" (datum), "d" (port)); +} + +extern __inline__ void outw( + i386_ioport_t port, + unsigned short datum) +{ + __asm__ volatile(".byte 0x66; outl %0, %1" : : 
"a" (datum), "d" (port)); +} + +extern __inline__ void outb( + i386_ioport_t port, + unsigned char datum) +{ + __asm__ volatile("outb %0, %1" : : "a" (datum), "d" (port)); +} + +#endif /* I386_PIO_H */ diff --git a/bsd/dev/i386/sel.h b/bsd/dev/i386/sel.h new file mode 100644 index 000000000..3d8824286 --- /dev/null +++ b/bsd/dev/i386/sel.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Segment selector. + * + * HISTORY + * + * 29 March 1992 ? at NeXT + * Created. + */ + +/* + * Segment selector. + */ + +typedef struct sel { + unsigned short rpl :2, +#define KERN_PRIV 0 +#define USER_PRIV 3 + ti :1, +#define SEL_GDT 0 +#define SEL_LDT 1 + index :13; +} sel_t; + diff --git a/bsd/dev/i386/sel_inline.h b/bsd/dev/i386/sel_inline.h new file mode 100644 index 000000000..e972af7b5 --- /dev/null +++ b/bsd/dev/i386/sel_inline.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Selector value conversion/validation. + * + * HISTORY + * + * 19 June 1992 ? at NeXT + * Created. 
+ */ + + +static inline +unsigned int +sel_to_selector( + sel_t sel +) +{ + union { + sel_t sel; + unsigned short selector; + } tconv; + + tconv.sel = sel; + + return (tconv.selector); +} + +static inline +sel_t +selector_to_sel( + unsigned int selector +) +{ + union { + unsigned short selector; + sel_t sel; + } tconv; + + tconv.selector = selector; + + return (tconv.sel); +} + +#if 0 +static inline +boolean_t +valid_user_data_selector( + unsigned int selector +) +{ + sel_t sel = selector_to_sel(selector); + + if (selector == 0) + return (TRUE); + + if (sel.ti == SEL_LDT) + return (TRUE); + else if (sel.index < GDTSZ) { + data_desc_t *desc = (data_desc_t *)sel_to_gdt_entry(sel); + + if (desc->dpl == USER_PRIV) + return (TRUE); + } + + return (FALSE); +} + +static inline +boolean_t +valid_user_code_selector( + unsigned int selector +) +{ + sel_t sel = selector_to_sel(selector); + + if (selector == 0) + return (FALSE); + + if (sel.ti == SEL_LDT) { + if (sel.rpl == USER_PRIV) + return (TRUE); + } + else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { + code_desc_t *desc = (code_desc_t *)sel_to_gdt_entry(sel); + + if (desc->dpl == USER_PRIV) + return (TRUE); + } + + return (FALSE); +} + +static inline +boolean_t +valid_user_stack_selector( + unsigned int selector +) +{ + sel_t sel = selector_to_sel(selector); + + if (selector == 0) + return (FALSE); + + if (sel.ti == SEL_LDT) { + if (sel.rpl == USER_PRIV) + return (TRUE); + } + else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { + data_desc_t *desc = (data_desc_t *)sel_to_gdt_entry(sel); + + if (desc->dpl == USER_PRIV) + return (TRUE); + } + + return (FALSE); +} +#endif diff --git a/bsd/dev/i386/stubs.c b/bsd/dev/i386/stubs.c new file mode 100644 index 000000000..a4a7ad113 --- /dev/null +++ b/bsd/dev/i386/stubs.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 by Apple Computer, Inc., all rights reserved + * Copyright (c) 1993 NeXT Computer, Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * copy a null terminated string from the kernel address space into + * the user address space. + * - if the user is denied write access, return EFAULT. + * - if the end of string isn't found before + * maxlen bytes are copied, return ENAMETOOLONG, + * indicating an incomplete copy. + * - otherwise, return 0, indicating success. + * the number of bytes copied is always returned in lencopied. + */ +int +copyoutstr(from, to, maxlen, lencopied) + void * from, * to; + size_t maxlen, *lencopied; +{ + int slen,len,error=0; + + slen = strlen(from) + 1; + + len = min(maxlen,slen); + if (copyout(from, to, len)) + error = EIO; + *lencopied = len; + + return error; +} + + +/* + * copy a null terminated string from one point to another in + * the kernel address space. + * - no access checks are performed. 
+ * - if the end of string isn't found before + * maxlen bytes are copied, return ENAMETOOLONG, + * indicating an incomplete copy. + * - otherwise, return 0, indicating success. + * the number of bytes copied is always returned in lencopied. + */ +/* from ppc/fault_copy.c -Titan1T4 VERSION */ +int +copystr(vfrom, vto, maxlen, lencopied) + register void * vfrom, *vto; + size_t maxlen, *lencopied; +{ + register unsigned l; + int error; +caddr_t from, to; + + from = vfrom; + to = vto; + for (l = 0; l < maxlen; l++) + if ((*to++ = *from++) == '\0') { + if (lencopied) + *lencopied = l + 1; + return 0; + } + if (lencopied) + *lencopied = maxlen; + return ENAMETOOLONG; +} + +int copywithin(src, dst, count) +void * src, *dst; +size_t count; +{ + bcopy(src,dst,count); + return 0; +} + +cpu_number() {return(0);} + +set_bsduthreadargs(thread_t th, void * pcb, void *ignored_arg) +{ +struct uthread * ut; + + ut = get_bsdthread_info(th); + ut->uu_ar0 = (int *)pcb; + + return(1); +} + +void * +get_bsduthreadarg(thread_t th) +{ +struct uthread *ut; + ut = get_bsdthread_info(th); + return((void *)(ut->uu_arg)); +} + +int * +get_bsduthreadrval(thread_act_t th) +{ +struct uthread *ut; + ut = get_bsdthread_info(th); + return(&ut->uu_rval[0]); +} diff --git a/bsd/dev/i386/table_inline.h b/bsd/dev/i386/table_inline.h new file mode 100644 index 000000000..a4bb2937a --- /dev/null +++ b/bsd/dev/i386/table_inline.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Selector based access to descriptor tables. + * + * HISTORY + * + * 2 April 1992 ? at NeXT + * Created. + */ + +#include + +#include +#include + +static inline +gdt_entry_t * +sel_to_gdt_entry(sel) +sel_t sel; +{ + return (&gdt[sel.index]); +} + +static inline +idt_entry_t * +sel_to_idt_entry(sel) +sel_t sel; +{ + return (&idt[sel.index]); +} + +static inline +ldt_entry_t * +sel_to_ldt_entry(tbl, sel) +ldt_t * tbl; +sel_t sel; +{ + return (&tbl[sel.index]); +} diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c new file mode 100644 index 000000000..7ddf4acf3 --- /dev/null +++ b/bsd/dev/i386/unix_signal.c @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT, Inc. + * + * HISTORY + * 13 May 1992 ? at NeXT + * Created. + */ + +#include +#include + +#include +#include + +#include +#include +#include + +#include + +#include +#include + + +/* + * FIXME.. should be included from mach_kernel/i386/seg.h + */ + +#define USER_CS 0x17 +#define USER_DS 0x1f + +#define UDATA_SEL USER_DS +#define UCODE_SEL USER_CS + +#define valid_user_code_selector(x) (TRUE) +#define valid_user_data_selector(x) (TRUE) +#define valid_user_stack_selector(x) (TRUE) + + +#define NULL_SEG 0 + +/* + * Send an interrupt to process. + * + * Stack is set up to allow sigcode stored + * in u. to call routine, followed by chmk + * to sigreturn routine below. After sigreturn + * resets the signal mask, the stack, the frame + * pointer, and the argument pointer, it returns + * to the user specified pc, psl. 
+ */ + +void +sendsig(p, catcher, sig, mask, code) + struct proc *p; + sig_t catcher; + int sig, mask; + u_long code; +{ + struct sigframe { + int retaddr; + int sig; + int code; + struct sigcontext * scp; + } frame, *fp; + struct sigcontext context, *scp; + struct sigacts *ps = p->p_sigacts; + int oonstack; + thread_t thread = current_thread(); + thread_act_t th_act = current_act(); + struct i386_saved_state * saved_state = get_user_regs(th_act); + + oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK; + if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack && + (ps->ps_sigonstack & sigmask(sig))) { + scp = ((struct sigcontext *)ps->ps_sigstk.ss_sp) - 1; + ps->ps_sigstk.ss_flags |= SA_ONSTACK; + } else + scp = ((struct sigcontext *)saved_state->uesp) - 1; + fp = ((struct sigframe *)scp) - 1; + + /* + * Build the argument list for the signal handler. + */ + + frame.retaddr = 0xffffffff; /* Handler should call sigreturn to get out of it */ + frame.sig = sig; + + if (sig == SIGILL || sig == SIGFPE) { + frame.code = code; + } else + frame.code = 0; + frame.scp = scp; + if (copyout((caddr_t)&frame, (caddr_t)fp, sizeof (frame))) + goto bad; + +#if PC_SUPPORT + { + PCcontext_t context = threadPCContext(thread); + + if (context && context->running) { + oonstack |= 02; + context->running = FALSE; + } + } +#endif + /* + * Build the signal context to be used by sigreturn. 
+ */ + context.sc_onstack = oonstack; + context.sc_mask = mask; + context.sc_eax = saved_state->eax; + context.sc_ebx = saved_state->ebx; + context.sc_ecx = saved_state->ecx; + context.sc_edx = saved_state->edx; + context.sc_edi = saved_state->edi; + context.sc_esi = saved_state->esi; + context.sc_ebp = saved_state->ebp; + context.sc_esp = saved_state->uesp; + context.sc_ss = saved_state->ss; + context.sc_eflags = saved_state->efl; + context.sc_eip = saved_state->eip; + context.sc_cs = saved_state->cs; + if (saved_state->efl & EFL_VM) { + context.sc_ds = saved_state->v86_segs.v86_ds; + context.sc_es = saved_state->v86_segs.v86_es; + context.sc_fs = saved_state->v86_segs.v86_fs; + context.sc_gs = saved_state->v86_segs.v86_gs; + + saved_state->efl &= ~EFL_VM; + } + else { + context.sc_ds = saved_state->ds; + context.sc_es = saved_state->es; + context.sc_fs = saved_state->fs; + context.sc_gs = saved_state->gs; + } + if (copyout((caddr_t)&context, (caddr_t)scp, sizeof (context))) + goto bad; + + saved_state->eip = (unsigned int)catcher; + saved_state->cs = UCODE_SEL; + + saved_state->uesp = (unsigned int)fp; + saved_state->ss = UDATA_SEL; + + saved_state->ds = UDATA_SEL; + saved_state->es = UDATA_SEL; + saved_state->fs = NULL_SEG; + saved_state->gs = NULL_SEG; + return; + +bad: + SIGACTION(p, SIGILL) = SIG_DFL; + sig = sigmask(SIGILL); + p->p_sigignore &= ~sig; + p->p_sigcatch &= ~sig; + p->p_sigmask &= ~sig; + /* sendsig is called with signal lock held */ + psignal_lock(p, SIGILL, 0, 1); + return; +} + +/* + * System call to cleanup state after a signal + * has been taken. Reset signal mask and + * stack state from context left by sendsig (above). + * Return to previous pc and psl as specified by + * context left by sendsig. Check carefully to + * make sure that the user has not modified the + * psl to gain improper priviledges or to cause + * a machine fault. 
+ */ +struct sigreturn_args { + struct sigcontext *sigcntxp; +}; +/* ARGSUSED */ +int +sigreturn(p, uap, retval) + struct proc *p; + struct sigreturn_args *uap; + int *retval; +{ + struct sigcontext context; + thread_t thread = current_thread(); + thread_act_t th_act = current_act(); + int error; + struct i386_saved_state* saved_state = get_user_regs(th_act); + + if (saved_state == NULL) + return EINVAL; + + if (error = copyin((caddr_t)uap->sigcntxp, (caddr_t)&context, + sizeof (context))) + return(error); + +#if 0 /*FIXME*/ + if ((context.sc_eflags & EFL_VM) == 0 && + (!valid_user_code_selector(context.sc_cs) || + !valid_user_data_selector(context.sc_ds) || + !valid_user_data_selector(context.sc_es) || + !valid_user_data_selector(context.sc_fs) || + !valid_user_data_selector(context.sc_gs) || + !valid_user_stack_selector(context.sc_ss)) + ) + return(EINVAL); +#endif + + if (context.sc_onstack & 01) + p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; + else + p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; + p->p_sigmask = context.sc_mask &~ sigcantmask; + saved_state->eax = context.sc_eax; + saved_state->ebx = context.sc_ebx; + saved_state->ecx = context.sc_ecx; + saved_state->edx = context.sc_edx; + saved_state->edi = context.sc_edi; + saved_state->esi = context.sc_esi; + saved_state->ebp = context.sc_ebp; + saved_state->uesp = context.sc_esp; + saved_state->ss = context.sc_ss; + saved_state->efl = context.sc_eflags; + saved_state->efl &= ~EFL_USERCLR; + saved_state->efl |= EFL_USERSET; + saved_state->eip = context.sc_eip; + saved_state->cs = context.sc_cs; + + if (context.sc_eflags & EFL_VM) { + saved_state->ds = NULL_SEG; + saved_state->es = NULL_SEG; + saved_state->fs = NULL_SEG; + saved_state->gs = NULL_SEG; + saved_state->v86_segs.v86_ds = context.sc_ds; + saved_state->v86_segs.v86_es = context.sc_es; + saved_state->v86_segs.v86_fs = context.sc_fs; + saved_state->v86_segs.v86_gs = context.sc_gs; + + saved_state->efl |= EFL_VM; + } + else { + saved_state->ds = 
context.sc_ds; + saved_state->es = context.sc_es; + saved_state->fs = context.sc_fs; + saved_state->gs = context.sc_gs; + } + +#if PC_SUPPORT + if (context.sc_onstack & 02) { + PCcontext_t context = threadPCContext(thread); + + if (context) + context->running = TRUE; + } +#endif + + return (EJUSTRETURN); +} + +/* + * machine_exception() performs MD translation + * of a mach exception to a unix signal and code. + */ + +boolean_t +machine_exception( + int exception, + int code, + int subcode, + int *unix_signal, + int *unix_code +) +{ + + switch(exception) { + + case EXC_BAD_INSTRUCTION: + *unix_signal = SIGILL; + *unix_code = code; + break; + + case EXC_ARITHMETIC: + *unix_signal = SIGFPE; + *unix_code = code; + break; + + default: + return(FALSE); + } + + return(TRUE); +} diff --git a/bsd/dev/i386/unix_startup.c b/bsd/dev/i386/unix_startup.c new file mode 100644 index 000000000..020c7c0e8 --- /dev/null +++ b/bsd/dev/i386/unix_startup.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992,7 NeXT Computer, Inc. 
+ * + * Unix data structure initialization. + * + */ + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +extern vm_map_t mb_map; + +/* + * Declare these as initialized data so we can patch them. + */ + +#ifdef NBUF +int nbuf = NBUF; +int niobuf = NBUF/2; +#else +int nbuf = 0; +int niobuf = 0; +#endif + +int srv = 0; /* Flag indicates a server boot when set */ +int ncl = 0; + +vm_map_t buffer_map; +vm_map_t bufferhdr_map; + +void +bsd_startupearly() +{ + vm_offset_t firstaddr; + vm_size_t size; + kern_return_t ret; + + if (nbuf == 0) + nbuf = atop(mem_size / 100); /* 1% */ + if (nbuf > 8192) + nbuf = 8192; + if (nbuf < 256) + nbuf = 256; + + if (niobuf == 0) + niobuf = nbuf / 2; + if (niobuf > 4096) + niobuf = 4096; + if (niobuf < 128) + niobuf = 128; + + size = (nbuf + niobuf) * sizeof (struct buf); + size = round_page(size); + + ret = kmem_suballoc(kernel_map, + &firstaddr, + size, + FALSE, + TRUE, + &bufferhdr_map); + + if (ret != KERN_SUCCESS) + panic("Failed to create bufferhdr_map"); + + ret = kernel_memory_allocate(bufferhdr_map, + &firstaddr, + size, + 0, + KMA_HERE | KMA_KOBJECT); + + if (ret != KERN_SUCCESS) + panic("Failed to allocate bufferhdr_map"); + + buf = (struct buf * )firstaddr; + bzero(buf,size); + + if (mem_size > (64 * 1024 * 1024)) { + int scale; + extern u_long tcp_sendspace; + extern u_long tcp_recvspace; + + if ((nmbclusters = ncl) == 0) { + if ((nmbclusters = ((mem_size / 16) / MCLBYTES)) > 4096) + nmbclusters = 8192; + } + if ((scale = nmbclusters / NMBCLUSTERS) > 1) { + tcp_sendspace *= scale; + tcp_recvspace *= scale; + + if (tcp_sendspace > (32 * 1024)) + tcp_sendspace = 32 * 1024; + if (tcp_recvspace > (32 * 1024)) + tcp_recvspace = 32 * 1024; + } + } +} + +void +bsd_bufferinit() +{ + unsigned int i; + vm_size_t size; + kern_return_t ret; + vm_offset_t firstaddr; + + cons.t_dev = makedev(12, 0); + + bsd_startupearly(); + + ret = kmem_suballoc(kernel_map, + (vm_offset_t *)&mbutl, 
+ (vm_size_t) (nmbclusters * MCLBYTES), + FALSE, + TRUE, + &mb_map); + + if (ret != KERN_SUCCESS) + panic("Failed to allocate mb_map\n"); + + /* + * Set up buffers, so they can be used to read disk labels. + */ + bufinit(); +} + diff --git a/bsd/dev/kmreg_com.h b/bsd/dev/kmreg_com.h new file mode 100644 index 000000000..1d4a7224f --- /dev/null +++ b/bsd/dev/kmreg_com.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * kmreg_com.h - machine independent km ioctl interface. + * + * HISTORY + * 16-Jan-92 Doug Mitchell at NeXT + * Created. + */ + +#ifdef KERNEL_PRIVATE + +#ifndef _BSD_DEV_KMREG_COM_ +#define _BSD_DEV_KMREG_COM_ + +#include +#include + +/* + * Colors for fg, bg in struct km_drawrect + */ +#define KM_COLOR_WHITE 0 +#define KM_COLOR_LTGRAY 1 +#define KM_COLOR_DKGRAY 2 +#define KM_COLOR_BLACK 3 + +/* + * The data to be rendered is treated as a pixmap of 2 bit pixels. + * The most significant bits of each byte is the leftmost pixel in that + * byte. 
Pixel values are assigned as described above. + * + * Each scanline should start on a 4 pixel boundry within the bitmap, + * and should be a multiple of 4 pixels in length. + * + * For the KMIOCERASERECT call, 'data' should be an integer set to the + * color to be used for the clear operation (data.fill). + * A rect at (x,y) measuring 'width' by 'height' will be cleared to + * the specified value. + */ +struct km_drawrect { + unsigned short x; /* Upper left corner of rect to be imaged. */ + unsigned short y; + unsigned short width; /* Width and height of rect to be imaged, + * in pixels */ + unsigned short height; + union { + void *bits; /* Pointer to 2 bit per pixel raster data. */ + int fill; /* Const color for erase operation. */ + } data; +}; + +/* + * Argument to KMIOCANIMCTL. + */ +typedef enum { + KM_ANIM_STOP, /* stop permanently */ + KM_ANIM_SUSPEND, /* suspend */ + KM_ANIM_RESUME /* resume */ +} km_anim_ctl_t; + +#define KMIOCPOPUP _IO('k', 1) /* popup new window */ +#define KMIOCRESTORE _IO('k', 2) /* restore background */ +#define KMIOCDUMPLOG _IO('k', 3) /* dump message log */ +#define KMIOCDRAWRECT _IOW('k', 5, struct km_drawrect) /* Draw rect from + * bits */ +#define KMIOCERASERECT _IOW('k', 6, struct km_drawrect) /* Erase a rect */ + +#ifdef KERNEL_PRIVATE +#define KMIOCDISABLCONS _IO('k', 8) /* disable console messages */ +#endif /* KERNEL_PRIVATE */ + +#define KMIOCANIMCTL _IOW('k',9, km_anim_ctl_t) + /* stop animation */ +#define KMIOCSTATUS _IOR('k',10, int) /* get status bits */ +#define KMIOCSIZE _IOR('k',11, struct winsize) /* get screen size */ + +/* + * Status bits returned via KMIOCSTATUS. + */ +#define KMS_SEE_MSGS 0x00000001 + +#endif /* _BSD_DEV_KMREG_COM_ */ + +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/dev/ldd.h b/bsd/dev/ldd.h new file mode 100644 index 000000000..9e33d5123 --- /dev/null +++ b/bsd/dev/ldd.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)ldd.h 2.0 03/20/90 (c) 1990 NeXT + * + * ldd.h - kernel prototypes used by loadable device drivers + * + * HISTORY + * 22-May-91 Gregg Kellogg (gk) at NeXT + * Split out public interface. + * + * 16-Aug-90 Gregg Kellogg (gk) at NeXT + * Removed a lot of stuff that's defined in other header files. + * Eventually this file should either go away or contain only imports of + * other files. + * + * 20-Mar-90 Doug Mitchell at NeXT + * Created. + * + */ + +#ifndef _BSD_DEV_LDD_PRIV_ +#define _BSD_DEV_LDD_PRIV_ + +#include +#include + +typedef int (*PFI)(); + +#endif /* _BSD_DEV_LDD_PRIV_ */ + diff --git a/bsd/dev/ppc/conf.c b/bsd/dev/ppc/conf.c new file mode 100644 index 000000000..0030fe927 --- /dev/null +++ b/bsd/dev/ppc/conf.c @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 by Apple Computer, Inc., all rights reserved + * Copyright (c) 1993 NeXT Computer, Inc. + * + * UNIX Device switch tables. + * + * HISTORY + * + * 30 July 1997 Umesh Vaishampayan (umeshv@apple.com) + * enabled file descriptor pseudo-device. + * 18 June 1993 ? at NeXT + * Cleaned up a lot of stuff in this file. + */ + +#include +#include +#include +#include +#include +#include + + +extern int nulldev(); + +struct bdevsw bdevsw[] = +{ + /* + * For block devices, every other block of 8 slots is + * reserved to NeXT. The other slots are available for + * the user. This way we can both add new entries without + * running into each other. Be sure to fill in NeXT's + * 8 reserved slots when you jump over us -- we'll do the + * same for you. 
+ */ + + /* 0 - 7 are reserved to NeXT */ + + NO_BDEVICE, /* 0*/ + NO_BDEVICE, /* 1*/ + NO_BDEVICE, /* 2*/ + NO_BDEVICE, /* 3*/ + NO_BDEVICE, /* 4*/ + NO_BDEVICE, /* 5*/ + NO_BDEVICE, /* 6*/ + NO_BDEVICE, /* 7*/ + + /* 8 - 15 are reserved to the user */ + NO_BDEVICE, /* 8*/ + NO_BDEVICE, /* 9*/ + NO_BDEVICE, /*10*/ + NO_BDEVICE, /*11*/ + NO_BDEVICE, /*12*/ + NO_BDEVICE, /*13*/ + NO_BDEVICE, /*14*/ + NO_BDEVICE, /*15*/ + + /* 16 - 23 are reserved to NeXT */ + NO_BDEVICE, /*16*/ + NO_BDEVICE, /*17*/ + NO_BDEVICE, /*18*/ + NO_BDEVICE, /*18*/ + NO_BDEVICE, /*20*/ + NO_BDEVICE, /*21*/ + NO_BDEVICE, /*22*/ + NO_BDEVICE, /*23*/ +}; + +int nblkdev = sizeof (bdevsw) / sizeof (bdevsw[0]); + +extern struct tty *km_tty[]; +extern int consopen(), consclose(), consread(), conswrite(), consioctl(), + consselect(), cons_getc(), cons_putc(); +extern int kmopen(),kmclose(),kmread(),kmwrite(),kmioctl(), + kmgetc(), kmputc(dev_t dev, char c); + +extern int cttyopen(), cttyread(), cttywrite(), cttyioctl(), cttyselect(); + +extern int mmread(),mmwrite(); +#define mmselect seltrue + +#if 1 +#define NPTY 32 +#else /* 1 */ +#include +#endif /* 1 */ +#if NPTY > 0 +extern struct tty *pt_tty[]; +extern int ptsopen(),ptsclose(),ptsread(),ptswrite(),ptsstop(),ptsputc(); +extern int ptcopen(),ptcclose(),ptcread(),ptcwrite(),ptcselect(), + ptyioctl(); +#else +#define ptsopen eno_opcl +#define ptsclose eno_opcl +#define ptsread eno_rdwrt +#define ptswrite eno_rdwrt +#define ptsstop nulldev +#define ptsputc nulldev + +#define ptcopen eno_opcl +#define ptcclose eno_opcl +#define ptcread eno_rdwrt +#define ptcwrite eno_rdwrt +#define ptcselect eno_select +#define ptyioctl eno_ioctl +#endif + +extern int logopen(),logclose(),logread(),logioctl(),logselect(); +extern int seltrue(); + +struct cdevsw cdevsw[] = +{ + /* + * For character devices, every other block of 16 slots is + * reserved to NeXT. The other slots are available for + * the user. 
This way we can both add new entries without + * running into each other. Be sure to fill in NeXT's + * 16 reserved slots when you jump over us -- we'll do the + * same for you. + */ + + /* 0 - 15 are reserved to NeXT */ + + { + consopen, consclose, consread, conswrite, /* 0*/ + consioctl, nulldev, nulldev, 0, consselect, + eno_mmap, eno_strat, cons_getc, cons_putc, D_TTY + }, + NO_CDEVICE, /* 1*/ + { + cttyopen, nulldev, cttyread, cttywrite, /* 2*/ + cttyioctl, nulldev, nulldev, 0, cttyselect, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TTY + }, + { + nulldev, nulldev, mmread, mmwrite, /* 3*/ + eno_ioctl, nulldev, nulldev, 0, mmselect, + eno_mmap, eno_strat, eno_getc, eno_putc, 0 + }, + { + ptsopen, ptsclose, ptsread, ptswrite, /* 4*/ + ptyioctl, ptsstop, nulldev, pt_tty, ttselect, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TTY + }, + { + ptcopen, ptcclose, ptcread, ptcwrite, /* 5*/ + ptyioctl, nulldev, nulldev, 0, ptcselect, + eno_mmap, eno_strat, eno_getc, eno_putc, D_TTY + }, + { + logopen, logclose, logread, eno_rdwrt, /* 6*/ + logioctl, eno_stop, nulldev, 0, logselect, + eno_mmap, eno_strat, eno_getc, eno_putc, 0 + }, + NO_CDEVICE, /* 7*/ + NO_CDEVICE, /* 8*/ + NO_CDEVICE, /* 9*/ + NO_CDEVICE, /*10*/ + NO_CDEVICE, /*11*/ + { + kmopen, kmclose, kmread, kmwrite, /*12*/ + kmioctl, nulldev, nulldev, km_tty, ttselect, + eno_mmap, eno_strat, kmgetc, kmputc, 0 + }, + NO_CDEVICE, /*13*/ + NO_CDEVICE, /*14*/ + NO_CDEVICE, /*15*/ + + /* 16 - 31 are reserved to the user */ + NO_CDEVICE, /*16*/ + NO_CDEVICE, /*17*/ + NO_CDEVICE, /*18*/ + NO_CDEVICE, /*19*/ + NO_CDEVICE, /*20*/ + NO_CDEVICE, /*21*/ + NO_CDEVICE, /*22*/ + NO_CDEVICE, /*23*/ + NO_CDEVICE, /*24*/ + NO_CDEVICE, /*25*/ + NO_CDEVICE, /*26*/ + NO_CDEVICE, /*27*/ + NO_CDEVICE, /*28*/ + NO_CDEVICE, /*29*/ + NO_CDEVICE, /*30*/ + NO_CDEVICE, /*31*/ + + /* 32 - 47 are reserved to NeXT */ + NO_CDEVICE, /*32*/ + NO_CDEVICE, /*33*/ + NO_CDEVICE, /*34*/ + NO_CDEVICE, /*35*/ + NO_CDEVICE, /*36*/ + /* 37 used to be for 
nvram */ + NO_CDEVICE, /*37*/ + NO_CDEVICE, /*38*/ + NO_CDEVICE, /*39*/ + NO_CDEVICE, /*40*/ + /* 41 used to be for fd */ + NO_CDEVICE, /*41*/ + NO_CDEVICE, /*42*/ +}; +int nchrdev = sizeof (cdevsw) / sizeof (cdevsw[0]); + + +#include /* for VCHR and VBLK */ +/* + * return true if a disk + */ +int +isdisk(dev, type) + dev_t dev; + int type; +{ + switch (major(dev)) { + case 1: /* fd: floppy */ + case 6: /* sd: scsi disk */ + case 3: /* ide: */ + if (type == VBLK) + return(1); + break; + case 14: /* sd: scsi disk */ + case 41: /* fd: floppy */ + if (type == VCHR) + return(1); + break; + } + return(0); +} + +static int chrtoblktab[] = { + /* CHR*/ /* BLK*/ /* CHR*/ /* BLK*/ + /* 0 */ NODEV, /* 1 */ NODEV, + /* 2 */ NODEV, /* 3 */ NODEV, + /* 4 */ NODEV, /* 5 */ NODEV, + /* 6 */ NODEV, /* 7 */ NODEV, + /* 8 */ NODEV, /* 9 */ NODEV, + /* 10 */ NODEV, /* 11 */ NODEV, + /* 12 */ NODEV, /* 13 */ NODEV, + /* 14 */ 6, /* 15 */ NODEV, + /* 16 */ NODEV, /* 17 */ NODEV, + /* 18 */ NODEV, /* 19 */ NODEV, + /* 20 */ NODEV, /* 21 */ NODEV, + /* 22 */ NODEV, /* 23 */ NODEV, + /* 24 */ NODEV, /* 25 */ NODEV, + /* 26 */ NODEV, /* 27 */ NODEV, + /* 28 */ NODEV, /* 29 */ NODEV, + /* 30 */ NODEV, /* 31 */ NODEV, + /* 32 */ NODEV, /* 33 */ NODEV, + /* 34 */ NODEV, /* 35 */ NODEV, + /* 36 */ NODEV, /* 37 */ NODEV, + /* 38 */ NODEV, /* 39 */ NODEV, + /* 40 */ NODEV, /* 41 */ 1, + /* 42 */ NODEV, /* 43 */ NODEV, + /* 44 */ NODEV, +}; + +/* + * convert chr dev to blk dev + */ +dev_t +chrtoblk(dev) + dev_t dev; +{ + int blkmaj; + + if (major(dev) >= nchrdev) + return(NODEV); + blkmaj = chrtoblktab[major(dev)]; + if (blkmaj == NODEV) + return(NODEV); + return(makedev(blkmaj, minor(dev))); +} + +/* + * Returns true if dev is /dev/mem or /dev/kmem. 
+ */ +int iskmemdev(dev) + dev_t dev; +{ + + return (major(dev) == 3 && minor(dev) < 2); +} diff --git a/bsd/dev/ppc/cons.c b/bsd/dev/ppc/cons.c new file mode 100644 index 000000000..3c5c10d2b --- /dev/null +++ b/bsd/dev/ppc/cons.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987, 1988 NeXT, Inc. + * + * HISTORY + * 7-Jan-93 Mac Gillon (mgillon) at NeXT + * Integrated POSIX support + * + * 12-Aug-87 John Seamons (jks) at NeXT + * Ported to NeXT. + */ + +/* + * Indirect driver for console. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +struct tty cons; +struct tty *constty; /* current console device */ + +/*ARGSUSED*/ +int +consopen(dev, flag, devtype, pp) + dev_t dev; + int flag, devtype; + struct proc *pp; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_open)(device, flag, devtype, pp)); +} + +/*ARGSUSED*/ +int +consclose(dev, flag, mode, pp) + dev_t dev; + int flag, mode; + struct proc *pp; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_close)(device, flag, mode, pp)); +} + +/*ARGSUSED*/ +int +consread(dev, uio, ioflag) + dev_t dev; + struct uio *uio; + int ioflag; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_read)(device, uio, ioflag)); +} + +/*ARGSUSED*/ +int +conswrite(dev, uio, ioflag) + dev_t dev; + struct uio *uio; + int ioflag; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_write)(device, uio, ioflag)); +} + +/*ARGSUSED*/ +int +consioctl(dev, cmd, addr, flag, p) + dev_t dev; + int cmd; + caddr_t addr; + int flag; + struct proc *p; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + /* + * Superuser can always use this to wrest control of console + * output from the "virtual" console. 
+ */ + if (cmd == TIOCCONS && constty) { + int error = suser(p->p_ucred, (u_short *) NULL); + if (error) + return (error); + constty = NULL; + return (0); + } + return ((*cdevsw[major(device)].d_ioctl)(device, cmd, addr, flag, p)); +} + +/*ARGSUSED*/ +int +consselect(dev, flag, p) + dev_t dev; + int flag; + struct proc *p; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_select)(device, flag, p)); +} + +int +cons_getc() +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_getc)(device)); +} + +/*ARGSUSED*/ +int +cons_putc(c) + char c; +{ + dev_t device; + + if (constty) + device = constty->t_dev; + else + device = cons.t_dev; + return ((*cdevsw[major(device)].d_putc)(device, c)); +} + +/* + * Write message to console; create an alert panel if no text-type window + * currently exists. Caller must call alert_done() when finished. + * The height and width arguments are not used; they are provided for + * compatibility with the 68k version of alert(). + */ +int +alert( + int width, + int height, + const char *title, + const char *msg, + int p1, + int p2, + int p3, + int p4, + int p5, + int p6, + int p7, + int p8) +{ + char smsg[200]; + + sprintf(smsg, msg, p1, p2, p3, p4, p5, p6, p7, p8); +#if FIXME /* [ */ + /* DoAlert(title, smsg); */ +#else + printf("%s\n",smsg); +#endif /* FIXME ] */ + + return 0; +} + +int +alert_done() +{ + /* DoRestore(); */ + return 0; +} + diff --git a/bsd/dev/ppc/cons.h b/bsd/dev/ppc/cons.h new file mode 100644 index 000000000..00d91a155 --- /dev/null +++ b/bsd/dev/ppc/cons.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987 NeXT, Inc. + */ + +struct consdev { + char *cn_name; /* name of device in dev_name_list */ + int (*cn_probe)(); /* probe hardware and fill in consdev info */ + int (*cn_init)(); /* turn on as console */ + int (*cn_getc)(); /* kernel getchar interface */ + int (*cn_putc)(); /* kernel putchar interface */ + struct tty *cn_tp; /* tty structure for console device */ + dev_t cn_dev; /* major/minor of device */ + short cn_pri; /* pecking order; the higher the better */ +}; + +/* values for cn_pri - reflect our policy for console selection */ +#define CN_DEAD 0 /* device doesn't exist */ +#define CN_NORMAL 1 /* device exists but is nothing special */ +#define CN_INTERNAL 2 /* "internal" bit-mapped display */ +#define CN_REMOTE 3 /* serial interface with remote bit set */ + +/* XXX */ +#define CONSMAJOR 0 + +#ifdef KERNEL + +#include + +extern struct consdev constab[]; +extern struct consdev *cn_tab; +extern struct tty *cn_tty; + +extern struct tty cons; +extern struct tty *constty; /* current console device */ +#endif + diff --git a/bsd/dev/ppc/ffs.c b/bsd/dev/ppc/ffs.c new file mode 100644 index 000000000..fa02a4795 --- /dev/null +++ b/bsd/dev/ppc/ffs.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. + * + * File: machdep/i386/libc/ffs.c + * Author: Bruce Martin, NeXT Computer, Inc. + * + * This file contains machine dependent code for the ffs function + * on NeXT i386-based products. Currently tuned for the i486. + * + * HISTORY + * 27-Sep-92 Bruce Martin (Bruce_Martin@NeXT.COM) + * Created: stolen from Mike's code. + */ + +unsigned +ffs(unsigned mask) +{ + unsigned bitpos; + + if (mask == 0) + return 0; + + bitpos = 1; + while ((mask & 0xff) == 0) { + bitpos += 8; + mask >>= 8; + } + while ((mask & 1) == 0) { + bitpos += 1; + mask >>= 1; + } + return bitpos; +} diff --git a/bsd/dev/ppc/ffs.s b/bsd/dev/ppc/ffs.s new file mode 100644 index 000000000..75ec0dea4 --- /dev/null +++ b/bsd/dev/ppc/ffs.s @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992, 1997-1998 Apple Computer, Inc. All rights reserved. + * + * File: machdep/ppc/libc/ffs.s + * + * int ffs(int value) + * + * DESCRIPTION + * The ffs() function finds the first bit set in value and returns the + * index of that bit. Bits are numbered starting from 1, starting at + * the right-most bit. A return value of 0 means that the argument was + * + * HISTORY + * 14-Aug-1998 Umesh Vaishampayan (umeshv@apple.com) + * Optimized! + * + * 10-Mar-1998 Matt Watson (mwatson@apple.com) + * Correctified + * + * 19-Jan-1998 Matt Watson (mwatson@apple.com) + * Simplified + * + * 24-Jan-1997 Umesh Vaishampayan (umeshv@NeXT.com) + * Ported to PPC. + */ + +.text +.align 4 +.globl _ffs +_ffs: /* Cycles */ + neg r0,r3 /* 0 */ + and r3,r0,r3 /* 1 */ + li r4, 32 /* 1 */ + cntlzw r3,r3 /* 2 */ + subf r3,r3,r4 /* 3 */ + blr + + .globl _abs +_abs: + srawi r0,r3,31 + xor r3,r0,r3 + subf r3,r0,r3 + blr + diff --git a/bsd/dev/ppc/kern_machdep.c b/bsd/dev/ppc/kern_machdep.c new file mode 100644 index 000000000..38a00cc3c --- /dev/null +++ b/bsd/dev/ppc/kern_machdep.c @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1990, 1993 NeXT, Inc. + * Copyright (C) 1997 Apple Computer, Inc. + * + * File: next/kern_machdep.c + * Author: John Seamons + * + * Machine-specific kernel routines. + * + * HISTORY + * 8-Dec-91 Peter King (king) at NeXT + * Added grade_cpu_subtype(). + * FIXME: Do we want to merge this with check_cpu_subtype()? + * + * 5-Mar-90 John Seamons (jks) at NeXT + * Created. 
+ */ + +#include +#include +#include +#include +#include +#include + +int +check_cpu_subtype(cpu_subtype_t cpu_subtype) +{ + struct machine_slot *ms = &machine_slot[cpu_number()]; + + if (cpu_subtype == ms->cpu_subtype) + return (TRUE); + + if (cpu_subtype == CPU_SUBTYPE_POWERPC_601) + return (FALSE); + + switch (cpu_subtype) { + case CPU_SUBTYPE_POWERPC_7450: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_604e: + case CPU_SUBTYPE_POWERPC_604: + case CPU_SUBTYPE_POWERPC_603ev: + case CPU_SUBTYPE_POWERPC_603e: + case CPU_SUBTYPE_POWERPC_603: + case CPU_SUBTYPE_POWERPC_ALL: + return (TRUE); + } + + return (FALSE); +} + +/* + * Routine: grade_cpu_subtype() + * + * Function: + * Return a relative preference for cpu_subtypes in fat executable files. + * The higher the grade, the higher the preference. + * A grade of 0 means not acceptable. + */ + +int +grade_cpu_subtype(cpu_subtype_t cpu_subtype) +{ + struct machine_slot *ms = &machine_slot[cpu_number()]; + + /* + * This code should match cpusubtype_findbestarch() in best_arch.c in the + * cctools project. As of 2/16/98 this is what has been agreed upon for + * the PowerPC subtypes. If an exact match is not found the subtype will + * be picked from the following order: + * 7400, 750, 604e, 604, 603ev, 603e, 603, ALL + * Note the 601 is NOT in the list above. It is only picked via an exact + * match. For details see Radar 2213821. + * + * To implement this function to follow what was agreed upon above, we use + * the fact there are currently 10 different subtypes. Exact matches return + * the value 10, the value 0 is returned for 601 that is not an exact match, + * and the values 9 thru 1 are returned for the subtypes listed in the order + * above. 
+ */ + if (ms->cpu_subtype == cpu_subtype) + return 10; + if (cpu_subtype == CPU_SUBTYPE_POWERPC_601) + return 0; + switch (cpu_subtype) { + case CPU_SUBTYPE_POWERPC_7450: + return 9; + case CPU_SUBTYPE_POWERPC_7400: + return 8; + case CPU_SUBTYPE_POWERPC_750: + return 7; + case CPU_SUBTYPE_POWERPC_604e: + return 6; + case CPU_SUBTYPE_POWERPC_604: + return 5; + case CPU_SUBTYPE_POWERPC_603ev: + return 4; + case CPU_SUBTYPE_POWERPC_603e: + return 3; + case CPU_SUBTYPE_POWERPC_603: + return 2; + case CPU_SUBTYPE_POWERPC_ALL: + return 1; + } + /* + * If we get here it is because it is a cpusubtype we don't support (602 and + * 620) or new cpusubtype that was added since this code was written. Both + * will be considered unacceptable. + */ + return 0; +} + +boolean_t +kernacc( + off_t start, + size_t len +) +{ + off_t base; + off_t end; + + base = trunc_page(start); + end = start + len; + + while (base < end) { + if(kvtophys((vm_offset_t)base) == NULL) + return(FALSE); + base += page_size; + } + + return (TRUE); +} diff --git a/bsd/dev/ppc/km.c b/bsd/dev/ppc/km.c new file mode 100644 index 000000000..e04d3a636 --- /dev/null +++ b/bsd/dev/ppc/km.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * km.m - kernel keyboard/monitor module, procedural interface. + * + * HISTORY + */ + +#include +#include + +#include +#include +#include +#include +#include /* for kmopen */ +#include +#include /* for kmopen */ +#include +#include +#include +#include + +/* + * 'Global' variables, shared only by this file and conf.c. + */ +extern struct tty cons; +struct tty *km_tty[1] = { &cons }; + +/* + * this works early on, after initialize_screen() but before autoconf (and thus + * before we have a kmDevice). + */ +int disableConsoleOutput; + +/* + * 'Global' variables, shared only by this file and kmDevice.m. + */ +int initialized = 0; + +static int kmoutput(struct tty *tp); +static void kmtimeout(struct tty *tp); +static void kmstart(struct tty *tp); + +extern void KeyboardOpen(void); + +int kminit() +{ + cons.t_dev = makedev(12, 0); + initialized = 1; +} +/* + * cdevsw interface to km driver. + */ +int +kmopen( + dev_t dev, + int flag, + int devtype, + struct proc *pp) +{ + int rtn; + int unit; + struct tty *tp; + struct winsize *wp; + int ret; + + unit = minor(dev); + if(unit >= 1) + return (ENXIO); + + tp = (struct tty *)&cons; + tp->t_oproc = kmstart; + tp->t_param = NULL; + tp->t_dev = dev; + + if ( !(tp->t_state & TS_ISOPEN) ) { + tp->t_iflag = TTYDEF_IFLAG; + tp->t_oflag = TTYDEF_OFLAG; + tp->t_cflag = (CREAD | CS8 | CLOCAL); + tp->t_lflag = TTYDEF_LFLAG; + tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; + termioschars(&tp->t_termios); + ttsetwater(tp); + } else if ((tp->t_state & TS_XCLUDE) && pp->p_ucred->cr_uid != 0) + return EBUSY; + + tp->t_state |= TS_CARR_ON; /* lie and say carrier exists and is on. */ + ret = ((*linesw[tp->t_line].l_open)(dev, tp)); + { + PE_Video video; + wp = &tp->t_winsize; + /* Magic numbers. 
These are CHARWIDTH and CHARHEIGHT + * from osfmk/ppc/POWERMAC/video_console.c + */ + wp->ws_xpixel = 8; + wp->ws_ypixel = 16; + + if (flag & O_POPUP) + PE_initialize_console(0, kPETextScreen); + + bzero(&video, sizeof(video)); + PE_current_console(&video); + if( video.v_width != 0 && video.v_height != 0 ) { + wp->ws_col = video.v_width / wp->ws_xpixel; + wp->ws_row = video.v_height / wp->ws_ypixel; + } else { + wp->ws_col = 100; + wp->ws_row = 36; + } + } + return ret; +} + +int +kmclose( + dev_t dev, + int flag, + int mode, + struct proc *p) +{ + + struct tty *tp; + + tp = &cons; + (*linesw[tp->t_line].l_close)(tp,flag); + ttyclose(tp); + return (0); +} + +int +kmread( + dev_t dev, + struct uio *uio, + int ioflag) +{ + register struct tty *tp; + + tp = &cons; + return ((*linesw[tp->t_line].l_read)(tp, uio, ioflag)); +} + +int +kmwrite( + dev_t dev, + struct uio *uio, + int ioflag) +{ + register struct tty *tp; + + tp = &cons; + return ((*linesw[tp->t_line].l_write)(tp, uio, ioflag)); +} + +int +kmioctl( + dev_t dev, + int cmd, + caddr_t data, + int flag, + struct proc *p) +{ + int error; + struct tty *tp = &cons; + struct winsize *wp; + + switch (cmd) { + + + + case KMIOCSIZE: + wp = (struct winsize *)data; + *wp = tp->t_winsize; + return 0; + + case TIOCSWINSZ: + /* Prevent changing of console size -- + * this ensures that login doesn't revert to the + * termcap-defined size + */ + return EINVAL; + + /* Bodge in the CLOCAL flag as the km device is always local */ + case TIOCSETA: + case TIOCSETAW: + case TIOCSETAF: { + register struct termios *t = (struct termios *)data; + t->c_cflag |= CLOCAL; + /* No Break */ + } + default: + error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); + if (error >= 0) { + return error; + } + error = ttioctl (tp, cmd, data, flag, p); + if (error >= 0) { + return error; + } + else { + return ENOTTY; + } + } +} + +int +kmputc( + int c) +{ + + if( disableConsoleOutput) + return( 0); + + if(!initialized) + return( 0); + + if(c 
== '\n') + cnputcusr('\r'); + + cnputcusr(c); + + return 0; +} + +int +kmgetc( + dev_t dev) +{ + int c; + + c= cngetc(); + + if (c == '\r') { + c = '\n'; + } + cnputcusr(c); + return c; +} + +int +kmgetc_silent( + dev_t dev) +{ + int c; + + c= cngetc(); + if (c == '\r') { + c = '\n'; + } + return c; +} + +/* + * Callouts from linesw. + */ + +#define KM_LOWAT_DELAY ((ns_time_t)1000) + +static void +kmstart( + struct tty *tp) +{ + extern int hz; + if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) + goto out; + if (tp->t_outq.c_cc == 0) + goto out; + tp->t_state |= TS_BUSY; + if (tp->t_outq.c_cc > tp->t_lowat) { + /* + * Start immediately. + */ + kmoutput(tp); + } + else { + /* + * Wait a bit... + */ +#if 0 + /* FIXME */ + timeout(kmtimeout, tp, hz); +#else + kmoutput(tp); +#endif + } +out: + ttwwakeup(tp); +} + +static void +kmtimeout( struct tty *tp) +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + kmoutput(tp); + (void) thread_funnel_set(kernel_flock, FALSE); + + +} +static int +kmoutput( + struct tty *tp) +{ + /* + * FIXME - to be grokked...copied from m68k km.c. + */ + char buf[80]; + char *cp; + int cc = -1; + extern int hz; + + + while (tp->t_outq.c_cc > 0) { + cc = ndqb(&tp->t_outq, 0); + if (cc == 0) + break; + cc = min(cc, sizeof buf); + (void) q_to_b(&tp->t_outq, buf, cc); + for (cp = buf; cp < &buf[cc]; cp++) { + kmputc(*cp & 0x7f); + } + } + if (tp->t_outq.c_cc > 0) { + timeout(kmtimeout, tp, hz); + } + tp->t_state &= ~TS_BUSY; + ttwwakeup(tp); + + return 0; +} +cons_cinput(char ch) +{ + struct tty *tp = &cons; + boolean_t funnel_state; + + + (*linesw[tp->t_line].l_rint) (ch, tp); + +} + diff --git a/bsd/dev/ppc/machdep.c b/bsd/dev/ppc/machdep.c new file mode 100644 index 000000000..6e44b5eb9 --- /dev/null +++ b/bsd/dev/ppc/machdep.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * + * Machine dependent cruft. + * + * 27-Apr-1997 A.Ramesh at Apple + * + * + */ + +#include +#include +#include + +int reboot_how; +extern struct tty cons; +extern struct tty *constty; /* current console device */ + +extern int getchar(); + +#define putchar cnputc + +void +gets(buf) + char *buf; +{ + register char *lp; + register c; + + lp = buf; + for (;;) { + c = getchar() & 0177; + switch(c) { + case '\n': + case '\r': + *lp++ = '\0'; + return; + case '\b': + if (lp > buf) { + lp--; + putchar(' '); + putchar('\b'); + } + continue; + case '#': + case '\177': + lp--; + if (lp < buf) + lp = buf; + continue; + case '@': + case 'u'&037: + lp = buf; + putchar('\n'); /* XXX calls 'cnputc' on mips */ + continue; + default: + *lp++ = c; + } + } +} + +int +getchar() +{ + int c; + + c = cngetc(); +#if 0 + if (c == 0x1b) /* ESC ? 
*/ + call_kdp(); +#endif 0 + + if (c == '\r') + c = '\n'; + cnputc(c); + return c; +} + diff --git a/bsd/dev/ppc/mem.c b/bsd/dev/ppc/mem.c new file mode 100644 index 000000000..6af5aca37 --- /dev/null +++ b/bsd/dev/ppc/mem.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department, and code derived from software contributed to + * Berkeley by William Jolitz. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: Utah $Hdr: mem.c 1.13 89/10/08$ + * @(#)mem.c 8.1 (Berkeley) 6/11/93 + */ + +#include + +/* + * Memory special file + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +static caddr_t devzerobuf; + +extern vm_offset_t mem_actual; +extern pmap_t kernel_pmap; + +mmread(dev, uio) + dev_t dev; + struct uio *uio; +{ + + return (mmrw(dev, uio, UIO_READ)); +} + +mmwrite(dev, uio) + dev_t dev; + struct uio *uio; +{ + + return (mmrw(dev, uio, UIO_WRITE)); +} + +mmrw(dev, uio, rw) + dev_t dev; + struct uio *uio; + enum uio_rw rw; +{ + register int o; + register u_int c, v; + register struct iovec *iov; + int error = 0; + vm_offset_t where; + int spl; + vm_size_t size; + extern boolean_t kernacc(off_t, size_t ); + + while (uio->uio_resid > 0 && error == 0) { + iov = uio->uio_iov; + if (iov->iov_len == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + if (uio->uio_iovcnt < 0) + panic("mmrw"); + continue; + } + switch (minor(dev)) { + +/* minor device 0 is physical memory */ + case 0: + v = trunc_page(uio->uio_offset); + if (uio->uio_offset >= ((dgWork.dgFlags & enaDiagDM) ? mem_actual : mem_size)) + goto fault; + + size= PAGE_SIZE; + + if(dgWork.dgFlags & enaDiagDM) { /* Can we really get all memory? 
*/ + if (kmem_alloc_pageable(kernel_map, &where, size) != KERN_SUCCESS) { + goto fault; + } + else { + (void)mapping_make(kernel_pmap, 0, where, v, + VM_PROT_READ, 2, 0); /* Map it in for the moment */ + } + } + else { + if (kmem_alloc(kernel_map, &where, size) + != KERN_SUCCESS) { + goto fault; + } + } + o = uio->uio_offset - v; + c = min(PAGE_SIZE - o, (u_int)iov->iov_len); + error = uiomove((caddr_t) (where + o), c, uio); + + if(dgWork.dgFlags & enaDiagDM) (void)mapping_remove(kernel_pmap, where); /* Unmap it */ + kmem_free(kernel_map, where, PAGE_SIZE); + continue; + + /* minor device 1 is kernel memory */ + case 1: + /* Do some sanity checking */ + if (((caddr_t)uio->uio_offset >= VM_MAX_KERNEL_ADDRESS) || + ((caddr_t)uio->uio_offset <= VM_MIN_KERNEL_ADDRESS)) + goto fault; + c = iov->iov_len; + if (!kernacc((caddr_t)uio->uio_offset, c)) + goto fault; + error = uiomove((caddr_t)uio->uio_offset, (int)c, uio); + continue; + + /* minor device 2 is EOF/RATHOLE */ + case 2: + if (rw == UIO_READ) + return (0); + c = iov->iov_len; + break; + /* minor device 3 is ZERO/RATHOLE */ + case 3: + if(devzerobuf == NULL) { + MALLOC(devzerobuf, caddr_t,PAGE_SIZE, M_TEMP, M_WAITOK); + bzero(devzerobuf, PAGE_SIZE); + } + if(uio->uio_rw == UIO_WRITE) { + c = iov->iov_len; + break; + } + c = min(iov->iov_len, PAGE_SIZE); + error = uiomove(devzerobuf, (int)c, uio); + continue; + default: + goto fault; + break; + } + + if (error) + break; + iov->iov_base += c; + iov->iov_len -= c; + uio->uio_offset += c; + uio->uio_resid -= c; + } + return (error); +fault: + return (EFAULT); +} + diff --git a/bsd/dev/ppc/memmove.c b/bsd/dev/ppc/memmove.c new file mode 100644 index 000000000..e36599aa8 --- /dev/null +++ b/bsd/dev/ppc/memmove.c @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991,1993 NeXT Computer, Inc. All rights reserved. + * + */ + + + +void ovbcopy(const char *src, char *dst, unsigned int ulen) +{ + bcopy(src, dst, ulen); +} + +#if 0 +void *memcpy(void *dst, const void *src, unsigned int ulen) +{ + bcopy(src, dst, ulen); + return dst; +} +#endif /* 0 */ +void *memmove(void *dst, const void *src, unsigned int ulen) +{ + bcopy(src, dst, ulen); + return dst; +} + + diff --git a/bsd/dev/ppc/nvram.c b/bsd/dev/ppc/nvram.c new file mode 100644 index 000000000..d4900147f --- /dev/null +++ b/bsd/dev/ppc/nvram.c @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * BSD driver for Non-volatile RAM. + * Stub functions call the real thing in the Platform Expert. + * + * Suurballe 11 Feb 1999 + */ + +#include +#include + +extern int PEnvopen ( dev_t, int, int, struct proc * ); +extern int PEnvclose ( dev_t, int, int, struct proc * ); +extern int PEnvread ( long, int, unsigned char *); +extern int PEnvwrite ( long, int, unsigned char * ); + + +nvopen(dev, flag, devtype, pp) + dev_t dev; + int flag, devtype; + struct proc *pp; +{ + return PEnvopen(dev,flag,devtype,pp); +} + + + +nvclose(dev, flag, mode, pp) + dev_t dev; + int flag, mode; + struct proc *pp; +{ + return PEnvclose(dev,flag,mode,pp); +} + + + +nvread(dev, uio, ioflag) + dev_t dev; + struct uio *uio; + int ioflag; +{ + long offset; + long size; + int c; + unsigned char cc; + long read = 0; + int error = 0; + + offset = uio->uio_offset; + size = uio->uio_resid; + + for (read = 0; read < size; read++, offset++) { + error = PEnvread(offset, 1, &cc); + if ( error ) { + return error; + } + c = (int)cc; + error = ureadc(c, uio); + if (error) { + return error; + } + } + return error; +} + + + +nvwrite(dev_t dev, struct uio *uio, int ioflag) +{ + register struct iovec *iov; + long offset; + long size; + int c; + unsigned char cc; + long wrote = 0; + int error = 0; + + offset = uio->uio_offset; + size = uio->uio_resid; + + for (wrote = 0; wrote < size; wrote++, offset++) { + c = uwritec(uio); + if (c < 0) { + return 0; + } + cc = (unsigned char)c; + error = PEnvwrite(offset, 1, 
&cc); + } + return error; +} diff --git a/bsd/dev/ppc/ppc_init.c b/bsd/dev/ppc/ppc_init.c new file mode 100644 index 000000000..293ac2a6a --- /dev/null +++ b/bsd/dev/ppc/ppc_init.c @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __MACHO__ +#include +#endif + +/* External references */ + +extern unsigned int intstack[]; /* declared in start.s */ +extern unsigned int intstack_top_ss; /* declared in start.s */ +#if MACH_KGDB +extern unsigned int gdbstackptr; /* declared in start.s */ +extern unsigned int gdbstack_top_ss; /* declared in start.s */ +#endif /* MACH_KGDB */ + +/* Stuff declared in kern/bootstrap.c which we may need to initialise */ + +extern vm_offset_t boot_start; +extern vm_size_t boot_size; +extern vm_offset_t boot_region_desc; +extern vm_size_t boot_region_count; +extern int boot_thread_state_flavor; +extern thread_state_t boot_thread_state; +extern unsigned int boot_thread_state_count; + +/* Trap handling function prototypes */ + +extern void thandler(void); /* trap handler */ +extern void ihandler(void); /* interrupt handler */ +extern void shandler(void); /* syscall handler */ +extern void gdbhandler(void); /* debugger handler */ +extern void fpu_switch(void); /* fp handler */ +extern void atomic_switch_trap(void); /* fast path atomic thread switch */ + +/* definitions */ + +struct ppc_thread_state boot_task_thread_state; + + + + + +#if 1 /* TODO NMGS - vm_map_steal_memory shouldn't use these - remove */ +vm_offset_t avail_start; +vm_offset_t avail_end; +#endif +unsigned int avail_remaining = 0; +vm_offset_t first_avail; + +/* + * Mach-O Support + */ + + +#ifdef __MACHO__ +extern struct mach_header _mh_execute_header; +void *sectTEXTB; +int sectSizeTEXT; +void *sectDATAB; +int sectSizeDATA; +void *sectOBJCB; +int sectSizeOBJC; +void *sectLINKB; +int sectSizeLINK; + +vm_offset_t end, etext, edata; +#define ETEXT etext +#endif + + + +void ppc_vm_init(unsigned int memory_size, boot_args *args) +{ + unsigned int htabmask; + unsigned int i; + vm_offset_t addr; + int 
boot_task_end_offset; +#if NCPUS > 1 + const char *cpus; +#endif /* NCPUS > 1 */ + + printf("mem_size = %d M\n",memory_size / (1024 * 1024)); + +#ifdef __MACHO__ + /* Now retrieve addresses for end, edata, and etext + * from MACH-O headers. + */ + + + etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; + edata = (vm_offset_t) sectDATAB + sectSizeDATA; + end = getlastaddr(); +#endif + + /* Stitch valid memory regions together - they may be contiguous + * even though they're not already glued together + */ + + /* Go through the list of memory regions passed in via the args + * and copy valid entries into the pmap_mem_regions table, adding + * further calculated entries. + */ + + + /* Initialise the pmap system, using space above `first_avail'*/ + +#ifndef __MACHO__ + free_regions[free_regions_count].start = + round_page((unsigned int)&_ExceptionVectorsEnd - + (unsigned int)&_ExceptionVectorsStart); +#else + /* On MACH-O generated kernels, the Exception Vectors + * are already mapped and loaded at 0 -- no relocation + * or freeing of memory is needed + */ + + free_regions[free_regions_count].start = round_page((unsigned int)&_ExceptionVectorsEnd) + 4096; +#endif + + /* If we are on a PDM machine memory at 1M might be used + * for video. TODO NMGS call video driver to do this + * somehow + */ + + + /* For PowerMac, first_avail is set to above the bootstrap task. + * TODO NMGS - different screen modes - might free mem? + */ + + first_avail = round_page(args->first_avail); + + + /* map in the exception vectors */ + /* + * map the kernel text, data and bss. 
Don't forget other regions too + */ + for (i = 0; i < args->kern_info.region_count; i++) { +#if MACH_KDB + if (args->kern_info.regions[i].prot == VM_PROT_NONE && + i == args->kern_info.region_count - 1) { + /* assume that's the kernel symbol table */ + kern_sym_start = args->kern_info.regions[i].addr; + kern_sym_size = args->kern_info.regions[i].size; + printf("kernel symbol table at 0x%x size 0x%x\n", + kern_sym_start, kern_sym_size); + args->kern_info.regions[i].prot |= + (VM_PROT_WRITE|VM_PROT_READ); + } +#endif /* MACH_KDB */ + +#ifdef __MACHO__ + /* Skip the VECTORS segment */ + if (args->kern_info.regions[i].addr == 0) + continue; +#endif + + boot_region_count = args->task_info.region_count; + boot_size = 0; + boot_task_end_offset = 0; + /* Map bootstrap task pages 1-1 so that user_bootstrap can find it */ + for (i = 0; i < boot_region_count; i++) { + if (args->task_info.regions[i].mapped) { + /* kernel requires everything page aligned */ +#if DEBUG + printf("mapping virt 0x%08x to phys 0x%08x end 0x%x, prot=0x%b\n", + ppc_trunc_page(args->task_info.base_addr + + args->task_info.regions[i].offset), + ppc_trunc_page(args->task_info.base_addr + + args->task_info.regions[i].offset), + ppc_round_page(args->task_info.base_addr + + args->task_info.regions[i].offset + + args->task_info.regions[i].size), + args->task_info.regions[i].prot, + "\x10\1READ\2WRITE\3EXEC"); +#endif /* DEBUG */ + + (void)pmap_map( + ppc_trunc_page(args->task_info.base_addr + + args->task_info.regions[i].offset), + ppc_trunc_page(args->task_info.base_addr + + args->task_info.regions[i].offset), + ppc_round_page(args->task_info.base_addr + + args->task_info.regions[i].offset + + args->task_info.regions[i].size), + args->task_info.regions[i].prot); + + /* Count the size of mapped space */ + boot_size += args->task_info.regions[i].size; + + /* There may be an overlapping physical page + * mapped to two different virtual addresses + */ + if (boot_task_end_offset > + 
args->task_info.regions[i].offset) { + boot_size -= boot_task_end_offset - + args->task_info.regions[i].offset; +#if DEBUG + printf("WARNING - bootstrap overlaps regions\n"); +#endif /* DEBUG */ + } + + boot_task_end_offset = + args->task_info.regions[i].offset + + args->task_info.regions[i].size; + } + } + + if (boot_region_count) { + + /* Add a new region to the bootstrap task for it's stack */ + args->task_info.regions[boot_region_count].addr = + BOOT_STACK_BASE; + args->task_info.regions[boot_region_count].size = + BOOT_STACK_SIZE; + args->task_info.regions[boot_region_count].mapped = FALSE; + boot_region_count++; + + boot_start = args->task_info.base_addr; + boot_region_desc = (vm_offset_t) args->task_info.regions; + /* TODO NMGS need to put param info onto top of boot stack */ + boot_task_thread_state.r1 = BOOT_STACK_PTR-0x100; + boot_task_thread_state.srr0 = args->task_info.entry; + boot_task_thread_state.srr1 = + MSR_MARK_SYSCALL(MSR_EXPORT_MASK_SET); + + boot_thread_state_flavor = PPC_THREAD_STATE; + boot_thread_state_count = PPC_THREAD_STATE_COUNT; + boot_thread_state = + (thread_state_t)&boot_task_thread_state; + } + + + +} + diff --git a/bsd/dev/ppc/stubs.c b/bsd/dev/ppc/stubs.c new file mode 100644 index 000000000..630a1b0b6 --- /dev/null +++ b/bsd/dev/ppc/stubs.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 by Apple Computer, Inc., all rights reserved + * Copyright (c) 1993 NeXT Computer, Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * copy a null terminated string from the kernel address space into + * the user address space. + * - if the user is denied write access, return EFAULT. + * - if the end of string isn't found before + * maxlen bytes are copied, return ENAMETOOLONG, + * indicating an incomplete copy. + * - otherwise, return 0, indicating success. + * the number of bytes copied is always returned in lencopied. + */ +int +copyoutstr(from, to, maxlen, lencopied) + void * from, * to; + size_t maxlen, *lencopied; +{ + int slen,len,error=0; + + /* XXX Must optimize this */ + + slen = strlen(from) + 1; + if (slen > maxlen) + error = ENAMETOOLONG; + + len = min(maxlen,slen); + if (copyout(from, to, len)) + error = EFAULT; + *lencopied = len; + + return error; +} + + +/* + * copy a null terminated string from one point to another in + * the kernel address space. + * - no access checks are performed. + * - if the end of string isn't found before + * maxlen bytes are copied, return ENAMETOOLONG, + * indicating an incomplete copy. + * - otherwise, return 0, indicating success. + * the number of bytes copied is always returned in lencopied. 
+ */ +/* from ppc/fault_copy.c -Titan1T4 VERSION */ +int +copystr(vfrom, vto, maxlen, lencopied) + register void * vfrom, *vto; + size_t maxlen, *lencopied; +{ + register unsigned l; + int error; +caddr_t from, to; + + from = vfrom; + to = vto; + for (l = 0; l < maxlen; l++) + if ((*to++ = *from++) == '\0') { + if (lencopied) + *lencopied = l + 1; + return 0; + } + if (lencopied) + *lencopied = maxlen; + return ENAMETOOLONG; +} + +int copywithin(src, dst, count) +void * src, *dst; +size_t count; +{ + bcopy(src,dst,count); + return 0; +} + +struct unix_syscallargs { + int flavor; + int r3; + int arg1, arg2,arg3,arg4,arg5,arg6,arg7; +}; + +set_bsduthreadargs(thread_t th, void * pcb, struct unix_syscallargs * sarg) +{ +struct uthread * ut; + + ut = get_bsdthread_info(th); + ut->uu_ar0 = (int *)pcb; + + if (sarg->flavor) + { + ut->uu_arg[0] = sarg->arg1; + ut->uu_arg[1] = sarg->arg2; + ut->uu_arg[2] = sarg->arg3; + ut->uu_arg[3] = sarg->arg4; + ut->uu_arg[4] = sarg->arg5; + ut->uu_arg[5] = sarg->arg6; + ut->uu_arg[7] = sarg->arg7; + } + else + { + ut->uu_arg[0] = sarg->r3; + ut->uu_arg[1] = sarg->arg1; + ut->uu_arg[2] = sarg->arg2; + ut->uu_arg[3] = sarg->arg3; + ut->uu_arg[4] = sarg->arg4; + ut->uu_arg[5] = sarg->arg5; + ut->uu_arg[6] = sarg->arg6; + ut->uu_arg[7] = sarg->arg7; + } + + return(1); +} + +void * +get_bsduthreadarg(thread_t th) +{ +struct uthread *ut; + ut = get_bsdthread_info(th); + return((void *)(ut->uu_arg)); +} + +int * +get_bsduthreadrval(thread_act_t th) +{ +struct uthread *ut; + ut = get_bsdthread_info(th); + return(&ut->uu_rval[0]); +} + diff --git a/bsd/dev/ppc/systemcalls.c b/bsd/dev/ppc/systemcalls.c new file mode 100644 index 000000000..4bd050722 --- /dev/null +++ b/bsd/dev/ppc/systemcalls.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * PowerPC Family: System Call handlers. + * + * HISTORY + * 27-July-97 A. Ramesh + * Adopted for Common Core. 
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+#define ERESTART -1 /* restart syscall */
+#define EJUSTRETURN -2 /* don't modify regs, just return */
+
+
+struct unix_syscallargs {
+ int flavor;
+ int r3;
+ int arg1, arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9;
+};
+extern struct sysent { /* system call table */
+ int16_t sy_narg; /* number of args */
+ int16_t sy_parallel;/* can execute in parallel */
+ int32_t (*sy_call)(); /* implementing function */
+} sysent[];
+
+/*
+** Function: unix_syscall
+**
+** Inputs: pcb - pointer to Process Control Block
+** arg1 - arguments to mach system calls
+** arg2
+** arg3
+** arg4
+** arg5
+** arg6
+** arg7
+**
+** Outputs: none
+*/
+void
+unix_syscall(
+ struct pcb * pcb,
+ int arg1,
+ int arg2,
+ int arg3,
+ int arg4,
+ int arg5,
+ int arg6,
+ int arg7
+ )
+{
+ struct ppc_saved_state *regs;
+ thread_t thread;
+ struct proc *p;
+ struct sysent *callp;
+ int nargs, error;
+ unsigned short code;
+ int rval[2];
+ void *vt; /* NOTE(review): declaration restored — `vt` is assigned from get_bsduthreadarg() below but its declaration was evidently lost in extraction; confirm type against original source */
+ struct unix_syscallargs sarg;
+
+ if (!USERMODE(pcb->ss.srr1))
+ panic("unix_syscall");
+
+ regs = &pcb->ss;
+ thread = current_thread();
+
+
+ /*
+ ** Get index into sysent table
+ */
+ code = regs->r0;
+
+
+ /*
+ ** Set up call pointer
+ */
+ callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
+
+ sarg. flavor = (callp == sysent) ? 1: 0;
+ sarg. r3 = regs->r3;
+ sarg. arg1 = arg1;
+ sarg. arg2 = arg2;
+ sarg. arg3 = arg3;
+ sarg. arg4 = arg4;
+ sarg. arg5 = arg5;
+ sarg. arg6 = arg6;
+ sarg. arg7 = arg7;
+
+ set_bsduthreadargs(thread,pcb,&sarg);
+
+
+ if (callp->sy_narg > 8)
+ panic("unix_syscall: max arg count exceeded");
+
+ rval[0] = 0;
+
+ /* r4 is volatile, if we set it to regs->r4 here the child
+ * will have parents r4 after execve */
+ rval[1] = 0;
+
+ error = 0; /* Start with a good value */
+
+ /*
+ ** the PPC runtime calls cerror after every unix system call, so
+ ** assume no error and adjust the "pc" to skip this call.
+ ** It will be set back to the cerror call if an error is detected. + */ + regs->srr0 += 4; + vt = get_bsduthreadarg(thread); + p = ((struct proc *)get_bsdtask_info(current_task())); + error = (*(callp->sy_call))(p, (caddr_t)vt, rval); + + if (error == ERESTART) { + regs->srr0 -= 8; + } + else if (error != EJUSTRETURN) { + if (error) + { + regs->r3 = error; + /* set the "pc" to execute cerror routine */ + regs->srr0 -= 4; + } else { /* (not error) */ + regs->r3 = rval[0]; + regs->r4 = rval[1]; + } + } + /* else (error == EJUSTRETURN) { nothing } */ + + thread_exception_return(); + /* NOTREACHED */ + +} + diff --git a/bsd/dev/ppc/unix_signal.c b/bsd/dev/ppc/unix_signal.c new file mode 100644 index 000000000..894756641 --- /dev/null +++ b/bsd/dev/ppc/unix_signal.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#define C_REDZONE_LEN 224 +#define C_STK_ALIGN 16 +#define C_PARAMSAVE_LEN 64 +#define C_LINKAGE_LEN 48 +#define TRUNC_DOWN(a,b,c) (((((unsigned)a)-(b))/(c)) * (c)) + +/* + * Arrange for this process to run a signal handler + */ + + +struct sigregs { + struct ppc_saved_state ss; + struct ppc_float_state fs; +}; + +void +sendsig(p, catcher, sig, mask, code) + struct proc *p; + sig_t catcher; + int sig, mask; + u_long code; +{ + struct sigregs *p_regs; + struct sigcontext context, *p_context; + struct sigacts *ps = p->p_sigacts; + int framesize; + int oonstack; + unsigned long sp; + struct ppc_saved_state statep; + struct ppc_float_state fs; + unsigned long state_count; + struct thread *thread; + thread_act_t th_act; + unsigned long paramp,linkp; + + thread = current_thread(); + th_act = current_act(); + + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_get_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto bad; + } + state_count = PPC_FLOAT_STATE_COUNT; + if (act_machine_get_state(th_act, PPC_FLOAT_STATE, &fs, &state_count) != KERN_SUCCESS) { + goto bad; + } + + oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK; + + /* figure out where our new stack lives */ + if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack && + (ps->ps_sigonstack & sigmask(sig))) { + sp = (unsigned long)(ps->ps_sigstk.ss_sp); + sp += ps->ps_sigstk.ss_size; + ps->ps_sigstk.ss_flags |= SA_ONSTACK; + } + else + sp = statep.r1; + + // preserve RED ZONE area + sp = TRUNC_DOWN(sp, C_REDZONE_LEN, C_STK_ALIGN); + + // context goes first on stack + sp -= sizeof(*p_context); + p_context = (struct sigcontext *) sp; + + // next are the saved registers + sp -= sizeof(*p_regs); + p_regs = (struct sigregs *)sp; + + // C calling conventions, create param save and linkage + // areas + + sp = TRUNC_DOWN(sp, C_PARAMSAVE_LEN, C_STK_ALIGN); + paramp = sp; + sp -= 
C_LINKAGE_LEN; + linkp = sp; + + /* fill out sigcontext */ + context.sc_onstack = oonstack; + context.sc_mask = mask; + context.sc_ir = statep.srr0; + context.sc_psw = statep.srr1; + context.sc_regs = p_regs; + + /* copy info out to user space */ + if (copyout((caddr_t)&context, (caddr_t)p_context, sizeof(context))) + goto bad; + if (copyout((caddr_t)&statep, (caddr_t)&p_regs->ss, + sizeof(struct ppc_saved_state))) + goto bad; + if (copyout((caddr_t)&fs, (caddr_t)&p_regs->fs, + sizeof(struct ppc_float_state))) + goto bad; + + /* Place our arguments in arg registers: rtm dependent */ + + statep.r3 = (unsigned long)sig; + statep.r4 = (unsigned long)code; + statep.r5 = (unsigned long)p_context; + + statep.srr0 = (unsigned long)catcher; + statep.srr1 = get_msr_exportmask(); /* MSR_EXPORT_MASK_SET */ + statep.r1 = sp; + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_set_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto bad; + } + + return; + +bad: + SIGACTION(p, SIGILL) = SIG_DFL; + sig = sigmask(SIGILL); + p->p_sigignore &= ~sig; + p->p_sigcatch &= ~sig; + p->p_sigmask &= ~sig; + /* sendsig is called with signal lock held */ + psignal_lock(p, SIGILL, 0, 1); + return; +} + +/* + * System call to cleanup state after a signal + * has been taken. Reset signal mask and + * stack state from context left by sendsig (above). + * Return to previous pc and psl as specified by + * context left by sendsig. Check carefully to + * make sure that the user has not modified the + * psl to gain improper priviledges or to cause + * a machine fault. 
+ */ +struct sigreturn_args { + struct sigcontext *sigcntxp; +}; + +/* ARGSUSED */ +int +sigreturn(p, uap, retval) + struct proc *p; + struct sigreturn_args *uap; + int *retval; +{ + struct sigcontext context; + struct sigregs *p_regs; + int error; + struct thread *thread; + thread_act_t th_act; + struct ppc_saved_state statep; + struct ppc_float_state fs; + unsigned long state_count; + unsigned int nbits, rbits; + + thread = current_thread(); + th_act = current_act(); + + if (error = copyin(uap->sigcntxp, &context, sizeof(context))) { + return(error); + } + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_get_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + return(EINVAL); + } + state_count = PPC_FLOAT_STATE_COUNT; + if (act_machine_get_state(th_act, PPC_FLOAT_STATE, &fs, &state_count) != KERN_SUCCESS) { + return(EINVAL); + } + nbits = get_msr_nbits(); + rbits = get_msr_rbits(); + /* adjust the critical fields */ + /* make sure naughty bits are off */ + context.sc_psw &= ~(nbits); + /* make sure necessary bits are on */ + context.sc_psw |= (rbits); + +// /* we return from sigreturns as if we faulted in */ +// entry->es_flags = (entry->es_flags & ~ES_GATEWAY) | ES_TRAP; + + if (context.sc_regs) { + p_regs = (struct sigregs *)context.sc_regs; + if (error = copyin(&p_regs->ss, &statep, + sizeof(struct ppc_saved_state))) + return(error); + + if (error = copyin(&p_regs->fs, &fs, + sizeof(struct ppc_float_state))) + return(error); + + } + else { + statep.r1 = context.sc_sp; + } +// entry->es_general.saved.stack_pointer = context.sc_sp; + + if (context.sc_onstack & 01) + p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; + else + p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; + p->p_sigmask = context.sc_mask &~ sigcantmask; + statep.srr0 = context.sc_ir; + statep.srr1 = context.sc_psw; + + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_set_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + return(EINVAL); 
+ } + + state_count = PPC_FLOAT_STATE_COUNT; + if (act_machine_set_state(th_act, PPC_FLOAT_STATE, &fs, &state_count) != KERN_SUCCESS) { + return(EINVAL); + } + return (EJUSTRETURN); +} + +/* + * machine_exception() performs MD translation + * of a mach exception to a unix signal and code. + */ + +boolean_t +machine_exception( + int exception, + int code, + int subcode, + int *unix_signal, + int *unix_code +) +{ + switch(exception) { + + case EXC_BAD_INSTRUCTION: + *unix_signal = SIGILL; + *unix_code = code; + break; + + case EXC_ARITHMETIC: + *unix_signal = SIGFPE; + *unix_code = code; + break; + + case EXC_SOFTWARE: + if (code == EXC_PPC_TRAP) { + *unix_signal = SIGTRAP; + *unix_code = code; + break; + } else + return(FALSE); + + default: + return(FALSE); + } + + return(TRUE); +} + diff --git a/bsd/dev/ppc/unix_startup.c b/bsd/dev/ppc/unix_startup.c new file mode 100644 index 000000000..32ba91f5c --- /dev/null +++ b/bsd/dev/ppc/unix_startup.c @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992,7 NeXT Computer, Inc. 
+ * + * Unix data structure initialization. + */ + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +extern vm_map_t mb_map; + +/* + * Declare these as initialized data so we can patch them. + */ + +#ifdef NBUF +int nbuf = NBUF; +int niobuf = NBUF/2; +#else +int nbuf = 0; +int niobuf = 0; +#endif + +int srv = 0; /* Flag indicates a server boot when set */ +int ncl = 0; + +vm_map_t bufferhdr_map; + +void +bsd_startupearly() +{ + vm_offset_t firstaddr; + vm_size_t size; + kern_return_t ret; + + if (nbuf == 0) + nbuf = atop(mem_size / 100); /* 1% */ + if (nbuf > 8192) + nbuf = 8192; + if (nbuf < 256) + nbuf = 256; + + if (niobuf == 0) + niobuf = nbuf / 2; + if (niobuf > 4096) + niobuf = 4096; + if (niobuf < 128) + niobuf = 128; + + size = (nbuf + niobuf) * sizeof (struct buf); + size = round_page(size); + + ret = kmem_suballoc(kernel_map, + &firstaddr, + size, + FALSE, + TRUE, + &bufferhdr_map); + + if (ret != KERN_SUCCESS) + panic("Failed to create bufferhdr_map"); + + ret = kernel_memory_allocate(bufferhdr_map, + &firstaddr, + size, + 0, + KMA_HERE | KMA_KOBJECT); + + if (ret != KERN_SUCCESS) + panic("Failed to allocate bufferhdr_map"); + + buf = (struct buf * )firstaddr; + bzero(buf,size); + + if ((mem_size > (64 * 1024 * 1024)) || ncl) { + int scale; + extern u_long tcp_sendspace; + extern u_long tcp_recvspace; + + if ((nmbclusters = ncl) == 0) { + if ((nmbclusters = ((mem_size / 16) / MCLBYTES)) > 8192) + nmbclusters = 8192; + } + if ((scale = nmbclusters / NMBCLUSTERS) > 1) { + tcp_sendspace *= scale; + tcp_recvspace *= scale; + + if (tcp_sendspace > (32 * 1024)) + tcp_sendspace = 32 * 1024; + if (tcp_recvspace > (32 * 1024)) + tcp_recvspace = 32 * 1024; + } + } +} + +void +bsd_bufferinit() +{ + kern_return_t ret; + + cons.t_dev = makedev(12, 0); + + bsd_startupearly(); + + ret = kmem_suballoc(kernel_map, + &mbutl, + (vm_size_t) (nmbclusters * MCLBYTES), + FALSE, + TRUE, + &mb_map); + + if (ret != 
KERN_SUCCESS) + panic("Failed to allocate mb_map\n"); + + /* + * Set up buffers, so they can be used to read disk labels. + */ + bufinit(); +} + diff --git a/bsd/dev/ppc/xsumas.s b/bsd/dev/ppc/xsumas.s new file mode 100644 index 000000000..c83a688f1 --- /dev/null +++ b/bsd/dev/ppc/xsumas.s @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#define STANDALONE 0 + +#if STANDALONE +#include "asm.h" +#include "assym.h" +#include "proc_reg.h" /* For CACHE_LINE_SIZE */ + +#else + +#include +#if 0 +/* #include */ +#include /* For CACHE_LINE_SIZE */ +#endif 0 +#endif + +/* + * Reg 3 - Pointer to data + * Reg 4 - Length of data + * Reg 5 - Accumulated sum value + * Reg 6 - Starting on odd boundary flag (relative to byte 0 of the checksumed data) + */ + +ENTRY(xsum_assym, TAG_NO_FRAME_USED) + + mr r11, r6 ; Swapped flag + addi r8, 0, 0 + addi r10, 0, 0x1f + addi r7, 0, 1 + addic r7, r7, 0 ; This clears the carry bit! 
+ mr r12, r5 ; Save the passed-in checksum value + + /* + * Sum bytes before cache line boundary + */ + + cmpi cr0,0,r4,0 ; Check for length of 0 + beq Lleftovers + + and. r9, r3, r10 + beq Laligned32 ; 32 byte aligned + + andi. r9, r3, 0x3 + beq Laligned4 + + andi. r9, r3, 0x1 + beq Laligned2 ; 2 byte aligned + + addi r11, 0, 1 ; swap bytes at end + lbz r8, 0(r3) + add r3, r3, r7 + subf. r4, r7, r4 + beq Ldone + +Laligned2: + cmpi cr0,0,r4,2 ; If remaining length is less than two - go to wrap-up + blt Lleftovers + andi. r9, r3, 0x3 ; If aligned on a 4-byte boundary, go to that code + beq Laligned4 + lhz r5, 0(r3) ; Load and add a halfword to the checksum + adde r8, r8, r5 + slwi r7, r7, 1 + add r3, r3, r7 + subf. r4, r7, r4 + beq Ldone + + + /* + Add longwords up to the 32 byte boundary + */ + +Laligned4: + addi r7, 0, 4 +Lloop4: + cmpi cr0,0,r4,4 + blt Lleftovers + and. r9, r3, r10 + beq Laligned32 + lwz r5, 0(r3) + adde r8, r8, r5 + add r3, r3, r7 + subf. r4, r7, r4 + bne Lloop4 + b Ldone + + + /* + We're aligned on a 32 byte boundary now - add 8 longwords to checksum + until the remaining length is less than 32 + */ +Laligned32: + andis. r6, r4, 0xffff + bne Lmainloop + andi. r6, r4, 0xffe0 + beq Lleftovers + +Lmainloop: + addi r9, 0, 64 + addi r10, 0, 32 + cmpi cr0,0,r4,64 + blt Lnopretouch + dcbt r3, r10 ; Touch one cache-line ahead +Lnopretouch: + lwz r5, 0(r3) + + /* + * This is the main meat of the checksum. I attempted to arrange this code + * such that the processor would execute as many instructions as possible + * in parallel. + */ + +Lloop: + cmpi cr0,0,r4,96 + blt Lnotouch + dcbt r3, r9 ; Touch two cache lines ahead +Lnotouch: + adde r8, r8, r5 + lwz r5, 4(r3) + lwz r6, 8(r3) + lwz r7, 12(r3) + adde r8, r8, r5 + lwz r5, 16(r3) + adde r8, r8, r6 + lwz r6, 20(r3) + adde r8, r8, r7 + lwz r7, 24(r3) + adde r8, r8, r5 + lwz r5, 28(r3) + add r3, r3, r10 + adde r8, r8, r6 + adde r8, r8, r7 + adde r8, r8, r5 + subf r4, r10, r4 + andi. 
r6, r4, 0xffe0 + beq Lleftovers + lwz r5, 0(r3) + b Lloop + + /* + * Handle whatever bytes are left + */ + +Lleftovers: + /* + * Handle leftover bytes + */ + cmpi cr0,0,r4,0 + beq Ldone + + addi r7, 0, 1 + addi r10, 0, 0x7ffc + + and. r9, r4, r10 + bne Lfourormore + srw r10, r10, r7 + and. r9, r4, r10 + bne Ltwoormore + b Loneleft + +Lfourormore: + addi r10, 0, 4 + +Lfourloop: + lwz r5, 0(r3) + adde r8, r8, r5 + add r3, r3, r10 + subf r4, r10, r4 + andi. r6, r4, 0xfffc + bne Lfourloop + +Ltwoormore: + andi. r6, r4, 0xfffe + beq Loneleft + lhz r5, 0(r3) + adde r8, r8, r5 + addi r3, r3, 2 + subi r4, r4, 2 + +Loneleft: + cmpi cr0,0,r4,0 + beq Ldone + lbz r5, 0(r3) + slwi r5, r5, 8 + adde r8, r8, r5 + + /* + * Wrap the longword around, adding the two 16-bit portions + * to each other along with any previous and subsequent carries. + */ +Ldone: + addze r8, r8 ; Add the carry + addze r8, r8 ; Add the carry again (the last add may have carried) + andis. r6, r8, 0xffff ; Stuff r6 with the high order 16 bits of sum word + srwi r6, r6, 16 ; Shift it to the low order word + andi. r8, r8, 0xffff ; Zero out the high order word + add r8, r8, r6 ; Add the two halves + + andis. r6, r8, 0xffff ; Do the above again in case we carried into the + srwi r6, r6, 16 ; high order word with the last add. + andi. r8, r8, 0xffff + add r3, r8, r6 + + cmpi cr0,0,r11,0 ; Check to see if we need to swap the bytes + beq Ldontswap + + /* + * Our buffer began on an odd boundary, so we need to swap + * the checksum bytes. + */ + slwi r8, r3, 8 ; shift byte 0 to byte 1 + clrlwi r8, r8, 16 ; Clear top 16 bits + srwi r3, r3, 8 ; shift byte 1 to byte 0 + or r3, r8, r3 ; or them + +Ldontswap: + add r3, r3, r12 ; Add in the passed-in checksum + andis. r6, r3, 0xffff ; Wrap and add any carries into the top 16 bits + srwi r6, r6, 16 + andi. r3, r3, 0xffff + add r3, r3, r6 + + andis. r6, r3, 0xffff ; Do the above again in case we carried into the + srwi r6, r6, 16 ; high order word with the last add. + andi. 
r3, r3, 0xffff + add r3, r3, r6 + blr + + diff --git a/bsd/hfs/MacOSStubs.c b/bsd/hfs/MacOSStubs.c new file mode 100644 index 000000000..0cca15c9e --- /dev/null +++ b/bsd/hfs/MacOSStubs.c @@ -0,0 +1,612 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)MacOSStubs.c 4.0 +* +* (c) 1997-1999 Apple Computer, Inc. All Rights Reserved +* +* MacOSStubs.c -- Contains routines called by MacOS code, that is not defined. +* +* HISTORY +* 9-9-99 Don Brady Don't use MNT_WAIT in C_FlushMDB. +* 9-Mar-1999 Don Brady Remove more obsolete routines, add ClearMemory(bzero). +* 20-Nov-1998 Don Brady Remove UFSToHFSStr and HFSToUFSStr routines (obsolete). +* 31-Aug-1998 Don Brady Move DST adjustments to GetTimeLocal (radar #2265075). +* 28-Jul-1998 Don Brady Add GetDiskBlocks routine (radar #2258148). +* 23-Jul-1998 Don Brady Use bdwrite instead of bwrite for default in RelBlock_glue (radar #2257225). +* 7-Jul-1998 Don Brady Remove character mappings from/to hfs (ufs_hfs and hfs_ufs tables). 
+* 22-Jun-1998 Pat Dirks Added the vice versa mappings in ufs_hfs and hfs_ufs to more +* thoroughly interchange ":" and "/" in name strings. +* 4-Jun-1998 Pat Dirks Changed to do all B*-Tree writes synchronously (FORCESYNCBTREEWRITES = 1) +* 4-jun-1998 Don Brady Use VPUT macro instead of vput. +* 6-may-1998 Don Brady Bump h_devvp refcount in GetInitializedVNode (radar #2232480). +* 27-apr-1998 Don Brady Change printf to kprintf. +* 23-Apr-1998 Pat Dirks Cleaned up GetBlock_glue to add brelse on I/O errors from bread. +* 23-apr-1998 Don Brady Add '/' to ':' mapping and vice versa to mapping tables. +* 21-apr-1998 Don Brady Clean up time/date conversion routines. +* 11-apr-1998 Don Brady Add RequireFileLock routine. +* 8-apr-1998 Don Brady C_FlushMDB now calls hfs_flushvolumeheader and hfs_flushMDB. +* 12-nov-1997 Scott Roberts +* Initially created file. +* +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hfs.h" +#include "hfs_dbg.h" + + +#include "hfscommon/headers/FileMgrInternal.h" + +extern int (**hfs_vnodeop_p)(void *); + + +/* + * gTimeZone should only be used for HFS volumes! + * It is initialized when an HFS volume is mounted. + */ +struct timezone gTimeZone = {8*60,1}; + + +/*************************************************************************************/ + +/*************************************************************************************/ +/* + * The following two routines work in tandem: StoreBufferMapping stores + * successive buffer address -> buffer pointer mappings in a circular + * match list, advancing the list index forward each time, while LookupBufferMapping + * looks backwards through the list to look up a particular mapping (which is + * typically the entry currently pointed to by gBufferAddress). 
+ * + */ +static void StoreBufferMapping(caddr_t bufferAddress, struct buf *bp) +{ + int i; + + DBG_ASSERT(gBufferListIndex >= 0); + DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE); + + simple_lock(&gBufferPtrListLock); + + /* We've got at most BUFFERPTRLISTSIZE tries at this... */ + for (i = BUFFERPTRLISTSIZE; i > 0; --i) { + if (gBufferAddress[gBufferListIndex] == NULL) { + gBufferAddress[gBufferListIndex] = bufferAddress; + gBufferHeaderPtr[gBufferListIndex] = bp; + break; + } + gBufferListIndex = (gBufferListIndex + 1) % BUFFERPTRLISTSIZE; + }; + + if (i == 0) { + panic("StoreBufferMapping: couldn't find an empty slot in buffer list."); + }; + + DBG_ASSERT(gBufferListIndex >= 0); + DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE); + + simple_unlock(&gBufferPtrListLock); +} + + +/*static*/ OSErr LookupBufferMapping(caddr_t bufferAddress, struct buf **bpp, int *mappingIndexPtr) +{ + OSErr err = E_NONE; + int i; + int listIndex = gBufferListIndex; + struct buf *bp = NULL; + + DBG_ASSERT(gBufferListIndex >= 0); + DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE); + + simple_lock(&gBufferPtrListLock); + + /* We've got at most BUFFERPTRLISTSIZE tries at this... 
*/ + for (i = BUFFERPTRLISTSIZE; i > 0; --i) { + if (gBufferAddress[listIndex] == bufferAddress) { + *mappingIndexPtr = listIndex; + bp = gBufferHeaderPtr[listIndex]; + break; + }; + + listIndex = (listIndex - 1); + if (listIndex < 0) { + listIndex = BUFFERPTRLISTSIZE - 1; + }; + }; + + if (bp == NULL) { + DEBUG_BREAK_MSG(("LookupBufferMapping: couldn't find buffer header for buffer in list.\n")); + err = -1; + }; + + DBG_ASSERT(gBufferListIndex >= 0); + DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE); + + simple_unlock(&gBufferPtrListLock); + + *bpp = bp; + return err; +} + + +static void ReleaseMappingEntry(int entryIndex) { + + DBG_ASSERT(gBufferListIndex >= 0); + DBG_ASSERT(gBufferListIndex < BUFFERPTRLISTSIZE); + + simple_lock(&gBufferPtrListLock); + gBufferAddress[entryIndex] = NULL; + simple_unlock(&gBufferPtrListLock); +}; +#if HFS_DIAGNOSTIC +#define DBG_GETBLOCK 0 +#else +#define DBG_GETBLOCK 0 +#endif + +OSErr GetBlock_glue (UInt16 options, UInt32 blockNum, Ptr *baddress, FileReference fileRefNum, ExtendedVCB * vcb) +{ + int status; + struct buf *bp = NULL; + int readcount = 0; + +#if DBG_GETBLOCK + DBG_IO(("Getting block %ld with options %d and a refnum of %x\n", blockNum, options, fileRefNum )); +#endif + + if ((options & ~(gbReadMask | gbNoReadMask)) != 0) { + DEBUG_BREAK_MSG(("GetBlock_glue: options = 0x%04X.\n", options)); + }; + + *baddress = NULL; + + if (options & gbNoReadMask) { + if (fileRefNum == NULL) { + bp = getblk (VCBTOHFS(vcb)->hfs_devvp, + IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size), + IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size), + 0, + 0, + BLK_META); + } else { + bp = getblk (fileRefNum, + IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size), + IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size), + 0, + 0, + BLK_META); + }; + status = E_NONE; + } else { + do { + if (fileRefNum == NULL) { + status = meta_bread (VCBTOHFS(vcb)->hfs_devvp, + 
IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size), + IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size), + NOCRED, + &bp); + } else { + status = meta_bread (fileRefNum, + IOBLKNOFORBLK(blockNum, VCBTOHFS(vcb)->hfs_phys_block_size), + IOBYTECCNTFORBLK(blockNum, kHFSBlockSize, VCBTOHFS(vcb)->hfs_phys_block_size), + NOCRED, + &bp); + }; + if (status != E_NONE) { + if (bp) brelse(bp); + goto Error_Exit; + }; + + if (bp == NULL) { + status = -1; + goto Error_Exit; + }; + + ++readcount; + + if ((options & gbReadMask) && (bp->b_flags & B_CACHE)) { + /* Rats! The block was found in the cache just when we really wanted a + fresh copy off disk... + */ + if (bp->b_flags & B_DIRTY) { + DEBUG_BREAK_MSG(("GetBlock_glue: forced read for dirty block!\n")) + }; + bp->b_flags |= B_INVAL; + brelse(bp); + + /* Fall through and try again until we get a fresh copy from the disk... */ + }; + } while (((options & gbReadMask) != 0) && (readcount <= 1)); + }; + + *baddress = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VCBTOHFS(vcb)->hfs_phys_block_size); + StoreBufferMapping(*baddress, bp); + +Error_Exit: ; + return status; +} + +void MarkBlock_glue (Ptr address) +{ + int err; + struct buf *bp = NULL; + int mappingEntry; + + if ((err = LookupBufferMapping(address, &bp, &mappingEntry))) { + panic("Failed to find buffer pointer for buffer in MarkBlock_glue."); + } else { + bp->b_flags |= B_DIRTY; + }; +} + +OSErr RelBlock_glue (Ptr address, UInt16 options ) +{ + int err; + struct buf *bp; + int mappingEntry; + + if (options & ~(rbTrashMask | rbDirtyMask | rbWriteMask) == 0) { + DEBUG_BREAK_MSG(("RelBlock_glue: options = 0x%04X.\n", options)); + }; + + if ((err = LookupBufferMapping(address, &bp, &mappingEntry))) { + DEBUG_BREAK_MSG(("Failed to find buffer pointer for buffer in RelBlock_glue.\n")); + } else { + if (bp->b_flags & B_DIRTY) { + /* The buffer was previously marked dirty (using MarkBlock_glue): + now's the time to write it. 
*/ + options |= rbDirtyMask; + }; + ReleaseMappingEntry(mappingEntry); + if (options & rbTrashMask) { + bp->b_flags |= B_INVAL; + brelse(bp); + } else { + if (options & (rbDirtyMask | rbWriteMask)) { + bp->b_flags |= B_DIRTY; + if (options & rbWriteMask) { + bwrite(bp); + } else { + bdwrite(bp); + } + } else { + brelse(bp); + }; + }; + err = E_NONE; + }; + return err; +} + +/* */ +/* Creates a new vnode to hold a psuedo file like an extents tree file */ +/* */ + +OSStatus GetInitializedVNode(struct hfsmount *hfsmp, struct vnode **tmpvnode, int init_ubc) +{ + + struct hfsnode *hp; + struct vnode *vp = NULL; + int rtn; + + DBG_ASSERT(hfsmp != NULL); + DBG_ASSERT(tmpvnode != NULL); + + /* Allocate a new hfsnode. */ + /* + * Must do malloc() before getnewvnode(), since malloc() can block + * and could cause other part of the system to access v_data + * which has not been initialized yet + */ + MALLOC_ZONE(hp, struct hfsnode *, sizeof(struct hfsnode), M_HFSNODE, M_WAITOK); + if(hp == NULL) { + rtn = ENOMEM; + goto Err_Exit; + } + bzero((caddr_t)hp, sizeof(struct hfsnode)); + lockinit(&hp->h_lock, PINOD, "hfsnode", 0, 0); + + MALLOC_ZONE(hp->h_meta, struct hfsfilemeta *, + sizeof(struct hfsfilemeta), M_HFSFMETA, M_WAITOK); + /* Allocate a new vnode. */ + if ((rtn = getnewvnode(VT_HFS, HFSTOVFS(hfsmp), hfs_vnodeop_p, &vp))) { + FREE_ZONE(hp->h_meta, sizeof(struct hfsfilemeta), M_HFSFMETA); + FREE_ZONE(hp, sizeof(struct hfsnode), M_HFSNODE); + goto Err_Exit; + } + + /* Init the structure */ + bzero(hp->h_meta, sizeof(struct hfsfilemeta)); + + hp->h_vp = vp; /* Make HFSTOV work */ + hp->h_meta->h_devvp = hfsmp->hfs_devvp; + hp->h_meta->h_dev = hfsmp->hfs_raw_dev; + hp->h_meta->h_usecount++; + hp->h_nodeflags |= IN_ACCESS | IN_CHANGE | IN_UPDATE; +#if HFS_DIAGNOSTIC + hp->h_valid = HFS_VNODE_MAGIC; +#endif + vp->v_data = hp; /* Make VTOH work */ + vp->v_type = VREG; + /* + * Metadata files are VREG but not available for IO + * through mapped IO as will as POSIX IO APIs. 
+ * Hence we do not initialize UBC for those files + */ + if (init_ubc) + ubc_info_init(vp); + else + vp->v_ubcinfo = UBC_NOINFO; + + *tmpvnode = vp; + + VREF(hp->h_meta->h_devvp); + + return noErr; + +Err_Exit: + + *tmpvnode = NULL; + + return rtn; +} + +OSErr GetNewFCB(ExtendedVCB *vcb, FileReference* fRefPtr) +{ + OSErr err; + + err = GetInitializedVNode( VCBTOHFS(vcb), fRefPtr, 0 ); + panic("This node is not completely initialized in GetNewFCB!"); /* XXX SER */ + + return( err ); +} + + +OSErr CheckVolumeOffLine( ExtendedVCB *vcb ) +{ + + return( 0 ); +} + + +OSErr C_FlushMDB( ExtendedVCB *volume) +{ + short err; + + if (volume->vcbSigWord == kHFSPlusSigWord) + err = hfs_flushvolumeheader(VCBTOHFS(volume), 0); + else + err = hfs_flushMDB(VCBTOHFS(volume), 0); + + return err; +} + + +/* + * GetTimeUTC - get the GMT Mac OS time (in seconds since 1/1/1904) + * + * called by the Catalog Manager when creating/updating HFS Plus records + */ +UInt32 GetTimeUTC(void) +{ + return (time.tv_sec + MAC_GMT_FACTOR); +} + +/* + * GetTimeLocal - get the local Mac OS time (in seconds since 1/1/1904) + * + * called by the Catalog Manager when creating/updating HFS records + */ +UInt32 GetTimeLocal(Boolean forHFS) +{ + UInt32 localTime; + + localTime = UTCToLocal(GetTimeUTC()); + + if (forHFS && gTimeZone.tz_dsttime) + localTime += 3600; + + return localTime; +} + +/* + * LocalToUTC - convert from Mac OS local time to Mac OS GMT time. + * This should only be called for HFS volumes (not for HFS Plus). + */ +UInt32 LocalToUTC(UInt32 localTime) +{ + UInt32 gtime = localTime; + + if (gtime != 0) { + gtime += (gTimeZone.tz_minuteswest * 60); + /* + * We no longer do DST adjustments here since we don't + * know if time supplied needs adjustment! + * + * if (gTimeZone.tz_dsttime) + * gtime -= 3600; + */ + } + return (gtime); +} + +/* + * UTCToLocal - convert from Mac OS GMT time to Mac OS local time. + * This should only be called for HFS volumes (not for HFS Plus). 
+ */ +UInt32 UTCToLocal(UInt32 utcTime) +{ + UInt32 ltime = utcTime; + + if (ltime != 0) { + ltime -= (gTimeZone.tz_minuteswest * 60); + /* + * We no longer do DST adjustments here since we don't + * know if time supplied needs adjustment! + * + * if (gTimeZone.tz_dsttime) + * ltime += 3600; + */ + } + return (ltime); +} + +/* + * to_bsd_time - convert from Mac OS time (seconds since 1/1/1904) + * to BSD time (seconds since 1/1/1970) + */ +u_int32_t to_bsd_time(u_int32_t hfs_time) +{ + u_int32_t gmt = hfs_time; + + if (gmt > MAC_GMT_FACTOR) + gmt -= MAC_GMT_FACTOR; + else + gmt = 0; /* don't let date go negative! */ + + return gmt; +} + +/* + * to_hfs_time - convert from BSD time (seconds since 1/1/1970) + * to Mac OS time (seconds since 1/1/1904) + */ +u_int32_t to_hfs_time(u_int32_t bsd_time) +{ + u_int32_t hfs_time = bsd_time; + + /* don't adjust zero - treat as uninitialzed */ + if (hfs_time != 0) + hfs_time += MAC_GMT_FACTOR; + + return (hfs_time); +} + + +void BlockMoveData (const void *srcPtr, void *destPtr, Size byteCount) +{ + bcopy(srcPtr, destPtr, byteCount); +} + + +Ptr NewPtrSysClear (Size byteCount) +{ + Ptr tmptr; + MALLOC (tmptr, Ptr, byteCount, M_TEMP, M_WAITOK); + if (tmptr) + bzero(tmptr, byteCount); + return tmptr; +} + + + +Ptr NewPtr (Size byteCount) +{ + Ptr tmptr; + MALLOC (tmptr, Ptr, byteCount, M_TEMP, M_WAITOK); + return tmptr; +} + + +void DisposePtr (Ptr p) +{ + FREE (p, M_TEMP); +} + + +void DebugStr (ConstStr255Param debuggerMsg) +{ + kprintf ("*** Mac OS Debugging Message: %s\n", &debuggerMsg[1]); + DEBUG_BREAK; +} + +OSErr MemError (void) +{ + return 0; +} + + +void ClearMemory( void* start, UInt32 length ) +{ + bzero(start, (size_t)length); +} + + +/* + * RequireFileLock + * + * Check to see if a vnode is locked in the current context + * This is to be used for debugging purposes only!! 
+ */ +#if HFS_DIAGNOSTIC +void RequireFileLock(FileReference vp, int shareable) +{ + struct lock__bsd__ *lkp; + int locked = false; + pid_t pid; + void * self; + + pid = current_proc()->p_pid; + self = (void *) current_thread(); + lkp = &VTOH(vp)->h_lock; + +return; + + simple_lock(&lkp->lk_interlock); + + if (shareable && (lkp->lk_sharecount > 0) && (lkp->lk_lockholder == LK_NOPROC)) + locked = true; + else if ((lkp->lk_exclusivecount > 0) && (lkp->lk_lockholder == pid) && (lkp->lk_lockthread == self)) + locked = true; + + simple_unlock(&lkp->lk_interlock); + + if (!locked) { + DBG_VFS((" # context... self=0x%0X, pid=0x%0X, proc=0x%0X\n", (int)self, pid, (int)current_proc())); + DBG_VFS((" # lock state... thread=0x%0X, holder=0x%0X, ex=%d, sh=%d\n", (int)lkp->lk_lockthread, lkp->lk_lockholder, lkp->lk_exclusivecount, lkp->lk_sharecount)); + + switch (H_FILEID(VTOH(vp))) { + case 3: + DEBUG_BREAK_MSG((" #\n # RequireFileLock: extent btree vnode not locked! v: 0x%08X\n #\n", (u_int)vp)); + break; + + case 4: + DEBUG_BREAK_MSG((" #\n # RequireFileLock: catalog btree vnode not locked! v: 0x%08X\n #\n", (u_int)vp)); + break; + + default: + DEBUG_BREAK_MSG((" #\n # RequireFileLock: file (%d) not locked! 
v: 0x%08X\n #\n", H_FILEID(VTOH(vp)), (u_int)vp)); + break; + } + } +} +#endif + diff --git a/bsd/hfs/Makefile b/bsd/hfs/Makefile new file mode 100644 index 000000000..fe40106ac --- /dev/null +++ b/bsd/hfs/Makefile @@ -0,0 +1,43 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + hfscommon + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + hfscommon + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + hfs_encodings.h hfs_format.h hfs_mount.h + +PRIVATE_DATAFILES = \ + hfs.h hfs_macos_defs.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = hfs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = hfs + +INSTALL_MI_LCL_LIST = ${DATAFILES} ${PRIVATE_DATAFILES} + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/hfs/hfs.h b/bsd/hfs/hfs.h new file mode 100644 index 000000000..9c5b866e9 --- /dev/null +++ b/bsd/hfs/hfs.h @@ -0,0 +1,786 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)hfs.h 3.0 +* +* (c) 1990, 1992 NeXT Computer, Inc. All Rights Reserved +* (c) 1997-1999 Apple Computer, Inc. All Rights Reserved +* +* hfs.h -- constants, structures, function declarations. etc. +* for Macintosh file system vfs. +* +* HISTORY +* 12-Aug-1999 Scott Roberts Merge into HFSStruct, the FCB +* 6-Jun-1999 Don Brady Minor cleanup of hfsmount struct. +* 22-Mar-1999 Don Brady For POSIX delete semantics: add private metadata strings. +* 13-Jan-1999 Don Brady Add ATTR_CMN_SCRIPT to HFS_ATTR_CMN_LOOKUPMASK (radar #2296613). +* 20-Nov-1998 Don Brady Remove UFSToHFSStr and HFSToUFSStr prototypes (obsolete). +* Move filename entry from FCB to hfsfilemeta, hfsdirentry +* names are now 255 byte long. +* 10-Nov-1998 Pat Dirks Added MAXLOGBLOCKSIZE and MAXLOGBLOCKSIZEBLOCKS and RELEASE_BUFFER flag. +* Added hfsLogicalBlockTableEntry and h_logicalblocktable field in struct hfsnode. +* 4-Sep-1998 Pat Dirks Added hfs_log_block_size to hfsmount struct [again] and BestBlockSizeFit routine. +* 31-aug-1998 Don Brady Add UL to MAC_GMT_FACTOR constant. +* 04-jun-1998 Don Brady Add hfsMoveRename prototype to replace hfsMove and hfsRename. +* Add VRELE and VPUT macros to catch bad ref counts. +* 28-May-1998 Pat Dirks Move internal 'struct searchinfo' def'n here from attr.h +* 03-may-1998 Brent Knight Add gTimeZone. +* 23-apr-1998 Don Brady Add File type and creator for symbolic links. +* 22-apr-1998 Don Brady Removed kMetaFile. +* 21-apr-1998 Don Brady Add to_bsd_time and to_hfs_time prototypes. +* 20-apr-1998 Don Brady Remove course-grained hfs metadata locking. +* 15-apr-1998 Don Brady Add hasOverflowExtents and hfs_metafilelocking prototypes. Add kSysFile constant. +* 14-apr-1998 Deric Horn Added searchinfo_t, definition of search criteria used by searchfs. 
+* 9-apr-1998 Don Brady Added hfs_flushMDB and hfs_flushvolumeheader prototypes. +* 8-apr-1998 Don Brady Add MAKE_VREFNUM macro. +* 26-mar-1998 Don Brady Removed CloseBTreeFile and OpenBtreeFile prototypes. +* +* 12-nov-1997 Scott Roberts Added changes for HFSPlus +*/ + +#ifndef __HFS__ +#define __HFS__ + +#include +#include +#include +#include + +#include + +#include +#include +#include + + +struct uio; // This is more effective than #include in case KERNEL is undefined... +struct hfslockf; // For advisory locking + +/* + * Just reported via MIG interface. + */ +#define VERSION_STRING "hfs-2 (4-12-99)" + +#define HFS_LINK_MAX 32767 + +/* + * Set to force READ_ONLY. + */ +#define FORCE_READONLY 0 + +enum { kMDBSize = 512 }; /* Size of I/O transfer to read entire MDB */ + +enum { kMasterDirectoryBlock = 2 }; /* MDB offset on disk in 512-byte blocks */ +enum { kMDBOffset = kMasterDirectoryBlock * 512 }; /* MDB offset on disk in bytes */ + +enum { + kUnknownID = 0, + kRootParID = 1, + kRootDirID = 2 +}; + +enum { + kUndefinedFork = 0, + kAnyFork, + kDataFork, + kRsrcFork, + kDirectory, + kSysFile, + kDefault +}; + + +/* + * File type and creator for symbolic links + */ +enum { + kSymLinkFileType = 0x736C6E6B, /* 'slnk' */ + kSymLinkCreator = 0x72686170 /* 'rhap' */ +}; + +#define BUFFERPTRLISTSIZE 25 + +extern char * gBufferAddress[BUFFERPTRLISTSIZE]; +extern struct buf *gBufferHeaderPtr[BUFFERPTRLISTSIZE]; +extern int gBufferListIndex; +extern simple_lock_data_t gBufferPtrListLock; + +extern struct timezone gTimeZone; + +/* Flag values for bexpand: */ +#define RELEASE_BUFFER 0x00000001 + + +/* Internal Data structures*/ + +struct vcb_t { + int16_t vcbFlags; + u_int16_t vcbSigWord; + u_int32_t vcbCrDate; + u_int32_t vcbLsMod; + int16_t vcbAtrb; + u_int16_t vcbNmFls; /* HFS only */ + int16_t vcbVBMSt; + int16_t vcbAlBlSt; + int32_t vcbClpSiz; + u_int32_t vcbNxtCNID; + u_int8_t vcbVN[256]; + int16_t vcbVRefNum; + u_int16_t vcbVSeqNum; + u_int32_t vcbVolBkUp; + int32_t 
vcbWrCnt; + u_int16_t vcbNmRtDirs; /* HFS only */ + u_int16_t vcbReserved; + int32_t vcbFilCnt; + int32_t vcbDirCnt; + int32_t vcbFndrInfo[8]; + struct vnode * extentsRefNum; + struct vnode * catalogRefNum; + struct vnode * allocationsRefNum; + u_int32_t blockSize; /* size of allocation blocks - vcbAlBlkSiz*/ + u_int32_t totalBlocks; /* number of allocation blocks in volume */ + u_int32_t freeBlocks; /* number of unused allocation blocks - vcbFreeBks*/ + u_int32_t nextAllocation; /* start of next allocation search - vcbAllocPtr*/ + u_int32_t altIDSector; /* location of alternate MDB/VH */ + u_int32_t hfsPlusIOPosOffset; /* Disk block where HFS+ starts */ + u_int32_t checkedDate; /* date and time of last disk check */ + u_int64_t encodingsBitmap; /* HFS Plus only*/ + u_int32_t volumeNameEncodingHint; /* Text encoding used for volume name*/ + char * hintCachePtr; /* points to this volumes heuristicHint cache*/ + u_int32_t localCreateDate; /* creation times for HFS+ volumes are in local time */ + simple_lock_data_t vcbSimpleLock; /* simple lock to allow concurrent access to vcb data */ +}; +typedef struct vcb_t ExtendedVCB; + +/* vcbFlags */ +#define kHFS_DamagedVolume 0x1 /* This volume has errors, unmount dirty */ +#define MARK_VOLUMEDAMAGED(fcb) FCBTOVCB((fcb))->vcbFlags |= kHFS_DamagedVolume; + + +/* + * NOTE: The code relies on being able to cast an ExtendedVCB* to a vfsVCB* in order + * to gain access to the mount point pointer from a pointer + * to an ExtendedVCB. DO NOT INSERT OTHER FIELDS BEFORE THE vcb FIELD!! + * + * vcbFlags, vcbLsMod, vcbFilCnt, vcbDirCnt, vcbNxtCNID, etc + * are locked by the hfs_lock simple lock. + */ +typedef struct vfsVCB { + ExtendedVCB vcb_vcb; + struct hfsmount *vcb_hfsmp; /* Pointer to hfsmount structure */ +} vfsVCB_t; + + + +/* This structure describes the HFS specific mount structure data. 
*/ +typedef struct hfsmount { + u_long hfs_mount_flags; + u_int8_t hfs_fs_clean; /* Whether contents have been flushed in clean state */ + u_int8_t hfs_fs_ronly; /* Whether this was mounted as read-initially */ + u_int8_t hfs_unknownpermissions; /* Whether this was mounted with MNT_UNKNOWNPERMISSIONS */ + + /* Physical Description */ + u_long hfs_phys_block_count; /* Num of PHYSICAL blocks of volume */ + u_long hfs_phys_block_size; /* Always a multiple of 512 */ + + /* Access to VFS and devices */ + struct mount *hfs_mp; /* filesystem vfs structure */ + struct vnode *hfs_devvp; /* block device mounted vnode */ + dev_t hfs_raw_dev; /* device mounted */ + struct netexport hfs_export; /* Export information */ + u_int32_t hfs_logBlockSize; /* Size of buffer cache buffer for I/O */ + + /* Default values for HFS standard and non-init access */ + uid_t hfs_uid; /* uid to set as owner of the files */ + gid_t hfs_gid; /* gid to set as owner of the files */ + mode_t hfs_dir_mask; /* mask to and with directory protection bits */ + mode_t hfs_file_mask; /* mask to and with file protection bits */ + u_long hfs_encoding; /* Defualt encoding for non hfs+ volumes */ + + /* simple lock for shared meta renaming */ + simple_lock_data_t hfs_renamelock; + + /* HFS Specific */ + struct vfsVCB hfs_vcb; + u_long hfs_private_metadata_dir; /* private/hidden directory for unlinked files */ + u_int32_t hfs_metadata_createdate; + hfs_to_unicode_func_t hfs_get_unicode; + unicode_to_hfs_func_t hfs_get_hfsname; +} hfsmount_t; + + +/***************************************************************************** +* +* hfsnode structure +* +* +* +*****************************************************************************/ + +#define MAXHFSVNODELEN 31 +typedef u_char FileNameStr[MAXHFSVNODELEN+1]; + +CIRCLEQ_HEAD(siblinghead, hfsnode) ; /* Head of the sibling list */ + + +struct hfsnode { + LIST_ENTRY(hfsnode) h_hash; /* links on valid files */ + CIRCLEQ_ENTRY(hfsnode) h_sibling; /* links on siblings 
*/ + struct lock__bsd__ h_lock; /* node lock. */ + union { + struct hfslockf *hu_lockf; /* Head of byte-level lock list. */ + void *hu_sysdata; /* private data for system files */ + } h_un; + struct vnode * h_vp; /* vnode associated with this inode. */ + struct hfsfilemeta * h_meta; /* Ptr to file meta data */ + u_int16_t h_nodeflags; /* flags, see below */ + u_int8_t h_type; /* Type of info: dir, data, rsrc */ + int8_t fcbFlags; /* FCB flags */ + u_int64_t fcbEOF; /* Logical length or EOF in bytes */ + u_int64_t fcbPLen; /* Physical file length in bytes */ + u_int64_t fcbMaxEOF; /* Maximum logical length or EOF in bytes */ + u_int32_t fcbClmpSize; /* Number of bytes per clump */ + HFSPlusExtentRecord fcbExtents; /* Extents of file */ + +#if HFS_DIAGNOSTIC + u_int32_t h_valid; /* is the vnode reference valid */ +#endif +}; +#define h_lockf h_un.hu_lockf +#define fcbBTCBPtr h_un.hu_sysdata + +typedef struct hfsnode FCB; + + +typedef struct hfsfilemeta { + struct siblinghead h_siblinghead; /* Head of the sibling list */ + simple_lock_data_t h_siblinglock; /* sibling list lock. */ + u_int32_t h_metaflags; /* IN_LONGNAME, etc */ + struct vnode *h_devvp; /* vnode for block I/O. */ + + dev_t h_dev; /* Device associated with the inode. */ + u_int32_t h_nodeID; /* specific id of this node */ + u_int32_t h_dirID; /* Parent Directory ID */ + u_int32_t h_hint; /* Catalog hint */ + + off_t h_size; /* Total physical size of object */ + u_int16_t h_usecount; /* How many siblings */ + u_int16_t h_mode; /* IFMT, permissions; see below. */ + u_int32_t h_pflags; /* Permission flags (NODUMP, IMMUTABLE, APPEND etc.) */ + u_int32_t h_uid; /* File owner. */ + u_int32_t h_gid; /* File group. */ + union { + dev_t hu_rdev; /* Special device info for this node */ + u_int32_t hu_indnodeno; /* internal indirect node number (never exported) */ + } h_spun; + u_int32_t h_crtime; /* BSD-format creation date in secs. */ + u_int32_t h_atime; /* BSD-format access date in secs. 
*/ + u_int32_t h_mtime; /* BSD-format mod date in seconds */ + u_int32_t h_ctime; /* BSD-format status change date */ + u_int32_t h_butime; /* BSD-format last backup date in secs. */ + u_int16_t h_nlink; /* link count (aprox. for dirs) */ + u_short h_namelen; /* Length of name string */ + char * h_namePtr; /* Points the name of the file */ + FileNameStr h_fileName; /* CName of file */ +} hfsfilemeta; +#define h_rdev h_spun.hu_rdev +#define h_indnodeno h_spun.hu_indnodeno + +#define H_EXTENDSIZE(VP,BYTES) ((VP)->h_meta->h_size += (BYTES)) +#define H_TRUNCSIZE(VP,BYTES) ((VP)->h_meta->h_size -= (BYTES)) + +#define MAKE_INODE_NAME(name,linkno) \ + (void) sprintf((name), "%s%d", HFS_INODE_PREFIX, (linkno)) + + +/* + * Macros for quick access to fields buried in the fcb inside an hfs node: + */ +#define H_FORKTYPE(HP) ((HP)->h_type) +#define H_FILEID(HP) ((HP)->h_meta->h_nodeID) +#define H_DIRID(HP) ((HP)->h_meta->h_dirID) +#define H_NAME(HP) ((HP)->h_meta->h_namePtr) +#define H_HINT(HP) ((HP)->h_meta->h_hint) +#define H_DEV(HP) ((HP)->h_meta->h_dev) + +/* These flags are kept in flags. */ +#define IN_ACCESS 0x0001 /* Access time update request. */ +#define IN_CHANGE 0x0002 /* Change time update request. */ +#define IN_UPDATE 0x0004 /* Modification time update request. */ +#define IN_MODIFIED 0x0008 /* Node has been modified. */ +#define IN_RENAME 0x0010 /* Node is being renamed. */ +#define IN_SHLOCK 0x0020 /* File has shared lock. */ +#define IN_EXLOCK 0x0040 /* File has exclusive lock. */ +#define IN_ALLOCATING 0x1000 /* vnode is in transit, wait or ignore */ +#define IN_WANT 0x2000 /* Its being waited for */ + +/* These flags are kept in meta flags. */ +#define IN_LONGNAME 0x0400 /* File has long name buffer. */ +#define IN_UNSETACCESS 0x0200 /* File has unset access. 
*/ +#define IN_DELETED 0x0800 /* File has been marked to be deleted */ +#define IN_NOEXISTS 0x1000 /* File has been deleted, catalog entry is gone */ +#if HFS_HARDLINKS +#define IN_DATANODE 0x2000 /* File is a data node (hard-linked) */ +#endif + + +/* File permissions stored in mode */ +#define IEXEC 0000100 /* Executable. */ +#define IWRITE 0000200 /* Writeable. */ +#define IREAD 0000400 /* Readable. */ +#define ISVTX 0001000 /* Sticky bit. */ +#define ISGID 0002000 /* Set-gid. */ +#define ISUID 0004000 /* Set-uid. */ + +/* File types */ +#define IFMT 0170000 /* Mask of file type. */ +#define IFIFO 0010000 /* Named pipe (fifo). */ +#define IFCHR 0020000 /* Character device. */ +#define IFDIR 0040000 /* Directory file. */ +#define IFBLK 0060000 /* Block device. */ +#define IFREG 0100000 /* Regular file. */ +#define IFLNK 0120000 /* Symbolic link. */ +#define IFSOCK 0140000 /* UNIX domain socket. */ +#define IFWHT 0160000 /* Whiteout. */ + +/* Value to make sure vnode is real and defined */ +#define HFS_VNODE_MAGIC 0x4846532b /* 'HFS+' */ + +/* To test wether the forkType is a sibling type */ +#define SIBLING_FORKTYPE(FORK) ((FORK==kDataFork) || (FORK==kRsrcFork)) + +/* + * Write check macro + */ +#define WRITE_CK(VNODE, FUNC_NAME) { \ + if ((VNODE)->v_mount->mnt_flag & MNT_RDONLY) { \ + DBG_ERR(("%s: ATTEMPT TO WRITE A READONLY VOLUME\n", \ + FUNC_NAME)); \ + return(EROFS); \ + } \ +} + + +/* + * hfsmount locking and unlocking. 
+ * + * mvl_lock_flags + */ +#define MVL_LOCKED 0x00000001 /* debug only */ + +#if HFS_DIAGNOSTIC +#define MVL_LOCK(mvip) { \ + (simple_lock(&(mvip)->mvl_lock)); \ + (mvip)->mvl_flags |= MVL_LOCKED; \ +} + +#define MVL_UNLOCK(mvip) { \ + if(((mvip)->mvl_flags & MVL_LOCKED) == 0) { \ + panic("MVL_UNLOCK - hfsnode not locked"); \ + } \ + (simple_unlock(&(mvip)->mvl_lock)); \ + (mvip)->mvl_flags &= ~MVL_LOCKED; \ +} +#else /* HFS_DIAGNOSTIC */ +#define MVL_LOCK(mvip) (simple_lock(&(mvip)->mvl_lock)) +#define MVL_UNLOCK(mvip) (simple_unlock(&(mvip)->mvl_lock)) +#endif /* HFS_DIAGNOSTIC */ + + +/* structure to hold a "." or ".." directory entry (12 bytes) */ +typedef struct hfsdotentry { + u_int32_t d_fileno; /* unique file number */ + u_int16_t d_reclen; /* length of this structure */ + u_int8_t d_type; /* dirent file type */ + u_int8_t d_namelen; /* len of filename */ + char d_name[4]; /* "." or ".." */ +} hfsdotentry; + +#define AVERAGE_HFSDIRENTRY_SIZE (8+22+4) +#define MAX_HFSDIRENTRY_SIZE sizeof(struct dirent) + +#define DIRENTRY_SIZE(namlen) \ + ((sizeof(struct dirent) - (NAME_MAX+1)) + (((namlen)+1 + 3) &~ 3)) + +enum { + kCatNameIsAllocated = 0x1, /* The name is malloc'd and is in cnm_nameptr */ + kCatNameIsMangled = 0x2, /* The name is mangled */ + kCatNameUsesReserved = 0x4, /* It overides the space reserved by cnm_namespace into cndu_extra, careful */ + kCatNameIsConsumed = 0x8, /* The name has been already processed, no freeing or work is needed */ + kCatNameNoCopyName = 0x10, /* Dont copy the name */ + kCatNameMangleName = 0x20 /* Mangle name if greater than passed in length */ +}; + +/* + * CatalogNameSpecifier is a structure that contains a name and possibly its form + * + * Special care needs to be taken with the flags, they can cause side effects. 
+ */ +struct CatalogNameSpecifier { + u_int16_t cnm_flags; /* See above */ + u_int16_t cnm_length; /* Length of the name */ + u_int32_t cnm_parID; /* ID of the parent directory */ + unsigned char *cnm_nameptr; /* If allocated, a ptr to the space, else NULL */ + unsigned char cnm_namespace[MAXHFSVNODELEN+1]; /* Space where the name can be kept */ +}; +/* + * NOTE IT IS REQUIRED that KMaxMangleNameLen >= MAXHFSVNODELEN + * Also the total size of CatalogNameSpecifier should be less then cndu_extra, which + * currently it easily is, this is not a requirement, just a nicety. + * + * The rules to how to store a name: + * If its less than MAXHFSVNODELEN always store it in cnm_namespace. + * If we can get by doing mangling then cnm_namespace + * else allocate the space needed to cnm_nameptr. + * This reflects what is done at vnode creation. + */ + + +enum { + kCatalogFolderNode = 1, + kCatalogFileNode = 2 +}; + +/* + * CatalogNodeData has same layout as the on-disk HFS Plus file/dir records. + * Classic hfs file/dir records are converted to match this layout. + * + * The cnd_extra padding allows big hfs plus thread records (520 bytes max) + * to be read onto this stucture during a cnid lookup. + * + * IMPORTANT!!!!!! + * After declaring this structure, you must use the macro INIT_CATALOGDATA to prepare it + * and CLEAN_CATALOGDATA after using it, to clean any allocated structures. 
+ * + * If you do not need to have the name, then pass in kCatNameNoCopyName for flags + */ +struct CatalogNodeData { + int16_t cnd_type; + u_int16_t cnd_flags; + u_int32_t cnd_valence; /* dirs only */ + u_int32_t cnd_nodeID; + u_int32_t cnd_createDate; + u_int32_t cnd_contentModDate; + u_int32_t cnd_attributeModDate; + u_int32_t cnd_accessDate; + u_int32_t cnd_backupDate; + u_int32_t cnd_ownerID; + u_int32_t cnd_groupID; + u_int8_t cnd_adminFlags; /* super-user changeable flags */ + u_int8_t cnd_ownerFlags; /* owner changeable flags */ + u_int16_t cnd_mode; /* file type + permission bits */ + union { + u_int32_t cndu_iNodeNum; /* indirect links only */ + u_int32_t cndu_linkCount; /* indirect nodes only */ + u_int32_t cndu_rawDevice; /* special files (FBLK and FCHR) only */ + } cnd_un; + u_int8_t cnd_finderInfo[32]; + u_int32_t cnd_textEncoding; + u_int32_t cnd_reserved; + HFSPlusForkData cnd_datafork; + HFSPlusForkData cnd_rsrcfork; + u_int32_t cnd_iNodeNumCopy; + u_int8_t cnd_extra[268]; /* make struct at least 520 bytes long */ + struct CatalogNameSpecifier cnd_namespecifier; +}; +typedef struct CatalogNodeData CatalogNodeData; + +#define cnd_iNodeNum cnd_un.cndu_iNodeNum +#define cnd_linkCount cnd_un.cndu_linkCount +#define cnd_rawDevice cnd_un.cndu_rawDevice + +#define cnm_flags cnd_namespecifier.cnm_flags +#define cnm_length cnd_namespecifier.cnm_length +#define cnm_parID cnd_namespecifier.cnm_parID +#define cnm_nameptr cnd_namespecifier.cnm_nameptr +#define cnm_namespace cnd_namespecifier.cnm_namespace + +#define INIT_CATALOGDATA(C,F) do { bzero(&((C)->cnd_namespecifier), sizeof(struct CatalogNameSpecifier)); (C)->cnm_flags=(F);}while(0); +#if HFS_DIAGNOSTIC +extern void debug_check_catalogdata(struct CatalogNodeData *cat); +#define CLEAN_CATALOGDATA(C) do { debug_check_catalogdata(C); \ + if ((C)->cnm_flags & kCatNameIsAllocated) {\ + FREE((C)->cnm_nameptr, M_TEMP);\ + (C)->cnm_flags &= ~kCatNameIsAllocated;\ + (C)->cnm_nameptr = NULL;\ + }}while(0); +#else 
+#define CLEAN_CATALOGDATA(C) do { if ((C)->cnm_flags & kCatNameIsAllocated) {\ + FREE((C)->cnm_nameptr, M_TEMP);\ + (C)->cnm_flags &= ~kCatNameIsAllocated;\ + (C)->cnm_nameptr = NULL;\ + }}while(0); +#endif + +/* structure to hold a catalog record information */ +/* Of everything you wanted to know about a catalog entry, file and directory */ +typedef struct hfsCatalogInfo { + CatalogNodeData nodeData; + u_int32_t hint; +} hfsCatalogInfo; + +enum { kHFSPlusMaxFileNameBytes = kHFSPlusMaxFileNameChars * 3 }; + +enum { kdirentMaxNameBytes = NAME_MAX }; + +// structure definition of the searchfs system trap for the search criterea. +struct directoryInfoSpec +{ + u_long numFiles; +}; + +struct fileInfoSpec +{ + off_t dataLogicalLength; + off_t dataPhysicalLength; + off_t resourceLogicalLength; + off_t resourcePhysicalLength; +}; + +struct searchinfospec +{ + u_char name[kHFSPlusMaxFileNameBytes]; + u_long nameLength; + char attributes; // see IM:Files 2-100 + u_long nodeID; + u_long parentDirID; + struct timespec creationDate; + struct timespec modificationDate; + struct timespec changeDate; + struct timespec lastBackupDate; + u_long finderInfo[8]; + uid_t uid; + gid_t gid; + mode_t mask; + struct fileInfoSpec f; + struct directoryInfoSpec d; +}; +typedef struct searchinfospec searchinfospec_t; + +#define HFSTIMES(hp, t1, t2) { \ + if ((hp)->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) { \ + (hp)->h_nodeflags |= IN_MODIFIED; \ + if ((hp)->h_nodeflags & IN_ACCESS) { \ + (hp)->h_meta->h_atime = (t1)->tv_sec; \ + }; \ + if ((hp)->h_nodeflags & IN_UPDATE) { \ + (hp)->h_meta->h_mtime = (t2)->tv_sec; \ + } \ + if ((hp)->h_nodeflags & IN_CHANGE) { \ + (hp)->h_meta->h_ctime = time.tv_sec; \ + }; \ + (hp)->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); \ + } \ +} + +/* This overlays the fid structure (see mount.h). */ +struct hfsfid { + u_int16_t hfsfid_len; /* Length of structure. */ + u_int16_t hfsfid_pad; /* Force 32-bit alignment. 
*/ + /* The following data is filesystem-dependent, up to MAXFIDSZ (16) bytes: */ + u_int32_t hfsfid_cnid; /* Catalog node ID. */ + u_int32_t hfsfid_gen; /* Generation number (create date). */ +}; + +/* macro to determine if hfs or hfsplus */ +#define ISHFSPLUS(VCB) ((VCB)->vcbSigWord == kHFSPlusSigWord) +#define ISHFS(VCB) ((VCB)->vcbSigWord == kHFSSigWord) + + +/* + * Various ways to acquire a VNode pointer: + */ +#define HTOV(HP) ((HP)->h_vp) + +/* + * Various ways to acquire an HFS Node pointer: + */ +#define VTOH(VP) ((struct hfsnode *)((VP)->v_data)) +#define FCBTOH(FCB) ((struct hfsnode *)FCB) + +/* + * Various ways to acquire an FCB pointer: + */ +#define HTOFCB(HP) (HP) +#define VTOFCB(VP) ((FCB *)((VP)->v_data)) /* Should be the same as VTOH */ + +/* + * Various ways to acquire a VFS mount point pointer: + */ +#define VTOVFS(VP) ((VP)->v_mount) +#define HTOVFS(HP) ((HP)->h_vp->v_mount) +#define FCBTOVFS(FCB) ((FCB)->h_vp->v_mount) +#define HFSTOVFS(HFSMP) ((HFSMP)->hfs_mp) +#define VCBTOVFS(VCB) (((struct vfsVCB *)(VCB))->vcb_hfsmp->hfs_mp) + +/* + * Various ways to acquire an HFS mount point pointer: + */ +#define VTOHFS(VP) ((struct hfsmount *)((VP)->v_mount->mnt_data)) +#define HTOHFS(HP) ((struct hfsmount *)(HP)->h_vp->v_mount->mnt_data) +#define FCBTOHFS(FCB) ((struct hfsmount *)(FCB)->h_vp->v_mount->mnt_data) +#define VFSTOHFS(MP) ((struct hfsmount *)(MP)->mnt_data) +#define VCBTOHFS(VCB) (((struct vfsVCB *)(VCB))->vcb_hfsmp) + +/* + * Various ways to acquire a VCB pointer: + */ +#define VTOVCB(VP) (&(((struct hfsmount *)((VP)->v_mount->mnt_data))->hfs_vcb.vcb_vcb)) +#define HTOVCB(HP) (&(((struct hfsmount *)((HP)->h_vp->v_mount->mnt_data))->hfs_vcb.vcb_vcb)) +#define FCBTOVCB(FCB) (&(((struct hfsmount *)((FCB)->h_vp->v_mount->mnt_data))->hfs_vcb.vcb_vcb)) +#define VFSTOVCB(MP) (&(((struct hfsmount *)(MP)->mnt_data)->hfs_vcb.vcb_vcb)) +#define HFSTOVCB(HFSMP) (&(HFSMP)->hfs_vcb.vcb_vcb) + + +#define E_NONE 0 +#define kHFSBlockSize 512 +#define 
kHFSBlockShift 9 /* 2^9 = 512 */ + +#define IOBLKNOFORBLK(STARTINGBLOCK, BLOCKSIZEINBYTES) ((daddr_t)((STARTINGBLOCK) / ((BLOCKSIZEINBYTES) >> 9))) +#define IOBLKCNTFORBLK(STARTINGBLOCK, BYTESTOTRANSFER, BLOCKSIZEINBYTES) \ + ((int)(IOBLKNOFORBYTE(((STARTINGBLOCK) * 512) + (BYTESTOTRANSFER) - 1, (BLOCKSIZEINBYTES)) - \ + IOBLKNOFORBLK((STARTINGBLOCK), (BLOCKSIZEINBYTES)) + 1)) +#define IOBYTECCNTFORBLK(STARTINGBLOCK, BYTESTOTRANSFER, BLOCKSIZEINBYTES) \ + (IOBLKCNTFORBLK((STARTINGBLOCK),(BYTESTOTRANSFER),(BLOCKSIZEINBYTES)) * (BLOCKSIZEINBYTES)) +#define IOBYTEOFFSETFORBLK(STARTINGBLOCK, BLOCKSIZEINBYTES) \ + (((STARTINGBLOCK) * 512) - \ + (IOBLKNOFORBLK((STARTINGBLOCK), (BLOCKSIZEINBYTES)) * (BLOCKSIZEINBYTES))) + +#define IOBLKNOFORBYTE(STARTINGBYTE, BLOCKSIZEINBYTES) ((daddr_t)((STARTINGBYTE) / (BLOCKSIZEINBYTES))) +#define IOBLKCNTFORBYTE(STARTINGBYTE, BYTESTOTRANSFER, BLOCKSIZEINBYTES) \ +((int)(IOBLKNOFORBYTE((STARTINGBYTE) + (BYTESTOTRANSFER) - 1, (BLOCKSIZEINBYTES)) - \ + IOBLKNOFORBYTE((STARTINGBYTE), (BLOCKSIZEINBYTES)) + 1)) +#define IOBYTECNTFORBYTE(STARTINGBYTE, BYTESTOTRANSFER, BLOCKSIZEINBYTES) \ + (IOBLKCNTFORBYTE((STARTINGBYTE),(BYTESTOTRANSFER),(BLOCKSIZEINBYTES)) * (BLOCKSIZEINBYTES)) +#define IOBYTEOFFSETFORBYTE(STARTINGBYTE, BLOCKSIZEINBYTES) ((STARTINGBYTE) - (IOBLKNOFORBYTE((STARTINGBYTE), (BLOCKSIZEINBYTES)) * (BLOCKSIZEINBYTES))) + +#define MAKE_VREFNUM(x) ((int32_t)((x) & 0xffff)) +/* + * This is the straight GMT conversion constant: + * 00:00:00 January 1, 1970 - 00:00:00 January 1, 1904 + * (3600 * 24 * ((365 * (1970 - 1904)) + (((1970 - 1904) / 4) + 1))) + */ +#define MAC_GMT_FACTOR 2082844800UL + +#define HFS_ATTR_CMN_LOOKUPMASK (ATTR_CMN_SCRIPT | ATTR_CMN_FNDRINFO | ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST) +#define HFS_ATTR_DIR_LOOKUPMASK (ATTR_DIR_LINKCOUNT | ATTR_DIR_ENTRYCOUNT) +#define HFS_ATTR_FILE_LOOKUPMASK (ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | ATTR_FILE_ALLOCSIZE | \ + ATTR_FILE_DATALENGTH | 
ATTR_FILE_DATAALLOCSIZE | ATTR_FILE_DATAEXTENTS | \ + ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE | ATTR_FILE_RSRCEXTENTS) + +u_int32_t to_bsd_time(u_int32_t hfs_time); +u_int32_t to_hfs_time(u_int32_t bsd_time); + +int hfs_flushfiles(struct mount *mp, int flags); +short hfs_flushMDB(struct hfsmount *hfsmp, int waitfor); +short hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor); + +short hfs_getcatalog (ExtendedVCB *vcb, u_int32_t dirID, char *name, short len, hfsCatalogInfo *catInfo); +short hfsMoveRename (ExtendedVCB *vcb, u_int32_t oldDirID, char *oldName, u_int32_t newDirID, char *newName, u_int32_t *hint); +short hfsCreate (ExtendedVCB *vcb, u_int32_t dirID, char *name, int mode); +short hfsCreateFileID (ExtendedVCB *vcb, u_int32_t parentDirID, StringPtr name, u_int32_t catalogHint, u_int32_t *fileIDPtr); +short hfs_vcreate (ExtendedVCB *vcb, hfsCatalogInfo *catInfo, u_int8_t forkType, struct vnode **vpp); +short hfsDelete (ExtendedVCB *vcb, u_int32_t parentDirID, StringPtr name, short isfile, u_int32_t catalogHint); +short hfsUnmount(struct hfsmount *hfsmp, struct proc *p); + +extern int hfs_metafilelocking(struct hfsmount *hfsmp, u_long fileID, u_int flags, struct proc *p); +extern int hasOverflowExtents(struct hfsnode *hp); + +void hfs_set_metaname(char *, struct hfsfilemeta *, struct hfsmount *); + +short MacToVFSError(OSErr err); +int hfs_owner_rights(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean invokesuperuserstatus); + +void CopyVNodeToCatalogNode (struct vnode *vp, struct CatalogNodeData *nodeData); +void CopyCatalogToHFSNode(struct hfsCatalogInfo *catalogInfo, struct hfsnode *hp); +u_long FindMetaDataDirectory(ExtendedVCB *vcb); + + +short make_dir_entry(FCB **fileptr, char *name, u_int32_t fileID); + +int AttributeBlockSize(struct attrlist *attrlist); +void PackCommonAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void 
PackVolAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void PackFileDirAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void PackForkAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void PackAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void PackCatalogInfoAttributeBlock (struct attrlist *alist, + struct vnode * root_vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void UnpackCommonAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +void UnpackAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr); +unsigned long BestBlockSizeFit(unsigned long allocationBlockSize, + unsigned long blockSizeLimit, + unsigned long baseMultiple); + +OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, + u_long sectors, struct proc *p); +OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, + u_long embBlkOffset, u_long sectors, struct proc *p); +OSStatus GetInitializedVNode(struct hfsmount *hfsmp, struct vnode **tmpvnode, int init_ubc); + +int hfs_getconverter(u_int32_t encoding, hfs_to_unicode_func_t *get_unicode, + unicode_to_hfs_func_t *get_hfsname); + +int hfs_relconverter(u_int32_t encoding); + +int hfs_to_utf8(ExtendedVCB *vcb, Str31 hfs_str, ByteCount maxDstLen, + ByteCount *actualDstLen, unsigned char* dstStr); + +int utf8_to_hfs(ExtendedVCB *vcb, ByteCount srcLen, const unsigned char* srcStr, + Str31 dstStr); + +int mac_roman_to_utf8(Str31 hfs_str, 
ByteCount maxDstLen, ByteCount *actualDstLen, + unsigned char* dstStr); + +int utf8_to_mac_roman(ByteCount srcLen, const unsigned char* srcStr, Str31 dstStr); + +#endif /* __HFS__ */ diff --git a/bsd/hfs/hfs_btreeio.c b/bsd/hfs/hfs_btreeio.c new file mode 100644 index 000000000..67e52e0ef --- /dev/null +++ b/bsd/hfs/hfs_btreeio.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)hfs_btreeio.c +* +* (c) 1998, 2000 Apple Computer, Inc. All Rights Reserved +* +* hfs_btreeio.c -- I/O Routines for the HFS B-tree files. +* +* HISTORY +* 15-Feb-2000 Don Brady Added ClearBTNodes. +* 16-Jul-1998 Don Brady In ExtendBtreeFile force all b-tree nodes to be contiguous on disk. +* 4-Jun-1998 Pat Dirks Changed to do all B*-Tree writes synchronously (FORCESYNCBTREEWRITES = 1) +* 18-apr-1998 Don Brady Call brelse on bread failure. +* 17-Apr-1998 Pat Dirks Fixed ReleaseBTreeBlock to not call brelse when bwrite or bdwrite is called. +* 13-apr-1998 Don Brady Add ExtendBTreeFile routine (from BTreeWrapper.c). 
+* 26-mar-1998 Don Brady SetBTreeBlockSize was incorrectly excluding 512 byte blockSize. +* 18-feb-1998 Don Brady Initially created file. +* +*/ + +#include +#include +#include +#include +#include + + +#include "hfs.h" +#include "hfs_dbg.h" +#include "hfs_endian.h" + +#include "hfscommon/headers/FileMgrInternal.h" +#include "hfscommon/headers/BTreesPrivate.h" + +#define FORCESYNCBTREEWRITES 0 + +static OSStatus FlushAlternate( ExtendedVCB *vcb ); + +static int ClearBTNodes(struct vnode *vp, long blksize, off_t offset, off_t amount); + + +OSStatus SetBTreeBlockSize(FileReference vp, ByteCount blockSize, ItemCount minBlockCount) +{ + BTreeControlBlockPtr bTreePtr; + + DBG_ASSERT(vp != NULL); + DBG_ASSERT(VTOFCB(vp) != NULL); + DBG_ASSERT(VTOFCB(vp)->fcbBTCBPtr != NULL); + DBG_ASSERT(blockSize >= kMinNodeSize); + if (blockSize > MAXBSIZE ) + return (fsBTBadNodeSize); + + DBG_TREE(("SetBlockSizeProc: blockSize=%ld for file %ld\n", blockSize, H_FILEID(VTOH(vp)))); + + bTreePtr = (BTreeControlBlockPtr)(VTOH(vp)->fcbBTCBPtr); + bTreePtr->nodeSize = blockSize; + + return (E_NONE); +} + + +OSStatus GetBTreeBlock(FileReference vp, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block) +{ + OSStatus retval = E_NONE; + struct buf *bp = NULL; + + if (options & kGetEmptyBlock) + bp = getblk (vp, + IOBLKNOFORBLK(blockNum, VTOHFS(vp)->hfs_phys_block_size), + IOBYTECCNTFORBLK(blockNum, block->blockSize, VTOHFS(vp)->hfs_phys_block_size), + 0, + 0, + BLK_META); + else + retval = meta_bread(vp, + IOBLKNOFORBLK(blockNum, VTOHFS(vp)->hfs_phys_block_size), + IOBYTECCNTFORBLK(blockNum, block->blockSize, VTOHFS(vp)->hfs_phys_block_size), + NOCRED, + &bp); + + DBG_ASSERT(bp != NULL); + DBG_ASSERT(bp->b_data != NULL); + DBG_ASSERT(bp->b_bcount == block->blockSize); + DBG_ASSERT(bp->b_lblkno == blockNum); + + if (bp == NULL) + retval = -1; //XXX need better error + + if (retval == E_NONE) { + block->blockHeader = bp; + block->buffer = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, 
VTOHFS(vp)->hfs_phys_block_size); + block->blockReadFromDisk = (bp->b_flags & B_CACHE) == 0; /* not found in cache ==> came from disk */ + +#if BYTE_ORDER == LITTLE_ENDIAN + /* Endian swap B-Tree node (only if it's a valid block) */ + if (!(options & kGetEmptyBlock)) { + /* This happens when we first open the b-tree, we might not have all the node data on hand */ + if ((((BTNodeDescriptor *)block->buffer)->kind == kBTHeaderNode) && + (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize != bp->b_bcount) && + (SWAP_BE16 (((BTHeaderRec *)((char *)block->buffer + 14))->nodeSize) != bp->b_bcount)) { + + /* Don't swap the descriptors at all, we don't care (this block will be invalidated) */ + SWAP_BT_NODE (block, ISHFSPLUS(VTOVCB(vp)), H_FILEID(VTOH(vp)), 3); + + /* The node needs swapping */ + } else if (*((UInt16 *)((char *)block->buffer + (block->blockSize - sizeof (UInt16)))) == 0x0e00) { + SWAP_BT_NODE (block, ISHFSPLUS(VTOVCB(vp)), H_FILEID(VTOH(vp)), 0); +#if 0 + /* The node is not already in native byte order, hence corrupt */ + } else if (*((UInt16 *)((char *)block->buffer + (block->blockSize - sizeof (UInt16)))) != 0x000e) { + panic ("%s Corrupt B-Tree node detected!\n", "GetBTreeBlock:"); +#endif + } + } +#endif + } else { + if (bp) + brelse(bp); + block->blockHeader = NULL; + block->buffer = NULL; + } + + return (retval); +} + + +OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options) +{ + OSStatus retval = E_NONE; + struct buf *bp = NULL; + + bp = (struct buf *) blockPtr->blockHeader; + + if (bp == NULL) { + DBG_TREE(("ReleaseBlockProc: blockHeader is zero!\n")); + retval = -1; + goto exit; + } + + if (options & kTrashBlock) { + bp->b_flags |= B_INVAL; + brelse(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */ + } else { + if (options & kForceWriteBlock) { + bp->b_flags |= B_DIRTY; + retval = VOP_BWRITE(bp); + } else if (options & kMarkBlockDirty) { + bp->b_flags |= B_DIRTY; +#if 
FORCESYNCBTREEWRITES + VOP_BWRITE(bp); +#else + if (options & kLockTransaction) { + + /* + * + * Set the B_LOCKED flag and unlock the buffer, causing brelse to move + * the buffer onto the LOCKED free list. This is necessary, otherwise + * getnewbuf() would try to reclaim the buffers using bawrite, which + * isn't going to work. + * + */ + bp->b_flags |= B_LOCKED; + }; + bdwrite(bp); + +#endif + } else { + brelse(bp); /* note: B-tree code will clear blockPtr->blockHeader and blockPtr->buffer */ + }; + }; + +exit: + return (retval); +} + + +OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF) +{ +#pragma unused (maxEOF) + + OSStatus retval; + UInt64 actualBytesAdded; + UInt64 bytesToAdd; + UInt32 extendFlags; + BTreeInfoRec btInfo; + ExtendedVCB *vcb; + FCB *filePtr; + struct proc *p = NULL; + + + filePtr = GetFileControlBlock(vp); + + if ( minEOF > filePtr->fcbEOF ) + { + bytesToAdd = minEOF - filePtr->fcbEOF; + + if (bytesToAdd < filePtr->fcbClmpSize) + bytesToAdd = filePtr->fcbClmpSize; //XXX why not always be a mutiple of clump size? + } + else + { + DBG_TREE((" ExtendBTreeFile: minEOF is smaller than current size!")); + return -1; + } + + vcb = FCBTOVCB(filePtr); + + /* + * The Extents B-tree can't have overflow extents. ExtendFileC will + * return an error if an attempt is made to extend the Extents B-tree + * when the resident extents are exhausted. + */ + /* XXX warning - this can leave the volume bitmap unprotected during ExtendFileC call */ + if(H_FILEID(filePtr) != kHFSExtentsFileID) + { + p = current_proc(); + /* lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE, p); + if (retval) + return (retval); + } + + (void) BTGetInformation(filePtr, 0, &btInfo); + + /* + * The b-tree code expects nodes to be contiguous. So when + * the allocation block size is less than the b-tree node + * size, we need to force disk allocations to be contiguous. 
+ */ + if (vcb->blockSize >= btInfo.nodeSize) { + extendFlags = 0; + } else { + /* Ensure that all b-tree nodes are contiguous on disk */ + extendFlags = kEFAllMask | kEFContigMask; + } + + retval = ExtendFileC(vcb, filePtr, bytesToAdd, extendFlags, &actualBytesAdded ); + + if(H_FILEID(filePtr) != kHFSExtentsFileID) + (void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, p); + + if (retval) + return (retval); + + if (actualBytesAdded < bytesToAdd) + DBG_TREE((" ExtendBTreeFile: actualBytesAdded < bytesToAdd!")); + + filePtr->fcbEOF = filePtr->fcbPLen; + + retval = ClearBTNodes(vp, btInfo.nodeSize, filePtr->fcbEOF - actualBytesAdded, actualBytesAdded); + if (retval) + return (retval); + + /* + * Update the Alternate MDB or Alternate VolumeHeader + */ + if ((H_FILEID(filePtr) == kHFSExtentsFileID) || + (H_FILEID(filePtr) == kHFSCatalogFileID) || + (H_FILEID(filePtr) == kHFSAttributesFileID) + ) { + MarkVCBDirty( vcb ); + if (vcb->vcbSigWord == kHFSPlusSigWord) { + retval = hfs_flushvolumeheader(VCBTOHFS(vcb), 0); + } else { + retval = hfs_flushMDB(VCBTOHFS(vcb), 0); + } + if (retval == 0) { + retval = FlushAlternate(vcb); + } + } + + return retval; +} + + +static OSStatus +FlushAlternate( ExtendedVCB *vcb ) +{ + void *maindata; + void *altdata; + int result; + + /* Get the main MDB/VolumeHeader block */ + result = GetBlock_glue(gbDefault, + (vcb->hfsPlusIOPosOffset / kHFSBlockSize) + kMasterDirectoryBlock, + (Ptr *)&maindata, kNoFileReference, vcb); + if (result) return (result); + + /* Get the alternate MDB/VolumeHeader block */ + result = GetBlock_glue( gbDefault, vcb->altIDSector, + (Ptr *)&altdata, kNoFileReference, vcb ); + + if (result == 0) { + bcopy(maindata, altdata, kMDBSize); + + result = RelBlock_glue( (Ptr)altdata, rbWriteMask ); + } + + (void) RelBlock_glue( (Ptr)maindata, rbFreeMask ); + + return (result); +} + + +/* + * Clear out (zero) new b-tree nodes on disk. 
+ */ +static int +ClearBTNodes(struct vnode *vp, long blksize, off_t offset, off_t amount) +{ + struct buf *bp = NULL; + daddr_t blk; + daddr_t blkcnt; + + blk = offset / blksize; + blkcnt = amount / blksize; + + while (blkcnt > 0) { + bp = getblk(vp, blk, blksize, 0, 0, BLK_META); + if (bp == NULL) + continue; + bzero((char *)bp->b_data, blksize); + bp->b_flags |= (B_DIRTY | B_AGE); + + /* wait/yield every 32 blocks so we don't hog all the buffers */ + if ((blk % 32) == 0) + VOP_BWRITE(bp); + else + bawrite(bp); + --blkcnt; + ++blk; + } + + return (0); +} diff --git a/bsd/hfs/hfs_dbg.h b/bsd/hfs/hfs_dbg.h new file mode 100644 index 000000000..9033f82ad --- /dev/null +++ b/bsd/hfs/hfs_dbg.h @@ -0,0 +1,337 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* hfs_dbg.h + * + * (c) 1997 Apple Computer, Inc. All Rights Reserved + * + * hfs_dbg.h -- debugging macros for HFS file system. + * + * HISTORY + * 10-Nov-1998 Pat Dirks Cleaned up definition of DBG_ASSERT to handle embedded '%' correctly. 
+ * 28-Apr-1998 Scott Roberts Reorganized and added HFS_DEBUG_STAGE
+ * 17-Nov-1997 Pat Dirks Pat Dirks at Apple Computer
+ * Derived from old hfs version.
+ */
+
+struct componentname;
+
+/* Define the debugging stage...
+ 4 -> Do all, aggressive, call_kdp
+ 3 -> debug asserts and debug err, panic instead of call_kdp
+ 2 -> debug error, no kdb
+ 1 -> very little, panic only
+*/
+#ifndef HFS_DIAGNOSTIC
+ #define HFS_DIAGNOSTIC 0
+#endif /* HFS_DIAGNOSTIC */
+
+#ifndef HFS_DEBUG_STAGE
+#if HFS_DIAGNOSTIC
+ #define HFS_DEBUG_STAGE 4
+#else
+ #define HFS_DEBUG_STAGE 1
+#endif /* HFS_DIAGNOSTIC */
+#endif /* HFS_DEBUG_STAGE */
+
+#ifdef KERNEL
+ #define PRINTIT kprintf
+#else /* KERNEL */
+ #define PRINTIT printf
+#endif /* KERNEL */
+
+#if (HFS_DEBUG_STAGE > 3)
+#define DEBUG_BREAK Debugger("");
+#else
+#define DEBUG_BREAK
+#endif
+
+#if (HFS_DEBUG_STAGE == 4)
+ #define DEBUG_BREAK_MSG(PRINTF_ARGS) { PRINTIT PRINTF_ARGS; DEBUG_BREAK };
+#elif (HFS_DEBUG_STAGE == 3)
+ #define DEBUG_BREAK_MSG(PRINTF_ARGS) { panic PRINTF_ARGS;};
+#else
+ #define DEBUG_BREAK_MSG(PRINTF_ARGS) { PRINTIT PRINTF_ARGS; };
+#endif
+
+
+//#define PRINT_DELAY (void) tsleep((caddr_t)&lbolt, PPAUSE, "hfs kprintf", 0)
+#define PRINT_DELAY
+
+/*
+ * Debugging macros.
+ */ +#if HFS_DIAGNOSTIC +extern int hfs_dbg_all; +extern int hfs_dbg_vfs; +extern int hfs_dbg_vop; +extern int hfs_dbg_load; +extern int hfs_dbg_io; +extern int hfs_dbg_utils; +extern int hfs_dbg_rw; +extern int hfs_dbg_lookup; +extern int hfs_dbg_tree; +extern int hfs_dbg_err; +extern int hfs_dbg_test; + +#ifdef KERNEL + #if (HFS_DEBUG_STAGE == 4) + char gDebugAssertStr[255]; + #define DBG_ASSERT(a) { if (!(a)) { \ + sprintf(gDebugAssertStr,"Oops - File "__FILE__", line %d: assertion '%s' failed.\n", __LINE__, #a); \ + Debugger(gDebugAssertStr); } } + #else +#define DBG_ASSERT(a) { if (!(a)) { panic("File "__FILE__", line %d: assertion '%s' failed.\n", __LINE__, #a); } } + #endif /* HFS_DEBUG_STAGE */ +#else + #define DBG_ASSERT(a) assert(a) +#endif /* KERNEL */ + +//#define DBG_VFS if (hfs_dbg_all || hfs_dbg_vfs) PRINTIT +#define DBG_VFS(x) { \ + if(hfs_dbg_all || hfs_dbg_vfs) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_VFS_CONT(x) { \ + if(hfs_dbg_all || hfs_dbg_vfs) { \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_VOP(x) { \ + if(hfs_dbg_all || hfs_dbg_vop) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_VOP_CONT(x) { \ + if(hfs_dbg_all || hfs_dbg_vop) { \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_LOAD(x) { \ + if(hfs_dbg_all || hfs_dbg_load) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_IO(x) { \ + if(hfs_dbg_all || hfs_dbg_io) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_UTILS(x) { \ + if(hfs_dbg_all || hfs_dbg_utils) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_RW(x) { \ + if(hfs_dbg_all || hfs_dbg_rw) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_LOOKUP(x) { \ + if(hfs_dbg_all || hfs_dbg_lookup) 
{ \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_TREE(x) { \ + if(hfs_dbg_all || hfs_dbg_tree) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_ERR(x) { \ + if(hfs_dbg_all || hfs_dbg_err) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT("HFS ERROR: "); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#define DBG_TEST(x) { \ + if(hfs_dbg_all || hfs_dbg_test) { \ + PRINTIT("%X: ", current_proc()->p_pid); \ + PRINTIT x; \ + PRINT_DELAY; \ + }; \ +} +#else // HFS_DIAGNOSTIC +#define DBG_ASSERT(a) +#define DBG_VFS(x) +#define DBG_VFS_CONT(x) +#define DBG_VOP(x) +#define DBG_VOP_CONT(x) +#define DBG_LOAD(x) +#define DBG_IO(x) +#define DBG_UTILS(x) +#define DBG_RW(x) +#define DBG_LOOKUP(x) +#define DBG_TREE(x) +#define DBG_ERR(x) +#define DBG_TEST(x) +#endif // HFS_DIAGNOSTIC + + +/* Used to help print commone values in the vnode ops */ +#if HFS_DIAGNOSTIC + extern void debug_vn_status (char* introStr, struct vnode *vn); + extern void debug_vn_print (char* introStr, struct vnode *vn); + extern void debug_check_vnode(struct vnode *vp, int stage); + + #define DBG_VN_STATUS (introStr, vn) debug_vn_status (introStr, vn) + #define DBG_VN_PRINT (introStr, vn) debug_vn_print (introStr, vn) + #define DBG_FUNC_NAME(FSTR) static char *funcname = FSTR + #define DBG_PRINT_FUNC_NAME() DBG_VFS(("%s: ", funcname)); + #define DBG_VOP_PRINT_FUNCNAME() DBG_VOP(("%s: ", funcname)); + + + /* This checks to make sure the passed in node is valid and HFS */ + #define DBG_HFS_NODE_CHECK(VP) { \ + if ((VP) == NULL || VTOH((VP))->h_valid != HFS_VNODE_MAGIC) { \ + DBG_VOP_CONT(("%s: INVALID VNODE: ", funcname)); \ + DBG_VOP_PRINT_VNODE_INFO(VP); \ + DBG_VOP_CONT(("\n")); \ + return (EINVAL); \ + } \ + } + + #define DBG_VOP_PRINT_VNODE_INFO(VP) { if (VP && VTOH((VP))->h_valid == HFS_VNODE_MAGIC) { \ + DBG_VOP_CONT(("\tn: %s, p: %d, id: %d, f: %d, u: %d, v: 0x%x ",H_NAME(VTOH(VP)), \ + 
H_DIRID(VTOH(VP)), H_FILEID(VTOH(VP)), H_FORKTYPE(VTOH(VP)), (VP)->v_usecount, (u_int)(VP))); \ + } else { \ + DBG_VOP_CONT(("\tBAD MACNODE"));}} + + #define DBG_VOP_PRINT_CPN_INFO(CN) DBG_VOP_CONT(("name: %s",(CN)->cn_nameptr)); + +#else /* HFS_DIAGNOSTIC */ + + #define DBG_VN_PRINT(introStr,vn) + #define DBG_VN_STATUS(introStr,vn) + #define DBG_FUNC_NAME(FSTR) + #define DBG_PRINT_FUNC_NAME() + #define DBG_HFS_NODE_CHECK(VP) + #define DBG_VOP_PRINT_FUNCNAME() + #define DBG_VOP_PRINT_VNODE_INFO(VP) + #define DBG_VOP_PRINT_CPN_INFO(CN) + +#endif /* HFS_DIAGNOSTIC */ + + +#if HFS_DIAGNOSTIC +#define DBG_VOP_TEST_LOCKS 1 +#else /* HFS_DIAGNOSTIC */ +#undef DBG_VOP_TEST_LOCKS +#endif /* HFS_DIAGNOSTIC */ + + + +#if DBG_VOP_TEST_LOCKS + +typedef struct VopDbgStoreRec { + short id; + struct vnode *vp; + short inState; + short outState; + short errState; + int inValue; + int outValue; + } VopDbgStoreRec; + + +void DbgVopTest (int max, int error, VopDbgStoreRec *VopDbgStore, char *funcname); +void DbgLookupTest(char *funcname, struct componentname *cnp, struct vnode *dvp, struct vnode *vp); + +#define VOPDBG_IGNORE 0 +#define VOPDBG_LOCKED 1 +#define VOPDBG_UNLOCKED -1 +#define VOPDBG_LOCKNOTNIL 2 +#define VOPDBG_SAME 3 + +#define VOPDBG_ZERO 0 +#define VOPDBG_POS 1 + +/* This sets up the test for the lock state of vnodes. The entry paramaters are: + * I = index of paramater + * VP = pointer to a vnode + * ENTRYSTATE = the inState of the lock + * EXITSTATE = the outState of the lock + * ERRORSTATE = the error state of the lock + * It initializes the structure, does some preliminary validity checks, but does nothing + * if the instate is set to be ignored. 
+ */ + + +#define DBG_VOP_LOCKS_DECL(I) VopDbgStoreRec VopDbgStore[I];short numOfLockSlots=I +#define DBG_VOP_LOCKS_INIT(I,VP,ENTRYSTATE,EXITSTATE,ERRORSTATE,CHECKFLAG) \ + if (I >= numOfLockSlots) { \ + DEBUG_BREAK_MSG(("%s: DBG_VOP_LOCKS_INIT: Entry #%d greater than allocated slots!\n", funcname, I)); \ + }; \ + VopDbgStore[I].id = I; \ + VopDbgStore[I].vp = VP; \ + VopDbgStore[I].inState = ENTRYSTATE; \ + VopDbgStore[I].outState = EXITSTATE; \ + VopDbgStore[I].errState = ERRORSTATE; \ + VopDbgStore[I].inValue = 0; \ + VopDbgStore[I].outValue = 0; \ + if ((VopDbgStore[I].inState != VOPDBG_IGNORE)) { \ + if ((VP) == NULL) \ + PRINTIT ("%X: %s: DBG_VOP_LOCK on start: Null vnode ptr\n", current_proc()->p_pid, funcname); \ + else \ + VopDbgStore[I].inValue = lockstatus (&(VTOH(VP))->h_lock); \ + } \ + if ((VP) != NULL) \ + { \ + if (CHECKFLAG==VOPDBG_POS && (VP)->v_usecount <= 0) \ + PRINTIT("%X: %s: BAD USECOUNT OF %d !!!!\n", current_proc()->p_pid, funcname, (VP)->v_usecount); \ + else if ((VP)->v_usecount < 0) \ + PRINTIT("%X: %s: BAD USECOUNT OF %d !!!!\n", current_proc()->p_pid, funcname, (VP)->v_usecount); \ + } + + #define DBG_VOP_UPDATE_VP(I, VP) \ + VopDbgStore[I].vp = VP; + + #define DBG_VOP_LOCKS_TEST(status) DbgVopTest (numOfLockSlots, status, VopDbgStore, funcname); + #define DBG_VOP_LOOKUP_TEST(funcname, cnp, dvp, vp) DbgLookupTest (funcname, cnp, dvp, vp); + +#else /* DBG_VOP_TEST_LOCKS */ + + #define DBG_VOP_LOCKS_DECL(A) + #define DBG_VOP_LOCKS_INIT(A,B,C,D,E,F) + #define DBG_VOP_LOCKS_TEST(a) + #define DBG_VOP_LOOKUP_TEST(funcname, cnp, dvp, vp) + #define DBG_VOP_UPDATE_VP(I, VP) +#endif /* DBG_VOP_TEST_LOCKS */ diff --git a/bsd/hfs/hfs_encodings.c b/bsd/hfs/hfs_encodings.c new file mode 100644 index 000000000..e85de58dc --- /dev/null +++ b/bsd/hfs/hfs_encodings.c @@ -0,0 +1,632 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "hfs.h" + + +/* hfs encoding converter list */ +SLIST_HEAD(encodinglst, hfs_encoding) hfs_encoding_list = {0}; +decl_simple_lock_data(,hfs_encoding_list_slock); + + +/* hfs encoding converter entry */ +struct hfs_encoding { + SLIST_ENTRY(hfs_encoding) link; + int refcount; + int kmod_id; + UInt32 encoding; + hfs_to_unicode_func_t get_unicode_func; + unicode_to_hfs_func_t get_hfsname_func; +}; + +/* XXX We should use an "official" interface! */ +extern kern_return_t kmod_destroy(host_priv_t host_priv, kmod_t id); +extern struct host realhost; + +#define MAX_HFS_UNICODE_CHARS (15*5) + +static int mac_roman_to_unicode(Str31 hfs_str, UniChar *uni_str, UInt32 maxCharLen, UInt32 *usedCharLen); + +static int unicode_to_mac_roman(UniChar *uni_str, UInt32 unicodeChars, Str31 hfs_str); + + +void +hfs_converterinit(void) +{ + SLIST_INIT(&hfs_encoding_list); + simple_lock_init(&hfs_encoding_list_slock); + + /* + * add resident MacRoman converter and take a reference + * since its always "loaded". 
+ */ + hfs_addconverter(0, kTextEncodingMacRoman, mac_roman_to_unicode, unicode_to_mac_roman); + SLIST_FIRST(&hfs_encoding_list)->refcount++; +} + + +/* + * hfs_addconverter - add an HFS encoding converter + * + * This is called exclusivly by kernel loadable modules + * (like HFS_Japanese.kmod) to register hfs encoding + * conversion routines. + * + */ +int +hfs_addconverter(int id, UInt32 encoding, hfs_to_unicode_func_t get_unicode, unicode_to_hfs_func_t get_hfsname) +{ + struct hfs_encoding *encp; + + MALLOC(encp, struct hfs_encoding *, sizeof(struct hfs_encoding), M_TEMP, M_WAITOK); + + simple_lock(&hfs_encoding_list_slock); + + encp->link.sle_next = NULL; + encp->refcount = 0; + encp->encoding = encoding; + encp->get_unicode_func = get_unicode; + encp->get_hfsname_func = get_hfsname; + encp->kmod_id = id; + SLIST_INSERT_HEAD(&hfs_encoding_list, encp, link); + + simple_unlock(&hfs_encoding_list_slock); + return (0); +} + + +/* + * hfs_remconverter - remove an HFS encoding converter + * + * Can be called by a kernel loadable module's finalize + * routine to remove an encoding converter so that the + * module (i.e. the code) can be unloaded. + * + * However, in the normal case, the removing and unloading + * of these converters is done in hfs_relconverter. + * The call is initiated from within the kernel during the unmounting of an hfs voulume. 
+ */ +int +hfs_remconverter(int id, UInt32 encoding) +{ + struct hfs_encoding *encp; + int busy = 0; + + simple_lock(&hfs_encoding_list_slock); + SLIST_FOREACH(encp, &hfs_encoding_list, link) { + if (encp->encoding == encoding && encp->kmod_id == id) { + encp->refcount--; + + /* if converter is no longer in use, release it */ + if (encp->refcount <= 0 && encp->kmod_id != 0) { + SLIST_REMOVE(&hfs_encoding_list, encp, hfs_encoding, link); + FREE(encp, M_TEMP); + } else { + busy = 1; + } + break; + } + } + simple_unlock(&hfs_encoding_list_slock); + + return (busy); +} + + +/* + * hfs_getconverter - get HFS encoding converters + * + * Normally called during the mounting of an hfs voulume. + */ +int +hfs_getconverter(UInt32 encoding, hfs_to_unicode_func_t *get_unicode, unicode_to_hfs_func_t *get_hfsname) +{ + struct hfs_encoding *encp; + int found = 0; + + simple_lock(&hfs_encoding_list_slock); + SLIST_FOREACH(encp, &hfs_encoding_list, link) { + if (encp->encoding == encoding) { + found = 1; + *get_unicode = encp->get_unicode_func; + *get_hfsname = encp->get_hfsname_func; + ++encp->refcount; + break; + } + } + simple_unlock(&hfs_encoding_list_slock); + + if (!found) { + *get_unicode = NULL; + *get_hfsname = NULL; + return (EINVAL); + } + + return (0); +} + + +/* + * hfs_relconverter - release interest in an HFS encoding converter + * + * Normally called during the unmounting of an hfs voulume. 
+ */ +int +hfs_relconverter(UInt32 encoding) +{ + struct hfs_encoding *encp; + int found = 0; + + simple_lock(&hfs_encoding_list_slock); + SLIST_FOREACH(encp, &hfs_encoding_list, link) { + if (encp->encoding == encoding) { + found = 1; + encp->refcount--; + + /* if converter is no longer in use, release it */ + if (encp->refcount <= 0 && encp->kmod_id != 0) { + int id = encp->kmod_id; + + SLIST_REMOVE(&hfs_encoding_list, encp, hfs_encoding, link); + FREE(encp, M_TEMP); + encp = NULL; + + simple_unlock(&hfs_encoding_list_slock); + kmod_destroy(host_priv_self(), id); + simple_lock(&hfs_encoding_list_slock); + } + break; + } + } + simple_unlock(&hfs_encoding_list_slock); + + return (found ? 0 : EINVAL); +} + + +/* + * Convert HFS encoded string into UTF-8 + * + * Unicode output is fully decomposed + * '/' chars are converted to ':' + */ +int +hfs_to_utf8(ExtendedVCB *vcb, Str31 hfs_str, ByteCount maxDstLen, ByteCount *actualDstLen, unsigned char* dstStr) +{ + int error; + UniChar uniStr[MAX_HFS_UNICODE_CHARS]; + ItemCount uniCount; + size_t utf8len; + hfs_to_unicode_func_t hfs_get_unicode = VCBTOHFS(vcb)->hfs_get_unicode; + + error = hfs_get_unicode(hfs_str, uniStr, MAX_HFS_UNICODE_CHARS, &uniCount); + + if (uniCount == 0) + error = EINVAL; + + if (error == 0) { + error = utf8_encodestr(uniStr, uniCount * sizeof(UniChar), dstStr, &utf8len, maxDstLen , ':', 0); + if (error == ENAMETOOLONG) + *actualDstLen = utf8_encodelen(uniStr, uniCount * sizeof(UniChar), ':', 0); + else + *actualDstLen = utf8len; + } + + return error; +} + + +/* + * When an HFS name cannot be encoded with the current + * volume encoding then MacRoman is used as a fallback. 
+ */ +int +mac_roman_to_utf8(Str31 hfs_str, ByteCount maxDstLen, ByteCount *actualDstLen, unsigned char* dstStr) +{ + int error; + UniChar uniStr[MAX_HFS_UNICODE_CHARS]; + ItemCount uniCount; + size_t utf8len; + + error = mac_roman_to_unicode(hfs_str, uniStr, MAX_HFS_UNICODE_CHARS, &uniCount); + + if (uniCount == 0) + error = EINVAL; + + if (error == 0) { + error = utf8_encodestr(uniStr, uniCount * sizeof(UniChar), dstStr, &utf8len, maxDstLen , ':', 0); + if (error == ENAMETOOLONG) + *actualDstLen = utf8_encodelen(uniStr, uniCount * sizeof(UniChar), ':', 0); + else + *actualDstLen = utf8len; + } + + return error; +} + + +/* + * Convert UTF-8 string into HFS encoding + * + * ':' chars are converted to '/' + * Assumes input represents fully decomposed Unicode + */ +int +utf8_to_hfs(ExtendedVCB *vcb, ByteCount srcLen, const unsigned char* srcStr, Str31 dstStr) +{ + int error; + UniChar uniStr[MAX_HFS_UNICODE_CHARS]; + size_t ucslen; + unicode_to_hfs_func_t hfs_get_hfsname = VCBTOHFS(vcb)->hfs_get_hfsname; + + error = utf8_decodestr(srcStr, srcLen, uniStr, &ucslen, sizeof(uniStr), ':', 0); + if (error == 0) + error = hfs_get_hfsname(uniStr, ucslen/sizeof(UniChar), dstStr); + + return error; +} + +int +utf8_to_mac_roman(ByteCount srcLen, const unsigned char* srcStr, Str31 dstStr) +{ + int error; + UniChar uniStr[MAX_HFS_UNICODE_CHARS]; + size_t ucslen; + + error = utf8_decodestr(srcStr, srcLen, uniStr, &ucslen, sizeof(uniStr), ':', 0); + if (error == 0) + error = unicode_to_mac_roman(uniStr, ucslen/sizeof(UniChar), dstStr); + + return error; +} + +/* + * HFS MacRoman to/from Unicode conversions are built into the kernel + * All others hfs encodings are loadable. 
+ */ + +/* 0x00A0 - 0x00FF = Latin 1 Supplement (30 total) */ +static UInt8 gLatin1Table[] = { + /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + /* 0x00A0 */ 0xCA, 0xC1, 0xA2, 0xA3, 0xDB, 0xB4, '?', 0xA4, 0xAC, 0xA9, 0xBB, 0xC7, 0xC2, '?', 0xA8, 0xF8, + /* 0x00B0 */ 0xA1, 0XB1, '?', '?', 0xAB, 0xB5, 0xA6, 0xe1, 0xFC, '?', 0xBC, 0xC8, '?', '?', '?', 0xC0, + /* 0x00C0 */ '?', '?', '?', '?', '?', '?', 0xAE, '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x00D0 */ '?', '?', '?', '?', '?', '?', '?', '?', 0xAF, '?', '?', '?', '?', '?', '?', 0xA7, + /* 0x00E0 */ '?', '?', '?', '?', '?', '?', 0xBE, '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x00F0 */ '?', '?', '?', '?', '?', '?', '?', 0xD6, 0xBF, '?', '?', '?', '?', '?', '?', '?' +}; + +/* 0x02C0 - 0x02DF = Spacing Modifiers (8 total) */ +static UInt8 gSpaceModsTable[] = { + /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + /* 0x02C0 */ '?', '?', '?', '?', '?', '?', 0xF6, 0xFF, '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x02D0 */ '?', '?', '?', '?', '?', '?', '?', '?', 0xF9, 0xFA, 0xFB, 0xFE, 0xF7, 0xFD, '?', '?' 
+}; + +/* 0x2010 - 0x20AF = General Punctuation (17 total) */ +static UInt8 gPunctTable[] = { + /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + /* 0x2010 */ '?', '?', '?', 0xd0, 0xd1, '?', '?', '?', 0xd4, 0xd5, 0xe2, '?', 0xd2, 0xd3, 0xe3, '?', + /* 0x2020 */ 0xa0, 0xe0, 0xa5, '?', '?', '?', 0xc9, '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2030 */ 0xe4, '?', '?', '?', '?', '?', '?', '?', '?', 0xdc, 0xdd, '?', '?', '?', '?', '?', + /* 0x2040 */ '?', '?', '?', '?', 0xda, '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2050 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2060 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2070 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2080 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2090 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x20A0 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', 0xdb, '?', '?', '?' +}; + +/* 0x22xx = Mathematical Operators (11 total) */ +static UInt8 gMathTable[] = { + /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + /* 0x2200 */ '?', '?', 0xb6, '?', '?', '?', 0xc6, '?', '?', '?', '?', '?', '?', '?', '?', 0xb8, + /* 0x2210 */ '?', 0xb7, '?', '?', '?', '?', '?', '?', '?', '?', 0xc3, '?', '?', '?', 0xb0, '?', + /* 0x2220 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', 0xba, '?', '?', '?', '?', + /* 0x2230 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2240 */ '?', '?', '?', '?', '?', '?', '?', '?', 0xc5, '?', '?', '?', '?', '?', '?', '?', + /* 0x2250 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', + /* 0x2260 */ 0xad, '?', '?', '?', 0xb2, 0xb3, '?', '?' 
+}; + +/* */ +static UInt8 gReverseCombTable[] = { + /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */ + /* 0x40 */ 0xDA, 0x40, 0xDA, 0xDA, 0xDA, 0x56, 0xDA, 0xDA, 0xDA, 0x6C, 0xDA, 0xDA, 0xDA, 0xDA, 0x82, 0x98, + /* 0x50 */ 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xAE, 0xDA, 0xDA, 0xDA, 0xC4, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, + /* 0x60 */ 0xDA, 0x4B, 0xDA, 0xDA, 0xDA, 0x61, 0xDA, 0xDA, 0xDA, 0x77, 0xDA, 0xDA, 0xDA, 0xDA, 0x8D, 0xA3, + /* 0x70 */ 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xB9, 0xDA, 0xDA, 0xDA, 0xCF, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, 0xDA, + + /* Combining Diacritical Marks (0x0300 - 0x030A) */ + /* 0 1 2 3 4 5 6 7 8 9 A */ + /* 'A' */ + /* 0x0300 */ 0xCB, 0xE7, 0xE5, 0xCC, '?', '?', '?', '?', 0x80, '?', 0x81, + + /* 'a' */ + /* 0x0300 */ 0x88, 0x87, 0x89, 0x8B, '?', '?', '?', '?', 0x8A, '?', 0x8C, + + /* 'E' */ + /* 0x0300 */ 0xE9, 0x83, 0xE6, '?', '?', '?', '?', '?', 0xE8, '?', '?', + + /* 'e' */ + /* 0x0300 */ 0x8F, 0x8E, 0x90, '?', '?', '?', '?', '?', 0x91, '?', '?', + + /* 'I' */ + /* 0x0300 */ 0xED, 0xEA, 0xEB, '?', '?', '?', '?', '?', 0xEC, '?', '?', + + /* 'i' */ + /* 0x0300 */ 0x93, 0x92, 0x94, '?', '?', '?', '?', '?', 0x95, '?', '?', + + /* 'N' */ + /* 0x0300 */ '?', '?', '?', 0x84, '?', '?', '?', '?', '?', '?', '?', + + /* 'n' */ + /* 0x0300 */ '?', '?', '?', 0x96, '?', '?', '?', '?', '?', '?', '?', + + /* 'O' */ + /* 0x0300 */ 0xF1, 0xEE, 0xEF, 0xCD, '?', '?', '?', '?', 0x85, '?', '?', + + /* 'o' */ + /* 0x0300 */ 0x98, 0x97, 0x99, 0x9B, '?', '?', '?', '?', 0x9A, '?', '?', + + /* 'U' */ + /* 0x0300 */ 0xF4, 0xF2, 0xF3, '?', '?', '?', '?', '?', 0x86, '?', '?', + + /* 'u' */ + /* 0x0300 */ 0x9D, 0x9C, 0x9E, '?', '?', '?', '?', '?', 0x9F, '?', '?', + + /* 'Y' */ + /* 0x0300 */ '?', '?', '?', '?', '?', '?', '?', '?', 0xD9, '?', '?', + + /* 'y' */ + /* 0x0300 */ '?', '?', '?', '?', '?', '?', '?', '?', 0xD8, '?', '?', + + /* else */ + /* 0x0300 */ '?', '?', '?', '?', '?', '?', '?', '?', '?', '?', '?' 
+}; + + +/* + * Convert Unicode string into HFS MacRoman encoding + * + * Assumes Unicode input is fully decomposed + */ +static int unicode_to_mac_roman(UniChar *uni_str, UInt32 unicodeChars, Str31 hfs_str) +{ + UInt8 *p; + const UniChar *u; + UniChar c; + UniChar mask; + UInt16 inputChars; + UInt16 pascalChars; + OSErr result = noErr; + UInt8 lsb; + UInt8 prevChar; + UInt8 mc; + + mask = (UniChar) 0xFF80; + p = &hfs_str[1]; + u = uni_str; + inputChars = unicodeChars; + pascalChars = prevChar = 0; + + while (inputChars) { + c = *(u++); + lsb = (UInt8) c; + + /* + * If its not 7-bit ascii, then we need to map it + */ + if ( c & mask ) { + mc = '?'; + switch (c & 0xFF00) { + case 0x0000: + if (lsb >= 0xA0) + mc = gLatin1Table[lsb - 0xA0]; + break; + + case 0x0200: + if (lsb >= 0xC0 && lsb <= 0xDF) + mc = gSpaceModsTable[lsb - 0xC0]; + break; + + case 0x2000: + if (lsb >= 0x10 && lsb <= 0xAF) + mc = gPunctTable[lsb- 0x10]; + break; + + case 0x2200: + if (lsb <= 0x68) + mc = gMathTable[lsb]; + break; + + case 0x0300: + if (c <= 0x030A) { + if (prevChar >= 'A' && prevChar < 'z') { + mc = gReverseCombTable[gReverseCombTable[prevChar - 0x40] + lsb]; + --p; /* backup over base char */ + --pascalChars; + } + } else { + switch (c) { + case 0x0327: /* combining cedilla */ + if (prevChar == 'C') + mc = 0x82; + else if (prevChar == 'c') + mc = 0x8D; + else + break; + --p; /* backup over base char */ + --pascalChars; + break; + + case 0x03A9: mc = 0xBD; break; /* omega */ + + case 0x03C0: mc = 0xB9; break; /* pi */ + } + } + break; + + default: + switch (c) { + case 0x0131: mc = 0xf5; break; /* dotless i */ + + case 0x0152: mc = 0xce; break; /* OE */ + + case 0x0153: mc = 0xcf; break; /* oe */ + + case 0x0192: mc = 0xc4; break; /* Ä */ + + case 0x2122: mc = 0xaa; break; /* TM */ + + case 0x25ca: mc = 0xd7; break; /* diamond */ + + case 0xf8ff: mc = 0xf0; break; /* apple logo */ + + case 0xfb01: mc = 0xde; break; /* fi */ + + case 0xfb02: mc = 0xdf; break; /* fl */ + } + } /* 
end switch (c & 0xFF00) */ + + /* + * If we have an unmapped character then we need to mangle the name... + */ + if (mc == '?') + result = kTECUsedFallbacksStatus; + + prevChar = 0; + lsb = mc; + + } else { + prevChar = lsb; + } + + if (pascalChars >= 31) + break; + + *(p++) = lsb; + ++pascalChars; + --inputChars; + + } /* end while */ + + hfs_str[0] = pascalChars; + + if (inputChars > 0) + result = ENAMETOOLONG; /* ran out of room! */ + + return result; +} + + +static UniChar gHiBitBaseUnicode[128] = { + /* 0x80 */ 0x0041, 0x0041, 0x0043, 0x0045, 0x004e, 0x004f, 0x0055, 0x0061, + /* 0x88 */ 0x0061, 0x0061, 0x0061, 0x0061, 0x0061, 0x0063, 0x0065, 0x0065, + /* 0x90 */ 0x0065, 0x0065, 0x0069, 0x0069, 0x0069, 0x0069, 0x006e, 0x006f, + /* 0x98 */ 0x006f, 0x006f, 0x006f, 0x006f, 0x0075, 0x0075, 0x0075, 0x0075, + /* 0xa0 */ 0x2020, 0x00b0, 0x00a2, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x00df, + /* 0xa8 */ 0x00ae, 0x00a9, 0x2122, 0x00b4, 0x00a8, 0x2260, 0x00c6, 0x00d8, + /* 0xb0 */ 0x221e, 0x00b1, 0x2264, 0x2265, 0x00a5, 0x00b5, 0x2202, 0x2211, + /* 0xb8 */ 0x220f, 0x03c0, 0x222b, 0x00aa, 0x00ba, 0x03a9, 0x00e6, 0x00f8, + /* 0xc0 */ 0x00bf, 0x00a1, 0x00ac, 0x221a, 0x0192, 0x2248, 0x2206, 0x00ab, + /* 0xc8 */ 0x00bb, 0x2026, 0x00a0, 0x0041, 0x0041, 0x004f, 0x0152, 0x0153, + /* 0xd0 */ 0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x25ca, + /* 0xd8 */ 0x0079, 0x0059, 0x2044, 0x20ac, 0x2039, 0x203a, 0xfb01, 0xfb02, + /* 0xe0 */ 0x2021, 0x00b7, 0x201a, 0x201e, 0x2030, 0x0041, 0x0045, 0x0041, + /* 0xe8 */ 0x0045, 0x0045, 0x0049, 0x0049, 0x0049, 0x0049, 0x004f, 0x004f, + /* 0xf0 */ 0xf8ff, 0x004f, 0x0055, 0x0055, 0x0055, 0x0131, 0x02c6, 0x02dc, + /* 0xf8 */ 0x00af, 0x02d8, 0x02d9, 0x02da, 0x00b8, 0x02dd, 0x02db, 0x02c7 +}; + +static UniChar gHiBitCombUnicode[128] = { + /* 0x80 */ 0x0308, 0x030a, 0x0327, 0x0301, 0x0303, 0x0308, 0x0308, 0x0301, + /* 0x88 */ 0x0300, 0x0302, 0x0308, 0x0303, 0x030a, 0x0327, 0x0301, 0x0300, + /* 0x90 */ 0x0302, 0x0308, 0x0301, 0x0300, 0x0302, 
0x0308, 0x0303, 0x0301, + /* 0x98 */ 0x0300, 0x0302, 0x0308, 0x0303, 0x0301, 0x0300, 0x0302, 0x0308, + /* 0xa0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xa8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xb0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xb8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xc0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xc8 */ 0x0000, 0x0000, 0x0000, 0x0300, 0x0303, 0x0303, 0x0000, 0x0000, + /* 0xd0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xd8 */ 0x0308, 0x0308, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 0xe0 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0302, 0x0302, 0x0301, + /* 0xe8 */ 0x0308, 0x0300, 0x0301, 0x0302, 0x0308, 0x0300, 0x0301, 0x0302, + /* 0xf0 */ 0x0000, 0x0300, 0x0301, 0x0302, 0x0300, 0x0000, 0x0000, 0x0000, + /* 0xf8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 +}; + + +/* + * Convert HFS MacRoman encoded string into Unicode + * + * Unicode output is fully decomposed + */ +static int mac_roman_to_unicode(Str31 hfs_str, UniChar *uni_str, + UInt32 maxCharLen, UInt32 *unicodeChars) +{ + const UInt8 *p; + UniChar *u; + UInt16 pascalChars; + UInt8 c; + + p = hfs_str; + u = uni_str; + + *unicodeChars = pascalChars = *(p++); /* pick up length byte */ + + while (pascalChars--) { + c = *(p++); + + if ( (SInt8) c >= 0 ) { /* check if seven bit ascii */ + *(u++) = (UniChar) c; /* just pad high byte with zero */ + } else { /* its a hi bit character */ + UniChar uc; + + c &= 0x7F; + *(u++) = uc = gHiBitBaseUnicode[c]; + + /* + * if the unicode character we get back is an alpha char + * then we must have an additional combining character + */ + if ((uc <= (UniChar) 'z') && (uc >= (UniChar) 'A')) { + *(u++) = gHiBitCombUnicode[c]; + ++(*unicodeChars); + } + } + } + + return noErr; +} + diff --git a/bsd/hfs/hfs_encodings.h 
b/bsd/hfs/hfs_encodings.h new file mode 100644 index 000000000..7d4e7bbc1 --- /dev/null +++ b/bsd/hfs/hfs_encodings.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-2000 Apple Computer, Inc. All Rights Reserved + */ + +#ifndef _HFS_ENCODINGS_H_ +#define _HFS_ENCODINGS_H_ + +/* + * HFS Filename Encoding Converters Interface + * + * Private Interface for adding hfs filename + * encoding converters. These are not needed + * for HFS Plus volumes (since they already + * have Unicode filenames). + * + * Used by HFS Encoding Converter Kernel Modules + * (like HFS_Japanese.kmod) to register their + * encoding conversion routines. + */ + +typedef int (* hfs_to_unicode_func_t)(Str31 hfs_str, UniChar *uni_str, + UInt32 maxCharLen, UInt32 *usedCharLen); + +typedef int (* unicode_to_hfs_func_t)(UniChar *uni_str, UInt32 unicodeChars, + Str31 hfs_str); + + +int hfs_addconverter(int kmod_id, UInt32 encoding, hfs_to_unicode_func_t get_unicode, + unicode_to_hfs_func_t get_hfsname); + +int hfs_remconverter(int kmod_id, UInt32 encoding); + + +#endif /* ! 
_HFS_ENCODINGS_H_ */ diff --git a/bsd/hfs/hfs_endian.c b/bsd/hfs/hfs_endian.c new file mode 100644 index 000000000..d01635c4e --- /dev/null +++ b/bsd/hfs/hfs_endian.c @@ -0,0 +1,517 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * hfs_endian.c + * + * This file implements endian swapping routines for the HFS/HFS Plus + * volume format. + */ + +#include + +#include "hfs_endian.h" +#include "hfs_dbg.h" + +#undef ENDIAN_DEBUG + +/* Private swapping routines */ +int hfs_swap_HFSPlusBTInternalNode (BlockDescriptor *src, HFSCatalogNodeID fileID, int unswap); +int hfs_swap_HFSBTInternalNode (BlockDescriptor *src, HFSCatalogNodeID fileID, int unswap); + +/* + * hfs_swap_HFSPlusForkData + * + * There's still a few spots where we still need to swap the fork data. 
+ */ +void +hfs_swap_HFSPlusForkData ( + HFSPlusForkData *src +) +{ + int i; + + DBG_FUNC_NAME("hfs_swap_HFSPlusForkData"); + DBG_PRINT_FUNC_NAME(); + + src->logicalSize = SWAP_BE64 (src->logicalSize); + + src->clumpSize = SWAP_BE32 (src->clumpSize); + src->totalBlocks = SWAP_BE32 (src->totalBlocks); + + for (i = 0; i < kHFSPlusExtentDensity; i++) { + src->extents[i].startBlock = SWAP_BE32 (src->extents[i].startBlock); + src->extents[i].blockCount = SWAP_BE32 (src->extents[i].blockCount); + } +} + +/* + * hfs_swap_BTNode + * + * NOTE: This operation is not naturally symmetric. + * We have to determine which way we're swapping things. + */ +int +hfs_swap_BTNode ( + BlockDescriptor *src, + int isHFSPlus, + HFSCatalogNodeID fileID, + int unswap +) +{ + BTNodeDescriptor *srcDesc = src->buffer; + UInt16 *srcOffs = NULL; + + UInt32 i; + int error = 0; + + DBG_FUNC_NAME("hfs_swap_BTNode"); + DBG_PRINT_FUNC_NAME(); + +#ifdef ENDIAN_DEBUG + if (unswap == 0) { + printf ("BE -> LE Swap\n"); + } else if (unswap == 1) { + printf ("LE -> BE Swap\n"); + } else if (unswap == 3) { + printf ("Not swapping descriptors\n"); + } else { + panic ("%s This is impossible", "hfs_swap_BTNode:"); + } +#endif + + /* If we are doing a swap */ + if (unswap == 0) { + /* Swap the node descriptor */ + srcDesc->fLink = SWAP_BE32 (srcDesc->fLink); + srcDesc->bLink = SWAP_BE32 (srcDesc->bLink); + + /* Don't swap srcDesc->kind */ + /* Don't swap srcDesc->height */ + /* Don't swap srcDesc->reserved */ + + srcDesc->numRecords = SWAP_BE16 (srcDesc->numRecords); + + /* Swap the node offsets (including the free space one!) 
*/ + srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - ((srcDesc->numRecords + 1) * sizeof (UInt16)))); + + /* Sanity check */ + if ((char *)srcOffs > ((char *)src->buffer + src->blockSize)) { + panic ("%s Too many records in the B-Tree node", "hfs_swap_BTNode:"); + } + + for (i = 0; i < srcDesc->numRecords + 1; i++) { + srcOffs[i] = SWAP_BE16 (srcOffs[i]); + + /* Sanity check */ + if (srcOffs[i] >= src->blockSize) { + panic ("%s B-Tree node offset out of range", "hfs_swap_BTNode:"); + } + } + } + + /* Swap the records (ordered by frequency of access) */ + /* Swap a B-Tree internal node */ + if ((srcDesc->kind == kBTIndexNode) || + (srcDesc-> kind == kBTLeafNode)) { + + if (isHFSPlus) { + error = hfs_swap_HFSPlusBTInternalNode (src, fileID, unswap); + } else { + error = hfs_swap_HFSBTInternalNode (src, fileID, unswap); + } + + /* Swap a B-Tree map node */ + } else if (srcDesc-> kind == kBTMapNode) { + /* Don't swap the bitmaps, they'll be done in the bitmap routines */ + + /* Swap a B-Tree header node */ + } else if (srcDesc-> kind == kBTHeaderNode) { + /* The header's offset is hard-wired because we cannot trust the offset pointers */ + BTHeaderRec *srcHead = (BTHeaderRec *)((char *)src->buffer + 14); + + srcHead->treeDepth = SWAP_BE16 (srcHead->treeDepth); + + srcHead->rootNode = SWAP_BE32 (srcHead->rootNode); + srcHead->leafRecords = SWAP_BE32 (srcHead->leafRecords); + srcHead->firstLeafNode = SWAP_BE32 (srcHead->firstLeafNode); + srcHead->lastLeafNode = SWAP_BE32 (srcHead->lastLeafNode); + + srcHead->nodeSize = SWAP_BE16 (srcHead->nodeSize); + srcHead->maxKeyLength = SWAP_BE16 (srcHead->maxKeyLength); + + srcHead->totalNodes = SWAP_BE32 (srcHead->totalNodes); + srcHead->freeNodes = SWAP_BE32 (srcHead->freeNodes); + + srcHead->clumpSize = SWAP_BE32 (srcHead->clumpSize); + srcHead->attributes = SWAP_BE32 (srcHead->attributes); + + /* Don't swap srcHead->reserved1 */ + /* Don't swap srcHead->btreeType */ + /* Don't swap srcHead->reserved2 */ + /* Don't 
swap srcHead->reserved3 */ + /* Don't swap bitmap */ + } + + /* If we are doing an unswap */ + if (unswap == 1) { + /* Swap the node descriptor */ + srcDesc->fLink = SWAP_BE32 (srcDesc->fLink); + srcDesc->bLink = SWAP_BE32 (srcDesc->bLink); + + /* Don't swap srcDesc->kind */ + /* Don't swap srcDesc->height */ + /* Don't swap srcDesc->reserved */ + + /* Swap the node offsets (including the free space one!) */ + srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - ((srcDesc->numRecords + 1) * sizeof (UInt16)))); + + /* Sanity check */ + if ((char *)srcOffs > ((char *)src->buffer + src->blockSize)) { + panic ("%s Too many records in the B-Tree node", "hfs_swap_BTNode:"); + } + + for (i = 0; i < srcDesc->numRecords + 1; i++) { + /* Sanity check */ + if (srcOffs[i] >= src->blockSize) { + panic ("%s B-Tree node offset out of range", "hfs_swap_BTNode:"); + } + + srcOffs[i] = SWAP_BE16 (srcOffs[i]); + } + + srcDesc->numRecords = SWAP_BE16 (srcDesc->numRecords); + } + + return (error); +} + +int +hfs_swap_HFSPlusBTInternalNode ( + BlockDescriptor *src, + HFSCatalogNodeID fileID, + int unswap +) +{ + BTNodeDescriptor *srcDesc = src->buffer; + UInt16 *srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - (srcDesc->numRecords * sizeof (UInt16)))); + + UInt32 i; + UInt32 j; + + DBG_FUNC_NAME("hfs_swap_HFSPlusBTInternalNode"); + DBG_PRINT_FUNC_NAME(); + + if (fileID == kHFSExtentsFileID) { + HFSPlusExtentKey *srcKey; + HFSPlusExtentDescriptor *srcRec; + + for (i = 0; i < srcDesc->numRecords; i++) { + srcKey = (HFSPlusExtentKey *)((char *)src->buffer + srcOffs[i]); + + if (!unswap) srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + srcRec = (HFSPlusExtentDescriptor *)((char *)srcKey + srcKey->keyLength + 2); + if (unswap) srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + + /* Don't swap srcKey->forkType */ + /* Don't swap srcKey->pad */ + + srcKey->fileID = SWAP_BE32 (srcKey->fileID); + srcKey->startBlock = SWAP_BE32 (srcKey->startBlock); + + /* Stop if this 
is just an index node */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcRec) = SWAP_BE32 (*((UInt32 *)srcRec)); + continue; + } + + /* Swap the extent data */ + + /* Swap each extent */ + for (j = 0; j < kHFSPlusExtentDensity; j++) { + srcRec[j].startBlock = SWAP_BE32 (srcRec[j].startBlock); + srcRec[j].blockCount = SWAP_BE32 (srcRec[j].blockCount); + } + } + + } else if (fileID == kHFSCatalogFileID) { + HFSPlusCatalogKey *srcKey; + SInt16 *srcPtr; + + for (i = 0; i < srcDesc->numRecords; i++) { + srcKey = (HFSPlusCatalogKey *)((char *)src->buffer + srcOffs[i]); + + if (!unswap) srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + srcPtr = (SInt16 *)((char *)srcKey + srcKey->keyLength + 2); + if (unswap) srcKey->keyLength = SWAP_BE16 (srcKey->keyLength); + + srcKey->parentID = SWAP_BE32 (srcKey->parentID); + + if (!unswap) srcKey->nodeName.length = SWAP_BE16 (srcKey->nodeName.length); + for (j = 0; j < srcKey->nodeName.length; j++) { + srcKey->nodeName.unicode[j] = SWAP_BE16 (srcKey->nodeName.unicode[j]); + } + if (unswap) srcKey->nodeName.length = SWAP_BE16 (srcKey->nodeName.length); + + /* Stop if this is just an index node */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcPtr) = SWAP_BE32 (*((UInt32 *)srcPtr)); + continue; + } + + /* Swap the recordType field, if unswapping, leave to later */ + if (!unswap) srcPtr[0] = SWAP_BE16 (srcPtr[0]); + + if (srcPtr[0] == kHFSPlusFolderRecord) { + HFSPlusCatalogFolder *srcRec = (HFSPlusCatalogFolder *)srcPtr; + + srcRec->flags = SWAP_BE16 (srcRec->flags); + srcRec->valence = SWAP_BE32 (srcRec->valence); + srcRec->folderID = SWAP_BE32 (srcRec->folderID); + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->contentModDate = SWAP_BE32 (srcRec->contentModDate); + srcRec->attributeModDate = SWAP_BE32 (srcRec->attributeModDate); + srcRec->accessDate = SWAP_BE32 (srcRec->accessDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + srcRec->bsdInfo.ownerID = SWAP_BE32 
(srcRec->bsdInfo.ownerID); + srcRec->bsdInfo.groupID = SWAP_BE32 (srcRec->bsdInfo.groupID); + + /* Don't swap srcRec->bsdInfo.adminFlags */ + /* Don't swap srcRec->bsdInfo.ownerFlags */ + + srcRec->bsdInfo.fileMode = SWAP_BE16 (srcRec->bsdInfo.fileMode); + srcRec->bsdInfo.special.iNodeNum = SWAP_BE32 (srcRec->bsdInfo.special.iNodeNum); + + srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding); + + /* Don't swap srcRec->userInfo */ + /* Don't swap srcRec->finderInfo */ + /* Don't swap srcRec->reserved */ + + } else if (srcPtr[0] == kHFSPlusFileRecord) { + HFSPlusCatalogFile *srcRec = (HFSPlusCatalogFile *)srcPtr; + + srcRec->flags = SWAP_BE16 (srcRec->flags); + + srcRec->fileID = SWAP_BE32 (srcRec->fileID); + + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->contentModDate = SWAP_BE32 (srcRec->contentModDate); + srcRec->attributeModDate = SWAP_BE32 (srcRec->attributeModDate); + srcRec->accessDate = SWAP_BE32 (srcRec->accessDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + srcRec->bsdInfo.ownerID = SWAP_BE32 (srcRec->bsdInfo.ownerID); + srcRec->bsdInfo.groupID = SWAP_BE32 (srcRec->bsdInfo.groupID); + + /* Don't swap srcRec->bsdInfo.adminFlags */ + /* Don't swap srcRec->bsdInfo.ownerFlags */ + + srcRec->bsdInfo.fileMode = SWAP_BE16 (srcRec->bsdInfo.fileMode); + srcRec->bsdInfo.special.iNodeNum = SWAP_BE32 (srcRec->bsdInfo.special.iNodeNum); + + srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding); + + /* Don't swap srcRec->reserved1 */ + /* Don't swap srcRec->userInfo */ + /* Don't swap srcRec->finderInfo */ + /* Don't swap srcRec->reserved2 */ + + hfs_swap_HFSPlusForkData (&srcRec->dataFork); + hfs_swap_HFSPlusForkData (&srcRec->resourceFork); + + } else if ((srcPtr[0] == kHFSPlusFolderThreadRecord) || + (srcPtr[0] == kHFSPlusFileThreadRecord)) { + + HFSPlusCatalogThread *srcRec = (HFSPlusCatalogThread *)srcPtr; + + /* Don't swap srcRec->reserved */ + + srcRec->parentID = SWAP_BE32 (srcRec->parentID); + + if (!unswap) 
srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length); + for (j = 0; j < srcRec->nodeName.length; j++) { + srcRec->nodeName.unicode[j] = SWAP_BE16 (srcRec->nodeName.unicode[j]); + } + if (unswap) srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length); + + } else { + panic ("%s unrecognized catalog record type", "hfs_swap_BTNode:"); + } + + /* If unswapping, we can safely unswap type field now */ + if (unswap) srcPtr[0] = SWAP_BE16 (srcPtr[0]); + } + + } else { + panic ("%s unrecognized B-Tree type", "hfs_swap_BTNode:"); + } + + return (0); +} + +int +hfs_swap_HFSBTInternalNode ( + BlockDescriptor *src, + HFSCatalogNodeID fileID, + int unswap +) +{ + BTNodeDescriptor *srcDesc = src->buffer; + UInt16 *srcOffs = (UInt16 *)((char *)src->buffer + (src->blockSize - (srcDesc->numRecords * sizeof (UInt16)))); + + UInt32 i; + UInt32 j; + + DBG_FUNC_NAME("hfs_swap_HFSBTInternalNode"); + DBG_PRINT_FUNC_NAME(); + + if (fileID == kHFSExtentsFileID) { + HFSExtentKey *srcKey; + HFSExtentDescriptor *srcRec; + + for (i = 0; i < srcDesc->numRecords; i++) { + srcKey = (HFSExtentKey *)((char *)src->buffer + srcOffs[i]); + + /* Don't swap srcKey->keyLength */ + /* Don't swap srcKey->forkType */ + + srcKey->fileID = SWAP_BE32 (srcKey->fileID); + srcKey->startBlock = SWAP_BE16 (srcKey->startBlock); + + /* Point to record data (round up to even byte boundary) */ + srcRec = (HFSExtentDescriptor *)((char *)srcKey + ((srcKey->keyLength + 2) & ~1)); + + /* Stop if this is just an index node */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcRec) = SWAP_BE32 (*((UInt32 *)srcRec)); + continue; + } + + /* Swap each extent */ + for (j = 0; j < kHFSExtentDensity; j++) { + srcRec[j].startBlock = SWAP_BE16 (srcRec[j].startBlock); + srcRec[j].blockCount = SWAP_BE16 (srcRec[j].blockCount); + } + } + + } else if (fileID == kHFSCatalogFileID) { + HFSCatalogKey *srcKey; + SInt16 *srcPtr; + + for (i = 0; i < srcDesc->numRecords; i++) { + srcKey = (HFSCatalogKey *)((char 
*)src->buffer + srcOffs[i]); + + /* Don't swap srcKey->keyLength */ + /* Don't swap srcKey->reserved */ + + srcKey->parentID = SWAP_BE32 (srcKey->parentID); + + /* Don't swap srcKey->nodeName */ + + /* Point to record data (round up to even byte boundary) */ + srcPtr = (SInt16 *)((char *)srcKey + ((srcKey->keyLength + 2) & ~1)); + + /* Stop if this is just an index node */ + if (srcDesc->kind == kBTIndexNode) { + *((UInt32 *)srcPtr) = SWAP_BE32 (*((UInt32 *)srcPtr)); + continue; + } + + /* Swap the recordType field, if unswapping, leave to later */ + if (!unswap) srcPtr[0] = SWAP_BE16 (srcPtr[0]); + + if (srcPtr[0] == kHFSFolderRecord) { + HFSCatalogFolder *srcRec = (HFSCatalogFolder *)srcPtr; + + srcRec->flags = SWAP_BE16 (srcRec->flags); + srcRec->valence = SWAP_BE16 (srcRec->valence); + + srcRec->folderID = SWAP_BE32 (srcRec->folderID); + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->modifyDate = SWAP_BE32 (srcRec->modifyDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + /* Don't swap srcRec->userInfo */ + /* Don't swap srcRec->finderInfo */ + /* Don't swap resserved array */ + + } else if (srcPtr[0] == kHFSFileRecord) { + HFSCatalogFile *srcRec = (HFSCatalogFile *)srcPtr; + + srcRec->flags = srcRec->flags; + srcRec->fileType = srcRec->fileType; + + /* Don't swap srcRec->userInfo */ + + srcRec->fileID = SWAP_BE32 (srcRec->fileID); + + srcRec->dataStartBlock = SWAP_BE16 (srcRec->dataStartBlock); + srcRec->dataLogicalSize = SWAP_BE32 (srcRec->dataLogicalSize); + srcRec->dataPhysicalSize = SWAP_BE32 (srcRec->dataPhysicalSize); + + srcRec->rsrcStartBlock = SWAP_BE16 (srcRec->rsrcStartBlock); + srcRec->rsrcLogicalSize = SWAP_BE32 (srcRec->rsrcLogicalSize); + srcRec->rsrcPhysicalSize = SWAP_BE32 (srcRec->rsrcPhysicalSize); + + srcRec->createDate = SWAP_BE32 (srcRec->createDate); + srcRec->modifyDate = SWAP_BE32 (srcRec->modifyDate); + srcRec->backupDate = SWAP_BE32 (srcRec->backupDate); + + /* Don't swap srcRec->finderInfo */ + + 
srcRec->clumpSize = SWAP_BE16 (srcRec->clumpSize); + + /* Swap the two sets of extents as an array of six (three each) UInt16 */ + for (j = 0; j < kHFSExtentDensity * 2; j++) { + srcRec->dataExtents[j].startBlock = SWAP_BE16 (srcRec->dataExtents[j].startBlock); + srcRec->dataExtents[j].blockCount = SWAP_BE16 (srcRec->dataExtents[j].blockCount); + } + + /* Don't swap srcRec->reserved */ + + } else if ((srcPtr[0] == kHFSFolderThreadRecord) || + (srcPtr[0] == kHFSFileThreadRecord)) { + + HFSCatalogThread *srcRec = (HFSCatalogThread *)srcPtr; + + /* Don't swap srcRec->reserved array */ + + srcRec->parentID = SWAP_BE32 (srcRec->parentID); + + /* Don't swap srcRec->nodeName */ + + } else { + panic ("%s unrecognized catalog record type", "hfs_swap_BTNode:"); + } + + /* If unswapping, we can safely swap type now */ + if (unswap) srcPtr[0] = SWAP_BE16 (srcPtr[0]); + } + + } else { + panic ("%s unrecognized B-Tree type", "hfs_swap_BTNode:"); + } + + return (0); +} diff --git a/bsd/hfs/hfs_endian.h b/bsd/hfs/hfs_endian.h new file mode 100644 index 000000000..f20851d99 --- /dev/null +++ b/bsd/hfs/hfs_endian.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HFS_ENDIAN_H__ +#define __HFS_ENDIAN_H__ + +/* + * hfs_endian.h + * + * This file prototypes endian swapping routines for the HFS/HFS Plus + * volume format. + */ +#include "hfs.h" +#include "hfscommon/headers/BTreesInternal.h" +#include + +/*********************/ +/* BIG ENDIAN Macros */ +/*********************/ +#if BYTE_ORDER == BIG_ENDIAN + + /* HFS is always big endian, make swaps into no-ops */ + #define SWAP_BE16(__a) (__a) + #define SWAP_BE32(__a) (__a) + #define SWAP_BE64(__a) (__a) + + /* HFS is always big endian, no swapping needed */ + #define SWAP_HFS_PLUS_FORK_DATA(__a) + #define SWAP_BT_NODE(__a, __b, __c) + +/************************/ +/* LITTLE ENDIAN Macros */ +/************************/ +#elif BYTE_ORDER == LITTLE_ENDIAN + + /* HFS is always big endian, make swaps actually swap */ + #define SWAP_BE16(__a) NXSwapBigShortToHost (__a) + #define SWAP_BE32(__a) NXSwapBigLongToHost (__a) + #define SWAP_BE64(__a) NXSwapBigLongLongToHost (__a) + + #define SWAP_HFS_PLUS_FORK_DATA(__a) hfs_swap_HFSPlusForkData ((__a)) + #define SWAP_BT_NODE(__a, __b, __c, __d) hfs_swap_BTNode ((__a), (__b), (__c), (__d)) + +#else +#warning Unknown byte order +#error +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +void hfs_swap_HFSPlusForkData (HFSPlusForkData *src); +int hfs_swap_BTNode (BlockDescriptor *src, int isHFSPlus, HFSCatalogNodeID fileID, int unswap); + +#ifdef __cplusplus +} +#endif + +#endif /* __HFS_FORMAT__ */ diff --git a/bsd/hfs/hfs_format.h b/bsd/hfs/hfs_format.h new file mode 100644 index 000000000..c9840f64f --- /dev/null +++ b/bsd/hfs/hfs_format.h @@ -0,0 +1,605 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef __HFS_FORMAT__
#define __HFS_FORMAT__

/*
 * hfs_format.h
 *
 * This file describes the on-disk format for HFS and HFS Plus volumes.
 * The HFS Plus volume format is described in detail in Apple Technote 1150.
 *
 * http://developer.apple.com/technotes/tn/tn1150.html
 *
 */

#ifdef __cplusplus
extern "C" {
#endif

/* some on-disk hfs structures have 68K alignment (misaligned) */
#pragma options align=mac68k

/* Signatures used to differentiate between HFS and HFS Plus volumes */
enum {
	kHFSSigWord		= 0x4244,	/* 'BD' in ASCII */
	kHFSPlusSigWord		= 0x482B,	/* 'H+' in ASCII */
	kHFSPlusVersion		= 0x0004,	/* will change as format changes */
					/* version 4 shipped with Mac OS 8.1 */
	kHFSPlusMountVersion	= 0x31302E30	/* '10.0' for Mac OS X */
};


/*
 * Mac OS X has a special directory for linked and unlinked files (HFS Plus only).
 * This directory and its contents are never exported from the filesystem under
 * Mac OS X.
 *
 * To make this folder name sort last, it has an embedded null prefix.
+ * (0xC0, 0x80 in UTF-8) + */ +#define HFSPLUSMETADATAFOLDER "\xC0\x80\xC0\x80\xC0\x80\xC0\x80HFS+ Private Data" + +/* + * Files in the HFS Private Data folder have one of the following prefixes + * followed by a decimal number (no leading zeros). For indirect nodes this + * number is a 32 bit random number. For unlinked (deleted) files that are + * still open, the number is the file ID for that file. + * + * e.g. iNode7182000 and temp3296 + */ +#define HFS_INODE_PREFIX "iNode" +#define HFS_DELETE_PREFIX "temp" + +/* + * Indirect link files (hard links) have the following type/creator. + */ +enum { + kHardLinkFileType = 0x686C6E6B, /* 'hlnk' */ + kHFSPlusCreator = 0x6866732B /* 'hfs+' */ +}; + + +/* Unicode strings are used for HFS Plus file and folder names */ +struct HFSUniStr255 { + u_int16_t length; /* number of unicode characters */ + u_int16_t unicode[255]; /* unicode characters */ +}; +typedef struct HFSUniStr255 HFSUniStr255; +typedef const HFSUniStr255 *ConstHFSUniStr255Param; + +enum { + kHFSMaxVolumeNameChars = 27, + kHFSMaxFileNameChars = 31, + kHFSPlusMaxFileNameChars = 255 +}; + + +/* Extent overflow file data structures */ + +/* HFS Extent key */ +struct HFSExtentKey { + u_int8_t keyLength; /* length of key, excluding this field */ + u_int8_t forkType; /* 0 = data fork, FF = resource fork */ + u_int32_t fileID; /* file ID */ + u_int16_t startBlock; /* first file allocation block number in this extent */ +}; +typedef struct HFSExtentKey HFSExtentKey; + +/* HFS Plus Extent key */ +struct HFSPlusExtentKey { + u_int16_t keyLength; /* length of key, excluding this field */ + u_int8_t forkType; /* 0 = data fork, FF = resource fork */ + u_int8_t pad; /* make the other fields align on 32-bit boundary */ + u_int32_t fileID; /* file ID */ + u_int32_t startBlock; /* first file allocation block number in this extent */ +}; +typedef struct HFSPlusExtentKey HFSPlusExtentKey; + +/* Number of extent descriptors per extent record */ +enum { + kHFSExtentDensity = 3, 
+ kHFSPlusExtentDensity = 8 +}; + +/* HFS extent descriptor */ +struct HFSExtentDescriptor { + u_int16_t startBlock; /* first allocation block */ + u_int16_t blockCount; /* number of allocation blocks */ +}; +typedef struct HFSExtentDescriptor HFSExtentDescriptor; + +/* HFS Plus extent descriptor */ +struct HFSPlusExtentDescriptor { + u_int32_t startBlock; /* first allocation block */ + u_int32_t blockCount; /* number of allocation blocks */ +}; +typedef struct HFSPlusExtentDescriptor HFSPlusExtentDescriptor; + +/* HFS extent record */ +typedef HFSExtentDescriptor HFSExtentRecord[3]; + +/* HFS Plus extent record */ +typedef HFSPlusExtentDescriptor HFSPlusExtentRecord[8]; + + +/* Finder information */ +struct FndrFileInfo { + u_int32_t fdType; /* file type */ + u_int32_t fdCreator; /* file creator */ + u_int16_t fdFlags; /* Finder flags */ + struct { + int16_t v; /* file's location */ + int16_t h; + } fdLocation; + int16_t opaque; +}; +typedef struct FndrFileInfo FndrFileInfo; + +struct FndrDirInfo { + struct { /* folder's window rectangle */ + int16_t top; + int16_t left; + int16_t bottom; + int16_t right; + } frRect; + unsigned short frFlags; /* Finder flags */ + struct { + u_int16_t v; /* folder's location */ + u_int16_t h; + } frLocation; + int16_t opaque; +}; +typedef struct FndrDirInfo FndrDirInfo; + +struct FndrOpaqueInfo { + int8_t opaque[16]; +}; +typedef struct FndrOpaqueInfo FndrOpaqueInfo; + + +/* HFS Plus Fork data info - 80 bytes */ +struct HFSPlusForkData { + u_int64_t logicalSize; /* fork's logical size in bytes */ + u_int32_t clumpSize; /* fork's clump size in bytes */ + u_int32_t totalBlocks; /* total blocks used by this fork */ + HFSPlusExtentRecord extents; /* initial set of extents */ +}; +typedef struct HFSPlusForkData HFSPlusForkData; + + +/* Mac OS X has 16 bytes worth of "BSD" info. + * + * Note: Mac OS 9 implementations and applications + * should preserve, but not change, this information. 
+ */ +struct HFSPlusBSDInfo { + u_int32_t ownerID; /* user or group ID of file/folder owner */ + u_int32_t groupID; /* additional user of group ID */ + u_int8_t adminFlags; /* super-user changeable flags */ + u_int8_t ownerFlags; /* owner changeable flags */ + u_int16_t fileMode; /* file type and permission bits */ + union { + u_int32_t iNodeNum; /* indirect node number (hard links only) */ + u_int32_t linkCount; /* links that refer to this indirect node */ + u_int32_t rawDevice; /* special file device (FBLK and FCHR only) */ + } special; +}; +typedef struct HFSPlusBSDInfo HFSPlusBSDInfo; + + +/* Catalog file data structures */ + +enum { + kHFSRootParentID = 1, /* Parent ID of the root folder */ + kHFSRootFolderID = 2, /* Folder ID of the root folder */ + kHFSExtentsFileID = 3, /* File ID of the extents file */ + kHFSCatalogFileID = 4, /* File ID of the catalog file */ + kHFSBadBlockFileID = 5, /* File ID of the bad allocation block file */ + kHFSAllocationFileID = 6, /* File ID of the allocation file (HFS Plus only) */ + kHFSStartupFileID = 7, /* File ID of the startup file (HFS Plus only) */ + kHFSAttributesFileID = 8, /* File ID of the attribute file (HFS Plus only) */ + kHFSBogusExtentFileID = 15, /* Used for exchanging extents in extents file */ + kHFSFirstUserCatalogNodeID = 16 +}; + +/* HFS catalog key */ +struct HFSCatalogKey { + u_int8_t keyLength; /* key length (in bytes) */ + u_int8_t reserved; /* reserved (set to zero) */ + u_int32_t parentID; /* parent folder ID */ + u_char nodeName[kHFSMaxFileNameChars + 1]; /* catalog node name */ +}; +typedef struct HFSCatalogKey HFSCatalogKey; + +/* HFS Plus catalog key */ +struct HFSPlusCatalogKey { + u_int16_t keyLength; /* key length (in bytes) */ + u_int32_t parentID; /* parent folder ID */ + HFSUniStr255 nodeName; /* catalog node name */ +}; +typedef struct HFSPlusCatalogKey HFSPlusCatalogKey; + +/* Catalog record types */ +enum { + /* HFS Catalog Records */ + kHFSFolderRecord = 0x0100, /* Folder record */ + 
kHFSFileRecord = 0x0200, /* File record */ + kHFSFolderThreadRecord = 0x0300, /* Folder thread record */ + kHFSFileThreadRecord = 0x0400, /* File thread record */ + + /* HFS Plus Catalog Records */ + kHFSPlusFolderRecord = 1, /* Folder record */ + kHFSPlusFileRecord = 2, /* File record */ + kHFSPlusFolderThreadRecord = 3, /* Folder thread record */ + kHFSPlusFileThreadRecord = 4 /* File thread record */ +}; + + +/* Catalog file record flags */ +enum { + kHFSFileLockedBit = 0x0000, /* file is locked and cannot be written to */ + kHFSFileLockedMask = 0x0001, + kHFSThreadExistsBit = 0x0001, /* a file thread record exists for this file */ + kHFSThreadExistsMask = 0x0002 +}; + + +/* HFS catalog folder record - 70 bytes */ +struct HFSCatalogFolder { + int16_t recordType; /* == kHFSFolderRecord */ + u_int16_t flags; /* folder flags */ + u_int16_t valence; /* folder valence */ + u_int32_t folderID; /* folder ID */ + u_int32_t createDate; /* date and time of creation */ + u_int32_t modifyDate; /* date and time of last modification */ + u_int32_t backupDate; /* date and time of last backup */ + FndrDirInfo userInfo; /* Finder information */ + FndrOpaqueInfo finderInfo; /* additional Finder information */ + u_int32_t reserved[4]; /* reserved - initialized as zero */ +}; +typedef struct HFSCatalogFolder HFSCatalogFolder; + +/* HFS Plus catalog folder record - 88 bytes */ +struct HFSPlusCatalogFolder { + int16_t recordType; /* == kHFSPlusFolderRecord */ + u_int16_t flags; /* file flags */ + u_int32_t valence; /* folder's valence (limited to 2^16 in Mac OS) */ + u_int32_t folderID; /* folder ID */ + u_int32_t createDate; /* date and time of creation */ + u_int32_t contentModDate; /* date and time of last content modification */ + u_int32_t attributeModDate; /* date and time of last attribute modification */ + u_int32_t accessDate; /* date and time of last access (MacOS X only) */ + u_int32_t backupDate; /* date and time of last backup */ + HFSPlusBSDInfo bsdInfo; /* permissions 
(for MacOS X) */ + FndrDirInfo userInfo; /* Finder information */ + FndrOpaqueInfo finderInfo; /* additional Finder information */ + u_int32_t textEncoding; /* hint for name conversions */ + u_int32_t reserved; /* reserved - initialized as zero */ +}; +typedef struct HFSPlusCatalogFolder HFSPlusCatalogFolder; + +/* HFS catalog file record - 102 bytes */ +struct HFSCatalogFile { + int16_t recordType; /* == kHFSFileRecord */ + u_int8_t flags; /* file flags */ + int8_t fileType; /* file type (unused ?) */ + FndrFileInfo userInfo; /* Finder information */ + u_int32_t fileID; /* file ID */ + u_int16_t dataStartBlock; /* not used - set to zero */ + int32_t dataLogicalSize; /* logical EOF of data fork */ + int32_t dataPhysicalSize; /* physical EOF of data fork */ + u_int16_t rsrcStartBlock; /* not used - set to zero */ + int32_t rsrcLogicalSize; /* logical EOF of resource fork */ + int32_t rsrcPhysicalSize; /* physical EOF of resource fork */ + u_int32_t createDate; /* date and time of creation */ + u_int32_t modifyDate; /* date and time of last modification */ + u_int32_t backupDate; /* date and time of last backup */ + FndrOpaqueInfo finderInfo; /* additional Finder information */ + u_int16_t clumpSize; /* file clump size (not used) */ + HFSExtentRecord dataExtents; /* first data fork extent record */ + HFSExtentRecord rsrcExtents; /* first resource fork extent record */ + u_int32_t reserved; /* reserved - initialized as zero */ +}; +typedef struct HFSCatalogFile HFSCatalogFile; + +/* HFS Plus catalog file record - 248 bytes */ +struct HFSPlusCatalogFile { + int16_t recordType; /* == kHFSPlusFileRecord */ + u_int16_t flags; /* file flags */ + u_int32_t reserved1; /* reserved - initialized as zero */ + u_int32_t fileID; /* file ID */ + u_int32_t createDate; /* date and time of creation */ + u_int32_t contentModDate; /* date and time of last content modification */ + u_int32_t attributeModDate; /* date and time of last attribute modification */ + u_int32_t accessDate; /* 
date and time of last access (MacOS X only) */ + u_int32_t backupDate; /* date and time of last backup */ + HFSPlusBSDInfo bsdInfo; /* permissions (for MacOS X) */ + FndrFileInfo userInfo; /* Finder information */ + FndrOpaqueInfo finderInfo; /* additional Finder information */ + u_int32_t textEncoding; /* hint for name conversions */ + u_int32_t reserved2; /* reserved - initialized as zero */ + + /* Note: these start on double long (64 bit) boundry */ + HFSPlusForkData dataFork; /* size and block data for data fork */ + HFSPlusForkData resourceFork; /* size and block data for resource fork */ +}; +typedef struct HFSPlusCatalogFile HFSPlusCatalogFile; + +/* HFS catalog thread record - 46 bytes */ +struct HFSCatalogThread { + int16_t recordType; /* == kHFSFolderThreadRecord or kHFSFileThreadRecord */ + int32_t reserved[2]; /* reserved - initialized as zero */ + u_int32_t parentID; /* parent ID for this catalog node */ + u_char nodeName[kHFSMaxFileNameChars + 1]; /* name of this catalog node */ +}; +typedef struct HFSCatalogThread HFSCatalogThread; + +/* HFS Plus catalog thread record -- 264 bytes */ +struct HFSPlusCatalogThread { + int16_t recordType; /* == kHFSPlusFolderThreadRecord or kHFSPlusFileThreadRecord */ + int16_t reserved; /* reserved - initialized as zero */ + u_int32_t parentID; /* parent ID for this catalog node */ + HFSUniStr255 nodeName; /* name of this catalog node (variable length) */ +}; +typedef struct HFSPlusCatalogThread HFSPlusCatalogThread; + + +/* + These are the types of records in the attribute B-tree. The values were + chosen so that they wouldn't conflict with the catalog record types. +*/ +enum { + kHFSPlusAttrInlineData = 0x10, /* if size < kAttrOverflowSize */ + kHFSPlusAttrForkData = 0x20, /* if size >= kAttrOverflowSize */ + kHFSPlusAttrExtents = 0x30 /* overflow extents for large attributes */ +}; + + +/* + HFSPlusAttrInlineData + For small attributes, whose entire value is stored within this one + B-tree record. 
+ There would not be any other records for this attribute. +*/ +struct HFSPlusAttrInlineData { + u_int32_t recordType; /* == kHFSPlusAttrInlineData*/ + u_int32_t reserved; + u_int32_t logicalSize; /* size in bytes of userData*/ + u_int8_t userData[2]; /* variable length; space allocated is a multiple of 2 bytes*/ +}; +typedef struct HFSPlusAttrInlineData HFSPlusAttrInlineData; + + +/* + HFSPlusAttrForkData + For larger attributes, whose value is stored in allocation blocks. + If the attribute has more than 8 extents, there will be additonal + records (of type HFSPlusAttrExtents) for this attribute. +*/ +struct HFSPlusAttrForkData { + u_int32_t recordType; /* == kHFSPlusAttrForkData*/ + u_int32_t reserved; + HFSPlusForkData theFork; /* size and first extents of value*/ +}; +typedef struct HFSPlusAttrForkData HFSPlusAttrForkData; + +/* + HFSPlusAttrExtents + This record contains information about overflow extents for large, + fragmented attributes. +*/ +struct HFSPlusAttrExtents { + u_int32_t recordType; /* == kHFSPlusAttrExtents*/ + u_int32_t reserved; + HFSPlusExtentRecord extents; /* additional extents*/ +}; +typedef struct HFSPlusAttrExtents HFSPlusAttrExtents; + +/* A generic Attribute Record*/ +union HFSPlusAttrRecord { + u_int32_t recordType; + HFSPlusAttrInlineData inlineData; + HFSPlusAttrForkData forkData; + HFSPlusAttrExtents overflowExtents; +}; +typedef union HFSPlusAttrRecord HFSPlusAttrRecord; + +/* Key and node lengths */ +enum { + kHFSPlusExtentKeyMaximumLength = sizeof(HFSPlusExtentKey) - sizeof(u_int16_t), + kHFSExtentKeyMaximumLength = sizeof(HFSExtentKey) - sizeof(u_int8_t), + kHFSPlusCatalogKeyMaximumLength = sizeof(HFSPlusCatalogKey) - sizeof(u_int16_t), + kHFSPlusCatalogKeyMinimumLength = kHFSPlusCatalogKeyMaximumLength - sizeof(HFSUniStr255) + sizeof(u_int16_t), + kHFSCatalogKeyMaximumLength = sizeof(HFSCatalogKey) - sizeof(u_int8_t), + kHFSCatalogKeyMinimumLength = kHFSCatalogKeyMaximumLength - (kHFSMaxFileNameChars + 1) + sizeof(u_int8_t), 
+ kHFSPlusCatalogMinNodeSize = 4096, + kHFSPlusExtentMinNodeSize = 512, + kHFSPlusAttrMinNodeSize = 4096 +}; + + +/* HFS and HFS Plus volume attribute bits */ +enum { + /* Bits 0-6 are reserved (always cleared by MountVol call) */ + kHFSVolumeHardwareLockBit = 7, /* volume is locked by hardware */ + kHFSVolumeUnmountedBit = 8, /* volume was successfully unmounted */ + kHFSVolumeSparedBlocksBit = 9, /* volume has bad blocks spared */ + kHFSVolumeNoCacheRequiredBit = 10, /* don't cache volume blocks (i.e. RAM or ROM disk) */ + kHFSBootVolumeInconsistentBit = 11, /* boot volume is inconsistent (System 7.6 and later) */ + kHFSCatalogNodeIDsReusedBit = 12, + /* Bits 13-14 are reserved for future use */ + kHFSVolumeSoftwareLockBit = 15, /* volume is locked by software */ + + kHFSVolumeHardwareLockMask = 1 << kHFSVolumeHardwareLockBit, + kHFSVolumeUnmountedMask = 1 << kHFSVolumeUnmountedBit, + kHFSVolumeSparedBlocksMask = 1 << kHFSVolumeSparedBlocksBit, + kHFSVolumeNoCacheRequiredMask = 1 << kHFSVolumeNoCacheRequiredBit, + kHFSBootVolumeInconsistentMask = 1 << kHFSBootVolumeInconsistentBit, + kHFSCatalogNodeIDsReusedMask = 1 << kHFSCatalogNodeIDsReusedBit, + kHFSVolumeSoftwareLockMask = 1 << kHFSVolumeSoftwareLockBit, + kHFSMDBAttributesMask = 0x8380 +}; + + +/* HFS Master Directory Block - 162 bytes */ +/* Stored at sector #2 (3rd sector) and second-to-last sector. 
*/ +struct HFSMasterDirectoryBlock { + u_int16_t drSigWord; /* == kHFSSigWord */ + u_int32_t drCrDate; /* date and time of volume creation */ + u_int32_t drLsMod; /* date and time of last modification */ + u_int16_t drAtrb; /* volume attributes */ + u_int16_t drNmFls; /* number of files in root folder */ + u_int16_t drVBMSt; /* first block of volume bitmap */ + u_int16_t drAllocPtr; /* start of next allocation search */ + u_int16_t drNmAlBlks; /* number of allocation blocks in volume */ + u_int32_t drAlBlkSiz; /* size (in bytes) of allocation blocks */ + u_int32_t drClpSiz; /* default clump size */ + u_int16_t drAlBlSt; /* first allocation block in volume */ + u_int32_t drNxtCNID; /* next unused catalog node ID */ + u_int16_t drFreeBks; /* number of unused allocation blocks */ + u_char drVN[kHFSMaxVolumeNameChars + 1]; /* volume name */ + u_int32_t drVolBkUp; /* date and time of last backup */ + u_int16_t drVSeqNum; /* volume backup sequence number */ + u_int32_t drWrCnt; /* volume write count */ + u_int32_t drXTClpSiz; /* clump size for extents overflow file */ + u_int32_t drCTClpSiz; /* clump size for catalog file */ + u_int16_t drNmRtDirs; /* number of directories in root folder */ + u_int32_t drFilCnt; /* number of files in volume */ + u_int32_t drDirCnt; /* number of directories in volume */ + u_int32_t drFndrInfo[8]; /* information used by the Finder */ + u_int16_t drEmbedSigWord; /* embedded volume signature (formerly drVCSize) */ + HFSExtentDescriptor drEmbedExtent; /* embedded volume location and size (formerly drVBMCSize and drCtlCSize) */ + u_int32_t drXTFlSize; /* size of extents overflow file */ + HFSExtentRecord drXTExtRec; /* extent record for extents overflow file */ + u_int32_t drCTFlSize; /* size of catalog file */ + HFSExtentRecord drCTExtRec; /* extent record for catalog file */ +}; +typedef struct HFSMasterDirectoryBlock HFSMasterDirectoryBlock; + + +/* HFS Plus Volume Header - 512 bytes */ +/* Stored at sector #2 (3rd sector) and 
second-to-last sector. */ +struct HFSPlusVolumeHeader { + u_int16_t signature; /* == kHFSPlusSigWord */ + u_int16_t version; /* == kHFSPlusVersion */ + u_int32_t attributes; /* volume attributes */ + u_int32_t lastMountedVersion; /* implementation version which last mounted volume */ + u_int32_t reserved; /* reserved - initialized as zero */ + + u_int32_t createDate; /* date and time of volume creation */ + u_int32_t modifyDate; /* date and time of last modification */ + u_int32_t backupDate; /* date and time of last backup */ + u_int32_t checkedDate; /* date and time of last disk check */ + + u_int32_t fileCount; /* number of files in volume */ + u_int32_t folderCount; /* number of directories in volume */ + + u_int32_t blockSize; /* size (in bytes) of allocation blocks */ + u_int32_t totalBlocks; /* number of allocation blocks in volume (includes this header and VBM*/ + u_int32_t freeBlocks; /* number of unused allocation blocks */ + + u_int32_t nextAllocation; /* start of next allocation search */ + u_int32_t rsrcClumpSize; /* default resource fork clump size */ + u_int32_t dataClumpSize; /* default data fork clump size */ + u_int32_t nextCatalogID; /* next unused catalog node ID */ + + u_int32_t writeCount; /* volume write count */ + u_int64_t encodingsBitmap; /* which encodings have been use on this volume */ + + u_int8_t finderInfo[32]; /* information used by the Finder */ + + HFSPlusForkData allocationFile; /* allocation bitmap file */ + HFSPlusForkData extentsFile; /* extents B-tree file */ + HFSPlusForkData catalogFile; /* catalog B-tree file */ + HFSPlusForkData attributesFile; /* extended attributes B-tree file */ + HFSPlusForkData startupFile; /* boot file (secondary loader) */ +}; +typedef struct HFSPlusVolumeHeader HFSPlusVolumeHeader; + + +/* B-tree structures */ + +enum BTreeKeyLimits{ + kMaxKeyLength = 520 +}; + +union BTreeKey{ + u_int8_t length8; + u_int16_t length16; + u_int8_t rawData [kMaxKeyLength+2]; +}; +typedef union BTreeKey BTreeKey; + 
+/* BTNodeDescriptor -- Every B-tree node starts with these fields. */ +struct BTNodeDescriptor { + u_int32_t fLink; /* next node at this level*/ + u_int32_t bLink; /* previous node at this level*/ + int8_t kind; /* kind of node (leaf, index, header, map)*/ + u_int8_t height; /* zero for header, map; child is one more than parent*/ + u_int16_t numRecords; /* number of records in this node*/ + u_int16_t reserved; /* reserved - initialized as zero */ +}; +typedef struct BTNodeDescriptor BTNodeDescriptor; + +/* Constants for BTNodeDescriptor kind */ +enum { + kBTLeafNode = -1, + kBTIndexNode = 0, + kBTHeaderNode = 1, + kBTMapNode = 2 +}; + +/* BTHeaderRec -- The first record of a B-tree header node */ +struct BTHeaderRec { + u_int16_t treeDepth; /* maximum height (usually leaf nodes) */ + u_int32_t rootNode; /* node number of root node */ + u_int32_t leafRecords; /* number of leaf records in all leaf nodes */ + u_int32_t firstLeafNode; /* node number of first leaf node */ + u_int32_t lastLeafNode; /* node number of last leaf node */ + u_int16_t nodeSize; /* size of a node, in bytes */ + u_int16_t maxKeyLength; /* reserved */ + u_int32_t totalNodes; /* total number of nodes in tree */ + u_int32_t freeNodes; /* number of unused (free) nodes in tree */ + u_int16_t reserved1; /* unused */ + u_int32_t clumpSize; /* reserved */ + u_int8_t btreeType; /* reserved */ + u_int8_t reserved2; /* reserved */ + u_int32_t attributes; /* persistent attributes about the tree */ + u_int32_t reserved3[16]; /* reserved */ +}; +typedef struct BTHeaderRec BTHeaderRec; + +/* Constants for BTHeaderRec attributes */ +enum { + kBTBadCloseMask = 0x00000001, /* reserved */ + kBTBigKeysMask = 0x00000002, /* key length field is 16 bits */ + kBTVariableIndexKeysMask = 0x00000004 /* keys in index nodes are variable length */ +}; + +#pragma options align=reset + +#ifdef __cplusplus +} +#endif + +#endif /* __HFS_FORMAT__ */ diff --git a/bsd/hfs/hfs_link.c b/bsd/hfs/hfs_link.c new file mode 100644 index 
000000000..b3e91dfdd --- /dev/null +++ b/bsd/hfs/hfs_link.c @@ -0,0 +1,273 @@ +/* + * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#if HFS_HARDLINKS + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hfs.h" +#include "hfscommon/headers/FileMgrInternal.h" + + +/* + * Create a new indirect link + * + * An indirect link is a reference to a data node. The only useable fields in the + * link are the parentID, name and text encoding. All other catalog fields + * are ignored. 
 */
static int
createindirectlink(struct hfsnode *dnhp, UInt32 linkPID, char *linkName)
{
	struct hfsCatalogInfo catInfo;
	struct FInfo *fip;
	ExtendedVCB *vcb;
	int result;

	vcb = HTOVCB(dnhp);

	/* Create the indirect link directly in the catalog */
	result = hfsCreate(vcb, linkPID, linkName, IFREG);
	if (result) return (result);

	/*
	 * XXX SER Here is a good example where hfsCreate should pass in a catinfo and return
	 * things like the hint and file ID there should be no reason to call lookup here
	 */
	catInfo.hint = 0;
	INIT_CATALOGDATA(&catInfo.nodeData, kCatNameNoCopyName);

	result = hfs_getcatalog(vcb, linkPID, linkName, -1, &catInfo);
	if (result) goto errExit;

	/* Mark the new catalog node with the hard-link type/creator */
	fip = (struct FInfo *)&catInfo.nodeData.cnd_finderInfo;
	fip->fdType = kHardLinkFileType;	/* 'hlnk' */
	fip->fdCreator = kHFSPlusCreator;	/* 'hfs+' */
	fip->fdFlags |= kHasBeenInited;

	/* links are matched to data nodes by nodeID and to volumes by create date */
	catInfo.nodeData.cnd_iNodeNum = dnhp->h_meta->h_indnodeno;
	catInfo.nodeData.cnd_createDate = vcb->vcbCrDate;

	result = UpdateCatalogNode(vcb, linkPID, linkName, catInfo.hint, &catInfo.nodeData);
	if (result) goto errExit;

	CLEAN_CATALOGDATA(&catInfo.nodeData);
	return (0);

errExit:
	CLEAN_CATALOGDATA(&catInfo.nodeData);

	/* get rid of the link node created above, so no partial link is left behind */
	(void) hfsDelete(vcb, linkPID, linkName, TRUE, 0);

	return (result);
}


/*
 * 2 locks are needed (dvp and hp)
 * also need catalog lock
 *
 * caller's responsibility:
 *	componentname cleanup
 *	unlocking dvp and hp
 */
static int
hfs_makelink(hp, dvp, cnp)
	struct hfsnode *hp;
	struct vnode *dvp;
	register struct componentname *cnp;
{
	struct proc *p = cnp->cn_proc;
	struct hfsnode *dhp = VTOH(dvp);
	u_int32_t ldirID;	/* directory ID of linked nodes directory */
	ExtendedVCB *vcb = VTOVCB(dvp);
	u_int32_t hint;
	u_int32_t indnodeno = 0;
	char inodename[32];
	int retval;

	ldirID = VTOHFS(dvp)->hfs_private_metadata_dir;

	/* We don't allow link nodes in our Private Meta Data folder! */
	if ( H_FILEID(dhp) == ldirID)
		return (EPERM);

	if (vcb->freeBlocks == 0)
		return (ENOSPC);

	/* lock catalog b-tree */
	retval = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (retval != E_NONE)
		return retval;

	/*
	 * If this is a new hardlink then we need to create the data
	 * node (inode) and replace the original file with a link node.
	 */
	if (hp->h_meta->h_nlink == 1) {
		do {
			/* get a unique indirect node number; retry if it collides */
			indnodeno = ((random() & 0x3fffffff) + 100);
			MAKE_INODE_NAME(inodename, indnodeno);

			/* move source file to data node directory */
			hint = 0;
			retval = hfsMoveRename(vcb, H_DIRID(hp), H_NAME(hp), ldirID, inodename, &hint);
		} while (retval == cmExists);

		if (retval) goto out;

		hp->h_meta->h_indnodeno = indnodeno;

		/* replace source file with link node */
		retval = createindirectlink(hp, H_DIRID(hp), H_NAME(hp));
		if (retval) {
			/* roll back: put the source file back in its original place */
			hint = 0;
			(void) hfsMoveRename(vcb, ldirID, inodename, H_DIRID(hp), H_NAME(hp), &hint);
			goto out;
		}
	}

	/*
	 * Create a catalog entry for the new link (parentID + name).
	 */
	retval = createindirectlink(hp, H_FILEID(dhp), cnp->cn_nameptr);
	if (retval && hp->h_meta->h_nlink == 1) {
		/* get rid of new link */
		(void) hfsDelete(vcb, H_DIRID(hp), H_NAME(hp), TRUE, 0);

		/* roll back: put the source file back in its original place */
		hint = 0;
		(void) hfsMoveRename(vcb, ldirID, inodename, H_DIRID(hp), H_NAME(hp), &hint);
		goto out;
	}

	/*
	 * Finally, if this is a new hardlink then we need to mark the hfs node
	 */
	if (hp->h_meta->h_nlink == 1) {
		hp->h_meta->h_nlink++;
		hp->h_nodeflags |= IN_CHANGE;
		hp->h_meta->h_metaflags |= IN_DATANODE;
	}

out:
	/* unlock catalog b-tree */
	(void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);

	return (retval);
}


/*
 * link vnode call
#% link vp U U U
#% link tdvp L U U
#
 vop_link {
     IN WILLRELE struct vnode *vp;
     IN struct vnode *targetPar_vp;
     IN struct componentname *cnp;

 */
int
hfs_link(ap)
struct vop_link_args /* {
	struct vnode *a_vp;
	struct vnode *a_tdvp;
	struct componentname *a_cnp;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *tdvp = ap->a_tdvp;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct hfsnode *hp;
	struct timeval tv;
	int error;

#if HFS_DIAGNOSTIC
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("hfs_link: no name");
#endif
	if (tdvp->v_mount != vp->v_mount) {
		VOP_ABORTOP(tdvp, cnp);
		error = EXDEV;
		goto out2;
	}
	if (VTOVCB(tdvp)->vcbSigWord != kHFSPlusSigWord)
		return err_link(ap);	/* hfs disks don't support hard links */

	if (VTOHFS(vp)->hfs_private_metadata_dir == 0)
		return err_link(ap);	/* no private metadata dir, no links possible */

	if (tdvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) {
		VOP_ABORTOP(tdvp, cnp);
		goto out2;
	}
	hp = VTOH(vp);
	if (hp->h_meta->h_nlink >= HFS_LINK_MAX) {
		VOP_ABORTOP(tdvp, cnp);
		error = EMLINK;
		goto out1;
	}
	if (hp->h_meta->h_pflags & (IMMUTABLE | APPEND)) {
		VOP_ABORTOP(tdvp, cnp);
		error = EPERM;
		goto out1;
} + if (vp->v_type == VBLK || vp->v_type == VCHR) { + VOP_ABORTOP(tdvp, cnp); + error = EINVAL; /* cannot link to a special file */ + goto out1; + } + + hp->h_meta->h_nlink++; + hp->h_nodeflags |= IN_CHANGE; + tv = time; + error = VOP_UPDATE(vp, &tv, &tv, 1); + if (!error) + error = hfs_makelink(hp, tdvp, cnp); + if (error) { + hp->h_meta->h_nlink--; + hp->h_nodeflags |= IN_CHANGE; + } + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); +out1: + if (tdvp != vp) + VOP_UNLOCK(vp, 0, p); +out2: + vput(tdvp); + return (error); +} + +#endif diff --git a/bsd/hfs/hfs_lockf.c b/bsd/hfs/hfs_lockf.c new file mode 100644 index 000000000..2f4237734 --- /dev/null +++ b/bsd/hfs/hfs_lockf.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* (c) 1997-1998 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Scooter Morris at Genentech Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)hfs_lockf.c 1.0 + * derived from @(#)ufs_lockf.c 8.4 (Berkeley) 10/26/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hfs_lockf.h" +#include "hfs.h" + +/* + * This variable controls the maximum number of processes that will + * be checked in doing deadlock detection. + */ +int hfsmaxlockdepth = MAXDEPTH; + +#ifdef LOCKF_DEBUG +#include +#include +int lockf_debug = 0; +struct ctldebug debug4 = { "lockf_debug", &lockf_debug }; +#endif + +#define NOLOCKF (struct hfslockf *)0 +#define SELF 0x1 +#define OTHERS 0x2 + +/* + * Set a byte-range lock. + */ +int +hfs_setlock(lock) + register struct hfslockf *lock; +{ + register struct hfslockf *block; + struct hfsnode *hp = lock->lf_hfsnode; + struct hfslockf **prev, *overlap, *ltmp; + static char lockstr[] = "hfslockf"; + int ovcase, priority, needtolink, error; + +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) + hfs_lprint("hfs_setlock", lock); +#endif /* LOCKF_DEBUG */ + + /* + * Set the priority + */ + priority = PLOCK; + if (lock->lf_type == F_WRLCK) + priority += 4; + priority |= PCATCH; + /* + * Scan lock list for this file looking for locks that would block us. + */ + while ((block = hfs_getblock(lock))) { + /* + * Free the structure and return if nonblocking. + */ + if ((lock->lf_flags & F_WAIT) == 0) { + FREE(lock, M_LOCKF); + return (EAGAIN); + } + /* + * We are blocked. Since flock style locks cover + * the whole file, there is no chance for deadlock. + * For byte-range locks we must check for deadlock. + * + * Deadlock detection is done by looking through the + * wait channels to see if there are any cycles that + * involve us. MAXDEPTH is set just to make sure we + * do not go off into neverland. 
+ */ + if ((lock->lf_flags & F_POSIX) && + (block->lf_flags & F_POSIX)) { + register struct proc *wproc; + register struct hfslockf *waitblock; + int i = 0; + + /* The block is waiting on something */ + wproc = (struct proc *)block->lf_id; + while (wproc->p_wchan && + (wproc->p_wmesg == lockstr) && + (i++ < hfsmaxlockdepth)) { + waitblock = (struct hfslockf *)wproc->p_wchan; + /* Get the owner of the blocking lock */ + waitblock = waitblock->lf_next; + if ((waitblock->lf_flags & F_POSIX) == 0) + break; + wproc = (struct proc *)waitblock->lf_id; + if (wproc == (struct proc *)lock->lf_id) { + _FREE(lock, M_LOCKF); + return (EDEADLK); + } + } + } + /* + * For flock type locks, we must first remove + * any shared locks that we hold before we sleep + * waiting for an exclusive lock. + */ + if ((lock->lf_flags & F_FLOCK) && + lock->lf_type == F_WRLCK) { + lock->lf_type = F_UNLCK; + (void) hfs_clearlock(lock); + lock->lf_type = F_WRLCK; + } + /* + * Add our lock to the blocked list and sleep until we're free. + * Remember who blocked us (for deadlock detection). + */ + lock->lf_next = block; + TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block); +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) { + hfs_lprint("hfs_setlock: blocking on", block); + hfs_lprintlist("hfs_setlock", block); + } +#endif /* LOCKF_DEBUG */ + if ((error = tsleep((caddr_t)lock, priority, lockstr, 0))) { + /* + * We may have been awakened by a signal (in + * which case we must remove ourselves from the + * blocked list) and/or by another process + * releasing a lock (in which case we have already + * been removed from the blocked list and our + * lf_next field set to NOLOCKF). + */ + if (lock->lf_next) + TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, + lf_block); + _FREE(lock, M_LOCKF); + return (error); + } + } + /* + * No blocks!! Add the lock. Note that we will + * downgrade or upgrade any overlapping locks this + * process already owns. + * + * Skip over locks owned by other processes. 
+ * Handle any locks that overlap and are owned by ourselves. + */ + prev = &hp->h_lockf; + block = hp->h_lockf; + needtolink = 1; + for (;;) { + if ((ovcase = hfs_findoverlap(block, lock, SELF, &prev, &overlap))) + block = overlap->lf_next; + /* + * Six cases: + * 0) no overlap + * 1) overlap == lock + * 2) overlap contains lock + * 3) lock contains overlap + * 4) overlap starts before lock + * 5) overlap ends after lock + */ + switch (ovcase) { + case 0: /* no overlap */ + if (needtolink) { + *prev = lock; + lock->lf_next = overlap; + } + break; + + case 1: /* overlap == lock */ + /* + * If downgrading lock, others may be + * able to acquire it. + */ + if (lock->lf_type == F_RDLCK && + overlap->lf_type == F_WRLCK) + hfs_wakelock(overlap); + overlap->lf_type = lock->lf_type; + FREE(lock, M_LOCKF); + lock = overlap; /* for debug output below */ + break; + + case 2: /* overlap contains lock */ + /* + * Check for common starting point and different types. + */ + if (overlap->lf_type == lock->lf_type) { + _FREE(lock, M_LOCKF); + lock = overlap; /* for debug output below */ + break; + } + if (overlap->lf_start == lock->lf_start) { + *prev = lock; + lock->lf_next = overlap; + overlap->lf_start = lock->lf_end + 1; + } else + hfs_split(overlap, lock); + hfs_wakelock(overlap); + break; + + case 3: /* lock contains overlap */ + /* + * If downgrading lock, others may be able to + * acquire it, otherwise take the list. + */ + if (lock->lf_type == F_RDLCK && + overlap->lf_type == F_WRLCK) { + hfs_wakelock(overlap); + } else { + while ((ltmp = overlap->lf_blkhd.tqh_first)) { + TAILQ_REMOVE(&overlap->lf_blkhd, ltmp, + lf_block); + TAILQ_INSERT_TAIL(&lock->lf_blkhd, + ltmp, lf_block); + } + } + /* + * Add the new lock if necessary and delete the overlap. 
+ */ + if (needtolink) { + *prev = lock; + lock->lf_next = overlap->lf_next; + prev = &lock->lf_next; + needtolink = 0; + } else + *prev = overlap->lf_next; + _FREE(overlap, M_LOCKF); + continue; + + case 4: /* overlap starts before lock */ + /* + * Add lock after overlap on the list. + */ + lock->lf_next = overlap->lf_next; + overlap->lf_next = lock; + overlap->lf_end = lock->lf_start - 1; + prev = &lock->lf_next; + hfs_wakelock(overlap); + needtolink = 0; + continue; + + case 5: /* overlap ends after lock */ + /* + * Add the new lock before overlap. + */ + if (needtolink) { + *prev = lock; + lock->lf_next = overlap; + } + overlap->lf_start = lock->lf_end + 1; + hfs_wakelock(overlap); + break; + } + break; + } +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) { + hfs_lprint("hfs_setlock: got the lock", lock); + hfs_lprintlist("hfs_setlock", lock); + } +#endif /* LOCKF_DEBUG */ + return (0); +} + +/* + * Remove a byte-range lock on an hfsnode. + * + * Generally, find the lock (or an overlap to that lock) + * and remove it (or shrink it), then wakeup anyone we can. + */ +int +hfs_clearlock(unlock) + register struct hfslockf *unlock; +{ + struct hfsnode *hp = unlock->lf_hfsnode; + register struct hfslockf *lf = hp->h_lockf; + struct hfslockf *overlap, **prev; + int ovcase; + + if (lf == NOLOCKF) + return (0); +#ifdef LOCKF_DEBUG + if (unlock->lf_type != F_UNLCK) + panic("hfs_clearlock: bad type"); + if (lockf_debug & 1) + hfs_lprint("hfs_clearlock", unlock); +#endif /* LOCKF_DEBUG */ + prev = &hp->h_lockf; + while ((ovcase = hfs_findoverlap(lf, unlock, SELF, &prev, &overlap))) { + /* + * Wakeup the list of locks to be retried. 
+ */ + hfs_wakelock(overlap); + + switch (ovcase) { + + case 1: /* overlap == lock */ + *prev = overlap->lf_next; + FREE(overlap, M_LOCKF); + break; + + case 2: /* overlap contains lock: split it */ + if (overlap->lf_start == unlock->lf_start) { + overlap->lf_start = unlock->lf_end + 1; + break; + } + hfs_split(overlap, unlock); + overlap->lf_next = unlock->lf_next; + break; + + case 3: /* lock contains overlap */ + *prev = overlap->lf_next; + lf = overlap->lf_next; + _FREE(overlap, M_LOCKF); + continue; + + case 4: /* overlap starts before lock */ + overlap->lf_end = unlock->lf_start - 1; + prev = &overlap->lf_next; + lf = overlap->lf_next; + continue; + + case 5: /* overlap ends after lock */ + overlap->lf_start = unlock->lf_end + 1; + break; + } + break; + } +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) + hfs_lprintlist("hfs_clearlock", unlock); +#endif /* LOCKF_DEBUG */ + return (0); +} + +/* + * Check whether there is a blocking lock, + * and if so return its process identifier. + */ +int +hfs_getlock(lock, fl) + register struct hfslockf *lock; + register struct flock *fl; +{ + register struct hfslockf *block; + +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) + hfs_lprint("hfs_getlock", lock); +#endif /* LOCKF_DEBUG */ + + if ((block = hfs_getblock(lock))) { + fl->l_type = block->lf_type; + fl->l_whence = SEEK_SET; + fl->l_start = block->lf_start; + if (block->lf_end == -1) + fl->l_len = 0; + else + fl->l_len = block->lf_end - block->lf_start + 1; + if (block->lf_flags & F_POSIX) + fl->l_pid = ((struct proc *)(block->lf_id))->p_pid; + else + fl->l_pid = -1; + } else { + fl->l_type = F_UNLCK; + } + return (0); +} + +/* + * Walk the list of locks for an hfsnode and + * return the first blocking lock. 
+ */ +struct hfslockf * +hfs_getblock(lock) + register struct hfslockf *lock; +{ + struct hfslockf **prev, *overlap, *lf = lock->lf_hfsnode->h_lockf; + int ovcase; + + prev = &lock->lf_hfsnode->h_lockf; + while ((ovcase = hfs_findoverlap(lf, lock, OTHERS, &prev, &overlap))) { + /* + * We've found an overlap, see if it blocks us + */ + if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) + return (overlap); + /* + * Nope, point to the next one on the list and + * see if it blocks us + */ + lf = overlap->lf_next; + } + return (NOLOCKF); +} + +/* + * Walk the list of locks for an hfsnode to + * find an overlapping lock (if any). + * + * NOTE: this returns only the FIRST overlapping lock. There + * may be more than one. + */ +int +hfs_findoverlap(lf, lock, type, prev, overlap) + register struct hfslockf *lf; + struct hfslockf *lock; + int type; + struct hfslockf ***prev; + struct hfslockf **overlap; +{ + off_t start, end; + + *overlap = lf; + if (lf == NOLOCKF) + return (0); +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + hfs_lprint("hfs_findoverlap: looking for overlap in", lock); +#endif /* LOCKF_DEBUG */ + start = lock->lf_start; + end = lock->lf_end; + while (lf != NOLOCKF) { + if (((type & SELF) && lf->lf_id != lock->lf_id) || + ((type & OTHERS) && lf->lf_id == lock->lf_id)) { + *prev = &lf->lf_next; + *overlap = lf = lf->lf_next; + continue; + } +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + hfs_lprint("\tchecking", lf); +#endif /* LOCKF_DEBUG */ + /* + * OK, check for overlap + * + * Six cases: + * 0) no overlap + * 1) overlap == lock + * 2) overlap contains lock + * 3) lock contains overlap + * 4) overlap starts before lock + * 5) overlap ends after lock + */ + if ((lf->lf_end != -1 && start > lf->lf_end) || + (end != -1 && lf->lf_start > end)) { + /* Case 0 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("no overlap\n"); +#endif /* LOCKF_DEBUG */ + if ((type & SELF) && end != -1 && lf->lf_start > end) + return (0); + *prev = &lf->lf_next; + 
*overlap = lf = lf->lf_next; + continue; + } + if ((lf->lf_start == start) && (lf->lf_end == end)) { + /* Case 1 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap == lock\n"); +#endif /* LOCKF_DEBUG */ + return (1); + } + if ((lf->lf_start <= start) && + (end != -1) && + ((lf->lf_end >= end) || (lf->lf_end == -1))) { + /* Case 2 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap contains lock\n"); +#endif /* LOCKF_DEBUG */ + return (2); + } + if (start <= lf->lf_start && + (end == -1 || + (lf->lf_end != -1 && end >= lf->lf_end))) { + /* Case 3 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("lock contains overlap\n"); +#endif /* LOCKF_DEBUG */ + return (3); + } + if ((lf->lf_start < start) && + ((lf->lf_end >= start) || (lf->lf_end == -1))) { + /* Case 4 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap starts before lock\n"); +#endif /* LOCKF_DEBUG */ + return (4); + } + if ((lf->lf_start > start) && + (end != -1) && + ((lf->lf_end > end) || (lf->lf_end == -1))) { + /* Case 5 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap ends after lock\n"); +#endif /* LOCKF_DEBUG */ + return (5); + } + panic("hfs_findoverlap: default"); + } + return (0); +} + +/* + * Split a lock and a contained region into + * two or three locks as necessary. + */ +void +hfs_split(lock1, lock2) + register struct hfslockf *lock1; + register struct hfslockf *lock2; +{ + register struct hfslockf *splitlock; + +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) { + hfs_lprint("hfs_split", lock1); + hfs_lprint("splitting from", lock2); + } +#endif /* LOCKF_DEBUG */ + /* + * Check to see if spliting into only two pieces. 
+ */ + if (lock1->lf_start == lock2->lf_start) { + lock1->lf_start = lock2->lf_end + 1; + lock2->lf_next = lock1; + return; + } + if (lock1->lf_end == lock2->lf_end) { + lock1->lf_end = lock2->lf_start - 1; + lock2->lf_next = lock1->lf_next; + lock1->lf_next = lock2; + return; + } + /* + * Make a new lock consisting of the last part of + * the encompassing lock + */ + MALLOC(splitlock, struct hfslockf *, sizeof *splitlock, M_LOCKF, M_WAITOK); + bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock); + splitlock->lf_start = lock2->lf_end + 1; + TAILQ_INIT(&splitlock->lf_blkhd); + lock1->lf_end = lock2->lf_start - 1; + /* + * OK, now link it in + */ + splitlock->lf_next = lock1->lf_next; + lock2->lf_next = splitlock; + lock1->lf_next = lock2; +} + +/* + * Wakeup a blocklist + */ +void +hfs_wakelock(listhead) + struct hfslockf *listhead; +{ + register struct hfslockf *wakelock; + + while ((wakelock = listhead->lf_blkhd.tqh_first)) { + TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block); + wakelock->lf_next = NOLOCKF; +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + hfs_lprint("hfs_wakelock: awakening", wakelock); +#endif /* LOCKF_DEBUG */ + wakeup((caddr_t)wakelock); + } +} + +#ifdef LOCKF_DEBUG +/* + * Print out a lock. + */ +hfs_lprint(tag, lock) + char *tag; + register struct hfslockf *lock; +{ + + printf("%s: lock 0x%lx for ", tag, lock); + if (lock->lf_flags & F_POSIX) + printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid); + else + printf("id 0x%x", lock->lf_id); + printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d", + lock->lf_hfsnode->i_number, + major(lock->lf_hfsnode->h_dev), + minor(lock->lf_hfsnode->h_dev), + lock->lf_type == F_RDLCK ? "shared" : + lock->lf_type == F_WRLCK ? "exclusive" : + lock->lf_type == F_UNLCK ? 
"unlock" : + "unknown", lock->lf_start, lock->lf_end); + if (lock->lf_blkhd.tqh_first) + printf(" block 0x%x\n", lock->lf_blkhd.tqh_first); + else + printf("\n"); +} + +hfs_lprintlist(tag, lock) + char *tag; + struct hfslockf *lock; +{ + register struct hfslockf *lf, *blk; + + printf("%s: Lock list for ino %d on dev <%d, %d>:\n", + tag, lock->lf_hfsnode->i_number, + major(lock->lf_hfsnode->h_dev), + minor(lock->lf_hfsnode->h_dev)); + for (lf = lock->lf_hfsnode->h_lockf; lf; lf = lf->lf_next) { + printf("\tlock 0x%lx for ", lf); + if (lf->lf_flags & F_POSIX) + printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid); + else + printf("id 0x%x", lf->lf_id); + printf(", %s, start %d, end %d", + lf->lf_type == F_RDLCK ? "shared" : + lf->lf_type == F_WRLCK ? "exclusive" : + lf->lf_type == F_UNLCK ? "unlock" : + "unknown", lf->lf_start, lf->lf_end); + for (blk = lf->lf_blkhd.tqh_first; blk; + blk = blk->lf_block.tqe_next) { + printf("\n\t\tlock request 0x%lx for ", blk); + if (blk->lf_flags & F_POSIX) + printf("proc %d", + ((struct proc *)(blk->lf_id))->p_pid); + else + printf("id 0x%x", blk->lf_id); + printf(", %s, start %d, end %d", + blk->lf_type == F_RDLCK ? "shared" : + blk->lf_type == F_WRLCK ? "exclusive" : + blk->lf_type == F_UNLCK ? "unlock" : + "unknown", blk->lf_start, blk->lf_end); + if (blk->lf_blkhd.tqh_first) + panic("hfs_lprintlist: bad list"); + } + printf("\n"); + } +} +#endif /* LOCKF_DEBUG */ diff --git a/bsd/hfs/hfs_lockf.h b/bsd/hfs/hfs_lockf.h new file mode 100644 index 000000000..b3ad07eac --- /dev/null +++ b/bsd/hfs/hfs_lockf.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* (c) 1997-1998 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Scooter Morris at Genentech Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * + * @(#)hfs_lockf.h 1.0 5/5/98 + * derived from @(#)lockf.h 8.2 (Berkeley) 10/26/94 + */ + +/* + * The hfslockf structure is a kernel structure which contains the information + * associated with a byte range lock. The hfslockf structures are linked into + * the inode structure. Locks are sorted by the starting byte of the lock for + * efficiency. + */ +TAILQ_HEAD(locklist, hfslockf); + +struct hfslockf { + short lf_flags; /* Semantics: F_POSIX, F_FLOCK, F_WAIT */ + short lf_type; /* Lock type: F_RDLCK, F_WRLCK */ + off_t lf_start; /* Byte # of the start of the lock */ + off_t lf_end; /* Byte # of the end of the lock (-1=EOF) */ + caddr_t lf_id; /* Id of the resource holding the lock */ + struct hfsnode *lf_hfsnode; /* Back pointer to the inode */ + struct hfslockf *lf_next; /* Pointer to the next lock on this inode */ + struct locklist lf_blkhd; /* List of requests blocked on this lock */ + TAILQ_ENTRY(hfslockf) lf_block;/* A request waiting for a lock */ +}; + +/* Maximum length of sleep chains to traverse to try and detect deadlock. 
*/ +#define MAXDEPTH 50 + +__BEGIN_DECLS +void hfs_addblock __P((struct hfslockf *, struct hfslockf *)); +int hfs_clearlock __P((struct hfslockf *)); +int hfs_findoverlap __P((struct hfslockf *, + struct hfslockf *, int, struct hfslockf ***, struct hfslockf **)); +struct hfslockf * + hfs_getblock __P((struct hfslockf *)); +int hfs_getlock __P((struct hfslockf *, struct flock *)); +int hfs_setlock __P((struct hfslockf *)); +void hfs_split __P((struct hfslockf *, struct hfslockf *)); +void hfs_wakelock __P((struct hfslockf *)); +__END_DECLS + +#ifdef LOCKF_DEBUG +extern int lockf_debug; + +__BEGIN_DECLS +void hfs_lprint __P((char *, struct hfslockf *)); +void hfs_lprintlist __P((char *, struct hfslockf *)); +__END_DECLS +#endif diff --git a/bsd/hfs/hfs_lookup.c b/bsd/hfs/hfs_lookup.c new file mode 100644 index 000000000..8ade827fc --- /dev/null +++ b/bsd/hfs/hfs_lookup.c @@ -0,0 +1,892 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. 
+ * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)hfs_lookup.c 1.0 + * derived from @(#)ufs_lookup.c 8.15 (Berkeley) 6/16/95 + * + * (c) 1998-1999 Apple Computer, Inc. All Rights Reserved + * (c) 1990, 1992 NeXT Computer, Inc. All Rights Reserved + * + * + * hfs_lookup.c -- code to handle directory traversal on HFS/HFS+ volume + * + * MODIFICATION HISTORY: + * 21-May-1999 Don Brady Add support for HFS rooting. + * 25-Feb-1999 Clark Warner Fixed the error case of VFS_VGGET when + * processing DotDot (..) to relock parent + * 23-Feb-1999 Pat Dirks Finish cleanup around Don's last fix in "." and ".." handling. + * 11-Nov-1998 Don Brady Take out VFS_VGET that got added as part of previous fix. + * 14-Oct-1998 Don Brady Fix locking policy volation in hfs_lookup for ".." case + * (radar #2279902). + * 4-Jun-1998 Pat Dirks Split off from hfs_vnodeops.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hfs.h" +#include "hfs_dbg.h" +#include "hfscommon/headers/FileMgrInternal.h" + +u_int16_t GetForkFromName(struct componentname *cnp); +int hfs_vget_sibling(struct vnode *vdp, u_int16_t forkType, struct vnode **vpp); +int hfs_vget_catinfo(struct vnode *parent_vp, struct hfsCatalogInfo *catInfo, u_int32_t forkType, struct vnode **target_vpp); + +/* + * XXX SER fork strings. 
+ * Put these someplace better + */ +#define gHFSForkIdentStr "/" +#define gDataForkNameStr "data" +#define gRsrcForkNameStr "rsrc" + + +#if DBG_VOP_TEST_LOCKS +extern void DbgVopTest(int maxSlots, int retval, VopDbgStoreRec *VopDbgStore, char *funcname); +#endif + +/***************************************************************************** +* +* Operations on vnodes +* +*****************************************************************************/ + + +/* + * FROM FREEBSD 3.1 + * Convert a component of a pathname into a pointer to a locked hfsnode. + * This is a very central and rather complicated routine. + * If the file system is not maintained in a strict tree hierarchy, + * this can result in a deadlock situation (see comments in code below). + * + * The cnp->cn_nameiop argument is LOOKUP, CREATE, RENAME, or DELETE depending + * on whether the name is to be looked up, created, renamed, or deleted. + * When CREATE, RENAME, or DELETE is specified, information usable in + * creating, renaming, or deleting a directory entry may be calculated. + * Notice that these are the only operations that can affect the directory of the target. + * + * If flag has LOCKPARENT or'ed into it and the target of the pathname + * exists, lookup returns both the target and its parent directory locked. + * When creating or renaming and LOCKPARENT is specified, the target may + * not be ".". When deleting and LOCKPARENT is specified, the target may + * be "."., but the caller must check to ensure it does an vrele and vput + * instead of two vputs. + * + * LOCKPARENT and WANTPARENT actually refer to the parent of the last item, + * so if ISLASTCN is not set, they should be ignored. Also they are mutually exclusive, or + * WANTPARENT really implies DONTLOCKPARENT. Either of them set means that the calling + * routine wants to access the parent of the target, locked or unlocked. 
+ *
+ * Keeping the parent locked as long as possible protects from other processes
+ * looking up the same item, so it has to be locked until the hfsnode is totally finished
+ *
+ * This routine is actually used as VOP_CACHEDLOOKUP method, and the
+ * filesystem employs the generic hfs_cache_lookup() as VOP_LOOKUP
+ * method.
+ *
+ * hfs_cache_lookup() performs the following for us:
+ *	check that it is a directory
+ *	check accessibility of directory
+ *	check for modification attempts on read-only mounts
+ *	if name found in cache
+ *	    if at end of path and deleting or creating
+ *		drop it
+ *	    else
+ *		return name.
+ *	return VOP_CACHEDLOOKUP()
+ *
+ * Overall outline of hfs_lookup:
+ *
+ *	handle simple cases of . and ..
+ *	search for name in directory, to found or notfound
+ * notfound:
+ *	if creating, return locked directory, leaving info on available slots
+ *	else return error
+ * found:
+ *	if at end of path and deleting, return information to allow delete
+ *	if at end of path and rewriting (RENAME and LOCKPARENT), lock target
+ *	  inode and return info to allow rewrite
+ *	if not at end, add name to cache; if at end and neither creating
+ *	  nor deleting, add name to cache
+ */
+
+/*
+ * Lookup *nm in directory *pvp, return it in *a_vpp.
+ * **a_vpp is held on exit.
+ * We create a hfsnode for the file, but we do NOT open the file here.
+
+#% lookup	dvp L ? ?
+#% lookup	vpp - L -
+
+	IN struct vnode *dvp - Parent node of file;
+	INOUT struct vnode **vpp - node of target file, it's a new node if the target vnode did not exist;
+	IN struct componentname *cnp - Name of file;
+
+	* When should we lock parent_hp in here ??
+ */
+
+int
+hfs_lookup(ap)
+	struct vop_cachedlookup_args /* {
+		struct vnode *a_dvp;
+		struct vnode **a_vpp;
+		struct componentname *a_cnp;
+	} */ *ap;
+{
+	struct vnode *parent_vp;
+	struct vnode *target_vp;
+	struct vnode *tparent_vp;
+	struct hfsnode *parent_hp;		/* parent */
+	struct componentname *cnp;
+	struct ucred *cred;
+	struct proc *p;
+	struct hfsCatalogInfo catInfo;
+	u_int32_t parent_id;
+	u_int32_t nodeID;
+	u_int16_t targetLen;
+	u_int16_t forkType;
+	int flags;
+	int lockparent;			/* !0 => lockparent flag is set */
+	int wantparent;			/* !0 => wantparent or lockparent flag */
+	int nameiop;
+	int retval;
+	u_char isDot, isDotDot, found;
+	DBG_FUNC_NAME("lookup");
+	DBG_VOP_LOCKS_DECL(2);
+	DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);
+	DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_POS);
+	DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\n"));
+	DBG_HFS_NODE_CHECK(ap->a_dvp);
+
+
+	/*
+	 * Do initial setup.  target_vp stays NULL (and is what *a_vpp gets on
+	 * every error path via Err_Exit); catInfo must be INIT'ed here because
+	 * Err_Exit unconditionally CLEANs it.
+	 */
+	INIT_CATALOGDATA(&catInfo.nodeData, 0);
+	parent_vp = ap->a_dvp;
+	cnp = ap->a_cnp;
+	parent_hp = VTOH(parent_vp);	/* parent */
+	target_vp = NULL;
+	targetLen = cnp->cn_namelen;
+	nameiop = cnp->cn_nameiop;
+	cred = cnp->cn_cred;
+	p = cnp->cn_proc;
+	lockparent = cnp->cn_flags & LOCKPARENT;
+	wantparent = cnp->cn_flags & (LOCKPARENT|WANTPARENT);
+	flags = cnp->cn_flags;
+	parent_id = H_FILEID(parent_hp);
+	nodeID = kUnknownID;
+	found = FALSE;
+	isDot = FALSE;
+	isDotDot = FALSE;
+	retval = E_NONE;
+	forkType = kUndefinedFork;
+
+
+	/*
+	 * We now have a segment name to search for, and a directory to search.
+	 *
+	 */
+
+	/*
+	 * First check to see if it is a . or .., else look it up.
+	 */
+
+	if (flags & ISDOTDOT) {		/* Wanting the parent */
+		isDotDot = TRUE;
+		found = TRUE;		/* .. is always defined */
+		nodeID = H_DIRID(parent_hp);
+	}				/* Wanting ourselves */
+	else if ((cnp->cn_nameptr[0] == '.') && (targetLen == 1)) {
+		isDot = TRUE;
+		found = TRUE;		/* We always know who we are */
+	}
+	else {				/* Wanting something else */
+		catInfo.hint = kNoHint;
+
+		/* lock catalog b-tree (shared: read-only search) */
+		retval = hfs_metafilelocking(VTOHFS(parent_vp), kHFSCatalogFileID, LK_SHARED, p);
+		if (retval)
+			goto Err_Exit;
+
+		retval = hfs_getcatalog (VTOVCB(parent_vp), parent_id, cnp->cn_nameptr, targetLen, &catInfo);
+
+		/* unlock catalog b-tree */
+		(void) hfs_metafilelocking(VTOHFS(parent_vp), kHFSCatalogFileID, LK_RELEASE, p);
+
+		if (retval == E_NONE)
+			found = TRUE;
+	};
+
+
+	/*
+	 * At this point we know IF we have a valid dir/name.
+	 */
+
+
+	retval = E_NONE;
+	if (! found) {
+		/*
+		 * This is a non-existing entry
+		 *
+		 * If creating, and at end of pathname and current
+		 * directory has not been removed, then can consider
+		 * allowing file to be created.
+		 */
+		if ((nameiop == CREATE || nameiop == RENAME ||
+			(nameiop == DELETE &&
+			(ap->a_cnp->cn_flags & DOWHITEOUT) &&
+			(ap->a_cnp->cn_flags & ISWHITEOUT))) &&
+			(flags & ISLASTCN)) {
+			/*
+			 * Access for write is interpreted as allowing
+			 * creation of files in the directory.
+			 */
+			retval = VOP_ACCESS(parent_vp, VWRITE, cred, cnp->cn_proc);
+			if (retval)
+				return (retval);
+
+			/* EJUSTRETURN tells namei the slot is free; SAVENAME
+			 * makes the caller (create/rename) responsible for the name buffer. */
+			cnp->cn_flags |= SAVENAME;
+			if (!lockparent)
+				VOP_UNLOCK(parent_vp, 0, p);
+			retval = EJUSTRETURN;
+			goto Err_Exit;
+		}
+
+		/*
+		 * Insert name into cache (as non-existent) if appropriate.
+		 */
+
+		/*
+		 * XXX SER - Here we would store the name in cache as non-existent if not trying to create it, but,
+		 * the name cache IS case-sensitive, thus maybe showing a negative hit, when the name
+		 * is only different by case. So hfs does not support negative caching. Something to look at.
+		 * (See radar 2293594 for a failed example)
+		if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE)
+			cache_enter(parent_vp, *vpp, cnp);
+		*/
+
+		retval = ENOENT;
+	}
+	else {
+		/*
+		 * We have found an entry
+		 *
+		 * Here we have to decide what type of vnode to create.
+		 * There are 3 types of objects that are given:
+		 * 1. '.': return the same dp
+		 * 2. '..' return the parent of dp, always a VDIR
+		 * 3. catinfo rec: return depending on type:
+		 *	A. VDIR, nodeType is kCatalogFolderNode
+		 *	B. VLINK nodeType is kCatalogFileNode, the mode is IFLNK (esp. if it is a link to a directory e.g. bar/link/foo)
+		 *	C. VREG, nodeType is kCatalogFileNode, forkType at this point is unknown
+		 *	To determine the forkType, we can use this algorithm (\0 in the strings mean the NULL character):
+		 *	a. forkType is kDataType iff ISLASTCN is set (as in the case of the default fork e.g. data/foo).
+		 *	b. forkType is kDataType iff ISLASTCN is not set and the namePtr is followed by "/?AppleHFSFork/data\0"
+		 *	c. forkType is kRsrcType iff ISLASTCN is not set and the namePtr is followed by "/?AppleHFSFork/rsrc\0"
+		 *	If the latter two are correct, then we 'consume' the remaining of the name buffer
+		 *	and set the cnp as appropriate.
+		 *	Anything else returns an retval
+		 */
+
+
+		/*
+		 * If deleting, and at end of pathname, return
+		 * parameters which can be used to remove file.
+		 * If the wantparent flag isn't set, we return only
+		 * the directory (in ndp->ndvp), otherwise we go
+		 * on and lock the hfsnode, being careful with ".".
+		 *
+		 * Forks cannot be deleted so scan-ahead is illegal, so just return the default fork
+		 */
+		if (nameiop == DELETE && (flags & ISLASTCN)) {
+			/*
+			 * Write access to directory required to delete files.
+			 */
+			retval = VOP_ACCESS(parent_vp, VWRITE, cred, cnp->cn_proc);
+			if (retval)
+				goto Err_Exit;
+
+			if (isDot) {	/* Want to return ourselves */
+				VREF(parent_vp);
+				target_vp = parent_vp;
+				goto Err_Exit;
+			}
+			else if (isDotDot) {
+				retval = VFS_VGET(parent_vp->v_mount, &nodeID, &target_vp);
+				if (retval)
+					goto Err_Exit;
+			}
+			else {
+				retval = hfs_vget_catinfo(parent_vp, &catInfo, kAnyFork, &target_vp);
+				if (retval)
+					goto Err_Exit;
+				CLEAN_CATALOGDATA(&catInfo.nodeData);
+			};
+
+
+			/*
+			 * If directory is "sticky", then user must own
+			 * the directory, or the file in it, else she
+			 * may not delete it (unless she's root). This
+			 * implements append-only directories.
+			 */
+			if ((parent_hp->h_meta->h_mode & ISVTX) &&
+				(cred->cr_uid != 0) &&
+				(cred->cr_uid != parent_hp->h_meta->h_uid) &&
+				(target_vp->v_type != VLNK) &&
+				(hfs_owner_rights(target_vp, cred, p, false))) {
+				vput(target_vp);
+				retval = EPERM;
+				goto Err_Exit;
+			}
+#if HFS_HARDLINKS
+			/*
+			 * If this is a link node then we need to save the name
+			 * (of the link) so we can delete it from the catalog b-tree.
+			 * In this case, hfs_remove will then free the component name.
+			 */
+			if (target_vp && (VTOH(target_vp)->h_meta->h_metaflags & IN_DATANODE))
+				cnp->cn_flags |= SAVENAME;
+#endif
+
+			if (!lockparent)
+				VOP_UNLOCK(parent_vp, 0, p);
+			goto Err_Exit;
+		};
+
+		/*
+		 * If rewriting 'RENAME', return the hfsnode and the
+		 * information required to rewrite the present directory
+		 */
+		if (nameiop == RENAME && wantparent && (cnp->cn_flags & ISLASTCN)) {
+
+			if ((retval = VOP_ACCESS(parent_vp, VWRITE, cred, cnp->cn_proc)) != 0)
+				goto Err_Exit;
+
+			/*
+			 * Careful about locking second inode.
+			 * This can only occur if the target is ".". like 'mv foo/bar foo/.'
+			 */
+			if (isDot) {
+				retval = EISDIR;
+				goto Err_Exit;
+			}
+			else if (isDotDot) {
+				retval = VFS_VGET(parent_vp->v_mount, &nodeID, &target_vp);
+				if (retval)
+					goto Err_Exit;
+			}
+			else {
+				/* If the name differs in case, then act like it does not exist
+				 * This allows renaming foo->Foo
+				 * Exclude length difference due to compose/decompose issues.
+				 * (strncmp != 0 here means the on-disk name differs byte-wise
+				 * from the requested one even though the catalog matched it.)
+				 */
+				if ((cnp->cn_namelen == catInfo.nodeData.cnm_length) &&
+					strncmp(cnp->cn_nameptr, catInfo.nodeData.cnm_nameptr, targetLen)) {
+					if (!lockparent)
+						VOP_UNLOCK(parent_vp, 0, p);
+					retval = EJUSTRETURN;
+					goto Err_Exit;
+				};
+
+				retval = hfs_vget_catinfo(parent_vp, &catInfo, kAnyFork, &target_vp);
+				if (retval)
+					goto Err_Exit;
+
+				CLEAN_CATALOGDATA(&catInfo.nodeData);	/* Should do nothing */
+			};
+
+			cnp->cn_flags |= SAVENAME;
+			if (!lockparent)
+				VOP_UNLOCK(parent_vp, 0, p);
+
+			goto Err_Exit;
+			/* Finished...all is well, goto the end */
+		};
+
+		/*
+		 * Step through the translation in the name.  We do not `vput' the
+		 * directory because we may need it again if a symbolic link
+		 * is relative to the current directory.  Instead we save it
+		 * unlocked as "tparent_vp".  We must get the target hfsnode before unlocking
+		 * the directory to insure that the hfsnode will not be removed
+		 * before we get it.  We prevent deadlock by always fetching
+		 * inodes from the root, moving down the directory tree. Thus
+		 * when following backward pointers ".." we must unlock the
+		 * parent directory before getting the requested directory.
+		 * There is a potential race condition here if both the current
+		 * and parent directories are removed before the VFS_VGET for the
+		 * hfsnode associated with ".." returns.  We hope that this occurs
+		 * infrequently since we cannot avoid this race condition without
+		 * implementing a sophisticated deadlock detection algorithm.
+		 * Note also that this simple deadlock detection scheme will not
+		 * work if the file system has any hard links other than ".."
+		 * that point backwards in the directory structure.
+		 */
+
+		tparent_vp = parent_vp;
+		if (isDotDot) {
+			VOP_UNLOCK(tparent_vp, 0, p);	/* race to get the inode */
+			if ((retval = VFS_VGET(parent_vp->v_mount, &nodeID, &target_vp))) {
+				vn_lock(tparent_vp, LK_EXCLUSIVE | LK_RETRY, p);
+				goto Err_Exit;
+			}
+			if (lockparent && (flags & ISLASTCN) && (tparent_vp != target_vp) &&
+				(retval = vn_lock(tparent_vp, LK_EXCLUSIVE, p))) {
+				vput(target_vp);
+				goto Err_Exit;
+			}
+		}
+		else if (isDot) {
+			VREF(parent_vp);	/* we want ourself, ie "." */
+			target_vp = parent_vp;
+		}
+		else {
+			mode_t mode;
+			/*
+			 * Determine what fork to get, currently 3 scenarios are supported:
+			 * 1. ./foo: if it is a dir, return a VDIR else return data fork
+			 * 2. ./foo/.__Fork/data: return data fork
+			 * 3. ./foo/.__Fork/rsrc: return resource fork
+			 * So the algorithm is:
+			 * If the object is a directory
+			 *	then return a VDIR vnode
+			 * else if ISLASTCN is true
+			 *	then get the vnode with forkType=kDataFork
+			 * else
+			 *	compare with the remaining cnp buffer with "/.__Fork/"
+			 *	if a match
+			 *		then compare string after that with either 'data' or 'rsrc'
+			 *		if match
+			 *			then
+			 *			'consume' rest of cnp, setting appropriate values and flags
+			 *			return vnode depending on match
+			 *		else
+			 *			bad fork name
+			 *	else
+			 *		illegal path after a file object
+			 */
+
+			mode = (mode_t)(catInfo.nodeData.cnd_mode);
+
+			if (catInfo.nodeData.cnd_type == kCatalogFolderNode) {
+				forkType = kDirectory;	/* Really ignored */
+			}
+			else if ((mode & IFMT) == IFLNK) {
+				forkType = kDataFork;
+			}		/* After this point, nodeType should be a file */
+			else if (flags & ISLASTCN) {	/* Create a default fork */
+				forkType = kDataFork;
+			}
+			else {	/* determine what fork was specified */
+				forkType = GetForkFromName(cnp);
+				flags |= ISLASTCN;	/* To know to unlock the parent if needed */
+			};	/* else */
+
+
+			/* If couldn't determine what type of fork, leave */
+			if (forkType == kUndefinedFork) {
+				retval = EISDIR;
+				goto Err_Exit;
+			};
+
+			/* Get the vnode now that what type of fork is known */
+			DBG_ASSERT((forkType==kDirectory) || (forkType==kDataFork) || (forkType==kRsrcFork));
+			retval = hfs_vget_catinfo(tparent_vp, &catInfo, forkType, &target_vp);
+			if (retval != E_NONE)
+				goto Err_Exit;
+
+			if (!lockparent || !(flags & ISLASTCN))
+				VOP_UNLOCK(tparent_vp, 0, p);
+
+			CLEAN_CATALOGDATA(&catInfo.nodeData);
+
+		};	/* else found */
+
+
+		/*
+		 * Insert name in cache if wanted.
+		 * Names with composed chars are not put into the name cache
+		 * (the length comparison below filters them out).
+		 */
+		if ((cnp->cn_flags & MAKEENTRY)
+			&& (cnp->cn_namelen == catInfo.nodeData.cnm_length)) {
+			/*
+			 * XXX SER - Might be good idea to bcopy(catInfo.nodeData.fsspec.name, cnp->cn_nameptr)
+			 * to "normalize" the name cache. This will avoid polluting the name cache with
+			 * names that are different in case, and allow negative caching
+			 */
+			cache_enter(parent_vp, target_vp, cnp);
+		}
+
+
+
+	};	/* else found == TRUE */
+
+Err_Exit:
+
+	CLEAN_CATALOGDATA(&catInfo.nodeData);	/* Just to make sure */
+	*ap->a_vpp = target_vp;
+
+	DBG_VOP_UPDATE_VP(1, *ap->a_vpp);
+	//DBG_VOP_LOOKUP_TEST (funcname, cnp, parent_vp, target_vp);
+	//DBG_VOP_LOCKS_TEST(E_NONE);
+
+	return (retval);
+}
+
+
+
+/*
+ * Based on vn_cache_lookup (which is vfs_cache_lookup in FreeBSD 3.1)
+ *
+ * Name caching works as follows:
+ *
+ * Names found by directory scans are retained in a cache
+ * for future reference.  It is managed LRU, so frequently
+ * used names will hang around.  Cache is indexed by hash value
+ * obtained from (vp, name) where vp refers to the directory
+ * containing name.
+ *
+ * If it is a "negative" entry, (i.e. for a name that is known NOT to
+ * exist) the vnode pointer will be NULL.
+ *
+ * Upon reaching the last segment of a path, if the reference
+ * is for DELETE, or NOCACHE is set (rewrite), and the
+ * name is located in the cache, it will be dropped.
+ *
+ * In hfs, since a name can represent multiple forks, it cannot
+ * be known what fork the name matches, so further checks have to be done.
+ * Currently a policy of first requested, is the one stored, is followed.
+ *
+ * SER XXX If this proves inadequate maybe we can munge the name to contain a fork reference
+ * like foo -> foo.d for the data fork.
+ */
+
+int
+hfs_cache_lookup(ap)
+	struct vop_lookup_args /* {
+		struct vnode *a_dvp;
+		struct vnode **a_vpp;
+		struct componentname *a_cnp;
+	} */ *ap;
+{
+	struct vnode *vdp;
+	struct vnode *pdp;
+	int lockparent;
+	int error;
+	struct vnode **vpp = ap->a_vpp;
+	struct componentname *cnp = ap->a_cnp;
+	struct ucred *cred = cnp->cn_cred;
+	int flags = cnp->cn_flags;
+	struct proc *p = cnp->cn_proc;
+	struct hfsnode *hp;
+	u_int32_t vpid;	/* capability number of vnode */
+	DBG_FUNC_NAME("cache_lookup");
+	DBG_VOP_LOCKS_DECL(2);
+	DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);
+	DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_POS);
+	DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\n"));
+	DBG_VOP_CONT(("\tTarget: "));DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP_CONT(("\n"));
+	DBG_HFS_NODE_CHECK(ap->a_dvp);
+
+	*vpp = NULL;
+	vdp = ap->a_dvp;
+	lockparent = flags & LOCKPARENT;
+
+	/* The boilerplate checks hfs_lookup's comment promises we do:
+	 * directory-ness, read-only mount, and execute (search) permission. */
+	if (vdp->v_type != VDIR)
+		return (ENOTDIR);
+
+	if ((flags & ISLASTCN) && (vdp->v_mount->mnt_flag & MNT_RDONLY) &&
+		(cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
+		return (EROFS);
+
+	error = VOP_ACCESS(vdp, VEXEC, cred, cnp->cn_proc);
+
+	if (error)
+		return (error);
+
+	/*
+	 * Lookup an entry in the cache
+	 * If the lookup succeeds, the vnode is returned in *vpp, and a status of -1 is
+	 * returned. If the lookup determines that the name does not exist
+	 * (negative caching), a status of ENOENT is returned. If the lookup
+	 * fails, a status of zero is returned.
+	 */
+	error = cache_lookup(vdp, vpp, cnp);
+
+	if (error == 0) {	/* Unsuccessful */
+		DBG_VOP(("\tWas not in name cache\n"));
+		error = hfs_lookup(ap);
+#if HFS_HARDLINKS
+		if (error)
+			return (error);
+		/*
+		 * If this is a hard-link vnode then we need to update
+		 * the name (of the link) and update the parent ID. This
+		 * enables getattrlist calls to return correct link info.
+		 */
+		hp = VTOH(*ap->a_vpp);
+		if ((flags & ISLASTCN) && (hp->h_meta->h_metaflags & IN_DATANODE)) {
+			H_DIRID(hp) = H_FILEID(VTOH(ap->a_dvp));
+			hfs_set_metaname(cnp->cn_nameptr, hp->h_meta, HTOHFS(hp));
+		}
+#endif
+		return (error);
+	};
+
+	DBG_VOP(("\tName was found in the name cache"));
+	if (error == ENOENT) {
+		DBG_VOP_CONT((" though it was a NEGATIVE HIT\n"));
+		return (error);
+	};
+	DBG_VOP_CONT(("\n"));
+
+#if HFS_HARDLINKS
+	/*
+	 * If this is a hard-link vnode then we need to update
+	 * the name (of the link) and update the parent ID. This
+	 * enables getattrlist calls to return correct link info.
+	 */
+	hp = VTOH(*vpp);
+	if ((flags & ISLASTCN) && (hp->h_meta->h_metaflags & IN_DATANODE)) {
+		H_DIRID(hp) = H_FILEID(VTOH(vdp));
+		hfs_set_metaname(cnp->cn_nameptr, hp->h_meta, HTOHFS(hp));
+	}
+#endif
+
+	/* We have a name that matched */
+	pdp = vdp;
+	vdp = *vpp;
+	vpid = vdp->v_id;	/* snapshot capability number before any unlock window */
+	if (pdp == vdp) {	/* lookup on "." */
+		VREF(vdp);
+		error = 0;
+	} else if (flags & ISDOTDOT) {
+		/*
+		 * Careful on the locking policy,
+		 * remember we always lock from parent to child, so have
+		 * to release lock on child before trying to lock parent
+		 * then regain lock if needed
+		 */
+		VOP_UNLOCK(pdp, 0, p);
+		error = vget(vdp, LK_EXCLUSIVE, p);
+		if (!error && lockparent && (flags & ISLASTCN))
+			error = vn_lock(pdp, LK_EXCLUSIVE, p);
+	} else {
+		/*
+		 * Check to see if a specific fork is not being requested.
+		 *
+		 * If it is a file and not the last path item
+		 * then check if its a proper fork
+		 * If it is, check to see if the matched vnode is the same fork
+		 * else see if the proper fork exists.
+		 * If it does, return that one, else do VOP_CACHEDLOOKUP()
+		 * Notice that nothing is done if an undefined fork is named. Just leave and let lookup()
+		 * handle strange cases.
+		 *
+		 * XXX SER Notice that when the target is not what was in the name cache,
+		 * it is locked, before trying to get its sibling. Could this be a problem since both
+		 * siblings can be locked, but not in a deterministic order????
+		 */
+		u_int16_t forkType;
+
+		error = vget(vdp, LK_EXCLUSIVE, p);
+		if ((! error) && (vdp->v_type == VREG) && (vpid == vdp->v_id)) {
+			if (!(flags & ISLASTCN)) {
+				forkType = GetForkFromName(cnp);
+				if (forkType != kUndefinedFork) {
+					flags |= ISLASTCN;
+					if (H_FORKTYPE(VTOH(vdp)) != forkType) {
+						error = hfs_vget_sibling(vdp, forkType, vpp);
+						vput(vdp);
+						if (! error) {
+							vdp = *vpp;
+							vpid = vdp->v_id;
+						}
+					}
+				}
+			}
+			else {
+				/* Its the last item, so we want the data fork */
+				if (H_FORKTYPE(VTOH(vdp)) != kDataFork) {
+					error = hfs_vget_sibling(vdp, kDataFork, vpp);
+					vput(vdp);
+					if (! error) {
+						vdp = *vpp;
+						vpid = vdp->v_id;
+					}
+				}
+			};
+		};
+		if (!lockparent || error || !(flags & ISLASTCN))
+			VOP_UNLOCK(pdp, 0, p);
+	};
+	/*
+	 * Check that the capability number did not change
+	 * while we were waiting for the lock.
+	 */
+	if (!error) {
+		if (vpid == vdp->v_id)
+			return (0);	/* HERE IS THE NORMAL EXIT FOR CACHE LOOKUP!!!! */
+		/*
+		 * The above is the NORMAL exit, after this point is an error
+		 * condition: the cached vnode was recycled while unlocked, so
+		 * fall back to a real lookup.
+		 */
+		vput(vdp);
+		if (lockparent && pdp != vdp && (flags & ISLASTCN))
+			VOP_UNLOCK(pdp, 0, p);
+	}
+	error = vn_lock(pdp, LK_EXCLUSIVE, p);
+	if (error)
+		return (error);
+	return (hfs_lookup(ap));
+}
+
+/*
+ * Parses a componentname and sees if the remaining path
+ * contains a hfs named fork specifier.
If it does set the
+ * componentname to consume the rest of the path, and
+ * return the forkType
+ *
+ * NOTE(review): this deliberately reads PAST cn_namelen (scan-ahead into the
+ * remaining path buffer); callers only invoke it when ISLASTCN is clear, so
+ * more path bytes follow the component -- confirm all call sites uphold that.
+ */
+
+u_int16_t GetForkFromName(struct componentname *cnp)
+{
+	u_int16_t forkType = kUndefinedFork;
+	char *tcp = cnp->cn_nameptr + cnp->cn_namelen;	/* first byte after the component */
+
+	/* The sizeof(x) (no -1) compares include the trailing NUL, so the fork
+	 * name must terminate the path; sizeof(x)-1 compares match a prefix only. */
+	if (bcmp(tcp, _PATH_FORKSPECIFIER, sizeof(_PATH_FORKSPECIFIER) - 1) == 0) {
+		/* Its a HFS fork, so far */
+		tcp += (sizeof(_PATH_FORKSPECIFIER) - 1);
+		if (bcmp(tcp, _PATH_DATANAME, sizeof(_PATH_DATANAME)) == 0) {
+			forkType = kDataFork;
+			cnp->cn_consume = sizeof(_PATH_FORKSPECIFIER) + sizeof(_PATH_DATANAME) - 2;
+		}
+		else if (bcmp(tcp, _PATH_RSRCNAME, sizeof(_PATH_RSRCNAME)) == 0) {
+			forkType = kRsrcFork;
+			cnp->cn_consume = sizeof(_PATH_FORKSPECIFIER) + sizeof(_PATH_RSRCNAME) - 2;
+		};	/* else if */
+	};	/* if bcmp */
+
+
+	/* XXX SER For backwards compatibility...keep it */
+	if (forkType == kUndefinedFork) {
+		tcp = cnp->cn_nameptr + cnp->cn_namelen;
+		if (bcmp(tcp, gHFSForkIdentStr, sizeof(gHFSForkIdentStr) - 1) == 0) {
+			/* Its a HFS fork, so far */
+			tcp += (sizeof(gHFSForkIdentStr) - 1);
+			if (bcmp(tcp, gDataForkNameStr, sizeof(gDataForkNameStr)) == 0) {
+				forkType = kDataFork;
+				cnp->cn_consume = sizeof(gHFSForkIdentStr) + sizeof(gDataForkNameStr) - 2;
+			}
+			else if (bcmp(tcp, gRsrcForkNameStr, sizeof(gRsrcForkNameStr)) == 0) {
+				forkType = kRsrcFork;
+				cnp->cn_consume = sizeof(gHFSForkIdentStr) + sizeof(gRsrcForkNameStr) - 2;
+			};	/* else if */
+		};	/* if bcmp */
+	};
+
+	return forkType;
+}
+
+#if DBG_VOP_TEST_LOCKS
+
+void DbgLookupTest( char *funcname, struct componentname *cnp, struct vnode *dvp, struct vnode *vp)
+{
+	if (!
(hfs_dbg_lookup || hfs_dbg_all)) + return; + + + if (dvp) { + if (lockstatus(&VTOH(dvp)->h_lock)) { + DBG_LOOKUP (("%s: Parent vnode exited LOCKED", funcname)); + } + else { + DBG_LOOKUP (("%s: Parent vnode exited UNLOCKED", funcname)); + } + } + + if (vp) { + if (vp==dvp) + { + DBG_LOOKUP (("%s: Target and Parent are the same", funcname)); + } + else { + if (lockstatus(&VTOH(vp)->h_lock)) { + DBG_LOOKUP (("%s: Found vnode exited LOCKED", funcname)); + } + else { + DBG_LOOKUP (("%s: Found vnode exited LOCKED", funcname)); + } + } + DBG_LOOKUP (("%s: Found vnode 0x%x has vtype of %d\n ", funcname, (u_int)vp, vp->v_type)); + } + else + DBG_LOOKUP (("%s: Found vnode exited NULL\n", funcname)); + + +} + +#endif /* DBG_VOP_TEST_LOCKS */ + diff --git a/bsd/hfs/hfs_macos_defs.h b/bsd/hfs/hfs_macos_defs.h new file mode 100644 index 000000000..73aa2ddcd --- /dev/null +++ b/bsd/hfs/hfs_macos_defs.h @@ -0,0 +1,1658 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: hfs_macos_types.h + + Contains: Basic Macintosh OS data types. 
+ + Version: System 7.5 + + DRI: Nick Kledzik + + History: + 12-Aug-1999 Scott Roberts Created from ConditionalMacros.h, MacOSStubs.h, MacOSTypes.h + + + +*/ + + +#ifndef __hfs_macos_types__ +#define __hfs_macos_types__ + + +#include + #ifdef KERNEL + #include + #include + #endif + +#include +#include +#include +#include + +/****** START OF CONDITIONALMACROS *********/ + + #if defined(__ppc__) || defined(powerpc) || defined(ppc) + #define TARGET_CPU_PPC 1 + #define TARGET_CPU_68K 0 + #define TARGET_CPU_X86 0 + #define TARGET_CPU_MIPS 0 + #define TARGET_CPU_SPARC 0 + #define TARGET_CPU_ALPHA 0 + #define TARGET_RT_MAC_CFM 0 + #define TARGET_RT_MAC_MACHO 1 + #define TARGET_RT_MAC_68881 0 + #define TARGET_RT_LITTLE_ENDIAN 0 + #define TARGET_RT_BIG_ENDIAN 1 + #elif defined(m68k) + #define TARGET_CPU_PPC 0 + #define TARGET_CPU_68K 1 + #define TARGET_CPU_X86 0 + #define TARGET_CPU_MIPS 0 + #define TARGET_CPU_SPARC 0 + #define TARGET_CPU_ALPHA 0 + #define TARGET_RT_MAC_CFM 0 + #define TARGET_RT_MAC_MACHO 1 + #define TARGET_RT_MAC_68881 0 + #define TARGET_RT_LITTLE_ENDIAN 0 + #define TARGET_RT_BIG_ENDIAN 1 + #elif defined(sparc) + #define TARGET_CPU_PPC 0 + #define TARGET_CPU_68K 0 + #define TARGET_CPU_X86 0 + #define TARGET_CPU_MIPS 0 + #define TARGET_CPU_SPARC 1 + #define TARGET_CPU_ALPHA 0 + #define TARGET_RT_MAC_CFM 0 + #define TARGET_RT_MAC_MACHO 1 + #define TARGET_RT_MAC_68881 0 + #define TARGET_RT_LITTLE_ENDIAN 0 + #define TARGET_RT_BIG_ENDIAN 1 + #elif defined(__i386__) || defined(i386) || defined(intel) + #define TARGET_CPU_PPC 0 + #define TARGET_CPU_68K 0 + #define TARGET_CPU_X86 1 + #define TARGET_CPU_MIPS 0 + #define TARGET_CPU_SPARC 0 + #define TARGET_CPU_ALPHA 0 + #define TARGET_RT_MAC_CFM 0 + #define TARGET_RT_MAC_MACHO 1 + #define TARGET_RT_MAC_68881 0 + #define TARGET_RT_LITTLE_ENDIAN 1 + #define TARGET_RT_BIG_ENDIAN 0 + #else + #error unrecognized GNU C compiler + #endif + + + #define TARGET_OS_MAC 0 + #define TARGET_OS_WIN32 0 + #define TARGET_OS_UNIX 0 + 
+ #define PRAGMA_IMPORT 0 + #define PRAGMA_STRUCT_ALIGN 1 + #define PRAGMA_ONCE 0 + #define PRAGMA_STRUCT_PACK 0 + #define PRAGMA_STRUCT_PACKPUSH 0 + #define PRAGMA_ENUM_PACK 0 + #define PRAGMA_ENUM_ALWAYSINT 0 + #define PRAGMA_ENUM_OPTIONS 0 + #define FOUR_CHAR_CODE(x) (x) + + #define TYPE_EXTENDED 0 + #if __GNUC__ >= 2 + #define TYPE_LONGLONG 1 + #else + #define TYPE_LONGLONG 0 + #endif + #ifdef __cplusplus + #define TYPE_BOOL 1 + #else + #define TYPE_BOOL 0 + #endif + + #define FUNCTION_PASCAL 0 + #define FUNCTION_DECLSPEC 0 + #define FUNCTION_WIN32CC 0 + + + #define EXTERN_API(_type) extern _type + #define EXTERN_API_C(_type) extern _type + #define EXTERN_API_STDCALL(_type) extern _type + #define EXTERN_API_C_STDCALL(_type) extern _type + + #define DEFINE_API(_type) _type + #define DEFINE_API_C(_type) _type + #define DEFINE_API_STDCALL(_type) _type + #define DEFINE_API_C_STDCALL(_type) _type + + #define CALLBACK_API(_type, _name) _type ( * _name) + #define CALLBACK_API_C(_type, _name) _type ( * _name) + #define CALLBACK_API_STDCALL(_type, _name) _type ( * _name) + #define CALLBACK_API_C_STDCALL(_type, _name) _type ( * _name) + + #define TARGET_API_MACOS_X 1 + #define TARGET_API_MAC_OS8 0 + #define TARGET_API_MAC_CARBON 0 + + #define ONEWORDINLINE(w1) + #define TWOWORDINLINE(w1,w2) + #define THREEWORDINLINE(w1,w2,w3) + #define FOURWORDINLINE(w1,w2,w3,w4) + #define FIVEWORDINLINE(w1,w2,w3,w4,w5) + #define SIXWORDINLINE(w1,w2,w3,w4,w5,w6) + #define SEVENWORDINLINE(w1,w2,w3,w4,w5,w6,w7) + #define EIGHTWORDINLINE(w1,w2,w3,w4,w5,w6,w7,w8) + #define NINEWORDINLINE(w1,w2,w3,w4,w5,w6,w7,w8,w9) + #define TENWORDINLINE(w1,w2,w3,w4,w5,w6,w7,w8,w9,w10) + #define ELEVENWORDINLINE(w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11) + #define TWELVEWORDINLINE(w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11,w12) + + +/****** START OF MACOSTYPES *********/ + + +/* + 4.4BSD's sys/types.h defines size_t without defining __size_t__: + Things are a lot clearer from here on if we define __size_t__ now. 
+ */ +#define __size_t__ + +/* + Convert kernel's diagnostic flag to MacOS's +*/ +#if HFS_DIAGNOSTIC + #define DEBUG_BUILD 1 +#else + #define DEBUG_BUILD 0 +#endif /* DIAGNOSTIC */ + +/******************************************************************************** + + Special values in C + + NULL The C standard for an impossible pointer value + nil A carry over from pascal, NULL is prefered for C + +*********************************************************************************/ +#ifndef NULL + #define NULL 0 +#endif + +#ifndef nil + #define nil NULL +#endif + + +/******************************************************************************** + + Base integer types for all target OS's and CPU's + + UInt8 8-bit unsigned integer + SInt8 8-bit signed integer + UInt16 16-bit unsigned integer + SInt16 16-bit signed integer + UInt32 32-bit unsigned integer + SInt32 32-bit signed integer + UInt64 64-bit unsigned integer + SInt64 64-bit signed integer + +*********************************************************************************/ +typedef u_int8_t UInt8; +typedef int8_t SInt8; +typedef u_int16_t UInt16; +typedef int16_t SInt16; +typedef u_int32_t UInt32; +typedef int32_t SInt32; +typedef u_int64_t UInt64; +typedef int64_t SInt64; + + + +/******************************************************************************** + + Base floating point types + + Float32 32 bit IEEE float: 1 sign bit, 8 exponent bits, 23 fraction bits + Float64 64 bit IEEE float: 1 sign bit, 11 exponent bits, 52 fraction bits + Float80 80 bit MacOS float: 1 sign bit, 15 exponent bits, 1 integer bit, 63 fraction bits + Float96 96 bit 68881 float: 1 sign bit, 15 exponent bits, 16 pad bits, 1 integer bit, 63 fraction bits + + Note: These are fixed size floating point types, useful when writing a floating + point value to disk. If your compiler does not support a particular size + float, a struct is used instead. + Use of of the NCEG types (e.g. double_t) or an ANSI C type (e.g. 
double) if + you want a floating point representation that is natural for any given + compiler, but might be a different size on different compilers. + +*********************************************************************************/ +typedef float Float32; +typedef double Float64; + +struct Float80 { + SInt16 exp; + UInt16 man[4]; +}; +typedef struct Float80 Float80; + +struct Float96 { + SInt16 exp[2]; /* the second 16-bits is always zero */ + UInt16 man[4]; +}; +typedef struct Float96 Float96; + + + +/******************************************************************************** + + MacOS Memory Manager types + + Ptr Pointer to a non-relocatable block + Handle Pointer to a master pointer to a relocatable block + Size The number of bytes in a block (signed for historical reasons) + +*********************************************************************************/ +typedef char * Ptr; +typedef Ptr * Handle; +typedef long Size; +/******************************************************************************** + + Higher level basic types + + OSErr 16-bit result error code + OSStatus 32-bit result error code + LogicalAddress Address in the clients virtual address space + ConstLogicalAddress Address in the clients virtual address space that will only be read + PhysicalAddress Real address as used on the hardware bus + BytePtr Pointer to an array of bytes + ByteCount The size of an array of bytes + ByteOffset An offset into an array of bytes + ItemCount 32-bit iteration count + OptionBits Standard 32-bit set of bit flags + PBVersion ? + Duration 32-bit millisecond timer for drivers + AbsoluteTime 64-bit clock + ScriptCode The coarse features of a written language (e.g. Roman vs Cyrillic) + LangCode A particular language (e.g. English) + RegionCode A variation of a language (British vs American English) + FourCharCode A 32-bit value made by packing four 1 byte characters together + OSType A FourCharCode used in the OS and file system (e.g. 
creator) + ResType A FourCharCode used to tag resources (e.g. 'DLOG') + +*********************************************************************************/ +typedef SInt16 OSErr; +typedef SInt32 OSStatus; +typedef void * LogicalAddress; +typedef const void * ConstLogicalAddress; +typedef void * PhysicalAddress; +typedef UInt8 * BytePtr; +typedef UInt32 ByteCount; +typedef UInt32 ByteOffset; +typedef SInt32 Duration; +typedef UInt64 AbsoluteTime; +typedef UInt32 OptionBits; +typedef UInt32 ItemCount; +typedef UInt32 PBVersion; +typedef SInt16 ScriptCode; +typedef SInt16 LangCode; +typedef SInt16 RegionCode; +typedef unsigned long FourCharCode; +typedef FourCharCode OSType; +typedef FourCharCode ResType; +typedef OSType * OSTypePtr; +typedef ResType * ResTypePtr; + + +/******************************************************************************** + + Boolean types and values + + Boolean A one byte value, holds "false" (0) or "true" (1) + false The Boolean value of zero (0) + true The Boolean value of one (1) + +*********************************************************************************/ +/* + The identifiers "true" and "false" are becoming keywords in C++ + and work with the new built-in type "bool" + "Boolean" will remain an unsigned char for compatibility with source + code written before "bool" existed. 
+*/ +#if !TYPE_BOOL + +enum { + false = 0, + true = 1 +}; + +#endif /* !TYPE_BOOL */ + +typedef unsigned char Boolean; + + +/******************************************************************************** + + Function Pointer Types + + ProcPtr Generic pointer to a function + Register68kProcPtr Pointer to a 68K function that expects parameters in registers + UniversalProcPtr Pointer to classic 68K code or a RoutineDescriptor + + ProcHandle Pointer to a ProcPtr + UniversalProcHandle Pointer to a UniversalProcPtr + +*********************************************************************************/ +typedef long (*ProcPtr)(); +typedef void (*Register68kProcPtr)(); + +typedef ProcPtr UniversalProcPtr; + +typedef ProcPtr * ProcHandle; +typedef UniversalProcPtr * UniversalProcHandle; + +/******************************************************************************** + + Quickdraw Types + + Point 2D Quickdraw coordinate, range: -32K to +32K + Rect Rectangluar Quickdraw area + Style Quickdraw font rendering styles + StyleParameter Style when used as a parameter (historical 68K convention) + StyleField Style when used as a field (historical 68K convention) + CharParameter Char when used as a parameter (historical 68K convention) + + Note: The original Macintosh toolbox in 68K Pascal defined Style as a SET. + Both Style and CHAR occupy 8-bits in packed records or 16-bits when + used as fields in non-packed records or as parameters. 
+ +*********************************************************************************/ +struct Point { + short v; + short h; +}; +typedef struct Point Point; + +typedef Point * PointPtr; +struct Rect { + short top; + short left; + short bottom; + short right; +}; +typedef struct Rect Rect; + +typedef Rect * RectPtr; +typedef short CharParameter; + +enum { + normal = 0, + bold = 1, + italic = 2, + underline = 4, + outline = 8, + shadow = 0x10, + condense = 0x20, + extend = 0x40 +}; + +typedef unsigned char Style; +typedef short StyleParameter; +typedef Style StyleField; + + +/******************************************************************************** + + Common Constants + + noErr OSErr: function performed properly - no error + kNilOptions OptionBits: all flags false + kInvalidID KernelID: NULL is for pointers as kInvalidID is for ID's + kVariableLengthArray array bounds: variable length array + + Note: kVariableLengthArray is used in array bounds to specify a variable length array. + It is ususally used in variable length structs when the last field is an array + of any size. Before ANSI C, we used zero as the bounds of variable length + array, but zero length array are illegal in ANSI C. 
Example usage: + + struct FooList + { + short listLength; + Foo elements[kVariableLengthArray]; + }; + +*********************************************************************************/ + +enum { + noErr = 0 +}; + + +enum { + kNilOptions = 0 +}; + +#define kInvalidID 0 + +enum { + kVariableLengthArray = 1 +}; + + + +/******************************************************************************** + + String Types + + UniChar A single UniCode character (16-bits) + + StrNNN Pascal string holding up to NNN bytes + StringPtr Pointer to a pascal string + StringHandle Pointer to a StringPtr + ConstStrNNNParam For function parameters only - means string is const + + CStringPtr Pointer to a C string (same as: char*) + ConstCStringPtr Pointer to a const C string (same as: const char*) + + Note: The length of a pascal string is stored in the first byte. + A pascal string does not have a termination byte and can be at most 255 bytes long. + The first character in a pascal string is offset one byte from the start of the string. + + A C string is terminated with a byte of value zero. + A C string has no length limitation. + The first character in a C string is the first byte of the string. + + +*********************************************************************************/ +typedef UInt16 UniChar; +typedef unsigned char Str255[256]; +typedef unsigned char Str63[64]; +typedef unsigned char Str32[33]; +typedef unsigned char Str31[32]; +typedef unsigned char Str27[28]; +typedef unsigned char Str15[16]; +/* + The type Str32 is used in many AppleTalk based data structures. + It holds up to 32 one byte chars. The problem is that with the + length byte it is 33 bytes long. This can cause weird alignment + problems in structures. To fix this the type "Str32Field" has + been created. It should only be used to hold 32 chars, but + it is 34 bytes long so that there are no alignment problems. 
+*/ +typedef unsigned char Str32Field[34]; +typedef unsigned char * StringPtr; +typedef StringPtr * StringHandle; +typedef const unsigned char * ConstStr255Param; +typedef const unsigned char * ConstStr63Param; +typedef const unsigned char * ConstStr32Param; +typedef const unsigned char * ConstStr31Param; +typedef const unsigned char * ConstStr27Param; +typedef const unsigned char * ConstStr15Param; +#ifdef __cplusplus +inline unsigned char StrLength(ConstStr255Param string) { return (*string); } +#else +#define StrLength(string) (*(unsigned char *)(string)) +#endif /* defined(__cplusplus) */ + +typedef const unsigned char * ConstUTF8Param; + +/********************************************************************************* + + Old names for types + +*********************************************************************************/ +typedef UInt8 Byte; +typedef SInt8 SignedByte; +typedef SInt64 * WidePtr; +typedef UInt64 * UnsignedWidePtr; +typedef Float80 extended80; +typedef Float96 extended96; +typedef SInt8 VHSelect; + + +EXTERN_API( void ) +DebugStr (ConstStr255Param debuggerMsg); + +/********************************************************************************* + + Added types for HFSPlus MacOS X functionality. Needs to be incorporated to + other places + +*********************************************************************************/ + + typedef struct vnode* FileReference; + #define kNoFileReference NULL + + +#define HFSInstrumentation 0 + + +/***** START OF MACOSSTUBS ********/ + + +/* + SizeTDef.h -- Common definitions + + size_t - this type is defined by several ANSI headers. +*/ +#if ! 
defined (__size_t__) + #define __size_t__ + #if defined (__xlc) || defined (__xlC) || defined (__xlC__) || defined (__MWERKS__) + typedef unsigned long size_t; + #else /* __xlC */ + typedef unsigned int size_t; + #endif /* __xlC */ +#endif /* __size_t__ */ + + +/* + StdDef.h -- Common definitions + +*/ + +#define offsetof(structure,field) ((size_t)&((structure *) 0)->field) + + + +/* + File: Errors.h + +*/ +enum { + paramErr = -50, /*error in user parameter list*/ + noHardwareErr = -200, /*Sound Manager Error Returns*/ + notEnoughHardwareErr = -201, /*Sound Manager Error Returns*/ + userCanceledErr = -128, + qErr = -1, /*queue element not found during deletion*/ + vTypErr = -2, /*invalid queue element*/ + corErr = -3, /*core routine number out of range*/ + unimpErr = -4, /*unimplemented core routine*/ + SlpTypeErr = -5, /*invalid queue element*/ + seNoDB = -8, /*no debugger installed to handle debugger command*/ + controlErr = -17, /*I/O System Errors*/ + statusErr = -18, /*I/O System Errors*/ + readErr = -19, /*I/O System Errors*/ + writErr = -20, /*I/O System Errors*/ + badUnitErr = -21, /*I/O System Errors*/ + unitEmptyErr = -22, /*I/O System Errors*/ + openErr = -23, /*I/O System Errors*/ + closErr = -24, /*I/O System Errors*/ + dRemovErr = -25, /*tried to remove an open driver*/ + dInstErr = -26 /*DrvrInstall couldn't find driver in resources*/ +}; + +enum { /* Printing Errors */ + iMemFullErr = -108, + iIOAbort = -27, /*Scrap Manager errors*/ + noScrapErr = -100, /*No scrap exists error*/ + noTypeErr = -102, /*No object of that type in scrap*/ + memROZWarn = -99, /*soft error in ROZ*/ + memROZError = -99, /*hard error in ROZ*/ + memROZErr = -99, /*hard error in ROZ*/ + memFullErr = -108, /*Not enough room in heap zone*/ + nilHandleErr = -109, /*Master Pointer was NIL in HandleZone or other*/ + memWZErr = -111, /*WhichZone failed (applied to free block)*/ + memPurErr = -112, /*trying to purge a locked or non-purgeable block*/ + memAdrErr = -110 /*address was 
odd; or out of range*/ +}; + + + +enum { + abortErr = -27, /*IO call aborted by KillIO*/ + iIOAbortErr = -27, /*IO abort error (Printing Manager)*/ + notOpenErr = -28, /*Couldn't rd/wr/ctl/sts cause driver not opened*/ + unitTblFullErr = -29, /*unit table has no more entries*/ + dceExtErr = -30, /*dce extension error*/ + slotNumErr = -360, /*invalid slot # error*/ + gcrOnMFMErr = -400, /*gcr format on high density media error*/ + dirFulErr = -33, /*Directory full*/ + dskFulErr = -34, /*disk full*/ + nsvErr = -35, /*no such volume*/ + ioErr = -36, /*I/O error (bummers)*/ + bdNamErr = -37, /*there may be no bad names in the final system!*/ + fnOpnErr = -38, /*File not open*/ + eofErr = -39, /*End of file*/ + posErr = -40, /*tried to position to before start of file (r/w)*/ + mFulErr = -41, /*memory full (open) or file won't fit (load)*/ + tmfoErr = -42, /*too many files open*/ + fnfErr = -43, /*File not found*/ + wPrErr = -44, /*diskette is write protected.*/ + fLckdErr = -45 /*file is locked*/ +}; + + +enum { + vLckdErr = -46, /*volume is locked*/ + fBsyErr = -47, /*File is busy (delete)*/ + dupFNErr = -48, /*duplicate filename (rename)*/ + opWrErr = -49, /*file already open with with write permission*/ + rfNumErr = -51, /*refnum error*/ + gfpErr = -52, /*get file position error*/ + volOffLinErr = -53, /*volume not on line error (was Ejected)*/ + permErr = -54, /*permissions error (on file open)*/ + volOnLinErr = -55, /*drive volume already on-line at MountVol*/ + nsDrvErr = -56, /*no such drive (tried to mount a bad drive num)*/ + noMacDskErr = -57, /*not a mac diskette (sig bytes are wrong)*/ + extFSErr = -58, /*volume in question belongs to an external fs*/ + fsRnErr = -59, /*file system internal error:during rename the old entry was deleted but could not be restored.*/ + badMDBErr = -60, /*bad master directory block*/ + wrPermErr = -61, /*write permissions error*/ + dirNFErr = -120, /*Directory not found*/ + tmwdoErr = -121, /*No free WDCB available*/ + 
badMovErr = -122, /*Move into offspring error*/ + wrgVolTypErr = -123, /*Wrong volume type error [operation not supported for MFS]*/ + volGoneErr = -124 /*Server volume has been disconnected.*/ +}; + +enum { + /*Dictionary Manager errors*/ + notBTree = -410, /*The file is not a dictionary.*/ + btNoSpace = -413, /*Can't allocate disk space.*/ + btDupRecErr = -414, /*Record already exists.*/ + btRecNotFnd = -415, /*Record cannot be found.*/ + btKeyLenErr = -416, /*Maximum key length is too long or equal to zero.*/ + btKeyAttrErr = -417, /*There is no such a key attribute.*/ + unknownInsertModeErr = -20000, /*There is no such an insert mode.*/ + recordDataTooBigErr = -20001, /*The record data is bigger than buffer size (1024 bytes).*/ + invalidIndexErr = -20002 /*The recordIndex parameter is not valid.*/ +}; + + +enum { + fidNotFound = -1300, /*no file thread exists.*/ + fidExists = -1301, /*file id already exists*/ + notAFileErr = -1302, /*directory specified*/ + diffVolErr = -1303, /*files on different volumes*/ + catChangedErr = -1304, /*the catalog has been modified*/ + desktopDamagedErr = -1305, /*desktop database files are corrupted*/ + sameFileErr = -1306, /*can't exchange a file with itself*/ + badFidErr = -1307, /*file id is dangling or doesn't match with the file number*/ + notARemountErr = -1308, /*when _Mount allows only remounts and doesn't get one*/ + fileBoundsErr = -1309, /*file's EOF, offset, mark or size is too big*/ + fsDataTooBigErr = -1310, /*file or volume is too big for system*/ + volVMBusyErr = -1311, /*can't eject because volume is in use by VM*/ + envNotPresent = -5500, /*returned by glue.*/ + envBadVers = -5501, /*Version non-positive*/ + envVersTooBig = -5502, /*Version bigger than call can handle*/ + fontDecError = -64, /*error during font declaration*/ + fontNotDeclared = -65, /*font not declared*/ + fontSubErr = -66, /*font substitution occured*/ + fontNotOutlineErr = -32615, /*bitmap font passed to routine that does outlines only*/ + 
firstDskErr = -84, /*I/O System Errors*/ + lastDskErr = -64, /*I/O System Errors*/ + noDriveErr = -64, /*drive not installed*/ + offLinErr = -65, /*r/w requested for an off-line drive*/ + noNybErr = -66 /*couldn't find 5 nybbles in 200 tries*/ +}; + +enum { + /* general text errors*/ + kTextUnsupportedEncodingErr = -8738, /* specified encoding not supported for this operation*/ + kTextMalformedInputErr = -8739, /* in DBCS, for example, high byte followed by invalid low byte*/ + kTextUndefinedElementErr = -8740, /* text conversion errors*/ + kTECMissingTableErr = -8745, + kTECTableChecksumErr = -8746, + kTECTableFormatErr = -8747, + kTECCorruptConverterErr = -8748, /* invalid converter object reference*/ + kTECNoConversionPathErr = -8749, + kTECBufferBelowMinimumSizeErr = -8750, /* output buffer too small to allow processing of first input text element*/ + kTECArrayFullErr = -8751, /* supplied name buffer or TextRun, TextEncoding, or UnicodeMapping array is too small*/ + kTECBadTextRunErr = -8752, + kTECPartialCharErr = -8753, /* input buffer ends in the middle of a multibyte character, conversion stopped*/ + kTECUnmappableElementErr = -8754, + kTECIncompleteElementErr = -8755, /* text element may be incomplete or is too long for internal buffers*/ + kTECDirectionErr = -8756, /* direction stack overflow, etc.*/ + kTECGlobalsUnavailableErr = -8770, /* globals have already been deallocated (premature TERM)*/ + kTECItemUnavailableErr = -8771, /* item (e.g. 
name) not available for specified region (& encoding if relevant)*/ + /* text conversion status codes*/ + kTECUsedFallbacksStatus = -8783, + kTECNeedFlushStatus = -8784, + kTECOutputBufferFullStatus = -8785, /* output buffer has no room for conversion of next input text element (partial conversion)*/ + /* deprecated error & status codes for low-level converter*/ + unicodeChecksumErr = -8769, + unicodeNoTableErr = -8768, + unicodeVariantErr = -8767, + unicodeFallbacksErr = -8766, + unicodePartConvertErr = -8765, + unicodeBufErr = -8764, + unicodeCharErr = -8763, + unicodeElementErr = -8762, + unicodeNotFoundErr = -8761, + unicodeTableFormatErr = -8760, + unicodeDirectionErr = -8759, + unicodeContextualErr = -8758, + unicodeTextEncodingDataErr = -8757 +}; + + +/* + File: MacMemory.h + + +*/ + + +/* + File: MixedMode.h + +*/ + +/* Calling Conventions */ +typedef unsigned short CallingConventionType; + +enum { + kPascalStackBased = 0, + kCStackBased = 1, + kRegisterBased = 2, + kD0DispatchedPascalStackBased = 8, + kD1DispatchedPascalStackBased = 12, + kD0DispatchedCStackBased = 9, + kStackDispatchedPascalStackBased = 14, + kThinkCStackBased = 5 +}; + + + #define STACK_UPP_TYPE(name) name + #define REGISTER_UPP_TYPE(name) name + + +/* + File: OSUtils.h + +*/ +typedef struct QElem QElem; + +typedef QElem * QElemPtr; +struct QHdr { + short qFlags; + QElemPtr qHead; + QElemPtr qTail; +}; +typedef struct QHdr QHdr; + +typedef QHdr * QHdrPtr; + +typedef CALLBACK_API( void , DeferredTaskProcPtr )(long dtParam); +/* + WARNING: DeferredTaskProcPtr uses register based parameters under classic 68k + and cannot be written in a high-level language without + the help of mixed mode or assembly glue. 
+*/ +typedef REGISTER_UPP_TYPE(DeferredTaskProcPtr) DeferredTaskUPP; +enum { uppDeferredTaskProcInfo = 0x0000B802 }; /* register no_return_value Func(4_bytes:A1) */ +#define NewDeferredTaskProc(userRoutine) (DeferredTaskUPP)NewRoutineDescriptor((ProcPtr)(userRoutine), uppDeferredTaskProcInfo, GetCurrentArchitecture()) +#define CallDeferredTaskProc(userRoutine, dtParam) CALL_ONE_PARAMETER_UPP((userRoutine), uppDeferredTaskProcInfo, (dtParam)) +struct DeferredTask { + QElemPtr qLink; + short qType; + short dtFlags; + DeferredTaskUPP dtAddr; + long dtParam; + long dtReserved; +}; +typedef struct DeferredTask DeferredTask; + +typedef DeferredTask * DeferredTaskPtr; + +/* + File: Finder.h + + +*/ + +/* + The following declerations used to be in Files.‰, + but are Finder specific and were moved here. +*/ + +enum { + /* Finder Flags */ + kIsOnDesk = 0x0001, + kColor = 0x000E, + kIsShared = 0x0040, /* bit 0x0080 is hasNoINITS */ + kHasBeenInited = 0x0100, /* bit 0x0200 was the letter bit for AOCE, but is now reserved for future use */ + kHasCustomIcon = 0x0400, + kIsStationery = 0x0800, + kNameLocked = 0x1000, + kHasBundle = 0x2000, + kIsInvisible = 0x4000, + kIsAlias = 0x8000 +}; + + +enum { + /* Finder Constants */ + fOnDesk = 1, + fHasBundle = 8192, + fTrash = -3, + fDesktop = -2, + fDisk = 0 +}; + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=mac68k +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(push, 2) +#elif PRAGMA_STRUCT_PACK + #pragma pack(2) +#endif + + +struct FInfo { + OSType fdType; /*the type of the file*/ + OSType fdCreator; /*file's creator*/ + unsigned short fdFlags; /*flags ex. 
hasbundle,invisible,locked, etc.*/ + Point fdLocation; /*file's location in folder*/ + short fdFldr; /*folder containing file*/ +}; +typedef struct FInfo FInfo; + +struct FXInfo { + short fdIconID; /*Icon ID*/ + short fdUnused[3]; /*unused but reserved 6 bytes*/ + SInt8 fdScript; /*Script flag and number*/ + SInt8 fdXFlags; /*More flag bits*/ + short fdComment; /*Comment ID*/ + long fdPutAway; /*Home Dir ID*/ +}; +typedef struct FXInfo FXInfo; + +struct DInfo { + Rect frRect; /*folder rect*/ + unsigned short frFlags; /*Flags*/ + Point frLocation; /*folder location*/ + short frView; /*folder view*/ +}; +typedef struct DInfo DInfo; + +struct DXInfo { + Point frScroll; /*scroll position*/ + long frOpenChain; /*DirID chain of open folders*/ + SInt8 frScript; /*Script flag and number*/ + SInt8 frXFlags; /*More flag bits*/ + short frComment; /*comment*/ + long frPutAway; /*DirID*/ +}; +typedef struct DXInfo DXInfo; + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=reset +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(pop) +#elif PRAGMA_STRUCT_PACK + #pragma pack() +#endif + + +enum { + fsRtParID = 1, + fsRtDirID = 2 +}; + + + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=mac68k +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(push, 2) +#elif PRAGMA_STRUCT_PACK + #pragma pack(2) +#endif + + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=reset +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(pop) +#elif PRAGMA_STRUCT_PACK + #pragma pack() +#endif + + +/* + * UTGetBlock options + */ + +enum { + gbDefault = 0, /* default value - read if not found */ + /* bits and masks */ + gbReadBit = 0, /* read block from disk (forced read) */ + gbReadMask = 0x0001, + gbExistBit = 1, /* get existing cache block */ + gbExistMask = 0x0002, + gbNoReadBit = 2, /* don't read block from disk if not found in cache */ + gbNoReadMask = 0x0004, + gbReleaseBit = 3, /* release block immediately after GetBlock */ + gbReleaseMask = 0x0008 +}; + + +/* + * UTReleaseBlock options + */ + +enum { + rbDefault = 0, 
/* default value - just mark the buffer not in-use */ + /* bits and masks */ + rbWriteBit = 0, /* force write buffer to disk */ + rbWriteMask = 0x0001, + rbTrashBit = 1, /* trash buffer contents after release */ + rbTrashMask = 0x0002, + rbDirtyBit = 2, /* mark buffer dirty */ + rbDirtyMask = 0x0004, + rbFreeBit = 3, /* free the buffer (save in the hash) */ + rbFreeMask = 0x000A /* rbFreeMask (rbFreeBit + rbTrashBit) works as rbTrash on < System 7.0 RamCache; on >= System 7.0, rbfreeMask overrides rbTrash */ +}; + +/* + * UTFlushCache options + */ + +enum { + fcDefault = 0, /* default value - pass this fcOption to just flush any dirty buffers */ + /* bits and masks */ + fcTrashBit = 0, /* (don't pass this as fcOption, use only for testing bit) */ + fcTrashMask = 0x0001, /* pass this fcOption value to flush and trash cache blocks */ + fcFreeBit = 1, /* (don't pass this as fcOption, use only for testing bit) */ + fcFreeMask = 0x0003 /* pass this fcOption to flush and free cache blocks (Note: both fcTrash and fcFree bits are set) */ +}; + + + +/* + * FCBRec.fcbFlags bits + */ + +enum { + fcbWriteBit = 0, /* Data can be written to this file */ + fcbWriteMask = 0x01, + fcbResourceBit = 1, /* This file is a resource fork */ + fcbResourceMask = 0x02, + fcbWriteLockedBit = 2, /* File has a locked byte range */ + fcbWriteLockedMask = 0x04, + fcbSharedWriteBit = 4, /* File is open for shared write access */ + fcbSharedWriteMask = 0x10, + fcbFileLockedBit = 5, /* File is locked (write-protected) */ + fcbFileLockedMask = 0x20, + fcbOwnClumpBit = 6, /* File has clump size specified in FCB */ + fcbOwnClumpMask = 0x40, + fcbModifiedBit = 7, /* File has changed since it was last flushed */ + fcbModifiedMask = 0x80 +}; + + +/* + File: TextCommon.h + +*/ + +/* LocaleIdentifier is an obsolete Copland typedef, will be removed soon*/ +typedef UInt32 LocaleIdentifier; +/* TextEncodingBase type & values */ +/* (values 0-32 correspond to the Script Codes defined in Inside Macintosh: Text 
pages 6-52 and 6-53 */ +typedef UInt32 TextEncodingBase; + +enum { + /* Mac OS encodings*/ + kTextEncodingMacRoman = 0L, + kTextEncodingMacJapanese = 1, + kTextEncodingMacChineseTrad = 2, + kTextEncodingMacKorean = 3, + kTextEncodingMacArabic = 4, + kTextEncodingMacHebrew = 5, + kTextEncodingMacGreek = 6, + kTextEncodingMacCyrillic = 7, + kTextEncodingMacDevanagari = 9, + kTextEncodingMacGurmukhi = 10, + kTextEncodingMacGujarati = 11, + kTextEncodingMacOriya = 12, + kTextEncodingMacBengali = 13, + kTextEncodingMacTamil = 14, + kTextEncodingMacTelugu = 15, + kTextEncodingMacKannada = 16, + kTextEncodingMacMalayalam = 17, + kTextEncodingMacSinhalese = 18, + kTextEncodingMacBurmese = 19, + kTextEncodingMacKhmer = 20, + kTextEncodingMacThai = 21, + kTextEncodingMacLaotian = 22, + kTextEncodingMacGeorgian = 23, + kTextEncodingMacArmenian = 24, + kTextEncodingMacChineseSimp = 25, + kTextEncodingMacTibetan = 26, + kTextEncodingMacMongolian = 27, + kTextEncodingMacEthiopic = 28, + kTextEncodingMacCentralEurRoman = 29, + kTextEncodingMacVietnamese = 30, + kTextEncodingMacExtArabic = 31, /* The following use script code 0, smRoman*/ + kTextEncodingMacSymbol = 33, + kTextEncodingMacDingbats = 34, + kTextEncodingMacTurkish = 35, + kTextEncodingMacCroatian = 36, + kTextEncodingMacIcelandic = 37, + kTextEncodingMacRomanian = 38, /* The following use script code 4, smArabic*/ + kTextEncodingMacFarsi = 0x8C, /* Like MacArabic but uses Farsi digits*/ + /* The following use script code 7, smCyrillic*/ + kTextEncodingMacUkrainian = 0x98, /* The following use script code 32, smUnimplemented*/ + kTextEncodingMacVT100 = 0xFC, /* VT100/102 font from Comm Toolbox: Latin-1 repertoire + box drawing etc*/ + /* Special Mac OS encodings*/ + kTextEncodingMacHFS = 0xFF, /* Meta-value, should never appear in a table.*/ + /* Unicode & ISO UCS encodings begin at 0x100*/ + kTextEncodingUnicodeDefault = 0x0100, /* Meta-value, should never appear in a table.*/ + kTextEncodingUnicodeV1_1 = 0x0101, + 
kTextEncodingISO10646_1993 = 0x0101, /* Code points identical to Unicode 1.1*/ + kTextEncodingUnicodeV2_0 = 0x0103, /* New location for Korean Hangul*/ + /* ISO 8-bit and 7-bit encodings begin at 0x200*/ + kTextEncodingISOLatin1 = 0x0201, /* ISO 8859-1*/ + kTextEncodingISOLatin2 = 0x0202, /* ISO 8859-2*/ + kTextEncodingISOLatinCyrillic = 0x0205, /* ISO 8859-5*/ + kTextEncodingISOLatinArabic = 0x0206, /* ISO 8859-6, = ASMO 708, =DOS CP 708*/ + kTextEncodingISOLatinGreek = 0x0207, /* ISO 8859-7*/ + kTextEncodingISOLatinHebrew = 0x0208, /* ISO 8859-8*/ + kTextEncodingISOLatin5 = 0x0209, /* ISO 8859-9*/ + /* MS-DOS & Windows encodings begin at 0x400*/ + kTextEncodingDOSLatinUS = 0x0400, /* code page 437*/ + kTextEncodingDOSGreek = 0x0405, /* code page 737 (formerly code page 437G)*/ + kTextEncodingDOSBalticRim = 0x0406, /* code page 775*/ + kTextEncodingDOSLatin1 = 0x0410, /* code page 850, "Multilingual"*/ + kTextEncodingDOSGreek1 = 0x0411, /* code page 851*/ + kTextEncodingDOSLatin2 = 0x0412, /* code page 852, Slavic*/ + kTextEncodingDOSCyrillic = 0x0413, /* code page 855, IBM Cyrillic*/ + kTextEncodingDOSTurkish = 0x0414, /* code page 857, IBM Turkish*/ + kTextEncodingDOSPortuguese = 0x0415, /* code page 860*/ + kTextEncodingDOSIcelandic = 0x0416, /* code page 861*/ + kTextEncodingDOSHebrew = 0x0417, /* code page 862*/ + kTextEncodingDOSCanadianFrench = 0x0418, /* code page 863*/ + kTextEncodingDOSArabic = 0x0419, /* code page 864*/ + kTextEncodingDOSNordic = 0x041A, /* code page 865*/ + kTextEncodingDOSRussian = 0x041B, /* code page 866*/ + kTextEncodingDOSGreek2 = 0x041C, /* code page 869, IBM Modern Greek*/ + kTextEncodingDOSThai = 0x041D, /* code page 874, also for Windows*/ + kTextEncodingDOSJapanese = 0x0420, /* code page 932, also for Windows*/ + kTextEncodingDOSChineseSimplif = 0x0421, /* code page 936, also for Windows*/ + kTextEncodingDOSKorean = 0x0422, /* code page 949, also for Windows; Unified Hangul Code*/ + kTextEncodingDOSChineseTrad = 0x0423, /* 
code page 950, also for Windows*/ + kTextEncodingWindowsLatin1 = 0x0500, /* code page 1252*/ + kTextEncodingWindowsANSI = 0x0500, /* code page 1252 (alternate name)*/ + kTextEncodingWindowsLatin2 = 0x0501, /* code page 1250, Central Europe*/ + kTextEncodingWindowsCyrillic = 0x0502, /* code page 1251, Slavic Cyrillic*/ + kTextEncodingWindowsGreek = 0x0503, /* code page 1253*/ + kTextEncodingWindowsLatin5 = 0x0504, /* code page 1254, Turkish*/ + kTextEncodingWindowsHebrew = 0x0505, /* code page 1255*/ + kTextEncodingWindowsArabic = 0x0506, /* code page 1256*/ + kTextEncodingWindowsBalticRim = 0x0507, /* code page 1257*/ + kTextEncodingWindowsKoreanJohab = 0x0510, /* code page 1361, for Windows NT*/ + /* Various national standards begin at 0x600*/ + kTextEncodingUS_ASCII = 0x0600, + kTextEncodingJIS_X0201_76 = 0x0620, + kTextEncodingJIS_X0208_83 = 0x0621, + kTextEncodingJIS_X0208_90 = 0x0622, + kTextEncodingJIS_X0212_90 = 0x0623, + kTextEncodingJIS_C6226_78 = 0x0624, + kTextEncodingGB_2312_80 = 0x0630, + kTextEncodingGBK_95 = 0x0631, /* annex to GB 13000-93; for Windows 95*/ + kTextEncodingKSC_5601_87 = 0x0640, /* same as KSC 5601-92 without Johab annex*/ + kTextEncodingKSC_5601_92_Johab = 0x0641, /* KSC 5601-92 Johab annex*/ + kTextEncodingCNS_11643_92_P1 = 0x0651, /* CNS 11643-1992 plane 1*/ + kTextEncodingCNS_11643_92_P2 = 0x0652, /* CNS 11643-1992 plane 2*/ + kTextEncodingCNS_11643_92_P3 = 0x0653, /* CNS 11643-1992 plane 3 (was plane 14 in 1986 version)*/ + /* ISO 2022 collections begin at 0x800*/ + kTextEncodingISO_2022_JP = 0x0820, + kTextEncodingISO_2022_JP_2 = 0x0821, + kTextEncodingISO_2022_CN = 0x0830, + kTextEncodingISO_2022_CN_EXT = 0x0831, + kTextEncodingISO_2022_KR = 0x0840, /* EUC collections begin at 0x900*/ + kTextEncodingEUC_JP = 0x0920, /* ISO 646, 1-byte katakana, JIS 208, JIS 212*/ + kTextEncodingEUC_CN = 0x0930, /* ISO 646, GB 2312-80*/ + kTextEncodingEUC_TW = 0x0931, /* ISO 646, CNS 11643-1992 Planes 1-16*/ + kTextEncodingEUC_KR = 0x0940, /* ISO 
646, KS C 5601-1987*/ + /* Misc standards begin at 0xA00*/ + kTextEncodingShiftJIS = 0x0A01, /* plain Shift-JIS*/ + kTextEncodingKOI8_R = 0x0A02, /* Russian internet standard*/ + kTextEncodingBig5 = 0x0A03, /* Big-5 (has variants)*/ + kTextEncodingMacRomanLatin1 = 0x0A04, /* Mac OS Roman permuted to align with ISO Latin-1*/ + kTextEncodingHZ_GB_2312 = 0x0A05, /* HZ (RFC 1842, for Chinese mail & news)*/ + /* Other platform encodings*/ + kTextEncodingNextStepLatin = 0x0B01, /* NextStep encoding*/ + /* EBCDIC & IBM host encodings begin at 0xC00*/ + kTextEncodingEBCDIC_US = 0x0C01, /* basic EBCDIC-US*/ + kTextEncodingEBCDIC_CP037 = 0x0C02, /* code page 037, extended EBCDIC (Latin-1 set) for US,Canada...*/ + /* Special value*/ + kTextEncodingMultiRun = 0x0FFF, /* Multi-encoding text with external run info*/ + /* The following are older names for backward compatibility*/ + kTextEncodingMacTradChinese = 2, + kTextEncodingMacRSymbol = 8, + kTextEncodingMacSimpChinese = 25, + kTextEncodingMacGeez = 28, + kTextEncodingMacEastEurRoman = 29, + kTextEncodingMacUninterp = 32 +}; + +/* TextEncodingVariant type & values */ +typedef UInt32 TextEncodingVariant; + +enum { + /* Default TextEncodingVariant, for any TextEncodingBase*/ + kTextEncodingDefaultVariant = 0, /* Variants of kTextEncodingMacIcelandic */ + kMacIcelandicStandardVariant = 0, /* 0xBB & 0xBC are fem./masc. 
ordinal indicators*/ + kMacIcelandicTrueTypeVariant = 1, /* 0xBB & 0xBC are fi/fl ligatures*/ + /* Variants of kTextEncodingMacJapanese*/ + kMacJapaneseStandardVariant = 0, + kMacJapaneseStdNoVerticalsVariant = 1, + kMacJapaneseBasicVariant = 2, + kMacJapanesePostScriptScrnVariant = 3, + kMacJapanesePostScriptPrintVariant = 4, + kMacJapaneseVertAtKuPlusTenVariant = 5, /* Variant options for most Japanese encodings (MacJapanese, ShiftJIS, EUC-JP, ISO 2022-JP) */ + /* These can be OR-ed into the variant value in any combination*/ + kJapaneseNoOneByteKanaOption = 0x20, + kJapaneseUseAsciiBackslashOption = 0x40, /* Variants of kTextEncodingMacArabic*/ + kMacArabicStandardVariant = 0, /* 0xC0 is 8-spoke asterisk, 0x2A & 0xAA are asterisk (e.g. Cairo)*/ + kMacArabicTrueTypeVariant = 1, /* 0xC0 is asterisk, 0x2A & 0xAA are multiply signs (e.g. Baghdad)*/ + kMacArabicThuluthVariant = 2, /* 0xC0 is Arabic five-point star, 0x2A & 0xAA are multiply signs*/ + kMacArabicAlBayanVariant = 3, /* 8-spoke asterisk, multiply sign, Koranic ligatures & parens*/ + /* Variants of kTextEncodingMacFarsi*/ + kMacFarsiStandardVariant = 0, /* 0xC0 is 8-spoke asterisk, 0x2A & 0xAA are asterisk (e.g. 
Tehran)*/ + kMacFarsiTrueTypeVariant = 1, /* asterisk, multiply signs, Koranic ligatures, geometric shapes*/ + /* Variants of kTextEncodingMacHebrew*/ + kMacHebrewStandardVariant = 0, + kMacHebrewFigureSpaceVariant = 1, /* Variants of Unicode & ISO 10646 encodings*/ + kUnicodeNoSubset = 0, + kUnicodeNoCompatibilityVariant = 1, + kUnicodeMaxDecomposedVariant = 2, + kUnicodeNoComposedVariant = 3, + kUnicodeNoCorporateVariant = 4, /* Variants of Big-5 encoding*/ + kBig5_BasicVariant = 0, + kBig5_StandardVariant = 1, /* 0xC6A1-0xC7FC: kana, Cyrillic, enclosed numerics*/ + kBig5_ETenVariant = 2, /* adds kana, Cyrillic, radicals, etc with hi bytes C6-C8,F9*/ + /* The following are older names for backward compatibility*/ + kJapaneseStandardVariant = 0, + kJapaneseStdNoVerticalsVariant = 1, + kJapaneseBasicVariant = 2, + kJapanesePostScriptScrnVariant = 3, + kJapanesePostScriptPrintVariant = 4, + kJapaneseVertAtKuPlusTenVariant = 5, /* kJapaneseStdNoOneByteKanaVariant = 6, // replaced by kJapaneseNoOneByteKanaOption*/ + /* kJapaneseBasicNoOneByteKanaVariant = 7, // replaced by kJapaneseNoOneByteKanaOption */ + kHebrewStandardVariant = 0, + kHebrewFigureSpaceVariant = 1 +}; + +/* TextEncodingFormat type & values */ +typedef UInt32 TextEncodingFormat; + +enum { + /* Default TextEncodingFormat for any TextEncodingBase*/ + kTextEncodingDefaultFormat = 0, /* Formats for Unicode & ISO 10646*/ + kUnicode16BitFormat = 0, + kUnicodeUTF7Format = 1, + kUnicodeUTF8Format = 2, + kUnicode32BitFormat = 3 +}; + +/* TextEncoding type */ +typedef UInt32 TextEncoding; +/* name part selector for GetTextEncodingName*/ +typedef UInt32 TextEncodingNameSelector; + +enum { + kTextEncodingFullName = 0, + kTextEncodingBaseName = 1, + kTextEncodingVariantName = 2, + kTextEncodingFormatName = 3 +}; + +/* Types used in conversion */ +struct TextEncodingRun { + ByteOffset offset; + TextEncoding textEncoding; +}; +typedef struct TextEncodingRun TextEncodingRun; + +typedef TextEncodingRun * 
TextEncodingRunPtr; +typedef const TextEncodingRun * ConstTextEncodingRunPtr; +struct ScriptCodeRun { + ByteOffset offset; + ScriptCode script; +}; +typedef struct ScriptCodeRun ScriptCodeRun; + +typedef ScriptCodeRun * ScriptCodeRunPtr; +typedef const ScriptCodeRun * ConstScriptCodeRunPtr; +typedef UInt8 * TextPtr; +typedef const UInt8 * ConstTextPtr; +/* Basic types for Unicode characters and strings: */ +typedef UniChar * UniCharArrayPtr; +typedef const UniChar * ConstUniCharArrayPtr; +/* enums for TextEncoding Conversion routines*/ + +enum { + kTextScriptDontCare = -128, + kTextLanguageDontCare = -128, + kTextRegionDontCare = -128 +}; + + + +/* + File: UnicodeConverter.h + + +*/ + +/* Unicode conversion contexts: */ +typedef struct OpaqueTextToUnicodeInfo* TextToUnicodeInfo; +typedef struct OpaqueUnicodeToTextInfo* UnicodeToTextInfo; +typedef struct OpaqueUnicodeToTextRunInfo* UnicodeToTextRunInfo; +typedef const TextToUnicodeInfo ConstTextToUnicodeInfo; +typedef const UnicodeToTextInfo ConstUnicodeToTextInfo; +/* UnicodeMapVersion type & values */ +typedef SInt32 UnicodeMapVersion; + +enum { + kUnicodeUseLatestMapping = -1, + kUnicodeUseHFSPlusMapping = 4 +}; + +/* Types used in conversion */ +struct UnicodeMapping { + TextEncoding unicodeEncoding; + TextEncoding otherEncoding; + UnicodeMapVersion mappingVersion; +}; +typedef struct UnicodeMapping UnicodeMapping; + +typedef UnicodeMapping * UnicodeMappingPtr; +typedef const UnicodeMapping * ConstUnicodeMappingPtr; +/* Control flags for ConvertFromUnicodeToText and ConvertFromTextToUnicode */ + +enum { + kUnicodeUseFallbacksBit = 0, + kUnicodeKeepInfoBit = 1, + kUnicodeDirectionalityBits = 2, + kUnicodeVerticalFormBit = 4, + kUnicodeLooseMappingsBit = 5, + kUnicodeStringUnterminatedBit = 6, + kUnicodeTextRunBit = 7, + kUnicodeKeepSameEncodingBit = 8 +}; + + +enum { + kUnicodeUseFallbacksMask = 1L << kUnicodeUseFallbacksBit, + kUnicodeKeepInfoMask = 1L << kUnicodeKeepInfoBit, + kUnicodeDirectionalityMask = 3L << 
kUnicodeDirectionalityBits, + kUnicodeVerticalFormMask = 1L << kUnicodeVerticalFormBit, + kUnicodeLooseMappingsMask = 1L << kUnicodeLooseMappingsBit, + kUnicodeStringUnterminatedMask = 1L << kUnicodeStringUnterminatedBit, + kUnicodeTextRunMask = 1L << kUnicodeTextRunBit, + kUnicodeKeepSameEncodingMask = 1L << kUnicodeKeepSameEncodingBit +}; + +/* Values for kUnicodeDirectionality field */ + +enum { + kUnicodeDefaultDirection = 0, + kUnicodeLeftToRight = 1, + kUnicodeRightToLeft = 2 +}; + +/* Directionality masks for control flags */ + +enum { + kUnicodeDefaultDirectionMask = kUnicodeDefaultDirection << kUnicodeDirectionalityBits, + kUnicodeLeftToRightMask = kUnicodeLeftToRight << kUnicodeDirectionalityBits, + kUnicodeRightToLeftMask = kUnicodeRightToLeft << kUnicodeDirectionalityBits +}; + +/* Control flags for TruncateForUnicodeToText: */ +/* + Now TruncateForUnicodeToText uses control flags from the same set as used by + ConvertFromTextToUnicode, ConvertFromUnicodeToText, etc., but only + kUnicodeStringUnterminatedMask is meaningful for TruncateForUnicodeToText. + + Previously two special control flags were defined for TruncateForUnicodeToText: + kUnicodeTextElementSafeBit = 0 + kUnicodeRestartSafeBit = 1 + However, neither of these was implemented. + Instead of implementing kUnicodeTextElementSafeBit, we now use + kUnicodeStringUnterminatedMask since it accomplishes the same thing and avoids + having special flags just for TruncateForUnicodeToText + Also, kUnicodeRestartSafeBit is unnecessary, since restart-safeness is handled by + setting kUnicodeKeepInfoBit with ConvertFromUnicodeToText. + If TruncateForUnicodeToText is called with one or both of the old special control + flags set (bits 0 or 1), it will not generate a paramErr, but the old bits have no + effect on its operation. 
+*/ + +/* Filter bits for filter field in QueryUnicodeMappings and CountUnicodeMappings: */ + +enum { + kUnicodeMatchUnicodeBaseBit = 0, + kUnicodeMatchUnicodeVariantBit = 1, + kUnicodeMatchUnicodeFormatBit = 2, + kUnicodeMatchOtherBaseBit = 3, + kUnicodeMatchOtherVariantBit = 4, + kUnicodeMatchOtherFormatBit = 5 +}; + + +enum { + kUnicodeMatchUnicodeBaseMask = 1L << kUnicodeMatchUnicodeBaseBit, + kUnicodeMatchUnicodeVariantMask = 1L << kUnicodeMatchUnicodeVariantBit, + kUnicodeMatchUnicodeFormatMask = 1L << kUnicodeMatchUnicodeFormatBit, + kUnicodeMatchOtherBaseMask = 1L << kUnicodeMatchOtherBaseBit, + kUnicodeMatchOtherVariantMask = 1L << kUnicodeMatchOtherVariantBit, + kUnicodeMatchOtherFormatMask = 1L << kUnicodeMatchOtherFormatBit +}; + +/* Control flags for SetFallbackUnicodeToText */ + +enum { + kUnicodeFallbackSequencingBits = 0 +}; + + +enum { + kUnicodeFallbackSequencingMask = 3L << kUnicodeFallbackSequencingBits +}; + +/* values for kUnicodeFallbackSequencing field */ + +enum { + kUnicodeFallbackDefaultOnly = 0L, + kUnicodeFallbackCustomOnly = 1L, + kUnicodeFallbackDefaultFirst = 2L, + kUnicodeFallbackCustomFirst = 3L +}; + + + +/* + File: Timer.h + + +*/ + + +enum { + /* high bit of qType is set if task is active */ + kTMTaskActive = (1L << 15) +}; + +typedef struct TMTask TMTask; +typedef TMTask * TMTaskPtr; +typedef CALLBACK_API( void , TimerProcPtr )(TMTaskPtr tmTaskPtr); +/* + WARNING: TimerProcPtr uses register based parameters under classic 68k + and cannot be written in a high-level language without + the help of mixed mode or assembly glue. +*/ +typedef REGISTER_UPP_TYPE(TimerProcPtr) TimerUPP; +struct TMTask { + QElemPtr qLink; + short qType; + TimerUPP tmAddr; + long tmCount; + long tmWakeUp; + long tmReserved; +}; + + +/* + File: TextCommonPriv.h + + +*/ + + +/* + ----------------------------------------------------------------------------------------------------------- + TextEncoding creation & extraction macros. 
+ Current packed format: + 31 30 29 26 25 16 15 0 + |pack| format | variant | base | + |vers| | | | + |2bit| 4 bits | 10 bits | 16 bits | + Unpacked elements + base 15 0 + | 0 | 16 bits | + variant 9 0 + | 0 | 10 bits | + format 3 0 + | 0 | 4 bits | + ----------------------------------------------------------------------------------------------------------- +*/ + +enum { + kTextEncodingVersion = 0 +}; + + +enum { + kTextEncodingBaseShiftBits = 0, /* <13>*/ + kTextEncodingVariantShiftBits = 16, /* <13>*/ + kTextEncodingFormatShiftBits = 26, /* <13><16>*/ + kTextEncodingVersionShiftBits = 30 +}; + + + +enum { + kTextEncodingBaseSourceMask = 0x0000FFFF, /* 16 bits <13>*/ + kTextEncodingVariantSourceMask = 0x000003FF, /* 10 bits <13><16>*/ + kTextEncodingFormatSourceMask = 0x0000000F, /* 4 bits <13><16>*/ + kTextEncodingVersionSourceMask = 0x00000003 /* 2 bits*/ +}; + + +enum { + kTextEncodingBaseMask = kTextEncodingBaseSourceMask << kTextEncodingBaseShiftBits, + kTextEncodingVariantMask = kTextEncodingVariantSourceMask << kTextEncodingVariantShiftBits, + kTextEncodingFormatMask = kTextEncodingFormatSourceMask << kTextEncodingFormatShiftBits, + kTextEncodingVersionMask = kTextEncodingVersionSourceMask << kTextEncodingVersionShiftBits +}; + + +enum { + kTextEncodingVersionShifted = (kTextEncodingVersion & kTextEncodingVersionSourceMask) << kTextEncodingVersionShiftBits +}; + + +#define CreateTextEncodingPriv(base,variant,format) \ + ( ((base & kTextEncodingBaseSourceMask) << kTextEncodingBaseShiftBits) \ + | ((variant & kTextEncodingVariantSourceMask) << kTextEncodingVariantShiftBits) \ + | ((format & kTextEncodingFormatSourceMask) << kTextEncodingFormatShiftBits) \ + | (kTextEncodingVersionShifted) ) +#define GetTextEncodingBasePriv(encoding) \ + ((encoding & kTextEncodingBaseMask) >> kTextEncodingBaseShiftBits) +#define GetTextEncodingVariantPriv(encoding) \ + ((encoding & kTextEncodingVariantMask) >> kTextEncodingVariantShiftBits) +#define 
GetTextEncodingFormatPriv(encoding) \ + ((encoding & kTextEncodingFormatMask) >> kTextEncodingFormatShiftBits) +#define IsMacTextEncoding(encoding) ((encoding & 0x0000FF00L) == 0x00000000L) +#define IsUnicodeTextEncoding(encoding) ((encoding & 0x0000FF00L) == 0x00000100L) +/* TextEncoding used by HFS*/ + +enum { + kMacHFSTextEncoding = 0x000000FF +}; + + +/* + File: Instrumentation.h + + +*/ +/*******************************************************************/ +/* Types */ +/*******************************************************************/ +/* Reference to an instrumentation class */ +typedef struct InstOpaqueClassRef* InstClassRef; + +/* Aliases to the generic instrumentation class for each type of class */ +typedef InstClassRef InstPathClassRef; +typedef InstClassRef InstTraceClassRef; +typedef InstClassRef InstHistogramClassRef; +typedef InstClassRef InstSplitHistogramClassRef; +typedef InstClassRef InstMagnitudeClassRef; +typedef InstClassRef InstGrowthClassRef; +typedef InstClassRef InstTallyClassRef; + +/* Reference to a data descriptor */ +typedef struct InstOpaqueDataDescriptorRef* InstDataDescriptorRef; + + +/*******************************************************************/ +/* Constant Definitions */ +/*******************************************************************/ + +/* Reference to the root of the class hierarchy */ +#define kInstRootClassRef ( (InstClassRef) -1) + +/* Options used for creating classes */ +typedef OptionBits InstClassOptions; + + +enum { + kInstDisableClassMask = 0x00, /* Create the class disabled */ + kInstEnableClassMask = 0x01, /* Create the class enabled */ + + kInstSummaryTraceClassMask = 0x20 /* Create a summary trace class instead of a regular one */ +}; + + + +EXTERN_API( Boolean ) +EqualString (ConstStr255Param str1, + ConstStr255Param str2, + Boolean caseSensitive, + Boolean diacSensitive); + + + + +/* + File: LowMemPriv.h + + +*/ + +/* The following replace storage used in low-mem on MacOS: */ +extern struct 
FSVarsRec * gFSMVars; + + +#define LMGetFSMVars() gFSMVars + + + +EXTERN_API( void ) +InsTime (QElemPtr tmTaskPtr); +EXTERN_API( void ) +PrimeTime (QElemPtr tmTaskPtr, + long count); +EXTERN_API( void ) +RmvTime (QElemPtr tmTaskPtr); + + + + +/* PROTOTYPES */ + +#if HFS_DIAGNOSTIC + extern void RequireFileLock(FileReference vp, int shareable); + #define REQUIRE_FILE_LOCK(vp,s) RequireFileLock((vp),(s)) +#else + #define REQUIRE_FILE_LOCK(vp,s) +#endif + + +EXTERN_API( void ) +BlockMove (const void * srcPtr, + void * destPtr, + Size byteCount); +EXTERN_API( void ) +BlockMoveData (const void * srcPtr, + void * destPtr, + Size byteCount); + +EXTERN_API_C( void ) +BlockMoveUncached (const void * srcPtr, + void * destPtr, + Size byteCount); + +EXTERN_API_C( void ) +BlockMoveDataUncached (const void * srcPtr, + void * destPtr, + Size byteCount); + +EXTERN_API_C( void ) +BlockZero (void * destPtr, + Size byteCount); + +EXTERN_API_C( void ) +BlockZeroUncached (void * destPtr, + Size byteCount); + +EXTERN_API( Ptr ) +NewPtr (Size byteCount); + +EXTERN_API( Ptr ) +NewPtrSys (Size byteCount); + +EXTERN_API( Ptr ) +NewPtrClear (Size byteCount); + +EXTERN_API( Ptr ) +NewPtrSysClear (Size byteCount); + +EXTERN_API( OSErr ) +MemError (void); + +EXTERN_API( void ) +DisposePtr (Ptr p); + +EXTERN_API( Size ) +GetPtrSize (Ptr p); + +EXTERN_API( void ) +SetPtrSize (Ptr p, + Size newSize); + +EXTERN_API( void ) +DisposeHandle (Handle h); + +EXTERN_API( void ) +SetHandleSize (Handle h, + Size newSize); + +/* + File: DateTimeUtils.h + + +*/ +EXTERN_API( void ) +GetDateTime (unsigned long * secs); + + + +#endif /* __hfs_macos_types__ */ diff --git a/bsd/hfs/hfs_mount.h b/bsd/hfs/hfs_mount.h new file mode 100644 index 000000000..b7c191ecb --- /dev/null +++ b/bsd/hfs/hfs_mount.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-2000 Apple Computer, Inc. All Rights Reserved + * + */ + +#ifndef _HFS_MOUNT_H_ +#define _HFS_MOUNT_H_ + +#include +#include + +/* + * Arguments to mount HFS-based filesystems + */ + +#define OVERRIDE_UNKNOWN_PERMISSIONS 0 + +#define UNKNOWNUID ((uid_t)99) +#define UNKNOWNGID ((gid_t)99) +#define UNKNOWNPERMISSIONS (S_IRWXU | S_IROTH | S_IXOTH) /* 705 */ + +struct hfs_mount_args { + char *fspec; /* block special device to mount */ + struct export_args export; /* network export information */ + uid_t hfs_uid; /* uid that owns hfs files (standard HFS only) */ + gid_t hfs_gid; /* gid that owns hfs files (standard HFS only) */ + mode_t hfs_mask; /* mask to be applied for hfs perms (standard HFS only) */ + u_long hfs_encoding; /* encoding for this volume (standard HFS only) */ + struct timezone hfs_timezone; /* user time zone info (standard HFS only) */ + int flags; /* mounting flags, see below */ +}; + +#define HFSFSMNT_NOXONFILES 0x1 /* disable execute permissions for files */ + +#endif /* ! 
_HFS_MOUNT_H_ */ diff --git a/bsd/hfs/hfs_readwrite.c b/bsd/hfs/hfs_readwrite.c new file mode 100644 index 000000000..dcb0fe1e5 --- /dev/null +++ b/bsd/hfs/hfs_readwrite.c @@ -0,0 +1,1831 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)hfs_readwrite.c 1.0 + * + * (c) 1990, 1992 NeXT Computer, Inc. All Rights Reserved + * (c) 1998 Apple Computer, Inc. All Rights Reserved + * + * + * hfs_readwrite.c -- vnode operations to deal with reading and writing files. + * + * MODIFICATION HISTORY: + * 9-Nov-1999 Scott Roberts hfs_allocate now returns sizes based on allocation block boundaries (#2398794) + * 3-Feb-1999 Pat Dirks Merged in Joe's change to hfs_truncate to skip vinvalbuf if LEOF isn't changing (#2302796) + * Removed superfluous (and potentially dangerous) second call to vinvalbuf() in hfs_truncate. + * 2-Dec-1998 Pat Dirks Added support for read/write bootstrap ioctls. + * 10-Nov-1998 Pat Dirks Changed read/write/truncate logic to optimize block sizes for first extents of a file. 
+ * Changed hfs_strategy to correct I/O sizes from cluster code I/O requests in light of + * different block sizing. Changed bexpand to handle RELEASE_BUFFER flag. + * 22-Sep-1998 Don Brady Changed truncate zero-fill to use bwrite after several bawrites have been queued. + * 11-Sep-1998 Pat Dirks Fixed buffering logic to not rely on B_CACHE, which is set for empty buffers that + * have been pre-read by cluster_read (use b_validend > 0 instead). + * 27-Aug-1998 Pat Dirks Changed hfs_truncate to use cluster_write in place of bawrite where possible. + * 25-Aug-1998 Pat Dirks Changed hfs_write to do small device-block aligned writes into buffers without doing + * read-ahead of the buffer. Added bexpand to deal with incomplete [dirty] buffers. + * Fixed can_cluster macro to use MAXPHYSIO instead of MAXBSIZE. + * 19-Aug-1998 Don Brady Remove optimization in hfs_truncate that prevented extra physical blocks from + * being truncated (radar #2265750). Also set fcb->fcbEOF before calling vinvalbuf. + * 7-Jul-1998 Pat Dirks Added code to honor IO_NOZEROFILL in hfs_truncate. + * 16-Jul-1998 Don Brady In hfs_bmap use MAXPHYSIO instead of MAXBSIZE when calling MapFileBlockC (radar #2263753). + * 16-Jul-1998 Don Brady Fix error handling in hfs_allocate (radar #2252265). + * 04-Jul-1998 chw Synchronized options in hfs_allocate with flags in call to ExtendFileC + * 25-Jun-1998 Don Brady Add missing blockNo incrementing to zero fill loop in hfs_truncate. + * 22-Jun-1998 Don Brady Add bp = NULL assignment after brelse in hfs_read.
 * 4-Jun-1998	Pat Dirks	Split off from hfs_vnodeops.c
 */

/*
 * NOTE(review): the system-header names below were lost when this patch was
 * extracted (the angle-bracket text was stripped).  Restore them from the
 * original xnu bsd/hfs/hfs_readwrite.c before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
//#include
#include
#include

#include


#include
#include


#include

#include "hfs.h"
#include "hfs_dbg.h"
#include "hfs_endian.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/BTreesInternal.h"


/* True when size is a multiple of 4K and no larger than half of MAXPHYSIO;
   such transfers can go through the cluster I/O layer. */
#define can_cluster(size) ((((size & (4096-1))) == 0) && (size <= (MAXPHYSIO/2)))

enum {
	MAXHFSFILESIZE = 0x7FFFFFFF	/* this needs to go in the mount structure */
};

extern u_int32_t GetLogicalBlockSize(struct vnode *vp);

#if DBG_VOP_TEST_LOCKS
extern void DbgVopTest(int maxSlots, int retval, VopDbgStoreRec *VopDbgStore, char *funcname);
#endif

#if HFS_DIAGNOSTIC
void debug_check_blocksizes(struct vnode *vp);
#endif

/*****************************************************************************
*
*	Operations on vnodes
*
*****************************************************************************/

/*
#% read		vp	L L L
#
 vop_read {
     IN struct vnode *vp;
     INOUT struct uio *uio;
     IN int ioflag;
     IN struct ucred *cred;

     */

/*
 * hfs_read - VOP_READ for HFS/HFS+ files and symlinks.
 *
 * Copies data from the file described by ap->a_vp into ap->a_uio.
 * When the vnode has a valid UBC (unified buffer cache) reference, the
 * transfer is delegated to cluster_read(); otherwise the data is read
 * block-by-block through the traditional buffer cache (bread/breadn),
 * with one-block read-ahead for sequential access.
 *
 * Returns 0 (E_NONE) on success or an errno value (EISDIR, EINVAL,
 * EFBIG, or an I/O error from bread/breadn/uiomove).
 * The vnode is expected locked on entry and remains locked (per the
 * vop_read spec comment above).
 */
int
hfs_read(ap)
struct vop_read_args /* {
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
} */ *ap;
{
	register struct vnode *vp;
	struct hfsnode *hp;
	register struct uio *uio;
	struct buf *bp;
	daddr_t logBlockNo;	/* logical block index in PAGE_SIZE units */
	u_long fragSize, moveSize, startOffset, ioxfersize;
	long devBlockSize = 0;
	off_t bytesRemaining;
	int retval;
	u_short mode;
	FCB *fcb;

	DBG_FUNC_NAME("hfs_read");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);

	vp = ap->a_vp;
	hp = VTOH(vp);
	fcb = HTOFCB(hp);
	mode = hp->h_meta->h_mode;
	uio = ap->a_uio;

#if HFS_DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("%s: mode", funcname);
#endif

	/* Can only read files */
	if (ap->a_vp->v_type != VREG && ap->a_vp->v_type != VLNK) {
		DBG_VOP_LOCKS_TEST(EISDIR);
		return (EISDIR);
	}
	DBG_RW(("\tfile size Ox%X\n", (u_int)fcb->fcbEOF));
	DBG_RW(("\tstarting at offset Ox%X of file, length Ox%X\n", (u_int)uio->uio_offset, (u_int)uio->uio_resid));

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(vp);
#endif

	/*
	 * If they didn't ask for any data, then we are done.
	 */
	if (uio->uio_resid == 0) {
		DBG_VOP_LOCKS_TEST(E_NONE);
		return (E_NONE);
	}

	/* cant read from a negative offset */
	if (uio->uio_offset < 0) {
		DBG_VOP_LOCKS_TEST(EINVAL);
		return (EINVAL);
	}

	/* Reads that start at or past EOF transfer nothing; plain HFS
	   additionally rejects offsets beyond its 2 GB - 1 file-size limit. */
	if (uio->uio_offset > fcb->fcbEOF) {
		if ( (!ISHFSPLUS(VTOVCB(vp))) && (uio->uio_offset > (off_t)MAXHFSFILESIZE))
			retval = EFBIG;
		else
			retval = E_NONE;

		DBG_VOP_LOCKS_TEST(retval);
		return (retval);
	}

	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_START,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);

	if (UBCISVALID(vp))
		/* Fast path: let the cluster layer do the whole transfer. */
		retval = cluster_read(vp, uio, (off_t)fcb->fcbEOF, devBlockSize, 0);
	else {

	/* Buffer-cache path: one PAGE_SIZE logical block per iteration.
	   bp is reset to NULL at the top of each pass so the post-loop
	   cleanup only releases a buffer left over from an early break. */
	for (retval = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {

		if ((bytesRemaining = (fcb->fcbEOF - uio->uio_offset)) <= 0)
			break;

		logBlockNo = (daddr_t)(uio->uio_offset / PAGE_SIZE_64);
		startOffset = (u_long) (uio->uio_offset & PAGE_MASK_64);
		fragSize = PAGE_SIZE;

		/* Last block of the file: trim the transfer to EOF, rounded up
		   to a device-block boundary (devBlockSize is a power of two). */
		if (((logBlockNo * PAGE_SIZE) + fragSize) < fcb->fcbEOF)
			ioxfersize = fragSize;
		else {
			ioxfersize = fcb->fcbEOF - (logBlockNo * PAGE_SIZE);
			ioxfersize = (ioxfersize + (devBlockSize - 1)) & ~(devBlockSize - 1);
		}
		DBG_RW(("\tat logBlockNo Ox%X, with Ox%lX left to read\n", logBlockNo, (UInt32)uio->uio_resid));
		moveSize = ioxfersize;
		DBG_RW(("\tmoveSize = Ox%lX; ioxfersize = Ox%lX; startOffset = Ox%lX.\n",
			moveSize, ioxfersize, startOffset));
		DBG_ASSERT(moveSize >= startOffset);
		moveSize -= startOffset;

		/* Clamp the copy to what remains before EOF and to what the
		   caller still wants. */
		if (bytesRemaining < moveSize)
			moveSize = bytesRemaining;

		if (uio->uio_resid < moveSize) {
			moveSize = uio->uio_resid;
			DBG_RW(("\treducing moveSize to Ox%lX (uio->uio_resid).\n", moveSize));
		};
		if (moveSize == 0) {
			break;
		};

		DBG_RW(("\tat logBlockNo Ox%X, extent of Ox%lX, xfer of Ox%lX; moveSize = Ox%lX\n", logBlockNo, fragSize, ioxfersize, moveSize));

		/* Choose plain bread for the final block, breadn (with one-block
		   read-ahead) when access looks sequential (v_lastr tracks the
		   last block read) and read-ahead isn't disabled via VRAOFF. */
		if (( uio->uio_offset + fragSize) >= fcb->fcbEOF) {
			retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp);

		} else if (logBlockNo - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) {
			daddr_t nextLogBlockNo = logBlockNo + 1;
			int nextsize;

			/* Size the read-ahead block the same way as above:
			   full page, or EOF remainder rounded to device blocks. */
			if (((nextLogBlockNo * PAGE_SIZE) +
				(daddr_t)fragSize) < fcb->fcbEOF)
				nextsize = fragSize;
			else {
				nextsize = fcb->fcbEOF - (nextLogBlockNo * PAGE_SIZE);
				nextsize = (nextsize + (devBlockSize - 1)) & ~(devBlockSize - 1);
			}
			retval = breadn(vp, logBlockNo, ioxfersize, &nextLogBlockNo, &nextsize, 1, NOCRED, &bp);
		} else {
			retval = bread(vp, logBlockNo, ioxfersize, NOCRED, &bp);
		};

		if (retval != E_NONE) {
			if (bp) {
				brelse(bp);
				bp = NULL;
			}
			break;
		};
		vp->v_lastr = logBlockNo;

		/*
		 * We should only get non-zero b_resid when an I/O retval
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an retval,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		ioxfersize -= bp->b_resid;

		if (ioxfersize < moveSize) {   /* XXX PPD This should take the offset into account, too! */
			if (ioxfersize == 0)
				break;
			moveSize = ioxfersize;
		}
		if ((startOffset + moveSize) > bp->b_bcount)
			panic("hfs_read: bad startOffset or moveSize\n");

		DBG_RW(("\tcopying Ox%lX bytes from %lX; resid = Ox%lX...\n", moveSize, (char *)bp->b_data + startOffset, bp->b_resid));

		if ((retval = uiomove((caddr_t)bp->b_data + startOffset, (int)moveSize, uio)))
			break;

		/* Fully-consumed buffers of a regular file won't be needed
		   again soon: age them so the cache recycles them first. */
		if (S_ISREG(mode) &&
			(((startOffset + moveSize) == fragSize) || (uio->uio_offset == fcb->fcbEOF))) {
			bp->b_flags |= B_AGE;
		};

		DBG_ASSERT(bp->b_bcount == bp->b_validend);

		brelse(bp);
		/* Start of loop resets bp to NULL before reaching outside this block... */
	}

	if (bp != NULL) {
		DBG_ASSERT(bp->b_bcount == bp->b_validend);
		brelse(bp);
	};
	}

	/* NOTE(review): the access-time flag is only set for HFS Plus volumes
	   (kHFSPlusSigWord check) — presumably plain HFS has no atime; confirm. */
	if (HTOVCB(hp)->vcbSigWord == kHFSPlusSigWord)
		hp->h_nodeflags |= IN_ACCESS;

	DBG_VOP_LOCKS_TEST(retval);

	#if HFS_DIAGNOSTIC
		debug_check_blocksizes(vp);
	#endif

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 12)) | DBG_FUNC_END,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);

	return (retval);
}

/*
 * Write data to a file or directory.
+#% write vp L L L +# + vop_write { + IN struct vnode *vp; + INOUT struct uio *uio; + IN int ioflag; + IN struct ucred *cred; + + */ +int +hfs_write(ap) +struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; +} */ *ap; +{ + struct hfsnode *hp = VTOH(ap->a_vp); + struct uio *uio = ap->a_uio; + struct vnode *vp = ap->a_vp ; + struct vnode *dev; + struct buf *bp; + struct proc *p, *cp; + struct timeval tv; + FCB *fcb = HTOFCB(hp); + ExtendedVCB *vcb = HTOVCB(hp); + long devBlockSize = 0; + daddr_t logBlockNo; + long fragSize; + off_t origFileSize, currOffset, writelimit, bytesToAdd; + off_t actualBytesAdded; + u_long blkoffset, resid, xfersize, clearSize; + int flags, ioflag; + int retval; + DBG_FUNC_NAME("hfs_write"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_RW(("\thfsnode 0x%x (%s)\n", (u_int)hp, H_NAME(hp))); + DBG_RW(("\tstarting at offset Ox%lX of file, length Ox%lX\n", (UInt32)uio->uio_offset, (UInt32)uio->uio_resid)); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + dev = hp->h_meta->h_devvp; + +#if HFS_DIAGNOSTIC + debug_check_blocksizes(vp); +#endif + + if (uio->uio_offset < 0) { + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); + } + + if (uio->uio_resid == 0) { + DBG_VOP_LOCKS_TEST(E_NONE); + return (E_NONE); + } + + if (ap->a_vp->v_type != VREG && ap->a_vp->v_type != VLNK) { /* Can only write files */ + DBG_VOP_LOCKS_TEST(EISDIR); + return (EISDIR); + }; + +#if HFS_DIAGNOSTIC + if (uio->uio_rw != UIO_WRITE) + panic("%s: mode", funcname); +#endif + + ioflag = ap->a_ioflag; + uio = ap->a_uio; + vp = ap->a_vp; + + if (ioflag & IO_APPEND) + uio->uio_offset = fcb->fcbEOF; + if ((hp->h_meta->h_pflags & APPEND) && uio->uio_offset != fcb->fcbEOF) + return (EPERM); + + writelimit = uio->uio_offset + uio->uio_resid; + + /* + * Maybe this should be above the vnode op call, but so long as + 
* file servers have no limits, I don't think it matters. + */ + p = uio->uio_procp; + if (vp->v_type == VREG && p && + writelimit > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { + psignal(p, SIGXFSZ); + return (EFBIG); + }; + VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize); + + resid = uio->uio_resid; + origFileSize = fcb->fcbPLen; + flags = ioflag & IO_SYNC ? B_SYNC : 0; + + DBG_RW(("\tLEOF is 0x%lX, PEOF is 0x%lX.\n", fcb->fcbEOF, fcb->fcbPLen)); + + /* + NOTE: In the following loop there are two positions tracked: + currOffset is the current I/O starting offset. currOffset is never >LEOF; the + LEOF is nudged along with currOffset as data is zeroed or written. + uio->uio_offset is the start of the current I/O operation. It may be arbitrarily + beyond currOffset. + + The following is true at all times: + + currOffset <= LEOF <= uio->uio_offset <= writelimit + */ + currOffset = MIN(uio->uio_offset, fcb->fcbEOF); + + DBG_RW(("\tstarting I/O loop at 0x%lX.\n", (u_long)currOffset)); + + cp = current_proc(); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_START, + (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); + retval = 0; + + if (fcb->fcbEOF > fcb->fcbMaxEOF) + fcb->fcbMaxEOF = fcb->fcbEOF; + + /* Now test if we need to extend the file */ + /* Doing so will adjust the fcbPLen for us */ + + while (writelimit > (off_t)fcb->fcbPLen) { + + bytesToAdd = writelimit - fcb->fcbPLen; + DBG_RW(("\textending file by 0x%lX bytes; 0x%lX blocks free", + (unsigned long)bytesToAdd, (unsigned long)vcb->freeBlocks)); + + /* lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, cp); + if (retval != E_NONE) + break; + + retval = MacToVFSError( + ExtendFileC (vcb, + fcb, + bytesToAdd, + kEFContigBit, + &actualBytesAdded)); + + (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, cp); + DBG_VOP_CONT(("\tactual bytes added = 0x%lX bytes, retval = %d...\n", 
actualBytesAdded, retval)); + if ((actualBytesAdded == 0) && (retval == E_NONE)) + retval = ENOSPC; + if (retval != E_NONE) + break; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_NONE, + (int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0); + }; + + if (UBCISVALID(vp) && retval == E_NONE) { + off_t filesize; + off_t zero_off; + int lflag; + + if (writelimit > fcb->fcbEOF) + filesize = writelimit; + else + filesize = fcb->fcbEOF; + + lflag = (ioflag & IO_SYNC); + + if (uio->uio_offset > fcb->fcbMaxEOF) { + zero_off = fcb->fcbMaxEOF; + lflag |= IO_HEADZEROFILL; + } else + zero_off = 0; + + /* + * if the write starts beyond the current EOF then + * we we'll zero fill from the current EOF to where the write begins + */ + retval = cluster_write(vp, uio, fcb->fcbEOF, filesize, zero_off, + (off_t)0, devBlockSize, lflag); + + if (uio->uio_offset > fcb->fcbEOF) { + fcb->fcbEOF = uio->uio_offset; + + if (fcb->fcbEOF > fcb->fcbMaxEOF) + fcb->fcbMaxEOF = fcb->fcbEOF; + + ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ + } + if (resid > uio->uio_resid) + hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; + + } else { + + while (retval == E_NONE && uio->uio_resid > 0) { + + logBlockNo = currOffset / PAGE_SIZE; + blkoffset = currOffset & PAGE_MASK; + + if (((off_t)(fcb->fcbPLen) - currOffset) < PAGE_SIZE_64) + fragSize = (off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * PAGE_SIZE_64); + else + fragSize = PAGE_SIZE; + xfersize = fragSize - blkoffset; + + DBG_RW(("\tcurrOffset = Ox%lX, logBlockNo = Ox%X, blkoffset = Ox%lX, xfersize = Ox%lX, fragSize = Ox%lX.\n", + (unsigned long)currOffset, logBlockNo, blkoffset, xfersize, fragSize)); + + /* Make any adjustments for boundary conditions */ + if (currOffset + (off_t)xfersize > writelimit) { + xfersize = writelimit - currOffset; + DBG_RW(("\ttrimming xfersize to 0x%lX to match writelimit (uio_resid)...\n", xfersize)); + }; + + /* + * There is no need to read into bp if: + * We start on a block boundary 
and will overwrite the whole block + * + * OR + */ + if ((blkoffset == 0) && (xfersize >= fragSize)) { + DBG_RW(("\tRequesting %ld-byte block Ox%lX w/o read...\n", fragSize, (long)logBlockNo)); + + bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ); + retval = 0; + + if (bp->b_blkno == -1) { + brelse(bp); + retval = EIO; /* XXX */ + break; + } + } else { + + if (currOffset == fcb->fcbEOF && blkoffset == 0) { + bp = getblk(vp, logBlockNo, fragSize, 0, 0, BLK_READ); + retval = 0; + + if (bp->b_blkno == -1) { + brelse(bp); + retval = EIO; /* XXX */ + break; + } + + } else { + /* + * This I/O transfer is not sufficiently aligned, so read the affected block into a buffer: + */ + DBG_VOP(("\tRequesting block Ox%X, size = 0x%08lX...\n", logBlockNo, fragSize)); + retval = bread(vp, logBlockNo, fragSize, ap->a_cred, &bp); + + if (retval != E_NONE) { + if (bp) + brelse(bp); + break; + } + } + } + + /* See if we are starting to write within file boundaries: + If not, then we need to present a "hole" for the area between + the current EOF and the start of the current I/O operation: + + Note that currOffset is only less than uio_offset if uio_offset > LEOF... + */ + if (uio->uio_offset > currOffset) { + clearSize = MIN(uio->uio_offset - currOffset, xfersize); + DBG_RW(("\tzeroing Ox%lX bytes Ox%lX bytes into block Ox%X...\n", clearSize, blkoffset, logBlockNo)); + bzero(bp->b_data + blkoffset, clearSize); + currOffset += clearSize; + blkoffset += clearSize; + xfersize -= clearSize; + }; + + if (xfersize > 0) { + DBG_RW(("\tCopying Ox%lX bytes Ox%lX bytes into block Ox%X... 
ioflag == 0x%X\n", + xfersize, blkoffset, logBlockNo, ioflag)); + retval = uiomove((caddr_t)bp->b_data + blkoffset, (int)xfersize, uio); + currOffset += xfersize; + }; + DBG_ASSERT((bp->b_bcount % devBlockSize) == 0); + + if (ioflag & IO_SYNC) { + (void)VOP_BWRITE(bp); + //DBG_RW(("\tissuing bwrite\n")); + } else if ((xfersize + blkoffset) == fragSize) { + //DBG_RW(("\tissuing bawrite\n")); + bp->b_flags |= B_AGE; + bawrite(bp); + } else { + //DBG_RW(("\tissuing bdwrite\n")); + bdwrite(bp); + }; + + /* Update the EOF if we just extended the file + (the PEOF has already been moved out and the block mapping table has been updated): */ + if (currOffset > fcb->fcbEOF) { + DBG_VOP(("\textending EOF to 0x%lX...\n", (UInt32)fcb->fcbEOF)); + fcb->fcbEOF = currOffset; + + if (fcb->fcbEOF > fcb->fcbMaxEOF) + fcb->fcbMaxEOF = fcb->fcbEOF; + + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ + }; + + if (retval || (resid == 0)) + break; + hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; + }; + }; + /* + * If we successfully wrote any data, and we are not the superuser + * we clear the setuid and setgid bits as a precaution against + * tampering. 
 */
	/* Successful partial-or-full write by a non-root caller: drop setuid/setgid. */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		hp->h_meta->h_mode &= ~(ISUID | ISGID);

	if (retval) {
		/*
		 * Error path with IO_UNIT semantics: undo the partial write by
		 * restoring the original file size and rewinding the uio so the
		 * caller sees an all-or-nothing result.
		 */
		if (ioflag & IO_UNIT) {
			(void)VOP_TRUNCATE(vp, origFileSize,
				ioflag & IO_SYNC, ap->a_cred, uio->uio_procp);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		/* Synchronous write that moved data: push timestamps to disk now. */
		tv = time;
		retval = VOP_UPDATE(vp, &tv, &tv, 1);
	}

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(vp);
#endif

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 0)) | DBG_FUNC_END,
		(int)uio->uio_offset, uio->uio_resid, (int)fcb->fcbEOF, (int)fcb->fcbPLen, 0);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}


/*

#% ioctl	vp	U U U
#
 vop_ioctl {
	IN struct vnode *vp;
	IN u_long command;
	IN caddr_t data;
	IN int fflag;
	IN struct ucred *cred;
	IN struct proc *p;

 */


/* ARGSUSED */
int
hfs_ioctl(ap)
struct vop_ioctl_args /* {
	struct vnode *a_vp;
	int a_command;
	caddr_t a_data;
	int a_fflag;
	struct ucred *a_cred;
	struct proc *a_p;
} */ *ap;
{
	DBG_FUNC_NAME("hfs_ioctl");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS);

	switch (ap->a_command) {

	/* Command 1: advisory read-ahead — prefetch ra_count bytes at ra_offset. */
	case 1:
		{ register struct hfsnode *hp;
		register struct vnode *vp;
		register struct radvisory *ra;
		FCB *fcb;
		int devBlockSize = 0;
		int error;

		vp = ap->a_vp;

		VOP_LEASE(vp, ap->a_p, ap->a_cred, LEASE_READ);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p);

		ra = (struct radvisory *)(ap->a_data);
		hp = VTOH(vp);

		fcb = HTOFCB(hp);

		/* An advisory offset at or beyond EOF cannot be prefetched. */
		if (ra->ra_offset >= fcb->fcbEOF) {
			VOP_UNLOCK(vp, 0, ap->a_p);
			DBG_VOP_LOCKS_TEST(EFBIG);
			return (EFBIG);
		}
		VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

		error = advisory_read(vp, fcb->fcbEOF, ra->ra_offset, ra->ra_count, devBlockSize);
		VOP_UNLOCK(vp, 0, ap->a_p);

		DBG_VOP_LOCKS_TEST(error);
		return (error);
		}

	case 2: /* F_READBOOTBLOCKS */
	case 3: /* F_WRITEBOOTBLOCKS */
		{
		struct vnode *vp = ap->a_vp;
		struct hfsnode *hp = VTOH(vp);
		struct fbootstraptransfer *btd = (struct fbootstraptransfer *)ap->a_data;
		u_long devBlockSize;
		int error;
		struct iovec aiov;
		struct uio auio;
		u_long blockNumber;
		u_long blockOffset;
		u_long xfersize;
		struct buf *bp;

		/* Boot blocks live in the first 1K of the volume and are only
		   reachable through the root vnode. */
		if ((vp->v_flag & VROOT) == 0) return EINVAL;
		if (btd->fbt_offset + btd->fbt_length > 1024) return EINVAL;

		/* Describe the user buffer with a single-iovec uio. */
		aiov.iov_base = btd->fbt_buffer;
		aiov.iov_len = btd->fbt_length;

		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = btd->fbt_offset;
		auio.uio_resid = btd->fbt_length;
		auio.uio_segflg = UIO_USERSPACE;
		auio.uio_rw = (ap->a_command == 3) ? UIO_WRITE : UIO_READ; /* F_WRITEBOOTSTRAP / F_READBOOTSTRAP */
		auio.uio_procp = ap->a_p;

		VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

		/* Move the data one device block at a time through the buffer cache;
		   uiomove caps each transfer at the remaining resid. */
		while (auio.uio_resid > 0) {
			blockNumber = auio.uio_offset / devBlockSize;
			error = bread(hp->h_meta->h_devvp, blockNumber, devBlockSize, ap->a_cred, &bp);
			if (error) {
				if (bp) brelse(bp);
				return error;
			};

			blockOffset = auio.uio_offset % devBlockSize;
			xfersize = devBlockSize - blockOffset;
			error = uiomove((caddr_t)bp->b_data + blockOffset, (int)xfersize, &auio);
			if (error) {
				brelse(bp);
				return error;
			};
			if (auio.uio_rw == UIO_WRITE) {
				/* Writes go straight to disk, synchronously. */
				error = VOP_BWRITE(bp);
				if (error) return error;
			} else {
				brelse(bp);
			};
		};
		};
		return 0;

	default:
		DBG_VOP_LOCKS_TEST(ENOTTY);
		return (ENOTTY);
	}

	/* NOTREACHED */
	return 0;
}

/* ARGSUSED */
int
hfs_select(ap)
struct vop_select_args /* {
	struct vnode *a_vp;
	int a_which;
	int a_fflags;
	struct ucred *a_cred;
	struct proc *a_p;
} */ *ap;
{
	DBG_FUNC_NAME("hfs_select");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_IGNORE,
VOPDBG_IGNORE, VOPDBG_POS); + + /* + * We should really check to see if I/O is possible. + */ + DBG_VOP_LOCKS_TEST(1); + return (1); +} + + + +/* + * Mmap a file + * + * NB Currently unsupported. +# XXX - not used +# + vop_mmap { + IN struct vnode *vp; + IN int fflags; + IN struct ucred *cred; + IN struct proc *p; + + */ + +/* ARGSUSED */ + +int +hfs_mmap(ap) +struct vop_mmap_args /* { + struct vnode *a_vp; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; +} */ *ap; +{ + DBG_FUNC_NAME("hfs_mmap"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); + + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); +} + + + +/* + * Seek on a file + * + * Nothing to do, so just return. +# XXX - not used +# Needs work: Is newoff right? What's it mean? +# + vop_seek { + IN struct vnode *vp; + IN off_t oldoff; + IN off_t newoff; + IN struct ucred *cred; + */ +/* ARGSUSED */ +int +hfs_seek(ap) +struct vop_seek_args /* { + struct vnode *a_vp; + off_t a_oldoff; + off_t a_newoff; + struct ucred *a_cred; +} */ *ap; +{ + DBG_FUNC_NAME("hfs_seek"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); + + DBG_VOP_LOCKS_TEST(E_NONE); + return (E_NONE); +} + + +/* + * Bmap converts a the logical block number of a file to its physical block + * number on the disk. + */ + +/* + * vp - address of vnode file the file + * bn - which logical block to convert to a physical block number. 
+ * vpp - returns the vnode for the block special file holding the filesystem + * containing the file of interest + * bnp - address of where to return the filesystem physical block number +#% bmap vp L L L +#% bmap vpp - U - +# + vop_bmap { + IN struct vnode *vp; + IN daddr_t bn; + OUT struct vnode **vpp; + IN daddr_t *bnp; + OUT int *runp; + */ +/* + * Converts a logical block number to a physical block, and optionally returns + * the amount of remaining blocks in a run. The logical block is based on hfsNode.logBlockSize. + * The physical block number is based on the device block size, currently its 512. + * The block run is returned in logical blocks, and is the REMAINING amount of blocks + */ + +int +hfs_bmap(ap) +struct vop_bmap_args /* { + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + int *a_runp; +} */ *ap; +{ + struct hfsnode *hp = VTOH(ap->a_vp); + struct hfsmount *hfsmp = VTOHFS(ap->a_vp); + int retval = E_NONE; + daddr_t logBlockSize; + size_t bytesContAvail = 0; + struct proc *p = NULL; + int lockExtBtree; + +#define DEBUG_BMAP 0 +#if DEBUG_BMAP + DBG_FUNC_NAME("hfs_bmap"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + if (ap->a_vpp != NULL) { + DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_UNLOCKED, VOPDBG_IGNORE, VOPDBG_POS); + } else { + DBG_VOP_LOCKS_INIT(1,NULL, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); + }; +#endif + + DBG_IO(("\tMapped blk %d --> ", ap->a_bn)); + /* + * Check for underlying vnode requests and ensure that logical + * to physical mapping is requested. 
+ */ + if (ap->a_vpp != NULL) + *ap->a_vpp = VTOH(ap->a_vp)->h_meta->h_devvp; + if (ap->a_bnp == NULL) + return (0); + + lockExtBtree = hasOverflowExtents(hp); + if (lockExtBtree) + { + p = current_proc(); + retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p); + if (retval) + return (retval); + } + + logBlockSize = GetLogicalBlockSize(ap->a_vp); + + retval = MacToVFSError( + MapFileBlockC (HFSTOVCB(hfsmp), + HTOFCB(hp), + MAXPHYSIO, + (off_t)(ap->a_bn * logBlockSize), + ap->a_bnp, + &bytesContAvail)); + + if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); + + if (retval == E_NONE) { + /* Figure out how many read ahead blocks there are */ + if (ap->a_runp != NULL) { + if (can_cluster(logBlockSize)) { + /* Make sure this result never goes negative: */ + *ap->a_runp = (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1; + } else { + *ap->a_runp = 0; + }; + }; + }; + + DBG_IO(("%d:%d.\n", *ap->a_bnp, (bytesContAvail < logBlockSize) ? 0 : (bytesContAvail / logBlockSize) - 1)); + +#if DEBUG_BMAP + + DBG_VOP_LOCKS_TEST(retval); +#endif + + if (ap->a_runp) { + DBG_ASSERT((*ap->a_runp * logBlockSize) < bytesContAvail); /* At least *ap->a_runp blocks left and ... */ + if (can_cluster(logBlockSize)) { + DBG_ASSERT(bytesContAvail - (*ap->a_runp * logBlockSize) < (2*logBlockSize)); /* ... at most 1 logical block accounted for by current block */ + /* ... 
plus some sub-logical block sized piece */ + }; + }; + + return (retval); +} + +/* blktooff converts logical block number to file offset */ + +int +hfs_blktooff(ap) +struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; +} */ *ap; +{ + if (ap->a_vp == NULL) + return (EINVAL); + *ap->a_offset = (off_t)ap->a_lblkno * PAGE_SIZE_64; + + return(0); +} + +int +hfs_offtoblk(ap) +struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; +} */ *ap; +{ + long lbsize, boff; + + if (ap->a_vp == NULL) + return (EINVAL); + *ap->a_lblkno = ap->a_offset / PAGE_SIZE_64; + + return(0); +} + +int +hfs_cmap(ap) +struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_foffset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; +} */ *ap; +{ + struct hfsnode *hp = VTOH(ap->a_vp); + struct hfsmount *hfsmp = VTOHFS(ap->a_vp); + size_t bytesContAvail = 0; + int retval = E_NONE; + int lockExtBtree; + struct proc *p = NULL; + +#define DEBUG_CMAP 0 +#if DEBUG_CMAP + DBG_FUNC_NAME("hfs_cmap"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); + + DBG_VOP_LOCKS_INIT(0, ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); +#endif + + DBG_IO(("\tMapped offset %qx --> ", ap->a_foffset)); + /* + * Check for underlying vnode requests and ensure that logical + * to physical mapping is requested. 
+ */ + if (ap->a_bpn == NULL) + return (0); + + if (lockExtBtree = hasOverflowExtents(hp)) + { + p = current_proc(); + if (retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p)) + return (retval); + } + retval = MacToVFSError( + MapFileBlockC (HFSTOVCB(hfsmp), + HTOFCB(hp), + ap->a_size, + ap->a_foffset, + ap->a_bpn, + &bytesContAvail)); + + if (lockExtBtree) (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); + + if ((retval == E_NONE) && (ap->a_run)) + *ap->a_run = bytesContAvail; + + if (ap->a_poff) + *(int *)ap->a_poff = 0; + + DBG_IO(("%d:%d.\n", *ap->a_bpn, bytesContAvail)); + +#if DEBUG_BMAP + + DBG_VOP_LOCKS_TEST(retval); +#endif + + return (retval); + +} + +/* + * Calculate the logical to physical mapping if not done already, + * then call the device strategy routine. +# +#vop_strategy { +# IN struct buf *bp; + */ +int +hfs_strategy(ap) +struct vop_strategy_args /* { + struct buf *a_bp; +} */ *ap; +{ + register struct buf *bp = ap->a_bp; + register struct vnode *vp = bp->b_vp; + register struct hfsnode *hp; + int retval = 0; + + DBG_FUNC_NAME("hfs_strategy"); + +// DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\n")); + + hp = VTOH(vp); + + if ( !(bp->b_flags & B_VECTORLIST)) { + + if (vp->v_type == VBLK || vp->v_type == VCHR) + panic("hfs_strategy: device vnode passed!"); + + if (bp->b_flags & B_PAGELIST) { + /* + * if we have a page list associated with this bp, + * then go through cluste_bp since it knows how to + * deal with a page request that might span non-contiguous + * physical blocks on the disk... + */ + retval = cluster_bp(bp); + vp = hp->h_meta->h_devvp; + bp->b_dev = vp->v_rdev; + + return (retval); + } + /* + * If we don't already know the filesystem relative block number + * then get it using VOP_BMAP(). If VOP_BMAP() returns the block + * number as -1 then we've got a hole in the file. HFS filesystems + * don't allow files with holes, so we shouldn't ever see this. 
+ */ + if (bp->b_blkno == bp->b_lblkno) { + if ((retval = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL))) { + bp->b_error = retval; + bp->b_flags |= B_ERROR; + biodone(bp); + return (retval); + } + if ((long)bp->b_blkno == -1) + clrbuf(bp); + } + if ((long)bp->b_blkno == -1) { + biodone(bp); + return (0); + } + if (bp->b_validend == 0) { + /* Record the exact size of the I/O transfer about to be made: */ + DBG_ASSERT(bp->b_validoff == 0); + bp->b_validend = bp->b_bcount; + DBG_ASSERT(bp->b_dirtyoff == 0); + }; + } + vp = hp->h_meta->h_devvp; + bp->b_dev = vp->v_rdev; + DBG_IO(("\t\t>>>%s: continuing w/ vp: 0x%x with logBlk Ox%X and phyBlk Ox%X\n", funcname, (u_int)vp, bp->b_lblkno, bp->b_blkno)); + + return VOCALL (vp->v_op, VOFFSET(vop_strategy), ap); +} + + +/* +#% reallocblks vp L L L +# + vop_reallocblks { + IN struct vnode *vp; + IN struct cluster_save *buflist; + + */ + +int +hfs_reallocblks(ap) +struct vop_reallocblks_args /* { + struct vnode *a_vp; + struct cluster_save *a_buflist; +} */ *ap; +{ + DBG_FUNC_NAME("hfs_reallocblks"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + /* Currently no support for clustering */ /* XXX */ + DBG_VOP_LOCKS_TEST(ENOSPC); + return (ENOSPC); +} + + + +/* +# +#% truncate vp L L L +# +vop_truncate { + IN struct vnode *vp; + IN off_t length; + IN int flags; (IO_SYNC) + IN struct ucred *cred; + IN struct proc *p; +}; + * Truncate the hfsnode hp to at most length size, freeing (or adding) the + * disk blocks. 
 */
int hfs_truncate(ap)
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct hfsnode *hp = VTOH(vp);
	off_t length = ap->a_length;
	long vflags;
	struct timeval tv;
	int retval;
	FCB *fcb;
	off_t bytesToAdd;
	off_t actualBytesAdded;
	DBG_FUNC_NAME("hfs_truncate");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(ap->a_vp);
#endif

	fcb = HTOFCB(hp);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_START,
		(int)length, fcb->fcbEOF, fcb->fcbPLen, 0, 0);

	/* Reject negative lengths outright. */
	if (length < 0) {
		DBG_VOP_LOCKS_TEST(EINVAL);
		return (EINVAL);
	}

	/* Plain HFS files are capped at MAXHFSFILESIZE. */
	if ((!ISHFSPLUS(VTOVCB(vp))) && (length > (off_t)MAXHFSFILESIZE)) {
		DBG_VOP_LOCKS_TEST(EFBIG);
		return (EFBIG);
	}

	if (vp->v_type != VREG && vp->v_type != VLNK) {
		DBG_VOP_LOCKS_TEST(EISDIR);
		return (EISDIR);	/* hfs doesn't support truncating of directories */
	}

	tv = time;
	retval = E_NONE;

	DBG_RW(("%s: truncate from Ox%lX to Ox%X bytes\n", funcname, fcb->fcbPLen, length));

	/*
	 * we cannot just check if fcb->fcbEOF == length (as an optimization)
	 * since there may be extra physical blocks that also need truncation
	 */

	if (fcb->fcbEOF > fcb->fcbMaxEOF)
		fcb->fcbMaxEOF = fcb->fcbEOF;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of fcbEOF is 0, length will be at least 1.
	 */
	if (length > fcb->fcbEOF) {
		off_t filePosition;
		daddr_t logBlockNo;
		long logBlockSize;
		long blkOffset;
		off_t bytestoclear;
		int blockZeroCount;
		struct buf *bp=NULL;

		/*
		 * If we don't have enough physical space then
		 * we need to extend the physical size.
		 */
		if (length > fcb->fcbPLen) {
			/* lock extents b-tree (also protects volume bitmap) */
			retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
			if (retval)
				goto Err_Exit;

			/* Keep extending until the physical length covers `length` or
			   the volume cannot supply any more blocks. */
			while ((length > fcb->fcbPLen) && (retval == E_NONE)) {
				bytesToAdd = length - fcb->fcbPLen;
				retval = MacToVFSError(
					ExtendFileC (HTOVCB(hp),
						fcb,
						bytesToAdd,
						kEFAllMask,	/* allocate all requested bytes or none */
						&actualBytesAdded));

				/* No error but nothing added: volume is full; settle for
				   what we have. */
				if (actualBytesAdded == 0 && retval == E_NONE) {
					if (length > fcb->fcbPLen)
						length = fcb->fcbPLen;
					break;
				}
			}
			(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
			if (retval)
				goto Err_Exit;

			DBG_ASSERT(length <= fcb->fcbPLen);
			KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
				(int)length, fcb->fcbEOF, fcb->fcbPLen, 0, 0);
		}

		if (! (ap->a_flags & IO_NOZEROFILL)) {

			/* UBC-backed vnodes zero-fill the new range through the
			   cluster layer... */
			if (UBCISVALID(vp) && retval == E_NONE) {
				u_long devBlockSize;

				if (length > fcb->fcbMaxEOF) {

					VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

					retval = cluster_write(vp, (struct uio *) 0, fcb->fcbEOF, length, fcb->fcbMaxEOF,
							(off_t)0, devBlockSize, ((ap->a_flags & IO_SYNC) | IO_HEADZEROFILL));

					if (retval)
						goto Err_Exit;
				}
			} else {

				/*
				 * zero out any new logical space...
				 */
				bytestoclear = length - fcb->fcbEOF;
				filePosition = fcb->fcbEOF;

				/* ...otherwise zero it page-by-page through the buffer
				   cache. */
				while (bytestoclear > 0) {
					logBlockNo = (daddr_t)(filePosition / PAGE_SIZE_64);
					blkOffset = (long)(filePosition & PAGE_MASK_64);

					/* The last block of the file may be shorter than a page. */
					if (((off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * (off_t)PAGE_SIZE)) < PAGE_SIZE_64)
						logBlockSize = (off_t)(fcb->fcbPLen) - ((off_t)logBlockNo * PAGE_SIZE_64);
					else
						logBlockSize = PAGE_SIZE;

					if (logBlockSize < blkOffset)
						panic("hfs_truncate: bad logBlockSize computed\n");

					blockZeroCount = MIN(bytestoclear, logBlockSize - blkOffset);

					/* Whole-block (or beyond-EOF) zeroing can skip the read. */
					if (blkOffset == 0 && ((bytestoclear >= logBlockSize) || filePosition >= fcb->fcbEOF)) {
						bp = getblk(vp, logBlockNo, logBlockSize, 0, 0, BLK_WRITE);
						retval = 0;

					} else {
						retval = bread(vp, logBlockNo, logBlockSize, ap->a_cred, &bp);
						if (retval) {
							brelse(bp);
							goto Err_Exit;
						}
					}
					bzero((char *)bp->b_data + blkOffset, blockZeroCount);

					bp->b_flags |= B_DIRTY | B_AGE;

					if (ap->a_flags & IO_SYNC)
						VOP_BWRITE(bp);
					else if (logBlockNo % 32)
						bawrite(bp);
					else
						VOP_BWRITE(bp);	/* wait after we issue 32 requests */

					bytestoclear -= blockZeroCount;
					filePosition += blockZeroCount;
				}
			};
		}
		fcb->fcbEOF = length;

		if (fcb->fcbEOF > fcb->fcbMaxEOF)
			fcb->fcbMaxEOF = fcb->fcbEOF;

		if (UBCISVALID(vp))
			ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */

	} else { /* Shorten the size of the file */

		if (fcb->fcbEOF > length) {
			/*
			 * Any buffers that are past the truncation point need to be
			 * invalidated (to maintain buffer cache consistency). For
			 * simplicity, we invalidate all the buffers by calling vinvalbuf.
			 */
			if (UBCISVALID(vp))
				ubc_setsize(vp, (off_t)length); /* XXX check errors */

			vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA;
			retval = vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0);
		}

		/*
		 * For a TBE process the deallocation of the file blocks is
		 * delayed until the file is closed. And hfs_close calls
		 * truncate with the IO_NDELAY flag set. So when IO_NDELAY
		 * isn't set, we make sure this isn't a TBE process.
		 */
		if ((ap->a_flags & IO_NDELAY) || (!ISSET(ap->a_p->p_flag, P_TBE))) {

			/* lock extents b-tree (also protects volume bitmap) */
			retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
			if (retval)
				goto Err_Exit;
			retval = MacToVFSError(
				TruncateFileC(
					HTOVCB(hp),
					fcb,
					length,
					false));
			(void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p);
			if (retval)
				goto Err_Exit;

			fcb->fcbMaxEOF = length;
		}
		fcb->fcbEOF = length;

		if (fcb->fcbFlags & fcbModifiedMask)
			hp->h_nodeflags |= IN_MODIFIED;
	}
	hp->h_nodeflags |= IN_CHANGE | IN_UPDATE;
	retval = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT);
	if (retval) {
		DBG_ERR(("Could not update truncate"));

		KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_NONE,
			-1, -1, -1, retval, 0);
	}
Err_Exit:;

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(ap->a_vp);
#endif

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 7)) | DBG_FUNC_END,
		(int)length, fcb->fcbEOF, fcb->fcbPLen, retval, 0);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}



/*
#
#% allocate	vp	L L L
#
vop_allocate {
	IN struct vnode *vp;
	IN off_t length;
	IN int flags;
	IN struct ucred *cred;
	IN struct proc *p;
};
 * allocate the hfsnode hp to at most length size
 */
int hfs_allocate(ap)
	struct vop_allocate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		u_int32_t a_flags;
		off_t *a_bytesallocated;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct hfsnode *hp = VTOH(vp);
	off_t length = ap->a_length;
	off_t startingPEOF;
	off_t moreBytesRequested;
	off_t actualBytesAdded;
	long vflags;
	struct timeval tv;
	int retval, retval2;
	FCB *fcb;
	UInt32 extendFlags =0;	/* For call to ExtendFileC */
	DBG_FUNC_NAME("hfs_allocate");
	DBG_VOP_LOCKS_DECL(1);
DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + /* Set the number of bytes allocated to 0 so that the caller will know that we + did nothing. ExtendFileC will fill this in for us if we actually allocate space */ + + *(ap->a_bytesallocated) = 0; + + /* Now for some error checking */ + + if (length < (off_t)0) { + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); + } + + if (vp->v_type != VREG && vp->v_type != VLNK) { + DBG_VOP_LOCKS_TEST(EISDIR); + return (EISDIR); /* hfs doesn't support truncating of directories */ + } + + /* Fill in the flags word for the call to Extend the file */ + + if (ap->a_flags & ALLOCATECONTIG) { + extendFlags |= kEFContigMask; + } + + if (ap->a_flags & ALLOCATEALL) { + extendFlags |= kEFAllMask; + } + + fcb = HTOFCB(hp); + tv = time; + retval = E_NONE; + startingPEOF = fcb->fcbPLen; + + if (ap->a_flags & ALLOCATEFROMPEOF) { + length += fcb->fcbPLen; + } + + DBG_RW(("%s: allocate from Ox%lX to Ox%X bytes\n", funcname, fcb->fcbPLen, (u_int)length)); + + /* If no changes are necesary, then we're done */ + if (fcb->fcbPLen == length) + goto Std_Exit; + + /* + * Lengthen the size of the file. We must ensure that the + * last byte of the file is allocated. Since the smallest + * value of fcbPLen is 0, length will be at least 1. 
+ */ + if (length > fcb->fcbPLen) { + moreBytesRequested = length - fcb->fcbPLen; + + /* lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); + if (retval) goto Err_Exit; + + retval = MacToVFSError( + ExtendFileC(HTOVCB(hp), + fcb, + moreBytesRequested, + extendFlags, + &actualBytesAdded)); + + *(ap->a_bytesallocated) = actualBytesAdded; + + (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); + + DBG_ASSERT(length <= fcb->fcbPLen); + + /* + * if we get an error and no changes were made then exit + * otherwise we must do the VOP_UPDATE to reflect the changes + */ + if (retval && (startingPEOF == fcb->fcbPLen)) goto Err_Exit; + + /* + * Adjust actualBytesAdded to be allocation block aligned, not + * clump size aligned. + * NOTE: So what we are reporting does not affect reality + * until the file is closed, when we truncate the file to allocation + * block size. + */ + + if ((actualBytesAdded != 0) && (moreBytesRequested < actualBytesAdded)) { + u_long blks, blocksize; + + blocksize = VTOVCB(vp)->blockSize; + blks = moreBytesRequested / blocksize; + if ((blks * blocksize) != moreBytesRequested) + blks++; + + *(ap->a_bytesallocated) = blks * blocksize; + } + + } else { /* Shorten the size of the file */ + + if (fcb->fcbEOF > length) { + /* + * Any buffers that are past the truncation point need to be + * invalidated (to maintain buffer cache consistency). For + * simplicity, we invalidate all the buffers by calling vinvalbuf. + */ + vflags = ((length > 0) ? 
V_SAVE : 0) | V_SAVEMETA; + (void) vinvalbuf(vp, vflags, ap->a_cred, ap->a_p, 0, 0); + } + + /* lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); + if (retval) goto Err_Exit; + + retval = MacToVFSError( + TruncateFileC( + HTOVCB(hp), + fcb, + length, + false)); + (void) hfs_metafilelocking(HTOHFS(hp), kHFSExtentsFileID, LK_RELEASE, ap->a_p); + + /* + * if we get an error and no changes were made then exit + * otherwise we must do the VOP_UPDATE to reflect the changes + */ + if (retval && (startingPEOF == fcb->fcbPLen)) goto Err_Exit; + if (fcb->fcbFlags & fcbModifiedMask) + hp->h_nodeflags |= IN_MODIFIED; + + DBG_ASSERT(length <= fcb->fcbPLen) // DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG DEBUG + + if (fcb->fcbEOF > fcb->fcbPLen) { + fcb->fcbEOF = fcb->fcbPLen; + fcb->fcbMaxEOF = fcb->fcbPLen; + + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)fcb->fcbEOF); /* XXX check errors */ + } + } + +Std_Exit: + hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; + retval2 = VOP_UPDATE(vp, &tv, &tv, MNT_WAIT); + + if (retval == 0) retval = retval2; + +Err_Exit: + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + + + +/* pagein for HFS filesystem, similar to hfs_read(), but without cluster_read() */ +int +hfs_pagein(ap) + struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + register struct vnode *vp; + struct hfsnode *hp; + FCB *fcb; + long devBlockSize = 0; + int retval; + + DBG_FUNC_NAME("hfs_pagein"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + vp = ap->a_vp; + hp = VTOH(vp); + fcb = HTOFCB(hp); + + if (vp->v_type != VREG && vp->v_type != VLNK) + panic("hfs_pagein: vp not UBC type\n"); + + DBG_VOP(("\tfile size Ox%X\n", 
		(u_int)fcb->fcbEOF));
	DBG_VOP(("\tstarting at offset Ox%X of file, length Ox%X\n", (u_int)ap->a_f_offset, (u_int)ap->a_size));

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(vp);
#endif

	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	/* Delegate the actual page-in I/O to the cluster layer. */
	retval = cluster_pagein(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset,
				ap->a_size, (off_t)fcb->fcbEOF, devBlockSize,
				ap->a_flags);

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(vp);
#endif
	DBG_VOP_LOCKS_TEST(retval);

	return (retval);
}

/*
 * pageout for HFS filesystem.
 */
int
hfs_pageout(ap)
	struct vop_pageout_args /* {
	   struct vnode *a_vp,
	   upl_t a_pl,
	   vm_offset_t a_pl_offset,
	   off_t a_f_offset,
	   size_t a_size,
	   struct ucred *a_cred,
	   int a_flags
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct hfsnode *hp = VTOH(vp);
	FCB *fcb = HTOFCB(hp);
	int retval;
	long devBlockSize = 0;

	DBG_FUNC_NAME("hfs_pageout");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(vp);DBG_VOP_CONT(("\n"));
	DBG_VOP(("\thfsnode 0x%x (%s)\n", (u_int)hp, H_NAME(hp)));
	DBG_VOP(("\tstarting at offset Ox%lX of file, length Ox%lX\n",
		(UInt32)ap->a_f_offset, (UInt32)ap->a_size));

	DBG_VOP_LOCKS_INIT(0, vp, VOPDBG_LOCKED,
		VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(vp);
#endif

	if (UBCINVALID(vp))
		panic("hfs_pageout: Not a VREG: vp=%x", vp);

	VOP_DEVBLOCKSIZE(hp->h_meta->h_devvp, &devBlockSize);

	/* Delegate the actual page-out I/O to the cluster layer. */
	retval = cluster_pageout(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size,
				 (off_t)fcb->fcbEOF, devBlockSize, ap->a_flags);
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (retval == 0 && ap->a_cred && ap->a_cred->cr_uid != 0)
		hp->h_meta->h_mode &= ~(ISUID | ISGID);

#if HFS_DIAGNOSTIC
	debug_check_blocksizes(vp);
#endif

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}

/*
 * Intercept B-Tree node writes to unswap them if necessary.
#
#vop_bwrite {
#	IN struct buf *bp;
 */
int
hfs_bwrite(ap)
struct vop_bwrite_args /* {
	struct buf *a_bp;
} */ *ap;
{
	register struct buf *bp = ap->a_bp;
	register struct vnode *vp = bp->b_vp;
	BlockDescriptor block;
	int retval = 0;

	DBG_FUNC_NAME("hfs_bwrite");

#if BYTE_ORDER == LITTLE_ENDIAN
	/* Trap B-Tree writes */
	if ((H_FILEID(VTOH(vp)) == kHFSExtentsFileID) ||
	    (H_FILEID(VTOH(vp)) == kHFSCatalogFileID)) {

		/* Swap if the B-Tree node is in native byte order */
		if (((UInt16 *)((char *)bp->b_data + bp->b_bcount - 2))[0] == 0x000e) {
			/* Prepare the block pointer */
			block.blockHeader = bp;
			block.buffer = bp->b_data + IOBYTEOFFSETFORBLK(bp->b_blkno, VTOHFS(vp)->hfs_phys_block_size);
			block.blockReadFromDisk = (bp->b_flags & B_CACHE) == 0;	/* not found in cache ==> came from disk */
			block.blockSize = bp->b_bcount;

			/* Endian un-swap B-Tree node */
			SWAP_BT_NODE (&block, ISHFSPLUS (VTOVCB(vp)), H_FILEID(VTOH(vp)), 1);
		}

		/* We don't check to make sure that it's 0x0e00 because it could be all zeros */
	}
#endif

	retval = vn_bwrite (ap);

	return (retval);
}
diff --git a/bsd/hfs/hfs_search.c b/bsd/hfs/hfs_search.c
new file mode 100644
index 000000000..e4adb4201
--- /dev/null
+++ b/bsd/hfs/hfs_search.c
@@ -0,0 +1,830 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.
 Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*	@(#)hfs_search.c
 *
 *	(c) 1997-2000	Apple Computer, Inc.  All Rights Reserved
 *
 *
 *	MODIFICATION HISTORY:
 *	04-May-1999	Don Brady	Split off from hfs_vnodeops.c.
 */

/*
 * NOTE(review): the header names on the following #include lines were lost
 * in extraction (the angle-bracket contents were stripped) -- restore them
 * from the original hfs_search.c before building. TODO confirm the list.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "hfs.h"
#include "hfs_dbg.h"
#include "hfscommon/headers/FileMgrInternal.h"
#include "hfscommon/headers/CatalogPrivate.h"
#include "hfscommon/headers/HFSUnicodeWrappers.h"


/* Private description used in hfs_search */
/*
 * ============ W A R N I N G ! ============
 * DO NOT INCREASE THE SIZE OF THIS STRUCT!
 * It must match the size of the opaque
 * searchstate struct (in sys/attr.h).
 */
struct SearchState {
	long			searchBits;
	BTreeIterator		btreeIterator;
};
typedef struct SearchState SearchState;


static int UnpackSearchAttributeBlock(struct vnode *vp, struct attrlist *alist, searchinfospec_t *searchInfo, void *attributeBuffer);

Boolean CheckCriteria(ExtendedVCB *vcb, const SearchState *searchState,
		u_long searchBits, struct attrlist *attrList,
		CatalogNodeData *cnp, CatalogKey *key,
		searchinfospec_t *searchInfo1, searchinfospec_t *searchInfo2);

static int CheckAccess(CatalogNodeData *cnp, CatalogKey *key, struct proc *p);

static int InsertMatch(struct vnode *vp, struct uio *a_uio, CatalogNodeData *cnp,
		CatalogKey *key, struct attrlist *returnAttrList,
		void *attributesBuffer, void *variableBuffer,
		u_long bufferSize, u_long * nummatches );

static Boolean CompareRange(u_long val, u_long low, u_long high);
static Boolean CompareWideRange(u_int64_t val, u_int64_t low, u_int64_t high);

/* Inclusive range test: low <= val <= high. */
static Boolean CompareRange( u_long val, u_long low, u_long high )
{
	return( (val >= low) && (val <= high) );
}

/* 64-bit variant of the inclusive range test. */
static Boolean CompareWideRange( u_int64_t val, u_int64_t low, u_int64_t high )
{
	return( (val >= low) && (val <= high) );
}
//#define CompareRange(val, low, high)	((val >= low) && (val <= high))



/************************************************************************/
/* Entry for searchfs()                                                 */
/************************************************************************/

#define	errSearchBufferFull	101	/* Internal search errors */
/*
#
#% searchfs	vp	L L L
#
vop_searchfs {
	IN struct vnode *vp;
	IN off_t length;
	IN int flags;
	IN struct ucred *cred;
	IN struct proc *p;
};
*/

int
hfs_search( ap )
struct vop_searchfs_args *ap; /*
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	void *a_searchparams1;
	void *a_searchparams2;
	struct attrlist *a_searchattrs;
	u_long a_maxmatches;
	struct timeval *a_timelimit;
	struct attrlist *a_returnattrs;
	u_long *a_nummatches;
	u_long a_scriptcode;
	u_long a_options;
	struct uio *a_uio;
	struct searchstate *a_searchstate;
*/
{
	CatalogNodeData		cnode;
	BTreeKey		*key;
	FSBufferDescriptor	btRecord;
	FCB*			catalogFCB;
	SearchState		*searchState;
	searchinfospec_t	searchInfo1;
	searchinfospec_t	searchInfo2;
	void			*attributesBuffer;
	void			*variableBuffer;
	short			recordSize;
	short			operation;
	u_long			fixedBlockSize;
	u_long			eachReturnBufferSize;
	struct proc		*p = current_proc();
	u_long			nodesToCheck = 30;	/* After we search 30 nodes we must give up time */
	u_long			lastNodeNum = 0XFFFFFFFF;
	ExtendedVCB		*vcb = VTOVCB(ap->a_vp);
	int			err = E_NONE;
	int			isHFSPlus;

	/* XXX Parameter check a_searchattrs? */

	*(ap->a_nummatches) = 0;

	if ( ap->a_options & ~SRCHFS_VALIDOPTIONSMASK )
		return( EINVAL );

	if (ap->a_uio->uio_resid <= 0)
		return (EINVAL);

	isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord);
	searchState = (SearchState *)ap->a_searchstate;

	/*
	 * Check if this is the first time we are being called.
	 * If it is, allocate SearchState and we'll move it to the users space on exit
	 */
	if ( ap->a_options & SRCHFS_START ) {
		bzero( (caddr_t)searchState, sizeof(SearchState) );
		operation = kBTreeFirstRecord;
		ap->a_options &= ~SRCHFS_START;
	} else {
		operation = kBTreeCurrentRecord;
	}

	/* UnPack the search boundries, searchInfo1, searchInfo2 */
	err = UnpackSearchAttributeBlock( ap->a_vp, ap->a_searchattrs, &searchInfo1, ap->a_searchparams1 );
	if (err) return err;
	err = UnpackSearchAttributeBlock( ap->a_vp, ap->a_searchattrs, &searchInfo2, ap->a_searchparams2 );
	if (err) return err;

	btRecord.itemCount = 1;
	/* HFS Plus reads full catalog nodes; classic HFS reads the smaller
	   HFSCatalogFile record into the cnd_extra overlay. */
	if (isHFSPlus) {
		btRecord.itemSize = sizeof(cnode);
		btRecord.bufferAddress = &cnode;
	} else {
		btRecord.itemSize = sizeof(HFSCatalogFile);
		btRecord.bufferAddress = &cnode.cnd_extra;
	}
	catalogFCB = VTOFCB( vcb->catalogRefNum );
	key = (BTreeKey*) &(searchState->btreeIterator.key);
	fixedBlockSize = sizeof(u_long) + AttributeBlockSize( ap->a_returnattrs );	/* u_long for length longword */
	eachReturnBufferSize = fixedBlockSize;

	if ( ap->a_returnattrs->commonattr & ATTR_CMN_NAME )	/* XXX should be more robust!
*/ + eachReturnBufferSize += kHFSPlusMaxFileNameBytes + 1; + + MALLOC( attributesBuffer, void *, eachReturnBufferSize, M_TEMP, M_WAITOK ); + variableBuffer = (void*)((char*) attributesBuffer + fixedBlockSize); + + /* Lock catalog b-tree */ + err = hfs_metafilelocking( VTOHFS(ap->a_vp), kHFSCatalogFileID, LK_SHARED, p ); + if ( err != E_NONE ) { + goto ExitThisRoutine; + }; + + /* + * Iterate over all the catalog btree records + */ + + err = BTIterateRecord( catalogFCB, operation, &(searchState->btreeIterator), &btRecord, &recordSize ); + + while( err == E_NONE ) { + if (!isHFSPlus) + CopyCatalogNodeData(vcb, (CatalogRecord*)&cnode.cnd_extra, &cnode); + + if ( CheckCriteria( vcb, searchState, ap->a_options, ap->a_searchattrs, &cnode, + (CatalogKey *)key, &searchInfo1, &searchInfo2 ) && + CheckAccess(&cnode, (CatalogKey *)key, ap->a_uio->uio_procp)) { + err = InsertMatch(ap->a_vp, ap->a_uio, &cnode, (CatalogKey *)key, + ap->a_returnattrs, attributesBuffer, variableBuffer, + eachReturnBufferSize, ap->a_nummatches); + if ( err != E_NONE ) + break; + } + + err = BTIterateRecord( catalogFCB, kBTreeNextRecord, &(searchState->btreeIterator), &btRecord, &recordSize ); + + if ( *(ap->a_nummatches) >= ap->a_maxmatches ) + break; + + if ( searchState->btreeIterator.hint.nodeNum != lastNodeNum ) { + lastNodeNum = searchState->btreeIterator.hint.nodeNum; + if ( --nodesToCheck == 0 ) + break; /* We must leave the kernel to give up time */ + } + } + + /* Unlock catalog b-tree */ + (void) hfs_metafilelocking( VTOHFS(ap->a_vp), kHFSCatalogFileID, LK_RELEASE, p ); + + + if ( err == E_NONE ) { + err = EAGAIN; /* signal to the user to call searchfs again */ + } else if ( err == errSearchBufferFull ) { + if ( *(ap->a_nummatches) > 0 ) + err = EAGAIN; + else + err = ENOBUFS; + } else if ( err == btNotFound ) { + err = E_NONE; /* the entire disk has been searched */ + } + +ExitThisRoutine: + FREE( attributesBuffer, M_TEMP ); + + return( err ); +} + + +static Boolean +CompareMasked(const 
UInt32 *thisValue, const UInt32 *compareData, + const UInt32 *compareMask, UInt32 count) +{ + Boolean matched; + UInt32 i; + + matched = true; /* Assume it will all match */ + + for (i=0; i= f_len) { + *tsp = f_len; + + if (FastRelString(tsp++, find) == 0) + return TRUE; + } + + return FALSE; +} + + +/* + * Check to see if caller has access rights to this item + */ +static int +CheckAccess(CatalogNodeData *cnp, CatalogKey *key, struct proc *p) +{ + return (1); +} + +Boolean +CheckCriteria( ExtendedVCB *vcb, const SearchState *searchState, u_long searchBits, + struct attrlist *attrList, CatalogNodeData *cnp, CatalogKey *key, + searchinfospec_t *searchInfo1, searchinfospec_t *searchInfo2 ) +{ + Boolean matched, atleastone; + Boolean isHFSPlus; + attrgroup_t searchAttributes; + + isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord); + + switch (cnp->cnd_type) { + case kCatalogFolderNode: + if ( (searchBits & SRCHFS_MATCHDIRS) == 0 ) { /* If we are NOT searching folders */ + matched = false; + goto TestDone; + } + break; + + case kCatalogFileNode: + if ( (searchBits & SRCHFS_MATCHFILES) == 0 ) { /* If we are NOT searching files */ + matched = false; + goto TestDone; + } + break; + + default: /* Never match a thread record or any other type. 
*/ + return( false ); /* Not a file or folder record, so can't search it */ + } + + matched = true; /* Assume we got a match */ + atleastone = false; /* Dont insert unless we match at least one criteria */ + + /* First, attempt to match the name -- either partial or complete */ + if ( attrList->commonattr & ATTR_CMN_NAME ) { + if (isHFSPlus) { + /* Check for partial/full HFS Plus name match */ + + if ( searchBits & SRCHFS_MATCHPARTIALNAMES ) { + matched = ComparePartialUnicodeName(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length, + (UniChar*)searchInfo1->name, + searchInfo1->nameLength ); + } else /* full HFS Plus name match */ { + matched = (FastUnicodeCompare(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length, + (UniChar*)searchInfo1->name, + searchInfo1->nameLength ) == 0); + } + } else { + /* Check for partial/full HFS name match */ + + if ( searchBits & SRCHFS_MATCHPARTIALNAMES ) + matched = ComparePartialPascalName(key->hfs.nodeName, (u_char*)searchInfo1->name); + else /* full HFS name match */ + matched = (FastRelString(key->hfs.nodeName, (u_char*)searchInfo1->name) == 0); + } + + if ( matched == false || (searchBits & ~SRCHFS_MATCHPARTIALNAMES) == 0 ) + goto TestDone; /* no match, or nothing more to compare */ + + atleastone = true; + } + + /* Now that we have a record worth searching, see if it matches the search attributes */ + if (cnp->cnd_type == kCatalogFileNode) { + if ((attrList->dirattr & ~ATTR_FILE_VALIDMASK) != 0) { /* attr we do know about */ + matched = false; + goto TestDone; + } + else if ((attrList->dirattr & ATTR_FILE_VALIDMASK) != 0) { + searchAttributes = attrList->fileattr; + + /* File logical length (data fork) */ + if ( searchAttributes & ATTR_FILE_DATALENGTH ) { + matched = CompareWideRange( + cnp->cnd_datafork.logicalSize, + searchInfo1->f.dataLogicalLength, + searchInfo2->f.dataLogicalLength); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* File physical length (data fork) */ + if ( 
searchAttributes & ATTR_FILE_DATAALLOCSIZE ) { + matched = CompareWideRange( + cnp->cnd_datafork.totalBlocks * vcb->blockSize, + searchInfo1->f.dataPhysicalLength, + searchInfo2->f.dataPhysicalLength); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* File logical length (resource fork) */ + if ( searchAttributes & ATTR_FILE_RSRCLENGTH ) { + matched = CompareWideRange( + cnp->cnd_rsrcfork.logicalSize, + searchInfo1->f.resourceLogicalLength, + searchInfo2->f.resourceLogicalLength); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* File physical length (resource fork) */ + if ( searchAttributes & ATTR_FILE_RSRCALLOCSIZE ) { + matched = CompareWideRange( + cnp->cnd_rsrcfork.totalBlocks * vcb->blockSize, + searchInfo1->f.resourcePhysicalLength, + searchInfo2->f.resourcePhysicalLength); + if (matched == false) goto TestDone; + atleastone = true; + } + } + else { + atleastone = true; /* to match SRCHFS_MATCHDIRS */ + } + } + /* + * Check the directory attributes + */ + else if (cnp->cnd_type == kCatalogFolderNode) { + if ((attrList->dirattr & ~ATTR_DIR_VALIDMASK) != 0) { /* attr we do know about */ + matched = false; + goto TestDone; + } + else if ((attrList->dirattr & ATTR_DIR_VALIDMASK) != 0) { + searchAttributes = attrList->dirattr; + + /* Directory valence */ + if ( searchAttributes & ATTR_DIR_ENTRYCOUNT ) { + matched = CompareRange(cnp->cnd_valence, searchInfo1->d.numFiles, searchInfo2->d.numFiles ); + if (matched == false) goto TestDone; + atleastone = true; + } + } + else { + atleastone = true; /* to match SRCHFS_MATCHDIRS */ + } + } + + /* + * Check the common attributes + */ + searchAttributes = attrList->commonattr; + if ( (searchAttributes & ATTR_CMN_VALIDMASK) != 0 ) { + + /* node ID */ + if ( searchAttributes & ATTR_CMN_OBJID ) { + matched = CompareRange( cnp->cnd_nodeID, searchInfo1->nodeID, searchInfo2->nodeID ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Parent ID */ + if ( 
searchAttributes & ATTR_CMN_PAROBJID ) { + HFSCatalogNodeID parentID; + + if (isHFSPlus) + parentID = key->hfsPlus.parentID; + else + parentID = key->hfs.parentID; + + matched = CompareRange( parentID, searchInfo1->parentDirID, searchInfo2->parentDirID ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Finder Info & Extended Finder Info where extFinderInfo is last 32 bytes */ + if ( searchAttributes & ATTR_CMN_FNDRINFO ) { + UInt32 *thisValue; + thisValue = (UInt32 *) &cnp->cnd_finderInfo; + + /* + * Note: ioFlFndrInfo and ioDrUsrWds have the same offset in search info, so + * no need to test the object type here. + */ + matched = CompareMasked( thisValue, (UInt32 *) &searchInfo1->finderInfo, + (UInt32 *) &searchInfo2->finderInfo, 8 ); /* 8 * UInt32 */ + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Create date */ + if ( searchAttributes & ATTR_CMN_CRTIME ) { + matched = CompareRange(to_bsd_time(cnp->cnd_createDate), + searchInfo1->creationDate.tv_sec, searchInfo2->creationDate.tv_sec ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Mod date */ + if ( searchAttributes & ATTR_CMN_MODTIME ) { + matched = CompareRange(to_bsd_time(cnp->cnd_contentModDate), + searchInfo1->modificationDate.tv_sec, searchInfo2->modificationDate.tv_sec ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Change Time */ + if ( searchAttributes & ATTR_CMN_CHGTIME ) { + matched = CompareRange(to_bsd_time(cnp->cnd_attributeModDate), + searchInfo1->changeDate.tv_sec, searchInfo2->changeDate.tv_sec ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Backup date */ + if ( searchAttributes & ATTR_CMN_BKUPTIME ) { + matched = CompareRange(to_bsd_time(cnp->cnd_backupDate), + searchInfo1->lastBackupDate.tv_sec, searchInfo2->lastBackupDate.tv_sec ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* User ID */ + if ( searchAttributes & ATTR_CMN_OWNERID ) { + matched = 
CompareRange( cnp->cnd_ownerID, searchInfo1->uid, searchInfo2->uid ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* Group ID */ + if ( searchAttributes & ATTR_CMN_GRPID ) { + matched = CompareRange( cnp->cnd_groupID, searchInfo1->gid, searchInfo2->gid ); + if (matched == false) goto TestDone; + atleastone = true; + } + + /* mode */ + if ( searchAttributes & ATTR_CMN_ACCESSMASK ) { + matched = CompareRange( (u_long)cnp->cnd_mode, + (u_long)searchInfo1->mask, (u_long)searchInfo2->mask ); + if (matched == false) goto TestDone; + atleastone = true; + } + + } + + /* If we got here w/o matching any, then set to false */ + if (! atleastone) + matched = false; + +TestDone: + /* + * Finally, determine whether we need to negate the sense of the match + * (i.e. find all objects that DON'T match). + */ + if ( searchBits & SRCHFS_NEGATEPARAMS ) + matched = !matched; + + return( matched ); +} + + +/* + * Adds another record to the packed array for output + */ +static int +InsertMatch( struct vnode *root_vp, struct uio *a_uio, CatalogNodeData *cnp, + CatalogKey *key, struct attrlist *returnAttrList, void *attributesBuffer, + void *variableBuffer, u_long bufferSize, u_long * nummatches ) +{ + int err; + void *rovingAttributesBuffer; + void *rovingVariableBuffer; + struct hfsCatalogInfo catalogInfo; + u_long packedBufferSize; + ExtendedVCB *vcb = VTOVCB(root_vp); + Boolean isHFSPlus = vcb->vcbSigWord == kHFSPlusSigWord; + u_long privateDir = VTOHFS(root_vp)->hfs_private_metadata_dir; + + rovingAttributesBuffer = (char*)attributesBuffer + sizeof(u_long); /* Reserve space for length field */ + rovingVariableBuffer = variableBuffer; + + INIT_CATALOGDATA(&catalogInfo.nodeData, 0); + + /* The packing call below expects a struct hfsCatalogInfo */ + bcopy(cnp, &catalogInfo.nodeData, (cnp->cnd_type == kCatalogFileNode) ? + sizeof(HFSPlusCatalogFile) : sizeof(HFSPlusCatalogFolder)); + + catalogInfo.nodeData.cnm_parID = isHFSPlus ? 
key->hfsPlus.parentID : key->hfs.parentID; + + /* hide open files that have been deleted */ + if ((privateDir != 0) && (catalogInfo.nodeData.cnm_parID == privateDir)) + return (0); + + /* hide our private meta data directory */ + if ((privateDir != 0) && (catalogInfo.nodeData.cnd_nodeID == privateDir)) + return (0); + + if ( returnAttrList->commonattr & ATTR_CMN_NAME ) { + size_t utf8len = 0; + + catalogInfo.nodeData.cnm_nameptr = catalogInfo.nodeData.cnm_namespace; + + /* Return result in UTF-8 */ + if ( isHFSPlus ) { + err = utf8_encodestr(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length * sizeof(UniChar), + catalogInfo.nodeData.cnm_namespace, + &utf8len, + MAXHFSVNODELEN + 1, ':', 0); + if (err == ENAMETOOLONG) { + utf8len = utf8_encodelen(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length * sizeof(UniChar), ':', 0); + MALLOC(catalogInfo.nodeData.cnm_nameptr, char *, utf8len+1, M_TEMP, M_WAITOK); + catalogInfo.nodeData.cnm_flags |= kCatNameIsAllocated; + err = utf8_encodestr(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length * sizeof(UniChar), + catalogInfo.nodeData.cnm_nameptr, + &utf8len, + utf8len + 1, ':', 0); + } + } else { + err = hfs_to_utf8(vcb, + key->hfs.nodeName, + MAXHFSVNODELEN + 1, + (ByteCount*) &utf8len, + catalogInfo.nodeData.cnm_namespace); + if (err == ENAMETOOLONG) { + MALLOC(catalogInfo.nodeData.cnm_nameptr, char *, utf8len+1, M_TEMP, M_WAITOK); + catalogInfo.nodeData.cnm_flags |= kCatNameIsAllocated; + err = hfs_to_utf8(vcb, + key->hfs.nodeName, + utf8len + 1, + (ByteCount*) &utf8len, + catalogInfo.nodeData.cnm_nameptr); + } else if (err) { + /* + * When an HFS name cannot be encoded with the current + * volume encoding we use MacRoman as a fallback. 
+ */ + err = mac_roman_to_utf8(key->hfs.nodeName, MAXHFSVNODELEN + 1, + (ByteCount*) &utf8len, + catalogInfo.nodeData.cnm_namespace); + } + } + catalogInfo.nodeData.cnm_length = utf8len; + if (err && (catalogInfo.nodeData.cnm_flags & kCatNameIsAllocated)) + { + DisposePtr(catalogInfo.nodeData.cnm_nameptr); + catalogInfo.nodeData.cnm_flags &= ~kCatNameIsAllocated; + catalogInfo.nodeData.cnm_nameptr = catalogInfo.nodeData.cnm_namespace; + catalogInfo.nodeData.cnm_namespace[0] = 0; + } + } + + PackCatalogInfoAttributeBlock( returnAttrList,root_vp, &catalogInfo, &rovingAttributesBuffer, &rovingVariableBuffer ); + + CLEAN_CATALOGDATA(&catalogInfo.nodeData); + + packedBufferSize = (char*)rovingVariableBuffer - (char*)attributesBuffer; + + if ( packedBufferSize > a_uio->uio_resid ) + return( errSearchBufferFull ); + + (* nummatches)++; + + *((u_long *)attributesBuffer) = packedBufferSize; /* Store length of fixed + var block */ + + err = uiomove( (caddr_t)attributesBuffer, packedBufferSize, a_uio ); /* XXX should be packedBufferSize */ + + return( err ); +} + + +static int +UnpackSearchAttributeBlock( struct vnode *vp, struct attrlist *alist, searchinfospec_t *searchInfo, void *attributeBuffer ) +{ + attrgroup_t a; + u_long bufferSize; + + DBG_ASSERT(searchInfo != NULL); + + bufferSize = *((u_long *)attributeBuffer); + if (bufferSize == 0) + return (EINVAL); /* XXX -DJB is a buffer size of zero ever valid for searchfs? 
*/ + + ++((u_long *)attributeBuffer); /* advance past the size */ + + /* + * UnPack common attributes + */ + a = alist->commonattr; + if ( a != 0 ) { + if ( a & ATTR_CMN_NAME ) { + char *s = (char*) attributeBuffer + ((attrreference_t *) attributeBuffer)->attr_dataoffset; + size_t len = ((attrreference_t *) attributeBuffer)->attr_length; + + if (len > sizeof(searchInfo->name)) + return (EINVAL); + + if (VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) { + size_t ucslen; + /* Convert name to Unicode to match HFS Plus B-Tree names */ + + if (len > 0) { + if (utf8_decodestr(s, len-1, (UniChar*)searchInfo->name, &ucslen, + sizeof(searchInfo->name), ':', UTF_DECOMPOSED)) + return (EINVAL); + + searchInfo->nameLength = ucslen / sizeof(UniChar); + } else { + searchInfo->nameLength = 0; + } + ++((attrreference_t *)attributeBuffer); + + } else { + /* Convert name to pascal string to match HFS B-Tree names */ + + if (len > 0) { + if (utf8_to_hfs(VTOVCB(vp), len-1, s, (u_char*)searchInfo->name) != 0) + return (EINVAL); + + searchInfo->nameLength = searchInfo->name[0]; + } else { + searchInfo->name[0] = searchInfo->nameLength = 0; + } + ++((attrreference_t *)attributeBuffer); + } + } + if ( a & ATTR_CMN_OBJID ) { + searchInfo->nodeID = ((fsobj_id_t *) attributeBuffer)->fid_objno; /* ignore fid_generation */ + ++((fsobj_id_t *)attributeBuffer); + } + if ( a & ATTR_CMN_PAROBJID ) { + searchInfo->parentDirID = ((fsobj_id_t *) attributeBuffer)->fid_objno; /* ignore fid_generation */ + ++((fsobj_id_t *)attributeBuffer); + } + if ( a & ATTR_CMN_CRTIME ) { + searchInfo->creationDate = *((struct timespec *)attributeBuffer); + ++((struct timespec *)attributeBuffer); + } + if ( a & ATTR_CMN_MODTIME ) { + searchInfo->modificationDate = *((struct timespec *)attributeBuffer); + ++((struct timespec *)attributeBuffer); + } + if ( a & ATTR_CMN_CHGTIME ) { + searchInfo->changeDate = *((struct timespec *)attributeBuffer); + ++((struct timespec *)attributeBuffer); + } + if ( a & ATTR_CMN_BKUPTIME ) { 
+ searchInfo->lastBackupDate = *((struct timespec *)attributeBuffer); + ++((struct timespec *)attributeBuffer); + } + if ( a & ATTR_CMN_FNDRINFO ) { + bcopy( attributeBuffer, searchInfo->finderInfo, sizeof(u_long) * 8 ); + (u_long *)attributeBuffer += 8; + } + if ( a & ATTR_CMN_BKUPTIME ) { + searchInfo->lastBackupDate = *((struct timespec *)attributeBuffer); + ++((struct timespec *)attributeBuffer); + } + if ( a & ATTR_CMN_OWNERID ) { + searchInfo->uid = *((uid_t *)attributeBuffer); + ++((uid_t *)attributeBuffer); + } + if ( a & ATTR_CMN_GRPID ) { + searchInfo->gid = *((gid_t *)attributeBuffer); + ++((gid_t *)attributeBuffer); + } + if ( a & ATTR_CMN_ACCESSMASK ) { + searchInfo->mask = *((mode_t *)attributeBuffer); + ++((mode_t *)attributeBuffer); + } + } + + a = alist->dirattr; + if ( a != 0 ) { + if ( a & ATTR_DIR_ENTRYCOUNT ) { + searchInfo->d.numFiles = *((u_long *)attributeBuffer); + ++((u_long *)attributeBuffer); + } + } + + a = alist->fileattr; + if ( a != 0 ) { + if ( a & ATTR_FILE_DATALENGTH ) { + searchInfo->f.dataLogicalLength = *((off_t *)attributeBuffer); + ++((off_t *)attributeBuffer); + } + if ( a & ATTR_FILE_DATAALLOCSIZE ) { + searchInfo->f.dataPhysicalLength = *((off_t *)attributeBuffer); + ++((off_t *)attributeBuffer); + } + if ( a & ATTR_FILE_RSRCLENGTH ) { + searchInfo->f.resourceLogicalLength = *((off_t *)attributeBuffer); + ++((off_t *)attributeBuffer); + } + if ( a & ATTR_FILE_RSRCALLOCSIZE ) { + searchInfo->f.resourcePhysicalLength = *((off_t *)attributeBuffer); + ++((off_t *)attributeBuffer); + } + } + + return (0); +} + + diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c new file mode 100644 index 000000000..b3b2e8f1a --- /dev/null +++ b/bsd/hfs/hfs_vfsops.c @@ -0,0 +1,1883 @@ +/* + * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * hfs_vfsops.c + * derived from @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95 + * + * (c) Copyright 1997-1998 Apple Computer, Inc. All rights reserved. + * + * hfs_vfsops.c -- VFS layer for loadable HFS file system. + * + * HISTORY + * 9-Nov-1999 Don Brady Fix error handling in hfs_unmount [2399157]. + * 9-Sep-1999 Don Brady Clear system file fcbModified flags in hfs_flushvolumeheader/hfs_flushMDB. + * 5-Aug-1999 Pat Dirks Moved special HFS flag from f_fsid.val[0][0] to mount flags (#2293117). + * 23-Jul-1999 Pat Dirks Added special-case code for root's parent directory in hfs_vget (#2263664). + * 9-Jun-1999 Don Brady Fix hfs_mount for reload and read-only downgrade cases. + * 2-Jun-1999 Don Brady Fix hfs_statfs to return correct f_files value. 
+ * 4-May-1999 Don Brady Remove obsolete loadable module code. + * 22-Mar-1999 Don Brady Hide our private meta data in hfs_vget. + * 18-May-1999 Don Brady Add hfs_mountroot for HFS Plus rooting. + * 22-Mar-1999 Don Brady Hide our private meta data in hfs_vget. + * 12-Nov-1998 Pat Dirks Changed hfs_statfs to return volume's actual log. block size (#2286198). + * 22-Aug-1998 Scott Roberts Assign uid,gid, and mask for default on objects. + * 29-Jul-1998 Pat Dirks Fixed changed hfs_vget() to release complex node when retrying for data fork node. + * 27-Jul-1998 Scott Roberts Changes hfs_vget() to return data forks instead of complex. + * 14-Jul-1998 CHW Added check for use count of device node in hfs_mountfs + * 1-Jul-1998 Don Brady Always set kHFSVolumeUnmountedMask bit of vcb->vcbAtrb in hfs_unmount. + * 30-Jun-1998 Don Brady Removed hard-coded EINVAL error in hfs_mountfs (for radar #2249539). + * 24-Jun-1998 Don Brady Added setting of timezone to hfs_mount (radar #2226387). + * 4-Jun-1998 Don Brady Use VPUT/VRELE macros instead of vput/vrele. + * 6-May-1998 Scott Roberts Updated hfs_vget with kernel changes. + * 29-Apr-1998 Don Brady Update hfs_statfs to actually fill in statfs fields (radar #2227092). + * 23-Apr-1998 Pat Dirks Cleaned up code to call brelse() on errors from bread(). + * 4/20/1998 Don Brady Remove course-grained hfs metadata locking. + * 4/18/1998 Don Brady Add VCB locking. + * 4/16/1998 Don Brady hfs_unmount now flushes the volume bitmap. Add b-tree locking to hfs_vget. + * 4/8/1998 Don Brady Replace hfs_mdbupdate with hfs_flushvolumeheader and hfs_flushMDB. + * 4/8/1998 Don Brady In hfs_unmount call hfs_mdbupdate before trashing metafiles! + * 4/3/1998 Don Brady Call InitCatalogCache instead of PostInitFS. + * 4/1/1998 Don Brady Get rid of gHFSFlags, gReqstVol and gFlushOnlyFlag globals (not used). + * 3/30/1998 Don Brady In hfs_unmount use SKIPSYSTEM option on first vflush. 
+ * 3/26/1998 Don Brady Changed hfs_unmount to call vflush before calling hfsUnmount. + * In hfs_mountfs don't mount hfs-wrapper. + * 3/19/1998 Pat Dirks Fixed bug in hfs_mount where device vnode was being + * released on way out. + * 11/14/1997 Pat Dirks Derived from hfs_vfsops.c + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hfs.h" +#include "hfs_dbg.h" +#include "hfs_endian.h" + +#include "hfscommon/headers/FileMgrInternal.h" +#include "hfscommon/headers/BTreesInternal.h" + +#if HFS_DIAGNOSTIC +int hfs_dbg_all = 0; +int hfs_dbg_vfs = 0; +int hfs_dbg_vop = 0; +int hfs_dbg_load = 0; +int hfs_dbg_io = 0; +int hfs_dbg_utils = 0; +int hfs_dbg_rw = 0; +int hfs_dbg_lookup = 0; +int hfs_dbg_tree = 0; +int hfs_dbg_err = 0; +int hfs_dbg_test = 0; +#endif + +/* + * HFS File System globals: + */ +Ptr gBufferAddress[BUFFERPTRLISTSIZE]; +struct buf *gBufferHeaderPtr[BUFFERPTRLISTSIZE]; +int gBufferListIndex; +simple_lock_data_t gBufferPtrListLock; + +//static char hfs_fs_name[MFSNAMELEN] = "hfs"; + +/* The following represent information held in low-memory on the MacOS: */ + +struct FSVarsRec *gFSMVars; + +/* + * Global variables defined in other modules: + */ +extern struct vnodeopv_desc hfs_vnodeop_opv_desc; + +extern struct vnode *hfs_vhashget(dev_t dev, UInt32 nodeID, UInt8 forkType); + +extern OSErr HFSPlusToHFSExtents( const HFSPlusExtentRecord oldExtents, HFSExtentRecord newExtents); + + +extern void inittodr( time_t base); +extern OSErr GetVolumeNameFromCatalog(ExtendedVCB *vcb); +extern void CopyCatalogToObjectMeta(struct hfsCatalogInfo *catInfo, struct vnode *vp, struct hfsfilemeta *fm); +extern void CopyCatalogToFCB(struct hfsCatalogInfo *catInfo, struct vnode *vp); +extern void hfs_name_CatToMeta(CatalogNodeData *nodeData, struct hfsfilemeta *fm); + +int hfs_changefs(struct mount *mp, struct hfs_mount_args *args, struct proc *p); + +int hfs_reload(struct mount *mp, struct 
ucred *cred, struct proc *p); +int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, struct hfs_mount_args *args); +int hfs_vget(struct mount *mp, void *objID, struct vnode **vpp); +void hfs_vhashinit(); +void hfs_converterinit(void); + + +static int hfs_statfs(); + + +/* + * Called by vfs_mountroot when mounting HFS Plus as root. + */ +int +hfs_mountroot() +{ + extern struct vnode *rootvp; + struct mount *mp; + struct proc *p = current_proc(); /* XXX */ + struct hfsmount *hfsmp; + int error; + + /* + * Get vnode for rootdev. + */ + if ((error = bdevvp(rootdev, &rootvp))) { + printf("hfs_mountroot: can't setup bdevvp"); + return (error); + } + if ((error = vfs_rootmountalloc("hfs", "root_device", &mp))) + return (error); + if ((error = hfs_mountfs(rootvp, mp, p, NULL))) { + mp->mnt_vfc->vfc_refcount--; + vfs_unbusy(mp, p); + _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + return (error); + } + simple_lock(&mountlist_slock); + CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); + simple_unlock(&mountlist_slock); + + /* Init hfsmp */ + hfsmp = VFSTOHFS(mp); + + hfsmp->hfs_dir_mask = (S_IRWXU|S_IRWXG|S_IRWXO); /* 0777 */ + hfsmp->hfs_file_mask = (S_IRWXU|S_IRWXG|S_IRWXO); /* 0777 */ + + (void)hfs_statfs(mp, &mp->mnt_stat, p); + + vfs_unbusy(mp, p); + inittodr(to_bsd_time(HFSTOVCB(hfsmp)->vcbLsMod)); + return (0); +} + + +/* + * VFS Operations. + * + * mount system call + */ + +int +hfs_mount (mp, path, data, ndp, p) + register struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + struct hfsmount *hfsmp = NULL; + struct vnode *devvp; + struct hfs_mount_args args; + size_t size; + int retval = E_NONE; + int flags; + mode_t accessmode; + int loadconv = 0; + + if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) + goto error_exit; + + /* + * If updating, check whether changing from read-only to + * read/write; if there is no device name, that's all we do. 
+ */ + if (mp->mnt_flag & MNT_UPDATE) { + + hfsmp = VFSTOHFS(mp); + if ((hfsmp->hfs_fs_ronly == 0) && (mp->mnt_flag & MNT_RDONLY)) { + + /* use VFS_SYNC to push out System (btree) files */ + retval = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p); + if (retval && ((mp->mnt_flag & MNT_FORCE) == 0)) + goto error_exit; + + flags = WRITECLOSE; + if (mp->mnt_flag & MNT_FORCE) + flags |= FORCECLOSE; + + if ((retval = hfs_flushfiles(mp, flags))) + goto error_exit; + hfsmp->hfs_fs_clean = 1; + hfsmp->hfs_fs_ronly = 1; + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord) + retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT); + else + retval = hfs_flushMDB(hfsmp, MNT_WAIT); + + /* also get the volume bitmap blocks */ + if (!retval) + retval = VOP_FSYNC(hfsmp->hfs_devvp, NOCRED, MNT_WAIT, p); + + if (retval) { + hfsmp->hfs_fs_clean = 0; + hfsmp->hfs_fs_ronly = 0; + goto error_exit; + } + } + + if ((mp->mnt_flag & MNT_RELOAD) && + (retval = hfs_reload(mp, ndp->ni_cnd.cn_cred, p))) + goto error_exit; + + if (hfsmp->hfs_fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { + /* + * If upgrade to read-write by non-root, then verify + * that user has necessary permissions on the device. 
+ */ + if (p->p_ucred->cr_uid != 0) { + devvp = hfsmp->hfs_devvp; + vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); + if ((retval = VOP_ACCESS(devvp, VREAD | VWRITE, p->p_ucred, p))) { + VOP_UNLOCK(devvp, 0, p); + goto error_exit; + } + VOP_UNLOCK(devvp, 0, p); + } + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord) + retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT); + else + retval = hfs_flushMDB(hfsmp, MNT_WAIT); + + if (retval != E_NONE) + goto error_exit; + + /* only change hfs_fs_ronly after a successfull write */ + hfsmp->hfs_fs_ronly = 0; + hfsmp->hfs_fs_clean = 0; + } + + if ((hfsmp->hfs_fs_ronly == 0) && + (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord)) { + /* setup private/hidden directory for unlinked files */ + hfsmp->hfs_private_metadata_dir = FindMetaDataDirectory(HFSTOVCB(hfsmp)); + } + + if (args.fspec == 0) { + /* + * Process export requests. + */ + return vfs_export(mp, &hfsmp->hfs_export, &args.export); + } + } + + /* + * Not an update, or updating the name: look up the name + * and verify that it refers to a sensible block device. + */ + NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p); + retval = namei(ndp); + if (retval != E_NONE) { + DBG_ERR(("hfs_mount: CAN'T GET DEVICE: %s, %x\n", args.fspec, ndp->ni_vp->v_rdev)); + goto error_exit; + } + + devvp = ndp->ni_vp; + + if (devvp->v_type != VBLK) { + vrele(devvp); + retval = ENOTBLK; + goto error_exit; + } + if (major(devvp->v_rdev) >= nblkdev) { + vrele(devvp); + retval = ENXIO; + goto error_exit; + } + + /* + * If mount by non-root, then verify that user has necessary + * permissions on the device. 
+ */ + if (p->p_ucred->cr_uid != 0) { + accessmode = VREAD; + if ((mp->mnt_flag & MNT_RDONLY) == 0) + accessmode |= VWRITE; + vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); + if ((retval = VOP_ACCESS(devvp, accessmode, p->p_ucred, p))) { + vput(devvp); + goto error_exit; + } + VOP_UNLOCK(devvp, 0, p); + } + + if ((mp->mnt_flag & MNT_UPDATE) == 0) { + retval = hfs_mountfs(devvp, mp, p, &args); + if (retval != E_NONE) + vrele(devvp); + } else { + if (devvp != hfsmp->hfs_devvp) + retval = EINVAL; /* needs translation */ + else + retval = hfs_changefs(mp, &args, p); + vrele(devvp); + } + + if (retval != E_NONE) { + goto error_exit; + } + + + /* Set the mount flag to indicate that we support volfs */ + mp->mnt_flag |= MNT_DOVOLFS; + if (VFSTOVCB(mp)->vcbSigWord == kHFSSigWord) { + /* HFS volumes only want roman-encoded names: */ + mp->mnt_flag |= MNT_FIXEDSCRIPTENCODING; + } + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN-1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); + (void)hfs_statfs(mp, &mp->mnt_stat, p); + return (E_NONE); + +error_exit: + + return (retval); +} + + +/* change fs mount parameters */ +int +hfs_changefs(mp, args, p) + struct mount *mp; + struct hfs_mount_args *args; + struct proc *p; +{ + int retval; + int namefix, permfix, permswitch; + struct hfsmount *hfsmp; + struct hfsnode *hp; + mode_t hfs_file_mask; + ExtendedVCB *vcb; + hfsCatalogInfo catInfo; + register struct vnode *vp, *nvp; + hfs_to_unicode_func_t get_unicode_func; + unicode_to_hfs_func_t get_hfsname_func; + + hfsmp = VFSTOHFS(mp); + vcb = HFSTOVCB(hfsmp); + permswitch = (((hfsmp->hfs_unknownpermissions != 0) && ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) == 0)) || + ((hfsmp->hfs_unknownpermissions == 0) && ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) != 0))); + hfsmp->hfs_unknownpermissions = ((mp->mnt_flag & 
MNT_UNKNOWNPERMISSIONS) != 0); + namefix = permfix = 0; + + /* change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */ + if (args->hfs_timezone.tz_minuteswest != VNOVAL) { + gTimeZone = args->hfs_timezone; + } + + /* change the default uid, gid and/or mask */ + if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) { + hfsmp->hfs_uid = args->hfs_uid; + ++permfix; + } + if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) { + hfsmp->hfs_gid = args->hfs_gid; + ++permfix; + } + if (args->hfs_mask != (mode_t)VNOVAL) { + if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) { + hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS; + hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS; + if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES)) + hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE); + ++permfix; + } + } + + /* change the hfs encoding value (hfs only) */ + if ((HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) && + (hfsmp->hfs_encoding != (u_long)VNOVAL) && + (hfsmp->hfs_encoding != args->hfs_encoding)) { + + retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func); + if (retval) goto error_exit; + + /* + * Connect the new hfs_get_unicode converter but leave + * the old hfs_get_hfsname converter in place so that + * we can lookup existing vnodes to get their correctly + * encoded names. + * + * When we're all finished, we can then connect the new + * hfs_get_hfsname converter and release our interest + * in the old converters. + */ + hfsmp->hfs_get_unicode = get_unicode_func; + ++namefix; + } + + + if (!(namefix || permfix || permswitch)) goto exit; + + /* + * For each active vnode fix things that changed + * + * Note that we can visit a vnode more than once + * and we can race with fsync. 
+ */ + simple_lock(&mntvnode_slock); +loop: + for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { + /* + * If the vnode that we are about to fix is no longer + * associated with this mount point, start over. + */ + if (vp->v_mount != mp) + goto loop; + + simple_lock(&vp->v_interlock); + nvp = vp->v_mntvnodes.le_next; + if (vp->v_flag & VSYSTEM) { + simple_unlock(&vp->v_interlock); + continue; + } + simple_unlock(&mntvnode_slock); + retval = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); + if (retval) { + simple_lock(&mntvnode_slock); + if (retval == ENOENT) + goto loop; + continue; + } + + hp = VTOH(vp); + + INIT_CATALOGDATA(&catInfo.nodeData, 0); + + catInfo.hint = kNoHint; + retval = hfs_getcatalog(vcb, H_DIRID(hp), H_NAME(hp), hp->h_meta->h_namelen, &catInfo); + /* If we couldn't find this guy skip to the next one */ + if (retval) { + if (namefix) + cache_purge(vp); + vput(vp); + simple_lock(&mntvnode_slock); + continue; + } + + H_HINT(hp) = catInfo.hint; + if (permswitch || (permfix && (hp->h_meta->h_metaflags & IN_UNSETACCESS))) { + if ((vcb->vcbSigWord == kHFSPlusSigWord) && (catInfo.nodeData.cnd_mode & IFMT)) { + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + /* + * Override the permissions as determined by the mount auguments + * in ALMOST the same way unset permissions are treated but keep + * track of whether or not the file or folder is hfs locked + * by leaving the h_pflags field unchanged from what was unpacked + * out of the catalog. 
+ */ + hp->h_meta->h_metaflags |= IN_UNSETACCESS; + hp->h_meta->h_uid = VTOHFS(vp)->hfs_uid; + hp->h_meta->h_gid = VTOHFS(vp)->hfs_gid; + } else { + hp->h_meta->h_uid = catInfo.nodeData.cnd_ownerID; + hp->h_meta->h_gid = catInfo.nodeData.cnd_groupID; + }; + hp->h_meta->h_mode = (mode_t)catInfo.nodeData.cnd_mode; + } else { + /* + * Set the permissions as determined by the mount auguments + * but keep in account if the file or folder is hfs locked + */ + hp->h_meta->h_metaflags |= IN_UNSETACCESS; + hp->h_meta->h_uid = VTOHFS(vp)->hfs_uid; + hp->h_meta->h_gid = VTOHFS(vp)->hfs_gid; + + /* Default access is full read/write/execute: */ + hp->h_meta->h_mode = ACCESSPERMS; /* 0777: rwxrwxrwx */ + /* ... but no more than that permitted by the mount point's: */ + if ((hp->h_meta->h_mode & IFMT) == IFDIR) { + hp->h_meta->h_mode &= VTOHFS(vp)->hfs_dir_mask; + } else { + hp->h_meta->h_mode &= VTOHFS(vp)->hfs_file_mask; + } + }; + }; + + /* + * If we're switching name converters then... + * Remove the existing entry from the namei cache. + * Update name to one based on new encoder. + */ + if (namefix) { + cache_purge(vp); + hfs_name_CatToMeta(&catInfo.nodeData, hp->h_meta); + + if (catInfo.nodeData.cnd_nodeID == kHFSRootFolderID) + strncpy(vcb->vcbVN, H_NAME(hp), NAME_MAX); + } + + CLEAN_CATALOGDATA(&catInfo.nodeData); + + vput(vp); + simple_lock(&mntvnode_slock); + + } /* end for (vp...) */ + simple_unlock(&mntvnode_slock); + + +exit: + /* + * If we're switching name converters we can now + * connect the new hfs_get_hfsname converter and + * release our interest in the old converters. 
+ */ + if (namefix) { + u_long old_encoding = hfsmp->hfs_encoding; + + hfsmp->hfs_get_hfsname = get_hfsname_func; + hfsmp->hfs_encoding = args->hfs_encoding; + vcb->volumeNameEncodingHint = args->hfs_encoding; + + (void) hfs_relconverter(old_encoding); + } + + return (0); + +error_exit: + + return (retval); +} + + +/* + * Reload all incore data for a filesystem (used after running fsck on + * the root filesystem and finding things to fix). The filesystem must + * be mounted read-only. + * + * Things to do to update the mount: + * 1) invalidate all cached meta-data. + * 2) re-read volume header from disk. + * 3) re-load meta-file info (extents, file size). + * 4) re-load B-tree header data. + * 5) invalidate all inactive vnodes. + * 6) invalidate all cached file data. + * 7) re-read hfsnode data for all active vnodes. + */ +int +hfs_reload(mountp, cred, p) + register struct mount *mountp; + struct ucred *cred; + struct proc *p; +{ + register struct vnode *vp, *nvp, *devvp; + struct hfsnode *hp; + struct buf *bp; + int size, error, i; + struct hfsmount *hfsmp; + struct HFSPlusVolumeHeader *vhp; + ExtendedVCB *vcb; + FCB *fcb; + + if ((mountp->mnt_flag & MNT_RDONLY) == 0) + return (EINVAL); + + hfsmp = VFSTOHFS(mountp); + vcb = HFSTOVCB(hfsmp); + + if (vcb->vcbSigWord == kHFSSigWord) + return (EINVAL); /* rooting from HFS is not supported! */ + + /* + * Invalidate all cached meta-data. + */ + devvp = hfsmp->hfs_devvp; + if (vinvalbuf(devvp, 0, cred, p, 0, 0)) + panic("hfs_reload: dirty1"); + InvalidateCatalogCache(vcb); + + /* + * Re-read VolumeHeader from disk. 
+ */ + size = kMDBSize; + error = bread( hfsmp->hfs_devvp, + IOBLKNOFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size), + IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size), + NOCRED, + &bp); + if (error) { + if (bp != NULL) + brelse(bp); + return (error); + } + + vhp = (HFSPlusVolumeHeader *) ((char *)bp->b_data + + IOBYTEOFFSETFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size)); + + if ((ValidVolumeHeader(vhp) != 0) || (vcb->blockSize != SWAP_BE32 (vhp->blockSize))) { + brelse(bp); + return (EIO); /* XXX needs translation */ + } + + vcb->vcbLsMod = SWAP_BE32 (vhp->modifyDate); + vcb->vcbAtrb = (UInt16) SWAP_BE32 (vhp->attributes); /* VCB only uses lower 16 bits */ + vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize); + vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID); + vcb->vcbVolBkUp = SWAP_BE32 (vhp->backupDate); + vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount); + vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount); + vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount); + vcb->nextAllocation = SWAP_BE32 (vhp->nextAllocation); + vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks); + vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks); + vcb->checkedDate = SWAP_BE32 (vhp->checkedDate); + vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap); + bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo)); + vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */ + + /* + * Re-load meta-file vnode data (extent info, file size, etc). 
+ */ + fcb = VTOFCB((struct vnode *)vcb->extentsRefNum); + /* bcopy(vhp->extentsFile.extents, fcb->fcbExtents, sizeof(HFSPlusExtentRecord)); */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + fcb->fcbExtents[i].startBlock = SWAP_BE32 (vhp->extentsFile.extents[i].startBlock); + fcb->fcbExtents[i].blockCount = SWAP_BE32 (vhp->extentsFile.extents[i].blockCount); + } + fcb->fcbEOF = SWAP_BE64 (vhp->extentsFile.logicalSize); + fcb->fcbPLen = SWAP_BE32 (vhp->extentsFile.totalBlocks) * vcb->blockSize; + fcb->fcbClmpSize = SWAP_BE32 (vhp->extentsFile.clumpSize); + + fcb = VTOFCB((struct vnode *)vcb->catalogRefNum); + /* bcopy(vhp->catalogFile.extents, fcb->fcbExtents, sizeof(HFSPlusExtentRecord)); */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + fcb->fcbExtents[i].startBlock = SWAP_BE32 (vhp->catalogFile.extents[i].startBlock); + fcb->fcbExtents[i].blockCount = SWAP_BE32 (vhp->catalogFile.extents[i].blockCount); + } + fcb->fcbPLen = SWAP_BE64 (vhp->catalogFile.logicalSize); + fcb->fcbPLen = SWAP_BE32 (vhp->catalogFile.totalBlocks) * vcb->blockSize; + fcb->fcbClmpSize = SWAP_BE32 (vhp->catalogFile.clumpSize); + + fcb = VTOFCB((struct vnode *)vcb->allocationsRefNum); + /* bcopy(vhp->allocationFile.extents, fcb->fcbExtents, sizeof(HFSPlusExtentRecord)); */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + fcb->fcbExtents[i].startBlock = SWAP_BE32 (vhp->allocationFile.extents[i].startBlock); + fcb->fcbExtents[i].blockCount = SWAP_BE32 (vhp->allocationFile.extents[i].blockCount); + } + fcb->fcbEOF = SWAP_BE64 (vhp->allocationFile.logicalSize); + fcb->fcbPLen = SWAP_BE32 (vhp->allocationFile.totalBlocks) * vcb->blockSize; + fcb->fcbClmpSize = SWAP_BE32 (vhp->allocationFile.clumpSize); + + brelse(bp); + vhp = NULL; + + /* + * Re-load B-tree header data + */ + fcb = VTOFCB((struct vnode *)vcb->extentsRefNum); + if (error = MacToVFSError( BTReloadData(fcb) )) + return (error); + + fcb = VTOFCB((struct vnode *)vcb->catalogRefNum); + if (error = MacToVFSError( BTReloadData(fcb) 
)) + return (error); + + /* Now that the catalog is ready, get the volume name */ + /* also picks up the create date in GMT */ + if ((error = MacToVFSError( GetVolumeNameFromCatalog(vcb) ))) + return (error); + + /* Re-establish private/hidden directory for unlinked files */ + hfsmp->hfs_private_metadata_dir = FindMetaDataDirectory(vcb); + +loop: + simple_lock(&mntvnode_slock); + for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { + if (vp->v_mount != mountp) { + simple_unlock(&mntvnode_slock); + goto loop; + } + nvp = vp->v_mntvnodes.le_next; + + /* + * Invalidate all inactive vnodes. + */ + if (vrecycle(vp, &mntvnode_slock, p)) + goto loop; + + /* + * Invalidate all cached file data. + */ + simple_lock(&vp->v_interlock); + simple_unlock(&mntvnode_slock); + if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) { + goto loop; + } + if (vinvalbuf(vp, 0, cred, p, 0, 0)) + panic("hfs_reload: dirty2"); + + /* + * Re-read hfsnode data for all active vnodes (non-metadata files). + */ + hp = VTOH(vp); + if ((vp->v_flag & VSYSTEM) == 0) { + hfsCatalogInfo catInfo; + + /* lookup by fileID since name could have changed */ + catInfo.hint = kNoHint; + INIT_CATALOGDATA(&catInfo.nodeData, 0); + + if ((error = hfs_getcatalog(vcb, H_FILEID(hp), NULL, -1, &catInfo))) { + vput(vp); + CLEAN_CATALOGDATA(&catInfo.nodeData); + return (error); + } + + H_HINT(hp) = catInfo.hint; + if (hp->h_meta->h_metaflags & IN_LONGNAME) + FREE(H_NAME(hp), M_TEMP); + H_NAME(hp) = NULL; + hp->h_meta->h_namelen = 0; + CopyCatalogToObjectMeta(&catInfo, vp, hp->h_meta); + CopyCatalogToFCB(&catInfo, vp); + + CLEAN_CATALOGDATA(&catInfo.nodeData); + } + + vput(vp); + simple_lock(&mntvnode_slock); + } + simple_unlock(&mntvnode_slock); + + return (0); +} + + +/* + * Common code for mount and mountroot + */ +int +hfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p, struct hfs_mount_args *args) +{ + int retval = E_NONE; + register struct hfsmount *hfsmp; + struct buf *bp; + dev_t dev; + 
HFSMasterDirectoryBlock *mdbp; + int ronly; + struct ucred *cred; + u_long diskBlks; + u_long blksize; + DBG_VFS(("hfs_mountfs: mp = 0x%lX\n", (u_long)mp)); + + dev = devvp->v_rdev; + cred = p ? p->p_ucred : NOCRED; + /* + * Disallow multiple mounts of the same device. + * Disallow mounting of a device that is currently in use + * (except for root, which might share swap device for miniroot). + * Flush out any old buffers remaining from a previous use. + */ + if ((retval = vfs_mountedon(devvp))) + return (retval); + if ((vcount(devvp) > 1) && (devvp != rootvp)) + return (EBUSY); + if ((retval = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0))) + return (retval); + + ronly = (mp->mnt_flag & MNT_RDONLY) != 0; + DBG_VFS(("hfs_mountfs: opening device...\n")); + if ((retval = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p))) + return (retval); + + blksize = kHFSBlockSize; + DBG_VFS(("hfs_mountfs: size = %d (DEV_BSIZE = %d).\n", blksize, DEV_BSIZE)); + + bp = NULL; + hfsmp = NULL; + + /* + * XXX SER Currently we only support 512 block size systems. This might change + * So this is a place holder to remind us that the mdb might not be 512 aligned + * retval = VOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, &blksize, FWRITE, cred, p); + * if (retval) return retval; + */ + + /* + * the next three lines should probably be replaced + * with a call to the yet unimplemented function VOP_SETBLOCKSIZE + */ + retval = VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &blksize, FWRITE, cred, p); + if (retval) return retval; + devvp->v_specsize = blksize; + + DBG_VFS(("hfs_mountfs: reading MDB [block no. 
%d + %d bytes, size %d bytes]...\n", + IOBLKNOFORBLK(kMasterDirectoryBlock, blksize), + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, blksize), + IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, blksize))); + + if ((retval = bread(devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, blksize), + IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, blksize), cred, &bp))) { + goto error_exit; + }; + mdbp = (HFSMasterDirectoryBlock*) ((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, blksize)); + + MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK); + bzero(hfsmp, sizeof(struct hfsmount)); + + simple_lock_init(&hfsmp->hfs_renamelock); + + DBG_VFS(("hfs_mountfs: Initializing hfsmount structure at 0x%lX...\n", (u_long)hfsmp)); + /* + * Init the volume information structure + */ + mp->mnt_data = (qaddr_t)hfsmp; + hfsmp->hfs_mp = mp; /* Make VFSTOHFS work */ + hfsmp->hfs_vcb.vcb_hfsmp = hfsmp; /* Make VCBTOHFS work */ + hfsmp->hfs_raw_dev = devvp->v_rdev; + hfsmp->hfs_devvp = devvp; + hfsmp->hfs_phys_block_size = blksize; + + /* The hfs_log_block_size field is updated in the respective hfs_MountHFS[Plus]Volume routine */ + hfsmp->hfs_logBlockSize = BestBlockSizeFit(SWAP_BE32 (mdbp->drAlBlkSiz), MAXBSIZE, hfsmp->hfs_phys_block_size); + hfsmp->hfs_fs_ronly = ronly; + hfsmp->hfs_unknownpermissions = ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) != 0); + if (args) { + hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid; + if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID; + hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? 
UNKNOWNGID : args->hfs_gid; + if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID; + if (args->hfs_mask != (mode_t)VNOVAL) { + hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS; + if (args->flags & HFSFSMNT_NOXONFILES) { + hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE); + } else { + hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS; + } + } else { + hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */ + hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */ + }; + } else { + /* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */ + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + hfsmp->hfs_uid = UNKNOWNUID; + hfsmp->hfs_gid = UNKNOWNGID; + hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS; /* 0777: rwx---rwx */ + hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE; /* 0666: no --x by default? */ + }; + }; + + /* See above comment for DKIOCGETBLOCKSIZE + * retval = VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &blksize, FWRITE, cred, p); + * if (retval) return retval; + */ + + retval = VOP_IOCTL(devvp, DKIOCNUMBLKS, (caddr_t)&diskBlks, 0, cred, p); + if (retval) return retval; + + if (SWAP_BE16 (mdbp->drSigWord) == kHFSPlusSigWord) { + /* Enidan swap volume header in place */ + /* SWAP_HFS_PLUS_VOLUME_HEADER ((HFSPlusVolumeHeader *)bp->b_data); */ + + /* mount wrapper-less HFS-Plus volume */ + (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname); + retval = hfs_MountHFSPlusVolume(hfsmp, (HFSPlusVolumeHeader*) bp->b_data, 0, diskBlks, p); + + /* Enidan un-swap volume header in place */ + /* SWAP_HFS_PLUS_VOLUME_HEADER ((HFSPlusVolumeHeader *)bp->b_data); */ + + } else if (SWAP_BE16 (mdbp->drEmbedSigWord) == kHFSPlusSigWord) { + u_long embBlkOffset; + HFSPlusVolumeHeader *vhp; + + embBlkOffset = SWAP_BE16 (mdbp->drAlBlSt) + + (SWAP_BE16 (mdbp->drEmbedExtent.startBlock) * (SWAP_BE32 (mdbp->drAlBlkSiz)/kHFSBlockSize)); + /* calculate 
virtual number of 512-byte sectors */ + diskBlks = SWAP_BE16 (mdbp->drEmbedExtent.blockCount) * (SWAP_BE32 (mdbp->drAlBlkSiz)/kHFSBlockSize); + + brelse(bp); + bp = NULL; /* done with MDB, go grab Volume Header */ + mdbp = NULL; + + retval = bread( devvp, + IOBLKNOFORBLK(kMasterDirectoryBlock+embBlkOffset, blksize), + IOBYTECCNTFORBLK(kMasterDirectoryBlock+embBlkOffset, kMDBSize, blksize), + cred, + &bp); + if (retval) { + goto error_exit; + }; + vhp = (HFSPlusVolumeHeader*) ((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, blksize)); + + /* Enidan swap volume header in place */ + /* SWAP_HFS_PLUS_VOLUME_HEADER (vhp); */ + + /* mount embedded HFS Plus volume */ + (void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname); + retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embBlkOffset, diskBlks, p); + + /* Enidan un-swap volume header in place */ + /* SWAP_HFS_PLUS_VOLUME_HEADER (vhp); */ + + } else if (devvp != rootvp) { + if (args) { + hfsmp->hfs_encoding = args->hfs_encoding; + HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding; + + + /* establish the timezone */ + gTimeZone = args->hfs_timezone; + } + + retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname); + if (retval) goto error_exit; + + /* mount HFS volume */ + retval = hfs_MountHFSVolume( hfsmp, mdbp, diskBlks, p); + + if (retval) + (void) hfs_relconverter(hfsmp->hfs_encoding); + + } else { + /* sorry, we cannot root from HFS */ + retval = EINVAL; + } + + if ( retval ) { + goto error_exit; + } + + brelse(bp); + bp = NULL; + + mp->mnt_stat.f_fsid.val[0] = (long)dev; + mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; + mp->mnt_maxsymlinklen = 0; + devvp->v_specflags |= SI_MOUNTEDON; + + if (ronly == 0) { + hfsmp->hfs_fs_clean = 0; + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord) + (void) hfs_flushvolumeheader(hfsmp, MNT_WAIT); + else + (void) hfs_flushMDB(hfsmp, MNT_WAIT); + } + goto std_exit; + +error_exit: + 
DBG_VFS(("hfs_mountfs: exiting with error %d...\n", retval)); + + if (bp) + brelse(bp); + (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p); + if (hfsmp) { + FREE(hfsmp, M_HFSMNT); + mp->mnt_data = (qaddr_t)0; + } + +std_exit: + return (retval); +} + + +/* + * Make a filesystem operational. + * Nothing to do at the moment. + */ +/* ARGSUSED */ +int hfs_start(mp, flags, p) +struct mount *mp; +int flags; +struct proc *p; +{ + DBG_FUNC_NAME("hfs_start"); + DBG_PRINT_FUNC_NAME(); + + return (0); +} + + +/* + * unmount system call + */ +int +hfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct hfsmount *hfsmp = VFSTOHFS(mp); + int retval = E_NONE; + int flags; + + flags = 0; + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + + if ((retval = hfs_flushfiles(mp, flags))) + return (retval); + + /* + * Flush out the b-trees, volume bitmap and Volume Header + */ + if (hfsmp->hfs_fs_ronly == 0) { + retval = VOP_FSYNC(HFSTOVCB(hfsmp)->catalogRefNum, NOCRED, MNT_WAIT, p); + if (retval && ((mntflags & MNT_FORCE) == 0)) + return (retval); + + retval = VOP_FSYNC(HFSTOVCB(hfsmp)->extentsRefNum, NOCRED, MNT_WAIT, p); + if (retval && ((mntflags & MNT_FORCE) == 0)) + return (retval); + + if (retval = VOP_FSYNC(hfsmp->hfs_devvp, NOCRED, MNT_WAIT, p)) { + if ((mntflags & MNT_FORCE) == 0) + return (retval); + } + + /* See if this volume is damaged, is so do not unmount cleanly */ + if (HFSTOVCB(hfsmp)->vcbFlags & kHFS_DamagedVolume) { + hfsmp->hfs_fs_clean = 0; + HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; + } else { + hfsmp->hfs_fs_clean = 1; + HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask; + } + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSPlusSigWord) + retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT); + else + retval = hfs_flushMDB(hfsmp, MNT_WAIT); + + if (retval) { + hfsmp->hfs_fs_clean = 0; + HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask; + if ((mntflags & MNT_FORCE) == 0) + return (retval); /* could not flush 
everything */ + } + } + + /* + * Invalidate our caches and release metadata vnodes + */ + (void) hfsUnmount(hfsmp, p); + + if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) + (void) hfs_relconverter(hfsmp->hfs_encoding); + + hfsmp->hfs_devvp->v_specflags &= ~SI_MOUNTEDON; + retval = VOP_CLOSE(hfsmp->hfs_devvp, hfsmp->hfs_fs_ronly ? FREAD : FREAD|FWRITE, + NOCRED, p); + vrele(hfsmp->hfs_devvp); + + FREE(hfsmp, M_HFSMNT); + mp->mnt_data = (qaddr_t)0; + + return (retval); +} + + +/* + * Return the root of a filesystem. + * + * OUT - vpp, should be locked and vget()'d (to increment usecount and lock) + */ +int hfs_root(mp, vpp) +struct mount *mp; +struct vnode **vpp; +{ + struct vnode *nvp; + int retval; + UInt32 rootObjID = kRootDirID; + + DBG_FUNC_NAME("hfs_root"); + DBG_PRINT_FUNC_NAME(); + + if ((retval = VFS_VGET(mp, &rootObjID, &nvp))) + return (retval); + + *vpp = nvp; + return (0); +} + + +/* + * Do operations associated with quotas + */ +int hfs_quotactl(mp, cmds, uid, arg, p) +struct mount *mp; +int cmds; +uid_t uid; +caddr_t arg; +struct proc *p; +{ + DBG_FUNC_NAME("hfs_quotactl"); + DBG_PRINT_FUNC_NAME(); + + return (EOPNOTSUPP); +} + + +/* + * Get file system statistics. 
+ */ +static int +hfs_statfs(mp, sbp, p) + struct mount *mp; + register struct statfs *sbp; + struct proc *p; +{ + ExtendedVCB *vcb = VFSTOVCB(mp); + struct hfsmount *hfsmp = VFSTOHFS(mp); + u_long freeCNIDs; + + DBG_FUNC_NAME("hfs_statfs"); + DBG_PRINT_FUNC_NAME(); + + freeCNIDs = (u_long)0xFFFFFFFF - (u_long)vcb->vcbNxtCNID; + + sbp->f_bsize = vcb->blockSize; + sbp->f_iosize = hfsmp->hfs_logBlockSize; + sbp->f_blocks = vcb->totalBlocks; + sbp->f_bfree = vcb->freeBlocks; + sbp->f_bavail = vcb->freeBlocks; + sbp->f_files = vcb->totalBlocks - 2; /* max files is constrained by total blocks */ + sbp->f_ffree = MIN(freeCNIDs, vcb->freeBlocks); + + sbp->f_type = 0; + if (sbp != &mp->mnt_stat) { + sbp->f_type = mp->mnt_vfc->vfc_typenum; + bcopy((caddr_t)mp->mnt_stat.f_mntonname, + (caddr_t)&sbp->f_mntonname[0], MNAMELEN); + bcopy((caddr_t)mp->mnt_stat.f_mntfromname, + (caddr_t)&sbp->f_mntfromname[0], MNAMELEN); + } + return (0); +} + + +/* + * Go through the disk queues to initiate sandbagged IO; + * go through the inodes to write those that have been modified; + * initiate the writing of the super block if it has been modified. + * + * Note: we are always called with the filesystem marked `MPBUSY'. 
+ */ +static int hfs_sync(mp, waitfor, cred, p) +struct mount *mp; +int waitfor; +struct ucred *cred; +struct proc *p; +{ + struct vnode *nvp, *vp; + struct hfsnode *hp; + struct hfsmount *hfsmp = VFSTOHFS(mp); + ExtendedVCB *vcb; + int error, allerror = 0; + + DBG_FUNC_NAME("hfs_sync"); + DBG_PRINT_FUNC_NAME(); + + /* + * During MNT_UPDATE hfs_changefs might be manipulating + * vnodes so back off + */ + if (mp->mnt_flag & MNT_UPDATE) + return (0); + + hfsmp = VFSTOHFS(mp); + if (hfsmp->hfs_fs_ronly != 0) { + panic("update: rofs mod"); + }; + + /* + * Write back each 'modified' vnode + */ + +loop:; + simple_lock(&mntvnode_slock); + for (vp = mp->mnt_vnodelist.lh_first; + vp != NULL; + vp = nvp) { + /* + * If the vnode that we are about to sync is no longer + * associated with this mount point, start over. + */ + if (vp->v_mount != mp) { + simple_unlock(&mntvnode_slock); + goto loop; + } + simple_lock(&vp->v_interlock); + nvp = vp->v_mntvnodes.le_next; + hp = VTOH(vp); + + if ((vp->v_type == VNON) || (((hp->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) && + (vp->v_dirtyblkhd.lh_first == NULL) && !(vp->v_flag & VHASDIRTY))) { + simple_unlock(&vp->v_interlock); + simple_unlock(&mntvnode_slock); + simple_lock(&mntvnode_slock); + continue; + } + + simple_unlock(&mntvnode_slock); + error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); + if (error) { + if (error == ENOENT) + goto loop; + simple_lock(&mntvnode_slock); + continue; + } + + if ((error = VOP_FSYNC(vp, cred, waitfor, p))) { + DBG_ERR(("hfs_sync: error %d calling fsync on vnode 0x%X.\n", error, (u_int)vp)); + allerror = error; + }; + DBG_ASSERT(*((volatile int *)(&(vp)->v_interlock))==0); + vput(vp); + simple_lock(&mntvnode_slock); + }; + + vcb = HFSTOVCB(hfsmp); + + /* Now reprocess the BTree node, stored above */ + { + struct vnode *btvp; + /* + * If the vnode that we are about to sync is no longer + * associated with this mount point, start over. 
+ */ + btvp = vcb->extentsRefNum; + if ((btvp==0) || (btvp->v_type == VNON) || (btvp->v_mount != mp)) + goto skipBtree; + simple_lock(&btvp->v_interlock); + hp = VTOH(btvp); + if (((hp->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) && + (btvp->v_dirtyblkhd.lh_first == NULL) && !(btvp->v_flag & VHASDIRTY)) { + simple_unlock(&btvp->v_interlock); + goto skipBtree; + } + simple_unlock(&mntvnode_slock); + error = vget(btvp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); + if (error) { + simple_lock(&mntvnode_slock); + goto skipBtree; + } + if ((error = VOP_FSYNC(btvp, cred, waitfor, p))) + allerror = error; + VOP_UNLOCK(btvp, 0, p); + vrele(btvp); + simple_lock(&mntvnode_slock); + }; + +skipBtree:; + + simple_unlock(&mntvnode_slock); + + /* + * Force stale file system control information to be flushed. + */ + if ((error = VOP_FSYNC(hfsmp->hfs_devvp, cred, waitfor, p))) + allerror = error; + /* + * Write back modified superblock. + */ + + if (IsVCBDirty(vcb)) { + if (vcb->vcbSigWord == kHFSPlusSigWord) + error = hfs_flushvolumeheader(hfsmp, waitfor); + else + error = hfs_flushMDB(hfsmp, waitfor); + + if (error) + allerror = error; + }; + + return (allerror); +} + + +/* + * File handle to vnode + * + * Have to be really careful about stale file handles: + * - check that the hfsnode number is valid + * - call hfs_vget() to get the locked hfsnode + * - check for an unallocated hfsnode (i_mode == 0) + * - check that the given client host has export rights and return + * those rights via. exflagsp and credanonp + */ +int +hfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) +register struct mount *mp; +struct fid *fhp; +struct mbuf *nam; +struct vnode **vpp; +int *exflagsp; +struct ucred **credanonp; +{ + struct hfsfid *hfsfhp; + struct vnode *nvp; + int result; + struct netcred *np; + DBG_FUNC_NAME("hfs_fhtovp"); + DBG_PRINT_FUNC_NAME(); + + *vpp = NULL; + hfsfhp = (struct hfsfid *)fhp; + + /* + * Get the export permission structure for this tuple. 
+ */ + np = vfs_export_lookup(mp, &VFSTOHFS(mp)->hfs_export, nam); + if (np == NULL) { + return EACCES; + }; + + result = VFS_VGET(mp, &hfsfhp->hfsfid_cnid, &nvp); + if (result) return result; + if (nvp == NULL) return ESTALE; + + if ((hfsfhp->hfsfid_gen != VTOH(nvp)->h_meta->h_crtime)) { + vput(nvp); + return ESTALE; + }; + + *vpp = nvp; + *exflagsp = np->netc_exflags; + *credanonp = &np->netc_anon; + + return 0; +} + + +/* + * Vnode pointer to File handle + */ +/* ARGSUSED */ +static int hfs_vptofh(vp, fhp) +struct vnode *vp; +struct fid *fhp; +{ + struct hfsnode *hp; + struct hfsfid *hfsfhp; + struct proc *p = current_proc(); + int result; + u_int32_t fileID; + DBG_FUNC_NAME("hfs_vptofh"); + DBG_PRINT_FUNC_NAME(); + + hp = VTOH(vp); + hfsfhp = (struct hfsfid *)fhp; + + /* If a file handle is requested for a file on an HFS volume we must be sure + to create the thread record before returning the object id in the filehandle + to make sure the file can be retrieved by fileid if necessary: + */ + if ((vp->v_type == VREG) && ISHFS(VTOVCB(vp))) { + /* Create a thread record and return the FileID [which is the file's fileNumber] */ + /* lock catalog b-tree */ + if ((result = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_EXCLUSIVE, p)) != 0) return result; + result = hfsCreateFileID(VTOVCB(vp), H_DIRID(hp), H_NAME(hp), H_HINT(hp), &fileID); + (void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, p); + if (result) { + DBG_ERR(("hfs_vptofh: error %d on CreateFileIDRef.\n", result)); + return result; + }; + DBG_ASSERT(fileID == H_FILEID(hp)); + }; + + hfsfhp->hfsfid_len = sizeof(struct hfsfid); + hfsfhp->hfsfid_pad = 0; + hfsfhp->hfsfid_cnid = H_FILEID(hp); + hfsfhp->hfsfid_gen = hp->h_meta->h_crtime; + + return 0; +} + + +/* + * Initial HFS filesystems, done only once. 
+ */ +int +hfs_init(vfsp) +struct vfsconf *vfsp; +{ + int i; + static int done = 0; + OSErr err; + + DBG_FUNC_NAME("hfs_init"); + DBG_PRINT_FUNC_NAME(); + + if (done) + return (0); + done = 1; + hfs_vhashinit(); + hfs_converterinit(); + + simple_lock_init (&gBufferPtrListLock); + + for (i = BUFFERPTRLISTSIZE - 1; i >= 0; --i) { + gBufferAddress[i] = NULL; + gBufferHeaderPtr[i] = NULL; + }; + gBufferListIndex = 0; + + /* + * Do any initialization that the MacOS/MacOS X shared code relies on + * (normally done as part of MacOS's startup): + */ + MALLOC(gFSMVars, FSVarsRec *, sizeof(FSVarsRec), M_TEMP, M_WAITOK); + bzero(gFSMVars, sizeof(FSVarsRec)); + + /* + * Allocate Catalog Iterator cache... + */ + err = InitCatalogCache(); +#if HFS_DIAGNOSTIC + if (err) panic("hfs_init: Error returned from InitCatalogCache() call."); +#endif + /* + * XXX do we need to setup the following? + * + * GMT offset, Unicode globals, CatSearch Buffers, BTSscanner + */ + + return E_NONE; +} + + +/* + * fast filesystem related variables. + */ +static int hfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) +int *name; +u_int namelen; +void *oldp; +size_t *oldlenp; +void *newp; +size_t newlen; +struct proc *p; +{ + DBG_FUNC_NAME("hfs_sysctl"); + DBG_PRINT_FUNC_NAME(); + + return (EOPNOTSUPP); +} + + +/* This will return a vnode of either a directory or a data vnode based on an object id. If + * it is a file id, its data fork will be returned. 
 */
int
hfs_vget(struct mount *mp,
	void *ino,
	struct vnode **vpp)
{
	struct hfsmount *hfsmp;
	dev_t dev;
	int retval = E_NONE;

	DBG_VFS(("hfs_vget: ino = %ld\n", *(UInt32 *)ino));

	/* Check if unmount in progress */
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		*vpp = NULL;
		return (EPERM);
	}

	hfsmp = VFSTOHFS(mp);
	dev = hfsmp->hfs_raw_dev;

	/* First check to see if it is in the cache */
	*vpp = hfs_vhashget(dev, *(UInt32 *)ino, kDefault);

	/* hide open files that have been deleted */
	/* NOTE(review): the condition relies on && binding tighter than ||,
	   i.e. A || (B && C); that matches the equivalent check in the lookup
	   path below, but explicit parentheses would make it unmistakable. */
	if (*vpp != NULL) {
		if ((VTOH(*vpp)->h_meta->h_metaflags & IN_NOEXISTS) ||
			(hfsmp->hfs_private_metadata_dir != 0) &&
			(H_DIRID(VTOH(*vpp)) == hfsmp->hfs_private_metadata_dir)) {
			vput(*vpp);
			retval = ENOENT;
			goto Err_Exit;
		}
	}

	/* The vnode is not in the cache, so lets make it */
	if (*vpp == NULL)
	{
		hfsCatalogInfo catInfo;
		struct proc *p = current_proc();
		UInt8 forkType;

		INIT_CATALOGDATA(&catInfo.nodeData, 0);
		catInfo.hint = kNoHint;
		/* Special-case the root's parent directory (DirID = 1) because
		   it doesn't actually exist in the catalog: */
		if ((*vpp == NULL) && (*(UInt32 *)ino == kRootParID)) {
			bzero(&catInfo, sizeof(catInfo));
			catInfo.nodeData.cnd_type = kCatalogFolderNode;
			catInfo.nodeData.cnm_nameptr = catInfo.nodeData.cnm_namespace;
			catInfo.nodeData.cnm_namespace[0] = '/';
			catInfo.nodeData.cnm_length = 1;
			catInfo.nodeData.cnd_nodeID = kRootParID;
			catInfo.nodeData.cnm_parID = kRootParID;
			catInfo.nodeData.cnd_valence = 1;
			catInfo.nodeData.cnd_ownerID = 0;
			catInfo.nodeData.cnd_groupID = 0;
			catInfo.nodeData.cnd_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
		} else {

			/* lock catalog b-tree (shared: read-only lookup) */
			retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
			if (retval != E_NONE) goto Lookup_Err_Exit;

			retval = hfs_getcatalog(VFSTOVCB(mp), *(UInt32 *)ino, NULL, -1, &catInfo);

			/* unlock catalog b-tree */
			(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

			if (retval != E_NONE) goto Lookup_Err_Exit;

			/* hide open files that have been deleted */
			if ((hfsmp->hfs_private_metadata_dir != 0) &&
				(catInfo.nodeData.cnm_parID == hfsmp->hfs_private_metadata_dir)) {
				retval = ENOENT;
				goto Lookup_Err_Exit;
			};
		};

		/* Folders get a directory vnode; anything else gets its data fork. */
		forkType = (catInfo.nodeData.cnd_type == kCatalogFolderNode) ? kDirectory : kDataFork;
		retval = hfs_vcreate(VFSTOVCB(mp), &catInfo, forkType, vpp);

Lookup_Err_Exit:
		CLEAN_CATALOGDATA(&catInfo.nodeData);
	};

	UBCINFOCHECK("hfs_vget", *vpp);

Err_Exit:

	if (retval != E_NONE) {
		DBG_VFS(("hfs_vget: Error returned of %d\n", retval));
	}
	else {
		DBG_VFS(("hfs_vget: vp = 0x%x\n", (u_int)*vpp));
	}

	return (retval);

}

/*
 * Flush out all the files in a filesystem.
 */
int
hfs_flushfiles(struct mount *mp, int flags)
{
	int error;

	/* Two passes: first skip system and swap files, then retry skipping
	   only system files.
	   NOTE(review): the first vflush()'s error is overwritten by the
	   second call -- confirm discarding it is intentional. */
	error = vflush(mp, NULLVP, (SKIPSYSTEM | SKIPSWAP | flags));
	error = vflush(mp, NULLVP, (SKIPSYSTEM | flags));

	return (error);
}

/*
 * Write the in-memory VCB back to the on-disk HFS Master Directory Block.
 * HFS (not Plus) volumes only; dates are stored on disk in local time.
 */
short hfs_flushMDB(struct hfsmount *hfsmp, int waitfor)
{
	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
	FCB *fcb;
	HFSMasterDirectoryBlock *mdb;
	struct buf *bp;
	int retval;
	int size = kMDBSize;	/* 512 */
	ByteCount namelen;

	/* Only plain HFS volumes have an MDB. */
	if (vcb->vcbSigWord != kHFSSigWord)
		return EINVAL;

	DBG_ASSERT(hfsmp->hfs_devvp != NULL);

	retval = bread(hfsmp->hfs_devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, size),
		IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size), NOCRED, &bp);
	if (retval) {
		DBG_VFS((" hfs_flushMDB bread return error! 
(%d)\n", retval)); + if (bp) brelse(bp); + return retval; + } + + DBG_ASSERT(bp != NULL); + DBG_ASSERT(bp->b_data != NULL); + DBG_ASSERT(bp->b_bcount == size); + + mdb = (HFSMasterDirectoryBlock *)((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, size)); + + VCB_LOCK(vcb); + mdb->drCrDate = SWAP_BE32 (UTCToLocal(vcb->vcbCrDate)); + mdb->drLsMod = SWAP_BE32 (UTCToLocal(vcb->vcbLsMod)); + mdb->drAtrb = SWAP_BE16 (vcb->vcbAtrb); + mdb->drNmFls = SWAP_BE16 (vcb->vcbNmFls); + mdb->drAllocPtr = SWAP_BE16 (vcb->nextAllocation); + mdb->drClpSiz = SWAP_BE32 (vcb->vcbClpSiz); + mdb->drNxtCNID = SWAP_BE32 (vcb->vcbNxtCNID); + mdb->drFreeBks = SWAP_BE16 (vcb->freeBlocks); + + namelen = strlen(vcb->vcbVN); + retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN); + /* Retry with MacRoman in case that's how it was exported. */ + if (retval) + retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN); + + mdb->drVolBkUp = SWAP_BE32 (UTCToLocal(vcb->vcbVolBkUp)); + mdb->drVSeqNum = SWAP_BE16 (vcb->vcbVSeqNum); + mdb->drWrCnt = SWAP_BE32 (vcb->vcbWrCnt); + mdb->drNmRtDirs = SWAP_BE16 (vcb->vcbNmRtDirs); + mdb->drFilCnt = SWAP_BE32 (vcb->vcbFilCnt); + mdb->drDirCnt = SWAP_BE32 (vcb->vcbDirCnt); + + bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo)); + + fcb = VTOFCB(vcb->extentsRefNum); + /* HFSPlusToHFSExtents(fcb->fcbExtents, mdb->drXTExtRec); */ + mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fcb->fcbExtents[0].startBlock); + mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fcb->fcbExtents[0].blockCount); + mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fcb->fcbExtents[1].startBlock); + mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fcb->fcbExtents[1].blockCount); + mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fcb->fcbExtents[2].startBlock); + mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fcb->fcbExtents[2].blockCount); + + mdb->drXTFlSize = SWAP_BE32 (fcb->fcbPLen); + mdb->drXTClpSiz = SWAP_BE32 (fcb->fcbClmpSize); + + fcb = VTOFCB(vcb->catalogRefNum); + /* 
HFSPlusToHFSExtents(fcb->fcbExtents, mdb->drCTExtRec); */ + mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fcb->fcbExtents[0].startBlock); + mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fcb->fcbExtents[0].blockCount); + mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fcb->fcbExtents[1].startBlock); + mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fcb->fcbExtents[1].blockCount); + mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fcb->fcbExtents[2].startBlock); + mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fcb->fcbExtents[2].blockCount); + + mdb->drCTFlSize = SWAP_BE32 (fcb->fcbPLen); + mdb->drCTClpSiz = SWAP_BE32 (fcb->fcbClmpSize); + VCB_UNLOCK(vcb); + + if (waitfor != MNT_WAIT) + bawrite(bp); + else + retval = VOP_BWRITE(bp); + + MarkVCBClean( vcb ); + + return (retval); +} + + +short hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor) +{ + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + FCB *fcb; + HFSPlusVolumeHeader *volumeHeader; + int retval; + int size = sizeof(HFSPlusVolumeHeader); + struct buf *bp; + int i; + + if (vcb->vcbSigWord != kHFSPlusSigWord) + return EINVAL; + + retval = bread(hfsmp->hfs_devvp, IOBLKNOFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size), + IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size), NOCRED, &bp); + if (retval) { + DBG_VFS((" hfs_flushvolumeheader bread return error! 
(%d)\n", retval)); + if (bp) brelse(bp); + return retval; + } + + DBG_ASSERT(bp != NULL); + DBG_ASSERT(bp->b_data != NULL); + DBG_ASSERT(bp->b_bcount == size); + + volumeHeader = (HFSPlusVolumeHeader *)((char *)bp->b_data + + IOBYTEOFFSETFORBLK((vcb->hfsPlusIOPosOffset / 512) + kMasterDirectoryBlock, size)); + + /* + * For embedded HFS+ volumes, update create date if it changed + * (ie from a setattrlist call) + */ + if ((vcb->hfsPlusIOPosOffset != 0) && (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) + { + struct buf *bp2; + HFSMasterDirectoryBlock *mdb; + + retval = bread(hfsmp->hfs_devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, kMDBSize), + IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, kMDBSize), NOCRED, &bp2); + if (retval != E_NONE) { + if (bp2) brelse(bp2); + } else { + mdb = (HFSMasterDirectoryBlock *)((char *)bp2->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, kMDBSize)); + + if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate ) + { + mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate); /* pick up the new create date */ + + (void) VOP_BWRITE(bp2); /* write out the changes */ + } + else + { + brelse(bp2); /* just release it */ + } + } + } + + VCB_LOCK(vcb); + /* Note: only update the lower 16 bits worth of attributes */ + volumeHeader->attributes = SWAP_BE32 ((SWAP_BE32 (volumeHeader->attributes) & 0xFFFF0000) + (UInt16) vcb->vcbAtrb); + volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion); + volumeHeader->createDate = SWAP_BE32 (vcb->localCreateDate); /* volume create date is in local time */ + volumeHeader->modifyDate = SWAP_BE32 (vcb->vcbLsMod); + volumeHeader->backupDate = SWAP_BE32 (vcb->vcbVolBkUp); + volumeHeader->checkedDate = SWAP_BE32 (vcb->checkedDate); + volumeHeader->fileCount = SWAP_BE32 (vcb->vcbFilCnt); + volumeHeader->folderCount = SWAP_BE32 (vcb->vcbDirCnt); + volumeHeader->freeBlocks = SWAP_BE32 (vcb->freeBlocks); + volumeHeader->nextAllocation = SWAP_BE32 (vcb->nextAllocation); + 
volumeHeader->rsrcClumpSize = SWAP_BE32 (vcb->vcbClpSiz); + volumeHeader->dataClumpSize = SWAP_BE32 (vcb->vcbClpSiz); + volumeHeader->nextCatalogID = SWAP_BE32 (vcb->vcbNxtCNID); + volumeHeader->writeCount = SWAP_BE32 (vcb->vcbWrCnt); + volumeHeader->encodingsBitmap = SWAP_BE64 (vcb->encodingsBitmap); + + bcopy( vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo) ); + + VCB_UNLOCK(vcb); + + fcb = VTOFCB(vcb->extentsRefNum); + /* bcopy( fcb->fcbExtents, volumeHeader->extentsFile.extents, sizeof(HFSPlusExtentRecord) ); */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + volumeHeader->extentsFile.extents[i].startBlock = SWAP_BE32 (fcb->fcbExtents[i].startBlock); + volumeHeader->extentsFile.extents[i].blockCount = SWAP_BE32 (fcb->fcbExtents[i].blockCount); + } + + fcb->fcbFlags &= ~fcbModifiedMask; + volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fcb->fcbEOF); + volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fcb->fcbPLen / vcb->blockSize); + volumeHeader->extentsFile.clumpSize = SWAP_BE32 (fcb->fcbClmpSize); + + fcb = VTOFCB(vcb->catalogRefNum); + /* bcopy( fcb->fcbExtents, volumeHeader->catalogFile.extents, sizeof(HFSPlusExtentRecord) ); */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + volumeHeader->catalogFile.extents[i].startBlock = SWAP_BE32 (fcb->fcbExtents[i].startBlock); + volumeHeader->catalogFile.extents[i].blockCount = SWAP_BE32 (fcb->fcbExtents[i].blockCount); + } + + fcb->fcbFlags &= ~fcbModifiedMask; + volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fcb->fcbEOF); + volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fcb->fcbPLen / vcb->blockSize); + volumeHeader->catalogFile.clumpSize = SWAP_BE32 (fcb->fcbClmpSize); + + fcb = VTOFCB(vcb->allocationsRefNum); + /* bcopy( fcb->fcbExtents, volumeHeader->allocationFile.extents, sizeof(HFSPlusExtentRecord) ); */ + for (i = 0; i < kHFSPlusExtentDensity; i++) { + volumeHeader->allocationFile.extents[i].startBlock = SWAP_BE32 (fcb->fcbExtents[i].startBlock); + 
volumeHeader->allocationFile.extents[i].blockCount = SWAP_BE32 (fcb->fcbExtents[i].blockCount); + } + + fcb->fcbFlags &= ~fcbModifiedMask; + volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fcb->fcbEOF); + volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fcb->fcbPLen / vcb->blockSize); + volumeHeader->allocationFile.clumpSize = SWAP_BE32 (fcb->fcbClmpSize); + + if (waitfor != MNT_WAIT) + bawrite(bp); + else + retval = VOP_BWRITE(bp); + + MarkVCBClean( vcb ); + + return (retval); +} + + +/* + * Moved here to avoid having to define prototypes + */ + +/* + * hfs vfs operations. + */ +struct vfsops hfs_vfsops = { + hfs_mount, + hfs_start, + hfs_unmount, + hfs_root, + hfs_quotactl, + hfs_statfs, + hfs_sync, + hfs_vget, + hfs_fhtovp, + hfs_vptofh, + hfs_init, + hfs_sysctl +}; diff --git a/bsd/hfs/hfs_vfsutils.c b/bsd/hfs/hfs_vfsutils.c new file mode 100644 index 000000000..93062300d --- /dev/null +++ b/bsd/hfs/hfs_vfsutils.c @@ -0,0 +1,3713 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)hfs_vfsutils.c 4.0 +* +* (c) 1997-2000 Apple Computer, Inc. 
All Rights Reserved
+*
+*	hfs_vfsutils.c -- Routines that go between the HFS layer and the VFS.
+*
+*	Change History (most recent first):
+*
+*	22-Jan-2000	Don Brady	Remove calls to MountCheck.
+*	 7-Sep-1999	Don Brady	Add HFS Plus hard-link support.
+*	25-Aug-1999	Don Brady	Don't use vcbAlBlSt for HFS plus volumes (2350009).
+*	 9-Aug-1999	Pat Dirks	Added support for ATTR_VOL_ENCODINGSUSED [#2357367].
+*	16-Jul-1999	Pat Dirks	Fixed PackCommonCatalogInfoAttributeBlock to return full range of possible vnode types [#2317604]
+*	15-Jun-1999	Pat Dirks	Added support for return of mounted device in hfs_getattrlist [#2345297].
+*	 9-Jun-1999	Don Brady	Cleanup vcb accesses in hfs_MountHFSVolume.
+*	 3-Jun-1999	Don Brady	Remove references to unused/legacy vcb fields (eg vcbXTClpSiz).
+*	21-May-1999	Don Brady	Add call to hfs_vinit in hfsGet to support mknod.
+*	 6-Apr-1999	Don Brady	Fixed de-reference of NULL dvp in hfsGet.
+*	22-Mar-1999	Don Brady	Add support for UFS delete semantics.
+*	 1-Mar-1999	Scott Roberts	Don't double MALLOC on long names.
+*	23-Feb-1999	Pat Dirks	Change incrementing of meta refcount to be done BEFORE lock is acquired.
+*	 2-Feb-1999	Pat Dirks	For volume ATTR_CMN_SCRIPT use vcb->volumeNameEncodingHint instead of 0.
+*	10-Mar-1999	Don Brady	Removing obsolete code.
+*	 2-Feb-1999	Don Brady	For volume ATTR_CMN_SCRIPT use vcb->volumeNameEncodingHint instead of 0.
+*	18-Jan-1999	Pat Dirks	Changed CopyCatalogToHFSNode to start with ACCESSPERMS instead of adding
+*				write access only for unlocked files (now handled via IMMUTABLE setting)
+*	 7-Dec-1998	Pat Dirks	Changed PackCatalogInfoFileAttributeBlock to return proper I/O block size.
+*	 7-Dec-1998	Don Brady	Pack the real text encoding instead of zero.
+*	16-Dec-1998	Don Brady	Use the root's crtime instead of vcb create time for getattrlist.
+*	16-Dec-1998	Don Brady	Use the root's crtime instead of vcb create time for getattrlist.
+*	 2-Dec-1998	Scott Roberts	Copy the mdbVN correctly into the vcb.
+* 3-Dec-1998 Pat Dirks Added support for ATTR_VOL_MOUNTFLAGS. +* 20-Nov-1998 Don Brady Add support for UTF-8 names. +* 18-Nov-1998 Pat Dirks Changed UnpackCommonAttributeBlock to call wait for hfs_chflags to update catalog entry when changing flags +* 13-Nov-1998 Pat Dirks Changed BestBlockSizeFit to try PAGE_SIZE only and skip check for MAXBSIZE. +* 10-Nov-1998 Pat Dirks Changed CopyCatalogToHFSNode to ensure consistency between lock flag and IMMUTABLE bits. +* 10-Nov-1998 Pat Dirks Added MapFileOffset(), LogicalBlockSize() and UpdateBlockMappingTable() routines. +* 18-Nov-1998 Pat Dirks Changed PackVolAttributeBlock to return proper logical block size +* for ATTR_VOL_IOBLOCKSIZE attribute. +* 3-Nov-1998 Umesh Vaishampayan Changes to deal with "struct timespec" +* change in the kernel. +* 23-Sep-1998 Don Brady In UnpackCommonAttributeBlock simplified setting of gid, uid and mode. +* 10-Nov-1998 Pat Dirks Added MapFileOffset(), LogicalBlockSize() and UpdateBlockMappingTable() routines. +* 17-Sep-1998 Pat Dirks Changed BestBlockSizeFit to try MAXBSIZE and PAGE_SIZE first. +* 8-Sep-1998 Don Brady Fix CopyVNodeToCatalogNode to use h_mtime for contentModDate (instead of h_ctime). +* 4-Sep-1998 Pat Dirks Added BestBlockSizeFit routine. +* 18-Aug-1998 Don Brady Change DEBUG_BREAK_MSG to a DBG_UTILS in MacToVFSError (radar #2262802). +* 30-Jun-1998 Don Brady Add calls to MacToVFSError to hfs/hfsplus mount routines (for radar #2249539). +* 22-Jun-1998 Don Brady Add more error cases to MacToVFSError; all HFS Common errors are negative. +* Changed hfsDelete to call DeleteFile for files. +* 4-Jun-1998 Pat Dirks Changed incorrect references to 'vcbAlBlkSize' to 'blockSize'; +* Added hfsCreateFileID. +* 4-Jun-1998 Don Brady Add hfsMoveRename to replace hfsMove and hfsRename. Use VPUT/VRELE macros +* instead of vput/vrele to catch bad ref counts. +* 28-May-1998 Pat Dirks Adjusted for change in definition of ATTR_CMN_NAME and removed ATTR_CMN_RAWDEVICE. 
+* 7-May-1998 Don Brady Added check for NULL vp to hfs_metafilelocking (radar #2233832). +* 24-Apr-1998 Pat Dirks Fixed AttributeBlockSize to return only length of variable attribute block. +* 4/21/1998 Don Brady Add SUPPORTS_MAC_ALIASES conditional (for radar #2225419). +* 4/21/1998 Don Brady Map cmNotEmpty errors to ENOTEMPTY (radar #2229259). +* 4/21/1998 Don Brady Fix up time/date conversions. +* 4/20/1998 Don Brady Remove course-grained hfs metadata locking. +* 4/18/1998 Don Brady Add VCB locking. +* 4/17/1998 Pat Dirks Fixed PackFileAttributeBlock to return more up-to-date EOF/PEOF info from vnode. +* 4/15/1998 Don Brady Add hasOverflowExtents and hfs_metafilelocking. Use ExtendBTreeFile instead +* of SetEndOfForkProc. Set forktype for system files. +* 4/14/1998 Deric Horn PackCatalogInfoAttributeBlock(), and related packing routines to +* pack attribute data given hfsCatalogInfo, without the objects vnode; +* 4/14/1998 Scott Roberts Add execute priviledges to all hfs objects. +* 4/9/1998 Don Brady Add MDB/VolumeHeader flushing to hfsUnmount; +* 4/8/1998 Don Brady Make sure vcbVRefNum field gets initialized (use MAKE_VREFNUM). +* 4/6/1998 Don Brady Removed calls to CreateVolumeCatalogCache (obsolete). +* 4/06/1998 Scott Roberts Added complex file support. +* 4/02/1998 Don Brady UpdateCatalogNode now takes parID and name as input. +* 3/31/1998 Don Brady Sync up with final HFSVolumes.h header file. +* 3/31/1998 Don Brady Check result from UFSToHFSStr to make sure hfs/hfs+ names are not greater +* than 31 characters. +* 3/30/1998 Don Brady In InitMetaFileVNode set VSYSTEM bit in vnode's v_flag. +* 3/26/1998 Don Brady Cleaned up hfs_MountXXX routines. Removed CloseBtreeFile and OpenBTreeFile. +* Simplified hfsUnmount (removed MacOS specific code). +* 3/17/1998 Don Brady AttributeBlockSize calculation did not account for the size field (4bytes). +* PackVolCommonAttributes and PackCommonAttributeBlock for ATTR_CMN_NAME +* were not setting up the name correctly. 
+* 3/17/1998 Don Brady Changed CreateCatalogNode interface to take kCatalogFolderNode and +* kCatalogFileNode as type input. Also, force MountCheck to always run. +* 12-nov-1997 Scott Roberts Initially created file. +* 17-Mar-98 ser Broke out and created CopyCatalogToHFSNode() +* +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hfs.h" +#include "hfs_dbg.h" +#include "hfs_mount.h" +#include "hfs_endian.h" + +#include "hfscommon/headers/FileMgrInternal.h" +#include "hfscommon/headers/BTreesInternal.h" +#include "hfscommon/headers/HFSUnicodeWrappers.h" + +#define SUPPORTS_MAC_ALIASES 0 +#define kMaxSecsForFsync 5 + +#define BYPASSBLOCKINGOPTIMIZATION 0 + +#define kMaxLockedMetaBuffers 32 /* number of locked buffer caches to hold for meta data */ + +extern int (**hfs_vnodeop_p)(void *); +extern int (**hfs_specop_p)(void *); +extern int (**hfs_fifoop_p)(void *); +extern int count_lock_queue __P((void)); +extern uid_t console_user; + +OSErr ValidMasterDirectoryBlock( HFSMasterDirectoryBlock *mdb ); +UInt16 DivUp( UInt32 byteRun, UInt32 blockSize ); + +/* Externs from vhash */ +extern void hfs_vhashins_sibling(dev_t dev, UInt32 nodeID, struct hfsnode *hp, struct hfsfilemeta **fm); +extern void hfs_vhashins(dev_t dev, UInt32 nodeID,struct hfsnode *hp); +extern struct vnode *hfs_vhashget(dev_t dev, UInt32 nodeID, UInt8 forkType); + +extern int hfs_vinit( struct mount *mntp, int (**specops)(void *), int (**fifoops)(), struct vnode **vpp); + +extern UInt16 CountRootFiles(ExtendedVCB *vcb); +extern OSErr GetVolumeNameFromCatalog(ExtendedVCB *vcb); + +static int InitMetaFileVNode(struct vnode *vp, off_t eof, u_long clumpSize, const HFSPlusExtentRecord extents, + HFSCatalogNodeID fileID, void * keyCompareProc); + +static void ReleaseMetaFileVNode(struct vnode *vp); + +static void RemovedMetaDataDirectory(ExtendedVCB *vcb); + +void CopyCatalogToObjectMeta(struct hfsCatalogInfo *catalogInfo, struct vnode 
*vp, struct hfsfilemeta *fm); +void CopyCatalogToFCB(struct hfsCatalogInfo *catalogInfo, struct vnode *vp); +void hfs_name_CatToMeta(CatalogNodeData *nodeData, struct hfsfilemeta *fm); +u_int32_t GetLogicalBlockSize(struct vnode *vp); + +/* BTree accessor routines */ +extern OSStatus GetBTreeBlock(FileReference vp, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block); +extern OSStatus SetBTreeBlockSize(FileReference vp, ByteCount blockSize, ItemCount minBlockCount); +extern OSStatus ExtendBTreeFile(FileReference vp, FSSize minEOF, FSSize maxEOF); +extern OSStatus ReleaseBTreeBlock(FileReference vp, BlockDescPtr blockPtr, ReleaseBlockOptions options); + +//******************************************************************************* +// Note: Finder information in the HFS/HFS+ metadata are considered opaque and +// hence are not in the right byte order on little endian machines. It is +// the responsibility of the finder and other clients to swap the data. +//******************************************************************************* + +//******************************************************************************* +// Routine: hfs_MountHFSVolume +// +// +//******************************************************************************* + +OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, + u_long sectors, struct proc *p) +{ + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + struct vnode *tmpvnode; + OSErr err; + HFSPlusExtentRecord extents; + ByteCount utf8chars; + DBG_FUNC_NAME("hfs_MountHFSVolume"); + DBG_PRINT_FUNC_NAME(); + + if (hfsmp == nil || mdb == nil) /* exit if bad paramater */ + return (EINVAL); + + err = ValidMasterDirectoryBlock( mdb ); /* make sure this is an HFS disk */ + if (err) + return MacToVFSError(err); + + /* don't mount a writeable volume if its dirty, it must be cleaned by fsck_hfs */ + if ((hfsmp->hfs_fs_ronly == 0) && ((SWAP_BE16 (mdb->drAtrb) & kHFSVolumeUnmountedMask) == 0)) + return (EINVAL); + + 
/* + * The MDB seems OK: transfer info from it into VCB + * Note - the VCB starts out clear (all zeros) + * + */ + vcb->vcbVRefNum = MAKE_VREFNUM(hfsmp->hfs_raw_dev); + + vcb->vcbSigWord = SWAP_BE16 (mdb->drSigWord); + vcb->vcbCrDate = LocalToUTC (SWAP_BE32 (mdb->drCrDate)); + vcb->vcbLsMod = LocalToUTC (SWAP_BE32 (mdb->drLsMod)); + vcb->vcbAtrb = SWAP_BE16 (mdb->drAtrb); + vcb->vcbNmFls = SWAP_BE16 (mdb->drNmFls); + vcb->vcbVBMSt = SWAP_BE16 (mdb->drVBMSt); + vcb->nextAllocation = SWAP_BE16 (mdb->drAllocPtr); + vcb->totalBlocks = SWAP_BE16 (mdb->drNmAlBlks); + vcb->blockSize = SWAP_BE32 (mdb->drAlBlkSiz); + vcb->vcbClpSiz = SWAP_BE32 (mdb->drClpSiz); + vcb->vcbAlBlSt = SWAP_BE16 (mdb->drAlBlSt); + vcb->vcbNxtCNID = SWAP_BE32 (mdb->drNxtCNID); + vcb->freeBlocks = SWAP_BE16 (mdb->drFreeBks); + vcb->vcbVolBkUp = LocalToUTC (SWAP_BE32 (mdb->drVolBkUp)); + vcb->vcbVSeqNum = SWAP_BE16 (mdb->drVSeqNum); + vcb->vcbWrCnt = SWAP_BE32 (mdb->drWrCnt); + vcb->vcbNmRtDirs = SWAP_BE16 (mdb->drNmRtDirs); + vcb->vcbFilCnt = SWAP_BE32 (mdb->drFilCnt); + vcb->vcbDirCnt = SWAP_BE32 (mdb->drDirCnt); + bcopy(mdb->drFndrInfo, vcb->vcbFndrInfo, sizeof(vcb->vcbFndrInfo)); + vcb->nextAllocation = SWAP_BE16 ( mdb->drAllocPtr); /* Duplicate?!?!?! */ + vcb->encodingsBitmap = 0; + vcb->vcbWrCnt++; /* Compensate for write of MDB on last flush */ + /* + * Copy the drVN field, which is a Pascal String to the vcb, which is a cstring + */ + + /* convert hfs encoded name into UTF-8 string */ + err = hfs_to_utf8(vcb, mdb->drVN, NAME_MAX, &utf8chars, vcb->vcbVN); + /* + * When an HFS name cannot be encoded with the current + * volume encoding we use MacRoman as a fallback. + */ + if (err || (utf8chars == 0)) + (void) mac_roman_to_utf8(mdb->drVN, NAME_MAX, &utf8chars, vcb->vcbVN); + + vcb->altIDSector = sectors - 2; + + // Initialize our dirID/nodePtr cache associated with this volume. 
+ err = InitMRUCache( sizeof(UInt32), kDefaultNumMRUCacheBlocks, &(vcb->hintCachePtr) ); + ReturnIfError( err ); + + hfsmp->hfs_logBlockSize = BestBlockSizeFit(vcb->blockSize, MAXBSIZE, hfsmp->hfs_phys_block_size); + + // XXX PPD: Should check here for hardware lock flag and set flags in VCB/MP appropriately + VCB_LOCK_INIT(vcb); + + /* + * Set up Extents B-tree vnode... + */ + err = GetInitializedVNode(hfsmp, &tmpvnode, 0); + if (err) goto MtVolErr; + /* HFSToHFSPlusExtents(mdb->drXTExtRec, extents); */ /* ASDFADSFSD */ + extents[0].startBlock = SWAP_BE16 (mdb->drXTExtRec[0].startBlock); + extents[0].blockCount = SWAP_BE16 (mdb->drXTExtRec[0].blockCount); + extents[1].startBlock = SWAP_BE16 (mdb->drXTExtRec[1].startBlock); + extents[1].blockCount = SWAP_BE16 (mdb->drXTExtRec[1].blockCount); + extents[2].startBlock = SWAP_BE16 (mdb->drXTExtRec[2].startBlock); + extents[2].blockCount = SWAP_BE16 (mdb->drXTExtRec[2].blockCount); + + err = InitMetaFileVNode(tmpvnode, SWAP_BE32 (mdb->drXTFlSize), SWAP_BE32 (mdb->drXTClpSiz), extents, + kHFSExtentsFileID, CompareExtentKeys); + if (err) goto MtVolErr; + + /* + * Set up Catalog B-tree vnode... 
+ */ + err = GetInitializedVNode(hfsmp, &tmpvnode, 0); + if (err) goto MtVolErr; + /* HFSToHFSPlusExtents(mdb->drCTExtRec, extents); */ + extents[0].startBlock = SWAP_BE16 (mdb->drCTExtRec[0].startBlock); + extents[0].blockCount = SWAP_BE16 (mdb->drCTExtRec[0].blockCount); + extents[1].startBlock = SWAP_BE16 (mdb->drCTExtRec[1].startBlock); + extents[1].blockCount = SWAP_BE16 (mdb->drCTExtRec[1].blockCount); + extents[2].startBlock = SWAP_BE16 (mdb->drCTExtRec[2].startBlock); + extents[2].blockCount = SWAP_BE16 (mdb->drCTExtRec[2].blockCount); + + err = InitMetaFileVNode(tmpvnode, SWAP_BE32 (mdb->drCTFlSize), SWAP_BE32 (mdb->drCTClpSiz), extents, + kHFSCatalogFileID, CompareCatalogKeys); + if (err) goto MtVolErr; + + /* mark the volume dirty (clear clean unmount bit) */ + vcb->vcbAtrb &= ~kHFSVolumeUnmountedMask; + + /* Remove any MetaDataDirectory from hfs disks */ + if (hfsmp->hfs_fs_ronly == 0) + RemovedMetaDataDirectory(vcb); + + /* + * all done with b-trees so we can unlock now... + */ + VOP_UNLOCK(vcb->catalogRefNum, 0, p); + VOP_UNLOCK(vcb->extentsRefNum, 0, p); + + err = noErr; + + if ( err == noErr ) + { + if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected + { + MarkVCBDirty( vcb ); // mark VCB dirty so it will be written + } + } + goto CmdDone; + + //-- Release any resources allocated so far before exiting with an error: +MtVolErr:; + ReleaseMetaFileVNode(vcb->catalogRefNum); + ReleaseMetaFileVNode(vcb->extentsRefNum); + +CmdDone:; + return( err ); +} + +//******************************************************************************* +// Routine: hfs_MountHFSPlusVolume +// +// +//******************************************************************************* + +OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, + u_long embBlkOffset, u_long sectors, struct proc *p) +{ + register ExtendedVCB *vcb; + HFSPlusForkData *fdp; + struct vnode *tmpvnode; + OSErr retval; + + if (hfsmp == nil || vhp 
== nil) /* exit if bad paramater */ + return (EINVAL); + + DBG_VFS(("hfs_MountHFSPlusVolume: signature=0x%x, version=%d, blockSize=%ld\n", + SWAP_BE16 (vhp->signature), + SWAP_BE16 (vhp->version), + SWAP_BE32 (vhp->blockSize))); + + retval = ValidVolumeHeader(vhp); /* make sure this is an HFS Plus disk */ + if (retval) + return MacToVFSError(retval); + + /* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */ + if (hfsmp->hfs_fs_ronly == 0 && (SWAP_BE32 (vhp->attributes) & kHFSVolumeUnmountedMask) == 0) + return (EINVAL); + /* + * The VolumeHeader seems OK: transfer info from it into VCB + * Note - the VCB starts out clear (all zeros) + */ + vcb = HFSTOVCB(hfsmp); + + //DBG_ASSERT((hfsmp->hfs_raw_dev & 0xFFFF0000) == 0); + vcb->vcbVRefNum = MAKE_VREFNUM(hfsmp->hfs_raw_dev); + vcb->vcbSigWord = SWAP_BE16 (vhp->signature); + vcb->vcbLsMod = SWAP_BE32 (vhp->modifyDate); + vcb->vcbAtrb = (UInt16) SWAP_BE32 (vhp->attributes); // VCB only uses lower 16 bits + vcb->vcbClpSiz = SWAP_BE32 (vhp->rsrcClumpSize); + vcb->vcbNxtCNID = SWAP_BE32 (vhp->nextCatalogID); + vcb->vcbVolBkUp = SWAP_BE32 (vhp->backupDate); + vcb->vcbWrCnt = SWAP_BE32 (vhp->writeCount); + vcb->vcbFilCnt = SWAP_BE32 (vhp->fileCount); + vcb->vcbDirCnt = SWAP_BE32 (vhp->folderCount); + + /* copy 32 bytes of Finder info */ + bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo)); + + vcb->vcbAlBlSt = 0; /* hfs+ allocation blocks start at first block of volume */ + vcb->vcbWrCnt++; /* compensate for write of Volume Header on last flush */ + + VCB_LOCK_INIT(vcb); + + /* Now fill in the Extended VCB info */ + vcb->nextAllocation = SWAP_BE32 (vhp->nextAllocation); + vcb->totalBlocks = SWAP_BE32 (vhp->totalBlocks); + vcb->freeBlocks = SWAP_BE32 (vhp->freeBlocks); + vcb->blockSize = SWAP_BE32 (vhp->blockSize); + vcb->checkedDate = SWAP_BE32 (vhp->checkedDate); + vcb->encodingsBitmap = SWAP_BE64 (vhp->encodingsBitmap); + + vcb->hfsPlusIOPosOffset = embBlkOffset * 512; + + 
vcb->altIDSector = embBlkOffset + sectors - 2; + + vcb->localCreateDate = SWAP_BE32 (vhp->createDate); /* in local time, not GMT! */ + + /* Update the logical block size in the mount struct (currently set up from the wrapper MDB) + using the new blocksize value: */ + hfsmp->hfs_logBlockSize = BestBlockSizeFit(vcb->blockSize, MAXBSIZE, hfsmp->hfs_phys_block_size); + + // XXX PPD: Should check here for hardware lock flag and set flags in VCB/MP appropriately + // vcb->vcbAtrb |= kVolumeHardwareLockMask; // XXX this line for debugging only!!!! + + // Initialize our dirID/nodePtr cache associated with this volume. + retval = InitMRUCache( sizeof(UInt32), kDefaultNumMRUCacheBlocks, &(vcb->hintCachePtr) ); + if (retval != noErr) goto ErrorExit; + + /* + * Set up Extents B-tree vnode... + */ + retval = GetInitializedVNode(hfsmp, &tmpvnode, 0); + if (retval) goto ErrorExit; + fdp = &vhp->extentsFile; + SWAP_HFS_PLUS_FORK_DATA (fdp); + retval = InitMetaFileVNode(tmpvnode, fdp->logicalSize, fdp->clumpSize, fdp->extents, + kHFSExtentsFileID, CompareExtentKeysPlus); + SWAP_HFS_PLUS_FORK_DATA (fdp); + if (retval) goto ErrorExit; + + /* + * Set up Catalog B-tree vnode... + */ + retval = GetInitializedVNode(hfsmp, &tmpvnode, 0); + if (retval) goto ErrorExit; + fdp = &vhp->catalogFile; + SWAP_HFS_PLUS_FORK_DATA (fdp); + retval = InitMetaFileVNode(tmpvnode, fdp->logicalSize, fdp->clumpSize, fdp->extents, + kHFSCatalogFileID, CompareExtendedCatalogKeys); + SWAP_HFS_PLUS_FORK_DATA (fdp); + if (retval) goto ErrorExit; + + /* + * Set up Allocation file vnode... 
+ */ + retval = GetInitializedVNode(hfsmp, &tmpvnode, 0); + if (retval) goto ErrorExit; + fdp = &vhp->allocationFile; + SWAP_HFS_PLUS_FORK_DATA (fdp); + retval = InitMetaFileVNode(tmpvnode, fdp->logicalSize, fdp->clumpSize, fdp->extents, + kHFSAllocationFileID, NULL); + SWAP_HFS_PLUS_FORK_DATA (fdp); + if (retval) goto ErrorExit; + + /* + * Now that Catalog file is open get the volume name from the catalog + */ + retval = MacToVFSError( GetVolumeNameFromCatalog(vcb) ); + if (retval != noErr) goto ErrorExit; + + /* mark the volume dirty (clear clean unmount bit) */ + vcb->vcbAtrb &= ~kHFSVolumeUnmountedMask; + + /* setup private/hidden directory for unlinked files */ + hfsmp->hfs_private_metadata_dir = FindMetaDataDirectory(vcb); + + /* + * all done with metadata files so we can unlock now... + */ + VOP_UNLOCK(vcb->allocationsRefNum, 0, p); + VOP_UNLOCK(vcb->catalogRefNum, 0, p); + VOP_UNLOCK(vcb->extentsRefNum, 0, p); + + if ( !(vcb->vcbAtrb & kHFSVolumeHardwareLockMask) ) // if the disk is not write protected + { + MarkVCBDirty( vcb ); // mark VCB dirty so it will be written + } + + DBG_VFS(("hfs_MountHFSPlusVolume: returning (%d)\n", retval)); + + return (0); + + +ErrorExit: + /* + * A fatal error occured and the volume cannot be mounted + * release any resources that we aquired... 
+ */ + + DBG_VFS(("hfs_MountHFSPlusVolume: fatal error (%d)\n", retval)); + + InvalidateCatalogCache(vcb); + + ReleaseMetaFileVNode(vcb->allocationsRefNum); + ReleaseMetaFileVNode(vcb->catalogRefNum); + ReleaseMetaFileVNode(vcb->extentsRefNum); + + return (retval); +} + + +/* + * ReleaseMetaFileVNode + * + * vp L - - + */ +static void ReleaseMetaFileVNode(struct vnode *vp) +{ + if (vp) + { + FCB *fcb = VTOFCB(vp); + + if (fcb->fcbBTCBPtr != NULL) + (void) BTClosePath(fcb); /* ignore errors since there is only one path open */ + + /* release the node even if BTClosePath fails */ + if (VOP_ISLOCKED(vp)) + vput(vp); + else + vrele(vp); + } +} + + +/* + * InitMetaFileVNode + * + * vp U L L + */ +static int InitMetaFileVNode(struct vnode *vp, off_t eof, u_long clumpSize, const HFSPlusExtentRecord extents, + HFSCatalogNodeID fileID, void * keyCompareProc) +{ + FCB *fcb; + ExtendedVCB *vcb; + int result = 0; + + DBG_ASSERT(vp != NULL); + DBG_ASSERT(vp->v_data != NULL); + + vcb = VTOVCB(vp); + fcb = VTOFCB(vp); + + switch (fileID) + { + case kHFSExtentsFileID: + vcb->extentsRefNum = vp; + break; + + case kHFSCatalogFileID: + vcb->catalogRefNum = vp; + break; + + case kHFSAllocationFileID: + vcb->allocationsRefNum = vp; + break; + + default: + panic("InitMetaFileVNode: invalid fileID!"); + } + + fcb->fcbEOF = eof; + fcb->fcbPLen = eof; + fcb->fcbClmpSize = clumpSize; + H_FILEID(VTOH(vp)) = fileID; + H_DIRID(VTOH(vp)) = kHFSRootParentID; + H_FORKTYPE(VTOH(vp)) = kSysFile; + + bcopy(extents, fcb->fcbExtents, sizeof(HFSPlusExtentRecord)); + + /* + * Lock the hfsnode and insert the hfsnode into the hash queue: + */ + hfs_vhashins(H_DEV(VTOH(vp)), fileID, VTOH(vp)); + vp->v_flag |= VSYSTEM; /* tag our metadata files (used by vflush call) */ + + /* As the vnode is a system vnode we don't need UBC */ + if(UBCINFOEXISTS(vp)) { + /* So something is wrong if the it exists */ + panic("ubc exists for system vnode"); + } + + if (keyCompareProc != NULL) { + result = BTOpenPath(fcb, + 
(KeyCompareProcPtr) keyCompareProc, + GetBTreeBlock, + ReleaseBTreeBlock, + ExtendBTreeFile, + SetBTreeBlockSize); + result = MacToVFSError(result); + } + + return (result); +} + + +/************************************************************* +* +* Unmounts a hfs volume. +* At this point vflush() has been called (to dump all non-metadata files) +* +*************************************************************/ + +short hfsUnmount( register struct hfsmount *hfsmp, struct proc *p) +{ + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + int retval = E_NONE; + + (void) DisposeMRUCache(vcb->hintCachePtr); + InvalidateCatalogCache( vcb ); + // XXX PPD: Should dispose of any allocated volume cache here: call DisposeVolumeCacheBlocks( vcb )? + + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); + (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, p); + + if (vcb->vcbSigWord == kHFSPlusSigWord) + ReleaseMetaFileVNode(vcb->allocationsRefNum); + + ReleaseMetaFileVNode(vcb->catalogRefNum); + ReleaseMetaFileVNode(vcb->extentsRefNum); + + return (retval); +} + + +/* + * Performs a lookup on the given dirID, name. 
Returns the catalog info + * + * If len is -1, then it is a null terminated string, pass it along to MacOS as kUndefinedStrLen + */ + +short hfs_getcatalog (ExtendedVCB *vcb, UInt32 parentDirID, char *name, short len, hfsCatalogInfo *catInfo) +{ + OSErr result; + UInt32 length; + struct FInfo *fip; + + if (len == -1 ) { /* Convert it to MacOS terms */ + if (name) + length = strlen(name); + else + length = kUndefinedStrLen; + } + else + length = len; + + result = GetCatalogNode(vcb, parentDirID, name, length, catInfo->hint, &catInfo->nodeData, &catInfo->hint); + +#if HFS_DIAGNOSTICS + if (catInfo->nodeData.cnm_nameptr) { + DBG_ASSERT(strlen(catInfo->nodeData.cnm_nameptr) == catInfo->nodeData.cnm_length); + } +#endif + +#if HFS_HARDLINKS + if (result) + goto exit; + + fip = (struct FInfo *) &catInfo->nodeData.cnd_finderInfo; + + /* + * if we encounter an indirect link (hardlink) then auto resolve it... + */ + if ((catInfo->nodeData.cnd_type == kCatalogFileNode) && + (fip->fdType == kHardLinkFileType) && + (fip->fdCreator == kHFSPlusCreator) && + ((catInfo->nodeData.cnd_createDate == vcb->vcbCrDate) || + (catInfo->nodeData.cnd_createDate == VCBTOHFS(vcb)->hfs_metadata_createdate))) { + + u_int32_t indlinkno; + char iNodeName[32]; + UInt32 privDir = VCBTOHFS(vcb)->hfs_private_metadata_dir; + + indlinkno = catInfo->nodeData.cnd_iNodeNum; + MAKE_INODE_NAME(iNodeName, indlinkno); + + /* + * Get nodeData from the data node file. 
+ * Flag the node data to NOT copy the name, preserve the original + */ + catInfo->nodeData.cnm_flags |= kCatNameNoCopyName; + result = GetCatalogNode(vcb, privDir, iNodeName, 0, 0, &catInfo->nodeData, &catInfo->hint); + catInfo->nodeData.cnm_flags &= ~kCatNameNoCopyName; /* Just to keep things like they should be */ + + /* make sure there's at least 1 reference */ + if (result == 0) { + if (catInfo->nodeData.cnd_linkCount == 0) + catInfo->nodeData.cnd_linkCount = 2; + /* keep a copy of iNodeNum to put into h_indnodeno */ + catInfo->nodeData.cnd_iNodeNumCopy = indlinkno; + } + + /* if we cannot resolve the link, allow the link to be + * exposed (as an empty file) so it can be deleted + */ + if (result == cmNotFound) + result = 0; + } + +exit: +#endif + + if (result) + DBG_ERR(("on Lookup, GetCatalogNode returned: %d: dirid: %ld name: %s\n", result, parentDirID, name)); + + return MacToVFSError(result); +} + + + +short hfsDelete (ExtendedVCB *vcb, UInt32 parentDirID, StringPtr name, short isfile, UInt32 catalogHint) +{ + OSErr result = noErr; + + /* XXX have all the file's blocks been flushed/trashed? */ + + /* + * DeleteFile will delete the catalog node and then + * free up any disk space used by the file.
+ */ + if (isfile) + result = DeleteFile(vcb, parentDirID, name, catalogHint); + else /* is a directory */ + result = DeleteCatalogNode(vcb, parentDirID, name, catalogHint); + + if (result) + DBG_ERR(("on Delete, DeleteFile returned: %d: dirid: %ld name: %s\n", result, parentDirID, name)); + + return MacToVFSError(result); +} + + +short hfsMoveRename (ExtendedVCB *vcb, UInt32 oldDirID, char *oldName, UInt32 newDirID, char *newName, UInt32 *hint) +{ + OSErr result = noErr; + + result = MoveRenameCatalogNode(vcb, oldDirID,oldName, *hint, newDirID, newName, hint); + + if (result) + DBG_ERR(("on hfsMoveRename, MoveRenameCatalogNode returned: %d: newdirid: %ld newname: %s\n", result, newDirID, newName)); + + + return MacToVFSError(result); +} + +/* XXX SER pass back the hint so other people can use it */ + + +short hfsCreate(ExtendedVCB *vcb, UInt32 dirID, char *name, int mode) +{ + OSErr result = noErr; + HFSCatalogNodeID catalogNodeID; + UInt32 catalogHint; + UInt32 type; + + /* just test for directories, the default is to create a file (like symlinks) */ + if ((mode & IFMT) == IFDIR) + type = kCatalogFolderNode; + else + type = kCatalogFileNode; + + result = CreateCatalogNode (vcb, dirID, name, type, &catalogNodeID, &catalogHint); + + return MacToVFSError(result); +} + + +short hfsCreateFileID (ExtendedVCB *vcb, UInt32 parentDirID, StringPtr name, UInt32 catalogHint, UInt32 *fileIDPtr) +{ + return MacToVFSError(CreateFileIDRef(vcb, parentDirID, name, catalogHint, fileIDPtr)); +} + + +/********************************************************************************/ +/* */ +/* hfs_vget_catinfo - Returns a vnode derived from a hfs catInfo struct */ +/* */ +/********************************************************************************/ + +int hfs_vget_catinfo(struct vnode *parent_vp, struct hfsCatalogInfo *catInfo, u_int32_t forkType, struct vnode **target_vp) +{ + int retval = E_NONE; + + + *target_vp = hfs_vhashget(H_DEV(VTOH(parent_vp)), 
catInfo->nodeData.cnd_nodeID, forkType); + + if (*target_vp == NULL) { + if (forkType == kAnyFork) + if (catInfo->nodeData.cnd_type == kCatalogFolderNode) + forkType = kDirectory; + else + forkType = kDataFork; + + retval = hfs_vcreate( VTOVCB(parent_vp), catInfo, forkType, target_vp); + }; + + return (retval); +} + + + +/********************************************************************************/ +/* */ +/* hfs_vget_fork - Returns a vnode derived from a sibling */ +/* vp is locked */ +/* */ +/********************************************************************************/ + +int hfs_vget_sibling(struct vnode *vp, u_int16_t forkType, struct vnode **vpp) +{ + struct vnode * target_vp = NULL; + int retval = E_NONE; + + + DBG_ASSERT(vp != NULL); + DBG_ASSERT(VTOH(vp) != NULL); + DBG_ASSERT(VTOH(vp)->h_meta != NULL); + DBG_ASSERT(forkType==kDataFork || forkType==kRsrcFork); + + target_vp = hfs_vhashget(H_DEV(VTOH(vp)), H_FILEID(VTOH(vp)), forkType); + + /* + * If not in the hash, then we have to create it + */ + if (target_vp == NULL) { + struct proc *p = current_proc(); + hfsCatalogInfo catInfo; + + INIT_CATALOGDATA(&catInfo.nodeData, 0); + catInfo.hint = H_HINT(VTOH(vp)); + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_SHARED, p); + if (retval) goto GetCatErr_Exit; + + retval = hfs_getcatalog (VTOVCB(vp), H_DIRID(VTOH(vp)), H_NAME(VTOH(vp)), VTOH(vp)->h_meta->h_namelen, &catInfo); + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, p); + if (retval) goto GetCatErr_Exit; + + retval = hfs_vcreate( VTOVCB(vp), &catInfo, forkType, &target_vp); + +GetCatErr_Exit: + CLEAN_CATALOGDATA(&catInfo.nodeData); + }; + +Err_Exit: + + if (!retval) { + DBG_ASSERT(target_vp!=NULL); + } else { + DBG_ASSERT(target_vp==NULL); + } + + *vpp = target_vp; + return (retval); +} + + +/************************************************************************/ +/* hfs_vcreate - Returns a vnode 
derived from hfs */ +/* */ +/* When creating the vnode, care must be made to set the */ +/* correct fields in the correct order. Calls to malloc() */ +/* and other subroutines, can cause a context switch, */ +/* and the fields must be ready for the possibility */ +/* */ +/* */ +/************************************************************************/ + +short hfs_vcreate(ExtendedVCB *vcb, hfsCatalogInfo *catInfo, UInt8 forkType, struct vnode **vpp) +{ + struct hfsnode *hp; + struct vnode *vp; + struct hfsmount *hfsmp; + struct hfsfilemeta *fm; + struct mount *mp; + struct vfsFCB *xfcb; + dev_t dev; + short retval; + +#if HFS_DIAGNOSTIC + DBG_ASSERT(vcb != NULL); + DBG_ASSERT(catInfo != NULL); + DBG_ASSERT(vpp != NULL); + DBG_ASSERT((forkType == kDirectory) || (forkType == kDataFork) || (forkType == kRsrcFork)); + if (catInfo->nodeData.cnd_type == kCatalogFolderNode) { + DBG_ASSERT(forkType == kDirectory); + } else { + DBG_ASSERT(forkType != kDirectory); + } +#endif + + hfsmp = VCBTOHFS(vcb); + mp = HFSTOVFS(hfsmp); + dev = hfsmp->hfs_raw_dev; + + /* Check if unmount in progress */ + if (mp->mnt_kern_flag & MNTK_UNMOUNT) { + *vpp = NULL; + return (EPERM); + } + DBG_UTILS(("\thfs_vcreate: On '%s' with forktype of %d, nodeType of 0x%08lX\n", catInfo->nodeData.cnm_nameptr, forkType, (unsigned long)catInfo->nodeData.cnd_type)); + + /* Must malloc() here, since getnewvnode() can sleep */ + MALLOC_ZONE(hp, struct hfsnode *, sizeof(struct hfsnode), M_HFSNODE, M_WAITOK); + bzero((caddr_t)hp, sizeof(struct hfsnode)); + + /* + * Set that this node is in the process of being allocated + * Set it as soon as possible, so context switches well always hit upon it. + * if this is set then wakeup() MUST be called on hp after the flag is cleared + * DO NOT exit without clearing and waking up !!!! 
+ */ + hp->h_nodeflags |= IN_ALLOCATING; /* Mark this as being allocating */ + lockinit(&hp->h_lock, PINOD, "hfsnode", 0, 0); + + + /* getnewvnode() does a VREF() on the vnode */ + /* Allocate a new vnode. If unsuccesful, leave after freeing memory */ + if ((retval = getnewvnode(VT_HFS, mp, hfs_vnodeop_p, &vp))) { + wakeup(hp); /* Shouldnt happen, but just to make sure */ + FREE_ZONE(hp, sizeof(struct hfsnode), M_HFSNODE); + *vpp = NULL; + return (retval); + }; + + /* + * Set the essentials before locking it down + */ + hp->h_vp = vp; /* Make HFSTOV work */ + vp->v_data = hp; /* Make VTOH work */ + H_FORKTYPE(hp) = forkType; + fm = NULL; + + /* + * Lock the hfsnode and insert the hfsnode into the hash queue, also if meta exists + * add to sibling list and return the meta address + */ + if (SIBLING_FORKTYPE(forkType)) + hfs_vhashins_sibling(dev, catInfo->nodeData.cnd_nodeID, hp, &fm); + else + hfs_vhashins(dev, catInfo->nodeData.cnd_nodeID, hp); + + /* + * If needed allocate and init the object meta data: + */ + if (fm == NULL) { + /* Allocate it....remember we can do a context switch here */ + MALLOC_ZONE(fm, struct hfsfilemeta *, sizeof(struct hfsfilemeta), M_HFSFMETA, M_WAITOK); + bzero(fm, sizeof(struct hfsfilemeta)); + + /* Fill it in */ + /* + * NOTICE: XXX Even though we have added the vnode to the hash so it is alive on TWO + * accessable lists, we do not assign it until later, + * this helps to make sure we do not use a half initiated meta + */ + + /* Init the sibling list if needed */ + if (SIBLING_FORKTYPE(forkType)) { + simple_lock_init(&fm->h_siblinglock); + CIRCLEQ_INIT(&fm->h_siblinghead); + CIRCLEQ_INSERT_HEAD(&fm->h_siblinghead, hp, h_sibling); + }; + + fm->h_dev = dev; + CopyCatalogToObjectMeta(catInfo, vp, fm); + + /* + * the vnode is finally alive, with the exception of the FCB below, + * It is finally locked and ready for its debutante ball + */ + hp->h_meta = fm; + }; + fm->h_usecount++; + + + /* + * Init the File Control Block. 
+ */ + CopyCatalogToFCB(catInfo, vp); + + /* + * Finish vnode initialization. + * Setting the v_type 'stamps' the vnode as 'complete', so should be done almost last. + * + * At this point the vnode should be locked and fully allocated. And ready to be used + * or accessed. (though having it locked prevents most of this, it + * can still be accessed through lists and hashs). + */ + vp->v_type = IFTOVT(hp->h_meta->h_mode); + if ((vp->v_type == VREG) + && (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp))) { + ubc_info_init(vp); + } + + /* + * Initialize the vnode from the inode, check for aliases, sets the VROOT flag. + * Note that the underlying vnode may have changed. + */ + if ((retval = hfs_vinit(mp, hfs_specop_p, hfs_fifoop_p, &vp))) { + wakeup((caddr_t)hp); + vput(vp); + *vpp = NULL; + return (retval); + } + + /* + * Finish inode initialization now that aliasing has been resolved. + */ + hp->h_meta->h_devvp = hfsmp->hfs_devvp; + VREF(hp->h_meta->h_devvp); + +#if HFS_DIAGNOSTIC + hp->h_valid = HFS_VNODE_MAGIC; +#endif + hp->h_nodeflags &= ~IN_ALLOCATING; /* vnode is completely initialized */ + + /* Wake up anybody waiting for us to finish..see hfs_vhash.c */ + wakeup((caddr_t)hp); + +#if HFS_DIAGNOSTIC + + /* Lets do some testing here */ + DBG_ASSERT(hp->h_meta); + DBG_ASSERT(VTOH(vp)==hp); + DBG_ASSERT(HTOV(hp)==vp); + DBG_ASSERT(hp->h_meta->h_usecount>=1 && hp->h_meta->h_usecount<=2); + if (catInfo->nodeData.cnd_type == kCatalogFolderNode) { + DBG_ASSERT(vp->v_type == VDIR); + DBG_ASSERT(H_FORKTYPE(VTOH(vp)) == kDirectory); + } +#endif // HFS_DIAGNOSTIC + + + *vpp = vp; + return 0; + +} + +void CopyCatalogToObjectMeta(struct hfsCatalogInfo *catalogInfo, struct vnode *vp, struct hfsfilemeta *fm) +{ + ExtendedVCB *vcb = VTOVCB(vp); + struct mount *mp = VTOVFS(vp); + Boolean isHFSPlus, isDirectory; + ushort finderFlags; + ushort filetype; + + DBG_ASSERT (fm != NULL); + DBG_ASSERT (fm->h_namelen == 0); + DBG_ASSERT (fm->h_namePtr == 0); + + DBG_UTILS(("\tCopying to 
file's meta data: name:%s, nodeid:%ld\n", catalogInfo->nodeData.cnm_nameptr, catalogInfo->nodeData.cnd_nodeID)); + + isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord); + isDirectory = (catalogInfo->nodeData.cnd_type == kCatalogFolderNode); + finderFlags = SWAP_BE16 (((struct FInfo *)(&catalogInfo->nodeData.cnd_finderInfo))->fdFlags); + + /* Copy over the dirid, and hint */ + fm->h_nodeID = catalogInfo->nodeData.cnd_nodeID; + fm->h_dirID = catalogInfo->nodeData.cnm_parID; + fm->h_hint = catalogInfo->hint; + + /* Copy over the name */ + hfs_name_CatToMeta(&catalogInfo->nodeData, fm); + + + /* get dates in BSD format */ + fm->h_mtime = to_bsd_time(catalogInfo->nodeData.cnd_contentModDate); + fm->h_crtime = to_bsd_time(catalogInfo->nodeData.cnd_createDate); + fm->h_butime = to_bsd_time(catalogInfo->nodeData.cnd_backupDate); + if (isHFSPlus) { + fm->h_atime = to_bsd_time(catalogInfo->nodeData.cnd_accessDate); + fm->h_ctime = to_bsd_time(catalogInfo->nodeData.cnd_attributeModDate); + } + else { + fm->h_atime = to_bsd_time(catalogInfo->nodeData.cnd_contentModDate); + fm->h_ctime = to_bsd_time(catalogInfo->nodeData.cnd_contentModDate); + } + + /* Now the rest */ + if (isHFSPlus && (catalogInfo->nodeData.cnd_mode & IFMT)) { + fm->h_uid = catalogInfo->nodeData.cnd_ownerID; + fm->h_gid = catalogInfo->nodeData.cnd_groupID; + fm->h_pflags = catalogInfo->nodeData.cnd_ownerFlags | + (catalogInfo->nodeData.cnd_adminFlags << 16); + fm->h_mode = (mode_t)catalogInfo->nodeData.cnd_mode; +#if 1 + if (fm->h_uid == 0xFFFFFFFD) { /* 0xfffffffd = 4294967293, the old "unknown" */ + fm->h_uid = UNKNOWNUID; + fm->h_metaflags |= IN_CHANGE; + vcb->vcbFlags |= kHFS_DamagedVolume; /* Trigger fsck on next mount */ + }; + if (fm->h_gid == 0xFFFFFFFD) { /* 0xfffffffd = 4294967293, the old "unknown" */ + fm->h_gid = UNKNOWNGID; + fm->h_metaflags |= IN_CHANGE; + vcb->vcbFlags |= kHFS_DamagedVolume; /* Trigger fsck on next mount */ + }; +#endif + filetype = fm->h_mode & IFMT; + if (filetype == IFCHR || 
filetype == IFBLK) + fm->h_rdev = catalogInfo->nodeData.cnd_rawDevice; + else { + fm->h_rdev = 0; +#if HFS_HARDLINKS + if (catalogInfo->nodeData.cnd_type == kCatalogFileNode && + catalogInfo->nodeData.cnd_linkCount > 0) { + fm->h_nlink = catalogInfo->nodeData.cnd_linkCount; + fm->h_indnodeno = catalogInfo->nodeData.cnd_iNodeNumCopy; + fm->h_metaflags |= IN_DATANODE; + } +#endif + } + + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + /* + * Override the permissions as determined by the mount arguments + * in ALMOST the same way unset permissions are treated but keep + * track of whether or not the file or folder is hfs locked + * by leaving the h_pflags field unchanged from what was unpacked + * out of the catalog. + */ + fm->h_metaflags |= IN_UNSETACCESS; + fm->h_uid = VTOHFS(vp)->hfs_uid; + fm->h_gid = VTOHFS(vp)->hfs_gid; +#if OVERRIDE_UNKNOWN_PERMISSIONS + /* Default access is full read/write/execute: */ + /* XXX won't this smash IFCHR, IFBLK and IFLNK (for no-follow lookups)? */ + fm->h_mode = ACCESSPERMS; /* 0777: rwxrwxrwx */ + fm->h_rdev = 0; + + /* ... but no more than that permitted by the mount point's: */ + if (isDirectory) { + fm->h_mode &= VTOHFS(vp)->hfs_dir_mask; + } + else { + fm->h_mode &= VTOHFS(vp)->hfs_file_mask; + } + + if(isDirectory) + fm->h_mode |= IFDIR; + else if (SUPPORTS_MAC_ALIASES && (finderFlags & kIsAlias)) /* aliases will be symlinks in the future */ + fm->h_mode |= IFLNK; + else + fm->h_mode |= IFREG; +#endif + }; + } else { + /* + * Set the permissions as determined by the mount arguments + * but take into account whether the file or folder is hfs locked + */ + fm->h_metaflags |= IN_UNSETACCESS; + fm->h_uid = VTOHFS(vp)->hfs_uid; + fm->h_gid = VTOHFS(vp)->hfs_gid; + fm->h_pflags = 0; /* No valid pflags on disk (IMMUTABLE is synced from lock flag later) */ + fm->h_rdev = 0; /* No valid rdev on disk */ + /* Default access is full read/write/execute: */ + fm->h_mode = ACCESSPERMS; /* 0777: rwxrwxrwx */ + + /* ...
but no more than that permitted by the mount point's: */ + if (isDirectory) { + fm->h_mode &= VTOHFS(vp)->hfs_dir_mask; + } + else { + fm->h_mode &= VTOHFS(vp)->hfs_file_mask; + } + + if(isDirectory) + fm->h_mode |= IFDIR; + else if (SUPPORTS_MAC_ALIASES && (finderFlags & kIsAlias)) /* aliases will be symlinks in the future */ + fm->h_mode |= IFLNK; + else + fm->h_mode |= IFREG; + }; + + /* Make sure that there is no nodeType/mode mismatch */ + if (isDirectory && ((fm->h_mode & IFMT) != IFDIR)) { + fm->h_mode &= ~IFMT; /* Clear the bad bits */ + fm->h_mode |= IFDIR; /* Set the proper one */ + }; + + /* Make sure the IMMUTABLE bits are in sync with the locked flag in the catalog: */ + if (!isDirectory) { + if (catalogInfo->nodeData.cnd_flags & kHFSFileLockedMask) { + /* The file's supposed to be locked: + Make sure at least one of the IMMUTABLE bits is set: */ + if ((fm->h_pflags & (SF_IMMUTABLE | UF_IMMUTABLE)) == 0) { + fm->h_pflags |= UF_IMMUTABLE; /* Set the user-changable IMMUTABLE bit */ + }; + } else { + /* The file's supposed to be unlocked: */ + fm->h_pflags &= ~(SF_IMMUTABLE | UF_IMMUTABLE); + }; + }; + + if (isDirectory) { + fm->h_nlink = 2 + catalogInfo->nodeData.cnd_valence; + fm->h_size = (2 * sizeof(hfsdotentry)) + + (catalogInfo->nodeData.cnd_valence * AVERAGE_HFSDIRENTRY_SIZE); + if (fm->h_size < MAX_HFSDIRENTRY_SIZE) + fm->h_size = MAX_HFSDIRENTRY_SIZE; + } else { + fm->h_size = (off_t)vcb->blockSize * + (off_t)(catalogInfo->nodeData.cnd_rsrcfork.totalBlocks + + catalogInfo->nodeData.cnd_datafork.totalBlocks); + } +} + + +void CopyCatalogToFCB(struct hfsCatalogInfo *catalogInfo, struct vnode *vp) +{ + FCB *fcb = VTOFCB(vp); + ExtendedVCB *vcb = VTOVCB(vp); + Boolean isHFSPlus, isDirectory, isResource; + HFSPlusExtentDescriptor *extents; + UInt8 forkType; + + DBG_ASSERT (vp != NULL); + DBG_ASSERT (fcb != NULL); + DBG_ASSERT (vcb != NULL); + DBG_ASSERT (VTOH(vp) != NULL); + + forkType = H_FORKTYPE(VTOH(vp)); + isResource = (forkType == kRsrcFork); + 
isDirectory = (catalogInfo->nodeData.cnd_type == kCatalogFolderNode); + isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord); + + /* Init the fcb */ + fcb->fcbFlags = catalogInfo->nodeData.cnd_flags; + + if (forkType != kDirectory) { + fcb->fcbFlags &= kHFSFileLockedMask; /* Clear resource, dirty bits */ + if (fcb->fcbFlags != 0) /* if clear, its not locked, then.. */ + fcb->fcbFlags = fcbFileLockedMask; /* duplicate the bit for later use */ + + fcb->fcbClmpSize = vcb->vcbClpSiz; /*XXX why not use the one in catalogInfo? */ + + if (isResource) + extents = catalogInfo->nodeData.cnd_rsrcfork.extents; + else + extents = catalogInfo->nodeData.cnd_datafork.extents; + + /* Copy the extents to their correct location: */ + bcopy (extents, fcb->fcbExtents, sizeof(HFSPlusExtentRecord)); + + if (isResource) { + fcb->fcbEOF = catalogInfo->nodeData.cnd_rsrcfork.logicalSize; + fcb->fcbPLen = (off_t)((off_t)catalogInfo->nodeData.cnd_rsrcfork.totalBlocks * (off_t)vcb->blockSize); + fcb->fcbFlags |= fcbResourceMask; + } else { + fcb->fcbEOF = catalogInfo->nodeData.cnd_datafork.logicalSize; + fcb->fcbPLen = (off_t)((off_t)catalogInfo->nodeData.cnd_datafork.totalBlocks * (off_t)vcb->blockSize); + }; + }; + + +} + +int hasOverflowExtents(struct hfsnode *hp) +{ + ExtendedVCB *vcb = HTOVCB(hp); + FCB *fcb = HTOFCB(hp); + u_long blocks; + + if (vcb->vcbSigWord == kHFSPlusSigWord) + { + + if (fcb->fcbExtents[7].blockCount == 0) + return false; + + blocks = fcb->fcbExtents[0].blockCount + + fcb->fcbExtents[1].blockCount + + fcb->fcbExtents[2].blockCount + + fcb->fcbExtents[3].blockCount + + fcb->fcbExtents[4].blockCount + + fcb->fcbExtents[5].blockCount + + fcb->fcbExtents[6].blockCount + + fcb->fcbExtents[7].blockCount; + } + else + { + if (fcb->fcbExtents[2].blockCount == 0) + return false; + + blocks = fcb->fcbExtents[0].blockCount + + fcb->fcbExtents[1].blockCount + + fcb->fcbExtents[2].blockCount; + } + + return ((fcb->fcbPLen / vcb->blockSize) > blocks); +} + + +int 
hfs_metafilelocking(struct hfsmount *hfsmp, u_long fileID, u_int flags, struct proc *p) +{ + ExtendedVCB *vcb; + struct vnode *vp = NULL; + int numOfLockedBuffs; + int retval = 0; + + vcb = HFSTOVCB(hfsmp); + + DBG_UTILS(("hfs_metafilelocking: vol: %d, file: %d %s%s%s\n", vcb->vcbVRefNum, fileID, + ((flags & LK_TYPE_MASK) == LK_RELEASE ? "RELEASE" : ""), + ((flags & LK_TYPE_MASK) == LK_EXCLUSIVE ? "EXCLUSIVE" : ""), + ((flags & LK_TYPE_MASK) == LK_SHARED ? "SHARED" : "") )); + + + switch (fileID) + { + case kHFSExtentsFileID: + vp = vcb->extentsRefNum; + break; + + case kHFSCatalogFileID: + vp = vcb->catalogRefNum; + break; + + case kHFSAllocationFileID: + /* bitmap is covered by Extents B-tree locking */ + /* FALL THROUGH */ + default: + panic("hfs_lockmetafile: invalid fileID"); + } + + if (vp != NULL) { + + /* Release, if necessary, any locked buffer caches */ + if ((flags & LK_TYPE_MASK) == LK_RELEASE) { + struct timeval tv = time; + u_int32_t lastfsync = tv.tv_sec; + + (void) BTGetLastSync(VTOFCB(vp), &lastfsync); + + numOfLockedBuffs = count_lock_queue(); + if ((numOfLockedBuffs > kMaxLockedMetaBuffers) || ((numOfLockedBuffs>1) && ((tv.tv_sec - lastfsync) > kMaxSecsForFsync))) { + DBG_UTILS(("Synching meta deta: %d... # locked buffers = %d, fsync gap = %ld\n", H_FILEID(VTOH(vp)), + numOfLockedBuffs, (tv.tv_sec - lastfsync))); + hfs_fsync_transaction(vp); + }; + }; + + retval = lockmgr(&VTOH(vp)->h_lock, flags, &vp->v_interlock, p); + }; + + return retval; +} + + +/* + * There are three ways to qualify for ownership rights on an object: + * + * 1. (a) Your UID matches the UID of the vnode + * (b) The object in question is owned by "unknown" and your UID matches the console user's UID + * 2. Permissions on the filesystem are being ignored and your UID matches the replacement UID + * 3.
You are root + * + */ +int hfs_owner_rights(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean invokesuperuserstatus) { + return ((cred->cr_uid == VTOH(vp)->h_meta->h_uid) || /* [1a] */ + ((VTOH(vp)->h_meta->h_uid == UNKNOWNUID) && (cred->cr_uid == console_user)) || /* [1b] */ + ((VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) && /* [2] */ + (cred->cr_uid == VTOHFS(vp)->hfs_uid)) || /* [2] */ + (invokesuperuserstatus && (suser(cred, &p->p_acflag) == 0))) ? 0 : EPERM; +} + + + +int hfs_catalogentry_owner_rights(uid_t obj_uid, struct mount *mp, struct ucred *cred, struct proc *p, Boolean invokesuperuserstatus) { + return ((cred->cr_uid == obj_uid) || /* [1a] */ + ((VFSTOHFS(mp)->hfs_uid == UNKNOWNUID) && (cred->cr_uid == console_user)) || /* [1b] */ + ((mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) && /* [2] */ + (cred->cr_uid == VFSTOHFS(mp)->hfs_uid)) || /* [2] */ + (invokesuperuserstatus && (suser(cred, &p->p_acflag) == 0))) ? 0 : EPERM; +} + + + +void CopyVNodeToCatalogNode (struct vnode *vp, struct CatalogNodeData *nodeData) +{ + ExtendedVCB *vcb; + FCB *fcb; + struct hfsnode *hp; + Boolean isHFSPlus, isResource; + HFSPlusExtentDescriptor *extents; + + hp = VTOH(vp); + vcb = HTOVCB(hp); + fcb = HTOFCB(hp); + isResource = (H_FORKTYPE(hp) == kRsrcFork); + isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord); + + /* date and time of last fork modification */ + if (hp->h_meta->h_mtime != 0) + nodeData->cnd_contentModDate = to_hfs_time(hp->h_meta->h_mtime); + + if (isHFSPlus) { + /* Make sure that there is no nodeType/mode mismatch */ + if ((nodeData->cnd_type == kCatalogFolderNode) + && ((hp->h_meta->h_mode & IFMT) != IFDIR)) { + + DBG_ASSERT((hp->h_meta->h_mode & IFMT) == IFDIR); + hp->h_meta->h_mode &= ~IFMT; /* Clear the bad bits */ + hp->h_meta->h_mode |= IFDIR; /* Set the proper one */ + }; + /* date and time of last modification (any kind) */ + if (hp->h_meta->h_ctime != 0) + nodeData->cnd_attributeModDate = to_hfs_time(hp->h_meta->h_ctime); + /* date and time 
of last access (MacOS X only) */ + if (hp->h_meta->h_atime != 0) + nodeData->cnd_accessDate = to_hfs_time(hp->h_meta->h_atime); + /* hfs_setattr can change the create date */ + if (hp->h_meta->h_crtime != 0) + nodeData->cnd_createDate = to_hfs_time(hp->h_meta->h_crtime); + if (! (hp->h_meta->h_metaflags & IN_UNSETACCESS)) { + nodeData->cnd_adminFlags = hp->h_meta->h_pflags >> 16; + nodeData->cnd_ownerFlags = hp->h_meta->h_pflags & 0x000000FF; + nodeData->cnd_mode = hp->h_meta->h_mode; + nodeData->cnd_ownerID = hp->h_meta->h_uid; + nodeData->cnd_groupID = hp->h_meta->h_gid; + } + }; + + /* the rest only applies to files */ + if (nodeData->cnd_type == kCatalogFileNode) { + if (hp->h_meta->h_pflags & (SF_IMMUTABLE | UF_IMMUTABLE)) { + /* The file is locked: set the locked bit in the catalog. */ + nodeData->cnd_flags |= kHFSFileLockedMask; + } else { + /* The file is unlocked: make sure the locked bit in the catalog is clear. */ + nodeData->cnd_flags &= ~kHFSFileLockedMask; + }; + if (isResource) { + extents = nodeData->cnd_rsrcfork.extents; + nodeData->cnd_rsrcfork.logicalSize = fcb->fcbEOF; + nodeData->cnd_rsrcfork.totalBlocks = fcb->fcbPLen / vcb->blockSize; + } else { + extents = nodeData->cnd_datafork.extents; + nodeData->cnd_datafork.logicalSize = fcb->fcbEOF; + nodeData->cnd_datafork.totalBlocks = fcb->fcbPLen / vcb->blockSize; + }; + + bcopy ( fcb->fcbExtents, extents, sizeof(HFSPlusExtentRecord)); + + if ((vp->v_type == VBLK) || (vp->v_type == VCHR)) + nodeData->cnd_rawDevice = hp->h_meta->h_rdev; + else if (hp->h_meta->h_metaflags & IN_DATANODE) + nodeData->cnd_linkCount = hp->h_meta->h_nlink; + + if (vp->v_type == VLNK) { + ((struct FInfo *)(&nodeData->cnd_finderInfo))->fdType = SWAP_BE32 (kSymLinkFileType); + ((struct FInfo *)(&nodeData->cnd_finderInfo))->fdCreator = SWAP_BE32 (kSymLinkCreator); + + /* Set this up as an alias */ + #if SUPPORTS_MAC_ALIASES + ((struct FInfo *)(&nodeData->cnd_finderInfo))->fdFlags |= SWAP_BE16 (kIsAlias); + #endif + } + } + } 
+ + +/********************************************************************* + + Sets the name in the filemeta structure + + XXX Does not preflight if changing from one size to another + XXX Currently not protected from context switching + +*********************************************************************/ + +void hfs_set_metaname(char *name, struct hfsfilemeta *fm, struct hfsmount *hfsmp) +{ +int namelen = strlen(name); +char *tname, *fname; + +#if HFS_DIAGNOSTIC + DBG_ASSERT(name != NULL); + DBG_ASSERT(fm != NULL); + if (fm->h_namePtr) { + DBG_ASSERT(fm->h_namelen == strlen(fm->h_namePtr)); + if (strlen(fm->h_namePtr) > MAXHFSVNODELEN) + DBG_ASSERT(fm->h_metaflags & IN_LONGNAME); + }; + if (fm->h_metaflags & IN_LONGNAME) { + DBG_ASSERT(fm->h_namePtr != (char *)fm->h_fileName); + DBG_ASSERT(fm->h_namePtr != NULL); + }; +#endif //HFS_DIAGNOSTIC + + /* + * Details that have to be dealt with: + * 1. No name is allocated. fm->h_namePtr should be NULL + * 2. A name is being changed and: + * a. it was in static space and now cannot fit + * b. It was malloc'd and now will fit in the static + * c. 
It did and will fit in the static + * This could be a little smarter: + * - Dont re'malloc if the new name is smaller (but then wasting memory) + * - If its a longname but the same size, we still free and malloc + * - + */ + + + /* Allocate the new memory */ + if (namelen > MAXHFSVNODELEN) { + /* + * Notice the we ALWAYS allocate, even if the new is less then the old, + * or even if they are the SAME + */ + MALLOC(tname, char *, namelen+1, M_TEMP, M_WAITOK); + } + else + tname = fm->h_fileName; + + simple_lock(&hfsmp->hfs_renamelock); + + /* Check to see if there is something to free, if yes, remember it */ + if (fm->h_metaflags & IN_LONGNAME) + fname = fm->h_namePtr; + else + fname = NULL; + + /* Set the flag */ + if (namelen > MAXHFSVNODELEN) { + fm->h_metaflags |= IN_LONGNAME; + } + else { + fm->h_metaflags &= ~IN_LONGNAME; + }; + + /* Now copy it over */ + bcopy(name, tname, namelen+1); + + fm->h_namePtr = tname; + fm->h_namelen = namelen; + + simple_unlock(&hfsmp->hfs_renamelock); + + /* Lastly, free the old, if set */ + if (fname != NULL) + FREE(fname, M_TEMP); + +} + +void hfs_name_CatToMeta(CatalogNodeData *nodeData, struct hfsfilemeta *fm) +{ +char *fname; + +#if HFS_DIAGNOSTIC + DBG_ASSERT(nodeData != NULL); + DBG_ASSERT(fm != NULL); + if (fm->h_namePtr) { + DBG_ASSERT(fm->h_namelen == strlen(fm->h_namePtr)); + if (strlen(fm->h_namePtr) > MAXHFSVNODELEN) + DBG_ASSERT(fm->h_metaflags & IN_LONGNAME); + }; + if (fm->h_metaflags & IN_LONGNAME) { + DBG_ASSERT(fm->h_namePtr != (char *)fm->h_fileName); + DBG_ASSERT(fm->h_namePtr != NULL); + }; + + DBG_ASSERT(nodeData->cnm_nameptr != NULL); + + if (nodeData->cnm_length) { + DBG_ASSERT(strlen(nodeData->cnm_nameptr) == nodeData->cnm_length); + } + + if (nodeData->cnm_length > MAXHFSVNODELEN) + { DBG_ASSERT(nodeData->cnm_nameptr != nodeData->cnm_namespace); } + else if (nodeData->cnm_nameptr) + { DBG_ASSERT(nodeData->cnm_nameptr == nodeData->cnm_namespace); } + +#endif //HFS_DIAGNOSTIC + + + /* Check to see if there 
is something to free, if yes, remember it */ + if (fm->h_metaflags & IN_LONGNAME) + fname = fm->h_namePtr; + else + fname = NULL; + + /* Set the flag */ + if (nodeData->cnm_length > MAXHFSVNODELEN) { + fm->h_metaflags |= IN_LONGNAME; + } else { + fm->h_metaflags &= ~IN_LONGNAME; + }; + + /* Copy over the name */ + if (nodeData->cnm_nameptr == nodeData->cnm_namespace) { + bcopy(nodeData->cnm_namespace, fm->h_fileName, nodeData->cnm_length+1); + fm->h_namePtr = fm->h_fileName; + } + else { + fm->h_namePtr = nodeData->cnm_nameptr; + } + + fm->h_namelen = nodeData->cnm_length; + + nodeData->cnm_flags |= kCatNameIsConsumed; + nodeData->cnm_flags &= ~kCatNameIsAllocated; + nodeData->cnm_length = 0; + nodeData->cnm_nameptr = (char *)0; + nodeData->cnm_namespace[0] = 0; + + /* Lastly, free the old, if set */ + if (fname != NULL) + FREE(fname, M_TEMP); +} + + + +unsigned long DerivePermissionSummary(uid_t obj_uid, gid_t obj_gid, mode_t obj_mode, struct mount *mp, struct ucred *cred, struct proc *p) { + register gid_t *gp; + unsigned long permissions; + int i; + + /* User id 0 (root) always gets access. */ + if (cred->cr_uid == 0) { + permissions = R_OK | W_OK | X_OK; + goto Exit; + }; + + /* Otherwise, check the owner. */ + if (hfs_catalogentry_owner_rights(obj_uid, mp, cred, p, false) == 0) { + permissions = ((unsigned long)obj_mode & S_IRWXU) >> 6; + goto Exit; + } + + /* Otherwise, check the groups. */ + if (! (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS)) { + for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) { + if (obj_gid == *gp) { + permissions = ((unsigned long)obj_mode & S_IRWXG) >> 3; + goto Exit; + } + }; + }; + + /* Otherwise, settle for 'others' access. 
*/ + permissions = (unsigned long)obj_mode & S_IRWXO; + +Exit: + return permissions; +} + + + +int AttributeBlockSize(struct attrlist *attrlist) { + int size; + attrgroup_t a; + +#if ((ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | ATTR_CMN_OBJTYPE | \ + ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | ATTR_CMN_OBJPERMANENTID | ATTR_CMN_PAROBJID | \ + ATTR_CMN_SCRIPT | ATTR_CMN_CRTIME | ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | \ + ATTR_CMN_ACCTIME | ATTR_CMN_BKUPTIME | ATTR_CMN_FNDRINFO | ATTR_CMN_OWNERID | \ + ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST| \ + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS) != ATTR_CMN_VALIDMASK) +#error AttributeBlockSize: Missing bits in common mask computation! +#endif + DBG_ASSERT((attrlist->commonattr & ~ATTR_CMN_VALIDMASK) == 0); + +#if ((ATTR_VOL_FSTYPE | ATTR_VOL_SIGNATURE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | \ + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | ATTR_VOL_ALLOCATIONCLUMP | ATTR_VOL_IOBLOCKSIZE | \ + ATTR_VOL_OBJCOUNT | ATTR_VOL_FILECOUNT | ATTR_VOL_DIRCOUNT | ATTR_VOL_MAXOBJCOUNT | \ + ATTR_VOL_MOUNTPOINT | ATTR_VOL_NAME | ATTR_VOL_MOUNTFLAGS | ATTR_VOL_INFO | \ + ATTR_VOL_MOUNTEDDEVICE| ATTR_VOL_ENCODINGSUSED | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES) != ATTR_VOL_VALIDMASK) +#error AttributeBlockSize: Missing bits in volume mask computation! +#endif + DBG_ASSERT((attrlist->volattr & ~ATTR_VOL_VALIDMASK) == 0); + +#if ((ATTR_DIR_LINKCOUNT | ATTR_DIR_ENTRYCOUNT | ATTR_DIR_MOUNTSTATUS) != ATTR_DIR_VALIDMASK) +#error AttributeBlockSize: Missing bits in directory mask computation! 
+#endif + DBG_ASSERT((attrlist->dirattr & ~ATTR_DIR_VALIDMASK) == 0); +#if ((ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | ATTR_FILE_ALLOCSIZE | ATTR_FILE_IOBLOCKSIZE | \ + ATTR_FILE_CLUMPSIZE | ATTR_FILE_DEVTYPE | ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | \ + ATTR_FILE_FORKLIST | ATTR_FILE_DATALENGTH | ATTR_FILE_DATAALLOCSIZE | ATTR_FILE_DATAEXTENTS | \ + ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE | ATTR_FILE_RSRCEXTENTS) != ATTR_FILE_VALIDMASK) +#error AttributeBlockSize: Missing bits in file mask computation! +#endif + DBG_ASSERT((attrlist->fileattr & ~ATTR_FILE_VALIDMASK) == 0); + +#if ((ATTR_FORK_TOTALSIZE | ATTR_FORK_ALLOCSIZE) != ATTR_FORK_VALIDMASK) +#error AttributeBlockSize: Missing bits in fork mask computation! +#endif + DBG_ASSERT((attrlist->forkattr & ~ATTR_FORK_VALIDMASK) == 0); + + size = 0; + + if ((a = attrlist->commonattr) != 0) { + if (a & ATTR_CMN_NAME) size += sizeof(struct attrreference); + if (a & ATTR_CMN_DEVID) size += sizeof(dev_t); + if (a & ATTR_CMN_FSID) size += sizeof(fsid_t); + if (a & ATTR_CMN_OBJTYPE) size += sizeof(fsobj_type_t); + if (a & ATTR_CMN_OBJTAG) size += sizeof(fsobj_tag_t); + if (a & ATTR_CMN_OBJID) size += sizeof(fsobj_id_t); + if (a & ATTR_CMN_OBJPERMANENTID) size += sizeof(fsobj_id_t); + if (a & ATTR_CMN_PAROBJID) size += sizeof(fsobj_id_t); + if (a & ATTR_CMN_SCRIPT) size += sizeof(text_encoding_t); + if (a & ATTR_CMN_CRTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_MODTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_CHGTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_ACCTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_BKUPTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_FNDRINFO) size += 32 * sizeof(UInt8); + if (a & ATTR_CMN_OWNERID) size += sizeof(uid_t); + if (a & ATTR_CMN_GRPID) size += sizeof(gid_t); + if (a & ATTR_CMN_ACCESSMASK) size += sizeof(u_long); + if (a & ATTR_CMN_NAMEDATTRCOUNT) size += sizeof(u_long); + if (a & ATTR_CMN_NAMEDATTRLIST) size += 
sizeof(struct attrreference); + if (a & ATTR_CMN_FLAGS) size += sizeof(u_long); + if (a & ATTR_CMN_USERACCESS) size += sizeof(u_long); + }; + if ((a = attrlist->volattr) != 0) { + if (a & ATTR_VOL_FSTYPE) size += sizeof(u_long); + if (a & ATTR_VOL_SIGNATURE) size += sizeof(u_long); + if (a & ATTR_VOL_SIZE) size += sizeof(off_t); + if (a & ATTR_VOL_SPACEFREE) size += sizeof(off_t); + if (a & ATTR_VOL_SPACEAVAIL) size += sizeof(off_t); + if (a & ATTR_VOL_MINALLOCATION) size += sizeof(off_t); + if (a & ATTR_VOL_ALLOCATIONCLUMP) size += sizeof(off_t); + if (a & ATTR_VOL_IOBLOCKSIZE) size += sizeof(u_long); + if (a & ATTR_VOL_OBJCOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_FILECOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_DIRCOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_MAXOBJCOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_MOUNTPOINT) size += sizeof(struct attrreference); + if (a & ATTR_VOL_NAME) size += sizeof(struct attrreference); + if (a & ATTR_VOL_MOUNTFLAGS) size += sizeof(u_long); + if (a & ATTR_VOL_MOUNTEDDEVICE) size += sizeof(struct attrreference); + if (a & ATTR_VOL_ENCODINGSUSED) size += sizeof(unsigned long long); + if (a & ATTR_VOL_CAPABILITIES) size += sizeof(vol_capabilities_attr_t); + if (a & ATTR_VOL_ATTRIBUTES) size += sizeof(vol_attributes_attr_t); + }; + if ((a = attrlist->dirattr) != 0) { + if (a & ATTR_DIR_LINKCOUNT) size += sizeof(u_long); + if (a & ATTR_DIR_ENTRYCOUNT) size += sizeof(u_long); + if (a & ATTR_DIR_MOUNTSTATUS) size += sizeof(u_long); + }; + if ((a = attrlist->fileattr) != 0) { + if (a & ATTR_FILE_LINKCOUNT) size += sizeof(u_long); + if (a & ATTR_FILE_TOTALSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_ALLOCSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_IOBLOCKSIZE) size += sizeof(size_t); + if (a & ATTR_FILE_CLUMPSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_DEVTYPE) size += sizeof(u_long); + if (a & ATTR_FILE_FILETYPE) size += sizeof(u_long); + if (a & ATTR_FILE_FORKCOUNT) size += sizeof(u_long); + if (a & 
ATTR_FILE_FORKLIST) size += sizeof(struct attrreference); + if (a & ATTR_FILE_DATALENGTH) size += sizeof(off_t); + if (a & ATTR_FILE_DATAALLOCSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_DATAEXTENTS) size += sizeof(extentrecord); + if (a & ATTR_FILE_RSRCLENGTH) size += sizeof(off_t); + if (a & ATTR_FILE_RSRCALLOCSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_RSRCEXTENTS) size += sizeof(extentrecord); + }; + if ((a = attrlist->forkattr) != 0) { + if (a & ATTR_FORK_TOTALSIZE) size += sizeof(off_t); + if (a & ATTR_FORK_ALLOCSIZE) size += sizeof(off_t); + }; + + return size; +} + + + +char* FindMountpointName(struct mount *mp) { + size_t namelength = strlen(mp->mnt_stat.f_mntonname); + int foundchars = 0; + char *c; + + if (namelength == 0) return NULL; + + /* Look backwards through the name string, looking for the first slash + encountered (which must precede the last part of the pathname) + */ + for (c = mp->mnt_stat.f_mntonname + namelength - 1; namelength > 0; --c, --namelength) { + if (*c != '/') { + foundchars = 1; + } else if (foundchars) { + return (c + 1); + }; + }; + + return mp->mnt_stat.f_mntonname; +} + + + +void PackObjectName(struct vnode *vp, + char *name, + size_t namelen, + void **attrbufptrptr, + void **varbufptrptr) { + char *mpname; + size_t mpnamelen; + u_long attrlength; + + /* The name of an object may be incorrect for the root of a mounted filesystem + because it may be mounted on a different directory name than the name of the + volume (such as "blah-1". 
For the root directory, it's best to return the + last element of the location where the volume's mounted: + */ + if ((vp->v_flag & VROOT) && (mpname = FindMountpointName(vp->v_mount))) { + mpnamelen = strlen(mpname); + + /* Trim off any trailing slashes: */ + while ((mpnamelen > 0) && (mpname[mpnamelen-1] == '/')) { + --mpnamelen; + }; + + /* If there's anything left, use it instead of the volume's name */ + if (mpnamelen > 0) { + name = mpname; + namelen = mpnamelen; + }; + }; + + attrlength = namelen + 1; + ((struct attrreference *)(*attrbufptrptr))->attr_dataoffset = (char *)(*varbufptrptr) - (char *)(*attrbufptrptr); + ((struct attrreference *)(*attrbufptrptr))->attr_length = attrlength; + (void) strncpy((unsigned char *)(*varbufptrptr), name, attrlength); + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)(*varbufptrptr) += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)(*attrbufptrptr)); +} + + + +void PackVolCommonAttributes(struct attrlist *alist, + struct vnode *root_vp, + struct hfsCatalogInfo *root_catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + struct hfsnode *root_hp = VTOH(root_vp); + struct mount *mp = VTOVFS(root_vp); + struct hfsmount *hfsmp = VTOHFS(root_vp); + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + if ((a = alist->commonattr) != 0) { + if (a & ATTR_CMN_NAME) { + PackObjectName(root_vp, H_NAME(root_hp), root_hp->h_meta->h_namelen, &attrbufptr, &varbufptr); + }; + if (a & ATTR_CMN_DEVID) *((dev_t *)attrbufptr)++ = hfsmp->hfs_raw_dev; + if (a & ATTR_CMN_FSID) { + *((fsid_t *)attrbufptr) = mp->mnt_stat.f_fsid; + ++((fsid_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJTYPE) *((fsobj_type_t *)attrbufptr)++ = 0; + if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = VT_HFS; + if (a & ATTR_CMN_OBJID) { + ((fsobj_id_t 
*)attrbufptr)->fid_objno = 0; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJPERMANENTID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = 0; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_PAROBJID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = 0; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + VCB_LOCK(vcb); + if (a & ATTR_CMN_SCRIPT) *((text_encoding_t *)attrbufptr)++ = vcb->volumeNameEncodingHint; + /* NOTE: all VCB dates are in Mac OS time */ + if (a & ATTR_CMN_CRTIME) { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(LocalToUTC(vcb->localCreateDate)); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_MODTIME) { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(vcb->vcbLsMod); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_CHGTIME) { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(vcb->vcbLsMod); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_ACCTIME) { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(vcb->vcbLsMod); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_BKUPTIME) { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(vcb->vcbVolBkUp); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_FNDRINFO) { + bcopy (&vcb->vcbFndrInfo, attrbufptr, sizeof(vcb->vcbFndrInfo)); + (char *)attrbufptr += sizeof(vcb->vcbFndrInfo); + }; + VCB_UNLOCK(vcb); + if (a & ATTR_CMN_OWNERID) { + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((uid_t *)attrbufptr)++ = + (VTOHFS(root_vp)->hfs_uid == UNKNOWNUID) ? 
console_user : VTOHFS(root_vp)->hfs_uid; + } else { + *((uid_t *)attrbufptr)++ = + (root_hp->h_meta->h_uid == UNKNOWNUID) ? console_user : root_hp->h_meta->h_uid; + }; + }; + if (a & ATTR_CMN_GRPID) { + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((gid_t *)attrbufptr)++ = VTOHFS(root_vp)->hfs_gid; + } else { + *((gid_t *)attrbufptr)++ = root_hp->h_meta->h_gid; + }; + }; + if (a & ATTR_CMN_ACCESSMASK) *((u_long *)attrbufptr)++ = (u_long)root_hp->h_meta->h_mode; + if (a & ATTR_CMN_NAMEDATTRCOUNT) *((u_long *)attrbufptr)++ = 0; /* XXX PPD TBC */ + if (a & ATTR_CMN_NAMEDATTRLIST) { + attrlength = 0; + ((struct attrreference *)attrbufptr)->attr_dataoffset = 0; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_CMN_FLAGS) *((u_long *)attrbufptr)++ = root_hp->h_meta->h_pflags; + if (a & ATTR_CMN_USERACCESS) { + if (mp->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary((VTOHFS(root_vp)->hfs_uid == UNKNOWNUID) ? console_user : VTOHFS(root_vp)->hfs_uid, + VTOHFS(root_vp)->hfs_gid, + root_hp->h_meta->h_mode, + VTOVFS(root_vp), + current_proc()->p_ucred, + current_proc()); + } else { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary((root_hp->h_meta->h_uid == UNKNOWNUID) ? 
console_user : root_hp->h_meta->h_uid, + root_hp->h_meta->h_gid, + root_hp->h_meta->h_mode, + VTOVFS(root_vp), + current_proc()->p_ucred, + current_proc()); + }; + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + + +void PackVolAttributeBlock(struct attrlist *alist, + struct vnode *root_vp, + struct hfsCatalogInfo *root_catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + struct mount *mp = VTOVFS(root_vp); + struct hfsmount *hfsmp = VTOHFS(root_vp); + ExtendedVCB *vcb = HFSTOVCB(hfsmp); + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + if ((a = alist->volattr) != 0) { + VCB_LOCK(vcb); + if (a & ATTR_VOL_FSTYPE) *((u_long *)attrbufptr)++ = (u_long)mp->mnt_vfc->vfc_typenum; + if (a & ATTR_VOL_SIGNATURE) *((u_long *)attrbufptr)++ = (u_long)vcb->vcbSigWord; + if (a & ATTR_VOL_SIZE) *((off_t *)attrbufptr)++ = (off_t)vcb->totalBlocks * (off_t)vcb->blockSize; + if (a & ATTR_VOL_SPACEFREE) *((off_t *)attrbufptr)++ = (off_t)vcb->freeBlocks * (off_t)vcb->blockSize; + if (a & ATTR_VOL_SPACEAVAIL) *((off_t *)attrbufptr)++ = (off_t)vcb->freeBlocks * (off_t)vcb->blockSize; + if (a & ATTR_VOL_MINALLOCATION) *((off_t *)attrbufptr)++ = (off_t)vcb->blockSize; + if (a & ATTR_VOL_ALLOCATIONCLUMP) *((off_t *)attrbufptr)++ = (off_t)(vcb->vcbClpSiz); + if (a & ATTR_VOL_IOBLOCKSIZE) *((u_long *)attrbufptr)++ = (u_long)hfsmp->hfs_logBlockSize; + if (a & ATTR_VOL_OBJCOUNT) *((u_long *)attrbufptr)++ = (u_long)vcb->vcbFilCnt + (u_long)vcb->vcbDirCnt; + if (a & ATTR_VOL_FILECOUNT) *((u_long *)attrbufptr)++ = (u_long)vcb->vcbFilCnt; + if (a & ATTR_VOL_DIRCOUNT) *((u_long *)attrbufptr)++ = (u_long)vcb->vcbDirCnt; + if (a & ATTR_VOL_MAXOBJCOUNT) *((u_long *)attrbufptr)++ = 0xFFFFFFFF; + if (a & ATTR_VOL_MOUNTPOINT) { + ((struct attrreference *)attrbufptr)->attr_dataoffset = (char *)varbufptr - (char *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = 
strlen(mp->mnt_stat.f_mntonname) + 1; + attrlength = ((struct attrreference *)attrbufptr)->attr_length; + attrlength = attrlength + ((4 - (attrlength & 3)) & 3); /* round up to the next 4-byte boundary: */ + (void) bcopy(mp->mnt_stat.f_mntonname, varbufptr, attrlength); + + /* Advance beyond the space just allocated: */ + (char *)varbufptr += attrlength; + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_VOL_NAME) { + ((struct attrreference *)attrbufptr)->attr_dataoffset = (char *)varbufptr - (char *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = VTOH(root_vp)->h_meta->h_namelen + 1; + attrlength = ((struct attrreference *)attrbufptr)->attr_length; + attrlength = attrlength + ((4 - (attrlength & 3)) & 3); /* round up to the next 4-byte boundary: */ + bcopy(H_NAME(VTOH(root_vp)), varbufptr, attrlength); + + /* Advance beyond the space just allocated: */ + (char *)varbufptr += attrlength; + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_VOL_MOUNTFLAGS) *((u_long *)attrbufptr)++ = (u_long)mp->mnt_flag; + if (a & ATTR_VOL_MOUNTEDDEVICE) { + ((struct attrreference *)attrbufptr)->attr_dataoffset = (char *)varbufptr - (char *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = strlen(mp->mnt_stat.f_mntfromname) + 1; + attrlength = ((struct attrreference *)attrbufptr)->attr_length; + attrlength = attrlength + ((4 - (attrlength & 3)) & 3); /* round up to the next 4-byte boundary: */ + (void) bcopy(mp->mnt_stat.f_mntfromname, varbufptr, attrlength); + + /* Advance beyond the space just allocated: */ + (char *)varbufptr += attrlength; + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_VOL_ENCODINGSUSED) *((unsigned long long *)attrbufptr)++ = (unsigned long long)vcb->encodingsBitmap; + if (a & ATTR_VOL_CAPABILITIES) { + ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_FORMAT] = + VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS | VOL_CAP_FMT_HARDLINKS; + 
/*
 * PackVolumeInfo - emit both the common and the volume-specific attribute
 * groups for the volume root into the caller's attribute buffer, advancing
 * the fixed- and variable-area cursors.
 */
void PackVolumeInfo(struct attrlist *alist,
		    struct vnode *root_vp,
		    struct hfsCatalogInfo *root_catinfo,
		    void **attrbufptrptr,
		    void **varbufptrptr) {

	/* Common attributes come first, then the volume group */
	PackVolCommonAttributes(alist, root_vp, root_catinfo, attrbufptrptr, varbufptrptr);
	PackVolAttributeBlock(alist, root_vp, root_catinfo, attrbufptrptr, varbufptrptr);
}
**attrbufptrptr, + void **varbufptrptr) { + + PackVolCommonAttributes(alist, root_vp, root_catinfo, attrbufptrptr, varbufptrptr); + PackVolAttributeBlock(alist, root_vp, root_catinfo, attrbufptrptr, varbufptrptr); +}; + +// Pack the common attribute contents of an objects hfsCatalogInfo +void PackCommonCatalogInfoAttributeBlock(struct attrlist *alist, + struct vnode *root_vp, + struct hfsCatalogInfo *catalogInfo, + void **attrbufptrptr, + void **varbufptrptr ) +{ + struct hfsnode *hp; + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + u_long attrlength; + + hp = VTOH(root_vp); + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + if ((a = alist->commonattr) != 0) + { + if (a & ATTR_CMN_NAME) + { + attrlength = strlen(catalogInfo->nodeData.cnm_nameptr) + 1; + ((struct attrreference *)attrbufptr)->attr_dataoffset = (char *)varbufptr - (char *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + (void) strncpy((unsigned char *)varbufptr, + catalogInfo->nodeData.cnm_nameptr, attrlength); + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_CMN_DEVID) *((dev_t *)attrbufptr)++ = H_DEV(hp); + if (a & ATTR_CMN_FSID) { + *((fsid_t *)attrbufptr) = VTOVFS(root_vp)->mnt_stat.f_fsid; + ++((fsid_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJTYPE) + { + switch (catalogInfo->nodeData.cnd_type) { + case kCatalogFolderNode: + *((fsobj_type_t *)attrbufptr)++ = VDIR; + break; + + case kCatalogFileNode: + /* Files in an HFS+ catalog can represent many things (regular files, symlinks, block/character devices, ...) 
*/ + if ((HTOVCB(hp)->vcbSigWord == kHFSPlusSigWord) && + (catalogInfo->nodeData.cnd_mode & IFMT)) { + *((fsobj_type_t *)attrbufptr)++ = + IFTOVT((mode_t)catalogInfo->nodeData.cnd_mode); + } else { + *((fsobj_type_t *)attrbufptr)++ = VREG; + }; + break; + + default: + *((fsobj_type_t *)attrbufptr)++ = VNON; + break; + }; + } + if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = root_vp->v_tag; + if (a & ATTR_CMN_OBJID) + { + ((fsobj_id_t *)attrbufptr)->fid_objno = catalogInfo->nodeData.cnd_nodeID; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJPERMANENTID) + { + ((fsobj_id_t *)attrbufptr)->fid_objno = catalogInfo->nodeData.cnd_nodeID; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_PAROBJID) + { + ((fsobj_id_t *)attrbufptr)->fid_objno = catalogInfo->nodeData.cnm_parID; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_SCRIPT) + { + if (HTOVCB(hp)->vcbSigWord == kHFSPlusSigWord) { + *((text_encoding_t *)attrbufptr)++ = catalogInfo->nodeData.cnd_textEncoding; + } else { + *((text_encoding_t *)attrbufptr)++ = VTOHFS(root_vp)->hfs_encoding; + } + }; + if (a & ATTR_CMN_CRTIME) + { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(catalogInfo->nodeData.cnd_createDate); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_MODTIME) + { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(catalogInfo->nodeData.cnd_contentModDate); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_CHGTIME) + { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(catalogInfo->nodeData.cnd_attributeModDate); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_ACCTIME) + { + ((struct timespec *)attrbufptr)->tv_sec = 
to_bsd_time(catalogInfo->nodeData.cnd_accessDate); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_BKUPTIME) + { + ((struct timespec *)attrbufptr)->tv_sec = to_bsd_time(catalogInfo->nodeData.cnd_backupDate); + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_FNDRINFO) + { + bcopy (&catalogInfo->nodeData.cnd_finderInfo, attrbufptr, sizeof(catalogInfo->nodeData.cnd_finderInfo)); + (char *)attrbufptr += sizeof(catalogInfo->nodeData.cnd_finderInfo); + }; + if (a & ATTR_CMN_OWNERID) { + if (VTOVFS(root_vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((uid_t *)attrbufptr)++ = + (VTOHFS(root_vp)->hfs_uid == UNKNOWNUID) ? console_user : VTOHFS(root_vp)->hfs_uid; + } else { + *((uid_t *)attrbufptr)++ = + (catalogInfo->nodeData.cnd_ownerID == UNKNOWNUID) ? console_user : catalogInfo->nodeData.cnd_ownerID; + }; + } + if (a & ATTR_CMN_GRPID) { + if (VTOVFS(root_vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((gid_t *)attrbufptr)++ = VTOHFS(root_vp)->hfs_gid; + } else { + *((gid_t *)attrbufptr)++ = catalogInfo->nodeData.cnd_groupID; + }; + } + if (a & ATTR_CMN_ACCESSMASK) { +#if OVERRIDE_UNKNOWN_PERMISSIONS + if (VTOVFS(root_vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + switch (catalogInfo->nodeData.cnd_type) { + case kCatalogFileNode: + /* Files in an HFS+ catalog can represent many things (regular files, symlinks, block/character devices, ...) 
*/ + *((fsobj_type_t *)attrbufptr)++ = (u_long)(VTOHFS(root_vp)->hfs_file_mask); + break; + + case kCatalogFolderNode: + /* Fall through to default case */ + + default: + *((u_long *)attrbufptr)++ = (u_long)(VTOHFS(root_vp)->hfs_dir_mask); + }; + } else { +#endif + *((u_long *)attrbufptr)++ = + (u_long)catalogInfo->nodeData.cnd_mode; +#if OVERRIDE_UNKNOWN_PERMISSIONS + }; +#endif + } + if (a & ATTR_CMN_NAMEDATTRCOUNT) *((u_long *)attrbufptr)++ = 0; /* XXX PPD TBC */ + if (a & ATTR_CMN_NAMEDATTRLIST) + { + attrlength = 0; + ((struct attrreference *)attrbufptr)->attr_dataoffset = 0; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_CMN_FLAGS) + *((u_long *)attrbufptr)++ = + (u_long) (catalogInfo->nodeData.cnd_ownerFlags | + (catalogInfo->nodeData.cnd_adminFlags << 16)); + if (a & ATTR_CMN_USERACCESS) { + if (VTOVFS(root_vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary((VTOHFS(root_vp)->hfs_uid == UNKNOWNUID) ? console_user : VTOHFS(root_vp)->hfs_uid, + VTOHFS(root_vp)->hfs_gid, + (catalogInfo->nodeData.cnd_type == kCatalogFileNode) ? VTOHFS(root_vp)->hfs_file_mask : VTOHFS(root_vp)->hfs_dir_mask, + VTOVFS(root_vp), + current_proc()->p_ucred, + current_proc()); + } else { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary((catalogInfo->nodeData.cnd_ownerID == UNKNOWNUID) ? 
console_user : catalogInfo->nodeData.cnd_ownerID, + catalogInfo->nodeData.cnd_groupID, + (mode_t)catalogInfo->nodeData.cnd_mode, + VTOVFS(root_vp), + current_proc()->p_ucred, + current_proc()); + }; + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + +void PackCommonAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + struct hfsnode *hp; + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + u_long attrlength; + + hp = VTOH(vp); + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + if ((a = alist->commonattr) != 0) { + if (a & ATTR_CMN_NAME) { + PackObjectName(vp, H_NAME(hp), hp->h_meta->h_namelen, &attrbufptr, &varbufptr); + }; + if (a & ATTR_CMN_DEVID) *((dev_t *)attrbufptr)++ = H_DEV(hp); + if (a & ATTR_CMN_FSID) { + *((fsid_t *)attrbufptr) = VTOVFS(vp)->mnt_stat.f_fsid; + ++((fsid_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJTYPE) *((fsobj_type_t *)attrbufptr)++ = vp->v_type; + if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = vp->v_tag; + if (a & ATTR_CMN_OBJID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = H_FILEID(hp); + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJPERMANENTID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = H_FILEID(hp); + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_PAROBJID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = H_DIRID(hp); + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_SCRIPT) + { + if (HTOVCB(hp)->vcbSigWord == kHFSPlusSigWord) { + *((text_encoding_t *)attrbufptr)++ = catInfo->nodeData.cnd_textEncoding; + } else { + *((text_encoding_t *)attrbufptr)++ = VTOHFS(vp)->hfs_encoding; + } + }; + if (a & ATTR_CMN_CRTIME) { + ((struct timespec *)attrbufptr)->tv_sec = hp->h_meta->h_crtime; + ((struct timespec *)attrbufptr)->tv_nsec 
= 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_MODTIME) { + ((struct timespec *)attrbufptr)->tv_sec = hp->h_meta->h_mtime; + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_CHGTIME) { + ((struct timespec *)attrbufptr)->tv_sec = hp->h_meta->h_ctime; + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_ACCTIME) { + ((struct timespec *)attrbufptr)->tv_sec = hp->h_meta->h_atime; + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_BKUPTIME) { + ((struct timespec *)attrbufptr)->tv_sec = hp->h_meta->h_butime; + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_FNDRINFO) { + bcopy (&catInfo->nodeData.cnd_finderInfo, attrbufptr, sizeof(catInfo->nodeData.cnd_finderInfo)); + (char *)attrbufptr += sizeof(catInfo->nodeData.cnd_finderInfo); + }; + if (a & ATTR_CMN_OWNERID) { + if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((uid_t *)attrbufptr)++ = + (VTOHFS(vp)->hfs_uid == UNKNOWNUID) ? console_user : VTOHFS(vp)->hfs_uid; + } else { + *((uid_t *)attrbufptr)++ = + (hp->h_meta->h_uid == UNKNOWNUID) ? 
console_user : hp->h_meta->h_uid; + } + }; + if (a & ATTR_CMN_GRPID) { + if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((gid_t *)attrbufptr)++ = VTOHFS(vp)->hfs_gid; + } else { + *((gid_t *)attrbufptr)++ = hp->h_meta->h_gid; + }; + }; + if (a & ATTR_CMN_ACCESSMASK) *((u_long *)attrbufptr)++ = (u_long)hp->h_meta->h_mode; + if (a & ATTR_CMN_NAMEDATTRCOUNT) *((u_long *)attrbufptr)++ = 0; /* XXX PPD TBC */ + if (a & ATTR_CMN_NAMEDATTRLIST) { + attrlength = 0; + ((struct attrreference *)attrbufptr)->attr_dataoffset = 0; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_CMN_FLAGS) *((u_long *)attrbufptr)++ = hp->h_meta->h_pflags; + if (a & ATTR_CMN_USERACCESS) { + if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary((VTOHFS(vp)->hfs_uid == UNKNOWNUID) ? console_user : VTOHFS(vp)->hfs_uid, + VTOHFS(vp)->hfs_gid, + hp->h_meta->h_mode, + VTOVFS(vp), + current_proc()->p_ucred, + current_proc()); + } else { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary((hp->h_meta->h_uid == UNKNOWNUID) ? 
console_user : hp->h_meta->h_uid, + hp->h_meta->h_gid, + hp->h_meta->h_mode, + VTOVFS(vp), + current_proc()->p_ucred, + current_proc()); + }; + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + +// Pack the directory attributes given hfsCatalogInfo +void PackCatalogInfoDirAttributeBlock( struct attrlist *alist, struct vnode *vp, + struct hfsCatalogInfo *catInfo, void **attrbufptrptr, void **varbufptrptr ) +{ + void *attrbufptr; + attrgroup_t a; + u_long valence; + + attrbufptr = *attrbufptrptr; + a = alist->dirattr; + + if ( (catInfo->nodeData.cnd_type == kCatalogFolderNode) && (a != 0) ) { + valence = catInfo->nodeData.cnd_valence; + if ((catInfo->nodeData.cnm_parID == kRootParID) && + (VTOHFS(vp)->hfs_private_metadata_dir != 0)) { + --valence; /* hide private dir */ + } + /* The 'link count' is faked */ + if (a & ATTR_DIR_LINKCOUNT) + *((u_long *)attrbufptr)++ = 2 + valence; + if (a & ATTR_DIR_ENTRYCOUNT) + *((u_long *)attrbufptr)++ = valence; + if (a & ATTR_DIR_MOUNTSTATUS) + *((u_long *)attrbufptr)++ = 0; + }; + + *attrbufptrptr = attrbufptr; +} + + +void PackDirAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr; + attrgroup_t a; + u_long valence; + + attrbufptr = *attrbufptrptr; + + a = alist->dirattr; + if ((vp->v_type == VDIR) && (a != 0)) { + valence = catInfo->nodeData.cnd_valence; + if ((catInfo->nodeData.cnm_parID == kRootParID) && + (VTOHFS(vp)->hfs_private_metadata_dir != 0)) { + --valence; /* hide private dir */ + } + + /* The 'link count' is faked */ + if (a & ATTR_DIR_LINKCOUNT) + *((u_long *)attrbufptr)++ = 2 + valence; + if (a & ATTR_DIR_ENTRYCOUNT) + *((u_long *)attrbufptr)++ = valence; + if (a & ATTR_DIR_MOUNTSTATUS) { + if (vp->v_mountedhere) { + *((u_long *)attrbufptr)++ = DIR_MNTSTATUS_MNTPOINT; + } else { + *((u_long *)attrbufptr)++ = 0; + }; + }; + }; + + *attrbufptrptr = attrbufptr; +} + + + +// Pack the 
file attributes from the hfsCatalogInfo for the file. +void PackCatalogInfoFileAttributeBlock( struct attrlist *alist, struct vnode *root_vp, struct hfsCatalogInfo *catInfo, void **attrbufptrptr, void **varbufptrptr ) +{ + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + u_long attrlength; + ExtendedVCB *vcb = VTOVCB(root_vp); + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + a = alist->fileattr; + if ( (catInfo->nodeData.cnd_type == kCatalogFileNode) && (a != 0) ) + { +#if HFS_HARDLINKS + if (a & ATTR_FILE_LINKCOUNT) { + u_long linkcnt = catInfo->nodeData.cnd_linkCount; + + if (linkcnt < 1) + linkcnt = 1; + *((u_long *)attrbufptr)++ = linkcnt; + } +#else + if (a & ATTR_FILE_LINKCOUNT) *((u_long *)attrbufptr)++ = 1; +#endif + if (a & ATTR_FILE_TOTALSIZE) { + *((off_t *)attrbufptr)++ = + (off_t)catInfo->nodeData.cnd_datafork.logicalSize + + (off_t)catInfo->nodeData.cnd_rsrcfork.logicalSize; + } + if (a & ATTR_FILE_ALLOCSIZE) { + *((off_t *)attrbufptr)++ = + (off_t)((off_t)catInfo->nodeData.cnd_datafork.totalBlocks * (off_t)vcb->blockSize) + + (off_t)((off_t)catInfo->nodeData.cnd_rsrcfork.totalBlocks * (off_t)vcb->blockSize); + } + if (a & ATTR_FILE_IOBLOCKSIZE) { + *((u_long *)attrbufptr)++ = (u_long)(VTOHFS(root_vp)->hfs_logBlockSize); + } + if (a & ATTR_FILE_CLUMPSIZE) { + *((u_long *)attrbufptr)++ = vcb->vcbClpSiz; + } + if (a & ATTR_FILE_DEVTYPE) { + u_long rawdev; + u_short filetype; + + filetype = (catInfo->nodeData.cnd_mode & IFMT); + if (filetype == IFCHR || filetype == IFBLK) + rawdev = (u_long)catInfo->nodeData.cnd_rawDevice; + else + rawdev = 0; + + *((u_long *)attrbufptr)++ = rawdev; + } + if (a & ATTR_FILE_FILETYPE) { + *((u_long *)attrbufptr)++ = 0; /* XXX PPD */ + } + if (a & ATTR_FILE_FORKCOUNT) { + *((u_long *)attrbufptr)++ = 2; /* XXX PPD */ + } + if (a & ATTR_FILE_FORKLIST) { + attrlength = 0; + ((struct attrreference *)attrbufptr)->attr_dataoffset = 0; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + + /* 
Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_FILE_DATALENGTH) { + *((off_t *)attrbufptr)++ = + (off_t)catInfo->nodeData.cnd_datafork.logicalSize; + } + if (a & ATTR_FILE_DATAALLOCSIZE) { + *((off_t *)attrbufptr)++ = + (off_t)((off_t)catInfo->nodeData.cnd_datafork.totalBlocks * (off_t)vcb->blockSize); + } + if (a & ATTR_FILE_DATAEXTENTS) { + bcopy(&catInfo->nodeData.cnd_datafork.extents, attrbufptr, sizeof(extentrecord)); + (char *)attrbufptr += sizeof(extentrecord) + ((4 - (sizeof(extentrecord) & 3)) & 3); + }; + if (a & ATTR_FILE_RSRCLENGTH) { + *((off_t *)attrbufptr)++ = + (off_t)catInfo->nodeData.cnd_rsrcfork.logicalSize; + } + if (a & ATTR_FILE_RSRCALLOCSIZE) { + *((off_t *)attrbufptr)++ = + (off_t)((off_t)catInfo->nodeData.cnd_rsrcfork.totalBlocks * (off_t)vcb->blockSize); + } + if (a & ATTR_FILE_RSRCEXTENTS) { + bcopy(&catInfo->nodeData.cnd_rsrcfork.extents, attrbufptr, sizeof(extentrecord)); + (char *)attrbufptr += sizeof(extentrecord) + ((4 - (sizeof(extentrecord) & 3)) & 3); + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + +void PackFileAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + struct hfsnode *hp = VTOH(vp); + FCB *fcb = HTOFCB(hp); + ExtendedVCB *vcb = HTOVCB(hp); + Boolean isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord); + void *attrbufptr = *attrbufptrptr; + void *varbufptr = *varbufptrptr; + attrgroup_t a = alist->fileattr; + u_long attrlength; + + if (a != 0) { +#if HFS_HARDLINKS + if (a & ATTR_FILE_LINKCOUNT) { + u_long linkcnt = catInfo->nodeData.cnd_linkCount; + + if (linkcnt < 1) + linkcnt = 1; + *((u_long *)attrbufptr)++ = linkcnt; + } +#else + if (a & ATTR_FILE_LINKCOUNT) *((u_long *)attrbufptr)++ = 1; +#endif + if (a & ATTR_FILE_TOTALSIZE) { + 
*((off_t *)attrbufptr)++ = + (off_t)catInfo->nodeData.cnd_datafork.logicalSize + + (off_t)catInfo->nodeData.cnd_rsrcfork.logicalSize; + } + if (a & ATTR_FILE_ALLOCSIZE) { + switch (H_FORKTYPE(hp)) { + case kDataFork: + *((off_t *)attrbufptr)++ = + (off_t)fcb->fcbPLen + + (off_t)((off_t)catInfo->nodeData.cnd_rsrcfork.totalBlocks * (off_t)vcb->blockSize); + break; + case kRsrcFork: + *((off_t *)attrbufptr)++ = + (off_t)fcb->fcbPLen + + (off_t)((off_t)catInfo->nodeData.cnd_datafork.totalBlocks * (off_t)vcb->blockSize); + break; + default: + *((off_t *)attrbufptr)++ = + (off_t)((off_t)catInfo->nodeData.cnd_datafork.totalBlocks * (off_t)vcb->blockSize) + + (off_t)((off_t)catInfo->nodeData.cnd_rsrcfork.totalBlocks * (off_t)vcb->blockSize); + }; + }; + if (a & ATTR_FILE_IOBLOCKSIZE) *((u_long *)attrbufptr)++ = GetLogicalBlockSize(vp); + if (a & ATTR_FILE_CLUMPSIZE) *((u_long *)attrbufptr)++ = fcb->fcbClmpSize; + if (a & ATTR_FILE_DEVTYPE) { + u_long rawdev; + + if ((vp->v_type == VBLK) || (vp->v_type == VCHR)) + rawdev = (u_long)catInfo->nodeData.cnd_rawDevice; + else + rawdev = 0; + *((u_long *)attrbufptr)++ = rawdev; + } + if (a & ATTR_FILE_FILETYPE) *((u_long *)attrbufptr)++ = 0; /* XXX PPD */ + if (a & ATTR_FILE_FORKCOUNT) *((u_long *)attrbufptr)++ = 2; /* XXX PPD */ + if (a & ATTR_FILE_FORKLIST) { + attrlength = 0; + ((struct attrreference *)attrbufptr)->attr_dataoffset = 0; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (H_FORKTYPE(hp) == kDataFork) { + if (a & ATTR_FILE_DATALENGTH) + *((off_t *)attrbufptr)++ = fcb->fcbEOF; + if (a & ATTR_FILE_DATAALLOCSIZE) *((off_t *)attrbufptr)++ = fcb->fcbPLen; + if (a & ATTR_FILE_DATAEXTENTS) { + bcopy ( fcb->fcbExtents, attrbufptr, sizeof(extentrecord)); + (char *)attrbufptr += sizeof(extentrecord) + 
((4 - (sizeof(extentrecord) & 3)) & 3); + }; + } else { + if (a & ATTR_FILE_DATALENGTH) { + *((off_t *)attrbufptr)++ = + (off_t)catInfo->nodeData.cnd_datafork.logicalSize; + } + if (a & ATTR_FILE_DATAALLOCSIZE) { + *((off_t *)attrbufptr)++ = + (off_t)((off_t)catInfo->nodeData.cnd_datafork.totalBlocks * (off_t)vcb->blockSize); + } + if (a & ATTR_FILE_DATAEXTENTS) { + bcopy(&catInfo->nodeData.cnd_datafork.extents, attrbufptr, sizeof(extentrecord)); + (char *)attrbufptr += sizeof(extentrecord) + ((4 - (sizeof(extentrecord) & 3)) & 3); + }; + }; + if (H_FORKTYPE(hp) == kRsrcFork) { + if (a & ATTR_FILE_RSRCLENGTH) + *((off_t *)attrbufptr)++ = fcb->fcbEOF; + if (a & ATTR_FILE_RSRCALLOCSIZE) *((off_t *)attrbufptr)++ = fcb->fcbPLen; + if (a & ATTR_FILE_RSRCEXTENTS) { + bcopy ( fcb->fcbExtents, attrbufptr, sizeof(extentrecord)); + (char *)attrbufptr += sizeof(extentrecord) + ((4 - (sizeof(extentrecord) & 3)) & 3); + }; + } else { + if (a & ATTR_FILE_RSRCLENGTH) { + *((off_t *)attrbufptr)++ = + (off_t)catInfo->nodeData.cnd_rsrcfork.logicalSize; + } + if (a & ATTR_FILE_RSRCALLOCSIZE) { + *((off_t *)attrbufptr)++ = + (off_t)((off_t)catInfo->nodeData.cnd_rsrcfork.totalBlocks * (off_t)vcb->blockSize); + } + if (a & ATTR_FILE_RSRCEXTENTS) { + bcopy(&catInfo->nodeData.cnd_rsrcfork.extents, attrbufptr, sizeof(extentrecord)); + (char *)attrbufptr += sizeof(extentrecord) + ((4 - (sizeof(extentrecord) & 3)) & 3); + }; + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + +#if 0 +void PackForkAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + /* XXX PPD TBC */ +} +#endif + + +// This routine takes catInfo, and alist, as inputs and packs it into an attribute block. 
+void PackCatalogInfoAttributeBlock ( struct attrlist *alist, struct vnode *root_vp, struct hfsCatalogInfo *catInfo, void **attrbufptrptr, void **varbufptrptr) +{ + //XXX Preflight that alist only contains bits with fields in catInfo + + PackCommonCatalogInfoAttributeBlock( alist, root_vp, catInfo, attrbufptrptr, varbufptrptr ); + + switch ( catInfo->nodeData.cnd_type ) + { + case kCatalogFolderNode: + PackCatalogInfoDirAttributeBlock( alist, root_vp, catInfo, attrbufptrptr, varbufptrptr ); + break; + + case kCatalogFileNode: + PackCatalogInfoFileAttributeBlock( alist, root_vp, catInfo, attrbufptrptr, varbufptrptr ); + break; + + default: /* Without this the compiler complains about VNON,VBLK,VCHR,VLNK,VSOCK,VFIFO,VBAD and VSTR not being handled... */ + /* XXX PPD - Panic? */ + break; + } +} + + + +void PackAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) +{ + if (alist->volattr != 0) { + DBG_ASSERT((vp->v_flag & VROOT) != 0); + PackVolumeInfo(alist,vp, catInfo, attrbufptrptr, varbufptrptr); + } else { + PackCommonAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + + switch (vp->v_type) { + case VDIR: + PackDirAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + break; + + case VREG: + case VLNK: + PackFileAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + break; + + /* Without this the compiler complains about VNON,VBLK,VCHR,VLNK,VSOCK,VFIFO,VBAD and VSTR + not being handled... + */ + default: + /* XXX PPD - Panic? 
*/ + break; + }; + }; +}; + + + +void UnpackVolumeAttributeBlock(struct attrlist *alist, + struct vnode *root_vp, + ExtendedVCB *vcb, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr = *attrbufptrptr; + attrgroup_t a; + + if ((alist->commonattr == 0) && (alist->volattr == 0)) { + return; /* Get out without dirtying the VCB */ + }; + + VCB_LOCK(vcb); + + a = alist->commonattr; + + if (a & ATTR_CMN_SCRIPT) { + vcb->volumeNameEncodingHint = (u_int32_t)*(((text_encoding_t *)attrbufptr)++); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_SCRIPT; +#endif + }; + if (a & ATTR_CMN_CRTIME) { + vcb->localCreateDate = UTCToLocal(to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec)); + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_CRTIME; +#endif + }; + if (a & ATTR_CMN_MODTIME) { + vcb->vcbLsMod = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_MODTIME; +#endif + }; + if (a & ATTR_CMN_BKUPTIME) { + vcb->vcbVolBkUp = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_BKUPTIME; +#endif + }; + if (a & ATTR_CMN_FNDRINFO) { + bcopy (attrbufptr, &vcb->vcbFndrInfo, sizeof(vcb->vcbFndrInfo)); + (char *)attrbufptr += sizeof(vcb->vcbFndrInfo); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_FNDRINFO; +#endif + }; + + DBG_ASSERT(a == 0); /* All common attributes for volumes must've been handled by now... */ + + a = alist->volattr & ~ATTR_VOL_INFO; + if (a & ATTR_VOL_NAME) { + copystr(((char *)attrbufptr) + *((u_long *)attrbufptr), vcb->vcbVN, sizeof(vcb->vcbVN), NULL); + (char *)attrbufptr += sizeof(struct attrreference); +#if HFS_DIAGNOSTIC + a &= ~ATTR_VOL_NAME; +#endif + }; + + DBG_ASSERT(a == 0); /* All common attributes for volumes must've been handled by now... 
*/ + + vcb->vcbFlags |= 0xFF00; // Mark the VCB dirty + + VCB_UNLOCK(vcb); +} + + +void UnpackCommonAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + struct hfsnode *hp = VTOH(vp); + void *attrbufptr; + attrgroup_t a; + + attrbufptr = *attrbufptrptr; + + DBG_ASSERT(catInfo != NULL); + + a = alist->commonattr; + if (a & ATTR_CMN_SCRIPT) { + catInfo->nodeData.cnd_textEncoding = (u_int32_t)*((text_encoding_t *)attrbufptr)++; +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_SCRIPT; +#endif + }; + if (a & ATTR_CMN_CRTIME) { + catInfo->nodeData.cnd_createDate = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + VTOH(vp)->h_meta->h_crtime = (UInt32)((struct timespec *)attrbufptr)->tv_sec; + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_CRTIME; +#endif + }; + if (a & ATTR_CMN_MODTIME) { + catInfo->nodeData.cnd_contentModDate = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + VTOH(vp)->h_meta->h_mtime = (UInt32)((struct timespec *)attrbufptr)->tv_sec; + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_MODTIME; +#endif + }; + if (a & ATTR_CMN_CHGTIME) { + catInfo->nodeData.cnd_attributeModDate = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + VTOH(vp)->h_meta->h_ctime = (UInt32)((struct timespec *)attrbufptr)->tv_sec; + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_CHGTIME; +#endif + }; + if (a & ATTR_CMN_ACCTIME) { + catInfo->nodeData.cnd_accessDate = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + VTOH(vp)->h_meta->h_atime = (UInt32)((struct timespec *)attrbufptr)->tv_sec; + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_ACCTIME; +#endif + }; + if (a & ATTR_CMN_BKUPTIME) { + catInfo->nodeData.cnd_backupDate = to_hfs_time((UInt32)((struct timespec *)attrbufptr)->tv_sec); + VTOH(vp)->h_meta->h_butime = (UInt32)((struct timespec 
*)attrbufptr)->tv_sec; + ++((struct timespec *)attrbufptr); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_BKUPTIME; +#endif + }; + if (a & ATTR_CMN_FNDRINFO) { + bcopy (attrbufptr, &catInfo->nodeData.cnd_finderInfo, sizeof(catInfo->nodeData.cnd_finderInfo)); + (char *)attrbufptr += sizeof(catInfo->nodeData.cnd_finderInfo); +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_FNDRINFO; +#endif + }; + if (a & ATTR_CMN_OWNERID) { + if (VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) { + u_int32_t uid = (u_int32_t)*((uid_t *)attrbufptr)++; + if (uid != (uid_t)VNOVAL) + hp->h_meta->h_uid = uid; /* catalog will get updated by hfs_chown() */ + } + else { + ((uid_t *)attrbufptr)++; + } +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_OWNERID; +#endif + }; + if (a & ATTR_CMN_GRPID) { + u_int32_t gid = (u_int32_t)*((gid_t *)attrbufptr)++; + if (VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) { + if (gid != (gid_t)VNOVAL) + hp->h_meta->h_gid = gid; /* catalog will get updated by hfs_chown() */ + }; +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_GRPID; +#endif + }; + if (a & ATTR_CMN_ACCESSMASK) { + u_int16_t mode = (u_int16_t)*((u_long *)attrbufptr)++; + if (VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) { + if (mode != (mode_t)VNOVAL) { + hp->h_meta->h_mode &= ~ALLPERMS; + hp->h_meta->h_mode |= (mode & ALLPERMS); /* catalog will get updated by hfs_chmod() */ + } + }; +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_ACCESSMASK; +#endif + }; + if (a & ATTR_CMN_FLAGS) { + u_long flags = *((u_long *)attrbufptr)++; + /* Flags are settable only on HFS+ volumes. 
A special exception is made for the IMMUTABLE + flags (SF_IMMUTABLE and UF_IMMUTABLE), which can be set on HFS volumes as well: */ + if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) || + ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && ((flags & ~IMMUTABLE) == 0))) { + if (flags != (u_long)VNOVAL) { + hp->h_meta->h_pflags = flags; /* catalog will get updated by hfs_chflags */ + }; + }; +#if HFS_DIAGNOSTIC + a &= ~ATTR_CMN_FLAGS; +#endif + }; + +#if HFS_DIAGNOSTIC + if (a != 0) { + DEBUG_BREAK_MSG(("UnpackCommonAttributes: unhandled bit: 0x%08X\n", a)); + }; +#endif + + *attrbufptrptr = attrbufptr; +// *varbufptrptr = varbufptr; +} + + + +#if 0 +void UnpackDirAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + /* XXX PPD TBC */ + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} +#endif + + + +#if 0 +void UnpackFileAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + /* XXX PPD TBC */ + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} +#endif + + + +#if 0 +void UnpackForkAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void **varbufptrptr) { + void *attrbufptr; + void *varbufptr; + attrgroup_t a; + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + /* XXX PPD TBC */ + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} +#endif + + + +void UnpackAttributeBlock(struct attrlist *alist, + struct vnode *vp, + struct hfsCatalogInfo *catInfo, + void **attrbufptrptr, + void 
**varbufptrptr) { + + + if (alist->volattr != 0) { + UnpackVolumeAttributeBlock(alist, vp, VTOVCB(vp), attrbufptrptr, varbufptrptr); + return; + }; + + /* We're dealing with a vnode object here: */ + UnpackCommonAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + +#if 0 + switch (vp->v_type) { + case VDIR: + UnpackDirAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + break; + + case VREG: + /* case VCPLX: */ /* XXX PPD TBC */ + UnpackFileAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + break; + + case VFORK: + UnpackForkAttributeBlock(alist, vp, catInfo, attrbufptrptr, varbufptrptr); + break; + + /* Without this the compiler complains about VNON,VBLK,VCHR,VLNK,VSOCK,VFIFO,VBAD and VSTR + not being handled... + */ + default: + /* XXX PPD - Panic? */ + break; + }; +#endif + +}; + + +unsigned long BestBlockSizeFit(unsigned long allocationBlockSize, + unsigned long blockSizeLimit, + unsigned long baseMultiple) { + /* + Compute the optimal (largest) block size (no larger than allocationBlockSize) that is less than the + specified limit but still an even multiple of the baseMultiple. + */ + int baseBlockCount, blockCount; + unsigned long trialBlockSize; + + if (allocationBlockSize % baseMultiple != 0) { + /* + Whoops: the allocation blocks aren't even multiples of the specified base: + no amount of dividing them into even parts will be a multiple, either then! + */ + return 512; /* Hope for the best */ + }; + + /* Try the obvious winner first, to prevent 12K allocation blocks, for instance, + from being handled as two 6K logical blocks instead of 3 4K logical blocks. 
+ Even though the former (the result of the loop below) is the larger allocation + block size, the latter is more efficient: */ + if (allocationBlockSize % PAGE_SIZE == 0) return PAGE_SIZE; + + /* No clear winner exists: pick the largest even fraction <= MAXBSIZE: */ + baseBlockCount = allocationBlockSize / baseMultiple; /* Now guaranteed to be an even multiple */ + + for (blockCount = baseBlockCount; blockCount > 0; --blockCount) { + trialBlockSize = blockCount * baseMultiple; + if (allocationBlockSize % trialBlockSize == 0) { /* An even multiple? */ + if ((trialBlockSize <= blockSizeLimit) && + (trialBlockSize % baseMultiple == 0)) { + return trialBlockSize; + }; + }; + }; + + /* Note: we should never get here, since blockCount = 1 should always work, + but this is nice and safe and makes the compiler happy, too ... */ + return 512; +} + + +/* + * To make the HFS Plus filesystem follow UFS unlink semantics, a remove + * of an active vnode is translated to a move/rename so the file appears + * deleted. The destination folder for these move/renames is setup here + * and a reference to it is place in hfsmp->hfs_private_metadata_dir. 
+ */ +u_long +FindMetaDataDirectory(ExtendedVCB *vcb) +{ + char namep[32]; + hfsCatalogInfo catInfo; + HFSCatalogNodeID dirID; + u_int32_t metadata_createdate; + int retval; + + if (vcb->vcbSigWord != kHFSPlusSigWord) + return (0); + + dirID = 0; + metadata_createdate = 0; + strncpy(namep, HFSPLUSMETADATAFOLDER, sizeof(namep)); + INIT_CATALOGDATA(&catInfo.nodeData, kCatNameNoCopyName); + catInfo.hint = kNoHint; + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(VCBTOHFS(vcb), kHFSCatalogFileID, LK_SHARED, current_proc()); + if (retval) goto Err_Exit; + + if (hfs_getcatalog(vcb, kRootDirID, namep, -1, &catInfo) == 0) { + dirID = catInfo.nodeData.cnd_nodeID; + metadata_createdate = catInfo.nodeData.cnd_createDate; + } else if (VCBTOHFS(vcb)->hfs_fs_ronly == 0) { + if (CreateCatalogNode(vcb, kRootDirID, namep, kCatalogFolderNode, &dirID, &catInfo.hint) == 0) { + catInfo.hint = kNoHint; + if (hfs_getcatalog(vcb, kRootDirID, namep, -1, &catInfo) == 0) { + + /* create date is later used for validation */ + catInfo.nodeData.cnd_createDate = vcb->vcbCrDate; + metadata_createdate = catInfo.nodeData.cnd_createDate; + + /* directory with no permissions owned by root */ + catInfo.nodeData.cnd_mode = IFDIR; + catInfo.nodeData.cnd_adminFlags = (SF_IMMUTABLE >> 16); + + /* hidden and off the desktop view */ + ((struct DInfo *)(&catInfo.nodeData.cnd_finderInfo))->frLocation.v = SWAP_BE16 (22460); + ((struct DInfo *)(&catInfo.nodeData.cnd_finderInfo))->frLocation.h = SWAP_BE16 (22460); + ((struct DInfo *)(&catInfo.nodeData.cnd_finderInfo))->frFlags |= SWAP_BE16 (kIsInvisible + kNameLocked); + + (void) UpdateCatalogNode(vcb, kRootDirID, namep, catInfo.hint, &catInfo.nodeData); + } + } + } + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VCBTOHFS(vcb), kHFSCatalogFileID, LK_RELEASE, current_proc()); + + VCBTOHFS(vcb)->hfs_metadata_createdate = metadata_createdate; +Err_Exit: + CLEAN_CATALOGDATA(&catInfo.nodeData); + + return dirID; +} + + +static void 
+RemovedMetaDataDirectory(ExtendedVCB *vcb) +{ + char name[32]; + hfsCatalogInfo catInfo; + int retval; + + strncpy(name, HFSPLUSMETADATAFOLDER, sizeof(name)); + INIT_CATALOGDATA(&catInfo.nodeData, kCatNameNoCopyName); + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(VCBTOHFS(vcb), kHFSCatalogFileID, LK_SHARED, current_proc()); + if (retval) goto Err_Exit; + + /* If the HFSPLUSMETADATAFOLDER exists then delete it. */ + retval = GetCatalogNode(vcb, kRootDirID, name, strlen(name), kNoHint, + &catInfo.nodeData, &catInfo.hint); + if (retval == 0 && (catInfo.nodeData.cnd_type == kCatalogFolderNode)) { + (void) DeleteCatalogNode(vcb, kRootDirID, name, catInfo.hint); + printf("hfs_mount: removed \"%s\" from hfs volume \"%s\"\n", name, vcb->vcbVN); + } + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VCBTOHFS(vcb), kHFSCatalogFileID, LK_RELEASE, current_proc()); + +Err_Exit: + CLEAN_CATALOGDATA(&catInfo.nodeData); +} + +/* + * This will return the correct logical block size for a given vnode. + * For most files, it is the allocation block size, for meta data like + * BTrees, this is kept as part of the BTree private nodeSize + */ +u_int32_t +GetLogicalBlockSize(struct vnode *vp) +{ +u_int32_t logBlockSize; + + DBG_ASSERT(vp != NULL); + + if ((vp->v_flag & VSYSTEM) && (VTOH(vp)->fcbBTCBPtr!=NULL)) { + BTreeInfoRec bTreeInfo; + int retval; + + /* + * We do not lock the BTrees, because if we are getting block..then the tree + * should be locked in the first place. + * We just want the nodeSize wich will NEVER change..so even if the world + * is changing..the nodeSize should remain the same. Which argues why lock + * it in the first place?? + */ + + (void) BTGetInformation (VTOFCB(vp), kBTreeInfoVersion, &bTreeInfo); + + logBlockSize = bTreeInfo.nodeSize; + } + else + logBlockSize = VTOHFS(vp)->hfs_logBlockSize; + + + DBG_ASSERT(logBlockSize > 0); + + return logBlockSize; +} + +/* + * Map HFS Common errors (negative) to BSD error codes (positive). 
+ * Positive errors (ie BSD errors) are passed through unchanged. + */ +short MacToVFSError(OSErr err) +{ + if (err >= 0) { + if (err > 0) { + DBG_ERR(("MacToVFSError: passing error #%d unchanged...\n", err)); + }; + return err; + }; + + if (err != 0) { + DBG_ERR(("MacToVFSError: mapping error code %d...\n", err)); + }; + + switch (err) { + case dirFulErr: /* -33 */ + case dskFulErr: /* -34 */ + case btNoSpaceAvail: /* -32733 */ + case fxOvFlErr: /* -32750 */ + return ENOSPC; /* +28 */ + + case btBadNode: /* -32731 */ + case ioErr: /* -36 */ + return EIO; /* +5 */ + + case mFulErr: /* -41 */ + case memFullErr: /* -108 */ + return ENOMEM; /* +12 */ + + case tmfoErr: /* -42 */ + /* Consider EMFILE (Too many open files, 24)? */ + return ENFILE; /* +23 */ + + case nsvErr: /* -35 */ + case fnfErr: /* -43 */ + case dirNFErr: /* -120 */ + case fidNotFound: /* -1300 */ + return ENOENT; /* +2 */ + + case wPrErr: /* -44 */ + case vLckdErr: /* -46 */ + case fsDSIntErr: /* -127 */ + return EROFS; /* +30 */ + + case opWrErr: /* -49 */ + case fLckdErr: /* -45 */ + return EACCES; /* +13 */ + + case permErr: /* -54 */ + case wrPermErr: /* -61 */ + return EPERM; /* +1 */ + + case fBsyErr: /* -47 */ + return EBUSY; /* +16 */ + + case dupFNErr: /* -48 */ + case fidExists: /* -1301 */ + case cmExists: /* -32718 */ + case btExists: /* -32734 */ + return EEXIST; /* +17 */ + + case rfNumErr: /* -51 */ + return EBADF; /* +9 */ + + case notAFileErr: /* -1302 */ + return EISDIR; /* +21 */ + + case cmNotFound: /* -32719 */ + case btNotFound: /* -32735 */ + return ENOENT; /* 28 */ + + case cmNotEmpty: /* -32717 */ + return ENOTEMPTY; /* 66 */ + + case cmFThdDirErr: /* -32714 */ + return EISDIR; /* 21 */ + + case fxRangeErr: /* -32751 */ + return EIO; /* 5 */ + + case bdNamErr: /* -37 */ + return ENAMETOOLONG; /* 63 */ + + case fnOpnErr: /* -38 */ + case eofErr: /* -39 */ + case posErr: /* -40 */ + case paramErr: /* -50 */ + case badMDBErr: /* -60 */ + case badMovErr: /* -122 */ + case 
sameFileErr: /* -1306 */ + case badFidErr: /* -1307 */ + case fileBoundsErr: /* -1309 */ + return EINVAL; /* +22 */ + + default: + DBG_UTILS(("Unmapped MacOS error: %d\n", err)); + return EIO; /* +5 */ + } +} + + +/* + * All of our debugging functions + */ + +#if HFS_DIAGNOSTIC + +void debug_vn_status (char* introStr, struct vnode *vn) +{ + DBG_VOP(("%s:\t",introStr)); + if (vn != NULL) + { + if (vn->v_tag != VT_HFS) + { + DBG_VOP(("NON-HFS VNODE Ox%08lX\n", (unsigned long)vn)); + } + else if(vn->v_tag==VT_HFS && (vn->v_data==NULL || VTOH((vn))->h_valid != HFS_VNODE_MAGIC)) + { + DBG_VOP(("BAD VNODE PRIVATE DATA!!!!\n")); + } + else + { + DBG_VOP(("r: %d & ", vn->v_usecount)); + if (lockstatus(&VTOH(vn)->h_lock)) + { + DBG_VOP_CONT(("is L\n")); + } + else + { + DBG_VOP_CONT(("is U\n")); + } + } + } + else + { + DBG_VOP(("vnode is NULL\n")); + }; +} + +void debug_vn_print (char* introStr, struct vnode *vn) +{ +// DBG_FUNC_NAME("DBG_VN_PRINT"); + DBG_ASSERT (vn != NULL); + DBG_VFS(("%s: ",introStr)); + DBG_VFS_CONT(("vnode: 0x%x is a ", (uint)vn)); + switch (vn->v_tag) + { + case VT_UFS: + DBG_VFS_CONT(("%s","UFS")); + break; + case VT_HFS: + DBG_VFS_CONT(("%s","HFS")); + break; + default: + DBG_VFS_CONT(("%s","UNKNOWN")); + break; + } + + DBG_VFS_CONT((" vnode\n")); + if (vn->v_tag==VT_HFS) + { + if (vn->v_data==NULL) + { + DBG_VFS(("BAD VNODE PRIVATE DATA!!!!\n")); + } + else + { + DBG_VFS((" Name: %s Id: %ld ",H_NAME(VTOH(vn)), H_FILEID(VTOH(vn)))); + } + } + else + DBG_VFS((" ")); + + DBG_VFS_CONT(("Refcount: %d\n", vn->v_usecount)); + if (VOP_ISLOCKED(vn)) + { + DBG_VFS((" The vnode is locked\n")); + } + else + { + DBG_VFS((" The vnode is not locked\n")); + } +} + +void debug_rename_test_locks (char* introStr, + struct vnode *fvp, + struct vnode *fdvp, + struct vnode *tvp, + struct vnode *tdvp, + int fstatus, + int fdstatus, + int tstatus, + int tdstatus +) +{ + DBG_VOP(("\t%s: ", introStr)); + if (fvp) {if(lockstatus(&VTOH(fvp)->h_lock)){DBG_VFS_CONT(("L"));} 
else {DBG_VFS_CONT(("U"));}} else { DBG_VFS_CONT(("X"));}; + if (fdvp) {if(lockstatus(&VTOH(fdvp)->h_lock)){DBG_VFS_CONT(("L"));} else {DBG_VFS_CONT(("U"));}} else { DBG_VFS_CONT(("X"));}; + if (tvp) {if(lockstatus(&VTOH(tvp)->h_lock)){DBG_VFS_CONT(("L"));} else {DBG_VFS_CONT(("U"));}} else { DBG_VFS_CONT(("X"));}; + if (tdvp) {if(lockstatus(&VTOH(tdvp)->h_lock)){DBG_VFS_CONT(("L"));} else {DBG_VFS_CONT(("U"));}} else { DBG_VFS_CONT(("X"));}; + DBG_VFS_CONT(("\n")); + + if (fvp) { + if (lockstatus(&VTOH(fvp)->h_lock)) { + if (fstatus==VOPDBG_UNLOCKED) { + DBG_VOP(("\tfvp should be NOT LOCKED and it is\n")); + } + } else if (fstatus == VOPDBG_LOCKED) { + DBG_VOP(("\tfvp should be LOCKED and it isnt\n")); + } + } + + if (fdvp) { + if (lockstatus(&VTOH(fdvp)->h_lock)) { + if (fdstatus==VOPDBG_UNLOCKED) { + DBG_VOP(("\tfdvp should be NOT LOCKED and it is\n")); + } + } else if (fdstatus == VOPDBG_LOCKED) { + DBG_VOP(("\tfdvp should be LOCKED and it isnt\n")); + } + } + + if (tvp) { + if (lockstatus(&VTOH(tvp)->h_lock)) { + if (tstatus==VOPDBG_UNLOCKED) { + DBG_VOP(("\ttvp should be NOT LOCKED and it is\n")); + } + } else if (tstatus == VOPDBG_LOCKED) { + DBG_VOP(("\ttvp should be LOCKED and it isnt\n")); + } + } + + if (tdvp) { + if (lockstatus(&VTOH(tdvp)->h_lock)) { + if (tdstatus==VOPDBG_UNLOCKED) { + DBG_VOP(("\ttdvp should be NOT LOCKED and it is\n")); + } + } else if (tdstatus == VOPDBG_LOCKED) { + DBG_VOP(("\ttdvp should be LOCKED and it isnt\n")); + + } + } + +} +#endif /* HFS_DIAGNOSTIC */ + + +#if HFS_DIAGNOSTIC +void debug_check_buffersizes(struct vnode *vp, struct hfsnode *hp, struct buf *bp) { + DBG_ASSERT(bp->b_validoff == 0); + DBG_ASSERT(bp->b_dirtyoff == 0); + DBG_ASSERT((bp->b_bcount == HTOHFS(hp)->hfs_logBlockSize) || + ((bp->b_bcount % 512 == 0) && + (bp->b_validend > 0) && + (bp->b_dirtyend > 0) && + (bp->b_bcount < HTOHFS(hp)->hfs_logBlockSize))); + + if (bp->b_validend == 0) { + DBG_ASSERT(bp->b_dirtyend == 0); + } else { + 
DBG_ASSERT(bp->b_validend == bp->b_bcount); + DBG_ASSERT(bp->b_dirtyend <= bp->b_bcount); + }; +} + + +void debug_check_blocksizes(struct vnode *vp) { + struct hfsnode *hp = VTOH(vp); + struct buf *bp; + + if (vp->v_flag & VSYSTEM) return; + + for (bp = vp->v_cleanblkhd.lh_first; bp != NULL; bp = bp->b_vnbufs.le_next) { + debug_check_buffersizes(vp, hp, bp); + }; + + for (bp = vp->v_dirtyblkhd.lh_first; bp != NULL; bp = bp->b_vnbufs.le_next) { + debug_check_buffersizes(vp, hp, bp); + }; +} + +void debug_check_catalogdata(struct CatalogNodeData *cat) { + + if (cat->cnm_nameptr == NULL) { + DBG_ASSERT((cat->cnm_flags & kCatNameIsAllocated) == 0); + } + else if (cat->cnm_nameptr == cat->cnm_namespace) { + DBG_ASSERT((cat->cnm_flags & kCatNameIsAllocated) == 0); + } + else { + DBG_ASSERT((cat->cnm_flags & kCatNameIsAllocated) == kCatNameIsAllocated); + } + + if (cat->cnm_nameptr) { + DBG_ASSERT(strlen(cat->cnm_nameptr) == cat->cnm_length); + } + + if (cat->cnm_flags & kCatNameIsConsumed) { + DBG_ASSERT((cat->cnm_flags & kCatNameIsAllocated) == 0); + } + + if (cat->cnm_flags & kCatNameNoCopyName) { + DBG_ASSERT((cat->cnm_flags & (kCatNameIsAllocated|kCatNameIsConsumed|kCatNameIsMangled)) == 0); + DBG_ASSERT(cat->cnm_length == 0); + DBG_ASSERT(cat->cnm_nameptr == 0); + DBG_ASSERT(strlen(cat->cnm_namespace) == 0); + } + +} + +extern void hfs_vhash_dbg(struct hfsnode *hp); + +/* Checks the valicity of a hfs vnode */ +void debug_check_vnode(struct vnode *vp, int stage) { + struct hfsnode *hp; + u_long size; + int i; + + /* vcb stuff */ + if (VTOHFS(vp)->hfs_mount_flags & kHFSBootVolumeInconsistentMask) + DEBUG_BREAK_MSG(("Volume is damaged!")); + + /* vnode stuff */ + if (vp==NULL) + DEBUG_BREAK_MSG(("Null vnode")); + if (vp->v_tag != VT_HFS) + DEBUG_BREAK_MSG(("Not a HFS vnode, it is a %d", vp->v_tag)); + if (vp->v_data==NULL) + DEBUG_BREAK_MSG(("v_data is NULL")); + + /* hfsnode stuff */ + hp = VTOH(vp); + if (hp->h_valid != HFS_VNODE_MAGIC) + DEBUG_BREAK_MSG(("Bad Formed 
HFS node")); + if (hp->h_vp==NULL || hp->h_vp!=vp) + DEBUG_BREAK_MSG(("Bad hfsnode vnode pte")); + if (hp->h_meta == NULL) + DEBUG_BREAK_MSG(("Bad hfsnode meta ptr")); + switch (H_FORKTYPE(hp)) { + case kDataFork: + case kRsrcFork: + if ((hp->h_meta->h_siblinghead.cqh_first == NULL) || (hp->h_meta->h_siblinghead.cqh_last == NULL)) + DEBUG_BREAK_MSG(("Null sibling header")); + if ((hp->h_sibling.cqe_next==NULL) || (hp->h_sibling.cqe_prev==NULL)) + DEBUG_BREAK_MSG(("Null sibling list")); + if (hp->h_meta->h_usecount<1 || hp->h_meta->h_usecount>2) + DEBUG_BREAK_MSG(("Bad sibling usecount")); + break; + case kDirectory: + case kSysFile: + if ((hp->h_meta->h_siblinghead.cqh_first != NULL) || (hp->h_meta->h_siblinghead.cqh_last != NULL)) + DEBUG_BREAK_MSG(("Non Null sibling header")); + if ((hp->h_sibling.cqe_next!=NULL) || (hp->h_sibling.cqe_prev!=NULL)) + DEBUG_BREAK_MSG(("Null sibling list")); + if (hp->h_meta->h_usecount!=1) + DEBUG_BREAK_MSG(("Bad usecount")); + + break; + default: + DEBUG_BREAK_MSG(("Bad hfsnode fork type")); + } + + /* hfsmeta stuff */ + if (hp->h_meta->h_devvp == NULL) + DEBUG_BREAK_MSG(("Bad hfsnode dev vnode")); + if (H_DEV(hp) == 0) + DEBUG_BREAK_MSG(("Bad dev id")); + if (H_FILEID(hp) == 0) + DEBUG_BREAK_MSG(("Bad file id")); + + if (((hp->h_meta->h_metaflags & IN_DATANODE)==0) && (H_DIRID(hp) == 0) && (H_FILEID(hp) != 1)) + DEBUG_BREAK_MSG(("Bad dir id")); + + if (hp->h_meta->h_namePtr == NULL && hp->h_meta->h_namelen!=0) + DEBUG_BREAK_MSG(("hfs meta h_namelen is not 0")); + if (hp->h_meta->h_namePtr != NULL && strlen(hp->h_meta->h_namePtr) != hp->h_meta->h_namelen) + DEBUG_BREAK_MSG(("Bad hfs meta h_namelen")); + + /* Check the hash */ + hfs_vhash_dbg(hp); + + /* Check to see if we want to compare with the disk */ + if (stage > 200) { + int retval; + hfsCatalogInfo catInfo; + + INIT_CATALOGDATA(&catInfo.nodeData, 0); + catInfo.hint = 0; + + if (hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_SHARED, current_proc())) + return; + + if 
(hfs_getcatalog(VTOVCB(vp), H_DIRID(hp), hp->h_meta->h_namePtr, hp->h_meta->h_namelen, &catInfo)) + DEBUG_BREAK_MSG(("Could not find hfsnode Catalog record")); + + (void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, current_proc()); + + if (H_FILEID(hp) != catInfo.nodeData.cnd_nodeID) + DEBUG_BREAK_MSG(("hfsnode catalog node id mismatch")); + if (H_DIRID(hp) != catInfo.nodeData.cnm_parID) + DEBUG_BREAK_MSG(("hfsnode catalog dir id mismatch")); + if (strcmp(hp->h_meta->h_namePtr, catInfo.nodeData.cnm_nameptr) != 0) + DEBUG_BREAK_MSG(("hfsnode catalog name mismatch")); + /* Check dates too??? */ + + CLEAN_CATALOGDATA(&catInfo.nodeData); + + } + + + /* Check Extents */ + { + for(i = 0, size = 0; i < kHFSPlusExtentDensity; i++) + { + size += hp->fcbExtents[i].blockCount; + } + + if (hp->fcbEOF > hp->fcbPLen) + DEBUG_BREAK_MSG(("fcbPLen is smaller than fcbEOF")); + + if (hp->fcbExtents[kHFSPlusExtentDensity-1].blockCount == 0) { + if ((off_t)size * (off_t)VTOVCB(vp)->blockSize != hp->fcbPLen) + DEBUG_BREAK_MSG(("fcbPLen does not match extents")); + } else { + if ( hp->fcbPLen < (off_t)size * (off_t)VTOVCB(vp)->blockSize) + DEBUG_BREAK_MSG(("fcbPLen is smaller than extents")); + } + for(i = 0; i < kHFSPlusExtentDensity; i++) + { + if (hp->fcbExtents[i].blockCount == 0 || hp->fcbExtents[i].startBlock == 0) + break; + } + if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && i > kHFSExtentDensity) + DEBUG_BREAK_MSG(("Illegal value in extents for ordinary HFS")); + if (i > kHFSPlusExtentDensity) { + for(; i < kHFSPlusExtentDensity; i++) + { + if (hp->fcbExtents[i].blockCount != 0 || hp->fcbExtents[i].startBlock != 0) + DEBUG_BREAK_MSG(("Illegal value in extents")); + } + } + } + + + /* BTree stuff */ + if (0 && vp->v_flag & VSYSTEM) { + BTreeInfoRec info; + + BTGetInformation(hp, 0, &info); + if (hp->fcbBTCBPtr == NULL) + DEBUG_BREAK_MSG(("Null fcbBTCBPtr")); + if (H_HINT(hp) == 0) + DEBUG_BREAK_MSG(("hint is 0")); + if (H_HINT(hp) > info.numNodes) + 
DEBUG_BREAK_MSG(("hint > numNodes")); + } + +} + +#endif /* HFS_DIAGNOSTIC */ diff --git a/bsd/hfs/hfs_vhash.c b/bsd/hfs/hfs_vhash.c new file mode 100644 index 000000000..1c7a54e7a --- /dev/null +++ b/bsd/hfs/hfs_vhash.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Copyright (c) 1998 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)hfs_vhash.c + * derived from @(#)ufs_ihash.c 8.7 (Berkeley) 5/17/95 + */ + +#include +#include +#include +#include +#include +#include + +#include "hfs.h" +#include "hfs_dbg.h" + + +/* + * Structures associated with hfsnode cacheing. + */ +LIST_HEAD(vhashhead, hfsnode) *vhashtbl; +u_long vhash; /* size of hash table - 1 */ +#define HFSNODEHASH(device, nodeID) (&vhashtbl[((device) + (nodeID)) & vhash]) +struct slock hfs_vhash_slock; + +/* + * Initialize hfsnode hash table. + */ +void +hfs_vhashinit() +{ + + vhashtbl = hashinit(desiredvnodes, M_HFSMNT, &vhash); + simple_lock_init(&hfs_vhash_slock); +} + +/* + * Use the device/dirID/forkType tuple to find the incore hfsnode, and return a pointer + * to it. If it is in core, but locked, wait for it. 
+ * + * Acceptable forkTypes are kData, kRsrcFork, kDirectory, or kDefault which translates to either + * kDataFork or kDirectory + * + * While traversing the hash, expext that a hfsnode is in the midst of being allocated, if so, + * then sleep and try again + */ +struct vnode * +hfs_vhashget(dev, nodeID, forkType) + dev_t dev; + UInt32 nodeID; + UInt8 forkType; +{ + struct proc *p = current_proc(); /* XXX */ + struct hfsnode *hp; + struct vnode *vp; + + DBG_ASSERT(forkType!=kUndefinedFork); + /* + * Go through the hash list + * If a vnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again + */ +loop: + simple_lock(&hfs_vhash_slock); + for (hp = HFSNODEHASH(dev, nodeID)->lh_first; hp; hp = hp->h_hash.le_next) { + /* The vnode might be in an incomplete state, so sleep until its ready */ + if (hp->h_nodeflags & IN_ALLOCATING) { + simple_unlock(&hfs_vhash_slock); + tsleep((caddr_t)hp, PINOD, "hfs_vhashlookup", 0); + goto loop; + }; + + DBG_ASSERT(hp->h_meta != NULL); + if ((H_FILEID(hp) == nodeID) && + (H_DEV(hp) == dev) && + !(hp->h_meta->h_metaflags & IN_NOEXISTS)) { + /* SER XXX kDefault of meta data (ksysfile) is not assumed here */ + if ((H_FORKTYPE(hp) == forkType) || + (forkType == kAnyFork) || + ((forkType == kDefault) && ((H_FORKTYPE(hp) == kDirectory) + || (H_FORKTYPE(hp) == kDataFork)))) { + vp = HTOV(hp); + simple_lock(&vp->v_interlock); + simple_unlock(&hfs_vhash_slock); + if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) + goto loop; + return (vp); + }; + }; + }; + simple_unlock(&hfs_vhash_slock); + return (NULL); +} + + + + +/* + * Lock the hfsnode and insert the hfsnode into the hash table, and return it locked. 
+ * Returns the sibling meta data if it exists, elses return NULL + */ +void +hfs_vhashins_sibling(dev, nodeID, hp, fm) + dev_t dev; + UInt32 nodeID; + struct hfsnode *hp; + struct hfsfilemeta **fm; +{ + struct vhashhead *ipp; + struct hfsnode *thp; + struct hfsfilemeta *tfm; + + DBG_ASSERT(fm != NULL); + DBG_ASSERT(hp != NULL); + DBG_ASSERT(hp->h_meta == NULL); + DBG_ASSERT(H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork); + + tfm = NULL; + lockmgr(&hp->h_lock, LK_EXCLUSIVE, (struct slock *)0, current_proc()); + + + /* + * Go through the hash list to see if a sibling exists + * If it does, store it to return + * If a vnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again + */ + + ipp = HFSNODEHASH(dev, nodeID); + +loop: + simple_lock(&hfs_vhash_slock); + for (thp = ipp->lh_first; thp; thp = thp->h_hash.le_next) { + if (thp->h_nodeflags & IN_ALLOCATING) { /* Its in the process of being allocated */ + simple_unlock(&hfs_vhash_slock); + tsleep((caddr_t)thp, PINOD, "hfs_vhash_ins_meta", 0); + goto loop; + }; + + DBG_ASSERT(thp->h_meta != NULL); + if ((H_FILEID(thp) == nodeID) && (H_DEV(thp) == dev)) { + tfm = hp->h_meta = thp->h_meta; + break; + }; + }; + + /* Add to sibling list..if it can have them */ + if (tfm && (H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork)) { + DBG_ASSERT(tfm->h_siblinghead.cqh_first != NULL && tfm->h_siblinghead.cqh_last != NULL); + simple_lock(&tfm->h_siblinglock); + CIRCLEQ_INSERT_HEAD(&tfm->h_siblinghead, hp, h_sibling); + simple_unlock(&tfm->h_siblinglock); + }; + + LIST_INSERT_HEAD(ipp, hp, h_hash); + simple_unlock(&hfs_vhash_slock); + *fm = tfm; +} + + + +/* +* Lock the hfsnode and insert the hfsnode into the hash table, and return it locked. 
+ */ +void +hfs_vhashins(dev, nodeID, hp) + dev_t dev; + UInt32 nodeID; + struct hfsnode *hp; +{ + struct vhashhead *ipp; + + DBG_ASSERT(hp != NULL); + DBG_ASSERT(nodeID != 0); + + lockmgr(&hp->h_lock, LK_EXCLUSIVE, (struct slock *)0, current_proc()); + + simple_lock(&hfs_vhash_slock); + ipp = HFSNODEHASH(dev, nodeID); + LIST_INSERT_HEAD(ipp, hp, h_hash); + simple_unlock(&hfs_vhash_slock); +} + + +/* + * Remove the hfsnode from the hash table and then checks to see if another forks exists. + */ +void +hfs_vhashrem(hp) + struct hfsnode *hp; +{ + + DBG_ASSERT(hp != NULL); + DBG_ASSERT(hp->h_meta != NULL); + + simple_lock(&hfs_vhash_slock); + + /* Test to see if there are siblings, should only apply to forks */ + if (hp->h_meta->h_siblinghead.cqh_first != NULL) { + simple_lock(&hp->h_meta->h_siblinglock); + CIRCLEQ_REMOVE(&hp->h_meta->h_siblinghead, hp, h_sibling); + simple_unlock(&hp->h_meta->h_siblinglock); + }; + + LIST_REMOVE(hp, h_hash); + +#if HFS_DIAGNOSTIC + hp->h_hash.le_next = NULL; + hp->h_hash.le_prev = NULL; +#endif + + + simple_unlock(&hfs_vhash_slock); + + +} + + +/* + * Moves the entries from one bucket to another + * nodeID is the old bucket id + */ +void +hfs_vhashmove(hp, oldNodeID) + struct hfsnode *hp; + UInt32 oldNodeID; +{ + struct vhashhead *oldHeadIndex, *newHeadIndex; + struct hfsnode *thp, *nextNode; + UInt32 newNodeID; + + DBG_ASSERT(hp != NULL); + DBG_ASSERT(hp->h_meta != NULL); + + newNodeID = H_FILEID(hp); + + oldHeadIndex = HFSNODEHASH(H_DEV(hp), oldNodeID); + newHeadIndex = HFSNODEHASH(H_DEV(hp), newNodeID); + + /* If it is moving to the same bucket...then we are done */ + if (oldHeadIndex == newHeadIndex) + return; + +loop: + + /* + * Go through the old hash list + * If there is a nodeid mismatch, or the nodeid doesnt match the current bucket + * remove it and add it to the right bucket. 
+ * If a vnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again + */ + simple_lock(&hfs_vhash_slock); + for (nextNode = oldHeadIndex->lh_first; nextNode; ) { + if (nextNode->h_nodeflags & IN_ALLOCATING) { /* Its in the process of being allocated */ + simple_unlock(&hfs_vhash_slock); + tsleep((caddr_t)nextNode, PINOD, "hfs_vhashmove", 0); + goto loop; + }; + + DBG_ASSERT(nextNode->h_meta != NULL); + thp = nextNode; + nextNode = nextNode->h_hash.le_next; + if (newNodeID == H_FILEID(thp)) { + LIST_REMOVE(thp, h_hash); + thp->h_hash.le_next = NULL; + thp->h_hash.le_next = NULL; + LIST_INSERT_HEAD(newHeadIndex, thp, h_hash); + }; + }; + + simple_unlock(&hfs_vhash_slock); +} + +#if HFS_DIAGNOSTIC +/* + * This will test the hash entry for a given hfsnode + * It will test: + * 1. The uniqei existance of the node + * 2. All other nodes, proper membership to the hash + * 3. Proper termination of the hash + * 4. All members have a non-null h_meta + */ +void hfs_vhash_dbg(hp) + struct hfsnode *hp; +{ + struct proc *p = current_proc(); /* XXX */ + struct vnode *vp; + struct hfsnode *thp, *tthp; + int maxsiblings = 1; + int wasFound = false; + struct vhashhead *ipp, *jpp; + dev_t dev = H_DEV(hp); + UInt32 nodeID = H_FILEID(hp); + UInt8 forkType = H_FORKTYPE(hp); + u_long forksfound = 0; + + if (forkType==kDataFork || forkType==kRsrcFork) + maxsiblings++; + + if (hp == NULL) + DEBUG_BREAK_MSG(("hash_dgh: Null hfsnode")); + /* + * Go through the hash list + * If a vnode is in the process of being cleaned out or being + * allocated, wait for it to be finished and then try again + */ + ipp = HFSNODEHASH(dev, nodeID); + +loop: + simple_lock(&hfs_vhash_slock); + for (thp = ipp->lh_first; thp; thp = thp->h_hash.le_next) { + if (thp->h_nodeflags & IN_ALLOCATING) { /* Its in the process of being allocated */ + simple_unlock(&hfs_vhash_slock); + tsleep((caddr_t)thp, PINOD, "hfs_vhash_ins_meta", 0); + goto loop; + }; + + if 
(thp->h_meta == NULL) + DEBUG_BREAK_MSG(("hash_dgh: Null hfs_meta")); + jpp = (HFSNODEHASH(H_DEV(thp), H_FILEID(thp))); + if (ipp != jpp) + DEBUG_BREAK_MSG(("hash_dgh: Member on wrong hash")); + + if ((H_FILEID(thp) == nodeID) && (H_DEV(thp) == dev)) { + maxsiblings--; + if (maxsiblings < 0) + DEBUG_BREAK_MSG(("hash_dgh: Too many siblings")); + if ((1<uio_iovcnt == 1 is violated + * and cleaned up call to uiomove to check space available first. + * 2-Jul-1999 Pat Dirks Fixed hfs_setattrlist to ignore attempts to set null volume name (#2331829). + * 18-May-1999 Don Brady Add support for rooting from HFS Plus. + * 4-May-1999 Don Brady Split off hfs_search.c + * 15-Apr-1999 Don Brady Change va_nlink back to 1 for directories in hfs_getattr. + * 6-Apr-1999 Don Brady Fix deference of NULL h_sibling in hfs_chid. + * 29-Mar-1999 Scott Roberts Put in the correct . and .. entries for readdir + * 22-Mar-1999 Don Brady Add UFS delete semantic support to hfs_remove. + * 1-Mar-1999 Scott Roberts h_meta is now released when the complex vnode is relesed + * 26-Feb-1999 Pat Dirks (copied by Chw) Fixed hfs_lookup to check for + * error return on vget. + * 25-Feb-1999 Pat Dirks Fixed hfs_remove to use a local copy of the h_sibling pointer around vnode_uncache. + * 3-Feb-1999 Pat Dirks Changed to stop updating wrapper volume name in MDB since wrapper volume's + * catalog isn't updated and this inconsistency trips Disk First Aid's checks. + * 22-Jan-1999 Pat Dirks Changed hfs_rename, hfs_remove, and hfs_rmdir to call cache_purge. + * 22-Jan-1999 Don Brady After calling hfsMoveRename call hfs_getcatalog to get new name. + * 12-Jan-1999 Don Brady Fixed the size of ATTR_CMN_NAME buffer to NAME_MAX + 1. + * 8-Jan-1999 Pat Dirks Added hfs_writepermission and change hfs_setattrlist to use it instead of + * including an incorrect derivative of hfs_access in-line. + * 15-Dec-1998 Pat Dirks Changed setattrlist to do permission checking as appropriate (Radar #2290212). 
+ * 17-Nov-1998 Scott Roberts Added support for long volume names in SetAttrList(). + * 6-Nov-1998 Don Brady Add support for UTF-8 names. + * 3-Nov-1998 Umesh Vaishampayan Changes to deal with "struct timespec" + * change in the kernel. + * 21-Oct-1998 Scott Roberts Added support for advisory locking (Radar #2237914). + * 25-Sep-1998 Don Brady Changed hfs_exchange to call hfs_chid after updating catalog (radar #2276605). + * 23-Sep-1998 Don Brady hfs_setattrlist now calls hfs_chown and hfs_chmod to change values. + * 15-Sep-1998 Pat Dirks Cleaned up vnode unlocking on various error exit paths and changed + * to use new error stub routines in place of hfs_mknod and hfs_link. + * 16-Sep-1998 Don Brady When renaming a volume in hfs_setattrlist, also update hfs+ wrapper name (radar #2272925). + * 1-Sep-1998 Don Brady Fix uninitiazed time variable in hfs_makenode (radar #2270372). + * 31-Aug-1998 Don Brady Adjust change time for DST in hfs_update (radar #2265075). + * 12-Aug-1998 Don Brady Update complex node name in hfs_rename (radar #2262111). + * 5-Aug-1998 Don Brady In hfs_setattrlist call MacToVFSError after calling UpdateCatalogNode (radar #2261247). + * 21-Jul-1998 Don Brady Fixed broken preflight in hfs_getattrlist. + * 17-Jul-1998 Clark Warner Fixed the one left out case of freeing M_NAMEI in hfs_abort + * 13-Jul-1998 Don Brady Add uio_resid preflight check to hfs_search (radar #2251855). + * 30-Jun-1998 Scott Roberts Changed hfs_makenode and its callers to free M_NAMEI. + * 29-Jun-1998 Don Brady Fix unpacking order in UnpackSearchAttributeBlock (radar #2249248). + * 13-Jun-1998 Scott Roberts Integrated changes to hfs_lock (radar #2237243). + * 4-Jun-1998 Pat Dirks Split off hfs_lookup.c and hfs_readwrite.c + * 3-Jun-1998 Don Brady Fix hfs_rename bugs (radar #2229259, #2239823, 2231108 and #2237380). + * Removed extra vputs in hfs_rmdir (radar #2240309). + * 28-May-1998 Don Brady Fix hfs_truncate to correctly extend files (radar #2237242). 
+ * 20-May-1998 Don Brady In hfs_close shrink the peof to the smallest size neccessary (radar #2230094). + * 5-May-1998 Don Brady Fixed typo in hfs_rename (apply H_FILEID macro to VTOH result). + * 29-Apr-1998 Joe Sokol Don't do cluster I/O when logical block size is not 4K multiple. + * 28-Apr-1998 Pat Dirks Cleaned up unused variable physBlockNo in hfs_write. + * 28-Apr-1998 Joe Sokol Touched up support for cluster_read/cluster_write and enabled it. + * 27-Apr-1998 Don Brady Remove some DEBUG_BREAK calls in DbgVopTest. + * 24-Apr-1998 Pat Dirks Fixed read logic to read-ahead only ONE block, and of only logBlockSize instead of 64K... + * Added calls to brelse() on errors from bread[n](). + * Changed logic to add overall length field to AttrBlockSize only on attribute return operations. + * 23-Apr-1998 Don Brady The hfs_symlink call is only supported on HFS Plus disks. + * 23-Apr-1998 Deric Horn Fixed hfs_search bug where matches were skipped when buffer was full. + * 22-Apr-1998 Scott Roberts Return on error if catalog mgr returns an error in truncate. + * 21-Apr-1998 Don Brady Fix up time/date conversions. + * 20-Apr-1998 Don Brady Remove course-grained hfs metadata locking. + * 17-Apr-1998 Pat Dirks Officially enabled searchfs in vops table. + * 17-Apr-1998 Deric Horn Bug fixes to hfs_search, reenabled searchfs trap for upcoming kernel build. + * 15-Apr-1998 Don Brady Add locking for HFS B-trees. Don't lock file meta lock for VSYSTEM files. + * Don't call VOP_UPDATE for system files. Roll set_time into hfs_update. + * 14-Apr-1998 Pat Dirks Cleaned up fsync to skip complex nodes and not hit sibling nodes. + * 14-Apr-1998 Deric Horn Added hfs_search() and related routines for searchfs() support. + * 14-Apr-1998 Scott Roberts Fixed paramaters to ExchangeFileIDs() + * 13-Apr-1998 Pat Dirks Changed to update H_HINT whenever hfs_getcatalog was called. + * 8-Apr-1998 Pat Dirks Added page-in and page-out passthrough routines to keep MapFS happy. 
+ * 6-Apr-1998 Pat Dirks Changed hfs_write to clean up code and fix bug that caused + * zeroes to be interspersed in data. Added debug printf to hfs_read. + * 6-Apr-1998 Scott Roberts Added complex file support. + * 02-apr-1998 Don Brady UpdateCatalogNode now takes parID and name as input. + * 31-mar-1998 Don Brady Sync up with final HFSVolumes.h header file. + * 27-mar-1998 Don Brady Check result from UFSToHFSStr to make sure hfs/hfs+ names are not greater than 31 characters. + * 27-mar-1998 chw minor link fixes. + * 19-Mar-1998 ser Added hfs_readdirattr. + * 17-Mar-1998 ser Removed CheckUserAccess. Added code to implement ExchangeFileIDs + * 16-Mar-1998 Pat Dirks Fixed logic in hfs_read to properly account for space + * remaining past selected offset and avoid premature panic. + * 16-jun-1997 Scott Roberts + * Dec-1991 Kevin Wells at NeXT: + * Significantly modified for Macintosh file system. + * Added support for NFS exportability. + * 25-Jun-1990 Doug Mitchell at NeXT: + * Created (for DOS file system). 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "hfs.h" +#include "hfs_lockf.h" +#include "hfs_dbg.h" +#include "hfs_mount.h" + +#include "hfscommon/headers/CatalogPrivate.h" +#include "hfscommon/headers/BTreesInternal.h" +#include "hfscommon/headers/FileMgrInternal.h" +#include "hfscommon/headers/HFSUnicodeWrappers.h" + +#define OWNERSHIP_ONLY_ATTRS (ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | ATTR_CMN_FLAGS) + +#define MAKE_DELETED_NAME(NAME,FID) \ + (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID)) + + +extern uid_t console_user; + +/* Global vfs data structures for hfs */ +int (**hfs_vnodeop_p)(void *); + +/* external routines defined in hfs_vhash.c */ +extern void hfs_vhashrem(struct hfsnode *hp); +extern int vinvalbuf_vhash(register struct vnode *vp, int flags, struct ucred *cred, struct proc *p); +extern void hfs_vhashmove( struct hfsnode *hp,UInt32 nodeID); +extern struct vnode * hfs_vhashget(dev_t dev, UInt32 nodeID, UInt8 forkType); + +extern OSErr PositionIterator(CatalogIterator *cip, UInt32 offset, BTreeIterator *bip, UInt16 *op); + +extern void hfs_name_CatToMeta(CatalogNodeData *nodeData, struct hfsfilemeta *fm); + +extern groupmember(gid_t gid, struct ucred *cred); + +static int hfs_makenode( int mode, + dev_t rawdev, struct vnode *dvp, struct vnode **vpp, + struct componentname *cnp, struct proc *p); + +static void hfs_chid(struct hfsnode *hp, u_int32_t fid, u_int32_t pid, char* name); + +static int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags); + +static int hfs_chown( struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct proc *p); +static int hfs_chmod( struct vnode *vp, int mode, struct ucred *cred, struct proc *p); +static int hfs_chflags( struct vnode *vp, u_long flags, struct ucred *cred, struct proc *p); + + +int 
hfs_cache_lookup(); /* in hfs_lookup.c */ +int hfs_lookup(); /* in hfs_lookup.c */ +int hfs_read(); /* in hfs_readwrite.c */ +int hfs_write(); /* in hfs_readwrite.c */ +int hfs_ioctl(); /* in hfs_readwrite.c */ +int hfs_select(); /* in hfs_readwrite.c */ +int hfs_mmap(); /* in hfs_readwrite.c */ +int hfs_seek(); /* in hfs_readwrite.c */ +int hfs_bmap(); /* in hfs_readwrite.c */ +int hfs_strategy(); /* in hfs_readwrite.c */ +int hfs_reallocblks(); /* in hfs_readwrite.c */ +int hfs_truncate(); /* in hfs_readwrite.c */ +int hfs_allocate(); /* in hfs_readwrite.c */ +int hfs_pagein(); /* in hfs_readwrite.c */ +int hfs_pageout(); /* in hfs_readwrite.c */ +int hfs_search(); /* in hfs_search.c */ +int hfs_bwrite(); /* in hfs_readwrite.c */ +int hfs_link(); /* in hfs_link.c */ +int hfs_blktooff(); /* in hfs_readwrite.c */ +int hfs_offtoblk(); /* in hfs_readwrite.c */ +int hfs_cmap(); /* in hfs_readwrite.c */ + +/***************************************************************************** +* +* Operations on vnodes +* +*****************************************************************************/ + +/* + * Create a regular file +#% create dvp L U U +#% create vpp - L - +# + vop_create { + IN WILLRELE struct vnode *dvp; + OUT struct vnode **vpp; + IN struct componentname *cnp; + IN struct vattr *vap; + + We are responsible for freeing the namei buffer, + it is done in hfs_makenode() +*/ + +static int +hfs_create(ap) +struct vop_create_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */ *ap; +{ + struct proc *p = current_proc(); + int retval; + int mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode); + DBG_FUNC_NAME("create"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_dvp); + DBG_VOP_PRINT_CPN_INFO(ap->a_cnp); + + DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, 
VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_POS); + DBG_VOP_CONT(("\tva_type %d va_mode 0x%x\n", + ap->a_vap->va_type, ap->a_vap->va_mode)); + +#if HFS_DIAGNOSTIC + DBG_HFS_NODE_CHECK(ap->a_dvp); + DBG_ASSERT(ap->a_dvp->v_type == VDIR); + if(ap->a_vap == NULL) { + panic("NULL attr on create"); + } + + switch(ap->a_vap->va_type) { + case VDIR: + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + DBG_VOP_LOCKS_TEST(EISDIR); + return (EISDIR); /* use hfs_mkdir instead */ + case VREG: + case VLNK: + break; + default: + DBG_ERR(("%s: INVALID va_type: %d, %s, %s\n", funcname, ap->a_vap->va_type, H_NAME(VTOH(ap->a_dvp)), ap->a_cnp->cn_nameptr)); + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); + } +// if(ap->a_vap->va_mode & (VSUID | VSGID | VSVTX)) { +// DBG_ERR(("%s: INVALID va_mode (%o): %s, %s\n", funcname, ap->a_vap->va_mode, H_NAME(VTOH(ap->a_dvp)), ap->a_cnp->cn_nameptr)); +// DBG_VOP_LOCKS_TEST(EINVAL); +// VOP_ABORTOP(ap->a_dvp, ap->a_cnp); +// vput(ap->a_dvp); +// return (EINVAL); /* Can't do these */ +// }; +#endif + + /* Create the vnode */ + retval = hfs_makenode(mode, 0, ap->a_dvp, ap->a_vpp, ap->a_cnp, p); + DBG_VOP_UPDATE_VP(1, *ap->a_vpp); + + if (retval != E_NONE) { + DBG_ERR(("%s: hfs_makenode FAILED: %s, %s\n", funcname, ap->a_cnp->cn_nameptr, H_NAME(VTOH(ap->a_dvp)))); + } + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + +/* + * Mknod vnode call + +#% mknod dvp L U U +#% mknod vpp - X - +# + vop_mknod { + IN WILLRELE struct vnode *dvp; + OUT WILLRELE struct vnode **vpp; + IN struct componentname *cnp; + IN struct vattr *vap; + */ +/* ARGSUSED */ + +static int +hfs_mknod(ap) +struct vop_mknod_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */ *ap; +{ + struct vattr *vap = ap->a_vap; + struct vnode **vpp = ap->a_vpp; + struct proc *p = current_proc(); + dev_t rawdev = 0; + int error; + + if (VTOVCB(ap->a_dvp)->vcbSigWord != 
kHFSPlusSigWord) { + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + return (EOPNOTSUPP); + } + + if (vap->va_rdev != VNOVAL) { + /* + * Want to be able to use this to make badblock + * inodes, so don't truncate the dev number. + */ + rawdev = vap->va_rdev; + } + + /* Create the vnode */ + error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode), + rawdev, ap->a_dvp, vpp, ap->a_cnp, p); + + if (error != E_NONE) { + return (error); + } + + /* + * Remove inode so that it will be reloaded by lookup and + * checked to see if it is an alias of an existing vnode. + * Note: unlike UFS, we don't bash v_type here. + */ + vput(*vpp); + vgone(*vpp); + *vpp = 0; + return (0); +} + + +/* + * mkcomplex vnode call + * + +#% mkcomplex dvp L U U +#% mkcomplex vpp - L - +# +vop_mkcomplex { + IN WILLRELE struct vnode *dvp; + OUT struct vnode **vpp; + IN struct componentname *cnp; + IN struct vattr *vap; + IN u_long type; +} + + */ + +static int +hfs_mkcomplex(ap) +struct vop_mkcomplex_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + u_long a_type; +} */ *ap; +{ + int retval = E_NONE; + DBG_FUNC_NAME("make_complex"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_dvp); + DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_POS); + + retval = VOP_CREATE(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap); + + DBG_VOP_LOCKS_TEST(retval); + return retval; +} + + +/* + * Open called. 
+#% open vp L L L +# + vop_open { + IN struct vnode *vp; + IN int mode; + IN struct ucred *cred; + IN struct proc *p; + */ + + +static int +hfs_open(ap) +struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +} */ *ap; +{ + struct hfsnode *hp = VTOH(ap->a_vp); + int retval = E_NONE; + DBG_FUNC_NAME("open"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_CONT((" "));DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + if (ap->a_vp->v_type == VREG) /* Only files */ + { + /* + * Files marked append-only must be opened for appending. + */ + if ((hp->h_meta->h_pflags & APPEND) && + (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) + retval = EPERM; + } + + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + +/* + * Close called. + * + * Update the times on the hfsnode. +#% close vp U U U +# + vop_close { + IN struct vnode *vp; + IN int fflag; + IN struct ucred *cred; + IN struct proc *p; + */ + + +static int +hfs_close(ap) +struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; +} */ *ap; +{ + register struct vnode *vp = ap->a_vp; + struct hfsnode *hp = VTOH(vp); + struct proc *p = ap->a_p; + FCB *fcb; + struct timeval tv; + off_t leof; + u_long blks, blocksize; + int retval = E_NONE; + + DBG_FUNC_NAME("close"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_CONT((" "));DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + + simple_lock(&vp->v_interlock); + if (vp->v_usecount > (UBCINFOEXISTS(vp) ? 2 : 1)) { + tv = time; + HFSTIMES(hp, &tv, &tv); + } + simple_unlock(&vp->v_interlock); + + /* + * VOP_CLOSE can be called with vp locked (from vclean). + * We check for this case using VOP_ISLOCKED and bail. 
+ * + * also, ignore complex nodes; there's no data associated with them. + */ + if (H_FORKTYPE(hp) == kDirectory || VOP_ISLOCKED(vp)) { + DBG_VOP_LOCKS_TEST(E_NONE); + return E_NONE; + }; + + fcb = HTOFCB(hp); + leof = fcb->fcbEOF; + + if (leof != 0) { + enum vtype our_type = vp->v_type; + u_long our_id = vp->v_id; + + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + /* + * Since we can contact switch in vn_lock our vnode + * could get recycled (eg umount -f). Double check + * that its still ours. + */ + if (vp->v_type != our_type || vp->v_id != our_id) { + VOP_UNLOCK(vp, 0, p); + DBG_VOP_LOCKS_TEST(E_NONE); + return(E_NONE); + } + + blocksize = HTOVCB(hp)->blockSize; + blks = leof / blocksize; + if (((off_t)blks * (off_t)blocksize) != leof) + blks++; + + /* + * Shrink the peof to the smallest size neccessary to contain the leof. + */ + if (((off_t)blks * (off_t)blocksize) < fcb->fcbPLen) { + retval = VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p); + } + cluster_push(vp); + VOP_UNLOCK(vp, 0, p); + } + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + +/* +#% access vp L L L +# + vop_access { + IN struct vnode *vp; + IN int mode; + IN struct ucred *cred; + IN struct proc *p; + + */ + +static int +hfs_access(ap) +struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct ucred *cred = ap->a_cred; + struct hfsnode *hp = VTOH(vp); + ExtendedVCB *vcb = HTOVCB(hp); + register gid_t *gp; + mode_t mask, mode; + Boolean isHFSPlus; + int retval = E_NONE; + int i; + DBG_FUNC_NAME("access"); + DBG_VOP_LOCKS_DECL(1); +// DBG_VOP_PRINT_FUNCNAME(); +// DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + mode = ap->a_mode; + isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord ); + + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket, fifo, or a 
block or + * character device resident on the file system. + */ + if (mode & VWRITE) { + switch (vp->v_type) { + case VDIR: + case VLNK: + case VREG: + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) + return (EROFS); + break; + default: + break; + } + } + + /* If immutable bit set, nobody gets to write it. */ + if ((mode & VWRITE) && (hp->h_meta->h_pflags & IMMUTABLE)) + return (EPERM); + + /* Otherwise, user id 0 always gets access. */ + if (ap->a_cred->cr_uid == 0) { + retval = 0; + goto Exit; + }; + + mask = 0; + + /* Otherwise, check the owner. */ + if (hfs_owner_rights(vp, cred, ap->a_p, false) == 0) { + if (mode & VEXEC) + mask |= S_IXUSR; + if (mode & VREAD) + mask |= S_IRUSR; + if (mode & VWRITE) + mask |= S_IWUSR; + retval = ((hp->h_meta->h_mode & mask) == mask ? 0 : EACCES); + goto Exit; + } + + /* Otherwise, check the groups. */ + if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) { + for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) { + if (hp->h_meta->h_gid == *gp) { + if (mode & VEXEC) + mask |= S_IXGRP; + if (mode & VREAD) + mask |= S_IRGRP; + if (mode & VWRITE) + mask |= S_IWGRP; + retval = ((hp->h_meta->h_mode & mask) == mask ? 0 : EACCES); + goto Exit; + } + }; + }; + + /* Otherwise, check everyone else. */ + if (mode & VEXEC) + mask |= S_IXOTH; + if (mode & VREAD) + mask |= S_IROTH; + if (mode & VWRITE) + mask |= S_IWOTH; + retval = ((hp->h_meta->h_mode & mask) == mask ? 
0 : EACCES); + +Exit: + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + + +/* +#% getattr vp = = = +# + vop_getattr { + IN struct vnode *vp; + IN struct vattr *vap; + IN struct ucred *cred; + IN struct proc *p; + + */ + + +/* ARGSUSED */ +static int +hfs_getattr(ap) +struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; +} */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct hfsnode *hp = VTOH(vp); + register struct vattr *vap = ap->a_vap; + struct timeval tv; + DBG_FUNC_NAME("getattr"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_POS); + + DBG_HFS_NODE_CHECK(ap->a_vp); + + tv = time; + HFSTIMES(hp, &tv, &tv); + + vap->va_fsid = H_DEV(hp); + vap->va_fileid = H_FILEID(hp); + vap->va_mode = hp->h_meta->h_mode; + if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + vap->va_uid = (VTOHFS(vp)->hfs_uid == UNKNOWNUID) ? console_user : VTOHFS(vp)->hfs_uid; + } else { + vap->va_uid = (hp->h_meta->h_uid == UNKNOWNUID) ? 
console_user : hp->h_meta->h_uid; + }; + vap->va_gid = hp->h_meta->h_gid; + if (vp->v_type == VDIR) { + vap->va_size = hp->h_meta->h_size; + vap->va_bytes = 0; + vap->va_rdev = 0; + vap->va_nlink = hp->h_meta->h_nlink; + /* + * account for hidden data nodes directory + */ + if ((H_FILEID(hp) == kRootDirID) && + (VTOHFS(vp)->hfs_private_metadata_dir != 0)) { + vap->va_size -= AVERAGE_HFSDIRENTRY_SIZE; + vap->va_nlink--; + } + } + else { + vap->va_size = hp->fcbEOF; + vap->va_bytes = hp->h_meta->h_size; + + if (vp->v_type == VBLK || vp->v_type == VCHR) + vap->va_rdev = hp->h_meta->h_rdev; + else + vap->va_rdev = 0; + + if (hp->h_meta->h_metaflags & IN_DELETED) + vap->va_nlink = 0; +#if HFS_HARDLINKS + else if ((hp->h_meta->h_metaflags & IN_DATANODE) && + (hp->h_meta->h_nlink > 0)) + vap->va_nlink = hp->h_meta->h_nlink; +#endif + else + vap->va_nlink = 1; + + } + + vap->va_atime.tv_nsec = 0; + vap->va_atime.tv_sec = hp->h_meta->h_atime; + vap->va_mtime.tv_nsec = 0; + vap->va_mtime.tv_sec = hp->h_meta->h_mtime; + vap->va_ctime.tv_nsec = 0; + vap->va_ctime.tv_sec = hp->h_meta->h_ctime; + vap->va_flags = hp->h_meta->h_pflags; + vap->va_gen = 0; + /* this doesn't belong here */ + if (vp->v_type == VBLK) + vap->va_blocksize = BLKDEV_IOSIZE; + else if (vp->v_type == VCHR) + vap->va_blocksize = MAXPHYSIO; + else + vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize; + vap->va_type = vp->v_type; + vap->va_filerev = 0; + + DBG_VOP_LOCKS_TEST(E_NONE); + return (E_NONE); +} + +/* + * Set attribute vnode op. 
called from several syscalls +#% setattr vp L L L +# + vop_setattr { + IN struct vnode *vp; + IN struct vattr *vap; + IN struct ucred *cred; + IN struct proc *p; + + */ + +static int +hfs_setattr(ap) +struct vop_setattr_args /* { +struct vnode *a_vp; +struct vattr *a_vap; +struct ucred *a_cred; +struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct hfsnode *hp = VTOH(vp); + struct vattr *vap = ap->a_vap; + struct ucred *cred = ap->a_cred; + struct proc *p = ap->a_p; + struct timeval atimeval, mtimeval; + int retval; + DBG_FUNC_NAME("setattr"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + WRITE_CK(vp, funcname); + DBG_HFS_NODE_CHECK(ap->a_vp); + + /* + * Check for unsettable attributes. + */ + if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || + (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || + (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || + ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { + retval = EINVAL; + goto ErrorExit; + } + + if (vap->va_flags != VNOVAL) { + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) { + retval = EROFS; + goto ErrorExit; + }; + if ((retval = hfs_chflags(vp, vap->va_flags, cred, p))) { + goto ErrorExit; + }; + if (vap->va_flags & (IMMUTABLE | APPEND)) { + retval = 0; + goto ErrorExit; + }; + } + + if (hp->h_meta->h_pflags & (IMMUTABLE | APPEND)) { + retval = EPERM; + goto ErrorExit; + }; + /* + * Go through the fields and update iff not VNOVAL. 
+ */ + if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) { + retval = EROFS; + goto ErrorExit; + }; + if ((retval = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p))) { + goto ErrorExit; + }; + } + if (vap->va_size != VNOVAL) { + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket, fifo, or a block or + * character device resident on the file system. + */ + switch (vp->v_type) { + case VDIR: + retval = EISDIR; + goto ErrorExit; + case VLNK: + case VREG: + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) { + retval = EROFS; + goto ErrorExit; + }; + break; + default: + break; + } + if ((retval = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p))) { + goto ErrorExit; + }; + } + hp = VTOH(vp); + if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) { + retval = EROFS; + goto ErrorExit; + }; + if (((retval = hfs_owner_rights(vp, cred, p, true)) != 0) && + ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || + (retval = VOP_ACCESS(vp, VWRITE, cred, p)))) { + goto ErrorExit; + }; + if (vap->va_atime.tv_sec != VNOVAL) + hp->h_nodeflags |= IN_ACCESS; + if (vap->va_mtime.tv_sec != VNOVAL) { + hp->h_nodeflags |= IN_CHANGE | IN_UPDATE; + /* + * The utimes system call can reset the modification time + * but it doesn't know about the HFS+ create time. So we + * need to insure that the creation time is always at least + * as old as the modification time. 
+ */ + if (( VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord ) && + ( H_FILEID(hp) != kRootDirID ) && + ( vap->va_mtime.tv_sec < hp->h_meta->h_crtime )) + hp->h_meta->h_crtime = vap->va_mtime.tv_sec; + } + atimeval.tv_sec = vap->va_atime.tv_sec; + atimeval.tv_usec = 0; + mtimeval.tv_sec = vap->va_mtime.tv_sec; + mtimeval.tv_usec = 0; + if ((retval = VOP_UPDATE(vp, &atimeval, &mtimeval, 1))) { + goto ErrorExit; + }; + } + retval = 0; + if (vap->va_mode != (mode_t)VNOVAL) { + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) { + retval = EROFS; + goto ErrorExit; + }; + retval = hfs_chmod(vp, (int)vap->va_mode, cred, p); + }; + +ErrorExit: ; + + DBG_VOP(("hfs_setattr: returning %d...\n", retval)); + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + +/* + +# +#% getattrlist vp = = = +# + vop_getattrlist { + IN struct vnode *vp; + IN struct attrlist *alist; + INOUT struct uio *uio; + IN struct ucred *cred; + IN struct proc *p; + }; + + */ + +static int +hfs_getattrlist(ap) +struct vop_getattrlist_args /* { +struct vnode *a_vp; +struct attrlist *a_alist +struct uio *a_uio; +struct ucred *a_cred; +struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct hfsnode *hp = VTOH(vp); + struct attrlist *alist = ap->a_alist; + int error = 0; + struct hfsCatalogInfo catInfo; + struct hfsCatalogInfo *catInfoPtr = NULL; + struct timeval tv; + int fixedblocksize; + int attrblocksize; + int attrbufsize; + void *attrbufptr; + void *attrptr; + void *varptr; + u_int32_t fileID; + DBG_FUNC_NAME("getattrlist"); + DBG_VOP_LOCKS_DECL(1); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_POS); + DBG_HFS_NODE_CHECK(ap->a_vp); + DBG_VOP(("%s: Common attr:0x%lx, buff size Ox%lX,\n",funcname, (u_long)alist->commonattr,(u_long)ap->a_uio->uio_resid)); + + DBG_ASSERT(ap->a_uio->uio_rw == UIO_READ); + + if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) || + ((alist->commonattr & ~ATTR_CMN_VALIDMASK) != 0) || + ((alist->volattr & ~ATTR_VOL_VALIDMASK) != 0) || + 
((alist->dirattr & ~ATTR_DIR_VALIDMASK) != 0) || + ((alist->fileattr & ~ATTR_FILE_VALIDMASK) != 0) || + ((alist->forkattr & ~ATTR_FORK_VALIDMASK) != 0)) { + DBG_ERR(("%s: bad attrlist\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + /* Requesting volume information requires setting the ATTR_VOL_INFO bit and + volume info requests are mutually exclusive with all other info requests: */ + if ((alist->volattr != 0) && (((alist->volattr & ATTR_VOL_INFO) == 0) || + (alist->dirattr != 0) || (alist->fileattr != 0) || (alist->forkattr != 0) + )) { + DBG_ERR(("%s: conflicting information requested\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + /* Reject requests for unsupported options for now: */ + if ((alist->commonattr & (ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST)) || + (alist->fileattr & (ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))) { + DBG_ERR(("%s: illegal bits in attlist\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + /* Requesting volume information requires root vnode */ + if ((alist->volattr) && (H_FILEID(hp) != kRootDirID)) { + DBG_ERR(("%s: not root vnode\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + + /* Update times if needed */ + tv = time; + HFSTIMES(hp, &tv, &tv); + + /* If a FileID (ATTR_CMN_OBJPERMANENTID) is requested on an HFS volume we must be sure + to create the thread record before returning it: + */ + if ((vp->v_type == VREG) && + (alist->commonattr & ATTR_CMN_OBJPERMANENTID)) { + /* Only HFS-Plus volumes are guaranteed to have a thread record in place already: */ + if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord) { + /* Create a thread record and return the FileID [which is the file's fileNumber] */ + /* lock catalog b-tree */ + error = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p); + error = hfsCreateFileID(VTOVCB(vp), H_DIRID(hp), H_NAME(hp), H_HINT(hp), &fileID); + (void) 
hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, ap->a_p); + if (error) { + DBG_VOP_LOCKS_TEST(error); + DBG_ERR(("hfs_getattrlist: error %d on CreateFileIDRef.\n", error)); + return error; + }; + DBG_ASSERT(fileID == H_FILEID(hp)); + }; + }; + + /* Asking for data fork attributes from the rsrc fork is not supported */ + if ((H_FORKTYPE(hp) == kRsrcFork) && (alist->fileattr & HFS_ATTR_FILE_LOOKUPMASK)) { + return (EINVAL); + } + + /* + * Avoid unnecessary catalog lookups for volume info which is available directly + * in the VCB and root vnode, or can be synthesized. + */ + INIT_CATALOGDATA(&catInfo.nodeData, 0); + catInfo.hint = kNoHint; + + if (((alist->volattr == 0) && ((alist->commonattr & HFS_ATTR_CMN_LOOKUPMASK) != 0)) || + ((alist->dirattr & HFS_ATTR_DIR_LOOKUPMASK) != 0) || + ((alist->fileattr & HFS_ATTR_FILE_LOOKUPMASK) != 0)) { + + /* lock catalog b-tree */ + error = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_SHARED, ap->a_p); + if (error) goto GetCatalogErr_Exit; + + if (alist->volattr != 0) { + /* Look up the root info, regardless of the vnode provided */ + error = hfs_getcatalog(VTOVCB(vp), 2, NULL, -1, &catInfo); + } else { + error = hfs_getcatalog(VTOVCB(vp), H_DIRID(hp), H_NAME(hp), -1, &catInfo); + if (error == 0) H_HINT(hp) = catInfo.hint; /* Remember the last valid hint */ + }; + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, ap->a_p); + + /* + * If a data fork has an active sibling and we need + * rsrc fork attributes then we need to lock the + * sibling and make a copy of its attributes. 
+ */ + if ((hp->h_meta->h_usecount > 1) && + (H_FORKTYPE(hp) == kDataFork) && + (alist->fileattr & HFS_ATTR_FILE_LOOKUPMASK)) { + struct vnode *sib_vp = NULL; + struct hfsnode *nhp; + struct proc *p = current_proc(); + + DBG_ASSERT(hp->h_meta->h_siblinghead.cqh_first && + (hp->h_meta->h_siblinghead.cqh_first != hp->h_meta->h_siblinghead.cqh_last)); + DBG_ASSERT(H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork); + + /* Loop through all siblings, skipping ourselves */ + simple_lock(&hp->h_meta->h_siblinglock); + CIRCLEQ_FOREACH(nhp, &hp->h_meta->h_siblinghead, h_sibling) { + if (nhp == hp) /* skip ourselves */ + continue; + sib_vp = HTOV(nhp); + }; + simple_unlock(&hp->h_meta->h_siblinglock); + + /* The only error that vget returns is when the vnode is going away, so ignore the vnode */ + if (vget(sib_vp, LK_EXCLUSIVE | LK_RETRY, p) == 0) { + if (VTOH(sib_vp)->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) { + /* XXX SER No need to copy the whole thing over, just copy the fork info */ + CopyVNodeToCatalogNode (sib_vp, &catInfo.nodeData); + }; + + vput(sib_vp); + }; /* vget() */ + }; /* h_use_count > 1 */ + + /* Update to the in-memory state, if it has been modified...just to make sure */ + if (VTOH(vp)->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) { + /* XXX SER No need to copy the whole thing over, just copy the fork info */ + CopyVNodeToCatalogNode (vp, &catInfo.nodeData); + }; + + /* XXX What if hfs_getcatalog fails...we just continue??? 
*/ + catInfoPtr = &catInfo; + + }; + + fixedblocksize = AttributeBlockSize(alist); + attrblocksize = fixedblocksize + (sizeof(u_long)); /* u_long for length longword */ + if (alist->commonattr & ATTR_CMN_NAME) attrblocksize += kHFSPlusMaxFileNameBytes + 1; + if (alist->commonattr & ATTR_CMN_NAMEDATTRLIST) attrblocksize += 0; /* XXX PPD */ + if (alist->volattr & ATTR_VOL_MOUNTPOINT) attrblocksize += PATH_MAX; + if (alist->volattr & ATTR_VOL_NAME) attrblocksize += kHFSPlusMaxFileNameBytes + 1; + if (alist->fileattr & ATTR_FILE_FORKLIST) attrblocksize += 0; /* XXX PPD */ + + attrbufsize = MIN(ap->a_uio->uio_resid, attrblocksize); + DBG_VOP(("hfs_getattrlist: allocating Ox%X byte buffer (Ox%X + Ox%X) for attributes...\n", + attrblocksize, + fixedblocksize, + attrblocksize - fixedblocksize)); + MALLOC(attrbufptr, void *, attrblocksize, M_TEMP, M_WAITOK); + attrptr = attrbufptr; + *((u_long *)attrptr) = 0; /* Set buffer length in case of errors */ + ++((u_long *)attrptr); /* Reserve space for length field */ + varptr = ((char *)attrptr) + fixedblocksize; /* Point to variable-length storage */ + DBG_VOP(("hfs_getattrlist: attrptr = 0x%08X, varptr = 0x%08X...\n", (u_int)attrptr, (u_int)varptr)); + + PackAttributeBlock(alist, vp, catInfoPtr, &attrptr, &varptr); + attrbufsize = MIN(attrbufsize, (u_int)varptr - (u_int)attrbufptr); /* Don't copy out more data than was generated */ + *((u_long *)attrbufptr) = attrbufsize; /* Set actual buffer length for return to caller */ + DBG_VOP(("hfs_getattrlist: copying Ox%X bytes to user address 0x%08X.\n", attrbufsize, (u_int)ap->a_uio->uio_iov->iov_base)); + error = uiomove((caddr_t)attrbufptr, attrbufsize, ap->a_uio); + if (error != E_NONE) { + DBG_ERR(("hfs_getattrlist: error %d on uiomove.\n", error)); + }; + + FREE(attrbufptr, M_TEMP); + + +GetCatalogErr_Exit: + CLEAN_CATALOGDATA(&catInfo.nodeData); + DBG_VOP_LOCKS_TEST(error); + return error; +} + + + +/* + +# +#% setattrlist vp L L L +# + vop_setattrlist { + IN struct vnode *vp; 
+ IN struct attrlist *alist; + INOUT struct uio *uio; + IN struct ucred *cred; + IN struct proc *p; + }; + + */ + +static int +hfs_setattrlist(ap) +struct vop_setattrlist_args /* { +struct vnode *a_vp; +struct attrlist *a_alist +struct uio *a_uio; +struct ucred *a_cred; +struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct hfsnode *hp = VTOH(vp); + struct attrlist *alist = ap->a_alist; + struct ucred *cred = ap->a_cred; + struct proc *p = ap->a_p; + int error; + struct hfsCatalogInfo catInfo; + int attrblocksize; + void *attrbufptr = NULL; + void *attrptr; + void *varptr = NULL; + uid_t saved_uid; + gid_t saved_gid; + mode_t saved_mode; + u_long saved_flags; + char * filename; + char iNodeName[32]; + u_int32_t pid; + int retval = 0; + + DBG_FUNC_NAME("setattrlist"); + DBG_VOP_LOCKS_DECL(1); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_POS); + DBG_HFS_NODE_CHECK(ap->a_vp); + DBG_VOP(("%s: Common attr:0x%x, buff size Ox%X,\n",funcname, (u_int)alist->commonattr,(u_int)ap->a_uio->uio_resid)); + + DBG_ASSERT(ap->a_uio->uio_rw == UIO_WRITE); + + if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) || + ((alist->commonattr & ~ATTR_CMN_SETMASK) != 0) || + ((alist->volattr & ~ATTR_VOL_SETMASK) != 0) || + ((alist->dirattr & ~ATTR_DIR_SETMASK) != 0) || + ((alist->fileattr & ~ATTR_FILE_SETMASK) != 0) || + ((alist->forkattr & ~ATTR_FORK_SETMASK) != 0)) { + DBG_ERR(("%s: Bad attrlist\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + if ((alist->volattr != 0) && /* Setting volume info */ + (((alist->volattr & ATTR_VOL_INFO) == 0) || /* Not explicitly indicating this or ... */ + (alist->commonattr & ~ATTR_CMN_VOLSETMASK))) /* ... 
setting invalid attributes for volume */ + { + DBG_ERR(("%s: Bad attrlist\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) { + DBG_VOP_LOCKS_TEST(EROFS); + return EROFS; + }; + + /* + Ownership of the file (in addition to write access, checked below, + is required in one of two classes of calls: + + (a) When setting any ownership-requiring attribute other than ATTR_CMN_FLAGS, or + (b) When setting ATTR_CMN_FLAGS on a volume that's not plain HFS (for which no + real per-object ownership information is stored): + */ + if ((alist->commonattr & (OWNERSHIP_ONLY_ATTRS & ~ATTR_CMN_FLAGS)) || + ((alist->commonattr & ATTR_CMN_FLAGS) && (VTOVCB(vp)->vcbSigWord != kHFSSigWord))) { + /* NOTE: The following isn't ENTIRELY complete: even if you're the superuser + you cannot change the flags as long as SF_IMMUTABLE or SF_APPEND is + set and securelevel > 0. This is verified in hfs_chflags which gets + invoked to do the actual flags field change so this check is sufficient + for now. + */ + /* Check to see if the user owns the object [or is superuser]: */ + if ((retval = hfs_owner_rights(vp, cred, p, true)) != 0) { + DBG_VOP_LOCKS_TEST(retval); + return retval; + }; + } else { + DBG_ASSERT(((alist->commonattr & OWNERSHIP_ONLY_ATTRS) == 0) || + (((alist->commonattr & OWNERSHIP_ONLY_ATTRS) == ATTR_CMN_FLAGS) && + (VTOVCB(vp)->vcbSigWord == kHFSSigWord))); + /* No ownership access is required: mere write access (checked below) will do... 
*/ + }; + + /* For any other attributes, check to see if the user has write access to + the object in question [unlike VOP_ACCESS, ignore IMMUTABLE here]: */ + + if ((((alist->commonattr & ~(OWNERSHIP_ONLY_ATTRS)) != 0) || + (alist->volattr != 0) || + (alist->dirattr != 0) || + (alist->fileattr != 0) || + (alist->forkattr != 0)) && + ((retval = hfs_write_access(vp, cred, p, false)) != 0)) { + DBG_VOP_LOCKS_TEST(retval); + return retval; + }; /* end of if ownership attr */ + + /* Allocate the buffer now to minimize the time we might be blocked holding the catalog lock */ + attrblocksize = ap->a_uio->uio_resid; + if (attrblocksize < AttributeBlockSize(alist)) { + DBG_ERR(("%s: bad attrblocksize\n", funcname)); + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + }; + + MALLOC(attrbufptr, void *, attrblocksize, M_TEMP, M_WAITOK); + + INIT_CATALOGDATA(&catInfo.nodeData, kCatNameNoCopyName); + catInfo.hint = kNoHint; + + /* lock catalog b-tree */ + error = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (error != E_NONE) { + goto FreeBuffer; + }; + + filename = H_NAME(hp); + pid = H_DIRID(hp); + +#if HFS_HARDLINKS + /* + * Force an update of the indirect node instead of the link + * by using the name and parent of the indirect node. + */ + if (hp->h_meta->h_metaflags & IN_DATANODE) { + MAKE_INODE_NAME(iNodeName, hp->h_meta->h_indnodeno); + filename = iNodeName; + pid = VTOHFS(vp)->hfs_private_metadata_dir; + } +#endif + + + error = hfs_getcatalog(VTOVCB(vp), pid, filename, -1, &catInfo); + if (error != E_NONE) { + DBG_ERR(("%s: Lookup failed on file '%s'\n", funcname, filename)); + goto ErrorExit; + }; + H_HINT(hp) = catInfo.hint; /* Remember the last valid hint */ + + error = uiomove((caddr_t)attrbufptr, attrblocksize, ap->a_uio); + if (error) goto ErrorExit; + + if ((alist->volattr) && (H_FILEID(hp) != kRootDirID)) { + error = EINVAL; + goto ErrorExit; + }; + + /* do we have permission to change the dates? 
*/ +// if (alist->commonattr & (ATTR_CMN_CRTIME | ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | ATTR_CMN_BKUPTIME)) { + if (alist->commonattr & (ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME)) { + if ((error = hfs_owner_rights(vp, cred, p, true)) != 0) { + goto ErrorExit; + }; + }; + + /* save these in case hfs_chown() or hfs_chmod() fail */ + saved_uid = hp->h_meta->h_uid; + saved_gid = hp->h_meta->h_gid; + saved_mode = hp->h_meta->h_mode; + saved_flags = hp->h_meta->h_pflags; + + attrptr = attrbufptr; + UnpackAttributeBlock(alist, vp, &catInfo, &attrptr, &varptr); + + /* if unpacking changed the owner or group then call hfs_chown() */ + if (saved_uid != hp->h_meta->h_uid || saved_gid != hp->h_meta->h_gid) { + uid_t uid; + gid_t gid; + + uid = hp->h_meta->h_uid; + hp->h_meta->h_uid = saved_uid; + gid = hp->h_meta->h_gid; + hp->h_meta->h_gid = saved_gid; + if ((error = hfs_chown(vp, uid, gid, cred, p))) + goto ErrorExit; + } + + /* if unpacking changed the mode then call hfs_chmod() */ + if (saved_mode != hp->h_meta->h_mode) { + mode_t mode; + + mode = hp->h_meta->h_mode; + hp->h_meta->h_mode = saved_mode; + if ((error = hfs_chmod(vp, mode, cred, p))) + goto ErrorExit; + }; + + /* if unpacking changed the flags then call hfs_chflags */ + if (saved_flags != hp->h_meta->h_pflags) { + u_long flags; + + flags = hp->h_meta->h_pflags; + hp->h_meta->h_pflags = saved_flags; + if ((error = hfs_chflags(vp, flags, cred, p))) + goto ErrorExit; + }; + + if (alist->volattr == 0) { + error = MacToVFSError( UpdateCatalogNode(HTOVCB(hp), pid, filename, H_HINT(hp), &catInfo.nodeData)); + } + + if (alist->volattr & ATTR_VOL_NAME) { + ExtendedVCB *vcb = VTOVCB(vp); + int namelen = strlen(vcb->vcbVN); + + if (vcb->vcbVN[0] == 0) { + /* + Ignore attempts to rename a volume to a zero-length name: + restore the original name from the metadata. 
+ */ + copystr(H_NAME(hp), vcb->vcbVN, sizeof(vcb->vcbVN), NULL); + } else { + error = MoveRenameCatalogNode(vcb, kRootParID, H_NAME(hp), H_HINT(hp), kRootParID, vcb->vcbVN, &H_HINT(hp)); + if (error) { + VCB_LOCK(vcb); + copystr(H_NAME(hp), vcb->vcbVN, sizeof(vcb->vcbVN), NULL); /* Restore the old name in the VCB */ + vcb->vcbFlags |= 0xFF00; // Mark the VCB dirty + VCB_UNLOCK(vcb); + goto ErrorExit; + }; + + hfs_set_metaname(vcb->vcbVN, hp->h_meta, HTOHFS(hp)); + hp->h_nodeflags |= IN_CHANGE; + +#if 0 + /* if hfs wrapper exists, update its name too */ + if (vcb->vcbSigWord == kHFSPlusSigWord && vcb->vcbAlBlSt != 0) { + HFSMasterDirectoryBlock *mdb; + struct buf *bp = NULL; + int size = kMDBSize; /* 512 */ + int volnamelen = MIN(sizeof(Str27), namelen); + + if ( bread(VTOHFS(vp)->hfs_devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, size), + IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size), NOCRED, &bp) == 0) { + + mdb = (HFSMasterDirectoryBlock *)((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, size)); + if (SWAP_BE16 (mdb->drSigWord) == kHFSSigWord) { + /* Convert the string to MacRoman, ignoring any errors, */ + (void) utf8_to_hfs(vcb, volnamelen, vcb->vcbVN, Str31 mdb->drVN) + bawrite(bp); + bp = NULL; + } + } + + if (bp) brelse(bp); + } +#endif + }; /* vcb->vcbVN[0] == 0 ... else ... */ + }; /* alist->volattr & ATTR_VOL_NAME */ + +ErrorExit: + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, p); + + CLEAN_CATALOGDATA(&catInfo.nodeData); + +FreeBuffer: + if (attrbufptr) FREE(attrbufptr, M_TEMP); + + DBG_VOP_LOCKS_TEST(error); + return error; +} + +/* + * Change the mode on a file. + * Inode must be locked before calling. 
 */
static int
hfs_chmod(vp, mode, cred, p)
register struct vnode *vp;
register int mode;
register struct ucred *cred;
struct proc *p;
{
	register struct hfsnode *hp = VTOH(vp);
	int retval;

	/* Plain HFS stores no per-object mode bits; silently succeed. */
	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return E_NONE;

#if OVERRIDE_UNKNOWN_PERMISSIONS
	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
		return E_NONE;
	};
#endif

	if ((retval = hfs_owner_rights(vp, cred, p, true)) != 0)
		return (retval);
	if (cred->cr_uid) {
		/* Non-root may not set the sticky bit on non-directories... */
		if (vp->v_type != VDIR && (mode & S_ISTXT))
			return (EFTYPE);
		/* ...and may set ISGID only when a member of the file's group. */
		if (!groupmember(hp->h_meta->h_gid, cred) && (mode & ISGID))
			return (EPERM);
	}
	hp->h_meta->h_mode &= ~ALLPERMS;
	hp->h_meta->h_mode |= (mode & ALLPERMS);
	hp->h_meta->h_metaflags &= ~IN_UNSETACCESS;
	hp->h_nodeflags |= IN_CHANGE;
	return (0);
}


/*
 * hfs_write_access - check whether 'cred' may write 'vp'.
 *
 * When considerFlags is true the IMMUTABLE flag denies access as well;
 * callers such as hfs_chflags pass false to deliberately ignore it.
 * Returns 0, EACCES, EPERM or EROFS.
 */
static int
hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
{
	struct hfsnode *hp = VTOH(vp);
	ExtendedVCB *vcb = HTOVCB(hp);
	gid_t *gp;
	Boolean isHFSPlus;
	int retval = E_NONE;
	int i;

	isHFSPlus = (vcb->vcbSigWord == kHFSPlusSigWord );

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
			return (EROFS);
		break;
	default:
		break;
	}

	/* If immutable bit set, nobody gets to write it. */
	if (considerFlags && (hp->h_meta->h_pflags & IMMUTABLE))
		return (EPERM);

	/* Otherwise, user id 0 always gets access. */
	if (cred->cr_uid == 0) {
		retval = 0;
		goto Exit;
	};

	/* Otherwise, check the owner. */
	if ((retval = hfs_owner_rights(vp, cred, p, false)) == 0) {
		retval = ((hp->h_meta->h_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
		goto Exit;
	}

	/* Otherwise, check the groups.
 */
	for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
		if (hp->h_meta->h_gid == *gp) {
			retval = ((hp->h_meta->h_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
			goto Exit;
		}

	/* Otherwise, check everyone else. */
	retval = ((hp->h_meta->h_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);

Exit:
	return (retval);
}



/*
 * Change the flags on a file or directory.
 * Inode must be locked before calling.
 */
static int
hfs_chflags(vp, flags, cred, p)
register struct vnode *vp;
register u_long flags;
register struct ucred *cred;
struct proc *p;
{
	register struct hfsnode *hp = VTOH(vp);
	int retval;

	/*
	 * Plain HFS keeps no real per-object ownership, so mere write access
	 * suffices there; on HFS+ the caller must own the object (or be root).
	 */
	if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
		if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {
			return retval;
		};
	} else if ((retval = hfs_owner_rights(vp, cred, p, true)) != 0) {
		return retval;
	};

	if (cred->cr_uid == 0) {
		/* Even root may not clear SF_IMMUTABLE/SF_APPEND at securelevel > 0. */
		if ((hp->h_meta->h_pflags & (SF_IMMUTABLE | SF_APPEND)) &&
		    securelevel > 0) {
			return EPERM;
		};
		hp->h_meta->h_pflags = flags;
	} else {
		/* Non-root may only modify the user-settable flags, and never
		   while a superuser flag is in effect. */
		if (hp->h_meta->h_pflags & (SF_IMMUTABLE | SF_APPEND) ||
		    (flags & UF_SETTABLE) != flags) {
			return EPERM;
		};
		hp->h_meta->h_pflags &= SF_SETTABLE;
		hp->h_meta->h_pflags |= (flags & UF_SETTABLE);
	}
	hp->h_meta->h_metaflags &= ~IN_UNSETACCESS;
	hp->h_nodeflags |= IN_CHANGE;

	return 0;
}


/*
 * Perform chown operation on hfsnode hp;
 * hfsnode must be locked prior to call.
 */
static int
hfs_chown(vp, uid, gid, cred, p)
register struct vnode *vp;
uid_t uid;
gid_t gid;
struct ucred *cred;
struct proc *p;
{
	register struct hfsnode *hp = VTOH(vp);
	uid_t ouid;
	gid_t ogid;
	int retval = 0;

	/* Plain HFS stores no per-object ownership; refuse the operation. */
	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return EOPNOTSUPP;

	/* Ownership is synthesized on "unknown permissions" mounts; nothing to do. */
	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
		return E_NONE;
	};

	/* VNOVAL means "leave this id unchanged". */
	if (uid == (uid_t)VNOVAL)
		uid = hp->h_meta->h_uid;
	if (gid == (gid_t)VNOVAL)
		gid = hp->h_meta->h_gid;
	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((cred->cr_uid != hp->h_meta->h_uid || uid != hp->h_meta->h_uid ||
	    (gid != hp->h_meta->h_gid && !groupmember((gid_t)gid, cred))) &&
	    (retval = suser(cred, &p->p_acflag)))
		return (retval);

	ogid = hp->h_meta->h_gid;
	ouid = hp->h_meta->h_uid;

	hp->h_meta->h_gid = gid;
	hp->h_meta->h_uid = uid;

	hp->h_meta->h_metaflags &= ~IN_UNSETACCESS;
	if (ouid != uid || ogid != gid)
		hp->h_nodeflags |= IN_CHANGE;
	/* Clear the set-id bits on a non-superuser chown (POSIX semantics). */
	if (ouid != uid && cred->cr_uid != 0)
		hp->h_meta->h_mode &= ~ISUID;
	if (ogid != gid && cred->cr_uid != 0)
		hp->h_meta->h_mode &= ~ISGID;
	return (0);
}



/*
#
#% exchange fvp L L L
#% exchange tvp L L L
#
 vop_exchange {
     IN struct vnode *fvp;
     IN struct vnode *tvp;
     IN struct ucred *cred;
     IN struct proc *p;
 };

 */
 /*
  * exchange is a very tricky routine, because we might have to unlock the
  * passed in vnode, and then retry locking it and all its siblings, and then
  * unlocking them in reverse.
  * Also the sibling list lock must be kept during the whole operation to
  * make sure nothing changes underneath us.
  * Also it depends on behavior of the sibling list and hash, so
  * careful if you change anything.
+ */ + +static int +hfs_exchange(ap) +struct vop_exchange_args /* { +struct vnode *a_fvp; +struct vnode *a_tvp; +struct ucred *a_cred; +struct proc *a_p; +} */ *ap; +{ + struct hfsnode *from_hp, *to_hp, *nhp; + struct hfsnode *fromFirst, *fromSecond, *toFirst, *toSecond; + struct vnode *from_vp, *to_vp; + struct hfsmount *hfsmp; + u_char tmp_name[kHFSPlusMaxFileNameBytes+1]; /* 766 bytes! */ + ExtendedVCB *vcb; + u_int32_t fromFileID, toFileID; + u_int32_t fromParID; + u_int32_t tmpLong; + int retval = E_NONE; + DBG_FUNC_NAME("exchange"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_LOCKS_INIT(0,ap->a_fvp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,ap->a_tvp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + /* Set up variables and checks */ + from_vp = ap->a_fvp; + to_vp = ap->a_tvp; + from_hp = VTOH(from_vp); + to_hp = VTOH(to_vp); + hfsmp = VTOHFS(from_vp); + vcb = HTOVCB(from_hp); + toFileID = H_FILEID(to_hp); + fromFileID = H_FILEID(from_hp); + fromParID = H_DIRID(from_hp); + + if (from_vp->v_mount != to_vp->v_mount) { + DBG_VOP_LOCKS_TEST(EXDEV); + return EXDEV; + } + + /* Can only exchange file objects */ + if (from_vp->v_type != VREG || to_vp->v_type != VREG) { + DBG_VOP_LOCKS_TEST(EINVAL); + return EINVAL; + } + + /* + * Lock the siblink list + * Check for multiple forks + * If there are, we would need to: + * 1. Unlock ourselves + * 3. Traverse the list in a forward order...locking all vnodes + * 4. Flush all buffers + * 5. Perform the exchange + * 6. Traverse the list in a reverse order...unlocking all vnodes, except orignal + * Notice that the sibling lock is kept during the whole operation. 
This quarentees + * that no new forks are taken off or put on + */ + DBG_ASSERT(H_FORKTYPE(from_hp)==kDataFork && H_FORKTYPE(to_hp)==kDataFork); + fromFirst = fromSecond = toFirst = toSecond = NULL; + + if (from_hp->h_meta->h_usecount > 1) { + /* + * This has siblings, so remember the passed-in vnode, + * unlock it if it is not the 'first' sibling, + * and then lock the rest of the vnodes by sibling order. + * Notice that the passed-in vnode is not vrele(), this + * keeps the usecount>0, so it wont go away. + */ + simple_lock(&from_hp->h_meta->h_siblinglock); + fromFirst = from_hp->h_meta->h_siblinghead.cqh_first; + fromSecond = fromFirst->h_sibling.cqe_next; + simple_unlock(&from_hp->h_meta->h_siblinglock); + + if (fromFirst == from_hp) { + if (vget(HTOV(fromSecond), LK_EXCLUSIVE | LK_RETRY, ap->a_p)) + fromSecond = NULL; /* its going away */ + } else { + VOP_UNLOCK(HTOV(from_hp), 0, ap->a_p); + if (vget(HTOV(fromFirst), LK_EXCLUSIVE | LK_RETRY, ap->a_p)) + fromFirst = NULL; /* its going away */ + if (vget(HTOV(fromSecond), LK_EXCLUSIVE | LK_RETRY, ap->a_p)) + fromSecond = NULL; /* its going away */ + }; + + } else { + fromFirst = from_hp; + }; + + if (to_hp->h_meta->h_usecount > 1) { + + simple_lock(&to_hp->h_meta->h_siblinglock); + toFirst = to_hp->h_meta->h_siblinghead.cqh_first; + toSecond = toFirst->h_sibling.cqe_next; + simple_unlock(&to_hp->h_meta->h_siblinglock); + + if (toFirst == to_hp) { + if (vget(HTOV(toSecond), LK_EXCLUSIVE | LK_RETRY, ap->a_p)) + toSecond = NULL; /* its going away */ + } else { + VOP_UNLOCK(HTOV(to_hp), 0, ap->a_p); + if (vget(HTOV(toFirst), LK_EXCLUSIVE | LK_RETRY, ap->a_p)) + toFirst = NULL; /* its going away */ + if (vget(HTOV(toSecond), LK_EXCLUSIVE | LK_RETRY, ap->a_p)) + toSecond = NULL; /* its going away */ + }; + + } else { + toFirst = to_hp; + }; + + + /* Ignore any errors, we are doing a 'best effort' on flushing */ + if (fromFirst) + (void) vinvalbuf(HTOV(fromFirst), V_SAVE, ap->a_cred, ap->a_p, 0, 0); + if (fromSecond) + 
(void) vinvalbuf(HTOV(fromSecond), V_SAVE, ap->a_cred, ap->a_p, 0, 0); + if (toFirst) + (void) vinvalbuf(HTOV(toFirst), V_SAVE, ap->a_cred, ap->a_p, 0, 0); + if (toSecond) + (void) vinvalbuf(HTOV(toSecond), V_SAVE, ap->a_cred, ap->a_p, 0, 0); + + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p); + if (retval) goto Err_Exit; + + /* lock extents b-tree iff there are overflow extents */ + /* XXX SER ExchangeFileIDs() always tries to delete the virtual extent id for exchanging files + so we neeed the tree to be always locked. + */ + retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p); + if (retval) goto Err_Exit_Relse; + + /* Do the exchange */ + retval = MacToVFSError( ExchangeFileIDs(vcb, H_NAME(from_hp), H_NAME(to_hp), H_DIRID(from_hp), H_DIRID(to_hp), H_HINT(from_hp), H_HINT(to_hp) )); + + (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p); + + if (retval != E_NONE) { + DBG_ERR(("/tError trying to exchange: %d\n", retval)); + goto Err_Exit_Relse; + } + + + /* Purge the vnodes from the name */ + if (fromFirst) + cache_purge(HTOV(fromFirst)); + if (fromSecond) + cache_purge(HTOV(fromSecond)); + if (toFirst) + cache_purge(HTOV(toFirst)); + if (toSecond) + cache_purge(HTOV(toSecond)); + + /* Now exchange fileID, parID, name for the vnode itself */ + copystr(H_NAME(from_hp), (char*) tmp_name, strlen(H_NAME(from_hp))+1, NULL); + hfs_chid(from_hp, toFileID, H_DIRID(to_hp), H_NAME(to_hp)); + hfs_chid(to_hp, fromFileID, fromParID, (char*) tmp_name); + + /* copy rest */ + tmpLong = HTOFCB(from_hp)->fcbFlags; + HTOFCB(from_hp)->fcbFlags = HTOFCB(to_hp)->fcbFlags; + HTOFCB(to_hp)->fcbFlags = tmpLong; + + tmpLong = from_hp->h_meta->h_crtime; + from_hp->h_meta->h_crtime = to_hp->h_meta->h_crtime; + to_hp->h_meta->h_crtime = tmpLong; + + tmpLong = from_hp->h_meta->h_butime; + from_hp->h_meta->h_butime = to_hp->h_meta->h_butime; + to_hp->h_meta->h_butime = tmpLong; + + 
tmpLong = from_hp->h_meta->h_atime; + from_hp->h_meta->h_atime = to_hp->h_meta->h_atime; + to_hp->h_meta->h_atime = tmpLong; + + tmpLong = from_hp->h_meta->h_ctime; + from_hp->h_meta->h_ctime = to_hp->h_meta->h_ctime; + to_hp->h_meta->h_ctime = tmpLong; + + tmpLong = from_hp->h_meta->h_gid; + from_hp->h_meta->h_gid = to_hp->h_meta->h_gid; + to_hp->h_meta->h_gid = tmpLong; + + tmpLong = from_hp->h_meta->h_uid; + from_hp->h_meta->h_uid = to_hp->h_meta->h_uid; + to_hp->h_meta->h_uid = tmpLong; + + tmpLong = from_hp->h_meta->h_pflags; + from_hp->h_meta->h_pflags = to_hp->h_meta->h_pflags; + to_hp->h_meta->h_pflags = tmpLong; + + tmpLong = from_hp->h_meta->h_mode; + from_hp->h_meta->h_mode = to_hp->h_meta->h_mode; + to_hp->h_meta->h_mode = tmpLong; + + tmpLong = from_hp->h_meta->h_rdev; + from_hp->h_meta->h_rdev = to_hp->h_meta->h_rdev; + to_hp->h_meta->h_rdev = tmpLong; + + tmpLong = from_hp->h_meta->h_size; + from_hp->h_meta->h_size = to_hp->h_meta->h_size; + to_hp->h_meta->h_size = tmpLong; + + + +Err_Exit_Relse: + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p); + + +Err_Exit: + + + /* XXX SER + * At this point, the vnodes' data is switched, but are on the old hash list. + * so move them to the right bucket. This couldnt be done until now, because the h_siblinglock + * was being held. + * Scenario: + * A fork is trying to be added while exchanging...It got the hash lock, + * but is waiting for the h_siblinglock. So we cannot try get the hash lock + * until we release h_siblinglock, so it could continue, so it adds to the sibling list + * and at the old place, so hfs_vhashmove has to move all vnodes with the old file id. + * Not very pretty, becarefull that this works ok + * Scenario 2: + * Same as the above, but before the move is made (like at this very spot), the new vnode + * is added and a vget is requested for that new vnode, it would have old data + * WE MIGHT NEED TO LOCK THE HASH BECAUSE OF THIS !!! 
+ * Scenario 3: + * Hey! Same as above, but it is added after all the moving + * So now there is a vnode with the old data, on the old hash...it will become + * lost next time that a vget() is done + * + * XXX SER A solution might be to NOT move the hash, but the data (extents) or the + * opposite that we are doing now + */ + hfs_vhashmove(from_hp, fromFileID); + hfs_vhashmove(to_hp, toFileID); + + +#if HFS_DIAGNOSTIC + if (fromFirst) + debug_check_vnode(HTOV(fromFirst), 0); + if (fromSecond) + debug_check_vnode(HTOV(fromSecond), 0); + if (toFirst) + debug_check_vnode(HTOV(toFirst), 0); + if (toSecond) + debug_check_vnode(HTOV(toSecond), 0); +#endif + + + /* Unlock any forks, and the sibling list */ + if (to_hp->h_meta->h_usecount > 1) { + if (to_hp == toFirst) { + if (toSecond) + vput(HTOV(toSecond)); + } else { + if (toSecond) + vrele(HTOV(toSecond)); /* decrement, return it locked */ + if (toFirst) + vput(HTOV(toFirst)); + } + } + if (from_hp->h_meta->h_usecount > 1) { + if (from_hp == fromFirst) { + if (fromSecond) + vput(HTOV(fromSecond)); + } else { + if (fromSecond) + vrele(HTOV(fromSecond)); /* decrement, return it locked */ + if (fromFirst) + vput(HTOV(fromFirst)); + } + } + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + +/* + * Change a vnode's file id, parent id and name + * + * Assumes the vnode is locked and is of type VREG + */ +static void +hfs_chid(struct hfsnode *hp, u_int32_t fid, u_int32_t pid, char* name) +{ + DBG_ASSERT(HTOV(hp)->v_type == VREG); + + H_HINT(hp) = 0; + H_FILEID(hp) = fid; /* change h_nodeID */ + H_DIRID(hp) = pid; + + hfs_set_metaname(name, hp->h_meta, HTOHFS(hp)); + + +} + + +/* + +#% fsync vp L L L +# + vop_fsync { + IN struct vnode *vp; + IN struct ucred *cred; + IN int waitfor; + IN struct proc *p; + + */ + + +static int +hfs_fsync(ap) +struct vop_fsync_args /* { + struct vnode *a_vp; + struct ucred *a_cred; + int a_waitfor; + struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp ; + struct hfsnode *hp = 
VTOH(vp); + int retval = 0; + register struct buf *bp; + struct timeval tv; + struct buf *nbp; + int s; + + DBG_FUNC_NAME("fsync"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT((" ")); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_ZERO); + DBG_HFS_NODE_CHECK(ap->a_vp); + +#if HFS_DIAGNOSTIC + DBG_ASSERT(*((int*)&vp->v_interlock) == 0); +#endif + + + /* + * First of all, write out any clusters. + */ + cluster_push(vp); + + /* + * Flush all dirty buffers associated with a vnode. + */ +loop: + s = splbio(); + for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { + nbp = bp->b_vnbufs.le_next; + if ((bp->b_flags & B_BUSY)) + continue; + if ((bp->b_flags & B_DELWRI) == 0) + panic("hfs_fsync: not dirty"); + bremfree(bp); + bp->b_flags |= B_BUSY; + bp->b_flags &= ~B_LOCKED; /* Clear flag, should only be set on meta files */ + splx(s); + /* + * Wait for I/O associated with indirect blocks to complete, + * since there is no way to quickly wait for them below. + */ + DBG_VOP(("\t\t\tFlushing out phys block %d == log block %d\n", bp->b_blkno, bp->b_lblkno)); + if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT) { + (void) bawrite(bp); + } else { + (void) VOP_BWRITE(bp); + } + goto loop; + } + if (vp->v_flag & VHASDIRTY) + ubc_pushdirty(vp); + + if (ap->a_waitfor == MNT_WAIT) { + while (vp->v_numoutput) { + vp->v_flag |= VBWAIT; + tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0); + } + + /* I have seen this happen for swapfile. So it is safer to + * check for dirty buffers again. 
--Umesh + */ + if (vp->v_dirtyblkhd.lh_first || (vp->v_flag & VHASDIRTY)) { + vprint("hfs_fsync: dirty", vp); + splx(s); + goto loop; + } + } + splx(s); + +#if HFS_DIAGNOSTIC + DBG_ASSERT(*((int*)&vp->v_interlock) == 0); +#endif + + tv = time; + if ((vp->v_flag & VSYSTEM) && (hp->fcbBTCBPtr!=NULL)) + BTSetLastSync(HTOFCB(hp), tv.tv_sec); + + if (H_FORKTYPE(hp) != kSysFile) { + retval = VOP_UPDATE(ap->a_vp, &tv, &tv, ap->a_waitfor == MNT_WAIT); + + if (retval != E_NONE) { + DBG_ERR(("%s: FLUSH FAILED: %s\n", funcname, H_NAME(hp))); + } + } + else + hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + + if (ap->a_waitfor == MNT_WAIT) { + DBG_ASSERT(vp->v_dirtyblkhd.lh_first == NULL); + }; + DBG_VOP_LOCKS_TEST(retval); + DBG_ASSERT(*((int*)&vp->v_interlock) == 0); + return (retval); +} + + +int +hfs_fsync_transaction(struct vnode *vp) +{ + struct hfsnode *hp = VTOH(vp); + register struct buf *bp; + struct timeval tv; + struct buf *nbp; + int s; + + /* + * Flush all dirty buffers associated with a vnode. 
+ */ +loop: + s = splbio(); + + for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { + nbp = bp->b_vnbufs.le_next; + if ((bp->b_flags & B_BUSY)) + continue; + if ((bp->b_flags & B_DELWRI) == 0) + panic("hfs_fsync: not dirty"); + if ( !(bp->b_flags & B_LOCKED)) + continue; + + bremfree(bp); + bp->b_flags |= B_BUSY; + bp->b_flags &= ~B_LOCKED; /* Clear flag, should only be set on meta files */ + splx(s); + + (void) bawrite(bp); + + goto loop; + } + splx(s); + + tv = time; + if ((vp->v_flag & VSYSTEM) && (hp->fcbBTCBPtr!=NULL)) + (void) BTSetLastSync(VTOFCB(vp), tv.tv_sec); + hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + + return 0; +} + +/* + +#% remove dvp L U U +#% remove vp L U U +# + vop_remove { + IN WILLRELE struct vnode *dvp; + IN WILLRELE struct vnode *vp; + IN struct componentname *cnp; + + */ + +int +hfs_remove(ap) +struct vop_remove_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + struct hfsnode *hp = VTOH(ap->a_vp); + struct hfsmount *hfsmp = HTOHFS(hp); + struct proc *p = current_proc(); + struct timeval tv; + int retval, use_count; + int filebusy = 0; + DBG_FUNC_NAME("remove"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); + DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,ap->a_vp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + + retval = E_NONE; + + if ((hp->h_meta->h_pflags & (IMMUTABLE | APPEND)) || + (VTOH(dvp)->h_meta->h_pflags & APPEND)) { + retval = EPERM; + goto out; + } + + if (vp->v_usecount > 1) { + /* + * the namei done for the rename took a reference on the + * vnode. Hence set 1 in the tookref parameter + * of ubc_isinuse(). 
+ */ + if(UBCISVALID(vp) && !ubc_isinuse(vp, 1)) + goto hfs_nobusy; + if ((ap->a_cnp->cn_flags & NODELETEBUSY) + || (hfsmp->hfs_private_metadata_dir == 0)) { + /* Carbon semantics prohibits deleting busy files */ + retval = EBUSY; + goto out; + } else + filebusy = 1; + } + +hfs_nobusy: + + tv = time; /* Done here, so all times are the same */ + + /* Check other siblings for in use also */ + /* Uncache everything and make sure no other usecount */ + /* + * This assumes the presence of the most 1 sibling + * + * a. loop through the siblings looking for another + * b. If we find ourselves...skip it + * If there was a sibling: + * a. Check for a positve usecount + * b. uncache any pages + * c. Write out and memory changes + * The idea is to keep the h_siblinglock as little as possible + */ + if (hp->h_meta->h_usecount > 1) { + struct vnode *sib_vp = NULL; + struct hfsnode *nhp; + + DBG_ASSERT(hp->h_meta->h_siblinghead.cqh_first && + (hp->h_meta->h_siblinghead.cqh_first != hp->h_meta->h_siblinghead.cqh_last)); + DBG_ASSERT(H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork); + + /* Loop through all siblings, skipping ourselves */ + simple_lock(&hp->h_meta->h_siblinglock); + CIRCLEQ_FOREACH(nhp, &hp->h_meta->h_siblinghead, h_sibling) { + if (nhp == hp) /* skip ourselves */ + continue; + sib_vp = HTOV(nhp); + }; + simple_unlock(&hp->h_meta->h_siblinglock); + + /* Check to see if the other fork is in use */ + DBG_ASSERT(sib_vp != NULL); + simple_lock(&sib_vp->v_interlock); + use_count = sib_vp->v_usecount; + simple_unlock(&sib_vp->v_interlock); + if (use_count > 0) { + /* + * This is a sibling vnode and we did not take + * a reference on it. + * Hence set 0 in the tookref parameter + * of ubc_isinuse(). 
+ */ + if(UBCISVALID(sib_vp) && !ubc_isinuse(sib_vp, 0)) + goto hfs_nobusy2; + if ((ap->a_cnp->cn_flags & NODELETEBUSY) + || (hfsmp->hfs_private_metadata_dir == 0)) { + /* Carbon semantics prohibits deleting busy files */ + retval = EBUSY; + goto out; + } else + filebusy = 1; + } /* use_count > 0 */ + +hfs_nobusy2: + + /* The only error that vget returns is when the vnode is going away, so ignore the vnode */ + if (vget(sib_vp, LK_EXCLUSIVE | LK_RETRY, p) == 0) { + /* + * XXX SER An intelligent person would ask, why flush out changes + * that are going to be deleted? See the next comment. + */ + if ((VTOH(sib_vp)->h_nodeflags & IN_MODIFIED) || (VTOFCB(sib_vp)->fcbFlags + & fcbModifiedMask)) { + DBG_ASSERT((VTOH(sib_vp)->h_nodeflags & IN_MODIFIED) != 0); + VOP_UPDATE(sib_vp, &tv, &tv, 0); + }; + + /* Invalidate the buffers, ignore the results */ + (void) vinvalbuf(sib_vp, 0, NOCRED, p, 0, 0); + + vput(sib_vp); + }; /* vget() */ + }; /* h_use_count > 1 */ + + /* + * remove the entry from the namei cache: + * We do it early before any linking/busy file weirdness, make sure the + * original is gone + */ + cache_purge(vp); + + /* Flush out any catalog changes */ + /* XXX SER: This is a hack, because hfsDelete reads the data from the disk + * and not from memory which is more correct + */ + if ((hp->h_nodeflags & IN_MODIFIED) || (HTOFCB(hp)->fcbFlags & fcbModifiedMask)) + { + DBG_ASSERT((hp->h_nodeflags & IN_MODIFIED) != 0); + VOP_UPDATE(vp, &tv, &tv, 0); + } + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (retval != E_NONE) { + retval = EBUSY; + goto out; + } + + /* + * After this point, any errors must goto out2, so the Catalog Tree gets unlocked + */ + +#if HFS_HARDLINKS + /* + * Multi-linked files just need their link node deleted from the catalog + */ + if (hp->h_meta->h_metaflags & IN_DATANODE) { + + if ((ap->a_cnp->cn_flags & HASBUF) == 0 || + ap->a_cnp->cn_nameptr[0] == '\0') { + retval = ENOENT; /* 
name missing */ + goto out2; + } + + /* lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, p); + if (retval != E_NONE) { + retval = EBUSY; + goto out2; /* unlock catalog b-tree on the way out */ + } + + retval = hfsDelete (HTOVCB(hp), H_FILEID(VTOH(dvp)), + ap->a_cnp->cn_nameptr, TRUE, H_HINT(hp)); + + (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); + + if (retval != 0) + goto out2; + + hp->h_meta->h_metaflags |= IN_NOEXISTS; + hp->h_nodeflags |= IN_CHANGE; + if (--hp->h_meta->h_nlink < 1) + hp->h_meta->h_metaflags |= IN_DELETED; + + /* name and parent fields are no longer valid so invalidate them */ + H_DIRID(hp) = kUnknownID; + hfs_set_metaname("\0", hp->h_meta, HTOHFS(hp)); + + if ((ap->a_cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME)) + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + + goto out2; /* link deleted, all done */ + } +#endif + + /* + * To make the HFS filesystem follow UFS unlink semantics, a remove of + * an active vnode is translated to a move/rename so the file appears + * deleted. Later, the file is removed by hfs_inactive on the hfsnode. 
+ */ + if (filebusy) { + UInt32 hint = H_HINT(hp); + char nodeName[32]; + + MAKE_DELETED_NAME(nodeName, H_FILEID(hp)); + + retval = hfsMoveRename (HTOVCB(hp), H_DIRID(hp), H_NAME(hp), + hfsmp->hfs_private_metadata_dir, nodeName, &hint); + if (retval) goto out2; + + hp->h_meta->h_metaflags |= IN_DELETED; + hp->h_nodeflags |= IN_CHANGE; + + /* update name so Catalog lookups succeed */ + H_HINT(hp) = hint; + H_DIRID(hp) = hfsmp->hfs_private_metadata_dir; + hfs_set_metaname(nodeName, hp->h_meta, HTOHFS(hp)); + + goto out2; /* all done, unlock the catalog */ + } + + /* unlock the Catalog */ + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + + /* Invalidate the buffers */ + if ((retval= vinvalbuf(vp, 0, NOCRED, p, 0, 0))) + goto out; + + if(UBCINFOEXISTS(vp)) + (void)ubc_setsize(vp, (off_t)0); + + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (retval != E_NONE) { + retval = EBUSY; + goto out; + } + /* lock extents b-tree (also protects volume bitmap) */ + retval = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, p); + if (retval != E_NONE) { + retval = EBUSY; + goto out2; /* unlock catalog b-tree on the way out */ + } + + /* remove entry from catalog and free any blocks used */ + retval = hfsDelete (HTOVCB(hp), H_DIRID(hp), H_NAME(hp), TRUE, H_HINT(hp)); + + /* Clean up */ + (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, p); + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + + if (retval != 0) + goto out; + + hp->h_meta->h_metaflags |= IN_NOEXISTS; + hp->h_meta->h_mode = 0; /* Makes the node go away...see inactive */ + /* clear the block mappings */ + hp->fcbPLen = (u_int64_t)0; + bzero(&hp->fcbExtents, sizeof(HFSPlusExtentRecord)); + + VTOH(dvp)->h_nodeflags |= IN_CHANGE | IN_UPDATE; + + if (dvp == vp) { + vrele(vp); + } else { + vput(vp); + }; + + vput(dvp); + DBG_VOP_LOCKS_TEST(retval); + + if (UBCINFOEXISTS(vp)) { + (void) 
ubc_uncache(vp); + ubc_release(vp); + /* WARNING vp may not be valid after this */ + } + return (retval); + +out2: + (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p); + +out:; + + if (! retval) + VTOH(dvp)->h_nodeflags |= IN_CHANGE | IN_UPDATE; + + if (dvp == vp) { + vrele(vp); + } else { + vput(vp); + }; + + vput(dvp); + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + +/* + +#% rename sourcePar_vp U U U +#% rename source_vp U U U +#% rename targetPar_vp L U U +#% rename target_vp X U U +# + vop_rename { + IN WILLRELE struct vnode *sourcePar_vp; + IN WILLRELE struct vnode *source_vp; + IN struct componentname *source_cnp; + IN WILLRELE struct vnode *targetPar_vp; + IN WILLRELE struct vnode *target_vp; + IN struct componentname *target_cnp; + + + */ +/* +* On entry: +* source's parent directory is unlocked +* source file or directory is unlocked +* destination's parent directory is locked +* destination file or directory is locked if it exists +* +* On exit: +* all denodes should be released +* +*/ + +static int +hfs_rename(ap) +struct vop_rename_args /* { + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; +} */ *ap; +{ + struct vnode *target_vp = ap->a_tvp; + struct vnode *targetPar_vp = ap->a_tdvp; + struct vnode *source_vp = ap->a_fvp; + struct vnode *sourcePar_vp = ap->a_fdvp; + struct componentname *target_cnp = ap->a_tcnp; + struct componentname *source_cnp = ap->a_fcnp; + struct proc *p = source_cnp->cn_proc; + struct hfsnode *target_hp, *targetPar_hp, *source_hp, *sourcePar_hp; + u_int32_t oldparent = 0, newparent = 0; + int doingdirectory = 0; + int retval = 0; + struct timeval tv; + struct hfsCatalogInfo catInfo; + DBG_VOP_LOCKS_DECL(4); + + DBG_FUNC_NAME("rename");DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\n")); + DBG_VOP_CONT(("\t"));DBG_VOP_CONT(("Source:\t"));DBG_VOP_PRINT_VNODE_INFO(ap->a_fvp);DBG_VOP_CONT(("\n")); + 
DBG_VOP_CONT(("\t"));DBG_VOP_CONT(("SourcePar: "));DBG_VOP_PRINT_VNODE_INFO(ap->a_fdvp);DBG_VOP_CONT(("\n")); + DBG_VOP_CONT(("\t"));DBG_VOP_CONT(("Target:\t"));DBG_VOP_PRINT_VNODE_INFO(ap->a_tvp);DBG_VOP_CONT(("\n")); + DBG_VOP_CONT(("\t"));DBG_VOP_CONT(("TargetPar: "));DBG_VOP_PRINT_VNODE_INFO(ap->a_tdvp);DBG_VOP_CONT(("\n")); + DBG_VOP_CONT(("\t"));DBG_VOP_CONT(("SourceName:\t"));DBG_VOP_PRINT_CPN_INFO(ap->a_fcnp);DBG_VOP_CONT(("\n")); + DBG_VOP_CONT(("\t"));DBG_VOP_CONT(("TargetName:\t"));DBG_VOP_PRINT_CPN_INFO(ap->a_tcnp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_fdvp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,ap->a_fvp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(2,ap->a_tdvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(3,ap->a_tvp, VOPDBG_LOCKNOTNIL, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + WRITE_CK(ap->a_fdvp, funcname); + DBG_HFS_NODE_CHECK(ap->a_fdvp); + DBG_HFS_NODE_CHECK(ap->a_tdvp); + +#if HFS_DIAGNOSTIC + if ((target_cnp->cn_flags & HASBUF) == 0 || + (source_cnp->cn_flags & HASBUF) == 0) + panic("hfs_rename: no name"); +#endif + + DBG_ASSERT((ap->a_fdvp->v_type == VDIR) && (ap->a_tdvp->v_type == VDIR)); + target_hp = targetPar_hp = source_hp = sourcePar_hp = 0; + + /* + * Check for cross-device rename. 
+ */ + if ((source_vp->v_mount != targetPar_vp->v_mount) || + (target_vp && (source_vp->v_mount != target_vp->v_mount))) { + retval = EXDEV; + goto abortit; + } + + /* + * Check for access permissions + */ + if (target_vp && ((VTOH(target_vp)->h_meta->h_pflags & (IMMUTABLE | APPEND)) || + (VTOH(targetPar_vp)->h_meta->h_pflags & APPEND))) { + retval = EPERM; + goto abortit; + } + + if ((retval = vn_lock(source_vp, LK_EXCLUSIVE, p))) + goto abortit; + + sourcePar_hp = VTOH(sourcePar_vp); + source_hp = VTOH(source_vp); + oldparent = H_FILEID(sourcePar_hp); + if ((source_hp->h_meta->h_pflags & (IMMUTABLE | APPEND)) || (sourcePar_hp->h_meta->h_pflags & APPEND)) { + VOP_UNLOCK(source_vp, 0, p); + retval = EPERM; + goto abortit; + } + + /* + * Be sure we are not renaming ".", "..", or an alias of ".". This + * leads to a crippled directory tree. It's pretty tough to do a + * "ls" or "pwd" with the "." directory entry missing, and "cd .." + * doesn't work if the ".." entry is missing. + */ + if ((source_hp->h_meta->h_mode & IFMT) == IFDIR) { + if ((source_cnp->cn_namelen == 1 && source_cnp->cn_nameptr[0] == '.') + || sourcePar_hp == source_hp + || (source_cnp->cn_flags&ISDOTDOT) + || (source_hp->h_nodeflags & IN_RENAME)) { + VOP_UNLOCK(source_vp, 0, p); + retval = EINVAL; + goto abortit; + } + source_hp->h_nodeflags |= IN_RENAME; + doingdirectory = TRUE; + } + + /* + * + * >>>> Transit between abort and bad <<<< + * + */ + + targetPar_hp = VTOH(targetPar_vp); + if (target_vp) + target_hp = VTOH(target_vp); + else + DBG_ASSERT(target_hp == NULL); + + newparent = H_FILEID(targetPar_hp); + + /* Test to make sure we are not crossing devices */ + /* XXX SER Is this necesary, does catalog manager take care of this? 
*/ + if (target_vp) { + if (H_DEV(target_hp) != H_DEV(targetPar_hp) || H_DEV(target_hp) != H_DEV(source_hp)) + panic("rename: EXDEV"); + } + else { + if (H_DEV(targetPar_hp) != H_DEV(source_hp)) + panic("rename: EXDEV"); + }; + + retval = VOP_ACCESS(source_vp, VWRITE, target_cnp->cn_cred, target_cnp->cn_proc); + if (doingdirectory && (newparent != oldparent)) { + if (retval) /* write access check above */ + goto bad; + } + retval = 0; /* Reset value from above, we dont care about it anymore */ + + /* + * If the destination exists, then be sure its type (file or dir) + * matches that of the source. And, if it is a directory make sure + * it is empty. Then delete the destination. + */ + if (target_vp) { + + /* + * If the parent directory is "sticky", then the user must + * own the parent directory, or the destination of the rename, + * otherwise the destination may not be changed (except by + * root). This implements append-only directories. + */ + if ((targetPar_hp->h_meta->h_mode & S_ISTXT) && (target_cnp->cn_cred->cr_uid != 0) && + target_cnp->cn_cred->cr_uid != targetPar_hp->h_meta->h_uid && + target_cnp->cn_cred->cr_uid != target_hp->h_meta->h_uid) { + retval = EPERM; + goto bad; + } + + /* + * VOP_REMOVE will vput targetPar_vp so we better bump + * its ref count and relockit, always set target_vp to + * NULL afterwards to indicate that were done with it. 
+ */ + VREF(targetPar_vp); + + cache_purge(target_vp); + +#if HFS_HARDLINKS + target_cnp->cn_flags &= ~SAVENAME; +#endif + + retval = VOP_REMOVE(targetPar_vp, target_vp, target_cnp); + (void) vn_lock(targetPar_vp, LK_EXCLUSIVE | LK_RETRY, p); + + target_vp = NULL; + target_hp = NULL; + + if (retval) goto bad; + + }; + + + if (newparent != oldparent) + vn_lock(sourcePar_vp, LK_EXCLUSIVE | LK_RETRY, p); + + /* remove the existing entry from the namei cache: */ + cache_purge(source_vp); + + INIT_CATALOGDATA(&catInfo.nodeData, 0); + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(VTOHFS(source_vp), kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (retval) { + if (newparent != oldparent) /* unlock the lock we just got */ + VOP_UNLOCK(sourcePar_vp, 0, p); + goto bad; + }; + + /* use source_cnp instead of H_NAME(source_hp) in case source is a hard link */ + retval = hfsMoveRename( HTOVCB(source_hp), H_DIRID(source_hp), source_cnp->cn_nameptr, + H_FILEID(VTOH(targetPar_vp)), target_cnp->cn_nameptr, &H_HINT(source_hp)); + + if (retval == 0) { + /* Look up the catalog entry just renamed since it might have been auto-decomposed */ + catInfo.hint = H_HINT(source_hp); + retval = hfs_getcatalog(HTOVCB(source_hp), H_FILEID(targetPar_hp), target_cnp->cn_nameptr, target_cnp->cn_namelen, &catInfo); + } + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VTOHFS(source_vp), kHFSCatalogFileID, LK_RELEASE, p); + + if (newparent != oldparent) + VOP_UNLOCK(sourcePar_vp, 0, p); + + if (retval) goto bad; + + H_DIRID(source_hp) = H_FILEID(targetPar_hp); + + hfs_name_CatToMeta(&catInfo.nodeData, source_hp->h_meta); + + CLEAN_CATALOGDATA(&catInfo.nodeData); + + source_hp->h_nodeflags &= ~IN_RENAME; + + + /* + * Timestamp both parent directories. + * Note that if this is a rename within the same directory, + * (where targetPar_hp == sourcePar_hp) + * the code below is still safe and correct. 
+ */ + targetPar_hp->h_nodeflags |= IN_UPDATE; + sourcePar_hp->h_nodeflags |= IN_UPDATE; + tv = time; + HFSTIMES(targetPar_hp, &tv, &tv); + HFSTIMES(sourcePar_hp, &tv, &tv); + + vput(targetPar_vp); + vrele(sourcePar_vp); + vput(source_vp); + + DBG_VOP_LOCKS_TEST(retval); + if (retval != E_NONE) { + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\tReturning with error %d\n",retval)); + } + return (retval); + +bad:; + if (retval && doingdirectory) + source_hp->h_nodeflags &= ~IN_RENAME; + + if (targetPar_vp == target_vp) + vrele(targetPar_vp); + else + vput(targetPar_vp); + + if (target_vp) + vput(target_vp); + + vrele(sourcePar_vp); + + if (VOP_ISLOCKED(source_vp)) + vput(source_vp); + else + vrele(source_vp); + + DBG_VOP_LOCKS_TEST(retval); + if (retval != E_NONE) { + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\tReturning with error %d\n",retval)); + } + return (retval); + +abortit:; + + VOP_ABORTOP(targetPar_vp, target_cnp); /* XXX, why not in NFS? */ + + if (targetPar_vp == target_vp) + vrele(targetPar_vp); + else + vput(targetPar_vp); + + if (target_vp) + vput(target_vp); + + VOP_ABORTOP(sourcePar_vp, source_cnp); /* XXX, why not in NFS? 
*/ + + vrele(sourcePar_vp); + vrele(source_vp); + + DBG_VOP_LOCKS_TEST(retval); + if (retval != E_NONE) { + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT(("\tReturning with error %d\n",retval)); + } + return (retval); +} + + + +/* + * Mkdir system call +#% mkdir dvp L U U +#% mkdir vpp - L - +# + vop_mkdir { + IN WILLRELE struct vnode *dvp; + OUT struct vnode **vpp; + IN struct componentname *cnp; + IN struct vattr *vap; + + We are responsible for freeing the namei buffer, + it is done in hfs_makenode() +*/ + +int +hfs_mkdir(ap) +struct vop_mkdir_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */ *ap; +{ + struct proc *p = current_proc(); + int retval; + int mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode); + + DBG_FUNC_NAME("mkdir"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_dvp); + DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_POS); + + DBG_VOP(("%s: parent 0x%x (%s) ap->a_cnp->cn_nameptr %s\n", funcname, (u_int)VTOH(ap->a_dvp), H_NAME(VTOH(ap->a_dvp)), ap->a_cnp->cn_nameptr)); + WRITE_CK( ap->a_dvp, funcname); + DBG_HFS_NODE_CHECK(ap->a_dvp); + DBG_ASSERT(ap->a_dvp->v_type == VDIR); + + /* Create the vnode */ + DBG_ASSERT((ap->a_cnp->cn_flags & SAVESTART) == 0); + retval = hfs_makenode(mode, 0, ap->a_dvp, ap->a_vpp, ap->a_cnp, p); + DBG_VOP_UPDATE_VP(1, *ap->a_vpp); + + if (retval != E_NONE) { + DBG_ERR(("%s: hfs_makenode FAILED: %s, %s\n", funcname, ap->a_cnp->cn_nameptr, H_NAME(VTOH(ap->a_dvp)))); + DBG_VOP_LOCKS_TEST(retval); + return (retval); + } + + DBG_VOP_LOCKS_TEST(E_NONE); + return (E_NONE); +} + +/* + * Rmdir system call. 
+#% rmdir dvp L U U +#% rmdir vp L U U +# + vop_rmdir { + IN WILLRELE struct vnode *dvp; + IN WILLRELE struct vnode *vp; + IN struct componentname *cnp; + + */ + +int +hfs_rmdir(ap) +struct vop_rmdir_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + struct hfsnode *hp = VTOH(vp); + struct proc *p = current_proc(); + int retval; + DBG_FUNC_NAME("rmdir"); + DBG_VOP_LOCKS_DECL(2); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP(("\tParent: "));DBG_VOP_PRINT_VNODE_INFO(ap->a_dvp);DBG_VOP_CONT(("\n")); + DBG_VOP(("\tTarget: "));DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP(("\tTarget Name: "));DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP_CONT(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,ap->a_vp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + + if (dvp == vp) { + vrele(vp); + vput(vp); + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); + } + + /* + * HFS differs from UFS here in that we don't allow removing + * a directory that in use by others - even if its empty. + * + * In the future we might want to allow this just like we do + * for files (by renaming the busy directory). + */ +#if 0 + if (vp->v_usecount > 1) { + DBG_ERR(("%s: dir is busy, usecount is %d\n", funcname, vp->v_usecount )); + retval = EBUSY; + goto Err_Exit; + } +#endif + /* remove the entry from the namei cache: */ + cache_purge(vp); + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (retval != E_NONE) { + goto Err_Exit; + } + + /* remove entry from catalog */ + retval = hfsDelete (HTOVCB(hp), H_DIRID(hp), H_NAME(hp), FALSE, H_HINT(hp)); + + /* unlock catalog b-tree */ + (void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, p); + + if (! 
retval) {
		/* Deletion succeeded: mark the parent dir for a times update and
		 * arrange for this vnode to be reclaimed (mode 0 => see hfs_inactive). */
		VTOH(dvp)->h_nodeflags |= IN_CHANGE | IN_UPDATE;	/* Set the parent to be updated */
		hp->h_meta->h_mode = 0;				/* Makes the vnode go away...see inactive */
		hp->h_meta->h_metaflags |= IN_NOEXISTS;
	}

Err_Exit:;
	if (dvp != 0)
		vput(dvp);
	vput(vp);

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}

/*
 * symlink -- make a symbolic link
#% symlink	dvp	L U U
#% symlink	vpp	- U -
#
# XXX - note that the return vnode has already been VRELE'ed
#       by the filesystem layer.  To use it you must use vget,
#       possibly with a further namei.
#
 vop_symlink {
     IN WILLRELE struct vnode *dvp;
     OUT WILLRELE struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;
     IN char *target;

     We are responsible for freeing the namei buffer,
     it is done in hfs_makenode().

*/

int
hfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	register struct vnode *vp, **vpp = ap->a_vpp;
	struct proc *p = current_proc();
	struct hfsnode *hp;
	u_int32_t dfltClump;
	int len, retval;
	DBG_FUNC_NAME("symlink");
	DBG_VOP_LOCKS_DECL(2);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS);
	DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_UNLOCKED, VOPDBG_IGNORE, VOPDBG_POS);

	/* Symlinks are an HFS Plus feature; plain HFS volumes cannot store them. */
	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		DBG_VOP(("  ...sorry HFS disks don't support symbolic links.\n"));
		DBG_VOP_LOCKS_TEST(EOPNOTSUPP);
		return (EOPNOTSUPP);
	}

	/* Create the vnode */
	retval = hfs_makenode(IFLNK | ap->a_vap->va_mode, 0, ap->a_dvp,
			      vpp, ap->a_cnp, p);
	DBG_VOP_UPDATE_VP(1, *ap->a_vpp);

	if (retval != E_NONE) {
		DBG_VOP_LOCKS_TEST(retval);
		return (retval);
	}

	vp = *vpp;
	len = strlen(ap->a_target);
	hp = VTOH(vp);
	dfltClump = hp->fcbClmpSize;

	/* make clump size minimal while writing the link target so the
	 * symlink's data fork doesn't get a full default clump allocated;
	 * the saved clump size is restored afterwards. */
	hp->fcbClmpSize = VTOVCB(vp)->blockSize;
	retval = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0,
			 UIO_SYSSPACE, IO_NODELOCKED, ap->a_cnp->cn_cred, (int *)0,
			 (struct proc *)0);
	hp->fcbClmpSize = dfltClump;

	vput(vp);
	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}


/*
 * Dummy dirents to simulate the "." and ".." entries of the directory
 * in a hfs filesystem. HFS doesn't provide these on disk. Note that
 * the size of these entries is the smallest needed to represent them
 * (only 12 byte each).
 */
static hfsdotentry rootdots[2] = {
	{
		1,				/* d_fileno */
		sizeof(struct hfsdotentry),	/* d_reclen */
		DT_DIR,				/* d_type */
		1,				/* d_namlen */
		"."				/* d_name */
	},
	{
		1,				/* d_fileno */
		sizeof(struct hfsdotentry),	/* d_reclen */
		DT_DIR,				/* d_type */
		2,				/* d_namlen */
		".."				/* d_name */
	}
};

static hfsdotentry emptyentry = { 0 };

/* 4.3 Note:
* There is some confusion as to what the semantics of uio_offset are.
* In ufs, it represents the actual byte offset within the directory
* "file."  HFS, however, just uses it as an entry counter - essentially
* assuming that it has no meaning except to the hfs_readdir function.
* This approach would be more efficient here, but some callers may
* assume the uio_offset acts like a byte offset.  NFS in fact
* monkeys around with the offset field a lot between readdir calls.
*
* The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
* fields is a mess as well.  The libc function readdir() returns
* NULL  (indicating the end of a directory) when either
* the getdirentries() syscall (which calls this and returns
* the size of the buffer passed in less the value of uiop->uio_resid)
* returns 0, or a direct record with a d_reclen of zero.
* nfs_server.c:rfs_readdir(), on the other hand, checks for the end
* of the directory by testing uiop->uio_resid == 0.
 The solution
* is to pad the size of the last struct direct in a given
* block to fill the block if we are not at the end of the directory.
*/

/* Per-call state threaded through the catalog B-tree iteration callback. */
struct callbackstate {
	u_int32_t	cbs_parentID;		/* directory being enumerated */
	u_int32_t	cbs_hiddenDirID;	/* private metadata dir to hide */
	off_t		cbs_lastoffset;		/* uio offset before the last emitted entry */
	struct uio *	cbs_uio;		/* caller's output buffer */
	ExtendedVCB *	cbs_vcb;		/* volume, for name-encoding conversion */
	int16_t		cbs_hfsPlus;		/* nonzero => HFS Plus record layouts */
	int16_t		cbs_result;		/* errno-style result, ENOENT at end-of-dir */
};


/*
 * B-tree iteration callback used by hfs_readdir: converts one catalog
 * record into a struct dirent and uiomoves it to the caller's buffer.
 * Returns nonzero to continue iterating, 0 to stop.
 */
SInt32
ProcessCatalogEntry(const CatalogKey *ckp, const CatalogRecord *crp,
		u_int16_t recordLen, struct callbackstate *state)
{
	CatalogName *cnp;
	size_t utf8chars;
	u_int32_t curID;
	OSErr result;
	struct dirent catent;

	if (state->cbs_hfsPlus)
		curID = ckp->hfsPlus.parentID;
	else
		curID = ckp->hfs.parentID;

	/* We're done when parent directory changes */
	if (state->cbs_parentID != curID) {
lastitem:
/*
 * The NSDirectoryList class chokes on empty records (it doesnt check d_reclen!)
 * so remove padding for now...
 */
#if 0
		/*
		 * Pad the end of list with an empty record.
		 * This eliminates an extra call by readdir(3c).
		 */
		catent.d_fileno = 0;
		catent.d_reclen = 0;
		catent.d_type = 0;
		catent.d_namlen = 0;
		*(int32_t*)&catent.d_name[0] = 0;

		state->cbs_lastoffset = state->cbs_uio->uio_offset;

		state->cbs_result = uiomove((caddr_t) &catent, 12, state->cbs_uio);
		if (state->cbs_result == 0)
			state->cbs_result = ENOENT;
#else
		/* ENOENT here is the in-band end-of-directory signal consumed
		 * by hfs_readdir, not an error returned to the caller. */
		state->cbs_lastoffset = state->cbs_uio->uio_offset;
		state->cbs_result = ENOENT;
#endif
		return (0);	/* stop */
	}

	if (state->cbs_hfsPlus) {
		switch(crp->recordType) {
		case kHFSPlusFolderRecord:
			catent.d_type = DT_DIR;
			catent.d_fileno = crp->hfsPlusFolder.folderID;
			break;
		case kHFSPlusFileRecord:
			catent.d_type = DT_REG;
			catent.d_fileno = crp->hfsPlusFile.fileID;
			break;
		default:
			return (0);	/* stop */
		};

		cnp = (CatalogName*) &ckp->hfsPlus.nodeName;
		result = utf8_encodestr(cnp->ustr.unicode, cnp->ustr.length * sizeof(UniChar),
				catent.d_name, &utf8chars, kdirentMaxNameBytes + 1, ':', 0);
		/* name too long to fit a dirent: fall back to a mangled (fileID-tagged) form */
		if (result == ENAMETOOLONG) {
			result = ConvertUnicodeToUTF8Mangled(cnp->ustr.length * sizeof(UniChar),
					cnp->ustr.unicode, kdirentMaxNameBytes + 1,
					(ByteCount*)&utf8chars, catent.d_name, catent.d_fileno);
		}
	} else { /* hfs */
		switch(crp->recordType) {
		case kHFSFolderRecord:
			catent.d_type = DT_DIR;
			catent.d_fileno = crp->hfsFolder.folderID;
			break;
		case kHFSFileRecord:
			catent.d_type = DT_REG;
			catent.d_fileno = crp->hfsFile.fileID;
			break;
		default:
			return (0);	/* stop */
		};

		cnp = (CatalogName*) ckp->hfs.nodeName;
		result = hfs_to_utf8(state->cbs_vcb, cnp->pstr, kdirentMaxNameBytes + 1,
				(ByteCount *)&utf8chars, catent.d_name);
		/*
		 * When an HFS name cannot be encoded with the current
		 * volume encoding we use MacRoman as a fallback.
		 */
		if (result)
			result = mac_roman_to_utf8(cnp->pstr, kdirentMaxNameBytes + 1,
					(ByteCount *)&utf8chars, catent.d_name);
	}

	catent.d_namlen = utf8chars;
	catent.d_reclen = DIRENTRY_SIZE(utf8chars);

	/* hide our private meta data directory: report end-of-directory via
	 * the lastitem path (note: deliberate goto into the if-block above). */
	if (curID == kRootDirID &&
	    catent.d_fileno == state->cbs_hiddenDirID &&
	    catent.d_type == DT_DIR)
		goto lastitem;

	state->cbs_lastoffset = state->cbs_uio->uio_offset;

	/* if this entry won't fit then we're done */
	if (catent.d_reclen > state->cbs_uio->uio_resid)
		return (0);	/* stop */

	state->cbs_result = uiomove((caddr_t) &catent, catent.d_reclen, state->cbs_uio);

	/* continue iteration if there's room */
	return (state->cbs_result == 0 &&
		state->cbs_uio->uio_resid >= AVERAGE_HFSDIRENTRY_SIZE);
}

/*
 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
 * returned be stat() call as the block size. This is mentioned in the man page for getdirentries():
 * "Nbytes must be greater than or equal to the block size associated with the file,
 * see stat(2)". Might as well settle on the same size of ufs. Second, this makes sure there is enough
 * room for the . and .. entries that have to added manually.
 */

/*
#% readdir	vp	L L L
#
vop_readdir {
    IN struct vnode *vp;
    INOUT struct uio *uio;
    IN struct ucred *cred;
    INOUT int *eofflag;
    OUT int *ncookies;
    INOUT u_long **cookies;
    */
/*
 * Read directory entries: emits synthetic "." and ".." first (HFS does not
 * store them on disk), then iterates the catalog B-tree for this directory
 * via ProcessCatalogEntry.  uio_offset is treated as a byte offset into the
 * stream of emitted dirents (see the 4.3 Note above).
 */
static int
hfs_readdir(ap)
struct vop_readdir_args /* {
	struct vnode *vp;
	struct uio *uio;
	struct ucred *cred;
	int *eofflag;
	int *ncookies;
	u_long **cookies;
} */ *ap;
{
	register struct uio *uio = ap->a_uio;
	struct hfsnode *hp = VTOH(ap->a_vp);
	struct proc *p = current_proc();
	ExtendedVCB *vcb = HTOVCB(hp);
	off_t off = uio->uio_offset;	/* starting offset, kept for cookie math */
	u_int32_t dirID = H_FILEID(hp);
	int retval = 0;
	OSErr result = noErr;
	u_int32_t diroffset;
	BTreeIterator bi;
	CatalogIterator *cip;
	u_int16_t op;
	struct callbackstate state;
	int eofflag = 0;

	DBG_FUNC_NAME("readdir");
	DBG_VOP_LOCKS_DECL(1);

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));
	DBG_HFS_NODE_CHECK(ap->a_vp);

	/* We assume it's all one big buffer... */
	if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE) {
		return EINVAL;
	};

	/* Create the entries for . and .. */
	if (uio->uio_offset < sizeof(rootdots)) {
		caddr_t dep;
		size_t dotsize;

		rootdots[0].d_fileno = dirID;
		rootdots[1].d_fileno = H_DIRID(hp);

		if (uio->uio_offset == 0) {
			dep = (caddr_t) &rootdots[0];
			dotsize = 2* sizeof(struct hfsdotentry);
		} else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
			dep = (caddr_t) &rootdots[1];
			dotsize = sizeof(struct hfsdotentry);
		} else {
			/* offset inside a dot entry is not a valid resume point */
			retval = EINVAL;
			goto Exit;
		}

		retval = uiomove(dep, dotsize, uio);
		if (retval != 0)
			goto Exit;
	}

	diroffset = uio->uio_offset;

	/* lock catalog b-tree */
	retval = hfs_metafilelocking(VTOHFS(ap->a_vp), kHFSCatalogFileID, LK_SHARED, p);
	if (retval != E_NONE)
		goto Exit;

	/* get an iterator and position it */
	cip = GetCatalogIterator(vcb, dirID, diroffset);

	result = PositionIterator(cip, diroffset, &bi, &op);
	if (result == cmNotFound) {
		eofflag = 1;
		retval = 0;
		AgeCatalogIterator(cip);
		goto cleanup;
	} else if ((retval = MacToVFSError(result)))
		goto cleanup;

	state.cbs_hiddenDirID = VCBTOHFS(vcb)->hfs_private_metadata_dir;
	state.cbs_lastoffset = cip->currentOffset;
	state.cbs_vcb = vcb;
	state.cbs_uio = uio;
	state.cbs_result = 0;
	state.cbs_parentID = dirID;

	if (vcb->vcbSigWord == kHFSPlusSigWord)
		state.cbs_hfsPlus = 1;
	else
		state.cbs_hfsPlus = 0;

	/* process as many entries as possible... */
	result = BTIterateRecords(GetFileControlBlock(vcb->catalogRefNum), op, &bi,
			(IterateCallBackProcPtr)ProcessCatalogEntry, &state);

	if (state.cbs_result)
		retval = state.cbs_result;
	else
		retval = MacToVFSError(result);

	/* ENOENT from the callback means end-of-directory, not an error */
	if (retval == ENOENT) {
		eofflag = 1;
		retval = 0;
	}

	if (retval == 0) {
		cip->currentOffset = state.cbs_lastoffset;
		cip->nextOffset = uio->uio_offset;
		UpdateCatalogIterator(&bi, cip);
	}

cleanup:
	if (retval) {
		/* invalidate the cached iterator on failure */
		cip->volume = 0;
		cip->folderID = 0;
		AgeCatalogIterator(cip);
	}

	(void) ReleaseCatalogIterator(cip);

	/* unlock catalog b-tree */
	(void) hfs_metafilelocking(VTOHFS(ap->a_vp), kHFSCatalogFileID, LK_RELEASE, p);

	if (retval != E_NONE) {
		DBG_ERR(("%s: retval %d when trying to read directory %ld: %s\n",funcname, retval,
			H_FILEID(hp), H_NAME(hp)));
		goto Exit;
	}

	/* were we already past eof ? */
	if (uio->uio_offset == off) {
		retval = E_NONE;
		goto Exit;
	}

	if (vcb->vcbSigWord == kHFSPlusSigWord)
		hp->h_nodeflags |= IN_ACCESS;

	/* Bake any cookies */
	if (!retval && ap->a_ncookies != NULL) {
		struct dirent* dpStart;
		struct dirent* dpEnd;
		struct dirent* dp;
		int ncookies;
		u_long *cookies;
		u_long *cookiep;

		/*
		 * Only the NFS server uses cookies, and it loads the
		 * directory block into system space, so we can just look at
		 * it directly.
		 */
		if (uio->uio_segflg != UIO_SYSSPACE)
			panic("hfs_readdir: unexpected uio from NFS server");
		/* walk the dirents just written between the start and end offsets */
		dpStart = (struct dirent *)(uio->uio_iov->iov_base - (uio->uio_offset - off));
		dpEnd = (struct dirent *) uio->uio_iov->iov_base;
		for (dp = dpStart, ncookies = 0;
		     dp < dpEnd && dp->d_reclen != 0;
		     dp = (struct dirent *)((caddr_t)dp + dp->d_reclen))
			ncookies++;
		MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);
		for (dp = dpStart, cookiep = cookies;
		     dp < dpEnd;
		     dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) {
			off += dp->d_reclen;
			*cookiep++ = (u_long) off;
		}
		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookies;
	}

Exit:;
	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);
}


/*
 * readdirattr operation will return attributes for the items in the
 * directory specified.
 *
 * It does not do . and .. entries. The problem is if you are at the root of the
 * hfs directory and go to .. you could be crossing a mountpoint into a
 * different (ufs) file system. The attributes that apply for it may not
 * apply for the file system you are doing the readdirattr on. To make life
 * simpler, this call will only return entries in its directory, hfs like.
 * TO DO LATER:
 *	1.getattrlist creates a thread record if the objpermanentid attribute
 *	  is requested. Just do EINVAL for now and fix later.
 *	2. more than one for uiovcnt support.
 *	3. put knohint (hints) in state for next call in
 *	4. credentials checking when rest of hfs does it.
 *	5. Do return permissions concatenation ???
 */

/*
#
#% readdirattr	vp	L L L
#
vop_readdirattr {
	IN struct vnode *vp;
	IN struct attrlist *alist;
	INOUT struct uio *uio;
	IN u_long maxcount:
	IN u_long options;
	OUT u_long *newstate;
	OUT int *eofflag;
	OUT u_long *actualCount;
	OUT u_long **cookies;
	IN struct ucred *cred;
};
*/
static int
hfs_readdirattr(ap)
struct vop_readdirattr_args /* {
	struct vnode *vp;
	struct attrlist *alist;
	struct uio *uio;
	u_long maxcount:
	u_long options;
	int *newstate;
	int *eofflag;
	u_long *actualcount;
	u_long **cookies;
	struct ucred *cred;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct attrlist *alist = ap->a_alist;
	register struct uio *uio = ap->a_uio;
	u_long maxcount = ap->a_maxcount;
	u_long ncookies;
	ExtendedVCB *vcb = HTOVCB(VTOH(vp));
	UInt32 dirID = H_FILEID(VTOH(vp));
	struct proc *proc = current_proc(); /* could get this out of uio */
	off_t startoffset = uio->uio_offset;
	struct hfsCatalogInfo catInfo;
	UInt32 index;
	int retval = 0;
	u_long fixedblocksize;
	u_long maxattrblocksize;
	u_long currattrbufsize;
	void *attrbufptr = NULL;	/* staging buffer; freed at exit */
	void *attrptr;			/* fixed-size attribute cursor */
	void *varptr;			/* variable-length (name) cursor */
	struct vnode *entryvnode;


	*(ap->a_actualcount) = 0;
	*(ap->a_eofflag) = 0;

	/* check for invalid options, check vnode, and buffer space */
	if (((ap->a_options & ~FSOPT_NOINMEMUPDATE) != 0) ||
	    (vp == NULL) ||
	    (uio->uio_resid <= 0) || (uio->uio_iovcnt > 1))
		return EINVAL;

	/* this call doesn't take volume attributes */
	if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) ||
	    ((alist->commonattr & ~ATTR_CMN_VALIDMASK) != 0) ||
	    (alist->volattr != 0) ||
	    ((alist->dirattr & ~ATTR_DIR_VALIDMASK) != 0) ||
	    ((alist->fileattr & ~ATTR_FILE_VALIDMASK) != 0) ||
	    ((alist->forkattr & ~ATTR_FORK_VALIDMASK) != 0))
		return EINVAL;

	/* Reject requests for unsupported options for now: */
	if ((alist->commonattr & (ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST)) ||
	    (alist->fileattr & (ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST)) ||
	    (alist->commonattr & ATTR_CMN_OBJPERMANENTID) )
		return EINVAL;

	/* getattrlist and searchfs use a secondary buffer to malloc and then use
	 * uiomove afterwards. It's an extra copy, but for now leave it alone
	 */
	fixedblocksize = (sizeof(u_long) + AttributeBlockSize(alist)); /* u_long for length */
	maxattrblocksize = fixedblocksize;
	if (alist->commonattr & ATTR_CMN_NAME)
		maxattrblocksize += kHFSPlusMaxFileNameBytes + 1;
	MALLOC(attrbufptr, void *, maxattrblocksize, M_TEMP, M_WAITOK);
	attrptr = attrbufptr;
	varptr = (char *)attrbufptr + fixedblocksize;	/* Point to variable-length storage */

	/* Since attributes passed back can contain variable ones (name), we can't just use
	 * uio_offset as is. We thus force it to represent fixed size of hfsdirentries
	 * as hfs_readdir was originally doing. If this all we need to represent the current
	 * state, then ap->a_state is not needed at all.
	 */
	/* index = ap->a_state;  should not be less than 1 */
	index = (uio->uio_offset / sizeof(struct dirent)) + 1;
	INIT_CATALOGDATA(&catInfo.nodeData, 0);

	/* Lock catalog b-tree */
	if ((retval = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_SHARED, proc)) != E_NONE)
		goto exit;

	/* HFS Catalog does not have a bulk directory enumeration call. Do it one at
	 * time, using hints. GetCatalogOffspring takes care of hfsplus and name issues
	 * for us, so that's a win. Later, implement GetCatalogOffspringBulk.
	 */
	catInfo.hint = kNoHint; /* note, we may want to save the latest in state */
	while ((uio->uio_resid >= 0) && (maxcount !=0 )) {
		/* better to check uio_resid against max or fixedblocksize, but won't work.
		 * Depending on if dir or file, the attributes returned will be different.
		 * Thus fixedblocksize is too large in some cases.Also, the variable
		 * part (like name) could be between fixedblocksize and the max.
		 */
		OSErr result = GetCatalogOffspring(vcb, dirID, index, &catInfo.nodeData, NULL, NULL);
		if (result != noErr) {
			if (result == cmNotFound) {
				*(ap->a_eofflag) = TRUE;
				retval = E_NONE;
			}
			else retval = MacToVFSError(result);
			break;
		}

		/* hide our private meta data directory as does hfs_readdir */
		if ((dirID == kRootDirID) &&
		    catInfo.nodeData.cnd_nodeID == VCBTOHFS(vcb)->hfs_private_metadata_dir &&
		    catInfo.nodeData.cnd_type == kCatalogFolderNode) {

			++index;
			continue;
		}

		/* NOTE(review): cast-as-lvalue increment is a GCC extension,
		 * not standard C — consider splitting into a store + advance. */
		*((u_long *)attrptr)++ = 0; /* move it past length */

		if (ap->a_options & FSOPT_NOINMEMUPDATE) {
			/* vp okay to use instead of root vp */
			PackCatalogInfoAttributeBlock(alist, vp, &catInfo, &attrptr, &varptr);
		} else {
			/* Check to see if there's a vnode for this item in the cache: */
			entryvnode = hfs_vhashget(H_DEV(VTOH(vp)), catInfo.nodeData.cnd_nodeID, kDefault);
			if (entryvnode != NULL) {
				PackAttributeBlock(alist, entryvnode, &catInfo, &attrptr, &varptr);
				vput(entryvnode);
			} else {
				/* vp okay to use instead of root vp */
				PackCatalogInfoAttributeBlock(alist, vp, &catInfo, &attrptr, &varptr);
			};
		};
		/* first u_long of each attribute block is its total length */
		currattrbufsize = *((u_long *)attrbufptr) = ((char *)varptr - (char *)attrbufptr);

		/* now check if we can't fit in the buffer space remaining */
		if (currattrbufsize > uio->uio_resid)
			break;
		else {
			retval = uiomove((caddr_t)attrbufptr, currattrbufsize, ap->a_uio);
			if (retval != E_NONE)
				break;
			attrptr = attrbufptr;
			varptr = (char *)attrbufptr + fixedblocksize;	/* Point to variable-length storage */
			index++;
			*ap->a_actualcount += 1;
			maxcount--;
		}
	};
	*ap->a_newstate = VTOH(vp)->h_meta->h_mtime;/* before we unlock, know the mod date */
	/* Unlock catalog b-tree, finally.  Ties up the everything during enumeration */
	(void) hfs_metafilelocking( VTOHFS(ap->a_vp), kHFSCatalogFileID, LK_RELEASE, proc );

	CLEAN_CATALOGDATA(&catInfo.nodeData);

	if (!retval && ap->a_cookies != NULL) { /* CHECK THAT 0 wasn't passed in */
		void* dpStart;
		void* dpEnd;
		void* dp;
		u_long *cookies;
		u_long *cookiep;

		/* Only the NFS server uses cookies, and it loads the
		 * directory block into system space, so we can just look at
		 * it directly.
		 */
		if (uio->uio_segflg != UIO_SYSSPACE) /* || uio->uio_iovcnt != 1 checked earlier */
			panic("hfs_readdirattr: unexpected uio from NFS server");
		dpStart = uio->uio_iov->iov_base - (uio->uio_offset - startoffset);
		dpEnd = uio->uio_iov->iov_base;
		MALLOC(cookies, u_long *, (*ap->a_actualcount)*sizeof(u_long), M_TEMP, M_WAITOK);
		for (dp = dpStart, cookiep = cookies;
		     dp < dpEnd;
		     dp = ((caddr_t) dp + *((u_long *)dp))) {
			*cookiep++ = (u_long)((caddr_t)dp + sizeof(u_long));
		}
		*ap->a_cookies = cookies;
	}

	uio->uio_offset = startoffset + (*ap->a_actualcount)*sizeof(struct dirent);

exit:
	if (attrbufptr != NULL)
		FREE(attrbufptr, M_TEMP);
	return (retval);
}


/*
 * Return target name of a symbolic link
#% readlink	vp	L L L
#
 vop_readlink {
     IN struct vnode *vp;
     INOUT struct uio *uio;
     IN struct ucred *cred;
     */

int
hfs_readlink(ap)
struct vop_readlink_args /* {
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
} */ *ap;
{
	int retval;
	DBG_FUNC_NAME("readlink");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS);
	/* the link target is stored in the data fork, so reading it is a plain VOP_READ */
	retval = VOP_READ(ap->a_vp, ap->a_uio, 0, ap->a_cred);
	/* clear IN_ACCESS to prevent needless update of symlink vnode */
	VTOH(ap->a_vp)->h_nodeflags &= ~IN_ACCESS;

	DBG_VOP_LOCKS_TEST(retval);
	return (retval);

}


/*
 * hfs abort op, called after namei() when a
 CREATE/DELETE isn't actually
 * done. If a buffer has been saved in anticipation of a CREATE, delete it.
#% abortop	dvp	= = =
#
 vop_abortop {
     IN struct vnode *dvp;
     IN struct componentname *cnp;

     */

/* ARGSUSED */

static int
hfs_abortop(ap)
struct vop_abortop_args /* {
	struct vnode *a_dvp;
	struct componentname *a_cnp;
} */ *ap;
{
	DBG_FUNC_NAME("abortop");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_dvp);
	DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP_CONT(("\n"));


	DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS);

	/* Free the pathname buffer only if namei left it to us (HASBUF set)
	 * and the caller isn't going to reuse it (SAVESTART clear). */
	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
		FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
	}
	DBG_VOP_LOCKS_TEST(E_NONE);
	return (E_NONE);
}

// int	prthfsactive = 0;	/* 1 => print out reclaim of active vnodes */

/*
#% inactive	vp	L U U
#
 vop_inactive {
	IN struct vnode *vp;
	IN struct proc *p;

*/
/*
 * Called when a vnode's use count drops to zero: performs any postponed
 * deletion (IN_DELETED), flushes pending timestamp updates, and recycles
 * the vnode if it no longer refers to an on-disk object (h_mode == 0).
 */
static int
hfs_inactive(ap)
struct vop_inactive_args /* {
	struct vnode *a_vp;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct hfsnode *hp = VTOH(vp);
	struct proc *p = ap->a_p;
	struct timeval tv;
	int error = 0;
	extern int prtactive;

	DBG_FUNC_NAME("inactive");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_ZERO);


	if (prtactive && vp->v_usecount <= 0)
		vprint("hfs_inactive: pushing active", vp);

	if (vp->v_usecount != 0)
		DBG_VOP(("%s: bad usecount = %d\n",funcname,vp->v_usecount ));

	/*
	 * Ignore nodes related to stale file handles.
	 */
	if (hp->h_meta->h_mode == 0)
		goto out;

	/*
	 * Check for a postponed deletion
	 */
	if (hp->h_meta->h_metaflags & IN_DELETED) {
		hp->h_meta->h_metaflags &= ~IN_DELETED;

		error = vinvalbuf(vp, 0, NOCRED, p, 0, 0);
		if (error) goto out;

		if(UBCINFOEXISTS(vp))
			(void)ubc_setsize(vp, (off_t)0);

		/* Lock both trees
		 * Note: we do not need a lock on the private metadata directory
		 * since it never has a vnode associated with it.
		 */
		error = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_EXCLUSIVE | LK_CANRECURSE, p);
		if (error) goto out;
		error = hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_EXCLUSIVE | LK_CANRECURSE, p);
		if (error) {
			/* back out the catalog lock before bailing */
			(void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, p);
			goto out;
		}

		if (hp->h_meta->h_metaflags & IN_DATANODE) {
			char iNodeName[32];

			/* hardlink data node: delete the indirect node in the private dir */
			MAKE_INODE_NAME(iNodeName, hp->h_meta->h_indnodeno);
			error = hfsDelete(HTOVCB(hp), VTOHFS(vp)->hfs_private_metadata_dir, iNodeName, TRUE, H_HINT(hp));
		} else {
			/* XXX can we leave orphaned sibling? */
			error = hfsDelete(HTOVCB(hp), H_DIRID(hp), H_NAME(hp), TRUE, H_HINT(hp));
			if (error == ENOENT) {
				/* try by fileID as a backup */
				error = hfsDelete(HTOVCB(hp), H_FILEID(hp), NULL, TRUE, H_HINT(hp));
			}
		}

		/* release in reverse acquisition order */
		(void) hfs_metafilelocking(VTOHFS(vp), kHFSExtentsFileID, LK_RELEASE, p);
		(void) hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

		hp->h_meta->h_metaflags |= IN_NOEXISTS;
		hp->h_meta->h_mode = 0;
		/* clear the block mappings */
		hp->fcbPLen = (u_int64_t)0;
		bzero(&hp->fcbExtents, sizeof(HFSPlusExtentRecord));

		hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE);
	}

	if (hp->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		tv = time;
		VOP_UPDATE(vp, &tv, &tv, 0);
	}

out:
	VOP_UNLOCK(vp, 0, p);
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (hp->h_meta->h_mode == 0)
		vrecycle(vp, (struct slock *)0, p);

	/* XXX SER Here we might want to get rid of any other forks
	 * The problem is that if we call vrecycle(), our structure
	 * disappear from under us, we would need to remember, and expect
	 * things to go to null or to disappear
	 * But it stillw would be a good thing to remove vnodes
	 * referencing stale data
	 */

	DBG_VOP_LOCKS_TEST(E_NONE);
	return (E_NONE);
}

/*
	Ignored since the locks are gone......
#% reclaim	vp	U I I
#
 vop_reclaim {
	IN struct vnode *vp;
	IN struct proc *p;

	*/
/*
 * Final teardown of an hfsnode: unhash it, drop the shared file meta when
 * this is the last fork referencing it, purge the name cache, release the
 * device vnode, and free the hfsnode itself.
 */
static int
hfs_reclaim(ap)
struct vop_reclaim_args /* {
	struct vnode *a_vp;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct hfsnode *hp = VTOH(vp);
	void *tdata = vp->v_data;
	char *tname;
	Boolean	freeMeta = true;
	struct vnode *devvp = NULL;

	extern int prtactive;
	DBG_FUNC_NAME("reclaim");
	DBG_VOP_LOCKS_DECL(1);
	DBG_VOP_PRINT_FUNCNAME();
	DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n"));

	DBG_VOP_LOCKS_INIT(0, ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_ZERO);

	/*
	 NOTE: XXX vnodes need careful handling because fork vnodes that failed to be
	 created in their entirity could be getting cleaned up here.
	 */

	if (prtactive && vp->v_usecount != 0)
		vprint("hfs_reclaim(): pushing active", vp);

	hp->h_nodeflags |= IN_ALLOCATING;	/* Mark this as being incomplete */
	/*
	 * This will remove the entry from the hash AND the sibling list
	 * This will make sure everything is in a stable state to see if we can remove the meta
	 * i.e. if this is the only fork...the sibling list will be empty
	 */
	hfs_vhashrem(hp);

	DBG_ASSERT(tdata != NULL);
	DBG_ASSERT(hp->h_meta != NULL);

	devvp = hp->h_meta->h_devvp;	/* For later releasing */
	hp->h_meta->h_usecount--;

	/* release the file meta if this is the last fork */
	if (H_FORKTYPE(hp)==kDataFork || H_FORKTYPE(hp)==kRsrcFork) {
		if (hp->h_meta->h_siblinghead.cqh_first != (void *) &hp->h_meta->h_siblinghead)
			freeMeta = false;
	};

	if (freeMeta) {
		DBG_ASSERT(hp->h_meta->h_usecount == 0);
		if (hp->h_meta->h_metaflags & IN_LONGNAME) {
			/* long names live in a separate allocation */
			tname = H_NAME(hp);
			DBG_ASSERT(tname != NULL);
			FREE(tname, M_TEMP);
		}
		FREE_ZONE(hp->h_meta, sizeof(struct hfsfilemeta), M_HFSFMETA);
		hp->h_meta = NULL;
	}
	else
		DBG_ASSERT(hp->h_meta->h_usecount == 1);


	/*
	 * Purge old data structures associated with the inode.
	 */
	cache_purge(vp);
	if (devvp) {
		vrele(devvp);
	};

	/* Free our data structs */
	FREE_ZONE(tdata, sizeof(struct hfsnode), M_HFSNODE);
	vp->v_data = NULL;

	DBG_VOP_LOCKS_TEST(E_NONE);
	return (E_NONE);
}


/*
 * Lock an hfsnode. If its already locked, set the WANT bit and sleep.
+#% lock vp U L U +# + vop_lock { + IN struct vnode *vp; + IN int flags; + IN struct proc *p; + */ + +static int +hfs_lock(ap) +struct vop_lock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +} */ *ap; +{ + struct vnode * vp = ap->a_vp; + struct hfsnode *hp = VTOH(ap->a_vp); + int retval; + + DBG_FUNC_NAME("lock"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_CONT((" ")); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT((" flags = 0x%08X.\n", ap->a_flags)); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_ZERO); + + retval = lockmgr(&hp->h_lock, ap->a_flags, &vp->v_interlock, ap->a_p); + if (retval != E_NONE) { + if ((ap->a_flags & LK_NOWAIT) == 0) + DBG_ERR(("hfs_lock: error %d trying to lock vnode (flags = 0x%08X).\n", retval, ap->a_flags)); + goto Err_Exit; + }; + +Err_Exit:; + DBG_ASSERT(*((int*)&vp->v_interlock) == 0); + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + +/* + * Unlock an hfsnode. +#% unlock vp L U L +# + vop_unlock { + IN struct vnode *vp; + IN int flags; + IN struct proc *p; + + */ +int +hfs_unlock(ap) +struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +} */ *ap; +{ + struct hfsnode *hp = VTOH(ap->a_vp); + struct vnode *vp = ap->a_vp; + int retval = E_NONE; + + DBG_FUNC_NAME("unlock"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(vp);DBG_VOP_CONT((" flags = 0x%08X.\n", ap->a_flags)); + DBG_VOP_LOCKS_INIT(0,vp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_LOCKED, VOPDBG_ZERO); + + + DBG_ASSERT((ap->a_flags & (LK_EXCLUSIVE|LK_SHARED)) == 0); + retval = lockmgr(&hp->h_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock, ap->a_p); + if (retval != E_NONE) { + DEBUG_BREAK_MSG(("hfs_unlock: error %d trying to unlock vnode (forktype = %d).\n", retval, H_FORKTYPE(hp))); + }; + + DBG_ASSERT(*((int*)&vp->v_interlock) == 0); + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + +/* + * Print out the contents of an 
hfsnode. +#% print vp = = = +# + vop_print { + IN struct vnode *vp; + */ +int +hfs_print(ap) +struct vop_print_args /* { + struct vnode *a_vp; +} */ *ap; +{ + register struct vnode * vp = ap->a_vp; + register struct hfsnode *hp = VTOH( vp); + DBG_FUNC_NAME("print"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); + + printf("tag VT_HFS, dirID %d, on dev %d, %d", H_DIRID(hp), + major(H_DEV(hp)), minor(H_DEV(hp))); + /* lockmgr_printinfo(&hp->h_lock); */ + printf("\n"); + DBG_VOP_LOCKS_TEST(E_NONE); + return (E_NONE); +} + + +/* + * Check for a locked hfsnode. +#% islocked vp = = = +# + vop_islocked { + IN struct vnode *vp; + + */ +int +hfs_islocked(ap) +struct vop_islocked_args /* { + struct vnode *a_vp; +} */ *ap; +{ + int lockStatus; + //DBG_FUNC_NAME("islocked"); + //DBG_VOP_LOCKS_DECL(1); + //DBG_VOP_PRINT_FUNCNAME(); + //DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); + + //DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_ZERO); + + lockStatus = lockstatus(&VTOH( ap->a_vp)->h_lock); + //DBG_VOP_LOCKS_TEST(E_NONE); + return (lockStatus); +} + +/* + +#% pathconf vp L L L +# + vop_pathconf { + IN struct vnode *vp; + IN int name; + OUT register_t *retval; + + */ +static int +hfs_pathconf(ap) +struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; +} */ *ap; +{ + int retval = E_NONE; + DBG_FUNC_NAME("pathconf"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + DBG_HFS_NODE_CHECK (ap->a_vp); + + switch (ap->a_name) { + case _PC_LINK_MAX: +#if HFS_HARDLINKS + if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord) + *ap->a_retval = HFS_LINK_MAX; + else + *ap->a_retval = 1; +#else + *ap->a_retval = 1; +#endif + break; + case _PC_NAME_MAX: + 
*ap->a_retval = kHFSPlusMaxFileNameBytes; /* max # of characters x max utf8 representation */ + break; + case _PC_PATH_MAX: + *ap->a_retval = PATH_MAX; /* 1024 */ + break; + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + break; + case _PC_NO_TRUNC: + *ap->a_retval = 0; + break; + default: + retval = EINVAL; + } + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + + + + +/* + * Advisory record locking support +#% advlock vp U U U +# + vop_advlock { + IN struct vnode *vp; + IN caddr_t id; + IN int op; + IN struct flock *fl; + IN int flags; + + */ +int +hfs_advlock(ap) +struct vop_advlock_args /* { + struct vnode *a_vp; + caddr_t a_id; + int a_op; + struct flock *a_fl; + int a_flags; +} */ *ap; +{ + register struct hfsnode *hp = VTOH(ap->a_vp); + register struct flock *fl = ap->a_fl; + register struct hfslockf *lock; + off_t start, end; + int retval; + DBG_FUNC_NAME("advlock"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_UNLOCKED, VOPDBG_POS); + /* + * Avoid the common case of unlocking when inode has no locks. + */ + if (hp->h_lockf == (struct hfslockf *)0) { + if (ap->a_op != F_SETLK) { + fl->l_type = F_UNLCK; + return (0); + } + } + /* + * Convert the flock structure into a start and end. + */ + start = 0; + switch (fl->l_whence) { + case SEEK_SET: + case SEEK_CUR: + /* + * Caller is responsible for adding any necessary offset + * when SEEK_CUR is used. 
+ */ + start = fl->l_start; + break; + + case SEEK_END: + start = HTOFCB(hp)->fcbEOF + fl->l_start; + break; + + default: + return (EINVAL); + } + + if (start < 0) + return (EINVAL); + if (fl->l_len == 0) + end = -1; + else + end = start + fl->l_len - 1; + + /* + * Create the hfslockf structure + */ + MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK); + lock->lf_start = start; + lock->lf_end = end; + lock->lf_id = ap->a_id; + lock->lf_hfsnode = hp; + lock->lf_type = fl->l_type; + lock->lf_next = (struct hfslockf *)0; + TAILQ_INIT(&lock->lf_blkhd); + lock->lf_flags = ap->a_flags; + /* + * Do the requested operation. + */ + switch(ap->a_op) { + case F_SETLK: + retval = hfs_setlock(lock); + break; + + case F_UNLCK: + retval = hfs_clearlock(lock); + FREE(lock, M_LOCKF); + break; + + case F_GETLK: + retval = hfs_getlock(lock, fl); + FREE(lock, M_LOCKF); + break; + + default: + retval = EINVAL; + _FREE(lock, M_LOCKF); + break; + } + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + + +/* + * Update the access, modified, and node change times as specified by the + * IACCESS, IUPDATE, and ICHANGE flags respectively. The IMODIFIED flag is + * used to specify that the node needs to be updated but that the times have + * already been set. The access and modified times are taken from the second + * and third parameters; the node change time is always taken from the current + * time. If waitfor is set, then wait for the disk write of the node to + * complete. 
+ */ +/* +#% update vp L L L + IN struct vnode *vp; + IN struct timeval *access; + IN struct timeval *modify; + IN int waitfor; +*/ + +int +hfs_update(ap) + struct vop_update_args /* { + struct vnode *a_vp; + struct timeval *a_access; + struct timeval *a_modify; + int a_waitfor; + } */ *ap; +{ + struct hfsnode *hp; + struct proc *p; + hfsCatalogInfo catInfo; + char *filename; + char iNodeName[32]; + u_int32_t pid; + int retval; + ExtendedVCB *vcb; + DBG_FUNC_NAME("update"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME(); + DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP_CONT(("\n")); + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_ZERO); + + hp = VTOH(ap->a_vp); + + DBG_ASSERT(hp && hp->h_meta); + DBG_ASSERT(*((int*)&ap->a_vp->v_interlock) == 0); + + if ((H_FORKTYPE(hp) == kSysFile) || + (VTOVFS(ap->a_vp)->mnt_flag & MNT_RDONLY) || + (hp->h_meta->h_mode == 0)) { + hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + DBG_VOP_LOCKS_TEST(0); + return (0); + } + + if (H_FORKTYPE(hp) == kSysFile) { + hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + DBG_VOP_LOCKS_TEST(0); + return (0); + } + + if (VTOVFS(ap->a_vp)->mnt_flag & MNT_RDONLY) { + hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + DBG_VOP_LOCKS_TEST(0); + return (0); + } + + /* Check to see if MacOS set the fcb to be dirty, if so, translate it to IN_MODIFIED */ + if (HTOFCB(hp)->fcbFlags &fcbModifiedMask) + hp->h_nodeflags |= IN_MODIFIED; + + if ((hp->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) { + DBG_VOP_LOCKS_TEST(0); + return (0); + }; + + if (hp->h_nodeflags & IN_ACCESS) + hp->h_meta->h_atime = ap->a_access->tv_sec; + if (hp->h_nodeflags & IN_UPDATE) + hp->h_meta->h_mtime = ap->a_modify->tv_sec; + if (hp->h_nodeflags & IN_CHANGE) { + hp->h_meta->h_ctime = time.tv_sec; + /* + * HFS dates that WE set must be adjusted for DST + */ + if ((HTOVCB(hp)->vcbSigWord == kHFSSigWord) && 
gTimeZone.tz_dsttime) { + hp->h_meta->h_ctime += 3600; + hp->h_meta->h_mtime = hp->h_meta->h_ctime; + } + } + + p = current_proc(); + filename = H_NAME(hp); + pid = H_DIRID(hp); + vcb = HTOVCB(hp); + catInfo.hint = H_HINT(hp); + +#if HFS_HARDLINKS + /* + * Force an update of the indirect node instead of the link + * by using the name and parent of the indirect node. + */ + if (hp->h_meta->h_metaflags & IN_DATANODE) { + MAKE_INODE_NAME(iNodeName, hp->h_meta->h_indnodeno); + filename = iNodeName; + pid = VCBTOHFS(vcb)->hfs_private_metadata_dir; + } +#endif + + INIT_CATALOGDATA(&catInfo.nodeData, kCatNameNoCopyName); + + /* + * Since VOP_UPDATE can be called from withing another VOP (eg VOP_RENAME), + * the Catalog b-tree may aready be locked by the current thread. So we + * allow recursive locking of the Catalog from within VOP_UPDATE. + */ + /* Lock the Catalog b-tree file */ + retval = hfs_metafilelocking(HTOHFS(hp), kHFSCatalogFileID, LK_EXCLUSIVE | LK_CANRECURSE, p); + if (retval) { + DBG_VOP_LOCKS_TEST(retval); + return (retval); + }; + + retval = hfs_getcatalog(vcb, pid, filename, -1, &catInfo); + if (retval != noErr) { + (void) hfs_metafilelocking(HTOHFS(hp), kHFSCatalogFileID, LK_RELEASE, p); + retval = MacToVFSError(retval); + goto Err_Exit; + }; + + H_HINT(hp) = catInfo.hint; + CopyVNodeToCatalogNode (HTOV(hp), &catInfo.nodeData); + + retval = UpdateCatalogNode(vcb, pid, filename, H_HINT(hp), &catInfo.nodeData); + + /* unlock the Catalog b-tree file */ + (void) hfs_metafilelocking(HTOHFS(hp), kHFSCatalogFileID, LK_RELEASE, p); + + if (retval != noErr) { /* from UpdateCatalogNode() */ + retval = MacToVFSError(retval); + goto Err_Exit; + }; + + /* After the updates are finished, clear the flags */ + hp->h_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + HTOFCB(hp)->fcbFlags &= ~fcbModifiedMask; + + /* Update general data */ + if (ap->a_vp->v_type == VDIR) { + hp->h_meta->h_nlink = 2 + catInfo.nodeData.cnd_valence; + hp->h_meta->h_size = 
sizeof(rootdots) + + (catInfo.nodeData.cnd_valence * AVERAGE_HFSDIRENTRY_SIZE); + if (hp->h_meta->h_size < MAX_HFSDIRENTRY_SIZE) + hp->h_meta->h_size < MAX_HFSDIRENTRY_SIZE; + } else { + hp->h_meta->h_size = (off_t)vcb->blockSize * + (off_t)(catInfo.nodeData.cnd_rsrcfork.totalBlocks + + catInfo.nodeData.cnd_datafork.totalBlocks); + } + + +Err_Exit: + + CLEAN_CATALOGDATA(&catInfo.nodeData); + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + + +/* + * Initialize the vnode associated with a new hfsnode, + * handle aliased vnodes. + */ +int +hfs_vinit(mntp, specops, fifoops, vpp) + struct mount *mntp; + int (**specops)(void *); + int (**fifoops)(void *); + struct vnode **vpp; +{ + struct hfsnode *hp; + struct vnode *vp, *nvp; + + vp = *vpp; + hp = VTOH(vp); + /* vp->v_type set in CopyCatalogToHFSNode */ + switch(vp->v_type) { + case VCHR: + case VBLK: + vp->v_op = specops; + if ((nvp = checkalias(vp, hp->h_meta->h_rdev, mntp))) { + /* + * Discard unneeded vnode, but save its hfsnode. + * Note that the lock is carried over in the hfsnode + * to the replacement vnode. + */ + nvp->v_data = vp->v_data; + vp->v_data = NULL; + vp->v_op = spec_vnodeop_p; + vrele(vp); + vgone(vp); + /* + * Reinitialize aliased hfsnode. + */ + + hp->h_vp = nvp; + vp = nvp; + } + break; + case VFIFO: +#if FIFO + vp->v_op = fifoops; + break; +#else + return (EOPNOTSUPP); +#endif + default: + break; + } + if (H_FILEID(hp) == kRootDirID) + vp->v_flag |= VROOT; + + *vpp = vp; + return (0); +} + +/* + * Allocate a new node + * + * Upon leaving, namei buffer must be freed. 
+ * + */ +static int +hfs_makenode(mode, rawdev, dvp, vpp, cnp, p) + int mode; + dev_t rawdev; + struct vnode *dvp; + struct vnode **vpp; + struct componentname *cnp; + struct proc *p; +{ + register struct hfsnode *hp, *parhp; + struct timeval tv; + struct vnode *tvp; + struct hfsCatalogInfo catInfo; + ExtendedVCB *vcb; + UInt8 forkType; + int retval; + int hasmetalock = 0; + DBG_FUNC_NAME("makenode"); + + parhp = VTOH(dvp); + vcb = HTOVCB(parhp); + *vpp = NULL; + tvp = NULL; + if ((mode & IFMT) == 0) + mode |= IFREG; + +#if HFS_DIAGNOSTIC + if ((cnp->cn_flags & HASBUF) == 0) + panic("hfs_makenode: no name"); +#endif + + /* lock catalog b-tree */ + retval = hfs_metafilelocking(VTOHFS(dvp), + kHFSCatalogFileID, LK_EXCLUSIVE, p); + if (retval != E_NONE) + goto bad1; + else + hasmetalock = 1; + + /* Create the Catalog B*-Tree entry */ + retval = hfsCreate(vcb, H_FILEID(parhp), cnp->cn_nameptr, mode); + if (retval != E_NONE) { + DBG_ERR(("%s: hfsCreate FAILED: %s, %s\n", funcname, cnp->cn_nameptr, H_NAME(parhp))); + goto bad1; + } + + /* Look up the catalog entry just created: */ + INIT_CATALOGDATA(&catInfo.nodeData, 0); + catInfo.hint = kNoHint; + + retval = hfs_getcatalog(vcb, H_FILEID(parhp), cnp->cn_nameptr, cnp->cn_namelen, &catInfo); + if (retval != E_NONE) { + DBG_ERR(("%s: hfs_getcatalog FAILED: %s, %s\n", funcname, cnp->cn_nameptr, H_NAME(parhp))); + goto bad1; + } + + /* unlock catalog b-tree */ + hasmetalock = 0; + (void) hfs_metafilelocking(VTOHFS(dvp), + kHFSCatalogFileID, LK_RELEASE, p); + + /* hfs plus has additional metadata to initialize */ + if (vcb->vcbSigWord == kHFSPlusSigWord) { + u_int32_t pflags; + int catmode; + + if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) { + catInfo.nodeData.cnd_ownerID = VTOHFS(dvp)->hfs_uid; + catInfo.nodeData.cnd_groupID = VTOHFS(dvp)->hfs_gid; + catmode = mode; + } else { + catInfo.nodeData.cnd_ownerID = cnp->cn_cred->cr_uid; + catInfo.nodeData.cnd_groupID = parhp->h_meta->h_gid; + catmode = mode; + } + + switch 
(catmode & IFMT) { + case IFLNK: + catInfo.nodeData.cnd_ownerID = parhp->h_meta->h_uid; + break; + + case IFCHR: + case IFBLK: + /* XXX should we move this to post hfsGet? */ + catInfo.nodeData.cnd_rawDevice = rawdev; + /* + * Don't tag as a special file (BLK or CHR) until *after* + * hfsGet is called. This insures that the checkalias call + * is defered until hfs_mknod completes. + */ + catmode = (catmode & ~IFMT) | IFREG; + break; + } + + if ((catmode & ISGID) && !groupmember(parhp->h_meta->h_gid, cnp->cn_cred) && + suser(cnp->cn_cred, NULL)) + catmode &= ~ISGID; + + if (cnp->cn_flags & ISWHITEOUT) + pflags = UF_OPAQUE; + else + pflags = 0; + + /* + * The 32-bit pflags field has two bytes of significance which + * are stored separately as admin and owner flags. + * + * +------------------------------------+ + * pflags: |XXXXXXXX| SF |XXXXXXXX| UF | + * +------------------------------------+ + */ + catInfo.nodeData.cnd_adminFlags = (pflags >> 16) & 0x00FF; + catInfo.nodeData.cnd_ownerFlags = pflags & 0x00FF; + catInfo.nodeData.cnd_mode = catmode; + } + + /* Create a vnode for the object just created: */ + forkType = (catInfo.nodeData.cnd_type == kCatalogFolderNode) ? 
kDirectory : kDataFork; + retval = hfs_vcreate(vcb, &catInfo, forkType, &tvp); + + CLEAN_CATALOGDATA(&catInfo.nodeData); /* Should do nothing */ + + if (retval) goto bad1; /* from hfs_vcreate() */ + + /* flush out pflags, mode, gid, uid and rdev */ + tv = time; + if (vcb->vcbSigWord == kHFSPlusSigWord) { + hp = VTOH(tvp); + /* reset mode and v_type in case it was BLK/CHR */ + hp->h_meta->h_mode = mode; + tvp->v_type = IFTOVT(mode); + hp->h_meta->h_metaflags &= ~IN_UNSETACCESS; + hp->h_nodeflags |= IN_ACCESS | IN_CHANGE | IN_UPDATE; + if ((retval = VOP_UPDATE(tvp, &tv, &tv, 1))) + goto bad2; + } + + VTOH(dvp)->h_nodeflags |= IN_CHANGE | IN_UPDATE; + if ((retval = VOP_UPDATE(dvp, &tv, &tv, 1))) + goto bad2; + + if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + }; + vput(dvp); + if (UBCINFOMISSING(tvp) || UBCINFORECLAIMED(tvp)) + ubc_info_init(tvp); + + *vpp = tvp; + return (0); + +bad2: + /* + * Write retval occurred trying to update the node + * or the directory so must deallocate the node. 
+ */ + /* XXX SER In the future maybe set *vpp to 0xdeadbeef for testing */ + vput(tvp); + +bad1: + if (hasmetalock) { + /* unlock catalog b-tree */ + hasmetalock = 0; + (void) hfs_metafilelocking(VTOHFS(dvp), + kHFSCatalogFileID, LK_RELEASE, p); + } + if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + }; + vput(dvp); + + return (retval); +} + + +#if DBG_VOP_TEST_LOCKS + +/* XXX SER Add passing in the flags...might not be a serious error if locked */ + +void DbgVopTest( int maxSlots, + int retval, + VopDbgStoreRec *VopDbgStore, + char *funcname) +{ + int index; + + for (index = 0; index < maxSlots; index++) + { + if (VopDbgStore[index].id != index) { + DEBUG_BREAK_MSG(("%s: DBG_VOP_LOCK: invalid id field (%d) in target entry (#%d).\n", funcname, VopDbgStore[index].id, index)); + }; + + if ((VopDbgStore[index].vp != NULL) && + ((VopDbgStore[index].vp->v_data==NULL) || (VTOH(VopDbgStore[index].vp)->h_valid != HFS_VNODE_MAGIC))) + continue; + + if (VopDbgStore[index].vp != NULL) + debug_check_vnode(VopDbgStore[index].vp, 0); + + switch (VopDbgStore[index].inState) + { + case VOPDBG_IGNORE: + case VOPDBG_SAME: + /* Do Nothing !!! 
*/ + break; + case VOPDBG_LOCKED: + case VOPDBG_UNLOCKED: + case VOPDBG_LOCKNOTNIL: + { + if (VopDbgStore[index].vp == NULL && (VopDbgStore[index].inState != VOPDBG_LOCKNOTNIL)) { + DBG_ERR (("%s: InState check: Null vnode ptr in entry #%d\n", funcname, index)); + } else if (VopDbgStore[index].vp != NULL) { + switch (VopDbgStore[index].inState) + { + case VOPDBG_LOCKED: + case VOPDBG_LOCKNOTNIL: + if (VopDbgStore[index].inValue == 0) + { + DBG_ERR (("%s: Entry: not LOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); + DBG_ERR (("\n")); + } + break; + case VOPDBG_UNLOCKED: + if (VopDbgStore[index].inValue != 0) + { + DBG_ERR (("%s: Entry: not UNLOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); + DBG_ERR (("\n")); + } + break; + } + } + break; + } + default: + DBG_ERR (("%s: DBG_VOP_LOCK on entry: bad lock test value: %d\n", funcname, VopDbgStore[index].errState)); + } + + + if (retval != 0) + { + switch (VopDbgStore[index].errState) + { + case VOPDBG_IGNORE: + /* Do Nothing !!! 
*/ + break; + case VOPDBG_LOCKED: + case VOPDBG_UNLOCKED: + case VOPDBG_SAME: + { + if (VopDbgStore[index].vp == NULL) { + DBG_ERR (("%s: ErrState check: Null vnode ptr in entry #%d\n", funcname, index)); + } else { + VopDbgStore[index].outValue = lockstatus(&VTOH(VopDbgStore[index].vp)->h_lock); + switch (VopDbgStore[index].errState) + { + case VOPDBG_LOCKED: + if (VopDbgStore[index].outValue == 0) + { + DBG_ERR (("%s: Error: not LOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); + DBG_ERR(("\n")); + } + break; + case VOPDBG_UNLOCKED: + if (VopDbgStore[index].outValue != 0) + { + DBG_ERR (("%s: Error: not UNLOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); + DBG_ERR(("\n")); + } + break; + case VOPDBG_SAME: + if (VopDbgStore[index].outValue != VopDbgStore[index].inValue) + DBG_ERR (("%s: Error: In/Out locks are DIFFERENT: 0x%x, inis %d and out is %d\n", funcname, (u_int)VopDbgStore[index].vp, VopDbgStore[index].inValue, VopDbgStore[index].outValue)); + break; + } + } + break; + } + case VOPDBG_LOCKNOTNIL: + if (VopDbgStore[index].vp != NULL) { + VopDbgStore[index].outValue = lockstatus(&VTOH(VopDbgStore[index].vp)->h_lock); + if (VopDbgStore[index].outValue == 0) + DBG_ERR (("%s: Error: Not LOCKED: 0x%x\n", funcname, (u_int)VopDbgStore[index].vp)); + } + break; + default: + DBG_ERR (("%s: Error: bad lock test value: %d\n", funcname, VopDbgStore[index].errState)); + } + } + else + { + switch (VopDbgStore[index].outState) + { + case VOPDBG_IGNORE: + /* Do Nothing !!! 
*/ + break; + case VOPDBG_LOCKED: + case VOPDBG_UNLOCKED: + case VOPDBG_SAME: + if (VopDbgStore[index].vp == NULL) { + DBG_ERR (("%s: OutState: Null vnode ptr in entry #%d\n", funcname, index)); + }; + if (VopDbgStore[index].vp != NULL) + { + VopDbgStore[index].outValue = lockstatus(&VTOH(VopDbgStore[index].vp)->h_lock); + switch (VopDbgStore[index].outState) + { + case VOPDBG_LOCKED: + if (VopDbgStore[index].outValue == 0) + { + DBG_ERR (("%s: Out: not LOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); + DBG_ERR (("\n")); + } + break; + case VOPDBG_UNLOCKED: + if (VopDbgStore[index].outValue != 0) + { + DBG_ERR (("%s: Out: not UNLOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); + DBG_ERR (("\n")); + } + break; + case VOPDBG_SAME: + if (VopDbgStore[index].outValue != VopDbgStore[index].inValue) + DBG_ERR (("%s: Out: In/Out locks are DIFFERENT: 0x%x, in is %d and out is %d\n", funcname, (u_int)VopDbgStore[index].vp, VopDbgStore[index].inValue, VopDbgStore[index].outValue)); + break; + } + } + break; + case VOPDBG_LOCKNOTNIL: + if (VopDbgStore[index].vp != NULL) { + if (&VTOH(VopDbgStore[index].vp)->h_lock == NULL) { + DBG_ERR (("%s: DBG_VOP_LOCK on out: Null lock on vnode 0x%x\n", funcname, (u_int)VopDbgStore[index].vp)); + } + else { + VopDbgStore[index].outValue = lockstatus(&VTOH(VopDbgStore[index].vp)->h_lock); + if (VopDbgStore[index].outValue == 0) + { + DBG_ERR (("%s: DBG_VOP_LOCK on out: Should be LOCKED:", funcname)); + DBG_VOP_PRINT_VNODE_INFO(VopDbgStore[index].vp); DBG_ERR (("\n")); + } + } + } + break; + default: + DBG_ERR (("%s: DBG_VOP_LOCK on out: bad lock test value: %d\n", funcname, VopDbgStore[index].outState)); + } + } + + VopDbgStore[index].id = -1; /* Invalidate the entry to allow panic-free re-use */ + } +} + +#endif /* DBG_VOP_TEST_LOCKS */ + +/* + * Wrapper for special device reads + */ +int +hfsspec_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + 
struct ucred *a_cred; + } */ *ap; +{ + + /* + * Set access flag. + */ + VTOH(ap->a_vp)->h_nodeflags |= IN_ACCESS; + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap)); +} + +/* + * Wrapper for special device writes + */ +int +hfsspec_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + + /* + * Set update and change flags. + */ + VTOH(ap->a_vp)->h_nodeflags |= IN_CHANGE | IN_UPDATE; + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Wrapper for special device close + * + * Update the times on the hfsnode then do device close. + */ +int +hfsspec_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct hfsnode *hp = VTOH(vp); + + simple_lock(&vp->v_interlock); + if (ap->a_vp->v_usecount > 1) + HFSTIMES(hp, &time, &time); + simple_unlock(&vp->v_interlock); + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap)); +} + +#if FIFO +/* + * Wrapper for fifo reads + */ +int +hfsfifo_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + + /* + * Set access flag. + */ + VTOH(ap->a_vp)->h_nodeflags |= IN_ACCESS; + return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap)); +} + +/* + * Wrapper for fifo writes + */ +int +hfsfifo_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + + /* + * Set update and change flags. + */ + VTOH(ap->a_vp)->h_nodeflags |= IN_CHANGE | IN_UPDATE; + return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Wrapper for fifo close + * + * Update the times on the hfsnode then do device close. 
+ */ +int +hfsfifo_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + struct vnode *vp = ap->a_vp; + struct hfsnode *hp = VTOH(vp); + + simple_lock(&vp->v_interlock); + if (ap->a_vp->v_usecount > 1) + HFSTIMES(hp, &time, &time); + simple_unlock(&vp->v_interlock); + return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap)); +} +#endif /* FIFO */ + + +/***************************************************************************** +* +* VOP Tables +* +*****************************************************************************/ + +#define VOPFUNC int (*)(void *) + +struct vnodeopv_entry_desc hfs_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)hfs_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)hfs_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)hfs_open }, /* open */ + { &vop_close_desc, (VOPFUNC)hfs_close }, /* close */ + { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)hfs_read }, /* read */ + { &vop_write_desc, (VOPFUNC)hfs_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)hfs_select }, /* select */ + { &vop_exchange_desc, (VOPFUNC)hfs_exchange }, /* exchange */ + { &vop_mmap_desc, (VOPFUNC)hfs_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)hfs_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)hfs_remove }, /* remove */ +#if HFS_HARDLINKS + { &vop_link_desc, (VOPFUNC)hfs_link }, /* link */ +#else + { &vop_link_desc, (VOPFUNC)err_link }, /* link (NOT SUPPORTED) */ +#endif + { &vop_rename_desc, (VOPFUNC)hfs_rename }, /* rename */ + { &vop_mkdir_desc, 
(VOPFUNC)hfs_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir }, /* rmdir */ + { &vop_mkcomplex_desc, (VOPFUNC)hfs_mkcomplex }, /* mkcomplex */ + { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, /* getattrlist */ + { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist }, /* setattrlist */ + { &vop_symlink_desc, (VOPFUNC)hfs_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)hfs_readdir }, /* readdir */ + { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr }, /* readdirattr */ + { &vop_readlink_desc, (VOPFUNC)hfs_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)hfs_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)hfs_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)hfs_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)hfs_advlock }, /* advlock */ + { &vop_reallocblks_desc, (VOPFUNC)hfs_reallocblks }, /* reallocblks */ + { &vop_truncate_desc, (VOPFUNC)hfs_truncate }, /* truncate */ + { &vop_allocate_desc, (VOPFUNC)hfs_allocate }, /* allocate */ + { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */ + { &vop_searchfs_desc, (VOPFUNC)hfs_search }, /* search fs */ + { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, /* bwrite */ + { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* pagein */ + { &vop_pageout_desc,(VOPFUNC) hfs_pageout }, /* pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ + { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */ + { NULL, (VOPFUNC)NULL } +}; + +struct 
vnodeopv_desc hfs_vnodeop_opv_desc = +{ &hfs_vnodeop_p, hfs_vnodeop_entries }; + +int (**hfs_specop_p)(void *); +struct vnodeopv_entry_desc hfs_specop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)spec_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)spec_open }, /* open */ + { &vop_close_desc, (VOPFUNC)hfsspec_close }, /* close */ + { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)hfsspec_read }, /* read */ + { &vop_write_desc, (VOPFUNC)hfsspec_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)spec_lease_check }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)spec_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)spec_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)spec_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)spec_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)spec_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */ + { &vop_bmap_desc, 
(VOPFUNC)spec_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)spec_valloc }, /* valloc */ + { &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)spec_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, + { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */ + { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ + { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc hfs_specop_opv_desc = + { &hfs_specop_p, hfs_specop_entries }; + +#if FIFO +int (**hfs_fifoop_p)(void *); +struct vnodeopv_entry_desc hfs_fifoop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)fifo_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)fifo_open }, /* open */ + { &vop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */ + { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */ + { &vop_write_desc, 
(VOPFUNC)hfsfifo_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)fifo_lease_check }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)fifo_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)fifo_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)fifo_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)fifo_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)fifo_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)fifo_valloc }, /* valloc */ + { &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)fifo_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, + { 
&vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */ + { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc hfs_fifoop_opv_desc = + { &hfs_fifoop_p, hfs_fifoop_entries }; +#endif /* FIFO */ + + + diff --git a/bsd/hfs/hfscommon/BTree/BTree.c b/bsd/hfs/hfscommon/BTree/BTree.c new file mode 100644 index 000000000..48749f50d --- /dev/null +++ b/bsd/hfs/hfscommon/BTree/BTree.c @@ -0,0 +1,1828 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTree.c + + Contains: Implementation of public interface routines for B-tree manager. + + Version: HFS Plus 1.0 + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + 9/22/99 ser Added routines BTGetLastSync and BTSetLastSync + 6/1/99 djb Sync up with Mac OS 8.6. + 6/30/98 djb In BTOpenPath make sure nodes are contiguous on disk (radar #2249539). + 4/15/98 djb In BTOpenPath need to clear nodeRec.buffer if GetBlockProc fails. + 4/11/98 djb Add RequireFileLock checking to all external entry points. + + 03/23/98 djb In BTOpenPath use kTrashBlock option when releasing the header so + that we get a full node when we call GetNode. + + 12/12/97 djb Radar #2202682, BTIterateRecord with kBTreeCurrentRecord was not + checking if we had a record and could call BlockMove with an + uninitialize source pointer (causing a bus error). + 10/24/97 msd In BTIterateRecord, when moving to the previous or next record + and we have to move to another node, see if we need to release + the node about to be "shifted out" (opposite sibling of the + direction we need to move). + 7/25/97 DSH BTSearchRecord now takes a heuristicHint, nodeNum, and tries it + before calling SearchBTree + 7/24/97 djb GetBlockProc now take a file refnum instead of an FCB ptr. + 7/22/97 djb Move trace points from BTreeWrapper.c to here. + 7/21/97 djb LogEndTime now takes an error code. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 5/19/97 djb Add summary traces to BTIterateRecord. + 4/23/97 djb first checked in + + 2/19/97 djb Enable variable sized index keys for HFS+ volumes. Added node + cache to support nodes larger than 512 bytes. + 1/27/97 djb Calls to InsertTree and DeleteTree are now recursive (to support + variable sized index keys). + 1/13/97 djb Added support for getting current record to BTIterateRecord. + 1/6/97 djb Initialize "BigKeys" attribute in BTOpen. + 1/3/97 djb Added support for large keys. 
+ 12/23/96 djb On exit map fsBTEmptyErr and fsBTEndOfIterationErr to + fsBTRecordNotFoundErr. + 12/19/96 djb first checked in + + History applicable to original Scarecrow Design: + + <13> 10/25/96 ser Changing for new VFPI + <12> 10/18/96 ser Converting over VFPI changes + <11> 9/17/96 dkh More BTree statistics. Modified hint checks to not bail out when + an error is returned from GetNode. + <10> 9/16/96 dkh Revised BTree statistics. + <9> 8/23/96 dkh Remove checks for multiple paths to BTree file. Need to add + equivalent mechanism later. + <8> 6/20/96 dkh Radar #1358740. Switch from using Pools to debug MemAllocators. + <7> 3/14/96 jev Fix BTreeSetRecord, recordFound was not set for the case of a + simple replace causing the leafRecords count to get bumped even + though we didn't have to add a record. + <6> 3/1/96 prp Fix lint problems. Bug in BTSetRecord that does not initialize + recordFound. + <5> 1/22/96 dkh Add #include Memory.h + <4> 1/10/96 msd Use the real function names from Math64.i. + <3> 1/4/96 jev Fix BTItererateRecord for the condition when the iterator + position routine does not find the record and we are looking for + the next record. In such a case, if the node's forrward link is + non-zero, we have to keep iterating next and not return + fsBTEndOfIterationErr error. + <2> 12/7/95 dkh D10E2 build. Changed usage of Ref data type to LogicalAddress. + <1> 10/18/95 rst Moved from Scarecrow project. + + <24> 7/18/95 mbb Change MoveData & ClearBytes to BlockMoveData & BlockZero. + <23> 1/31/95 prp GetBlockProc interface uses a 64 bit node number. + <22> 1/12/95 wjk Adopt Model FileSystem changes in D5. + <21> 11/16/94 prp Add IsItAHint routine and use it whenever hint's node number was + used for testing. + <20> 11/10/94 prp BTGetInfo name collides with the same name in FileManagerPriv.i. + Change it to BTGetInformation. + <19> 9/30/94 prp Get in sync with D2 interface changes. + <18> 7/22/94 wjk Convert to the new set of header files. 
+ <17> 12/9/93 wjk Cleanup usage of char, Byte, int8, UInt8, etc. + <16> 12/2/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <15> 11/30/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <14> 9/30/93 gs Rename E_NoGetNodeProc and E_NoReleaseNodeProc to + E_NoXxxxBlockProc. + <13> 8/31/93 prp Use Set64U instead of Set64. + <12> 8/16/93 prp In BTSearchRecord, if the input hint found the node and record, + set the local nodeNum variable correctly so that the resultant + iterator gets set correctly. + <11> 7/1/93 gs Fix bug in BTIterateRecord related to kBTreePrevRecord + operation. + <10> 6/2/93 gs Update for changes to FSErrors.h and add some comments. + <9> 5/24/93 gs Fix bug in BTInsert/Set/ReplaceRecord which didn't set node hint + properly in some cases. + <8> 5/24/93 gs Do NOT map fsBTEmptyErr to fsBTRecordNotFoundErr in BTSearchRecord. + <7> 5/24/93 gs Rename BTFlush to BTFlushPath. + <6> 5/21/93 gs Add hint optimization to Set/Replace routines. + <5> 5/10/93 gs Remove Panic from BTInitialize for small logicalEOF. Implement + Insert, Set, Replace, and Delete. + <4> 3/23/93 gs Finish BTInitialize. + <3> 2/8/93 gs Implement BTSearchRecord and BTIterateRecord. + <2> 12/8/92 gs Implement Open and Close routines. + <1> 11/15/92 gs first checked in + +*/ + +#include "../headers/BTreesPrivate.h" + +#include "../headers/HFSInstrumentation.h" + +/* + * The amount that the BTree header leaf count can be wrong before we assume + * it is in an infinite loop. + */ +#define kNumLeafRecSlack 10 + +//////////////////////////////////// Globals //////////////////////////////////// + + +/////////////////////////// BTree Module Entry Points /////////////////////////// + + + +/*------------------------------------------------------------------------------- +Routine: BTOpenPath - Open a file for access as a B*Tree. + +Function: Create BTree control block for a file, if necessary. 
		Validates the
		file to be sure it looks like a BTree file.


Input:		filePtr				- pointer to file to open as a B-tree
		keyCompareProc		- pointer to client's KeyCompare function
		getBlockProc		- pointer to client's GetBlock function
		releaseBlockProc	- pointer to client's ReleaseBlock function
		setEndOfForkProc	- pointer to client's SetEOF function
		setBlockSizeProc	- pointer to client's SetBlockSize function

Result:		noErr				- success
		paramErr			- required ptr was nil
		fsBTInvalidFileErr		-
		memFullErr			-
		!= noErr			- failure
-------------------------------------------------------------------------------*/

OSStatus	BTOpenPath			(FCB					*filePtr,
					 KeyCompareProcPtr		 keyCompareProc,
					 GetBlockProcPtr		 getBlockProc,
					 ReleaseBlockProcPtr	 releaseBlockProc,
					 SetEndOfForkProcPtr	 setEndOfForkProc,
					 SetBlockSizeProcPtr	 setBlockSizeProc )
{
	OSStatus				err;
	BTreeControlBlockPtr	btreePtr;
	BTHeaderRec				*header;
	NodeRec					nodeRec;

	LogStartTime(kTraceOpenBTree);

	////////////////////// Preliminary Error Checking ///////////////////////////

	// keyCompareProc is deliberately NOT required to be non-nil here
	if ( filePtr == nil				||
		 getBlockProc == nil		||
		 releaseBlockProc == nil	||
		 setEndOfForkProc == nil	||
		 setBlockSizeProc == nil )
	{
		return  paramErr;
	}

	if ( filePtr->fcbBTCBPtr != nil )			// already has a BTreeCB
		return noErr;

	// is file large enough to contain header node?
	if ( filePtr->fcbEOF < kMinNodeSize )
		return fsBTInvalidFileErr;				// NOTE: or E_BadHeader?


	//////////////////////// Allocate Control Block /////////////////////////////

	btreePtr = (BTreeControlBlock*) NewPtrSysClear( sizeof( BTreeControlBlock ) );
	if (btreePtr == nil)
	{
		Panic ("\pBTOpen: no memory for btreePtr.");
		return	memFullErr;
	}

	btreePtr->getBlockProc		= getBlockProc;
	btreePtr->releaseBlockProc	= releaseBlockProc;
	btreePtr->setEndOfForkProc	= setEndOfForkProc;
	btreePtr->keyCompareProc	= keyCompareProc;

	/////////////////////////// Read Header Node ////////////////////////////////

	nodeRec.buffer				= nil;				// so we can call ReleaseNode
	nodeRec.blockSize			= kMinNodeSize;
	btreePtr->fileRefNum		= GetFileRefNumFromFCB(filePtr);
	filePtr->fcbBTCBPtr			= (Ptr) btreePtr;	// attach btree cb to file
	// NOTE: cb is attached BEFORE any failure can occur so that the
	// ErrorExit path (which clears fcbBTCBPtr) is always consistent.

	REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);

	// it is now safe to call M_ExitOnError (err)

	err = setBlockSizeProc (btreePtr->fileRefNum, kMinNodeSize, 1);
	M_ExitOnError (err);


	err = getBlockProc (btreePtr->fileRefNum,
						kHeaderNodeNum,
						kGetBlock,
						&nodeRec );
	if (err != noErr)
	{
		nodeRec.buffer = nil;
		nodeRec.blockHeader	= nil;
		Panic("\pBTOpen: getNodeProc returned error getting header node.");
		goto ErrorExit;
	}

	// header record lives immediately after the node descriptor
	header = (BTHeaderRec*) ((u_long)nodeRec.buffer + sizeof(BTNodeDescriptor));


	///////////////////////////// verify header /////////////////////////////////

	err = VerifyHeader (filePtr, header);
	M_ExitOnError (err);


	///////////////////// Initialize fields from header //////////////////////////

	PanicIf ( (FCBTOVCB(filePtr)->vcbSigWord != 0x4244) && (header->nodeSize == 512), "\p BTOpenPath: wrong node size for HFS+ volume!");	// 0x4244 = 'BD'

	btreePtr->treeDepth			= header->treeDepth;
	btreePtr->rootNode			= header->rootNode;
	btreePtr->leafRecords		= header->leafRecords;
	btreePtr->firstLeafNode		= header->firstLeafNode;
	btreePtr->lastLeafNode		= header->lastLeafNode;
	btreePtr->nodeSize			= header->nodeSize;
	btreePtr->maxKeyLength		= header->maxKeyLength;
	btreePtr->totalNodes		= header->totalNodes;
	btreePtr->freeNodes			= header->freeNodes;
	// ignore		header->clumpSize;	// NOTE: rename this field?
	btreePtr->btreeType			= header->btreeType;

	btreePtr->attributes		= header->attributes;

	if ( btreePtr->maxKeyLength > 40 )
		btreePtr->attributes |= (kBTBigKeysMask + kBTVariableIndexKeysMask);	// NOTE: we need a way to save these attributes

	/////////////////////// Initialize dynamic fields ///////////////////////////

	btreePtr->version			= kBTreeVersion;
	btreePtr->flags				= 0;
	btreePtr->writeCount		= 1;

	btreePtr->numGetNodes		= 1;		// for earlier call to getNodeProc

	/////////////////////////// Check Header Node ///////////////////////////////

	// NOTE: set kBadClose attribute bit, and UpdateNode

	// if nodeSize is 512 then we don't need to release, just CheckNode

	if ( btreePtr->nodeSize == kMinNodeSize )
	{
		err = CheckNode (btreePtr, nodeRec.buffer);
		if (err)
			VTOVCB(btreePtr->fileRefNum)->vcbFlags |= kHFS_DamagedVolume;
		M_ExitOnError (err);
	}
	else
	{
		err = setBlockSizeProc (btreePtr->fileRefNum, btreePtr->nodeSize, 32);	// NOTE: we should try and get this down to 8
		M_ExitOnError (err);

		/*
		 * Need to use kTrashBlock option to force the
		 * buffer cache to read the entire node
		 */
		err = releaseBlockProc(btreePtr->fileRefNum, &nodeRec, kTrashBlock);
		M_ExitOnError (err);

		err = GetNode (btreePtr, kHeaderNodeNum, &nodeRec );	// calls CheckNode...
		M_ExitOnError (err);
	}

	// NOTE: total nodes * node size <= LEOF?


	err = ReleaseNode (btreePtr, &nodeRec);
	M_ExitOnError (err);

	/*
	 * Under Mac OS, b-tree nodes can be non-contiguous on disk when the
	 * allocation block size is smaller than the b-tree node size.
	 */
	if ( !NodesAreContiguous(FCBTOVCB(filePtr), filePtr, btreePtr->nodeSize) )
		return fsBTInvalidNodeErr;

	//////////////////////////////// Success ////////////////////////////////////

	// NOTE: align LEOF to multiple of node size?	- just on close

	LogEndTime(kTraceOpenBTree, noErr);

	return noErr;


	/////////////////////// Error - Clean up and Exit ///////////////////////////

ErrorExit:

	filePtr->fcbBTCBPtr = nil;
	(void) ReleaseNode (btreePtr, &nodeRec);
	DisposePtr( (Ptr) btreePtr );

	LogEndTime(kTraceOpenBTree, err);

	return err;
}



/*-------------------------------------------------------------------------------
Routine:	BTClosePath	-	Flush BTree Header and Deallocate Memory for BTree.

Function:	Flush the BTreeControlBlock fields to header node, and delete BTree control
		block and key descriptor associated with the file if filePtr is last
		path of type kBTreeType ('btre').


Input:		filePtr		- pointer to file to delete BTree control block for.

Result:		noErr			- success
		fsBTInvalidFileErr	-
		!= noErr		- failure
-------------------------------------------------------------------------------*/

OSStatus	BTClosePath			(FCB					*filePtr)
{
	OSStatus				err;
	BTreeControlBlockPtr	btreePtr;

	LogStartTime(kTraceCloseBTree);

	btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;

	if (btreePtr == nil)
		return fsBTInvalidFileErr;

	REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);

	////////////////////// Check for other BTree Paths //////////////////////////

	btreePtr->attributes &= ~kBTBadCloseMask;		// clear "bad close" attribute bit
	err = UpdateHeader (btreePtr, true);
	M_ExitOnError (err);

	DisposePtr( (Ptr) btreePtr );
	filePtr->fcbBTCBPtr = nil;

	LogEndTime(kTraceCloseBTree, noErr);

	return	noErr;

	/////////////////////// Error - Clean Up and Exit ///////////////////////////

ErrorExit:

	LogEndTime(kTraceCloseBTree, err);

	return	err;
}



/*-------------------------------------------------------------------------------
Routine:	BTSearchRecord	-	Search BTree for a record with a matching key.

Function:	Search for position in B*Tree indicated by searchKey.
If a valid node hint + is provided, it will be searched first, then SearchTree will be called. + If a BTreeIterator is provided, it will be set to the position found as + a result of the search. If a record exists at that position, and a BufferDescriptor + is supplied, the record will be copied to the buffer (as much as will fit), + and recordLen will be set to the length of the record. + + If an error other than fsBTRecordNotFoundErr occurs, the BTreeIterator, if any, + is invalidated, and recordLen is set to 0. + + +Input: pathPtr - pointer to path for BTree file. + searchKey - pointer to search key to match. + hintPtr - pointer to hint (may be nil) + +Output: record - pointer to BufferDescriptor containing record + recordLen - length of data at recordPtr + iterator - pointer to BTreeIterator indicating position result of search + +Result: noErr - success, record contains copy of record found + fsBTRecordNotFoundErr - record was not found, no data copied + fsBTInvalidFileErr - no BTreeControlBlock is allocated for the fork + fsBTInvalidKeyLengthErr - + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus BTSearchRecord (FCB *filePtr, + BTreeIterator *searchIterator, + UInt32 heuristicHint, + FSBufferDescriptor *record, + UInt16 *recordLen, + BTreeIterator *resultIterator ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + UInt32 nodeNum; + BlockDescriptor node; + UInt16 index; + BTreeKeyPtr keyPtr; + RecordPtr recordPtr; + UInt16 len; + Boolean foundRecord; + Boolean validHint; + + + LogStartTime(kTraceSearchBTree); + + if (filePtr == nil) return paramErr; + if (searchIterator == nil) return paramErr; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) return fsBTInvalidFileErr; + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + foundRecord = false; + + ////////////////////////////// Take A Hint ////////////////////////////////// + + 
err = IsItAHint (btreePtr, searchIterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + nodeNum = searchIterator->hint.nodeNum; + + err = GetNode (btreePtr, nodeNum, &node); + if( err == noErr ) + { + if ( ((BTNodeDescriptor*) node.buffer)->kind == kBTLeafNode && + ((BTNodeDescriptor*) node.buffer)->numRecords > 0 ) + { + foundRecord = SearchNode (btreePtr, node.buffer, &searchIterator->key, &index); + + //€€ if !foundRecord, we could still skip tree search if ( 0 < index < numRecords ) + } + + if (foundRecord == false) + { + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + } + else + { + ++btreePtr->numValidHints; + } + } + + if( foundRecord == false ) + (void) BTInvalidateHint( searchIterator ); + } + + ////////////////////////////// Try the heuristicHint ////////////////////////////////// + + if ( (foundRecord == false) && (heuristicHint != kInvalidMRUCacheKey) && (nodeNum != heuristicHint) ) + { + LogStartTime(kHeuristicHint); + nodeNum = heuristicHint; + + err = GetNode (btreePtr, nodeNum, &node); + if( err == noErr ) + { + if ( ((BTNodeDescriptor*) node.buffer)->kind == kBTLeafNode && + ((BTNodeDescriptor*) node.buffer)->numRecords > 0 ) + { + foundRecord = SearchNode (btreePtr, node.buffer, &searchIterator->key, &index); + } + + if (foundRecord == false) + { + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + } + } + LogEndTime(kHeuristicHint, (foundRecord == false)); + } + + //////////////////////////// Search The Tree //////////////////////////////// + + if (foundRecord == false) + { + err = SearchTree ( btreePtr, &searchIterator->key, treePathTable, &nodeNum, &node, &index); + switch (err) + { + case noErr: foundRecord = true; break; + case fsBTRecordNotFoundErr: break; + default: goto ErrorExit; + } + } + + + //////////////////////////// Get the Record ///////////////////////////////// + + if (foundRecord == true) + { + //XXX Should check for errors! Or BlockMove could choke on recordPtr!!! 
+ GetRecordByIndex (btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len); + + if (recordLen != nil) *recordLen = len; + + if (record != nil) + { + ByteCount recordSize; + + recordSize = record->itemCount * record->itemSize; + + if (len > recordSize) len = recordSize; + + BlockMoveData (recordPtr, record->bufferAddress, len); + } + } + + + /////////////////////// Success - Update Iterator /////////////////////////// + + if (resultIterator != nil) + { + resultIterator->hint.writeCount = btreePtr->writeCount; + resultIterator->hint.nodeNum = nodeNum; + resultIterator->hint.index = index; +#if DEBUG_BUILD + resultIterator->hint.reserved1 = 0; + resultIterator->hint.reserved2 = 0; + resultIterator->version = 0; + resultIterator->reserved = 0; +#endif + // copy the key in the BTree when found rather than searchIterator->key to get proper case/diacriticals + if (foundRecord == true) + BlockMoveData ((Ptr)keyPtr, (Ptr)&resultIterator->key, CalcKeySize(btreePtr, keyPtr)); + else + BlockMoveData ((Ptr)&searchIterator->key, (Ptr)&resultIterator->key, CalcKeySize(btreePtr, &searchIterator->key)); + } + + err = ReleaseNode (btreePtr, &node); + M_ExitOnError (err); + + LogEndTime(kTraceSearchBTree, (foundRecord == false)); + + if (foundRecord == false) return fsBTRecordNotFoundErr; + else return noErr; + + + /////////////////////// Error - Clean Up and Exit /////////////////////////// + +ErrorExit: + + if (recordLen != nil) + *recordLen = 0; + + if (resultIterator != nil) + { + resultIterator->hint.writeCount = 0; + resultIterator->hint.nodeNum = 0; + resultIterator->hint.index = 0; + resultIterator->hint.reserved1 = 0; + resultIterator->hint.reserved2 = 0; + + resultIterator->version = 0; + resultIterator->reserved = 0; + resultIterator->key.length16 = 0; // zero out two bytes to cover both types of keys + } + + if ( err == fsBTEmptyErr ) + err = fsBTRecordNotFoundErr; + + LogEndTime(kTraceSearchBTree, err); + + return err; +} + + + 
/*-------------------------------------------------------------------------------
Routine:	BTIterateRecord	-	Find the first, next, previous, or last record.

Function:	Find the first, next, previous, or last record in the BTree

Input:		pathPtr		- pointer to path iterate records for.
		operation	- iteration operation (first,next,prev,last)
		iterator	- pointer to iterator indicating start position

Output:		iterator	- iterator is updated to indicate new position
		newKeyPtr	- pointer to buffer to copy key found by iteration
		record		- pointer to buffer to copy record found by iteration
		recordLen	- length of record

Result:		noErr		- success
		!= noErr	- failure
-------------------------------------------------------------------------------*/

OSStatus	BTIterateRecord		(FCB						*filePtr,
					 BTreeIterationOperation	 operation,
					 BTreeIterator				*iterator,
					 FSBufferDescriptor			*record,
					 UInt16						*recordLen )
{
	OSStatus					err;
	BTreeControlBlockPtr		btreePtr;
	BTreeKeyPtr					keyPtr;
	RecordPtr					recordPtr;
	UInt16						len;

	Boolean						foundRecord;
	UInt32						nodeNum;

	// "left"/"right" hold sibling nodes while stepping across node boundaries
	BlockDescriptor				left,			node,			right;
	UInt16						index;


	LogStartTime(kTraceGetBTreeRecord);

	////////////////////////// Preliminary Checks ///////////////////////////////

	left.buffer		= nil;
	right.buffer	= nil;
	node.buffer		= nil;


	if (filePtr == nil)
	{
		return	paramErr;
	}

	btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
	if (btreePtr == nil)
	{
		return	fsBTInvalidFileErr;			// NOTE: handle properly
	}

	REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true);

	if ((operation != kBTreeFirstRecord)	&&
		(operation != kBTreeNextRecord)		&&
		(operation != kBTreeCurrentRecord)	&&
		(operation != kBTreePrevRecord)		&&
		(operation != kBTreeLastRecord))
	{
		err = fsInvalidIterationMovmentErr;
		goto ErrorExit;
	}

	/////////////////////// Find First or Last Record ///////////////////////////

	if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord))
	{
		if (operation == kBTreeFirstRecord)		nodeNum = btreePtr->firstLeafNode;
		else									nodeNum = btreePtr->lastLeafNode;

		if (nodeNum == 0)
		{
			err = fsBTEmptyErr;
			goto ErrorExit;
		}

		err = GetNode (btreePtr, nodeNum, &node);
		M_ExitOnError (err);

		if ( ((NodeDescPtr) node.buffer)->kind != kBTLeafNode ||
			 ((NodeDescPtr) node.buffer)->numRecords <= 0 )
		{
			err = ReleaseNode (btreePtr, &node);
			M_ExitOnError (err);

			err = fsBTInvalidNodeErr;
			MARK_VOLUMEDAMAGED(filePtr);
			goto ErrorExit;
		}

		if (operation == kBTreeFirstRecord)		index = 0;
		else									index = ((BTNodeDescriptor*) node.buffer)->numRecords - 1;

		goto CopyData;		// NOTE: is there a cleaner way?
	}


	//////////////////////// Find Iterator Position /////////////////////////////

	err = FindIteratorPosition (btreePtr, iterator,
								&left, &node, &right, &nodeNum, &index, &foundRecord);
	M_ExitOnError (err);


	///////////////////// Find Next Or Previous Record //////////////////////////

	if (operation == kBTreePrevRecord)
	{
		if (index > 0)
		{
			--index;
		}
		else
		{
			if (left.buffer == nil)
			{
				nodeNum = ((NodeDescPtr) node.buffer)->bLink;
				if ( nodeNum > 0)
				{
					err = GetNode (btreePtr, nodeNum, &left);
					M_ExitOnError (err);
				} else {
					err = fsBTStartOfIterationErr;
					goto ErrorExit;
				}
			}
			// Before we stomp on "right", we'd better release it if needed
			if (right.buffer != nil) {
				err = ReleaseNode(btreePtr, &right);
				M_ExitOnError(err);
			}
			right		= node;
			node		= left;
			left.buffer	= nil;
			index		= ((NodeDescPtr) node.buffer)->numRecords -1;
		}
	}
	else if (operation == kBTreeNextRecord)
	{
		if ((foundRecord != true) &&
			(((NodeDescPtr) node.buffer)->fLink == 0) &&
			(index == ((NodeDescPtr) node.buffer)->numRecords))
		{
			err = fsBTEndOfIterationErr;
			goto ErrorExit;
		}

		// we did not find the record but the index is already positioned correctly
		if ((foundRecord == false) && (index != ((NodeDescPtr) node.buffer)->numRecords))
			goto CopyData;

		// we found the record OR we have to look in the next node
		if (index < ((NodeDescPtr) node.buffer)->numRecords -1)
		{
			++index;
		}
		else
		{
			if (right.buffer == nil)
			{
				nodeNum = ((NodeDescPtr) node.buffer)->fLink;
				if ( nodeNum > 0)
				{
					err = GetNode (btreePtr, nodeNum, &right);
					M_ExitOnError (err);
				} else {
					err = fsBTEndOfIterationErr;
					goto ErrorExit;
				}
			}
			// Before we stomp on "left", we'd better release it if needed
			if (left.buffer != nil) {
				err = ReleaseNode(btreePtr, &left);
				M_ExitOnError(err);
			}
			left		 = node;
			node		 = right;
			right.buffer = nil;
			index		 = 0;
		}
	}
	else // operation == kBTreeCurrentRecord
	{
		// make sure we have something...
		if ((foundRecord != true) &&
			(index >= ((NodeDescPtr) node.buffer)->numRecords))
		{
			err = fsBTEndOfIterationErr;
			goto ErrorExit;
		}
	}

	//////////////////// Copy Record And Update Iterator ////////////////////////

CopyData:

	// added check for errors
	err = GetRecordByIndex (btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len);
	M_ExitOnError (err);

	if (recordLen != nil)
		*recordLen = len;

	if (record != nil)
	{
		ByteCount recordSize;

		recordSize = record->itemCount * record->itemSize;

		// clamp the copy to the caller's buffer size
		if (len > recordSize) len = recordSize;

		BlockMoveData (recordPtr, record->bufferAddress, len);
	}

	if (iterator != nil)						// first & last do not require iterator
	{
		iterator->hint.writeCount	= btreePtr->writeCount;
		iterator->hint.nodeNum		= nodeNum;
		iterator->hint.index		= index;
		iterator->hint.reserved1	= 0;
		iterator->hint.reserved2	= 0;

		iterator->version			= 0;
		iterator->reserved			= 0;

		/* SER
		 * Check for infinite loops by making sure we do not
		 * process more leaf records, than can possibly be (or the BTree header
		 * is seriously damaged)....a brute force method.
		 */
		if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord))
			iterator->hitCount = 1;
		else if (operation != kBTreeCurrentRecord)
			iterator->hitCount += 1;
		/* Always use the highest max, in case the tree grows while iterating */
		iterator->maxLeafRecs = max(btreePtr->leafRecords, iterator->maxLeafRecs);

#if 0
		if (iterator->hitCount > iterator->maxLeafRecs + kNumLeafRecSlack)
		{
			err = fsBTInvalidNodeErr;
			MARK_VOLUMEDAMAGED(filePtr);
			goto ErrorExit;
		}
#endif

		BlockMoveData ((Ptr)keyPtr, (Ptr)&iterator->key, CalcKeySize(btreePtr, keyPtr));
	}


	///////////////////////////// Release Nodes /////////////////////////////////

	err = ReleaseNode (btreePtr, &node);
	M_ExitOnError (err);

	if (left.buffer != nil)
	{
		err = ReleaseNode (btreePtr, &left);
		M_ExitOnError (err);
	}

	if (right.buffer != nil)
	{
		err = ReleaseNode (btreePtr, &right);
		M_ExitOnError (err);
	}

	LogEndTime(kTraceGetBTreeRecord, noErr);

	return noErr;

	/////////////////////// Error - Clean Up and Exit ///////////////////////////

ErrorExit:

	(void)	ReleaseNode (btreePtr, &left);
	(void)	ReleaseNode (btreePtr, &node);
	(void)	ReleaseNode (btreePtr, &right);

	if (recordLen != nil)
		*recordLen = 0;

	if (iterator != nil)
	{
		iterator->hint.writeCount	= 0;
		iterator->hint.nodeNum		= 0;
		iterator->hint.index		= 0;
		iterator->hint.reserved1	= 0;
		iterator->hint.reserved2	= 0;

		iterator->version			= 0;
		iterator->reserved			= 0;
		iterator->key.length16		= 0;
	}

	if ( err == fsBTEmptyErr || err == fsBTEndOfIterationErr )
		err = fsBTRecordNotFoundErr;

	LogEndTime(kTraceGetBTreeRecord, err);

	return err;
}


/*-------------------------------------------------------------------------------
Routine:	BTIterateRecords

Function:	Find a series of records

Input:		filePtr		- b-tree file
		operation	- iteration operation (first,next,prev,last)
		iterator	- pointer to iterator indicating start position
		callBackProc
			- pointer to routine to process a record
		callBackState	- pointer to state data (used by callBackProc)

Output:		iterator	- iterator is updated to indicate new position

Result:		noErr		- success
		!= noErr	- failure
-------------------------------------------------------------------------------*/

OSStatus
BTIterateRecords(FCB *filePtr, BTreeIterationOperation operation, BTreeIterator *iterator,
		 IterateCallBackProcPtr	callBackProc, void * callBackState)
{
	OSStatus		err;
	BTreeControlBlockPtr	btreePtr;
	BTreeKeyPtr		keyPtr;
	RecordPtr		recordPtr;
	UInt16			len;
	Boolean			foundRecord;
	UInt32			nodeNum;
	// "left"/"right" hold sibling nodes while stepping across node boundaries
	BlockDescriptor		left, node, right;
	UInt16			index;


	////////////////////////// Preliminary Checks ///////////////////////////////

	left.buffer	= nil;
	right.buffer	= nil;
	node.buffer	= nil;

	// NOTE(review): unlike BTIterateRecord, neither filePtr nor btreePtr
	// is nil-checked here — callers are presumed to guarantee both; confirm.
	btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true);

	if ((operation != kBTreeFirstRecord)	&&
		(operation != kBTreeNextRecord)		&&
		(operation != kBTreeCurrentRecord)	&&
		(operation != kBTreePrevRecord)		&&
		(operation != kBTreeLastRecord))
	{
		err = fsInvalidIterationMovmentErr;
		goto ErrorExit;
	}

	/////////////////////// Find First or Last Record ///////////////////////////

	if ((operation == kBTreeFirstRecord) || (operation == kBTreeLastRecord))
	{
		if (operation == kBTreeFirstRecord)
			nodeNum = btreePtr->firstLeafNode;
		else
			nodeNum = btreePtr->lastLeafNode;

		if (nodeNum == 0)
		{
			err = fsBTEmptyErr;
			goto ErrorExit;
		}

		err = GetNode(btreePtr, nodeNum, &node);
		M_ExitOnError(err);

		if ( ((NodeDescPtr)node.buffer)->kind != kBTLeafNode ||
			 ((NodeDescPtr)node.buffer)->numRecords <= 0 )
		{
			err = ReleaseNode(btreePtr, &node);
			M_ExitOnError(err);

			err = fsBTInvalidNodeErr;
			MARK_VOLUMEDAMAGED(filePtr);
			goto ErrorExit;
		}

		if (operation == kBTreeFirstRecord)
			index = 0;
		else
			index = ((BTNodeDescriptor*) node.buffer)->numRecords - 1;

		goto ProcessData;
	}

	//////////////////////// Find Iterator Position /////////////////////////////

	err = FindIteratorPosition(btreePtr, iterator, &left, &node, &right,
				   &nodeNum, &index, &foundRecord);
	M_ExitOnError(err);


	///////////////////// Find Next Or Previous Record //////////////////////////

	if (operation == kBTreePrevRecord)
	{
		if (index > 0)
		{
			--index;
		}
		else
		{
			if (left.buffer == nil)
			{
				nodeNum = ((NodeDescPtr) node.buffer)->bLink;
				if ( nodeNum > 0)
				{
					err = GetNode(btreePtr, nodeNum, &left);
					M_ExitOnError(err);
				} else {
					err = fsBTStartOfIterationErr;
					goto ErrorExit;
				}
			}
			// Before we stomp on "right", we'd better release it if needed
			if (right.buffer != nil) {
				err = ReleaseNode(btreePtr, &right);
				M_ExitOnError(err);
			}
			right		= node;
			node		= left;
			left.buffer	= nil;
			index		= ((NodeDescPtr) node.buffer)->numRecords -1;
		}
	}
	else if (operation == kBTreeNextRecord)
	{
		if ((foundRecord != true) &&
			(((NodeDescPtr)node.buffer)->fLink == 0) &&
			(index == ((NodeDescPtr)node.buffer)->numRecords))
		{
			err = fsBTEndOfIterationErr;
			goto ErrorExit;
		}

		// we did not find the record but the index is already positioned correctly
		if ((foundRecord == false) && (index != ((NodeDescPtr)node.buffer)->numRecords))
			goto ProcessData;

		// we found the record OR we have to look in the next node
		if (index < ((NodeDescPtr)node.buffer)->numRecords -1)
		{
			++index;
		}
		else
		{
			if (right.buffer == nil)
			{
				nodeNum = ((NodeDescPtr)node.buffer)->fLink;
				if ( nodeNum > 0)
				{
					err = GetNode(btreePtr, nodeNum, &right);
					M_ExitOnError(err);
				} else {
					err = fsBTEndOfIterationErr;
					goto ErrorExit;
				}
			}
			// Before we stomp on "left", we'd better release it if needed
			if (left.buffer != nil) {
				err = ReleaseNode(btreePtr, &left);
				M_ExitOnError(err);
			}
			left		 = node;
			node		 = right;
			right.buffer = nil;
			index		 = 0;
		}
	}
	else // operation == kBTreeCurrentRecord
	{
		// make sure we have something...
		if ((foundRecord != true) &&
			(index >= ((NodeDescPtr)node.buffer)->numRecords))
		{
			err = fsBTEndOfIterationErr;
			goto ErrorExit;
		}
	}

	//////////////////// Process Records Using Callback ////////////////////////

ProcessData:
	err = GetRecordByIndex(btreePtr, node.buffer, index, &keyPtr, &recordPtr, &len);

	// keep walking forward until the callback says stop (returns 0) or we
	// run off the last leaf node
	while (err == 0) {
		if (callBackProc(keyPtr, recordPtr, len, callBackState) == 0)
			break;

		if ((index+1) < ((NodeDescPtr)node.buffer)->numRecords) {
			++index;
		} else {
			if (right.buffer == nil)
			{
				nodeNum = ((NodeDescPtr)node.buffer)->fLink;
				if ( nodeNum > 0)
				{
					err = GetNode(btreePtr, nodeNum, &right);
					M_ExitOnError(err);
				} else {
					err = fsBTEndOfIterationErr;
					break;
				}
			}
			// Before we stomp on "left", we'd better release it if needed
			if (left.buffer != nil) {
				err = ReleaseNode(btreePtr, &left);
				M_ExitOnError(err);
			}
			left		 = node;
			node		 = right;
			right.buffer = nil;
			index		 = 0;
		}
		err = GetRecordByIndex(btreePtr, node.buffer, index,
				       &keyPtr, &recordPtr, &len);
	}


	///////////////// Update Iterator to Last Item Processed /////////////////////


	if (iterator != nil)			// first & last have optional iterator
	{
		iterator->hint.writeCount	= btreePtr->writeCount;
		iterator->hint.nodeNum		= nodeNum;
		iterator->hint.index		= index;
		iterator->version		= 0;

		BlockMoveData((Ptr)keyPtr, (Ptr)&iterator->key, CalcKeySize(btreePtr, keyPtr));
	}
	M_ExitOnError(err);


	///////////////////////////// Release Nodes /////////////////////////////////

	err = ReleaseNode(btreePtr, &node);
	M_ExitOnError(err);

	if (left.buffer != nil)
	{
		err = ReleaseNode(btreePtr, &left);
		M_ExitOnError(err);
	}

	if (right.buffer != nil)
	{
		err = ReleaseNode(btreePtr, &right);
		M_ExitOnError(err);
	}

	return noErr;

	/////////////////////// Error - Clean Up and Exit ///////////////////////////

ErrorExit:

	(void) ReleaseNode(btreePtr, &left);
	(void) ReleaseNode(btreePtr, &node);
	(void) ReleaseNode(btreePtr, &right);

	if (iterator != nil)
	{
		iterator->hint.writeCount	= 0;
		iterator->hint.nodeNum		= 0;
		iterator->hint.index		= 0;
		iterator->version		= 0;
		iterator->key.length16		= 0;
	}

	if ( err == fsBTEmptyErr || err == fsBTEndOfIterationErr )
		err = fsBTRecordNotFoundErr;

	return err;
}


//////////////////////////////// BTInsertRecord /////////////////////////////////

OSStatus	BTInsertRecord		(FCB						*filePtr,
					 BTreeIterator				*iterator,
					 FSBufferDescriptor			*record,
					 UInt16						 recordLen )
{
	OSStatus				err;
	BTreeControlBlockPtr	btreePtr;
	TreePathTable			treePathTable;
	SInt32					nodesNeeded;
	BlockDescriptor			nodeRec;
	UInt32					insertNodeNum;
	UInt16					index;
	Boolean					recordFit;


	////////////////////////// Preliminary Checks ///////////////////////////////

	nodeRec.buffer = nil;					// so we can call ReleaseNode

	err = CheckInsertParams (filePtr, iterator, record, recordLen);
	if (err != noErr)
		return	err;

	LogStartTime(kTraceInsertBTreeRecord);

	btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;

	REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);


	///////////////////////// Find Insert Position //////////////////////////////

	// always call SearchTree for Insert
	err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index);

	switch (err)				// set/replace/insert decision point
	{
		case noErr:			err = fsBTDuplicateRecordErr;
						goto ErrorExit;

		case fsBTRecordNotFoundErr:	break;

		case fsBTEmptyErr:	// if tree empty add 1st leaf node

						if (btreePtr->freeNodes == 0)
						{
							err = ExtendBTree (btreePtr, btreePtr->totalNodes + 1);
							M_ExitOnError (err);
						}

						err = AllocateNode (btreePtr, &insertNodeNum);
						M_ExitOnError (err);

						err = GetNewNode (btreePtr, insertNodeNum, &nodeRec);
						M_ExitOnError (err);

						((NodeDescPtr)nodeRec.buffer)->kind		= kBTLeafNode;
						((NodeDescPtr)nodeRec.buffer)->height	= 1;

						recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, 0,
													 &iterator->key, KeyLength(btreePtr, &iterator->key),
													 record->bufferAddress, recordLen );
						if (recordFit != true)
						{
							err = fsBTRecordTooLargeErr;
							goto ErrorExit;
						}

						err = UpdateNode (btreePtr, &nodeRec, 0, kLockTransaction);
						M_ExitOnError (err);

						// update BTreeControlBlock
						btreePtr->treeDepth	 		= 1;
						btreePtr->rootNode	 		= insertNodeNum;
						btreePtr->firstLeafNode		= insertNodeNum;
						btreePtr->lastLeafNode		= insertNodeNum;
						M_BTreeHeaderDirty (btreePtr);

						goto Success;

		default:			goto ErrorExit;
	}

	// fast path: try to insert directly into the leaf SearchTree found
	if (index > 0)
	{
		recordFit = InsertKeyRecord (btreePtr, nodeRec.buffer, index,
										&iterator->key, KeyLength(btreePtr, &iterator->key),
										record->bufferAddress, recordLen);
		if (recordFit == true)
		{
			err = UpdateNode (btreePtr, &nodeRec, 0, kLockTransaction);
			M_ExitOnError (err);

			goto Success;
		}
	}

	/////////////////////// Extend File If Necessary ////////////////////////////

	nodesNeeded =  btreePtr->treeDepth + 1 - btreePtr->freeNodes;	// NOTE: math limit
	if (nodesNeeded > 0)
	{
		nodesNeeded += btreePtr->totalNodes;
		if (nodesNeeded > CalcMapBits (btreePtr))	// we'll need to add a map node too!
			++nodesNeeded;

		err = ExtendBTree (btreePtr, nodesNeeded);
		M_ExitOnError (err);
	}

	// no need to delete existing record

	err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress,
					  recordLen, &nodeRec, index, 1, kInsertRecord, &insertNodeNum);
	M_ExitOnError (err);


	//////////////////////////////// Success ////////////////////////////////////

Success:
	++btreePtr->writeCount;
	++btreePtr->leafRecords;
	M_BTreeHeaderDirty (btreePtr);

	// create hint
	iterator->hint.writeCount 	= btreePtr->writeCount;
	iterator->hint.nodeNum		= insertNodeNum;
	iterator->hint.index		= 0;						// unused
	iterator->hint.reserved1	= 0;
	iterator->hint.reserved2	= 0;

	LogEndTime(kTraceInsertBTreeRecord, noErr);

	return noErr;


	////////////////////////////// Error Exit ///////////////////////////////////

ErrorExit:

	(void) ReleaseNode (btreePtr, &nodeRec);

	iterator->hint.writeCount 	= 0;
	iterator->hint.nodeNum		= 0;
	iterator->hint.index		= 0;
	iterator->hint.reserved1	= 0;
	iterator->hint.reserved2	= 0;

	if (err == fsBTEmptyErr)
		err = fsBTRecordNotFoundErr;

	LogEndTime(kTraceInsertBTreeRecord, err);

	return err;
}


//////////////////////////////// BTReplaceRecord ////////////////////////////////

OSStatus	BTReplaceRecord		(FCB						*filePtr,
					 BTreeIterator				*iterator,
					 FSBufferDescriptor			*record,
					 UInt16						 recordLen )
{
	OSStatus				err;
	BTreeControlBlockPtr	btreePtr;
	TreePathTable			treePathTable;
	SInt32					nodesNeeded;
	BlockDescriptor			nodeRec;
	UInt32					insertNodeNum;
	UInt16					index;
	Boolean					recordFit;
	Boolean					validHint;


	////////////////////////// Preliminary Checks ///////////////////////////////

	nodeRec.buffer = nil;					// so we can call ReleaseNode

	err = CheckInsertParams (filePtr, iterator, record, recordLen);
	if (err != noErr)
		return err;

	LogStartTime(kTraceReplaceBTreeRecord);

	btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;

REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + ////////////////////////////// Take A Hint ////////////////////////////////// + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + if (validHint) + { + insertNodeNum = iterator->hint.nodeNum; + + err = GetNode (btreePtr, insertNodeNum, &nodeRec); + if( err == noErr ) + { + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec, 0, 0); + M_ExitOnError (err); + + ++btreePtr->numValidHints; + + goto Success; + } + else + { + (void) BTInvalidateHint( iterator ); + } + + err = ReleaseNode (btreePtr, &nodeRec); + M_ExitOnError (err); + } + else + { + (void) BTInvalidateHint( iterator ); + } + } + + + ////////////////////////////// Get A Clue /////////////////////////////////// + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &insertNodeNum, &nodeRec, &index); + M_ExitOnError (err); // record must exit for Replace + + // optimization - if simple replace will work then don't extend btree + // €€ if we tried this before, and failed because it wouldn't fit then we shouldn't try this again... + + err = TrySimpleReplace (btreePtr, nodeRec.buffer, iterator, record, recordLen, &recordFit); + M_ExitOnError (err); + + if (recordFit) + { + err = UpdateNode (btreePtr, &nodeRec, 0, 0); + M_ExitOnError (err); + + goto Success; + } + + + //////////////////////////// Make Some Room ///////////////////////////////// + + nodesNeeded = btreePtr->treeDepth + 1 - btreePtr->freeNodes; //€€ math limit + if (nodesNeeded > 0) + { + nodesNeeded += btreePtr->totalNodes; + if (nodesNeeded > CalcMapBits (btreePtr)) // we'll need to add a map node too! 
+ ++nodesNeeded; + + err = ExtendBTree (btreePtr, nodesNeeded); + M_ExitOnError (err); + } + + + DeleteRecord (btreePtr, nodeRec.buffer, index); // delete existing key/record + + err = InsertTree (btreePtr, treePathTable, &iterator->key, record->bufferAddress, + recordLen, &nodeRec, index, 1, kReplaceRecord, &insertNodeNum); + M_ExitOnError (err); + + ++btreePtr->writeCount; /* writeCount changes only if the tree structure changed */ + +Success: + // create hint + iterator->hint.writeCount = btreePtr->writeCount; + iterator->hint.nodeNum = insertNodeNum; + iterator->hint.index = 0; // unused + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + LogEndTime(kTraceReplaceBTreeRecord, noErr); + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &nodeRec); + + iterator->hint.writeCount = 0; + iterator->hint.nodeNum = 0; + iterator->hint.index = 0; + iterator->hint.reserved1 = 0; + iterator->hint.reserved2 = 0; + + + LogEndTime(kTraceReplaceBTreeRecord, err); + + return err; +} + + + +//////////////////////////////// BTDeleteRecord ///////////////////////////////// + +OSStatus BTDeleteRecord (FCB *filePtr, + BTreeIterator *iterator ) +{ + OSStatus err; + BTreeControlBlockPtr btreePtr; + TreePathTable treePathTable; + BlockDescriptor nodeRec; + UInt32 nodeNum; + UInt16 index; + + LogStartTime(kTraceDeleteBTreeRecord); + + ////////////////////////// Priliminary Checks /////////////////////////////// + + nodeRec.buffer = nil; // so we can call ReleaseNode + + M_ReturnErrorIf (filePtr == nil, paramErr); + M_ReturnErrorIf (iterator == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) + { + err = fsBTInvalidFileErr; + goto ErrorExit; + } + + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false); + + + /////////////////////////////// Find Key //////////////////////////////////// + + //€€ check hint for simple delete case (index 
> 0, numRecords > 2) + + err = SearchTree (btreePtr, &iterator->key, treePathTable, &nodeNum, &nodeRec, &index); + M_ExitOnError (err); // record must exit for Delete + + + ///////////////////////////// Delete Record ///////////////////////////////// + + err = DeleteTree (btreePtr, treePathTable, &nodeRec, index, 1); + M_ExitOnError (err); + + ++btreePtr->writeCount; + --btreePtr->leafRecords; + M_BTreeHeaderDirty (btreePtr); + + iterator->hint.nodeNum = 0; + + LogEndTime(kTraceDeleteBTreeRecord, noErr); + + return noErr; + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, &nodeRec); + + LogEndTime(kTraceDeleteBTreeRecord, err); + + return err; +} + + + +OSStatus BTGetInformation (FCB *filePtr, + UInt16 version, + BTreeInfoRec *info ) +{ +#pragma unused (version) + + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + /* + * XXX SER + * This should not require the whole tree to be locked, just maybe the BTreeControlBlockPtr + * + * REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + */ + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + M_ReturnErrorIf (info == nil, paramErr); + + //€€ check version? + + info->nodeSize = btreePtr->nodeSize; + info->maxKeyLength = btreePtr->maxKeyLength; + info->treeDepth = btreePtr->treeDepth; + info->numRecords = btreePtr->leafRecords; + info->numNodes = btreePtr->totalNodes; + info->numFreeNodes = btreePtr->freeNodes; + info->lastfsync = btreePtr->lastfsync; + info->reserved = 0; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- +Routine: BTFlushPath - Flush BTreeControlBlock to Header Node. 
+
+Function: Writes the in-memory BTreeControlBlock state back to the B*Tree header node.
+
+
+Input: filePtr - pointer to file control block for B*Tree file to flush
+
+Output: none
+
+Result: noErr - success
+ != noErr - failure
+-------------------------------------------------------------------------------*/
+
+OSStatus BTFlushPath (FCB *filePtr)
+{
+ OSStatus err;
+ BTreeControlBlockPtr btreePtr;
+
+
+ LogStartTime(kTraceFlushBTree);
+
+ M_ReturnErrorIf (filePtr == nil, paramErr);
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+
+ M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr);
+
+ REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);
+
+ err = UpdateHeader (btreePtr, false);
+
+ LogEndTime(kTraceFlushBTree, err);
+
+ return err;
+}
+
+
+/*-------------------------------------------------------------------------------
+Routine: BTReload - Reload B-tree Header Data.
+
+Function: Reload B-tree header data from disk. This is called after fsck
+ has made repairs to the root filesystem. The filesystem is
+ mounted read-only when BTReload is called. 
+
+
+Input: filePtr - the B*Tree file that needs its header updated
+
+Output: none
+
+Result: noErr - success
+ != noErr - failure
+-------------------------------------------------------------------------------*/
+
+OSStatus
+BTReloadData(FCB *filePtr)
+{
+ OSStatus err;
+ BTreeControlBlockPtr btreePtr;
+ BlockDescriptor node;
+ BTHeaderRec *header;
+
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+ if (btreePtr == nil)
+ return (fsBTInvalidFileErr);
+
+ REQUIRE_FILE_LOCK(btreePtr->fileRefNum, false);
+
+ err = GetNode(btreePtr, kHeaderNodeNum, &node);
+ if (err != noErr)
+ return (err);
+
+ header = (BTHeaderRec*)((char *)node.buffer + sizeof(BTNodeDescriptor));
+ if ((err = VerifyHeader (filePtr, header)) == 0) {
+ btreePtr->treeDepth = header->treeDepth;
+ btreePtr->rootNode = header->rootNode;
+ btreePtr->leafRecords = header->leafRecords;
+ btreePtr->firstLeafNode = header->firstLeafNode;
+ btreePtr->lastLeafNode = header->lastLeafNode;
+ btreePtr->maxKeyLength = header->maxKeyLength;
+ btreePtr->totalNodes = header->totalNodes;
+ btreePtr->freeNodes = header->freeNodes;
+ btreePtr->btreeType = header->btreeType;
+
+ btreePtr->flags &= (~kBTHeaderDirty);
+ }
+
+ (void) ReleaseNode(btreePtr, &node);
+
+ return err;
+}
+
+
+/*-------------------------------------------------------------------------------
+Routine: BTInvalidateHint - Invalidates the hint within a BTreeIterator.
+
+Function: Invalidates the hint within a BTreeIterator. 
+
+
+Input: iterator - pointer to BTreeIterator
+
+Output: iterator - iterator with the hint.nodeNum cleared
+
+Result: noErr - success
+ paramErr - iterator == nil
+-------------------------------------------------------------------------------*/
+
+
+OSStatus BTInvalidateHint (BTreeIterator *iterator )
+{
+ if (iterator == nil)
+ return paramErr;
+
+ iterator->hint.nodeNum = 0;
+
+ return noErr;
+}
+
+
+
+
+/*-------------------------------------------------------------------------------
+Routine: BTGetLastSync
+
+Function: Returns the last time that this btree was flushed, does not include header.
+
+Input: filePtr - pointer file control block
+
+Output: lastfsync - time in seconds of last update
+
+Result: noErr - success
+ paramErr - filePtr or lastsync == nil
+-------------------------------------------------------------------------------*/
+
+
+OSStatus BTGetLastSync (FCB *filePtr,
+ UInt32 *lastsync)
+{
+ BTreeControlBlockPtr btreePtr;
+
+
+ M_ReturnErrorIf (filePtr == nil, paramErr);
+
+ btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr;
+
+ /* Maybe instead of requiring a lock..an atomic set might be more appropriate */
+ REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true);
+
+ M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr);
+ M_ReturnErrorIf (lastsync == nil, paramErr);
+
+ *lastsync = btreePtr->lastfsync;
+
+ return noErr;
+}
+
+
+
+
+/*-------------------------------------------------------------------------------
+Routine: BTSetLastSync
+
+Function: Sets the last time that this btree was flushed, does not include header. 
+ + +Input: fcb - pointer file control block + +Output: lastfsync - time in seconds of last update + +Result: noErr - success + paramErr - iterator == nil +-------------------------------------------------------------------------------*/ + + +OSStatus BTSetLastSync (FCB *filePtr, + UInt32 lastsync) +{ + BTreeControlBlockPtr btreePtr; + + + M_ReturnErrorIf (filePtr == nil, paramErr); + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + + /* Maybe instead of requiring a lock..an atomic set might be more appropriate */ + REQUIRE_FILE_LOCK(btreePtr->fileRefNum, true); + + M_ReturnErrorIf (btreePtr == nil, fsBTInvalidFileErr); + M_ReturnErrorIf (lastsync == nil, paramErr); + + btreePtr->lastfsync = lastsync; + + return noErr; +} + + diff --git a/bsd/hfs/hfscommon/BTree/BTreeAllocate.c b/bsd/hfs/hfscommon/BTree/BTreeAllocate.c new file mode 100644 index 000000000..46d23cecb --- /dev/null +++ b/bsd/hfs/hfscommon/BTree/BTreeAllocate.c @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeAllocate.c + + Contains: BTree Node Allocation routines for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (djb) Don Brady + (ser) Scott Roberts + (msd) Mark Day + + Change History (most recent first): + + 6/1/99 djb Sync up with Mac OS 8.6. + 11/24/97 djb Remove some debug code (Panic calls). + 7/24/97 djb CallbackProcs now take refnum instead of an FCB. + 4/23/97 djb first checked in + + 2/19/97 djb Change E_BadNodeType to fsBTBadNodeType. + 12/19/96 djb first checked in + + History applicable to original Scarecrow Design: + + <4> 10/25/96 ser Changing for new VFPI + <3> 10/18/96 ser Converting over VFPI changes + <2> 1/10/96 msd Change 64-bit math to use real function names from Math64.i. + <1> 10/18/95 rst Moved from Scarecrow project. + + <8> 1/12/95 wjk Adopt Model FileSystem changes in D5. + <7> 9/30/94 prp Get in sync with D2 interface changes. + <6> 7/22/94 wjk Convert to the new set of header files. + <5> 8/31/93 prp Use U64SetU instead of S64Set. + <4> 5/21/93 gs Fix ExtendBTree bug. + <3> 5/10/93 gs Fix pointer arithmetic bug in AllocateNode. + <2> 3/23/93 gs finish ExtendBTree routine. 
+ <1> 2/8/93 gs first checked in + <0> 1/1/93 gs begin AllocateNode and FreeNode + +*/ + +#include "../../hfs_endian.h" +#include "../headers/BTreesPrivate.h" + +///////////////////// Routines Internal To BTreeAllocate.c ////////////////////// + +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + UInt16 **mapPtr, + UInt16 *mapSize ); + +///////////////////////////////////////////////////////////////////////////////// + +/*------------------------------------------------------------------------------- + +Routine: AllocateNode - Find Free Node, Mark It Used, and Return Node Number. + +Function: Searches the map records for the first free node, marks it "in use" and + returns the node number found. This routine should really only be called + when we know there are free blocks, otherwise it's just a waste of time. + +Note: We have to examine map nodes a word at a time rather than a long word + because the External BTree Mgr used map records that were not an integral + number of long words. Too bad. In our spare time could develop a more + sophisticated algorithm that read map records by long words (and long + word aligned) and handled the spare bytes at the beginning and end + appropriately. 
+ +Input: btreePtr - pointer to control block for BTree file + +Output: nodeNum - number of node allocated + + +Result: noErr - success + fsBTNoMoreMapNodesErr - no free blocks were found + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, UInt32 *nodeNum) +{ + OSStatus err; + BlockDescriptor node; + UInt16 *mapPtr, *pos; + UInt16 mapSize, size; + UInt16 freeWord; + UInt16 mask; + UInt16 bitOffset; + UInt32 nodeNumber; + + + nodeNumber = 0; // first node number of header map record + node.buffer = nil; // clear node.buffer to get header node + // - and for ErrorExit + + while (true) + { + err = GetMapNode (btreePtr, &node, &mapPtr, &mapSize); + M_ExitOnError (err); + + //////////////////////// Find Word with Free Bit //////////////////////////// + + pos = mapPtr; + size = mapSize; + size >>= 1; // convert to number of words + //€€ assumes mapRecords contain an integral number of words + + while ( size-- ) + { + if ( *pos++ != 0xFFFF ) // assume test fails, and increment pos + break; + } + + --pos; // whoa! backup + + if (*pos != 0xFFFF) // hey, we got one! + break; + + nodeNumber += mapSize << 3; // covert to number of bits (nodes) + } + + ///////////////////////// Find Free Bit in Word ///////////////////////////// + + freeWord = SWAP_BE16 (*pos); + bitOffset = 15; + mask = 0x8000; + + do { + if ( (freeWord & mask) == 0) + break; + mask >>= 1; + } while (--bitOffset); + + ////////////////////// Calculate Free Node Number /////////////////////////// + + nodeNumber += ((pos - mapPtr) << 4) + (15 - bitOffset); // (pos-mapPtr) = # of words! 
+ + + ///////////////////////// Check for End of Map ////////////////////////////// + + if (nodeNumber >= btreePtr->totalNodes) + { + err = fsBTFullErr; + goto ErrorExit; + } + + /////////////////////////// Allocate the Node /////////////////////////////// + + *pos |= SWAP_BE16 (mask); // set the map bit for the node + + err = UpdateNode (btreePtr, &node, 0, kLockTransaction); + M_ExitOnError (err); + + --btreePtr->freeNodes; + btreePtr->flags |= kBTHeaderDirty; + *nodeNum = nodeNumber; + + return noErr; + +////////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &node); + *nodeNum = 0; + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: FreeNode - Clear allocation bit for node. + +Function: Finds the bit representing the node specified by nodeNum in the node + map and clears the bit. + + +Input: btreePtr - pointer to control block for BTree file + nodeNum - number of node to mark free + +Output: none + +Result: noErr - success + fsBTNoMoreMapNodesErr - node number is beyond end of node map + != noErr - GetNode or ReleaseNode encountered some difficulty +-------------------------------------------------------------------------------*/ + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, UInt32 nodeNum) +{ + OSStatus err; + BlockDescriptor node; + UInt32 nodeIndex; + UInt16 mapSize; + UInt16 *mapPos; + UInt16 bitOffset; + + + //////////////////////////// Find Map Record //////////////////////////////// + nodeIndex = 0; // first node number of header map record + node.buffer = nil; // invalidate node.buffer to get header node + + while (nodeNum >= nodeIndex) + { + err = GetMapNode (btreePtr, &node, &mapPos, &mapSize); + M_ExitOnError (err); + + nodeIndex += mapSize << 3; // covert to number of bits (nodes) + } + + //////////////////////////// Mark Node Free ///////////////////////////////// + + nodeNum -= (nodeIndex - (mapSize 
<< 3)); // relative to this map record
+ bitOffset = 15 - (nodeNum & 0x0000000F); // last 4 bits are bit offset
+ mapPos += nodeNum >> 4; // point to word containing map bit
+
+ M_SWAP_BE16_ClearBitNum (*mapPos, bitOffset); // clear it
+
+ err = UpdateNode (btreePtr, &node, 0, kLockTransaction);
+ M_ExitOnError (err);
+
+ ++btreePtr->freeNodes;
+ btreePtr->flags |= kBTHeaderDirty; // how about a macro for this
+
+ return noErr;
+
+ErrorExit:
+
+ (void) ReleaseNode (btreePtr, &node);
+
+ return err;
+}
+
+
+
+/*-------------------------------------------------------------------------------
+
+Routine: ExtendBTree - Call FSAgent to extend file, and allocate necessary map nodes.
+
+Function: This routine calls the FSAgent to extend the end of fork, if necessary,
+ to accommodate the number of nodes requested. It then allocates as many
+ map nodes as are necessary to account for all the nodes in the B*Tree.
+ If newTotalNodes is less than the current number of nodes, no action is
+ taken.
+
+Note: Internal HFS File Manager BTree Module counts on an integral number of
+ long words in map records, although they are not long word aligned. 
+ +Input: btreePtr - pointer to control block for BTree file + newTotalNodes - total number of nodes the B*Tree is to extended to + +Output: none + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + UInt32 newTotalNodes ) +{ + OSStatus err; + FCB *filePtr; + FSSize minEOF, maxEOF; + UInt16 nodeSize; + UInt32 oldTotalNodes; + UInt32 newMapNodes; + UInt32 mapBits, totalMapBits; + UInt32 recStartBit; + UInt32 nodeNum, nextNodeNum; + UInt32 firstNewMapNodeNum, lastNewMapNodeNum; + BlockDescriptor mapNode, newNode; + UInt16 *mapPos; + UInt16 *mapStart; + UInt16 mapSize; + UInt16 mapNodeRecSize; + UInt32 bitInWord, bitInRecord; + UInt16 mapIndex; + + + oldTotalNodes = btreePtr->totalNodes; + if (newTotalNodes <= oldTotalNodes) // we're done! + return noErr; + + nodeSize = btreePtr->nodeSize; + filePtr = GetFileControlBlock(btreePtr->fileRefNum); + + mapNode.buffer = nil; + newNode.buffer = nil; + + mapNodeRecSize = nodeSize - sizeof(BTNodeDescriptor) - 6; // 2 bytes of free space (see note) + + // update for proper 64 bit arithmetic!! + + + //////////////////////// Count Bits In Node Map ///////////////////////////// + + totalMapBits = 0; + do { + err = GetMapNode (btreePtr, &mapNode, &mapStart, &mapSize); + M_ExitOnError (err); + + mapBits = mapSize << 3; // mapSize (in bytes) * 8 + recStartBit = totalMapBits; // bit number of first bit in map record + totalMapBits += mapBits; + + } while ( ((BTNodeDescriptor*)mapNode.buffer)->fLink != 0 ); + + if (DEBUG_BUILD && totalMapBits != CalcMapBits (btreePtr)) + Panic ("\pExtendBTree: totalMapBits != CalcMapBits"); + + /////////////////////// Extend LEOF If Necessary //////////////////////////// + + minEOF = newTotalNodes * nodeSize; + if ( filePtr->fcbEOF < minEOF ) + { + // + // ???? Does this B*Tree pack stop working when LEOF > 2^32-1? 
+ // + maxEOF = ((UInt32)0xFFFFFFFFL); + + err = btreePtr->setEndOfForkProc (btreePtr->fileRefNum, minEOF, maxEOF); + M_ExitOnError (err); + } + + + //////////////////// Calc New Total Number Of Nodes ///////////////////////// + + newTotalNodes = filePtr->fcbEOF / nodeSize; // hack! + // do we wish to perform any verification of newTotalNodes at this point? + + btreePtr->totalNodes = newTotalNodes; // do we need to update freeNodes here too? + + + ////////////// Calculate Number Of New Map Nodes Required /////////////////// + + newMapNodes = 0; + if (newTotalNodes > totalMapBits) + { + newMapNodes = (((newTotalNodes - totalMapBits) >> 3) / mapNodeRecSize) + 1; + firstNewMapNodeNum = oldTotalNodes; + lastNewMapNodeNum = firstNewMapNodeNum + newMapNodes - 1; + } + else + { + err = ReleaseNode (btreePtr, &mapNode); + M_ExitOnError (err); + + goto Success; + } + + + /////////////////////// Initialize New Map Nodes //////////////////////////// + + ((BTNodeDescriptor*)mapNode.buffer)->fLink = firstNewMapNodeNum; + + nodeNum = firstNewMapNodeNum; + while (true) + { + err = GetNewNode (btreePtr, nodeNum, &newNode); + M_ExitOnError (err); + + ((NodeDescPtr)newNode.buffer)->numRecords = 1; + ((NodeDescPtr)newNode.buffer)->kind = kBTMapNode; + + // set free space offset + *(UInt16 *)((Ptr)newNode.buffer + nodeSize - 4) = nodeSize - 6; + + if (nodeNum++ == lastNewMapNodeNum) + break; + + ((BTNodeDescriptor*)newNode.buffer)->fLink = nodeNum; // point to next map node + + err = UpdateNode (btreePtr, &newNode, 0, kLockTransaction); + M_ExitOnError (err); + } + + err = UpdateNode (btreePtr, &newNode, 0, kLockTransaction); + M_ExitOnError (err); + + + ///////////////////// Mark New Map Nodes Allocated ////////////////////////// + + nodeNum = firstNewMapNodeNum; + do { + bitInRecord = nodeNum - recStartBit; + + while (bitInRecord >= mapBits) + { + nextNodeNum = ((NodeDescPtr)mapNode.buffer)->fLink; + if ( nextNodeNum == 0) + { + err = fsBTNoMoreMapNodesErr; + goto ErrorExit; + } + + 
err = UpdateNode (btreePtr, &mapNode, 0, kLockTransaction); + M_ExitOnError (err); + + err = GetNode (btreePtr, nextNodeNum, &mapNode); + M_ExitOnError (err); + + mapIndex = 0; + + mapStart = (UInt16 *) GetRecordAddress (btreePtr, mapNode.buffer, mapIndex); + mapSize = GetRecordSize (btreePtr, mapNode.buffer, mapIndex); + + if (DEBUG_BUILD && mapSize != M_MapRecordSize (btreePtr->nodeSize) ) + { + Panic ("\pExtendBTree: mapSize != M_MapRecordSize"); + } + + mapBits = mapSize << 3; // mapSize (in bytes) * 8 + recStartBit = totalMapBits; // bit number of first bit in map record + totalMapBits += mapBits; + + bitInRecord = nodeNum - recStartBit; + } + + mapPos = mapStart + ((nodeNum - recStartBit) >> 4); + bitInWord = 15 - ((nodeNum - recStartBit) & 0x0000000F); + + M_SWAP_BE16_SetBitNum (*mapPos, bitInWord); + + ++nodeNum; + + } while (nodeNum <= lastNewMapNodeNum); + + err = UpdateNode (btreePtr, &mapNode, 0, kLockTransaction); + M_ExitOnError (err); + + + //////////////////////////////// Success //////////////////////////////////// + +Success: + + btreePtr->totalNodes = newTotalNodes; + btreePtr->freeNodes += (newTotalNodes - oldTotalNodes) - newMapNodes; + + btreePtr->flags |= kBTHeaderDirty; //€€ how about a macro for this + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, &mapNode); + (void) ReleaseNode (btreePtr, &newNode); + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetMapNode - Get the next map node and pointer to the map record. + +Function: Given a BlockDescriptor to a map node in nodePtr, GetMapNode releases + it and gets the next node. If nodePtr->buffer is nil, then the header + node is retrieved. 
+ + +Input: btreePtr - pointer to control block for BTree file + nodePtr - pointer to a BlockDescriptor of a map node + +Output: nodePtr - pointer to the BlockDescriptor for the next map node + mapPtr - pointer to the map record within the map node + mapSize - number of bytes in the map record + +Result: noErr - success + fsBTNoMoreMapNodesErr - we've run out of map nodes + fsBTInvalidNodeErr - bad node, or not node type kMapNode + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + UInt16 **mapPtr, + UInt16 *mapSize ) +{ + OSStatus err; + UInt16 mapIndex; + UInt32 nextNodeNum; + + if (nodePtr->buffer != nil) // if iterator is valid... + { + nextNodeNum = ((NodeDescPtr)nodePtr->buffer)->fLink; + if (nextNodeNum == 0) + { + err = fsBTNoMoreMapNodesErr; + goto ErrorExit; + } + + err = ReleaseNode (btreePtr, nodePtr); + M_ExitOnError (err); + + err = GetNode (btreePtr, nextNodeNum, nodePtr); + M_ExitOnError (err); + + if ( ((NodeDescPtr)nodePtr->buffer)->kind != kBTMapNode) + { + err = fsBTBadNodeType; + goto ErrorExit; + } + + ++btreePtr->numMapNodesRead; + mapIndex = 0; + } else { + err = GetNode (btreePtr, kHeaderNodeNum, nodePtr); + M_ExitOnError (err); + + if ( ((NodeDescPtr)nodePtr->buffer)->kind != kBTHeaderNode) + { + err = fsBTInvalidHeaderErr; //€€ or fsBTBadNodeType + goto ErrorExit; + } + + mapIndex = 2; + } + + + *mapPtr = (UInt16 *) GetRecordAddress (btreePtr, nodePtr->buffer, mapIndex); + *mapSize = GetRecordSize (btreePtr, nodePtr->buffer, mapIndex); + + return noErr; + + +ErrorExit: + + (void) ReleaseNode (btreePtr, nodePtr); + + *mapPtr = nil; + *mapSize = 0; + + return err; +} + + + +////////////////////////////////// CalcMapBits ////////////////////////////////// + +UInt32 CalcMapBits (BTreeControlBlockPtr btreePtr) +{ + UInt32 mapBits; + + mapBits = M_HeaderMapRecordSize (btreePtr->nodeSize) << 3; + + while 
(mapBits < btreePtr->totalNodes) + mapBits += M_MapRecordSize (btreePtr->nodeSize) << 3; + + return mapBits; +} diff --git a/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c b/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c new file mode 100644 index 000000000..93828720a --- /dev/null +++ b/bsd/hfs/hfscommon/BTree/BTreeMiscOps.c @@ -0,0 +1,646 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeMiscOps.c + + Contains: Miscellaneous operations for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (DSH) Deric Horn + (msd) Mark Day + (djb) Don Brady + + Change History (most recent first): + + 6/1/99 djb Sync up with Mac OS 8.6. + 9/4/97 djb Optimize TrySimpleReplace for the case where record size is not + changing. + 4/23/97 djb first checked in + + 3/31/97 djb Move ClearMemory to Utilities.c. 
+ 3/17/97 DSH Casting for DFA + 2/27/97 msd Remove temporary fix from last revision. BTree EOF's should be + correct now, so check for strict equality. + 2/26/97 msd Fix a casting problem in ClearMemory. TEMPORARY FIX: Made + VerifyHeader more lenient, allowing the EOF to be greater than + the amount actually used by nodes; this should really be fixed + in the formatting code (which needs to compute the real BTree + sizes before writing the volume header). + 2/19/97 djb Added ClearMemory. Changed CalcKeyLength to KeyLength. + 1/3/97 djb Added support for large keys. + 12/19/96 djb first checked in + + History applicable to original Scarecrow Design: + + <9> 10/25/96 ser Changing for new VFPI + <8> 10/18/96 ser Converting over VFPI changes + <7> 9/17/96 dkh More BTree statistics. Change IsItAHint to not always check to + see if the hint node is allocated. + <6> 9/16/96 dkh Revised BTree statistics. + <5> 6/20/96 dkh Radar #1358740. Change from using Pools to debug MemAllocators. + <4> 1/22/96 dkh Change Pools.i inclusion to PoolsPriv.i + <3> 1/10/96 msd Change 64-bit math to use real function names from Math64.i. + <2> 12/7/95 dkh D10E2 build. Changed usage of Ref data type to LogicalAddress. + <1> 10/18/95 rst Moved from Scarecrow project. + + <19> 4/26/95 prp In UpdateHeader, clear the dirty flag after the BTree is updated. + <18> 1/12/95 wjk Adopt Model FileSystem changes in D5. + <17> 11/16/94 prp Add IsItAHint routine and use it whenever hint's node number was + used for testing. + <16> 10/5/94 bk add pools.h include file + <15> 9/30/94 prp Get in sync with D2 interface changes. + <14> 7/22/94 wjk Convert to the new set of header files. + <13> 12/2/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <12> 11/30/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <11> 11/23/93 wjk Changes required to compile on the RS6000. + <10> 8/31/93 prp Use U64SetU instead of S64Set. 
+ <9> 6/2/93 gs Update for changes to FSErrors.h and add some comments. + <8> 5/21/93 gs Modify UpdateHeader to write out attributes. Remove + Get/UpdateNode from TrySimpleReplace. + <7> 5/10/93 gs Add TrySimpleReplace routine. + <6> 3/23/93 gs Change MoveData to take void * instead of Ptr. Add UpdateHeader + and ClearBytes routines. + <5> 2/8/93 gs Add FindIteratorPosition. + <4> 12/10/92 gs Implement CheckKeyDescriptor and the KeyDescriptor interpreter. + <3> 12/8/92 gs Add GetKeyDescriptor, VerifyHeader, and Alloc/Dealloc memory + routines. + <2> 12/2/92 gs Add CompareKeys routine. + <1> 11/15/92 gs first checked in + +*/ + +#include "../headers/BTreesPrivate.h" + + +////////////////////////////// Routine Definitions ////////////////////////////// + +/*------------------------------------------------------------------------------- +Routine: CalcKeyRecordSize - Return size of combined key/record structure. + +Function: Rounds keySize and recSize so they will end on word boundaries. + Does NOT add size of offset. + +Input: keySize - length of key (including length field) + recSize - length of record data + +Output: none + +Result: UInt16 - size of combined key/record that will be inserted in btree +-------------------------------------------------------------------------------*/ + +UInt16 CalcKeyRecordSize (UInt16 keySize, + UInt16 recSize ) +{ + if ( M_IsOdd (keySize) ) keySize += 1; // pad byte + + if (M_IsOdd (recSize) ) recSize += 1; // pad byte + + return (keySize + recSize); +} + + + +/*------------------------------------------------------------------------------- +Routine: VerifyHeader - Validate fields of the BTree header record. + +Function: Examines the fields of the BTree header record to determine if the + fork appears to contain a valid BTree. 
+ +Input: forkPtr - pointer to fork control block + header - pointer to BTree header + + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus VerifyHeader (FCB *filePtr, + BTHeaderRec *header ) +{ + UInt32 forkSize; + UInt32 totalNodes; + + + switch (header->nodeSize) // node size == 512*2^n + { + case 512: + case 1024: + case 2048: + case 4096: + case 8192: + case 16384: + case 32768: break; + default: return fsBTInvalidHeaderErr; //€€ E_BadNodeType + } + + totalNodes = header->totalNodes; + + forkSize = totalNodes * header->nodeSize; + + if ( forkSize != filePtr->fcbEOF ) + return fsBTInvalidHeaderErr; + + if ( header->freeNodes >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->rootNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->firstLeafNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->lastLeafNode >= totalNodes ) + return fsBTInvalidHeaderErr; + + if ( header->treeDepth > kMaxTreeDepth ) + return fsBTInvalidHeaderErr; + + + /////////////////////////// Check BTree Type //////////////////////////////// + + switch (header->btreeType) + { + case 0: // HFS Type - no Key Descriptor + case kUserBTreeType: // with Key Descriptors etc. + case kReservedBTreeType: // Desktop Mgr BTree ? + break; + + default: return fsBTUnknownVersionErr; + } + + return noErr; +} + + + +/*------------------------------------------------------------------------------- +Routine: UpdateHeader - Write BTreeInfoRec fields to Header node. + +Function: Checks the kBTHeaderDirty flag in the BTreeInfoRec and updates the + header node if necessary. 
+ +Input: btreePtr - pointer to BTreeInfoRec + + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus UpdateHeader(BTreeControlBlockPtr btreePtr, Boolean forceWrite) +{ + OSStatus err; + BlockDescriptor node; + BTHeaderRec *header; + UInt32 options; + + + if ((btreePtr->flags & kBTHeaderDirty) == 0) // btree info already flushed + return noErr; + + + err = GetNode (btreePtr, kHeaderNodeNum, &node ); + if (err != noErr) + return err; + + header = (BTHeaderRec*) ((char *)node.buffer + sizeof(BTNodeDescriptor)); + + header->treeDepth = btreePtr->treeDepth; + header->rootNode = btreePtr->rootNode; + header->leafRecords = btreePtr->leafRecords; + header->firstLeafNode = btreePtr->firstLeafNode; + header->lastLeafNode = btreePtr->lastLeafNode; + header->nodeSize = btreePtr->nodeSize; //€€ this shouldn't change + header->maxKeyLength = btreePtr->maxKeyLength; //€€ neither should this + header->totalNodes = btreePtr->totalNodes; + header->freeNodes = btreePtr->freeNodes; + header->btreeType = btreePtr->btreeType; + + // ignore header->clumpSize; //€€ rename this field? + + if (forceWrite) + options = kForceWriteBlock; + else + options = kLockTransaction; + + err = UpdateNode (btreePtr, &node, 0, options); + + btreePtr->flags &= (~kBTHeaderDirty); + + return err; +} + + + +/*------------------------------------------------------------------------------- +Routine: FindIteratorPosition - One_line_description. 
+ +Function: Brief_description_of_the_function_and_any_side_effects + +Algorithm: see FSC.BT.BTIterateRecord.PICT + +Note: //€€ document side-effects of bad node hints + +Input: btreePtr - description + iterator - description + + +Output: iterator - description + left - description + middle - description + right - description + nodeNum - description + returnIndex - description + foundRecord - description + + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + UInt32 *returnNodeNum, + UInt16 *returnIndex, + Boolean *foundRecord ) +{ + OSStatus err; + Boolean foundIt; + UInt32 nodeNum; + UInt16 leftIndex, index, rightIndex; + Boolean validHint; + + // assume btreePtr valid + // assume left, middle, right point to BlockDescriptors + // assume nodeNum points to UInt32 + // assume index points to UInt16 + // assume foundRecord points to Boolean + + left->buffer = nil; + middle->buffer = nil; + right->buffer = nil; + + foundIt = false; + + if (iterator == nil) // do we have an iterator? + { + err = fsBTInvalidIteratorErr; + goto ErrorExit; + } + + err = IsItAHint (btreePtr, iterator, &validHint); + M_ExitOnError (err); + + nodeNum = iterator->hint.nodeNum; + if (! validHint) // does the hint appear to be valid? 
+ { + goto SearchTheTree; + } + + err = GetNode (btreePtr, nodeNum, middle); + if( err == fsBTInvalidNodeErr ) // returned if nodeNum is out of range + goto SearchTheTree; + + M_ExitOnError (err); + + if ( ((NodeDescPtr) middle->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) middle->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + ++btreePtr->numValidHints; + + foundIt = SearchNode (btreePtr, middle->buffer, &iterator->key, &index); + if (foundIt == true) + { + goto SuccessfulExit; + } + + if (index == 0) + { + if (((NodeDescPtr) middle->buffer)->bLink == 0) // before 1st btree record + { + goto SuccessfulExit; + } + + nodeNum = ((NodeDescPtr) middle->buffer)->bLink; + + err = GetLeftSiblingNode (btreePtr, middle->buffer, left); + M_ExitOnError (err); + + if ( ((NodeDescPtr) left->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) left->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, left->buffer, &iterator->key, &leftIndex); + if (foundIt == true) + { + *right = *middle; + *middle = *left; + left->buffer = nil; + index = leftIndex; + + goto SuccessfulExit; + } + + if (leftIndex == 0) // we're lost! + { + goto SearchTheTree; + } + else if (leftIndex >= ((NodeDescPtr) left->buffer)->numRecords) + { + nodeNum = ((NodeDescPtr) left->buffer)->fLink; + + PanicIf (index != 0, "\pFindIteratorPosition: index != 0"); //€€ just checking... 
+ goto SuccessfulExit; + } + else + { + *right = *middle; + *middle = *left; + left->buffer = nil; + index = leftIndex; + + goto SuccessfulExit; + } + } + else if (index >= ((NodeDescPtr) middle->buffer)->numRecords) + { + if (((NodeDescPtr) middle->buffer)->fLink == 0) // beyond last record + { + goto SuccessfulExit; + } + + nodeNum = ((NodeDescPtr) middle->buffer)->fLink; + + err = GetRightSiblingNode (btreePtr, middle->buffer, right); + M_ExitOnError (err); + + if ( ((NodeDescPtr) right->buffer)->kind != kBTLeafNode || + ((NodeDescPtr) right->buffer)->numRecords <= 0 ) + { + goto SearchTheTree; + } + + foundIt = SearchNode (btreePtr, right->buffer, &iterator->key, &rightIndex); + if (rightIndex >= ((NodeDescPtr) right->buffer)->numRecords) // we're lost + { + goto SearchTheTree; + } + else // we found it, or rightIndex==0, or rightIndex<numRecs + { + *left = *middle; + *middle = *right; + right->buffer = nil; + index = rightIndex; + + goto SuccessfulExit; + } + } + + + //////////////////////////// Search The Tree //////////////////////////////// + +SearchTheTree: + { + TreePathTable treePathTable; // so we only use stack space if we need to + + err = ReleaseNode (btreePtr, left); M_ExitOnError (err); + err = ReleaseNode (btreePtr, middle); M_ExitOnError (err); + err = ReleaseNode (btreePtr, right); M_ExitOnError (err); + + err = SearchTree ( btreePtr, &iterator->key, treePathTable, &nodeNum, middle, &index); + switch (err) //€€ separate find condition from exceptions + { + case noErr: foundIt = true; break; + case fsBTRecordNotFoundErr: break; + default: goto ErrorExit; + } + } + + /////////////////////////////// Success!
//////////////////////////////////// + +SuccessfulExit: + + *returnNodeNum = nodeNum; + *returnIndex = index; + *foundRecord = foundIt; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + (void) ReleaseNode (btreePtr, left); + (void) ReleaseNode (btreePtr, middle); + (void) ReleaseNode (btreePtr, right); + + *returnNodeNum = 0; + *returnIndex = 0; + *foundRecord = false; + + return err; +} + + + +/////////////////////////////// CheckInsertParams /////////////////////////////// + +OSStatus CheckInsertParams (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ) +{ + BTreeControlBlockPtr btreePtr; + + if (filePtr == nil) return paramErr; + + btreePtr = (BTreeControlBlockPtr) filePtr->fcbBTCBPtr; + if (btreePtr == nil) return fsBTInvalidFileErr; + if (iterator == nil) return paramErr; + if (record == nil) return paramErr; + + // check total key/record size limit + if ( CalcKeyRecordSize (CalcKeySize(btreePtr, &iterator->key), recordLen) > (btreePtr->nodeSize >> 1)) + return fsBTRecordTooLargeErr; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- +Routine: TrySimpleReplace - Attempts a simple insert, set, or replace. + +Function: If a hint exists for the iterator, attempt to find the key in the hint + node. If the key is found, an insert operation fails. If the key is not + found, a replace operation fails. If the key was not found, and the + insert position is greater than 0 and less than numRecords, the record + is inserted, provided there is enough freeSpace. If the key was found, + and there is more freeSpace than the difference between the new record + and the old record, the old record is deleted and the new record is + inserted.
+ +Assumptions: iterator key has already been checked by CheckKey + + +Input: btreePtr - description + iterator - description + record - description + recordLen - description + operation - description + + +Output: recordInserted - description + + +Result: noErr - success + E_RecordExits - insert operation failure + != noErr - GetNode, ReleaseNode, UpdateNode returned an error +-------------------------------------------------------------------------------*/ + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen, + Boolean *recordInserted ) +{ + UInt32 oldSpace; + UInt32 spaceNeeded; + UInt16 index; + UInt16 keySize; + Boolean foundIt; + Boolean didItFit; + + + *recordInserted = false; // we'll assume this won't work... + + if ( nodePtr->kind != kBTLeafNode ) + return noErr; // we're in the weeds! + + foundIt = SearchNode (btreePtr, nodePtr, &iterator->key, &index); + + if ( foundIt == false ) + return noErr; // we might be lost... + + keySize = CalcKeySize(btreePtr, &iterator->key); // includes length field + + spaceNeeded = CalcKeyRecordSize (keySize, recordLen); + + oldSpace = GetRecordSize (btreePtr, nodePtr, index); + + if ( spaceNeeded == oldSpace ) + { + UInt8 * dst; + + dst = GetRecordAddress (btreePtr, nodePtr, index); + + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + dst += keySize; // skip over key to point at record + + BlockMoveData(record->bufferAddress, dst, recordLen); // blast away... + + *recordInserted = true; + } + else if ( (GetNodeFreeSize(btreePtr, nodePtr) + oldSpace) >= spaceNeeded) + { + DeleteRecord (btreePtr, nodePtr, index); + + didItFit = InsertKeyRecord (btreePtr, nodePtr, index, + &iterator->key, KeyLength(btreePtr, &iterator->key), + record->bufferAddress, recordLen); + PanicIf (didItFit == false, "\pTrySimpleInsert: InsertKeyRecord returned false!"); + + *recordInserted = true; + } + // else not enough space... 
+ + return noErr; +} + + +/*------------------------------------------------------------------------------- +Routine: IsItAHint - checks the hint within a BTreeInterator. + +Function: checks the hint within a BTreeInterator. If it is non-zero, it may + possibly be valid. + +Input: btreePtr - pointer to control block for BTree file + iterator - pointer to BTreeIterator + +Output: answer - true if the hint looks reasonable + - false if the hint is 0 + +Result: noErr - success +-------------------------------------------------------------------------------*/ + + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, BTreeIterator *iterator, Boolean *answer) +{ + ++btreePtr->numHintChecks; + +#if DEBUG_BUILD + if (iterator->hint.nodeNum >= btreePtr->totalNodes) + { + *answer = false; + } else + +#endif + if (iterator->hint.nodeNum == 0) + { + *answer = false; + } + else + { + *answer = true; + ++btreePtr->numPossibleHints; + } + + return noErr; +} diff --git a/bsd/hfs/hfscommon/BTree/BTreeNodeOps.c b/bsd/hfs/hfscommon/BTree/BTreeNodeOps.c new file mode 100644 index 000000000..e80c07ec5 --- /dev/null +++ b/bsd/hfs/hfscommon/BTree/BTreeNodeOps.c @@ -0,0 +1,1218 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeNodeOps.c + + Contains: Single-node operations for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (msd) Mark Day + (djb) Don Brady + + Change History (most recent first): + + 6/1/99 djb Sync up with Mac OS 8.6. + 4/113/99 djb Fix key size checking bug in CheckNode. + 3/19/99 djb Added key size checking to CheckNode. + 3/26/98 djb Added PrintNode for debugging. + 9/4/97 djb Removed GetRightSiblingNode and GetLeftSiblingNode - they are + now macros. SearchNode is now in BTreeSearchNode.a. + 8/22/97 djb Turn off debugging code in CheckKey. + 7/24/97 djb Add summary traces for Get/Rel Node. Made GetRecordOffset into a + macro. Only call CheckNode if the node came from disk. + 7/21/97 msd Make GetRecordByIndex check its record index input; it now + returns an OSStatus. + 4/23/97 djb first checked in + + 2/19/97 djb Changes to support big node cache. + 1/3/97 djb Added support for large keys. + 12/19/96 djb first checked in + + + History applicable to original Scarecrow Design: + + <6> 10/25/96 ser Changing for new VFPI + <5> 9/17/96 dkh Add bounds checking to GetNode. Update GetNode to not assert + that CheckNode failed if the node is all zeroes. This can happen + if the hint case if the fetched node has been deallocated + <4> 3/7/96 dkh Change GetNewNode() to not use kGetEmptyBlock. Instead use + kGetBlock to fetch a block from the disk itself. €€€ Why? + <3> 1/22/96 dkh Add #include Memory.h + <2> 1/10/96 msd Change 64-bit math to use real function names from Math64.i. + <1> 10/18/95 rst Moved from Scarecrow project. 
+ + <17> 7/18/95 mbb Change MoveData & ClearBytes to BlockMoveData & BlockZero. + <16> 1/31/95 prp GetBlockProc interface uses a 64 bit node number. + <15> 1/12/95 wjk Adopt Model FileSystem changes in D5. + <14> 9/30/94 prp Get in sync with D2 interface changes. + <13> 7/25/94 wjk Eliminate usage of BytePtr in favor of UInt8 *. + <12> 7/22/94 wjk Convert to the new set of header files. + <11> 12/2/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <10> 11/30/93 wjk Change some Ptr's to BytePtr's in function definitions so they + agree with their prototypes. + <9> 8/31/93 prp Use U64SetU instead of S64Set. + <8> 5/21/93 gs Maintain statistical counters on Get/Release node routines. + <7> 5/10/93 gs Change keySize parameter to keyLength for InsertKeyRecord + routine. Calculate number of bytes in key from keyLength to + account for length and pad bytes. Add GetChildNodeNum routine. + <6> 3/23/93 gs Add InsertKeyRecord routine. + <5> 2/8/93 gs Fix bug in SearchNode that caused "off by 1" error when final + compare was searchKey > trialKey. Add UpdateNode. + <4> 12/10/92 gs Change keyLength field of key to 'length'. + <3> 12/8/92 gs Incorporate suggestions from preliminary code review. + <2> 12/2/92 gs Implement routines. + <1> 11/15/92 gs Define routine interfaces. + +*/ + +#include "../headers/BTreesPrivate.h" +#include "../headers/HFSInstrumentation.h" + + + +///////////////////////// BTree Module Node Operations ////////////////////////// +// +// GetNode - Call FS Agent to get node +// GetNewNode - Call FS Agent to get a new node +// ReleaseNode - Call FS Agent to release node obtained by GetNode. +// UpdateNode - Mark a node as dirty and call FS Agent to release it. +// +// CheckNode - Checks the validity of a node. +// ClearNode - Clear a node to all zeroes. +// +// InsertRecord - Inserts a record into a BTree node. +// InsertKeyRecord - Inserts a key and record pair into a BTree node. 
+// DeleteRecord - Deletes a record from a BTree node. +// +// SearchNode - Return index for record that matches key. +// LocateRecord - Return pointer to key and data, and size of data. +// +// GetNodeDataSize - Return the amount of space used for data in the node. +// GetNodeFreeSize - Return the amount of free space in the node. +// +// GetRecordOffset - Return the offset for record "index". +// GetRecordAddress - Return address of record "index". +// GetOffsetAddress - Return address of offset for record "index". +// +// InsertOffset - Inserts a new offset into a node. +// DeleteOffset - Deletes an offset from a node. +// +///////////////////////////////////////////////////////////////////////////////// + + + +////////////////////// Routines Internal To BTreeNodeOps.c ////////////////////// + +UInt16 GetRecordOffset (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + +UInt16 *GetOffsetAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ); + +void InsertOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + UInt16 delta ); + +void DeleteOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ); + + +///////////////////////////////////////////////////////////////////////////////// + +#define GetRecordOffset(btreePtr,node,index) (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize)) + +#if HFS_DIAGNOSTIC + #include + #define PRINTIT kprintf +#endif /* HFS_DIAGNOSTIC */ + +static void PrintNode(const NodeDescPtr node, UInt16 nodeSize, UInt32 nodeNumber); + + + +/*------------------------------------------------------------------------------- + +Routine: GetNode - Call FS Agent to get node + +Function: Gets an existing BTree node from FS Agent and verifies it. 
+ +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to request + +Output: nodePtr - pointer to beginning of node (nil if error) + +Result: + noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *nodePtr ) +{ + OSStatus err; + GetBlockProcPtr getNodeProc; + + + LogStartTime(kTraceGetNode); + + //€€ is nodeNum within proper range? + if( nodeNum >= btreePtr->totalNodes ) + { + Panic("\pGetNode:nodeNum >= totalNodes"); + err = fsBTInvalidNodeErr; + goto ErrorExit; + } + + nodePtr->blockSize = btreePtr->nodeSize; // indicate the size of a node + + getNodeProc = btreePtr->getBlockProc; + err = getNodeProc (btreePtr->fileRefNum, + nodeNum, + kGetBlock, + nodePtr ); + + if (err != noErr) + { + Panic ("\pGetNode: getNodeProc returned error."); + // nodePtr->buffer = nil; + goto ErrorExit; + } + ++btreePtr->numGetNodes; + + // + // Optimization + // Only call CheckNode if the node came from disk. + // If it was in the cache, we'll assume its already a valid node. + // + + if ( nodePtr->blockReadFromDisk ) // if we read it from disk then check it + { + err = CheckNode (btreePtr, nodePtr->buffer); + + if (err != noErr) + { + + VTOVCB(btreePtr->fileRefNum)->vcbFlags |= kHFS_DamagedVolume; + + #if HFS_DIAGNOSTIC + if (((NodeDescPtr)nodePtr->buffer)->numRecords != 0) + PrintNode(nodePtr->buffer, btreePtr->nodeSize, nodeNum); + #endif + + if (DEBUG_BUILD) + { + // With the removal of bounds checking in IsItAHint(), it's possible that + // GetNode() will be called to fetch a clear (all zeroes) node. We want + // CheckNode() to fail in this case (it does), however we don't want to assert + // this case because it is not really an "error". Returning an error from GetNode() + // in this case will cause the hint checking code to ignore the hint and revert to + // the full search mode. 
+ + { + UInt32 *cur; + UInt32 *lastPlusOne; + + cur = nodePtr->buffer; + lastPlusOne = (UInt32 *) ((UInt8 *) cur + btreePtr->nodeSize); + + while( cur < lastPlusOne ) + { + if( *cur++ != 0 ) + { + Panic ("\pGetNode: CheckNode returned error."); + break; + } + } + } + } + + (void) TrashNode (btreePtr, nodePtr); // ignore error + goto ErrorExit; + } + } + + LogEndTime(kTraceGetNode, noErr); + + return noErr; + +ErrorExit: + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + LogEndTime(kTraceGetNode, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetNewNode - Call FS Agent to get a new node + +Function: Gets a new BTree node from FS Agent and initializes it to an empty + state. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to request + +Output: returnNodePtr - pointer to beginning of node (nil if error) + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *returnNodePtr ) +{ + OSStatus err; + NodeDescPtr node; + void *pos; + GetBlockProcPtr getNodeProc; + + + //////////////////////// get buffer for new node //////////////////////////// + + returnNodePtr->blockSize = btreePtr->nodeSize; // indicate the size of a node + + getNodeProc = btreePtr->getBlockProc; + err = getNodeProc (btreePtr->fileRefNum, + nodeNum, + kGetBlock+kGetEmptyBlock, + returnNodePtr ); + + if (err != noErr) + { + Panic ("\pGetNewNode: getNodeProc returned error."); + // returnNodePtr->buffer = nil; + return err; + } + ++btreePtr->numGetNewNodes; + + + ////////////////////////// initialize the node ////////////////////////////// + + node = returnNodePtr->buffer; + + ClearNode (btreePtr, node); // clear the node + + pos = (char *)node + btreePtr->nodeSize - 2; // find address of last offset + *(UInt16 *)pos = sizeof 
(BTNodeDescriptor); // set offset to beginning of free space + + + return noErr; +} + + + +/*------------------------------------------------------------------------------- + +Routine: ReleaseNode - Call FS Agent to release node obtained by GetNode. + +Function: Informs the FS Agent that a BTree node may be released. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + LogStartTime(kTraceReleaseNode); + + err = noErr; + + if (nodePtr->buffer != nil) + { + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fileRefNum, + nodePtr, + kReleaseBlock ); + PanicIf (err, "\pReleaseNode: releaseNodeProc returned error."); + ++btreePtr->numReleaseNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + LogEndTime(kTraceReleaseNode, err); + + return err; +} + + + + +/*------------------------------------------------------------------------------- + +Routine: TrashNode - Call FS Agent to release node obtained by GetNode, and + not store it...mark it as bad. + +Function: Informs the FS Agent that a BTree node may be released and thrown away. 
+ +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + LogStartTime(kTraceReleaseNode); + + err = noErr; + + if (nodePtr->buffer != nil) + { + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fileRefNum, + nodePtr, + kReleaseBlock | kTrashBlock ); + PanicIf (err, "\pTrashNode: releaseNodeProc returned error."); + ++btreePtr->numReleaseNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + LogEndTime(kTraceReleaseNode, err); + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: UpdateNode - Mark a node as dirty and call FS Agent to release it. + +Function: Marks a BTree node dirty and informs the FS Agent that it may be released. + + //€€ have another routine that clears & writes a node, so we can call + CheckNode from this routine. + +Input: btreePtr - pointer to BTree control block + nodeNum - number of node to release + transactionID - ID of transaction this node update is a part of + flags - special flags to pass to ReleaseNodeProc + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr, + UInt32 transactionID, + UInt32 flags ) +{ + OSStatus err; + ReleaseBlockProcPtr releaseNodeProc; + + + err = noErr; + + if (nodePtr->buffer != nil) //€€ why call UpdateNode if nil ?!?
+ { + if (DEBUG_BUILD) + { + if ( btreePtr->attributes & kBTVariableIndexKeysMask ) + (void) CheckNode (btreePtr, nodePtr->buffer); + } + + LogStartTime(kTraceReleaseNode); + + releaseNodeProc = btreePtr->releaseBlockProc; + err = releaseNodeProc (btreePtr->fileRefNum, + nodePtr, + flags | kMarkBlockDirty ); + + LogEndTime(kTraceReleaseNode, err); + + M_ExitOnError (err); + ++btreePtr->numUpdateNodes; + } + + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + + return noErr; + +ErrorExit: + + return err; +} + + + +/*------------------------------------------------------------------------------- + +Routine: CheckNode - Checks the validity of a node. + +Function: Checks the validity of a node by verifying that the fLink and bLink fields + are within the forks EOF. The node type must be one of the four known + types. The node height must be less than or equal to the tree height. The + node must not have more than the maximum number of records, and the record + offsets must make sense. + +Input: btreePtr - pointer to BTree control block + node - pointer to node to check + +Result: noErr - success + fsBTInvalidNodeErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus CheckNode (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + SInt32 index; + SInt32 maxRecords; + UInt32 maxNode; + UInt16 nodeSize; + UInt16 offset; + UInt16 prevOffset; + + nodeSize = btreePtr->nodeSize; + + ///////////////////// are fLink and bLink within EOF //////////////////////// + + maxNode = (GetFileControlBlock(btreePtr->fileRefNum)->fcbEOF / nodeSize) - 1; + + if ( (node->fLink > maxNode) || (node->bLink > maxNode) ) + return fsBTInvalidNodeErr; + + /////////////// check node type (leaf, index, header, map) ////////////////// + + if ( (node->kind < kBTLeafNode) || (node->kind > kBTMapNode) ) + return fsBTInvalidNodeErr; + + ///////////////////// is node height > tree depth? 
////////////////////////// + + if ( node->height > btreePtr->treeDepth ) + return fsBTInvalidNodeErr; + + //////////////////////// check number of records //////////////////////////// + + //XXX can we calculate a more accurate minimum record size? + maxRecords = ( nodeSize - sizeof (BTNodeDescriptor) ) >> 3; + + if (node->numRecords > maxRecords) + return fsBTInvalidNodeErr; + + ////////////////////////// check record offsets ///////////////////////////// + + index = node->numRecords; /* start index at free space */ + prevOffset = nodeSize - (index << 1); /* use 2 bytes past end of free space */ + + do { + offset = GetRecordOffset (btreePtr, node, index); + + if (offset & 1) // offset is odd + return fsBTInvalidNodeErr; + + if (offset >= prevOffset) // offset >= previous offset + return fsBTInvalidNodeErr; + + /* reject keys that overflow record slot */ + if ((node->kind == kBTLeafNode) && + (index < node->numRecords) && /* ignore free space record */ + (CalcKeySize(btreePtr, (KeyPtr) ((Ptr)node + offset)) > (prevOffset - offset))) { + return fsBTInvalidNodeErr; + } + + prevOffset = offset; + } while ( --index >= 0 ); + + if (offset < sizeof (BTNodeDescriptor) ) // first offset < minimum ? + return fsBTInvalidNodeErr; + + return noErr; +} + + +#if HFS_DIAGNOSTIC +static void PrintNode(const NodeDescPtr node, UInt16 nodeSize, UInt32 nodeNumber) +{ + struct row { + UInt16 word[8]; + }; + struct row *offset; + UInt16 rows; + UInt32 *lp; + + PRINTIT("Dump of B-tree node #%ld ($%08lX)\n", nodeNumber, nodeNumber); + + rows = nodeSize/16; + lp = (UInt32*) node; + offset = 0; + + while (rows-- > 0) + PRINTIT("%04X: %08lX %08lX %08lX %08lX\n", (u_int)offset++, *lp++, *lp++, *lp++, *lp++); +} +#endif + + +/*------------------------------------------------------------------------------- + +Routine: ClearNode - Clear a node to all zeroes. + +Function: Writes zeroes from beginning of node for nodeSize bytes. 
+ +Input: btreePtr - pointer to BTree control block + node - pointer to node to clear + +Result: none +-------------------------------------------------------------------------------*/ + +void ClearNode (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + ClearMemory( node, btreePtr->nodeSize ); +} + +/*------------------------------------------------------------------------------- + +Routine: InsertRecord - Inserts a record into a BTree node. + +Function: + +Note: Record size must be even! + +Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + recPtr - pointer to record to insert + +Result: noErr - success + fsBTFullErr - record larger than remaining free space. +-------------------------------------------------------------------------------*/ + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + RecordPtr recPtr, + UInt16 recSize ) +{ + UInt16 freeSpace; + UInt16 indexOffset; + UInt16 freeOffset; + UInt16 bytesToMove; + void *src; + void *dst; + + //// will new record fit in node? 
+ + freeSpace = GetNodeFreeSize (btreePtr, node); + //€€ we could get freeOffset & calc freeSpace + if ( freeSpace < recSize + 2) + { + return false; + } + + + //// make hole for new record + + indexOffset = GetRecordOffset (btreePtr, node, index); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((Ptr) node) + indexOffset; + dst = ((Ptr) src) + recSize; + bytesToMove = freeOffset - indexOffset; + if (bytesToMove) + MoveRecordsRight (src, dst, bytesToMove); + + + //// adjust offsets for moved records + + InsertOffset (btreePtr, node, index, recSize); + + + //// move in the new record + + dst = ((Ptr) node) + indexOffset; + MoveRecordsLeft (recPtr, dst, recSize); + + return true; +} + + + +/*------------------------------------------------------------------------------- + +Routine: InsertKeyRecord - Inserts a record into a BTree node. + +Function: + +Note: Record size must be even! + +Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + keyPtr - pointer to key for record to insert + keyLength - length of key (or maxKeyLength) + recPtr - pointer to record to insert + recSize - number of bytes to copy for record + +Result: noErr - success + fsBTFullErr - record larger than remaining free space. 
+-------------------------------------------------------------------------------*/ + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + KeyPtr keyPtr, + UInt16 keyLength, + RecordPtr recPtr, + UInt16 recSize ) +{ + UInt16 freeSpace; + UInt16 indexOffset; + UInt16 freeOffset; + UInt16 bytesToMove; + UInt8 * src; + UInt8 * dst; + UInt16 keySize; + UInt16 rawKeyLength; + UInt16 sizeOfLength; + + //// calculate actual key size + + if ( btreePtr->attributes & kBTBigKeysMask ) + keySize = keyLength + sizeof(UInt16); + else + keySize = keyLength + sizeof(UInt8); + + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + + //// will new record fit in node? + + freeSpace = GetNodeFreeSize (btreePtr, node); + //€€ we could get freeOffset & calc freeSpace + if ( freeSpace < keySize + recSize + 2) + { + return false; + } + + + //// make hole for new record + + indexOffset = GetRecordOffset (btreePtr, node, index); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((UInt8 *) node) + indexOffset; + dst = ((UInt8 *) src) + keySize + recSize; + bytesToMove = freeOffset - indexOffset; + if (bytesToMove) + MoveRecordsRight (src, dst, bytesToMove); + + + //// adjust offsets for moved records + + InsertOffset (btreePtr, node, index, keySize + recSize); + + + //// copy record key + + dst = ((UInt8 *) node) + indexOffset; + + if ( btreePtr->attributes & kBTBigKeysMask ) + { + *((UInt16*) dst)++ = keyLength; // use keyLength rather than key.length + rawKeyLength = keyPtr->length16; + sizeOfLength = 2; + } + else + { + *dst++ = keyLength; // use keyLength rather than key.length + rawKeyLength = keyPtr->length8; + sizeOfLength = 1; + } + + MoveRecordsLeft ( ((UInt8 *) keyPtr) + sizeOfLength, dst, rawKeyLength); // copy key + + // any pad bytes? 
+ bytesToMove = keySize - rawKeyLength; + if (bytesToMove) + ClearMemory (dst + rawKeyLength, bytesToMove); // clear pad bytes in index key + + + //// copy record data + + dst = ((UInt8 *) node) + indexOffset + keySize; + MoveRecordsLeft (recPtr, dst, recSize); + + return true; +} + + + +/*------------------------------------------------------------------------------- + +Routine: DeleteRecord - Deletes a record from a BTree node. + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node to insert the record + index - position record is to be inserted + +Result: none +-------------------------------------------------------------------------------*/ + +void DeleteRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + SInt16 indexOffset; + SInt16 nextOffset; + SInt16 freeOffset; + SInt16 bytesToMove; + void *src; + void *dst; + + //// compress records + indexOffset = GetRecordOffset (btreePtr, node, index); + nextOffset = GetRecordOffset (btreePtr, node, index + 1); + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + src = ((Ptr) node) + nextOffset; + dst = ((Ptr) node) + indexOffset; + bytesToMove = freeOffset - nextOffset; + if (bytesToMove) + MoveRecordsLeft (src, dst, bytesToMove); + + //// Adjust the offsets + DeleteOffset (btreePtr, node, index); + + /* clear out new free space */ + bytesToMove = nextOffset - indexOffset; + ClearMemory(GetRecordAddress(btreePtr, node, node->numRecords), bytesToMove); + +} + + + +/*------------------------------------------------------------------------------- + +Routine: SearchNode - Return index for record that matches key. + +Function: Returns the record index for the record that matches the search key. + If no record was found that matches the search key, the "insert index" + of where the record should go is returned instead. + +Algorithm: A binary search algorithm is used to find the specified key. 
+ +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + searchKey - pointer to the key to match + +Output: index - pointer to beginning of key for record + +Result: true - success (index = record index) + false - key did not match anything in node (index = insert index) +-------------------------------------------------------------------------------*/ +Boolean +SearchNode( BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + KeyPtr searchKey, + UInt16 *returnIndex ) +{ + SInt32 lowerBound; + SInt32 upperBound; + SInt32 index; + SInt32 result; + KeyPtr trialKey; + UInt16 *offset; + KeyCompareProcPtr compareProc = btreePtr->keyCompareProc; + + lowerBound = 0; + upperBound = node->numRecords - 1; + offset = (UInt16 *) ((UInt8 *)(node) + (btreePtr)->nodeSize - kOffsetSize); + + while (lowerBound <= upperBound) { + index = (lowerBound + upperBound) >> 1; + + trialKey = (KeyPtr) ((UInt8 *)node + *(offset - index)); + + result = compareProc(searchKey, trialKey); + + if (result < 0) { + upperBound = index - 1; /* search < trial */ + } else if (result > 0) { + lowerBound = index + 1; /* search > trial */ + } else { + *returnIndex = index; /* search == trial */ + return true; + } + } + + *returnIndex = lowerBound; /* lowerBound is insert index */ + return false; +} + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordByIndex - Return pointer to key and data, and size of data. + +Function: Returns a pointer to beginning of key for record, a pointer to the + beginning of the data for the record, and the size of the record data + (does not include the size of the key). 
+ +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - index of record to get + +Output: keyPtr - pointer to beginning of key for record + dataPtr - pointer to beginning of data for record + dataSize - size of the data portion of the record + +Result: none +-------------------------------------------------------------------------------*/ + +OSStatus GetRecordByIndex (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + KeyPtr *keyPtr, + UInt8 * *dataPtr, + UInt16 *dataSize ) +{ + UInt16 offset; + UInt16 nextOffset; + UInt16 keySize; + + // + // Make sure index is valid (in range 0..numRecords-1) + // + if (index >= node->numRecords) + return fsBTRecordNotFoundErr; + + //// find keyPtr + offset = GetRecordOffset (btreePtr, node, index); + *keyPtr = (KeyPtr) ((Ptr)node + offset); + + //// find dataPtr + keySize = CalcKeySize(btreePtr, *keyPtr); + if ( M_IsOdd (keySize) ) + ++keySize; // add pad byte + + offset += keySize; // add the key length to find data offset + *dataPtr = (UInt8 *) node + offset; + + //// find dataSize + nextOffset = GetRecordOffset (btreePtr, node, index + 1); + *dataSize = nextOffset - offset; + + return noErr; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetNodeDataSize - Return the amount of space used for data in the node. + +Function: Gets the size of the data currently contained in a node, excluding + the node header. (record data + offset overhead) + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + +Result: - number of bytes used for data and offsets in the node. 
+-------------------------------------------------------------------------------*/ + +UInt16 GetNodeDataSize (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + UInt16 freeOffset; + + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); + + return freeOffset + (node->numRecords << 1) - sizeof (BTNodeDescriptor); +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetNodeFreeSize - Return the amount of free space in the node. + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + +Result: - number of bytes of free space in the node. +-------------------------------------------------------------------------------*/ + +UInt16 GetNodeFreeSize (BTreeControlBlockPtr btreePtr, NodeDescPtr node ) +{ + UInt16 freeOffset; + + freeOffset = GetRecordOffset (btreePtr, node, node->numRecords); //€€ inline? + + return btreePtr->nodeSize - freeOffset - (node->numRecords << 1) - kOffsetSize; +} + + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordOffset - Return the offset for record "index". + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset for + +Result: - offset (in bytes) from beginning of node of record specified by index +-------------------------------------------------------------------------------*/ +// make this a macro (for inlining) +#if 0 +UInt16 GetRecordOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + void *pos; + + + pos = (UInt8 *)node + btreePtr->nodeSize - (index << 1) - kOffsetSize; + + return *(short *)pos; +} +#endif + + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordAddress - Return address of record "index". 
+ +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset address for + +Result: - pointer to record "index". +-------------------------------------------------------------------------------*/ +// make this a macro (for inlining) +#if 0 +UInt8 * GetRecordAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + UInt8 * pos; + + pos = (UInt8 *)node + GetRecordOffset (btreePtr, node, index); + + return pos; +} +#endif + + + +/*------------------------------------------------------------------------------- + +Routine: GetRecordSize - Return size of record "index". + +Function: + +Note: This does not work on the FreeSpace index! + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain record size for + +Result: - size of record "index". +-------------------------------------------------------------------------------*/ + +UInt16 GetRecordSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + UInt16 *pos; + + pos = (UInt16 *) ((Ptr)node + btreePtr->nodeSize - (index << 1) - kOffsetSize); + + return *(pos-1) - *pos; +} + + + +/*------------------------------------------------------------------------------- +Routine: GetOffsetAddress - Return address of offset for record "index". + +Function: + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain offset address for + +Result: - pointer to offset for record "index". 
+-------------------------------------------------------------------------------*/ + +UInt16 *GetOffsetAddress (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + void *pos; + + pos = (Ptr)node + btreePtr->nodeSize - (index << 1) -2; + + return (UInt16 *)pos; +} + + + +/*------------------------------------------------------------------------------- +Routine: GetChildNodeNum - Return child node number from index record "index". + +Function: Returns the first UInt32 stored after the key for record "index". + +Assumes: The node is an Index Node. + The key.length stored at record "index" is ODD. //€€ change for variable length index keys + +Input: btreePtr - pointer to BTree control block + node - pointer to node that contains the record + index - record to obtain child node number from + +Result: - child node number from record "index". +-------------------------------------------------------------------------------*/ + +UInt32 GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + UInt16 index ) +{ + UInt8 * pos; + + pos = GetRecordAddress (btreePtr, nodePtr, index); + pos += CalcKeySize(btreePtr, (BTreeKey *) pos); // key.length + size of length field + + return *(UInt32 *)pos; +} + + + +/*------------------------------------------------------------------------------- +Routine: InsertOffset - Add an offset and adjust existing offsets by delta. + +Function: Add an offset at 'index' by shifting 'index+1' through the last offset + and adjusting them by 'delta', the size of the record to be inserted. + The number of records contained in the node is also incremented. 
+ +Input: btreePtr - pointer to BTree control block + node - pointer to node + index - index at which to insert record + delta - size of record to be inserted + +Result: none +-------------------------------------------------------------------------------*/ + +void InsertOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + UInt16 delta ) +{ + UInt16 *src, *dst; + UInt16 numOffsets; + + src = GetOffsetAddress (btreePtr, node, node->numRecords); // point to free offset + dst = src - 1; // point to new offset + numOffsets = node->numRecords++ - index; // subtract index & postincrement + + do { + *dst++ = *src++ + delta; // to tricky? + } while (numOffsets--); +} + + + +/*------------------------------------------------------------------------------- + +Routine: DeleteOffset - Delete an offset. + +Function: Delete the offset at 'index' by shifting 'index+1' through the last offset + and adjusting them by the size of the record 'index'. + The number of records contained in the node is also decremented. + +Input: btreePtr - pointer to BTree control block + node - pointer to node + index - index at which to delete record + +Result: none +-------------------------------------------------------------------------------*/ + +void DeleteOffset (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index ) +{ + UInt16 *src, *dst; + UInt16 numOffsets; + UInt16 delta; + + dst = GetOffsetAddress (btreePtr, node, index); + src = dst - 1; + delta = *src - *dst; + numOffsets = --node->numRecords - index; // predecrement numRecords & subtract index + + while (numOffsets--) + { + *--dst = *--src - delta; // work our way left + } +} + + diff --git a/bsd/hfs/hfscommon/BTree/BTreeTreeOps.c b/bsd/hfs/hfscommon/BTree/BTreeTreeOps.c new file mode 100644 index 000000000..19e831329 --- /dev/null +++ b/bsd/hfs/hfscommon/BTree/BTreeTreeOps.c @@ -0,0 +1,1211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeTreeOps.c + + Contains: Multi-node tree operations for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + + 6/1/99 djb Sync up with Mac OS 8.6. + 12/8/97 djb Radar #2200632, CollapseTree wasn't marking root node dirty. + 11/24/97 djb Radar #2005325, InsertLevel incorrectly handled root splits! + 10/17/97 msd Conditionalize DebugStrs. + 5/16/97 msd InsertNode() needs a return statement in ErrorExit. + 4/23/97 djb first checked in + + 3/17/97 DSH Conditionalize out Panic assertion for SC. + 3/3/97 djb Removed DebugStr in InsertLevel. + 2/19/97 djb Major re-write of insert code; added InsertLevel and InsertNode. + 1/27/97 djb InsertTree and DeleteTree are now recursive and support variable + sized index keys. 
+ 1/16/97 djb Removed DebugStr in SearchTree. Added initial support for + variable sized index keys. + 1/3/97 djb Changed len8 to length8. + 1/3/97 djb Added support for large keys. + 12/19/96 djb first checked in + + History applicable to original Scarecrow Design: + + <3> 10/25/96 ser Changing for new VFPI + <2> 1/22/96 dkh Add #include Memory.h + <1> 10/18/95 rst Moved from Scarecrow project. + + <12> 7/18/95 mbb Change MoveData & ClearBytes to BlockMoveData & BlockZero. + <11> 9/30/94 prp Get in sync with D2 interface changes. + <10> 7/25/94 wjk Eliminate usage of BytePtr in favor of UInt8 *. + <9> 7/22/94 wjk Convert to the new set of header files. + <8> 12/2/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <7> 11/30/93 wjk Change some Ptr's to BytePtr's in function definitions so they + agree with their prototypes. + <6> 5/21/93 gs Debug DeleteTree. Modify InsertTree for BTReplaceRecord. + <5> 5/10/93 gs Modify RotateLeft, and add DeleteTree, CollapseTree routines. + <4> 3/23/93 gs revise RotateLeft to use InsertKeyRecord instead of + InsertRecord. + <3> 3/23/93 gs Implement SplitLeft, InsertTree routine. + <2> 2/8/93 gs Implement SearchTree, and RotateLeft. 
+ <1> 11/15/92 gs first checked in + +*/ + +#include "../headers/BTreesPrivate.h" + +// +/////////////////////// Routines Internal To BTree Module /////////////////////// +// +// SearchTree +// InsertTree +// +////////////////////// Routines Internal To BTreeTreeOps.c ////////////////////// + +static OSStatus AddNewRootNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +static OSStatus CollapseTree (BTreeControlBlockPtr btreePtr, + BlockDescriptor *blockPtr ); + +static OSStatus RotateLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + UInt16 rightInsertIndex, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + Boolean *recordFit, + UInt16 *recsRotated ); + +static Boolean RotateRecordLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ); + +static OSStatus SplitLeft (BTreeControlBlockPtr btreePtr, + BlockDescriptor *leftNode, + BlockDescriptor *rightNode, + UInt32 rightNodeNum, + UInt16 index, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + UInt16 *recsRotated ); + + + +static OSStatus InsertLevel (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + InsertKey *primaryKey, + InsertKey *secondaryKey, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + UInt32 *insertNode ); + +static OSErr InsertNode (BTreeControlBlockPtr btreePtr, + InsertKey *key, + BlockDescriptor *rightNode, + UInt32 node, + UInt16 index, + UInt32 *newNode, + UInt16 *newIndex, + BlockDescriptor *leftNode, + Boolean *updateParent, + Boolean *insertParent, + Boolean *rootSplit ); + +static UInt16 GetKeyLength (const BTreeControlBlock *btreePtr, + const BTreeKey *key, + Boolean forLeafNode ); + + + +//////////////////////// BTree Multi-node Tree Operations /////////////////////// + + +/*------------------------------------------------------------------------------- 
+ +Routine: SearchTree - Search BTree for key and set up Tree Path Table. + +Function: Searches BTree for specified key, setting up the Tree Path Table to + reflect the search path. + + +Input: btreePtr - pointer to control block of BTree to search + keyPtr - pointer to the key to search for + treePathTable - pointer to the tree path table to construct + +Output: nodeNum - number of the node containing the key position + iterator - BTreeIterator specifying record or insert position + +Result: noErr - key found, index is record index + fsBTRecordNotFoundErr - key not found, index is insert index + fsBTEmptyErr - key not found, return params are nil + otherwise - catastrophic failure (GetNode/ReleaseNode failed) +-------------------------------------------------------------------------------*/ + +OSStatus SearchTree (BTreeControlBlockPtr btreePtr, + BTreeKeyPtr searchKey, + TreePathTable treePathTable, + UInt32 *nodeNum, + BlockDescriptor *nodePtr, + UInt16 *returnIndex ) +{ + OSStatus err; + SInt16 level; + UInt32 curNodeNum; + NodeRec nodeRec; + UInt16 index; + Boolean keyFound; + KeyPtr keyPtr; + UInt8 * dataPtr; + UInt16 dataSize; + + + if (btreePtr->treeDepth == 0) // is the tree empty? + { + err = fsBTEmptyErr; + goto ErrorExit; + } + + curNodeNum = btreePtr->rootNode; + + //€€ for debugging... + treePathTable [0].node = 0; + treePathTable [0].index = 0; + + while (true) + { + PanicIf(curNodeNum == 0, "\pSearchTree: curNodeNum is zero!"); + + err = GetNode (btreePtr, curNodeNum, &nodeRec); + if (err != noErr) + { + goto ErrorExit; + } + + keyFound = SearchNode (btreePtr, nodeRec.buffer, searchKey, &index); + + level = ((BTNodeDescriptor*)nodeRec.buffer)->height; //€€ or --level; + + + treePathTable [level].node = curNodeNum; + + if ( ((BTNodeDescriptor*)nodeRec.buffer)->kind == kBTLeafNode) + { + treePathTable [level].index = index; + break; // were done... 
+ } + + if ( (keyFound != true) && (index != 0)) + --index; + + treePathTable [level].index = index; + + GetRecordByIndex (btreePtr, nodeRec.buffer, index, &keyPtr, &dataPtr, &dataSize); + curNodeNum = *(UInt32 *)dataPtr; + err = ReleaseNode (btreePtr, &nodeRec); + if (err != noErr) + { + goto ErrorExit; + } + } + + *nodeNum = curNodeNum; + *nodePtr = nodeRec; + *returnIndex = index; + + if (keyFound) + return noErr; // searchKey found, index identifies record in node + else + return fsBTRecordNotFoundErr; // searchKey not found, index identifies insert point + +ErrorExit: + + *nodeNum = 0; + nodePtr->buffer = nil; + nodePtr->blockHeader = nil; + *returnIndex = 0; + + return err; +} + + + + +////////////////////////////////// InsertTree /////////////////////////////////// + +OSStatus InsertTree ( BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + Boolean replacingKey, + UInt32 *insertNode ) +{ + InsertKey primaryKey; + OSStatus err; + + primaryKey.keyPtr = keyPtr; + primaryKey.keyLength = GetKeyLength(btreePtr, primaryKey.keyPtr, (level == 1)); + primaryKey.recPtr = recPtr; + primaryKey.recSize = recSize; + primaryKey.replacingKey = replacingKey; + primaryKey.skipRotate = false; + + err = InsertLevel (btreePtr, treePathTable, &primaryKey, nil, + targetNode, index, level, insertNode ); + + return err; + +} // End of InsertTree + + +////////////////////////////////// InsertLevel ////////////////////////////////// + +OSStatus InsertLevel (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + InsertKey *primaryKey, + InsertKey *secondaryKey, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + UInt32 *insertNode ) +{ + OSStatus err; + BlockDescriptor leftNode; + UInt32 targetNodeNum; + UInt32 newNodeNum; + UInt16 newIndex; + Boolean insertParent; + Boolean updateParent; + Boolean newRoot; + +#if defined(applec) && 
!defined(__SC__) + PanicIf ((level == 1) && (((NodeDescPtr)targetNode->buffer)->kind != kBTLeafNode), "\P InsertLevel: non-leaf at level 1! "); +#endif + leftNode.buffer = nil; + targetNodeNum = treePathTable [level].node; + + insertParent = false; + updateParent = false; + + ////// process first insert ////// + + err = InsertNode (btreePtr, primaryKey, targetNode, targetNodeNum, index, + &newNodeNum, &newIndex, &leftNode, &updateParent, &insertParent, &newRoot ); + M_ExitOnError (err); + + if ( newRoot ) + { + // Extend the treePathTable by adding an entry for the new + // root node that references the current targetNode. + // + // If inserting the secondaryKey changes the first key of + // the target node, then we'll have to update the second + // key in the new root node. + + treePathTable [level + 1].node = btreePtr->rootNode; + treePathTable [level + 1].index = 1; // 1 since we always split/rotate left + } + + if ( level == 1 ) + *insertNode = newNodeNum; + + ////// process second insert (if any) ////// + + if ( secondaryKey != nil ) + { + Boolean temp; + + err = InsertNode (btreePtr, secondaryKey, targetNode, newNodeNum, newIndex, + &newNodeNum, &newIndex, &leftNode, &updateParent, &insertParent, &temp); + M_ExitOnError (err); + + if ( DEBUG_BUILD && updateParent && newRoot ) + DebugStr("\p InsertLevel: New root from primary key, update from secondary key..."); + } + + //////////////////////// Update Parent(s) /////////////////////////////// + + if ( insertParent || updateParent ) + { + BlockDescriptor parentNode; + UInt32 parentNodeNum; + KeyPtr keyPtr; + UInt8 * recPtr; + UInt16 recSize; + + secondaryKey = nil; + + PanicIf ( (level == btreePtr->treeDepth), "\p InsertLevel: unfinished insert!?"); + + ++level; + + // Get Parent Node data... 
+ index = treePathTable [level].index; + parentNodeNum = treePathTable [level].node; + + PanicIf ( parentNodeNum == 0, "\p InsertLevel: parent node is zero!?"); + + err = GetNode (btreePtr, parentNodeNum, &parentNode); // released as target node in next level up + M_ExitOnError (err); +#if defined(applec) && !defined(__SC__) + if (DEBUG_BUILD && level > 1) + PanicIf ( ((NodeDescPtr)parentNode.buffer)->kind != kBTIndexNode, "\P InsertLevel: parent node not an index node! "); +#endif + ////////////////////////// Update Parent Index ////////////////////////////// + + if ( updateParent ) + { + //€€ debug: check if ptr == targetNodeNum + GetRecordByIndex (btreePtr, parentNode.buffer, index, &keyPtr, &recPtr, &recSize); + PanicIf( (*(UInt32 *) recPtr) != targetNodeNum, "\p InsertLevel: parent ptr doesn't match target node!"); + + // need to delete and re-insert this parent key/ptr + // we delete it here and it gets re-inserted in the + // InsertLevel call below. + DeleteRecord (btreePtr, parentNode.buffer, index); + + primaryKey->keyPtr = (KeyPtr) GetRecordAddress( btreePtr, targetNode->buffer, 0 ); + primaryKey->keyLength = GetKeyLength(btreePtr, primaryKey->keyPtr, false); + primaryKey->recPtr = (UInt8 *) &targetNodeNum; + primaryKey->recSize = sizeof(targetNodeNum); + primaryKey->replacingKey = kReplaceRecord; + primaryKey->skipRotate = insertParent; // don't rotate left if we have two inserts occuring + } + + ////////////////////////// Add New Parent Index ///////////////////////////// + + if ( insertParent ) + { + InsertKey *insertKeyPtr; + InsertKey insertKey; + + if ( updateParent ) + { + insertKeyPtr = &insertKey; + secondaryKey = &insertKey; + } + else + { + insertKeyPtr = primaryKey; + } + + insertKeyPtr->keyPtr = (KeyPtr) GetRecordAddress (btreePtr, leftNode.buffer, 0); + insertKeyPtr->keyLength = GetKeyLength(btreePtr, insertKeyPtr->keyPtr, false); + insertKeyPtr->recPtr = (UInt8 *) &((NodeDescPtr)targetNode->buffer)->bLink; + insertKeyPtr->recSize = 
sizeof(UInt32); + insertKeyPtr->replacingKey = kInsertRecord; + insertKeyPtr->skipRotate = false; // a rotate is OK during second insert + } + + err = InsertLevel (btreePtr, treePathTable, primaryKey, secondaryKey, + &parentNode, index, level, insertNode ); + M_ExitOnError (err); + } + + err = UpdateNode (btreePtr, targetNode, 0, kLockTransaction); // all done with target + M_ExitOnError (err); + + err = UpdateNode (btreePtr, &leftNode, 0, kLockTransaction); // all done with left sibling + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, targetNode); + (void) ReleaseNode (btreePtr, &leftNode); + + Panic ("\p InsertLevel: an error occured!"); + + return err; + +} // End of InsertLevel + + + +////////////////////////////////// InsertNode /////////////////////////////////// + +static OSErr InsertNode (BTreeControlBlockPtr btreePtr, + InsertKey *key, + + BlockDescriptor *rightNode, + UInt32 node, + UInt16 index, + + UInt32 *newNode, + UInt16 *newIndex, + + BlockDescriptor *leftNode, + Boolean *updateParent, + Boolean *insertParent, + Boolean *rootSplit ) +{ + BlockDescriptor *targetNode; + UInt32 leftNodeNum; + UInt16 recsRotated; + OSErr err; + Boolean recordFit; + + *rootSplit = false; + + PanicIf ( rightNode->buffer == leftNode->buffer, "\p InsertNode: rightNode == leftNode, huh?"); + + leftNodeNum = ((NodeDescPtr) rightNode->buffer)->bLink; + + + /////////////////////// Try Simple Insert /////////////////////////////// + + if ( node == leftNodeNum ) + targetNode = leftNode; + else + targetNode = rightNode; + + recordFit = InsertKeyRecord (btreePtr, targetNode->buffer, index, key->keyPtr, key->keyLength, key->recPtr, key->recSize); + + if ( recordFit ) + { + *newNode = node; + *newIndex = index; + + if ( (index == 0) && (((NodeDescPtr) targetNode->buffer)->height != btreePtr->treeDepth) ) + *updateParent = true; // the first record changed so we need to update the parent + } + + + //////////////////////// Try Rotate Left 
//////////////////////////////// + + if ( !recordFit && leftNodeNum > 0 ) + { + PanicIf ( leftNode->buffer != nil, "\p InsertNode: leftNode already aquired!"); + + if ( leftNode->buffer == nil ) + { + err = GetNode (btreePtr, leftNodeNum, leftNode); // will be released by caller or a split below + M_ExitOnError (err); + } + + PanicIf ( ((NodeDescPtr) leftNode->buffer)->fLink != node, "\p InsertNode, RotateLeft: invalid sibling link!" ); + + if ( !key->skipRotate ) // are rotates allowed? + { + err = RotateLeft (btreePtr, leftNode->buffer, rightNode->buffer, index, key->keyPtr, key->recPtr, + key->recSize, newIndex, newNode, &recordFit, &recsRotated ); + M_ExitOnError (err); + + if ( recordFit ) + { + if ( key->replacingKey || (recsRotated > 1) || (index > 0) ) + *updateParent = true; + } + } + } + + + //////////////////////// Try Split Left ///////////////////////////////// + + if ( !recordFit ) + { + // might not have left node... + err = SplitLeft (btreePtr, leftNode, rightNode, node, index, key->keyPtr, + key->recPtr, key->recSize, newIndex, newNode, &recsRotated); + M_ExitOnError (err); + + // if we split root node - add new root + + if ( ((NodeDescPtr) rightNode->buffer)->height == btreePtr->treeDepth ) + { + err = AddNewRootNode (btreePtr, leftNode->buffer, rightNode->buffer); // Note: does not update TPT + M_ExitOnError (err); + *rootSplit = true; + } + else + { + *insertParent = true; + + if ( key->replacingKey || (recsRotated > 1) || (index > 0) ) + *updateParent = true; + } + } + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, leftNode); + return err; + +} // End of InsertNode + + +/*------------------------------------------------------------------------------- +Routine: DeleteTree - One_line_description. 
+ +Function: Brief_description_of_the_function_and_any_side_effects + +ToDo: + +Input: btreePtr - description + treePathTable - description + targetNode - description + index - description + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +OSStatus DeleteTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level ) +{ + OSStatus err; + BlockDescriptor parentNode; + BTNodeDescriptor *targetNodePtr; + UInt32 targetNodeNum; + Boolean deleteRequired; + Boolean updateRequired; + + + deleteRequired = false; + updateRequired = false; + + targetNodeNum = treePathTable[level].node; + targetNodePtr = targetNode->buffer; + PanicIf (targetNodePtr == nil, "\pDeleteTree: targetNode has nil buffer!"); + + DeleteRecord (btreePtr, targetNodePtr, index); + + //€€ coalesce remaining records? + + if ( targetNodePtr->numRecords == 0 ) // did we delete the last record? 
+ { + BlockDescriptor siblingNode; + UInt32 siblingNodeNum; + + deleteRequired = true; + + ////////////////// Get Siblings & Update Links ////////////////////////// + + siblingNodeNum = targetNodePtr->bLink; // Left Sibling Node + if ( siblingNodeNum != 0 ) + { + err = GetNode (btreePtr, siblingNodeNum, &siblingNode); + M_ExitOnError (err); + ((NodeDescPtr)siblingNode.buffer)->fLink = targetNodePtr->fLink; + err = UpdateNode (btreePtr, &siblingNode, 0, kLockTransaction); + M_ExitOnError (err); + } + else if ( targetNodePtr->kind == kBTLeafNode ) // update firstLeafNode + { + btreePtr->firstLeafNode = targetNodePtr->fLink; + } + + siblingNodeNum = targetNodePtr->fLink; // Right Sibling Node + if ( siblingNodeNum != 0 ) + { + err = GetNode (btreePtr, siblingNodeNum, &siblingNode); + M_ExitOnError (err); + ((NodeDescPtr)siblingNode.buffer)->bLink = targetNodePtr->bLink; + err = UpdateNode (btreePtr, &siblingNode, 0, kLockTransaction); + M_ExitOnError (err); + } + else if ( targetNodePtr->kind == kBTLeafNode ) // update lastLeafNode + { + btreePtr->lastLeafNode = targetNodePtr->bLink; + } + + //////////////////////// Free Empty Node //////////////////////////////// + + ClearNode (btreePtr, targetNodePtr); + + err = UpdateNode (btreePtr, targetNode, 0, kLockTransaction); + M_ExitOnError (err); + err = FreeNode (btreePtr, targetNodeNum); + M_ExitOnError (err); + } + else if ( index == 0 ) // did we delete the first record? 
+ { + updateRequired = true; // yes, so we need to update parent + } + + + if ( level == btreePtr->treeDepth ) // then targetNode->buffer is the root node + { + deleteRequired = false; + updateRequired = false; + + if ( targetNode->buffer == nil ) // then root was freed and the btree is empty + { + btreePtr->rootNode = 0; + btreePtr->treeDepth = 0; + } + else if ( ((NodeDescPtr)targetNode->buffer)->numRecords == 1 ) + { + err = CollapseTree (btreePtr, targetNode); + M_ExitOnError (err); + } + } + + + if ( updateRequired || deleteRequired ) + { + ++level; // next level + + //// Get Parent Node and index + index = treePathTable [level].index; + err = GetNode (btreePtr, treePathTable[level].node, &parentNode); + M_ExitOnError (err); + + if ( updateRequired ) + { + KeyPtr keyPtr; + UInt8 * recPtr; + UInt16 recSize; + UInt32 insertNode; + + //€€ debug: check if ptr == targetNodeNum + GetRecordByIndex (btreePtr, parentNode.buffer, index, &keyPtr, &recPtr, &recSize); + PanicIf( (*(UInt32 *) recPtr) != targetNodeNum, "\p DeleteTree: parent ptr doesn't match targetNodeNum!!"); + + // need to delete and re-insert this parent key/ptr + DeleteRecord (btreePtr, parentNode.buffer, index); + + keyPtr = (KeyPtr) GetRecordAddress( btreePtr, targetNode->buffer, 0 ); + recPtr = (UInt8 *) &targetNodeNum; + recSize = sizeof(targetNodeNum); + + err = InsertTree (btreePtr, treePathTable, keyPtr, recPtr, recSize, + &parentNode, index, level, kReplaceRecord, &insertNode); + M_ExitOnError (err); + } + else // deleteRequired + { + err = DeleteTree (btreePtr, treePathTable, &parentNode, index, level); + M_ExitOnError (err); + } + } + + + err = UpdateNode (btreePtr, targetNode, 0, kLockTransaction); + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, targetNode); + (void) ReleaseNode (btreePtr, &parentNode); + + return err; + +} // end DeleteTree + + + +///////////////////////////////// CollapseTree ////////////////////////////////// + +static OSStatus 
CollapseTree (BTreeControlBlockPtr btreePtr, + BlockDescriptor *blockPtr ) +{ + OSStatus err; + UInt32 originalRoot; + UInt32 nodeNum; + + originalRoot = btreePtr->rootNode; + + while (true) + { + if ( ((NodeDescPtr)blockPtr->buffer)->numRecords > 1) + break; // this will make a fine root node + + if ( ((NodeDescPtr)blockPtr->buffer)->kind == kBTLeafNode) + break; // we've hit bottom + + nodeNum = btreePtr->rootNode; + btreePtr->rootNode = GetChildNodeNum (btreePtr, blockPtr->buffer, 0); + --btreePtr->treeDepth; + + //// Clear and Free Current Old Root Node //// + ClearNode (btreePtr, blockPtr->buffer); + err = UpdateNode (btreePtr, blockPtr, 0, kLockTransaction); + M_ExitOnError (err); + err = FreeNode (btreePtr, nodeNum); + M_ExitOnError (err); + + //// Get New Root Node + err = GetNode (btreePtr, btreePtr->rootNode, blockPtr); + M_ExitOnError (err); + } + + if (btreePtr->rootNode != originalRoot) + M_BTreeHeaderDirty (btreePtr); + + err = UpdateNode (btreePtr, blockPtr, 0, kLockTransaction); // always update! + M_ExitOnError (err); + + return noErr; + + +/////////////////////////////////// ErrorExit /////////////////////////////////// + +ErrorExit: + (void) ReleaseNode (btreePtr, blockPtr); + return err; +} + + + +////////////////////////////////// RotateLeft /////////////////////////////////// + +/*------------------------------------------------------------------------------- + +Routine: RotateLeft - One_line_description. 
+ +Function: Brief_description_of_the_function_and_any_side_effects + +Algorithm: if rightIndex > insertIndex, subtract 1 for actual rightIndex + +Input: btreePtr - description + leftNode - description + rightNode - description + rightInsertIndex - description + keyPtr - description + recPtr - description + recSize - description + +Output: insertIndex + insertNodeNum - description + recordFit - description + recsRotated + +Result: noErr - success + != noErr - failure +-------------------------------------------------------------------------------*/ + +static OSStatus RotateLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode, + UInt16 rightInsertIndex, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + Boolean *recordFit, + UInt16 *recsRotated ) +{ + OSStatus err; + SInt32 insertSize; + SInt32 nodeSize; + SInt32 leftSize, rightSize; + SInt32 moveSize = 0; + UInt16 keyLength; + UInt16 lengthFieldSize; + UInt16 index, moveIndex; + Boolean didItFit; + + ///////////////////// Determine If Record Will Fit ////////////////////////// + + keyLength = GetKeyLength(btreePtr, keyPtr, (rightNode->kind == kBTLeafNode)); + + // the key's length field is 8-bits in HFS and 16-bits in HFS+ + if ( btreePtr->attributes & kBTBigKeysMask ) + lengthFieldSize = sizeof(UInt16); + else + lengthFieldSize = sizeof(UInt8); + + insertSize = keyLength + lengthFieldSize + recSize + sizeof(UInt16); + + if ( M_IsOdd (insertSize) ) + ++insertSize; // add pad byte; + + nodeSize = btreePtr->nodeSize; + + // add size of insert record to right node + rightSize = nodeSize - GetNodeFreeSize (btreePtr, rightNode) + insertSize; + leftSize = nodeSize - GetNodeFreeSize (btreePtr, leftNode); + + moveIndex = 0; + + while ( leftSize < rightSize ) + { + if ( moveIndex < rightInsertIndex ) + { + moveSize = GetRecordSize (btreePtr, rightNode, moveIndex) + 2; + } + else if ( moveIndex == rightInsertIndex ) + { + moveSize = 
insertSize; + } + else // ( moveIndex > rightInsertIndex ) + { + moveSize = GetRecordSize (btreePtr, rightNode, moveIndex - 1) + 2; + } + + leftSize += moveSize; + rightSize -= moveSize; + ++moveIndex; + } + + if ( leftSize > nodeSize ) // undo last move + { + leftSize -= moveSize; + rightSize += moveSize; + --moveIndex; + } + + if ( rightSize > nodeSize ) // record won't fit - failure, but not error + { + *insertIndex = 0; + *insertNodeNum = 0; + *recordFit = false; + *recsRotated = 0; + + return noErr; + } + + // we've found balance point, moveIndex == number of records moved into leftNode + + + //////////////////////////// Rotate Records ///////////////////////////////// + + *recsRotated = moveIndex; + *recordFit = true; + index = 0; + + while ( index < moveIndex ) + { + if ( index == rightInsertIndex ) // insert new record in left node + { + UInt16 leftInsertIndex; + + leftInsertIndex = leftNode->numRecords; + + didItFit = InsertKeyRecord (btreePtr, leftNode, leftInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + Panic ("\pRotateLeft: InsertKeyRecord (left) returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndex = leftInsertIndex; + *insertNodeNum = rightNode->bLink; + } + else + { + didItFit = RotateRecordLeft (btreePtr, leftNode, rightNode); + if ( !didItFit ) + { + Panic ("\pRotateLeft: RotateRecordLeft returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + } + + ++index; + } + + if ( moveIndex <= rightInsertIndex ) // then insert new record in right node + { + rightInsertIndex -= index; // adjust for records already rotated + + didItFit = InsertKeyRecord (btreePtr, rightNode, rightInsertIndex, + keyPtr, keyLength, recPtr, recSize); + if ( !didItFit ) + { + Panic ("\pRotateLeft: InsertKeyRecord (right) returned false!"); + err = fsBTBadRotateErr; + goto ErrorExit; + } + + *insertIndex = rightInsertIndex; + *insertNodeNum = leftNode->fLink; + } + + + return noErr; + + + 
////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + *insertIndex = 0; + *insertNodeNum = 0; + *recordFit = false; + *recsRotated = 0; + + return err; +} + + + +/////////////////////////////////// SplitLeft /////////////////////////////////// + +static OSStatus SplitLeft (BTreeControlBlockPtr btreePtr, + BlockDescriptor *leftNode, + BlockDescriptor *rightNode, + UInt32 rightNodeNum, + UInt16 index, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + UInt16 *insertIndex, + UInt32 *insertNodeNum, + UInt16 *recsRotated ) +{ + OSStatus err; + NodeDescPtr left, right; + UInt32 newNodeNum; + Boolean recordFit; + + + ///////////////////////////// Compare Nodes ///////////////////////////////// + + right = rightNode->buffer; + left = leftNode->buffer; + + PanicIf ( right->bLink != 0 && left == 0, "\p SplitLeft: left sibling missing!?" ); + + /* type should be kBTLeafNode or kBTIndexNode */ + + if ( (right->height == 1) && (right->kind != kBTLeafNode) ) + return fsBTInvalidNodeErr; + + if ( left != nil ) + { + if ( left->fLink != rightNodeNum ) + return fsBTInvalidNodeErr; //€€ E_BadSibling ? + + if ( left->height != right->height ) + return fsBTInvalidNodeErr; //€€ E_BadNodeHeight ? + + if ( left->kind != right->kind ) + return fsBTInvalidNodeErr; //€€ E_BadNodeType ? 
+ } + + + ///////////////////////////// Allocate Node ///////////////////////////////// + + err = AllocateNode (btreePtr, &newNodeNum); + M_ExitOnError (err); + + + /////////////// Update Forward Link In Original Left Node /////////////////// + + if ( left != nil ) + { + left->fLink = newNodeNum; + err = UpdateNode (btreePtr, leftNode, 0, kLockTransaction); + M_ExitOnError (err); + } + + + /////////////////////// Initialize New Left Node //////////////////////////// + + err = GetNewNode (btreePtr, newNodeNum, leftNode); + M_ExitOnError (err); + + left = leftNode->buffer; + left->fLink = rightNodeNum; + + + // Steal Info From Right Node + + left->bLink = right->bLink; + left->kind = right->kind; + left->height = right->height; + + right->bLink = newNodeNum; // update Right bLink + + if ( (left->kind == kBTLeafNode) && (left->bLink == 0) ) + { + // if we're adding a new first leaf node - update BTreeInfoRec + + btreePtr->firstLeafNode = newNodeNum; + M_BTreeHeaderDirty (btreePtr); //€€ AllocateNode should have set the bit already... + } + + ////////////////////////////// Rotate Left ////////////////////////////////// + + err = RotateLeft (btreePtr, left, right, index, keyPtr, recPtr, recSize, + insertIndex, insertNodeNum, &recordFit, recsRotated); + M_ExitOnError (err); + + return noErr; + +ErrorExit: + + (void) ReleaseNode (btreePtr, leftNode); + (void) ReleaseNode (btreePtr, rightNode); + + //€€ Free new node if allocated? 
+ + *insertIndex = 0; + *insertNodeNum = 0; + *recsRotated = 0; + + return err; +} + + + +/////////////////////////////// RotateRecordLeft //////////////////////////////// + +static Boolean RotateRecordLeft (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ) +{ + UInt16 size; + UInt8 * recPtr; + Boolean recordFit; + + size = GetRecordSize (btreePtr, rightNode, 0); + recPtr = GetRecordAddress (btreePtr, rightNode, 0); + + recordFit = InsertRecord (btreePtr, leftNode, leftNode->numRecords, recPtr, size); + + if ( !recordFit ) + return false; + + DeleteRecord (btreePtr, rightNode, 0); + + return true; +} + + +//////////////////////////////// AddNewRootNode ///////////////////////////////// + +static OSStatus AddNewRootNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr leftNode, + NodeDescPtr rightNode ) +{ + OSStatus err; + BlockDescriptor rootNode; + UInt32 rootNum; + KeyPtr keyPtr; + Boolean didItFit; + UInt16 keyLength; + + PanicIf (leftNode == nil, "\pAddNewRootNode: leftNode == nil"); + PanicIf (rightNode == nil, "\pAddNewRootNode: rightNode == nil"); + + + /////////////////////// Initialize New Root Node //////////////////////////// + + err = AllocateNode (btreePtr, &rootNum); + M_ExitOnError (err); + + err = GetNewNode (btreePtr, rootNum, &rootNode); + M_ExitOnError (err); + + ((NodeDescPtr)rootNode.buffer)->kind = kBTIndexNode; + ((NodeDescPtr)rootNode.buffer)->height = ++btreePtr->treeDepth; + + + ///////////////////// Insert Left Node Index Record ///////////////////////// + + keyPtr = (KeyPtr) GetRecordAddress (btreePtr, leftNode, 0); + keyLength = GetKeyLength(btreePtr, keyPtr, false); + + didItFit = InsertKeyRecord ( btreePtr, rootNode.buffer, 0, keyPtr, keyLength, + (UInt8 *) &rightNode->bLink, 4 ); + + PanicIf ( !didItFit, "\pAddNewRootNode:InsertKeyRecord failed for left index record"); + + + //////////////////// Insert Right Node Index Record ///////////////////////// + + keyPtr = (KeyPtr) GetRecordAddress (btreePtr, 
rightNode, 0); + keyLength = GetKeyLength(btreePtr, keyPtr, false); + + didItFit = InsertKeyRecord ( btreePtr, rootNode.buffer, 1, keyPtr, keyLength, + (UInt8 *) &leftNode->fLink, 4 ); + + PanicIf ( !didItFit, "\pAddNewRootNode:InsertKeyRecord failed for right index record"); + + + /////////////////////////// Release Root Node /////////////////////////////// + + err = UpdateNode (btreePtr, &rootNode, 0, kLockTransaction); + M_ExitOnError (err); + + // update BTreeInfoRec + + btreePtr->rootNode = rootNum; + btreePtr->flags |= kBTHeaderDirty; + + return noErr; + + + ////////////////////////////// Error Exit /////////////////////////////////// + +ErrorExit: + + return err; +} + + +static UInt16 GetKeyLength ( const BTreeControlBlock *btreePtr, const BTreeKey *key, Boolean forLeafNode ) +{ + UInt16 length; + + if ( forLeafNode || btreePtr->attributes & kBTVariableIndexKeysMask ) + length = KeyLength (btreePtr, key); // just use actual key length + else + length = btreePtr->maxKeyLength; // fixed sized index key (i.e. HFS) //€€ shouldn't we clear the pad bytes? + + return length; +} + diff --git a/bsd/hfs/hfscommon/Catalog/Catalog.c b/bsd/hfs/hfscommon/Catalog/Catalog.c new file mode 100644 index 000000000..c71d5c7f9 --- /dev/null +++ b/bsd/hfs/hfscommon/Catalog/Catalog.c @@ -0,0 +1,1856 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: Catalog.c + + Contains: Catalog Manager Implementation + + Version: HFS Plus 1.0 + + Copyright: © 1996-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: xxx put technology here xxx + + Writers: + + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + 2/2/99 djb Fix CreateFileIDRef to copy entire name when creating thread record. + 1/7/99 djb Use a max bytes of 256 in calls to ConvertUnicodeToUTF8. + 12/9/98 djb UpdateCatalogNode only updates vcbLsMod if contentModDate changes. + 11/5/98 djb Add support for UTF-8 names. + 8/31/98 djb GetTimeLocal now takes an input. + 7/8/98 ser Added accessDate and AttributeModDate init. to create routine. + 6/5/98 djb Added CreateFileIDRef routine. + 6/3/98 djb Merge MoveCatalogRecord and RenameCatalogRecord into one routine. + 4/17/98 djb Add VCB locking. + 4/6/98 djb Catalog iterators now need to be released. + 4/6/98 djb Removed CreateVolumeCatalogCache and DisposeVolumeCatalogCache (obsolete). + 3/31/98 djb Make UpdateCatalogNode interface thread-safe. + 3/31/98 djb Sync up with final HFSVolumes.h header file. + 3/17/98 djb Fixed CreateCatalogNode interface to take kCatalogFolderNode and + kCatalogFileNode as type input. + + 12/10/97 DSH 2201501, UpdateCatalogNode to only update CatalogRecords which + are under 2 Gig by checking the overloaded valence field. + 11/20/97 djb Radar #2002357. 
Fixing retry mechanism. + 11/17/97 djb PrepareInputName routine now returns an error. + 11/13/97 djb Radar #1683572. Add new GetCatalogOffspringFile routine for + PBGetFileInfo calls (support used to be in HFSPathnameCalls.a). + 11/7/97 msd Change calls to the wrapper routine CompareUnicodeNames() to use + the underlying routine FastUnicodeCompare() instead. + 10/19/97 msd Bug 1684586. GetCatInfo and SetCatInfo use only contentModDate. + 10/17/97 djb Change Catalog Create/Rename to use ConvertInputNameToUnicode. + 10/13/97 djb Update volumeNameEncodingHint when changing volume name. Change + name of GetSystemTextEncoding to GetDefaultTextEncoding. + 10/1/97 djb Add new catalog iterators and node cache to improve performance. + 9/12/97 msd In CreateCatalogNode, make sure parent is a folder, not a file. + 9/10/97 msd In RenameCatalogNodeUnicode, remove HFS-only code and make sure + the conversion context is set up and marked in the volume's + bitmap. + 9/9/97 DSH Added RelString_Glue to avoid having to link DFAEngine with + Interface.o + 9/8/97 msd Make sure a folder's modifyDate is set whenever its + contentModDate is set. In UpdateCatalogNode, make sure the + modifyDate is greater or equal to contentModDate; do a DebugStr + only for debug builds. + 9/7/97 djb Make some DebuStrs HFS_DIAGNOSTIC only. + 9/4/97 djb Add more Catalog Iterators, Instrument RelString. + 9/4/97 msd Remove call to PropertyDeleteObject. + 8/18/97 DSH Use RelString instead of FastRelString in DFA to avoid loading + branch island instead of table. + 8/14/97 djb Remove hard link support. Switch over to FastRelString. + 8/8/97 djb Fixed bugs in LinkCatalogNode. + 8/5/97 djb Don't restore vcbNxtCNID if thread exists (radar #1670614). + 7/25/97 DSH Pass heuristicHint to BTSearchRecord from GetCatalogOffspring. + 7/18/97 msd Include LowMemPriv.h. In LinkCatalogNode, now sets the + kInsertedFileThread2 flag correctly; should only affect error + recovery code. 
+ 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 7/8/97 DSH Loading PrecompiledHeaders from define passed in on C line + 6/27/97 msd Add PBLongRename SPI. Added RenameCatalogNodeUnicode call, which + takes Unicode names for HFS Plus volumes. Removed calls to + Attributes module when creating, renaming or moving nodes. + 6/24/97 djb Validate the mangled name matches in + LocateCatalogNodeByMangledName. + 6/24/97 djb Add hard link support. + 6/20/97 msd Use contentModDate and attributeModDate fields instead of + modifyDate. Made CopyCatalogNodeData public. + 6/18/97 djb Add routines LocateCatalogNodeWithRetry & UpdateVolumeEncodings. + Add mangled name retry to DeleteCatalogNode, MoveCatalogNode and + RenameCatalogNode. + 6/13/97 djb Major changes for longname support and multiple scripts. + 6/9/97 msd Instead of calling GetDateTime, call GetTimeUTC or GetTimeLocal. + Dates on an HFS Plus volume need to be converted to/from UTC. + 6/4/97 djb Set textEncoding hint in Rename and Create. TrashCatalogIterator + was not always called with the correct folder ID. + 5/21/97 djb Turn off recursive iterators. + 5/19/97 djb Add support for B-tree iterators to GetCatalogOffspring. + 5/9/97 djb Get in sync with FilesInternal.i. + 4/24/97 djb First checked into Common System Project. + 4/11/97 DSH Use extended VCB fields catalogRefNum, and extentsRefNum. + 4/4/97 djb Get in sync with volume format changes. + 3/31/97 djb Additional HFS Plus optimization added to GetCatalogNode. + 3/28/97 djb Add Optimization to GetCatalogNode. + 3/27/97 djb Unicode conversion routines now use byte counts. + 3/17/97 DSH Casting to compile with SC, GetRecordSize -> + GetCatalogRecordSize, moved some prototypes to extern. + 3/5/97 msd Add calls to Property Manager when catalog entries are created, + deleted, moved, renamed. + 2/19/97 djb HFS Plus catalog keys no longer have a pad word. 
+ 1/24/97 DSH (djb) GetCatalogOffSpring() fix volume->vcbDirIDM = 0 + 1/23/97 DSH Truncate name to CMMaxCName characters in PrepareInputName(). + 1/14/97 djb Fixed RenameCatalogNode for case when just a cnid is passed. + 1/13/97 djb Added support for varaible sized thread records in HFS+. + 1/11/97 DSH Moving PrepareInputName() declaration fo FilesInternal.h + 1/10/97 djb CopyCatalogNodeData was trashing the resource extents on HFS+. + 1/10/97 djb CopyCatalogNodeData was trashing dataLogicalSize on HFS+ disks. + 1/9/97 djb Get in sync with new HFSVolumesPriv.i. + 1/6/97 djb Added name length checking to CompareExtendedCatalogKeys. Fixed + GetCatalogOffspring - it was not correctly passing the HFS+ flag + to PrepareOutputName. Fixed BuildKey for HFS+ keys. + 1/3/97 djb Fixed termination bug in GetCatalogOffspring. Added support for + large keys. Integrated latest HFSVolumesPriv.h changes. + 12/19/96 DSH Changed call from C_FlushMDB to HFS+ savy + FlushVolumeControlBlock() + 12/19/96 djb Add new B-tree manager... + 12/13/96 djb Fixing bugs for HFS+. Switch to HFSUnicodeWrappers routines. + 12/12/96 djb Changed the SPI for GetCatalogNode, GetCatalogOffspring, and + UpdateCatalogNode. + 12/12/96 DSH Removed static function declarations for functions used by + FileIDServices.c. + 11/11/96 djb Added support for HFS+ Unicode names. Major changes throughout. + 11/4/96 djb Added FSSpec output to GetCatalogNode and GetCatalogOffspring + routines. 
+ 10/29/96 djb first checked in + +*/ + +#pragma segment Catalog + +#include +#include + +#include "../../hfs_endian.h" + +#include "../headers/FileMgrInternal.h" +#include "../headers/BTreesInternal.h" +#include "../headers/CatalogPrivate.h" +#include "../headers/HFSUnicodeWrappers.h" +#include "../headers/HFSInstrumentation.h" + + +// External routines + +extern SInt32 FastRelString( ConstStr255Param str1, ConstStr255Param str2 ); + +extern SInt16 RelString_Glue(StringPtr pStr1, StringPtr pStr2); + + +// Internal routines + +static OSErr IterateCatalogNode(ExtendedVCB *volume, CatalogIterator *catalogIterator, + UInt16 index, CatalogNodeData *nodeData, + HFSCatalogNodeID *nodeID, SInt16 *nodeType); + +void InitCatalogThreadRecord(ExtendedVCB *volume, UInt32 nodeType, CatalogKey *nodeKey, + CatalogRecord *record, UInt32 *threadSize); + +void InitCatalogRecord(ExtendedVCB *volume, UInt32 nodeType, UInt32 textEncoding, + CatalogRecord *record, UInt32 *recordSize, HFSCatalogNodeID catalogNodeID); + +#if HFS_DIAGNOSTIC + #include + #define PRINTIT(A) kprintf A; +#else + #define PRINTIT(A) +#endif /* HFS_DIAGNOSTIC */ + +//_________________________________________________________________________________ +// Exported Routines +// +// CreateCatalogNode - Creates a new folder or file CNode. +// DeleteCatalogNode - Deletes an existing folder or file CNode. +// GetCatalogNode - Locates an existing folder or file CNode. +// GetCatalogOffspringFile - Gets an offspring file record from a folder. +// GetCatalogOffspring - Gets an offspring record from a folder. +// MoveRenameCatalogNode - Moves/Renames an existing folder or file CNode. +// UpdateCatalogNode - Marks a Catalog BTree node as 'dirty'. +// CreateFileIDRef - Creates a file thread record for hfs file node +// CompareCatalogKeys - Compares two catalog keys. 
+// +//_________________________________________________________________________________ + + +//_________________________________________________________________________________ +// +// About date/time values: +// +// Date/time values stored in control blocks and generic structures (such as +// CatalogNodeData) are always stored in local time. Values stored in HFS volume +// format structures (such as B-tree records) are also stored in local time. +// Values stored in HFS Plus format structures are stored in UTC. +//_________________________________________________________________________________ + + +// Implementation + + +//_________________________________________________________________________________ +// Routine: CreateCatalogNode +// +// Function: Creates a new folder or file CNode. A new folder or file +// record is added to the catalog BTree. If a folder CNode is +// being created, a new thread record is also added. +// +//_________________________________________________________________________________ + +OSErr +CreateCatalogNode ( ExtendedVCB *volume, HFSCatalogNodeID parentID, ConstUTF8Param name, + UInt32 nodeType, HFSCatalogNodeID *catalogNodeID, UInt32 *catalogHint) +{ + CatalogKey *nodeKey; + CatalogRecord nodeData; // 520 bytes + UInt32 nodeDataSize; + CatalogRecord parentThreadData; // 520 bytes + HFSCatalogNodeID parentsParentID; + CatalogName *parentNamePtr; + UInt32 tempHint; + UInt32 textEncoding; + UInt16 tempSize; + OSErr result; + Boolean isHFSPlus = (volume->vcbSigWord == kHFSPlusSigWord); + FCB *fcb; + FSBufferDescriptor btRecord; + BTreeIterator iterator; + BTreeIterator threadIter; + HFSCatalogNodeID nextCNID; + + if (nodeType != kCatalogFolderNode && nodeType != kCatalogFileNode) + return paramErr; + + fcb = GetFileControlBlock(volume->catalogRefNum); + nodeKey = (CatalogKey *) &iterator.key; + + //--- make sure parent exists (by locating the parent's thread record) + + result = LocateCatalogThread(volume, parentID, &parentThreadData, 
&tempSize, &tempHint); + ReturnIfError(result); + + TrashCatalogIterator(volume, parentID); // invalidate any iterators for this parentID + + // save copy of parent's parentID and name. + + if (isHFSPlus) + { + if (parentThreadData.recordType != kHFSPlusFolderThreadRecord) + return dirNFErr; + + parentsParentID = parentThreadData.hfsPlusThread.parentID; + parentNamePtr = (CatalogName*) &parentThreadData.hfsPlusThread.nodeName; + } + else + { + if (parentThreadData.recordType != kHFSFolderThreadRecord) + return dirNFErr; + + parentsParentID = parentThreadData.hfsThread.parentID; + parentNamePtr = (CatalogName*) &parentThreadData.hfsThread.nodeName; + } + + // invalidate cache for parent since its about to change + InvalidateCatalogNodeCache(volume, parentsParentID); + + //--- build key for new catalog node + result = BuildCatalogKeyUTF8(volume, parentID, name, kUndefinedStrLen, nodeKey, &textEncoding); + ReturnIfError(result); + + /* make sure it doesn't exist */ + result = BTSearchRecord(fcb, &iterator, kInvalidMRUCacheKey, NULL, NULL, &iterator); + if (result != btNotFound) + return (cmExists); + + nextCNID = volume->vcbNxtCNID; + if (!isHFSPlus && nextCNID == 0xFFFFFFFF) + return (dskFulErr); + + //--- build thread record for new CNode + if (isHFSPlus || nodeType == kCatalogFolderNode) + { + CatalogRecord threadData; // 520 bytes + UInt32 threadSize; + + btRecord.bufferAddress = &threadData; + btRecord.itemSize = threadSize; + btRecord.itemCount = 1; + InitCatalogThreadRecord(volume, nodeType, nodeKey, &threadData, &threadSize); +TryNextID: + BuildCatalogKey(nextCNID, NULL, isHFSPlus, (CatalogKey*) &threadIter.key); + result = BTInsertRecord(fcb, &threadIter, &btRecord, threadSize); + if (result == btExists && isHFSPlus) + { + /* + * Allow CNIDs on HFS Plus volumes to wrap around + */ + ++nextCNID; + if (nextCNID < kHFSFirstUserCatalogNodeID) + { + volume->vcbAtrb |= kHFSCatalogNodeIDsReusedMask; + volume->vcbFlags |= 0xFF00; + nextCNID = 
kHFSFirstUserCatalogNodeID; + } + goto TryNextID; + } + ReturnIfError(result); + } + + //--- initialize catalog data record (for folder or file) + btRecord.bufferAddress = &nodeData; + btRecord.itemSize = nodeDataSize; + btRecord.itemCount = 1; + InitCatalogRecord(volume, nodeType, textEncoding, &nodeData, &nodeDataSize, nextCNID); + + //--- add new folder/file record to catalog BTree + result = BTInsertRecord(fcb, &iterator, &btRecord, nodeDataSize ); + if (result) + { + if (result == btExists) + result = cmExists; + + if (isHFSPlus || nodeType == kCatalogFolderNode) + (void) BTDeleteRecord(fcb, &threadIter); + + return result; + } + + /* + * Return the CNID actually used. Update the volume's next ID. + */ + *catalogNodeID = nextCNID; + if (++nextCNID < kHFSFirstUserCatalogNodeID) + { + volume->vcbAtrb |= kHFSCatalogNodeIDsReusedMask; + volume->vcbFlags |= 0xFF00; + nextCNID = kHFSFirstUserCatalogNodeID; + } + volume->vcbNxtCNID = nextCNID; + + //--- update counters... + + result = UpdateFolderCount( volume, parentsParentID, parentNamePtr, nodeData.recordType, kNoHint, +1); + ReturnIfError(result); /* XXX what about cleanup ??? 
*/ + + AdjustVolumeCounts(volume, nodeData.recordType, +1); + + result = FlushCatalog(volume); + + return result; + +} // end CreateCatalogNode + + +/* + * initialize catalog data record (for folder or file) + */ +void InitCatalogRecord(ExtendedVCB *volume, UInt32 nodeType, UInt32 textEncoding, CatalogRecord *record, UInt32 *recordSize, HFSCatalogNodeID nodeID) +{ + UInt32 timeStamp; + + ClearMemory(record, sizeof(CatalogRecord)); // first clear the record + + if (volume->vcbSigWord == kHFSPlusSigWord) + { + timeStamp = GetTimeUTC(); // get current date/time (universal) + + UpdateVolumeEncodings(volume, textEncoding); + + if (nodeType == kCatalogFolderNode ) + { + record->recordType = kHFSPlusFolderRecord; + record->hfsPlusFolder.folderID = nodeID; + record->hfsPlusFolder.createDate = timeStamp; + record->hfsPlusFolder.contentModDate = timeStamp; + record->hfsPlusFolder.accessDate = timeStamp; + record->hfsPlusFolder.attributeModDate = timeStamp; + record->hfsPlusFolder.textEncoding = textEncoding; + *recordSize = sizeof(HFSPlusCatalogFolder); + // threadType = kHFSPlusFolderThreadRecord; + } + else if (nodeType == kCatalogFileNode ) + { + record->recordType = kHFSPlusFileRecord; + record->hfsPlusFile.fileID = nodeID; + record->hfsPlusFile.createDate = timeStamp; + record->hfsPlusFile.contentModDate = timeStamp; + record->hfsPlusFile.accessDate = timeStamp; + record->hfsPlusFile.attributeModDate = timeStamp; + record->hfsPlusFile.flags |= kHFSThreadExistsMask; + record->hfsPlusFile.textEncoding = textEncoding; + *recordSize = sizeof(HFSPlusCatalogFile); + // threadType = kHFSPlusFileThreadRecord; + } + } + else /* standard hfs */ + { + timeStamp = GetTimeLocal(true); // get current local date/time + + if (nodeType == kCatalogFolderNode ) + { + record->recordType = kHFSFolderRecord; + record->hfsFolder.folderID = nodeID; + record->hfsFolder.createDate = timeStamp; + record->hfsFolder.modifyDate = timeStamp; + *recordSize = sizeof(HFSCatalogFolder); + // threadType = 
kHFSFolderThreadRecord; + } + else if (nodeType == kCatalogFileNode ) + { + record->recordType = kHFSFileRecord; + record->hfsFile.fileID = nodeID; + record->hfsFile.createDate = timeStamp; + record->hfsFile.modifyDate = timeStamp; + *recordSize = sizeof(HFSCatalogFile); + } + } +} + + +void InitCatalogThreadRecord(ExtendedVCB *volume, UInt32 nodeType, CatalogKey *nodeKey, + CatalogRecord *record, UInt32 *threadSize) +{ + ClearMemory(record, sizeof(CatalogRecord) ); // first clear the record + + if (volume->vcbSigWord == kHFSPlusSigWord) + { + if (nodeType == kCatalogFolderNode) + record->recordType = kHFSPlusFolderThreadRecord; + else + record->recordType = kHFSPlusFileThreadRecord; + record->hfsPlusThread.parentID = nodeKey->hfsPlus.parentID; + *threadSize = sizeof(record->hfsPlusThread); + + // HFS Plus has varaible sized threads so adjust to actual length + *threadSize -= ( sizeof(record->hfsPlusThread.nodeName.unicode) - + (nodeKey->hfsPlus.nodeName.length * sizeof(UniChar)) ); + BlockMoveData(&nodeKey->hfsPlus.nodeName, &record->hfsPlusThread.nodeName, + sizeof(UniChar) * (nodeKey->hfsPlus.nodeName.length + 1)); + } + else // classic HFS + { + if (nodeType == kCatalogFolderNode) + record->recordType = kHFSFolderThreadRecord; + else + record->recordType = kHFSFileThreadRecord; + record->hfsThread.parentID = nodeKey->hfs.parentID; + *threadSize = sizeof(record->hfsThread); + BlockMoveData(&nodeKey->hfs.nodeName, &record->hfsThread.nodeName, + nodeKey->hfs.nodeName[0] + 1); + } +} + + +//_________________________________________________________________________________ +// Routine: DeleteCatalogNode +// +// Function: Deletes an existing folder or file CNode. The thread record +// is also deleted for directories and files that have thread +// records. +// +// The valence for a folder must be zero before it can be deleted. +// The rootfolder cannot be deleted. 
+// +//_________________________________________________________________________________ + +OSErr +DeleteCatalogNode(ExtendedVCB *volume, HFSCatalogNodeID parentID, ConstUTF8Param name, UInt32 hint) +{ + CatalogKey key; // 518 bytes + CatalogRecord data; // 520 bytes + UInt32 nodeHint; + HFSCatalogNodeID nodeID; + HFSCatalogNodeID nodeParentID; + UInt16 nodeType; + OSErr result; + Boolean isHFSPlus = (volume->vcbSigWord == kHFSPlusSigWord); + + //--- locate subject catalog node + + result = BuildCatalogKeyUTF8(volume, parentID, name, kUndefinedStrLen, &key, NULL); + ReturnIfError(result); + + result = LocateCatalogNodeByKey(volume, hint, &key, &data, &nodeHint); + + // if we did not find it by name, then look for an embedded file ID in a mangled name + if ( (result == cmNotFound) && isHFSPlus ) + result = LocateCatalogNodeByMangledName(volume, parentID, name, kUndefinedStrLen, &key, &data, &nodeHint); + ReturnIfError(result); + + nodeParentID = isHFSPlus ? key.hfsPlus.parentID : key.hfs.parentID; // establish real parent cnid + nodeType = data.recordType; // establish cnode type + nodeID = 0; + + switch (nodeType) + { + case kHFSFolderRecord: + if (data.hfsFolder.valence != 0) // is it empty? + return cmNotEmpty; + + nodeID = data.hfsFolder.folderID; + break; + + case kHFSPlusFolderRecord: + if (data.hfsPlusFolder.valence != 0) // is it empty? + return cmNotEmpty; + + nodeID = data.hfsPlusFolder.folderID; + break; + + case kHFSFileRecord: + if (data.hfsFile.flags & kHFSThreadExistsMask) + nodeID = data.hfsFile.fileID; + break; + + case kHFSPlusFileRecord: + nodeID = data.hfsPlusFile.fileID; // note: HFS Plus files always have a thread + break; + + default: + return cmNotFound; + } + + + if (nodeID == fsRtDirID) // is this the root folder? + return cmRootCN; // sorry, you can't delete the root! 
+ + TrashCatalogIterator(volume, nodeParentID); // invalidate any iterators for this parentID + InvalidateCatalogNodeCache(volume, nodeParentID); // and invalidate node cache + + //--- delete catalog records for CNode and file threads if they exist + + result = DeleteBTreeRecord(volume->catalogRefNum, &key); + ReturnIfError(result); + + if ( nodeID ) + { + CatalogKey threadKey; // 518 bytes + + BuildCatalogKey(nodeID, NULL, isHFSPlus, &threadKey); + + (void) DeleteBTreeRecord(volume->catalogRefNum, &threadKey); // ignore errors for thread deletes + } + + //--- update counters... + + result = UpdateFolderCount(volume, nodeParentID, NULL, nodeType, kNoHint, -1); + ReturnIfError(result); + + AdjustVolumeCounts(volume, nodeType, -1); // all done with this file or folder + + result = FlushCatalog(volume); + + return result; + +} // end DeleteCatalogNode + + +//_________________________________________________________________________________ +// Routine: GetCatalogNode +// +// Function: Locates an existing folder or file CNode and pointer to the CNode data record. 
+// +//_________________________________________________________________________________ + +OSErr +GetCatalogNode( ExtendedVCB *volume, HFSCatalogNodeID parentID, ConstUTF8Param name, UInt32 nameLen, UInt32 hint, + CatalogNodeData *nodeData, UInt32 *newHint) +{ + CatalogKey *key; + CatalogRecord *record; + BTreeIterator searchIterator; + FSBufferDescriptor btRecord; + ByteCount utf8len; + UInt32 heuristicHint; + UInt32 *cachedHint; + FCB *fcb; + OSErr result = noErr; + UInt16 dataSize; + Boolean isHFSPlus = (volume->vcbSigWord == kHFSPlusSigWord); + + if (isHFSPlus) { + btRecord.bufferAddress = nodeData; + btRecord.itemSize = sizeof(CatalogNodeData); + } else { + btRecord.bufferAddress = &nodeData->cnd_extra; + btRecord.itemSize = sizeof(HFSCatalogFile); + } + + btRecord.itemCount = 1; + record = (CatalogRecord *) btRecord.bufferAddress; + key = (CatalogKey *) &searchIterator.key; + + if (name && nameLen == kUndefinedStrLen) + nameLen = strlen(name); + + result = BuildCatalogKeyUTF8(volume, parentID, name, nameLen, key, NULL); + ReturnIfError(result); + + fcb = GetFileControlBlock(volume->catalogRefNum); + searchIterator.hint.nodeNum = *newHint; + searchIterator.hint.index = 0; + + /* + * We pass a 2nd hint/guess into BTSearchRecord. The heuristicHint + * is a mapping of dirID and nodeNumber, in hopes that the current + * search will be in the same node as the last search with the same + * parentID. 
+ */ + if (name != NULL && GetMRUCacheBlock(parentID, volume->hintCachePtr, (Ptr *)&cachedHint) == 0) + heuristicHint = *cachedHint; + else + heuristicHint = kInvalidMRUCacheKey; + + result = BTSearchRecord(fcb, &searchIterator, heuristicHint, &btRecord, &dataSize, &searchIterator); + if (result == btNotFound) + result = cmNotFound; + + if (name != NULL && result == noErr) + InsertMRUCacheBlock(volume->hintCachePtr, parentID, (Ptr) &(searchIterator.hint.nodeNum)); + + if (result == noErr) { + CatalogName *nodeName = NULL; + HFSCatalogNodeID threadParentID; + + /* if we got a thread record, then go look up real record */ + switch (record->recordType) { + + case kHFSFileThreadRecord: + case kHFSFolderThreadRecord: + threadParentID = record->hfsThread.parentID; + nodeName = (CatalogName *) &record->hfsThread.nodeName; + break; + + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + threadParentID = record->hfsPlusThread.parentID; + nodeName = (CatalogName *) &record->hfsPlusThread.nodeName; + break; + + default: + threadParentID = 0; + *newHint = searchIterator.hint.nodeNum; + break; + } + if (threadParentID) { + BuildCatalogKey(threadParentID, nodeName, isHFSPlus, key); + searchIterator.hint.nodeNum = kNoHint; + searchIterator.hint.index = 0; + + result = BTSearchRecord(fcb, &searchIterator, kInvalidMRUCacheKey, &btRecord, &dataSize, &searchIterator); + if (result == btNotFound) + result = cmNotFound; + if (result == noErr) + *newHint = searchIterator.hint.nodeNum; + } + } + + /* + * If we did not find it by name, then look for an embedded + * file ID in a mangled name. + */ + if ( result == cmNotFound && isHFSPlus) + result = LocateCatalogNodeByMangledName(volume, parentID, name, nameLen, + key, record, newHint); + + /* + * In Mac OS X there can also be HFS filenames that + * could not be encoded using the default encoding. + * In which case they were encoded as MacRoman. 
+ */ + if (result == cmNotFound && !isHFSPlus) { + Str31 hfsName; + + utf8_to_mac_roman(nameLen, name, hfsName); + result = LocateCatalogNode(volume, parentID, (CatalogName*)hfsName, + 0, key, record, newHint); + } + ReturnIfError(result); + + nodeData->cnm_parID = isHFSPlus ? key->hfsPlus.parentID : key->hfs.parentID; + + if (nodeData->cnm_flags & kCatNameNoCopyName) { + if (! isHFSPlus) { + if (record->recordType == kHFSFolderRecord || record->recordType == kHFSFileRecord) + CopyCatalogNodeData(volume, record, nodeData); + else + result = cmNotFound; + } + } + else { + nodeData->cnm_nameptr = nodeData->cnm_namespace; + if ( isHFSPlus ) { + result = utf8_encodestr(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length * sizeof(UniChar), + nodeData->cnm_namespace, (size_t *)&utf8len, + MAXHFSVNODELEN + 1, ':', 0); + + /* Need to allocate buffer large enough */ + if (result == ENAMETOOLONG) { + utf8len = utf8_encodelen(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length * sizeof(UniChar), + ':', 0); + nodeData->cnm_nameptr = NewPtr(utf8len + 1); + nodeData->cnm_flags |= kCatNameIsAllocated; + result = utf8_encodestr(key->hfsPlus.nodeName.unicode, + key->hfsPlus.nodeName.length * sizeof(UniChar), + nodeData->cnm_nameptr, (size_t *)&utf8len, + utf8len + 1, ':', 0); + } + } + else { // classic HFS + + /* convert data to HFS Plus format */ + if (record->recordType == kHFSFolderRecord || record->recordType == kHFSFileRecord) { + CopyCatalogNodeData(volume, record, nodeData); + result = hfs_to_utf8(volume, key->hfs.nodeName, MAXHFSVNODELEN + 1, + &utf8len, nodeData->cnm_namespace); + /* Need to allocate buffer large enough */ + if (result == ENAMETOOLONG) { + nodeData->cnm_nameptr = NewPtr(utf8len + 1); + nodeData->cnm_flags |= kCatNameIsAllocated; + result = hfs_to_utf8(volume, key->hfs.nodeName, utf8len + 1, + &utf8len, nodeData->cnm_nameptr); + } else if (result) { + /* + * When an HFS name cannot be encoded with the current + * volume encoding we use 
MacRoman as a fallback. + */ + result = mac_roman_to_utf8(key->hfs.nodeName, MAXHFSVNODELEN + 1, + &utf8len, nodeData->cnm_namespace); + } + } else + result = cmNotFound; + } + + nodeData->cnm_length = utf8len; + if (result && (nodeData->cnm_flags & kCatNameIsAllocated)) + { + DisposePtr(nodeData->cnm_nameptr); + nodeData->cnm_flags &= ~kCatNameIsAllocated; + nodeData->cnm_nameptr = 0; /* Just to be clean */ + } + } + + + #if DEBUG_BUILD + if ( nodeData->cnd_nodeID > volume->vcbNxtCNID || nodeData->cnd_nodeID == 0) + DebugStr("\pGetCatalogNode bad file ID found!"); + #endif + + return result; + +} // end GetCatalogNode + + +UInt32 +GetDirEntrySize(BTreeIterator *bip, ExtendedVCB * vol) +{ + CatalogKey * ckp; + CatalogName * cnp; + ByteCount utf8chars; + UInt8 name[kdirentMaxNameBytes + 1]; + OSErr result; + + ckp = (CatalogKey*) &bip->key; + + if (vol->vcbSigWord == kHFSPlusSigWord) { + cnp = (CatalogName*) &ckp->hfsPlus.nodeName; + utf8chars = utf8_encodelen(cnp->ustr.unicode, + cnp->ustr.length * sizeof(UniChar), ':', 0); + if (utf8chars > kdirentMaxNameBytes) + utf8chars = kdirentMaxNameBytes; + utf8chars++; /* account for NULL termination */ + } else { /* hfs */ + cnp = (CatalogName*) ckp->hfs.nodeName; + result = hfs_to_utf8(vol, cnp->pstr, kdirentMaxNameBytes + 1, + &utf8chars, name); + /*XXX ignoring error */ + } + + return DIRENTRY_SIZE(utf8chars); +} +/* + * NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE NOTE + * + * This is assuming maxinum size of a name is 255 (kdirentMaxNameBytes), which is incorrect. + * Any caller of this has to make sure names > 255 are mangled!!!!!!!! + */ + +OSErr +PositionIterator(CatalogIterator *cip, UInt32 offset, BTreeIterator *bip, UInt16 *op) +{ +#define CAT_START_OFFSET (2 * sizeof(struct hfsdotentry)) + ExtendedVCB * vol; + FCB * fcb; + OSErr result = 0; + + /* are we past the end of a directory? 
*/ + if (cip->folderID != cip->parentID) + return(cmNotFound); + + vol = cip->volume; + fcb = GetFileControlBlock(vol->catalogRefNum); + + /* make a btree iterator from catalog iterator */ + UpdateBtreeIterator(cip, bip); + + if (cip->currentOffset == offset) { + *op = kBTreeCurrentRecord; + + } else if (cip->nextOffset == offset) { + *op = kBTreeNextRecord; + + } else { /* start from beginning */ + UInt32 heuristicHint; + UInt32 *cachedHint; + + *op = kBTreeNextRecord; + + /* + * We pass a 2nd hint/guess into BTSearchRecord. The heuristicHint + * is a mapping of dirID and nodeNumber, in hopes that the current + * search will be in the same node as the last search with the same + * parentID. + */ + result = GetMRUCacheBlock( cip->folderID, vol->hintCachePtr, (Ptr *)&cachedHint ); + heuristicHint = (result == noErr) ? *cachedHint : kInvalidMRUCacheKey; + + /* Position iterator at the folder's thread record */ + result = BTSearchRecord(fcb, bip, heuristicHint, NULL, NULL, bip); + if (result) + goto exit; + + InsertMRUCacheBlock( vol->hintCachePtr, cip->folderID, (Ptr) &bip->hint.nodeNum ); + + /* find offset (note: n^2 / 2) */ + if (offset > CAT_START_OFFSET) { + HFSCatalogNodeID pid, *idp; + UInt32 curOffset, nextOffset; + + /* get first record (ie offset 24) */ + result = BTIterateRecord( fcb, kBTreeNextRecord, bip, NULL, NULL ); + if (result) + goto exit; + + if (vol->vcbSigWord == kHFSPlusSigWord) + idp = &((CatalogKey*) &bip->key)->hfsPlus.parentID; + else + idp = &((CatalogKey*) &bip->key)->hfs.parentID; + + pid = *idp; + + curOffset = CAT_START_OFFSET; + nextOffset = GetDirEntrySize(bip, vol); + + while (nextOffset < offset) { + result = BTIterateRecord( fcb, kBTreeNextRecord, bip, NULL, NULL ); + if (result) + goto exit; + + /* check for parent change */ + if (pid != *idp) { + result = cmNotFound; /* offset past end of directory */ + goto exit; + } + + curOffset = nextOffset; + nextOffset += GetDirEntrySize(bip, vol); + }; + + if (nextOffset != offset) { + 
result = cmNotFound; + goto exit; + } + + UpdateCatalogIterator(bip, cip); + cip->currentOffset = curOffset; + cip->nextOffset = nextOffset; + } + } + +exit: + if (result == btNotFound) + result = cmNotFound; + + return result; + +} /* end PositionIterator */ + + +//_________________________________________________________________________________ +// Routine: GetCatalogOffspring +// +// Function: Gets an offspring record from a specified folder. The folder +// is identified by it's folderID. The desired offspring CNode is +// indicated by the value of the offspring index (1 = 1st offspring +// CNode, 2 = 2nd offspring CNode, etc.). +// +//_________________________________________________________________________________ + +OSErr +GetCatalogOffspring(ExtendedVCB *volume, HFSCatalogNodeID folderID, UInt16 index, + CatalogNodeData *nodeData, + HFSCatalogNodeID *nodeID, SInt16 *nodeType) +{ + CatalogIterator * catalogIterator; + OSErr result; + + + if ( folderID == 0 ) + return cmNotFound; + + /* + * return cmNotFound for index 32767, to prevent overflowing + * the index into negative numbers. + */ + if ( index == 32767 ) + return cmNotFound; + + // get best catalog iterator... + catalogIterator = oGetCatalogIterator(volume, folderID, index); + + result = IterateCatalogNode(volume, catalogIterator, index, + nodeData, nodeID, nodeType); + + (void) ReleaseCatalogIterator(catalogIterator); + + return result; + +} // end GetCatalogOffspring + + +//_________________________________________________________________________________ +// Routine: IterateCatalogNode +// +// Function: Gets an offspring record from a specified folder. The folder +// is identified by it's folderID. The desired offspring CNode is +// indicated by the value of the offspring index (1 = 1st offspring +// CNode, 2 = 2nd offspring CNode, etc.). 
+// +//_________________________________________________________________________________ + +static OSErr +IterateCatalogNode( ExtendedVCB *volume, CatalogIterator *catalogIterator, UInt16 index, + CatalogNodeData *nodeData, HFSCatalogNodeID *nodeID, + SInt16 *nodeType ) +{ + HFSCatalogNodeID offspringParentID; + CatalogKey * offspringKey; + CatalogName * offspringName; + BTreeIterator btreeIterator; + FSBufferDescriptor btRecord; + CatalogRecord * record; + UInt8 databuf[32]; /* space for partial record */ + FCB * fcb; + SInt16 selectionIndex; + UInt16 tempSize; + UInt16 operation; + OSErr result; + Boolean isHFSPlus; + ByteCount utf8len; + + + isHFSPlus = (volume->vcbSigWord == kHFSPlusSigWord); + fcb = GetFileControlBlock(volume->catalogRefNum); + + // make a btree iterator from catalog iterator + UpdateBtreeIterator(catalogIterator, &btreeIterator); + + /* if client doesn't want data (ie readdir), just get type and id */ + if (nodeData == NULL) { + /* data buf has space to cover all type/id offsets */ + btRecord.bufferAddress = databuf; + btRecord.itemSize = sizeof(databuf); + } else if (isHFSPlus) { + btRecord.bufferAddress = nodeData; + btRecord.itemSize = sizeof(CatalogNodeData); + } else { + btRecord.bufferAddress = &nodeData->cnd_extra; + btRecord.itemSize = sizeof(HFSCatalogFile); + } + btRecord.itemCount = 1; + + //--- if neccessary position the iterator at the thread record for the specified folder + + if ( catalogIterator->currentIndex == 0 ) // is this a new iterator? + { + UInt32 heuristicHint; + UInt32 *cachedHint; + + // We pass a 2nd hint/guess into BTSearchRecord. The heuristicHint is a mapping of + // dirID and nodeNumber, in hopes that the current search will be in the same node + // as the last search with the same parentID. + result = GetMRUCacheBlock( catalogIterator->folderID, volume->hintCachePtr, (Ptr *)&cachedHint ); + heuristicHint = (result == noErr) ? 
*cachedHint : kInvalidMRUCacheKey; + + result = BTSearchRecord( fcb, &btreeIterator, heuristicHint, &btRecord, &tempSize, &btreeIterator ); + ExitOnError(result); + + UpdateCatalogIterator(&btreeIterator, catalogIterator); // update btree hint and key + + InsertMRUCacheBlock( volume->hintCachePtr, catalogIterator->folderID, (Ptr) &btreeIterator.hint.nodeNum ); + } + + //--- get offspring record (relative to catalogIterator's position) + + selectionIndex = index - catalogIterator->currentIndex; + + // now we have to map index into next/prev operations... + if (selectionIndex == 1) + { + operation = kBTreeNextRecord; + } + else if (selectionIndex == -1) + { + operation = kBTreePrevRecord; + } + else if (selectionIndex == 0) + { + operation = kBTreeCurrentRecord; + } + else if (selectionIndex > 1) + { + UInt32 i; + + for (i = 1; i < selectionIndex; ++i) + { + result = BTIterateRecord( fcb, kBTreeNextRecord, &btreeIterator, &btRecord, &tempSize ); + ExitOnError(result); + } + operation = kBTreeNextRecord; + } + else // (selectionIndex < -1) + { + SInt32 i; + + for (i = -1; i > selectionIndex; --i) + { + result = BTIterateRecord( fcb, kBTreePrevRecord, &btreeIterator, &btRecord, &tempSize ); + ExitOnError(result); + } + operation = kBTreePrevRecord; + } + + result = BTIterateRecord( fcb, operation, &btreeIterator, &btRecord, &tempSize ); + ExitOnError(result); + + offspringKey = (CatalogKey*) &btreeIterator.key; + + if (isHFSPlus) + { + offspringParentID = offspringKey->hfsPlus.parentID; + offspringName = (CatalogName*) &offspringKey->hfsPlus.nodeName; + } + else + { + offspringParentID = offspringKey->hfs.parentID; + offspringName = (CatalogName*) offspringKey->hfs.nodeName; + } + + if (offspringParentID != catalogIterator->folderID) // different parent? + { + AgeCatalogIterator(catalogIterator); // we reached the end, so don't hog the cache! 
+ + result = cmNotFound; // must be done with this folder + goto ErrorExit; + } + + UpdateCatalogIterator(&btreeIterator, catalogIterator); // update btree hint and key + catalogIterator->currentIndex = index; // update the offspring index marker + + record = (CatalogRecord *) btRecord.bufferAddress; + + if (nodeData == NULL) { /* Just copy the id and type...not name */ + if (isHFSPlus) + { + *nodeType = record->recordType; + *nodeID = record->hfsPlusFolder.folderID; + } + else /* hfs name */ + { + + if (record->recordType == kHFSFileRecord) { + *nodeType = kCatalogFileNode; + *nodeID = record->hfsFile.fileID; + } else if (record->recordType == kHFSFolderRecord) { + *nodeType = kCatalogFolderNode; + *nodeID = record->hfsFolder.folderID; + } else + result = cmNotFound; + } + } else { + nodeData->cnm_parID = isHFSPlus ? offspringKey->hfsPlus.parentID : offspringKey->hfs.parentID; + nodeData->cnm_nameptr = nodeData->cnm_namespace; + if (isHFSPlus) + { + result = utf8_encodestr(offspringName->ustr.unicode, + offspringName->ustr.length * sizeof(UniChar), + nodeData->cnm_namespace, (size_t *)&utf8len, + MAXHFSVNODELEN + 1, ':', 0); + + /* Need to allocate buffer large enough */ + if (result == ENAMETOOLONG) { + utf8len = utf8_encodelen(offspringName->ustr.unicode, + offspringName->ustr.length * sizeof(UniChar), + ':', 0); + nodeData->cnm_nameptr = NewPtr(utf8len + 1); + nodeData->cnm_flags |= kCatNameIsAllocated; + result = utf8_encodestr(offspringName->ustr.unicode, + offspringName->ustr.length * sizeof(UniChar), + nodeData->cnm_nameptr, (size_t *)&utf8len, + utf8len + 1, ':', 0); + } + } + else /* hfs name */ + { + if (record->recordType == kHFSFolderRecord || record->recordType == kHFSFileRecord) { + + CopyCatalogNodeData(volume, record, nodeData); + result = hfs_to_utf8(volume, offspringName->pstr, MAXHFSVNODELEN + 1, &utf8len, nodeData->cnm_namespace); + if (result == ENAMETOOLONG) { /* Need to allocate buffer large enough */ + nodeData->cnm_nameptr = 
NewPtr(utf8len+1); + nodeData->cnm_flags |= kCatNameIsAllocated; + result = hfs_to_utf8(volume, offspringName->pstr, utf8len + 1, &utf8len, nodeData->cnm_nameptr); + } + + } else { + result = cmNotFound; + } + } + } + + return result; + +ErrorExit: + + if ( result == btNotFound ) + result = cmNotFound; + + return result; + +} // end IterateCatalogNode + + +//_________________________________________________________________________________ +// Routine: MoveRenameCatalogNode +// +// Function: Moves and/or rename an existing folder or file CNode. +// Note that when moving a folder, all decendants (its offspring, +// their offspring, etc.) are also moved. +// +// Assumes srcHint contains a text encoding that was set by a GetCatalogNode call +//_________________________________________________________________________________ + +OSErr +MoveRenameCatalogNode(ExtendedVCB *volume, HFSCatalogNodeID srcParentID, ConstUTF8Param srcName, + UInt32 srcHint, HFSCatalogNodeID dstParentID, ConstUTF8Param dstName, UInt32 *newHint) +{ + CatalogKey srcKey; // 518 bytes + CatalogRecord srcRecord; // 520 bytes + CatalogKey dstKey; // 518 bytes + CatalogKey dstFolderKey; // 518 bytes + HFSCatalogNodeID dstFolderParentID = 0; + UInt32 dstFolderHint; + CatalogName *dstFolderNamePtr = NULL; + CatalogRecord tmpRecord; // 520 bytes + HFSCatalogNodeID threadID; + UInt32 textEncoding; + OSErr result; + Boolean isNewName; + Boolean isHFSPlus = (volume->vcbSigWord == kHFSPlusSigWord); + Boolean isOrigDeleted = false; + short srcNameLen; + short dstNameLen; + + + result = BuildCatalogKeyUTF8(volume, srcParentID, srcName, kUndefinedStrLen, &srcKey, &textEncoding); + ReturnIfError(result); + + /* XXX can strlen and bcmp handle NULL pointers? 
*/ + + srcNameLen = strlen(srcName); + dstNameLen = strlen(dstName); + + //--- check if names match + + if ((srcNameLen == dstNameLen) && (bcmp(srcName, dstName, srcNameLen) == 0)) + { + isNewName = false; + dstKey = srcKey; + if ( isHFSPlus ) { + dstKey.hfsPlus.parentID = dstParentID; // set parent ID + } + else { + dstKey.hfs.parentID = dstParentID; // set parent ID + } + } + else /* names are different */ + { + isNewName = true; + result = BuildCatalogKeyUTF8(volume, dstParentID, dstName, kUndefinedStrLen, &dstKey, &textEncoding); + ReturnIfError(result); + } + + //--- make sure source record exists + + result = LocateCatalogNodeByKey(volume, srcHint, &srcKey, &srcRecord, &srcHint); + + // if we did not find it by name, then look for an embedded file ID in a mangled name + if ( (result == cmNotFound) && isHFSPlus ) + result = LocateCatalogNodeByMangledName(volume, srcParentID, srcName, kUndefinedStrLen, &srcKey, &srcRecord, &srcHint); + ReturnIfError(result); + + srcParentID = (isHFSPlus ? srcKey.hfsPlus.parentID : srcKey.hfs.parentID); + + // if we're moving then do some additional preflighting... 
+ + if (srcParentID != dstParentID) + { + //--- make sure destination folder exists + + result = LocateCatalogNode(volume, dstParentID, NULL, kNoHint, &dstFolderKey, &tmpRecord, &dstFolderHint); + ReturnIfError(result); + + if (tmpRecord.recordType == kHFSPlusFolderRecord) + { + dstParentID = tmpRecord.hfsPlusFolder.folderID; + dstFolderParentID = dstFolderKey.hfsPlus.parentID; + dstFolderNamePtr = (CatalogName*) &dstFolderKey.hfsPlus.nodeName; + } + else if (tmpRecord.recordType == kHFSFolderRecord) + { + dstParentID = tmpRecord.hfsFolder.folderID; + dstFolderParentID = dstFolderKey.hfs.parentID; + dstFolderNamePtr = (CatalogName*) &dstFolderKey.hfs.nodeName; + } + else + { + return badMovErr; + } + + //--- if source is a folder, make sure its a proper move + + if (srcRecord.recordType == kHFSPlusFolderRecord || srcRecord.recordType == kHFSFolderRecord) + { + HFSCatalogNodeID srcFolderID; + HFSCatalogNodeID ancestorParentID; + CatalogKey tempKey; // 518 bytes + UInt32 tempHint; + + if (isHFSPlus) + { + srcFolderID = srcRecord.hfsPlusFolder.folderID; + ancestorParentID = dstFolderKey.hfsPlus.parentID; + } + else + { + srcFolderID = srcRecord.hfsFolder.folderID; + ancestorParentID = dstFolderKey.hfs.parentID; + } + + if ( srcFolderID == fsRtDirID || // source == root? + srcFolderID == dstParentID || // source == destination? + srcFolderID == ancestorParentID ) // source == destination's parent? + { + return badMovErr; + } + + while (ancestorParentID > fsRtDirID) // loop until we reach the root folder + { + // locate next folder up the tree... + result = LocateCatalogNode(volume, ancestorParentID, NULL, kNoHint, &tempKey, &tmpRecord, &tempHint); + ReturnIfError(result); + + ancestorParentID = isHFSPlus ? tempKey.hfsPlus.parentID : tempKey.hfs.parentID; + + if (srcFolderID == ancestorParentID) // source = destination ancestor? 
+ return badMovErr; + } + } + + TrashCatalogIterator(volume, dstParentID); // invalidate any iterators for destination parentID + } + else /* (srcParentID == dstParentID) */ + { + if ( !isNewName ) + { + *newHint = srcHint; // they match, so we're all done! + return noErr; + } + } + + TrashCatalogIterator(volume, srcParentID); // invalidate any iterators for source's parentID + InvalidateCatalogNodeCache(volume, srcParentID); // invalidate node cache since parent changed + + if (isNewName && isHFSPlus) + { + // update textEncoding hint (works for folders and files) + srcRecord.hfsPlusFolder.textEncoding = textEncoding; + + UpdateVolumeEncodings(volume, textEncoding); + } + + //--- insert source CNode record in BTree with new key (a new parent id and/or new name) + + result = InsertBTreeRecord(volume->catalogRefNum, &dstKey, &srcRecord, GetCatalogRecordSize(&srcRecord), newHint); + + if (result == btExists) + { + UInt16 dataSize; + + /* XXX what about the case: move id1,foo to id2,FOO ?? 
*/ + if (srcParentID != dstParentID || isNewName == false) + return cmExists; + + //--- new CNode name already exists in the same folder, locate the existing one + result = SearchBTreeRecord(volume->catalogRefNum, &dstKey, srcHint, + &dstFolderKey, &tmpRecord, &dataSize, newHint); + + if (result == btNotFound) + result = cmNotFound; + ReturnIfError(result); + + //--- check if its the same CNode (same name but different upper/lower case) + + if (srcRecord.recordType != tmpRecord.recordType) + return cmExists; + + switch (srcRecord.recordType) + { + case kHFSPlusFileRecord: /* HFS Plus records share same cnid location */ + case kHFSPlusFolderRecord: + if (srcRecord.hfsPlusFolder.folderID != tmpRecord.hfsPlusFolder.folderID) + return cmExists; + break; + + case kHFSFolderRecord: + if (srcRecord.hfsFolder.folderID != tmpRecord.hfsFolder.folderID) + return cmExists; + break; + + case kHFSFileRecord: + if (srcRecord.hfsFile.fileID != tmpRecord.hfsFile.fileID) + return cmExists; + break; + + default: + return cmExists; + } + + //--- same name but different case, so delete old and insert with new name... + + result = DeleteBTreeRecord(volume->catalogRefNum, &srcKey); + ReturnIfError(result); + isOrigDeleted = true; // So we dont delete it again down below + + result = InsertBTreeRecord(volume->catalogRefNum, &dstKey, &srcRecord, dataSize, newHint); + } + ReturnIfError(result); + + // + // from this point on we need to cleanup (ie delete the new record) if we encounter errors! + // + + //--- update thread record for node (if it exists) + + switch (srcRecord.recordType) + { + case kHFSPlusFileRecord: + case kHFSPlusFolderRecord: + threadID = srcRecord.hfsPlusFolder.folderID; + break; + + case kHFSFolderRecord: + threadID = srcRecord.hfsFolder.folderID; + break; + + case kHFSFileRecord: + if (srcRecord.hfsFile.flags & kHFSThreadExistsMask) + { + threadID = srcRecord.hfsFile.fileID; + break; + } + /* fall through if no thread... 
*/ + + default: + threadID = 0; + } + + if (threadID) + { + UInt32 threadHint; + CatalogKey threadKey; // 518 bytes + CatalogRecord threadRecord; // 520 bytes + UInt16 threadSize; + + result = LocateCatalogRecord(volume, threadID, NULL, kNoHint, &threadKey, &threadRecord, &threadHint); + if (result != noErr) goto Exit_Delete; + + if (isHFSPlus) + { + if (srcParentID != dstParentID) + threadRecord.hfsPlusThread.parentID = dstParentID; + if (isNewName) + CopyCatalogName((CatalogName *)&dstKey.hfsPlus.nodeName, (CatalogName *) &threadRecord.hfsPlusThread.nodeName, isHFSPlus); + + threadSize = sizeof(threadRecord.hfsPlusThread); + // HFS Plus has varaible sized threads so adjust to actual length + threadSize -= ( sizeof(threadRecord.hfsPlusThread.nodeName.unicode) - (threadRecord.hfsPlusThread.nodeName.length * sizeof(UniChar)) ); + } + else + { + if (srcParentID != dstParentID) + threadRecord.hfsThread.parentID = dstParentID; + if (isNewName) + CopyCatalogName((CatalogName *)&dstKey.hfs.nodeName,(CatalogName *) threadRecord.hfsThread.nodeName, isHFSPlus); + + threadSize = sizeof(threadRecord.hfsThread); + } + + result = DeleteBTreeRecord(volume->catalogRefNum, &threadKey); + if (result != noErr) goto Exit_Delete; + + result = InsertBTreeRecord(volume->catalogRefNum, &threadKey, &threadRecord, threadSize, &threadHint); + if (result != noErr) goto Exit_Delete; //XXX exiting with a missing thread! + } + + //--- we successfully added the new node so delete the old source CNode record + + if (! isOrigDeleted) { + result = DeleteBTreeRecord(volume->catalogRefNum, &srcKey); + if (result) + { + // uh oh, we could not delete the original + // so we better get rid of the new node... + + (void) DeleteBTreeRecord(volume->catalogRefNum, &dstKey); + + //XXX also need to fix up the thread... 
+ + return result; + } + } + + if (srcParentID != dstParentID) + { + result = UpdateFolderCount(volume, srcParentID, NULL, srcRecord.recordType, kNoHint, -1); + result = UpdateFolderCount(volume, dstFolderParentID, dstFolderNamePtr, srcRecord.recordType, dstFolderHint, +1); + } + + //--- make sure changes get flushed out + VCB_LOCK(volume); + volume->vcbFlags |= 0xFF00; // Mark the VCB dirty + volume->vcbLsMod = GetTimeUTC(); // update last modified date + VCB_UNLOCK(volume); + + (void) FlushCatalog(volume); + + return result; + + +Exit_Delete: + (void) DeleteBTreeRecord(volume->catalogRefNum, &dstKey); + + return result; + +} // end MoveRenameCatalogNode + + +//_________________________________________________________________________________ +// Routine: UpdateCatalogNode +// +// Function: Marks the Catalog BTree node identified by the given catalog hint +// as 'dirty'. +// +//_________________________________________________________________________________ + + +OSErr +UpdateCatalogNode(ExtendedVCB *volume, HFSCatalogNodeID parentID, ConstUTF8Param name, + UInt32 catalogHint, const CatalogNodeData *nodeData) +{ + CatalogKey *key; + CatalogRecord *record; + UInt32 hint; + UInt16 recordSize; + OSErr result; + CatalogKey catalogKey; // 518 bytes + CatalogRecord catalogRecord; // 520 bytes + Boolean isHFSPlus = volume->vcbSigWord == kHFSPlusSigWord; + + /* XXX no reason to have ptrs to local variables... 
*/ + key = &catalogKey; + record = &catalogRecord; + + result = BuildCatalogKeyUTF8(volume, parentID, name, kUndefinedStrLen, key, NULL); + ReturnIfError(result); + + //--- locate subject catalog node + + result = LocateCatalogNodeByKey(volume, catalogHint, key, record, &hint); + + // if we did not find it by name, then look for an embedded file ID in a mangled name + if ( (result == cmNotFound) && isHFSPlus ) + result = LocateCatalogNodeByMangledName(volume, parentID, name, kUndefinedStrLen, key, record, &hint); + + if (result == btNotFound) + result = cmNotFound; + + if (catalogHint != hint) + PRINTIT(("UpdateCatalogNode: catalogHint does not match (in: %ld, out: %ld)\n", catalogHint, hint)); + ReturnIfError(result); + + // update user modifiable fields in the catalog node record... + + switch (record->recordType) + { + case kHFSFolderRecord: + { + #if DEBUG_BUILD + if (nodeData->cnd_type != kCatalogFolderNode) + DebugStr("\p UpdateCatalogNode: folder/file mismatch!"); + #endif + + record->hfsFolder.createDate = UTCToLocal(nodeData->cnd_createDate); + record->hfsFolder.modifyDate = UTCToLocal(nodeData->cnd_contentModDate); + record->hfsFolder.backupDate = UTCToLocal(nodeData->cnd_backupDate); + + *(DInfo*) &record->hfsFolder.userInfo = *(DInfo*) &nodeData->cnd_finderInfo; + *(DXInfo*) &record->hfsFolder.finderInfo = *(DXInfo*) ((UInt32)&nodeData->cnd_finderInfo + 16); + + recordSize = sizeof(HFSCatalogFolder); + break; + } + + case kHFSFileRecord: + { + UInt32 i; + + #if DEBUG_BUILD + if (nodeData->cnd_type != kCatalogFileNode) + DebugStr("UpdateCatalogNode: folder/file mismatch!"); + if ((nodeData->cnd_datafork.totalBlocks > (0x7FFFFFFF/volume->blockSize)) || + (nodeData->cnd_rsrcfork.totalBlocks > (0x7FFFFFFF/volume->blockSize))) + DebugStr("HFS file size is larger than 2Gig"); + #endif + + record->hfsFile.flags = (UInt8) nodeData->cnd_flags; + record->hfsFile.createDate = UTCToLocal(nodeData->cnd_createDate); + record->hfsFile.modifyDate = 
UTCToLocal(nodeData->cnd_contentModDate); + record->hfsFile.backupDate = UTCToLocal(nodeData->cnd_backupDate); + + record->hfsFile.dataLogicalSize = nodeData->cnd_datafork.logicalSize; + record->hfsFile.dataPhysicalSize = nodeData->cnd_datafork.totalBlocks * volume->blockSize; + record->hfsFile.rsrcLogicalSize = nodeData->cnd_rsrcfork.logicalSize; + record->hfsFile.rsrcPhysicalSize = nodeData->cnd_rsrcfork.totalBlocks * volume->blockSize; + + *(FInfo*) &record->hfsFile.userInfo = *(FInfo*) &nodeData->cnd_finderInfo; + *(FXInfo*) &record->hfsFile.finderInfo = *(FXInfo*) ((UInt32)&nodeData->cnd_finderInfo + 16); + + // copy extent info + for (i = 0; i < kHFSExtentDensity; ++i) + { + record->hfsFile.dataExtents[i].startBlock = + (UInt16) nodeData->cnd_datafork.extents[i].startBlock; + record->hfsFile.dataExtents[i].blockCount = + (UInt16) nodeData->cnd_datafork.extents[i].blockCount; + record->hfsFile.rsrcExtents[i].startBlock = + (UInt16) nodeData->cnd_rsrcfork.extents[i].startBlock; + record->hfsFile.rsrcExtents[i].blockCount = + (UInt16) nodeData->cnd_rsrcfork.extents[i].blockCount; + } + + recordSize = sizeof(HFSCatalogFile); + break; + } + + case kHFSPlusFolderRecord: + { + record->hfsPlusFolder.createDate = nodeData->cnd_createDate; + record->hfsPlusFolder.contentModDate = nodeData->cnd_contentModDate; + record->hfsPlusFolder.backupDate = nodeData->cnd_backupDate; + record->hfsPlusFolder.accessDate = nodeData->cnd_accessDate; + record->hfsPlusFolder.attributeModDate = nodeData->cnd_attributeModDate; + record->hfsPlusFolder.bsdInfo.ownerID = nodeData->cnd_ownerID; + record->hfsPlusFolder.bsdInfo.groupID = nodeData->cnd_groupID; + record->hfsPlusFolder.bsdInfo.ownerFlags = nodeData->cnd_ownerFlags; + record->hfsPlusFolder.bsdInfo.adminFlags = nodeData->cnd_adminFlags; + record->hfsPlusFolder.bsdInfo.fileMode = nodeData->cnd_mode; + record->hfsPlusFolder.textEncoding = nodeData->cnd_textEncoding; + + BlockMoveData(&nodeData->cnd_finderInfo, 
&record->hfsPlusFolder.userInfo, 32); + + recordSize = sizeof(HFSPlusCatalogFolder); + break; + } + + case kHFSPlusFileRecord: + { + record->hfsPlusFile.flags = nodeData->cnd_flags; + record->hfsPlusFile.createDate = nodeData->cnd_createDate; + record->hfsPlusFile.contentModDate = nodeData->cnd_contentModDate; + record->hfsPlusFile.backupDate = nodeData->cnd_backupDate; + record->hfsPlusFile.accessDate = nodeData->cnd_accessDate; + record->hfsPlusFile.attributeModDate = nodeData->cnd_attributeModDate; + record->hfsPlusFile.bsdInfo.ownerID = nodeData->cnd_ownerID; + record->hfsPlusFile.bsdInfo.groupID = nodeData->cnd_groupID; + record->hfsPlusFile.bsdInfo.ownerFlags = nodeData->cnd_ownerFlags; + record->hfsPlusFile.bsdInfo.adminFlags = nodeData->cnd_adminFlags; + record->hfsPlusFile.bsdInfo.fileMode = nodeData->cnd_mode; + /* get special value (iNodeNum, linkCount or rawDevice) */ + record->hfsPlusFile.bsdInfo.special.rawDevice = nodeData->cnd_rawDevice; + record->hfsPlusFile.textEncoding = nodeData->cnd_textEncoding; + + record->hfsPlusFile.dataFork.logicalSize = nodeData->cnd_datafork.logicalSize; + record->hfsPlusFile.dataFork.totalBlocks = nodeData->cnd_datafork.totalBlocks; + BlockMoveData(&nodeData->cnd_datafork.extents, + &record->hfsPlusFile.dataFork.extents, sizeof(HFSPlusExtentRecord)); + + record->hfsPlusFile.resourceFork.logicalSize = nodeData->cnd_rsrcfork.logicalSize; + record->hfsPlusFile.resourceFork.totalBlocks = nodeData->cnd_rsrcfork.totalBlocks; + BlockMoveData(&nodeData->cnd_rsrcfork.extents, + &record->hfsPlusFile.resourceFork.extents, sizeof(HFSPlusExtentRecord)); + + BlockMoveData(&nodeData->cnd_finderInfo, &record->hfsPlusFile.userInfo, 32); + +#if HFS_HARDLINKS && DEBUG_BUILD + /* Must swap opaque finder data */ + if (SWAP_BE32 (record->hfsPlusFile.userInfo.fdType) == kHardLinkFileType && + SWAP_BE32 (record->hfsPlusFile.userInfo.fdCreator) == kHardLinkCreator) { + if (record->hfsPlusFile.dataFork.logicalSize != 0) + 
DebugStr("UpdateCatalogNode: link has data fork!"); + } +#endif + recordSize = sizeof(HFSPlusCatalogFile); + break; + } + + default: + return cmNotFound; + } + + result = ReplaceBTreeRecord(volume->catalogRefNum, key, catalogHint, record, recordSize, &hint); + + if ( result == btNotFound ) + { + result = cmNotFound; + } + else if ( result == noErr ) + { + /* if we're just updating the accessDate then no need to change volume mod date */ + if (nodeData->cnd_contentModDate > volume->vcbLsMod || + (isHFSPlus && nodeData->cnd_attributeModDate > volume->vcbLsMod)) + { + VCB_LOCK(volume); + volume->vcbFlags |= 0xFF00; // Mark the VCB dirty + volume->vcbLsMod = GetTimeUTC(); // update last modified date + VCB_UNLOCK(volume); + } + + result = FlushCatalog(volume); // flush the catalog + } + + return result; +} + + +//_________________________________________________________________________________ +// Routine: CreateFileIDRef +// +// Function: Creates a file thread record for hfs file node +// +//_________________________________________________________________________________ + +OSErr +CreateFileIDRef(ExtendedVCB *volume, HFSCatalogNodeID parentID, ConstUTF8Param name, UInt32 hint, HFSCatalogNodeID *threadID) +{ + CatalogKey nodeKey; // 518 bytes + CatalogRecord nodeData; // 520 bytes + HFSCatalogKey threadKey; + HFSCatalogThread threadData; + UInt32 nodeHint; + UInt32 tempHint; + OSErr result; + Boolean isHFSPlus = (volume->vcbSigWord == kHFSPlusSigWord); + + *threadID = 0; + + result = BuildCatalogKeyUTF8(volume, parentID, name, kUndefinedStrLen, &nodeKey, NULL); + ReturnIfError(result); + + //--- locate subject catalog node + + result = LocateCatalogNodeByKey(volume, hint, &nodeKey, &nodeData, &nodeHint); + + // if we did not find it by name, then look for an embedded file ID in a mangled name + if ( (result == cmNotFound) && isHFSPlus ) + result = LocateCatalogNodeByMangledName(volume, parentID, name, kUndefinedStrLen, &nodeKey, &nodeData, &nodeHint); + 
ReturnIfError(result); + + if (nodeData.recordType == kHFSPlusFileRecord) + { + *threadID = nodeData.hfsPlusFile.fileID; + return noErr; // already have one + } + + if (nodeData.recordType != kHFSFileRecord) + { + return notAFileErr; + } + + + if (nodeData.hfsFile.flags & kHFSThreadExistsMask) + { + *threadID = nodeData.hfsFile.fileID; + return noErr; // already have one + } + + result = VolumeWritable( volume ); + if ( result != noErr ) return result; + + // + // need to insert a thread record + // + BuildCatalogKey(nodeData.hfsFile.fileID, NULL, false, (CatalogKey *)&threadKey); + + ClearMemory(&threadData, sizeof(HFSCatalogThread)); + threadData.recordType = kHFSFileThreadRecord; + threadData.parentID = nodeKey.hfs.parentID; + BlockMoveData(&nodeKey.hfs.nodeName, &threadData.nodeName, nodeKey.hfs.nodeName[0] + 1); + + result = InsertBTreeRecord(volume->catalogRefNum, &threadKey, &threadData, sizeof(HFSCatalogThread), &tempHint); + if (result == btExists) result = noErr; //XXX could return cmExists or fidExists + ReturnIfError(result); + + // + // Finally, set the flag in the file record to say this file has a thread record. + // + nodeData.hfsFile.flags |= kHFSThreadExistsMask; + result = ReplaceBTreeRecord(volume->catalogRefNum, &nodeKey, nodeHint, &nodeData, sizeof(HFSCatalogFile), &nodeHint ); + + if (result == noErr) { + (void) FlushCatalog(volume); + *threadID = nodeData.hfsFile.fileID; + } + + return result; +} + + +//_________________________________________________________________________________ +// Routine: CompareCatalogKeys +// +// Function: Compares two catalog keys (a search key and a trial key). 
+// +// Result: +n search key > trial key +// 0 search key = trial key +// -n search key < trial key +//_________________________________________________________________________________ + +SInt32 +CompareCatalogKeys(HFSCatalogKey *searchKey, HFSCatalogKey *trialKey) +{ + HFSCatalogNodeID searchParentID, trialParentID; + SInt32 result; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + + if ( searchParentID > trialParentID ) // parent dirID is unsigned + result = 1; + else if ( searchParentID < trialParentID ) + result = -1; + else // parent dirID's are equal, compare names + { + #if ( ! FORDISKFIRSTAID ) + LogStartTime(kTraceRelString); + + result = FastRelString(searchKey->nodeName, trialKey->nodeName); + + LogEndTime(kTraceRelString, noErr); + #else + result = (SInt32) RelString_Glue(searchKey->nodeName, trialKey->nodeName); + #endif + } + + return result; +} + + +//_________________________________________________________________________________ +// Routine: CompareExtendedCatalogKeys +// +// Function: Compares two large catalog keys (a search key and a trial key). 
+// +// Result: +n search key > trial key +// 0 search key = trial key +// -n search key < trial key +//_________________________________________________________________________________ + +SInt32 +CompareExtendedCatalogKeys(HFSPlusCatalogKey *searchKey, HFSPlusCatalogKey *trialKey) +{ + SInt32 result; + HFSCatalogNodeID searchParentID, trialParentID; + + searchParentID = searchKey->parentID; + trialParentID = trialKey->parentID; + + if ( searchParentID > trialParentID ) // parent node IDs are unsigned + { + result = 1; + } + else if ( searchParentID < trialParentID ) + { + result = -1; + } + else // parent node ID's are equal, compare names + { + if ( searchKey->nodeName.length == 0 || trialKey->nodeName.length == 0 ) + result = searchKey->nodeName.length - trialKey->nodeName.length; + else + result = FastUnicodeCompare(&searchKey->nodeName.unicode[0], searchKey->nodeName.length, + &trialKey->nodeName.unicode[0], trialKey->nodeName.length); + } + + return result; +} + diff --git a/bsd/hfs/hfscommon/Catalog/CatalogIterators.c b/bsd/hfs/hfscommon/Catalog/CatalogIterators.c new file mode 100644 index 000000000..596b812bb --- /dev/null +++ b/bsd/hfs/hfscommon/Catalog/CatalogIterators.c @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: CatalogIterators.c + + Contains: Catalog Iterator Implementation + + Version: HFS Plus 1.0 + + Copyright: © 1997-1998 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: Mac OS File System + + Writers: + + (msd) Mark Day + (djb) Don Brady + + Change History (most recent first): + 4/23/98 djb Re-enable InvalidateCatalogCache (was commented out). + 4/6/98 djb Add locking for cache globals (list) and iterators. + 4/2/98 djb Define gCatalogCacheGlobals here instead of FSVars. + 3/31/98 djb Sync up with final HFSVolumes.h header file. + + 11/13/97 djb Radar #1683572 - Fix for indexed GetFileInfo. + 10/17/97 msd Bug 1683506. Add support for long Unicode names in + CatalogIterators. Added a single global buffer for long Unicode + names; it is used by at most one CatalogIterator at a time. 
+ 10/1/97 djb first checked in +*/ + + +#include "../../hfs_macos_defs.h" +#include "../../hfs.h" +#include "../../hfs_dbg.h" +#include "../../hfs_format.h" + +#include "../headers/FileMgrInternal.h" +#include "../headers/BTreesInternal.h" +#include "../headers/CatalogPrivate.h" +#include "../headers/HFSInstrumentation.h" + + +#include +#include +#include +#include + +static void InsertCatalogIteratorAsMRU( CatalogCacheGlobals *cacheGlobals, CatalogIterator *iterator ); + +static void InsertCatalogIteratorAsLRU( CatalogCacheGlobals *cacheGlobals, CatalogIterator *iterator ); + +static void PrepareForLongName( CatalogIterator *iterator ); + + +#if TARGET_API_MACOS_X + CatalogCacheGlobals *gCatalogCacheGlobals; + + #define GetCatalogCacheGlobals() (gCatalogCacheGlobals) + + #define CATALOG_ITER_LIST_LOCK(g) simple_lock(&(g)->simplelock) + + #define CATALOG_ITER_LIST_UNLOCK(g) simple_unlock(&(g)->simplelock) + + #define CI_LOCK(i) lockmgr(&(i)->iterator_lock, LK_EXCLUSIVE, (simple_lock_t) 0, current_proc()) + +#define CI_UNLOCK(i) lockmgr(&(i)->iterator_lock, LK_RELEASE, (simple_lock_t) 0, current_proc()) + +#define CI_SLEEPLESS_LOCK(i) lockmgr(&(i)->iterator_lock, LK_EXCLUSIVE | LK_NOWAIT, (simple_lock_t) 0, current_proc()) + +#define CI_LOCK_FROM_LIST(g,i) lockmgr(&(i)->iterator_lock, LK_EXCLUSIVE | LK_INTERLOCK, &(g)->simplelock, current_proc()) + +#else /* TARGET_API_MACOS_X */ + + #define GetCatalogCacheGlobals() ((CatalogCacheGlobals*) ((FSVarsRec*) LMGetFSMVars()->gCatalogCacheGlobals)) + + #define CATALOG_ITER_LIST_LOCK(g) + + #define CATALOG_ITER_LIST_UNLOCK(g) + + #define CI_LOCK(i) 0 + + #define CI_UNLOCK(i) 0 + + #define CI_SLEEPLESS_LOCK(i) 0 + + #define CI_LOCK_FROM_LIST(g,i) 0 + +#endif + + +//_______________________________________________________________________________ +// Routine: InitCatalogCache +// +// Function: Allocates cache, and initializes all the cache structures. 
+// +//_______________________________________________________________________________ +OSErr +InitCatalogCache(void) +{ + CatalogCacheGlobals * cacheGlobals; + CatalogIterator * iterator; + UInt32 cacheSize; + UInt16 i; + UInt16 lastIterator; + OSErr err; + + + cacheSize = sizeof(CatalogCacheGlobals) + ( kCatalogIteratorCount * sizeof(CatalogIterator) ); + cacheGlobals = (CatalogCacheGlobals *) NewPtrSysClear( cacheSize ); + + err = MemError(); + if (err != noErr) + return err; + + cacheGlobals->iteratorCount = kCatalogIteratorCount; + + lastIterator = kCatalogIteratorCount - 1; // last iterator number, since they start at 0 + + // Initialize the MRU order for the cache + cacheGlobals->mru = (CatalogIterator *) ( (Ptr)cacheGlobals + sizeof(CatalogCacheGlobals) ); + + // Initialize the LRU order for the cache + cacheGlobals->lru = (CatalogIterator *) ( (Ptr)(cacheGlobals->mru) + (lastIterator * sizeof(CatalogIterator)) ); + + + // Traverse iterators, setting initial mru, lru, and default values + for ( i = 0, iterator = cacheGlobals->mru; i < kCatalogIteratorCount ; i++, iterator = iterator->nextMRU ) + { + if ( i == lastIterator ) + iterator->nextMRU = nil; // terminate the list + else + iterator->nextMRU = (CatalogIterator *) ( (Ptr)iterator + sizeof(CatalogIterator) ); + + if ( i == 0 ) + iterator->nextLRU = nil; // terminate the list + else + iterator->nextLRU = (CatalogIterator *) ( (Ptr)iterator - sizeof(CatalogIterator) ); + + #if TARGET_API_MACOS_X + lockinit(&iterator->iterator_lock, PINOD, "hfs_catalog_iterator", 0, 0); + #endif + } + + #if TARGET_API_MAC_OS8 + (FSVarsRec*) LMGetFSMVars()->gCatalogCacheGlobals = (Ptr) cacheGlobals; + #endif + + #if TARGET_API_MACOS_X + gCatalogCacheGlobals = cacheGlobals; + simple_lock_init(&cacheGlobals->simplelock); + #endif + + return noErr; +} + + +//_______________________________________________________________________________ +// Routine: InvalidateCatalogCache +// +// Function: Trash any interators matching volume 
parameter +// +//_______________________________________________________________________________ +void PrintCatalogIterator( void ); + +void +InvalidateCatalogCache( ExtendedVCB *volume ) +{ + TrashCatalogIterator( volume, 0 ); +} + + +//_______________________________________________________________________________ +// Routine: PrintCatalogIterator +// +// Function: Prints all interators +// +//_______________________________________________________________________________ +#if HFS_DIAGNOSTIC +void +PrintCatalogIterator( void ) +{ + CatalogIterator *iterator; + CatalogCacheGlobals *cacheGlobals = GetCatalogCacheGlobals(); + int i; + + PRINTIT("CatalogCacheGlobals @ 0x%08lX are:\n", (unsigned long)cacheGlobals); + PRINTIT("\titeratorCount: %ld \n", cacheGlobals->iteratorCount); + PRINTIT("\tmru: 0x%08lX \n", (unsigned long)cacheGlobals->mru); + PRINTIT("\tlru: 0x%08lX \n", (unsigned long)cacheGlobals->lru); + + for ( iterator = cacheGlobals->mru, i=0 ; iterator != nil && i<32 ; iterator = iterator->nextMRU, i++) + { + PRINTIT("%d: ", i); + PRINTIT(" i: 0x%08lX", (unsigned long)iterator); + PRINTIT(" M: 0x%08lX", (unsigned long)iterator->nextMRU); + PRINTIT(" L: 0x%08lX", (unsigned long)iterator->nextLRU); + PRINTIT("\n"); + } +} +#endif + +//_______________________________________________________________________________ +// Routine: TrashCatalogIterator +// +// Function: Trash any interators matching volume and folder parameters +// +//_______________________________________________________________________________ +void +TrashCatalogIterator( const ExtendedVCB *volume, HFSCatalogNodeID folderID ) +{ + CatalogIterator *iterator; + CatalogCacheGlobals *cacheGlobals = GetCatalogCacheGlobals(); + + CATALOG_ITER_LIST_LOCK(cacheGlobals); + + for ( iterator = cacheGlobals->mru ; iterator != nil ; iterator = iterator->nextMRU ) + { + top: + + // first match the volume + if ( iterator->volume != volume ) + continue; + + // now match the folder (or all folders if 0) + if ( 
(folderID == 0) || (folderID == iterator->folderID) ) + { + CatalogIterator *next; + + iterator->volume = 0; // trash it + iterator->folderID = 0; + + next = iterator->nextMRU; // remember the next iterator + + // if iterator is not already last then make it last + if ( next != nil ) + { + InsertCatalogIteratorAsLRU( cacheGlobals, iterator ); + + // iterator->nextMRU will always be zero (since we moved it to the end) + // so set up the next iterator manually (we know its not nil) + iterator = next; + goto top; // process the next iterator + } + } + } + + CATALOG_ITER_LIST_UNLOCK(cacheGlobals); +} + + +//_______________________________________________________________________________ +// Routine: AgeCatalogIterator +// +// Function: Move iterator to the end of the list... +// +//_______________________________________________________________________________ +void +AgeCatalogIterator ( CatalogIterator *catalogIterator ) +{ + CatalogCacheGlobals * cacheGlobals = GetCatalogCacheGlobals(); + + CATALOG_ITER_LIST_LOCK(cacheGlobals); + + //PRINTIT(" AgeCatalogIterator: v=%d, d=%ld, i=%d\n", catalogIterator->volRefNum, catalogIterator->folderID, catalogIterator->currentIndex); + + InsertCatalogIteratorAsLRU( cacheGlobals, catalogIterator ); + + CATALOG_ITER_LIST_UNLOCK(cacheGlobals); +} + + +//_______________________________________________________________________________ +// Routine: GetCatalogIterator +// +// Function: Release interest in Catalog iterator +// +//_______________________________________________________________________________ +OSErr +ReleaseCatalogIterator( CatalogIterator* catalogIterator) +{ +#if TARGET_API_MACOS_X + //PRINTIT(" ReleaseCatalogIterator: v=%d, d=%ld, i=%d\n", catalogIterator->volRefNum, catalogIterator->folderID, catalogIterator->currentIndex); + return CI_UNLOCK(catalogIterator); +#else + return noErr; +#endif +} + + +//_______________________________________________________________________________ +// Routine: GetCatalogIterator +// +// 
Function: Returns an iterator associated with the volume, folderID, index, +// and iterationType (kIterateFilesOnly or kIterateAll). +// Searches the cache in MRU order. +// Inserts the resulting iterator at the head of mru automatically +// +// Note: The returned iterator is locked and ReleaseCatalogIterator must +// be called to unlock it. +// +//_______________________________________________________________________________ +CatalogIterator* +oGetCatalogIterator( const ExtendedVCB *volume, HFSCatalogNodeID folderID, UInt16 index) +{ + CatalogCacheGlobals * cacheGlobals = GetCatalogCacheGlobals(); + CatalogIterator * iterator; + CatalogIterator * bestIterator; + UInt16 bestDelta; + Boolean newIterator = false; + + + LogStartTime(kGetCatalogIterator); + + bestDelta = 0xFFFF; // assume the best thing is to start from scratch + bestIterator = nil; + + CATALOG_ITER_LIST_LOCK(cacheGlobals); + + for ( iterator = cacheGlobals->mru ; iterator != nil ; iterator = iterator->nextMRU ) + { + UInt16 delta; + UInt16 iteratorIndex; + + // first make sure volume, folder id and type matches + if ( (iterator->volume != volume) || + (iterator->folderID != folderID) || + (iterator->currentIndex == 0xFFFFFFFF)) + { + continue; + } + + if ( CI_SLEEPLESS_LOCK(iterator) == EBUSY ) /* ignore busy iterators */ + { + //PRINTIT(" GetCatalogIterator: busy v=%d, d=%ld, i=%d\n", volume, folderID, iterator->currentIndex); + continue; + } + + iteratorIndex = iterator->currentIndex; + + // we matched volume, folder id and type, now check the index + if ( iteratorIndex == index ) + { + bestDelta = 0; + bestIterator = iterator; // we scored! 
- so get out of this loop + break; // break with iterator locked + } + + // calculate how far this iterator is from the requested index + if ( index > iteratorIndex ) + delta = index - iteratorIndex; + else + delta = iteratorIndex - index; + + + // remember the best iterator so far (there could be more than one) + if ( delta < bestDelta ) + { + bestDelta = delta; // we found a better one! + bestIterator = iterator; // so remember it + if ( delta == 1 ) // just one away is good enough! + break; // break with iterator locked + } + + (void) CI_UNLOCK(iterator); // unlock iterator before moving to the next one + + } // end for + + + // check if we didn't get one or if the one we got is too far away... + if ( (bestIterator == nil) || (index < bestDelta) ) + { + bestIterator = cacheGlobals->lru; // start over with a new iterator + + //PRINTIT(" GetCatalogIterator: recycle v=%d, d=%ld, i=%d\n", bestIterator->volRefNum, bestIterator->folderID, bestIterator->currentIndex); + (void) CI_LOCK_FROM_LIST(cacheGlobals, bestIterator); // XXX we should not eat the error! + + CATALOG_ITER_LIST_LOCK(cacheGlobals); // grab the lock again for MRU Insert below... + + bestIterator->volume = volume; // update the iterator's volume + bestIterator->folderID = folderID; // ... and folderID + bestIterator->currentIndex = 0; // ... 
and offspring index marker + + bestIterator->btreeNodeHint = 0; + bestIterator->btreeIndexHint = 0; + bestIterator->parentID = folderID; // set key to folderID + empty name + bestIterator->folderName.unicodeName.length = 0; // clear pascal/unicode name + + if ( volume->vcbSigWord == kHFSPlusSigWord ) + bestIterator->nameType = kShortUnicodeName; + else + bestIterator->nameType = kShortPascalName; + + newIterator = true; + } + else { + //PRINTIT(" GetCatalogIterator: found v=%d, d=%ld, i=%d\n", bestIterator->volRefNum, bestIterator->folderID, bestIterator->currentIndex); + } + + // put this iterator at the front of the list + InsertCatalogIteratorAsMRU( cacheGlobals, bestIterator ); + + CATALOG_ITER_LIST_UNLOCK(cacheGlobals); + + LogEndTime(kGetCatalogIterator, newIterator); + + return bestIterator; // return our best shot + +} // end oGetCatalogIterator + + +CatalogIterator* +GetCatalogIterator(ExtendedVCB *volume, HFSCatalogNodeID folderID, UInt32 offset) +{ + CatalogCacheGlobals *cacheGlobals = GetCatalogCacheGlobals(); + CatalogIterator *iterator; + CatalogIterator *bestIterator; + + bestIterator = NULL; + + CATALOG_ITER_LIST_LOCK(cacheGlobals); + + for (iterator = cacheGlobals->mru ; iterator != nil ; iterator = iterator->nextMRU) { + + /* first make sure volume and folder id match */ + if ((iterator->volume != volume) || (iterator->folderID != folderID)) { + continue; + } + + /* ignore busy iterators */ + if ( CI_SLEEPLESS_LOCK(iterator) == EBUSY ) { + //PRINTIT(" GetCatalogIterator: busy v=%d, d=%ld, i=%d\n", volume, folderID, iterator->currentIndex); + continue; + } + + /* we matched volume, folder id, now check the offset */ + if ( iterator->currentOffset == offset || iterator->nextOffset == offset) { + bestIterator = iterator; // we scored! 
- so get out of this loop + break; // break with iterator locked + } + + (void) CI_UNLOCK(iterator); // unlock iterator before moving to the next one + } + + // check if we didn't get one or if the one we got is too far away... + if (bestIterator == NULL) + { + bestIterator = cacheGlobals->lru; // start over with a new iterator + + //PRINTIT(" GetCatalogIterator: recycle v=%d, d=%ld, i=%d\n", bestIterator->volume, bestIterator->folderID, bestIterator->currentIndex); + (void) CI_LOCK_FROM_LIST(cacheGlobals, bestIterator); // XXX we should not eat the error! + + CATALOG_ITER_LIST_LOCK(cacheGlobals); // grab the lock again for MRU Insert below... + + bestIterator->volume = volume; // update the iterator's volume + bestIterator->folderID = folderID; // ... and folderID + bestIterator->currentIndex = 0xFFFFFFFF; // ... and offspring index marker + bestIterator->currentOffset = 0xFFFFFFFF; + bestIterator->nextOffset = 0xFFFFFFFF; + + bestIterator->btreeNodeHint = 0; + bestIterator->btreeIndexHint = 0; + bestIterator->parentID = folderID; // set key to folderID + empty name + bestIterator->folderName.unicodeName.length = 0; // clear pascal/unicode name + + if ( volume->vcbSigWord == kHFSPlusSigWord ) + bestIterator->nameType = kShortUnicodeName; + else + bestIterator->nameType = kShortPascalName; + } + else { + //PRINTIT(" GetCatalogIterator: found v=%d, d=%ld, i=%d\n", bestIterator->volume, bestIterator->folderID, bestIterator->currentIndex); + } + + // put this iterator at the front of the list + InsertCatalogIteratorAsMRU( cacheGlobals, bestIterator ); + + CATALOG_ITER_LIST_UNLOCK(cacheGlobals); + + return bestIterator; // return our best shot + +} /* GetCatalogIterator */ + + +//_______________________________________________________________________________ +// Routine: UpdateBtreeIterator +// +// Function: Fills in a BTreeIterator from a CatalogIterator +// +// Assumes: catalogIterator->nameType is correctly initialized! 
+// catalogIterator is locked (MacOS X) +//_______________________________________________________________________________ +void +UpdateBtreeIterator(const CatalogIterator *catalogIterator, BTreeIterator *btreeIterator) +{ + CatalogName * nodeName; + Boolean isHFSPlus; + + + btreeIterator->hint.writeCount = 0; + btreeIterator->hint.nodeNum = catalogIterator->btreeNodeHint; + btreeIterator->hint.index = catalogIterator->btreeIndexHint; + + switch (catalogIterator->nameType) + { + case kShortPascalName: + if ( catalogIterator->folderName.pascalName[0] > 0 ) + nodeName = (CatalogName *) catalogIterator->folderName.pascalName; + else + nodeName = NULL; + + isHFSPlus = false; + break; + + case kShortUnicodeName: + if ( catalogIterator->folderName.unicodeName.length > 0 ) + nodeName = (CatalogName *) &catalogIterator->folderName.unicodeName; + else + nodeName = NULL; + + isHFSPlus = true; + break; + + case kLongUnicodeName: + if ( catalogIterator->folderName.longNamePtr->length > 0 ) + nodeName = (CatalogName *) catalogIterator->folderName.longNamePtr; + else + nodeName = NULL; + + isHFSPlus = true; + break; + + default: + return; + } + + BuildCatalogKey(catalogIterator->parentID, nodeName, isHFSPlus, (CatalogKey*) &btreeIterator->key); +} + + +//_______________________________________________________________________________ +// Routine: UpdateCatalogIterator +// +// Function: Updates a CatalogIterator from a BTreeIterator +// +// Assumes: catalogIterator->nameType is correctly initialized! 
+// catalogIterator is locked (MacOS X) +//_______________________________________________________________________________ +void +UpdateCatalogIterator (const BTreeIterator *btreeIterator, CatalogIterator *catalogIterator) +{ + void * srcName; + void * dstName; + UInt16 nameSize; + CatalogKey * catalogKey; + + + catalogIterator->btreeNodeHint = btreeIterator->hint.nodeNum; + catalogIterator->btreeIndexHint = btreeIterator->hint.index; + + catalogKey = (CatalogKey*) &btreeIterator->key; + + switch (catalogIterator->nameType) + { + case kShortPascalName: + catalogIterator->parentID = catalogKey->hfs.parentID; + + dstName = catalogIterator->folderName.pascalName; + srcName = catalogKey->hfs.nodeName; + nameSize = catalogKey->hfs.nodeName[0] + sizeof(UInt8); + break; + + case kShortUnicodeName: + catalogIterator->parentID = catalogKey->hfsPlus.parentID; + + dstName = &catalogIterator->folderName.unicodeName; + srcName = &catalogKey->hfsPlus.nodeName; + nameSize = (catalogKey->hfsPlus.nodeName.length + 1) * sizeof(UInt16); + + // See if we need to make this iterator use long names + if ( nameSize > sizeof(catalogIterator->folderName.unicodeName) ) + { + PrepareForLongName(catalogIterator); // Find a long name buffer to use + dstName = catalogIterator->folderName.longNamePtr; + } + break; + + case kLongUnicodeName: + catalogIterator->parentID = catalogKey->hfsPlus.parentID; + + dstName = catalogIterator->folderName.longNamePtr; + srcName = &catalogKey->hfsPlus.nodeName; + nameSize = (catalogKey->hfsPlus.nodeName.length + 1) * sizeof(UInt16); + break; + + default: + return; + } + + if (catalogIterator->parentID != catalogIterator->folderID) + catalogIterator->nextOffset = 0xFFFFFFFF; + + BlockMoveData(srcName, dstName, nameSize); + +} // end UpdateCatalogIterator + + +//_______________________________________________________________________________ +// Routine: InsertCatalogIteratorAsMRU +// +// Function: Moves catalog iterator to head of mru order in double linked list 
+//
+// Assumes list simple lock is held
+//_______________________________________________________________________________
+static void
+InsertCatalogIteratorAsMRU ( CatalogCacheGlobals *cacheGlobals, CatalogIterator *iterator )
+{
+ CatalogIterator *swapIterator;
+
+ // List orientation (from the code below): nextLRU points toward the MRU
+ // head (head's nextLRU is nil); nextMRU points toward the LRU tail
+ // (tail's nextMRU is nil).
+ if ( cacheGlobals->mru != iterator ) // if it's not already the mru iterator
+ {
+ swapIterator = cacheGlobals->mru; // put it in the front of the double queue
+ cacheGlobals->mru = iterator;
+ // Unlink iterator: its MRU-side neighbor is non-nil because iterator
+ // is not the head.
+ iterator->nextLRU->nextMRU = iterator->nextMRU;
+ if ( iterator->nextMRU != nil )
+ iterator->nextMRU->nextLRU = iterator->nextLRU;
+ else
+ cacheGlobals->lru= iterator->nextLRU; // iterator was the tail
+ // Relink iterator at the head, in front of the old head.
+ iterator->nextMRU = swapIterator;
+ iterator->nextLRU = nil;
+ swapIterator->nextLRU = iterator;
+ }
+}
+
+
+//________________________________________________________________________________
+// Routine: InsertCatalogIteratorAsLRU
+//
+// Function: Moves catalog iterator to head of lru order in double linked list
+//
+// Assumes list simple lock is held
+//_______________________________________________________________________________
+static void
+InsertCatalogIteratorAsLRU ( CatalogCacheGlobals *cacheGlobals, CatalogIterator *iterator )
+{
+ CatalogIterator *swapIterator;
+
+ // Mirror image of InsertCatalogIteratorAsMRU: move iterator to the LRU tail.
+ if ( cacheGlobals->lru != iterator )
+ {
+ swapIterator = cacheGlobals->lru;
+ cacheGlobals->lru = iterator;
+ // iterator is not the tail, so its LRU-side neighbor is non-nil.
+ iterator->nextMRU->nextLRU = iterator->nextLRU;
+ if ( iterator->nextLRU != nil )
+ iterator->nextLRU->nextMRU = iterator->nextMRU;
+ else
+ cacheGlobals->mru= iterator->nextMRU; // iterator was the head
+ iterator->nextLRU = swapIterator;
+ iterator->nextMRU = nil;
+ swapIterator->nextMRU = iterator;
+ }
+}
+
+
+
+//_______________________________________________________________________________
+// Routine: PrepareForLongName
+//
+// Function: Takes a CatalogIterator whose nameType is kShortUnicodeName, and
+// changes the nameType to kLongUnicodeName.
+//
+// Since long Unicode names aren't stored in the CatalogIterator itself, we have
+// to point to an HFSUniStr255 for storage. In the current implementation, we have
+// just one such global buffer in the cache globals. We'll set the iterator to
+// point to the global buffer and invalidate the iterator that was using it
+// (i.e. the iterator whose nameType is kLongUnicodeName).
+//
+// Eventually, we might want to have a list of long name buffers which we recycle
+// using an LRU algorithm. Or perhaps, some other way....
+//
+// Assumes: catalogIterator is locked (MacOS X)
+//_______________________________________________________________________________
+static void
+PrepareForLongName ( CatalogIterator *iterator )
+{
+ CatalogCacheGlobals *cacheGlobals = GetCatalogCacheGlobals();
+ CatalogIterator *iter;
+
+ if (DEBUG_BUILD && iterator->nameType != kShortUnicodeName)
+ DebugStr("\p PrepareForLongName: nameType is wrong!");
+
+ //
+ // Walk through all the iterators. The first iterator whose nameType
+ // is kLongUnicodeName is invalidated (because it is using the global
+ // long name buffer).
+ //
+
+ CATALOG_ITER_LIST_LOCK(cacheGlobals);
+
+ for ( iter = cacheGlobals->mru ; iter != nil ; iter = iter->nextMRU )
+ {
+ if (iter->nameType == kLongUnicodeName)
+ {
+ // if iterator is not already last then make it last
+ if ( iter->nextMRU != nil )
+ InsertCatalogIteratorAsLRU( cacheGlobals, iter );
+
+ // Invalidate the old owner of the global long-name buffer so it
+ // cannot be matched against a volume/folder again.
+ (void) CI_LOCK_FROM_LIST(cacheGlobals,iter);
+ iter->volume = 0; // trash it
+ iter->folderID = 0;
+ (void) CI_UNLOCK(iter);
+
+ #if TARGET_API_MACOS_X
+ break;
+ #endif
+ }
+ }
+
+ /*
+ * if iter is nil then none of the iterators was using the LongUnicodeName buffer
+ */
+ // NOTE(review): the list lock is released here only on the iter == nil
+ // path; presumably CI_LOCK_FROM_LIST drops the list lock when it
+ // acquires the iterator lock -- confirm against its definition.
+ if (iter == nil)
+ CATALOG_ITER_LIST_UNLOCK(cacheGlobals);
+
+ //
+ // Change the nameType of this iterator and point to the global
+ // long name buffer.
Note - this iterator is already locked + // + iterator->nameType = kLongUnicodeName; + iterator->folderName.longNamePtr = &cacheGlobals->longName; +} + diff --git a/bsd/hfs/hfscommon/Catalog/CatalogUtilities.c b/bsd/hfs/hfscommon/Catalog/CatalogUtilities.c new file mode 100644 index 000000000..15ff358a9 --- /dev/null +++ b/bsd/hfs/hfscommon/Catalog/CatalogUtilities.c @@ -0,0 +1,889 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: CatalogUtilities.c + + Contains: Private Catalog Manager support routines. + + Version: HFS Plus 1.0 + + Copyright: © 1997-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: xxx put technology here xxx + + Writers: + + (DSH) Deric Horn + (msd) Mark Day + (djb) Don Brady + + Change History (most recent first): + 1/8/99 djb Fixing LocateCatalogNodeByMangledName... + 1/7/99 djb In BuildCatalogKeyUTF8 check name length against NAME_MAX. + 12/7/98 djb Add ExtractTextEncoding routine to get text encodings. + 11/20/98 djb Add support for UTF-8 names. 
+ 8/31/98 djb GetTimeLocal now takes an input. + 4/17/98 djb Add VCB locking. + 4/3/98 djb Removed last name conversion cache from LocateCatalogNodeWithRetry. + 4/2/98 djb InvalidateCatalogNodeCache and TrashCatalogNodeCache are not used in MacOS X. + 03/31/98 djb Sync up with final HFSVolumes.h header file. + + 1/29/98 DSH Add TrashCatalogNodeCache for TrashAllFSCaches API support. + 12/15/97 djb Radar #2202860, In LocateCatalogNodeByMangledName remap + cmParentNotFound error code to cmNotFound. + 12/10/97 DSH 2201501, Pin the leof and peof to multiple of allocation blocks + under 2 Gig. + 12/9/97 DSH 2201501, Pin returned leof values to 2^31-1 (SInt32), instead of + 2^32-1 + 11/26/97 djb Radar #2005688, 2005461 - need to handle kTextMalformedInputErr. + 11/25/97 djb Radar #2002357 (again) fix new bug introduced in . + 11/17/97 djb PrepareInputName routine now returns an error. + 10/19/97 msd Bug 1684586. GetCatInfo and SetCatInfo use only contentModDate. + 10/17/97 djb Add ConvertInputNameToUnicode for Catalog Create/Rename. + 10/14/97 djb Fix LocateCatalogNode's MakeFSSpec optimization (radar #1683166) + 10/13/97 djb Copy text encoding in CopyCatalogNodeData. Fix cut/paste error + in VolumeHasEncodings macro. When accessing encoding bitmap use + the MapEncodingToIndex and MapIndexToEncoding macros. + 10/1/97 djb Remove old Catalog Iterator code... + 9/8/97 msd Make sure a folder's modifyDate is set whenever its + contentModDate is set. + 9/4/97 djb Add MakeFSSpec optimization. + 9/4/97 msd In CatalogNodeData, change attributeModDate to modifyDate. + 8/26/97 djb Back out (UpdateFolderCount must maintain vcbNmFls for HFS + Plus volumes too). + 8/14/97 djb Remove hard link support. + 7/18/97 msd Include LowMemPriv.h. 
+ 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 7/8/97 DSH Loading PrecompiledHeaders from define passed in on C line + 6/27/97 msd UpdateFolderCount should update number of root files/folders for + HFS volumes, not HFS Plus. + 6/24/97 djb LocateCatalogNodeWithRetry did not always set result code. + 6/24/97 djb Add LocateCatalogNodeByMangledName routine + 6/24/97 djb first checked in +*/ +#include +#include + +#include "../headers/FileMgrInternal.h" +#include "../headers/BTreesInternal.h" +#include "../headers/CatalogPrivate.h" +#include "../headers/HFSUnicodeWrappers.h" +#include + +static void ExtractTextEncoding (ItemCount length, ConstUniCharArrayPtr string, UInt32 * textEncoding); + +//******************************************************************************* +// Routine: LocateCatalogNode +// +// Function: Locates the catalog record for an existing folder or file +// CNode and returns pointers to the key and data records. +// +//******************************************************************************* + +OSErr +LocateCatalogNode(const ExtendedVCB *volume, HFSCatalogNodeID folderID, const CatalogName *name, + UInt32 hint, CatalogKey *keyPtr, CatalogRecord *dataPtr, UInt32 *newHint) +{ + OSErr result; + CatalogName *nodeName = NULL; /* To ward off uninitialized use warnings from compiler */ + HFSCatalogNodeID threadParentID; + + + result = LocateCatalogRecord(volume, folderID, name, hint, keyPtr, dataPtr, newHint); + ReturnIfError(result); + + // if we got a thread record, then go look up real record + switch ( dataPtr->recordType ) + { + case kHFSFileThreadRecord: + case kHFSFolderThreadRecord: + threadParentID = dataPtr->hfsThread.parentID; + nodeName = (CatalogName *) &dataPtr->hfsThread.nodeName; + break; + + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + threadParentID = dataPtr->hfsPlusThread.parentID; + nodeName = (CatalogName *) &dataPtr->hfsPlusThread.nodeName; + break; + + default: + 
threadParentID = 0; + break; + } + + if ( threadParentID ) // found a thread + result = LocateCatalogRecord(volume, threadParentID, nodeName, kNoHint, keyPtr, dataPtr, newHint); + + return result; +} + +// +// Routine: LocateCatalogNodeByKey +// +// Function: Locates the catalog record for an existing folder or file +// CNode and returns the key and data records. +// + +OSErr +LocateCatalogNodeByKey(const ExtendedVCB *volume, UInt32 hint, CatalogKey *keyPtr, + CatalogRecord *dataPtr, UInt32 *newHint) +{ + OSErr result; + CatalogName *nodeName = NULL; /* To ward off uninitialized use warnings from compiler */ + HFSCatalogNodeID threadParentID; + UInt16 tempSize; + + + result = SearchBTreeRecord(volume->catalogRefNum, keyPtr, hint, keyPtr, + dataPtr, &tempSize, newHint); + if (result == btNotFound) + result = cmNotFound; + ReturnIfError(result); + + // if we got a thread record, then go look up real record + switch ( dataPtr->recordType ) + { + case kHFSFileThreadRecord: + case kHFSFolderThreadRecord: + threadParentID = dataPtr->hfsThread.parentID; + nodeName = (CatalogName *) &dataPtr->hfsThread.nodeName; + break; + + case kHFSPlusFileThreadRecord: + case kHFSPlusFolderThreadRecord: + threadParentID = dataPtr->hfsPlusThread.parentID; + nodeName = (CatalogName *) &dataPtr->hfsPlusThread.nodeName; + break; + + default: + threadParentID = 0; + break; + } + + if ( threadParentID ) // found a thread + result = LocateCatalogRecord(volume, threadParentID, nodeName, kNoHint, keyPtr, dataPtr, newHint); + + return result; +} + + +#if 0 +//******************************************************************************* +// Routine: LocateCatalogNodeWithRetry +// +// Function: Locates the catalog record for an existing folder or file node. +// For HFS Plus volumes a retry is performed when a catalog node is +// not found and the volume contains more than one text encoding. 
+// +//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ + +#define VolumeHasEncodings(v) \ + ( ((v)->encodingsBitmap != 0 ) + +#define EncodingInstalled(i) \ + ( (fsVars)->gConversionContext[(i)].toUnicode != 0 ) + +#define EncodingUsedByVolume(v,i) \ + ( ((v)->encodingsBitmap & (1 << (i))) ) + + +OSErr +LocateCatalogNodeWithRetry (const ExtendedVCB *volume, HFSCatalogNodeID folderID, ConstStr31Param pascalName, CatalogName *unicodeName, + UInt32 hint, CatalogKey *keyPtr, CatalogRecord *dataPtr, UInt32 *newHint) +{ + TextEncoding defaultEncoding; + TextEncoding encoding; + ItemCount encodingsToTry; + FSVarsRec *fsVars; + OSErr result = cmNotFound; + + fsVars = (FSVarsRec*) LMGetFSMVars(); // used by macros + + defaultEncoding = GetDefaultTextEncoding(); + encodingsToTry = CountInstalledEncodings(); + + // 1. Try finding file using default encoding (typical case) + + { + --encodingsToTry; + result = PrepareInputName(pascalName, true, defaultEncoding, unicodeName); + if (result == noErr) + result = LocateCatalogNode(volume, folderID, unicodeName, hint, keyPtr, dataPtr, newHint); + else + result = cmNotFound; + + if ( result != cmNotFound || encodingsToTry == 0) + return result; + } + + // + // XXX if the pascal string contains all 7-bit ascii then we don't need to do anymore retries + // + + // 2. Try finding file using Mac Roman (if not already tried above) + + if ( defaultEncoding != kTextEncodingMacRoman ) + { + --encodingsToTry; + result = PrepareInputName(pascalName, true, kTextEncodingMacRoman, unicodeName); + if (result == noErr) + result = LocateCatalogNode(volume, folderID, unicodeName, hint, keyPtr, dataPtr, newHint); + else + result = cmNotFound; + + if ( result != cmNotFound || encodingsToTry == 0 ) + return result; + } + + // 3. Try with encodings from disk (if any) + + if ( VolumeHasEncodings(volume) ) // any left to try? 
+ { + UInt32 index; + + index = 0; // since we pre increment this will skip MacRoman (which was already tried above) + + while ( index < kMacBaseEncodingCount ) + { + ++index; + + encoding = MapIndexToEncoding(index); + + if ( encoding == defaultEncoding ) + continue; // we did this one already + + if ( EncodingInstalled(index) && EncodingUsedByVolume(volume, index) ) + { + --encodingsToTry; + result = PrepareInputName(pascalName, true, encoding, unicodeName); + if (result == noErr) + result = LocateCatalogNode(volume, folderID, unicodeName, hint, keyPtr, dataPtr, newHint); + else + result = cmNotFound; + + if ( result != cmNotFound || encodingsToTry == 0 ) + return result; + } + } + } + + // 4. Try any remaining encodings (if any) + + { + UInt32 index; + + index = 0; // since we pre increment this will skip MacRoman (which was already tried above) + + while ( (encodingsToTry > 0) && (index < kMacBaseEncodingCount) ) + { + ++index; + + encoding = MapIndexToEncoding(index); + + if ( encoding == defaultEncoding ) + continue; // we did this one already + + if ( EncodingInstalled(index) && EncodingUsedByVolume(volume, index) == false ) + { + --encodingsToTry; + result = PrepareInputName(pascalName, true, encoding, unicodeName); + if (result == noErr) + result = LocateCatalogNode(volume, folderID, unicodeName, hint, keyPtr, dataPtr, newHint); + else + result = cmNotFound; + + if ( result != cmNotFound || encodingsToTry == 0 ) + return result; + } + } + } + + return cmNotFound; +} +#endif + +//******************************************************************************* +// Routine: LocateCatalogNodeByMangledName +// +// Function: Locates the catalog record associated with a mangled name (if any) +// +//******************************************************************************* +#define kMaxCompareLen 64 /* If it compares this far...lets believe it */ + +OSErr +LocateCatalogNodeByMangledName( const ExtendedVCB *volume, HFSCatalogNodeID folderID, + const unsigned 
char * name, UInt32 length, CatalogKey *keyPtr, + CatalogRecord *dataPtr, UInt32 *hintPtr ) +{ + HFSCatalogNodeID fileID; + unsigned char nodeName[kMaxCompareLen+1]; + OSErr result; + size_t actualDstLen; + ByteCount prefixlen; + + + if (name == NULL || name[0] == '\0') + return cmNotFound; + + fileID = GetEmbeddedFileID(name, length, &prefixlen); + + if ( fileID < kHFSFirstUserCatalogNodeID ) + return cmNotFound; + + result = LocateCatalogNode(volume, fileID, NULL, kNoHint, keyPtr, dataPtr, hintPtr); + if ( result == cmParentNotFound ) // GetCatalogNode already handled cmParentNotFound case + result = cmNotFound; // so remap + ReturnIfError(result); + + // first make sure that the parents match + if ( folderID != keyPtr->hfsPlus.parentID ) + return cmNotFound; // not the same folder so this is a false match + + (void) utf8_encodestr(keyPtr->hfsPlus.nodeName.unicode, + keyPtr->hfsPlus.nodeName.length * sizeof (UniChar), + nodeName, &actualDstLen, kMaxCompareLen+1, ':', 0); + + prefixlen = min(prefixlen, kMaxCompareLen); + + if ((prefixlen - actualDstLen) < 6) + prefixlen = actualDstLen; /* To take into account UTF8 rounding */ + + if ( (actualDstLen < prefixlen) || bcmp(nodeName, name, prefixlen-6) != 0) + return cmNotFound; // mangled names didn't match so this is a false match + + return noErr; // we found it +} + + +//******************************************************************************* +// Routine: LocateCatalogRecord +// +// Function: Locates the catalog record associated with folderID and name +// +//******************************************************************************* + +OSErr +LocateCatalogRecord(const ExtendedVCB *volume, HFSCatalogNodeID folderID, const CatalogName *name, + UInt32 hint, CatalogKey *keyPtr, CatalogRecord *dataPtr, UInt32 *newHint) +{ + OSErr result; + CatalogKey tempKey; // 518 bytes + UInt16 tempSize; + + BuildCatalogKey(folderID, name, (volume->vcbSigWord == kHFSPlusSigWord), &tempKey); + + if ( name == NULL ) + hint 
= kNoHint; // no CName given so clear the hint + + result = SearchBTreeRecord(volume->catalogRefNum, &tempKey, hint, keyPtr, dataPtr, &tempSize, newHint); + + return (result == btNotFound ? cmNotFound : result); +} + + +//******************************************************************************* +// Routine: LocateCatalogThread +// +// Function: Locates a catalog thread record in the catalog BTree file and +// returns a pointer to the data record. +// +//******************************************************************************* + +OSErr +LocateCatalogThread(const ExtendedVCB *volume, HFSCatalogNodeID nodeID, CatalogRecord *threadData, UInt16 *threadSize, UInt32 *threadHint) +{ + CatalogKey threadKey; // 518 bytes + OSErr result; + + //--- build key record + + BuildCatalogKey(nodeID, NULL, (volume->vcbSigWord == kHFSPlusSigWord), &threadKey); + + //--- locate thread record in BTree + + result = SearchBTreeRecord( volume->catalogRefNum, &threadKey, kNoHint, &threadKey, + threadData, threadSize, threadHint); + + return (result == btNotFound ? cmNotFound : result); +} + + +/* + * Routine: BuildCatalogKey + * + * Function: Constructs a catalog key record (ckr) given the parent + * folder ID and CName. Works for both classic and extended + * HFS volumes. 
+ *
+ */
+
+void
+BuildCatalogKey(HFSCatalogNodeID parentID, const CatalogName *cName, Boolean isHFSPlus, CatalogKey *key)
+{
+ // A NULL cName yields a key with an empty name, as used for thread
+ // records (see LocateCatalogThread).
+ if ( isHFSPlus )
+ {
+ key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength; // initial key length (4 + 2)
+ key->hfsPlus.parentID = parentID; // set parent ID
+ key->hfsPlus.nodeName.length = 0; // null CName length
+ if ( cName != NULL )
+ {
+ CopyCatalogName(cName, (CatalogName *) &key->hfsPlus.nodeName, isHFSPlus);
+ key->hfsPlus.keyLength += sizeof(UniChar) * cName->ustr.length; // add CName size to key length
+ }
+ }
+ else
+ {
+ key->hfs.keyLength = kHFSCatalogKeyMinimumLength; // initial key length (1 + 4 + 1)
+ key->hfs.reserved = 0; // clear unused byte
+ key->hfs.parentID = parentID; // set parent ID
+ key->hfs.nodeName[0] = 0; // null CName length
+ if ( cName != NULL )
+ {
+ // UpdateCatalogName truncates to CMMaxCName, so the copied length
+ // byte is what must be added to keyLength.
+ UpdateCatalogName(cName->pstr, key->hfs.nodeName);
+ key->hfs.keyLength += key->hfs.nodeName[0]; // add CName size to key length
+ }
+ }
+}
+
+/*
+ * for HFS, only MacRoman is supported.
If a non-MacRoman character is found, an error is returned + */ +OSErr +BuildCatalogKeyUTF8(ExtendedVCB *volume, HFSCatalogNodeID parentID, const char *name, UInt32 nameLength, + CatalogKey *key, UInt32 *textEncoding) +{ + OSErr err = 0; + + if ( name == NULL) + nameLength = 0; + else if (nameLength == kUndefinedStrLen) + nameLength = strlen(name); + + if ( volume->vcbSigWord == kHFSPlusSigWord ) { + size_t unicodeBytes = 0; + + key->hfsPlus.keyLength = kHFSPlusCatalogKeyMinimumLength; // initial key length (4 + 2) + key->hfsPlus.parentID = parentID; // set parent ID + key->hfsPlus.nodeName.length = 0; // null CName length + if ( nameLength > 0 ) { + err = utf8_decodestr(name, nameLength, key->hfsPlus.nodeName.unicode, + &unicodeBytes, sizeof(key->hfsPlus.nodeName.unicode), ':', UTF_DECOMPOSED); + key->hfsPlus.nodeName.length = unicodeBytes / sizeof(UniChar); + key->hfsPlus.keyLength += unicodeBytes; + } + + if (textEncoding) + ExtractTextEncoding(key->hfsPlus.nodeName.length, key->hfsPlus.nodeName.unicode, textEncoding); + } + else { + key->hfs.keyLength = kHFSCatalogKeyMinimumLength; // initial key length (1 + 4 + 1) + key->hfs.reserved = 0; // clear unused byte + key->hfs.parentID = parentID; // set parent ID + key->hfs.nodeName[0] = 0; // null CName length + if ( nameLength > 0 ) { + err = utf8_to_hfs(volume, nameLength, name, &key->hfs.nodeName[0]); + /* + * Retry with MacRoman in case that's how it was exported. + * When textEncoding != NULL we know that this is a create + * or rename call and can skip the retry (ugly but it works). 
+ */
+ if (err && (textEncoding == NULL))
+ err = utf8_to_mac_roman(nameLength, name, &key->hfs.nodeName[0]);
+ key->hfs.keyLength += key->hfs.nodeName[0]; // add CName size to key length
+ }
+ if (textEncoding)
+ *textEncoding = 0; // classic HFS: always MacRoman
+ }
+
+ // Map conversion failures onto Mac OS error codes expected by callers.
+ if (err) {
+ if (err == ENAMETOOLONG)
+ err = bdNamErr; /* name is too long */
+ else
+ err = paramErr; /* name has invalid characters */
+ }
+
+ return err;
+}
+
+
+/*
+ * make a guess at the text encoding value that corresponds to the Unicode characters
+ */
+static void
+ExtractTextEncoding(ItemCount length, ConstUniCharArrayPtr string, UInt32 * textEncoding)
+{
+ int i;
+ UniChar ch;
+
+ // Default to 0 (MacRoman index); switch to MacJapanese on the first
+ // CJK or fullwidth codepoint encountered.
+ *textEncoding = 0;
+
+ for (i = 0; i < length; ++i) {
+ ch = string[i];
+ /* CJK codepoints are 0x3000 thru 0x9FFF */
+ if (ch >= 0x3000) {
+ if (ch < 0xa000) {
+ *textEncoding = kTextEncodingMacJapanese;
+ break;
+ }
+
+ /* fullwidth character codepoints are 0xFF00 thru 0xFFEF */
+ if (ch >= 0xff00 && ch <= 0xffef) {
+ *textEncoding = kTextEncodingMacJapanese;
+ break;
+ }
+ }
+ }
+}
+
+
+//*******************************************************************************
+// Routine: FlushCatalog
+//
+// Function: Flushes the catalog for a specified volume.
+//
+//*******************************************************************************
+
+OSErr
+FlushCatalog(ExtendedVCB *volume)
+{
+ FCB * fcb;
+ OSErr result;
+
+ // Flush the catalog B-tree file itself first.
+ fcb = GetFileControlBlock(volume->catalogRefNum);
+ result = BTFlushPath(fcb);
+
+ if (result == noErr)
+ {
+ //--- check if catalog's fcb is dirty...
+
+ if ( fcb->fcbFlags & fcbModifiedMask )
+ {
+ VCB_LOCK(volume);
+ volume->vcbFlags |= 0xFF00; // Mark the VCB dirty
+ volume->vcbLsMod = GetTimeUTC(); // update last modified date
+ VCB_UNLOCK(volume);
+
+ result = FlushVolumeControlBlock(volume);
+ }
+ }
+
+ return result;
+}
+
+
+//_______________________________________________________________________________
+// Routine: UpdateCatalogName
+//
+// Function: Updates a CName.
+//
+//_______________________________________________________________________________
+
+void
+UpdateCatalogName(ConstStr31Param srcName, Str31 destName)
+{
+ Size length = srcName[0];
+
+ // Silently truncate over-long names to the classic HFS maximum.
+ if (length > CMMaxCName)
+ length = CMMaxCName; // truncate to max
+
+ destName[0] = length; // set length byte
+
+ BlockMoveData(&srcName[1], &destName[1], length);
+}
+
+
+//*******************************************************************************
+// Routine: AdjustVolumeCounts
+//
+// Function: Adjusts the folder and file counts in the VCB
+//
+//*******************************************************************************
+
+void
+AdjustVolumeCounts(ExtendedVCB *volume, SInt16 type, SInt16 delta)
+{
+ // TODO: also update extended VCB fields...
+
+ VCB_LOCK(volume);
+
+ if (type == kHFSFolderRecord || type == kHFSPlusFolderRecord)
+ volume->vcbDirCnt += delta; // adjust volume folder count, TODO: worry about overflow?
+ else
+ volume->vcbFilCnt += delta; // adjust volume file count
+
+ volume->vcbFlags |= 0xFF00; // Mark the VCB dirty
+ volume->vcbLsMod = GetTimeUTC(); // update last modified date
+
+ VCB_UNLOCK(volume);
+}
+
+
+//*******************************************************************************
+
+void
+UpdateVolumeEncodings(ExtendedVCB *volume, TextEncoding encoding)
+{
+ UInt32 index;
+
+ // Record that this encoding is used on the volume by setting its bit in
+ // the encodings bitmap; only the low 7 bits of the encoding are used for
+ // the index mapping.
+ encoding &= 0x7F;
+
+ index = MapEncodingToIndex(encoding);
+
+ VCB_LOCK(volume);
+
+ volume->encodingsBitmap |= (1 << index);
+
+ VCB_UNLOCK(volume);
+
+ // vcb should already be marked dirty
+}
+
+
+//*******************************************************************************
+
+OSErr
+UpdateFolderCount( ExtendedVCB *volume, HFSCatalogNodeID parentID, const CatalogName *name, SInt16 newType,
+ UInt32 hint, SInt16 valenceDelta)
+{
+ CatalogKey tempKey; // 518 bytes
+ CatalogRecord tempData; // 520 bytes
+ UInt32 tempHint;
+ HFSCatalogNodeID folderID;
+ UInt16 recordSize;
+ OSErr result;
+
+#if 0
+ result = SearchBTreeRecord(volume->catalogRefNum, parentKey, hint,
+
&tempKey, &tempData, &recordSize, &tempHint); + if (result) + return (result == btNotFound ? cmNotFound : result); +#else + + result = LocateCatalogNode(volume, parentID, name, hint, &tempKey, &tempData, &tempHint); + ReturnIfError(result); +#endif + + if ( volume->vcbSigWord == kHFSPlusSigWord ) // HFS Plus + { + UInt32 timeStamp; + + if ( DEBUG_BUILD && tempData.recordType != kHFSPlusFolderRecord ) + DebugStr("\p UpdateFolder: found HFS folder on HFS+ volume!"); + + timeStamp = GetTimeUTC(); + /* adjust valence, but don't go negative */ + if (valenceDelta > 0) + tempData.hfsPlusFolder.valence += valenceDelta; + else if (tempData.hfsPlusFolder.valence != 0) + tempData.hfsPlusFolder.valence += valenceDelta; + else + volume->vcbFlags |= kHFS_DamagedVolume; + tempData.hfsPlusFolder.contentModDate = timeStamp; // set date/time last modified + folderID = tempData.hfsPlusFolder.folderID; + recordSize = sizeof(tempData.hfsPlusFolder); + } + else // classic HFS + { + if ( DEBUG_BUILD && tempData.recordType != kHFSFolderRecord ) + DebugStr("\p UpdateFolder: found HFS+ folder on HFS volume!"); + + /* adjust valence, but don't go negative */ + if (valenceDelta > 0) + tempData.hfsFolder.valence += valenceDelta; + else if (tempData.hfsFolder.valence != 0) + tempData.hfsFolder.valence += valenceDelta; + else + volume->vcbFlags |= kHFS_DamagedVolume; + tempData.hfsFolder.modifyDate = GetTimeLocal(true); // set date/time last modified + folderID = tempData.hfsFolder.folderID; + recordSize = sizeof(tempData.hfsFolder); + } + + result = ReplaceBTreeRecord(volume->catalogRefNum, &tempKey, tempHint, + &tempData, recordSize, &tempHint); + ReturnIfError(result); + + if ( folderID == kHFSRootFolderID ) + { + if (newType == kHFSFolderRecord || newType == kHFSPlusFolderRecord) + { + VCB_LOCK(volume); + volume->vcbNmRtDirs += valenceDelta; // adjust root folder count (undefined for HFS Plus) + VCB_UNLOCK(volume); + } + else + { + VCB_LOCK(volume); + volume->vcbNmFls += valenceDelta; // 
adjust root file count (used by GetVolInfo) + VCB_UNLOCK(volume); + } + } + + //XXX also update extended VCB fields... + + return result; +} + + +//******************************************************************************* + +UInt16 +GetCatalogRecordSize(const CatalogRecord *dataRecord) +{ + switch (dataRecord->recordType) + { + case kHFSFileRecord: + return sizeof(HFSCatalogFile); + + case kHFSFolderRecord: + return sizeof(HFSCatalogFolder); + + case kHFSPlusFileRecord: + return sizeof(HFSPlusCatalogFile); + + case kHFSPlusFolderRecord: + return sizeof(HFSPlusCatalogFolder); + + case kHFSFolderThreadRecord: + case kHFSFileThreadRecord: + return sizeof(HFSCatalogThread); + + case kHFSPlusFolderThreadRecord: + case kHFSPlusFileThreadRecord: + return sizeof(HFSPlusCatalogThread); + + default: + return 0; + } +} + + +//******************************************************************************* + +void +CopyCatalogNodeData(const ExtendedVCB *volume, const CatalogRecord *dataPtr, CatalogNodeData *nodeData) +{ + /* convert classic hfs records to hfs plus format */ + + if (dataPtr->recordType == kHFSFolderRecord) { + nodeData->cnd_type = kCatalogFolderNode; + nodeData->cnd_flags = dataPtr->hfsFolder.flags; + nodeData->cnd_nodeID = dataPtr->hfsFolder.folderID; + nodeData->cnd_createDate = LocalToUTC(dataPtr->hfsFolder.createDate); + nodeData->cnd_contentModDate = LocalToUTC(dataPtr->hfsFolder.modifyDate); + nodeData->cnd_backupDate = LocalToUTC(dataPtr->hfsFolder.backupDate); + nodeData->cnd_valence = dataPtr->hfsFolder.valence; + + BlockMoveData(&dataPtr->hfsFolder.userInfo, &nodeData->cnd_finderInfo, 32); + } else if (dataPtr->recordType == kHFSFileRecord) { + UInt32 i; + + nodeData->cnd_type = kCatalogFileNode; + nodeData->cnd_flags = dataPtr->hfsFile.flags; + nodeData->cnd_nodeID = dataPtr->hfsFile.fileID; + nodeData->cnd_createDate = LocalToUTC(dataPtr->hfsFile.createDate); + nodeData->cnd_contentModDate = LocalToUTC(dataPtr->hfsFile.modifyDate); + 
nodeData->cnd_backupDate = LocalToUTC(dataPtr->hfsFile.backupDate); + nodeData->cnd_linkCount = 0; + + BlockMoveData(&dataPtr->hfsFile.userInfo, &nodeData->cnd_finderInfo, 16); + BlockMoveData(&dataPtr->hfsFile.finderInfo, (void*)((UInt32)&nodeData->cnd_finderInfo + 16), 16); + + nodeData->cnd_datafork.logicalSize = dataPtr->hfsFile.dataLogicalSize; + nodeData->cnd_datafork.totalBlocks = + dataPtr->hfsFile.dataPhysicalSize / volume->blockSize; + + nodeData->cnd_rsrcfork.logicalSize = dataPtr->hfsFile.rsrcLogicalSize; + nodeData->cnd_rsrcfork.totalBlocks = + dataPtr->hfsFile.rsrcPhysicalSize / volume->blockSize; + + for (i = 0; i < kHFSExtentDensity; ++i) { + nodeData->cnd_datafork.extents[i].startBlock = + (UInt32) (dataPtr->hfsFile.dataExtents[i].startBlock); + + nodeData->cnd_datafork.extents[i].blockCount = + (UInt32) (dataPtr->hfsFile.dataExtents[i].blockCount); + + nodeData->cnd_rsrcfork.extents[i].startBlock = + (UInt32) (dataPtr->hfsFile.rsrcExtents[i].startBlock); + + nodeData->cnd_rsrcfork.extents[i].blockCount = + (UInt32) (dataPtr->hfsFile.rsrcExtents[i].blockCount); + } + for (i = kHFSExtentDensity; i < kHFSPlusExtentDensity; ++i) { + nodeData->cnd_datafork.extents[i].startBlock = 0; + nodeData->cnd_datafork.extents[i].blockCount = 0; + nodeData->cnd_rsrcfork.extents[i].startBlock = 0; + nodeData->cnd_rsrcfork.extents[i].blockCount = 0; + } + } else { + nodeData->cnd_type = 0; + } +} + + +//_______________________________________________________________________ + +void +CopyCatalogName(const CatalogName *srcName, CatalogName *dstName, Boolean isHFSPLus) +{ + UInt32 length; + + if ( srcName == NULL ) + { + if ( dstName != NULL ) + dstName->ustr.length = 0; // set length byte to zero (works for both unicode and pascal) + return; + } + + if (isHFSPLus) + length = sizeof(UniChar) * (srcName->ustr.length + 1); + else + length = sizeof(UInt8) + srcName->pstr[0]; + + if ( length > 1 ) + BlockMoveData(srcName, dstName, length); + else + dstName->ustr.length = 
0; // set length byte to zero (works for both unicode and pascal) +} + +//_______________________________________________________________________ + +UInt32 +CatalogNameLength(const CatalogName *name, Boolean isHFSPlus) +{ + if (isHFSPlus) + return name->ustr.length; + else + return name->pstr[0]; +} + + + diff --git a/bsd/hfs/hfscommon/Catalog/FileIDsServices.c b/bsd/hfs/hfscommon/Catalog/FileIDsServices.c new file mode 100644 index 000000000..4e7a2d0f9 --- /dev/null +++ b/bsd/hfs/hfscommon/Catalog/FileIDsServices.c @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: FileIDServices.c + + Contains: File ID manipulating routines. + + Version: HFS Plus 1.0 + + Written by: Deric Horn + + Copyright: © 1996-1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: Deric Horn + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (JL) Jim Luther + (msd) Mark Day + (djb) Don Brady + (DSH) Deric Horn + + Change History (most recent first): + 3/2/98 djb Fix extents corruption bug in MoveExtents (radar #2309434). + 11/20/98 djb Add support for UTF-8 names. + 4/2/98 djb Switch over to real BTree interface in MoveExtents and DeleteExtents. + 3/31/98 djb Sync up with final HFSVolumes.h header file. + + 11/17/97 djb PrepareInputName routine now returns an error. + 11/13/97 djb Radar #2001699 ResolveFileID needs to use CMNotFound error. + 10/31/97 JL #2000184 - CreateFileThreadID and ExchangeFiles now return the + WDCBRecPtr or NULL for external file systems. ExchangeFiles no + longer returns length of FCB table to caller since that wasn't + ever needed. + <18> 10/23/97 DSH 1685058, Fix ExchangeFiles by invalidating the node cache before + switching the files. + 10/19/97 msd Bug 1684586. GetCatInfo and SetCatInfo use only contentModDate. + 10/16/97 DSH Return badFidErr in ResolveFileID if LocateCatalogThread fails + 10/15/97 DSH CreateFileThreadID(), remap btExists to fidExists. + 9/7/97 djb Turn off some DebugStr calls. + 9/4/97 msd Remove call to PropertyExchangeObjects. + 8/14/97 djb Remove hard link support. + 7/18/97 msd Include LowMemPriv.h. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 7/8/97 DSH Loading PrecompiledHeaders from define passed in on C line + 6/24/97 djb Add hard link support to ResolveFileID and CreateFileIDRef. + 6/20/97 msd Use contentModDate and attributeModDate fields instead of + modifyDate. + 6/13/97 djb Switch over from PrepareOutputName to ConvertUnicodeToHFSName. + PrepareInputName now takes an encoding. + 5/28/97 msd Move the declaration of FindFileName to FilesInternal.i. + 5/19/97 djb No longer need to invalidate vcbDirIDM field. 
+ 5/16/97 msd In ExchangeFiles, change srcNamePtr from char * to StringPtr + (fixes warnings). + 4/28/97 djb (DSH) Added VolumeWritable check back into CreateFileIDThread. + 4/24/97 djb first checked in + 4/11/97 DSH Use extended VCB fields catalogRefNum, and extentsRefNum. + 4/9/97 msd Rewrite CreateFileThreadID so that it properly handles + pathnames, and doesn't overwrite the ioNamePtr. The data field + of FindFileNameGlueRec points to a CatalogNodeData, not + CatalogRecord. + 4/4/97 djb Get in sync with volume format changes. + 3/31/97 djb Change ClearMem to ClearMemory. + 3/17/97 DSH C_FlushCache prototype to FilesInternal.h + 3/5/97 msd ExchangeFiles needs to call PropertyExchangeObjects. + 2/13/97 msd Fix MoveExtents and DeleteExtents to work with HFS+ extent + records. + 1/31/97 msd In MoveExtents, when a record isn't found and you want the next + record in order, use the "next record" offset = 1 instead of + "current record" offset = 0. DeleteExtents would always exit + without doing anything because it was searching for an invalid + key. Removed several DebugStrs that were used as cheap code + coverage. + 1/15/97 DSH Resolve wasn't passing the name back for HFS + 1/13/97 djb LocateCatalogThread now passes back the thread record size. + 1/11/97 DSH HFS+, fixed some Unicode/Pascal strings related bugs for use on + HFS+ volumes. + 1/9/97 DSH Fix ExchangeFiles extents + 1/6/97 DSH pass VCB in CloseFile() routine. + 1/6/97 djb Fixed ResolveFileID - it was not returning a directory ID! + 1/3/97 msd Fix prototype for C_FlushCache. Fix prototype for + TrashFileBlocks. + 1/3/97 djb Integrate latest HFSVolumesPriv.h changes. + 1/2/97 DSH C port of ExchangeFileIDs + 12/20/96 djb Fixed bug in CreateFileID. + 12/19/96 DSH All refs to VCB are now refs to ExtendedVCB + 12/19/96 msd Use kFileThreadExistsMask (from HFSVolumesPriv.h) instead of + kFileThreadMask (from FilesInternal.h) since the latter was + incorrectly defined and has now been removed. 
+ 12/19/96 djb Updated for new B-tree Manager interface. + 12/18/96 msd GetFileThreadID was using a bitwise-OR (|) instead of + bitwise-AND (&) to test for a bit being set. + 12/12/96 DSH first checked in + +*/ + +#include "../../hfs_macos_defs.h" +#include "../../hfs_format.h" + +#include "../headers/FileMgrInternal.h" +#include "../headers/HFSUnicodeWrappers.h" +#include "../headers/CatalogPrivate.h" + + +struct ExtentsRecBuffer { + ExtentKey extentKey; + ExtentRecord extentData; +}; +typedef struct ExtentsRecBuffer ExtentsRecBuffer; + + +OSErr CreateFileID( ExtendedVCB *vcb, HFSCatalogNodeID fileID, CatalogName *name, HFSCatalogNodeID *threadID ); +OSErr GetFileThreadID( ExtendedVCB *vcb, HFSCatalogNodeID id, const CatalogName *name, Boolean isHFSPlus, UInt32 *threadID ); + +UInt32 CheckExtents( void *extents, UInt32 blocks, Boolean isHFSPlus ); +OSErr DeleteExtents( ExtendedVCB *vcb, UInt32 fileNumber, Boolean isHFSPlus ); +OSErr MoveExtents( ExtendedVCB *vcb, UInt32 srcFileID, UInt32 destFileID, Boolean isHFSPlus ); +void CopyCatalogNodeInfo( CatalogRecord *src, CatalogRecord *dest ); +void CopyBigCatalogNodeInfo( CatalogRecord *src, CatalogRecord *dest ); + +void CopyExtentInfo( ExtentKey *key, ExtentRecord *data, ExtentsRecBuffer *buffer, UInt16 bufferCount ); +extern void TrashFileBlocks( ExtendedVCB *vcb, UInt32 fileNumber ); + + + +OSErr ExchangeFileIDs( ExtendedVCB *vcb, ConstUTF8Param srcName, ConstUTF8Param destName, HFSCatalogNodeID srcID, HFSCatalogNodeID destID, UInt32 srcHint, UInt32 destHint ) +{ + CatalogKey srcKey; // 518 bytes + CatalogRecord srcData; // 520 bytes + CatalogKey destKey; // 518 bytes + CatalogRecord destData; // 520 bytes + CatalogRecord swapData; // 520 bytes + SInt16 numSrcExtentBlocks; + SInt16 numDestExtentBlocks; + UInt32 textEncoding; + OSErr err; + Boolean isHFSPlus = ( vcb->vcbSigWord == kHFSPlusSigWord ); + + TrashCatalogIterator(vcb, srcID); // invalidate any iterators for this parentID + TrashCatalogIterator(vcb, 
destID); // invalidate any iterators for this parentID + + err = BuildCatalogKeyUTF8(vcb, srcID, srcName, kUndefinedStrLen, &srcKey, &textEncoding); + ReturnIfError(err); + + err = BuildCatalogKeyUTF8(vcb, destID, destName, kUndefinedStrLen, &destKey, &textEncoding); + ReturnIfError(err); + + if ( isHFSPlus ) + { + //-- Step 1: Check the catalog nodes for extents + + //-- locate the source file, test for extents in extent file, and copy the cat record for later + err = LocateCatalogNodeByKey( vcb, srcHint, &srcKey, &srcData, &srcHint ); + ReturnIfError( err ); + + if ( srcData.recordType != kHFSPlusFileRecord ) + return( cmFThdDirErr ); // Error "cmFThdDirErr = it is a directory" + + //-- Check if there are any extents in the source file + //€€ I am only checling the extents in the low 32 bits, routine will fail if files extents after 2 gig are in overflow + numSrcExtentBlocks = CheckExtents( srcData.hfsPlusFile.dataFork.extents, srcData.hfsPlusFile.dataFork.totalBlocks, isHFSPlus ); + if ( numSrcExtentBlocks == 0 ) // then check the resource fork extents + numSrcExtentBlocks = CheckExtents( srcData.hfsPlusFile.resourceFork.extents, srcData.hfsPlusFile.resourceFork.totalBlocks, isHFSPlus ); + + //-- Check if there are any extents in the destination file + err = LocateCatalogNodeByKey( vcb, destHint, &destKey, &destData, &destHint ); + ReturnIfError( err ); + + if ( destData.recordType != kHFSPlusFileRecord ) + return( cmFThdDirErr ); // Error "cmFThdDirErr = it is a directory" + + numDestExtentBlocks = CheckExtents( destData.hfsPlusFile.dataFork.extents, destData.hfsPlusFile.dataFork.totalBlocks, isHFSPlus ); + if ( numDestExtentBlocks == 0 ) // then check the resource fork extents + numDestExtentBlocks = CheckExtents( destData.hfsPlusFile.resourceFork.extents, destData.hfsPlusFile.resourceFork.totalBlocks, isHFSPlus ); + + //-- Step 2: Exchange the Extent key in the extent file + + //-- Exchange the extents key in the extent file + err = DeleteExtents( vcb, 
kHFSBogusExtentFileID, isHFSPlus ); + ReturnIfError( err ); + + if ( numSrcExtentBlocks && numDestExtentBlocks ) // if both files have extents + { + //-- Change the source extents file ids to our known bogus value + err = MoveExtents( vcb, srcData.hfsPlusFile.fileID, kHFSBogusExtentFileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + else + goto ExUndo1a; + } + + //-- Change the destination extents file id's to the source id's + err = MoveExtents( vcb, destData.hfsPlusFile.fileID, srcData.hfsPlusFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + +ExUndo2aPlus: err = DeleteExtents( vcb, srcData.hfsPlusFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! + + err = MoveExtents( vcb, kHFSBogusExtentFileID, srcData.hfsPlusFile.fileID, isHFSPlus ); // Move the extents back + ReturnIfError( err ); // we are doomed. Just QUIT! + + goto ExUndo1a; + } + + //-- Change the bogus extents file id's to the dest id's + err = MoveExtents( vcb, kHFSBogusExtentFileID, destData.hfsPlusFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + + err = DeleteExtents( vcb, destData.hfsPlusFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! + + err = MoveExtents( vcb, srcData.hfsPlusFile.fileID, destData.hfsPlusFile.fileID, isHFSPlus ); // Move the extents back + ReturnIfError( err ); // we are doomed. Just QUIT! + + goto ExUndo2aPlus; + } + + } + else if ( numSrcExtentBlocks ) // just the source file has extents + { + err = MoveExtents( vcb, srcData.hfsPlusFile.fileID, destData.hfsPlusFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + + err = DeleteExtents( vcb, srcData.hfsPlusFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! 
+ + goto FlushAndReturn; + } + } + else if ( numDestExtentBlocks ) // just the destination file has extents + { + err = MoveExtents( vcb, destData.hfsPlusFile.fileID, srcData.hfsPlusFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + + err = DeleteExtents( vcb, destData.hfsPlusFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! + + goto FlushAndReturn; + } + } + + //-- Step 3: Change the data in the catalog nodes + + //-- find the source cnode and put dest info in it + err = LocateCatalogNodeByKey( vcb, srcHint, &srcKey, &srcData, &srcHint ); + if ( err != noErr ) + return( cmBadNews ); + + BlockMoveData( &srcData, &swapData, sizeof(CatalogRecord) ); + CopyBigCatalogNodeInfo( &destData, &srcData ); + + err = ReplaceBTreeRecord( vcb->catalogRefNum, &srcKey, srcHint, &srcData, sizeof(HFSPlusCatalogFile), &srcHint ); + ReturnIfError( err ); + + // find the destination cnode and put source info in it + err = LocateCatalogNodeByKey( vcb, destHint, &destKey, &destData, &destHint ); + if ( err != noErr ) + return( cmBadNews ); + + CopyBigCatalogNodeInfo( &swapData, &destData ); + err = ReplaceBTreeRecord( vcb->catalogRefNum, &destKey, destHint, &destData, sizeof(HFSPlusCatalogFile), &destHint ); + ReturnIfError( err ); + } + else // HFS // + { + //-- Step 1: Check the catalog nodes for extents + + //-- locate the source file, test for extents in extent file, and copy the cat record for later + err = LocateCatalogNodeByKey( vcb, srcHint, &srcKey, &srcData, &srcHint ); + ReturnIfError( err ); + + if ( srcData.recordType != kHFSFileRecord ) + return( cmFThdDirErr ); // Error "cmFThdDirErr = it is a directory" + + //-- Check if there are any extents in the source file + numSrcExtentBlocks = CheckExtents( srcData.hfsFile.dataExtents, srcData.hfsFile.dataPhysicalSize / vcb->blockSize, isHFSPlus ); + if ( numSrcExtentBlocks == 0 ) // then check the resource fork extents + numSrcExtentBlocks = CheckExtents( 
srcData.hfsFile.rsrcExtents, srcData.hfsFile.rsrcPhysicalSize / vcb->blockSize, isHFSPlus ); + + + //€€ Do we save the found source node for later use? + + + //-- Check if there are any extents in the destination file + err = LocateCatalogNodeByKey( vcb, destHint, &destKey, &destData, &destHint ); + ReturnIfError( err ); + + if ( destData.recordType != kHFSFileRecord ) + return( cmFThdDirErr ); // Error "cmFThdDirErr = it is a directory" + + numDestExtentBlocks = CheckExtents( destData.hfsFile.dataExtents, destData.hfsFile.dataPhysicalSize / vcb->blockSize, isHFSPlus ); + if ( numDestExtentBlocks == 0 ) // then check the resource fork extents + numDestExtentBlocks = CheckExtents( destData.hfsFile.rsrcExtents, destData.hfsFile.rsrcPhysicalSize / vcb->blockSize, isHFSPlus ); + + //€€ Do we save the found destination node for later use? + + + //-- Step 2: Exchange the Extent key in the extent file + + //-- Exchange the extents key in the extent file + err = DeleteExtents( vcb, kHFSBogusExtentFileID, isHFSPlus ); + ReturnIfError( err ); + + if ( numSrcExtentBlocks && numDestExtentBlocks ) // if both files have extents + { + //-- Change the source extents file ids to our known bogus value + err = MoveExtents( vcb, srcData.hfsFile.fileID, kHFSBogusExtentFileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + +ExUndo1a: err = DeleteExtents( vcb, kHFSBogusExtentFileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! 
+ + err = FlushCatalog( vcb ); // flush the catalog + err = FlushExtentFile( vcb ); // flush the extent file (unneeded for common case, but it's cheap) + return( dskFulErr ); + } + + //-- Change the destination extents file id's to the source id's + err = MoveExtents( vcb, destData.hfsFile.fileID, srcData.hfsFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + +ExUndo2a: err = DeleteExtents( vcb, srcData.hfsFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! + + err = MoveExtents( vcb, kHFSBogusExtentFileID, srcData.hfsFile.fileID, isHFSPlus ); // Move the extents back + ReturnIfError( err ); // we are doomed. Just QUIT! + + goto ExUndo1a; + } + + //-- Change the bogus extents file id's to the dest id's + err = MoveExtents( vcb, kHFSBogusExtentFileID, destData.hfsFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + + err = DeleteExtents( vcb, destData.hfsFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! + + err = MoveExtents( vcb, srcData.hfsFile.fileID, destData.hfsFile.fileID, isHFSPlus ); // Move the extents back + ReturnIfError( err ); // we are doomed. Just QUIT! + + goto ExUndo2a; + } + + } + else if ( numSrcExtentBlocks ) // just the source file has extents + { + err = MoveExtents( vcb, srcData.hfsFile.fileID, destData.hfsFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + + err = DeleteExtents( vcb, srcData.hfsFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! + + goto FlushAndReturn; + } + } + else if ( numDestExtentBlocks ) // just the destination file has extents + { + err = MoveExtents( vcb, destData.hfsFile.fileID, srcData.hfsFile.fileID, isHFSPlus ); + if ( err != noErr ) + { + if ( err != dskFulErr ) + return( err ); + + err = DeleteExtents( vcb, destData.hfsFile.fileID, isHFSPlus ); + ReturnIfError( err ); // we are doomed. Just QUIT! 
+
+			goto FlushAndReturn;
+		}
+		}
+
+		//-- Step 3: Change the data in the catalog nodes
+
+		//-- find the source cnode and put dest info in it
+		err = LocateCatalogNodeByKey( vcb, srcHint, &srcKey, &srcData, &srcHint );
+		if ( err != noErr )
+			return( cmBadNews );
+
+		// Save the source record, then overwrite its fork info with the dest's
+		BlockMoveData( &srcData, &swapData, sizeof(CatalogRecord) );
+		// NOTE: Asm source copies from the saved dest catalog node
+		CopyCatalogNodeInfo( &destData, &srcData );
+
+		err = ReplaceBTreeRecord( vcb->catalogRefNum, &srcKey, srcHint, &srcData, sizeof(HFSCatalogFile), &srcHint );
+		ReturnIfError( err );
+
+
+		// find the destination cnode and put source info in it
+		err = LocateCatalogNodeByKey( vcb, destHint, &destKey, &destData, &destHint );
+		if ( err != noErr )
+			return( cmBadNews );
+
+		CopyCatalogNodeInfo( &swapData, &destData );
+		err = ReplaceBTreeRecord( vcb->catalogRefNum, &destKey, destHint, &destData, sizeof(HFSCatalogFile), &destHint );
+		ReturnIfError( err );
+	}
+
+	err = noErr;
+
+	//-- Step 4: Error Handling section
+
+
+FlushAndReturn:
+	// NOTE(review): err from FlushCatalog is immediately overwritten by the
+	// FlushExtentFile result, so only the second flush's status is returned.
+	// Presumably intentional best-effort flushing -- confirm.
+	err = FlushCatalog( vcb );		// flush the catalog
+	err = FlushExtentFile( vcb );	// flush the extent file (unneeded for common case, but it's cheap)
+	return( err );
+}
+
+
+// CopyCatalogNodeInfo
+//
+// Copies the plain-HFS file fork metadata from 'src' to 'dest': data and
+// resource logical/physical sizes, both in-catalog extent records, and the
+// modification date.  Names, IDs and Finder info are left untouched --
+// this is the payload swap used by ExchangeFileIDs on HFS (non-Plus)
+// volumes.
+void CopyCatalogNodeInfo( CatalogRecord *src, CatalogRecord *dest )
+{
+//	dest->hfsFile.filStBlk = src->hfsFile.filStBlk;
+	dest->hfsFile.dataLogicalSize = src->hfsFile.dataLogicalSize;
+	dest->hfsFile.dataPhysicalSize = src->hfsFile.dataPhysicalSize;
+//	dest->hfsFile.filRStBlk = src->hfsFile.filRStBlk;
+	dest->hfsFile.rsrcLogicalSize = src->hfsFile.rsrcLogicalSize;
+	dest->hfsFile.rsrcPhysicalSize = src->hfsFile.rsrcPhysicalSize;
+	dest->hfsFile.modifyDate = src->hfsFile.modifyDate;
+	BlockMoveData( src->hfsFile.dataExtents, dest->hfsFile.dataExtents, sizeof(HFSExtentRecord) );
+	BlockMoveData( src->hfsFile.rsrcExtents, dest->hfsFile.rsrcExtents, sizeof(HFSExtentRecord) );
+}
+
+// HFS Plus flavor: copies both fork-data structures and the content
+// modification date from 'src' to 'dest'.
+void CopyBigCatalogNodeInfo( CatalogRecord *src, CatalogRecord *dest )
+{
+	BlockMoveData(
&src->hfsPlusFile.dataFork, &dest->hfsPlusFile.dataFork, sizeof(HFSPlusForkData) ); + BlockMoveData( &src->hfsPlusFile.resourceFork, &dest->hfsPlusFile.resourceFork, sizeof(HFSPlusForkData) ); + dest->hfsPlusFile.contentModDate = src->hfsPlusFile.contentModDate; +} + + +OSErr MoveExtents( ExtendedVCB *vcb, UInt32 srcFileID, UInt32 destFileID, Boolean isHFSPlus ) +{ + FCB * fcb; + ExtentsRecBuffer extentsBuffer[kNumExtentsToCache]; + ExtentKey * extentKeyPtr; + ExtentRecord extentData; + BTreeIterator btIterator; + FSBufferDescriptor btRecord; + UInt16 btKeySize; + UInt16 btRecordSize; + SInt16 i, j; + OSErr err; + + + fcb = GetFileControlBlock(vcb->extentsRefNum); + + (void) BTInvalidateHint(&btIterator); + extentKeyPtr = (ExtentKey*) &btIterator.key; + btRecord.bufferAddress = &extentData; + btRecord.itemCount = 1; + + //-- Collect the extent records + + // + // A search on the following key will cause the BTree to be positioned immediately + // before the first extent record for file #srcFileID, but not actually positioned + // on any record. This is because there cannot be an extent record with FABN = 0 + // (the first extent of the fork, which would be in the catalog entry, not an extent + // record). + // + // Using BTIterateRecord with kBTreeNextRecord will then get that first extent record. 
+ // + if (isHFSPlus) { + btRecord.itemSize = sizeof(HFSPlusExtentRecord); + btKeySize = sizeof(HFSPlusExtentKey); + + extentKeyPtr->hfsPlus.keyLength = kHFSPlusExtentKeyMaximumLength; + extentKeyPtr->hfsPlus.forkType = 0; + extentKeyPtr->hfsPlus.pad = 0; + extentKeyPtr->hfsPlus.fileID = srcFileID; + extentKeyPtr->hfsPlus.startBlock = 0; + } + else { + btRecord.itemSize = sizeof(HFSExtentRecord); + btKeySize = sizeof(HFSExtentKey); + + extentKeyPtr->hfs.keyLength = kHFSExtentKeyMaximumLength; + extentKeyPtr->hfs.forkType = 0; + extentKeyPtr->hfs.fileID = srcFileID; + extentKeyPtr->hfs.startBlock = 0; + } + + // + // We do an initial BTSearchRecord to position the BTree's iterator just before any extent + // records for srcFileID. We then do a few BTIterateRecord and BTInsertRecord of those found + // records, but with destFileID as the file number in the key. Keep doing this sequence of + // BTIterateRecord and BTInsertRecord until we find an extent for another file, or there are + // no more extent records in the tree. + // + // Basically, we're copying records kNumExtentsToCache at a time. The copies have their file ID + // set to destFileID. + // + // This depends on BTInsertRecord not effecting the iterator used by BTIterateRecord. If it + // _did_ effect the iterator, then we would need to do a BTSearchRecord before each series + // of BTIterateRecord. We'd need to set up the key for BTSearchRecord to find the last record + // we found, so that BTIterateRecord would get the next one (the first we haven't processed). + // + + err = BTSearchRecord(fcb, &btIterator, kInvalidMRUCacheKey, &btRecord, &btRecordSize, &btIterator); + + // We expect a btNotFound here, since there shouldn't be an extent record with FABN = 0. 
+ if (err != btNotFound) + { + if ( DEBUG_BUILD ) + DebugStr("\pUnexpected error from SearchBTreeRecord"); + + if (err == noErr) // If we found such a bogus extent record, then the tree is really messed up + err = cmBadNews; // so return an error that conveys the disk is hosed. + + return err; + } + + do + { + btRecord.bufferAddress = &extentData; + btRecord.itemCount = 1; + + for ( i=0 ; ihfsPlus.fileID : extentKeyPtr->hfs.fileID; + if ( foundFileID == srcFileID ) + { + CopyExtentInfo(extentKeyPtr, &extentData, extentsBuffer, i); + } + else + { + break; + } + } + + //-- edit each extent key, and reinsert each extent record in the extent file + if (isHFSPlus) + btRecordSize = sizeof(HFSPlusExtentRecord); + else + btRecordSize = sizeof(HFSExtentRecord); + + for ( j=0 ; jextentsRefNum); + + (void) BTInvalidateHint(&btIterator); + extentKeyPtr = (ExtentKey*) &btIterator.key; + btRecord.bufferAddress = &extentData; + btRecord.itemCount = 1; + + // The algorithm is to position the BTree just before any extent records for fileID. + // Then just keep getting successive records. If the record is still for fileID, + // then delete it. + + if (isHFSPlus) { + btRecord.itemSize = sizeof(HFSPlusExtentRecord); + + extentKeyPtr->hfsPlus.keyLength = kHFSPlusExtentKeyMaximumLength; + extentKeyPtr->hfsPlus.forkType = 0; + extentKeyPtr->hfsPlus.pad = 0; + extentKeyPtr->hfsPlus.fileID = fileID; + extentKeyPtr->hfsPlus.startBlock = 0; + } + else { + btRecord.itemSize = sizeof(HFSExtentRecord); + + extentKeyPtr->hfs.keyLength = kHFSExtentKeyMaximumLength; + extentKeyPtr->hfs.forkType = 0; + extentKeyPtr->hfs.fileID = fileID; + extentKeyPtr->hfs.startBlock = 0; + } + + err = BTSearchRecord(fcb, &btIterator, kInvalidMRUCacheKey, &btRecord, &btRecordSize, &btIterator); + if ( err != btNotFound ) + { + if (err == noErr) { // Did we find a bogus extent record? + err = cmBadNews; // Yes, so indicate things are messed up. 
+		}
+
+		return err;				// Got some unexpected error, so return it
+	}
+
+	do
+	{
+		BTreeIterator tmpIterator;
+		HFSCatalogNodeID foundFileID;
+
+		err = BTIterateRecord(fcb, kBTreeNextRecord, &btIterator, &btRecord, &btRecordSize);
+		if ( err != noErr )
+		{
+			if (err == btNotFound)	// If we hit the end of the BTree
+				err = noErr;		// then it's OK
+
+			break;					// We're done now.
+		}
+
+		foundFileID = isHFSPlus ? extentKeyPtr->hfsPlus.fileID : extentKeyPtr->hfs.fileID;
+		if ( foundFileID != fileID )
+			break;					// numbers don't match, we must be done
+
+		// Delete through a copy of the iterator so the main iterator keeps
+		// its tree position for the next BTIterateRecord call.
+		tmpIterator = btIterator;
+		err = BTDeleteRecord( fcb, &tmpIterator );
+		if (err != noErr)
+			break;
+	} while ( true );
+
+	return( err );
+}
+
+
+// Check if there are extents represented in the extents overflow file.
+//
+// Sums the blockCount fields of the fork's in-catalog extent descriptors
+// (kHFSPlusExtentDensity or kHFSExtentDensity entries).  Returns 0 when
+// those descriptors already account for all 'totalBlocks' of the fork
+// (so no overflow records are expected); otherwise returns the partial
+// sum -- a nonzero result means overflow extent records exist.
+UInt32 CheckExtents( void *extents, UInt32 totalBlocks, Boolean isHFSPlus )
+{
+	UInt32	extentAllocationBlocks;
+	UInt16	i;
+
+
+	if ( totalBlocks == 0 )
+		return( 0 );
+
+	extentAllocationBlocks = 0;
+
+	if ( isHFSPlus )
+	{
+		for ( i = 0 ; i < kHFSPlusExtentDensity ; i++ )
+		{
+			extentAllocationBlocks += ((HFSPlusExtentDescriptor *)extents)[i].blockCount;
+			if ( extentAllocationBlocks >= totalBlocks )	// greater than or equal (extents can add past eof if 'Close' crashes w/o truncating new clump)
+				return( 0 );
+		}
+	}
+	else
+	{
+		for ( i = 0 ; i < kHFSExtentDensity ; i++ )
+		{
+			extentAllocationBlocks += ((HFSExtentDescriptor *)extents)[i].blockCount;
+			if ( extentAllocationBlocks >= totalBlocks )	// greater than or equal (extents can add past eof if 'Close' crashes w/o truncating new clump)
+				return( 0 );
+		}
+	}
+
+	return( extentAllocationBlocks );
+}
diff --git a/bsd/hfs/hfscommon/Makefile b/bsd/hfs/hfscommon/Makefile
new file mode 100644
index 000000000..87fa95d27
--- /dev/null
+++ b/bsd/hfs/hfscommon/Makefile
@@ -0,0 +1,27 @@
+export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd
+export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def
+export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule
+export
MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + headers + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + headers + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/hfs/hfscommon/Misc/BTreeWrapper.c b/bsd/hfs/hfscommon/Misc/BTreeWrapper.c new file mode 100644 index 000000000..86bbcd0b4 --- /dev/null +++ b/bsd/hfs/hfscommon/Misc/BTreeWrapper.c @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreeWrapper.c + + Contains: Interface glue for new B-tree manager. + + Version: HFS Plus 1.0 + + Copyright: © 1996-1998 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: xxx put technology here xxx + + Writers: + + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + 8/10/98 djb Removed all references to btcb global iterator (lastIterator). 
+ 04/02/98 djb GetBTreeRecord is only used for MacOS builds. + 03/31/98 djb Sync up with final HFSVolumes.h header file. + 9/4/97 msd Fix ValidHFSRecord to determine the type of B-tree by FileID, + not record size. Add better checking for attribute b-tree keys. + 8/22/97 djb Get blockReadFromDisk flag from GetCacheBlock call. + 8/14/97 djb Remove reserved field checks in ValidHFSRecord (radar #1649593). + Only call if ValidHFSRecord HFS_DIAGNOSTIC is true. + 8/11/97 djb Bug 1670441. In SetEndOfForkProc, don't DebugStr if the disk is + full. + 7/25/97 DSH Pass heuristicHint to BTSearchRecord from SearchBTreeRecord. + 7/24/97 djb CallBackProcs now take a file refNum instead of an FCB. + GetBlockProc now reports if block came from disk. + 7/22/97 djb Move all trace points to BTree.c file. + 7/21/97 djb LogEndTime now takes an error code. + 7/16/97 DSH FilesInternal.x -> FileMgrInternal.x to avoid name collision + 7/15/97 msd Bug #1664103. OpenBTree is not propagating errors from + BTOpenPath. + 7/9/97 djb Remove maxCNID check from ValidHFSRecord (radar #1649593). + 6/13/97 djb In ValidHFSRecord HFSPlus threads names can be > 31 chars. + 6/2/97 DSH Also flush AlternateVolumeHeader whenever Attributes or Startup + files change size. + 5/28/97 msd In ValidHFSRecord, check for attribute keys. + 5/19/97 djb Move summary traces from GetBTreeRecord to BTIterateRecord. + 5/9/97 djb Get in sync with new FilesInternal.i. + 5/7/97 djb Add summary traces to B-tree SPI. + 4/24/97 djb first checked in + 4/16/97 djb Always use new B-tree code. + 4/4/97 djb Remove clumpSize test from ValidHFSRecord. + 4/4/97 djb Get in sync with volume format changes. + 3/17/97 DSH Casting for SC, BlockProcs are now not static. + 3/3/97 djb Call trash block after closing btree! + 2/19/97 djb Add support for accessing bigger B-tree nodes. + 2/6/97 msd In CheckBTreeKey, remove test and DebugStr for parent ID being + too big. 
+ 1/23/97 DSH SetEndOfForkProc now calls through to update the Alternate MDB + or VolumeHeader. + 1/16/97 djb Switched to dynamic lengths for BufferDescriptor length field in + SearchBTreeRecord and GetBTreeRecord. Round up to clump size in + SetEndOfForkProc. + 1/15/97 djb Don't return errors for bad file ids in key. + 1/13/97 djb Adding support for getting current record. ValidHFSRecord now + supports variable sized thread records. + 1/9/97 djb Call CheckBTreeKey before using key length in a BlockMoveData + call. + 1/6/97 djb Implement SetEndOfForkProc. + 1/6/97 djb Added HFS Plus support to CheckBTreeKey and ValidHFSRecord. + 1/3/97 djb Added support for large keys. Integrated latest HFSVolumesPriv.h + changes. + 12/23/96 djb Fixed problem in SearchBTreeRecord (dataSize is an output so it + was undefined). Added some debugging code. + 12/20/96 msd Fix OpenBTree to use the real data type for the key compare proc + pointer (not void *). Fixed problem in SearchBTreeRecord that + assigns a pointer to a buffer size field (forgot to dereference + the pointer). + 12/19/96 djb first checked in + +*/ + +#include "../headers/BTreesPrivate.h" + + + + +// B-tree callbacks... 
+#if TARGET_API_MAC_OS8 +OSStatus GetBlockProc ( FileReference fileRefNum, UInt32 blockNum, GetBlockOptions options, BlockDescriptor *block ); +OSStatus ReleaseBlockProc ( FileReference fileRefNum, BlockDescPtr blockPtr, ReleaseBlockOptions options ); +OSStatus SetBlockSizeProc ( FileReference fileRefNum, ByteCount blockSize, ItemCount minBlockCount ); +#endif + + +// local routines +static OSErr CheckBTreeKey(const BTreeKey *key, const BTreeControlBlock *btcb); +static Boolean ValidHFSRecord(const void *record, const BTreeControlBlock *btcb, UInt16 recordSize); + + + + +OSErr SearchBTreeRecord(FileReference refNum, const void* key, UInt32 hint, void* foundKey, void* data, UInt16 *dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator searchIterator; + FCB *fcb; + BTreeControlBlock *btcb; + OSStatus result; + + + fcb = GetFileControlBlock(refNum); + btcb = (BTreeControlBlock*) fcb->fcbBTCBPtr; + + btRecord.bufferAddress = data; + btRecord.itemCount = 1; + if ( btcb->maxKeyLength == kHFSExtentKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSExtentRecord); + else if ( btcb->maxKeyLength == kHFSPlusExtentKeyMaximumLength ) + btRecord.itemSize = sizeof(HFSPlusExtentRecord); + else + btRecord.itemSize = sizeof(CatalogRecord); + + searchIterator.hint.writeCount = 0; // clear these out for debugging... + searchIterator.hint.reserved1 = 0; + searchIterator.hint.reserved2 = 0; + + searchIterator.hint.nodeNum = hint; + searchIterator.hint.index = 0; + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + BlockMoveData(key, &searchIterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //€€ should we range check against maxkeylen? + + // We only optimize for catalog records + if( btRecord.itemSize == sizeof(CatalogRecord) ) + { + UInt32 heuristicHint; + UInt32 *cachedHint; + Ptr hintCachePtr = FCBTOVCB(fcb)->hintCachePtr; + + // We pass a 2nd hint/guess into BTSearchRecord. 
The heuristicHint is a mapping of + // dirID and nodeNumber, in hopes that the current search will be in the same node + // as the last search with the same parentID. + result = GetMRUCacheBlock( ((HFSCatalogKey *)key)->parentID, hintCachePtr, (Ptr *)&cachedHint ); + heuristicHint = (result == noErr) ? *cachedHint : kInvalidMRUCacheKey; + + result = BTSearchRecord( fcb, &searchIterator, heuristicHint, &btRecord, dataSize, &searchIterator ); + + InsertMRUCacheBlock( hintCachePtr, ((HFSCatalogKey *)key)->parentID, (Ptr) &(searchIterator.hint.nodeNum) ); + } + else + { + result = BTSearchRecord( fcb, &searchIterator, kInvalidMRUCacheKey, &btRecord, dataSize, &searchIterator ); + } + + if (result == noErr) + { + *newHint = searchIterator.hint.nodeNum; + + result = CheckBTreeKey(&searchIterator.key, btcb); + ExitOnError(result); + + BlockMoveData(&searchIterator.key, foundKey, CalcKeySize(btcb, &searchIterator.key)); //€€ warning, this could overflow user's buffer!!! + + if ( DEBUG_BUILD && !ValidHFSRecord(data, btcb, *dataSize) ) + DebugStr("\pSearchBTreeRecord: bad record?"); + } + +ErrorExit: + + return result; +} + + + +OSErr InsertBTreeRecord(FileReference refNum, void* key, void* data, UInt16 dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator iterator; + FCB *fcb; + BTreeControlBlock *btcb; + OSStatus result; + + + fcb = GetFileControlBlock(refNum); + btcb = (BTreeControlBlock*) fcb->fcbBTCBPtr; + + btRecord.bufferAddress = data; + btRecord.itemSize = dataSize; + btRecord.itemCount = 1; + + iterator.hint.nodeNum = 0; // no hint + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + BlockMoveData(key, &iterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //€€ should we range check against maxkeylen? 
+ + if ( DEBUG_BUILD && !ValidHFSRecord(data, btcb, dataSize) ) + DebugStr("\pInsertBTreeRecord: bad record?"); + + result = BTInsertRecord( fcb, &iterator, &btRecord, dataSize ); + + *newHint = iterator.hint.nodeNum; + +ErrorExit: + + return result; +} + + +OSErr DeleteBTreeRecord(FileReference refNum, void* key) +{ + BTreeIterator iterator; + FCB *fcb; + BTreeControlBlock *btcb; + OSStatus result; + + + fcb = GetFileControlBlock(refNum); + btcb = (BTreeControlBlock*) fcb->fcbBTCBPtr; + + iterator.hint.nodeNum = 0; // no hint + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + BlockMoveData(key, &iterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //€€ should we range check against maxkeylen? + + result = BTDeleteRecord( fcb, &iterator ); + +ErrorExit: + + return result; +} + + +OSErr ReplaceBTreeRecord(FileReference refNum, const void* key, UInt32 hint, void *newData, UInt16 dataSize, UInt32 *newHint) +{ + FSBufferDescriptor btRecord; + BTreeIterator iterator; + FCB *fcb; + BTreeControlBlock *btcb; + OSStatus result; + + + fcb = GetFileControlBlock(refNum); + btcb = (BTreeControlBlock*) fcb->fcbBTCBPtr; + + btRecord.bufferAddress = newData; + btRecord.itemSize = dataSize; + btRecord.itemCount = 1; + + iterator.hint.nodeNum = hint; + + result = CheckBTreeKey((BTreeKey *) key, btcb); + ExitOnError(result); + + BlockMoveData(key, &iterator.key, CalcKeySize(btcb, (BTreeKey *) key)); //€€ should we range check against maxkeylen? + + if ( DEBUG_BUILD && !ValidHFSRecord(newData, btcb, dataSize) ) + DebugStr("\pReplaceBTreeRecord: bad record?"); + + result = BTReplaceRecord( fcb, &iterator, &btRecord, dataSize ); + + *newHint = iterator.hint.nodeNum; + + //€€ do we need to invalidate the iterator? 
+ +ErrorExit: + + return result; +} + + + +static OSErr CheckBTreeKey(const BTreeKey *key, const BTreeControlBlock *btcb) +{ + UInt16 keyLen; + + if ( btcb->attributes & kBTBigKeysMask ) + keyLen = key->length16; + else + keyLen = key->length8; + + if ( (keyLen < 6) || (keyLen > btcb->maxKeyLength) ) + { + if ( DEBUG_BUILD ) + DebugStr("\pCheckBTreeKey: bad key length!"); + return fsBTInvalidKeyLengthErr; + } + + return noErr; +} + + +static Boolean ValidHFSRecord(const void *record, const BTreeControlBlock *btcb, UInt16 recordSize) +{ + UInt32 cNodeID; + + if ( btcb->maxKeyLength == kHFSExtentKeyMaximumLength ) + { + return ( recordSize == sizeof(HFSExtentRecord) ); + } + else if (btcb->maxKeyLength == kHFSPlusExtentKeyMaximumLength ) + { + return ( recordSize == sizeof(HFSPlusExtentRecord) ); + } + else // Catalog record + { + CatalogRecord *catalogRecord = (CatalogRecord*) record; + + switch(catalogRecord->recordType) + { + case kHFSFolderRecord: + { + if ( recordSize != sizeof(HFSCatalogFolder) ) + return false; + if ( catalogRecord->hfsFolder.flags != 0 ) + return false; + if ( catalogRecord->hfsFolder.valence > 0x7FFF ) + return false; + + cNodeID = catalogRecord->hfsFolder.folderID; + + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + } + break; + + case kHFSPlusFolderRecord: + { + if ( recordSize != sizeof(HFSPlusCatalogFolder) ) + return false; + if ( catalogRecord->hfsPlusFolder.flags != 0 ) + return false; + if ( catalogRecord->hfsPlusFolder.valence > 0x7FFF ) + return false; + + cNodeID = catalogRecord->hfsPlusFolder.folderID; + + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + } + break; + + case kHFSFileRecord: + { +// UInt16 i; + HFSExtentDescriptor *dataExtent; + HFSExtentDescriptor *rsrcExtent; + + if ( recordSize != sizeof(HFSCatalogFile) ) + return false; + if ( (catalogRecord->hfsFile.flags & ~(0x83)) != 0 ) + return false; + + cNodeID = catalogRecord->hfsFile.fileID; + + if ( cNodeID < 16 ) + 
return false; + + // make sure 0 ¾ LEOF ¾ PEOF for both forks + + if ( catalogRecord->hfsFile.dataLogicalSize < 0 ) + return false; + if ( catalogRecord->hfsFile.dataPhysicalSize < catalogRecord->hfsFile.dataLogicalSize ) + return false; + if ( catalogRecord->hfsFile.rsrcLogicalSize < 0 ) + return false; + if ( catalogRecord->hfsFile.rsrcPhysicalSize < catalogRecord->hfsFile.rsrcLogicalSize ) + return false; + + dataExtent = (HFSExtentDescriptor*) &catalogRecord->hfsFile.dataExtents; + rsrcExtent = (HFSExtentDescriptor*) &catalogRecord->hfsFile.rsrcExtents; + +#if 0 + for (i = 0; i < kHFSExtentDensity; ++i) + { + if ( (dataExtent[i].blockCount > 0) && (dataExtent[i].startBlock == 0) ) + return false; + if ( (rsrcExtent[i].blockCount > 0) && (rsrcExtent[i].startBlock == 0) ) + return false; + } +#endif + } + break; + + case kHFSPlusFileRecord: + { +// UInt16 i; + HFSPlusExtentDescriptor *dataExtent; + HFSPlusExtentDescriptor *rsrcExtent; + + if ( recordSize != sizeof(HFSPlusCatalogFile) ) + return false; + if ( (catalogRecord->hfsPlusFile.flags & ~(0x83)) != 0 ) + return false; + + cNodeID = catalogRecord->hfsPlusFile.fileID; + + if ( cNodeID < 16 ) + return false; + + // make sure 0 ¾ LEOF ¾ PEOF for both forks + + dataExtent = (HFSPlusExtentDescriptor*) &catalogRecord->hfsPlusFile.dataFork.extents; + rsrcExtent = (HFSPlusExtentDescriptor*) &catalogRecord->hfsPlusFile.resourceFork.extents; + +#if 0 + for (i = 0; i < kHFSPlusExtentDensity; ++i) + { + if ( (dataExtent[i].blockCount > 0) && (dataExtent[i].startBlock == 0) ) + return false; + if ( (rsrcExtent[i].blockCount > 0) && (rsrcExtent[i].startBlock == 0) ) + return false; + } +#endif + } + break; + + case kHFSFolderThreadRecord: + case kHFSFileThreadRecord: + { + if ( recordSize != sizeof(HFSCatalogThread) ) + return false; + + cNodeID = catalogRecord->hfsThread.parentID; + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + + if ( (catalogRecord->hfsThread.nodeName[0] == 0) || + 
(catalogRecord->hfsThread.nodeName[0] > 31) ) + return false; + } + break; + + case kHFSPlusFolderThreadRecord: + case kHFSPlusFileThreadRecord: + { + if ( recordSize > sizeof(HFSPlusCatalogThread) || recordSize < (sizeof(HFSPlusCatalogThread) - sizeof(HFSUniStr255))) + return false; + + cNodeID = catalogRecord->hfsPlusThread.parentID; + if ( (cNodeID == 0) || (cNodeID < 16 && cNodeID > 2) ) + return false; + + if ( (catalogRecord->hfsPlusThread.nodeName.length == 0) || + (catalogRecord->hfsPlusThread.nodeName.length > 255) ) + return false; + } + break; + + default: + return false; + } + } + + return true; // record appears to be OK +} diff --git a/bsd/hfs/hfscommon/Misc/FileExtentMapping.c b/bsd/hfs/hfscommon/Misc/FileExtentMapping.c new file mode 100644 index 000000000..92ae16ad6 --- /dev/null +++ b/bsd/hfs/hfscommon/Misc/FileExtentMapping.c @@ -0,0 +1,2102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: FileExtentMapping.c + + Contains: xxx put contents here xxx + + Version: HFS Plus 1.0 + + Written by: Dave Heller, Mark Day + + Copyright: © 1996-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Mark Day + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (DSH) Deric Horn + (msd) Mark Day + (djb) Don Brady + + Change History (most recent first): + 9/9/99 djb Fix fcbModifiedMask flag testing logic. + 8/25/98 djb Flush extents b-tree header if dirty (2371088). + 6/30/98 djb Add functions NodesAreContiguous and ExtentsAreIntegral (for radar #2249539). + 6/23/98 djb Changed DeallocFile to DeleteFile which now deletes the catalog record. + Fixed UpdateExtentRecord to pass correct fcb to Btree routines. Fixed + hfs+ bug in CreateExtentRecord (double dereference). + 5/20/98 djb In ExtendFileC don't lie about the peof! (radar #2230094). + 4/17/98 djb Add VCB locking. + 4/2/98 djb Switch over to real BTree interface (no more BTreeWrapper.c). + 3/31/98 djb Sync up with final HFSVolumes.h header file. + + 1/23/98 msd Bug 2208024: AllocContig is actually allocating one extent even + though there is not enough contiguous space. + 12/2/97 DSH GetFCBExtentRecord no longer static so DFA can use it. + 10/20/97 msd When allocating more space for a file, do the clump size + calculations in ExtendFileC, not BlockAllocate. Undo change from + . + 10/17/97 msd Conditionalize DebugStrs. + 10/16/97 msd Simplify the code path for MapFileBlockC (logical to physical + block mapping) in the typical case where the file isn't + fragmented so badly that it has extents in the extents B-tree. + Simplified some of the calculations for all cases. + 10/13/97 DSH FindExtentRecord & DeleteExtentRecord are also being used by DFA + no longer static. 
+ 10/6/97 msd When extending a file, set the physical EOF to include any extra + space allocated due to a file's clump size. + 9/19/97 msd Remove the MapLogicalToPhysical SPI. It was never used and is + not being tested anyway. + 9/5/97 msd In CompareExtentKeys and CompareExtentKeysPlus, use the symbolic + constants for key length. Don't DebugStr unless DEBUG_BUILD is + set. + 7/24/97 djb Add instrumentation to MapFileBlockC + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 7/15/97 DSH AdjEOF() mark the FCB as modified. (1664389) + 7/8/97 DSH Loading PrecompiledHeaders from define passed in on C line + 7/3/97 msd Bug #1663518. Remove DebugStr when setting the FCB extent record + for a volume control file. + 6/27/97 msd Moved enum kFirstFileRefnum to FilesInternal. + 6/24/97 djb Include "CatalogPrivate.h" + 6/16/97 msd Finish implementation of CreateLargeFile SPI. + 6/12/97 msd Add stub for CreateLargeFile SPI. + 6/5/97 msd Add MapLogicalToPhysical. + 6/2/97 msd In TruncateFileC, don't update the extent record unless it was + actually changed (prevents extra updates when truncating to the + end of the extent, and it is the last extent of the file.) Added + an AdjustEOF routine called by the assembly AdjEOF routine. It + copies the EOF, physical length, and extent information from one + FCB to all other FCBs for that fork. + 5/20/97 DSH Removed const declaration in MapFileBlocC, const is benign when + passing by value, and SC requires it to match prototype. + 5/15/97 msd Change enum kResourceForkType from -1 to 0xFF since it is now + unsigned. Change all forkType parameters to UInt8. + 5/7/97 msd When checking for an unused extent descriptor, check the length, + not the starting block. + 4/24/97 djb first checked in + 4/11/97 DSH use extended VCB fields catalogRefNum, and extentsRefNum. + 4/4/97 djb Get in sync with volume format changes. + 3/17/97 DSH Casting to compile with SC. 
+ 2/26/97 msd Add instrumentation in ExtendFileC and TruncateFileC. In + CompareExtentKeys and CompareExtentKeysPlus, make sure the key + lengths are correct. + 2/5/97 msd The comparison with fsBTStartOfIterationErr didn't work because + the enum is an unsigned long; it is now casted to an OSErr + before comparing. + 1/31/97 msd In FindExtentRecord, turn an fsBTStartOfIterationErr error into + btNotFound. + 1/28/97 msd Fixed bug in MapFileBlockC where it returned the wrong number of + bytes available at the given block number. This could + potentially cause programs to read or write over other files. + 1/16/97 djb Extent key compare procs now return SInt32. Fixed + UpdateExtentRecord - it was passing a pointer to an ExtentKey + pointer. + 1/10/97 msd Change TruncateFileC to call DellocateFork when the new PEOF is + 0. Fixes a fxRangeErr returned when no extents existed. + 1/6/97 msd Previous change prevents extent records from being removed if + the files new PEOF is in the local (FCB/catalog) extents. + 1/3/97 djb Temp fix in TruncateFileC to prevent unwanted calls to + TruncateExtents. + 12/23/96 msd Previous change to SearchExtentFile didn't set up the outputs + for hint and key when the FCB extent record wasn't full. + 12/20/96 msd In SearchExtentFile, don't bother searching the extents file if + the FCB's extent record wasn't full, or if the FCB was for the + extents file itself. Modified SearchExtentRecord to return a + Boolean to indicate that the record was not full. + 12/19/96 DSH Changed refs from VCB to ExtendedVCB + 12/19/96 djb Updated for new B-tree Manager interface. + 12/12/96 djb Really use new SPI for GetCatalogNode. + 12/12/96 djb Use new Catalog SPI for GetCatalogNode. Added Mark's changes to + MapFileBlockC. + 12/11/96 msd TruncateFileC must always release extents, even if PEOF hasn't + changed (since allocation may have been rounded up due to clump + size). + 12/10/96 msd Check PRAGMA_LOAD_SUPPORTED before loading precompiled headers. 
+ 12/4/96 DSH Precompiled headers + 11/26/96 msd Add an exported routine to grow the parallel FCB table to + accomodate the HFS+ ExtentRecord. + 11/26/96 msd Convert internal routines to use ExtentKey and ExtentRecord + (instead of the raw HFS structures). + 11/21/96 msd Added CompareExtentKeysPlus(). + 11/20/96 msd Finish porting FXM to C. + 11/6/96 DKH first checked in + +*/ + + +#include "../../hfs.h" +#include "../../hfs_format.h" +#include "../../hfs_endian.h" + +#include "../headers/FileMgrInternal.h" +#include "../headers/BTreesInternal.h" +#include "../headers/CatalogPrivate.h" // calling a private catalog routine (LocateCatalogNode) + +#include "../headers/HFSInstrumentation.h" + +#include + +/* +============================================================ +Public (Exported) Routines: +============================================================ + DeAllocFile Deallocate all disk space allocated to a specified file. + Both forks are deallocated. + + ExtendFileC Allocate more space to a given file. + + CompareExtentKeys + Compare two extents file keys (a search key and a trial + key). Used by the BTree manager when searching for, + adding, or deleting keys in the extents file of an HFS + volume. + + CompareExtentKeysPlus + Compare two extents file keys (a search key and a trial + key). Used by the BTree manager when searching for, + adding, or deleting keys in the extents file of an HFS+ + volume. + + MapFileBlockC Convert (map) an offset within a given file into a + physical disk address. + + TruncateFileC Truncates the disk space allocated to a file. The file + space is truncated to a specified new physical EOF, rounded + up to the next allocation block boundry. There is an option + to truncate to the end of the extent containing the new EOF. + + FlushExtentFile + Flush the extents file for a given volume. + + GrowParallelFCBs + Make sure the parallel FCB entries are big enough to support + the HFS+ ExtentRecord. 
If not, the array is grown and the + pre-existing data copied over. + + AdjustEOF + Copy EOF, physical length, and extent records from one FCB + to all other FCBs for that fork. This is used when a file is + grown or shrunk as the result of a Write, SetEOF, or Allocate. + + MapLogicalToPhysical + Map some position in a file to a volume block number. Also + returns the number of contiguous bytes that are mapped there. + This is a queued HFSDispatch call that does the equivalent of + MapFileBlockC, using a parameter block. + +============================================================ +Internal Routines: +============================================================ + FindExtentRecord + Search the extents BTree for a particular extent record. + SearchExtentFile + Search the FCB and extents file for an extent record that + contains a given file position (in bytes). + SearchExtentRecord + Search a given extent record to see if it contains a given + file position (in bytes). Used by SearchExtentFile. + ReleaseExtents + Deallocate all allocation blocks in all extents of an extent + data record. + TruncateExtents + Deallocate blocks and delete extent records for all allocation + blocks beyond a certain point in a file. The starting point + must be the first file allocation block for some extent record + for the file. + DeallocateFork + Deallocate all allocation blocks belonging to a given fork. + UpdateExtentRecord + If the extent record came from the extents file, write out + the updated record; otherwise, copy the updated record into + the FCB resident extent record. If the record has no extents, + and was in the extents file, then delete the record instead. 
+*/ + +enum +{ + kTwoGigabytes = (UInt32) 0x80000000, + + kDataForkType = 0, + kResourceForkType = 0xFF, + + kPreviousRecord = -1, + + kSectorSize = 512 // Size of a physical sector +}; + +void HFSToHFSPlusExtents( + const HFSExtentRecord oldExtents, + HFSPlusExtentRecord newExtents); + +OSErr HFSPlusToHFSExtents( + const HFSPlusExtentRecord oldExtents, + HFSExtentRecord newExtents); + +OSErr FindExtentRecord( + const ExtendedVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean allowPrevious, + HFSPlusExtentKey *foundKey, + HFSPlusExtentRecord foundData, + UInt32 *foundHint); + +OSErr DeleteExtentRecord( + const ExtendedVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock); + +static OSErr CreateExtentRecord( + const ExtendedVCB *vcb, + HFSPlusExtentKey *key, + HFSPlusExtentRecord extents, + UInt32 *hint); + + +OSErr GetFCBExtentRecord( + const FCB *fcb, + HFSPlusExtentRecord extents); + +static OSErr SearchExtentFile( + const ExtendedVCB *vcb, + const FCB *fcb, + SInt64 filePosition, + HFSPlusExtentKey *foundExtentKey, + HFSPlusExtentRecord foundExtentData, + UInt32 *foundExtentDataIndex, + UInt32 *extentBTreeHint, + UInt32 *endingFABNPlusOne ); + +static OSErr SearchExtentRecord( + const ExtendedVCB *vcb, + UInt32 searchFABN, + const HFSPlusExtentRecord extentData, + UInt32 extentDataStartFABN, + UInt32 *foundExtentDataOffset, + UInt32 *endingFABNPlusOne, + Boolean *noMoreExtents); + +static OSErr ReleaseExtents( + ExtendedVCB *vcb, + const HFSPlusExtentRecord extentRecord, + UInt32 *numReleasedAllocationBlocks, + Boolean *releasedLastExtent); + +static OSErr DeallocateFork( + ExtendedVCB *vcb, + HFSCatalogNodeID fileID, + UInt8 forkType, + HFSPlusExtentRecord catalogExtents, + Boolean * recordDeleted); + +static OSErr TruncateExtents( + ExtendedVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean * recordDeleted); + +static OSErr UpdateExtentRecord ( + const ExtendedVCB *vcb, + FCB *fcb, + const 
HFSPlusExtentKey *extentFileKey, + const HFSPlusExtentRecord extentData, + UInt32 extentBTreeHint); + +static OSErr MapFileBlockFromFCB( + const ExtendedVCB *vcb, + const FCB *fcb, + SInt64 offset, // Desired offset in bytes from start of file + UInt32 *firstFABN, // FABN of first block of found extent + UInt32 *firstBlock, // Corresponding allocation block number + UInt32 *nextFABN); // FABN of block after end of extent + +static Boolean ExtentsAreIntegral( + const HFSPlusExtentRecord extentRecord, + UInt32 mask, + UInt32 *blocksChecked, + Boolean *checkedLastExtent); + +//_________________________________________________________________________________ +// +// Routine: FindExtentRecord +// +// Purpose: Search the extents BTree for an extent record matching the given +// FileID, fork, and starting file allocation block number. +// +// Inputs: +// vcb Volume to search +// forkType 0 = data fork, -1 = resource fork +// fileID File's FileID (CatalogNodeID) +// startBlock Starting file allocation block number +// allowPrevious If the desired record isn't found and this flag is set, +// then see if the previous record belongs to the same fork. +// If so, then return it. +// +// Outputs: +// foundKey The key data for the record actually found +// foundData The extent record actually found (NOTE: on an HFS volume, the +// fourth entry will be zeroes. 
+//		foundHint	The BTree hint to find the node again
+//_________________________________________________________________________________
+OSErr FindExtentRecord(
+	const ExtendedVCB *vcb,
+	UInt8 forkType,
+	UInt32 fileID,
+	UInt32 startBlock,
+	Boolean allowPrevious,
+	HFSPlusExtentKey *foundKey,
+	HFSPlusExtentRecord foundData,
+	UInt32 *foundHint)
+{
+	FCB * fcb;
+	BTreeIterator btIterator;
+	FSBufferDescriptor btRecord;
+	OSErr err;
+	UInt16 btRecordSize;
+
+	err = noErr;
+	*foundHint = 0;
+	fcb = GetFileControlBlock(vcb->extentsRefNum);
+
+	(void) BTInvalidateHint(&btIterator);
+
+	if (vcb->vcbSigWord == kHFSSigWord) {
+		HFSExtentKey * extentKeyPtr;
+		HFSExtentRecord extentData;
+
+		// Build an HFS-format key in place inside the iterator.
+		extentKeyPtr = (HFSExtentKey*) &btIterator.key;
+		extentKeyPtr->keyLength = kHFSExtentKeyMaximumLength;
+		extentKeyPtr->forkType = forkType;
+		extentKeyPtr->fileID = fileID;
+		extentKeyPtr->startBlock = startBlock;
+
+		btRecord.bufferAddress = &extentData;
+		btRecord.itemSize = sizeof(HFSExtentRecord);
+		btRecord.itemCount = 1;
+
+		err = BTSearchRecord(fcb, &btIterator, kInvalidMRUCacheKey, &btRecord, &btRecordSize, &btIterator);
+
+		if (err == btNotFound && allowPrevious) {
+			err = BTIterateRecord(fcb, kBTreePrevRecord, &btIterator, &btRecord, &btRecordSize);
+
+			// A previous record may not exist, so just return btNotFound (like we would if
+			// it was for the wrong file/fork).
+			if (err == (OSErr) fsBTStartOfIterationErr)	// NOTE: fsBTStartOfIterationErr is type unsigned long
+				err = btNotFound;
+
+			if (err == noErr) {
+				// Found a previous record. Does it belong to the same fork of the same file?
+				// (extentKeyPtr aliases btIterator.key, which BTIterateRecord updated.)
+				if (extentKeyPtr->fileID != fileID || extentKeyPtr->forkType != forkType)
+					err = btNotFound;
+			}
+		}
+
+		if (err == noErr) {
+			UInt16 i;
+
+			// Copy the found key back for the caller, widened to HFS Plus form.
+			foundKey->keyLength = kHFSPlusExtentKeyMaximumLength;
+			foundKey->forkType = extentKeyPtr->forkType;
+			foundKey->pad = 0;
+			foundKey->fileID = extentKeyPtr->fileID;
+			foundKey->startBlock = extentKeyPtr->startBlock;
+
+			// Copy the found data back for the caller; an HFS record only has
+			// three extents, so the remaining HFS Plus slots are zeroed.
+			foundData[0].startBlock = extentData[0].startBlock;
+			foundData[0].blockCount = extentData[0].blockCount;
+			foundData[1].startBlock = extentData[1].startBlock;
+			foundData[1].blockCount = extentData[1].blockCount;
+			foundData[2].startBlock = extentData[2].startBlock;
+			foundData[2].blockCount = extentData[2].blockCount;
+
+			for (i = 3; i < kHFSPlusExtentDensity; ++i)
+			{
+				foundData[i].startBlock = 0;
+				foundData[i].blockCount = 0;
+			}
+		}
+	}
+	else {		// HFS Plus volume
+		HFSPlusExtentKey * extentKeyPtr;
+		HFSPlusExtentRecord extentData;
+
+		extentKeyPtr = (HFSPlusExtentKey*) &btIterator.key;
+		extentKeyPtr->keyLength = kHFSPlusExtentKeyMaximumLength;
+		extentKeyPtr->forkType = forkType;
+		extentKeyPtr->pad = 0;
+		extentKeyPtr->fileID = fileID;
+		extentKeyPtr->startBlock = startBlock;
+
+		btRecord.bufferAddress = &extentData;
+		btRecord.itemSize = sizeof(HFSPlusExtentRecord);
+		btRecord.itemCount = 1;
+
+		err = BTSearchRecord(fcb, &btIterator, kInvalidMRUCacheKey, &btRecord, &btRecordSize, &btIterator);
+
+		if (err == btNotFound && allowPrevious) {
+			err = BTIterateRecord(fcb, kBTreePrevRecord, &btIterator, &btRecord, &btRecordSize);
+
+			// A previous record may not exist, so just return btNotFound (like we would if
+			// it was for the wrong file/fork).
+			if (err == (OSErr) fsBTStartOfIterationErr)	// NOTE: fsBTStartOfIterationErr is type unsigned long
+				err = btNotFound;
+
+			if (err == noErr) {
+				// Found a previous record. Does it belong to the same fork of the same file?
+				if (extentKeyPtr->fileID != fileID || extentKeyPtr->forkType != forkType)
+					err = btNotFound;
+			}
+		}
+
+		if (err == noErr) {
+			// Copy the found key back for the caller
+			BlockMoveData(extentKeyPtr, foundKey, sizeof(HFSPlusExtentKey));
+			// Copy the found data back for the caller
+			BlockMoveData(&extentData, foundData, sizeof(HFSPlusExtentRecord));
+		}
+	}
+
+	*foundHint = btIterator.hint.nodeNum;
+	return err;
+}
+
+
+
+// CreateExtentRecord
+//
+// Insert a new extent record (given in HFS Plus form; down-converted for HFS
+// volumes) into the volume's extents B-tree. On success '*hint' receives the
+// node number where the record was inserted.
+static OSErr CreateExtentRecord(
+	const ExtendedVCB *vcb,
+	HFSPlusExtentKey *key,
+	HFSPlusExtentRecord extents,
+	UInt32 *hint)
+{
+	BTreeIterator btIterator;
+	FSBufferDescriptor btRecord;
+	UInt16 btRecordSize;
+	OSErr err;
+
+	err = noErr;
+	*hint = 0;
+	(void) BTInvalidateHint(&btIterator);
+
+	if (vcb->vcbSigWord == kHFSSigWord) {
+		HFSExtentKey * keyPtr;
+		HFSExtentRecord data;
+
+		btRecordSize = sizeof(HFSExtentRecord);
+		btRecord.bufferAddress = &data;
+		btRecord.itemSize = btRecordSize;
+		btRecord.itemCount = 1;
+
+		keyPtr = (HFSExtentKey*) &btIterator.key;
+		keyPtr->keyLength = kHFSExtentKeyMaximumLength;
+		keyPtr->forkType = key->forkType;
+		keyPtr->fileID = key->fileID;
+		keyPtr->startBlock = key->startBlock;
+
+		// Fails if the HFS Plus record doesn't fit in three extents.
+		err = HFSPlusToHFSExtents(extents, data);
+	}
+	else {		// HFS Plus volume
+		btRecordSize = sizeof(HFSPlusExtentRecord);
+		btRecord.bufferAddress = extents;
+		btRecord.itemSize = btRecordSize;
+		btRecord.itemCount = 1;
+
+		BlockMoveData(key, &btIterator.key, sizeof(HFSPlusExtentKey));
+	}
+
+	if (err == noErr)
+		err = BTInsertRecord(GetFileControlBlock(vcb->extentsRefNum), &btIterator, &btRecord, btRecordSize);
+
+	if (err == noErr)
+		*hint = btIterator.hint.nodeNum;
+
+	return err;
+}
+
+
+// DeleteExtentRecord
+//
+// Delete the extents-tree record for (forkType, fileID, startBlock) on the
+// given volume, building the key in HFS or HFS Plus form as appropriate.
+OSErr DeleteExtentRecord(
+	const ExtendedVCB *vcb,
+	UInt8 forkType,
+	UInt32 fileID,
+	UInt32 startBlock)
+{
+	BTreeIterator btIterator;
+	OSErr err;
+
+	err = noErr;
+	(void) BTInvalidateHint(&btIterator);
+
+	if (vcb->vcbSigWord == kHFSSigWord) {
+		HFSExtentKey * keyPtr;
+
+		keyPtr = (HFSExtentKey*) &btIterator.key;
+		keyPtr->keyLength = kHFSExtentKeyMaximumLength;
+		keyPtr->forkType = forkType;
+		keyPtr->fileID = fileID;
+		keyPtr->startBlock = startBlock;
+	}
+	else {		// HFS Plus volume
+		HFSPlusExtentKey * keyPtr;
+
+		keyPtr = (HFSPlusExtentKey*) &btIterator.key;
+		keyPtr->keyLength = kHFSPlusExtentKeyMaximumLength;
+		keyPtr->forkType = forkType;
+		keyPtr->pad = 0;
+		keyPtr->fileID = fileID;
+		keyPtr->startBlock = startBlock;
+	}
+
+	err = BTDeleteRecord(GetFileControlBlock(vcb->extentsRefNum), &btIterator);
+
+	return err;
+}
+
+
+
+//_________________________________________________________________________________
+//
+// Routine:		MapFileBlock
+//
+// Function: 	Maps a file position into a physical disk address.
+//
+// Input:		A2.L  -  VCB pointer
+//				(A1,D1.W)  -  FCB pointer
+//				D4.L  -  number of bytes desired
+//				D5.L  -  file position (byte address)
+//
+// Output:		D3.L  -  physical start block
+//				D6.L  -  number of contiguous bytes available (up to D4 bytes)
+//				D0.L  -  result code                                <01Oct85>
+//						   0 = ok
+//						   FXRangeErr = file position beyond mapped range   <17Oct85>
+//						   FXOvFlErr = extents file overflow               <17Oct85>
+//						   other = error                                   <17Oct85>
+//
+// Called By:	Log2Phys (read/write in place), Cache (map a file block).
+//_________________________________________________________________________________ + +OSErr MapFileBlockC ( + ExtendedVCB *vcb, // volume that file resides on + FCB *fcb, // FCB of file + size_t numberOfBytes, // number of contiguous bytes desired + off_t offset, // starting offset within file (in bytes) + daddr_t *startSector, // first 512-byte sector (NOT an allocation block) + size_t *availableBytes) // number of contiguous bytes (up to numberOfBytes) +{ + OSErr err; + UInt32 allocBlockSize; // Size of the volume's allocation block + HFSPlusExtentKey foundKey; + HFSPlusExtentRecord foundData; + UInt32 foundIndex; + UInt32 hint; + UInt32 firstFABN; // file allocation block of first block in found extent + UInt32 nextFABN; // file allocation block of block after end of found extent + off_t dataEnd; // (offset) end of range that is contiguous + UInt32 sectorsPerBlock; // Number of sectors per allocation block + UInt32 startBlock; // volume allocation block corresponding to firstFABN + daddr_t temp; + off_t tmpOff; + + + LogStartTime(kTraceMapFileBlock); + + allocBlockSize = vcb->blockSize; + + err = MapFileBlockFromFCB(vcb, fcb, offset, &firstFABN, &startBlock, &nextFABN); + if (err != noErr) { + err = SearchExtentFile(vcb, fcb, offset, &foundKey, foundData, &foundIndex, &hint, &nextFABN); + if (err == noErr) { + startBlock = foundData[foundIndex].startBlock; + firstFABN = nextFABN - foundData[foundIndex].blockCount; + } + } + + if (err != noErr) + { + LogEndTime(kTraceMapFileBlock, err); + + return err; + } + + // + // Determine the end of the available space. It will either be the end of the extent, + // or the file's PEOF, whichever is smaller. + // + dataEnd = (off_t)((off_t)(nextFABN) * (off_t)(allocBlockSize)); // Assume valid data through end of this extent + if (fcb->fcbPLen < dataEnd) // Is PEOF shorter? 
+ dataEnd = fcb->fcbPLen; // Yes, so only map up to PEOF + + // Compute the number of sectors in an allocation block + sectorsPerBlock = allocBlockSize / kSectorSize; // sectors per allocation block + + // + // Compute the absolute sector number that contains the offset of the given file + // + temp = (daddr_t)((offset - (off_t)((off_t)(firstFABN) * (off_t)(allocBlockSize)))/kSectorSize); // offset in sectors from start of the extent + temp += startBlock * sectorsPerBlock; // offset in sectors from start of allocation block space + if (vcb->vcbSigWord == kHFSPlusSigWord) + temp += vcb->hfsPlusIOPosOffset/512; /* offset inside wrapper */ + else + temp += vcb->vcbAlBlSt; /* offset in sectors from start of volume */ + + // Return the desired sector for file position "offset" + *startSector = temp; + + // + // Determine the number of contiguous bytes until the end of the extent + // (or the amount they asked for, whichever comes first). + // + tmpOff = dataEnd - offset; + if (tmpOff > (off_t)(numberOfBytes)) + *availableBytes = numberOfBytes; // more there than they asked for, so pin the output + else + *availableBytes = tmpOff; + LogEndTime(kTraceMapFileBlock, noErr); + + return noErr; +} + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: ReleaseExtents +// +// Function: Release the extents of a single extent data record. 
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ + +static OSErr ReleaseExtents( + ExtendedVCB *vcb, + const HFSPlusExtentRecord extentRecord, + UInt32 *numReleasedAllocationBlocks, + Boolean *releasedLastExtent) +{ + UInt32 extentIndex; + UInt32 numberOfExtents; + OSErr err = noErr; + + *numReleasedAllocationBlocks = 0; + *releasedLastExtent = false; + + if (vcb->vcbSigWord == kHFSPlusSigWord) + numberOfExtents = kHFSPlusExtentDensity; + else + numberOfExtents = kHFSExtentDensity; + + for( extentIndex = 0; extentIndex < numberOfExtents; extentIndex++) + { + UInt32 numAllocationBlocks; + + // Loop over the extent record and release the blocks associated with each extent. + + numAllocationBlocks = extentRecord[extentIndex].blockCount; + if ( numAllocationBlocks == 0 ) + { + *releasedLastExtent = true; + break; + } + + err = BlockDeallocate( vcb, extentRecord[extentIndex].startBlock, numAllocationBlocks ); + if ( err != noErr ) + break; + + *numReleasedAllocationBlocks += numAllocationBlocks; // bump FABN to beg of next extent + } + + return( err ); +} + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: TruncateExtents +// +// Purpose: Delete extent records whose starting file allocation block number +// is greater than or equal to a given starting block number. The +// allocation blocks represented by the extents are deallocated. +// +// Inputs: +// vcb Volume to operate on +// fileID Which file to operate on +// startBlock Starting file allocation block number for first extent +// record to delete. 
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ + +static OSErr TruncateExtents( + ExtendedVCB *vcb, + UInt8 forkType, + UInt32 fileID, + UInt32 startBlock, + Boolean * recordDeleted) +{ + OSErr err; + UInt32 numberExtentsReleased; + Boolean releasedLastExtent; + UInt32 hint; + HFSPlusExtentKey key; + HFSPlusExtentRecord extents; + + while (true) { + err = FindExtentRecord(vcb, forkType, fileID, startBlock, false, &key, extents, &hint); + if (err != noErr) { + if (err == btNotFound) + err = noErr; + break; + } + + err = ReleaseExtents( vcb, extents, &numberExtentsReleased, &releasedLastExtent ); + if (err != noErr) break; + + err = DeleteExtentRecord(vcb, forkType, fileID, startBlock); + if (err != noErr) break; + + *recordDeleted = true; + startBlock += numberExtentsReleased; + } + + return err; +} + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: DeallocateFork +// +// Function: De-allocates all disk space allocated to a specified fork. 
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+
+// First frees the blocks described by the catalog-resident extent record,
+// then (only if that record was full) frees any overflow records in the
+// extents B-tree via TruncateExtents.
+static OSErr DeallocateFork(
+ ExtendedVCB *vcb,
+ HFSCatalogNodeID fileID,
+ UInt8 forkType,
+ HFSPlusExtentRecord catalogExtents,
+ Boolean * recordDeleted) /* true if a record was deleted */
+{
+ OSErr err;
+ UInt32 numReleasedAllocationBlocks;
+ Boolean releasedLastExtent;
+
+ // Release the catalog extents
+ err = ReleaseExtents( vcb, catalogExtents, &numReleasedAllocationBlocks, &releasedLastExtent );
+ // Release the extra extents, if present
+ // (numReleasedAllocationBlocks is the FABN where overflow records begin)
+ if (err == noErr && !releasedLastExtent)
+ err = TruncateExtents(vcb, forkType, fileID, numReleasedAllocationBlocks, recordDeleted);
+
+ return( err );
+}
+
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+// Routine: FlushExtentFile
+//
+// Function: Flushes the extent file for a specified volume
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+
+OSErr FlushExtentFile( ExtendedVCB *vcb )
+{
+ FCB * fcb;
+ OSErr err;
+
+ fcb = GetFileControlBlock(vcb->extentsRefNum);
+ err = BTFlushPath(fcb);
+ if ( err == noErr )
+ {
+ // If the FCB for the extent "file" is dirty, mark the VCB as dirty.
+
+ if ((fcb->fcbFlags & fcbModifiedMask) != 0)
+ {
+ MarkVCBDirty( vcb );
+ err = FlushVolumeControlBlock( vcb );
+ }
+ }
+
+ return( err );
+}
+
+//-------------------------------------------------------------------------------
+// Routine: DeleteFile
+//
+// Function: De-allocates all disk space allocated to a specified file
+// including the space used by the catalog (ie the catalog record).
+// The space occupied by both forks is also deallocated.
+//
+//-------------------------------------------------------------------------------
+
+OSErr DeleteFile( ExtendedVCB *vcb, HFSCatalogNodeID parDirID, ConstUTF8Param catalogName, UInt32 catalogHint )
+{
+ OSErr err;
+ OSErr errDF, errRF; // data-fork / resource-fork deallocation results
+ CatalogNodeData catalogData;
+ Boolean recordDeleted;
+
+ recordDeleted = false;
+
+ INIT_CATALOGDATA(&catalogData, kCatNameNoCopyName);
+
+ // Find catalog data in catalog
+ err = GetCatalogNode( vcb, parDirID, catalogName, kUndefinedStrLen, catalogHint, &catalogData, &catalogHint);
+ if( err != noErr )
+ goto Exit;
+
+
+ // Check to make sure record is for a file
+ if ( catalogData.cnd_type != kCatalogFileNode )
+ {
+ err = notAFileErr;
+ goto Exit;
+ }
+
+ //
+ // Always delete the Catalog record first (to minimize disk corruption)
+ //
+ err = DeleteCatalogNode(vcb, parDirID, catalogName, catalogHint);
+ if( err != noErr )
+ goto Exit;
+
+ //
+ // Note: we don't report errors from DeallocateFork since the
+ // file no longer exists (since DeleteCatalogNode succeeded).
+ // Any errors mean that there are possibly some orphaned disk
+ // blocks but from the clients perspective the file was deleted.
+ //
+
+ // Deallocate data fork extents
+ errDF = DeallocateFork( vcb, catalogData.cnd_nodeID, kDataForkType,
+ catalogData.cnd_datafork.extents, &recordDeleted );
+
+ // Deallocate resource fork extents
+ errRF = DeallocateFork( vcb, catalogData.cnd_nodeID, kResourceForkType,
+ catalogData.cnd_rsrcfork.extents, &recordDeleted );
+
+ if (recordDeleted)
+ err = FlushExtentFile( vcb );
+
+ CLEAN_CATALOGDATA(&catalogData);
+ // Error precedence: data-fork error first, then resource-fork, then flush.
+ return (errDF ? errDF : (errRF ? errRF : err));
+Exit:
+
+ CLEAN_CATALOGDATA(&catalogData);
+ return( err );
+}
+
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+// Routine: CompareExtentKeys
+//
+// Function: Compares two extent file keys (a search key and a trial key) for
+// an HFS volume.
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ + +SInt32 CompareExtentKeys( const HFSExtentKey *searchKey, const HFSExtentKey *trialKey ) +{ + SInt32 result; // ± 1 + + #if DEBUG_BUILD + if (searchKey->keyLength != kHFSExtentKeyMaximumLength) + DebugStr("\pHFS: search Key is wrong length"); + if (trialKey->keyLength != kHFSExtentKeyMaximumLength) + DebugStr("\pHFS: trial Key is wrong length"); + #endif + + result = -1; // assume searchKey < trialKey + + if (searchKey->fileID == trialKey->fileID) { + // + // FileNum's are equal; compare fork types + // + if (searchKey->forkType == trialKey->forkType) { + // + // Fork types are equal; compare allocation block number + // + if (searchKey->startBlock == trialKey->startBlock) { + // + // Everything is equal + // + result = 0; + } + else { + // + // Allocation block numbers differ; determine sign + // + if (searchKey->startBlock > trialKey->startBlock) + result = 1; + } + } + else { + // + // Fork types differ; determine sign + // + if (searchKey->forkType > trialKey->forkType) + result = 1; + } + } + else { + // + // FileNums differ; determine sign + // + if (searchKey->fileID > trialKey->fileID) + result = 1; + } + + return( result ); +} + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: CompareExtentKeysPlus +// +// Function: Compares two extent file keys (a search key and a trial key) for +// an HFS volume. 
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ + +SInt32 CompareExtentKeysPlus( const HFSPlusExtentKey *searchKey, const HFSPlusExtentKey *trialKey ) +{ + SInt32 result; // ± 1 + + #if DEBUG_BUILD + if (searchKey->keyLength != kHFSPlusExtentKeyMaximumLength) + DebugStr("\pHFS: search Key is wrong length"); + if (trialKey->keyLength != kHFSPlusExtentKeyMaximumLength) + DebugStr("\pHFS: trial Key is wrong length"); + #endif + + result = -1; // assume searchKey < trialKey + + if (searchKey->fileID == trialKey->fileID) { + // + // FileNum's are equal; compare fork types + // + if (searchKey->forkType == trialKey->forkType) { + // + // Fork types are equal; compare allocation block number + // + if (searchKey->startBlock == trialKey->startBlock) { + // + // Everything is equal + // + result = 0; + } + else { + // + // Allocation block numbers differ; determine sign + // + if (searchKey->startBlock > trialKey->startBlock) + result = 1; + } + } + else { + // + // Fork types differ; determine sign + // + if (searchKey->forkType > trialKey->forkType) + result = 1; + } + } + else { + // + // FileNums differ; determine sign + // + if (searchKey->fileID > trialKey->fileID) + result = 1; + } + + return( result ); +} + + + +//_________________________________________________________________________________ +// +// Routine: Extendfile +// +// Function: Extends the disk space allocated to a file. +// +// Input: A2.L - VCB pointer +// A1.L - pointer to FCB array +// D1.W - file refnum +// D3.B - option flags +// kEFContigMask - force contiguous allocation +// kEFAllMask - allocate all requested bytes or none +// NOTE: You may not set both options. +// D4.L - number of additional bytes to allocate +// +// Output: D0.W - result code +// 0 = ok +// -n = IO error +// D6.L - number of bytes allocated +// +// Called by: FileAloc,FileWrite,SetEof +// +// Note: ExtendFile updates the PEOF in the FCB. 
+//_________________________________________________________________________________
+
+OSErr ExtendFileC (
+ ExtendedVCB *vcb, // volume that file resides on
+ FCB *fcb, // FCB of file to truncate
+ SInt64 bytesToAdd, // number of bytes to allocate
+ UInt32 flags, // EFContig and/or EFAll
+ SInt64 *actualBytesAdded) // number of bytes actually allocated
+{
+ OSErr err;
+ UInt32 volumeBlockSize;
+ SInt64 blocksToAdd;
+ SInt64 bytesThisExtent;
+ HFSPlusExtentKey foundKey;
+ HFSPlusExtentRecord foundData;
+ UInt32 foundIndex;
+ UInt32 hint;
+ UInt32 nextBlock;
+ UInt32 startBlock;
+ Boolean allOrNothing;
+ Boolean forceContig;
+ Boolean wantContig;
+ Boolean needsFlush;
+ UInt32 actualStartBlock;
+ UInt32 actualNumBlocks;
+ UInt32 numExtentsPerRecord;
+ SInt64 maximumBytes;
+ SInt64 peof;
+ SInt64 previousPEOF;
+
+
+#if HFSInstrumentation
+ InstTraceClassRef trace;
+ InstEventTag eventTag;
+ InstDataDescriptorRef traceDescriptor;
+ FSVarsRec *fsVars = (FSVarsRec *) LMGetFSMVars();
+
+ traceDescriptor = (InstDataDescriptorRef) fsVars->later[2];
+
+ err = InstCreateTraceClass(kInstRootClassRef, "HFS:Extents:ExtendFileC", 'hfs+', kInstEnableClassMask, &trace);
+ if (err != noErr) DebugStr("\pError from InstCreateTraceClass");
+
+ eventTag = InstCreateEventTag();
+ InstLogTraceEvent( trace, eventTag, kInstStartEvent);
+#endif
+
+ needsFlush = false;
+ *actualBytesAdded = 0;
+ volumeBlockSize = vcb->blockSize;
+ allOrNothing = ((flags & kEFAllMask) != 0);
+ forceContig = ((flags & kEFContigMask) != 0);
+ previousPEOF = fcb->fcbPLen; // remembered so actualBytesAdded can be computed at exit
+
+ if (vcb->vcbSigWord == kHFSPlusSigWord)
+ numExtentsPerRecord = kHFSPlusExtentDensity;
+ else
+ numExtentsPerRecord = kHFSExtentDensity;
+
+ //
+ // Make sure the request and new PEOF are less than 2GB if HFS.
+ //
+ if (vcb->vcbSigWord == kHFSSigWord) {
+ if (bytesToAdd >= kTwoGigabytes)
+ goto Overflow;
+ if ((fcb->fcbPLen + bytesToAdd) >= kTwoGigabytes)
+ goto Overflow;
+ }
+ //
+ // Determine how many blocks need to be allocated.
+ // Round up the number of desired bytes to add.
+ //
+ blocksToAdd = FileBytesToBlocks(bytesToAdd, volumeBlockSize);
+ bytesToAdd = (SInt64)((SInt64)blocksToAdd * (SInt64)volumeBlockSize);
+
+ //
+ // If the file's clump size is larger than the allocation block size,
+ // then set the maximum number of bytes to the requested number of bytes
+ // rounded up to a multiple of the clump size.
+ //
+ if (fcb->fcbClmpSize > volumeBlockSize) {
+ maximumBytes = (SInt64)FileBytesToBlocks(bytesToAdd, fcb->fcbClmpSize);
+ maximumBytes *= fcb->fcbClmpSize;
+ }
+ else {
+ maximumBytes = bytesToAdd;
+ }
+
+ //
+ // Compute new physical EOF, rounded up to a multiple of a block.
+ //
+ // NOTE: the "else" below intentionally binds to the inner "if (allOrNothing)".
+ if ((vcb->vcbSigWord == kHFSSigWord) && ((fcb->fcbPLen + bytesToAdd) >= (SInt64) kTwoGigabytes)) // Too big?
+ if (allOrNothing) // Yes, must they have it all?
+ goto Overflow; // Yes, can't have it
+ else {
+ --blocksToAdd; // No, give 'em one block less
+ bytesToAdd -= volumeBlockSize;
+ }
+
+ //
+ // If allocation is all-or-nothing, make sure there are
+ // enough free blocks on the volume (quick test).
+ //
+ if (allOrNothing && (blocksToAdd > (SInt64)vcb->freeBlocks)) {
+ err = dskFulErr;
+ goto ErrorExit;
+ }
+
+ //
+ // See if there are already enough blocks allocated to the file.
+ //
+ peof = fcb->fcbPLen + bytesToAdd; // potential new PEOF
+ err = SearchExtentFile(vcb, fcb, peof-1, &foundKey, foundData, &foundIndex, &hint, &nextBlock);
+ if (err == noErr) {
+ // Enough blocks are already allocated. Just update the FCB to reflect the new length.
+ fcb->fcbPLen = peof;
+ H_EXTENDSIZE(fcb, bytesToAdd);
+ fcb->fcbFlags |= fcbModifiedMask;
+ goto Exit;
+ }
+ if (err != fxRangeErr) // Any real error?
+ goto ErrorExit; // Yes, so exit immediately
+
+ //
+ // Adjust the PEOF to the end of the last extent.
+ //
+ peof = (SInt64)((SInt64)nextBlock * (SInt64)volumeBlockSize); // currently allocated PEOF
+ bytesThisExtent = peof - fcb->fcbPLen;
+ if (bytesThisExtent != 0) {
+ fcb->fcbPLen = peof;
+ H_EXTENDSIZE(fcb, bytesThisExtent);
+ fcb->fcbFlags |= fcbModifiedMask;
+ bytesToAdd -= bytesThisExtent;
+ }
+
+ //
+ // Allocate some more space.
+ //
+ // First try a contiguous allocation (of the whole amount).
+ // If that fails, get whatever we can.
+ // If forceContig, then take whatever we got
+ // else, keep getting bits and pieces (non-contig)
+ err = noErr;
+ wantContig = true;
+ do {
+ // Hint the allocator to start right after the file's current last extent.
+ startBlock = foundData[foundIndex].startBlock + foundData[foundIndex].blockCount;
+ err = BlockAllocate(vcb, startBlock, bytesToAdd, maximumBytes, wantContig, &actualStartBlock, &actualNumBlocks);
+ if (err == dskFulErr) {
+ if (forceContig)
+ break; // AllocContig failed because not enough contiguous space
+ if (wantContig) {
+ // Couldn't get one big chunk, so get whatever we can.
+ err = noErr;
+ wantContig = false;
+ continue;
+ }
+ if (actualNumBlocks != 0)
+ err = noErr;
+ }
+ if (err == noErr) {
+#if HFSInstrumentation
+ {
+ struct {
+ UInt32 fileID;
+ UInt32 start;
+ UInt32 count;
+ UInt32 fabn;
+ } x;
+
+ x.fileID = H_FILEID(fcb);
+ x.start = actualStartBlock;
+ x.count = actualNumBlocks;
+ x.fabn = nextBlock;
+
+ InstLogTraceEventWithDataStructure( trace, eventTag, kInstMiddleEvent, traceDescriptor,
+ (UInt8 *) &x, sizeof(x));
+ }
+#endif
+ // Add the new extent to the existing extent record, or create a new one.
+ if (actualStartBlock == startBlock) {
+ // We grew the file's last extent, so just adjust the number of blocks.
+ foundData[foundIndex].blockCount += actualNumBlocks;
+ err = UpdateExtentRecord(vcb, fcb, &foundKey, foundData, hint);
+ if (err != noErr) break;
+ }
+ else {
+ UInt16 i;
+
+ // Need to add a new extent. See if there is room in the current record.
+ if (foundData[foundIndex].blockCount != 0) // Is current extent free to use?
+ ++foundIndex; // No, so use the next one.
+ if (foundIndex == numExtentsPerRecord) {
+ // This record is full. Need to create a new one.
+ if (H_FILEID(fcb) == kHFSExtentsFileID) {
+ err = fxOvFlErr; // Oops. Can't extend extents file (?? really ??)
+ break;
+ }
+
+ foundKey.keyLength = kHFSPlusExtentKeyMaximumLength;
+ if (fcb->fcbFlags & fcbResourceMask)
+ foundKey.forkType = kResourceForkType;
+ else
+ foundKey.forkType = kDataForkType;
+ foundKey.pad = 0;
+ foundKey.fileID = H_FILEID(fcb);
+ foundKey.startBlock = nextBlock;
+
+ foundData[0].startBlock = actualStartBlock;
+ foundData[0].blockCount = actualNumBlocks;
+
+ // zero out remaining extents...
+ for (i = 1; i < kHFSPlusExtentDensity; ++i)
+ {
+ foundData[i].startBlock = 0;
+ foundData[i].blockCount = 0;
+ }
+
+ foundIndex = 0;
+
+ err = CreateExtentRecord(vcb, &foundKey, foundData, &hint);
+ if (err == fxOvFlErr) {
+ // We couldn't create an extent record because extents B-tree
+ // couldn't grow. Deallocate the extent just allocated and
+ // return a disk full error.
+ (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks);
+ err = dskFulErr;
+ }
+ if (err != noErr) break;
+
+ needsFlush = true; // We need to update the B-tree header
+ }
+ else {
+ // Add a new extent into this record and update.
+ foundData[foundIndex].startBlock = actualStartBlock;
+ foundData[foundIndex].blockCount = actualNumBlocks;
+ err = UpdateExtentRecord(vcb, fcb, &foundKey, foundData, hint);
+ if (err != noErr) break;
+ }
+ }
+
+ // Figure out how many bytes were actually allocated.
+ // NOTE: BlockAllocate could have allocated more than we asked for.
+ // Don't set the PEOF beyond what our client asked for.
+ nextBlock += actualNumBlocks;
+ bytesThisExtent = (SInt64)((SInt64)actualNumBlocks * (SInt64)volumeBlockSize);
+ if (bytesThisExtent > bytesToAdd) {
+ bytesToAdd = 0;
+ }
+ else {
+ bytesToAdd -= bytesThisExtent;
+ maximumBytes -= bytesThisExtent;
+ }
+ fcb->fcbPLen += bytesThisExtent;
+ H_EXTENDSIZE(fcb, bytesThisExtent);
+ fcb->fcbFlags |= fcbModifiedMask;
+
+ // If contiguous allocation was requested, then we've already got one contiguous
+ // chunk. If we didn't get all we wanted, then adjust the error to disk full.
+ if (forceContig) {
+ if (bytesToAdd != 0)
+ err = dskFulErr;
+ break; // We've already got everything that's contiguous
+ }
+ }
+ } while (err == noErr && bytesToAdd);
+
+ErrorExit:
+Exit:
+ // Report what was actually added -- even on error paths, since earlier
+ // extents may have been committed before the failure.
+ *actualBytesAdded = fcb->fcbPLen - previousPEOF;
+
+ if (needsFlush)
+ (void) FlushExtentFile(vcb);
+
+#if HFSInstrumentation
+ InstLogTraceEvent( trace, eventTag, kInstEndEvent);
+#endif
+
+ return err;
+
+Overflow:
+ err = fileBoundsErr;
+ goto ErrorExit;
+}
+
+
+
+//_________________________________________________________________________________
+//
+// Routine: TruncateFileC
+//
+// Function: Truncates the disk space allocated to a file. The file space is
+// truncated to a specified new PEOF rounded up to the next allocation
+// block boundary. If the 'TFTrunExt' option is specified, the file is
+// truncated to the end of the extent containing the new PEOF.
+//
+// Input: A2.L - VCB pointer
+// A1.L - pointer to FCB array
+// D1.W - file refnum
+// D2.B - option flags
+// TFTrunExt - truncate to the extent containing new PEOF
+// D3.L - new PEOF
+//
+// Output: D0.W - result code
+// 0 = ok
+// -n = IO error
+//
+// Note: TruncateFile updates the PEOF in the FCB.
+//_________________________________________________________________________________
+
+OSErr TruncateFileC (
+ ExtendedVCB *vcb, // volume that file resides on
+ FCB *fcb, // FCB of file to truncate
+ SInt64 peof, // new physical size for file
+ Boolean truncateToExtent) // if true, truncate to end of extent containing newPEOF
+{
+ OSErr err;
+ UInt32 nextBlock; // next file allocation block to consider
+ UInt32 startBlock; // Physical (volume) allocation block number of start of a range
+ UInt32 physNumBlocks; // Number of allocation blocks in file (according to PEOF)
+ UInt32 numBlocks;
+ HFSPlusExtentKey key; // key for current extent record; key->keyLength == 0 if FCB's extent record
+ UInt32 hint; // BTree hint corresponding to key
+ HFSPlusExtentRecord extentRecord;
+ UInt32 extentIndex;
+ UInt32 extentNextBlock;
+ UInt32 numExtentsPerRecord;
+ SInt64 temp64;
+ UInt8 forkType;
+ Boolean extentChanged; // true if we actually changed an extent
+ Boolean recordDeleted; // true if an extent record got deleted
+
+#if HFSInstrumentation
+ InstTraceClassRef trace;
+ InstEventTag eventTag;
+ InstDataDescriptorRef traceDescriptor;
+ FSVarsRec *fsVars = (FSVarsRec *) LMGetFSMVars();
+
+ traceDescriptor = (InstDataDescriptorRef) fsVars->later[2];
+
+ err = InstCreateTraceClass(kInstRootClassRef, "HFS:Extents:TruncateFileC", 'hfs+', kInstEnableClassMask, &trace);
+ if (err != noErr) DebugStr("\pError from InstCreateTraceClass");
+
+ eventTag = InstCreateEventTag();
+ InstLogTraceEvent( trace, eventTag, kInstStartEvent);
+#endif
+
+ recordDeleted = false;
+
+ if (vcb->vcbSigWord == kHFSPlusSigWord)
+ numExtentsPerRecord = kHFSPlusExtentDensity;
+ else
+ numExtentsPerRecord = kHFSExtentDensity;
+
+ if (fcb->fcbFlags & fcbResourceMask)
+ forkType = kResourceForkType;
+ else
+ forkType = kDataForkType;
+
+ temp64 = fcb->fcbPLen / (SInt64)vcb->blockSize; // number of allocation blocks currently in file
+ physNumBlocks = (UInt32)temp64;
+
+ //
+ // Round newPEOF up to a multiple of the allocation block size. If new size is
+ // two gigabytes or more, then round down by one allocation block (??? really?
+ // shouldn't that be an error?).
+ //
+ nextBlock = FileBytesToBlocks(peof, vcb->blockSize); // number of allocation blocks to remain in file
+ peof = (SInt64)((SInt64)nextBlock * (SInt64)vcb->blockSize); // number of bytes in those blocks
+ if ((vcb->vcbSigWord == kHFSSigWord) && (peof >= (UInt32) kTwoGigabytes)) {
+ #if DEBUG_BUILD
+ DebugStr("\pHFS: Trying to truncate a file to 2GB or more");
+ #endif
+ err = fileBoundsErr;
+ goto ErrorExit;
+ }
+
+ //
+ // Update FCB's length
+ //
+ H_TRUNCSIZE(fcb, fcb->fcbPLen - peof);
+ fcb->fcbPLen = peof;
+ fcb->fcbFlags |= fcbModifiedMask;
+
+ //
+ // If the new PEOF is 0, then truncateToExtent has no meaning (we should always deallocate
+ // all storage).
+ //
+ if (peof == 0) {
+ int i;
+
+ // Deallocate all the extents for this fork
+ err = DeallocateFork(vcb, H_FILEID(fcb), forkType, fcb->fcbExtents, &recordDeleted);
+ if (err != noErr) goto ErrorExit; // got some error, so return it
+
+ // Update the catalog extent record (making sure it's zeroed out)
+ if (err == noErr) {
+ for (i=0; i < kHFSPlusExtentDensity; i++) {
+ fcb->fcbExtents[i].startBlock = 0;
+ fcb->fcbExtents[i].blockCount = 0;
+ }
+ }
+ goto Done;
+ }
+
+ //
+ // Find the extent containing byte (peof-1). This is the last extent we'll keep.
+ // (If truncateToExtent is true, we'll keep the whole extent; otherwise, we'll only
+ // keep up through peof). The search will tell us how many allocation blocks exist
+ // in the found extent plus all previous extents.
+ //
+ err = SearchExtentFile(vcb, fcb, peof-1, &key, extentRecord, &extentIndex, &hint, &extentNextBlock);
+ if (err != noErr) goto ErrorExit;
+
+ extentChanged = false; // haven't changed the extent yet
+
+ if (!truncateToExtent) {
+ //
+ // Shorten this extent. It may be the case that the entire extent gets
+ // freed here.
+ //
+ numBlocks = extentNextBlock - nextBlock; // How many blocks in this extent to free up
+ if (numBlocks != 0) {
+ // Compute first volume allocation block to free
+ startBlock = extentRecord[extentIndex].startBlock + extentRecord[extentIndex].blockCount - numBlocks;
+ // Free the blocks in bitmap
+ err = BlockDeallocate(vcb, startBlock, numBlocks);
+ if (err != noErr) goto ErrorExit;
+ // Adjust length of this extent
+ extentRecord[extentIndex].blockCount -= numBlocks;
+ // If extent is empty, set start block to 0
+ if (extentRecord[extentIndex].blockCount == 0)
+ extentRecord[extentIndex].startBlock = 0;
+ // Remember that we changed the extent record
+ extentChanged = true;
+ }
+ }
+
+ //
+ // Now move to the next extent in the record, and set up the file allocation block number
+ //
+ nextBlock = extentNextBlock; // Next file allocation block to free
+ ++extentIndex; // Its index within the extent record
+
+ //
+ // Release all following extents in this extent record. Update the record.
+ //
+ while (extentIndex < numExtentsPerRecord && extentRecord[extentIndex].blockCount != 0) {
+ numBlocks = extentRecord[extentIndex].blockCount;
+ // Deallocate this extent
+ err = BlockDeallocate(vcb, extentRecord[extentIndex].startBlock, numBlocks);
+ if (err != noErr) goto ErrorExit;
+ // Update next file allocation block number
+ nextBlock += numBlocks;
+ // Zero out start and length of this extent to delete it from record
+ extentRecord[extentIndex].startBlock = 0;
+ extentRecord[extentIndex].blockCount = 0;
+ // Remember that we changed an extent
+ extentChanged = true;
+ // Move to next extent in record
+ ++extentIndex;
+ }
+
+ //
+ // If any of the extents in the current record were changed, then update that
+ // record (in the FCB, or extents file).
+ //
+ if (extentChanged) {
+ err = UpdateExtentRecord(vcb, fcb, &key, extentRecord, hint);
+ if (err != noErr) goto ErrorExit;
+ }
+
+ //
+ // If there are any following allocation blocks, then we need
+ // to search for their extent records and delete those allocation
+ // blocks.
+ //
+ if (nextBlock < physNumBlocks)
+ err = TruncateExtents(vcb, forkType, H_FILEID(fcb), nextBlock, &recordDeleted);
+
+Done:
+ErrorExit:
+
+ // If any overflow extent record was deleted, flush the extents B-tree so
+ // the on-disk header reflects the change.
+ if (recordDeleted)
+ (void) FlushExtentFile(vcb);
+
+#if HFSInstrumentation
+ InstLogTraceEvent( trace, eventTag, kInstEndEvent);
+#endif
+
+ return err;
+}
+
+
+
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+// Routine: SearchExtentRecord (was XRSearch)
+//
+// Function: Searches extent record for the extent mapping a given file
+// allocation block number (FABN).
+//
+// Input: searchFABN - desired FABN
+// extentData - pointer to extent data record (xdr)
+// extentDataStartFABN - beginning FABN for extent record
+//
+// Output: foundExtentDataOffset - offset to extent entry within xdr
+// result = noErr, offset to extent mapping desired FABN
+// result = FXRangeErr, offset to last extent in record
+// endingFABNPlusOne - ending FABN +1
+// noMoreExtents - True if the extent was not found, and the
+// extent record was not full (so don't bother
+// looking in subsequent records); false otherwise.
+//
+// Result: noErr = ok
+// FXRangeErr = desired FABN > last mapped FABN in record
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+
+static OSErr SearchExtentRecord(
+ const ExtendedVCB *vcb,
+ UInt32 searchFABN,
+ const HFSPlusExtentRecord extentData,
+ UInt32 extentDataStartFABN,
+ UInt32 *foundExtentIndex,
+ UInt32 *endingFABNPlusOne,
+ Boolean *noMoreExtents)
+{
+ OSErr err = noErr;
+ UInt32 extentIndex;
+ UInt32 numberOfExtents;
+ UInt32 numAllocationBlocks;
+ Boolean foundExtent;
+
+ *endingFABNPlusOne = extentDataStartFABN;
+ *noMoreExtents = false;
+ foundExtent = false;
+
+ // Record density depends on the volume format.
+ if (vcb->vcbSigWord == kHFSPlusSigWord)
+ numberOfExtents = kHFSPlusExtentDensity;
+ else
+ numberOfExtents = kHFSExtentDensity;
+
+ for( extentIndex = 0; extentIndex < numberOfExtents; ++extentIndex )
+ {
+
+ // Loop over the extent record and find the search FABN.
+
+ numAllocationBlocks = extentData[extentIndex].blockCount;
+ if ( numAllocationBlocks == 0 )
+ {
+ // A zero-length extent terminates the record.
+ break;
+ }
+
+ // Accumulate the running FABN total so *endingFABNPlusOne is the FABN
+ // just past the current extent.
+ *endingFABNPlusOne += numAllocationBlocks;
+
+ if( searchFABN < *endingFABNPlusOne )
+ {
+ // Found the extent.
+ foundExtent = true;
+ break;
+ }
+ }
+
+ if( foundExtent )
+ {
+ // Found the extent. Note the extent offset
+ *foundExtentIndex = extentIndex;
+ }
+ else
+ {
+ // Did not find the extent. Set foundExtentDataOffset accordingly
+ // (points at the last valid extent, or index 0 if the record was empty).
+ if( extentIndex > 0 )
+ {
+ *foundExtentIndex = extentIndex - 1;
+ }
+ else
+ {
+ *foundExtentIndex = 0;
+ }
+
+ // If we found an empty extent, then set noMoreExtents.
+ if (extentIndex < numberOfExtents)
+ *noMoreExtents = true;
+
+ // Finally, return an error to the caller
+ err = fxRangeErr;
+ }
+
+ return( err );
+}
+
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+// Routine: SearchExtentFile (was XFSearch)
+//
+// Function: Searches extent file (including the FCB resident extent record)
+// for the extent mapping a given file position.
+//
+// Input: vcb - VCB pointer
+// fcb - FCB pointer
+// filePosition - file position (byte address)
+//
+// Output: foundExtentKey - extent key record (xkr)
+// If extent was found in the FCB's resident extent record,
+// then foundExtentKey->keyLength will be set to 0.
+// foundExtentData - extent data record(xdr)
+// foundExtentIndex - index to extent entry in xdr
+// result = 0, offset to extent mapping desired FABN
+// result = FXRangeErr, offset to last extent in record
+// (i.e., kNumExtentsPerRecord-1)
+// extentBTreeHint - BTree hint for extent record
+// kNoHint = Resident extent record
+// endingFABNPlusOne - ending FABN +1
+//
+// Result:
+// noErr Found an extent that contains the given file position
+// FXRangeErr Given position is beyond the last allocated extent
+// (other) (some other internal I/O error)
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+
+static OSErr SearchExtentFile(
+ const ExtendedVCB *vcb,
+ const FCB *fcb,
+ SInt64 filePosition,
+ HFSPlusExtentKey *foundExtentKey,
+ HFSPlusExtentRecord foundExtentData,
+ UInt32 *foundExtentIndex,
+ UInt32 *extentBTreeHint,
+ UInt32 *endingFABNPlusOne )
+{
+ OSErr err;
+ UInt32 filePositionBlock;
+ SInt64 temp64;
+ Boolean noMoreExtents;
+
+ // Convert the byte position to a file allocation block number.
+ temp64 = filePosition / (SInt64)vcb->blockSize;
+ filePositionBlock = (UInt32)temp64;
+
+ bcopy ( fcb->fcbExtents, foundExtentData, sizeof(HFSPlusExtentRecord));
+
+ // Search the resident FCB first.
+ err = SearchExtentRecord( vcb, filePositionBlock, foundExtentData, 0,
+ foundExtentIndex, endingFABNPlusOne, &noMoreExtents );
+
+ if( err == noErr ) {
+ // Found the extent. Set results accordingly
+ *extentBTreeHint = kNoHint; // no hint, because not in the BTree
+ foundExtentKey->keyLength = 0; // 0 = the FCB itself
+
+ goto Exit;
+ }
+
+ // Didn't find extent in FCB. If FCB's extent record wasn't full, there's no point
+ // in searching the extents file. Note that SearchExtentRecord left us pointing at
+ // the last valid extent (or the first one, if none were valid). This means we need
+ // to fill in the hint and key outputs, just like the "if" statement above.
+ if ( noMoreExtents ) {
+ *extentBTreeHint = kNoHint; // no hint, because not in the BTree
+ foundExtentKey->keyLength = 0; // 0 = the FCB itself
+ err = fxRangeErr; // There are no more extents, so must be beyond PEOF
+ goto Exit;
+ }
+
+ //
+ // Find the desired record, or the previous record if it is the same fork
+ //
+ err = FindExtentRecord(vcb, (fcb->fcbFlags & fcbResourceMask) ? kResourceForkType : kDataForkType,
+ H_FILEID(fcb), filePositionBlock, true, foundExtentKey, foundExtentData, extentBTreeHint);
+
+ if (err == btNotFound) {
+ //
+ // If we get here, the desired position is beyond the extents in the FCB, and there are no extents
+ // in the extents file. Return the FCB's extents and a range error.
+ //
+ *extentBTreeHint = kNoHint;
+ foundExtentKey->keyLength = 0;
+ err = GetFCBExtentRecord(fcb, foundExtentData);
+ // Note: foundExtentIndex and endingFABNPlusOne have already been set as a result of the very
+ // first SearchExtentRecord call in this function (when searching in the FCB's extents, and
+ // we got a range error).
+
+ return fxRangeErr;
+ }
+
+ //
+ // If we get here, there was either a BTree error, or we found an appropriate record.
+ // If we found a record, then search it for the correct index into the extents.
+ //
+ if (err == noErr) {
+ // Find appropriate index into extent record
+ err = SearchExtentRecord(vcb, filePositionBlock, foundExtentData, foundExtentKey->startBlock,
+ foundExtentIndex, endingFABNPlusOne, &noMoreExtents);
+ }
+
+Exit:
+ return err;
+}
+
+
+
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+// Routine: UpdateExtentRecord
+//
+// Function: Write new extent data to an existing extent record with a given key.
+// If all of the extents are empty, and the extent record is in the
+// extents file, then the record is deleted.
+//
+// Input: vcb - the volume containing the extents
+// fcb - the file that owns the extents
+// extentFileKey - pointer to extent key record (xkr)
+// If the key length is 0, then the extents are actually part
+// of the catalog record, stored in the FCB.
+// extentData - pointer to extent data record (xdr)
+// extentBTreeHint - hint for given key, or kNoHint
+//
+// Result: noErr = ok
+// (other) = error from BTree
+//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹
+
+static OSErr UpdateExtentRecord (
+ const ExtendedVCB *vcb,
+ FCB *fcb,
+ const HFSPlusExtentKey *extentFileKey,
+ const HFSPlusExtentRecord extentData,
+ UInt32 extentBTreeHint)
+{
+ BTreeIterator btIterator;
+ FSBufferDescriptor btRecord;
+ UInt16 btRecordSize;
+ FCB * btFCB;
+ OSErr err = noErr;
+
+ if (extentFileKey->keyLength == 0) { // keyLength == 0 means the FCB's extent record
+ BlockMoveData(extentData, fcb->fcbExtents, sizeof(HFSPlusExtentRecord));
+ fcb->fcbFlags |= fcbModifiedMask;
+ }
+ else {
+ //
+ // Need to find and change a record in Extents BTree
+ //
+ btFCB = GetFileControlBlock(vcb->extentsRefNum);
+
+ if (vcb->vcbSigWord == kHFSSigWord) {
+ HFSExtentKey * key; // Actual extent key used on disk in HFS
+ HFSExtentRecord foundData; // The extent data actually found
+
+ // Build the on-disk (HFS) form of the key from the in-memory HFS Plus key.
+ key = (HFSExtentKey*) &btIterator.key;
+ key->keyLength = kHFSExtentKeyMaximumLength;
+ key->forkType = extentFileKey->forkType;
+ key->fileID = extentFileKey->fileID;
+ key->startBlock = extentFileKey->startBlock;
+
+ btIterator.hint.index = 0;
+ btIterator.hint.nodeNum = extentBTreeHint;
+
+ btRecord.bufferAddress = &foundData;
+ btRecord.itemSize = sizeof(HFSExtentRecord);
+ btRecord.itemCount = 1;
+
+ // Locate the existing record, then overwrite it in place.
+ err = BTSearchRecord(btFCB, &btIterator, kInvalidMRUCacheKey, &btRecord,
+ &btRecordSize, &btIterator);
+
+ if (err == noErr)
+ err = HFSPlusToHFSExtents(extentData, (HFSExtentDescriptor *)&foundData);
+
+ if (err == noErr)
+ err = BTReplaceRecord(btFCB, &btIterator, &btRecord, btRecordSize);
+ }
+ else { // HFS Plus volume
+ HFSPlusExtentRecord foundData; // The extent data actually found
+
+ BlockMoveData(extentFileKey, &btIterator.key, sizeof(HFSPlusExtentKey));
+
+ btIterator.hint.index = 0;
+ btIterator.hint.nodeNum = extentBTreeHint;
+
+ btRecord.bufferAddress = &foundData;
+ btRecord.itemSize = sizeof(HFSPlusExtentRecord);
+ btRecord.itemCount = 1;
+
+ // Locate the existing record, then overwrite it in place.
+ err = BTSearchRecord(btFCB, &btIterator, kInvalidMRUCacheKey, &btRecord,
+ &btRecordSize, &btIterator);
+
+ if (err == noErr) {
+ BlockMoveData(extentData, &foundData, sizeof(HFSPlusExtentRecord));
+ err = BTReplaceRecord(btFCB, &btIterator, &btRecord, btRecordSize);
+ }
+ }
+ }
+
+ return err;
+}
+
+
+
+// Convert an on-disk HFS extent record (3 extents, 16-bit fields) to the
+// in-memory HFS Plus form (8 extents); the extra extents are zeroed.
+void HFSToHFSPlusExtents(
+ const HFSExtentRecord oldExtents,
+ HFSPlusExtentRecord newExtents)
+{
+ UInt32 i;
+
+ // copy the first 3 extents
+ newExtents[0].startBlock = oldExtents[0].startBlock;
+ newExtents[0].blockCount = oldExtents[0].blockCount;
+ newExtents[1].startBlock = oldExtents[1].startBlock;
+ newExtents[1].blockCount = oldExtents[1].blockCount;
+ newExtents[2].startBlock = oldExtents[2].startBlock;
+ newExtents[2].blockCount = oldExtents[2].blockCount;
+
+ // zero out the remaining ones
+ for (i = 3; i < kHFSPlusExtentDensity; ++i)
+ {
+ newExtents[i].startBlock = 0;
+ newExtents[i].blockCount = 0;
+ }
+}
+
+
+
+// Convert an in-memory HFS Plus extent record back to the on-disk HFS form.
+// Only the first 3 extents are representable; in debug builds, a non-empty
+// fourth extent is flagged as an internal error.
+OSErr HFSPlusToHFSExtents(
+ const HFSPlusExtentRecord oldExtents,
+ HFSExtentRecord newExtents)
+{
+ OSErr err;
+
+ err = noErr;
+
+ // copy the first 3 extents
+ newExtents[0].startBlock = oldExtents[0].startBlock;
+ newExtents[0].blockCount = oldExtents[0].blockCount;
+ newExtents[1].startBlock = oldExtents[1].startBlock;
+ newExtents[1].blockCount = oldExtents[1].blockCount;
+ newExtents[2].startBlock = oldExtents[2].startBlock;
+ newExtents[2].blockCount = oldExtents[2].blockCount;
+
+ #if DEBUG_BUILD
+ if (oldExtents[3].startBlock || oldExtents[3].blockCount) {
+ DebugStr("\pExtentRecord with > 3 extents is invalid for HFS");
+ err = fsDSIntErr;
+ }
+ #endif
+
+ return err;
+}
+
+
+
+
+// Copy the FCB's resident extent record into the caller's buffer.
+OSErr GetFCBExtentRecord(
+ const FCB *fcb,
+ HFSPlusExtentRecord extents)
+{
+
+ BlockMoveData(fcb->fcbExtents, extents, sizeof(HFSPlusExtentRecord));
+
+ return noErr;
+}
+
+
+
+//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ
+// Routine: MapFileBlockFromFCB
+//
+// Function: Determine if the given file offset is within the set of extents
+// stored in the FCB. If so, return the file allocation
+// block number of the start of the extent, volume allocation block number
+// of the start of the extent, and file allocation block number immediately
+// following the extent.
+//
+// Input: vcb - the volume containing the extents
+// fcb - the file that owns the extents
+// offset - desired offset in bytes
+//
+// Output: firstFABN - file alloc block number of start of extent
+// firstBlock - volume alloc block number of start of extent
+// nextFABN - file alloc block number of next extent
+//
+// Result: noErr = ok
+// fxRangeErr = beyond FCB's extents
+//ÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑÑ
+static OSErr MapFileBlockFromFCB(
+ const ExtendedVCB *vcb,
+ const FCB *fcb,
+ SInt64 offset, // Desired offset in bytes from start of file
+ UInt32 *firstFABN, // FABN of first block of found extent
+ UInt32 *firstBlock, // Corresponding allocation block number
+ UInt32 *nextFABN) // FABN of block after end of extent
+{
+ UInt32 index;
+ UInt32 offsetBlocks;
+ SInt64 temp64;
+
+ temp64 = offset / (SInt64)vcb->blockSize;
+ offsetBlocks = (UInt32)temp64;
+
+ if (vcb->vcbSigWord == kHFSSigWord) {
+ /* XXX SER Do we need to test for overflow values ???
*/ + UInt16 blockCount; + UInt16 currentFABN; + + currentFABN = 0; + + for (index=0; indexfcbExtents[index].blockCount; + + if (blockCount == 0) + return fxRangeErr; // ran out of extents! + + // Is it in this extent? + if (offsetBlocks < blockCount) { + *firstFABN = currentFABN; + *firstBlock = fcb->fcbExtents[index].startBlock; + currentFABN += blockCount; // faster to add these as UInt16 first, then extend to UInt32 + *nextFABN = currentFABN; + return noErr; // found the right extent + } + + // Not in current extent, so adjust counters and loop again + offsetBlocks -= blockCount; + currentFABN += blockCount; + } + } + else { + UInt32 blockCount; + UInt32 currentFABN; + + currentFABN = 0; + + for (index=0; indexfcbExtents[index].blockCount; + + if (blockCount == 0) + return fxRangeErr; // ran out of extents! + + // Is it in this extent? + if (offsetBlocks < blockCount) { + *firstFABN = currentFABN; + *firstBlock = fcb->fcbExtents[index].startBlock; + *nextFABN = currentFABN + blockCount; + return noErr; // found the right extent + } + + // Not in current extent, so adjust counters and loop again + offsetBlocks -= blockCount; + currentFABN += blockCount; + } + } + + // If we fall through here, the extent record was full, but the offset was + // beyond those extents. 
+ + return fxRangeErr; +} + + +//_________________________________________________________________________________ +// +// Routine: ExtentsAreIntegral +// +// Purpose: Ensure that each extent can hold an integral number of nodes +// Called by the NodesAreContiguous function +//_________________________________________________________________________________ + +static Boolean ExtentsAreIntegral( + const HFSPlusExtentRecord extentRecord, + UInt32 mask, + UInt32 *blocksChecked, + Boolean *checkedLastExtent) +{ + UInt32 blocks; + UInt32 extentIndex; + + *blocksChecked = 0; + *checkedLastExtent = false; + + for(extentIndex = 0; extentIndex < kHFSPlusExtentDensity; extentIndex++) + { + blocks = extentRecord[extentIndex].blockCount; + + if ( blocks == 0 ) + { + *checkedLastExtent = true; + break; + } + + *blocksChecked += blocks; + + if (blocks & mask) + return false; + } + + return true; +} + + +//_________________________________________________________________________________ +// +// Routine: NodesAreContiguous +// +// Purpose: Ensure that all b-tree nodes are contiguous on disk +// Called by BTOpenPath during volume mount +//_________________________________________________________________________________ + +Boolean NodesAreContiguous( + ExtendedVCB *vcb, + FCB *fcb, + UInt32 nodeSize) +{ + UInt32 mask; + UInt32 startBlock; + UInt32 blocksChecked; + UInt32 hint; + HFSPlusExtentKey key; + HFSPlusExtentRecord extents; + OSErr result; + Boolean lastExtentReached; + + + if (vcb->blockSize >= nodeSize) + return TRUE; + + mask = (nodeSize / vcb->blockSize) - 1; + + // check the local extents + (void) GetFCBExtentRecord(fcb, extents); + if ( !ExtentsAreIntegral(extents, mask, &blocksChecked, &lastExtentReached) ) + return FALSE; + + if (lastExtentReached || (SInt64)((SInt64)blocksChecked * (SInt64)vcb->blockSize) >= fcb->fcbPLen) + return TRUE; + + startBlock = blocksChecked; + + // check the overflow extents (if any) + while ( !lastExtentReached ) + { + result = 
FindExtentRecord(vcb, kDataForkType, H_FILEID(fcb), startBlock, FALSE, &key, extents, &hint); + if (result) break; + + if ( !ExtentsAreIntegral(extents, mask, &blocksChecked, &lastExtentReached) ) + return FALSE; + + startBlock += blocksChecked; + } + + return TRUE; +} + diff --git a/bsd/hfs/hfscommon/Misc/GenericMRUCache.c b/bsd/hfs/hfscommon/Misc/GenericMRUCache.c new file mode 100644 index 000000000..1ba7db973 --- /dev/null +++ b/bsd/hfs/hfscommon/Misc/GenericMRUCache.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: GenericMRUCache.c + + Contains: Contains cache accessor routines based on MRU / LRU ordering. + + Version: HFS+ 1.0 + + Copyright: © 1997-1998 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Deric Horn + + Other Contact: Don Brady + + Technology: HFS+ + + Writers: + + (DSH) Deric Horn + + Change History (most recent first): + + 1/29/98 DSH Add TrashMRUCache for TrashAllFSCaches API support. 
+ 7/25/97 DSH first checked in +*/ + +#include "../../hfs_macos_defs.h" +#include "../headers/FileMgrInternal.h" + +enum { + // error codes + errNotInCache = -123, + errInvalidKey = -124 +}; + + +struct CacheBlock { + struct CacheBlock *nextMRU; // next node in MRU order + struct CacheBlock *nextLRU; // next node in LRU order + UInt32 flags; // status flags + UInt32 key; // comparrison Key + char buffer[1]; // user defineable data +}; +typedef struct CacheBlock CacheBlock; + +struct CacheGlobals { + UInt32 cacheBlockSize; // Size of CacheBlock structure including the buffer + UInt32 cacheBufferSize; // Size of cache buffer + UInt32 numCacheBlocks; // Number of blocks in cache + CacheBlock *mru; + CacheBlock *lru; +}; +typedef struct CacheGlobals CacheGlobals; + + +// +// Internal routines +// +static void InsertAsMRU ( CacheGlobals *cacheGlobals, CacheBlock *cacheBlock ); +static void InsertAsLRU ( CacheGlobals *cacheGlobals, CacheBlock *cacheBlock ); + + +// +// Diagram of Cache structures +// +// _______ ________ ________ ________ +// |data | | buff | | buff | | buff | +// | mru |-----> | nMRU |-----> | nMRU |--> °°° --->| nMRU |-->€ +// | lru |-\ €<-| nLRU | <-----| nLRU |<-- °°° <---| nLRU | +// ------- \ -------- -------- -------- +// \ | +// \-----------------------------------------/ +// CacheGlobals CacheBlock's + + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: InitMRUCache +// +// Function: Allocates cache, and initializes all the cache structures. 
+//
+//_________________________________________________________________________________
+OSErr InitMRUCache( UInt32 bufferSize, UInt32 numCacheBlocks, Ptr *cachePtr )
+{
+	OSErr			err;
+	short			i, lastBuffer;
+	CacheBlock		*cacheBlock;
+	CacheGlobals	*cacheGlobals;
+	//	Each cache block is the fixed CacheBlock header plus a caller-sized buffer.
+	UInt32			cacheBlockSize	= offsetof( CacheBlock, buffer ) + bufferSize;
+	
+	//	A single zero-filled system allocation holds the globals followed by
+	//	all numCacheBlocks cache blocks, laid out contiguously.
+	cacheGlobals = (CacheGlobals *) NewPtrSysClear( sizeof( CacheGlobals ) + ( numCacheBlocks * cacheBlockSize ) );
+	err = MemError();
+	
+	if ( err == noErr )
+	{
+		cacheGlobals->cacheBlockSize = cacheBlockSize;
+		cacheGlobals->cacheBufferSize = bufferSize;
+		cacheGlobals->numCacheBlocks = numCacheBlocks;
+		
+		lastBuffer = numCacheBlocks - 1;		//	last buffer number, since they start at 0
+		
+		//	Initialize the LRU order for the cache (last block in the allocation)
+		cacheGlobals->lru = (CacheBlock *)((Ptr)cacheGlobals + sizeof( CacheGlobals ) + (lastBuffer * cacheBlockSize));
+		cacheGlobals->lru->nextMRU = nil;
+	
+		//	Initialize the MRU order for the cache
+		cacheGlobals->mru = (CacheBlock *)( (Ptr)cacheGlobals + sizeof( CacheGlobals ) );	// points to 1st cache block
+		cacheGlobals->mru->nextLRU = nil;
+	
+		//	Traverse nodes, setting initial mru, lru, and default values.
+		//	NOTE(review): both loop headers in this copy of the file were corrupted
+		//	by text extraction (the "< lastBuffer; i++ ) { cacheBlock->" fragment was
+		//	collapsed away); they are reconstructed here -- confirm against the
+		//	original GenericMRUCache.c before relying on this text.
+		for ( i = 0, cacheBlock = cacheGlobals->mru; i < lastBuffer; i++ )
+		{
+			cacheBlock->key = kInvalidMRUCacheKey;	// initialize key to illegal while we're at it
+			cacheBlock->flags = 0;
+			cacheBlock->nextMRU = (CacheBlock *) ( (Ptr)cacheBlock + cacheBlockSize );
+			cacheBlock = cacheBlock->nextMRU;
+		}
+		//	And the last Block
+		cacheGlobals->lru->key = kInvalidMRUCacheKey;
+		cacheBlock->flags = 0;
+		
+		//	Link the nextLRU chain as the mirror image of the nextMRU chain,
+		//	walking backwards from the last block toward the first.
+		for ( i = 0, cacheBlock = cacheGlobals->lru; i < lastBuffer; i++ )
+		{
+			cacheBlock->nextLRU = (CacheBlock *) ( (Ptr)cacheBlock - cacheBlockSize );
+			cacheBlock = cacheBlock->nextLRU;
+		}
+		
+		*cachePtr = (Ptr) cacheGlobals;		// return cacheGlobals to user
+	}
+	else
+	{
+		*cachePtr = nil;
+	}
+	
+	return( err );
+}
+
+
+//_________________________________________________________________________________
+// Routine:	DisposeMRUCache
+//
+// Function: Dispose of all memory allocated by the cache
+//
+//_________________________________________________________________________________
+OSErr DisposeMRUCache( Ptr cachePtr )
+{
+	OSErr	err;
+	
+	//	InitMRUCache made a single allocation covering the globals and every
+	//	cache block, so one DisposePtr releases the whole cache.
+	DisposePtr( cachePtr );
+	err = MemError();
+	
+	return( err );
+}
+
+
+//_________________________________________________________________________________
+// Routine:	TrashMRUCache
+//
+// Function: Invalidates all entries in the MRU cache pointed to by cachePtr.
+//		The blocks stay linked in their current MRU/LRU order; only their
+//		flags and keys are reset, so the storage is reused for new entries.
+//
+//_________________________________________________________________________________
+void TrashMRUCache( Ptr cachePtr )
+{
+	CacheGlobals	*cacheGlobals = (CacheGlobals *) cachePtr;
+	CacheBlock		*cacheBlock;
+	
+	//	Walk the whole cache in MRU order, invalidating each block.
+	for ( cacheBlock = cacheGlobals->mru ; cacheBlock != nil ; cacheBlock = cacheBlock->nextMRU )
+	{
+		cacheBlock->flags = 0;					//	Clear the flags
+		cacheBlock->key = kInvalidMRUCacheKey;	//	Make it an illegal value
+	}
+}
+
+
+//_________________________________________________________________________________
+// Routine:	GetMRUCacheBlock
+//
+// Function: Return buffer associated with the passed in key.
+// Search the cache in MRU order +// € We can insert the found cache block at the head of mru automatically +// +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +OSErr GetMRUCacheBlock( UInt32 key, Ptr cachePtr, Ptr *buffer ) +{ + CacheBlock *cacheBlock; + CacheGlobals *cacheGlobals = (CacheGlobals *) cachePtr; + +// if ( key == kInvalidMRUCacheKey ) // removed for performance +// return( errInvalidKey ); + + for ( cacheBlock = cacheGlobals->mru ; (cacheBlock != nil) && (cacheBlock->key != kInvalidMRUCacheKey) ; cacheBlock = cacheBlock->nextMRU ) + { + if ( cacheBlock->key == key ) + { + InsertAsMRU( cacheGlobals, cacheBlock ); + *buffer = (Ptr) cacheBlock->buffer; + return( noErr ); + } + } + + return( errNotInCache ); +} + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: InvalidateMRUCacheBlock +// +// Function: Place the cache block at the head of the lru queue and mark it invalid +// +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +void InvalidateMRUCacheBlock( Ptr cachePtr, Ptr buffer ) +{ + CacheGlobals *cacheGlobals = (CacheGlobals *) cachePtr; + CacheBlock *cacheBlock; + + cacheBlock = (CacheBlock *) (buffer - offsetof( CacheBlock, buffer )); + cacheBlock->flags = 0; // Clear the flags + cacheBlock->key = kInvalidMRUCacheKey; // Make it an illegal value + InsertAsLRU( cacheGlobals, cacheBlock ); +} + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: InsertMRUCacheBlock +// +// Function: Place the CacheBlock associated with the passed in key at the +// head of the mru queue and replace the buffer with the passed in buffer +// +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +void InsertMRUCacheBlock( Ptr cachePtr, UInt32 key, Ptr buffer ) +{ + CacheBlock *cacheBlock = NULL; + Ptr cacheBuffer; + OSErr err; + CacheGlobals *cacheGlobals = (CacheGlobals *) 
cachePtr; + UInt32 cacheBufferSize; + + err = GetMRUCacheBlock( key, cachePtr, &cacheBuffer ); + if ( err == errNotInCache ) + cacheBlock = cacheGlobals->lru; + else if ( err == noErr ) + cacheBlock = (CacheBlock *) (cacheBuffer - offsetof( CacheBlock, buffer )); + + cacheBufferSize = cacheGlobals->cacheBufferSize; + if ( cacheBufferSize == sizeof(UInt32) ) + *(UInt32*)cacheBlock->buffer = *(UInt32*)buffer; + else + BlockMoveData( buffer, cacheBlock->buffer, cacheBufferSize ); + InsertAsMRU( cacheGlobals, cacheBlock ); + + cacheBlock->flags = 0; + cacheBlock->key = key; +} + + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: InsertMRUCacheBlock +// +// Function: Moves cache block to head of mru order in double linked list of cached blocks +// +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +static void InsertAsMRU ( CacheGlobals *cacheGlobals, CacheBlock *cacheBlock ) +{ + CacheBlock *swapBlock; + + if ( cacheGlobals->mru != cacheBlock ) // if it's not already the mru cacheBlock + { + swapBlock = cacheGlobals->mru; // put it in the front of the double queue + cacheGlobals->mru = cacheBlock; + cacheBlock->nextLRU->nextMRU = cacheBlock->nextMRU; + if ( cacheBlock->nextMRU != nil ) + cacheBlock->nextMRU->nextLRU = cacheBlock->nextLRU; + else + cacheGlobals->lru= cacheBlock->nextLRU; + cacheBlock->nextMRU = swapBlock; + cacheBlock->nextLRU = nil; + swapBlock->nextLRU = cacheBlock; + } +} + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: InsertMRUCacheBlock +// +// Function: Moves cache block to head of lru order in double linked list of cached blocks +// +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +static void InsertAsLRU ( CacheGlobals *cacheGlobals, CacheBlock *cacheBlock ) +{ + CacheBlock *swapBlock; + + if ( cacheGlobals->lru != cacheBlock ) + { + swapBlock = cacheGlobals->lru; + 
cacheGlobals->lru = cacheBlock; + cacheBlock->nextMRU->nextLRU = cacheBlock->nextLRU; + if ( cacheBlock->nextLRU != nil ) + cacheBlock->nextLRU->nextMRU = cacheBlock->nextMRU; + else + cacheGlobals->mru= cacheBlock->nextMRU; + cacheBlock->nextLRU = swapBlock; + cacheBlock->nextMRU = nil; + swapBlock->nextMRU = cacheBlock; + } +} + + diff --git a/bsd/hfs/hfscommon/Misc/VolumeAllocation.c b/bsd/hfs/hfscommon/Misc/VolumeAllocation.c new file mode 100644 index 000000000..7a6417dd7 --- /dev/null +++ b/bsd/hfs/hfscommon/Misc/VolumeAllocation.c @@ -0,0 +1,1758 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: VolumeAllocation.c + + Contains: Routines for accessing and modifying the volume bitmap. + + Version: HFS Plus 1.0 + + Copyright: © 1996-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Mark Day + + Other Contact: Greg Parks + + Technology: HFS+ + + Writers: + + (djb) Don Brady + (DSH) Deric Horn + (msd) Mark Day + + Change History (most recent first): + 1/22/2000 djb Removed unused BlockCheck and BlockVerifyAllocated routines. 
+ 4/27/98 djb Remove references to unused/legacy vcbFreeBks. + 4/27/98 djb Remove unneccessary DebugStr in BlockVerifyAllocated. + 4/17/98 djb Add VCB locking. + 4/13/98 djb Add RequireFileLock checking to ReadBitmapBlock. + 3/31/98 djb Sync up with final HFSVolumes.h header file. + + <12> 1 0/31/97 DSH Modify BlockVerifyAllocated() so DFA can call without actually + writing to the disk. + 10/20/97 msd The way BlockAllocate rounds up to a multiple of the clump size + is wrong. ExtendFileC should do the round-up and pass the result + into BlockAllocate. Removed the fcb parameter to BlockAllocate; + added a bytesMaximum parameter. + 10/17/97 msd Conditionalize DebugStrs. + 9/4/97 djb Add logging to BlockAllocate. + 9/4/97 msd Add a histogram of allocation sizes. Use DEBUG_BUILD instead of + VSM_DEBUG to generate DebugStr messages. + 8/14/97 msd Bug 1662332. Don't mark blocks dirty in UpdateFreeCount. In + BlockVerifyAllocated, only mark blocks dirty if they've actually + changed. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 7/8/97 DSH Loading PrecompiledHeaders from define passed in on C line + 6/12/97 msd Export BlockAllocateAny and UpdateVCBFreeBlks. + <3> 5/8/97 DSH Added comments and ascii diagram of new BlockFindContiguous() + algorithm. + <2> 5/7/97 DSH New faster BlockFindContiguous algorithm. It searches backwards + until a dirty bit is found instead of forwards. + 4/25/97 djb first checked in + + 4/14/97 msd Fix UpdateVCBFreeBlks so free space calculation doesn't overflow + on volumes bigger than 4GB. + 4/4/97 djb Get in sync with volume format changes. + 1/27/97 msd Speed up BlockCheck and UpdateFreeCount. Removed DebugStr from + BlockCheck; there are now DebugStr's in BlockVerifyAllocated, + before the bitmap gets fixed (so potential problems can be + debugged easier). Adjusted comments about internal routines. + Changed names of "Fast" routines back to their original names + (since the originals are now removed). 
+ 1/24/97 msd Speed up allocation and deallocation. + 1/21/97 msd Add instrumentation for function entry/exit. BlockAllocate and + ReadBitMapBlock use the event tag to log bytes requested and + block number (respectively). + 1/15/97 djb Add HFS+ supprt to BlockCheck (for MountCheck). + 1/13/97 DSH Use vcb->nextAllocation instead of vcbAllocPtr. + 1/9/97 djb UpdateVCBFreeBlks is not converting correctly. + 1/6/97 djb Use vcb's allocationsRefNum to access allocation file. + 1/2/97 DSH Added UpdateVCBFreeBlks() to update vcbFreeBks whenever we + update vcb->freeblocks. + 12/19/96 DSH All refs to VCB are now refs to ExtendedVCB + 12/12/96 msd DivideAndRoundUp should not be declared as static. + 12/10/96 msd Check PRAGMA_LOAD_SUPPORTED before loading precompiled headers. + 12/4/96 DSH PrecompiledHeaders + 11/27/96 djb Added AllocateFreeSpace for HFS wrapper support. + 11/27/96 msd Changed ReadBitmapBlock to read from HFS+ allocation file. + Temporarily uses the vcbVBMSt field of the VCB as the allocation + file's refnum until extended VCB changes are checked in. + 11/26/96 msd VSM and FileExtentMapping routines use FCB instead of FCBRec. + 11/20/96 DSH Changed a parameter in GetBlock_glue, so I also changed the + caller. + 11/20/96 msd Include FilesInternal.h. Remove definition of MarkVCBDirty + (since it is now in FilesInternal.h). + 11/12/96 msd Need to bound allocations to be within the last allocation block + of the volume (function AllocateAt). + 11/11/96 msd first checked in +*/ + +/* +Public routines: + BlockAllocate + Allocate space on a volume. Can allocate space contiguously. + If not contiguous, then allocation may be less than what was + asked for. Returns the starting block number, and number of + blocks. (Will only do a single extent???) + BlockDeallocate + Deallocate a contiguous run of allocation blocks. + UpdateFreeCount + Computes the number of free allocation blocks on a volume. + The vcb's free block count is updated. 
+ + AllocateFreeSpace + Allocates all the remaining free space (used for embedding HFS+ volumes). + + BlockAllocateAny + Find and allocate a contiguous range of blocks up to a given size. The + first range of contiguous free blocks found are allocated, even if there + are fewer blocks than requested (and even if a contiguous range of blocks + of the given size exists elsewhere). + + UpdateVCBFreeBlks + Given an ExtenddVCB, calculate the vcbFreeBks value + so that vcbFreeBks*vcbAlBlkSiz == freeBlocks*blockSize. + +Internal routines: + BlockMarkFree + Mark a contiguous range of blocks as free. The corresponding + bits in the volume bitmap will be cleared. + BlockMarkAllocated + Mark a contiguous range of blocks as allocated. The cor- + responding bits in the volume bitmap are set. Also tests to see + if any of the blocks were previously unallocated. + FindContiguous + Find a contiguous range of blocks of a given size. The caller + specifies where to begin the search (by block number). The + block number of the first block in the range is returned. + BlockAllocateContig + Find and allocate a contiguous range of blocks of a given size. If + a contiguous range of free blocks of the given size isn't found, then + the allocation fails (i.e. it is "all or nothing"). + ReadBitmapBlock + Given an allocation block number, read the bitmap block that + contains that allocation block into a caller-supplied buffer. 
+*/ + +#include "../../hfs_macos_defs.h" + +#include +#include +#include + +#include "../../hfs.h" +#include "../../hfs_dbg.h" +#include "../../hfs_format.h" +#include "../../hfs_endian.h" + +#include "../headers/FileMgrInternal.h" + +#include "../headers/HFSInstrumentation.h" + +#define EXPLICIT_BUFFER_RELEASES 1 + +enum { + kBitsPerByte = 8, + kBitsPerWord = 32, + kWordsPerBlock = 128, + kBytesPerBlock = 512, + kBitsPerBlock = 4096, + + kBitsWithinWordMask = kBitsPerWord-1, + kBitsWithinBlockMask = kBitsPerBlock-1, + kWordsWithinBlockMask = kWordsPerBlock-1, + + kExtentsPerRecord = 3 +}; + +#define kLowBitInWordMask 0x00000001ul +#define kHighBitInWordMask 0x80000000ul +#define kAllBitsSetInWord 0xFFFFFFFFul + + +static OSErr ReadBitmapBlock( + ExtendedVCB *vcb, + UInt32 block, + UInt32 **buffer); + +static OSErr BlockAllocateContig( + ExtendedVCB *vcb, + UInt32 startingBlock, + UInt32 minBlocks, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks); + +static OSErr BlockFindContiguous( + ExtendedVCB *vcb, + UInt32 startingBlock, + UInt32 endingBlock, + UInt32 minBlocks, + UInt32 maxBlocks, + UInt32 *actualStartBlock, + UInt32 *actualNumBlocks); + +static OSErr BlockMarkAllocated( + ExtendedVCB *vcb, + UInt32 startingBlock, + UInt32 numBlocks); + +static OSErr BlockMarkFree( + ExtendedVCB *vcb, + UInt32 startingBlock, + UInt32 numBlocks); + + +/* +;________________________________________________________________________________ +; +; Routine: BlkAlloc +; +; Function: Allocate space on a volume. If contiguous allocation is requested, +; at least the requested number of bytes will be allocated or an +; error will be returned. If contiguous allocation is not forced, +; the space will be allocated at the first free fragment following +; the requested starting allocation block. If there is not enough +; room there, a block of less than the requested size will be +; allocated. 
+; +; If the requested starting block is 0 (for new file allocations), +; the volume's allocation block pointer will be used as a starting +; point. +; +; All requests will be rounded up to the next highest clump size, as +; indicated in the file's FCB. +; +; Input Arguments: +; vcb - Pointer to ExtendedVCB for the volume to allocate space on +; fcb - Pointer to FCB for the file for which storage is being allocated +; startingBlock - Preferred starting allocation block, 0 = no preference +; forceContiguous - Force contiguous flag - if bit 0 set (NE), allocation is contiguous +; or an error is returned +; bytesRequested - Number of bytes requested. If the allocation is non-contiguous, +; less than this may actually be allocated +; bytesMaximum - The maximum number of bytes to allocate. If there is additional free +; space after bytesRequested, then up to bytesMaximum bytes should really +; be allocated. (Used by ExtendFileC to round up allocations to a multiple +; of the file's clump size.) +; +; Output: +; (result) - Error code, zero for successful allocation +; *startBlock - Actual starting allocation block +; *actualBlocks - Actual number of allocation blocks allocated +; +; Side effects: +; The volume bitmap is read and updated; the volume bitmap cache may be changed. +; +; Modification history: +; <06Oct85> PWD Changed to check for errors after calls to ReadBM and NextWord +; Relocated call to MarkBlock in allocation loop +; Changed to call NextBit +; <21Oct85> PWD Changed to check VCBFreeBks before attempting to allocate any block. +; Speed up scan for free space by checking for all 1's. 
+;________________________________________________________________________________ +*/ + +OSErr BlockAllocate ( + ExtendedVCB *vcb, /* which volume to allocate space on */ + UInt32 startingBlock, /* preferred starting block, or 0 for no preference */ + SInt64 bytesRequested, /* desired number of BYTES to allocate */ + SInt64 bytesMaximum, /* maximum number of bytes to allocate */ + Boolean forceContiguous, /* non-zero to force contiguous allocation and to force */ + /* bytesRequested bytes to actually be allocated */ + UInt32 *actualStartBlock, /* actual first block of allocation */ + UInt32 *actualNumBlocks) /* number of blocks actually allocated; if forceContiguous */ + /* was zero, then this may represent fewer than bytesRequested */ + /* bytes */ +{ + OSErr err; + UInt32 minBlocks; // minimum number of allocation blocks requested + UInt32 maxBlocks; // number of allocation blocks requested, rounded to clump size + Boolean updateAllocPtr = false; // true if nextAllocation needs to be updated + + LogStartTime(kTraceBlockAllocate); + +#if HFSInstrumentation + InstSplitHistogramClassRef histogram; + InstTraceClassRef trace; + InstEventTag eventTag; + + err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:BlockAllocate", 'hfs+', kInstEnableClassMask, &trace); + if (err != noErr) DebugStr("\pError from InstCreateTraceClass"); + + err = InstCreateSplitHistogramClass(kInstRootClassRef, "HFS:VSM:BlockAllocate size", 0, 512, 16384, 262144, 16384, + kInstEnableClassMask, &histogram); + if (err != noErr) DebugStr("\pError from InstCreateHistogramClass"); + + eventTag = bytesRequested; // a cheap way to get bytesRequested into the log + InstLogTraceEvent( trace, eventTag, kInstStartEvent); + InstUpdateHistogram( histogram, bytesRequested, 1); +#endif + + // + // Initialize outputs in case we get an error + // + *actualStartBlock = 0; + *actualNumBlocks = 0; + + // + // Compute the number of allocation blocks requested, and maximum + // + minBlocks = 
FileBytesToBlocks(bytesRequested, vcb->blockSize); + maxBlocks = FileBytesToBlocks(bytesMaximum, vcb->blockSize); + + // + // If the disk is already full, don't bother. + // + if (vcb->freeBlocks == 0) { + err = dskFulErr; + goto Exit; + } + if (forceContiguous && vcb->freeBlocks < minBlocks) { + err = dskFulErr; + goto Exit; + } + + // + // If caller didn't specify a starting block number, then use the volume's + // next block to allocate from. + // + if (startingBlock == 0) { + VCB_LOCK(vcb); + startingBlock = vcb->nextAllocation; + VCB_UNLOCK(vcb); + updateAllocPtr = true; + } + + // + // If the request must be contiguous, then find a sequence of free blocks + // that is long enough. Otherwise, find the first free block. + // + if (forceContiguous) { + err = BlockAllocateContig(vcb, startingBlock, minBlocks, maxBlocks, actualStartBlock, actualNumBlocks); + } else { + err = BlockAllocateAny(vcb, startingBlock, vcb->totalBlocks, maxBlocks, actualStartBlock, actualNumBlocks); + if (err == dskFulErr) { + err = BlockAllocateAny(vcb, 0, startingBlock, maxBlocks, actualStartBlock, actualNumBlocks); + }; + } + + if (err == noErr) { + // + // If we used the volume's roving allocation pointer, then we need to update it. + // Adding in the length of the current allocation might reduce the next allocate + // call by avoiding a re-scan of the already allocated space. However, the clump + // just allocated can quite conceivably end up being truncated or released when + // the file is closed or its EOF changed. Leaving the allocation pointer at the + // start of the last allocation will avoid unnecessary fragmentation in this case. 
+ // + VCB_LOCK(vcb); + + if (updateAllocPtr) + vcb->nextAllocation = *actualStartBlock; + + // + // Update the number of free blocks on the volume + // + vcb->freeBlocks -= *actualNumBlocks; + VCB_UNLOCK(vcb); + + UpdateVCBFreeBlks( vcb ); + MarkVCBDirty(vcb); + } + +Exit: + +#if HFSInstrumentation + InstLogTraceEvent( trace, eventTag, kInstEndEvent); +#endif + LogEndTime(kTraceBlockAllocate, err); + + return err; +} + + +/* +;________________________________________________________________________________ +; +; Routine: UpdateVCBFreeBlks +; +; Function: Whenever the freeBlocks field in the ExtendedVCB is updated, +; we must also recalculate the (UInt16) vcbFreeBks field in the +; traditional HFS VCB structure. +; +; Input Arguments: +; vcb - Pointer to ExtendedVCB for the volume to free space on +;________________________________________________________________________________ +*/ +void UpdateVCBFreeBlks( ExtendedVCB *vcb ) +{ + #if DEBUG_BUILD + if ( vcb->vcbSigWord == kHFSSigWord && vcb->freeBlocks > 0xFFFF ) + DebugStr("\p UpdateVCBFreeBlks: freeBlocks overflow!"); + #endif +} + + +/* +;________________________________________________________________________________ +; +; Routine: BlkDealloc +; +; Function: Update the bitmap to deallocate a run of disk allocation blocks +; +; Input Arguments: +; vcb - Pointer to ExtendedVCB for the volume to free space on +; firstBlock - First allocation block to be freed +; numBlocks - Number of allocation blocks to free up (must be > 0!) +; +; Output: +; (result) - Result code +; +; Side effects: +; The volume bitmap is read and updated; the volume bitmap cache may be changed. 
+;
+; Modification history:
+;
+; <06Oct85>  PWD  Changed to check for error after calls to ReadBM and NextWord
+;                 Now calls NextBit to read successive bits from the bitmap
+;________________________________________________________________________________
+*/
+
+OSErr BlockDeallocate (
+	ExtendedVCB		*vcb,			//	Which volume to deallocate space on
+	UInt32			firstBlock,		//	First block in range to deallocate
+	UInt32			numBlocks)		//	Number of contiguous blocks to deallocate
+{
+	OSErr			err;
+	
+#if HFSInstrumentation
+	InstTraceClassRef	trace;
+	InstEventTag		eventTag;
+
+	err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:BlockDeallocate", 'hfs+', kInstEnableClassMask, &trace);
+	if (err != noErr) DebugStr("\pError from InstCreateTraceClass");
+
+	eventTag = InstCreateEventTag();
+	InstLogTraceEvent( trace, eventTag, kInstStartEvent);
+#endif
+
+	//
+	//	If no blocks to deallocate, then exit early
+	//	(numBlocks == 0 is treated as a successful no-op, not an error).
+	//
+	if (numBlocks == 0) {
+		err = noErr;
+		goto Exit;
+	}
+
+	//
+	//	Call internal routine to free the sequence of blocks
+	//	(clears the corresponding bits in the volume bitmap).
+	//
+	err = BlockMarkFree(vcb, firstBlock, numBlocks);
+	if (err)
+		goto Exit;
+
+	//
+	//	Update the volume's free block count, and mark the VCB as dirty.
+	//	freeBlocks is adjusted under the VCB lock; the derived vcbFreeBks
+	//	field is recomputed afterwards via UpdateVCBFreeBlks.
+	//
+	VCB_LOCK(vcb);
+	vcb->freeBlocks += numBlocks;
+	VCB_UNLOCK(vcb);
+	UpdateVCBFreeBlks( vcb );
+	MarkVCBDirty(vcb);
+
+Exit:
+
+#if HFSInstrumentation
+	InstLogTraceEvent( trace, eventTag, kInstEndEvent);
+#endif
+
+	return err;
+}
+
+
+/*
+;_______________________________________________________________________
+;
+; Routine:	UpdateFree
+; Arguments:	vcb -- ExtendedVCB for volume
+;
+; Called By:	MountVol
+; Function:	This routine is used as part of the MountVol consistency check
+;		to figure out the number of free allocation blocks in the volume.
+;
+; Modification History:
+;	<08Sep85>  LAK		New today.
+;	<06Oct85>  PWD		Added explicit check for errors after calls to ReadBM, NextWord
+;				Now calls NextBit.
;_______________________________________________________________________
*/

OSErr UpdateFreeCount (
	ExtendedVCB		*vcb)	//	Volume whose free block count should be updated
{
	OSErr			err;
	register UInt32	wordsLeft;		//	Number of words left in this bitmap block
	register UInt32	numBlocks;		//	Number of blocks left to scan
	register UInt32	freeCount;		//	Running count of free blocks found so far
	register UInt32	temp;			//	Scratch word for bit counting
	UInt32			blockNum;		//	Block number of first block in this bitmap block
	UInt32			*buffer = NULL;	//	Pointer to bitmap block
	register UInt32	*currentWord;	//	Pointer to current word in bitmap block

#if HFSInstrumentation
	InstTraceClassRef	trace;
	InstEventTag		eventTag;

	err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:UpdateFreeCount", 'hfs+', kInstEnableClassMask, &trace);
	if (err != noErr) DebugStr("\pError from InstCreateTraceClass");

	eventTag = InstCreateEventTag();
	InstLogTraceEvent( trace, eventTag, kInstStartEvent);
#endif

	//
	//	Pre-read the first bitmap block
	//

	err = ReadBitmapBlock(vcb, 0, &buffer);
	if (err != noErr) goto Exit;

	//
	//	Initialize buffer stuff
	//
	currentWord = buffer;
	wordsLeft = kWordsPerBlock;
	numBlocks = vcb->totalBlocks;
	freeCount = 0;
	blockNum = 0;

	//
	//	Scan whole words first.  Counting set bits is byte-order independent,
	//	so this loop works on the raw (big-endian, on-disk order) words and
	//	needs no SWAP_BE32; only the bit-ordered remainder loop below does.
	//

	while (numBlocks >= kBitsPerWord) {
		//	See if it's time to move to the next bitmap block
		if (wordsLeft == 0) {
			//	Read in the next bitmap block
			blockNum += kBitsPerBlock;		//	generate a block number in the next bitmap block

#if EXPLICIT_BUFFER_RELEASES
			err = RelBlock_glue((Ptr)buffer, rbDefault);
			if (err != noErr) goto Exit;
			buffer = NULL;
#endif
			err = ReadBitmapBlock(vcb, blockNum, &buffer);
			if (err != noErr) goto Exit;

			//	Readjust currentWord, wordsLeft
			currentWord = buffer;
			wordsLeft = kWordsPerBlock;
		}

		//	We count free blocks by inverting the word in the bitmap and counting set bits.
		temp = ~(*currentWord);
		while (temp) {
			++freeCount;
			temp &= temp-1;		//	this clears least significant bit that is currently set
								//	(Kernighan's bit-count trick: one iteration per set bit)
		}

		numBlocks -= kBitsPerWord;
		++currentWord;			//	move to next word
		--wordsLeft;			//	one less word left in this block
	}

	//
	//	Check any remaining blocks (fewer than a whole word's worth).
	//

	if (numBlocks != 0) {
		if (wordsLeft == 0) {
			//	Read in the next bitmap block
			blockNum += kBitsPerBlock;		//	generate a block number in the next bitmap block

#if EXPLICIT_BUFFER_RELEASES
			err = RelBlock_glue((Ptr)buffer, rbDefault);
			if (err != noErr) goto Exit;
			buffer = NULL;
#endif
			err = ReadBitmapBlock(vcb, blockNum, &buffer);
			if (err != noErr) goto Exit;

			//	Readjust currentWord, wordsLeft
			currentWord = buffer;
			wordsLeft = kWordsPerBlock;
		}

		//	We count free blocks by inverting the word in the bitmap and counting set bits.
		//	Here the bits must be examined in on-disk (MSB-first) order, so the word is
		//	swapped to host order before the high-bit scan.
		temp = SWAP_BE32 (~(*currentWord));
		while (numBlocks != 0) {
			if (temp & kHighBitInWordMask)
				++freeCount;
			temp <<= 1;
			--numBlocks;
		}
	}

	//	Publish the recomputed free count under the VCB lock, then run the
	//	debug-only vcbFreeBks overflow check.
	VCB_LOCK(vcb);
	vcb->freeBlocks = freeCount;
	VCB_UNLOCK(vcb);
	UpdateVCBFreeBlks( vcb );

Exit:

#if EXPLICIT_BUFFER_RELEASES
	if (buffer) {
		(void)RelBlock_glue((Ptr)buffer, rbDefault);	/*	Ignore any additional errors */
	};
#endif

#if HFSInstrumentation
	InstLogTraceEvent( trace, eventTag, kInstEndEvent);
#endif

	return err;
}



/*
;_______________________________________________________________________
;
; Routine:	AllocateFreeSpace
; Arguments:	 vcb -- ExtendedVCB for volume
;
; Called By:	 HFSDiskInitComponent
; Function: This routine is used as part of DiskInit to create an
;		 embedded HFS+ volume.
+; +; Note: Assumes that the free space is contiguous (true for a freshly erased disk) +;_______________________________________________________________________ +*/ + +OSErr AllocateFreeSpace ( + ExtendedVCB *vcb, // Volume whose free space is about to be expropriated + UInt32 *startBlock, // return where free space starts + UInt32 *actualBlocks) // return the number of blocks in free space +{ + OSErr err; + + err = BlockAllocateAny(vcb, 0, vcb->totalBlocks, vcb->freeBlocks, startBlock, actualBlocks); + + if (err == noErr) { + VCB_LOCK(vcb); + vcb->freeBlocks = 0; // sorry, no more blocks left! + VCB_UNLOCK(vcb); + MarkVCBDirty(vcb); + } + + return err; +} + +/* +;_______________________________________________________________________ +; +; Routine: FileBytesToBlocks +; +; Function: Divide numerator by denominator, rounding up the result if there +; was a remainder. This is frequently used for computing the number +; of whole and/or partial blocks used by some count of bytes. +; Actuall divides a 64 bit by a 32 bit into a 32bit result +; +; CAREFULL!!! THIS CAN CAUSE OVERFLOW....USER BEWARE!!! +;_______________________________________________________________________ +*/ +UInt32 FileBytesToBlocks( + SInt64 numerator, + UInt32 denominator) +{ + UInt32 quotient; + + quotient = (UInt32)(numerator / denominator); + if (quotient * denominator != numerator) + quotient++; + + return quotient; +} + + + +/* +;_______________________________________________________________________ +; +; Routine: ReadBitmapBlock +; +; Function: Read in a bitmap block corresponding to a given allocation +; block. Return a pointer to the bitmap block. 
;
; Inputs:
;		vcb			--	Pointer to ExtendedVCB
;		block		--	Allocation block whose bitmap block is desired
;
; Outputs:
;		buffer		--	Pointer to bitmap block corresponding to "block"
;_______________________________________________________________________
*/
static OSErr ReadBitmapBlock(
	ExtendedVCB		*vcb,
	UInt32			block,
	UInt32			**buffer)
{
	OSErr			err;

#if HFSInstrumentation
	InstTraceClassRef	trace;
	InstEventTag		eventTag;

	err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:ReadBitmapBlock", 'hfs+', kInstEnableClassMask, &trace);
	if (err != noErr) DebugStr("\pError from InstCreateTraceClass");

	eventTag = block;			//	a cheap way to get the block number into the log
	InstLogTraceEvent( trace, eventTag, kInstStartEvent);
#endif

	err = noErr;

	REQUIRE_FILE_LOCK(vcb->extentsRefNum, false);	/* bitmap blocks are covered by the Extents B-tree lock */

	if (vcb->vcbSigWord == kHFSSigWord) {
		//
		//	HFS:  The bitmap lives in a fixed region of the volume.  Turn the
		//	allocation block number into a physical block offset within the
		//	bitmap, and then into the physical block within the volume.
		//
		block /= kBitsPerBlock;			//	block offset within bitmap
		block += vcb->vcbVBMSt;			//	block within whole volume
	}
	else {
		FCB *allocFile;
		daddr_t startBlock;
		size_t availableBytes;

		//
		//	HFS+:  The bitmap lives in the allocation file.  We simply convert
		//	the block number into a byte offset within the allocation file and
		//	then determine which physical block that byte is in.
		//
		allocFile = GetFileControlBlock(vcb->allocationsRefNum);

		//
		//	Find out which physical block holds byte #offset in the allocation
		//	file.  Note that we map only 1 byte (the one we asked for).
		//
		err = MapFileBlockC(vcb, allocFile, (size_t)1, (off_t)(block/kBitsPerByte), &startBlock, &availableBytes);
		block = startBlock;
	}

	if (err == noErr) {
		err = GetBlock_glue(
#if EXPLICIT_BUFFER_RELEASES
			0,					//	No options; caller releases via RelBlock_glue
#else
			gbReleaseMask,		//	Release block immediately.  We only work on one
								//	block at a time.  Call MarkBlock later if dirty.
#endif
			block,				//	Physical block on volume
			(Ptr *) buffer,		//	A place to return the buffer pointer
			kNoFileReference,	//	Not a file read
			vcb);				//	Volume to read from
	}

#if HFSInstrumentation
	InstLogTraceEvent( trace, eventTag, kInstEndEvent);
#endif

	return err;
}



/*
_______________________________________________________________________

Routine:	BlockAllocateContig

Function:	Allocate a contiguous group of allocation blocks.  The
			allocation is all-or-nothing.  The caller guarantees that
			there are enough free blocks (though they may not be
			contiguous, in which case this call will fail).

Inputs:
	vcb				Pointer to volume where space is to be allocated
	startingBlock	Preferred first block for allocation
	minBlocks		Minimum number of contiguous blocks to allocate
	maxBlocks		Maximum number of contiguous blocks to allocate

Outputs:
	actualStartBlock	First block of range allocated, or 0 if error
	actualNumBlocks		Number of blocks allocated, or 0 if error
_______________________________________________________________________
*/
static OSErr BlockAllocateContig(
	ExtendedVCB		*vcb,
	UInt32			startingBlock,
	UInt32			minBlocks,
	UInt32			maxBlocks,
	UInt32			*actualStartBlock,
	UInt32			*actualNumBlocks)
{
	OSErr	err;

	//
	//	Find a contiguous group of blocks at least minBlocks long.
	//	Determine the number of contiguous blocks available (up
	//	to maxBlocks).  First try at/after the preferred block...
	//
	err = BlockFindContiguous(vcb, startingBlock, vcb->totalBlocks, minBlocks, maxBlocks,
							  actualStartBlock, actualNumBlocks);
	if (err == dskFulErr) {
		//	...then wrap around and retry from the start of the volume.
		//	NOTE: Should constrain the endingBlock here, so we don't bother
		//	looking for ranges that start after startingBlock, since we
		//	already checked those.
		err = BlockFindContiguous(vcb, 0, vcb->totalBlocks, minBlocks, maxBlocks,
								  actualStartBlock, actualNumBlocks);
	}
	if (err != noErr) goto Exit;

	//
	//	Now mark those blocks allocated.
	//
	err = BlockMarkAllocated(vcb, *actualStartBlock, *actualNumBlocks);

Exit:
	if (err != noErr) {
		*actualStartBlock = 0;
		*actualNumBlocks = 0;
	}

	return err;
}

extern OSErr LookupBufferMapping(caddr_t bufferAddress, struct buf **bpp, int *mappingIndexPtr);

/*
_______________________________________________________________________

Routine:	BlockAllocateAny

Function:	Allocate one or more allocation blocks.  If there are fewer
			free blocks than requested, all free blocks will be
			allocated.  The caller guarantees that there is at least
			one free block.

Inputs:
	vcb				Pointer to volume where space is to be allocated
	startingBlock	Preferred first block for allocation
	endingBlock		Last block to check + 1
	maxBlocks		Maximum number of contiguous blocks to allocate

Outputs:
	actualStartBlock	First block of range allocated, or 0 if error
	actualNumBlocks		Number of blocks allocated, or 0 if error
_______________________________________________________________________
*/
OSErr BlockAllocateAny(
	ExtendedVCB		*vcb,
	UInt32			startingBlock,
	register UInt32	endingBlock,
	UInt32			maxBlocks,
	UInt32			*actualStartBlock,
	UInt32			*actualNumBlocks)
{
	OSErr			err;
	register UInt32	block;			//	current block number
	register UInt32	currentWord;	//	current word of the bitmap block, in host byte order
									//	(NOT a pointer, despite the old comment)
	register UInt32	bitMask;		//	Word with given bits already set (ready to OR in)
	register UInt32	wordsLeft;		//	Number of words left in this bitmap block
	UInt32 *buffer = NULL;			//	cursor into the current bitmap block
	UInt32 *currCache = NULL;		//	base of the current bitmap block (for release/asserts)

#if HFS_DIAGNOSTIC
	struct buf *bp = NULL;
	int mappingEntry;
#endif

	//	Since this routine doesn't wrap around, never ask for more blocks
	//	than remain before endingBlock.
	if (maxBlocks > (endingBlock - startingBlock)) {
		maxBlocks = endingBlock - startingBlock;
	}

	DBG_TREE (("\nAllocating starting at %ld, maxblocks %ld\n", startingBlock, maxBlocks));
	//
	//	Pre-read the first bitmap block
	//
	err = ReadBitmapBlock(vcb, startingBlock, &currCache);
	DBG_TREE (("\n1. Read bit map at %ld, buffer is 0x%x\n", startingBlock, (int)currCache));
	if (err != noErr) goto Exit;
	buffer = currCache;
	MarkBlock_glue((Ptr) currCache);			//	this block will be dirty
	DBG_ASSERT(! LookupBufferMapping((caddr_t)currCache, &bp, &mappingEntry));

	//
	//	Set up the current position within the block
	//
	{
		UInt32 wordIndexInBlock;

		wordIndexInBlock = (startingBlock & kBitsWithinBlockMask) / kBitsPerWord;
		buffer += wordIndexInBlock;
		wordsLeft = kWordsPerBlock - wordIndexInBlock;
		currentWord = SWAP_BE32 (*buffer);
		bitMask = kHighBitInWordMask >> (startingBlock & kBitsWithinWordMask);
	}

	//
	//	Find the first unallocated block
	//
	block=startingBlock;
	while (block < endingBlock) {
		if ((currentWord & bitMask) == 0)
			break;

		//	Next bit
		++block;
		bitMask >>= 1;
		if (bitMask == 0) {
			//	Next word
			bitMask = kHighBitInWordMask;
			++buffer;

			if (--wordsLeft == 0) {
				//	Next block
#if EXPLICIT_BUFFER_RELEASES
				DBG_ASSERT(! LookupBufferMapping((caddr_t)currCache, &bp, &mappingEntry));
				err = RelBlock_glue((Ptr)currCache, rbDefault);
				if (err != noErr) goto Exit;
				buffer = currCache = NULL;
#endif
				err = ReadBitmapBlock(vcb, block, &currCache);
				if (err != noErr) goto Exit;
				buffer = currCache;
				DBG_TREE (("\n2. Read bit map at %ld, buffer is 0x%x\n", block, (int)currCache));
				DBG_ASSERT(! LookupBufferMapping((caddr_t)currCache, &bp, &mappingEntry));
				MarkBlock_glue((Ptr) currCache);	//	this block will be dirty

				wordsLeft = kWordsPerBlock;
			}

			currentWord = SWAP_BE32 (*buffer);
		}
	}

	//	Did we get to the end of the bitmap before finding a free block?
	//	If so, then couldn't allocate anything.
	if (block == endingBlock) {
		err = dskFulErr;
		goto Exit;
	}

	//	Return the first block in the allocated range
	*actualStartBlock = block;

	//	If we could get the desired number of blocks before hitting endingBlock,
	//	then adjust endingBlock so we won't keep looking.  Ideally, the comparison
	//	would be (block + maxBlocks) < endingBlock, but that could overflow.  The
	//	comparison below yields identical results, but without overflow.
	if (block < (endingBlock-maxBlocks)) {
		endingBlock = block + maxBlocks;	//	if we get this far, we've found enough
	}

	//
	//	Allocate all of the consecutive blocks
	//
	while ((currentWord & bitMask) == 0) {
		//	Allocate this block
		currentWord |= bitMask;

		//	Move to the next block.  If no more, then exit.
		++block;
		if (block == endingBlock)
			break;

		//	Next bit
		bitMask >>= 1;
		if (bitMask == 0) {
			*buffer = SWAP_BE32 (currentWord);					//	update value in bitmap
																//	(write back in big-endian, on-disk order)

			//	Next word
			bitMask = kHighBitInWordMask;
			++buffer;

			if (--wordsLeft == 0) {
				//	Next block
#if EXPLICIT_BUFFER_RELEASES
				DBG_ASSERT(! LookupBufferMapping((caddr_t)currCache, &bp, &mappingEntry));
				err = RelBlock_glue((Ptr)currCache, rbDefault);
				if (err != noErr) goto Exit;
				buffer = currCache = NULL;
#endif
				err = ReadBitmapBlock(vcb, block, &currCache);
				if (err != noErr) goto Exit;
				buffer = currCache;
				DBG_TREE (("\n3. Read bit map at %ld, buffer is 0x%x\n", block, (int)currCache));
				DBG_ASSERT(! LookupBufferMapping((caddr_t)currCache, &bp, &mappingEntry));
				MarkBlock_glue((Ptr) currCache);		//	this block will be dirty

				wordsLeft = kWordsPerBlock;
			}

			currentWord = SWAP_BE32 (*buffer);
		}
	}
	*buffer = SWAP_BE32 (currentWord);							//	update the last change

Exit:
	if (err == noErr) {
		*actualNumBlocks = block - *actualStartBlock;
	}
	else {
		*actualStartBlock = 0;
		*actualNumBlocks = 0;
	}

#if EXPLICIT_BUFFER_RELEASES
	if (currCache) {
		DBG_ASSERT(! LookupBufferMapping((caddr_t)currCache, &bp, &mappingEntry));
		(void)RelBlock_glue((Ptr)currCache, rbDefault);		/*	Ignore any additional errors */
	};
#endif

	return err;
}



/*
_______________________________________________________________________

Routine:	BlockMarkAllocated

Function:	Mark a contiguous group of blocks as allocated (set in the
			bitmap).  It assumes those bits are currently marked
			deallocated (clear in the bitmap).

Inputs:
	vcb				Pointer to volume where space is to be allocated
	startingBlock	First block number to mark as allocated
	numBlocks		Number of blocks to mark as allocated
_______________________________________________________________________
*/
static OSErr BlockMarkAllocated(
	ExtendedVCB		*vcb,
	UInt32			startingBlock,
	register UInt32	numBlocks)
{
	OSErr			err;
	register UInt32	*currentWord;	//	Pointer to current word within bitmap block
	register UInt32	wordsLeft;		//	Number of words left in this bitmap block
	register UInt32	bitMask;		//	Word with given bits already set (ready to OR in)
	UInt32			firstBit;		//	Bit index within word of first bit to allocate
	UInt32			numBits;		//	Number of bits in word to allocate
	UInt32			*buffer = NULL;

#if HFSInstrumentation
	InstTraceClassRef	trace;
	InstEventTag		eventTag;

	err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:BlockMarkAllocated", 'hfs+', kInstEnableClassMask, &trace);
	if (err != noErr) DebugStr("\pError from InstCreateTraceClass");

	eventTag = InstCreateEventTag();
	InstLogTraceEvent( trace, eventTag, kInstStartEvent);
#endif

	//
	//	Pre-read the bitmap block containing the first word of allocation
	//

	err = ReadBitmapBlock(vcb, startingBlock, &buffer);
	if (err != noErr) goto Exit;
	MarkBlock_glue((Ptr) buffer);		//	this block will be dirty

	//
	//	Initialize currentWord, and wordsLeft.
	//
	{
		UInt32 wordIndexInBlock;

		wordIndexInBlock = (startingBlock & kBitsWithinBlockMask) / kBitsPerWord;
		currentWord = buffer + wordIndexInBlock;
		wordsLeft = kWordsPerBlock - wordIndexInBlock;
	}

	//
	//	If the first block to allocate doesn't start on a word
	//	boundary in the bitmap, then treat that first word
	//	specially.
	//

	firstBit = startingBlock % kBitsPerWord;
	if (firstBit != 0) {
		bitMask = kAllBitsSetInWord >> firstBit;	//	turn off all bits before firstBit
		numBits = kBitsPerWord - firstBit;			//	number of remaining bits in this word
		if (numBits > numBlocks) {
			numBits = numBlocks;					//	entire allocation is inside this one word
			bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));	//	turn off bits after last
		}
#if DEBUG_BUILD
		//	Debug-only sanity check: the bits we're about to set must
		//	currently be clear.  The mask is swapped because the bitmap is
		//	stored in big-endian, on-disk order.
		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
			DebugStr("\pFATAL: blocks already allocated!");
			//err = fsDSIntErr;
			//goto Exit;
		}
#endif
		*currentWord |= SWAP_BE32 (bitMask);		//	set the bits in the bitmap
		numBlocks -= numBits;						//	adjust number of blocks left to allocate

		++currentWord;								//	move to next word
		--wordsLeft;								//	one less word left in this block
	}

	//
	//	Allocate whole words (32 blocks) at a time.
	//

	bitMask = kAllBitsSetInWord;					//	put this in a register for 68K
	while (numBlocks >= kBitsPerWord) {
		if (wordsLeft == 0) {
			//	Read in the next bitmap block
			startingBlock += kBitsPerBlock;			//	generate a block number in the next bitmap block

#if EXPLICIT_BUFFER_RELEASES
			err = RelBlock_glue((Ptr)buffer, rbDefault);
			if (err != noErr) goto Exit;
			buffer = NULL;
#endif
			err = ReadBitmapBlock(vcb, startingBlock, &buffer);
			if (err != noErr) goto Exit;
			MarkBlock_glue((Ptr) buffer);			//	this block will be dirty

			//	Readjust currentWord and wordsLeft
			currentWord = buffer;
			wordsLeft = kWordsPerBlock;
		}
#if DEBUG_BUILD
		if (*currentWord != 0) {
			DebugStr("\pFATAL: blocks already allocated!");
			//err = fsDSIntErr;
			//goto Exit;
		}
#endif
		*currentWord = SWAP_BE32 (bitMask);
		numBlocks -= kBitsPerWord;

		++currentWord;								//	move to next word
		--wordsLeft;								//	one less word left in this block
	}

	//
	//	Allocate any remaining blocks (fewer than a whole word's worth).
	//

	if (numBlocks != 0) {
		bitMask = ~(kAllBitsSetInWord >> numBlocks);	//	set first numBlocks bits
		if (wordsLeft == 0) {
			//	Read in the next bitmap block
			startingBlock += kBitsPerBlock;				//	generate a block number in the next bitmap block

#if EXPLICIT_BUFFER_RELEASES
			err = RelBlock_glue((Ptr)buffer, rbDefault);
			if (err != noErr) goto Exit;
			buffer = NULL;
#endif
			err = ReadBitmapBlock(vcb, startingBlock, &buffer);
			if (err != noErr) goto Exit;
			MarkBlock_glue((Ptr) buffer);			//	this block will be dirty

			//	Readjust currentWord and wordsLeft
			currentWord = buffer;
			wordsLeft = kWordsPerBlock;
		}
#if DEBUG_BUILD
		if ((*currentWord & SWAP_BE32 (bitMask)) != 0) {
			DebugStr("\pFATAL: blocks already allocated!");
			//err = fsDSIntErr;
			//goto Exit;
		}
#endif
		*currentWord |= SWAP_BE32 (bitMask);			//	set the bits in the bitmap

		//	No need to update currentWord or wordsLeft
	}

Exit:

#if EXPLICIT_BUFFER_RELEASES
	if (buffer) {
		(void)RelBlock_glue((Ptr)buffer, 
rbDefault);		/*	Ignore any additional errors */
	};
#endif

#if HFSInstrumentation
	InstLogTraceEvent( trace, eventTag, kInstEndEvent);
#endif

	return err;
}



/*
_______________________________________________________________________

Routine:	BlockMarkFree

Function:	Mark a contiguous group of blocks as free (clear in the
			bitmap).  It assumes those bits are currently marked
			allocated (set in the bitmap).

Inputs:
	vcb				Pointer to volume where space is to be freed
	startingBlock	First block number to mark as freed
	numBlocks		Number of blocks to mark as freed
_______________________________________________________________________
*/
static OSErr BlockMarkFree(
	ExtendedVCB		*vcb,
	UInt32			startingBlock,
	register UInt32	numBlocks)
{
	OSErr			err;
	register UInt32	*currentWord;	//	Pointer to current word within bitmap block
	register UInt32	wordsLeft;		//	Number of words left in this bitmap block
	register UInt32	bitMask;		//	Word with given bits set (mask of bits to clear)
	UInt32			firstBit;		//	Bit index within word of first bit to free
	UInt32			numBits;		//	Number of bits in word to free
	UInt32			*buffer = NULL;

#if HFSInstrumentation
	InstTraceClassRef	trace;
	InstEventTag		eventTag;

	err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:BlockMarkFree", 'hfs+', kInstEnableClassMask, &trace);
	if (err != noErr) DebugStr("\pError from InstCreateTraceClass");

	eventTag = InstCreateEventTag();
	InstLogTraceEvent( trace, eventTag, kInstStartEvent);
#endif

	//
	//	Pre-read the bitmap block containing the first word of the range
	//

	err = ReadBitmapBlock(vcb, startingBlock, &buffer);
	if (err != noErr) goto Exit;
	MarkBlock_glue((Ptr) buffer);		//	this block will be dirty

	//
	//	Initialize currentWord, and wordsLeft.
	//
	{
		UInt32 wordIndexInBlock;

		wordIndexInBlock = (startingBlock & kBitsWithinBlockMask) / kBitsPerWord;
		currentWord = buffer + wordIndexInBlock;
		wordsLeft = kWordsPerBlock - wordIndexInBlock;
	}

	//
	//	If the first block to free doesn't start on a word
	//	boundary in the bitmap, then treat that first word
	//	specially.
	//

	firstBit = startingBlock % kBitsPerWord;
	if (firstBit != 0) {
		bitMask = kAllBitsSetInWord >> firstBit;	//	turn off all bits before firstBit
		numBits = kBitsPerWord - firstBit;			//	number of remaining bits in this word
		if (numBits > numBlocks) {
			numBits = numBlocks;					//	entire deallocation is inside this one word
			bitMask &= ~(kAllBitsSetInWord >> (firstBit + numBits));	//	turn off bits after last
		}
#if DEBUG_BUILD
		//	Debug-only sanity check: the bits we're about to clear must
		//	currently be set (the bitmap is in big-endian, on-disk order).
		if ((*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) {
			DebugStr("\pFATAL: blocks not allocated!");
			//err = fsDSIntErr;
			//goto Exit;
		}
#endif
		*currentWord &= SWAP_BE32 (~bitMask);		//	clear the bits in the bitmap
		numBlocks -= numBits;						//	adjust number of blocks left to free

		++currentWord;								//	move to next word
		--wordsLeft;								//	one less word left in this block
	}

	//
	//	Free whole words (32 blocks) at a time.
	//	(Comment fixed: this routine frees, it does not allocate.)
	//

	while (numBlocks >= kBitsPerWord) {
		if (wordsLeft == 0) {
			//	Read in the next bitmap block
			startingBlock += kBitsPerBlock;			//	generate a block number in the next bitmap block

#if EXPLICIT_BUFFER_RELEASES
			err = RelBlock_glue((Ptr)buffer, rbDefault);
			if (err != noErr) goto Exit;
			buffer = NULL;
#endif
			err = ReadBitmapBlock(vcb, startingBlock, &buffer);
			if (err != noErr) goto Exit;
			MarkBlock_glue((Ptr) buffer);			//	this block will be dirty

			//	Readjust currentWord and wordsLeft
			currentWord = buffer;
			wordsLeft = kWordsPerBlock;
		}

#if DEBUG_BUILD
		if (*currentWord != SWAP_BE32 (kAllBitsSetInWord)) {
			DebugStr("\pFATAL: blocks not allocated!");
			//err = fsDSIntErr;
			//goto Exit;
		}
#endif
		*currentWord = 0;							//	clear the entire word
		numBlocks -= kBitsPerWord;

		++currentWord;								//	move to next word
		--wordsLeft;								//	one less word left in this block
	}

	//
	//	Free any remaining blocks (fewer than a whole word's worth).
	//

	if (numBlocks != 0) {
		bitMask = ~(kAllBitsSetInWord >> numBlocks);	//	set first numBlocks bits
		if (wordsLeft == 0) {
			//	Read in the next bitmap block
			startingBlock += kBitsPerBlock;				//	generate a block number in the next bitmap block

#if EXPLICIT_BUFFER_RELEASES
			err = RelBlock_glue((Ptr)buffer, rbDefault);
			if (err != noErr) goto Exit;
			buffer = NULL;
#endif
			err = ReadBitmapBlock(vcb, startingBlock, &buffer);
			if (err != noErr) goto Exit;
			MarkBlock_glue((Ptr) buffer);			//	this block will be dirty

			//	Readjust currentWord and wordsLeft
			currentWord = buffer;
			wordsLeft = kWordsPerBlock;
		}
#if DEBUG_BUILD
		if ((*currentWord & SWAP_BE32 (bitMask)) != SWAP_BE32 (bitMask)) {
			DebugStr("\pFATAL: blocks not allocated!");
			//err = fsDSIntErr;
			//goto Exit;
		}
#endif
		*currentWord &= SWAP_BE32 (~bitMask);			//	clear the bits in the bitmap

		//	No need to update currentWord or wordsLeft
	}

Exit:

#if EXPLICIT_BUFFER_RELEASES
	if (buffer) {
		(void)RelBlock_glue((Ptr)buffer, rbDefault);	/*
	Ignore any additional errors */
	};
#endif

#if HFSInstrumentation
	InstLogTraceEvent( trace, eventTag, kInstEndEvent);
#endif

	return err;
}


/*
_______________________________________________________________________

Routine:	BlockFindContiguous

Function:	Find a contiguous range of blocks that are free (bits
			clear in the bitmap).  If a contiguous range of the
			minimum size can't be found, an error will be returned.

			NOTE: It would be nice if we could skip over whole words
			with all bits set.

			NOTE: When we find a bit set, and are about to set freeBlocks
			to 0, we should check to see whether there are still
			minBlocks bits left in the bitmap.

Inputs:
	vcb				Pointer to volume where space is to be allocated
	startingBlock	Preferred first block of range
	endingBlock		Last possible block in range + 1
	minBlocks		Minimum number of blocks needed.  Must be > 0.
	maxBlocks		Maximum (ideal) number of blocks desired

Outputs:
	actualStartBlock	First block of range found, or 0 if error
	actualNumBlocks		Number of blocks found, or 0 if error
_______________________________________________________________________
*/
/*
_________________________________________________________________________________________
	(DSH) 5/8/97 Description of BlockFindContiguous() algorithm
	Finds a contiguous range of free blocks by searching back to front.  This
	allows us to skip ranges of bits knowing that they are not candidates for
	a match because they are too small.  The below ascii diagrams illustrate
	the algorithm in action.

	Representation of a piece of a volume bitmap file
	If BlockFindContiguous() is called with minBlocks == 10, maxBlocks == 20


Fig. 1 initialization of variables, "<--" represents direction of travel

startingBlock (passed in)
	|
	1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
	|                                     <--|
stopBlock                            currentBlock		freeBlocks == 0
														countedFreeBlocks == 0

Fig. 2 dirty bit found

	1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
	|                              |
stopBlock                   currentBlock				freeBlocks == 3
														countedFreeBlocks == 0

Fig. 3 reset variables to search for remainder of minBlocks

	1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
	|_________________|            |           |
	    Unsearched            stopBlock   currentBlock	freeBlocks == 0
														countedFreeBlocks == 3

Fig. 4 minBlocks contiguous blocks found, *actualStartBlock is set

	1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
	|_________________|            |
	    Unsearched            stopBlock					freeBlocks == 7
	                          currentBlock				countedFreeBlocks == 3

Fig. 5 Now run it forwards trying to accumulate up to maxBlocks if possible

	1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
	|_________________|                        |  -->
	    Unsearched                      currentBlock
	                                    *actualNumBlocks == 10

Fig. 6 Dirty bit is found, return actual number of contiguous blocks found

	1 0 1 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
	|_________________|                              |
	    Unsearched                            currentBlock
	                                          *actualNumBlocks == 16
_________________________________________________________________________________________
*/

static OSErr BlockFindContiguous(
	ExtendedVCB		*vcb,
	UInt32			startingBlock,
	register UInt32	endingBlock,
	UInt32			minBlocks,
	UInt32			maxBlocks,
	UInt32			*actualStartBlock,
	UInt32			*actualNumBlocks)
{
	OSErr			err;
	register UInt32	bitMask;		//	mask of bit within word for currentBlock
	register UInt32	tempWord;		//	bitmap word currently being examined (host byte order)
	register UInt32	freeBlocks;		//	number of contiguous free blocks so far
	register UInt32	currentBlock;	//	block number we're currently examining
	UInt32			wordsLeft;		//	words remaining in bitmap block
	UInt32			*buffer = NULL;
	register UInt32	*currentWord;

	UInt32			stopBlock;			//	when all blocks until stopBlock are free, we found enough
	UInt32			countedFreeBlocks;	//	how many contiguous free blocks behind stopBlock
	UInt32			currentSector;		//	which allocations file sector is cached in buffer

#if HFSInstrumentation
	InstTraceClassRef	trace;
	InstEventTag		eventTag;

	err = InstCreateTraceClass(kInstRootClassRef, "HFS:VSM:BlockFindContiguous", 'hfs+', kInstEnableClassMask, &trace);
	if (err != noErr) DebugStr("\pError from InstCreateTraceClass");

	eventTag = InstCreateEventTag();
	InstLogTraceEvent( trace, eventTag, kInstStartEvent);
#endif

	if ((endingBlock - startingBlock) < minBlocks)
	{
		//	The set of blocks we're checking is smaller than the minimum number
		//	of blocks, so we couldn't possibly find a good range.
		err = dskFulErr;
		goto Exit;
	}

	//	Search for min blocks from back to front.
	//	If min blocks is found, advance the allocation pointer up to max blocks

	//
	//	Pre-read the bitmap block containing currentBlock
	//
	stopBlock = startingBlock;
	currentBlock = startingBlock + minBlocks - 1;		//	(-1) to include startingBlock

	err = ReadBitmapBlock( vcb, currentBlock, &buffer );
	if ( err != noErr ) goto Exit;

	//
	//	Init buffer, currentWord, wordsLeft, and bitMask
	//	(wordsLeft counts DOWN toward the front of the block since we scan backward)
	//
	{
		UInt32 wordIndexInBlock;

		wordIndexInBlock = ( currentBlock & kBitsWithinBlockMask ) / kBitsPerWord;
		currentWord = buffer + wordIndexInBlock;

		wordsLeft = wordIndexInBlock;
		tempWord = SWAP_BE32 (*currentWord);
		bitMask = kHighBitInWordMask >> ( currentBlock & kBitsWithinWordMask );
		currentSector = currentBlock / kBitsPerBlock;
	}

	//
	//	Look for maxBlocks free blocks.  If we find an allocated block,
	//	see if we've found minBlocks.
	//
	freeBlocks = 0;
	countedFreeBlocks = 0;

	while ( currentBlock >= stopBlock )
	{
		//	Check current bit
		if ((tempWord & bitMask) == 0)
		{
			++freeBlocks;
		}
		else		//	Used bitmap block found
		{
			if ( ( freeBlocks + countedFreeBlocks ) >= minBlocks )
			{
				break;		//	Found enough
			}
			else
			{
				//	We found a dirty bit, so we want to check if the next (minBlocks-freeBlocks) blocks
				//	are free beyond what we have already checked.  At Fig.2 setting up for Fig.3

				stopBlock = currentBlock + 1 + freeBlocks;		//	Advance stop condition
				currentBlock += minBlocks;
				if ( currentBlock >= endingBlock ) break;
				countedFreeBlocks = freeBlocks;
				freeBlocks = 0;									//	Not enough; look for another range

				if ( currentSector != currentBlock / kBitsPerBlock )
				{
#if EXPLICIT_BUFFER_RELEASES
					err = RelBlock_glue((Ptr)buffer, rbDefault);
					if (err != noErr) goto Exit;
					buffer = NULL;
#endif
					err = ReadBitmapBlock( vcb, currentBlock, &buffer );
					if (err != noErr) goto Exit;
					currentSector = currentBlock / kBitsPerBlock;
				}

				wordsLeft = ( currentBlock & kBitsWithinBlockMask ) / kBitsPerWord;
				currentWord = buffer + wordsLeft;
				tempWord = SWAP_BE32 (*currentWord);
				bitMask = kHighBitInWordMask >> ( currentBlock & kBitsWithinWordMask );

				continue;										//	Back to the while loop
			}
		}

		//	Move to next bit
		--currentBlock;
		bitMask <<= 1;
		if (bitMask == 0)										//	On a word boundary, start masking words
		{
			bitMask = kLowBitInWordMask;

			//	Move to next word
NextWord:
			if ( wordsLeft != 0 )
			{
				--currentWord;
				--wordsLeft;
			}
			else
			{
				//	Read in the next bitmap block
#if EXPLICIT_BUFFER_RELEASES
				err = RelBlock_glue((Ptr)buffer, rbDefault);
				if (err != noErr) goto Exit;
				buffer = NULL;
#endif
				err = ReadBitmapBlock( vcb, currentBlock, &buffer );
				if (err != noErr) goto Exit;

				//	Adjust currentWord, wordsLeft, currentSector
				currentSector = currentBlock / kBitsPerBlock;
				currentWord = buffer + kWordsPerBlock - 1;		//	Last word in buffer
				wordsLeft = kWordsPerBlock - 1;
			}

			tempWord = SWAP_BE32 (*currentWord);				//	Grab the current word

			//
			//	If we found a whole word of free blocks, quickly skip over it.
			//	NOTE: we could actually go beyond the end of the bitmap if the
			//	number of allocation blocks on the volume is not a multiple of
			//	32.  If this happens, we'll adjust currentBlock and freeBlocks
			//	after the loop.
			//
			if ( tempWord == 0 )
			{
				freeBlocks += kBitsPerWord;
				currentBlock -= kBitsPerWord;
				if ( freeBlocks + countedFreeBlocks >= minBlocks )
					break;		//	Found enough
				goto NextWord;
			}
		}
	}

	if ( freeBlocks + countedFreeBlocks < minBlocks )
	{
		*actualStartBlock = 0;
		*actualNumBlocks = 0;
		err = dskFulErr;
		goto Exit;
	}

	//
	//	When we get here, we know we've found minBlocks continuous space.
	//	At Fig.4, setting up for Fig.5
	//	From here we do a forward search accumulating additional free blocks.
	//

	*actualNumBlocks = minBlocks;
	*actualStartBlock = stopBlock - countedFreeBlocks;		//	ActualStartBlock is set to return to the user
	currentBlock = *actualStartBlock + minBlocks;			//	Right after found free space

	//	Now lets see if we can run the actualNumBlocks number all the way up to maxBlocks
	if ( currentSector != currentBlock / kBitsPerBlock )
	{
#if EXPLICIT_BUFFER_RELEASES
		err = RelBlock_glue((Ptr)buffer, rbDefault);
		if (err != noErr) goto Exit;
		buffer = NULL;
#endif
		err = ReadBitmapBlock( vcb, currentBlock, &buffer );
		if (err != noErr)
		{
			err = noErr;										//	We already found the space
			goto Exit;
		}

		currentSector = currentBlock / kBitsPerBlock;
	}

	//
	//	Init buffer, currentWord, wordsLeft, and bitMask for the forward pass
	//
	{
		UInt32 wordIndexInBlock;

		wordIndexInBlock = (currentBlock & kBitsWithinBlockMask) / kBitsPerWord;
		currentWord = buffer + wordIndexInBlock;
		tempWord = SWAP_BE32 (*currentWord);
		wordsLeft = kWordsPerBlock - wordIndexInBlock;
		bitMask = kHighBitInWordMask >> (currentBlock & kBitsWithinWordMask);
	}

	if ( *actualNumBlocks < maxBlocks )
	{
		while ( currentBlock < endingBlock )
		{

			if ( (tempWord & bitMask) == 0 )
			{
				*actualNumBlocks += 1;

				if ( *actualNumBlocks == maxBlocks )
					break;
			}
			else
			{
				break;
			}

			//	Move to next bit
			++currentBlock;
			bitMask >>= 1;
			if (bitMask == 0)
			{
				bitMask = kHighBitInWordMask;
				++currentWord;

				if ( --wordsLeft == 0)
				{
#if EXPLICIT_BUFFER_RELEASES
					err = RelBlock_glue((Ptr)buffer, rbDefault);
					if (err != noErr) goto Exit;
					buffer = NULL;
#endif
					err = ReadBitmapBlock(vcb, currentBlock, &buffer);
					if (err != noErr) break;

					//	Adjust currentWord, wordsLeft
					currentWord = buffer;
					wordsLeft = kWordsPerBlock;
				}
				tempWord = SWAP_BE32 (*currentWord);			//	grab the current word
			}
		}
	}

Exit:

#if EXPLICIT_BUFFER_RELEASES
	if (buffer) {
		(void)RelBlock_glue((Ptr)buffer, rbDefault);	/*	Ignore any additional errors */
	};
#endif

#if HFSInstrumentation
	InstLogTraceEvent( trace, eventTag, kInstEndEvent);
#endif

	return err;
}

 
diff --git a/bsd/hfs/hfscommon/Misc/VolumeRequests.c b/bsd/hfs/hfscommon/Misc/VolumeRequests.c
new file mode 100644
index 000000000..96de58244
--- /dev/null
+++ b/bsd/hfs/hfscommon/Misc/VolumeRequests.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: VolumeRequests.c + + Contains: MountVolume and related utility routines for HFS & HFS Plus + + Version: HFS Plus 1.0 + + Written by: Deric Horn + + Copyright: © 1996-1998 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Deric Horn + + Other Contacts: Mark Day, Don Brady + + Technology: File Systems + + Writers: + + (JL) Jim Luther + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + 7/28/98 djb GetDiskBlocks is now implemented in MacOSStubs.c (radar #2258148). + 4/3/98 djb Conditionally remove FSVars reference from GetVolumeNameFromCatalog. + 3/31/98 djb Sync up with final HFSVolumes.h header file. + 1/29/98 DSH TrashAllFSCaches is responsible for trashing all file system and + disk caches. Called from FlushVol when the HFS bit is set. + 12/12/97 DSH 2003877, when vcbAllocPtr was copied to nextAllocation it was + getting sign extended. + 11/26/97 DSH 2003459, fcbs was not being initialized if volume was offline + and we are executing an unconditional unmount. + 11/24/97 DSH 2005507, FlushVolumeControlBlock() keeps MDB drCrDate in sync + with VolumeHeader createDate. + 11/11/97 DSH 1685873, RemountWrappedVolumes was only remounting the first + HFS+ volume in the queue, causing HFS wrappers to be mounted if + multiple volumes had been mounted before InitHFSPlus. + 11/4/97 DSH Clear FCB when getting a new one. + 11/3/97 JL #2001483 - Removed unneeded parameters from MountVolume, + MountHFSVolume, MountHFSPlusVolume, GetVolumeInformation, + GetXVolumeInformation and AddVCB (and added local variables as + needed). Return WDCBRecPtr from UnMountVolume. Set wdcb + parameter to NULL in GetXVolumeInformation if working directory + was not specified. + 10/31/97 DSH Added consistencyStatus parameter to MountCheck + 10/23/97 msd Bug 1685113. 
The VolumeHeader's createDate should be in local + time (not GMT) and identical to the MDB's drCrDate (and VCB's + vcbCrDate). When checking for a remount of an offline HFS Plus + volume, compare write counts instead of mod dates (which could + be fooled by the user changing time zones). Force MountCheck to + run if the volume was last mounted by Bride 1.0b2 or earlier. + 10/17/97 msd Conditionalize DebugStrs. + 10/13/97 djb Update volumeNameEncodingHint when updating the volume name. + 10/10/97 msd Bug 1683571. The dates in the volume header are in GMT, so be + sure to convert them when mounting a volume or flushing the + volume header. + 10/2/97 DSH In UnmountVolume() check that the drive is on line before + determining if wrapper volume needs to be renamed causing IO. + 10/1/97 DSH Run on disk version of MountCheck instead of ROM version for + boot volumes 1682475. + 10/1/97 djb Add calls to InvalidateCatalogCache (part of radar #1678833). + 9/26/97 DSH Removed debugging code: support for 'W' key wrapper mounting. + 9/17/97 DSH hfsPlusIOPosOffset was uninitialized for Wrapperless volumes. + 9/5/97 djb In MountVol initialize Catalog cache before calling Catalog! + 9/4/97 msd PropertyCloseVolume renamed to AttributesCloseVolume. Remove + call to AttributesOpenVolume (it no longer exists). + 9/2/97 DSH VolumeHeader is now 3rd sector in partition, altVH is 2nd to + last for compatibility. Initial support for wrapperless + volumes. + 8/26/97 djb Only call CountRootFiles during MountVol. + 8/20/97 msd If the HFS Plus volume version doesn't match, mount the wrapper + instead. + 8/19/97 djb Add error handling to RenameWrapperVolume. + 8/15/97 msd Bug 1673999. In MakeVCBsExtendedVCBs, copy old VCB's vcbAllocPtr + to new VCB's nextAllocation field. + 8/12/97 djb Fixed GetXVolInfo to only use extended vcb fields for local + volumes (radar# 1673177) + 8/11/97 DSH vcbNmAlBlks is now taken from the embededExtent.blockCount + (1669121). 
+ 8/11/97 djb Return actual count of files in root directory for HFS Plus + volumes (Radar #1669118). Added local CountRootFiles routine. + 8/5/97 msd Make sure version field in VolumeHeader is exactly + kHFSPlusVersion. 8/1/97 djb GetXVolumeInformation now returns + extFSErr when FSID is nonzero (Radar #1649503). + 7/25/97 DSH Init and Dispose of GenericMRUCache within ExtendedVCB. + 7/16/97 DSH FilesInternal.x -> FileMgrInternal.x to avoid name collision + 7/15/97 DSH Remount Wrapper volumes mounted before HFS+ initialization + (166729) + 7/15/97 djb Remove ioXVersion checking in GetXVolInfo (radar #1666217). + 7/8/97 DSH Loading PrecompiledHeaders from define passed in on C line + 7/7/97 djb Add GetVolumeNameFromCatalog routine. + 7/7/97 DSH GetNewVRefNum now get's a recycled vRefNum. Bug 1664445 in + Installer was cacheing the vRefNum while CheckDisk unmounts and + remounts disk. + 6/30/97 DSH shadowing values obsoleteVCBXTRef, and obsoleteVCBCTRef when + HFS+ volume is mounted. + 6/26/97 DSH GetVolInfo returns HFS signature for HFS+ volumes, GetXVolInfo + returns real signature. + 6/24/97 DSH MakeVCBsExtendedVCBs was using wdcb->count as count not byte + count. + 6/18/97 djb Set/get volume encodingsBitmap. + 6/16/97 msd Include String.h and Disks.h. + 6/12/97 djb Get in sync with HFS Plus format changes. + 6/11/97 msd Make GetXVolumeInformation return true allocation block size. It + now checks the ioXVersion field. + 5/28/97 msd When flushing the volume header, write out the allocation file's + clump size (from the FCB). When mounting an HFS Plus volume, + zero the entire FCB extent record, not just the first extent, + for the various volume control files. + 5/19/97 djb Add calls to CreateVolumeCatalogCache, + DisposeVolumeCatalogCache. + 5/9/97 djb Get in sync with new FilesInternal.i + 5/8/97 DSH Only mount HFS+ volumes with version < 2.0 in the VolumeHeader. + Return wrgVolTypErr if too new. + 5/2/97 djb Disable Manual Eject code since its buggy! 
+ 4/25/97 djb first checked in + + 4/11/97 DSH MountHFSPlusVolume gets volume name from catalog, and + UnmountVolume shadows the name back to the wrapper partition. + 4/8/97 msd Once volume is mounted, call AttributesOpenVolume to allow a + buffer to be allocated. + 4/7/97 msd In FlushVolumeControlBlock, don't update the attributes BTree + fields in the Volume Header unless an attributes BTree was + already open. + 4/7/97 msd In SetupFCB, add case for attributes BTree. Add code to set up + the attributes BTree. Remove call to PropertyOpenVolume. In + FlushVolumeControlBlock, write out any changes to the attributes + BTree. + 4/4/97 djb Get in sync with volume format changes. + 3/31/97 djb Added catalogDataCache to VCB; Remove ClearMem routine. + 3/18/97 msd In MountHFSPlusVolume, the free blocks calculation can overflow, + setting vcbFreeBks to a too-small value. + 3/17/97 DSH Added some utility functions AddVCB, GetParallelFCBFromRefNum, + casting for SC, and made some functions extern for DFA. + 3/5/97 msd Add calls to Property Manager to open and close the volume. When + unmounting an HFS+ volume, the allocation (bitmap) file now gets + closed. + 2/19/97 djb Update to 16-bit HFS Plus signature. + 2/12/97 msd In GetXVolumeInformation, the result code could be + uninitialized. + 1/23/97 DSH UpdateAlternateVoumeControlBlock() + 1/15/97 djb Remove MountCheckStub. Add file names to fcbs for debugging. + 1/13/97 DSH Use ExtendedVCB nextAllocation instead of vcbAllocPtr through + all code. + 1/9/97 djb Get in sync with new VolumeHeader and Extended VCB. + 1/6/97 djb Changed API to ParallelFCBFromRefnum (pragma parameter was + broken). + 1/6/97 msd Set only the defined bits in the MDB drAtrb field (when copying + from VCB vcbAtrb field). + 1/6/97 DSH CloseFile requires VCB to be passed in. + 1/6/97 djb FlushVolumeControlBlock was writing to absolute block 0 instead + of to block zero of the embedded volume. 
+ 12/20/96 msd A comparison was using "=" instead of "=="; might have caused + the wrong volume to be set as the default. + 12/19/96 DSH Setting up ExtendedVCBs + 12/19/96 djb Updated for new B-tree Manager interface. + 12/18/96 msd Change GetVCBRefNum so it can actually return a VCB pointer. + 12/12/96 djb Use new SPI for GetCatalogNode. + 12/12/96 msd Fix a bunch of errors (mostly type mismatch) when compiling with + Metrowerks. + 12/12/96 DSH adding some util functions + 12/10/96 msd Check PRAGMA_LOAD_SUPPORTED before loading precompiled headers. + 12/4/96 DSH Ported GetVolumeInformation & GetXVolumeInformation. + <3*> 11/20/96 DSH HFS Plus support to MountVolume + 11/20/96 DSH Added UnmountVol and related routines, also backed out <2> + because C_FXMKeyCmp is passed as a parameter from C but called + from Asm in BTOpen so we need a Case ON Asm entry point. + 11/20/96 msd Use CompareExtentKeys() instead of CFXMKeyCmp(). + 11/19/96 DSH first checked in + <1> 11/19/96 DSH first checked in + +*/ +#include +#include +#include +#include +#include + +#include "../../hfs.h" +#include "../../hfs_endian.h" + +#include "../headers/FileMgrInternal.h" + +#define kIDSectorOffset 2 + +OSErr GetNewFCB( ExtendedVCB *vcb, FileReference* fRefPtr); + +OSErr AccessBTree( ExtendedVCB *vcb, FileReference refNum, UInt32 fileID, UInt32 fileClumpSize, void *CompareRoutine ); + +UInt16 DivUp( UInt32 byteRun, UInt32 blockSize ); + +Boolean IsARamDiskDriver( void ); + +OSErr GetVCBRefNum( ExtendedVCB **vcb, short vRefNum ); + +OSErr ValidMasterDirectoryBlock( HFSMasterDirectoryBlock *mdb ); + +void RenameWrapperVolume( Str27 newVolumeName, UInt16 driveNumber ); + +OSErr CheckExternalFileSystem( ExtendedVCB *vcb ); + +OSErr FlushVolume( ExtendedVCB *vcb ); + +FCB *SetupFCB( ExtendedVCB *vcb, FileReference refNum, UInt32 fileID, UInt32 fileClumpSize ); + +void AddVCB( ExtendedVCB *vcb, short driveNumber, short ioDRefNum ); + +short IsPressed( unsigned short k ); + +FileReference 
GetNewVRefNum(); + +OSErr GetVolumeNameFromCatalog(ExtendedVCB *vcb); + +#if TARGET_API_MAC_OS8 +static UInt16 CountRootFiles(ExtendedVCB *vcb); +#endif /* TARGET_API_MAC_OS8 */ + + +#if ( hasHFSManualEject ) +static void SetVCBManEject(ExtendedVCB *vcb); +#endif + +// External routines + +extern OSErr C_FlushMDB( ExtendedVCB *volume ); + +extern OSErr DisposeVolumeCacheBlocks( ExtendedVCB *vcb ); + +extern void DisposeVolumeControlBlock( ExtendedVCB *vcb ); + +extern OSErr FlushVolumeBuffers( ExtendedVCB *vcb ); + +extern void MultiplyUInt32IntoUInt64( UInt64 *wideResult, UInt32 num1, UInt32 num2 ); + +extern void TrashCatalogNodeCache( void ); + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: VolumeWritable Asm: CVFlgs +// +// Function: Check the volume's flags to see if modify requests are allowed. +// +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +OSErr VolumeWritable( ExtendedVCB *vcb ) +{ + if ( !(vcb->vcbAtrb & 0x8000) ) // if the volume is not locked + { + if ( ! (*((Ptr)&(vcb->vcbAtrb) + 1) & kHFSVolumeHardwareLockMask) ) // if it's not write protected + return( noErr ); + else + return( wPrErr ); + } + else + { + return( vLckdErr ); + } +} + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: DivUp from Asm: DivUp +// +// Function: Given a number of bytes and block size, calculate the number of +// blocks needd to hold all the bytes. +// +// Result: Number of physical blocks needed +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +UInt16 DivUp( UInt32 byteRun, UInt32 blockSize ) +{ + UInt32 blocks; + + blocks = (byteRun + blockSize - 1) / blockSize; // Divide up, remember this is integer math. 
+ + if ( blocks > 0xffff ) // maximum 16 bit value + blocks = 0xffff; + + return( (UInt16) blocks ); +} + + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: HFSBlocksFromTotalSectors +// +// Function: Given the total number of sectors on the volume, calculate +// the 16Bit number of allocation blocks, and allocation block size. +// +// Result: none +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +void HFSBlocksFromTotalSectors( UInt32 totalSectors, UInt32 *blockSize, UInt16 *blockCount ) +{ + UInt16 newBlockSizeInSectors = 1; + UInt32 newBlockCount = totalSectors; + + while ( newBlockCount > 0XFFFF ) + { + newBlockSizeInSectors++; + newBlockCount = totalSectors / newBlockSizeInSectors; + } + + *blockSize = newBlockSizeInSectors * 512; + *blockCount = newBlockCount; +} + + + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: ValidMasterDirectoryBlock +// +// Function: Run some sanity checks to make sure the MDB is valid +// +// Result: error +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +OSErr ValidMasterDirectoryBlock( HFSMasterDirectoryBlock *mdb ) +{ + OSErr err; + + if ( (SWAP_BE16 (mdb->drSigWord) == kHFSPlusSigWord) || (SWAP_BE16 (mdb->drSigWord) == kHFSSigWord) ) // if HFS or HFS Plus volume + { + if ( (SWAP_BE32 (mdb->drAlBlkSiz) != 0) && ((SWAP_BE32 (mdb->drAlBlkSiz) & 0x01FF) == 0) ) // non zero multiple of 512 + err = noErr; + else + err = badMDBErr; + } + else + { + err = noMacDskErr; + } + + return( err ); +} + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +// Routine: ValidVolumeHeader +// +// Function: Run some sanity checks to make sure the VolumeHeader is valid +// +// Result: error +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ +OSErr ValidVolumeHeader( HFSPlusVolumeHeader *volumeHeader ) +{ + OSErr err; + + 
if ( SWAP_BE16 (volumeHeader->signature) == kHFSPlusSigWord && SWAP_BE16 (volumeHeader->version) == kHFSPlusVersion ) + { + if ( (SWAP_BE32 (volumeHeader->blockSize) != 0) && ((SWAP_BE32 (volumeHeader->blockSize) & 0x01FF) == 0) ) // non zero multiple of 512 + err = noErr; + else + err = badMDBErr; //€€ I want badVolumeHeaderErr in Errors.i + } + else + { + err = noMacDskErr; + } + + return( err ); +} + + + + +//_______________________________________________________________________ +// +// Routine: CountRootFiles +// +// Input: pointer to VCB +// +// Function: Return a count of the number of files and folders in +// the root directory of a volume. For HFS volumes, this +// is maintained in the VCB (and MDB). For HFS Plus volumes, +// we get the valence of the root directory from its catalog +// record. +//_______________________________________________________________________ +UInt16 CountRootFiles(ExtendedVCB *vcb) +{ + OSErr err; + CatalogNodeData catNodeData; + UInt32 hint; + UInt16 rootCount; + +// if (vcb->vcbSigWord == kHFSSigWord || vcb->vcbFSID != 0) { +// return vcb->vcbNmFls; +// } + + // Here, it's an HFS Plus volume, so get the valence from the root + // directory's catalog record. 
+ + rootCount = 0; + + INIT_CATALOGDATA(&catNodeData, kCatNameNoCopyName); + + err = GetCatalogNode( vcb, kHFSRootFolderID, nil, kUndefinedStrLen, kNoHint, &catNodeData, &hint ); + if ( err == noErr ) { + if (catNodeData.cnd_valence < 65536) + rootCount = catNodeData.cnd_valence; + else + rootCount = 65535; // if the value is too large, pin it + } + CLEAN_CATALOGDATA(&catNodeData); + + return rootCount; +} + + + +//_______________________________________________________________________ +// +// Routine: FlushVolumeControlBlock +// Arguments: ExtendedVCB *vcb +// Output: OSErr err +// +// Function: Flush volume information to either the VolumeHeader of the Master Directory Block +//_______________________________________________________________________ + +OSErr FlushVolumeControlBlock( ExtendedVCB *vcb ) +{ + OSErr err; + + if ( ! IsVCBDirty( vcb ) ) // if it's not dirty + return( noErr ); + + if ( vcb->vcbSigWord == kHFSPlusSigWord ) + { + err = C_FlushMDB( vcb ); // Go flush the VCB info BEFORE close + } + else + { + // This routine doesn't really return an error!!! 
+ // So for now, we will just return noErr + err = C_FlushMDB( vcb ); // Go flush the VCB info BEFORE close + return( noErr ); + } + + return( err ); +} + + +//‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹‹ + +OSErr GetVolumeNameFromCatalog( ExtendedVCB *vcb ) +{ + CatalogNodeData nodeData; + UInt32 hint; + OSErr err; + + INIT_CATALOGDATA(&nodeData, 0); + + err = GetCatalogNode( vcb, kHFSRootFolderID, NULL, kUndefinedStrLen, kNoHint, &nodeData, &hint ); + + if ( err == noErr ) + { + BlockMoveData( nodeData.cnm_nameptr, vcb->vcbVN, min(255, nodeData.cnm_length)); + vcb->volumeNameEncodingHint = nodeData.cnd_textEncoding; + + /* HFS+ uses the root directory's create date since its in GMT */ + if (vcb->vcbSigWord == kHFSPlusSigWord) + vcb->vcbCrDate = nodeData.cnd_createDate; + } + + CLEAN_CATALOGDATA(&nodeData); + + return err; +} diff --git a/bsd/hfs/hfscommon/Unicode/UCStringCompareData.h b/bsd/hfs/hfscommon/Unicode/UCStringCompareData.h new file mode 100644 index 000000000..b7b7deaf1 --- /dev/null +++ b/bsd/hfs/hfscommon/Unicode/UCStringCompareData.h @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: UCStringCompareData.h + + Contains: xxx put contents here xxx + + Version: HFS Plus 1.0 + + Copyright: © 1997-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Mark Day + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (djb) Don Brady + (msd) Mark Day + + Change History (most recent first): + + 11/16/97 djb msd. Updated lower case table with ignorable mappings and less + aggressive case folding. Added a trailing comma to make the + StreamEdit script work right. Removed Unicode decomposition + tables. Case folding tables convert u+0000 to 0xFFFF so that the + NUL character can appear in names, while still allowing a zero + value to be a sentinel. (From Andy Daniels, 11/10/97) + 8/26/97 djb Tweak gLowerCaseTable to make it faster. + 8/14/97 djb Add RelString compare table... + 4/24/97 djb first checked in + 2/27/97 msd first checked in +*/ + +/* + * For better performance, the case folding table for basic latin + * is separate from the others. This eliminates the extra lookup + * to get the offset to this table. 
+ * + * Note: 0x0000 now maps to 0 so that it will be ignored + */ +UInt16 gLatinCaseFold[] = { + /* 0 */ 0xFFFF, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F, + /* 1 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F, + /* 2 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F, + /* 3 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F, + /* 4 */ 0x0040, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 5 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F, + /* 6 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F, + /* 7 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F, + /* 8 */ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F, + /* 9 */ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F, + /* A */ 0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7, 0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF, + /* B */ 0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7, 0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF, + /* C */ 0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00E6, 0x00C7, 0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF, + /* D */ 0x00F0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7, 0x00F8, 0x00D9, 0x00DA, 0x00DB, 
0x00DC, 0x00DD, 0x00FE, 0x00DF, + /* E */ 0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7, 0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF, + /* F */ 0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7, 0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF, +}; + +/* The lower case table consists of a 256-entry high-byte table followed by some number of + 256-entry subtables. The high-byte table contains either an offset to the subtable for + characters with that high byte or zero, which means that there are no case mappings or + ignored characters in that block. Ignored characters are mapped to zero. + */ + +UInt16 gLowerCaseTable[] = { + + /* High-byte indices ( == 0 iff no case mapping and no ignorables ) */ + + /* 0 */ 0x0000, 0x0100, 0x0000, 0x0200, 0x0300, 0x0400, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 1 */ 0x0500, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 2 */ 0x0600, 0x0700, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 3 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 4 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 5 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 6 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 7 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 8 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, + /* 9 */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* A */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* B */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* C */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* D */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* E */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* F */ 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0800, 0x0900, + + /* Table 1 (for high byte 0x01) */ + + /* 0 */ 0x0100, 0x0101, 0x0102, 0x0103, 0x0104, 0x0105, 0x0106, 0x0107, 0x0108, 0x0109, 0x010A, 0x010B, 0x010C, 0x010D, 0x010E, 0x010F, + /* 1 */ 0x0111, 0x0111, 0x0112, 0x0113, 0x0114, 0x0115, 0x0116, 0x0117, 0x0118, 0x0119, 0x011A, 0x011B, 0x011C, 0x011D, 0x011E, 0x011F, + /* 2 */ 0x0120, 0x0121, 0x0122, 0x0123, 0x0124, 0x0125, 0x0127, 0x0127, 0x0128, 0x0129, 0x012A, 0x012B, 0x012C, 0x012D, 0x012E, 0x012F, + /* 3 */ 0x0130, 0x0131, 0x0133, 0x0133, 0x0134, 0x0135, 0x0136, 0x0137, 0x0138, 0x0139, 0x013A, 0x013B, 0x013C, 0x013D, 0x013E, 0x0140, + /* 4 */ 0x0140, 0x0142, 0x0142, 0x0143, 0x0144, 0x0145, 0x0146, 0x0147, 0x0148, 0x0149, 0x014B, 0x014B, 0x014C, 0x014D, 0x014E, 0x014F, + /* 5 */ 0x0150, 0x0151, 0x0153, 0x0153, 0x0154, 0x0155, 0x0156, 0x0157, 0x0158, 0x0159, 0x015A, 0x015B, 0x015C, 0x015D, 0x015E, 0x015F, + /* 6 */ 0x0160, 0x0161, 0x0162, 0x0163, 0x0164, 0x0165, 0x0167, 0x0167, 0x0168, 0x0169, 0x016A, 0x016B, 0x016C, 0x016D, 0x016E, 0x016F, + /* 7 */ 
0x0170, 0x0171, 0x0172, 0x0173, 0x0174, 0x0175, 0x0176, 0x0177, 0x0178, 0x0179, 0x017A, 0x017B, 0x017C, 0x017D, 0x017E, 0x017F, + /* 8 */ 0x0180, 0x0253, 0x0183, 0x0183, 0x0185, 0x0185, 0x0254, 0x0188, 0x0188, 0x0256, 0x0257, 0x018C, 0x018C, 0x018D, 0x01DD, 0x0259, + /* 9 */ 0x025B, 0x0192, 0x0192, 0x0260, 0x0263, 0x0195, 0x0269, 0x0268, 0x0199, 0x0199, 0x019A, 0x019B, 0x026F, 0x0272, 0x019E, 0x0275, + /* A */ 0x01A0, 0x01A1, 0x01A3, 0x01A3, 0x01A5, 0x01A5, 0x01A6, 0x01A8, 0x01A8, 0x0283, 0x01AA, 0x01AB, 0x01AD, 0x01AD, 0x0288, 0x01AF, + /* B */ 0x01B0, 0x028A, 0x028B, 0x01B4, 0x01B4, 0x01B6, 0x01B6, 0x0292, 0x01B9, 0x01B9, 0x01BA, 0x01BB, 0x01BD, 0x01BD, 0x01BE, 0x01BF, + /* C */ 0x01C0, 0x01C1, 0x01C2, 0x01C3, 0x01C6, 0x01C6, 0x01C6, 0x01C9, 0x01C9, 0x01C9, 0x01CC, 0x01CC, 0x01CC, 0x01CD, 0x01CE, 0x01CF, + /* D */ 0x01D0, 0x01D1, 0x01D2, 0x01D3, 0x01D4, 0x01D5, 0x01D6, 0x01D7, 0x01D8, 0x01D9, 0x01DA, 0x01DB, 0x01DC, 0x01DD, 0x01DE, 0x01DF, + /* E */ 0x01E0, 0x01E1, 0x01E2, 0x01E3, 0x01E5, 0x01E5, 0x01E6, 0x01E7, 0x01E8, 0x01E9, 0x01EA, 0x01EB, 0x01EC, 0x01ED, 0x01EE, 0x01EF, + /* F */ 0x01F0, 0x01F3, 0x01F3, 0x01F3, 0x01F4, 0x01F5, 0x01F6, 0x01F7, 0x01F8, 0x01F9, 0x01FA, 0x01FB, 0x01FC, 0x01FD, 0x01FE, 0x01FF, + + /* Table 2 (for high byte 0x03) */ + + /* 0 */ 0x0300, 0x0301, 0x0302, 0x0303, 0x0304, 0x0305, 0x0306, 0x0307, 0x0308, 0x0309, 0x030A, 0x030B, 0x030C, 0x030D, 0x030E, 0x030F, + /* 1 */ 0x0310, 0x0311, 0x0312, 0x0313, 0x0314, 0x0315, 0x0316, 0x0317, 0x0318, 0x0319, 0x031A, 0x031B, 0x031C, 0x031D, 0x031E, 0x031F, + /* 2 */ 0x0320, 0x0321, 0x0322, 0x0323, 0x0324, 0x0325, 0x0326, 0x0327, 0x0328, 0x0329, 0x032A, 0x032B, 0x032C, 0x032D, 0x032E, 0x032F, + /* 3 */ 0x0330, 0x0331, 0x0332, 0x0333, 0x0334, 0x0335, 0x0336, 0x0337, 0x0338, 0x0339, 0x033A, 0x033B, 0x033C, 0x033D, 0x033E, 0x033F, + /* 4 */ 0x0340, 0x0341, 0x0342, 0x0343, 0x0344, 0x0345, 0x0346, 0x0347, 0x0348, 0x0349, 0x034A, 0x034B, 0x034C, 0x034D, 0x034E, 0x034F, + /* 5 */ 0x0350, 0x0351, 0x0352, 
0x0353, 0x0354, 0x0355, 0x0356, 0x0357, 0x0358, 0x0359, 0x035A, 0x035B, 0x035C, 0x035D, 0x035E, 0x035F, + /* 6 */ 0x0360, 0x0361, 0x0362, 0x0363, 0x0364, 0x0365, 0x0366, 0x0367, 0x0368, 0x0369, 0x036A, 0x036B, 0x036C, 0x036D, 0x036E, 0x036F, + /* 7 */ 0x0370, 0x0371, 0x0372, 0x0373, 0x0374, 0x0375, 0x0376, 0x0377, 0x0378, 0x0379, 0x037A, 0x037B, 0x037C, 0x037D, 0x037E, 0x037F, + /* 8 */ 0x0380, 0x0381, 0x0382, 0x0383, 0x0384, 0x0385, 0x0386, 0x0387, 0x0388, 0x0389, 0x038A, 0x038B, 0x038C, 0x038D, 0x038E, 0x038F, + /* 9 */ 0x0390, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* A */ 0x03C0, 0x03C1, 0x03A2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03AA, 0x03AB, 0x03AC, 0x03AD, 0x03AE, 0x03AF, + /* B */ 0x03B0, 0x03B1, 0x03B2, 0x03B3, 0x03B4, 0x03B5, 0x03B6, 0x03B7, 0x03B8, 0x03B9, 0x03BA, 0x03BB, 0x03BC, 0x03BD, 0x03BE, 0x03BF, + /* C */ 0x03C0, 0x03C1, 0x03C2, 0x03C3, 0x03C4, 0x03C5, 0x03C6, 0x03C7, 0x03C8, 0x03C9, 0x03CA, 0x03CB, 0x03CC, 0x03CD, 0x03CE, 0x03CF, + /* D */ 0x03D0, 0x03D1, 0x03D2, 0x03D3, 0x03D4, 0x03D5, 0x03D6, 0x03D7, 0x03D8, 0x03D9, 0x03DA, 0x03DB, 0x03DC, 0x03DD, 0x03DE, 0x03DF, + /* E */ 0x03E0, 0x03E1, 0x03E3, 0x03E3, 0x03E5, 0x03E5, 0x03E7, 0x03E7, 0x03E9, 0x03E9, 0x03EB, 0x03EB, 0x03ED, 0x03ED, 0x03EF, 0x03EF, + /* F */ 0x03F0, 0x03F1, 0x03F2, 0x03F3, 0x03F4, 0x03F5, 0x03F6, 0x03F7, 0x03F8, 0x03F9, 0x03FA, 0x03FB, 0x03FC, 0x03FD, 0x03FE, 0x03FF, + + /* Table 3 (for high byte 0x04) */ + + /* 0 */ 0x0400, 0x0401, 0x0452, 0x0403, 0x0454, 0x0455, 0x0456, 0x0407, 0x0458, 0x0459, 0x045A, 0x045B, 0x040C, 0x040D, 0x040E, 0x045F, + /* 1 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 0x0436, 0x0437, 0x0438, 0x0419, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 2 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 3 */ 0x0430, 0x0431, 0x0432, 0x0433, 0x0434, 0x0435, 
0x0436, 0x0437, 0x0438, 0x0439, 0x043A, 0x043B, 0x043C, 0x043D, 0x043E, 0x043F, + /* 4 */ 0x0440, 0x0441, 0x0442, 0x0443, 0x0444, 0x0445, 0x0446, 0x0447, 0x0448, 0x0449, 0x044A, 0x044B, 0x044C, 0x044D, 0x044E, 0x044F, + /* 5 */ 0x0450, 0x0451, 0x0452, 0x0453, 0x0454, 0x0455, 0x0456, 0x0457, 0x0458, 0x0459, 0x045A, 0x045B, 0x045C, 0x045D, 0x045E, 0x045F, + /* 6 */ 0x0461, 0x0461, 0x0463, 0x0463, 0x0465, 0x0465, 0x0467, 0x0467, 0x0469, 0x0469, 0x046B, 0x046B, 0x046D, 0x046D, 0x046F, 0x046F, + /* 7 */ 0x0471, 0x0471, 0x0473, 0x0473, 0x0475, 0x0475, 0x0476, 0x0477, 0x0479, 0x0479, 0x047B, 0x047B, 0x047D, 0x047D, 0x047F, 0x047F, + /* 8 */ 0x0481, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048A, 0x048B, 0x048C, 0x048D, 0x048E, 0x048F, + /* 9 */ 0x0491, 0x0491, 0x0493, 0x0493, 0x0495, 0x0495, 0x0497, 0x0497, 0x0499, 0x0499, 0x049B, 0x049B, 0x049D, 0x049D, 0x049F, 0x049F, + /* A */ 0x04A1, 0x04A1, 0x04A3, 0x04A3, 0x04A5, 0x04A5, 0x04A7, 0x04A7, 0x04A9, 0x04A9, 0x04AB, 0x04AB, 0x04AD, 0x04AD, 0x04AF, 0x04AF, + /* B */ 0x04B1, 0x04B1, 0x04B3, 0x04B3, 0x04B5, 0x04B5, 0x04B7, 0x04B7, 0x04B9, 0x04B9, 0x04BB, 0x04BB, 0x04BD, 0x04BD, 0x04BF, 0x04BF, + /* C */ 0x04C0, 0x04C1, 0x04C2, 0x04C4, 0x04C4, 0x04C5, 0x04C6, 0x04C8, 0x04C8, 0x04C9, 0x04CA, 0x04CC, 0x04CC, 0x04CD, 0x04CE, 0x04CF, + /* D */ 0x04D0, 0x04D1, 0x04D2, 0x04D3, 0x04D4, 0x04D5, 0x04D6, 0x04D7, 0x04D8, 0x04D9, 0x04DA, 0x04DB, 0x04DC, 0x04DD, 0x04DE, 0x04DF, + /* E */ 0x04E0, 0x04E1, 0x04E2, 0x04E3, 0x04E4, 0x04E5, 0x04E6, 0x04E7, 0x04E8, 0x04E9, 0x04EA, 0x04EB, 0x04EC, 0x04ED, 0x04EE, 0x04EF, + /* F */ 0x04F0, 0x04F1, 0x04F2, 0x04F3, 0x04F4, 0x04F5, 0x04F6, 0x04F7, 0x04F8, 0x04F9, 0x04FA, 0x04FB, 0x04FC, 0x04FD, 0x04FE, 0x04FF, + + /* Table 4 (for high byte 0x05) */ + + /* 0 */ 0x0500, 0x0501, 0x0502, 0x0503, 0x0504, 0x0505, 0x0506, 0x0507, 0x0508, 0x0509, 0x050A, 0x050B, 0x050C, 0x050D, 0x050E, 0x050F, + /* 1 */ 0x0510, 0x0511, 0x0512, 0x0513, 0x0514, 0x0515, 0x0516, 0x0517, 0x0518, 
0x0519, 0x051A, 0x051B, 0x051C, 0x051D, 0x051E, 0x051F, + /* 2 */ 0x0520, 0x0521, 0x0522, 0x0523, 0x0524, 0x0525, 0x0526, 0x0527, 0x0528, 0x0529, 0x052A, 0x052B, 0x052C, 0x052D, 0x052E, 0x052F, + /* 3 */ 0x0530, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, + /* 4 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, + /* 5 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0557, 0x0558, 0x0559, 0x055A, 0x055B, 0x055C, 0x055D, 0x055E, 0x055F, + /* 6 */ 0x0560, 0x0561, 0x0562, 0x0563, 0x0564, 0x0565, 0x0566, 0x0567, 0x0568, 0x0569, 0x056A, 0x056B, 0x056C, 0x056D, 0x056E, 0x056F, + /* 7 */ 0x0570, 0x0571, 0x0572, 0x0573, 0x0574, 0x0575, 0x0576, 0x0577, 0x0578, 0x0579, 0x057A, 0x057B, 0x057C, 0x057D, 0x057E, 0x057F, + /* 8 */ 0x0580, 0x0581, 0x0582, 0x0583, 0x0584, 0x0585, 0x0586, 0x0587, 0x0588, 0x0589, 0x058A, 0x058B, 0x058C, 0x058D, 0x058E, 0x058F, + /* 9 */ 0x0590, 0x0591, 0x0592, 0x0593, 0x0594, 0x0595, 0x0596, 0x0597, 0x0598, 0x0599, 0x059A, 0x059B, 0x059C, 0x059D, 0x059E, 0x059F, + /* A */ 0x05A0, 0x05A1, 0x05A2, 0x05A3, 0x05A4, 0x05A5, 0x05A6, 0x05A7, 0x05A8, 0x05A9, 0x05AA, 0x05AB, 0x05AC, 0x05AD, 0x05AE, 0x05AF, + /* B */ 0x05B0, 0x05B1, 0x05B2, 0x05B3, 0x05B4, 0x05B5, 0x05B6, 0x05B7, 0x05B8, 0x05B9, 0x05BA, 0x05BB, 0x05BC, 0x05BD, 0x05BE, 0x05BF, + /* C */ 0x05C0, 0x05C1, 0x05C2, 0x05C3, 0x05C4, 0x05C5, 0x05C6, 0x05C7, 0x05C8, 0x05C9, 0x05CA, 0x05CB, 0x05CC, 0x05CD, 0x05CE, 0x05CF, + /* D */ 0x05D0, 0x05D1, 0x05D2, 0x05D3, 0x05D4, 0x05D5, 0x05D6, 0x05D7, 0x05D8, 0x05D9, 0x05DA, 0x05DB, 0x05DC, 0x05DD, 0x05DE, 0x05DF, + /* E */ 0x05E0, 0x05E1, 0x05E2, 0x05E3, 0x05E4, 0x05E5, 0x05E6, 0x05E7, 0x05E8, 0x05E9, 0x05EA, 0x05EB, 0x05EC, 0x05ED, 0x05EE, 0x05EF, + /* F */ 0x05F0, 0x05F1, 0x05F2, 0x05F3, 0x05F4, 0x05F5, 0x05F6, 0x05F7, 0x05F8, 0x05F9, 0x05FA, 0x05FB, 0x05FC, 0x05FD, 0x05FE, 0x05FF, + + /* 
Table 5 (for high byte 0x10) */ + + /* 0 */ 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1008, 0x1009, 0x100A, 0x100B, 0x100C, 0x100D, 0x100E, 0x100F, + /* 1 */ 0x1010, 0x1011, 0x1012, 0x1013, 0x1014, 0x1015, 0x1016, 0x1017, 0x1018, 0x1019, 0x101A, 0x101B, 0x101C, 0x101D, 0x101E, 0x101F, + /* 2 */ 0x1020, 0x1021, 0x1022, 0x1023, 0x1024, 0x1025, 0x1026, 0x1027, 0x1028, 0x1029, 0x102A, 0x102B, 0x102C, 0x102D, 0x102E, 0x102F, + /* 3 */ 0x1030, 0x1031, 0x1032, 0x1033, 0x1034, 0x1035, 0x1036, 0x1037, 0x1038, 0x1039, 0x103A, 0x103B, 0x103C, 0x103D, 0x103E, 0x103F, + /* 4 */ 0x1040, 0x1041, 0x1042, 0x1043, 0x1044, 0x1045, 0x1046, 0x1047, 0x1048, 0x1049, 0x104A, 0x104B, 0x104C, 0x104D, 0x104E, 0x104F, + /* 5 */ 0x1050, 0x1051, 0x1052, 0x1053, 0x1054, 0x1055, 0x1056, 0x1057, 0x1058, 0x1059, 0x105A, 0x105B, 0x105C, 0x105D, 0x105E, 0x105F, + /* 6 */ 0x1060, 0x1061, 0x1062, 0x1063, 0x1064, 0x1065, 0x1066, 0x1067, 0x1068, 0x1069, 0x106A, 0x106B, 0x106C, 0x106D, 0x106E, 0x106F, + /* 7 */ 0x1070, 0x1071, 0x1072, 0x1073, 0x1074, 0x1075, 0x1076, 0x1077, 0x1078, 0x1079, 0x107A, 0x107B, 0x107C, 0x107D, 0x107E, 0x107F, + /* 8 */ 0x1080, 0x1081, 0x1082, 0x1083, 0x1084, 0x1085, 0x1086, 0x1087, 0x1088, 0x1089, 0x108A, 0x108B, 0x108C, 0x108D, 0x108E, 0x108F, + /* 9 */ 0x1090, 0x1091, 0x1092, 0x1093, 0x1094, 0x1095, 0x1096, 0x1097, 0x1098, 0x1099, 0x109A, 0x109B, 0x109C, 0x109D, 0x109E, 0x109F, + /* A */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, + /* B */ 0x10E0, 0x10E1, 0x10E2, 0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, + /* C */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10C6, 0x10C7, 0x10C8, 0x10C9, 0x10CA, 0x10CB, 0x10CC, 0x10CD, 0x10CE, 0x10CF, + /* D */ 0x10D0, 0x10D1, 0x10D2, 0x10D3, 0x10D4, 0x10D5, 0x10D6, 0x10D7, 0x10D8, 0x10D9, 0x10DA, 0x10DB, 0x10DC, 0x10DD, 0x10DE, 0x10DF, + /* E */ 0x10E0, 0x10E1, 0x10E2, 
0x10E3, 0x10E4, 0x10E5, 0x10E6, 0x10E7, 0x10E8, 0x10E9, 0x10EA, 0x10EB, 0x10EC, 0x10ED, 0x10EE, 0x10EF, + /* F */ 0x10F0, 0x10F1, 0x10F2, 0x10F3, 0x10F4, 0x10F5, 0x10F6, 0x10F7, 0x10F8, 0x10F9, 0x10FA, 0x10FB, 0x10FC, 0x10FD, 0x10FE, 0x10FF, + + /* Table 6 (for high byte 0x20) */ + + /* 0 */ 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x200B, 0x0000, 0x0000, 0x0000, 0x0000, + /* 1 */ 0x2010, 0x2011, 0x2012, 0x2013, 0x2014, 0x2015, 0x2016, 0x2017, 0x2018, 0x2019, 0x201A, 0x201B, 0x201C, 0x201D, 0x201E, 0x201F, + /* 2 */ 0x2020, 0x2021, 0x2022, 0x2023, 0x2024, 0x2025, 0x2026, 0x2027, 0x2028, 0x2029, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x202F, + /* 3 */ 0x2030, 0x2031, 0x2032, 0x2033, 0x2034, 0x2035, 0x2036, 0x2037, 0x2038, 0x2039, 0x203A, 0x203B, 0x203C, 0x203D, 0x203E, 0x203F, + /* 4 */ 0x2040, 0x2041, 0x2042, 0x2043, 0x2044, 0x2045, 0x2046, 0x2047, 0x2048, 0x2049, 0x204A, 0x204B, 0x204C, 0x204D, 0x204E, 0x204F, + /* 5 */ 0x2050, 0x2051, 0x2052, 0x2053, 0x2054, 0x2055, 0x2056, 0x2057, 0x2058, 0x2059, 0x205A, 0x205B, 0x205C, 0x205D, 0x205E, 0x205F, + /* 6 */ 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2065, 0x2066, 0x2067, 0x2068, 0x2069, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + /* 7 */ 0x2070, 0x2071, 0x2072, 0x2073, 0x2074, 0x2075, 0x2076, 0x2077, 0x2078, 0x2079, 0x207A, 0x207B, 0x207C, 0x207D, 0x207E, 0x207F, + /* 8 */ 0x2080, 0x2081, 0x2082, 0x2083, 0x2084, 0x2085, 0x2086, 0x2087, 0x2088, 0x2089, 0x208A, 0x208B, 0x208C, 0x208D, 0x208E, 0x208F, + /* 9 */ 0x2090, 0x2091, 0x2092, 0x2093, 0x2094, 0x2095, 0x2096, 0x2097, 0x2098, 0x2099, 0x209A, 0x209B, 0x209C, 0x209D, 0x209E, 0x209F, + /* A */ 0x20A0, 0x20A1, 0x20A2, 0x20A3, 0x20A4, 0x20A5, 0x20A6, 0x20A7, 0x20A8, 0x20A9, 0x20AA, 0x20AB, 0x20AC, 0x20AD, 0x20AE, 0x20AF, + /* B */ 0x20B0, 0x20B1, 0x20B2, 0x20B3, 0x20B4, 0x20B5, 0x20B6, 0x20B7, 0x20B8, 0x20B9, 0x20BA, 0x20BB, 0x20BC, 0x20BD, 0x20BE, 0x20BF, + /* C */ 0x20C0, 0x20C1, 0x20C2, 0x20C3, 0x20C4, 0x20C5, 
0x20C6, 0x20C7, 0x20C8, 0x20C9, 0x20CA, 0x20CB, 0x20CC, 0x20CD, 0x20CE, 0x20CF, + /* D */ 0x20D0, 0x20D1, 0x20D2, 0x20D3, 0x20D4, 0x20D5, 0x20D6, 0x20D7, 0x20D8, 0x20D9, 0x20DA, 0x20DB, 0x20DC, 0x20DD, 0x20DE, 0x20DF, + /* E */ 0x20E0, 0x20E1, 0x20E2, 0x20E3, 0x20E4, 0x20E5, 0x20E6, 0x20E7, 0x20E8, 0x20E9, 0x20EA, 0x20EB, 0x20EC, 0x20ED, 0x20EE, 0x20EF, + /* F */ 0x20F0, 0x20F1, 0x20F2, 0x20F3, 0x20F4, 0x20F5, 0x20F6, 0x20F7, 0x20F8, 0x20F9, 0x20FA, 0x20FB, 0x20FC, 0x20FD, 0x20FE, 0x20FF, + + /* Table 7 (for high byte 0x21) */ + + /* 0 */ 0x2100, 0x2101, 0x2102, 0x2103, 0x2104, 0x2105, 0x2106, 0x2107, 0x2108, 0x2109, 0x210A, 0x210B, 0x210C, 0x210D, 0x210E, 0x210F, + /* 1 */ 0x2110, 0x2111, 0x2112, 0x2113, 0x2114, 0x2115, 0x2116, 0x2117, 0x2118, 0x2119, 0x211A, 0x211B, 0x211C, 0x211D, 0x211E, 0x211F, + /* 2 */ 0x2120, 0x2121, 0x2122, 0x2123, 0x2124, 0x2125, 0x2126, 0x2127, 0x2128, 0x2129, 0x212A, 0x212B, 0x212C, 0x212D, 0x212E, 0x212F, + /* 3 */ 0x2130, 0x2131, 0x2132, 0x2133, 0x2134, 0x2135, 0x2136, 0x2137, 0x2138, 0x2139, 0x213A, 0x213B, 0x213C, 0x213D, 0x213E, 0x213F, + /* 4 */ 0x2140, 0x2141, 0x2142, 0x2143, 0x2144, 0x2145, 0x2146, 0x2147, 0x2148, 0x2149, 0x214A, 0x214B, 0x214C, 0x214D, 0x214E, 0x214F, + /* 5 */ 0x2150, 0x2151, 0x2152, 0x2153, 0x2154, 0x2155, 0x2156, 0x2157, 0x2158, 0x2159, 0x215A, 0x215B, 0x215C, 0x215D, 0x215E, 0x215F, + /* 6 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, + /* 7 */ 0x2170, 0x2171, 0x2172, 0x2173, 0x2174, 0x2175, 0x2176, 0x2177, 0x2178, 0x2179, 0x217A, 0x217B, 0x217C, 0x217D, 0x217E, 0x217F, + /* 8 */ 0x2180, 0x2181, 0x2182, 0x2183, 0x2184, 0x2185, 0x2186, 0x2187, 0x2188, 0x2189, 0x218A, 0x218B, 0x218C, 0x218D, 0x218E, 0x218F, + /* 9 */ 0x2190, 0x2191, 0x2192, 0x2193, 0x2194, 0x2195, 0x2196, 0x2197, 0x2198, 0x2199, 0x219A, 0x219B, 0x219C, 0x219D, 0x219E, 0x219F, + /* A */ 0x21A0, 0x21A1, 0x21A2, 0x21A3, 0x21A4, 0x21A5, 0x21A6, 0x21A7, 0x21A8, 
0x21A9, 0x21AA, 0x21AB, 0x21AC, 0x21AD, 0x21AE, 0x21AF, + /* B */ 0x21B0, 0x21B1, 0x21B2, 0x21B3, 0x21B4, 0x21B5, 0x21B6, 0x21B7, 0x21B8, 0x21B9, 0x21BA, 0x21BB, 0x21BC, 0x21BD, 0x21BE, 0x21BF, + /* C */ 0x21C0, 0x21C1, 0x21C2, 0x21C3, 0x21C4, 0x21C5, 0x21C6, 0x21C7, 0x21C8, 0x21C9, 0x21CA, 0x21CB, 0x21CC, 0x21CD, 0x21CE, 0x21CF, + /* D */ 0x21D0, 0x21D1, 0x21D2, 0x21D3, 0x21D4, 0x21D5, 0x21D6, 0x21D7, 0x21D8, 0x21D9, 0x21DA, 0x21DB, 0x21DC, 0x21DD, 0x21DE, 0x21DF, + /* E */ 0x21E0, 0x21E1, 0x21E2, 0x21E3, 0x21E4, 0x21E5, 0x21E6, 0x21E7, 0x21E8, 0x21E9, 0x21EA, 0x21EB, 0x21EC, 0x21ED, 0x21EE, 0x21EF, + /* F */ 0x21F0, 0x21F1, 0x21F2, 0x21F3, 0x21F4, 0x21F5, 0x21F6, 0x21F7, 0x21F8, 0x21F9, 0x21FA, 0x21FB, 0x21FC, 0x21FD, 0x21FE, 0x21FF, + + /* Table 8 (for high byte 0xFE) */ + + /* 0 */ 0xFE00, 0xFE01, 0xFE02, 0xFE03, 0xFE04, 0xFE05, 0xFE06, 0xFE07, 0xFE08, 0xFE09, 0xFE0A, 0xFE0B, 0xFE0C, 0xFE0D, 0xFE0E, 0xFE0F, + /* 1 */ 0xFE10, 0xFE11, 0xFE12, 0xFE13, 0xFE14, 0xFE15, 0xFE16, 0xFE17, 0xFE18, 0xFE19, 0xFE1A, 0xFE1B, 0xFE1C, 0xFE1D, 0xFE1E, 0xFE1F, + /* 2 */ 0xFE20, 0xFE21, 0xFE22, 0xFE23, 0xFE24, 0xFE25, 0xFE26, 0xFE27, 0xFE28, 0xFE29, 0xFE2A, 0xFE2B, 0xFE2C, 0xFE2D, 0xFE2E, 0xFE2F, + /* 3 */ 0xFE30, 0xFE31, 0xFE32, 0xFE33, 0xFE34, 0xFE35, 0xFE36, 0xFE37, 0xFE38, 0xFE39, 0xFE3A, 0xFE3B, 0xFE3C, 0xFE3D, 0xFE3E, 0xFE3F, + /* 4 */ 0xFE40, 0xFE41, 0xFE42, 0xFE43, 0xFE44, 0xFE45, 0xFE46, 0xFE47, 0xFE48, 0xFE49, 0xFE4A, 0xFE4B, 0xFE4C, 0xFE4D, 0xFE4E, 0xFE4F, + /* 5 */ 0xFE50, 0xFE51, 0xFE52, 0xFE53, 0xFE54, 0xFE55, 0xFE56, 0xFE57, 0xFE58, 0xFE59, 0xFE5A, 0xFE5B, 0xFE5C, 0xFE5D, 0xFE5E, 0xFE5F, + /* 6 */ 0xFE60, 0xFE61, 0xFE62, 0xFE63, 0xFE64, 0xFE65, 0xFE66, 0xFE67, 0xFE68, 0xFE69, 0xFE6A, 0xFE6B, 0xFE6C, 0xFE6D, 0xFE6E, 0xFE6F, + /* 7 */ 0xFE70, 0xFE71, 0xFE72, 0xFE73, 0xFE74, 0xFE75, 0xFE76, 0xFE77, 0xFE78, 0xFE79, 0xFE7A, 0xFE7B, 0xFE7C, 0xFE7D, 0xFE7E, 0xFE7F, + /* 8 */ 0xFE80, 0xFE81, 0xFE82, 0xFE83, 0xFE84, 0xFE85, 0xFE86, 0xFE87, 0xFE88, 0xFE89, 0xFE8A, 0xFE8B, 
0xFE8C, 0xFE8D, 0xFE8E, 0xFE8F, + /* 9 */ 0xFE90, 0xFE91, 0xFE92, 0xFE93, 0xFE94, 0xFE95, 0xFE96, 0xFE97, 0xFE98, 0xFE99, 0xFE9A, 0xFE9B, 0xFE9C, 0xFE9D, 0xFE9E, 0xFE9F, + /* A */ 0xFEA0, 0xFEA1, 0xFEA2, 0xFEA3, 0xFEA4, 0xFEA5, 0xFEA6, 0xFEA7, 0xFEA8, 0xFEA9, 0xFEAA, 0xFEAB, 0xFEAC, 0xFEAD, 0xFEAE, 0xFEAF, + /* B */ 0xFEB0, 0xFEB1, 0xFEB2, 0xFEB3, 0xFEB4, 0xFEB5, 0xFEB6, 0xFEB7, 0xFEB8, 0xFEB9, 0xFEBA, 0xFEBB, 0xFEBC, 0xFEBD, 0xFEBE, 0xFEBF, + /* C */ 0xFEC0, 0xFEC1, 0xFEC2, 0xFEC3, 0xFEC4, 0xFEC5, 0xFEC6, 0xFEC7, 0xFEC8, 0xFEC9, 0xFECA, 0xFECB, 0xFECC, 0xFECD, 0xFECE, 0xFECF, + /* D */ 0xFED0, 0xFED1, 0xFED2, 0xFED3, 0xFED4, 0xFED5, 0xFED6, 0xFED7, 0xFED8, 0xFED9, 0xFEDA, 0xFEDB, 0xFEDC, 0xFEDD, 0xFEDE, 0xFEDF, + /* E */ 0xFEE0, 0xFEE1, 0xFEE2, 0xFEE3, 0xFEE4, 0xFEE5, 0xFEE6, 0xFEE7, 0xFEE8, 0xFEE9, 0xFEEA, 0xFEEB, 0xFEEC, 0xFEED, 0xFEEE, 0xFEEF, + /* F */ 0xFEF0, 0xFEF1, 0xFEF2, 0xFEF3, 0xFEF4, 0xFEF5, 0xFEF6, 0xFEF7, 0xFEF8, 0xFEF9, 0xFEFA, 0xFEFB, 0xFEFC, 0xFEFD, 0xFEFE, 0x0000, + + /* Table 9 (for high byte 0xFF) */ + + /* 0 */ 0xFF00, 0xFF01, 0xFF02, 0xFF03, 0xFF04, 0xFF05, 0xFF06, 0xFF07, 0xFF08, 0xFF09, 0xFF0A, 0xFF0B, 0xFF0C, 0xFF0D, 0xFF0E, 0xFF0F, + /* 1 */ 0xFF10, 0xFF11, 0xFF12, 0xFF13, 0xFF14, 0xFF15, 0xFF16, 0xFF17, 0xFF18, 0xFF19, 0xFF1A, 0xFF1B, 0xFF1C, 0xFF1D, 0xFF1E, 0xFF1F, + /* 2 */ 0xFF20, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, + /* 3 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, 0xFF58, 0xFF59, 0xFF5A, 0xFF3B, 0xFF3C, 0xFF3D, 0xFF3E, 0xFF3F, + /* 4 */ 0xFF40, 0xFF41, 0xFF42, 0xFF43, 0xFF44, 0xFF45, 0xFF46, 0xFF47, 0xFF48, 0xFF49, 0xFF4A, 0xFF4B, 0xFF4C, 0xFF4D, 0xFF4E, 0xFF4F, + /* 5 */ 0xFF50, 0xFF51, 0xFF52, 0xFF53, 0xFF54, 0xFF55, 0xFF56, 0xFF57, 0xFF58, 0xFF59, 0xFF5A, 0xFF5B, 0xFF5C, 0xFF5D, 0xFF5E, 0xFF5F, + /* 6 */ 0xFF60, 0xFF61, 0xFF62, 0xFF63, 0xFF64, 0xFF65, 0xFF66, 0xFF67, 0xFF68, 0xFF69, 0xFF6A, 0xFF6B, 0xFF6C, 0xFF6D, 0xFF6E, 
0xFF6F, + /* 7 */ 0xFF70, 0xFF71, 0xFF72, 0xFF73, 0xFF74, 0xFF75, 0xFF76, 0xFF77, 0xFF78, 0xFF79, 0xFF7A, 0xFF7B, 0xFF7C, 0xFF7D, 0xFF7E, 0xFF7F, + /* 8 */ 0xFF80, 0xFF81, 0xFF82, 0xFF83, 0xFF84, 0xFF85, 0xFF86, 0xFF87, 0xFF88, 0xFF89, 0xFF8A, 0xFF8B, 0xFF8C, 0xFF8D, 0xFF8E, 0xFF8F, + /* 9 */ 0xFF90, 0xFF91, 0xFF92, 0xFF93, 0xFF94, 0xFF95, 0xFF96, 0xFF97, 0xFF98, 0xFF99, 0xFF9A, 0xFF9B, 0xFF9C, 0xFF9D, 0xFF9E, 0xFF9F, + /* A */ 0xFFA0, 0xFFA1, 0xFFA2, 0xFFA3, 0xFFA4, 0xFFA5, 0xFFA6, 0xFFA7, 0xFFA8, 0xFFA9, 0xFFAA, 0xFFAB, 0xFFAC, 0xFFAD, 0xFFAE, 0xFFAF, + /* B */ 0xFFB0, 0xFFB1, 0xFFB2, 0xFFB3, 0xFFB4, 0xFFB5, 0xFFB6, 0xFFB7, 0xFFB8, 0xFFB9, 0xFFBA, 0xFFBB, 0xFFBC, 0xFFBD, 0xFFBE, 0xFFBF, + /* C */ 0xFFC0, 0xFFC1, 0xFFC2, 0xFFC3, 0xFFC4, 0xFFC5, 0xFFC6, 0xFFC7, 0xFFC8, 0xFFC9, 0xFFCA, 0xFFCB, 0xFFCC, 0xFFCD, 0xFFCE, 0xFFCF, + /* D */ 0xFFD0, 0xFFD1, 0xFFD2, 0xFFD3, 0xFFD4, 0xFFD5, 0xFFD6, 0xFFD7, 0xFFD8, 0xFFD9, 0xFFDA, 0xFFDB, 0xFFDC, 0xFFDD, 0xFFDE, 0xFFDF, + /* E */ 0xFFE0, 0xFFE1, 0xFFE2, 0xFFE3, 0xFFE4, 0xFFE5, 0xFFE6, 0xFFE7, 0xFFE8, 0xFFE9, 0xFFEA, 0xFFEB, 0xFFEC, 0xFFED, 0xFFEE, 0xFFEF, + /* F */ 0xFFF0, 0xFFF1, 0xFFF2, 0xFFF3, 0xFFF4, 0xFFF5, 0xFFF6, 0xFFF7, 0xFFF8, 0xFFF9, 0xFFFA, 0xFFFB, 0xFFFC, 0xFFFD, 0xFFFE, 0xFFFF, +}; + + +/* RelString case folding table */ + +unsigned short gCompareTable[] = { + + /* 0 */ 0x0000, 0x0100, 0x0200, 0x0300, 0x0400, 0x0500, 0x0600, 0x0700, 0x0800, 0x0900, 0x0A00, 0x0B00, 0x0C00, 0x0D00, 0x0E00, 0x0F00, + /* 1 */ 0x1000, 0x1100, 0x1200, 0x1300, 0x1400, 0x1500, 0x1600, 0x1700, 0x1800, 0x1900, 0x1A00, 0x1B00, 0x1C00, 0x1D00, 0x1E00, 0x1F00, + /* 2 */ 0x2000, 0x2100, 0x2200, 0x2300, 0x2400, 0x2500, 0x2600, 0x2700, 0x2800, 0x2900, 0x2A00, 0x2B00, 0x2C00, 0x2D00, 0x2E00, 0x2F00, + /* 3 */ 0x3000, 0x3100, 0x3200, 0x3300, 0x3400, 0x3500, 0x3600, 0x3700, 0x3800, 0x3900, 0x3A00, 0x3B00, 0x3C00, 0x3D00, 0x3E00, 0x3F00, + /* 4 */ 0x4000, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700, 0x4800, 0x4900, 0x4A00, 0x4B00, 0x4C00, 
0x4D00, 0x4E00, 0x4F00, + /* 5 */ 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5A00, 0x5B00, 0x5C00, 0x5D00, 0x5E00, 0x5F00, + + // 0x60 maps to 'a' + // range 0x61 to 0x7a ('a' to 'z') map to upper case + + /* 6 */ 0x4180, 0x4100, 0x4200, 0x4300, 0x4400, 0x4500, 0x4600, 0x4700, 0x4800, 0x4900, 0x4A00, 0x4B00, 0x4C00, 0x4D00, 0x4E00, 0x4F00, + /* 7 */ 0x5000, 0x5100, 0x5200, 0x5300, 0x5400, 0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5A00, 0x7B00, 0x7C00, 0x7D00, 0x7E00, 0x7F00, + + // range 0x80 to 0xd8 gets mapped... + + /* 8 */ 0x4108, 0x410C, 0x4310, 0x4502, 0x4E0A, 0x4F08, 0x5508, 0x4182, 0x4104, 0x4186, 0x4108, 0x410A, 0x410C, 0x4310, 0x4502, 0x4584, + /* 9 */ 0x4586, 0x4588, 0x4982, 0x4984, 0x4986, 0x4988, 0x4E0A, 0x4F82, 0x4F84, 0x4F86, 0x4F08, 0x4F0A, 0x5582, 0x5584, 0x5586, 0x5508, + /* A */ 0xA000, 0xA100, 0xA200, 0xA300, 0xA400, 0xA500, 0xA600, 0x5382, 0xA800, 0xA900, 0xAA00, 0xAB00, 0xAC00, 0xAD00, 0x4114, 0x4F0E, + /* B */ 0xB000, 0xB100, 0xB200, 0xB300, 0xB400, 0xB500, 0xB600, 0xB700, 0xB800, 0xB900, 0xBA00, 0x4192, 0x4F92, 0xBD00, 0x4114, 0x4F0E, + /* C */ 0xC000, 0xC100, 0xC200, 0xC300, 0xC400, 0xC500, 0xC600, 0x2206, 0x2208, 0xC900, 0x2000, 0x4104, 0x410A, 0x4F0A, 0x4F14, 0x4F14, + /* D */ 0xD000, 0xD100, 0x2202, 0x2204, 0x2702, 0x2704, 0xD600, 0xD700, 0x5988, 0xD900, 0xDA00, 0xDB00, 0xDC00, 0xDD00, 0xDE00, 0xDF00, + + /* E */ 0xE000, 0xE100, 0xE200, 0xE300, 0xE400, 0xE500, 0xE600, 0xE700, 0xE800, 0xE900, 0xEA00, 0xEB00, 0xEC00, 0xED00, 0xEE00, 0xEF00, + /* F */ 0xF000, 0xF100, 0xF200, 0xF300, 0xF400, 0xF500, 0xF600, 0xF700, 0xF800, 0xF900, 0xFA00, 0xFB00, 0xFC00, 0xFD00, 0xFE00, 0xFF00, + +}; diff --git a/bsd/hfs/hfscommon/Unicode/UnicodeWrappers.c b/bsd/hfs/hfscommon/Unicode/UnicodeWrappers.c new file mode 100644 index 000000000..90c08d466 --- /dev/null +++ b/bsd/hfs/hfscommon/Unicode/UnicodeWrappers.c @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: UnicodeWrappers.c + + Contains: Wrapper routines for Unicode conversion and comparison. + +*/ +#include +#include + +#include "../../hfs_macos_defs.h" +#include "UCStringCompareData.h" + +#include "../headers/FileMgrInternal.h" +#include "../headers/HFSUnicodeWrappers.h" + +enum { + kMinFileExtensionChars = 1, /* does not include dot */ + kMaxFileExtensionChars = 5 /* does not include dot */ +}; + + +#define EXTENSIONCHAR(c) (((c) >= 0x61 && (c) <= 0x7A) || \ + ((c) >= 0x41 && (c) <= 0x5A) || \ + ((c) >= 0x30 && (c) <= 0x39)) + + +#define IsHexDigit(c) (((c) >= (UInt8) '0' && (c) <= (UInt8) '9') || \ + ((c) >= (UInt8) 'A' && (c) <= (UInt8) 'F')) + + +static void GetFilenameExtension( ItemCount length, ConstUniCharArrayPtr unicodeStr, char* extStr ); + +static void GetFileIDString( HFSCatalogNodeID fileID, char* fileIDStr ); + +static UInt32 HexStringToInteger( UInt32 length, const UInt8 *hexStr ); + + + +/* + * Convert file ID into a hexidecimal string with no leading zeros + */ +static void +GetFileIDString( HFSCatalogNodeID fileID, char * fileIDStr ) +{ + SInt32 i, b; + UInt8 *translate = 
(UInt8 *) "0123456789ABCDEF"; + UInt8 c; + + fileIDStr[0] = '#'; + + for ( i = 0, b = 28; b >= 0; b -= 4 ) { + c = *(translate + ((fileID >> b) & 0x0000000F)); + + /* if its not a leading zero add it to our string */ + if ( (c != (UInt8) '0') || (i > 1) || (b == 0) ) + fileIDStr[++i] = c; + } + + fileIDStr[++i] = '\0'; +} + + +/* + * Get filename extension (if any) as a C string + */ +static void +GetFilenameExtension(ItemCount length, ConstUniCharArrayPtr unicodeStr, char * extStr) +{ + UInt32 i; + UniChar c; + UInt16 extChars; /* number of extension chars (excluding dot) */ + UInt16 maxExtChars; + Boolean foundExtension; + + extStr[0] = '\0'; /* assume there's no extension */ + + if ( length < 3 ) + return; /* "x.y" is smallest possible extension */ + + if ( length < (kMaxFileExtensionChars + 2) ) + maxExtChars = length - 2; /* save room for prefix + dot */ + else + maxExtChars = kMaxFileExtensionChars; + + i = length; + extChars = 0; + foundExtension = false; + + while ( extChars <= maxExtChars ) { + c = unicodeStr[--i]; + + /* look for leading dot */ + if ( c == (UniChar) '.' 
) { + if ( extChars > 0 ) /* cannot end with a dot */ + foundExtension = true; + break; + } + + if ( EXTENSIONCHAR(c) ) + ++extChars; + else + break; + } + + /* if we found one then copy it */ + if ( foundExtension ) { + UInt8 *extStrPtr = extStr; + const UniChar *unicodeStrPtr = &unicodeStr[i]; + + for ( i = 0; i <= extChars; ++i ) + *(extStrPtr++) = (UInt8) *(unicodeStrPtr++); + extStr[extChars + 1] = '\0'; /* terminate extension + dot */ + } +} + + + +/* + * Count filename extension characters (if any) + */ +static UInt32 +CountFilenameExtensionChars( const unsigned char * filename, UInt32 length ) +{ + UInt32 i; + UniChar c; + UInt32 extChars; /* number of extension chars (excluding dot) */ + UInt16 maxExtChars; + Boolean foundExtension; + + if (length == kUndefinedStrLen) + length = strlen(filename); + + if ( length < 3 ) + return 0; /* "x.y" is smallest possible extension */ + + if ( length < (kMaxFileExtensionChars + 2) ) + maxExtChars = length - 2; /* save room for prefix + dot */ + else + maxExtChars = kMaxFileExtensionChars; + + extChars = 0; /* assume there's no extension */ + i = length - 1; /* index to last ascii character */ + foundExtension = false; + + while ( extChars <= maxExtChars ) { + c = filename[i--]; + + /* look for leading dot */ + if ( c == (UInt8) '.' ) { + if ( extChars > 0 ) /* cannot end with a dot */ + return (extChars); + + break; + } + + if ( EXTENSIONCHAR(c) ) + ++extChars; + else + break; + } + + return 0; +} + + +/* + * extract the file id from a mangled name + */ +HFSCatalogNodeID +GetEmbeddedFileID(const unsigned char * filename, UInt32 length, UInt32 *prefixLength) +{ + short extChars; + short i; + UInt8 c; + + *prefixLength = 0; + + if ( filename == NULL ) + return 0; + + if (length == kUndefinedStrLen) + length = strlen(filename); + + if ( length < 28 ) + return 0; /* too small to have been mangled */ + + /* big enough for a file ID (#10) and an extension (.x) ? 
*/ + if ( length > 5 ) + extChars = CountFilenameExtensionChars(filename, length); + else + extChars = 0; + + /* skip over dot plus extension characters */ + if ( extChars > 0 ) + length -= (extChars + 1); + + /* scan for file id digits */ + for ( i = length - 1; i >= 0; --i) { + c = filename[i]; + + /* look for file ID marker */ + if ( c == '#' ) { + if ( (length - i) < 3 ) + break; /* too small to be a file ID */ + + *prefixLength = i; + return HexStringToInteger(length - i - 1, &filename[i+1]); + } + + if ( !IsHexDigit(c) ) + break; /* file ID string must have hex digits */ + } + + return 0; +} + + + +static UInt32 +HexStringToInteger(UInt32 length, const UInt8 *hexStr) +{ + UInt32 value; + short i; + UInt8 c; + const UInt8 *p; + + value = 0; + p = hexStr; + + for ( i = 0; i < length; ++i ) { + c = *p++; + + if (c >= '0' && c <= '9') { + value = value << 4; + value += (UInt32) c - (UInt32) '0'; + } else if (c >= 'A' && c <= 'F') { + value = value << 4; + value += 10 + ((unsigned int) c - (unsigned int) 'A'); + } else { + return 0; /* bad character */ + } + } + + return value; +} + + +/* + * Routine: FastRelString + * + * Output: returns -1 if str1 < str2 + * returns 1 if str1 > str2 + * return 0 if equal + * + */ +extern unsigned short gCompareTable[]; + +SInt32 FastRelString( ConstStr255Param str1, ConstStr255Param str2 ) +{ + UInt16* compareTable; + SInt32 bestGuess; + UInt8 length, length2; + UInt8 delta; + + delta = 0; + length = *(str1++); + length2 = *(str2++); + + if (length == length2) + bestGuess = 0; + else if (length < length2) + { + bestGuess = -1; + delta = length2 - length; + } + else + { + bestGuess = 1; + length = length2; + } + + compareTable = (UInt16*) gCompareTable; + + while (length--) + { + UInt8 aChar, bChar; + + aChar = *(str1++); + bChar = *(str2++); + + if (aChar != bChar) // If they don't match exacly, do case conversion + { + UInt16 aSortWord, bSortWord; + + aSortWord = compareTable[aChar]; + bSortWord = compareTable[bChar]; + + if 
(aSortWord > bSortWord) + return 1; + + if (aSortWord < bSortWord) + return -1; + } + + // If characters match exactly, then go on to next character immediately without + // doing any extra work. + } + + // if you got to here, then return bestGuess + return bestGuess; +} + + + +// +// FastUnicodeCompare - Compare two Unicode strings; produce a relative ordering +// +// IF RESULT +// -------------------------- +// str1 < str2 => -1 +// str1 = str2 => 0 +// str1 > str2 => +1 +// +// The lower case table starts with 256 entries (one for each of the upper bytes +// of the original Unicode char). If that entry is zero, then all characters with +// that upper byte are already case folded. If the entry is non-zero, then it is +// the _index_ (not byte offset) of the start of the sub-table for the characters +// with that upper byte. All ignorable characters are folded to the value zero. +// +// In pseudocode: +// +// Let c = source Unicode character +// Let table[] = lower case table +// +// lower = table[highbyte(c)] +// if (lower == 0) +// lower = c +// else +// lower = table[lower+lowbyte(c)] +// +// if (lower == 0) +// ignore this character +// +// To handle ignorable characters, we now need a loop to find the next valid character. +// Also, we can't pre-compute the number of characters to compare; the string length might +// be larger than the number of non-ignorable characters. Further, we must be able to handle +// ignorable characters at any point in the string, including as the first or last characters. +// We use a zero value as a sentinel to detect both end-of-string and ignorable characters. +// Since the File Manager doesn't prevent the NUL character (value zero) as part of a filename, +// the case mapping table is assumed to map u+0000 to some non-zero value (like 0xFFFF, which is +// an invalid Unicode character). 
+// +// Pseudocode: +// +// while (1) { +// c1 = GetNextValidChar(str1) // returns zero if at end of string +// c2 = GetNextValidChar(str2) +// +// if (c1 != c2) break // found a difference +// +// if (c1 == 0) // reached end of string on both strings at once? +// return 0; // yes, so strings are equal +// } +// +// // When we get here, c1 != c2. So, we just need to determine which one is less. +// if (c1 < c2) +// return -1; +// else +// return 1; +// + +extern UInt16 gLowerCaseTable[]; +extern UInt16 gLatinCaseFold[]; + +SInt32 FastUnicodeCompare ( register ConstUniCharArrayPtr str1, register ItemCount length1, + register ConstUniCharArrayPtr str2, register ItemCount length2) +{ + register UInt16 c1,c2; + register UInt16 temp; + register UInt16* lowerCaseTable; + + lowerCaseTable = (UInt16*) gLowerCaseTable; + + while (1) { + /* Set default values for c1, c2 in case there are no more valid chars */ + c1 = 0; + c2 = 0; + + /* Find next non-ignorable char from str1, or zero if no more */ + while (length1 && c1 == 0) { + c1 = *(str1++); + --length1; + /* check for basic latin first */ + if (c1 < 0x0100) { + c1 = gLatinCaseFold[c1]; + break; + } + /* case fold if neccessary */ + if ((temp = lowerCaseTable[c1>>8]) != 0) + c1 = lowerCaseTable[temp + (c1 & 0x00FF)]; + } + + + /* Find next non-ignorable char from str2, or zero if no more */ + while (length2 && c2 == 0) { + c2 = *(str2++); + --length2; + /* check for basic latin first */ + if (c2 < 0x0100) { + c2 = gLatinCaseFold[c2]; + break; + } + /* case fold if neccessary */ + if ((temp = lowerCaseTable[c2>>8]) != 0) + c2 = lowerCaseTable[temp + (c2 & 0x00FF)]; + } + + if (c1 != c2) // found a difference, so stop looping + break; + + if (c1 == 0) // did we reach the end of both strings at the same time? 
+ return 0; // yes, so strings are equal + } + + if (c1 < c2) + return -1; + else + return 1; +} + + +OSErr +ConvertUnicodeToUTF8Mangled(ByteCount srcLen, ConstUniCharArrayPtr srcStr, ByteCount maxDstLen, + ByteCount *actualDstLen, unsigned char* dstStr, HFSCatalogNodeID cnid) +{ + ByteCount subMaxLen; + size_t utf8len; + char fileIDStr[15]; + char extStr[15]; + + GetFileIDString(cnid, fileIDStr); + GetFilenameExtension(srcLen/sizeof(UniChar), srcStr, extStr); + + /* remove extension chars from source */ + srcLen -= strlen(extStr) * sizeof(UniChar); + subMaxLen = maxDstLen - (strlen(extStr) + strlen(fileIDStr)); + + (void) utf8_encodestr(srcStr, srcLen, dstStr, &utf8len, subMaxLen, ':', 0); + + strcat(dstStr, fileIDStr); + strcat(dstStr, extStr); + *actualDstLen = utf8len + (strlen(extStr) + strlen(fileIDStr)); + + return noErr; +} + diff --git a/bsd/hfs/hfscommon/headers/BTreesInternal.h b/bsd/hfs/hfscommon/headers/BTreesInternal.h new file mode 100644 index 000000000..d692e1a52 --- /dev/null +++ b/bsd/hfs/hfscommon/headers/BTreesInternal.h @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreesInternal.h + + Contains: IPI to File Manager B-tree + + Version: HFS Plus 1.0 + + Copyright: © 1996-1998 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + + 9/22/99 ser Added prototypes for BTGetLastSync and BTSetLastSync + 6/22/98 djb Add ERR_BASE to btree error codes to make them negative (for MacOS X only). + + 7/28/97 msd Add enum for fsBTTimeOutErr. + 7/25/97 DSH Added heuristicHint as parameter to BTSearchRecord. + 7/24/97 djb Add blockReadFromDisk flag to BlockDescriptor. Callbacks now use + a file refNum instead of an FCB. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 6/2/97 DSH Added SetEndOfForkProc() prototype, so Attributes.c can call it + directly. + 5/19/97 djb kMaxKeyLength is now 520. + 4/28/97 djb first checked in + + 3/17/97 DSH Remove Key Comparison prototype, already in FilesInternal.h. + 2/19/97 djb Add SetBlockSizeProcPtr. Add blockSize field to BlockDescriptor. + Remove E_ type error enums. + 1/27/97 djb Include Types.h and FilesInternal.h. + 1/13/97 djb Added kBTreeCurrentRecord for BTIterateRecord. + 1/3/97 djb Added support for large keys. 
+ 12/19/96 djb first checked in + +*/ + +#ifndef __BTREESINTERNAL__ +#define __BTREESINTERNAL__ + +#ifndef __FILEMGRINTERNAL__ +#include "FileMgrInternal.h" +#endif + +enum { + fsBTInvalidHeaderErr = btBadHdr, + fsBTBadRotateErr = dsBadRotate, + fsBTInvalidNodeErr = btBadNode, + fsBTRecordTooLargeErr = btNoFit, + fsBTRecordNotFoundErr = btNotFound, + fsBTDuplicateRecordErr = btExists, + fsBTFullErr = btNoSpaceAvail, + + fsBTInvalidFileErr = ERR_BASE + 0x0302, /* no BTreeCB has been allocated for fork*/ + fsBTrFileAlreadyOpenErr = ERR_BASE + 0x0303, + fsBTInvalidIteratorErr = ERR_BASE + 0x0308, + fsBTEmptyErr = ERR_BASE + 0x030A, + fsBTNoMoreMapNodesErr = ERR_BASE + 0x030B, + fsBTBadNodeSize = ERR_BASE + 0x030C, + fsBTBadNodeType = ERR_BASE + 0x030D, + fsBTInvalidKeyLengthErr = ERR_BASE + 0x030E, + fsBTStartOfIterationErr = ERR_BASE + 0x0353, + fsBTEndOfIterationErr = ERR_BASE + 0x0354, + fsBTUnknownVersionErr = ERR_BASE + 0x0355, + fsBTTreeTooDeepErr = ERR_BASE + 0x0357, + fsIteratorExitedScopeErr = ERR_BASE + 0x0A02, /* iterator exited the scope*/ + fsIteratorScopeExceptionErr = ERR_BASE + 0x0A03, /* iterator is undefined due to error or movement of scope locality*/ + fsUnknownIteratorMovementErr = ERR_BASE + 0x0A04, /* iterator movement is not defined*/ + fsInvalidIterationMovmentErr = ERR_BASE + 0x0A05, /* iterator movement is invalid in current context*/ + fsClientIDMismatchErr = ERR_BASE + 0x0A06, /* wrong client process ID*/ + fsEndOfIterationErr = ERR_BASE + 0x0A07, /* there were no objects left to return on iteration*/ + fsBTTimeOutErr = ERR_BASE + 0x0A08 /* BTree scan interrupted -- no time left for physical I/O */ +}; + +struct BlockDescriptor{ + void *buffer; + void *blockHeader; + ByteCount blockSize; + Boolean blockReadFromDisk; + Byte reserved[3]; +}; +typedef struct BlockDescriptor BlockDescriptor; +typedef BlockDescriptor *BlockDescPtr; + + +struct FSBufferDescriptor { + LogicalAddress bufferAddress; + ByteCount itemSize; + ItemCount itemCount; +}; 
+typedef struct FSBufferDescriptor FSBufferDescriptor; + +typedef FSBufferDescriptor *FSBufferDescriptorPtr; + + +/* + Fork Level Access Method Block get options +*/ +enum { + kGetBlock = 0x00000000, + kForceReadBlock = 0x00000002, //€€ how does this relate to Read/Verify? Do we need this? + kGetEmptyBlock = 0x00000008 +}; +typedef OptionBits GetBlockOptions; + +/* + Fork Level Access Method Block release options +*/ +enum { + kReleaseBlock = 0x00000000, + kForceWriteBlock = 0x00000001, + kMarkBlockDirty = 0x00000002, + kTrashBlock = 0x00000004, + kLockTransaction = 0x00000100 +}; +typedef OptionBits ReleaseBlockOptions; + +typedef UInt32 FSSize; +typedef UInt32 ForkBlockNumber; + +/*============================================================================ + Fork Level Buffered I/O Access Method +============================================================================*/ + +typedef OSStatus (* GetBlockProcPtr) (FileReference fileRefNum, + UInt32 blockNum, + GetBlockOptions options, + BlockDescriptor *block ); + + +typedef OSStatus (* ReleaseBlockProcPtr) (FileReference fileRefNum, + BlockDescPtr blockPtr, + ReleaseBlockOptions options ); + +typedef OSStatus (* SetEndOfForkProcPtr) (FileReference fileRefNum, + FSSize minEOF, + FSSize maxEOF ); + +typedef OSStatus (* SetBlockSizeProcPtr) (FileReference fileRefNum, + ByteCount blockSize, + ItemCount minBlockCount ); + +OSStatus SetEndOfForkProc ( FileReference fileRefNum, FSSize minEOF, FSSize maxEOF ); + + +/* + B*Tree Information Version +*/ + +enum BTreeInformationVersion{ + kBTreeInfoVersion = 0 +}; + +/* + B*Tree Iteration Operation Constants +*/ + +enum BTreeIterationOperations{ + kBTreeFirstRecord, + kBTreeNextRecord, + kBTreePrevRecord, + kBTreeLastRecord, + kBTreeCurrentRecord +}; +typedef UInt16 BTreeIterationOperation; + + +/* + Btree types: 0 is HFS CAT/EXT file, 1~127 are AppleShare B*Tree files, 128~254 unused + hfsBtreeType EQU 0 ; control file + validBTType EQU $80 ; user btree type starts from 
128 + userBT1Type EQU $FF ; 255 is our Btree type. Used by BTInit and BTPatch +*/ + +enum BTreeTypes{ + kHFSBTreeType = 0, // control file + kUserBTreeType = 128, // user btree type starts from 128 + kReservedBTreeType = 255 // +}; + + +typedef BTreeKey *BTreeKeyPtr; + + +/* + BTreeInfoRec Structure - for BTGetInformation +*/ +struct BTreeInfoRec{ + UInt16 version; + UInt16 nodeSize; + UInt16 maxKeyLength; + UInt16 treeDepth; + UInt32 lastfsync; /* Last time that this was fsynced */ + ItemCount numRecords; + ItemCount numNodes; + ItemCount numFreeNodes; + UInt32 reserved; +}; +typedef struct BTreeInfoRec BTreeInfoRec; +typedef BTreeInfoRec *BTreeInfoPtr; + +/* + BTreeHint can never be exported to the outside. Use UInt32 BTreeHint[4], + UInt8 BTreeHint[16], etc. + */ +struct BTreeHint{ + ItemCount writeCount; + UInt32 nodeNum; // node the key was last seen in + UInt16 index; // index then key was last seen at + UInt16 reserved1; + UInt32 reserved2; +}; +typedef struct BTreeHint BTreeHint; +typedef BTreeHint *BTreeHintPtr; + +/* + BTree Iterator +*/ +struct BTreeIterator{ + BTreeHint hint; + UInt16 version; + UInt16 reserved; + UInt32 hitCount; // Total number of leaf records hit + UInt32 maxLeafRecs; // Max leaf records over iteration + BTreeKey key; +}; +typedef struct BTreeIterator BTreeIterator; +typedef BTreeIterator *BTreeIteratorPtr; + + +/*============================================================================ + B*Tree SPI +============================================================================*/ + +/* + Key Comparison Function ProcPtr Type - for BTOpenPath +*/ +//typedef SInt32 (* KeyCompareProcPtr)(BTreeKeyPtr a, BTreeKeyPtr b); + + +typedef SInt32 (* IterateCallBackProcPtr)(BTreeKeyPtr key, void * record, UInt16 recordLen, void * state); + +extern OSStatus BTOpenPath (FCB *filePtr, + KeyCompareProcPtr keyCompareProc, + GetBlockProcPtr getBlockProc, + ReleaseBlockProcPtr releaseBlockProc, + SetEndOfForkProcPtr setEndOfForkProc, + 
SetBlockSizeProcPtr setBlockSizeProc ); + +extern OSStatus BTClosePath (FCB *filePtr ); + + +extern OSStatus BTSearchRecord (FCB *filePtr, + BTreeIterator *searchIterator, + UInt32 heuristicHint, + FSBufferDescriptor *btRecord, + UInt16 *recordLen, + BTreeIterator *resultIterator ); + +extern OSStatus BTIterateRecord (FCB *filePtr, + BTreeIterationOperation operation, + BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + UInt16 *recordLen ); + + +extern OSStatus BTIterateRecords(FCB *filePtr, BTreeIterationOperation operation, BTreeIterator *iterator, + IterateCallBackProcPtr callBackProc, void * callBackState); + +extern OSStatus BTInsertRecord (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *btrecord, + UInt16 recordLen ); + +extern OSStatus BTReplaceRecord (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *btRecord, + UInt16 recordLen ); + + +extern OSStatus BTDeleteRecord (FCB *filePtr, + BTreeIterator *iterator ); + +extern OSStatus BTGetInformation (FCB *filePtr, + UInt16 version, + BTreeInfoRec *info ); + +extern OSStatus BTFlushPath (FCB *filePtr ); + +extern OSStatus BTReloadData (FCB *filePtr); + +extern OSStatus BTInvalidateHint (BTreeIterator *iterator ); + +extern OSStatus BTGetLastSync (FCB *filePtr, + UInt32 *lastfsync ); + +extern OSStatus BTSetLastSync (FCB *filePtr, + UInt32 lastfsync ); + +#endif // __BTREESINTERNAL__ diff --git a/bsd/hfs/hfscommon/headers/BTreesPrivate.h b/bsd/hfs/hfscommon/headers/BTreesPrivate.h new file mode 100644 index 000000000..4fce7de21 --- /dev/null +++ b/bsd/hfs/hfscommon/headers/BTreesPrivate.h @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: BTreesPrivate.h + + Contains: Private interface file for the BTree Module. + + Version: xxx put the technology version here xxx + + Written by: Gordon Sheridan and Bill Bruffey + + Copyright: © 1992-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contact: Mark Day + + Technology: File Systems + + Writers: + + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + (ser) Scott Roberts + (dkh) Dave Heller + + Change History (most recent first): + 3/19/99 djb Disable MoveRecordsLeft/Right macros since bcopy is broken. + + 8/10/98 djb Removed unused BTreeIterator from BTreeControlBlock, fixed alignment. + + 9/4/97 djb Convert MoveRecordsLeft and GetLeftSiblingNode to macros. + 7/24/97 djb Add macro for GetRecordAddress (was a function before). + 7/21/97 msd GetRecordByIndex now returns an OSStatus. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 4/23/97 djb first checked in + + 3/17/97 DSH Added a refCon field to BTreeControlBlock, for DFA use, to point + to additional data. Fixed Panic macros for use with SC. + 2/19/97 djb Add InsertKey struct. Moved on-disk definitions to + HFSBTreesPriv.h + 1/27/97 djb InsertTree and DeleteTree are now recursive and support variable + sized index keys. + 1/15/97 djb Move GetFileRefNumFromFCB macro to FilesInternal.h. 
Added + kBTVariableIndexKeysMask. + 1/3/97 djb Added support for large keys. + 12/19/96 djb first checked in + + History applicable to original Scarecrow Design: + + <7> 10/25/96 ser Changing for new VFPI + <6> 10/18/96 ser Converting over VFPI changes + <5> 9/17/96 dkh More BTree statistics + <4> 9/16/96 dkh Revised BTree statistics + <3> 6/20/96 dkh Radar #1358740. Switch from using Pools to debug MemAllocators. + <2> 12/7/95 dkh D10E2 build. Changed usage of Ref data type to LogicalAddress. + <1> 10/18/95 rst Moved from Scarecrow project. + + <19> 11/22/94 djb Add prototype for GetMapNode + <18> 11/16/94 prp Add IsItAHint routine prototype. + <17> 9/30/94 prp Get in sync with D2 interface changes. + <16> 7/25/94 wjk Eliminate usage of BytePtr in favor of UInt8 *. + <15> 7/22/94 wjk Convert to the new set of header files. + <14> 5/31/94 srs Moved Btree types to public interface + <13> 12/9/93 wjk Add 68k alignment pragma's around persistent structures. + <12> 11/30/93 wjk Move from Makefiles to BuildFiles. Fit into the ModernOS and + NRCmds environments. + <11> 11/23/93 wjk Changes required to compile on the RS6000. + <10> 8/30/93 CH Removed the M_ExitOnError and M_ReturnErrorIf macros which were + already defined in FileSystemPriv.h (included here). + <9> 8/30/93 CH Added parens around the M_ReturnErrorIf macro. + <8> 5/21/93 gs Add kBadClose flag. Add some prototypes for internal routines. + <7> 5/10/93 gs Change Ptr to BytePtr. Move BTreeTypes to BTree.h. Add + DeleteTree prototype. + <6> 3/23/93 gs Remove mysterious "flags" field from HeaderRec structure. Move + prototypes of private functions to top of respective source + files. + <5> 2/8/93 gs Update to use FSAgent.h Get/Release/SetEOF/SetBlockSize + procPtrs. Add UpdateNode routine. + <4> 12/10/92 gs Add Key Descriptor function declarations. + <3> 12/8/92 gs Add HeaderRec structure and incorporate review feedback. 
+ <2> 12/2/92 gs Add GetNode and ReleaseNode callback procptrs to BTree CB, and + add internal function declarations. + <1> 11/15/92 gs first checked in + +*/ + +#ifndef __BTREESPRIVATE__ +#define __BTREESPRIVATE__ + +#include "../../hfs_macos_defs.h" + +#ifndef __FILEMGRINTERNAL__ +#include "FileMgrInternal.h" +#endif + +#ifndef __BTREESINTERNAL__ +#include "BTreesInternal.h" +#endif + + +/////////////////////////////////// Constants /////////////////////////////////// + +#define kBTreeVersion 1 +#define kMaxTreeDepth 16 + + +#define kHeaderNodeNum 0 +#define kKeyDescRecord 1 + + +// Header Node Record Offsets +enum { + kHeaderRecOffset = 0x000E, + kKeyDescRecOffset = 0x0078, + kHeaderMapRecOffset = 0x00F8 +}; + +#define kMinNodeSize 512 + +#define kMinRecordSize 6 + // where is minimum record size enforced? + +// miscellaneous BTree constants +enum { + kOffsetSize = 2 +}; + +// Insert Operations +typedef enum { + kInsertRecord = 0, + kReplaceRecord = 1 +} InsertType; + +// illegal string attribute bits set in mask +#define kBadStrAttribMask 0xCF + + + +//////////////////////////////////// Macros ///////////////////////////////////// + +#define M_NodesInMap(mapSize) ((mapSize) << 3) + +#define M_ClearBitNum(integer,bitNumber) ((integer) &= (~(1<<(bitNumber)))) +#define M_SetBitNum(integer,bitNumber) ((integer) |= (1<<(bitNumber))) +#define M_IsOdd(integer) (((integer) & 1) != 0) +#define M_IsEven(integer) (((integer) & 1) == 0) +#define M_BTreeHeaderDirty(btreePtr) btreePtr->flags |= kBTHeaderDirty + +#define M_MapRecordSize(nodeSize) (nodeSize - sizeof (BTNodeDescriptor) - 6) +#define M_HeaderMapRecordSize(nodeSize) (nodeSize - sizeof(BTNodeDescriptor) - sizeof(BTHeaderRec) - 128 - 8) + +#define M_SWAP_BE16_ClearBitNum(integer,bitNumber) ((integer) &= SWAP_BE16(~(1<<(bitNumber)))) +#define M_SWAP_BE16_SetBitNum(integer,bitNumber) ((integer) |= SWAP_BE16(1<<(bitNumber))) + +///////////////////////////////////// Types ///////////////////////////////////// + 
+typedef struct BTreeControlBlock { // fields specific to BTree CBs + + UInt8 reserved1; // keep for alignment with old style fields + UInt8 btreeType; + UInt16 treeDepth; + FileReference fileRefNum; // refNum of btree file + KeyCompareProcPtr keyCompareProc; + UInt32 rootNode; + UInt32 leafRecords; + UInt32 firstLeafNode; + UInt32 lastLeafNode; + UInt16 nodeSize; + UInt16 maxKeyLength; + UInt32 totalNodes; + UInt32 freeNodes; + + UInt16 reserved3; // 4-byte alignment + + // new fields + SInt16 version; + UInt32 flags; // dynamic flags + UInt32 attributes; // persistent flags + UInt32 writeCount; + UInt32 lastfsync; /* Last time that this was fsynced */ + + GetBlockProcPtr getBlockProc; + ReleaseBlockProcPtr releaseBlockProc; + SetEndOfForkProcPtr setEndOfForkProc; + + // statistical information + UInt32 numGetNodes; + UInt32 numGetNewNodes; + UInt32 numReleaseNodes; + UInt32 numUpdateNodes; + UInt32 numMapNodesRead; // map nodes beyond header node + UInt32 numHintChecks; + UInt32 numPossibleHints; // Looks like a formated hint + UInt32 numValidHints; // Hint used to find correct record. + +} BTreeControlBlock, *BTreeControlBlockPtr; + + +UInt32 CalcKeySize(const BTreeControlBlock *btcb, const BTreeKey *key); +#define CalcKeySize(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? ((key)->length16 + 2) : ((key)->length8 + 1) ) + +UInt32 KeyLength(const BTreeControlBlock *btcb, const BTreeKey *key); +#define KeyLength(btcb, key) ( ((btcb)->attributes & kBTBigKeysMask) ? (key)->length16 : (key)->length8 ) + + + +typedef enum { + kBTHeaderDirty = 0x00000001 +} BTreeFlags; + + +typedef SInt8 *NodeBuffer; +typedef BlockDescriptor NodeRec, *NodePtr; //€€ remove this someday... 
+ + + + +//// Tree Path Table - constructed by SearchTree, used by InsertTree and DeleteTree + +typedef struct { + UInt32 node; // node number + UInt16 index; + UInt16 reserved; // align size to a power of 2 +} TreePathRecord, *TreePathRecordPtr; + +typedef TreePathRecord TreePathTable [kMaxTreeDepth]; + + +//// InsertKey - used by InsertTree, InsertLevel and InsertNode + +struct InsertKey { + BTreeKeyPtr keyPtr; + UInt8 * recPtr; + UInt16 keyLength; + UInt16 recSize; + Boolean replacingKey; + Boolean skipRotate; +}; + +typedef struct InsertKey InsertKey; + + +//// For Notational Convenience + +typedef BTNodeDescriptor* NodeDescPtr; +typedef UInt8 *RecordPtr; +typedef BTreeKeyPtr KeyPtr; + + +//////////////////////////////////// Globals //////////////////////////////////// + + +//////////////////////////////////// Macros ///////////////////////////////////// + +#if DEBUG_BUILD + #define Panic( message ) DebugStr( (ConstStr255Param) message ) + #define PanicIf( condition, message ) if ( condition != 0 ) DebugStr( message ) +#else + #define Panic( message ) + #define PanicIf( condition, message ) +#endif + +// Exit function on error +#define M_ExitOnError( result ) if ( ( result ) != noErr ) goto ErrorExit; else ; + +// Test for passed condition and return if true +#define M_ReturnErrorIf( condition, error ) if ( condition ) return( error ) + +//////////////////////////////// Key Operations ///////////////////////////////// + +SInt32 CompareKeys (BTreeControlBlockPtr btreePtr, + KeyPtr searchKey, + KeyPtr trialKey ); + +//////////////////////////////// Map Operations ///////////////////////////////// + +OSStatus AllocateNode (BTreeControlBlockPtr btreePtr, + UInt32 *nodeNum); + +OSStatus FreeNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum); + +OSStatus ExtendBTree (BTreeControlBlockPtr btreePtr, + UInt32 nodes ); + +UInt32 CalcMapBits (BTreeControlBlockPtr btreePtr); + + +//////////////////////////////// Misc Operations //////////////////////////////// + 
+UInt16 CalcKeyRecordSize (UInt16 keySize, + UInt16 recSize ); + +OSStatus VerifyHeader (FCB *filePtr, + BTHeaderRec *header ); + +OSStatus UpdateHeader (BTreeControlBlockPtr btreePtr, + Boolean forceWrite ); + +OSStatus FindIteratorPosition (BTreeControlBlockPtr btreePtr, + BTreeIteratorPtr iterator, + BlockDescriptor *left, + BlockDescriptor *middle, + BlockDescriptor *right, + UInt32 *nodeNum, + UInt16 *index, + Boolean *foundRecord ); + +OSStatus CheckInsertParams (FCB *filePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen ); + +OSStatus TrySimpleReplace (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + BTreeIterator *iterator, + FSBufferDescriptor *record, + UInt16 recordLen, + Boolean *recordInserted ); + +OSStatus IsItAHint (BTreeControlBlockPtr btreePtr, + BTreeIterator *iterator, + Boolean *answer ); + +//////////////////////////////// Node Operations //////////////////////////////// + +//// Node Operations + +OSStatus GetNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *returnNodePtr ); + +OSStatus GetLeftSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *left ); + +#define GetLeftSiblingNode(btree,node,left) GetNode ((btree), ((NodeDescPtr)(node))->bLink, (left)) + +OSStatus GetRightSiblingNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + NodeRec *right ); + +#define GetRightSiblingNode(btree,node,right) GetNode ((btree), ((NodeDescPtr)(node))->fLink, (right)) + + +OSStatus GetNewNode (BTreeControlBlockPtr btreePtr, + UInt32 nodeNum, + NodeRec *returnNodePtr ); + +OSStatus ReleaseNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus TrashNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr ); + +OSStatus UpdateNode (BTreeControlBlockPtr btreePtr, + NodePtr nodePtr, + UInt32 transactionID, + UInt32 flags ); + +OSStatus GetMapNode (BTreeControlBlockPtr btreePtr, + BlockDescriptor *nodePtr, + UInt16 **mapPtr, + UInt16 *mapSize ); + +//// Node Buffer 
Operations + +OSStatus CheckNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +void ClearNode (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +UInt16 GetNodeDataSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + +UInt16 GetNodeFreeSize (BTreeControlBlockPtr btreePtr, + NodeDescPtr node ); + + +//// Record Operations + +Boolean InsertRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + RecordPtr recPtr, + UInt16 recSize ); + +Boolean InsertKeyRecord (BTreeControlBlockPtr btreePtr, + NodeDescPtr node, + UInt16 index, + KeyPtr keyPtr, + UInt16 keyLength, + RecordPtr recPtr, + UInt16 recSize ); + +void DeleteRecord (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + + +Boolean SearchNode (BTreeControlBlockPtr btree, + NodeDescPtr node, + KeyPtr searchKey, + UInt16 *index ); + +OSStatus GetRecordByIndex (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index, + KeyPtr *keyPtr, + UInt8 * *dataPtr, + UInt16 *dataSize ); + +UInt8 * GetRecordAddress (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + +#define GetRecordAddress(btreePtr,node,index) ((UInt8 *)(node) + (*(short *) ((UInt8 *)(node) + (btreePtr)->nodeSize - ((index) << 1) - kOffsetSize))) + + +UInt16 GetRecordSize (BTreeControlBlockPtr btree, + NodeDescPtr node, + UInt16 index ); + +UInt32 GetChildNodeNum (BTreeControlBlockPtr btreePtr, + NodeDescPtr nodePtr, + UInt16 index ); + +void MoveRecordsLeft (UInt8 * src, + UInt8 * dst, + UInt16 bytesToMove ); + +#define MoveRecordsLeft(src,dst,bytes) bcopy((src),(dst),(bytes)) + +void MoveRecordsRight (UInt8 * src, + UInt8 * dst, + UInt16 bytesToMove ); + +#define MoveRecordsRight(src,dst,bytes) bcopy((src),(dst),(bytes)) + + +//////////////////////////////// Tree Operations //////////////////////////////// + +OSStatus SearchTree (BTreeControlBlockPtr btreePtr, + BTreeKeyPtr keyPtr, + TreePathTable treePathTable, + UInt32 *nodeNum, + BlockDescriptor *nodePtr, + UInt16 *index 
); + +OSStatus InsertTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + KeyPtr keyPtr, + UInt8 * recPtr, + UInt16 recSize, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level, + Boolean replacingKey, + UInt32 *insertNode ); + +OSStatus DeleteTree (BTreeControlBlockPtr btreePtr, + TreePathTable treePathTable, + BlockDescriptor *targetNode, + UInt16 index, + UInt16 level ); + +#endif //__BTREESPRIVATE__ diff --git a/bsd/hfs/hfscommon/headers/CatalogPrivate.h b/bsd/hfs/hfscommon/headers/CatalogPrivate.h new file mode 100644 index 000000000..96d0a3aa7 --- /dev/null +++ b/bsd/hfs/hfscommon/headers/CatalogPrivate.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: CatalogPrivate.h + + Contains: Private Catalog Manager interfaces. + + Version: HFS Plus 1.0 + + Copyright: © 1997-1998 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: Don Brady + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (JL) Jim Luther + (msd) Mark Day + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + 11/10/98 djb Remove obsolete PrepareInputName prototype; + 4/6/98 djb Added lock data stuctures and ReleaseCatalogIterator prototype; + 4/6/98 djb Removed CatalogDataCache since its no longer used. + 4/2/98 djb InvalidateCatalogNodeCache does nothing under MacOS X. + 3/31/98 djb Sync up with final HFSVolumes.h header file. + + 11/20/97 djb Radar #2002357. Fixing retry mechanism. + 11/17/97 djb PrepareInputName routine now returns an error. + 11/13/97 djb Radar #1683572. Move CatalogIterator to this file from + FileMgrInternal.i. Double size of short unicode name. + 10/31/97 JL #2000184 - Changed prototypes for CreateFileThreadID and + ExchangeFiles. + 10/17/97 msd In CatalogCacheGlobals, add room for a single UniStr255 so + catalog iterators can step over long Unicode names. + 10/17/97 djb Add ConvertInputNameToUnicode for Catalog Create/Rename. + 10/1/97 djb Change catalog iterator implementation. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 6/24/97 djb Add LocateCatalogNodeByMangledName routine. 
+ 6/24/97 djb first checked in +*/ + +#ifndef __CATALOGPRIVATE__ +#define __CATALOGPRIVATE__ + +#include "../../hfs_format.h" + +#include "FileMgrInternal.h" +#include "BTreesInternal.h" + + #include + +// private catalog data cache + + + +enum { + kCatalogIteratorCount = 16 // total number of Catalog iterators (shared by all HFS/HFS Plus volumes) +}; + + +// Catalog Iterator Name Types +enum { + kShortPascalName, + kShortUnicodeName, + kLongUnicodeName // non-local name +}; + + +// short unicode name (used by CatalogIterator) +struct UniStr63 { + UInt16 length; /* number of unicode characters */ + UniChar unicode[63]; /* unicode characters */ +}; +typedef struct UniStr63 UniStr63; + + +struct CatalogIterator +{ + struct CatalogIterator *nextMRU; // next iterator in MRU order + struct CatalogIterator *nextLRU; // next iterator in LRU order + + ExtendedVCB *volume; + SInt16 currentIndex; + SInt16 reserved; + UInt32 currentOffset; + UInt32 nextOffset; + HFSCatalogNodeID folderID; + + UInt32 btreeNodeHint; // node the key was last seen in + UInt16 btreeIndexHint; // index the key was last seen at + UInt16 nameType; // { 0 = Pascal, 1 = Unicode, 3 = long name} + HFSCatalogNodeID parentID; // parent folder ID + union + { + Str31 pascalName; + UniStr63 unicodeName; + HFSUniStr255 * longNamePtr; + } folderName; + + struct lock__bsd__ iterator_lock; +}; +typedef struct CatalogIterator CatalogIterator; + + +struct CatalogCacheGlobals { + UInt32 iteratorCount; // Number of iterators in cache + CatalogIterator * mru; + CatalogIterator * lru; + UInt32 reserved; + HFSUniStr255 longName; // used by a single kLongUnicodeName iterator + + simple_lock_data_t simplelock; +}; +typedef struct CatalogCacheGlobals CatalogCacheGlobals; + + +// +// Private Catalog Manager Routines (for use only by Catalog Manager, CatSearch and FileID Services) +// + +extern OSErr LocateCatalogThread( const ExtendedVCB *volume, HFSCatalogNodeID nodeID, CatalogRecord *threadData, + UInt16 *threadSize, 
UInt32 *threadHint); + +extern OSErr LocateCatalogNode( const ExtendedVCB *volume, HFSCatalogNodeID folderID, const CatalogName *name, + UInt32 hint, CatalogKey *key, CatalogRecord *data, UInt32 *newHint); + +extern OSErr LocateCatalogNodeByKey ( const ExtendedVCB *volume, UInt32 hint, CatalogKey *keyPtr, + CatalogRecord *dataPtr, UInt32 *newHint ); + +extern OSErr LocateCatalogRecord( const ExtendedVCB *volume, HFSCatalogNodeID folderID, const CatalogName *name, + UInt32 hint, CatalogKey *keyPtr, CatalogRecord *dataPtr, UInt32 *newHint); + +extern OSErr LocateCatalogNodeWithRetry ( const ExtendedVCB *volume, HFSCatalogNodeID folderID, ConstStr31Param pascalName, + CatalogName *unicodeName, UInt32 hint, CatalogKey *keyPtr, CatalogRecord *dataPtr, + UInt32 *newHint ); + +extern OSErr LocateCatalogNodeByMangledName( const ExtendedVCB *volume, HFSCatalogNodeID folderID, + ConstStr31Param name, UInt32 length, + CatalogKey *keyPtr, CatalogRecord *dataPtr, UInt32 *hintPtr ); + +extern OSErr FlushCatalog( ExtendedVCB *volume); + +#define InvalidateCatalogNodeCache(v, pid) + +extern OSErr UpdateFolderCount( ExtendedVCB *volume, HFSCatalogNodeID parentID, const CatalogName *name, SInt16 newType, + UInt32 hint, SInt16 valenceDelta); + +extern UInt16 GetCatalogRecordSize( const CatalogRecord *dataRecord); + +extern void ConvertInputNameToUnicode(ConstStr31Param name, TextEncoding encodingHint, + TextEncoding *actualEncoding, CatalogName *catalogName); + +extern void BuildCatalogKey( HFSCatalogNodeID parentID, const CatalogName *name, Boolean isHFSPlus, + CatalogKey *key); + +extern OSErr BuildCatalogKeyUTF8(ExtendedVCB *volume, HFSCatalogNodeID parentID, const char *name, + UInt32 length, CatalogKey *key, UInt32 *textEncoding); + +extern void UpdateCatalogName( ConstStr31Param srcName, Str31 destName); + +extern UInt32 CatalogNameLength( const CatalogName *name, Boolean isHFSPlus); + +extern void CopyCatalogName( const CatalogName *srcName, CatalogName *dstName, Boolean 
isHFSPLus); + +extern OSErr ResolveFileID( ExtendedVCB *vcb, HFSCatalogNodeID fileID, HFSCatalogNodeID *parentID, Str31 name ); + +#if 0 +extern OSErr CreateFileThreadID( FIDParam *filePB, WDCBRecPtr *wdcbPtr ); + +extern OSErr ExchangeFiles( FIDParam *filePB, WDCBRecPtr *wdcbPtr ); +#endif + +extern void CopyCatalogNodeData( const ExtendedVCB *volume, const CatalogRecord *dataPtr, CatalogNodeData *nodeData); + +extern void UpdateVolumeEncodings( ExtendedVCB *volume, TextEncoding encoding); + +extern void AdjustVolumeCounts( ExtendedVCB *volume, SInt16 type, SInt16 delta ); + + +// Catalog Iterator Routines + +extern CatalogIterator* oGetCatalogIterator( const ExtendedVCB *volume, HFSCatalogNodeID folderID, UInt16 index); +extern CatalogIterator* GetCatalogIterator(ExtendedVCB *volume, HFSCatalogNodeID folderID, UInt32 offset); + +extern OSErr ReleaseCatalogIterator( CatalogIterator *catalogIterator ); + +extern void TrashCatalogIterator( const ExtendedVCB *volume, HFSCatalogNodeID folderID ); + +void AgeCatalogIterator( CatalogIterator *catalogIterator ); + +extern void UpdateBtreeIterator( const CatalogIterator *catalogIterator, BTreeIterator *btreeIterator ); + +extern void UpdateCatalogIterator( const BTreeIterator *btreeIterator, CatalogIterator *catalogIterator ); + + +#endif //__CATALOGPRIVATE__ diff --git a/bsd/hfs/hfscommon/headers/FileMgrInternal.h b/bsd/hfs/hfscommon/headers/FileMgrInternal.h new file mode 100644 index 000000000..24c46bc56 --- /dev/null +++ b/bsd/hfs/hfscommon/headers/FileMgrInternal.h @@ -0,0 +1,913 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: FilesInternal.h + + Contains: IPI for File Manager (HFS Plus) + + Version: HFS Plus 1.0 + + Copyright: © 1996-1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Don Brady + + Other Contacts: Mark Day, Deric horn, Jim Luther + + Technology: File Systems + + Writers: + + (JL) Jim Luther + (msd) Mark Day + (djb) Don Brady + (DSH) Deric Horn + + Change History (most recent first): + 9/12/99 ser Removed FCBs. + 9/9/99 pwd Fixed some VCB fields to be unsigned instead of signed to align + the definitions with the MDB/volume header and actual use [#2355889]. + 9/3/99 ser Added kUndefinedStrLen. + 6/3/99 djb Removed unused/legacy vcb fields from ExtendedVCB. + 11/20/98 djb Add support for UTF-8 names. + 8/31/98 djb Added boolean flag to GetTimeLocal prototype. + 6/30/98 djb Add NodesAreContiguous prototype (for radar #2249539). + 6/22/98 djb Add ERR_BASE to error codes to make them negative (for MacOS X only). + Replace DeallocFile prototype with DeleteFile prototype. + 6/5/98 djb Added CreateFileIDRef prototype; + 6/3/98 djb Add MoveRenameCatalogNode prototype (replaces seperate Move and Rename). + 4/17/98 djb Add VCB locking. + 4/6/98 djb Removed CreateVolumeCatalogCache and DisposeVolumeCatalogCache (obsolete). + 4/2/98 djb UpdateCatalogNode now takes parID and name as input. 
+ 3/31/98 djb Sync up with final HFSVolumes.h header file. + 3/17/98 djb Fixed CreateCatalogNode interface to take kCatalogFolderNode and + kCatalogFileNode as type input. + + 1/29/98 DSH Added TrashVolumeDiskCache prototype for TrashAllFSCaches API + support. + 12/10/97 DSH 2201501, Overload the NodeData valence field for over 2 Gig file + support. + 11/18/97 DSH Conditionalize ou BlockCameFromDisk() macro for DFA + 11/16/97 djb LockMappingTable is now defined in UnicodeConverterPriv.i. + 11/13/97 djb Move CatalogIterator struct to CatalogPrivate.h. Include + UnicodeConverter.i instead of Unicode.i. + 11/3/97 JL #2001483 - changed UnMountVolume's prototype. + <24> 10/31/97 DSH Added consistencyStatus parameter to MountCheck. + 10/21/97 DSH Interfacer tweak + 10/21/97 DSH Conditionalize LMGetFCBTable, LMGetFSVars for DFA to call + through DFAs LowMem accessors. + 10/20/97 msd Add a bytesMaximum parameter to BlockAllocate; removed fcb + parameter. + 10/19/97 msd Bug 1684586. Remove the modifyDate field from CatalogNodeDate. + GetCatInfo and SetCatInfo use only contentModDate. + 10/16/97 djb Add LMGetFSVars and LMGetFCBTable macros, add gBlockCacheDirty + to FSVars, simplified HFS Stack swapping macros. + 10/13/97 DSH Added InitBTreeHeader prototype + 10/13/97 djb Add volumeNameEncodingHint to VCB, add textEncoding to + CatalogNodeData, add gTextEncodingFontName to FSVars. + 10/1/97 DSH Added CheckVolumeConsistency() for 1682475. + 10/1/97 djb New Catalog iterators and Catalog node cache SPI. + 9/17/97 DSH Moved prototype HFSBlocksFromTotalSectors() here for DFA + wrapperless volume support. + 9/16/97 msd Add a field to FSVarsRec to store old WriteXParam address. + 9/15/97 djb Add gBootPToUTable to FSVars (used to bootstrap Unicode). + 9/7/97 djb Add FlushBlockCache prototype. + 9/4/97 djb Add cmParentNotFound error code and reportMissingParent bit. + 9/4/97 msd Remove unused attributes calls. Rename PropertyCloseVolume to + AttributesCloseVolume. 
In CatalogNodeData, replace + attributeModDate with modifyDate. Remove macro LatestModDate. + 8/22/97 djb Add readFromDisk flag to GetCacheBlock and BlockCameFromDisk + macro. + 8/18/97 DSH Override ASM cache accessing routines for DFA to use DFA cache. + 7/28/97 msd Add prototypes for CacheReadInPlace, RemountWrappedVolumes. + 7/25/97 DSH Added GenericMRUCache Routines + 7/22/97 msd In CatalogNodeData, move attributeModDate after backupDate; this + allows SetCatInfo to manipulate the rest of the dates in one + block, the same as in the parameter block. + 7/21/97 djb Add more instrumentation globals (CallProfile). Realign FSVars. + 7/18/97 msd Selector for PBCreateAttribute conflicts with PBGetXCatInfo. The + attribute calls now have selectors in the range $65..$69. + 7/16/97 DSH first checked in + 7/8/97 DSH Added LockMappingTable() until it's moved into the Unicode header + files. + 7/7/97 DSH Taking out changes made in HFS <45> for greater compatibility + with the Tempo installer. + 6/27/97 msd Add PBLongRename SPI. Add prototype for + RenameCatalogNodeUnicode. + 6/26/97 DSH Conditionalized macro LocalToUTC to not look at FSVars for DFA. + 6/25/97 msd Add prototype for HFSCommunicationProc. + 6/24/97 DSH Adding runtime flags to determine unicode usage and installation + status. + 6/24/97 djb Add linkCount to CatalogNodeData. Add LinkCatalogNode prototype. + Move Private Catalog Manager prototypes to CatalogPrivate.h. + 6/20/97 msd Add prototype for CopyCatalogNodeData. In CatalogNodeData, + replaced modifyDate with contentModDate and attributeModDate. + Added a LatestModDate macro. + <15> 6/18/97 djb Add mask to ConversionContext. Add encodingsBitmap to VCB (and + realign some fields). Add gInstalledEncodings to FSVars. + 6/17/97 msd The conversions between local time and UTC have the sign of the + offset backwards. + 6/13/97 djb Removed PrepareOutputName. Changed parameters for + DeleteCatalogNode, MoveCatalogNode, PrepareInputName.
Add + private catalog macros. + 6/12/97 msd Export BlockAllocateAny and UpdateVCBFreeBlks. + 6/12/97 msd Add a parameter block and prototype for an SPI to create very + large files. + 6/9/97 msd Add an offsetToUTC field to FSVarsRec. Add prototypes for + GetTimeUTC and GetTimeLocal; add macros for LocalToUTC and + UTCToLocal. + 6/5/97 msd Add MapLogicalToPhysical (internal routine), PBMapFilePosition + for external use. + 6/4/97 djb More Unicode converter changes (support for non roman scripts). + 6/2/97 msd Add prototype for AdjustEOF. + 5/28/97 msd Add prototypes for attributes SPI, both internal routines and PB + calls. Add FindFileNameGlueRec and FindFileName routine. + Prototypes for FindFileControlBlock and AccessBTree disappeared, + so added again. + 5/20/97 DSH Including LowMemPriv.a in DFA compiles + 5/19/97 djb Add uppLockMappingTable to FSVars. + 5/19/97 djb Add CreateVolumeCatalogCache and DisposeVolumeCatalogCache + prototypes. Remove private CatalogDataCache structure. + 5/16/97 msd Use fixed-size integers for GetBlock_glue and RelBlock_glue so + it will build with compilers other than MPW C and SC. Add + prototype for FillHFSStack, UnMountVolume, and + MakeVCBsExtendedVCBs from VolumeRequests.c. Add prototypes for + CreateEmbeddedVolume and InitUnicodeConverter. + 5/9/97 djb first checked in + 5/7/97 djb Add summary trace data. Shrink FSVars.later to 4 longs. 
+ 4/28/97 djb first checked in + +*/ +#ifndef __FILEMGRINTERNAL__ +#define __FILEMGRINTERNAL__ + +#include <sys/param.h> +#include <sys/vnode.h> + +#include "../../hfs.h" +#include "../../hfs_macos_defs.h" +#include "../../hfs_format.h" + + +#if PRAGMA_ONCE +#pragma once +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if PRAGMA_IMPORT +#pragma import on +#endif + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=mac68k +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(push, 2) +#elif PRAGMA_STRUCT_PACK + #pragma pack(2) +#endif + +/* CatalogNodeID is used to track catalog objects */ +typedef UInt32 HFSCatalogNodeID; + +/* internal error codes*/ + +#if TARGET_API_MACOS_X + #define ERR_BASE -32767 +#else + #define ERR_BASE 0 +#endif + +enum { + /* FXM errors*/ + fxRangeErr = ERR_BASE + 16, /* file position beyond mapped range*/ + fxOvFlErr = ERR_BASE + 17, /* extents file overflow*/ + /* Unicode errors*/ + uniTooLongErr = ERR_BASE + 24, /* Unicode string too long to convert to Str31*/ + uniBufferTooSmallErr = ERR_BASE + 25, /* Unicode output buffer too small*/ + uniNotMappableErr = ERR_BASE + 26, /* Unicode string can't be mapped to given script*/ + /* BTree Manager errors*/ + btNotFound = ERR_BASE + 32, /* record not found*/ + btExists = ERR_BASE + 33, /* record already exists*/ + btNoSpaceAvail = ERR_BASE + 34, /* no available space*/ + btNoFit = ERR_BASE + 35, /* record doesn't fit in node */ + btBadNode = ERR_BASE + 36, /* bad node detected*/ + btBadHdr = ERR_BASE + 37, /* bad BTree header record detected*/ + dsBadRotate = ERR_BASE + 64, /* bad BTree rotate*/ + /* Catalog Manager errors*/ + cmNotFound = ERR_BASE + 48, /* CNode not found*/ + cmExists = ERR_BASE + 49, /* CNode already exists*/ + cmNotEmpty = ERR_BASE + 50, /* directory CNode not empty (valence = 0)*/ + cmRootCN = ERR_BASE + 51, /* invalid reference to root CNode*/ + cmBadNews = ERR_BASE + 52, /* detected bad catalog structure*/ + cmFThdDirErr = ERR_BASE + 53, /* thread belongs to a directory not a file*/ + cmFThdGone =
ERR_BASE + 54, /* file thread doesn't exist*/ + cmParentNotFound = ERR_BASE + 55, /* CNode for parent ID does not exist*/ + /* TFS internal errors*/ + fsDSIntErr = -127 /* Internal file system error*/ +}; + + +/* internal flags*/ + + +enum { + /* File System busy flag:*/ + /* Bit zero of FSBusy (lomem $360) is true when the file system is running.*/ + /* The word at $360 is cleared when the file system is exited. The*/ + /* bits defined here are for additional flags in the FSBusy word that are*/ + /* valid only when the file system is running.*/ + fsBusyBit = 0, /* file system is running; other FSBusy bits are valid*/ + fsSCSIDefer = 1, /* file system is waiting for SCSI transaction to complete*/ + fsIntMaskDefer = 2, /* file system is waiting until the interrupt mask is lowered*/ + /* Flag bits in HFSFlags byte:*/ + hfsReq = 0, /* Set if request is specific to HFS*/ + dirCN = 1, /* Set if a CNode is a directory*/ + reportMissingParent = 4, /* tell Catalog to report missing parents (used by MakeFSSpec)*/ + skipPMSP = 5, /* Set to skip PMSP setup (one-shot)*/ + noPMSP = 6, /* Set to disable PMSP completely (status flag)*/ + hfsContd = 7, /* Set if Async trap is continued*/ + /* fsFlags values*/ + fsNoAllocate = 0, + fsNoAllocateMask = 0x01, /* true when allocating memory is a very bad idea*/ + fsNeedFCBs = 1, + fsNeedFCBsMask = 0x02, /* true when a local FCB couldn't be found */ + fsNoFCBExpansion = 2, + fsNoFCBExpansionMask = 0x04, /* true if no FCB expansion logic is desired*/ + /* ExtendFile option flags*/ + /* extendFileAllBit = 0, |* allocate all requested bytes or none *|*/ + /* extendFileAllMask = 0x0001,*/ + /* */ + /* extendFileContigBit = 1, |* force contiguous allocation *|*/ + /* extendFileContigMask = 0x0002*/ + kEFContigBit = 1, /* force contiguous allocation*/ + kEFContigMask = 0x02, + kEFAllBit = 0, /* allocate all requested bytes or none*/ + kEFAllMask = 0x01, /* TruncateFile option flags*/ + kTFTrunExtBit = 0, /* truncate to the extent containing 
new PEOF*/ + kTFTrunExtMask = 1 +}; + +enum { + kUndefinedStrLen = 0 /* Unknown string length */ +}; + +enum { + HFSStkLen = 1792, /* old stack size (pre HFS Plus)*/ + kFileSystemStackSlop = 16, /* additional temporary space*/ + kFileSystemStackSize = 16384, /* give us more breathing room*/ + kFileSystemVersion = FOUR_CHAR_CODE('2.0A'), /* current file system version*/ + /* 31744 = $7C00, a nice round number close to*/ + /* (32767*1000)/1024, which is about the largest */ + /* free space unsuspecting, decimal-K minded apps*/ + /* might be expected to handle.*/ + /* AlBlkLim*/ + kMaxHFSAllocationBlocks = 31744, + WDRfnMin = -32767, /* lowest assigned WD RefNum*/ + WDRfnMax = -4096, /* largest possible WDrefnum*/ + kFirstFileRefnum = 2, /* smallest FCB refnum*/ + kNoHint = 0 +}; + + +/* Internal LowMem pointers*/ + +/*€€ The following should really be in LowMemPriv.i*/ + +enum { + FSCallAsync = 0x0342, /* ONE BYTE FREE*/ + NoEject = 0x034B, /* used by Eject and Offline*/ + CacheFlag = 0x0377, + SysBMCPtr = 0x0378, /* System-wide bitmap cache pointer*/ + SysCtlCPtr = 0x0380, /* System-wide control cache pointer*/ + HFSDSErr = 0x0392, /* Final gasp - error that caused IOErr.*/ + LMParamBlock = 0x03A4, /* LMGetParams() just gives us a copy of it*/ + FSVarsPtr = 0x0BB8, /* lomem that points to file system variable block*/ + CacheVars = 0x0394, + HFSStkPtr = 0x036E, /* Temporary location of HFS Stack pointer*/ + FSIOErr = 0x03DE, /* last I/O error (NEXT WORD FREE)*/ + /* file manager vectors not found in LowMemPriv.i*/ + JUpdAltMDB = (0xED) * 4 + 0x0400, /* ($A0ED) $0400 is n/OSTable*/ + JCkExtFS = (0xEE) * 4 + 0x0400, /* ($A0EE) $0400 is n/OSTable*/ + JBMChk = (0xF0) * 4 + 0x0400, /* ($A0F0) $0400 is n/OSTable*/ + JTstMod = (0xF1) * 4 + 0x0400, /* ($A0F1) $0400 is n/OSTable*/ + JLocCRec = (0xF2) * 4 + 0x0400, /* ($A0F2) $0400 is n/OSTable*/ + JTreeSearch = (0xF3) * 4 + 0x0400, /* ($A0F3) $0400 is n/OSTable*/ + JMapFBlock = (0xF4) * 4 + 0x0400, /* ($A0F4) $0400 is 
n/OSTable*/ + JXFSearch = (0xF5) * 4 + 0x0400, /* ($A0F5) $0400 is n/OSTable*/ + JReadBM = (0xF6) * 4 + 0x0400 /* ($A0F6) $0400 is n/OSTable*/ +}; + + +/* Poor Man's Search Path*/ + +struct SearchPathHeader { + Ptr PMSPHook; /* Hook for PMSP modification*/ + short PMSPIndx; /* Index to PMSP index from start of PMSP*/ +}; +typedef struct SearchPathHeader SearchPathHeader; + +struct SearchPathEntry { + short spVRefNum; /* VRefNum in PMSP entry*/ + UInt32 spDirID; /* Directory ID in PMSP entry*/ +}; +typedef struct SearchPathEntry SearchPathEntry; + + +enum { + kPoorMansSearchIndex = -2, + MaxDVCnt = 8, /* Leave room for 8 default VRefNums*/ + PMSPSize = MaxDVCnt * sizeof(SearchPathEntry) + sizeof(SearchPathHeader) + 2 +}; + + + +enum { + fsWDCBExtendCount = 8, /* # of WDCB's to add when we run out*/ + /* FileIDs variables*/ + kNumExtentsToCache = 4 /* just guessing for ExchangeFiles*/ +}; + + +enum { + kInvalidMRUCacheKey = -1L, /* flag to denote current MRU cache key is invalid*/ + kDefaultNumMRUCacheBlocks = 16 /* default number of blocks in each cache*/ +}; + + +/* Universal Extent Key */ + +union ExtentKey { + HFSExtentKey hfs; + HFSPlusExtentKey hfsPlus; +}; +typedef union ExtentKey ExtentKey; +/* Universal extent descriptor */ + +union ExtentDescriptor { + HFSExtentDescriptor hfs; + HFSPlusExtentDescriptor hfsPlus; +}; +typedef union ExtentDescriptor ExtentDescriptor; +/* Universal extent record */ + +union ExtentRecord { + HFSExtentRecord hfs; + HFSPlusExtentRecord hfsPlus; +}; +typedef union ExtentRecord ExtentRecord; +/* Universal catalog key */ + +union CatalogKey { + HFSCatalogKey hfs; + HFSPlusCatalogKey hfsPlus; +}; +typedef union CatalogKey CatalogKey; +/* Universal catalog data record */ + +union CatalogRecord { + SInt16 recordType; + HFSCatalogFolder hfsFolder; + HFSCatalogFile hfsFile; + HFSCatalogThread hfsThread; + HFSPlusCatalogFolder hfsPlusFolder; + HFSPlusCatalogFile hfsPlusFile; + HFSPlusCatalogThread hfsPlusThread; +}; +typedef union 
CatalogRecord CatalogRecord; + + +enum { + CMMaxCName = kHFSMaxFileNameChars +}; + + +enum { + vcbMaxNam = 27, /* volumes currently have a 27 byte max name length*/ + /* VCB flags*/ + vcbManualEjectMask = 0x0001, /* bit 0 manual-eject bit: set if volume is in a manual-eject drive*/ + vcbFlushCriticalInfoMask = 0x0002, /* bit 1 critical info bit: set if critical MDB information needs to flush*/ + /* IoParam->ioVAtrb*/ + kDefaultVolumeMask = 0x0020, + kFilesOpenMask = 0x0040 +}; + + +/* Catalog Node Data - universal data returned from the Catalog Manager*/ + + +enum { + xFFFilAttrLockMask = 0x70 +}; + +/* valence is overloaded for files and used as additional flags. 2201501*/ + +enum { + kLargeDataForkMask = 0x00000001, + kLargeRsrcForkMask = 0x00000002 +}; + +/* Universal catalog name*/ + +union CatalogName { + Str31 pstr; + HFSUniStr255 ustr; +}; +typedef union CatalogName CatalogName; + +/* Unicode Conversion*/ + + +enum { + kMacBaseEncodingCount = 50, + kTextEncodingUndefined = 0x00007FFF +}; + +struct ConversionContext { + TextToUnicodeInfo toUnicode; + UnicodeToTextInfo fromUnicode; +}; +typedef struct ConversionContext ConversionContext; + +struct CallProfile { + UInt16 refCount; + UInt16 errCount; + UInt32 callCount; + UInt32 minTime; + UInt32 maxTime; + UInt64 totalTime; + UInt64 startBase; /* in nanoseconds*/ +}; +typedef struct CallProfile CallProfile; + + +struct FSVarsRec { + UInt32 gDefaultBaseEncoding; + ItemCount gInstalledEncodings; + ConversionContext gConversionContext[50]; + Ptr gBootPToUTable; /* used by boot code to find Extensions folder*/ + StringPtr gTextEncodingFontName; /* points to font name (only used when no HFS Plus volumes have been mounted)*/ + Boolean gUseDynamicUnicodeConverters; + Boolean gIsUnicodeInstalled; +}; +typedef struct FSVarsRec FSVarsRec; + + + +/* + * MacOS accessor routines + */ +#define GetFileControlBlock(fref) ((FCB *)((fref)->v_data)) +#define GetFileRefNumFromFCB(filePtr) ((filePtr)->h_vp) + + +EXTERN_API_C( 
Boolean ) +BlockCameFromDisk (void); + +/* The following macro marks a VCB as dirty by setting the upper 8 bits of the flags*/ +EXTERN_API_C( void ) +MarkVCBDirty (ExtendedVCB *vcb); + +EXTERN_API_C( void ) +MarkVCBClean (ExtendedVCB *vcb); + +EXTERN_API_C( Boolean ) +IsVCBDirty (ExtendedVCB *vcb); + + +#define VCB_LOCK_INIT(vcb) simple_lock_init(&vcb->vcbSimpleLock) +#define VCB_LOCK(vcb) simple_lock(&vcb->vcbSimpleLock) +#define VCB_UNLOCK(vcb) simple_unlock(&vcb->vcbSimpleLock) + +#define MarkVCBDirty(vcb) { VCB_LOCK((vcb)); ((vcb)->vcbFlags |= 0xFF00); VCB_UNLOCK((vcb)); } +#define MarkVCBClean(vcb) { VCB_LOCK((vcb)); ((vcb)->vcbFlags &= 0x00FF); VCB_UNLOCK((vcb)); } +#define IsVCBDirty(vcb) ((Boolean) ((vcb->vcbFlags & 0xFF00) != 0)) + + +/* Test for error and return if error occurred*/ +EXTERN_API_C( void ) +ReturnIfError (OSErr result); + +#define ReturnIfError(result) if ( (result) != noErr ) return (result); else ; +/* Test for passed condition and return if true*/ +EXTERN_API_C( void ) +ReturnErrorIf (Boolean condition, + OSErr result); + +#define ReturnErrorIf(condition, error) if ( (condition) ) return( (error) ); +/* Exit function on error*/ +EXTERN_API_C( void ) +ExitOnError (OSErr result); + +#define ExitOnError( result ) if ( ( result ) != noErr ) goto ErrorExit; else ; +/* Return the low 16 bits of a 32 bit value, pinned if too large*/ +EXTERN_API_C( UInt16 ) +LongToShort (UInt32 l); + +#define LongToShort( l ) l <= (UInt32)0x0000FFFF ? 
((UInt16) l) : ((UInt16) 0xFFFF) + + +/* Catalog Manager Routines (IPI)*/ + +EXTERN_API_C( OSErr ) +CreateCatalogNode (ExtendedVCB * volume, + HFSCatalogNodeID parentID, + ConstUTF8Param name, + UInt32 nodeType, + HFSCatalogNodeID * catalogNodeID, + UInt32 * catalogHint); + +EXTERN_API_C( OSErr ) +DeleteCatalogNode (ExtendedVCB * volume, + HFSCatalogNodeID parentID, + ConstUTF8Param name, + UInt32 hint); + +EXTERN_API_C( OSErr ) +GetCatalogNode (ExtendedVCB * volume, + HFSCatalogNodeID parentID, + ConstUTF8Param name, + UInt32 length, + UInt32 hint, + CatalogNodeData * nodeData, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +GetCatalogOffspring (ExtendedVCB * volume, + HFSCatalogNodeID folderID, + UInt16 index, + CatalogNodeData * nodeData, + HFSCatalogNodeID * nodeID, + SInt16 * nodeType); + +EXTERN_API_C( OSErr ) +MoveRenameCatalogNode (ExtendedVCB * volume, + HFSCatalogNodeID srcParentID, + ConstUTF8Param srcName, + UInt32 srcHint, + HFSCatalogNodeID dstParentID, + ConstUTF8Param dstName, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +UpdateCatalogNode (ExtendedVCB * volume, + HFSCatalogNodeID parentID, + ConstUTF8Param name, + UInt32 catalogHint, + const CatalogNodeData * nodeData); + +EXTERN_API_C( OSErr ) +CreateFileIDRef (ExtendedVCB * volume, + HFSCatalogNodeID parentID, + ConstUTF8Param name, + UInt32 hint, + HFSCatalogNodeID * threadID); + +EXTERN_API_C( OSErr ) +ExchangeFileIDs (ExtendedVCB * volume, + ConstUTF8Param srcName, + ConstUTF8Param destName, + HFSCatalogNodeID srcID, + HFSCatalogNodeID destID, + UInt32 srcHint, + UInt32 destHint ); + +EXTERN_API_C( OSErr ) +LinkCatalogNode (ExtendedVCB * volume, + HFSCatalogNodeID parentID, + ConstUTF8Param name, + HFSCatalogNodeID linkParentID, + ConstUTF8Param linkName); + +EXTERN_API_C( SInt32 ) +CompareCatalogKeys (HFSCatalogKey * searchKey, + HFSCatalogKey * trialKey); + +EXTERN_API_C( SInt32 ) +CompareExtendedCatalogKeys (HFSPlusCatalogKey * searchKey, + HFSPlusCatalogKey * trialKey); + +EXTERN_API_C( 
OSErr ) +InitCatalogCache (void); + +EXTERN_API_C( void ) +InvalidateCatalogCache (ExtendedVCB * volume); + + +/* GenericMRUCache Routines*/ +EXTERN_API_C( OSErr ) +InitMRUCache (UInt32 bufferSize, + UInt32 numCacheBlocks, + Ptr * cachePtr); + +EXTERN_API_C( OSErr ) +DisposeMRUCache (Ptr cachePtr); + +EXTERN_API_C( void ) +TrashMRUCache (Ptr cachePtr); + +EXTERN_API_C( OSErr ) +GetMRUCacheBlock (UInt32 key, + Ptr cachePtr, + Ptr * buffer); + +EXTERN_API_C( void ) +InvalidateMRUCacheBlock (Ptr cachePtr, + Ptr buffer); + +EXTERN_API_C( void ) +InsertMRUCacheBlock (Ptr cachePtr, + UInt32 key, + Ptr buffer); + +/* BTree Manager Routines*/ + +typedef CALLBACK_API_C( SInt32 , KeyCompareProcPtr )(void *a, void *b); + + +EXTERN_API_C( OSErr ) +SearchBTreeRecord (FileReference refNum, + const void * key, + UInt32 hint, + void * foundKey, + void * data, + UInt16 * dataSize, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +InsertBTreeRecord (FileReference refNum, + void * key, + void * data, + UInt16 dataSize, + UInt32 * newHint); + +EXTERN_API_C( OSErr ) +DeleteBTreeRecord (FileReference refNum, + void * key); + +EXTERN_API_C( OSErr ) +ReplaceBTreeRecord (FileReference refNum, + const void * key, + UInt32 hint, + void * newData, + UInt16 dataSize, + UInt32 * newHint); + +/* From HFSVolumesInit.c*/ +EXTERN_API_C( void ) +InitBTreeHeader (UInt32 fileSize, + UInt32 clumpSize, + UInt16 nodeSize, + UInt16 recordCount, + UInt16 keySize, + UInt32 attributes, + UInt32 * mapNodes, + void * buffer); + +/* Prototypes for big block cache*/ + +EXTERN_API_C( OSErr ) +InitializeBlockCache (UInt32 blockSize, + UInt32 blockCount); + +EXTERN_API_C( OSErr ) +FlushBlockCache (void); + +EXTERN_API_C( OSErr ) +GetCacheBlock (FileReference fileRefNum, + UInt32 blockNumber, + UInt32 blockSize, + UInt16 options, + LogicalAddress * buffer, + Boolean * readFromDisk); + +EXTERN_API_C( OSErr ) +ReleaseCacheBlock (LogicalAddress buffer, + UInt16 options); + +EXTERN_API_C( OSErr ) +MarkCacheBlock 
(LogicalAddress buffer); + +EXTERN_API_C( OSErr ) +TrashCacheBlocks (FileReference fileRefNum); + +/* Prototypes for C->Asm glue*/ +EXTERN_API_C( OSErr ) +GetBlock_glue (UInt16 flags, + UInt32 nodeNumber, + Ptr * nodeBuffer, + FileReference refNum, + ExtendedVCB * vcb); + +EXTERN_API_C( OSErr ) +RelBlock_glue (Ptr nodeBuffer, + UInt16 flags); + +EXTERN_API_C( void ) +MarkBlock_glue (Ptr nodeBuffer); + +EXTERN_API_C( OSErr ) +C_FlushCache (ExtendedVCB * vcb, + UInt32 flags, + FileReference refNum); + + +EXTERN_API_C( void ) TrashVolumeDiskCache(ExtendedVCB * vcb); + +/* Prototypes for exported routines in VolumeAllocation.c*/ +EXTERN_API_C( OSErr ) +BlockAllocate (ExtendedVCB * vcb, + UInt32 startingBlock, + SInt64 bytesRequested, + SInt64 bytesMaximum, + Boolean forceContiguous, + UInt32 * startBlock, + UInt32 * actualBlocks); + +EXTERN_API_C( OSErr ) +BlockDeallocate (ExtendedVCB * vcb, + UInt32 firstBlock, + UInt32 numBlocks); + +EXTERN_API_C( OSErr ) +UpdateFreeCount (ExtendedVCB * vcb); + + +EXTERN_API_C( OSErr ) +AllocateFreeSpace (ExtendedVCB * vcb, + UInt32 * startBlock, + UInt32 * actualBlocks); + +EXTERN_API_C( UInt32 ) +FileBytesToBlocks (SInt64 numerator, + UInt32 denominator); + +EXTERN_API_C( OSErr ) +BlockAllocateAny (ExtendedVCB * vcb, + UInt32 startingBlock, + UInt32 endingBlock, + UInt32 maxBlocks, + UInt32 * actualStartBlock, + UInt32 * actualNumBlocks); + +EXTERN_API_C( void ) +UpdateVCBFreeBlks (ExtendedVCB * vcb); + +/* File Extent Mapping routines*/ +EXTERN_API_C( OSErr ) +FlushExtentFile (ExtendedVCB * vcb); + +EXTERN_API_C( SInt32 ) +CompareExtentKeys (const HFSExtentKey * searchKey, + const HFSExtentKey * trialKey); + +EXTERN_API_C( SInt32 ) +CompareExtentKeysPlus (const HFSPlusExtentKey *searchKey, + const HFSPlusExtentKey *trialKey); + +EXTERN_API_C( OSErr ) +DeleteFile (ExtendedVCB * vcb, + HFSCatalogNodeID parDirID, + ConstUTF8Param catalogName, + UInt32 catalogHint); + +EXTERN_API_C( OSErr ) +TruncateFileC (ExtendedVCB * vcb, + FCB * 
fcb, + SInt64 peof, + Boolean truncateToExtent); + +EXTERN_API_C( OSErr ) +ExtendFileC (ExtendedVCB * vcb, + FCB * fcb, + SInt64 bytesToAdd, + UInt32 flags, + SInt64 * actualBytesAdded); + +EXTERN_API_C( OSErr ) +MapFileBlockC (ExtendedVCB * vcb, + FCB * fcb, + size_t numberOfBytes, + off_t offset, + daddr_t * startBlock, + size_t * availableBytes); + +#if TARGET_API_MACOS_X +EXTERN_API_C( Boolean ) +NodesAreContiguous (ExtendedVCB * vcb, + FCB * fcb, + UInt32 nodeSize); +#endif +EXTERN_API_C( void ) +AdjustEOF (FCB * sourceFCB); + +/* Utility routines*/ + +EXTERN_API_C( void ) +ClearMemory (void * start, + UInt32 length); + +EXTERN_API_C( Boolean ) +UnicodeBinaryCompare (ConstHFSUniStr255Param ustr1, + ConstHFSUniStr255Param ustr2); + +EXTERN_API_C( Boolean ) +PascalBinaryCompare (ConstStr31Param pstr1, + ConstStr31Param pstr2); + +EXTERN_API_C( OSErr ) +VolumeWritable (ExtendedVCB * vcb); + + +/* Get the current time in UTC (GMT)*/ +EXTERN_API_C( UInt32 ) +GetTimeUTC (void); + +/* Get the current local time*/ +EXTERN_API_C( UInt32 ) +GetTimeLocal (Boolean forHFS); + +EXTERN_API_C( UInt32 ) +LocalToUTC (UInt32 localTime); + +EXTERN_API_C( UInt32 ) +UTCToLocal (UInt32 utcTime); + + +/* Volumes routines*/ +EXTERN_API_C( OSErr ) +FlushVolumeControlBlock (ExtendedVCB * vcb); + +EXTERN_API_C( OSErr ) +CheckVolumeOffLine (ExtendedVCB * vcb); + +EXTERN_API_C( OSErr ) +ValidVolumeHeader (HFSPlusVolumeHeader * volumeHeader); + +EXTERN_API_C( void ) +FillHFSStack (void); + + +EXTERN_API_C( OSErr ) +AccessBTree (ExtendedVCB * vcb, + FileReference refNum, + UInt32 fileID, + UInt32 fileClumpSize, + void * CompareRoutine); + +EXTERN_API_C( void ) +RemountWrappedVolumes (void); + +EXTERN_API_C( OSErr ) +CheckVolumeConsistency (ExtendedVCB * vcb); + +EXTERN_API_C( void ) +HFSBlocksFromTotalSectors (UInt32 totalSectors, + UInt32 * blockSize, + UInt16 * blockCount); + + + + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=reset +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(pop) 
+#elif PRAGMA_STRUCT_PACK + #pragma pack() +#endif + +#ifdef PRAGMA_IMPORT_OFF +#pragma import off +#elif PRAGMA_IMPORT +#pragma import reset +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __FILEMGRINTERNAL__ */ + diff --git a/bsd/hfs/hfscommon/headers/HFSInstrumentation.h b/bsd/hfs/hfscommon/headers/HFSInstrumentation.h new file mode 100644 index 000000000..5cdc2de20 --- /dev/null +++ b/bsd/hfs/hfscommon/headers/HFSInstrumentation.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HFSInstrumentation.h + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1997 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (DSH) Deric Horn + (djb) Don Brady + + Change History (most recent first): + + 10/1/97 djb Add kGetCatalogIterator + 9/4/97 djb Add kTraceRelString, kHeuristicHint. + 7/24/97 djb Add summary traces for GetNode, RelNode, and BasicIO. 
+ 7/21/97 djb Redefine LogStartTime/LogEndTime macros. + 7/16/97 DSH FilesInternal.i renamed FileMgrInternal.i to avoid name + collision + 5/9/97 djb first checked in +*/ + +#include "../../hfs_macos_defs.h" +#include "FileMgrInternal.h" + + +// +// Instrumentation summary trace indicies +// +enum { + // Unicode routines + kTraceUnicodeToPString, + kTracePStringToUnicode, + kTraceUnicodeCompare, + + kTraceRelString, + + // B-tree routines + kTraceOpenBTree, + kTraceCloseBTree, + kTraceFlushBTree, + kTraceSearchBTree, + kTraceGetBTreeRecord, + kTraceInsertBTreeRecord, + kTraceDeleteBTreeRecord, + kTraceReplaceBTreeRecord, + + // Misc routines + kTraceMapFileBlock, + kTraceBlockAllocate, + + kTraceGetNode, + kTraceReleaseNode, + kTraceBasicIO, + kTraceFSRead, + kHeuristicHint, + kGetCatalogIterator, + + + kSummaryTraceRefs // number of summary trace references +}; + + +void STLogStartTime(UInt32 selector); +void STLogEndTime(UInt32 selector, OSErr error); + + +/* +MACRO + LogStartTime(selector) + +DESCRIPTION + If summary traces are enabled then LogStartTime will record the starting time for + the routine associated with the selector. Otherwise LogStartTime does nothing. + +*/ + +#if hasSummaryTraces + +#define LogStartTime(selector) STLogStartTime( (selector) ) + +#else + +#define LogStartTime(selector) ((void) 0) + +#endif + + + +/* +MACRO + LogEndTime(selector, error) + +DESCRIPTION + If summary traces are enabled then InsLogEndTime will record the ending time for + the routine associated with the selector. Otherwise LogEndTime does nothing. 
+ +*/ + +#if hasSummaryTraces + +#define LogEndTime(selector,error) STLogEndTime( (selector), (error) ) + +#else + +#define LogEndTime(selector,error) ((void) 0) + +#endif diff --git a/bsd/hfs/hfscommon/headers/HFSUnicodeWrappers.h b/bsd/hfs/hfscommon/headers/HFSUnicodeWrappers.h new file mode 100644 index 000000000..f32f517ea --- /dev/null +++ b/bsd/hfs/hfscommon/headers/HFSUnicodeWrappers.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HFSUnicodeWrappers.h + + Contains: IPI to Unicode routines used by File Manager. + + Version: HFS Plus 1.0 + + Written by: Mark Day + + Copyright: © 1996-1997 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (DSH) Deric Horn + (msd) Mark Day + (djb) Don Brady + + Change History (most recent first): + + 11/16/97 djb Change Unicode.h to UnicodeConverter.h. + 11/7/97 msd Remove prototype for CompareUnicodeNames(). Add prototype for + FastUnicodeCompare(). 
+ 10/13/97 djb Add encoding/index macros and add prototypes for new Get/Set + encodding routines. + 9/15/97 djb InitUnicodeConverter now takes a boolean. + 9/10/97 msd Add prototype for InitializeEncodingContext. + 6/26/97 DSH Include "MockConverter" prototype for DFA usage. + 6/25/97 DSH Removed Prototype definitions, and checked in Unicode.h and + TextCommon.h from Julio Gonzales into InternalInterfaces. + 6/25/97 msd Add prototypes for some new Unicode routines that haven't + appeared in MasterInterfaces yet. + 6/18/97 djb Add more ConversionContexts routines. + 6/13/97 djb Switched to ConvertUnicodeToHFSName, ConvertHFSNameToUnicode, & + CompareUnicodeNames. + 4/28/97 djb first checked in + 12/12/96 msd first checked in + +*/ + +#include "../../hfs_macos_defs.h" +#include "../../hfs_format.h" + +// Encoding vs. Index +// +// For runtime table lookups and for the volume encoding bitmap we +// need to map some encodings to keep them in a reasonable range. +// + +enum { + kIndexMacUkrainian = 48, // MacUkrainian encoding is 152 + kIndexMacFarsi = 49 // MacFarsi encoding is 140 +}; + +#define MapEncodingToIndex(e) \ + ( (e) < 48 ? (e) : ( (e) == kTextEncodingMacUkrainian ? kIndexMacUkrainian : ( (e) == kTextEncodingMacFarsi ? kIndexMacFarsi : kTextEncodingMacRoman) ) ) + +#define MapIndexToEncoding(i) \ + ( (i) == kIndexMacFarsi ? kTextEncodingMacFarsi : ( (i) == kIndexMacUkrainian ? kTextEncodingMacUkrainian : (i) ) ) + +#define ValidMacEncoding(e) \ + ( ((e) < 39) || ((e) == kTextEncodingMacFarsi) || ((e) == kTextEncodingMacUkrainian) ) + + +extern OSErr ConvertUnicodeToUTF8Mangled ( ByteCount srcLen, + ConstUniCharArrayPtr srcStr, + ByteCount maxDstLen, + ByteCount *actualDstLen, + unsigned char* dstStr , + HFSCatalogNodeID cnid); + +/* + This routine compares two Unicode names based on an ordering defined by the HFS Plus B-tree. + This ordering must stay fixed for all time. + + Output: + -n name1 < name2 (i.e. 
name 1 sorts before name 2) + 0 name1 = name2 + +n name1 > name2 + + NOTE: You should not depend on the magnitude of the result, just its sign. That is, when name1 < name2, then any + negative number may be returned. +*/ + +extern SInt32 FastUnicodeCompare(register ConstUniCharArrayPtr str1, register ItemCount length1, + register ConstUniCharArrayPtr str2, register ItemCount length2); + + +extern SInt32 FastRelString( ConstStr255Param str1, ConstStr255Param str2 ); + + +extern HFSCatalogNodeID GetEmbeddedFileID( ConstStr31Param filename, UInt32 length, UInt32 *prefixLength ); + diff --git a/bsd/hfs/hfscommon/headers/Makefile b/bsd/hfs/hfscommon/headers/Makefile new file mode 100644 index 000000000..81ad2b9cb --- /dev/null +++ b/bsd/hfs/hfscommon/headers/Makefile @@ -0,0 +1,38 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + BTreesInternal.h BTreesPrivate.h CatalogPrivate.h \ + FileMgrInternal.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = hfs/hfscommon/headers + +EXPORT_MI_LIST = \ + +EXPORT_MI_DIR = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/i386/Makefile b/bsd/i386/Makefile new file mode 100644 index 000000000..b300ae639 --- /dev/null +++ b/bsd/i386/Makefile @@ -0,0 +1,27 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + cpu.h disklabel.h endian.h exec.h label_t.h param.h \ + profile.h psl.h ptrace.h reboot.h 
setjmp.h signal.h \ + spl.h table.h types.h user.h vmparam.h + + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = i386 + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = i386 + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/i386/cpu.h b/bsd/i386/cpu.h new file mode 100644 index 000000000..65270f0ae --- /dev/null +++ b/bsd/i386/cpu.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY + * + */ + +#ifndef _BSD_I386_CPU_H_ +#define _BSD_I386_CPU_H_ + +#define cpu_number() (0) + +#endif /* _BSD_I386_CPU_H_ */ diff --git a/bsd/i386/disklabel.h b/bsd/i386/disklabel.h new file mode 100644 index 000000000..e01222824 --- /dev/null +++ b/bsd/i386/disklabel.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_DISKLABEL_H_ +#define _MACHINE_DISKLABEL_H_ + +#define LABELSECTOR (1024 / DEV_BSIZE) /* sector containing label */ +#define LABELOFFSET 0 /* offset of label in sector */ +#define MAXPARTITIONS 8 /* number of partitions */ +#define RAW_PART 2 /* raw partition: xx?c */ + +/* Just a dummy */ +struct cpu_disklabel { + int cd_dummy; /* must have one element. */ +}; + +#endif /* _MACHINE_DISKLABEL_H_ */ diff --git a/bsd/i386/endian.h b/bsd/i386/endian.h new file mode 100644 index 000000000..1403d717b --- /dev/null +++ b/bsd/i386/endian.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1995 NeXT Computer, Inc. All rights reserved. + */ +/* + * Copyright (c) 1987, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)endian.h 8.1 (Berkeley) 6/11/93 + */ + +#ifndef _I386__ENDIAN_H_ +#define _I386__ENDIAN_H_ + +/* + * Define _NOQUAD if the compiler does NOT support 64-bit integers. + */ +/* #define _NOQUAD */ + +/* + * Define the order of 32-bit words in 64-bit words. + */ +#define _QUAD_HIGHWORD 1 +#define _QUAD_LOWWORD 0 + +#if defined(KERNEL) || !defined(_POSIX_SOURCE) +/* + * Definitions for byte order, according to byte significance from low + * address to high. + */ +#define LITTLE_ENDIAN 1234 /* LSB first: i386, vax */ +#define BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */ + +#define BYTE_ORDER LITTLE_ENDIAN + +#include + +__BEGIN_DECLS +unsigned long htonl __P((unsigned long)); +unsigned short htons __P((unsigned short)); +unsigned long ntohl __P((unsigned long)); +unsigned short ntohs __P((unsigned short)); +__END_DECLS + +/* + * Macros for network/external number representation conversion. 
+ */ +#if BYTE_ORDER == BIG_ENDIAN && !defined(lint) +#define ntohl(x) (x) +#define ntohs(x) (x) +#define htonl(x) (x) +#define htons(x) (x) + +#define NTOHL(x) (x) +#define NTOHS(x) (x) +#define HTONL(x) (x) +#define HTONS(x) (x) + +#else +#include + +#define ntohl(x) NXSwapBigLongToHost(x) +#define ntohs(x) NXSwapBigShortToHost(x) +#define htonl(x) NXSwapHostLongToBig(x) +#define htons(x) NXSwapHostShortToBig(x) + +#define NTOHL(x) (x) = ntohl((u_long)x) +#define NTOHS(x) (x) = ntohs((u_short)x) +#define HTONL(x) (x) = htonl((u_long)x) +#define HTONS(x) (x) = htons((u_short)x) +#endif +#endif /* defined(KERNEL) || !defined(_POSIX_SOURCE) */ +#endif /* !_I386__ENDIAN_H_ */ diff --git a/bsd/i386/exec.h b/bsd/i386/exec.h new file mode 100644 index 000000000..677ee5d80 --- /dev/null +++ b/bsd/i386/exec.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)exec.h 8.1 (Berkeley) 6/11/93 + */ + +/* Size of a page in an object file. */ +#define __LDPGSZ 4096 + +/* Valid magic number check. 
*/ +#define N_BADMAG(ex) \ + ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \ + (ex).a_magic != ZMAGIC) + +/* Address of the bottom of the text segment. */ +#define N_TXTADDR(X) 0 + +/* Address of the bottom of the data segment. */ +#define N_DATADDR(ex) \ + (N_TXTADDR(ex) + ((ex).a_magic == OMAGIC ? (ex).a_text \ + : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) + +/* Text segment offset. */ +#define N_TXTOFF(ex) \ + ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec)) + +/* Data segment offset. */ +#define N_DATOFF(ex) \ + (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \ + __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) + +/* Symbol table offset. */ +#define N_SYMOFF(ex) \ + (N_TXTOFF(ex) + (ex).a_text + (ex).a_data + (ex).a_trsize + \ + (ex).a_drsize) + +/* String table offset. */ +#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms) + +/* Description of the object file header (a.out format). */ +struct exec { +#define OMAGIC 0407 /* old impure format */ +#define NMAGIC 0410 /* read-only text */ +#define ZMAGIC 0413 /* demand load format */ +#define QMAGIC 0314 /* demand load format. Header in text. */ + unsigned int a_magic; /* magic number */ + + unsigned int a_text; /* text segment size */ + unsigned int a_data; /* initialized data size */ + unsigned int a_bss; /* uninitialized data size */ + unsigned int a_syms; /* symbol table size */ + unsigned int a_entry; /* entry point */ + unsigned int a_trsize; /* text relocation size */ + unsigned int a_drsize; /* data relocation size */ +}; + +/* + * Address of ps_strings structure (in user space). + */ +#define PS_STRINGS \ + ((struct ps_strings *)(USRSTACK - sizeof(struct ps_strings))) diff --git a/bsd/i386/label_t.h b/bsd/i386/label_t.h new file mode 100644 index 000000000..b33aa29ef --- /dev/null +++ b/bsd/i386/label_t.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: For setjmp/longjmp (kernel version). + * + * HISTORY + * + * 20 April 1992 ? at NeXT + * Created. + */ + +#ifndef _BSD_I386_LABEL_T_H_ +#define _BSD_I386_LABEL_T_H_ + +typedef struct label_t { + int val[14]; +} label_t; + +#endif /* _BSD_I386_LABEL_T_H_ */ diff --git a/bsd/i386/param.h b/bsd/i386/param.h new file mode 100644 index 000000000..80626b0ff --- /dev/null +++ b/bsd/i386/param.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)param.h 8.1 (Berkeley) 4/4/95 + */ + +/* + * Machine dependent constants for Intel 386. + */ + +#ifndef _I386_PARAM_H_ +#define _I386_PARAM_H_ + +/* + * Round p (pointer or byte index) up to a correctly-aligned value for all + * data types (int, long, ...). The result is u_int and must be cast to + * any desired pointer type. + */ +#define ALIGNBYTES 3 +#define ALIGN(p) (((u_int)(p) + ALIGNBYTES) &~ ALIGNBYTES) + +#define NBPG 4096 /* bytes/page */ +#define PGOFSET (NBPG-1) /* byte offset into page */ +#define PGSHIFT 12 /* LOG2(NBPG) */ + +#define DEV_BSIZE 512 +#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ +#define BLKDEV_IOSIZE 2048 +#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */ + +#define STACK_GROWS_UP 0 /* stack grows to lower addresses */ + +#define CLSIZE 1 +#define CLSIZELOG2 0 + +/* + * Constants related to network buffer management. + * MCLBYTES must be no larger than CLBYTES (the software page size), and, + * on machines that exchange pages of input or output buffers with mbuf + * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple + * of the hardware page size. 
+ */ +#define MSIZE 128 /* size of an mbuf */ +#define MCLBYTES 2048 /* large enough for ether MTU */ +#define MCLSHIFT 11 +#define MCLOFSET (MCLBYTES - 1) +#ifndef NMBCLUSTERS +#ifdef GATEWAY +#define NMBCLUSTERS ((1024 * 1024) / MCLBYTES) /* cl map size: 1MB */ +#else +#define NMBCLUSTERS ((1024 * 512) / MCLBYTES) /* cl map size: 0.5MB */ +#endif +#endif + +/* + * Some macros for units conversion + */ +/* Core clicks (NeXT_page_size bytes) to segments and vice versa */ +#define ctos(x) (x) +#define stoc(x) (x) + +/* Core clicks (4096 bytes) to disk blocks */ +#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT)) +#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT)) +#define dtob(x) ((x)<>PGSHIFT) + +#ifdef __APPLE__ +#define btodb(bytes, devBlockSize) \ + ((unsigned)(bytes) / devBlockSize) +#define dbtob(db, devBlockSize) \ + ((unsigned)(db) * devBlockSize) +#else +#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ + ((unsigned)(bytes) >> DEV_BSHIFT) +#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ + ((unsigned)(db) << DEV_BSHIFT) +#endif + +/* + * Map a ``block device block'' to a file system block. + * This should be device dependent, and will be if we + * add an entry to cdevsw/bdevsw for that purpose. + * For now though just use DEV_BSIZE. + */ +#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) + +/* + * Macros to decode (and encode) processor status word. 
+ */ +#define STATUS_WORD(rpl, ipl) (((ipl) << 8) | (rpl)) +#define USERMODE(x) (((x) & 3) == 3) +#define BASEPRI(x) (((x) & (255 << 8)) == 0) + + +#if defined(KERNEL) || defined(STANDALONE) +#define DELAY(n) delay(n) + +#else /* defined(KERNEL) || defined(STANDALONE) */ +#define DELAY(n) { register int N = (n); while (--N > 0); } +#endif /* defined(KERNEL) || defined(STANDALONE) */ + +#endif /* _I386_PARAM_H_ */ diff --git a/bsd/i386/profile.h b/bsd/i386/profile.h new file mode 100644 index 000000000..68370a2eb --- /dev/null +++ b/bsd/i386/profile.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997, Apple Computer, Inc. All rights reserved. + * + * History : + * 29-Sep-1997 Umesh Vaishampayan + * Created. + */ + +#ifndef _BSD_I386_PROFILE_H_ +#define _BSD_I386_PROFILE_H_ + +#ifdef KERNEL +/* + * Block interrupts during mcount so that those interrupts can also be + * counted (as soon as we get done with the current counting). On the + * i386 platfom, can't do splhigh/splx as those are C routines and can + * recursively invoke mcount. 
+ */ +#warning MCOUNT_* not implemented yet. + +#define MCOUNT_INIT +#define MCOUNT_ENTER /* s = splhigh(); */ /* XXX TODO */ +#define MCOUNT_EXIT /* (void) splx(s); */ /* XXX TODO */ +#endif /* KERNEL */ + +#endif /* _BSD_I386_PROFILE_H_ */ diff --git a/bsd/i386/psl.h b/bsd/i386/psl.h new file mode 100644 index 000000000..57ad9145f --- /dev/null +++ b/bsd/i386/psl.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: Definition of eflags register. + * + * HISTORY + * + * 7 April 1992 ? at NeXT + * Created. 
+ */ + +#if KERNEL_PRIVATE + +#ifndef _BSD_I386_PSL_H_ +#define _BSD_I386_PSL_H_ + +#define EFL_ALLCC ( \ + EFL_CF | \ + EFL_PF | \ + EFL_AF | \ + EFL_ZF | \ + EFL_SF | \ + EFL_OF \ + ) +#define EFL_USERSET ( EFL_IF | EFL_SET ) +#define EFL_USERCLR ( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR ) + +#define PSL_ALLCC EFL_ALLCC +#define PSL_T EFL_TF + +#endif /* _BSD_I386_PSL_H_ */ + +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/i386/ptrace.h b/bsd/i386/ptrace.h new file mode 100644 index 000000000..c27381154 --- /dev/null +++ b/bsd/i386/ptrace.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ptrace.h 8.1 (Berkeley) 6/11/93 + */ + +/* + * Machine dependent trace commands. + * + * None for the i386 at this time. + */ diff --git a/bsd/i386/reboot.h b/bsd/i386/reboot.h new file mode 100644 index 000000000..53088b120 --- /dev/null +++ b/bsd/i386/reboot.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: next/reboot.h + * Author: Avadis Tevanian, Jr. + * + * NeXT specific reboot flags. + * + * HISTORY + * 28-Feb-90 John Seamons (jks) at NeXT + * Added RB_COMMAND flag that allows a specific reboot command to be used. + * + * 06-Jul-88 Avadis Tevanian (avie) at NeXT, Inc. + * Created. + */ + +#ifndef _BSD_I386_REBOOT_H_ +#define _BSD_I386_REBOOT_H_ + +/* + * Empty file (publicly) + */ + +#ifdef KERNEL_PRIVATE + +/* + * Use most significant 16 bits to avoid collisions with + * machine independent flags. 
+ */ +#define RB_POWERDOWN 0x00010000 /* power down on halt */ +#define RB_NOBOOTRC 0x00020000 /* don't run '/etc/rc.boot' */ +#define RB_DEBUG 0x00040000 /* drop into mini monitor on panic */ +#define RB_EJECT 0x00080000 /* eject disks on halt */ +#define RB_COMMAND 0x00100000 /* new boot command specified */ +#define RB_NOFP 0x00200000 /* don't use floating point */ +#define RB_BOOTNEXT 0x00400000 /* reboot into NeXT */ +#define RB_BOOTDOS 0x00800000 /* reboot into DOS */ +#define RB_PRETTY 0x01000000 /* shutdown with pretty graphics */ + +#endif /* KERNEL_PRIVATE */ + +#endif /* _BSD_I386_REBOOT_H_ */ diff --git a/bsd/i386/reg.h b/bsd/i386/reg.h new file mode 100644 index 000000000..aea69bfca --- /dev/null +++ b/bsd/i386/reg.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Intel386 Family: User registers for U**X. + * + * HISTORY + * + * 20 April 1992 ? at NeXT + * Created. 
+ */ + +#ifdef KERNEL_PRIVATE + +#ifndef _BSD_I386_REG_H_ +#define _BSD_I386_REG_H_ + +/* FIXME - should include mach/i386/thread_status.h and + construct the values from i386_saved_state + */ +#define EDX 9 +#define ECX 10 +#define EAX 11 +#define EIP 14 +#define EFL 16 +#define ESP 7 +#define UESP 17 +#define PS EFL +#define PC EIP +#define SP UESP + + + +#endif /* _BSD_I386_REG_H_ */ + +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/i386/setjmp.h b/bsd/i386/setjmp.h new file mode 100644 index 000000000..2ce10d1cb --- /dev/null +++ b/bsd/i386/setjmp.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * File: setjmp.h + * + * Declaration of setjmp routines and data structures. 
+ */ +#ifndef _BSD_I386_SETJMP_H +#define _BSD_I386_SETJMP_H + +#include +#include + +typedef struct sigcontext jmp_buf[1]; + +#define _JBLEN ((sizeof(struct sigcontext)) / sizeof(int)) +typedef int sigjmp_buf[_JBLEN+1]; + +__BEGIN_DECLS +extern int setjmp __P((jmp_buf env)); +extern void longjmp __P((jmp_buf env, int val)); + +#ifndef _ANSI_SOURCE +int sigsetjmp __P((sigjmp_buf env, int val)); +void siglongjmp __P((sigjmp_buf env, int val)); +#endif /* _ANSI_SOURCE */ + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +int _setjmp __P((jmp_buf env)); +void _longjmp __P((jmp_buf, int val)); +void longjmperror __P((void)); +#endif /* neither ANSI nor POSIX */ +__END_DECLS +#endif /* !_BSD_I386_SETJMP_H */ diff --git a/bsd/i386/signal.h b/bsd/i386/signal.h new file mode 100644 index 000000000..6f079937b --- /dev/null +++ b/bsd/i386/signal.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * HISTORY + * + * 8 April 1992 ? at NeXT + * Created. 
+ */ + +#ifndef _i386_SIGNAL_ +#define _i386_SIGNAL_ 1 + +typedef int sig_atomic_t; + +/* + * Information pushed on stack when a signal is delivered. + * This is used by the kernel to restore state following + * execution of the signal handler. It is also made available + * to the handler to allow it to properly restore state if + * a non-standard exit is performed. + */ +struct sigcontext { + int sc_onstack; /* sigstack state to restore */ + int sc_mask; /* signal mask to restore */ + unsigned int sc_eax; + unsigned int sc_ebx; + unsigned int sc_ecx; + unsigned int sc_edx; + unsigned int sc_edi; + unsigned int sc_esi; + unsigned int sc_ebp; + unsigned int sc_esp; + unsigned int sc_ss; + unsigned int sc_eflags; + unsigned int sc_eip; + unsigned int sc_cs; + unsigned int sc_ds; + unsigned int sc_es; + unsigned int sc_fs; + unsigned int sc_gs; +}; + +#endif /* _i386_SIGNAL_ */ diff --git a/bsd/i386/spl.h b/bsd/i386/spl.h new file mode 100644 index 000000000..3a01fb94f --- /dev/null +++ b/bsd/i386/spl.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_I386_SPL_H_ +#define _BSD_I386_SPL_H_ + +#ifdef KERNEL +#ifndef __ASSEMBLER__ +/* + * Machine-dependent SPL definitions. + * + */ +typedef unsigned spl_t; + +extern unsigned sploff(void); +extern unsigned splhigh(void); +extern unsigned splsched(void); +extern unsigned splclock(void); +extern unsigned splpower(void); +extern unsigned splvm(void); +extern unsigned splbio(void); +extern unsigned splimp(void); +extern unsigned spltty(void); +extern unsigned splnet(void); +extern unsigned splsoftclock(void); + +extern void spllo(void); +extern void splon(unsigned level); +extern void splx(unsigned level); +extern void spln(unsigned level); +#define splstatclock() splhigh() + +#endif /* __ASSEMBLER__ */ + +#endif + +#endif /* _BSD_I386_SPL_H_ */ diff --git a/bsd/i386/table.h b/bsd/i386/table.h new file mode 100644 index 000000000..eb3e811b5 --- /dev/null +++ b/bsd/i386/table.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989 Next, Inc. 
+ */ + +#ifndef _BSD_I386_TABLE_H_ +#define _BSD_I386_TABLE_H_ + +/* + * Empty file. + */ + +#endif /* _BSD_I386_TABLE_H_ */ diff --git a/bsd/i386/types.h b/bsd/i386/types.h new file mode 100644 index 000000000..ff7bc23b4 --- /dev/null +++ b/bsd/i386/types.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1995 NeXT Computer, Inc. All rights reserved. + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)types.h 8.3 (Berkeley) 1/5/94 + */ + +#ifndef _MACHTYPES_H_ +#define _MACHTYPES_H_ + +#ifndef __ASSEMBLER__ +#include +/* + * Basic integral types. Omit the typedef if + * not possible for a machine/compiler combination. 
/*
 * Basic fixed-width integral types for i386.  The u_int*_t spellings are
 * the BSD convention (cf. the later C99 uint*_t names in stdint.h).
 */
typedef __signed char int8_t;
typedef unsigned char u_int8_t;
typedef short int16_t;
typedef unsigned short u_int16_t;
typedef int int32_t;
typedef unsigned int u_int32_t;
typedef long long int64_t;
typedef unsigned long long u_int64_t;

/* natural machine register width on i386 */
typedef int32_t register_t;

/*
 * NOTE(review): C99 defines intptr_t/uintptr_t as *integer* types wide
 * enough to round-trip a pointer; here they are declared as pointer
 * types instead.  Presumably historical — confirm against
 * EXTERNAL_HEADERS/stdint.h before changing, since every client of this
 * header would be affected.
 */
typedef int *intptr_t;
typedef unsigned long *uintptr_t;
#endif /* __ASSEMBLER__ */
#endif /* _MACHTYPES_H_ */
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY + * 16-Jan-98 Wilfredo Sanchez (wsanchez@apple.com) + * Remove #ifdef KERNEL_PRiVATE around USRSTACK to match ppc version. + * Sendmail needs it. + * + * 05-Sep-97 Umesh Vaishampayan (umeshv@apple.com) + * Made MAXSSIZ a finite value. + * + * 05-Mar-89 Avadis Tevanian, Jr. (avie) at NeXT + * Make MAXDSIZ infinity. + * + * 12-Aug-87 John Seamons (jks) at NeXT + * Ported to NeXT. 
+ */ + +#ifndef _BSD_I386_VMPARAM_H_ +#define _BSD_I386_VMPARAM_H_ 1 + +#include + +#define USRSTACK 0xc0000000 + +/* + * Virtual memory related constants, all in bytes + */ +#ifndef DFLDSIZ +#define DFLDSIZ (6*1024*1024) /* initial data size limit */ +#endif +#ifndef MAXDSIZ +#define MAXDSIZ (RLIM_INFINITY) /* max data size */ +#endif +#ifndef DFLSSIZ +#define DFLSSIZ (512*1024) /* initial stack size limit */ +#endif +#ifndef MAXSSIZ +#define MAXSSIZ (64*1024*1024) /* max stack size */ +#endif +#ifndef DFLCSIZ +#define DFLCSIZ (0) /* initial core size limit */ +#endif +#ifndef MAXCSIZ +#define MAXCSIZ (RLIM_INFINITY) /* max core size */ +#endif /* MAXCSIZ */ + +#endif /* _BSD_I386_VMPARAM_H_ */ diff --git a/bsd/if/ppc/if_en.c b/bsd/if/ppc/if_en.c new file mode 100644 index 000000000..a1b2a1b96 --- /dev/null +++ b/bsd/if/ppc/if_en.c @@ -0,0 +1,1129 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. 
+ * + * ethernet driver for mace on-board ethernet + * + * HISTORY + * + * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 + * - ripped off code from MK/LINUX, turned it into a polled-mode + * driver for the PCI (8500) class machines + * + * Dieter Siegmund (dieter@next.com) Fri Mar 21 12:41:29 PST 1997 + * - reworked to support a BSD-style interface, and to support kdb polled + * interface and interrupt-driven interface concurrently + * + * Justin Walker (justin@apple.com) Tue May 20 10:29:29 PDT 1997 + * - Added multicast support + * + * Dieter Siegmund (dieter@next.com) Thu May 29 15:02:29 PDT 1997 + * - fixed problem with sending arp packets for ip address 0.0.0.0 + * - use kdp_register_send_receive() instead of defining + * en_send_pkt/en_recv_pkt routines to avoid name space + * collisions with IOEthernetDebugger and allow these routines to be + * overridden by a driverkit-style driver + * + * Dieter Siegmund (dieter@apple.com) Tue Jun 24 18:29:15 PDT 1997 + * - don't let the adapter auto-strip 802.3 receive frames, it messes + * up the frame size logic + * + * Dieter Siegmund (dieter@apple.com) Tue Aug 5 16:24:52 PDT 1997 + * - handle multicast address deletion correctly + */ +#ifdef MACE_DEBUG +/* + * Caveat: MACE_DEBUG delimits some code that is getting kind of + * stale. Before blindly turning on MACE_DEBUG for your + * testing, take a look at the code enabled by it to check + * that it is reasonably sane. 
+ */ +#endif + +#include +#include + +#define RECEIVE_INT DBDMA_INT_ALWAYS + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "if_en.h" +#include "mace.h" + +extern int kdp_flag; + +#if NBPFILTER > 0 +#include +#endif + +static void polled_send_pkt(char * data, int len); +static void polled_receive_pkt(char *data, int *len, int timeout_ms); +void mace_dbdma_rx_intr(int unit, void *, void *); +void mace_dbdma_tx_intr(int, void *, void *); +void mace_pci_intr(int, void *); +void mace_service_queue(struct ifnet * ifp); + +#ifdef MACE_DEBUG +static int mace_watchdog(); +#endif + +static __inline__ vm_offset_t +KVTOPHYS(vm_offset_t v) +{ + return (v); +} + +typedef int (*funcptr)(char *, int, void *); + +#ifdef MACE_DEBUG +static int +macAddrsEqual(unsigned char * one, unsigned char * two) +{ + int i; + + for (i = 0; i < NUM_EN_ADDR_BYTES; i++) + if (*one++ != *two++) + return 0; + return 1; +} +#endif + +static __inline__ int +isprint(unsigned char c) +{ + return (c >= 0x20 && c <= 0x7e); +} + +static void +printEtherHeader(enet_addr_t * dh, enet_addr_t * sh, u_short etype) +{ + u_char * dhost = dh->ether_addr_octet; + u_char * shost = sh->ether_addr_octet; + + printf("Dst: %x:%x:%x:%x:%x:%x Src: %x:%x:%x:%x:%x:%x Type: 0x%x\n", + dhost[0], dhost[1], dhost[2], dhost[3], dhost[4], dhost[5], + shost[0], shost[1], shost[2], shost[3], shost[4], shost[5], + etype); +} + +static void +printData(u_char * data_p, int n_bytes) +{ +#define CHARS_PER_LINE 16 + char line_buf[CHARS_PER_LINE + 1]; + int line_pos; + int offset; + + for (line_pos = 0, offset = 0; offset < n_bytes; offset++, data_p++) { + if (line_pos == 0) { + printf("%04d ", offset); + } + + line_buf[line_pos] = isprint(*data_p) ? 
*data_p : '.'; + printf(" %02x", *data_p); + line_pos++; + if (line_pos == CHARS_PER_LINE) { + line_buf[CHARS_PER_LINE] = '\0'; + printf(" %s\n", line_buf); + line_pos = 0; + } + } + if (line_pos) { /* need to finish up the line */ + for (; line_pos < CHARS_PER_LINE; line_pos++) { + printf(" "); + line_buf[line_pos] = ' '; + } + line_buf[CHARS_PER_LINE] = '\0'; + printf(" %s\n", line_buf); + } +} + +static void +printEtherPacket(enet_addr_t * dhost, enet_addr_t * shost, u_short type, + u_char * data_p, int n_bytes) +{ + printEtherHeader(dhost, shost, type); + printData(data_p, n_bytes); +} + +void +printContiguousEtherPacket(u_char * data_p, int n_bytes) +{ + printEtherPacket((enet_addr_t *)data_p, + (enet_addr_t *)(data_p + NUM_EN_ADDR_BYTES), + *((u_short *)(data_p + (NUM_EN_ADDR_BYTES * 2))), + data_p, n_bytes); +} + +mace_t mace; + +#define MACE_DMA_AREA_SIZE (ETHER_RX_NUM_DBDMA_BUFS * ETHERNET_BUF_SIZE + PG_SIZE) +static unsigned long mace_rx_dma_area[(MACE_DMA_AREA_SIZE + sizeof(long))/sizeof(long)]; + +static unsigned long mace_tx_dma_area[(ETHERNET_BUF_SIZE + PG_SIZE + sizeof(long))/sizeof(long)]; + +/* + * mace_get_hwid + * + * This function computes the Ethernet Hardware address + * from PROM. (Its best not to ask how this is done.) + */ + +unsigned char +mace_swapbits(unsigned char bits) +{ + unsigned char mask = 0x1, i, newbits = 0; + + for (i = 0x80; i; mask <<= 1, i >>=1) { + if (bits & mask) + newbits |= i; + } + + return newbits; +} + +void +mace_get_hwid(unsigned char *hwid_addr, mace_t * m) +{ + int i; + + for (i = 0; i < NUM_EN_ADDR_BYTES; i++, hwid_addr += 16) { + m->macaddr[i] = mace_swapbits(*hwid_addr); + } +} + +/* + * mace_reset + * + * Reset the board.. + */ + +void +mace_reset() +{ + dbdma_reset(DBDMA_ETHERNET_RV); + dbdma_reset(DBDMA_ETHERNET_TX); +} + + +/* + * mace_geteh: + * + * This function gets the ethernet address (array of 6 unsigned + * bytes) from the MACE board registers. 
/*
 * mace_seteh:
 *
 * This function sets the ethernet address (array of 6 unsigned
 * bytes) on the MACE board.
 *
 * On chips newer than revision A2, the IAC_ADDRCHG handshake must be
 * used: request the address change, then poll IAC until the chip clears
 * the ADDRCHG bit before loading the new bytes.  Revision A2 loads the
 * physical address directly.  Every register access is followed by
 * eieio() to enforce I/O ordering on PowerPC.
 */
static void
mace_seteh(char *ep)
{
    int i;
    unsigned char status;

    if (mace.chip_id != MACE_REVISION_A2) {
	/* request an address change and wait for the chip to accept it */
	mace.ereg->iac = IAC_ADDRCHG|IAC_PHYADDR; eieio();

	while ((status = mace.ereg->iac)) {
	    if ((status & IAC_ADDRCHG) == 0) {
		eieio();
		break;
	    }
	    eieio();
	}
    }
    else {
	/* start to load the address.. */
	mace.ereg->iac = IAC_PHYADDR; eieio();
    }

    /* write the six address bytes into the PADR register, one at a time */
    for (i = 0; i < NUM_EN_ADDR_BYTES; i++) {
	mace.ereg->padr = *(ep+i); eieio();
    }
    return;
}
element 0 of the "array" */ + DBDMA_BUILD(d, DBDMA_CMD_NOP, 0, 0, 0, DBDMA_INT_NEVER, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_ALWAYS); + address = (vm_offset_t) KVTOPHYS((vm_offset_t)m->rv_dma); + dbdma_st4_endian(&d->d_cmddep, address); + + m->rv_head = 0; + m->rv_tail = ETHER_RX_NUM_DBDMA_BUFS; /* always contains DBDMA_CMD_STOP */ + + /* stop/init/restart dma channel */ + dbdma_reset(DBDMA_ETHERNET_RV); + dbdma_reset(DBDMA_ETHERNET_TX); + + /* Set the wait value.. */ + regmap = DBDMA_REGMAP(DBDMA_ETHERNET_RV); + dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x00)); + + /* Set the tx wait value */ + regmap = DBDMA_REGMAP(DBDMA_ETHERNET_TX); + dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x20)); + + flush_cache_v((vm_offset_t)m->rv_dma, + sizeof(dbdma_command_t) * (ETHER_RX_NUM_DBDMA_BUFS + 2)); + /* start receiving */ + dbdma_start(DBDMA_ETHERNET_RV, m->rv_dma); +} + +#ifdef MACE_DEBUG +static unsigned char testBuffer[PG_SIZE * 4]; +static unsigned char testMsg[] = "mace ethernet interface test"; + +static void +send_test_packet() +{ + unsigned char * tp; + + bzero(testBuffer, sizeof(testBuffer)); + + tp = testBuffer; + + /* send self-addressed packet */ + bcopy(&mace.macaddr[0], tp, NUM_EN_ADDR_BYTES); + tp += NUM_EN_ADDR_BYTES; + bcopy(&mace.macaddr[0], tp, NUM_EN_ADDR_BYTES); + tp += NUM_EN_ADDR_BYTES; + *tp++ = 0; + *tp++ = 0; + bcopy(testMsg, tp, sizeof(testMsg)); + polled_send_pkt(testBuffer, 80); + return; +} +#endif + +/* + * Function: init_mace + * + * Purpose: + * Called early on, initializes the adapter and readies it for + * kdb kernel debugging. 
+ */ +void +init_mace() +{ + unsigned char status; + mace_t * m = &mace; + struct mace_board * ereg; + int mpc = 0; + + /* + * Only use in-kernel driver for early debugging (bootargs: kdp=1 or kdp=3) + */ + if ( (kdp_flag & 1) == 0 ) + { + return; + } + + bzero(&mace, sizeof(mace)); + + /* get the ethernet registers' mapped address */ + ereg = m->ereg + = (struct mace_board *) POWERMAC_IO(PCI_ETHERNET_BASE_PHYS); + mace_get_hwid((unsigned char *)POWERMAC_IO(PCI_ETHERNET_ADDR_PHYS), m); + + /* Reset the board & AMIC.. */ + mace_reset(); + + /* grab the MACE chip rev */ + m->chip_id = (ereg->chipid2 << 8 | ereg->chipid1); + + /* don't auto-strip for 802.3 */ + m->ereg->rcvfc &= ~(RCVFC_ASTRPRCV); + + /* set the ethernet address */ + mace_seteh(mace.macaddr); + { + unsigned char macaddr[NUM_EN_ADDR_BYTES]; + mace_geteh(macaddr); + printf("mace ethernet [%02x:%02x:%02x:%02x:%02x:%02x]\n", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + } + + /* Now clear the Multicast filter */ + if (m->chip_id != MACE_REVISION_A2) { + ereg->iac = IAC_ADDRCHG|IAC_LOGADDR; eieio(); + + while ((status = ereg->iac)) { + if ((status & IAC_ADDRCHG) == 0) + break; + eieio(); + } + eieio(); + } + else { + ereg->iac = IAC_LOGADDR; eieio(); + } + { + int i; + + for (i=0; i < 8; i++) + { ereg->ladrf = 0; + eieio(); + } + } + + /* register interrupt routines */ + mace_setup_dbdma(); + + /* Start the chip... 
*/ + m->ereg->maccc = MACCC_ENXMT|MACCC_ENRCV; eieio(); + { + volatile char ch = mace.ereg->ir; eieio(); + } + + delay(500); /* paranoia */ + mace.ereg->imr = 0xfe; eieio(); + + /* register our debugger routines */ + kdp_register_send_receive((kdp_send_t)polled_send_pkt, + (kdp_receive_t)polled_receive_pkt); + +#if 0 + printf("Testing 1 2 3\n"); + send_test_packet(); + printf("Testing 1 2 3\n"); + send_test_packet(); + printf("Testing 1 2 3\n"); + send_test_packet(); + do { + static unsigned char buf[ETHERNET_BUF_SIZE]; + int len; + int nmpc = mace.ereg->mpc; eieio(); + + if (nmpc > mpc) { + mpc = nmpc; + printf("mpc %d\n", mpc); + } + polled_receive_pkt(buf, &len, 100); + if (len > 0) { + printf("rx %d\n", len); + printContiguousEtherPacket(buf, len); + } + } while(1); +#endif + + return; +} + +#ifdef MACE_DEBUG +static void +txstatus(char * msg) +{ + volatile dbdma_regmap_t * dmap = DBDMA_REGMAP(DBDMA_ETHERNET_TX); + volatile unsigned long status; + volatile unsigned long intr; + volatile unsigned long branch; + volatile unsigned long wait; + + status = dbdma_ld4_endian(&dmap->d_status); eieio(); + intr = dbdma_ld4_endian(&dmap->d_intselect); eieio(); + branch = dbdma_ld4_endian(&dmap->d_branch); eieio(); + wait = dbdma_ld4_endian(&dmap->d_wait); eieio(); + printf("(%s s=0x%x i=0x%x b=0x%x w=0x%x)", msg, status, intr, branch, + wait); + return; +} +#endif + +static void +tx_dbdma(char * data, int len) +{ + unsigned long count; + dbdma_command_t * d; + unsigned long page; + + d = mace.tx_dma; + page = ((unsigned long) data) & PG_MASK; + if ((page + len) <= PG_SIZE) { /* one piece dma */ + DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, + len, + (vm_offset_t) KVTOPHYS((vm_offset_t) data), + DBDMA_INT_NEVER, + DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); + } + else { /* two piece dma */ + count = PG_SIZE - page; + DBDMA_BUILD(d, DBDMA_CMD_OUT_MORE, DBDMA_KEY_STREAM0, + count, + (vm_offset_t)KVTOPHYS((vm_offset_t) data), + DBDMA_INT_NEVER, + DBDMA_WAIT_NEVER, 
DBDMA_BRANCH_NEVER); + d++; + DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, + len - count, (vm_offset_t) + KVTOPHYS((vm_offset_t)((unsigned char *)data + count)), + DBDMA_INT_NEVER, + DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); + } + d++; + DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, + 1, KVTOPHYS((vm_offset_t) &mace.ereg->xmtfs),DBDMA_INT_NEVER, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + d++; + DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, + 1, KVTOPHYS((vm_offset_t) &mace.ereg->ir), DBDMA_INT_ALWAYS, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + d++; + DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, 0, 0, 0); + flush_cache_v((vm_offset_t)mace.tx_dma, sizeof(dbdma_command_t) * TX_NUM_DBDMA); + dbdma_start(DBDMA_ETHERNET_TX, mace.tx_dma); + return; + +} + +static void +waitForDBDMADone(char * msg) +{ + { + /* wait for tx dma completion */ + volatile dbdma_regmap_t * dmap = DBDMA_REGMAP(DBDMA_ETHERNET_TX); + int i; + volatile unsigned long val; + + i = 0; + do { + val = dbdma_ld4_endian(&dmap->d_status); eieio(); + delay(50); + i++; + } while ((i < 100000) && (val & DBDMA_CNTRL_ACTIVE)); + if (i == 100000) + printf("mace(%s): tx_dbdma poll timed out 0x%x", msg, val); + } +} + +void +mace_service_queue(struct ifnet * ifp) +{ + unsigned char * buf_p; + struct mbuf * m; + struct mbuf * mp; + int len; + + if (mace.tx_busy) { /* transmit in progress? 
*/ + return; + } + + IF_DEQUEUE(&(ifp->if_snd), m); + if (m == 0) { + return; + } + + len = m->m_pkthdr.len; + + if (len > ETHERMAXPACKET) { + printf("mace_start: packet too big (%d), dropping\n", len); + m_freem(m); + return; + + } + buf_p = mace.tx_dma_area; + if (m->m_nextpkt) { + printf("mace: sending more than one mbuf\n"); + } + for (mp = m; mp; mp = mp->m_next) { + if (mp->m_len == 0) + continue; + bcopy(mtod(mp, caddr_t), buf_p, min(mp->m_len, len)); + len -= mp->m_len; + buf_p += mp->m_len; + } + m_freem(m); + +#if NBPFILTER > 0 + if (ifp->if_bpf) + BPF_TAP(ifp->if_bpf, mace.tx_dma_area, m->m_pkthdr.len); +#endif + +#if 0 + printf("tx packet %d\n", m->m_pkthdr.len); + printContiguousEtherPacket(mace.tx_dma_area, m->m_pkthdr.len); +#endif + + /* fill in the dbdma records and kick off the dma */ + tx_dbdma(mace.tx_dma_area, m->m_pkthdr.len); + mace.tx_busy = 1; + return; +} + +#ifdef MACE_DEBUG +static int +mace_watchdog() +{ + struct ifnet * ifp = &mace.en_arpcom.ac_if; + int s; + + mace.txwatchdog++; + s = splnet(); + if (mace.rxintr == 0) { + printf("rx is hung up\n"); + rx_intr(); + } + mace.rxintr = 0; +#if 0 + if (mace.txintr == 0 && ifp->if_snd.ifq_head) { + if (mace.tx_busy) + dbdma_stop(DBDMA_ETHERNET_TX); + mace.tx_busy = 0; + mace_service_queue(ifp); + } + mace.txintr = 0; +#endif + timeout(mace_watchdog, 0, 10*hz); /* just in case we drop an interrupt */ + return (0); +} +#endif /* MACE_DEBUG */ + +static int +mace_start(struct ifnet * ifp) +{ +// int i = mace.tx_busy; + +// printf("mace_start %s\n", mace.tx_busy ? 
"(txBusy)" : ""); + mace_service_queue(ifp); + +// if (mace.tx_busy && !i) +// printf("(txStarted)\n"); + return 0; +} + +int +mace_recv_pkt(funcptr pktfunc, void * p) +{ + vm_offset_t address; + struct mace_board * board; + long bytes; + int done = 0; + int doContinue = 0; + mace_t * m; + unsigned long resid; + unsigned short status; + int tail; + + m = &mace; + board = m->ereg; + + /* remember where the tail was */ + tail = m->rv_tail; + for (done = 0; (done == 0) && (m->rv_head != tail);) { + dbdma_command_t * dmaHead; + + dmaHead = &m->rv_dma[m->rv_head]; + resid = dbdma_ld4_endian(&dmaHead->d_status_resid); + status = (resid >> 16); + bytes = resid & 0xffff; + bytes = ETHERNET_BUF_SIZE - bytes - 8; /* strip off FCS/CRC */ + + if ((status & DBDMA_ETHERNET_EOP) == 0) { + /* no packets are ready yet */ + break; + } + doContinue = 1; + /* if the packet is good, pass it up */ + if (bytes >= (ETHER_MIN_PACKET - 4)) { + char * dmaPacket; + dmaPacket = &m->rv_dma_area[m->rv_head * ETHERNET_BUF_SIZE]; + done = (*pktfunc)(dmaPacket, bytes, p); + } + /* mark the head as the new tail in the dma channel command list */ + DBDMA_BUILD(dmaHead, DBDMA_CMD_STOP, 0, 0, 0, RECEIVE_INT, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + flush_cache_v((vm_offset_t)dmaHead, sizeof(*dmaHead)); + eieio(); + + /* make the tail an available dma'able entry */ + { + dbdma_command_t * dmaTail; + dmaTail = &m->rv_dma[m->rv_tail]; + address = KVTOPHYS((vm_offset_t) + &m->rv_dma_area[m->rv_tail*ETHERNET_BUF_SIZE]); + // this command is live so write it carefully + DBDMA_ST4_ENDIAN(&dmaTail->d_address, address); + dmaTail->d_status_resid = 0; + dmaTail->d_cmddep = 0; + eieio(); + DBDMA_ST4_ENDIAN(&dmaTail->d_cmd_count, + ((DBDMA_CMD_IN_LAST) << 28) | ((0) << 24) | + ((RECEIVE_INT) << 20) | + ((DBDMA_BRANCH_NEVER) << 18) | ((DBDMA_WAIT_NEVER) << 16) | + (ETHERNET_BUF_SIZE)); + eieio(); + flush_cache_v((vm_offset_t)dmaTail, sizeof(*dmaTail)); + } + /* head becomes the tail */ + m->rv_tail = m->rv_head; 
+ + /* advance the head */ + m->rv_head++; + if (m->rv_head == (ETHER_RX_NUM_DBDMA_BUFS + 1)) + m->rv_head = 0; + } + if (doContinue) { + sync(); + dbdma_continue(DBDMA_ETHERNET_RV); + } + return (done); +} + +/* kdb handle buffer routines */ +struct kdbCopy { + int * len; + char * data; +}; + +static int +kdb_copy(char * pktBuf, int len, void * p) +{ + struct kdbCopy * cp = (struct kdbCopy *)p; + + bcopy(pktBuf, cp->data, len); + *cp->len = len; + return (1); /* signal that we're done */ +} + +/* kdb debugger routines */ +static void +polled_send_pkt(char * data, int len) +{ + waitForDBDMADone("mace: polled_send_pkt start"); + tx_dbdma(data, len); + waitForDBDMADone("mace: polled_send_pkt end"); + return; +} + +static void +polled_receive_pkt(char *data, int *len, int timeout_ms) +{ + struct kdbCopy cp; + + cp.len = len; + cp.data = data; + + timeout_ms *= 1000; + *len = 0; + while (mace_recv_pkt(kdb_copy, (void *)&cp) == 0) { + if (timeout_ms <= 0) + break; + delay(50); + timeout_ms -= 50; + } + return; +} + +/* Bump to force ethernet data to be 4-byte aligned + * (since the ethernet header is 14 bytes, and the 802.3 header is + * 22 = 14+8 bytes). This assumes that m_data is word-aligned + * (which it is). + */ +#define ETHER_DATA_ALIGN 2 + +/* + * Function: rxpkt + * + * Purpose: + * Called from within mace_recv_pkt to deal with a packet of data. + * rxpkt() allocates an mbuf(+cluser) and passes it up to the stacks. + * Returns: + * 0 if the packet was copied to an mbuf, 1 otherwise + */ +static int +rxpkt(char * data, int len, void * p) +{ + struct ether_header * eh_p = (struct ether_header *)data; + struct ifnet * ifp = &mace.en_arpcom.ac_if; + struct mbuf * m; + + int interesting; + + mace.rxintr++; + + /* mcast, bcast -- we're interested in either */ + interesting = eh_p->ether_dhost[0] & 1; + +#if NBPFILTER > 0 + /* + * Check if there's a bpf filter listening on this interface. + * If so, hand off the raw packet to bpf_tap(). 
+ */ + if (ifp->if_bpf) { + BPF_TAP(ifp->if_bpf, data, len); + + /* + * Keep the packet if it's a broadcast or has our + * physical ethernet address (or if we support + * multicast and it's one). + */ + if ((interesting == 0) && bcmp(eh_p->ether_dhost, mace.macaddr, + sizeof(eh_p->ether_dhost)) != 0) { + return (1); + } + } +#endif + + /* + * We "know" a full-sized packet fits in one cluster. Set up the + * packet header, and if the length is sufficient, attempt to allocate + * a cluster. If that fails, fall back to the old way (m_devget()). + * Here, we take the simple approach of cluster vs. single mbuf. + */ + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == 0) { +#ifdef MACE_DEBUG + printf("mget failed\n"); +#endif + return (1); + } + + if (len > (MHLEN - ETHER_DATA_ALIGN)) + { MCLGET(m, M_DONTWAIT); + if (m->m_flags&M_EXT) /* MCLGET succeeded */ + { m->m_data += ETHER_DATA_ALIGN; + bcopy(data, mtod(m, caddr_t), (unsigned)len); + } else + { +#ifdef MACE_DEBUG + printf("no clusters\n"); +#endif + m_free(m); + m = (struct mbuf *)m_devget(data, len, 0, ifp, 0); + if (m == 0) + return (1); + } + } else + { m->m_data += ETHER_DATA_ALIGN; + bcopy(data, mtod(m, caddr_t), (unsigned)len); + } + + /* + * Current code up the line assumes that the media header's been + * stripped, but we'd like to preserve it, just in case someone + * wants to peek. 
+ */ + m->m_pkthdr.len = len; + m->m_len = len; + m->m_pkthdr.rcvif = ifp; + m->m_data += sizeof(*eh_p); + m->m_len -= sizeof (*eh_p); + m->m_pkthdr.len -= sizeof(*eh_p); + ether_input(ifp, eh_p, m); + + return (0); +} + + +static void +rx_intr() +{ + mace_recv_pkt(rxpkt, 0); +} + +void +mace_dbdma_rx_intr(int unit, void *ignored, void * arp) +{ + if (!mace.ready) + return; + + thread_call_func((thread_call_func_t)rx_intr, 0, TRUE); +} + + +int +mace_ioctl(struct ifnet * ifp,u_long cmd, caddr_t data) +{ + struct arpcom * ar; + unsigned error = 0; + struct ifaddr * ifa = (struct ifaddr *)data; + struct ifreq * ifr = (struct ifreq *)data; + struct sockaddr_in * sin; + + sin = (struct sockaddr_in *)(&((struct ifreq *)data)->ifr_addr); + ar = (struct arpcom *)ifp; + + switch (cmd) { + case SIOCAUTOADDR: + error = in_bootp(ifp, sin, &mace.en_arpcom.ac_enaddr); + break; + + case SIOCSIFADDR: +#if NeXT + ifp->if_flags |= (IFF_UP | IFF_RUNNING); +#else + ifp->if_flags |= IFF_UP; +#endif + switch (ifa->ifa_addr->sa_family) { + case AF_INET: + /* + * See if another station has *our* IP address. + * i.e.: There is an address conflict! If a + * conflict exists, a message is sent to the + * console. + */ + if (IA_SIN(ifa)->sin_addr.s_addr != 0) { /* don't bother for 0.0.0.0 */ + ar->ac_ipaddr = IA_SIN(ifa)->sin_addr; + arpwhohas(ar, &IA_SIN(ifa)->sin_addr); + } + break; + default: + break; + } + break; + + case SIOCSIFFLAGS: + /* + * If interface is marked down and it is running, then stop it + */ + if ((ifp->if_flags & IFF_UP) == 0 && + (ifp->if_flags & IFF_RUNNING) != 0) { + /* + * If interface is marked down and it is running, then + * stop it. + */ + ifp->if_flags &= ~IFF_RUNNING; + } else if ((ifp->if_flags & IFF_UP) != 0 && + (ifp->if_flags & IFF_RUNNING) == 0) { + /* + * If interface is marked up and it is stopped, then + * start it. 
+ */ + ifp->if_flags |= IFF_RUNNING; + } + + /* + * If the state of the promiscuous bit changes, the + * interface must be reset to effect the change. + */ + if (((ifp->if_flags ^ mace.promisc) & IFF_PROMISC) && + (ifp->if_flags & IFF_RUNNING)) { + mace.promisc = ifp->if_flags & IFF_PROMISC; + mace_sync_promisc(ifp); + } + + break; + + case SIOCADDMULTI: + if ((error = ether_addmulti(ifr, ar)) == ENETRESET) + { if ((error = mace_addmulti(ifr, ar)) != 0) + { error = 0; + mace_sync_mcast(ifp); + } + } + break; + + case SIOCDELMULTI: + { + struct ether_addr enaddr[2]; /* [0] - addrlo, [1] - addrhi */ + + if ((error = ether_delmulti(ifr, ar, enaddr)) == ENETRESET) { + if ((error = mace_delmulti(ifr, ar, enaddr)) != 0) { + error = 0; + mace_sync_mcast(ifp); + } + } + } + break; + + default: + error = EINVAL; + break; + } + return (error); +} + +void +mace_init() +{ + struct ifnet * ifp = &mace.en_arpcom.ac_if; + + /* + * Only use in-kernel driver for early debugging (bootargs: kdp=1|3) + */ + if ( (kdp_flag & 1) == 0 ) + { + return; + } + + mace.tx_busy = 0; + mace.txintr = 0; + mace.promisc = 0; + + bzero((caddr_t)ifp, sizeof(struct ifnet)); + bcopy(&mace.macaddr, &mace.en_arpcom.ac_enaddr, NUM_EN_ADDR_BYTES); + + ifp->if_name = "en"; + ifp->if_unit = 0; + ifp->if_private = 0; + ifp->if_ioctl = mace_ioctl; + ifp->if_start = mace_start; + ifp->if_flags = + IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; +#if NBPFILTER > 0 + bpfattach(&ifp->if_bpf, ifp, DLT_EN10MB, sizeof(struct ether_header)); +#endif + if_attach(ifp); + ether_ifattach(ifp); + + mace.rxintr = 0; + + /* wire in the interrupt routines */ + pmac_register_int(PMAC_DMA_ETHERNET_RX, SPLNET, + mace_dbdma_rx_intr, 0); + pmac_register_int(PMAC_DMA_ETHERNET_TX, SPLNET, + mace_dbdma_tx_intr, 0); + +// pmac_register_int(PMAC_DEV_ETHERNET, SPLNET, mace_pci_intr); + mace.ready = 1; +#ifdef MACE_DEBUG + timeout(mace_watchdog, 0, 10*hz); /* just in case we drop an interrupt */ +#endif + return; +} + +/* + 
* mace_pci_intr + * + * Service MACE interrupt + */ + +void +mace_pci_intr(int device, void *ssp) +{ + unsigned char ir, retry, frame, packet, length; + + ir = mace.ereg->ir; eieio(); /* Clear Interrupt */ + packet = mace.ereg->mpc; eieio(); + length = mace.ereg->rntpc; eieio(); + + printf("(txI)"); + + if (ir & IR_XMTINT) { + retry = mace.ereg->xmtrc; eieio(); /* Grab transmit retry count */ + frame = mace.ereg->xmtfs; eieio(); +// if (mace.ready) +// mace_dbdma_tx_intr(device, ssp); + } + return; +} + +static void +tx_intr() +{ + mace.txintr++; + mace.tx_busy = 0; + mace_service_queue(&mace.en_arpcom.ac_if); +} + +/* + * mace_dbdma_tx_intr + * + * DBDMA interrupt routine + */ +void +mace_dbdma_tx_intr(int unit, void *ignored, void * arg) +{ + if (!mace.ready) + return; + + thread_call_func((thread_call_func_t)tx_intr, 0, TRUE); + return; +} diff --git a/bsd/if/ppc/if_en.h b/bsd/if/ppc/if_en.h new file mode 100644 index 000000000..8e56ff533 --- /dev/null +++ b/bsd/if/ppc/if_en.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * MacOSX Mace driver + * Defines and device state + * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 + * - ripped off code from MK/LINUX + */ + +#define PG_SIZE 0x1000UL +#define PG_MASK (PG_SIZE - 1UL) + +#define ETHERMTU 1500 +#define ETHER_RX_NUM_DBDMA_BUFS 32 +#define ETHERNET_BUF_SIZE (ETHERMTU + 36) +#define ETHER_MIN_PACKET 64 +#define TX_NUM_DBDMA 6 + +#define DBDMA_ETHERNET_EOP 0x40 + +typedef struct mace_s { + struct arpcom en_arpcom; + struct mace_board * ereg; /* ethernet register set address */ + unsigned char macaddr[NUM_EN_ADDR_BYTES]; /* mac address */ + int chip_id; + dbdma_command_t *rv_dma; + dbdma_command_t *tx_dma; + unsigned char *rv_dma_area; + unsigned char *tx_dma_area; + unsigned char multi_mask[8]; /* Multicast mask */ + unsigned char multi_use[64]; /* Per-mask-bit use count */ + int rv_tail; + int rv_head; + int tx_busy; + int txintr; + int rxintr; + int txwatchdog; + int ready; + int promisc; /* IFF_PROMISC state */ +} mace_t; + diff --git a/bsd/if/ppc/mace.c b/bsd/if/ppc/mace.c new file mode 100644 index 000000000..b66ddd3e9 --- /dev/null +++ b/bsd/if/ppc/mace.c @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * MACE Device-dependent code (some still lives in if_en.c): + * + * MACE Multicast Address scheme - + * Compute Enet CRC for each Mcast address; take high 6 bits of 32-bit + * crc, giving a "bit index" into a 64-bit register. On packet receipt, + * if corresponding bit is set, accept packet. + * We keep track of requests in a per-hash-value table (16-bit counters + * should be sufficient). Since we're hashing, we only care about the + * hash value of each address. + * + * Apple Confidential + * + * (C) COPYRIGHT Apple Computer, Inc., 1994-1997 + * All Rights Reserved + * + * Justin C. Walker + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "if_en.h" +#include "mace.h" + +extern mace_t mace; + +#define ENET_CRCPOLY 0x04c11db7 + +/* Real fast bit-reversal algorithm, 6-bit values */ +int reverse6[] = +{ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, + 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, + 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, + 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, + 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, + 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, + 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, + 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f +}; + +unsigned int crc416(current, nxtval) +register unsigned int current; +register unsigned short nxtval; +{ register unsigned int counter; + register int highCRCBitSet, lowDataBitSet; + + /* Swap bytes */ + nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8); + + /* Compute bit-by-bit */ + for (counter = 0; counter != 16; ++counter) + { /* is high CRC bit set? 
*/ + if ((current & 0x80000000) == NULL) + highCRCBitSet = 0; + else + highCRCBitSet = 1; + + current = current << 1; + + if ((nxtval & 0x0001) == NULL) + lowDataBitSet = 0; + else + lowDataBitSet = 1; + + nxtval = nxtval >> 1; + + /* do the XOR */ + if (highCRCBitSet ^ lowDataBitSet) + current = current ^ ENET_CRCPOLY; + } + return current; +} + +unsigned int mace_crc(unsigned short *address) +{ register unsigned int newcrc; + + newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ + newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ + newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ + + return(newcrc); +} + +/* + * Add requested mcast addr to Mace's filter. Assume that the first + * address in the arpcom ac_multiaddrs list is the one we're interested in. + */ +int +mace_addmulti(register struct ifreq *ifr, register struct arpcom *ar) +{ register unsigned char *addr; + unsigned int crc; + unsigned char mask; + + addr = ar->ac_multiaddrs->enm_addrlo; + + crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (mace.multi_use[crc]++) + return(0); /* This bit is already set */ + mask = crc % 8; + mask = (unsigned char)1 << mask; + mace.multi_mask[crc/8] |= mask; + return(1); +} + +int +mace_delmulti(register struct ifreq *ifr, register struct arpcom *ar, + struct ether_addr * enaddr) +{ register unsigned char *addr; + unsigned int crc; + unsigned char mask; + + addr = (char *)enaddr; /* XXX assumes addrlo == addrhi */ + + /* Now, delete the address from the filter copy, as indicated */ + crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (mace.multi_use[crc] == 0) + return(EINVAL); /* That bit wasn't in use! 
*/ + + if (--mace.multi_use[crc]) + return(0); /* That bit is still in use */ + + mask = crc % 8; + mask = ((unsigned char)1 << mask) ^ 0xff; /* To turn off bit */ + mace.multi_mask[crc/8] &= mask; + return(1); +} + +/* + * Sync the adapter with the software copy of the multicast mask + * (logical address filter). + * If we want all m-cast addresses, we just blast 1's into the filter. + * When we reverse this, we can use the current state of the (software) + * filter, which should have been kept up to date. + */ +void +mace_sync_mcast(register struct ifnet * ifp) +{ register unsigned long temp, temp1; + register int i; + register char *p; + register struct mace_board *ereg = mace.ereg; + + temp = ereg->maccc; + + /* + * Have to deal with early rev of chip for updating LAF + * Don't know if any MacOSX systems still run this rev. + */ + if (mace.chip_id == MACERevA2) + { /* First, turn off receiver */ + temp1 = temp&~MACCC_ENRCV; + ereg->maccc = temp1; + eieio(); + + /* Then, check FIFO - frame being received will complete */ + temp1 = ereg->fifofc; + + mace.ereg->iac = IAC_LOGADDR; + eieio(); + } else + { ereg->iac = IAC_ADDRCHG|IAC_LOGADDR; + eieio(); + + while (temp1 = ereg->iac) + { eieio(); + if ((temp1&IAC_ADDRCHG) == 0) + break; + } + } + + if (ifp->if_flags & IFF_ALLMULTI) /* Then want ALL m-cast pkts */ + { /* set mask to all 1's */ + for (i=0;i<8;i++) + { ereg->ladrf = 0xff; + eieio(); + } + } else + { + /* Assuming everything is big-endian */ + for (i=0, p = &mace.multi_mask[0];i<8;i++) + { ereg->ladrf = *p++; + eieio(); + } + } + + ereg->maccc = temp; /* Reset config ctrlr */ + eieio(); + +} + +void +mace_sync_promisc(register struct ifnet *ifp) +{ + register u_long o_maccc, n_maccc; + register struct mace_board *ereg = mace.ereg; + + /* + * Save current state and disable receive. 
+ */ + o_maccc = ereg->maccc; + n_maccc = o_maccc & ~MACCC_ENRCV; + ereg->maccc = n_maccc; + eieio(); + + /* + * Calculate new desired state + */ + if (ifp->if_flags & IFF_PROMISC) { + /* set PROMISC bit */ + o_maccc |= MACCC_PROM; + } else { + /* clear PROMISC bit */ + o_maccc &= ~MACCC_PROM; + } + + /* + * Note that the "old" mode includes the new promiscuous state now. + */ + ereg->maccc = o_maccc; + eieio(); +} diff --git a/bsd/if/ppc/mace.h b/bsd/if/ppc/mace.h new file mode 100644 index 000000000..c439e4ccc --- /dev/null +++ b/bsd/if/ppc/mace.h @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. 
+ * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * PMach Operating System + * Copyright (c) 1995 Santa Clara University + * All Rights Reserved. + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: if_3c501.h + * Author: Philippe Bernadat + * Date: 1989 + * Copyright (c) 1989 OSF Research Institute + * + * 3COM Etherlink 3C501 Mach Ethernet drvier + */ +/* + Copyright 1990 by Open Software Foundation, +Cambridge, MA. + + All Rights Reserved + + Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appears in all copies and +that both the copyright notice and this permission notice appear in +supporting documentation, and that the name of OSF or Open Software +Foundation not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. 
+ + OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, +IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, +NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#ifdef KERNEL +#include +#endif + + +#define ENETPAD(n) char n[15] + +/* 0x50f0a000 */ +struct mace_board { + volatile unsigned char rcvfifo; /* 00 receive fifo */ + ENETPAD(epad0); + volatile unsigned char xmtfifo; /* 01 transmit fifo */ + ENETPAD(epad1); + volatile unsigned char xmtfc; /* 02 transmit frame control */ + ENETPAD(epad2); + volatile unsigned char xmtfs; /* 03 transmit frame status */ + ENETPAD(epad3); + volatile unsigned char xmtrc; /* 04 transmit retry count */ + ENETPAD(epad4); + volatile unsigned char rcvfc; /* 05 receive frame control -- 4 bytes */ + ENETPAD(epad5); + volatile unsigned char rcvfs; /* 06 receive frame status */ + ENETPAD(epad6); + volatile unsigned char fifofc; /* 07 fifo frame count */ + ENETPAD(epad7); + volatile unsigned char ir; /* 08 interrupt */ + ENETPAD(epad8); + volatile unsigned char imr; /* 09 interrupt mask */ + ENETPAD(epad9); + volatile unsigned char pr; /* 10 poll */ + ENETPAD(epad10); + volatile unsigned char biucc; /* 11 bus interface unit configuration control */ + ENETPAD(epad11); + volatile unsigned char fifocc; /* 12 fifo configuration control */ + ENETPAD(epad12); + volatile unsigned char maccc; /* 13 media access control configuration control */ + ENETPAD(epad13); + volatile unsigned char plscc; /* 14 physical layer signalling configuration control */ + ENETPAD(epad14); + volatile unsigned char phycc; /* 15 physical layer configuration control */ + ENETPAD(epad15); + volatile unsigned char chipid1; /* 16 chip identification LSB */ + ENETPAD(epad16); + volatile unsigned char 
chipid2; /* 17 chip identification MSB */ + ENETPAD(epad17); + volatile unsigned char iac; /* 18 internal address configuration */ + ENETPAD(epad18); + volatile unsigned char res1; /* 19 */ + ENETPAD(epad19); + volatile unsigned char ladrf; /* 20 logical address filter -- 8 bytes */ + ENETPAD(epad20); + volatile unsigned char padr; /* 21 physical address -- 6 bytes */ + ENETPAD(epad21); + volatile unsigned char res2; /* 22 */ + ENETPAD(epad22); + volatile unsigned char res3; /* 23 */ + ENETPAD(epad23); + volatile unsigned char mpc; /* 24 missed packet count */ + ENETPAD(epad24); + volatile unsigned char res4; /* 25 */ + ENETPAD(epad25); + volatile unsigned char rntpc; /* 26 runt packet count */ + ENETPAD(epad26); + volatile unsigned char rcvcc; /* 27 receive collision count */ + ENETPAD(epad27); + volatile unsigned char res5; /* 28 */ + ENETPAD(epad28); + volatile unsigned char utr; /* 29 user test */ + ENETPAD(epad29); + volatile unsigned char res6; /* 30 */ + ENETPAD(epad30); + volatile unsigned char res7; /* 31 */ + }; + +/* + * Chip Revisions.. 
+ */ + +#define MACE_REVISION_B0 0x0940 +#define MACE_REVISION_A2 0x0941 + +/* xmtfc */ +#define XMTFC_DRTRY 0X80 +#define XMTFC_DXMTFCS 0x08 +#define XMTFC_APADXNT 0x01 + +/* xmtfs */ +#define XMTFS_XNTSV 0x80 +#define XMTFS_XMTFS 0x40 +#define XMTFS_LCOL 0x20 +#define XMTFS_MORE 0x10 +#define XMTFS_ONE 0x08 +#define XMTFS_DEFER 0x04 +#define XMTFS_LCAR 0x02 +#define XMTFS_RTRY 0x01 + +/* xmtrc */ +#define XMTRC_EXDEF 0x80 + +/* rcvfc */ +#define RCVFC_LLRCV 0x08 +#define RCVFC_M_R 0x04 +#define RCVFC_ASTRPRCV 0x01 + +/* rcvfs */ +#define RCVFS_OFLO 0x80 +#define RCVFS_CLSN 0x40 +#define RCVFS_FRAM 0x20 +#define RCVFS_FCS 0x10 +#define RCVFS_REVCNT 0x0f + +/* fifofc */ +#define FIFOCC_XFW_8 0x00 +#define FIFOCC_XFW_16 0x40 +#define FIFOCC_XFW_32 0x80 +#define FIFOCC_XFW_XX 0xc0 +#define FIFOCC_RFW_16 0x00 +#define FIFOCC_RFW_32 0x10 +#define FIFOCC_RFW_64 0x20 +#define FIFOCC_RFW_XX 0x30 +#define FIFOCC_XFWU 0x08 +#define FIFOCC_RFWU 0x04 +#define FIFOCC_XBRST 0x02 +#define FIFOCC_RBRST 0x01 + + +/* ir */ +#define IR_JAB 0x80 +#define IR_BABL 0x40 +#define IR_CERR 0x20 +#define IR_RCVCCO 0x10 +#define IR_RNTPCO 0x08 +#define IR_MPCO 0x04 +#define IR_RCVINT 0x02 +#define IR_XMTINT 0x01 + +/* imr */ +#define IMR_MJAB 0x80 +#define IMR_MBABL 0x40 +#define IMR_MCERR 0x20 +#define IMR_MRCVCCO 0x10 +#define IMR_MRNTPCO 0x08 +#define IMR_MMPCO 0x04 +#define IMR_MRCVINT 0x02 +#define IMR_MXMTINT 0x01 + +/* pr */ +#define PR_XMTSV 0x80 +#define PR_TDTREQ 0x40 +#define PR_RDTREQ 0x20 + +/* biucc */ +#define BIUCC_BSWP 0x40 +#define BIUCC_XMTSP04 0x00 +#define BIUCC_XMTSP16 0x10 +#define BIUCC_XMTSP64 0x20 +#define BIUCC_XMTSP112 0x30 +#define BIUCC_SWRST 0x01 + +/* fifocc */ +#define FIFOCC_XMTFW08W 0x00 +#define FIFOCC_XMTFW16W 0x40 +#define FIFOCC_XMTFW32W 0x80 + +#define FIFOCC_RCVFW16 0x00 +#define FIFOCC_RCVFW32 0x10 +#define FIFOCC_RCVFW64 0x20 + +#define FIFOCC_XMTFWU 0x08 +#define FIFOCC_RCVFWU 0x04 +#define FIFOCC_XMTBRST 0x02 +#define FIFOCC_RCVBRST 0x01 + +/* 
maccc */ +#define MACCC_PROM 0x80 +#define MACCC_DXMT2PD 0x40 +#define MACCC_EMBA 0x20 +#define MACCC_DRCVPA 0x08 +#define MACCC_DRCVBC 0x04 +#define MACCC_ENXMT 0x02 +#define MACCC_ENRCV 0x01 + +/* plscc */ +#define PLSCC_XMTSEL 0x08 +#define PLSCC_AUI 0x00 +#define PLSCC_TENBASE 0x02 +#define PLSCC_DAI 0x04 +#define PLSCC_GPSI 0x06 +#define PLSCC_ENPLSIO 0x01 + +/* phycc */ +#define PHYCC_LNKFL 0x80 +#define PHYCC_DLNKTST 0x40 +#define PHYCC_REVPOL 0x20 +#define PHYCC_DAPC 0x10 +#define PHYCC_LRT 0x08 +#define PHYCC_ASEL 0x04 +#define PHYCC_RWAKE 0x02 +#define PHYCC_AWAKE 0x01 + +/* iac */ +#define IAC_ADDRCHG 0x80 +#define IAC_PHYADDR 0x04 +#define IAC_LOGADDR 0x02 + +/* utr */ +#define UTR_RTRE 0x80 +#define UTR_RTRD 0x40 +#define UTR_RPA 0x20 +#define UTR_FCOLL 0x10 +#define UTR_RCVFCSE 0x08 + +#define UTR_NOLOOP 0x00 +#define UTR_EXTLOOP 0x02 +#define UTR_INLOOP 0x04 +#define UTR_INLOOP_M 0x06 + +#define ENET_PHYADDR_LEN 6 +#define ENET_HEADER 14 + +#define BFRSIZ 2048 +#define ETHER_ADD_SIZE 6 /* size of a MAC address */ +#define DSF_LOCK 1 +#define DSF_RUNNING 2 +#define MOD_ENAL 1 +#define MOD_PROM 2 + +/* + * MACE Chip revision codes + */ +#define MACERevA2 0x0941 +#define MACERevB0 0x0940 + +#ifdef KERNEL +int mace_delmulti __P((register struct ifreq *, register struct arpcom *, + struct ether_addr *)); +int mace_addmulti __P((register struct ifreq *, register struct arpcom *)); +void mace_sync_mcast __P((register struct ifnet *)); +void mace_sync_promisc __P((register struct ifnet *)); +#endif /* KERNEL */ + diff --git a/bsd/include/Makefile b/bsd/include/Makefile new file mode 100644 index 000000000..ca571a43d --- /dev/null +++ b/bsd/include/Makefile @@ -0,0 +1,44 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + 
arpa \ + protocols + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + ar.h bitstring.h c.h ctype.h db.h dirent.h disktab.h err.h errno.h \ + fcntl.h fnmatch.h fsproperties.h fstab.h fts.h glob.h grp.h kvm.h limits.h locale.h \ + math.h memory.h mpool.h ndbm.h netdb.h nlist.h paths.h pwd.h ranlib.h \ + regex.h regexp.h rune.h runetype.h setjmp.h semaphore.h sgtty.h signal.h stab.h \ + stddef.h stdio.h stdlib.h string.h strings.h struct.h sysexits.h syslog.h \ + tar.h termios.h time.h ttyent.h tzfile.h unistd.h util.h utime.h utmp.h vis.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = + +EXPORT_MI_LIST = stddef.h + +EXPORT_MI_DIR = + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/include/ar.h b/bsd/include/ar.h new file mode 100644 index 000000000..def1c4320 --- /dev/null +++ b/bsd/include/ar.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. 
All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * This code is derived from software contributed to Berkeley by + * Hugh Smith at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ar.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _AR_H_ +#define _AR_H_ + +/* Pre-4BSD archives had these magic numbers in them. */ +#define OARMAG1 0177555 +#define OARMAG2 0177545 + +#define ARMAG "!\n" /* ar "magic number" */ +#define SARMAG 8 /* strlen(ARMAG); */ + +#define AR_EFMT1 "#1/" /* extended format #1 */ + +struct ar_hdr { + char ar_name[16]; /* name */ + char ar_date[12]; /* modification time */ + char ar_uid[6]; /* user id */ + char ar_gid[6]; /* group id */ + char ar_mode[8]; /* octal file permissions */ + char ar_size[10]; /* size in bytes */ +#define ARFMAG "`\n" + char ar_fmag[2]; /* consistency check */ +}; + +#endif /* !_AR_H_ */ diff --git a/bsd/include/arpa/Makefile b/bsd/include/arpa/Makefile new file mode 100644 index 000000000..473ffe8ec --- /dev/null +++ b/bsd/include/arpa/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + ftp.h inet.h telnet.h tftp.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = arpa + +EXPORT_MI_LIST = + +EXPORT_MI_DIR = + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + 
diff --git a/bsd/include/arpa/ftp.h b/bsd/include/arpa/ftp.h new file mode 100644 index 000000000..90b425b79 --- /dev/null +++ b/bsd/include/arpa/ftp.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ftp.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _FTP_H_ +#define _FTP_H_ + +/* Definitions for FTP; see RFC-765. */ + +/* + * Reply codes. 
+ */ +#define PRELIM 1 /* positive preliminary */ +#define COMPLETE 2 /* positive completion */ +#define CONTINUE 3 /* positive intermediate */ +#define TRANSIENT 4 /* transient negative completion */ +#define ERROR 5 /* permanent negative completion */ + +/* + * Type codes + */ +#define TYPE_A 1 /* ASCII */ +#define TYPE_E 2 /* EBCDIC */ +#define TYPE_I 3 /* image */ +#define TYPE_L 4 /* local byte size */ + +#ifdef FTP_NAMES +char *typenames[] = {"0", "ASCII", "EBCDIC", "Image", "Local" }; +#endif + +/* + * Form codes + */ +#define FORM_N 1 /* non-print */ +#define FORM_T 2 /* telnet format effectors */ +#define FORM_C 3 /* carriage control (ASA) */ +#ifdef FTP_NAMES +char *formnames[] = {"0", "Nonprint", "Telnet", "Carriage-control" }; +#endif + +/* + * Structure codes + */ +#define STRU_F 1 /* file (no record structure) */ +#define STRU_R 2 /* record structure */ +#define STRU_P 3 /* page structure */ +#ifdef FTP_NAMES +char *strunames[] = {"0", "File", "Record", "Page" }; +#endif + +/* + * Mode types + */ +#define MODE_S 1 /* stream */ +#define MODE_B 2 /* block */ +#define MODE_C 3 /* compressed */ +#ifdef FTP_NAMES +char *modenames[] = {"0", "Stream", "Block", "Compressed" }; +#endif + +/* + * Record Tokens + */ +#define REC_ESC '\377' /* Record-mode Escape */ +#define REC_EOR '\001' /* Record-mode End-of-Record */ +#define REC_EOF '\002' /* Record-mode End-of-File */ + +/* + * Block Header + */ +#define BLK_EOR 0x80 /* Block is End-of-Record */ +#define BLK_EOF 0x40 /* Block is End-of-File */ +#define BLK_ERRORS 0x20 /* Block is suspected of containing errors */ +#define BLK_RESTART 0x10 /* Block is Restart Marker */ + +#define BLK_BYTECOUNT 2 /* Bytes in this block */ + +#endif /* !_FTP_H_ */ diff --git a/bsd/include/arpa/inet.h b/bsd/include/arpa/inet.h new file mode 100644 index 000000000..48e2754de --- /dev/null +++ b/bsd/include/arpa/inet.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)inet.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _INET_H_ +#define _INET_H_ + +/* External definitions for functions in inet(3) */ + +#include + +__BEGIN_DECLS +unsigned long inet_addr __P((const char *)); +int inet_aton __P((const char *, struct in_addr *)); +unsigned long inet_lnaof __P((struct in_addr)); +struct in_addr inet_makeaddr __P((u_long , u_long)); +unsigned long inet_netof __P((struct in_addr)); +unsigned long inet_network __P((const char *)); +char *inet_ntoa __P((struct in_addr)); +__END_DECLS + +#endif /* !_INET_H_ */ diff --git a/bsd/include/arpa/nameser.h b/bsd/include/arpa/nameser.h new file mode 100644 index 000000000..97d292155 --- /dev/null +++ b/bsd/include/arpa/nameser.h @@ -0,0 +1,448 @@ +/* + * Copyright (c) 1983, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. + */ + +/* + * From: Id: nameser.h,v 8.16 1998/02/06 00:35:58 halley Exp + * $FreeBSD: src/include/arpa/nameser.h,v 1.12.2.1 1999/08/29 14:39:00 peter Exp $ + */ + +#ifndef _ARPA_NAMESER_H_ +#define _ARPA_NAMESER_H_ + +#define BIND_4_COMPAT + +#include +#include + +/* + * revision information. this is the release date in YYYYMMDD format. + * it can change every day so the right thing to do with it is use it + * in preprocessor commands such as "#if (__NAMESER > 19931104)". do not + * compare for equality; rather, use it to determine whether your libnameser.a + * is new enough to contain a certain feature. + */ + +/* XXXRTH I made this bigger than __BIND in 4.9.5 T6B */ +#define __NAMESER 19961001 /* New interface version stamp. */ + +/* + * Define constants based on RFC 883, RFC 1034, RFC 1035 + */ +#define NS_PACKETSZ 512 /* maximum packet size */ +#define NS_MAXDNAME 1025 /* maximum domain name */ +#define NS_MAXCDNAME 255 /* maximum compressed domain name */ +#define NS_MAXLABEL 63 /* maximum length of domain label */ +#define NS_HFIXEDSZ 12 /* #/bytes of fixed data in header */ +#define NS_QFIXEDSZ 4 /* #/bytes of fixed data in query */ +#define NS_RRFIXEDSZ 10 /* #/bytes of fixed data in r record */ +#define NS_INT32SZ 4 /* #/bytes of data in a u_int32_t */ +#define NS_INT16SZ 2 /* #/bytes of data in a u_int16_t */ +#define NS_INT8SZ 1 /* #/bytes of data in a u_int8_t */ +#define NS_INADDRSZ 4 /* IPv4 T_A */ +#define NS_IN6ADDRSZ 16 /* IPv6 T_AAAA */ +#define NS_CMPRSFLGS 0xc0 /* Flag bits indicating name compression. */ +#define NS_DEFAULTPORT 53 /* For both TCP and UDP. 
*/ + +/* + * These can be expanded with synonyms, just keep ns_parse.c:ns_parserecord() + * in synch with it. + */ +typedef enum __ns_sect { + ns_s_qd = 0, /* Query: Question. */ + ns_s_zn = 0, /* Update: Zone. */ + ns_s_an = 1, /* Query: Answer. */ + ns_s_pr = 1, /* Update: Prerequisites. */ + ns_s_ns = 2, /* Query: Name servers. */ + ns_s_ud = 2, /* Update: Update. */ + ns_s_ar = 3, /* Query|Update: Additional records. */ + ns_s_max = 4 +} ns_sect; + +/* + * This is a message handle. It is caller allocated and has no dynamic data. + * This structure is intended to be opaque to all but ns_parse.c, thus the + * leading _'s on the member names. Use the accessor functions, not the _'s. + */ +typedef struct __ns_msg { + const u_char *_msg, *_eom; + u_int16_t _id, _flags, _counts[ns_s_max]; + const u_char *_sections[ns_s_max]; + ns_sect _sect; + int _rrnum; + const u_char *_ptr; +} ns_msg; + +/* Private data structure - do not use from outside library. */ +struct _ns_flagdata { int mask, shift; }; +extern struct _ns_flagdata _ns_flagdata[]; + +/* Accessor macros - this is part of the public interface. */ +#define ns_msg_getflag(handle, flag) ( \ + ((handle)._flags & _ns_flagdata[flag].mask) \ + >> _ns_flagdata[flag].shift \ + ) +#define ns_msg_id(handle) ((handle)._id + 0) +#define ns_msg_base(handle) ((handle)._msg + 0) +#define ns_msg_end(handle) ((handle)._eom + 0) +#define ns_msg_size(handle) ((handle)._eom - (handle)._msg) +#define ns_msg_count(handle, section) ((handle)._counts[section] + 0) + +/* + * This is a parsed record. It is caller allocated and has no dynamic data. + */ +typedef struct __ns_rr { + char name[NS_MAXDNAME]; /* XXX need to malloc */ + u_int16_t type; + u_int16_t class; + u_int32_t ttl; + u_int16_t rdlength; + const u_char *rdata; +} ns_rr; + +/* Accessor macros - this is part of the public interface. */ +#define ns_rr_name(rr) (((rr).name[0] != '\0') ? 
(rr).name : ".") +#define ns_rr_type(rr) ((rr).type + 0) +#define ns_rr_class(rr) ((rr).class + 0) +#define ns_rr_ttl(rr) ((rr).ttl + 0) +#define ns_rr_rdlen(rr) ((rr).rdlength + 0) +#define ns_rr_rdata(rr) ((rr).rdata + 0) + +/* + * These don't have to be in the same order as in the packet flags word, + * and they can even overlap in some cases, but they will need to be kept + * in synch with ns_parse.c:ns_flagdata[]. + */ +typedef enum __ns_flag { + ns_f_qr, /* Question/Response. */ + ns_f_opcode, /* Operation code. */ + ns_f_aa, /* Authoritative Answer. */ + ns_f_tc, /* Truncation occurred. */ + ns_f_rd, /* Recursion Desired. */ + ns_f_ra, /* Recursion Available. */ + ns_f_z, /* MBZ. */ + ns_f_ad, /* Authentic Data (DNSSEC). */ + ns_f_cd, /* Checking Disabled (DNSSEC). */ + ns_f_rcode, /* Response code. */ + ns_f_max +} ns_flag; + +/* + * Currently defined opcodes. + */ +typedef enum __ns_opcode { + ns_o_query = 0, /* Standard query. */ + ns_o_iquery = 1, /* Inverse query (deprecated/unsupported). */ + ns_o_status = 2, /* Name server status query (unsupported). */ + /* Opcode 3 is undefined/reserved. */ + ns_o_notify = 4, /* Zone change notification. */ + ns_o_update = 5, /* Zone update message. */ + ns_o_max = 6 +} ns_opcode; + +/* + * Currently defined response codes. + */ +typedef enum __ns_rcode { + ns_r_noerror = 0, /* No error occurred. */ + ns_r_formerr = 1, /* Format error. */ + ns_r_servfail = 2, /* Server failure. */ + ns_r_nxdomain = 3, /* Name error. */ + ns_r_notimpl = 4, /* Unimplemented. */ + ns_r_refused = 5, /* Operation refused. 
*/ + /* these are for BIND_UPDATE */ + ns_r_yxdomain = 6, /* Name exists */ + ns_r_yxrrset = 7, /* RRset exists */ + ns_r_nxrrset = 8, /* RRset does not exist */ + ns_r_notauth = 9, /* Not authoritative for zone */ + ns_r_notzone = 10, /* Zone of record different from zone section */ + ns_r_max = 11 +} ns_rcode; + +/* BIND_UPDATE */ +typedef enum __ns_update_operation { + ns_uop_delete = 0, + ns_uop_add = 1, + ns_uop_max = 2 +} ns_update_operation; + +/* + * This RR-like structure is particular to UPDATE. + */ +struct ns_updrec { + struct ns_updrec *r_prev; /* prev record */ + struct ns_updrec *r_next; /* next record */ + u_int8_t r_section; /* ZONE/PREREQUISITE/UPDATE */ + char * r_dname; /* owner of the RR */ + u_int16_t r_class; /* class number */ + u_int16_t r_type; /* type number */ + u_int32_t r_ttl; /* time to live */ + u_char * r_data; /* rdata fields as text string */ + u_int16_t r_size; /* size of r_data field */ + int r_opcode; /* type of operation */ + /* following fields for private use by the resolver/server routines */ + struct ns_updrec *r_grpnext; /* next record when grouped */ + struct databuf *r_dp; /* databuf to process */ + struct databuf *r_deldp; /* databuf's deleted/overwritten */ + u_int16_t r_zone; /* zone number on server */ +}; +typedef struct ns_updrec ns_updrec; + +/* + * Currently defined type values for resources and queries. + */ +typedef enum __ns_type { + ns_t_a = 1, /* Host address. */ + ns_t_ns = 2, /* Authoritative server. */ + ns_t_md = 3, /* Mail destination. */ + ns_t_mf = 4, /* Mail forwarder. */ + ns_t_cname = 5, /* Canonical name. */ + ns_t_soa = 6, /* Start of authority zone. */ + ns_t_mb = 7, /* Mailbox domain name. */ + ns_t_mg = 8, /* Mail group member. */ + ns_t_mr = 9, /* Mail rename name. */ + ns_t_null = 10, /* Null resource record. */ + ns_t_wks = 11, /* Well known service. */ + ns_t_ptr = 12, /* Domain name pointer. */ + ns_t_hinfo = 13, /* Host information. */ + ns_t_minfo = 14, /* Mailbox information. 
*/ + ns_t_mx = 15, /* Mail routing information. */ + ns_t_txt = 16, /* Text strings. */ + ns_t_rp = 17, /* Responsible person. */ + ns_t_afsdb = 18, /* AFS cell database. */ + ns_t_x25 = 19, /* X_25 calling address. */ + ns_t_isdn = 20, /* ISDN calling address. */ + ns_t_rt = 21, /* Router. */ + ns_t_nsap = 22, /* NSAP address. */ + ns_t_nsap_ptr = 23, /* Reverse NSAP lookup (deprecated). */ + ns_t_sig = 24, /* Security signature. */ + ns_t_key = 25, /* Security key. */ + ns_t_px = 26, /* X.400 mail mapping. */ + ns_t_gpos = 27, /* Geographical position (withdrawn). */ + ns_t_aaaa = 28, /* Ip6 Address. */ + ns_t_loc = 29, /* Location Information. */ + ns_t_nxt = 30, /* Next domain (security). */ + ns_t_eid = 31, /* Endpoint identifier. */ + ns_t_nimloc = 32, /* Nimrod Locator. */ + ns_t_srv = 33, /* Server Selection. */ + ns_t_atma = 34, /* ATM Address */ + ns_t_naptr = 35, /* Naming Authority PoinTeR */ + /* Query type values which do not appear in resource records. */ + ns_t_ixfr = 251, /* Incremental zone transfer. */ + ns_t_axfr = 252, /* Transfer zone of authority. */ + ns_t_mailb = 253, /* Transfer mailbox records. */ + ns_t_maila = 254, /* Transfer mail agent records. */ + ns_t_any = 255, /* Wildcard match. */ + ns_t_max = 65536 +} ns_type; + +/* + * Values for class field + */ +typedef enum __ns_class { + ns_c_in = 1, /* Internet. */ + /* Class 2 unallocated/unsupported. */ + ns_c_chaos = 3, /* MIT Chaos-net. */ + ns_c_hs = 4, /* MIT Hesiod. */ + /* Query class values which do not appear in resource records */ + ns_c_none = 254, /* for prereq. sections in update requests */ + ns_c_any = 255, /* Wildcard match. 
*/ + ns_c_max = 65536 +} ns_class; + +/* + * Flags field of the KEY RR rdata + */ +#define NS_KEY_TYPEMASK 0xC000 /* Mask for "type" bits */ +#define NS_KEY_TYPE_AUTH_CONF 0x0000 /* Key usable for both */ +#define NS_KEY_TYPE_CONF_ONLY 0x8000 /* Key usable for confidentiality */ +#define NS_KEY_TYPE_AUTH_ONLY 0x4000 /* Key usable for authentication */ +#define NS_KEY_TYPE_NO_KEY 0xC000 /* No key usable for either; no key */ +/* The type bits can also be interpreted independently, as single bits: */ +#define NS_KEY_NO_AUTH 0x8000 /* Key unusable for authentication */ +#define NS_KEY_NO_CONF 0x4000 /* Key unusable for confidentiality */ +#define NS_KEY_EXPERIMENTAL 0x2000 /* Security is *mandatory* if bit=0 */ +#define NS_KEY_RESERVED3 0x1000 /* reserved - must be zero */ +#define NS_KEY_RESERVED4 0x0800 /* reserved - must be zero */ +#define NS_KEY_USERACCOUNT 0x0400 /* key is assoc. with a user acct */ +#define NS_KEY_ENTITY 0x0200 /* key is assoc. with entity eg host */ +#define NS_KEY_ZONEKEY 0x0100 /* key is zone key */ +#define NS_KEY_IPSEC 0x0080 /* key is for IPSEC (host or user)*/ +#define NS_KEY_EMAIL 0x0040 /* key is for email (MIME security) */ +#define NS_KEY_RESERVED10 0x0020 /* reserved - must be zero */ +#define NS_KEY_RESERVED11 0x0010 /* reserved - must be zero */ +#define NS_KEY_SIGNATORYMASK 0x000F /* key can sign RR's of same name */ + +#define NS_KEY_RESERVED_BITMASK ( NS_KEY_RESERVED3 | \ + NS_KEY_RESERVED4 | \ + NS_KEY_RESERVED10 | \ + NS_KEY_RESERVED11 ) + +/* The Algorithm field of the KEY and SIG RR's is an integer, {1..254} */ +#define NS_ALG_MD5RSA 1 /* MD5 with RSA */ +#define NS_ALG_EXPIRE_ONLY 253 /* No alg, no security */ +#define NS_ALG_PRIVATE_OID 254 /* Key begins with OID giving alg */ + +/* Signatures */ +#define NS_MD5RSA_MIN_BITS 512 /* Size of a mod or exp in bits */ +#define NS_MD5RSA_MAX_BITS 2552 + /* Total of binary mod and exp */ +#define NS_MD5RSA_MAX_BYTES ((NS_MD5RSA_MAX_BITS+7/8)*2+3) + /* Max length of text sig block 
*/ +#define NS_MD5RSA_MAX_BASE64 (((NS_MD5RSA_MAX_BYTES+2)/3)*4) + +/* Offsets into SIG record rdata to find various values */ +#define NS_SIG_TYPE 0 /* Type flags */ +#define NS_SIG_ALG 2 /* Algorithm */ +#define NS_SIG_LABELS 3 /* How many labels in name */ +#define NS_SIG_OTTL 4 /* Original TTL */ +#define NS_SIG_EXPIR 8 /* Expiration time */ +#define NS_SIG_SIGNED 12 /* Signature time */ +#define NS_SIG_FOOT 16 /* Key footprint */ +#define NS_SIG_SIGNER 18 /* Domain name of who signed it */ + +/* How RR types are represented as bit-flags in NXT records */ +#define NS_NXT_BITS 8 +#define NS_NXT_BIT_SET( n,p) (p[(n)/NS_NXT_BITS] |= (0x80>>((n)%NS_NXT_BITS))) +#define NS_NXT_BIT_CLEAR(n,p) (p[(n)/NS_NXT_BITS] &= ~(0x80>>((n)%NS_NXT_BITS))) +#define NS_NXT_BIT_ISSET(n,p) (p[(n)/NS_NXT_BITS] & (0x80>>((n)%NS_NXT_BITS))) + + +/* + * Inline versions of get/put short/long. Pointer is advanced. + */ +#define NS_GET16(s, cp) { \ + register u_char *t_cp = (u_char *)(cp); \ + (s) = ((u_int16_t)t_cp[0] << 8) \ + | ((u_int16_t)t_cp[1]) \ + ; \ + (cp) += NS_INT16SZ; \ +} + +#define NS_GET32(l, cp) { \ + register u_char *t_cp = (u_char *)(cp); \ + (l) = ((u_int32_t)t_cp[0] << 24) \ + | ((u_int32_t)t_cp[1] << 16) \ + | ((u_int32_t)t_cp[2] << 8) \ + | ((u_int32_t)t_cp[3]) \ + ; \ + (cp) += NS_INT32SZ; \ +} + +#define NS_PUT16(s, cp) { \ + register u_int16_t t_s = (u_int16_t)(s); \ + register u_char *t_cp = (u_char *)(cp); \ + *t_cp++ = t_s >> 8; \ + *t_cp = t_s; \ + (cp) += NS_INT16SZ; \ +} + +#define NS_PUT32(l, cp) { \ + register u_int32_t t_l = (u_int32_t)(l); \ + register u_char *t_cp = (u_char *)(cp); \ + *t_cp++ = t_l >> 24; \ + *t_cp++ = t_l >> 16; \ + *t_cp++ = t_l >> 8; \ + *t_cp = t_l; \ + (cp) += NS_INT32SZ; \ +} + +/* + * ANSI C identifier hiding. 
+ */ +#define ns_get16 __ns_get16 +#define ns_get32 __ns_get32 +#define ns_put16 __ns_put16 +#define ns_put32 __ns_put32 +#define ns_initparse __ns_initparse +#define ns_parserr __ns_parserr +#define ns_sprintrr __ns_sprintrr +#define ns_sprintrrf __ns_sprintrrf +#define ns_format_ttl __ns_format_ttl +#define ns_parse_ttl __ns_parse_ttl +#define ns_name_ntop __ns_name_ntop +#define ns_name_pton __ns_name_pton +#define ns_name_unpack __ns_name_unpack +#define ns_name_pack __ns_name_pack +#define ns_name_compress __ns_name_compress +#define ns_name_uncompress __ns_name_uncompress + +__BEGIN_DECLS +u_int ns_get16 __P((const u_char *)); +u_long ns_get32 __P((const u_char *)); +void ns_put16 __P((u_int, u_char *)); +void ns_put32 __P((u_long, u_char *)); +int ns_initparse __P((const u_char *, int, ns_msg *)); +int ns_parserr __P((ns_msg *, ns_sect, int, ns_rr *)); +int ns_sprintrr __P((const ns_msg *, const ns_rr *, + const char *, const char *, char *, size_t)); +int ns_sprintrrf __P((const u_char *, size_t, const char *, + ns_class, ns_type, u_long, const u_char *, + size_t, const char *, const char *, + char *, size_t)); +int ns_format_ttl __P((u_long, char *, size_t)); +int ns_parse_ttl __P((const char *, u_long *)); +int ns_name_ntop __P((const u_char *, char *, size_t)); +int ns_name_pton __P((const char *, u_char *, size_t)); +int ns_name_unpack __P((const u_char *, const u_char *, + const u_char *, u_char *, size_t)); +int ns_name_pack __P((const u_char *, u_char *, int, + const u_char **, const u_char **)); +int ns_name_uncompress __P((const u_char *, const u_char *, + const u_char *, char *, size_t)); +int ns_name_compress __P((const char *, u_char *, size_t, + const u_char **, const u_char **)); +int ns_name_skip __P((const u_char **, const u_char *)); +__END_DECLS + +#ifdef BIND_4_COMPAT +#include +#endif + +#endif /* !_ARPA_NAMESER_H_ */ diff --git a/bsd/include/arpa/nameser_compat.h b/bsd/include/arpa/nameser_compat.h new file mode 100644 index 
000000000..33c96acfd --- /dev/null +++ b/bsd/include/arpa/nameser_compat.h @@ -0,0 +1,194 @@ +/* Copyright (c) 1983, 1989 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* + * from nameser.h 8.1 (Berkeley) 6/2/93 + * From: Id: nameser_compat.h,v 8.9 1998/03/20 23:25:10 halley Exp + * $FreeBSD: src/include/arpa/nameser_compat.h,v 1.1.2.1 1999/08/29 14:39:01 peter Exp $ + */ + +#ifndef _ARPA_NAMESER_COMPAT_ +#define _ARPA_NAMESER_COMPAT_ + +#define __BIND 19950621 /* (DEAD) interface version stamp. */ + +#include + +#if !defined(BYTE_ORDER) || \ + (BYTE_ORDER != BIG_ENDIAN && BYTE_ORDER != LITTLE_ENDIAN && \ + BYTE_ORDER != PDP_ENDIAN) + /* you must determine what the correct bit order is for + * your compiler - the next line is an intentional error + * which will force your compiles to bomb until you fix + * the above macros. + */ + error "Undefined or invalid BYTE_ORDER"; +#endif + +/* + * Structure for query header. The order of the fields is machine- and + * compiler-dependent, depending on the byte/bit order and the layout + * of bit fields. We use bit fields only in int variables, as this + * is all ANSI requires. This requires a somewhat confusing rearrangement. 
+ */ + +typedef struct { + unsigned id :16; /* query identification number */ +#if BYTE_ORDER == BIG_ENDIAN + /* fields in third byte */ + unsigned qr: 1; /* response flag */ + unsigned opcode: 4; /* purpose of message */ + unsigned aa: 1; /* authoritive answer */ + unsigned tc: 1; /* truncated message */ + unsigned rd: 1; /* recursion desired */ + /* fields in fourth byte */ + unsigned ra: 1; /* recursion available */ + unsigned unused :1; /* unused bits (MBZ as of 4.9.3a3) */ + unsigned ad: 1; /* authentic data from named */ + unsigned cd: 1; /* checking disabled by resolver */ + unsigned rcode :4; /* response code */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN || BYTE_ORDER == PDP_ENDIAN + /* fields in third byte */ + unsigned rd :1; /* recursion desired */ + unsigned tc :1; /* truncated message */ + unsigned aa :1; /* authoritive answer */ + unsigned opcode :4; /* purpose of message */ + unsigned qr :1; /* response flag */ + /* fields in fourth byte */ + unsigned rcode :4; /* response code */ + unsigned cd: 1; /* checking disabled by resolver */ + unsigned ad: 1; /* authentic data from named */ + unsigned unused :1; /* unused bits (MBZ as of 4.9.3a3) */ + unsigned ra :1; /* recursion available */ +#endif + /* remaining bytes */ + unsigned qdcount :16; /* number of question entries */ + unsigned ancount :16; /* number of answer entries */ + unsigned nscount :16; /* number of authority entries */ + unsigned arcount :16; /* number of resource entries */ +} HEADER; + +#define PACKETSZ NS_PACKETSZ +#define MAXDNAME NS_MAXDNAME +#define MAXCDNAME NS_MAXCDNAME +#define MAXLABEL NS_MAXLABEL +#define HFIXEDSZ NS_HFIXEDSZ +#define QFIXEDSZ NS_QFIXEDSZ +#define RRFIXEDSZ NS_RRFIXEDSZ +#define INT32SZ NS_INT32SZ +#define INT16SZ NS_INT16SZ +#define INADDRSZ NS_INADDRSZ +#define IN6ADDRSZ NS_IN6ADDRSZ +#define INDIR_MASK NS_CMPRSFLGS +#define NAMESERVER_PORT NS_DEFAULTPORT + +#define S_ZONE ns_s_zn +#define S_PREREQ ns_s_pr +#define S_UPDATE ns_s_ud +#define S_ADDT ns_s_ar + 
+#define QUERY ns_o_query +#define IQUERY ns_o_iquery +#define STATUS ns_o_status +#define NS_NOTIFY_OP ns_o_notify +#define NS_UPDATE_OP ns_o_update + +#define NOERROR ns_r_noerror +#define FORMERR ns_r_formerr +#define SERVFAIL ns_r_servfail +#define NXDOMAIN ns_r_nxdomain +#define NOTIMP ns_r_notimpl +#define REFUSED ns_r_refused +#define YXDOMAIN ns_r_yxdomain +#define YXRRSET ns_r_yxrrset +#define NXRRSET ns_r_nxrrset +#define NOTAUTH ns_r_notauth +#define NOTZONE ns_r_notzone + +#define DELETE ns_uop_delete +#define ADD ns_uop_add + +#define T_A ns_t_a +#define T_NS ns_t_ns +#define T_MD ns_t_md +#define T_MF ns_t_mf +#define T_CNAME ns_t_cname +#define T_SOA ns_t_soa +#define T_MB ns_t_mb +#define T_MG ns_t_mg +#define T_MR ns_t_mr +#define T_NULL ns_t_null +#define T_WKS ns_t_wks +#define T_PTR ns_t_ptr +#define T_HINFO ns_t_hinfo +#define T_MINFO ns_t_minfo +#define T_MX ns_t_mx +#define T_TXT ns_t_txt +#define T_RP ns_t_rp +#define T_AFSDB ns_t_afsdb +#define T_X25 ns_t_x25 +#define T_ISDN ns_t_isdn +#define T_RT ns_t_rt +#define T_NSAP ns_t_nsap +#define T_NSAP_PTR ns_t_nsap_ptr +#define T_SIG ns_t_sig +#define T_KEY ns_t_key +#define T_PX ns_t_px +#define T_GPOS ns_t_gpos +#define T_AAAA ns_t_aaaa +#define T_LOC ns_t_loc +#define T_NXT ns_t_nxt +#define T_EID ns_t_eid +#define T_NIMLOC ns_t_nimloc +#define T_SRV ns_t_srv +#define T_ATMA ns_t_atma +#define T_NAPTR ns_t_naptr +#define T_IXFR ns_t_ixfr +#define T_AXFR ns_t_axfr +#define T_MAILB ns_t_mailb +#define T_MAILA ns_t_maila +#define T_ANY ns_t_any + +#define C_IN ns_c_in +#define C_CHAOS ns_c_chaos +#define C_HS ns_c_hs +/* BIND_UPDATE */ +#define C_NONE ns_c_none +#define C_ANY ns_c_any + +#define GETSHORT NS_GET16 +#define GETLONG NS_GET32 +#define PUTSHORT NS_PUT16 +#define PUTLONG NS_PUT32 + +#endif /* _ARPA_NAMESER_COMPAT_ */ diff --git a/bsd/include/arpa/telnet.h b/bsd/include/arpa/telnet.h new file mode 100644 index 000000000..8126ce9af --- /dev/null +++ b/bsd/include/arpa/telnet.h @@ -0,0 
+1,341 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)telnet.h 8.2 (Berkeley) 12/15/93 + */ + +#ifndef _TELNET_H_ +#define _TELNET_H_ + +/* + * Definitions for the TELNET protocol. + */ +#define IAC 255 /* interpret as command: */ +#define DONT 254 /* you are not to use option */ +#define DO 253 /* please, you use option */ +#define WONT 252 /* I won't use option */ +#define WILL 251 /* I will use option */ +#define SB 250 /* interpret as subnegotiation */ +#define GA 249 /* you may reverse the line */ +#define EL 248 /* erase the current line */ +#define EC 247 /* erase the current character */ +#define AYT 246 /* are you there */ +#define AO 245 /* abort output--but let prog finish */ +#define IP 244 /* interrupt process--permanently */ +#define BREAK 243 /* break */ +#define DM 242 /* data mark--for connect. 
cleaning */ +#define NOP 241 /* nop */ +#define SE 240 /* end sub negotiation */ +#define EOR 239 /* end of record (transparent mode) */ +#define ABORT 238 /* Abort process */ +#define SUSP 237 /* Suspend process */ +#define xEOF 236 /* End of file: EOF is already used... */ + +#define SYNCH 242 /* for telfunc calls */ + +#ifdef TELCMDS +char *telcmds[] = { + "EOF", "SUSP", "ABORT", "EOR", + "SE", "NOP", "DMARK", "BRK", "IP", "AO", "AYT", "EC", + "EL", "GA", "SB", "WILL", "WONT", "DO", "DONT", "IAC", 0, +}; +#else +extern char *telcmds[]; +#endif + +#define TELCMD_FIRST xEOF +#define TELCMD_LAST IAC +#define TELCMD_OK(x) ((unsigned int)(x) <= TELCMD_LAST && \ + (unsigned int)(x) >= TELCMD_FIRST) +#define TELCMD(x) telcmds[(x)-TELCMD_FIRST] + +/* telnet options */ +#define TELOPT_BINARY 0 /* 8-bit data path */ +#define TELOPT_ECHO 1 /* echo */ +#define TELOPT_RCP 2 /* prepare to reconnect */ +#define TELOPT_SGA 3 /* suppress go ahead */ +#define TELOPT_NAMS 4 /* approximate message size */ +#define TELOPT_STATUS 5 /* give status */ +#define TELOPT_TM 6 /* timing mark */ +#define TELOPT_RCTE 7 /* remote controlled transmission and echo */ +#define TELOPT_NAOL 8 /* negotiate about output line width */ +#define TELOPT_NAOP 9 /* negotiate about output page size */ +#define TELOPT_NAOCRD 10 /* negotiate about CR disposition */ +#define TELOPT_NAOHTS 11 /* negotiate about horizontal tabstops */ +#define TELOPT_NAOHTD 12 /* negotiate about horizontal tab disposition */ +#define TELOPT_NAOFFD 13 /* negotiate about formfeed disposition */ +#define TELOPT_NAOVTS 14 /* negotiate about vertical tab stops */ +#define TELOPT_NAOVTD 15 /* negotiate about vertical tab disposition */ +#define TELOPT_NAOLFD 16 /* negotiate about output LF disposition */ +#define TELOPT_XASCII 17 /* extended ascic character set */ +#define TELOPT_LOGOUT 18 /* force logout */ +#define TELOPT_BM 19 /* byte macro */ +#define TELOPT_DET 20 /* data entry terminal */ +#define TELOPT_SUPDUP 21 /* supdup 
protocol */ +#define TELOPT_SUPDUPOUTPUT 22 /* supdup output */ +#define TELOPT_SNDLOC 23 /* send location */ +#define TELOPT_TTYPE 24 /* terminal type */ +#define TELOPT_EOR 25 /* end or record */ +#define TELOPT_TUID 26 /* TACACS user identification */ +#define TELOPT_OUTMRK 27 /* output marking */ +#define TELOPT_TTYLOC 28 /* terminal location number */ +#define TELOPT_3270REGIME 29 /* 3270 regime */ +#define TELOPT_X3PAD 30 /* X.3 PAD */ +#define TELOPT_NAWS 31 /* window size */ +#define TELOPT_TSPEED 32 /* terminal speed */ +#define TELOPT_LFLOW 33 /* remote flow control */ +#define TELOPT_LINEMODE 34 /* Linemode option */ +#define TELOPT_XDISPLOC 35 /* X Display Location */ +#define TELOPT_OLD_ENVIRON 36 /* Old - Environment variables */ +#define TELOPT_AUTHENTICATION 37/* Authenticate */ +#define TELOPT_ENCRYPT 38 /* Encryption option */ +#define TELOPT_NEW_ENVIRON 39 /* New - Environment variables */ +#define TELOPT_EXOPL 255 /* extended-options-list */ + + +#define NTELOPTS (1+TELOPT_NEW_ENVIRON) +#ifdef TELOPTS +char *telopts[NTELOPTS+1] = { + "BINARY", "ECHO", "RCP", "SUPPRESS GO AHEAD", "NAME", + "STATUS", "TIMING MARK", "RCTE", "NAOL", "NAOP", + "NAOCRD", "NAOHTS", "NAOHTD", "NAOFFD", "NAOVTS", + "NAOVTD", "NAOLFD", "EXTEND ASCII", "LOGOUT", "BYTE MACRO", + "DATA ENTRY TERMINAL", "SUPDUP", "SUPDUP OUTPUT", + "SEND LOCATION", "TERMINAL TYPE", "END OF RECORD", + "TACACS UID", "OUTPUT MARKING", "TTYLOC", + "3270 REGIME", "X.3 PAD", "NAWS", "TSPEED", "LFLOW", + "LINEMODE", "XDISPLOC", "OLD-ENVIRON", "AUTHENTICATION", + "ENCRYPT", "NEW-ENVIRON", + 0, +}; +#define TELOPT_FIRST TELOPT_BINARY +#define TELOPT_LAST TELOPT_NEW_ENVIRON +#define TELOPT_OK(x) ((unsigned int)(x) <= TELOPT_LAST) +#define TELOPT(x) telopts[(x)-TELOPT_FIRST] +#endif + +/* sub-option qualifiers */ +#define TELQUAL_IS 0 /* option is... 
*/ +#define TELQUAL_SEND 1 /* send option */ +#define TELQUAL_INFO 2 /* ENVIRON: informational version of IS */ +#define TELQUAL_REPLY 2 /* AUTHENTICATION: client version of IS */ +#define TELQUAL_NAME 3 /* AUTHENTICATION: client version of IS */ + +#define LFLOW_OFF 0 /* Disable remote flow control */ +#define LFLOW_ON 1 /* Enable remote flow control */ +#define LFLOW_RESTART_ANY 2 /* Restart output on any char */ +#define LFLOW_RESTART_XON 3 /* Restart output only on XON */ + +/* + * LINEMODE suboptions + */ + +#define LM_MODE 1 +#define LM_FORWARDMASK 2 +#define LM_SLC 3 + +#define MODE_EDIT 0x01 +#define MODE_TRAPSIG 0x02 +#define MODE_ACK 0x04 +#define MODE_SOFT_TAB 0x08 +#define MODE_LIT_ECHO 0x10 + +#define MODE_MASK 0x1f + +/* Not part of protocol, but needed to simplify things... */ +#define MODE_FLOW 0x0100 +#define MODE_ECHO 0x0200 +#define MODE_INBIN 0x0400 +#define MODE_OUTBIN 0x0800 +#define MODE_FORCE 0x1000 + +#define SLC_SYNCH 1 +#define SLC_BRK 2 +#define SLC_IP 3 +#define SLC_AO 4 +#define SLC_AYT 5 +#define SLC_EOR 6 +#define SLC_ABORT 7 +#define SLC_EOF 8 +#define SLC_SUSP 9 +#define SLC_EC 10 +#define SLC_EL 11 +#define SLC_EW 12 +#define SLC_RP 13 +#define SLC_LNEXT 14 +#define SLC_XON 15 +#define SLC_XOFF 16 +#define SLC_FORW1 17 +#define SLC_FORW2 18 + +#define NSLC 18 + +/* + * For backwards compatability, we define SLC_NAMES to be the + * list of names if SLC_NAMES is not defined. 
+ */ +#define SLC_NAMELIST "0", "SYNCH", "BRK", "IP", "AO", "AYT", "EOR", \ + "ABORT", "EOF", "SUSP", "EC", "EL", "EW", "RP", \ + "LNEXT", "XON", "XOFF", "FORW1", "FORW2", 0, +#ifdef SLC_NAMES +char *slc_names[] = { + SLC_NAMELIST +}; +#else +extern char *slc_names[]; +#define SLC_NAMES SLC_NAMELIST +#endif + +#define SLC_NAME_OK(x) ((unsigned int)(x) <= NSLC) +#define SLC_NAME(x) slc_names[x] + +#define SLC_NOSUPPORT 0 +#define SLC_CANTCHANGE 1 +#define SLC_VARIABLE 2 +#define SLC_DEFAULT 3 +#define SLC_LEVELBITS 0x03 + +#define SLC_FUNC 0 +#define SLC_FLAGS 1 +#define SLC_VALUE 2 + +#define SLC_ACK 0x80 +#define SLC_FLUSHIN 0x40 +#define SLC_FLUSHOUT 0x20 + +#define OLD_ENV_VAR 1 +#define OLD_ENV_VALUE 0 +#define NEW_ENV_VAR 0 +#define NEW_ENV_VALUE 1 +#define ENV_ESC 2 +#define ENV_USERVAR 3 + +/* + * AUTHENTICATION suboptions + */ + +/* + * Who is authenticating who ... + */ +#define AUTH_WHO_CLIENT 0 /* Client authenticating server */ +#define AUTH_WHO_SERVER 1 /* Server authenticating client */ +#define AUTH_WHO_MASK 1 + +/* + * amount of authentication done + */ +#define AUTH_HOW_ONE_WAY 0 +#define AUTH_HOW_MUTUAL 2 +#define AUTH_HOW_MASK 2 + +#define AUTHTYPE_NULL 0 +#define AUTHTYPE_KERBEROS_V4 1 +#define AUTHTYPE_KERBEROS_V5 2 +#define AUTHTYPE_SPX 3 +#define AUTHTYPE_MINK 4 +#define AUTHTYPE_CNT 5 + +#define AUTHTYPE_TEST 99 + +#ifdef AUTH_NAMES +char *authtype_names[] = { + "NULL", "KERBEROS_V4", "KERBEROS_V5", "SPX", "MINK", 0, +}; +#else +extern char *authtype_names[]; +#endif + +#define AUTHTYPE_NAME_OK(x) ((unsigned int)(x) < AUTHTYPE_CNT) +#define AUTHTYPE_NAME(x) authtype_names[x] + +/* + * ENCRYPTion suboptions + */ +#define ENCRYPT_IS 0 /* I pick encryption type ... */ +#define ENCRYPT_SUPPORT 1 /* I support encryption types ... 
*/ +#define ENCRYPT_REPLY 2 /* Initial setup response */ +#define ENCRYPT_START 3 /* Am starting to send encrypted */ +#define ENCRYPT_END 4 /* Am ending encrypted */ +#define ENCRYPT_REQSTART 5 /* Request you start encrypting */ +#define ENCRYPT_REQEND 6 /* Request you send encrypting */ +#define ENCRYPT_ENC_KEYID 7 +#define ENCRYPT_DEC_KEYID 8 +#define ENCRYPT_CNT 9 + +#define ENCTYPE_ANY 0 +#define ENCTYPE_DES_CFB64 1 +#define ENCTYPE_DES_OFB64 2 +#define ENCTYPE_CNT 3 + +#ifdef ENCRYPT_NAMES +char *encrypt_names[] = { + "IS", "SUPPORT", "REPLY", "START", "END", + "REQUEST-START", "REQUEST-END", "ENC-KEYID", "DEC-KEYID", + 0, +}; +char *enctype_names[] = { + "ANY", "DES_CFB64", "DES_OFB64", 0, +}; +#else +extern char *encrypt_names[]; +extern char *enctype_names[]; +#endif + + +#define ENCRYPT_NAME_OK(x) ((unsigned int)(x) < ENCRYPT_CNT) +#define ENCRYPT_NAME(x) encrypt_names[x] + +#define ENCTYPE_NAME_OK(x) ((unsigned int)(x) < ENCTYPE_CNT) +#define ENCTYPE_NAME(x) enctype_names[x] + +#endif /* !_TELNET_H_ */ diff --git a/bsd/include/arpa/tftp.h b/bsd/include/arpa/tftp.h new file mode 100644 index 000000000..1e7324f01 --- /dev/null +++ b/bsd/include/arpa/tftp.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tftp.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _TFTP_H_ +#define _TFTP_H_ + +/* + * Trivial File Transfer Protocol (IEN-133) + */ +#define SEGSIZE 512 /* data segment size */ + +/* + * Packet types. + */ +#define RRQ 01 /* read request */ +#define WRQ 02 /* write request */ +#define DATA 03 /* data packet */ +#define ACK 04 /* acknowledgement */ +#define ERROR 05 /* error code */ + +struct tftphdr { + short th_opcode; /* packet type */ + union { + short tu_block; /* block # */ + short tu_code; /* error code */ + char tu_stuff[1]; /* request packet stuff */ + } th_u; + char th_data[1]; /* data or error string */ +}; + +#define th_block th_u.tu_block +#define th_code th_u.tu_code +#define th_stuff th_u.tu_stuff +#define th_msg th_data + +/* + * Error codes. + */ +#define EUNDEF 0 /* not defined */ +#define ENOTFOUND 1 /* file not found */ +#define EACCESS 2 /* access violation */ +#define ENOSPACE 3 /* disk full or allocation exceeded */ +#define EBADOP 4 /* illegal TFTP operation */ +#define EBADID 5 /* unknown transfer ID */ +#define EEXISTS 6 /* file already exists */ +#define ENOUSER 7 /* no such user */ + +#endif /* !_TFTP_H_ */ diff --git a/bsd/include/bitstring.h b/bsd/include/bitstring.h new file mode 100644 index 000000000..92dec9e96 --- /dev/null +++ b/bsd/include/bitstring.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Paul Vixie. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)bitstring.h 8.1 (Berkeley) 7/19/93 + */ + +#ifndef _BITSTRING_H_ +#define _BITSTRING_H_ + +typedef unsigned char bitstr_t; + +/* internal macros */ + /* byte of the bitstring bit is in */ +#define _bit_byte(bit) \ + ((bit) >> 3) + + /* mask for the bit within its byte */ +#define _bit_mask(bit) \ + (1 << ((bit)&0x7)) + +/* external macros */ + /* bytes in a bitstring of nbits bits */ +#define bitstr_size(nbits) \ + ((((nbits) - 1) >> 3) + 1) + + /* allocate a bitstring */ +#define bit_alloc(nbits) \ + (bitstr_t *)calloc(1, \ + (unsigned int)bitstr_size(nbits) * sizeof(bitstr_t)) + + /* allocate a bitstring on the stack */ +#define bit_decl(name, nbits) \ + (name)[bitstr_size(nbits)] + + /* is bit N of bitstring name set? 
*/ +#define bit_test(name, bit) \ + ((name)[_bit_byte(bit)] & _bit_mask(bit)) + + /* set bit N of bitstring name */ +#define bit_set(name, bit) \ + (name)[_bit_byte(bit)] |= _bit_mask(bit) + + /* clear bit N of bitstring name */ +#define bit_clear(name, bit) \ + (name)[_bit_byte(bit)] &= ~_bit_mask(bit) + + /* clear bits start ... stop in bitstring */ +#define bit_nclear(name, start, stop) { \ + register bitstr_t *_name = name; \ + register int _start = start, _stop = stop; \ + register int _startbyte = _bit_byte(_start); \ + register int _stopbyte = _bit_byte(_stop); \ + if (_startbyte == _stopbyte) { \ + _name[_startbyte] &= ((0xff >> (8 - (_start&0x7))) | \ + (0xff << ((_stop&0x7) + 1))); \ + } else { \ + _name[_startbyte] &= 0xff >> (8 - (_start&0x7)); \ + while (++_startbyte < _stopbyte) \ + _name[_startbyte] = 0; \ + _name[_stopbyte] &= 0xff << ((_stop&0x7) + 1); \ + } \ +} + + /* set bits start ... stop in bitstring */ +#define bit_nset(name, start, stop) { \ + register bitstr_t *_name = name; \ + register int _start = start, _stop = stop; \ + register int _startbyte = _bit_byte(_start); \ + register int _stopbyte = _bit_byte(_stop); \ + if (_startbyte == _stopbyte) { \ + _name[_startbyte] |= ((0xff << (_start&0x7)) & \ + (0xff >> (7 - (_stop&0x7)))); \ + } else { \ + _name[_startbyte] |= 0xff << ((_start)&0x7); \ + while (++_startbyte < _stopbyte) \ + _name[_startbyte] = 0xff; \ + _name[_stopbyte] |= 0xff >> (7 - (_stop&0x7)); \ + } \ +} + + /* find first bit clear in name */ +#define bit_ffc(name, nbits, value) { \ + register bitstr_t *_name = name; \ + register int _byte, _nbits = nbits; \ + register int _stopbyte = _bit_byte(_nbits), _value = -1; \ + for (_byte = 0; _byte <= _stopbyte; ++_byte) \ + if (_name[_byte] != 0xff) { \ + _value = _byte << 3; \ + for (_stopbyte = _name[_byte]; (_stopbyte&0x1); \ + ++_value, _stopbyte >>= 1); \ + break; \ + } \ + *(value) = _value; \ +} + + /* find first bit set in name */ +#define bit_ffs(name, nbits, value) { \ 
+ register bitstr_t *_name = name; \ + register int _byte, _nbits = nbits; \ + register int _stopbyte = _bit_byte(_nbits), _value = -1; \ + for (_byte = 0; _byte <= _stopbyte; ++_byte) \ + if (_name[_byte]) { \ + _value = _byte << 3; \ + for (_stopbyte = _name[_byte]; !(_stopbyte&0x1); \ + ++_value, _stopbyte >>= 1); \ + break; \ + } \ + *(value) = _value; \ +} + +#endif /* !_BITSTRING_H_ */ diff --git a/bsd/include/c.h b/bsd/include/c.h new file mode 100644 index 000000000..905d271fa --- /dev/null +++ b/bsd/include/c.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Standard C macros + * + ********************************************************************** + * HISTORY + * 02-Feb-86 Glenn Marcy (gm0w) at Carnegie-Mellon University + * Added check to allow multiple or recursive inclusion of this + * file. Added bool enum from machine/types.h for regular users + * that want a real boolean type. + * + * 29-Dec-85 Glenn Marcy (gm0w) at Carnegie-Mellon University + * Also change spacing of MAX and MIN to coincide with that of + * sys/param.h. 
+ * + * 19-Nov-85 Glenn Marcy (gm0w) at Carnegie-Mellon University + * Changed the number of tabs between TRUE, FALSE and their + * respective values to match those in sys/types.h. + * + * 17-Dec-84 Glenn Marcy (gm0w) at Carnegie-Mellon University + * Only define TRUE and FALSE if not defined. Added caseE macro + * for using enumerated types in switch statements. + * + * 23-Apr-81 Mike Accetta (mja) at Carnegie-Mellon University + * Added "sizeofS" and "sizeofA" macros which expand to the size + * of a string constant and array respectively. + * + ********************************************************************** + */ + +#ifndef _C_INCLUDE_ +#define _C_INCLUDE_ + +#ifndef ABS +#define ABS(x) ((x)>=0?(x):-(x)) +#endif /* ABS */ +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif /* MIN */ +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif /* MAX */ + +#ifndef FALSE +#define FALSE 0 +#endif /* FALSE */ +#ifndef TRUE +#define TRUE 1 +#endif /* TRUE */ + +#define CERROR (-1) + +#ifndef bool +typedef enum { false = 0, true = 1 } bool; +#endif /* bool */ + +#define sizeofS(string) (sizeof(string) - 1) +#define sizeofA(array) (sizeof(array)/sizeof(array[0])) + +#define caseE(enum_type) case (int)(enum_type) + +#endif /* _C_INCLUDE_ */ diff --git a/bsd/include/ctype.h b/bsd/include/ctype.h new file mode 100644 index 000000000..bd5d05a9a --- /dev/null +++ b/bsd/include/ctype.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * This code is derived from software contributed to Berkeley by + * Paul Borman at Krystal Technologies. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)ctype.h	8.4 (Berkeley) 1/21/94
+ */
+
+#ifndef _CTYPE_H_
+#define _CTYPE_H_
+
+#include <runetype.h>
+
+#define	_A	0x00000100L		/* Alpha */
+#define	_C	0x00000200L		/* Control */
+#define	_D	0x00000400L		/* Digit */
+#define	_G	0x00000800L		/* Graph */
+#define	_L	0x00001000L		/* Lower */
+#define	_P	0x00002000L		/* Punct */
+#define	_S	0x00004000L		/* Space */
+#define	_U	0x00008000L		/* Upper */
+#define	_X	0x00010000L		/* X digit */
+#define	_B	0x00020000L		/* Blank */
+#define	_R	0x00040000L		/* Print */
+#define	_I	0x00080000L		/* Ideogram */
+#define	_T	0x00100000L		/* Special */
+#define	_Q	0x00200000L		/* Phonogram */
+
+#define isalnum(c)      __istype((c), (_A|_D))
+#define isalpha(c)      __istype((c),  _A)
+#define iscntrl(c)      __istype((c),  _C)
+#define isdigit(c)      __isctype((c), _D)	/* ANSI -- locale independent */
+#define isgraph(c)      __istype((c),  _G)
+#define islower(c)      __istype((c),  _L)
+#define isprint(c)      __istype((c),  _R)
+#define ispunct(c)      __istype((c),  _P)
+#define isspace(c)      __istype((c),  _S)
+#define isupper(c)      __istype((c),  _U)
+#define isxdigit(c)     __isctype((c), _X)	/* ANSI -- locale independent */
+
+#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE)
+#define isascii(c)      ((c & ~0x7F) == 0)
+#define toascii(c)      ((c) & 0x7F)
+#define digittoint(c)   __istype((c), 0xFF)
+#define isideogram(c)   __istype((c), _I)
+#define isphonogram(c)  __istype((c), _T)
+#define isspecial(c)    __istype((c), _Q)
+#define isblank(c)      __istype((c), _B)
+#define isrune(c)       __istype((c), 0xFFFFFF00L)
+#define isnumber(c)     __istype((c), _D)
+#define ishexnumber(c)  __istype((c), _X)
+#endif
+
+/* See comments in <machine/ansi.h> about _BSD_RUNE_T_. */
+__BEGIN_DECLS
+unsigned long	___runetype __P((_BSD_RUNE_T_));
+_BSD_RUNE_T_	___tolower __P((_BSD_RUNE_T_));
+_BSD_RUNE_T_	___toupper __P((_BSD_RUNE_T_));
+__END_DECLS
+
+/*
+ * If your compiler supports prototypes and inline functions,
+ * #define _USE_CTYPE_INLINE_.  Otherwise, use the C library
+ * functions.
+ */
+#if !defined(_USE_CTYPE_CLIBRARY_) && defined(__GNUC__) || defined(__cplusplus)
+#define _USE_CTYPE_INLINE_	1
+#endif
+
+#if defined(_USE_CTYPE_INLINE_)
+static __inline int
+__istype(_BSD_RUNE_T_ c, unsigned long f)
+{
+	return((((c & _CRMASK) ? ___runetype(c) :
+	    _CurrentRuneLocale->runetype[c]) & f) ? 1 : 0);
+}
+
+static __inline int
+__isctype(_BSD_RUNE_T_ c, unsigned long f)
+{
+	return((((c & _CRMASK) ? 0 :
+	    _DefaultRuneLocale.runetype[c]) & f) ? 1 : 0);
+}
+
+/* _ANSI_LIBRARY is defined by lib/libc/gen/isctype.c. */
+#if !defined(_ANSI_LIBRARY)
+static __inline _BSD_RUNE_T_
+toupper(_BSD_RUNE_T_ c)
+{
+	return((c & _CRMASK) ?
+	    ___toupper(c) : _CurrentRuneLocale->mapupper[c]);
+}
+
+static __inline _BSD_RUNE_T_
+tolower(_BSD_RUNE_T_ c)
+{
+	return((c & _CRMASK) ?
+ ___tolower(c) : _CurrentRuneLocale->maplower[c]); +} +#endif /* !_ANSI_LIBRARY */ + +#else /* !_USE_CTYPE_INLINE_ */ + +__BEGIN_DECLS +int __istype __P((_BSD_RUNE_T_, unsigned long)); +int __isctype __P((_BSD_RUNE_T_, unsigned long)); +_BSD_RUNE_T_ toupper __P((_BSD_RUNE_T_)); +_BSD_RUNE_T_ tolower __P((_BSD_RUNE_T_)); +__END_DECLS +#endif /* _USE_CTYPE_INLINE_ */ + +#endif /* !_CTYPE_H_ */ diff --git a/bsd/include/db.h b/bsd/include/db.h new file mode 100644 index 000000000..1e4cce86c --- /dev/null +++ b/bsd/include/db.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)db.h 8.4 (Berkeley) 2/21/94 + */ + +#ifndef _DB_H_ +#define _DB_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> + +#include <limits.h> + +#define RET_ERROR -1 /* Return values. */ +#define RET_SUCCESS 0 +#define RET_SPECIAL 1 + +#define MAX_PAGE_NUMBER 0xffffffff /* >= # of pages in a file */ +typedef u_int32_t pgno_t; +#define MAX_PAGE_OFFSET 65535 /* >= # of bytes in a page */ +typedef u_int16_t indx_t; +#define MAX_REC_NUMBER 0xffffffff /* >= # of records in a tree */ +typedef u_int32_t recno_t; + +/* Key/data structure -- a Data-Base Thang.
*/ +typedef struct { + void *data; /* data */ + size_t size; /* data length */ +} DBT; + +/* Routine flags. */ +#define R_CURSOR 1 /* del, put, seq */ +#define __R_UNUSED 2 /* UNUSED */ +#define R_FIRST 3 /* seq */ +#define R_IAFTER 4 /* put (RECNO) */ +#define R_IBEFORE 5 /* put (RECNO) */ +#define R_LAST 6 /* seq (BTREE, RECNO) */ +#define R_NEXT 7 /* seq */ +#define R_NOOVERWRITE 8 /* put */ +#define R_PREV 9 /* seq (BTREE, RECNO) */ +#define R_SETCURSOR 10 /* put (RECNO) */ +#define R_RECNOSYNC 11 /* sync (RECNO) */ + +typedef enum { DB_BTREE, DB_HASH, DB_RECNO } DBTYPE; + +/* + * !!! + * The following flags are included in the dbopen(3) call as part of the + * open(2) flags. In order to avoid conflicts with the open flags, start + * at the top of the 16 or 32-bit number space and work our way down. If + * the open flags were significantly expanded in the future, it could be + * a problem. Wish I'd left another flags word in the dbopen call. + * + * !!! + * None of this stuff is implemented yet. The only reason that it's here + * is so that the access methods can skip copying the key/data pair when + * the DB_LOCK flag isn't set. + */ +#if UINT_MAX > 65535 +#define DB_LOCK 0x20000000 /* Do locking. */ +#define DB_SHMEM 0x40000000 /* Use shared memory. */ +#define DB_TXN 0x80000000 /* Do transactions. */ +#else +#define DB_LOCK 0x2000 /* Do locking. */ +#define DB_SHMEM 0x4000 /* Use shared memory. */ +#define DB_TXN 0x8000 /* Do transactions. */ +#endif + +/* Access method description structure. */ +typedef struct __db { + DBTYPE type; /* Underlying db type. */ + int (*close) __P((struct __db *)); + int (*del) __P((const struct __db *, const DBT *, u_int)); + int (*get) __P((const struct __db *, const DBT *, DBT *, u_int)); + int (*put) __P((const struct __db *, DBT *, const DBT *, u_int)); + int (*seq) __P((const struct __db *, DBT *, DBT *, u_int)); + int (*sync) __P((const struct __db *, u_int)); + void *internal; /* Access method private. 
*/ + int (*fd) __P((const struct __db *)); +} DB; + +#define BTREEMAGIC 0x053162 +#define BTREEVERSION 3 + +/* Structure used to pass parameters to the btree routines. */ +typedef struct { +#define R_DUP 0x01 /* duplicate keys */ + u_long flags; + u_int cachesize; /* bytes to cache */ + int maxkeypage; /* maximum keys per page */ + int minkeypage; /* minimum keys per page */ + u_int psize; /* page size */ + int (*compare) /* comparison function */ + __P((const DBT *, const DBT *)); + size_t (*prefix) /* prefix function */ + __P((const DBT *, const DBT *)); + int lorder; /* byte order */ +} BTREEINFO; + +#define HASHMAGIC 0x061561 +#define HASHVERSION 2 + +/* Structure used to pass parameters to the hashing routines. */ +typedef struct { + u_int bsize; /* bucket size */ + u_int ffactor; /* fill factor */ + u_int nelem; /* number of elements */ + u_int cachesize; /* bytes to cache */ + u_int32_t /* hash function */ + (*hash) __P((const void *, size_t)); + int lorder; /* byte order */ +} HASHINFO; + +/* Structure used to pass parameters to the record routines. */ +typedef struct { +#define R_FIXEDLEN 0x01 /* fixed-length records */ +#define R_NOKEY 0x02 /* key not required */ +#define R_SNAPSHOT 0x04 /* snapshot the input */ + u_long flags; + u_int cachesize; /* bytes to cache */ + u_int psize; /* page size */ + int lorder; /* byte order */ + size_t reclen; /* record length (fixed-length records) */ + u_char bval; /* delimiting byte (variable-length records */ + char *bfname; /* btree file name */ +} RECNOINFO; + +#ifdef __DBINTERFACE_PRIVATE +/* + * Little endian <==> big endian 32-bit swap macros. 
+ * M_32_SWAP swap a memory location + * P_32_SWAP swap a referenced memory location + * P_32_COPY swap from one location to another + */ +#define M_32_SWAP(a) { \ + u_int32_t _tmp = a; \ + ((char *)&a)[0] = ((char *)&_tmp)[3]; \ + ((char *)&a)[1] = ((char *)&_tmp)[2]; \ + ((char *)&a)[2] = ((char *)&_tmp)[1]; \ + ((char *)&a)[3] = ((char *)&_tmp)[0]; \ +} +#define P_32_SWAP(a) { \ + u_int32_t _tmp = *(u_int32_t *)a; \ + ((char *)a)[0] = ((char *)&_tmp)[3]; \ + ((char *)a)[1] = ((char *)&_tmp)[2]; \ + ((char *)a)[2] = ((char *)&_tmp)[1]; \ + ((char *)a)[3] = ((char *)&_tmp)[0]; \ +} +#define P_32_COPY(a, b) { \ + ((char *)&(b))[0] = ((char *)&(a))[3]; \ + ((char *)&(b))[1] = ((char *)&(a))[2]; \ + ((char *)&(b))[2] = ((char *)&(a))[1]; \ + ((char *)&(b))[3] = ((char *)&(a))[0]; \ +} + +/* + * Little endian <==> big endian 16-bit swap macros. + * M_16_SWAP swap a memory location + * P_16_SWAP swap a referenced memory location + * P_16_COPY swap from one location to another + */ +#define M_16_SWAP(a) { \ + u_int16_t _tmp = a; \ + ((char *)&a)[0] = ((char *)&_tmp)[1]; \ + ((char *)&a)[1] = ((char *)&_tmp)[0]; \ +} +#define P_16_SWAP(a) { \ + u_int16_t _tmp = *(u_int16_t *)a; \ + ((char *)a)[0] = ((char *)&_tmp)[1]; \ + ((char *)a)[1] = ((char *)&_tmp)[0]; \ +} +#define P_16_COPY(a, b) { \ + ((char *)&(b))[0] = ((char *)&(a))[1]; \ + ((char *)&(b))[1] = ((char *)&(a))[0]; \ +} +#endif + +__BEGIN_DECLS +DB *dbopen __P((const char *, int, int, DBTYPE, const void *)); + +#ifdef __DBINTERFACE_PRIVATE +DB *__bt_open __P((const char *, int, int, const BTREEINFO *, int)); +DB *__hash_open __P((const char *, int, int, const HASHINFO *, int)); +DB *__rec_open __P((const char *, int, int, const RECNOINFO *, int)); +void __dbpanic __P((DB *dbp)); +#endif +__END_DECLS +#endif /* !_DB_H_ */ diff --git a/bsd/include/dirent.h b/bsd/include/dirent.h new file mode 100644 index 000000000..5e2447f1d --- /dev/null +++ b/bsd/include/dirent.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2000 
Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dirent.h 8.2 (Berkeley) 7/28/94 + */ + +#ifndef _DIRENT_H_ +#define _DIRENT_H_ + +/* + * The kernel defines the format of directory entries returned by + * the getdirentries(2) system call. + */ +#include <sys/dirent.h> + +#ifdef _POSIX_SOURCE +typedef void * DIR; +#else + +#define d_ino d_fileno /* backward compatibility */ + +/* definitions for library routines operating on directories. */ +#define DIRBLKSIZ 1024 + +/* structure describing an open directory.
*/ +typedef struct _dirdesc { + int dd_fd; /* file descriptor associated with directory */ + long dd_loc; /* offset in current buffer */ + long dd_size; /* amount of data returned by getdirentries */ + char *dd_buf; /* data buffer */ + int dd_len; /* size of data buffer */ + long dd_seek; /* magic cookie returned by getdirentries */ + long dd_rewind; /* magic cookie for rewinding */ + int dd_flags; /* flags for readdir */ +} DIR; + +#define dirfd(dirp) ((dirp)->dd_fd) + +/* flags for opendir2 */ +#define DTF_HIDEW 0x0001 /* hide whiteout entries */ +#define DTF_NODUP 0x0002 /* don't return duplicate names */ +#define DTF_REWIND 0x0004 /* rewind after reading union stack */ +#define __DTF_READALL 0x0008 /* everything has been read */ + +#ifndef NULL +#define NULL 0 +#endif + +#endif /* _POSIX_SOURCE */ + +#ifndef KERNEL + +#include <sys/cdefs.h> + +__BEGIN_DECLS +DIR *opendir __P((const char *)); +struct dirent *readdir __P((DIR *)); +void rewinddir __P((DIR *)); +int closedir __P((DIR *)); +#ifndef _POSIX_SOURCE +DIR *__opendir2 __P((const char *, int)); +long telldir __P((const DIR *)); +void seekdir __P((DIR *, long)); +int scandir __P((const char *, struct dirent ***, + int (*)(struct dirent *), int (*)(const void *, const void *))); +int alphasort __P((const void *, const void *)); +int getdirentries __P((int, char *, int, long *)); +#endif /* not POSIX */ +__END_DECLS + +#endif /* !KERNEL */ + +#endif /* !_DIRENT_H_ */ diff --git a/bsd/include/disktab.h b/bsd/include/disktab.h new file mode 100644 index 000000000..386cb640b --- /dev/null +++ b/bsd/include/disktab.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License.
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)disktab.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _DISKTAB_H_ +#define _DISKTAB_H_ + +/* + * Disk description table, see disktab(5) + */ +#define DISKTAB "/etc/disktab" + +struct disktab { + char *d_name; /* drive name */ + char *d_type; /* drive type */ + int d_secsize; /* sector size in bytes */ + int d_ntracks; /* # tracks/cylinder */ + int d_nsectors; /* # sectors/track */ + int d_ncylinders; /* # cylinders */ + int d_rpm; /* revolutions/minute */ + int d_badsectforw; /* supports DEC bad144 std */ + int d_sectoffset; /* use sect rather than cyl offsets */ + struct partition { + int p_size; /* #sectors in partition */ + short p_bsize; /* block size in bytes */ + short p_fsize; /* frag size in bytes */ + } d_partitions[8]; +}; + +#endif /* !_DISKTAB_H_ */ diff --git a/bsd/include/err.h b/bsd/include/err.h new file mode 100644 index 000000000..59e6d224b --- /dev/null +++ b/bsd/include/err.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)err.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _ERR_H_ +#define _ERR_H_ + +/* + * Don't use va_list in the err/warn prototypes. Va_list is typedef'd in two + * places (<machine/ansi.h> and <stdarg.h>), so if we include one + * of them here we may collide with the utility's includes. It's unreasonable + * for utilities to have to include one of them to include err.h, so we get + * _BSD_VA_LIST_ from <machine/ansi.h> and use it. + */ +#include <sys/cdefs.h> +#include <machine/ansi.h> + +__BEGIN_DECLS +__dead void err __P((int, const char *, ...)) __attribute__((__noreturn__)); +__dead void verr __P((int, const char *, _BSD_VA_LIST_)) __attribute__((__noreturn__)); +__dead void errx __P((int, const char *, ...)) __attribute__((__noreturn__)); +__dead void verrx __P((int, const char *, _BSD_VA_LIST_)) __attribute__((__noreturn__)); +void warn __P((const char *, ...)); +void vwarn __P((const char *, _BSD_VA_LIST_)); +void warnx __P((const char *, ...)); +void vwarnx __P((const char *, _BSD_VA_LIST_)); +__END_DECLS + +#endif /* !_ERR_H_ */ diff --git a/bsd/include/errno.h b/bsd/include/errno.h new file mode 100644 index 000000000..57f2f7b66 --- /dev/null +++ b/bsd/include/errno.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License").
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include <sys/errno.h> + diff --git a/bsd/include/fcntl.h b/bsd/include/fcntl.h new file mode 100644 index 000000000..d936d7062 --- /dev/null +++ b/bsd/include/fcntl.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include <sys/fcntl.h> diff --git a/bsd/include/fnmatch.h b/bsd/include/fnmatch.h new file mode 100644 index 000000000..f60424db6 --- /dev/null +++ b/bsd/include/fnmatch.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3.
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fnmatch.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _FNMATCH_H_ +#define _FNMATCH_H_ + +#define FNM_NOMATCH 1 /* Match failed. */ + +#define FNM_NOESCAPE 0x01 /* Disable backslash escaping. */ +#define FNM_PATHNAME 0x02 /* Slash must be matched by slash. */ +#define FNM_PERIOD 0x04 /* Period must be matched by period. */ + +#include <sys/cdefs.h> + +__BEGIN_DECLS +#ifndef _POSIX_SOURCE +int fnmatch __P((const char *, const char *, int)); +#endif +__END_DECLS + +#endif /* !_FNMATCH_H_ */ diff --git a/bsd/include/fsproperties.h b/bsd/include/fsproperties.h new file mode 100644 index 000000000..15db66039 --- /dev/null +++ b/bsd/include/fsproperties.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _FSPROPERTIES_H_ +#define _FSPROPERTIES_H_ + +/* Info plist keys */ +#define kFSMediaTypesKey "FSMediaTypes" +#define kFSPersonalitiesKey "FSPersonalities" + +/* Sub-keys for FSMediaTypes dictionaries */ +#define kFSMediaPropertiesKey "FSMediaProperties" +#define kFSProbeArgumentsKey "FSProbeArguments" +#define kFSProbeExecutableKey "FSProbeExecutable" +#define kFSProbeOrderKey "FSProbeOrder" + +/* Sub-keys for FSPersonalities dictionaries */ +#define kFSFormatArgumentsKey "FSFormatArguments" +#define kFSFormatContentMaskKey "FSFormatContentMask" +#define kFSFormatExecutableKey "FSFormatExecutable" +#define kFSFormatMinimumSizeKey "FSFormatMinimumSize" +#define kFSMountArgumentsKey "FSMountArguments" +#define kFSMountExecutableKey "FSMountExecutable" +#define kFSNameKey "FSName" +#define kFSRepairArgumentsKey "FSRepairArguments" +#define kFSRepairExecutableKey "FSRepairExecutable" +#define kFSVerificationArgumentsKey "FSVerificationArguments" +#define kFSVerificationExecutableKey "FSVerificationExecutable" + +#endif /* _FSPROPERTIES_H_ */ diff --git a/bsd/include/fstab.h 
b/bsd/include/fstab.h new file mode 100644 index 000000000..5427d3468 --- /dev/null +++ b/bsd/include/fstab.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fstab.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _FSTAB_H_ +#define _FSTAB_H_ + +/* + * File system table, see fstab(5). + * + * Used by dump, mount, umount, swapon, fsck, df, ... + * + * For ufs fs_spec field is the block special name. Programs that want to + * use the character special name must create that name by prepending a 'r' + * after the right most slash. Quota files are always named "quotas", so + * if type is "rq", then use concatenation of fs_file and "quotas" to locate + * quota file. 
+ */ +#define _PATH_FSTAB "/etc/fstab" +#define FSTAB "/etc/fstab" /* deprecated */ + +#define FSTAB_RW "rw" /* read/write device */ +#define FSTAB_RQ "rq" /* read/write with quotas */ +#define FSTAB_RO "ro" /* read-only device */ +#define FSTAB_SW "sw" /* swap device */ +#define FSTAB_XX "xx" /* ignore totally */ + +struct fstab { + char *fs_spec; /* block special device name */ + char *fs_file; /* file system path prefix */ + char *fs_vfstype; /* File system type, ufs, nfs */ + char *fs_mntops; /* Mount options ala -o */ + char *fs_type; /* FSTAB_* from fs_mntops */ + int fs_freq; /* dump frequency, in days */ + int fs_passno; /* pass number on parallel dump */ +}; + +#include <sys/cdefs.h> + +__BEGIN_DECLS +struct fstab *getfsent __P((void)); +struct fstab *getfsspec __P((const char *)); +struct fstab *getfsfile __P((const char *)); +int setfsent __P((void)); +void endfsent __P((void)); +__END_DECLS + +#endif /* !_FSTAB_H_ */ diff --git a/bsd/include/fts.h b/bsd/include/fts.h new file mode 100644 index 000000000..3311bd3b8 --- /dev/null +++ b/bsd/include/fts.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)fts.h 8.3 (Berkeley) 8/14/94 + */ + +#ifndef _FTS_H_ +#define _FTS_H_ + +typedef struct { + struct _ftsent *fts_cur; /* current node */ + struct _ftsent *fts_child; /* linked list of children */ + struct _ftsent **fts_array; /* sort array */ + dev_t fts_dev; /* starting device # */ + char *fts_path; /* path for this descent */ + int fts_rfd; /* fd for root */ + int fts_pathlen; /* sizeof(path) */ + int fts_nitems; /* elements in the sort array */ + int (*fts_compar)(); /* compare function */ + +#define FTS_COMFOLLOW 0x001 /* follow command line symlinks */ +#define FTS_LOGICAL 0x002 /* logical walk */ +#define FTS_NOCHDIR 0x004 /* don't change directories */ +#define FTS_NOSTAT 0x008 /* don't get stat info */ +#define FTS_PHYSICAL 0x010 /* physical walk */ +#define FTS_SEEDOT 0x020 /* return dot and dot-dot */ +#define FTS_XDEV 0x040 /* don't cross devices */ +#define FTS_WHITEOUT 0x080 /* return whiteout information */ +#define FTS_OPTIONMASK 0x0ff /* valid user option mask */ + +#define FTS_NAMEONLY 0x100 /* (private) child names only */ +#define FTS_STOP 0x200 /* (private) unrecoverable error */ + int fts_options; /* fts_open options, global flags */ +} FTS; + +typedef struct _ftsent { + struct _ftsent *fts_cycle; /* cycle node */ + struct _ftsent *fts_parent; /* parent directory */ + struct _ftsent *fts_link; /* next file in directory */ + long fts_number; /* local numeric value */ + void *fts_pointer; /* local address value */ + char *fts_accpath; /* access path */ + char *fts_path; /* root path */ + int fts_errno; /* errno for this node */ + int fts_symfd; /* fd for symlink */ + u_short fts_pathlen; /* strlen(fts_path) */ + u_short fts_namelen; /* strlen(fts_name) */ + + ino_t fts_ino; /* inode */ + dev_t fts_dev; /* device */ + nlink_t fts_nlink; /* link count */ + +#define FTS_ROOTPARENTLEVEL -1 +#define FTS_ROOTLEVEL 0 + short fts_level; /* depth (-1 to N) */ + +#define FTS_D 1 /* preorder directory */ +#define FTS_DC 2 /* directory that causes 
cycles */ +#define FTS_DEFAULT 3 /* none of the above */ +#define FTS_DNR 4 /* unreadable directory */ +#define FTS_DOT 5 /* dot or dot-dot */ +#define FTS_DP 6 /* postorder directory */ +#define FTS_ERR 7 /* error; errno is set */ +#define FTS_F 8 /* regular file */ +#define FTS_INIT 9 /* initialized only */ +#define FTS_NS 10 /* stat(2) failed */ +#define FTS_NSOK 11 /* no stat(2) requested */ +#define FTS_SL 12 /* symbolic link */ +#define FTS_SLNONE 13 /* symbolic link without target */ +#define FTS_W 14 /* whiteout object */ + u_short fts_info; /* user flags for FTSENT structure */ + +#define FTS_DONTCHDIR 0x01 /* don't chdir .. to the parent */ +#define FTS_SYMFOLLOW 0x02 /* followed a symlink to get here */ +#define FTS_ISW 0x04 /* this is a whiteout object */ + u_short fts_flags; /* private flags for FTSENT structure */ + +#define FTS_AGAIN 1 /* read node again */ +#define FTS_FOLLOW 2 /* follow symbolic link */ +#define FTS_NOINSTR 3 /* no instructions */ +#define FTS_SKIP 4 /* discard node */ + u_short fts_instr; /* fts_set() instructions */ + + struct stat *fts_statp; /* stat(2) information */ + char fts_name[1]; /* file name */ +} FTSENT; + +#include <sys/cdefs.h> + +__BEGIN_DECLS +FTSENT *fts_children __P((FTS *, int)); +int fts_close __P((FTS *)); +FTS *fts_open __P((char * const *, int, + int (*)(const FTSENT **, const FTSENT **))); +FTSENT *fts_read __P((FTS *)); +int fts_set __P((FTS *, FTSENT *, int)); +__END_DECLS + +#endif /* !_FTS_H_ */ diff --git a/bsd/include/glob.h b/bsd/include/glob.h new file mode 100644 index 000000000..9a6d1f028 --- /dev/null +++ b/bsd/include/glob.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Guido van Rossum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)glob.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _GLOB_H_ +#define _GLOB_H_ + +#include <sys/cdefs.h> + +struct stat; +typedef struct { + int gl_pathc; /* Count of total paths so far. */ + int gl_matchc; /* Count of paths matching pattern. */ + int gl_offs; /* Reserved at beginning of gl_pathv. */ + int gl_flags; /* Copy of flags parameter to glob. */ + char **gl_pathv; /* List of paths matching pattern. */ + /* Copy of errfunc parameter to glob. */ + int (*gl_errfunc) __P((const char *, int)); + + /* + * Alternate filesystem access methods for glob; replacement + * versions of closedir(3), readdir(3), opendir(3), stat(2) + * and lstat(2). + */ + void (*gl_closedir) __P((void *)); + struct dirent *(*gl_readdir) __P((void *)); + void *(*gl_opendir) __P((const char *)); + int (*gl_lstat) __P((const char *, struct stat *)); + int (*gl_stat) __P((const char *, struct stat *)); +} glob_t; + +#define GLOB_APPEND 0x0001 /* Append to output from previous call. */ +#define GLOB_DOOFFS 0x0002 /* Use gl_offs. */ +#define GLOB_ERR 0x0004 /* Return on error. */ +#define GLOB_MARK 0x0008 /* Append / to matching directories. */ +#define GLOB_NOCHECK 0x0010 /* Return pattern itself if nothing matches. 
*/ +#define GLOB_NOSORT 0x0020 /* Don't sort. */ + +#ifndef _POSIX_SOURCE +#define GLOB_ALTDIRFUNC 0x0040 /* Use alternately specified directory funcs. */ +#define GLOB_BRACE 0x0080 /* Expand braces ala csh. */ +#define GLOB_MAGCHAR 0x0100 /* Pattern had globbing characters. */ +#define GLOB_NOMAGIC 0x0200 /* GLOB_NOCHECK without magic chars (csh). */ +#define GLOB_QUOTE 0x0400 /* Quote special chars with \. */ +#define GLOB_TILDE 0x0800 /* Expand tilde names from the passwd file. */ +#endif + +#define GLOB_NOSPACE (-1) /* Malloc call failed. */ +#define GLOB_ABEND (-2) /* Unignored error. */ + +__BEGIN_DECLS +int glob __P((const char *, int, int (*)(const char *, int), glob_t *)); +void globfree __P((glob_t *)); +__END_DECLS + +#endif /* !_GLOB_H_ */ diff --git a/bsd/include/grp.h b/bsd/include/grp.h new file mode 100644 index 000000000..54828e8ec --- /dev/null +++ b/bsd/include/grp.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. 
+ * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)grp.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _GRP_H_ +#define _GRP_H_ + +#ifndef _POSIX_SOURCE +#define _PATH_GROUP "/etc/group" +#endif + +struct group { + char *gr_name; /* group name */ + char *gr_passwd; /* group password */ + int gr_gid; /* group id */ + char **gr_mem; /* group members */ +}; + +#include <sys/cdefs.h> + +__BEGIN_DECLS +struct group *getgrgid __P((gid_t)); +struct group *getgrnam __P((const char *)); +#ifndef _POSIX_SOURCE +struct group *getgrent __P((void)); +int setgrent __P((void)); +void endgrent __P((void)); +void setgrfile __P((const char *)); +int setgroupent __P((int)); +#endif +__END_DECLS + +#endif /* !_GRP_H_ */ diff --git a/bsd/include/kvm.h b/bsd/include/kvm.h new file mode 100644 index 000000000..f513cb681 --- /dev/null +++ b/bsd/include/kvm.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kvm.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _KVM_H_ +#define _KVM_H_ + +/* Default version symbol. */ +#define VRS_SYM "_version" +#define VRS_KEY "VERSION" + +#include <nlist.h> +#include <sys/cdefs.h> + +__BEGIN_DECLS + +typedef struct __kvm kvm_t; + +struct kinfo_proc; +int kvm_close __P((kvm_t *)); +char **kvm_getargv __P((kvm_t *, const struct kinfo_proc *, int)); +char **kvm_getenvv __P((kvm_t *, const struct kinfo_proc *, int)); +char *kvm_geterr __P((kvm_t *)); +int kvm_getloadavg __P((kvm_t *, double [], int)); +char *kvm_getfiles __P((kvm_t *, int, int, int *)); +struct kinfo_proc * + kvm_getprocs __P((kvm_t *, int, int, int *)); +int kvm_nlist __P((kvm_t *, struct nlist *)); +kvm_t *kvm_open + __P((const char *, const char *, const char *, int, const char *)); +kvm_t *kvm_openfiles + __P((const char *, const char *, const char *, int, char *)); +int kvm_read __P((kvm_t *, unsigned long, void *, unsigned int)); +int kvm_write __P((kvm_t *, unsigned long, const void *, unsigned int)); + +__END_DECLS + +#endif /* !_KVM_H_ */ diff --git a/bsd/include/limits.h b/bsd/include/limits.h new file mode 100644 index 000000000..d2879c312 --- /dev/null +++ b/bsd/include/limits.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: limits.h,v 1.8 1996/10/21 05:10:50 jtc Exp $ */ + +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)limits.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _LIMITS_H_ +#define _LIMITS_H_ + +#include <machine/limits.h> +#include <sys/syslimits.h> + +#if !defined(_ANSI_SOURCE) +#define _POSIX_ARG_MAX 4096 +#define _POSIX_CHILD_MAX 6 +#define _POSIX_LINK_MAX 8 +#define _POSIX_MAX_CANON 255 +#define _POSIX_MAX_INPUT 255 +#define _POSIX_NAME_MAX 14 +#define _POSIX_NGROUPS_MAX 0 +#define _POSIX_OPEN_MAX 16 +#define _POSIX_PATH_MAX 255 +#define _POSIX_PIPE_BUF 512 +#define _POSIX_SSIZE_MAX 32767 +#define _POSIX_STREAM_MAX 8 +#define _POSIX_TZNAME_MAX 3 + +#define _POSIX2_BC_BASE_MAX 99 +#define _POSIX2_BC_DIM_MAX 2048 +#define _POSIX2_BC_SCALE_MAX 99 +#define _POSIX2_BC_STRING_MAX 1000 +#define _POSIX2_COLL_WEIGHTS_MAX 2 +#define _POSIX2_EQUIV_CLASS_MAX 2 +#define _POSIX2_EXPR_NEST_MAX 32 +#define _POSIX2_LINE_MAX 2048 +#define _POSIX2_RE_DUP_MAX 255 + +#define PTHREAD_STACK_MIN 8192 +#define PTHREAD_DESTRUCTOR_ITERATIONS 4 +#define PTHREAD_KEYS_MAX 128 + +#endif /* !_ANSI_SOURCE */ + +#if ( !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) ) || defined(_XOPEN_SOURCE) +#define PASS_MAX 128 + +#define NL_ARGMAX 9 +#define NL_LANGMAX 14 +#define NL_MSGMAX 32767 +#define NL_NMAX 1 +#define NL_SETMAX 255 +#define NL_TEXTMAX 255 +#endif 
+ + +#endif /* !_LIMITS_H_ */ diff --git a/bsd/include/locale.h b/bsd/include/locale.h new file mode 100644 index 000000000..be3a8bdc1 --- /dev/null +++ b/bsd/include/locale.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. 
+ * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)locale.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _LOCALE_H_ +#define _LOCALE_H_ + +struct lconv { + char *decimal_point; + char *thousands_sep; + char *grouping; + char *int_curr_symbol; + char *currency_symbol; + char *mon_decimal_point; + char *mon_thousands_sep; + char *mon_grouping; + char *positive_sign; + char *negative_sign; + char int_frac_digits; + char frac_digits; + char p_cs_precedes; + char p_sep_by_space; + char n_cs_precedes; + char n_sep_by_space; + char p_sign_posn; + char n_sign_posn; +}; + +#ifndef NULL +#define NULL 0 +#endif + +#define LC_ALL 0 +#define LC_COLLATE 1 +#define LC_CTYPE 2 +#define LC_MONETARY 3 +#define LC_NUMERIC 4 +#define LC_TIME 5 + +#define _LC_LAST 6 /* marks end */ + +#include <sys/cdefs.h> + +__BEGIN_DECLS +struct lconv *localeconv __P((void)); +char *setlocale __P((int, const char *)); +__END_DECLS + +#endif /* _LOCALE_H_ */ diff --git a/bsd/include/math.h b/bsd/include/math.h new file mode 100644 index 000000000..8cc72772f --- /dev/null +++ 
b/bsd/include/math.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 by NeXT Computer, Inc. All rights reserved. */ +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunPro, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. 
+ * ==================================================== + */ + +#ifndef _MATH_H_ +#define _MATH_H_ + +/* + * ANSI/POSIX + */ +#define HUGE_VAL 1e500 /* IEEE: positive infinity */ + +/* + * XOPEN/SVID + */ +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +#define M_E 2.7182818284590452354 /* e */ +#define M_LOG2E 1.4426950408889634074 /* log 2e */ +#define M_LOG10E 0.43429448190325182765 /* log 10e */ +#define M_LN2 0.69314718055994530942 /* log e2 */ +#define M_LN10 2.30258509299404568402 /* log e10 */ +#define M_PI 3.14159265358979323846 /* pi */ +#define M_PI_2 1.57079632679489661923 /* pi/2 */ +#define M_PI_4 0.78539816339744830962 /* pi/4 */ +#define M_1_PI 0.31830988618379067154 /* 1/pi */ +#define M_2_PI 0.63661977236758134308 /* 2/pi */ +#define M_2_SQRTPI 1.12837916709551257390 /* 2/sqrt(pi) */ +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ + +#define MAXFLOAT ((float)3.40282346638528860e+38) +extern int signgam; + +#if !defined(_XOPEN_SOURCE) +enum fdversion {fdlibm_ieee = -1, fdlibm_svid, fdlibm_xopen, fdlibm_posix}; + +#define _LIB_VERSION_TYPE enum fdversion +#define _LIB_VERSION _fdlib_version + +/* if global variable _LIB_VERSION is not desirable, one may + * change the following to be a constant by: + * #define _LIB_VERSION_TYPE const enum version + * In that case, after one initializes the value _LIB_VERSION (see + * s_lib_version.c) during compile time, it cannot be modified + * in the middle of a program + */ +extern _LIB_VERSION_TYPE _LIB_VERSION; + +#define _IEEE_ fdlibm_ieee +#define _SVID_ fdlibm_svid +#define _XOPEN_ fdlibm_xopen +#define _POSIX_ fdlibm_posix + +#if !defined(__cplusplus) +struct exception { + int type; + char *name; + double arg1; + double arg2; + double retval; +}; +#endif + +#define HUGE MAXFLOAT + +/* + * set X_TLOSS = pi*2**52, which is possibly defined in + * (one may replace the following line by "#include ") + */ + +#define X_TLOSS 
1.41484755040568800000e+16 + +#define DOMAIN 1 +#define SING 2 +#define OVERFLOW 3 +#define UNDERFLOW 4 +#define TLOSS 5 +#define PLOSS 6 + +#endif /* !_XOPEN_SOURCE */ +#endif /* !_ANSI_SOURCE && !_POSIX_SOURCE */ + + +#include <sys/cdefs.h> +__BEGIN_DECLS +/* + * ANSI/POSIX + */ +extern __pure double acos __P((double)); +extern __pure double asin __P((double)); +extern __pure double atan __P((double)); +extern __pure double atan2 __P((double, double)); +extern __pure double cos __P((double)); +extern __pure double sin __P((double)); +extern __pure double tan __P((double)); + +extern __pure double cosh __P((double)); +extern __pure double sinh __P((double)); +extern __pure double tanh __P((double)); + +extern __pure double exp __P((double)); +extern double frexp __P((double, int *)); +extern __pure double ldexp __P((double, int)); +extern __pure double log __P((double)); +extern __pure double log10 __P((double)); +extern double modf __P((double, double *)); + +extern __pure double pow __P((double, double)); +extern __pure double sqrt __P((double)); + +extern __pure double ceil __P((double)); +extern __pure double fabs __P((double)); +extern __pure double floor __P((double)); +extern __pure double fmod __P((double, double)); + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +extern __pure double erf __P((double)); +extern __pure double erfc __P((double)); +extern double gamma __P((double)); +extern __pure double hypot __P((double, double)); +extern __pure int isinf __P((double)); +extern __pure int isnan __P((double)); +extern __pure int finite __P((double)); +extern __pure double j0 __P((double)); +extern __pure double j1 __P((double)); +extern __pure double jn __P((int, double)); +extern double lgamma __P((double)); +extern __pure double y0 __P((double)); +extern __pure double y1 __P((double)); +extern __pure double yn __P((int, double)); + +#if !defined(_XOPEN_SOURCE) +extern __pure double acosh __P((double)); +extern __pure double asinh __P((double)); +extern __pure 
double atanh __P((double)); +extern __pure double cbrt __P((double)); +extern __pure double logb __P((double)); +extern __pure double nextafter __P((double, double)); +extern __pure double remainder __P((double, double)); +extern __pure double scalb __P((double, int)); + +#ifndef __cplusplus +extern int matherr __P((struct exception *)); +#endif + +/* + * IEEE Test Vector + */ +extern __pure double significand __P((double)); + +/* + * Functions callable from C, intended to support IEEE arithmetic. + */ +extern __pure double copysign __P((double, double)); +extern __pure int ilogb __P((double)); +extern __pure double rint __P((double)); +extern __pure double scalbn __P((double, int)); + +/* + * BSD math library entry points + */ +extern double cabs(); +extern __pure double drem __P((double, double)); +extern __pure double expm1 __P((double)); +extern __pure double log1p __P((double)); + +/* + * Reentrant version of gamma & lgamma; passes signgam back by reference + * as the second argument; user must allocate space for signgam. + */ +#ifdef _REENTRANT +extern double gamma_r __P((double, int *)); +extern double lgamma_r __P((double, int *)); +#endif /* _REENTRANT */ +#endif /* !_XOPEN_SOURCE */ +#endif /* !_ANSI_SOURCE && !_POSIX_SOURCE */ +__END_DECLS + +#endif /* _MATH_H_ */ diff --git a/bsd/include/memory.h b/bsd/include/memory.h new file mode 100644 index 000000000..49f8bd0a5 --- /dev/null +++ b/bsd/include/memory.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)memory.h 8.1 (Berkeley) 6/2/93 + */ + +#include diff --git a/bsd/include/mpool.h b/bsd/include/mpool.h new file mode 100644 index 000000000..13f955171 --- /dev/null +++ b/bsd/include/mpool.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mpool.h 8.1 (Berkeley) 6/2/93 + */ + +/* + * The memory pool scheme is a simple one. Each in memory page is referenced + * by a bucket which is threaded in three ways. All active pages are threaded + * on a hash chain (hashed by the page number) and an lru chain. Inactive + * pages are threaded on a free chain. 
Each reference to a memory pool is + * handed an MPOOL which is the opaque cookie passed to all of the memory + * routines. + */ +#define HASHSIZE 128 +#define HASHKEY(pgno) ((pgno - 1) % HASHSIZE) + +/* The BKT structures are the elements of the lists. */ +typedef struct BKT { + struct BKT *hnext; /* next hash bucket */ + struct BKT *hprev; /* previous hash bucket */ + struct BKT *cnext; /* next free/lru bucket */ + struct BKT *cprev; /* previous free/lru bucket */ + void *page; /* page */ + pgno_t pgno; /* page number */ + +#define MPOOL_DIRTY 0x01 /* page needs to be written */ +#define MPOOL_PINNED 0x02 /* page is pinned into memory */ + unsigned long flags; /* flags */ +} BKT; + +/* The BKTHDR structures are the heads of the lists. */ +typedef struct BKTHDR { + struct BKT *hnext; /* next hash bucket */ + struct BKT *hprev; /* previous hash bucket */ + struct BKT *cnext; /* next free/lru bucket */ + struct BKT *cprev; /* previous free/lru bucket */ +} BKTHDR; + +typedef struct MPOOL { + BKTHDR free; /* The free list. */ + BKTHDR lru; /* The LRU list. */ + BKTHDR hashtable[HASHSIZE]; /* Hashed list by page number. */ + pgno_t curcache; /* Current number of cached pages. */ + pgno_t maxcache; /* Max number of cached pages. */ + pgno_t npages; /* Number of pages in the file. */ + u_long pagesize; /* File page size. */ + int fd; /* File descriptor. */ + /* Page in conversion routine. */ + void (*pgin) __P((void *, pgno_t, void *)); + /* Page out conversion routine. */ + void (*pgout) __P((void *, pgno_t, void *)); + void *pgcookie; /* Cookie for page in/out routines. */ +#ifdef STATISTICS + unsigned long cachehit; + unsigned long cachemiss; + unsigned long pagealloc; + unsigned long pageflush; + unsigned long pageget; + unsigned long pagenew; + unsigned long pageput; + unsigned long pageread; + unsigned long pagewrite; +#endif +} MPOOL; + +#ifdef __MPOOLINTERFACE_PRIVATE +/* Macros to insert/delete into/from hash chain. 
*/ +#define rmhash(bp) { \ + (bp)->hprev->hnext = (bp)->hnext; \ + (bp)->hnext->hprev = (bp)->hprev; \ +} +#define inshash(bp, pg) { \ + hp = &mp->hashtable[HASHKEY(pg)]; \ + (bp)->hnext = hp->hnext; \ + (bp)->hprev = (struct BKT *)hp; \ + hp->hnext->hprev = (bp); \ + hp->hnext = (bp); \ +} + +/* Macros to insert/delete into/from lru and free chains. */ +#define rmchain(bp) { \ + (bp)->cprev->cnext = (bp)->cnext; \ + (bp)->cnext->cprev = (bp)->cprev; \ +} +#define inschain(bp, dp) { \ + (bp)->cnext = (dp)->cnext; \ + (bp)->cprev = (struct BKT *)(dp); \ + (dp)->cnext->cprev = (bp); \ + (dp)->cnext = (bp); \ +} +#endif + +__BEGIN_DECLS +MPOOL *mpool_open __P((DBT *, int, pgno_t, pgno_t)); +void mpool_filter __P((MPOOL *, void (*)(void *, pgno_t, void *), + void (*)(void *, pgno_t, void *), void *)); +void *mpool_new __P((MPOOL *, pgno_t *)); +void *mpool_get __P((MPOOL *, pgno_t, u_int)); +int mpool_put __P((MPOOL *, void *, u_int)); +int mpool_sync __P((MPOOL *)); +int mpool_close __P((MPOOL *)); +#ifdef STATISTICS +void mpool_stat __P((MPOOL *)); +#endif +__END_DECLS diff --git a/bsd/include/ndbm.h b/bsd/include/ndbm.h new file mode 100644 index 000000000..033a6ea63 --- /dev/null +++ b/bsd/include/ndbm.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Margo Seltzer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ndbm.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _NDBM_H_ +#define _NDBM_H_ + +#include + +/* Map dbm interface onto db(3). */ +#define DBM_RDONLY O_RDONLY + +/* Flags to dbm_store(). */ +#define DBM_INSERT 0 +#define DBM_REPLACE 1 + +/* + * The db(3) support for ndbm(3) always appends this suffix to the + * file name to avoid overwriting the user's original database. + */ +#define DBM_SUFFIX ".db" + +typedef struct { + char *dptr; + int dsize; +} datum; + +typedef DB DBM; +#define dbm_pagfno(a) DBM_PAGFNO_NOT_AVAILABLE + +__BEGIN_DECLS +void dbm_close __P((DBM *)); +int dbm_delete __P((DBM *, datum)); +datum dbm_fetch __P((DBM *, datum)); +datum dbm_firstkey __P((DBM *)); +long dbm_forder __P((DBM *, datum)); +datum dbm_nextkey __P((DBM *)); +DBM *dbm_open __P((const char *, int, int)); +int dbm_store __P((DBM *, datum, datum, int)); +int dbm_dirfno __P((DBM *)); +__END_DECLS + +#endif /* !_NDBM_H_ */ diff --git a/bsd/include/netdb.h b/bsd/include/netdb.h new file mode 100644 index 000000000..bad5ac4d9 --- /dev/null +++ b/bsd/include/netdb.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * ++Copyright++ 1980, 1983, 1988, 1993 + * - + * Copyright (c) 1980, 1983, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * - + * Portions Copyright (c) 1993 by Digital Equipment Corporation. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies, and that + * the name of Digital Equipment Corporation not be used in advertising or + * publicity pertaining to distribution of the document or software without + * specific, written prior permission. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND DIGITAL EQUIPMENT CORP. DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL DIGITAL EQUIPMENT + * CORPORATION BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. 
+ * - + * --Copyright-- + */ + +/* + * @(#)netdb.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _NETDB_H_ +#define _NETDB_H_ + +#include +#include + +#define _PATH_HEQUIV "/etc/hosts.equiv" +#define _PATH_HOSTS "/etc/hosts" +#define _PATH_NETWORKS "/etc/networks" +#define _PATH_PROTOCOLS "/etc/protocols" +#define _PATH_SERVICES "/etc/services" + +extern int h_errno; + +/* + * Structures returned by network data base library. All addresses are + * supplied in host order, and returned in network order (suitable for + * use in system calls). + */ +struct hostent { + char *h_name; /* official name of host */ + char **h_aliases; /* alias list */ + int h_addrtype; /* host address type */ + int h_length; /* length of address */ + char **h_addr_list; /* list of addresses from name server */ +#define h_addr h_addr_list[0] /* address, for backward compatiblity */ +}; + +/* + * Assumption here is that a network number + * fits in an unsigned long -- probably a poor one. + */ +struct netent { + char *n_name; /* official name of net */ + char **n_aliases; /* alias list */ + int n_addrtype; /* net address type */ + unsigned long n_net; /* network # */ +}; + +struct servent { + char *s_name; /* official service name */ + char **s_aliases; /* alias list */ + int s_port; /* port # */ + char *s_proto; /* protocol to use */ +}; + +struct protoent { + char *p_name; /* official protocol name */ + char **p_aliases; /* alias list */ + int p_proto; /* protocol # */ +}; + +struct addrinfo { + int ai_flags; /* AI_PASSIVE, AI_CANONNAME, AI_NUMERICHOST */ + int ai_family; /* PF_xxx */ + int ai_socktype; /* SOCK_xxx */ + int ai_protocol; /* 0 or IPPROTO_xxx for IPv4 and IPv6 */ + size_t ai_addrlen; /* length of ai_addr */ + char *ai_canonname; /* canonical name for hostname */ + struct sockaddr *ai_addr; /* binary address */ + struct addrinfo *ai_next; /* next structure in linked list */ +}; + +struct rpcent { + char *r_name; /* name of server for this rpc program */ + char **r_aliases; /* alias list 
*/ + int r_number; /* rpc program number */ +}; + +/* + * Error return codes from gethostbyname() and gethostbyaddr() + * (left in extern int h_errno). + */ + +#define NETDB_INTERNAL -1 /* see errno */ +#define NETDB_SUCCESS 0 /* no problem */ +#define HOST_NOT_FOUND 1 /* Authoritative Answer Host not found */ +#define TRY_AGAIN 2 /* Non-Authoritative Host not found, or SERVERFAIL */ +#define NO_RECOVERY 3 /* Non recoverable errors, FORMERR, REFUSED, NOTIMP */ +#define NO_DATA 4 /* Valid name, no data record of requested type */ +#define NO_ADDRESS NO_DATA /* no address, look for MX record */ + +/* + * Error return codes from getaddrinfo() + */ +#define EAI_ADDRFAMILY 1 /* address family for hostname not supported */ +#define EAI_AGAIN 2 /* temporary failure in name resolution */ +#define EAI_BADFLAGS 3 /* invalid value for ai_flags */ +#define EAI_FAIL 4 /* non-recoverable failure in name resolution */ +#define EAI_FAMILY 5 /* ai_family not supported */ +#define EAI_MEMORY 6 /* memory allocation failure */ +#define EAI_NODATA 7 /* no address associated with hostname */ +#define EAI_NONAME 8 /* hostname nor servname provided, or not known */ +#define EAI_SERVICE 9 /* servname not supported for ai_socktype */ +#define EAI_SOCKTYPE 10 /* ai_socktype not supported */ +#define EAI_SYSTEM 11 /* system error returned in errno */ +#define EAI_BADHINTS 12 +#define EAI_PROTOCOL 13 +#define EAI_MAX 14 + +/* + * Flag values for getaddrinfo() + */ +#define AI_PASSIVE 0x00000001 /* get address to use bind() */ +#define AI_CANONNAME 0x00000002 /* fill ai_canonname */ +#define AI_NUMERICHOST 0x00000004 /* prevent name resolution */ +/* valid flags for addrinfo */ +#define AI_MASK (AI_PASSIVE | AI_CANONNAME | AI_NUMERICHOST) + +#define AI_ALL 0x00000100 /* IPv6 and IPv4-mapped (with AI_V4MAPPED) */ +#define AI_V4MAPPED_CFG 0x00000200 /* accept IPv4-mapped if kernel supports */ +#define AI_ADDRCONFIG 0x00000400 /* only if any address is assigned */ +#define AI_V4MAPPED 0x00000800 
/* accept IPv4-mapped IPv6 address */ +/* special recommended flags for getipnodebyname */ +#define AI_DEFAULT (AI_V4MAPPED_CFG | AI_ADDRCONFIG) + +/* + * Constants for getnameinfo() + */ +#define NI_MAXHOST 1025 +#define NI_MAXSERV 32 + +/* + * Flag values for getnameinfo() + */ +#define NI_NOFQDN 0x00000001 +#define NI_NUMERICHOST 0x00000002 +#define NI_NAMEREQD 0x00000004 +#define NI_NUMERICSERV 0x00000008 +#define NI_DGRAM 0x00000010 +#define NI_WITHSCOPEID 0x00000020 + +/* + * Scope delimit character + */ +#define SCOPE_DELIMITER '@' + +__BEGIN_DECLS +void endhostent __P((void)); +void endnetent __P((void)); +void endprotoent __P((void)); +void endservent __P((void)); +void freehostent __P((struct hostent *)); +struct hostent *gethostbyaddr __P((const char *, int, int)); +struct hostent *gethostbyname __P((const char *)); +struct hostent *gethostbyname2 __P((const char *, int)); +struct hostent *gethostent __P((void)); +struct hostent *getipnodebyaddr __P((const void *, size_t, int, int *)); +struct hostent *getipnodebyname __P((const char *, int, int, int *)); +struct netent *getnetbyaddr __P((long, int)); +struct netent *getnetbyname __P((const char *)); +struct netent *getnetent __P((void)); +struct protoent *getprotobyname __P((const char *)); +struct protoent *getprotobynumber __P((int)); +struct protoent *getprotoent __P((void)); +struct servent *getservbyname __P((const char *, const char *)); +struct servent *getservbyport __P((int, const char *)); +struct servent *getservent __P((void)); +struct rpcent *getrpcbyname __P((const char *name)); +struct rpcent *getrpcbynumber __P((long number)); +struct rpcent *getrpcent __P((void)); +void setrpcent __P((int stayopen)); +void endrpcent __P((void)); + +void herror __P((const char *)); +char *hstrerror __P((int)); +void sethostent __P((int)); +/* void sethostfile __P((const char *)); */ +void setnetent __P((int)); +void setprotoent __P((int)); +void setservent __P((int)); + +char *gai_strerror __P((int)); 
+void freeaddrinfo __P((struct addrinfo *)); +int getaddrinfo __P((const char *, const char *, const struct addrinfo *, struct addrinfo **)); +__END_DECLS + +#endif /* !_NETDB_H_ */ diff --git a/bsd/include/nlist.h b/bsd/include/nlist.h new file mode 100644 index 000000000..828b4a733 --- /dev/null +++ b/bsd/include/nlist.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nlist.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _NLIST_H_ +#define _NLIST_H_ + +/* + * Symbol table entry format. The #ifdef's are so that programs including + * nlist.h can initialize nlist structures statically. 
+ */ +struct nlist { +#ifdef _AOUT_INCLUDE_ + union { + char *n_name; /* symbol name (in memory) */ + long n_strx; /* file string table offset (on disk) */ + } n_un; +#else + char *n_name; /* symbol name (in memory) */ +#endif + +#define N_UNDF 0x00 /* undefined */ +#define N_ABS 0x02 /* absolute address */ +#define N_TEXT 0x04 /* text segment */ +#define N_DATA 0x06 /* data segment */ +#define N_BSS 0x08 /* bss segment */ +#define N_COMM 0x12 /* common reference */ +#define N_FN 0x1e /* file name */ + +#define N_EXT 0x01 /* external (global) bit, OR'ed in */ +#define N_TYPE 0x1e /* mask for all the type bits */ + unsigned char n_type; /* type defines */ + + char n_other; /* spare */ +#define n_hash n_desc /* used internally by ld(1); XXX */ + short n_desc; /* used by stab entries */ + unsigned long n_value; /* address/value of the symbol */ +}; + +#define N_FORMAT "%08x" /* namelist value format; XXX */ +#define N_STAB 0x0e0 /* mask for debugger symbols -- stab(5) */ + +#include + +__BEGIN_DECLS +int nlist __P((const char *, struct nlist *)); +__END_DECLS + +#endif /* !_NLIST_H_ */ diff --git a/bsd/include/paths.h b/bsd/include/paths.h new file mode 100644 index 000000000..45a47aa33 --- /dev/null +++ b/bsd/include/paths.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $OpenBSD: paths.h,v 1.10 1997/11/09 00:29:02 bri Exp $ */ +/* $NetBSD: paths.h,v 1.10 1997/04/23 09:41:38 lukem Exp $ */ + +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)paths.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _PATHS_H_ +#define _PATHS_H_ + +/* Default search path. */ +#define _PATH_DEFPATH "/usr/bin:/bin" +/* All standard utilities path. */ +#define _PATH_STDPATH "/usr/bin:/bin:/usr/sbin:/sbin" + +#define _PATH_BSHELL "/bin/sh" +#define _PATH_CONSOLE "/dev/console" +#define _PATH_CSHELL "/bin/csh" +#define _PATH_DEFTAPE "/dev/nrst0" +#define _PATH_DEVDB "/var/run/dev.db" +#define _PATH_DEVNULL "/dev/null" +#define _PATH_DRUM "/dev/drum" +#define _PATH_FSIRAND "/sbin/fsirand" +#define _PATH_KMEM "/dev/kmem" +#define _PATH_KVMDB "/var/db/kvm.db" +#define _PATH_LOCALE "/usr/share/locale" +#define _PATH_MAILDIR "/var/mail" +#define _PATH_MAN "/usr/share/man" +#define _PATH_MEM "/dev/mem" +#define _PATH_NOLOGIN "/etc/nologin" +#define _PATH_RSH "/usr/bin/rsh" +#define _PATH_SENDMAIL "/usr/sbin/sendmail" +#define _PATH_SHELLS "/etc/shells" +#define _PATH_TTY "/dev/tty" +#define _PATH_UNIX "/mach" +#define _PATH_VI "/usr/bin/vi" + +/* Provide trailing slash, since mostly used for building pathnames. 
*/ +#define _PATH_DEV "/dev/" +#define _PATH_TMP "/tmp/" +#define _PATH_UUCPLOCK "/var/spool/lock/" +#define _PATH_VARDB "/var/db/" +#define _PATH_VARRUN "/var/run/" +#define _PATH_VARTMP "/var/tmp/" + +#include + +#endif /* !_PATHS_H_ */ diff --git a/bsd/include/protocols/Makefile b/bsd/include/protocols/Makefile new file mode 100644 index 000000000..81f0a9363 --- /dev/null +++ b/bsd/include/protocols/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + dumprestore.h routed.h rwhod.h talkd.h timed.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = protocols + +EXPORT_MI_LIST = + +EXPORT_MI_DIR = + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/include/protocols/dumprestore.h b/bsd/include/protocols/dumprestore.h new file mode 100644 index 000000000..0912e7d66 --- /dev/null +++ b/bsd/include/protocols/dumprestore.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dumprestore.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _DUMPRESTORE_H_ +#define _DUMPRESTORE_H_ + +/* + * TP_BSIZE is the size of file blocks on the dump tapes. + * Note that TP_BSIZE must be a multiple of DEV_BSIZE. + * + * NTREC is the number of TP_BSIZE blocks that are written + * in each tape record. HIGHDENSITYTREC is the number of + * TP_BSIZE blocks that are written in each tape record on + * 6250 BPI or higher density tapes. + * + * TP_NINDIR is the number of indirect pointers in a TS_INODE + * or TS_ADDR record. Note that it must be a power of two. 
+ */ +#define TP_BSIZE 1024 +#define NTREC 10 +#define HIGHDENSITYTREC 32 +#define TP_NINDIR (TP_BSIZE/2) +#define LBLSIZE 16 +#define NAMELEN 64 + +#define OFS_MAGIC (int)60011 +#define NFS_MAGIC (int)60012 +#define CHECKSUM (int)84446 + +union u_spcl { + char dummy[TP_BSIZE]; + struct s_spcl { + long c_type; /* record type (see below) */ + time_t c_date; /* date of this dump */ + time_t c_ddate; /* date of previous dump */ + long c_volume; /* dump volume number */ + daddr_t c_tapea; /* logical block of this record */ + ino_t c_inumber; /* number of inode */ + long c_magic; /* magic number (see above) */ + long c_checksum; /* record checksum */ + struct dinode c_dinode; /* ownership and mode of inode */ + long c_count; /* number of valid c_addr entries */ + char c_addr[TP_NINDIR]; /* 1 => data; 0 => hole in inode */ + char c_label[LBLSIZE]; /* dump label */ + long c_level; /* level of this dump */ + char c_filesys[NAMELEN]; /* name of dumpped file system */ + char c_dev[NAMELEN]; /* name of dumpped device */ + char c_host[NAMELEN]; /* name of dumpped host */ + long c_flags; /* additional information */ + long c_firstrec; /* first record on volume */ + long c_spare[32]; /* reserved for future uses */ + } s_spcl; +}; + +/* + * special record types + */ +#define TS_TAPE 1 /* dump tape header */ +#define TS_INODE 2 /* beginning of file record */ +#define TS_ADDR 4 /* continuation of file record */ +#define TS_BITS 3 /* map of inodes on tape */ +#define TS_CLRI 6 /* map of inodes deleted since last dump */ +#define TS_END 5 /* end of volume marker */ + +/* + * flag values + */ +#define DR_NEWHEADER 0x0001 /* new format tape header */ +#define DR_NEWINODEFMT 0x0002 /* new format inodes on tape */ + +#define DUMPOUTFMT "%-16s %c %s" /* for printf */ + /* name, level, ctime(date) */ +#define DUMPINFMT "%16s %c %[^\n]\n" /* inverse for scanf */ + +#endif /* !_DUMPRESTORE_H_ */ diff --git a/bsd/include/protocols/routed.h b/bsd/include/protocols/routed.h new file mode 100644 
index 000000000..95533f387 --- /dev/null +++ b/bsd/include/protocols/routed.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1983, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)routed.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _ROUTED_H_ +#define _ROUTED_H_ + +/* + * Routing Information Protocol + * + * Derived from Xerox NS Routing Information Protocol + * by changing 32-bit net numbers to sockaddr's and + * padding stuff to 32-bit boundaries. + */ +#define RIPVERSION 1 + +struct netinfo { + struct sockaddr rip_dst; /* destination net/host */ + int rip_metric; /* cost of route */ +}; + +struct rip { + u_char rip_cmd; /* request/response */ + u_char rip_vers; /* protocol version # */ + u_char rip_res1[2]; /* pad to 32-bit boundary */ + union { + struct netinfo ru_nets[1]; /* variable length... */ + char ru_tracefile[1]; /* ditto ... */ + } ripun; +#define rip_nets ripun.ru_nets +#define rip_tracefile ripun.ru_tracefile +}; + +/* + * Packet types. 
+ */ +#define RIPCMD_REQUEST 1 /* want info */ +#define RIPCMD_RESPONSE 2 /* responding to request */ +#define RIPCMD_TRACEON 3 /* turn tracing on */ +#define RIPCMD_TRACEOFF 4 /* turn it off */ + +#define RIPCMD_MAX 5 +#ifdef RIPCMDS +char *ripcmds[RIPCMD_MAX] = + { "#0", "REQUEST", "RESPONSE", "TRACEON", "TRACEOFF" }; +#endif + +#define HOPCNT_INFINITY 16 /* per Xerox NS */ +#define MAXPACKETSIZE 512 /* max broadcast size */ + +/* + * Timer values used in managing the routing table. + * Complete tables are broadcast every SUPPLY_INTERVAL seconds. + * If changes occur between updates, dynamic updates containing only changes + * may be sent. When these are sent, a timer is set for a random value + * between MIN_WAITTIME and MAX_WAITTIME, and no additional dynamic updates + * are sent until the timer expires. + * + * Every update of a routing entry forces an entry's timer to be reset. + * After EXPIRE_TIME without updates, the entry is marked invalid, + * but held onto until GARBAGE_TIME so that others may + * see it "be deleted". + */ +#define TIMER_RATE 30 /* alarm clocks every 30 seconds */ + +#define SUPPLY_INTERVAL 30 /* time to supply tables */ +#define MIN_WAITTIME 2 /* min. interval to broadcast changes */ +#define MAX_WAITTIME 5 /* max. time to delay changes */ + +#define EXPIRE_TIME 180 /* time to mark entry invalid */ +#define GARBAGE_TIME 240 /* time to garbage collect */ + +#endif /* !_ROUTED_H_ */ diff --git a/bsd/include/protocols/rwhod.h b/bsd/include/protocols/rwhod.h new file mode 100644 index 000000000..c054e63ef --- /dev/null +++ b/bsd/include/protocols/rwhod.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)rwhod.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _RWHOD_H_ +#define _RWHOD_H_ + +/* + * rwho protocol packet format. + */ +struct outmp { + char out_line[8]; /* tty name */ + char out_name[8]; /* user id */ + long out_time; /* time on */ +}; + +struct whod { + char wd_vers; /* protocol version # */ + char wd_type; /* packet type, see below */ + char wd_pad[2]; + int wd_sendtime; /* time stamp by sender */ + int wd_recvtime; /* time stamp applied by receiver */ + char wd_hostname[32]; /* hosts's name */ + int wd_loadav[3]; /* load average as in uptime */ + int wd_boottime; /* time system booted */ + struct whoent { + struct outmp we_utmp; /* active tty info */ + int we_idle; /* tty idle time */ + } wd_we[1024 / sizeof (struct whoent)]; +}; + +#define WHODVERSION 1 +#define WHODTYPE_STATUS 1 /* host status */ + +#define _PATH_RWHODIR "/var/rwho" + +#endif /* !_RWHOD_H_ */ diff --git a/bsd/include/protocols/talkd.h b/bsd/include/protocols/talkd.h new file mode 100644 index 000000000..b8a2b97b6 --- /dev/null +++ b/bsd/include/protocols/talkd.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)talkd.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _TALKD_H_ +#define _TALKD_H_ + +/* + * This describes the protocol used by the talk server and clients. + * + * The talk server acts a repository of invitations, responding to + * requests by clients wishing to rendezvous for the purpose of + * holding a conversation. In normal operation, a client, the caller, + * initiates a rendezvous by sending a CTL_MSG to the server of + * type LOOK_UP. This causes the server to search its invitation + * tables to check if an invitation currently exists for the caller + * (to speak to the callee specified in the message). If the lookup + * fails, the caller then sends an ANNOUNCE message causing the server + * to broadcast an announcement on the callee's login ports requesting + * contact. When the callee responds, the local server uses the + * recorded invitation to respond with the appropriate rendezvous + * address and the caller and callee client programs establish a + * stream connection through which the conversation takes place. + */ + +/* + * Client->server request message format. 
+ */ +typedef struct { + u_char vers; /* protocol version */ + u_char type; /* request type, see below */ + u_char answer; /* not used */ + u_char pad; + u_long id_num; /* message id */ + struct osockaddr addr; /* old (4.3) style */ + struct osockaddr ctl_addr; /* old (4.3) style */ + long pid; /* caller's process id */ +#define NAME_SIZE 12 + char l_name[NAME_SIZE];/* caller's name */ + char r_name[NAME_SIZE];/* callee's name */ +#define TTY_SIZE 16 + char r_tty[TTY_SIZE];/* callee's tty name */ +} CTL_MSG; + +/* + * Server->client response message format. + */ +typedef struct { + u_char vers; /* protocol version */ + u_char type; /* type of request message, see below */ + u_char answer; /* respose to request message, see below */ + u_char pad; + u_long id_num; /* message id */ + struct osockaddr addr; /* address for establishing conversation */ +} CTL_RESPONSE; + +#define TALK_VERSION 1 /* protocol version */ + +/* message type values */ +#define LEAVE_INVITE 0 /* leave invitation with server */ +#define LOOK_UP 1 /* check for invitation by callee */ +#define DELETE 2 /* delete invitation by caller */ +#define ANNOUNCE 3 /* announce invitation by caller */ + +/* answer values */ +#define SUCCESS 0 /* operation completed properly */ +#define NOT_HERE 1 /* callee not logged in */ +#define FAILED 2 /* operation failed for unexplained reason */ +#define MACHINE_UNKNOWN 3 /* caller's machine name unknown */ +#define PERMISSION_DENIED 4 /* callee's tty doesn't permit announce */ +#define UNKNOWN_REQUEST 5 /* request has invalid type value */ +#define BADVERSION 6 /* request has invalid protocol version */ +#define BADADDR 7 /* request has invalid addr value */ +#define BADCTLADDR 8 /* request has invalid ctl_addr value */ + +/* + * Operational parameters. 
+ */ +#define MAX_LIFE 60 /* max time daemon saves invitations */ +/* RING_WAIT should be 10's of seconds less than MAX_LIFE */ +#define RING_WAIT 30 /* time to wait before resending invitation */ + +#endif /* !_TALKD_H_ */ diff --git a/bsd/include/protocols/timed.h b/bsd/include/protocols/timed.h new file mode 100644 index 000000000..e3e62639c --- /dev/null +++ b/bsd/include/protocols/timed.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)timed.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _TIMED_H_ +#define _TIMED_H_ + +/* + * Time Synchronization Protocol + */ + +#define TSPVERSION 1 +#define ANYADDR NULL + +struct tsp { + u_char tsp_type; + u_char tsp_vers; + u_short tsp_seq; + union { + struct timeval tspu_time; + char tspu_hopcnt; + } tsp_u; + char tsp_name[MAXHOSTNAMELEN]; +}; + +#define tsp_time tsp_u.tspu_time +#define tsp_hopcnt tsp_u.tspu_hopcnt + +/* + * Command types. 
+ */ +#define TSP_ANY 0 /* match any types */ +#define TSP_ADJTIME 1 /* send adjtime */ +#define TSP_ACK 2 /* generic acknowledgement */ +#define TSP_MASTERREQ 3 /* ask for master's name */ +#define TSP_MASTERACK 4 /* acknowledge master request */ +#define TSP_SETTIME 5 /* send network time */ +#define TSP_MASTERUP 6 /* inform slaves that master is up */ +#define TSP_SLAVEUP 7 /* slave is up but not polled */ +#define TSP_ELECTION 8 /* advance candidature for master */ +#define TSP_ACCEPT 9 /* support candidature of master */ +#define TSP_REFUSE 10 /* reject candidature of master */ +#define TSP_CONFLICT 11 /* two or more masters present */ +#define TSP_RESOLVE 12 /* masters' conflict resolution */ +#define TSP_QUIT 13 /* reject candidature if master is up */ +#define TSP_DATE 14 /* reset the time (date command) */ +#define TSP_DATEREQ 15 /* remote request to reset the time */ +#define TSP_DATEACK 16 /* acknowledge time setting */ +#define TSP_TRACEON 17 /* turn tracing on */ +#define TSP_TRACEOFF 18 /* turn tracing off */ +#define TSP_MSITE 19 /* find out master's site */ +#define TSP_MSITEREQ 20 /* remote master's site request */ +#define TSP_TEST 21 /* for testing election algo */ +#define TSP_SETDATE 22 /* New from date command */ +#define TSP_SETDATEREQ 23 /* New remote for above */ +#define TSP_LOOP 24 /* loop detection packet */ + +#define TSPTYPENUMBER 25 + +#ifdef TSPTYPES +char *tsptype[TSPTYPENUMBER] = + { "ANY", "ADJTIME", "ACK", "MASTERREQ", "MASTERACK", "SETTIME", "MASTERUP", + "SLAVEUP", "ELECTION", "ACCEPT", "REFUSE", "CONFLICT", "RESOLVE", "QUIT", + "DATE", "DATEREQ", "DATEACK", "TRACEON", "TRACEOFF", "MSITE", "MSITEREQ", + "TEST", "SETDATE", "SETDATEREQ", "LOOP" }; +#endif + +#endif /* !_TIMED_H_ */ diff --git a/bsd/include/pwd.h b/bsd/include/pwd.h new file mode 100644 index 000000000..5423e3864 --- /dev/null +++ b/bsd/include/pwd.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: pwd.h,v 1.11 1997/08/16 13:47:21 lukem Exp $ */ + +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * Portions Copyright(C) 1995, Jason Downs. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pwd.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _PWD_H_ +#define _PWD_H_ + +#include <sys/types.h> + +#ifndef _POSIX_SOURCE +#define _PATH_PASSWD "/etc/passwd" +#define _PATH_MASTERPASSWD "/etc/master.passwd" +#define _PATH_MASTERPASSWD_LOCK "/etc/ptmp" + +#define _PATH_MP_DB "/etc/pwd.db" +#define _PATH_SMP_DB "/etc/spwd.db" + +#define _PATH_PWD_MKDB "/usr/sbin/pwd_mkdb" + +#define _PW_KEYBYNAME '1' /* stored by name */ +#define _PW_KEYBYNUM '2' /* stored by entry in the "file" */ +#define _PW_KEYBYUID '3' /* stored by uid */ + +#define _PASSWORD_EFMT1 '_' /* extended encryption format */ + +#define _PASSWORD_LEN 128 /* max length, not counting NULL */ + +#define _PASSWORD_NOUID 0x01 /* flag for no specified uid. */ +#define _PASSWORD_NOGID 0x02 /* flag for no specified gid. 
*/ +#define _PASSWORD_NOCHG 0x04 /* flag for no specified change. */ +#define _PASSWORD_NOEXP 0x08 /* flag for no specified expire. */ + +#define _PASSWORD_WARNDAYS 14 /* days to warn about expiry */ +#define _PASSWORD_CHGNOW -1 /* special day to force password + * change at next login */ +#endif + +struct passwd { + char *pw_name; /* user name */ + char *pw_passwd; /* encrypted password */ + uid_t pw_uid; /* user uid */ + gid_t pw_gid; /* user gid */ + time_t pw_change; /* password change time */ + char *pw_class; /* user access class */ + char *pw_gecos; /* Honeywell login info */ + char *pw_dir; /* home directory */ + char *pw_shell; /* default shell */ + time_t pw_expire; /* account expiration */ +}; + +#include <sys/cdefs.h> + +__BEGIN_DECLS +struct passwd *getpwuid __P((uid_t)); +struct passwd *getpwnam __P((const char *)); +#ifndef _POSIX_SOURCE +struct passwd *getpwent __P((void)); +#ifndef _XOPEN_SOURCE +int setpassent __P((int)); +#endif +int setpwent __P((void)); +void endpwent __P((void)); +#endif +__END_DECLS + +#endif /* !_PWD_H_ */ diff --git a/bsd/include/ranlib.h b/bsd/include/ranlib.h new file mode 100644 index 000000000..543b89abc --- /dev/null +++ b/bsd/include/ranlib.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1990, 1982, 1985, 1986, 1988, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ranlib.h 8.1 (Berkeley) 6/2/93 + */ +#ifndef _RANLIB_H_ +#define _RANLIB_H_ + +#include <sys/types.h> /* off_t */ + +/* + * There are two known orders of table of contents for archives. The first is + * the order ranlib(1) originally produced and still produces without any + * options. This table of contents has the archive member name "__.SYMDEF" + * This order has the ranlib structures in the order the objects appear in the + * archive and the symbol names of those objects in the order of symbol table. + * The second know order is sorted by symbol name and is produced with the -s + * option to ranlib(1). This table of contents has the archive member name + * "__.SYMDEF SORTED" and many programs (notably the 1.0 version of ld(1) can't + * tell the difference between names because of the imbedded blank in the name + * and works with either table of contents). This second order is used by the + * post 1.0 link editor to produce faster linking. The original 1.0 version of + * ranlib(1) gets confused when it is run on a archive with the second type of + * table of contents because it and ar(1) which it uses use different ways to + * determined the member name (ar(1) treats all blanks in the name as + * significant and ranlib(1) only checks for the first one). 
+ */ +#define SYMDEF "__.SYMDEF" +#define SYMDEF_SORTED "__.SYMDEF SORTED" + +#define RANLIBMAG "__.SYMDEF" /* archive file name */ +#define RANLIBSKEW 3 /* creation time offset */ + +/* + * Structure of the __.SYMDEF table of contents for an archive. + * __.SYMDEF begins with a long giving the size in bytes of the ranlib + * structures which immediately follow, and then continues with a string + * table consisting of a long giving the number of bytes of strings which + * follow and then the strings themselves. The ran_strx fields index the + * string table whose first byte is numbered 0. + */ +struct ranlib { + union { + off_t ran_strx; /* string table index of */ + char *ran_name; /* symbol defined by */ + } ran_un; + off_t ran_off; /* library member at this offset */ +}; +#endif /* ! _RANLIB_H_ */ diff --git a/bsd/include/regex.h b/bsd/include/regex.h new file mode 100644 index 000000000..ad48247f3 --- /dev/null +++ b/bsd/include/regex.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992 Henry Spencer. 
+ * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Henry Spencer of the University of Toronto. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)regex.h 8.2 (Berkeley) 1/3/94 + */ + +#ifndef _REGEX_H_ +#define _REGEX_H_ + +#include + +/* types */ +typedef off_t regoff_t; + +typedef struct { + int re_magic; + size_t re_nsub; /* number of parenthesized subexpressions */ + __const char *re_endp; /* end pointer for REG_PEND */ + struct re_guts *re_g; /* none of your business :-) */ +} regex_t; + +typedef struct { + regoff_t rm_so; /* start of match */ + regoff_t rm_eo; /* end of match */ +} regmatch_t; + +/* regcomp() flags */ +#define REG_BASIC 0000 +#define REG_EXTENDED 0001 +#define REG_ICASE 0002 +#define REG_NOSUB 0004 +#define REG_NEWLINE 0010 +#define REG_NOSPEC 0020 +#define REG_PEND 0040 +#define REG_DUMP 0200 + +/* regerror() flags */ +#define REG_NOMATCH 1 +#define REG_BADPAT 2 +#define REG_ECOLLATE 3 +#define REG_ECTYPE 4 +#define REG_EESCAPE 5 +#define REG_ESUBREG 6 +#define REG_EBRACK 7 +#define REG_EPAREN 8 +#define REG_EBRACE 9 +#define REG_BADBR 10 +#define REG_ERANGE 11 +#define REG_ESPACE 12 +#define REG_BADRPT 13 +#define REG_EMPTY 14 +#define REG_ASSERT 15 +#define REG_INVARG 16 +#define REG_ATOI 255 /* convert name to number (!) */ +#define REG_ITOA 0400 /* convert number to name (!) */ + +/* regexec() flags */ +#define REG_NOTBOL 00001 +#define REG_NOTEOL 00002 +#define REG_STARTEND 00004 +#define REG_TRACE 00400 /* tracing of execution */ +#define REG_LARGE 01000 /* force large representation */ +#define REG_BACKR 02000 /* force use of backref code */ + +__BEGIN_DECLS +int regcomp __P((regex_t *, const char *, int)); +size_t regerror __P((int, const regex_t *, char *, size_t)); +int regexec __P((const regex_t *, + const char *, size_t, regmatch_t [], int)); +void regfree __P((regex_t *)); +__END_DECLS + +#endif /* !_REGEX_H_ */ diff --git a/bsd/include/regexp.h b/bsd/include/regexp.h new file mode 100644 index 000000000..97ce7e4b8 --- /dev/null +++ b/bsd/include/regexp.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1986 by University of Toronto. + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Henry Spencer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)regexp.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _REGEXP_H_ +#define _REGEXP_H_ + +/* + * Definitions etc. for regexp(3) routines. + * + * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof], + * not the System V one. + */ +#define NSUBEXP 10 +typedef struct regexp { + char *startp[NSUBEXP]; + char *endp[NSUBEXP]; + char regstart; /* Internal use only. */ + char reganch; /* Internal use only. */ + char *regmust; /* Internal use only. */ + int regmlen; /* Internal use only. */ + char program[1]; /* Unwarranted chumminess with compiler. 
*/ +} regexp; + +#include <sys/cdefs.h> + +__BEGIN_DECLS +regexp *regcomp __P((const char *)); +int regexec __P((const regexp *, const char *)); +void regsub __P((const regexp *, const char *, char *)); +void regerror __P((const char *)); +__END_DECLS + +#endif /* !_REGEXP_H_ */ diff --git a/bsd/include/resolv.h.kame b/bsd/include/resolv.h.kame new file mode 100644 index 000000000..25135c01a --- /dev/null +++ b/bsd/include/resolv.h.kame @@ -0,0 +1,356 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1983, 1987, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Portions Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. + */ + +/* + * @(#)resolv.h 8.1 (Berkeley) 6/2/93 + * From Id: resolv.h,v 8.12 1998/04/28 19:36:46 halley Exp $ + * $FreeBSD: src/include/resolv.h,v 1.17.2.1 1999/08/29 14:38:55 peter Exp $ + */ + +#ifndef _RESOLV_H_ +#define _RESOLV_H_ + +#include +#include +#include +#include +#include + +/* + * Revision information. This is the release date in YYYYMMDD format. + * It can change every day so the right thing to do with it is use it + * in preprocessor commands such as "#if (__RES > 19931104)". 
Do not + * compare for equality; rather, use it to determine whether your resolver + * is new enough to contain a certain feature. + */ + +#define __RES 19960801 + +/* + * Resolver configuration file. + * Normally not present, but may contain the address of the + * inital name server(s) to query and the domain search list. + */ + +#ifndef _PATH_RESCONF +#define _PATH_RESCONF "/etc/resolv.conf" +#endif + +/* + * Global defines and variables for resolver stub. + */ +#define MAXNS 3 /* max # name servers we'll track */ +#define MAXDFLSRCH 3 /* # default domain levels to try */ +#define MAXDNSRCH 6 /* max # domains in search path */ +#define LOCALDOMAINPARTS 2 /* min levels in name that is "local" */ +#ifdef __NetBSD__ +#define MAXDNSLUS 4 /* min # of host lookup types */ +#endif + +#define RES_TIMEOUT 5 /* min. seconds between retries */ +#define MAXRESOLVSORT 10 /* number of net to sort on */ +#define RES_MAXNDOTS 15 /* should reflect bit field size */ + +struct __res_state { + int retrans; /* retransmition time interval */ + int retry; /* number of times to retransmit */ + u_long options; /* option flags - see below. */ + int nscount; /* number of name servers */ + struct sockaddr_in + nsaddr_list[MAXNS]; /* address of name server */ +#define nsaddr nsaddr_list[0] /* for backward compatibility */ + u_short id; /* current message id */ + char *dnsrch[MAXDNSRCH+1]; /* components of domain to search */ + char defdname[256]; /* default domain (deprecated) */ + u_long pfcode; /* RES_PRF_ flags - see below. */ + unsigned ndots:4; /* threshold for initial abs. query */ + unsigned nsort:4; /* number of elements in sort_list[] */ + char unused[3]; + struct { + struct in_addr addr; + u_int32_t mask; + } sort_list[MAXRESOLVSORT]; +#ifdef __NetBSD__ + char lookups[MAXDNSLUS]; +#else + char pad[72]; /* on an i386 this means 512b total */ +#endif +}; + +#if 1 /* INET6 */ +/* + * replacement of __res_state, separated to keep binary compatibility. 
+ */ +struct __res_state_ext { + struct sockaddr_storage nsaddr_list[MAXNS]; + struct { + int af; /* address family for addr, mask */ + union { + struct in_addr ina; + struct in6_addr in6a; + } addr, mask; + } sort_list[MAXRESOLVSORT]; +}; +#endif + +/* + * Resolver options (keep these in synch with res_debug.c, please) + */ +#define RES_INIT 0x00000001 /* address initialized */ +#define RES_DEBUG 0x00000002 /* print debug messages */ +#define RES_AAONLY 0x00000004 /* authoritative answers only (!IMPL)*/ +#define RES_USEVC 0x00000008 /* use virtual circuit */ +#define RES_PRIMARY 0x00000010 /* query primary server only (!IMPL) */ +#define RES_IGNTC 0x00000020 /* ignore truncation errors */ +#define RES_RECURSE 0x00000040 /* recursion desired */ +#define RES_DEFNAMES 0x00000080 /* use default domain name */ +#define RES_STAYOPEN 0x00000100 /* Keep TCP socket open */ +#define RES_DNSRCH 0x00000200 /* search up local domain tree */ +#define RES_INSECURE1 0x00000400 /* type 1 security disabled */ +#define RES_INSECURE2 0x00000800 /* type 2 security disabled */ +#define RES_NOALIASES 0x00001000 /* shuts off HOSTALIASES feature */ +#define RES_USE_INET6 0x00002000 /* use/map IPv6 in gethostbyname() */ +#define RES_NOTLDQUERY 0x00004000 /* Don't query TLD names */ + +#define RES_DEFAULT (RES_RECURSE | RES_DEFNAMES | RES_DNSRCH) + +/* + * Resolver "pfcode" values. Used by dig. 
+ */ +#define RES_PRF_STATS 0x00000001 +#define RES_PRF_UPDATE 0x00000002 +#define RES_PRF_CLASS 0x00000004 +#define RES_PRF_CMD 0x00000008 +#define RES_PRF_QUES 0x00000010 +#define RES_PRF_ANS 0x00000020 +#define RES_PRF_AUTH 0x00000040 +#define RES_PRF_ADD 0x00000080 +#define RES_PRF_HEAD1 0x00000100 +#define RES_PRF_HEAD2 0x00000200 +#define RES_PRF_TTLID 0x00000400 +#define RES_PRF_HEADX 0x00000800 +#define RES_PRF_QUERY 0x00001000 +#define RES_PRF_REPLY 0x00002000 +#define RES_PRF_INIT 0x00004000 +/* 0x00008000 */ + +typedef enum { res_goahead, res_nextns, res_modified, res_done, res_error } + res_sendhookact; + +typedef res_sendhookact (*res_send_qhook)__P((struct sockaddr_in * const *ns, + const u_char **query, + int *querylen, + u_char *ans, + int anssiz, + int *resplen)); + +typedef res_sendhookact (*res_send_rhook)__P((const struct sockaddr_in *ns, + const u_char *query, + int querylen, + u_char *ans, + int anssiz, + int *resplen)); + +struct res_sym { + int number; /* Identifying number, like T_MX */ + char * name; /* Its symbolic name, like "MX" */ + char * humanname; /* Its fun name, like "mail exchanger" */ +}; + +extern struct __res_state _res; +#if 1 /* INET6 */ +extern struct __res_state_ext _res_ext; +#endif +extern const struct res_sym __p_class_syms[]; +extern const struct res_sym __p_type_syms[]; + +/* Private routines shared between libc/net, named, nslookup and others. 
*/ +#define res_hnok __res_hnok +#define res_ownok __res_ownok +#define res_mailok __res_mailok +#define res_dnok __res_dnok +#define sym_ston __sym_ston +#define sym_ntos __sym_ntos +#define sym_ntop __sym_ntop +#define b64_ntop __b64_ntop +#define b64_pton __b64_pton +#define loc_ntoa __loc_ntoa +#define loc_aton __loc_aton +#define fp_resstat __fp_resstat +#define p_query __p_query +#define dn_skipname __dn_skipname +#define fp_resstat __fp_resstat +#define fp_query __fp_query +#define fp_nquery __fp_nquery +#define hostalias __hostalias +#define putlong __putlong +#define putshort __putshort +#define p_class __p_class +#define p_time __p_time +#define p_type __p_type +#define p_query __p_query +#define p_cdnname __p_cdnname +#define p_section __p_section +#define p_cdname __p_cdname +#define p_fqnname __p_fqnname +#define p_fqname __p_fqname +#define p_rr __p_rr /* XXX: from FreeBSD2.2.7 */ +#define p_option __p_option +#define p_secstodate __p_secstodate +#define dn_count_labels __dn_count_labels +#define dn_comp __dn_comp +#define dn_expand __dn_expand +#define res_init __res_init +#define res_randomid __res_randomid +#define res_query __res_query +#define res_search __res_search +#define res_querydomain __res_querydomain +#define res_mkquery __res_mkquery +#define res_send __res_send +#define res_isourserver __res_isourserver +#define res_nameinquery __res_nameinquery +#define res_queriesmatch __res_queriesmatch +#define res_close __res_close +#define res_mkupdate __res_mkupdate +#define res_mkupdrec __res_mkupdrec +#define res_freeupdrec __res_freeupdrec + +__BEGIN_DECLS +int res_hnok __P((const char *)); +int res_ownok __P((const char *)); +int res_mailok __P((const char *)); +int res_dnok __P((const char *)); +int sym_ston __P((const struct res_sym *, const char *, int *)); +const char * sym_ntos __P((const struct res_sym *, int, int *)); +const char * sym_ntop __P((const struct res_sym *, int, int *)); +int b64_ntop __P((u_char const *, size_t, char *, 
size_t)); +int b64_pton __P((char const *, u_char *, size_t)); +int loc_aton __P((const char *, u_char *)); +const char * loc_ntoa __P((const u_char *, char *)); +int dn_skipname __P((const u_char *, const u_char *)); +void fp_resstat __P((struct __res_state *, FILE *)); +void fp_query __P((const u_char *, FILE *)); +void fp_nquery __P((const u_char *, int, FILE *)); +const char * hostalias __P((const char *)); +void putlong __P((u_int32_t, u_char *)); +void putshort __P((u_int16_t, u_char *)); +const char * p_class __P((int)); +const char * p_time __P((u_int32_t)); +const char * p_type __P((int)); +void p_query __P((const u_char *)); +const u_char * p_cdnname __P((const u_char *, const u_char *, int, FILE *)); +const u_char * p_cdname __P((const u_char *, const u_char *, FILE *)); +const u_char * p_fqnname __P((const u_char *, const u_char *, + int, char *, int)); +const u_char * p_fqname __P((const u_char *, const u_char *, FILE *)); +/* XXX: from FreeBSD2.2.7 */ +const u_char * p_rr __P((const u_char *, const u_char *, FILE *)); +const char * p_option __P((u_long)); +char * p_secstodate __P((u_long)); +int dn_count_labels __P((const char *)); +int dn_comp __P((const char *, u_char *, int, + u_char **, u_char **)); +int dn_expand __P((const u_char *, const u_char *, const u_char *, + char *, int)); +int res_init __P((void)); +u_int res_randomid __P((void)); +int res_query __P((const char *, int, int, u_char *, int)); +int res_search __P((const char *, int, int, u_char *, int)); +int res_querydomain __P((const char *, const char *, int, int, + u_char *, int)); +int res_mkquery __P((int, const char *, int, int, const u_char *, + int, const u_char *, u_char *, int)); +int res_send __P((const u_char *, int, u_char *, int)); +int res_isourserver __P((const struct sockaddr_in *)); +int res_nameinquery __P((const char *, int, int, + const u_char *, const u_char *)); +int res_queriesmatch __P((const u_char *, const u_char *, + const u_char *, const u_char *)); +void 
res_close __P((void)); +const char * p_section __P((int, int)); +/* XXX The following depend on the ns_updrec typedef in arpa/nameser.h */ +#ifdef _ARPA_NAMESER_H_ +int res_update __P((ns_updrec *)); +int res_mkupdate __P((ns_updrec *, u_char *, int)); +ns_updrec * res_mkupdrec __P((int, const char *, u_int, u_int, u_long)); +void res_freeupdrec __P((ns_updrec *)); +#endif +__END_DECLS + +#endif /* !_RESOLV_H_ */ diff --git a/bsd/include/rune.h b/bsd/include/rune.h new file mode 100644 index 000000000..123a638dc --- /dev/null +++ b/bsd/include/rune.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Paul Borman at Krystal Technologies. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)rune.h 8.1 (Berkeley) 6/27/93 + */ + +#ifndef _RUNE_H_ +#define _RUNE_H_ + +#include +#include + +#define _PATH_LOCALE "/usr/share/locale" + +#define _INVALID_RUNE _CurrentRuneLocale->invalid_rune + +#define __sgetrune _CurrentRuneLocale->sgetrune +#define __sputrune _CurrentRuneLocale->sputrune + +#define sgetrune(s, n, r) (*__sgetrune)((s), (n), (r)) +#define sputrune(c, s, n, r) (*__sputrune)((c), (s), (n), (r)) + +__BEGIN_DECLS +char *mbrune __P((const char *, rune_t)); +char *mbrrune __P((const char *, rune_t)); +char *mbmb __P((const char *, char *)); +long fgetrune __P((FILE *)); +int fputrune __P((rune_t, FILE *)); +int fungetrune __P((rune_t, FILE *)); +int setrunelocale __P((char *)); +void setinvalidrune __P((rune_t)); +__END_DECLS + +#endif /*! _RUNE_H_ */ diff --git a/bsd/include/runetype.h b/bsd/include/runetype.h new file mode 100644 index 000000000..eb0f2c132 --- /dev/null +++ b/bsd/include/runetype.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1993 + * The Regents of the University of California. 
All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Paul Borman at Krystal Technologies. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)runetype.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _RUNETYPE_H_ +#define _RUNETYPE_H_ + +#include +#include + +#ifndef _BSD_WCHAR_T_DEFINED_ +#define _BSD_WCHAR_T_DEFINED_ +#ifndef _ANSI_SOURCE +typedef _BSD_WCHAR_T_ rune_t; +#endif +typedef _BSD_WCHAR_T_ wchar_t; +#endif + +#define _CACHED_RUNES (1 <<8 ) /* Must be a power of 2 */ +#define _CRMASK (~(_CACHED_RUNES - 1)) + +/* + * The lower 8 bits of runetype[] contain the digit value of the rune. + */ +typedef struct { + rune_t min; /* First rune of the range */ + rune_t max; /* Last rune (inclusive) of the range */ + rune_t map; /* What first maps to in maps */ + unsigned long *types; /* Array of types in range */ +} _RuneEntry; + +typedef struct { + int nranges; /* Number of ranges stored */ + _RuneEntry *ranges; /* Pointer to the ranges */ +} _RuneRange; + +typedef struct { + char magic[8]; /* Magic saying what version we are */ + char encoding[32]; /* ASCII name of this encoding */ + + rune_t (*sgetrune) + __P((const char *, unsigned int, char const **)); + int (*sputrune) + __P((rune_t, char *, unsigned int, char **)); + rune_t invalid_rune; + + unsigned long runetype[_CACHED_RUNES]; + rune_t maplower[_CACHED_RUNES]; + rune_t mapupper[_CACHED_RUNES]; + + /* + * The following are to deal with Runes larger than _CACHED_RUNES - 1. + * Their data is actually contiguous with this structure so as to make + * it easier to read/write from/to disk. 
+ */ + _RuneRange runetype_ext; + _RuneRange maplower_ext; + _RuneRange mapupper_ext; + + void *variable; /* Data which depends on the encoding */ + int variable_len; /* how long that data is */ +} _RuneLocale; + +#define _RUNE_MAGIC_1 "RuneMagi" /* Indicates version 0 of RuneLocale */ + +extern _RuneLocale _DefaultRuneLocale; +extern _RuneLocale *_CurrentRuneLocale; + +#endif /* !_RUNETYPE_H_ */ diff --git a/bsd/include/semaphore.h b/bsd/include/semaphore.h new file mode 100644 index 000000000..bde387f52 --- /dev/null +++ b/bsd/include/semaphore.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_SEMAPHORE_H +#define _BSD_SEMAPHORE_H + +#include +#include + +#include + +#endif /* _BSD_SEMAPHORE_H */ diff --git a/bsd/include/setjmp.h b/bsd/include/setjmp.h new file mode 100644 index 000000000..51c8fd879 --- /dev/null +++ b/bsd/include/setjmp.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_SETJMP_H +#define _BSD_SETJMP_H + +#include + +#endif /* _BSD_SETJMP_H */ diff --git a/bsd/include/sgtty.h b/bsd/include/sgtty.h new file mode 100644 index 000000000..59081aa90 --- /dev/null +++ b/bsd/include/sgtty.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1985, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)sgtty.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef USE_OLD_TTY +#define USE_OLD_TTY +#endif +#include diff --git a/bsd/include/signal.h b/bsd/include/signal.h new file mode 100644 index 000000000..e89323ff4 --- /dev/null +++ b/bsd/include/signal.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)signal.h 8.3 (Berkeley) 3/30/94 + */ + +#ifndef _USER_SIGNAL_H +#define _USER_SIGNAL_H + +#include +#include +#include + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +extern __const char *__const sys_signame[NSIG]; +extern __const char *__const sys_siglist[NSIG]; +#endif + +__BEGIN_DECLS +int raise __P((int)); +#ifndef _ANSI_SOURCE +int kill __P((pid_t, int)); +int sigaction __P((int, const struct sigaction *, struct sigaction *)); +int sigaddset __P((sigset_t *, int)); +int sigdelset __P((sigset_t *, int)); +int sigemptyset __P((sigset_t *)); +int sigfillset __P((sigset_t *)); +int sigismember __P((const sigset_t *, int)); +int sigpending __P((sigset_t *)); +int sigprocmask __P((int, const sigset_t *, sigset_t *)); +int sigsuspend __P((const sigset_t *)); +#ifndef _POSIX_SOURCE +int killpg __P((pid_t, int)); +int sigblock __P((int)); +int siginterrupt __P((int, int)); +int sigpause __P((int)); +int sigreturn __P((struct sigcontext *)); +int sigsetmask __P((int)); +int sigvec __P((int, struct sigvec *, struct sigvec *)); +void psignal __P((unsigned int, const char *)); +#endif /* !_POSIX_SOURCE */ +#endif /* !_ANSI_SOURCE */ +__END_DECLS + +/* List definitions after function declarations, or Reiser cpp gets upset. */ +#define sigaddset(set, signo) (*(set) |= 1 << ((signo) - 1), 0) +#define sigdelset(set, signo) (*(set) &= ~(1 << ((signo) - 1)), 0) +#define sigemptyset(set) (*(set) = 0, 0) +#define sigfillset(set) (*(set) = ~(sigset_t)0, 0) +#define sigismember(set, signo) ((*(set) & (1 << ((signo) - 1))) != 0) + +#endif /* !_USER_SIGNAL_H */ diff --git a/bsd/include/stab.h b/bsd/include/stab.h new file mode 100644 index 000000000..e4ee83e46 --- /dev/null +++ b/bsd/include/stab.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stab.h 8.1 (Berkeley) 6/2/93 + */ + +/* + * The following are symbols used by various debuggers and by the Pascal + * compiler. Each of them must have one (or more) of the bits defined by + * the N_STAB mask set. 
+ */ + +#define N_GSYM 0x20 /* global symbol */ +#define N_FNAME 0x22 /* F77 function name */ +#define N_FUN 0x24 /* procedure name */ +#define N_STSYM 0x26 /* data segment variable */ +#define N_LCSYM 0x28 /* bss segment variable */ +#define N_MAIN 0x2a /* main function name */ +#define N_PC 0x30 /* global Pascal symbol */ +#define N_RSYM 0x40 /* register variable */ +#define N_SLINE 0x44 /* text segment line number */ +#define N_DSLINE 0x46 /* data segment line number */ +#define N_BSLINE 0x48 /* bss segment line number */ +#define N_SSYM 0x60 /* structure/union element */ +#define N_SO 0x64 /* main source file name */ +#define N_LSYM 0x80 /* stack variable */ +#define N_BINCL 0x82 /* include file beginning */ +#define N_SOL 0x84 /* included source file name */ +#define N_PSYM 0xa0 /* parameter variable */ +#define N_EINCL 0xa2 /* include file end */ +#define N_ENTRY 0xa4 /* alternate entry point */ +#define N_LBRAC 0xc0 /* left bracket */ +#define N_EXCL 0xc2 /* deleted include file */ +#define N_RBRAC 0xe0 /* right bracket */ +#define N_BCOMM 0xe2 /* begin common */ +#define N_ECOMM 0xe4 /* end common */ +#define N_ECOML 0xe8 /* end common (local name) */ +#define N_LENG 0xfe /* length of preceding entry */ diff --git a/bsd/include/stddef.h b/bsd/include/stddef.h new file mode 100644 index 000000000..dceacd35f --- /dev/null +++ b/bsd/include/stddef.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $OpenBSD: stddef.h,v 1.2 1997/09/21 10:45:52 niklas Exp $ */ +/* $NetBSD: stddef.h,v 1.4 1994/10/26 00:56:26 cgd Exp $ */ + +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stddef.h 5.5 (Berkeley) 4/3/91 + */ + +#ifndef __STDDEF_H__ +#define __STDDEF_H__ + +#include + +typedef _BSD_PTRDIFF_T_ ptrdiff_t; + +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef _BSD_SIZE_T_ size_t; +#endif + +#ifndef _BSD_WCHAR_T_DEFINED_ +#define _BSD_WCHAR_T_DEFINED_ +typedef _BSD_WCHAR_T_ wchar_t; +#ifndef _ANSI_SOURCE +typedef _BSD_RUNE_T_ rune_t; +#endif +#endif + +#ifndef NULL +#define NULL 0 +#endif + +#define offsetof(type, member) ((size_t)(&((type *)0)->member)) + +#endif /* __STDDEF_H__ */ diff --git a/bsd/include/stdio.h b/bsd/include/stdio.h new file mode 100644 index 000000000..2013f49a6 --- /dev/null +++ b/bsd/include/stdio.h @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Chris Torek. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stdio.h 8.5 (Berkeley) 4/29/95 + */ + +#ifndef _STDIO_H_ +#define _STDIO_H_ + +#if !defined(_ANSI_SOURCE) && !defined(__STRICT_ANSI__) +#include +#endif + +#include + +#include +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef _BSD_SIZE_T_ size_t; +#endif + +#ifndef NULL +#define NULL 0 +#endif + +/* + * This is fairly grotesque, but pure ANSI code must not inspect the + * innards of an fpos_t anyway. The library internally uses off_t, + * which we assume is exactly as big as eight chars. (When we switch + * to gcc 2.4 we will use __attribute__ here.) + * + * WARNING: the alignment constraints on an off_t and the struct below + * differ on (e.g.) the SPARC. Hence, the placement of an fpos_t object + * in a structure will change if fpos_t's are not aligned on 8-byte + * boundaries. THIS IS A CROCK, but for now there is no way around it. + */ +#if !defined(_ANSI_SOURCE) && !defined(__STRICT_ANSI__) +typedef off_t fpos_t; +#else +typedef struct __sfpos { + char _pos[8]; +} fpos_t; +#endif + +#define _FSTDIO /* Define for new stdio with functions. */ + +/* + * NB: to fit things in six character monocase externals, the stdio + * code uses the prefix `__s' for stdio objects, typically followed + * by a three-character attempt at a mnemonic. + */ + +/* stdio buffers */ +struct __sbuf { + unsigned char *_base; + int _size; +}; + +/* + * stdio state variables. 
+ * + * The following always hold: + * + * if (_flags&(__SLBF|__SWR)) == (__SLBF|__SWR), + * _lbfsize is -_bf._size, else _lbfsize is 0 + * if _flags&__SRD, _w is 0 + * if _flags&__SWR, _r is 0 + * + * This ensures that the getc and putc macros (or inline functions) never + * try to write or read from a file that is in `read' or `write' mode. + * (Moreover, they can, and do, automatically switch from read mode to + * write mode, and back, on "r+" and "w+" files.) + * + * _lbfsize is used only to make the inline line-buffered output stream + * code as compact as possible. + * + * _ub, _up, and _ur are used when ungetc() pushes back more characters + * than fit in the current _bf, or when ungetc() pushes back a character + * that does not match the previous one in _bf. When this happens, + * _ub._base becomes non-nil (i.e., a stream has ungetc() data iff + * _ub._base!=NULL) and _up and _ur save the current values of _p and _r. + * + * NB: see WARNING above before changing the layout of this structure! 
+ */ +typedef struct __sFILE { + unsigned char *_p; /* current position in (some) buffer */ + int _r; /* read space left for getc() */ + int _w; /* write space left for putc() */ + short _flags; /* flags, below; this FILE is free if 0 */ + short _file; /* fileno, if Unix descriptor, else -1 */ + struct __sbuf _bf; /* the buffer (at least 1 byte, if !NULL) */ + int _lbfsize; /* 0 or -_bf._size, for inline putc */ + + /* operations */ + void *_cookie; /* cookie passed to io functions */ + int (*_close) __P((void *)); + int (*_read) __P((void *, char *, int)); + fpos_t (*_seek) __P((void *, fpos_t, int)); + int (*_write) __P((void *, const char *, int)); + + /* separate buffer for long sequences of ungetc() */ + struct __sbuf _ub; /* ungetc buffer */ + unsigned char *_up; /* saved _p when _p is doing ungetc data */ + int _ur; /* saved _r when _r is counting ungetc data */ + + /* tricks to meet minimum requirements even when malloc() fails */ + unsigned char _ubuf[3]; /* guarantee an ungetc() buffer */ + unsigned char _nbuf[1]; /* guarantee a getc() buffer */ + + /* separate buffer for fgetln() when line crosses buffer boundary */ + struct __sbuf _lb; /* buffer for fgetln() */ + + /* Unix stdio files get aligned to block boundaries on fseek() */ + int _blksize; /* stat.st_blksize (may be != _bf._size) */ + fpos_t _offset; /* current lseek offset (see WARNING) */ +} FILE; + +__BEGIN_DECLS +extern FILE __sF[]; +__END_DECLS + +#define __SLBF 0x0001 /* line buffered */ +#define __SNBF 0x0002 /* unbuffered */ +#define __SRD 0x0004 /* OK to read */ +#define __SWR 0x0008 /* OK to write */ + /* RD and WR are never simultaneously asserted */ +#define __SRW 0x0010 /* open for reading & writing */ +#define __SEOF 0x0020 /* found EOF */ +#define __SERR 0x0040 /* found error */ +#define __SMBF 0x0080 /* _buf is from malloc */ +#define __SAPP 0x0100 /* fdopen()ed in append mode */ +#define __SSTR 0x0200 /* this is an sprintf/snprintf string */ +#define __SOPT 0x0400 /* do fseek() 
optimisation */ +#define __SNPT 0x0800 /* do not do fseek() optimisation */ +#define __SOFF 0x1000 /* set iff _offset is in fact correct */ +#define __SMOD 0x2000 /* true => fgetln modified _p text */ + +/* + * The following three definitions are for ANSI C, which took them + * from System V, which brilliantly took internal interface macros and + * made them official arguments to setvbuf(), without renaming them. + * Hence, these ugly _IOxxx names are *supposed* to appear in user code. + * + * Although numbered as their counterparts above, the implementation + * does not rely on this. + */ +#define _IOFBF 0 /* setvbuf should set fully buffered */ +#define _IOLBF 1 /* setvbuf should set line buffered */ +#define _IONBF 2 /* setvbuf should set unbuffered */ + +#define BUFSIZ 1024 /* size of buffer used by setbuf */ +#define EOF (-1) + +/* + * FOPEN_MAX is a minimum maximum, and is the number of streams that + * stdio can provide without attempting to allocate further resources + * (which could fail). Do not use this for anything. + */ + /* must be == _POSIX_STREAM_MAX */ +#define FOPEN_MAX 20 /* must be <= OPEN_MAX */ +#define FILENAME_MAX 1024 /* must be <= PATH_MAX */ + +/* System V/ANSI C; this is the wrong way to do this, do *not* use these. */ +#ifndef _ANSI_SOURCE +#define P_tmpdir "/var/tmp/" +#endif +#define L_tmpnam 1024 /* XXX must be == PATH_MAX */ +#define TMP_MAX 308915776 + +#ifndef SEEK_SET +#define SEEK_SET 0 /* set file offset to offset */ +#endif +#ifndef SEEK_CUR +#define SEEK_CUR 1 /* set file offset to current plus offset */ +#endif +#ifndef SEEK_END +#define SEEK_END 2 /* set file offset to EOF plus offset */ +#endif + +#define stdin (&__sF[0]) +#define stdout (&__sF[1]) +#define stderr (&__sF[2]) + +/* + * Functions defined in ANSI C standard. 
+ */ +__BEGIN_DECLS +void clearerr __P((FILE *)); +int fclose __P((FILE *)); +int feof __P((FILE *)); +int ferror __P((FILE *)); +int fflush __P((FILE *)); +int fgetc __P((FILE *)); +int fgetpos __P((FILE *, fpos_t *)); +char *fgets __P((char *, size_t, FILE *)); +FILE *fopen __P((const char *, const char *)); +int fprintf __P((FILE *, const char *, ...)); +int fputc __P((int, FILE *)); +int fputs __P((const char *, FILE *)); +size_t fread __P((void *, size_t, size_t, FILE *)); +FILE *freopen __P((const char *, const char *, FILE *)); +int fscanf __P((FILE *, const char *, ...)); +int fseek __P((FILE *, long, int)); +int fsetpos __P((FILE *, const fpos_t *)); +long ftell __P((FILE *)); +size_t fwrite __P((const void *, size_t, size_t, FILE *)); +int getc __P((FILE *)); +int getchar __P((void)); +char *gets __P((char *)); +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +extern int sys_nerr; /* perror(3) external variables */ +extern __const char *__const sys_errlist[]; +#endif +void perror __P((const char *)); +int printf __P((const char *, ...)); +int putc __P((int, FILE *)); +int putchar __P((int)); +int puts __P((const char *)); +int remove __P((const char *)); +int rename __P((const char *, const char *)); +void rewind __P((FILE *)); +int scanf __P((const char *, ...)); +void setbuf __P((FILE *, char *)); +int setvbuf __P((FILE *, char *, int, size_t)); +int sprintf __P((char *, const char *, ...)); +int sscanf __P((const char *, const char *, ...)); +FILE *tmpfile __P((void)); +char *tmpnam __P((char *)); +int ungetc __P((int, FILE *)); +int vfprintf __P((FILE *, const char *, _BSD_VA_LIST_)); +int vprintf __P((const char *, _BSD_VA_LIST_)); +int vsprintf __P((char *, const char *, _BSD_VA_LIST_)); +__END_DECLS + +/* + * Functions defined in POSIX 1003.1. 
+ */ +#ifndef _ANSI_SOURCE +#define L_cuserid 9 /* size for cuserid(); UT_NAMESIZE + 1 */ +#define L_ctermid 1024 /* size for ctermid(); PATH_MAX */ + +__BEGIN_DECLS +char *ctermid __P((char *)); +FILE *fdopen __P((int, const char *)); +int fileno __P((FILE *)); +__END_DECLS +#endif /* not ANSI */ + +/* + * Routines that are purely local. + */ +#if !defined (_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +__BEGIN_DECLS +char *fgetln __P((FILE *, size_t *)); +int fpurge __P((FILE *)); +int fseeko __P((FILE *, fpos_t, int)); +fpos_t ftello __P((FILE *)); +int getw __P((FILE *)); +int pclose __P((FILE *)); +FILE *popen __P((const char *, const char *)); +int putw __P((int, FILE *)); +void setbuffer __P((FILE *, char *, int)); +int setlinebuf __P((FILE *)); +char *tempnam __P((const char *, const char *)); +int snprintf __P((char *, size_t, const char *, ...)); +int vsnprintf __P((char *, size_t, const char *, _BSD_VA_LIST_)); +int vscanf __P((const char *, _BSD_VA_LIST_)); +int vsscanf __P((const char *, const char *, _BSD_VA_LIST_)); +FILE *zopen __P((const char *, const char *, int)); +__END_DECLS + +/* + * This is a #define because the function is used internally and + * (unlike vfscanf) the name __svfscanf is guaranteed not to collide + * with a user function when _ANSI_SOURCE or _POSIX_SOURCE is defined. + */ +#define vfscanf __svfscanf + +/* + * Stdio function-access interface. + */ +__BEGIN_DECLS +FILE *funopen __P((const void *, + int (*)(void *, char *, int), + int (*)(void *, const char *, int), + fpos_t (*)(void *, fpos_t, int), + int (*)(void *))); +__END_DECLS +#define fropen(cookie, fn) funopen(cookie, fn, 0, 0, 0) +#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0) +#endif /* !_ANSI_SOURCE && !_POSIX_SOURCE */ + +/* + * Functions internal to the implementation. 
+ */ +__BEGIN_DECLS +int __srget __P((FILE *)); +int __svfscanf __P((FILE *, const char *, _BSD_VA_LIST_)); +int __swbuf __P((int, FILE *)); +__END_DECLS + +/* + * The __sfoo macros are here so that we can + * define function versions in the C library. + */ +#define __sgetc(p) (--(p)->_r < 0 ? __srget(p) : (int)(*(p)->_p++)) +#if defined(__GNUC__) && defined(__STDC__) +static __inline int __sputc(int _c, FILE *_p) { + if (--_p->_w >= 0 || (_p->_w >= _p->_lbfsize && (char)_c != '\n')) + return (*_p->_p++ = _c); + else + return (__swbuf(_c, _p)); +} +#else +/* + * This has been tuned to generate reasonable code on the vax using pcc. + */ +#define __sputc(c, p) \ + (--(p)->_w < 0 ? \ + (p)->_w >= (p)->_lbfsize ? \ + (*(p)->_p = (c)), *(p)->_p != '\n' ? \ + (int)*(p)->_p++ : \ + __swbuf('\n', p) : \ + __swbuf((int)(c), p) : \ + (*(p)->_p = (c), (int)*(p)->_p++)) +#endif + +#define __sfeof(p) (((p)->_flags & __SEOF) != 0) +#define __sferror(p) (((p)->_flags & __SERR) != 0) +#define __sclearerr(p) ((void)((p)->_flags &= ~(__SERR|__SEOF))) +#define __sfileno(p) ((p)->_file) + +#define feof(p) __sfeof(p) +#define ferror(p) __sferror(p) +#define clearerr(p) __sclearerr(p) + +#ifndef _ANSI_SOURCE +#define fileno(p) __sfileno(p) +#endif + +#ifndef lint +#define getc(fp) __sgetc(fp) +#define putc(x, fp) __sputc(x, fp) +#endif /* lint */ + +#define getchar() getc(stdin) +#define putchar(x) putc(x, stdout) +#endif /* _STDIO_H_ */ diff --git a/bsd/include/stdlib.h b/bsd/include/stdlib.h new file mode 100644 index 000000000..b08b58730 --- /dev/null +++ b/bsd/include/stdlib.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stdlib.h 8.5 (Berkeley) 5/19/95 + */ + +#ifndef _STDLIB_H_ +#define _STDLIB_H_ + +#include + +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef _BSD_SIZE_T_ size_t; +#endif + +#ifndef _BSD_WCHAR_T_DEFINED_ +#define _BSD_WCHAR_T_DEFINED_ +#ifndef _ANSI_SOURCE +typedef _BSD_WCHAR_T_ rune_t; +#endif +typedef _BSD_WCHAR_T_ wchar_t; +#endif + +typedef struct { + int quot; /* quotient */ + int rem; /* remainder */ +} div_t; + +typedef struct { + long quot; /* quotient */ + long rem; /* remainder */ +} ldiv_t; + +#ifndef NULL +#define NULL 0 +#endif + +#define EXIT_FAILURE 1 +#define EXIT_SUCCESS 0 + +#define RAND_MAX 0x7fffffff + +extern int __mb_cur_max; +#define MB_CUR_MAX __mb_cur_max + +#include + +__BEGIN_DECLS +__dead void + abort __P((void)); +__pure int + abs __P((int)); +int atexit __P((void (*)(void))); +double atof __P((const char *)); +int atoi __P((const char *)); +long atol __P((const char *)); +void *bsearch __P((const void *, const void *, size_t, + size_t, int (*)(const void *, const void *))); +void *calloc __P((size_t, size_t)); +__pure div_t + div __P((int, int)); +__dead void + exit __P((int)); +void free __P((void *)); +char *getenv __P((const char *)); +__pure long + labs __P((long)); +__pure ldiv_t + ldiv __P((long, long)); +void *malloc __P((size_t)); +void qsort __P((void *, size_t, size_t, + int (*)(const void *, const void *))); +int rand __P((void)); +void *realloc __P((void *, size_t)); 
+void srand __P((unsigned)); +double strtod __P((const char *, char **)); +long strtol __P((const char *, char **, int)); +unsigned long + strtoul __P((const char *, char **, int)); +int system __P((const char *)); + +/* These are currently just stubs. */ +int mblen __P((const char *, size_t)); +size_t mbstowcs __P((wchar_t *, const char *, size_t)); +int wctomb __P((char *, wchar_t)); +int mbtowc __P((wchar_t *, const char *, size_t)); +size_t wcstombs __P((char *, const wchar_t *, size_t)); + +#ifndef _ANSI_SOURCE +int putenv __P((const char *)); +int setenv __P((const char *, const char *, int)); +#endif + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +void *alloca __P((size_t)); /* built-in for gcc */ + /* getcap(3) functions */ +char *getbsize __P((int *, long *)); +char *cgetcap __P((char *, char *, int)); +int cgetclose __P((void)); +int cgetent __P((char **, char **, char *)); +int cgetfirst __P((char **, char **)); +int cgetmatch __P((char *, char *)); +int cgetnext __P((char **, char **)); +int cgetnum __P((char *, char *, long *)); +int cgetset __P((char *)); +int cgetstr __P((char *, char *, char **)); +int cgetustr __P((char *, char *, char **)); + +int daemon __P((int, int)); +char *devname __P((int, int)); +int getloadavg __P((double [], int)); + +char *group_from_gid __P((unsigned long, int)); +int heapsort __P((void *, size_t, size_t, + int (*)(const void *, const void *))); +char *initstate __P((unsigned long, char *, long)); +int mergesort __P((void *, size_t, size_t, + int (*)(const void *, const void *))); +int radixsort __P((const unsigned char **, int, const unsigned char *, + unsigned)); +int sradixsort __P((const unsigned char **, int, const unsigned char *, + unsigned)); +long random __P((void)); +char *realpath __P((const char *, char resolved_path[])); +char *setstate __P((char *)); +void srandom __P((unsigned long)); +char *user_from_uid __P((unsigned long, int)); +#ifndef __STRICT_ANSI__ +long long + strtoq __P((const char *, 
char **, int)); +unsigned long long + strtouq __P((const char *, char **, int)); +#endif +void unsetenv __P((const char *)); +#endif +__END_DECLS + +#endif /* _STDLIB_H_ */ diff --git a/bsd/include/string.h b/bsd/include/string.h new file mode 100644 index 000000000..285a6dd80 --- /dev/null +++ b/bsd/include/string.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)string.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _STRING_H_ +#define _STRING_H_ +#include + +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef _BSD_SIZE_T_ size_t; +#endif + +#ifndef NULL +#define NULL 0 +#endif + +#include + +__BEGIN_DECLS +void *memchr __P((const void *, int, size_t)); +int memcmp __P((const void *, const void *, size_t)); +void *memcpy __P((void *, const void *, size_t)); +void *memmove __P((void *, const void *, size_t)); +void *memset __P((void *, int, size_t)); +char *strcat __P((char *, const char *)); +char *strchr __P((const char *, int)); +int strcmp __P((const char *, const char *)); +int strcoll __P((const char *, const char *)); +char *strcpy __P((char *, const char *)); +size_t strcspn __P((const char *, const char *)); +char *strerror __P((int)); +size_t strlen __P((const char *)); +char *strncat __P((char *, const char *, size_t)); +int strncmp __P((const char *, const char *, size_t)); +char *strncpy __P((char *, const char *, size_t)); +char *strpbrk __P((const char *, const char *)); +char *strrchr __P((const char *, int)); +size_t strspn __P((const char *, const char *)); +char *strstr __P((const char *, const char *)); +char *strtok __P((char *, const char *)); +size_t strxfrm __P((char *, const char *, size_t)); + +/* Nonstandard routines */ +#ifndef _ANSI_SOURCE +int bcmp __P((const void *, const void *, size_t)); +void bcopy __P((const void *, void *, size_t)); +void bzero __P((void *, size_t)); +int ffs __P((int)); +char *index __P((const char *, int)); +void *memccpy __P((void *, const void *, int, size_t)); +char *rindex __P((const char *, int)); +int strcasecmp __P((const char *, const char *)); +char *strdup __P((const char *)); +void strmode __P((int, char *)); +int strncasecmp __P((const char *, const char *, size_t)); +char *strsep __P((char **, const char *)); +void swab __P((const void *, void *, size_t)); +#endif +__END_DECLS + +#endif /* _STRING_H_ */ diff --git a/bsd/include/strings.h 
b/bsd/include/strings.h new file mode 100644 index 000000000..56c3ae4a6 --- /dev/null +++ b/bsd/include/strings.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)strings.h 8.1 (Berkeley) 6/2/93 + */ + +#include diff --git a/bsd/include/struct.h b/bsd/include/struct.h new file mode 100644 index 000000000..ba8ac59be --- /dev/null +++ b/bsd/include/struct.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1983, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)struct.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _STRUCT_H_ +#define _STRUCT_H_ + +/* Offset of the field in the structure. */ +#define fldoff(name, field) \ + ((int)&(((struct name *)0)->field)) + +/* Size of the field in the structure. */ +#define fldsiz(name, field) \ + (sizeof(((struct name *)0)->field)) + +/* Address of the structure from a field. */ +#define strbase(name, addr, field) \ + ((struct name *)((char *)(addr) - fldoff(name, field))) + +#endif /* !_STRUCT_H_ */ diff --git a/bsd/include/sysexits.h b/bsd/include/sysexits.h new file mode 100644 index 000000000..0a960a06e --- /dev/null +++ b/bsd/include/sysexits.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)sysexits.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYSEXITS_H_ +#define _SYSEXITS_H_ + +/* + * SYSEXITS.H -- Exit status codes for system programs. + * + * This include file attempts to categorize possible error + * exit statuses for system programs, notably delivermail + * and the Berkeley network. + * + * Error numbers begin at EX__BASE to reduce the possibility of + * clashing with other exit statuses that random programs may + * already return. The meaning of the codes is approximately + * as follows: + * + * EX_USAGE -- The command was used incorrectly, e.g., with + * the wrong number of arguments, a bad flag, a bad + * syntax in a parameter, or whatever. + * EX_DATAERR -- The input data was incorrect in some way. + * This should only be used for user's data & not + * system files. + * EX_NOINPUT -- An input file (not a system file) did not + * exist or was not readable. This could also include + * errors like "No message" to a mailer (if it cared + * to catch it). + * EX_NOUSER -- The user specified did not exist. This might + * be used for mail addresses or remote logins. + * EX_NOHOST -- The host specified did not exist. This is used + * in mail addresses or network requests. + * EX_UNAVAILABLE -- A service is unavailable. This can occur + * if a support program or file does not exist. This + * can also be used as a catchall message when something + * you wanted to do doesn't work, but you don't know + * why. + * EX_SOFTWARE -- An internal software error has been detected. + * This should be limited to non-operating system related + * errors as possible. + * EX_OSERR -- An operating system error has been detected. + * This is intended to be used for such things as "cannot + * fork", "cannot create pipe", or the like. It includes + * things like getuid returning a user that does not + * exist in the passwd file. + * EX_OSFILE -- Some system file (e.g., /etc/passwd, /etc/utmp, + * etc.) 
does not exist, cannot be opened, or has some + * sort of error (e.g., syntax error). + * EX_CANTCREAT -- A (user specified) output file cannot be + * created. + * EX_IOERR -- An error occurred while doing I/O on some file. + * EX_TEMPFAIL -- temporary failure, indicating something that + * is not really an error. In sendmail, this means + * that a mailer (e.g.) could not create a connection, + * and the request should be reattempted later. + * EX_PROTOCOL -- the remote system returned something that + * was "not possible" during a protocol exchange. + * EX_NOPERM -- You did not have sufficient permission to + * perform the operation. This is not intended for + * file system problems, which should use NOINPUT or + * CANTCREAT, but rather for higher level permissions. + */ + +#define EX_OK 0 /* successful termination */ + +#define EX__BASE 64 /* base value for error messages */ + +#define EX_USAGE 64 /* command line usage error */ +#define EX_DATAERR 65 /* data format error */ +#define EX_NOINPUT 66 /* cannot open input */ +#define EX_NOUSER 67 /* addressee unknown */ +#define EX_NOHOST 68 /* host name unknown */ +#define EX_UNAVAILABLE 69 /* service unavailable */ +#define EX_SOFTWARE 70 /* internal software error */ +#define EX_OSERR 71 /* system error (e.g., can't fork) */ +#define EX_OSFILE 72 /* critical OS file missing */ +#define EX_CANTCREAT 73 /* can't create (user) output file */ +#define EX_IOERR 74 /* input/output error */ +#define EX_TEMPFAIL 75 /* temp failure; user is invited to retry */ +#define EX_PROTOCOL 76 /* remote error in protocol */ +#define EX_NOPERM 77 /* permission denied */ +#define EX_CONFIG 78 /* configuration error */ + +#define EX__MAX 78 /* maximum listed value */ + +#endif /* !_SYSEXITS_H_ */ diff --git a/bsd/include/syslog.h b/bsd/include/syslog.h new file mode 100644 index 000000000..455fe67ce --- /dev/null +++ b/bsd/include/syslog.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/bsd/include/tar.h b/bsd/include/tar.h new file mode 100644 index 000000000..37d0edc28 --- /dev/null +++ b/bsd/include/tar.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Chuck Karish of Mindcraft, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tar.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _TAR_H +#define _TAR_H + +#define TMAGIC "ustar" /* ustar and a null */ +#define TMAGLEN 6 +#define TVERSION "00" /* 00 and no null */ +#define TVERSLEN 2 + +/* Values used in typeflag field */ +#define REGTYPE '0' /* Regular file */ +#define AREGTYPE '\0' /* Regular file */ +#define LNKTYPE '1' /* Link */ +#define SYMTYPE '2' /* Reserved */ +#define CHRTYPE '3' /* Character special */ +#define BLKTYPE '4' /* Block special */ +#define DIRTYPE '5' /* Directory */ +#define FIFOTYPE '6' /* FIFO special */ +#define CONTTYPE '7' /* Reserved */ + +/* Bits used in the mode field - values in octal */ +#define TSUID 04000 /* Set UID on execution */ +#define TSGID 02000 /* Set GID on execution */ +#define TSVTX 01000 /* Reserved */ + /* File permissions */ +#define TUREAD 00400 /* Read by owner */ +#define TUWRITE 00200 /* Write by owner */ +#define TUEXEC 00100 /* Execute/Search by owner */ +#define TGREAD 00040 /* Read by group */ +#define TGWRITE 00020 /* Write by group */ +#define TGEXEC 00010 /* Execute/Search by group */ +#define TOREAD 00004 /* Read by other */ +#define TOWRITE 00002 /* Write by other */ +#define TOEXEC 00001 /* Execute/Search by other */ + +#endif diff --git a/bsd/include/termios.h b/bsd/include/termios.h new file mode 100644 index 000000000..7ab55fe45 --- /dev/null +++ b/bsd/include/termios.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include <sys/termios.h> + diff --git a/bsd/include/time.h b/bsd/include/time.h new file mode 100644 index 000000000..d5552cb67 --- /dev/null +++ b/bsd/include/time.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc.
and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)time.h 8.3 (Berkeley) 1/21/94 + */ + +#ifndef _TIME_H_ +#define _TIME_H_ + +#include <machine/ansi.h> + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef _BSD_CLOCK_T_DEFINED_ +#define _BSD_CLOCK_T_DEFINED_ +typedef _BSD_CLOCK_T_ clock_t; +#endif + +#ifndef _BSD_TIME_T_DEFINED_ +#define _BSD_TIME_T_DEFINED_ +typedef _BSD_TIME_T_ time_t; +#endif + +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef _BSD_SIZE_T_ size_t; +#endif + +struct tm { + int tm_sec; /* seconds after the minute [0-60] */ + int tm_min; /* minutes after the hour [0-59] */ + int tm_hour; /* hours since midnight [0-23] */ + int tm_mday; /* day of the month [1-31] */ + int tm_mon; /* months since January [0-11] */ + int tm_year; /* years since 1900 */ + int tm_wday; /* days since Sunday [0-6] */ + int tm_yday; /* days since January 1 [0-365] */ + int tm_isdst; /* Daylight Savings Time flag */ + long tm_gmtoff; /* offset from CUT in seconds */ + char *tm_zone; /* timezone abbreviation */ +}; + +#include <machine/limits.h> /* Include file containing CLK_TCK. */ + +#define CLOCKS_PER_SEC (CLK_TCK) + +#include <sys/cdefs.h> + +__BEGIN_DECLS +char *asctime __P((const struct tm *)); +clock_t clock __P((void)); +char *ctime __P((const time_t *)); +double difftime __P((time_t, time_t)); +struct tm *gmtime __P((const time_t *)); +struct tm *localtime __P((const time_t *)); +time_t mktime __P((struct tm *)); +size_t strftime __P((char *, size_t, const char *, const struct tm *)); +time_t time __P((time_t *)); + +#ifndef _ANSI_SOURCE +void tzset __P((void)); +#endif /* not ANSI */ + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +char *timezone __P((int, int)); +void tzsetwall __P((void)); +#endif /* neither ANSI nor POSIX */ +__END_DECLS + +#endif /* !_TIME_H_ */ diff --git a/bsd/include/ttyent.h b/bsd/include/ttyent.h new file mode 100644 index 000000000..25ace7f44 --- /dev/null +++ b/bsd/include/ttyent.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ttyent.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _TTYENT_H_ +#define _TTYENT_H_ + +#define _PATH_TTYS "/etc/ttys" + +#define _TTYS_OFF "off" +#define _TTYS_ON "on" +#define _TTYS_SECURE "secure" +#define _TTYS_WINDOW "window" +#define _TTYS_ONERROR "onerror" +#define _TTYS_ONOPTION "onoption" + +struct ttyent { + char *ty_name; /* terminal device name */ + char *ty_getty; /* command to execute, usually getty */ + char *ty_type; /* terminal type for termcap */ +#define TTY_ON 0x01 /* enable logins (start ty_getty program) */ +#define TTY_SECURE 0x02 /* allow uid of 0 to login */ + int ty_status; /* status flags */ + char *ty_window; /* command to start up window manager */ + char *ty_onerror; /* command to execute after getty failure */ + char *ty_onoption; /* command to execute after console login */ + char *ty_comment; /* comment field */ +}; + +#include + +__BEGIN_DECLS +struct ttyent *getttyent __P((void)); +struct ttyent *getttynam __P((const char *)); +int setttyent __P((void)); +int endttyent __P((void)); +__END_DECLS + +#endif /* !_TTYENT_H_ */ diff --git a/bsd/include/tzfile.h b/bsd/include/tzfile.h new file mode 100644 index 000000000..53bc18087 --- /dev/null +++ 
b/bsd/include/tzfile.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Arthur David Olson of the National Cancer Institute. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tzfile.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _TZFILE_H_ +#define _TZFILE_H_ + +/* + * Information about time zone files. + */ + /* Time zone object file directory */ +#define TZDIR "/usr/share/zoneinfo" +#define TZDEFAULT "/etc/localtime" +#define TZDEFRULES "posixrules" + +/* +** Each file begins with. . . +*/ + +struct tzhead { + char tzh_reserved[24]; /* reserved for future use */ + char tzh_ttisstdcnt[4]; /* coded number of trans. time flags */ + char tzh_leapcnt[4]; /* coded number of leap seconds */ + char tzh_timecnt[4]; /* coded number of transition times */ + char tzh_typecnt[4]; /* coded number of local time types */ + char tzh_charcnt[4]; /* coded number of abbr. chars */ +}; + +/* +** . . .followed by. . . 
+** +** tzh_timecnt (char [4])s coded transition times a la time(2) +** tzh_timecnt (unsigned char)s types of local time starting at above +** tzh_typecnt repetitions of +** one (char [4]) coded GMT offset in seconds +** one (unsigned char) used to set tm_isdst +** one (unsigned char) that's an abbreviation list index +** tzh_charcnt (char)s '\0'-terminated zone abbreviations +** tzh_leapcnt repetitions of +** one (char [4]) coded leap second transition times +** one (char [4]) total correction after above +** tzh_ttisstdcnt (char)s indexed by type; if TRUE, transition +** time is standard time, if FALSE, +** transition time is wall clock time +** if absent, transition times are +** assumed to be wall clock time +*/ + +/* +** In the current implementation, "tzset()" refuses to deal with files that +** exceed any of the limits below. +*/ + +/* +** The TZ_MAX_TIMES value below is enough to handle a bit more than a +** year's worth of solar time (corrected daily to the nearest second) or +** 138 years of Pacific Presidential Election time +** (where there are three time zone transitions every fourth year). 
+*/ +#define TZ_MAX_TIMES 370 + +#define NOSOLAR /* 4BSD doesn't currently handle solar time */ + +#ifndef NOSOLAR +#define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ +#else +#define TZ_MAX_TYPES 20 /* Maximum number of local time types */ +#endif + +#define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ + +#define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ + +#define SECSPERMIN 60 +#define MINSPERHOUR 60 +#define HOURSPERDAY 24 +#define DAYSPERWEEK 7 +#define DAYSPERNYEAR 365 +#define DAYSPERLYEAR 366 +#define SECSPERHOUR (SECSPERMIN * MINSPERHOUR) +#define SECSPERDAY ((long) SECSPERHOUR * HOURSPERDAY) +#define MONSPERYEAR 12 + +#define TM_SUNDAY 0 +#define TM_MONDAY 1 +#define TM_TUESDAY 2 +#define TM_WEDNESDAY 3 +#define TM_THURSDAY 4 +#define TM_FRIDAY 5 +#define TM_SATURDAY 6 + +#define TM_JANUARY 0 +#define TM_FEBRUARY 1 +#define TM_MARCH 2 +#define TM_APRIL 3 +#define TM_MAY 4 +#define TM_JUNE 5 +#define TM_JULY 6 +#define TM_AUGUST 7 +#define TM_SEPTEMBER 8 +#define TM_OCTOBER 9 +#define TM_NOVEMBER 10 +#define TM_DECEMBER 11 + +#define TM_YEAR_BASE 1900 + +#define EPOCH_YEAR 1970 +#define EPOCH_WDAY TM_THURSDAY + +/* +** Accurate only for the past couple of centuries; +** that will probably do. +*/ + +#define isleap(y) (((y) % 4) == 0 && ((y) % 100) != 0 || ((y) % 400) == 0) + +#endif /* !_TZFILE_H_ */ diff --git a/bsd/include/unistd.h b/bsd/include/unistd.h new file mode 100644 index 000000000..fb2766835 --- /dev/null +++ b/bsd/include/unistd.h @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1998-1999 Apple Computer, Inc. All Rights Reserved + * Copyright (c) 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)unistd.h 8.12 (Berkeley) 4/27/95 + * + * Copyright (c) 1998 Apple Computer, Inc. + * All Rights Reserved + */ + +/* History: + 7/14/99 EKN at Apple fixed getdirentriesattr from getdirentryattr + 3/26/98 CHW at Apple added real interface to searchfs call + 3/5/98 CHW at Apple added hfs semantic system calls headers +*/ + +#ifndef _UNISTD_H_ +#define _UNISTD_H_ + +#include <sys/types.h> +#include <sys/cdefs.h> +#include <sys/unistd.h> + +#define STDIN_FILENO 0 /* standard input file descriptor */ +#define STDOUT_FILENO 1 /* standard output file descriptor */ +#define STDERR_FILENO 2 /* standard error file descriptor */ + +#ifndef NULL +#define NULL 0 /* null pointer constant */ +#endif + +#define _POSIX_THREADS /* We support pthreads */ + +__BEGIN_DECLS +__dead void + _exit __P((int)); +int access __P((const char *, int)); +unsigned int alarm __P((unsigned int)); +int chdir __P((const char *)); +int chown __P((const char *, uid_t, gid_t)); +int close __P((int)); +size_t confstr __P((int, char *, size_t)); +int dup __P((int)); +int dup2 __P((int, int)); +int execl __P((const char *, const char *, ...)); +int execle __P((const char *, const char *, ...)); +int execlp __P((const char *, const char *, ...)); +int execv __P((const char *, char * const *)); +int execve __P((const char *, char * const *, char * const *)); +int execvp __P((const char *, char * const *)); +pid_t fork __P((void)); +long fpathconf __P((int, int)); +char *getcwd __P((char *, size_t)); +gid_t getegid
__P((void)); +uid_t geteuid __P((void)); +gid_t getgid __P((void)); +int getgroups __P((int, gid_t [])); +char *getlogin __P((void)); +pid_t getpgrp __P((void)); +pid_t getpid __P((void)); +pid_t getppid __P((void)); +uid_t getuid __P((void)); +int isatty __P((int)); +int link __P((const char *, const char *)); +off_t lseek __P((int, off_t, int)); +long pathconf __P((const char *, int)); +int pause __P((void)); +int pipe __P((int *)); +ssize_t read __P((int, void *, size_t)); +int rmdir __P((const char *)); +int setgid __P((gid_t)); +int setpgid __P((pid_t, pid_t)); +pid_t setsid __P((void)); +int setuid __P((uid_t)); +unsigned int sleep __P((unsigned int)); +long sysconf __P((int)); +pid_t tcgetpgrp __P((int)); +int tcsetpgrp __P((int, pid_t)); +char *ttyname __P((int)); +int unlink __P((const char *)); +ssize_t write __P((int, const void *, size_t)); + +extern char *optarg; /* getopt(3) external variables */ +extern int optind, opterr, optopt, optreset; +int getopt __P((int, char * const [], const char *)); + +#ifndef _POSIX_SOURCE +#ifdef __STDC__ +struct timeval; /* select(2) */ +#endif +int acct __P((const char *)); +int async_daemon __P((void)); +char *brk __P((const char *)); +int chroot __P((const char *)); +char *crypt __P((const char *, const char *)); +int des_cipher __P((const char *, char *, long, int)); +int des_setkey __P((const char *key)); +int encrypt __P((char *, int)); +void endusershell __P((void)); +int exect __P((const char *, char * const *, char * const *)); +int fchdir __P((int)); +int fchown __P((int, int, int)); +int fsync __P((int)); +int ftruncate __P((int, off_t)); +int getdtablesize __P((void)); +int getgrouplist __P((const char *, int, int *, int *)); +long gethostid __P((void)); +int gethostname __P((char *, int)); +mode_t getmode __P((const void *, mode_t)); +__pure int + getpagesize __P((void)); +char *getpass __P((const char *)); +char *getusershell __P((void)); +char *getwd __P((char *)); /* obsoleted by getcwd() */ +int 
initgroups __P((const char *, int)); +int iruserok __P((unsigned long, int, const char *, const char *)); +int mknod __P((const char *, mode_t, dev_t)); +int mkstemp __P((char *)); +char *mktemp __P((char *)); +int nfssvc __P((int, void *)); +int nice __P((int)); +#if 0 +void psignal __P((unsigned int, const char *)); +extern __const char *__const sys_siglist[]; +#else +#include +#endif +int profil __P((char *, int, int, int)); +int rcmd __P((char **, int, const char *, + const char *, const char *, int *)); +char *re_comp __P((const char *)); +int re_exec __P((const char *)); +int readlink __P((const char *, char *, int)); +int reboot __P((int)); +int revoke __P((const char *)); +int rresvport __P((int *)); +int ruserok __P((const char *, int, const char *, const char *)); +char *sbrk __P((int)); +int select __P((int, fd_set *, fd_set *, fd_set *, struct timeval *)); +int setegid __P((gid_t)); +int seteuid __P((uid_t)); +int setgroups __P((int, const gid_t *)); +void sethostid __P((long)); +int sethostname __P((const char *, int)); +int setkey __P((const char *)); +int setlogin __P((const char *)); +void *setmode __P((const char *)); +int setpgrp __P((pid_t pid, pid_t pgrp)); /* obsoleted by setpgid() */ +int setregid __P((gid_t, gid_t)); +int setreuid __P((uid_t, uid_t)); +int setrgid __P((gid_t)); +int setruid __P((uid_t)); +void setusershell __P((void)); +int swapon __P((const char *)); +int symlink __P((const char *, const char *)); +void sync __P((void)); +int syscall __P((int, ...)); +int truncate __P((const char *, off_t)); +int ttyslot __P((void)); +unsigned int ualarm __P((unsigned int, unsigned int)); +int unwhiteout __P((const char *)); +void usleep __P((unsigned int)); +void *valloc __P((size_t)); /* obsoleted by malloc() */ +pid_t vfork __P((void)); + +extern char *suboptarg; /* getsubopt(3) external variable */ +int getsubopt __P((char **, char * const *, char **)); + +/* HFS & HFS Plus semantics system calls go here */ +int getattrlist __P((const 
char*,void*,void*,size_t,unsigned long)); +int setattrlist __P((const char*,void*,void*,size_t,unsigned long)); +int exchangedata __P((const char*,const char*,unsigned long)); +int checkuseraccess __P((const char*,uid_t,gid_t*,int,int,unsigned long)); +int getdirentriesattr __P((int,void*,void*,size_t,unsigned long*,unsigned long*,unsigned long*,unsigned long)); +int searchfs __P((const char*,void*,void*,unsigned long,unsigned long,void*)); + +int fsctl __P((const char *,unsigned long,void*,unsigned long)); + + +#endif /* !_POSIX_SOURCE */ +__END_DECLS + +#endif /* !_UNISTD_H_ */ diff --git a/bsd/include/util.h b/bsd/include/util.h new file mode 100644 index 000000000..42dbdabfa --- /dev/null +++ b/bsd/include/util.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: util.h,v 1.10 1997/12/01 02:25:46 lukem Exp $ */ + +/*- + * Copyright (c) 1995 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _UTIL_H_ +#define _UTIL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#define PIDLOCK_NONBLOCK 1 +#define PIDLOCK_USEHOSTNAME 2 + +#define FPARSELN_UNESCESC 0x01 +#define FPARSELN_UNESCCONT 0x02 +#define FPARSELN_UNESCCOMM 0x04 +#define FPARSELN_UNESCREST 0x08 +#define FPARSELN_UNESCALL 0x0f + +__BEGIN_DECLS +void login __P((struct utmp *)); +int login_tty __P((int)); +int logout __P((const char *)); +void logwtmp __P((const char *, const char *, const char *)); +int pw_lock __P((int retries)); +int pw_mkdb __P((void)); +int pw_abort __P((void)); +void pw_init __P((void)); +void pw_edit __P((int notsetuid, const char *filename)); +void pw_prompt __P((void)); +void pw_copy __P((int ffd, int tfd, struct passwd *pw, + struct passwd *old_pw)); +int pw_scan __P((char *bp, struct passwd *pw, int *flags)); +void pw_error __P((const char *name, int err, int eval)); +int openpty __P((int *, int *, char *, struct termios *, + struct winsize *)); +char *fparseln __P((FILE *, size_t *, size_t *, const char[3], int)); +pid_t forkpty __P((int *, char *, struct termios *, struct winsize *)); +int getmaxpartitions __P((void)); +int getrawpartition __P((void)); +int opendisk __P((const char *, int, char *, size_t, int)); +int pidlock __P((const char *, int, pid_t *, const char *)); +int ttylock __P((const char *, int, pid_t *)); +int ttyunlock __P((const char *)); +int ttyaction __P((char *tty, char *act, char *user)); +struct iovec; +char *ttymsg __P((struct iovec *, int, const char *, int)); +__END_DECLS + +#endif /* !_UTIL_H_ */ diff --git a/bsd/include/utime.h b/bsd/include/utime.h new file mode 100644 index 000000000..2326c06cc --- /dev/null +++ b/bsd/include/utime.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)utime.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _UTIME_H_ +#define _UTIME_H_ + +struct utimbuf { + time_t actime; /* Access time */ + time_t modtime; /* Modification time */ +}; + +#include + +__BEGIN_DECLS +int utime __P((const char *, const struct utimbuf *)); +__END_DECLS + +#endif /* !_UTIME_H_ */ diff --git a/bsd/include/utmp.h b/bsd/include/utmp.h new file mode 100644 index 000000000..0778052a7 --- /dev/null +++ b/bsd/include/utmp.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)utmp.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _UTMP_H_ +#define _UTMP_H_ + +#define _PATH_UTMP "/var/run/utmp" +#define _PATH_WTMP "/var/log/wtmp" +#define _PATH_LASTLOG "/var/log/lastlog" + +#define UT_NAMESIZE 8 +#define UT_LINESIZE 8 +#define UT_HOSTSIZE 16 + +struct lastlog { + time_t ll_time; + char ll_line[UT_LINESIZE]; + char ll_host[UT_HOSTSIZE]; +}; + +struct utmp { + char ut_line[UT_LINESIZE]; + char ut_name[UT_NAMESIZE]; + char ut_host[UT_HOSTSIZE]; + long ut_time; +}; + +#endif /* !_UTMP_H_ */ diff --git a/bsd/include/vis.h b/bsd/include/vis.h new file mode 100644 index 000000000..b9debf747 --- /dev/null +++ b/bsd/include/vis.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vis.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _VIS_H_ +#define _VIS_H_ + +/* + * to select alternate encoding format + */ +#define VIS_OCTAL 0x01 /* use octal \ddd format */ +#define VIS_CSTYLE 0x02 /* use \[nrft0..] where appropriate */ + +/* + * to alter set of characters encoded (default is to encode all + * non-graphic except space, tab, and newline). + */ +#define VIS_SP 0x04 /* also encode space */ +#define VIS_TAB 0x08 /* also encode tab */ +#define VIS_NL 0x10 /* also encode newline */ +#define VIS_WHITE (VIS_SP | VIS_TAB | VIS_NL) +#define VIS_SAFE 0x20 /* only encode "unsafe" characters */ + +/* + * other + */ +#define VIS_NOSLASH 0x40 /* inhibit printing '\' */ + +/* + * unvis return codes + */ +#define UNVIS_VALID 1 /* character valid */ +#define UNVIS_VALIDPUSH 2 /* character valid, push back passed char */ +#define UNVIS_NOCHAR 3 /* valid sequence, no character produced */ +#define UNVIS_SYNBAD -1 /* unrecognized escape sequence */ +#define UNVIS_ERROR -2 /* decoder in unknown state (unrecoverable) */ + +/* + * unvis flags + */ +#define UNVIS_END 1 /* no more characters */ + +#include + +__BEGIN_DECLS +char *vis __P((char *, int, int, int)); +int strvis __P((char *, const char *, int)); +int strvisx __P((char *, const char *, size_t, int)); +int strunvis __P((char *, const char *)); +int unvis __P((char *, int, int *, int)); +__END_DECLS + +#endif /* !_VIS_H_ */ diff --git a/bsd/isofs/Makefile b/bsd/isofs/Makefile new
file mode 100644 index 000000000..fc780d6f1 --- /dev/null +++ b/bsd/isofs/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + cd9660 + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + cd9660 + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +SETUP_SUBDIRS = \ + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/isofs/cd9660/Makefile b/bsd/isofs/cd9660/Makefile new file mode 100644 index 000000000..27f7df03a --- /dev/null +++ b/bsd/isofs/cd9660/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + cd9660_mount.h cd9660_node.h cd9660_rrip.h iso.h iso_rrip.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = isofs/cd9660 + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = isofs/cd9660 + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/isofs/cd9660/TODO b/bsd/isofs/cd9660/TODO new file mode 100644 index 000000000..7951ff87c --- /dev/null +++ b/bsd/isofs/cd9660/TODO @@ -0,0 +1,47 @@ +# $NetBSD: TODO,v 1.4 1994/07/19 11:34:48 mycroft Exp $ + + 1) should understand "older", original High Sierra ("CDROM001") type + + Not yet. ( I don't have this technical information, yet. ) + + 2) should understand Rock Ridge + + Yes, we have follows function. 
+ + o Symbolic Link + o Real Name(long name) + o File Attribute + o Time stamp + o uid, gid + o Devices + o Relocated directories + + Except follows: + + o POSIX device number mapping + + There is some preliminary stuff in there that (ab-)uses the mknod + system call, but this needs a writable filesystem + + 5) should have name translation enabled by mount flag + + Yes. we can disable the Rock Ridge Extension by follows option; + + "mount -t isofs -o -norrip /dev/cd0d /cdrom" + + 6) should run as a user process, and not take up kernel space (cdroms + are slow) + + Not yet. + + 7) ECMA support. + + Not yet. we need not only a technical spec but also ECMA format + cd-rom itself! + + 8) Character set change by SVD ( multi SVD support ) + + Not yet. We should also hack the other part of system as 8 bit + clean. As far as I know, if you export the cdrom by NFS, the client + can access the 8 bit clean (ie. Solaris Japanese with EUC code ) + diff --git a/bsd/isofs/cd9660/TODO.hibler b/bsd/isofs/cd9660/TODO.hibler new file mode 100644 index 000000000..c0de4da0a --- /dev/null +++ b/bsd/isofs/cd9660/TODO.hibler @@ -0,0 +1,9 @@ +# $NetBSD: TODO.hibler,v 1.6 1994/12/13 22:33:10 mycroft Exp $ + +1. Investigate making ISOFS another UFS shared filesystem (ala FFS/MFS/LFS). + Since it was modelled after the inode code, we might be able to merge + them back. It looks like a separate (but very similar) lookup routine + will be needed due to the associated file stuff. + +2. Seems like there should be a "notrans" or some such mount option to show + filenames as they really are without lower-casing. Does this make sense? diff --git a/bsd/isofs/cd9660/cd9660_bmap.c b/bsd/isofs/cd9660/cd9660_bmap.c new file mode 100644 index 000000000..f031630e2 --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_bmap.c @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_bmap.c,v 1.5 1994/12/13 22:33:12 mycroft Exp $ */ + +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cd9660_bmap.c 8.4 (Berkeley) 12/5/94 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Bmap converts the logical block number of a file to its physical block + * number on the disk. The conversion is done by using the logical block + * number to index into the data block (extent) for the file. + */ +int +cd9660_bmap(ap) + struct vop_bmap_args /* { + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + int *a_runp; + } */ *ap; +{ + struct iso_node *ip = VTOI(ap->a_vp); + daddr_t lblkno = ap->a_bn; + int bshift; + + /* + * Check for underlying vnode requests and ensure that logical + * to physical mapping is requested. 
+ */ + if (ap->a_vpp != NULL) + *ap->a_vpp = ip->i_devvp; + if (ap->a_bnp == NULL) + return (0); + + /* + * Compute the requested block number + */ + bshift = ip->i_mnt->im_bshift; + *ap->a_bnp = (ip->iso_start + lblkno); + + /* + * Determine maximum number of readahead blocks following the + * requested block. + */ + if (ap->a_runp) { + int nblk; + + nblk = (ip->i_size >> bshift) - (lblkno + 1); + if (nblk <= 0) + *ap->a_runp = 0; + else if (nblk >= (MAXBSIZE >> bshift)) + *ap->a_runp = (MAXBSIZE >> bshift) - 1; + else + *ap->a_runp = nblk; + } + + return (0); +} + +/* blktooff converts a logical block number to a file offset */ +int +cd9660_blktooff(ap) + struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; + } */ *ap; +{ + register struct iso_node *ip; + register struct iso_mnt *imp; + + if (ap->a_vp == NULL) + return (EINVAL); + + ip = VTOI(ap->a_vp); + imp = ip->i_mnt; + + *ap->a_offset = (off_t)lblktosize(imp, ap->a_lblkno); + return (0); +} + +/* offtoblk converts a file offset to a logical block number */ +int +cd9660_offtoblk(ap) +struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; + } */ *ap; +{ + register struct iso_node *ip; + register struct iso_mnt *imp; + + if (ap->a_vp == NULL) + return (EINVAL); + + ip = VTOI(ap->a_vp); + imp = ip->i_mnt; + + *ap->a_lblkno = (daddr_t)lblkno(imp, ap->a_offset); + return (0); +} + +int +cd9660_cmap(ap) +struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_offset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; +} */ *ap; +{ + struct iso_node *ip = VTOI(ap->a_vp); + size_t cbytes; + int devBlockSize = 0; + + /* + * Check for underlying vnode requests and ensure that logical + * to physical mapping is requested. 
+ */ + if (ap->a_bpn == NULL) + return (0); + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + *ap->a_bpn = (daddr_t)(ip->iso_start + lblkno(ip->i_mnt, ap->a_foffset)); + + /* + * Determine maximum number of contiguous bytes following the + * requested offset. + */ + if (ap->a_run) { + if (ip->i_size > ap->a_foffset) + cbytes = ip->i_size - ap->a_foffset; + else + cbytes = 0; + + cbytes = (cbytes + (devBlockSize - 1)) & ~(devBlockSize - 1); + + *ap->a_run = MIN(cbytes, ap->a_size); + }; + + if (ap->a_poff) + *(int *)ap->a_poff = (long)ap->a_foffset & (devBlockSize - 1); + + return (0); +} + diff --git a/bsd/isofs/cd9660/cd9660_lookup.c b/bsd/isofs/cd9660/cd9660_lookup.c new file mode 100644 index 000000000..1cab9a13e --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_lookup.c @@ -0,0 +1,576 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_lookup.c,v 1.13 1994/12/24 15:30:03 cgd Exp $ */ + +/*- + * Copyright (c) 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)ufs_lookup.c 7.33 (Berkeley) 5/19/91 + * + * @(#)cd9660_lookup.c 8.5 (Berkeley) 12/5/94 + + + + * HISTORY + * 22-Jan-98 radar 1669467 - ISO 9660 CD support - jwc + + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +struct nchstats iso_nchstats; + +/* + * Convert a component of a pathname into a pointer to a locked inode. + * This is a very central and rather complicated routine. + * If the file system is not maintained in a strict tree hierarchy, + * this can result in a deadlock situation (see comments in code below). + * + * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on + * whether the name is to be looked up, created, renamed, or deleted. + * When CREATE, RENAME, or DELETE is specified, information usable in + * creating, renaming, or deleting a directory entry may be calculated. + * If flag has LOCKPARENT or'ed into it and the target of the pathname + * exists, lookup returns both the target and its parent directory locked. + * When creating or renaming and LOCKPARENT is specified, the target may + * not be ".". When deleting and LOCKPARENT is specified, the target may + * be "."., but the caller must check to ensure it does an vrele and iput + * instead of two iputs. 
+ * + * Overall outline of ufs_lookup: + * + * check accessibility of directory + * look for name in cache, if found, then if at end of path + * and deleting or creating, drop it, else return name + * search for name in directory, to found or notfound + * notfound: + * if creating, return locked directory, leaving info on available slots + * else return error + * found: + * if at end of path and deleting, return information to allow delete + * if at end of path and rewriting (RENAME and LOCKPARENT), lock target + * inode and return info to allow rewrite + * if not at end, add name to cache; if at end and neither creating + * nor deleting, add name to cache + * + * NOTE: (LOOKUP | LOCKPARENT) currently returns the parent inode unlocked. + */ +int +cd9660_lookup(ap) + struct vop_lookup_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + } */ *ap; +{ + register struct vnode *vdp; /* vnode for directory being searched */ + register struct iso_node *dp; /* inode for directory being searched */ + register struct iso_mnt *imp; /* file system that directory is in */ + struct buf *bp; /* a buffer of directory entries */ + struct iso_directory_record *ep = NULL;/* the current directory entry */ + int entryoffsetinblock; /* offset of ep in bp's buffer */ + int saveoffset = 0; /* offset of last directory entry in dir */ + int numdirpasses; /* strategy for directory search */ + doff_t endsearch; /* offset to end directory search */ + struct vnode *pdp; /* saved dp during symlink work */ + struct vnode *tdp; /* returned by cd9660_vget_internal */ + u_long bmask; /* block offset mask */ + int lockparent; /* 1 => lockparent flag is set */ + int wantparent; /* 1 => wantparent or lockparent flag */ + int wantrsrc; /* 1 => looking for resource fork */ + int error; + ino_t ino = 0; + int reclen; + u_short namelen; + char altname[ISO_RRIP_NAMEMAX]; + int res; + int len; + char *name; + struct vnode **vpp = ap->a_vpp; + struct componentname *cnp = 
ap->a_cnp; + struct ucred *cred = cnp->cn_cred; + int flags = cnp->cn_flags; + int nameiop = cnp->cn_nameiop; + struct proc *p = cnp->cn_proc; + int devBlockSize=0; + long rsrcsize; + size_t altlen; + + bp = NULL; + *vpp = NULL; + vdp = ap->a_dvp; + dp = VTOI(vdp); + imp = dp->i_mnt; + lockparent = flags & LOCKPARENT; + wantparent = flags & (LOCKPARENT|WANTPARENT); + wantrsrc = 0; + + /* + * Check accessibility of directory. + */ + if (vdp->v_type != VDIR) + return (ENOTDIR); + if ( (error = VOP_ACCESS(vdp, VEXEC, cred, p)) ) + return (error); + + /* + * Determine if we're looking for a resource fork + * note: this could cause a read off the end of the + * component name buffer in some rare cases. + */ + if ((flags & ISLASTCN) == 0 && + bcmp(&cnp->cn_nameptr[cnp->cn_namelen], + _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC) - 1) == 0) { + flags |= ISLASTCN; + cnp->cn_consume = sizeof(_PATH_RSRCFORKSPEC) - 1; + wantrsrc = 1; + } + /* + * We now have a segment name to search for, and a directory to search. + * + * Before tediously performing a linear scan of the directory, + * check the name cache to see if the directory/name pair + * we are looking for is known already. + * Note: resource forks are never in the name cache + */ + if ((error = cache_lookup(vdp, vpp, cnp)) && !wantrsrc) { + int vpid; /* capability number of vnode */ + + if (error == ENOENT) + return (error); +#ifdef PARANOID + if ((vdp->v_flag & VROOT) && (flags & ISDOTDOT)) + panic("cd9660_lookup: .. through root"); +#endif + /* + * Get the next vnode in the path. + * See comment below starting `Step through' for + * an explanation of the locking protocol.
+ */ + pdp = vdp; + dp = VTOI(*vpp); + vdp = *vpp; + vpid = vdp->v_id; + if (pdp == vdp) { + VREF(vdp); + error = 0; + } else if (flags & ISDOTDOT) { + VOP_UNLOCK(pdp, 0, p); + error = vget(vdp, LK_EXCLUSIVE | LK_RETRY, p); + if (!error && lockparent && (flags & ISLASTCN)) + error = VOP_LOCK(pdp, LK_EXCLUSIVE | LK_RETRY, p); + } else { + error = vget(vdp, LK_EXCLUSIVE | LK_RETRY, p); + if (!lockparent || error || !(flags & ISLASTCN)) + VOP_UNLOCK(pdp, 0, p); + } + /* + * Check that the capability number did not change + * while we were waiting for the lock. + */ + if (!error) { + if (vpid == vdp->v_id) + return (0); + vput(vdp); + if (lockparent && pdp != vdp && (flags & ISLASTCN)) + VOP_UNLOCK(pdp, 0, p); + } + if ( (error = VOP_LOCK(pdp, LK_EXCLUSIVE | LK_RETRY, p)) ) + return (error); + vdp = pdp; + dp = VTOI(pdp); + *vpp = NULL; + } + + len = cnp->cn_namelen; + name = cnp->cn_nameptr; + altname[0] = '\0'; + rsrcsize = 0; + + /* + * Decode search name into UCS-2 (Unicode) + */ + if ((imp->iso_ftype == ISO_FTYPE_JOLIET) && + !((len == 1 && *name == '.') || (flags & ISDOTDOT))) { + int flags = 0; + + if (BYTE_ORDER != BIG_ENDIAN) + flags |= UTF_REVERSE_ENDIAN; + + (void) utf8_decodestr(name, len, (u_int16_t*) altname, &altlen, + sizeof(altname), 0, flags); + name = altname; + len = altlen; + } + /* + * If there is cached information on a previous search of + * this directory, pick up where we last left off. + * We cache only lookups as these are the most common + * and have the greatest payoff. Caching CREATE has little + * benefit as it usually must search the entire directory + * to determine that the entry does not exist. Caching the + * location of the last DELETE or RENAME has not reduced + * profiling time and hence has been removed in the interest + * of simplicity. 
+ */ + bmask = imp->im_bmask; + if (nameiop != LOOKUP || dp->i_diroff == 0 || + dp->i_diroff > dp->i_size) { + entryoffsetinblock = 0; + dp->i_offset = 0; + numdirpasses = 1; + } else { + dp->i_offset = dp->i_diroff; + + if ((entryoffsetinblock = dp->i_offset & bmask) && + (error = VOP_BLKATOFF(vdp, (off_t)dp->i_offset, NULL, &bp))) + return (error); + numdirpasses = 2; + iso_nchstats.ncs_2passes++; + } + endsearch = dp->i_size; + +searchloop: + while (dp->i_offset < endsearch) { + /* + * If offset is on a block boundary, + * read the next directory block. + * Release previous if it exists. + */ + if ((dp->i_offset & bmask) == 0) { + if (bp != NULL) + brelse(bp); + if ( (error = VOP_BLKATOFF(vdp, (off_t)dp->i_offset, NULL, &bp)) ) + return (error); + entryoffsetinblock = 0; + } + /* + * Get pointer to next entry. + */ + ep = (struct iso_directory_record *) + ((char *)bp->b_data + entryoffsetinblock); + + reclen = isonum_711(ep->length); + if (reclen == 0) { + /* skip to next block, if any */ + dp->i_offset = + (dp->i_offset & ~bmask) + imp->logical_block_size; + continue; + } + + if (reclen < ISO_DIRECTORY_RECORD_SIZE) + /* illegal entry, stop */ + break; + + if (entryoffsetinblock + reclen > imp->logical_block_size) + /* entries are not allowed to cross boundaries */ + break; + + namelen = isonum_711(ep->name_len); + + if (reclen < ISO_DIRECTORY_RECORD_SIZE + namelen) + /* illegal entry, stop */ + break; + + /* remember the size of resource forks (associated files) */ + if ((isonum_711(ep->flags) & (directoryBit | associatedBit)) == associatedBit) { + if (namelen < sizeof(altname) && ino == 0) { + rsrcsize = isonum_733(ep->size); + bcopy(ep->name, altname, namelen); + altname[namelen] = '\0'; + altlen = namelen; + } + } + /* + * Check for a name match. 
+ */ + if (imp->iso_ftype == ISO_FTYPE_RRIP) { + if ( isonum_711(ep->flags) & directoryBit ) + ino = isodirino(ep, imp); + else + ino = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; + dp->i_ino = ino; + cd9660_rrip_getname(ep,altname,&namelen,&dp->i_ino,imp); + if (namelen == cnp->cn_namelen + && !bcmp(name,altname,namelen)) + goto found; + ino = 0; + } else { + if ((!(isonum_711(ep->flags) & associatedBit)) == !wantrsrc) { + if ((len == 1 + && *name == '.') + || (flags & ISDOTDOT)) { + if (namelen == 1 + && ep->name[0] == ((flags & ISDOTDOT) ? 1 : 0)) { + /* + * Save directory entry's inode number and + * release directory buffer. + */ + dp->i_ino = isodirino(ep, imp); + goto found; + } + if (namelen != 1 + || ep->name[0] != 0) + goto notfound; + } else if (imp->iso_ftype != ISO_FTYPE_JOLIET && !(res = isofncmp(name,len, + ep->name,namelen))) { + if ( isonum_711(ep->flags) & directoryBit ) + ino = isodirino(ep, imp); + else + ino = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; + saveoffset = dp->i_offset; + } else if (imp->iso_ftype == ISO_FTYPE_JOLIET && !(res = ucsfncmp((u_int16_t*)name, len, + (u_int16_t*) ep->name, namelen))) { + if ( isonum_711(ep->flags) & directoryBit ) + ino = isodirino(ep, imp); + else + ino = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; + saveoffset = dp->i_offset; + } else if (ino) + goto foundino; +#ifdef NOSORTBUG /* On some CDs directory entries are not sorted correctly */ + else if (res < 0) + goto notfound; + else if (res > 0 && numdirpasses == 2) + numdirpasses++; +#endif + } + } + dp->i_offset += reclen; + entryoffsetinblock += reclen; + } /* endwhile */ + + if (ino) { +foundino: + dp->i_ino = ino; + if (saveoffset != dp->i_offset) { + if (lblkno(imp, dp->i_offset) != + lblkno(imp, saveoffset)) { + if (bp != NULL) + brelse(bp); + if ( (error = VOP_BLKATOFF(vdp, (off_t)saveoffset, NULL, &bp)) ) + return (error); + } + entryoffsetinblock = saveoffset & bmask; + ep = (struct iso_directory_record *) + ((char 
*)bp->b_data + entryoffsetinblock); + dp->i_offset = saveoffset; + } + goto found; + } +notfound: + /* + * If we started in the middle of the directory and failed + * to find our target, we must check the beginning as well. + */ + if (numdirpasses == 2) { + numdirpasses--; + dp->i_offset = 0; + endsearch = dp->i_diroff; + goto searchloop; + } + if (bp != NULL) + brelse(bp); + + /* + * Insert name into cache (as non-existent) if appropriate. + */ + if ((cnp->cn_flags & MAKEENTRY) && !wantrsrc) + cache_enter(vdp, *vpp, cnp); + if (nameiop == CREATE || nameiop == RENAME) { + /* + * return EROFS (NOT EJUSTRETURN). The caller will then unlock + * the parent for us. + */ + return (EROFS); + } + + if (wantrsrc) + return (ENOTDIR); + else + return (ENOENT); + +found: + if (numdirpasses == 2) + iso_nchstats.ncs_pass2++; + + /* + * Found component in pathname. + * If the final component of path name, save information + * in the cache as to where the entry was found. + */ + if ((flags & ISLASTCN) && nameiop == LOOKUP) + dp->i_diroff = dp->i_offset; + + /* + * Step through the translation in the name. We do not `iput' the + * directory because we may need it again if a symbolic link + * is relative to the current directory. Instead we save it + * unlocked as "pdp". We must get the target inode before unlocking + * the directory to insure that the inode will not be removed + * before we get it. We prevent deadlock by always fetching + * inodes from the root, moving down the directory tree. Thus + * when following backward pointers ".." we must unlock the + * parent directory before getting the requested directory. + * There is a potential race condition here if both the current + * and parent directories are removed before the `iget' for the + * inode associated with ".." returns. We hope that this occurs + * infrequently since we cannot avoid this race condition without + * implementing a sophisticated deadlock detection algorithm. 
+ * Note also that this simple deadlock detection scheme will not + * work if the file system has any hard links other than ".." + * that point backwards in the directory structure. + */ + pdp = vdp; + /* + * If ino is different from dp->i_ino, + * it's a relocated directory. + */ + if (flags & ISDOTDOT) { + VOP_UNLOCK(pdp, 0, p); /* race to get the inode */ + error = cd9660_vget_internal(vdp->v_mount, dp->i_ino, &tdp, + dp->i_ino != ino, ep, p); + VTOI(tdp)->i_parent = VTOI(pdp)->i_number; + brelse(bp); + if (error) { + VOP_LOCK(pdp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); + } + if (lockparent && (flags & ISLASTCN) && + (error = VOP_LOCK(pdp, LK_EXCLUSIVE | LK_RETRY, p))) { + vput(tdp); + return (error); + } + *vpp = tdp; + } else if (dp->i_number == dp->i_ino) { + brelse(bp); + VREF(vdp); /* we want ourself, ie "." */ + *vpp = vdp; + } else { + error = cd9660_vget_internal(vdp->v_mount, dp->i_ino, &tdp, + dp->i_ino != ino, ep, p); + /* save parent inode number */ + VTOI(tdp)->i_parent = VTOI(pdp)->i_number; + if (!wantrsrc && (tdp->v_type == VREG) && (rsrcsize > 0)) { + if (bcmp(ep->name, altname, altlen) == 0) + VTOI(tdp)->i_rsrcsize = rsrcsize; + } + brelse(bp); + if (error) + return (error); + if (!lockparent || !(flags & ISLASTCN)) + VOP_UNLOCK(pdp, 0, p); + *vpp = tdp; + } + + /* + * Insert name into cache if appropriate. + */ + if ((cnp->cn_flags & MAKEENTRY) && !wantrsrc) + cache_enter(vdp, *vpp, cnp); + + return (0); +} + + +/* + * Return buffer with the contents of block "offset" from the beginning of + * directory "ip". If "res" is non-zero, fill it in with a pointer to the + * remaining space in the directory. 
+ */ +int +cd9660_blkatoff(ap) + struct vop_blkatoff_args /* { + struct vnode *a_vp; + off_t a_offset; + char **a_res; + struct buf **a_bpp; + } */ *ap; +{ + struct iso_node *ip; + register struct iso_mnt *imp; + struct buf *bp; + daddr_t lbn; + int bsize, error; + + ip = VTOI(ap->a_vp); + imp = ip->i_mnt; + lbn = lblkno(imp, ap->a_offset); + bsize = blksize(imp, ip, lbn); + + if ( (error = bread(ap->a_vp, lbn, bsize, NOCRED, &bp)) ) { + brelse(bp); + *ap->a_bpp = NULL; + return (error); + } + if (ap->a_res) + *ap->a_res = (char *)bp->b_data + blkoff(imp, ap->a_offset); + *ap->a_bpp = bp; + + return (0); +} diff --git a/bsd/isofs/cd9660/cd9660_mount.h b/bsd/isofs/cd9660/cd9660_mount.h new file mode 100644 index 000000000..9fbf38172 --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_mount.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). 
The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cd9660_mount.h 8.1 (Berkeley) 5/24/95 + */ + +/* + * Arguments to mount ISO 9660 filesystems. 
+ */ +struct iso_args { + char *fspec; /* block special device to mount */ + struct export_args export; /* network export info */ + int flags; /* mounting flags, see below */ + int ssector; /* starting sector, 0 for 1st session */ +}; +#define ISOFSMNT_NORRIP 0x00000001 /* disable Rock Ridge Ext.*/ +#define ISOFSMNT_GENS 0x00000002 /* enable generation numbers */ +#define ISOFSMNT_EXTATT 0x00000004 /* enable extended attributes */ +#define ISOFSMNT_NOJOLIET 0x00000008 /* disable Joliet Ext.*/ diff --git a/bsd/isofs/cd9660/cd9660_node.c b/bsd/isofs/cd9660/cd9660_node.c new file mode 100644 index 000000000..c9400cd78 --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_node.c @@ -0,0 +1,522 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_node.c,v 1.13 1994/12/24 15:30:07 cgd Exp $ */ + +/*- + * Copyright (c) 1982, 1986, 1989, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). 
The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)cd9660_node.c 8.5 (Berkeley) 12/5/94 + + + + * HISTORY + * 22-Jan-98 radar 1669467 - ISO 9660 CD support - jwc + * 17-Feb-98 radar 1669467 - changed lock protocols to use the lock manager - chw + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Structures associated with iso_node caching. + */ +struct iso_node **isohashtbl; +u_long isohash; +#define INOHASH(device, inum) (((device) + ((inum)>>12)) & isohash) + +#ifdef ISODEVMAP +struct iso_node **idvhashtbl; +u_long idvhash; +#define DNOHASH(device, inum) (((device) + ((inum)>>12)) & idvhash) +#endif + +/* defined in bsd/ufs/ufs/ufs_inode.c */ +extern int prtactive; /* 1 => print out reclaim of active vnodes */ + +extern void cache_purge (struct vnode *vp); + +extern u_char isonullname[]; +/* + * Initialize hash links for inodes and dnodes. + */ +int +cd9660_init() +{ + + isohashtbl = hashinit(desiredvnodes, M_ISOFSMNT, &isohash); +#ifdef ISODEVMAP + idvhashtbl = hashinit(desiredvnodes / 8, M_ISOFSMNT, &idvhash); +#endif + return 0; +} + +#ifdef ISODEVMAP +/* + * Enter a new node into the device hash list + */ +struct iso_dnode * +iso_dmap(device, inum, create) + dev_t device; + ino_t inum; + int create; +{ + register struct iso_dnode **dpp, *dp, *dq; + + dpp = &idvhashtbl[DNOHASH(device, inum)]; + for (dp = *dpp;; dp = dp->d_next) { + if (dp == NULL) + return (NULL); + if (inum == dp->i_number && device == dp->i_dev) + return (dp); + + if (!create) + return (NULL); + + MALLOC(dp, struct iso_dnode *, sizeof(struct iso_dnode), M_CACHE, + M_WAITOK); + dp->i_dev = dev; + dp->i_number = ino; + + if (dq = *dpp) + dq->d_prev = dp->d_next; + dp->d_next = dq; + dp->d_prev = dpp; + *dpp = dp; + + return (dp); +} + +void +iso_dunmap(device) + dev_t device; +{ + struct iso_dnode **dpp, *dp, *dq; + + for (dpp = idvhashtbl; dpp <= idvhashtbl + idvhash; dpp++) { + for (dp = *dpp; dp != NULL; dp = 
dq) + dq = dp->d_next; + if (device == dp->i_dev) { + if (dq) + dq->d_prev = dp->d_prev; + *dp->d_prev = dq; + FREE(dp, M_CACHE); + } + } + } +} +#endif + +/* + * Use the device/inum pair to find the incore inode, and return a pointer + * to it. If it is in core, but locked, wait for it. + */ +struct vnode * +cd9660_ihashget(device, inum, p) + dev_t device; + ino_t inum; + struct proc *p; +{ + register struct iso_node *ip; + struct vnode *vp; + + for (;;) + for (ip = isohashtbl[INOHASH(device, inum)];; ip = ip->i_next) { + if (ip == NULL) + return (NULL); + if (inum == ip->i_number && device == ip->i_dev) { + /* + * This is my most dangerous change. I am not waiting for + * the inode lock anymore (ufs doesn't, why should we) and + * I'm worried because there is not lock on the hashtable, + * but there wasn't before so I'll let it go for now. + * -- chw -- + */ + vp = ITOV(ip); + simple_lock(&vp->v_interlock); + if (!vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, p)) + return (vp); + break; + } + } + /* NOTREACHED */ +} + +/* + * Insert the inode into the hash table, and return it locked. + */ +void +cd9660_ihashins(ip) + struct iso_node *ip; +{ + struct iso_node **ipp, *iq; + struct proc *p = current_proc(); + + /* lock the inode, then put it on the appropriate hash list */ + lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p); + + ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)]; + if ((iq = *ipp)) + iq->i_prev = &ip->i_next; + ip->i_next = iq; + ip->i_prev = ipp; + *ipp = ip; + } + +/* + * Remove the inode from the hash table. + */ +void +cd9660_ihashrem(ip) + register struct iso_node *ip; +{ + register struct iso_node *iq; + + if ((iq = ip->i_next)) + iq->i_prev = ip->i_prev; + *ip->i_prev = iq; +#if 1 /* was ifdef DIAGNOSTIC */ + ip->i_next = NULL; + ip->i_prev = NULL; +#endif +} + +/* + * Last reference to an inode, write the inode out and if necessary, + * truncate and deallocate the file. 
+ */ +int +cd9660_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct proc *p = ap->a_p; + register struct iso_node *ip = VTOI(vp); + int error = 0; + + if (prtactive && vp->v_usecount != 0) + vprint("cd9660_inactive: pushing active", vp); + /* + * We need to unlock the inode here. If we don't panics or + * hangs will ensue. Our callers expect us to take care of this. + */ + + VOP_UNLOCK(vp,0,p); + + /* + * If we are done with the inode, reclaim it + * so that it can be reused immediately. + */ + if (vp->v_usecount == 0 && ip->inode.iso_mode == 0) + vgone(vp); + + return error; +} + +/* + * Reclaim an inode so that it can be used for other purposes. + */ +int +cd9660_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct iso_node *ip = VTOI(vp); + + if (prtactive && vp->v_usecount != 0) + vprint("cd9660_reclaim: pushing active", vp); + /* + * Remove the inode from its hash chain. + */ + cd9660_ihashrem(ip); + /* + * Purge old data structures associated with the inode. + */ + cache_purge(vp); + if (ip->i_devvp) { + vrele(ip->i_devvp); + ip->i_devvp = 0; + } + if (ip->i_namep != isonullname) + FREE(ip->i_namep, M_TEMP); + FREE_ZONE(vp->v_data, sizeof(struct iso_node), M_ISOFSNODE); + vp->v_data = NULL; + return (0); +} + +/* + * File attributes + */ +void +cd9660_defattr(isodir, inop, bp) + struct iso_directory_record *isodir; + struct iso_node *inop; + struct buf *bp; +{ + struct buf *bp2 = NULL; + struct iso_mnt *imp; + struct iso_extended_attributes *ap = NULL; + int off; + + if ( isonum_711(isodir->flags) & directoryBit ) { + inop->inode.iso_mode = S_IFDIR; + /* + * If we return 2, fts() will assume there are no subdirectories + * (just links for the path and .), so instead we return 1. 
+ */ + inop->inode.iso_links = 1; + } else { + inop->inode.iso_mode = S_IFREG; + inop->inode.iso_links = 1; + } + if (!bp + && ((imp = inop->i_mnt)->im_flags & ISOFSMNT_EXTATT) + && (off = isonum_711(isodir->ext_attr_length))) { + VOP_BLKATOFF(ITOV(inop), (off_t)-(off << imp->im_bshift), NULL, + &bp2); + bp = bp2; + } + if (bp) { + ap = (struct iso_extended_attributes *)bp->b_data; + + if (isonum_711(ap->version) == 1) { + if (!(ap->perm[0]&0x40)) + inop->inode.iso_mode |= VEXEC >> 6; + if (!(ap->perm[0]&0x10)) + inop->inode.iso_mode |= VREAD >> 6; + if (!(ap->perm[0]&4)) + inop->inode.iso_mode |= VEXEC >> 3; + if (!(ap->perm[0]&1)) + inop->inode.iso_mode |= VREAD >> 3; + if (!(ap->perm[1]&0x40)) + inop->inode.iso_mode |= VEXEC; + if (!(ap->perm[1]&0x10)) + inop->inode.iso_mode |= VREAD; + inop->inode.iso_uid = isonum_723(ap->owner); /* what about 0? */ + inop->inode.iso_gid = isonum_723(ap->group); /* what about 0? */ + } else + ap = NULL; + } + if (!ap) { + inop->inode.iso_mode |= VREAD|VEXEC|(VREAD|VEXEC)>>3|(VREAD|VEXEC)>>6; + inop->inode.iso_uid = (uid_t)0; + inop->inode.iso_gid = (gid_t)0; + } + if (bp2) + brelse(bp2); +} + +/* + * Time stamps + */ +void +cd9660_deftstamp(isodir,inop,bp) + struct iso_directory_record *isodir; + struct iso_node *inop; + struct buf *bp; +{ + struct buf *bp2 = NULL; + struct iso_mnt *imp; + struct iso_extended_attributes *ap = NULL; + int off; + + if (!bp + && ((imp = inop->i_mnt)->im_flags & ISOFSMNT_EXTATT) + && (off = isonum_711(isodir->ext_attr_length))) + { + VOP_BLKATOFF(ITOV(inop), (off_t)-(off << imp->im_bshift), NULL, &bp2); + bp = bp2; + } + if (bp) { + ap = (struct iso_extended_attributes *)bp->b_data; + + if (isonum_711(ap->version) == 1) { + if (!cd9660_tstamp_conv17(ap->ftime,&inop->inode.iso_atime)) + cd9660_tstamp_conv17(ap->ctime,&inop->inode.iso_atime); + if (!cd9660_tstamp_conv17(ap->ctime,&inop->inode.iso_ctime)) + inop->inode.iso_ctime = inop->inode.iso_atime; + if 
(!cd9660_tstamp_conv17(ap->mtime,&inop->inode.iso_mtime)) + inop->inode.iso_mtime = inop->inode.iso_ctime; + } else + ap = NULL; + } + if (!ap) { + cd9660_tstamp_conv7(isodir->date,&inop->inode.iso_ctime); + inop->inode.iso_atime = inop->inode.iso_ctime; + inop->inode.iso_mtime = inop->inode.iso_ctime; + } + if (bp2) + brelse(bp2); +} + +int +cd9660_tstamp_conv7(pi,pu) + u_char *pi; + struct timespec *pu; +{ + int crtime, days; + int y, m, d, hour, minute, second, tz; + + y = pi[0] + 1900; + m = pi[1]; + d = pi[2]; + hour = pi[3]; + minute = pi[4]; + second = pi[5]; + tz = pi[6]; + + if (y < 1970) { + pu->tv_sec = 0; + pu->tv_nsec = 0; + return 0; + } else { +#ifdef ORIGINAL + /* computes day number relative to Sept. 19th,1989 */ + /* don't even *THINK* about changing formula. It works! */ + days = 367*(y-1980)-7*(y+(m+9)/12)/4-3*((y+(m-9)/7)/100+1)/4+275*m/9+d-100; +#else + /* + * Changed :-) to make it relative to Jan. 1st, 1970 + * and to disambiguate negative division + */ + days = 367*(y-1960)-7*(y+(m+9)/12)/4-3*((y+(m+9)/12-1)/100+1)/4+275*m/9+d-239; +#endif + crtime = ((((days * 24) + hour) * 60 + minute) * 60) + second; + + /* timezone offset is unreliable on some disks */ + if (-48 <= tz && tz <= 52) + crtime -= tz * 15 * 60; + } + pu->tv_sec = crtime; + pu->tv_nsec = 0; + return 1; +} + +static u_int +cd9660_chars2ui(begin,len) + u_char *begin; + int len; +{ + u_int rc; + + for (rc = 0; --len >= 0;) { + rc *= 10; + rc += *begin++ - '0'; + } + return rc; +} + +int +cd9660_tstamp_conv17(pi,pu) + u_char *pi; + struct timespec *pu; +{ + u_char buf[7]; + + /* year:"0001"-"9999" -> -1900 */ + buf[0] = cd9660_chars2ui(pi,4) - 1900; + + /* month: " 1"-"12" -> 1 - 12 */ + buf[1] = cd9660_chars2ui(pi + 4,2); + + /* day: " 1"-"31" -> 1 - 31 */ + buf[2] = cd9660_chars2ui(pi + 6,2); + + /* hour: " 0"-"23" -> 0 - 23 */ + buf[3] = cd9660_chars2ui(pi + 8,2); + + /* minute:" 0"-"59" -> 0 - 59 */ + buf[4] = cd9660_chars2ui(pi + 10,2); + + /* second:" 0"-"59" -> 0 - 59 */ + 
buf[5] = cd9660_chars2ui(pi + 12,2); + + /* difference of GMT */ + buf[6] = pi[16]; + + return cd9660_tstamp_conv7(buf,pu); +} + +ino_t +isodirino(isodir, imp) + struct iso_directory_record *isodir; + struct iso_mnt *imp; +{ + ino_t ino; + + ino = (isonum_733(isodir->extent) + isonum_711(isodir->ext_attr_length)) + << imp->im_bshift; + return (ino); +} diff --git a/bsd/isofs/cd9660/cd9660_node.h b/bsd/isofs/cd9660/cd9660_node.h new file mode 100644 index 000000000..79c64af76 --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_node.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_node.h,v 1.10 1994/12/24 15:30:09 cgd Exp $ */ + +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cd9660_node.h 8.4 (Berkeley) 12/5/94 + */ +#ifndef _CD9660_NODE_H_ +#define _CD9660_NODE_H_ + +/* + * Theoretically, directories can be more than 2Gb in length, + * however, in practice this seems unlikely. 
So, we define + * the type doff_t as a long to keep down the cost of doing + * lookup on a 32-bit machine. If you are porting to a 64-bit + * architecture, you should make doff_t the same as off_t. + */ + +#include +#include + +#ifndef doff_t +#define doff_t long +#endif + +typedef struct { + struct timespec iso_atime; /* time of last access */ + struct timespec iso_mtime; /* time of last modification */ + struct timespec iso_ctime; /* time file changed */ + uid_t iso_uid; /* owner user id */ + gid_t iso_gid; /* owner group id */ + u_short iso_mode; /* files access mode and type */ + short iso_links; /* links of file */ + dev_t iso_rdev; /* Major/Minor number for special */ +} ISO_RRIP_INODE; + +#ifdef ISODEVMAP +/* + * FOr device# (major,minor) translation table + */ +struct iso_dnode { + struct iso_dnode *d_next, **d_prev; /* hash chain */ + dev_t i_dev; /* device where dnode resides */ + ino_t i_number; /* the identity of the inode */ + dev_t d_dev; /* device # for translation */ +}; +#endif + +/* defines i_size as a macro */ +#undef i_size + +struct iso_node { + struct iso_node *i_next, **i_prev; /* hash chain */ + struct vnode *i_vnode; /* vnode associated with this inode */ + struct vnode *i_devvp; /* vnode for block I/O */ + dev_t i_dev; /* device where inode resides */ + ino_t i_number; /* the identity of the inode */ + /* we use the actual starting block of the file */ + struct iso_mnt *i_mnt; /* filesystem associated with this inode */ + struct lockf *i_lockf; /* head of byte-level lock list */ + doff_t i_endoff; /* end of useful stuff in directory */ + doff_t i_diroff; /* offset in dir, where we found last entry */ + doff_t i_offset; /* offset of free space in directory */ + ino_t i_ino; /* inode number of found directory */ + struct lock__bsd__ i_lock; /* Inode lock. 
*/ + long iso_extent; /* extent of file */ + long i_size; + long iso_start; /* actual start of data of file (may be different */ + /* from iso_extent, if file has extended attributes) */ + ISO_RRIP_INODE inode; + + ino_t i_parent; /* inode number of parent directory */ + u_char *i_namep; /* node name buffer */ + + /* support Apple extensions to ISO directory rec */ + long i_rsrcsize; /* cached size of associated file */ + u_int32_t i_FileType; /* MacOS file type */ + u_int32_t i_Creator; /* MacOS file creator */ + u_int16_t i_FinderFlags; /* MacOS finder flags */ + + u_int16_t i_entries; /* count of directory entries */ +}; + +#define i_forw i_chain[0] +#define i_back i_chain[1] + + +/* defines VTOI and ITOV macros */ +#undef VTOI +#undef ITOV + +#define VTOI(vp) ((struct iso_node *)(vp)->v_data) +#define ITOV(ip) ((ip)->i_vnode) + +/* + * Prototypes for ISOFS vnode operations + */ +int cd9660_lookup __P((struct vop_lookup_args *)); +int cd9660_open __P((struct vop_open_args *)); +int cd9660_close __P((struct vop_close_args *)); +int cd9660_access __P((struct vop_access_args *)); +int cd9660_getattr __P((struct vop_getattr_args *)); +int cd9660_read __P((struct vop_read_args *)); +int cd9660_ioctl __P((struct vop_ioctl_args *)); +int cd9660_select __P((struct vop_select_args *)); +int cd9660_mmap __P((struct vop_mmap_args *)); +int cd9660_seek __P((struct vop_seek_args *)); +int cd9660_readdir __P((struct vop_readdir_args *)); +int cd9660_readlink __P((struct vop_readlink_args *)); +int cd9660_abortop __P((struct vop_abortop_args *)); +int cd9660_inactive __P((struct vop_inactive_args *)); +int cd9660_reclaim __P((struct vop_reclaim_args *)); +int cd9660_bmap __P((struct vop_bmap_args *)); +int cd9660_lock __P((struct vop_lock_args *)); +int cd9660_unlock __P((struct vop_unlock_args *)); +int cd9660_strategy __P((struct vop_strategy_args *)); +int cd9660_print __P((struct vop_print_args *)); +int cd9660_islocked __P((struct vop_islocked_args *)); +int 
cd9660_pathconf __P((struct vop_pathconf_args *)); +int cd9660_blkatoff __P((struct vop_blkatoff_args *)); + +void cd9660_defattr __P((struct iso_directory_record *, + struct iso_node *, struct buf *)); +void cd9660_deftstamp __P((struct iso_directory_record *, + struct iso_node *, struct buf *)); +struct vnode *cd9660_ihashget __P((dev_t, ino_t, struct proc *)); +void cd9660_ihashins __P((struct iso_node *)); +void cd9660_ihashrem __P((struct iso_node *)); +int cd9660_tstamp_conv7 __P((u_char *, struct timespec *)); +int cd9660_tstamp_conv17 __P((u_char *, struct timespec *)); +ino_t isodirino __P((struct iso_directory_record *, struct iso_mnt *)); +#ifdef ISODEVMAP +struct iso_dnode *iso_dmap __P((dev_t, ino_t, int)); +void iso_dunmap __P((dev_t)); +#endif + +#endif /* ! _CD9660_NODE_H_ */ diff --git a/bsd/isofs/cd9660/cd9660_rrip.c b/bsd/isofs/cd9660/cd9660_rrip.c new file mode 100644 index 000000000..f73a873cd --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_rrip.c @@ -0,0 +1,720 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_rrip.c,v 1.11 1994/12/24 15:30:10 cgd Exp $ */ + +/*- + * Copyright (c) 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cd9660_rrip.c 8.6 (Berkeley) 12/5/94 + + + + * HISTORY + * 22-Jan-98 radar 1669467 - ISO 9660 CD support - jwc + + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/* + * POSIX file attribute + */ +static int +cd9660_rrip_attr(p,ana) + ISO_RRIP_ATTR *p; + ISO_RRIP_ANALYZE *ana; +{ + ana->inop->inode.iso_mode = isonum_733(p->mode); + ana->inop->inode.iso_uid = isonum_733(p->uid); + ana->inop->inode.iso_gid = isonum_733(p->gid); + ana->inop->inode.iso_links = isonum_733(p->links); + ana->fields &= ~ISO_SUSP_ATTR; + return ISO_SUSP_ATTR; +} + +static void +cd9660_rrip_defattr(isodir,ana) + struct iso_directory_record *isodir; + ISO_RRIP_ANALYZE *ana; +{ + /* But this is a required field! 
*/ + printf("RRIP without PX field?\n"); + cd9660_defattr(isodir,ana->inop,NULL); +} + +/* + * Symbolic Links + */ +static int +cd9660_rrip_slink(p,ana) + ISO_RRIP_SLINK *p; + ISO_RRIP_ANALYZE *ana; +{ + register ISO_RRIP_SLINK_COMPONENT *pcomp; + register ISO_RRIP_SLINK_COMPONENT *pcompe; + int len, wlen, cont; + char *outbuf, *inbuf; + + pcomp = (ISO_RRIP_SLINK_COMPONENT *)p->component; + pcompe = (ISO_RRIP_SLINK_COMPONENT *)((char *)p + isonum_711(p->h.length)); + len = *ana->outlen; + outbuf = ana->outbuf; + cont = ana->cont; + + /* + * Gathering a Symbolic name from each component with path + */ + for (; + pcomp < pcompe; + pcomp = (ISO_RRIP_SLINK_COMPONENT *)((char *)pcomp + ISO_RRIP_SLSIZ + + isonum_711(pcomp->clen))) { + + if (!cont) { + if (len < ana->maxlen) { + len++; + *outbuf++ = '/'; + } + } + cont = 0; + + inbuf = ".."; + wlen = 0; + + switch (*pcomp->cflag) { + + case ISO_SUSP_CFLAG_CURRENT: + /* Inserting Current */ + wlen = 1; + break; + + case ISO_SUSP_CFLAG_PARENT: + /* Inserting Parent */ + wlen = 2; + break; + + case ISO_SUSP_CFLAG_ROOT: + /* Inserting slash for ROOT */ + /* start over from beginning(?) */ + outbuf -= len; + len = 0; + break; + + case ISO_SUSP_CFLAG_VOLROOT: + /* Inserting a mount point i.e. "/cdrom" */ + /* same as above */ + outbuf -= len; + len = 0; + inbuf = ana->imp->im_mountp->mnt_stat.f_mntonname; + wlen = strlen(inbuf); + break; + + case ISO_SUSP_CFLAG_HOST: + /* Inserting hostname i.e. 
"kurt.tools.de" */ + inbuf = hostname; + wlen = hostnamelen; + break; + + case ISO_SUSP_CFLAG_CONTINUE: + cont = 1; + /* fall thru */ + case 0: + /* Inserting component */ + wlen = isonum_711(pcomp->clen); + inbuf = pcomp->name; + break; + default: + printf("RRIP with incorrect flags?"); + wlen = ana->maxlen + 1; + break; + } + + if (len + wlen > ana->maxlen) { + /* indicate error to caller */ + ana->cont = 1; + ana->fields = 0; + ana->outbuf -= *ana->outlen; + *ana->outlen = 0; + return 0; + } + + bcopy(inbuf,outbuf,wlen); + outbuf += wlen; + len += wlen; + + } + ana->outbuf = outbuf; + *ana->outlen = len; + ana->cont = cont; + + if (!isonum_711(p->flags)) { + ana->fields &= ~ISO_SUSP_SLINK; + return ISO_SUSP_SLINK; + } + return 0; +} + +/* + * Alternate name + */ +static int +cd9660_rrip_altname(p,ana) + ISO_RRIP_ALTNAME *p; + ISO_RRIP_ANALYZE *ana; +{ + char *inbuf; + int wlen; + int cont; + + inbuf = ".."; + wlen = 0; + cont = 0; + + switch (*p->flags) { + case ISO_SUSP_CFLAG_CURRENT: + /* Inserting Current */ + wlen = 1; + break; + + case ISO_SUSP_CFLAG_PARENT: + /* Inserting Parent */ + wlen = 2; + break; + + case ISO_SUSP_CFLAG_HOST: + /* Inserting hostname i.e. 
"kurt.tools.de" */ + inbuf = hostname; + wlen = hostnamelen; + break; + + case ISO_SUSP_CFLAG_CONTINUE: + cont = 1; + /* fall thru */ + case 0: + /* Inserting component */ + wlen = isonum_711(p->h.length) - 5; + inbuf = (char *)p + 5; + break; + + default: + printf("RRIP with incorrect NM flags?\n"); + wlen = ana->maxlen + 1; + break; + } + + if ((*ana->outlen += wlen) > ana->maxlen) { + /* treat as no name field */ + ana->fields &= ~ISO_SUSP_ALTNAME; + ana->outbuf -= *ana->outlen - wlen; + *ana->outlen = 0; + return 0; + } + + bcopy(inbuf,ana->outbuf,wlen); + ana->outbuf += wlen; + + if (!cont) { + ana->fields &= ~ISO_SUSP_ALTNAME; + return ISO_SUSP_ALTNAME; + } + return 0; +} + +static void +cd9660_rrip_defname(isodir,ana) + struct iso_directory_record *isodir; + ISO_RRIP_ANALYZE *ana; +{ + strcpy(ana->outbuf,".."); + switch (*isodir->name) { + default: + isofntrans(isodir->name, isonum_711(isodir->name_len), + ana->outbuf, ana->outlen, 1); + break; + case 0: + *ana->outlen = 1; + break; + case 1: + *ana->outlen = 2; + break; + } +} + +/* + * Parent or Child Link + */ +static int +cd9660_rrip_pclink(p,ana) + ISO_RRIP_CLINK *p; + ISO_RRIP_ANALYZE *ana; +{ + *ana->inump = isonum_733(p->dir_loc) << ana->imp->im_bshift; + ana->fields &= ~(ISO_SUSP_CLINK|ISO_SUSP_PLINK); + return *p->h.type == 'C' ? 
ISO_SUSP_CLINK : ISO_SUSP_PLINK; +} + +/* + * Relocated directory + */ +static int +cd9660_rrip_reldir(p,ana) + ISO_RRIP_RELDIR *p; + ISO_RRIP_ANALYZE *ana; +{ + /* special hack to make caller aware of RE field */ + *ana->outlen = 0; + ana->fields = 0; + return ISO_SUSP_RELDIR|ISO_SUSP_ALTNAME|ISO_SUSP_CLINK|ISO_SUSP_PLINK; +} + +static int +cd9660_rrip_tstamp(p,ana) + ISO_RRIP_TSTAMP *p; + ISO_RRIP_ANALYZE *ana; +{ + u_char *ptime; + + ptime = p->time; + + /* Check a format of time stamp (7bytes/17bytes) */ + if (!(*p->flags&ISO_SUSP_TSTAMP_FORM17)) { + if (*p->flags&ISO_SUSP_TSTAMP_CREAT) + ptime += 7; + + if (*p->flags&ISO_SUSP_TSTAMP_MODIFY) { + cd9660_tstamp_conv7(ptime,&ana->inop->inode.iso_mtime); + ptime += 7; + } else + bzero(&ana->inop->inode.iso_mtime,sizeof(struct timespec)); + + if (*p->flags&ISO_SUSP_TSTAMP_ACCESS) { + cd9660_tstamp_conv7(ptime,&ana->inop->inode.iso_atime); + ptime += 7; + } else + ana->inop->inode.iso_atime = ana->inop->inode.iso_mtime; + + if (*p->flags&ISO_SUSP_TSTAMP_ATTR) + cd9660_tstamp_conv7(ptime,&ana->inop->inode.iso_ctime); + else + ana->inop->inode.iso_ctime = ana->inop->inode.iso_mtime; + + } else { + if (*p->flags&ISO_SUSP_TSTAMP_CREAT) + ptime += 17; + + if (*p->flags&ISO_SUSP_TSTAMP_MODIFY) { + cd9660_tstamp_conv17(ptime,&ana->inop->inode.iso_mtime); + ptime += 17; + } else + bzero(&ana->inop->inode.iso_mtime,sizeof(struct timespec)); + + if (*p->flags&ISO_SUSP_TSTAMP_ACCESS) { + cd9660_tstamp_conv17(ptime,&ana->inop->inode.iso_atime); + ptime += 17; + } else + ana->inop->inode.iso_atime = ana->inop->inode.iso_mtime; + + if (*p->flags&ISO_SUSP_TSTAMP_ATTR) + cd9660_tstamp_conv17(ptime,&ana->inop->inode.iso_ctime); + else + ana->inop->inode.iso_ctime = ana->inop->inode.iso_mtime; + + } + ana->fields &= ~ISO_SUSP_TSTAMP; + return ISO_SUSP_TSTAMP; +} + +static void +cd9660_rrip_deftstamp(isodir,ana) + struct iso_directory_record *isodir; + ISO_RRIP_ANALYZE *ana; +{ + cd9660_deftstamp(isodir,ana->inop,NULL); +} + +/* + * 
POSIX device modes + */ +static int +cd9660_rrip_device(p,ana) + ISO_RRIP_DEVICE *p; + ISO_RRIP_ANALYZE *ana; +{ + u_int high, low; + + high = isonum_733(p->dev_t_high); + low = isonum_733(p->dev_t_low); + + if (high == 0) + ana->inop->inode.iso_rdev = makedev(major(low), minor(low)); + else + ana->inop->inode.iso_rdev = makedev(high, minor(low)); + ana->fields &= ~ISO_SUSP_DEVICE; + return ISO_SUSP_DEVICE; +} + +/* + * Flag indicating + */ +static int +cd9660_rrip_idflag(p,ana) + ISO_RRIP_IDFLAG *p; + ISO_RRIP_ANALYZE *ana; +{ + ana->fields &= isonum_711(p->flags)|~0xff; /* don't touch high bits */ + /* special handling of RE field */ + if (ana->fields&ISO_SUSP_RELDIR) + return cd9660_rrip_reldir(p,ana); + + return ISO_SUSP_IDFLAG; +} + +/* + * Continuation pointer + */ +static int +cd9660_rrip_cont(p,ana) + ISO_RRIP_CONT *p; + ISO_RRIP_ANALYZE *ana; +{ + ana->iso_ce_blk = isonum_733(p->location); + ana->iso_ce_off = isonum_733(p->offset); + ana->iso_ce_len = isonum_733(p->length); + return ISO_SUSP_CONT; +} + +/* + * System Use end + */ +static int +cd9660_rrip_stop(p,ana) + ISO_SUSP_HEADER *p; + ISO_RRIP_ANALYZE *ana; +{ + return ISO_SUSP_STOP; +} + +/* + * Extension reference + */ +static int +cd9660_rrip_extref(p,ana) + ISO_RRIP_EXTREF *p; + ISO_RRIP_ANALYZE *ana; +{ + if (isonum_711(p->len_id) != 10 + || bcmp((char *)p + 8,"RRIP_1991A",10) + || isonum_711(p->version) != 1) + return 0; + ana->fields &= ~ISO_SUSP_EXTREF; + return ISO_SUSP_EXTREF; +} + +typedef struct { + char type[2]; + int (*func)(); + void (*func2)(); + int result; +} RRIP_TABLE; + +static int +cd9660_rrip_loop(isodir,ana,table) + struct iso_directory_record *isodir; + ISO_RRIP_ANALYZE *ana; + RRIP_TABLE *table; +{ + register RRIP_TABLE *ptable; + register ISO_SUSP_HEADER *phead; + register ISO_SUSP_HEADER *pend; + struct buf *bp = NULL; + char *pwhead; + int result; + + /* + * Note: If name length is odd, + * it will be padding 1 byte after the name + */ + pwhead = isodir->name + 
isonum_711(isodir->name_len); + if (!(isonum_711(isodir->name_len)&1)) + pwhead++; + + /* If it's not the '.' entry of the root dir obey SP field */ + if (*isodir->name != 0 + || isonum_733(isodir->extent) != ana->imp->root_extent) + pwhead += ana->imp->rr_skip; + else + pwhead += ana->imp->rr_skip0; + + phead = (ISO_SUSP_HEADER *)pwhead; + pend = (ISO_SUSP_HEADER *)((char *)isodir + isonum_711(isodir->length)); + + result = 0; + while (1) { + ana->iso_ce_len = 0; + /* + * Note: "pend" should be more than one SUSP header + */ + while (pend >= phead + 1) { + if (isonum_711(phead->version) == 1) { + for (ptable = table; ptable->func; ptable++) { + if (*phead->type == *ptable->type + && phead->type[1] == ptable->type[1]) { + result |= ptable->func(phead,ana); + break; + } + } + if (!ana->fields) + break; + } + if (result&ISO_SUSP_STOP) { + result &= ~ISO_SUSP_STOP; + break; + } + /* plausibility check */ + if (isonum_711(phead->length) < sizeof(*phead)) + break; + /* + * move to next SUSP + * Hopefully this works with newer versions, too + */ + phead = (ISO_SUSP_HEADER *)((char *)phead + isonum_711(phead->length)); + } + + if (ana->fields && ana->iso_ce_len) { + if (ana->iso_ce_blk >= ana->imp->volume_space_size + || ana->iso_ce_off + ana->iso_ce_len > ana->imp->logical_block_size + || bread(ana->imp->im_devvp, +#if 1 // radar 1669467 - logical and physical blocksize are the same + ana->iso_ce_blk, +#else + ana->iso_ce_blk << (ana->imp->im_bshift - DEV_BSHIFT), +#endif // radar 1669467 + ana->imp->logical_block_size, NOCRED, &bp)) + /* what to do now? 
*/ + break; + phead = (ISO_SUSP_HEADER *)(bp->b_data + ana->iso_ce_off); + pend = (ISO_SUSP_HEADER *) ((char *)phead + ana->iso_ce_len); + } else + break; + } + if (bp) + brelse(bp); + /* + * If we don't find the Basic SUSP stuffs, just set default value + * (attribute/time stamp) + */ + for (ptable = table; ptable->func2; ptable++) + if (!(ptable->result&result)) + ptable->func2(isodir,ana); + + return result; +} + +/* + * Get Attributes. + */ +static RRIP_TABLE rrip_table_analyze[] = { + { "PX", cd9660_rrip_attr, cd9660_rrip_defattr, ISO_SUSP_ATTR }, + { "TF", cd9660_rrip_tstamp, cd9660_rrip_deftstamp, ISO_SUSP_TSTAMP }, + { "PN", cd9660_rrip_device, 0, ISO_SUSP_DEVICE }, + { "RR", cd9660_rrip_idflag, 0, ISO_SUSP_IDFLAG }, + { "CE", cd9660_rrip_cont, 0, ISO_SUSP_CONT }, + { "ST", cd9660_rrip_stop, 0, ISO_SUSP_STOP }, + { "", 0, 0, 0 } +}; + +int +cd9660_rrip_analyze(isodir,inop,imp) + struct iso_directory_record *isodir; + struct iso_node *inop; + struct iso_mnt *imp; +{ + ISO_RRIP_ANALYZE analyze; + + analyze.inop = inop; + analyze.imp = imp; + analyze.fields = ISO_SUSP_ATTR|ISO_SUSP_TSTAMP|ISO_SUSP_DEVICE; + + return cd9660_rrip_loop(isodir,&analyze,rrip_table_analyze); +} + +/* + * Get Alternate Name. 
+ */ +static RRIP_TABLE rrip_table_getname[] = { + { "NM", cd9660_rrip_altname, cd9660_rrip_defname, ISO_SUSP_ALTNAME }, + { "CL", cd9660_rrip_pclink, 0, ISO_SUSP_CLINK|ISO_SUSP_PLINK }, + { "PL", cd9660_rrip_pclink, 0, ISO_SUSP_CLINK|ISO_SUSP_PLINK }, + { "RE", cd9660_rrip_reldir, 0, ISO_SUSP_RELDIR }, + { "RR", cd9660_rrip_idflag, 0, ISO_SUSP_IDFLAG }, + { "CE", cd9660_rrip_cont, 0, ISO_SUSP_CONT }, + { "ST", cd9660_rrip_stop, 0, ISO_SUSP_STOP }, + { "", 0, 0, 0 } +}; + +int +cd9660_rrip_getname(isodir,outbuf,outlen,inump,imp) + struct iso_directory_record *isodir; + char *outbuf; + u_short *outlen; + ino_t *inump; + struct iso_mnt *imp; +{ + ISO_RRIP_ANALYZE analyze; + RRIP_TABLE *tab; + + analyze.outbuf = outbuf; + analyze.outlen = outlen; + analyze.maxlen = ISO_RRIP_NAMEMAX; + analyze.inump = inump; + analyze.imp = imp; + analyze.fields = ISO_SUSP_ALTNAME|ISO_SUSP_RELDIR|ISO_SUSP_CLINK|ISO_SUSP_PLINK; + *outlen = 0; + + tab = rrip_table_getname; + if (*isodir->name == 0 + || *isodir->name == 1) { + cd9660_rrip_defname(isodir,&analyze); + + analyze.fields &= ~ISO_SUSP_ALTNAME; + tab++; + } + + return cd9660_rrip_loop(isodir,&analyze,tab); +} + +/* + * Get Symbolic Link. 
+ */ +static RRIP_TABLE rrip_table_getsymname[] = { + { "SL", cd9660_rrip_slink, 0, ISO_SUSP_SLINK }, + { "RR", cd9660_rrip_idflag, 0, ISO_SUSP_IDFLAG }, + { "CE", cd9660_rrip_cont, 0, ISO_SUSP_CONT }, + { "ST", cd9660_rrip_stop, 0, ISO_SUSP_STOP }, + { "", 0, 0, 0 } +}; + +int +cd9660_rrip_getsymname(isodir,outbuf,outlen,imp) + struct iso_directory_record *isodir; + char *outbuf; + u_short *outlen; + struct iso_mnt *imp; +{ + ISO_RRIP_ANALYZE analyze; + + analyze.outbuf = outbuf; + analyze.outlen = outlen; + *outlen = 0; + analyze.maxlen = MAXPATHLEN; + analyze.cont = 1; /* don't start with a slash */ + analyze.imp = imp; + analyze.fields = ISO_SUSP_SLINK; + + return (cd9660_rrip_loop(isodir,&analyze,rrip_table_getsymname)&ISO_SUSP_SLINK); +} + +static RRIP_TABLE rrip_table_extref[] = { + { "ER", cd9660_rrip_extref, 0, ISO_SUSP_EXTREF }, + { "CE", cd9660_rrip_cont, 0, ISO_SUSP_CONT }, + { "ST", cd9660_rrip_stop, 0, ISO_SUSP_STOP }, + { "", 0, 0, 0 } +}; + +/* + * Check for Rock Ridge Extension and return offset of its fields. + * Note: We insist on the ER field. + */ +int +cd9660_rrip_offset(isodir,imp) + struct iso_directory_record *isodir; + struct iso_mnt *imp; +{ + ISO_RRIP_OFFSET *p; + ISO_RRIP_ANALYZE analyze; + + imp->rr_skip0 = 0; + p = (ISO_RRIP_OFFSET *)(isodir->name + 1); + if (bcmp(p,"SP\7\1\276\357",6)) { + /* Maybe, it's a CDROM XA disc? */ + imp->rr_skip0 = 15; + p = (ISO_RRIP_OFFSET *)((char *)p + 15); + if (bcmp(p,"SP\7\1\276\357",6)) + return -1; + } + + analyze.imp = imp; + analyze.fields = ISO_SUSP_EXTREF; + if (!(cd9660_rrip_loop(isodir,&analyze,rrip_table_extref)&ISO_SUSP_EXTREF)) + return -1; + + return isonum_711(p->skip); +} diff --git a/bsd/isofs/cd9660/cd9660_rrip.h b/bsd/isofs/cd9660/cd9660_rrip.h new file mode 100644 index 000000000..4e62dc4e7 --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_rrip.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_rrip.h,v 1.6 1994/12/13 22:33:24 mycroft Exp $ */ + +/*- + * Copyright (c) 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)cd9660_rrip.h 8.2 (Berkeley) 12/5/94 + */ + +typedef struct { + char type [ISODCL ( 0, 1)]; + u_char length [ISODCL ( 2, 2)]; /* 711 */ + u_char version [ISODCL ( 3, 3)]; +} ISO_SUSP_HEADER; + +typedef struct { + ISO_SUSP_HEADER h; + char mode [ISODCL ( 4, 11)]; /* 733 */ + char links [ISODCL ( 12, 19)]; /* 733 */ + char uid [ISODCL ( 20, 27)]; /* 733 */ + char gid [ISODCL ( 28, 35)]; /* 733 */ +} ISO_RRIP_ATTR; + +typedef struct { + ISO_SUSP_HEADER h; + char dev_t_high [ISODCL ( 4, 11)]; /* 733 */ + char dev_t_low [ISODCL ( 12, 19)]; /* 733 */ +} ISO_RRIP_DEVICE; + +#define ISO_SUSP_CFLAG_CONTINUE 0x01 +#define ISO_SUSP_CFLAG_CURRENT 0x02 +#define ISO_SUSP_CFLAG_PARENT 0x04 +#define ISO_SUSP_CFLAG_ROOT 0x08 +#define ISO_SUSP_CFLAG_VOLROOT 0x10 +#define ISO_SUSP_CFLAG_HOST 0x20 + +typedef struct { + u_char cflag [ISODCL ( 1, 1)]; + u_char clen [ISODCL ( 2, 2)]; + u_char name [1]; /* XXX */ +} ISO_RRIP_SLINK_COMPONENT; +#define ISO_RRIP_SLSIZ 2 + +typedef struct { + ISO_SUSP_HEADER h; + u_char flags [ISODCL ( 4, 4)]; + u_char component [ISODCL ( 5, 5)]; +} ISO_RRIP_SLINK; + +typedef struct { + ISO_SUSP_HEADER h; + char flags [ISODCL ( 4, 4)]; +} ISO_RRIP_ALTNAME; + +typedef struct { + ISO_SUSP_HEADER h; + char dir_loc [ISODCL ( 4, 11)]; /* 733 */ +} ISO_RRIP_CLINK; + +typedef struct { + ISO_SUSP_HEADER h; + char dir_loc [ISODCL ( 4, 11)]; /* 733 */ +} ISO_RRIP_PLINK; + +typedef struct { + ISO_SUSP_HEADER h; +} ISO_RRIP_RELDIR; + +#define ISO_SUSP_TSTAMP_FORM17 0x80 +#define ISO_SUSP_TSTAMP_FORM7 0x00 +#define ISO_SUSP_TSTAMP_CREAT 0x01 +#define ISO_SUSP_TSTAMP_MODIFY 0x02 +#define ISO_SUSP_TSTAMP_ACCESS 0x04 +#define ISO_SUSP_TSTAMP_ATTR 0x08 +#define ISO_SUSP_TSTAMP_BACKUP 0x10 +#define ISO_SUSP_TSTAMP_EXPIRE 0x20 +#define ISO_SUSP_TSTAMP_EFFECT 0x40 + +typedef struct { + ISO_SUSP_HEADER h; + u_char flags [ISODCL ( 4, 4)]; + u_char time [ISODCL ( 5, 5)]; +} ISO_RRIP_TSTAMP; + +typedef struct { + ISO_SUSP_HEADER h; + u_char flags [ISODCL ( 4, 4)]; +} 
ISO_RRIP_IDFLAG; + +typedef struct { + ISO_SUSP_HEADER h; + char len_id [ISODCL ( 4, 4)]; + char len_des [ISODCL ( 5, 5)]; + char len_src [ISODCL ( 6, 6)]; + char version [ISODCL ( 7, 7)]; +} ISO_RRIP_EXTREF; + +typedef struct { + ISO_SUSP_HEADER h; + char check [ISODCL ( 4, 5)]; + char skip [ISODCL ( 6, 6)]; +} ISO_RRIP_OFFSET; + +typedef struct { + ISO_SUSP_HEADER h; + char location [ISODCL ( 4, 11)]; + char offset [ISODCL ( 12, 19)]; + char length [ISODCL ( 20, 27)]; +} ISO_RRIP_CONT; diff --git a/bsd/isofs/cd9660/cd9660_util.c b/bsd/isofs/cd9660/cd9660_util.c new file mode 100644 index 000000000..689c091bc --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_util.c @@ -0,0 +1,927 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_util.c,v 1.8 1994/12/13 22:33:25 mycroft Exp $ */ + +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). 
The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)cd9660_util.c 8.3 (Berkeley) 12/5/94 + * + * HISTORY + * 7-Dec-98 Add ATTR_VOL_MOUNTFLAGS attribute support - djb + * 18-Nov-98 Add support for volfs - djb + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* XXX */ +#include /* XXX */ +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * translate and compare a filename + * Note: Version number plus ';' may be omitted. + */ +int +isofncmp(fn, fnlen, isofn, isolen) + u_char *fn, *isofn; + int fnlen, isolen; +{ + int i, j; + char c; + + while (--fnlen >= 0) { + if (--isolen < 0) + return *fn; + if ((c = *isofn++) == ';') { + switch (*fn++) { + default: + return *--fn; + case 0: + return 0; + case ';': + break; + } + for (i = 0; --fnlen >= 0; i = i * 10 + *fn++ - '0') { + if (*fn < '0' || *fn > '9') { + return -1; + } + } + for (j = 0; --isolen >= 0; j = j * 10 + *isofn++ - '0'); + return i - j; + } + /* if raw comparison fails, check if char was mapped */ + if (c != *fn) { + if (c >= 'A' && c <= 'Z') { + if (c + ('a' - 'A') != *fn) { + if (*fn >= 'a' && *fn <= 'z') + return *fn - ('a' - 'A') - c; + else + return *fn - c; + } + } else if (c == '/') { + if (*fn != ':') + return *fn - c; + } else if (c > 0 || *fn != '_') + return *fn - c; + } + fn++; + } + if (isolen > 0) { + switch (*isofn) { + default: + return -1; + case '.': + if (isofn[1] != ';') + return -1; + case ';': + return 0; + } + } + return 0; +} + + +/* + * translate and compare a UCS-2 filename + * Note: Version number plus ';' may be omitted. 
+ */ + +int +ucsfncmp(fn, fnlen, ucsfn, ucslen) + u_int16_t *fn; + int fnlen; + u_int16_t *ucsfn; + int ucslen; +{ + int i, j; + u_int16_t c; + + /* convert byte count to char count */ + ucslen /= 2; + fnlen /= 2; + + while (--fnlen >= 0) { + if (--ucslen < 0) + return *fn; + if ((c = *ucsfn++) == UCS_SEPARATOR2) { + switch (*fn++) { + default: + return *--fn; + case 0: + return 0; + case UCS_SEPARATOR2: + break; + } + for (i = 0; --fnlen >= 0; i = i * 10 + *fn++ - '0') { + if (*fn < '0' || *fn > '9') { + return -1; + } + } + for (j = 0; --ucslen >= 0; j = j * 10 + *ucsfn++ - '0'); + return i - j; + } + if (c != *fn) + return *fn - c; + fn++; + } + if (ucslen > 0) { + switch (*ucsfn) { + default: + return -1; + case UCS_SEPARATOR1: + if (ucsfn[1] != UCS_SEPARATOR2) + return -1; + case UCS_SEPARATOR2: + return 0; + } + } + return 0; +} + + +/* + * translate a filename + */ +void +isofntrans(infn, infnlen, outfn, outfnlen, original) + u_char *infn, *outfn; + int infnlen; + u_short *outfnlen; + int original; +{ + int fnidx = 0; + + for (; fnidx < infnlen; fnidx++) { + char c = *infn++; + + /* + * Some ISO 9600 CD names contain 8-bit chars. + * These chars are mapped to '_' because there + * is no context for mapping them to UTF-8. + * In addition '/' is mapped to ':'. + * + * isofncmp accounts for these mappings. + */ + if (!original) { + if (c < 0) + c = '_'; + else if (c == '/') + c = ':'; + else if (c == '.' 
&& *infn == ';') + break; + else if (c == ';') + break; + } + *outfn++ = c; + } + *outfnlen = fnidx; +} + + + +/* + * translate a UCS-2 filename to UTF-8 + */ +void +ucsfntrans(infn, infnlen, outfn, outfnlen, dir) + u_int16_t *infn; + int infnlen; + u_char *outfn; + u_short *outfnlen; + int dir; +{ + if (infnlen == 1) { + strcpy(outfn, ".."); + + if (*(u_char*)infn == 0) + *outfnlen = 1; + else if (*(u_char*)infn == 1) + *outfnlen = 2; + } else { + int fnidx; + size_t outbytes; + int flags; + + fnidx = infnlen/2; + flags = 0; + + if (!dir) { + /* strip file version number */ + for (fnidx--; fnidx > 0; fnidx--) { + /* stop when ';' is found */ + if (infn[fnidx] == UCS_SEPARATOR2) { + /* drop dangling dot */ + if (fnidx > 0 && infn[fnidx-1] == UCS_SEPARATOR1) + fnidx--; + break; + } + } + if (fnidx <= 0) + fnidx = infnlen/2; + } + + flags = UTF_NO_NULL_TERM; + if (BYTE_ORDER != BIG_ENDIAN) + flags |= UTF_REVERSE_ENDIAN; + + (void) utf8_encodestr(infn, fnidx * 2, outfn, &outbytes, ISO_JOLIET_NAMEMAX, 0, flags); + *outfnlen = outbytes; + } +} + + +/* + * count the number of children by enumerating the directory + */ +static int +isochildcount(vdp, dircnt, filcnt) + struct vnode *vdp; + int *dircnt; + int *filcnt; +{ + struct iso_node *dp; + struct buf *bp = NULL; + struct iso_directory_record *ep; + u_long bmask; + int error = 0; + int reclen; + int dirs, files; + int blkoffset; + int logblksize; + long diroffset; + + dp = VTOI(vdp); + bmask = dp->i_mnt->im_bmask; + logblksize = dp->i_mnt->logical_block_size; + blkoffset = diroffset = 0; + dirs = files = 0; + + while (diroffset < dp->i_size) { + /* + * If offset is on a block boundary, read the next + * directory block. Release previous if it exists. 
+ */ + if ((diroffset & bmask) == 0) { + if (bp != NULL) + brelse(bp); + if ( (error = VOP_BLKATOFF(vdp, diroffset, NULL, &bp)) ) + break; + blkoffset = 0; + } + + ep = (struct iso_directory_record *) + ((char *)bp->b_data + blkoffset); + + reclen = isonum_711(ep->length); + if (reclen == 0) { + /* skip to next block, if any */ + diroffset = + (diroffset & ~bmask) + logblksize; + continue; + } + + if ((reclen < ISO_DIRECTORY_RECORD_SIZE) || + (blkoffset + reclen > logblksize) || + (reclen < ISO_DIRECTORY_RECORD_SIZE + isonum_711(ep->name_len))){ + /* illegal, so give up */ + break; + } + + if ( isonum_711(ep->flags) & directoryBit ) + dirs++; + else if ((isonum_711(ep->flags) & associatedBit) == 0) + files++; + + diroffset += reclen; + blkoffset += reclen; + } + + if (bp) + brelse (bp); + + *dircnt = dirs; + *filcnt = files; + + return (error); +} + + +/* + * There are two ways to qualify for ownership rights on an object: + * + * 1. Your UID matches the UID of the vnode + * 2. You are root + * + */ +static int cd9660_owner_rights(uid_t owner, struct iso_mnt *imp, struct ucred *cred, struct proc *p, int invokesuperuserstatus) { + return ((cred->cr_uid == owner) || /* [1] */ + (invokesuperuserstatus && (suser(cred, &p->p_acflag) == 0))) ? 0 : EPERM; /* [2] */ +} + + + +static unsigned long DerivePermissionSummary(uid_t owner, gid_t group, mode_t obj_mode, struct iso_mnt *imp, struct ucred *cred, struct proc *p) { + register gid_t *gp; + unsigned long permissions; + int i; + + /* User id 0 (root) always gets access. */ + if (cred->cr_uid == 0) { + permissions = R_OK | X_OK; + goto Exit; + }; + + /* Otherwise, check the owner. */ + if (cd9660_owner_rights(owner, imp, cred, p, 0) == 0) { + permissions = ((unsigned long)obj_mode & S_IRWXU) >> 6; + goto Exit; + } + + /* Otherwise, check the groups. 
*/ + for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) { + if (group == *gp) { + permissions = ((unsigned long)obj_mode & S_IRWXG) >> 3; + goto Exit; + } + }; + + /* Otherwise, settle for 'others' access. */ + permissions = (unsigned long)obj_mode & S_IRWXO; + +Exit: + return permissions & ~W_OK; /* Write access is always impossible */ +} + + +int +attrcalcsize(struct attrlist *attrlist) +{ + int size; + attrgroup_t a; + +#if ((ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | ATTR_CMN_OBJTYPE | \ + ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | ATTR_CMN_OBJPERMANENTID | ATTR_CMN_PAROBJID | \ + ATTR_CMN_SCRIPT | ATTR_CMN_CRTIME | ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | \ + ATTR_CMN_ACCTIME | ATTR_CMN_BKUPTIME | ATTR_CMN_FNDRINFO | ATTR_CMN_OWNERID | \ + ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST| \ + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS) != ATTR_CMN_VALIDMASK) +#error AttributeBlockSize: Missing bits in common mask computation! +#endif + assert((attrlist->commonattr & ~ATTR_CMN_VALIDMASK) == 0); + +#if ((ATTR_VOL_FSTYPE | ATTR_VOL_SIGNATURE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | \ + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | ATTR_VOL_ALLOCATIONCLUMP | ATTR_VOL_IOBLOCKSIZE | \ + ATTR_VOL_OBJCOUNT | ATTR_VOL_FILECOUNT | ATTR_VOL_DIRCOUNT | ATTR_VOL_MAXOBJCOUNT | \ + ATTR_VOL_MOUNTPOINT | ATTR_VOL_NAME | ATTR_VOL_MOUNTFLAGS | ATTR_VOL_INFO | \ + ATTR_VOL_MOUNTEDDEVICE| ATTR_VOL_ENCODINGSUSED | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES) != ATTR_VOL_VALIDMASK) +#error AttributeBlockSize: Missing bits in volume mask computation! +#endif + assert((attrlist->volattr & ~ATTR_VOL_VALIDMASK) == 0); + +#if ((ATTR_DIR_LINKCOUNT | ATTR_DIR_ENTRYCOUNT | ATTR_DIR_MOUNTSTATUS) != ATTR_DIR_VALIDMASK) +#error AttributeBlockSize: Missing bits in directory mask computation! 
+#endif + assert((attrlist->dirattr & ~ATTR_DIR_VALIDMASK) == 0); +#if ((ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | ATTR_FILE_ALLOCSIZE | ATTR_FILE_IOBLOCKSIZE | \ + ATTR_FILE_CLUMPSIZE | ATTR_FILE_DEVTYPE | ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | \ + ATTR_FILE_FORKLIST | ATTR_FILE_DATALENGTH | ATTR_FILE_DATAALLOCSIZE | ATTR_FILE_DATAEXTENTS | \ + ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE | ATTR_FILE_RSRCEXTENTS) != ATTR_FILE_VALIDMASK) +#error AttributeBlockSize: Missing bits in file mask computation! +#endif + assert((attrlist->fileattr & ~ATTR_FILE_VALIDMASK) == 0); + +#if ((ATTR_FORK_TOTALSIZE | ATTR_FORK_ALLOCSIZE) != ATTR_FORK_VALIDMASK) +#error AttributeBlockSize: Missing bits in fork mask computation! +#endif + assert((attrlist->forkattr & ~ATTR_FORK_VALIDMASK) == 0); + + size = 0; + + if ((a = attrlist->commonattr) != 0) { + if (a & ATTR_CMN_NAME) size += sizeof(struct attrreference); + if (a & ATTR_CMN_DEVID) size += sizeof(dev_t); + if (a & ATTR_CMN_FSID) size += sizeof(fsid_t); + if (a & ATTR_CMN_OBJTYPE) size += sizeof(fsobj_type_t); + if (a & ATTR_CMN_OBJTAG) size += sizeof(fsobj_tag_t); + if (a & ATTR_CMN_OBJID) size += sizeof(fsobj_id_t); + if (a & ATTR_CMN_OBJPERMANENTID) size += sizeof(fsobj_id_t); + if (a & ATTR_CMN_PAROBJID) size += sizeof(fsobj_id_t); + if (a & ATTR_CMN_SCRIPT) size += sizeof(text_encoding_t); + if (a & ATTR_CMN_CRTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_MODTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_CHGTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_ACCTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_BKUPTIME) size += sizeof(struct timespec); + if (a & ATTR_CMN_FNDRINFO) size += 32 * sizeof(u_int8_t); + if (a & ATTR_CMN_OWNERID) size += sizeof(uid_t); + if (a & ATTR_CMN_GRPID) size += sizeof(gid_t); + if (a & ATTR_CMN_ACCESSMASK) size += sizeof(u_long); + if (a & ATTR_CMN_NAMEDATTRCOUNT) size += sizeof(u_long); + if (a & ATTR_CMN_NAMEDATTRLIST) size += sizeof(struct 
attrreference); + if (a & ATTR_CMN_FLAGS) size += sizeof(u_long); + if (a & ATTR_CMN_USERACCESS) size += sizeof(u_long); + }; + if ((a = attrlist->volattr) != 0) { + if (a & ATTR_VOL_FSTYPE) size += sizeof(u_long); + if (a & ATTR_VOL_SIGNATURE) size += sizeof(u_long); + if (a & ATTR_VOL_SIZE) size += sizeof(off_t); + if (a & ATTR_VOL_SPACEFREE) size += sizeof(off_t); + if (a & ATTR_VOL_SPACEAVAIL) size += sizeof(off_t); + if (a & ATTR_VOL_MINALLOCATION) size += sizeof(off_t); + if (a & ATTR_VOL_ALLOCATIONCLUMP) size += sizeof(off_t); + if (a & ATTR_VOL_IOBLOCKSIZE) size += sizeof(size_t); + if (a & ATTR_VOL_OBJCOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_FILECOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_DIRCOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_MAXOBJCOUNT) size += sizeof(u_long); + if (a & ATTR_VOL_MOUNTPOINT) size += sizeof(struct attrreference); + if (a & ATTR_VOL_NAME) size += sizeof(struct attrreference); + if (a & ATTR_VOL_MOUNTFLAGS) size += sizeof(u_long); + if (a & ATTR_VOL_MOUNTEDDEVICE) size += sizeof(struct attrreference); + if (a & ATTR_VOL_ENCODINGSUSED) size += sizeof(unsigned long long); + if (a & ATTR_VOL_CAPABILITIES) size += sizeof(vol_capabilities_attr_t); + if (a & ATTR_VOL_ATTRIBUTES) size += sizeof(vol_attributes_attr_t); + }; + if ((a = attrlist->dirattr) != 0) { + if (a & ATTR_DIR_LINKCOUNT) size += sizeof(u_long); + if (a & ATTR_DIR_ENTRYCOUNT) size += sizeof(u_long); + if (a & ATTR_DIR_MOUNTSTATUS) size += sizeof(u_long); + }; + if ((a = attrlist->fileattr) != 0) { + if (a & ATTR_FILE_LINKCOUNT) size += sizeof(u_long); + if (a & ATTR_FILE_TOTALSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_ALLOCSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_IOBLOCKSIZE) size += sizeof(size_t); + if (a & ATTR_FILE_CLUMPSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_DEVTYPE) size += sizeof(u_long); + if (a & ATTR_FILE_FILETYPE) size += sizeof(u_long); + if (a & ATTR_FILE_FORKCOUNT) size += sizeof(u_long); + if (a & 
ATTR_FILE_FORKLIST) size += sizeof(struct attrreference); + if (a & ATTR_FILE_DATALENGTH) size += sizeof(off_t); + if (a & ATTR_FILE_DATAALLOCSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_DATAEXTENTS) size += sizeof(extentrecord); + if (a & ATTR_FILE_RSRCLENGTH) size += sizeof(off_t); + if (a & ATTR_FILE_RSRCALLOCSIZE) size += sizeof(off_t); + if (a & ATTR_FILE_RSRCEXTENTS) size += sizeof(extentrecord); + }; + if ((a = attrlist->forkattr) != 0) { + if (a & ATTR_FORK_TOTALSIZE) size += sizeof(off_t); + if (a & ATTR_FORK_ALLOCSIZE) size += sizeof(off_t); + }; + + return size; +} + + + +void +packvolattr (struct attrlist *alist, + struct iso_node *ip, /* ip for root directory */ + void **attrbufptrptr, + void **varbufptrptr) +{ + void *attrbufptr; + void *varbufptr; + struct iso_mnt *imp; + struct mount *mp; + attrgroup_t a; + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + imp = ip->i_mnt; + mp = imp->im_mountp; + + if ((a = alist->commonattr) != 0) { + if (a & ATTR_CMN_NAME) { + attrlength = strlen( imp->volume_id ) + 1; + ((struct attrreference *)attrbufptr)->attr_dataoffset = (u_int8_t *)varbufptr - (u_int8_t *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + (void) strncpy((unsigned char *)varbufptr, imp->volume_id, attrlength); + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (u_int8_t *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_CMN_DEVID) *((dev_t *)attrbufptr)++ = imp->im_devvp->v_rdev; + if (a & ATTR_CMN_FSID) *((fsid_t *)attrbufptr)++ = ITOV(ip)->v_mount->mnt_stat.f_fsid; + if (a & ATTR_CMN_OBJTYPE) *((fsobj_type_t *)attrbufptr)++ = 0; + if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = VT_ISOFS; + if (a & ATTR_CMN_OBJID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = 0; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & 
ATTR_CMN_OBJPERMANENTID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = 0; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_PAROBJID) { + ((fsobj_id_t *)attrbufptr)->fid_objno = 0; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_SCRIPT) *((text_encoding_t *)attrbufptr)++ = 0; + if (a & ATTR_CMN_CRTIME) *((struct timespec *)attrbufptr)++ = imp->creation_date; + if (a & ATTR_CMN_MODTIME) *((struct timespec *)attrbufptr)++ = imp->modification_date; + if (a & ATTR_CMN_CHGTIME) *((struct timespec *)attrbufptr)++ = imp->modification_date; + if (a & ATTR_CMN_ACCTIME) *((struct timespec *)attrbufptr)++ = imp->modification_date; + if (a & ATTR_CMN_BKUPTIME) { + ((struct timespec *)attrbufptr)->tv_sec = 0; + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_FNDRINFO) { + bzero (attrbufptr, 32 * sizeof(u_int8_t)); + (u_int8_t *)attrbufptr += 32 * sizeof(u_int8_t); + }; + if (a & ATTR_CMN_OWNERID) *((uid_t *)attrbufptr)++ = ip->inode.iso_uid; + if (a & ATTR_CMN_GRPID) *((gid_t *)attrbufptr)++ = ip->inode.iso_gid; + if (a & ATTR_CMN_ACCESSMASK) *((u_long *)attrbufptr)++ = (u_long)ip->inode.iso_mode; + if (a & ATTR_CMN_FLAGS) *((u_long *)attrbufptr)++ = 0; + if (a & ATTR_CMN_USERACCESS) { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary(ip->inode.iso_uid, + ip->inode.iso_gid, + ip->inode.iso_mode, + imp, + current_proc()->p_ucred, + current_proc()); + }; + }; + + if ((a = alist->volattr) != 0) { + off_t blocksize = (off_t)imp->logical_block_size; + + if (a & ATTR_VOL_FSTYPE) *((u_long *)attrbufptr)++ = (u_long)imp->im_mountp->mnt_vfc->vfc_typenum; + if (a & ATTR_VOL_SIGNATURE) *((u_long *)attrbufptr)++ = (u_long)ISO9660SIGNATURE; + if (a & ATTR_VOL_SIZE) *((off_t *)attrbufptr)++ = (off_t)imp->volume_space_size * blocksize; + if (a & ATTR_VOL_SPACEFREE) *((off_t *)attrbufptr)++ = 0; + if (a & 
ATTR_VOL_SPACEAVAIL) *((off_t *)attrbufptr)++ = 0; + if (a & ATTR_VOL_MINALLOCATION) *((off_t *)attrbufptr)++ = blocksize; + if (a & ATTR_VOL_ALLOCATIONCLUMP) *((off_t *)attrbufptr)++ = blocksize; + if (a & ATTR_VOL_IOBLOCKSIZE) *((size_t *)attrbufptr)++ = blocksize; + if (a & ATTR_VOL_OBJCOUNT) *((u_long *)attrbufptr)++ = 0; + if (a & ATTR_VOL_FILECOUNT) *((u_long *)attrbufptr)++ = 0; + if (a & ATTR_VOL_DIRCOUNT) *((u_long *)attrbufptr)++ = 0; + if (a & ATTR_VOL_MAXOBJCOUNT) *((u_long *)attrbufptr)++ = 0xFFFFFFFF; + if (a & ATTR_VOL_NAME) { + attrlength = strlen( imp->volume_id ) + 1; + ((struct attrreference *)attrbufptr)->attr_dataoffset = (u_int8_t *)varbufptr - (u_int8_t *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + (void) strncpy((unsigned char *)varbufptr, imp->volume_id, attrlength); + + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (u_int8_t *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_VOL_MOUNTFLAGS) *((u_long *)attrbufptr)++ = (u_long)imp->im_mountp->mnt_flag; + if (a & ATTR_VOL_MOUNTEDDEVICE) { + ((struct attrreference *)attrbufptr)->attr_dataoffset = (u_int8_t *)varbufptr - (u_int8_t *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = strlen(mp->mnt_stat.f_mntfromname) + 1; + attrlength = ((struct attrreference *)attrbufptr)->attr_length; + attrlength = attrlength + ((4 - (attrlength & 3)) & 3); /* round up to the next 4-byte boundary: */ + (void) bcopy(mp->mnt_stat.f_mntfromname, varbufptr, attrlength); + + /* Advance beyond the space just allocated: */ + (u_int8_t *)varbufptr += attrlength; + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_VOL_ENCODINGSUSED) *((unsigned long long *)attrbufptr)++ = (unsigned long long)0; + if (a & ATTR_VOL_CAPABILITIES) { + ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_FORMAT] = VOL_CAP_FMT_PERSISTENTOBJECTIDS; + 
((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_INTERFACES] = + VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT; + ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_RESERVED1] = 0; + ((vol_capabilities_attr_t *)attrbufptr)->capabilities[VOL_CAPABILITIES_RESERVED2] = 0; + + ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_FORMAT] = + VOL_CAP_FMT_PERSISTENTOBJECTIDS | VOL_CAP_FMT_SYMBOLICLINKS | VOL_CAP_FMT_HARDLINKS; + ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_INTERFACES] = + VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_NFSEXPORT; + ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_RESERVED1] = 0; + ((vol_capabilities_attr_t *)attrbufptr)->valid[VOL_CAPABILITIES_RESERVED2] = 0; + + ++((vol_capabilities_attr_t *)attrbufptr); + }; + if (a & ATTR_VOL_ATTRIBUTES) { + ((vol_attributes_attr_t *)attrbufptr)->validattr.commonattr = ATTR_CMN_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->validattr.volattr = ATTR_VOL_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->validattr.dirattr = ATTR_DIR_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->validattr.fileattr = ATTR_FILE_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->validattr.forkattr = ATTR_FORK_VALIDMASK; + + ((vol_attributes_attr_t *)attrbufptr)->nativeattr.commonattr = ATTR_CMN_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->nativeattr.volattr = ATTR_VOL_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->nativeattr.dirattr = ATTR_DIR_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->nativeattr.fileattr = ATTR_FILE_VALIDMASK; + ((vol_attributes_attr_t *)attrbufptr)->nativeattr.forkattr = ATTR_FORK_VALIDMASK; + + ++((vol_attributes_attr_t *)attrbufptr); + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + +void +packcommonattr (struct attrlist *alist, + struct iso_node *ip, + void **attrbufptrptr, + void **varbufptrptr) +{ + void *attrbufptr; + void *varbufptr; + 
attrgroup_t a; + u_long attrlength; + + attrbufptr = *attrbufptrptr; + varbufptr = *varbufptrptr; + + if ((a = alist->commonattr) != 0) { + struct iso_mnt *imp = ip->i_mnt; + + if (a & ATTR_CMN_NAME) { + /* special case root since we know how to get it's name */ + if (ITOV(ip)->v_flag & VROOT) { + attrlength = strlen( imp->volume_id ) + 1; + (void) strncpy((unsigned char *)varbufptr, imp->volume_id, attrlength); + } else { + attrlength = strlen(ip->i_namep) + 1; + (void) strncpy((unsigned char *)varbufptr, ip->i_namep, attrlength); + } + + ((struct attrreference *)attrbufptr)->attr_dataoffset = (u_int8_t *)varbufptr - (u_int8_t *)attrbufptr; + ((struct attrreference *)attrbufptr)->attr_length = attrlength; + /* Advance beyond the space just allocated and round up to the next 4-byte boundary: */ + (u_int8_t *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3); + ++((struct attrreference *)attrbufptr); + }; + if (a & ATTR_CMN_DEVID) *((dev_t *)attrbufptr)++ = ip->i_dev; + if (a & ATTR_CMN_FSID) *((fsid_t *)attrbufptr)++ = ITOV(ip)->v_mount->mnt_stat.f_fsid; + if (a & ATTR_CMN_OBJTYPE) *((fsobj_type_t *)attrbufptr)++ = ITOV(ip)->v_type; + if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = ITOV(ip)->v_tag; + if (a & ATTR_CMN_OBJID) { + if (ITOV(ip)->v_flag & VROOT) + ((fsobj_id_t *)attrbufptr)->fid_objno = 2; /* force root to be 2 */ + else + ((fsobj_id_t *)attrbufptr)->fid_objno = ip->i_number; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_OBJPERMANENTID) { + if (ITOV(ip)->v_flag & VROOT) + ((fsobj_id_t *)attrbufptr)->fid_objno = 2; /* force root to be 2 */ + else + ((fsobj_id_t *)attrbufptr)->fid_objno = ip->i_number; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_PAROBJID) { + struct iso_directory_record *dp = (struct iso_directory_record *)imp->root; + ino_t rootino = isodirino(dp, imp); + + if (ip->i_number == rootino) + ((fsobj_id_t 
*)attrbufptr)->fid_objno = 1; /* force root parent to be 1 */ + else if (ip->i_parent == rootino) + ((fsobj_id_t *)attrbufptr)->fid_objno = 2; /* force root to be 2 */ + else + ((fsobj_id_t *)attrbufptr)->fid_objno = ip->i_parent; + ((fsobj_id_t *)attrbufptr)->fid_generation = 0; + ++((fsobj_id_t *)attrbufptr); + }; + if (a & ATTR_CMN_SCRIPT) *((text_encoding_t *)attrbufptr)++ = 0; + if (a & ATTR_CMN_CRTIME) *((struct timespec *)attrbufptr)++ = ip->inode.iso_mtime; + if (a & ATTR_CMN_MODTIME) *((struct timespec *)attrbufptr)++ = ip->inode.iso_mtime; + if (a & ATTR_CMN_CHGTIME) *((struct timespec *)attrbufptr)++ = ip->inode.iso_ctime; + if (a & ATTR_CMN_ACCTIME) *((struct timespec *)attrbufptr)++ = ip->inode.iso_atime; + if (a & ATTR_CMN_BKUPTIME) { + ((struct timespec *)attrbufptr)->tv_sec = 0; + ((struct timespec *)attrbufptr)->tv_nsec = 0; + ++((struct timespec *)attrbufptr); + }; + if (a & ATTR_CMN_FNDRINFO) { + struct finder_info finfo = {0}; + + finfo.fdFlags = ip->i_FinderFlags; + if (ITOV(ip)->v_type == VREG) { + finfo.fdType = ip->i_FileType; + finfo.fdCreator = ip->i_Creator; + } + bcopy (&finfo, attrbufptr, sizeof(finfo)); + (u_int8_t *)attrbufptr += sizeof(finfo); + bzero (attrbufptr, EXTFNDRINFOSIZE); + (u_int8_t *)attrbufptr += EXTFNDRINFOSIZE; + }; + if (a & ATTR_CMN_OWNERID) *((uid_t *)attrbufptr)++ = ip->inode.iso_uid; + if (a & ATTR_CMN_GRPID) *((gid_t *)attrbufptr)++ = ip->inode.iso_gid; + if (a & ATTR_CMN_ACCESSMASK) *((u_long *)attrbufptr)++ = (u_long)ip->inode.iso_mode; + if (a & ATTR_CMN_FLAGS) *((u_long *)attrbufptr)++ = 0; /* could also use ip->i_flag */ + if (a & ATTR_CMN_USERACCESS) { + *((u_long *)attrbufptr)++ = + DerivePermissionSummary(ip->inode.iso_uid, + ip->inode.iso_gid, + ip->inode.iso_mode, + imp, + current_proc()->p_ucred, + current_proc()); + }; + }; + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + +void +packdirattr(struct attrlist *alist, + struct iso_node *ip, + void **attrbufptrptr, + void 
**varbufptrptr) +{ + void *attrbufptr; + attrgroup_t a; + int filcnt, dircnt; + + attrbufptr = *attrbufptrptr; + filcnt = dircnt = 0; + + a = alist->dirattr; + if ((ITOV(ip)->v_type == VDIR) && (a != 0)) { + /* + * if we haven't counted our children yet, do it now... + */ + if ((ip->i_entries == 0) && + (a & (ATTR_DIR_LINKCOUNT | ATTR_DIR_ENTRYCOUNT))) { + (void) isochildcount(ITOV(ip), &dircnt, &filcnt); + + if ((ip->inode.iso_links == 1) && (dircnt != 0)) + ip->inode.iso_links = dircnt; + if ((filcnt + dircnt) > 0) + ip->i_entries = dircnt + filcnt; + } + + if (a & ATTR_DIR_LINKCOUNT) { + *((u_long *)attrbufptr)++ = ip->inode.iso_links; + } + if (a & ATTR_DIR_ENTRYCOUNT) { + /* exclude '.' and '..' from total caount */ + *((u_long *)attrbufptr)++ = ((ip->i_entries <= 2) ? 0 : (ip->i_entries - 2)); + } + if (a & ATTR_DIR_MOUNTSTATUS) { + if (ITOV(ip)->v_mountedhere) { + *((u_long *)attrbufptr)++ = DIR_MNTSTATUS_MNTPOINT; + } else { + *((u_long *)attrbufptr)++ = 0; + }; + }; + }; + + *attrbufptrptr = attrbufptr; +} + + +void +packfileattr(struct attrlist *alist, + struct iso_node *ip, + void **attrbufptrptr, + void **varbufptrptr) +{ + void *attrbufptr = *attrbufptrptr; + void *varbufptr = *varbufptrptr; + attrgroup_t a = alist->fileattr; + + if ((ITOV(ip)->v_type == VREG) && (a != 0)) { + if (a & ATTR_FILE_LINKCOUNT) + *((u_long *)attrbufptr)++ = ip->inode.iso_links; + if (a & ATTR_FILE_TOTALSIZE) + *((off_t *)attrbufptr)++ = (off_t)ip->i_size; + if (a & ATTR_FILE_ALLOCSIZE) + *((off_t *)attrbufptr)++ = (off_t)ip->i_size; + if (a & ATTR_FILE_IOBLOCKSIZE) + *((u_long *)attrbufptr)++ = ip->i_mnt->logical_block_size; + if (a & ATTR_FILE_CLUMPSIZE) + *((u_long *)attrbufptr)++ = ip->i_mnt->logical_block_size; + if (a & ATTR_FILE_DEVTYPE) + *((u_long *)attrbufptr)++ = (u_long)ip->inode.iso_rdev; + if (a & ATTR_FILE_DATALENGTH) + *((off_t *)attrbufptr)++ = (off_t)ip->i_size; + if (a & ATTR_FILE_DATAALLOCSIZE) + *((off_t *)attrbufptr)++ = (off_t)ip->i_size; + if (a & 
ATTR_FILE_RSRCLENGTH) + *((off_t *)attrbufptr)++ = (off_t)ip->i_rsrcsize; + if (a & ATTR_FILE_RSRCALLOCSIZE) + *((off_t *)attrbufptr)++ = (off_t)ip->i_rsrcsize; + } + + *attrbufptrptr = attrbufptr; + *varbufptrptr = varbufptr; +} + + +void +packattrblk(struct attrlist *alist, + struct vnode *vp, + void **attrbufptrptr, + void **varbufptrptr) +{ + struct iso_node *ip = VTOI(vp); + + if (alist->volattr != 0) { + packvolattr(alist, ip, attrbufptrptr, varbufptrptr); + } else { + packcommonattr(alist, ip, attrbufptrptr, varbufptrptr); + + switch (ITOV(ip)->v_type) { + case VDIR: + packdirattr(alist, ip, attrbufptrptr, varbufptrptr); + break; + + case VREG: + packfileattr(alist, ip, attrbufptrptr, varbufptrptr); + break; + + /* Without this the compiler complains about VNON,VBLK,VCHR,VLNK,VSOCK,VFIFO,VBAD and VSTR + not being handled... + */ + default: + break; + }; + }; +}; diff --git a/bsd/isofs/cd9660/cd9660_vfsops.c b/bsd/isofs/cd9660/cd9660_vfsops.c new file mode 100644 index 000000000..68d602bcc --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_vfsops.c @@ -0,0 +1,1253 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_vfsops.c,v 1.18 1995/03/09 12:05:36 mycroft Exp $ */ + +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cd9660_vfsops.c 8.9 (Berkeley) 12/5/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +u_char isonullname[] = "\0"; + +extern int enodev (); + +struct vfsops cd9660_vfsops = { + cd9660_mount, + cd9660_start, + cd9660_unmount, + cd9660_root, + cd9660_quotactl, + cd9660_statfs, + cd9660_sync, + cd9660_vget, + cd9660_fhtovp, + cd9660_vptofh, + cd9660_init, + cd9660_sysctl +}; + +/* + * Called by vfs_mountroot when iso is going to be mounted as root. + * + * Name is updated by mount(8) after booting. + */ +#define ROOTNAME "root_device" + +static int iso_mountfs __P((struct vnode *devvp, struct mount *mp, + struct proc *p, struct iso_args *argp)); + +static void DRGetTypeCreatorAndFlags( + struct iso_mnt * theMountPointPtr, + struct iso_directory_record * theDirRecPtr, + u_int32_t * theTypePtr, + u_int32_t * theCreatorPtr, + u_int16_t * theFlagsPtr); + +int cd9660_vget_internal( + struct mount *mp, + ino_t ino, + struct vnode **vpp, + int relocated, + struct iso_directory_record *isodir, + struct proc *p); + +int +cd9660_mountroot() +{ + register struct mount *mp; + extern struct vnode *rootvp; + struct proc *p = current_proc(); /* XXX */ + struct iso_mnt *imp; + size_t size; + int error; + struct iso_args args; + + /* + * Get vnodes for swapdev and rootdev. 
+ */ + if ( bdevvp(rootdev, &rootvp)) + panic("cd9660_mountroot: can't setup bdevvp's"); + + MALLOC_ZONE(mp, struct mount *, + sizeof(struct mount), M_MOUNT, M_WAITOK); + bzero((char *)mp, (u_long)sizeof(struct mount)); + mp->mnt_op = &cd9660_vfsops; + mp->mnt_flag = MNT_RDONLY; + LIST_INIT(&mp->mnt_vnodelist); + args.flags = ISOFSMNT_ROOT; + args.ssector = 0; + if ((error = iso_mountfs(rootvp, mp, p, &args))) { + FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + return (error); + } + simple_lock(&mountlist_slock); + CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); + simple_unlock(&mountlist_slock); + mp->mnt_vnodecovered = NULLVP; + imp = VFSTOISOFS(mp); + (void) copystr("/", mp->mnt_stat.f_mntonname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); + (void)cd9660_statfs(mp, &mp->mnt_stat, p); + return (0); +} + +/* + * VFS Operations. + * + * mount system call + */ +int +cd9660_mount(mp, path, data, ndp, p) + register struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + struct vnode *devvp; + struct iso_args args; + size_t size; + int error; + struct iso_mnt *imp = NULL; + + if ((error = copyin(data, (caddr_t)&args, sizeof (struct iso_args)))) + return (error); + + if ((mp->mnt_flag & MNT_RDONLY) == 0) + return (EROFS); + + /* + * If updating, check whether changing from read-only to + * read/write; if there is no device name, that's all we do. + */ + if (mp->mnt_flag & MNT_UPDATE) { + imp = VFSTOISOFS(mp); + if (args.fspec == 0) + return (vfs_export(mp, &imp->im_export, &args.export)); + } + /* + * Not an update, or updating the name: look up the name + * and verify that it refers to a sensible block device. 
+ */ + NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p); + if ((error = namei(ndp))) + return (error); + devvp = ndp->ni_vp; + + if (devvp->v_type != VBLK) { + vrele(devvp); + return (ENOTBLK); + } + if (major(devvp->v_rdev) >= nblkdev) { + vrele(devvp); + return (ENXIO); + } + if ((mp->mnt_flag & MNT_UPDATE) == 0) + error = iso_mountfs(devvp, mp, p, &args); + else { + if (devvp != imp->im_devvp) + error = EINVAL; /* needs translation */ + else + vrele(devvp); + } + if (error) { + vrele(devvp); + return (error); + } + + /* Set the mount flag to indicate that we support volfs */ + mp->mnt_flag |= MNT_DOVOLFS; + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); + return (0); +} + +/* + * Common code for mount and mountroot + */ +static int +iso_mountfs(devvp, mp, p, argp) + register struct vnode *devvp; + struct mount *mp; + struct proc *p; + struct iso_args *argp; +{ + register struct iso_mnt *isomp = (struct iso_mnt *)0; + struct buf *bp = NULL; + struct buf *pribp = NULL, *supbp = NULL; + dev_t dev = devvp->v_rdev; + int error = EINVAL; + int breaderr = 0; + int needclose = 0; + extern struct vnode *rootvp; + u_long iso_bsize; + int iso_blknum; + int joliet_level; + struct iso_volume_descriptor *vdp = NULL; + struct iso_primary_descriptor *pri = NULL; + struct iso_primary_descriptor *sup = NULL; + struct iso_directory_record *rootp; + int logical_block_size; + u_int8_t vdtype; + int blkoff = argp->ssector; + + if (!(mp->mnt_flag & MNT_RDONLY)) + return (EROFS); + + /* + * Disallow multiple mounts of the same device. + * Disallow mounting of a device that is currently in use + * (except for root, which might share swap device for miniroot). + * Flush out any old buffers remaining from a previous use. 
+ */ + if ((error = vfs_mountedon(devvp))) + return (error); + if (vcount(devvp) > 1 && devvp != rootvp) + return (EBUSY); + if ((error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0))) + return (error); + + if ((error = VOP_OPEN(devvp, FREAD, FSCRED, p))) + return (error); + needclose = 1; + + /* This is the "logical sector size". The standard says this + * should be 2048 or the physical sector size on the device, + * whichever is greater. For now, we'll just use a constant. + */ + iso_bsize = ISO_DEFAULT_BLOCK_SIZE; + + /* tell IOKit that we're assuming 2K sectors */ + if ((error = VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, + (caddr_t)&iso_bsize, FWRITE, p->p_ucred, p))) + return (error); + devvp->v_specsize = iso_bsize; + joliet_level = 0; + for (iso_blknum = 16 + blkoff; iso_blknum < (100 + blkoff); iso_blknum++) { + if ((error = bread(devvp, iso_blknum, iso_bsize, NOCRED, &bp))) { + if (bp) { + bp->b_flags |= B_AGE; + brelse(bp); + bp = NULL; + } + breaderr = error; + printf("iso_mountfs: bread error %d reading block %d\n", error, iso_blknum); + continue; + } + + vdp = (struct iso_volume_descriptor *)bp->b_data; + if (bcmp (vdp->volume_desc_id, ISO_STANDARD_ID, sizeof(vdp->volume_desc_id)) != 0) { +#ifdef DEBUG + printf("cd9660_vfsops.c: iso_mountfs: " + "Invalid ID in volume desciptor.\n"); +#endif + error = EINVAL; + goto out; + } + + vdtype = isonum_711 (vdp->type); + if (vdtype == ISO_VD_END) + break; + + if (vdtype == ISO_VD_PRIMARY) { + if (pribp == NULL) { + pribp = bp; + bp = NULL; + pri = (struct iso_primary_descriptor *)vdp; + } + } else if(vdtype == ISO_VD_SUPPLEMENTARY) { + if (supbp == NULL) { + supbp = bp; + bp = NULL; + sup = (struct iso_primary_descriptor *)vdp; + + if ((argp->flags & ISOFSMNT_NOJOLIET) == 0) { + /* + * some Joliet CDs are "out-of-spec and don't correctly + * set the SVD flags. 
We ignore the flags and rely solely
+ */ + isomp->volume_space_size += blkoff; + bcopy (rootp, isomp->root, sizeof isomp->root); + isomp->root_extent = isonum_733 (rootp->extent); + isomp->root_size = isonum_733 (rootp->size); + + /* + * getattrlist wants the volume name, create date and modify date + */ + + /* Remove any trailing white space */ + if ( strlen(pri->volume_id) ) { + char *myPtr; + + myPtr = pri->volume_id + strlen( pri->volume_id ) - 1; + while ( *myPtr == ' ' && myPtr >= pri->volume_id ) { + *myPtr = 0x00; + myPtr--; + } + } + /* YYY need to use secondary volume descriptor name for kanji disks */ + bcopy(pri->volume_id, isomp->volume_id, sizeof(isomp->volume_id)); + cd9660_tstamp_conv17(pri->creation_date, &isomp->creation_date); + cd9660_tstamp_conv17(pri->modification_date, &isomp->modification_date); + + /* See if this is a CD-XA volume */ + if (bcmp( pri->CDXASignature, ISO_XA_ID, + sizeof(pri->CDXASignature) ) == 0 ) + isomp->im_flags2 |= IMF2_IS_CDXA; + + isomp->im_bmask = logical_block_size - 1; + isomp->im_bshift = 0; + while ((1 << isomp->im_bshift) < isomp->logical_block_size) + isomp->im_bshift++; + + pribp->b_flags |= B_AGE; + brelse(pribp); + pribp = NULL; + + mp->mnt_data = (qaddr_t)isomp; + mp->mnt_stat.f_fsid.val[0] = (long)dev; + mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; + mp->mnt_maxsymlinklen = 0; + mp->mnt_flag |= MNT_LOCAL; + + isomp->im_mountp = mp; + isomp->im_dev = dev; + isomp->im_devvp = devvp; + + devvp->v_specflags |= SI_MOUNTEDON; + + /* Check the Rock Ridge Extention support */ + if (!(argp->flags & ISOFSMNT_NORRIP)) { + if ( (error = bread(isomp->im_devvp, + (isomp->root_extent + isonum_711(rootp->ext_attr_length)), + isomp->logical_block_size, NOCRED, &bp)) ) { + + printf("iso_mountfs: bread error %d reading block %d\n", + error, isomp->root_extent + isonum_711(rootp->ext_attr_length)); + argp->flags |= ISOFSMNT_NORRIP; + goto skipRRIP; + } + rootp = (struct iso_directory_record *)bp->b_data; + + if ((isomp->rr_skip = 
cd9660_rrip_offset(rootp,isomp)) < 0) { + argp->flags |= ISOFSMNT_NORRIP; + } else { + argp->flags &= ~ISOFSMNT_GENS; + } + + /* + * The contents are valid, + * but they will get reread as part of another vnode, so... + */ + bp->b_flags |= B_AGE; + brelse(bp); + bp = NULL; + } +skipRRIP: + + isomp->im_flags = argp->flags & (ISOFSMNT_NORRIP | ISOFSMNT_GENS | + ISOFSMNT_EXTATT | ISOFSMNT_NOJOLIET); + + switch (isomp->im_flags&(ISOFSMNT_NORRIP|ISOFSMNT_GENS)) { + default: + isomp->iso_ftype = ISO_FTYPE_DEFAULT; + break; + case ISOFSMNT_GENS|ISOFSMNT_NORRIP: + isomp->iso_ftype = ISO_FTYPE_9660; + break; + case 0: + isomp->iso_ftype = ISO_FTYPE_RRIP; + break; + } + + /* Decide whether to use the Joliet descriptor */ + + if (isomp->iso_ftype != ISO_FTYPE_RRIP && joliet_level != 0) { + rootp = (struct iso_directory_record *) + sup->root_directory_record; + bcopy (rootp, isomp->root, sizeof isomp->root); + isomp->root_extent = isonum_733 (rootp->extent); + isomp->root_size = isonum_733 (rootp->size); + supbp->b_flags |= B_AGE; + isomp->iso_ftype = ISO_FTYPE_JOLIET; + } + + if (supbp) { + brelse(supbp); + supbp = NULL; + } + + return (0); +out: + if (bp) + brelse(bp); + if (pribp) + brelse(pribp); + if (supbp) + brelse(supbp); + if (needclose) + (void)VOP_CLOSE(devvp, FREAD, NOCRED, p); + if (isomp) { + FREE((caddr_t)isomp, M_ISOFSMNT); + mp->mnt_data = (qaddr_t)0; + } + + /* Clear the mounted on bit in the devvp If it */ + /* not set, this is a nop and there is no way to */ + /* get here with it set unless we did it. If you*/ + /* are making code changes which makes the above */ + /* assumption not true, change this code. */ + + devvp->v_specflags &= ~SI_MOUNTEDON; + + return (error); +} + +/* + * Make a filesystem operational. + * Nothing to do at the moment. 
+ */ +/* ARGSUSED */ +int +cd9660_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + return (0); +} + +/* + * unmount system call + */ +int +cd9660_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + register struct iso_mnt *isomp; + int error, flags = 0; + + if ( (mntflags & MNT_FORCE) ) + flags |= FORCECLOSE; + + if ( (error = vflush(mp, NULLVP, flags)) ) + return (error); + + isomp = VFSTOISOFS(mp); + +#ifdef ISODEVMAP + if (isomp->iso_ftype == ISO_FTYPE_RRIP) + iso_dunmap(isomp->im_dev); +#endif + + isomp->im_devvp->v_specflags &= ~SI_MOUNTEDON; + error = VOP_CLOSE(isomp->im_devvp, FREAD, NOCRED, p); + vrele(isomp->im_devvp); + FREE((caddr_t)isomp, M_ISOFSMNT); + mp->mnt_data = (qaddr_t)0; + mp->mnt_flag &= ~MNT_LOCAL; + + return (error); +} + +/* + * Return root of a filesystem + */ +int +cd9660_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct iso_mnt *imp = VFSTOISOFS(mp); + struct iso_directory_record *dp = + (struct iso_directory_record *)imp->root; + ino_t ino = isodirino(dp, imp); + + /* + * With RRIP we must use the `.' entry of the root directory. + * Simply tell vget, that it's a relocated directory. + */ + return (cd9660_vget_internal(mp, ino, vpp, + imp->iso_ftype == ISO_FTYPE_RRIP, dp, current_proc())); +} + +/* + * Do operations associated with quotas, not supported + */ +/* ARGSUSED */ +int +cd9660_quotactl(mp, cmd, uid, arg, p) + struct mount *mp; + int cmd; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + + return (EOPNOTSUPP); +} + +/* + * Get file system statistics. 
+ */ +int +cd9660_statfs(mp, sbp, p) + struct mount *mp; + register struct statfs *sbp; + struct proc *p; +{ + register struct iso_mnt *isomp; + + isomp = VFSTOISOFS(mp); + +#ifdef COMPAT_09 + sbp->f_type = 5; +#else + sbp->f_type = 0; +#endif + sbp->f_bsize = isomp->logical_block_size; + sbp->f_iosize = sbp->f_bsize; /* XXX */ + sbp->f_blocks = isomp->volume_space_size; + sbp->f_bfree = 0; /* total free blocks */ + sbp->f_bavail = 0; /* blocks free for non superuser */ + sbp->f_files = 0; /* total files */ + sbp->f_ffree = 0; /* free file nodes */ + if (sbp != &mp->mnt_stat) { + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + + strncpy( sbp->f_fstypename, mp->mnt_vfc->vfc_name, (MFSNAMELEN - 1) ); + sbp->f_fstypename[(MFSNAMELEN - 1)] = '\0'; + + /* DO NOT use the first spare for flags; it's been reassigned for another use: */ + /* sbp->f_spare[0] = isomp->im_flags; */ + + return (0); +} + +/* ARGSUSED */ +int +cd9660_sync(mp, waitfor, cred, p) + struct mount *mp; + int waitfor; + struct ucred *cred; + struct proc *p; +{ + + return (0); +} + +/* + * File handle to vnode + * + * Have to be really careful about stale file handles: + * - check that the inode number is in range + * - call iget() to get the locked inode + * - check for an unallocated inode (i_mode == 0) + * - check that the generation number matches + */ + +struct ifid { + ushort ifid_len; + ushort ifid_pad; + int ifid_ino; + long ifid_start; +}; + +/* ARGSUSED */ +int +cd9660_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) + register struct mount *mp; + struct fid *fhp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred **credanonp; +{ + struct ifid *ifhp = (struct ifid *)fhp; + register struct iso_node *ip; + register struct netcred *np; + register struct iso_mnt *imp = VFSTOISOFS(mp); + struct vnode *nvp; + int error; + +#ifdef ISOFS_DBG + printf("fhtovp: ino %d, start %ld\n", + 
ifhp->ifid_ino, ifhp->ifid_start); +#endif + + /* + * Get the export permission structure for this tuple. + */ + np = vfs_export_lookup(mp, &imp->im_export, nam); + if (np == NULL) + return (EACCES); + + if ( (error = VFS_VGET(mp, &ifhp->ifid_ino, &nvp)) ) { + *vpp = NULLVP; + return (error); + } + ip = VTOI(nvp); + if (ip->inode.iso_mode == 0) { + vput(nvp); + *vpp = NULLVP; + return (ESTALE); + } + *vpp = nvp; + *exflagsp = np->netc_exflags; + *credanonp = &np->netc_anon; + return (0); +} + +int +cd9660_vget(mp, ino, vpp) + struct mount *mp; + void *ino; + struct vnode **vpp; +{ + /* + * XXXX + * It would be nice if we didn't always set the `relocated' flag + * and force the extra read, but I don't want to think about fixing + * that right now. + */ + + return ( cd9660_vget_internal( mp, *(ino_t*)ino, vpp, 0, + (struct iso_directory_record *) 0, + current_proc()) ); +} + +int +cd9660_vget_internal(mp, ino, vpp, relocated, isodir, p) + struct mount *mp; + ino_t ino; + struct vnode **vpp; + int relocated; + struct iso_directory_record *isodir; + struct proc *p; +{ + register struct iso_mnt *imp; + struct iso_node *ip; + struct buf *bp; + struct vnode *vp, *nvp; + dev_t dev; + int error; + + imp = VFSTOISOFS(mp); + dev = imp->im_dev; + + /* Check for unmount in progress */ + if (mp->mnt_kern_flag & MNTK_UNMOUNT) { + *vpp = NULLVP; + return (EPERM); + } + + if ((*vpp = cd9660_ihashget(dev, ino, p)) != NULLVP) + return (0); + + MALLOC_ZONE(ip, struct iso_node *, sizeof(struct iso_node), + M_ISOFSNODE, M_WAITOK); + /* Allocate a new vnode/iso_node. 
*/ + if ( (error = getnewvnode(VT_ISOFS, mp, cd9660_vnodeop_p, &vp)) ) { + FREE_ZONE(ip,sizeof(struct iso_node), M_ISOFSNODE); + *vpp = NULLVP; + return (error); + } + bzero((caddr_t)ip, sizeof(struct iso_node)); + lockinit(&ip->i_lock, PINOD,"isonode",0,0); + vp->v_data = ip; + ip->i_vnode = vp; + ip->i_dev = dev; + ip->i_number = ino; + ip->i_namep = &isonullname[0]; + + /* + * Put it onto its hash chain and lock it so that other requests for + * this inode will block if they arrive while we are sleeping waiting + * for old data structures to be purged or for the contents of the + * disk portion of this inode to be read. + */ + cd9660_ihashins(ip); + + if (isodir == 0) { + int lbn, off; + + lbn = lblkno(imp, ino); + if (lbn >= imp->volume_space_size) { + vput(vp); + printf("fhtovp: lbn exceed volume space %d\n", lbn); + return (ESTALE); + } + + off = blkoff(imp, ino); + if (off + ISO_DIRECTORY_RECORD_SIZE > imp->logical_block_size) { + vput(vp); + printf("fhtovp: crosses block boundary %d\n", + off + ISO_DIRECTORY_RECORD_SIZE); + return (ESTALE); + } + + error = bread(imp->im_devvp, lbn, + imp->logical_block_size, NOCRED, &bp); + if (error) { + vput(vp); + brelse(bp); + printf("fhtovp: bread error %d\n",error); + return (error); + } + isodir = (struct iso_directory_record *)(bp->b_data + off); + + if (off + isonum_711(isodir->length) > + imp->logical_block_size) { + vput(vp); + if (bp != 0) + brelse(bp); + printf("fhtovp: directory crosses block boundary " + "%d[off=%d/len=%d]\n", + off +isonum_711(isodir->length), off, + isonum_711(isodir->length)); + return (ESTALE); + } + + /* + * for directories we can get parentID from adjacent + * parent directory record + */ + if ((isonum_711(isodir->flags) & directoryBit) + && (isodir->name[0] == 0)) { + struct iso_directory_record *pdp; + + pdp = (struct iso_directory_record *) + ((char *)bp->b_data + isonum_711(isodir->length)); + if ((isonum_711(pdp->flags) & directoryBit) + && (pdp->name[0] == 1)) + ip->i_parent = 
isodirino(pdp, imp); + } + } else + bp = 0; + + ip->i_mnt = imp; + ip->i_devvp = imp->im_devvp; + VREF(ip->i_devvp); + + if (relocated) { + /* + * On relocated directories we must + * read the `.' entry out of a dir. + */ + ip->iso_start = ino >> imp->im_bshift; + if (bp != 0) + brelse(bp); + if ( (error = VOP_BLKATOFF(vp, (off_t)0, NULL, &bp)) ) { + vput(vp); + return (error); + } + isodir = (struct iso_directory_record *)bp->b_data; + } + + /* + * go get apple extensions to ISO directory record or use + * defaults when there are no apple extensions. + */ + if ( (isonum_711( isodir->flags ) & directoryBit) == 0 ) { + /* This is an ISO directory record for a file */ + DRGetTypeCreatorAndFlags( imp, isodir, &ip->i_FileType, + &ip->i_Creator, &ip->i_FinderFlags ); + } + + ip->iso_extent = isonum_733(isodir->extent); + ip->i_size = isonum_733(isodir->size); + ip->iso_start = isonum_711(isodir->ext_attr_length) + ip->iso_extent; + + /* + * if we have a valid name, fill in i_namep with UTF-8 name + */ + if (isonum_711(isodir->name_len) != 0) { + u_char *utf8namep; + u_short namelen; + ino_t inump = 0; + + MALLOC(utf8namep, u_char *, ISO_RRIP_NAMEMAX + 1, M_TEMP, M_WAITOK); + namelen = isonum_711(isodir->name_len); + + switch (imp->iso_ftype) { + case ISO_FTYPE_RRIP: + cd9660_rrip_getname(isodir, utf8namep, &namelen, &inump, imp); + break; + + case ISO_FTYPE_JOLIET: + ucsfntrans((u_int16_t *)isodir->name, namelen, + utf8namep, &namelen, + isonum_711(isodir->flags) & directoryBit); + break; + + default: + isofntrans (isodir->name, namelen, + utf8namep, &namelen, + imp->iso_ftype == ISO_FTYPE_9660); + } + + utf8namep[namelen] = '\0'; + MALLOC(ip->i_namep, u_char *, namelen + 1, M_TEMP, M_WAITOK); + bcopy(utf8namep, ip->i_namep, namelen + 1); + FREE(utf8namep, M_TEMP); + } + + /* + * Setup time stamp, attribute + */ + vp->v_type = VNON; + switch (imp->iso_ftype) { + default: /* ISO_FTYPE_9660 */ + { + struct buf *bp2; + int off; + if ((imp->im_flags & ISOFSMNT_EXTATT) + && 
(off = isonum_711(isodir->ext_attr_length))) + VOP_BLKATOFF(vp, (off_t)-(off << imp->im_bshift), NULL, &bp2); + else + bp2 = NULL; + cd9660_defattr(isodir, ip, bp2); + cd9660_deftstamp(isodir, ip, bp2); + if (bp2) + brelse(bp2); + break; + } + case ISO_FTYPE_RRIP: + cd9660_rrip_analyze(isodir, ip, imp); + break; + } + + if (bp != 0) + brelse(bp); + + /* + * Initialize the associated vnode + */ + + if (ip->iso_extent == imp->root_extent) { + vp->v_flag |= VROOT; + ip->i_parent = 1; /* root's parent is always 1 by convention */ + /* mode type must be S_IFDIR */ + ip->inode.iso_mode = (ip->inode.iso_mode & ~S_IFMT) | S_IFDIR; + } + + switch (vp->v_type = IFTOVT(ip->inode.iso_mode)) { + case VFIFO: +#if FIFO + vp->v_op = cd9660_fifoop_p; + break; +#else + vput(vp); + return (EOPNOTSUPP); +#endif /* FIFO */ + case VCHR: + case VBLK: + /* + * if device, look at device number table for translation + */ +#ifdef ISODEVMAP + if (dp = iso_dmap(dev, ino, 0)) + ip->inode.iso_rdev = dp->d_dev; +#endif + vp->v_op = cd9660_specop_p; + if ( (nvp = checkalias(vp, ip->inode.iso_rdev, mp)) ) { + /* + * Discard unneeded vnode, but save its iso_node. + */ + cd9660_ihashrem(ip); + VOP_UNLOCK(vp, 0, p); + nvp->v_data = vp->v_data; + vp->v_data = NULL; + vp->v_op = spec_vnodeop_p; + vrele(vp); + vgone(vp); + /* + * Reinitialize aliased inode. + */ + vp = nvp; + ip->i_vnode = vp; + cd9660_ihashins(ip); + } + break; + case VREG: + ubc_info_init(vp); + break; + default: + break; + } + + /* + * XXX need generation number? 
+ */ + + *vpp = vp; + + return (0); +} + + +/************************************************************************ + * + * Function: DRGetTypeCreatorAndFlags + * + * Purpose: Set up the fileType, fileCreator and fileFlags + * + * Returns: none + * + * Side Effects: sets *theTypePtr, *theCreatorPtr, and *theFlagsPtr + * + * Description: + * + * Revision History: + * 28 Jul 88 BL¡B Added a new extension type of 6, which allows + * the specification of four of the finder flags. + * We let the creator of the disk just copy over + * the finder flags, but we only look at always + * switch launch, system, bundle, and locked bits. + * 15 Aug 88 BL¡B The Apple extensions to ISO 9660 implemented the + * padding field at the end of a directory record + * incorrectly. + * 19 Jul 89 BG Rewrote routine to handle the "new" Apple + * Extensions definition, as well as take into + * account the possibility of "other" definitions. + * 02 Nov 89 BG Corrected the 'AA' SystemUseID processing to + * check for SystemUseID == 2 (HFS). Was incorrectly + * checking for SystemUseID == 1 (ProDOS) before. + * 18 Mar 92 CMP Fixed the check for whether len_fi was odd or even. + * Before it would always assume even for an XA record. + * 26 Dec 97 jwc Swiped from MacOS implementation of ISO 9660 CD-ROM + * support and modified to work in MacOSX file system. 
+ * + *********************************************************************** */ + +static void +DRGetTypeCreatorAndFlags( struct iso_mnt * theMountPointPtr, + struct iso_directory_record * theDirRecPtr, + u_int32_t * theTypePtr, + u_int32_t * theCreatorPtr, + u_int16_t * theFlagsPtr ) +{ + int foundStuff; + u_int32_t myType; + u_int32_t myCreator; + AppleExtension *myAppleExtPtr; + NewAppleExtension *myNewAppleExtPtr; + u_int16_t myFinderFlags; + char *myPtr; + + foundStuff = 1; + myType = 0L; + myCreator = 0L; + myFinderFlags = 0; + *theFlagsPtr = 0x0000; + + /* + * handle the fact that our original apple extensions didn't take + * into account the padding byte on a file name + */ + + myPtr = &theDirRecPtr->name[ (isonum_711(theDirRecPtr->name_len)) ]; + + /* if string length is even, bump myPtr for padding byte */ + if ( ((isonum_711(theDirRecPtr->name_len)) & 0x01) == 0 ) + myPtr++; + myAppleExtPtr = (AppleExtension *) myPtr; + + /* + * checking for whether or not the new 'AA' code is being + * called (and if so, correctly) + */ + if ( (isonum_711(theDirRecPtr->length)) <= + ISO_DIRECTORY_RECORD_SIZE + (isonum_711(theDirRecPtr->name_len)) ) { + foundStuff = 0; + goto DoneLooking; + } + + foundStuff = 0; /* now we default to *false* until we find a good one */ + myPtr = (char *) myAppleExtPtr; + + if ( (theMountPointPtr->im_flags2 & IMF2_IS_CDXA) != 0 ) + myPtr += 14;/* add in CD-XA fixed record offset (tnx, Phillips) */ + myNewAppleExtPtr = (NewAppleExtension *) myPtr; + + /* calculate the "real" end of the directory record information */ + myPtr = ((char *) theDirRecPtr) + (isonum_711(theDirRecPtr->length)); + while( (char *) myNewAppleExtPtr < myPtr ) /* end of directory buffer */ + { + /* + * If we get here, we can assume that ALL further entries in this + * directory record are of the form: + * + * struct OptionalSystemUse + * { + * byte Signature[2]; + * byte systemUseID; + * byte OSULength; + * byte fileType[4]; # only if HFS + * byte fileCreator[4]; # 
only if HFS + * byte finderFlags[2]; # only if HFS + * }; + * + * This means that we can examine the Signature bytes to see + * if they are 'AA' (the NEW Apple extension signature). + * If they are, deal with them. If they aren't, + * the OSULength field will tell us how long this extension + * info is (including the signature and length bytes) and that + * will allow us to walk the OptionalSystemUse records until + * we hit the end of them or run off the end of the + * directory record. + */ + u_char *myFromPtr, *myToPtr; + union + { + u_int32_t fourchars; + u_char chars[4]; + } myChars; + + if ( (myNewAppleExtPtr->signature[0] == 'A') && + (myNewAppleExtPtr->signature[1] == 'A') ) { + if ( isonum_711(myNewAppleExtPtr->systemUseID) == 2 ) { + /* HFS */ + foundStuff = 1; /* we got one! */ + + myFromPtr = &myNewAppleExtPtr->fileType[0]; + myToPtr = &myChars.chars[0]; + *myToPtr++ = *myFromPtr++; + *myToPtr++ = *myFromPtr++; + *myToPtr++ = *myFromPtr++; + *myToPtr = *myFromPtr; + myType = myChars.fourchars; /* copy file type to user var */ + + myFromPtr = &myNewAppleExtPtr->fileCreator[0]; + myToPtr = &myChars.chars[0]; + *myToPtr++ = *myFromPtr++; + *myToPtr++ = *myFromPtr++; + *myToPtr++ = *myFromPtr++; + *myToPtr = *myFromPtr; + myCreator = myChars.fourchars; /* copy creator to user var */ + + myFromPtr = &myNewAppleExtPtr->finderFlags[0]; + myToPtr = &myChars.chars[2]; /* *flags* is a short */ + myChars.fourchars = 0; + *myToPtr++ = *myFromPtr++; + *myToPtr = *myFromPtr; + myFinderFlags = myChars.fourchars; + myFinderFlags &= + ( fAlwaysBit | fSystemBit | fHasBundleBit | fLockedBit ); + /* return Finder flags to user var */ + *theFlagsPtr = (myFinderFlags | fInitedBit); + + break; /* exit the loop */ + } + } + + /* + * Check to see if we have a reasonable OSULength value. + * ZERO is not an acceptable value. Nor is any value less than 4. + */ + + if ( (isonum_711(myNewAppleExtPtr->OSULength)) < 4 ) + break; /* not acceptable - get out! 
*/ + + /* otherwise, step past this SystemUse record */ + (char *)myNewAppleExtPtr += (isonum_711(myNewAppleExtPtr->OSULength)); + + } /* end of while loop */ + +DoneLooking: + if ( foundStuff != 0 ) { + *theTypePtr = myType; + *theCreatorPtr = myCreator; + } else { + *theTypePtr = 0; + *theCreatorPtr = 0; + } + + return; + +} /* DRGetTypeCreatorAndFlags */ + + +/* + * Vnode pointer to File handle + */ +/* ARGSUSED */ +int +cd9660_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + register struct iso_node *ip = VTOI(vp); + register struct ifid *ifhp; + + ifhp = (struct ifid *)fhp; + ifhp->ifid_len = sizeof(struct ifid); + + ifhp->ifid_ino = ip->i_number; + ifhp->ifid_start = ip->iso_start; + +#ifdef ISOFS_DBG + printf("vptofh: ino %d, start %ld\n", + ifhp->ifid_ino,ifhp->ifid_start); +#endif + return (0); +} + +/* + * Fast-FileSystem only? + */ +int +cd9660_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int * name; + u_int namelen; + void* oldp; + size_t * oldlenp; + void * newp; + size_t newlen; + struct proc * p; +{ + return (EOPNOTSUPP); +} + diff --git a/bsd/isofs/cd9660/cd9660_vnops.c b/bsd/isofs/cd9660/cd9660_vnops.c new file mode 100644 index 000000000..c6624b06f --- /dev/null +++ b/bsd/isofs/cd9660/cd9660_vnops.c @@ -0,0 +1,1382 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: cd9660_vnops.c,v 1.22 1994/12/27 19:05:12 mycroft Exp $ */ + +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cd9660_vnops.c 8.15 (Berkeley) 12/5/94 + * + * HISTORY + * 02-Feb-00 chw Add cd9660_copyfile to return error + * 29-Sep-98 djb Add cd9660_getattrlist VOP for VDI support. + * 15-sep-98 added cd9660_rmdir to do proper unlocking - chw + * 12-aug-98 added cd9660_remove which will do proper unlocking - chw + * 17-Feb-98 radar 1669467 - changed lock protocols to use the lock manager - chw + * 22-Jan-98 radar 1669467 - ISO 9660 CD support - jwc + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* + * Open called. + * + * Nothing to do. + */ +/* ARGSUSED */ +int +cd9660_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + return (0); +} + +/* + * Close called + * + * Update the times on the inode on writeable file systems. 
+ */ +/* ARGSUSED */ +int +cd9660_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + return (0); +} + +/* + * Check mode permission on inode pointer. Mode is READ, WRITE or EXEC. + * The mode is shifted to select the owner/group/other fields. The + * super user is granted all permissions. + */ +/* ARGSUSED */ +int +cd9660_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct iso_node *ip = VTOI(vp); + struct ucred *cred = ap->a_cred; + mode_t mask, mode = ap->a_mode; + register gid_t *gp; + int i, error; + + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket, fifo, or a block or + * character device resident on the file system. + */ + if (mode & VWRITE) { + switch (vp->v_type) { + case VDIR: + case VLNK: + case VREG: + return (EROFS); + /* NOT REACHED */ + default: + break; + } + } + + /* If immutable bit set, nobody gets to write it. */ +#if 0 + if ((mode & VWRITE) && (ip->i_flag & IMMUTABLE)) + return (EPERM); +#endif + /* Otherwise, user id 0 always gets access. */ + if (cred->cr_uid == 0) + return (0); + + mask = 0; + + /* Otherwise, check the owner. */ + if (cred->cr_uid == ip->inode.iso_uid) { + if (mode & VEXEC) + mask |= S_IXUSR; + if (mode & VREAD) + mask |= S_IRUSR; + if (mode & VWRITE) + mask |= S_IWUSR; + return ((ip->inode.iso_mode & mask) == mask ? 0 : EACCES); + } + + /* Otherwise, check the groups. */ + for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) + if (ip->inode.iso_gid == *gp) { + if (mode & VEXEC) + mask |= S_IXGRP; + if (mode & VREAD) + mask |= S_IRGRP; + if (mode & VWRITE) + mask |= S_IWGRP; + return ((ip->inode.iso_mode & mask) == mask ? 0 : EACCES); + } + + /* Otherwise, check everyone else. 
*/ + if (mode & VEXEC) + mask |= S_IXOTH; + if (mode & VREAD) + mask |= S_IROTH; + if (mode & VWRITE) + mask |= S_IWOTH; + return ((ip->inode.iso_mode & mask) == mask ? 0 : EACCES); +} + +int +cd9660_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; + +{ + struct vnode *vp = ap->a_vp; + register struct vattr *vap = ap->a_vap; + register struct iso_node *ip = VTOI(vp); + + vap->va_fsid = ip->i_dev; + vap->va_fileid = ip->i_number; + + vap->va_mode = ip->inode.iso_mode; + vap->va_nlink = ip->inode.iso_links; + vap->va_uid = ip->inode.iso_uid; + vap->va_gid = ip->inode.iso_gid; + vap->va_atime = ip->inode.iso_atime; + vap->va_mtime = ip->inode.iso_mtime; + vap->va_ctime = ip->inode.iso_ctime; + vap->va_rdev = ip->inode.iso_rdev; + + vap->va_size = (u_quad_t) ip->i_size; + if (ip->i_size == 0 && (vap->va_mode & S_IFMT) == S_IFLNK) { + struct vop_readlink_args rdlnk; + struct iovec aiov; + struct uio auio; + char *cp; + + MALLOC(cp, char *, MAXPATHLEN, M_TEMP, M_WAITOK); + aiov.iov_base = cp; + aiov.iov_len = MAXPATHLEN; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_procp = ap->a_p; + auio.uio_resid = MAXPATHLEN; + rdlnk.a_uio = &auio; + rdlnk.a_vp = ap->a_vp; + rdlnk.a_cred = ap->a_cred; + if (cd9660_readlink(&rdlnk) == 0) + vap->va_size = MAXPATHLEN - auio.uio_resid; + FREE(cp, M_TEMP); + } + vap->va_flags = 0; + vap->va_gen = 1; + vap->va_blocksize = ip->i_mnt->logical_block_size; + vap->va_bytes = (u_quad_t) (ip->i_size + ip->i_rsrcsize); + vap->va_type = vp->v_type; + + return (0); +} + + +/* + * Vnode op for reading. 
+ */ +int +cd9660_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + register struct uio *uio = ap->a_uio; + register struct iso_node *ip = VTOI(vp); + register struct iso_mnt *imp; + struct buf *bp; + daddr_t lbn, rablock; + off_t diff; + int rasize, error = 0; + long size, n, on; + int devBlockSize = 0; + + if (uio->uio_resid == 0) + return (0); + if (uio->uio_offset < 0) + return (EINVAL); + + imp = ip->i_mnt; + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + if (UBCISVALID(vp)) + error = cluster_read(vp, uio, (off_t)ip->i_size, devBlockSize, 0); + else { + + do { + lbn = lblkno(imp, uio->uio_offset); + on = blkoff(imp, uio->uio_offset); + n = min((u_int)(imp->logical_block_size - on), + uio->uio_resid); + diff = (off_t)ip->i_size - uio->uio_offset; + if (diff <= 0) + return (0); + if (diff < n) + n = diff; + size = blksize(imp, ip, lbn); + rablock = lbn + 1; + + if (vp->v_lastr + 1 == lbn && + lblktosize(imp, rablock) < ip->i_size) { + rasize = blksize(imp, ip, rablock); + error = breadn(vp, lbn, size, &rablock, + &rasize, 1, NOCRED, &bp); + } else + error = bread(vp, lbn, size, NOCRED, &bp); + + vp->v_lastr = lbn; + n = min(n, size - bp->b_resid); + if (error) { + brelse(bp); + return (error); + } + + error = uiomove(bp->b_data + on, (int)n, uio); + if (n + on == imp->logical_block_size || + uio->uio_offset == (off_t)ip->i_size) + bp->b_flags |= B_AGE; + brelse(bp); + } while (error == 0 && uio->uio_resid > 0 && n != 0); + } + + return (error); +} + +/* ARGSUSED */ +int +cd9660_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + u_long a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + printf("You did ioctl for isofs !!\n"); + return (ENOTTY); +} + +/* ARGSUSED */ +int +cd9660_select(ap) + struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct 
ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + /* + * We should really check to see if I/O is possible. + */ + return (1); +} + +/* + * Mmap a file + * + * NB Currently unsupported. + */ +/* ARGSUSED */ +int +cd9660_mmap(ap) + struct vop_mmap_args /* { + struct vnode *a_vp; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + return (EINVAL); +} + +/* + * Seek on a file + * + * Nothing to do, so just return. + */ +/* ARGSUSED */ +int +cd9660_seek(ap) + struct vop_seek_args /* { + struct vnode *a_vp; + off_t a_oldoff; + off_t a_newoff; + struct ucred *a_cred; + } */ *ap; +{ + + return (0); +} + +/* + * Structure for reading directories + */ +struct isoreaddir { + struct dirent saveent; + struct dirent current; + off_t saveoff; + off_t curroff; + struct uio *uio; + off_t uio_off; + int eofflag; +// u_long **cookies; +// int *ncookies; +}; + +static int +iso_uiodir(idp,dp,off) + struct isoreaddir *idp; + struct dirent *dp; + off_t off; +{ + int error; + + dp->d_name[dp->d_namlen] = 0; + dp->d_reclen = DIRSIZ(dp); + + if (idp->uio->uio_resid < dp->d_reclen) { + idp->eofflag = 0; + return (-1); + } + +#if 0 + if (idp->cookies) { + if (*idp->ncookies <= 0) { + idp->eofflag = 0; + return (-1); + } + + **idp->cookies++ = off; + --*idp->ncookies; + } +#endif + + if ( (error = uiomove( (caddr_t)dp, dp->d_reclen, idp->uio )) ) + return (error); + idp->uio_off = off; + return (0); +} + +static int +iso_shipdir(idp) + struct isoreaddir *idp; +{ + struct dirent *dp; + int cl, sl; + int error; + char *cname, *sname; + + cl = idp->current.d_namlen; + cname = idp->current.d_name; + + dp = &idp->saveent; + sname = dp->d_name; + sl = dp->d_namlen; + if (sl > 0) { + if (sl != cl + || bcmp(sname,cname,sl)) { + if (idp->saveent.d_namlen) { + if ( (error = iso_uiodir(idp,&idp->saveent,idp->saveoff)) ) + return (error); + idp->saveent.d_namlen = 0; + } + } + } + idp->current.d_reclen = DIRSIZ(&idp->current); + idp->saveoff = idp->curroff; + 
bcopy(&idp->current,&idp->saveent,idp->current.d_reclen); + return (0); +} + +/* + * Vnode op for readdir + */ +int +cd9660_readdir(ap) + struct vop_readdir_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + int *a_ncookies; + u_long **a_cookies; + } */ *ap; +{ + register struct uio *uio = ap->a_uio; + off_t startingOffset = uio->uio_offset; + size_t lost = 0; + struct isoreaddir *idp; + struct vnode *vdp = ap->a_vp; + struct iso_node *dp; + struct iso_mnt *imp; + struct buf *bp = NULL; + struct iso_directory_record *ep; + int entryoffsetinblock; + doff_t endsearch; + u_long bmask; + int error = 0; + int reclen; + u_short namelen; + + dp = VTOI(vdp); + imp = dp->i_mnt; + bmask = imp->im_bmask; + + MALLOC(idp, struct isoreaddir *, sizeof(*idp), M_TEMP, M_WAITOK); + idp->saveent.d_namlen = 0; + /* + * XXX + * Is it worth trying to figure out the type? + */ + idp->saveent.d_type = idp->current.d_type = DT_UNKNOWN; + idp->uio = uio; + idp->eofflag = 1; + idp->curroff = uio->uio_offset; + + if ((entryoffsetinblock = idp->curroff & bmask) && + (error = VOP_BLKATOFF(vdp, (off_t)idp->curroff, NULL, &bp))) { + FREE(idp, M_TEMP); + return (error); + } + endsearch = dp->i_size; + + while (idp->curroff < endsearch) { + /* + * If offset is on a block boundary, + * read the next directory block. + * Release previous if it exists. + */ + if ((idp->curroff & bmask) == 0) { + if (bp != NULL) + brelse(bp); + if ( (error = VOP_BLKATOFF(vdp, (off_t)idp->curroff, NULL, &bp)) ) + break; + entryoffsetinblock = 0; + } + /* + * Get pointer to next entry. 
+ */ + ep = (struct iso_directory_record *) + ((char *)bp->b_data + entryoffsetinblock); + + reclen = isonum_711(ep->length); + if (reclen == 0) { + /* skip to next block, if any */ + idp->curroff = + (idp->curroff & ~bmask) + imp->logical_block_size; + continue; + } + + if (reclen < ISO_DIRECTORY_RECORD_SIZE) { + error = EINVAL; + /* illegal entry, stop */ + break; + } + + if (entryoffsetinblock + reclen > imp->logical_block_size) { + error = EINVAL; + /* illegal directory, so stop looking */ + break; + } + + idp->current.d_namlen = isonum_711(ep->name_len); + + if (reclen < ISO_DIRECTORY_RECORD_SIZE + idp->current.d_namlen) { + error = EINVAL; + /* illegal entry, stop */ + break; + } + + /* skip over associated files (Mac OS resource fork) */ + if (isonum_711(ep->flags) & associatedBit) { + idp->curroff += reclen; + entryoffsetinblock += reclen; + continue; + } + + if ( isonum_711(ep->flags) & directoryBit ) + idp->current.d_fileno = isodirino(ep, imp); + else { + idp->current.d_fileno = (bp->b_blkno << imp->im_bshift) + entryoffsetinblock; + } + + idp->curroff += reclen; + + switch (imp->iso_ftype) { + case ISO_FTYPE_RRIP: + cd9660_rrip_getname(ep,idp->current.d_name, &namelen, + &idp->current.d_fileno,imp); + idp->current.d_namlen = (u_char)namelen; + if (idp->current.d_namlen) + error = iso_uiodir(idp,&idp->current,idp->curroff); + break; + + case ISO_FTYPE_JOLIET: + ucsfntrans((u_int16_t *)ep->name, idp->current.d_namlen, + idp->current.d_name, &namelen, + isonum_711(ep->flags) & directoryBit); + idp->current.d_namlen = (u_char)namelen; + if (idp->current.d_namlen) + error = iso_uiodir(idp,&idp->current,idp->curroff); + break; + + default: /* ISO_FTYPE_DEFAULT || ISO_FTYPE_9660 */ + strcpy(idp->current.d_name,".."); + switch (ep->name[0]) { + case 0: + idp->current.d_namlen = 1; + error = iso_uiodir(idp,&idp->current,idp->curroff); + break; + case 1: + idp->current.d_namlen = 2; + error = iso_uiodir(idp,&idp->current,idp->curroff); + break; + default: + 
isofntrans(ep->name,idp->current.d_namlen, + idp->current.d_name, &namelen, + imp->iso_ftype == ISO_FTYPE_9660); + idp->current.d_namlen = (u_char)namelen; + if (imp->iso_ftype == ISO_FTYPE_DEFAULT) + error = iso_shipdir(idp); + else + error = iso_uiodir(idp,&idp->current,idp->curroff); + break; + } + } + if (error) + break; + + entryoffsetinblock += reclen; + } + + if (!error && imp->iso_ftype == ISO_FTYPE_DEFAULT) { + idp->current.d_namlen = 0; + error = iso_shipdir(idp); + } + + if (!error && ap->a_ncookies) { + struct dirent *dp, *dpstart; + off_t bufferOffset; + u_long *cookies; + int ncookies; + + /* + * Only the NFS server uses cookies, and it loads the + * directory block into system space, so we can just look at + * it directly. + * + * We assume the entire transfer is done to a single contiguous buffer. + */ + if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) + panic("ufs_readdir: lost in space"); + + /* + * Make a first pass over the buffer just generated, + * counting the number of entries: + */ + dpstart = (struct dirent *) (uio->uio_iov->iov_base - (uio->uio_offset - startingOffset)); + for (dp = dpstart, bufferOffset = startingOffset, ncookies = 0; + bufferOffset < uio->uio_offset; ) { + if (dp->d_reclen == 0) + break; + bufferOffset += dp->d_reclen; + ncookies++; + dp = (struct dirent *)((caddr_t)dp + dp->d_reclen); + } + lost += uio->uio_offset - bufferOffset; + uio->uio_offset = bufferOffset; + + /* + * Allocate a buffer to hold the cookies requested: + */ + MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK); + *ap->a_ncookies = ncookies; + *ap->a_cookies = cookies; + + /* + * Fill in the offsets for each entry in the buffer just allocated: + */ + for (bufferOffset = startingOffset, dp = dpstart; bufferOffset < uio->uio_offset; ) { + *(cookies++) = bufferOffset; + bufferOffset += dp->d_reclen; + dp = (struct dirent *)((caddr_t)dp + dp->d_reclen); + } + } + + if (error < 0) + error = 0; + + if (bp) + brelse (bp); + + 
uio->uio_offset = idp->uio_off; + *ap->a_eofflag = idp->eofflag; + + FREE(idp, M_TEMP); + + return (error); +} + +/* + * Return target name of a symbolic link + * Shouldn't we get the parent vnode and read the data from there? + * This could eventually result in deadlocks in cd9660_lookup. + * But otherwise the block read here is in the block buffer two times. + */ +typedef struct iso_directory_record ISODIR; +typedef struct iso_node ISONODE; +typedef struct iso_mnt ISOMNT; +int +cd9660_readlink(ap) + struct vop_readlink_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + } */ *ap; +{ + ISONODE *ip; + ISODIR *dirp; + ISOMNT *imp; + struct buf *bp; + struct uio *uio; + u_short symlen; + int error; + char *symname; + + ip = VTOI(ap->a_vp); + imp = ip->i_mnt; + uio = ap->a_uio; + + if (imp->iso_ftype != ISO_FTYPE_RRIP) + return (EINVAL); + + /* + * Get parents directory record block that this inode included. + */ + error = bread(imp->im_devvp, + (ip->i_number >> imp->im_bshift), + imp->logical_block_size, NOCRED, &bp); + if (error) { + brelse(bp); + return (EINVAL); + } + + /* + * Setup the directory pointer for this inode + */ + dirp = (ISODIR *)(bp->b_data + (ip->i_number & imp->im_bmask)); + + /* + * Just make sure, we have a right one.... + * 1: Check not cross boundary on block + */ + if ((ip->i_number & imp->im_bmask) + isonum_711(dirp->length) + > imp->logical_block_size) { + brelse(bp); + return (EINVAL); + } + + /* + * Now get a buffer + * Abuse a namei buffer for now. + */ + if (uio->uio_segflg == UIO_SYSSPACE) + symname = uio->uio_iov->iov_base; + else + MALLOC_ZONE(symname, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + + /* + * Ok, we just gathering a symbolic name in SL record. 
+ */ + if (cd9660_rrip_getsymname(dirp, symname, &symlen, imp) == 0) { + if (uio->uio_segflg != UIO_SYSSPACE) + FREE_ZONE(symname, MAXPATHLEN, M_NAMEI); + brelse(bp); + return (EINVAL); + } + /* + * Don't forget before you leave from home ;-) + */ + brelse(bp); + + /* + * return with the symbolic name to caller's. + */ + if (uio->uio_segflg != UIO_SYSSPACE) { + error = uiomove(symname, symlen, uio); + FREE_ZONE(symname, MAXPATHLEN, M_NAMEI); + return (error); + } + uio->uio_resid -= symlen; + uio->uio_iov->iov_base += symlen; + uio->uio_iov->iov_len -= symlen; + return (0); +} + +/* + * Ufs abort op, called after namei() when a CREATE/DELETE isn't actually + * done. If a buffer has been saved in anticipation of a CREATE, delete it. + */ +int +cd9660_abortop(ap) + struct vop_abortop_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + } */ *ap; +{ + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + return (0); +} + +/* + * Lock an inode. + */ + +int +cd9660_lock(ap) + struct vop_lock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + if (VTOI(vp) == (struct iso_node *) NULL) + panic ("cd9660_lock: null inode"); + return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags, &vp->v_interlock,ap->a_p)); +} + +/* + * Unlock an inode. + */ + +int +cd9660_unlock(ap) + struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags | LK_RELEASE, &vp->v_interlock,ap->a_p)); + +} + +/* + * Calculate the logical to physical mapping if not done already, + * then call the device strategy routine. 
+ */ +int +cd9660_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + register struct buf *bp = ap->a_bp; + register struct vnode *vp = bp->b_vp; + register struct iso_node *ip; + int error; + + ip = VTOI(vp); + if (vp->v_type == VBLK || vp->v_type == VCHR) + panic("cd9660_strategy: spec"); + if (bp->b_blkno == bp->b_lblkno) { + if ( (error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL)) ) { + bp->b_error = error; + bp->b_flags |= B_ERROR; + biodone(bp); + return (error); + } + if ((long)bp->b_blkno == -1) + clrbuf(bp); + } + if ((long)bp->b_blkno == -1) { + biodone(bp); + return (0); + } + vp = ip->i_devvp; + bp->b_dev = vp->v_rdev; + VOCALL (vp->v_op, VOFFSET(vop_strategy), ap); + return (0); +} + +/* + * Print out the contents of an inode. + */ +int +cd9660_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + printf("tag VT_ISOFS, isofs vnode\n"); + return (0); +} + +/* + * Check for a locked inode. + */ +int +cd9660_islocked(ap) + struct vop_islocked_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + return (lockstatus(&VTOI(ap->a_vp)->i_lock)); +} + +/* + * Return POSIX pathconf information applicable to cd9660 filesystems. 
+ */ +int +cd9660_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + register_t *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = 1; + return (0); + case _PC_NAME_MAX: + switch (VTOI(ap->a_vp)->i_mnt->iso_ftype) { + case ISO_FTYPE_RRIP: + *ap->a_retval = ISO_RRIP_NAMEMAX; + break; + case ISO_FTYPE_JOLIET: + *ap->a_retval = ISO_JOLIET_NAMEMAX; + break; + default: + *ap->a_retval = ISO_NAMEMAX; + } + return (0); + case _PC_PATH_MAX: + *ap->a_retval = PATH_MAX; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_NO_TRUNC: + *ap->a_retval = 1; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Unsupported operation + */ +int +cd9660_enotsupp() +{ + + return (EOPNOTSUPP); +} +/* Pagein. similar to read */ +int +cd9660_pagein(ap) + struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + upl_t pl = ap->a_pl; + size_t size= ap->a_size; + off_t f_offset = ap->a_f_offset; + vm_offset_t pl_offset = ap->a_pl_offset; + int flags = ap->a_flags; + register struct iso_node *ip = VTOI(vp); + int devBlockSize=0, error; + + /* check pageouts are for reg file only and ubc info is present*/ + if (UBCINVALID(vp)) + panic("cd9660_pagein: Not a VREG"); + UBCINFOCHECK("cd9660_pagein", vp); + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + error = cluster_pagein(vp, pl, pl_offset, f_offset, size, + (off_t)ip->i_size, devBlockSize, flags); + return (error); +} + +/* + * cd9660_remove - not possible to remove a file from iso cds + * + * Locking policy: a_dvp and vp locked on entry, unlocked on exit + */ +int +cd9660_remove(ap) + struct vop_remove_args /* { struct vnode *a_dvp; struct vnode *a_vp; + struct componentname *a_cnp; } */ 
*ap; +{ + if (ap->a_dvp == ap->a_vp) + vrele(ap->a_vp); + else + vput(ap->a_vp); + vput(ap->a_dvp); + + return (EROFS); +} + + +/* + * cd9660_rmdir - not possible to remove a directory from iso cds + * + * Locking policy: a_dvp and vp locked on entry, unlocked on exit + */ +int +cd9660_rmdir(ap) + struct vop_rmdir_args /* { struct vnode *a_dvp; struct vnode *a_vp; + struct componentname *a_cnp; } */ *ap; +{ + (void) nop_rmdir(ap); + return (EROFS); +} + +/* + +# +#% getattrlist vp = = = +# + vop_getattrlist { + IN struct vnode *vp; + IN struct attrlist *alist; + INOUT struct uio *uio; + IN struct ucred *cred; + IN struct proc *p; + }; + + */ +int +cd9660_getattrlist(ap) + struct vop_getattrlist_args /* { + struct vnode *a_vp; + struct attrlist *a_alist + struct uio *a_uio; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct attrlist *alist = ap->a_alist; + int fixedblocksize; + int attrblocksize; + int attrbufsize; + void *attrbufptr; + void *attrptr; + void *varptr; + int error = 0; + + if ((alist->bitmapcount != ATTR_BIT_MAP_COUNT) || + ((alist->commonattr & ~ATTR_CMN_VALIDMASK) != 0) || + ((alist->volattr & ~ATTR_VOL_VALIDMASK) != 0) || + ((alist->dirattr & ~ATTR_DIR_VALIDMASK) != 0) || + ((alist->fileattr & ~ATTR_FILE_VALIDMASK) != 0) || + ((alist->forkattr & ~ATTR_FORK_VALIDMASK) != 0)) { + return EINVAL; + }; + + /* + * Requesting volume information requires setting the ATTR_VOL_INFO bit and + * volume info requests are mutually exclusive with all other info requests: + */ + if ((alist->volattr != 0) && + (((alist->volattr & ATTR_VOL_INFO) == 0) || + (alist->dirattr != 0) || + (alist->fileattr != 0) || + (alist->forkattr != 0) )) { + return EINVAL; + }; + + /* + * Reject requests for unsupported options for now: + */ + if (alist->volattr & ATTR_VOL_MOUNTPOINT) return EINVAL; + if (alist->commonattr & (ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST)) return EINVAL; + if (alist->fileattr & + (ATTR_FILE_FILETYPE | + ATTR_FILE_FORKCOUNT | + 
ATTR_FILE_FORKLIST | + ATTR_FILE_DATAEXTENTS | + ATTR_FILE_RSRCEXTENTS)) { + return EINVAL; + }; + + + fixedblocksize = attrcalcsize(alist); + attrblocksize = fixedblocksize + (sizeof(u_long)); /* u_long for length longword */ + if (alist->commonattr & ATTR_CMN_NAME) attrblocksize += NAME_MAX; + if (alist->commonattr & ATTR_CMN_NAMEDATTRLIST) attrblocksize += 0; /* XXX PPD */ + if (alist->volattr & ATTR_VOL_MOUNTPOINT) attrblocksize += PATH_MAX; + if (alist->volattr & ATTR_VOL_NAME) attrblocksize += NAME_MAX; + if (alist->fileattr & ATTR_FILE_FORKLIST) attrblocksize += 0; /* XXX PPD */ + + attrbufsize = MIN(ap->a_uio->uio_resid, attrblocksize); + MALLOC(attrbufptr, void *, attrblocksize, M_TEMP, M_WAITOK); + attrptr = attrbufptr; + *((u_long *)attrptr) = 0; /* Set buffer length in case of errors */ + ++((u_long *)attrptr); /* Reserve space for length field */ + varptr = ((char *)attrptr) + fixedblocksize; /* Point to variable-length storage */ + + packattrblk(alist, ap->a_vp, &attrptr, &varptr); + + /* Store length of fixed + var block */ + *((u_long *)attrbufptr) = ((char*)varptr - (char*)attrbufptr); + /* Don't copy out more data than was generated */ + attrbufsize = MIN(attrbufsize, (char*)varptr - (char*)attrbufptr); + + error = uiomove((caddr_t)attrbufptr, attrbufsize, ap->a_uio); + + FREE(attrbufptr, M_TEMP); + + return error; +} + +/* + * Global vfs data structures for isofs + */ +#define cd9660_create \ + ((int (*) __P((struct vop_create_args *)))err_create) +#define cd9660_mknod ((int (*) __P((struct vop_mknod_args *)))err_mknod) +#define cd9660_setattr \ + ((int (*) __P((struct vop_setattr_args *)))cd9660_enotsupp) +#define cd9660_write ((int (*) __P((struct vop_write_args *)))cd9660_enotsupp) +#if NFSSERVER +int lease_check __P((struct vop_lease_args *)); +#define cd9660_lease_check lease_check +#else +#define cd9660_lease_check ((int (*) __P((struct vop_lease_args *)))nullop) +#endif +#define cd9660_fsync ((int (*) __P((struct vop_fsync_args *)))nullop) 
+#define cd9660_rename \ + ((int (*) __P((struct vop_rename_args *)))err_rename) +#define cd9660_copyfile \ + ((int (*) __P((struct vop_copyfile_args *)))err_copyfile) +#define cd9660_link ((int (*) __P((struct vop_link_args *)))err_link) +#define cd9660_mkdir ((int (*) __P((struct vop_mkdir_args *)))err_mkdir) +#define cd9660_symlink \ + ((int (*) __P((struct vop_symlink_args *)))err_symlink) +#define cd9660_advlock \ + ((int (*) __P((struct vop_advlock_args *)))cd9660_enotsupp) +#define cd9660_valloc ((int(*) __P(( \ + struct vnode *pvp, \ + int mode, \ + struct ucred *cred, \ + struct vnode **vpp))) cd9660_enotsupp) +#define cd9660_vfree ((int (*) __P((struct vop_vfree_args *)))cd9660_enotsupp) +#define cd9660_truncate \ + ((int (*) __P((struct vop_truncate_args *)))cd9660_enotsupp) +#define cd9660_update \ + ((int (*) __P((struct vop_update_args *)))cd9660_enotsupp) +#define cd9660_bwrite \ + ((int (*) __P((struct vop_bwrite_args *)))cd9660_enotsupp) +#define cd9660_pageout \ + ((int (*) __P((struct vop_pageout_args *)))cd9660_enotsupp) +int cd9660_blktooff(struct vop_blktooff_args *ap); +int cd9660_offtoblk(struct vop_offtoblk_args *ap); +int cd9660_cmap(struct vop_cmap_args *ap); + +#define VOPFUNC int (*)(void *) +/* + * Global vfs data structures for cd9660 + */ +int (**cd9660_vnodeop_p)(void *); +struct vnodeopv_entry_desc cd9660_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)cd9660_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)cd9660_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)cd9660_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)cd9660_open }, /* open */ + { &vop_close_desc, (VOPFUNC)cd9660_close }, /* close */ + { &vop_access_desc, (VOPFUNC)cd9660_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)cd9660_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)cd9660_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)cd9660_read }, /* read */ + { &vop_write_desc, 
(VOPFUNC)cd9660_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)cd9660_lease_check },/* lease */ + { &vop_ioctl_desc, (VOPFUNC)cd9660_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)cd9660_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)cd9660_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)cd9660_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)cd9660_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)cd9660_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)cd9660_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)cd9660_rename }, /* rename */ + { &vop_copyfile_desc, (VOPFUNC)cd9660_copyfile },/* copyfile */ + { &vop_mkdir_desc, (VOPFUNC)cd9660_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)cd9660_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)cd9660_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)cd9660_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)cd9660_readlink },/* readlink */ + { &vop_abortop_desc, (VOPFUNC)cd9660_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)cd9660_inactive },/* inactive */ + { &vop_reclaim_desc, (VOPFUNC)cd9660_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)cd9660_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)cd9660_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)cd9660_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)cd9660_strategy },/* strategy */ + { &vop_print_desc, (VOPFUNC)cd9660_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)cd9660_islocked },/* islocked */ + { &vop_pathconf_desc, (VOPFUNC)cd9660_pathconf },/* pathconf */ + { &vop_advlock_desc, (VOPFUNC)cd9660_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)cd9660_blkatoff },/* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)cd9660_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)cd9660_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)cd9660_truncate },/* truncate */ + { &vop_update_desc, (VOPFUNC)cd9660_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)vn_bwrite }, + { 
&vop_pagein_desc, (VOPFUNC)cd9660_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)cd9660_pageout }, /* Pageout */ + { &vop_getattrlist_desc, (VOPFUNC)cd9660_getattrlist }, /* getattrlist */ + { &vop_blktooff_desc, (VOPFUNC)cd9660_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)cd9660_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)cd9660_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc cd9660_vnodeop_opv_desc = + { &cd9660_vnodeop_p, cd9660_vnodeop_entries }; + +/* + * Special device vnode ops + */ +int (**cd9660_specop_p)(void *); +struct vnodeopv_entry_desc cd9660_specop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)spec_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)spec_open }, /* open */ + { &vop_close_desc, (VOPFUNC)spec_close }, /* close */ + { &vop_access_desc, (VOPFUNC)cd9660_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)cd9660_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)cd9660_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)spec_read }, /* read */ + { &vop_write_desc, (VOPFUNC)spec_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)spec_lease_check }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)spec_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)spec_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)spec_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)spec_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ + { 
&vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)spec_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)cd9660_inactive },/* inactive */ + { &vop_reclaim_desc, (VOPFUNC)cd9660_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)cd9660_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)cd9660_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)spec_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)cd9660_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)cd9660_islocked },/* islocked */ + { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)spec_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)spec_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)spec_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)cd9660_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)vn_bwrite }, + { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */ + { &vop_pagein_desc, (VOPFUNC)cd9660_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)cd9660_pageout }, /* Pageout */ + { &vop_blktooff_desc, (VOPFUNC)cd9660_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)cd9660_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)cd9660_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc cd9660_specop_opv_desc = + { &cd9660_specop_p, cd9660_specop_entries }; + +#if FIFO +int (**cd9660_fifoop_p)(void *); +struct vnodeopv_entry_desc cd9660_fifoop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)fifo_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)fifo_mknod 
}, /* mknod */ + { &vop_open_desc, (VOPFUNC)fifo_open }, /* open */ + { &vop_close_desc, (VOPFUNC)fifo_close }, /* close */ + { &vop_access_desc, (VOPFUNC)cd9660_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)cd9660_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)cd9660_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)fifo_read }, /* read */ + { &vop_write_desc, (VOPFUNC)fifo_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)fifo_lease_check }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)fifo_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)fifo_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)fifo_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)fifo_link } , /* link */ + { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)fifo_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)cd9660_inactive },/* inactive */ + { &vop_reclaim_desc, (VOPFUNC)cd9660_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)cd9660_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)cd9660_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)fifo_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)cd9660_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)cd9660_islocked },/* islocked */ + { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, 
(VOPFUNC)fifo_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)fifo_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)fifo_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)cd9660_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)vn_bwrite }, + { &vop_pagein_desc, (VOPFUNC)cd9660_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)cd9660_pageout }, /* Pageout */ + { &vop_blktooff_desc, (VOPFUNC)cd9660_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)cd9660_offtoblk }, /* offtoblk */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc cd9660_fifoop_opv_desc = + { &cd9660_fifoop_p, cd9660_fifoop_entries }; +#endif /* FIFO */ diff --git a/bsd/isofs/cd9660/iso.h b/bsd/isofs/cd9660/iso.h new file mode 100644 index 000000000..7e6f702c9 --- /dev/null +++ b/bsd/isofs/cd9660/iso.h @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: iso.h,v 1.9 1995/01/18 09:23:19 mycroft Exp $ */ + +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)iso.h 8.4 (Berkeley) 12/5/94 + */ +#ifndef _ISO_H_ +#define _ISO_H_ + +#define ISODCL(from, to) (to - from + 1) + +struct iso_volume_descriptor { + char type [ISODCL(1,1)]; /* 711 */ + char volume_desc_id [ISODCL(2,6)]; + char version [ISODCL(7,7)]; + char data [ISODCL(8,2048)]; +}; + +/* volume descriptor types */ +#define ISO_VD_BOOT 0 +#define ISO_VD_PRIMARY 1 +#define ISO_VD_SUPPLEMENTARY 2 +#define ISO_VD_PARTITION 3 +#define ISO_VD_END 255 + +#define ISO_STANDARD_ID "CD001" +#define ISO_ECMA_ID "CDW01" +#define ISO_XA_ID "CD-XA001" /* XA style disk signature */ +#define ISO9660SIGNATURE 0x4147 /* for getattrlist ATTR_VOL_SIGNATURE */ + +/* Universal Character Set implementation levels (for Joliet) */ +#define ISO_UCS2_Level_1 "%/@" /* No combining chars */ +#define ISO_UCS2_Level_2 "%/C" /* Combining chars allowed with restrictions */ +#define ISO_UCS2_Level_3 "%/E" /* Combining chars allowed, no restrictions */ + +#define UCS_SEPARATOR1 0x002e +#define UCS_SEPARATOR2 0x003b + +/* pathconf filename lengths */ +#define ISO_NAMEMAX (31+1) +#define ISO_JOLIET_NAMEMAX (64*3) +#define ISO_RRIP_NAMEMAX 255 + +/* Finder flags, from Technical Note 40 */ +#define fLockedBit 0x8000 +#define fInvisibleBit 0x4000 +#define fHasBundleBit 0x2000 +#define fSystemBit 0x1000 +#define fNoCopyBit 0x0800 +#define fBusyBit 0x0400 +#define fChangedBit 0x0200 +#define fInitedBit 0x0100 +#define fCachedBit 0x0080 +#define fSharedBit 0x0040 +#define fAlwaysBit 0x0020 /* always switch-launch */ +#define fNeverBit 0x0010 /* never switch-launch */ +#define fOwnApplBit 0x0002 +#define fOnDesktopBit 0x0001 + +#define EXTFNDRINFOSIZE 16 + +struct finder_info { + unsigned long fdType; + unsigned long fdCreator; + unsigned short fdFlags; + unsigned short fdReserved[3]; +}; + +struct iso_primary_descriptor { + char type [ISODCL ( 1, 1)]; /* 711 */ + char volume_desc_id [ISODCL ( 2, 6)]; + char version [ISODCL ( 7, 7)]; /* 711 */ + char flags [ISODCL ( 8, 8)]; /* SVD only */ + char 
system_id [ISODCL ( 9, 40)]; /* achars */ + char volume_id [ISODCL ( 41, 72)]; /* dchars */ + char unused2 [ISODCL ( 73, 80)]; + char volume_space_size [ISODCL ( 81, 88)]; /* 733 */ + char escape_seq [ISODCL ( 89, 120)]; /* SVD only */ + char volume_set_size [ISODCL (121, 124)]; /* 723 */ + char volume_sequence_number [ISODCL (125, 128)]; /* 723 */ + char logical_block_size [ISODCL (129, 132)]; /* 723 */ + char path_table_size [ISODCL (133, 140)]; /* 733 */ + char type_l_path_table [ISODCL (141, 144)]; /* 731 */ + char opt_type_l_path_table [ISODCL (145, 148)]; /* 731 */ + char type_m_path_table [ISODCL (149, 152)]; /* 732 */ + char opt_type_m_path_table [ISODCL (153, 156)]; /* 732 */ + char root_directory_record [ISODCL (157, 190)]; /* 9.1 */ + char volume_set_id [ISODCL (191, 318)]; /* dchars */ + char publisher_id [ISODCL (319, 446)]; /* achars */ + char preparer_id [ISODCL (447, 574)]; /* achars */ + char application_id [ISODCL (575, 702)]; /* achars */ + char copyright_file_id [ISODCL (703, 739)]; /* 7.5 dchars */ + char abstract_file_id [ISODCL (740, 776)]; /* 7.5 dchars */ + char bibliographic_file_id [ISODCL (777, 813)]; /* 7.5 dchars */ + char creation_date [ISODCL (814, 830)]; /* 8.4.26.1 */ + char modification_date [ISODCL (831, 847)]; /* 8.4.26.1 */ + char expiration_date [ISODCL (848, 864)]; /* 8.4.26.1 */ + char effective_date [ISODCL (865, 881)]; /* 8.4.26.1 */ + char file_structure_version [ISODCL (882, 882)]; /* 711 */ + char unused4 [ISODCL (883, 883)]; + char application_data1 [ISODCL (884, 1024)]; + char CDXASignature [ISODCL (1025, 1032)]; + char CDXAResv [ISODCL (1033, 1050)]; + char application_data2 [ISODCL (1051, 1395)]; +}; +#define ISO_DEFAULT_BLOCK_SIZE 2048 + +/* from HighSierra.h in MacOS land */ +typedef struct +{ + char signature [ISODCL (1, 2)]; /* x42 x41 - 'BA' signature */ + u_char systemUseID [ISODCL (3, 3)]; /* 02 = no icon, 03 = icon, 04 = icon + bundle */ + u_char fileType [ISODCL (4, 7)]; /* such as 'TEXT' or 'STAK' */ + 
u_char fileCreator [ISODCL (8, 11)]; /* such as 'hscd' or 'WILD' */ + u_char finderFlags [ISODCL (12, 13)]; /* optional for type 06 */ +} AppleExtension; + +typedef struct +{ + char signature [ISODCL (1, 2)]; /* x41 x41 - 'AA' signature */ + u_char OSULength [ISODCL (3, 3)]; /* optional SystemUse length (size of this struct) */ + u_char systemUseID [ISODCL (4, 4)]; /* 1 = ProDOS 2 = HFS */ + u_char fileType [ISODCL (5, 8)]; /* such as 'TEXT' or 'STAK' */ + u_char fileCreator [ISODCL (9, 12)]; /* such as 'hscd' or 'WILD' */ + u_char finderFlags [ISODCL (13, 14)]; /* only certain bits of this are used */ +} NewAppleExtension; + +struct iso_directory_record { + char length [ISODCL (1, 1)]; /* 711 */ + char ext_attr_length [ISODCL (2, 2)]; /* 711 */ + u_char extent [ISODCL (3, 10)]; /* 733 */ + u_char size [ISODCL (11, 18)]; /* 733 */ + char date [ISODCL (19, 25)]; /* 7 by 711 */ + char flags [ISODCL (26, 26)]; + char file_unit_size [ISODCL (27, 27)]; /* 711 */ + char interleave [ISODCL (28, 28)]; /* 711 */ + char volume_sequence_number [ISODCL (29, 32)]; /* 723 */ + char name_len [ISODCL (33, 33)]; /* 711 */ + char name [1]; /* XXX */ +}; +/* + * cannot take sizeof(iso_directory_record), because of + * possible alignment + * of the last entry (34 instead of 33) + */ +#define ISO_DIRECTORY_RECORD_SIZE 33 + +/* + * iso_directory_record.flags for Directory Records (except CD-I discs) + */ +#define existenceBit 0x01 /* Invisible */ +#define directoryBit 0x02 +#define associatedBit 0x04 +#define recordBit 0x08 +#define protectionBit 0x10 +#define multiextentBit 0x80 + +struct iso_extended_attributes { + u_char owner [ISODCL (1, 4)]; /* 723 */ + u_char group [ISODCL (5, 8)]; /* 723 */ + u_char perm [ISODCL (9, 10)]; /* 9.5.3 */ + char ctime [ISODCL (11, 27)]; /* 8.4.26.1 */ + char mtime [ISODCL (28, 44)]; /* 8.4.26.1 */ + char xtime [ISODCL (45, 61)]; /* 8.4.26.1 */ + char ftime [ISODCL (62, 78)]; /* 8.4.26.1 */ + char recfmt [ISODCL (79, 79)]; /* 711 */ + char recattr 
[ISODCL (80, 80)]; /* 711 */ + u_char reclen [ISODCL (81, 84)]; /* 723 */ + char system_id [ISODCL (85, 116)]; /* achars */ + char system_use [ISODCL (117, 180)]; + char version [ISODCL (181, 181)]; /* 711 */ + char len_esc [ISODCL (182, 182)]; /* 711 */ + char reserved [ISODCL (183, 246)]; + u_char len_au [ISODCL (247, 250)]; /* 723 */ +}; + +/* CD-ROM Format type */ +enum ISO_FTYPE { ISO_FTYPE_DEFAULT, ISO_FTYPE_9660, ISO_FTYPE_RRIP, + ISO_FTYPE_JOLIET, ISO_FTYPE_ECMA }; + +#ifndef ISOFSMNT_ROOT +#define ISOFSMNT_ROOT 0 +#endif + +struct iso_mnt { + int im_flags; /* mount flags */ + int im_flags2; /* misc flags */ + + struct mount *im_mountp; + dev_t im_dev; + struct vnode *im_devvp; + + int logical_block_size; + int im_bshift; + int im_bmask; + + int volume_space_size; + struct netexport im_export; + + char root[ISODCL (157, 190)]; + int root_extent; + int root_size; + enum ISO_FTYPE iso_ftype; + + int rr_skip; + int rr_skip0; + + struct timespec creation_date; /* needed for getattrlist */ + struct timespec modification_date; /* needed for getattrlist */ + u_char volume_id[32]; /* name of volume */ +}; + +/* bit settings for iso_mnt.im_flags2 */ + +/* + * CD is in XA format. 
Need this to find where apple extensions + * are in the iso_directory_record + */ +#define IMF2_IS_CDXA 0x00000001 + +#define VFSTOISOFS(mp) ((struct iso_mnt *)((mp)->mnt_data)) + +#define blkoff(imp, loc) ((loc) & (imp)->im_bmask) +#define lblktosize(imp, blk) ((blk) << (imp)->im_bshift) +#define lblkno(imp, loc) ((loc) >> (imp)->im_bshift) +#define blksize(imp, ip, lbn) ((imp)->logical_block_size) + +int cd9660_mount __P((struct mount *, + char *, caddr_t, struct nameidata *, struct proc *)); +int cd9660_start __P((struct mount *, int, struct proc *)); +int cd9660_unmount __P((struct mount *, int, struct proc *)); +int cd9660_root __P((struct mount *, struct vnode **)); +int cd9660_quotactl __P((struct mount *, int, uid_t, caddr_t, struct proc *)); +int cd9660_statfs __P((struct mount *, struct statfs *, struct proc *)); +int cd9660_sync __P((struct mount *, int, struct ucred *, struct proc *)); +int cd9660_vget __P((struct mount *, void *, struct vnode **)); +int cd9660_fhtovp __P((struct mount *, struct fid *, struct mbuf *, + struct vnode **, int *, struct ucred **)); +int cd9660_vptofh __P((struct vnode *, struct fid *)); +int cd9660_init __P(()); + +int cd9660_mountroot __P((void)); + +int cd9660_sysctl __P((int *, u_int, void *, size_t *, void *, size_t, struct proc *)); + +extern int (**cd9660_vnodeop_p)(void *); +extern int (**cd9660_specop_p)(void *); +#if FIFO +extern int (**cd9660_fifoop_p)(void *); +#endif + +static __inline int +isonum_711(p) + u_char *p; +{ + return *p; +} + +static __inline int +isonum_712(p) + char *p; +{ + return *p; +} + +#ifndef UNALIGNED_ACCESS + +static __inline int +isonum_723(p) + u_char *p; +{ + return *p|(p[1] << 8); +} + +static __inline int +isonum_733(p) + u_char *p; +{ + return *p|(p[1] << 8)|(p[2] << 16)|(p[3] << 24); +} + +#else /* UNALIGNED_ACCESS */ + +#if BYTE_ORDER == LITTLE_ENDIAN + +static __inline int +isonum_723(p) + u_char *p; +{ + return *(u_int16_t *)p; +} + +static __inline int +isonum_733(p) + u_char *p; 
+{ + return *(u_int32_t *)p; +} + +#endif + +#if BYTE_ORDER == BIG_ENDIAN + +static __inline int +isonum_723(p) + u_char *p; +{ + return *(u_int16_t *)(p + 2); +} + +static __inline int +isonum_733(p) + u_char *p; +{ + return *(u_int32_t *)(p + 4); +} + +#endif + +#endif /* UNALIGNED_ACCESS */ + +int isofncmp __P((u_char *, int, u_char *, int)); +int ucsfncmp __P((u_int16_t *, int, u_int16_t *, int)); +void isofntrans __P((u_char *, int, u_char *, u_short *, int)); +void ucsfntrans __P((u_int16_t *, int, u_char *, u_short *, int)); +ino_t isodirino __P((struct iso_directory_record *, struct iso_mnt *)); +int attrcalcsize __P((struct attrlist *attrlist)); +void packattrblk __P((struct attrlist *alist, struct vnode *vp, + void **attrbufptrptr, void **varbufptrptr)); + + +/* + * Associated files have a leading '='. + */ +#define ASSOCCHAR '=' + +#endif /* ! _ISO_H_ */ diff --git a/bsd/isofs/cd9660/iso_rrip.h b/bsd/isofs/cd9660/iso_rrip.h new file mode 100644 index 000000000..08e9f4ec6 --- /dev/null +++ b/bsd/isofs/cd9660/iso_rrip.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: iso_rrip.h,v 1.3 1994/06/29 06:32:02 cgd Exp $ */ + +/*- + * Copyright (c) 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley + * by Pace Willisson (pace@blitz.com). The Rock Ridge Extension + * Support code is derived from software contributed to Berkeley + * by Atsushi Murai (amurai@spec.co.jp). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_rrip.h 8.2 (Berkeley) 1/23/94 + */ + + +/* + * Analyze function flag (similar to RR field bits) + */ +#define ISO_SUSP_ATTR 0x0001 +#define ISO_SUSP_DEVICE 0x0002 +#define ISO_SUSP_SLINK 0x0004 +#define ISO_SUSP_ALTNAME 0x0008 +#define ISO_SUSP_CLINK 0x0010 +#define ISO_SUSP_PLINK 0x0020 +#define ISO_SUSP_RELDIR 0x0040 +#define ISO_SUSP_TSTAMP 0x0080 +#define ISO_SUSP_IDFLAG 0x0100 +#define ISO_SUSP_EXTREF 0x0200 +#define ISO_SUSP_CONT 0x0400 +#define ISO_SUSP_OFFSET 0x0800 +#define ISO_SUSP_STOP 0x1000 +#define ISO_SUSP_UNKNOWN 0x8000 + +typedef struct { + struct iso_node *inop; + int fields; /* interesting fields in this analysis */ + daddr_t iso_ce_blk; /* block of continuation area */ + off_t iso_ce_off; /* offset of continuation area */ + int iso_ce_len; /* length of continuation area */ + struct iso_mnt *imp; /* mount structure */ + ino_t *inump; /* inode number pointer */ + char *outbuf; /* name/symbolic link output area */ + u_short *outlen; /* length of above */ + u_short maxlen; /* maximum length of above */ + int cont; /* continuation of above */ +} ISO_RRIP_ANALYZE; + +int cd9660_rrip_analyze __P((struct iso_directory_record *isodir, + struct iso_node *inop, struct iso_mnt *imp)); +int cd9660_rrip_getname __P((struct iso_directory_record *isodir, + char *outbuf, u_short *outlen, + ino_t *inump, struct iso_mnt *imp)); +int cd9660_rrip_getsymname __P((struct iso_directory_record *isodir, + char *outbuf, u_short 
*outlen, + struct iso_mnt *imp)); +int cd9660_rrip_offset __P((struct iso_directory_record *isodir, + struct iso_mnt *imp)); diff --git a/bsd/kern/ast.h b/bsd/kern/ast.h new file mode 100644 index 000000000..1ebbe2c1c --- /dev/null +++ b/bsd/kern/ast.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* CMU_HIST */ + +/* + * kern/ast.h: Definitions for Asynchronous System Traps. + */ + +#ifndef _KERN_AST_H_ +#define _KERN_AST_H_ + +#include + +#ifdef BSD_USE_APC + +extern thread_apc_handler_t bsd_ast; + +#else /* !BSD_USE_APC */ + +#define AST_NONE 0 +#define AST_BSD 0x80 +#define AST_BSD_INIT 0x100 + +extern void ast_on(int); +extern thread_ast_set(thread_act_t, int); +extern void bsd_ast(thread_act_t); + +#endif /* !BSD_USE_APC */ + +#endif /* _KERN_AST_H_ */ diff --git a/bsd/kern/bsd_init.c b/bsd/kern/bsd_init.c new file mode 100644 index 000000000..e6e41c402 --- /dev/null +++ b/bsd/kern/bsd_init.c @@ -0,0 +1,782 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)init_main.c 8.16 (Berkeley) 5/14/95 + */ + +/* + * + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * HISTORY + * 16-Apr-98 A. Ramesh at Apple + * Created for Apple Core from DR2 init_main.c. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#include +#include + +#include + +#include +#include +#include +#include + +extern shared_region_mapping_t system_shared_region; + +char copyright[] = +"Copyright (c) 1982, 1986, 1989, 1991, 1993\n\tThe Regents of the University of California. All rights reserved.\n\n"; + +extern void ux_handler(); + +/* Components of the first process -- never freed. */ +struct proc proc0; +struct session session0; +struct pgrp pgrp0; +struct pcred cred0; +struct filedesc filedesc0; +struct plimit limit0; +struct pstats pstats0; +struct sigacts sigacts0; +struct proc *kernproc, *initproc; + + +long cp_time[CPUSTATES]; +long dk_seek[DK_NDRIVE]; +long dk_time[DK_NDRIVE]; +long dk_wds[DK_NDRIVE]; +long dk_wpms[DK_NDRIVE]; +long dk_xfer[DK_NDRIVE]; +long dk_bps[DK_NDRIVE]; + +int dk_busy; +int dk_ndrive; + +long tk_cancc; +long tk_nin; +long tk_nout; +long tk_rawcc; + +/* Global variables to make pstat happy. We do swapping differently */ +int nswdev, nswap; +int nswapmap; +void *swapmap; +struct swdevt swdevt[1]; + +dev_t rootdev; /* device of the root */ +dev_t dumpdev; /* device to take dumps on */ +long dumplo; /* offset into dumpdev */ +long hostid; +char hostname[MAXHOSTNAMELEN]; +int hostnamelen; +char domainname[MAXDOMNAMELEN]; +int domainnamelen; + +char rootdevice[16]; /* hfs device names have at least 9 chars */ +struct timeval boottime; /* GRODY! This has to go... 
*/ +#if FIXME /* [ */ +struct timeval time; +#endif /* FIXME ] */ + +#ifdef KMEMSTATS +struct kmemstats kmemstats[M_LAST]; +#endif + +int lbolt; /* awoken once a second */ +struct vnode *rootvp; +int boothowto = RB_DEBUG; + +vm_map_t bsd_pageable_map; +vm_map_t mb_map; + +int cmask = CMASK; + +int parse_bsd_args(void); +extern int bsd_hardclockinit; +extern vm_address_t bsd_init_task; +extern char init_task_failure_data[]; + +funnel_t * kernel_flock; +funnel_t * network_flock; +int disable_funnel = 0; /* disables split funnel */ +int enable_funnel = 0; /* disables split funnel */ + +/* + * Initialization code. + * Called from cold start routine as + * soon as a stack and segmentation + * have been established. + * Functions: + * clear and free user core + * turn on clock + * hand craft 0th process + * call all initialization routines + * fork - process 0 to schedule + * - process 1 execute bootstrap + * - process 2 to page out + */ + +/* + * Sets the name for the given task. + */ +void proc_name(s, p) + char *s; + struct proc *p; +{ + int length = strlen(s); + + bcopy(s, p->p_comm, + length >= sizeof(p->p_comm) ? 
sizeof(p->p_comm) : + length + 1); +} + + +/* To allow these values to be patched, they're globals here */ +#include +struct rlimit vm_initial_limit_stack = { DFLSSIZ, MAXSSIZ }; +struct rlimit vm_initial_limit_data = { DFLDSIZ, MAXDSIZ }; +struct rlimit vm_initial_limit_core = { DFLCSIZ, MAXCSIZ }; + +extern thread_t first_thread; + +#define SPL_DEBUG 0 +#if SPL_DEBUG +#define dprintf(x) printf x +#else SPL_DEBUG +#define dprintf(x) +#endif /* SPL_DEBUG */ + +extern thread_t cloneproc(struct proc *, int); + +void +bsd_init() +{ + register struct proc *p; + extern struct ucred *rootcred; + register int i; + int s; + thread_t th; + extern void bsdinit_task(); + void lightning_bolt(void ); + kern_return_t ret; + boolean_t funnel_state; + extern void uthread_zone_init(); + + extern int (*mountroot) __P((void)); + + +#if 1 + /* split funnel is enabled by default */ + PE_parse_boot_arg("dfnl", &disable_funnel); +#else + /* split funnel is disabled befault */ + disable_funnel = 1; + PE_parse_boot_arg("efnl", &enable_funnel); + if (enable_funnel) { + /* enable only if efnl is set in bootarg */ + disable_funnel = 0; + } +#endif + + kernel_flock = funnel_alloc(KERNEL_FUNNEL); + if (kernel_flock == (funnel_t *)0 ) { + panic("fail to allocate kernel mutex lock\n"); + } + + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + if (!disable_funnel) { + network_flock = funnel_alloc(NETWORK_FUNNEL); + if (network_flock == (funnel_t *)0 ) { + panic("fail to allocate network mutex lock\n"); + } + } else { + network_flock = kernel_flock; + } + + + printf(copyright); + + kmeminit(); + + parse_bsd_args(); + + bsd_bufferinit(); + + /* Initialize the uthread zone */ + uthread_zone_init(); + + /* + * Initialize process and pgrp structures. 
+ */ + procinit(); + + kernproc = &proc0; + + p = kernproc; + + /* kernel_task->proc = kernproc; */ + set_bsdtask_info(kernel_task,(void *)kernproc); + p->p_pid = 0; + + /* give kernproc a name */ + proc_name("kernel_task", p); + + if (current_task() != kernel_task) + printf("We are in for problem, current task in not kernel task\n"); + + /* + * Create process 0. + */ + LIST_INSERT_HEAD(&allproc, p, p_list); + p->p_pgrp = &pgrp0; + LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); + LIST_INIT(&pgrp0.pg_members); + LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist); + + pgrp0.pg_session = &session0; + session0.s_count = 1; + session0.s_leader = p; + + p->task = kernel_task; + + p->p_stat = SRUN; + p->p_flag = P_INMEM|P_SYSTEM; + p->p_nice = NZERO; + p->p_pptr = p; + lockinit(&p->signal_lock, PVM, "signal", 0, 0); + p->sigwait = FALSE; + p->sigwait_thread = THREAD_NULL; + p->exit_thread = THREAD_NULL; + + /* Create credentials. */ + lockinit(&cred0.pc_lock, PLOCK, "proc0 cred", 0, 0); + cred0.p_refcnt = 1; + p->p_cred = &cred0; + p->p_ucred = crget(); + p->p_ucred->cr_ngroups = 1; /* group 0 */ + + /* Create the file descriptor table. */ + filedesc0.fd_refcnt = 1+1; /* +1 so shutdown will not _FREE_ZONE */ + p->p_fd = &filedesc0; + filedesc0.fd_cmask = cmask; + + /* Create the limits structures. */ + p->p_limit = &limit0; + for (i = 0; i < sizeof(p->p_rlimit)/sizeof(p->p_rlimit[0]); i++) + limit0.pl_rlimit[i].rlim_cur = + limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; + limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; + limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = MAXUPRC; + limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; + limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; + limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; + limit0.p_refcnt = 1; + + p->p_stats = &pstats0; + p->p_sigacts = &sigacts0; + + /* + * Charge root for one process. 
+ */ + (void)chgproccnt(0, 1); + + + /* + * Allocate a kernel submap for pageable memory + * for temporary copying (table(), execve()). + */ + { + vm_offset_t min; + + ret = kmem_suballoc(kernel_map, + &min, + (vm_size_t)512*1024, + TRUE, + TRUE, + &bsd_pageable_map); + if (ret != KERN_SUCCESS) + panic("Failed to allocare bsd pageable map\n"); + } + + /* + * Initialize the calendar by + * reading the BBC, if not already set. + */ + IOKitResetTime(); + + ubc_init(); + + /* Initialize the file systems. */ + vfsinit(); + + /* Initialize mbuf's. */ + mbinit(); + + /* Initialize syslog */ + log_init(); + + /* Initialize SysV shm */ + shminit(); + + /* POSIX Shm and Sem */ + pshm_cache_init(); + psem_cache_init(); + + /* + * Initialize protocols. Block reception of incoming packets + * until everything is ready. + */ + s = splimp(); + sysctl_register_fixed(); + dlil_init(); + socketinit(); + domaininit(); + splx(s); + + /* + * Create kernel idle cpu processes. This must be done + * before a context switch can occur (and hence I/O can + * happen in the binit() call). + */ + p->p_fd->fd_cdir = NULL; + p->p_fd->fd_rdir = NULL; + + +#ifdef GPROF + /* Initialize kernel profiling. */ + kmstartup(); +#endif + + /* kick off timeout driven events by calling first time */ + thread_wakeup(&lbolt); + timeout(lightning_bolt,0,hz); + + bsd_autoconf(); + + /* + * We attach the loopback interface *way* down here to ensure + * it happens after autoconf(), otherwise it becomes the + * "primary" interface. + */ +#include +#if NLOOP > 0 + loopattach(); /* XXX */ +#endif + + vnode_pager_bootstrap(); + + /* Mount the root file system. */ + while( TRUE) { + int err; + + setconf(); + /* + * read the time after clock_initialize_calendar() + * and before nfs mount + */ + microtime(&time); + + if (0 == (err = vfs_mountroot())) + break; + printf("cannot mount root, errno = %d\n", err); + boothowto |= RB_ASKNAME; + } + + mountlist.cqh_first->mnt_flag |= MNT_ROOTFS; + + /* Get the vnode for '/'. 
Set fdp->fd_fd.fd_cdir to reference it. */ + if (VFS_ROOT(mountlist.cqh_first, &rootvnode)) + panic("cannot find root vnode"); + filedesc0.fd_cdir = rootvnode; + VREF(rootvnode); + VOP_UNLOCK(rootvnode, 0, p); + + + /* + * Now can look at time, having had a chance to verify the time + * from the file system. Reset p->p_rtime as it may have been + * munched in mi_switch() after the time got set. + */ + p->p_stats->p_start = boottime = time; + p->p_rtime.tv_sec = p->p_rtime.tv_usec = 0; + +#ifdef DEVFS + { + extern void devfs_kernel_mount(char * str); + + devfs_kernel_mount("/dev"); + } +#endif DEVFS + + /* Initialize signal state for process 0. */ + siginit(p); + + /* printf("Launching user process\n"); */ + + bsd_utaskbootstrap(); + + (void) thread_funnel_set(kernel_flock, FALSE); +} + +void +bsdinit_task() +{ + struct proc *p = current_proc(); + struct uthread *ut; + kern_return_t kr; + thread_act_t th_act; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + +#if FIXME /* [ */ + + ipc_port_t master_bootstrap_port; + task_t bootstrap_task; + thread_act_t bootstrap_thr_act; + ipc_port_t root_device_port; + + master_bootstrap_port = ipc_port_alloc_kernel(); + if (master_bootstrap_port == IP_NULL) + panic("can't allocate master bootstrap port"); + printf("setting bootstrap port \n"); + task_set_special_port(bootstrap_task, + TASK_BOOTSTRAP_PORT, + ipc_port_make_send(master_bootstrap_port)); + + printf("Setting exception port for the init task\n"); + (void) task_set_exception_ports(get_threadtask(th), + EXC_MASK_ALL & + ~(EXC_MASK_SYSCALL | + EXC_MASK_MACH_SYSCALL | EXC_MASK_RPC_ALERT), + ux_exception_port, + EXCEPTION_DEFAULT, 0); + +#endif /* FIXME ] */ + proc_name("init", p); + + ux_handler_init(); + /* port_reference(ux_exception_port);*/ + + th_act = current_act(); + (void) host_set_exception_ports(host_priv_self(), + EXC_MASK_ALL & ~(EXC_MASK_SYSCALL | + EXC_MASK_MACH_SYSCALL | + EXC_MASK_RPC_ALERT), + ux_exception_port, + 
EXCEPTION_DEFAULT, 0); + + (void) task_set_exception_ports(get_threadtask(th_act), + EXC_MASK_ALL & ~(EXC_MASK_SYSCALL | + EXC_MASK_MACH_SYSCALL | + EXC_MASK_RPC_ALERT), + ux_exception_port, + EXCEPTION_DEFAULT, 0); + + + + + ut = (uthread_t)get_bsdthread_info(th_act); + ut->uu_ar0 = (void *)get_user_regs(th_act); + + bsd_hardclockinit = 1; /* Start bsd hardclock */ + bsd_init_task = get_threadtask(th_act); + init_task_failure_data[0] = 0; + vm_set_shared_region(get_threadtask(th_act), system_shared_region); + load_init_program(p); + + (void) thread_funnel_set(kernel_flock, FALSE); + +} + +void +lightning_bolt() +{ + boolean_t funnel_state; + extern void klogwakeup(void); + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + thread_wakeup(&lbolt); + timeout(lightning_bolt,0,hz); + klogwakeup(); + + (void) thread_funnel_set(kernel_flock, FALSE); +} + +bsd_autoconf(){ + extern kern_return_t IOKitBSDInit( void ); + + kminit(); + + /* + * Early startup for bsd pseudodevices. + */ + { + struct pseudo_init *pi; + + for (pi = pseudo_inits; pi->ps_func; pi++) + (*pi->ps_func) (pi->ps_count); + } + + return( IOKitBSDInit()); +} + + +#include // for MAXPARTITIONS + +setconf() +{ + extern kern_return_t IOFindBSDRoot( char * rootName, + dev_t * root, u_int32_t * flags ); + + extern int (*mountroot) __P((void)); + extern int nfs_mountroot(); /* nfs_vfsops.c */ + + u_int32_t flags; + kern_return_t err; + + /* + * calls into IOKit can generate networking registrations + * which needs to be under network funnel. 
Right thing to do + * here is to drop the funnel alltogether and regrab it afterwards + */ + thread_funnel_set(kernel_flock, FALSE); + err = IOFindBSDRoot( rootdevice, &rootdev, &flags ); + thread_funnel_set(kernel_flock, TRUE); + if( err) { + printf("setconf: IOFindBSDRoot returned an error (%d);" + "setting rootdevice to 'sd0a'.\n", err); /* XXX DEBUG TEMP */ + rootdev = makedev( 6, 0 ); + strcpy( rootdevice, "sd0a" ); + flags = 0; + } + + /* if network device then force nfs root */ + if( flags & 1 ) { + printf("mounting nfs root\n"); + mountroot = nfs_mountroot; + } else { + /* otherwise have vfs determine root filesystem */ + mountroot = NULL; + } + +} + +bsd_utaskbootstrap() +{ + thread_act_t th_act; + + th_act = (thread_act_t)cloneproc(kernproc, 0); + initproc = pfind(1); + thread_hold(th_act); + (void) thread_stop_wait(getshuttle_thread(th_act)); + thread_ast_set(th_act,AST_BSD_INIT); + thread_release(th_act); + thread_unstop(getshuttle_thread(th_act)); + (void) thread_resume(th_act); +} + +parse_bsd_args() +{ + extern char init_args[]; + char namep[16]; + extern int boothowto; + extern int srv; + extern int ncl; + + int len; + + if (PE_parse_boot_arg("-s", namep)) { + boothowto |= RB_SINGLE; + len = strlen(init_args); + if(len != 0) + strcat(init_args," -s"); + else + strcat(init_args,"-s"); + } + if (PE_parse_boot_arg("-b", namep)) { + boothowto |= RB_NOBOOTRC; + len = strlen(init_args); + if(len != 0) + strcat(init_args," -b"); + else + strcat(init_args,"-b"); + } + + if (PE_parse_boot_arg("-F", namep)) { + len = strlen(init_args); + if(len != 0) + strcat(init_args," -F"); + else + strcat(init_args,"-F"); + } + + if (PE_parse_boot_arg("-v", namep)) { + len = strlen(init_args); + if(len != 0) + strcat(init_args," -v"); + else + strcat(init_args,"-v"); + } + + if (PE_parse_boot_arg("-x", namep)) { /* safe boot */ + len = strlen(init_args); + if(len != 0) + strcat(init_args," -x"); + else + strcat(init_args,"-x"); + } + + PE_parse_boot_arg("srv", &srv); + 
PE_parse_boot_arg("ncl", &ncl); + PE_parse_boot_arg("nbuf", &nbuf); + + return 0; +} + +boolean_t +thread_funnel_switch( + int oldfnl, + int newfnl) +{ + thread_t cur_thread; + boolean_t funnel_state_prev; + int curfnl; + funnel_t * curflock; + funnel_t * oldflock; + funnel_t * newflock; + funnel_t * exist_funnel; + extern int disable_funnel; + + + if (disable_funnel) + return(TRUE); + + if(oldfnl == newfnl) { + panic("thread_funnel_switch: can't switch to same funnel"); + } + + if ((oldfnl != NETWORK_FUNNEL) && (oldfnl != KERNEL_FUNNEL)) + { + panic("thread_funnel_switch: invalid oldfunnel"); + } + if ((newfnl != NETWORK_FUNNEL) && (newfnl != KERNEL_FUNNEL)) + { + panic("thread_funnel_switch: invalid newfunnel"); + } + + if((curflock = thread_funnel_get()) == THR_FUNNEL_NULL) { + panic("thread_funnel_switch: no funnel held"); + } + + cur_thread = current_thread(); + + if ((oldfnl == NETWORK_FUNNEL) && (curflock != network_flock)) + panic("thread_funnel_switch: network funnel not held"); + + if ((oldfnl == KERNEL_FUNNEL) && (curflock != kernel_flock)) + panic("thread_funnel_switch: kernel funnel not held"); + + if(oldfnl == NETWORK_FUNNEL) { + oldflock = network_flock; + newflock = kernel_flock; + } else { + oldflock = kernel_flock; + newflock = network_flock; + } + KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, oldflock, 1, 0, 0, 0); + thread_funnel_set(oldflock, FALSE); + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, newflock, 1, 0, 0, 0); + thread_funnel_set(newflock, TRUE); + KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE, newflock, 1, 0, 0, 0); + + return(TRUE); +} diff --git a/bsd/kern/bsd_stubs.c b/bsd/kern/bsd_stubs.c new file mode 100644 index 000000000..1952e98cc --- /dev/null +++ b/bsd/kern/bsd_stubs.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for SET */ + +/* Just to satisfy pstat command */ +int dmmin, dmmax, dmtext; + +kmem_mb_alloc(vm_map_t mbmap, int size) +{ + vm_offset_t addr; + if (kernel_memory_allocate(mbmap, &addr, size, + 0, + KMA_NOPAGEWAIT|KMA_KOBJECT) == KERN_SUCCESS) + return((void *)addr); + else + return(0); + +} + +pcb_synch() {} +unix_master() {} +unix_release() {} + +struct proc * +current_proc(void) +{ + /* Never returns a NULL */ + struct proc *p = (struct proc *)get_bsdtask_info(current_task()); + if (p == NULL) + p = kernproc; + return (p); +} + +/* Device switch add delete routines */ + +extern int nblkdev, nchrdev; + +struct bdevsw nobdev = NO_BDEVICE; +struct cdevsw nocdev = NO_CDEVICE; +/* + * if index is -1, return a free slot if avaliable + * else see whether the index is free + * return the major number that is free else -1 + * + */ +int +bdevsw_isfree(int index) +{ + struct bdevsw *devsw; + if (index == -1) { + devsw = bdevsw; + for(index=0; index < nblkdev; index++, devsw++) { + if(memcmp((char *)devsw, + 
(char *)&nobdev, + sizeof(struct bdevsw)) == 0) + break; + } + } + + if ((index < 0) || (index >= nblkdev) || + (memcmp((char *)devsw, + (char *)&nobdev, + sizeof(struct bdevsw)) != 0)) { + return(-1); + } + return(index); +} + +/* + * if index is -1, find a free slot to add + * else see whether the slot is free + * return the major number that is used else -1 + */ +int +bdevsw_add(int index, struct bdevsw * bsw) +{ + struct bdevsw *devsw; + + if (index == -1) { + devsw = bdevsw; + for(index=0; index < nblkdev; index++, devsw++) { + if(memcmp((char *)devsw, + (char *)&nobdev, + sizeof(struct bdevsw)) == 0) + break; + } + } + devsw = &bdevsw[index]; + if ((index < 0) || (index >= nblkdev) || + (memcmp((char *)devsw, + (char *)&nobdev, + sizeof(struct bdevsw)) != 0)) { + return(-1); + } + bdevsw[index] = *bsw; + return(index); +} +/* + * if the slot has the same bsw, then remove + * else -1 + */ +int +bdevsw_remove(int index, struct bdevsw * bsw) +{ + struct bdevsw *devsw; + + devsw = &bdevsw[index]; + if ((index < 0) || (index >= nblkdev) || + (memcmp((char *)devsw, + (char *)bsw, + sizeof(struct bdevsw)) != 0)) { + return(-1); + } + bdevsw[index] = nobdev; + return(index); +} + +/* + * if index is -1, return a free slot if avaliable + * else see whether the index is free + * return the major number that is free else -1 + */ +int +cdevsw_isfree(int index) +{ + struct cdevsw *devsw; + + if (index == -1) { + devsw = cdevsw; + for(index=0; index < nchrdev; index++, devsw++) { + if(memcmp((char *)devsw, + (char *)&nocdev, + sizeof(struct cdevsw)) == 0) + break; + } + } + devsw = &cdevsw[index]; + if ((index < 0) || (index >= nchrdev) || + (memcmp((char *)devsw, + (char *)&nocdev, + sizeof(struct cdevsw)) != 0)) { + return(-1); + } + return(index); +} + +/* + * if index is -1, find a free slot to add + * else see whether the slot is free + * return the major number that is used else -1 + */ +int +cdevsw_add(int index, struct cdevsw * csw) +{ + struct cdevsw *devsw; + + 
if (index == -1) { + devsw = cdevsw; + for(index=0; index < nchrdev; index++, devsw++) { + if(memcmp((char *)devsw, + (char *)&nocdev, + sizeof(struct cdevsw)) == 0) + break; + } + } + devsw = &cdevsw[index]; + if ((index < 0) || (index >= nchrdev) || + (memcmp((char *)devsw, + (char *)&nocdev, + sizeof(struct cdevsw)) != 0)) { + return(-1); + } + cdevsw[index] = *csw; + return(index); +} +/* + * if the index has the same bsw, then remove + * else -1 + */ +int +cdevsw_remove(int index, struct cdevsw * csw) +{ + struct cdevsw *devsw; + + devsw = &cdevsw[index]; + if ((index < 0) || (index >= nchrdev) || + (memcmp((char *)devsw, + (char *)csw, + sizeof(struct cdevsw)) != 0)) { + return(-1); + } + cdevsw[index] = nocdev; + return(index); +} + +int +memcmp(s1, s2, n) + register char *s1, *s2; + register n; +{ + while (--n >= 0) + if (*s1++ != *s2++) + return (*--s1 - *--s2); + return (0); +} +int +issingleuser(void) +{ + char namep[16]; + + + if (PE_parse_boot_arg("-s", namep)) { + return(1); + } else { + return(0); + } +} + +void * +tbeproc(void *procp) +{ + struct proc *p = procp; + + if (p) + SET(p->p_flag, P_TBE); + return; +} + diff --git a/bsd/kern/init_sysent.c b/bsd/kern/init_sysent.c new file mode 100644 index 000000000..58cfb48ec --- /dev/null +++ b/bsd/kern/init_sysent.c @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995-1999 Apple Computer, Inc. All Rights Reserved */ +/* + * HISTORY + * 12-Feb-00 Clark Warner (warner_c) at Apple + * Added copyfile system call + * 26-Jul-99 Earsh Nandkeshwar (earsh) at Apple + * Changed getdirentryattr to getdirentriesattr + * 22-Jan-98 Clark Warner (warner_c) at Apple + * Created new system calls for supporting HFS/HFS Plus file system semantics + * + * 04-Jun-95 Mac Gillon (mgillon) at NeXT + * Created new version based on NS3.3 and 4.4BSD + * + */ + + +#include +#include +#include +#include + +/* serial or parallel system call */ +#define syss(fn,no) {no, 0, KERNEL_FUNNEL, fn} +#define sysp(fn,no) {no, 1, KERNEL_FUNNEL, fn} +#define sysnets(fn,no) {no, 0, NETWORK_FUNNEL, fn} +#define sysnetp(fn,no) {no, 1, NETWORK_FUNNEL, fn} +/* + * definitions + */ +int nosys(); +int exit(); +int fork(); +int read(); +int write(); +int open(); +int close(); +int wait4(); +int link(); +int unlink(); +int chdir(); +int fchdir(); +int mknod(); +int chmod(); +int chown(); +int obreak(); +int getfsstat(); +#if COMPAT_GETFSSTAT +int ogetfsstat(); +#endif +int getpid(); +int mount(); +int unmount(); +int setuid(); +int getuid(); +int geteuid(); +int ptrace(); +int recvmsg(); +int sendmsg(); +int recvfrom(); +int accept(); +int getpeername(); +int getsockname(); +int access(); +int chflags(); +int fchflags(); +int sync(); +int kill(); +int getppid(); +int dup(); +int pipe(); +int getegid(); +int profil(); +int load_shared_file(); +int 
reset_shared_file(); +#if KTRACE +int ktrace(); +#else +#endif +int sigaction(); +int getgid(); +int sigprocmask(); +int getlogin(); +int setlogin(); +int acct(); +int sigpending(); +int sigaltstack(); +int ioctl(); +int reboot(); +int revoke(); +int symlink(); +int readlink(); +int execve(); +int umask(); +int chroot(); +int msync(); +int vfork(); +int sbrk(); +int sstk(); +int ovadvise(); +int munmap(); +int mprotect(); +int madvise(); +int mincore(); +int getgroups(); +int setgroups(); +int getpgrp(); +int setpgid(); +int setitimer(); +int swapon(); +int getitimer(); +int getdtablesize(); +int dup2(); +int fcntl(); +int select(); +int fsync(); +int setpriority(); +int socket(); +int connect(); +int getpriority(); +int sigreturn(); +int bind(); +int setsockopt(); +int listen(); +int sigsuspend(); +#if TRACE +int vtrace(); +#else +#endif +int gettimeofday(); +int getrusage(); +int getsockopt(); +int readv(); +int writev(); +int settimeofday(); +int fchown(); +int fchmod(); +int rename(); +int flock(); +int mkfifo(); +int sendto(); +int shutdown(); +int socketpair(); +int mkdir(); +int rmdir(); +int utimes(); +int adjtime(); +int setsid(); +int quotactl(); +int nfssvc(); +int statfs(); +int fstatfs(); +int getfh(); +int setgid(); +int setegid(); +int seteuid(); +#if LFS +int lfs_bmapv(); +int lfs_markv(); +int lfs_segclean(); +int lfs_segwait(); +#else +#endif +int stat(); +int fstat(); +int lstat(); +int pathconf(); +int fpathconf(); +int getrlimit(); +int setrlimit(); +int getdirentries(); +int mmap(); +int nosys(); +int lseek(); +int truncate(); +int ftruncate(); +int __sysctl(); +int undelete(); +int setprivexec(); +int add_profil(); +#ifdef NOTEVER +int table(); +#endif /* NOTEVER */ + +int kdebug_trace(); + +int mlock(); +int munlock(); +int minherit(); +int mlockall(); +int munlockall(); +#if COMPAT_43 +#define compat(name,n) syss(__CONCAT(o,name),n) +#define compatp(name,n) sysp(__CONCAT(o,name),n) +#define comaptnet(name,n) sysnets(__CONCAT(o,name),n) 
+#define comaptnetp(name,n) sysnetp(__CONCAT(o,name),n) + +int ocreat(); +int olseek(); +int ostat(); +int olstat(); +#if KTRACE +#else +#endif +int ofstat(); +int ogetkerninfo(); +int osmmap(); +int ogetpagesize(); +int ommap(); +int owait(); +int ogethostname(); +int osethostname(); +int oaccept(); +int osend(); +int orecv(); +int osigvec(); +int osigblock(); +int osigsetmask(); +int osigstack(); +int orecvmsg(); +int osendmsg(); +#if TRACE +#else +#endif +int orecvfrom(); +int osetreuid(); +int osetregid(); +int otruncate(); +int oftruncate(); +int ogetpeername(); +int ogethostid(); +int osethostid(); +int ogetrlimit(); +int osetrlimit(); +int okillpg(); +int oquota(); +int ogetsockname(); +int ogetdomainname(); +int osetdomainname(); +int owait3(); +#if NFS +#else +#endif +int ogetdirentries(); +#if NFS +#else +#endif +#if LFS +#else +#endif + +#if NETAT +int ATsocket(); +int ATgetmsg(); +int ATputmsg(); +int ATPsndreq(); +int ATPsndrsp(); +int ATPgetreq(); +int ATPgetrsp(); +#endif /* NETAT */ + +/* Calls for supporting HFS Semantics */ + +int mkcomplex(); +int statv(); +int lstatv(); +int fstatv(); +int getattrlist(); +int setattrlist(); +int getdirentriesattr(); +int exchangedata(); +int checkuseraccess(); +int searchfs(); +int delete(); +int copyfile(); + +/* end of HFS calls */ + +#else /* COMPAT_43 */ +#define compat(n, name) syss(nosys,0) +#define compatp(n, name) sysp(nosys,0) +#define comaptnet(n, name) sysnets(nosys,0) +#define comaptnetp(n, name) sysnetp(nosys,0) +#endif /* COMPAT_43 */ + +int watchevent(); +int waitevent(); +int modwatch(); +int fsctl(); +int semsys(); +int msgsys(); +int shmsys(); +int semctl(); +int semget(); +int semop(); +int semconfig(); +int msgctl(); +int msgget(); +int msgsnd(); +int msgrcv(); +int shmat(); +int shmctl(); +int shmdt(); +int shmget(); +int shm_open(); +int shm_unlink(); +int sem_open(); +int sem_close(); +int sem_unlink(); +int sem_wait(); +int sem_trywait(); +int sem_post(); +int sem_getvalue(); +int 
sem_init(); +int sem_destroy(); +/* + * System call switch table. + */ + + +struct sysent sysent[] = { + syss(nosys,0), /* 0 = indir */ + syss(exit,1), /* 1 = exit */ + syss(fork,0), /* 2 = fork */ + sysp(read,3), /* 3 = read */ + sysp(write,3), /* 4 = write */ + syss(open,3), /* 5 = open */ + syss(close,1), /* 6 = close */ + syss(wait4, 4), /* 7 = wait4 */ + compat(creat,2), /* 8 = old creat */ + syss(link,2), /* 9 = link */ + syss(unlink,1), /* 10 = unlink */ + syss(nosys, 0), /* 11 was obsolete execv */ + syss(chdir,1), /* 12 = chdir */ + syss(fchdir,1), /* 13 = fchdir */ + syss(mknod,3), /* 14 = mknod */ + syss(chmod,2), /* 15 = chmod */ + syss(chown,3), /* 16 = chown; now 3 args */ + syss(obreak,1), /* 17 = old break */ +#if COMPAT_GETFSSTAT + syss(ogetfsstat, 3), /* 18 = ogetfsstat */ +#else + syss(getfsstat, 3), /* 18 = getfsstat */ +#endif + compat(lseek,3), /* 19 = old lseek */ + sysp(getpid,0), /* 20 = getpid */ + syss(nosys, 0), /* 21 was obsolete mount */ + syss(nosys, 0), /* 22 was obsolete umount */ + syss(setuid,1), /* 23 = setuid */ + sysp(getuid,0), /* 24 = getuid */ + sysp(geteuid,0), /* 25 = geteuid */ + syss(ptrace,4), /* 26 = ptrace */ + sysnets(recvmsg,3), /* 27 = recvmsg */ + sysnets(sendmsg,3), /* 28 = sendmsg */ + sysnets(recvfrom,6), /* 29 = recvfrom */ + sysnets(accept,3), /* 30 = accept */ + sysnets(getpeername,3), /* 31 = getpeername */ + sysnets(getsockname,3), /* 32 = getsockname */ + syss(access,2), /* 33 = access */ + syss(chflags,2), /* 34 = chflags */ + syss(fchflags,2), /* 35 = fchflags */ + syss(sync,0), /* 36 = sync */ + syss(kill,2), /* 37 = kill */ + compat(stat,2), /* 38 = old stat */ + sysp(getppid,0), /* 39 = getppid */ + compat(lstat,2), /* 40 = old lstat */ + syss(dup,2), /* 41 = dup */ + syss(pipe,0), /* 42 = pipe */ + sysp(getegid,0), /* 43 = getegid */ + syss(profil,4), /* 44 = profil */ +#if KTRACE + syss(ktrace,4), /* 45 = ktrace */ +#else + syss(nosys,0), /* 45 = nosys */ +#endif + syss(sigaction,3), /* 46 = 
sigaction */ + sysp(getgid,0), /* 47 = getgid */ + syss(sigprocmask,2), /* 48 = sigprocmask */ + syss(getlogin,2), /* 49 = getlogin */ + syss(setlogin,1), /* 50 = setlogin */ + syss(acct,1), /* 51 = turn acct off/on */ + syss(sigpending,0), /* 52 = sigpending */ + syss(sigaltstack,2), /* 53 = sigaltstack */ + syss(ioctl,3), /* 54 = ioctl */ + syss(reboot,2), /* 55 = reboot */ + syss(revoke,1), /* 56 = revoke */ + syss(symlink,2), /* 57 = symlink */ + syss(readlink,3), /* 58 = readlink */ + syss(execve,3), /* 59 = execve */ + syss(umask,1), /* 60 = umask */ + syss(chroot,1), /* 61 = chroot */ + compat(fstat,2), /* 62 = old fstat */ + syss(nosys,0), /* 63 = used internally, reserved */ + compat(getpagesize,0), /* 64 = old getpagesize */ + syss(msync,3), /* 65 = msync */ + syss(vfork,0), /* 66 = vfork */ + syss(nosys,0), /* 67 was obsolete vread */ + syss(nosys,0), /* 68 was obsolete vwrite */ + syss(sbrk,1), /* 69 = sbrk */ + syss(sstk,1), /* 70 = sstk */ + compat(smmap,6), /* 71 = old mmap */ + syss(ovadvise,1), /* 72 = old vadvise */ + syss(munmap,2), /* 73 = munmap */ + syss(mprotect,3), /* 74 = mprotect */ + syss(madvise,3), /* 75 = madvise */ + syss(nosys,0), /* 76 was obsolete vhangup */ + syss(nosys,0), /* 77 was obsolete vlimit */ + syss(mincore,3), /* 78 = mincore */ + sysp(getgroups,2), /* 79 = getgroups */ + sysp(setgroups,2), /* 80 = setgroups */ + sysp(getpgrp,0), /* 81 = getpgrp */ + sysp(setpgid,2), /* 82 = setpgid */ + syss(setitimer,3), /* 83 = setitimer */ + compat(wait,0), /* 84 = old wait */ + syss(swapon,1), /* 85 = swapon */ + syss(getitimer,2), /* 86 = getitimer */ + compat(gethostname,2), /* 87 = old gethostname */ + compat(sethostname,2), /* 88 = old sethostname */ + sysp(getdtablesize, 0), /* 89 getdtablesize */ + syss(dup2,2), /* 90 = dup2 */ + syss(nosys,0), /* 91 was obsolete getdopt */ + syss(fcntl,3), /* 92 = fcntl */ + syss(select,5), /* 93 = select */ + syss(nosys,0), /* 94 was obsolete setdopt */ + syss(fsync,1), /* 95 = fsync */ + 
sysp(setpriority,3), /* 96 = setpriority */ + sysnets(socket,3), /* 97 = socket */ + sysnets(connect,3), /* 98 = connect */ + comaptnet(accept,3), /* 99 = accept */ + sysp(getpriority,2), /* 100 = getpriority */ + comaptnet(send,4), /* 101 = old send */ + comaptnet(recv,4), /* 102 = old recv */ + syss(sigreturn,1), /* 103 = sigreturn */ + sysnets(bind,3), /* 104 = bind */ + sysnets(setsockopt,5), /* 105 = setsockopt */ + sysnets(listen,2), /* 106 = listen */ + syss(nosys,0), /* 107 was vtimes */ + compat(sigvec,3), /* 108 = sigvec */ + compat(sigblock,1), /* 109 = sigblock */ + compat(sigsetmask,1), /* 110 = sigsetmask */ + syss(sigsuspend,1), /* 111 = sigpause */ + compat(sigstack,2), /* 112 = sigstack */ + comaptnet(recvmsg,3), /* 113 = recvmsg */ + comaptnet(sendmsg,3), /* 114 = sendmsg */ + syss(nosys,0), /* 115 = old vtrace */ + syss(gettimeofday,2), /* 116 = gettimeofday */ + sysp(getrusage,2), /* 117 = getrusage */ + sysnets(getsockopt,5), /* 118 = getsockopt */ + syss(nosys,0), /* 119 = old resuba */ + sysp(readv,3), /* 120 = readv */ + sysp(writev,3), /* 121 = writev */ + syss(settimeofday,2), /* 122 = settimeofday */ + syss(fchown,3), /* 123 = fchown */ + syss(fchmod,2), /* 124 = fchmod */ + comaptnet(recvfrom,6), /* 125 = recvfrom */ + compat(setreuid,2), /* 126 = setreuid */ + compat(setregid,2), /* 127 = setregid */ + syss(rename,2), /* 128 = rename */ + compat(truncate,2), /* 129 = old truncate */ + compat(ftruncate,2), /* 130 = ftruncate */ + syss(flock,2), /* 131 = flock */ + syss(mkfifo,2), /* 132 = nosys */ + sysnets(sendto,6), /* 133 = sendto */ + sysnets(shutdown,2), /* 134 = shutdown */ + sysnets(socketpair,5), /* 135 = socketpair */ + syss(mkdir,2), /* 136 = mkdir */ + syss(rmdir,1), /* 137 = rmdir */ + syss(utimes,2), /* 138 = utimes */ + syss(nosys,0), /* 139 = used internally */ + syss(adjtime,2), /* 140 = adjtime */ + comaptnet(getpeername,3),/* 141 = getpeername */ + compat(gethostid,0), /* 142 = old gethostid */ + sysp(nosys,0), /* 143 = 
old sethostid */ + compat(getrlimit,2), /* 144 = old getrlimit */ + compat(setrlimit,2), /* 145 = old setrlimit */ + compat(killpg,2), /* 146 = old killpg */ + syss(setsid,0), /* 147 = setsid */ + syss(nosys,0), /* 148 was setquota */ + syss(nosys,0), /* 149 was qquota */ + comaptnet(getsockname,3),/* 150 = getsockname */ + /* + * Syscalls 151-183 inclusive are reserved for vendor-specific + * system calls. (This includes various calls added for compatibity + * with other Unix variants.) + */ + syss(nosys,0), /* 151 was m68k specific machparam */ + sysp(setprivexec,1),/* 152 = setprivexec */ + syss(nosys,0), /* 153 */ + syss(nosys,0), /* 154 */ + syss(nfssvc,2), /* 155 = nfs_svc */ + compat(getdirentries,4), /* 156 = old getdirentries */ + syss(statfs, 2), /* 157 = statfs */ + syss(fstatfs, 2), /* 158 = fstatfs */ + syss(unmount, 2), /* 159 = unmount */ + syss(nosys,0), /* 160 was async_daemon */ + syss(getfh,2), /* 161 = get file handle */ + /*?????*/ + compat(getdomainname,2), /* 162 = getdomainname */ + compat(setdomainname,2), /* 163 = setdomainname */ + syss(nosys,0), /* 164 */ +#if QUOTA + syss(quotactl, 4), /* 165 = quotactl */ +#else QUOTA + syss(nosys, 0), /* 165 = not configured */ +#endif /* QUOTA */ + syss(nosys,0), /* 166 was exportfs */ + syss(mount, 4), /* 167 = mount */ + syss(nosys,0), /* 168 was ustat */ + syss(nosys,0), /* 169 = nosys */ + syss(nosys,0), /* 170 was table */ + compat(wait3,3), /* 171 = old wait3 */ + syss(nosys,0), /* 172 was rpause */ + syss(nosys,0), /* 173 = nosys */ + syss(nosys,0), /* 174 was getdents */ + syss(nosys,0), /* 175 was gc_control */ + syss(add_profil,4), /* 176 = add_profil */ + syss(nosys,0), /* 177 */ + syss(nosys,0), /* 178 */ + syss(nosys,0), /* 179 */ + syss(kdebug_trace,0), /* 180 */ + syss(setgid,1), /* 181 */ + syss(setegid,1), /* 182 */ + syss(seteuid,1), /* 183 */ +#if LFS + syss(lfs_bmapv,3), /* 184 = lfs_bmapv */ + syss(lfs_markv,3), /* 185 = lfs_markv */ + syss(lfs_segclean,2), /* 186 = lfs_segclean 
*/ + syss(lfs_segwait,2), /* 187 = lfs_segwait */ +#else + syss(nosys,0), /* 184 = nosys */ + syss(nosys,0), /* 185 = nosys */ + syss(nosys,0), /* 186 = nosys */ + syss(nosys,0), /* 187 = nosys */ +#endif + syss(stat,2), /* 188 = stat */ + syss(fstat,2), /* 189 = fstat */ + syss(lstat,2), /* 190 = lstat */ + syss(pathconf,2), /* 191 = pathconf */ + syss(fpathconf,2), /* 192 = fpathconf */ +#if COMPAT_GETFSSTAT + syss(getfsstat,0), /* 193 = getfsstat */ +#else + syss(nosys,0), /* 193 is unused */ +#endif + syss(getrlimit,2), /* 194 = getrlimit */ + syss(setrlimit,2), /* 195 = setrlimit */ + syss(getdirentries,4), /* 196 = getdirentries */ +#ifdef DOUBLE_ALIGN_PARAMS + syss(mmap,8), /* 197 = mmap */ +#else + syss(mmap,7), /* 197 = mmap */ +#endif + syss(nosys,0), /* 198 = __syscall */ + syss(lseek,5), /* 199 = lseek */ + syss(truncate,4), /* 200 = truncate */ + syss(ftruncate,4), /* 201 = ftruncate */ + syss(__sysctl,6), /* 202 = __sysctl */ + sysp(mlock, 2), /* 203 = mlock */ + syss(munlock, 2), /* 204 = munlock */ +#if NETAT + syss(undelete,1), /* 205 = undelete */ + sysnets(ATsocket,1), /* 206 = AppleTalk ATsocket */ + sysnets(ATgetmsg,4), /* 207 = AppleTalk ATgetmsg*/ + sysnets(ATputmsg,4), /* 208 = AppleTalk ATputmsg*/ + sysnets(ATPsndreq,4), /* 209 = AppleTalk ATPsndreq*/ + sysnets(ATPsndrsp,4), /* 210 = AppleTalk ATPsndrsp*/ + sysnets(ATPgetreq,3), /* 211 = AppleTalk ATPgetreq*/ + sysnets(ATPgetrsp,2), /* 212 = AppleTalk ATPgetrsp*/ + syss(nosys,0), /* 213 = Reserved for AT expansion */ + syss(nosys,0), /* 214 = Reserved for AT expansion */ + syss(nosys,0), /* 215 = Reserved for AT expansion */ +#else + syss(undelete,1), /* 205 = undelete */ + +/* System calls 205 - 215 are reserved to allow HFS and AT to coexist */ +/* CHW 1/22/98 */ + + syss(nosys,0), /* 206 = Reserved for AppleTalk */ + syss(nosys,0), /* 207 = Reserved for AppleTalk */ + syss(nosys,0), /* 208 = Reserved for AppleTalk */ + syss(nosys,0), /* 209 = Reserved for AppleTalk */ + syss(nosys,0), /* 
210 = Reserved for AppleTalk */ + syss(nosys,0), /* 211 = Reserved for AppleTalk */ + syss(nosys,0), /* 212 = Reserved for AppleTalk */ + syss(nosys,0), /* 213 = Reserved for AppleTalk */ + syss(nosys,0), /* 214 = Reserved for AppleTalk */ + syss(nosys,0), /* 215 = Reserved for AppleTalk */ +#endif /* NETAT */ + +/* + * System Calls 216 - 230 are reserved for calls to support HFS/HFS Plus + * file system semantics. Currently, we only use 215-227. The rest is + * for future expansion in anticipation of new MacOS APIs for HFS Plus. + * These calls are not conditionalized becuase while they are specific + * to HFS semantics, they are not specific to the HFS filesystem. + * We expect all filesystems to recognize the call and report that it is + * not supported or to actually implement it. + */ + syss(nosys,3), /* 216 = HFS make complex file call (multipel forks */ + syss(nosys,2), /* 217 = HFS statv extended stat call for HFS */ + syss(nosys,2), /* 218 = HFS lstatv extended lstat call for HFS */ + syss(nosys,2), /* 219 = HFS fstatv extended fstat call for HFS */ + syss(getattrlist,5), /* 220 = HFS getarrtlist get attribute list cal */ + syss(setattrlist,5), /* 221 = HFS setattrlist set attribute list */ + syss(getdirentriesattr,8), /* 222 = HFS getdirentriesattr get directory attributes */ + syss(exchangedata,3), /* 223 = HFS exchangedata exchange file contents */ + syss(checkuseraccess,6),/* 224 = HFS checkuseraccess check access to a file */ + syss(searchfs,6), /* 225 = HFS searchfs to implement catalog searching */ + syss(delete,1), /* 226 = private HFS delete (with Mac OS semantics) */ + syss(copyfile,4), /* 227 = Copyfile for orignally for AFP */ + syss(nosys,0), /* 228 */ + syss(nosys,0), /* 229 */ + syss(nosys,0), /* 230 */ + sysnets(watchevent,2), /* 231 */ + sysnets(waitevent,2), /* 232 */ + sysnets(modwatch,2), /* 233 */ + syss(nosys,0), /* 234 */ + syss(nosys,0), /* 235 */ + syss(nosys,0), /* 236 */ + syss(nosys,0), /* 237 */ + syss(nosys,0), /* 238 */ + 
syss(nosys,0), /* 239 */ + syss(nosys,0), /* 240 */ + syss(nosys,0), /* 241 */ + syss(fsctl,0), /* 242 */ + syss(nosys,0), /* 243 */ + syss(nosys,0), /* 244 */ + syss(nosys,0), /* 245 */ + syss(nosys,0), /* 246 */ + syss(nosys,0), /* 247 */ + syss(nosys,0), /* 248 */ + syss(nosys,0), /* 249 */ + syss(minherit,3), /* 250 */ + syss(semsys,5), /* 251 = semsys */ + syss(msgsys,6), /* 252 = msgsys */ + syss(shmsys,4), /* 253 = shmsys */ + syss(semctl,4), /* 254 = semctl */ + syss(semget,3), /* 255 = semget */ + syss(semop,3), /* 256 = semop */ + syss(semconfig,1), /* 257 = semconfig */ + syss(msgctl,3), /* 258 = msgctl */ + syss(msgget,2), /* 259 = msgget */ + syss(msgsnd,4), /* 260 = msgsnd */ + syss(msgrcv,5), /* 261 = msgrcv */ + syss(shmat,3), /* 262 = shmat */ + syss(shmctl,3), /* 263 = shmctl */ + syss(shmdt,1), /* 264 = shmdt */ + syss(shmget,3), /* 265 = shmget */ + syss(shm_open,3), /* 266 = shm_open */ + syss(shm_unlink,1), /* 267 = shm_unlink */ + syss(sem_open,4), /* 268 = sem_open */ + syss(sem_close,1), /* 269 = sem_close */ + syss(sem_unlink,1), /* 270 = sem_unlink */ + syss(sem_wait,1), /* 271 = sem_wait */ + syss(sem_trywait,1), /* 272 = sem_trywait */ + syss(sem_post,1), /* 273 = sem_post */ + syss(sem_getvalue,2), /* 274 = sem_getvalue */ + syss(sem_init,3), /* 275 = sem_init */ + syss(sem_destroy,1), /* 276 = sem_destroy */ + syss(nosys,0), /* 277 */ + syss(nosys,0), /* 278 */ + syss(nosys,0), /* 279 */ + syss(nosys,0), /* 280 */ + syss(nosys,0), /* 281 */ + syss(nosys,0), /* 282 */ + syss(nosys,0), /* 283 */ + syss(nosys,0), /* 284 */ + syss(nosys,0), /* 285 */ + syss(nosys,0), /* 286 */ + syss(nosys,0), /* 287 */ + syss(nosys,0), /* 288 */ + syss(nosys,0), /* 289 */ + syss(nosys,0), /* 290 */ + syss(nosys,0), /* 291 */ + syss(nosys,0), /* 292 */ + syss(nosys,0), /* 293 */ + syss(nosys,0), /* 294 */ + syss(nosys,0), /* 295 */ + syss(load_shared_file,7), /* 296 */ + syss(reset_shared_file,3), /* 297 */ + syss(nosys,0), /* 298 */ + syss(nosys,0), /* 
299 */ + syss(nosys,0), /* 300 */ + syss(nosys,0), /* 301 */ + syss(nosys,0), /* 302 */ + syss(nosys,0), /* 303 */ + syss(nosys,0), /* 304 */ + syss(nosys,0), /* 305 */ + syss(nosys,0), /* 306 */ + syss(nosys,0), /* 307 */ + syss(nosys,0), /* 308 */ + syss(nosys,0), /* 309 */ + syss(nosys,0), /* 310 */ + syss(nosys,0), /* 311 */ + syss(nosys,0), /* 312 */ + syss(nosys,0), /* 313 */ + syss(nosys,0), /* 314 */ + syss(nosys,0), /* 315 */ + syss(nosys,0), /* 316 */ + syss(nosys,0), /* 317 */ + syss(nosys,0), /* 318 */ + syss(nosys,0), /* 319 */ + syss(nosys,0), /* 320 */ + syss(nosys,0), /* 321 */ + syss(nosys,0), /* 322 */ + syss(nosys,0), /* 323 */ + syss(mlockall,1), /* 324 */ + syss(munlockall,1), /* 325 */ + syss(nosys,0), /* 326 */ + syss(nosys,0), /* 327 */ + syss(nosys,0), /* 328 */ + syss(nosys,0), /* 329 */ + syss(nosys,0), /* 330 */ + syss(nosys,0), /* 331 */ + syss(nosys,0), /* 332 */ + syss(nosys,0), /* 333 */ + syss(nosys,0), /* 334 */ + syss(nosys,0), /* 335 */ + syss(nosys,0), /* 336 */ + syss(nosys,0), /* 337 */ + syss(nosys,0), /* 338 */ + syss(nosys,0), /* 339 */ + syss(nosys,0), /* 340 */ + syss(nosys,0), /* 341 */ + syss(nosys,0), /* 342 */ + syss(nosys,0), /* 343 */ + syss(nosys,0), /* 344 */ + syss(nosys,0), /* 345 */ + syss(nosys,0), /* 346 */ + syss(nosys,0), /* 347 */ + syss(nosys,0), /* 348 */ + syss(nosys,0) /* 349 */ +}; +int nsysent = sizeof(sysent) / sizeof(sysent[0]); diff --git a/bsd/kern/kdebug.c b/bsd/kern/kdebug.c new file mode 100644 index 000000000..89fff7000 --- /dev/null +++ b/bsd/kern/kdebug.c @@ -0,0 +1,976 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +#define HZ 100 +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */ +kd_buf * kd_bufptr; +unsigned int kd_buftomem=0; +kd_buf * kd_buffer=0; +kd_buf * kd_buflast; +kd_buf * kd_readlast; +unsigned int nkdbufs = 8192; +unsigned int kd_bufsize = 0; +unsigned int kdebug_flags = 0; +unsigned int kdebug_enable=0; +unsigned int kdebug_nolog=1; +unsigned int kdlog_beg=0; +unsigned int kdlog_end=0; +unsigned int kdlog_value1=0; +unsigned int kdlog_value2=0; +unsigned int kdlog_value3=0; +unsigned int kdlog_value4=0; + +unsigned long long kd_prev_timebase = 0LL; +decl_simple_lock_data(,kd_trace_lock); + +kd_threadmap *kd_mapptr = 0; +unsigned int kd_mapsize = 0; +unsigned int kd_mapcount = 0; +unsigned int kd_maptomem = 0; + +pid_t global_state_pid = -1; /* Used to control exclusive use of kd_buffer */ + +#define DBG_FUNC_MASK 0xfffffffc + +#ifdef ppc +extern natural_t rtclock_decrementer_min; +#endif /* ppc */ + +struct kdebug_args { + int code; + int arg1; + int arg2; + int arg3; + int arg4; + int arg5; +}; + +struct krt +{ + kd_threadmap *map; /* pointer to the map buffer */ + int count; + int maxcount; + struct proc *p; +}; + +typedef struct krt krt_t; + +/* Support syscall SYS_kdebug_trace */ +kdebug_trace(p, uap, retval) 
+ struct proc *p; + struct kdebug_args *uap; + register_t *retval; +{ + if (kdebug_nolog) + return(EINVAL); + + kernel_debug(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, 0); + return(0); +} + + +void +kernel_debug(debugid, arg1, arg2, arg3, arg4, arg5) +unsigned int debugid, arg1, arg2, arg3, arg4, arg5; +{ + kd_buf * kd; + struct proc *curproc; + int s; + unsigned long long now; + mach_timespec_t *tsp; + + s = ml_set_interrupts_enabled(FALSE); + + if (kdebug_nolog) + { + ml_set_interrupts_enabled(s); + return; + } + + simple_lock(&kd_trace_lock); + if (kdebug_flags & KDBG_PIDCHECK) + { + /* If kdebug flag is not set for current proc, return */ + curproc = current_proc(); + if ((curproc && !(curproc->p_flag & P_KDEBUG)) && + ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE))) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + else if (kdebug_flags & KDBG_PIDEXCLUDE) + { + /* If kdebug flag is set for current proc, return */ + curproc = current_proc(); + if ((curproc && (curproc->p_flag & P_KDEBUG)) && + ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE))) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + + if (kdebug_flags & KDBG_RANGECHECK) + { + if ((debugid < kdlog_beg) || (debugid > kdlog_end) + && (debugid >> 24 != DBG_TRACE)) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + else if (kdebug_flags & KDBG_VALCHECK) + { + if ((debugid & DBG_FUNC_MASK) != kdlog_value1 && + (debugid & DBG_FUNC_MASK) != kdlog_value2 && + (debugid & DBG_FUNC_MASK) != kdlog_value3 && + (debugid & DBG_FUNC_MASK) != kdlog_value4 && + (debugid >> 24 != DBG_TRACE)) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + kd = kd_bufptr; + kd->debugid = debugid; + kd->arg1 = arg1; + kd->arg2 = arg2; + kd->arg3 = arg3; + kd->arg4 = arg4; + kd->arg5 = (int)current_thread(); + if 
(cpu_number()) + kd->arg5 |= KDBG_CPU_MASK; + + ml_get_timebase((unsigned long long *)&kd->timestamp); + + /* Watch for out of order timestamps */ + now = (((unsigned long long)kd->timestamp.tv_sec) << 32) | + (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec)); + + if (now < kd_prev_timebase) + { + /* timestamps are out of order -- adjust */ + kd_prev_timebase++; + tsp = (mach_timespec_t *)&kd_prev_timebase; + kd->timestamp.tv_sec = tsp->tv_sec; + kd->timestamp.tv_nsec = tsp->tv_nsec; + } + else + { + /* Then just store the previous timestamp */ + kd_prev_timebase = now; + } + + + kd_bufptr++; + + if (kd_bufptr >= kd_buflast) + kd_bufptr = kd_buffer; + if (kd_bufptr == kd_readlast) { + if (kdebug_flags & KDBG_NOWRAP) + kdebug_nolog = 1; + kdebug_flags |= KDBG_WRAPPED; + } + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); +} + +void +kernel_debug1(debugid, arg1, arg2, arg3, arg4, arg5) +unsigned int debugid, arg1, arg2, arg3, arg4, arg5; +{ + kd_buf * kd; + struct proc *curproc; + int s; + unsigned long long now; + mach_timespec_t *tsp; + + s = ml_set_interrupts_enabled(FALSE); + + if (kdebug_nolog) + { + ml_set_interrupts_enabled(s); + return; + } + + simple_lock(&kd_trace_lock); + if (kdebug_flags & KDBG_PIDCHECK) + { + /* If kdebug flag is not set for current proc, return */ + curproc = current_proc(); + if ((curproc && !(curproc->p_flag & P_KDEBUG)) && + ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE))) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + else if (kdebug_flags & KDBG_PIDEXCLUDE) + { + /* If kdebug flag is set for current proc, return */ + curproc = current_proc(); + if ((curproc && (curproc->p_flag & P_KDEBUG)) && + ((debugid&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED, 0) | DBG_FUNC_NONE))) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + + if (kdebug_flags & KDBG_RANGECHECK) + { + if ((debugid < kdlog_beg) || (debugid 
> kdlog_end) + && (debugid >> 24 != DBG_TRACE)) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + else if (kdebug_flags & KDBG_VALCHECK) + { + if ((debugid & DBG_FUNC_MASK) != kdlog_value1 && + (debugid & DBG_FUNC_MASK) != kdlog_value2 && + (debugid & DBG_FUNC_MASK) != kdlog_value3 && + (debugid & DBG_FUNC_MASK) != kdlog_value4 && + (debugid >> 24 != DBG_TRACE)) + { + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + return; + } + } + + kd = kd_bufptr; + kd->debugid = debugid; + kd->arg1 = arg1; + kd->arg2 = arg2; + kd->arg3 = arg3; + kd->arg4 = arg4; + kd->arg5 = arg5; + ml_get_timebase((unsigned long long *)&kd->timestamp); + + /* Watch for out of order timestamps */ + now = (((unsigned long long)kd->timestamp.tv_sec) << 32) | + (unsigned long long)((unsigned int)(kd->timestamp.tv_nsec)); + + if (now < kd_prev_timebase) + { + /* timestamps are out of order -- adjust */ + kd_prev_timebase++; + tsp = (mach_timespec_t *)&kd_prev_timebase; + kd->timestamp.tv_sec = tsp->tv_sec; + kd->timestamp.tv_nsec = tsp->tv_nsec; + } + else + { + /* Then just store the previous timestamp */ + kd_prev_timebase = now; + } + + kd_bufptr++; + + if (kd_bufptr >= kd_buflast) + kd_bufptr = kd_buffer; + if (kd_bufptr == kd_readlast) { + if (kdebug_flags & KDBG_NOWRAP) + kdebug_nolog = 1; + kdebug_flags |= KDBG_WRAPPED; + } + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); +} + + +kdbg_bootstrap() +{ + kd_bufsize = nkdbufs * sizeof(kd_buf); + if (kmem_alloc(kernel_map, &kd_buftomem, + (vm_size_t)kd_bufsize) == KERN_SUCCESS) + kd_buffer = (kd_buf *) kd_buftomem; + else kd_buffer= (kd_buf *) 0; + kdebug_flags &= ~KDBG_WRAPPED; + if (kd_buffer) { + simple_lock_init(&kd_trace_lock); + kdebug_flags |= (KDBG_INIT | KDBG_BUFINIT); + kd_bufptr = kd_buffer; + kd_buflast = &kd_bufptr[nkdbufs]; + kd_readlast = kd_bufptr; + kd_prev_timebase = 0LL; + return(0); + } else { + kd_bufsize=0; + kdebug_flags &= ~(KDBG_INIT | KDBG_BUFINIT); 
+ return(EINVAL); + } + +} + +kdbg_reinit() +{ + int x; + int ret=0; + + kdebug_enable = 0; + kdebug_nolog = 1; + + if ((kdebug_flags & KDBG_INIT) && (kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) + kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize); + + if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) + { + kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize); + kdebug_flags &= ~KDBG_MAPINIT; + kd_mapsize = 0; + kd_mapptr = (kd_threadmap *) 0; + kd_mapcount = 0; + } + + ret= kdbg_bootstrap(); + + return(ret); +} + +void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4) +{ + int i; + char *dbg_nameptr; + int dbg_namelen; + long dbg_parms[4]; + + if (!proc) + { + *arg1 = 0; + *arg2 = 0; + *arg3 = 0; + *arg4 = 0; + return; + } + + /* Collect the pathname for tracing */ + dbg_nameptr = proc->p_comm; + dbg_namelen = strlen(proc->p_comm); + dbg_parms[0]=0L; + dbg_parms[1]=0L; + dbg_parms[2]=0L; + dbg_parms[3]=0L; + + if(dbg_namelen > sizeof(dbg_parms)) + dbg_namelen = sizeof(dbg_parms); + + for(i=0;dbg_namelen > 0; i++) + { + dbg_parms[i]=*(long*)dbg_nameptr; + dbg_nameptr += sizeof(long); + dbg_namelen -= sizeof(long); + } + + *arg1=dbg_parms[0]; + *arg2=dbg_parms[1]; + *arg3=dbg_parms[2]; + *arg4=dbg_parms[3]; +} + +kdbg_resolve_map(thread_act_t th_act, krt_t *t) +{ + kd_threadmap *mapptr; + + if(t->count < t->maxcount) + { + mapptr=&t->map[t->count]; + mapptr->thread = (unsigned int)getshuttle_thread(th_act); + mapptr->valid = 1; + (void) strncpy (mapptr->command, t->p->p_comm, + sizeof(t->p->p_comm)-1); + mapptr->command[sizeof(t->p->p_comm)-1] = '\0'; + t->count++; + } +} + +void kdbg_mapinit() +{ + struct proc *p; + struct krt akrt; + + if (kdebug_flags & KDBG_MAPINIT) + return; + + /* Calculate size of thread map buffer */ + for (p = allproc.lh_first, kd_mapcount=0; p; + p = p->p_list.le_next) + { + kd_mapcount += get_task_numacts((task_t)p->task); + } + + kd_mapsize = kd_mapcount * 
sizeof(kd_threadmap); + if((kmem_alloc(kernel_map, & kd_maptomem, + (vm_size_t)kd_mapsize) == KERN_SUCCESS)) + kd_mapptr = (kd_threadmap *) kd_maptomem; + else + kd_mapptr = (kd_threadmap *) 0; + + if (kd_mapptr) + { + kdebug_flags |= KDBG_MAPINIT; + /* Initialize thread map data */ + akrt.map = kd_mapptr; + akrt.count = 0; + akrt.maxcount = kd_mapcount; + + for (p = allproc.lh_first; p; p = p->p_list.le_next) + { + akrt.p = p; + task_act_iterate_wth_args((task_t)p->task, kdbg_resolve_map, &akrt); + } + } +} + +kdbg_clear() +{ +int x; + + /* Clean up the trace buffer */ + global_state_pid = -1; + kdebug_enable = 0; + kdebug_nolog = 1; + kdebug_flags &= ~KDBG_BUFINIT; + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags &= ~(KDBG_NOWRAP | KDBG_RANGECHECK | KDBG_VALCHECK); + kdebug_flags &= ~(KDBG_PIDCHECK | KDBG_PIDEXCLUDE); + kmem_free(kernel_map, (char *)kd_buffer, kd_bufsize); + kd_buffer = (kd_buf *)0; + kd_bufsize = 0; + kd_prev_timebase = 0LL; + + /* Clean up the thread map buffer */ + kdebug_flags &= ~KDBG_MAPINIT; + kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize); + kd_mapptr = (kd_threadmap *) 0; + kd_mapsize = 0; + kd_mapcount = 0; +} + +kdbg_setpid(kd_regtype *kdr) +{ + pid_t pid; + int flag, ret=0; + struct proc *p; + + pid = (pid_t)kdr->value1; + flag = (int)kdr->value2; + + if (pid > 0) + { + if ((p = pfind(pid)) == NULL) + ret = ESRCH; + else + { + if (flag == 1) /* turn on pid check for this and all pids */ + { + kdebug_flags |= KDBG_PIDCHECK; + kdebug_flags &= ~KDBG_PIDEXCLUDE; + p->p_flag |= P_KDEBUG; + } + else /* turn off pid check for this pid value */ + { + /* Don't turn off all pid checking though */ + /* kdebug_flags &= ~KDBG_PIDCHECK;*/ + p->p_flag &= ~P_KDEBUG; + } + } + } + else + ret = EINVAL; + return(ret); +} + +/* This is for pid exclusion in the trace buffer */ +kdbg_setpidex(kd_regtype *kdr) +{ + pid_t pid; + int flag, ret=0; + struct proc *p; + + pid = (pid_t)kdr->value1; + flag = (int)kdr->value2; + + if (pid > 0) + { 
+ if ((p = pfind(pid)) == NULL) + ret = ESRCH; + else + { + if (flag == 1) /* turn on pid exclusion */ + { + kdebug_flags |= KDBG_PIDEXCLUDE; + kdebug_flags &= ~KDBG_PIDCHECK; + p->p_flag |= P_KDEBUG; + } + else /* turn off pid exclusion for this pid value */ + { + /* Don't turn off all pid exclusion though */ + /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/ + p->p_flag &= ~P_KDEBUG; + } + } + } + else + ret = EINVAL; + return(ret); +} + +/* This is for setting a minimum decrementer value */ +kdbg_setrtcdec(kd_regtype *kdr) +{ + int ret=0; + natural_t decval; + + decval = (natural_t)kdr->value1; + + if (decval && decval < KDBG_MINRTCDEC) + ret = EINVAL; +#ifdef ppc + else + rtclock_decrementer_min = decval; +#else + else + ret = EOPNOTSUPP; +#endif /* ppc */ + + return(ret); +} + +kdbg_setreg(kd_regtype * kdr) +{ + int i,j, ret=0; + unsigned int val_1, val_2, val; + switch (kdr->type) { + + case KDBG_CLASSTYPE : + val_1 = (kdr->value1 & 0xff); + val_2 = (kdr->value2 & 0xff); + kdlog_beg = (val_1<<24); + kdlog_end = (val_2<<24); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */ + kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE); + break; + case KDBG_SUBCLSTYPE : + val_1 = (kdr->value1 & 0xff); + val_2 = (kdr->value2 & 0xff); + val = val_2 + 1; + kdlog_beg = ((val_1<<24) | (val_2 << 16)); + kdlog_end = ((val_1<<24) | (val << 16)); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */ + kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE); + break; + case KDBG_RANGETYPE : + kdlog_beg = (kdr->value1); + kdlog_end = (kdr->value2); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */ + kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE); + break; + case KDBG_VALCHECK: + kdlog_value1 = (kdr->value1); + kdlog_value2 = (kdr->value2); + kdlog_value3 = (kdr->value3); + kdlog_value4 = 
(kdr->value4); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags &= ~KDBG_RANGECHECK; /* Turn off range check */ + kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */ + break; + case KDBG_TYPENONE : + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdlog_beg = 0; + kdlog_end = 0; + break; + default : + ret = EINVAL; + break; + } + return(ret); +} + +kdbg_getreg(kd_regtype * kdr) +{ + int i,j, ret=0; + unsigned int val_1, val_2, val; +#if 0 + switch (kdr->type) { + case KDBG_CLASSTYPE : + val_1 = (kdr->value1 & 0xff); + val_2 = val_1 + 1; + kdlog_beg = (val_1<<24); + kdlog_end = (val_2<<24); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE); + break; + case KDBG_SUBCLSTYPE : + val_1 = (kdr->value1 & 0xff); + val_2 = (kdr->value2 & 0xff); + val = val_2 + 1; + kdlog_beg = ((val_1<<24) | (val_2 << 16)); + kdlog_end = ((val_1<<24) | (val << 16)); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE); + break; + case KDBG_RANGETYPE : + kdlog_beg = (kdr->value1); + kdlog_end = (kdr->value2); + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdebug_flags |= (KDBG_RANGECHECK | KDBG_RANGETYPE); + break; + case KDBG_TYPENONE : + kdebug_flags &= (unsigned int)~KDBG_CKTYPES; + kdlog_beg = 0; + kdlog_end = 0; + break; + default : + ret = EINVAL; + break; + } +#endif /* 0 */ + return(EINVAL); +} + + + +kdbg_readmap(kd_threadmap *buffer, size_t *number) +{ + int avail = *number; + int ret = 0; + int count = 0; + + count = avail/sizeof (kd_threadmap); + + if (count && (count <= kd_mapcount)) + { + if((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) + { + if (*number < kd_mapsize) + ret=EINVAL; + else + { + if (copyout(kd_mapptr, buffer, kd_mapsize)) + ret=EINVAL; + } + } + else + ret=EINVAL; + } + else + ret=EINVAL; + + if ((kdebug_flags & KDBG_MAPINIT) && kd_mapsize && kd_mapptr) + { + kmem_free(kernel_map, (char *)kd_mapptr, kd_mapsize); + kdebug_flags 
&= ~KDBG_MAPINIT; + kd_mapsize = 0; + kd_mapptr = (kd_threadmap *) 0; + kd_mapcount = 0; + } + + return(ret); +} + + +kdbg_control(name, namelen, where, sizep) +int *name; +u_int namelen; +char *where; +size_t *sizep; +{ +int ret=0; +int size=*sizep; +int max_entries; +unsigned int value = name[1]; +kd_regtype kd_Reg; +kbufinfo_t kd_bufinfo; + +pid_t curpid; +struct proc *p, *curproc; + + if(curproc = current_proc()) + curpid = curproc->p_pid; + else + return (ESRCH); + + if (global_state_pid == -1) + global_state_pid = curpid; + else if (global_state_pid != curpid) + { + if((p = pfind(global_state_pid)) == NULL) + { + /* The global pid no longer exists */ + global_state_pid = curpid; + } + else + { + /* The global pid exists, deny this request */ + return(EBUSY); + } + } + + switch(name[0]) { + case KERN_KDEFLAGS: + value &= KDBG_USERFLAGS; + kdebug_flags |= value; + break; + case KERN_KDDFLAGS: + value &= KDBG_USERFLAGS; + kdebug_flags &= ~value; + break; + case KERN_KDENABLE: /* used to enable or disable */ + if (value) + { + /* enable only if buffer is initialized */ + if (!(kdebug_flags & KDBG_BUFINIT)) + { + ret=EINVAL; + break; + } + } + kdebug_enable=(value)?1:0; + kdebug_nolog = (value)?0:1; + if (kdebug_enable) + kdbg_mapinit(); + break; + case KERN_KDSETBUF: + /* We allow a maximum buffer size of 25% of memory */ + /* 'value' is the desired number of trace entries */ + max_entries = (mem_size/4) / sizeof(kd_buf); + if (value <= max_entries) + nkdbufs = value; + else + nkdbufs = max_entries; + break; + case KERN_KDGETBUF: + if(size < sizeof(kbufinfo_t)) { + ret=EINVAL; + break; + } + kd_bufinfo.nkdbufs = nkdbufs; + kd_bufinfo.nkdthreads = kd_mapsize / sizeof(kd_threadmap); + kd_bufinfo.nolog = kdebug_nolog; + kd_bufinfo.flags = kdebug_flags; + if(copyout (&kd_bufinfo, where, sizeof(kbufinfo_t))) { + ret=EINVAL; + } + break; + case KERN_KDSETUP: + ret=kdbg_reinit(); + break; + case KERN_KDREMOVE: + kdbg_clear(); + break; + case KERN_KDSETREG: + if(size < 
sizeof(kd_regtype)) { + ret=EINVAL; + break; + } + if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { + ret= EINVAL; + break; + } + ret = kdbg_setreg(&kd_Reg); + break; + case KERN_KDGETREG: + if(size < sizeof(kd_regtype)) { + ret = EINVAL; + break; + } + ret = kdbg_getreg(&kd_Reg); + if (copyout(&kd_Reg, where, sizeof(kd_regtype))){ + ret=EINVAL; + } + break; + case KERN_KDREADTR: + ret = kdbg_read(where, sizep); + break; + case KERN_KDPIDTR: + if (size < sizeof(kd_regtype)) { + ret = EINVAL; + break; + } + if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { + ret= EINVAL; + break; + } + ret = kdbg_setpid(&kd_Reg); + break; + case KERN_KDPIDEX: + if (size < sizeof(kd_regtype)) { + ret = EINVAL; + break; + } + if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { + ret= EINVAL; + break; + } + ret = kdbg_setpidex(&kd_Reg); + break; + case KERN_KDTHRMAP: + ret = kdbg_readmap((kd_threadmap *)where, sizep); + break; + case KERN_KDSETRTCDEC: + if (size < sizeof(kd_regtype)) { + ret = EINVAL; + break; + } + if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { + ret= EINVAL; + break; + } + ret = kdbg_setrtcdec(&kd_Reg); + break; + + default: + ret= EINVAL; + } + return(ret); +} + +kdbg_read(kd_buf * buffer, size_t *number) +{ +int avail=*number; +int count=0; +int copycount=0; +int totalcount=0; +int s; +unsigned int my_kdebug_flags; +kd_buf * my_kd_bufptr; + + s = ml_set_interrupts_enabled(FALSE); + simple_lock(&kd_trace_lock); + my_kdebug_flags = kdebug_flags; + my_kd_bufptr = kd_bufptr; + simple_unlock(&kd_trace_lock); + ml_set_interrupts_enabled(s); + + count = avail/sizeof(kd_buf); + if (count) { + if ((my_kdebug_flags & KDBG_BUFINIT) && kd_bufsize && kd_buffer) { + if (count > nkdbufs) + count = nkdbufs; + if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr > kd_readlast)) + { + copycount = my_kd_bufptr-kd_readlast; + if (copycount > count) + copycount = count; + + if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) + { + *number = 0; + return(EINVAL); + } + 
kd_readlast += copycount; + *number = copycount; + return(0); + } + else if (!(my_kdebug_flags & KDBG_WRAPPED) && (my_kd_bufptr == kd_readlast)) + { + *number = 0; + return(0); + } + else + { + if (my_kdebug_flags & KDBG_WRAPPED) + { + kd_readlast = my_kd_bufptr; + kdebug_flags &= ~KDBG_WRAPPED; + } + + /* Note that by setting kd_readlast equal to my_kd_bufptr, + we now treat the kd_buffer read the same as if we weren't + wrapped and my_kd_bufptr was less than kd_readlast. + */ + + /* first copyout from readlast to end of kd_buffer */ + copycount = kd_buflast - kd_readlast; + if (copycount > count) + copycount = count; + if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) + { + *number = 0; + return(EINVAL); + } + buffer += copycount; + count -= copycount; + totalcount = copycount; + kd_readlast += copycount; + if (kd_readlast == kd_buflast) + kd_readlast = kd_buffer; + if (count == 0) + { + *number = totalcount; + return(0); + } + + /* second copyout from top of kd_buffer to bufptr */ + copycount = my_kd_bufptr - kd_readlast; + if (copycount > count) + copycount = count; + if (copycount == 0) + { + *number = totalcount; + return(0); + } + if (copyout(kd_readlast, buffer, copycount * sizeof(kd_buf))) + { + return(EINVAL); + } + kd_readlast += copycount; + totalcount += copycount; + *number = totalcount; + return(0); + } + } /* end if KDBG_BUFINIT */ + } /* end if count */ + return (EINVAL); +} diff --git a/bsd/kern/kern_acct.c b/bsd/kern/kern_acct.c new file mode 100644 index 000000000..3654a9dc8 --- /dev/null +++ b/bsd/kern/kern_acct.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_acct.c 8.1 (Berkeley) 6/14/93 + */ +/* HISTORY + * 08-May-95 Mac Gillon (mgillon) at NeXT + * Purged old history + * New version based on 4.4 + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The routines implemented in this file are described in: + * Leffler, et al.: The Design and Implementation of the 4.3BSD + * UNIX Operating System (Addison Welley, 1989) + * on pages 62-63. + * + * Arguably, to simplify accounting operations, this mechanism should + * be replaced by one in which an accounting log file (similar to /dev/klog) + * is read by a user process, etc. However, that has its own problems. + */ + +/* + * Internal accounting functions. 
 * The former's operation is described in Leffler, et al., and the latter
 * was provided by UCB with the 4.4BSD-Lite release
 */
comp_t	encode_comp_t __P((u_long, u_long));
void	acctwatch __P((void *));
void	acctwatch_funnel __P((void *));

/*
 * Accounting vnode pointer, and saved vnode pointer.
 * acctp is the active accounting file; savacctp holds the vnode while
 * accounting is suspended for lack of free space (see acctwatch()).
 */
struct	vnode *acctp;
struct	vnode *savacctp;

/*
 * Values associated with enabling and disabling accounting
 */
int	acctsuspend = 2;	/* stop accounting when < 2% free space left */
int	acctresume = 4;		/* resume when free space risen to > 4% */
int	acctchkfreq = 15;	/* frequency (in seconds) to check space */

/*
 * Accounting system call.  Written based on the specification and
 * previous implementation done by Mark Tinguely.
 *
 * uap->path == NULL disables accounting; a non-NULL path enables it on
 * that file (which must be a regular file).  Caller must be superuser.
 * Returns 0 or an errno.
 */
struct acct_args {
	char	*path;
};
acct(p, uap, retval)
	struct proc *p;
	struct acct_args *uap;
	int *retval;
{
	struct nameidata nd;
	int error;

	/* Make sure that the caller is root. */
	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);

	/*
	 * If accounting is to be started to a file, open that file for
	 * writing and make sure it's a 'normal'.
	 */
	if (uap->path != NULL) {
		NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, uap->path, p);
		if (error = vn_open(&nd, FWRITE, 0))
			return (error);
		VOP_UNLOCK(nd.ni_vp, 0, p);
		if (nd.ni_vp->v_type != VREG) {
			vn_close(nd.ni_vp, FWRITE, p->p_ucred, p);
			return (EACCES);
		}
	}

	/*
	 * If accounting was previously enabled, kill the old space-watcher,
	 * close the file, and (if no new file was specified, leave).
	 * NOTE(review): from here down, 'error' carries the vn_close()
	 * result (or 0 from the suser() check above).
	 */
	if (acctp != NULLVP || savacctp != NULLVP) {
		untimeout(acctwatch_funnel, NULL);
		error = vn_close((acctp != NULLVP ? acctp : savacctp), FWRITE,
		    p->p_ucred, p);
		acctp = savacctp = NULLVP;
	}
	if (uap->path == NULL)
		return (error);

	/*
	 * Save the new accounting file vnode, and schedule the new
	 * free space watcher.  (nd.ni_vp is valid here because the
	 * uap->path != NULL branch above must have been taken.)
	 */
	acctp = nd.ni_vp;
	acctwatch(NULL);
	return (error);
}

/*
 * Write out process accounting information, on process exit.
 * Data to be written out is specified in Leffler, et al.
 * and are enumerated below.  (They're also noted in the system
 * "acct.h" header file.)
 *
 * Returns 0 when accounting is disabled, otherwise the vn_rdwr() result.
 */
acct_process(p)
	struct proc *p;
{
	struct acct acct;
	struct rusage *r;
	struct timeval ut, st, tmp;
	int s, t;
	struct vnode *vp;

	/* If accounting isn't enabled, don't bother */
	vp = acctp;
	if (vp == NULLVP)
		return (0);

	/*
	 * Get process accounting information.
	 */

	/* (1) The name of the command that ran */
	bcopy(p->p_comm, acct.ac_comm, sizeof acct.ac_comm);

	/* (2) The amount of user and system time that was used */
	calcru(p, &ut, &st, NULL);
	acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_usec);
	acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_usec);

	/* (3) The elapsed time the command ran (and its starting time) */
	acct.ac_btime = p->p_stats->p_start.tv_sec;
	s = splclock();		/* sample 'time' atomically w.r.t. clock irq */
	tmp = time;
	splx(s);
	timevalsub(&tmp, &p->p_stats->p_start);
	acct.ac_etime = encode_comp_t(tmp.tv_sec, tmp.tv_usec);

	/* (4) The average amount of memory used */
	r = &p->p_stats->p_ru;
	tmp = ut;
	timevaladd(&tmp, &st);
	t = tmp.tv_sec * hz + tmp.tv_usec / tick;
	if (t)
		acct.ac_mem = (r->ru_ixrss + r->ru_idrss + r->ru_isrss) / t;
	else
		acct.ac_mem = 0;

	/* (5) The number of disk I/O operations done */
	acct.ac_io = encode_comp_t(r->ru_inblock + r->ru_oublock, 0);

	/* (6) The UID and GID of the process */
	acct.ac_uid = p->p_cred->p_ruid;
	acct.ac_gid = p->p_cred->p_rgid;

	/* (7) The terminal from which the process was started */
	if ((p->p_flag & P_CONTROLT) && p->p_pgrp->pg_session->s_ttyp)
		acct.ac_tty = p->p_pgrp->pg_session->s_ttyp->t_dev;
	else
		acct.ac_tty = NODEV;

	/* (8) The boolean flags that tell how the process terminated, etc. */
	acct.ac_flag = p->p_acflag;

	/*
	 * Now, just write the accounting information to the file.
	 */
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t)&acct, sizeof (acct),
	    (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, p->p_ucred,
	    (int *)0, p));
}

/*
 * Encode_comp_t converts from ticks in seconds and microseconds
 * to ticks in 1/AHZ seconds.  The encoding is described in
 * Leffler, et al., on page 63.
 *
 * comp_t is a 16-bit pseudo float: 3-bit base-8 exponent over a
 * 13-bit mantissa.
 */

#define	MANTSIZE	13			/* 13 bit mantissa. */
#define	EXPSIZE		3			/* Base 8 (3 bit) exponent. */
#define	MAXFRACT	((1 << MANTSIZE) - 1)	/* Maximum fractional value. */

comp_t
encode_comp_t(s, us)
	u_long s, us;
{
	int exp, rnd;

	exp = 0;
	rnd = 0;
	s *= AHZ;
	s += us / (1000000 / AHZ);	/* Maximize precision. */

	while (s > MAXFRACT) {
		rnd = s & (1 << (EXPSIZE - 1));	/* Round up? */
		s >>= EXPSIZE;	/* Base 8 exponent == 3 bit shift. */
		exp++;
	}

	/* If we need to round up, do it (and handle overflow correctly). */
	if (rnd && (++s > MAXFRACT)) {
		s >>= EXPSIZE;
		exp++;
	}

	/* Clean it up and polish it off. */
	exp <<= MANTSIZE;		/* Shift the exponent into place */
	exp += s;			/* and add on the mantissa. */
	return (exp);
}

/*
 * Timeout trampoline: acctwatch() touches BSD state, so take the kernel
 * funnel around it when called from the timeout/thread_call context.
 */
void
acctwatch_funnel(a)
	void *a;
{
	thread_funnel_set(kernel_flock, TRUE);
	acctwatch(a);
	thread_funnel_set(kernel_flock, FALSE);
}


/*
 * Periodically check the file system to see if accounting
 * should be turned on or off.  Beware the case where the vnode
 * has been vgone()'d out from underneath us, e.g. when the file
 * system containing the accounting file has been forcibly unmounted.
 */
/* ARGSUSED */
void
acctwatch(a)
	void *a;		/* unused; signature fits timeout_fcn_t */
{
	struct statfs sb;

	if (savacctp != NULLVP) {
		/* Accounting currently suspended: resume when space recovers. */
		if (savacctp->v_type == VBAD) {
			/* Vnode was revoked (e.g. forced unmount) -- give up. */
			(void) vn_close(savacctp, FWRITE, NOCRED, NULL);
			savacctp = NULLVP;
			return;
		}
		(void)VFS_STATFS(savacctp->v_mount, &sb, (struct proc *)0);
		if (sb.f_bavail > acctresume * sb.f_blocks / 100) {
			acctp = savacctp;
			savacctp = NULLVP;
			log(LOG_NOTICE, "Accounting resumed\n");
		}
	} else if (acctp != NULLVP) {
		/* Accounting active: suspend if free space drops too low. */
		if (acctp->v_type == VBAD) {
			(void) vn_close(acctp, FWRITE, NOCRED, NULL);
			acctp = NULLVP;
			return;
		}
		(void)VFS_STATFS(acctp->v_mount, &sb, (struct proc *)0);
		if (sb.f_bavail <= acctsuspend * sb.f_blocks / 100) {
			savacctp = acctp;
			acctp = NULLVP;
			log(LOG_NOTICE, "Accounting suspended\n");
		}
	} else {
		/* Accounting fully disabled -- stop rescheduling. */
		return;
	}

	/* Re-arm the periodic check (acctchkfreq seconds). */
	timeout(acctwatch_funnel, NULL, acctchkfreq * hz);
}
diff --git a/bsd/kern/kern_clock.c b/bsd/kern/kern_clock.c
new file mode 100644
index 000000000..582f46377
--- /dev/null
+++ b/bsd/kern/kern_clock.c
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 + */ +/* + * HISTORY + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef GPROF +#include +#endif + +#include +#include +#include +#include + +#include + +/* + * Clock handling routines. + * + * This code is written to operate with two timers which run + * independently of each other. The main clock, running at hz + * times per second, is used to do scheduling and timeout calculations. + * The second timer does resource utilization estimation statistically + * based on the state of the machine phz times a second. Both functions + * can be performed by a single clock (ie hz == phz), however the + * statistics will be much more prone to errors. Ideally a machine + * would have separate clocks measuring time spent in user state, system + * state, interrupt state, and idle state. These clocks would allow a non- + * approximate measure of resource utilization. + */ + +/* + * The hz hardware interval timer. + * We update the events relating to real time. + * If this timer is also being used to gather statistics, + * we run through the statistics gathering routine as well. 
+ */ + +int bsd_hardclockinit = 0; +/*ARGSUSED*/ +void +bsd_hardclock(usermode, pc, numticks) + boolean_t usermode; + caddr_t pc; + int numticks; +{ + register struct proc *p; + register int s; + int ticks = numticks; + extern int tickdelta; + extern long timedelta; + register thread_t thread; + int nusecs = numticks * tick; + + if (!bsd_hardclockinit) + return; + + thread = current_thread(); + + /* + * Charge the time out based on the mode the cpu is in. + * Here again we fudge for the lack of proper interval timers + * assuming that the current state has been around at least + * one tick. + */ + p = (struct proc *)get_bsdtask_info(current_task()); + if (p && ((p->p_flag & P_WEXIT) == NULL)) { + if (usermode) { + if (p) { + if (p->p_stats && p->p_stats->p_prof.pr_scale) { + p->p_flag |= P_OWEUPC; + ast_on(AST_BSD); + } + } + + /* + * CPU was in user state. Increment + * user time counter, and process process-virtual time + * interval timer. + */ + if (p->p_stats && + timerisset(&p->p_stats->p_timer[ITIMER_VIRTUAL].it_value) && + itimerdecr(&p->p_stats->p_timer[ITIMER_VIRTUAL], nusecs) == 0) { + extern void psignal_vtalarm(struct proc *); + + /* does psignal(p, SIGVTALRM) in a thread context */ + thread_call_func((thread_call_func_t)psignal_vtalarm, p, FALSE); + } + } + + /* + * If the cpu is currently scheduled to a process, then + * charge it with resource utilization for a tick, updating + * statistics which run in (user+system) virtual time, + * such as the cpu time limit and profiling timers. + * This assumes that the current process has been running + * the entire last tick. 
+ */ + if (p && !(is_thread_idle(thread))) + { + if (p->p_limit && (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)) { + time_value_t sys_time, user_time; + + thread_read_times(thread, &user_time, &sys_time); + if ((sys_time.seconds + user_time.seconds + 1) > + p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur) { + extern void psignal_xcpu(struct proc *); + + /* does psignal(p, SIGXCPU) in a thread context */ + thread_call_func((thread_call_func_t)psignal_xcpu, p, FALSE); + + if (p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur < + p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_max) + p->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur += 5; + } + } + if (timerisset(&p->p_stats->p_timer[ITIMER_PROF].it_value) && + itimerdecr(&p->p_stats->p_timer[ITIMER_PROF], nusecs) == 0) { + extern void psignal_sigprof(struct proc *); + + /* does psignal(p, SIGPROF) in a thread context */ + thread_call_func((thread_call_func_t)psignal_sigprof, p, FALSE); + } + } + + /* + * Increment the time-of-day, and schedule + * processing of the callouts at a very low cpu priority, + * so we don't keep the relatively high clock interrupt + * priority any longer than necessary. + */ + + /* + * Gather the statistics. + */ + gatherstats(usermode, pc); + + } + if (timedelta != 0) { + register delta; + clock_res_t nsdelta = tickdelta * NSEC_PER_USEC; + + if (timedelta < 0) { + delta = ticks - tickdelta; + timedelta += tickdelta; + nsdelta = -nsdelta; + } else { + delta = ticks + tickdelta; + timedelta -= tickdelta; + } + clock_adjust_calendar(nsdelta); + } + microtime(&time); +} + +/* + * Gather statistics on resource utilization. + * + * We make a gross assumption: that the system has been in the + * state it is in (user state, kernel state, interrupt state, + * or idle state) for the entire last time interval, and + * update statistics accordingly. 
 */
/*ARGSUSED*/
void
gatherstats(usermode, pc)
	boolean_t	usermode;	/* TRUE if interrupted context was user mode */
	caddr_t		pc;		/* interrupted PC (kernel profiling only) */
{
	register int	cpstate, s;
	struct proc	*proc = current_proc();
	/* NOTE(review): proc is dereferenced below without a NULL check in
	 * the usermode branch -- presumably a user-mode tick always has a
	 * current proc; confirm against callers. */
#ifdef GPROF
	struct gmonparam *p = &_gmonparam;
#endif

	/*
	 * Determine what state the cpu is in.
	 */
	if (usermode) {
		/*
		 * CPU was in user state.
		 */
		if (proc->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		/*
		 * CPU was in system state.  If profiling kernel
		 * increment a counter.  If no process is running
		 * then this is a system tick if we were running
		 * at a non-zero IPL (in a driver).  If a process is running,
		 * then we charge it with system time even if we were
		 * at a non-zero IPL, since the system often runs
		 * this way during processing of system calls.
		 * This is approximate, but the lack of true interval
		 * timers makes doing anything else difficult.
		 */
		cpstate = CP_SYS;
		if (is_thread_idle(current_thread()))
			cpstate = CP_IDLE;
#ifdef GPROF
		/* Kernel PC histogram: bucket the interrupted pc. */
		if (p->state == GMON_PROF_ON) {
			s = pc - p->lowpc;
			if (s < p->textsize) {
				s /= (HISTFRACTION * sizeof(*p->kcount));
				p->kcount[s]++;
			}
		}
#endif
	}
	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 */
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy & (1 << s))
			dk_time[s]++;
}


/*
 * Kernel timeout services.
 */

/*
 * Set a timeout.
 *
 * fcn:		function to call
 * param:	parameter to pass to function
 * interval:	timeout interval, in hz.
 *
 * Implemented on top of the Mach thread_call layer: converts 'interval'
 * ticks to an absolute deadline and schedules a delayed call.
 */
void
timeout(
	timeout_fcn_t	fcn,
	void		*param,
	int		interval)
{
	AbsoluteTime	deadline;

	clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline);
	thread_call_func_delayed((thread_call_func_t)fcn, param, deadline);
}

/*
 * Cancel a timeout.
+ */ +void +untimeout( + register timeout_fcn_t fcn, + register void *param) +{ + thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE); +} + + + +/* + * Compute number of hz until specified time. + * Used to compute third argument to timeout() from an + * absolute time. + */ +hzto(tv) + struct timeval *tv; +{ + register long ticks; + register long sec; + int s = splhigh(); + + /* + * If number of milliseconds will fit in 32 bit arithmetic, + * then compute number of milliseconds to time and scale to + * ticks. Otherwise just compute number of hz in time, rounding + * times greater than representible to maximum value. + * + * Delta times less than 25 days can be computed ``exactly''. + * Maximum value for any timeout in 10ms ticks is 250 days. + */ + sec = tv->tv_sec - time.tv_sec; + if (sec <= 0x7fffffff / 1000 - 1000) + ticks = ((tv->tv_sec - time.tv_sec) * 1000 + + (tv->tv_usec - time.tv_usec) / 1000) + / (tick / 1000); + else if (sec <= 0x7fffffff / hz) + ticks = sec * hz; + else + ticks = 0x7fffffff; + splx(s); + return (ticks); +} + +#if 0 /* [ */ +/* + * Convert ticks to a timeval + */ +ticks_to_timeval(ticks, tvp) + register long ticks; + struct timeval *tvp; +{ + tvp->tv_sec = ticks/hz; + tvp->tv_usec = (ticks%hz) * tick; + asert(tvp->tv_usec < 1000000); +} +#endif /* ] */ + +/* + * Return information about system clocks. + */ +int +sysctl_clockrate(where, sizep) + register char *where; + size_t *sizep; +{ + struct clockinfo clkinfo; + + /* + * Construct clockinfo structure. + */ + clkinfo.hz = hz; + clkinfo.tick = tick; + clkinfo.profhz = hz; + clkinfo.stathz = hz; + return sysctl_rdstruct(where, sizep, NULL, &clkinfo, sizeof(clkinfo)); +} + + +/* + * Compute number of ticks in the specified amount of time. 
+ */ +int +tvtohz(tv) + struct timeval *tv; +{ + register unsigned long ticks; + register long sec, usec; + + /* + * If the number of usecs in the whole seconds part of the time + * difference fits in a long, then the total number of usecs will + * fit in an unsigned long. Compute the total and convert it to + * ticks, rounding up and adding 1 to allow for the current tick + * to expire. Rounding also depends on unsigned long arithmetic + * to avoid overflow. + * + * Otherwise, if the number of ticks in the whole seconds part of + * the time difference fits in a long, then convert the parts to + * ticks separately and add, using similar rounding methods and + * overflow avoidance. This method would work in the previous + * case but it is slightly slower and assumes that hz is integral. + * + * Otherwise, round the time difference down to the maximum + * representable value. + * + * If ints have 32 bits, then the maximum value for any timeout in + * 10ms ticks is 248 days. + */ + sec = tv->tv_sec; + usec = tv->tv_usec; + if (usec < 0) { + sec--; + usec += 1000000; + } + if (sec < 0) { +#ifdef DIAGNOSTIC + if (usec > 0) { + sec++; + usec -= 1000000; + } + printf("tvotohz: negative time difference %ld sec %ld usec\n", + sec, usec); +#endif + ticks = 1; + } else if (sec <= LONG_MAX / 1000000) + ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) + / tick + 1; + else if (sec <= LONG_MAX / hz) + ticks = sec * hz + + ((unsigned long)usec + (tick - 1)) / tick + 1; + else + ticks = LONG_MAX; + if (ticks > INT_MAX) + ticks = INT_MAX; + return ((int)ticks); +} + + +/* + * Start profiling on a process. + * + * Kernel profiling passes kernel_proc which never exits and hence + * keeps the profile clock running constantly. + */ +void +startprofclock(p) + register struct proc *p; +{ + if ((p->p_flag & P_PROFIL) == 0) + p->p_flag |= P_PROFIL; +} + +/* + * Stop profiling on a process. 
 */
void
stopprofclock(p)
	register struct proc *p;
{
	/* Idempotent: clear the profiling flag if set. */
	if (p->p_flag & P_PROFIL)
		p->p_flag &= ~P_PROFIL;
}

/*
 * Charge profiling ticks to the current process at user PC `pc'.
 * `syst' is the system time recorded at syscall/trap entry; the
 * difference against the process's accumulated ru_stime is converted
 * to clock ticks and fed to addupc_task().
 */
void
bsd_uprofil(struct time_value *syst, unsigned int pc)
{
	struct proc *p = current_proc();
	int		ticks;
	struct timeval	*tv;
	struct timeval st;

	if (p == NULL)
		return;
	if ( !(p->p_flag & P_PROFIL))
		return;

	/* Convert Mach time_value to a BSD timeval. */
	st.tv_sec = syst->seconds;
	st.tv_usec = syst->microseconds;

	tv = &(p->p_stats->p_ru.ru_stime);

	/* Elapsed system time in ticks (same ms-based scaling as hzto()). */
	ticks = ((tv->tv_sec - st.tv_sec) * 1000 +
		(tv->tv_usec - st.tv_usec) / 1000) /
		(tick / 1000);
	if (ticks)
		addupc_task(p, pc, ticks);
}

/*
 * Return the current process's accumulated system time (ru_stime)
 * as a Mach time_value.  Leaves *tv untouched when there is no
 * current process or it is not being profiled.
 */
void
get_procrustime(time_value_t *tv)
{
	struct proc *p = current_proc();
	struct timeval st;

	if (p == NULL)
		return;
	if ( !(p->p_flag & P_PROFIL))
		return;

	st = p->p_stats->p_ru.ru_stime;

	tv->seconds = st.tv_sec;
	tv->microseconds = st.tv_usec;
}
diff --git a/bsd/kern/kern_core.c b/bsd/kern/kern_core.c
new file mode 100644
index 000000000..7776eb45d
--- /dev/null
+++ b/bsd/kern/kern_core.c
@@ -0,0 +1,380 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. + * + * File: bsd/kern/kern_core.c + * + * This file contains machine independent code for performing core dumps. + * + * HISTORY + * 16-Feb-91 Mike DeMoney (mike@next.com) + * Massaged into MI form from m68k/core.c. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +typedef struct { + int flavor; /* the number for this flavor */ + int count; /* count of ints in this flavor */ +} mythread_state_flavor_t; + +#if defined (__ppc__) + +mythread_state_flavor_t thread_flavor_array[]={ + {PPC_THREAD_STATE , PPC_THREAD_STATE_COUNT}, + {PPC_FLOAT_STATE, PPC_FLOAT_STATE_COUNT}, + {PPC_EXCEPTION_STATE, PPC_EXCEPTION_STATE_COUNT} + }; +int mynum_flavors=3; +#elif defined (__i386__) +mythread_state_flavor_t thread_flavor_array [] = { + {i386_THREAD_STATE, i386_THREAD_STATE_COUNT}, + {i386_THREAD_FPSTATE, i386_THREAD_FPSTATE_COUNT}, + {i386_THREAD_EXCEPTSTATE, i386_THREAD_EXCEPTSTATE_COUNT}, + {i386_THREAD_CTHREADSTATE, i386_THREAD_CTHREADSTATE_COUNT}, + {i386_NEW_THREAD_STATE, i386_NEW_THREAD_STATE_COUNT}, + {i386_FLOAT_STATE, i386_FLOAT_STATE_COUNT}, + {i386_ISA_PORT_MAP_STATE, i386_ISA_PORT_MAP_STATE_COUNT}, + {i386_V86_ASSIST_STATE, i386_V86_ASSIST_STATE_COUNT}, + {THREAD_SYSCALL_STATE, i386_THREAD_SYSCALL_STATE_COUNT} + }; +int mynum_flavors=9; + +#else +#error architecture not supported +#endif + + +typedef struct { + vm_offset_t header; + int hoffset; + mythread_state_flavor_t *flavors; + int tstate_size; +} tir_t; + +collectth_state(thread_act_t th_act, tir_t *t) +{ + vm_offset_t header; + int hoffset, i ; + mythread_state_flavor_t *flavors; + struct thread_command *tc; + /* + * Fill in thread command structure. 
+ */ + header = t->header; + hoffset = t->hoffset; + flavors = t->flavors; + + tc = (struct thread_command *) (header + hoffset); + tc->cmd = LC_THREAD; + tc->cmdsize = sizeof(struct thread_command) + + t->tstate_size; + hoffset += sizeof(struct thread_command); + /* + * Follow with a struct thread_state_flavor and + * the appropriate thread state struct for each + * thread state flavor. + */ + for (i = 0; i < mynum_flavors; i++) { + *(mythread_state_flavor_t *)(header+hoffset) = + flavors[i]; + hoffset += sizeof(mythread_state_flavor_t); + thread_getstatus(th_act, flavors[i].flavor, + (thread_state_t *)(header+hoffset), + &flavors[i].count); + hoffset += flavors[i].count*sizeof(int); + } + + t->hoffset = hoffset; +} +/* + * Create a core image on the file "core". + */ +#define MAX_TSTATE_FLAVORS 10 +int +coredump(p) + register struct proc *p; +{ + int error=0; + register struct pcred *pcred = p->p_cred; + register struct ucred *cred = pcred->pc_ucred; + struct nameidata nd; + struct vattr vattr; + vm_map_t map; + int thread_count, segment_count; + int command_size, header_size, tstate_size; + int hoffset, foffset, vmoffset; + vm_offset_t header; + struct machine_slot *ms; + struct mach_header *mh; + struct segment_command *sc; + struct thread_command *tc; + vm_size_t size; + vm_prot_t prot; + vm_prot_t maxprot; + vm_inherit_t inherit; + vm_offset_t offset; + int error1; + task_t task; + char core_name[MAXCOMLEN+6]; + mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS]; + vm_size_t nflavors,mapsize; + int i; + int nesting_depth = 0; + kern_return_t kret; + struct vm_region_submap_info_64 vbr; + int vbrcount=0; + tir_t tir1; + struct vnode * vp; + + + if (pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid) + return (EFAULT); + + task = current_task(); + map = current_map(); + mapsize = get_vmmap_size(map); + + if (mapsize >= p->p_rlimit[RLIMIT_CORE].rlim_cur) + return (EFAULT); + (void) task_suspend(task); + + /* + * Make sure all registers, etc. 
are in pcb so they get + * into core file. + */ +#if defined (__ppc__) + fpu_save(); +#endif + sprintf(core_name, "/cores/core.%d", p->p_pid); + NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, core_name, p); + if(error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR )) + return (error); + vp = nd.ni_vp; + + /* Don't dump to non-regular files or files with links. */ + if (vp->v_type != VREG || + VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) { + error = EFAULT; + goto out; + } + + VATTR_NULL(&vattr); + vattr.va_size = 0; + VOP_LEASE(vp, p, cred, LEASE_WRITE); + VOP_SETATTR(vp, &vattr, cred, p); + p->p_acflag |= ACORE; + + /* + * If the task is modified while dumping the file + * (e.g., changes in threads or VM, the resulting + * file will not necessarily be correct. + */ + + thread_count = get_task_numacts(task); + segment_count = get_vmmap_entries(map); /* XXX */ + /* + * nflavors here is really the number of ints in flavors + * to meet the thread_getstatus() calling convention + */ +#if 0 + nflavors = sizeof(flavors)/sizeof(int); + if (thread_getstatus(current_thread(), THREAD_STATE_FLAVOR_LIST, + (thread_state_t)(flavors), + &nflavors) != KERN_SUCCESS) + panic("core flavor list"); + /* now convert to number of flavors */ + nflavors /= sizeof(mythread_state_flavor_t)/sizeof(int); +#else + nflavors = mynum_flavors; + bcopy(thread_flavor_array,flavors,sizeof(thread_flavor_array)); +#endif + tstate_size = 0; + for (i = 0; i < nflavors; i++) + tstate_size += sizeof(mythread_state_flavor_t) + + (flavors[i].count * sizeof(int)); + + command_size = segment_count*sizeof(struct segment_command) + + thread_count*sizeof(struct thread_command) + + tstate_size*thread_count; + + header_size = command_size + sizeof(struct mach_header); + + (void) kmem_alloc_wired(kernel_map, + (vm_offset_t *)&header, + (vm_size_t)header_size); + + /* + * Set up Mach-O header. 
+ */ + mh = (struct mach_header *) header; + ms = &machine_slot[cpu_number()]; + mh->magic = MH_MAGIC; + mh->cputype = ms->cpu_type; + mh->cpusubtype = ms->cpu_subtype; + mh->filetype = MH_CORE; + mh->ncmds = segment_count + thread_count; + mh->sizeofcmds = command_size; + + hoffset = sizeof(struct mach_header); /* offset into header */ + foffset = round_page(header_size); /* offset into file */ + vmoffset = VM_MIN_ADDRESS; /* offset into VM */ + /* We use to check for an error, here, now we try and get + * as much as we can + */ + while (segment_count > 0){ + /* + * Get region information for next region. + */ + + while (1) { + vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64; + if((kret = vm_region_recurse_64(map, + &vmoffset, &size, &nesting_depth, + &vbr, &vbrcount)) != KERN_SUCCESS) { + break; + } + if(vbr.is_submap) { + nesting_depth++; + continue; + } else { + break; + } + } + if(kret != KERN_SUCCESS) + break; + + prot = vbr.protection; + maxprot = vbr.max_protection; + inherit = vbr.inheritance; + /* + * Fill in segment command structure. + */ + sc = (struct segment_command *) (header + hoffset); + sc->cmd = LC_SEGMENT; + sc->cmdsize = sizeof(struct segment_command); + /* segment name is zerod by kmem_alloc */ + sc->vmaddr = vmoffset; + sc->vmsize = size; + sc->fileoff = foffset; + sc->filesize = size; + sc->maxprot = maxprot; + sc->initprot = prot; + sc->nsects = 0; + + /* + * Write segment out. Try as hard as possible to + * get read access to the data. + */ + if ((prot & VM_PROT_READ) == 0) { + vm_protect(map, vmoffset, size, FALSE, + prot|VM_PROT_READ); + } + /* + * Only actually perform write if we can read. + * Note: if we can't read, then we end up with + * a hole in the file. 
+ */ + if ((maxprot & VM_PROT_READ) == VM_PROT_READ) { + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset, + UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p); + } + + hoffset += sizeof(struct segment_command); + foffset += size; + vmoffset += size; + segment_count--; + } + +#if 0 /* [ */ + task_lock(task); + thread = (thread_t) queue_first(&task->thread_list); + while (thread_count > 0) { + /* + * Fill in thread command structure. + */ + tc = (struct thread_command *) (header + hoffset); + tc->cmd = LC_THREAD; + tc->cmdsize = sizeof(struct thread_command) + + tstate_size; + hoffset += sizeof(struct thread_command); + /* + * Follow with a struct thread_state_flavor and + * the appropriate thread state struct for each + * thread state flavor. + */ + for (i = 0; i < nflavors; i++) { + *(mythread_state_flavor_t *)(header+hoffset) = + flavors[i]; + hoffset += sizeof(mythread_state_flavor_t); + thread_getstatus(thread, flavors[i].flavor, + (thread_state_t *)(header+hoffset), + &flavors[i].count); + hoffset += flavors[i].count*sizeof(int); + } + thread = (thread_t) queue_next(&thread->thread_list); + thread_count--; + } + task_unlock(task); +#else /* /* 0 ][ */ + tir1.header = header; + tir1.hoffset = hoffset; + tir1.flavors = flavors; + tir1.tstate_size = tstate_size; + task_act_iterate_wth_args(task, collectth_state,&tir1); + +#endif /* 0 ] */ + /* + * Write out the Mach header at the beginning of the + * file. + */ + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0, + UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p); + kmem_free(kernel_map, header, header_size); +out: + VOP_UNLOCK(vp, 0, p); + error1 = vn_close(vp, FWRITE, cred, p); + if (error == 0) + error = error1; +} diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c new file mode 100644 index 000000000..cf9ba2d06 --- /dev/null +++ b/bsd/kern/kern_descrip.c @@ -0,0 +1,1318 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95 + * + * History: + * CHW 8/5/98 Added F_SETSIZE command to truncate without + * zero filling space + * CHW 7/6/98 Updated Preallocate command to take a structure + * and return output. + * CHW 6/25/98 Fixed a bug in the lock call in fcntl + * Preallocate command + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Descriptor management. + */ +struct filelist filehead; /* head of list of open files */ +int nfiles; /* actual number of open files */ + +/* + * System calls on descriptors. 
 */
/* ARGSUSED */
int
getdtablesize(p, uap, retval)
	struct proc *p;
	void *uap;
	register_t *retval;
{
	/* Per-process descriptor limit, capped by the system-wide maxfiles. */
	*retval = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
	return (0);
}

/* ARGSUSED */
int
ogetdtablesize(p, uap, retval)
	struct proc *p;
	void *uap;
	register_t *retval;
{
	/* Old (4.3BSD) variant: capped by the historical NOFILE constant. */
	*retval = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, NOFILE);
	return (0);
}

/*
 * Release descriptor slot fd back to the table: clears the entry and
 * flags, lowers fd_freefile if this slot is lower, and walks
 * fd_lastfile down past any trailing free, unreserved slots.
 */
static __inline__
void _fdrelse(fdp, fd)
	register struct filedesc *fdp;
	register int fd;
{
	if (fd < fdp->fd_freefile)
		fdp->fd_freefile = fd;
#if DIAGNOSTIC
	if (fd > fdp->fd_lastfile)
		panic("fdrelse: fd_lastfile inconsistent");
#endif
	fdp->fd_ofiles[fd] = NULL;
	fdp->fd_ofileflags[fd] = 0;
	while ((fd = fdp->fd_lastfile) > 0 &&
			fdp->fd_ofiles[fd] == NULL &&
			!(fdp->fd_ofileflags[fd] & UF_RESERVED))
		fdp->fd_lastfile--;
}

/*
 * Duplicate a file descriptor.
 */
struct dup_args {
	u_int	fd;
};
/* ARGSUSED */
int
dup(p, uap, retval)
	struct proc *p;
	struct dup_args *uap;
	register_t *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register int old = uap->fd;
	int new, error;

	/* Validate: in range, open, and not mid-allocation (UF_RESERVED). */
	if ((u_int)old >= fdp->fd_nfiles ||
			fdp->fd_ofiles[old] == NULL ||
			(fdp->fd_ofileflags[old] & UF_RESERVED))
		return (EBADF);
	if (error = fdalloc(p, 0, &new))
		return (error);
	return (finishdup(fdp, old, new, retval));
}

/*
 * Duplicate a file descriptor to a particular value.
 */
struct dup2_args {
	u_int	from;
	u_int	to;
};
/* ARGSUSED */
int
dup2(p, uap, retval)
	struct proc *p;
	struct dup2_args *uap;
	register_t *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register int old = uap->from, new = uap->to;
	int i, error;

	/* `old' must be open and unreserved; `new' must be within both the
	 * per-process descriptor limit and the system-wide maxfiles. */
	if ((u_int)old >= fdp->fd_nfiles ||
			fdp->fd_ofiles[old] == NULL ||
			(fdp->fd_ofileflags[old] & UF_RESERVED) ||
			(u_int)new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
			(u_int)new >= maxfiles)
		return (EBADF);
	if (old == new) {
		*retval = new;
		return (0);
	}
	if ((u_int)new >= fdp->fd_nfiles) {
		/* Target slot doesn't exist yet: grow the table.  fdalloc
		 * may hand back a different slot; release it and fall into
		 * the close path to reserve `new' properly. */
		if (error = fdalloc(p, new, &i))
			return (error);
		if (new != i) {
			_fdrelse(fdp, i);
			goto closeit;
		}
	}
	else {
		struct file **fpp;
		char flags;
closeit:
		if ((flags = fdp->fd_ofileflags[new]) & UF_RESERVED)
			return (EBADF);
		/* Reserve `new' so no one else can claim it while we close. */
		fdp->fd_ofileflags[new] = (flags & ~UF_MAPPED) | UF_RESERVED;
		/*
		 * dup2() must succeed even if the close has an error.
		 */
		if (*(fpp = &fdp->fd_ofiles[new])) {
			struct file *fp = *fpp;

			*fpp = NULL; (void) closef(fp, p);
		}
	}
	return (finishdup(fdp, old, new, retval));
}

/*
 * The file control system call.
+ */ +struct fcntl_args { + int fd; + int cmd; + int arg; +}; +/* ARGSUSED */ +int +fcntl(p, uap, retval) + struct proc *p; + register struct fcntl_args *uap; + register_t *retval; +{ + int fd = uap->fd; + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + register char *pop; + struct vnode *vp, *devvp; + int i, tmp, error, error2, flg = F_POSIX; + struct flock fl; + fstore_t alloc_struct; /* structure for allocate command */ + u_int32_t alloc_flags = 0; + off_t offset; /* used for F_SETSIZE */ + int newmin; + struct radvisory ra_struct; + fbootstraptransfer_t fbt_struct; /* for F_READBOOTSTRAP and F_WRITEBOOTSTRAP */ + struct log2phys l2p_struct; /* structure for allocate command */ + daddr_t lbn, bn; + int devBlockSize = 0; + + if ((u_int)fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + pop = &fdp->fd_ofileflags[fd]; + switch (uap->cmd) { + + case F_DUPFD: + newmin = (long)uap->arg; + if ((u_int)newmin >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur || + (u_int)newmin >= maxfiles) + return (EINVAL); + if (error = fdalloc(p, newmin, &i)) + return (error); + return (finishdup(fdp, fd, i, retval)); + + case F_GETFD: + *retval = (*pop & UF_EXCLOSE)? 1 : 0; + return (0); + + case F_SETFD: + *pop = (*pop &~ UF_EXCLOSE) | + ((long)(uap->arg) & 1)? 
UF_EXCLOSE : 0; + return (0); + + case F_GETFL: + *retval = OFLAGS(fp->f_flag); + return (0); + + case F_SETFL: + fp->f_flag &= ~FCNTLFLAGS; + fp->f_flag |= FFLAGS((long)uap->arg) & FCNTLFLAGS; + tmp = fp->f_flag & FNONBLOCK; + error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p); + if (error) + return (error); + tmp = fp->f_flag & FASYNC; + error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p); + if (!error) + return (0); + fp->f_flag &= ~FNONBLOCK; + tmp = 0; + (void) (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p); + return (error); + + case F_GETOWN: + if (fp->f_type == DTYPE_SOCKET) { + *retval = ((struct socket *)fp->f_data)->so_pgid; + return (0); + } + error = (*fp->f_ops->fo_ioctl) + (fp, (int)TIOCGPGRP, (caddr_t)retval, p); + *retval = -*retval; + return (error); + + case F_SETOWN: + if (fp->f_type == DTYPE_SOCKET) { + ((struct socket *)fp->f_data)->so_pgid = + (long)uap->arg; + return (0); + } + if ((long)uap->arg <= 0) { + uap->arg = (void *)(-(long)(uap->arg)); + } else { + struct proc *p1 = pfind((long)uap->arg); + if (p1 == 0) + return (ESRCH); + uap->arg = (void *)(long)p1->p_pgrp->pg_id; + } + return ((*fp->f_ops->fo_ioctl) + (fp, (int)TIOCSPGRP, (caddr_t)&uap->arg, p)); + + case F_SETLKW: + flg |= F_WAIT; + /* Fall into F_SETLK */ + + case F_SETLK: + if (fp->f_type != DTYPE_VNODE) + return (EBADF); + vp = (struct vnode *)fp->f_data; + /* Copy in the lock structure */ + error = copyin((caddr_t)uap->arg, (caddr_t)&fl, + sizeof (fl)); + if (error) + return (error); + if (fl.l_whence == SEEK_CUR) + fl.l_start += fp->f_offset; + switch (fl.l_type) { + + case F_RDLCK: + if ((fp->f_flag & FREAD) == 0) + return (EBADF); + p->p_flag |= P_ADVLOCK; + return (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg)); + + case F_WRLCK: + if ((fp->f_flag & FWRITE) == 0) + return (EBADF); + p->p_flag |= P_ADVLOCK; + return (VOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg)); + + case F_UNLCK: + return (VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &fl, + 
F_POSIX)); + + default: + return (EINVAL); + } + + case F_GETLK: + if (fp->f_type != DTYPE_VNODE) + return (EBADF); + vp = (struct vnode *)fp->f_data; + /* Copy in the lock structure */ + error = copyin((caddr_t)uap->arg, (caddr_t)&fl, + sizeof (fl)); + if (error) + return (error); + if (fl.l_whence == SEEK_CUR) + fl.l_start += fp->f_offset; + if (error = VOP_ADVLOCK(vp, (caddr_t)p, F_GETLK, &fl, F_POSIX)) + return (error); + return (copyout((caddr_t)&fl, (caddr_t)uap->arg, + sizeof (fl))); + + case F_PREALLOCATE: + + /* Copy in the structure */ + + error = copyin((caddr_t)uap->arg, (caddr_t)&alloc_struct, + sizeof (alloc_struct)); + + if (error) + return (error); + + /* now set the space allocated to 0 and pass it out in + case we get a parameter checking error */ + + alloc_struct.fst_bytesalloc = 0; + + error = copyout((caddr_t)&alloc_struct, (caddr_t)uap->arg, + sizeof (alloc_struct)); + + if (error) + return(error); + + /* First make sure that we have write permission */ + + if ((fp->f_flag & FWRITE) == 0) + return (EBADF); + + + /* Do some simple parameter checking */ + + + /* set up the flags */ + + alloc_flags |= PREALLOCATE; + + if (alloc_struct.fst_flags & F_ALLOCATECONTIG) { + alloc_flags |= ALLOCATECONTIG; + } + + if (alloc_struct.fst_flags & F_ALLOCATEALL) { + alloc_flags |= ALLOCATEALL; + } + + /* Do any position mode specific stuff. 
The only */ + /* position mode supported now is PEOFPOSMODE */ + + switch (alloc_struct.fst_posmode) { + + case F_PEOFPOSMODE: + + if ((alloc_struct.fst_offset != 0) || + (alloc_struct.fst_length < 0)) + return (EINVAL); + + alloc_flags |= ALLOCATEFROMPEOF; + break; + + default: + + return(EINVAL); + + } + + + /* Now lock the vnode and call allocate to get the space */ + + vp = (struct vnode *)fp->f_data; + + VOP_LOCK(vp,LK_EXCLUSIVE,p); + error = VOP_ALLOCATE(vp,alloc_struct.fst_length,alloc_flags, + &alloc_struct.fst_bytesalloc,fp->f_cred,p); + VOP_UNLOCK(vp,0,p); + + if (error2 = (copyout((caddr_t)&alloc_struct, (caddr_t)uap->arg, + sizeof (alloc_struct)))) { + if (error) { + return(error); + } else { + return(error2); + } + } + + return(error); + + case F_SETSIZE: + + /* Copy in the structure */ + + error = copyin((caddr_t)uap->arg, (caddr_t)&offset, + sizeof (off_t)); + + if (error) + return (error); + + + /* First make sure that we are root. Growing a file */ + /* without zero filling the data is a security hole */ + /* root would have access anyway so we'll allow it */ + + if (!is_suser()) { + return (EACCES); + } + + /* Now lock the vnode and call allocate to get the space */ + + vp = (struct vnode *)fp->f_data; + + VOP_LOCK(vp,LK_EXCLUSIVE,p); + error = VOP_TRUNCATE(vp,offset,IO_NOZEROFILL,fp->f_cred,p); + VOP_UNLOCK(vp,0,p); + + return(error); + + case F_RDAHEAD: + vp = (struct vnode *)fp->f_data; + + simple_lock(&vp->v_interlock); + if (uap->arg) + vp->v_flag &= ~VRAOFF; + else + vp->v_flag |= VRAOFF; + simple_unlock(&vp->v_interlock); + + return (0); + + case F_NOCACHE: + vp = (struct vnode *)fp->f_data; + + simple_lock(&vp->v_interlock); + if (uap->arg) + vp->v_flag |= VNOCACHE_DATA; + else + vp->v_flag &= ~VNOCACHE_DATA; + simple_unlock(&vp->v_interlock); + + return (0); + + case F_RDADVISE: + vp = (struct vnode *)fp->f_data; + + if (error = copyin((caddr_t)uap->arg, (caddr_t)&ra_struct, sizeof (ra_struct))) + return(error); + return (VOP_IOCTL(vp, 1, 
&ra_struct, 0, fp->f_cred, p)); + + case F_READBOOTSTRAP: + case F_WRITEBOOTSTRAP: + + /* Copy in the structure */ + + error = copyin((caddr_t)uap->arg, (caddr_t)&fbt_struct, + sizeof (fbt_struct)); + + if (error) + return (error); + + + if (uap->cmd == F_WRITEBOOTSTRAP) { + /* First make sure that we are root. Updating the */ + /* bootstrap on a disk could be a security hole */ + + if (!is_suser()) { + return (EACCES); + } + }; + + /* Now lock the vnode and call VOP_IOCTL to handle the I/O: */ + + vp = (struct vnode *)fp->f_data; + if (vp->v_tag != VT_HFS) { + error = EINVAL; + } else { + VOP_LOCK(vp,LK_EXCLUSIVE,p); + error = VOP_IOCTL(vp, (uap->cmd == F_WRITEBOOTSTRAP) ? 3 : 2, &fbt_struct, 0, fp->f_cred, p); + VOP_UNLOCK(vp,0,p); + }; + + return(error); + + case F_LOG2PHYS: + if (fp->f_type != DTYPE_VNODE) + return (EBADF); + vp = (struct vnode *)fp->f_data; + VOP_LOCK(vp, LK_EXCLUSIVE, p); + if (VOP_OFFTOBLK(vp, fp->f_offset, &lbn)) + panic("fcntl LOG2PHYS OFFTOBLK"); + if (VOP_BLKTOOFF(vp, lbn, &offset)) + panic("fcntl LOG2PHYS BLKTOOFF1"); + error = VOP_BMAP(vp, lbn, &devvp, &bn, 0); + VOP_DEVBLOCKSIZE(devvp, &devBlockSize); + VOP_UNLOCK(vp, 0, p); + if (!error) { + l2p_struct.l2p_flags = 0; /* for now */ + l2p_struct.l2p_contigbytes = 0; /* for now */ + l2p_struct.l2p_devoffset = bn * devBlockSize; + l2p_struct.l2p_devoffset += fp->f_offset - offset; + error = copyout((caddr_t)&l2p_struct, + (caddr_t)uap->arg, + sizeof (l2p_struct)); + } + return (error); + + + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Common code for dup, dup2, and fcntl(F_DUPFD). 
 */
/*
 * Install the file from slot `old' into the already-reserved slot
 * `new', taking an extra reference on the file.  Returns the new
 * descriptor via *retval.
 */
int
finishdup(fdp, old, new, retval)
	register struct filedesc *fdp;
	register int old, new;
	register_t *retval;
{
	register struct file *fp;

	/* `old' may have been closed while `new' was being reserved. */
	if ((fp = fdp->fd_ofiles[old]) == NULL ||
			(fdp->fd_ofileflags[old] & UF_RESERVED)) {
		_fdrelse(fdp, new);
		return (EBADF);
	}
	fdp->fd_ofiles[new] = fp;
	/* The dup does not inherit close-on-exec. */
	fdp->fd_ofileflags[new] = fdp->fd_ofileflags[old] &~ UF_EXCLOSE;
	(void)fref(fp);
	if (new > fdp->fd_lastfile)
		fdp->fd_lastfile = new;
	*retval = new;
	return (0);
}

/*
 * Close a file descriptor.
 */
struct close_args {
	int	fd;
};
/* ARGSUSED */
int
close(p, uap, retval)
	struct proc *p;
	struct close_args *uap;
	register_t *retval;
{
	int fd = uap->fd;
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;

	if ((u_int)fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED))
		return (EBADF);
	/* Release the slot first, then drop the file reference. */
	_fdrelse(fdp, fd);
	return (closef(fp, p));
}

/*
 * Return status information about a file descriptor.
 */
struct fstat_args {
	int	fd;
	struct stat *sb;
};
/* ARGSUSED */
int
fstat(p, uap, retval)
	struct proc *p;
	register struct fstat_args *uap;
	register_t *retval;
{
	int fd = uap->fd;
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	struct stat ub;
	int error;

	if ((u_int)fd >= fdp->fd_nfiles ||
			(fp = fdp->fd_ofiles[fd]) == NULL ||
			(fdp->fd_ofileflags[fd] & UF_RESERVED))
		return (EBADF);
	/* Dispatch on the descriptor type. */
	switch (fp->f_type) {

	case DTYPE_VNODE:
		error = vn_stat((struct vnode *)fp->f_data, &ub, p);
		break;

	case DTYPE_SOCKET:
		error = soo_stat((struct socket *)fp->f_data, &ub);
		break;

	case DTYPE_PSXSHM:
		error = pshm_stat((void *)fp->f_data, &ub);
		break;
	default:
		panic("fstat");
		/*NOTREACHED*/
	}
	if (error == 0)
		error = copyout((caddr_t)&ub, (caddr_t)uap->sb,
		    sizeof (ub));
	return (error);
}

#if COMPAT_43
/*
 * Return status information about a file descriptor.
+ */ +struct ofstat_args { + int fd; + struct ostat *sb; +}; +/* ARGSUSED */ +ofstat(p, uap, retval) + struct proc *p; + register struct ofstat_args *uap; + register_t *retval; +{ + int fd = uap->fd; + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + struct stat ub; + struct ostat oub; + int error; + + if ((u_int)fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + switch (fp->f_type) { + + case DTYPE_VNODE: + error = vn_stat((struct vnode *)fp->f_data, &ub, p); + break; + + case DTYPE_SOCKET: + error = soo_stat((struct socket *)fp->f_data, &ub); + break; + + default: + panic("ofstat"); + /*NOTREACHED*/ + } + cvtstat(&ub, &oub); + if (error == 0) + error = copyout((caddr_t)&oub, (caddr_t)uap->sb, + sizeof (oub)); + return (error); +} +#endif /* COMPAT_43 */ + +/* + * Return pathconf information about a file descriptor. + */ +struct fpathconf_args { + int fd; + int name; +}; +/* ARGSUSED */ +fpathconf(p, uap, retval) + struct proc *p; + register struct fpathconf_args *uap; + register_t *retval; +{ + int fd = uap->fd; + struct filedesc *fdp = p->p_fd; + struct file *fp; + struct vnode *vp; + + if ((u_int)fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + switch (fp->f_type) { + + case DTYPE_SOCKET: + if (uap->name != _PC_PIPE_BUF) + return (EINVAL); + *retval = PIPE_BUF; + return (0); + + case DTYPE_VNODE: + vp = (struct vnode *)fp->f_data; + return (VOP_PATHCONF(vp, uap->name, retval)); + + default: + panic("fpathconf"); + } + /*NOTREACHED*/ +} + +/* + * Allocate a file descriptor for the process. 
+ */ +int fdexpand; + +int +fdalloc(p, want, result) + struct proc *p; + int want; + int *result; +{ + register struct filedesc *fdp = p->p_fd; + register int i; + int lim, last, nfiles, oldnfiles; + struct file **newofiles, **ofiles; + char *newofileflags, *ofileflags; + + /* + * Search for a free descriptor starting at the higher + * of want or fd_freefile. If that fails, consider + * expanding the ofile array. + */ + lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); + for (;;) { + last = min(fdp->fd_nfiles, lim); + if ((i = want) < fdp->fd_freefile) + i = fdp->fd_freefile; + ofiles = &fdp->fd_ofiles[i]; + ofileflags = &fdp->fd_ofileflags[i]; + for (; i < last; i++) { + if (*ofiles == NULL && !(*ofileflags & UF_RESERVED)) { + *ofileflags = UF_RESERVED; + if (i > fdp->fd_lastfile) + fdp->fd_lastfile = i; + if (want <= fdp->fd_freefile) + fdp->fd_freefile = i; + *result = i; + return (0); + } + ofiles++; ofileflags++; + } + + /* + * No space in current array. Expand? + */ + if (fdp->fd_nfiles >= lim) + return (EMFILE); + if (fdp->fd_nfiles < NDEXTENT) + nfiles = NDEXTENT; + else + nfiles = 2 * fdp->fd_nfiles; + /* Enforce lim */ + if (nfiles > lim) + nfiles = lim; + MALLOC_ZONE(newofiles, struct file **, + nfiles * OFILESIZE, M_OFILETABL, M_WAITOK); + if (fdp->fd_nfiles >= nfiles) { + FREE_ZONE(newofiles, nfiles * OFILESIZE, M_OFILETABL); + continue; + } + newofileflags = (char *) &newofiles[nfiles]; + /* + * Copy the existing ofile and ofileflags arrays + * and zero the new portion of each array. 
+ */ + oldnfiles = fdp->fd_nfiles; + (void) memcpy(newofiles, fdp->fd_ofiles, + oldnfiles * sizeof *fdp->fd_ofiles); + (void) memset(&newofiles[oldnfiles], 0, + (nfiles - oldnfiles) * sizeof *fdp->fd_ofiles); + + (void) memcpy(newofileflags, fdp->fd_ofileflags, + oldnfiles * sizeof *fdp->fd_ofileflags); + (void) memset(&newofileflags[oldnfiles], 0, + (nfiles - oldnfiles) * + sizeof *fdp->fd_ofileflags); + ofiles = fdp->fd_ofiles; + fdp->fd_ofiles = newofiles; + fdp->fd_ofileflags = newofileflags; + fdp->fd_nfiles = nfiles; + FREE_ZONE(ofiles, oldnfiles * OFILESIZE, M_OFILETABL); + fdexpand++; + } +} + +/* + * Check to see whether n user file descriptors + * are available to the process p. + */ +int +fdavail(p, n) + struct proc *p; + register int n; +{ + register struct filedesc *fdp = p->p_fd; + register struct file **fpp; + register char *flags; + register int i, lim; + + lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); + if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) + return (1); + fpp = &fdp->fd_ofiles[fdp->fd_freefile]; + flags = &fdp->fd_ofileflags[fdp->fd_freefile]; + for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++) + if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0) + return (1); + return (0); +} + +void +fdrelse(p, fd) + struct proc *p; + int fd; +{ + _fdrelse(p->p_fd, fd); +} + +int +fdgetf(p, fd, resultfp) + register struct proc *p; + register int fd; + struct file **resultfp; +{ + register struct filedesc *fdp = p->p_fd; + struct file *fp; + + if ((u_int)fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + + if (resultfp) + *resultfp = fp; + return (0); +} + +/* + * Create a new open file structure and allocate + * a file decriptor for the process that refers to it. 
+ */ +int +falloc(p, resultfp, resultfd) + register struct proc *p; + struct file **resultfp; + int *resultfd; +{ + register struct file *fp, *fq; + int error, i; + + if (error = fdalloc(p, 0, &i)) + return (error); + if (nfiles >= maxfiles) { + tablefull("file"); + return (ENFILE); + } + /* + * Allocate a new file descriptor. + * If the process has file descriptor zero open, add to the list + * of open files at that point, otherwise put it at the front of + * the list of open files. + */ + nfiles++; + MALLOC_ZONE(fp, struct file *, sizeof(struct file), M_FILE, M_WAITOK); + bzero(fp, sizeof(struct file)); + if (fq = p->p_fd->fd_ofiles[0]) { + LIST_INSERT_AFTER(fq, fp, f_list); + } else { + LIST_INSERT_HEAD(&filehead, fp, f_list); + } + p->p_fd->fd_ofiles[i] = fp; + fp->f_count = 1; + fp->f_cred = p->p_ucred; + crhold(fp->f_cred); + if (resultfp) + *resultfp = fp; + if (resultfd) + *resultfd = i; + return (0); +} + +/* + * Free a file structure. + */ +void +ffree(fp) + register struct file *fp; +{ + register struct file *fq; + struct ucred *cred; + + LIST_REMOVE(fp, f_list); + cred = fp->f_cred; + if (cred != NOCRED) { + fp->f_cred = NOCRED; + crfree(cred); + } +#if 1 || DIAGNOSTIC + fp->f_count = 0; +#endif + nfiles--; + FREE_ZONE(fp, sizeof *fp, M_FILE); +} + +void +fdexec(p) + struct proc *p; +{ + register struct filedesc *fdp = p->p_fd; + register int i = fdp->fd_lastfile; + register struct file **fpp = &fdp->fd_ofiles[i]; + register char *flags = &fdp->fd_ofileflags[i]; + + while (i >= 0) { + if ((*flags & (UF_RESERVED|UF_EXCLOSE)) == UF_EXCLOSE) { + register struct file *fp = *fpp; + + *fpp = NULL; *flags = 0; + if (i == fdp->fd_lastfile && i > 0) + fdp->fd_lastfile--; + closef(fp, p); + } + else + *flags &= ~UF_MAPPED; + + i--; fpp--; flags--; + } +} + +/* + * Copy a filedesc structure. 
+ */
+struct filedesc *
+fdcopy(p)
+	struct proc *p;
+{
+	register struct filedesc *newfdp, *fdp = p->p_fd;
+	register int n;
+
+	MALLOC_ZONE(newfdp, struct filedesc *,
+			sizeof *newfdp, M_FILEDESC, M_WAITOK);
+	(void) memcpy(newfdp, fdp, sizeof *newfdp);
+	/* The copy shares cwd/root vnodes; take our own references. */
+	VREF(newfdp->fd_cdir);
+	if (newfdp->fd_rdir)
+		VREF(newfdp->fd_rdir);
+	newfdp->fd_refcnt = 1;
+
+	/*
+	 * Size the new ofile table: the compiled-in minimum when the
+	 * highest open descriptor fits, otherwise the smallest multiple
+	 * of NDEXTENT that covers the descriptors currently in use
+	 * (which also lets an oversized table shrink).
+	 */
+	if (newfdp->fd_lastfile < NDFILE)
+		n = NDFILE;
+	else {
+		n = newfdp->fd_nfiles;
+		while (n > 2 * NDEXTENT && n > newfdp->fd_lastfile * 2)
+			n /= 2;
+	}
+	MALLOC_ZONE(newfdp->fd_ofiles, struct file **,
+			n * OFILESIZE, M_OFILETABL, M_WAITOK);
+	newfdp->fd_ofileflags = (char *) &newfdp->fd_ofiles[n];
+	newfdp->fd_nfiles = n;
+	if (fdp->fd_nfiles > 0) {
+		register struct file **fpp;
+		register char *flags;
+
+		(void) memcpy(newfdp->fd_ofiles, fdp->fd_ofiles,
+				n * sizeof *fdp->fd_ofiles);
+		(void) memcpy(newfdp->fd_ofileflags, fdp->fd_ofileflags,
+				n * sizeof *fdp->fd_ofileflags);
+
+		/*
+		 * Reference every inherited open file; reserved but
+		 * unfilled slots are not inherited by the child.
+		 */
+		fpp = newfdp->fd_ofiles;
+		flags = newfdp->fd_ofileflags;
+		for (n = newfdp->fd_lastfile; n-- >= 0; fpp++, flags++)
+			if (*fpp != NULL && !(*flags & UF_RESERVED)) {
+				(void)fref(*fpp);
+			} else {
+				*fpp = NULL;
+				*flags = 0;
+			}
+	}
+	else
+		(void) memset(newfdp->fd_ofiles, 0, n * OFILESIZE);
+
+	return (newfdp);
+}
+
+/*
+ * Release a filedesc structure.
+ */ +void +fdfree(p) + struct proc *p; +{ + register struct filedesc *fdp; + register struct file **fpp; + register int i; + + if ((fdp = p->p_fd) == NULL) + return; + if (--fdp->fd_refcnt > 0) + return; + p->p_fd = NULL; + if (fdp->fd_nfiles > 0) { + fpp = fdp->fd_ofiles; + for (i = fdp->fd_lastfile; i-- >= 0; fpp++) + if (*fpp) + (void) closef(*fpp, p); + FREE_ZONE(fdp->fd_ofiles, + fdp->fd_nfiles * OFILESIZE, M_OFILETABL); + } + vrele(fdp->fd_cdir); + if (fdp->fd_rdir) + vrele(fdp->fd_rdir); + FREE_ZONE(fdp, sizeof *fdp, M_FILEDESC); +} + +/* + * Internal form of close. + * Decrement reference count on file structure. + * Note: p may be NULL when closing a file + * that was being passed in a message. + */ +int +closef(fp, p) + register struct file *fp; + register struct proc *p; +{ + struct vnode *vp; + struct flock lf; + int error; + + if (fp == NULL) + return (0); + /* + * POSIX record locking dictates that any close releases ALL + * locks owned by this process. This is handled by setting + * a flag in the unlock to free ONLY locks obeying POSIX + * semantics, and not to free BSD-style file locks. + * If the descriptor was in a message, POSIX-style locks + * aren't passed with the descriptor. + */ + if (p && (p->p_flag & P_ADVLOCK) && fp->f_type == DTYPE_VNODE) { + lf.l_whence = SEEK_SET; + lf.l_start = 0; + lf.l_len = 0; + lf.l_type = F_UNLCK; + vp = (struct vnode *)fp->f_data; + (void) VOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX); + } + if (frele(fp) > 0) + return (0); + if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) { + lf.l_whence = SEEK_SET; + lf.l_start = 0; + lf.l_len = 0; + lf.l_type = F_UNLCK; + vp = (struct vnode *)fp->f_data; + (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK); + } + if (fp->f_ops) + error = (*fp->f_ops->fo_close)(fp, p); + else + error = 0; + ffree(fp); + return (error); +} + +/* + * Apply an advisory lock on a file descriptor. 
+ * + * Just attempt to get a record lock of the requested type on + * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0). + */ +struct flock_args { + int fd; + int how; +}; +/* ARGSUSED */ +int +flock(p, uap, retval) + struct proc *p; + register struct flock_args *uap; + register_t *retval; +{ + int fd = uap->fd; + int how = uap->how; + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + struct vnode *vp; + struct flock lf; + + if ((u_int)fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + if (fp->f_type != DTYPE_VNODE) + return (EOPNOTSUPP); + vp = (struct vnode *)fp->f_data; + lf.l_whence = SEEK_SET; + lf.l_start = 0; + lf.l_len = 0; + if (how & LOCK_UN) { + lf.l_type = F_UNLCK; + fp->f_flag &= ~FHASLOCK; + return (VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, F_FLOCK)); + } + if (how & LOCK_EX) + lf.l_type = F_WRLCK; + else if (how & LOCK_SH) + lf.l_type = F_RDLCK; + else + return (EBADF); + fp->f_flag |= FHASLOCK; + if (how & LOCK_NB) + return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK)); + return (VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_FLOCK|F_WAIT)); +} + +/* + * File Descriptor pseudo-device driver (/dev/fd/). + * + * Opening minor device N dup()s the file (if any) connected to file + * descriptor N belonging to the calling process. Note that this driver + * consists of only the ``open()'' routine, because all subsequent + * references to this file will be direct to the other driver. + */ +/* ARGSUSED */ +int +fdopen(dev, mode, type, p) + dev_t dev; + int mode, type; + struct proc *p; +{ + + /* + * XXX Kludge: set curproc->p_dupfd to contain the value of the + * the file descriptor being sought for duplication. The error + * return ensures that the vnode for this device will be released + * by vn_open. Open will detect this special error and take the + * actions in dupfdopen below. 
Other callers of vn_open or VOP_OPEN + * will simply report the error. + */ + p->p_dupfd = minor(dev); + return (ENODEV); +} + +/* + * Duplicate the specified descriptor to a free descriptor. + */ +int +dupfdopen(fdp, indx, dfd, mode, error) + register struct filedesc *fdp; + register int indx, dfd; + int mode; + int error; +{ + register struct file *wfp; + struct file *fp; + + /* + * If the to-be-dup'd fd number is greater than the allowed number + * of file descriptors, or the fd to be dup'd has already been + * closed, reject. Note, check for new == old is necessary as + * falloc could allocate an already closed to-be-dup'd descriptor + * as the new descriptor. + */ + fp = fdp->fd_ofiles[indx]; + if ((u_int)dfd >= fdp->fd_nfiles || + (wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp || + (fdp->fd_ofileflags[dfd] & UF_RESERVED)) + return (EBADF); + + /* + * There are two cases of interest here. + * + * For ENODEV simply dup (dfd) to file descriptor + * (indx) and return. + * + * For ENXIO steal away the file structure from (dfd) and + * store it in (indx). (dfd) is effectively closed by + * this operation. + * + * Any other error code is just returned. + */ + switch (error) { + case ENODEV: + /* + * Check that the mode the file is being opened for is a + * subset of the mode of the existing descriptor. + */ + if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) + return (EACCES); + (void)fref(wfp); + if (indx > fdp->fd_lastfile) + fdp->fd_lastfile = indx;; + fdp->fd_ofiles[indx] = wfp; + fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; + return (0); + + case ENXIO: + /* + * Steal away the file pointer from dfd, and stuff it into indx. 
+ */ + if (indx > fdp->fd_lastfile) + fdp->fd_lastfile = indx;; + fdp->fd_ofiles[indx] = fdp->fd_ofiles[dfd]; + fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd]; + _fdrelse(fdp, dfd); + return (0); + + default: + return (error); + } + /* NOTREACHED */ +} + +/* Reference manipulation routines for the file structure */ + +int +fref(struct file *fp) +{ + if (++fp->f_count <= 0) + panic("fref: f_count"); + return ((int)fp->f_count); +} + +int +frele(struct file *fp) +{ + if (--fp->f_count < 0) + panic("frele: count < 0"); + return ((int)fp->f_count); +} + +int +fcount(struct file *fp) +{ + return ((int)fp->f_count); +} + diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c new file mode 100644 index 000000000..a431f36b2 --- /dev/null +++ b/bsd/kern/kern_event.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ *
+ */
+/*
+ * @(#)kern_event.c 1.0 (3/31/2000)
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+extern struct domain systemdomain;
+
+
+
+int raw_usrreq();
+struct pr_usrreqs event_usrreqs;
+
+struct protosw eventsw[] = {
+	{
+	SOCK_RAW,	&systemdomain,	SYSPROTO_EVENT,	PR_ATOMIC,
+	0,		0,		0,		0,
+	0,
+	0,		0,		0,		0,
+	0,		&event_usrreqs
+	}
+};
+
+static
+struct kern_event_head kern_event_head;
+
+static u_long static_event_id = 0;
+
+/*
+ * Attach a kernel-event socket: allocate its control block, default
+ * the filter to "accept any vendor", link it on the listener list,
+ * and reserve socket buffer space.
+ */
+int kev_attach(struct socket *so, int proto, struct proc *p)
+{
+	int error;
+	struct kern_event_pcb *ev_pcb;
+
+	ev_pcb = _MALLOC(sizeof(struct kern_event_pcb), M_PCB, M_WAITOK);
+	if (ev_pcb == 0)
+		return ENOBUFS;
+
+	ev_pcb->ev_socket = so;
+	ev_pcb->vendor_code_filter = 0xffffffff;
+
+	so->so_pcb = (caddr_t) ev_pcb;
+	LIST_INSERT_HEAD(&kern_event_head, ev_pcb, ev_link);
+	error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE);
+	if (error) {
+		/* Undo the attach; otherwise the linked pcb leaks. */
+		LIST_REMOVE(ev_pcb, ev_link);
+		so->so_pcb = (caddr_t) 0;
+		FREE(ev_pcb, M_PCB);
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * Detach a kernel-event socket: unlink and free its control block,
+ * if one is still attached.
+ */
+int kev_detach(struct socket *so)
+{
+	struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb;
+
+	/* Check for a pcb before touching it, not after. */
+	if (ev_pcb) {
+		LIST_REMOVE(ev_pcb, ev_link);
+		FREE(ev_pcb, M_PCB);
+	}
+
+	return 0;
+}
+
+/*
+ * Build an mbuf from event_msg (header plus up to 5 data vectors)
+ * and deliver a copy to every event socket whose vendor/class/
+ * subclass filters accept it.
+ */
+int kev_post_msg(struct kev_msg *event_msg)
+{
+	struct mbuf *m, *m2;
+	struct kern_event_pcb *ev_pcb;
+	struct kern_event_msg *ev;
+	char *tmp;
+	int total_size;
+	int i;
+
+
+	m = m_get(M_DONTWAIT, MT_DATA);
+	if (m == 0)
+		return ENOBUFS;
+
+	ev = mtod(m, struct kern_event_msg *);
+	total_size = KEV_MSG_HEADER_SIZE;
+
+	/* Append each supplied data vector; an empty one ends the list. */
+	tmp = (char *) &ev->event_data[0];
+	for (i = 0; i < 5; i++) {
+		if (event_msg->dv[i].data_length == 0)
+			break;
+
+		total_size += event_msg->dv[i].data_length;
+		bcopy(event_msg->dv[i].data_ptr, tmp,
+		      event_msg->dv[i].data_length);
+		tmp += event_msg->dv[i].data_length;
+	}
+
+
+	ev->id = ++static_event_id;
+	ev->total_size = total_size;
+	ev->vendor_code = event_msg->vendor_code;
+	ev->kev_class = event_msg->kev_class;
+	ev->kev_subclass = event_msg->kev_subclass;
+	ev->event_code = event_msg->event_code;
+
+	m->m_len = total_size;
+	for (ev_pcb = LIST_FIRST(&kern_event_head);
+	     ev_pcb;
+	     ev_pcb = LIST_NEXT(ev_pcb, ev_link)) {
+
+		if (ev_pcb->vendor_code_filter != KEV_ANY_VENDOR) {
+			if (ev_pcb->vendor_code_filter != ev->vendor_code)
+				continue;
+
+			if (ev_pcb->class_filter != KEV_ANY_CLASS) {
+				if (ev_pcb->class_filter != ev->kev_class)
+					continue;
+
+				if ((ev_pcb->subclass_filter != KEV_ANY_SUBCLASS) &&
+				    (ev_pcb->subclass_filter != ev->kev_subclass))
+					continue;
+			}
+		}
+
+		m2 = m_copym(m, 0, m->m_len, M_NOWAIT);
+		if (m2 == 0) {
+			m_free(m);
+			return ENOBUFS;
+		}
+
+		sbappendrecord(&ev_pcb->ev_socket->so_rcv, m2);
+		sorwakeup(ev_pcb->ev_socket);
+	}
+
+
+	m_free(m);
+	return 0;
+}
+
+/*
+ * ioctl handler: read the most recently assigned event id, and
+ * get/set the per-socket vendor/class/subclass filters.
+ */
+int kev_control(so, cmd, data, ifp, p)
+	struct socket *so;
+	u_long cmd;
+	caddr_t data;
+	register struct ifnet *ifp;
+	struct proc *p;
+{
+	struct kev_request *kev_req = (struct kev_request *) data;
+	struct kern_event_pcb *ev_pcb;
+	u_long *id_value = (u_long *) data;
+
+
+	switch (cmd) {
+
+	case SIOCGKEVID:
+		*id_value = static_event_id;
+		break;
+
+	case SIOCSKEVFILT:
+		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
+		ev_pcb->vendor_code_filter = kev_req->vendor_code;
+		ev_pcb->class_filter = kev_req->kev_class;
+		ev_pcb->subclass_filter = kev_req->kev_subclass;
+		break;
+
+	case SIOCGKEVFILT:
+		ev_pcb = (struct kern_event_pcb *) so->so_pcb;
+		kev_req->vendor_code = ev_pcb->vendor_code_filter;
+		kev_req->kev_class = ev_pcb->class_filter;
+		kev_req->kev_subclass = ev_pcb->subclass_filter;
+		break;
+
+	default:
+		return EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+
+struct pr_usrreqs event_usrreqs = {
+	pru_abort_notsupp, pru_accept_notsupp, kev_attach, pru_bind_notsupp, pru_connect_notsupp,
+	pru_connect2_notsupp, kev_control, kev_detach, pru_disconnect_notsupp,
+	pru_listen_notsupp, pru_peeraddr_notsupp, pru_rcvd_notsupp, pru_rcvoob_notsupp,
+	pru_send_notsupp, pru_sense_null,
pru_shutdown_notsupp, pru_sockaddr_notsupp, + pru_sosend_notsupp, soreceive, sopoll +}; + + + diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c new file mode 100644 index 000000000..1db47973a --- /dev/null +++ b/bsd/kern/kern_exec.c @@ -0,0 +1,908 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +#include + +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)kern_exec.c 8.1 (Berkeley) 6/10/93 + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +extern vm_map_t bsd_pageable_map; + +#define ROUND_PTR(type, addr) \ + (type *)( ( (unsigned)(addr) + 16 - 1) \ + & ~(16 - 1) ) + +static int load_return_to_errno(load_return_t lrtn); +int execve(struct proc *p, struct execve_args *uap, register_t *retval); + +int +execv(p, args, retval) + struct proc *p; + void *args; + int *retval; +{ + ((struct execve_args *)args)->envp = NULL; + return (execve(p, args, retval)); +} + +/* ARGSUSED */ +int +execve(p, uap, retval) + register struct proc *p; + register struct execve_args *uap; + register_t *retval; +{ + register struct ucred *cred = p->p_ucred; + register struct filedesc *fdp = p->p_fd; + register nc; + register char *cp; + int na, ne, ucp, ap, cc; + unsigned len; + int indir; + char *sharg; + char *execnamep; + struct vnode *vp; + struct vattr vattr; + struct vattr origvattr; + vm_offset_t execargs; + struct nameidata nd; + struct ps_strings ps; +#define SHSIZE 512 + char cfarg[SHSIZE]; + boolean_t is_fat; + kern_return_t ret; + struct mach_header *mach_header; + struct fat_header *fat_header; + struct fat_arch fat_arch; + load_return_t lret; + load_result_t load_result; + struct uthread *uthread; + int i; + union { + /* #! 
and name of interpreter */ + char ex_shell[SHSIZE]; + /* Mach-O executable */ + struct mach_header mach_header; + /* Fat executable */ + struct fat_header fat_header; + char pad[512]; + } exdata; + int resid, error; + char *savedpath; + int savedpathlen = 0; + vm_offset_t *execargsp; + char *cpnospace; + task_t tsk; + int numthreads; + + tsk = current_task(); + + + if(tsk != kernel_task) { + numthreads = get_task_numacts(tsk); + if (numthreads <= 0 ) + return(EINVAL); + if (numthreads > 1) { + return(EOPNOTSUPP); + } + } + + ret = kmem_alloc_pageable(bsd_pageable_map, &execargs, NCARGS); + if (ret != KERN_SUCCESS) + return(ENOMEM); + + uthread = get_bsdthread_info(current_act()); + + savedpath = execargs; + + /* + * To support new app package launching for Mac OS X, the dyld + * needs the first argument to execve() stored on the user stack. + * Copyin the "path" at the begining of the "execargs" buffer + * allocated above. + * + * We have to do this before namei() because in case of + * symbolic links, namei() would overwrite the original "path". + * In case the last symbolic link resolved was a relative pathname + * we would loose the original "path", which could be an + * absolute pathname. This might be unacceptable for dyld. + */ + /* XXX We could optimize to avoid copyinstr in the namei() */ + + error = copyinstr(uap->fname, savedpath, MAXPATHLEN, &savedpathlen); + if (error) + return (error); + /* + * copyinstr will put in savedpathlen, the count of + * characters (including NULL) in the path. 
+ */ + + /* Save the name aside for future use */ + execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen); + + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME, + UIO_USERSPACE, uap->fname, p); + if ((error = namei(&nd))) + goto bad1; + vp = nd.ni_vp; + VOP_LEASE(vp, p, p->p_ucred, LEASE_READ); + + if ((error = VOP_GETATTR(vp, &origvattr, p->p_ucred, p))) + goto bad; + + /* Check mount point */ + if (vp->v_mount->mnt_flag & MNT_NOEXEC) { + error = EACCES; + goto bad; + } + + indir = 0; + if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED)) + origvattr.va_mode &= ~(VSUID | VSGID); + + *(&vattr) = *(&origvattr); + +again: + error = check_exec_access(p, vp, &vattr); + if (error) + goto bad; + + /* + * Read in first few bytes of file for segment sizes, magic number: + * 407 = plain executable + * 410 = RO text + * 413 = demand paged RO text + * Also an ASCII line beginning with #! is + * the file name of a ``shell'' and arguments may be prepended + * to the argument list if given here. + * + * SHELL NAMES ARE LIMITED IN LENGTH. + * + * ONLY ONE ARGUMENT MAY BE PASSED TO THE SHELL FROM + * THE ASCII LINE. + */ + + exdata.ex_shell[0] = '\0'; /* for zero length files */ + + error = vn_rdwr(UIO_READ, vp, (caddr_t)&exdata, sizeof (exdata), 0, + UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p); + + if (error) + goto bad; + +#ifndef lint + if (resid > sizeof(exdata) - min(sizeof(exdata.mach_header), + sizeof(exdata.fat_header)) + && exdata.ex_shell[0] != '#') { + error = ENOEXEC; + goto bad; + } +#endif /* lint */ + mach_header = &exdata.mach_header; + fat_header = &exdata.fat_header; + if (mach_header->magic == MH_MAGIC) + is_fat = FALSE; + else if (fat_header->magic == FAT_MAGIC || + fat_header->magic == FAT_CIGAM) + is_fat = TRUE; + else if (mach_header->magic == MH_CIGAM) { + error = EBADARCH; + goto bad; + } else { + if (exdata.ex_shell[0] != '#' || + exdata.ex_shell[1] != '!' 
|| + indir) { + error = ENOEXEC; + goto bad; + } + cp = &exdata.ex_shell[2]; /* skip "#!" */ + while (cp < &exdata.ex_shell[SHSIZE]) { + if (*cp == '\t') + *cp = ' '; + else if (*cp == '\n') { + *cp = '\0'; + break; + } + cp++; + } + if (*cp != '\0') { + error = ENOEXEC; + goto bad; + } + cp = &exdata.ex_shell[2]; + while (*cp == ' ') + cp++; + execnamep = cp; + while (*cp && *cp != ' ') + cp++; + cfarg[0] = '\0'; + cpnospace = cp; + if (*cp) { + *cp++ = '\0'; + while (*cp == ' ') + cp++; + if (*cp) + bcopy((caddr_t)cp, (caddr_t)cfarg, SHSIZE); + } + + /* + * Support for new app package launching for Mac OS X. + * We are about to retry the execve() by changing the path to the + * interpreter name. Need to re-initialize the savedpath and + * savedpathlen. +1 for NULL. + */ + savedpathlen = (cpnospace - execnamep + 1); + error = copystr(execnamep, savedpath, savedpathlen, &savedpathlen); + if (error) + goto bad; + + /* Save the name aside for future use */ + execargsp = (vm_offset_t *)((char *)(execargs) + savedpathlen); + + indir = 1; + vput(vp); + nd.ni_cnd.cn_nameiop = LOOKUP; + nd.ni_cnd.cn_flags = (nd.ni_cnd.cn_flags & HASBUF) | + (FOLLOW | LOCKLEAF | SAVENAME); + nd.ni_segflg = UIO_SYSSPACE; + nd.ni_dirp = execnamep; + if ((error = namei(&nd))) + goto bad1; + vp = nd.ni_vp; + VOP_LEASE(vp, p, cred, LEASE_READ); + if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) + goto bad; + goto again; + } + + /* + * Collect arguments on "file" in swap space. + */ + na = 0; + ne = 0; + nc = 0; + cc = 0; + /* + * Support for new app package launching for Mac OS X allocates + * the "path" at the begining. + * execargs get allocated after that + */ + cp = (char *) execargsp; /* running pointer for copy */ + /* + * size of execargs less sizeof "path", + * a pointer to "path" and a NULL poiter + */ + cc = NCARGS - savedpathlen - 2*NBPW; + /* + * Copy arguments into file in argdev area. 
+ */ + if (uap->argp) for (;;) { + ap = NULL; + sharg = NULL; + if (indir && na == 0) { + sharg = nd.ni_cnd.cn_nameptr; + ap = (int)sharg; + uap->argp++; /* ignore argv[0] */ + } else if (indir && (na == 1 && cfarg[0])) { + sharg = cfarg; + ap = (int)sharg; + } else if (indir && (na == 1 || (na == 2 && cfarg[0]))) + ap = (int)uap->fname; + else if (uap->argp) { + ap = fuword((caddr_t)uap->argp); + uap->argp++; + } + if (ap == NULL && uap->envp) { + uap->argp = NULL; + if ((ap = fuword((caddr_t)uap->envp)) != NULL) + uap->envp++, ne++; + } + if (ap == NULL) + break; + na++; + if (ap == -1) { + error = EFAULT; + break; + } + do { + if (nc >= (NCARGS - savedpathlen - 2*NBPW -1)) { + error = E2BIG; + break; + } + if (sharg) { + error = copystr(sharg, cp, (unsigned)cc, &len); + sharg += len; + } else { + error = copyinstr((caddr_t)ap, cp, (unsigned)cc, + &len); + ap += len; + } + cp += len; + nc += len; + cc -= len; + } while (error == ENAMETOOLONG); + if (error) { + goto bad; + } + } + nc = (nc + NBPW-1) & ~(NBPW-1); + + /* + * If we have a fat file, find "our" executable. + */ + if (is_fat) { + /* + * Look up our architecture in the fat file. + */ + lret = fatfile_getarch(vp, (vm_offset_t)fat_header, &fat_arch); + if (lret != LOAD_SUCCESS) { + error = load_return_to_errno(lret); + goto bad; + } + /* Read the Mach-O header out of it */ + error = vn_rdwr(UIO_READ, vp, (caddr_t)&exdata.mach_header, + sizeof (exdata.mach_header), + fat_arch.offset, + UIO_SYSSPACE, (IO_UNIT|IO_NODELOCKED), cred, &resid, p); + + if (error) { + goto bad; + } + + /* Did we read a complete header? */ + if (resid) { + error = EBADEXEC; + goto bad; + } + + /* Is what we found a Mach-O executable */ + if (mach_header->magic != MH_MAGIC) { + error = ENOEXEC; + goto bad; + } + + /* + * Load the Mach-O file. + */ + VOP_UNLOCK(vp, 0, p); + lret = load_machfile(vp, mach_header, fat_arch.offset, + fat_arch.size, &load_result); + } else { + /* + * Load the Mach-O file. 
+ */ + VOP_UNLOCK(vp, 0, p); + lret = load_machfile(vp, mach_header, 0, + (u_long)vattr.va_size, &load_result); + } + + if (lret != LOAD_SUCCESS) { + error = load_return_to_errno(lret); + goto bad; + } + + /* load_machfile() maps the vnode */ + ubc_map(vp); + + /* + * deal with set[ug]id. + */ + p->p_flag &= ~P_SUGID; + if (((origvattr.va_mode & VSUID) != 0 && + p->p_ucred->cr_uid != origvattr.va_uid) + || (origvattr.va_mode & VSGID) != 0 && + p->p_ucred->cr_gid != origvattr.va_gid) { + p->p_ucred = crcopy(cred); +#if KTRACE + /* + * If process is being ktraced, turn off - unless + * root set it. + */ + if (p->p_tracep && !(p->p_traceflag & KTRFAC_ROOT)) { + vrele(p->p_tracep); + p->p_tracep = NULL; + p->p_traceflag = 0; + } +#endif + if (origvattr.va_mode & VSUID) + p->p_ucred->cr_uid = origvattr.va_uid; + if (origvattr.va_mode & VSGID) + p->p_ucred->cr_gid = origvattr.va_gid; + + set_security_token(p); + p->p_flag |= P_SUGID; + + /* Radar 2261856; setuid security hole fix */ + /* Patch from OpenBSD: A. Ramesh */ + /* + * XXX For setuid processes, attempt to ensure that + * stdin, stdout, and stderr are already allocated. + * We do not want userland to accidentally allocate + * descriptors in this range which has implied meaning + * to libc. 
+ */ + for (i = 0; i < 3; i++) { + extern struct fileops vnops; + struct nameidata nd1; + struct file *fp; + int indx; + + if (p->p_fd->fd_ofiles[i] == NULL) { + if ((error = falloc(p, &fp, &indx)) != 0) + continue; + NDINIT(&nd1, LOOKUP, FOLLOW, UIO_SYSSPACE, + "/dev/null", p); + if ((error = vn_open(&nd1, FREAD, 0)) != 0) { + ffree(fp); + p->p_fd->fd_ofiles[indx] = NULL; + break; + } + fp->f_flag = FREAD; + fp->f_type = DTYPE_VNODE; + fp->f_ops = &vnops; + fp->f_data = (caddr_t)nd1.ni_vp; + VOP_UNLOCK(nd1.ni_vp, 0, p); + } + } + } + p->p_cred->p_svuid = p->p_ucred->cr_uid; + p->p_cred->p_svgid = p->p_ucred->cr_gid; + + if (p->p_flag & P_TRACED) { + psignal(p, SIGTRAP); +#ifdef BSD_USE_APC + thread_apc_set(current_act(), bsd_ast); +#else + ast_on(AST_BSD); +#endif + } + + if (error) { + goto bad; + } + VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, p); + vput(vp); + vp = NULL; + + if (load_result.unixproc && + create_unix_stack(current_map(), + load_result.user_stack, p)) { + error = load_return_to_errno(LOAD_NOSPACE); + goto bad; + } + + /* + * Copy back arglist if necessary. + */ + + ucp = p->user_stack; + if (load_result.unixproc) { + int pathptr; + + ucp = ucp - nc - NBPW; /* begining of the STRING AREA */ + + /* + * Support for new app package launching for Mac OS X allocates + * the "path" at the begining of the execargs buffer. + * copy it just before the string area. + */ + savedpathlen = (savedpathlen + NBPW-1) & ~(NBPW-1); + len = 0; + pathptr = ucp - savedpathlen; + error = copyoutstr(savedpath, (caddr_t)pathptr, + (unsigned)savedpathlen, &len); + if (error) + goto bad; + + /* Save a NULL pointer below it */ + (void) suword((caddr_t)(pathptr - NBPW), 0); + + /* Save the pointer to "path" just below it */ + (void) suword((caddr_t)(pathptr - 2*NBPW), pathptr); + + /* + * na includes arg[] and env[]. 
+ * NBPW for 2 NULL one each after arg[argc -1] and env[n]
+ * NBPW for argc
+ * skip over saved path, NBPW for pointer to path,
+ * and NBPW for the NULL after pointer to path.
+ */
+ ap = ucp - na*NBPW - 3*NBPW - savedpathlen - 2*NBPW;
+ uthread->uu_ar0[SP] = ap;
+ (void) suword((caddr_t)ap, na-ne); /* argc */
+ nc = 0;
+ cc = 0;
+
+ cp = (char *) execargsp;
+ cc = NCARGS - savedpathlen - 2*NBPW;
+ ps.ps_argvstr = (char *)ucp; /* first argv string */
+ ps.ps_nargvstr = na - ne; /* argc */
+ for (;;) {
+ ap += NBPW;
+ if (na == ne) {
+ (void) suword((caddr_t)ap, 0);
+ ap += NBPW;
+ ps.ps_envstr = (char *)ucp;
+ ps.ps_nenvstr = ne;
+ }
+ if (--na < 0)
+ break;
+ (void) suword((caddr_t)ap, ucp);
+ do {
+ error = copyoutstr(cp, (caddr_t)ucp,
+ (unsigned)cc, &len);
+ ucp += len;
+ cp += len;
+ nc += len;
+ cc -= len;
+ } while (error == ENAMETOOLONG);
+ if (error == EFAULT)
+ break; /* bad stack - user's problem */
+ }
+ (void) suword((caddr_t)ap, 0);
+ }
+
+ if (load_result.dynlinker) {
+ ap = uthread->uu_ar0[SP] -= 4;
+ (void) suword((caddr_t)ap, load_result.mach_header);
+ }
+
+#if defined(i386) || defined(ppc)
+ uthread->uu_ar0[PC] = load_result.entry_point;
+#else
+#error architecture not implemented!
+#endif
+
+ /* Stop profiling */
+ stopprofclock(p);
+
+ /*
+ * Reset signal state.
+ */
+ execsigs(p);
+
+ /*
+ * Close file descriptors
+ * which specify close-on-exec.
+ */
+ fdexec(p);
+ /* FIXME: Till vmspace inherit is fixed: */
+ if (p->vm_shm)
+ shmexit(p);
+
+ /*
+ * Remember file name for accounting.
+ */ + p->p_acflag &= ~AFORK; + if (nd.ni_cnd.cn_namelen > MAXCOMLEN) + nd.ni_cnd.cn_namelen = MAXCOMLEN; + bcopy((caddr_t)nd.ni_cnd.cn_nameptr, (caddr_t)p->p_comm, + (unsigned)nd.ni_cnd.cn_namelen); + p->p_comm[nd.ni_cnd.cn_namelen] = '\0'; + + { + /* This is for kdebug */ + long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; + + /* Collect the pathname for tracing */ + kdbg_trace_string(p, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); + KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 2)) | DBG_FUNC_NONE, + dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); + } + + /* + * mark as execed, wakeup the process that vforked (if any) and tell + * it that it now has it's own resources back + */ + p->p_flag |= P_EXEC; + if (p->p_pptr && (p->p_flag & P_PPWAIT)) { + p->p_flag &= ~P_PPWAIT; + wakeup((caddr_t)p->p_pptr); + } + +bad: + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + if (vp) + vput(vp); +bad1: +#if FIXME /* [ */ + if (execargs) + kmem_free_wakeup(bsd_pageable_map, execargs, NCARGS); +#else /* FIXME ][ */ + if (execargs) + kmem_free(bsd_pageable_map, execargs, NCARGS); +#endif /* FIXME ] */ + return(error); +} + + +#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur) + +kern_return_t +create_unix_stack(map, user_stack, p) + vm_map_t map; + vm_offset_t user_stack; + struct proc *p; +{ + vm_size_t size; + vm_offset_t addr; + + p->user_stack = user_stack; + size = round_page(unix_stack_size(p)); +#if STACK_GROWTH_UP + /* stack always points to first address for stacks */ + addr = user_stack; +#else STACK_GROWTH_UP + addr = trunc_page(user_stack - size); +#endif /* STACK_GROWTH_UP */ + return (vm_allocate(map,&addr, size, FALSE)); +} + +#include + +char init_program_name[128] = "/sbin/mach_init\0"; + +char init_args[128] = ""; + +struct execve_args init_exec_args; +int init_attempts = 0; + + +void +load_init_program(p) + struct proc *p; +{ + vm_offset_t init_addr; + int *old_ap; + char *argv[3]; + int error; + register_t retval[2]; + struct uthread 
* ut; + + unix_master(); + + error = 0; + + /* init_args are copied in string form directly from bootstrap */ + + do { + if (boothowto & RB_INITNAME) { + printf("init program? "); +#if FIXME /* [ */ + gets(init_program_name, init_program_name); +#endif /* FIXME ] */ + } + + if (error && ((boothowto & RB_INITNAME) == 0) && + (init_attempts == 1)) { + static char other_init[] = "/etc/mach_init"; + printf("Load of %s, errno %d, trying %s\n", + init_program_name, error, other_init); + error = 0; + bcopy(other_init, init_program_name, + sizeof(other_init)); + } + + init_attempts++; + + if (error) { + printf("Load of %s failed, errno %d\n", + init_program_name, error); + error = 0; + boothowto |= RB_INITNAME; + continue; + } + + /* + * Copy out program name. + */ + + init_addr = VM_MIN_ADDRESS; + (void) vm_allocate(current_map(), &init_addr, + PAGE_SIZE, TRUE); + if (init_addr == 0) + init_addr++; + (void) copyout((caddr_t) init_program_name, + (caddr_t) (init_addr), + (unsigned) sizeof(init_program_name)+1); + + argv[0] = (char *) init_addr; + init_addr += sizeof(init_program_name); + init_addr = (vm_offset_t)ROUND_PTR(char, init_addr); + + /* + * Put out first (and only) argument, similarly. + * Assumes everything fits in a page as allocated + * above. + */ + + (void) copyout((caddr_t) init_args, + (caddr_t) (init_addr), + (unsigned) sizeof(init_args)); + + argv[1] = (char *) init_addr; + init_addr += sizeof(init_args); + init_addr = (vm_offset_t)ROUND_PTR(char, init_addr); + + /* + * Null-end the argument list + */ + + argv[2] = (char *) 0; + + /* + * Copy out the argument list. + */ + + (void) copyout((caddr_t) argv, + (caddr_t) (init_addr), + (unsigned) sizeof(argv)); + + /* + * Set up argument block for fake call to execve. 
+ */ + + init_exec_args.fname = argv[0]; + init_exec_args.argp = (char **) init_addr; + init_exec_args.envp = 0; + + /* So that mach_init task + * is set with uid,gid 0 token + */ + set_security_token(p); + + error = execve(p,&init_exec_args,retval); + } while (error); + + unix_release(); +} + +/* + * Convert a load_return_t to an errno. + */ +static int +load_return_to_errno(load_return_t lrtn) +{ + switch (lrtn) { + case LOAD_SUCCESS: + return 0; + case LOAD_BADARCH: + return EBADARCH; + case LOAD_BADMACHO: + return EBADMACHO; + case LOAD_SHLIB: + return ESHLIBVERS; + case LOAD_NOSPACE: + return ENOMEM; + case LOAD_PROTECT: + return EACCES; + case LOAD_RESOURCE: + case LOAD_FAILURE: + default: + return EBADEXEC; + } +} + +/* + * exec_check_access() + */ +int +check_exec_access(p, vp, vap) + struct proc *p; + struct vnode *vp; + struct vattr *vap; +{ + int flag; + int error; + + if (error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) + return (error); + flag = p->p_flag; + if (flag & P_TRACED) { + if (error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) + return (error); + } + if (vp->v_type != VREG || + (vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) + return (EACCES); + return (0); +} + diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c new file mode 100644 index 000000000..385c63711 --- /dev/null +++ b/bsd/kern/kern_exit.c @@ -0,0 +1,721 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_exit.c 8.7 (Berkeley) 2/12/94 + */ + +#include +#include + +#include "compat_43.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +extern char init_task_failure_data[]; +void exit1 __P((struct proc *, int)); + +/* + * exit -- + * Death of process. + */ +struct exit_args { + int rval; +}; +void +exit(p, uap, retval) + struct proc *p; + struct exit_args *uap; + int *retval; +{ + exit1(p, W_EXITCODE(uap->rval, 0)); + + /* drop funnel befewo we return */ + thread_funnel_set(kernel_flock, FALSE); + thread_exception_return(); + /* NOTREACHED */ + while (TRUE) + thread_block(0); + /* NOTREACHED */ +} + +/* + * Exit: deallocate address space and other resources, change proc state + * to zombie, and unlink proc from allproc and parent's lists. Save exit + * status and rusage for wait(). Check for child processes and orphan them. 
+ */ +void +exit1(p, rv) + register struct proc *p; + int rv; +{ + register struct proc *q, *nq; + thread_t self = current_thread(); + thread_act_t th_act_self = current_act(); + struct task *task = p->task; + register int i,s; + struct uthread *ut; + + /* + * If a thread in this task has already + * called exit(), then halt any others + * right here. + */ + signal_lock(p); + while (p->exit_thread != self) { + if (sig_try_locked(p) <= 0) { + if (get_threadtask(th_act_self) != task) { + signal_unlock(p); + return; + } + signal_unlock(p); + thread_terminate(th_act_self); + thread_funnel_set(kernel_flock, FALSE); + thread_exception_return(); + /* NOTREACHED */ + } + sig_lock_to_exit(p); + } + signal_unlock(p); + if (p->p_pid == 1) { + printf("pid 1 exited (signal %d, exit %d)", + WTERMSIG(rv), WEXITSTATUS(rv)); + panic("init died\nState at Last Exception:\n\n%s", + init_task_failure_data); + } + + s = splsched(); + p->p_flag |= P_WEXIT; + splx(s); + proc_prepareexit(p); + p->p_xstat = rv; + + /* task terminate will call proc_terminate and that cleans it up */ + task_terminate_internal(task); + + /* + * we come back and returns to AST which + * should cleanup the rest + */ +#if 0 + if (task == current_task()) { + thread_exception_return(); + /*NOTREACHED*/ + } + + while (task == current_task()) { + thread_terminate_self(); + /*NOTREACHED*/ + } +#endif +} + +void +proc_prepareexit(struct proc *p) +{ + int s; + struct uthread *ut; + thread_t self = current_thread(); + thread_act_t th_act_self = current_act(); + + + /* + * Remove proc from allproc queue and from pidhash chain. + * Need to do this before we do anything that can block. + * Not doing causes things like mount() find this on allproc + * in partially cleaned state. + */ + LIST_REMOVE(p, p_list); + LIST_REMOVE(p, p_hash); + +#ifdef PGINPROF + vmsizmon(); +#endif + /* + * If parent is waiting for us to exit or exec, + * P_PPWAIT is set; we will wakeup the parent below. 
+ */ + p->p_flag &= ~(P_TRACED | P_PPWAIT); + p->p_sigignore = ~0; + p->p_siglist = 0; + ut = get_bsdthread_info(th_act_self); + ut->uu_sig = 0; + untimeout(realitexpire, (caddr_t)p); + +} + +void +proc_exit(struct proc *p) +{ + register struct proc *q, *nq; + thread_t self = current_thread(); + thread_act_t th_act_self = current_act(); + struct task *task = p->task; + register int i,s; + struct uthread *ut; + boolean_t funnel_state; + + /* This can happen if thread_terminate of the single thread + * process + */ + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + if( !(p->p_flag & P_WEXIT)) { + s = splsched(); + p->p_flag |= P_WEXIT; + splx(s); + proc_prepareexit(p); + } + + MALLOC_ZONE(p->p_ru, struct rusage *, + sizeof (*p->p_ru), M_ZOMBIE, M_WAITOK); + + /* + * Close open files and release open-file table. + * This may block! + */ + fdfree(p); + + /* Close ref SYSV Shared memory*/ + if (p->vm_shm) + shmexit(p); + + if (SESS_LEADER(p)) { + register struct session *sp = p->p_session; + + if (sp->s_ttyvp) { + /* + * Controlling process. + * Signal foreground pgrp, + * drain controlling terminal + * and revoke access to controlling terminal. + */ + if (sp->s_ttyp->t_session == sp) { + if (sp->s_ttyp->t_pgrp) + pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1); + (void) ttywait(sp->s_ttyp); + /* + * The tty could have been revoked + * if we blocked. + */ + if (sp->s_ttyvp) + VOP_REVOKE(sp->s_ttyvp, REVOKEALL); + } + if (sp->s_ttyvp) + vrele(sp->s_ttyvp); + sp->s_ttyvp = NULL; + /* + * s_ttyp is not zero'd; we use this to indicate + * that the session once had a controlling terminal. 
+ * (for logging and informational purposes) + */ + } + sp->s_leader = NULL; + } + + fixjobc(p, p->p_pgrp, 0); + p->p_rlimit[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; +#if KTRACE + /* + * release trace file + */ + p->p_traceflag = 0; /* don't trace the vrele() */ + if (p->p_tracep) + vrele(p->p_tracep); +#endif + + + q = p->p_children.lh_first; + if (q) /* only need this if any child is S_ZOMB */ + wakeup((caddr_t) initproc); + for (; q != 0; q = nq) { + nq = q->p_sibling.le_next; + proc_reparent(q, initproc); + /* + * Traced processes are killed + * since their existence means someone is messing up. + */ + if (q->p_flag & P_TRACED) { + q->p_flag &= ~P_TRACED; + if (q->sigwait_thread) { + thread_t sig_shuttle = getshuttle_thread(q->sigwait_thread); + /* + * The sigwait_thread could be stopped at a + * breakpoint. Wake it up to kill. + * Need to do this as it could be a thread which is not + * the first thread in the task. So any attempts to kill + * the process would result into a deadlock on q->sigwait. + */ + thread_resume((struct thread *)q->sigwait_thread); + clear_wait(sig_shuttle, THREAD_INTERRUPTED); + threadsignal(q->sigwait_thread, SIGKILL, 0); + } + psignal(q, SIGKILL); + } + } + + + /* + * Save exit status and final rusage info, adding in child rusage + * info and self times. 
+ */ + *p->p_ru = p->p_stats->p_ru; + + timerclear(&p->p_ru->ru_utime); + timerclear(&p->p_ru->ru_stime); + + if (task) { + task_basic_info_data_t tinfo; + task_thread_times_info_data_t ttimesinfo; + int task_info_stuff, task_ttimes_stuff; + struct timeval ut,st; + + task_info_stuff = TASK_BASIC_INFO_COUNT; + task_info(task, TASK_BASIC_INFO, + &tinfo, &task_info_stuff); + p->p_ru->ru_utime.tv_sec = tinfo.user_time.seconds; + p->p_ru->ru_utime.tv_usec = tinfo.user_time.microseconds; + p->p_ru->ru_stime.tv_sec = tinfo.system_time.seconds; + p->p_ru->ru_stime.tv_usec = tinfo.system_time.microseconds; + + task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT; + task_info(task, TASK_THREAD_TIMES_INFO, + &ttimesinfo, &task_ttimes_stuff); + + ut.tv_sec = ttimesinfo.user_time.seconds; + ut.tv_usec = ttimesinfo.user_time.microseconds; + st.tv_sec = ttimesinfo.system_time.seconds; + st.tv_usec = ttimesinfo.system_time.microseconds; + timeradd(&ut,&p->p_ru->ru_utime,&p->p_ru->ru_utime); + timeradd(&st,&p->p_ru->ru_stime,&p->p_ru->ru_stime); + } + + + ruadd(p->p_ru, &p->p_stats->p_cru); + + /* + * Free up profiling buffers. + */ + { + struct uprof *p0 = &p->p_stats->p_prof, *p1, *pn; + + p1 = p0->pr_next; + p0->pr_next = NULL; + p0->pr_scale = 0; + + for (; p1 != NULL; p1 = pn) { + pn = p1->pr_next; + kfree((vm_offset_t)p1, sizeof *p1); + } + } + + /* + * Other substructures are freed from wait(). + */ + FREE_ZONE(p->p_stats, sizeof *p->p_stats, M_SUBPROC); + p->p_stats = NULL; + + FREE_ZONE(p->p_sigacts, sizeof *p->p_sigacts, M_SUBPROC); + p->p_sigacts = NULL; + + if (--p->p_limit->p_refcnt == 0) + FREE_ZONE(p->p_limit, sizeof *p->p_limit, M_SUBPROC); + p->p_limit = NULL; + + /* + * Finish up by terminating the task + * and halt this thread (only if a + * member of the task exiting). + */ + p->task = TASK_NULL; + //task->proc = NULL; + set_bsdtask_info(task, NULL); + + /* + * Notify parent that we're gone. + */ + psignal(p->p_pptr, SIGCHLD); + + /* Place onto zombproc. 
*/ + LIST_INSERT_HEAD(&zombproc, p, p_list); + p->p_stat = SZOMB; + + /* and now wakeup the parent */ + wakeup((caddr_t)p->p_pptr); + + (void) thread_funnel_set(kernel_flock, funnel_state); +} + + +struct wait4_args { + int pid; + int *status; + int options; + struct rusage *rusage; +}; + +#if COMPAT_43 +int +owait(p, uap, retval) + struct proc *p; + void *uap; + int *retval; +{ + struct wait4_args *a; + + a = (struct wait4_args *)get_bsduthreadarg(current_act()); + + a->options = 0; + a->rusage = NULL; + a->pid = WAIT_ANY; + a->status = NULL; + return (wait1(p, a, retval, 1)); +} + +int +wait4(p, uap, retval) + struct proc *p; + struct wait4_args *uap; + int *retval; +{ + + return (wait1(p, uap, retval, 0)); +} + +struct owait3_args { + int *status; + int options; + struct rusage *rusage; +}; + +int +owait3(p, uap, retval) + struct proc *p; + struct owait3_args *uap; + int *retval; +{ + struct wait4_args *a; + + a = (struct wait4_args *)get_bsduthreadarg(current_act); + + a->rusage = uap->rusage; + a->options = uap->options; + a->status = uap->status; + a->pid = WAIT_ANY; + + return (wait1(p, a, retval, 1)); +} + +#else +#define wait1 wait4 +#endif + +int +wait1continue(result) +{ + void *vt; + thread_act_t thread; + struct uthread *ut; + int *retval; + struct proc *p; + + p = get_bsdtask_info(current_task()); + p->p_flag &= ~P_WAITING; + + if (result != 0) { + return(result); + } + + thread = current_act(); + ut = get_bsdthread_info(thread); + vt = get_bsduthreadarg(thread); + retval = get_bsduthreadrval(thread); + wait1((struct proc *)p, (struct wait4_args *)vt, retval, 0); +} + +int +wait1(q, uap, retval, compat) + register struct proc *q; + register struct wait4_args *uap; + register_t *retval; +#if COMPAT_43 + int compat; +#endif +{ + register int nfound; + register struct proc *p, *t; + int status, error; + + +#if 0 + /* since we are funneled we don't need to do this atomically, yet */ + if (q->p_flag & P_WAITING) { + return(EINVAL); + } + q->p_flag |= 
P_WAITING; /* only allow single thread to wait() */ +#endif + + if (uap->pid == 0) + uap->pid = -q->p_pgid; + +loop: + nfound = 0; + for (p = q->p_children.lh_first; p != 0; p = p->p_sibling.le_next) { + if (uap->pid != WAIT_ANY && + p->p_pid != uap->pid && + p->p_pgid != -(uap->pid)) + continue; + nfound++; + if (p->p_stat == SZOMB) { + retval[0] = p->p_pid; +#if COMPAT_43 + if (compat) + retval[1] = p->p_xstat; + else +#endif + if (uap->status) { + status = p->p_xstat; /* convert to int */ + if (error = copyout((caddr_t)&status, + (caddr_t)uap->status, + sizeof(status))) { + q->p_flag &= ~P_WAITING; + return (error); + } + } + if (uap->rusage && + (error = copyout((caddr_t)p->p_ru, + (caddr_t)uap->rusage, + sizeof (struct rusage)))) { + q->p_flag &= ~P_WAITING; + return (error); + } + /* + * If we got the child via a ptrace 'attach', + * we need to give it back to the old parent. + */ + if (p->p_oppid && (t = pfind(p->p_oppid))) { + p->p_oppid = 0; + proc_reparent(p, t); + psignal(t, SIGCHLD); + wakeup((caddr_t)t); + q->p_flag &= ~P_WAITING; + return (0); + } + p->p_xstat = 0; + if (p->p_ru) { + ruadd(&q->p_stats->p_cru, p->p_ru); + FREE_ZONE(p->p_ru, sizeof *p->p_ru, M_ZOMBIE); + p->p_ru = NULL; + } else { + printf("Warning : lost p_ru for %s\n", p->p_comm); + } + + /* + * Decrement the count of procs running with this uid. + */ + (void)chgproccnt(p->p_cred->p_ruid, -1); + + /* + * Free up credentials. + */ + if (--p->p_cred->p_refcnt == 0) { + struct ucred *ucr = p->p_ucred; + struct pcred *pcr; + + if (ucr != NOCRED) { + p->p_ucred = NOCRED; + crfree(ucr); + } + pcr = p->p_cred; + p->p_cred = NULL; + FREE_ZONE(pcr, sizeof *pcr, M_SUBPROC); + } + + /* + * Release reference to text vnode + */ + if (p->p_textvp) + vrele(p->p_textvp); + + /* + * Finally finished with old proc entry. + * Unlink it from its process group and free it. 
+ */ + leavepgrp(p); + LIST_REMOVE(p, p_list); /* off zombproc */ + LIST_REMOVE(p, p_sibling); + FREE_ZONE(p, sizeof *p, M_PROC); + nprocs--; + q->p_flag &= ~P_WAITING; + return (0); + } + if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 && + (p->p_flag & P_TRACED || uap->options & WUNTRACED)) { + p->p_flag |= P_WAITED; + retval[0] = p->p_pid; +#if COMPAT_43 + if (compat) { + retval[1] = W_STOPCODE(p->p_xstat); + error = 0; + } else +#endif + if (uap->status) { + status = W_STOPCODE(p->p_xstat); + error = copyout((caddr_t)&status, + (caddr_t)uap->status, + sizeof(status)); + } else + error = 0; + q->p_flag &= ~P_WAITING; + return (error); + } + } + if (nfound == 0) { + q->p_flag &= ~P_WAITING; + return (ECHILD); + } + if (uap->options & WNOHANG) { + retval[0] = 0; + q->p_flag &= ~P_WAITING; + return (0); + } + + if (error = tsleep0((caddr_t)q, PWAIT | PCATCH, "wait", 0, wait1continue)) { + q->p_flag &= ~P_WAITING; + return (error); + } + goto loop; +} + +/* + * make process 'parent' the new parent of process 'child'. + */ +void +proc_reparent(child, parent) + register struct proc *child; + register struct proc *parent; +{ + + if (child->p_pptr == parent) + return; + + LIST_REMOVE(child, p_sibling); + LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); + child->p_pptr = parent; +} + +kern_return_t +init_process(void) +/* + * Make the current process an "init" process, meaning + * that it doesn't have a parent, and that it won't be + * gunned down by kill(-1, 0). + */ +{ + register struct proc *p = current_proc(); + + if (suser(p->p_ucred, &p->p_acflag)) + return(KERN_NO_ACCESS); + + if (p->p_pid != 1 && p->p_pgid != p->p_pid) + enterpgrp(p, p->p_pid, 0); + p->p_flag |= P_SYSTEM; + + /* + * Take us out of the sibling chain, and + * out of our parent's child chain. 
+ */ + LIST_REMOVE(p, p_sibling); + p->p_sibling.le_prev = NULL; + p->p_sibling.le_next = NULL; + p->p_pptr = kernproc; + + return(KERN_SUCCESS); +} + +void +process_terminate_self(void) +{ + struct proc *p = current_proc(); + + if (p != NULL) { + exit1(p, W_EXITCODE(0, SIGKILL)); + /*NOTREACHED*/ + } +} diff --git a/bsd/kern/kern_fork.c b/bsd/kern/kern_fork.c new file mode 100644 index 000000000..a11175f7b --- /dev/null +++ b/bsd/kern/kern_fork.c @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kern_fork.c 8.8 (Berkeley) 2/14/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +thread_t cloneproc(struct proc *, int); +thread_t procdup(); + +#define DOFORK 0x1 /* fork() system call */ +#define DOVFORK 0x2 /* vfork() system call */ +static int fork1(struct proc *, long, register_t *); + +/* + * fork system call. + */ +int +fork(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + return (fork1(p, (long)DOFORK, retval)); +} + +/* + * vfork system call + */ +int +vfork(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + return (fork1(p, (long)DOVFORK, retval)); +} + +static int +fork1(p1, flags, retval) + struct proc *p1; + long flags; + register_t *retval; +{ + register struct proc *p2; + register uid_t uid; + thread_t newth, self = current_thread(); + int s, count; + task_t t; + + /* + * Although process entries are dynamically created, we still keep + * a global limit on the maximum number we will create. Don't allow + * a nonprivileged user to use the last process; don't let root + * exceed the limit. The variable nprocs is the current number of + * processes, maxproc is the limit. + */ + uid = p1->p_cred->p_ruid; + if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) { + tablefull("proc"); + retval[1] = 0; + return (EAGAIN); + } + + /* + * Increment the count of procs running with this uid. Don't allow + * a nonprivileged user to exceed their current limit. 
+ */
+ count = chgproccnt(uid, 1);
+ if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
+ (void)chgproccnt(uid, -1);
+ return (EAGAIN);
+ }
+
+ /* The newly created process comes with signal lock held */
+ newth = cloneproc(p1, 1);
+ thread_dup(current_act(), newth);
+ /* p2 = newth->task->proc; */
+ p2 = (struct proc *)(get_bsdtask_info(get_threadtask(newth)));
+
+ thread_set_child(newth, p2->p_pid);
+
+ s = splhigh();
+ p2->p_stats->p_start = time;
+ splx(s);
+ p2->p_acflag = AFORK;
+
+ /*
+ * Preserve synchronization semantics of vfork. If waiting for
+ * child to exec or exit, set P_PPWAIT on child, and sleep on our
+ * proc (in case of exit).
+ */
+ if (flags == DOVFORK)
+ p2->p_flag |= P_PPWAIT;
+ /* drop the signal lock on the child */
+ signal_unlock(p2);
+
+ (void) thread_resume(newth);
+
+ /* drop the extra references we got during the creation */
+ if (t = get_threadtask(newth)) {
+ task_deallocate(t);
+ }
+ act_deallocate(newth);
+
+ while (p2->p_flag & P_PPWAIT)
+ tsleep(p1, PWAIT, "ppwait", 0);
+
+ retval[0] = p2->p_pid;
+ retval[1] = 0; /* mark parent */
+
+ return (0);
+}
+
+/*
+ * cloneproc()
+ *
+ * Create a new process from a specified process.
+ * On return newly created child process has signal
+ * lock held to block delivery of signal to it if called with
+ * lock set. fork() code needs to explicitly remove this lock
+ * before signals can be delivered
+ */
+thread_t
+cloneproc(p1, lock)
+ register struct proc *p1;
+ register int lock;
+{
+ register struct proc *p2, *newproc;
+ static int nextpid = 0, pidchecked = 0;
+ thread_t th;
+
+ /* Allocate new proc.
*/ + MALLOC_ZONE(newproc, struct proc *, + sizeof *newproc, M_PROC, M_WAITOK); + MALLOC_ZONE(newproc->p_cred, struct pcred *, + sizeof *newproc->p_cred, M_SUBPROC, M_WAITOK); + MALLOC_ZONE(newproc->p_stats, struct pstats *, + sizeof *newproc->p_stats, M_SUBPROC, M_WAITOK); + MALLOC_ZONE(newproc->p_sigacts, struct sigacts *, + sizeof *newproc->p_sigacts, M_SUBPROC, M_WAITOK); + + /* + * Find an unused process ID. We remember a range of unused IDs + * ready to use (from nextpid+1 through pidchecked-1). + */ + nextpid++; +retry: + /* + * If the process ID prototype has wrapped around, + * restart somewhat above 0, as the low-numbered procs + * tend to include daemons that don't exit. + */ + if (nextpid >= PID_MAX) { + nextpid = 100; + pidchecked = 0; + } + if (nextpid >= pidchecked) { + int doingzomb = 0; + + pidchecked = PID_MAX; + /* + * Scan the active and zombie procs to check whether this pid + * is in use. Remember the lowest pid that's greater + * than nextpid, so we can avoid checking for a while. + */ + p2 = allproc.lh_first; +again: + for (; p2 != 0; p2 = p2->p_list.le_next) { + while (p2->p_pid == nextpid || + p2->p_pgrp->pg_id == nextpid) { + nextpid++; + if (nextpid >= pidchecked) + goto retry; + } + if (p2->p_pid > nextpid && pidchecked > p2->p_pid) + pidchecked = p2->p_pid; + if (p2->p_pgrp && p2->p_pgrp->pg_id > nextpid && + pidchecked > p2->p_pgrp->pg_id) + pidchecked = p2->p_pgrp->pg_id; + } + if (!doingzomb) { + doingzomb = 1; + p2 = zombproc.lh_first; + goto again; + } + } + + nprocs++; + p2 = newproc; + p2->p_stat = SIDL; + p2->p_pid = nextpid; + + /* + * Make a proc table entry for the new process. + * Start by zeroing the section of proc that is zero-initialized, + * then copy the section that is copied directly from the parent. 
+ */ + bzero(&p2->p_startzero, + (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero)); + bcopy(&p1->p_startcopy, &p2->p_startcopy, + (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy)); + p2->vm_shm = (void *)NULL; /* Make sure it is zero */ + + /* + * Duplicate sub-structures as needed. + * Increase reference counts on shared objects. + * The p_stats and p_sigacts substructs are set in vm_fork. + */ + p2->p_flag = P_INMEM; + if (p1->p_flag & P_PROFIL) + startprofclock(p2); + bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred)); + p2->p_cred->p_refcnt = 1; + crhold(p1->p_ucred); + lockinit(&p2->p_cred->pc_lock, PLOCK, "proc cred", 0, 0); + + /* bump references to the text vnode (for procfs) */ + p2->p_textvp = p1->p_textvp; + if (p2->p_textvp) + VREF(p2->p_textvp); + + p2->p_fd = fdcopy(p1); + if (p1->vm_shm) { + shmfork(p1,p2); + } + /* + * If p_limit is still copy-on-write, bump refcnt, + * otherwise get a copy that won't be modified. + * (If PL_SHAREMOD is clear, the structure is shared + * copy-on-write.) 
+ */ + if (p1->p_limit->p_lflags & PL_SHAREMOD) + p2->p_limit = limcopy(p1->p_limit); + else { + p2->p_limit = p1->p_limit; + p2->p_limit->p_refcnt++; + } + + bzero(&p2->p_stats->pstat_startzero, + (unsigned) ((caddr_t)&p2->p_stats->pstat_endzero - + (caddr_t)&p2->p_stats->pstat_startzero)); + bcopy(&p1->p_stats->pstat_startcopy, &p2->p_stats->pstat_startcopy, + ((caddr_t)&p2->p_stats->pstat_endcopy - + (caddr_t)&p2->p_stats->pstat_startcopy)); + + if (p1->p_sigacts != NULL) + (void)memcpy(p2->p_sigacts, + p1->p_sigacts, sizeof *p2->p_sigacts); + else + (void)memset(p2->p_sigacts, 0, sizeof *p2->p_sigacts); + + if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT) + p2->p_flag |= P_CONTROLT; + + p2->p_xstat = 0; + p2->p_ru = NULL; + + p2->p_debugger = 0; /* don't inherit */ + lockinit(&p2->signal_lock, PVM, "signal", 0, 0); + /* block all signals to reach the process */ + if (lock) + signal_lock(p2); + p2->sigwait = FALSE; + p2->sigwait_thread = NULL; + p2->exit_thread = NULL; + p2->user_stack = p1->user_stack; + p2->p_sigpending = 0; + +#if KTRACE + /* + * Copy traceflag and tracefile if enabled. + * If not inherited, these were zeroed above. + */ + if (p1->p_traceflag&KTRFAC_INHERIT) { + p2->p_traceflag = p1->p_traceflag; + if ((p2->p_tracep = p1->p_tracep) != NULL) + VREF(p2->p_tracep); + } +#endif + + th = procdup(p2, p1); /* child, parent */ + LIST_INSERT_AFTER(p1, p2, p_pglist); + p2->p_pptr = p1; + LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling); + LIST_INIT(&p2->p_children); + LIST_INSERT_HEAD(&allproc, p2, p_list); + LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); + TAILQ_INIT(&p2->p_evlist); + /* + * Make child runnable, set start time. 
+ */ + p2->p_stat = SRUN; + + return(th); +} + +#include + +struct zone *uthread_zone; +int uthread_zone_inited = 0; + +void +uthread_zone_init() +{ + if (!uthread_zone_inited) { + uthread_zone = zinit(sizeof(struct uthread), + THREAD_MAX * sizeof(struct uthread), + THREAD_CHUNK * sizeof(struct uthread), + "uthreads"); + uthread_zone_inited = 1; + } +} + +void * +uthread_alloc(void) +{ + void *ut; + + if (!uthread_zone_inited) + uthread_zone_init(); + + ut = (void *)zalloc(uthread_zone); + bzero(ut, sizeof(struct uthread)); + return (ut); +} + +void +uthread_free(void *uthread) +{ + struct _select *sel; + struct uthread *uth = (struct uthread *)uthread; + + sel = &uth->uu_state.ss_select; + /* cleanup the select bit space */ + if (sel->nbytes) { + FREE(sel->ibits, M_TEMP); + FREE(sel->obits, M_TEMP); + } + + /* and free the uthread itself */ + zfree(uthread_zone, (vm_offset_t)uthread); +} diff --git a/bsd/kern/kern_ktrace.c b/bsd/kern/kern_ktrace.c new file mode 100644 index 000000000..c88bbf81a --- /dev/null +++ b/bsd/kern/kern_ktrace.c @@ -0,0 +1,501 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93 + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if KTRACE + +struct ktr_header * +ktrgetheader(type) + int type; +{ + register struct ktr_header *kth; + struct proc *p = current_proc(); /* XXX */ + + MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header), + M_TEMP, M_WAITOK); + kth->ktr_type = type; + microtime(&kth->ktr_time); + kth->ktr_pid = p->p_pid; + bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN); + return (kth); +} + +void +ktrsyscall(vp, code, argsize, args) + struct vnode *vp; + register_t code; + size_t argsize; + register_t args[]; +{ + struct ktr_header *kth; + struct ktr_syscall *ktp; + register len = sizeof(struct ktr_syscall) + argsize; + struct proc *p = current_proc(); /* XXX */ + register_t *argp; + int i; + + p->p_traceflag |= KTRFAC_ACTIVE; + kth = ktrgetheader(KTR_SYSCALL); + MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK); + ktp->ktr_code = code; + ktp->ktr_argsize = argsize; + argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall)); + for (i = 0; i < (argsize / sizeof *argp); i++) + *argp++ = args[i]; + kth->ktr_buf = (caddr_t)ktp; + kth->ktr_len = len; + ktrwrite(vp, kth); + FREE(ktp, M_TEMP); + FREE(kth, M_TEMP); + p->p_traceflag &= ~KTRFAC_ACTIVE; +} + +void +ktrsysret(vp, code, error, retval) + struct vnode *vp; + register_t code; + int error; + register_t retval; +{ + struct ktr_header *kth; + struct ktr_sysret ktp; + struct proc *p = current_proc(); /* XXX */ + + p->p_traceflag |= KTRFAC_ACTIVE; + kth = ktrgetheader(KTR_SYSRET); + ktp.ktr_code = code; + ktp.ktr_error = error; + ktp.ktr_retval = retval; /* what about val2 ? 
*/ + + kth->ktr_buf = (caddr_t)&ktp; + kth->ktr_len = sizeof(struct ktr_sysret); + + ktrwrite(vp, kth); + FREE(kth, M_TEMP); + p->p_traceflag &= ~KTRFAC_ACTIVE; +} + +void +ktrnamei(vp, path) + struct vnode *vp; + char *path; +{ + struct ktr_header *kth; + struct proc *p = current_proc(); /* XXX */ + + p->p_traceflag |= KTRFAC_ACTIVE; + kth = ktrgetheader(KTR_NAMEI); + kth->ktr_len = strlen(path); + kth->ktr_buf = path; + + ktrwrite(vp, kth); + FREE(kth, M_TEMP); + p->p_traceflag &= ~KTRFAC_ACTIVE; +} + +void +ktrgenio(vp, fd, rw, iov, len, error) + struct vnode *vp; + int fd; + enum uio_rw rw; + register struct iovec *iov; + int len, error; +{ + struct ktr_header *kth; + register struct ktr_genio *ktp; + register caddr_t cp; + register int resid = len, cnt; + struct proc *p = current_proc(); /* XXX */ + + if (error) + return; + p->p_traceflag |= KTRFAC_ACTIVE; + kth = ktrgetheader(KTR_GENIO); + MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len, + M_TEMP, M_WAITOK); + ktp->ktr_fd = fd; + ktp->ktr_rw = rw; + cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio)); + while (resid > 0) { + if ((cnt = iov->iov_len) > resid) + cnt = resid; + if (copyin(iov->iov_base, cp, (unsigned)cnt)) + goto done; + cp += cnt; + resid -= cnt; + iov++; + } + kth->ktr_buf = (caddr_t)ktp; + kth->ktr_len = sizeof (struct ktr_genio) + len; + + ktrwrite(vp, kth); +done: + FREE(kth, M_TEMP); + FREE(ktp, M_TEMP); + p->p_traceflag &= ~KTRFAC_ACTIVE; +} + +void +ktrpsig(vp, sig, action, mask, code) + struct vnode *vp; + int sig; + sig_t action; + int mask, code; +{ + struct ktr_header *kth; + struct ktr_psig kp; + struct proc *p = current_proc(); /* XXX */ + + p->p_traceflag |= KTRFAC_ACTIVE; + kth = ktrgetheader(KTR_PSIG); + kp.signo = (char)sig; + kp.action = action; + kp.mask = mask; + kp.code = code; + kth->ktr_buf = (caddr_t)&kp; + kth->ktr_len = sizeof (struct ktr_psig); + + ktrwrite(vp, kth); + FREE(kth, M_TEMP); + p->p_traceflag &= ~KTRFAC_ACTIVE; +} + +void +ktrcsw(vp, 
out, user) + struct vnode *vp; + int out, user; +{ + struct ktr_header *kth; + struct ktr_csw kc; + struct proc *p = current_proc(); /* XXX */ + + p->p_traceflag |= KTRFAC_ACTIVE; + kth = ktrgetheader(KTR_CSW); + kc.out = out; + kc.user = user; + kth->ktr_buf = (caddr_t)&kc; + kth->ktr_len = sizeof (struct ktr_csw); + + ktrwrite(vp, kth); + FREE(kth, M_TEMP); + p->p_traceflag &= ~KTRFAC_ACTIVE; +} + +/* Interface and common routines */ + +/* + * ktrace system call + */ +struct ktrace_args { + char * fname; + int ops; + int facs; + int pid; +}; +/* ARGSUSED */ +int +ktrace(curp, uap, retval) + struct proc *curp; + register struct ktrace_args *uap; + register_t *retval; +{ + register struct vnode *vp = NULL; + register struct proc *p; + struct pgrp *pg; + int facs = SCARG(uap, facs) & ~KTRFAC_ROOT; + int ops = KTROP(SCARG(uap, ops)); + int descend = SCARG(uap, ops) & KTRFLAG_DESCEND; + int ret = 0; + int error = 0; + struct nameidata nd; + + curp->p_traceflag |= KTRFAC_ACTIVE; + if (ops != KTROP_CLEAR) { + /* + * an operation which requires a file argument. + */ + NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname), + curp); + if (error = vn_open(&nd, FREAD|FWRITE, 0)) { + curp->p_traceflag &= ~KTRFAC_ACTIVE; + return (error); + } + vp = nd.ni_vp; + VOP_UNLOCK(vp, 0, p); + if (vp->v_type != VREG) { + (void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp); + curp->p_traceflag &= ~KTRFAC_ACTIVE; + return (EACCES); + } + } + /* + * Clear all uses of the tracefile + */ + if (ops == KTROP_CLEARFILE) { + for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { + if (p->p_tracep == vp) { + if (ktrcanset(curp, p)) { + p->p_tracep = NULL; + p->p_traceflag = 0; + (void) vn_close(vp, FREAD|FWRITE, + p->p_ucred, p); + } else + error = EPERM; + } + } + goto done; + } + /* + * need something to (un)trace (XXX - why is this here?) 
+ */ + if (!facs) { + error = EINVAL; + goto done; + } + /* + * do it + */ + if (SCARG(uap, pid) < 0) { + /* + * by process group + */ + pg = pgfind(-SCARG(uap, pid)); + if (pg == NULL) { + error = ESRCH; + goto done; + } + for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) + if (descend) + ret |= ktrsetchildren(curp, p, ops, facs, vp); + else + ret |= ktrops(curp, p, ops, facs, vp); + + } else { + /* + * by pid + */ + p = pfind(SCARG(uap, pid)); + if (p == NULL) { + error = ESRCH; + goto done; + } + if (descend) + ret |= ktrsetchildren(curp, p, ops, facs, vp); + else + ret |= ktrops(curp, p, ops, facs, vp); + } + if (!ret) + error = EPERM; +done: + if (vp != NULL) + (void) vn_close(vp, FWRITE, curp->p_ucred, curp); + curp->p_traceflag &= ~KTRFAC_ACTIVE; + return (error); +} + +int +ktrops(curp, p, ops, facs, vp) + struct proc *p, *curp; + int ops, facs; + struct vnode *vp; +{ + + if (!ktrcanset(curp, p)) + return (0); + if (ops == KTROP_SET) { + if (p->p_tracep != vp) { + /* + * if trace file already in use, relinquish + */ + if (p->p_tracep != NULL) + vrele(p->p_tracep); + VREF(vp); + p->p_tracep = vp; + } + p->p_traceflag |= facs; + if (curp->p_ucred->cr_uid == 0) + p->p_traceflag |= KTRFAC_ROOT; + } else { + /* KTROP_CLEAR */ + if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) { + /* no more tracing */ + p->p_traceflag = 0; + if (p->p_tracep != NULL) { + vrele(p->p_tracep); + p->p_tracep = NULL; + } + } + } + + return (1); +} + +ktrsetchildren(curp, top, ops, facs, vp) + struct proc *curp, *top; + int ops, facs; + struct vnode *vp; +{ + register struct proc *p; + register int ret = 0; + + p = top; + for (;;) { + ret |= ktrops(curp, p, ops, facs, vp); + /* + * If this process has children, descend to them next, + * otherwise do any siblings, and if done with this level, + * follow back up the tree (but not past top). 
+ */ + if (p->p_children.lh_first) + p = p->p_children.lh_first; + else for (;;) { + if (p == top) + return (ret); + if (p->p_sibling.le_next) { + p = p->p_sibling.le_next; + break; + } + p = p->p_pptr; + } + } + /*NOTREACHED*/ +} + +ktrwrite(vp, kth) + struct vnode *vp; + register struct ktr_header *kth; +{ + struct uio auio; + struct iovec aiov[2]; + register struct proc *p = current_proc(); /* XXX */ + int error; + + if (vp == NULL) + return; + auio.uio_iov = &aiov[0]; + auio.uio_offset = 0; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_WRITE; + aiov[0].iov_base = (caddr_t)kth; + aiov[0].iov_len = sizeof(struct ktr_header); + auio.uio_resid = sizeof(struct ktr_header); + auio.uio_iovcnt = 1; + auio.uio_procp = (struct proc *)0; + if (kth->ktr_len > 0) { + auio.uio_iovcnt++; + aiov[1].iov_base = kth->ktr_buf; + aiov[1].iov_len = kth->ktr_len; + auio.uio_resid += kth->ktr_len; + } + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred); + VOP_UNLOCK(vp, 0, p); + if (!error) + return; + /* + * If error encountered, give up tracing on this vnode. + */ + log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n", + error); + for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { + if (p->p_tracep == vp) { + p->p_tracep = NULL; + p->p_traceflag = 0; + vrele(vp); + } + } +} + +/* + * Return true if caller has permission to set the ktracing state + * of target. Essentially, the target can't possess any + * more permissions than the caller. KTRFAC_ROOT signifies that + * root previously set the tracing status on the target process, and + * so, only root may further change it. + * + * TODO: check groups. use caller effective gid. 
+ */ +ktrcanset(callp, targetp) + struct proc *callp, *targetp; +{ + register struct pcred *caller = callp->p_cred; + register struct pcred *target = targetp->p_cred; + + if ((caller->pc_ucred->cr_uid == target->p_ruid && + target->p_ruid == target->p_svuid && + caller->p_rgid == target->p_rgid && /* XXX */ + target->p_rgid == target->p_svgid && + (targetp->p_traceflag & KTRFAC_ROOT) == 0) || + caller->pc_ucred->cr_uid == 0) + return (1); + + return (0); +} + +#endif diff --git a/bsd/kern/kern_lock.c b/bsd/kern/kern_lock.c new file mode 100644 index 000000000..117407d6d --- /dev/null +++ b/bsd/kern/kern_lock.c @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1995 + * The Regents of the University of California. All rights reserved. + * + * This code contains ideas from software contributed to Berkeley by + * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating + * System project at Carnegie-Mellon University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95 + */ + +#include +#include +#include +#include +#include + +#include + +/* + * Locking primitives implementation. + * Locks provide shared/exclusive sychronization. 
+ */ + +#if 0 +#define COUNT(p, x) if (p) (p)->p_locks += (x) +#else +#define COUNT(p, x) +#endif + +#if NCPUS > 1 + +/* + * For multiprocessor system, try spin lock first. + * + * This should be inline expanded below, but we cannot have #if + * inside a multiline define. + */ +int lock_wait_time = 100; +#define PAUSE(lkp, wanted) \ + if (lock_wait_time > 0) { \ + int i; \ + \ + simple_unlock(&lkp->lk_interlock); \ + for (i = lock_wait_time; i > 0; i--) \ + if (!(wanted)) \ + break; \ + simple_lock(&lkp->lk_interlock); \ + } \ + if (!(wanted)) \ + break; + +#else /* NCPUS == 1 */ + +/* + * It is an error to spin on a uniprocessor as nothing will ever cause + * the simple lock to clear while we are executing. + */ +#define PAUSE(lkp, wanted) + +#endif /* NCPUS == 1 */ + +/* + * Acquire a resource. + */ +#define ACQUIRE(lkp, error, extflags, wanted) \ + PAUSE(lkp, wanted); \ + for (error = 0; wanted; ) { \ + (lkp)->lk_waitcount++; \ + simple_unlock(&(lkp)->lk_interlock); \ + error = tsleep((void *)lkp, (lkp)->lk_prio, \ + (lkp)->lk_wmesg, (lkp)->lk_timo); \ + simple_lock(&(lkp)->lk_interlock); \ + (lkp)->lk_waitcount--; \ + if (error) \ + break; \ + if ((extflags) & LK_SLEEPFAIL) { \ + error = ENOLCK; \ + break; \ + } \ + } + +/* + * Initialize a lock; required before use. + */ +void +lockinit(lkp, prio, wmesg, timo, flags) + struct lock__bsd__ *lkp; + int prio; + char *wmesg; + int timo; + int flags; +{ + + bzero(lkp, sizeof(struct lock__bsd__)); + simple_lock_init(&lkp->lk_interlock); + lkp->lk_flags = flags & LK_EXTFLG_MASK; + lkp->lk_prio = prio; + lkp->lk_timo = timo; + lkp->lk_wmesg = wmesg; + lkp->lk_lockholder = LK_NOPROC; + lkp->lk_lockthread = 0; +} + +/* + * Determine the status of a lock. 
+ */ +int +lockstatus(lkp) + struct lock__bsd__ *lkp; +{ + int lock_type = 0; + + simple_lock(&lkp->lk_interlock); + if (lkp->lk_exclusivecount != 0) + lock_type = LK_EXCLUSIVE; + else if (lkp->lk_sharecount != 0) + lock_type = LK_SHARED; + simple_unlock(&lkp->lk_interlock); + return (lock_type); +} + +/* + * Set, change, or release a lock. + * + * Shared requests increment the shared count. Exclusive requests set the + * LK_WANT_EXCL flag (preventing further shared locks), and wait for already + * accepted shared locks and shared-to-exclusive upgrades to go away. + */ +int +lockmgr(lkp, flags, interlkp, p) + struct lock__bsd__ *lkp; + u_int flags; + simple_lock_t interlkp; + struct proc *p; +{ + int error; + pid_t pid; + int extflags; + void *self; + + error = 0; self = current_thread(); + if (p) + pid = p->p_pid; + else + pid = LK_KERNPROC; + simple_lock(&lkp->lk_interlock); + if (flags & LK_INTERLOCK) + simple_unlock(interlkp); + extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; +#if 0 + /* + * Once a lock has drained, the LK_DRAINING flag is set and an + * exclusive lock is returned. The only valid operation thereafter + * is a single release of that exclusive lock. This final release + * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any + * further requests of any sort will result in a panic. The bits + * selected for these two flags are chosen so that they will be set + * in memory that is freed (freed memory is filled with 0xdeadbeef). + * The final release is permitted to give a new lease on life to + * the lock by specifying LK_REENABLE. 
+ */ + if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) { + if (lkp->lk_flags & LK_DRAINED) + panic("lockmgr: using decommissioned lock"); + if ((flags & LK_TYPE_MASK) != LK_RELEASE || + (lkp->lk_lockholder != pid && lkp->lk_lockthread != self) + panic("lockmgr: non-release on draining lock: %d\n", + flags & LK_TYPE_MASK); + lkp->lk_flags &= ~LK_DRAINING; + if ((flags & LK_REENABLE) == 0) + lkp->lk_flags |= LK_DRAINED; + } +#endif + + switch (flags & LK_TYPE_MASK) { + + case LK_SHARED: + if (lkp->lk_lockholder != pid || lkp->lk_lockthread != self) { + /* + * If just polling, check to see if we will block. + */ + if ((extflags & LK_NOWAIT) && (lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) { + error = EBUSY; + break; + } + /* + * Wait for exclusive locks and upgrades to clear. + */ + ACQUIRE(lkp, error, extflags, lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)); + if (error) + break; + lkp->lk_sharecount++; + COUNT(p, 1); + break; + } + /* + * We hold an exclusive lock, so downgrade it to shared. + * An alternative would be to fail with EDEADLK. + */ + lkp->lk_sharecount++; + COUNT(p, 1); + /* fall into downgrade */ + + case LK_DOWNGRADE: + if (lkp->lk_lockholder != pid || + lkp->lk_lockthread != self || + lkp->lk_exclusivecount == 0) + panic("lockmgr: not holding exclusive lock"); + lkp->lk_sharecount += lkp->lk_exclusivecount; + lkp->lk_exclusivecount = 0; + lkp->lk_flags &= ~LK_HAVE_EXCL; + lkp->lk_lockholder = LK_NOPROC; + lkp->lk_lockthread = 0; + if (lkp->lk_waitcount) + wakeup((void *)lkp); + break; + + case LK_EXCLUPGRADE: + /* + * If another process is ahead of us to get an upgrade, + * then we want to fail rather than have an intervening + * exclusive access. + */ + if (lkp->lk_flags & LK_WANT_UPGRADE) { + lkp->lk_sharecount--; + COUNT(p, -1); + error = EBUSY; + break; + } + /* fall into normal upgrade */ + + case LK_UPGRADE: + /* + * Upgrade a shared lock to an exclusive one. 
If another + * shared lock has already requested an upgrade to an + * exclusive lock, our shared lock is released and an + * exclusive lock is requested (which will be granted + * after the upgrade). If we return an error, the file + * will always be unlocked. + */ + if ((lkp->lk_lockholder == pid && + lkp->lk_lockthread == self) || + lkp->lk_sharecount <= 0) + panic("lockmgr: upgrade exclusive lock"); + lkp->lk_sharecount--; + COUNT(p, -1); + /* + * If we are just polling, check to see if we will block. + */ + if ((extflags & LK_NOWAIT) && + ((lkp->lk_flags & LK_WANT_UPGRADE) || + lkp->lk_sharecount > 1)) { + error = EBUSY; + break; + } + if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) { + /* + * We are first shared lock to request an upgrade, so + * request upgrade and wait for the shared count to + * drop to zero, then take exclusive lock. + */ + lkp->lk_flags |= LK_WANT_UPGRADE; + ACQUIRE(lkp, error, extflags, lkp->lk_sharecount); + lkp->lk_flags &= ~LK_WANT_UPGRADE; + if (error) + break; + lkp->lk_flags |= LK_HAVE_EXCL; + lkp->lk_lockholder = pid; + lkp->lk_lockthread = self; + if (lkp->lk_exclusivecount != 0) + panic("lockmgr: non-zero exclusive count"); + lkp->lk_exclusivecount = 1; + COUNT(p, 1); + break; + } + /* + * Someone else has requested upgrade. Release our shared + * lock, awaken upgrade requestor if we are the last shared + * lock, then request an exclusive lock. + */ + if (lkp->lk_sharecount == 0 && lkp->lk_waitcount) + wakeup((void *)lkp); + /* fall into exclusive request */ + + case LK_EXCLUSIVE: + if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self) { + /* + * Recursive lock. + */ + if ((extflags & LK_CANRECURSE) == 0) + panic("lockmgr: locking against myself"); + lkp->lk_exclusivecount++; + COUNT(p, 1); + break; + } + /* + * If we are just polling, check to see if we will sleep. 
+ */ + if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || + lkp->lk_sharecount != 0)) { + error = EBUSY; + break; + } + /* + * Try to acquire the want_exclusive flag. + */ + ACQUIRE(lkp, error, extflags, lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL)); + if (error) + break; + lkp->lk_flags |= LK_WANT_EXCL; + /* + * Wait for shared locks and upgrades to finish. + */ + ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 || + (lkp->lk_flags & LK_WANT_UPGRADE)); + lkp->lk_flags &= ~LK_WANT_EXCL; + if (error) + break; + lkp->lk_flags |= LK_HAVE_EXCL; + lkp->lk_lockholder = pid; + lkp->lk_lockthread = self; + if (lkp->lk_exclusivecount != 0) + panic("lockmgr: non-zero exclusive count"); + lkp->lk_exclusivecount = 1; + COUNT(p, 1); + break; + + case LK_RELEASE: + if (lkp->lk_exclusivecount != 0) { + if (pid != lkp->lk_lockholder || + lkp->lk_lockthread != self) + panic("lockmgr: pid %d, not %s %d unlocking", + pid, "exclusive lock holder", + lkp->lk_lockholder); + lkp->lk_exclusivecount--; + COUNT(p, -1); + if (lkp->lk_exclusivecount == 0) { + lkp->lk_flags &= ~LK_HAVE_EXCL; + lkp->lk_lockholder = LK_NOPROC; + lkp->lk_lockthread = 0; + } + } else if (lkp->lk_sharecount != 0) { + lkp->lk_sharecount--; + COUNT(p, -1); + } + if (lkp->lk_waitcount) + wakeup((void *)lkp); + break; + + case LK_DRAIN: + /* + * Check that we do not already hold the lock, as it can + * never drain if we do. Unfortunately, we have no way to + * check for holding a shared lock, but at least we can + * check for an exclusive one. + */ + if (lkp->lk_lockholder == pid && lkp->lk_lockthread == self) + panic("lockmgr: draining against myself"); + /* + * If we are just polling, check to see if we will sleep. 
+ */ + if ((extflags & LK_NOWAIT) && ((lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || + lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) { + error = EBUSY; + break; + } + PAUSE(lkp, ((lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || + lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)); + for (error = 0; ((lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) || + lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) { + lkp->lk_flags |= LK_WAITDRAIN; + simple_unlock(&lkp->lk_interlock); + if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio, + lkp->lk_wmesg, lkp->lk_timo)) + return (error); + if ((extflags) & LK_SLEEPFAIL) + return (ENOLCK); + simple_lock(&lkp->lk_interlock); + } + lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL; + lkp->lk_lockholder = pid; + lkp->lk_lockthread = self; + lkp->lk_exclusivecount = 1; + COUNT(p, 1); + break; + + default: + simple_unlock(&lkp->lk_interlock); + panic("lockmgr: unknown locktype request %d", + flags & LK_TYPE_MASK); + /* NOTREACHED */ + } + if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags & + (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 && + lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) { + lkp->lk_flags &= ~LK_WAITDRAIN; + wakeup((void *)&lkp->lk_flags); + } + simple_unlock(&lkp->lk_interlock); + return (error); +} + +/* + * Print out information about state of a lock. Used by VOP_PRINT + * routines to display ststus about contained locks. 
+ */ +lockmgr_printinfo(lkp) + struct lock__bsd__ *lkp; +{ + + if (lkp->lk_sharecount) + printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg, + lkp->lk_sharecount); + else if (lkp->lk_flags & LK_HAVE_EXCL) + printf(" lock type %s: EXCL (count %d) by pid %d", + lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder); + if (lkp->lk_waitcount > 0) + printf(" with %d pending", lkp->lk_waitcount); +} diff --git a/bsd/kern/kern_malloc.c b/bsd/kern/kern_malloc.c new file mode 100644 index 000000000..24086441b --- /dev/null +++ b/bsd/kern/kern_malloc.c @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1987, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kern_malloc.c 8.4 (Berkeley) 5/20/95 + */ + +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include + +struct kmemstats kmemstats[M_LAST]; +char *memname[] = INITKMEMNAMES; + +struct kmzones { + size_t kz_elemsize; + void *kz_zalloczone; +#define KMZ_CREATEZONE ((void *)-2) +#define KMZ_LOOKUPZONE ((void *)-1) +#define KMZ_MALLOC ((void *)0) +#define KMZ_SHAREZONE ((void *)1) +} kmzones[M_LAST] = { +#define SOS(sname) sizeof (struct sname) +#define SOX(sname) -1 + -1, 0, /* 0 M_FREE */ + MSIZE, KMZ_CREATEZONE, /* 1 M_MBUF */ + 0, KMZ_MALLOC, /* 2 M_DEVBUF */ + SOS(socket), KMZ_CREATEZONE, /* 3 M_SOCKET */ + SOS(inpcb), KMZ_LOOKUPZONE, /* 4 M_PCB */ + M_MBUF, KMZ_SHAREZONE, /* 5 M_RTABLE */ + M_MBUF, KMZ_SHAREZONE, /* 6 M_HTABLE */ + M_MBUF, KMZ_SHAREZONE, /* 7 M_FTABLE */ + SOS(rusage), KMZ_CREATEZONE, /* 8 M_ZOMBIE */ + 0, KMZ_MALLOC, /* 9 M_IFADDR */ + M_MBUF, KMZ_SHAREZONE, /* 10 M_SOOPTS */ + 0, KMZ_MALLOC, /* 11 M_SONAME */ + MAXPATHLEN, KMZ_CREATEZONE, /* 12 M_NAMEI */ + 0, KMZ_MALLOC, /* 13 M_GPROF */ + 0, KMZ_MALLOC, /* 14 M_IOCTLOPS */ + 0, KMZ_MALLOC, /* 15 M_MAPMEM */ + SOS(ucred), KMZ_CREATEZONE, /* 16 M_CRED */ + SOS(pgrp), KMZ_CREATEZONE, /* 17 M_PGRP */ + SOS(session), KMZ_CREATEZONE, /* 18 M_SESSION */ + SOS(iovec), KMZ_LOOKUPZONE, /* 19 M_IOV */ + SOS(mount), KMZ_CREATEZONE, /* 20 M_MOUNT */ + 0, KMZ_MALLOC, /* 21 M_FHANDLE */ + SOS(nfsreq), KMZ_CREATEZONE, /* 22 M_NFSREQ */ + SOS(nfsmount), KMZ_CREATEZONE, /* 23 M_NFSMNT */ + SOS(nfsnode), KMZ_CREATEZONE, /* 24 M_NFSNODE */ + SOS(vnode), KMZ_CREATEZONE, /* 25 M_VNODE */ + SOS(namecache), KMZ_CREATEZONE, /* 26 M_CACHE */ + SOX(dquot), KMZ_LOOKUPZONE, /* 27 M_DQUOT */ + SOX(ufsmount), KMZ_LOOKUPZONE, /* 28 M_UFSMNT */ + 0, KMZ_MALLOC, /* 
29 M_CGSUM */ + 0, KMZ_MALLOC, /* 30 M_VMMAP */ + 0, KMZ_MALLOC, /* 31 M_VMMAPENT */ + 0, KMZ_MALLOC, /* 32 M_VMOBJ */ + 0, KMZ_MALLOC, /* 33 M_VMOBJHASH */ + 0, KMZ_MALLOC, /* 34 M_VMPMAP */ + 0, KMZ_MALLOC, /* 35 M_VMPVENT */ + 0, KMZ_MALLOC, /* 36 M_VMPAGER */ + 0, KMZ_MALLOC, /* 37 M_VMPGDATA */ + SOS(file), KMZ_CREATEZONE, /* 38 M_FILE */ + SOS(filedesc), KMZ_CREATEZONE, /* 39 M_FILEDESC */ + SOX(lockf), KMZ_CREATEZONE, /* 40 M_LOCKF */ + SOS(proc), KMZ_CREATEZONE, /* 41 M_PROC */ + SOS(pcred), KMZ_CREATEZONE, /* 42 M_SUBPROC */ + 0, KMZ_MALLOC, /* 43 M_SEGMENT */ + M_FFSNODE, KMZ_SHAREZONE, /* 44 M_LFSNODE */ + SOS(inode), KMZ_CREATEZONE, /* 45 M_FFSNODE */ + M_FFSNODE, KMZ_SHAREZONE, /* 46 M_MFSNODE */ + SOS(nqlease), KMZ_CREATEZONE, /* 47 M_NQLEASE */ + SOS(nqm), KMZ_CREATEZONE, /* 48 M_NQMHOST */ + 0, KMZ_MALLOC, /* 49 M_NETADDR */ + SOX(nfssvc_sock), + KMZ_CREATEZONE, /* 50 M_NFSSVC */ + SOS(nfsuid), KMZ_CREATEZONE, /* 51 M_NFSUID */ + SOX(nfsrvcache), + KMZ_CREATEZONE, /* 52 M_NFSD */ + SOX(ip_moptions), + KMZ_LOOKUPZONE, /* 53 M_IPMOPTS */ + SOX(in_multi), KMZ_LOOKUPZONE, /* 54 M_IPMADDR */ + SOX(ether_multi), + KMZ_LOOKUPZONE, /* 55 M_IFMADDR */ + SOX(mrt), KMZ_CREATEZONE, /* 56 M_MRTABLE */ + SOX(iso_mnt), KMZ_LOOKUPZONE, /* 57 M_ISOFSMNT */ + SOS(iso_node), KMZ_CREATEZONE, /* 58 M_ISOFSNODE */ + SOS(nfsrv_descript), + KMZ_CREATEZONE, /* 59 M_NFSRVDESC */ + SOS(nfsdmap), KMZ_CREATEZONE, /* 60 M_NFSDIROFF */ + SOS(fhandle), KMZ_LOOKUPZONE, /* 61 M_NFSBIGFH */ + 0, KMZ_MALLOC, /* 62 M_MSDOSFSMNT */ + 0, KMZ_MALLOC, /* 63 M_MSDOSFSFAT */ + 0, KMZ_MALLOC, /* 64 M_MSDOSFSNODE */ + SOS(tty), KMZ_CREATEZONE, /* 65 M_TTYS */ + 0, KMZ_MALLOC, /* 66 M_EXEC */ + 0, KMZ_MALLOC, /* 67 M_MISCFSMNT */ + 0, KMZ_MALLOC, /* 68 M_MISCFSNODE */ + 0, KMZ_MALLOC, /* 69 M_ADOSFSMNT */ + 0, KMZ_MALLOC, /* 70 M_ADOSFSNODE */ + 0, KMZ_MALLOC, /* 71 M_ANODE */ + SOX(buf), KMZ_CREATEZONE, /* 72 M_BUFHDR */ + (NDFILE * OFILESIZE), + KMZ_CREATEZONE, /* 73 M_OFILETABL */ + 
MCLBYTES, KMZ_CREATEZONE, /* 74 M_MCLUST */ + SOX(hfsmount), KMZ_LOOKUPZONE, /* 75 M_HFSMNT */ + SOS(hfsnode), KMZ_CREATEZONE, /* 76 M_HFSNODE */ + SOS(hfsfilemeta), KMZ_CREATEZONE, /* 77 M_HFSFMETA */ + SOX(volfs_mntdata), KMZ_LOOKUPZONE, /* 78 M_VOLFSMNT */ + SOS(volfs_vndata), KMZ_CREATEZONE, /* 79 M_VOLFSNODE */ + 0, KMZ_MALLOC, /* 80 M_TEMP */ + 0, KMZ_MALLOC, /* 81 M_SECA */ + 0, KMZ_MALLOC, /* 82 M_DEVFS */ + 0, KMZ_MALLOC, /* 83 M_IPFW */ + 0, KMZ_MALLOC, /* 84 M_UDFNODE */ + 0, KMZ_MALLOC, /* 85 M_UDFMOUNT */ + 0, KMZ_MALLOC, /* 86 M_IP6NDP */ + 0, KMZ_MALLOC, /* 87 M_IP6OPT */ + 0, KMZ_MALLOC, /* 88 M_NATPT */ + +#undef SOS +#undef SOX +}; + + +/* + * Initialize the kernel memory allocator + */ +void +kmeminit(void) +{ + struct kmzones *kmz; + + kmz = kmzones; + while (kmz < &kmzones[M_LAST]) { +/* XXX */ + if (kmz->kz_elemsize == -1) + ; + else +/* XXX */ + if (kmz->kz_zalloczone == KMZ_CREATEZONE) { + kmz->kz_zalloczone = zinit(kmz->kz_elemsize, + 1024 * 1024, PAGE_SIZE, + memname[kmz - kmzones]); + } + else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE) + kmz->kz_zalloczone = kalloc_zone(kmz->kz_elemsize); + + kmz++; + } + + kmz = kmzones; + while (kmz < &kmzones[M_LAST]) { +/* XXX */ + if (kmz->kz_elemsize == -1) + ; + else +/* XXX */ + if (kmz->kz_zalloczone == KMZ_SHAREZONE) { + kmz->kz_zalloczone = + kmzones[kmz->kz_elemsize].kz_zalloczone; + kmz->kz_elemsize = + kmzones[kmz->kz_elemsize].kz_elemsize; + } + + kmz++; + } +} + +#define MDECL(reqlen) \ +union { \ + struct _mhead hdr; \ + char _m[(reqlen) + sizeof (struct _mhead)]; \ +} + +struct _mhead { + size_t mlen; + char dat[0]; +}; + +void *_MALLOC( + size_t size, + int type, + int flags) +{ + MDECL(size) *mem; + size_t memsize = sizeof (*mem); + + if (type >= M_LAST) + panic("_malloc TYPE"); + + if (size == 0) + return (0); + + if (flags & M_NOWAIT) { + mem = (void *)kalloc_noblock(memsize); + } else { + mem = (void *)kalloc(memsize); + } + if (!mem) + return (0); + + mem->hdr.mlen = memsize; + + 
return (mem->hdr.dat); +} + +void _FREE( + void *addr, + int type) +{ + struct _mhead *hdr; + + if (type >= M_LAST) + panic("_free TYPE"); + + if (!addr) + return; + + hdr = addr; hdr--; + kfree((vm_offset_t)hdr, hdr->mlen); +} + +void *_MALLOC_ZONE( + size_t size, + int type, + int flags) +{ + struct kmzones *kmz; + void *elem; + + if (type >= M_LAST) + panic("_malloc_zone TYPE"); + + kmz = &kmzones[type]; + if (kmz->kz_zalloczone == KMZ_MALLOC) + panic("_malloc_zone ZONE"); + +/* XXX */ + if (kmz->kz_elemsize == -1) + panic("_malloc_zone XXX"); +/* XXX */ + if (size == kmz->kz_elemsize) + if (flags & M_NOWAIT) { + elem = (void *)zalloc_noblock(kmz->kz_zalloczone); + } else { + elem = (void *)zalloc(kmz->kz_zalloczone); + } + else + if (flags & M_NOWAIT) { + elem = (void *)kalloc_noblock(size); + } else { + elem = (void *)kalloc(size); + } + + return (elem); +} + +void _FREE_ZONE( + void *elem, + size_t size, + int type) +{ + struct kmzones *kmz; + + if (type >= M_LAST) + panic("FREE_SIZE"); + + kmz = &kmzones[type]; + if (kmz->kz_zalloczone == KMZ_MALLOC) + panic("free_zone ZONE"); + +/* XXX */ + if (kmz->kz_elemsize == -1) + panic("FREE_SIZE XXX"); +/* XXX */ + if (size == kmz->kz_elemsize) + zfree(kmz->kz_zalloczone, (vm_offset_t)elem); + else + kfree((vm_offset_t)elem, size); +} diff --git a/bsd/kern/kern_mib.c b/bsd/kern/kern_mib.c new file mode 100644 index 000000000..d6c43effe --- /dev/null +++ b/bsd/kern/kern_mib.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Mike Karels at Berkeley Software Design, Inc. + * + * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD + * project, to make these variables more userfriendly. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94 + */ + +#include +#include +#include +#include +#include +#include + +#if defined(SMP) +#include +#endif + +SYSCTL_NODE(, 0, sysctl, CTLFLAG_RW, 0, + "Sysctl internal magic"); +SYSCTL_NODE(, CTL_KERN, kern, CTLFLAG_RW, 0, + "High kernel, proc, limits &c"); +SYSCTL_NODE(, CTL_VM, vm, CTLFLAG_RW, 0, + "Virtual memory"); +SYSCTL_NODE(, CTL_VFS, vfs, CTLFLAG_RW, 0, + "File system"); +SYSCTL_NODE(, CTL_NET, net, CTLFLAG_RW, 0, + "Network, (see socket.h)"); +SYSCTL_NODE(, CTL_DEBUG, debug, CTLFLAG_RW, 0, + "Debugging"); +SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW, 0, + "hardware"); +SYSCTL_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW, 0, + "machine dependent"); +SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW, 0, + "user-level"); + diff --git a/bsd/kern/kern_mman.c b/bsd/kern/kern_mman.c new file mode 100644 index 000000000..3fd5b0650 --- /dev/null +++ b/bsd/kern/kern_mman.c @@ -0,0 +1,1163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Utah $Hdr: vm_mmap.c 1.6 91/10/21$ + * + * @(#)vm_mmap.c 8.10 (Berkeley) 2/19/95 + */ + +/* + * Mapped file (mmap) interface to VM + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +#include +#include +#include +#include + +struct sbrk_args { + int incr; +}; + +/* ARGSUSED */ +int +sbrk(p, uap, retval) + struct proc *p; + struct sbrk_args *uap; + register_t *retval; +{ + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +struct sstk_args { + int incr; +} *uap; + +/* ARGSUSED */ +int +sstk(p, uap, retval) + struct proc *p; + struct sstk_args *uap; + register_t *retval; +{ + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +#if COMPAT_43 +/* ARGSUSED */ +int +ogetpagesize(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = PAGE_SIZE; + return (0); +} +#endif /* COMPAT_43 */ + +struct osmmap_args { + caddr_t addr; + int len; + int prot; + int share; + int fd; + long pos; +}; + +osmmap(curp, uap, retval) + struct proc *curp; + register struct osmmap_args *uap; + 
register_t *retval; +{ +struct mmap_args { + caddr_t addr; + size_t len; + int prot; + int flags; + int fd; +#ifdef DOUBLE_ALIGN_PARAMS + long pad; +#endif + off_t pos; +} newargs; + + if ((uap->share == MAP_SHARED )|| (uap->share == MAP_PRIVATE )) { + newargs.addr = uap->addr; + newargs.len = (size_t)uap->len; + newargs.prot = uap->prot; + newargs.flags = uap->share; + newargs.fd = uap->fd; + newargs.pos = (off_t)uap->pos; + return(mmap(curp,&newargs, retval)); + } else + return(EINVAL); +} + +struct mmap_args { + caddr_t addr; + size_t len; + int prot; + int flags; + int fd; +#ifdef DOUBLE_ALIGN_PARAMS + long pad; +#endif + off_t pos; +}; +int +mmap(p, uap, retval) + struct proc *p; + struct mmap_args *uap; + register_t *retval; +{ + /* + * Map in special device (must be SHARED) or file + */ + struct file *fp; + register struct vnode *vp; + int flags; + int prot; + int err=0; + vm_map_t user_map; + kern_return_t result; + vm_offset_t user_addr; + vm_size_t user_size; + vm_offset_t pageoff; + vm_object_offset_t file_pos; + boolean_t find_space, docow; + vm_prot_t maxprot; + void *handle; + vm_pager_t pager; + int mapanon=0; + + user_addr = (vm_offset_t)uap->addr; + user_size = (vm_size_t) uap->len; + prot = (uap->prot & VM_PROT_ALL); + flags = uap->flags; + + /* + * The vm code does not have prototypes & compiler doesn't do the' + * the right thing when you cast 64bit value and pass it in function + * call. So here it is. + */ + file_pos = (vm_object_offset_t)uap->pos; + + + /* make sure mapping fits into numeric range etc */ + if ((file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) || + ((ssize_t) uap->len < 0 )|| + ((flags & MAP_ANON) && uap->fd != -1)) + return (EINVAL); + + /* + * Align the file position to a page boundary, + * and save its page offset component. + */ + pageoff = ((vm_offset_t)file_pos & PAGE_MASK); + file_pos -= (vm_object_offset_t)pageoff; + + + /* Adjust size for rounding (on both ends). */ + user_size += pageoff; /* low end... 
*/ + user_size = (vm_size_t) round_page(user_size); /* hi end */ + + + /* + * Check for illegal addresses. Watch out for address wrap... Note + * that VM_*_ADDRESS are not constants due to casts (argh). + */ + if (flags & MAP_FIXED) { + /* + * The specified address must have the same remainder + * as the file offset taken modulo PAGE_SIZE, so it + * should be aligned after adjustment by pageoff. + */ + user_addr -= pageoff; + if (user_addr & PAGE_MASK) + return (EINVAL); + /* Address range must be all in user VM space. */ + if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS)) + return (EINVAL); + if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS) + return (EINVAL); + if (user_addr + user_size < user_addr) + return (EINVAL); + } +#ifdef notyet + /* DO not have apis to get this info, need to wait till then*/ + /* + * XXX for non-fixed mappings where no hint is provided or + * the hint would fall in the potential heap space, + * place it after the end of the largest possible heap. + * + * There should really be a pmap call to determine a reasonable + * location. + */ + else if (addr < round_page(p->p_vmspace->vm_daddr + MAXDSIZ)) + addr = round_page(p->p_vmspace->vm_daddr + MAXDSIZ); + +#endif + + + if (flags & MAP_ANON) { + /* + * Mapping blank space is trivial. + */ + handle = NULL; + maxprot = VM_PROT_ALL; + file_pos = 0; + mapanon = 1; + } else { + /* + * Mapping file, get fp for validation. Obtain vnode and make + * sure it is of appropriate type. + */ + err = fdgetf(p, uap->fd, &fp); + if (err) + return(err); + if(fp->f_type == DTYPE_PSXSHM) { + uap->addr = user_addr; + uap->len = user_size; + uap->prot = prot; + uap->flags = flags; + uap->pos = file_pos; + return(pshm_mmap(p, uap, retval, fp , pageoff)); + } + + if (fp->f_type != DTYPE_VNODE) + return(EINVAL); + vp = (struct vnode *)fp->f_data; + + if (vp->v_type != VREG && vp->v_type != VCHR) + return (EINVAL); + /* + * XXX hack to handle use of /dev/zero to map anon memory (ala + * SunOS). 
+ */ + if (vp->v_type == VCHR || vp->v_type == VSTR) { + return(EOPNOTSUPP); + } else { + /* + * Ensure that file and memory protections are + * compatible. Note that we only worry about + * writability if mapping is shared; in this case, + * current and max prot are dictated by the open file. + * XXX use the vnode instead? Problem is: what + * credentials do we use for determination? What if + * proc does a setuid? + */ + maxprot = VM_PROT_EXECUTE; /* ??? */ + if (fp->f_flag & FREAD) + maxprot |= VM_PROT_READ; + else if (prot & PROT_READ) + return (EACCES); + /* + * If we are sharing potential changes (either via + * MAP_SHARED or via the implicit sharing of character + * device mappings), and we are trying to get write + * permission although we opened it without asking + * for it, bail out. + */ + + if ((flags & MAP_SHARED) != 0) { + if ((fp->f_flag & FWRITE) != 0) { + struct vattr va; + if ((err = + VOP_GETATTR(vp, &va, + p->p_ucred, p))) + return (err); + if ((va.va_flags & + (IMMUTABLE|APPEND)) == 0) + maxprot |= VM_PROT_WRITE; + else if (prot & PROT_WRITE) + return (EPERM); + } else if ((prot & PROT_WRITE) != 0) + return (EACCES); + } else + maxprot |= VM_PROT_WRITE; + + handle = (void *)vp; + } + } + + if (user_size == 0) + return(0); + + /* + * We bend a little - round the start and end addresses + * to the nearest page boundary. + */ + user_size = round_page(user_size); + + if (file_pos & PAGE_MASK_64) + return (EINVAL); + + user_map = current_map(); + + if ((flags & MAP_FIXED) == 0) { + find_space = TRUE; + user_addr = round_page(user_addr); + } else { + if (user_addr != trunc_page(user_addr)) + return (EINVAL); + find_space = FALSE; + (void) vm_deallocate(user_map, user_addr, user_size); + } + + + /* + * Lookup/allocate object. + */ + if (flags & MAP_ANON) { + /* + * Unnamed anonymous regions always start at 0. + */ + if (handle == 0) + file_pos = 0; + } + + if (handle == NULL) { + pager = NULL; +#ifdef notyet +/* Hmm .. 
*/ +#if defined(VM_PROT_READ_IS_EXEC) + if (prot & VM_PROT_READ) + prot |= VM_PROT_EXECUTE; + + if (maxprot & VM_PROT_READ) + maxprot |= VM_PROT_EXECUTE; +#endif +#endif + result = vm_allocate(user_map, &user_addr, user_size, find_space); + if (result != KERN_SUCCESS) + goto out; + + } else { + UBCINFOCHECK("mmap", vp); + pager = ubc_getpager(vp); + + if (pager == NULL) + return (ENOMEM); + + /* + * Set credentials: + * FIXME: if we're writing the file we need a way to + * ensure that someone doesn't replace our R/W creds + * with ones that only work for read. + */ + + ubc_setcred(vp, p); + docow = FALSE; + if ((flags & (MAP_ANON|MAP_SHARED)) == 0) { + docow = TRUE; + } + +#ifdef notyet +/* Hmm .. */ +#if defined(VM_PROT_READ_IS_EXEC) + if (prot & VM_PROT_READ) + prot |= VM_PROT_EXECUTE; + + if (maxprot & VM_PROT_READ) + maxprot |= VM_PROT_EXECUTE; +#endif +#endif /* notyet */ + + result = vm_map_64(user_map, &user_addr, user_size, + 0, find_space, pager, file_pos, docow, + prot, maxprot, + VM_INHERIT_DEFAULT); + + if (result != KERN_SUCCESS) + goto out; + + ubc_map(vp); + } + + if (flags & (MAP_SHARED|MAP_INHERIT)) { + result = vm_inherit(user_map, user_addr, user_size, + VM_INHERIT_SHARE); + if (result != KERN_SUCCESS) { + (void) vm_deallocate(user_map, user_addr, user_size); + goto out; + } + } + +out: + switch (result) { + case KERN_SUCCESS: + if (!mapanon) + *fdflags(p, uap->fd) |= UF_MAPPED; + *retval = (register_t)(user_addr + pageoff); + return (0); + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } + /*NOTREACHED*/ +} + +struct msync_args { + caddr_t addr; + int len; + int flags; +}; +int +msync(p, uap, retval) + struct proc *p; + struct msync_args *uap; + register_t *retval; +{ + vm_offset_t addr; + vm_size_t size, pageoff; + int flags; + vm_map_t user_map; + int rv; + vm_sync_t sync_flags=0; + + addr = (vm_offset_t) uap->addr; + pageoff = (addr & 
PAGE_MASK); + addr -= pageoff; + size = uap->len; + size = (vm_size_t) round_page(size); + flags = uap->flags; + + if (addr + size < addr) + return(EINVAL); + + user_map = current_map(); + + if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE)) + return (EINVAL); + + if (size == 0) { + /* + * We cannot support this properly without maintaining + * list all mmaps done. Cannot use vm_map_entry as they could be + * split or coalesced by indepenedant actions. So instead of + * inaccurate results, lets just return error as invalid size + * specified + */ + return(EINVAL); + } + + if (flags & MS_KILLPAGES) + sync_flags |= VM_SYNC_KILLPAGES; + if (flags & MS_DEACTIVATE) + sync_flags |= VM_SYNC_DEACTIVATE; + if (flags & MS_INVALIDATE) + sync_flags |= VM_SYNC_INVALIDATE; + + if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) { + if (flags & MS_ASYNC) + sync_flags |= VM_SYNC_ASYNCHRONOUS; + else + sync_flags |= VM_SYNC_SYNCHRONOUS; + } + rv = vm_msync(user_map, addr, size, sync_flags); + + switch (rv) { + case KERN_SUCCESS: + break; + case KERN_INVALID_ADDRESS: + return (EINVAL); /* Sun returns ENOMEM? */ + case KERN_FAILURE: + return (EIO); + default: + return (EINVAL); + } + + return (0); + +} + + +mremap() +{ + /* Not yet implemented */ + return (EOPNOTSUPP); +} + +struct munmap_args { + caddr_t addr; + int len; +}; +munmap(p, uap, retval) + struct proc *p; + struct munmap_args *uap; + register_t *retval; + +{ + vm_offset_t user_addr; + vm_size_t user_size, pageoff; + kern_return_t result; + + user_addr = (vm_offset_t) uap->addr; + user_size = (vm_size_t) uap->len; + + pageoff = (user_addr & PAGE_MASK); + + user_addr -= pageoff; + user_size += pageoff; + user_size = round_page(user_size); + if (user_addr + user_size < user_addr) + return(EINVAL); + + if (user_size == 0) + return (0); + + /* Address range must be all in user VM space. 
*/ + if (VM_MAX_ADDRESS > 0 && (user_addr + user_size > VM_MAX_ADDRESS)) + return (EINVAL); + if (VM_MIN_ADDRESS > 0 && user_addr < VM_MIN_ADDRESS) + return (EINVAL); + + + result = vm_deallocate(current_map(), user_addr, user_size); + if (result != KERN_SUCCESS) { + return(EINVAL); + } + return(0); +} + +void +munmapfd(p, fd) + struct proc *p; + int fd; +{ + /* + * XXX should vm_deallocate any regions mapped to this file + */ + *fdflags(p, fd) &= ~UF_MAPPED; +} + +struct mprotect_args { + caddr_t addr; + int len; + int prot; +}; +int +mprotect(p, uap, retval) + struct proc *p; + struct mprotect_args *uap; + register_t *retval; +{ + register vm_prot_t prot; + vm_offset_t user_addr; + vm_size_t user_size, pageoff; + kern_return_t result; + vm_map_t user_map; + + user_addr = (vm_offset_t) uap->addr; + user_size = (vm_size_t) uap->len; + prot = (vm_prot_t)(uap->prot & VM_PROT_ALL); + +#ifdef notyet +/* Hmm .. */ +#if defined(VM_PROT_READ_IS_EXEC) + if (prot & VM_PROT_READ) + prot |= VM_PROT_EXECUTE; +#endif +#endif /* notyet */ + + pageoff = (user_addr & PAGE_MASK); + user_addr -= pageoff; + user_size += pageoff; + user_size = round_page(user_size); + if (user_addr + user_size < user_addr) + return(EINVAL); + + user_map = current_map(); + + result = vm_map_protect(user_map, user_addr, user_addr+user_size, prot, + FALSE); + switch (result) { + case KERN_SUCCESS: + return (0); + case KERN_PROTECTION_FAILURE: + return (EACCES); + } + return (EINVAL); +} + + +struct minherit_args { + void *addr; + size_t len; + int inherit; +}; + +int +minherit(p, uap, retval) + struct proc *p; + struct minherit_args *uap; + register_t *retval; +{ + vm_offset_t addr; + vm_size_t size, pageoff; + register vm_inherit_t inherit; + vm_map_t user_map; + kern_return_t result; + + addr = (vm_offset_t)uap->addr; + size = uap->len; + inherit = uap->inherit; + + pageoff = (addr & PAGE_MASK); + addr -= pageoff; + size += pageoff; + size = (vm_size_t) round_page(size); + if (addr + size < addr) + 
return(EINVAL); + + user_map = current_map(); + result = vm_inherit(user_map, addr, size, + inherit); + switch (result) { + case KERN_SUCCESS: + return (0); + case KERN_PROTECTION_FAILURE: + return (EACCES); + } + return (EINVAL); +} + +struct madvise_args { + caddr_t addr; + int len; + int behav; +}; +/* ARGSUSED */ +int +madvise(p, uap, retval) + struct proc *p; + struct madvise_args *uap; + register_t *retval; +{ + vm_map_t user_map; + vm_offset_t start, end; + vm_behavior_t new_behavior; + kern_return_t result; + + /* + * Check for illegal addresses. Watch out for address wrap... Note + * that VM_*_ADDRESS are not constants due to casts (argh). + */ + if (VM_MAX_ADDRESS > 0 && + ((vm_offset_t) uap->addr + uap->len) > VM_MAX_ADDRESS) + return (EINVAL); + if (VM_MIN_ADDRESS > 0 && uap->addr < VM_MIN_ADDRESS) + return (EINVAL); + + if (((vm_offset_t) uap->addr + uap->len) < (vm_offset_t) uap->addr) + return (EINVAL); + + /* + * Since this routine is only advisory, we default to conservative + * behavior. 
+ */ + start = trunc_page((vm_offset_t) uap->addr); + end = round_page((vm_offset_t) uap->addr + uap->len); + + user_map = current_map(); + + switch (uap->behav) { + case MADV_RANDOM: + new_behavior = VM_BEHAVIOR_RANDOM; + case MADV_SEQUENTIAL: + new_behavior = VM_BEHAVIOR_SEQUENTIAL; + case MADV_NORMAL: + default: + new_behavior = VM_BEHAVIOR_DEFAULT; + } + + result = vm_behavior_set(user_map, start, end, uap->behav); + switch (result) { + case KERN_SUCCESS: + return (0); + case KERN_INVALID_ADDRESS: + return (EINVAL); + } + + return (EINVAL); +} + +struct mincore_args { + const void *addr; + size_t len; + char *vec; +}; +/* ARGSUSED */ +int +mincore(p, uap, retval) + struct proc *p; + struct mincore_args *uap; + register_t *retval; +{ + vm_offset_t addr, first_addr; + vm_offset_t end; + vm_map_t map; + char *vec; + int error; + int vecindex, lastvecindex; + int mincoreinfo=0; + int pqueryinfo; + kern_return_t ret; + int numref; + + map = current_map(); + + /* + * Make sure that the addresses presented are valid for user + * mode. + */ + first_addr = addr = trunc_page((vm_offset_t) uap->addr); + end = addr + (vm_size_t)round_page(uap->len); + + if (VM_MAX_ADDRESS > 0 && end > VM_MAX_ADDRESS) + return (EINVAL); + if (end < addr) + return (EINVAL); + + /* + * Address of byte vector + */ + vec = uap->vec; + + map = current_map(); + + /* + * Do this on a map entry basis so that if the pages are not + * in the current processes address space, we can easily look + * up the pages elsewhere. 
 */
	lastvecindex = -1;
	/*
	 * Walk the range a page at a time, asking the VM map about each page
	 * and translating VM_PAGE_QUERY_* bits into MINCORE_* bits for the
	 * user-supplied byte vector.
	 */
	for(addr; addr < end; addr += PAGE_SIZE) {
		pqueryinfo = 0;
		ret = vm_map_page_query(map, addr, &pqueryinfo, &numref);
		if (ret != KERN_SUCCESS)
			/* treat a failed query as "page not present" */
			pqueryinfo = 0;
		mincoreinfo = 0;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT)
			mincoreinfo |= MINCORE_INCORE;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF)
			mincoreinfo |= MINCORE_REFERENCED;
		if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY)
			mincoreinfo |= MINCORE_MODIFIED;


		/*
		 * calculate index into user supplied byte vector
		 */
		vecindex = (addr - first_addr)>> PAGE_SHIFT;

		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while((lastvecindex + 1) < vecindex) {
			error = subyte( vec + lastvecindex, 0);
			if (error) {
				return (EFAULT);
			}
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		error = subyte( vec + vecindex, mincoreinfo);
		if (error) {
			return (EFAULT);
		}
		lastvecindex = vecindex;
	}


	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = (end - first_addr) >> PAGE_SHIFT;
	while((lastvecindex + 1) < vecindex) {
		error = subyte( vec + lastvecindex, 0);
		if (error) {
			return (EFAULT);
		}
		++lastvecindex;
	}

	return (0);
}

/* Argument structure for mlock(2). */
struct mlock_args {
		caddr_t addr;
		size_t len;
};

/*
 * mlock(2): wire the given address range into physical memory.
 *
 * The range is rounded out to page boundaries, then wired via the Mach
 * vm_wire() call on the current task's map with VM_PROT_ALL.
 * Returns EINVAL on address wrap, ENOMEM if the wire fails.
 * The RLIMIT_MEMLOCK / vm_page_max_wired checks are compiled out
 * under "#ifdef notyet" below.
 */
int
mlock(p, uap, retval)
	struct proc *p;
	struct mlock_args *uap;
	register_t *retval;
{
	vm_map_t user_map;
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;
	kern_return_t	result;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	/* round the range out to whole pages */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);
#ifdef notyet
/* Hmm.. What am I going to do with this? */
	if (atop(size) + cnt.v_wire_count > vm_page_max_wired)
		return (EAGAIN);
#ifdef pmap_wired_count
	if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif
#endif /* notyet */

	user_map = current_map();

	/* vm_wire */
	result = vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_ALL);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}

/* Argument structure for munlock(2). */
struct munlock_args {
		caddr_t addr;
		size_t len;
};
/*
 * munlock(2): unwire the given address range.
 * Same page rounding as mlock(); unwiring is expressed as a vm_wire()
 * with VM_PROT_NONE.
 */
int
munlock(p, uap, retval)
	struct proc *p;
	struct munlock_args *uap;
	register_t *retval;
{
	vm_offset_t addr;
	vm_size_t size, pageoff;
	int error;
	vm_map_t user_map;
	kern_return_t	result;

	addr = (vm_offset_t) uap->addr;
	size = uap->len;

	/* round the range out to whole pages */
	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);

	/* disable wrap around */
	if (addr + size < addr)
		return (EINVAL);

#ifdef notyet
/* Hmm.. What am I going to do with this? */
#ifndef pmap_wired_count
	error = suser(p->p_ucred, &p->p_acflag);
	if (error)
		return (error);
#endif
#endif /* notyet */

	user_map = current_map();

	/* vm_wire */
	result = vm_wire(host_priv_self(), user_map, addr, size, VM_PROT_NONE);
	return (result == KERN_SUCCESS ? 0 : ENOMEM);
}


/* Argument structure for mlockall(2). */
struct mlockall_args {
	int	how;
};

/* mlockall(2): not implemented. */
int
mlockall(p, uap)
	struct proc *p;
	struct mlockall_args *uap;
{
	return (ENOSYS);
}

/* Argument structure for munlockall(2). */
struct munlockall_args {
	int	how;
};

/* munlockall(2): not implemented. */
int
munlockall(p, uap)
	struct proc *p;
	struct munlockall_args *uap;
{
	return(ENOSYS);
}


/* BEGIN DEFUNCT */
struct obreak_args {
	char *nsiz;
};
/* obreak: obsolete break(2); implicit-int K&R definition, always fails. */
obreak(p, uap, retval)
	struct proc *p;
	struct obreak_args *uap;
	register_t *retval;
{
	/* Not implemented, obsolete */
	return (ENOMEM);
}

int	both;

/* ovadvise: obsolete vadvise(2); body only exists to placate lint. */
ovadvise()
{

#ifdef lint
	both = 0;
#endif
}
/* END DEFUNCT */
#if 1
int print_map_addr=0;
#endif /* 1 */

/* CDY need to fix interface to allow user to map above 32 bits */
/*
 * map_fd: map a file descriptor's backing object into the current task.
 * Thin wrapper that takes the kernel funnel and calls map_fd_funneled().
 */
kern_return_t map_fd(
	int		fd,
	vm_offset_t	offset,
	vm_offset_t	*va,
	boolean_t	findspace,
	vm_size_t	size)
{
	kern_return_t ret;
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	ret = map_fd_funneled( fd, (vm_object_offset_t)offset,
			       va, findspace, size);

	(void) thread_funnel_set(kernel_flock, FALSE);

	return ret;
}

/*
 * map_fd_funneled: the real work of map_fd(); must be called with the
 * kernel funnel held.  Maps the vnode's UBC pager at a page-aligned
 * offset, either at a kernel-chosen address (findspace) or copied over
 * a caller-supplied address.
 */
kern_return_t map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct file	*fp;
	struct vnode	*vp;
	void *		pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	vm_map_copy_t	tmp;
	int		err=0;
	vm_map_t	my_map;
	struct proc	*p =(struct proc *)(get_bsdtask_info(current_task()));
#if 0
	extern int print_map_addr;
#endif /* 0 */

	/*
	 * Find the inode; verify that it's a regular file.
+ */ + + err = fdgetf(p, fd, &fp); + if (err) + return(err); + + if (fp->f_type != DTYPE_VNODE) + return(KERN_INVALID_ARGUMENT); + vp = (struct vnode *)fp->f_data; + + if (vp->v_type != VREG) + return (KERN_INVALID_ARGUMENT); + + if (offset & PAGE_MASK_64) { + printf("map_fd: file offset not page aligned(%d : %s\)n",p->p_pid, p->p_comm); + return (KERN_INVALID_ARGUMENT); + } + map_size = round_page(size); + + /* + * Allow user to map in a zero length file. + */ + if (size == 0) + return (KERN_SUCCESS); + /* + * Map in the file. + */ + UBCINFOCHECK("map_fd_funneled", vp); + pager = (void *) ubc_getpager(vp); + if (pager == NULL) + return (KERN_FAILURE); + + + my_map = current_map(); + + result = vm_map_64( + my_map, + &map_addr, map_size, (vm_offset_t)0, TRUE, + pager, offset, TRUE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (result != KERN_SUCCESS) + return (result); + + + if (!findspace) { + vm_offset_t dst_addr; + vm_map_copy_t tmp; + + if (copyin(va, &dst_addr, sizeof (dst_addr)) || + trunc_page(dst_addr) != dst_addr) { + (void) vm_map_remove( + my_map, + map_addr, map_addr + map_size, + VM_MAP_NO_FLAGS); + return (KERN_INVALID_ADDRESS); + } + + result = vm_map_copyin( + my_map, + map_addr, map_size, TRUE, + &tmp); + if (result != KERN_SUCCESS) { + + (void) vm_map_remove( + my_map, + map_addr, map_addr + map_size, + VM_MAP_NO_FLAGS); + return (result); + } + + result = vm_map_copy_overwrite( + my_map, + dst_addr, tmp, FALSE); + if (result != KERN_SUCCESS) { + vm_map_copy_discard(tmp); + return (result); + } + } else { + if (copyout(&map_addr, va, sizeof (map_addr))) { + (void) vm_map_remove( + my_map, + map_addr, map_addr + map_size, + VM_MAP_NO_FLAGS); + return (KERN_INVALID_ADDRESS); + } + } + + ubc_setcred(vp, current_proc()); + ubc_map(vp); + + return (KERN_SUCCESS); +} diff --git a/bsd/kern/kern_newsysctl.c b/bsd/kern/kern_newsysctl.c new file mode 100644 index 000000000..0bba174a6 --- /dev/null +++ b/bsd/kern/kern_newsysctl.c @@ -0,0 
+1,1159 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Mike Karels at Berkeley Software Design, Inc. + * + * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD + * project, to make these variables more userfriendly. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
 */


/*
 * NOTE(review): the targets of these #include directives were lost in
 * extraction (the "<...>" spans were stripped).  They presumably name the
 * usual sysctl headers (sys/param.h, sys/kernel.h, sys/sysctl.h, sys/proc.h,
 * sys/malloc.h, sys/systm.h, ...) -- restore from the pristine source.
 */
#include
#include
#include
#include
#include
#include
#include

/*
struct sysctl_oid_list sysctl__debug_children;
struct sysctl_oid_list sysctl__kern_children;
struct sysctl_oid_list sysctl__net_children;
struct sysctl_oid_list sysctl__sysctl_children;
*/

/* Statically built list of OIDs registered by sysctl_register_fixed(). */
extern struct sysctl_oid *newsysctl_list[];


static void
sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i);



/*
 * Locking and stats
 */
static struct sysctl_lock {
	int	sl_lock;	/* simple busy flag, see kernel_sysctl() */
	int	sl_want;	/* someone is waiting for the lock */
	int	sl_locked;	/* contention counter */
} memlock;

static int sysctl_root SYSCTL_HANDLER_ARGS;

struct sysctl_oid_list sysctl__children; /* root list */

/*
 * Initialization of the MIB tree.
 *
 * Order by number in each list.
 */

/*
 * Insert an OID into its parent's list, assigning a number past all
 * existing siblings (and >= 100) when the OID was declared OID_AUTO.
 * Siblings are kept sorted by oid_number.
 */
void sysctl_register_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid_list *parent = oidp->oid_parent;
	struct sysctl_oid *p;
	struct sysctl_oid *q;
	int n;

	/*
	 * If this oid has a number OID_AUTO, give it a number which
	 * is greater than any current oid.  Make sure it is at least
	 * 100 to leave space for pre-assigned oid numbers.
	 */
/*	sysctl_sysctl_debug_dump_node(parent, 3); */
	if (oidp->oid_number == OID_AUTO) {
		/* First, find the highest oid in the parent list >99 */
		n = 99;
		SLIST_FOREACH(p, parent, oid_link) {
			if (p->oid_number > n)
				n = p->oid_number;
		}
		oidp->oid_number = n + 1;
	}

	/*
	 * Insert the oid into the parent's list in order.
	 */
	q = NULL;
	SLIST_FOREACH(p, parent, oid_link) {
		if (oidp->oid_number < p->oid_number)
			break;
		q = p;
	}
	if (q)
		SLIST_INSERT_AFTER(q, oidp, oid_link);
	else
		SLIST_INSERT_HEAD(parent, oidp, oid_link);
}

/* Remove an OID from its parent's sibling list. */
void sysctl_unregister_oid(struct sysctl_oid *oidp)
{
	SLIST_REMOVE(oidp->oid_parent, oidp, sysctl_oid, oid_link);
}

/*
 * Bulk-register all the oids in a linker_set.
 */
void sysctl_register_set(struct linker_set *lsp)
{
	int count = lsp->ls_length;
	int i;
	for (i = 0; i < count; i++)
		sysctl_register_oid((struct sysctl_oid *) lsp->ls_items[i]);
}

/* Inverse of sysctl_register_set(): unregister every OID in the set. */
void sysctl_unregister_set(struct linker_set *lsp)
{
	int count = lsp->ls_length;
	int i;
	for (i = 0; i < count; i++)
		sysctl_unregister_oid((struct sysctl_oid *) lsp->ls_items[i]);
}


/*
 * Register OID's from fixed list
 */

/* Walk the NULL-terminated newsysctl_list[] and register each entry. */
void sysctl_register_fixed()
{
    int i = 0;


    while (newsysctl_list[i]) {
/*	printf("Registering %d\n", i); */
	sysctl_register_oid(newsysctl_list[i++]);
    }
}

/*
 * Register the kernel's oids on startup.
+ */ +struct linker_set sysctl_set; + +void sysctl_register_all(void *arg) +{ + sysctl_register_set(&sysctl_set); +} + +SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_ANY, sysctl_register_all, 0); + +/* + * "Staff-functions" + * + * These functions implement a presently undocumented interface + * used by the sysctl program to walk the tree, and get the type + * so it can print the value. + * This interface is under work and consideration, and should probably + * be killed with a big axe by the first person who can find the time. + * (be aware though, that the proper interface isn't as obvious as it + * may seem, there are various conflicting requirements. + * + * {0,0} printf the entire MIB-tree. + * {0,1,...} return the name of the "..." OID. + * {0,2,...} return the next OID. + * {0,3} return the OID of the name in "new" + * {0,4,...} return the kind & format info for the "..." OID. + */ + +static void +sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i) +{ + int k; + struct sysctl_oid *oidp; + + SLIST_FOREACH(oidp, l, oid_link) { + + for (k=0; koid_number, oidp->oid_name); + + printf("%c%c", + oidp->oid_kind & CTLFLAG_RD ? 'R':' ', + oidp->oid_kind & CTLFLAG_WR ? 
'W':' '); + + if (oidp->oid_handler) + printf(" *Handler"); + + switch (oidp->oid_kind & CTLTYPE) { + case CTLTYPE_NODE: + printf(" Node\n"); + if (!oidp->oid_handler) { + sysctl_sysctl_debug_dump_node( + oidp->oid_arg1, i+2); + } + break; + case CTLTYPE_INT: printf(" Int\n"); break; + case CTLTYPE_STRING: printf(" String\n"); break; + case CTLTYPE_QUAD: printf(" Quad\n"); break; + case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break; + default: printf("\n"); + } + + } +} + +static int +sysctl_sysctl_debug SYSCTL_HANDLER_ARGS +{ + sysctl_sysctl_debug_dump_node(&sysctl__children, 0); + return ENOENT; +} + +SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD, + 0, 0, sysctl_sysctl_debug, "-", ""); + +static int +sysctl_sysctl_name SYSCTL_HANDLER_ARGS +{ + int *name = (int *) arg1; + u_int namelen = arg2; + int error = 0; + struct sysctl_oid *oid; + struct sysctl_oid_list *lsp = &sysctl__children, *lsp2; + char buf[10]; + + while (namelen) { + if (!lsp) { + snprintf(buf,sizeof(buf),"%d",*name); + if (req->oldidx) + error = SYSCTL_OUT(req, ".", 1); + if (!error) + error = SYSCTL_OUT(req, buf, strlen(buf)); + if (error) + return (error); + namelen--; + name++; + continue; + } + lsp2 = 0; + SLIST_FOREACH(oid, lsp, oid_link) { + if (oid->oid_number != *name) + continue; + + if (req->oldidx) + error = SYSCTL_OUT(req, ".", 1); + if (!error) + error = SYSCTL_OUT(req, oid->oid_name, + strlen(oid->oid_name)); + if (error) + return (error); + + namelen--; + name++; + + if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE) + break; + + if (oid->oid_handler) + break; + + lsp2 = (struct sysctl_oid_list *)oid->oid_arg1; + break; + } + lsp = lsp2; + } + return (SYSCTL_OUT(req, "", 1)); +} + +SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD, sysctl_sysctl_name, ""); + +static int +sysctl_sysctl_next_ls (struct sysctl_oid_list *lsp, int *name, u_int namelen, + int *next, int *len, int level, struct sysctl_oid **oidpp) +{ + struct sysctl_oid *oidp; + + *len = level; + SLIST_FOREACH(oidp, lsp, 
oid_link) {
		*next = oidp->oid_number;
		*oidpp = oidp;

		if (!namelen) {
			/* past the prefix: first leaf in this subtree wins */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return 0;
			if (oidp->oid_handler)
				/* We really should call the handler here...*/
				return 0;
			lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
			if (!sysctl_sysctl_next_ls (lsp, 0, 0, next+1,
				len, level+1, oidpp))
				return 0;
			goto next;
		}

		if (oidp->oid_number < *name)
			continue;

		if (oidp->oid_number > *name) {
			/* passed the requested number: take this branch */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return 0;
			if (oidp->oid_handler)
				return 0;
			lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
			if (!sysctl_sysctl_next_ls (lsp, name+1, namelen-1,
				next+1, len, level+1, oidpp))
				return (0);
			goto next;
		}
		/* exact match: descend looking for a successor inside it */
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			continue;

		if (oidp->oid_handler)
			continue;

		lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
		if (!sysctl_sysctl_next_ls (lsp, name+1, namelen-1, next+1,
			len, level+1, oidpp))
			return (0);
	next:
		/* subtree exhausted: continue scan as a bare-number search */
		namelen = 1;
		*len = level;
	}
	return 1;
}

/* {0,2,...}: return the numeric OID following the given one. */
static int
sysctl_sysctl_next SYSCTL_HANDLER_ARGS
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int i, j, error;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children;
	int newoid[CTL_MAXNAME];

	i = sysctl_sysctl_next_ls (lsp, name, namelen, newoid, &j, 1, &oid);
	if (i)
		return ENOENT;
	error = SYSCTL_OUT(req, newoid, j * sizeof (int));
	return (error);
}

SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD, sysctl_sysctl_next, "");

/*
 * Translate a dotted name ("kern.maxproc") into its numeric OID path.
 * Destructively tokenizes "name" by overwriting '.' separators with NULs.
 * On success *oidpp (if non-NULL) points at the terminal OID.
 */
static int
name2oid (char *name, int *oid, int *len, struct sysctl_oid **oidpp)
{
	int i;
	struct sysctl_oid *oidp;
	struct sysctl_oid_list *lsp = &sysctl__children;
	char *p;

	if (!*name)
		return ENOENT;

	/* drop a single trailing dot */
	p = name + strlen(name) - 1 ;
	if (*p == '.')
		*p = '\0';

	*len = 0;

	/* split off the first component */
	for (p = name; *p && *p != '.'; p++)
		;
	i = *p;
	if (i == '.')
		*p = '\0';

	oidp = SLIST_FIRST(lsp);

	while (oidp && *len < CTL_MAXNAME) {
		if (strcmp(name, oidp->oid_name)) {
			oidp = SLIST_NEXT(oidp, oid_link);
			continue;
		}
		*oid++ = oidp->oid_number;
		(*len)++;

		if (!i) {
			/* no more components: done */
			if (oidpp)
				*oidpp = oidp;
			return (0);
		}

		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			break;

		if (oidp->oid_handler)
			break;

		/* descend and split off the next component */
		lsp = (struct sysctl_oid_list *)oidp->oid_arg1;
		oidp = SLIST_FIRST(lsp);
		name = p+1;
		for (p = name; *p && *p != '.'; p++)
			;
		i = *p;
		if (i == '.')
			*p = '\0';
	}
	return ENOENT;
}

/* {0,3}: name-to-OID lookup; name arrives via "new", OID leaves via "old". */
static int
sysctl_sysctl_name2oid SYSCTL_HANDLER_ARGS
{
	char *p;
	int error, oid[CTL_MAXNAME], len;
	struct sysctl_oid *op = 0;

	if (!req->newlen)
		return ENOENT;
	if (req->newlen >= MAXPATHLEN)	/* XXX arbitrary, undocumented */
		return (ENAMETOOLONG);

	p = _MALLOC(req->newlen+1, M_TEMP, M_WAITOK);

	if (!p)
		return ENOMEM;

	error = SYSCTL_IN(req, p, req->newlen);
	if (error) {
		FREE(p, M_TEMP);
		return (error);
	}

	p [req->newlen] = '\0';

	error = name2oid(p, oid, &len, &op);

	FREE(p, M_TEMP);

	if (error)
		return (error);

	error = SYSCTL_OUT(req, oid, len * sizeof *oid);
	return (error);
}

SYSCTL_PROC(_sysctl, 3, name2oid, CTLFLAG_RW|CTLFLAG_ANYBODY, 0, 0,
	sysctl_sysctl_name2oid, "I", "");

/* {0,4,...}: return an OID's kind word followed by its format string. */
static int
sysctl_sysctl_oidfmt SYSCTL_HANDLER_ARGS
{
	int *name = (int *) arg1, error;
	u_int namelen = arg2;
	int indx;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children;

	oid = SLIST_FIRST(lsp);

	indx = 0;
	while (oid && indx < CTL_MAXNAME) {
		if (oid->oid_number == name[indx]) {
			indx++;
			if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
				if (oid->oid_handler)
					goto found;
				if (indx == namelen)
					goto found;
				lsp = (struct sysctl_oid_list *)oid->oid_arg1;
				oid = SLIST_FIRST(lsp);
			} else {
				if (indx != namelen)
					return EISDIR;
				goto found;
			}
		} else {
			oid = SLIST_NEXT(oid, oid_link);
		}
	}
	return ENOENT;
found:
	if (!oid->oid_fmt)
		return ENOENT;
	error = SYSCTL_OUT(req,
		&oid->oid_kind, sizeof(oid->oid_kind));
	if (!error)
		error = SYSCTL_OUT(req, oid->oid_fmt,
			strlen(oid->oid_fmt)+1);
	return (error);
}


SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD, sysctl_sysctl_oidfmt, "");

/*
 * Default "handler" functions.
 */

/*
 * Handle an int, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_int SYSCTL_HANDLER_ARGS
{
	int error = 0;

	if (arg1)
		error = SYSCTL_OUT(req, arg1, sizeof(int));
	else
		error = SYSCTL_OUT(req, &arg2, sizeof(int));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;		/* constants are read-only */
	else
		error = SYSCTL_IN(req, arg1, sizeof(int));
	return (error);
}

/*
 * Handle a long, signed or unsigned.  arg1 points to it.
 */

int
sysctl_handle_long SYSCTL_HANDLER_ARGS
{
	int error = 0;

	if (!arg1)
		return (EINVAL);
	error = SYSCTL_OUT(req, arg1, sizeof(long));

	if (error || !req->newptr)
		return (error);

	error = SYSCTL_IN(req, arg1, sizeof(long));
	return (error);
}

/*
 * Handle our generic '\0' terminated 'C' string.
 * Two cases:
 *	a variable string:  point arg1 at it, arg2 is max length.
 *	a constant string:  point arg1 at it, arg2 is zero.
 */

int
sysctl_handle_string SYSCTL_HANDLER_ARGS
{
	int error=0;

	error = SYSCTL_OUT(req, arg1, strlen((char *)arg1)+1);

	if (error || !req->newptr)
		return (error);

	/* reject writes that would not leave room for the terminator */
	if ((req->newlen - req->newidx) >= arg2) {
		error = EINVAL;
	} else {
		arg2 = (req->newlen - req->newidx);
		error = SYSCTL_IN(req, arg1, arg2);
		((char *)arg1)[arg2] = '\0';
	}

	return (error);
}

/*
 * Handle any kind of opaque data.
 * arg1 points to it, arg2 is the size.
 */

int
sysctl_handle_opaque SYSCTL_HANDLER_ARGS
{
	int error;

	error = SYSCTL_OUT(req, arg1, arg2);

	if (error || !req->newptr)
		return (error);

	error = SYSCTL_IN(req, arg1, arg2);

	return (error);
}

/*
 * Transfer functions to/from kernel space.
+ * XXX: rather untested at this point + */ +static int +sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l) +{ + size_t i = 0; + int error = 0; + + if (req->oldptr) { + i = l; + if (i > req->oldlen - req->oldidx) + i = req->oldlen - req->oldidx; + if (i > 0) { + error = copyout(p, (char *)req->oldptr + req->oldidx, i); + if (error) + return error; + } + } + req->oldidx += l; + if (req->oldptr && i != l) + return (ENOMEM); + return (0); +} + +static int +sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l) +{ + if (!req->newptr) + return 0; + if (req->newlen - req->newidx < l) + return (EINVAL); + copyin((char *)req->newptr + req->newidx, p, l); + req->newidx += l; + return (0); +} + +int +kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen, size_t *retval) +{ + int error = 0; + struct sysctl_req req; + + bzero(&req, sizeof req); + + req.p = p; + + if (oldlenp) { + req.oldlen = *oldlenp; + } + + if (old) { + req.oldptr= old; + } + + if (newlen) { + req.newlen = newlen; + req.newptr = new; + } + + req.oldfunc = sysctl_old_kernel; + req.newfunc = sysctl_new_kernel; + req.lock = 1; + + /* XXX this should probably be done in a general way */ + while (memlock.sl_lock) { + memlock.sl_want = 1; + (void) tsleep((caddr_t)&memlock, PRIBIO+1, "sysctl", 0); + memlock.sl_locked++; + } + memlock.sl_lock = 1; + + error = sysctl_root(0, name, namelen, &req); + + if (req.lock == 2) + vsunlock(req.oldptr, req.oldlen, B_WRITE); + + memlock.sl_lock = 0; + + if (memlock.sl_want) { + memlock.sl_want = 0; + wakeup((caddr_t)&memlock); + } + + if (error && error != ENOMEM) + return (error); + + if (retval) { + if (req.oldptr && req.oldidx > req.oldlen) + *retval = req.oldlen; + else + *retval = req.oldidx; + } + return (error); +} + +/* + * Transfer function to/from user space. 
 */
/*
 * Copy "l" bytes of result out to the user's "old" buffer.  Differs from
 * the kernel variant by failing up front when the remaining space is too
 * small.  oldidx still advances by the full length for size probing.
 */
static int
sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
{
	int error = 0;
	size_t i = 0;

	if (req->oldptr) {
		if (req->oldlen - req->oldidx < l)
			return (ENOMEM);
		i = l;
		if (i > req->oldlen - req->oldidx)
			i = req->oldlen - req->oldidx;
		if (i > 0)
			error = copyout(p, (char *)req->oldptr + req->oldidx,
					i);
	}
	req->oldidx += l;
	if (error)
		return (error);
	if (req->oldptr && i < l)
		return (ENOMEM);
	return (0);
}

/* Copy "l" bytes of new value in from user space; propagates copyin errors. */
static int
sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
{
	int error;

	if (!req->newptr)
		return 0;
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	error = copyin((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (error);
}

/*
 * Traverse our tree, and find the right node, execute whatever it points
 * at, and return the resulting error code.
 */

int
sysctl_root SYSCTL_HANDLER_ARGS
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int indx, i;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children;
	int error;

	oid = SLIST_FIRST(lsp);

	indx = 0;
	/* walk the numeric path down the tree */
	while (oid && indx < CTL_MAXNAME) {
		if (oid->oid_number == name[indx]) {
			indx++;
			if (oid->oid_kind & CTLFLAG_NOLOCK)
				req->lock = 0;
			if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
				if (oid->oid_handler)
					goto found;
				if (indx == namelen)
					return ENOENT;
				lsp = (struct sysctl_oid_list *)oid->oid_arg1;
				oid = SLIST_FIRST(lsp);
			} else {
				if (indx != namelen)
					return EISDIR;
				goto found;
			}
		} else {
			oid = SLIST_NEXT(oid, oid_link);
		}
	}
	return ENOENT;
found:
	/* If writing isn't allowed */
	if (req->newptr && (!(oid->oid_kind & CTLFLAG_WR) ||
	    ((oid->oid_kind & CTLFLAG_SECURE) && securelevel > 0))) {
		return (EPERM);
	}

	/* Most likely only root can write */
	if (!(oid->oid_kind & CTLFLAG_ANYBODY) &&
	    req->newptr && req->p &&
	    (error = suser(req->p->p_ucred, &req->p->p_acflag)))
		return (error);

	if (!oid->oid_handler) {
		return EINVAL;
	}

	/*
	 * Switch to the NETWORK funnel for CTL_NET and KERN_IPC sysctls
	 */

	if ((req->newptr) && ((name[0] == CTL_NET) || ((name[0] == CTL_KERN) &&
	    (name[1] == KERN_IPC))))
		thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	/* nodes with handlers get the residual name; leaves get arg1/arg2 */
	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		i = (oid->oid_handler) (oid,
					name + indx, namelen - indx,
					req);
	} else {
		i = (oid->oid_handler) (oid,
					oid->oid_arg1, oid->oid_arg2,
					req);
	}

	/*
	 * Switch back to the KERNEL funnel, if necessary
	 */

	if ((req->newptr) && ((name[0] == CTL_NET) || ((name[0] == CTL_KERN) &&
	    (name[1] == KERN_IPC))))
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);

	return (i);
}

#ifndef _SYS_SYSPROTO_H_
struct sysctl_args {
	int	*name;
	u_int	namelen;
	void	*old;
	size_t	*oldlenp;
	void	*new;
	size_t	newlen;
};
#endif

/*
 * sysctl(2) system call front end for the new-style tree: copies the name
 * vector in from user space and defers to userland_sysctl().
 */
int
/* __sysctl(struct proc *p, struct sysctl_args *uap) */
new_sysctl(struct proc *p, struct sysctl_args *uap)
{
	int error, i, name[CTL_MAXNAME];
	size_t j;

	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);

	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
	if (error)
		return (error);

	error = userland_sysctl(p, name, uap->namelen,
		uap->old, uap->oldlenp, 0,
		uap->new, uap->newlen, &j);
	if (error && error != ENOMEM)
		return (error);
	if (uap->oldlenp) {
		i = copyout(&j, uap->oldlenp, sizeof(j));
		if (i)
			return (i);
	}
	return (error);
}

/*
 * This is used from various compatibility syscalls too.  That's why name
 * must be in kernel space.
 */
/*
 * Dispatch a sysctl request on behalf of a user process.  "name" is
 * always a kernel-space array; "oldlenp" may be a user pointer unless
 * "inkernel" is set.  Retries the handler while it returns EAGAIN,
 * restarting from a pristine copy of the request each time.
 */
int
userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval)
{
	int error = 0;
	struct sysctl_req req, req2;

	bzero(&req, sizeof req);

	req.p = p;

	if (oldlenp) {
		if (inkernel) {
			req.oldlen = *oldlenp;
		} else {
			error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
			if (error)
				return (error);
		}
	}

	if (old) {
		req.oldptr= old;
	}

	if (newlen) {
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_user;
	req.newfunc = sysctl_new_user;
	req.lock = 1;

	/* retry on EAGAIN with a fresh copy so indices start over */
	do {
	    req2 = req;
	    error = sysctl_root(0, name, namelen, &req2);
	} while (error == EAGAIN);

	req = req2;

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.oldlen)
			*retval = req.oldlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}
#if 0

#if COMPAT_43
/*
 * NOTE(review): the two #include targets below were lost in extraction
 * ("<...>" stripped); restore from the pristine source.  This whole
 * COMPAT_43 region is compiled out by the enclosing "#if 0".
 */
#include
#include

#define	KINFO_PROC		(0<<8)
#define	KINFO_RT		(1<<8)
#define	KINFO_VNODE		(2<<8)
#define	KINFO_FILE		(3<<8)
#define	KINFO_METER		(4<<8)
#define	KINFO_LOADAVG		(5<<8)
#define	KINFO_CLOCKRATE		(6<<8)

/* Non-standard BSDI extension - only present on their 4.3 net-2 releases */
#define	KINFO_BSDI_SYSINFO	(101<<8)

/*
 * XXX this is bloat, but I hope it's better here than on the potentially
 * limited kernel stack...
-Peter
 */

/*
 * Fake BSD/386 "sysinfo" layout returned for KINFO_BSDI_SYSINFO.
 * The int "string" members hold byte offsets (relative to the start of
 * this struct) into the string data appended after it during copyout.
 * Dead code: inside the enclosing "#if 0".
 */
static struct {
	int	bsdi_machine;		/* "i386" on BSD/386 */
/*      ^^^ this is an offset to the string, relative to the struct start */
	char	*pad0;
	long	pad1;
	long	pad2;
	long	pad3;
	u_long	pad4;
	u_long	pad5;
	u_long	pad6;

	int	bsdi_ostype;		/* "BSD/386" on BSD/386 */
	int	bsdi_osrelease;		/* "1.1" on BSD/386 */
	long	pad7;
	long	pad8;
	char	*pad9;

	long	pad10;
	long	pad11;
	int	pad12;
	long	pad13;
	quad_t	pad14;
	long	pad15;

	struct	timeval pad16;
	/* we dont set this, because BSDI's uname used gethostname() instead */
	int	bsdi_hostname;		/* hostname on BSD/386 */

	/* the actual string data is appended here */

} bsdi_si;
/*
 * this data is appended to the end of the bsdi_si structure during copyout.
 * The "char *" offsets are relative to the base of the bsdi_si struct.
 * This contains "FreeBSD\02.0-BUILT-nnnnnn\0i386\0", and these strings
 * should not exceed the length of the buffer here... (or else!! :-)
 */
static char bsdi_strings[80];	/* It had better be less than this!
 */

#ifndef _SYS_SYSPROTO_H_
struct getkerninfo_args {
	int	op;
	char	*where;
	size_t	*size;
	int	arg;
};
#endif

/*
 * Old getkerninfo(2) compatibility shim: translates each KINFO_* request
 * into the equivalent sysctl name vector and forwards to userland_sysctl().
 * Dead code: inside the enclosing "#if 0".
 */
int
ogetkerninfo(struct proc *p, struct getkerninfo_args *uap)
{
	int error, name[6];
	size_t size;

	switch (uap->op & 0xff00) {

	case KINFO_RT:
		name[0] = CTL_NET;
		name[1] = PF_ROUTE;
		name[2] = 0;
		name[3] = (uap->op & 0xff0000) >> 16;
		name[4] = uap->op & 0xff;
		name[5] = uap->arg;
		error = userland_sysctl(p, name, 6, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_VNODE:
		name[0] = CTL_KERN;
		name[1] = KERN_VNODE;
		error = userland_sysctl(p, name, 2, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_PROC:
		name[0] = CTL_KERN;
		name[1] = KERN_PROC;
		name[2] = uap->op & 0xff;
		name[3] = uap->arg;
		error = userland_sysctl(p, name, 4, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_FILE:
		name[0] = CTL_KERN;
		name[1] = KERN_FILE;
		error = userland_sysctl(p, name, 2, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_METER:
		name[0] = CTL_VM;
		name[1] = VM_METER;
		error = userland_sysctl(p, name, 2, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_LOADAVG:
		name[0] = CTL_VM;
		name[1] = VM_LOADAVG;
		error = userland_sysctl(p, name, 2, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_CLOCKRATE:
		name[0] = CTL_KERN;
		name[1] = KERN_CLOCKRATE;
		error = userland_sysctl(p, name, 2, uap->where, uap->size,
			0, 0, 0, &size);
		break;

	case KINFO_BSDI_SYSINFO: {
		/*
		 * this is pretty crude, but it's just enough for uname()
		 * from BSDI's 1.x libc to work.
		 *
		 * In particular, it doesn't return the same results when
		 * the supplied buffer is too small.  BSDI's version apparently
		 * will return the amount copied, and set the *size to how
		 * much was needed.  The emulation framework here isn't capable
		 * of that, so we just set both to the amount copied.
		 * BSDI's 2.x product apparently fails with ENOMEM in this
		 * scenario.
		 */

		u_int needed;
		u_int left;
		char *s;

		bzero((char *)&bsdi_si, sizeof(bsdi_si));
		bzero(bsdi_strings, sizeof(bsdi_strings));

		s = bsdi_strings;

		bsdi_si.bsdi_ostype = (s - bsdi_strings) + sizeof(bsdi_si);
		strcpy(s, ostype);
		s += strlen(s) + 1;

		bsdi_si.bsdi_osrelease = (s - bsdi_strings) + sizeof(bsdi_si);
		strcpy(s, osrelease);
		s += strlen(s) + 1;

		bsdi_si.bsdi_machine = (s - bsdi_strings) + sizeof(bsdi_si);
		strcpy(s, machine);
		s += strlen(s) + 1;

		needed = sizeof(bsdi_si) + (s - bsdi_strings);

		if (uap->where == NULL) {
			/* process is asking how much buffer to supply.. */
			size = needed;
			error = 0;
			break;
		}


		/*
		 * NOTE(review): "size" looks read-before-set in this branch
		 * when where != NULL (the other cases get it from
		 * userland_sysctl).  Dead code under "#if 0", but confirm
		 * against the upstream before ever enabling it.
		 */
		/* if too much buffer supplied, trim it down */
		if (size > needed)
			size = needed;

		/* how much of the buffer is remaining */
		left = size;

		if ((error = copyout((char *)&bsdi_si, uap->where, left)) != 0)
			break;

		/* is there any point in continuing? */
		if (left > sizeof(bsdi_si)) {
			left -= sizeof(bsdi_si);
			error = copyout(&bsdi_strings,
					uap->where + sizeof(bsdi_si), left);
		}
		break;
	}

	default:
		return (EOPNOTSUPP);
	}
	if (error)
		return (error);
	p->p_retval[0] = size;
	if (uap->size)
		error = copyout((caddr_t)&size, (caddr_t)uap->size,
		    sizeof(size));
	return (error);
}
#endif /* COMPAT_43 */

#endif
diff --git a/bsd/kern/kern_pcsamples.c b/bsd/kern/kern_pcsamples.c
new file mode 100644
index 000000000..68ba7cc97
--- /dev/null
+++ b/bsd/kern/kern_pcsamples.c
@@ -0,0 +1,361 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.
Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * NOTE(review): the targets of these #include directives were lost in
 * extraction (the "<...>" spans were stripped); restore from the
 * pristine source.
 */
#include
#include
#include
#include
#include
#include
#include

unsigned int pc_buftomem = 0;
u_long     * pc_buffer   = 0;   /* buffer that holds each pc */
u_long     * pc_bufptr   = 0;
u_long     * pc_buflast  = 0;
unsigned int npcbufs         = 8192;      /* number of pc entries in buffer */
unsigned int pc_bufsize      = 0;
unsigned int pcsample_flags  = 0;
unsigned int pcsample_enable = 0;

char pcsample_comm[MAXCOMLEN + 1];

/* Set the default framework boundaries */
u_long pcsample_beg = 0;
u_long pcsample_end = 0;

static pid_t global_state_pid = -1;       /* Used to control exclusive use of pc_buffer */

extern int pc_trace_buf[];
extern int pc_trace_cnt;

/*
 * Drain the low-level pc_trace_buf[] into pc_buffer, keeping only PCs
 * inside [pcsample_beg, pcsample_end) and (when pcsample_comm is set)
 * only while the named command is current.  When the buffer is nearly
 * full, sampling is disabled and sleepers on pcsample_enable are woken.
 */
void
add_pcbuffer()
{
	int      i;
	u_long  pc;
	struct proc *curproc;
	extern unsigned int kdebug_flags;

	if (!pcsample_enable)
		return;

	if (pcsample_comm[0] != '\0')
	{
	    /* If command string does not match, then return */
	    curproc = current_proc();
	    if (curproc &&
		(strncmp(curproc->p_comm, pcsample_comm, sizeof(pcsample_comm))))
		  return;
	}

	for (i=0; i < pc_trace_cnt; i++)
	  {
	    pc = pc_trace_buf[i];

	    if ((pcsample_beg <= pc) && (pc < pcsample_end))
	      {
		if (pc_bufptr > pc_buffer)
		  {
		    if ( (*(pc_bufptr-1)) == pc )
		      continue;   /* Ignore, probably spinning */
		  }

		/* Then the sample is in our range */
		*pc_bufptr = (u_long)pc;
		pc_bufptr++;
	      }
	  }

	/* We never wrap the buffer */
	if ((pc_bufptr + pc_trace_cnt) >= pc_buflast)
	  {
	    pcsample_enable = 0;
	    (void)clr_be_bit();
	    wakeup(&pcsample_enable);
	  }
	return;
}

/*
 * Allocate the sample buffer (npcbufs entries) from the kernel map.
 * Returns 0 on success, ENOTSUP if branch tracing can't be claimed,
 * EINVAL on allocation failure.  Implicit-int K&R definition.
 */
pcsamples_bootstrap()
{
	if (!clr_be_bit())
		return(ENOTSUP);

	pc_bufsize = npcbufs * sizeof(* pc_buffer);
	if (kmem_alloc(kernel_map, &pc_buftomem,
		       (vm_size_t)pc_bufsize) == KERN_SUCCESS)
	    pc_buffer = (u_long *) pc_buftomem;
	else
	    pc_buffer= (u_long *) 0;

	if (pc_buffer) {
		pc_bufptr = pc_buffer;
		pc_buflast = &pc_bufptr[npcbufs];
		pcsample_enable = 0;
		return(0);
	} else {
		pc_bufsize=0;
		return(EINVAL);
	}

}

/* Free any existing buffer and re-run bootstrap with the current npcbufs. */
pcsamples_reinit()
{
int x;
int ret=0;

        pcsample_enable = 0;

	if (pc_bufsize && pc_buffer)
		kmem_free(kernel_map,pc_buffer,pc_bufsize);

	ret= pcsamples_bootstrap();
	return(ret);
}

/* Release the buffer and reset all sampling state to defaults. */
pcsamples_clear()
{
	/* Clean up the sample buffer, set defaults */
	global_state_pid = -1;
	pcsample_enable = 0;
	if(pc_bufsize && pc_buffer)
		kmem_free(kernel_map,pc_buffer,pc_bufsize);
	pc_buffer   = (u_long *)0;
	pc_bufptr   = (u_long *)0;
	pc_buflast  = (u_long *)0;
	pc_bufsize  = 0;
	pcsample_beg= 0;
	pcsample_end= 0;
	bzero((void *)pcsample_comm, sizeof(pcsample_comm));
	(void)clr_be_bit();
}

/*
 * sysctl-style control entry for pc sampling.  All requests except
 * PCSAMPLE_GETNUMBUF are serialized on global_state_pid: the first
 * caller owns the facility until its pid disappears.  Implicit-int
 * K&R definition.
 */
pcsamples_control(name, namelen, where, sizep)
int *name;
u_int namelen;
char *where;
size_t *sizep;
{
int ret=0;
int size=*sizep;
unsigned int value = name[1];
pcinfo_t pc_bufinfo;

pid_t curpid;
struct proc *p, *curproc;

	if (name[0] != PCSAMPLE_GETNUMBUF)
	{
	    if(curproc = current_proc())
	        curpid = curproc->p_pid;
	    else
	        return (ESRCH);

	    if (global_state_pid == -1)
	        global_state_pid = curpid;
	    else if (global_state_pid != curpid)
	    {
		if((p = pfind(global_state_pid)) == NULL)
		{
		    /* The global pid no longer exists */
		    global_state_pid = curpid;
		}
		else
		{
		    /* The global pid exists, deny this request */
		    return(EBUSY);
		}
	    }
	}


	switch(name[0]) {
	        case PCSAMPLE_DISABLE:    /* used to disable */
		  pcsample_enable=0;
		  break;
	        case
PCSAMPLE_SETNUMBUF: + /* The buffer size is bounded by a min and max number of samples */ + if (value < pc_trace_cnt) { + ret=EINVAL; + break; + } + if (value <= MAX_PCSAMPLES) + /* npcbufs = value & ~(PC_TRACE_CNT-1); */ + npcbufs = value; + else + npcbufs = MAX_PCSAMPLES; + break; + case PCSAMPLE_GETNUMBUF: + if(size < sizeof(pcinfo_t)) { + ret=EINVAL; + break; + } + pc_bufinfo.npcbufs = npcbufs; + pc_bufinfo.bufsize = pc_bufsize; + pc_bufinfo.enable = pcsample_enable; + pc_bufinfo.pcsample_beg = pcsample_beg; + pc_bufinfo.pcsample_end = pcsample_end; + if(copyout (&pc_bufinfo, where, sizeof(pc_bufinfo))) + { + ret=EINVAL; + } + break; + case PCSAMPLE_SETUP: + ret=pcsamples_reinit(); + break; + case PCSAMPLE_REMOVE: + pcsamples_clear(); + break; + case PCSAMPLE_READBUF: + /* A nonzero value says enable and wait on the buffer */ + /* A zero value says read up the buffer immediately */ + if (value == 0) + { + /* Do not wait on the buffer */ + pcsample_enable = 0; + (void)clr_be_bit(); + ret = pcsamples_read(where, sizep); + break; + } + else if ((pc_bufsize <= 0) || (!pc_buffer)) + { + /* enable only if buffer is initialized */ + ret=EINVAL; + break; + } + + /* Turn on branch tracing */ + if (!set_be_bit()) + { + ret = ENOTSUP;; + break; + } + + /* Enable sampling */ + pcsample_enable = 1; + + ret = tsleep(&pcsample_enable, PRIBIO | PCATCH, "pcsample", 0); + pcsample_enable = 0; + (void)clr_be_bit(); + + if (ret) + { + /* Eventually fix this... 
if (ret != EINTR) */ + if (ret) + { + /* On errors, except EINTR, we want to cleanup buffer ptrs */ + /* pc_bufptr = pc_buffer; */ + *sizep = 0; + } + } + else + { + /* The only way to get here is if the buffer is full */ + ret = pcsamples_read(where, sizep); + } + + break; + case PCSAMPLE_SETREG: + if (size < sizeof(pcinfo_t)) + { + ret = EINVAL; + break; + } + if (copyin(where, &pc_bufinfo, sizeof(pcinfo_t))) + { + ret = EINVAL; + break; + } + + pcsample_beg = pc_bufinfo.pcsample_beg; + pcsample_end = pc_bufinfo.pcsample_end; + break; + case PCSAMPLE_COMM: + if (!(sizeof(pcsample_comm) > size)) + { + ret = EINVAL; + break; + } + bzero((void *)pcsample_comm, sizeof(pcsample_comm)); + if (copyin(where, pcsample_comm, size)) + { + ret = EINVAL; + } + break; + default: + ret= EOPNOTSUPP; + break; + } + return(ret); +} + + +/* + This buffer must be read up in one call. + If the buffer isn't big enough to hold + all the samples, it will copy up enough + to fill the buffer and throw the rest away. + This buffer never wraps. +*/ +pcsamples_read(u_long *buffer, size_t *number) +{ +int count=0; +int ret=0; +int copycount; + + count = (*number)/sizeof(u_long); + + if (count && pc_bufsize && pc_buffer) + { + copycount = pc_bufptr - pc_buffer; + + if (copycount <= 0) + { + *number = 0; + return(0); + } + + if (copycount > count) + copycount = count; + + /* We actually have data to send up */ + if(copyout(pc_buffer, buffer, copycount * sizeof(u_long))) + { + *number = 0; + return(EINVAL); + } + *number = copycount; + pc_bufptr = pc_buffer; + return(0); + } + else + { + *number = 0; + return(0); + } +} + + + + diff --git a/bsd/kern/kern_physio.c b/bsd/kern/kern_physio.c new file mode 100644 index 000000000..c4f2415f5 --- /dev/null +++ b/bsd/kern/kern_physio.c @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)kern_physio.c 8.1 (Berkeley) 6/10/93 + */ +/* + * HISTORY + * 27-July-97 Umesh Vaishampayan (umeshv@apple.com) + * Allow physio() to kernel space. + */ + +#include +#include +#include +#include +#include + +int +physio(strategy, bp, dev, flags, minphys, uio, blocksize) + void (*strategy)(); + struct buf *bp; + dev_t dev; + int flags; + u_int (*minphys)(); + struct uio *uio; + int blocksize; +{ + struct iovec *iovp; + struct proc *p = current_proc(); + int error, done, i, nobuf, s, todo; + + error = 0; + flags &= B_READ | B_WRITE; + + /* + * [check user read/write access to the data buffer] + * + * Check each iov one by one. Note that we know if we're reading or + * writing, so we ignore the uio's rw parameter. 
Also note that if + * we're doing a read, that's a *write* to user-space. + */ + for (i = 0; i < uio->uio_iovcnt; i++) { + if(uio->uio_segflg != UIO_SYSSPACE) { + if (!useracc(uio->uio_iov[i].iov_base, + uio->uio_iov[i].iov_len, + (flags == B_READ) ? B_WRITE : B_READ)) + return (EFAULT); + } + } + /* Make sure we have a buffer, creating one if necessary. */ + if (nobuf = (bp == NULL)) { +// bp = getphysbuf(); + panic("physio: null buf pointer\n"); + } + + /* [raise the processor priority level to splbio;] */ + s = splbio(); + + /* [while the buffer is marked busy] */ + while (bp->b_flags & B_BUSY) { + /* [mark the buffer wanted] */ + bp->b_flags |= B_WANTED; + /* [wait until the buffer is available] */ + tsleep((caddr_t)bp, PRIBIO+1, "physbuf", 0); + } + + /* Mark it busy, so nobody else will use it. */ + bp->b_flags |= B_BUSY; + + /* [lower the priority level] */ + splx(s); + + /* [set up the fixed part of the buffer for a transfer] */ + bp->b_dev = dev; + bp->b_error = 0; + bp->b_proc = p; + + /* + * [while there are data to transfer and no I/O error] + * Note that I/O errors are handled with a 'goto' at the bottom + * of the 'while' loop. + */ + for (i = 0; i < uio->uio_iovcnt; i++) { + iovp = &uio->uio_iov[i]; + while (iovp->iov_len > 0) { + /* + * [mark the buffer busy for physical I/O] + * (i.e. set B_PHYS (because it's an I/O to user + * memory, and B_RAW, because B_RAW is to be + * "Set by physio for raw transfers.", in addition + * to the "busy" and read/write flag.) + */ + s = splbio(); + bp->b_flags = B_BUSY | B_PHYS | B_RAW | flags; + splx(s); + + /* [set up the buffer for a maximum-sized transfer] */ + bp->b_blkno = uio->uio_offset / blocksize; + bp->b_bcount = iovp->iov_len; + bp->b_data = iovp->iov_base; + + /* + * [call minphys to bound the tranfer size] + * and remember the amount of data to transfer, + * for later comparison. 
+ */ + (*minphys)(bp); + todo = bp->b_bcount; + + /* + * [lock the part of the user address space involved + * in the transfer] + * Beware vmapbuf(); it clobbers b_data and + * saves it in b_saveaddr. However, vunmapbuf() + * restores it. + */ + + if(uio->uio_segflg != UIO_SYSSPACE) + vslock(bp->b_data, todo); + +#if 0 + vmapbuf(bp, todo); +#endif /* 0 */ + /* [call strategy to start the transfer] */ + (*strategy)(bp); + + /* + * Note that the raise/wait/lower/get error + * steps below would be done by biowait(), but + * we want to unlock the address space before + * we lower the priority. + * + * [raise the priority level to splbio] + */ + s = splbio(); + + /* [wait for the transfer to complete] */ + while ((bp->b_flags & B_DONE) == 0) + tsleep((caddr_t) bp, PRIBIO + 1, "physio", 0); + + /* + * [unlock the part of the address space previously + * locked] + */ +#if 0 + vunmapbuf(bp, todo); +#endif /* 0 */ + if(uio->uio_segflg != UIO_SYSSPACE) + vsunlock(bp->b_data, todo); + + /* remember error value (save a splbio/splx pair) */ + if (bp->b_flags & B_ERROR) + error = (bp->b_error ? bp->b_error : EIO); + + /* [lower the priority level] */ + splx(s); + + /* + * [deduct the transfer size from the total number + * of data to transfer] + */ + done = bp->b_bcount - bp->b_resid; + iovp->iov_len -= done; + iovp->iov_base += done; + uio->uio_offset += done; + uio->uio_resid -= done; + + /* + * Now, check for an error. + * Also, handle weird end-of-disk semantics. + */ + if (error || done < todo) + goto done; + } + } + +done: + /* + * [clean up the state of the buffer] + * Remember if somebody wants it, so we can wake them up below. + * Also, if we had to steal it, give it back. 
+ */ + s = splbio(); + bp->b_flags &= ~(B_BUSY | B_PHYS | B_RAW); +#if 0 + if (nobuf) + putphysbuf(bp); + + else +#endif /* 0 */ + { + /* + * [if another process is waiting for the raw I/O buffer, + * wake up processes waiting to do physical I/O; + */ + if (bp->b_flags & B_WANTED) { + bp->b_flags &= ~B_WANTED; + wakeup(bp); + } + } + splx(s); + + return (error); +} + +/* + * Leffler, et al., says on p. 231: + * "The minphys() routine is called by physio() to adjust the + * size of each I/O transfer before the latter is passed to + * the strategy routine..." + * + * so, just adjust the buffer's count accounting to MAXPHYS here, + * and return the new count; + */ +u_int +minphys(bp) + struct buf *bp; +{ + + bp->b_bcount = min(MAXPHYS, bp->b_bcount); + return bp->b_bcount; +} + +/* + * Do a read on a device for a user process. + */ +rawread(dev, uio) + dev_t dev; + struct uio *uio; +{ + return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, + dev, B_READ, minphys, uio, DEV_BSIZE)); +} + +/* + * Do a write on a device for a user process. + */ +rawwrite(dev, uio) + dev_t dev; + struct uio *uio; +{ + return (physio(cdevsw[major(dev)].d_strategy, (struct buf *)NULL, + dev, B_WRITE, minphys, uio, DEV_BSIZE)); +} diff --git a/bsd/kern/kern_proc.c b/bsd/kern/kern_proc.c new file mode 100644 index 000000000..e040cd76e --- /dev/null +++ b/bsd/kern/kern_proc.c @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_proc.c 8.4 (Berkeley) 1/4/94 + */ +/* HISTORY + * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com) + * Added current_proc_EXTERNAL() function for the use of kernel + * loadable modules. + * + * 05-Jun-95 Mac Gillon (mgillon) at NeXT + * New version based on 3.3NS and 4.4 + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Structure associated with user caching. + */ +struct uidinfo { + LIST_ENTRY(uidinfo) ui_hash; + uid_t ui_uid; + long ui_proccnt; +}; +#define UIHASH(uid) (&uihashtbl[(uid) & uihash]) +LIST_HEAD(uihashhead, uidinfo) *uihashtbl; +u_long uihash; /* size of hash table - 1 */ + +/* + * Other process lists + */ +struct pidhashhead *pidhashtbl; +u_long pidhash; +struct pgrphashhead *pgrphashtbl; +u_long pgrphash; +struct proclist allproc; +struct proclist zombproc; + +/* + * Initialize global process hashing structures. + */ +void +procinit() +{ + + LIST_INIT(&allproc); + LIST_INIT(&zombproc); + pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash); + pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash); + uihashtbl = hashinit(maxproc / 16, M_PROC, &uihash); +} + +/* + * Change the count associated with number of processes + * a given user is using. 
+ */ +int +chgproccnt(uid, diff) + uid_t uid; + int diff; +{ + register struct uidinfo *uip; + register struct uihashhead *uipp; + + uipp = UIHASH(uid); + for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) + if (uip->ui_uid == uid) + break; + if (uip) { + uip->ui_proccnt += diff; + if (uip->ui_proccnt > 0) + return (uip->ui_proccnt); + if (uip->ui_proccnt < 0) + panic("chgproccnt: procs < 0"); + LIST_REMOVE(uip, ui_hash); + FREE_ZONE(uip, sizeof *uip, M_PROC); + return (0); + } + if (diff <= 0) { + if (diff == 0) + return(0); + panic("chgproccnt: lost user"); + } + MALLOC_ZONE(uip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK); + LIST_INSERT_HEAD(uipp, uip, ui_hash); + uip->ui_uid = uid; + uip->ui_proccnt = diff; + return (diff); +} + +/* + * Is p an inferior of the current process? + */ +int +inferior(p) + register struct proc *p; +{ + + for (; p != current_proc(); p = p->p_pptr) + if (p->p_pid == 0) + return (0); + return (1); +} + +/* + * Locate a process by number + */ +struct proc * +pfind(pid) + register pid_t pid; +{ + register struct proc *p; + + if (!pid) + return (kernproc); + + for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) + if (p->p_pid == pid) + return (p); + return (NULL); +} + +/* + * Locate a process group by number + */ +struct pgrp * +pgfind(pgid) + register pid_t pgid; +{ + register struct pgrp *pgrp; + + for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) + if (pgrp->pg_id == pgid) + return (pgrp); + return (NULL); +} + + +/* + * Move p to a new or existing process group (and session) + */ +int +enterpgrp(p, pgid, mksess) + register struct proc *p; + pid_t pgid; + int mksess; +{ + register struct pgrp *pgrp = pgfind(pgid); + +#if DIAGNOSTIC + if (pgrp != NULL && mksess) /* firewalls */ + panic("enterpgrp: setsid into non-empty pgrp"); + if (SESS_LEADER(p)) + panic("enterpgrp: session leader attempted setpgrp"); +#endif + if (pgrp == NULL) { + pid_t savepid = p->p_pid; + struct proc 
*np; + /* + * new process group + */ +#if DIAGNOSTIC + if (p->p_pid != pgid) + panic("enterpgrp: new pgrp and pid != pgid"); +#endif + MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP, + M_WAITOK); + if ((np = pfind(savepid)) == NULL || np != p) + return (ESRCH); + if (mksess) { + register struct session *sess; + + /* + * new session + */ + MALLOC_ZONE(sess, struct session *, + sizeof(struct session), M_SESSION, M_WAITOK); + sess->s_leader = p; + sess->s_count = 1; + sess->s_ttyvp = NULL; + sess->s_ttyp = NULL; + bcopy(p->p_session->s_login, sess->s_login, + sizeof(sess->s_login)); + p->p_flag &= ~P_CONTROLT; + pgrp->pg_session = sess; +#if DIAGNOSTIC + if (p != current_proc()) + panic("enterpgrp: mksession and p != curproc"); +#endif + } else { + pgrp->pg_session = p->p_session; + pgrp->pg_session->s_count++; + } + pgrp->pg_id = pgid; + LIST_INIT(&pgrp->pg_members); + LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash); + pgrp->pg_jobc = 0; + } else if (pgrp == p->p_pgrp) + return (0); + + /* + * Adjust eligibility of affected pgrps to participate in job control. + * Increment eligibility counts before decrementing, otherwise we + * could reach 0 spuriously during the first call. 
+ */ + fixjobc(p, pgrp, 1); + fixjobc(p, p->p_pgrp, 0); + + LIST_REMOVE(p, p_pglist); + if (p->p_pgrp->pg_members.lh_first == 0) + pgdelete(p->p_pgrp); + p->p_pgrp = pgrp; + LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist); + return (0); +} + +/* + * remove process from process group + */ +int +leavepgrp(p) + register struct proc *p; +{ + + LIST_REMOVE(p, p_pglist); + if (p->p_pgrp->pg_members.lh_first == 0) + pgdelete(p->p_pgrp); + p->p_pgrp = 0; + return (0); +} + +/* + * delete a process group + */ +void +pgdelete(pgrp) + register struct pgrp *pgrp; +{ + + if (pgrp->pg_session->s_ttyp != NULL && + pgrp->pg_session->s_ttyp->t_pgrp == pgrp) + pgrp->pg_session->s_ttyp->t_pgrp = NULL; + LIST_REMOVE(pgrp, pg_hash); + if (--pgrp->pg_session->s_count == 0) + FREE_ZONE(pgrp->pg_session, sizeof(struct session), M_SESSION); + FREE_ZONE(pgrp, sizeof *pgrp, M_PGRP); +} + +void +sessrele(sess) + struct session *sess; +{ + if (--sess->s_count == 0) + FREE_ZONE(sess, sizeof (struct session), M_SESSION); +} + +static void orphanpg(); + +/* + * Adjust pgrp jobc counters when specified process changes process group. + * We count the number of processes in each process group that "qualify" + * the group for terminal job control (those with a parent in a different + * process group of the same session). If that count reaches zero, the + * process group becomes orphaned. Check both the specified process' + * process group and that of its children. + * entering == 0 => p is leaving specified group. + * entering == 1 => p is entering specified group. + */ +void +fixjobc(p, pgrp, entering) + register struct proc *p; + register struct pgrp *pgrp; + int entering; +{ + register struct pgrp *hispgrp; + register struct session *mysession = pgrp->pg_session; + + /* + * Check p's parent to see whether p qualifies its own process + * group; if so, adjust count for p's process group. 
+ */ + if ((hispgrp = p->p_pptr->p_pgrp) != pgrp && + hispgrp->pg_session == mysession) + if (entering) + pgrp->pg_jobc++; + else if (--pgrp->pg_jobc == 0) + orphanpg(pgrp); + + /* + * Check this process' children to see whether they qualify + * their process groups; if so, adjust counts for children's + * process groups. + */ + for (p = p->p_children.lh_first; p != 0; p = p->p_sibling.le_next) + if ((hispgrp = p->p_pgrp) != pgrp && + hispgrp->pg_session == mysession && + p->p_stat != SZOMB) + if (entering) + hispgrp->pg_jobc++; + else if (--hispgrp->pg_jobc == 0) + orphanpg(hispgrp); +} + +/* + * A process group has become orphaned; + * if there are any stopped processes in the group, + * hang-up all process in that group. + */ +static void +orphanpg(pg) + struct pgrp *pg; +{ + register struct proc *p; + + for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) { + if (p->p_stat == SSTOP) { + for (p = pg->pg_members.lh_first; p != 0; + p = p->p_pglist.le_next) { + pt_setrunnable(p); + psignal(p, SIGHUP); + psignal(p, SIGCONT); + } + return; + } + } +} + +#ifdef DEBUG +void +pgrpdump() +{ + register struct pgrp *pgrp; + register struct proc *p; + register i; + + for (i = 0; i <= pgrphash; i++) { + if (pgrp = pgrphashtbl[i].lh_first) { + printf("\tindx %d\n", i); + for (; pgrp != 0; pgrp = pgrp->pg_hash.le_next) { + printf("\tpgrp 0x%08x, pgid %d, sess %p, sesscnt %d, mem %p\n", + pgrp, pgrp->pg_id, pgrp->pg_session, + pgrp->pg_session->s_count, + pgrp->pg_members.lh_first); + for (p = pgrp->pg_members.lh_first; p != 0; + p = p->p_pglist.le_next) { + printf("\t\tpid %d addr 0x%08x pgrp 0x%08x\n", + p->p_pid, p, p->p_pgrp); + } + } + } + } +} +#endif /* DEBUG */ + +struct proc * current_proc_EXTERNAL() +{ + return (current_proc()); +} diff --git a/bsd/kern/kern_prot.c b/bsd/kern/kern_prot.c new file mode 100644 index 000000000..579c321f6 --- /dev/null +++ b/bsd/kern/kern_prot.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_prot.c 8.9 (Berkeley) 2/14/95 + */ + +/* + * System calls related to processes and protection + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * setprivexec: (dis)allow this process to hold + * task, thread, or exception ports of processes about to exec. 
+ */ +struct setprivexec_args { + int flag; +}; +int +setprivexec(p, uap, retval) + struct proc *p; + register struct setprivexec_args *uap; + register_t *retval; +{ + *retval = p->p_debugger; + p->p_debugger = (uap->flag != 0); + return(0); +} + +/* ARGSUSED */ +getpid(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_pid; +#if COMPAT_43 + retval[1] = p->p_pptr->p_pid; +#endif + return (0); +} + +/* ARGSUSED */ +getppid(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_pptr->p_pid; + return (0); +} + +/* Get process group ID; note that POSIX getpgrp takes no parameter */ +getpgrp(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_pgrp->pg_id; + return (0); +} + +/* ARGSUSED */ +getuid(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_cred->p_ruid; +#if COMPAT_43 + retval[1] = p->p_ucred->cr_uid; +#endif + return (0); +} + +/* ARGSUSED */ +geteuid(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_ucred->cr_uid; + return (0); +} + +/* ARGSUSED */ +getgid(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_cred->p_rgid; +#if COMPAT_43 + retval[1] = p->p_ucred->cr_groups[0]; +#endif + return (0); +} + +/* + * Get effective group ID. The "egid" is groups[0], and could be obtained + * via getgroups. This syscall exists because it is somewhat painful to do + * correctly in a library function. 
+ */ +/* ARGSUSED */ +getegid(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_ucred->cr_groups[0]; + return (0); +} + +struct getgroups_args { + u_int gidsetsize; + gid_t *gidset; +}; +getgroups(p, uap, retval) + struct proc *p; + register struct getgroups_args *uap; + register_t *retval; +{ + register struct pcred *pc = p->p_cred; + register u_int ngrp; + int error; + + if ((ngrp = uap->gidsetsize) == 0) { + *retval = pc->pc_ucred->cr_ngroups; + return (0); + } + if (ngrp < pc->pc_ucred->cr_ngroups) + return (EINVAL); + pcred_readlock(p); + ngrp = pc->pc_ucred->cr_ngroups; + if (error = copyout((caddr_t)pc->pc_ucred->cr_groups, + (caddr_t)uap->gidset, ngrp * sizeof(gid_t))) { + pcred_unlock(p); + return (error); + } + pcred_unlock(p); + *retval = ngrp; + return (0); +} + +/* ARGSUSED */ +setsid(p, uap, retval) + register struct proc *p; + void *uap; + register_t *retval; +{ + + if (p->p_pgid == p->p_pid || pgfind(p->p_pid)) { + return (EPERM); + } else { + (void)enterpgrp(p, p->p_pid, 1); + *retval = p->p_pid; + return (0); + } +} + +/* + * set process group (setpgid/old setpgrp) + * + * caller does setpgid(targpid, targpgid) + * + * pid must be caller or child of caller (ESRCH) + * if a child + * pid must be in same session (EPERM) + * pid can't have done an exec (EACCES) + * if pgid != pid + * there must exist some pid in same session having pgid (EPERM) + * pid must not be session leader (EPERM) + */ +struct setpgid_args { + int pid; + int pgid; +}; +/* ARGSUSED */ +setpgid(curp, uap, retval) + struct proc *curp; + register struct setpgid_args *uap; + register_t *retval; +{ + register struct proc *targp; /* target process */ + register struct pgrp *pgrp; /* target pgrp */ + + if (uap->pid != 0 && uap->pid != curp->p_pid) { + if ((targp = pfind(uap->pid)) == 0 || !inferior(targp)) + return (ESRCH); + if (targp->p_session != curp->p_session) + return (EPERM); + if (targp->p_flag & P_EXEC) + return (EACCES); + } else + 
targp = curp; + if (SESS_LEADER(targp)) + return (EPERM); + if (uap->pgid == 0) + uap->pgid = targp->p_pid; + else if (uap->pgid != targp->p_pid) + if ((pgrp = pgfind(uap->pgid)) == 0 || + pgrp->pg_session != curp->p_session) + return (EPERM); + return (enterpgrp(targp, uap->pgid, 0)); +} + +struct setuid_args { + uid_t uid; +}; +/* ARGSUSED */ +setuid(p, uap, retval) + struct proc *p; + struct setuid_args *uap; + register_t *retval; +{ + register struct pcred *pc = p->p_cred; + register uid_t uid; + int error; + + uid = uap->uid; + if (uid != pc->p_ruid && + (error = suser(pc->pc_ucred, &p->p_acflag))) + return (error); + /* + * Everything's okay, do it. + * Transfer proc count to new user. + * Copy credentials so other references do not see our changes. + */ + pcred_writelock(p); + (void)chgproccnt(pc->p_ruid, -1); + (void)chgproccnt(uid, 1); + pc->pc_ucred = crcopy(pc->pc_ucred); + pc->pc_ucred->cr_uid = uid; + pc->p_ruid = uid; + pc->p_svuid = uid; + pcred_unlock(p); + set_security_token(p); + p->p_flag |= P_SUGID; + return (0); +} + +struct seteuid_args { + uid_t euid; +}; +/* ARGSUSED */ +seteuid(p, uap, retval) + struct proc *p; + struct seteuid_args *uap; + register_t *retval; +{ + register struct pcred *pc = p->p_cred; + register uid_t euid; + int error; + + euid = uap->euid; + if (euid != pc->p_ruid && euid != pc->p_svuid && + (error = suser(pc->pc_ucred, &p->p_acflag))) + return (error); + /* + * Everything's okay, do it. Copy credentials so other references do + * not see our changes. 
+ */ + pcred_writelock(p); + pc->pc_ucred = crcopy(pc->pc_ucred); + pc->pc_ucred->cr_uid = euid; + pcred_unlock(p); + set_security_token(p); + p->p_flag |= P_SUGID; + return (0); +} + +struct setgid_args { + gid_t gid; +}; +/* ARGSUSED */ +setgid(p, uap, retval) + struct proc *p; + struct setgid_args *uap; + register_t *retval; +{ + register struct pcred *pc = p->p_cred; + register gid_t gid; + int error; + + gid = uap->gid; + if (gid != pc->p_rgid && (error = suser(pc->pc_ucred, &p->p_acflag))) + return (error); + pcred_writelock(p); + pc->pc_ucred = crcopy(pc->pc_ucred); + pc->pc_ucred->cr_groups[0] = gid; + pc->p_rgid = gid; + pc->p_svgid = gid; /* ??? */ + pcred_unlock(p); + set_security_token(p); + p->p_flag |= P_SUGID; + return (0); +} + +struct setegid_args { + gid_t egid; +}; +/* ARGSUSED */ +setegid(p, uap, retval) + struct proc *p; + struct setegid_args *uap; + register_t *retval; +{ + register struct pcred *pc = p->p_cred; + register gid_t egid; + int error; + + egid = uap->egid; + if (egid != pc->p_rgid && egid != pc->p_svgid && + (error = suser(pc->pc_ucred, &p->p_acflag))) + return (error); + pcred_writelock(p); + pc->pc_ucred = crcopy(pc->pc_ucred); + pc->pc_ucred->cr_groups[0] = egid; + pcred_unlock(p); + set_security_token(p); + p->p_flag |= P_SUGID; + return (0); +} + +struct setgroups_args{ + u_int gidsetsize; + gid_t *gidset; +}; + +/* ARGSUSED */ +setgroups(p, uap, retval) + struct proc *p; + struct setgroups_args *uap; + register_t *retval; +{ + register struct pcred *pc = p->p_cred; + struct ucred *new, *old; + register u_int ngrp; + int error; + + if (error = suser(pc->pc_ucred, &p->p_acflag)) + return (error); + ngrp = uap->gidsetsize; + if (ngrp < 1 || ngrp > NGROUPS) + return (EINVAL); + new = crget(); + error = copyin((caddr_t)uap->gidset, + (caddr_t)new->cr_groups, ngrp * sizeof(gid_t)); + if (error) { + crfree(new); + return (error); + } + new->cr_ngroups = ngrp; + pcred_writelock(p); + old = pc->pc_ucred; + new->cr_uid = old->cr_uid; 
+ pc->pc_ucred = new; + pcred_unlock(p); + set_security_token(p); + p->p_flag |= P_SUGID; + if (old != NOCRED) + crfree(old); + return (0); +} + +#if COMPAT_43 +struct osetreuid_args{ + int ruid; + int euid; +}; +/* ARGSUSED */ +osetreuid(p, uap, retval) + register struct proc *p; + struct osetreuid_args *uap; + register_t *retval; +{ + struct seteuid_args seuidargs; + struct setuid_args suidargs; + + /* + * There are five cases, and we attempt to emulate them in + * the following fashion: + * -1, -1: return 0. This is correct emulation. + * -1, N: call seteuid(N). This is correct emulation. + * N, -1: if we called setuid(N), our euid would be changed + * to N as well. the theory is that we don't want to + * revoke root access yet, so we call seteuid(N) + * instead. This is incorrect emulation, but often + * suffices enough for binary compatibility. + * N, N: call setuid(N). This is correct emulation. + * N, M: call setuid(N). This is close to correct emulation. + */ + if (uap->ruid == (uid_t)-1) { + if (uap->euid == (uid_t)-1) + return (0); /* -1, -1 */ + seuidargs.euid = uap->euid; /* -1, N */ + return (seteuid(p, &seuidargs, retval)); + } + if (uap->euid == (uid_t)-1) { + seuidargs.euid = uap->ruid; /* N, -1 */ + return (seteuid(p, &seuidargs, retval)); + } + suidargs.uid = uap->ruid; /* N, N and N, M */ + return (setuid(p, &suidargs, retval)); +} + +struct osetregid_args { + int rgid; + int egid; +}; +/* ARGSUSED */ +osetregid(p, uap, retval) + register struct proc *p; + struct osetregid_args *uap; + register_t *retval; +{ + struct setegid_args segidargs; + struct setgid_args sgidargs; + + /* + * There are five cases, described above in osetreuid() + */ + if (uap->rgid == (gid_t)-1) { + if (uap->egid == (gid_t)-1) + return (0); /* -1, -1 */ + segidargs.egid = uap->egid; /* -1, N */ + return (setegid(p, &segidargs, retval)); + } + if (uap->egid == (gid_t)-1) { + segidargs.egid = uap->rgid; /* N, -1 */ + return (setegid(p, &segidargs, retval)); + } + sgidargs.gid 
= uap->rgid; /* N, N and N, M */ + return (setgid(p, &sgidargs, retval)); +} +#endif /* COMPAT_43 */ + +/* + * Check if gid is a member of the group set. + */ +groupmember(gid, cred) + gid_t gid; + register struct ucred *cred; +{ + register gid_t *gp; + gid_t *egp; + + egp = &(cred->cr_groups[cred->cr_ngroups]); + for (gp = cred->cr_groups; gp < egp; gp++) + if (*gp == gid) + return (1); + return (0); +} + +/* + * Test whether the specified credentials imply "super-user" + * privilege; if so, and we have accounting info, set the flag + * indicating use of super-powers. + * Returns 0 or error. + */ +suser(cred, acflag) + struct ucred *cred; + u_short *acflag; +{ +#if DIAGNOSTIC + if (cred == NOCRED || cred == FSCRED) + panic("suser"); +#endif + if (cred->cr_uid == 0) { + if (acflag) + *acflag |= ASU; + return (0); + } + return (EPERM); +} + +int +is_suser(void) +{ + struct proc *p = current_proc(); + + if (!p) + return (0); + + return (suser(p->p_ucred, &p->p_acflag) == 0); +} + +int +is_suser1(void) +{ + struct proc *p = current_proc(); + + if (!p) + return (0); + + return (suser(p->p_ucred, &p->p_acflag) == 0 || + p->p_cred->p_ruid == 0 || p->p_cred->p_svuid == 0); +} + +/* + * Allocate a zeroed cred structure. + */ +struct ucred * +crget() +{ + register struct ucred *cr; + + MALLOC_ZONE(cr, struct ucred *, sizeof(*cr), M_CRED, M_WAITOK); + bzero((caddr_t)cr, sizeof(*cr)); + cr->cr_ref = 1; + return (cr); +} + +/* + * Free a cred structure. + * Throws away space when ref count gets to 0. + */ +void +crfree(cr) + struct ucred *cr; +{ +#if DIAGNOSTIC + if (cr == NOCRED || cr == FSCRED) + panic("crfree"); +#endif + if (--cr->cr_ref == 0) + FREE_ZONE((caddr_t)cr, sizeof *cr, M_CRED); +} + +/* + * Copy cred structure to a new one and free the old one. 
+ */ +struct ucred * +crcopy(cr) + struct ucred *cr; +{ + struct ucred *newcr; + +#if DIAGNOSTIC + if (cr == NOCRED || cr == FSCRED) + panic("crcopy"); +#endif + if (cr->cr_ref == 1) + return (cr); + newcr = crget(); + *newcr = *cr; + crfree(cr); + newcr->cr_ref = 1; + return (newcr); +} + +/* + * Dup cred struct to a new held one. + */ +struct ucred * +crdup(cr) + struct ucred *cr; +{ + struct ucred *newcr; + +#if DIAGNOSTIC + if (cr == NOCRED || cr == FSCRED) + panic("crdup"); +#endif + newcr = crget(); + *newcr = *cr; + newcr->cr_ref = 1; + return (newcr); +} + +/* + * Get login name, if available. + */ +struct getlogin_args { + char *namebuf; + u_int namelen; +}; +/* ARGSUSED */ +getlogin(p, uap, retval) + struct proc *p; + struct getlogin_args *uap; + register_t *retval; +{ + + if (uap->namelen > sizeof (p->p_pgrp->pg_session->s_login)) + uap->namelen = sizeof (p->p_pgrp->pg_session->s_login); + return (copyout((caddr_t) p->p_pgrp->pg_session->s_login, + (caddr_t)uap->namebuf, uap->namelen)); +} + +/* + * Set login name. + */ +struct setlogin_args { + char *namebuf; +}; +/* ARGSUSED */ +setlogin(p, uap, retval) + struct proc *p; + struct setlogin_args *uap; + register_t *retval; +{ + int error; + int dummy=0; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + + error = copyinstr((caddr_t) uap->namebuf, + (caddr_t) p->p_pgrp->pg_session->s_login, + sizeof (p->p_pgrp->pg_session->s_login) - 1, (size_t *)&dummy); + if (error == ENAMETOOLONG) + error = EINVAL; + return (error); +} + + +/* Set the secrity token of the task with current euid and eguid */ +void +set_security_token(struct proc * p) +{ + security_token_t sec_token; + + sec_token.val[0] = p->p_ucred->cr_uid; + sec_token.val[1] = p->p_ucred->cr_gid; + (void)host_security_set_task_token(host_security_self(), + p->task, + sec_token, + (sec_token.val[0]) ? 
+ HOST_PRIV_NULL : + host_priv_self()); +} diff --git a/bsd/kern/kern_resource.c b/bsd/kern/kern_resource.c new file mode 100644 index 000000000..5a41676e2 --- /dev/null +++ b/bsd/kern/kern_resource.c @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +#include + +int donice __P((struct proc *curp, struct proc *chgp, int n)); +int dosetrlimit __P((struct proc *p, u_int which, struct rlimit *limp)); + +rlim_t maxdmap = MAXDSIZ; /* XXX */ +rlim_t maxsmap = MAXSSIZ; /* XXX */ + +/* + * Resource controls and accounting. 
+ */ +struct getpriority_args { + int which; + int who; +}; +int +getpriority(curp, uap, retval) + struct proc *curp; + register struct getpriority_args *uap; + register_t *retval; +{ + register struct proc *p; + register int low = PRIO_MAX + 1; + + switch (uap->which) { + + case PRIO_PROCESS: + if (uap->who == 0) + p = curp; + else + p = pfind(uap->who); + if (p == 0) + break; + low = p->p_nice; + break; + + case PRIO_PGRP: { + register struct pgrp *pg; + + if (uap->who == 0) + pg = curp->p_pgrp; + else if ((pg = pgfind(uap->who)) == NULL) + break; + for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) { + if (p->p_nice < low) + low = p->p_nice; + } + break; + } + + case PRIO_USER: + if (uap->who == 0) + uap->who = curp->p_ucred->cr_uid; + for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) + if (p->p_ucred->cr_uid == uap->who && + p->p_nice < low) + low = p->p_nice; + break; + + default: + return (EINVAL); + } + if (low == PRIO_MAX + 1) + return (ESRCH); + *retval = low; + return (0); +} + +struct setpriority_args { + int which; + int who; + int prio; +}; +/* ARGSUSED */ +int +setpriority(curp, uap, retval) + struct proc *curp; + register struct setpriority_args *uap; + register_t *retval; +{ + register struct proc *p; + int found = 0, error = 0; + + switch (uap->which) { + + case PRIO_PROCESS: + if (uap->who == 0) + p = curp; + else + p = pfind(uap->who); + if (p == 0) + break; + error = donice(curp, p, uap->prio); + found++; + break; + + case PRIO_PGRP: { + register struct pgrp *pg; + + if (uap->who == 0) + pg = curp->p_pgrp; + else if ((pg = pgfind(uap->who)) == NULL) + break; + for (p = pg->pg_members.lh_first; p != 0; + p = p->p_pglist.le_next) { + error = donice(curp, p, uap->prio); + found++; + } + break; + } + + case PRIO_USER: + if (uap->who == 0) + uap->who = curp->p_ucred->cr_uid; + for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) + if (p->p_ucred->cr_uid == uap->who) { + error = donice(curp, p, uap->prio); + found++; + } + 
break; + + default: + return (EINVAL); + } + if (found == 0) + return (ESRCH); + return (error); +} + +int +donice(curp, chgp, n) + register struct proc *curp, *chgp; + register int n; +{ + register struct pcred *pcred = curp->p_cred; + + if (pcred->pc_ucred->cr_uid && pcred->p_ruid && + pcred->pc_ucred->cr_uid != chgp->p_ucred->cr_uid && + pcred->p_ruid != chgp->p_ucred->cr_uid) + return (EPERM); + if (n > PRIO_MAX) + n = PRIO_MAX; + if (n < PRIO_MIN) + n = PRIO_MIN; + if (n < chgp->p_nice && suser(pcred->pc_ucred, &curp->p_acflag)) + return (EACCES); + chgp->p_nice = n; + (void)resetpriority(chgp); + return (0); +} + +#if COMPAT_43 +struct osetrlimit_args { + u_int which; + struct ogetrlimit * rlp; +}; +/* ARGSUSED */ +int +osetrlimit(p, uap, retval) + struct proc *p; + struct osetrlimit_args *uap; + register_t *retval; +{ + struct orlimit olim; + struct rlimit lim; + int error; + + if (error = copyin((caddr_t)uap->rlp, (caddr_t)&olim, + sizeof (struct orlimit))) + return (error); + lim.rlim_cur = olim.rlim_cur; + lim.rlim_max = olim.rlim_max; + return (dosetrlimit(p, uap->which, &lim)); +} + +struct ogetrlimit_args { + u_int which; + struct ogetrlimit * rlp; +}; +/* ARGSUSED */ +int +ogetrlimit(p, uap, retval) + struct proc *p; + struct ogetrlimit_args *uap; + register_t *retval; +{ + struct orlimit olim; + + if (uap->which >= RLIM_NLIMITS) + return (EINVAL); + olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur; + if (olim.rlim_cur == -1) + olim.rlim_cur = 0x7fffffff; + olim.rlim_max = p->p_rlimit[uap->which].rlim_max; + if (olim.rlim_max == -1) + olim.rlim_max = 0x7fffffff; + return (copyout((caddr_t)&olim, (caddr_t)uap->rlp, + sizeof(olim))); +} +#endif /* COMPAT_43 */ + +struct setrlimit_args { + u_int which; + struct rlimit * rlp; +}; +/* ARGSUSED */ +int +setrlimit(p, uap, retval) + struct proc *p; + register struct setrlimit_args *uap; + register_t *retval; +{ + struct rlimit alim; + int error; + + if (error = copyin((caddr_t)uap->rlp, (caddr_t)&alim, + 
sizeof (struct rlimit))) + return (error); + return (dosetrlimit(p, uap->which, &alim)); +} + +int +dosetrlimit(p, which, limp) + struct proc *p; + u_int which; + struct rlimit *limp; +{ + register struct rlimit *alimp; + extern rlim_t maxdmap, maxsmap; + int error; + + if (which >= RLIM_NLIMITS) + return (EINVAL); + alimp = &p->p_rlimit[which]; + if (limp->rlim_cur > alimp->rlim_max || + limp->rlim_max > alimp->rlim_max) + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + if (limp->rlim_cur > limp->rlim_max) + limp->rlim_cur = limp->rlim_max; + if (p->p_limit->p_refcnt > 1 && + (p->p_limit->p_lflags & PL_SHAREMOD) == 0) { + p->p_limit->p_refcnt--; + p->p_limit = limcopy(p->p_limit); + alimp = &p->p_rlimit[which]; + } + + switch (which) { + + case RLIMIT_DATA: + if (limp->rlim_cur > maxdmap) + limp->rlim_cur = maxdmap; + if (limp->rlim_max > maxdmap) + limp->rlim_max = maxdmap; + break; + + case RLIMIT_STACK: + if (limp->rlim_cur > maxsmap) + limp->rlim_cur = maxsmap; + if (limp->rlim_max > maxsmap) + limp->rlim_max = maxsmap; + /* + * Stack is allocated to the max at exec time with only + * "rlim_cur" bytes accessible. If stack limit is going + * up make more accessible, if going down make inaccessible. 
+ */ + if (limp->rlim_cur != alimp->rlim_cur) { + vm_offset_t addr; + vm_size_t size; + vm_prot_t prot; + + if (limp->rlim_cur > alimp->rlim_cur) { + /* grow stack */ + size = round_page(limp->rlim_cur); + size -= round_page(alimp->rlim_cur); + +#if STACK_GROWTH_UP + /* go to top of current stack */ + addr = trunc_page(p->user_stack + alimp->rlim_cur); +#else /* STACK_GROWTH_UP */ + addr = trunc_page(p->user_stack - alimp->rlim_cur); + addr -= size; +#endif /* STACK_GROWTH_UP */ + if (vm_allocate(current_map(), + &addr, size, FALSE) != KERN_SUCCESS) + return(EINVAL); + } else { + /* shrink stack */ + } + } + break; + + case RLIMIT_NOFILE: + /* + * Only root can get the maxfiles limits, as it is systemwide resource + */ + if (is_suser()) { + if (limp->rlim_cur > maxfiles) + limp->rlim_cur = maxfiles; + if (limp->rlim_max > maxfiles) + limp->rlim_max = maxfiles; + } else { + if (limp->rlim_cur > OPEN_MAX) + limp->rlim_cur = OPEN_MAX; + if (limp->rlim_max > OPEN_MAX) + limp->rlim_max = OPEN_MAX; + } + break; + + case RLIMIT_NPROC: + /* + * Only root can get the maxproc limits, as it is systemwide resource + */ + if (is_suser()) { + if (limp->rlim_cur > maxproc) + limp->rlim_cur = maxproc; + if (limp->rlim_max > maxproc) + limp->rlim_max = maxproc; + } else { + if (limp->rlim_cur > CHILD_MAX) + limp->rlim_cur = CHILD_MAX; + if (limp->rlim_max > CHILD_MAX) + limp->rlim_max = CHILD_MAX; + } + break; + } + *alimp = *limp; + return (0); +} + +struct getrlimit_args { + u_int which; + struct rlimit * rlp; +}; +/* ARGSUSED */ +int +getrlimit(p, uap, retval) + struct proc *p; + register struct getrlimit_args *uap; + register_t *retval; +{ + + if (uap->which >= RLIM_NLIMITS) + return (EINVAL); + return (copyout((caddr_t)&p->p_rlimit[uap->which], + (caddr_t)uap->rlp, sizeof (struct rlimit))); +} + +/* + * Transform the running time and tick information in proc p into user, + * system, and interrupt time usage. 
+ */ +void +calcru(p, up, sp, ip) + register struct proc *p; + register struct timeval *up; + register struct timeval *sp; + register struct timeval *ip; +{ + task_t task; + + timerclear(up); + timerclear(sp); + if (ip != NULL) + timerclear(ip); + + task = p->task; + if (task) { + task_basic_info_data_t tinfo; + task_thread_times_info_data_t ttimesinfo; + int task_info_stuff, task_ttimes_stuff; + struct timeval ut,st; + + task_info_stuff = TASK_BASIC_INFO_COUNT; + task_info(task, TASK_BASIC_INFO, + &tinfo, &task_info_stuff); + ut.tv_sec = tinfo.user_time.seconds; + ut.tv_usec = tinfo.user_time.microseconds; + st.tv_sec = tinfo.system_time.seconds; + st.tv_usec = tinfo.system_time.microseconds; + timeradd(&ut,up,up); + timeradd(&st,up,up); + + task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT; + task_info(task, TASK_THREAD_TIMES_INFO, + &ttimesinfo, &task_ttimes_stuff); + + ut.tv_sec = ttimesinfo.user_time.seconds; + ut.tv_usec = ttimesinfo.user_time.microseconds; + st.tv_sec = ttimesinfo.system_time.seconds; + st.tv_usec = ttimesinfo.system_time.microseconds; + timeradd(&ut,up,up); + timeradd(&st,up,up); + } +} + +struct getrusage_args { + int who; + struct rusage * rusage; +}; +/* ARGSUSED */ +int +getrusage(p, uap, retval) + register struct proc *p; + register struct getrusage_args *uap; + register_t *retval; +{ + struct rusage *rup, rubuf; + + switch (uap->who) { + + case RUSAGE_SELF: + rup = &p->p_stats->p_ru; + calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); + rubuf = *rup; + break; + + case RUSAGE_CHILDREN: + rup = &p->p_stats->p_cru; + rubuf = *rup; + break; + + default: + return (EINVAL); + } + return (copyout((caddr_t)&rubuf, (caddr_t)uap->rusage, + sizeof (struct rusage))); +} + +void +ruadd(ru, ru2) + register struct rusage *ru, *ru2; +{ + register long *ip, *ip2; + register int i; + + timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime); + timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime); + if (ru->ru_maxrss < ru2->ru_maxrss) + ru->ru_maxrss = 
ru2->ru_maxrss; + ip = &ru->ru_first; ip2 = &ru2->ru_first; + for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) + *ip++ += *ip2++; +} + +/* + * Make a copy of the plimit structure. + * We share these structures copy-on-write after fork, + * and copy when a limit is changed. + */ +struct plimit * +limcopy(lim) + struct plimit *lim; +{ + register struct plimit *copy; + + MALLOC_ZONE(copy, struct plimit *, + sizeof(struct plimit), M_SUBPROC, M_WAITOK); + bcopy(lim->pl_rlimit, copy->pl_rlimit, + sizeof(struct rlimit) * RLIM_NLIMITS); + copy->p_lflags = 0; + copy->p_refcnt = 1; + return (copy); +} diff --git a/bsd/kern/kern_shutdown.c b/bsd/kern/kern_shutdown.c new file mode 100644 index 000000000..e9a2a2c52 --- /dev/null +++ b/bsd/kern/kern_shutdown.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: bsd/kern/kern_shutdown.c + * + * Copyright (C) 1989, NeXT, Inc. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if NCPUS > 1 +#include +#include +#include +#endif /* NCPUS > 1 */ +#include +#include +#include +#include + +int waittime = -1; + +void +boot(paniced, howto, command) + int paniced, howto; + char *command; +{ + register int i; + int s; + struct proc *p = current_proc(); /* XXX */ + int hostboot_option=0; + int funnel_state; + + static void proc_shutdown(); + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + /* md_prepare_for_shutdown(paniced, howto, command); */ + + if ((howto&RB_NOSYNC)==0 && waittime < 0) { + int iter, nbusy; + + waittime = 0; + + printf("syncing disks... "); + + /* + * Release vnodes held by texts before sync. + */ + + /* handle live procs (deallocate their root and current directories). */ + proc_shutdown(); + + sync(p, (void *)NULL, (int *)NULL); + + /* Release vnodes from the VM object cache */ + ubc_unmountall(); + + IOSleep( 1 * 1000 ); + + /* + * Unmount filesystems + */ + if (panicstr == 0) + vfs_unmountall(); + + /* Wait for the buffer cache to clean remaining dirty buffers */ + for (iter = 0; iter < 20; iter++) { + nbusy = count_busy_buffers(); + if (nbusy == 0) + break; + printf("%d ", nbusy); + IOSleep( 4 * nbusy ); + } + if (nbusy) + printf("giving up\n"); + else + printf("done\n"); + } + + /* + * Can't just use an splnet() here to disable the network + * because that will lock out softints which the disk + * drivers depend on to finish DMAs. 
+ */ + if_down_all(); + + if (howto & RB_POWERDOWN) + hostboot_option = HOST_REBOOT_HALT; + if (howto & RB_HALT) + hostboot_option = HOST_REBOOT_HALT; + if (paniced == RB_PANIC) + hostboot_option = HOST_REBOOT_HALT; + + if (hostboot_option == HOST_REBOOT_HALT) + IOSleep( 1 * 1000 ); + + host_reboot(host_priv_self(), hostboot_option); + + thread_funnel_set(kernel_flock, FALSE); +} + +/* + * proc_shutdown() + * + * Shutdown down proc system (release references to current and root + * dirs for each process). + * + * POSIX modifications: + * + * For POSIX fcntl() file locking call vno_lockrelease() on + * the file to release all of its record locks, if any. + */ + +static void +proc_shutdown() +{ + struct proc *p, *self; + struct vnode **cdirp, **rdirp, *vp; + int restart, i, TERM_catch; + + /* + * Kill as many procs as we can. (Except ourself...) + */ + self = (struct proc *)(get_bsdtask_info(current_task())); + + /* + * Suspend /etc/init + */ + p = pfind(1); + if (p && p != self) + task_suspend(p->task); /* stop init */ + + /* + * Suspend mach_init + */ + p = pfind(2); + if (p && p != self) + task_suspend(p->task); /* stop mach_init */ + + printf("Killing all processes "); + + /* + * send SIGTERM to those procs interested in catching one + */ + for (p = allproc.lh_first; p; p = p->p_list.le_next) { + if (((p->p_flag&P_SYSTEM) == 0) && (p->p_pptr->p_pid != 0) && (p != self)) { + if (p->p_sigcatch & sigmask(SIGTERM)) + psignal(p, SIGTERM); + } + } + /* + * now wait for up to 30 seconds to allow those procs catching SIGTERM + * to digest it + * as soon as these procs have exited, we'll continue on to the next step + */ + for (i = 0; i < 300; i++) { + /* + * sleep for a tenth of a second + * and then check to see if the tasks that were sent a + * SIGTERM have exited + */ + IOSleep(100); + TERM_catch = 0; + + for (p = allproc.lh_first; p; p = p->p_list.le_next) { + if (((p->p_flag&P_SYSTEM) == 0) && (p->p_pptr->p_pid != 0) && (p != self)) { + if (p->p_sigcatch & 
sigmask(SIGTERM)) + TERM_catch++; + } + } + if (TERM_catch == 0) + break; + } + + /* + * send a SIGKILL to all the procs still hanging around + */ + for (p = allproc.lh_first; p; p = p->p_list.le_next) { + if (((p->p_flag&P_SYSTEM) == 0) && (p->p_pptr->p_pid != 0) && (p != self)) + psignal(p, SIGKILL); + } + /* + * wait for up to 60 seconds to allow these procs to exit normally + */ + for (i = 0; i < 300; i++) { + IOSleep(200); /* double the time from 100 to 200 for NFS requests in particular */ + + for (p = allproc.lh_first; p; p = p->p_list.le_next) { + if (((p->p_flag&P_SYSTEM) == 0) && (p->p_pptr->p_pid != 0) && (p != self)) + break; + } + if (!p) + break; + } + + /* + * if we still have procs that haven't exited, then brute force 'em + */ + p = allproc.lh_first; + while (p) { + if ((p->p_flag&P_SYSTEM) || (p->p_pptr->p_pid == 0) || (p == self)) { + p = p->p_list.le_next; + } + else { + /* + * NOTE: following code ignores sig_lock and plays + * with exit_thread correctly. This is OK unless we + * are a multiprocessor, in which case I do not + * understand the sig_lock. This needs to be fixed. + * XXX + */ + if (p->exit_thread) { /* someone already doing it */ + thread_block(0);/* give him a chance */ + } + else { + p->exit_thread = current_thread(); + printf("."); + exit1(p, 1); + } + p = allproc.lh_first; + } + } + printf("\n"); + /* + * Forcibly free resources of what's left. + */ + p = allproc.lh_first; + while (p) { + /* + * Close open files and release open-file table. + * This may block! + */ +#ifdef notyet + /* panics on reboot due to "zfree: non-allocated memory in collectable zone" message */ + fdfree(p); +#endif /* notyet */ + p = p->p_list.le_next; + } + /* Wait for the reaper thread to run, and clean up what we have done + * before we proceed with the hardcore shutdown. This reduces the race + * between kill_tasks and the reaper thread. 
+ */ + /* thread_wakeup(&reaper_queue); */ + /* IOSleep( 1 * 1000); */ + printf("continuing\n"); +} + diff --git a/bsd/kern/kern_sig.c b/bsd/kern/kern_sig.c new file mode 100644 index 000000000..92f02ba12 --- /dev/null +++ b/bsd/kern/kern_sig.c @@ -0,0 +1,1834 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995-1998 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kern_sig.c 8.7 (Berkeley) 4/18/94 + */ + +#define SIGPROP /* include signal properties table */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include /* for coredump */ +#include /* for APC support */ +#include +#include + +void stop __P((struct proc *p)); +int cansignal __P((struct proc *, struct pcred *, struct proc *, int)); +int killpg1 __P((struct proc *, int, int, int)); +void sigexit_locked __P((struct proc *, int)); +void setsigvec __P((struct proc *, int, struct sigaction *)); +void exit1 __P((struct proc *, int)); +int signal_lock __P((struct proc *)); +int signal_unlock __P((struct proc *)); +void signal_setast __P((thread_act_t *)); +void signal_clearast __P((thread_act_t *)); +void psignal_lock __P((struct proc *, int, int, int)); + +#if SIGNAL_DEBUG +void ram_printf __P((int)); +int ram_debug=0; +unsigned int rdebug_proc=0; +void +ram_printf(int x) +{ + printf("x is %d",x); + +} + +#endif /* SIGNAL_DEBUG */ +int +signal_lock(struct proc *p) +{ +#if SIGNAL_DEBUG +#ifdef __ppc__ + { + int register sp, *fp, numsaved; + + __asm__ volatile("mr %0,r1" : "=r" (sp)); + + fp = (int *)*((int *)sp); + for (numsaved = 0; numsaved < 3; numsaved++) { + p->lockpc[numsaved] = fp[2]; + if ((int)fp <= 0) + break; + fp = (int *)*fp; + } + } +#endif /* __ppc__ */ +#endif /* SIGNAL_DEBUG */ + + return(lockmgr(&p->signal_lock, LK_EXCLUSIVE, 0, (struct proc *)0)); +} + +int +signal_unlock(struct proc *p) +{ +#if SIGNAL_DEBUG +#ifdef __ppc__ + { + int register sp, *fp, numsaved; + + __asm__ volatile("mr %0,r1" : "=r" (sp)); + + fp = (int *)*((int *)sp); + for (numsaved = 0; numsaved < 3; numsaved++) { + p->unlockpc[numsaved] = fp[2]; + if ((int)fp <= 0) + break; + fp = (int *)*fp; + } + } +#endif /* __ppc__ */ +#endif /* SIGNAL_DEBUG */ + + return(lockmgr(&p->signal_lock, LK_RELEASE, 
(simple_lock_t)0, (struct proc *)0)); +} + +void +signal_setast(sig_actthread) +thread_act_t *sig_actthread; +{ + thread_ast_set(sig_actthread, AST_BSD); + if ((thread_act_t *)current_act() == sig_actthread) + ast_on(AST_BSD); +} + +void +signal_clearast(sig_actthread) +thread_act_t *sig_actthread; +{ + thread_ast_clear(sig_actthread, AST_BSD); + if ((thread_act_t *)current_act() == sig_actthread) + ast_off(AST_BSD); +} + +/* + * Can process p, with pcred pc, send the signal signum to process q? + */ +int +cansignal(p, pc, q, signum) + struct proc *p; + struct pcred *pc; + struct proc *q; + int signum; +{ + if (pc->pc_ucred->cr_uid == 0) + return (1); /* root can always signal */ + + if (signum == SIGCONT && q->p_session == p->p_session) + return (1); /* SIGCONT in session */ + + /* + * Using kill(), only certain signals can be sent to setugid + * child processes + */ + if (q->p_flag & P_SUGID) { + switch (signum) { + case 0: + case SIGKILL: + case SIGINT: + case SIGTERM: + case SIGSTOP: + case SIGTTIN: + case SIGTTOU: + case SIGTSTP: + case SIGHUP: + case SIGUSR1: + case SIGUSR2: + if (pc->p_ruid == q->p_cred->p_ruid || + pc->pc_ucred->cr_uid == q->p_cred->p_ruid || + pc->p_ruid == q->p_ucred->cr_uid || + pc->pc_ucred->cr_uid == q->p_ucred->cr_uid) + return (1); + } + return (0); + } + + /* XXX + * because the P_SUGID test exists, this has extra tests which + * could be removed. 
+ */ + if (pc->p_ruid == q->p_cred->p_ruid || + pc->p_ruid == q->p_cred->p_svuid || + pc->pc_ucred->cr_uid == q->p_cred->p_ruid || + pc->pc_ucred->cr_uid == q->p_cred->p_svuid || + pc->p_ruid == q->p_ucred->cr_uid || + pc->pc_ucred->cr_uid == q->p_ucred->cr_uid) + return (1); + return (0); +} + +struct sigaction_args { + int signum; + struct sigaction *nsa; + struct sigaction *osa; +}; + +/* ARGSUSED */ +int +sigaction(p, uap, retval) + struct proc *p; + register struct sigaction_args *uap; + register_t *retval; +{ + struct sigaction vec; + register struct sigaction *sa; + register struct sigacts *ps = p->p_sigacts; + register int signum; + int bit, error; + + signum = uap->signum; + if (signum <= 0 || signum >= NSIG || + signum == SIGKILL || signum == SIGSTOP) + return (EINVAL); + sa = &vec; + if (uap->osa) { + sa->sa_handler = ps->ps_sigact[signum]; + sa->sa_mask = ps->ps_catchmask[signum]; + bit = sigmask(signum); + sa->sa_flags = 0; + if ((ps->ps_sigonstack & bit) != 0) + sa->sa_flags |= SA_ONSTACK; + if ((ps->ps_sigintr & bit) == 0) + sa->sa_flags |= SA_RESTART; + if (p->p_flag & P_NOCLDSTOP) + sa->sa_flags |= SA_NOCLDSTOP; + if (error = copyout((caddr_t)sa, (caddr_t)uap->osa, + sizeof (vec))) + return (error); + } + if (uap->nsa) { + if (error = copyin((caddr_t)uap->nsa, (caddr_t)sa, + sizeof (vec))) + return (error); + setsigvec(p, signum, sa); + } + return (0); +} + +static int +reset_sigbits(thread_act_t th_act, int bit) +{ +struct uthread *ut; + ut = get_bsdthread_info(th_act); + if (ut) { + ut->uu_sig &= ~bit; + } +} + +int +clear_sigbits (struct proc *p, int bit) +{ +task_t task = p->task; + + p->p_siglist &= ~(bit); + task_act_iterate_wth_args(task, reset_sigbits, bit); + return(0); +} + + +void +setsigvec(p, signum, sa) + register struct proc *p; + int signum; + register struct sigaction *sa; +{ + register struct sigacts *ps = p->p_sigacts; + register int bit; + + bit = sigmask(signum); + /* + * Change setting atomically. 
+ */ + ps->ps_sigact[signum] = sa->sa_handler; + ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask; + if ((sa->sa_flags & SA_RESTART) == 0) + ps->ps_sigintr |= bit; + else + ps->ps_sigintr &= ~bit; + if (sa->sa_flags & SA_ONSTACK) + ps->ps_sigonstack |= bit; + else + ps->ps_sigonstack &= ~bit; + if (sa->sa_flags & SA_USERTRAMP) + ps->ps_usertramp |= bit; + else + ps->ps_usertramp &= ~bit; + if (signum == SIGCHLD) { + if (sa->sa_flags & SA_NOCLDSTOP) + p->p_flag |= P_NOCLDSTOP; + else + p->p_flag &= ~P_NOCLDSTOP; + } + /* + * Set bit in p_sigignore for signals that are set to SIG_IGN, + * and for signals set to SIG_DFL where the default is to ignore. + * However, don't put SIGCONT in p_sigignore, + * as we have to restart the process. + */ + if (sa->sa_handler == SIG_IGN || + (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) { + p->p_siglist &= ~bit; /* never to be seen again */ + /* + * If this is a thread signal, clean out the + * threads as well. + */ + if (bit & threadmask) { + register task_t task = p->task; + + task_act_iterate_wth_args(task, reset_sigbits, bit); + } + if (signum != SIGCONT) + p->p_sigignore |= bit; /* easier in psignal */ + p->p_sigcatch &= ~bit; + } else { + p->p_sigignore &= ~bit; + if (sa->sa_handler == SIG_DFL) + p->p_sigcatch &= ~bit; + else + p->p_sigcatch |= bit; + } +} + +/* + * Initialize signal state for process 0; + * set to ignore signals that are ignored by default. + */ +void +siginit(p) + struct proc *p; +{ + register int i; + + for (i = 0; i < NSIG; i++) + if (sigprop[i] & SA_IGNORE && i != SIGCONT) + p->p_sigignore |= sigmask(i); +} + +/* + * Reset signals for an exec of the specified process. + */ +void +execsigs(p) + register struct proc *p; +{ + register struct sigacts *ps = p->p_sigacts; + register int nc, mask; + + /* + * Reset caught signals. Held signals remain held + * through p_sigmask (unless they were caught, + * and are now ignored by default). 
+ */ + while (p->p_sigcatch) { + nc = ffs((long)p->p_sigcatch); + mask = sigmask(nc); + p->p_sigcatch &= ~mask; + if (sigprop[nc] & SA_IGNORE) { + if (nc != SIGCONT) + p->p_sigignore |= mask; + p->p_siglist &= ~mask; + } + ps->ps_sigact[nc] = SIG_DFL; + } + /* + * Reset stack state to the user stack. + * Clear set of signals caught on the signal stack. + */ + ps->ps_sigstk.ss_flags = SA_DISABLE; + ps->ps_sigstk.ss_size = 0; + ps->ps_sigstk.ss_sp = 0; + ps->ps_flags = 0; +} + +/* + * Manipulate signal mask. + * Note that we receive new mask, not pointer, + * and return old mask as return value; + * the library stub does the rest. + */ +struct sigprocmask_args { + int how; + sigset_t mask; +}; +int +sigprocmask(p, uap, retval) + register struct proc *p; + struct sigprocmask_args *uap; + register_t *retval; +{ + int error = 0; + + *retval = p->p_sigmask; + + switch (uap->how) { + case SIG_BLOCK: + p->p_sigmask |= uap->mask &~ sigcantmask; + break; + + case SIG_UNBLOCK: + p->p_sigmask &= ~(uap->mask); + signal_setast(current_act()); + break; + + case SIG_SETMASK: + p->p_sigmask = uap->mask &~ sigcantmask; + signal_setast(current_act()); + break; + + default: + error = EINVAL; + break; + } + return (error); +} + +/* ARGSUSED */ +int +sigpending(p, uap, retval) + struct proc *p; + void *uap; + register_t *retval; +{ + + *retval = p->p_siglist; + return (0); +} + +#if COMPAT_43 +/* + * Generalized interface signal handler, 4.3-compatible. 
+ */ +struct osigvec_args { + int signum; + struct sigvec *nsv; + struct sigvec *osv; +}; +/* ARGSUSED */ +int +osigvec(p, uap, retval) + struct proc *p; + register struct osigvec_args *uap; + register_t *retval; +{ + struct sigvec vec; + register struct sigacts *ps = p->p_sigacts; + register struct sigvec *sv; + register int signum; + int bit, error; + + signum = uap->signum; + if (signum <= 0 || signum >= NSIG || + signum == SIGKILL || signum == SIGSTOP) + return (EINVAL); + sv = &vec; + if (uap->osv) { + *(sig_t *)&sv->sv_handler = ps->ps_sigact[signum]; + sv->sv_mask = ps->ps_catchmask[signum]; + bit = sigmask(signum); + sv->sv_flags = 0; + if ((ps->ps_sigonstack & bit) != 0) + sv->sv_flags |= SV_ONSTACK; + if ((ps->ps_sigintr & bit) != 0) + sv->sv_flags |= SV_INTERRUPT; + if (p->p_flag & P_NOCLDSTOP) + sv->sv_flags |= SA_NOCLDSTOP; + if (error = copyout((caddr_t)sv, (caddr_t)uap->osv, + sizeof (vec))) + return (error); + } + if (uap->nsv) { + if (error = copyin((caddr_t)uap->nsv, (caddr_t)sv, + sizeof (vec))) + return (error); + sv->sv_flags ^= SA_RESTART; /* opposite of SV_INTERRUPT */ + setsigvec(p, signum, (struct sigaction *)sv); + } + return (0); +} + +struct osigblock_args { + int mask; +}; +int +osigblock(p, uap, retval) + register struct proc *p; + struct osigblock_args *uap; + register_t *retval; +{ + + *retval = p->p_sigmask; + p->p_sigmask |= uap->mask &~ sigcantmask; + return (0); +} + +struct osigsetmask_args { + int mask; +}; +int +osigsetmask(p, uap, retval) + struct proc *p; + struct osigsetmask_args *uap; + register_t *retval; +{ + + *retval = p->p_sigmask; + p->p_sigmask = uap->mask &~ sigcantmask; + return (0); +} +#endif /* COMPAT_43 */ + +/* + * Suspend process until signal, providing mask to be set + * in the meantime. Note nonstandard calling convention: + * libc stub passes mask, not pointer, to save a copyin. 
+ */ + +int +sigcontinue(error) +{ + unix_syscall_return(EINTR); +} + +struct sigsuspend_args { + int mask; +}; + +/* ARGSUSED */ +int +sigsuspend(p, uap, retval) + register struct proc *p; + struct sigsuspend_args *uap; + register_t *retval; +{ + register struct sigacts *ps = p->p_sigacts; + + /* + * When returning from sigpause, we want + * the old mask to be restored after the + * signal handler has finished. Thus, we + * save it here and mark the sigacts structure + * to indicate this. + */ + ps->ps_oldmask = p->p_sigmask; + ps->ps_flags |= SAS_OLDMASK; + p->p_sigmask = uap->mask &~ sigcantmask; + (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue); + /* always return EINTR rather than ERESTART... */ + return (EINTR); +} + +#if COMPAT_43 +struct osigstack_args { + struct sigstack *nss; + struct sigstack *oss; +}; +/* ARGSUSED */ +int +osigstack(p, uap, retval) + struct proc *p; + register struct osigstack_args *uap; + register_t *retval; +{ + struct sigstack ss; + struct sigacts *psp; + int error = 0; + + psp = p->p_sigacts; + ss.ss_sp = psp->ps_sigstk.ss_sp; + ss.ss_onstack = psp->ps_sigstk.ss_flags & SA_ONSTACK; + if (uap->oss && (error = copyout((caddr_t)&ss, + (caddr_t)uap->oss, sizeof (struct sigstack)))) + return (error); + if (uap->nss && (error = copyin((caddr_t)uap->nss, + (caddr_t)&ss, sizeof (ss))) == 0) { + psp->ps_sigstk.ss_sp = ss.ss_sp; + psp->ps_sigstk.ss_size = 0; + psp->ps_sigstk.ss_flags |= ss.ss_onstack & SA_ONSTACK; + psp->ps_flags |= SAS_ALTSTACK; + } + return (error); +} +#endif /* COMPAT_43 */ + +struct sigaltstack_args { + struct sigaltstack *nss; + struct sigaltstack *oss; +}; +/* ARGSUSED */ +int +sigaltstack(p, uap, retval) + struct proc *p; + register struct sigaltstack_args *uap; + register_t *retval; +{ + struct sigacts *psp; + struct sigaltstack ss; + int error; + + psp = p->p_sigacts; + if ((psp->ps_flags & SAS_ALTSTACK) == 0) + psp->ps_sigstk.ss_flags |= SA_DISABLE; + if (uap->oss && (error = 
copyout((caddr_t)&psp->ps_sigstk, + (caddr_t)uap->oss, sizeof (struct sigaltstack)))) + return (error); + if (uap->nss == 0) + return (0); + if (error = copyin((caddr_t)uap->nss, (caddr_t)&ss, + sizeof (ss))) + return (error); + if (ss.ss_flags & SA_DISABLE) { + if (psp->ps_sigstk.ss_flags & SA_ONSTACK) + return (EINVAL); + psp->ps_flags &= ~SAS_ALTSTACK; + psp->ps_sigstk.ss_flags = ss.ss_flags; + return (0); + } + if (ss.ss_size < MINSIGSTKSZ) + return (ENOMEM); + psp->ps_flags |= SAS_ALTSTACK; + psp->ps_sigstk= ss; + return (0); +} + +struct kill_args { + int pid; + int signum; +}; +/* ARGSUSED */ +int +kill(cp, uap, retval) + register struct proc *cp; + register struct kill_args *uap; + register_t *retval; +{ + register struct proc *p; + register struct pcred *pc = cp->p_cred; + + if ((u_int)uap->signum >= NSIG) + return (EINVAL); + if (uap->pid > 0) { + /* kill single process */ + if ((p = pfind(uap->pid)) == NULL) + return (ESRCH); + if (!cansignal(cp, pc, p, uap->signum)) + return (EPERM); + if (uap->signum) + psignal(p, uap->signum); + return (0); + } + switch (uap->pid) { + case -1: /* broadcast signal */ + return (killpg1(cp, uap->signum, 0, 1)); + case 0: /* signal own process group */ + return (killpg1(cp, uap->signum, 0, 0)); + default: /* negative explicit process group */ + return (killpg1(cp, uap->signum, -(uap->pid), 0)); + } + /* NOTREACHED */ +} + +#if COMPAT_43 +struct okillpg_args { + int pgid; + int signum; +}; +/* ARGSUSED */ +int +okillpg(p, uap, retval) + struct proc *p; + register struct okillpg_args *uap; + register_t *retval; +{ + + if ((u_int)uap->signum >= NSIG) + return (EINVAL); + return (killpg1(p, uap->signum, uap->pgid, 0)); +} +#endif /* COMPAT_43 */ + +/* + * Common code for kill process group/broadcast kill. + * cp is calling process. 
+ */ +int +killpg1(cp, signum, pgid, all) + register struct proc *cp; + int signum, pgid, all; +{ + register struct proc *p; + register struct pcred *pc = cp->p_cred; + struct pgrp *pgrp; + int nfound = 0; + + if (all) { + /* + * broadcast + */ + for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { + if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || + p == cp || !cansignal(cp, pc, p, signum)) + continue; + nfound++; + if (signum) + psignal(p, signum); + } + } else { + if (pgid == 0) + /* + * zero pgid means send to my process group. + */ + pgrp = cp->p_pgrp; + else { + pgrp = pgfind(pgid); + if (pgrp == NULL) + return (ESRCH); + } + for (p = pgrp->pg_members.lh_first; p != 0; + p = p->p_pglist.le_next) { + if (p->p_pid <= 1 || p->p_flag & P_SYSTEM || + p->p_stat == SZOMB || + !cansignal(cp, pc, p, signum)) + continue; + nfound++; + if (signum) + psignal(p, signum); + } + } + return (nfound ? 0 : ESRCH); +} + +/* + * Send a signal to a process group. + */ +void +gsignal(pgid, signum) + int pgid, signum; +{ + struct pgrp *pgrp; + + if (pgid && (pgrp = pgfind(pgid))) + pgsignal(pgrp, signum, 0); +} + +/* + * Send a signal to a process group. If checktty is 1, + * limit to members which have a controlling terminal. + */ +void +pgsignal(pgrp, signum, checkctty) + struct pgrp *pgrp; + int signum, checkctty; +{ + register struct proc *p; + + if (pgrp) + for (p = pgrp->pg_members.lh_first; p != 0; + p = p->p_pglist.le_next) + if (checkctty == 0 || p->p_flag & P_CONTROLT) + psignal(p, signum); +} + +/* + * Send a signal caused by a trap to a specific thread. 
+ */ +void +threadsignal(sig_actthread, signum, code) + register thread_act_t *sig_actthread; + register int signum; + u_long code; +{ + register struct uthread *uth; + register struct task * sig_task; + register struct proc *p ; + int mask; + + if ((u_int)signum >= NSIG || signum == 0) + return; + + mask = sigmask(signum); + if ((mask & threadmask) == 0) + return; + sig_task = get_threadtask(sig_actthread); + /* p = sig_task->proc; */ + p = (struct proc *)(get_bsdtask_info(sig_task)); + + if (!(p->p_flag & P_TRACED) && (p->p_sigignore & mask)) + return; + + uth = get_bsdthread_info(sig_actthread); + uth->uu_sig |= mask; + uth->uu_code = code; + /* mark on process as well */ + p->p_siglist |= mask; + signal_setast(sig_actthread); +} + + +void +psignal_pend(p) + register struct proc *p; +{ + boolean_t funnel_state; + register int sigbits, mask, signum; + + thread_funnel_set(kernel_flock, TRUE); + + if (p->p_sigpending == 0) + return; + + + signal_lock(p); + + for (;;) { + sigbits = p->p_sigpending; + if (sigbits == 0) + goto out; + signum = ffs((long)sigbits); + mask = sigmask(signum); + p->p_sigpending &= ~mask; + + psignal_lock(p, signum, 0, 0); + + } +out: + p->p_flag &= ~P_SIGTHR; + signal_unlock(p); + thread_funnel_set(kernel_flock, FALSE); +} + +void +psignal(p, signum) + register struct proc *p; + register int signum; +{ + psignal_lock(p, signum, 1, 1); +} + + +/* + * Send the signal to the process. If the signal has an action, the action + * is usually performed by the target process rather than the caller; we add + * the signal to the set of pending signals for the process. + * + * Exceptions: + * o When a stop signal is sent to a sleeping process that takes the + * default action, the process is stopped without awakening it. + * o SIGCONT restarts stopped processes (or puts them back to sleep) + * regardless of the signal action (eg, blocked or ignored). + * + * Other ignored signals are discarded immediately. 
+ */ +void +psignal_lock(p, signum, withlock, pend) + register struct proc *p; + register int signum; + register int withlock; + register int pend; +{ + register int s, prop; + register sig_t action; + thread_act_t sig_thread_act; + thread_t sig_thread; + register task_t sig_task; + register thread_t cur_thread; + thread_act_t *cur_act; + int mask; + kern_return_t kret; + + if ((u_int)signum >= NSIG || signum == 0) + panic("psignal signal number"); + mask = sigmask(signum); + prop = sigprop[signum]; + +#if SIGNAL_DEBUG + if(rdebug_proc && (p == rdebug_proc)) { + ram_printf(3); + } +#endif /* SIGNAL_DEBUG */ + /* + * We will need the task pointer later. Grab it now to + * check for a zombie process. Also don't send signals + * to kernel internal tasks. + */ + if (((sig_task = p->task) == TASK_NULL) || is_kerneltask(sig_task)) + return; + + /* + * do not send signals to the process that has the thread + * doing a reboot(). Not doing so will mark that thread aborted + * and can cause IO failures wich will cause data loss. + */ + if (ISSET(p->p_flag, P_REBOOT)) + return; + + /* + * if the traced process is blocked waiting for + * gdb then do not block the caller just pend + * the signal. Setup a callout to process the + * pended signal if not alreadu set + */ + if (pend && (p->p_flag & P_TRACED) && p->sigwait) { + p->p_sigpending |= mask; + if (!(p->p_flag & P_SIGTHR)) { + p->p_flag |= P_SIGTHR; + thread_call_func((thread_call_func_t)psignal_pend, p, + FALSE); + } + return; + } + + if (withlock) + signal_lock(p); + + /* + * If proc is traced, always give parent a chance. + */ + if (p->p_flag & P_TRACED) + action = SIG_DFL; + else { + /* + * If the signal is being ignored, + * then we forget about it immediately. + * (Note: we don't set SIGCONT in p_sigignore, + * and if it is set to SIG_IGN, + * action will be SIG_DFL here.) 
+ */ + if (p->p_sigignore & mask) + goto psigout; + if (p->p_sigmask & mask) + action = SIG_HOLD; + else if (p->p_sigcatch & mask) + action = SIG_CATCH; + else + action = SIG_DFL; + } + + if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) && + (p->p_flag & P_TRACED) == 0) + p->p_nice = NZERO; + + if (prop & SA_CONT) + p->p_siglist &= ~stopsigmask; + + if (prop & SA_STOP) { + /* + * If sending a tty stop signal to a member of an orphaned + * process group, discard the signal here if the action + * is default; don't stop the process below if sleeping, + * and don't clear any pending SIGCONT. + */ + if (prop & SA_TTYSTOP && p->p_pgrp->pg_jobc == 0 && + action == SIG_DFL) + goto psigout; + p->p_siglist &= ~contsigmask; + } + p->p_siglist |= mask; + + /* + * Defer further processing for signals which are held, + * except that stopped processes must be continued by SIGCONT. + */ + if (action == SIG_HOLD && ((prop & SA_CONT) == 0 || p->p_stat != SSTOP)) + goto psigout; + + /* + * Deliver the signal to the first thread in the task. This + * allows single threaded applications which use signals to + * be able to be linked with multithreaded libraries. We have + * an implicit reference to the current_thread, but need + * an explicit one otherwise. The thread reference keeps + * the corresponding task data structures around too. This + * reference is released by thread_deallocate. 
+ */ + + cur_thread = current_thread(); /* this is a shuttle */ + cur_act = current_act(); + + /* If successful return with ast set */ + kret = (kern_return_t)get_signalact(sig_task, + &sig_thread_act, &sig_thread, 1); + + if ((kret != KERN_SUCCESS) || (sig_thread_act == THREAD_NULL)) { + /* XXXX FIXME + /* if it is sigkill, may be we should + * inject a thread to terminate + */ + printf("WARNING: no activation in psignal\n"); +#if SIGNAL_DEBUG + ram_printf(1); +#endif /* SIGNAL_DEBUG */ + goto psigout; + } + + if (sig_thread == THREAD_NULL) { + printf("WARNING: valid act; but no shutte in psignal\n"); +#if 0 + /* FIXME : NO VALID SHUTTLE */ + goto psigout; +#endif + } + + /* + * SIGKILL priority twiddling moved here from above because + * it needs sig_thread. Could merge it into large switch + * below if we didn't care about priority for tracing + * as SIGKILL's action is always SIG_DFL. + */ + if ((signum == SIGKILL) && (p->p_nice > NZERO)) { + p->p_nice = NZERO; +#if XXX + /* + * we need to make changes here to get nice to work + * reset priority to BASEPRI_USER + */ +#endif + } + + /* + * Process is traced - wake it up (if not already + * stopped) so that it can discover the signal in + * issig() and stop for the parent. + */ + if (p->p_flag & P_TRACED) { + if (p->p_stat != SSTOP) + goto run; + else + goto psigout; + } + + if (action != SIG_DFL) { + /* + * User wants to catch the signal. + * Wake up the thread, but don't un-suspend it + * (except for SIGCONT). + */ + if (prop & SA_CONT) + (void) task_resume(sig_task); + goto run; + } else { + /* Default action - varies */ + if (mask & stopsigmask) { + /* + * These are the signals which by default + * stop a process. + * + * Don't clog system with children of init + * stopped from the keyboard. + */ + if (!(prop & SA_STOP) && p->p_pptr == initproc) { + psignal_lock(p, SIGKILL, 0, 1); + p->p_siglist &= ~mask; + goto psigout; + } + + /* + * Stop the task. 
+ */ + if (!is_thread_running(sig_thread)) { + /* Thread is not running + * If task hasn't already been stopped by + * a signal, stop it. + */ + p->p_siglist &= ~mask; + if (get_task_userstop(sig_task) == 0) { + /* + * p_cursig must not be set, because + * it will be psig()'d if it is not + * zero, and the signal is being + * handled here. But save the signal + * in p_stopsig so WUNTRACED + * option to wait can find it. + */ + p->p_xstat = signum; + if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) + psignal(p->p_pptr, SIGCHLD); + stop(p); + } +#if 0 + /* unconditional check is bad */ + signal_clearast(sig_thread_act); +#endif + goto psigout; + } else { + if (p->p_stat != SZOMB) + signal_setast(cur_act); + goto psigout; + } + } + + switch (signum) { + /* + * Signals ignored by default have been dealt + * with already, since their bits are on in + * p_sigignore. + */ + + case SIGKILL: + /* + * Kill signal always sets process running and + * unsuspends it. + */ + /* + * Process will be running after 'run' + */ + p->p_stat = SRUN; + + thread_abort(sig_thread_act); + + goto psigout; + + case SIGCONT: + /* + * Let the process run. If it's sleeping on an + * event, it remains so. + */ + if (p->p_flag & P_TTYSLEEP) { + p->p_flag &= ~P_TTYSLEEP; + wakeup(&p->p_siglist); + } else { + (void) task_resume(sig_task); + } + p->p_siglist &= ~mask; + p->p_stat = SRUN; +#if 0 + /* do not clear AST as tcsh is sendig SIGTERM followed by + * SIGCONT and the ast was getting cleared unconditinally + * This is not right. + */ + signal_clearast(sig_thread_act); +#endif + goto psigout; + + default: + /* + * All other signals wake up the process, but don't + * resume it. + */ + goto run; + } + } + /*NOTREACHED*/ +run: + /* + * If we're being traced (possibly because someone attached us + * while we were stopped), check for a signal from the debugger. 
+ */ + if (p->p_stat == SSTOP) { + if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) + p->p_siglist |= sigmask(p->p_xstat); + } + + /* + * setrunnable(p) in BSD + */ + p->p_stat = SRUN; + + /* + * Wake up the thread if it is interruptible. + */ + thread_abort_safely(sig_thread_act); +psigout: + if (withlock) + signal_unlock(p); +} + +__inline__ void +sig_lock_to_exit( + struct proc *p) +{ + thread_t self = current_thread(); + + p->exit_thread = self; + (void) task_suspend(p->task); +} + +__inline__ int +sig_try_locked( + struct proc *p) +{ + thread_t self = current_thread(); + + while (p->sigwait || p->exit_thread) { + if (p->exit_thread) { + if (p->exit_thread != self) { + /* + * Already exiting - no signals. + */ + thread_abort(current_act()); + } + return(0); + } + if(assert_wait_possible()) { + assert_wait((caddr_t)&p->sigwait_thread, + (THREAD_INTERRUPTIBLE)); + } + signal_unlock(p); + thread_block(0); + signal_lock(p); + if (thread_should_abort(self)) { + /* + * Terminate request - clean up. + */ + return -1; + } + } + return 1; +} + +/* + * If the current process has received a signal (should be caught or cause + * termination, should interrupt current syscall), return the signal number. + * Stop signals with default action are processed immediately, then cleared; + * they aren't returned. This is checked after each entry to the system for + * a syscall or trap (though this can usually be done without calling issignal + * by checking the pending signal masks in the CURSIG macro.) The normal call + * sequence is + * + * while (signum = CURSIG(curproc)) + * postsig(signum); + */ +int +issignal(p) + register struct proc *p; +{ + register int signum, mask, prop, sigbits; + task_t task = p->task; + thread_t cur_thread; + thread_act_t cur_act; + int s; + struct uthread * ut; + kern_return_t kret; + + cur_thread = current_thread(); + cur_act = current_act(); + + signal_lock(p); + + + /* + * Try to grab the signal lock. 
+ */ + if (sig_try_locked(p) <= 0) { + signal_unlock(p); + return (0); + } + + ut = get_bsdthread_info(cur_act); + for(;;) { + sigbits = (ut->uu_sig |p->p_siglist) & ~p->p_sigmask; + + if (p->p_flag & P_PPWAIT) + sigbits &= ~stopsigmask; + if (sigbits == 0) { /* no signal to send */ + signal_unlock(p); + return (0); + } + signum = ffs((long)sigbits); + mask = sigmask(signum); + prop = sigprop[signum]; + + if (mask & threadmask) { + /* we can take this signal */ + ut->uu_sig &= ~mask; + } + + /* + * We should see pending but ignored signals + * only if P_TRACED was on when they were posted. + */ + if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) { + p->p_siglist &= ~mask; /* take the signal! */ + continue; + } + if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { + register int hold; + register task_t task; + /* + * If traced, always stop, and stay + * stopped until released by the debugger. + */ + /* ptrace debugging */ + p->p_xstat = signum; + psignal(p->p_pptr, SIGCHLD); + /* + * XXX Have to really stop for debuggers; + * XXX stop() doesn't do the right thing. + * XXX Inline the task_suspend because we + * XXX have to diddle Unix state in the + * XXX middle of it. + */ + task = p->task; + task_hold(task); + p->sigwait = TRUE; + p->sigwait_thread = cur_act; + p->p_stat = SSTOP; + p->p_flag &= ~P_WAITED; + p->p_siglist &= ~mask; /* clear the old signal */ + + wakeup((caddr_t)p->p_pptr); + assert_wait((caddr_t)&p->sigwait, (THREAD_INTERRUPTIBLE)); + thread_block(0); + p->sigwait = FALSE; + p->sigwait_thread = NULL; + wakeup((caddr_t)&p->sigwait_thread); + + /* + * This code is to detect when gdb is killed + * even as the traced program is attached. + * pgsignal would get the SIGKILL to traced program + * That's what we are trying to see (I hope) + */ + if (p->p_siglist & sigmask(SIGKILL)) { + /* + * Wait event may still be outstanding; + * clear it, since sig_lock_to_exit will + * wait. 
+ */ + clear_wait(current_thread(), THREAD_INTERRUPTED); + sig_lock_to_exit(p); + /* + * Since this thread will be resumed + * to allow the current syscall to + * be completed, must save u_qsave + * before calling exit(). (Since exit() + * calls closef() which can trash u_qsave.) + */ + signal_unlock(p); + exit1(p,signum); + return(0); + } + + /* + * We may have to quit + */ + if (thread_should_abort(current_thread())) { + signal_unlock(p); + return(0); + } + /* + * If parent wants us to take the signal, + * then it will leave it in p->p_xstat; + * otherwise we just look for signals again. + */ + signum = p->p_xstat; + if (signum == 0) + continue; + /* + * Put the new signal into p_siglist. If the + * signal is being masked, look for other signals. + */ + mask = sigmask(signum); + if (mask & threadmask) + ut->uu_sig |= mask; + else + p->p_siglist |= mask; + if (p->p_sigmask & mask) + continue; + } + + /* + * Decide whether the signal should be returned. + * Return the signal's number, or fall through + * to clear it from the pending mask. + */ + + switch ((long)p->p_sigacts->ps_sigact[signum]) { + + case (long)SIG_DFL: + /* + * Don't take default actions on system processes. + */ + if (p->p_pptr->p_pid == 0) { +#if DIAGNOSTIC + /* + * Are you sure you want to ignore SIGSEGV + * in init? XXX + */ + printf("Process (pid %d) got signal %d\n", + p->p_pid, signum); +#endif + break; /* == ignore */ + } + + /* + * If there is a pending stop signal to process + * with default action, stop here, + * then clear the signal. However, + * if process is member of an orphaned + * process group, ignore tty stop signals. 
+ */ + if (prop & SA_STOP) { + if (p->p_flag & P_TRACED || + (p->p_pgrp->pg_jobc == 0 && + prop & SA_TTYSTOP)) + break; /* == ignore */ + p->p_xstat = signum; + stop(p); + if ((p->p_pptr->p_flag & P_NOCLDSTOP) == 0) + psignal(p->p_pptr, SIGCHLD); + thread_block(0); + /* + * We may have to quit + */ + if (thread_should_abort(current_thread())) { + signal_unlock(p); + return(0); + } + break; + } else if (prop & SA_IGNORE) { + /* + * Except for SIGCONT, shouldn't get here. + * Default action is to ignore; drop it. + */ + break; /* == ignore */ + } else { + signal_unlock(p); + return (signum); + } + /*NOTREACHED*/ + + case (long)SIG_IGN: + /* + * Masking above should prevent us ever trying + * to take action on an ignored signal other + * than SIGCONT, unless process is traced. + */ + if ((prop & SA_CONT) == 0 && + (p->p_flag & P_TRACED) == 0) + printf("issignal\n"); + break; /* == ignore */ + + default: + /* + * This signal has an action, let + * postsig() process it. + */ + signal_unlock(p); + return (signum); + } + p->p_siglist &= ~mask; /* take the signal! 
*/ + p->p_sigpending &= ~mask; /* take the pending signal */ + } + /* NOTREACHED */ +} + +/* called from _sleep */ +int +CURSIG(p) + register struct proc *p; +{ + register int signum, mask, prop, sigbits; + task_t task = p->task; + thread_t cur_thread; + thread_act_t cur_act; + int s; + struct uthread * ut; + int retnum = 0; + + if (p->p_siglist == 0) + return (0); + + if (((p->p_siglist & ~p->p_sigmask) == 0) && ((p->p_flag & P_TRACED) == 0)) + return (0); + + cur_thread = current_thread(); + cur_act = current_act(); + + ut = get_bsdthread_info(cur_act); + + sigbits = (ut->uu_sig | p->p_siglist) & ~p->p_sigmask; + + for(;;) { + if (p->p_flag & P_PPWAIT) + sigbits &= ~stopsigmask; + if (sigbits == 0) { /* no signal to send */ + return (retnum); + } + + signum = ffs((long)sigbits); + mask = sigmask(signum); + prop = sigprop[signum]; + + /* + * We should see pending but ignored signals + * only if P_TRACED was on when they were posted. + */ + if (mask & p->p_sigignore && (p->p_flag & P_TRACED) == 0) { + continue; + } + if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) { + /* + * Put the new signal into p_siglist. If the + * signal is being masked, look for other signals. + */ + mask = sigmask(signum); + if (p->p_sigmask & mask) + continue; + return(signum); + } + + /* + * Decide whether the signal should be returned. + * Return the signal's number, or fall through + * to clear it from the pending mask. + */ + + switch ((long)p->p_sigacts->ps_sigact[signum]) { + + case (long)SIG_DFL: + /* + * Don't take default actions on system processes. + */ + if (p->p_pptr->p_pid == 0) { +#if DIAGNOSTIC + /* + * Are you sure you want to ignore SIGSEGV + * in init? XXX + */ + printf("Process (pid %d) got signal %d\n", + p->p_pid, signum); +#endif + break; /* == ignore */ + } + + /* + * If there is a pending stop signal to process + * with default action, stop here, + * then clear the signal. 
However, + * if process is member of an orphaned + * process group, ignore tty stop signals. + */ + if (prop & SA_STOP) { + if (p->p_flag & P_TRACED || + (p->p_pgrp->pg_jobc == 0 && + prop & SA_TTYSTOP)) + break; /* == ignore */ + retnum = signum; + break; + } else if (prop & SA_IGNORE) { + /* + * Except for SIGCONT, shouldn't get here. + * Default action is to ignore; drop it. + */ + break; /* == ignore */ + } else { + return (signum); + } + /*NOTREACHED*/ + + case (long)SIG_IGN: + /* + * Masking above should prevent us ever trying + * to take action on an ignored signal other + * than SIGCONT, unless process is traced. + */ + if ((prop & SA_CONT) == 0 && + (p->p_flag & P_TRACED) == 0) + printf("issignal\n"); + break; /* == ignore */ + + default: + /* + * This signal has an action, let + * postsig() process it. + */ + return (signum); + } + sigbits &= ~mask; /* take the signal! */ + } + /* NOTREACHED */ +} + +/* + * Put the argument process into the stopped state and notify the parent + * via wakeup. Signals are handled elsewhere. The process must not be + * on the run queue. + */ +void +stop(p) + register struct proc *p; +{ + p->p_stat = SSTOP; + p->p_flag &= ~P_WAITED; + wakeup((caddr_t)p->p_pptr); + (void) task_suspend(p->task); /*XXX*/ +} + +/* + * Take the action for the specified signal + * from the current set of pending signals. + */ +void +postsig(signum) + register int signum; +{ + register struct proc *p = current_proc(); + register struct sigacts *ps = p->p_sigacts; + register sig_t action; + u_long code; + int mask, returnmask; + +#if DIAGNOSTIC + if (signum == 0) + panic("postsig"); + /* + * This must be called on master cpu + */ + if (cpu_number() != master_cpu) + panic("psig not on master"); +#endif + + signal_lock(p); + /* + * Try to grab the signal lock. 
+ */ + if (sig_try_locked(p) <= 0) { + signal_unlock(p); + return; + } + + mask = sigmask(signum); + p->p_siglist &= ~mask; + action = ps->ps_sigact[signum]; +#if KTRACE + if (KTRPOINT(p, KTR_PSIG)) + ktrpsig(p->p_tracep, + signum, action, ps->ps_flags & SAS_OLDMASK ? + ps->ps_oldmask : p->p_sigmask, 0); +#endif + if (action == SIG_DFL) { + /* + * Default action, where the default is to kill + * the process. (Other cases were ignored above.) + */ + /* called with signal_lock() held */ + sigexit_locked(p, signum); + return; + /* NOTREACHED */ + } else { + /* + * If we get here, the signal must be caught. + */ +#if DIAGNOSTIC + if (action == SIG_IGN || (p->p_sigmask & mask)) + log(LOG_WARNING, + "postsig: processing masked or ignored signal\n"); +#endif + /* + * Set the new mask value and also defer further + * occurences of this signal. + * + * Special case: user has done a sigpause. Here the + * current mask is not of interest, but rather the + * mask from before the sigpause is what we want + * restored after the signal processing is completed. + */ + if (ps->ps_flags & SAS_OLDMASK) { + returnmask = ps->ps_oldmask; + ps->ps_flags &= ~SAS_OLDMASK; + } else + returnmask = p->p_sigmask; + p->p_sigmask |= ps->ps_catchmask[signum] | mask; + if (ps->ps_sig != signum) { + code = 0; + } else { + code = ps->ps_code; + ps->ps_code = 0; + } + p->p_stats->p_ru.ru_nsignals++; + sendsig(p, action, signum, returnmask, code); + } + signal_unlock(p); +} + +/* + * Force the current process to exit with the specified signal, dumping core + * if appropriate. We bypass the normal tests for masked and caught signals, + * allowing unrecoverable failures to terminate the process without changing + * signal state. Mark the accounting record with the signal termination. + * If dumping core, save the signal number for the debugger. Calls exit and + * does not return. 
+ */ + /* called with signal lock */ +void +sigexit_locked(p, signum) + register struct proc *p; + int signum; +{ + + sig_lock_to_exit(p); + p->p_acflag |= AXSIG; + if (sigprop[signum] & SA_CORE) { + p->p_sigacts->ps_sig = signum; + if (coredump(p) == 0) + signum |= WCOREFLAG; + } + signal_unlock(p); + exit1(p, W_EXITCODE(0, signum)); + /* NOTREACHED */ +} + +void +bsd_ast(thread_act_t thr_act) +{ + struct proc *p = current_proc(); + struct uthread *ut = get_bsdthread_info(thr_act); + int signum; + unsigned int pc; + boolean_t funnel_state; + + if (p == NULL) + return; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + if ((p->p_flag & P_OWEUPC) && (p->p_flag & P_PROFIL)) { + pc = get_useraddr(); + addupc_task(p, pc, 1); + p->p_flag &= ~P_OWEUPC; + } + + if (CHECK_SIGNALS(p, current_thread(), ut)) { + while (signum = issignal(p)) + postsig(signum); + } + ast_off(AST_BSD); + + (void) thread_funnel_set(kernel_flock, FALSE); +} + +/* + * Follwing routines are called using callout from bsd_hardclock + * so that psignals are called in a thread context and are funneled + */ +void +psignal_vtalarm(struct proc *p) +{ + boolean_t funnel_state; + + if (p == NULL) + return; + funnel_state = thread_funnel_set(kernel_flock, TRUE); + psignal_lock(p, SIGVTALRM, 1, 1); + (void) thread_funnel_set(kernel_flock, FALSE); +} + +void +psignal_xcpu(struct proc *p) +{ + boolean_t funnel_state; + + if (p == NULL) + return; + funnel_state = thread_funnel_set(kernel_flock, TRUE); + psignal_lock(p, SIGXCPU, 1, 1); + (void) thread_funnel_set(kernel_flock, FALSE); +} + +void +psignal_sigprof(struct proc *p) +{ + boolean_t funnel_state; + + if (p == NULL) + return; + funnel_state = thread_funnel_set(kernel_flock, TRUE); + psignal_lock(p, SIGPROF, 1, 1); + (void) thread_funnel_set(kernel_flock, FALSE); +} + +/* ptrace set runnalbe */ +void +pt_setrunnable(struct proc *p) +{ +task_t task; + + task = p->task; + + if (p->p_flag & P_TRACED) { + p->p_stat = SRUN; + if (p->sigwait) { + 
wakeup((caddr_t)&(p->sigwait)); + task_release(task); + } + } +} diff --git a/bsd/kern/kern_subr.c b/bsd/kern/kern_subr.c new file mode 100644 index 000000000..7cd4f6770 --- /dev/null +++ b/bsd/kern/kern_subr.c @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94 + */ + +#include +#include +#include +#include +#include + +#include + +#include +#define DBG_UIO_COPYOUT 16 +#define DBG_UIO_COPYIN 17 + + +int +uiomove(cp, n, uio) + register caddr_t cp; + register int n; + register struct uio *uio; +{ + register struct iovec *iov; + u_int cnt; + int error = 0; + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) + panic("uiomove: mode"); + if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc()) + panic("uiomove proc"); +#endif + + while (n > 0 && uio->uio_resid) { + iov = uio->uio_iov; + cnt = iov->iov_len; + if (cnt == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + continue; + } + if (cnt > n) + cnt = n; + switch (uio->uio_segflg) { + + case UIO_USERSPACE: + case UIO_USERISPACE: + if (uio->uio_rw == UIO_READ) + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, + cp, iov->iov_base, cnt, 0,0); + + error = copyout(cp, iov->iov_base, cnt); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, + cp, iov->iov_base, cnt, 0,0); + } + else + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, + iov->iov_base, cp, cnt, 0,0); + + error = copyin(iov->iov_base, cp, cnt); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, + iov->iov_base, cp, cnt, 0,0); + } + if (error) + return (error); + break; + + case UIO_SYSSPACE: + if (uio->uio_rw == UIO_READ) + error = copywithin((caddr_t)cp, iov->iov_base, + cnt); + else + error = copywithin(iov->iov_base, (caddr_t)cp, + cnt); + break; +#ifdef ppc + case UIO_PHYS_USERSPACE: + if (uio->uio_rw == UIO_READ) + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, + cp, iov->iov_base, cnt, 1,0); + + error = copyp2v(cp, iov->iov_base, cnt); + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, + cp, iov->iov_base, cnt, 1,0); + } + else + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) 
| DBG_FUNC_START, + iov->iov_base, cp, cnt, 1,0); + + panic("copyv2p not implemented yet\n"); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, + iov->iov_base, cp, cnt, 1,0); + } + if (error) + return (error); + break; +#endif + } + iov->iov_base += cnt; + iov->iov_len -= cnt; + uio->uio_resid -= cnt; + uio->uio_offset += cnt; + cp += cnt; + n -= cnt; + } + return (error); +} + +/* + * Give next character to user as result of read. + */ +int +ureadc(c, uio) + register int c; + register struct uio *uio; +{ + register struct iovec *iov; + + if (uio->uio_resid <= 0) + panic("ureadc: non-positive resid"); +again: + if (uio->uio_iovcnt == 0) + panic("ureadc: non-positive iovcnt"); + iov = uio->uio_iov; + if (iov->iov_len <= 0) { + uio->uio_iovcnt--; + uio->uio_iov++; + goto again; + } + switch (uio->uio_segflg) { + + case UIO_USERSPACE: + if (subyte(iov->iov_base, c) < 0) + return (EFAULT); + break; + + case UIO_SYSSPACE: + *iov->iov_base = c; + break; + + case UIO_USERISPACE: + if (suibyte(iov->iov_base, c) < 0) + return (EFAULT); + break; + } + iov->iov_base++; + iov->iov_len--; + uio->uio_resid--; + uio->uio_offset++; + return (0); +} + +#if defined(vax) || defined(ppc) +/* unused except by ct.c, other oddities XXX */ +/* + * Get next character written in by user from uio. 
+ */ +uwritec(uio) + struct uio *uio; +{ + register struct iovec *iov; + register int c; + + if (uio->uio_resid <= 0) + return (-1); +again: + if (uio->uio_iovcnt <= 0) + panic("uwritec: non-positive iovcnt"); + iov = uio->uio_iov; + if (iov->iov_len == 0) { + uio->uio_iov++; + if (--uio->uio_iovcnt == 0) + return (-1); + goto again; + } + switch (uio->uio_segflg) { + + case UIO_USERSPACE: + c = fubyte(iov->iov_base); + break; + + case UIO_SYSSPACE: + c = *iov->iov_base & 0377; + break; + + case UIO_USERISPACE: + c = fuibyte(iov->iov_base); + break; + + default: + c = 0; /* avoid uninitialized variable warning */ + panic("uwritec: bogus uio_segflg"); + break; + } + if (c < 0) + return (-1); + iov->iov_base++; + iov->iov_len--; + uio->uio_resid--; + uio->uio_offset++; + return (c); +} +#endif /* vax || ppc */ + +/* + * General routine to allocate a hash table. + */ +void * +hashinit(elements, type, hashmask) + int elements, type; + u_long *hashmask; +{ + long hashsize; + LIST_HEAD(generic, generic) *hashtbl; + int i; + + if (elements <= 0) + panic("hashinit: bad cnt"); + for (hashsize = 1; hashsize <= elements; hashsize <<= 1) + continue; + hashsize >>= 1; + MALLOC(hashtbl, struct generic *, + (u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK); + bzero(hashtbl, (u_long)hashsize * sizeof(*hashtbl)); + for (i = 0; i < hashsize; i++) + LIST_INIT(&hashtbl[i]); + *hashmask = hashsize - 1; + return (hashtbl); +} diff --git a/bsd/kern/kern_symfile.c b/bsd/kern/kern_symfile.c new file mode 100644 index 000000000..d90edce20 --- /dev/null +++ b/bsd/kern/kern_symfile.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * File: bsd/kern/kern_symfile.c + * + * This file contains creates a dummy symbol file for mach_kernel based on + * the symbol table information passed by the SecondaryLoader/PlatformExpert. + * This allows us to correctly link other executables (drivers, etc) against the + * the kernel in cases where the kernel image on the root device does not match + * the live kernel. This can occur during net-booting where the actual kernel + * image is obtained from the network via tftp rather than the root + * device. + * + * If a symbol table is available, then the file /mach.sym will be created + * containing a Mach Header and a LC_SYMTAB load command followed by the + * the symbol table data for mach_kernel. + * + * HISTORY + * + * . 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +extern unsigned char rootdevice[]; +extern vm_size_t page_size; + + +int kernel_symfile_opened = 0; +int error_code = 0; + +extern int IODTGetLoaderInfo( char *key, void **infoAddr, int *infoSize ); +extern void IODTFreeLoaderInfo( char *key, void *infoAddr, int infoSize ); + +struct segment_command *findSegmentByName( struct mach_header *mh, const char *section_name ); + +/* + * + */ +int get_kernel_symfile( struct proc *p, char **symfile ) +{ + if ( kernel_symfile_opened == 0 ) + { + kernel_symfile_opened = 1; + error_code = output_kernel_symbols( p ); + } + if ( error_code == 0 ) *symfile = "\\mach.sym"; + + return error_code; +} + +/* + * + */ +int output_kernel_symbols( register struct proc *p ) +{ + register struct vnode *vp; + register struct pcred *pcred = p->p_cred; + register struct ucred *cred = pcred->pc_ucred; + struct nameidata nd; + struct vattr vattr; + struct mach_header *orig_mh, *mh; + struct load_command *lc; + struct segment_command *orig_ds, *orig_ts, *sg; + struct section *se; + struct symtab_command *sc, *sc0; + struct nlist *nl; + vm_size_t orig_mhsize, sc0_size; + vm_offset_t header; + vm_size_t header_size; + int error, error1; + int i, j; + int symfoffset, symsize; + int rc_mh, rc_sc; + + error = EFAULT; + + vp = NULL; + header = NULL; + orig_mh = NULL; + sc0 = NULL; + + rc_mh = IODTGetLoaderInfo( "Kernel-__HEADER", (void **)&orig_mh, &orig_mhsize ); + rc_sc = IODTGetLoaderInfo( "Kernel-__SYMTAB", (void **)&sc0, &sc0_size ); + + if ( rc_mh != 0 || orig_mh == 0 || orig_mhsize < sizeof(struct mach_header) ) goto out; + if ( rc_sc != 0 || sc0 == 0 || sc0_size < sizeof(struct symtab_command) ) goto out; + + if ( pcred->p_svuid != pcred->p_ruid || pcred->p_svgid != pcred->p_rgid ) goto out; + + if ( rootdevice[0] == 'e' && rootdevice[1] == 
'n' ) goto out; + + NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "mach.sym", p); + if( (error = vn_open(&nd, O_CREAT | FWRITE, S_IRUSR | S_IRGRP | S_IROTH )) != 0 ) goto out; + + vp = nd.ni_vp; + + /* Don't dump to non-regular files or files with links. */ + error = EFAULT; + if (vp->v_type != VREG || VOP_GETATTR(vp, &vattr, cred, p) || vattr.va_nlink != 1) goto out; + + VATTR_NULL(&vattr); + vattr.va_size = 0; + VOP_LEASE(vp, p, cred, LEASE_WRITE); + VOP_SETATTR(vp, &vattr, cred, p); + p->p_acflag |= ACORE; + + orig_ts = findSegmentByName(orig_mh, "__TEXT"); + orig_ds = findSegmentByName(orig_mh, "__DATA"); + + if ( orig_ts == NULL || orig_ds == NULL ) goto out; + + header_size = sizeof(struct mach_header) + + orig_ts->cmdsize + + orig_ds->cmdsize + + sizeof(struct symtab_command); + + (void) kmem_alloc_wired( kernel_map, + (vm_offset_t *)&header, + (vm_size_t)header_size); + + if ( header == NULL ) goto out; + + bzero( (void *)header, header_size ); + + /* + * Set up Mach-O header. + */ + mh = (struct mach_header *) header; + mh->magic = orig_mh->magic; + mh->cputype = orig_mh->cputype; + mh->cpusubtype = orig_mh->cpusubtype; + mh->filetype = orig_mh->filetype; + mh->ncmds = 3; + mh->sizeofcmds = header_size - sizeof(struct mach_header); + + /* + * Copy __DATA and __TEXT segment commands from mach_kernel so loadable drivers + * get correct section alignment hints. 
+ */ + sg = (struct segment_command *)(mh+1); + bcopy( orig_ts, sg, orig_ts->cmdsize ); + + sg = (struct segment_command *)((int)sg + sg->cmdsize); + bcopy( orig_ds, sg, orig_ds->cmdsize ); + + sg = (struct segment_command *)(mh+1); + + for ( i = 0; i < 2; i++ ) + { + sg->vmaddr = 0; + sg->vmsize = 0x1000; + sg->fileoff = 0; + sg->filesize = 0; + sg->maxprot = 0; + sg->initprot = 0; + sg->flags = 0; + + se = (struct section *)(sg+1); + for ( j = 0; j < sg->nsects; j++, se++ ) + { + se->addr = 0; + se->size = 0; + se->offset = 0; + se->nreloc = 0; + } + + sg = (struct segment_command *)((int)sg + sg->cmdsize); + } + + symfoffset = round_page(header_size); + + /* + * Set up LC_SYMTAB command + */ + sc = (struct symtab_command *)sg; + sc->cmd = LC_SYMTAB; + sc->cmdsize = sizeof(struct symtab_command); + sc->symoff = symfoffset; + sc->nsyms = sc0->nsyms; + sc->strsize = sc0->strsize; + sc->stroff = symfoffset + sc->nsyms * sizeof(struct nlist); + + symsize = sc->nsyms * sizeof(struct nlist) + sc->strsize; + + nl = (struct nlist *)(sc0+1); + for (i = 0; i < sc->nsyms; i++, nl++ ) + { + if ( (nl->n_type & N_TYPE) == N_SECT ) + { + nl->n_sect = NO_SECT; + nl->n_type = (nl->n_type & ~N_TYPE) | N_ABS; + } + } + + /* + * Write out the load commands at the beginning of the + * file. 
+ */ + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)mh, header_size, (off_t)0, + UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p); + if ( error != 0 ) goto out; + + /* + * Write out kernel symbols + */ + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)(sc0+1), symsize, symfoffset, + UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p); + if ( error != 0 ) goto out; + +out: + if ( header != 0 ) kmem_free(kernel_map, header, header_size); + if ( orig_mh != 0 ) IODTFreeLoaderInfo( "Kernel-__HEADER", (void *)orig_mh, round_page(orig_mhsize) ); + if ( sc0 != 0 ) IODTFreeLoaderInfo( "Kernel-__SYMTAB", (void *)sc0, round_page(sc0_size) ); + + if ( vp != 0 ) + { + VOP_UNLOCK(vp, 0, p); + error1 = vn_close(vp, FWRITE, cred, p); + if (error == 0) error = error1; + } + + return(error); +} + +/* + * + */ +struct segment_command *findSegmentByName( struct mach_header *mh, const char *section_name ) +{ + struct segment_command *sg; + int i; + + sg = (struct segment_command *)(mh+1); + + for ( i=0; i < mh->ncmds; i++ ) + { + if ( (sg->cmd == LC_SEGMENT) && (strcmp(sg->segname, section_name) == 0) ) + { + return sg; + } + + sg = (struct segment_command *)((int)sg + sg->cmdsize); + } + + return NULL; +} + + + + + diff --git a/bsd/kern/kern_synch.c b/bsd/kern/kern_synch.c new file mode 100644 index 000000000..ef076878d --- /dev/null +++ b/bsd/kern/kern_synch.c @@ -0,0 +1,363 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +_sleep_continue() +{ + register struct proc *p; + register thread_t thread = current_thread(); + thread_act_t th_act; + struct uthread * ut; + int sig, catch; + int error = 0; + + th_act = current_act(); + ut = get_bsdthread_info(th_act); + catch = ut->uu_pri & PCATCH; + p = current_proc(); + +#if FIXME /* [ */ + thread->wait_mesg = NULL; +#endif /* FIXME ] */ + switch (get_thread_waitresult(thread)) { + case THREAD_TIMED_OUT: + error = EWOULDBLOCK; + break; + case THREAD_AWAKENED: + /* + * Posix implies any signal should be delivered + * first, regardless of whether awakened due + * to receiving event. 
+ */ + if (!catch) + break; + /* else fall through */ + case THREAD_INTERRUPTED: + if (catch) { + unix_master(); + if (thread_should_abort(current_thread())) { + error = EINTR; + } else if (SHOULDissignal(p,ut)) { + if (sig = CURSIG(p)) { + if (p->p_sigacts->ps_sigintr & sigmask(sig)) + error = EINTR; + else + error = ERESTART; + } + if (thread_should_abort(current_thread())) { + error = EINTR; + } + } + unix_release(); + } else + error = EINTR; + break; + } + + if ((error == EINTR) || (error == ERESTART)) { +#ifdef BSD_USE_APC + thread_apc_set(th_act, bsd_ast); +#else + thread_ast_set(th_act, AST_BSD); + ast_on(AST_BSD); +#endif + } + if (ut->uu_timo) + thread_cancel_timer(); + +#if 0 + /* We should never get here without funnel, so we should not grab again */ + thread_funnel_set(kernel_flock, TRUE); +#endif /* 0 */ + unix_syscall_return((*ut->uu_continuation)(error)); +} + +/* + * Give up the processor till a wakeup occurs + * on chan, at which time the process + * enters the scheduling queue at priority pri. + * The most important effect of pri is that when + * pri<=PZERO a signal cannot disturb the sleep; + * if pri>PZERO signals will be processed. + * If pri&PCATCH is set, signals will cause sleep + * to return 1, rather than longjmp. + * Callers of this routine must be prepared for + * premature return, and check that the reason for + * sleeping has gone away. 
+ */ + +#if FIXME +static __inline__ +#endif +int +_sleep(chan, pri, wmsg, timo, continuation) + caddr_t chan; + int pri; + char *wmsg; + int timo; + int (*continuation)(); +{ + register struct proc *p; + register thread_t thread = current_thread(); + thread_act_t th_act; + struct uthread * ut; + int sig, catch = pri & PCATCH; + int sigttblock = pri & PTTYBLOCK; + int error = 0; + spl_t s; + + s = splhigh(); + + th_act = current_act(); + ut = get_bsdthread_info(th_act); + + p = current_proc(); +#if KTRACE + if (KTRPOINT(p, KTR_CSW)) + ktrcsw(p->p_tracep, 1, 0); +#endif + p->p_priority = pri & PRIMASK; + + if (chan) + assert_wait(chan, (catch) ? THREAD_ABORTSAFE : THREAD_UNINT); + + if (timo) + thread_set_timer(timo, NSEC_PER_SEC / hz); + /* + * We start our timeout + * before calling CURSIG, as we could stop there, and a wakeup + * or a SIGCONT (or both) could occur while we were stopped. + * A SIGCONT would cause us to be marked as SSLEEP + * without resuming us, thus we must be ready for sleep + * when CURSIG is called. If the wakeup happens while we're + * stopped, p->p_wchan will be 0 upon return from CURSIG. 
+ */ + if (catch) { + unix_master(); + if (SHOULDissignal(p,ut)) { + if (sig = CURSIG(p)) { + clear_wait(thread, THREAD_INTERRUPTED); + /* if SIGTTOU or SIGTTIN then block till SIGCONT */ + if (sigttblock && ((sig == SIGTTOU) || (sig == SIGTTIN))) { + p->p_flag |= P_TTYSLEEP; + /* reset signal bits */ + clear_sigbits(p, sig); + assert_wait(&p->p_siglist, THREAD_ABORTSAFE); + /* assert wait can block and SIGCONT should be checked */ + if (p->p_flag & P_TTYSLEEP) + thread_block(0); + /* return with success */ + error = 0; + goto out; + } + if (p->p_sigacts->ps_sigintr & sigmask(sig)) + error = EINTR; + else + error = ERESTART; + unix_release(); + goto out; + } + } + if (thread_should_abort(current_thread())) { + clear_wait(thread, THREAD_INTERRUPTED); + error = EINTR; + unix_release(); + goto out; + } + if (get_thread_waitevent(thread) == 0) { /*already happened */ + unix_release(); + goto out; + } + unix_release(); + } + +#if FIXME /* [ */ + thread->wait_mesg = wmsg; +#endif /* FIXME ] */ + splx(s); + p->p_stats->p_ru.ru_nvcsw++; + + if (continuation != (int (*)()) 0 ) { + ut->uu_continuation = continuation; + ut->uu_pri = pri; + ut->uu_timo = timo; + thread_block(_sleep_continue); + /* NOTREACHED */ + } + + thread_block(0); + +#if FIXME /* [ */ + thread->wait_mesg = NULL; +#endif /* FIXME ] */ + switch (get_thread_waitresult(thread)) { + case THREAD_TIMED_OUT: + error = EWOULDBLOCK; + break; + case THREAD_AWAKENED: + /* + * Posix implies any signal should be delivered + * first, regardless of whether awakened due + * to receiving event. 
+ */ + if (!catch) + break; + /* else fall through */ + case THREAD_INTERRUPTED: + if (catch) { + unix_master(); + if (thread_should_abort(current_thread())) { + error = EINTR; + } else if (SHOULDissignal(p,ut)) { + if (sig = CURSIG(p)) { + if (p->p_sigacts->ps_sigintr & sigmask(sig)) + error = EINTR; + else + error = ERESTART; + } + if (thread_should_abort(current_thread())) { + error = EINTR; + } + } + unix_release(); + } else + error = EINTR; + break; + } +out: + if ((error == EINTR) || (error == ERESTART)) { +#ifdef BSD_USE_APC + thread_apc_set(th_act, bsd_ast); +#else + thread_ast_set(th_act, AST_BSD); + ast_on(AST_BSD); +#endif + } + if (timo) + thread_cancel_timer(); + (void) splx(s); + return (error); +} + +int sleep(chan, pri) + void *chan; + int pri; +{ + + return (_sleep((caddr_t)chan, pri, (char *)NULL, 0, (void (*)())0 )); + +} + +int tsleep(chan, pri, wmsg, timo) + void *chan; + int pri; + char * wmsg; + int timo; +{ + return(_sleep((caddr_t)chan, pri, wmsg, timo, (void (*)())0 )); +} + +int tsleep0(chan, pri, wmsg, timo, continuation) + void *chan; + int pri; + char * wmsg; + int timo; + int (*continuation)(); +{ +#if defined (__i386__) + return(_sleep((caddr_t)chan, pri, wmsg, timo, (void (*)())0 )); +#else + return(_sleep((caddr_t)chan, pri, wmsg, timo, continuation)); +#endif +} + +/* + * Wake up all processes sleeping on chan. + */ +void +wakeup(chan) + register void *chan; +{ + thread_wakeup_prim((caddr_t)chan,FALSE, THREAD_AWAKENED); +} + +/* + * Wake up the first process sleeping on chan. + * + * Be very sure that the first process is really + * the right one to wakeup. + */ +wakeup_one(chan) + register caddr_t chan; +{ + thread_wakeup_prim((caddr_t)chan, TRUE, THREAD_AWAKENED); +} + +/* + * Compute the priority of a process when running in user mode. + * Arrange to reschedule if the resulting priority is better + * than that of the current process. 
+ */ +void +resetpriority(p) + register struct proc *p; +{ + int newpri; +#if FIXME + if (p->p_nice < 0) + newpri = BASEPRI_USER + + (p->p_nice * (MAXPRI_USER - BASEPRI_USER)) / PRIO_MIN; + else + newpri = BASEPRI_USER - + (p->p_nice * BASEPRI_USER) / PRIO_MAX; + + (void)task_priority(p->task, newpri, TRUE); +#endif /* FIXME */ +} + diff --git a/bsd/kern/kern_sysctl.c b/bsd/kern/kern_sysctl.c new file mode 100644 index 000000000..c7332ad05 --- /dev/null +++ b/bsd/kern/kern_sysctl.c @@ -0,0 +1,1213 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Mike Karels at Berkeley Software Design, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94 + */ + +/* + * sysctl system call. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern vm_map_t bsd_pageable_map; + +#include +#include + +#include +#include + +#if __ppc__ +#include +#endif + +sysctlfn kern_sysctl; +sysctlfn hw_sysctl; +#ifdef DEBUG +sysctlfn debug_sysctl; +#endif +extern sysctlfn vm_sysctl; +extern sysctlfn vfs_sysctl; +extern sysctlfn net_sysctl; +extern sysctlfn cpu_sysctl; + + +int +userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t + *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval); + +void +fill_proc(struct proc *p,struct kinfo_proc *kp, int doingzomb); + +void +fill_externproc(struct proc *p, struct extern_proc *exp); + + + +/* + * temporary location for vm_sysctl. This should be machine independant + */ +vm_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + int error, level, inthostid; + extern long avenrun[3], mach_factor[3]; + struct loadavg loadinfo; + + //if (namelen != 1 && !(name[0] == VM_LOADAVG)) + //return (ENOTDIR); /* overloaded */ + + switch (name[0]) { + case VM_LOADAVG: + loadinfo.ldavg[0] = avenrun[0]; + loadinfo.ldavg[1] = avenrun[1]; + loadinfo.ldavg[2] = avenrun[2]; + loadinfo.fscale = LSCALE; + return (sysctl_struct(oldp, oldlenp, newp, newlen, &loadinfo, sizeof(struct loadavg))); + case VM_MACHFACTOR: + loadinfo.ldavg[0] = mach_factor[0]; + loadinfo.ldavg[1] = mach_factor[1]; + loadinfo.ldavg[2] = mach_factor[2]; + loadinfo.fscale = LSCALE; + return (sysctl_struct(oldp, oldlenp, newp, newlen, &loadinfo, sizeof(struct loadavg))); + case VM_METER: + return (EOPNOTSUPP); + case VM_MAXID: + return (EOPNOTSUPP); + default: + return (EOPNOTSUPP); + } + /* NOTREACHED */ + return (EOPNOTSUPP); +} + +/* + * Locking and stats + */ +static 
struct sysctl_lock { + int sl_lock; + int sl_want; + int sl_locked; +} memlock; + +struct __sysctl_args { + int *name; + u_int namelen; + void *old; + size_t *oldlenp; + void *new; + size_t newlen; +}; +int +__sysctl(p, uap, retval) + struct proc *p; + register struct __sysctl_args *uap; + register_t *retval; +{ + int error, dolock = 1; + size_t savelen, oldlen = 0; + sysctlfn *fn; + int name[CTL_MAXNAME]; + int i; + + /* + * all top-level sysctl names are non-terminal + */ + if (uap->namelen > CTL_MAXNAME || uap->namelen < 2) + return (EINVAL); + if (error = + copyin(uap->name, &name, uap->namelen * sizeof(int))) + return (error); + + /* CTL_UNSPEC is used to get oid to AUTO_OID */ + if (uap->new != NULL && + (((name[0] == CTL_KERN) && (name[1] != KERN_IPC)) || + (name[0] == CTL_HW) || (name[0] == CTL_VM) || + (name[0] == CTL_VFS)) && + (error = suser(p->p_ucred, &p->p_acflag))) + return (error); + + switch (name[0]) { + case CTL_KERN: + fn = kern_sysctl; + if (name[1] != KERN_VNODE) /* XXX */ + dolock = 0; + break; + case CTL_HW: + fn = hw_sysctl; + break; + case CTL_VM: + fn = vm_sysctl; + break; + + case CTL_VFS: + fn = vfs_sysctl; + break; +#if FIXME /* [ */ + case CTL_MACHDEP: + fn = cpu_sysctl; + break; +#endif /* FIXME ] */ +#ifdef DEBUG + case CTL_DEBUG: + fn = debug_sysctl; + break; +#endif + default: + fn = 0; + } + + if (uap->oldlenp && + (error = copyin(uap->oldlenp, &oldlen, sizeof(oldlen)))) + return (error); + + if (uap->old != NULL) { + if (!useracc(uap->old, oldlen, B_WRITE)) + return (EFAULT); + + /* The pc sampling mechanism does not need to take this lock */ + if (name[1] != KERN_PCSAMPLES) { + while (memlock.sl_lock) { + memlock.sl_want = 1; + sleep((caddr_t)&memlock, PRIBIO+1); + memlock.sl_locked++; + } + memlock.sl_lock = 1; + } + + if (dolock) + vslock(uap->old, oldlen); + savelen = oldlen; + } + + if (fn) + error = (*fn)(name + 1, uap->namelen - 1, uap->old, + &oldlen, uap->new, uap->newlen, p); + else + error = EOPNOTSUPP; + + if ( 
(name[0] != CTL_VFS) && (error == EOPNOTSUPP)) + error = userland_sysctl(p, name, uap->namelen, + uap->old, uap->oldlenp, 0, + uap->new, uap->newlen, &oldlen); + + if (uap->old != NULL) { + if (dolock) + vsunlock(uap->old, savelen, B_WRITE); + if (name[1] != KERN_PCSAMPLES) { + memlock.sl_lock = 0; + if (memlock.sl_want) { + memlock.sl_want = 0; + wakeup((caddr_t)&memlock); + } + } + } + if ((error) && (error != ENOMEM)) + return (error); + + if (uap->oldlenp) { + i = copyout(&oldlen, uap->oldlenp, sizeof(oldlen)); + if (i) + return i; + } + + return (error); +} + +/* + * Attributes stored in the kernel. + */ +extern char hostname[MAXHOSTNAMELEN]; /* defined in bsd/kern/init_main.c */ +extern int hostnamelen; +extern char domainname[MAXHOSTNAMELEN]; +extern int domainnamelen; +extern long hostid; +#ifdef INSECURE +int securelevel = -1; +#else +int securelevel; +#endif + +int get_kernel_symfile( struct proc *p, char **symfile ); + +/* + * kernel related system variables. + */ +kern_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + int error, level, inthostid; + unsigned int oldval=0; + extern char ostype[], osrelease[], version[]; + + /* all sysctl names at this level are terminal */ + if (namelen != 1 && !(name[0] == KERN_PROC || name[0] == KERN_PROF + || name[0] == KERN_KDEBUG + || name[0] == KERN_PROCARGS + || name[0] == KERN_PCSAMPLES + || name[0] == KERN_IPC + )) + return (ENOTDIR); /* overloaded */ + + switch (name[0]) { + case KERN_OSTYPE: + return (sysctl_rdstring(oldp, oldlenp, newp, ostype)); + case KERN_OSRELEASE: + return (sysctl_rdstring(oldp, oldlenp, newp, osrelease)); + case KERN_OSREV: + return (sysctl_rdint(oldp, oldlenp, newp, BSD)); + case KERN_VERSION: + return (sysctl_rdstring(oldp, oldlenp, newp, version)); + case KERN_MAXVNODES: + oldval = desiredvnodes; + error = sysctl_int(oldp, oldlenp, newp, + newlen, &desiredvnodes); + 
reset_vmobjectcache(oldval, desiredvnodes); + return(error); + case KERN_MAXPROC: + return (sysctl_int(oldp, oldlenp, newp, newlen, &maxproc)); + case KERN_MAXFILES: + return (sysctl_int(oldp, oldlenp, newp, newlen, &maxfiles)); + case KERN_ARGMAX: + return (sysctl_rdint(oldp, oldlenp, newp, ARG_MAX)); + case KERN_SECURELVL: + level = securelevel; + if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &level)) || + newp == NULL) + return (error); + if (level < securelevel && p->p_pid != 1) + return (EPERM); + securelevel = level; + return (0); + case KERN_HOSTNAME: + error = sysctl_string(oldp, oldlenp, newp, newlen, + hostname, sizeof(hostname)); + if (newp && !error) + hostnamelen = newlen; + return (error); + case KERN_DOMAINNAME: + error = sysctl_string(oldp, oldlenp, newp, newlen, + domainname, sizeof(domainname)); + if (newp && !error) + domainnamelen = newlen; + return (error); + case KERN_HOSTID: + inthostid = hostid; /* XXX assumes sizeof long <= sizeof int */ + error = sysctl_int(oldp, oldlenp, newp, newlen, &inthostid); + hostid = inthostid; + return (error); + case KERN_CLOCKRATE: + return (sysctl_clockrate(oldp, oldlenp)); + case KERN_BOOTTIME: + return (sysctl_rdstruct(oldp, oldlenp, newp, &boottime, + sizeof(struct timeval))); + case KERN_VNODE: + return (sysctl_vnode(oldp, oldlenp)); + case KERN_PROC: + return (sysctl_doproc(name + 1, namelen - 1, oldp, oldlenp)); + case KERN_FILE: + return (sysctl_file(oldp, oldlenp)); +#ifdef GPROF + case KERN_PROF: + return (sysctl_doprof(name + 1, namelen - 1, oldp, oldlenp, + newp, newlen)); +#endif + case KERN_POSIX1: + return (sysctl_rdint(oldp, oldlenp, newp, _POSIX_VERSION)); + case KERN_NGROUPS: + return (sysctl_rdint(oldp, oldlenp, newp, NGROUPS_MAX)); + case KERN_JOB_CONTROL: + return (sysctl_rdint(oldp, oldlenp, newp, 1)); + case KERN_SAVED_IDS: +#ifdef _POSIX_SAVED_IDS + return (sysctl_rdint(oldp, oldlenp, newp, 1)); +#else + return (sysctl_rdint(oldp, oldlenp, newp, 0)); +#endif +#if FIXME /* [ */ + 
case KERN_MAXPARTITIONS: + return (sysctl_rdint(oldp, oldlenp, newp, MAXPARTITIONS)); +#endif /* FIXME ] */ + case KERN_KDEBUG: + return (kdebug_ops(name + 1, namelen - 1, oldp, oldlenp, p)); + case KERN_PCSAMPLES: + return (pcsamples_ops(name + 1, namelen - 1, oldp, oldlenp, p)); + case KERN_PROCARGS: + /* new one as it does not use kinfo_proc */ + return (sysctl_procargs(name + 1, namelen - 1, oldp, oldlenp)); + case KERN_SYMFILE: + { + char *str; + error = get_kernel_symfile( p, &str ); + if ( error ) return error; + return (sysctl_rdstring(oldp, oldlenp, newp, str)); + } + default: + return (EOPNOTSUPP); + } + /* NOTREACHED */ +} + +/* + * hardware related system variables. + */ +hw_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + char dummy[65]; + int epochTemp; + extern int vm_page_wire_count; +#if __ppc__ + ml_ppc_cpu_info_t cpu_info; + + ml_ppc_get_info(&cpu_info); +#endif + + /* all sysctl names at this level are terminal */ + if (namelen != 1) + return (ENOTDIR); /* overloaded */ + + switch (name[0]) { + case HW_MACHINE: + if(!PEGetMachineName(dummy,64)) + return(EINVAL); + return (sysctl_rdstring(oldp, oldlenp, newp, dummy)); + case HW_MODEL: + if(!PEGetModelName(dummy,64)) + return(EINVAL); + return (sysctl_rdstring(oldp, oldlenp, newp, dummy)); + case HW_NCPU: { + int numcpus=1; + host_basic_info_data_t hinfo; + kern_return_t kret; + int count= HOST_BASIC_INFO_COUNT; +#define BSD_HOST 1 + + kret = host_info(BSD_HOST, HOST_BASIC_INFO, &hinfo, &count); + if (kret == KERN_SUCCESS) { + numcpus = hinfo.avail_cpus; + return (sysctl_rdint(oldp, oldlenp, newp, numcpus)); + } else { + return(EINVAL); + } + } + case HW_BYTEORDER: + return (sysctl_rdint(oldp, oldlenp, newp, BYTE_ORDER)); + case HW_PHYSMEM: + return (sysctl_rdint(oldp, oldlenp, newp, mem_size)); + case HW_USERMEM: + return (sysctl_rdint(oldp, oldlenp, newp, + (mem_size - 
vm_page_wire_count * page_size))); + case HW_PAGESIZE: + return (sysctl_rdint(oldp, oldlenp, newp, page_size)); + case HW_EPOCH: + epochTemp = PEGetPlatformEpoch(); + if (epochTemp == -1) return(EINVAL); + return (sysctl_rdint(oldp, oldlenp, newp, epochTemp)); + case HW_BUS_FREQ: + return (sysctl_rdint(oldp, oldlenp, newp, gPEClockFrequencyInfo.bus_clock_rate_hz)); + case HW_CPU_FREQ: + return (sysctl_rdint(oldp, oldlenp, newp, gPEClockFrequencyInfo.cpu_clock_rate_hz)); +#if __ppc__ + case HW_VECTORUNIT: + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.vector_unit)); + case HW_CACHELINE: + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.cache_line_size)); + case HW_L1ICACHESIZE: + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.l1_icache_size)); + case HW_L1DCACHESIZE: + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.l1_dcache_size)); + case HW_L2SETTINGS: + if (cpu_info.l2_cache_size == 0xFFFFFFFF) return(EINVAL); + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.l2_settings)); + case HW_L2CACHESIZE: + if (cpu_info.l2_cache_size == 0xFFFFFFFF) return(EINVAL); + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.l2_cache_size)); + case HW_L3SETTINGS: + if (cpu_info.l3_cache_size == 0xFFFFFFFF) return(EINVAL); + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.l3_settings)); + case HW_L3CACHESIZE: + if (cpu_info.l3_cache_size == 0xFFFFFFFF) return(EINVAL); + return (sysctl_rdint(oldp, oldlenp, newp, cpu_info.l3_cache_size)); +#endif + default: + return (EOPNOTSUPP); + } +} + +#ifdef DEBUG +/* + * Debugging related system variables. 
+ */ +#if DIAGNOSTIC +extern +#endif /* DIAGNOSTIC */ +struct ctldebug debug0, debug1; +struct ctldebug debug2, debug3, debug4; +struct ctldebug debug5, debug6, debug7, debug8, debug9; +struct ctldebug debug10, debug11, debug12, debug13, debug14; +struct ctldebug debug15, debug16, debug17, debug18, debug19; +static struct ctldebug *debugvars[CTL_DEBUG_MAXID] = { + &debug0, &debug1, &debug2, &debug3, &debug4, + &debug5, &debug6, &debug7, &debug8, &debug9, + &debug10, &debug11, &debug12, &debug13, &debug14, + &debug15, &debug16, &debug17, &debug18, &debug19, +}; +int +debug_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + struct ctldebug *cdp; + + /* all sysctl names at this level are name and field */ + if (namelen != 2) + return (ENOTDIR); /* overloaded */ + cdp = debugvars[name[0]]; + if (cdp->debugname == 0) + return (EOPNOTSUPP); + switch (name[1]) { + case CTL_DEBUG_NAME: + return (sysctl_rdstring(oldp, oldlenp, newp, cdp->debugname)); + case CTL_DEBUG_VALUE: + return (sysctl_int(oldp, oldlenp, newp, newlen, cdp->debugvar)); + default: + return (EOPNOTSUPP); + } + /* NOTREACHED */ +} +#endif /* DEBUG */ + +/* + * Validate parameters and get old / set new parameters + * for an integer-valued sysctl function. + */ +sysctl_int(oldp, oldlenp, newp, newlen, valp) + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + int *valp; +{ + int error = 0; + + if (oldp && *oldlenp < sizeof(int)) + return (ENOMEM); + if (newp && newlen != sizeof(int)) + return (EINVAL); + *oldlenp = sizeof(int); + if (oldp) + error = copyout(valp, oldp, sizeof(int)); + if (error == 0 && newp) + error = copyin(newp, valp, sizeof(int)); + return (error); +} + +/* + * As above, but read-only. 
+ */ +sysctl_rdint(oldp, oldlenp, newp, val) + void *oldp; + size_t *oldlenp; + void *newp; + int val; +{ + int error = 0; + + if (oldp && *oldlenp < sizeof(int)) + return (ENOMEM); + if (newp) + return (EPERM); + *oldlenp = sizeof(int); + if (oldp) + error = copyout((caddr_t)&val, oldp, sizeof(int)); + return (error); +} + +/* + * Validate parameters and get old / set new parameters + * for a string-valued sysctl function. + */ +sysctl_string(oldp, oldlenp, newp, newlen, str, maxlen) + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + char *str; + int maxlen; +{ + int len, error = 0; + + len = strlen(str) + 1; + if (oldp && *oldlenp < len) + return (ENOMEM); + if (newp && newlen >= maxlen) + return (EINVAL); + if (oldp) { + *oldlenp = len; + error = copyout(str, oldp, len); + } + if (error == 0 && newp) { + error = copyin(newp, str, newlen); + str[newlen] = 0; + } + return (error); +} + +/* + * As above, but read-only. + */ +sysctl_rdstring(oldp, oldlenp, newp, str) + void *oldp; + size_t *oldlenp; + void *newp; + char *str; +{ + int len, error = 0; + + len = strlen(str) + 1; + if (oldp && *oldlenp < len) + return (ENOMEM); + if (newp) + return (EPERM); + *oldlenp = len; + if (oldp) + error = copyout(str, oldp, len); + return (error); +} + +/* + * Validate parameters and get old / set new parameters + * for a structure oriented sysctl function. + */ +sysctl_struct(oldp, oldlenp, newp, newlen, sp, len) + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + void *sp; + int len; +{ + int error = 0; + + if (oldp && *oldlenp < len) + return (ENOMEM); + if (newp && newlen > len) + return (EINVAL); + if (oldp) { + *oldlenp = len; + error = copyout(sp, oldp, len); + } + if (error == 0 && newp) + error = copyin(newp, sp, len); + return (error); +} + +/* + * Validate parameters and get old parameters + * for a structure oriented sysctl function. 
+ */ +sysctl_rdstruct(oldp, oldlenp, newp, sp, len) + void *oldp; + size_t *oldlenp; + void *newp, *sp; + int len; +{ + int error = 0; + + if (oldp && *oldlenp < len) + return (ENOMEM); + if (newp) + return (EPERM); + *oldlenp = len; + if (oldp) + error = copyout(sp, oldp, len); + return (error); +} + +/* + * Get file structures. + */ +sysctl_file(where, sizep) + char *where; + size_t *sizep; +{ + int buflen, error; + struct file *fp; + char *start = where; + + buflen = *sizep; + if (where == NULL) { + /* + * overestimate by 10 files + */ + *sizep = sizeof(filehead) + (nfiles + 10) * sizeof(struct file); + return (0); + } + + /* + * first copyout filehead + */ + if (buflen < sizeof(filehead)) { + *sizep = 0; + return (0); + } + if (error = copyout((caddr_t)&filehead, where, sizeof(filehead))) + return (error); + buflen -= sizeof(filehead); + where += sizeof(filehead); + + /* + * followed by an array of file structures + */ + for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) { + if (buflen < sizeof(struct file)) { + *sizep = where - start; + return (ENOMEM); + } + if (error = copyout((caddr_t)fp, where, sizeof (struct file))) + return (error); + buflen -= sizeof(struct file); + where += sizeof(struct file); + } + *sizep = where - start; + return (0); +} + +/* + * try over estimating by 5 procs + */ +#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc)) + +sysctl_doproc(name, namelen, where, sizep) + int *name; + u_int namelen; + char *where; + size_t *sizep; +{ + register struct proc *p; + register struct kinfo_proc *dp = (struct kinfo_proc *)where; + register int needed = 0; + int buflen = where != NULL ? *sizep : 0; + int doingzomb; + struct kinfo_proc kproc; + int error = 0; + + if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL)) + return (EINVAL); + p = allproc.lh_first; + doingzomb = 0; +again: + for (; p != 0; p = p->p_list.le_next) { + /* + * Skip embryonic processes. 
+ */ + if (p->p_stat == SIDL) + continue; + /* + * TODO - make more efficient (see notes below). + * do by session. + */ + switch (name[0]) { + + case KERN_PROC_PID: + /* could do this with just a lookup */ + if (p->p_pid != (pid_t)name[1]) + continue; + break; + + case KERN_PROC_PGRP: + /* could do this by traversing pgrp */ + if (p->p_pgrp->pg_id != (pid_t)name[1]) + continue; + break; + + case KERN_PROC_TTY: + if ( doingzomb || (p->p_flag & P_CONTROLT) == 0 || + p->p_session->s_ttyp == NULL || + p->p_session->s_ttyp->t_dev != (dev_t)name[1]) + continue; + break; + + case KERN_PROC_UID: + if (doingzomb || (p->p_ucred->cr_uid != (uid_t)name[1])) + continue; + break; + + case KERN_PROC_RUID: + if ( doingzomb || (p->p_cred->p_ruid != (uid_t)name[1])) + continue; + break; + } + if (buflen >= sizeof(struct kinfo_proc)) { + fill_proc(p, &kproc, doingzomb); + if (error = copyout((caddr_t)&kproc, &dp->kp_proc, + sizeof(struct kinfo_proc))) + return (error); + dp++; + buflen -= sizeof(struct kinfo_proc); + } + needed += sizeof(struct kinfo_proc); + } + if (doingzomb == 0) { + p = zombproc.lh_first; + doingzomb++; + goto again; + } + if (where != NULL) { + *sizep = (caddr_t)dp - where; + if (needed > *sizep) + return (ENOMEM); + } else { + needed += KERN_PROCSLOP; + *sizep = needed; + } + return (0); +} + +void +fill_proc(p,kp, doingzomb) + register struct proc *p; + register struct kinfo_proc *kp; + int doingzomb; +{ + fill_externproc(p, &kp->kp_proc); + if (!doingzomb) + fill_eproc(p, &kp->kp_eproc); +} +/* + * Fill in an eproc structure for the specified process. 
+ */ +void +fill_eproc(p, ep) + register struct proc *p; + register struct eproc *ep; +{ + register struct tty *tp; + + ep->e_paddr = p; + ep->e_sess = p->p_pgrp->pg_session; + ep->e_pcred = *p->p_cred; + ep->e_ucred = *p->p_ucred; + if (p->p_stat == SIDL || p->p_stat == SZOMB) { + ep->e_vm.vm_rssize = 0; + ep->e_vm.vm_tsize = 0; + ep->e_vm.vm_dsize = 0; + ep->e_vm.vm_ssize = 0; + /* ep->e_vm.vm_pmap = XXX; */ + } else { +#if FIXME /* [ */ + register vm_map_t vm = ((task_t)p->task)->map; + + ep->e_vm.vm_rssize = pmap_resident_count(vm->pmap); /*XXX*/ +// ep->e_vm.vm_tsize = vm->vm_tsize; +// ep->e_vm.vm_dsize = vm->vm_dsize; +// ep->e_vm.vm_ssize = vm->vm_ssize; +#else /* FIXME ][ */ + ep->e_vm.vm_rssize = 0; /*XXX*/ +#endif /* FIXME ] */ + } + if (p->p_pptr) + ep->e_ppid = p->p_pptr->p_pid; + else + ep->e_ppid = 0; + ep->e_pgid = p->p_pgrp->pg_id; + ep->e_jobc = p->p_pgrp->pg_jobc; + if ((p->p_flag & P_CONTROLT) && + (tp = ep->e_sess->s_ttyp)) { + ep->e_tdev = tp->t_dev; + ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID; + ep->e_tsess = tp->t_session; + } else + ep->e_tdev = NODEV; + ep->e_flag = ep->e_sess->s_ttyvp ? EPROC_CTTY : 0; + if (SESS_LEADER(p)) + ep->e_flag |= EPROC_SLEADER; + if (p->p_wmesg) + strncpy(ep->e_wmesg, p->p_wmesg, WMESGLEN); + ep->e_xsize = ep->e_xrssize = 0; + ep->e_xccount = ep->e_xswrss = 0; +} +/* + * Fill in an eproc structure for the specified process. 
+ */ +void +fill_externproc(p, exp) + register struct proc *p; + register struct extern_proc *exp; +{ + exp->p_forw = exp->p_back = NULL; + exp->p_vmspace = NULL; + exp->p_sigacts = p->p_sigacts; + exp->p_flag = p->p_flag; + exp->p_stat = p->p_stat ; + exp->p_pid = p->p_pid ; + exp->p_oppid = p->p_oppid ; + exp->p_dupfd = p->p_dupfd ; + /* Mach related */ + exp->user_stack = p->user_stack ; + exp->exit_thread = p->exit_thread ; + exp->p_debugger = p->p_debugger ; + exp->sigwait = p->sigwait ; + /* scheduling */ + exp->p_estcpu = p->p_estcpu ; + exp->p_cpticks = p->p_cpticks ; + exp->p_pctcpu = p->p_pctcpu ; + exp->p_wchan = p->p_wchan ; + exp->p_wmesg = p->p_wmesg ; + exp->p_swtime = p->p_swtime ; + exp->p_slptime = p->p_slptime ; + bcopy(&p->p_realtimer, &exp->p_realtimer,sizeof(struct itimerval)); + bcopy(&p->p_rtime, &exp->p_rtime,sizeof(struct timeval)); + exp->p_uticks = p->p_uticks ; + exp->p_sticks = p->p_sticks ; + exp->p_iticks = p->p_iticks ; + exp->p_traceflag = p->p_traceflag ; + exp->p_tracep = p->p_tracep ; + exp->p_siglist = p->p_siglist ; + exp->p_textvp = p->p_textvp ; + exp->p_holdcnt = 0 ; + exp->p_sigmask = p->p_sigmask ; + exp->p_sigignore = p->p_sigignore ; + exp->p_sigcatch = p->p_sigcatch ; + exp->p_priority = p->p_priority ; + exp->p_usrpri = p->p_usrpri ; + exp->p_nice = p->p_nice ; + bcopy(&p->p_comm, &exp->p_comm,MAXCOMLEN); + exp->p_comm[MAXCOMLEN] = '\0'; + exp->p_pgrp = p->p_pgrp ; + exp->p_addr = NULL; + exp->p_xstat = p->p_xstat ; + exp->p_acflag = p->p_acflag ; + exp->p_ru = p->p_ru ; +} + +kdebug_ops(name, namelen, where, sizep, p) +int *name; +u_int namelen; +char *where; +size_t *sizep; +struct proc *p; +{ +int size=*sizep; +int ret=0; +extern int kdbg_control(int *name, u_int namelen, char * where,size_t * sizep); + + if (ret = suser(p->p_ucred, &p->p_acflag)) + return(ret); + + switch(name[0]) { + case KERN_KDEFLAGS: + case KERN_KDDFLAGS: + case KERN_KDENABLE: + case KERN_KDGETBUF: + case KERN_KDSETUP: + case KERN_KDREMOVE: + 
case KERN_KDSETREG: + case KERN_KDGETREG: + case KERN_KDREADTR: + case KERN_KDPIDTR: + case KERN_KDTHRMAP: + case KERN_KDPIDEX: + case KERN_KDSETRTCDEC: + case KERN_KDSETBUF: + ret = kdbg_control(name, namelen, where, sizep); + break; + default: + ret= EOPNOTSUPP; + break; + } + return(ret); +} + +pcsamples_ops(name, namelen, where, sizep, p) +int *name; +u_int namelen; +char *where; +size_t *sizep; +struct proc *p; +{ +int ret=0; +extern int pcsamples_control(int *name, u_int namelen, char * where,size_t * sizep); + + if (ret = suser(p->p_ucred, &p->p_acflag)) + return(ret); + + switch(name[0]) { + case KERN_PCDISABLE: + case KERN_PCGETBUF: + case KERN_PCSETUP: + case KERN_PCREMOVE: + case KERN_PCREADBUF: + case KERN_PCSETREG: + case KERN_PCSETBUF: + case KERN_PCCOMM: + ret = pcsamples_control(name, namelen, where, sizep); + break; + default: + ret= EOPNOTSUPP; + break; + } + return(ret); +} + +/* + * Returns the top N bytes of the user stack, with + * everything below the first argument character + * zeroed for security reasons. + * Odd data structure is for compatibility. + */ +sysctl_procargs(name, namelen, where, sizep) + int *name; + u_int namelen; + char *where; + size_t *sizep; +{ + register struct proc *p; + register int needed = 0; + int buflen = where != NULL ? *sizep : 0; + int error = 0; + struct vm_map *proc_map; + struct task * task; + vm_map_copy_t tmp; + vm_offset_t arg_addr; + vm_size_t arg_size; + caddr_t data; + unsigned size; + vm_offset_t copy_start, copy_end; + vm_offset_t dealloc_start; /* area to remove from kernel map */ + vm_offset_t dealloc_end; + int *ip; + kern_return_t ret; + int pid; + + + if ((buflen <= 0) || (buflen > (PAGE_SIZE << 1))) { + return(EINVAL); + } + arg_size = buflen; + + /* + * Lookup process by pid + */ + pid = name[0]; + + p = pfind(pid); + if (p == NULL) { + return(EINVAL); + } + + /* + * Copy the top N bytes of the stack. + * On all machines we have so far, the stack grows + * downwards. 
+ * + * If the user expects no more than N bytes of + * argument list, use that as a guess for the + * size. + */ + + if (!p->user_stack) + return(EINVAL); + + arg_addr = (vm_offset_t)(p->user_stack - arg_size); + + + /* + * Before we can block (any VM code), make another + * reference to the map to keep it alive. We do + * that by getting a reference on the task itself. + */ + task = p->task; + if (task == NULL) + return(EINVAL); + + task_reference(task); + + ret = kmem_alloc(kernel_map, ©_start, round_page(arg_size)); + if (ret != KERN_SUCCESS) { + task_deallocate(task); + return(ENOMEM); + } + + proc_map = get_task_map(task); + copy_end = round_page(copy_start + arg_size); + + if( vm_map_copyin(proc_map, trunc_page(arg_addr), round_page(arg_size), + FALSE, &tmp) != KERN_SUCCESS) { + task_deallocate(task); + kmem_free(kernel_map, copy_start, + round_page(arg_size)); + return (EIO); + } + + /* + * Now that we've done the copyin from the process' + * map, we can release the reference to it. + */ + task_deallocate(task); + + if( vm_map_copy_overwrite(kernel_map, copy_start, + tmp, FALSE) != KERN_SUCCESS) { + kmem_free(kernel_map, copy_start, + round_page(arg_size)); + return (EIO); + } + + data = (caddr_t) (copy_end - arg_size); + ip = (int *) copy_end; + size = arg_size; + + /* + * Now look down the stack for the bottom of the + * argument list. Since this call is otherwise + * unprotected, we can't let the nosy user see + * anything else on the stack. + * + * The arguments are pushed on the stack by + * execve() as: + * + * .long 0 + * arg 0 (null-terminated) + * arg 1 + * ... + * arg N + * .long 0 + * + */ + + ip -= 2; /*skip trailing 0 word and assume at least one + argument. The last word of argN may be just + the trailing 0, in which case we'd stop + there */ + while (*--ip) + if (ip == (int *)data) + break; + /* + * To account for saved path name and not having a null after that + * Run the sweep again. 
If we have already sweeped entire range skip this + */ + if (ip != (int *)data) { + while (*--ip) + if (ip == (int *)data) + break; + } + + bzero(data, (unsigned) ((int)ip - (int)data)); + + dealloc_start = copy_start; + dealloc_end = copy_end; + + + size = MIN(size, buflen); + error = copyout(data, where, size); + + if (dealloc_start != (vm_offset_t) 0) { + kmem_free(kernel_map, dealloc_start, + dealloc_end - dealloc_start); + } + if (error) { + return(error); + } + + if (where != NULL) + *sizep = size; + return (0); +} diff --git a/bsd/kern/kern_time.c b/bsd/kern/kern_time.c new file mode 100644 index 000000000..0f225ed52 --- /dev/null +++ b/bsd/kern/kern_time.c @@ -0,0 +1,538 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_time.c 8.4 (Berkeley) 5/26/95 + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#define HZ 100 /* XXX */ + +struct timeval time; + +/* + * Time of day and interval timer support. + * + * These routines provide the kernel entry points to get and set + * the time-of-day and per-process interval timers. 
Subroutines + * here provide support for adding and subtracting timeval structures + * and decrementing interval timers, optionally reloading the interval + * timers when they expire. + */ +struct gettimeofday_args{ + struct timeval *tp; + struct timezone *tzp; +}; +/* ARGSUSED */ +int +gettimeofday(p, uap, retval) + struct proc *p; + register struct gettimeofday_args *uap; + register_t *retval; +{ + struct timeval atv; + int error = 0; + + if (uap->tp) { + microtime(&atv); + if (error = copyout((caddr_t)&atv, (caddr_t)uap->tp, + sizeof (atv))) + return(error); + } + + if (uap->tzp) + error = copyout((caddr_t)&tz, (caddr_t)uap->tzp, + sizeof (tz)); + + return(error); +} + +struct settimeofday_args { + struct timeval *tv; + struct timezone *tzp; +}; +/* ARGSUSED */ +int +settimeofday(p, uap, retval) + struct proc *p; + struct settimeofday_args *uap; + register_t *retval; +{ + struct timeval atv; + struct timezone atz; + int error, s; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + /* Verify all parameters before changing time. */ + if (uap->tv && (error = copyin((caddr_t)uap->tv, + (caddr_t)&atv, sizeof(atv)))) + return (error); + if (uap->tzp && (error = copyin((caddr_t)uap->tzp, + (caddr_t)&atz, sizeof(atz)))) + return (error); + if (uap->tv) + setthetime(&atv); + if (uap->tzp) + tz = atz; + return (0); +} + +setthetime(tv) + struct timeval *tv; +{ + mach_timespec_t now; + long delta; + int s; + + now.tv_sec = tv->tv_sec; + now.tv_nsec = tv->tv_usec * NSEC_PER_USEC; + + clock_set_calendar_value(now); + delta = tv->tv_sec - time.tv_sec; + boottime.tv_sec += delta; +#if NFSCLIENT || NFSSERVER + lease_updatetime(delta); +#endif + s = splhigh(); + microtime(&time); + splx(s); +} + +int tickadj = 240000 / (60 * HZ); /* "standard" clock skew, us./tick */ +int tickdelta; /* current clock skew, us. per tick */ +long timedelta; /* unapplied time correction, us. */ +long bigadj = 1000000; /* use 10x skew above bigadj us. 
*/ + +struct adjtime_args { + struct timeval *delta; + struct timeval *olddelta; +}; +/* ARGSUSED */ +int +adjtime(p, uap, retval) + struct proc *p; + register struct adjtime_args *uap; + register_t *retval; +{ + struct timeval atv, oatv; + register long ndelta; + int s, error; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + if(error = copyin((caddr_t)uap->delta, (caddr_t)&atv, + sizeof (struct timeval))) + return(error); + + ndelta = atv.tv_sec * 1000000 + atv.tv_usec; + if (timedelta == 0) + if (ndelta > bigadj) + tickdelta = 10 * tickadj; + else + tickdelta = tickadj; + if (ndelta % tickdelta) + ndelta = ndelta / tickdelta * tickdelta; + + s = splclock(); + if (uap->olddelta) { + oatv.tv_sec = timedelta / 1000000; + oatv.tv_usec = timedelta % 1000000; + } + timedelta = ndelta; + splx(s); + + if (uap->olddelta) + (void) copyout((caddr_t)&oatv, (caddr_t)uap->olddelta, + sizeof (struct timeval)); + return(0); +} + +#define SECDAY ((unsigned)(24*60*60)) /* seconds per day */ +#define SECYR ((unsigned)(365*SECDAY)) /* per common year */ +#define YRREF 70 /* UNIX time referenced to 1970 */ + +/* + * Initialze the time of day register. + * Trust the RTC except for the case where it is set before + * the UNIX epoch. In that case use the the UNIX epoch. + * The argument passed in is ignored. + */ +void +inittodr(base) + time_t base; +{ + /* + * Initialize the calendar by + * reading the BBC, if not already set. + */ + clock_initialize_calendar(); + + /* + * The value returned by microtime() + * is gotten from the calendar. + */ + microtime(&time); + + /* + * This variable still exists to keep + * 'w' happy. It should only be considered + * an approximation. + */ + boottime.tv_sec = time.tv_sec; + boottime.tv_usec = 0; + + /* + * If the RTC does not have acceptable value, i.e. 
time before + * the UNIX epoch, set it to the UNIX epoch + */ + if (time.tv_sec < 0) { + printf ("WARNING: preposterous time in Real Time Clock"); + time.tv_sec = 0; /* the UNIX epoch */ + time.tv_usec = 0; + setthetime(&time); + boottime = time; + printf(" -- CHECK AND RESET THE DATE!\n"); + } + + return; +} + +/* + * Get value of an interval timer. The process virtual and + * profiling virtual time timers are kept in the u. area, since + * they can be swapped out. These are kept internally in the + * way they are specified externally: in time until they expire. + * + * The real time interval timer is kept in the process table slot + * for the process, and its value (it_value) is kept as an + * absolute time rather than as a delta, so that it is easy to keep + * periodic real-time signals from drifting. + * + * Virtual time timers are processed in the hardclock() routine of + * kern_clock.c. The real time timer is processed by a timeout + * routine, called from the softclock() routine. Since a callout + * may be delayed in real time due to interrupt processing in the system, + * it is possible for the real time timeout routine (realitexpire, given below), + * to be delayed in real time past when it is supposed to occur. It + * does not suffice, therefore, to reload the real timer .it_value from the + * real time timers .it_interval. Rather, we compute the next time in + * absolute time the timer should go off. + */ + +struct getitimer_args { + u_int which; + struct itimerval *itv; +}; +/* ARGSUSED */ +int +getitimer(p, uap, retval) + struct proc *p; + register struct getitimer_args *uap; + register_t *retval; +{ + struct itimerval aitv; + int s; + + if (uap->which > ITIMER_PROF) + return(EINVAL); + + s = splclock(); + if (uap->which == ITIMER_REAL) { + /* + * Convert from absoulte to relative time in .it_value + * part of real time timer. 
If time for real time timer + * has passed return 0, else return difference between + * current time and time for the timer to go off. + */ + aitv = p->p_realtimer; + if (timerisset(&aitv.it_value)) + if (timercmp(&aitv.it_value, &time, <)) + timerclear(&aitv.it_value); + else + timevalsub(&aitv.it_value, &time); + } else + aitv =p->p_stats->p_timer[uap->which]; + splx(s); + return(copyout((caddr_t)&aitv, (caddr_t)uap->itv, + sizeof (struct itimerval))); +} + +struct setitimer_args { + u_int which; + struct itimerval *itv; + struct itimerval *oitv; +}; +/* ARGSUSED */ +int +setitimer(p, uap, retval) + struct proc *p; + register struct setitimer_args *uap; + register_t *retval; +{ + struct itimerval aitv; + register struct itimerval *itvp; + int s, error; + + if (uap->which > ITIMER_PROF) + return(EINVAL); + itvp = uap->itv; + if (itvp && (error = copyin((caddr_t)itvp, (caddr_t)&aitv, + sizeof(struct itimerval)))) + return (error); + if ((uap->itv = uap->oitv) && + (error = getitimer(p, uap, retval))) + return (error); + if (itvp == 0) + return (0); + if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval)) + return (EINVAL); + s = splclock(); + if (uap->which == ITIMER_REAL) { + untimeout(realitexpire, (caddr_t)p); + if (timerisset(&aitv.it_value)) { + timevaladd(&aitv.it_value, &time); + timeout(realitexpire, (caddr_t)p, hzto(&aitv.it_value)); + } + p->p_realtimer = aitv; + } else + p->p_stats->p_timer[uap->which] = aitv; + splx(s); + return(0); /* To insure good return value on success */ +} + +/* + * Real interval timer expired: + * send process whose timer expired an alarm signal. + * If time is not set up to reload, then just return. + * Else compute next time timer should go off which is > current time. + * This is where delay in processing this timeout causes multiple + * SIGALRM calls to be compressed into one. 
+ */ +void +realitexpire(arg) + void *arg; +{ + register struct proc *p; + int s; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock,TRUE); + + p = (struct proc *)arg; + psignal(p, SIGALRM); + if (!timerisset(&p->p_realtimer.it_interval)) { + timerclear(&p->p_realtimer.it_value); + (void) thread_funnel_set(kernel_flock, FALSE); + return; + } + + /* + * If the time's way off, don't try to compensate by getting + * there incrementally. + */ + s = splclock(); + if (p->p_realtimer.it_value.tv_sec < time.tv_sec - 10) { + p->p_realtimer.it_value = time; + timeout(realitexpire, (caddr_t)p, + hzto(&p->p_realtimer.it_value)); + splx(s); + (void) thread_funnel_set(kernel_flock, FALSE); + return; + + } + splx(s); + + for (;;) { + s = splclock(); + timevaladd(&p->p_realtimer.it_value, + &p->p_realtimer.it_interval); + if (timercmp(&p->p_realtimer.it_value, &time, >)) { + timeout(realitexpire, (caddr_t)p, + hzto(&p->p_realtimer.it_value)); + splx(s); + (void) thread_funnel_set(kernel_flock, FALSE); + return; + } + splx(s); + } + + (void) thread_funnel_set(kernel_flock, FALSE); +} + +/* + * Check that a proposed value to load into the .it_value or + * .it_interval part of an interval timer is acceptable, and + * fix it to have at least minimal value (i.e. if it is less + * than the resolution of the clock, round it up.) + */ +int +itimerfix(tv) + struct timeval *tv; +{ + + if (tv->tv_sec < 0 || tv->tv_sec > 100000000 || + tv->tv_usec < 0 || tv->tv_usec >= 1000000) + return (EINVAL); + if (tv->tv_sec == 0 && tv->tv_usec != 0 && tv->tv_usec < tick) + tv->tv_usec = tick; + return (0); +} + +/* + * Decrement an interval timer by a specified number + * of microseconds, which must be less than a second, + * i.e. < 1000000. If the timer expires, then reload + * it. In this case, carry over (usec - old value) to + * reducint the value reloaded into the timer so that + * the timer does not drift. 
This routine assumes + * that it is called in a context where the timers + * on which it is operating cannot change in value. + */ +int +itimerdecr(itp, usec) + register struct itimerval *itp; + int usec; +{ + + if (itp->it_value.tv_usec < usec) { + if (itp->it_value.tv_sec == 0) { + /* expired, and already in next interval */ + usec -= itp->it_value.tv_usec; + goto expire; + } + itp->it_value.tv_usec += 1000000; + itp->it_value.tv_sec--; + } + itp->it_value.tv_usec -= usec; + usec = 0; + if (timerisset(&itp->it_value)) + return (1); + /* expired, exactly at end of interval */ +expire: + if (timerisset(&itp->it_interval)) { + itp->it_value = itp->it_interval; + itp->it_value.tv_usec -= usec; + if (itp->it_value.tv_usec < 0) { + itp->it_value.tv_usec += 1000000; + itp->it_value.tv_sec--; + } + } else + itp->it_value.tv_usec = 0; /* sec is already 0 */ + return (0); +} + +/* + * Add and subtract routines for timevals. + * N.B.: subtract routine doesn't deal with + * results which are before the beginning, + * it just gets very confused in this case. + * Caveat emptor. + */ +void +timevaladd(t1, t2) + struct timeval *t1, *t2; +{ + + t1->tv_sec += t2->tv_sec; + t1->tv_usec += t2->tv_usec; + timevalfix(t1); +} +void +timevalsub(t1, t2) + struct timeval *t1, *t2; +{ + + t1->tv_sec -= t2->tv_sec; + t1->tv_usec -= t2->tv_usec; + timevalfix(t1); +} +void +timevalfix(t1) + struct timeval *t1; +{ + + if (t1->tv_usec < 0) { + t1->tv_sec--; + t1->tv_usec += 1000000; + } + if (t1->tv_usec >= 1000000) { + t1->tv_sec++; + t1->tv_usec -= 1000000; + } +} + +/* + * Return the best possible estimate of the time in the timeval + * to which tvp points. 
+ */ +void +microtime(struct timeval * tvp) +{ + mach_timespec_t now = clock_get_calendar_value(); + + tvp->tv_sec = now.tv_sec; + tvp->tv_usec = now.tv_nsec / NSEC_PER_USEC; +} diff --git a/bsd/kern/kern_xxx.c b/bsd/kern/kern_xxx.c new file mode 100644 index 000000000..7da98c9ee --- /dev/null +++ b/bsd/kern/kern_xxx.c @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kern_xxx.c 8.2 (Berkeley) 11/14/93 + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if COMPAT_43 +/* ARGSUSED */ +int +ogethostid(p, uap, retval) +struct proc *p; +void *uap; +register_t *retval; +{ + + *retval = hostid; + return 0; +} + +struct osethostid_args { + long hostid; +}; +/* ARGSUSED */ +int +osethostid(p, uap, retval) +struct proc *p; +register struct osethostid_args *uap; +register_t *retval; +{ + int error; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + hostid = uap->hostid; + return (0); + +} + +struct ogethostname_args { + char *hostname; + u_int len; +}; +/* ARGSUSED */ +int +ogethostname(p, uap, retval) +struct proc *p; +register struct ogethostname_args *uap; +register_t *retval; +{ + int name; + + name = KERN_HOSTNAME; + + return (kern_sysctl(&name, 1, uap->hostname, &uap->len), + 0, 0); +} + +struct osethostname_args { + char *hostname; + u_int len; +}; +/* ARGSUSED */ +int +osethostname(p, uap, retval) +struct proc *p; +register struct osethostname_args *uap; +register_t *retval; +{ + int name; + int error; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + + name = KERN_HOSTNAME; + return (kern_sysctl(&name, 1, 0, 0, uap->hostname, + uap->len)); +} + +struct ogetdomainname_args { + char *domainname; + int len; +}; +/* ARGSUSED */ +int +ogetdomainname(p, uap, retval) +struct proc *p; +register struct ogetdomainname_args *uap; +register_t *retval; +{ + int name; + + name = KERN_DOMAINNAME; + return (kern_sysctl(&name, 1, uap->domainname, + &uap->len, 0, 0)); +} + +struct osetdomainname_args { + char *domainname; + u_int len; +}; +/* ARGSUSED */ +int +osetdomainname(p, uap, retval) +struct proc *p; +register struct osetdomainname_args *uap; +register_t *retval; +{ + int name; + int error; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + name = KERN_DOMAINNAME; + return (kern_sysctl(&name, 1, 0, 0, 
uap->domainname, + uap->len)); +} +#endif /* COMPAT_43 */ + +struct reboot_args { + int opt; + char *command; +}; + +reboot(p, uap, retval) +struct proc *p; +register struct reboot_args *uap; +register_t *retval; +{ + char command[64]; + int error; + int dummy=0; + + command[0] = '\0'; + + if (error = suser(p->p_cred->pc_ucred, &p->p_acflag)) + return(error); + + if (uap->opt & RB_COMMAND) + error = copyinstr(uap->command, + command, sizeof(command), &dummy); + if (!error) { + SET(p->p_flag, P_REBOOT); /* No more signals for this proc */ + boot(RB_BOOT, uap->opt, command); + } + return(error); +} + diff --git a/bsd/kern/mach_fat.c b/bsd/kern/mach_fat.c new file mode 100644 index 000000000..899c8c0b0 --- /dev/null +++ b/bsd/kern/mach_fat.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. + * + * File: kern/mach_fat.c + * Author: Peter King + * + * Fat file support routines. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/********************************************************************** + * Routine: fatfile_getarch() + * + * Function: Locate the architecture-dependant contents of a fat + * file that match this CPU. + * + * Args: vp: The vnode for the fat file. + * header: A pointer to the fat file header. + * archret (out): Pointer to fat_arch structure to hold + * the results. + * + * Returns: KERN_SUCCESS: Valid architecture found. + * KERN_FAILURE: No valid architecture found. + **********************************************************************/ +load_return_t +fatfile_getarch( + struct vnode *vp, + vm_offset_t data_ptr, + struct fat_arch *archret) +{ + /* vm_pager_t pager; */ + vm_offset_t addr; + vm_size_t size; + kern_return_t kret; + load_return_t lret; + struct machine_slot *ms; + struct fat_arch *arch; + struct fat_arch *best_arch; + int grade; + int best_grade; + int nfat_arch; + int end_of_archs; + struct proc *p = current_proc(); /* XXXX */ + struct fat_header *header; + off_t filesize; + + /* + * Get the pager for the file. + */ + + header = (struct fat_header *)data_ptr; + + /* + * Map portion that must be accessible directly into + * kernel's map. + */ + nfat_arch = NXSwapBigLongToHost(header->nfat_arch); + + end_of_archs = sizeof(struct fat_header) + + nfat_arch * sizeof(struct fat_arch); +#if 0 + filesize = ubc_getsize(vp); + if (end_of_archs > (int)filesize) + return(LOAD_BADMACHO); + } +#endif + + /* This is beacuse we are reading only 512 bytes */ + + if (end_of_archs > 512) + return(LOAD_BADMACHO); + /* + * Round size of fat_arch structures up to page boundry. + */ + size = round_page(end_of_archs); + if (size <= 0) + return(LOAD_BADMACHO); + + /* + * Scan the fat_arch's looking for the best one. 
+ */ + addr = data_ptr; + ms = &machine_slot[cpu_number()]; + best_arch = NULL; + best_grade = 0; + arch = (struct fat_arch *) (addr + sizeof(struct fat_header)); + for (; nfat_arch-- > 0; arch++) { + + /* + * Check to see if right cpu type. + */ + if(NXSwapBigIntToHost(arch->cputype) != ms->cpu_type) + continue; + + /* + * Get the grade of the cpu subtype. + */ + grade = grade_cpu_subtype( + NXSwapBigIntToHost(arch->cpusubtype)); + + /* + * Remember it if it's the best we've seen. + */ + if (grade > best_grade) { + best_grade = grade; + best_arch = arch; + } + } + + /* + * Return our results. + */ + if (best_arch == NULL) { + lret = LOAD_BADARCH; + } else { + archret->cputype = + NXSwapBigIntToHost(best_arch->cputype); + archret->cpusubtype = + NXSwapBigIntToHost(best_arch->cpusubtype); + archret->offset = + NXSwapBigLongToHost(best_arch->offset); + archret->size = + NXSwapBigLongToHost(best_arch->size); + archret->align = + NXSwapBigLongToHost(best_arch->align); + + lret = LOAD_SUCCESS; + } + + /* + * Free the memory we allocated and return. + */ + return(lret); +} + + diff --git a/bsd/kern/mach_header.c b/bsd/kern/mach_header.c new file mode 100644 index 000000000..fe3c02312 --- /dev/null +++ b/bsd/kern/mach_header.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: kern/mach_header.c + * + * Functions for accessing mach-o headers. + * + * HISTORY + * 27-MAR-97 Umesh Vaishampayan (umeshv@NeXT.com) + * Added getsegdatafromheader(); + * + * 29-Jan-92 Mike DeMoney (mike@next.com) + * Made into machine independent form from machdep/m68k/mach_header.c. + * Ifdef'ed out most of this since I couldn't find any references. + */ + +#if !defined(KERNEL_PRELOAD) +#include + +extern struct mach_header _mh_execute_header; + +struct section *getsectbynamefromheader( + struct mach_header *header, + char *seg_name, + char *sect_name); +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name); + +/* + * return the last address (first avail) + */ +vm_offset_t getlastaddr(void) +{ + struct segment_command *sgp; + vm_offset_t last_addr = 0; + struct mach_header *header = &_mh_execute_header; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if ( sgp->cmd == LC_SEGMENT) { + if (sgp->vmaddr + sgp->vmsize > last_addr) + last_addr = sgp->vmaddr + sgp->vmsize; + } + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return last_addr; +} + +#if FIXME /* [ */ +struct mach_header ** +getmachheaders(void) +{ + extern struct mach_header _mh_execute_header; + struct mach_header **tl; + tl = (struct mach_header **)malloc(2*sizeof(struct mach_header *)); + tl[0] = &_mh_execute_header; + tl[1] = (struct mach_header 
*)0; + return tl; +} +#endif /* FIXME ] */ + +/* + * This routine returns the a pointer to the data for the named section in the + * named segment if it exist in the mach header passed to it. Also it returns + * the size of the section data indirectly through the pointer size. Otherwise + * it returns zero for the pointer and the size. + */ +void * +getsectdatafromheader( + struct mach_header *mhp, + char *segname, + char *sectname, + int *size) +{ + const struct section *sp; + void *result; + + sp = getsectbynamefromheader(mhp, segname, sectname); + if(sp == (struct section *)0){ + *size = 0; + return((char *)0); + } + *size = sp->size; + result = (void *)sp->addr; + return result; +} + +/* + * This routine returns the a pointer to the data for the named segment + * if it exist in the mach header passed to it. Also it returns + * the size of the segment data indirectly through the pointer size. + * Otherwise it returns zero for the pointer and the size. + */ +void * +getsegdatafromheader( + struct mach_header *mhp, + char *segname, + int *size) +{ + const struct segment_command *sc; + void *result; + + sc = getsegbynamefromheader(mhp, segname); + if(sc == (struct segment_command *)0){ + *size = 0; + return((char *)0); + } + *size = sc->vmsize; + result = (void *)sc->vmaddr; + return result; +} + +/* + * This routine returns the section structure for the named section in the + * named segment for the mach_header pointer passed to it if it exist. + * Otherwise it returns zero. 
+ */ +struct section * +getsectbynamefromheader( + struct mach_header *mhp, + char *segname, + char *sectname) +{ + struct segment_command *sgp; + struct section *sp; + long i, j; + + sgp = (struct segment_command *) + ((char *)mhp + sizeof(struct mach_header)); + for(i = 0; i < mhp->ncmds; i++){ + if(sgp->cmd == LC_SEGMENT) + if(strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0 || + mhp->filetype == MH_OBJECT){ + sp = (struct section *)((char *)sgp + + sizeof(struct segment_command)); + for(j = 0; j < sgp->nsects; j++){ + if(strncmp(sp->sectname, sectname, + sizeof(sp->sectname)) == 0 && + strncmp(sp->segname, segname, + sizeof(sp->segname)) == 0) + return(sp); + sp = (struct section *)((char *)sp + + sizeof(struct section)); + } + } + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return((struct section *)0); +} + +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name) +{ + struct segment_command *sgp; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if ( sgp->cmd == LC_SEGMENT + && !strncmp(sgp->segname, seg_name, sizeof(sgp->segname))) + return sgp; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return (struct segment_command *)0; +} + + +/* + * For now at least, all the rest of this seems unused. + * NOTE: The constant in here for segment alignment is machine-dependent, + * so if you include this, define a machine dependent constant for it's + * value. 
+ */ +static struct { + struct segment_command seg; + struct section sect; +} fvm_data = { + { + LC_SEGMENT, // cmd + sizeof(fvm_data), // cmdsize + "__USER", // segname + 0, // vmaddr + 0, // vmsize + 0, // fileoff + 0, // filesize + VM_PROT_READ, // maxprot + VM_PROT_READ, // initprot, + 1, // nsects + 0 // flags + }, + { + "", // sectname + "__USER", // segname + 0, // addr + 0, // size + 0, // offset + 4, // align + 0, // reloff + 0, // nreloc + 0 // flags + } +}; + +struct segment_command *fvm_seg; + +static struct fvmfile_command *fvmfilefromheader(struct mach_header *header); +static vm_offset_t getsizeofmacho(struct mach_header *header); + +/* + * Return the first segment_command in the header. + */ +struct segment_command *firstseg(void) +{ + return firstsegfromheader(&_mh_execute_header); +} + +struct segment_command *firstsegfromheader(struct mach_header *header) +{ + struct segment_command *sgp; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if (sgp->cmd == LC_SEGMENT) + return sgp; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return (struct segment_command *)0; +} + +struct segment_command *nextseg(struct segment_command *sgp) +{ + struct segment_command *this; + + this = nextsegfromheader(&_mh_execute_header, sgp); + + /* + * For the kernel's header add on the faked segment for the + * USER boot code identified by a FVMFILE_COMMAND in the mach header. 
+ */ + if (!this && sgp != fvm_seg) + this = fvm_seg; + + return this; +} + +struct segment_command *nextsegfromheader( + struct mach_header *header, + struct segment_command *seg) +{ + struct segment_command *sgp; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++) { + if (sgp == seg) + break; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + + if (i == header->ncmds) + return (struct segment_command *)0; + + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + for (; i < header->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT) + return sgp; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + + return (struct segment_command *)0; +} + + +/* + * Return the address of the named Mach-O segment, or NULL. + */ +struct segment_command *getsegbyname(char *seg_name) +{ + struct segment_command *this; + + this = getsegbynamefromheader(&_mh_execute_header, seg_name); + + /* + * For the kernel's header add on the faked segment for the + * USER boot code identified by a FVMFILE_COMMAND in the mach header. + */ + if (!this && strcmp(seg_name, fvm_seg->segname) == 0) + this = fvm_seg; + + return this; +} + +/* + * This routine returns the a pointer the section structure of the named + * section in the named segment if it exist in the mach executable it is + * linked into. Otherwise it returns zero. 
+ */ +struct section * +getsectbyname( + char *segname, + char *sectname) +{ + return(getsectbynamefromheader( + (struct mach_header *)&_mh_execute_header, segname, sectname)); +} + +struct section *firstsect(struct segment_command *sgp) +{ + struct section *sp; + + if (!sgp || sgp->nsects == 0) + return (struct section *)0; + + return (struct section *)(sgp+1); +} + +struct section *nextsect(struct segment_command *sgp, struct section *sp) +{ + struct section *fsp = firstsect(sgp); + + if (sp - fsp >= sgp->nsects-1) + return (struct section *)0; + + return sp+1; +} + +static struct fvmfile_command *fvmfilefromheader(struct mach_header *header) +{ + struct fvmfile_command *fvp; + int i; + + fvp = (struct fvmfile_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if (fvp->cmd == LC_FVMFILE) + return fvp; + fvp = (struct fvmfile_command *)((char *)fvp + fvp->cmdsize); + } + return (struct fvmfile_command *)0; +} + +/* + * Create a fake USER seg if a fvmfile_command is present. + */ +struct segment_command *getfakefvmseg(void) +{ + struct segment_command *sgp = getsegbyname("__USER"); + struct fvmfile_command *fvp = fvmfilefromheader(&_mh_execute_header); + struct section *sp; + + if (sgp) + return sgp; + + if (!fvp) + return (struct segment_command *)0; + + fvm_seg = &fvm_data.seg; + sgp = fvm_seg; + sp = &fvm_data.sect; + + sgp->vmaddr = fvp->header_addr; + sgp->vmsize = getsizeofmacho((struct mach_header *)(sgp->vmaddr)); + + strcpy(sp->sectname, fvp->name.ptr); + sp->addr = sgp->vmaddr; + sp->size = sgp->vmsize; + +#if DEBUG + printf("fake fvm seg __USER/\"%s\" at 0x%x, size 0x%x\n", + sp->sectname, sp->addr, sp->size); +#endif DEBUG +} + +/* + * Figure out the size the size of the data associated with a + * loaded mach_header. 
+ */ +static vm_offset_t getsizeofmacho(struct mach_header *header) +{ + struct segment_command *sgp; + struct section *sp; + vm_offset_t last_addr; + + last_addr = 0; + for ( sgp = firstsegfromheader(header) + ; sgp + ; sgp = nextsegfromheader(header, sgp)) + { + if (sgp->fileoff + sgp->filesize > last_addr) + last_addr = sgp->fileoff + sgp->filesize; + } + + return last_addr; +} +#endif /* !defined(KERNEL_PRELOAD) */ diff --git a/bsd/kern/mach_header.h b/bsd/kern/mach_header.h new file mode 100644 index 000000000..1e4cbeaba --- /dev/null +++ b/bsd/kern/mach_header.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: kern/mach_header.h + * + * Definitions for accessing mach-o headers. + * + * HISTORY + * 29-Jan-92 Mike DeMoney (mike@next.com) + * Made into machine independent form from machdep/m68k/mach_header.h. + * Ifdef'ed out most of this since I couldn't find any references. 
+ */ + +#ifndef _KERN_MACH_HEADER_ +#define _KERN_MACH_HEADER_ + +#include +#include + +#if KERNEL +struct mach_header **getmachheaders(void); +vm_offset_t getlastaddr(void); + +struct segment_command *firstseg(void); +struct segment_command *firstsegfromheader(struct mach_header *header); +struct segment_command *nextseg(struct segment_command *sgp); +struct segment_command *nextsegfromheader( + struct mach_header *header, + struct segment_command *seg); +struct segment_command *getsegbyname(char *seg_name); +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name); +void *getsegdatafromheader(struct mach_header *, char *, int *); +struct section *getsectbyname(char *seg_name, char *sect_name); +struct section *getsectbynamefromheader( + struct mach_header *header, + char *seg_name, + char *sect_name); +void *getsectdatafromheader(struct mach_header *, char *, char *, int *); +struct section *firstsect(struct segment_command *sgp); +struct section *nextsect(struct segment_command *sgp, struct section *sp); +struct fvmlib_command *fvmlib(void); +struct fvmlib_command *fvmlibfromheader(struct mach_header *header); +struct segment_command *getfakefvmseg(void); + +#endif /* KERNEL */ + +#endif /* _KERN_MACH_HEADER_ */ diff --git a/bsd/kern/mach_loader.c b/bsd/kern/mach_loader.c new file mode 100644 index 000000000..7f30881e9 --- /dev/null +++ b/bsd/kern/mach_loader.c @@ -0,0 +1,1098 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1988, 1989, NeXT, Inc. + * + * File: kern/mach_loader.c + * Author: Avadis Tevanian, Jr. + * + * Mach object file loader (kernel version, for now). + * + * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT + * Started. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +/* + * Prototypes of static functions. 
+ */ +static +load_return_t +parse_machfile( + struct vnode *vp, + vm_map_t map, + struct mach_header *header, + unsigned long file_offset, + unsigned long macho_size, + int depth, + unsigned long *lib_version, + load_result_t *result +), +load_segment( + struct segment_command *scp, + void * pager, + unsigned long pager_offset, + unsigned long macho_size, + unsigned long end_of_file, + vm_map_t map, + load_result_t *result +), +load_unixthread( + struct thread_command *tcp, + load_result_t *result +), +load_thread( + struct thread_command *tcp, + load_result_t *result +), +load_threadstate( + thread_t thread, + unsigned long *ts, + unsigned long total_size +), +load_threadstack( + thread_t thread, + unsigned long *ts, + unsigned long total_size, + vm_offset_t *user_stack +), +load_threadentry( + thread_t thread, + unsigned long *ts, + unsigned long total_size, + vm_offset_t *entry_point +), +load_fvmlib( + struct fvmlib_command *lcp, + vm_map_t map, + int depth +), +load_idfvmlib( + struct fvmlib_command *lcp, + unsigned long *version +), +load_dylinker( + struct dylinker_command *lcp, + vm_map_t map, + int depth, + load_result_t *result +), +get_macho_vnode( + char *path, + struct mach_header *mach_header, + unsigned long *file_offset, + unsigned long *macho_size, + struct vnode **vpp +); + +load_return_t +load_machfile( + struct vnode *vp, + struct mach_header *header, + unsigned long file_offset, + unsigned long macho_size, + load_result_t *result +) +{ + pmap_t pmap; + vm_map_t map; + vm_map_t old_map; + load_result_t myresult; + kern_return_t kret; + load_return_t lret; + + old_map = current_map(); +#ifdef i386 + pmap = get_task_pmap(current_task()); + pmap_reference(pmap); +#else + pmap = pmap_create((vm_size_t) 0); +#endif + map = vm_map_create(pmap, + get_map_min(old_map), + get_map_max(old_map), + TRUE); /**** FIXME ****/ + + if (!result) + result = &myresult; + + *result = (load_result_t) { 0 }; + + lret = parse_machfile(vp, map, header, file_offset, 
macho_size, + 0, (unsigned long *)0, result); + + if (lret != LOAD_SUCCESS) { + vm_map_deallocate(map); /* will lose pmap reference too */ + return(lret); + } + /* + * Commit to new map. First make sure that the current + * users of the task get done with it, and that we clean + * up the old contents of IPC and memory. The task is + * guaranteed to be single threaded upon return (us). + * + * Swap the new map for the old at the task level and at + * our activation. The latter consumes our new map reference + * but each leaves us responsible for the old_map reference. + * That lets us get off the pmap associated with it, and + * then we can release it. + */ + task_halt(current_task()); + + old_map = swap_task_map(current_task(), map); + vm_map_deallocate(old_map); + + old_map = swap_act_map(current_act(), map); + +#ifndef i386 + pmap_switch(pmap); /* Make sure we are using the new pmap */ +#endif + + vm_map_deallocate(old_map); + return(LOAD_SUCCESS); +} + +int dylink_test = 1; +extern vm_offset_t system_shared_region; + +static +load_return_t +parse_machfile( + struct vnode *vp, + vm_map_t map, + struct mach_header *header, + unsigned long file_offset, + unsigned long macho_size, + int depth, + unsigned long *lib_version, + load_result_t *result +) +{ + struct machine_slot *ms; + int ncmds; + struct load_command *lcp, *next; + struct dylinker_command *dlp = 0; + void * pager; + load_return_t ret; + vm_offset_t addr, kl_addr; + vm_size_t size,kl_size; + int offset; + int pass; + struct proc *p = current_proc(); /* XXXX */ + int error; + int resid=0; + + /* + * Break infinite recursion + */ + if (depth > 6) + return(LOAD_FAILURE); + depth++; + + /* + * Check to see if right machine type. 
+ */ + ms = &machine_slot[cpu_number()]; + if ((header->cputype != ms->cpu_type) || + !check_cpu_subtype(header->cpusubtype)) + return(LOAD_BADARCH); + + switch (header->filetype) { + + case MH_OBJECT: + case MH_EXECUTE: + case MH_PRELOAD: + if (depth != 1) + return (LOAD_FAILURE); + break; + + case MH_FVMLIB: + case MH_DYLIB: + if (depth == 1) + return (LOAD_FAILURE); + break; + + case MH_DYLINKER: + if (depth != 2) + return (LOAD_FAILURE); + break; + + default: + return (LOAD_FAILURE); + } + + /* + * Get the pager for the file. + */ + UBCINFOCHECK("parse_machfile", vp); + pager = (void *) ubc_getpager(vp); + + /* + * Map portion that must be accessible directly into + * kernel's map. + */ + if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size) + return(LOAD_BADMACHO); + + /* + * Round size of Mach-O commands up to page boundry. + */ + size = round_page(sizeof (struct mach_header) + header->sizeofcmds); + if (size <= 0) + return(LOAD_BADMACHO); + + /* + * Map the load commands into kernel memory. + */ + addr = 0; +#if 0 /* [ +#if FIXME + ret = vm_allocate_with_pager(kernel_map, &addr, size, TRUE, pager, + file_offset); +#else + ret = vm_map(kernel_map,&addr,size,0,TRUE, pager, file_offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); +#endif /* FIXME */ + if (ret != KERN_SUCCESS) { + return(LOAD_NOSPACE); + } + ubc_map(vp); +#else /* 0 ][ */ + kl_size = size; + kl_addr = kalloc(size); + addr = kl_addr; + if (addr == NULL) { + printf("No space to readin load commands\n"); + return(LOAD_NOSPACE); + } + if(error = vn_rdwr(UIO_READ, vp, addr, size, file_offset, + UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) { + printf("Load command read over nfs failed\n"); + if (kl_addr ) kfree(kl_addr,kl_size); + return(EIO); + } + /* ubc_map(vp); */ /* NOT HERE */ + +#endif /* 0 ] */ + /* + * Scan through the commands, processing each one as necessary. 
+ */ + for (pass = 1; pass <= 2; pass++) { + offset = sizeof(struct mach_header); + ncmds = header->ncmds; + while (ncmds--) { + /* + * Get a pointer to the command. + */ + lcp = (struct load_command *)(addr + offset); + offset += lcp->cmdsize; + + /* + * Check for valid lcp pointer by checking + * next offset. + */ + if (offset > header->sizeofcmds + + sizeof(struct mach_header)) { +#if 0 + vm_map_remove(kernel_map, addr, addr + size); +#endif + if (kl_addr ) kfree(kl_addr,kl_size); + return(LOAD_BADMACHO); + } + + /* + * Check for valid command. + */ + switch(lcp->cmd) { + case LC_SEGMENT: + if (pass != 1) + break; + ret = load_segment( + (struct segment_command *) lcp, + pager, file_offset, + macho_size, + (unsigned long)ubc_getsize(vp), + map, + result); + break; + case LC_THREAD: + if (pass != 2) + break; + ret = load_thread((struct thread_command *)lcp, + result); + break; + case LC_UNIXTHREAD: + if (pass != 2) + break; + ret = load_unixthread( + (struct thread_command *) lcp, + result); + break; + case LC_LOADFVMLIB: + if (pass != 1) + break; + ret = load_fvmlib((struct fvmlib_command *)lcp, + map, depth); + break; + case LC_IDFVMLIB: + if (pass != 1) + break; + if (lib_version) { + ret = load_idfvmlib( + (struct fvmlib_command *)lcp, + lib_version); + } + break; + case LC_LOAD_DYLINKER: + if (pass != 2) + break; + if (depth == 1 || dlp == 0) + dlp = (struct dylinker_command *)lcp; + else + ret = LOAD_FAILURE; + break; + default: + ret = KERN_SUCCESS;/* ignore other stuff */ + } + if (ret != LOAD_SUCCESS) + break; + } + if (ret != LOAD_SUCCESS) + break; + } + if (ret == LOAD_SUCCESS && dlp != 0) { + vm_offset_t addr; + shared_region_mapping_t shared_region; + struct shared_region_task_mappings map_info; + shared_region_mapping_t next; + +RedoLookup: + vm_get_shared_region(current_task(), &shared_region); + map_info.self = (vm_offset_t)shared_region; + shared_region_mapping_info(shared_region, + &(map_info.text_region), + &(map_info.text_size), + 
&(map_info.data_region), + &(map_info.data_size), + &(map_info.region_mappings), + &(map_info.client_base), + &(map_info.alternate_base), + &(map_info.alternate_next), + &(map_info.flags), &next); + + if((map_info.flags & SHARED_REGION_FULL) && + (map_info.flags & SHARED_REGION_SYSTEM)) { + if(map_info.self != (vm_offset_t)system_shared_region) { + shared_region_mapping_ref(system_shared_region); + vm_set_shared_region(current_task(), + system_shared_region); + shared_region_mapping_dealloc( + (shared_region_mapping_t)map_info.self); + goto RedoLookup; + } + } + + + if (dylink_test) { + addr = map_info.client_base; + vm_map(map, &addr, map_info.text_size, 0, + (VM_MEMORY_SHARED_PMAP << 24) + | SHARED_LIB_ALIAS, + map_info.text_region, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE); + addr = map_info.client_base + map_info.text_size; + vm_map(map, &addr, map_info.data_size, + 0, SHARED_LIB_ALIAS, + map_info.data_region, 0, TRUE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE); + } + ret = load_dylinker(dlp, map, depth, result); + } + + if (kl_addr ) kfree(kl_addr,kl_size); +#if 0 + vm_map_remove(kernel_map, addr, addr + size); +#endif + if ((ret == LOAD_SUCCESS) && (depth == 1) && + (result->thread_count == 0)) + ret = LOAD_FAILURE; + if (ret == LOAD_SUCCESS) + ubc_map(vp); + + return(ret); +} + +static +load_return_t +load_segment( + struct segment_command *scp, + void * pager, + unsigned long pager_offset, + unsigned long macho_size, + unsigned long end_of_file, + vm_map_t map, + load_result_t *result +) +{ + kern_return_t ret; + vm_offset_t map_addr, map_offset; + vm_size_t map_size, seg_size, delta_size; + caddr_t tmp; + vm_prot_t initprot; + vm_prot_t maxprot; +#if 1 + extern int print_map_addr; +#endif /* 1 */ + + /* + * Make sure what we get from the file is really ours (as specified + * by macho_size). 
+ */ + if (scp->fileoff + scp->filesize > macho_size) + return (LOAD_BADMACHO); + + seg_size = round_page(scp->vmsize); + if (seg_size == 0) + return(KERN_SUCCESS); + + /* + * Round sizes to page size. + */ + map_size = round_page(scp->filesize); + map_addr = trunc_page(scp->vmaddr); + + map_offset = pager_offset + scp->fileoff; + + if (map_size > 0) { + initprot = (scp->initprot) & VM_PROT_ALL; + maxprot = (scp->maxprot) & VM_PROT_ALL; + /* + * Map a copy of the file into the address space. + */ + ret = vm_map(map, + &map_addr, map_size, (vm_offset_t)0, FALSE, + pager, map_offset, TRUE, + initprot, maxprot, + VM_INHERIT_DEFAULT); + if (ret != KERN_SUCCESS) + return(LOAD_NOSPACE); + +#if 1 + if (print_map_addr) + printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size); +#endif /* 1 */ + /* + * If the file didn't end on a page boundary, + * we need to zero the leftover. + */ + delta_size = map_size - scp->filesize; +#if FIXME + if (delta_size > 0) { + vm_offset_t tmp; + + ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE); + if (ret != KERN_SUCCESS) + return(LOAD_RESOURCE); + + if (copyout(tmp, map_addr + scp->filesize, + delta_size)) { + (void) vm_deallocate( + kernel_map, tmp, delta_size); + return(LOAD_FAILURE); + } + + (void) vm_deallocate(kernel_map, tmp, delta_size); + } +#endif /* FIXME */ + } + + /* + * If the virtual size of the segment is greater + * than the size from the file, we need to allocate + * zero fill memory for the rest. + */ + delta_size = seg_size - map_size; + if (delta_size > 0) { + vm_offset_t tmp = map_addr + map_size; + + ret = vm_allocate(map, &tmp, delta_size, FALSE); + if (ret != KERN_SUCCESS) + return(LOAD_NOSPACE); + } + + /* + * Set protection values. (Note: ignore errors!) 
+ */ + + if (scp->maxprot != VM_PROT_DEFAULT) { + (void) vm_protect(map, + map_addr, seg_size, + TRUE, scp->maxprot); + } + if (scp->initprot != VM_PROT_DEFAULT) { + (void) vm_protect(map, + map_addr, seg_size, + FALSE, scp->initprot); + } + if ( (scp->fileoff == 0) && (scp->filesize != 0) ) + result->mach_header = map_addr; + return(LOAD_SUCCESS); +} + +static +load_return_t +load_unixthread( + struct thread_command *tcp, + load_result_t *result +) +{ + thread_t thread = current_thread(); + load_return_t ret; + + if (result->thread_count != 0) + return (LOAD_FAILURE); + + ret = load_threadstack(thread, + (unsigned long *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &result->user_stack); + if (ret != LOAD_SUCCESS) + return(ret); + + ret = load_threadentry(thread, + (unsigned long *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &result->entry_point); + if (ret != LOAD_SUCCESS) + return(ret); + + ret = load_threadstate(thread, + (unsigned long *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command)); + if (ret != LOAD_SUCCESS) + return (ret); + + result->unixproc = TRUE; + result->thread_count++; + + return(LOAD_SUCCESS); +} + +static +load_return_t +load_thread( + struct thread_command *tcp, + load_result_t *result +) +{ + thread_t thread; + kern_return_t kret; + load_return_t lret; + + if (result->thread_count == 0) + thread = current_thread(); + else { + kret = thread_create(current_task(), &thread); + if (kret != KERN_SUCCESS) + return(LOAD_RESOURCE); + thread_deallocate(thread); + } + + lret = load_threadstate(thread, + (unsigned long *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command)); + if (lret != LOAD_SUCCESS) + return (lret); + + if (result->thread_count == 0) { + lret = load_threadstack(current_thread(), + (unsigned long 
*)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &result->user_stack); + if (lret != LOAD_SUCCESS) + return(lret); + + lret = load_threadentry(current_thread(), + (unsigned long *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &result->entry_point); + if (lret != LOAD_SUCCESS) + return(lret); + } + /* + * Resume thread now, note that this means that the thread + * commands should appear after all the load commands to + * be sure they don't reference anything not yet mapped. + */ + else + thread_resume(thread); + + result->thread_count++; + + return(LOAD_SUCCESS); +} + +static +load_return_t +load_threadstate( + thread_t thread, + unsigned long *ts, + unsigned long total_size +) +{ + kern_return_t ret; + unsigned long size; + int flavor; + + /* + * Set the thread state. + */ + + while (total_size > 0) { + flavor = *ts++; + size = *ts++; + total_size -= (size+2)*sizeof(unsigned long); + if (total_size < 0) + return(LOAD_BADMACHO); + ret = thread_setstatus(getact_thread(thread), flavor, ts, size); + if (ret != KERN_SUCCESS) + return(LOAD_FAILURE); + ts += size; /* ts is a (unsigned long *) */ + } + return(LOAD_SUCCESS); +} + +static +load_return_t +load_threadstack( + thread_t thread, + unsigned long *ts, + unsigned long total_size, + vm_offset_t *user_stack +) +{ + kern_return_t ret; + unsigned long size; + int flavor; + + /* + * Set the thread state. 
+ */ + *user_stack = 0; + while (total_size > 0) { + flavor = *ts++; + size = *ts++; + total_size -= (size+2)*sizeof(unsigned long); + if (total_size < 0) + return(LOAD_BADMACHO); + ret = thread_userstack(thread, flavor, ts, size, user_stack); + if (ret != KERN_SUCCESS) + return(LOAD_FAILURE); + ts += size; /* ts is a (unsigned long *) */ + } + return(LOAD_SUCCESS); +} + +static +load_return_t +load_threadentry( + thread_t thread, + unsigned long *ts, + unsigned long total_size, + vm_offset_t *entry_point +) +{ + kern_return_t ret; + unsigned long size; + int flavor; + + /* + * Set the thread state. + */ + *entry_point = 0; + while (total_size > 0) { + flavor = *ts++; + size = *ts++; + total_size -= (size+2)*sizeof(unsigned long); + if (total_size < 0) + return(LOAD_BADMACHO); + ret = thread_entrypoint(thread, flavor, ts, size, entry_point); + if (ret != KERN_SUCCESS) + return(LOAD_FAILURE); + ts += size; /* ts is a (unsigned long *) */ + } + return(LOAD_SUCCESS); +} + +static +load_return_t +load_fvmlib( + struct fvmlib_command *lcp, + vm_map_t map, + int depth +) +{ + char *name; + char *p; + struct vnode *vp; + struct mach_header header; + unsigned long file_offset; + unsigned long macho_size; + unsigned long lib_version; + load_result_t myresult; + kern_return_t ret; + + name = (char *)lcp + lcp->fvmlib.name.offset; + /* + * Check for a proper null terminated string. + */ + p = name; + do { + if (p >= (char *)lcp + lcp->cmdsize) + return(LOAD_BADMACHO); + } while (*p++); + + ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp); + if (ret) + return (ret); + + myresult = (load_result_t) { 0 }; + + /* + * Load the Mach-O. 
+ */ + ret = parse_machfile(vp, map, &header, + file_offset, macho_size, + depth, &lib_version, &myresult); + + if ((ret == LOAD_SUCCESS) && + (lib_version < lcp->fvmlib.minor_version)) + ret = LOAD_SHLIB; + + vrele(vp); + return(ret); +} + +static +load_return_t +load_idfvmlib( + struct fvmlib_command *lcp, + unsigned long *version +) +{ + *version = lcp->fvmlib.minor_version; + return(LOAD_SUCCESS); +} + +static +load_return_t +load_dylinker( + struct dylinker_command *lcp, + vm_map_t map, + int depth, + load_result_t *result +) +{ + char *name; + char *p; + struct vnode *vp; + struct mach_header header; + unsigned long file_offset; + unsigned long macho_size; + vm_map_t copy_map; + load_result_t myresult; + kern_return_t ret; + vm_map_copy_t tmp; + vm_offset_t dyl_start, map_addr; + vm_size_t dyl_length; + + name = (char *)lcp + lcp->name.offset; + /* + * Check for a proper null terminated string. + */ + p = name; + do { + if (p >= (char *)lcp + lcp->cmdsize) + return(LOAD_BADMACHO); + } while (*p++); + + ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp); + if (ret) + return (ret); + + myresult = (load_result_t) { 0 }; + + /* + * Load the Mach-O. 
+ */ + + copy_map = vm_map_create(pmap_create(macho_size), + get_map_min(map), get_map_max( map), TRUE); + + ret = parse_machfile(vp, copy_map, &header, + file_offset, macho_size, + depth, 0, &myresult); + + if (ret) + goto out; + + if (get_map_nentries(copy_map) > 0) { + + dyl_start = get_map_start(copy_map); + dyl_length = get_map_end(copy_map) - dyl_start; + + map_addr = dyl_start; + ret = vm_allocate(map, &map_addr, dyl_length, FALSE); + if (ret != KERN_SUCCESS) { + ret = vm_allocate(map, &map_addr, dyl_length, TRUE); + } + + if (ret != KERN_SUCCESS) { + ret = LOAD_NOSPACE; + goto out; + + } + ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE, + &tmp); + if (ret != KERN_SUCCESS) { + (void) vm_map_remove(map, + map_addr, + map_addr + dyl_length, + VM_MAP_NO_FLAGS); + goto out; + } + + ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE); + if (ret != KERN_SUCCESS) { + vm_map_copy_discard(tmp); + (void) vm_map_remove(map, + map_addr, + map_addr + dyl_length, + VM_MAP_NO_FLAGS); + goto out; } + + if (map_addr != dyl_start) + myresult.entry_point += (map_addr - dyl_start); + } else + ret = LOAD_FAILURE; + + if (ret == LOAD_SUCCESS) { + result->dynlinker = TRUE; + result->entry_point = myresult.entry_point; + ubc_map(vp); + } +out: + vm_map_deallocate(copy_map); + + vrele(vp); + return (ret); + +} + +static +load_return_t +get_macho_vnode( + char *path, + struct mach_header *mach_header, + unsigned long *file_offset, + unsigned long *macho_size, + struct vnode **vpp +) +{ + struct vnode *vp; + struct vattr attr, *atp; + struct nameidata nid, *ndp; + struct proc *p = current_proc(); /* XXXX */ + boolean_t is_fat; + struct fat_arch fat_arch; + int error; + int resid; + union { + struct mach_header mach_header; + struct fat_header fat_header; + char pad[512]; + } header; + error = KERN_SUCCESS; + + ndp = &nid; + atp = &attr; + + /* init the namei data to point the file user's program name */ + NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | SAVENAME, UIO_SYSSPACE, 
path, p); + + if (error = namei(ndp)) + return(error); + + vp = ndp->ni_vp; + + /* check for regular file */ + if (vp->v_type != VREG) { + error = EACCES; + goto bad1; + } + + /* get attributes */ + if (error = VOP_GETATTR(vp, &attr, p->p_ucred, p)) + goto bad1; + + /* Check mount point */ + if (vp->v_mount->mnt_flag & MNT_NOEXEC) { + error = EACCES; + goto bad1; + } + + if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED)) + atp->va_mode &= ~(VSUID | VSGID); + + /* check access. for root we have to see if any exec bit on */ + if (error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) + goto bad1; + if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) { + error = EACCES; + goto bad1; + } + + /* try to open it */ + if (error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) + goto bad1; + if(error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0, + UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p)) + goto bad2; + +/* XXXX WMG - we should check for a short read of the header here */ + + if (header.mach_header.magic == MH_MAGIC) + is_fat = FALSE; + else if (header.fat_header.magic == FAT_MAGIC || + header.fat_header.magic == FAT_CIGAM) + is_fat = TRUE; + else { + error = LOAD_BADMACHO; + goto bad2; + } + + if (is_fat) { + /* + * Look up our architecture in the fat file. + */ + error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch); + if (error != LOAD_SUCCESS) { + goto bad2; + } + /* + * Read the Mach-O header out of it + */ + error = vn_rdwr(UIO_READ, vp, &header.mach_header, + sizeof(header.mach_header), fat_arch.offset, + UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p); + if (error) { + error = LOAD_FAILURE; + goto bad2; + } + + /* + * Is this really a Mach-O? 
+ */ + if (header.mach_header.magic != MH_MAGIC) { + error = LOAD_BADMACHO; + goto bad2; + } + + *mach_header = header.mach_header; + *file_offset = fat_arch.offset; + *macho_size = fat_arch.size; + *vpp = vp; + /* leaks otherwise - A.R */ + FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI); + + /* i_lock exclusive panics, otherwise during pageins */ + VOP_UNLOCK(vp, 0, p); + return (error); + } else { + + *mach_header = header.mach_header; + *file_offset = 0; + if (UBCISVALID(vp)) + ubc_setsize(vp, attr.va_size); /* XXX why? */ + *macho_size = attr.va_size; + *vpp = vp; + /* leaks otherwise - A.R */ + FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI); + + /* i_lock exclusive panics, otherwise during pageins */ + VOP_UNLOCK(vp, 0, p); + return (error); + } + +bad2: + /* + * unlock and close the vnode, restore the old one, free the + * pathname buf, and punt. + */ + VOP_UNLOCK(vp, 0, p); + vn_close(vp, FREAD, p->p_ucred, p); + FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI); + return (error); +bad1: + /* + * free the namei pathname buffer, and put the vnode + * (which we don't yet have open). + */ + FREE_ZONE(ndp->ni_cnd.cn_pnbuf, ndp->ni_cnd.cn_pnlen, M_NAMEI); + vput(vp); + return(error); +} diff --git a/bsd/kern/mach_loader.h b/bsd/kern/mach_loader.h new file mode 100644 index 000000000..8e9715bcc --- /dev/null +++ b/bsd/kern/mach_loader.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1992, NeXT, Inc. + * + * File: kern/mach_loader.h + * + * Mach object file loader API. + * + * HISTORY + * 24-Aug-92 Doug Mitchell at NeXT + * Created. + */ + +#ifndef _BSD_KERN_MACH_LOADER_H_ +#define _BSD_KERN_MACH_LOADER_H_ + +#include + +#include + +typedef int load_return_t; + +typedef struct _load_result { + vm_offset_t mach_header; + vm_offset_t entry_point; + vm_offset_t user_stack; + int thread_count; + unsigned int + /* boolean_t */ unixproc :1, + dynlinker :1, + :0; +} load_result_t; + +load_return_t load_machfile( + struct vnode *vp, + struct mach_header *header, + unsigned long file_offset, + unsigned long macho_size, + load_result_t *result); + +#define LOAD_SUCCESS 0 +#define LOAD_BADARCH 1 /* CPU type/subtype not found */ +#define LOAD_BADMACHO 2 /* malformed mach-o file */ +#define LOAD_SHLIB 3 /* shlib version mismatch */ +#define LOAD_FAILURE 4 /* Miscellaneous error */ +#define LOAD_NOSPACE 5 /* No VM available */ +#define LOAD_PROTECT 6 /* protection violation */ +#define LOAD_RESOURCE 7 /* resource allocation failure */ + +#endif /* _BSD_KERN_MACH_LOADER_H_ */ diff --git a/bsd/kern/mach_process.c b/bsd/kern/mach_process.c new file mode 100644 index 000000000..284e21a88 --- /dev/null +++ b/bsd/kern/mach_process.c @@ -0,0 +1,328 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)sys_process.c 8.1 (Berkeley) 6/10/93 + */ +/* + * HISTORY + * + * 10-Jun-97 Umesh Vaishampayan (umeshv@apple.com) + * Ported to PPC. Cleaned up the architecture specific code. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* Macros to clear/set/test flags. */ +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) + +void psignal_lock __P((struct proc *, int, int, int)); +/* + * sys-trace system call. 
+ */ +struct ptrace_args { + int req; + pid_t pid; + caddr_t addr; + int data; +}; +int +ptrace(p, uap, retval) + struct proc *p; + struct ptrace_args *uap; + register_t *retval; +{ + struct proc *t = current_proc(); /* target process */ + vm_offset_t start_addr, end_addr, + kern_addr, offset; + vm_size_t size; + boolean_t change_protection; + task_t task; + thread_t thread; + thread_act_t th_act; + struct uthread *ut; + int *locr0; + int error = 0; +#if defined(ppc) + struct ppc_saved_state statep; +#elif defined(i386) + struct i386_saved_state statep; +#else +#error architecture not supported +#endif + unsigned long state_count; + + + /* + * Intercept and deal with "please trace me" request. + */ + if (uap->req == PT_TRACE_ME) { + SET(p->p_flag, P_TRACED); + /* Non-attached case, our tracer is our parent. */ + t->p_oppid = t->p_pptr->p_pid; + return(0); + } + + /* + * Locate victim, and make sure it is traceable. + */ + if ((t = pfind(uap->pid)) == NULL) + return (ESRCH); + + + /* We do not want ptrace to do anything with kernel, init + * and mach_init + */ + if (uap->pid <=2 ) + return (EPERM); + + task = t->task; + if (uap->req == PT_ATTACH) { + + /* + * You can't attach to a process if: + * (1) it's the process that's doing the attaching, + */ + if (t->p_pid == p->p_pid) + return (EINVAL); + + /* + * (2) it's already being traced, or + */ + if (ISSET(t->p_flag, P_TRACED)) + return (EBUSY); + + /* + * (3) it's not owned by you, or is set-id on exec + * (unless you're root). 
+ */ + if ((t->p_cred->p_ruid != p->p_cred->p_ruid || + ISSET(t->p_flag, P_SUGID)) && + (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return (error); + + SET(t->p_flag, P_TRACED); + t->p_oppid = t->p_pptr->p_pid; + if (t->p_pptr != p) + proc_reparent(t, p); + + if (get_task_userstop(task) == 0 ) { + t->p_xstat = 0; + psignal(t, SIGSTOP); + } else { + t->p_xstat = SIGSTOP; + task_resume(task); + } + return(0); + } + + /* + * You can't do what you want to the process if: + * (1) It's not being traced at all, + */ + if (!ISSET(t->p_flag, P_TRACED)) + return (EPERM); + + /* + * (2) it's not being traced by _you_, or + */ + if (t->p_pptr != p) + return (EBUSY); + + /* + * (3) it's not currently stopped. + */ + if (t->p_stat != SSTOP) + return (EBUSY); + + /* + * Mach version of ptrace executes request directly here, + * thus simplifying the interaction of ptrace and signals. + */ + switch (uap->req) { + + case PT_DETACH: + if (t->p_oppid != t->p_pptr->p_pid) { + struct proc *pp; + + pp = pfind(t->p_oppid); + proc_reparent(t, pp ? pp : initproc); + } + + t->p_oppid = 0; + CLR(t->p_flag, P_TRACED); + goto resume; + + case PT_KILL: + /* + * Tell child process to kill itself after it + * is resumed by adding NSIG to p_cursig. 
[see issig] + */ + psignal_lock(t, SIGKILL, 0, 1); + goto resume; + + case PT_STEP: /* single step the child */ + case PT_CONTINUE: /* continue the child */ + th_act = (thread_act_t)get_firstthread(task); + if (th_act == THREAD_NULL) + goto errorLabel; + ut = (uthread_t)get_bsdthread_info(th_act); + locr0 = ut->uu_ar0; +#if defined(i386) + state_count = i386_NEW_THREAD_STATE_COUNT; + if (act_machine_get_state(th_act, i386_NEW_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto errorLabel; + } +#elif defined(ppc) + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_get_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto errorLabel; + } +#else +#error architecture not supported +#endif + if ((int)uap->addr != 1) { +#if defined(i386) + locr0[PC] = (int)uap->addr; +#elif defined(ppc) +#define ALIGNED(addr,size) (((unsigned)(addr)&((size)-1))==0) + if (!ALIGNED((int)uap->addr, sizeof(int))) + return (ERESTART); + + statep.srr0 = (int)uap->addr; + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_set_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto errorLabel; + } +#undef ALIGNED +#else +#error architecture not implemented! +#endif + } /* (int)uap->addr != 1 */ + + if ((unsigned)uap->data < 0 || (unsigned)uap->data >= NSIG) + goto errorLabel; + + if (uap->data != 0) { + psignal_lock(t, uap->data, 0, 1); + } +#if defined(ppc) + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_get_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto errorLabel; + } +#endif + +#define MSR_SE_BIT 21 + + if (uap->req == PT_STEP) { +#if defined(i386) + locr0[PS] |= PSL_T; +#elif defined(ppc) + statep.srr1 |= MASK(MSR_SE); +#else +#error architecture not implemented! 
+#endif + } /* uap->req == PT_STEP */ + else { /* PT_CONTINUE - clear trace bit if set */ +#if defined(i386) + locr0[PS] &= ~PSL_T; +#elif defined(ppc) + statep.srr1 &= ~MASK(MSR_SE); +#endif + } +#if defined (ppc) + state_count = PPC_THREAD_STATE_COUNT; + if (act_machine_set_state(th_act, PPC_THREAD_STATE, &statep, &state_count) != KERN_SUCCESS) { + goto errorLabel; + } +#endif + resume: + t->p_xstat = uap->data; + t->p_stat = SRUN; + if (t->sigwait) { + wakeup((caddr_t)&(t->sigwait)); + task_release(task); + } + break; + + default: + errorLabel: + return(EINVAL); + } + return(0); +} + diff --git a/bsd/kern/md5c.c b/bsd/kern/md5c.c new file mode 100644 index 000000000..80fce2d7a --- /dev/null +++ b/bsd/kern/md5c.c @@ -0,0 +1,342 @@ +/* + * MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm + * + * Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All + * rights reserved. + * + * License to copy and use this software is granted provided that it + * is identified as the "RSA Data Security, Inc. MD5 Message-Digest + * Algorithm" in all material mentioning or referencing this software + * or this function. + * + * License is also granted to make and use derivative works provided + * that such works are identified as "derived from the RSA Data + * Security, Inc. MD5 Message-Digest Algorithm" in all material + * mentioning or referencing the derived work. + * + * RSA Data Security, Inc. makes no representations concerning either + * the merchantability of this software or the suitability of this + * software for any particular purpose. It is provided "as is" + * without express or implied warranty of any kind. + * + * These notices must be retained in any copies of any part of this + * documentation and/or software. + * + * $Id: md5c.c,v 1.2 2000/09/14 20:34:44 lindak Exp $ + * + * This code is the same as the code published by RSA Inc. It has been + * edited for clarity and style only. 
+ */ + +#include + +#ifdef KERNEL +#include +#else +#include +#endif + +#include + + +#ifdef KERNEL +#define memset(x,y,z) bzero(x,z); +#define memcpy(x,y,z) bcopy(y, x, z) +#endif + +#if defined(__i386__) || defined(__alpha__) +#define Encode memcpy +#define Decode memcpy +#else /* __i386__ */ + +/* + * Encodes input (u_int32_t) into output (unsigned char). Assumes len is + * a multiple of 4. + */ + +/* XXX not prototyped, and not compatible with memcpy(). */ +static void +Encode (output, input, len) + unsigned char *output; + u_int32_t *input; + unsigned int len; +{ + unsigned int i, j; + + for (i = 0, j = 0; j < len; i++, j += 4) { + output[j] = (unsigned char)(input[i] & 0xff); + output[j+1] = (unsigned char)((input[i] >> 8) & 0xff); + output[j+2] = (unsigned char)((input[i] >> 16) & 0xff); + output[j+3] = (unsigned char)((input[i] >> 24) & 0xff); + } +} + +/* + * Decodes input (unsigned char) into output (u_int32_t). Assumes len is + * a multiple of 4. + */ + +static void +Decode (output, input, len) + u_int32_t *output; + const unsigned char *input; + unsigned int len; +{ + unsigned int i, j; + + for (i = 0, j = 0; j < len; i++, j += 4) + output[i] = ((u_int32_t)input[j]) | (((u_int32_t)input[j+1]) << 8) | + (((u_int32_t)input[j+2]) << 16) | (((u_int32_t)input[j+3]) << 24); +} +#endif /* i386 */ + +static unsigned char PADDING[64] = { + 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 +}; + +/* F, G, H and I are basic MD5 functions. */ +#define F(x, y, z) (((x) & (y)) | ((~x) & (z))) +#define G(x, y, z) (((x) & (z)) | ((y) & (~z))) +#define H(x, y, z) ((x) ^ (y) ^ (z)) +#define I(x, y, z) ((y) ^ ((x) | (~z))) + +/* ROTATE_LEFT rotates x left n bits. */ +#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) + +/* + * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. 
+ * Rotation is separate from addition to prevent recomputation. + */ +#define FF(a, b, c, d, x, s, ac) { \ + (a) += F ((b), (c), (d)) + (x) + (u_int32_t)(ac); \ + (a) = ROTATE_LEFT ((a), (s)); \ + (a) += (b); \ + } +#define GG(a, b, c, d, x, s, ac) { \ + (a) += G ((b), (c), (d)) + (x) + (u_int32_t)(ac); \ + (a) = ROTATE_LEFT ((a), (s)); \ + (a) += (b); \ + } +#define HH(a, b, c, d, x, s, ac) { \ + (a) += H ((b), (c), (d)) + (x) + (u_int32_t)(ac); \ + (a) = ROTATE_LEFT ((a), (s)); \ + (a) += (b); \ + } +#define II(a, b, c, d, x, s, ac) { \ + (a) += I ((b), (c), (d)) + (x) + (u_int32_t)(ac); \ + (a) = ROTATE_LEFT ((a), (s)); \ + (a) += (b); \ + } + +/* MD5 initialization. Begins an MD5 operation, writing a new context. */ + +void +MD5Init (context) + MD5_CTX *context; +{ + + context->count[0] = context->count[1] = 0; + + /* Load magic initialization constants. */ + context->state[0] = 0x67452301; + context->state[1] = 0xefcdab89; + context->state[2] = 0x98badcfe; + context->state[3] = 0x10325476; +} + +/* + * MD5 block update operation. Continues an MD5 message-digest + * operation, processing another message block, and updating the + * context. + */ + +void +MD5Update (context, input, inputLen) + MD5_CTX *context; + const unsigned char *input; + unsigned int inputLen; +{ + unsigned int i, index, partLen; + + /* Compute number of bytes mod 64 */ + index = (unsigned int)((context->count[0] >> 3) & 0x3F); + + /* Update number of bits */ + if ((context->count[0] += ((u_int32_t)inputLen << 3)) + < ((u_int32_t)inputLen << 3)) + context->count[1]++; + context->count[1] += ((u_int32_t)inputLen >> 29); + + partLen = 64 - index; + + /* Transform as many times as possible. 
*/ + if (inputLen >= partLen) { + memcpy((void *)&context->buffer[index], (const void *)input, + partLen); + MD5Transform (context->state, context->buffer); + + for (i = partLen; i + 63 < inputLen; i += 64) + MD5Transform (context->state, &input[i]); + + index = 0; + } + else + i = 0; + + /* Buffer remaining input */ + memcpy ((void *)&context->buffer[index], (const void *)&input[i], + inputLen-i); +} + +/* + * MD5 padding. Adds padding followed by original length. + */ + +void +MD5Pad (context) + MD5_CTX *context; +{ + unsigned char bits[8]; + unsigned int index, padLen; + + /* Save number of bits */ + Encode (bits, context->count, 8); + + /* Pad out to 56 mod 64. */ + index = (unsigned int)((context->count[0] >> 3) & 0x3f); + padLen = (index < 56) ? (56 - index) : (120 - index); + MD5Update (context, PADDING, padLen); + + /* Append length (before padding) */ + MD5Update (context, bits, 8); +} + +/* + * MD5 finalization. Ends an MD5 message-digest operation, writing the + * the message digest and zeroizing the context. + */ + +void +MD5Final (digest, context) + unsigned char digest[16]; + MD5_CTX *context; +{ + /* Do padding. */ + MD5Pad (context); + + /* Store state in digest */ + Encode (digest, context->state, 16); + + /* Zeroize sensitive information. */ + memset ((void *)context, 0, sizeof (*context)); +} + +/* MD5 basic transformation. Transforms state based on block. 
*/ + +void +MD5Transform (state, block) + u_int32_t state[4]; + const unsigned char block[64]; +{ + u_int32_t a = state[0], b = state[1], c = state[2], d = state[3], x[16]; + + Decode (x, block, 64); + + /* Round 1 */ +#define S11 7 +#define S12 12 +#define S13 17 +#define S14 22 + FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */ + FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */ + FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */ + FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */ + FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */ + FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */ + FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */ + FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */ + FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */ + FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */ + FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ + FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ + FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ + FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ + FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */ + FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */ + + /* Round 2 */ +#define S21 5 +#define S22 9 +#define S23 14 +#define S24 20 + GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */ + GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */ + GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ + GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */ + GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */ + GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */ + GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */ + GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */ + GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */ + GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */ + GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */ + GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */ + GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ + GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */ + GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */ + GG (b, c, d, a, x[12], S24, 
0x8d2a4c8a); /* 32 */ + + /* Round 3 */ +#define S31 4 +#define S32 11 +#define S33 16 +#define S34 23 + HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */ + HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */ + HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ + HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */ + HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */ + HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */ + HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */ + HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ + HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ + HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */ + HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */ + HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */ + HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */ + HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ + HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */ + HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */ + + /* Round 4 */ +#define S41 6 +#define S42 10 +#define S43 15 +#define S44 21 + II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */ + II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */ + II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */ + II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */ + II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ + II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */ + II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ + II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */ + II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */ + II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */ + II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */ + II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ + II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */ + II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ + II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */ + II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */ + + state[0] += a; + state[1] += b; + state[2] += c; + state[3] += d; + + /* Zeroize sensitive information. 
*/ + memset ((void *)x, 0, sizeof (x)); +} diff --git a/bsd/kern/parallel.c b/bsd/kern/parallel.c new file mode 100644 index 000000000..2d93edd97 --- /dev/null +++ b/bsd/kern/parallel.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * Copyright (c) 1988 Carnegie-Mellon University + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:06:13 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1997/09/30 02:44:39 wsanchez + * Import of kernel from umeshv/kernel + * + * Revision 2.4 89/12/22 15:52:48 rpd + * MACH_HOST support: when releasing master, context switch away + * immediately if thread is not assigned to default processor set. 
+ * [89/11/16 dlb] + * + * Revision 2.3 89/10/11 14:19:20 dlb + * Processor logic - explicitly record bound processor in thread + * instead of changing whichq pointer. + * [88/09/30 dlb] + * + * Revision 2.2 89/02/25 18:07:24 gm0w + * Changes for cleanup. + * + * 15-Oct-87 David Golub (dbg) at Carnegie-Mellon University + * Use thread_bind (inline version) to bind thread to master cpu + * while holding unix-lock. + * + * 9-Oct-87 Robert Baron (rvb) at Carnegie-Mellon University + * Define unix_reset for longjmp/setjmp reset. + * + * 25-Sep-87 Robert Baron (rvb) at Carnegie-Mellon University + * Clean out some debugging code. + * + * 21-Sep-87 Robert Baron (rvb) at Carnegie-Mellon University + * Created. + * + */ + + +#include +#include + +#if NCPUS > 1 + +#include +#include +#include +#include + +void unix_master() +{ + register thread_t t = current_thread(); + + if (! (++( t->unix_lock ))) { + + /* thread_bind(t, master_processor); */ + t->bound_processor = master_processor; + + if (cpu_number() != master_cpu) { + t->interruptible = FALSE; + thread_block(0); + } + } +} + +void unix_release() +{ + register thread_t t = current_thread(); + + t->unix_lock--; + if (t->unix_lock < 0) { + /* thread_bind(t, PROCESSOR_NULL); */ + t->bound_processor = PROCESSOR_NULL; +#if MACH_HOST + if (t->processor_set != &default_pset) + thread_block(0); +#endif MACH_HOST + } +} + +void unix_reset() +{ + register thread_t t = current_thread(); + + if (t->unix_lock != -1) + t->unix_lock = 0; +} + +#endif NCPUS > 1 diff --git a/bsd/kern/parallel.h b/bsd/kern/parallel.h new file mode 100644 index 000000000..0538a4630 --- /dev/null +++ b/bsd/kern/parallel.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * Copyright (c) 1988 Carnegie-Mellon University + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * HISTORY + * Revision 1.1.1.1 1997/09/30 02:44:35 wsanchez + * Import of kernel from umeshv/kernel + * + * Revision 2.3 89/03/09 20:14:51 rpd + * More cleanup. + * + * Revision 2.2 89/02/25 18:07:31 gm0w + * Kernel code cleanup. + * Put entire file under #indef KERNEL. + * [89/02/15 mrt] + * + * 9-Oct-87 Robert Baron (rvb) at Carnegie-Mellon University + * Define unix_reset for longjmp/setjmp reset. + * + * 21-Sep-87 Robert Baron (rvb) at Carnegie-Mellon University + * Created. + * + */ + +#ifndef _KERN_PARALLEL_H_ +#define _KERN_PARALLEL_H_ + + +#define unix_master() +#define unix_release() +#define unix_reset() + + +#endif /* _KERN_PARALLEL_H_ */ diff --git a/bsd/kern/posix_sem.c b/bsd/kern/posix_sem.c new file mode 100644 index 000000000..3905e5299 --- /dev/null +++ b/bsd/kern/posix_sem.c @@ -0,0 +1,931 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ +/* + * posix_shm.c : Support for POSIX semaphore apis + * + * File: posix_sem.c + * Author: Ananthakrishna Ramesh + * + * HISTORY + * 2-Sep-1999 A.Ramesh + * Created for MacOSX + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define PSEMNAMLEN 31 /* maximum name segment length we bother with */ + +struct pseminfo { + unsigned int psem_flags; + unsigned int psem_usecount; + mode_t psem_mode; + uid_t psem_uid; + gid_t psem_gid; + char psem_name[PSEMNAMLEN + 1]; /* segment name */ + void * psem_semobject; + struct proc * sem_proc; +}; +#define PSEMINFO_NULL (struct pseminfo *)0 + +#define PSEM_NONE 1 +#define PSEM_DEFINED 2 +#define PSEM_ALLOCATED 4 +#define PSEM_MAPPED 8 +#define PSEM_INUSE 0x10 +#define PSEM_REMOVED 0x20 +#define PSEM_INCREATE 0x40 +#define PSEM_INDELETE 0x80 + +struct psemcache { + LIST_ENTRY(psemcache) 
psem_hash; /* hash chain */ + struct pseminfo *pseminfo; /* vnode the name refers to */ + int psem_nlen; /* length of name */ + char psem_name[PSEMNAMLEN + 1]; /* segment name */ +}; +#define PSEMCACHE_NULL (struct psemcache *)0 + +struct psemstats { + long goodhits; /* hits that we can really use */ + long neghits; /* negative hits that we can use */ + long badhits; /* hits we must drop */ + long falsehits; /* hits with id mismatch */ + long miss; /* misses */ + long longnames; /* long names that ignore cache */ +}; + +struct psemname { + char *psem_nameptr; /* pointer to looked up name */ + long psem_namelen; /* length of looked up component */ + u_long psem_hash; /* hash value of looked up name */ +}; + +struct psemnode { + struct pseminfo *pinfo; +#if DIAGNOSTIC + unsigned int readcnt; + unsigned int writecnt; +#endif +}; +#define PSEMNODE_NULL (struct psemnode *)0 + + +#define PSEMHASH(pnp) \ + (&psemhashtbl[(pnp)->psem_hash & psemhash]) +LIST_HEAD(psemhashhead, psemcache) *psemhashtbl; /* Hash Table */ +u_long psemhash; /* size of hash table - 1 */ +long psemnument; /* number of cache entries allocated */ +struct psemstats psemstats; /* cache effectiveness statistics */ + +int psem_cache_search __P((struct pseminfo **, struct psemname *, struct psemcache **)); + +int psem_read __P((struct file *fp, struct uio *uio, + struct ucred *cred)); +int psem_write __P((struct file *fp, struct uio *uio, + struct ucred *cred)); +int psem_ioctl __P((struct file *fp, u_long com, + caddr_t data, struct proc *p)); +int psem_select __P((struct file *fp, int which, + struct proc *p)); +int psem_closefile __P((struct file *fp, struct proc *p)); + +struct fileops psemops = + { psem_read, psem_write, psem_ioctl, psem_select, psem_closefile }; + +/* + * Lookup an entry in the cache + * + * + * status of -1 is returned if matches + * If the lookup determines that the name does not exist + * (negative cacheing), a status of ENOENT is returned. 
If the lookup + * fails, a status of zero is returned. + */ + +int +psem_cache_search(psemp, pnp, pcache) + struct pseminfo **psemp; + struct psemname *pnp; + struct psemcache **pcache; +{ + register struct psemcache *pcp, *nnp; + register struct psemhashhead *pcpp; + + if (pnp->psem_namelen > PSEMNAMLEN) { + psemstats.longnames++; + return (0); + } + + pcpp = PSEMHASH(pnp); + for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) { + nnp = pcp->psem_hash.le_next; + if (pcp->psem_nlen == pnp->psem_namelen && + !bcmp(pcp->psem_name, pnp->psem_nameptr, (u_int)pcp-> psem_nlen)) + break; + } + + if (pcp == 0) { + psemstats.miss++; + return (0); + } + + /* We found a "positive" match, return the vnode */ + if (pcp->pseminfo) { + psemstats.goodhits++; + /* TOUCH(ncp); */ + *psemp = pcp->pseminfo; + *pcache = pcp; + return (-1); + } + + /* + * We found a "negative" match, ENOENT notifies client of this match. + * The nc_vpid field records whether this is a whiteout. + */ + psemstats.neghits++; + return (ENOENT); +} + +/* + * Add an entry to the cache. + */ +int +psem_cache_add(psemp, pnp) + struct pseminfo *psemp; + struct psemname *pnp; +{ + register struct psemcache *pcp; + register struct psemhashhead *pcpp; + struct pseminfo *dpinfo; + struct psemcache *dpcp; + +#if DIAGNOSTIC + if (pnp->psem_namelen > NCHNAMLEN) + panic("cache_enter: name too long"); +#endif + + /* + * We allocate a new entry if we are less than the maximum + * allowed and the one at the front of the LRU list is in use. + * Otherwise we use the one at the front of the LRU list. + */ + pcp = (struct psemcache *)_MALLOC(sizeof(struct psemcache), M_SHM, M_WAITOK); + /* if the entry has already been added by some one else return */ + if (psem_cache_search(&dpinfo, pnp, &dpcp) == -1) { + _FREE(pcp, M_SHM); + return(EEXIST); + } + psemnument++; + + bzero(pcp, sizeof(struct psemcache)); + /* + * Fill in cache info, if vp is NULL this is a "negative" cache entry. 
+ * For negative entries, we have to record whether it is a whiteout. + * the whiteout flag is stored in the nc_vpid field which is + * otherwise unused. + */ + pcp->pseminfo = psemp; + pcp->psem_nlen = pnp->psem_namelen; + bcopy(pnp->psem_nameptr, pcp->psem_name, (unsigned)pcp->psem_nlen); + pcpp = PSEMHASH(pnp); +#if DIAGNOSTIC + { + register struct psemcache *p; + + for (p = pcpp->lh_first; p != 0; p = p->psem_hash.le_next) + if (p == pcp) + panic("psem:cache_enter duplicate"); + } +#endif + LIST_INSERT_HEAD(pcpp, pcp, psem_hash); + return(0); +} + +/* + * Name cache initialization, from vfs_init() when we are booting + */ +void +psem_cache_init() +{ + psemhashtbl = hashinit(desiredvnodes, M_SHM, &psemhash); +} + +/* + * Invalidate a all entries to particular vnode. + * + * We actually just increment the v_id, that will do it. The entries will + * be purged by lookup as they get found. If the v_id wraps around, we + * need to ditch the entire cache, to avoid confusion. No valid vnode will + * ever have (v_id == 0). 
+ */ +void +psem_cache_purge(void) +{ + struct psemcache *pcp; + struct psemhashhead *pcpp; + + for (pcpp = &psemhashtbl[psemhash]; pcpp >= psemhashtbl; pcpp--) { + while (pcp = pcpp->lh_first) + psem_cache_delete(pcp); + } +} + +psem_cache_delete(pcp) + struct psemcache *pcp; +{ +#if DIAGNOSTIC + if (pcp->psem_hash.le_prev == 0) + panic("psem namecache purge le_prev"); + if (pcp->psem_hash.le_next == pcp) + panic("namecache purge le_next"); +#endif /* DIAGNOSTIC */ + LIST_REMOVE(pcp, psem_hash); + pcp->psem_hash.le_prev = 0; + psemnument--; +} + + +struct sem_open_args { +const char *name; +int oflag; +int mode; +int value; +}; + +int +sem_open(p, uap, retval) +struct proc *p; +register struct sem_open_args *uap; +register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + register struct vnode *vp; + int flags, i; + struct file *nfp; + int type, indx, error; + struct psemname nd; + struct pseminfo *pinfo; + extern struct fileops psemops; + char * pnbuf; + char * nameptr; + char * cp; + size_t pathlen, plen; + int fmode ; + int cmode = uap->mode; + int value = uap->value; + int incache = 0; + struct psemnode * pnode = PSEMNODE_NULL; + struct psemcache * pcache = PSEMCACHE_NULL; + kern_return_t kret = KERN_SUCCESS; + int pinfo_alloc = 0; + + pinfo = PSEMINFO_NULL; + + MALLOC_ZONE(pnbuf, caddr_t, + MAXPATHLEN, M_NAMEI, M_WAITOK); + pathlen = MAXPATHLEN; + error = copyinstr(uap->name, pnbuf, + MAXPATHLEN, &pathlen); + if (error) { + goto bad; + } + if (pathlen > PSEMNAMLEN) { + error = ENAMETOOLONG; + goto bad; + } + + +#ifdef PSXSEM_NAME_RESTRICT + nameptr = pnbuf; + if (*nameptr == '/') { + while (*(nameptr++) == '/') { + plen--; + error = EINVAL; + goto bad; + } + } else { + error = EINVAL; + goto bad; + } +#endif /* PSXSEM_NAME_RESTRICT */ + + plen = pathlen; + nameptr = pnbuf; + nd.psem_nameptr = nameptr; + nd.psem_namelen = plen; + nd. 
psem_hash =0;
+
+ for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
+ nd.psem_hash += (unsigned char)*cp * i;
+ }
+
+ error = psem_cache_search(&pinfo, &nd, &pcache);
+
+ if (error == ENOENT) {
+ error = EINVAL;
+ goto bad;
+
+ }
+ if (!error) {
+ incache = 0;
+ } else
+ incache = 1;
+ fmode = FFLAGS(uap->oflag);
+
+ if (error = falloc(p, &nfp, &indx)) {
+ goto bad;
+ }
+
+ fp = nfp;
+ cmode &= ALLPERMS;
+
+ if (((fmode & (O_CREAT | O_EXCL))==(O_CREAT | O_EXCL)) && incache) {
+ /* sem exists and opened O_EXCL */
+#if notyet
+ if (pinfo->psem_flags & PSEM_INDELETE) {
+ }
+#endif
+ error = EEXIST;
+ goto bad1;
+ }
+ if (((fmode & (O_CREAT | O_EXCL))== O_CREAT) && incache) {
+ /* As per POSIX, O_CREAT has no effect */
+ fmode &= ~O_CREAT;
+ }
+
+ if (fmode & O_CREAT) {
+ if((value < 0) || (value > SEM_VALUE_MAX)) {
+ error = EINVAL;
+ goto bad1;
+ }
+ pinfo = (struct pseminfo *)_MALLOC(sizeof(struct pseminfo), M_SHM, M_WAITOK);
+ bzero(pinfo, sizeof(struct pseminfo));
+ pinfo_alloc = 1;
+ pinfo->psem_flags = PSEM_DEFINED | PSEM_INCREATE;
+ pinfo->psem_usecount = 1;
+ pinfo->psem_mode = cmode;
+ pinfo->psem_uid = p->p_ucred->cr_uid;
+ pinfo->psem_gid = p->p_ucred->cr_gid;
+ kret = semaphore_create(kernel_task, &pinfo->psem_semobject,
+ SYNC_POLICY_FIFO, value);
+ if(kret != KERN_SUCCESS)
+ goto bad3;
+ pinfo->psem_flags &= ~PSEM_DEFINED;
+ pinfo->psem_flags |= PSEM_ALLOCATED;
+ pinfo->sem_proc = p;
+ } else {
+ /* semaphore should exist as it is without O_CREAT */
+ if (!incache) {
+ error = ENOENT;
+ goto bad1;
+ }
+ if( pinfo->psem_flags & PSEM_INDELETE) {
+ error = ENOENT;
+ goto bad1;
+ }
+ if (error = psem_access(pinfo, fmode, p->p_ucred, p))
+ goto bad1;
+ }
+ pnode = (struct psemnode *)_MALLOC(sizeof(struct psemnode), M_SHM, M_WAITOK);
+ bzero(pnode, sizeof(struct psemnode));
+
+ if (!incache) {
+ if (error = psem_cache_add(pinfo, &nd)) {
+ goto bad2;
+ }
+ }
+ pinfo->psem_flags &= ~PSEM_INCREATE;
+ pinfo->psem_usecount++;
+ pnode->pinfo = pinfo;
+
fp->f_flag = fmode & FMASK;
+ fp->f_type = DTYPE_PSXSEM;
+ fp->f_ops = &psemops;
+ fp->f_data = (caddr_t)pnode;
+ *fdflags(p, indx) &= ~UF_RESERVED;
+ *retval = indx;
+ _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
+ return (0);
+
+bad3:
+ switch (kret) {
+ case KERN_RESOURCE_SHORTAGE:
+ error = ENOMEM; break;
+ case KERN_PROTECTION_FAILURE:
+ error = EACCES; break;
+ default:
+ error = EINVAL;
+ }
+ goto bad1;
+bad2:
+ _FREE(pnode, M_SHM);
+ if (pinfo_alloc)
+ _FREE(pinfo, M_SHM);
+bad1:
+ fdrelse(p, indx);
+ ffree(nfp);
+bad:
+ _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
+ return (error);
+}
+
+
+
+int
+psem_access(struct pseminfo *pinfo, int mode, struct ucred *cred, struct proc *p)
+{
+ mode_t mask;
+ register gid_t *gp;
+ int i, error;
+
+ /* Otherwise, user id 0 always gets access. */
+ if (cred->cr_uid == 0)
+ return (0);
+
+ mask = 0;
+
+ /* Otherwise, check the owner. */
+ if (cred->cr_uid == pinfo->psem_uid) {
+ if (mode & FREAD)
+ mask |= S_IRUSR;
+ if (mode & FWRITE)
+ mask |= S_IWUSR;
+ return ((pinfo->psem_mode & mask) == mask ? 0 : EACCES);
+ }
+
+ /* Otherwise, check the groups. */
+ for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
+ if (pinfo->psem_gid == *gp) {
+ if (mode & FREAD)
+ mask |= S_IRGRP;
+ if (mode & FWRITE)
+ mask |= S_IWGRP;
+ return ((pinfo->psem_mode & mask) == mask ? 0 : EACCES);
+ }
+
+ /* Otherwise, check everyone else. */
+ if (mode & FREAD)
+ mask |= S_IROTH;
+ if (mode & FWRITE)
+ mask |= S_IWOTH;
+ return ((pinfo->psem_mode & mask) == mask ?
0 : EACCES); +} + + + + +struct sem_unlink_args { +const char *name; +}; + +int +sem_unlink(p, uap, retval) +struct proc *p; +register struct sem_unlink_args *uap; +register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + int flags, i; + int error=0; + struct psemname nd; + struct pseminfo *pinfo; + extern struct fileops psemops; + char * pnbuf; + char * nameptr; + char * cp; + size_t pathlen, plen; + int fmode, cmode ; + int incache = 0; + struct psemnode * pnode = PSEMNODE_NULL; + struct psemcache *pcache = PSEMCACHE_NULL; + kern_return_t kret; + + pinfo = PSEMINFO_NULL; + + MALLOC_ZONE(pnbuf, caddr_t, + MAXPATHLEN, M_NAMEI, M_WAITOK); + pathlen = MAXPATHLEN; + error = copyinstr(uap->name, pnbuf, + MAXPATHLEN, &pathlen); + if (error) { + goto bad; + } + if (pathlen > PSEMNAMLEN) { + error = ENAMETOOLONG; + goto bad; + } + + +#ifdef PSXSEM_NAME_RESTRICT + nameptr = pnbuf; + if (*nameptr == '/') { + while (*(nameptr++) == '/') { + plen--; + error = EINVAL; + goto bad; + } + } else { + error = EINVAL; + goto bad; + } +#endif /* PSXSEM_NAME_RESTRICT */ + + plen = pathlen; + nameptr = pnbuf; + nd.psem_nameptr = nameptr; + nd.psem_namelen = plen; + nd. 
psem_hash =0; + + for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { + nd.psem_hash += (unsigned char)*cp * i; + } + + error = psem_cache_search(&pinfo, &nd, &pcache); + + if (error == ENOENT) { + error = EINVAL; + goto bad; + + } + if (!error) { + error = EINVAL; + goto bad; + } else + incache = 1; + if (error = psem_access(pinfo, pinfo->psem_mode, p->p_ucred, p)) + goto bad; + + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED))==0) { + return (EINVAL); + } + + if (pinfo->psem_flags & PSEM_INDELETE) { + error = 0; + goto bad; + } + pinfo->psem_flags |= PSEM_INDELETE; + pinfo->psem_usecount--; + + if (!pinfo->psem_usecount) { + psem_delete(pinfo); + _FREE(pinfo,M_SHM); + } else + pinfo->psem_flags |= PSEM_REMOVED; + + psem_cache_delete(pcache); + _FREE(pcache, M_SHM); + error = 0; +bad: + _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + return (error); +} + +struct sem_close_args { +sem_t *sem; +}; + +int +sem_close(p, uap, retval) + struct proc *p; + struct sem_close_args *uap; + register_t *retval; +{ + int fd = (int)uap->sem; + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + int error = 0; + + + if ((u_int)fd >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + fdrelse(p, fd); + if( error = closef(fp, p)) + return(error); + return(0); + + +} + +struct sem_wait_args { +sem_t *sem; +}; + +int +sem_wait(p, uap, retval) + struct proc *p; + struct sem_wait_args *uap; + register_t *retval; +{ + int fd = (int)uap->sem; + register struct filedesc *fdp = p->p_fd; + struct file *fp; + struct pseminfo * pinfo; + struct psemnode * pnode ; + kern_return_t kret; + int error; + + if (error = fdgetf(p, (int)uap->sem, &fp)) + return (error); + if (fp->f_type != DTYPE_PSXSEM) + return(EBADF); + if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL ) + return(EINVAL); + if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) + return(EINVAL); + if ((pinfo->psem_flags & (PSEM_DEFINED 
| PSEM_ALLOCATED)) + != PSEM_ALLOCATED) { + return(EINVAL); + } + + kret = semaphore_wait(pinfo->psem_semobject); + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_PROTECTION_FAILURE: + return (EACCES); + case KERN_ABORTED: + case KERN_OPERATION_TIMED_OUT: + return (EINTR); + case KERN_SUCCESS: + return(0); + default: + return (EINVAL); + } + +} + +struct sem_trywait_args { +sem_t *sem; +}; + +int +sem_trywait(p, uap, retval) + struct proc *p; + struct sem_wait_args *uap; + register_t *retval; +{ + int fd = (int)uap->sem; + register struct filedesc *fdp = p->p_fd; + struct file *fp; + struct pseminfo * pinfo; + struct psemnode * pnode ; + kern_return_t kret; + mach_timespec_t wait_time; + int error; + + if (error = fdgetf(p, (int)uap->sem, &fp)) + return (error); + if (fp->f_type != DTYPE_PSXSEM) + return(EBADF); + if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL ) + return(EINVAL); + if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) + return(EINVAL); + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) + != PSEM_ALLOCATED) { + return(EINVAL); + } + + wait_time.tv_sec = 0; + wait_time.tv_nsec = 0; + + kret = semaphore_timedwait(pinfo->psem_semobject, MACH_TIMESPEC_ZERO); + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_PROTECTION_FAILURE: + return (EINVAL); + case KERN_ABORTED: + return (EINTR); + case KERN_OPERATION_TIMED_OUT: + return (EAGAIN); + case KERN_SUCCESS: + return(0); + default: + return (EINVAL); + } + +} + +struct sem_post_args { +sem_t *sem; +}; + +int +sem_post(p, uap, retval) + struct proc *p; + struct sem_wait_args *uap; + register_t *retval; +{ + int fd = (int)uap->sem; + register struct filedesc *fdp = p->p_fd; + struct file *fp; + struct pseminfo * pinfo; + struct psemnode * pnode ; + kern_return_t kret; + int error; + + if (error = fdgetf(p, (int)uap->sem, &fp)) + return (error); + if (fp->f_type != DTYPE_PSXSEM) + return(EBADF); + if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL ) + 
return(EINVAL); + if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) + return(EINVAL); + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) + != PSEM_ALLOCATED) { + return(EINVAL); + } + + kret = semaphore_signal(pinfo->psem_semobject); + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_PROTECTION_FAILURE: + return (EINVAL); + case KERN_ABORTED: + case KERN_OPERATION_TIMED_OUT: + return (EINTR); + case KERN_SUCCESS: + return(0); + default: + return (EINVAL); + } + +} + +struct sem_init_args { +sem_t *sem; +int phsared; +unsigned int value; +}; + +int +sem_init(p, uap, retval) + struct proc *p; + struct sem_init_args *uap; + register_t *retval; +{ + return(ENOSYS); +} + +struct sem_destroy_args { +sem_t *sem; +}; + +int +sem_destroy(p, uap, retval) + struct proc *p; + struct sem_destroy_args *uap; + register_t *retval; +{ + return(ENOSYS); +} + +struct sem_getvalue_args { +sem_t *sem; +int * sval; +}; + +int +sem_getvalue(p, uap, retval) + struct proc *p; + struct sem_getvalue_args *uap; + register_t *retval; +{ + return(ENOSYS); +} + +int +psem_closefile(fp, p) + struct file *fp; + struct proc *p; +{ + + return (psem_close(((struct psemnode *)fp->f_data), fp->f_flag, + fp->f_cred, p)); +} + + +int +psem_close(pnode, flags, cred, p) + register struct psemnode *pnode; + int flags; + struct ucred *cred; + struct proc *p; +{ + int error=0; + kern_return_t kret; + register struct pseminfo *pinfo; + + if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) + return(EINVAL); + + if ((pinfo->psem_flags & PSEM_ALLOCATED) != PSEM_ALLOCATED) { + return(EINVAL); + } +#if DIAGNOSTIC + if(!pinfo->psem_usecount) { + kprintf("negative usecount in psem_close\n"); + } +#endif /* DIAGNOSTIC */ + pinfo->psem_usecount--; + + if ((pinfo->psem_flags & PSEM_REMOVED) && !pinfo->psem_usecount) { + error = psem_delete(pinfo); + _FREE(pinfo,M_SHM); + } + _FREE(pnode, M_SHM); + return (error); +} + +int +psem_delete(struct pseminfo * pinfo) +{ + kern_return_t kret; + + kret = 
semaphore_destroy(kernel_task, pinfo->psem_semobject); + + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_PROTECTION_FAILURE: + return (EINVAL); + case KERN_ABORTED: + case KERN_OPERATION_TIMED_OUT: + return (EINTR); + case KERN_SUCCESS: + return(0); + default: + return (EINVAL); + } + +} + + +int +psem_read(struct file *fp, struct uio *uio, struct ucred *cred) +{ + return(EOPNOTSUPP); +} +int +psem_write(struct file *fp, struct uio *uio, struct ucred *cred) +{ + return(EOPNOTSUPP); +} +int +psem_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p) +{ + return(EOPNOTSUPP); +} +int +psem_select(struct file *fp, int which, struct proc *p) +{ + return(EOPNOTSUPP); +} diff --git a/bsd/kern/posix_shm.c b/bsd/kern/posix_shm.c new file mode 100644 index 000000000..c011cb3e5 --- /dev/null +++ b/bsd/kern/posix_shm.c @@ -0,0 +1,874 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1996-1998 Apple Computer, Inc. + * All Rights Reserved. 
+ */ +/* + * posix_shm.c : Support for POSIX shared memory apis + * + * File: posix_shm.c + * Author: Ananthakrishna Ramesh + * + * HISTORY + * 2-Sep-1999 A.Ramesh + * Created for MacOSX + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define PSHMNAMLEN 31 /* maximum name segment length we bother with */ + +struct pshminfo { + unsigned int pshm_flags; + unsigned int pshm_usecount; + off_t pshm_length; + mode_t pshm_mode; + uid_t pshm_uid; + gid_t pshm_gid; + char pshm_name[PSHMNAMLEN + 1]; /* segment name */ + void * pshm_memobject; +#if DIAGNOSTIC + unsigned int pshm_readcount; + unsigned int pshm_writecount; + struct proc * pshm_proc; +#endif /* DIAGNOSTIC */ +}; +#define PSHMINFO_NULL (struct pshminfo *)0 + +#define PSHM_NONE 1 +#define PSHM_DEFINED 2 +#define PSHM_ALLOCATED 4 +#define PSHM_MAPPED 8 +#define PSHM_INUSE 0x10 +#define PSHM_REMOVED 0x20 +#define PSHM_INCREATE 0x40 +#define PSHM_INDELETE 0x80 + +struct pshmcache { + LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */ + struct pshminfo *pshminfo; /* vnode the name refers to */ + int pshm_nlen; /* length of name */ + char pshm_name[PSHMNAMLEN + 1]; /* segment name */ +}; +#define PSHMCACHE_NULL (struct pshmcache *)0 + +struct pshmstats { + long goodhits; /* hits that we can really use */ + long neghits; /* negative hits that we can use */ + long badhits; /* hits we must drop */ + long falsehits; /* hits with id mismatch */ + long miss; /* misses */ + long longnames; /* long names that ignore cache */ +}; + +struct pshmname { + char *pshm_nameptr; /* pointer to looked up name */ + long pshm_namelen; /* length of looked up component */ + u_long pshm_hash; /* hash value of looked up name */ +}; + +struct pshmnode { + off_t mapp_addr; + size_t map_size; + struct pshminfo *pinfo; + unsigned int pshm_usecount; +#if 
DIAGNOSTIC + unsigned int readcnt; + unsigned int writecnt; +#endif +}; +#define PSHMNODE_NULL (struct pshmnode *)0 + + +#define PSHMHASH(pnp) \ + (&pshmhashtbl[(pnp)->pshm_hash & pshmhash]) +LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */ +u_long pshmhash; /* size of hash table - 1 */ +long pshmnument; /* number of cache entries allocated */ +struct pshmstats pshmstats; /* cache effectiveness statistics */ + +int pshm_read __P((struct file *fp, struct uio *uio, + struct ucred *cred)); +int pshm_write __P((struct file *fp, struct uio *uio, + struct ucred *cred)); +int pshm_ioctl __P((struct file *fp, u_long com, + caddr_t data, struct proc *p)); +int pshm_select __P((struct file *fp, int which, + struct proc *p)); +int pshm_closefile __P((struct file *fp, struct proc *p)); + +struct fileops pshmops = + { pshm_read, pshm_write, pshm_ioctl, pshm_select, pshm_closefile }; + + + +/* + * Lookup an entry in the cache + * + * + * status of -1 is returned if matches + * If the lookup determines that the name does not exist + * (negative cacheing), a status of ENOENT is returned. If the lookup + * fails, a status of zero is returned. 
+ */ + +int +pshm_cache_search(pshmp, pnp, pcache) + struct pshminfo **pshmp; + struct pshmname *pnp; + struct pshmcache **pcache; +{ + register struct pshmcache *pcp, *nnp; + register struct pshmhashhead *pcpp; + + if (pnp->pshm_namelen > PSHMNAMLEN) { + pshmstats.longnames++; + return (0); + } + + pcpp = PSHMHASH(pnp); + for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) { + nnp = pcp->pshm_hash.le_next; + if (pcp->pshm_nlen == pnp->pshm_namelen && + !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp-> pshm_nlen)) + break; + } + + if (pcp == 0) { + pshmstats.miss++; + return (0); + } + + /* We found a "positive" match, return the vnode */ + if (pcp->pshminfo) { + pshmstats.goodhits++; + /* TOUCH(ncp); */ + *pshmp = pcp->pshminfo; + *pcache = pcp; + return (-1); + } + + /* + * We found a "negative" match, ENOENT notifies client of this match. + * The nc_vpid field records whether this is a whiteout. + */ + pshmstats.neghits++; + return (ENOENT); +} + +/* + * Add an entry to the cache. + */ +int +pshm_cache_add(pshmp, pnp) + struct pshminfo *pshmp; + struct pshmname *pnp; +{ + register struct pshmcache *pcp; + register struct pshmhashhead *pcpp; + register struct pshminfo *dpinfo; + register struct pshmcache *dpcp; + +#if DIAGNOSTIC + if (pnp->pshm_namelen > NCHNAMLEN) + panic("cache_enter: name too long"); +#endif + + /* + * We allocate a new entry if we are less than the maximum + * allowed and the one at the front of the LRU list is in use. + * Otherwise we use the one at the front of the LRU list. + */ + pcp = (struct pshmcache *)_MALLOC(sizeof(struct pshmcache), M_SHM, M_WAITOK); + /* if the entry has already been added by some one else return */ + if (pshm_cache_search(&dpinfo, pnp, &dpcp) == -1) { + _FREE(pcp, M_SHM); + return(EEXIST); + } + pshmnument++; + + bzero(pcp, sizeof(struct pshmcache)); + /* + * Fill in cache info, if vp is NULL this is a "negative" cache entry. + * For negative entries, we have to record whether it is a whiteout. 
+ * the whiteout flag is stored in the nc_vpid field which is + * otherwise unused. + */ + pcp->pshminfo = pshmp; + pcp->pshm_nlen = pnp->pshm_namelen; + bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen); + pcpp = PSHMHASH(pnp); +#if DIAGNOSTIC + { + register struct pshmcache *p; + + for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next) + if (p == pcp) + panic("cache_enter: duplicate"); + } +#endif + LIST_INSERT_HEAD(pcpp, pcp, pshm_hash); + return(0); +} + +/* + * Name cache initialization, from vfs_init() when we are booting + */ +void +pshm_cache_init() +{ + pshmhashtbl = hashinit(desiredvnodes, M_SHM, &pshmhash); +} + +/* + * Invalidate a all entries to particular vnode. + * + * We actually just increment the v_id, that will do it. The entries will + * be purged by lookup as they get found. If the v_id wraps around, we + * need to ditch the entire cache, to avoid confusion. No valid vnode will + * ever have (v_id == 0). + */ +void +pshm_cache_purge(void) +{ + struct pshmcache *pcp; + struct pshmhashhead *pcpp; + + for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) { + while (pcp = pcpp->lh_first) + pshm_cache_delete(pcp); + } +} + +pshm_cache_delete(pcp) + struct pshmcache *pcp; +{ +#if DIAGNOSTIC + if (pcp->pshm_hash.le_prev == 0) + panic("namecache purge le_prev"); + if (pcp->pshm_hash.le_next == pcp) + panic("namecache purge le_next"); +#endif /* DIAGNOSTIC */ + LIST_REMOVE(pcp, pshm_hash); + pcp->pshm_hash.le_prev = 0; + pshmnument--; +} + + +struct shm_open_args { +const char *name; +int oflag; +int mode; +}; + +int +shm_open(p, uap, retval) +struct proc *p; +register struct shm_open_args *uap; +register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + register struct vnode *vp; + int i; + struct file *nfp; + int type, indx, error; + struct pshmname nd; + struct pshminfo *pinfo; + extern struct fileops pshmops; + char * pnbuf; + char * nameptr; + char * cp; + size_t pathlen, plen; + 
int fmode ; + int cmode = uap->mode; + int incache = 0; + struct pshmnode * pnode = PSHMNODE_NULL; + struct pshmcache * pcache = PSHMCACHE_NULL; + + + pinfo = PSHMINFO_NULL; + + MALLOC_ZONE(pnbuf, caddr_t, + MAXPATHLEN, M_NAMEI, M_WAITOK); + pathlen = MAXPATHLEN; + error = copyinstr(uap->name, pnbuf, + MAXPATHLEN, &pathlen); + if (error) { + goto bad; + } + if (pathlen > PSHMNAMLEN) { + error = ENAMETOOLONG; + goto bad; + } + + +#ifdef PSXSHM_NAME_RESTRICT + nameptr = pnbuf; + if (*nameptr == '/') { + while (*(nameptr++) == '/') { + plen--; + error = EINVAL; + goto bad; + } + } else { + error = EINVAL; + goto bad; + } +#endif /* PSXSHM_NAME_RESTRICT */ + + plen = pathlen; + nameptr = pnbuf; + nd.pshm_nameptr = nameptr; + nd.pshm_namelen = plen; + nd. pshm_hash =0; + + for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { + nd.pshm_hash += (unsigned char)*cp * i; + } + + error = pshm_cache_search(&pinfo, &nd, &pcache); + + if (error == ENOENT) { + error = EINVAL; + goto bad; + + } + if (!error) { + incache = 0; + } else + incache = 1; + fmode = FFLAGS(uap->oflag); + if ((fmode & (FREAD | FWRITE))==0) + return(EINVAL); + + if (error = falloc(p, &nfp, &indx)) + return (error); + fp = nfp; + + cmode &= ALLPERMS; + + if (fmode & O_CREAT) { + if ((fmode & O_EXCL) && incache) { + /* shm obj exists and opened O_EXCL */ +#if notyet + if (pinfo->pshm_flags & PSHM_INDELETE) { + } +#endif + error = EEXIST; + goto bad; + } + if (!incache) { + /* create a new one */ + pinfo = (struct pshminfo *)_MALLOC(sizeof(struct pshminfo), M_SHM, M_WAITOK); + bzero(pinfo, sizeof(struct pshminfo)); + pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE; + pinfo->pshm_usecount = 1; + pinfo->pshm_mode = cmode; + pinfo->pshm_uid = p->p_ucred->cr_uid; + pinfo->pshm_gid = p->p_ucred->cr_gid; + } else { + /* already exists */ + if( pinfo->pshm_flags & PSHM_INDELETE) { + error = ENOENT; + goto bad; + } + if (error = pshm_access(pinfo, fmode, p->p_ucred, p)) + goto bad; + } + } else { + if 
(!incache) { + /* O_CREAT is not set and the shm obecj does not exist */ + error = ENOENT; + goto bad; + } + if( pinfo->pshm_flags & PSHM_INDELETE) { + error = ENOENT; + goto bad; + } + if (error = pshm_access(pinfo, fmode, p->p_ucred, p)) + goto bad; + } + if (fmode & O_TRUNC) { + error = EINVAL; + goto bad1; + } +#if DIAGNOSTIC + if (fmode & FWRITE) + pinfo->pshm_writecount++; + if (fmode & FREAD) + pinfo->pshm_readcount++; +#endif + pnode = (struct pshmnode *)_MALLOC(sizeof(struct pshmnode), M_SHM, M_WAITOK); + bzero(pnode, sizeof(struct pshmnode)); + + if (!incache) { + if (error = pshm_cache_add(pinfo, &nd)) { + goto bad2; + } + } + pinfo->pshm_flags &= ~PSHM_INCREATE; + pinfo->pshm_usecount++; + pnode->pinfo = pinfo; + fp->f_flag = fmode & FMASK; + fp->f_type = DTYPE_PSXSHM; + fp->f_ops = &pshmops; + fp->f_data = (caddr_t)pnode; + *fdflags(p, indx) &= ~UF_RESERVED; + *retval = indx; + _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + return (0); +bad2: + _FREE(pnode, M_SHM); + +bad1: + _FREE(pinfo, M_SHM); + +bad: + _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + return (error); +} + + +/* ARGSUSED */ +int +pshm_truncate(p, fp, fd, length, retval) + struct proc *p; + struct file *fp; + int fd; + off_t length; + register_t *retval; +{ + struct pshminfo * pinfo; + struct pshmnode * pnode ; + kern_return_t kret; + vm_offset_t user_addr; + void * mem_object; + vm_size_t size; + + if (fp->f_type != DTYPE_PSXSHM) { + return(EINVAL); + } + + + if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) + return(EINVAL); + + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + return(EINVAL); + if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) + != PSHM_DEFINED) { + return(EINVAL); + } + + size = round_page (length); + kret = vm_allocate(current_map(), &user_addr, size, TRUE); + if (kret != KERN_SUCCESS) + goto out; + + kret = mach_make_memory_entry (current_map(), &size, + user_addr, VM_PROT_DEFAULT, &mem_object, 0); + + if (kret != KERN_SUCCESS) + goto out; + + 
vm_deallocate(current_map(), user_addr, size); + + pinfo->pshm_flags &= ~PSHM_DEFINED; + pinfo->pshm_flags = PSHM_ALLOCATED; + pinfo->pshm_memobject = mem_object; + pinfo->pshm_length = size; + return(0); + +out: + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + + } +} + +int +pshm_stat(pnode, sb) +struct pshmnode *pnode; +struct stat *sb; +{ + struct pshminfo *pinfo; + + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + return(EINVAL); + + bzero(sb, sizeof(struct stat)); + sb->st_mode = pinfo->pshm_mode; + sb->st_uid = pinfo->pshm_uid; + sb->st_gid = pinfo->pshm_gid; + sb->st_size = pinfo->pshm_length; + + return(0); +} + +int +pshm_access(struct pshminfo *pinfo, int mode, struct ucred *cred, struct proc *p) +{ + mode_t mask; + register gid_t *gp; + int i, error; + + /* Otherwise, user id 0 always gets access. */ + if (cred->cr_uid == 0) + return (0); + + mask = 0; + + /* Otherwise, check the owner. */ + if (cred->cr_uid == pinfo->pshm_uid) { + if (mode & FREAD) + mask |= S_IRUSR; + if (mode & FWRITE) + mask |= S_IWUSR; + return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES); + } + + /* Otherwise, check the groups. */ + for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) + if (pinfo->pshm_gid == *gp) { + if (mode & FREAD) + mask |= S_IRGRP; + if (mode & FWRITE) + mask |= S_IWGRP; + return ((pinfo->pshm_mode & mask) == mask ? 0 : EACCES); + } + + /* Otherwise, check everyone else. */ + if (mode & FREAD) + mask |= S_IROTH; + if (mode & FWRITE) + mask |= S_IWOTH; + return ((pinfo->pshm_mode & mask) == mask ? 
0 : EACCES); +} +struct mmap_args { + caddr_t addr; + size_t len; + int prot; + int flags; + int fd; +#ifdef DOUBLE_ALIGN_PARAMS + long pad; +#endif + off_t pos; +}; + +int +pshm_mmap(struct proc *p, struct mmap_args *uap, register_t *retval, struct file *fp, vm_size_t pageoff) +{ + vm_offset_t user_addr = uap->addr; + vm_size_t user_size = uap->len ; + int prot = uap->prot; + int flags = uap->flags; + vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos; + int fd = uap->fd; + vm_map_t user_map; + boolean_t find_space,docow; + kern_return_t kret; + struct pshminfo * pinfo; + struct pshmnode * pnode; + void * mem_object; + + if (user_size == 0) + return(0); + + if ((flags & MAP_SHARED) == 0) + return(EINVAL); + + + if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) { + return(EPERM); + } + + if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) + return(EINVAL); + + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + return(EINVAL); + + if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) { + return(EINVAL); + } + if (user_size > pinfo->pshm_length) { + return(EINVAL); + } + if ((off_t)user_size + file_pos > pinfo->pshm_length) { + return(EINVAL); + } + if ((mem_object = pinfo->pshm_memobject) == NULL) { + return(EINVAL); + } + + + user_map = current_map(); + + if ((flags & MAP_FIXED) == 0) { + find_space = TRUE; + user_addr = round_page(user_addr); + } else { + if (user_addr != trunc_page(user_addr)) + return (EINVAL); + find_space = FALSE; + (void) vm_deallocate(user_map, user_addr, user_size); + } + docow = FALSE; + + kret = vm_map_64(user_map, &user_addr, user_size, + 0, find_space, pinfo->pshm_memobject, file_pos, docow, + prot, VM_PROT_DEFAULT, + VM_INHERIT_DEFAULT); + + if (kret != KERN_SUCCESS) + goto out; + kret = vm_inherit(user_map, user_addr, user_size, + VM_INHERIT_SHARE); + if (kret != KERN_SUCCESS) { + (void) vm_deallocate(user_map, user_addr, user_size); + goto out; + } + pnode->mapp_addr = user_addr; + pnode->map_size = 
user_size; + pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE); +out: + switch (kret) { + case KERN_SUCCESS: + *fdflags(p, fd) |= UF_MAPPED; + *retval = (register_t)(user_addr + pageoff); + return (0); + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } + +} + +struct shm_unlink_args { +const char *name; +}; + +int +shm_unlink(p, uap, retval) +struct proc *p; +register struct shm_unlink_args *uap; +register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + register struct file *fp; + int flags, i; + int error=0; + struct pshmname nd; + struct pshminfo *pinfo; + extern struct fileops pshmops; + char * pnbuf; + char * nameptr; + char * cp; + size_t pathlen, plen; + int fmode, cmode ; + int incache = 0; + struct pshmnode * pnode = PSHMNODE_NULL; + struct pshmcache *pcache = PSHMCACHE_NULL; + kern_return_t kret; + + pinfo = PSHMINFO_NULL; + + MALLOC_ZONE(pnbuf, caddr_t, + MAXPATHLEN, M_NAMEI, M_WAITOK); + pathlen = MAXPATHLEN; + error = copyinstr(uap->name, pnbuf, + MAXPATHLEN, &pathlen); + if (error) { + goto bad; + } + if (pathlen > PSHMNAMLEN) { + error = ENAMETOOLONG; + goto bad; + } + + +#ifdef PSXSHM_NAME_RESTRICT + nameptr = pnbuf; + if (*nameptr == '/') { + while (*(nameptr++) == '/') { + plen--; + error = EINVAL; + goto bad; + } + } else { + error = EINVAL; + goto bad; + } +#endif /* PSXSHM_NAME_RESTRICT */ + + plen = pathlen; + nameptr = pnbuf; + nd.pshm_nameptr = nameptr; + nd.pshm_namelen = plen; + nd. 
pshm_hash =0; + + for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { + nd.pshm_hash += (unsigned char)*cp * i; + } + + error = pshm_cache_search(&pinfo, &nd, &pcache); + + if (error == ENOENT) { + error = EINVAL; + goto bad; + + } + if (!error) { + error = EINVAL; + goto bad; + } else + incache = 1; + + if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) { + return (EINVAL); + } + + if (pinfo->pshm_flags & PSHM_INDELETE) { + error = 0; + goto bad; + } + + if (pinfo->pshm_memobject == NULL) { + error = EINVAL; + goto bad; + } + + pinfo->pshm_flags |= PSHM_INDELETE; + pinfo->pshm_usecount--; + kret = mach_destroy_memory_entry(pinfo->pshm_memobject); + pshm_cache_delete(pcache); + _FREE(pcache, M_SHM); + pinfo->pshm_flags |= PSHM_REMOVED; + error = 0; +bad: + _FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + return (error); +out: + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } +} +int +pshm_closefile(fp, p) + struct file *fp; + struct proc *p; +{ + + return (pshm_close(((struct pshmnode *)fp->f_data), fp->f_flag, + fp->f_cred, p)); +} + +int +pshm_close(pnode, flags, cred, p) + register struct pshmnode *pnode; + int flags; + struct ucred *cred; + struct proc *p; +{ + int error=0; + kern_return_t kret; + register struct pshminfo *pinfo; + + if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) + return(EINVAL); + + if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) { + return(EINVAL); + } +#if DIAGNOSTIC + if(!pinfo->pshm_usecount) { + kprintf("negative usecount in pshm_close\n"); + } +#endif /* DIAGNOSTIC */ + pinfo->pshm_usecount--; + + if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) { + _FREE(pinfo,M_SHM); + } + _FREE(pnode, M_SHM); + return (error); +} +int +pshm_read(struct file *fp, struct uio *uio, struct ucred *cred) +{ + return(EOPNOTSUPP); +} +int +pshm_write(struct file *fp, struct uio *uio, struct ucred *cred) +{ + return(EOPNOTSUPP); +} +int 
+pshm_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p) +{ + return(EOPNOTSUPP); +} +int +pshm_select(struct file *fp, int which, struct proc *p) +{ + return(EOPNOTSUPP); +} diff --git a/bsd/kern/preload.h b/bsd/kern/preload.h new file mode 100644 index 000000000..7db056c43 --- /dev/null +++ b/bsd/kern/preload.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_KERN_PRELOAD_H_ +#define _BSD_KERN_PRELOAD_H_ + +#if KERNEL_PRIVATE + +#if PRELOAD + +#define PRELOAD_MAGIC 0x1395 +#define LC_PRELOAD 0x20 + +struct preload_command{ + unsigned long preload_cmd; /* LC_PRELOAD */ + unsigned long preload_cmdsize;/* includes entries */ + short preload_magic; /* Magic number */ + short preload_rev; /* Rev of preload header */ + int preload_hdr_size;/* Size of preload header */ + int preload_entries;/* Number of preload entries */ +}; + +struct preload_entry{ + vm_offset_t preload_vaddr; /* Address of page */ + vm_size_t preload_size; /* size */ +}; +#endif + +#endif + + +#endif /* _BSD_KERN_PRELOAD_H_ */ diff --git a/bsd/kern/qsort.c b/bsd/kern/qsort.c new file mode 100644 index 000000000..6ccb04112 --- /dev/null +++ b/bsd/kern/qsort.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved + * + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)qsort.c 8.1 (Berkeley) 6/4/93 + */ + + +#include +#include + +static inline char *med3 __P((char *, char *, char *, int (*)())); +static inline void swapfunc __P((char *, char *, int, int)); + +#define min(a, b) (a) < (b) ? 
a : b + +/* + * Qsort routine from Bentley & McIlroy's "Engineering a Sort Function". + */ +#define swapcode(TYPE, parmi, parmj, n) { \ + long i = (n) / sizeof (TYPE); \ + register TYPE *pi = (TYPE *) (parmi); \ + register TYPE *pj = (TYPE *) (parmj); \ + do { \ + register TYPE t = *pi; \ + *pi++ = *pj; \ + *pj++ = t; \ + } while (--i > 0); \ +} + +#define SWAPINIT(a, es) swaptype = ((char *)a - (char *)0) % sizeof(long) || \ + es % sizeof(long) ? 2 : es == sizeof(long)? 0 : 1; + +static inline void +swapfunc(a, b, n, swaptype) + char *a, *b; + int n, swaptype; +{ + if(swaptype <= 1) + swapcode(long, a, b, n) + else + swapcode(char, a, b, n) +} + +#define swap(a, b) \ + if (swaptype == 0) { \ + long t = *(long *)(a); \ + *(long *)(a) = *(long *)(b); \ + *(long *)(b) = t; \ + } else \ + swapfunc(a, b, es, swaptype) + +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype) + +static inline char * +med3(a, b, c, cmp) + char *a, *b, *c; + int (*cmp)(); +{ + return cmp(a, b) < 0 ? + (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a )) + :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? 
a : c )); +} + +void +qsort(a, n, es, cmp) + void *a; + size_t n, es; + int (*cmp)(); +{ + char *pa, *pb, *pc, *pd, *pl, *pm, *pn; + int d, r, swaptype, swap_cnt; + +loop: SWAPINIT(a, es); + swap_cnt = 0; + if (n < 7) { + for (pm = a + es; pm < (char *) a + n * es; pm += es) + for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; + pl -= es) + swap(pl, pl - es); + return; + } + pm = a + (n / 2) * es; + if (n > 7) { + pl = a; + pn = a + (n - 1) * es; + if (n > 40) { + d = (n / 8) * es; + pl = med3(pl, pl + d, pl + 2 * d, cmp); + pm = med3(pm - d, pm, pm + d, cmp); + pn = med3(pn - 2 * d, pn - d, pn, cmp); + } + pm = med3(pl, pm, pn, cmp); + } + swap(a, pm); + pa = pb = a + es; + + pc = pd = a + (n - 1) * es; + for (;;) { + while (pb <= pc && (r = cmp(pb, a)) <= 0) { + if (r == 0) { + swap_cnt = 1; + swap(pa, pb); + pa += es; + } + pb += es; + } + while (pb <= pc && (r = cmp(pc, a)) >= 0) { + if (r == 0) { + swap_cnt = 1; + swap(pc, pd); + pd -= es; + } + pc -= es; + } + if (pb > pc) + break; + swap(pb, pc); + swap_cnt = 1; + pb += es; + pc -= es; + } + if (swap_cnt == 0) { /* Switch to insertion sort */ + for (pm = a + es; pm < (char *) a + n * es; pm += es) + for (pl = pm; pl > (char *) a && cmp(pl - es, pl) > 0; + pl -= es) + swap(pl, pl - es); + return; + } + + pn = a + n * es; + r = min(pa - (char *)a, pb - pa); + vecswap(a, pb - r, r); + r = min(pd - pc, pn - pd - es); + vecswap(pb, pn - r, r); + if ((r = pb - pa) > es) + qsort(a, r / es, es, cmp); + if ((r = pd - pc) > es) { + /* Iterate rather than recurse to save stack space */ + a = pn - r; + n = r / es; + goto loop; + } +/* qsort(pn - r, r / es, es, cmp);*/ +} diff --git a/bsd/kern/spl.c b/bsd/kern/spl.c new file mode 100644 index 000000000..4ab15677d --- /dev/null +++ b/bsd/kern/spl.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +unsigned +sploff( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "sploff()"); + return(0); +} + +unsigned +splhigh( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splhigh()"); + return(0); +} + +unsigned +splsched( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splsched()"); + return(0); +} + +unsigned +splclock ( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splclock()"); + return(0); +} + +unsigned +splpower ( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splpower()"); + return(0); +} + +unsigned +splvm( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splvm()"); + return(0); +} + +unsigned +splbio ( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splbio()"); + return(0); +} + +unsigned +splimp( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", 
"splimp()"); + return(0); +} + +unsigned +spltty(void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "spltty()"); + return(0); +} + +unsigned +splnet( + void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splnet()"); + return(0); +} + +unsigned +splsoftclock(void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splsoftclock()"); + return(0); +} + +void +spllo(void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "spllo()"); + return; +} + +void +spl0(void) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "spl0()"); + return; +} + +void +spln(unsigned t) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "spln()"); + return; +} + +void +splx(unsigned l) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splx()"); + return; +} + +void +splon(unsigned l) +{ + if(thread_funnel_get() == THR_FUNNEL_NULL) + panic("%s not under funnel", "splon()"); + return; +} diff --git a/bsd/kern/subr_disk.c b/bsd/kern/subr_disk.c new file mode 100644 index 000000000..d40c94295 --- /dev/null +++ b/bsd/kern/subr_disk.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94 + */ + +#include +#include +#include +#include +#include + +/* + * Seek sort for disks. We depend on the driver which calls us using b_resid + * as the current cylinder number. + * + * The argument ap structure holds a b_actf activity chain pointer on which we + * keep two queues, sorted in ascending cylinder order. The first queue holds + * those requests which are positioned after the current cylinder (in the first + * request); the second holds requests which came in after their cylinder number + * was passed. Thus we implement a one way scan, retracting after reaching the + * end of the drive to the first request on the second queue, at which time it + * becomes the first queue. + * + * A one-way scan is natural because of the way UNIX read-ahead blocks are + * allocated. + */ + +/* + * For portability with historic industry practice, the + * cylinder number has to be maintained in the `b_resid' + * field. + */ +#define b_cylinder b_resid + +void +disksort(ap, bp) + register struct buf *ap, *bp; +{ + register struct buf *bq; + + /* If the queue is empty, then it's easy. 
*/ + if (ap->b_actf == NULL) { + bp->b_actf = NULL; + ap->b_actf = bp; + return; + } + + /* + * If we lie after the first (currently active) request, then we + * must locate the second request list and add ourselves to it. + */ + bq = ap->b_actf; + if (bp->b_cylinder < bq->b_cylinder) { + while (bq->b_actf) { + /* + * Check for an ``inversion'' in the normally ascending + * cylinder numbers, indicating the start of the second + * request list. + */ + if (bq->b_actf->b_cylinder < bq->b_cylinder) { + /* + * Search the second request list for the first + * request at a larger cylinder number. We go + * before that; if there is no such request, we + * go at end. + */ + do { + if (bp->b_cylinder < + bq->b_actf->b_cylinder) + goto insert; + if (bp->b_cylinder == + bq->b_actf->b_cylinder && + bp->b_blkno < bq->b_actf->b_blkno) + goto insert; + bq = bq->b_actf; + } while (bq->b_actf); + goto insert; /* after last */ + } + bq = bq->b_actf; + } + /* + * No inversions... we will go after the last, and + * be the first request in the second request list. + */ + goto insert; + } + /* + * Request is at/after the current request... + * sort in the first request list. + */ + while (bq->b_actf) { + /* + * We want to go after the current request if there is an + * inversion after it (i.e. it is the end of the first + * request list), or if the next request is a larger cylinder + * than our request. + */ + if (bq->b_actf->b_cylinder < bq->b_cylinder || + bp->b_cylinder < bq->b_actf->b_cylinder || + (bp->b_cylinder == bq->b_actf->b_cylinder && + bp->b_blkno < bq->b_actf->b_blkno)) + goto insert; + bq = bq->b_actf; + } + /* + * Neither a second list nor a larger request... we go at the end of + * the first list, which is the same as the end of the whole schebang. + */ +insert: bp->b_actf = bq->b_actf; + bq->b_actf = bp; +} + +/* encoding of disk minor numbers, should be elsewhere... 
*/ +#define dkunit(dev) (minor(dev) >> 3) +#define dkpart(dev) (minor(dev) & 07) +#define dkminor(unit, part) (((unit) << 3) | (part)) + +/* + * Compute checksum for disk label. + */ +u_int +dkcksum(lp) + register struct disklabel *lp; +{ + register u_short *start, *end; + register u_short sum = 0; + + start = (u_short *)lp; + end = (u_short *)&lp->d_partitions[lp->d_npartitions]; + while (start < end) + sum ^= *start++; + return (sum); +} + +/* + * Disk error is the preface to plaintive error messages + * about failing disk transfers. It prints messages of the form + +hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d) + + * if the offset of the error in the transfer and a disk label + * are both available. blkdone should be -1 if the position of the error + * is unknown; the disklabel pointer may be null from drivers that have not + * been converted to use them. The message is printed with printf + * if pri is LOG_PRINTF, otherwise it uses log at the specified priority. + * The message should be completed (with at least a newline) with printf + * or addlog, respectively. There is no trailing space. + */ +void +diskerr(bp, dname, what, pri, blkdone, lp) + register struct buf *bp; + char *dname, *what; + int pri, blkdone; + register struct disklabel *lp; +{ + int unit = dkunit(bp->b_dev), part = dkpart(bp->b_dev); + register void (*pr) __P((const char *, ...)); + char partname = 'a' + part; + int sn; + + if (pri != LOG_PRINTF) { + log(pri, ""); + pr = addlog; + } else + pr = printf; + (*pr)("%s%d%c: %s %sing fsbn ", dname, unit, partname, what, + bp->b_flags & B_READ ? 
"read" : "writ"); + sn = bp->b_blkno; + if (bp->b_bcount <= DEV_BSIZE) + (*pr)("%d", sn); + else { + if (blkdone >= 0) { + sn += blkdone; + (*pr)("%d of ", sn); + } + (*pr)("%d-%d", bp->b_blkno, + bp->b_blkno + (bp->b_bcount - 1) / DEV_BSIZE); + } + if (lp && (blkdone >= 0 || bp->b_bcount <= lp->d_secsize)) { +#ifdef tahoe + sn *= DEV_BSIZE / lp->d_secsize; /* XXX */ +#endif + sn += lp->d_partitions[part].p_offset; + (*pr)(" (%s%d bn %d; cn %d", dname, unit, sn, + sn / lp->d_secpercyl); + sn %= lp->d_secpercyl; + (*pr)(" tn %d sn %d)", sn / lp->d_nsectors, sn % lp->d_nsectors); + } +} diff --git a/bsd/kern/subr_log.c b/bsd/kern/subr_log.c new file mode 100644 index 000000000..792975a0f --- /dev/null +++ b/bsd/kern/subr_log.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)subr_log.c 8.3 (Berkeley) 2/14/95 + */ + +/* + * Error log buffer for kernel printf's. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if 0 /* [ */ +#include +#include +#endif /* 0 ] */ + +#define LOG_RDPRI (PZERO + 1) + +#define LOG_NBIO 0x02 +#define LOG_ASYNC 0x04 +#define LOG_RDWAIT 0x08 + +struct logsoftc { + int sc_state; /* see above for possibilities */ + struct selinfo sc_selp; /* thread waiting for select */ + int sc_pgid; /* process/group for async I/O */ +} logsoftc; + +int log_open; /* also used in log() */ +struct msgbuf temp_msgbuf; +struct msgbuf *msgbufp; +static int _logentrypend = 0; + +/* + * Serialize log access. Note that the log can be written at interrupt level, + * so any log manipulations that can be done from, or affect, another processor + * at interrupt level must be guarded with a spin lock. + */ +decl_simple_lock_data(,log_lock); /* stop races dead in their tracks */ +#define LOG_LOCK() simple_lock(&log_lock) +#define LOG_UNLOCK() simple_unlock(&log_lock) +#define LOG_LOCK_INIT() simple_lock_init(&log_lock) + +/*ARGSUSED*/ +logopen(dev, flags, mode, p) + dev_t dev; + int flags, mode; + struct proc *p; +{ + unix_master(); /* for pg_id, sigh */ + LOG_LOCK(); + if (log_open) { + LOG_UNLOCK(); + unix_release(); + return (EBUSY); + } + log_open = 1; + logsoftc.sc_pgid = p->p_pid; /* signal process only */ + /* + * Potential race here with putchar() but since putchar should be + * called by autoconf, msg_magic should be initialized by the time + * we get here. 
+ */ + if (msgbufp->msg_magic != MSG_MAGIC) { + register int i; + + msgbufp->msg_magic = MSG_MAGIC; + msgbufp->msg_bufx = msgbufp->msg_bufr = 0; + for (i=0; i < MSG_BSIZE; i++) + msgbufp->msg_bufc[i] = 0; + } + LOG_UNLOCK(); + unix_release(); + return (0); +} + +/*ARGSUSED*/ +int +logclose(dev, flag) + dev_t dev; +{ + int oldpri; + LOG_LOCK(); + log_open = 0; + selwakeup(&logsoftc.sc_selp); + oldpri = splhigh(); + selthreadclear(&logsoftc.sc_selp); + splx(oldpri); + LOG_UNLOCK(); + return (0); +} + +/*ARGSUSED*/ +int +logread(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + register long l; + register int s; + int error = 0; + + s = splhigh(); + while (msgbufp->msg_bufr == msgbufp->msg_bufx) { + if (flag & IO_NDELAY) { + splx(s); + return (EWOULDBLOCK); + } + if (logsoftc.sc_state & LOG_NBIO) { + splx(s); + return (EWOULDBLOCK); + } + logsoftc.sc_state |= LOG_RDWAIT; + if (error = tsleep((caddr_t)msgbufp, LOG_RDPRI | PCATCH, + "klog", 0)) { + splx(s); + return (error); + } + } + splx(s); + logsoftc.sc_state &= ~LOG_RDWAIT; + + while (uio->uio_resid > 0) { + l = msgbufp->msg_bufx - msgbufp->msg_bufr; + if (l < 0) + l = MSG_BSIZE - msgbufp->msg_bufr; + l = min(l, uio->uio_resid); + if (l == 0) + break; + error = uiomove((caddr_t)&msgbufp->msg_bufc[msgbufp->msg_bufr], + (int)l, uio); + if (error) + break; + msgbufp->msg_bufr += l; + if (msgbufp->msg_bufr < 0 || msgbufp->msg_bufr >= MSG_BSIZE) + msgbufp->msg_bufr = 0; + } + return (error); +} + +/*ARGSUSED*/ +int +logselect(dev, rw, p) + dev_t dev; + int rw; + struct proc *p; +{ + int s = splhigh(); + + switch (rw) { + + case FREAD: + if (msgbufp->msg_bufr != msgbufp->msg_bufx) { + splx(s); + return (1); + } + selrecord(p, &logsoftc.sc_selp); + break; + } + splx(s); + return (0); +} + +void +logwakeup() +{ + struct proc *p; + int pgid; + boolean_t funnel_state; + + if (!log_open) + return; + funnel_state = thread_funnel_set(kernel_flock, TRUE); + selwakeup(&logsoftc.sc_selp); + if (logsoftc.sc_state & 
LOG_ASYNC) { + unix_master(); + LOG_LOCK(); + pgid = logsoftc.sc_pgid; + LOG_UNLOCK(); + if (pgid < 0) + gsignal(-pgid, SIGIO); + else if (p = pfind(pgid)) + psignal(p, SIGIO); + unix_release(); + } + if (logsoftc.sc_state & LOG_RDWAIT) { + wakeup((caddr_t)msgbufp); + logsoftc.sc_state &= ~LOG_RDWAIT; + } + (void) thread_funnel_set(kernel_flock, funnel_state); +} + +void +klogwakeup() +{ + + if (_logentrypend) { + _logentrypend = 0; + logwakeup(); + } +} + +/*ARGSUSED*/ +int +logioctl(com, data, flag) + caddr_t data; +{ + long l; + int s; + + switch (com) { + + /* return number of characters immediately available */ + case FIONREAD: + s = splhigh(); + l = msgbufp->msg_bufx - msgbufp->msg_bufr; + splx(s); + if (l < 0) + l += MSG_BSIZE; + *(off_t *)data = l; + break; + + case FIONBIO: + if (*(int *)data) + logsoftc.sc_state |= LOG_NBIO; + else + logsoftc.sc_state &= ~LOG_NBIO; + break; + + case FIOASYNC: + if (*(int *)data) + logsoftc.sc_state |= LOG_ASYNC; + else + logsoftc.sc_state &= ~LOG_ASYNC; + break; + + case TIOCSPGRP: + LOG_LOCK(); + logsoftc.sc_pgid = *(int *)data; + LOG_UNLOCK(); + break; + + case TIOCGPGRP: + LOG_LOCK(); + *(int *)data = logsoftc.sc_pgid; + LOG_UNLOCK(); + break; + + default: + return (-1); + } + return (0); +} + +void +log_init() +{ + msgbufp = &temp_msgbuf; + LOG_LOCK_INIT(); +} + +void +log_putc(char c) +{ + register struct msgbuf *mbp; + + if (msgbufp == NULL) + msgbufp =&temp_msgbuf; + + mbp = msgbufp; + if (mbp-> msg_magic != MSG_MAGIC) { + register int i; + + mbp->msg_magic = MSG_MAGIC; + mbp->msg_bufx = mbp->msg_bufr = 0; + for (i=0; i < MSG_BSIZE; i++) + mbp->msg_bufc[i] = 0; + } + mbp->msg_bufc[mbp->msg_bufx++] = c; + _logentrypend = 1; + if (mbp->msg_bufx < 0 || mbp->msg_bufx >= MSG_BSIZE) + mbp->msg_bufx = 0; +} diff --git a/bsd/kern/subr_prf.c b/bsd/kern/subr_prf.c new file mode 100644 index 000000000..814ac7056 --- /dev/null +++ b/bsd/kern/subr_prf.c @@ -0,0 +1,902 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1986, 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)subr_prf.c 8.4 (Berkeley) 5/4/95 + */ +/* HISTORY + * 22-Sep-1997 Umesh Vaishampayan (umeshv@apple.com) + * Cleaned up m68k crud. Fixed vlog() to do logpri() for ppc, too. + * + * 17-July-97 Umesh Vaishampayan (umeshv@apple.com) + * Eliminated multiple definition of constty which is defined + * in bsd/dev/XXX/cons.c + * + * 26-MAR-1997 Umesh Vaishampayan (umeshv@NeXT.com + * Fixed tharshing format in many functions. Cleanup. 
+ * + * 17-Jun-1995 Mac Gillon (mgillon) at NeXT + * Purged old history + * New version based on 4.4 and NS3.3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* for cpu_number() */ +#include +#include + +struct snprintf_arg { + char *str; + size_t remain; +}; + + +/* + * In case console is off, + * panicstr contains argument to last + * call to panic. + */ +extern const char *panicstr; + +extern cnputc(); /* standard console putc */ +int (*v_putc)() = cnputc; /* routine to putc on virtual console */ + +extern struct tty cons; /* standard console tty */ +extern struct tty *constty; /* pointer to console "window" tty */ + +/* + * Record cpu that panic'd and lock around panic data + */ + +static void puts(const char *s, int flags, struct tty *ttyp); +static void printn(u_long n, int b, int flags, struct tty *ttyp, int zf, int fld_size); + +/* MP printf stuff */ +decl_simple_lock_data(,printf_lock) +#if NCPUS > 1 +boolean_t new_printf_cpu_number; /* do we need to output who we are */ +#endif + +extern void logwakeup(); +extern void halt_cpu(); +extern boot(); +int putchar(); + +static void +snprintf_func(int ch, void *arg); + + + +/* + * Uprintf prints to the controlling terminal for the current process. + * It may block if the tty queue is overfull. No message is printed if + * the queue does not clear in a reasonable time. + */ +void +uprintf(const char *fmt, ...) 
+{ + register struct proc *p = current_proc(); + va_list ap; + + unix_master(); /* sessions, sigh */ + if (p->p_flag & P_CONTROLT && p->p_session->s_ttyvp) { + va_start(ap, fmt); + prf(fmt, ap, TOTTY, (struct tty *)p->p_session->s_ttyvp); + va_end(ap); + } + unix_release(); +} + +tpr_t +tprintf_open(p) + register struct proc *p; +{ + unix_master(); /* sessions, sigh */ + if (p->p_flag & P_CONTROLT && p->p_session->s_ttyvp) { + SESSHOLD(p->p_session); + unix_release(); + return ((tpr_t) p->p_session); + } + unix_release(); + return ((tpr_t) NULL); +} + +void +tprintf_close(sess) + tpr_t sess; +{ + unix_master(); /* sessions, sigh */ + if (sess) + SESSRELE((struct session *) sess); + unix_release(); +} + +/* + * tprintf prints on the controlling terminal associated + * with the given session. + */ +void +tprintf(tpr_t tpr, const char *fmt, ...) +{ + register struct session *sess = (struct session *)tpr; + struct tty *tp = NULL; + int flags = TOLOG; + va_list ap; + + logpri(LOG_INFO); + unix_master(); /* sessions, sigh */ + if (sess && sess->s_ttyvp && ttycheckoutq(sess->s_ttyp, 0)) { + flags |= TOTTY; + tp = sess->s_ttyp; + } + if (tp != NULL) { + va_start(ap, fmt); + prf(fmt, ap, TOTTY, tp); + va_end(ap); + } + unix_release(); + logwakeup(); +} + +/* + * Ttyprintf displays a message on a tty; it should be used only by + * the tty driver, or anything that knows the underlying tty will not + * be revoke(2)'d away. Other callers should use tprintf. + */ +void +ttyprintf(struct tty *tp, const char *fmt, ...) +{ + va_list ap; + + if (tp != NULL) { + va_start(ap, fmt); + prf(fmt, ap, TOTTY, tp); + va_end(ap); + } +} + +extern int log_open; + + +void +logpri(level) + int level; +{ + + putchar('<', TOLOG, (struct tty *)0); + printn((u_long)level, 10, TOLOG, (struct tty *)0, 0, 0); + putchar('>', TOLOG, (struct tty *)0); +} + +void +addlog(const char *fmt, ...) 
+{ + register s = splhigh(); + va_list ap; + + va_start(ap, fmt); + prf(fmt, ap, TOLOG, (struct tty *)0); + splx(s); + if (!log_open) + prf(fmt, ap, TOCONS, (struct tty *)0); + va_end(ap); + logwakeup(); +} +void _printf(int flags, struct tty *ttyp, const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + prf(format, ap, flags, ttyp); + va_end(ap); +} + +int prf(const char *fmt, va_list ap, int flags, struct tty *ttyp) +{ + register int b, c, i; + char *s; + int any; + int zf = 0, fld_size; + +#if NCPUS > 1 + int cpun = cpu_number(); + + if(ttyp == 0) { + simple_lock(&printf_lock); + } else + TTY_LOCK(ttyp); + + if (cpun != master_cpu) + new_printf_cpu_number = TRUE; + + if (new_printf_cpu_number) { + putchar('{', flags, ttyp); + printn((u_long)cpun, 10, flags, ttyp, 0, 0); + putchar('}', flags, ttyp); + } +#endif /* NCPUS > 1 */ +loop: + while ((c = *fmt++) != '%') { + if (c == '\0') { +#if NCPUS > 1 + if(ttyp == 0) { + simple_unlock(&printf_lock); + } else + TTY_UNLOCK(ttyp); +#endif + return 0; + } + putchar(c, flags, ttyp); + } +again: + zf = 0; + fld_size = 0; + c = *fmt++; + if (c == '0') + zf = '0'; + fld_size = 0; + for (;c <= '9' && c >= '0'; c = *fmt++) + fld_size = fld_size * 10 + c - '0'; + + /* THIS CODE IS VAX DEPENDENT IN HANDLING %l? 
AND %c */ + switch (c) { + + case 'l': + goto again; + case 'x': case 'X': + b = 16; + goto number; + case 'd': case 'D': + case 'u': /* what a joke */ + b = 10; + goto number; + case 'o': case 'O': + b = 8; +number: + printn(va_arg(ap, unsigned), b, flags, ttyp, zf, fld_size); + break; + case 'c': + b = va_arg(ap, unsigned); +#if BYTE_ORDER == LITTLE_ENDIAN + for (i = 24; i >= 0; i -= 8) + if (c = (b >> i) & 0x7f) + putchar(c, flags, ttyp); +#endif +#if BYTE_ORDER == BIG_ENDIAN + if ((c = (b & 0x7f))) + putchar(c, flags, ttyp); +#endif + break; + case 'b': + b = va_arg(ap, unsigned); + s = va_arg(ap, char *); + printn((u_long)b, *s++, flags, ttyp, 0, 0); + any = 0; + if (b) { + while ((i = *s++)) { + if (*s <= 32) { + register int j; + + if (any++) + putchar(',', flags, ttyp); + j = *s++ ; + for (; (c = *s) > 32 ; s++) + putchar(c, flags, ttyp); + printn( (u_long)( (b >> (j-1)) & + ( (2 << (i-j)) -1)), + 8, flags, ttyp, 0, 0); + } else if (b & (1 << (i-1))) { + putchar(any? ',' : '<', flags, ttyp); + any = 1; + for (; (c = *s) > 32; s++) + putchar(c, flags, ttyp); + } else + for (; *s > 32; s++) + ; + } + putchar('>', flags, ttyp); + } + break; + + case 's': + s = va_arg(ap, char *); +#ifdef DEBUG + if (fld_size) { + while (fld_size-- > 0) + putchar((c = *s++)? 
c : '_', flags, ttyp); + } else { + while ((c = *s++)) + putchar(c, flags, ttyp); + } +#else + while (c = *s++) + putchar(c, flags, ttyp); +#endif + break; + + case '%': + putchar('%', flags, ttyp); + goto loop; + case 'C': + b = va_arg(ap, unsigned); +#if BYTE_ORDER == LITTLE_ENDIAN + for (i = 24; i >= 0; i -= 8) + if (c = (b >> i) & 0x7f) + putchar(c, flags, ttyp); +#endif +#if BYTE_ORDER == BIG_ENDIAN + if ((c = (b & 0x7f))) + putchar(c, flags, ttyp); +#endif + + case 'r': + case 'R': + b = va_arg(ap, unsigned); + s = va_arg(ap, char *); + if (c == 'R') { + puts("0x", flags, ttyp); + printn((u_long)b, 16, flags, ttyp, 0, 0); + } + any = 0; + if (c == 'r' || b) { + register struct reg_desc *rd; + register struct reg_values *rv; + unsigned field; + + putchar('<', flags, ttyp); + for (rd = (struct reg_desc *)s; rd->rd_mask; rd++) { + field = b & rd->rd_mask; + field = (rd->rd_shift > 0) + ? field << rd->rd_shift + : field >> -rd->rd_shift; + if (any && + (rd->rd_format || rd->rd_values + || (rd->rd_name && field) + ) + ) + putchar(',', flags, ttyp); + if (rd->rd_name) { + if (rd->rd_format || rd->rd_values + || field) { + puts(rd->rd_name, flags, ttyp); + any = 1; + } + if (rd->rd_format || rd->rd_values) { + putchar('=', flags, ttyp); + any = 1; + } + } + if (rd->rd_format) { + _printf(flags, ttyp, rd->rd_format, + field); + any = 1; + if (rd->rd_values) + putchar(':', flags, ttyp); + } + if (rd->rd_values) { + any = 1; + for (rv = rd->rd_values; + rv->rv_name; + rv++) { + if (field == rv->rv_value) { + puts(rv->rv_name, flags, + ttyp); + break; + } + } + if (rv->rv_name == NULL) + puts("???", flags, ttyp); + } + } + putchar('>', flags, ttyp); + } + break; + + case 'n': + case 'N': + { + register struct reg_values *rv; + + b = va_arg(ap, unsigned); + s = va_arg(ap,char *); + for (rv = (struct reg_values *)s; rv->rv_name; rv++) { + if (b == rv->rv_value) { + puts(rv->rv_name, flags, ttyp); + break; + } + } + if (rv->rv_name == NULL) + puts("???", flags, ttyp); + if 
(c == 'N' || rv->rv_name == NULL) { + putchar(':', flags, ttyp); + printn((u_long)b, 10, flags, ttyp, 0, 0); + } + } + break; + } + goto loop; +} + +static void puts(const char *s, int flags, struct tty *ttyp) +{ + register char c; + + while ((c = *s++)) + putchar(c, flags, ttyp); +} + +/* + * Printn prints a number n in base b. + * We don't use recursion to avoid deep kernel stacks. + */ +static void printn(u_long n, int b, int flags, struct tty *ttyp, int zf, int fld_size) +{ + char prbuf[11]; + register char *cp; + + if (b == 10 && (int)n < 0) { + putchar('-', flags, ttyp); + n = (unsigned)(-(int)n); + } + cp = prbuf; + do { + *cp++ = "0123456789abcdef"[n%b]; + n /= b; + } while (n); + if (fld_size) { + for (fld_size -= cp - prbuf; fld_size > 0; fld_size--) + if (zf) + putchar('0', flags, ttyp); + else + putchar(' ', flags, ttyp); + } + do + putchar(*--cp, flags, ttyp); + while (cp > prbuf); +} + + + +/* + * Warn that a system table is full. + */ +void tablefull(const char *tab) +{ + log(LOG_ERR, "%s: table is full\n", tab); +} + +/* + * Print a character on console or users terminal. + * If destination is console then the last MSGBUFS characters + * are saved in msgbuf for inspection later. + */ +/*ARGSUSED*/ +int +putchar(c, flags, tp) + register int c; + struct tty *tp; +{ + register struct msgbuf *mbp; + char **sp = (char**) tp; + + if (panicstr) + constty = 0; + if ((flags & TOCONS) && tp == NULL && constty) { + tp = constty; + flags |= TOTTY; + } + if ((flags & TOTTY) && tp && tputchar(c, tp) < 0 && + (flags & TOCONS) && tp == constty) + constty = 0; + if ((flags & TOLOG) && c != '\0' && c != '\r' && c != 0177) + log_putc(c); + if ((flags & TOCONS) && constty == 0 && c != '\0') + (*v_putc)(c); + if (flags & TOSTR) { + **sp = c; + (*sp)++; + } + return 0; +} + + + +/* + * Scaled down version of vsprintf(3). 
+ */ +int +vsprintf(char *buf, const char *cfmt, va_list ap) +{ + int retval; + + retval = kvprintf(cfmt, NULL, (void *)buf, 10, ap); + buf[retval] = '\0'; + return retval; +} + +/* + * Scaled down version of snprintf(3). + */ +int +snprintf(char *str, size_t size, const char *format, ...) +{ + int retval; + va_list ap; + + va_start(ap, format); + retval = vsnprintf(str, size, format, ap); + va_end(ap); + return(retval); +} + +/* + * Scaled down version of vsnprintf(3). + */ +int +vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + struct snprintf_arg info; + int retval; + + info.str = str; + info.remain = size; + retval = kvprintf(format, snprintf_func, &info, 10, ap); + if (info.remain >= 1) + *info.str++ = '\0'; + return retval; +} + +static void +snprintf_func(int ch, void *arg) +{ + struct snprintf_arg *const info = arg; + + if (info->remain >= 2) { + *info->str++ = ch; + info->remain--; + } +} + +/* + * Put a number (base <= 16) in a buffer in reverse order; return an + * optional length and a pointer to the NULL terminated (preceded?) + * buffer. + */ +static char * +ksprintn(ul, base, lenp) + register u_long ul; + register int base, *lenp; +{ /* A long in base 8, plus NULL. */ + static char buf[sizeof(long) * NBBY / 3 + 2]; + register char *p; + + p = buf; + do { + *++p = hex2ascii(ul % base); + } while (ul /= base); + if (lenp) + *lenp = p - buf; + return (p); +} + +/* + * Scaled down version of printf(3). + * + * Two additional formats: + * + * The format %b is supported to decode error registers. + * Its usage is: + * + * printf("reg=%b\n", regval, "*"); + * + * where is the output base expressed as a control character, e.g. + * \10 gives octal; \20 gives hex. Each arg is a sequence of characters, + * the first of which gives the bit number to be inspected (origin 1), and + * the next characters (up to a control character, i.e. a character <= 32), + * give the name of the register. 
Thus: + * + * kvprintf("reg=%b\n", 3, "\10\2BITTWO\1BITONE\n"); + * + * would produce output: + * + * reg=3 + * + * XXX: %D -- Hexdump, takes pointer and separator string: + * ("%6D", ptr, ":") -> XX:XX:XX:XX:XX:XX + * ("%*D", len, ptr, " " -> XX XX XX XX ... + */ +int +kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_list ap) +{ +#define PCHAR(c) {int cc=(c); if (func) (*func)(cc,arg); else *d++ = cc; retval++; } + char *p, *q, *d; + u_char *up; + int ch, n; + u_long ul; + int base, lflag, tmp, width, ladjust, sharpflag, neg, sign, dot; + int dwidth; + char padc; + int retval = 0; + + if (!func) + d = (char *) arg; + else + d = NULL; + + if (fmt == NULL) + fmt = "(fmt null)\n"; + + if (radix < 2 || radix > 36) + radix = 10; + + for (;;) { + padc = ' '; + width = 0; + while ((ch = (u_char)*fmt++) != '%') { + if (ch == '\0') + return retval; + PCHAR(ch); + } + lflag = 0; ladjust = 0; sharpflag = 0; neg = 0; + sign = 0; dot = 0; dwidth = 0; +reswitch: switch (ch = (u_char)*fmt++) { + case '.': + dot = 1; + goto reswitch; + case '#': + sharpflag = 1; + goto reswitch; + case '+': + sign = 1; + goto reswitch; + case '-': + ladjust = 1; + goto reswitch; + case '%': + PCHAR(ch); + break; + case '*': + if (!dot) { + width = va_arg(ap, int); + if (width < 0) { + ladjust = !ladjust; + width = -width; + } + } else { + dwidth = va_arg(ap, int); + } + goto reswitch; + case '0': + if (!dot) { + padc = '0'; + goto reswitch; + } + case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + for (n = 0;; ++fmt) { + n = n * 10 + ch - '0'; + ch = *fmt; + if (ch < '0' || ch > '9') + break; + } + if (dot) + dwidth = n; + else + width = n; + goto reswitch; + case 'b': + ul = va_arg(ap, int); + p = va_arg(ap, char *); + for (q = ksprintn(ul, *p++, NULL); *q;) + PCHAR(*q--); + + if (!ul) + break; + + for (tmp = 0; *p;) { + n = *p++; + if (ul & (1 << (n - 1))) { + PCHAR(tmp ? 
',' : '<'); + for (; (n = *p) > ' '; ++p) + PCHAR(n); + tmp = 1; + } else + for (; *p > ' '; ++p) + continue; + } + if (tmp) + PCHAR('>'); + break; + case 'c': + PCHAR(va_arg(ap, int)); + break; + case 'D': + up = va_arg(ap, u_char *); + p = va_arg(ap, char *); + if (!width) + width = 16; + while(width--) { + PCHAR(hex2ascii(*up >> 4)); + PCHAR(hex2ascii(*up & 0x0f)); + up++; + if (width) + for (q=p;*q;q++) + PCHAR(*q); + } + break; + case 'd': + ul = lflag ? va_arg(ap, long) : va_arg(ap, int); + sign = 1; + base = 10; + goto number; + case 'l': + lflag = 1; + goto reswitch; + case 'o': + ul = lflag ? va_arg(ap, u_long) : va_arg(ap, u_int); + base = 8; + goto nosign; + case 'p': + ul = (uintptr_t)va_arg(ap, void *); + base = 16; + sharpflag = (width == 0); + goto nosign; + case 'n': + case 'r': + ul = lflag ? va_arg(ap, u_long) : + sign ? (u_long)va_arg(ap, int) : va_arg(ap, u_int); + base = radix; + goto number; + case 's': + p = va_arg(ap, char *); + if (p == NULL) + p = "(null)"; + if (!dot) + n = strlen (p); + else + for (n = 0; n < dwidth && p[n]; n++) + continue; + + width -= n; + + if (!ladjust && width > 0) + while (width--) + PCHAR(padc); + while (n--) + PCHAR(*p++); + if (ladjust && width > 0) + while (width--) + PCHAR(padc); + break; + case 'u': + ul = lflag ? va_arg(ap, u_long) : va_arg(ap, u_int); + base = 10; + goto nosign; + case 'x': + ul = lflag ? va_arg(ap, u_long) : va_arg(ap, u_int); + base = 16; + goto nosign; + case 'z': + ul = lflag ? va_arg(ap, u_long) : + sign ? 
(u_long)va_arg(ap, int) : va_arg(ap, u_int); + base = 16; + goto number; +nosign: sign = 0; +number: if (sign && (long)ul < 0L) { + neg = 1; + ul = -(long)ul; + } + p = ksprintn(ul, base, &tmp); + if (sharpflag && ul != 0) { + if (base == 8) + tmp++; + else if (base == 16) + tmp += 2; + } + if (neg) + tmp++; + + if (!ladjust && width && (width -= tmp) > 0) + while (width--) + PCHAR(padc); + if (neg) + PCHAR('-'); + if (sharpflag && ul != 0) { + if (base == 8) { + PCHAR('0'); + } else if (base == 16) { + PCHAR('0'); + PCHAR('x'); + } + } + + while (*p) + PCHAR(*p--); + + if (ladjust && width && (width -= tmp) > 0) + while (width--) + PCHAR(padc); + + break; + default: + PCHAR('%'); + if (lflag) + PCHAR('l'); + PCHAR(ch); + break; + } + } +#undef PCHAR +} diff --git a/bsd/kern/subr_prof.c b/bsd/kern/subr_prof.c new file mode 100644 index 000000000..ed0455eeb --- /dev/null +++ b/bsd/kern/subr_prof.c @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)subr_prof.c 8.3 (Berkeley) 9/23/93 + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#ifdef GPROF +#include +#include +#include +#include + +/* + * Froms is actually a bunch of unsigned shorts indexing tos + */ +struct gmonparam _gmonparam = { GMON_PROF_OFF }; + +kmstartup() +{ + char *cp; + u_long fromssize, tossize; + struct segment_command *sgp; + struct gmonparam *p = &_gmonparam; + + sgp = getsegbyname("__TEXT"); + p->lowpc = (u_long)sgp->vmaddr; + p->highpc = (u_long)(sgp->vmaddr + sgp->vmsize); + + /* + * Round lowpc and highpc to multiples of the density we're using + * so the rest of the scaling (here and in gprof) stays in ints. + */ + p->lowpc = ROUNDDOWN(p->lowpc, HISTFRACTION * sizeof(HISTCOUNTER)); + p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER)); + p->textsize = p->highpc - p->lowpc; + printf("Profiling kernel, textsize=%d [0x%08x..0x%08x]\n", + p->textsize, p->lowpc, p->highpc); + p->kcountsize = p->textsize / HISTFRACTION; + p->hashfraction = HASHFRACTION; + p->fromssize = p->textsize / HASHFRACTION; + p->tolimit = p->textsize * ARCDENSITY / 100; + if (p->tolimit < MINARCS) + p->tolimit = MINARCS; + else if (p->tolimit > MAXARCS) + p->tolimit = MAXARCS; + p->tossize = p->tolimit * sizeof(struct tostruct); + /* Why not use MALLOC with M_GPROF ? */ + cp = (char *)kalloc(p->kcountsize + p->fromssize + p->tossize); + if (cp == 0) { + printf("No memory for profiling.\n"); + return; + } + bzero(cp, p->kcountsize + p->tossize + p->fromssize); + p->tos = (struct tostruct *)cp; + cp += p->tossize; + p->kcount = (u_short *)cp; + cp += p->kcountsize; + p->froms = (u_short *)cp; +} + +/* + * Return kernel profiling information. 
+ */ +sysctl_doprof(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + struct gmonparam *gp = &_gmonparam; + int error; + + /* all sysctl names at this level are terminal */ + if (namelen != 1) + return (ENOTDIR); /* overloaded */ + + switch (name[0]) { + case GPROF_STATE: + error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state); + if (error) + return (error); + if (gp->state == GMON_PROF_OFF) + stopprofclock(kernproc); + else + startprofclock(kernproc); + return (0); + case GPROF_COUNT: + return (sysctl_struct(oldp, oldlenp, newp, newlen, + gp->kcount, gp->kcountsize)); + case GPROF_FROMS: + return (sysctl_struct(oldp, oldlenp, newp, newlen, + gp->froms, gp->fromssize)); + case GPROF_TOS: + return (sysctl_struct(oldp, oldlenp, newp, newlen, + gp->tos, gp->tossize)); + case GPROF_GMONPARAM: + return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp)); + default: + return (EOPNOTSUPP); + } + /* NOTREACHED */ +} + + +/* + * mcount() called with interrupts disabled. + */ +void +mcount( + register u_long frompc, + register u_long selfpc +) +{ + unsigned short *frompcindex; + register struct tostruct *top, *prevtop; + struct gmonparam *p = &_gmonparam; + register long toindex; + MCOUNT_INIT; + + /* + * check that we are profiling + * and that we aren't recursively invoked. + */ + if (p->state != GMON_PROF_ON) + return; + + MCOUNT_ENTER; + + /* + * check that frompcindex is a reasonable pc value. + * for example: signal catchers get called from the stack, + * not from text space. too bad. 
+ */ + frompc -= p->lowpc; + if (frompc > p->textsize) + goto done; + + frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))]; + toindex = *frompcindex; + if (toindex == 0) { + /* + * first time traversing this arc + */ + toindex = ++p->tos[0].link; + if (toindex >= p->tolimit) { + /* halt further profiling */ + goto overflow; + } + *frompcindex = toindex; + top = &p->tos[toindex]; + top->selfpc = selfpc; + top->count = 1; + top->link = 0; + goto done; + } + top = &p->tos[toindex]; + if (top->selfpc == selfpc) { + /* + * arc at front of chain; usual case. + */ + top->count++; + goto done; + } + /* + * have to go looking down chain for it. + * top points to what we are looking at, + * prevtop points to previous top. + * we know it is not at the head of the chain. + */ + for (; /* goto done */; ) { + if (top->link == 0) { + /* + * top is end of the chain and none of the chain + * had top->selfpc == selfpc. + * so we allocate a new tostruct + * and link it to the head of the chain. + */ + toindex = ++p->tos[0].link; + if (toindex >= p->tolimit) { + goto overflow; + } + top = &p->tos[toindex]; + top->selfpc = selfpc; + top->count = 1; + top->link = *frompcindex; + *frompcindex = toindex; + goto done; + } + /* + * otherwise, check the next arc on the chain. + */ + prevtop = top; + top = &p->tos[top->link]; + if (top->selfpc == selfpc) { + /* + * there it is. + * increment its count + * move it to the head of the chain. 
+ */ + top->count++; + toindex = prevtop->link; + prevtop->link = top->link; + top->link = *frompcindex; + *frompcindex = toindex; + goto done; + } + + } +done: + MCOUNT_EXIT; + return; + +overflow: + p->state = GMON_PROF_ERROR; + MCOUNT_EXIT; + printf("mcount: tos overflow\n"); + return; +} + +#endif /* GPROF */ + +#if NCPUS > 1 +#define PROFILE_LOCK(x) simple_lock(x) +#define PROFILE_UNLOCK(x) simple_unlock(x) +#else +#define PROFILE_LOCK(x) +#define PROFILE_UNLOCK(x) +#endif + +struct profil_args { + short *bufbase; + u_int bufsize; + u_int pcoffset; + u_int pcscale; +}; +int +profil(p, uap, retval) + struct proc *p; + register struct profil_args *uap; + register_t *retval; +{ + register struct uprof *upp = &p->p_stats->p_prof; + struct uprof *upc, *nupc; + int s; + + if (uap->pcscale > (1 << 16)) + return (EINVAL); + if (uap->pcscale == 0) { + stopprofclock(p); + return (0); + } + + /* Block profile interrupts while changing state. */ + s = splstatclock(); + PROFILE_LOCK(&upp->pr_lock); + upp->pr_base = (caddr_t)uap->bufbase; + upp->pr_size = uap->bufsize; + upp->pr_off = uap->pcoffset; + upp->pr_scale = uap->pcscale; + + /* remove buffers previously allocated with add_profil() */ + for (upc = upp->pr_next; upc; upc = nupc) { + nupc = upc->pr_next; + kfree(upc, sizeof (struct uprof)); + } + + upp->pr_next = 0; + PROFILE_UNLOCK(&upp->pr_lock); + startprofclock(p); + splx(s); + return(0); +} + +struct add_profile_args { + short *bufbase; + u_int bufsize; + u_int pcoffset; + u_int pcscale; +}; +int +add_profil(p, uap, retval) + struct proc *p; + register struct add_profile_args *uap; + register_t *retval; +{ + struct uprof *upp = &p->p_stats->p_prof, *upc; + int s; + + if (upp->pr_scale == 0) + return (0); + s = splstatclock(); + upc = (struct uprof *) kalloc(sizeof (struct uprof)); + upc->pr_base = (caddr_t)uap->bufbase; + upc->pr_size = uap->bufsize; + upc->pr_off = uap->pcoffset; + upc->pr_scale = uap->pcscale; + PROFILE_LOCK(&upp->pr_lock); + upc->pr_next = 
upp->pr_next; + upp->pr_next = upc; + PROFILE_UNLOCK(&upp->pr_lock); + splx(s); + return(0); +} + +/* + * Scale is a fixed-point number with the binary point 16 bits + * into the value, and is <= 1.0. pc is at most 32 bits, so the + * intermediate result is at most 48 bits. + */ +#define PC_TO_INDEX(pc, prof) \ + ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \ + (u_quad_t)((prof)->pr_scale)) >> 16) & ~1) + +/* + * Collect user-level profiling statistics; called on a profiling tick, + * when a process is running in user-mode. We use + * an AST that will vector us to trap() with a context in which copyin + * and copyout will work. Trap will then call addupc_task(). + * + * Note that we may (rarely) not get around to the AST soon enough, and + * lose profile ticks when the next tick overwrites this one, but in this + * case the system is overloaded and the profile is probably already + * inaccurate. + * + * We can afford to take faults here. If the + * update fails, we simply turn off profiling. + */ +void +addupc_task(p, pc, ticks) + register struct proc *p; + register u_long pc; + u_int ticks; +{ + register struct uprof *prof; + register short *cell; + register u_int off; + u_short count; + + /* Testing P_PROFIL may be unnecessary, but is certainly safe. 
*/ + if ((p->p_flag & P_PROFIL) == 0 || ticks == 0) + return; + + for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) { + off = PC_TO_INDEX(pc,prof); + cell = (short *)(prof->pr_base + off); + if (cell >= (short *)prof->pr_base && + cell < (short*)(prof->pr_size + (int) prof->pr_base)) { + if (copyin((caddr_t)cell, (caddr_t) &count, sizeof(count)) == 0) { + count += ticks; + if(copyout((caddr_t) &count, (caddr_t)cell, sizeof(count)) == 0) + return; + } + p->p_stats->p_prof.pr_scale = 0; + stopprofclock(p); + break; + } + } +} diff --git a/bsd/kern/subr_xxx.c b/bsd/kern/subr_xxx.c new file mode 100644 index 000000000..503401a6c --- /dev/null +++ b/bsd/kern/subr_xxx.c @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)subr_xxx.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Unsupported device function (e.g. writing to read-only device). + */ +int +enodev() +{ + return (ENODEV); +} + +/* + * Unsupported strategy function. + */ +void +enodev_strat() +{ + return; +} + +/* + * Unconfigured device function; driver not configured. 
+ */ +int +enxio() +{ + return (ENXIO); +} + +/* + * Unsupported ioctl function. + */ +int +enoioctl() +{ + return (ENOTTY); +} + + +/* + * Unsupported system function. + * This is used for an otherwise-reasonable operation + * that is not supported by the current system binary. + */ +int +enosys() +{ + return (ENOSYS); +} + +/* + * Return error for operation not supported + * on a specific object or file type. + */ +int +eopnotsupp() +{ + return (EOPNOTSUPP); +} + +/* + * Generic null operation, always returns success. + */ +int +nullop() +{ + return (0); +} + + +/* + * Null routine; placed in insignificant entries + * in the bdevsw and cdevsw tables. + */ +int +nulldev() +{ + return (0); +} + +/* + * Null system calls. Not invalid, just not configured. + */ +int +errsys() +{ + return(EINVAL); +} + +void +nullsys() +{ +} + +/* + * nonexistent system call-- signal process (may want to handle it) + * flag error if process won't see signal immediately + * Q: should we do that all the time ?? + */ +/* ARGSUSED */ +int +nosys(p, args, retval) + struct proc *p; + void *args; + register_t *retval; +{ + psignal(p, SIGSYS); + return (ENOSYS); +} + +#ifdef GPROF +/* + * Stub routine in case it is ever possible to free space. + */ +void +cfreemem(cp, size) + caddr_t cp; + int size; +{ + printf("freeing %x, size %d\n", cp, size); +} +#endif + + diff --git a/bsd/kern/sys_domain.c b/bsd/kern/sys_domain.c new file mode 100644 index 000000000..1f6cd83af --- /dev/null +++ b/bsd/kern/sys_domain.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + * + */ +/* + * @(#)sys_domain.c 1.0 (6/1/2000) + */ + +#include +#include +#include +#include +#include + + +extern struct protosw eventsw; + +struct domain systemdomain = + { PF_SYSTEM, "system", 0, 0, 0, + &eventsw}; diff --git a/bsd/kern/sys_generic.c b/bsd/kern/sys_generic.c new file mode 100644 index 000000000..b8c6e975e --- /dev/null +++ b/bsd/kern/sys_generic.c @@ -0,0 +1,1266 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. 
All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)sys_generic.c 8.9 (Berkeley) 2/14/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if KTRACE +#include +#endif + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Read system call. 
+ */ +struct read_args { + int fd; + char *cbuf; + u_int nbyte; +}; +/* ARGSUSED */ +read(p, uap, retval) + struct proc *p; + register struct read_args *uap; + register_t *retval; +{ + struct uio auio; + struct iovec aiov; + + aiov.iov_base = (caddr_t)uap->cbuf; + aiov.iov_len = uap->nbyte; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_READ; + return (rwuio(p, uap->fd, &auio, UIO_READ, retval)); +} + +struct readv_args { + int fd; + struct iovec *iovp; + u_int iovcnt; +}; +readv(p, uap, retval) + struct proc *p; + register struct readv_args *uap; + int *retval; +{ + struct uio auio; + register struct iovec *iov; + int error; + struct iovec aiov[UIO_SMALLIOV]; + + if (uap->iovcnt > UIO_SMALLIOV) { + if (uap->iovcnt > UIO_MAXIOV) + return (EINVAL); + if ((iov = (struct iovec *) + kalloc(sizeof(struct iovec) * (uap->iovcnt))) == 0) + return (ENOMEM); + } else + iov = aiov; + auio.uio_iov = iov; + auio.uio_iovcnt = uap->iovcnt; + auio.uio_rw = UIO_READ; + error = copyin((caddr_t)uap->iovp, (caddr_t)iov, + uap->iovcnt * sizeof (struct iovec)); + if (!error) + error = rwuio(p, uap->fd, &auio, UIO_READ, retval); + if (uap->iovcnt > UIO_SMALLIOV) + kfree(iov, sizeof(struct iovec)*uap->iovcnt); + return (error); +} + +/* + * Write system call + */ +struct write_args { + int fd; + char *cbuf; + u_int nbyte; +}; +write(p, uap, retval) + struct proc *p; + register struct write_args *uap; + int *retval; +{ + struct uio auio; + struct iovec aiov; + + aiov.iov_base = uap->cbuf; + aiov.iov_len = uap->nbyte; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_WRITE; + return (rwuio(p, uap->fd, &auio, UIO_WRITE, retval)); +} + +struct writev_args { + int fd; + struct iovec *iovp; + u_int iovcnt; +}; +writev(p, uap, retval) + struct proc *p; + register struct writev_args *uap; + int *retval; +{ + struct uio auio; + register struct iovec *iov; + int error; + struct iovec aiov[UIO_SMALLIOV]; + + if (uap->iovcnt > UIO_SMALLIOV) { + if (uap->iovcnt > 
UIO_MAXIOV) + return (EINVAL); + if ((iov = (struct iovec *) + kalloc(sizeof(struct iovec) * (uap->iovcnt))) == 0) + return (ENOMEM); + } else + iov = aiov; + auio.uio_iov = iov; + auio.uio_iovcnt = uap->iovcnt; + auio.uio_rw = UIO_WRITE; + error = copyin((caddr_t)uap->iovp, (caddr_t)iov, + uap->iovcnt * sizeof (struct iovec)); + if (!error) + error = rwuio(p, uap->fd, &auio, UIO_WRITE, retval); + if (uap->iovcnt > UIO_SMALLIOV) + kfree(iov, sizeof(struct iovec)*uap->iovcnt); + return (error); +} + +rwuio(p, fdes, uio, rw, retval) + struct proc *p; + int fdes; + register struct uio *uio; + enum uio_rw rw; + int *retval; +{ + struct file *fp; + register struct iovec *iov; + int i, count, flag, error; + + if (error = fdgetf(p, fdes, &fp)) + return (error); + + if ((fp->f_flag&(rw==UIO_READ ? FREAD : FWRITE)) == 0) { + return(EBADF); + } + uio->uio_resid = 0; + uio->uio_segflg = UIO_USERSPACE; + uio->uio_procp = p; + iov = uio->uio_iov; + for (i = 0; i < uio->uio_iovcnt; i++) { + if (iov->iov_len < 0) { + return(EINVAL); + } + uio->uio_resid += iov->iov_len; + if (uio->uio_resid < 0) { + return(EINVAL); + } + iov++; + } + count = uio->uio_resid; + if (rw == UIO_READ) { + if (error = (*fp->f_ops->fo_read)(fp, uio, fp->f_cred)) + if (uio->uio_resid != count && (error == ERESTART || + error == EINTR || error == EWOULDBLOCK)) + error = 0; + } else { + if (error = (*fp->f_ops->fo_write)(fp, uio, fp->f_cred)) { + if (uio->uio_resid != count && (error == ERESTART || + error == EINTR || error == EWOULDBLOCK)) + error = 0; + if (error == EPIPE) + psignal(p, SIGPIPE); + } + } + *retval = count - uio->uio_resid; + return(error); +} + +/* + * Ioctl system call + */ +struct ioctl_args { + int fd; + u_long com; + caddr_t data; +}; +/* ARGSUSED */ +ioctl(p, uap, retval) + struct proc *p; + register struct ioctl_args *uap; + register_t *retval; +{ + struct file *fp; + register u_long com; + register int error; + register u_int size; + caddr_t data, memp; + int tmp; +#define 
STK_PARAMS 128 + char stkbuf[STK_PARAMS]; + + if (error = fdgetf(p, uap->fd, &fp)) + return (error); + + if ((fp->f_flag & (FREAD | FWRITE)) == 0) + return (EBADF); + + /*### LD 6/11/97 Hack Alert: this is to get AppleTalk to work + * while implementing an ATioctl system call + */ +#if NETAT + { + extern int appletalk_inited; + + if (appletalk_inited && ((uap->com & 0x0000FFFF) == 0xff99)) { +#ifdef APPLETALK_DEBUG + kprintf("ioctl: special AppleTalk \n"); +#endif + error = (*fp->f_ops->fo_ioctl)(fp, uap->com, uap->data, p); + return(error); + } + } + +#endif /* NETAT */ + + + switch (com = uap->com) { + case FIONCLEX: + *fdflags(p, uap->fd) &= ~UF_EXCLOSE; + return (0); + case FIOCLEX: + *fdflags(p, uap->fd) |= UF_EXCLOSE; + return (0); + } + + /* + * Interpret high order word to find amount of data to be + * copied to/from the user's address space. + */ + size = IOCPARM_LEN(com); + if (size > IOCPARM_MAX) + return (ENOTTY); + memp = NULL; + if (size > sizeof (stkbuf)) { + if ((memp = (caddr_t)kalloc(size)) == 0) + return(ENOMEM); + data = memp; + } else + data = stkbuf; + if (com&IOC_IN) { + if (size) { + error = copyin(uap->data, data, (u_int)size); + if (error) { + if (memp) + kfree(memp, size); + return (error); + } + } else + *(caddr_t *)data = uap->data; + } else if ((com&IOC_OUT) && size) + /* + * Zero the buffer so the user always + * gets back something deterministic. 
+ */ + bzero(data, size); + else if (com&IOC_VOID) + *(caddr_t *)data = uap->data; + + switch (com) { + + case FIONBIO: + if (tmp = *(int *)data) + fp->f_flag |= FNONBLOCK; + else + fp->f_flag &= ~FNONBLOCK; + error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&tmp, p); + break; + + case FIOASYNC: + if (tmp = *(int *)data) + fp->f_flag |= FASYNC; + else + fp->f_flag &= ~FASYNC; + error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, (caddr_t)&tmp, p); + break; + + case FIOSETOWN: + tmp = *(int *)data; + if (fp->f_type == DTYPE_SOCKET) { + ((struct socket *)fp->f_data)->so_pgid = tmp; + error = 0; + break; + } + if (tmp <= 0) { + tmp = -tmp; + } else { + struct proc *p1 = pfind(tmp); + if (p1 == 0) { + error = ESRCH; + break; + } + tmp = p1->p_pgrp->pg_id; + } + error = (*fp->f_ops->fo_ioctl) + (fp, (int)TIOCSPGRP, (caddr_t)&tmp, p); + break; + + case FIOGETOWN: + if (fp->f_type == DTYPE_SOCKET) { + error = 0; + *(int *)data = ((struct socket *)fp->f_data)->so_pgid; + break; + } + error = (*fp->f_ops->fo_ioctl)(fp, TIOCGPGRP, data, p); + *(int *)data = -*(int *)data; + break; + + default: + error = (*fp->f_ops->fo_ioctl)(fp, com, data, p); + /* + * Copy any data to user, size was + * already set and checked above. + */ + if (error == 0 && (com&IOC_OUT) && size) + error = copyout(data, uap->data, (u_int)size); + break; + } + if (memp) + kfree(memp, size); + return (error); +} + + +int selwait, nselcoll; + +/* + * Select system call. 
+ */ +struct select_args { + int nd; + u_int32_t *in; + u_int32_t *ou; + u_int32_t *ex; + struct timeval *tv; +}; + +extern int selcontinue(int error); +static int selscan( struct proc *p, u_int32_t *ibits, u_int32_t *obits, + int nfd, register_t *retval); + +select(p, uap, retval) + register struct proc *p; + register struct select_args *uap; + register_t *retval; +{ + int s, error = 0, timo; + u_int ni, nw; + thread_act_t th_act; + struct uthread *uth; + struct _select *sel; + int needzerofill = 1; + + th_act = current_act(); + uth = get_bsdthread_info(th_act); + sel = &uth->uu_state.ss_select; + retval = (int *)get_bsduthreadrval(th_act); + *retval = 0; + + if (uap->nd < 0) + return (EINVAL); + + if (uap->nd > p->p_fd->fd_nfiles) + uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */ + + nw = howmany(uap->nd, NFDBITS); + ni = nw * sizeof(fd_mask); + + /* + * if this is the first select by the thread + * allocate the space for bits. + */ + if (sel->nbytes == 0) { + sel->nbytes = 3 * ni; + MALLOC(sel->ibits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK); + MALLOC(sel->obits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK); + bzero((caddr_t)sel->ibits, sel->nbytes); + bzero((caddr_t)sel->obits, sel->nbytes); + needzerofill = 0; + } + + /* + * if the previously allocated space for the bits + * is smaller than what is requested. Reallocate. 
+ */ + if (sel->nbytes < (3 * ni)) { + sel->nbytes = (3 * ni); + FREE(sel->ibits, M_TEMP); + FREE(sel->obits, M_TEMP); + MALLOC(sel->ibits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK); + MALLOC(sel->obits, u_int32_t *, sel->nbytes, M_TEMP, M_WAITOK); + bzero((caddr_t)sel->ibits, sel->nbytes); + bzero((caddr_t)sel->obits, sel->nbytes); + needzerofill = 0; + } + + if (needzerofill) { + bzero((caddr_t)sel->ibits, sel->nbytes); + bzero((caddr_t)sel->obits, sel->nbytes); + } + + /* + * get the bits from the user address space + */ +#define getbits(name, x) \ + do { \ + if (uap->name && (error = copyin((caddr_t)uap->name, \ + (caddr_t)&sel->ibits[(x) * nw], ni))) \ + goto continuation; \ + } while (0) + + getbits(in, 0); + getbits(ou, 1); + getbits(ex, 2); +#undef getbits + + if (uap->tv) { + error = copyin((caddr_t)uap->tv, (caddr_t)&sel->atv, + sizeof (sel->atv)); + if (error) + goto continuation; + if (itimerfix(&sel->atv)) { + error = EINVAL; + goto continuation; + } + s = splhigh(); + timeradd(&sel->atv, &time, &sel->atv); + timo = hzto(&sel->atv); + splx(s); + } else + timo = 0; + sel->poll = timo; +continuation: + selcontinue(error); +} + +int +selcontinue(error) +{ + int s, ncoll, timo; + u_int ni, nw; + thread_act_t th_act; + struct uthread *uth; + struct proc *p; + struct select_args *uap; + int *retval; + struct _select *sel; + + p = current_proc(); + th_act = current_act(); + uap = (struct select_args *)get_bsduthreadarg(th_act); + retval = (int *)get_bsduthreadrval(th_act); + uth = get_bsdthread_info(th_act); + sel = &uth->uu_state.ss_select; + +retry: + if (error != 0) + goto done; + ncoll = nselcoll; + p->p_flag |= P_SELECT; + error = selscan(p, sel->ibits, sel->obits, uap->nd, retval); + if (error || *retval) + goto done; + s = splhigh(); + /* this should be timercmp(&time, &atv, >=) */ + if (uap->tv && (time.tv_sec > sel->atv.tv_sec || + time.tv_sec == sel->atv.tv_sec && time.tv_usec >= sel->atv.tv_usec)) { + splx(s); + goto done; + } + /* + * To effect 
a poll, the timeout argument should be + * non-nil, pointing to a zero-valued timeval structure. + */ + timo = sel->poll; + + if (uap->tv && (timo == 0)) { + splx(s); + goto done; + } + if ((p->p_flag & P_SELECT) == 0 || nselcoll != ncoll) { + splx(s); + goto retry; + } + p->p_flag &= ~P_SELECT; + +#if 1 /* Use Continuations */ + error = tsleep0((caddr_t)&selwait, PSOCK | PCATCH, "select", timo, selcontinue); + /* NOTREACHED */ +#else + error = tsleep((caddr_t)&selwait, PSOCK | PCATCH, "select", timo); +#endif + splx(s); + if (error == 0) + goto retry; +done: + p->p_flag &= ~P_SELECT; + /* select is not restarted after signals... */ + if (error == ERESTART) + error = EINTR; + if (error == EWOULDBLOCK) + error = 0; + + nw = howmany(uap->nd, NFDBITS); + ni = nw * sizeof(fd_mask); + +#define putbits(name, x) \ + do { \ + if (uap->name && (error2 = copyout((caddr_t)&sel->obits[(x) * nw], \ + (caddr_t)uap->name, ni))) \ + error = error2; \ + } while (0) + + if (error == 0) { + int error2; + + putbits(in, 0); + putbits(ou, 1); + putbits(ex, 2); +#undef putbits + } + +#if defined (__i386__) + return(error); +#else + unix_syscall_return(error); +#endif +} + +static int +selscan(p, ibits, obits, nfd, retval) + struct proc *p; + u_int32_t *ibits, *obits; + int nfd; + register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + register int msk, i, j, fd; + register u_int32_t bits; + struct file *fp; + int n = 0; + static int flag[3] = { FREAD, FWRITE, 0 }; + u_int32_t *iptr, *optr; + u_int nw; + + /* + * Problems when reboot; due to MacOSX signal probs + * in Beaker1C ; verify that the p->p_fd is valid + */ + if (fdp == NULL) { + *retval=0; + return(EIO); + } + + nw = howmany(nfd, NFDBITS); + + for (msk = 0; msk < 3; msk++) { + iptr = (u_int32_t *)&ibits[msk * nw]; + optr = (u_int32_t *)&obits[msk * nw]; + for (i = 0; i < nfd; i += NFDBITS) { + bits = iptr[i/NFDBITS]; + while ((j = ffs(bits)) && (fd = i + --j) < nfd) { + bits &= ~(1 << j); + fp = fdp->fd_ofiles[fd]; + 
if (fp == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) + return (EBADF); + if (fp->f_ops && (*fp->f_ops->fo_select)(fp, flag[msk], p)) { + optr[fd/NFDBITS] |= (1 << (fd % NFDBITS)); + n++; + } + } + } + } + *retval = n; + return (0); +} + +/*ARGSUSED*/ +seltrue(dev, flag, p) + dev_t dev; + int flag; + struct proc *p; +{ + + return (1); +} + +/* + * Record a select request. + */ +void +selrecord(selector, sip) + struct proc *selector; + struct selinfo *sip; +{ + int oldpri = splhigh(); + thread_t my_thread = current_thread(); + thread_t selthread; + + selthread = sip->si_thread; + + if (selthread == my_thread) { + splx(oldpri); + return; + } + + if (selthread && is_thread_active(selthread) && + get_thread_waitevent(selthread) == (caddr_t)&selwait) { + sip->si_flags |= SI_COLL; + splx(oldpri); + } else { + sip->si_thread = my_thread; + splx(oldpri); + if (selthread) { + /* thread_deallocate(selthread); */ + act_deallocate(getact_thread(selthread)); + } + /* do I need act reference ??? 
*/ + /* thread_reference(sip->si_thread); */ + act_reference(getact_thread(sip->si_thread)); + } + + return; +} + +void +selwakeup(sip) + register struct selinfo *sip; +{ + register thread_t the_thread = (thread_t)sip->si_thread; + int oldpri; + struct proc *p; + thread_act_t th_act; + + if (the_thread == 0) + return; + + if (sip->si_flags & SI_COLL) { + nselcoll++; + sip->si_flags &= ~SI_COLL; + wakeup((caddr_t)&selwait); + } + + oldpri = splhigh(); + + th_act = (thread_act_t)getact_thread(the_thread); + + if (is_thread_active(the_thread)) { + if (get_thread_waitevent(the_thread) == &selwait) + clear_wait(the_thread, THREAD_AWAKENED); + if (p = current_proc()) + p->p_flag &= ~P_SELECT; + } + + /* th_act = (thread_act_t)getact_thread(the_thread); */ + + act_deallocate(th_act); + + sip->si_thread = 0; + + splx(oldpri); +} + +void +selthreadclear(sip) + register struct selinfo *sip; +{ + thread_act_t th_act; + + if (sip->si_thread) { + th_act = (thread_act_t)getact_thread(sip->si_thread); + act_deallocate(th_act); + } +} + + +extern struct eventqelt *evprocdeque(struct proc *p, struct eventqelt *eqp); + +/* + * called upon socket close. 
deque and free all events for + * the socket + */ +evsofree(struct socket *sp) +{ + struct eventqelt *eqp, *next; + + if (sp == NULL) return; + + for (eqp = sp->so_evlist.tqh_first; eqp != NULL; eqp = next) { + next = eqp->ee_slist.tqe_next; + evprocdeque(eqp->ee_proc, eqp); // remove from proc q if there + TAILQ_REMOVE(&sp->so_evlist, eqp, ee_slist); // remove from socket q + FREE(eqp, M_TEMP); + } +} + + +#define DBG_EVENT 0x10 + +#define DBG_POST 0x10 +#define DBG_WATCH 0x11 +#define DBG_WAIT 0x12 +#define DBG_MOD 0x13 +#define DBG_EWAKEUP 0x14 +#define DBG_ENQUEUE 0x15 +#define DBG_DEQUEUE 0x16 + +#define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST) +#define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH) +#define DBG_MISC_WAIT MISCDBG_CODE(DBG_EVENT,DBG_WAIT) +#define DBG_MISC_MOD MISCDBG_CODE(DBG_EVENT,DBG_MOD) +#define DBG_MISC_EWAKEUP MISCDBG_CODE(DBG_EVENT,DBG_EWAKEUP) +#define DBG_MISC_ENQUEUE MISCDBG_CODE(DBG_EVENT,DBG_ENQUEUE) +#define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE) + + +/* + * enque this event if it's not already queued. wakeup + the proc if we do queue this event to it. 
+ */ +evprocenque(struct eventqelt *eqp) +{ + struct proc *p; + + assert(eqp); + KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, eqp, eqp->ee_flags, eqp->ee_eventmask,0,0); + if (eqp->ee_flags & EV_QUEUED) { + KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0); + return; + } + eqp->ee_flags |= EV_QUEUED; + eqp->ee_eventmask = 0; // disarm + p = eqp->ee_proc; + TAILQ_INSERT_TAIL(&p->p_evlist, eqp, ee_plist); + KERNEL_DEBUG(DBG_MISC_EWAKEUP,0,0,0,eqp,0); + wakeup(&p->p_evlist); + KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0); +} + +/* + * given either a sockbuf or a socket run down the + * event list and queue ready events found + */ +postevent(struct socket *sp, struct sockbuf *sb, int event) +{ + int mask; + struct eventqelt *evq; + register struct tcpcb *tp; + + if (sb) sp = sb->sb_so; + if (!sp || sp->so_evlist.tqh_first == NULL) return; + + KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,0,0); + + for (evq = sp->so_evlist.tqh_first; + evq != NULL; evq = evq->ee_slist.tqe_next) { + + mask = 0; + + /* ready for reading: + - byte cnt >= receive low water mark + - read-half of conn closed + - conn pending for listening sock + - socket error pending + + ready for writing + - byte cnt avail >= send low water mark + - write half of conn closed + - socket error pending + - non-blocking conn completed successfully + + exception pending + - out of band data + - sock at out of band mark + + */ + switch (event & EV_DMASK) { + + case EV_RWBYTES: + case EV_OOB: + case EV_RWBYTES|EV_OOB: + if (event & EV_OOB) { + if ((evq->ee_eventmask & EV_EX)) { + if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) { + mask |= EV_EX|EV_OOB; + } + } + } + if (event & EV_RWBYTES) { + if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) { + if ((sp->so_type == SOCK_STREAM) && (sp->so_error == ECONNREFUSED) || + (sp->so_error == ECONNRESET)) { + if ((sp->so_pcb == 0) || + !(tp = sototcpcb(sp)) || + (tp->t_state == TCPS_CLOSED)) { + mask |= EV_RE|EV_RESET; + break; + } + } + if 
(sp->so_state & SS_CANTRCVMORE) { + mask |= EV_RE|EV_FIN; + evq->ee_req.er_rcnt = sp->so_rcv.sb_cc; + break; + } + mask |= EV_RE; + evq->ee_req.er_rcnt = sp->so_rcv.sb_cc; + } + + if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) { + if ((sp->so_type == SOCK_STREAM) &&(sp->so_error == ECONNREFUSED) || + (sp->so_error == ECONNRESET)) { + if ((sp->so_pcb == 0) || + !(tp = sototcpcb(sp)) || + (tp->t_state == TCPS_CLOSED)) { + mask |= EV_WR|EV_RESET; + break; + } + } + mask |= EV_WR; + evq->ee_req.er_wcnt = sbspace(&sp->so_snd); + } + } + break; + + case EV_RCONN: + if ((evq->ee_eventmask & EV_RE)) { + evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one + mask |= EV_RE|EV_RCONN; + } + break; + + case EV_WCONN: + if ((evq->ee_eventmask & EV_WR)) { + mask |= EV_WR|EV_WCONN; + } + break; + + case EV_RCLOSED: + if ((evq->ee_eventmask & EV_RE)) { + mask |= EV_RE|EV_RCLOSED; + } + break; + + case EV_WCLOSED: + if ((evq->ee_eventmask & EV_WR)) { + mask |= EV_WR|EV_WCLOSED; + } + break; + + case EV_FIN: + if (evq->ee_eventmask & EV_RE) { + mask |= EV_RE|EV_FIN; + } + break; + + case EV_RESET: + case EV_TIMEOUT: + if (evq->ee_eventmask & EV_RE) { + mask |= EV_RE | event; + } + if (evq->ee_eventmask & EV_WR) { + mask |= EV_WR | event; + } + break; + + default: + return; + } /* switch */ + + if (mask) { + evq->ee_req.er_eventbits |= mask; + KERNEL_DEBUG(DBG_MISC_POST, evq, evq->ee_req.er_eventbits, mask,0,0); + evprocenque(evq); + } + } + KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,0,0); +} + +/* + * remove and return the first event (eqp=NULL) or a specific + * event, or return NULL if no events found + */ +struct eventqelt * +evprocdeque(struct proc *p, struct eventqelt *eqp) +{ + + KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_START,p,eqp,0,0,0); + + if (eqp && ((eqp->ee_flags & EV_QUEUED) == NULL)) { + KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END,0,0,0,0,0); + return(NULL); + } + if (p->p_evlist.tqh_first == NULL) { + KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END,0,0,0,0,0); + 
return(NULL); + } + if (eqp == NULL) { // remove first + eqp = p->p_evlist.tqh_first; + } + TAILQ_REMOVE(&p->p_evlist, eqp, ee_plist); + eqp->ee_flags &= ~EV_QUEUED; + KERNEL_DEBUG(DBG_MISC_DEQUEUE|DBG_FUNC_END,eqp,0,0,0,0); + return(eqp); +} + +struct evwatch_args { + struct eventreq *u_req; + int u_eventmask; +}; + + +/* + * watchevent system call. user passes us an event to watch + * for. we malloc an event object, initialize it, and queue + * it to the open socket. when the event occurs, postevent() + * will enque it back to our proc where we can retrieve it + * via waitevent(). + * + * should this prevent duplicate events on same socket? + */ +int +watchevent(p, uap, retval) + struct proc *p; + struct evwatch_args *uap; + register_t *retval; +{ + struct eventqelt *eqp = (struct eventqelt *)0; + struct eventqelt *np; + struct eventreq *erp; + struct file *fp; + struct socket *sp; + int error; + + KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0); + + // get a qelt and fill with users req + MALLOC(eqp, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK); + if (!eqp) panic("can't MALLOC eqp"); + erp = &eqp->ee_req; + // get users request pkt + if (error = copyin((caddr_t)uap->u_req, (caddr_t)erp, + sizeof(struct eventreq))) { + FREE(eqp, M_TEMP); + KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0); + return(error); + } + KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,eqp,0,0); + // validate, freeing qelt if errors + error = 0; + if (erp->er_type != EV_FD) { + error = EINVAL; + } else if (erp->er_handle < 0) { + error = EBADF; + } else if (erp->er_handle > p->p_fd->fd_nfiles) { + error = EBADF; + } else if ((fp = *fdfile(p, erp->er_handle)) == NULL) { + error = EBADF; + } else if (fp->f_type != DTYPE_SOCKET) { + error = EINVAL; + } + if (error) { + FREE(eqp,M_TEMP); + KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0); + return(error); + } + + erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0; + eqp->ee_proc = p; + 
eqp->ee_eventmask = uap->u_eventmask & EV_MASK; + eqp->ee_flags = 0; + + sp = (struct socket *)fp->f_data; + assert(sp != NULL); + + // only allow one watch per file per proc + for (np = sp->so_evlist.tqh_first; np != NULL; np = np->ee_slist.tqe_next) { + if (np->ee_proc == p) { + FREE(eqp,M_TEMP); + KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0); + return(EINVAL); + } + } + + TAILQ_INSERT_TAIL(&sp->so_evlist, eqp, ee_slist); + postevent(sp, 0, EV_RWBYTES); // catch existing events + KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0); + return(0); +} + +struct evwait_args { + struct eventreq *u_req; + struct timeval *tv; +}; + +/* + * waitevent system call. + * grabs the next waiting event for this proc and returns + * it. if no events, user can request to sleep with timeout + * or poll mode (tv=NULL); + */ +int +waitevent(p, uap, retval) + struct proc *p; + struct evwait_args *uap; + register_t *retval; +{ + int error = 0; + struct eventqelt *eqp; + int timo; + struct timeval atv; + int s; + + if (uap->tv) { + error = copyin((caddr_t)uap->tv, (caddr_t)&atv, + sizeof (atv)); + if (error) + return(error); + if (itimerfix(&atv)) { + error = EINVAL; + return(error); + } + s = splhigh(); + timeradd(&atv, &time, &atv); + timo = hzto(&atv); + splx(s); + } else + timo = 0; + + KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0); + +retry: + s = splhigh(); + if ((eqp = evprocdeque(p,NULL)) != NULL) { + splx(s); + error = copyout((caddr_t)&eqp->ee_req, (caddr_t)uap->u_req, + sizeof(struct eventreq)); + KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error, + eqp->ee_req.er_handle,eqp->ee_req.er_eventbits,eqp,0); + return(error); + } else { + if (uap->tv && (timo == 0)) { + splx(s); + *retval = 1; // poll failed + KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0); + return(error); + } + + KERNEL_DEBUG(DBG_MISC_WAIT, 1,&p->p_evlist,0,0,0); + error = tsleep(&p->p_evlist, PSOCK | PCATCH, "waitevent", timo); + KERNEL_DEBUG(DBG_MISC_WAIT, 2,&p->p_evlist,0,0,0); + 
splx(s); + if (error == 0) + goto retry; + if (error == ERESTART) + error = EINTR; + if (error == EWOULDBLOCK) { + *retval = 1; + error = 0; + } + } + KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0); + return(error); +} + +struct modwatch_args { + struct eventreq *u_req; + int u_eventmask; +}; + +/* + * modwatch system call. user passes in event to modify. + * if we find it we reset the event bits and que/deque event + * it needed. + */ +int +modwatch(p, uap, retval) + struct proc *p; + struct modwatch_args *uap; + register_t *retval; +{ + struct eventreq er; + struct eventreq *erp = &er; + struct eventqelt *evq; + int error; + struct file *fp; + struct socket *sp; + int flag; + + KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0); + + // get users request pkt + if (error = copyin((caddr_t)uap->u_req, (caddr_t)erp, + sizeof(struct eventreq))) return(error); + + if (erp->er_type != EV_FD) return(EINVAL); + if (erp->er_handle < 0) return(EBADF); + if (erp->er_handle > p->p_fd->fd_nfiles) return(EBADF); + if ((fp = *fdfile(p, erp->er_handle)) == NULL) + return(EBADF); + if (fp->f_type != DTYPE_SOCKET) return(EINVAL); // for now must be sock + sp = (struct socket *)fp->f_data; + assert(sp != NULL); + + + // locate event if possible + for (evq = sp->so_evlist.tqh_first; + evq != NULL; evq = evq->ee_slist.tqe_next) { + if (evq->ee_proc == p) break; + } + + if (evq == NULL) { + KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0); + return(EINVAL); + } + KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,evq,0,0); + + if (uap->u_eventmask == EV_RM) { + evprocdeque(p, evq); + TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); + FREE(evq, M_TEMP); + KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0); + return(0); + } + + switch (uap->u_eventmask & EV_MASK) { + + case 0: + flag = 0; + break; + + case EV_RE: + case EV_WR: + case EV_RE|EV_WR: + flag = EV_RWBYTES; + break; + + case EV_EX: + flag = EV_OOB; + break; + + case EV_EX|EV_RE: + case EV_EX|EV_WR: + case 
EV_EX|EV_RE|EV_WR: + flag = EV_OOB|EV_RWBYTES; + break; + + default: + return(EINVAL); + } + + evq->ee_eventmask = uap->u_eventmask & EV_MASK; + evprocdeque(p, evq); + evq->ee_req.er_eventbits = 0; + postevent(sp, 0, flag); + KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,sp,flag,0); + return(0); +} diff --git a/bsd/kern/sys_socket.c b/bsd/kern/sys_socket.c new file mode 100644 index 000000000..3a14f6c95 --- /dev/null +++ b/bsd/kern/sys_socket.c @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)sys_socket.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include /* XXX */ +#include +#include +#include +#include + +#include +#include + +int soo_read __P((struct file *fp, struct uio *uio, + struct ucred *cred)); +int soo_write __P((struct file *fp, struct uio *uio, + struct ucred *cred)); +int soo_close __P((struct file *fp, struct proc *p)); + +int soo_select __P((struct file *fp, int which, struct proc *p)); + +struct fileops socketops = + { soo_read, soo_write, soo_ioctl, soo_select, soo_close }; + +/* ARGSUSED */ +int +soo_read(fp, uio, cred) + struct file *fp; + struct uio *uio; + struct ucred *cred; +{ + struct socket *so = (struct socket *)fp->f_data; + struct kextcb *kp; + int stat; + int (*fsoreceive) __P((struct socket *so, + struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, + struct mbuf **controlp, int *flagsp)); + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + fsoreceive = so->so_proto->pr_usrreqs->pru_soreceive; + if (fsoreceive != soreceive) + { kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soreceive) + (*kp->e_soif->sf_soreceive)(so, 0, &uio, + 0, 0, 0, kp); + kp = kp->e_next; + } + + } + + stat = (*fsoreceive)(so, 0, uio, 0, 0, 0); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return stat; +} + +/* ARGSUSED */ +int +soo_write(fp, uio, cred) + struct file *fp; + struct uio *uio; + struct ucred *cred; +{ + struct socket *so = (struct socket *)fp->f_data; + int (*fsosend) __P((struct socket *so, struct sockaddr *addr, + struct uio *uio, struct mbuf *top, + struct mbuf *control, int flags)); + struct kextcb *kp; + int stat; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + fsosend = so->so_proto->pr_usrreqs->pru_sosend; + if (fsosend != sosend) + { kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sosend) + (*kp->e_soif->sf_sosend)(so, 0, &uio, + 0, 0, 0, kp); + kp = kp->e_next; + } + } + + stat = 
(*fsosend)(so, 0, uio, 0, 0, 0); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return stat; +} + +int +soo_ioctl(fp, cmd, data, p) + struct file *fp; + u_long cmd; + register caddr_t data; + struct proc *p; +{ + register struct socket *so = (struct socket *)fp->f_data; + + struct sockopt sopt; + struct kextcb *kp; + int error = 0; + kp = sotokextcb(so); + sopt.sopt_level = cmd; + sopt.sopt_name = (int)data; + sopt.sopt_p = p; + + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + while (kp) + { if (kp->e_soif && kp->e_soif->sf_socontrol) + (*kp->e_soif->sf_socontrol)(so, &sopt, kp); + kp = kp->e_next; + } + + switch (cmd) { + + case FIONBIO: + if (*(int *)data) + so->so_state |= SS_NBIO; + else + so->so_state &= ~SS_NBIO; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + + case FIOASYNC: + if (*(int *)data) { + so->so_state |= SS_ASYNC; + so->so_rcv.sb_flags |= SB_ASYNC; + so->so_snd.sb_flags |= SB_ASYNC; + } else { + so->so_state &= ~SS_ASYNC; + so->so_rcv.sb_flags &= ~SB_ASYNC; + so->so_snd.sb_flags &= ~SB_ASYNC; + } + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + + case FIONREAD: + *(int *)data = so->so_rcv.sb_cc; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + + case SIOCSPGRP: + so->so_pgid = *(int *)data; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + + case SIOCGPGRP: + *(int *)data = so->so_pgid; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + + case SIOCATMARK: + *(int *)data = (so->so_state&SS_RCVATMARK) != 0; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + + case SIOCSETOT: { + /* + * Set socket level options here and then call protocol + * specific routine. 
+ */ + struct socket *cloned_so = NULL; + int cloned_fd = *(int *)data; + + /* let's make sure it's either -1 or a valid file descriptor */ + if (cloned_fd != -1) { + struct file *cloned_fp; + error = getsock(p->p_fd, cloned_fd, &cloned_fp); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + + cloned_so = (struct socket *)cloned_fp->f_data; + } + + /* Always set socket non-blocking for OT */ + fp->f_flag |= FNONBLOCK; + so->so_state |= SS_NBIO; + so->so_options |= SO_DONTTRUNC | SO_WANTMORE; + + if (cloned_so && so != cloned_so) { + /* Flags options */ + so->so_options |= cloned_so->so_options & ~SO_ACCEPTCONN; + + /* SO_LINGER */ + if (so->so_options & SO_LINGER) + so->so_linger = cloned_so->so_linger; + + /* SO_SNDBUF, SO_RCVBUF */ + if (cloned_so->so_snd.sb_hiwat > 0) { + if (sbreserve(&so->so_snd, cloned_so->so_snd.sb_hiwat) == 0) { + error = ENOBUFS; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + } + if (cloned_so->so_rcv.sb_hiwat > 0) { + if (sbreserve(&so->so_rcv, cloned_so->so_rcv.sb_hiwat) == 0) { + error = ENOBUFS; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + } + + /* SO_SNDLOWAT, SO_RCVLOWAT */ + so->so_snd.sb_lowat = + (cloned_so->so_snd.sb_lowat > so->so_snd.sb_hiwat) ? + so->so_snd.sb_hiwat : cloned_so->so_snd.sb_lowat; + so->so_rcv.sb_lowat = + (cloned_so->so_rcv.sb_lowat > so->so_rcv.sb_hiwat) ? 
+ so->so_rcv.sb_hiwat : cloned_so->so_rcv.sb_lowat; + + /* SO_SNDTIMEO, SO_RCVTIMEO */ + so->so_snd.sb_timeo = cloned_so->so_snd.sb_timeo; + so->so_rcv.sb_timeo = cloned_so->so_rcv.sb_timeo; + } + + error = (*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, 0, p); + /* Just ignore protocols that do not understand it */ + if (error == EOPNOTSUPP) + error = 0; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + } + /* + * Interface/routing/protocol specific ioctls: + * interface and routing ioctls should have a + * different entry since a socket's unnecessary + */ + if (IOCGROUP(cmd) == 'i') + error = ifioctl(so, cmd, data, p); + else + if (IOCGROUP(cmd) == 'r') + error = rtioctl(cmd, data, p); + else + error = (*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, 0, p); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return error; +} + +int +soo_select(fp, which, p) + struct file *fp; + int which; + struct proc *p; +{ + register struct socket *so = (struct socket *)fp->f_data; + register int s = splnet(); + int retnum=0; + +/* thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); */ + + switch (which) { + + case FREAD: + if (soreadable(so)) { + splx(s); + retnum = 1; + goto done; + } + selrecord(p, &so->so_rcv.sb_sel); + so->so_rcv.sb_flags |= SB_SEL; + break; + + case FWRITE: + if (sowriteable(so)) { + splx(s); + retnum = 1; + goto done; + } + selrecord(p, &so->so_snd.sb_sel); + so->so_snd.sb_flags |= SB_SEL; + break; + + case 0: + if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) { + splx(s); + retnum = 1; + goto done; + } + selrecord(p, &so->so_rcv.sb_sel); + so->so_rcv.sb_flags |= SB_SEL; + break; + } + splx(s); +done: +/* thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); */ + return (retnum); +} + + +int +soo_stat(so, ub) + register struct socket *so; + register struct stat *ub; +{ + int stat; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + bzero((caddr_t)ub, sizeof (*ub)); + ub->st_mode = S_IFSOCK; 
+ stat = (*so->so_proto->pr_usrreqs->pru_sense)(so, ub); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return stat; +} + +/* ARGSUSED */ +int +soo_close(fp, p) + struct file *fp; + struct proc *p; +{ + int error = 0; + + if (fp->f_data) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + error = soclose((struct socket *)fp->f_data); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + } + + fp->f_data = 0; + return (error); +} diff --git a/bsd/kern/syscalls.c b/bsd/kern/syscalls.c new file mode 100644 index 000000000..5bd80b32a --- /dev/null +++ b/bsd/kern/syscalls.c @@ -0,0 +1,382 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992,1995-1999 Apple Computer, Inc. All rights resereved. 
*/ +/* + * HISTORY + * + * 02-10-99 Clark Warner (warner_c) ta Apple + * Chaned call 227 to copyfile + * 07-14-99 Earsh Nandkeshwar (earsh) at Apple + * Renamed getdirentryattr to getdirentriesattr + * 01-22-98 Clark Warner (warner_c) at Apple + * Created new HFS style Systemcalls + * 25-May-95 Mac Gillon (mgillon) at NeXT + * Created from NS 3.3 and 4.4BSD + * + */ + +char *syscallnames[] = { + "syscall", /* 0 = syscall */ + "exit", /* 1 = exit */ + "fork", /* 2 = fork */ + "read", /* 3 = read */ + "write", /* 4 = write */ + "open", /* 5 = open */ + "close", /* 6 = close */ + "wait4", /* 7 = wait4 */ + "old_creat", /* 8 = old creat */ + "link", /* 9 = link */ + "unlink", /* 10 = unlink */ + "obs_execv", /* 11 = obsolete execv */ + "chdir", /* 12 = chdir */ + "fchdir", /* 13 = fchdir */ + "mknod", /* 14 = mknod */ + "chmod", /* 15 = chmod */ + "chown", /* 16 = chown */ + "sbreak", /* 17 = obsolete sbreak */ + "obs_stat", /* 18 = obsolete stat */ + "old_lseek", /* 19 = old lseek */ + "getpid", /* 20 = getpid */ + "obs_mount", /* 21 = obsolete mount */ + "obs_unmount", /* 22 = obsolete unmount */ + "setuid", /* 23 = setuid */ + "getuid", /* 24 = getuid */ + "geteuid", /* 25 = geteuid */ + "ptrace", /* 26 = ptrace */ + "recvmsg", /* 27 = recvmsg */ + "sendmsg", /* 28 = sendmsg */ + "recvfrom", /* 29 = recvfrom */ + "accept", /* 30 = accept */ + "getpeername", /* 31 = getpeername */ + "getsockname", /* 32 = getsockname */ + "access", /* 33 = access */ + "chflags", /* 34 = chflags */ + "fchflags", /* 35 = fchflags */ + "sync", /* 36 = sync */ + "kill", /* 37 = kill */ + "old_stat", /* 38 = old stat */ + "getppid", /* 39 = getppid */ + "old_lstat", /* 40 = old lstat */ + "dup", /* 41 = dup */ + "pipe", /* 42 = pipe */ + "getegid", /* 43 = getegid */ + "profil", /* 44 = profil */ + "ktrace", /* 45 = ktrace */ + "sigaction", /* 46 = sigaction */ + "getgid", /* 47 = getgid */ + "sigprocmask", /* 48 = sigprocmask */ + "getlogin", /* 49 = getlogin */ + "setlogin", /* 50 = 
setlogin */ + "acct", /* 51 = acct */ + "sigpending", /* 52 = sigpending */ + "sigaltstack", /* 53 = sigaltstack */ + "ioctl", /* 54 = ioctl */ + "reboot", /* 55 = reboot */ + "revoke", /* 56 = revoke */ + "symlink", /* 57 = symlink */ + "readlink", /* 58 = readlink */ + "execve", /* 59 = execve */ + "umask", /* 60 = umask */ + "chroot", /* 61 = chroot */ + "old_fstat", /* 62 = old fstat */ + "old_getkerninfo", /* 63 = old getkerninfo */ + "old_getpagesize", /* 64 = old getpagesize */ + "msync", /* 65 = msync */ + "vfork", /* 66 = vfork */ + "obs_vread", /* 67 = obsolete vread */ + "obs_vwrite", /* 68 = obsolete vwrite */ + "sbrk", /* 69 = sbrk */ + "sstk", /* 70 = sstk */ + "old_mmap", /* 71 = old mmap */ + "obs_vadvise", /* 72 = obsolete vadvise */ + "munmap", /* 73 = munmap */ + "mprotect", /* 74 = mprotect */ + "madvise", /* 75 = madvise */ + "obs_vhangup", /* 76 = obsolete vhangup */ + "obs_vlimit", /* 77 = obsolete vlimit */ + "mincore", /* 78 = mincore */ + "getgroups", /* 79 = getgroups */ + "setgroups", /* 80 = setgroups */ + "getpgrp", /* 81 = getpgrp */ + "setpgid", /* 82 = setpgid */ + "setitimer", /* 83 = setitimer */ + "old_wait", /* 84 = old wait */ + "swapon", /* 85 = swapon */ + "getitimer", /* 86 = getitimer */ + "old_gethostname", /* 87 = old gethostname */ + "old_sethostname", /* 88 = old sethostname */ + "getdtablesize", /* 89 = getdtablesize */ + "dup2", /* 90 = dup2 */ + "#91", /* 91 = getdopt */ + "fcntl", /* 92 = fcntl */ + "select", /* 93 = select */ + "#94", /* 94 = setdopt */ + "fsync", /* 95 = fsync */ + "setpriority", /* 96 = setpriority */ + "socket", /* 97 = socket */ + "connect", /* 98 = connect */ + "old_accept", /* 99 = old accept */ + "getpriority", /* 100 = getpriority */ + "old_send", /* 101 = old send */ + "old_recv", /* 102 = old recv */ + "sigreturn", /* 103 = sigreturn */ + "bind", /* 104 = bind */ + "setsockopt", /* 105 = setsockopt */ + "listen", /* 106 = listen */ + "obs_vtimes", /* 107 = obsolete vtimes */ + 
"old_sigvec", /* 108 = old sigvec */ + "old_sigblock", /* 109 = old sigblock */ + "old_sigsetmask", /* 110 = old sigsetmask */ + "sigsuspend", /* 111 = sigsuspend */ + "old_sigstack", /* 112 = old sigstack */ + "old_recvmsg", /* 113 = old recvmsg */ + "old_sendmsg", /* 114 = old sendmsg */ + "obs_vtrace", /* 115 = obsolete vtrace */ + "gettimeofday", /* 116 = gettimeofday */ + "getrusage", /* 117 = getrusage */ + "getsockopt", /* 118 = getsockopt */ + "#119", /* 119 = nosys */ + "readv", /* 120 = readv */ + "writev", /* 121 = writev */ + "settimeofday", /* 122 = settimeofday */ + "fchown", /* 123 = fchown */ + "fchmod", /* 124 = fchmod */ + "old_recvfrom", /* 125 = old recvfrom */ + "old_setreuid", /* 126 = old setreuid */ + "old_setregid", /* 127 = old setregid */ + "rename", /* 128 = rename */ + "old_truncate", /* 129 = old truncate */ + "old_ftruncate", /* 130 = old ftruncate */ + "flock", /* 131 = flock */ + "mkfifo", /* 132 = mkfifo */ + "sendto", /* 133 = sendto */ + "shutdown", /* 134 = shutdown */ + "socketpair", /* 135 = socketpair */ + "mkdir", /* 136 = mkdir */ + "rmdir", /* 137 = rmdir */ + "utimes", /* 138 = utimes */ + "#139", /* 139 = nosys */ + "adjtime", /* 140 = adjtime */ + "old_getpeername", /* 141 = old getpeername */ + "old_gethostid", /* 142 = old gethostid */ + "old_sethostid", /* 143 = old sethostid */ + "old_getrlimit", /* 144 = old getrlimit */ + "old_setrlimit", /* 145 = old setrlimit */ + "old_killpg", /* 146 = old killpg */ + "setsid", /* 147 = setsid */ + "obs_setquota", /* 148 = obsolete setquota */ + "obs_quota", /* 149 = obsolete quota */ + "old_getsockname", /* 150 = old getsockname */ + "#151", /* 151 = nosys */ + "setprivexec", /* 152 = setprivexec */ + "#153", /* 153 = nosys */ + "#154", /* 154 = nosys */ + "nfssvc", /* 155 = nfssvc */ + "getdirentries", /* 156 =getdirentries */ + "statfs", /* 157 = statfs */ + "fstatfs", /* 158 = fstatfs */ + "unmount", /* 159 = unmount */ + "obs_async_daemon", /* 160 = obsolete async_daemon 
*/ + "getfh", /* 161 = getfh */ + "old_getdomainname",/* 162 = old getdomainname */ + "old_setdomainname",/* 163 = old setdomainname */ + "obs_pcfs_mount", /* 164 = obsolete pcfs_mount */ + "quotactl", /* 165 = quotactl */ + "obs_exportfs", /* 166 = obsolete exportfs */ + "mount", /* 167 = mount */ + "obs_ustat", /* 168 = obsolete ustat */ + "#169", /* 169 = nosys */ + "obs_table", /* 170 = obsolete table */ + "old_wait_3", /* 171 = old wait_3 */ + "obs_rpause", /* 172 = obsolete rpause */ + "#173", /* 173 = nosys */ + "obs_getdents", /* 174 = obsolete getdents */ + "#175", /* 175 = nosys */ + "add_profil", /* 176 = add_profil */ /* NeXT */ + "#177", /* 177 = nosys */ + "#178", /* 178 = nosys */ + "#179", /* 179 = nosys */ + "kdebug_trace", /* 180 = kdebug_trace */ + "setgid", /* 181 = setgid */ + "setegid", /* 182 = setegid */ + "seteuid", /* 183 = seteuid */ +#ifdef LFS + "lfs_bmapv", /* 184 = lfs_bmapv */ + "lfs_markv", /* 185 = lfs_markv */ + "lfs_segclean", /* 186 = lfs_segclean */ + "lfs_segwait", /* 187 = lfs_segwait */ +#else + "#184", /* 184 = nosys */ + "#185", /* 185 = nosys */ + "#186", /* 186 = nosys */ + "#187", /* 187 = nosys */ +#endif + "stat", /* 188 = stat */ + "fstat", /* 189 = fstat */ + "lstat", /* 190 = lstat */ + "pathconf", /* 191 = pathconf */ + "fpathconf", /* 192 = fpathconf */ + "#193", /* 193 = nosys */ + "getrlimit", /* 194 = getrlimit */ + "setrlimit", /* 195 = setrlimit */ + "#196", /* 196 = unused */ + "mmap", /* 197 = mmap */ + "__syscall", /* 198 = __syscall */ + "lseek", /* 199 = lseek */ + "truncate", /* 200 = truncate */ + "ftruncate", /* 201 = ftruncate */ + "__sysctl", /* 202 = __sysctl */ + "mlock", /* 203 = mlock */ + "munlock", /* 204 = munlock */ + "#205", /* 205 = nosys */ + + /* + * 206 - 215 are all reserved for AppleTalk. 
+ * When AppleTalk is defined some of them are in use + */ + + "#206", /* 206 = nosys */ + "#207", /* 207 = nosys */ + "#208", /* 208 = nosys */ + "#209", /* 209 = nosys */ + "#210", /* 210 = nosys */ + "#211", /* 211 = nosys */ + "#212", /* 212 = nosys */ + "#213", /* 213 = nosys */ + "#214", /* 214 = nosys */ + "#215", /* 215 = nosys */ + "mkcomplex", /* 216 = mkcomplex */ + "statv", /* 217 = statv */ + "lstatv", /* 218 = lstatv */ + "fstatv", /* 219 = fstatv */ + "getattrlist", /* 220 = getattrlist */ + "setattrlist", /* 221 = setattrlist */ + "getdirentriesattr", /* 222 = getdirentriesattr*/ + "exchangedata", /* 223 = exchangedata */ + "checkuseraccess", /* 224 = checkuseraccess*/ + "searchfs", /* 225 = searchfs */ + "#226", /* 226 = private delete call */ + "#227", /* 227 = copyfile */ + "#228", /* 228 = nosys */ + "#229", /* 229 = nosys */ + "#230", /* 230 = reserved for AFS */ + + /* + * 216 - 230 are all reserved for supporting HFS/AFP File System + * Semantics. 225-230 are reserved for future use. 
+ */ + "watchevent", /* 231 = watchevent */ + "waitevent", /* 232 = waitevent */ + "modwatch", /* 233 = modwatch */ + "#234", /* 234 = nosys */ + "#235", /* 235 = nosys */ + "#236", /* 236 = nosys */ + "#237", /* 237 = nosys */ + "#238", /* 238 = nosys */ + "#239", /* 239 = nosys */ + "#240", /* 240 = nosys */ + "#241", /* 241 = nosys */ + "#242", /* 242 = nosys */ + "#243", /* 243 = nosys */ + "#244", /* 244 = nosys */ + "#245", /* 245 = nosys */ + "#246", /* 246 = nosys */ + "#247", /* 247 = nosys */ + "#248", /* 248 = nosys */ + "#249", /* 249 = nosys */ + "minherit", /* 250 = minherit */ + "semsys", /* 251 = semsys */ + "msgsys", /* 252 = msgsys */ + "shmsys", /* 253 = shmsys */ + "semctl", /* 254 = semctl */ + "semget", /* 255 = semget */ + "semop", /* 256 = semop */ + "semconfig", /* 257 = semconfig */ + "msgctl", /* 258 = msgctl */ + "msgget", /* 259 = msgget */ + "msgsnd", /* 260 = msgsnd */ + "msgrcv", /* 261 = msgrcv */ + "shmat", /* 262 = shmat */ + "shmctl", /* 263 = shmctl */ + "shmdt", /* 264 = shmdt */ + "shmget", /* 265 = shmget */ + "shm_open", /* 266 = shm_open */ + "shm_unlink", /* 267 = shm_unlink */ + "sem_open", /* 268 = sem_open */ + "sem_close", /* 269 = sem_close */ + "sem_unlink", /* 270 = sem_unlink */ + "sem_wait", /* 271 = sem_wait */ + "sem_trywait", /* 272 = sem_trywait */ + "sem_post", /* 273 = sem_post */ + "sem_getvalue", /* 274 = sem_getvalue */ + "sem_init", /* 275 = sem_init */ + "sem_destroy", /* 276 = sem_destroy */ + "#277", /* 277 = nosys */ + "#278", /* 278 = nosys */ + "#279", /* 279 = nosys */ + "#280", /* 280 = nosys */ + "#281", /* 281 = nosys */ + "#282", /* 282 = nosys */ + "#283", /* 283 = nosys */ + "#284", /* 284 = nosys */ + "#285", /* 285 = nosys */ + "#286", /* 286 = nosys */ + "#287", /* 287 = nosys */ + "#288", /* 288 = nosys */ + "#289", /* 289 = nosys */ + "#290", /* 290 = nosys */ + "#291", /* 291 = nosys */ + "#292", /* 292 = nosys */ + "#293", /* 293 = nosys */ + "#294", /* 294 = nosys */ + "#295", /* 295 
= nosys */ + "load_shared_file", /* 296 = load_shared_file */ + "reset_shared_file", /* 297 = reset_shared_file */ + "#298", /* 298 = nosys */ + "#299", /* 299 = nosys */ + "#300", /* 300 = modnext */ + "#301", /* 301 = modstat */ + "#302", /* 302 = modfnext */ + "#303", /* 303 = modfind */ + "#304", /* 304 = kldload */ + "#305", /* 305 = kldunload */ + "#306", /* 306 = kldfind */ + "#307", /* 307 = kldnext */ + "#308", /* 308 = kldstat */ + "#309", /* 309 = kldfirstmod */ + "#310", /* 310 = getsid */ + "#311", /* 311 = setresuid */ + "#312", /* 312 = setresgid */ + "#313", /* 313 = obsolete signanosleep */ + "#314", /* 314 = aio_return */ + "#315", /* 315 = aio_suspend */ + "#316", /* 316 = aio_cancel */ + "#317", /* 317 = aio_error */ + "#318", /* 318 = aio_read */ + "#319", /* 319 = aio_write */ + "#320", /* 320 = lio_listio */ + "#321", /* 321 = yield */ + "#322", /* 322 = thr_sleep */ + "#323", /* 323 = thr_wakeup */ + "mlockall", /* 324 = mlockall */ + "munlockall" /* 325 = munlockall */ +}; diff --git a/bsd/kern/sysctl_init.c b/bsd/kern/sysctl_init.c new file mode 100644 index 000000000..5402e009f --- /dev/null +++ b/bsd/kern/sysctl_init.c @@ -0,0 +1,551 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +extern struct sysctl_oid sysctl__debug_bpf_bufsize; + +#if TUN +extern struct sysctl_oid sysctl__debug_if_tun_debug; +#endif + +#if COMPAT_43 +#ifndef NeXT +extern struct sysctl_oid sysctl__debug_ttydebug; +#endif +#endif + +extern struct sysctl_oid sysctl__kern_dummy; +extern struct sysctl_oid sysctl__kern_ipc_maxsockbuf; +extern struct sysctl_oid sysctl__kern_ipc_nmbclusters; +extern struct sysctl_oid sysctl__kern_ipc_sockbuf_waste_factor; +extern struct sysctl_oid sysctl__kern_ipc_somaxconn; +extern struct sysctl_oid sysctl__kern_ipc_sosendminchain; +extern struct sysctl_oid sysctl__kern_ipc_maxsockets; +extern struct sysctl_oid sysctl__net_inet_icmp_icmplim; +extern struct sysctl_oid sysctl__net_inet_icmp_maskrepl; +extern struct sysctl_oid sysctl__net_inet_icmp_bmcastecho; +extern struct sysctl_oid sysctl__net_inet_ip_accept_sourceroute; + +#if IPCTL_DEFMTU +extern struct sysctl_oid sysctl__net_inet_ip_mtu; +#endif + +extern struct sysctl_oid sysctl__net_inet_ip_ttl; +extern struct sysctl_oid sysctl__net_inet_ip_fastforwarding; +extern struct sysctl_oid sysctl__net_inet_ip_forwarding; +extern struct sysctl_oid sysctl__net_inet_ip_intr_queue_drops; +extern struct sysctl_oid sysctl__net_inet_ip_intr_queue_maxlen; +extern struct sysctl_oid sysctl__net_inet_ip_rtexpire; +extern struct sysctl_oid sysctl__net_inet_ip_rtmaxcache; +extern struct sysctl_oid sysctl__net_inet_ip_rtminexpire; +extern struct sysctl_oid sysctl__net_inet_ip_redirect; +extern struct sysctl_oid sysctl__net_inet_ip_sourceroute; +extern struct sysctl_oid sysctl__net_inet_ip_subnets_are_local; +extern struct sysctl_oid sysctl__net_inet_ip_keepfaith; +#if NGIF > 0 +extern struct sysctl_oid sysctl__net_inet_ip_gifttl; +#endif + +#if DUMMYNET +extern struct sysctl_oid sysctl__net_inet_ip_dummynet_calls; 
+extern struct sysctl_oid sysctl__net_inet_ip_dummynet_debug; +extern struct sysctl_oid sysctl__net_inet_ip_dummynet_idle; +extern struct sysctl_oid sysctl__net_inet_ip_dummynet; +#endif + +#if IPFIREWALL && !IPFIREWALL_KEXT +extern struct sysctl_oid sysctl__net_inet_ip_fw_debug; +extern struct sysctl_oid sysctl__net_inet_ip_fw_verbose; +extern struct sysctl_oid sysctl__net_inet_ip_fw_verbose_limit; +extern struct sysctl_oid sysctl__net_inet_ip_fw_one_pass; +extern struct sysctl_oid sysctl__net_inet_ip_fw; +#endif + +extern struct sysctl_oid sysctl__net_inet_raw_maxdgram; +extern struct sysctl_oid sysctl__net_inet_raw_recvspace; +extern struct sysctl_oid sysctl__net_inet_tcp_always_keepalive; +extern struct sysctl_oid sysctl__net_inet_tcp_delayed_ack; +extern struct sysctl_oid sysctl__net_inet_tcp_log_in_vain; +extern struct sysctl_oid sysctl__net_inet_tcp_pcbcount; +extern struct sysctl_oid sysctl__net_inet_tcp_rfc1323; +extern struct sysctl_oid sysctl__net_inet_tcp_rfc1644; +extern struct sysctl_oid sysctl__net_inet_tcp_keepidle; +extern struct sysctl_oid sysctl__net_inet_tcp_keepinit; +extern struct sysctl_oid sysctl__net_inet_tcp_keepintvl; +extern struct sysctl_oid sysctl__net_inet_tcp_mssdflt; +extern struct sysctl_oid sysctl__net_inet_tcp_recvspace; +extern struct sysctl_oid sysctl__net_inet_tcp_rttdflt; +extern struct sysctl_oid sysctl__net_inet_tcp_sendspace; +extern struct sysctl_oid sysctl__net_inet_tcp_v6mssdflt; +extern struct sysctl_oid sysctl__net_inet_udp_log_in_vain; +extern struct sysctl_oid sysctl__net_inet_udp_checksum; +extern struct sysctl_oid sysctl__net_inet_udp_maxdgram; +extern struct sysctl_oid sysctl__net_inet_udp_recvspace; + +#if NETAT +extern struct sysctl_oid sysctl__net_appletalk_debug; +extern struct sysctl_oid sysctl__net_appletalk_routermix; +extern struct sysctl_oid sysctl__net_appletalk_ddpstats; +#endif /* NETAT */ + +#if BRIDGE +extern struct sysctl_oid sysctl__net_link_ether_bdgfwc; +extern struct sysctl_oid 
sysctl__net_link_ether_bdgfwt; +extern struct sysctl_oid sysctl__net_link_ether_bdginc; +extern struct sysctl_oid sysctl__net_link_ether_bdgint; +extern struct sysctl_oid sysctl__net_link_ether_bridge_ipfw; +extern struct sysctl_oid sysctl__net_link_ethe_bdgstats; +#endif + +extern struct sysctl_oid sysctl__net_link_ether_inet_host_down_time; +extern struct sysctl_oid sysctl__net_link_ether_inet_max_age; +extern struct sysctl_oid sysctl__net_link_ether_inet_maxtries; +extern struct sysctl_oid sysctl__net_link_ether_inet_proxyall; +extern struct sysctl_oid sysctl__net_link_ether_inet_prune_intvl; +extern struct sysctl_oid sysctl__net_link_ether_inet_useloopback; + +#if NETMIBS +extern struct sysctl_oid sysctl__net_link_generic_system_ifcount; +extern struct sysctl_oid sysctl__net_link_generic; +extern struct sysctl_oid sysctl__net_link_generic_ifdata; +extern struct sysctl_oid sysctl__net_link_generic_system; +#endif + +#if VLAN +extern struct sysctl_oid sysctl__net_link_vlan_link_proto; +extern struct sysctl_oid sysctl__net_link_vlan; +extern struct sysctl_oid sysctl__net_link_vlan_link; +#endif + +extern struct sysctl_oid sysctl__net_local_inflight; +extern struct sysctl_oid sysctl__net_local_dgram_maxdgram; +extern struct sysctl_oid sysctl__net_local_dgram_recvspace; +extern struct sysctl_oid sysctl__net_local_stream_recvspace; +extern struct sysctl_oid sysctl__net_local_stream_sendspace; + +#if 0 +extern struct sysctl_oid sysctl__vfs_nfs_nfs_privport; +extern struct sysctl_oid sysctl__vfs_nfs_async; +extern struct sysctl_oid sysctl__vfs_nfs_debug; +extern struct sysctl_oid sysctl__vfs_nfs_defect; +extern struct sysctl_oid sysctl__vfs_nfs_diskless_valid; +extern struct sysctl_oid sysctl__vfs_nfs_gatherdelay; +extern struct sysctl_oid sysctl__vfs_nfs_gatherdelay_v3; +extern struct sysctl_oid sysctl__vfs_nfs; +extern struct sysctl_oid sysctl__vfs_nfs_diskless_rootaddr; +extern struct sysctl_oid sysctl__vfs_nfs_diskless_swapaddr; +extern struct sysctl_oid 
sysctl__vfs_nfs_diskless_rootpath; +extern struct sysctl_oid sysctl__vfs_nfs_diskless_swappath; +extern struct sysctl_oid sysctl__vfs_nfs_nfsstats; +#endif + +extern struct sysctl_oid sysctl__kern_ipc; +extern struct sysctl_oid sysctl__net_inet; + +#if NETAT +extern struct sysctl_oid sysctl__net_appletalk; +#endif /* NETAT */ + +extern struct sysctl_oid sysctl__net_link; +extern struct sysctl_oid sysctl__net_local; +extern struct sysctl_oid sysctl__net_routetable; + +#if IPDIVERT +extern struct sysctl_oid sysctl__net_inet_div; +#endif + +extern struct sysctl_oid sysctl__net_inet_icmp; +extern struct sysctl_oid sysctl__net_inet_igmp; +extern struct sysctl_oid sysctl__net_inet_ip; +extern struct sysctl_oid sysctl__net_inet_raw; +extern struct sysctl_oid sysctl__net_inet_tcp; +extern struct sysctl_oid sysctl__net_inet_udp; +extern struct sysctl_oid sysctl__net_inet_ip_portrange; + +extern struct sysctl_oid sysctl__net_link_ether; +extern struct sysctl_oid sysctl__net_link_ether_inet; + +extern struct sysctl_oid sysctl__net_local_dgram; +extern struct sysctl_oid sysctl__net_local_stream; +extern struct sysctl_oid sysctl__sysctl_name; +extern struct sysctl_oid sysctl__sysctl_next; +extern struct sysctl_oid sysctl__sysctl_oidfmt; +extern struct sysctl_oid sysctl__net_inet_ip_portrange_first; +extern struct sysctl_oid sysctl__net_inet_ip_portrange_hifirst; +extern struct sysctl_oid sysctl__net_inet_ip_portrange_hilast; +extern struct sysctl_oid sysctl__net_inet_ip_portrange_last; +extern struct sysctl_oid sysctl__net_inet_ip_portrange_lowfirst; +extern struct sysctl_oid sysctl__net_inet_ip_portrange_lowlast; +extern struct sysctl_oid sysctl__net_inet_raw_pcblist; +extern struct sysctl_oid sysctl__net_inet_tcp_pcblist; +extern struct sysctl_oid sysctl__net_inet_udp_pcblist; +extern struct sysctl_oid sysctl__net_link_ether_bridge; +extern struct sysctl_oid sysctl__net_local_dgram_pcblist; +extern struct sysctl_oid sysctl__net_local_stream_pcblist; +extern struct sysctl_oid 
sysctl__sysctl_debug; +extern struct sysctl_oid sysctl__sysctl_name2oid; +extern struct sysctl_oid sysctl__net_inet_icmp_stats; +extern struct sysctl_oid sysctl__net_inet_igmp_stats; +extern struct sysctl_oid sysctl__net_inet_ip_stats; +extern struct sysctl_oid sysctl__net_inet_tcp_stats; +extern struct sysctl_oid sysctl__net_inet_udp_stats; +extern struct sysctl_oid sysctl__kern; +extern struct sysctl_oid sysctl__hw; +extern struct sysctl_oid sysctl__net; +extern struct sysctl_oid sysctl__debug; +extern struct sysctl_oid sysctl__vfs; +extern struct sysctl_oid sysctl__sysctl; + +#if INET6 +extern struct sysctl_oid sysctl__net_inet6; +extern struct sysctl_oid sysctl__net_inet6_ip6; +extern struct sysctl_oid sysctl__net_inet6_icmp6; +extern struct sysctl_oid sysctl__net_inet6_ip6_forwarding; +extern struct sysctl_oid sysctl__net_inet6_ip6_redirect; +extern struct sysctl_oid sysctl__net_inet6_ip6_hlim; +extern struct sysctl_oid sysctl__net_inet6_ip6_maxfragpackets; +extern struct sysctl_oid sysctl__net_inet6_ip6_accept_rtadv; +extern struct sysctl_oid sysctl__net_inet6_ip6_keepfaith; +extern struct sysctl_oid sysctl__net_inet6_ip6_log_interval; +extern struct sysctl_oid sysctl__net_inet6_ip6_hdrnestlimit; +extern struct sysctl_oid sysctl__net_inet6_ip6_dad_count; +extern struct sysctl_oid sysctl__net_inet6_ip6_auto_flowlabel; +extern struct sysctl_oid sysctl__net_inet6_ip6_defmcasthlim; +extern struct sysctl_oid sysctl__net_inet6_ip6_gifhlim; +extern struct sysctl_oid sysctl__net_inet6_ip6_kame_version; +extern struct sysctl_oid sysctl__net_inet6_ip6_use_deprecated; +extern struct sysctl_oid sysctl__net_inet6_ip6_rr_prune; +#if MAPPED_ADDR_ENABLED +extern struct sysctl_oid sysctl__net_inet6_ip6_mapped_addr; +#endif +#if IPV6FIREWALL +extern struct sysctl_oid sysctl__net_inet6_ip6_fw; +extern struct sysctl_oid sysctl__net_inet6_ip6_fw_debug; +extern struct sysctl_oid sysctl__net_inet6_ip6_fw_verbose; +extern struct sysctl_oid sysctl__net_inet6_ip6_fw_verbose_limit; 
+#endif +extern struct sysctl_oid sysctl__net_inet6_icmp6_rediraccept; +extern struct sysctl_oid sysctl__net_inet6_icmp6_redirtimeout; +extern struct sysctl_oid sysctl__net_inet6_icmp6_errratelimit; +extern struct sysctl_oid sysctl__net_inet6_icmp6_nd6_prune; +extern struct sysctl_oid sysctl__net_inet6_icmp6_nd6_delay; +extern struct sysctl_oid sysctl__net_inet6_icmp6_nd6_umaxtries; +extern struct sysctl_oid sysctl__net_inet6_icmp6_nd6_mmaxtries; +extern struct sysctl_oid sysctl__net_inet6_icmp6_nd6_useloopback; +extern struct sysctl_oid sysctl__net_inet6_icmp6_nodeinfo; +#if IPSEC +extern struct sysctl_oid sysctl__net_inet6_ipsec6; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_def_policy; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_esp_trans_deflev; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_esp_net_deflev; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_ah_trans_deflev; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_ah_net_deflev; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_inbound_call_ike; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_ecn; +extern struct sysctl_oid sysctl__net_inet6_ipsec6_debug; +#endif +#endif +#if IPSEC +extern struct sysctl_oid sysctl__net_inet_ipsec; +extern struct sysctl_oid sysctl__net_inet_ipsec_def_policy; +extern struct sysctl_oid sysctl__net_inet_ipsec_esp_trans_deflev; +extern struct sysctl_oid sysctl__net_inet_ipsec_esp_net_deflev; +extern struct sysctl_oid sysctl__net_inet_ipsec_ah_trans_deflev; +extern struct sysctl_oid sysctl__net_inet_ipsec_ah_net_deflev; +extern struct sysctl_oid sysctl__net_inet_ipsec_inbound_call_ike; +extern struct sysctl_oid sysctl__net_inet_ipsec_ah_cleartos; +extern struct sysctl_oid sysctl__net_inet_ipsec_ah_offsetmask; +extern struct sysctl_oid sysctl__net_inet_ipsec_dfbit; +extern struct sysctl_oid sysctl__net_inet_ipsec_ecn; +extern struct sysctl_oid sysctl__net_inet_ipsec_debug; +extern struct sysctl_oid sysctl__net_key; +extern struct sysctl_oid 
sysctl__net_key_debug; +extern struct sysctl_oid sysctl__net_key_spi_trycnt; +extern struct sysctl_oid sysctl__net_key_spi_minval; +extern struct sysctl_oid sysctl__net_key_spi_maxval; +extern struct sysctl_oid sysctl__net_key_int_random; +extern struct sysctl_oid sysctl__net_key_larval_lifetime; +extern struct sysctl_oid sysctl__net_key_blockacq_count; +extern struct sysctl_oid sysctl__net_key_blockacq_lifetime; +#endif + + +struct sysctl_oid *newsysctl_list[] = +{ + &sysctl__kern, + &sysctl__hw, + &sysctl__net, + &sysctl__debug, + &sysctl__vfs, + &sysctl__sysctl, + &sysctl__debug_bpf_bufsize +#if TUN + ,&sysctl__debug_if_tun_debug +#endif + +#if COMPAT_43 +#ifndef NeXT + ,&sysctl__debug_ttydebug +#endif +#endif + + ,&sysctl__kern_dummy + ,&sysctl__kern_ipc_maxsockbuf + ,&sysctl__kern_ipc_nmbclusters + ,&sysctl__kern_ipc_sockbuf_waste_factor + ,&sysctl__kern_ipc_somaxconn + ,&sysctl__kern_ipc_sosendminchain + ,&sysctl__kern_ipc_maxsockets + ,&sysctl__net_inet_icmp_icmplim + ,&sysctl__net_inet_icmp_maskrepl + ,&sysctl__net_inet_icmp_bmcastecho + ,&sysctl__net_inet_ip_accept_sourceroute +#if IPCTL_DEFMTU + ,&sysctl__net_inet_ip_mtu +#endif + ,&sysctl__net_inet_ip_ttl + ,&sysctl__net_inet_ip_fastforwarding + ,&sysctl__net_inet_ip_forwarding + ,&sysctl__net_inet_ip_intr_queue_drops + ,&sysctl__net_inet_ip_intr_queue_maxlen + ,&sysctl__net_inet_ip_rtexpire + ,&sysctl__net_inet_ip_rtmaxcache + ,&sysctl__net_inet_ip_rtminexpire + ,&sysctl__net_inet_ip_redirect + ,&sysctl__net_inet_ip_sourceroute + ,&sysctl__net_inet_ip_subnets_are_local + ,&sysctl__net_inet_ip_keepfaith +#if NGIF > 0 + ,&sysctl__net_inet_ip_gifttl +#endif +#if DUMMYNET + ,&sysctl__net_inet_ip_dummynet_calls + ,&sysctl__net_inet_ip_dummynet_debug + ,&sysctl__net_inet_ip_dummynet_idle + ,&sysctl__net_inet_ip_dummynet +#endif + +#if IPFIREWALL && !IPFIREWALL_KEXT + ,&sysctl__net_inet_ip_fw_debug + ,&sysctl__net_inet_ip_fw_verbose + ,&sysctl__net_inet_ip_fw_verbose_limit + ,&sysctl__net_inet_ip_fw_one_pass + 
,&sysctl__net_inet_ip_fw +#endif + ,&sysctl__net_inet_raw_maxdgram + ,&sysctl__net_inet_raw_recvspace + ,&sysctl__net_inet_tcp_always_keepalive + ,&sysctl__net_inet_tcp_delayed_ack + ,&sysctl__net_inet_tcp_log_in_vain + ,&sysctl__net_inet_tcp_pcbcount + ,&sysctl__net_inet_tcp_rfc1323 + ,&sysctl__net_inet_tcp_rfc1644 + ,&sysctl__net_inet_tcp_keepidle + ,&sysctl__net_inet_tcp_keepinit + ,&sysctl__net_inet_tcp_keepintvl + ,&sysctl__net_inet_tcp_mssdflt + ,&sysctl__net_inet_tcp_recvspace + ,&sysctl__net_inet_tcp_rttdflt + ,&sysctl__net_inet_tcp_sendspace + ,&sysctl__net_inet_tcp_v6mssdflt + ,&sysctl__net_inet_udp_log_in_vain + ,&sysctl__net_inet_udp_checksum + ,&sysctl__net_inet_udp_maxdgram + ,&sysctl__net_inet_udp_recvspace + +#if NETAT + ,&sysctl__net_appletalk_debug + ,&sysctl__net_appletalk_routermix + ,&sysctl__net_appletalk_ddpstats +#endif /* NETAT */ + +#if BRIDGE + ,&sysctl__net_link_ether_bdgfwc + ,&sysctl__net_link_ether_bdgfwt + ,&sysctl__net_link_ether_bdginc + ,&sysctl__net_link_ether_bdgint + ,&sysctl__net_link_ether_bridge_ipfw + ,&sysctl__net_link_ethe_bdgstats + ,&sysctl__net_link_ether_bridge +#endif + + ,&sysctl__net_link_ether_inet_host_down_time + ,&sysctl__net_link_ether_inet_max_age + ,&sysctl__net_link_ether_inet_maxtries + ,&sysctl__net_link_ether_inet_proxyall + ,&sysctl__net_link_ether_inet_prune_intvl + ,&sysctl__net_link_ether_inet_useloopback +#if NETMIBS + ,&sysctl__net_link_generic_system_ifcount + ,&sysctl__net_link_generic + ,&sysctl__net_link_generic_ifdata + ,&sysctl__net_link_generic_system +#endif + +#if VLAN + ,&sysctl__net_link_vlan_link_proto + ,&sysctl__net_link_vlan + ,&sysctl__net_link_vlan_link +#endif + + ,&sysctl__net_local_inflight + ,&sysctl__net_local_dgram_maxdgram + ,&sysctl__net_local_dgram_recvspace + ,&sysctl__net_local_stream_recvspace + ,&sysctl__net_local_stream_sendspace +#if 0 + ,&sysctl__vfs_nfs_nfs_privport + ,&sysctl__vfs_nfs_async + ,&sysctl__vfs_nfs_debug + ,&sysctl__vfs_nfs_defect + 
,&sysctl__vfs_nfs_diskless_valid + ,&sysctl__vfs_nfs_gatherdelay + ,&sysctl__vfs_nfs_gatherdelay_v3 + ,&sysctl__vfs_nfs + ,&sysctl__vfs_nfs_diskless_rootaddr + ,&sysctl__vfs_nfs_diskless_swapaddr + ,&sysctl__vfs_nfs_diskless_rootpath + ,&sysctl__vfs_nfs_diskless_swappath + ,&sysctl__vfs_nfs_nfsstats +#endif + ,&sysctl__kern_ipc + ,&sysctl__net_inet +#if NETAT + ,&sysctl__net_appletalk +#endif /* NETAT */ + ,&sysctl__net_link + ,&sysctl__net_local + ,&sysctl__net_routetable +#if IPDIVERT + ,&sysctl__net_inet_div +#endif + ,&sysctl__net_inet_icmp + ,&sysctl__net_inet_igmp + ,&sysctl__net_inet_ip + ,&sysctl__net_inet_raw + ,&sysctl__net_inet_tcp + ,&sysctl__net_inet_udp + ,&sysctl__net_inet_ip_portrange + ,&sysctl__net_link_ether + ,&sysctl__net_link_ether_inet + ,&sysctl__net_local_dgram + ,&sysctl__net_local_stream + ,&sysctl__sysctl_name + ,&sysctl__sysctl_next + ,&sysctl__sysctl_oidfmt + ,&sysctl__net_inet_ip_portrange_first + ,&sysctl__net_inet_ip_portrange_hifirst + ,&sysctl__net_inet_ip_portrange_hilast + ,&sysctl__net_inet_ip_portrange_last + ,&sysctl__net_inet_ip_portrange_lowfirst + ,&sysctl__net_inet_ip_portrange_lowlast + ,&sysctl__net_inet_raw_pcblist + ,&sysctl__net_inet_tcp_pcblist + ,&sysctl__net_inet_udp_pcblist + ,&sysctl__net_local_dgram_pcblist + ,&sysctl__net_local_stream_pcblist + ,&sysctl__sysctl_debug + ,&sysctl__sysctl_name2oid + ,&sysctl__net_inet_icmp_stats + ,&sysctl__net_inet_igmp_stats + ,&sysctl__net_inet_ip_stats + ,&sysctl__net_inet_tcp_stats + ,&sysctl__net_inet_udp_stats +#if INET6 + ,&sysctl__net_inet6 + ,&sysctl__net_inet6_ip6 + ,&sysctl__net_inet6_icmp6 + ,&sysctl__net_inet6_ip6_forwarding + ,&sysctl__net_inet6_ip6_redirect + ,&sysctl__net_inet6_ip6_hlim + ,&sysctl__net_inet6_ip6_maxfragpackets + ,&sysctl__net_inet6_ip6_accept_rtadv + ,&sysctl__net_inet6_ip6_keepfaith + ,&sysctl__net_inet6_ip6_log_interval + ,&sysctl__net_inet6_ip6_hdrnestlimit + ,&sysctl__net_inet6_ip6_dad_count + ,&sysctl__net_inet6_ip6_auto_flowlabel + 
,&sysctl__net_inet6_ip6_defmcasthlim + ,&sysctl__net_inet6_ip6_gifhlim + ,&sysctl__net_inet6_ip6_kame_version + ,&sysctl__net_inet6_ip6_use_deprecated + ,&sysctl__net_inet6_ip6_rr_prune +#if MAPPED_ADDR_ENABLED + ,&sysctl__net_inet6_ip6_mapped_addr +#endif + ,&sysctl__net_inet6_icmp6_rediraccept + ,&sysctl__net_inet6_icmp6_redirtimeout + ,&sysctl__net_inet6_icmp6_errratelimit + ,&sysctl__net_inet6_icmp6_nd6_prune + ,&sysctl__net_inet6_icmp6_nd6_delay + ,&sysctl__net_inet6_icmp6_nd6_umaxtries + ,&sysctl__net_inet6_icmp6_nd6_mmaxtries + ,&sysctl__net_inet6_icmp6_nd6_useloopback + ,&sysctl__net_inet6_icmp6_nodeinfo +#if IPV6FIREWALL + ,&sysctl__net_inet6_ip6_fw + ,&sysctl__net_inet6_ip6_fw_debug + ,&sysctl__net_inet6_ip6_fw_verbose + ,&sysctl__net_inet6_ip6_fw_verbose_limit +#endif +#if IPSEC + ,&sysctl__net_inet6_ipsec6 + ,&sysctl__net_inet6_ipsec6_def_policy + ,&sysctl__net_inet6_ipsec6_esp_trans_deflev + ,&sysctl__net_inet6_ipsec6_esp_net_deflev + ,&sysctl__net_inet6_ipsec6_ah_trans_deflev + ,&sysctl__net_inet6_ipsec6_ah_net_deflev + ,&sysctl__net_inet6_ipsec6_inbound_call_ike + ,&sysctl__net_inet6_ipsec6_ecn + ,&sysctl__net_inet6_ipsec6_debug +#endif +#endif +#if IPSEC + ,&sysctl__net_key + ,&sysctl__net_key_debug + ,&sysctl__net_key_spi_trycnt + ,&sysctl__net_key_spi_minval + ,&sysctl__net_key_spi_maxval + ,&sysctl__net_key_int_random + ,&sysctl__net_key_larval_lifetime + ,&sysctl__net_key_blockacq_count + ,&sysctl__net_key_blockacq_lifetime + ,&sysctl__net_inet_ipsec + ,&sysctl__net_inet_ipsec_def_policy + ,&sysctl__net_inet_ipsec_esp_trans_deflev + ,&sysctl__net_inet_ipsec_esp_net_deflev + ,&sysctl__net_inet_ipsec_ah_trans_deflev + ,&sysctl__net_inet_ipsec_ah_net_deflev + ,&sysctl__net_inet_ipsec_inbound_call_ike + ,&sysctl__net_inet_ipsec_ah_cleartos + ,&sysctl__net_inet_ipsec_ah_offsetmask + ,&sysctl__net_inet_ipsec_dfbit + ,&sysctl__net_inet_ipsec_ecn + ,&sysctl__net_inet_ipsec_debug +#endif + ,(struct sysctl_oid *) 0 +}; + diff --git a/bsd/kern/sysv_ipc.c 
b/bsd/kern/sysv_ipc.c new file mode 100644 index 000000000..321125c84 --- /dev/null +++ b/bsd/kern/sysv_ipc.c @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: sysv_ipc.c,v 1.7 1994/06/29 06:33:11 cgd Exp $ */ + +/* + * Copyright (c) 1994 Herb Peyerl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Herb Peyerl. + * 4. 
The name of Herb Peyerl may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#include +#include +#include + + +/* + * Check for ipc permission + * + * XXX: Should pass proc argument so that we can pass + * XXX: proc->p_acflag to suser() + */ + +int +ipcperm(cred, perm, mode) + struct ucred *cred; + struct ipc_perm *perm; + int mode; +{ + + if (suser(cred, (u_short *)NULL)) + return (0); + + /* Check for user match. */ + if (cred->cr_uid != perm->cuid && cred->cr_uid != perm->uid) { + if (mode & IPC_M) + return (EPERM); + /* Check for group match. */ + mode >>= 3; + if (!groupmember(perm->gid, cred) && + !groupmember(perm->cgid, cred)) + /* Check for `other' match. */ + mode >>= 3; + } + + if (mode & IPC_M) + return (0); + return ((mode & perm->mode) == mode ? 
0 : EACCES); +} + + + + +/* + * SYSVSEM stubs + */ + +int +semsys(p, uap) + struct proc *p; +#if 0 + struct semsys_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +semconfig(p, uap) + struct proc *p; +#if 0 + struct semconfig_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +semctl(p, uap) + struct proc *p; +#if 0 + register struct semctl_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +semget(p, uap) + struct proc *p; +#if 0 + register struct semget_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +semop(p, uap) + struct proc *p; +#if 0 + register struct semop_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +/* called from kern_exit.c */ +void +semexit(p) + struct proc *p; +{ + return; +} + + + + +/* + * SYSVMSG stubs + */ + +int +msgsys(p, uap) + struct proc *p; + /* XXX actually varargs. */ +#if 0 + struct msgsys_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +msgctl(p, uap) + struct proc *p; +#if 0 + register struct msgctl_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +msgget(p, uap) + struct proc *p; +#if 0 + register struct msgget_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +msgsnd(p, uap) + struct proc *p; +#if 0 + register struct msgsnd_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; + +int +msgrcv(p, uap) + struct proc *p; +#if 0 + register struct msgrcv_args *uap; +#else + void *uap; +#endif +{ + return(EOPNOTSUPP); +}; diff --git a/bsd/kern/sysv_msg.c b/bsd/kern/sysv_msg.c new file mode 100644 index 000000000..dc3ca0a81 --- /dev/null +++ b/bsd/kern/sysv_msg.c @@ -0,0 +1,1049 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Implementation of SVID messages + * + * Author: Daniel Boulet + * + * Copyright 1993 Daniel Boulet and RTMX Inc. + * + * This system call was implemented by Daniel Boulet under contract from RTMX. + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static void msginit __P((void *)); +SYSINIT(sysv_msg, SI_SUB_SYSV_MSG, SI_ORDER_FIRST, msginit, NULL) + +#define MSG_DEBUG +#undef MSG_DEBUG_OK + +#ifndef _SYS_SYSPROTO_H_ +struct msgctl_args; +int msgctl __P((struct proc *p, struct msgctl_args *uap)); +struct msgget_args; +int msgget __P((struct proc *p, struct msgget_args *uap)); +struct msgsnd_args; +int msgsnd __P((struct proc *p, struct msgsnd_args *uap)); +struct msgrcv_args; +int msgrcv __P((struct proc *p, struct msgrcv_args *uap)); +#endif +static void msg_freehdr __P((struct msg *msghdr)); + +/* XXX casting to (sy_call_t *) is bogus, as usual. */ +static sy_call_t *msgcalls[] = { + (sy_call_t *)msgctl, (sy_call_t *)msgget, + (sy_call_t *)msgsnd, (sy_call_t *)msgrcv +}; + +static int nfree_msgmaps; /* # of free map entries */ +static short free_msgmaps; /* head of linked list of free map entries */ +static struct msg *free_msghdrs; /* list of free msg headers */ +char *msgpool; /* MSGMAX byte long msg buffer pool */ +struct msgmap *msgmaps; /* MSGSEG msgmap structures */ +struct msg *msghdrs; /* MSGTQL msg headers */ +struct msqid_ds *msqids; /* MSGMNI msqid_ds struct's */ + +void +msginit(dummy) + void *dummy; +{ + register int i; + + /* + * msginfo.msgssz should be a power of two for efficiency reasons. + * It is also pretty silly if msginfo.msgssz is less than 8 + * or greater than about 256 so ... 
+ */ + + i = 8; + while (i < 1024 && i != msginfo.msgssz) + i <<= 1; + if (i != msginfo.msgssz) { + printf("msginfo.msgssz=%d (0x%x)\n", msginfo.msgssz, + msginfo.msgssz); + panic("msginfo.msgssz not a small power of 2"); + } + + if (msginfo.msgseg > 32767) { + printf("msginfo.msgseg=%d\n", msginfo.msgseg); + panic("msginfo.msgseg > 32767"); + } + + if (msgmaps == NULL) + panic("msgmaps is NULL"); + + for (i = 0; i < msginfo.msgseg; i++) { + if (i > 0) + msgmaps[i-1].next = i; + msgmaps[i].next = -1; /* implies entry is available */ + } + free_msgmaps = 0; + nfree_msgmaps = msginfo.msgseg; + + if (msghdrs == NULL) + panic("msghdrs is NULL"); + + for (i = 0; i < msginfo.msgtql; i++) { + msghdrs[i].msg_type = 0; + if (i > 0) + msghdrs[i-1].msg_next = &msghdrs[i]; + msghdrs[i].msg_next = NULL; + } + free_msghdrs = &msghdrs[0]; + + if (msqids == NULL) + panic("msqids is NULL"); + + for (i = 0; i < msginfo.msgmni; i++) { + msqids[i].msg_qbytes = 0; /* implies entry is available */ + msqids[i].msg_perm.seq = 0; /* reset to a known value */ + } +} + +/* + * Entry point for all MSG calls + */ +int +msgsys(p, uap) + struct proc *p; + /* XXX actually varargs. 
*/ + struct msgsys_args /* { + u_int which; + int a2; + int a3; + int a4; + int a5; + int a6; + } */ *uap; +{ + + if (uap->which >= sizeof(msgcalls)/sizeof(msgcalls[0])) + return (EINVAL); + return ((*msgcalls[uap->which])(p, &uap->a2)); +} + +static void +msg_freehdr(msghdr) + struct msg *msghdr; +{ + while (msghdr->msg_ts > 0) { + short next; + if (msghdr->msg_spot < 0 || msghdr->msg_spot >= msginfo.msgseg) + panic("msghdr->msg_spot out of range"); + next = msgmaps[msghdr->msg_spot].next; + msgmaps[msghdr->msg_spot].next = free_msgmaps; + free_msgmaps = msghdr->msg_spot; + nfree_msgmaps++; + msghdr->msg_spot = next; + if (msghdr->msg_ts >= msginfo.msgssz) + msghdr->msg_ts -= msginfo.msgssz; + else + msghdr->msg_ts = 0; + } + if (msghdr->msg_spot != -1) + panic("msghdr->msg_spot != -1"); + msghdr->msg_next = free_msghdrs; + free_msghdrs = msghdr; +} + +#ifndef _SYS_SYSPROTO_H_ +struct msgctl_args { + int msqid; + int cmd; + struct msqid_ds *buf; +}; +#endif + +int +msgctl(p, uap) + struct proc *p; + register struct msgctl_args *uap; +{ + int msqid = uap->msqid; + int cmd = uap->cmd; + struct msqid_ds *user_msqptr = uap->buf; + struct ucred *cred = p->p_ucred; + int rval, eval; + struct msqid_ds msqbuf; + register struct msqid_ds *msqptr; + +#ifdef MSG_DEBUG_OK + printf("call to msgctl(%d, %d, 0x%x)\n", msqid, cmd, user_msqptr); +#endif + + msqid = IPCID_TO_IX(msqid); + + if (msqid < 0 || msqid >= msginfo.msgmni) { +#ifdef MSG_DEBUG_OK + printf("msqid (%d) out of range (0<=msqid<%d)\n", msqid, + msginfo.msgmni); +#endif + return(EINVAL); + } + + msqptr = &msqids[msqid]; + + if (msqptr->msg_qbytes == 0) { +#ifdef MSG_DEBUG_OK + printf("no such msqid\n"); +#endif + return(EINVAL); + } + if (msqptr->msg_perm.seq != IPCID_TO_SEQ(uap->msqid)) { +#ifdef MSG_DEBUG_OK + printf("wrong sequence number\n"); +#endif + return(EINVAL); + } + + eval = 0; + rval = 0; + + switch (cmd) { + + case IPC_RMID: + { + struct msg *msghdr; + if ((eval = ipcperm(cred, &msqptr->msg_perm, 
IPC_M))) + return(eval); + /* Free the message headers */ + msghdr = msqptr->msg_first; + while (msghdr != NULL) { + struct msg *msghdr_tmp; + + /* Free the segments of each message */ + msqptr->msg_cbytes -= msghdr->msg_ts; + msqptr->msg_qnum--; + msghdr_tmp = msghdr; + msghdr = msghdr->msg_next; + msg_freehdr(msghdr_tmp); + } + + if (msqptr->msg_cbytes != 0) + panic("msg_cbytes is messed up"); + if (msqptr->msg_qnum != 0) + panic("msg_qnum is messed up"); + + msqptr->msg_qbytes = 0; /* Mark it as free */ + + wakeup((caddr_t)msqptr); + } + + break; + + case IPC_SET: + if ((eval = ipcperm(cred, &msqptr->msg_perm, IPC_M))) + return(eval); + if ((eval = copyin(user_msqptr, &msqbuf, sizeof(msqbuf))) != 0) + return(eval); + if (msqbuf.msg_qbytes > msqptr->msg_qbytes) { + eval = suser(cred, &p->p_acflag); + if (eval) + return(eval); + } + if (msqbuf.msg_qbytes > msginfo.msgmnb) { +#ifdef MSG_DEBUG_OK + printf("can't increase msg_qbytes beyond %d (truncating)\n", + msginfo.msgmnb); +#endif + msqbuf.msg_qbytes = msginfo.msgmnb; /* silently restrict qbytes to system limit */ + } + if (msqbuf.msg_qbytes == 0) { +#ifdef MSG_DEBUG_OK + printf("can't reduce msg_qbytes to 0\n"); +#endif + return(EINVAL); /* non-standard errno! 
*/ + } + msqptr->msg_perm.uid = msqbuf.msg_perm.uid; /* change the owner */ + msqptr->msg_perm.gid = msqbuf.msg_perm.gid; /* change the owner */ + msqptr->msg_perm.mode = (msqptr->msg_perm.mode & ~0777) | + (msqbuf.msg_perm.mode & 0777); + msqptr->msg_qbytes = msqbuf.msg_qbytes; + msqptr->msg_ctime = time_second; + break; + + case IPC_STAT: + if ((eval = ipcperm(cred, &msqptr->msg_perm, IPC_R))) { +#ifdef MSG_DEBUG_OK + printf("requester doesn't have read access\n"); +#endif + return(eval); + } + eval = copyout((caddr_t)msqptr, user_msqptr, + sizeof(struct msqid_ds)); + break; + + default: +#ifdef MSG_DEBUG_OK + printf("invalid command %d\n", cmd); +#endif + return(EINVAL); + } + + if (eval == 0) + p->p_retval[0] = rval; + return(eval); +} + +#ifndef _SYS_SYSPROTO_H_ +struct msgget_args { + key_t key; + int msgflg; +}; +#endif + +int +msgget(p, uap) + struct proc *p; + register struct msgget_args *uap; +{ + int msqid, eval; + int key = uap->key; + int msgflg = uap->msgflg; + struct ucred *cred = p->p_ucred; + register struct msqid_ds *msqptr = NULL; + +#ifdef MSG_DEBUG_OK + printf("msgget(0x%x, 0%o)\n", key, msgflg); +#endif + + if (key != IPC_PRIVATE) { + for (msqid = 0; msqid < msginfo.msgmni; msqid++) { + msqptr = &msqids[msqid]; + if (msqptr->msg_qbytes != 0 && + msqptr->msg_perm.key == key) + break; + } + if (msqid < msginfo.msgmni) { +#ifdef MSG_DEBUG_OK + printf("found public key\n"); +#endif + if ((msgflg & IPC_CREAT) && (msgflg & IPC_EXCL)) { +#ifdef MSG_DEBUG_OK + printf("not exclusive\n"); +#endif + return(EEXIST); + } + if ((eval = ipcperm(cred, &msqptr->msg_perm, msgflg & 0700 ))) { +#ifdef MSG_DEBUG_OK + printf("requester doesn't have 0%o access\n", + msgflg & 0700); +#endif + return(eval); + } + goto found; + } + } + +#ifdef MSG_DEBUG_OK + printf("need to allocate the msqid_ds\n"); +#endif + if (key == IPC_PRIVATE || (msgflg & IPC_CREAT)) { + for (msqid = 0; msqid < msginfo.msgmni; msqid++) { + /* + * Look for an unallocated and unlocked msqid_ds. 
+ * msqid_ds's can be locked by msgsnd or msgrcv while + * they are copying the message in/out. We can't + * re-use the entry until they release it. + */ + msqptr = &msqids[msqid]; + if (msqptr->msg_qbytes == 0 && + (msqptr->msg_perm.mode & MSG_LOCKED) == 0) + break; + } + if (msqid == msginfo.msgmni) { +#ifdef MSG_DEBUG_OK + printf("no more msqid_ds's available\n"); +#endif + return(ENOSPC); + } +#ifdef MSG_DEBUG_OK + printf("msqid %d is available\n", msqid); +#endif + msqptr->msg_perm.key = key; + msqptr->msg_perm.cuid = cred->cr_uid; + msqptr->msg_perm.uid = cred->cr_uid; + msqptr->msg_perm.cgid = cred->cr_gid; + msqptr->msg_perm.gid = cred->cr_gid; + msqptr->msg_perm.mode = (msgflg & 0777); + /* Make sure that the returned msqid is unique */ + msqptr->msg_perm.seq++; + msqptr->msg_first = NULL; + msqptr->msg_last = NULL; + msqptr->msg_cbytes = 0; + msqptr->msg_qnum = 0; + msqptr->msg_qbytes = msginfo.msgmnb; + msqptr->msg_lspid = 0; + msqptr->msg_lrpid = 0; + msqptr->msg_stime = 0; + msqptr->msg_rtime = 0; + msqptr->msg_ctime = time_second; + } else { +#ifdef MSG_DEBUG_OK + printf("didn't find it and wasn't asked to create it\n"); +#endif + return(ENOENT); + } + +found: + /* Construct the unique msqid */ + p->p_retval[0] = IXSEQ_TO_IPCID(msqid, msqptr->msg_perm); + return(0); +} + +#ifndef _SYS_SYSPROTO_H_ +struct msgsnd_args { + int msqid; + void *msgp; + size_t msgsz; + int msgflg; +}; +#endif + +int +msgsnd(p, uap) + struct proc *p; + register struct msgsnd_args *uap; +{ + int msqid = uap->msqid; + void *user_msgp = uap->msgp; + size_t msgsz = uap->msgsz; + int msgflg = uap->msgflg; + int segs_needed, eval; + struct ucred *cred = p->p_ucred; + register struct msqid_ds *msqptr; + register struct msg *msghdr; + short next; + +#ifdef MSG_DEBUG_OK + printf("call to msgsnd(%d, 0x%x, %d, %d)\n", msqid, user_msgp, msgsz, + msgflg); +#endif + + msqid = IPCID_TO_IX(msqid); + + if (msqid < 0 || msqid >= msginfo.msgmni) { +#ifdef MSG_DEBUG_OK + printf("msqid (%d) out 
of range (0<=msqid<%d)\n", msqid, + msginfo.msgmni); +#endif + return(EINVAL); + } + + msqptr = &msqids[msqid]; + if (msqptr->msg_qbytes == 0) { +#ifdef MSG_DEBUG_OK + printf("no such message queue id\n"); +#endif + return(EINVAL); + } + if (msqptr->msg_perm.seq != IPCID_TO_SEQ(uap->msqid)) { +#ifdef MSG_DEBUG_OK + printf("wrong sequence number\n"); +#endif + return(EINVAL); + } + + if ((eval = ipcperm(cred, &msqptr->msg_perm, IPC_W))) { +#ifdef MSG_DEBUG_OK + printf("requester doesn't have write access\n"); +#endif + return(eval); + } + + segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz; +#ifdef MSG_DEBUG_OK + printf("msgsz=%d, msgssz=%d, segs_needed=%d\n", msgsz, msginfo.msgssz, + segs_needed); +#endif + for (;;) { + int need_more_resources = 0; + + /* + * check msgsz + * (inside this loop in case msg_qbytes changes while we sleep) + */ + + if (msgsz > msqptr->msg_qbytes) { +#ifdef MSG_DEBUG_OK + printf("msgsz > msqptr->msg_qbytes\n"); +#endif + return(EINVAL); + } + + if (msqptr->msg_perm.mode & MSG_LOCKED) { +#ifdef MSG_DEBUG_OK + printf("msqid is locked\n"); +#endif + need_more_resources = 1; + } + if (msgsz + msqptr->msg_cbytes > msqptr->msg_qbytes) { +#ifdef MSG_DEBUG_OK + printf("msgsz + msg_cbytes > msg_qbytes\n"); +#endif + need_more_resources = 1; + } + if (segs_needed > nfree_msgmaps) { +#ifdef MSG_DEBUG_OK + printf("segs_needed > nfree_msgmaps\n"); +#endif + need_more_resources = 1; + } + if (free_msghdrs == NULL) { +#ifdef MSG_DEBUG_OK + printf("no more msghdrs\n"); +#endif + need_more_resources = 1; + } + + if (need_more_resources) { + int we_own_it; + + if ((msgflg & IPC_NOWAIT) != 0) { +#ifdef MSG_DEBUG_OK + printf("need more resources but caller doesn't want to wait\n"); +#endif + return(EAGAIN); + } + + if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) { +#ifdef MSG_DEBUG_OK + printf("we don't own the msqid_ds\n"); +#endif + we_own_it = 0; + } else { + /* Force later arrivals to wait for our + request */ +#ifdef MSG_DEBUG_OK + printf("we 
own the msqid_ds\n"); +#endif + msqptr->msg_perm.mode |= MSG_LOCKED; + we_own_it = 1; + } +#ifdef MSG_DEBUG_OK + printf("goodnight\n"); +#endif + eval = tsleep((caddr_t)msqptr, (PZERO - 4) | PCATCH, + "msgwait", 0); +#ifdef MSG_DEBUG_OK + printf("good morning, eval=%d\n", eval); +#endif + if (we_own_it) + msqptr->msg_perm.mode &= ~MSG_LOCKED; + if (eval != 0) { +#ifdef MSG_DEBUG_OK + printf("msgsnd: interrupted system call\n"); +#endif + return(EINTR); + } + + /* + * Make sure that the msq queue still exists + */ + + if (msqptr->msg_qbytes == 0) { +#ifdef MSG_DEBUG_OK + printf("msqid deleted\n"); +#endif + /* The SVID says to return EIDRM. */ +#ifdef EIDRM + return(EIDRM); +#else + /* Unfortunately, BSD doesn't define that code + yet! */ + return(EINVAL); +#endif + } + + } else { +#ifdef MSG_DEBUG_OK + printf("got all the resources that we need\n"); +#endif + break; + } + } + + /* + * We have the resources that we need. + * Make sure! + */ + + if (msqptr->msg_perm.mode & MSG_LOCKED) + panic("msg_perm.mode & MSG_LOCKED"); + if (segs_needed > nfree_msgmaps) + panic("segs_needed > nfree_msgmaps"); + if (msgsz + msqptr->msg_cbytes > msqptr->msg_qbytes) + panic("msgsz + msg_cbytes > msg_qbytes"); + if (free_msghdrs == NULL) + panic("no more msghdrs"); + + /* + * Re-lock the msqid_ds in case we page-fault when copying in the + * message + */ + + if ((msqptr->msg_perm.mode & MSG_LOCKED) != 0) + panic("msqid_ds is already locked"); + msqptr->msg_perm.mode |= MSG_LOCKED; + + /* + * Allocate a message header + */ + + msghdr = free_msghdrs; + free_msghdrs = msghdr->msg_next; + msghdr->msg_spot = -1; + msghdr->msg_ts = msgsz; + + /* + * Allocate space for the message + */ + + while (segs_needed > 0) { + if (nfree_msgmaps <= 0) + panic("not enough msgmaps"); + if (free_msgmaps == -1) + panic("nil free_msgmaps"); + next = free_msgmaps; + if (next <= -1) + panic("next too low #1"); + if (next >= msginfo.msgseg) + panic("next out of range #1"); +#ifdef MSG_DEBUG_OK + 
printf("allocating segment %d to message\n", next); +#endif + free_msgmaps = msgmaps[next].next; + nfree_msgmaps--; + msgmaps[next].next = msghdr->msg_spot; + msghdr->msg_spot = next; + segs_needed--; + } + + /* + * Copy in the message type + */ + + if ((eval = copyin(user_msgp, &msghdr->msg_type, + sizeof(msghdr->msg_type))) != 0) { +#ifdef MSG_DEBUG_OK + printf("error %d copying the message type\n", eval); +#endif + msg_freehdr(msghdr); + msqptr->msg_perm.mode &= ~MSG_LOCKED; + wakeup((caddr_t)msqptr); + return(eval); + } + user_msgp = (char *)user_msgp + sizeof(msghdr->msg_type); + + /* + * Validate the message type + */ + + if (msghdr->msg_type < 1) { + msg_freehdr(msghdr); + msqptr->msg_perm.mode &= ~MSG_LOCKED; + wakeup((caddr_t)msqptr); +#ifdef MSG_DEBUG_OK + printf("mtype (%d) < 1\n", msghdr->msg_type); +#endif + return(EINVAL); + } + + /* + * Copy in the message body + */ + + next = msghdr->msg_spot; + while (msgsz > 0) { + size_t tlen; + if (msgsz > msginfo.msgssz) + tlen = msginfo.msgssz; + else + tlen = msgsz; + if (next <= -1) + panic("next too low #2"); + if (next >= msginfo.msgseg) + panic("next out of range #2"); + if ((eval = copyin(user_msgp, &msgpool[next * msginfo.msgssz], + tlen)) != 0) { +#ifdef MSG_DEBUG_OK + printf("error %d copying in message segment\n", eval); +#endif + msg_freehdr(msghdr); + msqptr->msg_perm.mode &= ~MSG_LOCKED; + wakeup((caddr_t)msqptr); + return(eval); + } + msgsz -= tlen; + user_msgp = (char *)user_msgp + tlen; + next = msgmaps[next].next; + } + if (next != -1) + panic("didn't use all the msg segments"); + + /* + * We've got the message. Unlock the msqid_ds. + */ + + msqptr->msg_perm.mode &= ~MSG_LOCKED; + + /* + * Make sure that the msqid_ds is still allocated. + */ + + if (msqptr->msg_qbytes == 0) { + msg_freehdr(msghdr); + wakeup((caddr_t)msqptr); + /* The SVID says to return EIDRM. */ +#ifdef EIDRM + return(EIDRM); +#else + /* Unfortunately, BSD doesn't define that code yet! 
*/ + return(EINVAL); +#endif + } + + /* + * Put the message into the queue + */ + + if (msqptr->msg_first == NULL) { + msqptr->msg_first = msghdr; + msqptr->msg_last = msghdr; + } else { + msqptr->msg_last->msg_next = msghdr; + msqptr->msg_last = msghdr; + } + msqptr->msg_last->msg_next = NULL; + + msqptr->msg_cbytes += msghdr->msg_ts; + msqptr->msg_qnum++; + msqptr->msg_lspid = p->p_pid; + msqptr->msg_stime = time_second; + + wakeup((caddr_t)msqptr); + p->p_retval[0] = 0; + return(0); +} + +#ifndef _SYS_SYSPROTO_H_ +struct msgrcv_args { + int msqid; + void *msgp; + size_t msgsz; + long msgtyp; + int msgflg; +}; +#endif + +int +msgrcv(p, uap) + struct proc *p; + register struct msgrcv_args *uap; +{ + int msqid = uap->msqid; + void *user_msgp = uap->msgp; + size_t msgsz = uap->msgsz; + long msgtyp = uap->msgtyp; + int msgflg = uap->msgflg; + size_t len; + struct ucred *cred = p->p_ucred; + register struct msqid_ds *msqptr; + register struct msg *msghdr; + int eval; + short next; + +#ifdef MSG_DEBUG_OK + printf("call to msgrcv(%d, 0x%x, %d, %ld, %d)\n", msqid, user_msgp, + msgsz, msgtyp, msgflg); +#endif + + msqid = IPCID_TO_IX(msqid); + + if (msqid < 0 || msqid >= msginfo.msgmni) { +#ifdef MSG_DEBUG_OK + printf("msqid (%d) out of range (0<=msqid<%d)\n", msqid, + msginfo.msgmni); +#endif + return(EINVAL); + } + + msqptr = &msqids[msqid]; + if (msqptr->msg_qbytes == 0) { +#ifdef MSG_DEBUG_OK + printf("no such message queue id\n"); +#endif + return(EINVAL); + } + if (msqptr->msg_perm.seq != IPCID_TO_SEQ(uap->msqid)) { +#ifdef MSG_DEBUG_OK + printf("wrong sequence number\n"); +#endif + return(EINVAL); + } + + if ((eval = ipcperm(cred, &msqptr->msg_perm, IPC_R))) { +#ifdef MSG_DEBUG_OK + printf("requester doesn't have read access\n"); +#endif + return(eval); + } + + msghdr = NULL; + while (msghdr == NULL) { + if (msgtyp == 0) { + msghdr = msqptr->msg_first; + if (msghdr != NULL) { + if (msgsz < msghdr->msg_ts && + (msgflg & MSG_NOERROR) == 0) { +#ifdef MSG_DEBUG_OK + 
printf("first message on the queue is too big (want %d, got %d)\n", + msgsz, msghdr->msg_ts); +#endif + return(E2BIG); + } + if (msqptr->msg_first == msqptr->msg_last) { + msqptr->msg_first = NULL; + msqptr->msg_last = NULL; + } else { + msqptr->msg_first = msghdr->msg_next; + if (msqptr->msg_first == NULL) + panic("msg_first/last messed up #1"); + } + } + } else { + struct msg *previous; + struct msg **prev; + + previous = NULL; + prev = &(msqptr->msg_first); + while ((msghdr = *prev) != NULL) { + /* + * Is this message's type an exact match or is + * this message's type less than or equal to + * the absolute value of a negative msgtyp? + * Note that the second half of this test can + * NEVER be true if msgtyp is positive since + * msg_type is always positive! + */ + + if (msgtyp == msghdr->msg_type || + msghdr->msg_type <= -msgtyp) { +#ifdef MSG_DEBUG_OK + printf("found message type %d, requested %d\n", + msghdr->msg_type, msgtyp); +#endif + if (msgsz < msghdr->msg_ts && + (msgflg & MSG_NOERROR) == 0) { +#ifdef MSG_DEBUG_OK + printf("requested message on the queue is too big (want %d, got %d)\n", + msgsz, msghdr->msg_ts); +#endif + return(E2BIG); + } + *prev = msghdr->msg_next; + if (msghdr == msqptr->msg_last) { + if (previous == NULL) { + if (prev != + &msqptr->msg_first) + panic("msg_first/last messed up #2"); + msqptr->msg_first = + NULL; + msqptr->msg_last = + NULL; + } else { + if (prev == + &msqptr->msg_first) + panic("msg_first/last messed up #3"); + msqptr->msg_last = + previous; + } + } + break; + } + previous = msghdr; + prev = &(msghdr->msg_next); + } + } + + /* + * We've either extracted the msghdr for the appropriate + * message or there isn't one. + * If there is one then bail out of this loop. + */ + + if (msghdr != NULL) + break; + + /* + * Hmph! No message found. Does the user want to wait? 
+ */ + + if ((msgflg & IPC_NOWAIT) != 0) { +#ifdef MSG_DEBUG_OK + printf("no appropriate message found (msgtyp=%d)\n", + msgtyp); +#endif + /* The SVID says to return ENOMSG. */ +#ifdef ENOMSG + return(ENOMSG); +#else + /* Unfortunately, BSD doesn't define that code yet! */ + return(EAGAIN); +#endif + } + + /* + * Wait for something to happen + */ + +#ifdef MSG_DEBUG_OK + printf("msgrcv: goodnight\n"); +#endif + eval = tsleep((caddr_t)msqptr, (PZERO - 4) | PCATCH, "msgwait", + 0); +#ifdef MSG_DEBUG_OK + printf("msgrcv: good morning (eval=%d)\n", eval); +#endif + + if (eval != 0) { +#ifdef MSG_DEBUG_OK + printf("msgsnd: interrupted system call\n"); +#endif + return(EINTR); + } + + /* + * Make sure that the msq queue still exists + */ + + if (msqptr->msg_qbytes == 0 || + msqptr->msg_perm.seq != IPCID_TO_SEQ(uap->msqid)) { +#ifdef MSG_DEBUG_OK + printf("msqid deleted\n"); +#endif + /* The SVID says to return EIDRM. */ +#ifdef EIDRM + return(EIDRM); +#else + /* Unfortunately, BSD doesn't define that code yet! */ + return(EINVAL); +#endif + } + } + + /* + * Return the message to the user. + * + * First, do the bookkeeping (before we risk being interrupted). + */ + + msqptr->msg_cbytes -= msghdr->msg_ts; + msqptr->msg_qnum--; + msqptr->msg_lrpid = p->p_pid; + msqptr->msg_rtime = time_second; + + /* + * Make msgsz the actual amount that we'll be returning. + * Note that this effectively truncates the message if it is too long + * (since msgsz is never increased). + */ + +#ifdef MSG_DEBUG_OK + printf("found a message, msgsz=%d, msg_ts=%d\n", msgsz, + msghdr->msg_ts); +#endif + if (msgsz > msghdr->msg_ts) + msgsz = msghdr->msg_ts; + + /* + * Return the type to the user. 
+ */ + + eval = copyout((caddr_t)&(msghdr->msg_type), user_msgp, + sizeof(msghdr->msg_type)); + if (eval != 0) { +#ifdef MSG_DEBUG_OK + printf("error (%d) copying out message type\n", eval); +#endif + msg_freehdr(msghdr); + wakeup((caddr_t)msqptr); + return(eval); + } + user_msgp = (char *)user_msgp + sizeof(msghdr->msg_type); + + /* + * Return the segments to the user + */ + + next = msghdr->msg_spot; + for (len = 0; len < msgsz; len += msginfo.msgssz) { + size_t tlen; + + if (msgsz > msginfo.msgssz) + tlen = msginfo.msgssz; + else + tlen = msgsz; + if (next <= -1) + panic("next too low #3"); + if (next >= msginfo.msgseg) + panic("next out of range #3"); + eval = copyout((caddr_t)&msgpool[next * msginfo.msgssz], + user_msgp, tlen); + if (eval != 0) { +#ifdef MSG_DEBUG_OK + printf("error (%d) copying out message segment\n", + eval); +#endif + msg_freehdr(msghdr); + wakeup((caddr_t)msqptr); + return(eval); + } + user_msgp = (char *)user_msgp + tlen; + next = msgmaps[next].next; + } + + /* + * Done, return the actual number of bytes copied out. + */ + + msg_freehdr(msghdr); + wakeup((caddr_t)msqptr); + p->p_retval[0] = msgsz; + return(0); +} diff --git a/bsd/kern/sysv_sem.c b/bsd/kern/sysv_sem.c new file mode 100644 index 000000000..b49976487 --- /dev/null +++ b/bsd/kern/sysv_sem.c @@ -0,0 +1,996 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Implementation of SVID semaphores + * + * Author: Daniel Boulet + * + * This software is provided ``AS IS'' without any warranties of any kind. + */ + +#include +#include +#include +#include +#include +#include +#include + +static void seminit __P((void *)); +SYSINIT(sysv_sem, SI_SUB_SYSV_SEM, SI_ORDER_FIRST, seminit, NULL) + +#ifndef _SYS_SYSPROTO_H_ +struct __semctl_args; +int __semctl __P((struct proc *p, struct __semctl_args *uap)); +struct semget_args; +int semget __P((struct proc *p, struct semget_args *uap)); +struct semop_args; +int semop __P((struct proc *p, struct semop_args *uap)); +struct semconfig_args; +int semconfig __P((struct proc *p, struct semconfig_args *uap)); +#endif + +static struct sem_undo *semu_alloc __P((struct proc *p)); +static int semundo_adjust __P((struct proc *p, struct sem_undo **supptr, + int semid, int semnum, int adjval)); +static void semundo_clear __P((int semid, int semnum)); + +/* XXX casting to (sy_call_t *) is bogus, as usual. 
*/ +static sy_call_t *semcalls[] = { + (sy_call_t *)__semctl, (sy_call_t *)semget, + (sy_call_t *)semop, (sy_call_t *)semconfig +}; + +static int semtot = 0; +struct semid_ds *sema; /* semaphore id pool */ +struct sem *sem; /* semaphore pool */ +static struct sem_undo *semu_list; /* list of active undo structures */ +int *semu; /* undo structure pool */ + +static struct proc *semlock_holder = NULL; + +void +seminit(dummy) + void *dummy; +{ + register int i; + + if (sema == NULL) + panic("sema is NULL"); + if (semu == NULL) + panic("semu is NULL"); + + for (i = 0; i < seminfo.semmni; i++) { + sema[i].sem_base = 0; + sema[i].sem_perm.mode = 0; + } + for (i = 0; i < seminfo.semmnu; i++) { + register struct sem_undo *suptr = SEMU(i); + suptr->un_proc = NULL; + } + semu_list = NULL; +} + +/* + * Entry point for all SEM calls + */ +int +semsys(p, uap) + struct proc *p; + /* XXX actually varargs. */ + struct semsys_args /* { + u_int which; + int a2; + int a3; + int a4; + int a5; + } */ *uap; +{ + + while (semlock_holder != NULL && semlock_holder != p) + (void) tsleep((caddr_t)&semlock_holder, (PZERO - 4), "semsys", 0); + + if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0])) + return (EINVAL); + return ((*semcalls[uap->which])(p, &uap->a2)); +} + +/* + * Lock or unlock the entire semaphore facility. + * + * This will probably eventually evolve into a general purpose semaphore + * facility status enquiry mechanism (I don't like the "read /dev/kmem" + * approach currently taken by ipcs and the amount of info that we want + * to be able to extract for ipcs is probably beyond what the capability + * of the getkerninfo facility. + * + * At the time that the current version of semconfig was written, ipcs is + * the only user of the semconfig facility. It uses it to ensure that the + * semaphore facility data structures remain static while it fishes around + * in /dev/kmem. 
+ */ + +#ifndef _SYS_SYSPROTO_H_ +struct semconfig_args { + semconfig_ctl_t flag; +}; +#endif + +int +semconfig(p, uap) + struct proc *p; + struct semconfig_args *uap; +{ + int eval = 0; + + switch (uap->flag) { + case SEM_CONFIG_FREEZE: + semlock_holder = p; + break; + + case SEM_CONFIG_THAW: + semlock_holder = NULL; + wakeup((caddr_t)&semlock_holder); + break; + + default: + printf("semconfig: unknown flag parameter value (%d) - ignored\n", + uap->flag); + eval = EINVAL; + break; + } + + p->p_retval[0] = 0; + return(eval); +} + +/* + * Allocate a new sem_undo structure for a process + * (returns ptr to structure or NULL if no more room) + */ + +static struct sem_undo * +semu_alloc(p) + struct proc *p; +{ + register int i; + register struct sem_undo *suptr; + register struct sem_undo **supptr; + int attempt; + + /* + * Try twice to allocate something. + * (we'll purge any empty structures after the first pass so + * two passes are always enough) + */ + + for (attempt = 0; attempt < 2; attempt++) { + /* + * Look for a free structure. + * Fill it in and return it if we find one. + */ + + for (i = 0; i < seminfo.semmnu; i++) { + suptr = SEMU(i); + if (suptr->un_proc == NULL) { + suptr->un_next = semu_list; + semu_list = suptr; + suptr->un_cnt = 0; + suptr->un_proc = p; + return(suptr); + } + } + + /* + * We didn't find a free one, if this is the first attempt + * then try to free some structures. + */ + + if (attempt == 0) { + /* All the structures are in use - try to free some */ + int did_something = 0; + + supptr = &semu_list; + while ((suptr = *supptr) != NULL) { + if (suptr->un_cnt == 0) { + suptr->un_proc = NULL; + *supptr = suptr->un_next; + did_something = 1; + } else + supptr = &(suptr->un_next); + } + + /* If we didn't free anything then just give-up */ + if (!did_something) + return(NULL); + } else { + /* + * The second pass failed even though we freed + * something after the first pass! + * This is IMPOSSIBLE! 
+ */ + panic("semu_alloc - second attempt failed"); + } + } + return (NULL); +} + +/* + * Adjust a particular entry for a particular proc + */ + +static int +semundo_adjust(p, supptr, semid, semnum, adjval) + register struct proc *p; + struct sem_undo **supptr; + int semid, semnum; + int adjval; +{ + register struct sem_undo *suptr; + register struct undo *sunptr; + int i; + + /* Look for and remember the sem_undo if the caller doesn't provide + it */ + + suptr = *supptr; + if (suptr == NULL) { + for (suptr = semu_list; suptr != NULL; + suptr = suptr->un_next) { + if (suptr->un_proc == p) { + *supptr = suptr; + break; + } + } + if (suptr == NULL) { + if (adjval == 0) + return(0); + suptr = semu_alloc(p); + if (suptr == NULL) + return(ENOSPC); + *supptr = suptr; + } + } + + /* + * Look for the requested entry and adjust it (delete if adjval becomes + * 0). + */ + sunptr = &suptr->un_ent[0]; + for (i = 0; i < suptr->un_cnt; i++, sunptr++) { + if (sunptr->un_id != semid || sunptr->un_num != semnum) + continue; + if (adjval == 0) + sunptr->un_adjval = 0; + else + sunptr->un_adjval += adjval; + if (sunptr->un_adjval == 0) { + suptr->un_cnt--; + if (i < suptr->un_cnt) + suptr->un_ent[i] = + suptr->un_ent[suptr->un_cnt]; + } + return(0); + } + + /* Didn't find the right entry - create it */ + if (adjval == 0) + return(0); + if (suptr->un_cnt != seminfo.semume) { + sunptr = &suptr->un_ent[suptr->un_cnt]; + suptr->un_cnt++; + sunptr->un_adjval = adjval; + sunptr->un_id = semid; sunptr->un_num = semnum; + } else + return(EINVAL); + return(0); +} + +static void +semundo_clear(semid, semnum) + int semid, semnum; +{ + register struct sem_undo *suptr; + + for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next) { + register struct undo *sunptr = &suptr->un_ent[0]; + register int i = 0; + + while (i < suptr->un_cnt) { + if (sunptr->un_id == semid) { + if (semnum == -1 || sunptr->un_num == semnum) { + suptr->un_cnt--; + if (i < suptr->un_cnt) { + suptr->un_ent[i] = + 
suptr->un_ent[suptr->un_cnt]; + continue; + } + } + if (semnum != -1) + break; + } + i++, sunptr++; + } + } +} + +/* + * Note that the user-mode half of this passes a union, not a pointer + */ +#ifndef _SYS_SYSPROTO_H_ +struct __semctl_args { + int semid; + int semnum; + int cmd; + union semun *arg; +}; +#endif + +int +__semctl(p, uap) + struct proc *p; + register struct __semctl_args *uap; +{ + int semid = uap->semid; + int semnum = uap->semnum; + int cmd = uap->cmd; + union semun *arg = uap->arg; + union semun real_arg; + struct ucred *cred = p->p_ucred; + int i, rval, eval; + struct semid_ds sbuf; + register struct semid_ds *semaptr; + +#ifdef SEM_DEBUG + printf("call to semctl(%d, %d, %d, 0x%x)\n", semid, semnum, cmd, arg); +#endif + + semid = IPCID_TO_IX(semid); + if (semid < 0 || semid >= seminfo.semmsl) + return(EINVAL); + + semaptr = &sema[semid]; + if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 || + semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) + return(EINVAL); + + eval = 0; + rval = 0; + + switch (cmd) { + case IPC_RMID: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_M))) + return(eval); + semaptr->sem_perm.cuid = cred->cr_uid; + semaptr->sem_perm.uid = cred->cr_uid; + semtot -= semaptr->sem_nsems; + for (i = semaptr->sem_base - sem; i < semtot; i++) + sem[i] = sem[i + semaptr->sem_nsems]; + for (i = 0; i < seminfo.semmni; i++) { + if ((sema[i].sem_perm.mode & SEM_ALLOC) && + sema[i].sem_base > semaptr->sem_base) + sema[i].sem_base -= semaptr->sem_nsems; + } + semaptr->sem_perm.mode = 0; + semundo_clear(semid, -1); + wakeup((caddr_t)semaptr); + break; + + case IPC_SET: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_M))) + return(eval); + if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0) + return(eval); + if ((eval = copyin(real_arg.buf, (caddr_t)&sbuf, + sizeof(sbuf))) != 0) + return(eval); + semaptr->sem_perm.uid = sbuf.sem_perm.uid; + semaptr->sem_perm.gid = sbuf.sem_perm.gid; + semaptr->sem_perm.mode = (semaptr->sem_perm.mode & 
~0777) | + (sbuf.sem_perm.mode & 0777); + semaptr->sem_ctime = time_second; + break; + + case IPC_STAT: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R))) + return(eval); + if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0) + return(eval); + eval = copyout((caddr_t)semaptr, real_arg.buf, + sizeof(struct semid_ds)); + break; + + case GETNCNT: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R))) + return(eval); + if (semnum < 0 || semnum >= semaptr->sem_nsems) + return(EINVAL); + rval = semaptr->sem_base[semnum].semncnt; + break; + + case GETPID: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R))) + return(eval); + if (semnum < 0 || semnum >= semaptr->sem_nsems) + return(EINVAL); + rval = semaptr->sem_base[semnum].sempid; + break; + + case GETVAL: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R))) + return(eval); + if (semnum < 0 || semnum >= semaptr->sem_nsems) + return(EINVAL); + rval = semaptr->sem_base[semnum].semval; + break; + + case GETALL: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R))) + return(eval); + if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0) + return(eval); + for (i = 0; i < semaptr->sem_nsems; i++) { + eval = copyout((caddr_t)&semaptr->sem_base[i].semval, + &real_arg.array[i], sizeof(real_arg.array[0])); + if (eval != 0) + break; + } + break; + + case GETZCNT: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_R))) + return(eval); + if (semnum < 0 || semnum >= semaptr->sem_nsems) + return(EINVAL); + rval = semaptr->sem_base[semnum].semzcnt; + break; + + case SETVAL: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W))) + return(eval); + if (semnum < 0 || semnum >= semaptr->sem_nsems) + return(EINVAL); + if ((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0) + return(eval); + semaptr->sem_base[semnum].semval = real_arg.val; + semundo_clear(semid, semnum); + wakeup((caddr_t)semaptr); + break; + + case SETALL: + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W))) + return(eval); + if 
((eval = copyin(arg, &real_arg, sizeof(real_arg))) != 0) + return(eval); + for (i = 0; i < semaptr->sem_nsems; i++) { + eval = copyin(&real_arg.array[i], + (caddr_t)&semaptr->sem_base[i].semval, + sizeof(real_arg.array[0])); + if (eval != 0) + break; + } + semundo_clear(semid, -1); + wakeup((caddr_t)semaptr); + break; + + default: + return(EINVAL); + } + + if (eval == 0) + p->p_retval[0] = rval; + return(eval); +} + +#ifndef _SYS_SYSPROTO_H_ +struct semget_args { + key_t key; + int nsems; + int semflg; +}; +#endif + +int +semget(p, uap) + struct proc *p; + register struct semget_args *uap; +{ + int semid, eval; + int key = uap->key; + int nsems = uap->nsems; + int semflg = uap->semflg; + struct ucred *cred = p->p_ucred; + +#ifdef SEM_DEBUG + printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg); +#endif + + if (key != IPC_PRIVATE) { + for (semid = 0; semid < seminfo.semmni; semid++) { + if ((sema[semid].sem_perm.mode & SEM_ALLOC) && + sema[semid].sem_perm.key == key) + break; + } + if (semid < seminfo.semmni) { +#ifdef SEM_DEBUG + printf("found public key\n"); +#endif + if ((eval = ipcperm(cred, &sema[semid].sem_perm, + semflg & 0700))) + return(eval); + if (nsems > 0 && sema[semid].sem_nsems < nsems) { +#ifdef SEM_DEBUG + printf("too small\n"); +#endif + return(EINVAL); + } + if ((semflg & IPC_CREAT) && (semflg & IPC_EXCL)) { +#ifdef SEM_DEBUG + printf("not exclusive\n"); +#endif + return(EEXIST); + } + goto found; + } + } + +#ifdef SEM_DEBUG + printf("need to allocate the semid_ds\n"); +#endif + if (key == IPC_PRIVATE || (semflg & IPC_CREAT)) { + if (nsems <= 0 || nsems > seminfo.semmsl) { +#ifdef SEM_DEBUG + printf("nsems out of range (0<%d<=%d)\n", nsems, + seminfo.semmsl); +#endif + return(EINVAL); + } + if (nsems > seminfo.semmns - semtot) { +#ifdef SEM_DEBUG + printf("not enough semaphores left (need %d, got %d)\n", + nsems, seminfo.semmns - semtot); +#endif + return(ENOSPC); + } + for (semid = 0; semid < seminfo.semmni; semid++) { + if 
((sema[semid].sem_perm.mode & SEM_ALLOC) == 0) + break; + } + if (semid == seminfo.semmni) { +#ifdef SEM_DEBUG + printf("no more semid_ds's available\n"); +#endif + return(ENOSPC); + } +#ifdef SEM_DEBUG + printf("semid %d is available\n", semid); +#endif + sema[semid].sem_perm.key = key; + sema[semid].sem_perm.cuid = cred->cr_uid; + sema[semid].sem_perm.uid = cred->cr_uid; + sema[semid].sem_perm.cgid = cred->cr_gid; + sema[semid].sem_perm.gid = cred->cr_gid; + sema[semid].sem_perm.mode = (semflg & 0777) | SEM_ALLOC; + sema[semid].sem_perm.seq = + (sema[semid].sem_perm.seq + 1) & 0x7fff; + sema[semid].sem_nsems = nsems; + sema[semid].sem_otime = 0; + sema[semid].sem_ctime = time_second; + sema[semid].sem_base = &sem[semtot]; + semtot += nsems; + bzero(sema[semid].sem_base, + sizeof(sema[semid].sem_base[0])*nsems); +#ifdef SEM_DEBUG + printf("sembase = 0x%x, next = 0x%x\n", sema[semid].sem_base, + &sem[semtot]); +#endif + } else { +#ifdef SEM_DEBUG + printf("didn't find it and wasn't asked to create it\n"); +#endif + return(ENOENT); + } + +found: + p->p_retval[0] = IXSEQ_TO_IPCID(semid, sema[semid].sem_perm); + return(0); +} + +#ifndef _SYS_SYSPROTO_H_ +struct semop_args { + int semid; + struct sembuf *sops; + int nsops; +}; +#endif + +int +semop(p, uap) + struct proc *p; + register struct semop_args *uap; +{ + int semid = uap->semid; + int nsops = uap->nsops; + struct sembuf sops[MAX_SOPS]; + register struct semid_ds *semaptr; + register struct sembuf *sopptr; + register struct sem *semptr; + struct sem_undo *suptr = NULL; + struct ucred *cred = p->p_ucred; + int i, j, eval; + int do_wakeup, do_undos; + +#ifdef SEM_DEBUG + printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops); +#endif + + semid = IPCID_TO_IX(semid); /* Convert back to zero origin */ + + if (semid < 0 || semid >= seminfo.semmsl) + return(EINVAL); + + semaptr = &sema[semid]; + if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0) + return(EINVAL); + if (semaptr->sem_perm.seq != 
IPCID_TO_SEQ(uap->semid)) + return(EINVAL); + + if ((eval = ipcperm(cred, &semaptr->sem_perm, IPC_W))) { +#ifdef SEM_DEBUG + printf("eval = %d from ipaccess\n", eval); +#endif + return(eval); + } + + if (nsops > MAX_SOPS) { +#ifdef SEM_DEBUG + printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops); +#endif + return(E2BIG); + } + + if ((eval = copyin(uap->sops, &sops, nsops * sizeof(sops[0]))) != 0) { +#ifdef SEM_DEBUG + printf("eval = %d from copyin(%08x, %08x, %d)\n", eval, + uap->sops, &sops, nsops * sizeof(sops[0])); +#endif + return(eval); + } + + /* + * Loop trying to satisfy the vector of requests. + * If we reach a point where we must wait, any requests already + * performed are rolled back and we go to sleep until some other + * process wakes us up. At this point, we start all over again. + * + * This ensures that from the perspective of other tasks, a set + * of requests is atomic (never partially satisfied). + */ + do_undos = 0; + + for (;;) { + do_wakeup = 0; + + for (i = 0; i < nsops; i++) { + sopptr = &sops[i]; + + if (sopptr->sem_num >= semaptr->sem_nsems) + return(EFBIG); + + semptr = &semaptr->sem_base[sopptr->sem_num]; + +#ifdef SEM_DEBUG + printf("semop: semaptr=%x, sem_base=%x, semptr=%x, sem[%d]=%d : op=%d, flag=%s\n", + semaptr, semaptr->sem_base, semptr, + sopptr->sem_num, semptr->semval, sopptr->sem_op, + (sopptr->sem_flg & IPC_NOWAIT) ? 
"nowait" : "wait"); +#endif + + if (sopptr->sem_op < 0) { + if (semptr->semval + sopptr->sem_op < 0) { +#ifdef SEM_DEBUG + printf("semop: can't do it now\n"); +#endif + break; + } else { + semptr->semval += sopptr->sem_op; + if (semptr->semval == 0 && + semptr->semzcnt > 0) + do_wakeup = 1; + } + if (sopptr->sem_flg & SEM_UNDO) + do_undos = 1; + } else if (sopptr->sem_op == 0) { + if (semptr->semval > 0) { +#ifdef SEM_DEBUG + printf("semop: not zero now\n"); +#endif + break; + } + } else { + if (semptr->semncnt > 0) + do_wakeup = 1; + semptr->semval += sopptr->sem_op; + if (sopptr->sem_flg & SEM_UNDO) + do_undos = 1; + } + } + + /* + * Did we get through the entire vector? + */ + if (i >= nsops) + goto done; + + /* + * No ... rollback anything that we've already done + */ +#ifdef SEM_DEBUG + printf("semop: rollback 0 through %d\n", i-1); +#endif + for (j = 0; j < i; j++) + semaptr->sem_base[sops[j].sem_num].semval -= + sops[j].sem_op; + + /* + * If the request that we couldn't satisfy has the + * NOWAIT flag set then return with EAGAIN. + */ + if (sopptr->sem_flg & IPC_NOWAIT) + return(EAGAIN); + + if (sopptr->sem_op == 0) + semptr->semzcnt++; + else + semptr->semncnt++; + +#ifdef SEM_DEBUG + printf("semop: good night!\n"); +#endif + eval = tsleep((caddr_t)semaptr, (PZERO - 4) | PCATCH, + "semwait", 0); +#ifdef SEM_DEBUG + printf("semop: good morning (eval=%d)!\n", eval); +#endif + + suptr = NULL; /* sem_undo may have been reallocated */ + + if (eval != 0) + return(EINTR); +#ifdef SEM_DEBUG + printf("semop: good morning!\n"); +#endif + + /* + * Make sure that the semaphore still exists + */ + if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0 || + semaptr->sem_perm.seq != IPCID_TO_SEQ(uap->semid)) { + /* The man page says to return EIDRM. */ + /* Unfortunately, BSD doesn't define that code! */ +#ifdef EIDRM + return(EIDRM); +#else + return(EINVAL); +#endif + } + + /* + * The semaphore is still alive. Readjust the count of + * waiting processes. 
 */
		/* We were counted as a waiter across the sleep; uncount. */
		if (sopptr->sem_op == 0)
			semptr->semzcnt--;
		else
			semptr->semncnt--;
	}

done:
	/*
	 * Process any SEM_UNDO requests.
	 */
	if (do_undos) {
		for (i = 0; i < nsops; i++) {
			/*
			 * We only need to deal with SEM_UNDO's for non-zero
			 * op's.
			 */
			int adjval;

			if ((sops[i].sem_flg & SEM_UNDO) == 0)
				continue;
			adjval = sops[i].sem_op;
			if (adjval == 0)
				continue;
			/* Record the inverse adjustment so exit undoes the op. */
			eval = semundo_adjust(p, &suptr, semid,
			    sops[i].sem_num, -adjval);
			if (eval == 0)
				continue;

			/*
			 * Oh-Oh!  We ran out of either sem_undo's or undo's.
			 * Rollback the adjustments to this point and then
			 * rollback the semaphore ups and down so we can return
			 * with an error with all structures restored.  We
			 * rollback the undo's in the exact reverse order that
			 * we applied them.  This guarantees that we won't run
			 * out of space as we roll things back out.
			 */
			for (j = i - 1; j >= 0; j--) {
				if ((sops[j].sem_flg & SEM_UNDO) == 0)
					continue;
				adjval = sops[j].sem_op;
				if (adjval == 0)
					continue;
				if (semundo_adjust(p, &suptr, semid,
				    sops[j].sem_num, adjval) != 0)
					panic("semop - can't undo undos");
			}

			/* Also roll back the semaphore values themselves. */
			for (j = 0; j < nsops; j++)
				semaptr->sem_base[sops[j].sem_num].semval -=
				    sops[j].sem_op;

#ifdef SEM_DEBUG
			printf("eval = %d from semundo_adjust\n", eval);
#endif
			return(eval);
		} /* loop through the sops */
	} /* if (do_undos) */

	/* We're definitely done - set the sempid's */
	for (i = 0; i < nsops; i++) {
		sopptr = &sops[i];
		semptr = &semaptr->sem_base[sopptr->sem_num];
		semptr->sempid = p->p_pid;
	}

	/* Do a wakeup if any semaphore was up'd.
 */
	if (do_wakeup) {
#ifdef SEM_DEBUG
		/*
		 * Debug build: bracket the wakeup with printfs.  NOTE(review):
		 * when SEM_DEBUG is undefined the SEM_WAKEUP choice below is
		 * skipped and plain wakeup() is always used, unlike semexit()
		 * which honors SEM_WAKEUP unconditionally - confirm intended.
		 */
		printf("semop: doing wakeup\n");
#ifdef SEM_WAKEUP
		sem_wakeup((caddr_t)semaptr);
#else
		wakeup((caddr_t)semaptr);
#endif
		printf("semop: back from wakeup\n");
#else
		wakeup((caddr_t)semaptr);
#endif
	}
#ifdef SEM_DEBUG
	printf("semop: done\n");
#endif
	p->p_retval[0] = 0;
	return(0);
}

/*
 * Go through the undo structures for this process and apply the adjustments to
 * semaphores.
 */
void
semexit(p)
	struct proc *p;
{
	register struct sem_undo *suptr;
	register struct sem_undo **supptr;
	int did_something;	/* NOTE(review): set below but never read in the visible code */

	/*
	 * If somebody else is holding the global semaphore facility lock
	 * then sleep until it is released.
	 */
	while (semlock_holder != NULL && semlock_holder != p) {
#ifdef SEM_DEBUG
		printf("semaphore facility locked - sleeping ...\n");
#endif
		(void) tsleep((caddr_t)&semlock_holder, (PZERO - 4), "semext", 0);
	}

	did_something = 0;

	/*
	 * Go through the chain of undo vectors looking for one
	 * associated with this process.
	 */

	for (supptr = &semu_list; (suptr = *supptr) != NULL;
	    supptr = &suptr->un_next) {
		if (suptr->un_proc == p)
			break;
	}

	if (suptr == NULL)
		goto unlock;

#ifdef SEM_DEBUG
	printf("proc @%08x has undo structure with %d entries\n", p,
	    suptr->un_cnt);
#endif

	/*
	 * If there are any active undo elements then process them.
 */
	if (suptr->un_cnt > 0) {
		int ix;

		for (ix = 0; ix < suptr->un_cnt; ix++) {
			int semid = suptr->un_ent[ix].un_id;
			int semnum = suptr->un_ent[ix].un_num;
			int adjval = suptr->un_ent[ix].un_adjval;
			struct semid_ds *semaptr;

			semaptr = &sema[semid];
			if ((semaptr->sem_perm.mode & SEM_ALLOC) == 0)
				panic("semexit - semid not allocated");
			if (semnum >= semaptr->sem_nsems)
				panic("semexit - semnum out of range");

#ifdef SEM_DEBUG
			printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n",
			    suptr->un_proc, suptr->un_ent[ix].un_id,
			    suptr->un_ent[ix].un_num,
			    suptr->un_ent[ix].un_adjval,
			    semaptr->sem_base[semnum].semval);
#endif

			/* Apply the recorded adjustment, clamping at zero. */
			if (adjval < 0) {
				if (semaptr->sem_base[semnum].semval < -adjval)
					semaptr->sem_base[semnum].semval = 0;
				else
					semaptr->sem_base[semnum].semval +=
					    adjval;
			} else
				semaptr->sem_base[semnum].semval += adjval;

#ifdef SEM_WAKEUP
			sem_wakeup((caddr_t)semaptr);
#else
			wakeup((caddr_t)semaptr);
#endif
#ifdef SEM_DEBUG
			printf("semexit: back from wakeup\n");
#endif
		}
	}

	/*
	 * Deallocate the undo vector.
	 */
#ifdef SEM_DEBUG
	printf("removing vector\n");
#endif
	/* Unlink from semu_list; NULL un_proc marks the structure free. */
	suptr->un_proc = NULL;
	*supptr = suptr->un_next;

unlock:
	/*
	 * If the exiting process is holding the global semaphore facility
	 * lock then release it.
	 */
	if (semlock_holder == p) {
		semlock_holder = NULL;
		wakeup((caddr_t)&semlock_holder);
	}
}
diff --git a/bsd/kern/sysv_shm.c b/bsd/kern/sysv_shm.c
new file mode 100644
index 000000000..dc25f2efc
--- /dev/null
+++ b/bsd/kern/sysv_shm.c
@@ -0,0 +1,670 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.
Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Adam Glass and Charles
 *	Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


/*
 * NOTE(review): the header names on the following #include directives were
 * lost when this patch was extracted (the <...> operands are missing) -
 * restore from the original xnu bsd/kern/sysv_shm.c before compiling.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include

struct shmat_args;
extern int shmat __P((struct proc *p, struct shmat_args *uap, int *retval));
struct shmctl_args;
extern int shmctl __P((struct proc *p, struct shmctl_args *uap, int *retval));
struct shmdt_args;
extern int shmdt __P((struct proc *p, struct shmdt_args *uap, int *retval));
struct shmget_args;
extern int shmget __P((struct proc *p, struct shmget_args *uap, int *retval));

#if 0
static void shminit __P((void *));
SYSINIT(sysv_shm, SI_SUB_SYSV_SHM, SI_ORDER_FIRST, shminit, NULL)
/* NOTE(review): stray "0" token after #endif below - compilers warn on it */
#endif 0

struct oshmctl_args;
static int oshmctl __P((struct proc *p, struct oshmctl_args *uap, int * retval));
static int shmget_allocate_segment __P((struct proc *p, struct shmget_args *uap, int mode, int * retval));
static int shmget_existing __P((struct proc *p, struct shmget_args *uap, int mode, int segnum, int * retval));

typedef	int sy_call_t __P((struct proc *, void *, int *));

/* XXX casting to (sy_call_t *) is bogus, as usual.
 */
/* Dispatch table for shmsys(); indexed by uap->which. */
static sy_call_t *shmcalls[] = {
	(sy_call_t *)shmat, (sy_call_t *)oshmctl,
	(sy_call_t *)shmdt, (sy_call_t *)shmget,
	(sy_call_t *)shmctl
};

/* Segment state bits kept in shm_perm.mode above the 0777 permissions. */
#define	SHMSEG_FREE		0x0200
#define	SHMSEG_REMOVED		0x0400
#define	SHMSEG_ALLOCATED	0x0800
#define	SHMSEG_WANTED		0x1000

static int shm_last_free, shm_nused, shm_committed;
struct shmid_ds *shmsegs;	/* segment descriptor pool, shminfo.shmmni entries */

struct shm_handle {
	/* vm_offset_t kva; */
	void * shm_object;	/* Mach memory entry backing the segment */
};

/* Per-process attach slot: one per possible attach (shminfo.shmseg). */
struct shmmap_state {
	vm_offset_t va;
	int shmid;		/* -1 when the slot is free */
};

static void shm_deallocate_segment __P((struct shmid_ds *));
static int shm_find_segment_by_key __P((key_t));
static struct shmid_ds *shm_find_segment_by_shmid __P((int));
static int shm_delete_mapping __P((struct proc *, struct shmmap_state *));

/*
 * Return the index of the allocated segment with the given key,
 * or -1 if none exists.
 */
static int
shm_find_segment_by_key(key)
	key_t key;
{
	int i;

	for (i = 0; i < shminfo.shmmni; i++)
		if ((shmsegs[i].shm_perm.mode & SHMSEG_ALLOCATED) &&
		    shmsegs[i].shm_perm.key == key)
			return i;
	return -1;
}

/*
 * Validate a user-supplied shmid (index + sequence) and return the
 * matching live descriptor, or NULL if stale/removed/out of range.
 */
static struct shmid_ds *
shm_find_segment_by_shmid(shmid)
	int shmid;
{
	int segnum;
	struct shmid_ds *shmseg;

	segnum = IPCID_TO_IX(shmid);
	if (segnum < 0 || segnum >= shminfo.shmmni)
		return NULL;
	shmseg = &shmsegs[segnum];
	if ((shmseg->shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
	    != SHMSEG_ALLOCATED ||
	    shmseg->shm_perm.seq != IPCID_TO_SEQ(shmid))
		return NULL;
	return shmseg;
}

/*
 * Release the backing memory entry and mark the descriptor free.
 * Caller must have dropped the last attachment.
 */
static void
shm_deallocate_segment(shmseg)
	struct shmid_ds *shmseg;
{
	struct shm_handle *shm_handle;
	struct shmmap_state *shmmap_s=NULL;	/* NOTE(review): unused local */
	size_t size;
	char * ptr;				/* NOTE(review): unused local */

	shm_handle = shmseg->shm_internal;
	size = round_page(shmseg->shm_segsz);
	mach_destroy_memory_entry(shm_handle->shm_object);
	FREE((caddr_t)shm_handle, M_SHM);
	shmseg->shm_internal = NULL;
	shm_committed -= btoc(size);
	shm_nused--;
	shmseg->shm_perm.mode = SHMSEG_FREE;
}

/*
 * Detach one mapping from the current map; if this was the last attach
 * of a segment already marked REMOVED, free the segment as well.
 */
static int
shm_delete_mapping(p, shmmap_s)
	struct proc *p;
	struct shmmap_state *shmmap_s;
{
	struct shmid_ds *shmseg;
	int segnum, result;
	size_t size;

	segnum = IPCID_TO_IX(shmmap_s->shmid);
	shmseg = &shmsegs[segnum];
	size = round_page(shmseg->shm_segsz);
	result = vm_deallocate(current_map(), shmmap_s->va, size);
	if (result != KERN_SUCCESS)
		return EINVAL;
	shmmap_s->shmid = -1;
	shmseg->shm_dtime = time_second;
	if ((--shmseg->shm_nattch <= 0) &&
	    (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
		shm_deallocate_segment(shmseg);
		shm_last_free = segnum;
	}
	return 0;
}

struct shmdt_args {
	void *shmaddr;
};

/*
 * shmdt(2): detach the mapping whose attach address matches shmaddr.
 */
int
shmdt(p, uap, retval)
	struct proc *p;
	struct shmdt_args *uap;
	register_t *retval;
{
	struct shmmap_state *shmmap_s;
	int i;

	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL)
		return EINVAL;
	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
		if (shmmap_s->shmid != -1 &&
		    shmmap_s->va == (vm_offset_t)uap->shmaddr)
			break;
	if (i == shminfo.shmseg)
		return EINVAL;
	return shm_delete_mapping(p, shmmap_s);
}

#ifndef _SYS_SYSPROTO_H_
struct shmat_args {
	int shmid;
	void *shmaddr;
	int shmflg;
};
#endif

/*
 * shmat(2): map a segment into the calling task, at shmaddr if given
 * (SHM_RND rounds it down to an SHMLBA boundary) or anywhere otherwise.
 * Returns the attach address in *retval.
 */
int
shmat(p, uap, retval)
	struct proc *p;
	struct shmat_args *uap;
	register_t *retval;
{
	int error, i, flags;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shmmap_state *shmmap_s = NULL;
	struct shm_handle *shm_handle;
	vm_offset_t attach_va;
	vm_prot_t prot;
	vm_size_t size;
	kern_return_t rv;

	/* Lazily allocate the per-process attach table on first use. */
	shmmap_s = (struct shmmap_state *)p->vm_shm;
	if (shmmap_s == NULL) {
		size = shminfo.shmseg * sizeof(struct shmmap_state);
		shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK);
		for (i = 0; i < shminfo.shmseg; i++)
			shmmap_s[i].shmid = -1;
		p->vm_shm = (caddr_t)shmmap_s;
	}
	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	error = ipcperm(cred, &shmseg->shm_perm,
	    (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
	if (error)
		return error;
	/* Find a free attach slot. */
	for (i = 0; i < shminfo.shmseg; i++) {
		if (shmmap_s->shmid == -1)
			break;
		shmmap_s++;
	}
	if (i >= shminfo.shmseg)
		return EMFILE;
	size = round_page(shmseg->shm_segsz);
	prot = VM_PROT_READ;
	if ((uap->shmflg & SHM_RDONLY) == 0)
		prot |= VM_PROT_WRITE;
	flags = MAP_ANON | MAP_SHARED;
	if (uap->shmaddr) {
		flags |= MAP_FIXED;
		if (uap->shmflg & SHM_RND)
			attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);
		else if (((vm_offset_t)uap->shmaddr & (SHMLBA-1)) == 0)
			attach_va = (vm_offset_t)uap->shmaddr;
		else
			return EINVAL;
	} else {
		/* shmaddr is NULL here, so this just starts the search at 0. */
		attach_va = round_page(uap->shmaddr);
	}

	shm_handle = shmseg->shm_internal;
	rv = vm_map(current_map(), &attach_va, size, 0, (flags & MAP_FIXED)? FALSE: TRUE,
		shm_handle->shm_object, 0, FALSE, prot, prot, VM_INHERIT_DEFAULT);
	if (rv != KERN_SUCCESS)
		goto out;
	rv = vm_inherit(current_map(), attach_va, size,
		VM_INHERIT_SHARE);
	if (rv != KERN_SUCCESS) {
		/* Undo the mapping if inheritance could not be set. */
		(void) vm_deallocate(current_map(), attach_va, size);
		goto out;
	}

	shmmap_s->va = attach_va;
	shmmap_s->shmid = uap->shmid;
	shmseg->shm_lpid = p->p_pid;
	shmseg->shm_atime = time_second;
	shmseg->shm_nattch++;
	*retval = attach_va;
	return( 0);
out:
	/* Translate Mach kern_return_t codes to errno values. */
	switch (rv) {
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}

}

/* Old (4.3BSD-compatible) shmid_ds layout used by oshmctl(). */
struct oshmid_ds {
	struct	ipc_perm shm_perm;	/* operation perms */
	int	shm_segsz;		/* size of segment (bytes) */
	ushort	shm_cpid;		/* pid, creator */
	ushort	shm_lpid;		/* pid, last operation */
	short	shm_nattch;		/* no.
of current attaches */
	time_t	shm_atime;		/* last attach time */
	time_t	shm_dtime;		/* last detach time */
	time_t	shm_ctime;		/* last change time */
	void	*shm_handle;		/* internal handle for shm segment */
};

struct oshmctl_args {
	int shmid;
	int cmd;
	struct oshmid_ds *ubuf;
};

/*
 * Old-style shmctl(2) for 4.3BSD compatibility: handles IPC_STAT with
 * the old struct layout, forwards everything else to shmctl().
 * Compiled to EINVAL unless COMPAT_43 is defined.
 */
static int
oshmctl(p, uap, retval)
	struct proc *p;
	struct oshmctl_args *uap;
	register_t *retval;
{
#ifdef COMPAT_43
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct oshmid_ds outbuf;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		/* Repack the modern descriptor into the old layout. */
		outbuf.shm_perm = shmseg->shm_perm;
		outbuf.shm_segsz = shmseg->shm_segsz;
		outbuf.shm_cpid = shmseg->shm_cpid;
		outbuf.shm_lpid = shmseg->shm_lpid;
		outbuf.shm_nattch = shmseg->shm_nattch;
		outbuf.shm_atime = shmseg->shm_atime;
		outbuf.shm_dtime = shmseg->shm_dtime;
		outbuf.shm_ctime = shmseg->shm_ctime;
		outbuf.shm_handle = shmseg->shm_internal;
		error = copyout((caddr_t)&outbuf, uap->ubuf, sizeof(outbuf));
		if (error)
			return error;
		break;
	default:
		/* XXX casting to (sy_call_t *) is bogus, as usual.
 */
		return ((sy_call_t *)shmctl)(p, uap, retval);
	}
	return 0;
#else
	return EINVAL;
#endif
}

#ifndef _SYS_SYSPROTO_H_
struct shmctl_args {
	int shmid;
	int cmd;
	struct shmid_ds *buf;
};
#endif

/*
 * shmctl(2): IPC_STAT copies the descriptor out; IPC_SET updates
 * owner/group/permission bits; IPC_RMID marks the segment removed
 * (freed immediately if unattached, otherwise on last detach).
 */
int
shmctl(p, uap, retval)
	struct proc *p;
	struct shmctl_args *uap;
	register_t *retval;
{
	int error;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds inbuf;
	struct shmid_ds *shmseg;

	shmseg = shm_find_segment_by_shmid(uap->shmid);
	if (shmseg == NULL)
		return EINVAL;
	switch (uap->cmd) {
	case IPC_STAT:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_R);
		if (error)
			return error;
		error = copyout((caddr_t)shmseg, uap->buf, sizeof(inbuf));
		if (error)
			return error;
		break;
	case IPC_SET:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		error = copyin(uap->buf, (caddr_t)&inbuf, sizeof(inbuf));
		if (error)
			return error;
		shmseg->shm_perm.uid = inbuf.shm_perm.uid;
		shmseg->shm_perm.gid = inbuf.shm_perm.gid;
		shmseg->shm_perm.mode =
		    (shmseg->shm_perm.mode & ~ACCESSPERMS) |
		    (inbuf.shm_perm.mode & ACCESSPERMS);
		shmseg->shm_ctime = time_second;
		break;
	case IPC_RMID:
		error = ipcperm(cred, &shmseg->shm_perm, IPC_M);
		if (error)
			return error;
		/* Hide the key so new shmget() lookups no longer find it. */
		shmseg->shm_perm.key = IPC_PRIVATE;
		shmseg->shm_perm.mode |= SHMSEG_REMOVED;
		if (shmseg->shm_nattch <= 0) {
			shm_deallocate_segment(shmseg);
			shm_last_free = IPCID_TO_IX(uap->shmid);
		}
		break;
#if 0
	case SHM_LOCK:
	case SHM_UNLOCK:
#endif
	default:
		return EINVAL;
	}
	return 0;
}

#ifndef _SYS_SYSPROTO_H_
struct shmget_args {
	key_t key;
	size_t size;
	int shmflg;
};
#endif

/*
 * shmget() helper for a key that matched an existing segment: verify
 * permissions, size and CREAT|EXCL semantics, then return its ipc id.
 */
static int
shmget_existing(p, uap, mode, segnum, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int segnum;
	int *retval;
{
	struct shmid_ds *shmseg;
	struct ucred *cred = p->p_ucred;
	int error;

	shmseg = &shmsegs[segnum];
	if (shmseg->shm_perm.mode & SHMSEG_REMOVED) {
		/*
		 * This segment is in the process of
being allocated.  Wait
		 * until it's done, and look the key up again (in case the
		 * allocation failed or it was freed).
		 */
		shmseg->shm_perm.mode |= SHMSEG_WANTED;
		error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
		if (error)
			return error;
		/* EAGAIN tells shmget() to retry the key lookup. */
		return EAGAIN;
	}
	error = ipcperm(cred, &shmseg->shm_perm, mode);
	if (error)
		return error;
	if (uap->size && uap->size > shmseg->shm_segsz)
		return EINVAL;
	if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
		return EEXIST;
	*retval = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm);
	return 0;
}

/*
 * shmget() helper for creating a new segment: pick a free descriptor,
 * allocate and wire up the backing Mach memory entry, initialize the
 * descriptor, and return the new ipc id in *retval.
 */
static int
shmget_allocate_segment(p, uap, mode, retval)
	struct proc *p;
	struct shmget_args *uap;
	int mode;
	int * retval;
{
	int i, segnum, shmid, size;
	struct ucred *cred = p->p_ucred;
	struct shmid_ds *shmseg;
	struct shm_handle *shm_handle;
	kern_return_t kret;
	vm_offset_t user_addr;
	void * mem_object;

	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
		return EINVAL;
	if (shm_nused >= shminfo.shmmni) /* any shmids left? */
		return ENOSPC;
	size = round_page(uap->size);
	if (shm_committed + btoc(size) > shminfo.shmall)
		return ENOMEM;
	/* Prefer the cached last-freed slot; otherwise scan for a free one. */
	if (shm_last_free < 0) {
		for (i = 0; i < shminfo.shmmni; i++)
			if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
				break;
		if (i == shminfo.shmmni)
			panic("shmseg free count inconsistent");
		segnum = i;
	} else {
		segnum = shm_last_free;
		shm_last_free = -1;
	}
	shmseg = &shmsegs[segnum];
	/*
	 * In case we sleep in malloc(), mark the segment present but deleted
	 * so that noone else tries to create the same key.
+ */ + kret = vm_allocate(current_map(), &user_addr, size, TRUE); + if (kret != KERN_SUCCESS) + goto out; + + kret = mach_make_memory_entry (current_map(), &size, + user_addr, VM_PROT_DEFAULT, &mem_object, 0); + + if (kret != KERN_SUCCESS) + goto out; + shmseg->shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED; + shmseg->shm_perm.key = uap->key; + shmseg->shm_perm.seq = (shmseg->shm_perm.seq + 1) & 0x7fff; + shm_handle = (struct shm_handle *) + _MALLOC(sizeof(struct shm_handle), M_SHM, M_WAITOK); + shm_handle->shm_object = mem_object; + shmid = IXSEQ_TO_IPCID(segnum, shmseg->shm_perm); + + shmseg->shm_internal = shm_handle; + shmseg->shm_perm.cuid = shmseg->shm_perm.uid = cred->cr_uid; + shmseg->shm_perm.cgid = shmseg->shm_perm.gid = cred->cr_gid; + shmseg->shm_perm.mode = (shmseg->shm_perm.mode & SHMSEG_WANTED) | + (mode & ACCESSPERMS) | SHMSEG_ALLOCATED; + shmseg->shm_segsz = uap->size; + shmseg->shm_cpid = p->p_pid; + shmseg->shm_lpid = shmseg->shm_nattch = 0; + shmseg->shm_atime = shmseg->shm_dtime = 0; + shmseg->shm_ctime = time_second; + shm_committed += btoc(size); + shm_nused++; + if (shmseg->shm_perm.mode & SHMSEG_WANTED) { + /* + * Somebody else wanted this key while we were asleep. Wake + * them up now. 
+ */ + shmseg->shm_perm.mode &= ~SHMSEG_WANTED; + wakeup((caddr_t)shmseg); + } + *retval = shmid; + return 0; +out: + switch (kret) { + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } + +} + +int +shmget(p, uap, retval) + struct proc *p; + struct shmget_args *uap; + register_t *retval; +{ + int segnum, mode, error; + + mode = uap->shmflg & ACCESSPERMS; + if (uap->key != IPC_PRIVATE) { + again: + segnum = shm_find_segment_by_key(uap->key); + if (segnum >= 0) { + error = shmget_existing(p, uap, mode, segnum, retval); + if (error == EAGAIN) + goto again; + return(error); + } + if ((uap->shmflg & IPC_CREAT) == 0) + return ENOENT; + } + return( shmget_allocate_segment(p, uap, mode, retval));; + /*NOTREACHED*/ + +} + +struct shmsys_args { + u_int which; + int a2; + int a3; + int a4; +}; +int +shmsys(p, uap, retval) + struct proc *p; + /* XXX actually varargs. */ + struct shmsys_args *uap; + register_t *retval; +{ + + if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) + return EINVAL; + return ((*shmcalls[uap->which])(p, &uap->a2, retval)); +} + +void +shmfork(p1, p2) + struct proc *p1, *p2; +{ + struct shmmap_state *shmmap_s; + size_t size; + int i; + + size = shminfo.shmseg * sizeof(struct shmmap_state); + shmmap_s = (struct shmmap_state *)_MALLOC(size, M_SHM, M_WAITOK); + bcopy((caddr_t)p1->vm_shm, (caddr_t)shmmap_s, size); + p2->vm_shm = (caddr_t)shmmap_s; + for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) + if (shmmap_s->shmid != -1) + shmsegs[IPCID_TO_IX(shmmap_s->shmid)].shm_nattch++; +} + +void +shmexit(p) + struct proc *p; +{ + struct shmmap_state *shmmap_s; + int i; + + shmmap_s = (struct shmmap_state *)p->vm_shm; + for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) + if (shmmap_s->shmid != -1) + shm_delete_mapping(p, shmmap_s); + FREE((caddr_t)p->vm_shm, M_SHM); + p->vm_shm = NULL; +} + +void +shminit(dummy) + void *dummy; +{ + int i; + int s; + + s = 
sizeof(struct shmid_ds) * shminfo.shmmni; + + MALLOC(shmsegs, struct shmid_ds *, s, + M_SHM, M_WAITOK); + for (i = 0; i < shminfo.shmmni; i++) { + shmsegs[i].shm_perm.mode = SHMSEG_FREE; + shmsegs[i].shm_perm.seq = 0; + } + shm_last_free = 0; + shm_nused = 0; + shm_committed = 0; +} diff --git a/bsd/kern/tty.c b/bsd/kern/tty.c new file mode 100644 index 000000000..922299b3d --- /dev/null +++ b/bsd/kern/tty.c @@ -0,0 +1,2553 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tty.c 8.8 (Berkeley) 1/21/94 + */ +/*- + * TODO: + * o Fix races for sending the start char in ttyflush(). + * o Handle inter-byte timeout for "MIN > 0, TIME > 0" in ttyselect(). + * With luck, there will be MIN chars before select() returns(). 
+ * o Handle CLOCAL consistently for ptys. Perhaps disallow setting it. + * o Don't allow input in TS_ZOMBIE case. It would be visible through + * FIONREAD. + * o Do the new sio locking stuff here and use it to avoid special + * case for EXTPROC? + * o Lock PENDIN too? + * o Move EXTPROC and/or PENDIN to t_state? + * o Wrap most of ttioctl in spltty/splx. + * o Implement TIOCNOTTY or remove it from . + * o Send STOP if IXOFF is toggled off while TS_TBLOCK is set. + * o Don't allow certain termios flags to affect disciplines other + * than TTYDISC. Cancel their effects before switch disciplines + * and ignore them if they are set while we are in another + * discipline. + * o Handle c_ispeed = 0 to c_ispeed = c_ospeed conversion here instead + * of in drivers and fix drivers that write to tp->t_termios. + * o Check for TS_CARR_ON being set while everything is closed and not + * waiting for carrier. TS_CARR_ON isn't cleared if nothing is open, + * so it would live until the next open even if carrier drops. + * o Restore TS_WOPEN since it is useful in pstat. It must be cleared + * only when _all_ openers leave open(). 
+ */ +#ifdef NeXT +#define NSNP 0 +#else +#include "snp.h" +#include "opt_uconsole.h" +#endif + +#include +#define TTYDEFCHARS 1 +#include +#undef TTYDEFCHARS +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef NeXT +#include +#endif +#include +#if NSNP > 0 +#include +#endif + +#ifndef NeXT +#include +#include +#include +#include +#include +#include +#else +#include +#include +#include +#if 0 /* [ */ +#include +#endif /* 0 ] */ +#endif /* !NeXT */ + +#ifndef NeXT +static int proc_compare __P((struct proc *p1, struct proc *p2)); +#endif /* NeXT */ +static int ttnread __P((struct tty *tp)); +static void ttyecho __P((int c, struct tty *tp)); +static int ttyoutput __P((int c, register struct tty *tp)); +static void ttypend __P((struct tty *tp)); +static void ttyretype __P((struct tty *tp)); +static void ttyrub __P((int c, struct tty *tp)); +static void ttyrubo __P((struct tty *tp, int cnt)); +static void ttystop __P((struct tty *tp, int rw)); +static void ttyunblock __P((struct tty *tp)); +static int ttywflush __P((struct tty *tp)); + +/* + * Table with character classes and parity. The 8th bit indicates parity, + * the 7th bit indicates the character is an alphameric or underscore (for + * ALTWERASE), and the low 6 bits indicate delay type. If the low 6 bits + * are 0 then the character needs no special processing on output; classes + * other than 0 might be translated or (not currently) require delays. + */ +#define E 0x00 /* Even parity. */ +#define O 0x80 /* Odd parity. */ +#define PARITY(c) (char_type[c] & O) + +#define ALPHA 0x40 /* Alpha or underscore. 
*/ +#define ISALPHA(c) (char_type[(c) & TTY_CHARMASK] & ALPHA) + +#define CCLASSMASK 0x3f +#define CCLASS(c) (char_type[c] & CCLASSMASK) + +#define BS BACKSPACE +#define CC CONTROL +#define CR RETURN +#define NA ORDINARY | ALPHA +#define NL NEWLINE +#define NO ORDINARY +#define TB TAB +#define VT VTAB + +static u_char const char_type[] = { + E|CC, O|CC, O|CC, E|CC, O|CC, E|CC, E|CC, O|CC, /* nul - bel */ + O|BS, E|TB, E|NL, O|CC, E|VT, O|CR, O|CC, E|CC, /* bs - si */ + O|CC, E|CC, E|CC, O|CC, E|CC, O|CC, O|CC, E|CC, /* dle - etb */ + E|CC, O|CC, O|CC, E|CC, O|CC, E|CC, E|CC, O|CC, /* can - us */ + O|NO, E|NO, E|NO, O|NO, E|NO, O|NO, O|NO, E|NO, /* sp - ' */ + E|NO, O|NO, O|NO, E|NO, O|NO, E|NO, E|NO, O|NO, /* ( - / */ + E|NA, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* 0 - 7 */ + O|NA, E|NA, E|NO, O|NO, E|NO, O|NO, O|NO, E|NO, /* 8 - ? */ + O|NO, E|NA, E|NA, O|NA, E|NA, O|NA, O|NA, E|NA, /* @ - G */ + E|NA, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* H - O */ + E|NA, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* P - W */ + O|NA, E|NA, E|NA, O|NO, E|NO, O|NO, O|NO, O|NA, /* X - _ */ + E|NO, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* ` - g */ + O|NA, E|NA, E|NA, O|NA, E|NA, O|NA, O|NA, E|NA, /* h - o */ + O|NA, E|NA, E|NA, O|NA, E|NA, O|NA, O|NA, E|NA, /* p - w */ + E|NA, O|NA, O|NA, E|NO, O|NO, E|NO, E|NO, O|CC, /* x - del */ + /* + * Meta chars; should be settable per character set; + * for now, treat them all as normal characters. 
+ */ + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, +}; +#undef BS +#undef CC +#undef CR +#undef NA +#undef NL +#undef NO +#undef TB +#undef VT + +/* Macros to clear/set/test flags. */ +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) + +/* + * Input control starts when we would not be able to fit the maximum + * contents of the ping-pong buffers and finishes when we would be able + * to fit that much plus 1/8 more. + */ +#define I_HIGH_WATER (TTYHOG - 2 * 256) /* XXX */ +#define I_LOW_WATER ((TTYHOG - 2 * 256) * 7 / 8) /* XXX */ + +#undef MAX_INPUT /* XXX wrong in */ +#define MAX_INPUT TTYHOG + +/* + * Initial open of tty, or (re)entry to standard tty line discipline. + */ +int +ttyopen(device, tp) + dev_t device; + register struct tty *tp; +{ + int s; + + s = spltty(); + tp->t_dev = device; + if (!ISSET(tp->t_state, TS_ISOPEN)) { + SET(tp->t_state, TS_ISOPEN); + if (ISSET(tp->t_cflag, CLOCAL)) { + SET(tp->t_state, TS_CONNECTED); } + bzero(&tp->t_winsize, sizeof(tp->t_winsize)); + } + +#ifndef NeXT + /* + * Initialize or restore a cblock allocation policy suitable for + * the standard line discipline. 
+ */ + clist_alloc_cblocks(&tp->t_canq, TTYHOG, 512); + clist_alloc_cblocks(&tp->t_outq, TTMAXHIWAT + OBUFSIZ + 100, + TTMAXHIWAT + OBUFSIZ + 100); + clist_alloc_cblocks(&tp->t_rawq, TTYHOG, TTYHOG); +#endif /* !NeXT */ + + splx(s); + return (0); +} + +/* + * Handle close() on a tty line: flush and set to initial state, + * bumping generation number so that pending read/write calls + * can detect recycling of the tty. + * XXX our caller should have done `spltty(); l_close(); ttyclose();' + * and l_close() should have flushed, but we repeat the spltty() and + * the flush in case there are buggy callers. + */ +int +ttyclose(tp) + register struct tty *tp; +{ + int s; + + s = spltty(); + if (constty == tp) { + constty = NULL; + +splx(s); +spltty(); + +#ifdef NeXT + /* + * Closing current console tty; disable printing of console + * messages at bottom-level driver. + */ + (*cdevsw[major(tp->t_dev)].d_ioctl) + (tp->t_dev, KMIOCDISABLCONS, NULL, 0, current_proc()); +#endif /* NeXT */ + } + + ttyflush(tp, FREAD | FWRITE); +#ifndef NeXT + clist_free_cblocks(&tp->t_canq); + clist_free_cblocks(&tp->t_outq); + clist_free_cblocks(&tp->t_rawq); +#endif + +#if NSNP > 0 + if (ISSET(tp->t_state, TS_SNOOP) && tp->t_sc != NULL) + snpdown((struct snoop *)tp->t_sc); +#endif + + tp->t_gen++; + tp->t_line = TTYDISC; + tp->t_pgrp = NULL; + tp->t_session = NULL; + tp->t_state = 0; +#if NeXT + selthreadclear(&tp->t_wsel); + selthreadclear(&tp->t_rsel); +#endif + splx(s); + return (0); +} + +#define FLUSHQ(q) { \ + if ((q)->c_cc) \ + ndflush(q, (q)->c_cc); \ +} + +/* Is 'c' a line delimiter ("break" character)? */ +#define TTBREAKC(c, lflag) \ + ((c) == '\n' || (((c) == cc[VEOF] || \ + (c) == cc[VEOL] || ((c) == cc[VEOL2] && lflag & IEXTEN)) && \ + (c) != _POSIX_VDISABLE)) + +/* + * Process input of a single character received on a tty. 
+ */ +int +ttyinput(c, tp) + register int c; + register struct tty *tp; +{ + register tcflag_t iflag, lflag; + register cc_t *cc; + int i, err, retval; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + /* + * If input is pending take it first. + */ + lflag = tp->t_lflag; + if (ISSET(lflag, PENDIN)) + ttypend(tp); + /* + * Gather stats. + */ + if (ISSET(lflag, ICANON)) { + ++tk_cancc; + ++tp->t_cancc; + } else { + ++tk_rawcc; + ++tp->t_rawcc; + } + ++tk_nin; + + /* + * Block further input iff: + * current input > threshold AND input is available to user program + * AND input flow control is enabled and not yet invoked. + * The 3 is slop for PARMRK. + */ + iflag = tp->t_iflag; + if (tp->t_rawq.c_cc + tp->t_canq.c_cc > I_HIGH_WATER - 3 && + (!ISSET(lflag, ICANON) || tp->t_canq.c_cc != 0) && + (ISSET(tp->t_cflag, CRTS_IFLOW) || ISSET(iflag, IXOFF)) && + !ISSET(tp->t_state, TS_TBLOCK)) + ttyblock(tp); + + /* Handle exceptional conditions (break, parity, framing). 
*/ + cc = tp->t_cc; + err = (ISSET(c, TTY_ERRORMASK)); + if (err) { + CLR(c, TTY_ERRORMASK); + if (ISSET(err, TTY_BI)) { + if (ISSET(iflag, IGNBRK)) { + thread_funnel_set(kernel_flock, funnel_state); + return (0); + } + if (ISSET(iflag, BRKINT)) { + ttyflush(tp, FREAD | FWRITE); + pgsignal(tp->t_pgrp, SIGINT, 1); + goto endcase; + } + if (ISSET(iflag, PARMRK)) + goto parmrk; + } else if ((ISSET(err, TTY_PE) && ISSET(iflag, INPCK)) + || ISSET(err, TTY_FE)) { + if (ISSET(iflag, IGNPAR)) { + thread_funnel_set(kernel_flock, funnel_state); + return (0); + } + else if (ISSET(iflag, PARMRK)) { +parmrk: + if (tp->t_rawq.c_cc + tp->t_canq.c_cc > + MAX_INPUT - 3) + goto input_overflow; + (void)putc(0377 | TTY_QUOTE, &tp->t_rawq); + (void)putc(0 | TTY_QUOTE, &tp->t_rawq); + (void)putc(c | TTY_QUOTE, &tp->t_rawq); + goto endcase; + } else + c = 0; + } + } + + if (!ISSET(tp->t_state, TS_TYPEN) && ISSET(iflag, ISTRIP)) + CLR(c, 0x80); + if (!ISSET(lflag, EXTPROC)) { + /* + * Check for literal nexting very first + */ + if (ISSET(tp->t_state, TS_LNCH)) { + SET(c, TTY_QUOTE); + CLR(tp->t_state, TS_LNCH); + } + /* + * Scan for special characters. This code + * is really just a big case statement with + * non-constant cases. The bottom of the + * case statement is labeled ``endcase'', so goto + * it after a case match, or similar. + */ + + /* + * Control chars which aren't controlled + * by ICANON, ISIG, or IXON. + */ + if (ISSET(lflag, IEXTEN)) { + if (CCEQ(cc[VLNEXT], c)) { + if (ISSET(lflag, ECHO)) { + if (ISSET(lflag, ECHOE)) { + (void)ttyoutput('^', tp); + (void)ttyoutput('\b', tp); + } else + ttyecho(c, tp); + } + SET(tp->t_state, TS_LNCH); + goto endcase; + } + if (CCEQ(cc[VDISCARD], c)) { + if (ISSET(lflag, FLUSHO)) + CLR(tp->t_lflag, FLUSHO); + else { + ttyflush(tp, FWRITE); + ttyecho(c, tp); + if (tp->t_rawq.c_cc + tp->t_canq.c_cc) + ttyretype(tp); + SET(tp->t_lflag, FLUSHO); + } + goto startoutput; + } + } + /* + * Signals. 
+ */ + if (ISSET(lflag, ISIG)) { + if (CCEQ(cc[VINTR], c) || CCEQ(cc[VQUIT], c)) { + if (!ISSET(lflag, NOFLSH)) + ttyflush(tp, FREAD | FWRITE); + ttyecho(c, tp); + pgsignal(tp->t_pgrp, + CCEQ(cc[VINTR], c) ? SIGINT : SIGQUIT, 1); + goto endcase; + } + if (CCEQ(cc[VSUSP], c)) { + if (!ISSET(lflag, NOFLSH)) + ttyflush(tp, FREAD); + ttyecho(c, tp); + pgsignal(tp->t_pgrp, SIGTSTP, 1); + goto endcase; + } + } + /* + * Handle start/stop characters. + */ + if (ISSET(iflag, IXON)) { + if (CCEQ(cc[VSTOP], c)) { + if (!ISSET(tp->t_state, TS_TTSTOP)) { + SET(tp->t_state, TS_TTSTOP); + ttystop(tp, 0); + thread_funnel_set(kernel_flock, funnel_state); + return (0); + } + if (!CCEQ(cc[VSTART], c)) { + thread_funnel_set(kernel_flock, funnel_state); + return (0); + } + /* + * if VSTART == VSTOP then toggle + */ + goto endcase; + } + if (CCEQ(cc[VSTART], c)) + goto restartoutput; + } + /* + * IGNCR, ICRNL, & INLCR + */ + if (c == '\r') { + if (ISSET(iflag, IGNCR)) { + thread_funnel_set(kernel_flock, funnel_state); + return (0); + } + else if (ISSET(iflag, ICRNL)) + c = '\n'; + } else if (c == '\n' && ISSET(iflag, INLCR)) + c = '\r'; + } + if (!ISSET(tp->t_lflag, EXTPROC) && ISSET(lflag, ICANON)) { + /* + * From here on down canonical mode character + * processing takes place. + */ + /* + * erase (^H / ^?) 
+ */ + if (CCEQ(cc[VERASE], c)) { + if (tp->t_rawq.c_cc) + ttyrub(unputc(&tp->t_rawq), tp); + goto endcase; + } + /* + * kill (^U) + */ + if (CCEQ(cc[VKILL], c)) { + if (ISSET(lflag, ECHOKE) && + tp->t_rawq.c_cc == tp->t_rocount && + !ISSET(lflag, ECHOPRT)) + while (tp->t_rawq.c_cc) + ttyrub(unputc(&tp->t_rawq), tp); + else { + ttyecho(c, tp); + if (ISSET(lflag, ECHOK) || + ISSET(lflag, ECHOKE)) + ttyecho('\n', tp); + FLUSHQ(&tp->t_rawq); + tp->t_rocount = 0; + } + CLR(tp->t_state, TS_LOCAL); + goto endcase; + } + /* + * word erase (^W) + */ + if (CCEQ(cc[VWERASE], c) && ISSET(lflag, IEXTEN)) { + int ctype; + + /* + * erase whitespace + */ + while ((c = unputc(&tp->t_rawq)) == ' ' || c == '\t') + ttyrub(c, tp); + if (c == -1) + goto endcase; + /* + * erase last char of word and remember the + * next chars type (for ALTWERASE) + */ + ttyrub(c, tp); + c = unputc(&tp->t_rawq); + if (c == -1) + goto endcase; + if (c == ' ' || c == '\t') { + (void)putc(c, &tp->t_rawq); + goto endcase; + } + ctype = ISALPHA(c); + /* + * erase rest of word + */ + do { + ttyrub(c, tp); + c = unputc(&tp->t_rawq); + if (c == -1) + goto endcase; + } while (c != ' ' && c != '\t' && + (!ISSET(lflag, ALTWERASE) || ISALPHA(c) == ctype)); + (void)putc(c, &tp->t_rawq); + goto endcase; + } + /* + * reprint line (^R) + */ + if (CCEQ(cc[VREPRINT], c) && ISSET(lflag, IEXTEN)) { + ttyretype(tp); + goto endcase; + } + /* + * ^T - kernel info and generate SIGINFO + */ + if (CCEQ(cc[VSTATUS], c) && ISSET(lflag, IEXTEN)) { + if (ISSET(lflag, ISIG)) + pgsignal(tp->t_pgrp, SIGINFO, 1); + if (!ISSET(lflag, NOKERNINFO)) + ttyinfo(tp); + goto endcase; + } + } + /* + * Check for input buffer overflow + */ + if (tp->t_rawq.c_cc + tp->t_canq.c_cc >= MAX_INPUT) { +input_overflow: + if (ISSET(iflag, IMAXBEL)) { + if (tp->t_outq.c_cc < tp->t_hiwat) + (void)ttyoutput(CTRL('g'), tp); + } + goto endcase; + } + + if ( c == 0377 && ISSET(iflag, PARMRK) && !ISSET(iflag, ISTRIP) + && ISSET(iflag, IGNBRK|IGNPAR) != 
(IGNBRK|IGNPAR)) + (void)putc(0377 | TTY_QUOTE, &tp->t_rawq); + + /* + * Put data char in q for user and + * wakeup on seeing a line delimiter. + */ + if (putc(c, &tp->t_rawq) >= 0) { + if (!ISSET(lflag, ICANON)) { + ttwakeup(tp); + ttyecho(c, tp); + goto endcase; + } + if (TTBREAKC(c, lflag)) { + tp->t_rocount = 0; + catq(&tp->t_rawq, &tp->t_canq); + ttwakeup(tp); + } else if (tp->t_rocount++ == 0) + tp->t_rocol = tp->t_column; + if (ISSET(tp->t_state, TS_ERASE)) { + /* + * end of prterase \.../ + */ + CLR(tp->t_state, TS_ERASE); + (void)ttyoutput('/', tp); + } + i = tp->t_column; + ttyecho(c, tp); + if (CCEQ(cc[VEOF], c) && ISSET(lflag, ECHO)) { + /* + * Place the cursor over the '^' of the ^D. + */ + i = min(2, tp->t_column - i); + while (i > 0) { + (void)ttyoutput('\b', tp); + i--; + } + } + } +endcase: + /* + * IXANY means allow any character to restart output. + */ + if (ISSET(tp->t_state, TS_TTSTOP) && + !ISSET(iflag, IXANY) && cc[VSTART] != cc[VSTOP]) { + thread_funnel_set(kernel_flock, funnel_state); + return (0); + } +restartoutput: + CLR(tp->t_lflag, FLUSHO); + CLR(tp->t_state, TS_TTSTOP); +startoutput: + retval = ttstart(tp); + thread_funnel_set(kernel_flock, funnel_state); + return (retval); +} + +/* + * Output a single character on a tty, doing output processing + * as needed (expanding tabs, newline processing, etc.). + * Returns < 0 if succeeds, otherwise returns char to resend. + * Must be recursive. + */ +static int +ttyoutput(c, tp) + register int c; + register struct tty *tp; +{ + register tcflag_t oflag; + register int col, s; + + oflag = tp->t_oflag; + if (!ISSET(oflag, OPOST)) { + if (ISSET(tp->t_lflag, FLUSHO)) + return (-1); + if (putc(c, &tp->t_outq)) + return (c); + tk_nout++; + tp->t_outcc++; + return (-1); + } + /* + * Do tab expansion if OXTABS is set. Special case if we external + * processing, we don't do the tab expansion because we'll probably + * get it wrong. If tab expansion needs to be done, let it happen + * externally. 
+ */ + CLR(c, ~TTY_CHARMASK); + if (c == '\t' && + ISSET(oflag, OXTABS) && !ISSET(tp->t_lflag, EXTPROC)) { + c = 8 - (tp->t_column & 7); + if (!ISSET(tp->t_lflag, FLUSHO)) { + s = spltty(); /* Don't interrupt tabs. */ + c -= b_to_q(" ", c, &tp->t_outq); + tk_nout += c; + tp->t_outcc += c; + splx(s); + } + tp->t_column += c; + return (c ? -1 : '\t'); + } + if (c == CEOT && ISSET(oflag, ONOEOT)) + return (-1); + + /* + * Newline translation: if ONLCR is set, + * translate newline into "\r\n". + */ + if (c == '\n' && ISSET(tp->t_oflag, ONLCR)) { + tk_nout++; + tp->t_outcc++; + if (putc('\r', &tp->t_outq)) + return (c); + } + tk_nout++; + tp->t_outcc++; + if (!ISSET(tp->t_lflag, FLUSHO) && putc(c, &tp->t_outq)) + return (c); + + col = tp->t_column; + switch (CCLASS(c)) { + case BACKSPACE: + if (col > 0) + --col; + break; + case CONTROL: + break; + case NEWLINE: + case RETURN: + col = 0; + break; + case ORDINARY: + ++col; + break; + case TAB: + col = (col + 8) & ~7; + break; + } + tp->t_column = col; + return (-1); +} + +/* + * Ioctls for all tty devices. Called after line-discipline specific ioctl + * has been called to do discipline-specific functions and/or reject any + * of these ioctl commands. + */ +/* ARGSUSED */ +int +#ifndef NeXT +ttioctl(tp, cmd, data, flag) + register struct tty *tp; + int cmd, flag; + void *data; +#else +ttioctl(tp, cmd, data, flag, p) + register struct tty *tp; + u_long cmd; + caddr_t data; + int flag; + struct proc *p; +#endif +{ +#ifndef NeXT + register struct proc *p = curproc; /* XXX */ +#endif + int s, error; + + /* If the ioctl involves modification, hang if in the background. 
*/ + switch (cmd) { + case TIOCFLUSH: + case TIOCSETA: + case TIOCSETD: + case TIOCSETAF: + case TIOCSETAW: +#ifdef notdef + case TIOCSPGRP: +#endif + case TIOCSTAT: + case TIOCSTI: + case TIOCSWINSZ: +#if defined(COMPAT_43) || defined(COMPAT_SUNOS) + case TIOCLBIC: + case TIOCLBIS: + case TIOCLSET: + case TIOCSETC: + case OTIOCSETD: + case TIOCSETN: + case TIOCSETP: + case TIOCSLTC: +#endif + while (isbackground(p, tp) && + (p->p_flag & P_PPWAIT) == 0 && + (p->p_sigignore & sigmask(SIGTTOU)) == 0 && + (p->p_sigmask & sigmask(SIGTTOU)) == 0) { + if (p->p_pgrp->pg_jobc == 0) + return (EIO); + pgsignal(p->p_pgrp, SIGTTOU, 1); + error = ttysleep(tp, &lbolt, TTOPRI | PCATCH | PTTYBLOCK, "ttybg1", + 0); + if (error) + return (error); + } + break; + } + + switch (cmd) { /* Process the ioctl. */ + case FIOASYNC: /* set/clear async i/o */ + s = spltty(); + if (*(int *)data) + SET(tp->t_state, TS_ASYNC); + else + CLR(tp->t_state, TS_ASYNC); + splx(s); + break; + case FIONBIO: /* set/clear non-blocking i/o */ + break; /* XXX: delete. 
*/ + case FIONREAD: /* get # bytes to read */ + s = spltty(); + *(int *)data = ttnread(tp); + splx(s); + break; + case TIOCEXCL: /* set exclusive use of tty */ + s = spltty(); + SET(tp->t_state, TS_XCLUDE); + splx(s); + break; + case TIOCFLUSH: { /* flush buffers */ + register int flags = *(int *)data; + + if (flags == 0) + flags = FREAD | FWRITE; + else + flags &= FREAD | FWRITE; + ttyflush(tp, flags); + break; + } +#ifdef NeXT + case TIOCSCONS: { + /* Set current console device to this line */ + int bogusData = 1; + data = (caddr_t) &bogusData; + + /* No break - Fall through to BSD code */ + } +#endif /* NeXT */ + case TIOCCONS: { /* become virtual console */ + if (*(int *)data) { + if (constty && constty != tp && + ISSET(constty->t_state, TS_CONNECTED)) { + return (EBUSY); + } +#if defined(NeXT) || !defined(UCONSOLE) + if ( (error = suser(p->p_ucred, &p->p_acflag)) ) + return (error); +#endif + constty = tp; + } else if (tp == constty) { + constty = NULL; + } +#ifdef NeXT + if (constty) { + (*cdevsw[major(cons.t_dev)].d_ioctl) + (cons.t_dev, KMIOCDISABLCONS, NULL, 0, p); + } else { + (*cdevsw[major(tp->t_dev)].d_ioctl) + (tp->t_dev, KMIOCDISABLCONS, NULL, 0, p); + } +#endif /* NeXT */ + break; + } + case TIOCDRAIN: /* wait till output drained */ + error = ttywait(tp); + if (error) + return (error); + break; + case TIOCGETA: { /* get termios struct */ + struct termios *t = (struct termios *)data; + + bcopy(&tp->t_termios, t, sizeof(struct termios)); + break; + } + case TIOCGETD: /* get line discipline */ + *(int *)data = tp->t_line; + break; + case TIOCGWINSZ: /* get window size */ + *(struct winsize *)data = tp->t_winsize; + break; + case TIOCGPGRP: /* get pgrp of tty */ + if (!isctty(p, tp)) + return (ENOTTY); + *(int *)data = tp->t_pgrp ? 
tp->t_pgrp->pg_id : NO_PID; + break; +#ifdef TIOCHPCL + case TIOCHPCL: /* hang up on last close */ + s = spltty(); + SET(tp->t_cflag, HUPCL); + splx(s); + break; +#endif + case TIOCNXCL: /* reset exclusive use of tty */ + s = spltty(); + CLR(tp->t_state, TS_XCLUDE); + splx(s); + break; + case TIOCOUTQ: /* output queue size */ + *(int *)data = tp->t_outq.c_cc; + break; + case TIOCSETA: /* set termios struct */ + case TIOCSETAW: /* drain output, set */ + case TIOCSETAF: { /* drn out, fls in, set */ + register struct termios *t = (struct termios *)data; + + if (t->c_ispeed < 0 || t->c_ospeed < 0) + return (EINVAL); + s = spltty(); + if (cmd == TIOCSETAW || cmd == TIOCSETAF) { + error = ttywait(tp); + if (error) { + splx(s); + return (error); + } + if (cmd == TIOCSETAF) + ttyflush(tp, FREAD); + } + if (!ISSET(t->c_cflag, CIGNORE)) { + /* + * Set device hardware. + */ + if (tp->t_param && (error = (*tp->t_param)(tp, t))) { + splx(s); + return (error); + } + if (ISSET(t->c_cflag, CLOCAL) && + !ISSET(tp->t_cflag, CLOCAL)) { + /* + * XXX disconnections would be too hard to + * get rid of without this kludge. The only + * way to get rid of controlling terminals + * is to exit from the session leader. + */ + CLR(tp->t_state, TS_ZOMBIE); + + wakeup(TSA_CARR_ON(tp)); + ttwakeup(tp); + ttwwakeup(tp); + } + if ((ISSET(tp->t_state, TS_CARR_ON) || + ISSET(t->c_cflag, CLOCAL)) && + !ISSET(tp->t_state, TS_ZOMBIE)) + SET(tp->t_state, TS_CONNECTED); + else + CLR(tp->t_state, TS_CONNECTED); + tp->t_cflag = t->c_cflag; + tp->t_ispeed = t->c_ispeed; + tp->t_ospeed = t->c_ospeed; + ttsetwater(tp); + } + if (ISSET(t->c_lflag, ICANON) != ISSET(tp->t_lflag, ICANON) && + cmd != TIOCSETAF) { + if (ISSET(t->c_lflag, ICANON)) + SET(tp->t_lflag, PENDIN); + else { + /* + * XXX we really shouldn't allow toggling + * ICANON while we're in a non-termios line + * discipline. Now we have to worry about + * panicing for a null queue. 
+ */ +#ifndef NeXT + if (tp->t_canq.c_cbreserved > 0 && + tp->t_rawq.c_cbreserved > 0) { + catq(&tp->t_rawq, &tp->t_canq); + /* + * XXX the queue limits may be + * different, so the old queue + * swapping method no longer works. + */ + catq(&tp->t_canq, &tp->t_rawq); + } +#else + if (tp->t_rawq.c_cs && tp->t_canq.c_cs) { + struct clist tq; + + catq(&tp->t_rawq, &tp->t_canq); + tq = tp->t_rawq; + tp->t_rawq = tp->t_canq; + tp->t_canq = tq; + } +#endif /* !NeXT */ + CLR(tp->t_lflag, PENDIN); + } + ttwakeup(tp); + } + tp->t_iflag = t->c_iflag; + tp->t_oflag = t->c_oflag; + /* + * Make the EXTPROC bit read only. + */ + if (ISSET(tp->t_lflag, EXTPROC)) + SET(t->c_lflag, EXTPROC); + else + CLR(t->c_lflag, EXTPROC); + tp->t_lflag = t->c_lflag | ISSET(tp->t_lflag, PENDIN); + if (t->c_cc[VMIN] != tp->t_cc[VMIN] || + t->c_cc[VTIME] != tp->t_cc[VTIME]) + ttwakeup(tp); + bcopy(t->c_cc, tp->t_cc, sizeof(t->c_cc)); + splx(s); + break; + } + case TIOCSETD: { /* set line discipline */ + register int t = *(int *)data; + dev_t device = tp->t_dev; + extern int nlinesw; + + if ((u_int)t >= nlinesw) + return (ENXIO); + if (t != tp->t_line) { + s = spltty(); + (*linesw[tp->t_line].l_close)(tp, flag); + error = (*linesw[t].l_open)(device, tp); + if (error) { + (void)(*linesw[tp->t_line].l_open)(device, tp); + splx(s); + return (error); + } + tp->t_line = t; + splx(s); + } + break; + } + case TIOCSTART: /* start output, like ^Q */ + s = spltty(); + if (ISSET(tp->t_state, TS_TTSTOP) || + ISSET(tp->t_lflag, FLUSHO)) { + CLR(tp->t_lflag, FLUSHO); + CLR(tp->t_state, TS_TTSTOP); + ttstart(tp); + } + splx(s); + break; + case TIOCSTI: /* simulate terminal input */ + if (p->p_ucred->cr_uid && (flag & FREAD) == 0) + return (EPERM); + if (p->p_ucred->cr_uid && !isctty(p, tp)) + return (EACCES); + s = spltty(); + (*linesw[tp->t_line].l_rint)(*(u_char *)data, tp); + splx(s); + break; + case TIOCSTOP: /* stop output, like ^S */ + s = spltty(); + if (!ISSET(tp->t_state, TS_TTSTOP)) { + SET(tp->t_state, 
TS_TTSTOP); + ttystop(tp, 0); + } + splx(s); + break; + case TIOCSCTTY: /* become controlling tty */ + /* Session ctty vnode pointer set in vnode layer. */ + if (!SESS_LEADER(p) || + ((p->p_session->s_ttyvp || tp->t_session) && + (tp->t_session != p->p_session))) + return (EPERM); + tp->t_session = p->p_session; + tp->t_pgrp = p->p_pgrp; + p->p_session->s_ttyp = tp; + p->p_flag |= P_CONTROLT; + break; + case TIOCSPGRP: { /* set pgrp of tty */ + register struct pgrp *pgrp = pgfind(*(int *)data); + + if (!isctty(p, tp)) + return (ENOTTY); + else if (pgrp == NULL || pgrp->pg_session != p->p_session) + return (EPERM); + tp->t_pgrp = pgrp; + break; + } + case TIOCSTAT: /* simulate control-T */ + s = spltty(); + ttyinfo(tp); + splx(s); + break; + case TIOCSWINSZ: /* set window size */ + if (bcmp((caddr_t)&tp->t_winsize, data, + sizeof (struct winsize))) { + tp->t_winsize = *(struct winsize *)data; + pgsignal(tp->t_pgrp, SIGWINCH, 1); + } + break; + case TIOCSDRAINWAIT: + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + tp->t_timeout = *(int *)data * hz; + wakeup(TSA_OCOMPLETE(tp)); + wakeup(TSA_OLOWAT(tp)); + break; + case TIOCGDRAINWAIT: + *(int *)data = tp->t_timeout / hz; + break; + default: +#if defined(COMPAT_43) || defined(COMPAT_SUNOS) +#ifdef NeXT + return (ttcompat(tp, cmd, data, flag, p)); +#else + return (ttcompat(tp, cmd, data, flag)); +#endif /* NeXT */ +#else + return (-1); +#endif + } + + return (0); +} + +int +ttyselect(tp, rw, p) + struct tty *tp; + int rw; + struct proc *p; +{ + int s; + + if (tp == NULL) + return (ENXIO); + + s = spltty(); + switch (rw) { + case FREAD: + if (ttnread(tp) > 0 || ISSET(tp->t_state, TS_ZOMBIE)) + goto win; + selrecord(p, &tp->t_rsel); + break; + case FWRITE: + if ((tp->t_outq.c_cc <= tp->t_lowat && + ISSET(tp->t_state, TS_CONNECTED)) + || ISSET(tp->t_state, TS_ZOMBIE)) { +win: splx(s); + return (1); + } + selrecord(p, &tp->t_wsel); + break; + } + splx(s); + return (0); +} + +/* + * This is a 
wrapper for compatibility with the select vector used by + * cdevsw. It relies on a proper xxxdevtotty routine. + */ +int +ttselect(dev, rw, p) + dev_t dev; + int rw; + struct proc *p; +{ +#ifndef NeXT + return ttyselect((*cdevsw[major(dev)]->d_devtotty)(dev), rw, p); +#else + return ttyselect(cdevsw[major(dev)].d_ttys[minor(dev)], rw, p); +#endif +} + +/* + * Must be called at spltty(). + */ +static int +ttnread(tp) + struct tty *tp; +{ + int nread; + + if (ISSET(tp->t_lflag, PENDIN)) + ttypend(tp); + nread = tp->t_canq.c_cc; + if (!ISSET(tp->t_lflag, ICANON)) { + nread += tp->t_rawq.c_cc; + if (nread < tp->t_cc[VMIN] && tp->t_cc[VTIME] == 0) + nread = 0; + } + return (nread); +} + +/* + * Wait for output to drain. + */ +int +ttywait(tp) + register struct tty *tp; +{ + int error, s; + + error = 0; + s = spltty(); + while ((tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY)) && + ISSET(tp->t_state, TS_CONNECTED) && tp->t_oproc) { + (*tp->t_oproc)(tp); + if ((tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY)) && + ISSET(tp->t_state, TS_CONNECTED)) { + SET(tp->t_state, TS_SO_OCOMPLETE); + error = ttysleep(tp, TSA_OCOMPLETE(tp), + TTOPRI | PCATCH, "ttywai", + tp->t_timeout); + if (error) { + if (error == EWOULDBLOCK) + error = EIO; + break; + } + } else + break; + } + if (!error && (tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY))) + error = EIO; + splx(s); + return (error); +} + +static void +ttystop(tp, rw) + struct tty *tp; + int rw; +{ +#ifdef sun4c /* XXX */ + (*tp->t_stop)(tp, rw); +#elif defined(NeXT) + (*cdevsw[major(tp->t_dev)].d_stop)(tp, rw); +#else + (*cdevsw[major(tp->t_dev)]->d_stop)(tp, rw); +#endif +} + +/* + * Flush if successfully wait. + */ +static int +ttywflush(tp) + struct tty *tp; +{ + int error; + + if ((error = ttywait(tp)) == 0) + ttyflush(tp, FREAD); + return (error); +} + +/* + * Flush tty read and/or write queues, notifying anyone waiting. 
+ */ +void +ttyflush(tp, rw) + register struct tty *tp; + int rw; +{ + register int s; + + s = spltty(); +#if 0 +again: +#endif + if (rw & FWRITE) { + FLUSHQ(&tp->t_outq); + CLR(tp->t_state, TS_TTSTOP); + } + ttystop(tp, rw); + if (rw & FREAD) { + FLUSHQ(&tp->t_canq); + FLUSHQ(&tp->t_rawq); + CLR(tp->t_lflag, PENDIN); + tp->t_rocount = 0; + tp->t_rocol = 0; + CLR(tp->t_state, TS_LOCAL); + ttwakeup(tp); + if (ISSET(tp->t_state, TS_TBLOCK)) { + if (rw & FWRITE) + FLUSHQ(&tp->t_outq); + ttyunblock(tp); + + /* + * Don't let leave any state that might clobber the + * next line discipline (although we should do more + * to send the START char). Not clearing the state + * may have caused the "putc to a clist with no + * reserved cblocks" panic/printf. + */ + CLR(tp->t_state, TS_TBLOCK); + +#if 0 /* forget it, sleeping isn't always safe and we don't know when it is */ + if (ISSET(tp->t_iflag, IXOFF)) { + /* + * XXX wait a bit in the hope that the stop + * character (if any) will go out. Waiting + * isn't good since it allows races. This + * will be fixed when the stop character is + * put in a special queue. Don't bother with + * the checks in ttywait() since the timeout + * will save us. + */ + SET(tp->t_state, TS_SO_OCOMPLETE); + ttysleep(tp, TSA_OCOMPLETE(tp), TTOPRI, + "ttyfls", hz / 10); + /* + * Don't try sending the stop character again. + */ + CLR(tp->t_state, TS_TBLOCK); + goto again; + } +#endif + } + } + if (rw & FWRITE) { + FLUSHQ(&tp->t_outq); + ttwwakeup(tp); + } + splx(s); +} + +/* + * Copy in the default termios characters. + */ +void +termioschars(t) + struct termios *t; +{ + + bcopy(ttydefchars, t->c_cc, sizeof t->c_cc); +} + +/* + * Old interface. + */ +void +ttychars(tp) + struct tty *tp; +{ + + termioschars(&tp->t_termios); +} + +/* + * Handle input high water. Send stop character for the IXOFF case. Turn + * on our input flow control bit and propagate the changes to the driver. + * XXX the stop character should be put in a special high priority queue. 
+ */ +void +ttyblock(tp) + struct tty *tp; +{ + + SET(tp->t_state, TS_TBLOCK); + if (ISSET(tp->t_iflag, IXOFF) && tp->t_cc[VSTOP] != _POSIX_VDISABLE && + putc(tp->t_cc[VSTOP], &tp->t_outq) != 0) + CLR(tp->t_state, TS_TBLOCK); /* try again later */ + ttstart(tp); +} + +/* + * Handle input low water. Send start character for the IXOFF case. Turn + * off our input flow control bit and propagate the changes to the driver. + * XXX the start character should be put in a special high priority queue. + */ +static void +ttyunblock(tp) + struct tty *tp; +{ + + CLR(tp->t_state, TS_TBLOCK); + if (ISSET(tp->t_iflag, IXOFF) && tp->t_cc[VSTART] != _POSIX_VDISABLE && + putc(tp->t_cc[VSTART], &tp->t_outq) != 0) + SET(tp->t_state, TS_TBLOCK); /* try again later */ + ttstart(tp); +} + +#if defined(NeXT) || defined(notyet) +/* FreeBSD: Not used by any current (i386) drivers. */ +/* + * Restart after an inter-char delay. + */ +void +ttrstrt(tp_arg) + void *tp_arg; +{ + struct tty *tp; + int s; + +#if DIAGNOSTIC + if (tp_arg == NULL) + panic("ttrstrt"); +#endif + tp = tp_arg; + s = spltty(); + + CLR(tp->t_state, TS_TIMEOUT); + ttstart(tp); + + splx(s); +} +#endif /* NeXT || notyet */ + +int +ttstart(tp) + struct tty *tp; +{ + + if (tp->t_oproc != NULL) /* XXX: Kludge for pty. */ + (*tp->t_oproc)(tp); + return (0); +} + +/* + * "close" a line discipline + */ +int +ttylclose(tp, flag) + struct tty *tp; + int flag; +{ + if ( (flag & FNONBLOCK) || ttywflush(tp)) + ttyflush(tp, FREAD | FWRITE); + return (0); +} + +/* + * Handle modem control transition on a tty. + * Flag indicates new state of carrier. + * Returns 0 if the line should be turned off, otherwise 1. + */ +int +ttymodem(tp, flag) + register struct tty *tp; + int flag; +{ + + if (ISSET(tp->t_state, TS_CARR_ON) && ISSET(tp->t_cflag, MDMBUF)) { + /* + * MDMBUF: do flow control according to carrier flag + * XXX TS_CAR_OFLOW doesn't do anything yet. TS_TTSTOP + * works if IXON and IXANY are clear. 
+ */ + if (flag) { + CLR(tp->t_state, TS_CAR_OFLOW); + CLR(tp->t_state, TS_TTSTOP); + ttstart(tp); + } else if (!ISSET(tp->t_state, TS_CAR_OFLOW)) { + SET(tp->t_state, TS_CAR_OFLOW); + SET(tp->t_state, TS_TTSTOP); + ttystop(tp, 0); + } + } else if (flag == 0) { + /* + * Lost carrier. + */ + CLR(tp->t_state, TS_CARR_ON); + if (ISSET(tp->t_state, TS_ISOPEN) && + !ISSET(tp->t_cflag, CLOCAL)) { + SET(tp->t_state, TS_ZOMBIE); + CLR(tp->t_state, TS_CONNECTED); + if (tp->t_session && tp->t_session->s_leader) + psignal(tp->t_session->s_leader, SIGHUP); + ttyflush(tp, FREAD | FWRITE); + return (0); + } + } else { + /* + * Carrier now on. + */ + SET(tp->t_state, TS_CARR_ON); + if (!ISSET(tp->t_state, TS_ZOMBIE)) + SET(tp->t_state, TS_CONNECTED); + wakeup(TSA_CARR_ON(tp)); + ttwakeup(tp); + ttwwakeup(tp); + } + return (1); +} + +/* + * Reinput pending characters after state switch + * call at spltty(). + */ +static void +ttypend(tp) + register struct tty *tp; +{ + struct clist tq; + register int c; + + CLR(tp->t_lflag, PENDIN); + SET(tp->t_state, TS_TYPEN); +#ifndef NeXT + /* + * XXX this assumes too much about clist internals. It may even + * fail if the cblock slush pool is empty. We can't allocate more + * cblocks here because we are called from an interrupt handler + * and clist_alloc_cblocks() can wait. + */ + tq = tp->t_rawq; + bzero(&tp->t_rawq, sizeof tp->t_rawq); + tp->t_rawq.c_cbmax = tq.c_cbmax; + tp->t_rawq.c_cbreserved = tq.c_cbreserved; +#else + tq = tp->t_rawq; + tp->t_rawq.c_cc = 0; + tp->t_rawq.c_cf = tp->t_rawq.c_cl = 0; +#endif /* !NeXT */ + while ((c = getc(&tq)) >= 0) + ttyinput(c, tp); + CLR(tp->t_state, TS_TYPEN); +} + +/* + * Process a read call on a tty device. 
+ */ +int +ttread(tp, uio, flag) + register struct tty *tp; + struct uio *uio; + int flag; +{ + register struct clist *qp; + register int c; + register tcflag_t lflag; + register cc_t *cc = tp->t_cc; + register struct proc *p = current_proc(); + int s, first, error = 0; + int has_etime = 0, last_cc = 0; + long slp = 0; /* XXX this should be renamed `timo'. */ + +loop: + s = spltty(); + lflag = tp->t_lflag; + /* + * take pending input first + */ + if (ISSET(lflag, PENDIN)) { + ttypend(tp); + splx(s); /* reduce latency */ + s = spltty(); + lflag = tp->t_lflag; /* XXX ttypend() clobbers it */ + } + + /* + * Hang process if it's in the background. + */ + if (isbackground(p, tp)) { + splx(s); + if ((p->p_sigignore & sigmask(SIGTTIN)) || + (p->p_sigmask & sigmask(SIGTTIN)) || + p->p_flag & P_PPWAIT || p->p_pgrp->pg_jobc == 0) + return (EIO); + pgsignal(p->p_pgrp, SIGTTIN, 1); + error = ttysleep(tp, &lbolt, TTIPRI | PCATCH | PTTYBLOCK, "ttybg2", 0); + if (error) + return (error); + goto loop; + } + + if (ISSET(tp->t_state, TS_ZOMBIE)) { + splx(s); + return (0); /* EOF */ + } + + /* + * If canonical, use the canonical queue, + * else use the raw queue. + * + * (should get rid of clists...) + */ + qp = ISSET(lflag, ICANON) ? &tp->t_canq : &tp->t_rawq; + + if (flag & IO_NDELAY) { + if (qp->c_cc > 0) + goto read; + if (!ISSET(lflag, ICANON) && cc[VMIN] == 0) { + splx(s); + return (0); + } + splx(s); + return (EWOULDBLOCK); + } + if (!ISSET(lflag, ICANON)) { + int m = cc[VMIN]; + long t = cc[VTIME]; + struct timeval etime, timecopy; + int x; + + /* + * Check each of the four combinations. + * (m > 0 && t == 0) is the normal read case. + * It should be fairly efficient, so we check that and its + * companion case (m == 0 && t == 0) first. + * For the other two cases, we compute the target sleep time + * into slp. + */ + if (t == 0) { + if (qp->c_cc < m) + goto sleep; + if (qp->c_cc > 0) + goto read; + + /* m, t and qp->c_cc are all 0. 0 is enough input. 
*/ + splx(s); + return (0); + } + t *= 100000; /* time in us */ +#define diff(t1, t2) (((t1).tv_sec - (t2).tv_sec) * 1000000 + \ + ((t1).tv_usec - (t2).tv_usec)) + if (m > 0) { + if (qp->c_cc <= 0) + goto sleep; + if (qp->c_cc >= m) + goto read; + x = splclock(); + timecopy = time; + splx(x); + if (!has_etime) { + /* first character, start timer */ + has_etime = 1; + + etime.tv_sec = t / 1000000; + etime.tv_usec = (t - (etime.tv_sec * 1000000)); + timeradd(&etime, &timecopy, &etime); + + slp = t; + } else if (qp->c_cc > last_cc) { + /* got a character, restart timer */ + + etime.tv_sec = t / 1000000; + etime.tv_usec = (t - (etime.tv_sec * 1000000)); + timeradd(&etime, &timecopy, &etime); + + slp = t; + } else { + /* nothing, check expiration */ + if (timercmp(&etime, &timecopy, <=)) + goto read; + + slp = diff(etime, timecopy); + } + last_cc = qp->c_cc; + } else { /* m == 0 */ + if (qp->c_cc > 0) + goto read; + x = splclock(); + timecopy = time; + splx(x); + if (!has_etime) { + has_etime = 1; + + etime.tv_sec = t / 1000000; + etime.tv_usec = (t - (etime.tv_sec * 1000000)); + timeradd(&etime, &timecopy, &etime); + + slp = t; + } else { + if (timercmp(&etime, &timecopy, <=)) { + /* Timed out, but 0 is enough input. */ + splx(s); + return (0); + } + slp = diff(etime, timecopy); + } + } +#undef diff + /* + * Rounding down may make us wake up just short + * of the target, so we round up. + * The formula is ceiling(slp * hz/1000000). + * 32-bit arithmetic is enough for hz < 169. + * XXX see hzto() for how to avoid overflow if hz + * is large (divide by `tick' and/or arrange to + * use hzto() if hz is large). + */ + slp = (long) (((u_long)slp * hz) + 999999) / 1000000; + goto sleep; + } + if (qp->c_cc <= 0) { +sleep: + /* + * There is no input, or not enough input and we can block. + */ + error = ttysleep(tp, TSA_HUP_OR_INPUT(tp), TTIPRI | PCATCH, + ISSET(tp->t_state, TS_CONNECTED) ? 
+ "ttyin" : "ttyhup", (int)slp); + splx(s); + if (error == EWOULDBLOCK) + error = 0; + else if (error) + return (error); + /* + * XXX what happens if another process eats some input + * while we are asleep (not just here)? It would be + * safest to detect changes and reset our state variables + * (has_stime and last_cc). + */ + slp = 0; + goto loop; + } +read: + splx(s); + /* + * Input present, check for input mapping and processing. + */ + first = 1; +#ifdef NeXT + if (ISSET(lflag, ICANON) + || (ISSET(lflag, IEXTEN | ISIG) == (IEXTEN | ISIG)) ) +#else + if (ISSET(lflag, ICANON | ISIG)) +#endif + goto slowcase; + for (;;) { + char ibuf[IBUFSIZ]; + int icc; + + icc = min(uio->uio_resid, IBUFSIZ); + icc = q_to_b(qp, ibuf, icc); + if (icc <= 0) { + if (first) + goto loop; + break; + } + error = uiomove(ibuf, icc, uio); + /* + * XXX if there was an error then we should ungetc() the + * unmoved chars and reduce icc here. + */ +#if NSNP > 0 + if (ISSET(tp->t_lflag, ECHO) && + ISSET(tp->t_state, TS_SNOOP) && tp->t_sc != NULL) + snpin((struct snoop *)tp->t_sc, ibuf, icc); +#endif + if (error) + break; + if (uio->uio_resid == 0) + break; + first = 0; + } + goto out; +slowcase: + for (;;) { + c = getc(qp); + if (c < 0) { + if (first) + goto loop; + break; + } + /* + * delayed suspend (^Y) + */ + if (CCEQ(cc[VDSUSP], c) && + ISSET(lflag, IEXTEN | ISIG) == (IEXTEN | ISIG)) { + pgsignal(tp->t_pgrp, SIGTSTP, 1); + if (first) { + error = ttysleep(tp, &lbolt, TTIPRI | PCATCH, + "ttybg3", 0); + if (error) + break; + goto loop; + } + break; + } + /* + * Interpret EOF only in canonical mode. + */ + if (CCEQ(cc[VEOF], c) && ISSET(lflag, ICANON)) + break; + /* + * Give user character. + */ + error = ureadc(c, uio); + if (error) + /* XXX should ungetc(c, qp). */ + break; +#if NSNP > 0 + /* + * Only snoop directly on input in echo mode. Non-echoed + * input will be snooped later iff the application echoes it. 
+ */ + if (ISSET(tp->t_lflag, ECHO) && + ISSET(tp->t_state, TS_SNOOP) && tp->t_sc != NULL) + snpinc((struct snoop *)tp->t_sc, (char)c); +#endif + if (uio->uio_resid == 0) + break; + /* + * In canonical mode check for a "break character" + * marking the end of a "line of input". + */ + if (ISSET(lflag, ICANON) && TTBREAKC(c, lflag)) + break; + first = 0; + } + +out: + /* + * Look to unblock input now that (presumably) + * the input queue has gone down. + */ + s = spltty(); + if (ISSET(tp->t_state, TS_TBLOCK) && + tp->t_rawq.c_cc + tp->t_canq.c_cc <= I_LOW_WATER) + ttyunblock(tp); + splx(s); + + return (error); +} + +/* + * Check the output queue on tp for space for a kernel message (from uprintf + * or tprintf). Allow some space over the normal hiwater mark so we don't + * lose messages due to normal flow control, but don't let the tty run amok. + * Sleeps here are not interruptible, but we return prematurely if new signals + * arrive. + */ +int +ttycheckoutq(tp, wait) + register struct tty *tp; + int wait; +{ + int hiwat, s, oldsig; + + hiwat = tp->t_hiwat; + s = spltty(); + oldsig = wait ? current_proc()->p_siglist : 0; + if (tp->t_outq.c_cc > hiwat + OBUFSIZ + 100) + while (tp->t_outq.c_cc > hiwat) { + ttstart(tp); + if (tp->t_outq.c_cc <= hiwat) + break; + if (wait == 0 || current_proc()->p_siglist != oldsig) { + splx(s); + return (0); + } + SET(tp->t_state, TS_SO_OLOWAT); + tsleep(TSA_OLOWAT(tp), PZERO - 1, "ttoutq", hz); + } + splx(s); + return (1); +} + +/* + * Process a write call on a tty device. 
+ */ +int +ttwrite(tp, uio, flag) + register struct tty *tp; + register struct uio *uio; + int flag; +{ + register char *cp = NULL; + register int cc, ce; + register struct proc *p; + int i, hiwat, cnt, error, s; + char obuf[OBUFSIZ]; + + hiwat = tp->t_hiwat; + cnt = uio->uio_resid; + error = 0; + cc = 0; +loop: + s = spltty(); + if (ISSET(tp->t_state, TS_ZOMBIE)) { + splx(s); + if (uio->uio_resid == cnt) + error = EIO; + goto out; + } + if (!ISSET(tp->t_state, TS_CONNECTED)) { + if (flag & IO_NDELAY) { + splx(s); + error = EWOULDBLOCK; + goto out; + } + error = ttysleep(tp, TSA_CARR_ON(tp), TTIPRI | PCATCH, + "ttydcd", 0); + splx(s); + if (error) { + goto out; } + goto loop; + } + splx(s); + /* + * Hang the process if it's in the background. + */ + p = current_proc(); + if (isbackground(p, tp) && + ISSET(tp->t_lflag, TOSTOP) && (p->p_flag & P_PPWAIT) == 0 && + (p->p_sigignore & sigmask(SIGTTOU)) == 0 && + (p->p_sigmask & sigmask(SIGTTOU)) == 0) { + if (p->p_pgrp->pg_jobc == 0) { + error = EIO; + goto out; + } + pgsignal(p->p_pgrp, SIGTTOU, 1); + error = ttysleep(tp, &lbolt, TTIPRI | PCATCH | PTTYBLOCK, "ttybg4", 0); + if (error) + goto out; + goto loop; + } + /* + * Process the user's data in at most OBUFSIZ chunks. Perform any + * output translation. Keep track of high water mark, sleep on + * overflow awaiting device aid in acquiring new space. + */ + while (uio->uio_resid > 0 || cc > 0) { + if (ISSET(tp->t_lflag, FLUSHO)) { + uio->uio_resid = 0; + return (0); + } + if (tp->t_outq.c_cc > hiwat) + goto ovhiwat; + /* + * Grab a hunk of data from the user, unless we have some + * leftover from last time. 
+ */ + if (cc == 0) { + cc = min(uio->uio_resid, OBUFSIZ); + cp = obuf; + error = uiomove(cp, cc, uio); + if (error) { + cc = 0; + break; + } +#if NSNP > 0 + if (ISSET(tp->t_state, TS_SNOOP) && tp->t_sc != NULL) + snpin((struct snoop *)tp->t_sc, cp, cc); +#endif + } + /* + * If nothing fancy need be done, grab those characters we + * can handle without any of ttyoutput's processing and + * just transfer them to the output q. For those chars + * which require special processing (as indicated by the + * bits in char_type), call ttyoutput. After processing + * a hunk of data, look for FLUSHO so ^O's will take effect + * immediately. + */ + while (cc > 0) { + if (!ISSET(tp->t_oflag, OPOST)) + ce = cc; + else { + ce = cc - scanc((u_int)cc, (u_char *)cp, + (u_char *)char_type, CCLASSMASK); + /* + * If ce is zero, then we're processing + * a special character through ttyoutput. + */ + if (ce == 0) { + tp->t_rocount = 0; + if (ttyoutput(*cp, tp) >= 0) { +#ifdef NeXT + /* out of space */ + goto overfull; +#else + /* No Clists, wait a bit. */ + ttstart(tp); + if (flag & IO_NDELAY) { + error = EWOULDBLOCK; + goto out; + } + error = ttysleep(tp, &lbolt, + TTOPRI|PCATCH, + "ttybf1", 0); + if (error) + goto out; + goto loop; +#endif /* NeXT */ + } + cp++; + cc--; + if (ISSET(tp->t_lflag, FLUSHO) || + tp->t_outq.c_cc > hiwat) + goto ovhiwat; + continue; + } + } + /* + * A bunch of normal characters have been found. + * Transfer them en masse to the output queue and + * continue processing at the top of the loop. + * If there are any further characters in this + * <= OBUFSIZ chunk, the first should be a character + * requiring special handling by ttyoutput. + */ + tp->t_rocount = 0; + i = b_to_q(cp, ce, &tp->t_outq); + ce -= i; + tp->t_column += ce; + cp += ce, cc -= ce, tk_nout += ce; + tp->t_outcc += ce; + if (i > 0) { +#ifdef NeXT + /* out of space */ + goto overfull; +#else + /* No Clists, wait a bit. 
*/ + ttstart(tp); + if (flag & IO_NDELAY) { + error = EWOULDBLOCK; + goto out; + } + error = ttysleep(tp, &lbolt, TTOPRI | PCATCH, + "ttybf2", 0); + if (error) + goto out; + goto loop; +#endif /* NeXT */ + } + if (ISSET(tp->t_lflag, FLUSHO) || + tp->t_outq.c_cc > hiwat) + break; + } + ttstart(tp); + } +out: + /* + * If cc is nonzero, we leave the uio structure inconsistent, as the + * offset and iov pointers have moved forward, but it doesn't matter + * (the call will either return short or restart with a new uio). + */ + uio->uio_resid += cc; + return (error); + +#ifdef NeXT +overfull: + + /* + * Since we are using ring buffers, if we can't insert any more into + * the output queue, we can assume the ring is full and that someone + * forgot to set the high water mark correctly. We set it and then + * proceed as normal. + */ + hiwat = tp->t_outq.c_cc - 1; +#endif + +ovhiwat: + ttstart(tp); + s = spltty(); + /* + * This can only occur if FLUSHO is set in t_lflag, + * or if ttstart/oproc is synchronous (or very fast). + */ + if (tp->t_outq.c_cc <= hiwat) { + splx(s); + goto loop; + } + if (flag & IO_NDELAY) { + splx(s); + uio->uio_resid += cc; + return (uio->uio_resid == cnt ? EWOULDBLOCK : 0); + } + SET(tp->t_state, TS_SO_OLOWAT); + error = ttysleep(tp, TSA_OLOWAT(tp), TTOPRI | PCATCH, "ttywri", + tp->t_timeout); + splx(s); + if (error == EWOULDBLOCK) + error = EIO; + if (error) + goto out; + goto loop; +} + +/* + * Rubout one character from the rawq of tp + * as cleanly as possible. 
+ */ +static void +ttyrub(c, tp) + register int c; + register struct tty *tp; +{ + register u_char *cp; + register int savecol; + int tabc, s; + + if (!ISSET(tp->t_lflag, ECHO) || ISSET(tp->t_lflag, EXTPROC)) + return; + CLR(tp->t_lflag, FLUSHO); + if (ISSET(tp->t_lflag, ECHOE)) { + if (tp->t_rocount == 0) { + /* + * Messed up by ttwrite; retype + */ + ttyretype(tp); + return; + } + if (c == ('\t' | TTY_QUOTE) || c == ('\n' | TTY_QUOTE)) + ttyrubo(tp, 2); + else { + CLR(c, ~TTY_CHARMASK); + switch (CCLASS(c)) { + case ORDINARY: + ttyrubo(tp, 1); + break; + case BACKSPACE: + case CONTROL: + case NEWLINE: + case RETURN: + case VTAB: + if (ISSET(tp->t_lflag, ECHOCTL)) + ttyrubo(tp, 2); + break; + case TAB: + if (tp->t_rocount < tp->t_rawq.c_cc) { + ttyretype(tp); + return; + } + s = spltty(); + savecol = tp->t_column; + SET(tp->t_state, TS_CNTTB); + SET(tp->t_lflag, FLUSHO); + tp->t_column = tp->t_rocol; +#ifndef NeXT + cp = tp->t_rawq.c_cf; + if (cp) + tabc = *cp; /* XXX FIX NEXTC */ + for (; cp; cp = nextc(&tp->t_rawq, cp, &tabc)) + ttyecho(tabc, tp); +#else + for (cp = firstc(&tp->t_rawq, &tabc); cp; + cp = nextc(&tp->t_rawq, cp, &tabc)) + ttyecho(tabc, tp); +#endif /* !NeXT */ + CLR(tp->t_lflag, FLUSHO); + CLR(tp->t_state, TS_CNTTB); + splx(s); + + /* savecol will now be length of the tab. */ + savecol -= tp->t_column; + tp->t_column += savecol; + if (savecol > 8) + savecol = 8; /* overflow fixup */ + while (--savecol >= 0) + (void)ttyoutput('\b', tp); + break; + default: /* XXX */ +#define PANICSTR "ttyrub: would panic c = %d, val = %d\n" + (void)printf(PANICSTR, c, CCLASS(c)); +#ifdef notdef + panic(PANICSTR, c, CCLASS(c)); +#endif + } + } + } else if (ISSET(tp->t_lflag, ECHOPRT)) { + if (!ISSET(tp->t_state, TS_ERASE)) { + SET(tp->t_state, TS_ERASE); + (void)ttyoutput('\\', tp); + } + ttyecho(c, tp); + } else + ttyecho(tp->t_cc[VERASE], tp); + --tp->t_rocount; +} + +/* + * Back over cnt characters, erasing them. 
+ */ +static void +ttyrubo(tp, cnt) + register struct tty *tp; + int cnt; +{ + + while (cnt-- > 0) { + (void)ttyoutput('\b', tp); + (void)ttyoutput(' ', tp); + (void)ttyoutput('\b', tp); + } +} + +/* + * ttyretype -- + * Reprint the rawq line. Note, it is assumed that c_cc has already + * been checked. + */ +static void +ttyretype(tp) + register struct tty *tp; +{ + register u_char *cp; + int s, c; + + /* Echo the reprint character. */ + if (tp->t_cc[VREPRINT] != _POSIX_VDISABLE) + ttyecho(tp->t_cc[VREPRINT], tp); + + (void)ttyoutput('\n', tp); + + /* + * FREEBSD XXX + * FIX: NEXTC IS BROKEN - DOESN'T CHECK QUOTE + * BIT OF FIRST CHAR. + */ + s = spltty(); +#ifndef NeXT + for (cp = tp->t_canq.c_cf, c = (cp != NULL ? *cp : 0); + cp != NULL; cp = nextc(&tp->t_canq, cp, &c)) + ttyecho(c, tp); + for (cp = tp->t_rawq.c_cf, c = (cp != NULL ? *cp : 0); + cp != NULL; cp = nextc(&tp->t_rawq, cp, &c)) + ttyecho(c, tp); +#else NeXT + for (cp = firstc(&tp->t_canq, &c); cp; cp = nextc(&tp->t_canq, cp, &c)) + ttyecho(c, tp); + for (cp = firstc(&tp->t_rawq, &c); cp; cp = nextc(&tp->t_rawq, cp, &c)) + ttyecho(c, tp); +#endif /* !NeXT */ + CLR(tp->t_state, TS_ERASE); + splx(s); + + tp->t_rocount = tp->t_rawq.c_cc; + tp->t_rocol = 0; +} + +/* + * Echo a typed character to the terminal. + */ +static void +ttyecho(c, tp) + register int c; + register struct tty *tp; +{ + + if (!ISSET(tp->t_state, TS_CNTTB)) + CLR(tp->t_lflag, FLUSHO); + if ((!ISSET(tp->t_lflag, ECHO) && + (c != '\n' || !ISSET(tp->t_lflag, ECHONL))) || + ISSET(tp->t_lflag, EXTPROC)) + return; + if (ISSET(tp->t_lflag, ECHOCTL) && + ((ISSET(c, TTY_CHARMASK) <= 037 && c != '\t' && c != '\n') || + ISSET(c, TTY_CHARMASK) == 0177)) { + (void)ttyoutput('^', tp); + CLR(c, ~TTY_CHARMASK); + if (c == 0177) + c = '?'; + else + c += 'A' - 1; + } + (void)ttyoutput(c, tp); +} + +/* + * Wake up any readers on a tty. 
+ */ +void +ttwakeup(tp) + register struct tty *tp; +{ + +#ifndef NeXT + if (tp->t_rsel.si_pid != 0) +#endif + selwakeup(&tp->t_rsel); + if (ISSET(tp->t_state, TS_ASYNC)) + pgsignal(tp->t_pgrp, SIGIO, 1); + wakeup(TSA_HUP_OR_INPUT(tp)); +} + +/* + * Wake up any writers on a tty. + */ +void +ttwwakeup(tp) + register struct tty *tp; +{ +#ifndef NeXT + if (tp->t_wsel.si_pid != 0 && tp->t_outq.c_cc <= tp->t_lowat) +#else + if (tp->t_outq.c_cc <= tp->t_lowat) +#endif + selwakeup(&tp->t_wsel); + if (ISSET(tp->t_state, TS_BUSY | TS_SO_OCOMPLETE) == + TS_SO_OCOMPLETE && tp->t_outq.c_cc == 0) { + CLR(tp->t_state, TS_SO_OCOMPLETE); + wakeup(TSA_OCOMPLETE(tp)); + } + if (ISSET(tp->t_state, TS_SO_OLOWAT) && + tp->t_outq.c_cc <= tp->t_lowat) { + CLR(tp->t_state, TS_SO_OLOWAT); + wakeup(TSA_OLOWAT(tp)); + } +} + +/* + * Look up a code for a specified speed in a conversion table; + * used by drivers to map software speed values to hardware parameters. + */ +int +ttspeedtab(speed, table) + int speed; + register struct speedtab *table; +{ + + for ( ; table->sp_speed != -1; table++) + if (table->sp_speed == speed) + return (table->sp_code); + return (-1); +} + +/* + * Set tty hi and low water marks. + * + * Try to arrange the dynamics so there's about one second + * from hi to low water. + * + */ +void +ttsetwater(tp) + struct tty *tp; +{ + register int cps, x; + +#define CLAMP(x, h, l) ((x) > h ? h : ((x) < l) ? l : (x)) + + cps = tp->t_ospeed / 10; + tp->t_lowat = x = CLAMP(cps / 2, TTMAXLOWAT, TTMINLOWAT); + x += cps; + x = CLAMP(x, TTMAXHIWAT, TTMINHIWAT); + tp->t_hiwat = roundup(x, CBSIZE); +#undef CLAMP +} + +/* NeXT ttyinfo has been converted to the MACH kernel */ +#include + +/* + * Report on state of foreground process group. 
+ */ +void +ttyinfo(tp) + register struct tty *tp; +{ + /* NOT IMPLEMENTED FOR MACH */ +} + +#ifndef NeXT +/* + * Returns 1 if p2 is "better" than p1 + * + * The algorithm for picking the "interesting" process is thus: + * + * 1) Only foreground processes are eligible - implied. + * 2) Runnable processes are favored over anything else. The runner + * with the highest cpu utilization is picked (p_estcpu). Ties are + * broken by picking the highest pid. + * 3) The sleeper with the shortest sleep time is next. With ties, + * we pick out just "short-term" sleepers (P_SINTR == 0). + * 4) Further ties are broken by picking the highest pid. + */ +#define ISRUN(p) (((p)->p_stat == SRUN) || ((p)->p_stat == SIDL)) +#define TESTAB(a, b) ((a)<<1 | (b)) +#define ONLYA 2 +#define ONLYB 1 +#define BOTH 3 + +static int +proc_compare(p1, p2) + register struct proc *p1, *p2; +{ + + if (p1 == NULL) + return (1); + /* + * see if at least one of them is runnable + */ + switch (TESTAB(ISRUN(p1), ISRUN(p2))) { + case ONLYA: + return (0); + case ONLYB: + return (1); + case BOTH: + /* + * tie - favor one with highest recent cpu utilization + */ + if (p2->p_estcpu > p1->p_estcpu) + return (1); + if (p1->p_estcpu > p2->p_estcpu) + return (0); + return (p2->p_pid > p1->p_pid); /* tie - return highest pid */ + } + /* + * weed out zombies + */ + switch (TESTAB(p1->p_stat == SZOMB, p2->p_stat == SZOMB)) { + case ONLYA: + return (1); + case ONLYB: + return (0); + case BOTH: + return (p2->p_pid > p1->p_pid); /* tie - return highest pid */ + } + /* + * pick the one with the smallest sleep time + */ + if (p2->p_slptime > p1->p_slptime) + return (0); + if (p1->p_slptime > p2->p_slptime) + return (1); + /* + * favor one sleeping in a non-interruptible sleep + */ + if (p1->p_flag & P_SINTR && (p2->p_flag & P_SINTR) == 0) + return (1); + if (p2->p_flag & P_SINTR && (p1->p_flag & P_SINTR) == 0) + return (0); + return (p2->p_pid > p1->p_pid); /* tie - return highest pid */ +} +#endif /* NeXT */ + +/* + * 
Output char to tty; console putchar style. + */ +int +tputchar(c, tp) + int c; + struct tty *tp; +{ + register int s; + + s = spltty(); + if (!ISSET(tp->t_state, TS_CONNECTED)) { + splx(s); + return (-1); + } + if (c == '\n') + (void)ttyoutput('\r', tp); + (void)ttyoutput(c, tp); + ttstart(tp); + splx(s); + return (0); +} + +/* + * Sleep on chan, returning ERESTART if tty changed while we napped and + * returning any errors (e.g. EINTR/EWOULDBLOCK) reported by tsleep. If + * the tty is revoked, restarting a pending call will redo validation done + * at the start of the call. + */ +int +ttysleep(tp, chan, pri, wmesg, timo) + struct tty *tp; + void *chan; + int pri, timo; + char *wmesg; +{ + int error; + int gen; + + gen = tp->t_gen; + error = tsleep(chan, pri, wmesg, timo); + if (error) + return (error); + return (tp->t_gen == gen ? 0 : ERESTART); +} + +#ifdef NeXT +/* + * Allocate a tty structure and its associated buffers. + */ +struct tty * +ttymalloc() +{ + struct tty *tp; + + MALLOC(tp, struct tty *, sizeof(struct tty), M_TTYS, M_WAITOK); + bzero(tp, sizeof *tp); + /* XXX: default to TTYCLSIZE(1024) chars for now */ + clalloc(&tp->t_rawq, TTYCLSIZE, 1); + clalloc(&tp->t_canq, TTYCLSIZE, 1); + /* output queue doesn't need quoting */ + clalloc(&tp->t_outq, TTYCLSIZE, 0); + return(tp); +} + +/* + * Free a tty structure and its buffers. + */ +void +ttyfree(tp) +struct tty *tp; +{ + clfree(&tp->t_rawq); + clfree(&tp->t_canq); + clfree(&tp->t_outq); + FREE(tp, M_TTYS); +} + +#else /* !NeXT */ + +#ifdef notyet +/* + * XXX this is usable not useful or used. Most tty drivers have + * ifdefs for using ttymalloc() but assume a different interface. + */ +/* + * Allocate a tty struct. Clists in the struct will be allocated by + * ttyopen(). + */ +struct tty * +ttymalloc() +{ + struct tty *tp; + + tp = _MALLOC(sizeof *tp, M_TTYS, M_WAITOK); + bzero(tp, sizeof *tp); + return (tp); +} +#endif + +#if 0 /* XXX not yet usable: session leader holds a ref (see kern_exit.c). 
*/ +/* + * Free a tty struct. Clists in the struct should have been freed by + * ttyclose(). + */ +void +ttyfree(tp) + struct tty *tp; +{ + FREE(tp, M_TTYS); +} +#endif /* 0 */ +#endif /* NeXT */ diff --git a/bsd/kern/tty_compat.c b/bsd/kern/tty_compat.c new file mode 100644 index 000000000..cf6e818b1 --- /dev/null +++ b/bsd/kern/tty_compat.c @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tty_compat.c 8.1 (Berkeley) 6/10/93 + */ + +/* + * mapping routines for old line discipline (yuck) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* NeXT Move define down here cause COMPAT_43 not valid earlier */ +#if COMPAT_43 || defined(COMPAT_SUNOS) + +static int ttcompatgetflags __P((struct tty *tp)); +static void ttcompatsetflags __P((struct tty *tp, struct termios *t)); +static void ttcompatsetlflags __P((struct tty *tp, struct termios *t)); +static int ttcompatspeedtab __P((int speed, struct speedtab *table)); + + +static int ttydebug = 0; + +#ifndef NeXT +SYSCTL_INT(_debug, OID_AUTO, ttydebug, CTLFLAG_RW, &ttydebug, 0, ""); +#endif + +static struct speedtab compatspeeds[] = { +#define MAX_SPEED 17 + { 115200, 17 }, + { 57600, 16 }, + { 38400, 15 }, + { 19200, 14 }, + { 9600, 13 }, + { 4800, 12 }, + { 2400, 11 }, + { 1800, 10 }, + { 1200, 9 }, + { 600, 8 }, + { 300, 7 }, + { 200, 6 }, + { 150, 5 }, + { 134, 4 }, + { 110, 3 }, + { 75, 2 }, + { 50, 1 }, + { 0, 0 }, + { -1, -1 }, +}; +static int compatspcodes[] = { + 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, + 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200, +}; + +static int +ttcompatspeedtab(speed, table) + int speed; + register struct speedtab *table; +{ + if (speed == 0) + return (0); /* hangup */ + for ( ; table->sp_speed > 0; table++) + if (table->sp_speed <= speed) /* nearest one, rounded down */ + return (table->sp_code); + return (1); /* 50, min and not hangup */ +} + +#ifndef NeXT +int +ttsetcompat(tp, com, data, term) + register struct tty *tp; + int *com; + caddr_t data; + struct termios *term; +#else +__private_extern__ int +ttsetcompat(tp, com, data, term) + register struct tty *tp; + u_long *com; + caddr_t data; + struct termios *term; +#endif /* !NeXT */ +{ + switch (*com) { + case TIOCSETP: + case TIOCSETN: { + register struct sgttyb *sg = (struct sgttyb *)data; + int speed; + + if ((speed = 
sg->sg_ispeed) > MAX_SPEED || speed < 0) + return(EINVAL); + else if (speed != ttcompatspeedtab(tp->t_ispeed, compatspeeds)) + term->c_ispeed = compatspcodes[speed]; + else + term->c_ispeed = tp->t_ispeed; + if ((speed = sg->sg_ospeed) > MAX_SPEED || speed < 0) + return(EINVAL); + else if (speed != ttcompatspeedtab(tp->t_ospeed, compatspeeds)) + term->c_ospeed = compatspcodes[speed]; + else + term->c_ospeed = tp->t_ospeed; + term->c_cc[VERASE] = sg->sg_erase; + term->c_cc[VKILL] = sg->sg_kill; + tp->t_flags = (tp->t_flags&0xffff0000) | (sg->sg_flags&0xffff); + ttcompatsetflags(tp, term); + *com = (*com == TIOCSETP) ? TIOCSETAF : TIOCSETA; + break; + } + case TIOCSETC: { + struct tchars *tc = (struct tchars *)data; + register cc_t *cc; + + cc = term->c_cc; + cc[VINTR] = tc->t_intrc; + cc[VQUIT] = tc->t_quitc; + cc[VSTART] = tc->t_startc; + cc[VSTOP] = tc->t_stopc; + cc[VEOF] = tc->t_eofc; + cc[VEOL] = tc->t_brkc; + if (tc->t_brkc == -1) + cc[VEOL2] = _POSIX_VDISABLE; + *com = TIOCSETA; + break; + } + case TIOCSLTC: { + struct ltchars *ltc = (struct ltchars *)data; + register cc_t *cc; + + cc = term->c_cc; + cc[VSUSP] = ltc->t_suspc; + cc[VDSUSP] = ltc->t_dsuspc; + cc[VREPRINT] = ltc->t_rprntc; + cc[VDISCARD] = ltc->t_flushc; + cc[VWERASE] = ltc->t_werasc; + cc[VLNEXT] = ltc->t_lnextc; + *com = TIOCSETA; + break; + } + case TIOCLBIS: + case TIOCLBIC: + case TIOCLSET: + if (*com == TIOCLSET) + tp->t_flags = (tp->t_flags&0xffff) | *(int *)data<<16; + else { + tp->t_flags = + (ttcompatgetflags(tp)&0xffff0000)|(tp->t_flags&0xffff); + if (*com == TIOCLBIS) + tp->t_flags |= *(int *)data<<16; + else + tp->t_flags &= ~(*(int *)data<<16); + } + ttcompatsetlflags(tp, term); + *com = TIOCSETA; + break; + } + return 0; +} + +/*ARGSUSED*/ +#ifndef NeXT +int +ttcompat(tp, com, data, flag) + register struct tty *tp; + int com; + caddr_t data; + int flag; +#else +__private_extern__ int +ttcompat(tp, com, data, flag, p) + register struct tty *tp; + u_long com; + caddr_t data; + int 
flag; + struct proc *p; +#endif /* !NeXT */ +{ + switch (com) { + case TIOCSETP: + case TIOCSETN: + case TIOCSETC: + case TIOCSLTC: + case TIOCLBIS: + case TIOCLBIC: + case TIOCLSET: { + struct termios term; + int error; + + term = tp->t_termios; + if ((error = ttsetcompat(tp, &com, data, &term)) != 0) + return error; +#ifdef NeXT + return ttioctl(tp, com, (caddr_t) &term, flag, p); +#else + return ttioctl(tp, com, &term, flag); +#endif + } + case TIOCGETP: { + register struct sgttyb *sg = (struct sgttyb *)data; + register cc_t *cc = tp->t_cc; + + sg->sg_ospeed = ttcompatspeedtab(tp->t_ospeed, compatspeeds); + if (tp->t_ispeed == 0) + sg->sg_ispeed = sg->sg_ospeed; + else + sg->sg_ispeed = ttcompatspeedtab(tp->t_ispeed, compatspeeds); + sg->sg_erase = cc[VERASE]; + sg->sg_kill = cc[VKILL]; + sg->sg_flags = tp->t_flags = ttcompatgetflags(tp); + break; + } + case TIOCGETC: { + struct tchars *tc = (struct tchars *)data; + register cc_t *cc = tp->t_cc; + + tc->t_intrc = cc[VINTR]; + tc->t_quitc = cc[VQUIT]; + tc->t_startc = cc[VSTART]; + tc->t_stopc = cc[VSTOP]; + tc->t_eofc = cc[VEOF]; + tc->t_brkc = cc[VEOL]; + break; + } + case TIOCGLTC: { + struct ltchars *ltc = (struct ltchars *)data; + register cc_t *cc = tp->t_cc; + + ltc->t_suspc = cc[VSUSP]; + ltc->t_dsuspc = cc[VDSUSP]; + ltc->t_rprntc = cc[VREPRINT]; + ltc->t_flushc = cc[VDISCARD]; + ltc->t_werasc = cc[VWERASE]; + ltc->t_lnextc = cc[VLNEXT]; + break; + } + case TIOCLGET: + tp->t_flags = + (ttcompatgetflags(tp) & 0xffff0000UL) + | (tp->t_flags & 0xffff); + *(int *)data = tp->t_flags>>16; +#ifndef NeXT + if (ttydebug) + printf("CLGET: returning %x\n", *(int *)data); +#endif + break; + + case OTIOCGETD: + *(int *)data = tp->t_line ? tp->t_line : 2; + break; + +#ifndef NeXT + case OTIOCSETD: { + int ldisczero = 0; + + return (ttioctl(tp, TIOCSETD, + *(int *)data == 2 ? 
(caddr_t)&ldisczero : data, flag)); + } + + case OTIOCCONS: + *(int *)data = 1; + return (ttioctl(tp, TIOCCONS, data, flag)); +#else + case OTIOCSETD: { + int ldisczero = 0; + + return (ttioctl(tp, TIOCSETD, + *(int *)data == 2 ? (caddr_t)&ldisczero : data, flag, p)); + } + + case OTIOCCONS: + *(int *)data = 1; + return (ttioctl(tp, TIOCCONS, data, flag, p)); + + case TIOCGSID: + if (tp->t_session == NULL) + return ENOTTY; + + if (tp->t_session->s_leader == NULL) + return ENOTTY; + + *(int *) data = tp->t_session->s_leader->p_pid; + break; +#endif /* NeXT */ + + default: + return (-1); + } + return (0); +} + +static int +ttcompatgetflags(tp) + register struct tty *tp; +{ + register tcflag_t iflag = tp->t_iflag; + register tcflag_t lflag = tp->t_lflag; + register tcflag_t oflag = tp->t_oflag; + register tcflag_t cflag = tp->t_cflag; + register flags = 0; + + if (iflag&IXOFF) + flags |= TANDEM; + if (iflag&ICRNL || oflag&ONLCR) + flags |= CRMOD; + if ((cflag&CSIZE) == CS8) { + flags |= PASS8; + if (iflag&ISTRIP) + flags |= ANYP; + } + else if (cflag&PARENB) { + if (iflag&INPCK) { + if (cflag&PARODD) + flags |= ODDP; + else + flags |= EVENP; + } else + flags |= EVENP | ODDP; + } + + if ((lflag&ICANON) == 0) { + /* fudge */ + if (iflag&(INPCK|ISTRIP|IXON) || lflag&(IEXTEN|ISIG) + || cflag&(CSIZE|PARENB) != CS8) + flags |= CBREAK; + else + flags |= RAW; + } + if (!(flags&RAW) && !(oflag&OPOST) && cflag&(CSIZE|PARENB) == CS8) + flags |= LITOUT; + if (cflag&MDMBUF) + flags |= MDMBUF; + if ((cflag&HUPCL) == 0) + flags |= NOHANG; + if (oflag&OXTABS) + flags |= XTABS; + if (lflag&ECHOE) + flags |= CRTERA|CRTBS; + if (lflag&ECHOKE) + flags |= CRTKIL|CRTBS; + if (lflag&ECHOPRT) + flags |= PRTERA; + if (lflag&ECHOCTL) + flags |= CTLECH; + if ((iflag&IXANY) == 0) + flags |= DECCTQ; + flags |= lflag&(ECHO|TOSTOP|FLUSHO|PENDIN|NOFLSH); +#ifndef NeXT + if (ttydebug) + printf("getflags: %x\n", flags); +#endif + return (flags); +} + +static void +ttcompatsetflags(tp, t) + register 
struct tty *tp; + register struct termios *t; +{ + register flags = tp->t_flags; + register tcflag_t iflag = t->c_iflag; + register tcflag_t oflag = t->c_oflag; + register tcflag_t lflag = t->c_lflag; + register tcflag_t cflag = t->c_cflag; + + if (flags & RAW) { + iflag = IGNBRK; + lflag &= ~(ECHOCTL|ISIG|ICANON|IEXTEN); + } else { + iflag &= ~(PARMRK|IGNPAR|IGNCR|INLCR); + iflag |= BRKINT|IXON|IMAXBEL; + lflag |= ISIG|IEXTEN|ECHOCTL; /* XXX was echoctl on ? */ + if (flags & XTABS) + oflag |= OXTABS; + else + oflag &= ~OXTABS; + if (flags & CBREAK) + lflag &= ~ICANON; + else + lflag |= ICANON; + if (flags&CRMOD) { + iflag |= ICRNL; + oflag |= ONLCR; + } else { + iflag &= ~ICRNL; + oflag &= ~ONLCR; + } + } + if (flags&ECHO) + lflag |= ECHO; + else + lflag &= ~ECHO; + + cflag &= ~(CSIZE|PARENB); + if (flags&(RAW|LITOUT|PASS8)) { + cflag |= CS8; + if (!(flags&(RAW|PASS8)) + || (flags&(RAW|PASS8|ANYP)) == (PASS8|ANYP)) + iflag |= ISTRIP; + else + iflag &= ~ISTRIP; + if (flags&(RAW|LITOUT)) + oflag &= ~OPOST; + else + oflag |= OPOST; + } else { + cflag |= CS7|PARENB; + iflag |= ISTRIP; + oflag |= OPOST; + } + /* XXX don't set INPCK if RAW or PASS8? 
*/ + if ((flags&(EVENP|ODDP)) == EVENP) { + iflag |= INPCK; + cflag &= ~PARODD; + } else if ((flags&(EVENP|ODDP)) == ODDP) { + iflag |= INPCK; + cflag |= PARODD; + } else + iflag &= ~INPCK; + if (flags&TANDEM) + iflag |= IXOFF; + else + iflag &= ~IXOFF; + if ((flags&DECCTQ) == 0) + iflag |= IXANY; + else + iflag &= ~IXANY; + t->c_iflag = iflag; + t->c_oflag = oflag; + t->c_lflag = lflag; + t->c_cflag = cflag; +} + +static void +ttcompatsetlflags(tp, t) + register struct tty *tp; + register struct termios *t; +{ + register flags = tp->t_flags; + register tcflag_t iflag = t->c_iflag; + register tcflag_t oflag = t->c_oflag; + register tcflag_t lflag = t->c_lflag; + register tcflag_t cflag = t->c_cflag; + + iflag &= ~(PARMRK|IGNPAR|IGNCR|INLCR); + if (flags&CRTERA) + lflag |= ECHOE; + else + lflag &= ~ECHOE; + if (flags&CRTKIL) + lflag |= ECHOKE; + else + lflag &= ~ECHOKE; + if (flags&PRTERA) + lflag |= ECHOPRT; + else + lflag &= ~ECHOPRT; + if (flags&CTLECH) + lflag |= ECHOCTL; + else + lflag &= ~ECHOCTL; + if (flags&TANDEM) + iflag |= IXOFF; + else + iflag &= ~IXOFF; + if ((flags&DECCTQ) == 0) + iflag |= IXANY; + else + iflag &= ~IXANY; + if (flags & MDMBUF) + cflag |= MDMBUF; + else + cflag &= ~MDMBUF; + if (flags&NOHANG) + cflag &= ~HUPCL; + else + cflag |= HUPCL; + lflag &= ~(TOSTOP|FLUSHO|PENDIN|NOFLSH); + lflag |= flags&(TOSTOP|FLUSHO|PENDIN|NOFLSH); + + /* + * The next if-else statement is copied from above so don't bother + * checking it separately. We could avoid fiddlling with the + * character size if the mode is already RAW or if neither the + * LITOUT bit or the PASS8 bit is being changed, but the delta of + * the change is not available here and skipping the RAW case would + * make the code different from above. 
+ */ + cflag &= ~(CSIZE|PARENB); + if (flags&(RAW|LITOUT|PASS8)) { + cflag |= CS8; + if (!(flags&(RAW|PASS8)) + || (flags&(RAW|PASS8|ANYP)) == (PASS8|ANYP)) + iflag |= ISTRIP; + else + iflag &= ~ISTRIP; + if (flags&(RAW|LITOUT)) + oflag &= ~OPOST; + else + oflag |= OPOST; + } else { + cflag |= CS7|PARENB; + iflag |= ISTRIP; + oflag |= OPOST; + } + t->c_iflag = iflag; + t->c_oflag = oflag; + t->c_lflag = lflag; + t->c_cflag = cflag; +} +#endif /* COMPAT_43 || COMPAT_SUNOS */ diff --git a/bsd/kern/tty_conf.c b/bsd/kern/tty_conf.c new file mode 100644 index 000000000..bb4a8c9c0 --- /dev/null +++ b/bsd/kern/tty_conf.c @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. 
and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tty_conf.c 8.4 (Berkeley) 1/21/94 + */ + +#include +#include +#include +#include + +#ifndef MAXLDISC +#define MAXLDISC 8 +#endif + +#ifndef NeXT +static l_open_t l_noopen; +static l_close_t l_noclose; +static l_ioctl_t l_nullioctl; +static l_rint_t l_norint; +static l_start_t l_nostart; +#else /* NeXT */ +#define l_noopen ((int (*) __P((dev_t, struct tty *)))enodev) +#define l_noclose ((int (*) __P((struct tty *, int flags)))enodev) +#define l_noread ((int (*) __P((struct tty *, struct uio *, int)))enodev) +#define l_nowrite l_noread +#define l_norint ((int (*) __P((int c, struct tty *)))enodev) +#define l_nostart ((int (*) __P((struct tty *)))enodev) +static int +l_nullioctl(struct tty *tp, u_long cmd, caddr_t data, int flag, struct proc *p); +#endif /* !NeXT */ + +/* + * XXX it probably doesn't matter what the entries other than the l_open + * entry are here. The l_nullioctl and ttymodem entries still look fishy. + * Reconsider the removal of nullmodem anyway. It was too much like + * ttymodem, but a completely null version might be useful. + */ +#define NODISC(n) \ + { l_noopen, l_noclose, l_noread, l_nowrite, \ + l_nullioctl, l_norint, l_nostart, ttymodem } + +struct linesw linesw[MAXLDISC] = +{ + /* 0- termios */ + { ttyopen, ttylclose, ttread, ttwrite, + l_nullioctl, ttyinput, ttstart, ttymodem }, + NODISC(1), /* 1- defunct */ + /* 2- NTTYDISC */ +#ifdef COMPAT_43 + { ttyopen, ttylclose, ttread, ttwrite, + l_nullioctl, ttyinput, ttstart, ttymodem }, +#else + NODISC(2), +#endif + NODISC(3), /* TABLDISC */ + NODISC(4), /* SLIPDISC */ + NODISC(5), /* PPPDISC */ + NODISC(6), /* loadable */ + NODISC(7), /* loadable */ +}; + +int nlinesw = sizeof (linesw) / sizeof (linesw[0]); + +static struct linesw nodisc = NODISC(0); + +#define LOADABLE_LDISC 6 +/* + * ldisc_register: Register a line discipline. + * + * discipline: Index for discipline to load, or LDISC_LOAD for us to choose. + * linesw_p: Pointer to linesw_p. 
+ * + * Returns: Index used or -1 on failure. + */ +int +ldisc_register(discipline, linesw_p) + int discipline; + struct linesw *linesw_p; +{ + int slot = -1; + + if (discipline == LDISC_LOAD) { + int i; + for (i = LOADABLE_LDISC; i < MAXLDISC; i++) + if (bcmp(linesw + i, &nodisc, sizeof(nodisc)) == 0) { + slot = i; + } + } + else if (discipline >= 0 && discipline < MAXLDISC) { + slot = discipline; + } + + if (slot != -1 && linesw_p) + linesw[slot] = *linesw_p; + + return slot; +} + +/* + * ldisc_deregister: Deregister a line discipline obtained with + * ldisc_register. Can only deregister "loadable" ones now. + * + * discipline: Index for discipline to unload. + */ +void +ldisc_deregister(discipline) + int discipline; +{ + if (discipline >= LOADABLE_LDISC && discipline < MAXLDISC) { + linesw[discipline] = nodisc; + } +} + +#ifndef NeXT +static int +l_noopen(dev, tp) + dev_t dev; + struct tty *tp; +{ + + return (ENODEV); +} + +static int +l_noclose(tp, flag) + struct tty *tp; + int flag; +{ + + return (ENODEV); +} + +int +l_noread(tp, uio, flag) + struct tty *tp; + struct uio *uio; + int flag; +{ + + return (ENODEV); +} + +int +l_nowrite(tp, uio, flag) + struct tty *tp; + struct uio *uio; + int flag; +{ + + return (ENODEV); +} + +static int +l_norint(c, tp) + int c; + struct tty *tp; +{ + + return (ENODEV); +} + +static int +l_nostart(tp) + struct tty *tp; +{ + + return (ENODEV); +} +#endif /* !NeXT */ + +/* + * Do nothing specific version of line + * discipline specific ioctl command. + */ +static int +l_nullioctl(tp, cmd, data, flags, p) + struct tty *tp; + u_long cmd; + caddr_t data; + int flags; + struct proc *p; +{ + + return (-1); +} diff --git a/bsd/kern/tty_pty.c b/bsd/kern/tty_pty.c new file mode 100644 index 000000000..f4eb1b31b --- /dev/null +++ b/bsd/kern/tty_pty.c @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tty_pty.c 8.4 (Berkeley) 2/20/95 + */ + +/* + * Pseudo-teletype Driver + * (Actually two drivers, requiring two entries in 'cdevsw') + */ +#include "pty.h" /* XXX */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef NeXT + +#define FREE_BSDSTATIC static +#else +#include + +#define FREE_BSDSTATIC __private_extern__ +#define d_open_t open_close_fcn_t +#define d_close_t open_close_fcn_t +#define d_devtotty_t struct tty ** +#define d_ioctl_t ioctl_fcn_t +#define d_read_t read_write_fcn_t +#define d_write_t read_write_fcn_t +#define d_select_t select_fcn_t +typedef void d_stop_t __P((struct tty *tp, int rw)); +#endif /* NeXT */ + +#ifdef notyet +static void ptyattach __P((int n)); +#endif +static void ptsstart __P((struct tty *tp)); +static void ptcwakeup __P((struct tty *tp, int flag)); + +FREE_BSDSTATIC d_open_t ptsopen; +FREE_BSDSTATIC d_close_t ptsclose; +FREE_BSDSTATIC d_read_t ptsread; +FREE_BSDSTATIC d_write_t ptswrite; 
+FREE_BSDSTATIC d_ioctl_t ptyioctl; +FREE_BSDSTATIC d_stop_t ptsstop; +FREE_BSDSTATIC d_devtotty_t ptydevtotty; +FREE_BSDSTATIC d_open_t ptcopen; +FREE_BSDSTATIC d_close_t ptcclose; +FREE_BSDSTATIC d_read_t ptcread; +FREE_BSDSTATIC d_write_t ptcwrite; +FREE_BSDSTATIC d_select_t ptcselect; + +#ifndef NeXT +#define CDEV_MAJOR_S 5 +#define CDEV_MAJOR_C 6 +static struct cdevsw pts_cdevsw = + { ptsopen, ptsclose, ptsread, ptswrite, /*5*/ + ptyioctl, ptsstop, nullreset, ptydevtotty,/* ttyp */ + ttselect, nommap, NULL, "pts", NULL, -1 }; + +static struct cdevsw ptc_cdevsw = + { ptcopen, ptcclose, ptcread, ptcwrite, /*6*/ + ptyioctl, nullstop, nullreset, ptydevtotty,/* ptyp */ + ptcselect, nommap, NULL, "ptc", NULL, -1 }; +#endif /* !NeXT */ + + +#if NPTY == 1 +#undef NPTY +#define NPTY 32 /* crude XXX */ +#warning You have only one pty defined, redefining to 32. +#endif + +#ifndef NeXT +#ifdef DEVFS +#define MAXUNITS (8 * 32) +static void *devfs_token_pts[MAXUNITS]; +static void *devfs_token_ptc[MAXUNITS]; +static const char jnames[] = "pqrsPQRS"; +#if NPTY > MAXUNITS +#undef NPTY +#define NPTY MAXUNITS +#warning Can't have more than 256 pty's with DEVFS defined. +#endif /* NPTY > MAXUNITS */ +#endif /* DEVFS */ +#endif /* !NeXT */ + +#define BUFSIZ 100 /* Chunk size iomoved to/from user */ + +/* + * pts == /dev/tty[pqrsPQRS][0123456789abcdefghijklmnopqrstuv] + * ptc == /dev/pty[pqrsPQRS][0123456789abcdefghijklmnopqrstuv] + */ +#ifndef NeXT +FREE_BSDSTATIC struct tty pt_tty[NPTY]; /* XXX */ +#else /* NeXT */ +/* NeXT All references to have been changed to indirections in the file */ +FREE_BSDSTATIC struct tty *pt_tty[NPTY] = { NULL }; +#endif /* ! 
NeXT */ + +static struct pt_ioctl { + int pt_flags; + struct selinfo pt_selr, pt_selw; + u_char pt_send; + u_char pt_ucntl; +} pt_ioctl[NPTY]; /* XXX */ +static int npty = NPTY; /* for pstat -t */ + +#define PF_PKT 0x08 /* packet mode */ +#define PF_STOPPED 0x10 /* user told stopped */ +#define PF_REMOTE 0x20 /* remote and flow controlled input */ +#define PF_NOSTOP 0x40 +#define PF_UCNTL 0x80 /* user control mode */ + +#ifdef notyet +/* + * Establish n (or default if n is 1) ptys in the system. + * + * XXX cdevsw & pstat require the array `pty[]' to be an array + */ +FREEBSD_STATIC void +ptyattach(n) + int n; +{ + char *mem; + register u_long ntb; +#define DEFAULT_NPTY 32 + + /* maybe should allow 0 => none? */ + if (n <= 1) + n = DEFAULT_NPTY; + ntb = n * sizeof(struct tty); +#ifndef NeXT + mem = malloc(ntb + ALIGNBYTES + n * sizeof(struct pt_ioctl), + M_DEVBUF, M_WAITOK); +#else + MALLOC(mem, char *, ntb + ALIGNBYTES + n * sizeof(struct pt_ioctl), + M_DEVBUF, M_WAITOK); +#endif /* !NeXT */ + pt_tty = (struct tty *)mem; + mem = (char *)ALIGN(mem + ntb); + pt_ioctl = (struct pt_ioctl *)mem; + npty = n; +} +#endif + +#ifndef DEVFS +int pty_init() +{ + return 0; +} +#else +#include +#define START_CHAR 'p' +#define HEX_BASE 16 +int pty_init(int n_ptys) +{ + int i; + int j; + + /* create the pseudo tty device nodes */ + for (j = 0; j < 10; j++) { + for (i = 0; i < HEX_BASE; i++) { + int m = j * HEX_BASE + i; + if (m == n_ptys) + goto done; + (void)devfs_make_node(makedev(4, m), + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, + "tty%c%x", j + START_CHAR, i); + (void)devfs_make_node(makedev(5, m), + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, + "pty%c%x", j + START_CHAR, i); + } + } + done: + return (0); +} +#endif DEVFS + +/*ARGSUSED*/ +FREE_BSDSTATIC int +ptsopen(dev, flag, devtype, p) + dev_t dev; + int flag, devtype; + struct proc *p; +{ + register struct tty *tp; + int error; + +#ifndef NeXT + tp = &pt_tty[minor(dev)]; +#else + /* + * You will see this sourt of code coming up 
in diffs later both + * the ttymalloc and the tp indirection. + */ + if (minor(dev) >= npty) + return (ENXIO); + if (!pt_tty[minor(dev)]) { + tp = pt_tty[minor(dev)] = ttymalloc(); + } else + tp = pt_tty[minor(dev)]; +#endif + if ((tp->t_state & TS_ISOPEN) == 0) { + ttychars(tp); /* Set up default chars */ + tp->t_iflag = TTYDEF_IFLAG; + tp->t_oflag = TTYDEF_OFLAG; + tp->t_lflag = TTYDEF_LFLAG; + tp->t_cflag = TTYDEF_CFLAG; + tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; + ttsetwater(tp); /* would be done in xxparam() */ + } else if (tp->t_state&TS_XCLUDE && p->p_ucred->cr_uid != 0) + return (EBUSY); + if (tp->t_oproc) /* Ctrlr still around. */ + (void)(*linesw[tp->t_line].l_modem)(tp, 1); + while ((tp->t_state & TS_CARR_ON) == 0) { + if (flag&FNONBLOCK) + break; + error = ttysleep(tp, TSA_CARR_ON(tp), TTIPRI | PCATCH, + "ptsopn", 0); + if (error) + return (error); + } + error = (*linesw[tp->t_line].l_open)(dev, tp); + if (error == 0) + ptcwakeup(tp, FREAD|FWRITE); + return (error); +} + +FREE_BSDSTATIC int +ptsclose(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + register struct tty *tp; + int err; + + tp = pt_tty[minor(dev)]; + err = (*linesw[tp->t_line].l_close)(tp, flag); + ptsstop(tp, FREAD|FWRITE); + (void) ttyclose(tp); + return (err); +} + +FREE_BSDSTATIC int +ptsread(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ +#ifndef NeXT + struct proc *p = curproc; +#else + struct proc *p = current_proc(); +#endif /* NeXT */ + register struct tty *tp = pt_tty[minor(dev)]; + register struct pt_ioctl *pti = &pt_ioctl[minor(dev)]; + int error = 0; + +again: + if (pti->pt_flags & PF_REMOTE) { + while (isbackground(p, tp)) { + if ((p->p_sigignore & sigmask(SIGTTIN)) || + (p->p_sigmask & sigmask(SIGTTIN)) || + p->p_pgrp->pg_jobc == 0 || + p->p_flag & P_PPWAIT) + return (EIO); + pgsignal(p->p_pgrp, SIGTTIN, 1); + error = ttysleep(tp, &lbolt, TTIPRI | PCATCH | PTTYBLOCK, "ptsbg", + 0); + if (error) + return (error); + } + if 
(tp->t_canq.c_cc == 0) { + if (flag & IO_NDELAY) + return (EWOULDBLOCK); + error = ttysleep(tp, TSA_PTS_READ(tp), TTIPRI | PCATCH, + "ptsin", 0); + if (error) + return (error); + goto again; + } + while (tp->t_canq.c_cc > 1 && uio->uio_resid > 0) + if (ureadc(getc(&tp->t_canq), uio) < 0) { + error = EFAULT; + break; + } + if (tp->t_canq.c_cc == 1) + (void) getc(&tp->t_canq); + if (tp->t_canq.c_cc) + return (error); + } else + if (tp->t_oproc) + error = (*linesw[tp->t_line].l_read)(tp, uio, flag); + ptcwakeup(tp, FWRITE); + return (error); +} + +/* + * Write to pseudo-tty. + * Wakeups of controlling tty will happen + * indirectly, when tty driver calls ptsstart. + */ +FREE_BSDSTATIC int +ptswrite(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + register struct tty *tp; + + tp = pt_tty[minor(dev)]; + if (tp->t_oproc == 0) + return (EIO); + return ((*linesw[tp->t_line].l_write)(tp, uio, flag)); +} + +/* + * Start output on pseudo-tty. + * Wake up process selecting or sleeping for input from controlling tty. 
+ */ +static void +ptsstart(tp) + struct tty *tp; +{ + register struct pt_ioctl *pti = &pt_ioctl[minor(tp->t_dev)]; + + if (tp->t_state & TS_TTSTOP) + return; + if (pti->pt_flags & PF_STOPPED) { + pti->pt_flags &= ~PF_STOPPED; + pti->pt_send = TIOCPKT_START; + } + ptcwakeup(tp, FREAD); +} + +static void +ptcwakeup(tp, flag) + struct tty *tp; + int flag; +{ + struct pt_ioctl *pti = &pt_ioctl[minor(tp->t_dev)]; + + if (flag & FREAD) { + selwakeup(&pti->pt_selr); + wakeup(TSA_PTC_READ(tp)); + } + if (flag & FWRITE) { + selwakeup(&pti->pt_selw); + wakeup(TSA_PTC_WRITE(tp)); + } +} + +FREE_BSDSTATIC int +ptcopen(dev, flag, devtype, p) + dev_t dev; + int flag, devtype; + struct proc *p; +{ + register struct tty *tp; + struct pt_ioctl *pti; + + if (minor(dev) >= npty) + return (ENXIO); + if(!pt_tty[minor(dev)]) { + tp = pt_tty[minor(dev)] = ttymalloc(); + } else + tp = pt_tty[minor(dev)]; + if (tp->t_oproc) + return (EIO); + tp->t_oproc = ptsstart; +#ifdef sun4c + tp->t_stop = ptsstop; +#endif + (void)(*linesw[tp->t_line].l_modem)(tp, 1); + tp->t_lflag &= ~EXTPROC; + pti = &pt_ioctl[minor(dev)]; + pti->pt_flags = 0; + pti->pt_send = 0; + pti->pt_ucntl = 0; + return (0); +} + +FREE_BSDSTATIC int +ptcclose(dev, flags, fmt, p) + dev_t dev; + int flags; + int fmt; + struct proc *p; +{ + register struct tty *tp; + + tp = pt_tty[minor(dev)]; + (void)(*linesw[tp->t_line].l_modem)(tp, 0); + + /* + * XXX MDMBUF makes no sense for ptys but would inhibit the above + * l_modem(). CLOCAL makes sense but isn't supported. Special + * l_modem()s that ignore carrier drop make no sense for ptys but + * may be in use because other parts of the line discipline make + * sense for ptys. Recover by doing everything that a normal + * ttymodem() would have done except for sending a SIGHUP. 
+ */ + if (tp->t_state & TS_ISOPEN) { + tp->t_state &= ~(TS_CARR_ON | TS_CONNECTED); + tp->t_state |= TS_ZOMBIE; + ttyflush(tp, FREAD | FWRITE); + } + + tp->t_oproc = 0; /* mark closed */ + return (0); +} + +FREE_BSDSTATIC int +ptcread(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + register struct tty *tp = pt_tty[minor(dev)]; + struct pt_ioctl *pti = &pt_ioctl[minor(dev)]; + char buf[BUFSIZ]; + int error = 0, cc; + + /* + * We want to block until the slave + * is open, and there's something to read; + * but if we lost the slave or we're NBIO, + * then return the appropriate error instead. + */ + for (;;) { + if (tp->t_state&TS_ISOPEN) { + if (pti->pt_flags&PF_PKT && pti->pt_send) { + error = ureadc((int)pti->pt_send, uio); + if (error) + return (error); + if (pti->pt_send & TIOCPKT_IOCTL) { + cc = min(uio->uio_resid, + sizeof(tp->t_termios)); + uiomove((caddr_t)&tp->t_termios, cc, + uio); + } + pti->pt_send = 0; + return (0); + } + if (pti->pt_flags&PF_UCNTL && pti->pt_ucntl) { + error = ureadc((int)pti->pt_ucntl, uio); + if (error) + return (error); + pti->pt_ucntl = 0; + return (0); + } + if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0) + break; + } + if ((tp->t_state & TS_CONNECTED) == 0) + return (0); /* EOF */ + if (flag & IO_NDELAY) + return (EWOULDBLOCK); + error = tsleep(TSA_PTC_READ(tp), TTIPRI | PCATCH, "ptcin", 0); + if (error) + return (error); + } + if (pti->pt_flags & (PF_PKT|PF_UCNTL)) + error = ureadc(0, uio); + while (uio->uio_resid > 0 && error == 0) { + cc = q_to_b(&tp->t_outq, buf, min(uio->uio_resid, BUFSIZ)); + if (cc <= 0) + break; + error = uiomove(buf, cc, uio); + } + ttwwakeup(tp); + return (error); +} + +FREE_BSDSTATIC void +ptsstop(tp, flush) + register struct tty *tp; + int flush; +{ + struct pt_ioctl *pti = &pt_ioctl[minor(tp->t_dev)]; + int flag; + + /* note: FLUSHREAD and FLUSHWRITE already ok */ + if (flush == 0) { + flush = TIOCPKT_STOP; + pti->pt_flags |= PF_STOPPED; + } else + pti->pt_flags &= ~PF_STOPPED; 
+ pti->pt_send |= flush; + /* change of perspective */ + flag = 0; + if (flush & FREAD) + flag |= FWRITE; + if (flush & FWRITE) + flag |= FREAD; + ptcwakeup(tp, flag); +} + +FREE_BSDSTATIC int +ptcselect(dev, rw, p) + dev_t dev; + int rw; + struct proc *p; +{ + register struct tty *tp = pt_tty[minor(dev)]; + struct pt_ioctl *pti = &pt_ioctl[minor(dev)]; + int s; + + if ((tp->t_state & TS_CONNECTED) == 0) + return (1); + switch (rw) { + + case FREAD: + /* + * Need to block timeouts (ttrstart). + */ + s = spltty(); + if ((tp->t_state&TS_ISOPEN) && + tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0) { + splx(s); + return (1); + } + splx(s); + /* FALLTHROUGH */ + + case 0: /* exceptional */ + if ((tp->t_state&TS_ISOPEN) && + ((pti->pt_flags&PF_PKT && pti->pt_send) || + (pti->pt_flags&PF_UCNTL && pti->pt_ucntl))) + return (1); + selrecord(p, &pti->pt_selr); + break; + + + case FWRITE: + if (tp->t_state&TS_ISOPEN) { + if (pti->pt_flags & PF_REMOTE) { + if (tp->t_canq.c_cc == 0) + return (1); + } else { + if (tp->t_rawq.c_cc + tp->t_canq.c_cc < TTYHOG-2) + return (1); + if (tp->t_canq.c_cc == 0 && (tp->t_iflag&ICANON)) + return (1); + } + } + selrecord(p, &pti->pt_selw); + break; + + } + return (0); +} + +FREE_BSDSTATIC int +ptcwrite(dev, uio, flag) + dev_t dev; + register struct uio *uio; + int flag; +{ + register struct tty *tp = pt_tty[minor(dev)]; + register u_char *cp = NULL; + register int cc = 0; + u_char locbuf[BUFSIZ]; + int cnt = 0; + struct pt_ioctl *pti = &pt_ioctl[minor(dev)]; + int error = 0; + +again: + if ((tp->t_state&TS_ISOPEN) == 0) + goto block; + if (pti->pt_flags & PF_REMOTE) { + if (tp->t_canq.c_cc) + goto block; + while ((uio->uio_resid > 0 || cc > 0) && + tp->t_canq.c_cc < TTYHOG - 1) { + if (cc == 0) { + cc = min(uio->uio_resid, BUFSIZ); + cc = min(cc, TTYHOG - 1 - tp->t_canq.c_cc); + cp = locbuf; + error = uiomove((caddr_t)cp, cc, uio); + if (error) + return (error); + /* check again for safety */ + if ((tp->t_state & TS_ISOPEN) == 0) { + /* 
adjust as usual */ + uio->uio_resid += cc; + return (EIO); + } + } + if (cc > 0) { + cc = b_to_q((char *)cp, cc, &tp->t_canq); + /* + * XXX we don't guarantee that the canq size + * is >= TTYHOG, so the above b_to_q() may + * leave some bytes uncopied. However, space + * is guaranteed for the null terminator if + * we don't fail here since (TTYHOG - 1) is + * not a multiple of CBSIZE. + */ + if (cc > 0) + break; + } + } + /* adjust for data copied in but not written */ + uio->uio_resid += cc; + (void) putc(0, &tp->t_canq); + ttwakeup(tp); + wakeup(TSA_PTS_READ(tp)); + return (0); + } + while (uio->uio_resid > 0 || cc > 0) { + if (cc == 0) { + cc = min(uio->uio_resid, BUFSIZ); + cp = locbuf; + error = uiomove((caddr_t)cp, cc, uio); + if (error) + return (error); + /* check again for safety */ + if ((tp->t_state & TS_ISOPEN) == 0) { + /* adjust for data copied in but not written */ + uio->uio_resid += cc; + return (EIO); + } + } + while (cc > 0) { + if ((tp->t_rawq.c_cc + tp->t_canq.c_cc) >= TTYHOG - 2 && + (tp->t_canq.c_cc > 0 || !(tp->t_iflag&ICANON))) { + wakeup(TSA_HUP_OR_INPUT(tp)); + goto block; + } + (*linesw[tp->t_line].l_rint)(*cp++, tp); + cnt++; + cc--; + } + cc = 0; + } + return (0); +block: + /* + * Come here to wait for slave to open, for space + * in outq, or space in rawq, or an empty canq. 
+ */ + if ((tp->t_state & TS_CONNECTED) == 0) { + /* adjust for data copied in but not written */ + uio->uio_resid += cc; + return (EIO); + } + if (flag & IO_NDELAY) { + /* adjust for data copied in but not written */ + uio->uio_resid += cc; + if (cnt == 0) + return (EWOULDBLOCK); + return (0); + } + error = tsleep(TSA_PTC_WRITE(tp), TTOPRI | PCATCH, "ptcout", 0); + if (error) { + /* adjust for data copied in but not written */ + uio->uio_resid += cc; + return (error); + } + goto again; +} + +#ifndef NeXT +/* XXX we eventually want to go to this model, + * but premier can't change the cdevsw */ +static struct tty * +ptydevtotty(dev) + dev_t dev; +{ + if (minor(dev) >= npty) + return (NULL); + + return &pt_tty[minor(dev)]; +} +#endif /* !NeXT */ + +/*ARGSUSED*/ +FREE_BSDSTATIC int +#ifndef NeXT +ptyioctl(dev, cmd, data, flag) + dev_t dev; + int cmd; + caddr_t data; + int flag; +#else +ptyioctl(dev, cmd, data, flag, p) + dev_t dev; + u_long cmd; + caddr_t data; + int flag; + struct proc *p; +#endif +{ + register struct tty *tp = pt_tty[minor(dev)]; + register struct pt_ioctl *pti = &pt_ioctl[minor(dev)]; + register u_char *cc = tp->t_cc; + int stop, error; + + /* + * IF CONTROLLER STTY THEN MUST FLUSH TO PREVENT A HANG. + * ttywflush(tp) will hang if there are characters in the outq. + */ + if (cmd == TIOCEXT) { + /* + * When the EXTPROC bit is being toggled, we need + * to send an TIOCPKT_IOCTL if the packet driver + * is turned on. 
+ */ + if (*(int *)data) { + if (pti->pt_flags & PF_PKT) { + pti->pt_send |= TIOCPKT_IOCTL; + ptcwakeup(tp, FREAD); + } + tp->t_lflag |= EXTPROC; + } else { + if ((tp->t_lflag & EXTPROC) && + (pti->pt_flags & PF_PKT)) { + pti->pt_send |= TIOCPKT_IOCTL; + ptcwakeup(tp, FREAD); + } + tp->t_lflag &= ~EXTPROC; + } + return(0); + } else +#ifndef NeXT + if (cdevsw[major(dev)]->d_open == ptcopen) +#else + if (cdevsw[major(dev)].d_open == ptcopen) +#endif + switch (cmd) { + + case TIOCGPGRP: + /* + * We aviod calling ttioctl on the controller since, + * in that case, tp must be the controlling terminal. + */ + *(int *)data = tp->t_pgrp ? tp->t_pgrp->pg_id : 0; + return (0); + + case TIOCPKT: + if (*(int *)data) { + if (pti->pt_flags & PF_UCNTL) + return (EINVAL); + pti->pt_flags |= PF_PKT; + } else + pti->pt_flags &= ~PF_PKT; + return (0); + + case TIOCUCNTL: + if (*(int *)data) { + if (pti->pt_flags & PF_PKT) + return (EINVAL); + pti->pt_flags |= PF_UCNTL; + } else + pti->pt_flags &= ~PF_UCNTL; + return (0); + + case TIOCREMOTE: + if (*(int *)data) + pti->pt_flags |= PF_REMOTE; + else + pti->pt_flags &= ~PF_REMOTE; + ttyflush(tp, FREAD|FWRITE); + return (0); + +#ifdef COMPAT_43 + case TIOCSETP: + case TIOCSETN: +#endif + case TIOCSETD: + case TIOCSETA: + case TIOCSETAW: + case TIOCSETAF: + ndflush(&tp->t_outq, tp->t_outq.c_cc); + break; + + case TIOCSIG: + if (*(unsigned int *)data >= NSIG || + *(unsigned int *)data == 0) + return(EINVAL); + if ((tp->t_lflag&NOFLSH) == 0) + ttyflush(tp, FREAD|FWRITE); + pgsignal(tp->t_pgrp, *(unsigned int *)data, 1); + if ((*(unsigned int *)data == SIGINFO) && + ((tp->t_lflag&NOKERNINFO) == 0)) + ttyinfo(tp); + return(0); + } + error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); + if (error < 0) + error = ttioctl(tp, cmd, data, flag, p); + if (error < 0) { + if (pti->pt_flags & PF_UCNTL && + (cmd & ~0xff) == UIOCCMD(0)) { + if (cmd & 0xff) { + pti->pt_ucntl = (u_char)cmd; + ptcwakeup(tp, FREAD); + } + return (0); + } + error = 
ENOTTY; + } + /* + * If external processing and packet mode send ioctl packet. + */ + if ((tp->t_lflag&EXTPROC) && (pti->pt_flags & PF_PKT)) { + switch(cmd) { + case TIOCSETA: + case TIOCSETAW: + case TIOCSETAF: +#ifdef COMPAT_43 + case TIOCSETP: + case TIOCSETN: +#endif +#if defined(COMPAT_43) || defined(COMPAT_SUNOS) + case TIOCSETC: + case TIOCSLTC: + case TIOCLBIS: + case TIOCLBIC: + case TIOCLSET: +#endif + pti->pt_send |= TIOCPKT_IOCTL; + ptcwakeup(tp, FREAD); + default: + break; + } + } + stop = (tp->t_iflag & IXON) && CCEQ(cc[VSTOP], CTRL('s')) + && CCEQ(cc[VSTART], CTRL('q')); + if (pti->pt_flags & PF_NOSTOP) { + if (stop) { + pti->pt_send &= ~TIOCPKT_NOSTOP; + pti->pt_send |= TIOCPKT_DOSTOP; + pti->pt_flags &= ~PF_NOSTOP; + ptcwakeup(tp, FREAD); + } + } else { + if (!stop) { + pti->pt_send &= ~TIOCPKT_DOSTOP; + pti->pt_send |= TIOCPKT_NOSTOP; + pti->pt_flags |= PF_NOSTOP; + ptcwakeup(tp, FREAD); + } + } + return (error); +} + +#ifndef NeXT +static ptc_devsw_installed = 0; + +static void +ptc_drvinit(void *unused) +{ +#ifdef DEVFS + int i,j,k; +#endif + dev_t dev; + + if( ! ptc_devsw_installed ) { + dev = makedev(CDEV_MAJOR_S, 0); + cdevsw_add(&dev, &pts_cdevsw, NULL); + dev = makedev(CDEV_MAJOR_C, 0); + cdevsw_add(&dev, &ptc_cdevsw, NULL); + ptc_devsw_installed = 1; +#ifdef DEVFS + for ( i = 0 ; i +#include +#include +#include +#include +#include + +#include + +/* + * At compile time, choose: + * There are two ways the TTY_QUOTE bit can be stored. If QBITS is + * defined we allocate an array of bits -- 1/8th as much memory but + * setbit(), clrbit(), and isset() take more cpu. If QBITS is + * undefined, we just use an array of bytes. + * + * If TTY_QUOTE functionality isn't required by a line discipline, + * it can free c_cq and set it to NULL. This speeds things up, + * and also does not use any extra memory. This is useful for (say) + * a SLIP line discipline that wants a 32K ring buffer for data + * but doesn't need quoting. 
+ */ +#define QBITS + +#ifdef QBITS +#define QMEM(n) ((((n)-1)/NBBY)+1) +#else +#define QMEM(n) (n) +#endif + + +/* + * Initialize clists. + */ +void +cinit() +{ +} + +/* + * Initialize a particular clist. Ok, they are really ring buffers, + * of the specified length, with/without quoting support. + */ +int +clalloc(clp, size, quot) + struct clist *clp; + int size; + int quot; +{ + + MALLOC(clp->c_cs, u_char *, size, M_TTYS, M_WAITOK); + if (!clp->c_cs) + return (-1); + bzero(clp->c_cs, size); + + if(quot) { + MALLOC(clp->c_cq, u_char *, QMEM(size), M_TTYS, M_WAITOK); + if (!clp->c_cq) { + FREE(clp->c_cs, M_TTYS); + return (-1); + } + bzero(clp->c_cs, QMEM(size)); + } else + clp->c_cq = (u_char *)0; + + clp->c_cf = clp->c_cl = (u_char *)0; + clp->c_ce = clp->c_cs + size; + clp->c_cn = size; + clp->c_cc = 0; + return (0); +} + +void +clfree(clp) + struct clist *clp; +{ + if(clp->c_cs) + FREE(clp->c_cs, M_TTYS); + if(clp->c_cq) + FREE(clp->c_cq, M_TTYS); + clp->c_cs = clp->c_cq = (u_char *)0; +} + + +/* + * Get a character from a clist. + */ +int +getc(clp) + struct clist *clp; +{ + register int c = -1; + int s; + + s = spltty(); + if (clp->c_cc == 0) + goto out; + + c = *clp->c_cf & 0xff; + if (clp->c_cq) { +#ifdef QBITS + if (isset(clp->c_cq, clp->c_cf - clp->c_cs) ) + c |= TTY_QUOTE; +#else + if (*(clp->c_cf - clp->c_cs + clp->c_cq)) + c |= TTY_QUOTE; +#endif + } + if (++clp->c_cf == clp->c_ce) + clp->c_cf = clp->c_cs; + if (--clp->c_cc == 0) + clp->c_cf = clp->c_cl = (u_char *)0; +out: + splx(s); + return c; +} + +/* + * Copy clist to buffer. + * Return number of bytes moved. 
+ */ +int +q_to_b(clp, cp, count) + struct clist *clp; + u_char *cp; + int count; +{ + register int cc; + u_char *p = cp; + int s; + + s = spltty(); + /* optimize this while loop */ + while (count > 0 && clp->c_cc > 0) { + cc = clp->c_cl - clp->c_cf; + if (clp->c_cf >= clp->c_cl) + cc = clp->c_ce - clp->c_cf; + if (cc > count) + cc = count; + bcopy(clp->c_cf, p, cc); + count -= cc; + p += cc; + clp->c_cc -= cc; + clp->c_cf += cc; + if (clp->c_cf == clp->c_ce) + clp->c_cf = clp->c_cs; + } + if (clp->c_cc == 0) + clp->c_cf = clp->c_cl = (u_char *)0; + splx(s); + return p - cp; +} + +/* + * Return count of contiguous characters in clist. + * Stop counting if flag&character is non-null. + */ +int +ndqb(clp, flag) + struct clist *clp; + int flag; +{ + int count = 0; + register int i; + register int cc; + int s; + + s = spltty(); + if ((cc = clp->c_cc) == 0) + goto out; + + if (flag == 0) { + count = clp->c_cl - clp->c_cf; + if (count <= 0) + count = clp->c_ce - clp->c_cf; + goto out; + } + + i = clp->c_cf - clp->c_cs; + if (flag & TTY_QUOTE) { + while (cc-- > 0 && !(clp->c_cs[i++] & (flag & ~TTY_QUOTE) || + isset(clp->c_cq, i))) { + count++; + if (i == clp->c_cn) + break; + } + } else { + while (cc-- > 0 && !(clp->c_cs[i++] & flag)) { + count++; + if (i == clp->c_cn) + break; + } + } +out: + splx(s); + return count; +} + +/* + * Flush count bytes from clist. 
+ */ +void +ndflush(clp, count) + struct clist *clp; + int count; +{ + register int cc; + int s; + + s = spltty(); + if (count == clp->c_cc) { + clp->c_cc = 0; + clp->c_cf = clp->c_cl = (u_char *)0; + goto out; + } + /* optimize this while loop */ + while (count > 0 && clp->c_cc > 0) { + cc = clp->c_cl - clp->c_cf; + if (clp->c_cf >= clp->c_cl) + cc = clp->c_ce - clp->c_cf; + if (cc > count) + cc = count; + count -= cc; + clp->c_cc -= cc; + clp->c_cf += cc; + if (clp->c_cf == clp->c_ce) + clp->c_cf = clp->c_cs; + } + if (clp->c_cc == 0) + clp->c_cf = clp->c_cl = (u_char *)0; +out: + splx(s); +} + +/* + * Put a character into the output queue. + */ +int +putc(c, clp) + int c; + struct clist *clp; +{ + register int i; + int s; + + s = spltty(); + if (clp->c_cc == 0) { + if (!clp->c_cs) { +#if DIAGNOSTIC + //printf("putc: required clalloc\n"); +#endif + if(clalloc(clp, 1024, 1)) { +out: + splx(s); + return -1; + } + } + clp->c_cf = clp->c_cl = clp->c_cs; + } + + if (clp->c_cc == clp->c_cn) + goto out; + + *clp->c_cl = c & 0xff; + i = clp->c_cl - clp->c_cs; + if (clp->c_cq) { +#ifdef QBITS + if (c & TTY_QUOTE) + setbit(clp->c_cq, i); + else + clrbit(clp->c_cq, i); +#else + q = clp->c_cq + i; + *q = (c & TTY_QUOTE) ? 
1 : 0; +#endif + } + clp->c_cc++; + clp->c_cl++; + if (clp->c_cl == clp->c_ce) + clp->c_cl = clp->c_cs; + splx(s); + return 0; +} + +#ifdef QBITS +/* + * optimized version of + * + * for (i = 0; i < len; i++) + * clrbit(cp, off + len); + */ +void +clrbits(cp, off, len) + u_char *cp; + int off; + int len; +{ + int sby, sbi, eby, ebi; + register int i; + u_char mask; + + if(len==1) { + clrbit(cp, off); + return; + } + + sby = off / NBBY; + sbi = off % NBBY; + eby = (off+len) / NBBY; + ebi = (off+len) % NBBY; + if (sby == eby) { + mask = ((1 << (ebi - sbi)) - 1) << sbi; + cp[sby] &= ~mask; + } else { + mask = (1<c_cc == 0) { + if (!clp->c_cs) { +#if DIAGNOSTIC + printf("b_to_q: required clalloc\n"); +#endif + if(clalloc(clp, 1024, 1)) + goto out; + } + clp->c_cf = clp->c_cl = clp->c_cs; + } + + if (clp->c_cc == clp->c_cn) + goto out; + + /* optimize this while loop */ + while (count > 0 && clp->c_cc < clp->c_cn) { + cc = clp->c_ce - clp->c_cl; + if (clp->c_cf > clp->c_cl) + cc = clp->c_cf - clp->c_cl; + if (cc > count) + cc = count; + bcopy(p, clp->c_cl, cc); + if (clp->c_cq) { +#ifdef QBITS + clrbits(clp->c_cq, clp->c_cl - clp->c_cs, cc); +#else + bzero(clp->c_cl - clp->c_cs + clp->c_cq, cc); +#endif + } + p += cc; + count -= cc; + clp->c_cc += cc; + clp->c_cl += cc; + if (clp->c_cl == clp->c_ce) + clp->c_cl = clp->c_cs; + } +out: + splx(s); + return count; +} + +static int cc; + +/* + * Given a non-NULL pointer into the clist return the pointer + * to the next character in the list or return NULL if no more chars. + * + * Callers must not allow getc's to happen between firstc's and getc's + * so that the pointer becomes invalid. Note that interrupts are NOT + * masked. + */ +u_char * +nextc(clp, cp, c) + struct clist *clp; + register u_char *cp; + int *c; +{ + + if (clp->c_cf == cp) { + /* + * First time initialization. 
+ */ + cc = clp->c_cc; + } + if (cc == 0 || cp == NULL) + return NULL; + if (--cc == 0) + return NULL; + if (++cp == clp->c_ce) + cp = clp->c_cs; + *c = *cp & 0xff; + if (clp->c_cq) { +#ifdef QBITS + if (isset(clp->c_cq, cp - clp->c_cs)) + *c |= TTY_QUOTE; +#else + if (*(clp->c_cf - clp->c_cs + clp->c_cq)) + *c |= TTY_QUOTE; +#endif + } + return cp; +} + +/* + * Given a non-NULL pointer into the clist return the pointer + * to the first character in the list or return NULL if no more chars. + * + * Callers must not allow getc's to happen between firstc's and getc's + * so that the pointer becomes invalid. Note that interrupts are NOT + * masked. + * + * *c is set to the NEXT character + */ +u_char * +firstc(clp, c) + struct clist *clp; + int *c; +{ + register u_char *cp; + + cc = clp->c_cc; + if (cc == 0) + return NULL; + cp = clp->c_cf; + *c = *cp & 0xff; + if(clp->c_cq) { +#ifdef QBITS + if (isset(clp->c_cq, cp - clp->c_cs)) + *c |= TTY_QUOTE; +#else + if (*(cp - clp->c_cs + clp->c_cq)) + *c |= TTY_QUOTE; +#endif + } + return clp->c_cf; +} + +/* + * Remove the last character in the clist and return it. + */ +int +unputc(clp) + struct clist *clp; +{ + unsigned int c = -1; + int s; + + s = spltty(); + if (clp->c_cc == 0) + goto out; + + if (clp->c_cl == clp->c_cs) + clp->c_cl = clp->c_ce - 1; + else + --clp->c_cl; + clp->c_cc--; + + c = *clp->c_cl & 0xff; + if (clp->c_cq) { +#ifdef QBITS + if (isset(clp->c_cq, clp->c_cl - clp->c_cs)) + c |= TTY_QUOTE; +#else + if (*(clp->c_cf - clp->c_cs + clp->c_cq)) + c |= TTY_QUOTE; +#endif + } + if (clp->c_cc == 0) + clp->c_cf = clp->c_cl = (u_char *)0; +out: + splx(s); + return c; +} + +/* + * Put the chars in the from queue on the end of the to queue. 
+ */ +void +catq(from, to) + struct clist *from, *to; +{ + int c; + + while ((c = getc(from)) != -1) + putc(c, to); +} + +#endif /* NeXT */ diff --git a/bsd/kern/tty_tb.c b/bsd/kern/tty_tb.c new file mode 100644 index 000000000..c04197e24 --- /dev/null +++ b/bsd/kern/tty_tb.c @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * @(#)tty_tb.c 8.1 (Berkeley) 6/10/93 + */ + +#include "tb.h" +#if NTB > 0 + +/* + * Line discipline for RS232 tablets; + * supplies binary coordinate data. + */ +#include +#include +#include +#if NeXT +#include +#endif + +/* + * Tablet configuration table. 
+ */ +struct tbconf { + short tbc_recsize; /* input record size in bytes */ + short tbc_uiosize; /* size of data record returned user */ + int tbc_sync; /* mask for finding sync byte/bit */ + int (*tbc_decode)();/* decoding routine */ + char *tbc_run; /* enter run mode sequence */ + char *tbc_point; /* enter point mode sequence */ + char *tbc_stop; /* stop sequence */ + char *tbc_start; /* start/restart sequence */ + int tbc_flags; +#define TBF_POL 0x1 /* polhemus hack */ +#define TBF_INPROX 0x2 /* tablet has proximity info */ +}; + +static int tbdecode(), gtcodecode(), poldecode(); +static int tblresdecode(), tbhresdecode(); + +struct tbconf tbconf[TBTYPE] = { +{ 0 }, +{ 5, sizeof (struct tbpos), 0200, tbdecode, "6", "4" }, +{ 5, sizeof (struct tbpos), 0200, tbdecode, "\1CN", "\1RT", "\2", "\4" }, +{ 8, sizeof (struct gtcopos), 0200, gtcodecode }, +{17, sizeof (struct polpos), 0200, poldecode, 0, 0, "\21", "\5\22\2\23", + TBF_POL }, +{ 5, sizeof (struct tbpos), 0100, tblresdecode, "\1CN", "\1PT", "\2", "\4", + TBF_INPROX }, +{ 6, sizeof (struct tbpos), 0200, tbhresdecode, "\1CN", "\1PT", "\2", "\4", + TBF_INPROX }, +{ 5, sizeof (struct tbpos), 0100, tblresdecode, "\1CL\33", "\1PT\33", 0, 0}, +{ 6, sizeof (struct tbpos), 0200, tbhresdecode, "\1CL\33", "\1PT\33", 0, 0}, +}; + +/* + * Tablet state + */ +struct tb { + int tbflags; /* mode & type bits */ +#define TBMAXREC 17 /* max input record size */ + char cbuf[TBMAXREC]; /* input buffer */ + union { + struct tbpos tbpos; + struct gtcopos gtcopos; + struct polpos polpos; + } rets; /* processed state */ +#define NTBS 16 +} tb[NTBS]; + +/* + * Open as tablet discipline; called on discipline change. 
+ */ +/*ARGSUSED*/ +tbopen(dev, tp) + dev_t dev; + register struct tty *tp; +{ + register struct tb *tbp; + + if (tp->t_line == TABLDISC) + return (ENODEV); + ttywflush(tp); + for (tbp = tb; tbp < &tb[NTBS]; tbp++) + if (tbp->tbflags == 0) + break; + if (tbp >= &tb[NTBS]) + return (EBUSY); + tbp->tbflags = TBTIGER|TBPOINT; /* default */ + tp->t_cp = tbp->cbuf; + tp->t_inbuf = 0; + bzero((caddr_t)&tbp->rets, sizeof (tbp->rets)); + tp->T_LINEP = (caddr_t)tbp; + tp->t_flags |= LITOUT; + return (0); +} + +/* + * Line discipline change or last device close. + */ +tbclose(tp) + register struct tty *tp; +{ + register int s; + int modebits = TBPOINT|TBSTOP; + +#ifndef NeXT + tbioctl(tp, BIOSMODE, &modebits, 0); +#else + tbioctl(tp, BIOSMODE, &modebits, 0, current_proc()); +#endif + s = spltty(); + ((struct tb *)tp->T_LINEP)->tbflags = 0; + tp->t_cp = 0; + tp->t_inbuf = 0; + tp->t_rawq.c_cc = 0; /* clear queues -- paranoid */ + tp->t_canq.c_cc = 0; + tp->t_line = 0; /* paranoid: avoid races */ + splx(s); +} + +/* + * Read from a tablet line. + * Characters have been buffered in a buffer and decoded. + */ +tbread(tp, uio) + register struct tty *tp; + struct uio *uio; +{ + register struct tb *tbp = (struct tb *)tp->T_LINEP; + register struct tbconf *tc = &tbconf[tbp->tbflags & TBTYPE]; + int ret; + + if ((tp->t_state&TS_CARR_ON) == 0) + return (EIO); + ret = uiomove(&tbp->rets, tc->tbc_uiosize, uio); + if (tc->tbc_flags&TBF_POL) + tbp->rets.polpos.p_key = ' '; + return (ret); +} + +/* + * Low level character input routine. + * Stuff the character in the buffer, and decode + * if all the chars are there. + * + * This routine could be expanded in-line in the receiver + * interrupt routine to make it run as fast as possible. + */ +tbinput(c, tp) + register int c; + register struct tty *tp; +{ + register struct tb *tbp = (struct tb *)tp->T_LINEP; + register struct tbconf *tc = &tbconf[tbp->tbflags & TBTYPE]; + + if (tc->tbc_recsize == 0 || tc->tbc_decode == 0) /* paranoid? 
*/ + return; + /* + * Locate sync bit/byte or reset input buffer. + */ + if (c&tc->tbc_sync || tp->t_inbuf == tc->tbc_recsize) { + tp->t_cp = tbp->cbuf; + tp->t_inbuf = 0; + } + *tp->t_cp++ = c&0177; + /* + * Call decode routine only if a full record has been collected. + */ + if (++tp->t_inbuf == tc->tbc_recsize) + (*tc->tbc_decode)(tc, tbp->cbuf, &tbp->rets); +} + +/* + * Decode GTCO 8 byte format (high res, tilt, and pressure). + */ +static +gtcodecode(tc, cp, tbpos) + struct tbconf *tc; + register char *cp; + register struct gtcopos *tbpos; +{ + + tbpos->pressure = *cp >> 2; + tbpos->status = (tbpos->pressure > 16) | TBINPROX; /* half way down */ + tbpos->xpos = (*cp++ & 03) << 14; + tbpos->xpos |= *cp++ << 7; + tbpos->xpos |= *cp++; + tbpos->ypos = (*cp++ & 03) << 14; + tbpos->ypos |= *cp++ << 7; + tbpos->ypos |= *cp++; + tbpos->xtilt = *cp++; + tbpos->ytilt = *cp++; + tbpos->scount++; +} + +/* + * Decode old Hitachi 5 byte format (low res). + */ +static +tbdecode(tc, cp, tbpos) + struct tbconf *tc; + register char *cp; + register struct tbpos *tbpos; +{ + register char byte; + + byte = *cp++; + tbpos->status = (byte&0100) ? TBINPROX : 0; + byte &= ~0100; + if (byte > 036) + tbpos->status |= 1 << ((byte-040)/2); + tbpos->xpos = *cp++ << 7; + tbpos->xpos |= *cp++; + if (tbpos->xpos < 256) /* tablet wraps around at 256 */ + tbpos->status &= ~TBINPROX; /* make it out of proximity */ + tbpos->ypos = *cp++ << 7; + tbpos->ypos |= *cp++; + tbpos->scount++; +} + +/* + * Decode new Hitach 5-byte format (low res). 
+ */ +static +tblresdecode(tc, cp, tbpos) + struct tbconf *tc; + register char *cp; + register struct tbpos *tbpos; +{ + + *cp &= ~0100; /* mask sync bit */ + tbpos->status = (*cp++ >> 2) | TBINPROX; + if (tc->tbc_flags&TBF_INPROX && tbpos->status&020) + tbpos->status &= ~(020|TBINPROX); + tbpos->xpos = *cp++; + tbpos->xpos |= *cp++ << 6; + tbpos->ypos = *cp++; + tbpos->ypos |= *cp++ << 6; + tbpos->scount++; +} + +/* + * Decode new Hitach 6-byte format (high res). + */ +static +tbhresdecode(tc, cp, tbpos) + struct tbconf *tc; + register char *cp; + register struct tbpos *tbpos; +{ + char byte; + + byte = *cp++; + tbpos->xpos = (byte & 03) << 14; + tbpos->xpos |= *cp++ << 7; + tbpos->xpos |= *cp++; + tbpos->ypos = *cp++ << 14; + tbpos->ypos |= *cp++ << 7; + tbpos->ypos |= *cp++; + tbpos->status = (byte >> 2) | TBINPROX; + if (tc->tbc_flags&TBF_INPROX && tbpos->status&020) + tbpos->status &= ~(020|TBINPROX); + tbpos->scount++; +} + +/* + * Polhemus decode. + */ +static +poldecode(tc, cp, polpos) + struct tbconf *tc; + register char *cp; + register struct polpos *polpos; +{ + + polpos->p_x = cp[4] | cp[3]<<7 | (cp[9] & 0x03) << 14; + polpos->p_y = cp[6] | cp[5]<<7 | (cp[9] & 0x0c) << 12; + polpos->p_z = cp[8] | cp[7]<<7 | (cp[9] & 0x30) << 10; + polpos->p_azi = cp[11] | cp[10]<<7 | (cp[16] & 0x03) << 14; + polpos->p_pit = cp[13] | cp[12]<<7 | (cp[16] & 0x0c) << 12; + polpos->p_rol = cp[15] | cp[14]<<7 | (cp[16] & 0x30) << 10; + polpos->p_stat = cp[1] | cp[0]<<7; + if (cp[2] != ' ') + polpos->p_key = cp[2]; +} + +/*ARGSUSED*/ +#ifndef NeXT +tbioctl(tp, cmd, data, flag) + struct tty *tp; + caddr_t data; +#else +tbtioctl(tp, cmd, data, flag, p) + struct tty *tp; + u_long cmd; + caddr_t data; + int flag; + struct proc *p; +#endif /* !NeXT */ +{ + register struct tb *tbp = (struct tb *)tp->T_LINEP; + + switch (cmd) { + + case BIOGMODE: + *(int *)data = tbp->tbflags & TBMODE; + break; + + case BIOSTYPE: + if (tbconf[*(int *)data & TBTYPE].tbc_recsize == 0 || + tbconf[*(int 
*)data & TBTYPE].tbc_decode == 0) + return (EINVAL); + tbp->tbflags &= ~TBTYPE; + tbp->tbflags |= *(int *)data & TBTYPE; + /* fall thru... to set mode bits */ + + case BIOSMODE: { + register struct tbconf *tc; + + tbp->tbflags &= ~TBMODE; + tbp->tbflags |= *(int *)data & TBMODE; + tc = &tbconf[tbp->tbflags & TBTYPE]; + if (tbp->tbflags&TBSTOP) { + if (tc->tbc_stop) + ttyout(tc->tbc_stop, tp); + } else if (tc->tbc_start) + ttyout(tc->tbc_start, tp); + if (tbp->tbflags&TBPOINT) { + if (tc->tbc_point) + ttyout(tc->tbc_point, tp); + } else if (tc->tbc_run) + ttyout(tc->tbc_run, tp); + ttstart(tp); + break; + } + + case BIOGTYPE: + *(int *)data = tbp->tbflags & TBTYPE; + break; + + case TIOCSETD: + case TIOCGETD: + case TIOCGETP: + case TIOCGETC: + return (-1); /* pass thru... */ + + default: + return (ENOTTY); + } + return (0); +} +#endif /* NTB > 0 */ diff --git a/bsd/kern/tty_tty.c b/bsd/kern/tty_tty.c new file mode 100644 index 000000000..0e6415ed6 --- /dev/null +++ b/bsd/kern/tty_tty.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tty_tty.c 8.2 (Berkeley) 9/23/93 + */ + +/* + * Indirect driver for controlling tty. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef NeXT +#include +#ifdef DEVFS +#include +#endif /*DEVFS*/ + +static d_open_t cttyopen; +static d_read_t cttyread; +static d_write_t cttywrite; +static d_ioctl_t cttyioctl; +static d_select_t cttyselect; + +#define CDEV_MAJOR 1 +/* Don't make static, fdesc_vnops uses this. */ +struct cdevsw ctty_cdevsw = + { cttyopen, nullclose, cttyread, cttywrite, /*1*/ + cttyioctl, nullstop, nullreset, nodevtotty,/* tty */ + cttyselect, nommap, NULL, "ctty", NULL, -1 }; + +#endif /* !NeXT */ + +#define cttyvp(p) ((p)->p_flag & P_CONTROLT ? (p)->p_session->s_ttyvp : NULL) + +/*ARGSUSED*/ +int +cttyopen(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + struct vnode *ttyvp = cttyvp(p); + int error; + + if (ttyvp == NULL) + return (ENXIO); +#ifndef NeXT + VOP_LOCK(ttyvp); +#else + /* + * This is the only place that NeXT Guarding has been used for + * VOP_.*LOCK style calls. Note all of the other diffs should + * use the three paramater lock/unlock. + */ + vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p); +#endif + +#ifdef PARANOID + /* + * Since group is tty and mode is 620 on most terminal lines + * and since sessions protect terminals from processes outside + * your session, this check is probably no longer necessary. + * Since it inhibits setuid root programs that later switch + * to another user from accessing /dev/tty, we have decided + * to delete this test. (mckusick 5/93) + */ + error = VOP_ACCESS(ttyvp, + (flag&FREAD ? VREAD : 0) | (flag&FWRITE ? 
VWRITE : 0), p->p_ucred, p); + if (!error) +#endif /* PARANOID */ + error = VOP_OPEN(ttyvp, flag, NOCRED, p); + VOP_UNLOCK(ttyvp, 0, p); + return (error); +} + +/*ARGSUSED*/ +int +cttyread(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + struct proc *p = uio->uio_procp; + register struct vnode *ttyvp = cttyvp(uio->uio_procp); + int error; + + if (ttyvp == NULL) + return (EIO); + vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_READ(ttyvp, uio, flag, NOCRED); + VOP_UNLOCK(ttyvp, 0, p); + return (error); +} + +/*ARGSUSED*/ +int +cttywrite(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + struct proc *p = uio->uio_procp; + register struct vnode *ttyvp = cttyvp(uio->uio_procp); + int error; + + if (ttyvp == NULL) + return (EIO); + vn_lock(ttyvp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_WRITE(ttyvp, uio, flag, NOCRED); + VOP_UNLOCK(ttyvp, 0, p); + return (error); +} + +/*ARGSUSED*/ +#ifndef NeXT +static int +cttyioctl(dev, cmd, addr, flag, p) + dev_t dev; + int cmd; + caddr_t addr; + int flag; + struct proc *p; +#else +int +cttyioctl(dev, cmd, addr, flag, p) + dev_t dev; + u_long cmd; + caddr_t addr; + int flag; + struct proc *p; +#endif /* !NeXT */ +{ + struct vnode *ttyvp = cttyvp(p); + + if (ttyvp == NULL) + return (EIO); + if (cmd == TIOCSCTTY) /* don't allow controlling tty to be set */ + return EINVAL; /* to controlling tty -- infinite recursion */ + if (cmd == TIOCNOTTY) { + if (!SESS_LEADER(p)) { + p->p_flag &= ~P_CONTROLT; + return (0); + } else + return (EINVAL); + } + return (VOP_IOCTL(ttyvp, cmd, addr, flag, NOCRED, p)); +} + +/*ARGSUSED*/ +int +cttyselect(dev, flag, p) + dev_t dev; + int flag; + struct proc *p; +{ + struct vnode *ttyvp = cttyvp(p); + + if (ttyvp == NULL) + return (1); /* try operation to get EOF/failure */ + return (VOP_SELECT(ttyvp, flag, FREAD|FWRITE, NOCRED, p)); +} + +#ifndef NeXT +static ctty_devsw_installed = 0; +#ifdef DEVFS +static void *ctty_devfs_token; +#endif + +static void 
+ctty_drvinit(void *unused) +{ + dev_t dev; + + if( ! ctty_devsw_installed ) { + dev = makedev(CDEV_MAJOR,0); + cdevsw_add(&dev,&ctty_cdevsw,NULL); + ctty_devsw_installed = 1; +#ifdef DEVFS + ctty_devfs_token = + devfs_add_devswf(&ctty_cdevsw, 0, DV_CHR, 0, 0, + 0666, "tty"); +#endif + } +} + +SYSINIT(cttydev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,ctty_drvinit,NULL) + + +#endif /* !NeXT */ diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c new file mode 100644 index 000000000..c56fe8b2b --- /dev/null +++ b/bsd/kern/ubc_subr.c @@ -0,0 +1,1155 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: ubc_subr.c + * Author: Umesh Vaishampayan [umeshv@apple.com] + * 05-Aug-1999 umeshv Created. + * + * Functions related to Unified Buffer cache. 
+ * + */ + +#define DIAGNOSTIC 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#if DIAGNOSTIC +#if defined(assert) +#undef assert() +#endif +#define assert(cond) \ + if (!(cond)) panic("%s:%d (%s)", __FILE__, __LINE__, # cond) +#else +#include +#endif /* DIAGNOSTIC */ + +struct zone *ubc_info_zone; + +#if DIAGNOSTIC +#define USHOULDNOT(fun) panic("%s: should not", (fun)); +#else +#define USHOULDNOT(fun) +#endif /* DIAGNOSTIC */ + + +static void *_ubc_getobject(struct vnode *, int); +static void ubc_lock(struct vnode *); +static void ubc_unlock(struct vnode *); + +static void +ubc_getobjref(struct vnode *vp) +{ + register struct ubc_info *uip; + void *pager_cport; + void *object; + + uip = vp->v_ubcinfo; + + if (pager_cport = (void *)vnode_pager_lookup(vp, uip->ui_pager)) + object = (void *)vm_object_lookup(pager_cport); + + if (object != uip->ui_object) { +#if 0 + Debugger("ubc_getobjref: object changed"); +#endif /* 0 */ + uip->ui_object = object; + } + + if (uip->ui_object == NULL) + panic("ubc_getobjref: lost object"); +} + +/* + * Initialization of the zone for Unified Buffer Cache. + */ +void +ubc_init() +{ + int i; + + i = (vm_size_t) sizeof (struct ubc_info); + /* XXX the number of elements should be tied in to maxvnodes */ + ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone"); + return; +} + +/* + * Initialize a ubc_info structure for a vnode. + */ +int +ubc_info_init(struct vnode *vp) +{ + register struct ubc_info *uip; + void * pager; + struct vattr vattr; + struct proc *p = current_proc(); + int error = 0; + kern_return_t kret; + void * pager_cport; + + assert(vp); + assert(UBCISVALID(vp)); + + ubc_lock(vp); + if (ISSET(vp->v_flag, VUINIT)) { + /* + * other thread is already doing this + * wait till done + */ + while (ISSET(vp->v_flag, VUINIT)) { + SET(vp->v_flag, VUWANT); /* XXX overloaded! 
*/ + ubc_unlock(vp); + (void) tsleep((caddr_t)vp, PINOD, "ubcinfo", 0); + ubc_lock(vp); + } + ubc_unlock(vp); + return (0); + } else { + SET(vp->v_flag, VUINIT); + } + + uip = vp->v_ubcinfo; + if ((uip == UBC_INFO_NULL) || (uip == UBC_NOINFO)) { + ubc_unlock(vp); + uip = (struct ubc_info *) zalloc(ubc_info_zone); + bzero(uip, sizeof(struct ubc_info)); + ubc_lock(vp); + SET(uip->ui_flags, UI_INITED); + uip->ui_vnode = vp; + uip->ui_ucred = NOCRED; + } + + assert(uip->ui_flags != UI_NONE); + assert(uip->ui_vnode == vp); + +#if 0 + if(ISSET(uip->ui_flags, UI_HASPAGER)) + goto done; +#endif /* 0 */ + + /* now set this ubc_info in the vnode */ + vp->v_ubcinfo = uip; + SET(uip->ui_flags, UI_HASPAGER); + ubc_unlock(vp); + pager = (void *)vnode_pager_setup(vp, uip->ui_pager); + assert(pager); + + /* + * Can not use VOP_GETATTR() to get accurate value + * of ui_size. Thanks to NFS. + * nfs_getattr() can call vinvalbuf() and in this case + * ubc_info is not set up to deal with that. + * So use bogus size. + */ + + /* create a vm_object association */ + kret = vm_object_create_nomap(pager, (vm_object_offset_t)uip->ui_size); + if (kret != KERN_SUCCESS) + panic("ubc_info_init: vm_object_create_nomap returned %d", kret); + + /* _ubc_getobject() gets a reference on the memory object */ + if (_ubc_getobject(vp, 0) == NULL) + panic("ubc_info_init: lost vmobject : uip = 0X%08x", uip); + + /* + * vm_object_allocate() called from vm_object_create_nomap() + * created the object with a refcount of 1 + * need to drop the reference gained by vm_object_lookup() + */ + vm_object_deallocate(uip->ui_object); + + /* create a pager reference on the vnode */ + error = vget(vp, LK_INTERLOCK, p); + if (error) + panic("ubc_info_init: vget error = %d", error); + + /* initialize the size */ + error = VOP_GETATTR(vp, &vattr, p->p_ucred, p); + + ubc_lock(vp); + uip->ui_size = (error ? 
0: vattr.va_size); + +done: + CLR(vp->v_flag, VUINIT); + if (ISSET(vp->v_flag, VUWANT)) { + CLR(vp->v_flag, VUWANT); + ubc_unlock(vp); + wakeup((caddr_t)vp); + } else + ubc_unlock(vp); + + return(error); +} + +/* Free the ubc_info */ +void +ubc_info_free(struct vnode *vp) +{ + register struct ubc_info *uip; + struct ucred *credp; + + assert(vp); + + uip = vp->v_ubcinfo; + vp->v_ubcinfo = UBC_INFO_NULL; + credp = uip->ui_ucred; + if (credp != NOCRED) { + uip->ui_ucred = NOCRED; + crfree(credp); + } + zfree(ubc_info_zone, (vm_offset_t)uip); + return; +} + +/* + * Communicate with VM the size change of the file + * returns 1 on success, 0 on failure + */ +int +ubc_setsize(struct vnode *vp, off_t nsize) +{ + off_t osize; /* ui_size before change */ + off_t lastpg, olastpgend, lastoff; + struct ubc_info *uip; + void *object; + kern_return_t kret; + int didhold; + +#if DIAGNOSTIC + assert(vp); + assert(nsize >= (off_t)0); +#endif + + if (UBCINVALID(vp)) + return(0); + + if (!UBCINFOEXISTS(vp)) + return(0); + + uip = vp->v_ubcinfo; + osize = uip->ui_size; /* call ubc_getsize() ??? */ + /* Update the size before flushing the VM */ + uip->ui_size = nsize; + + if (nsize >= osize) /* Nothing more to do */ + return(0); + + /* + * When the file shrinks, invalidate the pages beyond the + * new size. Also get rid of garbage beyond nsize on the + * last page. The ui_size already has the nsize. This + * insures that the pageout would not write beyond the new + * end of the file. + */ + + didhold = ubc_hold(vp); + lastpg = trunc_page_64(nsize); + olastpgend = round_page_64(osize); + object = _ubc_getobject(vp, UBC_NOREACTIVATE); + assert(object); + lastoff = (nsize & PAGE_MASK_64); + + /* + * If length is multiple of page size, we should not flush + * invalidating is sufficient + */ + if (!lastoff) { + /* + * memory_object_lock_request() drops an object + * reference. 
gain a reference before calling it + */ + ubc_getobjref(vp); + + /* invalidate last page and old contents beyond nsize */ + kret = memory_object_lock_request(object, + (vm_object_offset_t)lastpg, + (memory_object_size_t)(olastpgend - lastpg), + MEMORY_OBJECT_RETURN_NONE,TRUE, + VM_PROT_NO_CHANGE,MACH_PORT_NULL); + if (kret != KERN_SUCCESS) + printf("ubc_setsize: invalidate failed (error = %d)\n", kret); + + if (didhold) + ubc_rele(vp); + return ((kret == KERN_SUCCESS) ? 1 : 0); + } + + /* + * memory_object_lock_request() drops an object + * reference. gain a reference before calling it + */ + ubc_getobjref(vp); + + /* flush the last page */ + kret = memory_object_lock_request(object, + (vm_object_offset_t)lastpg, + PAGE_SIZE_64, + MEMORY_OBJECT_RETURN_DIRTY,FALSE, + VM_PROT_NO_CHANGE,MACH_PORT_NULL); + + if (kret == KERN_SUCCESS) { + /* + * memory_object_lock_request() drops an object + * reference. gain a reference before calling it + */ + ubc_getobjref(vp); + + /* invalidate last page and old contents beyond nsize */ + kret = memory_object_lock_request(object, + (vm_object_offset_t)lastpg, + (memory_object_size_t)(olastpgend - lastpg), + MEMORY_OBJECT_RETURN_NONE,TRUE, + VM_PROT_NO_CHANGE,MACH_PORT_NULL); + if (kret != KERN_SUCCESS) + printf("ubc_setsize: invalidate failed (error = %d)\n", kret); + } else + printf("ubc_setsize: flush failed (error = %d)\n", kret); + + if (didhold) + ubc_rele(vp); + return ((kret == KERN_SUCCESS) ? 1 : 0); +} + +/* + * Get the size of the file + * For local file systems the size is locally cached. For NFS + * there might be a network transaction for this. 
/*
 * Return the cached size of the file backing this vnode.
 * NOTE(review): reads ui_size with no locking and assumes v_ubcinfo is
 * valid — callers presumably guarantee UBCINFOEXISTS(); confirm.
 */
off_t
ubc_getsize(struct vnode *vp)
{
	/* XXX deal with NFS */
	return (vp->v_ubcinfo->ui_size);
}

/*
 * lock for changes to struct UBC
 * Serializes updates to the vnode's ubc_info state.
 */
static void
ubc_lock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_lock(&vp->v_interlock);
}

/* unlock — pairs with ubc_lock() above */
static void
ubc_unlock(struct vnode *vp)
{
	/* For now, just use the v_interlock */
	simple_unlock(&vp->v_interlock);
}
+ * returns 1 on success, 0 on failure + */ +int +ubc_umount(struct mount *mp) +{ + struct proc *p = current_proc(); + struct vnode *vp, *nvp; + int ret = 1; + +loop: + simple_lock(&mntvnode_slock); + for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { + if (vp->v_mount != mp) { + simple_unlock(&mntvnode_slock); + goto loop; + } + nvp = vp->v_mntvnodes.le_next; + simple_unlock(&mntvnode_slock); + if (UBCINFOEXISTS(vp)) { + ret &= ubc_clean(vp, 0); /* do not invalidate */ + ret &= ubc_uncache(vp); + ubc_release(vp); + } + simple_lock(&mntvnode_slock); + } + simple_unlock(&mntvnode_slock); + return (ret); +} + +/* + * Call ubc_unmount() for all filesystems. + * The list is traversed in reverse order + * of mounting to avoid dependencies. + */ +void +ubc_unmountall() +{ + struct mount *mp, *nmp; + + /* + * Since this only runs when rebooting, it is not interlocked. + */ + for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) { + nmp = mp->mnt_list.cqe_prev; + (void) ubc_umount(mp); + } +} + +/* Get the credentials */ +struct ucred * +ubc_getcred(struct vnode *vp) +{ + struct ubc_info *uip; + + assert(vp); + + uip = vp->v_ubcinfo; + + assert(uip); + + if (UBCINVALID(vp)) { + return (NOCRED); + } + + return (uip->ui_ucred); +} + +/* + * Set the credentials + * existing credentials are not changed + * returns 1 on success and 0 on failure + */ + +int +ubc_setcred(struct vnode *vp, struct proc *p) +{ + struct ubc_info *uip; + struct ucred *credp; + + assert(vp); + assert(p); + + uip = vp->v_ubcinfo; + + assert(uip); + + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_setcred"); + return (0); + } + + credp = uip->ui_ucred; + if (credp == NOCRED) { + crhold(p->p_ucred); + uip->ui_ucred = p->p_ucred; + } + + return (1); +} + +/* Get the pager */ +void * +ubc_getpager(struct vnode *vp) +{ + struct ubc_info *uip; + + assert(vp); + + uip = vp->v_ubcinfo; + + assert(uip); + + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_getpager"); + return (0); + } + + return (uip->ui_pager); 
+} + +/* + * Get the memory object associated with this vnode + * If the vnode was reactivated, memory object would not exist. + * Unless "do not rectivate" was specified, look it up using the pager. + * The vm_object_lookup() would create a reference on the memory object. + * If hold was requested create an object reference of one does not + * exist already. + */ + +static void * +_ubc_getobject(struct vnode *vp, int flags) +{ + struct ubc_info *uip; + void *object; + + uip = vp->v_ubcinfo; + object = uip->ui_object; + + if ((object == NULL) && ISSET(uip->ui_flags, UI_HASPAGER) + && !(flags & UBC_NOREACTIVATE)) { + void *pager_cport; + + if (ISSET(uip->ui_flags, UI_HASOBJREF)) + panic("ubc_getobject: lost object"); + + if (pager_cport = (void *)vnode_pager_lookup(vp, uip->ui_pager)) { + object = (void *)vm_object_lookup(pager_cport); +#if 0 + if ((uip->ui_object) && (uip->ui_object != object)) + Debugger("_ubc_getobject: object changed"); +#endif /* 0 */ + + uip->ui_object = object; + } + + if (object != NULL) + SET(uip->ui_flags, UI_HASOBJREF); + } + + if ((flags & UBC_HOLDOBJECT) + && (object != NULL)) { + if (!ISSET(uip->ui_flags, UI_HASOBJREF)) { + ubc_getobjref(vp); + SET(uip->ui_flags, UI_HASOBJREF); + } + } + return (uip->ui_object); +} + +void * +ubc_getobject(struct vnode *vp, int flags) +{ + struct ubc_info *uip; + void *object; + + assert(vp); + uip = vp->v_ubcinfo; + assert(uip); + + if (UBCINVALID(vp)) { + return (0); + } + + object = _ubc_getobject(vp, flags); + assert(object); + + if (!ISSET(uip->ui_flags, (UI_HASOBJREF|UI_WASMAPPED)) + && !(uip->ui_holdcnt)) { + if (!(flags & UBC_PAGINGOP)) + panic("ubc_getobject: lost reference"); + } +} + +/* Set the pager */ +int +ubc_setpager(struct vnode *vp, void *pager) +{ + struct ubc_info *uip; + + assert(vp); + + uip = vp->v_ubcinfo; + + assert(uip); + + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_setpager"); + return (0); + } + + uip->ui_pager = pager; + return (1); +} + +int +ubc_setflags(struct vnode * vp, 
int flags) +{ + struct ubc_info *uip; + + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_setflags"); + return (EINVAL); + } + + assert(vp); + + uip = vp->v_ubcinfo; + + assert(uip); + + SET(uip->ui_flags, flags); + + return(0); +} + +int +ubc_clearflags(struct vnode * vp, int flags) +{ + struct ubc_info *uip; + + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_clearflags"); + return (EINVAL); + } + + assert(vp); + + uip = vp->v_ubcinfo; + + assert(uip); + + CLR(uip->ui_flags, flags); + + return(0); +} + + +int +ubc_issetflags(struct vnode * vp, int flags) +{ + struct ubc_info *uip; + + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_issetflags"); + return (EINVAL); + } + + assert(vp); + + uip = vp->v_ubcinfo; + + assert(uip); + + return(ISSET(uip->ui_flags, flags)); +} + +off_t +ubc_blktooff(struct vnode *vp, daddr_t blkno) +{ + off_t file_offset; + int error; + + assert(vp); + if (UBCINVALID(vp)) { + USHOULDNOT("ubc_blktooff"); + return ((off_t)-1); + } + + error = VOP_BLKTOOFF(vp, blkno, &file_offset); + if (error) + file_offset = -1; + + return (file_offset); +} +daddr_t +ubc_offtoblk(struct vnode *vp, off_t offset) +{ + daddr_t blkno; + int error=0; + + assert(vp); + if (UBCINVALID(vp)) { + return ((daddr_t)-1); + } + + error = VOP_OFFTOBLK(vp, offset, &blkno); + if (error) + blkno = -1; + + return (blkno); +} + +/* + * Cause the file data in VM to be pushed out to the storage + * it also causes all currently valid pages to be released + * returns 1 on success, 0 on failure + */ +int +ubc_clean(struct vnode *vp, int invalidate) +{ + off_t size; + struct ubc_info *uip; + void *object; + kern_return_t kret; + int flags = 0; + int didhold; + +#if DIAGNOSTIC + assert(vp); +#endif + + if (UBCINVALID(vp)) + return(0); + + if (!UBCINFOEXISTS(vp)) + return(0); + + /* + * if invalidate was requested, write dirty data and then discard + * the resident pages + */ + if (invalidate) + flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE); + + didhold = ubc_hold(vp); + uip = 
/*
 * Cause the file data in VM to be pushed out to the storage
 * currently valid pages are NOT invalidated
 * returns 1 on success, 0 on failure
 *
 * NOTE(review): the hold/ref dance here matters — ubc_hold() pins the
 * object, and memory_object_lock_request() consumes a reference, so an
 * extra one is taken via ubc_getobjref() just before the call.
 */
int
ubc_pushdirty(struct vnode *vp)
{
	off_t size;
	struct ubc_info *uip;
	void *object;
	kern_return_t kret;
	int didhold;

#if DIAGNOSTIC
	assert(vp);
#endif

	/* not a UBC-managed vnode: nothing to push */
	if (UBCINVALID(vp))
		return(0);

	if (!UBCINFOEXISTS(vp))
		return(0);

	didhold = ubc_hold(vp);
	uip = vp->v_ubcinfo;
	size = uip->ui_size;	/* call ubc_getsize() ??? */

	object = _ubc_getobject(vp, UBC_NOREACTIVATE);
	assert(object);

	/*
	 * memory_object_lock_request() drops an object
	 * reference. gain a reference before calling it
	 */
	ubc_getobjref(vp);

	/* clear the cluster-write dirty state before flushing */
	vp->v_flag &= ~VHASDIRTY;
	vp->v_clen = 0;

	/* Write the dirty data in the file and discard cached pages */
	kret = memory_object_lock_request(object,
		(vm_object_offset_t)0,
		(memory_object_size_t)round_page_64(size),
		MEMORY_OBJECT_RETURN_DIRTY,FALSE,
		VM_PROT_NO_CHANGE,MACH_PORT_NULL);

	if (kret != KERN_SUCCESS) {
		printf("ubc_pushdirty: flush failed (error = %d)\n", kret);
	}

	/* balance the ubc_hold() above, if it actually took a hold */
	if (didhold)
		ubc_rele(vp);

	return ((kret == KERN_SUCCESS) ? 1 : 0);
}
uip->ui_mapped = 1; + ubc_unlock(vp); + +#if 1 + /* + * Do not release the ubc reference on the + * memory object right away. Let vnreclaim + * deal with that + */ +#else + /* + * Release the ubc reference. memory object cahe + * is responsible for caching this object now. + */ + if (ISSET(uip->ui_flags, UI_HASOBJREF)) { + object = _ubc_getobject(vp, UBC_NOREACTIVATE); + assert(object); + CLR(uip->ui_flags, UI_HASOBJREF); + vm_object_deallocate(object); + } +#endif + + return; + +} + +/* + * Release the memory object reference on the vnode + * only if it is not in use + * Return 1 if the reference was released, 0 otherwise. + */ +int +ubc_release(struct vnode *vp) +{ + struct ubc_info *uip; + void *object; +#if DIAGNOSTIC + assert(vp); +#endif + + if (UBCINVALID(vp)) + return (0); + + if (!UBCINFOEXISTS(vp)) + panic("ubc_release: can not"); + + uip = vp->v_ubcinfo; + + /* can not release held vnodes */ + if (uip->ui_holdcnt) + return (0); + + if (ISSET(uip->ui_flags, UI_HASOBJREF)) { + object = _ubc_getobject(vp, UBC_NOREACTIVATE); + assert(object); + CLR(uip->ui_flags, UI_HASOBJREF); + vm_object_deallocate(object); + return (1); + } else + return (0); +} + +/* + * Invalidate a range in the memory object that backs this + * vnode. The offset is truncated to the page boundary and the + * size is adjusted to include the last page in the range. + */ +int +ubc_invalidate(struct vnode *vp, off_t offset, size_t size) +{ + struct ubc_info *uip; + void *object; + kern_return_t kret; + off_t toff; + size_t tsize; + int didhold; + +#if DIAGNOSTIC + assert(vp); +#endif + + if (UBCINVALID(vp)) + return; + + if (!UBCINFOEXISTS(vp)) + panic("ubc_invalidate: can not"); + + didhold = ubc_hold(vp); + toff = trunc_page_64(offset); + tsize = (size_t)(round_page_64(offset+size) - toff); + uip = vp->v_ubcinfo; + object = _ubc_getobject(vp, UBC_NOREACTIVATE); + assert(object); + + /* + * memory_object_lock_request() drops an object + * reference. 
gain a reference before calling it + */ + ubc_getobjref(vp); + + /* invalidate pages in the range requested */ + kret = memory_object_lock_request(object, + (vm_object_offset_t)toff, + (memory_object_size_t)tsize, + MEMORY_OBJECT_RETURN_NONE, + (MEMORY_OBJECT_DATA_NO_CHANGE| MEMORY_OBJECT_DATA_FLUSH), + VM_PROT_NO_CHANGE,MACH_PORT_NULL); + if (kret != KERN_SUCCESS) + printf("ubc_invalidate: invalidate failed (error = %d)\n", kret); + + if (didhold) + ubc_rele(vp); + + return ((kret == KERN_SUCCESS) ? 1 : 0); +} + +/* + * Find out whether a vnode is in use by UBC + * Returns 1 if file is in use by UBC, 0 if not + */ +int +ubc_isinuse(struct vnode *vp, int tookref) +{ + int busycount = tookref ? 2 : 1; + + if (!UBCINFOEXISTS(vp)) + return(0); + + if (vp->v_usecount > busycount) + return (1); + + if ((vp->v_usecount == busycount) + && (vp->v_ubcinfo->ui_mapped == 1)) + return(1); + else + return(0); +} + + +/* -- UGLY HACK ALERT -- */ +/* + * The backdoor routine to clear the UI_WASMAPPED bit. + * MUST only be called by the VM + * + * Note that this routine is not under funnel. There are numerous + * thing about the calling sequence that make this work on SMP. + * Any code change in those paths can break this. + * + * This will be replaced soon. + */ +void +ubc_unmap(struct vnode *vp) +{ + struct ubc_info *uip; + +#if DIAGNOSTIC + assert(vp); +#endif + + if (UBCINVALID(vp)) { + return; + } + + if (!UBCINFOEXISTS(vp)) + panic("ubc_unmap: can not"); + + ubc_lock(vp); + uip = vp->v_ubcinfo; + + uip->ui_mapped = 0; + ubc_unlock(vp); + + return; +} + diff --git a/bsd/kern/uipc_domain.c b/bsd/kern/uipc_domain.c new file mode 100644 index 000000000..fbd7a1116 --- /dev/null +++ b/bsd/kern/uipc_domain.c @@ -0,0 +1,446 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)uipc_domain.c 8.3 (Berkeley) 2/14/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void pffasttimo __P((void *)); +void pfslowtimo __P((void *)); + +/* + * Add/delete 'domain': Link structure into system list, + * invoke the domain init, and then the proto inits. + * To delete, just remove from the list (dom_refs must be zero) + */ + + +void init_domain(register struct domain *dp) +{ + struct protosw *pr; + + if (dp->dom_init) + (*dp->dom_init)(); + + /* and then init the currently installed protos in this domain */ + + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) { + if (pr->pr_usrreqs == 0) + panic("domaininit: %ssw[%d] has no usrreqs!", + dp->dom_name, + (int)(pr - dp->dom_protosw)); + + if (pr->pr_init) + (*pr->pr_init)(); + } + + /* Recompute for new protocol */ + if (max_linkhdr < 16) /* XXX - Sheesh; everything's ether? 
*/ + max_linkhdr = 16; + if (dp->dom_protohdrlen > max_protohdr) + max_protohdr = dp->dom_protohdrlen; + max_hdr = max_linkhdr + max_protohdr; + max_datalen = MHLEN - max_hdr; +} + +void concat_domain(struct domain *dp) +{ + dp->dom_next = domains; + domains = dp; +} + +void +net_add_domain(register struct domain *dp) +{ register struct protosw *pr; + register int s; + extern int splhigh(void); + extern int splx(int); + + kprintf("Adding domain %s (family %d)\n", dp->dom_name, + dp->dom_family); + /* First, link in the domain */ + s = splhigh(); + + concat_domain(dp); + + init_domain(dp); + + splx(s); +} + +int +net_del_domain(register struct domain *dp) +{ register struct domain *dp1, *dp2; + register int s, retval = 0; + extern int splhigh(void); + extern int splx(int); + + if (dp->dom_refs) + return(EBUSY); + + s = splhigh(); + + for (dp2 = NULL, dp1 = domains; dp1; dp2 = dp1, dp1 = dp1->dom_next) + { if (dp == dp1) + break; + } + if (dp1) + { if (dp2) + dp2->dom_next = dp1->dom_next; + else + domains = dp1->dom_next; + } else + retval = EPFNOSUPPORT; + splx(s); + + return(retval); +} + +/* + * net_add_proto - link a protosw into a domain's protosw chain + */ +int +net_add_proto(register struct protosw *pp, + register struct domain *dp) +{ register struct protosw *pp1, *pp2; + register int s; + extern int splhigh(void); + extern int splx(int); + + s = splhigh(); + for (pp2 = NULL, pp1 = dp->dom_protosw; pp1; pp1 = pp1->pr_next) + { if (pp1->pr_type == pp->pr_type && + pp1->pr_protocol == pp->pr_protocol) { + splx(s); + return(EEXIST); + } + pp2 = pp1; + } + if (pp2 == NULL) + dp->dom_protosw = pp; + else + pp2->pr_next = pp; + pp->pr_next = NULL; + TAILQ_INIT(&pp->pr_sfilter); + if (pp->pr_init) + (*pp->pr_init)(); + + /* Make sure pr_init isn't called again!! */ + pp->pr_init = 0; + splx(s); + return(0); +} + +/* + * net_del_proto - remove a protosw from a domain's protosw chain. + * Search the protosw chain for the element with matching data. 
+ * Then unlink and return. + */ +int +net_del_proto(register int type, + register int protocol, + register struct domain *dp) +{ register struct protosw *pp1, *pp2; + int s; + extern int splhigh(void); + extern int splx(int); + + s = splhigh(); + for (pp2 = NULL, pp1 = dp->dom_protosw; pp1; pp1 = pp1->pr_next) + { if (pp1->pr_type == type && + pp1->pr_protocol == protocol) + break; + pp2 = pp1; + } + if (pp1 == NULL) { + splx(s); + return(ENXIO); + } + if (pp2) + pp2->pr_next = pp1->pr_next; + else + dp->dom_protosw = pp1->pr_next; + splx(s); + return(0); +} + + +void +domaininit() +{ register struct domain *dp; + register struct protosw *pr; + extern struct domain localdomain, routedomain, ndrvdomain, inetdomain; + extern struct domain systemdomain; +#if NS + extern struct domain nsdomain; +#endif +#if ISO + extern struct domain isodomain; +#endif +#if CCITT + extern struct domain ccittdomain; +#endif + +#if NETAT + extern struct domain atalkdomain; +#endif +#if INET6 + extern struct domain inet6domain; +#endif +#if IPSEC + extern struct domain keydomain; +#endif + + /* + * Add all the static domains to the domains list + */ + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + concat_domain(&localdomain); + concat_domain(&routedomain); + concat_domain(&inetdomain); +#if NETAT + concat_domain(&atalkdomain); +#endif +#if INET6 + concat_domain(&inet6domain); +#endif +#if IPSEC + concat_domain(&keydomain); +#endif + +#if NS + concat_domain(&nsdomain); +#endif +#if ISO + concat_domain(&isodomain); +#endif +#if CCITT + concat_domain(&ccittdomain); +#endif + concat_domain(&ndrvdomain); + + concat_domain(&systemdomain); + + /* + * Now ask them all to init (XXX including the routing domain, + * see above) + */ + for (dp = domains; dp; dp = dp->dom_next) + init_domain(dp); + + timeout(pffasttimo, NULL, 1); + timeout(pfslowtimo, NULL, 1); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +} + +struct protosw * +pffindtype(family, type) + int family, type; +{ + 
register struct domain *dp; + register struct protosw *pr; + + for (dp = domains; dp; dp = dp->dom_next) + if (dp->dom_family == family) + goto found; + return (0); +found: + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) + if (pr->pr_type && pr->pr_type == type) + return (pr); + return (0); +} + +struct domain * +pffinddomain(int pf) +{ struct domain *dp; + + dp = domains; + while (dp) + { if (dp->dom_family == pf) + return(dp); + dp = dp->dom_next; + } + return(NULL); +} + +struct protosw * +pffindproto(family, protocol, type) + int family, protocol, type; +{ + register struct domain *dp; + register struct protosw *pr; + struct protosw *maybe = 0; + + if (family == 0) + return (0); + for (dp = domains; dp; dp = dp->dom_next) + if (dp->dom_family == family) + goto found; + return (0); +found: + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) { + if ((pr->pr_protocol == protocol) && (pr->pr_type == type)) + return (pr); + + if (type == SOCK_RAW && pr->pr_type == SOCK_RAW && + pr->pr_protocol == 0 && maybe == (struct protosw *)0) + maybe = pr; + } + return (maybe); +} + +int +net_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + register struct domain *dp; + register struct protosw *pr; + int family, protocol; + + /* + * All sysctl names at this level are nonterminal; + * next two components are protocol family and protocol number, + * then at least one addition component. 
+ */ + if (namelen < 3) + return (EISDIR); /* overloaded */ + family = name[0]; + protocol = name[1]; + + if (family == 0) + return (0); + for (dp = domains; dp; dp = dp->dom_next) + if (dp->dom_family == family) + goto found; + return (ENOPROTOOPT); +found: + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) + if (pr->pr_protocol == protocol && pr->pr_sysctl) + return ((*pr->pr_sysctl)(name + 2, namelen - 2, + oldp, oldlenp, newp, newlen)); + return (ENOPROTOOPT); +} + +void +pfctlinput(cmd, sa) + int cmd; + struct sockaddr *sa; +{ + register struct domain *dp; + register struct protosw *pr; + + for (dp = domains; dp; dp = dp->dom_next) + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) + if (pr->pr_ctlinput) + (*pr->pr_ctlinput)(cmd, sa, (caddr_t)0); +} + +void +pfslowtimo(arg) + void *arg; +{ + register struct domain *dp; + register struct protosw *pr; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + for (dp = domains; dp; dp = dp->dom_next) + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) + if (pr->pr_slowtimo) + (*pr->pr_slowtimo)(); + timeout(pfslowtimo, NULL, hz/2); + + (void) thread_funnel_set(network_flock, FALSE); +} + +void +pffasttimo(arg) + void *arg; +{ + register struct domain *dp; + register struct protosw *pr; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + for (dp = domains; dp; dp = dp->dom_next) + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) + if (pr->pr_fasttimo) + (*pr->pr_fasttimo)(); + timeout(pffasttimo, NULL, hz/5); + + (void) thread_funnel_set(network_flock, FALSE); +} diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c new file mode 100644 index 000000000..a3ab82cc7 --- /dev/null +++ b/bsd/kern/uipc_mbuf.c @@ -0,0 +1,1286 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94 + */ +/* HISTORY + * + * 10/15/97 Annette DeSchon (deschon@apple.com) + * Fixed bug in which all cluster mbufs were broken up + * into regular mbufs: Some clusters are now reserved. + * When a cluster is needed, regular mbufs are no longer + * used. 
(Radar 1683621) + * 20-May-95 Mac Gillon (mgillon) at NeXT + * New version based on 4.4 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern kernel_pmap; /* The kernel's pmap */ + +decl_simple_lock_data(, mbuf_slock); +struct mbuf *mfree; /* mbuf free list */ +struct mbuf *mfreelater; /* mbuf deallocation list */ +extern vm_map_t mb_map; /* special map */ +int m_want; /* sleepers on mbufs */ +extern int nmbclusters; /* max number of mapped clusters */ +short *mclrefcnt; /* mapped cluster reference counts */ +int *mcl_paddr; +union mcluster *mclfree; /* mapped cluster free list */ +int max_linkhdr; /* largest link-level header */ +int max_protohdr; /* largest protocol header */ +int max_hdr; /* largest link+protocol header */ +int max_datalen; /* MHLEN - max_hdr */ +struct mbstat mbstat; /* statistics */ +union mcluster *mbutl; /* first mapped cluster address */ +union mcluster *embutl; /* ending virtual address of mclusters */ + +static int nclpp; /* # clusters per physical page */ +static char mbfail[] = "mbuf not mapped"; + +static int m_howmany(); + +/* The number of cluster mbufs that are allocated, to start. 
*/ +#define MINCL max(16, 2) + +extern int dlil_input_thread_wakeup; +extern int dlil_expand_mcl; +extern int dlil_initialized; + + +void +mbinit() +{ + int s,m; + int initmcl = 32; + + if (nclpp) + return; + nclpp = round_page(MCLBYTES) / MCLBYTES; /* see mbufgc() */ + if (nclpp < 1) nclpp = 1; + MBUF_LOCKINIT(); +// NETISR_LOCKINIT(); + if (nmbclusters == 0) + nmbclusters = NMBCLUSTERS; + MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short), + M_TEMP, M_WAITOK); + if (mclrefcnt == 0) + panic("mbinit"); + for (m = 0; m < nmbclusters; m++) + mclrefcnt[m] = -1; + + MALLOC(mcl_paddr, int *, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int), + M_TEMP, M_WAITOK); + if (mcl_paddr == 0) + panic("mbinit1"); + bzero((char *)mcl_paddr, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int)); + + embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES)); + + PE_parse_boot_arg("initmcl", &initmcl); + + if (m_clalloc(max(PAGE_SIZE/CLBYTES, 1) * initmcl, M_WAIT) == 0) + goto bad; + MBUF_UNLOCK(); + return; +bad: + panic("mbinit"); +} + +/* + * Allocate some number of mbuf clusters + * and place on cluster free list. + */ +/* ARGSUSED */ +m_clalloc(ncl, nowait) + register int ncl; + int nowait; +{ + register union mcluster *mcl; + register int i; + vm_size_t size; + static char doing_alloc; + + /* + * Honor the caller's wish to block or not block. + * We have a way to grow the pool asynchronously, + * by kicking the dlil_input_thread. 
+ */ + if ((i = m_howmany()) <= 0) + goto out; + + if ((nowait == M_DONTWAIT)) + goto out; + + if (ncl < i) + ncl = i; + size = round_page(ncl * MCLBYTES); + mcl = (union mcluster *)kmem_mb_alloc(mb_map, size); + + if (mcl == 0 && ncl > 1) { + size = round_page(MCLBYTES); /* Try for 1 if failed */ + mcl = (union mcluster *)kmem_mb_alloc(mb_map, size); + } + + if (mcl) { + MBUF_LOCK(); + ncl = size / MCLBYTES; + for (i = 0; i < ncl; i++) { + if (++mclrefcnt[mtocl(mcl)] != 0) + panic("m_clalloc already there"); + if (((int)mcl & PAGE_MASK) == 0) + mcl_paddr[((char *)mcl - (char *)mbutl)/PAGE_SIZE] = pmap_extract(kernel_pmap, (char *)mcl); + + mcl->mcl_next = mclfree; + mclfree = mcl++; + } + mbstat.m_clfree += ncl; + mbstat.m_clusters += ncl; + return (ncl); + } /* else ... */ +out: + MBUF_LOCK(); + + /* + * When non-blocking we kick the dlil thread if we havve to grow the + * pool or if the number of free clusters is less than requested. + */ + if ((nowait == M_DONTWAIT) && (i > 0 || ncl >= mbstat.m_clfree)) { + dlil_expand_mcl = 1; + if (dlil_initialized) + wakeup((caddr_t)&dlil_input_thread_wakeup); + } + + if (mbstat.m_clfree >= ncl) + return 1; + + mbstat.m_drops++; + + return 0; +} + +/* + * Add more free mbufs by cutting up a cluster. + */ +m_expand(canwait) + int canwait; +{ + register caddr_t mcl; + + if (mbstat.m_clfree < (mbstat.m_clusters >> 4)) + /* 1/16th of the total number of cluster mbufs allocated is + reserved for large packets. The number reserved must + always be < 1/2, or future allocation will be prevented. 
+ */ + return 0; + + MCLALLOC(mcl, canwait); + if (mcl) { + register struct mbuf *m = (struct mbuf *)mcl; + register int i = NMBPCL; + MBUF_LOCK(); + mbstat.m_mtypes[MT_FREE] += i; + mbstat.m_mbufs += i; + while (i--) { + m->m_type = MT_FREE; + m->m_next = mfree; + mfree = m++; + } + i = m_want; + m_want = 0; + MBUF_UNLOCK(); + if (i) wakeup((caddr_t)&mfree); + return 1; + } + return 0; +} + +/* + * When MGET failes, ask protocols to free space when short of memory, + * then re-attempt to allocate an mbuf. + */ +struct mbuf * +m_retry(canwait, type) + int canwait, type; +{ +#define m_retry(h, t) 0 + register struct mbuf *m; + int wait, s; + funnel_t * fnl; + int fnl_switch = 0; + boolean_t funnel_state; + + for (;;) { + (void) m_expand(canwait); + MGET(m, XXX, type); + if (m || canwait == M_DONTWAIT) + break; + MBUF_LOCK(); + wait = m_want++; + + dlil_expand_mcl = 1; + MBUF_UNLOCK(); + + if (dlil_initialized) + wakeup((caddr_t)&dlil_input_thread_wakeup); + + if (wait == 0) { + mbstat.m_drain++; + } + else { + assert_wait((caddr_t)&mfree, THREAD_UNINT); + mbstat.m_wait++; + } + + /* + * Grab network funnel because m_reclaim calls into the + * socket domains and tsleep end-up calling splhigh + */ + fnl = thread_funnel_get(); + if (fnl && (fnl == kernel_flock)) { + fnl_switch = 1; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } else + funnel_state = thread_funnel_set(network_flock, TRUE); + if (wait == 0) { + m_reclaim(); + } else { + /* Sleep with a small timeout as insurance */ + (void) tsleep((caddr_t)0, PZERO-1, "m_retry", hz); + } + if (fnl_switch) + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + else + thread_funnel_set(network_flock, funnel_state); + } + return (m); +#undef m_retry +} + +/* + * As above; retry an MGETHDR. 
+ */ +struct mbuf * +m_retryhdr(canwait, type) + int canwait, type; +{ + register struct mbuf *m; + + if (m = m_retry(canwait, type)) { + m->m_flags |= M_PKTHDR; + m->m_data = m->m_pktdat; + } + return (m); +} + +m_reclaim() +{ + register struct domain *dp; + register struct protosw *pr; + + for (dp = domains; dp; dp = dp->dom_next) + for (pr = dp->dom_protosw; pr; pr = pr->pr_next) + if (pr->pr_drain) + (*pr->pr_drain)(); + mbstat.m_drain++; +} + +/* + * Space allocation routines. + * These are also available as macros + * for critical paths. + */ +struct mbuf * +m_get(nowait, type) + int nowait, type; +{ + register struct mbuf *m; + + MGET(m, nowait, type); + return (m); +} + +struct mbuf * +m_gethdr(nowait, type) + int nowait, type; +{ + register struct mbuf *m; + + MGETHDR(m, nowait, type); + return (m); +} + +struct mbuf * +m_getclr(nowait, type) + int nowait, type; +{ + register struct mbuf *m; + + MGET(m, nowait, type); + if (m == 0) + return (0); + bzero(mtod(m, caddr_t), MLEN); + return (m); +} + +struct mbuf * +m_free(m) + struct mbuf *m; +{ + struct mbuf *n = m->m_next; + int i, s; + + if (m->m_type == MT_FREE) + panic("freeing free mbuf"); + + MBUF_LOCK(); + if (m->m_flags & M_EXT) { + if (MCLHASREFERENCE(m)) { + remque((queue_t)&m->m_ext.ext_refs); + } else if (m->m_ext.ext_free == NULL) { + union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf; + if (MCLUNREF(mcl)) { + mcl->mcl_next = mclfree; + mclfree = mcl; + ++mbstat.m_clfree; + } +#ifdef COMMENT_OUT +/* *** Since m_split() increments "mclrefcnt[mtocl(m->m_ext.ext_buf)]", + and AppleTalk ADSP uses m_split(), this incorrect sanity check + caused a panic. 
+*** */ + else /* sanity check - not referenced this way */ + panic("m_free m_ext cluster not free"); +#endif + } else { + (*(m->m_ext.ext_free))(m->m_ext.ext_buf, + m->m_ext.ext_size, m->m_ext.ext_arg); + } + } + mbstat.m_mtypes[m->m_type]--; + (void) MCLUNREF(m); + m->m_type = MT_FREE; + mbstat.m_mtypes[m->m_type]++; + m->m_flags = 0; + m->m_next = mfree; + m->m_len = 0; + mfree = m; + i = m_want; + m_want = 0; + MBUF_UNLOCK(); + if (i) wakeup((caddr_t)&mfree); + return (n); +} + +/* Best effort to get a mbuf cluster + pkthdr under one lock. + * If we don't have them avail, just bail out and use the regular + * path. + * Used by drivers to allocated packets on receive ring. + */ +struct mbuf * +m_getpacket(void) +{ + struct mbuf *m; + m_clalloc(1, M_DONTWAIT); /* takes the MBUF_LOCK, but doesn't release it... */ + if ((mfree != 0) && (mclfree != 0)) { /* mbuf + cluster are available */ + m = mfree; + mfree = m->m_next; + MCHECK(m); + ++mclrefcnt[mtocl(m)]; + mbstat.m_mtypes[MT_FREE]--; + mbstat.m_mtypes[MT_DATA]++; + m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */ + ++mclrefcnt[mtocl(m->m_ext.ext_buf)]; + mbstat.m_clfree--; + mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next; + + m->m_next = m->m_nextpkt = 0; + m->m_ext.ext_free = 0; + m->m_type = MT_DATA; + m->m_data = m->m_ext.ext_buf; + m->m_flags = M_PKTHDR | M_EXT; + m->m_pkthdr.aux = (struct mbuf *)NULL; + m->m_ext.ext_size = MCLBYTES; + m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = + &m->m_ext.ext_refs; + MBUF_UNLOCK(); + } + else { /* slow path: either mbuf or cluster need to be allocated anyway */ + MBUF_UNLOCK(); + + MGETHDR(m, M_WAITOK, MT_DATA ); + + if ( m == 0 ) + return (NULL); + + MCLGET( m, M_WAITOK ); + if ( ( m->m_flags & M_EXT ) == 0 ) + { + m_free(m); m = 0; + } + } + return (m); +} + +/* free and mbuf list (m_nextpkt) while following m_next under one lock. + * returns the count for mbufs packets freed. Used by the drivers. 
+ */ +int +m_freem_list(m) + struct mbuf *m; +{ + struct mbuf *nextpkt; + int i, s, count=0; + +// s = splimp(); + MBUF_LOCK(); + while (m) { + if (m) + nextpkt = m->m_nextpkt; /* chain of linked mbufs from driver */ + else + nextpkt = 0; + count++; + while (m) { /* free the mbuf chain (like mfreem) */ + struct mbuf *n = m->m_next; + if (n && n->m_nextpkt) + panic("m_freem_list: m_nextpkt of m_next != NULL"); + if (m->m_type == MT_FREE) + panic("freeing free mbuf"); + if (m->m_flags & M_EXT) { + if (MCLHASREFERENCE(m)) { + remque((queue_t)&m->m_ext.ext_refs); + } else if (m->m_ext.ext_free == NULL) { + union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf; + if (MCLUNREF(mcl)) { + mcl->mcl_next = mclfree; + mclfree = mcl; + ++mbstat.m_clfree; + } + } else { + (*(m->m_ext.ext_free))(m->m_ext.ext_buf, + m->m_ext.ext_size, m->m_ext.ext_arg); + } + } + mbstat.m_mtypes[m->m_type]--; + (void) MCLUNREF(m); + m->m_type = MT_FREE; + mbstat.m_mtypes[m->m_type]++; + m->m_flags = 0; + m->m_len = 0; + m->m_next = mfree; + mfree = m; + m = n; + } + m = nextpkt; /* bump m with saved nextpkt if any */ + } + i = m_want; + m_want = 0; + MBUF_UNLOCK(); + if (i) wakeup((caddr_t)&mfree); + return (count); +} + +void +m_freem(m) + register struct mbuf *m; +{ + while (m) + m = m_free(m); +} + +/* + * Mbuffer utility routines. + */ +/* + * Compute the amount of space available + * before the current start of data in an mbuf. + */ +m_leadingspace(m) +register struct mbuf *m; +{ + if (m->m_flags & M_EXT) { + if (MCLHASREFERENCE(m)) + return(0); + return (m->m_data - m->m_ext.ext_buf); + } + if (m->m_flags & M_PKTHDR) + return (m->m_data - m->m_pktdat); + return (m->m_data - m->m_dat); +} + +/* + * Compute the amount of space available + * after the end of data in an mbuf. 
+ */ +m_trailingspace(m) +register struct mbuf *m; +{ + if (m->m_flags & M_EXT) { + if (MCLHASREFERENCE(m)) + return(0); + return (m->m_ext.ext_buf + m->m_ext.ext_size - + (m->m_data + m->m_len)); + } + return (&m->m_dat[MLEN] - (m->m_data + m->m_len)); +} + +/* + * Lesser-used path for M_PREPEND: + * allocate new mbuf to prepend to chain, + * copy junk along. + */ +struct mbuf * +m_prepend(m, len, how) + register struct mbuf *m; + int len, how; +{ + struct mbuf *mn; + + MGET(mn, how, m->m_type); + if (mn == (struct mbuf *)NULL) { + m_freem(m); + return ((struct mbuf *)NULL); + } + if (m->m_flags & M_PKTHDR) { + M_COPY_PKTHDR(mn, m); + m->m_flags &= ~M_PKTHDR; + } + mn->m_next = m; + m = mn; + if (len < MHLEN) + MH_ALIGN(m, len); + m->m_len = len; + return (m); +} + +/* + * Make a copy of an mbuf chain starting "off0" bytes from the beginning, + * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. + * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller. + */ +int MCFail; + +struct mbuf * +m_copym(m, off0, len, wait) + register struct mbuf *m; + int off0, wait; + register int len; +{ + register struct mbuf *n, **np; + register int off = off0; + struct mbuf *top; + int copyhdr = 0; + + if (off < 0 || len < 0) + panic("m_copym"); + if (off == 0 && m->m_flags & M_PKTHDR) + copyhdr = 1; + while (off > 0) { + if (m == 0) + panic("m_copym"); + if (off < m->m_len) + break; + off -= m->m_len; + m = m->m_next; + } + np = ⊤ + top = 0; + while (len > 0) { + if (m == 0) { + if (len != M_COPYALL) + panic("m_copym"); + break; + } + MGET(n, wait, m->m_type); + *np = n; + if (n == 0) + goto nospace; + if (copyhdr) { + M_COPY_PKTHDR(n, m); + if (len == M_COPYALL) + n->m_pkthdr.len -= off0; + else + n->m_pkthdr.len = len; + copyhdr = 0; + } + if (len == M_COPYALL) { + if (min(len, (m->m_len - off)) == len) { + printf("m->m_len %d - off %d = %d, %d\n", + m->m_len, off, m->m_len - off, + min(len, (m->m_len - off))); + } + } + n->m_len = min(len, (m->m_len 
- off)); + if (n->m_len == M_COPYALL) { + printf("n->m_len == M_COPYALL, fixing\n"); + n->m_len = MHLEN; + } + if (m->m_flags & M_EXT) { + MBUF_LOCK(); + n->m_ext = m->m_ext; + insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs); + MBUF_UNLOCK(); + n->m_data = m->m_data + off; + n->m_flags |= M_EXT; + } else + bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t), + (unsigned)n->m_len); + if (len != M_COPYALL) + len -= n->m_len; + off = 0; + m = m->m_next; + np = &n->m_next; + } + if (top == 0) + MCFail++; + return (top); +nospace: + m_freem(top); + MCFail++; + return (0); +} + +/* + * Copy data from an mbuf chain starting "off" bytes from the beginning, + * continuing for "len" bytes, into the indicated buffer. + */ +void m_copydata(m, off, len, cp) + register struct mbuf *m; + register int off; + register int len; + caddr_t cp; +{ + register unsigned count; + + if (off < 0 || len < 0) + panic("m_copydata"); + while (off > 0) { + if (m == 0) + panic("m_copydata"); + if (off < m->m_len) + break; + off -= m->m_len; + m = m->m_next; + } + while (len > 0) { + if (m == 0) + panic("m_copydata"); + count = min(m->m_len - off, len); + bcopy(mtod(m, caddr_t) + off, cp, count); + len -= count; + cp += count; + off = 0; + m = m->m_next; + } +} + +/* + * Concatenate mbuf chain n to m. + * Both chains must be of the same type (e.g. MT_DATA). + * Any m_pkthdr is not updated. 
+ */ +void m_cat(m, n) + register struct mbuf *m, *n; +{ + while (m->m_next) + m = m->m_next; + while (n) { + if (m->m_flags & M_EXT || + m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { + /* just join the two chains */ + m->m_next = n; + return; + } + /* splat the data from one into the other */ + bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, + (u_int)n->m_len); + m->m_len += n->m_len; + n = m_free(n); + } +} + +void +m_adj(mp, req_len) + struct mbuf *mp; + int req_len; +{ + register int len = req_len; + register struct mbuf *m; + register count; + + if ((m = mp) == NULL) + return; + if (len >= 0) { + /* + * Trim from head. + */ + while (m != NULL && len > 0) { + if (m->m_len <= len) { + len -= m->m_len; + m->m_len = 0; + m = m->m_next; + } else { + m->m_len -= len; + m->m_data += len; + len = 0; + } + } + m = mp; + if (m->m_flags & M_PKTHDR) + m->m_pkthdr.len -= (req_len - len); + } else { + /* + * Trim from tail. Scan the mbuf chain, + * calculating its length and finding the last mbuf. + * If the adjustment only affects this mbuf, then just + * adjust and return. Otherwise, rescan and truncate + * after the remaining size. + */ + len = -len; + count = 0; + for (;;) { + count += m->m_len; + if (m->m_next == (struct mbuf *)0) + break; + m = m->m_next; + } + if (m->m_len >= len) { + m->m_len -= len; + m = mp; + if (m->m_flags & M_PKTHDR) + m->m_pkthdr.len -= len; + return; + } + count -= len; + if (count < 0) + count = 0; + /* + * Correct length for chain is "count". + * Find the mbuf with last data, adjust its length, + * and toss data from remaining mbufs on chain. 
+ */ + m = mp; + if (m->m_flags & M_PKTHDR) + m->m_pkthdr.len = count; + for (; m; m = m->m_next) { + if (m->m_len >= count) { + m->m_len = count; + break; + } + count -= m->m_len; + } + while (m = m->m_next) + m->m_len = 0; + } +} + +/* + * Rearange an mbuf chain so that len bytes are contiguous + * and in the data area of an mbuf (so that mtod and dtom + * will work for a structure of size len). Returns the resulting + * mbuf chain on success, frees it and returns null on failure. + * If there is room, it will add up to max_protohdr-len extra bytes to the + * contiguous region in an attempt to avoid being called next time. + */ +int MPFail; + +struct mbuf * +m_pullup(n, len) + register struct mbuf *n; + int len; +{ + register struct mbuf *m; + register int count; + int space; + + /* + * If first mbuf has no cluster, and has room for len bytes + * without shifting current data, pullup into it, + * otherwise allocate a new mbuf to prepend to the chain. + */ + if ((n->m_flags & M_EXT) == 0 && + n->m_data + len < &n->m_dat[MLEN] && n->m_next) { + if (n->m_len >= len) + return (n); + m = n; + n = n->m_next; + len -= m->m_len; + } else { + if (len > MHLEN) + goto bad; + MGET(m, M_DONTWAIT, n->m_type); + if (m == 0) + goto bad; + m->m_len = 0; + if (n->m_flags & M_PKTHDR) { + M_COPY_PKTHDR(m, n); + n->m_flags &= ~M_PKTHDR; + } + } + space = &m->m_dat[MLEN] - (m->m_data + m->m_len); + do { + count = min(min(max(len, max_protohdr), space), n->m_len); + bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len, + (unsigned)count); + len -= count; + m->m_len += count; + n->m_len -= count; + space -= count; + if (n->m_len) + n->m_data += count; + else + n = m_free(n); + } while (len > 0 && n); + if (len > 0) { + (void) m_free(m); + goto bad; + } + m->m_next = n; + return (m); +bad: + m_freem(n); + MPFail++; + return (0); +} + +/* + * Partition an mbuf chain in two pieces, returning the tail -- + * all but the first len0 bytes. 
In case of failure, it returns NULL and + * attempts to restore the chain to its original state. + */ +struct mbuf * +m_split(m0, len0, wait) + register struct mbuf *m0; + int len0, wait; +{ + register struct mbuf *m, *n; + unsigned len = len0, remain; + + for (m = m0; m && len > m->m_len; m = m->m_next) + len -= m->m_len; + if (m == 0) + return (0); + remain = m->m_len - len; + if (m0->m_flags & M_PKTHDR) { + MGETHDR(n, wait, m0->m_type); + if (n == 0) + return (0); + n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; + n->m_pkthdr.len = m0->m_pkthdr.len - len0; + m0->m_pkthdr.len = len0; + if (m->m_flags & M_EXT) + goto extpacket; + if (remain > MHLEN) { + /* m can't be the lead packet */ + MH_ALIGN(n, 0); + n->m_next = m_split(m, len, wait); + if (n->m_next == 0) { + (void) m_free(n); + return (0); + } else + return (n); + } else + MH_ALIGN(n, remain); + } else if (remain == 0) { + n = m->m_next; + m->m_next = 0; + return (n); + } else { + MGET(n, wait, m->m_type); + if (n == 0) + return (0); + M_ALIGN(n, remain); + } +extpacket: + if (m->m_flags & M_EXT) { + n->m_flags |= M_EXT; + n->m_ext = m->m_ext; + MBUF_LOCK(); + mclrefcnt[mtocl(m->m_ext.ext_buf)]++; + MBUF_UNLOCK(); + m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */ + n->m_data = m->m_data + len; + } else { + bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain); + } + n->m_len = remain; + m->m_len = len; + n->m_next = m->m_next; + m->m_next = 0; + return (n); +} +/* + * Routine to copy from device local memory into mbufs. + */ +struct mbuf * +m_devget(buf, totlen, off0, ifp, copy) + char *buf; + int totlen, off0; + struct ifnet *ifp; + void (*copy)(); +{ + register struct mbuf *m; + struct mbuf *top = 0, **mp = ⊤ + register int off = off0, len; + register char *cp; + char *epkt; + + cp = buf; + epkt = cp + totlen; + if (off) { + /* + * If 'off' is non-zero, packet is trailer-encapsulated, + * so we have to skip the type and length fields. 
+ */ + cp += off + 2 * sizeof(u_int16_t); + totlen -= 2 * sizeof(u_int16_t); + } + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == 0) + return (0); + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = totlen; + m->m_len = MHLEN; + + while (totlen > 0) { + if (top) { + MGET(m, M_DONTWAIT, MT_DATA); + if (m == 0) { + m_freem(top); + return (0); + } + m->m_len = MLEN; + } + len = min(totlen, epkt - cp); + if (len >= MINCLSIZE) { + MCLGET(m, M_DONTWAIT); + if (m->m_flags & M_EXT) + m->m_len = len = min(len, MCLBYTES); + else { + /* give up when it's out of cluster mbufs */ + if (top) + m_freem(top); + m_freem(m); + return (0); + } + } else { + /* + * Place initial small packet/header at end of mbuf. + */ + if (len < m->m_len) { + if (top == 0 && len + max_linkhdr <= m->m_len) + m->m_data += max_linkhdr; + m->m_len = len; + } else + len = m->m_len; + } + if (copy) + copy(cp, mtod(m, caddr_t), (unsigned)len); + else + bcopy(cp, mtod(m, caddr_t), (unsigned)len); + cp += len; + *mp = m; + mp = &m->m_next; + totlen -= len; + if (cp == epkt) + cp = buf; + } + return (top); +} + +/* + * Cluster freelist allocation check. The mbuf lock must be held. + * Ensure hysteresis between hi/lo. + */ +static int +m_howmany() +{ + register int i; + + /* Under minimum */ + if (mbstat.m_clusters < MINCL) + return (MINCL - mbstat.m_clusters); + /* Too few (free < 1/2 total) and not over maximum */ + if (mbstat.m_clusters < nmbclusters && + (i = ((mbstat.m_clusters >> 1) - mbstat.m_clfree)) > 0) + return i; + return 0; +} + + +/* + * Copy data from a buffer back into the indicated mbuf chain, + * starting "off" bytes from the beginning, extending the mbuf + * chain if necessary. 
+ */ +void +m_copyback(m0, off, len, cp) + struct mbuf *m0; + register int off; + register int len; + caddr_t cp; +{ + register int mlen; + register struct mbuf *m = m0, *n; + int totlen = 0; + + if (m0 == 0) + return; + while (off > (mlen = m->m_len)) { + off -= mlen; + totlen += mlen; + if (m->m_next == 0) { + n = m_getclr(M_DONTWAIT, m->m_type); + if (n == 0) + goto out; + n->m_len = min(MLEN, len + off); + m->m_next = n; + } + m = m->m_next; + } + while (len > 0) { + mlen = min (m->m_len - off, len); + bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen); + cp += mlen; + len -= mlen; + mlen += off; + off = 0; + totlen += mlen; + if (len == 0) + break; + if (m->m_next == 0) { + n = m_get(M_DONTWAIT, m->m_type); + if (n == 0) + break; + n->m_len = min(MLEN, len); + m->m_next = n; + } + m = m->m_next; + } +out: if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) + m->m_pkthdr.len = totlen; +} + + +char *mcl_to_paddr(register char *addr) { + register int base_phys; + + if (addr < (char *)mbutl || addr >= (char *)embutl) + return (0); + base_phys = mcl_paddr[(addr - (char *)mbutl) >> PAGE_SHIFT]; + + if (base_phys == 0) + return (0); + return ((char *)((int)base_phys | ((int)addr & PAGE_MASK))); +} + +/* + * Dup the mbuf chain passed in. The whole thing. No cute additional cruft. + * And really copy the thing. That way, we don't "precompute" checksums + * for unsuspecting consumers. + * Assumption: m->m_nextpkt == 0. + * Trick: for small packets, don't dup into a cluster. That way received + * packets don't take up too much room in the sockbuf (cf. sbspace()). + */ +int MDFail; + +struct mbuf * +m_dup(register struct mbuf *m, int how) +{ register struct mbuf *n, **np; + struct mbuf *top; + int copyhdr = 0; + + np = ⊤ + top = 0; + if (m->m_flags & M_PKTHDR) + copyhdr = 1; + + /* + * Quick check: if we have one mbuf and its data fits in an + * mbuf with packet header, just copy and go. 
+ */ + if (m->m_next == NULL) + { /* Then just move the data into an mbuf and be done... */ + if (copyhdr) + { if (m->m_pkthdr.len <= MHLEN) + { if ((n = m_gethdr(how, m->m_type)) == NULL) + return(NULL); + n->m_len = m->m_len; + n->m_flags |= (m->m_flags & M_COPYFLAGS); + n->m_pkthdr.len = m->m_pkthdr.len; + n->m_pkthdr.rcvif = m->m_pkthdr.rcvif; + n->m_pkthdr.header = NULL; + n->m_pkthdr.aux = NULL; + bcopy(m->m_data, n->m_data, m->m_pkthdr.len); + return(n); + } + } else if (m->m_len <= MLEN) + { if ((n = m_get(how, m->m_type)) == NULL) + return(NULL); + bcopy(m->m_data, n->m_data, m->m_len); + n->m_len = m->m_len; + return(n); + } + } + while (m) + { +#if BLUE_DEBUG + kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len, + m->m_data); +#endif + if (copyhdr) + n = m_gethdr(how, m->m_type); + else + n = m_get(how, m->m_type); + if (n == 0) + goto nospace; + if (m->m_flags & M_EXT) + { MCLGET(n, how); + if ((n->m_flags & M_EXT) == 0) + goto nospace; + } + *np = n; + if (copyhdr) + { /* Don't use M_COPY_PKTHDR: preserve m_data */ + n->m_pkthdr = m->m_pkthdr; + n->m_flags |= (m->m_flags & M_COPYFLAGS); + copyhdr = 0; + if ((n->m_flags & M_EXT) == 0) + n->m_data = n->m_pktdat; + } + n->m_len = m->m_len; + /* + * Get the dup on the same bdry as the original + * Assume that the two mbufs have the same offset to data area + * (up to word bdries) + */ + bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len); + m = m->m_next; + np = &n->m_next; +#if BLUE_DEBUG + kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len, + n->m_data); +#endif + } + + if (top == 0) + MDFail++; + return (top); + nospace: + m_freem(top); + MDFail++; + return (0); +} + +#if 0 +#include + +static int mhog_num = 0; +static struct mbuf *mhog_chain = 0; +static int mhog_wait = 1; + +static int +sysctl_mhog_num SYSCTL_HANDLER_ARGS +{ + int old = mhog_num; + int error; + + error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); + if (!error && req->newptr) { + int i; + struct mbuf 
*m; + + if (mhog_chain) { + m_freem(mhog_chain); + mhog_chain = 0; + } + + for (i = 0; i < mhog_num; i++) { + MGETHDR(m, mhog_wait ? M_WAIT : M_DONTWAIT, MT_DATA); + if (m == 0) + break; + + MCLGET(m, mhog_wait ? M_WAIT : M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + m = 0; + break; + } + m->m_next = mhog_chain; + mhog_chain = m; + } + mhog_num = i; + } + + return error; +} + +SYSCTL_NODE(_kern_ipc, OID_AUTO, mhog, CTLFLAG_RW, 0, "mbuf hog"); + +SYSCTL_PROC(_kern_ipc_mhog, OID_AUTO, cluster, CTLTYPE_INT|CTLFLAG_RW, + &mhog_num, 0, &sysctl_mhog_num, "I", ""); +SYSCTL_INT(_kern_ipc_mhog, OID_AUTO, wait, CTLFLAG_RW, &mhog_wait, + 0, ""); +#endif + diff --git a/bsd/kern/uipc_mbuf2.c b/bsd/kern/uipc_mbuf2.c new file mode 100644 index 000000000..f5056527e --- /dev/null +++ b/bsd/kern/uipc_mbuf2.c @@ -0,0 +1,448 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */ + +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)uipc_mbuf.c 8.4 (Berkeley) 2/14/95 + */ + +#define PULLDOWN_STAT +/*#define PULLDOWN_DEBUG*/ + +#ifdef PULLDOWN_STAT +#if defined(__NetBSD__) || (defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include "opt_inet.h" +#endif +#endif + +#include +#include +#include +#include +#include +#if defined(PULLDOWN_STAT) && defined(INET6) +#include +#include +#include +#endif + +/* + * ensure that [off, off + len) is contiguous on the mbuf chain "m". + * packet chain before "off" is kept untouched. + * if offp == NULL, the target will start at on resulting chain. 
/*
 * m_pulldown(m, off, len, offp): make bytes [off, off+len) contiguous within
 * one mbuf of chain "m" (KAME/WIDE import, used by the IPv6 stack). The chain
 * before "off" is untouched; on failure the whole chain is freed and NULL is
 * returned. Returns the mbuf holding the contiguous region; *offp (if
 * non-NULL) receives the offset of the region within that mbuf.
 * NOTE(review): several original KAME comments below used angle-bracket
 * tuples like "<n, off>" that an HTML-stripping pass deleted; they are
 * restored in the comments here from the NetBSD/KAME original wording.
 */
+ * if offp != NULL, the target will start at <retval, *offp> on resulting chain. + * + * on error return (NULL return value), original "m" will be freed. + * + * XXX M_TRAILINGSPACE/M_LEADINGSPACE on shared cluster (sharedcluster) + */ +struct mbuf * +m_pulldown(m, off, len, offp) + struct mbuf *m; + int off, len; + int *offp; +{ + struct mbuf *n, *o; + int hlen, tlen, olen; + int sharedcluster; +#if defined(PULLDOWN_STAT) && defined(INET6) + static struct mbuf *prev = NULL; + int prevlen = 0, prevmlen = 0; +#endif + + /* check invalid arguments. */ + if (m == NULL) + panic("m == NULL in m_pulldown()"); + if (len > MCLBYTES) { + m_freem(m); + return NULL; /* impossible */ + } + +#if defined(PULLDOWN_STAT) && defined(INET6) + ip6stat.ip6s_pulldown++; +#endif + +#if defined(PULLDOWN_STAT) && defined(INET6) + /* statistics for m_pullup */ + ip6stat.ip6s_pullup++; + if (off + len > MHLEN) + ip6stat.ip6s_pullup_fail++; + else { + int dlen, mlen; + + dlen = (prev == m) ? prevlen : m->m_len; + mlen = (prev == m) ? prevmlen : m->m_len + M_TRAILINGSPACE(m); + + if (dlen >= off + len) + ip6stat.ip6s_pullup--; /* call will not be made! */ + else if ((m->m_flags & M_EXT) != 0) { + ip6stat.ip6s_pullup_alloc++; + ip6stat.ip6s_pullup_copy++; + } else { + if (mlen >= off + len) + ip6stat.ip6s_pullup_copy++; + else { + ip6stat.ip6s_pullup_alloc++; + ip6stat.ip6s_pullup_copy++; + } + } + + prevlen = off + len; + prevmlen = MHLEN; + } + + /* statistics for m_pullup2 */ + ip6stat.ip6s_pullup2++; + if (off + len > MCLBYTES) + ip6stat.ip6s_pullup2_fail++; + else { + int dlen, mlen; + + dlen = (prev == m) ? prevlen : m->m_len; + mlen = (prev == m) ? prevmlen : m->m_len + M_TRAILINGSPACE(m); + prevlen = off + len; + prevmlen = mlen; + + if (dlen >= off + len) + ip6stat.ip6s_pullup2--; /* call will not be made! */ + else if ((m->m_flags & M_EXT) != 0) { + ip6stat.ip6s_pullup2_alloc++; + ip6stat.ip6s_pullup2_copy++; + prevmlen = (off + len > MHLEN) ?
/*
 * Continuation: end of the PULLDOWN_STAT bookkeeping, then the walk to the
 * mbuf containing offset "off", the fast path (already contiguous), and the
 * "split one mbuf in two" special case via m_copym.
 */
MCLBYTES : MHLEN; + } else { + if (mlen >= off + len) + ip6stat.ip6s_pullup2_copy++; + else { + ip6stat.ip6s_pullup2_alloc++; + ip6stat.ip6s_pullup2_copy++; + prevmlen = (off + len > MHLEN) ? MCLBYTES + : MHLEN; + } + } + } + + prev = m; +#endif + +#ifdef PULLDOWN_DEBUG + { + struct mbuf *t; + printf("before:"); + for (t = m; t; t = t->m_next) + printf(" %d", t->m_len); + printf("\n"); + } +#endif + n = m; + while (n != NULL && off > 0) { + if (n->m_len > off) + break; + off -= n->m_len; + n = n->m_next; + } + /* be sure to point non-empty mbuf */ + while (n != NULL && n->m_len == 0) + n = n->m_next; + if (!n) { + m_freem(m); + return NULL; /* mbuf chain too short */ + } + + /* + * the target data is on <n, off>. + * if we got enough data on the mbuf "n", we're done. + */ + if ((off == 0 || offp) && len <= n->m_len - off) + goto ok; + +#if defined(PULLDOWN_STAT) && defined(INET6) + ip6stat.ip6s_pulldown_copy++; +#endif + + /* + * when len < n->m_len - off and off != 0, it is a special case. + * len bytes from <n, off> sits in single mbuf, but the caller does + * not like the starting position (off). + * chop the current mbuf into two pieces, set off to 0. + */ + if (len < n->m_len - off) { + o = m_copym(n, off, n->m_len - off, M_DONTWAIT); + if (o == NULL) { + m_freem(m); + return NULL; /* ENOBUFS */ + } + n->m_len = off; + o->m_next = n->m_next; + n->m_next = o; + n = n->m_next; + off = 0; + goto ok; + } + + /* + * we need to take hlen from <n, off> and tlen from <n->m_next, 0>, + * and construct contiguous mbuf with m_len == len. + * note that hlen + tlen == len, and tlen > 0. + */ + hlen = n->m_len - off; + tlen = len - hlen; + + /* + * ensure that we have enough trailing data on mbuf chain. + * if not, we can do nothing about the chain. + */ + olen = 0; + for (o = n->m_next; o != NULL; o = o->m_next) + olen += o->m_len; + if (hlen + olen < len) { + m_freem(m); + return NULL; /* mbuf chain too short */ + } + + /* + * easy cases first.
/*
 * Continuation: cluster-sharing detection (platform-dependent refcount
 * check), the trailing-space / leading-space in-place merge paths, and the
 * hard path that allocates a fresh mbuf (plus cluster when len > MHLEN) and
 * splices it into the chain. Ends with the "ok:" exit and the header comment
 * for the pkthdr.aux helpers that follow.
 */
+ * we need to use m_copydata() to get data from <n->m_next, 0>. + */ + if ((n->m_flags & M_EXT) == 0) + sharedcluster = 0; + else { +#ifdef __bsdi__ + if (n->m_ext.ext_func) +#else + if (n->m_ext.ext_free) +#endif + sharedcluster = 1; +#ifdef __NetBSD__ + else if (MCLISREFERENCED(n)) +#else + else if (mclrefcnt[mtocl(n->m_ext.ext_buf)] > 1) +#endif + sharedcluster = 1; + else + sharedcluster = 0; + } + if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen + && !sharedcluster) { + m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len); + n->m_len += tlen; + m_adj(n->m_next, tlen); + goto ok; + } + if ((off == 0 || offp) && M_LEADINGSPACE(n->m_next) >= hlen + && !sharedcluster) { + n->m_next->m_data -= hlen; + n->m_next->m_len += hlen; + bcopy(mtod(n, caddr_t) + off, mtod(n->m_next, caddr_t), hlen); + n->m_len -= hlen; + n = n->m_next; + off = 0; + goto ok; + } + + /* + * now, we need to do the hard way. don't m_copy as there's no room + * on both end. + */ +#if defined(PULLDOWN_STAT) && defined(INET6) + ip6stat.ip6s_pulldown_alloc++; +#endif + MGET(o, M_DONTWAIT, m->m_type); + if (o == NULL) { + m_freem(m); + return NULL; /* ENOBUFS */ + } + if (len > MHLEN) { /* use MHLEN just for safety */ + MCLGET(o, M_DONTWAIT); + if ((o->m_flags & M_EXT) == 0) { + m_freem(m); + m_free(o); + return NULL; /* ENOBUFS */ + } + } + /* get hlen from <n, off> into <o> */ + o->m_len = hlen; + bcopy(mtod(n, caddr_t) + off, mtod(o, caddr_t), hlen); + n->m_len -= hlen; + /* get tlen from <n->m_next, 0> into <o> */ + m_copydata(n->m_next, 0, tlen, mtod(o, caddr_t) + o->m_len); + o->m_len += tlen; + m_adj(n->m_next, tlen); + o->m_next = n->m_next; + n->m_next = o; + n = o; + off = 0; + +ok: +#ifdef PULLDOWN_DEBUG + { + struct mbuf *t; + printf("after:"); + for (t = m; t; t = t->m_next) + printf("%c%d", t == n ? '*' : ' ', t->m_len); + printf(" (off=%d)\n", off); + } +#endif + if (offp) + *offp = off; + return n; +} + +/* + * pkthdr.aux chain manipulation. + * we don't allow clusters at this moment.
/*
 * pkthdr.aux tag helpers (KAME):
 *  m_aux_add(m, af, type)    — allocate a plain mbuf (never a cluster), stamp
 *                              a struct mauxtag {af, type} at the front of its
 *                              data area, advance m_data past the tag, and
 *                              push it on m->m_pkthdr.aux; returns the
 *                              existing tag mbuf if one already matches.
 *  m_aux_find(m, af, type)   — linear search of the aux list for a matching
 *                              tag; NULL if absent or m has no packet header.
 *  m_aux_delete(m, victim)   — unlink and m_free "victim" from the aux list.
 * After the three functions: the diff header for the new file
 * bsd/kern/uipc_proto.c and the start of its APSL license block.
 */
+ */ +struct mbuf * +m_aux_add(m, af, type) + struct mbuf *m; + int af, type; +{ + struct mbuf *n; + struct mauxtag *t; + + if ((m->m_flags & M_PKTHDR) == 0) + return NULL; + + n = m_aux_find(m, af, type); + if (n) + return n; + + MGET(n, M_DONTWAIT, m->m_type); + if (n == NULL) + return NULL; + + t = mtod(n, struct mauxtag *); + t->af = af; + t->type = type; + n->m_data += sizeof(struct mauxtag); + n->m_len = 0; + n->m_next = m->m_pkthdr.aux; + m->m_pkthdr.aux = n; + return n; +} + +struct mbuf * +m_aux_find(m, af, type) + struct mbuf *m; + int af, type; +{ + struct mbuf *n; + struct mauxtag *t; + + if ((m->m_flags & M_PKTHDR) == 0) + return NULL; + + for (n = m->m_pkthdr.aux; n; n = n->m_next) { + t = (struct mauxtag *)n->m_dat; + if (t->af == af && t->type == type) + return n; + } + return NULL; +} + +void +m_aux_delete(m, victim) + struct mbuf *m; + struct mbuf *victim; +{ + struct mbuf *n, *prev, *next; + struct mauxtag *t; + + if ((m->m_flags & M_PKTHDR) == 0) + return; + + prev = NULL; + n = m->m_pkthdr.aux; + while (n) { + t = (struct mauxtag *)n->m_dat; + next = n->m_next; + if (n == victim) { + if (prev) + prev->m_next = n->m_next; + else + m->m_pkthdr.aux = n->m_next; + n->m_next = NULL; + m_free(n); + } else + prev = n; + n = next; + } +} diff --git a/bsd/kern/uipc_proto.c b/bsd/kern/uipc_proto.c new file mode 100644 index 000000000..c6ce5216a --- /dev/null +++ b/bsd/kern/uipc_proto.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
/*
 * End of the BSD license for uipc_proto.c, then its body: the UNIX-domain
 * protocol switch table "localsw" (SOCK_STREAM, SOCK_DGRAM, and a raw entry),
 * local_proto_count, and pre_unp_init(), which wires each localsw entry to
 * &localdomain exactly once.
 * NOTE(review): extraction damage in this span — every "#include" lost its
 * <header.h> name, and pre_unp_init() is truncated mid-loop at "i" (the
 * original condition is "i < local_proto_count"; the rest of the function and
 * the uipc_socket.c diff header were swallowed by the same '<' tag-stripping).
 * Recover the lost text from the xnu-123.5 tarball before applying this patch.
 * The tail of the line starts bsd/kern/uipc_socket.c: its includes (names also
 * stripped) and the socket-cache global state (so_cache_*, socket_cache_*).
 */
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)uipc_proto.c 8.2 (Berkeley) 2/14/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Definitions of protocols supported in the UNIX domain. + */ + +int uipc_usrreq(), raw_usrreq(); +void raw_init(), raw_input(), raw_ctlinput(); +extern struct domain localdomain; /* or at least forward */ + +static struct protosw localsw[] = { +{ SOCK_STREAM, &localdomain, 0, PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS, + 0, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &uipc_usrreqs +}, +{ SOCK_DGRAM, &localdomain, 0, PR_ATOMIC|PR_ADDR|PR_RIGHTS, + 0, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &uipc_usrreqs +}, +{ 0, 0, 0, 0, + raw_input, 0, raw_ctlinput, 0, + 0, + raw_init, 0, 0, 0, + 0, &raw_usrreqs +} +}; + +int local_proto_count = (sizeof (localsw) / sizeof (struct protosw)); + +static void pre_unp_init() +{ static int localdomain_initted = 0; + register int i; + register struct protosw *pr; + register struct domain *dp = &localdomain; + + for (i=0, pr = &localsw[0]; i +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int so_cache_hw = 0; +int so_cache_timeouts = 0; +int so_cache_max_freed = 0; +int cached_sock_count = 0; +struct socket *socket_cache_head = 0; +struct socket *socket_cache_tail = 0; +u_long so_cache_time = 0; +int so_cache_init_done = 0;
/*
 * Remaining uipc_socket.c file-scope state: the socache zone handle, helper
 * externs, debug/zone globals, generation counter, MALLOC_DEFINEs, the
 * KERNEL_DEBUG trace codes, and the kern.ipc sysctls (somaxconn,
 * sosendminchain). so_cache_timer() is forward-declared for socketinit().
 */
+struct zone *so_cache_zone; +extern int get_inpcb_str_size(); +extern int get_tcp_str_size(); + +#include + +int socket_debug = 0; +int socket_zone = M_SOCKET; +so_gen_t so_gencnt; /* generation count for sockets */ + +MALLOC_DEFINE(M_SONAME, "soname", "socket name"); +MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); + +#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) +#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) +#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1) +#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3) +#define DBG_FNC_SOSEND NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1) +#define DBG_FNC_SORECEIVE NETDBG_CODE(DBG_NETSOCK, (8 << 8)) +#define DBG_FNC_SOSHUTDOWN NETDBG_CODE(DBG_NETSOCK, (9 << 8)) + + +SYSCTL_DECL(_kern_ipc); + +static int somaxconn = SOMAXCONN; +SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW, &somaxconn, + 0, ""); + +/* Should we get a maximum also ??? */ +static int sosendminchain = 16384; +SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, CTLFLAG_RW, &sosendminchain, + 0, ""); + +void so_cache_timer(); + +/* + * Socket operation routines. + * These routines are called by the routines in + * sys_socket.c or from a system process, and + * implement the semantics of socket operations by + * switching out to the protocol specific routines.
/*
 * Core uipc_socket.c routines (socket cache + socket lifecycle + connect
 * paths). Per-function notes:
 *  socketinit()        — create the socache zone (socket + inpcb + tcpcb in
 *                        one block) and arm the so_cache_timer timeout.
 *                        NOTE(review): so_cache_init_done is set to 1 before
 *                        the zone exists — confirm callers tolerate that
 *                        ordering.
 *  cached_sock_alloc() — pop a recycled socket from the cache (preserving its
 *                        saved pcb pointer across the bzero) or carve a fresh
 *                        zone block, laying out the inpcb/tcpcb at 4-byte
 *                        aligned offsets inside it. On zalloc_noblock failure
 *                        *so is left NULL — callers must check.
 *  cached_sock_free()  — return a socket to the cache, or zfree it when the
 *                        cache already holds MAX_CACHED_SOCKETS.
 *  so_cache_timer()    — periodic reaper: frees cache entries older than
 *                        SO_CACHE_TIME_LIMIT, at most SO_CACHE_MAX_FREE_BATCH
 *                        per tick, then re-arms itself.
 *                        NOTE(review): the bare "splnet();" after zfree()
 *                        discards the returned spl value — the original
 *                        intent looks like "s = splnet();"; verify against
 *                        the upstream source.
 *  soalloc()           — PF_INET/SOCK_STREAM sockets come from the cache;
 *                        everything else from _MALLOC_ZONE.
 *  socreate()          — find the protosw, allocate, mark SS_PRIV for uid 0,
 *                        run NKE filter init and pru_attach.
 *  sobind()/solisten()/soaccept()/soconnect()/soconnect2()/sodisconnect()
 *                      — each calls the protocol's pru_* entry, then walks
 *                        the kext (NKE) filter chain; EJUSTRETURN from a
 *                        filter stops the walk without being treated as an
 *                        error. All bracket the work with splnet()/splx().
 *  sodealloc()/sofree()/soclose()/soabort()
 *                      — teardown: sofree() drops the socket only once both
 *                        the pcb is gone and SS_NOFDREF is set; soclose()
 *                        aborts pending connections, honours SO_LINGER, and
 *                        detaches the pcb.
 */
+ */ + +void socketinit() +{ + vm_size_t str_size; + + so_cache_init_done = 1; + + timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz)); + str_size = (vm_size_t)( sizeof(struct socket) + 4 + + get_inpcb_str_size() + 4 + + get_tcp_str_size()); + so_cache_zone = zinit (str_size, 120000*str_size, 8192, "socache zone"); +#if TEMPDEBUG + kprintf("cached_sock_alloc -- so_cache_zone size is %x\n", str_size); +#endif + +} + +void cached_sock_alloc(so, waitok) +struct socket **so; +int waitok; + +{ + caddr_t temp; + int s; + register u_long offset; + + + s = splnet(); + if (cached_sock_count) { + cached_sock_count--; + *so = socket_cache_head; + if (*so == 0) + panic("cached_sock_alloc: cached sock is null"); + + socket_cache_head = socket_cache_head->cache_next; + if (socket_cache_head) + socket_cache_head->cache_prev = 0; + else + socket_cache_tail = 0; + splx(s); + + temp = (*so)->so_saved_pcb; + bzero((caddr_t)*so, sizeof(struct socket)); +#if TEMPDEBUG + kprintf("cached_sock_alloc - retreiving cached sock %x - count == %d\n", *so, + cached_sock_count); +#endif + (*so)->so_saved_pcb = temp; + } + else { +#if TEMPDEBUG + kprintf("Allocating cached sock %x from memory\n", *so); +#endif + + splx(s); + if (waitok) + *so = (struct socket *) zalloc(so_cache_zone); + else + *so = (struct socket *) zalloc_noblock(so_cache_zone); + + if (*so == 0) + return; + + bzero((caddr_t)*so, sizeof(struct socket)); + + /* + * Define offsets for extra structures into our single block of + * memory. Align extra structures on longword boundaries.
+ */ + + + offset = (u_long) *so; + offset += sizeof(struct socket); + if (offset & 0x3) { + offset += 4; + offset &= 0xfffffffc; + } + (*so)->so_saved_pcb = (caddr_t) offset; + offset += get_inpcb_str_size(); + if (offset & 0x3) { + offset += 4; + offset &= 0xfffffffc; + } + + ((struct inpcb *) (*so)->so_saved_pcb)->inp_saved_ppcb = (caddr_t) offset; +#if TEMPDEBUG + kprintf("Allocating cached socket - %x, pcb=%x tcpcb=%x\n", *so, + (*so)->so_saved_pcb, + ((struct inpcb *)(*so)->so_saved_pcb)->inp_saved_ppcb); +#endif + } + + (*so)->cached_in_sock_layer = 1; +} + + +void cached_sock_free(so) +struct socket *so; +{ + int s; + + + s = splnet(); + if (++cached_sock_count > MAX_CACHED_SOCKETS) { + --cached_sock_count; + splx(s); +#if TEMPDEBUG + kprintf("Freeing overflowed cached socket %x\n", so); +#endif + zfree(so_cache_zone, (vm_offset_t) so); + } + else { +#if TEMPDEBUG + kprintf("Freeing socket %x into cache\n", so); +#endif + if (so_cache_hw < cached_sock_count) + so_cache_hw = cached_sock_count; + + so->cache_next = socket_cache_head; + so->cache_prev = 0; + if (socket_cache_head) + socket_cache_head->cache_prev = so; + else + socket_cache_tail = so; + + so->cache_timestamp = so_cache_time; + socket_cache_head = so; + splx(s); + } + +#if TEMPDEBUG + kprintf("Freed cached sock %x into cache - count is %d\n", so, cached_sock_count); +#endif + + +} + + +void so_cache_timer() +{ + register struct socket *p; + register int s; + register int n_freed = 0; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + ++so_cache_time; + + s = splnet(); + + while (p = socket_cache_tail) + { + if ((so_cache_time - p->cache_timestamp) < SO_CACHE_TIME_LIMIT) + break; + + so_cache_timeouts++; + + if (socket_cache_tail = p->cache_prev) + p->cache_prev->cache_next = 0; + if (--cached_sock_count == 0) + socket_cache_head = 0; + + splx(s); + + zfree(so_cache_zone, (vm_offset_t) p); + + splnet(); + if (++n_freed >= SO_CACHE_MAX_FREE_BATCH) + { +
so_cache_max_freed++; + break; + } + } + splx(s); + + timeout(so_cache_timer, NULL, (SO_CACHE_FLUSH_INTERVAL * hz)); + + (void) thread_funnel_set(network_flock, FALSE); + +} + + +/* + * Get a socket structure from our zone, and initialize it. + * We don't implement `waitok' yet (see comments in uipc_domain.c). + * Note that it would probably be better to allocate socket + * and PCB at the same time, but I'm not convinced that all + * the protocols can be easily modified to do this. + */ +struct socket * +soalloc(waitok, dom, type) + int waitok; + int dom; + int type; +{ + struct socket *so; + + if ((dom == PF_INET) && (type == SOCK_STREAM)) + cached_sock_alloc(&so, waitok); + else + { + so = _MALLOC_ZONE(sizeof(*so), socket_zone, M_WAITOK); + if (so) + bzero(so, sizeof *so); + } + /* XXX race condition for reentrant kernel */ + + if (so) { + so->so_gencnt = ++so_gencnt; + so->so_zone = socket_zone; + } + + return so; +} + +int +socreate(dom, aso, type, proto) + int dom; + struct socket **aso; + register int type; + int proto; + +{ + struct proc *p = current_proc(); + register struct protosw *prp; + struct socket *so; + register int error = 0; + + if (proto) + prp = pffindproto(dom, proto, type); + else + prp = pffindtype(dom, type); + if (prp == 0 || prp->pr_usrreqs->pru_attach == 0) + return (EPROTONOSUPPORT); + if (prp->pr_type != type) + return (EPROTOTYPE); + so = soalloc(p != 0, dom, type); + if (so == 0) + return (ENOBUFS); + + TAILQ_INIT(&so->so_incomp); + TAILQ_INIT(&so->so_comp); + so->so_type = type; + + if (p != 0) { + if (p->p_ucred->cr_uid == 0) + so->so_state = SS_PRIV; + + so->so_uid = p->p_ucred->cr_uid; + } + + so->so_proto = prp; + so->so_rcv.sb_flags |= SB_RECV; /* XXX */ + if (prp->pr_sfilter.tqh_first) + error = sfilter_init(so); + if (error == 0) + error = (*prp->pr_usrreqs->pru_attach)(so, proto, p); + + if (error) { + so->so_state |= SS_NOFDREF; + sofree(so); + return (error); + } + prp->pr_domain->dom_refs++; + so->so_rcv.sb_so =
so->so_snd.sb_so = so; + TAILQ_INIT(&so->so_evlist); + *aso = so; + return (0); +} + +int +sobind(so, nam) + struct socket *so; + struct sockaddr *nam; + +{ + struct proc *p = current_proc(); + int error; + struct kextcb *kp; + int s = splnet(); + + error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p); + if (error == 0) /* ??? */ + { kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sobind) + { error = (*kp->e_soif->sf_sobind)(so, nam, kp); + if (error) + { if (error == EJUSTRETURN) + break; + splx(s); + return(error); + } + } + kp = kp->e_next; + } + } + splx(s); + return (error); +} + +void +sodealloc(so) + struct socket *so; +{ + so->so_gencnt = ++so_gencnt; + + if (so->cached_in_sock_layer == 1) + cached_sock_free(so); + else + _FREE_ZONE(so, sizeof(*so), so->so_zone); +} + +int +solisten(so, backlog) + register struct socket *so; + int backlog; + +{ + struct kextcb *kp; + struct proc *p = current_proc(); + int s, error; + + s = splnet(); + error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p); + if (error) { + splx(s); + return (error); + } + if (so->so_comp.tqh_first == NULL) + so->so_options |= SO_ACCEPTCONN; + if (backlog < 0 || backlog > somaxconn) + backlog = somaxconn; + so->so_qlimit = backlog; + kp = sotokextcb(so); + while (kp) + { + if (kp->e_soif && kp->e_soif->sf_solisten) + { error = (*kp->e_soif->sf_solisten)(so, kp); + if (error) + { if (error == EJUSTRETURN) + break; + splx(s); + return(error); + } + } + kp = kp->e_next; + } + + splx(s); + return (0); +} + + +void +sofree(so) + register struct socket *so; +{ int error; + struct kextcb *kp; + struct socket *head = so->so_head; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sofree) + { error = (*kp->e_soif->sf_sofree)(so, kp); + if (error) + return; /* void fn */ + } + kp = kp->e_next; + } + + if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0) + return; + if (head != NULL) { + if (so->so_state & SS_INCOMP) { + TAILQ_REMOVE(&head->so_incomp,
so, so_list); + head->so_incqlen--; + } else if (so->so_state & SS_COMP) { + TAILQ_REMOVE(&head->so_comp, so, so_list); + } else { + panic("sofree: not queued"); + } + head->so_qlen--; + so->so_state &= ~(SS_INCOMP|SS_COMP); + so->so_head = NULL; + } + + sbrelease(&so->so_snd); + sorflush(so); + sfilter_term(so); + sodealloc(so); +} + +/* + * Close a socket on last file table reference removal. + * Initiate disconnect if connected. + * Free socket when disconnect complete. + */ +int +soclose(so) + register struct socket *so; +{ + int s = splnet(); /* conservative */ + int error = 0; + struct kextcb *kp; + +#if FB31SIG + funsetown(so->so_pgid); +#endif + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soclose) + { error = (*kp->e_soif->sf_soclose)(so, kp); + if (error) + { splx(s); + return((error == EJUSTRETURN) ? 0 : error); + } + } + kp = kp->e_next; + } + + if (so->so_options & SO_ACCEPTCONN) { + struct socket *sp, *sonext; + + for (sp = so->so_incomp.tqh_first; sp != NULL; sp = sonext) { + sonext = sp->so_list.tqe_next; + (void) soabort(sp); + } + for (sp = so->so_comp.tqh_first; sp != NULL; sp = sonext) { + sonext = sp->so_list.tqe_next; + (void) soabort(sp); + } + } + if (so->so_pcb == 0) + goto discard; + if (so->so_state & SS_ISCONNECTED) { + if ((so->so_state & SS_ISDISCONNECTING) == 0) { + error = sodisconnect(so); + if (error) + goto drop; + } + if (so->so_options & SO_LINGER) { + if ((so->so_state & SS_ISDISCONNECTING) && + (so->so_state & SS_NBIO)) + goto drop; + while (so->so_state & SS_ISCONNECTED) { + error = tsleep((caddr_t)&so->so_timeo, + PSOCK | PCATCH, "soclos", so->so_linger); + if (error) + break; + } + } + } +drop: + if (so->so_pcb) { + int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so); + if (error == 0) + error = error2; + } +discard: + if (so->so_state & SS_NOFDREF) + panic("soclose: NOFDREF"); + so->so_state |= SS_NOFDREF; + so->so_proto->pr_domain->dom_refs--; + evsofree(so); + sofree(so); + splx(s); +
return (error); +} + +/* + * Must be called at splnet... + */ +int +soabort(so) + struct socket *so; +{ + + return (*so->so_proto->pr_usrreqs->pru_abort)(so); +} + +int +soaccept(so, nam) + register struct socket *so; + struct sockaddr **nam; +{ int s = splnet(); + int error; + struct kextcb *kp; + + if ((so->so_state & SS_NOFDREF) == 0) + panic("soaccept: !NOFDREF"); + so->so_state &= ~SS_NOFDREF; + error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam); + if (error == 0) + { kp = sotokextcb(so); + while (kp) { + if (kp->e_soif && kp->e_soif->sf_soaccept) + { error = (*kp->e_soif->sf_soaccept)(so, nam, kp); + if (error) + { if (error == EJUSTRETURN) + break; + splx(s); + return(error); + } + } + kp = kp->e_next; + } + } + + + splx(s); + return (error); +} + +int +soconnect(so, nam) + register struct socket *so; + struct sockaddr *nam; + +{ + int s; + int error; + struct proc *p = current_proc(); + struct kextcb *kp; + + if (so->so_options & SO_ACCEPTCONN) + return (EOPNOTSUPP); + s = splnet(); + /* + * If protocol is connection-based, can only connect once. + * Otherwise, if connected, try to disconnect first. + * This allows user to disconnect by connecting to, e.g., + * a null address.
+ */ + if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && + ((so->so_proto->pr_flags & PR_CONNREQUIRED) || + (error = sodisconnect(so)))) + error = EISCONN; + else { + error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p); + if (error == 0) + { + kp = sotokextcb(so); + while (kp) + { + if (kp->e_soif && kp->e_soif->sf_soconnect) + { error = (*kp->e_soif->sf_soconnect)(so, nam, kp); + if (error) + { if (error == EJUSTRETURN) + break; + splx(s); + return(error); + } + } + kp = kp->e_next; + } + } + } + + splx(s); + return (error); +} + +int +soconnect2(so1, so2) + register struct socket *so1; + struct socket *so2; +{ + int s = splnet(); + int error; + struct kextcb *kp; + + error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); + if (error == 0) + { kp = sotokextcb(so1); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soconnect2) + { error = (*kp->e_soif->sf_soconnect2)(so1, so2, kp); + if (error) + { if (error == EJUSTRETURN) + break; + splx(s); + return(error); + } + } + kp = kp->e_next; + } + } + splx(s); + return (error); +} + +int +sodisconnect(so) + register struct socket *so; +{ + int s = splnet(); + int error; + struct kextcb *kp; + + if ((so->so_state & SS_ISCONNECTED) == 0) { + error = ENOTCONN; + goto bad; + } + if (so->so_state & SS_ISDISCONNECTING) { + error = EALREADY; + goto bad; + } + error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); + + if (error == 0) + { kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sodisconnect) + { error = (*kp->e_soif->sf_sodisconnect)(so, kp); + if (error) + { if (error == EJUSTRETURN) + break; + splx(s); + return(error); + } + } + kp = kp->e_next; + } + } + +bad: + splx(s); + return (error); +} + +#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_DONTWAIT : M_WAIT) +/* + * Send on a socket. + * If send must go all at once and message is larger than + * send buffering, then hard error. + * Lock against other senders.
+ * If must go all at once and not enough room now, then + * inform user that this would block and do nothing. + * Otherwise, if nonblocking, send as much as possible. + * The data to be sent is described by "uio" if nonzero, + * otherwise by the mbuf chain "top" (which must be null + * if uio is not). Data provided in mbuf chain must be small + * enough to send all at once. + * + * Returns nonzero on error, timeout or signal; callers + * must check for short counts if EINTR/ERESTART are returned. + * Data and control buffers are freed on return. + * Experiment: + * MSG_HOLD: go thru most of sosend(), but just enqueue the mbuf + * MSG_SEND: go thru as for MSG_HOLD on current fragment, then + * point at the mbuf chain being constructed and go from there. + */ +int +sosend(so, addr, uio, top, control, flags) + register struct socket *so; + struct sockaddr *addr; + struct uio *uio; + struct mbuf *top; + struct mbuf *control; + int flags; + +{ + struct mbuf **mp; + register struct mbuf *m; + register long space, len, resid; + int clen = 0, error, s, dontroute, mlen, sendflags; + int atomic = sosendallatonce(so) || top; + struct proc *p = current_proc(); + struct kextcb *kp; + + if (uio) + resid = uio->uio_resid; + else + resid = top->m_pkthdr.len; + + KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START), + so, + resid, + so->so_snd.sb_cc, + so->so_snd.sb_lowat, + so->so_snd.sb_hiwat); + + /* + * In theory resid should be unsigned. + * However, space must be signed, as it might be less than 0 + * if we over-committed, and we must use a signed comparison + * of space and resid. On the other hand, a negative resid + * causes us to loop sending 0-length segments to the protocol. + * + * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM + * type sockets since that's an error. 
+ */ + if (resid < 0 || so->so_type == SOCK_STREAM && (flags & MSG_EOR)) { + error = EINVAL; + goto out; + } + + dontroute = + (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 && + (so->so_proto->pr_flags & PR_ATOMIC); + if (p) + p->p_stats->p_ru.ru_msgsnd++; + if (control) + clen = control->m_len; +#define snderr(errno) { error = errno; splx(s); goto release; } + +restart: + error = sblock(&so->so_snd, SBLOCKWAIT(flags)); + if (error) + goto out; + do { + s = splnet(); + if (so->so_state & SS_CANTSENDMORE) + snderr(EPIPE); + if (so->so_error) { + error = so->so_error; + so->so_error = 0; + splx(s); + goto release; + } + if ((so->so_state & SS_ISCONNECTED) == 0) { + /* + * `sendto' and `sendmsg' is allowed on a connection- + * based socket if it supports implied connect. + * Return ENOTCONN if not connected and no address is + * supplied. + */ + if ((so->so_proto->pr_flags & PR_CONNREQUIRED) && + (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) { + if ((so->so_state & SS_ISCONFIRMING) == 0 && + !(resid == 0 && clen != 0)) + snderr(ENOTCONN); + } else if (addr == 0 && !(flags&MSG_HOLD)) + snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ? + ENOTCONN : EDESTADDRREQ); + } + space = sbspace(&so->so_snd); + if (flags & MSG_OOB) + space += 1024; + if ((atomic && resid > so->so_snd.sb_hiwat) || + clen > so->so_snd.sb_hiwat) + snderr(EMSGSIZE); + if (space < resid + clen && uio && + (atomic || space < so->so_snd.sb_lowat || space < clen)) { + if (so->so_state & SS_NBIO) + snderr(EWOULDBLOCK); + sbunlock(&so->so_snd); + error = sbwait(&so->so_snd); + splx(s); + if (error) + goto out; + goto restart; + } + splx(s); + mp = ⊤ + space -= clen; + do { + if (uio == NULL) { + /* + * Data is prepackaged in "top". 
+ */ + resid = 0; + if (flags & MSG_EOR) + top->m_flags |= M_EOR; + } else { + boolean_t funnel_state = TRUE; + int chainmbufs = (sosendminchain > 0 && resid >= sosendminchain); + + if (chainmbufs) + funnel_state = thread_funnel_set(network_flock, FALSE); + do { + KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE, -1, 0, 0, 0, 0); + if (top == 0) { + MGETHDR(m, M_WAIT, MT_DATA); + mlen = MHLEN; + m->m_pkthdr.len = 0; + m->m_pkthdr.rcvif = (struct ifnet *)0; + } else { + MGET(m, M_WAIT, MT_DATA); + mlen = MLEN; + } + if (resid >= MINCLSIZE) { + MCLGET(m, M_WAIT); + if ((m->m_flags & M_EXT) == 0) + goto nopages; + mlen = MCLBYTES; + len = min(min(mlen, resid), space); + } else { +nopages: + len = min(min(mlen, resid), space); + /* + * For datagram protocols, leave room + * for protocol headers in first mbuf. + */ + if (atomic && top == 0 && len < mlen) + MH_ALIGN(m, len); + } + KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_NONE, -1, 0, 0, 0, 0); + space -= len; + error = uiomove(mtod(m, caddr_t), (int)len, uio); + resid = uio->uio_resid; + + m->m_len = len; + *mp = m; + top->m_pkthdr.len += len; + if (error) + break; + mp = &m->m_next; + if (resid <= 0) { + if (flags & MSG_EOR) + top->m_flags |= M_EOR; + break; + } + } while (space > 0 && (chainmbufs || atomic || resid < MINCLSIZE)); + if (chainmbufs) + funnel_state = thread_funnel_set(network_flock, TRUE); + if (error) + goto release; + } + + if (flags & (MSG_HOLD|MSG_SEND)) + { /* Enqueue for later, go away if HOLD */ + register struct mbuf *mb1; + if (so->so_temp && (flags & MSG_FLUSH)) + { m_freem(so->so_temp); + so->so_temp = NULL; + } + if (so->so_temp) + so->so_tail->m_next = top; + else + so->so_temp = top; + mb1 = top; + while (mb1->m_next) + mb1 = mb1->m_next; + so->so_tail = mb1; + if (flags&MSG_HOLD) + { top = NULL; + goto release; + } + top = so->so_temp; + } + if (dontroute) + so->so_options |= SO_DONTROUTE; + s = splnet(); /* XXX */ + kp = sotokextcb(so); + /* Compute flags here, for pru_send and NKEs */ + 
sendflags = (flags & MSG_OOB) ? PRUS_OOB : + /* + * If the user set MSG_EOF, the protocol + * understands this flag and nothing left to + * send then use PRU_SEND_EOF instead of PRU_SEND. + */ + ((flags & MSG_EOF) && + (so->so_proto->pr_flags & PR_IMPLOPCL) && + (resid <= 0)) ? + PRUS_EOF : + /* If there is more to send set PRUS_MORETOCOME */ + (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0; + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sosend) + { error = (*kp->e_soif->sf_sosend)(so, &addr, + &uio, &top, + &control, + &sendflags, + kp); + if (error) + { splx(s); + if (error == EJUSTRETURN) + { sbunlock(&so->so_snd); + return(0); + } + goto release; + } + } + kp = kp->e_next; + } + + error = (*so->so_proto->pr_usrreqs->pru_send)(so, + sendflags, top, addr, control, p); + splx(s); + if (flags & MSG_SEND) + so->so_temp = NULL; + + if (dontroute) + so->so_options &= ~SO_DONTROUTE; + clen = 0; + control = 0; + top = 0; + mp = ⊤ + if (error) + goto release; + } while (resid && space > 0); + } while (resid); + +release: + sbunlock(&so->so_snd); +out: + if (top) + m_freem(top); + if (control) + m_freem(control); + + KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END, + so, + resid, + so->so_snd.sb_cc, + space, + error); + + return (error); +} + +/* + * Implement receive operations on a socket. + * We depend on the way that records are added to the sockbuf + * by sbappend*. In particular, each record (mbufs linked through m_next) + * must begin with an address if the protocol so specifies, + * followed by an optional mbuf or mbufs containing ancillary data, + * and then zero or more mbufs of data. + * In order to avoid blocking network interrupts for the entire time here, + * we splx() while doing the actual copy to user space. + * Although the sockbuf is locked, new data may still be appended, + * and thus we must maintain consistency of the sockbuf during that time. 
+ * + * The caller may receive the data as a single mbuf chain by supplying + * an mbuf **mp0 for use in returning the chain. The uio is then used + * only for the count in uio_resid. + */ +int +soreceive(so, psa, uio, mp0, controlp, flagsp) + register struct socket *so; + struct sockaddr **psa; + struct uio *uio; + struct mbuf **mp0; + struct mbuf **controlp; + int *flagsp; +{ + register struct mbuf *m, **mp; + register int flags, len, error, s, offset; + struct protosw *pr = so->so_proto; + struct mbuf *nextrecord; + int moff, type = 0; + int orig_resid = uio->uio_resid; + struct kextcb *kp; + + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START, + so, + uio->uio_resid, + so->so_rcv.sb_cc, + so->so_rcv.sb_lowat, + so->so_rcv.sb_hiwat); + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soreceive) + { error = (*kp->e_soif->sf_soreceive)(so, psa, &uio, + mp0, controlp, + flagsp, kp); + if (error) + return((error == EJUSTRETURN) ? 0 : error); + } + kp = kp->e_next; + } + + mp = mp0; + if (psa) + *psa = 0; + if (controlp) + *controlp = 0; + if (flagsp) + flags = *flagsp &~ MSG_EOR; + else + flags = 0; + /* + * When SO_WANTOOBFLAG is set we try to get out-of-band data + * regardless of the flags argument. Here is the case were + * out-of-band data is not inline. 
+ */ + if ((flags & MSG_OOB) || + ((so->so_options & SO_WANTOOBFLAG) != 0 && + (so->so_options & SO_OOBINLINE) == 0 && + (so->so_oobmark || (so->so_state & SS_RCVATMARK)))) { + m = m_get(M_WAIT, MT_DATA); + error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); + if (error) + goto bad; + do { + error = uiomove(mtod(m, caddr_t), + (int) min(uio->uio_resid, m->m_len), uio); + m = m_free(m); + } while (uio->uio_resid && error == 0 && m); +bad: + if (m) + m_freem(m); + if ((so->so_options & SO_WANTOOBFLAG) != 0) { + if (error == EWOULDBLOCK || error == EINVAL) { + /* + * Let's try to get normal data: + * EWOULDBLOCK: out-of-band data not receive yet; + * EINVAL: out-of-band data already read. + */ + error = 0; + goto nooob; + } else if (error == 0 && flagsp) + *flagsp |= MSG_OOB; + } + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0); + return (error); + } +nooob: + if (mp) + *mp = (struct mbuf *)0; + if (so->so_state & SS_ISCONFIRMING && uio->uio_resid) + (*pr->pr_usrreqs->pru_rcvd)(so, 0); + +restart: + if (error = sblock(&so->so_rcv, SBLOCKWAIT(flags))) + { + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + s = splnet(); + + m = so->so_rcv.sb_mb; + /* + * If we have less data than requested, block awaiting more + * (subject to any timeout) if: + * 1. the current count is less than the low water mark, or + * 2. MSG_WAITALL is set, and it is possible to do the entire + * receive operation at once if we block (resid <= hiwat). + * 3. MSG_DONTWAIT is not set + * If MSG_WAITALL is set but resid is larger than the receive buffer, + * we have to do the receive in sections, and thus risk returning + * a short count if a timeout or signal occurs after we start. 
+ */ + if (m == 0 || (((flags & MSG_DONTWAIT) == 0 && + so->so_rcv.sb_cc < uio->uio_resid) && + (so->so_rcv.sb_cc < so->so_rcv.sb_lowat || + ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) && + m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) { + KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1")); + if (so->so_error) { + if (m) + goto dontblock; + error = so->so_error; + if ((flags & MSG_PEEK) == 0) + so->so_error = 0; + goto release; + } + if (so->so_state & SS_CANTRCVMORE) { + if (m) + goto dontblock; + else + goto release; + } + for (; m; m = m->m_next) + if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { + m = so->so_rcv.sb_mb; + goto dontblock; + } + if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && + (so->so_proto->pr_flags & PR_CONNREQUIRED)) { + error = ENOTCONN; + goto release; + } + if (uio->uio_resid == 0) + goto release; + if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) { + error = EWOULDBLOCK; + goto release; + } + sbunlock(&so->so_rcv); + if (socket_debug) + printf("Waiting for socket data\n"); + error = sbwait(&so->so_rcv); + if (socket_debug) + printf("SORECEIVE - sbwait returned %d\n", error); + splx(s); + if (error) + { + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + goto restart; + } +dontblock: +#ifdef notyet /* XXXX */ + if (uio->uio_procp) + uio->uio_procp->p_stats->p_ru.ru_msgrcv++; +#endif + nextrecord = m->m_nextpkt; + if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) { + KASSERT(m->m_type == MT_SONAME, ("receive 1a")); + orig_resid = 0; + if (psa) + *psa = dup_sockaddr(mtod(m, struct sockaddr *), + mp0 == 0); + if (flags & MSG_PEEK) { + m = m->m_next; + } else { + sbfree(&so->so_rcv, m); + MFREE(m, so->so_rcv.sb_mb); + m = so->so_rcv.sb_mb; + } + } + while (m && m->m_type == MT_CONTROL && error == 0) { + if (flags & MSG_PEEK) { + if (controlp) + *controlp = m_copy(m, 0, m->m_len); + m = m->m_next; + } else { + sbfree(&so->so_rcv, m); + if 
(controlp) { + if (pr->pr_domain->dom_externalize && + mtod(m, struct cmsghdr *)->cmsg_type == + SCM_RIGHTS) + error = (*pr->pr_domain->dom_externalize)(m); + *controlp = m; + so->so_rcv.sb_mb = m->m_next; + m->m_next = 0; + m = so->so_rcv.sb_mb; + } else { + MFREE(m, so->so_rcv.sb_mb); + m = so->so_rcv.sb_mb; + } + } + if (controlp) { + orig_resid = 0; + controlp = &(*controlp)->m_next; + } + } + if (m) { + if ((flags & MSG_PEEK) == 0) + m->m_nextpkt = nextrecord; + type = m->m_type; + if (type == MT_OOBDATA) + flags |= MSG_OOB; + } + moff = 0; + offset = 0; + while (m && uio->uio_resid > 0 && error == 0) { + if (m->m_type == MT_OOBDATA) { + if (type != MT_OOBDATA) + break; + } else if (type == MT_OOBDATA) + break; +#if 0 +/* + * This assertion needs rework. The trouble is Appletalk is uses many + * mbuf types (NOT listed in mbuf.h!) which will trigger this panic. + * For now just remove the assertion... CSM 9/98 + */ + else + KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER, + ("receive 3")); +#endif + /* + * Make sure to allways set MSG_OOB event when getting + * out of band data inline. + */ + if ((so->so_options & SO_WANTOOBFLAG) != 0 && + (so->so_options & SO_OOBINLINE) != 0 && + (so->so_state & SS_RCVATMARK) != 0) { + flags |= MSG_OOB; + } + so->so_state &= ~SS_RCVATMARK; + len = uio->uio_resid; + if (so->so_oobmark && len > so->so_oobmark - offset) + len = so->so_oobmark - offset; + if (len > m->m_len - moff) + len = m->m_len - moff; + /* + * If mp is set, just pass back the mbufs. + * Otherwise copy them out via the uio, then free. + * Sockbuf must be consistent here (points to current mbuf, + * it points to next record) when we drop priority; + * we must note any additions to the sockbuf when we + * block interrupts again. 
+ */ + if (mp == 0) { + splx(s); + error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio); + s = splnet(); + if (error) + goto release; + } else + uio->uio_resid -= len; + if (len == m->m_len - moff) { + if (m->m_flags & M_EOR) + flags |= MSG_EOR; + if (flags & MSG_PEEK) { + m = m->m_next; + moff = 0; + } else { + nextrecord = m->m_nextpkt; + sbfree(&so->so_rcv, m); + if (mp) { + *mp = m; + mp = &m->m_next; + so->so_rcv.sb_mb = m = m->m_next; + *mp = (struct mbuf *)0; + } else { + MFREE(m, so->so_rcv.sb_mb); + m = so->so_rcv.sb_mb; + } + if (m) + m->m_nextpkt = nextrecord; + } + } else { + if (flags & MSG_PEEK) + moff += len; + else { + if (mp) + *mp = m_copym(m, 0, len, M_WAIT); + m->m_data += len; + m->m_len -= len; + so->so_rcv.sb_cc -= len; + } + } + if (so->so_oobmark) { + if ((flags & MSG_PEEK) == 0) { + so->so_oobmark -= len; + if (so->so_oobmark == 0) { + so->so_state |= SS_RCVATMARK; + postevent(so, 0, EV_OOB); + break; + } + } else { + offset += len; + if (offset == so->so_oobmark) + break; + } + } + if (flags & MSG_EOR) + break; + /* + * If the MSG_WAITALL flag is set (for non-atomic socket), + * we must not quit until "uio->uio_resid == 0" or an error + * termination. If a signal/timeout occurs, return + * with a short count but without error. + * Keep sockbuf locked against other readers. 
+ */ + while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 && + !sosendallatonce(so) && !nextrecord) { + if (so->so_error || so->so_state & SS_CANTRCVMORE) + break; + error = sbwait(&so->so_rcv); + if (error) { + sbunlock(&so->so_rcv); + splx(s); + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, 0,0,0,0,0); + return (0); + } + m = so->so_rcv.sb_mb; + if (m) + nextrecord = m->m_nextpkt; + } + } + + if (m && pr->pr_flags & PR_ATOMIC) { + if (so->so_options & SO_DONTTRUNC) + flags |= MSG_RCVMORE; + else + { flags |= MSG_TRUNC; + if ((flags & MSG_PEEK) == 0) + (void) sbdroprecord(&so->so_rcv); + } + } + if ((flags & MSG_PEEK) == 0) { + if (m == 0) + so->so_rcv.sb_mb = nextrecord; + if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) + (*pr->pr_usrreqs->pru_rcvd)(so, flags); + } + if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) + flags |= MSG_HAVEMORE; + if (orig_resid == uio->uio_resid && orig_resid && + (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { + sbunlock(&so->so_rcv); + splx(s); + goto restart; + } + + if (flagsp) + *flagsp |= flags; +release: + sbunlock(&so->so_rcv); + splx(s); + + KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, + so, + uio->uio_resid, + so->so_rcv.sb_cc, + 0, + error); + + return (error); +} + +int +soshutdown(so, how) + register struct socket *so; + register int how; +{ + register struct protosw *pr = so->so_proto; + struct kextcb *kp; + int ret; + + + KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_START, 0,0,0,0,0); + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soshutdown) + { ret = (*kp->e_soif->sf_soshutdown)(so, how, kp); + if (ret) + return((ret == EJUSTRETURN) ? 
0 : ret); + } + kp = kp->e_next; + } + + how++; + if (how & FREAD) { + sorflush(so); + postevent(so, 0, EV_RCLOSED); + } + if (how & FWRITE) { + ret = ((*pr->pr_usrreqs->pru_shutdown)(so)); + postevent(so, 0, EV_WCLOSED); + KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0); + return(ret); + } + + KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, 0,0,0,0,0); + return (0); +} + +void +sorflush(so) + register struct socket *so; +{ + register struct sockbuf *sb = &so->so_rcv; + register struct protosw *pr = so->so_proto; + register int s, error; + struct sockbuf asb; + struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sorflush) + { if ((*kp->e_soif->sf_sorflush)(so, kp)) + return; + } + kp = kp->e_next; + } + + sb->sb_flags |= SB_NOINTR; + (void) sblock(sb, M_WAIT); + s = splimp(); + socantrcvmore(so); + sbunlock(sb); + asb = *sb; + bzero((caddr_t)sb, sizeof (*sb)); + splx(s); + if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose) + (*pr->pr_domain->dom_dispose)(asb.sb_mb); + sbrelease(&asb); +} + +/* + * Perhaps this routine, and sooptcopyout(), below, ought to come in + * an additional variant to handle the case where the option value needs + * to be some kind of integer, but not a specific size. + * In addition to their use here, these functions are also called by the + * protocol-level pr_ctloutput() routines. + */ +int +sooptcopyin(sopt, buf, len, minlen) + struct sockopt *sopt; + void *buf; + size_t len; + size_t minlen; +{ + size_t valsize; + + /* + * If the user gives us more than we wanted, we ignore it, + * but if we don't get the minimum length the caller + * wants, we return EINVAL. On success, sopt->sopt_valsize + * is set to however much we actually retrieved. 
+ */ + if ((valsize = sopt->sopt_valsize) < minlen) + return EINVAL; + if (valsize > len) + sopt->sopt_valsize = valsize = len; + + if (sopt->sopt_p != 0) + return (copyin(sopt->sopt_val, buf, valsize)); + + bcopy(sopt->sopt_val, buf, valsize); + return 0; +} + +int +sosetopt(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error, optval; + struct linger l; + struct timeval tv; + short val; + struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_socontrol) + { error = (*kp->e_soif->sf_socontrol)(so, sopt, kp); + if (error) + return((error == EJUSTRETURN) ? 0 : error); + } + kp = kp->e_next; + } + + error = 0; + if (sopt->sopt_level != SOL_SOCKET) { + if (so->so_proto && so->so_proto->pr_ctloutput) + return ((*so->so_proto->pr_ctloutput) + (so, sopt)); + error = ENOPROTOOPT; + } else { + switch (sopt->sopt_name) { + case SO_LINGER: + error = sooptcopyin(sopt, &l, sizeof l, sizeof l); + if (error) + goto bad; + + so->so_linger = l.l_linger; + if (l.l_onoff) + so->so_options |= SO_LINGER; + else + so->so_options &= ~SO_LINGER; + break; + + case SO_DEBUG: + case SO_KEEPALIVE: + case SO_DONTROUTE: + case SO_USELOOPBACK: + case SO_BROADCAST: + case SO_REUSEADDR: + case SO_REUSEPORT: + case SO_OOBINLINE: + case SO_TIMESTAMP: + case SO_DONTTRUNC: + case SO_WANTMORE: + case SO_WANTOOBFLAG: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + goto bad; + if (optval) + so->so_options |= sopt->sopt_name; + else + so->so_options &= ~sopt->sopt_name; + break; + + case SO_SNDBUF: + case SO_RCVBUF: + case SO_SNDLOWAT: + case SO_RCVLOWAT: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + goto bad; + + /* + * Values < 1 make no sense for any of these + * options, so disallow them. + */ + if (optval < 1) { + error = EINVAL; + goto bad; + } + + switch (sopt->sopt_name) { + case SO_SNDBUF: + case SO_RCVBUF: + if (sbreserve(sopt->sopt_name == SO_SNDBUF ? 
+ &so->so_snd : &so->so_rcv, + (u_long) optval) == 0) { + error = ENOBUFS; + goto bad; + } + break; + + /* + * Make sure the low-water is never greater than + * the high-water. + */ + case SO_SNDLOWAT: + so->so_snd.sb_lowat = + (optval > so->so_snd.sb_hiwat) ? + so->so_snd.sb_hiwat : optval; + break; + case SO_RCVLOWAT: + so->so_rcv.sb_lowat = + (optval > so->so_rcv.sb_hiwat) ? + so->so_rcv.sb_hiwat : optval; + break; + } + break; + + case SO_SNDTIMEO: + case SO_RCVTIMEO: + error = sooptcopyin(sopt, &tv, sizeof tv, + sizeof tv); + if (error) + goto bad; + + if (tv.tv_sec > SHRT_MAX / hz - hz) { + error = EDOM; + goto bad; + } + val = tv.tv_sec * hz + tv.tv_usec / tick; + + switch (sopt->sopt_name) { + case SO_SNDTIMEO: + so->so_snd.sb_timeo = val; + break; + case SO_RCVTIMEO: + so->so_rcv.sb_timeo = val; + break; + } + break; + + case SO_NKE: + { struct so_nke nke; + struct NFDescriptor *nf1, *nf2 = NULL; + + error = sooptcopyin(sopt, &nke, + sizeof nke, sizeof nke); + if (error) + goto bad; + + error = nke_insert(so, &nke); + break; + } + + default: + error = ENOPROTOOPT; + break; + } + if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) { + (void) ((*so->so_proto->pr_ctloutput) + (so, sopt)); + } + } +bad: + return (error); +} + +/* Helper routine for getsockopt */ +int +sooptcopyout(sopt, buf, len) + struct sockopt *sopt; + void *buf; + size_t len; +{ + int error; + size_t valsize; + + error = 0; + + /* + * Documented get behavior is that we always return a value, + * possibly truncated to fit in the user's buffer. + * Traditional behavior is that we always tell the user + * precisely how much we copied, rather than something useful + * like the total amount we had available for her. + * Note that this interface is not idempotent; the entire answer must + * generated ahead of time. 
+ */ + valsize = min(len, sopt->sopt_valsize); + sopt->sopt_valsize = valsize; + if (sopt->sopt_val != 0) { + if (sopt->sopt_p != 0) + error = copyout(buf, sopt->sopt_val, valsize); + else + bcopy(buf, sopt->sopt_val, valsize); + } + return error; +} + +int +sogetopt(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error, optval; + struct linger l; + struct timeval tv; + struct mbuf *m; + struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_socontrol) + { error = (*kp->e_soif->sf_socontrol)(so, sopt, kp); + if (error) + return((error == EJUSTRETURN) ? 0 : error); + } + kp = kp->e_next; + } + + error = 0; + if (sopt->sopt_level != SOL_SOCKET) { + if (so->so_proto && so->so_proto->pr_ctloutput) { + return ((*so->so_proto->pr_ctloutput) + (so, sopt)); + } else + return (ENOPROTOOPT); + } else { + switch (sopt->sopt_name) { + case SO_LINGER: + l.l_onoff = so->so_options & SO_LINGER; + l.l_linger = so->so_linger; + error = sooptcopyout(sopt, &l, sizeof l); + break; + + case SO_USELOOPBACK: + case SO_DONTROUTE: + case SO_DEBUG: + case SO_KEEPALIVE: + case SO_REUSEADDR: + case SO_REUSEPORT: + case SO_BROADCAST: + case SO_OOBINLINE: + case SO_TIMESTAMP: + case SO_DONTTRUNC: + case SO_WANTMORE: + case SO_WANTOOBFLAG: + optval = so->so_options & sopt->sopt_name; +integer: + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + + case SO_TYPE: + optval = so->so_type; + goto integer; + + case SO_NREAD: + { int pkt_total; + struct mbuf *m1; + + pkt_total = 0; + m1 = so->so_rcv.sb_mb; + if (so->so_proto->pr_flags & PR_ATOMIC) + { +#if 0 + kprintf("SKT CC: %d\n", so->so_rcv.sb_cc); +#endif + while (m1) + { if (m1->m_type == MT_DATA) + pkt_total += m1->m_len; +#if 0 + kprintf("CNT: %d/%d\n", m1->m_len, pkt_total); +#endif + m1 = m1->m_next; + } + optval = pkt_total; + } else + optval = so->so_rcv.sb_cc; +#if 0 + kprintf("RTN: %d\n", optval); +#endif + goto integer; + } + case SO_ERROR: + optval = so->so_error; + 
so->so_error = 0; + goto integer; + + case SO_SNDBUF: + optval = so->so_snd.sb_hiwat; + goto integer; + + case SO_RCVBUF: + optval = so->so_rcv.sb_hiwat; + goto integer; + + case SO_SNDLOWAT: + optval = so->so_snd.sb_lowat; + goto integer; + + case SO_RCVLOWAT: + optval = so->so_rcv.sb_lowat; + goto integer; + + case SO_SNDTIMEO: + case SO_RCVTIMEO: + optval = (sopt->sopt_name == SO_SNDTIMEO ? + so->so_snd.sb_timeo : so->so_rcv.sb_timeo); + + tv.tv_sec = optval / hz; + tv.tv_usec = (optval % hz) * tick; + error = sooptcopyout(sopt, &tv, sizeof tv); + break; + + default: + error = ENOPROTOOPT; + break; + } + return (error); + } +} + +void +sohasoutofband(so) + register struct socket *so; +{ + struct proc *p; + + struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sohasoutofband) + { if ((*kp->e_soif->sf_sohasoutofband)(so, kp)) + return; + } + kp = kp->e_next; + } + if (so->so_pgid < 0) + gsignal(-so->so_pgid, SIGURG); + else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0) + psignal(p, SIGURG); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&so->so_rcv.sb_sel); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); +} + +/* + * Network filter support + */ +/* Run the list of filters, creating extension control blocks */ +sfilter_init(register struct socket *so) +{ struct kextcb *kp, **kpp; + struct protosw *prp; + struct NFDescriptor *nfp; + + prp = so->so_proto; + nfp = prp->pr_sfilter.tqh_first; /* non-null */ + kpp = &so->so_ext; + kp = NULL; + while (nfp) + { MALLOC(kp, struct kextcb *, sizeof(*kp), + M_TEMP, M_WAITOK); + if (kp == NULL) + return(ENOBUFS); /* so_free will clean up */ + *kpp = kp; + kpp = &kp->e_next; + kp->e_next = NULL; + kp->e_fcb = NULL; + kp->e_nfd = nfp; + kp->e_soif = nfp->nf_soif; + kp->e_sout = nfp->nf_soutil; + /* + * Ignore return value for create + * Everyone gets a chance at startup + */ + if (kp->e_soif && kp->e_soif->sf_socreate) + (*kp->e_soif->sf_socreate)(so, 
prp, kp); + nfp = nfp->nf_next.tqe_next; + } + return(0); +} + + +/* + * Run the list of filters, freeing extension control blocks + * Assumes the soif/soutil blocks have been handled. + */ +sfilter_term(struct socket *so) +{ struct kextcb *kp, *kp1; + + kp = so->so_ext; + while (kp) + { kp1 = kp->e_next; + /* + * Ignore return code on termination; everyone must + * get terminated. + */ + if (kp->e_soif && kp->e_soif->sf_sofree) + kp->e_soif->sf_sofree(so, kp); + FREE(kp, M_TEMP); + kp = kp1; + } + return(0); +} + + +int +sopoll(struct socket *so, int events, struct ucred *cred) +{ + struct proc *p = current_proc(); + int revents = 0; + int s = splnet(); + + if (events & (POLLIN | POLLRDNORM)) + if (soreadable(so)) + revents |= events & (POLLIN | POLLRDNORM); + + if (events & (POLLOUT | POLLWRNORM)) + if (sowriteable(so)) + revents |= events & (POLLOUT | POLLWRNORM); + + if (events & (POLLPRI | POLLRDBAND)) + if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) + revents |= events & (POLLPRI | POLLRDBAND); + + if (revents == 0) { + if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { + selrecord(p, &so->so_rcv.sb_sel); + so->so_rcv.sb_flags |= SB_SEL; + } + + if (events & (POLLOUT | POLLWRNORM)) { + selrecord(p, &so->so_snd.sb_sel); + so->so_snd.sb_flags |= SB_SEL; + } + } + + splx(s); + return (revents); +} + +/*#### IPv6 Integration. Added new routines */ +int +sooptgetm(struct sockopt *sopt, struct mbuf **mp) +{ + struct mbuf *m, *m_prev; + int sopt_size = sopt->sopt_valsize; + + MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA); + if (m == 0) + return ENOBUFS; + if (sopt_size > MLEN) { + MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + return ENOBUFS; + } + m->m_len = min(MCLBYTES, sopt_size); + } else { + m->m_len = min(MLEN, sopt_size); + } + sopt_size -= m->m_len; + *mp = m; + m_prev = m; + + while (sopt_size) { + MGET(m, sopt->sopt_p ? 
M_WAIT : M_DONTWAIT, MT_DATA); + if (m == 0) { + m_freem(*mp); + return ENOBUFS; + } + if (sopt_size > MLEN) { + MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_freem(*mp); + return ENOBUFS; + } + m->m_len = min(MCLBYTES, sopt_size); + } else { + m->m_len = min(MLEN, sopt_size); + } + sopt_size -= m->m_len; + m_prev->m_next = m; + m_prev = m; + } + return 0; +} + +/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */ +int +sooptmcopyin(struct sockopt *sopt, struct mbuf *m) +{ + struct mbuf *m0 = m; + + if (sopt->sopt_val == NULL) + return 0; + while (m != NULL && sopt->sopt_valsize >= m->m_len) { + if (sopt->sopt_p != NULL) { + int error; + + error = copyin(sopt->sopt_val, mtod(m, char *), + m->m_len); + if (error != 0) { + m_freem(m0); + return(error); + } + } else + bcopy(sopt->sopt_val, mtod(m, char *), m->m_len); + sopt->sopt_valsize -= m->m_len; + (caddr_t)sopt->sopt_val += m->m_len; + m = m->m_next; + } + if (m != NULL) /* should be allocated enoughly at ip6_sooptmcopyin() */ + panic("sooptmcopyin"); + return 0; +} + +/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. 
*/ +int +sooptmcopyout(struct sockopt *sopt, struct mbuf *m) +{ + struct mbuf *m0 = m; + size_t valsize = 0; + + if (sopt->sopt_val == NULL) + return 0; + while (m != NULL && sopt->sopt_valsize >= m->m_len) { + if (sopt->sopt_p != NULL) { + int error; + + error = copyout(mtod(m, char *), sopt->sopt_val, + m->m_len); + if (error != 0) { + m_freem(m0); + return(error); + } + } else + bcopy(mtod(m, char *), sopt->sopt_val, m->m_len); + sopt->sopt_valsize -= m->m_len; + (caddr_t)sopt->sopt_val += m->m_len; + valsize += m->m_len; + m = m->m_next; + } + if (m != NULL) { + /* enough soopt buffer should be given from user-land */ + m_freem(m0); + return(EINVAL); + } + sopt->sopt_valsize = valsize; + return 0; +} + diff --git a/bsd/kern/uipc_socket2.c b/bsd/kern/uipc_socket2.c new file mode 100644 index 000000000..0644a63d4 --- /dev/null +++ b/bsd/kern/uipc_socket2.c @@ -0,0 +1,1266 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Primitive routines for operating on sockets and socket buffers + */ + +u_long sb_max = SB_MAX; /* XXX should be static */ + +static u_long sb_efficiency = 8; /* parameter for sbreserve() */ + +char netcon[] = "netcon"; + +/* + * Procedures to manipulate state flags of socket + * and do appropriate wakeups. Normal sequence from the + * active (originating) side is that soisconnecting() is + * called during processing of connect() call, + * resulting in an eventual call to soisconnected() if/when the + * connection is established. When the connection is torn down + * soisdisconnecting() is called during processing of disconnect() call, + * and soisdisconnected() is called when the connection to the peer + * is totally severed. The semantics of these routines are such that + * connectionless protocols can call soisconnected() and soisdisconnected() + * only, bypassing the in-progress calls when setting up a ``connection'' + * takes no time. + * + * From the passive side, a socket is created with + * two queues of sockets: so_q0 for connections in progress + * and so_q for connections already made and awaiting user acceptance. + * As a protocol is preparing incoming connections, it creates a socket + * structure queued on so_q0 by calling sonewconn(). When the connection + * is established, soisconnected() is called, and transfers the + * socket structure to so_q, making it available to accept(). + * + * If a socket is closed with sockets on either + * so_q0 or so_q, these sockets are dropped. + * + * If higher level protocols are implemented in + * the kernel, the wakeups done here will sometimes + * cause software-interrupt process scheduling. 
+ */ + +void +soisconnecting(so) + register struct socket *so; +{ + + so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); + so->so_state |= SS_ISCONNECTING; +} + +void +soisconnected(so) + register struct socket *so; +{ register struct kextcb *kp; + register struct socket *head = so->so_head; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soisconnected) + { if ((*kp->e_soif->sf_soisconnected)(so, kp)) + return; + } + kp = kp->e_next; + } + + so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); + so->so_state |= SS_ISCONNECTED; + if (head && (so->so_state & SS_INCOMP)) { + postevent(head,0,EV_RCONN); + TAILQ_REMOVE(&head->so_incomp, so, so_list); + head->so_incqlen--; + so->so_state &= ~SS_INCOMP; + TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); + so->so_state |= SS_COMP; + sorwakeup(head); + wakeup((caddr_t)&head->so_timeo); + } else { + postevent(so,0,EV_WCONN); + wakeup((caddr_t)&so->so_timeo); + sorwakeup(so); + sowwakeup(so); + } +} + +void +soisdisconnecting(so) + register struct socket *so; +{ register struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soisdisconnecting) + { if ((*kp->e_soif->sf_soisdisconnecting)(so, kp)) + return; + } + kp = kp->e_next; + } + + so->so_state &= ~SS_ISCONNECTING; + so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE); + wakeup((caddr_t)&so->so_timeo); + sowwakeup(so); + sorwakeup(so); +} + +void +soisdisconnected(so) + register struct socket *so; +{ register struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soisdisconnected) + { if ((*kp->e_soif->sf_soisdisconnected)(so, kp)) + return; + } + kp = kp->e_next; + } + + so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); + so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE); + wakeup((caddr_t)&so->so_timeo); + sowwakeup(so); + sorwakeup(so); +} + +/* + * Return a random connection that hasn't been serviced yet and + 
 * is eligible for discard. There is a one in qlen chance that
 * we will return a null, saying that there are no dropable
 * requests. In this case, the protocol specific code should drop
 * the new request. This insures fairness.
 *
 * This may be used in conjunction with protocol specific queue
 * congestion routines.
 */
struct socket *
sodropablereq(head)
	register struct socket *head;
{
	register struct socket *so;
	unsigned int i, j, qlen;
	/* NOTE: rnd/old_runtime/cur_cnt/old_cnt are static, so this state is
	 * shared across ALL listening sockets, not per-socket. */
	static int rnd;
	static struct timeval old_runtime;
	static unsigned int cur_cnt, old_cnt;
	struct timeval tv;

	/* Once per second, fold the arrival count into a per-second rate. */
	microtime(&tv);
	if ((i = (tv.tv_sec - old_runtime.tv_sec)) != 0) {
		old_runtime = tv;
		old_cnt = cur_cnt / i;
		cur_cnt = 0;
	}

	/* No half-open connections queued: nothing to drop. */
	so = TAILQ_FIRST(&head->so_incomp);
	if (!so)
		return (so);

	/*
	 * Under pressure (arrival rate exceeds the queue length), pick a
	 * victim at a pseudo-random offset j in [0, qlen]; walking past the
	 * end yields NULL, giving the documented 1-in-(qlen+1) chance of
	 * dropping the new request instead of an old one.
	 */
	qlen = head->so_incqlen;
	if (++cur_cnt > qlen || old_cnt > qlen) {
		/* Linear congruential generator, low 16 bits kept. */
		rnd = (314159 * rnd + 66329) & 0xffff;
		j = ((qlen + 1) * rnd) >> 16;

		while (j-- && so)
			so = TAILQ_NEXT(so, so_list);
	}

	return (so);
}

/*
 * When an attempt at a new connection is noted on a socket
 * which accepts connections, sonewconn is called.  If the
 * connection is possible (subject to space constraints, etc.)
 * then we allocate a new structure, properly linked into the
 * data structure of the original socket, and return this.
 * Connstatus may be 0, or SO_ISCONFIRMING, or SO_ISCONNECTED.
+ */ +struct socket * +sonewconn(head, connstatus) + register struct socket *head; + int connstatus; +{ int error = 0; + register struct socket *so; + register struct kextcb *kp; + + if (head->so_qlen > 3 * head->so_qlimit / 2) + return ((struct socket *)0); + so = soalloc(0, head->so_proto->pr_domain->dom_family, head->so_type); + if (so == NULL) + return ((struct socket *)0); + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_sonewconn1) + { if ((*kp->e_soif->sf_sonewconn1)(so, connstatus, kp)) + return; + } + kp = kp->e_next; + } + + so->so_head = head; + so->so_type = head->so_type; + so->so_options = head->so_options &~ SO_ACCEPTCONN; + so->so_linger = head->so_linger; + so->so_state = head->so_state | SS_NOFDREF; + so->so_proto = head->so_proto; + so->so_timeo = head->so_timeo; + so->so_pgid = head->so_pgid; + so->so_uid = head->so_uid; + so->so_rcv.sb_flags |= SB_RECV; /* XXX */ + (void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat); + + if (so->so_proto->pr_sfilter.tqh_first) + error = sfilter_init(so); + if (error == 0 && (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) { + sfilter_term(so); + sodealloc(so); + return ((struct socket *)0); + } + so->so_proto->pr_domain->dom_refs++; + + if (connstatus) { + TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); + so->so_state |= SS_COMP; + } else { + TAILQ_INSERT_TAIL(&head->so_incomp, so, so_list); + so->so_state |= SS_INCOMP; + head->so_incqlen++; + } + head->so_qlen++; + if (connstatus) { + sorwakeup(head); + wakeup((caddr_t)&head->so_timeo); + so->so_state |= connstatus; + } + so->so_rcv.sb_so = so->so_snd.sb_so = so; + TAILQ_INIT(&so->so_evlist); + return (so); +} + +/* + * Socantsendmore indicates that no more data will be sent on the + * socket; it would normally be applied to a socket when the user + * informs the system that no more data is to be sent, by the protocol + * code (in case PRU_SHUTDOWN). 
Socantrcvmore indicates that no more data + * will be received, and will normally be applied to the socket by a + * protocol when it detects that the peer will send no more data. + * Data queued for reading in the socket may yet be read. + */ + +void +socantsendmore(so) + struct socket *so; +{ register struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_socantsendmore) + { if ((*kp->e_soif->sf_socantsendmore)(so, kp)) + return; + } + kp = kp->e_next; + } + + + so->so_state |= SS_CANTSENDMORE; + sowwakeup(so); +} + +void +socantrcvmore(so) + struct socket *so; +{ register struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_socantrcvmore) + { if ((*kp->e_soif->sf_socantrcvmore)(so, kp)) + return; + } + kp = kp->e_next; + } + + + so->so_state |= SS_CANTRCVMORE; + sorwakeup(so); +} + +/* + * Wait for data to arrive at/drain from a socket buffer. + */ +int +sbwait(sb) + struct sockbuf *sb; +{ + + sb->sb_flags |= SB_WAIT; + return (tsleep((caddr_t)&sb->sb_cc, + (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait", + sb->sb_timeo)); +} + +/* + * Lock a sockbuf already known to be locked; + * return any error returned from sleep (EINTR). + */ +int +sb_lock(sb) + register struct sockbuf *sb; +{ + int error; + + while (sb->sb_flags & SB_LOCK) { + sb->sb_flags |= SB_WANT; + error = tsleep((caddr_t)&sb->sb_flags, + (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK|PCATCH, + "sblock", 0); + if (error) + return (error); + } + sb->sb_flags |= SB_LOCK; + return (0); +} + +/* + * Wakeup processes waiting on a socket buffer. + * Do asynchronous notification via SIGIO + * if the socket has the SS_ASYNC flag set. 
+ */ +void +sowakeup(so, sb) + register struct socket *so; + register struct sockbuf *sb; +{ + struct proc *p = current_proc(); + + + sb->sb_flags &= ~SB_SEL; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&sb->sb_sel); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + if (sb->sb_flags & SB_WAIT) { + sb->sb_flags &= ~SB_WAIT; + wakeup((caddr_t)&sb->sb_cc); + } + if (so->so_state & SS_ASYNC) { + if (so->so_pgid < 0) + gsignal(-so->so_pgid, SIGIO); + else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0) + psignal(p, SIGIO); + } + + if (sb->sb_flags & SB_UPCALL) + (*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT); +} + +/* + * Socket buffer (struct sockbuf) utility routines. + * + * Each socket contains two socket buffers: one for sending data and + * one for receiving data. Each buffer contains a queue of mbufs, + * information about the number of mbufs and amount of data in the + * queue, and other fields allowing select() statements and notification + * on data availability to be implemented. + * + * Data stored in a socket buffer is maintained as a list of records. + * Each record is a list of mbufs chained together with the m_next + * field. Records are chained together with the m_nextpkt field. The upper + * level routine soreceive() expects the following conventions to be + * observed when placing information in the receive buffer: + * + * 1. If the protocol requires each message be preceded by the sender's + * name, then a record containing that name must be present before + * any associated data (mbuf's must be of type MT_SONAME). + * 2. If the protocol supports the exchange of ``access rights'' (really + * just additional data associated with the message), and there are + * ``rights'' to be received, then a record containing this data + * should be present (mbuf's must be of type MT_RIGHTS). + * 3. If a name or rights record exists, then it must be followed by + * a data record, perhaps of zero length. 
+ * + * Before using a new socket structure it is first necessary to reserve + * buffer space to the socket, by calling sbreserve(). This should commit + * some of the available buffer space in the system buffer pool for the + * socket (currently, it does nothing but enforce limits). The space + * should be released by calling sbrelease() when the socket is destroyed. + */ + +int +soreserve(so, sndcc, rcvcc) + register struct socket *so; + u_long sndcc, rcvcc; +{ + register struct kextcb *kp; + + kp = sotokextcb(so); + while (kp) + { if (kp->e_soif && kp->e_soif->sf_soreserve) + { if ((*kp->e_soif->sf_soreserve)(so, sndcc, rcvcc, kp)) + return; + } + kp = kp->e_next; + } + + if (sbreserve(&so->so_snd, sndcc) == 0) + goto bad; + if (sbreserve(&so->so_rcv, rcvcc) == 0) + goto bad2; + if (so->so_rcv.sb_lowat == 0) + so->so_rcv.sb_lowat = 1; + if (so->so_snd.sb_lowat == 0) + so->so_snd.sb_lowat = MCLBYTES; + if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat) + so->so_snd.sb_lowat = so->so_snd.sb_hiwat; + return (0); +bad2: + sbrelease(&so->so_snd); +bad: + return (ENOBUFS); +} + +/* + * Allot mbufs to a sockbuf. + * Attempt to scale mbmax so that mbcnt doesn't become limiting + * if buffering efficiency is near the normal case. + */ +int +sbreserve(sb, cc) + struct sockbuf *sb; + u_long cc; +{ + if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES)) + return (0); + sb->sb_hiwat = cc; + sb->sb_mbmax = min(cc * sb_efficiency, sb_max); + if (sb->sb_lowat > sb->sb_hiwat) + sb->sb_lowat = sb->sb_hiwat; + return (1); +} + +/* + * Free mbufs held by a socket, and reserved mbuf space. + */ +void +sbrelease(sb) + struct sockbuf *sb; +{ + + sbflush(sb); + sb->sb_hiwat = sb->sb_mbmax = 0; + + { + int oldpri = splimp(); + selthreadclear(&sb->sb_sel); + splx(oldpri); + } +} + +/* + * Routines to add and remove + * data from an mbuf queue. 
+ * + * The routines sbappend() or sbappendrecord() are normally called to + * append new mbufs to a socket buffer, after checking that adequate + * space is available, comparing the function sbspace() with the amount + * of data to be added. sbappendrecord() differs from sbappend() in + * that data supplied is treated as the beginning of a new record. + * To place a sender's address, optional access rights, and data in a + * socket receive buffer, sbappendaddr() should be used. To place + * access rights and data in a socket receive buffer, sbappendrights() + * should be used. In either case, the new data begins a new record. + * Note that unlike sbappend() and sbappendrecord(), these routines check + * for the caller that there will be enough space to store the data. + * Each fails if there is not enough space, or if it cannot find mbufs + * to store additional information in. + * + * Reliable protocols may use the socket send buffer to hold data + * awaiting acknowledgement. Data is normally copied from a socket + * send buffer in a protocol with m_copy for output to a peer, + * and then removing the data from the socket buffer with sbdrop() + * or sbdroprecord() when the data is acknowledged by the peer. + */ + +/* + * Append mbuf chain m to the last record in the + * socket buffer sb. The additional space associated + * the mbuf chain is recorded in sb. Empty mbufs are + * discarded and mbufs are compacted where possible. + */ +void +sbappend(sb, m) + struct sockbuf *sb; + struct mbuf *m; +{ register struct kextcb *kp; + register struct mbuf *n; + + if (m == 0) + return; + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbappend) + { if ((*kp->e_sout->su_sbappend)(sb, m, kp)) + return; + } + kp = kp->e_next; + } + + if (n = sb->sb_mb) { + while (n->m_nextpkt) + n = n->m_nextpkt; + do { + if (n->m_flags & M_EOR) { + sbappendrecord(sb, m); /* XXXXXX!!!! 
*/ + return; + } + } while (n->m_next && (n = n->m_next)); + } + sbcompress(sb, m, n); +} + +#ifdef SOCKBUF_DEBUG +void +sbcheck(sb) + register struct sockbuf *sb; +{ + register struct mbuf *m; + register struct mbuf *n = 0; + register u_long len = 0, mbcnt = 0; + + for (m = sb->sb_mb; m; m = n) { + n = m->m_nextpkt; + for (; m; m = m->m_next) { + len += m->m_len; + mbcnt += MSIZE; + if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */ + mbcnt += m->m_ext.ext_size; + if (m->m_nextpkt) + panic("sbcheck nextpkt"); + } + if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) { + printf("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc, + mbcnt, sb->sb_mbcnt); + panic("sbcheck"); + } +} +#endif + +/* + * As above, except the mbuf chain + * begins a new record. + */ +void +sbappendrecord(sb, m0) + register struct sockbuf *sb; + register struct mbuf *m0; +{ + register struct mbuf *m; + register struct kextcb *kp; + + if (m0 == 0) + return; + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbappendrecord) + { if ((*kp->e_sout->su_sbappendrecord)(sb, m0, kp)) + return; + } + kp = kp->e_next; + } + + m = sb->sb_mb; + if (m) + while (m->m_nextpkt) + m = m->m_nextpkt; + /* + * Put the first mbuf on the queue. + * Note this permits zero length records. + */ + sballoc(sb, m0); + if (m) + m->m_nextpkt = m0; + else + sb->sb_mb = m0; + m = m0->m_next; + m0->m_next = 0; + if (m && (m0->m_flags & M_EOR)) { + m0->m_flags &= ~M_EOR; + m->m_flags |= M_EOR; + } + sbcompress(sb, m, m0); +} + +/* + * As above except that OOB data + * is inserted at the beginning of the sockbuf, + * but after any other OOB data. 
+ */ +void +sbinsertoob(sb, m0) + register struct sockbuf *sb; + register struct mbuf *m0; +{ + register struct mbuf *m; + register struct mbuf **mp; + register struct kextcb *kp; + + if (m0 == 0) + return; + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbinsertoob) + { if ((*kp->e_sout->su_sbinsertoob)(sb, m0, kp)) + return; + } + kp = kp->e_next; + } + + for (mp = &sb->sb_mb; *mp ; mp = &((*mp)->m_nextpkt)) { + m = *mp; + again: + switch (m->m_type) { + + case MT_OOBDATA: + continue; /* WANT next train */ + + case MT_CONTROL: + m = m->m_next; + if (m) + goto again; /* inspect THIS train further */ + } + break; + } + /* + * Put the first mbuf on the queue. + * Note this permits zero length records. + */ + sballoc(sb, m0); + m0->m_nextpkt = *mp; + *mp = m0; + m = m0->m_next; + m0->m_next = 0; + if (m && (m0->m_flags & M_EOR)) { + m0->m_flags &= ~M_EOR; + m->m_flags |= M_EOR; + } + sbcompress(sb, m, m0); +} + +/* + * Append address and data, and optionally, control (ancillary) data + * to the receive queue of a socket. If present, + * m0 must include a packet header with total length. + * Returns 0 if no space in sockbuf or insufficient mbufs. 
+ */ +int +sbappendaddr(sb, asa, m0, control) + register struct sockbuf *sb; + struct sockaddr *asa; + struct mbuf *m0, *control; +{ + register struct mbuf *m, *n; + int space = asa->sa_len; + register struct kextcb *kp; + + if (m0 && (m0->m_flags & M_PKTHDR) == 0) + panic("sbappendaddr"); + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbappendaddr) + { if ((*kp->e_sout->su_sbappendaddr)(sb, asa, m0, control, kp)) + return 0; + } + kp = kp->e_next; + } + + if (m0) + space += m0->m_pkthdr.len; + for (n = control; n; n = n->m_next) { + space += n->m_len; + if (n->m_next == 0) /* keep pointer to last control buf */ + break; + } + if (space > sbspace(sb)) + return (0); + if (asa->sa_len > MLEN) + return (0); + MGET(m, M_DONTWAIT, MT_SONAME); + if (m == 0) + return (0); + m->m_len = asa->sa_len; + bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len); + if (n) + n->m_next = m0; /* concatenate data to control */ + else + control = m0; + m->m_next = control; + for (n = m; n; n = n->m_next) + sballoc(sb, n); + n = sb->sb_mb; + if (n) { + while (n->m_nextpkt) + n = n->m_nextpkt; + n->m_nextpkt = m; + } else + sb->sb_mb = m; + postevent(0,sb,EV_RWBYTES); + return (1); +} + +int +sbappendcontrol(sb, m0, control) + struct sockbuf *sb; + struct mbuf *control, *m0; +{ + register struct mbuf *m, *n; + int space = 0; + register struct kextcb *kp; + + if (control == 0) + panic("sbappendcontrol"); + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbappendcontrol) + { if ((*kp->e_sout->su_sbappendcontrol)(sb, m0, control, kp)) + return 0; + } + kp = kp->e_next; + } + + for (m = control; ; m = m->m_next) { + space += m->m_len; + if (m->m_next == 0) + break; + } + n = m; /* save pointer to last control buffer */ + for (m = m0; m; m = m->m_next) + space += m->m_len; + if (space > sbspace(sb)) + return (0); + n->m_next = m0; /* concatenate data to control */ + for (m = control; m; m = m->m_next) + sballoc(sb, m); + n = 
sb->sb_mb; + if (n) { + while (n->m_nextpkt) + n = n->m_nextpkt; + n->m_nextpkt = control; + } else + sb->sb_mb = control; + postevent(0,sb,EV_RWBYTES); + return (1); +} + +/* + * Compress mbuf chain m into the socket + * buffer sb following mbuf n. If n + * is null, the buffer is presumed empty. + */ +void +sbcompress(sb, m, n) + register struct sockbuf *sb; + register struct mbuf *m, *n; +{ + register int eor = 0; + register struct mbuf *o; + + while (m) { + eor |= m->m_flags & M_EOR; + if (m->m_len == 0 && + (eor == 0 || + (((o = m->m_next) || (o = n)) && + o->m_type == m->m_type))) { + m = m_free(m); + continue; + } + if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 && + (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] && + n->m_type == m->m_type) { + bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len, + (unsigned)m->m_len); + n->m_len += m->m_len; + sb->sb_cc += m->m_len; + m = m_free(m); + continue; + } + if (n) + n->m_next = m; + else + sb->sb_mb = m; + sballoc(sb, m); + n = m; + m->m_flags &= ~M_EOR; + m = m->m_next; + n->m_next = 0; + } + if (eor) { + if (n) + n->m_flags |= eor; + else + printf("semi-panic: sbcompress\n"); + } + postevent(0,sb, EV_RWBYTES); +} + +/* + * Free all mbufs in a sockbuf. + * Check that all resources are reclaimed. + */ +void +sbflush(sb) + register struct sockbuf *sb; +{ + register struct kextcb *kp; + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbflush) + { if ((*kp->e_sout->su_sbflush)(sb, kp)) + return; + } + kp = kp->e_next; + } + + if (sb->sb_flags & SB_LOCK) + panic("sbflush: locked"); + while (sb->sb_mbcnt && sb->sb_cc) + sbdrop(sb, (int)sb->sb_cc); + if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt) + panic("sbflush: cc %ld || mb %p || mbcnt %ld", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt); + postevent(0, sb, EV_RWBYTES); +} + +/* + * Drop data from (the front of) a sockbuf. 
+ */ +void +sbdrop(sb, len) + register struct sockbuf *sb; + register int len; +{ + register struct mbuf *m, *mn; + struct mbuf *next; + register struct kextcb *kp; + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbdrop) + { if ((*kp->e_sout->su_sbdrop)(sb, len, kp)) + return; + } + kp = kp->e_next; + } + + next = (m = sb->sb_mb) ? m->m_nextpkt : 0; + while (len > 0) { + if (m == 0) { + if (next == 0) + panic("sbdrop"); + m = next; + next = m->m_nextpkt; + continue; + } + if (m->m_len > len) { + m->m_len -= len; + m->m_data += len; + sb->sb_cc -= len; + break; + } + len -= m->m_len; + sbfree(sb, m); + MFREE(m, mn); + m = mn; + } + while (m && m->m_len == 0) { + sbfree(sb, m); + MFREE(m, mn); + m = mn; + } + if (m) { + sb->sb_mb = m; + m->m_nextpkt = next; + } else + sb->sb_mb = next; + postevent(0, sb, EV_RWBYTES); +} + +/* + * Drop a record off the front of a sockbuf + * and move the next record to the front. + */ +void +sbdroprecord(sb) + register struct sockbuf *sb; +{ + register struct mbuf *m, *mn; + register struct kextcb *kp; + + kp = sotokextcb(sbtoso(sb)); + while (kp) + { if (kp->e_sout && kp->e_sout->su_sbdroprecord) + { if ((*kp->e_sout->su_sbdroprecord)(sb, kp)) + return; + } + kp = kp->e_next; + } + + m = sb->sb_mb; + if (m) { + sb->sb_mb = m->m_nextpkt; + do { + sbfree(sb, m); + MFREE(m, mn); + } while (m = mn); + } + postevent(0, sb, EV_RWBYTES); +} + +/* + * Create a "control" mbuf containing the specified data + * with the specified type for presentation on a socket buffer. + */ +struct mbuf * +sbcreatecontrol(p, size, type, level) + caddr_t p; + register int size; + int type, level; +{ + register struct cmsghdr *cp; + struct mbuf *m; + + if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL) + return ((struct mbuf *) NULL); + cp = mtod(m, struct cmsghdr *); + /* XXX check size? 
*/ + (void)memcpy(CMSG_DATA(cp), p, size); + size += sizeof(*cp); + m->m_len = size; + cp->cmsg_len = size; + cp->cmsg_level = level; + cp->cmsg_type = type; + return (m); +} + +/* + * Some routines that return EOPNOTSUPP for entry points that are not + * supported by a protocol. Fill in as needed. + */ +int +pru_abort_notsupp(struct socket *so) +{ + return EOPNOTSUPP; +} + + +int +pru_accept_notsupp(struct socket *so, struct sockaddr **nam) +{ + return EOPNOTSUPP; +} + +int +pru_attach_notsupp(struct socket *so, int proto, struct proc *p) +{ + return EOPNOTSUPP; +} + +int +pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + return EOPNOTSUPP; +} + +int +pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + return EOPNOTSUPP; +} + +int +pru_connect2_notsupp(struct socket *so1, struct socket *so2) +{ + return EOPNOTSUPP; +} + +int +pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p) +{ + return EOPNOTSUPP; +} + +int +pru_detach_notsupp(struct socket *so) +{ + return EOPNOTSUPP; +} + +int +pru_disconnect_notsupp(struct socket *so) +{ + return EOPNOTSUPP; +} + +int +pru_listen_notsupp(struct socket *so, struct proc *p) +{ + return EOPNOTSUPP; +} + +int +pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) +{ + return EOPNOTSUPP; +} + +int +pru_rcvd_notsupp(struct socket *so, int flags) +{ + return EOPNOTSUPP; +} + +int +pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) +{ + return EOPNOTSUPP; +} + +int +pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p) + +{ + return EOPNOTSUPP; +} + + +/* + * This isn't really a ``null'' operation, but it's the default one + * and doesn't do anything destructive. 
+ */ +int +pru_sense_null(struct socket *so, struct stat *sb) +{ + sb->st_blksize = so->so_snd.sb_hiwat; + return 0; +} + + +int pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, + struct uio *uio, struct mbuf *top, + struct mbuf *control, int flags) + +{ + return EOPNOTSUPP; +} + +int pru_soreceive_notsupp(struct socket *so, + struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, + struct mbuf **controlp, int *flagsp) +{ + return EOPNOTSUPP; +} + +int + +pru_shutdown_notsupp(struct socket *so) +{ + return EOPNOTSUPP; +} + +int +pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) +{ + return EOPNOTSUPP; +} + +int pru_sosend(struct socket *so, struct sockaddr *addr, + struct uio *uio, struct mbuf *top, + struct mbuf *control, int flags) +{ + return EOPNOTSUPP; +} + +int pru_soreceive(struct socket *so, + struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, + struct mbuf **controlp, int *flagsp) +{ + return EOPNOTSUPP; +} + + +int pru_sopoll_notsupp(struct socket *so, int events, + struct ucred *cred) +{ + return EOPNOTSUPP; +} + + + +/* + * Make a copy of a sockaddr in a malloced buffer of type M_SONAME. + */ +struct sockaddr * +dup_sockaddr(sa, canwait) + struct sockaddr *sa; + int canwait; +{ + struct sockaddr *sa2; + + MALLOC(sa2, struct sockaddr *, sa->sa_len, M_SONAME, + canwait ? M_WAITOK : M_NOWAIT); + if (sa2) + bcopy(sa, sa2, sa->sa_len); + return sa2; +} + +/* + * Create an external-format (``xsocket'') structure using the information + * in the kernel-format socket structure pointed to by so. This is done + * to reduce the spew of irrelevant information over this interface, + * to isolate user code from changes in the kernel structure, and + * potentially to provide information-hiding if we decide that + * some of this information should be hidden from users. 
+ */ +void +sotoxsocket(struct socket *so, struct xsocket *xso) +{ + xso->xso_len = sizeof *xso; + xso->xso_so = so; + xso->so_type = so->so_type; + xso->so_options = so->so_options; + xso->so_linger = so->so_linger; + xso->so_state = so->so_state; + xso->so_pcb = so->so_pcb; + xso->xso_protocol = so->so_proto->pr_protocol; + xso->xso_family = so->so_proto->pr_domain->dom_family; + xso->so_qlen = so->so_qlen; + xso->so_incqlen = so->so_incqlen; + xso->so_qlimit = so->so_qlimit; + xso->so_timeo = so->so_timeo; + xso->so_error = so->so_error; + xso->so_pgid = so->so_pgid; + xso->so_oobmark = so->so_oobmark; + sbtoxsockbuf(&so->so_snd, &xso->so_snd); + sbtoxsockbuf(&so->so_rcv, &xso->so_rcv); + xso->so_uid = so->so_uid; +} + +/* + * This does the same for sockbufs. Note that the xsockbuf structure, + * since it is always embedded in a socket, does not include a self + * pointer nor a length. We make this entry point public in case + * some other mechanism needs it. + */ +void +sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) +{ + xsb->sb_cc = sb->sb_cc; + xsb->sb_hiwat = sb->sb_hiwat; + xsb->sb_mbcnt = sb->sb_mbcnt; + xsb->sb_mbmax = sb->sb_mbmax; + xsb->sb_lowat = sb->sb_lowat; + xsb->sb_flags = sb->sb_flags; + xsb->sb_timeo = sb->sb_timeo; +} + +/* + * Here is the definition of some of the basic objects in the kern.ipc + * branch of the MIB. + */ + + +SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC"); + +/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. 
*/ +static int dummy; +SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, ""); + +SYSCTL_INT(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW, &sb_max, 0, ""); +SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD, &maxsockets, 0, ""); +SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW, + &sb_efficiency, 0, ""); +SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD, &nmbclusters, 0, ""); + diff --git a/bsd/kern/uipc_syscalls.c b/bsd/kern/uipc_syscalls.c new file mode 100644 index 000000000..f27ada463 --- /dev/null +++ b/bsd/kern/uipc_syscalls.c @@ -0,0 +1,1974 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * sendfile(2) and related extensions: + * Copyright (c) 1998, David Greenman. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94 + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if KTRACE +#include +#endif +#include + +#include + +#if KDEBUG + +#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) +#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) +#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1) +#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3) +#define DBG_FNC_SENDMSG NETDBG_CODE(DBG_NETSOCK, (1 << 8) | 1) +#define DBG_FNC_SENDTO NETDBG_CODE(DBG_NETSOCK, (2 << 8) | 1) +#define DBG_FNC_SENDIT NETDBG_CODE(DBG_NETSOCK, (3 << 8) | 1) +#define DBG_FNC_RECVFROM NETDBG_CODE(DBG_NETSOCK, (5 << 8)) +#define DBG_FNC_RECVMSG NETDBG_CODE(DBG_NETSOCK, (6 << 8)) +#define DBG_FNC_RECVIT NETDBG_CODE(DBG_NETSOCK, (7 << 8)) + +#endif + +struct getsockname_args { + int fdes; + caddr_t asa; + int *alen; +}; + +struct getsockopt_args { + int s; + int level; + int name; + caddr_t val; + int *avalsize; +} ; + +struct accept_args { + int s; + caddr_t name; + int *anamelen; +}; + +struct getpeername_args { + int fdes; + caddr_t asa; + int *alen; +}; + + +/* ARGSUSED */ + +#if SENDFILE +static void sf_buf_init(void *arg); +SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL) +static struct sf_buf *sf_buf_alloc(void); +static void sf_buf_ref(caddr_t addr, u_int size); +static void sf_buf_free(caddr_t addr, u_int size); + +static SLIST_HEAD(, sf_buf) sf_freelist; +static vm_offset_t sf_base; +static struct sf_buf *sf_bufs; +static int sf_buf_alloc_want; +#endif + +static int sendit __P((struct proc *p, int s, struct msghdr *mp, int flags, register_t *retval)); +static int recvit __P((struct proc *p, int s, struct msghdr *mp, + caddr_t namelenp, register_t *retval)); + +static int accept1 __P((struct proc *p, struct accept_args *uap, register_t *retval, int compat)); +static int getsockname1 __P((struct proc *p, struct getsockname_args *uap, + register_t *retval, int 
compat)); +static int getpeername1 __P((struct proc *p, struct getpeername_args *uap, + register_t *retval, int compat)); + +/* + * System call interface to the socket abstraction. + */ +#if COMPAT_43 || defined(COMPAT_SUNOS) +#define COMPAT_OLDSOCK +#endif + +extern struct fileops socketops; + +struct socket_args { + int domain; + int type; + int protocol; +}; +int +socket(p, uap, retval) + struct proc *p; + register struct socket_args *uap; + register_t *retval; +{ + struct filedesc *fdp = p->p_fd; + struct socket *so; + struct file *fp; + int fd, error; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + error = falloc(p, &fp, &fd); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + if (error) + return (error); + fp->f_flag = FREAD|FWRITE; + fp->f_type = DTYPE_SOCKET; + fp->f_ops = &socketops; + if (error = socreate(uap->domain, &so, uap->type, + uap->protocol)) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + fdrelse(p, fd); + ffree(fp); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } else { + fp->f_data = (caddr_t)so; + *fdflags(p, fd) &= ~UF_RESERVED; + *retval = fd; + } + return (error); +} + +struct bind_args { + int s; + caddr_t name; + int namelen; +}; + +/* ARGSUSED */ +int +bind(p, uap, retval) + struct proc *p; + register struct bind_args *uap; + register_t *retval; +{ + struct file *fp; + struct sockaddr *sa; + int error; + + error = getsock(p->p_fd, uap->s, &fp); + if (error) + return (error); + error = getsockaddr(&sa, uap->name, uap->namelen); + if (error) + return (error); + error = sobind((struct socket *)fp->f_data, sa); + FREE(sa, M_SONAME); + return (error); +} + +struct listen_args { + int s; + int backlog; +}; + + + +int +listen(p, uap, retval) + struct proc *p; + register struct listen_args *uap; + register_t *retval; +{ + struct file *fp; + int error; + + error = getsock(p->p_fd, uap->s, &fp); + if (error) + return (error); + return (solisten((struct socket *)fp->f_data, uap->backlog)); +} + +#ifndef 
COMPAT_OLDSOCK +#define accept1 accept +#endif + + + +int +accept1(p, uap, retval, compat) + struct proc *p; + register struct accept_args *uap; + register_t *retval; + int compat; +{ + struct file *fp; + struct sockaddr *sa; + u_int namelen; + int error, s; + struct socket *head, *so; + int fd; + short fflag; /* type must match fp->f_flag */ + int tmpfd; + + if (uap->name) { + error = copyin((caddr_t)uap->anamelen, (caddr_t)&namelen, + sizeof (namelen)); + if(error) + return (error); + } + error = getsock(p->p_fd, uap->s, &fp); + if (error) + return (error); + s = splnet(); + head = (struct socket *)fp->f_data; + if ((head->so_options & SO_ACCEPTCONN) == 0) { + splx(s); + return (EINVAL); + } + if ((head->so_state & SS_NBIO) && head->so_comp.tqh_first == NULL) { + splx(s); + return (EWOULDBLOCK); + } + while (head->so_comp.tqh_first == NULL && head->so_error == 0) { + if (head->so_state & SS_CANTRCVMORE) { + head->so_error = ECONNABORTED; + break; + } + error = tsleep((caddr_t)&head->so_timeo, PSOCK | PCATCH, + "accept", 0); + if (error) { + splx(s); + return (error); + } + } + if (head->so_error) { + error = head->so_error; + head->so_error = 0; + splx(s); + return (error); + } + + + /* + * At this point we know that there is at least one connection + * ready to be accepted. Remove it from the queue prior to + * allocating the file descriptor for it since falloc() may + * block allowing another process to accept the connection + * instead. + */ + so = head->so_comp.tqh_first; + TAILQ_REMOVE(&head->so_comp, so, so_list); + head->so_qlen--; + + fflag = fp->f_flag; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + error = falloc(p, &fp, &fd); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if (error) { + /* + * Probably ran out of file descriptors. Put the + * unaccepted connection back onto the queue and + * do another wakeup so some other process might + * have a chance at it. 
+ */ + TAILQ_INSERT_HEAD(&head->so_comp, so, so_list); + head->so_qlen++; + wakeup_one(&head->so_timeo); + splx(s); + return (error); + } else { + *fdflags(p, fd) &= ~UF_RESERVED; + *retval = fd; + } + + so->so_state &= ~SS_COMP; + so->so_head = NULL; + fp->f_type = DTYPE_SOCKET; + fp->f_flag = fflag; + fp->f_ops = &socketops; + fp->f_data = (caddr_t)so; + sa = 0; + (void) soaccept(so, &sa); + if (sa == 0) { + namelen = 0; + if (uap->name) + goto gotnoname; + return 0; + } + if (uap->name) { + /* check sa_len before it is destroyed */ + if (namelen > sa->sa_len) + namelen = sa->sa_len; +#ifdef COMPAT_OLDSOCK + if (compat) + ((struct osockaddr *)sa)->sa_family = + sa->sa_family; +#endif + error = copyout(sa, (caddr_t)uap->name, (u_int)namelen); + if (!error) +gotnoname: + error = copyout((caddr_t)&namelen, + (caddr_t)uap->anamelen, sizeof (*uap->anamelen)); + } + FREE(sa, M_SONAME); + splx(s); + return (error); +} + +int +accept(p, uap, retval) + struct proc *p; + struct accept_args *uap; + register_t *retval; +{ + + return (accept1(p, uap, retval, 0)); +} + +#ifdef COMPAT_OLDSOCK +int +oaccept(p, uap, retval) + struct proc *p; + struct accept_args *uap; + register_t *retval; +{ + + return (accept1(p, uap, retval, 1)); +} +#endif /* COMPAT_OLDSOCK */ + +struct connect_args { + int s; + caddr_t name; + int namelen; +}; +/* ARGSUSED */ +int +connect(p, uap, retval) + struct proc *p; + register struct connect_args *uap; + register_t *retval; +{ + struct file *fp; + register struct socket *so; + struct sockaddr *sa; + int error, s; + + error = getsock(p->p_fd, uap->s, &fp); + if (error) + return (error); + so = (struct socket *)fp->f_data; + if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) + return (EALREADY); + error = getsockaddr(&sa, uap->name, uap->namelen); + if (error) + return (error); + error = soconnect(so, sa); + if (error) + goto bad; + if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) { + FREE(sa, M_SONAME); + return 
(EINPROGRESS); + } + s = splnet(); + while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { + error = tsleep((caddr_t)&so->so_timeo, PSOCK | PCATCH, + "connec", 0); + if (error) + break; + } + if (error == 0) { + error = so->so_error; + so->so_error = 0; + } + splx(s); +bad: + so->so_state &= ~SS_ISCONNECTING; + FREE(sa, M_SONAME); + if (error == ERESTART) + error = EINTR; + return (error); +} + +struct socketpair_args { + int domain; + int type; + int protocol; + int *rsv; +}; +int +socketpair(p, uap, retval) + struct proc *p; + register struct socketpair_args *uap; + register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + struct file *fp1, *fp2; + struct socket *so1, *so2; + int fd, error, sv[2]; + + error = socreate(uap->domain, &so1, uap->type, uap->protocol); + if (error) + return (error); + error = socreate(uap->domain, &so2, uap->type, uap->protocol); + if (error) + goto free1; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + error = falloc(p, &fp1, &fd); + if (error) + goto free2; + sv[0] = fd; + fp1->f_flag = FREAD|FWRITE; + fp1->f_type = DTYPE_SOCKET; + fp1->f_ops = &socketops; + fp1->f_data = (caddr_t)so1; + error = falloc(p, &fp2, &fd); + if (error) + goto free3; + fp2->f_flag = FREAD|FWRITE; + fp2->f_type = DTYPE_SOCKET; + fp2->f_ops = &socketops; + fp2->f_data = (caddr_t)so2; + sv[1] = fd; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + error = soconnect2(so1, so2); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto free4; + } + + if (uap->type == SOCK_DGRAM) { + /* + * Datagram socket connection is asymmetric. + */ + error = soconnect2(so2, so1); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto free4; + } + } + *fdflags(p, sv[0]) &= ~UF_RESERVED; + *fdflags(p, sv[1]) &= ~UF_RESERVED; + error = copyout((caddr_t)sv, (caddr_t)uap->rsv, + 2 * sizeof (int)); +#if 0 /* old pipe(2) syscall compatability, unused these days */ + retval[0] = sv[0]; /* XXX ??? 
*/ + retval[1] = sv[1]; /* XXX ??? */ +#endif /* 0 */ + return (error); +free4: + fdrelse(p, sv[1]); + ffree(fp2); +free3: + fdrelse(p, sv[0]); + ffree(fp1); +free2: + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + (void)soclose(so2); +free1: + (void)soclose(so1); + return (error); +} + +static int +sendit(p, s, mp, flags, retsize) + register struct proc *p; + int s; + register struct msghdr *mp; + int flags; + register_t *retsize; +{ + struct file *fp; + struct uio auio; + register struct iovec *iov; + register int i; + struct mbuf *control; + struct sockaddr *to; + int len, error; + struct socket *so; +#if KTRACE + struct iovec *ktriov = NULL; +#endif + + KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_START, 0,0,0,0,0); + + if (error = getsock(p->p_fd, s, &fp)) + { + KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + + auio.uio_iov = mp->msg_iov; + auio.uio_iovcnt = mp->msg_iovlen; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_rw = UIO_WRITE; + auio.uio_procp = p; + auio.uio_offset = 0; /* XXX */ + auio.uio_resid = 0; + iov = mp->msg_iov; + for (i = 0; i < mp->msg_iovlen; i++, iov++) { + if (iov->iov_len < 0) + { + KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, EINVAL,0,0,0,0); + return (EINVAL); + } + + if ((auio.uio_resid += iov->iov_len) < 0) + { + KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, EINVAL,0,0,0,0); + return (EINVAL); + } + } + if (mp->msg_name) { + error = getsockaddr(&to, mp->msg_name, mp->msg_namelen); + if (error) { + KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + } else + to = 0; + if (mp->msg_control) { + if (mp->msg_controllen < sizeof(struct cmsghdr) +#ifdef COMPAT_OLDSOCK + && mp->msg_flags != MSG_COMPAT +#endif + ) { + error = EINVAL; + goto bad; + } + error = sockargs(&control, mp->msg_control, + mp->msg_controllen, MT_CONTROL); + if (error) + goto bad; +#ifdef COMPAT_OLDSOCK + if (mp->msg_flags == MSG_COMPAT) { + register struct cmsghdr *cm; + + M_PREPEND(control, 
sizeof(*cm), M_WAIT); + if (control == 0) { + error = ENOBUFS; + goto bad; + } else { + cm = mtod(control, struct cmsghdr *); + cm->cmsg_len = control->m_len; + cm->cmsg_level = SOL_SOCKET; + cm->cmsg_type = SCM_RIGHTS; + } + } +#endif + } else + control = 0; + + len = auio.uio_resid; + so = (struct socket *)fp->f_data; + error = so->so_proto->pr_usrreqs->pru_sosend(so, to, &auio, 0, control, + flags); + if (error) { + if (auio.uio_resid != len && (error == ERESTART || + error == EINTR || error == EWOULDBLOCK)) + error = 0; + if (error == EPIPE) + psignal(p, SIGPIPE); + } + if (error == 0) + *retsize = len - auio.uio_resid; +#if KTRACE + if (ktriov != NULL) { + if (error == 0) + ktrgenio(p->p_tracep, s, UIO_WRITE, + ktriov, *retsize, error); + FREE(ktriov, M_TEMP); + } +#endif +bad: + if (to) + FREE(to, M_SONAME); + KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, error,0,0,0,0); + return (error); +} + + +struct sendto_args { + int s; + caddr_t buf; + size_t len; + int flags; + caddr_t to; + int tolen; +}; + +int +sendto(p, uap, retval) + struct proc *p; + register struct sendto_args /* { + int s; + caddr_t buf; + size_t len; + int flags; + caddr_t to; + int tolen; + } */ *uap; + register_t *retval; + +{ + struct msghdr msg; + struct iovec aiov; + int stat; + + KERNEL_DEBUG(DBG_FNC_SENDTO | DBG_FUNC_START, 0,0,0,0,0); + + msg.msg_name = uap->to; + msg.msg_namelen = uap->tolen; + msg.msg_iov = &aiov; + msg.msg_iovlen = 1; + msg.msg_control = 0; +#ifdef COMPAT_OLDSOCK + msg.msg_flags = 0; +#endif + aiov.iov_base = uap->buf; + aiov.iov_len = uap->len; + stat = sendit(p, uap->s, &msg, uap->flags, retval); + KERNEL_DEBUG(DBG_FNC_SENDTO | DBG_FUNC_END, stat, *retval,0,0,0); + return(stat); +} + +#ifdef COMPAT_OLDSOCK +struct osend_args { + int s; + caddr_t buf; + int len; + int flags; +}; + +int +osend(p, uap, retval) + struct proc *p; + register struct osend_args /* { + int s; + caddr_t buf; + int len; + int flags; + } */ *uap; + register_t *retval; + +{ + struct msghdr msg; 
+ struct iovec aiov; + + msg.msg_name = 0; + msg.msg_namelen = 0; + msg.msg_iov = &aiov; + msg.msg_iovlen = 1; + aiov.iov_base = uap->buf; + aiov.iov_len = uap->len; + msg.msg_control = 0; + msg.msg_flags = 0; + return (sendit(p, uap->s, &msg, uap->flags, retval)); +} +struct osendmsg_args { + int s; + caddr_t msg; + int flags; +}; + +int +osendmsg(p, uap, retval) + struct proc *p; + register struct osendmsg_args /* { + int s; + caddr_t msg; + int flags; + } */ *uap; + register_t *retval; + +{ + struct msghdr msg; + struct iovec aiov[UIO_SMALLIOV], *iov; + int error; + + error = copyin(uap->msg, (caddr_t)&msg, sizeof (struct omsghdr)); + if (error) + return (error); + if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { + if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) + return (EMSGSIZE); + MALLOC(iov, struct iovec *, + sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, + M_WAITOK); + } else + iov = aiov; + error = copyin((caddr_t)msg.msg_iov, (caddr_t)iov, + (unsigned)(msg.msg_iovlen * sizeof (struct iovec))); + if (error) + goto done; + msg.msg_flags = MSG_COMPAT; + msg.msg_iov = iov; + error = sendit(p, uap->s, &msg, uap->flags, retval); +done: + if (iov != aiov) + FREE(iov, M_IOV); + return (error); +} +#endif + +struct sendmsg_args { + int s; + caddr_t msg; + int flags; +}; + +int +sendmsg(p, uap, retval) + struct proc *p; + register struct sendmsg_args *uap; + register_t *retval; +{ + struct msghdr msg; + struct iovec aiov[UIO_SMALLIOV], *iov; + int error; + + KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_START, 0,0,0,0,0); + if (error = copyin(uap->msg, (caddr_t)&msg, sizeof (msg))) + { + KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + + if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { + if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) { + KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, EMSGSIZE,0,0,0,0); + return (EMSGSIZE); + } + MALLOC(iov, struct iovec *, + sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, + M_WAITOK); + } else + iov = aiov; + if 
(msg.msg_iovlen && + (error = copyin((caddr_t)msg.msg_iov, (caddr_t)iov, + (unsigned)(msg.msg_iovlen * sizeof (struct iovec))))) + goto done; + msg.msg_iov = iov; +#ifdef COMPAT_OLDSOCK + msg.msg_flags = 0; +#endif + error = sendit(p, uap->s, &msg, uap->flags, retval); +done: + if (iov != aiov) + FREE(iov, M_IOV); + KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error,0,0,0,0); + return (error); +} + +static int +recvit(p, s, mp, namelenp, retval) + register struct proc *p; + int s; + register struct msghdr *mp; + caddr_t namelenp; + register_t *retval; +{ + struct file *fp; + struct uio auio; + register struct iovec *iov; + register int i; + int len, error; + struct mbuf *m, *control = 0; + caddr_t ctlbuf; + struct socket *so; + struct sockaddr *fromsa = 0; +#if KTRACE + struct iovec *ktriov = NULL; +#endif + + KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_START, 0,0,0,0,0); + if (error = getsock(p->p_fd, s, &fp)) + { + KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + + auio.uio_iov = mp->msg_iov; + auio.uio_iovcnt = mp->msg_iovlen; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_rw = UIO_READ; + auio.uio_procp = p; + auio.uio_offset = 0; /* XXX */ + auio.uio_resid = 0; + iov = mp->msg_iov; + for (i = 0; i < mp->msg_iovlen; i++, iov++) { + if ((auio.uio_resid += iov->iov_len) < 0) { + KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, EINVAL,0,0,0,0); + return (EINVAL); + } + } +#if KTRACE + if (KTRPOINT(p, KTR_GENIO)) { + int iovlen = auio.uio_iovcnt * sizeof (struct iovec); + + MALLOC(ktriov, struct iovec *, iovlen, M_TEMP, M_WAITOK); + bcopy((caddr_t)auio.uio_iov, (caddr_t)ktriov, iovlen); + } +#endif + len = auio.uio_resid; + so = (struct socket *)fp->f_data; + error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, &auio, + (struct mbuf **)0, mp->msg_control ? 
&control : (struct mbuf **)0, + &mp->msg_flags); + if (error) { + if (auio.uio_resid != len && (error == ERESTART || + error == EINTR || error == EWOULDBLOCK)) + error = 0; + } +#if KTRACE + if (ktriov != NULL) { + if (error == 0) + ktrgenio(p->p_tracep, s, UIO_WRITE, + ktriov, len - auio.uio_resid, error); + FREE(ktriov, M_TEMP); + } +#endif + if (error) + goto out; + *retval = len - auio.uio_resid; + if (mp->msg_name) { + len = mp->msg_namelen; + if (len <= 0 || fromsa == 0) + len = 0; + else { +#ifndef MIN +#define MIN(a,b) ((a)>(b)?(b):(a)) +#endif + /* save sa_len before it is destroyed by MSG_COMPAT */ + len = MIN(len, fromsa->sa_len); +#ifdef COMPAT_OLDSOCK + if (mp->msg_flags & MSG_COMPAT) + ((struct osockaddr *)fromsa)->sa_family = + fromsa->sa_family; +#endif + error = copyout(fromsa, + (caddr_t)mp->msg_name, (unsigned)len); + if (error) + goto out; + } + mp->msg_namelen = len; + if (namelenp && + (error = copyout((caddr_t)&len, namelenp, sizeof (int)))) { +#ifdef COMPAT_OLDSOCK + if (mp->msg_flags & MSG_COMPAT) + error = 0; /* old recvfrom didn't check */ + else +#endif + goto out; + } + } + if (mp->msg_control) { +#ifdef COMPAT_OLDSOCK + /* + * We assume that old recvmsg calls won't receive access + * rights and other control info, esp. as control info + * is always optional and those options didn't exist in 4.3. + * If we receive rights, trim the cmsghdr; anything else + * is tossed. 
+ */ + if (control && mp->msg_flags & MSG_COMPAT) { + if (mtod(control, struct cmsghdr *)->cmsg_level != + SOL_SOCKET || + mtod(control, struct cmsghdr *)->cmsg_type != + SCM_RIGHTS) { + mp->msg_controllen = 0; + goto out; + } + control->m_len -= sizeof (struct cmsghdr); + control->m_data += sizeof (struct cmsghdr); + } +#endif + len = mp->msg_controllen; + m = control; + mp->msg_controllen = 0; + ctlbuf = (caddr_t) mp->msg_control; + + while (m && len > 0) { + unsigned int tocopy; + + if (len >= m->m_len) + tocopy = m->m_len; + else { + mp->msg_flags |= MSG_CTRUNC; + tocopy = len; + } + + if (error = copyout((caddr_t)mtod(m, caddr_t), + ctlbuf, tocopy)) + goto out; + + ctlbuf += tocopy; + len -= tocopy; + m = m->m_next; + } + mp->msg_controllen = ctlbuf - mp->msg_control; + } +out: + if (fromsa) + FREE(fromsa, M_SONAME); + if (control) + m_freem(control); + KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error,0,0,0,0); + return (error); +} + + +struct recvfrom_args { + int s; + caddr_t buf; + size_t len; + int flags; + caddr_t from; + int *fromlenaddr; +}; + +int +recvfrom(p, uap, retval) + struct proc *p; + register struct recvfrom_args /* { + int s; + caddr_t buf; + size_t len; + int flags; + caddr_t from; + int *fromlenaddr; + } */ *uap; + register_t *retval; +{ + struct msghdr msg; + struct iovec aiov; + int error; + + KERNEL_DEBUG(DBG_FNC_RECVFROM | DBG_FUNC_START, 0,0,0,0,0); + + if (uap->fromlenaddr) { + error = copyin((caddr_t)uap->fromlenaddr, + (caddr_t)&msg.msg_namelen, sizeof (msg.msg_namelen)); + if (error) + return (error); + } else + msg.msg_namelen = 0; + msg.msg_name = uap->from; + msg.msg_iov = &aiov; + msg.msg_iovlen = 1; + aiov.iov_base = uap->buf; + aiov.iov_len = uap->len; + msg.msg_control = 0; + msg.msg_flags = uap->flags; + KERNEL_DEBUG(DBG_FNC_RECVFROM | DBG_FUNC_END, error,0,0,0,0); + return (recvit(p, uap->s, &msg, (caddr_t)uap->fromlenaddr, retval)); +} + +#ifdef COMPAT_OLDSOCK +int +orecvfrom(p, uap, retval) + struct proc *p; + struct 
recvfrom_args *uap; + register_t *retval; +{ + + uap->flags |= MSG_COMPAT; + return (recvfrom(p, uap)); +} +#endif + + +#ifdef COMPAT_OLDSOCK +int +orecv(p, uap, retval) + struct proc *p; + register struct orecv_args { + int s; + caddr_t buf; + int len; + int flags; + } *uap; + register_t *retval; +{ + struct msghdr msg; + struct iovec aiov; + + msg.msg_name = 0; + msg.msg_namelen = 0; + msg.msg_iov = &aiov; + msg.msg_iovlen = 1; + aiov.iov_base = uap->buf; + aiov.iov_len = uap->len; + msg.msg_control = 0; + msg.msg_flags = uap->flags; + return (recvit(p, uap->s, &msg, (caddr_t)0, retval)); +} + +/* + * Old recvmsg. This code takes advantage of the fact that the old msghdr + * overlays the new one, missing only the flags, and with the (old) access + * rights where the control fields are now. + */ +int +orecvmsg(p, uap, retval) + struct proc *p; + register struct orecvmsg_args { + int s; + struct omsghdr *msg; + int flags; + } *uap; + register_t *retval; +{ + struct msghdr msg; + struct iovec aiov[UIO_SMALLIOV], *iov; + int error; + + error = copyin((caddr_t)uap->msg, (caddr_t)&msg, + sizeof (struct omsghdr)); + if (error) + return (error); + if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { + if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) + return (EMSGSIZE); + MALLOC(iov, struct iovec *, + sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, + M_WAITOK); + } else + iov = aiov; + msg.msg_flags = uap->flags | MSG_COMPAT; + error = copyin((caddr_t)msg.msg_iov, (caddr_t)iov, + (unsigned)(msg.msg_iovlen * sizeof (struct iovec))); + if (error) + goto done; + msg.msg_iov = iov; + error = recvit(p, uap->s, &msg, (caddr_t)&uap->msg->msg_namelen, retval); + + if (msg.msg_controllen && error == 0) + error = copyout((caddr_t)&msg.msg_controllen, + (caddr_t)&uap->msg->msg_accrightslen, sizeof (int)); +done: + if (iov != aiov) + FREE(iov, M_IOV); + return (error); +} +#endif + +int +recvmsg(p, uap, retval) + struct proc *p; + register struct recvmsg_args { + int s; + struct msghdr *msg; + 
int flags; + } *uap; + register_t *retval; +{ + struct msghdr msg; + struct iovec aiov[UIO_SMALLIOV], *uiov, *iov; + register int error; + + KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_START, 0,0,0,0,0); + if (error = copyin((caddr_t)uap->msg, (caddr_t)&msg, + sizeof (msg))) + { + KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, error,0,0,0,0); + return (error); + } + + if ((u_int)msg.msg_iovlen >= UIO_SMALLIOV) { + if ((u_int)msg.msg_iovlen >= UIO_MAXIOV) { + KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, EMSGSIZE,0,0,0,0); + return (EMSGSIZE); + } + MALLOC(iov, struct iovec *, + sizeof(struct iovec) * (u_int)msg.msg_iovlen, M_IOV, + M_WAITOK); + } else + iov = aiov; +#ifdef COMPAT_OLDSOCK + msg.msg_flags = uap->flags &~ MSG_COMPAT; +#else + msg.msg_flags = uap->flags; +#endif + uiov = msg.msg_iov; + msg.msg_iov = iov; + error = copyin((caddr_t)uiov, (caddr_t)iov, + (unsigned)(msg.msg_iovlen * sizeof (struct iovec))); + if (error) + goto done; + error = recvit(p, uap->s, &msg, (caddr_t)0, retval); + if (!error) { + msg.msg_iov = uiov; + error = copyout((caddr_t)&msg, (caddr_t)uap->msg, sizeof(msg)); + } +done: + if (iov != aiov) + FREE(iov, M_IOV); + KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, error,0,0,0,0); + return (error); +} + +/* ARGSUSED */ +int +shutdown(p, uap, retval) + struct proc *p; + register struct shutdown_args { + int s; + int how; + } *uap; + register_t *retval; +{ + struct file *fp; + int error; + + error = getsock(p->p_fd, uap->s, &fp); + if (error) + return (error); + return (soshutdown((struct socket *)fp->f_data, uap->how)); +} + + + + + +/* ARGSUSED */ +int +setsockopt(p, uap, retval) + struct proc *p; + register struct setsockopt_args { + int s; + int level; + int name; + caddr_t val; + int valsize; + } *uap; + register_t *retval; +{ + struct file *fp; + struct sockopt sopt; + int error; + + if (uap->val == 0 && uap->valsize != 0) + return (EFAULT); + if (uap->valsize < 0) + return (EINVAL); + + error = getsock(p->p_fd, uap->s, &fp); + if (error) + 
return (error); + + sopt.sopt_dir = SOPT_SET; + sopt.sopt_level = uap->level; + sopt.sopt_name = uap->name; + sopt.sopt_val = uap->val; + sopt.sopt_valsize = uap->valsize; + sopt.sopt_p = p; + + return (sosetopt((struct socket *)fp->f_data, &sopt)); +} + + + +int +getsockopt(p, uap, retval) + struct proc *p; + struct getsockopt_args *uap; + register_t *retval; +{ + int valsize, error; + struct file *fp; + struct sockopt sopt; + + error = getsock(p->p_fd, uap->s, &fp); + if (error) + return (error); + if (uap->val) { + error = copyin((caddr_t)uap->avalsize, (caddr_t)&valsize, + sizeof (valsize)); + if (error) + return (error); + if (valsize < 0) + return (EINVAL); + } else + valsize = 0; + + sopt.sopt_dir = SOPT_GET; + sopt.sopt_level = uap->level; + sopt.sopt_name = uap->name; + sopt.sopt_val = uap->val; + sopt.sopt_valsize = (size_t)valsize; /* checked non-negative above */ + sopt.sopt_p = p; + + error = sogetopt((struct socket *)fp->f_data, &sopt); + if (error == 0) { + valsize = sopt.sopt_valsize; + error = copyout((caddr_t)&valsize, + (caddr_t)uap->avalsize, sizeof (valsize)); + } + return (error); +} + + + +struct pipe_args { + int dummy; +}; +/* ARGSUSED */ +int +pipe(p, uap, retval) + struct proc *p; + struct pipe_args *uap; + register_t *retval; +{ + struct file *rf, *wf; + struct socket *rso, *wso; + int fd, error; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if (error = socreate(AF_UNIX, &rso, SOCK_STREAM, 0)) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + if (error = socreate(AF_UNIX, &wso, SOCK_STREAM, 0)) { + goto free1; + } + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + error = falloc(p, &rf, &fd); + if (error) + goto free2; + retval[0] = fd; + rf->f_flag = FREAD; + rf->f_type = DTYPE_SOCKET; + rf->f_ops = &socketops; + rf->f_data = (caddr_t)rso; + if (error = falloc(p, &wf, &fd)) + goto free3; + wf->f_flag = FWRITE; + wf->f_type = DTYPE_SOCKET; + wf->f_ops = &socketops; + wf->f_data = 
(caddr_t)wso; + retval[1] = fd; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + error = unp_connect2(wso, rso); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (error) + goto free4; + *fdflags(p, retval[0]) &= ~UF_RESERVED; + *fdflags(p, retval[1]) &= ~UF_RESERVED; + return (0); +free4: + fdrelse(p, retval[1]); + ffree(wf); +free3: + fdrelse(p, retval[0]); + ffree(rf); +free2: + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + (void)soclose(wso); +free1: + (void)soclose(rso); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); +} + + +/* + * Get socket name. + */ +/* ARGSUSED */ +static int +getsockname1(p, uap, retval, compat) + struct proc *p; + register struct getsockname_args *uap; + register_t *retval; + int compat; +{ + struct file *fp; + register struct socket *so; + struct sockaddr *sa; + u_int len; + int error; + + error = getsock(p->p_fd, uap->fdes, &fp); + if (error) + return (error); + error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len)); + if (error) + return (error); + so = (struct socket *)fp->f_data; + sa = 0; + error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa); + if (error) + goto bad; + if (sa == 0) { + len = 0; + goto gotnothing; + } + + len = MIN(len, sa->sa_len); +#ifdef COMPAT_OLDSOCK + if (compat) + ((struct osockaddr *)sa)->sa_family = sa->sa_family; +#endif + error = copyout(sa, (caddr_t)uap->asa, (u_int)len); + if (error == 0) +gotnothing: + error = copyout((caddr_t)&len, (caddr_t)uap->alen, + sizeof (len)); +bad: + if (sa) + FREE(sa, M_SONAME); + return (error); +} + +int +getsockname(p, uap, retval) + struct proc *p; + struct getsockname_args *uap; + register_t *retval; +{ + + return (getsockname1(p, uap, retval, 0)); +} + +#ifdef COMPAT_OLDSOCK +int +ogetsockname(p, uap, retval) + struct proc *p; + struct getsockname_args *uap; + register_t *retval; +{ + + return (getsockname1(p, uap, retval, 1)); +} +#endif /* COMPAT_OLDSOCK */ + +/* + * Get name of peer for 
connected socket. + */ +/* ARGSUSED */ +int +getpeername1(p, uap, retval, compat) + struct proc *p; + register struct getpeername_args *uap; + register_t *retval; + int compat; +{ + struct file *fp; + register struct socket *so; + struct sockaddr *sa; + u_int len; + int error; + + error = getsock(p->p_fd, uap->fdes, &fp); + if (error) + return (error); + so = (struct socket *)fp->f_data; + if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) + return (ENOTCONN); + error = copyin((caddr_t)uap->alen, (caddr_t)&len, sizeof (len)); + if (error) + return (error); + sa = 0; + error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa); + if (error) + goto bad; + if (sa == 0) { + len = 0; + goto gotnothing; + } + len = MIN(len, sa->sa_len); +#ifdef COMPAT_OLDSOCK + if (compat) + ((struct osockaddr *)sa)->sa_family = + sa->sa_family; +#endif + error = copyout(sa, (caddr_t)uap->asa, (u_int)len); + if (error) + goto bad; +gotnothing: + error = copyout((caddr_t)&len, (caddr_t)uap->alen, sizeof (len)); +bad: + if (sa) FREE(sa, M_SONAME); + return (error); +} + +int +getpeername(p, uap, retval) + struct proc *p; + struct getpeername_args *uap; + register_t *retval; +{ + + return (getpeername1(p, uap, retval, 0)); +} + +#ifdef COMPAT_OLDSOCK +int +ogetpeername(p, uap, retval) + struct proc *p; + struct ogetpeername_args *uap; + register_t *retval; +{ + + /* XXX uap should have type `getpeername_args *' to begin with. */ + return (getpeername1(p, (struct getpeername_args *)uap, retval, 1)); +} +#endif /* COMPAT_OLDSOCK */ + +int +sockargs(mp, buf, buflen, type) + struct mbuf **mp; + caddr_t buf; + int buflen, type; +{ + register struct sockaddr *sa; + register struct mbuf *m; + int error; + + if ((u_int)buflen > MLEN) { +#ifdef COMPAT_OLDSOCK + if (type == MT_SONAME && (u_int)buflen <= 112) + buflen = MLEN; /* unix domain compat. 
hack */ + else +#endif + return (EINVAL); + } + m = m_get(M_WAIT, type); + if (m == NULL) + return (ENOBUFS); + m->m_len = buflen; + error = copyin(buf, mtod(m, caddr_t), (u_int)buflen); + if (error) + (void) m_free(m); + else { + *mp = m; + if (type == MT_SONAME) { + sa = mtod(m, struct sockaddr *); + +#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN + if (sa->sa_family == 0 && sa->sa_len < AF_MAX) + sa->sa_family = sa->sa_len; +#endif + sa->sa_len = buflen; + } + } + return (error); +} + +int +getsockaddr(namp, uaddr, len) + struct sockaddr **namp; + caddr_t uaddr; + size_t len; +{ + struct sockaddr *sa; + int error; + + if (len > SOCK_MAXADDRLEN) + return ENAMETOOLONG; + + if (len == 0) + return EINVAL; + + MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK); + error = copyin(uaddr, sa, len); + if (error) { + FREE(sa, M_SONAME); + } else { +#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN + if (sa->sa_family == 0 && sa->sa_len < AF_MAX) + sa->sa_family = sa->sa_len; +#endif + sa->sa_len = len; + *namp = sa; + } + return error; +} + +int +getsock(fdp, fdes, fpp) + struct filedesc *fdp; + int fdes; + struct file **fpp; +{ + register struct file *fp; + + if ((unsigned)fdes >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[fdes]) == NULL || + (fdp->fd_ofileflags[fdes] & UF_RESERVED)) + return (EBADF); + if (fp->f_type != DTYPE_SOCKET) + return (ENOTSOCK); + *fpp = fp; + return (0); +} + +#if SENDFILE +/* + * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-)) + * XXX - The sf_buf functions are currently private to sendfile(2), so have + * been made static, but may be useful in the future for doing zero-copy in + * other parts of the networking code. 
+ */ +static void +sf_buf_init(void *arg) +{ + int i; + + SLIST_INIT(&sf_freelist); + sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE); + sf_bufs = _MALLOC(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_NOWAIT); + bzero(sf_bufs, nsfbufs * sizeof(struct sf_buf)); + for (i = 0; i < nsfbufs; i++) { + sf_bufs[i].kva = sf_base + i * PAGE_SIZE; + SLIST_INSERT_HEAD(&sf_freelist, &sf_bufs[i], free_list); + } +} + +/* + * Get an sf_buf from the freelist. Will block if none are available. + */ +static struct sf_buf * +sf_buf_alloc() +{ + struct sf_buf *sf; + int s; + + s = splimp(); + while ((sf = SLIST_FIRST(&sf_freelist)) == NULL) { + sf_buf_alloc_want = 1; + tsleep(&sf_freelist, PVM, "sfbufa", 0); + } + SLIST_REMOVE_HEAD(&sf_freelist, free_list); + splx(s); + sf->refcnt = 1; + return (sf); +} + +#define dtosf(x) (&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT]) +static void +sf_buf_ref(caddr_t addr, u_int size) +{ + struct sf_buf *sf; + + sf = dtosf(addr); + if (sf->refcnt == 0) + panic("sf_buf_ref: referencing a free sf_buf"); + sf->refcnt++; +} + +/* + * Lose a reference to an sf_buf. When none left, detach mapped page + * and release resources back to the system. + * + * Must be called at splimp. + */ +static void +sf_buf_free(caddr_t addr, u_int size) +{ + struct sf_buf *sf; + struct vm_page *m; + int s; + + sf = dtosf(addr); + if (sf->refcnt == 0) + panic("sf_buf_free: freeing free sf_buf"); + sf->refcnt--; + if (sf->refcnt == 0) { + pmap_qremove((vm_offset_t)addr, 1); + m = sf->m; + s = splvm(); + vm_page_unwire(m, 0); + /* + * Check for the object going away on us. This can + * happen since we don't hold a reference to it. + * If so, we're responsible for freeing the page. 
+ */ + if (m->wire_count == 0 && m->object == NULL) + vm_page_lock_queues(); + vm_page_free(m); + vm_page_unlock_queues(); + splx(s); + sf->m = NULL; + SLIST_INSERT_HEAD(&sf_freelist, sf, free_list); + if (sf_buf_alloc_want) { + sf_buf_alloc_want = 0; + wakeup(&sf_freelist); + } + } +} + +/* + * sendfile(2). + * int sendfile(int fd, int s, off_t offset, size_t nbytes, + * struct sf_hdtr *hdtr, off_t *sbytes, int flags) + * + * Send a file specified by 'fd' and starting at 'offset' to a socket + * specified by 's'. Send only 'nbytes' of the file or until EOF if + * nbytes == 0. Optionally add a header and/or trailer to the socket + * output. If specified, write the total number of bytes sent into *sbytes. + */ +int +sendfile(struct proc *p, struct sendfile_args *uap) +{ + struct file *fp; + struct filedesc *fdp = p->p_fd; + struct vnode *vp; + struct vm_object *obj; + struct socket *so; + struct mbuf *m; + struct sf_buf *sf; + struct vm_page *pg; + struct writev_args nuap; + struct sf_hdtr hdtr; + off_t off, xfsize, sbytes = 0; + int error = 0, s; + + /* + * Do argument checking. Must be a regular file in, stream + * type and connected socket out, positive offset. + */ + if (((u_int)uap->fd) >= fdp->fd_nfiles || + (fp = fdp->fd_ofiles[uap->fd]) == NULL || + (fp->f_flag & FREAD) == 0) { + error = EBADF; + goto done; + } + if (fp->f_type != DTYPE_VNODE) { + error = EINVAL; + goto done; + } + vp = (struct vnode *)fp->f_data; + obj = vp->v_object; + if (vp->v_type != VREG || obj == NULL) { + error = EINVAL; + goto done; + } + error = getsock(p->p_fd, uap->s, &fp); + if (error) + goto done; + so = (struct socket *)fp->f_data; + if (so->so_type != SOCK_STREAM) { + error = EINVAL; + goto done; + } + if ((so->so_state & SS_ISCONNECTED) == 0) { + error = ENOTCONN; + goto done; + } + if (uap->offset < 0) { + error = EINVAL; + goto done; + } + + /* + * If specified, get the pointer to the sf_hdtr struct for + * any headers/trailers. 
+ */ + if (uap->hdtr != NULL) { + error = copyin(uap->hdtr, &hdtr, sizeof(hdtr)); + if (error) + goto done; + /* + * Send any headers. Wimp out and use writev(2). + */ + if (hdtr.headers != NULL) { + nuap.fd = uap->s; + nuap.iovp = hdtr.headers; + nuap.iovcnt = hdtr.hdr_cnt; + error = writev(p, &nuap); + if (error) + goto done; + sbytes += p->p_retval[0]; + } + } + + /* + * Protect against multiple writers to the socket. + */ + (void) sblock(&so->so_snd, M_WAIT); + + /* + * Loop through the pages in the file, starting with the requested + * offset. Get a file page (do I/O if necessary), map the file page + * into an sf_buf, attach an mbuf header to the sf_buf, and queue + * it on the socket. + */ + for (off = uap->offset; ; off += xfsize, sbytes += xfsize) { + vm_object_offset_t pindex; + vm_object_offset_t pgoff; + + pindex = OFF_TO_IDX(off); +retry_lookup: + /* + * Calculate the amount to transfer. Not to exceed a page, + * the EOF, or the passed in nbytes. + */ + xfsize = obj->un_pager.vnp.vnp_size - off; + if (xfsize > PAGE_SIZE_64) + xfsize = PAGE_SIZE; + pgoff = (vm_object_offset_t)(off & PAGE_MASK_64); + if (PAGE_SIZE - pgoff < xfsize) + xfsize = PAGE_SIZE_64 - pgoff; + if (uap->nbytes && xfsize > (uap->nbytes - sbytes)) + xfsize = uap->nbytes - sbytes; + if (xfsize <= 0) + break; + /* + * Optimize the non-blocking case by looking at the socket space + * before going to the extra work of constituting the sf_buf. + */ + if ((so->so_state & SS_NBIO) && sbspace(&so->so_snd) <= 0) { + if (so->so_state & SS_CANTSENDMORE) + error = EPIPE; + else + error = EAGAIN; + sbunlock(&so->so_snd); + goto done; + } + /* + * Attempt to look up the page. If the page doesn't exist or the + * part we're interested in isn't valid, then read it from disk. + * If some other part of the kernel has this page (i.e. it's busy), + * then disk I/O may be occuring on it, so wait and retry. 
+	 */
+		pg = vm_page_lookup(obj, pindex);
+		if (pg == NULL || (!(pg->flags & PG_BUSY) && !pg->busy &&
+		    !vm_page_is_valid(pg, pgoff, xfsize))) {
+			struct uio auio;
+			struct iovec aiov;
+			int bsize;
+
+			if (pg == NULL) {
+				pg = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);
+				if (pg == NULL) {
+					VM_WAIT;
+					goto retry_lookup;
+				}
+				/*
+				 * don't just clear PG_BUSY manually -
+				 * vm_page_alloc() should be considered opaque,
+				 * use the VM routine provided to clear
+				 * PG_BUSY.
+				 */
+				vm_page_wakeup(pg);
+
+			}
+			/*
+			 * Ensure that our page is still around when the I/O completes.
+			 */
+			vm_page_io_start(pg);
+			vm_page_wire(pg);
+			/*
+			 * Get the page from backing store.
+			 */
+			bsize = vp->v_mount->mnt_stat.f_iosize;
+			auio.uio_iov = &aiov;
+			auio.uio_iovcnt = 1;
+			aiov.iov_base = 0;
+			aiov.iov_len = MAXBSIZE;
+			auio.uio_resid = MAXBSIZE;
+			auio.uio_offset = trunc_page(off);
+			auio.uio_segflg = UIO_NOCOPY;
+			auio.uio_rw = UIO_READ;
+			auio.uio_procp = p;
+			vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, p);
+			error = VOP_READ(vp, &auio, IO_VMIO | ((MAXBSIZE / bsize) << 16),
+			        p->p_ucred);
+			VOP_UNLOCK(vp, 0, p);
+			vm_page_flag_clear(pg, PG_ZERO);
+			vm_page_io_finish(pg);
+			if (error) {
+				vm_page_unwire(pg, 0);
+				/*
+				 * See if anyone else might know about this page.
+				 * If not and it is not valid, then free it.
+				 *
+				 * NOTE: braces added so the queue lock, the
+				 * free, and the unlock are all conditional on
+				 * the page being unreferenced and invalid;
+				 * previously only vm_page_lock_queues() was
+				 * guarded and the page was freed even while
+				 * wired, valid, busy, or held.
+				 */
+				if (pg->wire_count == 0 && pg->valid == 0 &&
+				    pg->busy == 0 && !(pg->flags & PG_BUSY) &&
+				    pg->hold_count == 0) {
+					vm_page_lock_queues();
+					vm_page_free(pg);
+					vm_page_unlock_queues();
+				}
+				sbunlock(&so->so_snd);
+				goto done;
+			}
+		} else {
+			if ((pg->flags & PG_BUSY) || pg->busy) {
+				s = splvm();
+				if ((pg->flags & PG_BUSY) || pg->busy) {
+					/*
+					 * Page is busy. Wait and retry.
+					 */
+					vm_page_flag_set(pg, PG_WANTED);
+					tsleep(pg, PVM, "sfpbsy", 0);
+					splx(s);
+					goto retry_lookup;
+				}
+				splx(s);
+			}
+			/*
+			 * Protect from having the page ripped out from beneath us.
+ */ + vm_page_wire(pg); + } + /* + * Allocate a kernel virtual page and insert the physical page + * into it. + */ + sf = sf_buf_alloc(); + sf->m = pg; + pmap_qenter(sf->kva, &pg, 1); + /* + * Get an mbuf header and set it up as having external storage. + */ + MGETHDR(m, M_WAIT, MT_DATA); + m->m_ext.ext_free = sf_buf_free; + m->m_ext.ext_ref = sf_buf_ref; + m->m_ext.ext_buf = (void *)sf->kva; + m->m_ext.ext_size = PAGE_SIZE; + m->m_data = (char *) sf->kva + pgoff; + m->m_flags |= M_EXT; + m->m_pkthdr.len = m->m_len = xfsize; + /* + * Add the buffer to the socket buffer chain. + */ + s = splnet(); +retry_space: + /* + * Make sure that the socket is still able to take more data. + * CANTSENDMORE being true usually means that the connection + * was closed. so_error is true when an error was sensed after + * a previous send. + * The state is checked after the page mapping and buffer + * allocation above since those operations may block and make + * any socket checks stale. From this point forward, nothing + * blocks before the pru_send (or more accurately, any blocking + * results in a loop back to here to re-check). + */ + if ((so->so_state & SS_CANTSENDMORE) || so->so_error) { + if (so->so_state & SS_CANTSENDMORE) { + error = EPIPE; + } else { + error = so->so_error; + so->so_error = 0; + } + m_freem(m); + sbunlock(&so->so_snd); + splx(s); + goto done; + } + /* + * Wait for socket space to become available. We do this just + * after checking the connection state above in order to avoid + * a race condition with sbwait(). + */ + if (sbspace(&so->so_snd) < so->so_snd.sb_lowat) { + if (so->so_state & SS_NBIO) { + m_freem(m); + sbunlock(&so->so_snd); + splx(s); + error = EAGAIN; + goto done; + } + error = sbwait(&so->so_snd); + /* + * An error from sbwait usually indicates that we've + * been interrupted by a signal. If we've sent anything + * then return bytes sent, otherwise return the error. 
+ */ + if (error) { + m_freem(m); + sbunlock(&so->so_snd); + splx(s); + goto done; + } + goto retry_space; + } + error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, m, 0, 0, p); + splx(s); + if (error) { + sbunlock(&so->so_snd); + goto done; + } + } + sbunlock(&so->so_snd); + + /* + * Send trailers. Wimp out and use writev(2). + */ + if (uap->hdtr != NULL && hdtr.trailers != NULL) { + nuap.fd = uap->s; + nuap.iovp = hdtr.trailers; + nuap.iovcnt = hdtr.trl_cnt; + error = writev(p, &nuap); + if (error) + goto done; + sbytes += p->p_retval[0]; + } + +done: + if (uap->sbytes != NULL) { + copyout(&sbytes, uap->sbytes, sizeof(off_t)); + } + return (error); +} + +#endif diff --git a/bsd/kern/uipc_usrreq.c b/bsd/kern/uipc_usrreq.c new file mode 100644 index 000000000..480bc28f8 --- /dev/null +++ b/bsd/kern/uipc_usrreq.c @@ -0,0 +1,1231 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * From: @(#)uipc_usrreq.c 8.3 (Berkeley) 1/4/94 + */ + +#include +#include +#include +#include +#include +#include /* XXX must be before */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct zone *unp_zone; +static unp_gen_t unp_gencnt; +static u_int unp_count; + +static struct unp_head unp_shead, unp_dhead; + +/* + * Unix communications domain. + * + * TODO: + * SEQPACKET, RDM + * rethink name space problems + * need a proper out-of-band + * lock pushdown + */ +static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL }; +static ino_t unp_ino; /* prototype for fake inode numbers */ + +static int unp_attach __P((struct socket *)); +static void unp_detach __P((struct unpcb *)); +static int unp_bind __P((struct unpcb *,struct sockaddr *, struct proc *)); +static int unp_connect __P((struct socket *,struct sockaddr *, + struct proc *)); +static void unp_disconnect __P((struct unpcb *)); +static void unp_shutdown __P((struct unpcb *)); +static void unp_drop __P((struct unpcb *, int)); +static void unp_gc __P((void)); +static void unp_scan __P((struct mbuf *, void (*)(struct file *))); +static void unp_mark __P((struct file *)); +static void unp_discard __P((struct file *)); +static int unp_internalize __P((struct mbuf *, struct proc *)); + +static int +uipc_abort(struct socket *so) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + unp_drop(unp, ECONNABORTED); + return 0; +} + +static int +uipc_accept(struct socket *so, struct sockaddr **nam) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + + /* + * Pass back name of connected socket, + * if it was bound and we are still connected + * (our peer may have closed already!). 
+ */ + if (unp->unp_conn && unp->unp_conn->unp_addr) { + *nam = dup_sockaddr((struct sockaddr *)unp->unp_conn->unp_addr, + 1); + } else { + *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1); + } + return 0; +} + +static int +uipc_attach(struct socket *so, int proto, struct proc *p) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp != 0) + return EISCONN; + return unp_attach(so); +} + +static int +uipc_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + + return unp_bind(unp, nam, p); +} + +static int +uipc_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + return unp_connect(so, nam, p); +} + +static int +uipc_connect2(struct socket *so1, struct socket *so2) +{ + struct unpcb *unp = sotounpcb(so1); + + if (unp == 0) + return EINVAL; + + return unp_connect2(so1, so2); +} + +/* control is EOPNOTSUPP */ + +static int +uipc_detach(struct socket *so) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + + unp_detach(unp); + return 0; +} + +static int +uipc_disconnect(struct socket *so) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + unp_disconnect(unp); + return 0; +} + +static int +uipc_listen(struct socket *so, struct proc *p) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0 || unp->unp_vnode == 0) + return EINVAL; + return 0; +} + +static int +uipc_peeraddr(struct socket *so, struct sockaddr **nam) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + if (unp->unp_conn && unp->unp_conn->unp_addr) + *nam = dup_sockaddr((struct sockaddr *)unp->unp_conn->unp_addr, + 1); + return 0; +} + +static int +uipc_rcvd(struct socket *so, int flags) +{ + struct unpcb *unp = sotounpcb(so); + struct socket *so2; + + if (unp == 0) + return EINVAL; + switch (so->so_type) { + case SOCK_DGRAM: + panic("uipc_rcvd DGRAM?"); + 
/*NOTREACHED*/ + + case SOCK_STREAM: +#define rcv (&so->so_rcv) +#define snd (&so2->so_snd) + if (unp->unp_conn == 0) + break; + so2 = unp->unp_conn->unp_socket; + /* + * Adjust backpressure on sender + * and wakeup any waiting to write. + */ + snd->sb_mbmax += unp->unp_mbcnt - rcv->sb_mbcnt; + unp->unp_mbcnt = rcv->sb_mbcnt; + snd->sb_hiwat += unp->unp_cc - rcv->sb_cc; + unp->unp_cc = rcv->sb_cc; + sowwakeup(so2); +#undef snd +#undef rcv + break; + + default: + panic("uipc_rcvd unknown socktype"); + } + return 0; +} + +/* pru_rcvoob is EOPNOTSUPP */ + +static int +uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, + struct mbuf *control, struct proc *p) +{ + int error = 0; + struct unpcb *unp = sotounpcb(so); + struct socket *so2; + + if (unp == 0) { + error = EINVAL; + goto release; + } + if (flags & PRUS_OOB) { + error = EOPNOTSUPP; + goto release; + } + + if (control && (error = unp_internalize(control, p))) + goto release; + + switch (so->so_type) { + case SOCK_DGRAM: + { + struct sockaddr *from; + + if (nam) { + if (unp->unp_conn) { + error = EISCONN; + break; + } + error = unp_connect(so, nam, p); + if (error) + break; + } else { + if (unp->unp_conn == 0) { + error = ENOTCONN; + break; + } + } + so2 = unp->unp_conn->unp_socket; + if (unp->unp_addr) + from = (struct sockaddr *)unp->unp_addr; + else + from = &sun_noname; + if (sbappendaddr(&so2->so_rcv, from, m, control)) { + sorwakeup(so2); + m = 0; + control = 0; + } else + error = ENOBUFS; + if (nam) + unp_disconnect(unp); + break; + } + + case SOCK_STREAM: +#define rcv (&so2->so_rcv) +#define snd (&so->so_snd) + /* Connect if not connected yet. */ + /* + * Note: A better implementation would complain + * if not equal to the peer's address. 
+ */ + if ((so->so_state & SS_ISCONNECTED) == 0) { + if (nam) { + error = unp_connect(so, nam, p); + if (error) + break; /* XXX */ + } else { + error = ENOTCONN; + break; + } + } + + if (so->so_state & SS_CANTSENDMORE) { + error = EPIPE; + break; + } + if (unp->unp_conn == 0) + panic("uipc_send connected but no connection?"); + so2 = unp->unp_conn->unp_socket; + /* + * Send to paired receive port, and then reduce + * send buffer hiwater marks to maintain backpressure. + * Wake up readers. + */ + if (control) { + if (sbappendcontrol(rcv, m, control)) + control = 0; + } else + sbappend(rcv, m); + snd->sb_mbmax -= + rcv->sb_mbcnt - unp->unp_conn->unp_mbcnt; + unp->unp_conn->unp_mbcnt = rcv->sb_mbcnt; + snd->sb_hiwat -= rcv->sb_cc - unp->unp_conn->unp_cc; + unp->unp_conn->unp_cc = rcv->sb_cc; + sorwakeup(so2); + m = 0; +#undef snd +#undef rcv + break; + + default: + panic("uipc_send unknown socktype"); + } + + /* + * SEND_EOF is equivalent to a SEND followed by + * a SHUTDOWN. + */ + if (flags & PRUS_EOF) { + socantsendmore(so); + unp_shutdown(unp); + } + +release: + if (control) + m_freem(control); + if (m) + m_freem(m); + return error; +} + +static int +uipc_sense(struct socket *so, struct stat *sb) +{ + struct unpcb *unp = sotounpcb(so); + struct socket *so2; + + if (unp == 0) + return EINVAL; + sb->st_blksize = so->so_snd.sb_hiwat; + if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) { + so2 = unp->unp_conn->unp_socket; + sb->st_blksize += so2->so_rcv.sb_cc; + } + sb->st_dev = NODEV; + if (unp->unp_ino == 0) + unp->unp_ino = unp_ino++; + sb->st_ino = unp->unp_ino; + return (0); +} + +static int +uipc_shutdown(struct socket *so) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + socantsendmore(so); + unp_shutdown(unp); + return 0; +} + +static int +uipc_sockaddr(struct socket *so, struct sockaddr **nam) +{ + struct unpcb *unp = sotounpcb(so); + + if (unp == 0) + return EINVAL; + if (unp->unp_addr) + *nam = dup_sockaddr((struct sockaddr 
*)unp->unp_addr, 1); + return 0; +} + +struct pr_usrreqs uipc_usrreqs = { + uipc_abort, uipc_accept, uipc_attach, uipc_bind, uipc_connect, + uipc_connect2, pru_control_notsupp, uipc_detach, uipc_disconnect, + uipc_listen, uipc_peeraddr, uipc_rcvd, pru_rcvoob_notsupp, + uipc_send, uipc_sense, uipc_shutdown, uipc_sockaddr, + sosend, soreceive, sopoll +}; + +/* + * Both send and receive buffers are allocated PIPSIZ bytes of buffering + * for stream sockets, although the total for sender and receiver is + * actually only PIPSIZ. + * Datagram sockets really use the sendspace as the maximum datagram size, + * and don't really want to reserve the sendspace. Their recvspace should + * be large enough for at least one max-size datagram plus address. + */ +#ifndef PIPSIZ +#define PIPSIZ 8192 +#endif +static u_long unpst_sendspace = PIPSIZ; +static u_long unpst_recvspace = PIPSIZ; +static u_long unpdg_sendspace = 2*1024; /* really max datagram size */ +static u_long unpdg_recvspace = 4*1024; + +static int unp_rights; /* file descriptors in flight */ + +SYSCTL_DECL(_net_local_stream); +SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW, + &unpst_sendspace, 0, ""); +SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW, + &unpst_recvspace, 0, ""); +SYSCTL_DECL(_net_local_dgram); +SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW, + &unpdg_sendspace, 0, ""); +SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW, + &unpdg_recvspace, 0, ""); +SYSCTL_DECL(_net_local); +SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, ""); + +static int +unp_attach(so) + struct socket *so; +{ + register struct unpcb *unp; + int error; + + if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { + switch (so->so_type) { + + case SOCK_STREAM: + error = soreserve(so, unpst_sendspace, unpst_recvspace); + break; + + case SOCK_DGRAM: + error = soreserve(so, unpdg_sendspace, unpdg_recvspace); + break; + + default: + panic("unp_attach"); + } + if 
(error) + return (error); + } + unp = zalloc(unp_zone); + if (unp == NULL) + return (ENOBUFS); + bzero(unp, sizeof *unp); + unp->unp_gencnt = ++unp_gencnt; + unp_count++; + LIST_INIT(&unp->unp_refs); + unp->unp_socket = so; + LIST_INSERT_HEAD(so->so_type == SOCK_DGRAM ? &unp_dhead + : &unp_shead, unp, unp_link); + so->so_pcb = (caddr_t)unp; + return (0); +} + +static void +unp_detach(unp) + register struct unpcb *unp; +{ + LIST_REMOVE(unp, unp_link); + unp->unp_gencnt = ++unp_gencnt; + --unp_count; + if (unp->unp_vnode) { + unp->unp_vnode->v_socket = 0; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + vrele(unp->unp_vnode); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + unp->unp_vnode = 0; + } + if (unp->unp_conn) + unp_disconnect(unp); + while (unp->unp_refs.lh_first) + unp_drop(unp->unp_refs.lh_first, ECONNRESET); + soisdisconnected(unp->unp_socket); + unp->unp_socket->so_pcb = 0; + if (unp_rights) { + /* + * Normally the receive buffer is flushed later, + * in sofree, but if our receive buffer holds references + * to descriptors that are now garbage, we will dispose + * of those descriptor references after the garbage collector + * gets them (resulting in a "panic: closef: count < 0"). 
+ */ + sorflush(unp->unp_socket); + unp_gc(); + } + if (unp->unp_addr) + FREE(unp->unp_addr, M_SONAME); + zfree(unp_zone, unp); +} + +static int +unp_bind(unp, nam, p) + struct unpcb *unp; + struct sockaddr *nam; + struct proc *p; +{ + struct sockaddr_un *soun = (struct sockaddr_un *)nam; + register struct vnode *vp; + struct vattr vattr; + int error, namelen; + struct nameidata nd; + char buf[SOCK_MAXADDRLEN]; + + if (unp->unp_vnode != NULL) + return (EINVAL); +#define offsetof(s, e) ((char *)&((s *)0)->e - (char *)((s *)0)) + namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path); + if (namelen <= 0) + return EINVAL; + strncpy(buf, soun->sun_path, namelen); + buf[namelen] = 0; /* null-terminate the string */ + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT, UIO_SYSSPACE, + buf, p); +/* SHOULD BE ABLE TO ADOPT EXISTING AND wakeup() ALA FIFO's */ + error = namei(&nd); + if (error) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (error); + } + vp = nd.ni_vp; + if (vp != NULL) { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vrele(vp); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (EADDRINUSE); + } + VATTR_NULL(&vattr); + vattr.va_type = VSOCK; + vattr.va_mode = (ACCESSPERMS & ~p->p_fd->fd_cmask); + VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); +#if 0 + /* In FreeBSD create leave s parent held ; not here */ + vput(nd.ni_dvp); +#endif + if (error) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (error); + } + vp = nd.ni_vp; + vp->v_socket = unp->unp_socket; + unp->unp_vnode = vp; + unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam, 1); + VOP_UNLOCK(vp, 0, p); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (0); +} + +static int +unp_connect(so, nam, p) + struct socket *so; + struct sockaddr *nam; + struct proc 
*p; +{ + register struct sockaddr_un *soun = (struct sockaddr_un *)nam; + register struct vnode *vp; + register struct socket *so2, *so3; + struct unpcb *unp2, *unp3; + int error, len; + struct nameidata nd; + char buf[SOCK_MAXADDRLEN]; + + len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); + if (len <= 0) + return EINVAL; + strncpy(buf, soun->sun_path, len); + buf[len] = 0; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, buf, p); + error = namei(&nd); + if (error) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (error); + } + vp = nd.ni_vp; + if (vp->v_type != VSOCK) { + error = ENOTSOCK; + goto bad; + } + error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p); + if (error) + goto bad; + so2 = vp->v_socket; + if (so2 == 0) { + error = ECONNREFUSED; + goto bad; + } + if (so->so_type != so2->so_type) { + error = EPROTOTYPE; + goto bad; + } + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if (so->so_proto->pr_flags & PR_CONNREQUIRED) { + if ((so2->so_options & SO_ACCEPTCONN) == 0 || + (so3 = sonewconn(so2, 0)) == 0) { + error = ECONNREFUSED; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + unp2 = sotounpcb(so2); + unp3 = sotounpcb(so3); + if (unp2->unp_addr) + unp3->unp_addr = (struct sockaddr_un *) + dup_sockaddr((struct sockaddr *) + unp2->unp_addr, 1); + so2 = so3; + } + error = unp_connect2(so, so2); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +bad: + vput(vp); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (error); +} + +int +unp_connect2(so, so2) + register struct socket *so; + register struct socket *so2; +{ + register struct unpcb *unp = sotounpcb(so); + register struct unpcb *unp2; + + if (so2->so_type != so->so_type) + return (EPROTOTYPE); + unp2 = sotounpcb(so2); + unp->unp_conn = unp2; + switch (so->so_type) { + + case SOCK_DGRAM: + LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); + soisconnected(so); + break; + 
+ case SOCK_STREAM: + unp2->unp_conn = unp; + soisconnected(so); + soisconnected(so2); + break; + + default: + panic("unp_connect2"); + } + return (0); +} + +static void +unp_disconnect(unp) + struct unpcb *unp; +{ + register struct unpcb *unp2 = unp->unp_conn; + + if (unp2 == 0) + return; + unp->unp_conn = 0; + switch (unp->unp_socket->so_type) { + + case SOCK_DGRAM: + LIST_REMOVE(unp, unp_reflink); + unp->unp_socket->so_state &= ~SS_ISCONNECTED; + break; + + case SOCK_STREAM: + soisdisconnected(unp->unp_socket); + unp2->unp_conn = 0; + soisdisconnected(unp2->unp_socket); + break; + } +} + +#ifdef notdef +void +unp_abort(unp) + struct unpcb *unp; +{ + + unp_detach(unp); +} +#endif + +static int +unp_pcblist SYSCTL_HANDLER_ARGS +{ + int error, i, n; + struct unpcb *unp, **unp_list; + unp_gen_t gencnt; + struct xunpgen xug; + struct unp_head *head; + + head = ((intptr_t)arg1 == SOCK_DGRAM ? &unp_dhead : &unp_shead); + + /* + * The process of preparing the PCB list is too time-consuming and + * resource-intensive to repeat twice on every request. + */ + if (req->oldptr == 0) { + n = unp_count; + req->oldidx = 2 * (sizeof xug) + + (n + n/8) * sizeof(struct xunpcb); + return 0; + } + + if (req->newptr != 0) + return EPERM; + + /* + * OK, now we're committed to doing something. 
+ */ + gencnt = unp_gencnt; + n = unp_count; + + xug.xug_len = sizeof xug; + xug.xug_count = n; + xug.xug_gen = gencnt; + xug.xug_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xug, sizeof xug); + if (error) + return error; + + unp_list = _MALLOC(n * sizeof *unp_list, M_TEMP, M_WAITOK); + if (unp_list == 0) + return ENOMEM; + + for (unp = head->lh_first, i = 0; unp && i < n; + unp = unp->unp_link.le_next) { + if (unp->unp_gencnt <= gencnt) + unp_list[i++] = unp; + } + n = i; /* in case we lost some during malloc */ + + error = 0; + for (i = 0; i < n; i++) { + unp = unp_list[i]; + if (unp->unp_gencnt <= gencnt) { + struct xunpcb xu; + xu.xu_len = sizeof xu; + xu.xu_unpp = unp; + /* + * XXX - need more locking here to protect against + * connect/disconnect races for SMP. + */ + if (unp->unp_addr) + bcopy(unp->unp_addr, &xu.xu_addr, + unp->unp_addr->sun_len); + if (unp->unp_conn && unp->unp_conn->unp_addr) + bcopy(unp->unp_conn->unp_addr, + &xu.xu_caddr, + unp->unp_conn->unp_addr->sun_len); + bcopy(unp, &xu.xu_unp, sizeof *unp); + sotoxsocket(unp->unp_socket, &xu.xu_socket); + error = SYSCTL_OUT(req, &xu, sizeof xu); + } + } + if (!error) { + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. 
+ */ + xug.xug_gen = unp_gencnt; + xug.xug_sogen = so_gencnt; + xug.xug_count = unp_count; + error = SYSCTL_OUT(req, &xug, sizeof xug); + } + FREE(unp_list, M_TEMP); + return error; +} + +SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, CTLFLAG_RD, + (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", + "List of active local datagram sockets"); +SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, CTLFLAG_RD, + (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", + "List of active local stream sockets"); + +static void +unp_shutdown(unp) + struct unpcb *unp; +{ + struct socket *so; + + if (unp->unp_socket->so_type == SOCK_STREAM && unp->unp_conn && + (so = unp->unp_conn->unp_socket)) + socantrcvmore(so); +} + +static void +unp_drop(unp, errno) + struct unpcb *unp; + int errno; +{ + struct socket *so = unp->unp_socket; + + so->so_error = errno; + unp_disconnect(unp); + if (so->so_head) { + LIST_REMOVE(unp, unp_link); + unp->unp_gencnt = ++unp_gencnt; + unp_count--; + so->so_pcb = (caddr_t) 0; + if (unp->unp_addr) + FREE(unp->unp_addr, M_SONAME); + zfree(unp_zone, unp); + sofree(so); + } +} + +#ifdef notdef +void +unp_drain() +{ + +} +#endif + +int +unp_externalize(rights) + struct mbuf *rights; +{ + struct proc *p = current_proc(); /* XXX */ + register int i; + register struct cmsghdr *cm = mtod(rights, struct cmsghdr *); + register struct file **rp = (struct file **)(cm + 1); + register struct file *fp; + int newfds = (cm->cmsg_len - sizeof(*cm)) / sizeof (int); + int f; + + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + /* + * if the new FD's will not fit, then we free them all + */ + if (!fdavail(p, newfds)) { + for (i = 0; i < newfds; i++) { + fp = *rp; + unp_discard(fp); + *rp++ = 0; + } + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (EMSGSIZE); + } + /* + * now change each pointer to an fd in the global table to + * an integer that is the index to the local fd table entry + * that we set up to point to the global one we are 
transferring. + * XXX this assumes a pointer and int are the same size...! + */ + for (i = 0; i < newfds; i++) { + if (fdalloc(p, 0, &f)) + panic("unp_externalize"); + fp = *rp; + p->p_fd->fd_ofiles[f] = fp; + fp->f_msgcount--; + unp_rights--; + *(int *)rp++ = f; + } + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (0); +} + +void +unp_init(void) +{ + unp_zone = zinit(sizeof(struct unpcb), + (nmbclusters * sizeof(struct unpcb)), + 4096, "unpzone"); + if (unp_zone == 0) + panic("unp_init"); + LIST_INIT(&unp_dhead); + LIST_INIT(&unp_shead); +} + +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +static int +unp_internalize(control, p) + struct mbuf *control; + struct proc *p; +{ + register struct cmsghdr *cm = mtod(control, struct cmsghdr *); + register struct file **rp; + struct file *fp; + register int i, error; + int oldfds; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET || + cm->cmsg_len != control->m_len) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (EINVAL); + } + + oldfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int); + rp = (struct file **)(cm + 1); + for (i = 0; i < oldfds; i++) + if (error = fdgetf(p, *(int *)rp++, 0)) { + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (error); + } + + rp = (struct file **)(cm + 1); + for (i = 0; i < oldfds; i++) { + (void) fdgetf(p, *(int *)rp, &fp); + *rp++ = fp; + fref(fp); + fp->f_msgcount++; + unp_rights++; + } + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (0); +} + +static int unp_defer, unp_gcing; + +static void +unp_gc() +{ + register struct file *fp, *nextfp; + register struct socket *so; + struct file **extra_ref, **fpp; + int nunref, i; + + if (unp_gcing) + return; + unp_gcing = 1; + unp_defer = 0; + /* + * before going through all this, set all FDs to + * be NOT defered and NOT externally accessible + */ + thread_funnel_switch(NETWORK_FUNNEL, 
KERNEL_FUNNEL); + for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) + fp->f_flag &= ~(FMARK|FDEFER); + do { + for (fp = filehead.lh_first; fp != 0; fp = fp->f_list.le_next) { + /* + * If the file is not open, skip it + */ + if (fcount(fp) == 0) + continue; + /* + * If we already marked it as 'defer' in a + * previous pass, then try process it this time + * and un-mark it + */ + if (fp->f_flag & FDEFER) { + fp->f_flag &= ~FDEFER; + unp_defer--; + } else { + /* + * if it's not defered, then check if it's + * already marked.. if so skip it + */ + if (fp->f_flag & FMARK) + continue; + /* + * If all references are from messages + * in transit, then skip it. it's not + * externally accessible. + */ + if (fcount(fp) == fp->f_msgcount) + continue; + /* + * If it got this far then it must be + * externally accessible. + */ + fp->f_flag |= FMARK; + } + /* + * either it was defered, or it is externally + * accessible and not already marked so. + * Now check if it is possibly one of OUR sockets. + */ + if (fp->f_type != DTYPE_SOCKET || + (so = (struct socket *)fp->f_data) == 0) + continue; + if (so->so_proto->pr_domain != &localdomain || + (so->so_proto->pr_flags&PR_RIGHTS) == 0) + continue; +#ifdef notdef + /* if this code is enabled need to run under network funnel */ + if (so->so_rcv.sb_flags & SB_LOCK) { + /* + * This is problematical; it's not clear + * we need to wait for the sockbuf to be + * unlocked (on a uniprocessor, at least), + * and it's also not clear what to do + * if sbwait returns an error due to receipt + * of a signal. If sbwait does return + * an error, we'll go into an infinite + * loop. Delete all of this for now. + */ + (void) sbwait(&so->so_rcv); + goto restart; + } +#endif + /* + * So, Ok, it's one of our sockets and it IS externally + * accessible (or was defered). Now we look + * to see if we hold any file descriptors in its + * message buffers. Follow those links and mark them + * as accessible too. 
+ */ + unp_scan(so->so_rcv.sb_mb, unp_mark); + } + } while (unp_defer); + /* + * We grab an extra reference to each of the file table entries + * that are not otherwise accessible and then free the rights + * that are stored in messages on them. + * + * The bug in the original code is a little tricky, so I'll describe + * what's wrong with it here. + * + * It is incorrect to simply unp_discard each entry for f_msgcount + * times -- consider the case of sockets A and B that contain + * references to each other. On a last close of some other socket, + * we trigger a gc since the number of outstanding rights (unp_rights) + * is non-zero. If during the sweep phase the gc code unp_discards, + * we end up doing a (full) closef on the descriptor. A closef on A + * results in the following chain. Closef calls soo_close, which + * calls soclose. Soclose calls first (through the switch + * uipc_usrreq) unp_detach, which re-invokes unp_gc. Unp_gc simply + * returns because the previous instance had set unp_gcing, and + * we return all the way back to soclose, which marks the socket + * with SS_NOFDREF, and then calls sofree. Sofree calls sorflush + * to free up the rights that are queued in messages on the socket A, + * i.e., the reference on B. The sorflush calls via the dom_dispose + * switch unp_dispose, which unp_scans with unp_discard. This second + * instance of unp_discard just calls closef on B. + * + * Well, a similar chain occurs on B, resulting in a sorflush on B, + * which results in another closef on A. Unfortunately, A is already + * being closed, and the descriptor has already been marked with + * SS_NOFDREF, and soclose panics at this point. + * + * Here, we first take an extra reference to each inaccessible + * descriptor. Then, we call sorflush ourself, since we know + * it is a Unix domain socket anyhow. After we destroy all the + * rights carried in messages, we do a last closef to get rid + * of our extra reference.
This is the last close, and the + * unp_detach etc will shut down the socket. + * + * 91/09/19, bsy@cs.cmu.edu + */ + extra_ref = _MALLOC(nfiles * sizeof(struct file *), M_FILE, M_WAITOK); + for (nunref = 0, fp = filehead.lh_first, fpp = extra_ref; fp != 0; + fp = nextfp) { + nextfp = fp->f_list.le_next; + /* + * If it's not open, skip it + */ + if (fcount(fp) == 0) + continue; + /* + * If all refs are from msgs, and it's not marked accessible + * then it must be referenced from some unreachable cycle + * of (shut-down) FDs, so include it in our + * list of FDs to remove + */ + if (fcount(fp) == fp->f_msgcount && !(fp->f_flag & FMARK)) { + *fpp++ = fp; + nunref++; + fref(fp); + } + } + /* + * for each FD on our hit list, do the following two things + */ + for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) { + struct file *tfp = *fpp; + if (tfp->f_type == DTYPE_SOCKET && tfp->f_data != NULL) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + sorflush((struct socket *)(tfp->f_data)); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + } + } + + + for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) + closef(*fpp, (struct proc *) NULL); + FREE((caddr_t)extra_ref, M_FILE); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + unp_gcing = 0; +} + +void +unp_dispose(m) + struct mbuf *m; +{ + + if (m) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + unp_scan(m, unp_discard); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } +} + +/* should run under kernel funnel */ +static void +unp_scan(m0, op) + register struct mbuf *m0; + void (*op) __P((struct file *)); +{ + register struct mbuf *m; + register struct file **rp; + register struct cmsghdr *cm; + register int i; + int qfds; + + while (m0) { + for (m = m0; m; m = m->m_next) + if (m->m_type == MT_CONTROL && + m->m_len >= sizeof(*cm)) { + cm = mtod(m, struct cmsghdr *); + if (cm->cmsg_level != SOL_SOCKET || + cm->cmsg_type != SCM_RIGHTS) + continue; + qfds = (cm->cmsg_len - sizeof *cm) + 
/ sizeof (struct file *); + rp = (struct file **)(cm + 1); + for (i = 0; i < qfds; i++) + (*op)(*rp++); + break; /* XXX, but saves time */ + } + m0 = m0->m_act; + } +} + +/* should run under kernel funnel */ +static void +unp_mark(fp) + struct file *fp; +{ + + if (fp->f_flag & FMARK) + return; + unp_defer++; + fp->f_flag |= (FMARK|FDEFER); +} + +/* should run under kernel funnel */ +static void +unp_discard(fp) + struct file *fp; +{ + + fp->f_msgcount--; + unp_rights--; + (void) closef(fp, (struct proc *)NULL); +} diff --git a/bsd/libkern/Makefile b/bsd/libkern/Makefile new file mode 100644 index 000000000..32aaccb2e --- /dev/null +++ b/bsd/libkern/Makefile @@ -0,0 +1,25 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + + +DATAFILES = \ + libkern.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = libkern + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = libkern + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/libkern/bcd.c b/bsd/libkern/bcd.c new file mode 100644 index 000000000..02f3fde8b --- /dev/null +++ b/bsd/libkern/bcd.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Some data-tables that are often used. + * Cannot be copyrighted. + */ + +#include "libkern.h" + +u_char const bcd2bin_data[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 0, 0, 0, 0, 0, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0, 0, 0, 0, 0, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 0, 0, 0, 0, 0, 0, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 0, 0, 0, 0, 0, 0, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 0, 0, 0, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 0, 0, 0, 0, 0, 0, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 0, 0, 0, 0, 0, 0, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 +}; + +u_char const bin2bcd_data[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, + 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99 +}; + +/* This is actually used with radix [2..36] */ +char const hex2ascii_data[] = "0123456789abcdefghijklmnopqrstuvwxyz"; diff 
--git a/bsd/libkern/bcmp.c b/bsd/libkern/bcmp.c new file mode 100644 index 000000000..e315fee7d --- /dev/null +++ b/bsd/libkern/bcmp.c @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#warning bcmp() not optimized for ppc +#if defined(ppc) +/* HP, i386 has its own tuned version of bcmp, so should ppc! */ + +#if defined(LIBC_SCCS) && !defined(lint) +/* static char sccsid[] = "@(#)bcmp.c 8.1 (Berkeley) 6/4/93"; */ +#endif /* LIBC_SCCS and not lint */ + +#include + +/* + * bcmp -- vax cmpc3 instruction + */ +int +bcmp(b1, b2, length) + const void *b1, *b2; + register size_t length; +{ + register char *p1, *p2; + + if (length == 0) + return(0); + p1 = (char *)b1; + p2 = (char *)b2; + do + if (*p1++ != *p2++) + break; + while (--length); + return(length); +} + +#endif /* ppc */ diff --git a/bsd/libkern/inet_ntoa.c b/bsd/libkern/inet_ntoa.c new file mode 100644 index 000000000..0925e8a3c --- /dev/null +++ b/bsd/libkern/inet_ntoa.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright 1994, 1995 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include + +#include + +char * +inet_ntoa(struct in_addr ina) +{ + static char buf[4*sizeof "123"]; + unsigned char *ucp = (unsigned char *)&ina; + + sprintf(buf, "%d.%d.%d.%d", + ucp[0] & 0xff, + ucp[1] & 0xff, + ucp[2] & 0xff, + ucp[3] & 0xff); + return buf; +} + diff --git a/bsd/libkern/libkern.h b/bsd/libkern/libkern.h new file mode 100644 index 000000000..25511e503 --- /dev/null +++ b/bsd/libkern/libkern.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)libkern.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _LIBKERN_LIBKERN_H_ +#define _LIBKERN_LIBKERN_H_ + +#include +#include + +/* BCD conversions. 
*/ +extern u_char const bcd2bin_data[]; +extern u_char const bin2bcd_data[]; +extern char const hex2ascii_data[]; + +#define bcd2bin(bcd) (bcd2bin_data[bcd]) +#define bin2bcd(bin) (bin2bcd_data[bin]) +#define hex2ascii(hex) (hex2ascii_data[hex]) + + +__BEGIN_DECLS +static inline int +imax(int a, int b) +{ + return (a > b ? a : b); +} +static inline int +imin(int a, int b) +{ + return (a < b ? a : b); +} +static inline long +lmax(long a, long b) +{ + return (a > b ? a : b); +} +static inline long +lmin(long a, long b) +{ + return (a < b ? a : b); +} +static inline u_int +max(u_int a, u_int b) +{ + return (a > b ? a : b); +} +static inline u_int +min(u_int a, u_int b) +{ + return (a < b ? a : b); +} +static inline u_long +ulmax(u_long a, u_long b) +{ + return (a > b ? a : b); +} +static inline u_long +ulmin(u_long a, u_long b) +{ + return (a < b ? a : b); +} + +/* Prototypes for non-quad routines. */ +int bcmp __P((const void *, const void *, size_t)); +int ffs __P((int)); +int locc __P((int, char *, u_int)); +u_long random __P((void)); +char *rindex __P((const char *, int)); +int scanc __P((u_int, u_char *, u_char *, int)); +int skpc __P((int, int, char *)); +char *strcat __P((char *, const char *)); +char *strcpy __P((char *, const char *)); +size_t strlen __P((const char *)); +char *strncpy __P((char *, const char *, size_t)); +long strtol __P((const char*, char **, int)); +__END_DECLS + +#endif /* _LIBKERN_LIBKERN_H_ */ diff --git a/bsd/libkern/locc.c b/bsd/libkern/locc.c new file mode 100644 index 000000000..29572e4ee --- /dev/null +++ b/bsd/libkern/locc.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)locc.c 8.1 (Berkeley) 6/10/93 + */ + +#include + +int +locc(mask0, cp0, size) + int mask0; + char *cp0; + u_int size; +{ + register u_char *cp, *end, mask; + + mask = mask0; + cp = (u_char *)cp0; + for (end = &cp[size]; cp < end && *cp != mask; ++cp); + return (end - cp); +} diff --git a/bsd/libkern/random.c b/bsd/libkern/random.c new file mode 100644 index 000000000..aed1b355b --- /dev/null +++ b/bsd/libkern/random.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)random.c 8.1 (Berkeley) 6/10/93 + */ + +/* + * Modification History + * + * Feb 22, 1999 Dieter Siegmund (dieter@apple.com) + * - the first time, set the random seed to the microsecond time value + * to make the random numbers less predictable + */ +#include +#include + +/* + * Pseudo-random number generator for randomizing the profiling clock, + * and whatever else we might use it for. The result is uniform on + * [0, 2^31 - 1]. + */ +u_long +random() +{ + static int first = 1; + static u_long randseed = 1; + + register long x, hi, lo, t; + + if (first) { + struct timeval tv; + microtime(&tv); + randseed = tv.tv_usec; + if(!randseed) + randseed=1; + first = 0; + } + + /* + * Compute x[n + 1] = (7^5 * x[n]) mod (2^31 - 1). + * From "Random number generators: good ones are hard to find", + * Park and Miller, Communications of the ACM, vol. 31, no. 10, + * October 1988, p. 1195. + */ + x = randseed; + hi = x / 127773; + lo = x % 127773; + t = 16807 * lo - 2836 * hi; + if (t <= 0) + t += 0x7fffffff; + randseed = t; + return (t); +} diff --git a/bsd/libkern/rindex.c b/bsd/libkern/rindex.c new file mode 100644 index 000000000..a8d4044a0 --- /dev/null +++ b/bsd/libkern/rindex.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +/* static char sccsid[] = "@(#)rindex.c 8.1 (Berkeley) 6/4/93"; */ +#endif /* LIBC_SCCS and not lint */ + +#include + +char * +#ifdef STRRCHR +strrchr(p, ch) +#else +rindex(p, ch) +#endif + register const char *p; + register int ch; +{ + register char *save; + + for (save = NULL;; ++p) { + if (*p == ch) + save = (char *)p; + if (!*p) + return(save); + } + /* NOTREACHED */ +} diff --git a/bsd/libkern/scanc.c b/bsd/libkern/scanc.c new file mode 100644 index 000000000..5be4e6f2f --- /dev/null +++ b/bsd/libkern/scanc.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)scanc.c 8.1 (Berkeley) 6/10/93 + */ + +#include + +int +scanc(size, cp, table, mask0) + u_int size; + register u_char *cp, table[]; + int mask0; +{ + register u_char *end; + register u_char mask; + + mask = mask0; + for (end = &cp[size]; cp < end && (table[*cp] & mask) == 0; ++cp); + return (end - cp); +} diff --git a/bsd/libkern/skpc.c b/bsd/libkern/skpc.c new file mode 100644 index 000000000..68110e8a5 --- /dev/null +++ b/bsd/libkern/skpc.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)skpc.c 8.1 (Berkeley) 6/10/93 + */ + +#include + +int +skpc(mask0, size, cp0) + int mask0; + int size; + char *cp0; +{ + register u_char *cp, *end, mask; + + mask = mask0; + cp = (u_char *)cp0; + for (end = &cp[size]; cp < end && *cp == mask; ++cp); + return (end - cp); +} diff --git a/bsd/libkern/strtol.c b/bsd/libkern/strtol.c new file mode 100644 index 000000000..440d7e587 --- /dev/null +++ b/bsd/libkern/strtol.c @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All rights reserved. + * + * strol.c - The functions strtol() & strtoul() are exported as public API + * via the header file ~driverkit/generalFuncs.h + * + * HISTORY + * 25-Oct-1995 Dean Reece at NeXT + * Created based on BSD4.4's strtol.c & strtoul.c. + * Removed dependency on _ctype_ by static versions of isupper()... + * Added support for "0b101..." binary constants. + * Commented out references to errno. + */ + +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
/*
 * Minimal, locale-independent replacements for the <ctype.h> classifiers
 * used by strtol()/strtoul() below.  They exist so this file does not
 * depend on the C library's _ctype_ table (see the HISTORY note above).
 * ASCII only.
 */

static inline int
isupper(char c)
{
	return (c >= 'A' && c <= 'Z');
}

static inline int
isalpha(char c)
{
	return ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'));
}

/*
 * BUG FIX: the original tested (c == '\n' || c == '\12'), but '\12' IS
 * '\n' (octal 12 == decimal 10), so the second comparison was dead code
 * and the standard white-space characters '\v', '\f' and '\r' were never
 * skipped by strtol()/strtoul().  Recognize the full C white-space set,
 * matching libc isspace() in the "C" locale.
 */
static inline int
isspace(char c)
{
	return (c == ' ' || c == '\t' || c == '\n' ||
	    c == '\v' || c == '\f' || c == '\r');
}

static inline int
isdigit(char c)
{
	return (c >= '0' && c <= '9');
}
/*
 * Convert the initial portion of `nptr' to a long integer.
 *
 * Skips leading white space, accepts an optional +/- sign, and honours
 * base prefixes when base is 0 (leading "0x"/"0X" -> 16, "0b"/"0B" -> 2,
 * a NeXT extension, leading "0" -> 8, otherwise 10).  "0x" is also
 * accepted when base == 16, and "0b" when base == 2.
 *
 * On overflow the result is clamped to LONG_MIN/LONG_MAX (errno is NOT
 * set in this kernel version; see the commented-out ERANGE lines).  If
 * `endptr' is non-NULL it receives a pointer to the first unconsumed
 * character, or `nptr' itself if no digits were consumed.
 *
 * Ignores `locale' stuff.  Assumes that the upper and lower case
 * alphabets and digits are each contiguous (ASCII).
 */
long
strtol(const char *nptr, char **endptr, int base)
{
	const char *s = nptr;
	unsigned long acc;
	int c;
	unsigned long cutoff;
	int neg = 0, any, cutlim;

	/*
	 * Skip white space and pick up leading +/- sign if any.
	 * If base is 0, allow 0x for hex, 0b for binary and a leading 0
	 * for octal, else assume decimal; if base is already 16 (or 2),
	 * allow 0x (or 0b).
	 */
	do {
		c = *s++;
	} while (isspace(c));
	if (c == '-') {
		neg = 1;
		c = *s++;
	} else if (c == '+')
		c = *s++;
	if ((base == 0 || base == 16) &&
	    c == '0' && (*s == 'x' || *s == 'X')) {
		c = s[1];
		s += 2;
		base = 16;
	} else if ((base == 0 || base == 2) &&
	    c == '0' && (*s == 'b' || *s == 'B')) {
		c = s[1];
		s += 2;
		base = 2;
	}
	if (base == 0)
		base = c == '0' ? 8 : 10;

	/*
	 * cutoff is the largest legal accumulated value divided by the
	 * base; cutlim is the largest legal final digit when acc ==
	 * cutoff.  E.g. for 32-bit longs, base 10: cutoff == 214748364,
	 * cutlim == 7 (positive) or 8 (negative).  `any' goes negative
	 * once overflow is detected.
	 */
	cutoff = neg ? -(unsigned long)LONG_MIN : LONG_MAX;
	cutlim = cutoff % (unsigned long)base;
	cutoff /= (unsigned long)base;
	for (acc = 0, any = 0;; c = *s++) {
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= base)
			break;
		/* Parenthesized the &&-term for clarity (same semantics). */
		if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
			any = -1;
		else {
			any = 1;
			acc *= base;
			acc += c;
		}
	}
	if (any < 0) {
		acc = neg ? LONG_MIN : LONG_MAX;
/*		errno = ERANGE; */
	} else if (neg)
		acc = -acc;
	if (endptr != 0)
		*endptr = (char *)(any ? s - 1 : nptr);
	return (acc);
}

/*
 * Convert the initial portion of `nptr' to an unsigned long integer.
 *
 * Same parsing rules as strtol() above, including the "0b" binary
 * extension.  A leading '-' is accepted and the result negated in
 * unsigned arithmetic, as the C standard specifies.  On overflow the
 * result is ULONG_MAX (errno is NOT set here).
 */
unsigned long
strtoul(const char *nptr, char **endptr, int base)
{
	const char *s = nptr;
	unsigned long acc;
	int c;
	unsigned long cutoff;
	int neg = 0, any, cutlim;

	/* See strtol for comments as to the logic used. */
	do {
		c = *s++;
	} while (isspace(c));
	if (c == '-') {
		neg = 1;
		c = *s++;
	} else if (c == '+')
		c = *s++;
	if ((base == 0 || base == 16) &&
	    c == '0' && (*s == 'x' || *s == 'X')) {
		c = s[1];
		s += 2;
		base = 16;
	} else if ((base == 0 || base == 2) &&
	    c == '0' && (*s == 'b' || *s == 'B')) {
		c = s[1];
		s += 2;
		base = 2;
	}
	if (base == 0)
		base = c == '0' ? 8 : 10;
	cutoff = (unsigned long)ULONG_MAX / (unsigned long)base;
	cutlim = (unsigned long)ULONG_MAX % (unsigned long)base;
	for (acc = 0, any = 0;; c = *s++) {
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= base)
			break;
		/* Parenthesized the &&-term for clarity (same semantics). */
		if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
			any = -1;
		else {
			any = 1;
			acc *= base;
			acc += c;
		}
	}
	if (any < 0) {
		acc = ULONG_MAX;
/*		errno = ERANGE; */
	} else if (neg)
		acc = -acc;
	if (endptr != 0)
		*endptr = (char *)(any ? s - 1 : nptr);
	return (acc);
}
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * The NEXTSTEP Software License Agreement specifies the terms
 * and conditions for redistribution.
 *
 */

#ifndef _MACHINE_ANSI_H_
#define _MACHINE_ANSI_H_

/*
 * Indirection header: pulls in the per-architecture ANSI type
 * definitions, selected by the compiler-predefined architecture macro.
 * Unsupported architectures fail the build explicitly.
 */
#if defined (__ppc__)
#include "ppc/ansi.h"
#elif defined (__i386__)
#include "i386/ansi.h"
#else
#error architecture not supported
#endif


#endif /* _MACHINE_ANSI_H_ */
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_CONS_H_ +#define _BSD_MACHINE_CONS_H_ + + +#if defined (__ppc__) +#include +#elif defined (__i386__) +#include +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_CONS_H_ */ diff --git a/bsd/machine/cpu.h b/bsd/machine/cpu.h new file mode 100644 index 000000000..36ca09b25 --- /dev/null +++ b/bsd/machine/cpu.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
#ifndef _BSD_MACHINE_CPU_H_
#define _BSD_MACHINE_CPU_H_

/*
 * Indirection header: pulls in the per-architecture CPU definitions,
 * selected by the compiler-predefined architecture macro.
 */
#if defined (__ppc__)
#include "ppc/cpu.h"
#elif defined (__i386__)
#include "i386/cpu.h"
#else
#error architecture not supported
#endif


#endif /* _BSD_MACHINE_CPU_H_ */
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_CPU_H_ +#define _BSD_MACHINE_CPU_H_ + + +#if defined (__ppc__) +#include "ppc/disklabel.h" +#elif defined (__i386__) +#include "i386/disklabel.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_CPU_H_ */ diff --git a/bsd/machine/endian.h b/bsd/machine/endian.h new file mode 100644 index 000000000..4aa1ad8f4 --- /dev/null +++ b/bsd/machine/endian.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1995 NeXT Computer, Inc. All rights reserved. + */ +#ifndef _BSD_MACHINE_ENDIAN_H_ +#define _BSD_MACHINE_ENDIAN_H_ + + +#if defined (__ppc__) +#include "ppc/endian.h" +#elif defined (__i386__) +#include "i386/endian.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_ENDIAN_H_ */ diff --git a/bsd/machine/exec.h b/bsd/machine/exec.h new file mode 100644 index 000000000..979093289 --- /dev/null +++ b/bsd/machine/exec.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
/*
 * Copyright 1995 NeXT Computer, Inc. All rights reserved.
 */
#ifndef _BSD_MACHINE_EXEC_H_
#define _BSD_MACHINE_EXEC_H_

/*
 * Indirection header: pulls in the per-architecture exec(2) format
 * definitions, selected by the compiler-predefined architecture macro.
 */
#if defined (__ppc__)
#include "ppc/exec.h"
#elif defined (__i386__)
#include "i386/exec.h"
#else
#error architecture not supported
#endif


#endif /* _BSD_MACHINE_EXEC_H_ */
#ifndef _BSD_MACHINE_LABEL_T_H_
#define _BSD_MACHINE_LABEL_T_H_

/*
 * Indirection header: pulls in the per-architecture label_t (setjmp-
 * style kernel context) definition, selected by the compiler-predefined
 * architecture macro.
 */
#if defined (__ppc__)
#include "ppc/label_t.h"
#elif defined (__i386__)
#include "i386/label_t.h"
#else
#error architecture not supported
#endif


#endif /* _BSD_MACHINE_LABEL_T_H_ */
#ifndef _BSD_MACHINE_PARAM_H_
#define _BSD_MACHINE_PARAM_H_

/*
 * Indirection header: pulls in the per-architecture machine parameters,
 * selected by the compiler-predefined architecture macro.
 */
#if defined (__ppc__)
#include "ppc/param.h"
#elif defined (__i386__)
#include "i386/param.h"
#else
#error architecture not supported
#endif


#endif /* _BSD_MACHINE_PARAM_H_ */
/*
 * Copyright 1997 Apple Computer, Inc. All rights reserved.
 *
 * History :
 * 29-Sep-1997 Umesh Vaishampayan
 * Created.
 */
#ifndef _BSD_MACHINE_PROFILE_H_
#define _BSD_MACHINE_PROFILE_H_

/*
 * Indirection header: pulls in the per-architecture kernel profiling
 * definitions, selected by the compiler-predefined architecture macro.
 */
#if defined (__ppc__)
#include "ppc/profile.h"
#elif defined (__i386__)
#include "i386/profile.h"
#else
#error architecture not supported
#endif


#endif /* _BSD_MACHINE_PROFILE_H_ */
#ifndef _BSD_MACHINE_PSL_H_
#define _BSD_MACHINE_PSL_H_

/*
 * Indirection header: pulls in the per-architecture processor status
 * word (PSL) definitions, selected by the compiler-predefined
 * architecture macro.
 */
#if defined (__ppc__)
#include "ppc/psl.h"
#elif defined (__i386__)
#include "i386/psl.h"
#else
#error architecture not supported
#endif


#endif /* _BSD_MACHINE_PSL_H_ */
+ */ +#ifndef _BSD_MACHINE_PTRACE_H_ +#define _BSD_MACHINE_PTRACE_H_ + + +#if defined (__ppc__) +#include "ppc/ptrace.h" +#elif defined (__i386__) +#include "i386/ptrace.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_PTRACE_H_ */ diff --git a/bsd/machine/reboot.h b/bsd/machine/reboot.h new file mode 100644 index 000000000..368fa0aad --- /dev/null +++ b/bsd/machine/reboot.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_REBOOT_H_ +#define _BSD_MACHINE_REBOOT_H_ + + +#if defined (__ppc__) +#include "ppc/reboot.h" +#elif defined (__i386__) +#include "i386/reboot.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_REBOOT_H_ */ diff --git a/bsd/machine/reg.h b/bsd/machine/reg.h new file mode 100644 index 000000000..add5145e2 --- /dev/null +++ b/bsd/machine/reg.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_REG_H_ +#define _BSD_MACHINE_REG_H_ + + +#if defined (__ppc__) +#include "ppc/reg.h" +#elif defined (__i386__) +#include "i386/reg.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_REG_H_ */ diff --git a/bsd/machine/setjmp.h b/bsd/machine/setjmp.h new file mode 100644 index 000000000..c39a8ea77 --- /dev/null +++ b/bsd/machine/setjmp.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + */ + +#ifndef _MACHINE_SETJMP_H_ +#define _MACHINE_SETJMP_H_ + + +#if defined (__ppc__) +#include "ppc/setjmp.h" +#elif defined (__i386__) +#include "i386/setjmp.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_SETJMP_H_ */ diff --git a/bsd/machine/signal.h b/bsd/machine/signal.h new file mode 100644 index 000000000..b7c7300f1 --- /dev/null +++ b/bsd/machine/signal.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_SIGNAL_H_ +#define _BSD_MACHINE_SIGNAL_H_ + + +#if defined (__ppc__) +#include "ppc/signal.h" +#elif defined (__i386__) +#include "i386/signal.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_SIGNAL_H_ */ diff --git a/bsd/machine/spl.h b/bsd/machine/spl.h new file mode 100644 index 000000000..89d75fad5 --- /dev/null +++ b/bsd/machine/spl.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_SPL_H_ +#define _BSD_MACHINE_SPL_H_ + + +#if defined (__ppc__) +#include "ppc/spl.h" +#elif defined (__i386__) +#include "i386/spl.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_SPL_H_ */ diff --git a/bsd/machine/table.h b/bsd/machine/table.h new file mode 100644 index 000000000..e71d1101c --- /dev/null +++ b/bsd/machine/table.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_TABLE_H_ +#define _BSD_MACHINE_TABLE_H_ + + +#if defined (__ppc__) +#include "ppc/table.h" +#elif defined (__i386__) +#include "i386/table.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_TABLE_H_ */ diff --git a/bsd/machine/trap.h b/bsd/machine/trap.h new file mode 100644 index 000000000..6dd0fe1c7 --- /dev/null +++ b/bsd/machine/trap.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1995 NeXT Computer, Inc. All rights reserved. + */ +#ifndef _BSD_MACHINE_TRAP_H_ +#define _BSD_MACHINE_TRAP_H_ + + +#if defined (__ppc__) +#include "ppc/trap.h" +#elif defined (__i386__) +#include "i386/trap.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_TRAP_H_ */ diff --git a/bsd/machine/types.h b/bsd/machine/types.h new file mode 100644 index 000000000..f5ade7c2f --- /dev/null +++ b/bsd/machine/types.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1995 NeXT Computer, Inc. All rights reserved. 
+ */ +#ifndef _BSD_MACHINE_TYPES_H_ +#define _BSD_MACHINE_TYPES_H_ + + +#if defined (__ppc__) +#include "ppc/types.h" +#elif defined (__i386__) +#include "i386/types.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_TYPES_H_ */ diff --git a/bsd/machine/unix_traps.h b/bsd/machine/unix_traps.h new file mode 100644 index 000000000..fc94186c8 --- /dev/null +++ b/bsd/machine/unix_traps.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_UNIX_TRAPS_H_ +#define _BSD_MACHINE_UNIX_TRAPS_H_ + + +#if defined (__ppc__) +#include "ppc/unix_traps.h" +#elif defined (__i386__) +#include "i386/unix_traps.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_UNIX_TRAPS_H_ */ diff --git a/bsd/machine/user.h b/bsd/machine/user.h new file mode 100644 index 000000000..4aaf1bbf0 --- /dev/null +++ b/bsd/machine/user.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_USER_H_ +#define _BSD_MACHINE_USER_H_ + + +#if defined (__ppc__) +#include "ppc/user.h" +#elif defined (__i386__) +#include "i386/user.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_USER_H_ */ diff --git a/bsd/machine/vmparam.h b/bsd/machine/vmparam.h new file mode 100644 index 000000000..d1375d280 --- /dev/null +++ b/bsd/machine/vmparam.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_MACHINE_VMPARAM_H_ +#define _BSD_MACHINE_VMPARAM_H_ + + +#if defined (__ppc__) +#include "ppc/vmparam.h" +#elif defined (__i386__) +#include "i386/vmparam.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_MACHINE_VMPARAM_H_ */ diff --git a/bsd/miscfs/Makefile b/bsd/miscfs/Makefile new file mode 100644 index 000000000..0aade1967 --- /dev/null +++ b/bsd/miscfs/Makefile @@ -0,0 +1,40 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + devfs \ + fdesc \ + specfs \ + union + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + devfs \ + fdesc \ + specfs \ + union + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +SETUP_SUBDIRS = \ + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/miscfs/deadfs/dead_vnops.c b/bsd/miscfs/deadfs/dead_vnops.c new file mode 100644 index 000000000..d6145e093 --- /dev/null +++ b/bsd/miscfs/deadfs/dead_vnops.c @@ -0,0 +1,464 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dead_vnops.c 8.3 (Berkeley) 5/14/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Prototypes for dead operations on vnodes. + */ +int dead_badop(), + dead_ebadf(); +int dead_lookup __P((struct vop_lookup_args *)); +#define dead_create ((int (*) __P((struct vop_create_args *)))dead_badop) +#define dead_mknod ((int (*) __P((struct vop_mknod_args *)))dead_badop) +int dead_open __P((struct vop_open_args *)); +#define dead_close ((int (*) __P((struct vop_close_args *)))nullop) +#define dead_access ((int (*) __P((struct vop_access_args *)))dead_ebadf) +#define dead_getattr ((int (*) __P((struct vop_getattr_args *)))dead_ebadf) +#define dead_setattr ((int (*) __P((struct vop_setattr_args *)))dead_ebadf) +int dead_read __P((struct vop_read_args *)); +int dead_write __P((struct vop_write_args *)); +int dead_ioctl __P((struct vop_ioctl_args *)); +int dead_select __P((struct vop_select_args *)); +#define dead_mmap ((int (*) __P((struct vop_mmap_args *)))dead_badop) +#define dead_fsync ((int (*) __P((struct vop_fsync_args *)))nullop) +#define dead_seek ((int (*) __P((struct vop_seek_args *)))nullop) +#define dead_remove ((int (*) __P((struct 
vop_remove_args *)))dead_badop) +#define dead_link ((int (*) __P((struct vop_link_args *)))dead_badop) +#define dead_rename ((int (*) __P((struct vop_rename_args *)))dead_badop) +#define dead_mkdir ((int (*) __P((struct vop_mkdir_args *)))dead_badop) +#define dead_rmdir ((int (*) __P((struct vop_rmdir_args *)))dead_badop) +#define dead_symlink ((int (*) __P((struct vop_symlink_args *)))dead_badop) +#define dead_readdir ((int (*) __P((struct vop_readdir_args *)))dead_ebadf) +#define dead_readlink ((int (*) __P((struct vop_readlink_args *)))dead_ebadf) +#define dead_abortop ((int (*) __P((struct vop_abortop_args *)))dead_badop) +#define dead_inactive ((int (*) __P((struct vop_inactive_args *)))nullop) +#define dead_reclaim ((int (*) __P((struct vop_reclaim_args *)))nullop) +int dead_lock __P((struct vop_lock_args *)); +#define dead_unlock ((int (*) __P((struct vop_unlock_args *)))nullop) +int dead_bmap __P((struct vop_bmap_args *)); +int dead_strategy __P((struct vop_strategy_args *)); +int dead_print __P((struct vop_print_args *)); +#define dead_islocked ((int (*) __P((struct vop_islocked_args *)))nullop) +#define dead_pathconf ((int (*) __P((struct vop_pathconf_args *)))dead_ebadf) +#define dead_advlock ((int (*) __P((struct vop_advlock_args *)))dead_ebadf) +#define dead_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))dead_badop) +#define dead_valloc ((int (*) __P((struct vop_valloc_args *)))dead_badop) +#define dead_vfree ((int (*) __P((struct vop_vfree_args *)))dead_badop) +#define dead_truncate ((int (*) __P((struct vop_truncate_args *)))nullop) +#define dead_update ((int (*) __P((struct vop_update_args *)))nullop) +#define dead_bwrite ((int (*) __P((struct vop_bwrite_args *)))nullop) +int dead_pagein __P((struct vop_pagein_args *)); +int dead_pageout __P((struct vop_pageout_args *)); +int dead_blktooff __P((struct vop_blktooff_args *)); +int dead_offtoblk __P((struct vop_offtoblk_args *)); +int dead_cmap __P((struct vop_cmap_args *)); + +#define VOPFUNC 
int (*)(void *) +int (**dead_vnodeop_p)(void *); +struct vnodeopv_entry_desc dead_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)dead_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)dead_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)dead_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)dead_open }, /* open */ + { &vop_close_desc, (VOPFUNC)dead_close }, /* close */ + { &vop_access_desc, (VOPFUNC)dead_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)dead_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)dead_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)dead_read }, /* read */ + { &vop_write_desc, (VOPFUNC)dead_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)dead_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)dead_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)dead_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)dead_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)dead_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)dead_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)dead_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)dead_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)dead_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)dead_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)dead_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)dead_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)dead_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)dead_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)dead_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)dead_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)dead_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)dead_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)dead_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)dead_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)dead_print }, /* print */ + { &vop_islocked_desc, 
(VOPFUNC)dead_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)dead_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)dead_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)dead_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)dead_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)dead_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)dead_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)dead_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)dead_bwrite }, /* bwrite */ + { &vop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)dead_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)dead_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)dead_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc dead_vnodeop_opv_desc = + { &dead_vnodeop_p, dead_vnodeop_entries }; + +/* + * Trivial lookup routine that always fails. + */ +/* ARGSUSED */ +int +dead_lookup(ap) + struct vop_lookup_args /* { + struct vnode * a_dvp; + struct vnode ** a_vpp; + struct componentname * a_cnp; + } */ *ap; +{ + + *ap->a_vpp = NULL; + return (ENOTDIR); +} + +/* + * Open always fails as if device did not exist. 
+ */ +/* ARGSUSED */ +int +dead_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + return (ENXIO); +} + +/* + * Vnode op for read + */ +/* ARGSUSED */ +int +dead_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + + if (chkvnlock(ap->a_vp)) + panic("dead_read: lock"); + /* + * Return EOF for character devices, EIO for others + */ + if (ap->a_vp->v_type != VCHR) + return (EIO); + return (0); +} + +/* + * Vnode op for write + */ +/* ARGSUSED */ +int +dead_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + + if (chkvnlock(ap->a_vp)) + panic("dead_write: lock"); + return (EIO); +} + +/* + * Device ioctl operation. + */ +/* ARGSUSED */ +int +dead_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + u_long a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + if (!chkvnlock(ap->a_vp)) + return (EBADF); + return (VCALL(ap->a_vp, VOFFSET(vop_ioctl), ap)); +} + +/* ARGSUSED */ +int +dead_select(ap) + struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + /* + * Let the user find out that the descriptor is gone. + */ + return (1); +} + +/* + * Just call the device strategy routine + */ +int +dead_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + + if (ap->a_bp->b_vp == NULL || !chkvnlock(ap->a_bp->b_vp)) { + ap->a_bp->b_flags |= B_ERROR; + biodone(ap->a_bp); + return (EIO); + } + return (VOP_STRATEGY(ap->a_bp)); +} + +/* + * Wait until the vnode has finished changing state. 
+ */ +int +dead_lock(ap) + struct vop_lock_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + struct vnode *vp = ap->a_vp; + + /* + * Since we are not using the lock manager, we must clear + * the interlock here. + */ + if (ap->a_flags & LK_INTERLOCK) { + simple_unlock(&vp->v_interlock); + ap->a_flags &= ~LK_INTERLOCK; + } + if (!chkvnlock(ap->a_vp)) + return (0); + return (VCALL(ap->a_vp, VOFFSET(vop_lock), ap)); +} + +/* + * Wait until the vnode has finished changing state. + */ +int +dead_bmap(ap) + struct vop_bmap_args /* { + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + int *a_runp; + } */ *ap; +{ + + if (!chkvnlock(ap->a_vp)) + return (EIO); + return (VOP_BMAP(ap->a_vp, ap->a_bn, ap->a_vpp, ap->a_bnp, ap->a_runp)); +} + +/* + * Wait until the vnode has finished changing state. + */ +int +dead_cmap(ap) + struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_foffset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; + } */ *ap; +{ + + if (!chkvnlock(ap->a_vp)) + return (EIO); + return (VOP_CMAP(ap->a_vp, ap->a_foffset, ap->a_size, ap->a_bpn, ap->a_run, ap->a_poff)); +} + +/* + * Print out the contents of a dead vnode. + */ +/* ARGSUSED */ +int +dead_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + printf("tag VT_NON, dead vnode\n"); +} + +/* + * Empty vnode failed operation + */ +int +dead_ebadf() +{ + + return (EBADF); +} + +/* + * Empty vnode bad operation + */ +int +dead_badop() +{ + + panic("dead_badop called"); + /* NOTREACHED */ +} + +/* + * Empty vnode null operation + */ +int +dead_nullop() +{ + + return (0); +} + +/* + * We have to wait during times when the vnode is + * in a state of change. 
+ */ +int +chkvnlock(vp) + register struct vnode *vp; +{ + int locked = 0; + + while (vp->v_flag & VXLOCK) { + vp->v_flag |= VXWANT; + sleep((caddr_t)vp, PINOD); + locked = 1; + } + return (locked); +} + + +/* Blktooff */ +int +dead_blktooff(ap) + struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; + } */ *ap; +{ + if (!chkvnlock(ap->a_vp)) + return (EIO); + + *ap->a_offset = (off_t)-1; /* failure */ + return (0); +} +/* Blktooff */ +int +dead_offtoblk(ap) +struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; + } */ *ap; +{ + if (!chkvnlock(ap->a_vp)) + return (EIO); + + *ap->a_lblkno = (daddr_t)-1; /* failure */ + return (0); +} diff --git a/bsd/miscfs/devfs/Makefile b/bsd/miscfs/devfs/Makefile new file mode 100644 index 000000000..b94c89528 --- /dev/null +++ b/bsd/miscfs/devfs/Makefile @@ -0,0 +1,41 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + devfs.h + +PRIVATE_DATAFILES = \ + devfs_proto.h devfsdefs.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = miscfs/devfs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = miscfs/devfs + +INSTALL_MI_LIST = ${DATAFILES} ${PRIVATE_DATAFILES} + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/miscfs/devfs/README b/bsd/miscfs/devfs/README new file mode 100644 index 000000000..412eeeada --- /dev/null +++ b/bsd/miscfs/devfs/README @@ -0,0 +1,118 @@ +Note: The following comments are from the original FreeBSD 3.1 README + +this file is: /sys/miscfs/devfs/README + +to enable: add +options DEVFS + +to your config file.. 
+expect it to be highly useless for a while, +as the only devices that register themselves are the floppy, +the pcaudio stuff, speaker, null,mem,zero,io,kmem. + +it works like this: + +There is a tree of nodes that describe the layout of the DEVFS as seen by +the drivers.. they add nodes to this tree. This is called the 'back' layer +for reasons that will become obvious in a second. Think of it as a +BLUEPRINT of the DEVFS tree. Each back node has associated with it +a "devnode" struct, that holds information about the device +(or directory) and a pointer to the vnode if one has been associated +with that node. The back node itself can be considered to be +a directory entry, and contains the default name of the device, +and a link to the directory that holds it. It is sometimes refered +to in the code as the dev_name. The devnode can be considered the inode. + +When you mount the devfs somewhere (you can mount it multiple times in +multiple places), a front layer is created that contains a tree of 'front' +nodes. + +Think of this as a Transparency, layed over the top of the blueprint. +(or possibly a photocopy). + +The front and back nodes are identical in type, but the back nodes +are reserved for kernel use only, and are protected from the user. +The back plane has a mount structure and all that stuff, but it is in +fact not really mounted. (and is thus not reachable via namei). +Internal kernel routines can open devices in this plane +even if the external devfs has not been mounted yet :) +(e.g. to find the root device) + +To start with there is a 1:1 relationship between the front nodes +and the backing nodes, however once the front plane has been created +the nodes can be moved around within that plane (or deleted). +Think of this as the ability to revise a transparency... +the blueprint is untouched. + +There is a "devnode" struct associated with each front note also. 
+Front nodes that refer to devices, use the same "devnode" struct that is used +by their associated backing node, so that multiple front nodes that +point to the same device will use the same "devnode" struct, and through +that, the same vnode, ops, modification times, flags, owner and group. +Front nodes representing directories and symlinks have their own +"devnode" structs, and may therefore differ. (have different vnodes) +i.e. if you have two devfs trees mounted, you can change the +directories in one without changing the other. +e.g. remove or rename nodes + +Multiple mountings are like multiple transparencies, +each showing through to the original blueprint. + +Information that is to be shared between these mounts is stored +in the 'backing' node for that object. Once you have erased 'front' +object, there is no memory of where the backing object was, and +except for the possibility of searching the entire backing tree +for the node with the correct major/minor/type, I don't see that +it is easily recovered.. Particularly as there will eventually be +(I hope) devices that go direct from the backing node to the driver +without going via the cdevsw table.. they may not even have +major/minor numbers. + +I see 'mount -u' as a possible solution to recovering a broken dev tree. +(though umount+mount would do the same) + +Because non device nodes (directories and symlinks) have their own +"devnode" structs on each layer, these may have different +flags, owners, and contents on each layer. +e.g. if you have a chroot tree like erf.tfs.com has, you +may want different permissions or owners on the chroot mount of the DEVFS +than you want in the real one. You might also want to delete some sensitive +devices from the chroot tree. + +Directories also have backing nodes but there is nothing to stop +the user from removing a front node from the directory front node. +(except permissions of course). 
This is because the front directory +nodes keep their own records as to which front nodes are members +of that directory and do not refer to their original backing node +for this information. + +The front nodes may be moved to other directories (including +directories) however this does not break the linkage between the +backing nodes and the front nodes. The backing node never moves. If +a driver decides to remove a device from the backing tree, the FS +code follows the links to all the front nodes linked to that backing +node, and deletes them, no matter where they've been moved to. +(active vnodes are redirected to point to the deadfs). + +If a directory has been moved, and a new backing node is inserted +into its own back node, the new front node will appear in that front +directory, even though it's been moved, because the directory that +gets the front node is found via the links and not by name. + +a mount -u might be considered to be a request to 'refresh' the +plane that controls to the mount being updated.. that would have the +effect of 're-propogating' through any backing nodes that find they +have no front nodes in that plane. + + +NOTES FOR RELEASE 1.2 +1/ this is very preliminary +2/ the routines have greatly simplified since release 1.1 +(I guess the break did me good :) +3/ many features are not present yet.. +e.g. symlinks, a comprehensive registration interface (only a crude one) +ability to unlink and mv nodes. +4/ I'm pretty sure my use of vnodes is bad and it may be 'losing' +them, or alternatively, corrupting things.. I need a vnode specialist +to look at this. + diff --git a/bsd/miscfs/devfs/devfs.h b/bsd/miscfs/devfs/devfs.h new file mode 100644 index 000000000..4a395d31d --- /dev/null +++ b/bsd/miscfs/devfs/devfs.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1997,1998 Julian Elischer. All rights reserved. + * julian@freebsd.org + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * miscfs/devfs/devfs.h + */ + +#ifndef _MISCFS_DEVFS_DEVFS_H_ +#define _MISCFS_DEVFS_DEVFS_H_ + +#define DEVFS_CHAR 0 +#define DEVFS_BLOCK 1 + +__BEGIN_DECLS + +/* + * Function: devfs_make_node + * + * Purpose + * Create a device node with the given pathname in the devfs namespace. + * + * Parameters: + * dev - the dev_t value to associate + * chrblk - block or character device (DEVFS_CHAR or DEVFS_BLOCK) + * uid, gid - ownership + * perms - permissions + * fmt, ... - print format string and args to format the path name + * Returns: + * A handle to a device node if successful, NULL otherwise. + */ +void * devfs_make_node __P((dev_t dev, int chrblk, uid_t uid, gid_t gid, + int perms, char *fmt, ...)); + +/* + * Function: devfs_link + * + * Purpose: + * Create a link to a previously created device node. + * + * Returns: + * 0 if successful, -1 if failed + */ +int devfs_link __P((void * handle, char *fmt, ...)); + +/* + * Function: devfs_remove + * + * Purpose: + * Remove the device node returned by devfs_make_node() along with + * any links created with devfs_link().
+ */ +void devfs_remove __P((void * handle)); + +__END_DECLS + +/* XXX */ +#define UID_ROOT 0 +#define UID_BIN 3 +#define UID_UUCP 66 + +/* XXX */ +#define GID_WHEEL 0 +#define GID_KMEM 2 +#define GID_OPERATOR 5 +#define GID_BIN 7 +#define GID_GAMES 13 +#define GID_DIALER 68 + +#endif /* !_MISCFS_DEVFS_DEVFS_H_ */ diff --git a/bsd/miscfs/devfs/devfs_proto.h b/bsd/miscfs/devfs/devfs_proto.h new file mode 100644 index 000000000..55f196b60 --- /dev/null +++ b/bsd/miscfs/devfs/devfs_proto.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* THIS FILE HAS BEEN PRODUCED AUTOMATICALLY */ +int devfs_sinit(void); +devdirent_t * dev_findname(devnode_t * dir,char *name); +int dev_add_name(char * name, devnode_t * dirnode, devdirent_t * back, + devnode_t * dnp, devdirent_t * *dirent_pp); +int dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, + devnode_t * *dn_pp, struct devfsmount *dvm); +int dev_touch(devdirent_t * key) /* update the node for this dev */; +void devnode_free(devnode_t * dnp); +void devfs_dn_free(devnode_t * dnp); +int devfs_propogate(devdirent_t * parent,devdirent_t * child); +int dev_dup_plane(struct devfsmount *devfs_mp_p); +void devfs_free_plane(struct devfsmount *devfs_mp_p); +int dev_dup_entry(devnode_t * parent, devdirent_t * back, devdirent_t * *dnm_pp, + struct devfsmount *dvm); +int dev_free_name(devdirent_t * dirent_p); +void dev_free_hier(devdirent_t * dirent_p); +int devfs_dntovn(devnode_t * dnp, struct vnode **vn_pp, struct proc * p); +int dev_add_entry(char *name, devnode_t * parent, int type, devnode_type_t * typeinfo, + devnode_t * proto, struct devfsmount *dvm, devdirent_t * *nm_pp); +int devfs_mount(struct mount *mp, char *path, caddr_t data, + struct nameidata *ndp, struct proc *p); +/* THIS FILE PRODUCED AUTOMATICALLY */ +/* DO NOT EDIT (see reproto.sh) */ diff --git a/bsd/miscfs/devfs/devfs_tree.c b/bsd/miscfs/devfs/devfs_tree.c new file mode 100644 index 000000000..8a335534a --- /dev/null +++ b/bsd/miscfs/devfs/devfs_tree.c @@ -0,0 +1,1192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright 1997,1998 Julian Elischer. All rights reserved. + * julian@freebsd.org + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * devfs_tree.c + */ + +/* + * HISTORY + * Dieter Siegmund (dieter@apple.com) Thu Apr 8 14:08:19 PDT 1999 + * - removed mounting of "hidden" mountpoint + * - fixed problem in which devnode->dn_vn pointer was not + * updated with the vnode returned from checkalias() + * - replaced devfs_vntodn() with a macro VTODN() + * - rewrote dev_finddir() to not use recursion + * - added locking to avoid data structure corruption (DEVFS_(UN)LOCK()) + * Dieter Siegmund (dieter@apple.com) Wed Jul 14 13:37:59 PDT 1999 + * - fixed problem with devfs_dntovn() checking the v_id against the + * value cached in the device node; a union mount on top of us causes + * the v_id to get incremented thus, we would end up returning a new + * vnode instead of the existing one that has the mounted_here + * field filled in; the net effect was that the filesystem mounted + * on top of us would never show up + * - added devfs_stats to store how many data structures are actually + * allocated + */ + +/* SPLIT_DEVS means each devfs uses a different devnode for the same device */ +/* Otherwise the same device always ends up at the same vnode even if */ +/* reached througgh a different devfs instance. The practical difference */ +/* is that with the same vnode, chmods and chowns show up on all instances of */ +/* a device. 
(etc) */ + +#define SPLIT_DEVS 1 /* maybe make this an option */ +/*#define SPLIT_DEVS 1*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "devfs.h" +#include "devfsdefs.h" + +struct lock__bsd__ devfs_lock; /* the "big switch" */ +devdirent_t * dev_root = NULL; /* root of backing tree */ +struct devfs_stats devfs_stats; /* hold stats */ + +#ifdef HIDDEN_MOUNTPOINT +static struct mount *devfs_hidden_mount; +#endif HIDDEN_MOINTPOINT + +static int devfs_ready = 0; + +#define NOCREATE FALSE +#define CREATE TRUE + +/* + * Set up the root directory node in the backing plane + * This is happenning before the vfs system has been + * set up yet, so be careful about what we reference.. + * Notice that the ops are by indirection.. as they haven't + * been set up yet! + * DEVFS has a hidden mountpoint that is used as the anchor point + * for the internal 'blueprint' version of the dev filesystem tree. + */ +/*proto*/ +int +devfs_sinit(void) +{ + lockinit(&devfs_lock, PINOD, "devfs", 0, 0); + if (dev_add_entry("root", NULL, DEV_DIR, NULL, NULL, NULL, + &dev_root)) { + printf("devfs_sinit: dev_add_entry failed "); + return (EOPNOTSUPP); + } +#ifdef HIDDEN_MOUNTPOINT + MALLOC(devfs_hidden_mount, struct mount *, sizeof(struct mount), + M_MOUNT, M_WAITOK); + bzero(devfs_hidden_mount,sizeof(struct mount)); + devfs_mount(devfs_hidden_mount,"dummy",NULL,NULL,NULL); + dev_root->de_dnp->dn_dvm + = (struct devfsmount *)devfs_hidden_mount->mnt_data; +#endif HIDDEN_MOUNTPOINT + devfs_ready = 1; + return (0); +} + +/***********************************************************************\ +************************************************************************* +* Routines used to find our way to a point in the tree * +************************************************************************* +\***********************************************************************/ + + +/***************************************************************\ 
+* Search down the linked list off a dir to find "name" * +* return the devnode_t * for that node. +\***************************************************************/ +/*proto*/ +devdirent_t * +dev_findname(devnode_t * dir,char *name) +{ + devdirent_t * newfp; + if (dir->dn_type != DEV_DIR) return 0;/*XXX*/ /* printf?*/ + + if (name[0] == '.') + { + if(name[1] == 0) + { + return dir->dn_typeinfo.Dir.myname; + } + if((name[1] == '.') && (name[2] == 0)) + { + /* for root, .. == . */ + return dir->dn_typeinfo.Dir.parent->dn_typeinfo.Dir.myname; + } + } + newfp = dir->dn_typeinfo.Dir.dirlist; + while(newfp) + { + if(!(strcmp(name,newfp->de_name))) + return newfp; + newfp = newfp->de_next; + } + return NULL; +} + +#if 0 +/***********************************************************************\ +* Given a starting node (0 for root) and a pathname, return the node * +* for the end item on the path. It MUST BE A DIRECTORY. If the 'CREATE' * +* option is true, then create any missing nodes in the path and create * +* and return the final node as well. * +* This is used to set up a directory, before making nodes in it.. * +* * +* Warning: This function is RECURSIVE. * +\***********************************************************************/ +int +dev_finddir(char * orig_path, /* find this dir (err if not dir) */ + devnode_t * dirnode, /* starting point */ + int create, /* create path? 
*/ + devnode_t * * dn_pp) /* returned */ +{ + devdirent_t * dirent_p; + devnode_t * dnp = NULL; + char pathbuf[DEVMAXPATHSIZE]; + char *path; + char *name; + register char *cp; + int retval; + + + /***************************************\ + * If no parent directory is given * + * then start at the root of the tree * + \***************************************/ + if(!dirnode) dirnode = dev_root->de_dnp; + + /***************************************\ + * Sanity Checks * + \***************************************/ + if (dirnode->dn_type != DEV_DIR) return ENOTDIR; + if(strlen(orig_path) > (DEVMAXPATHSIZE - 1)) return ENAMETOOLONG; + + + path = pathbuf; + strcpy(path,orig_path); + + /***************************************\ + * always absolute, skip leading / * + * get rid of / or // or /// etc. * + \***************************************/ + while(*path == '/') path++; + + /***************************************\ + * If nothing left, then parent was it.. * + \***************************************/ + if ( *path == '\0' ) { + *dn_pp = dirnode; + return 0; + } + + /***************************************\ + * find the next segment of the name * + \***************************************/ + cp = name = path; + while((*cp != '/') && (*cp != 0)) { + cp++; + } + + /***********************************************\ + * Check to see if it's the last component * + \***********************************************/ + if(*cp) { + path = cp + 1; /* path refers to the rest */ + *cp = 0; /* name is now a separate string */ + if(!(*path)) { + path = (char *)0; /* was trailing slash */ + } + } else { + path = NULL; /* no more to do */ + } + + /***************************************\ + * Start scanning along the linked list * + \***************************************/ + dirent_p = dev_findname(dirnode,name); + if(dirent_p) { /* check it's a directory */ + dnp = dirent_p->de_dnp; + if(dnp->dn_type != DEV_DIR) return ENOTDIR; + } else { + /***************************************\ + * 
The required element does not exist * + * So we will add it if asked to. * + \***************************************/ + if(!create) return ENOENT; + + if((retval = dev_add_entry(name, dirnode, + DEV_DIR, NULL, NULL, NULL, + &dirent_p)) != 0) { + return retval; + } + dnp = dirent_p->de_dnp; + devfs_propogate(dirnode->dn_typeinfo.Dir.myname,dirent_p); + } + if(path != NULL) { /* decide whether to recurse more or return */ + return (dev_finddir(path,dnp,create,dn_pp)); + } else { + *dn_pp = dnp; + return 0; + } +} +#endif 0 +/***********************************************************************\ +* Given a starting node (0 for root) and a pathname, return the node * +* for the end item on the path. It MUST BE A DIRECTORY. If the 'CREATE' * +* option is true, then create any missing nodes in the path and create * +* and return the final node as well. * +* This is used to set up a directory, before making nodes in it.. * +\***********************************************************************/ +/* proto */ +int +dev_finddir(char * path, + devnode_t * dirnode, + int create, + devnode_t * * dn_pp) +{ + devnode_t * dnp = NULL; + int error = 0; + char * scan; + + + if (!dirnode) /* dirnode == NULL means start at root */ + dirnode = dev_root->de_dnp; + + if (dirnode->dn_type != DEV_DIR) + return ENOTDIR; + + if (strlen(path) > (DEVMAXPATHSIZE - 1)) + return ENAMETOOLONG; + + scan = path; + + while (*scan == '/') + scan++; + + *dn_pp = NULL; + + while (1) { + char component[DEVMAXPATHSIZE]; + devdirent_t * dirent_p; + char * start; + + if (*scan == 0) { + /* we hit the end of the string, we're done */ + *dn_pp = dirnode; + break; + } + start = scan; + while (*scan != '/' && *scan) + scan++; + + strncpy(component, start, scan - start); + if (*scan == '/') + scan++; + + dirent_p = dev_findname(dirnode, component); + if (dirent_p) { + dnp = dirent_p->de_dnp; + if (dnp->dn_type != DEV_DIR) { + error = ENOTDIR; + break; + } + } + else { + if (!create) { + error = ENOENT; + 
break; + } + error = dev_add_entry(component, dirnode, + DEV_DIR, NULL, NULL, NULL, &dirent_p); + if (error) + break; + dnp = dirent_p->de_dnp; + devfs_propogate(dirnode->dn_typeinfo.Dir.myname, dirent_p); + } + dirnode = dnp; /* continue relative to this directory */ + } + return (error); +} + + +/***********************************************************************\ +* Add a new NAME element to the devfs * +* If we're creating a root node, then dirname is NULL * +* Basically this creates a new namespace entry for the device node * +* * +* Creates a name node, and links it to the supplied node * +\***********************************************************************/ +/*proto*/ +int +dev_add_name(char * name, devnode_t * dirnode, devdirent_t * back, + devnode_t * dnp, devdirent_t * *dirent_pp) +{ + devdirent_t * dirent_p = NULL; + + if(dirnode != NULL ) { + if(dirnode->dn_type != DEV_DIR) return(ENOTDIR); + + if( dev_findname(dirnode,name)) + return(EEXIST); + } + /* + * make sure the name is legal + * slightly misleading in the case of NULL + */ + if (!name || (strlen(name) > (DEVMAXNAMESIZE - 1))) + return (ENAMETOOLONG); + + /* + * Allocate and fill out a new directory entry + */ + MALLOC(dirent_p, devdirent_t *, sizeof(devdirent_t), + M_DEVFSNAME, M_WAITOK); + if (!dirent_p) { + return ENOMEM; + } + bzero(dirent_p,sizeof(devdirent_t)); + + /* inherrit our parent's mount info */ /*XXX*/ + /* a kludge but.... */ + if(dirnode && ( dnp->dn_dvm == NULL)) { + dnp->dn_dvm = dirnode->dn_dvm; + /* if(!dnp->dn_dvm) printf("parent had null dvm "); */ + } + + /* + * Link the two together + * include the implicit link in the count of links to the devnode.. + * this stops it from being accidentally freed later. + */ + dirent_p->de_dnp = dnp; + dnp->dn_links++ ; /* implicit from our own name-node */ + + /* + * Make sure that we can find all the links that reference a node + * so that we can get them all if we need to zap the node. 
+ */ + if(dnp->dn_linklist) { + dirent_p->de_nextlink = dnp->dn_linklist; + dirent_p->de_prevlinkp = dirent_p->de_nextlink->de_prevlinkp; + dirent_p->de_nextlink->de_prevlinkp = &(dirent_p->de_nextlink); + *dirent_p->de_prevlinkp = dirent_p; + } else { + dirent_p->de_nextlink = dirent_p; + dirent_p->de_prevlinkp = &(dirent_p->de_nextlink); + } + dnp->dn_linklist = dirent_p; + + /* + * If the node is a directory, then we need to handle the + * creation of the .. link. + * A NULL dirnode indicates a root node, so point to ourself. + */ + if(dnp->dn_type == DEV_DIR) { + dnp->dn_typeinfo.Dir.myname = dirent_p; + /* + * If we are unlinking from an old dir, decrement its links + * as we point our '..' elsewhere + * Note: it's up to the calling code to remove the + * us from the original directory's list + */ + if(dnp->dn_typeinfo.Dir.parent) { + dnp->dn_typeinfo.Dir.parent->dn_links--; + } + if(dirnode) { + dnp->dn_typeinfo.Dir.parent = dirnode; + } else { + dnp->dn_typeinfo.Dir.parent = dnp; + } + dnp->dn_typeinfo.Dir.parent->dn_links++; /* account for the new '..' */ + } + + /* + * put the name into the directory entry. + */ + strcpy(dirent_p->de_name, name); + + + /* + * Check if we are not making a root node.. + * (i.e. have parent) + */ + if(dirnode) { + /* + * Put it on the END of the linked list of directory entries + */ + int len; + + dirent_p->de_parent = dirnode; /* null for root */ + dirent_p->de_prevp = dirnode->dn_typeinfo.Dir.dirlast; + dirent_p->de_next = *(dirent_p->de_prevp); /* should be NULL */ + /*right?*/ + *(dirent_p->de_prevp) = dirent_p; + dirnode->dn_typeinfo.Dir.dirlast = &(dirent_p->de_next); + dirnode->dn_typeinfo.Dir.entrycount++; + dirnode->dn_len += strlen(name) + 8;/*ok, ok?*/ + } + + *dirent_pp = dirent_p; + DEVFS_INCR_ENTRIES(); + return 0 ; +} + + +/***********************************************************************\ +* Add a new element to the devfs plane. 
* +* * +* Creates a new dev_node to go with it if the prototype should not be * +* reused. (Is a DIR, or we select SPLIT_DEVS at compile time) * +* typeinfo gives us info to make our node if we don't have a prototype. * +* If typeinfo is null and proto exists, then the typeinfo field of * +* the proto is used intead in the CREATE case. * +* note the 'links' count is 0 (except if a dir) * +* but it is only cleared on a transition * +* so this is ok till we link it to something * +* Even in SPLIT_DEVS mode, * +* if the node already exists on the wanted plane, just return it * +\***********************************************************************/ +/*proto*/ +int +dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, + devnode_t * *dn_pp, struct devfsmount *dvm) +{ + devnode_t * dnp = NULL; + +#if defined SPLIT_DEVS + /* + * If we have a prototype, then check if there is already a sibling + * on the mount plane we are looking at, if so, just return it. + */ + if (proto) { + dnp = proto->dn_nextsibling; + while( dnp != proto) { + if (dnp->dn_dvm == dvm) { + *dn_pp = dnp; + return (0); + } + dnp = dnp->dn_nextsibling; + } + if (typeinfo == NULL) + typeinfo = &(proto->dn_typeinfo); + } +#else /* SPLIT_DEVS */ + if ( proto ) { + switch (proto->type) { + case DEV_BDEV: + case DEV_CDEV: + *dn_pp = proto; + return 0; + } + } +#endif /* SPLIT_DEVS */ + MALLOC(dnp, devnode_t *, sizeof(devnode_t), M_DEVFSNODE, M_WAITOK); + if (!dnp) { + return ENOMEM; + } + + /* + * If we have a proto, that means that we are duplicating some + * other device, which can only happen if we are not at the back plane + */ + if(proto) { + bcopy(proto, dnp, sizeof(devnode_t)); + dnp->dn_links = 0; + dnp->dn_linklist = NULL; + dnp->dn_vn = NULL; + dnp->dn_len = 0; + /* add to END of siblings list */ + dnp->dn_prevsiblingp = proto->dn_prevsiblingp; + *(dnp->dn_prevsiblingp) = dnp; + dnp->dn_nextsibling = proto; + proto->dn_prevsiblingp = &(dnp->dn_nextsibling); + } else { + struct 
timeval tv; + + /* + * We have no prototype, so start off with a clean slate + */ + tv = time; + bzero(dnp,sizeof(devnode_t)); + dnp->dn_type = entrytype; + dnp->dn_nextsibling = dnp; + dnp->dn_prevsiblingp = &(dnp->dn_nextsibling); + dnp->dn_atime.tv_sec = tv.tv_sec; + dnp->dn_mtime.tv_sec = tv.tv_sec; + dnp->dn_ctime.tv_sec = tv.tv_sec; + } + dnp->dn_dvm = dvm; + + /* + * fill out the dev node according to type + */ + switch(entrytype) { + case DEV_DIR: + /* + * As it's a directory, make sure + * it has a null entries list + */ + dnp->dn_typeinfo.Dir.dirlast = &(dnp->dn_typeinfo.Dir.dirlist); + dnp->dn_typeinfo.Dir.dirlist = (devdirent_t *)0; + dnp->dn_typeinfo.Dir.entrycount = 0; + /* until we know better, it has a null parent pointer*/ + dnp->dn_typeinfo.Dir.parent = NULL; + dnp->dn_links++; /* for .*/ + dnp->dn_typeinfo.Dir.myname = NULL; + /* + * make sure that the ops associated with it are the ops + * that we use (by default) for directories + */ + dnp->dn_ops = &devfs_vnodeop_p; + dnp->dn_mode |= 0555; /* default perms */ + break; + case DEV_SLNK: + /* + * As it's a symlink allocate and store the link info + * Symlinks should only ever be created by the user, + * so they are not on the back plane and should not be + * propogated forward.. a bit like directories in that way.. + * A symlink only exists on one plane and has its own + * node.. therefore we might be on any random plane. 
+ */ + MALLOC(dnp->dn_typeinfo.Slnk.name, char *, + typeinfo->Slnk.namelen+1, + M_DEVFSNODE, M_WAITOK); + if (!dnp->dn_typeinfo.Slnk.name) { + FREE(dnp,M_DEVFSNODE); + return ENOMEM; + } + strncpy(dnp->dn_typeinfo.Slnk.name, typeinfo->Slnk.name, + typeinfo->Slnk.namelen); + dnp->dn_typeinfo.Slnk.name[typeinfo->Slnk.namelen] = '\0'; + dnp->dn_typeinfo.Slnk.namelen = typeinfo->Slnk.namelen; + DEVFS_INCR_STRINGSPACE(dnp->dn_typeinfo.Slnk.namelen + 1); + dnp->dn_ops = &devfs_vnodeop_p; + dnp->dn_mode |= 0555; /* default perms */ + break; + case DEV_CDEV: + case DEV_BDEV: + /* + * Make sure it has DEVICE type ops + * and device specific fields are correct + */ + dnp->dn_ops = &devfs_spec_vnodeop_p; + dnp->dn_typeinfo.dev = typeinfo->dev; + break; + default: + return EINVAL; + } + + *dn_pp = dnp; + DEVFS_INCR_NODES(); + return 0 ; +} + + +/*proto*/ +void +devnode_free(devnode_t * dnp) +{ + if (dnp->dn_type == DEV_SLNK) { + DEVFS_DECR_STRINGSPACE(dnp->dn_typeinfo.Slnk.namelen + 1); + FREE(dnp->dn_typeinfo.Slnk.name,M_DEVFSNODE); + } + FREE(dnp, M_DEVFSNODE); + DEVFS_DECR_NODES(); + return; +} + +/*proto*/ +void +devfs_dn_free(devnode_t * dnp) +{ + if(--dnp->dn_links <= 0 ) /* can be -1 for initial free, on error */ + { + /*probably need to do other cleanups XXX */ + if (dnp->dn_nextsibling != dnp) { + devnode_t * * prevp = dnp->dn_prevsiblingp; + *prevp = dnp->dn_nextsibling; + dnp->dn_nextsibling->dn_prevsiblingp = prevp; + + } + if (dnp->dn_vn == NULL) { +#if 0 + printf("devfs_dn_free: free'ing %x\n", (unsigned int)dnp); +#endif 0 + devnode_free(dnp); /* no accesses/references */ + } + else { +#if 0 + printf("devfs_dn_free: marking %x for deletion\n", + (unsigned int)dnp); +#endif 0 + dnp->dn_delete = TRUE; + } + } +} + +/***********************************************************************\ +* Front Node Operations * +* Add or delete a chain of front nodes * +\***********************************************************************/ + 
+/***********************************************************************\ +* Given a directory backing node, and a child backing node, add the * +* appropriate front nodes to the front nodes of the directory to * +* represent the child node to the user * +* * +* on failure, front nodes will either be correct or not exist for each * +* front dir, however dirs completed will not be stripped of completed * +* frontnodes on failure of a later frontnode * +* * +* This allows a new node to be propogated through all mounted planes * +* * +\***********************************************************************/ +/*proto*/ +int +devfs_propogate(devdirent_t * parent,devdirent_t * child) +{ + int error; + devdirent_t * newnmp; + devnode_t * dnp = child->de_dnp; + devnode_t * pdnp = parent->de_dnp; + devnode_t * adnp = parent->de_dnp; + int type = child->de_dnp->dn_type; + + /***********************************************\ + * Find the other instances of the parent node * + \***********************************************/ + for (adnp = pdnp->dn_nextsibling; + adnp != pdnp; + adnp = adnp->dn_nextsibling) + { + /* + * Make the node, using the original as a prototype) + * if the node already exists on that plane it won't be + * re-made.. + */ + if ((error = dev_add_entry(child->de_name, adnp, type, + NULL, dnp, adnp->dn_dvm, + &newnmp)) != 0) { + printf("duplicating %s failed\n",child->de_name); + } + } + return 0; /* for now always succeed */ +} + +/*********************************************************************** + * remove all instances of this devicename [for backing nodes..] + * note.. 
if there is another link to the node (non dir nodes only) + * then the devfs_node will still exist as the ref count will be non-0 + * removing a directory node will remove all sup-nodes on all planes (ZAP) + * + * Used by device drivers to remove nodes that are no longer relevant + * The argument is the 'cookie' they were given when they created the node + * this function is exported.. see devfs.h + ***********************************************************************/ +void +devfs_remove(void *dirent_p) +{ + devnode_t * dnp = ((devdirent_t *)dirent_p)->de_dnp; + devnode_t * dnp2; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + if (!devfs_ready) { + printf("devfs_remove: not ready for devices!\n"); + goto out; + } + + DEVFS_LOCK(0); + + /* keep removing the next sibling till only we exist. */ + while((dnp2 = dnp->dn_nextsibling) != dnp) { + + /* + * Keep removing the next front node till no more exist + */ + dnp->dn_nextsibling = dnp2->dn_nextsibling; + dnp->dn_nextsibling->dn_prevsiblingp = &(dnp->dn_nextsibling); + dnp2->dn_nextsibling = dnp2; + dnp2->dn_prevsiblingp = &(dnp2->dn_nextsibling); + while(dnp2->dn_linklist) { + dev_free_name(dnp2->dn_linklist); + } + } + + /* + * then free the main node + * If we are not running in SPLIT_DEVS mode, then + * THIS is what gets rid of the propogated nodes. + */ + while(dnp->dn_linklist) { + dev_free_name(dnp->dn_linklist); + } + DEVFS_UNLOCK(0); +out: + (void) thread_funnel_set(kernel_flock, funnel_state); + return ; +} + + +/*************************************************************** + * duplicate the backing tree into a tree of nodes hung off the + * mount point given as the argument. Do this by + * calling dev_dup_entry which recurses all the way + * up the tree.. 
+ **************************************************************/ +/*proto*/ +int +dev_dup_plane(struct devfsmount *devfs_mp_p) +{ + devdirent_t * new; + int error = 0; + + if ((error = dev_dup_entry(NULL, dev_root, &new, devfs_mp_p))) + return error; + devfs_mp_p->plane_root = new; + return error; +} + + + +/***************************************************************\ +* Free a whole plane +\***************************************************************/ +/*proto*/ +void +devfs_free_plane(struct devfsmount *devfs_mp_p) +{ + devdirent_t * dirent_p; + + dirent_p = devfs_mp_p->plane_root; + if(dirent_p) { + dev_free_hier(dirent_p); + dev_free_name(dirent_p); + } + devfs_mp_p->plane_root = NULL; +} + +/***************************************************************\ +* Create and link in a new front element.. * +* Parent can be 0 for a root node * +* Not presently usable to make a symlink XXX * +* (Ok, symlinks don't propogate) +* recursively will create subnodes corresponding to equivalent * +* child nodes in the base level * +\***************************************************************/ +/*proto*/ +int +dev_dup_entry(devnode_t * parent, devdirent_t * back, devdirent_t * *dnm_pp, + struct devfsmount *dvm) +{ + devdirent_t * entry_p; + devdirent_t * newback; + devdirent_t * newfront; + int error; + devnode_t * dnp = back->de_dnp; + int type = dnp->dn_type; + + /* + * go get the node made (if we need to) + * use the back one as a prototype + */ + if ((error = dev_add_entry(back->de_name, parent, type, + NULL, dnp, + parent?parent->dn_dvm:dvm, &entry_p)) != 0) { + printf("duplicating %s failed\n",back->de_name); + } + + /* + * If we have just made the root, then insert the pointer to the + * mount information + */ + if(dvm) { + entry_p->de_dnp->dn_dvm = dvm; + } + + /* + * If it is a directory, then recurse down all the other + * subnodes in it.... + * note that this time we don't pass on the mount info.. 
+ */ + if (type == DEV_DIR) + { + for(newback = back->de_dnp->dn_typeinfo.Dir.dirlist; + newback; newback = newback->de_next) + { + if((error = dev_dup_entry(entry_p->de_dnp, + newback, &newfront, NULL)) != 0) + { + break; /* back out with an error */ + } + } + } + *dnm_pp = entry_p; + return error; +} + +/***************************************************************\ +* Free a name node * +* remember that if there are other names pointing to the * +* dev_node then it may not get freed yet * +* can handle if there is no dnp * +\***************************************************************/ +/*proto*/ +int +dev_free_name(devdirent_t * dirent_p) +{ + devnode_t * parent = dirent_p->de_parent; + devnode_t * dnp = dirent_p->de_dnp; + + if(dnp) { + if(dnp->dn_type == DEV_DIR) + { + devnode_t * p; + + if(dnp->dn_typeinfo.Dir.dirlist) + return (ENOTEMPTY); + p = dnp->dn_typeinfo.Dir.parent; + devfs_dn_free(dnp); /* account for '.' */ + devfs_dn_free(p); /* '..' */ + } + /* + * unlink us from the list of links for this node + * If we are the only link, it's easy! + * if we are a DIR of course there should not be any + * other links. 
+ */ + if(dirent_p->de_nextlink == dirent_p) { + dnp->dn_linklist = NULL; + } else { + if(dnp->dn_linklist == dirent_p) { + dnp->dn_linklist = dirent_p->de_nextlink; + } + dirent_p->de_nextlink->de_prevlinkp + = dirent_p->de_prevlinkp; + *dirent_p->de_prevlinkp = dirent_p->de_nextlink; + } + devfs_dn_free(dnp); + } + + /* + * unlink ourselves from the directory on this plane + */ + if(parent) /* if not fs root */ + { + if( (*dirent_p->de_prevp = dirent_p->de_next) )/* yes, assign */ + { + dirent_p->de_next->de_prevp = dirent_p->de_prevp; + } + else + { + parent->dn_typeinfo.Dir.dirlast + = dirent_p->de_prevp; + } + parent->dn_typeinfo.Dir.entrycount--; + parent->dn_len -= strlen(dirent_p->de_name) + 8; + } + + DEVFS_DECR_ENTRIES(); + FREE(dirent_p,M_DEVFSNAME); + return 0; +} + +/***************************************************************\ +* Free a hierarchy starting at a directory node name * +* remember that if there are other names pointing to the * +* dev_node then it may not get freed yet * +* can handle if there is no dnp * +* leave the node itself allocated. 
 *								*
\***************************************************************/
/*proto*/
void
dev_free_hier(devdirent_t * dirent_p)
{
	devnode_t *	dnp = dirent_p->de_dnp;

	if(dnp) {
		if(dnp->dn_type == DEV_DIR)
		{
			/* depth-first: empty each child subtree, then
			 * release the child's name entry */
			while(dnp->dn_typeinfo.Dir.dirlist)
			{
				dev_free_hier(dnp->dn_typeinfo.Dir.dirlist);
				dev_free_name(dnp->dn_typeinfo.Dir.dirlist);
			}
		}
	}
}

/***************************************************************\
* given a dev_node, find the appropriate vnode if one is already
* associated, or get a new one and associate it with the dev_node
\***************************************************************/
/*proto*/
int
devfs_dntovn(devnode_t * dnp, struct vnode **vn_pp, struct proc * p)
{
	struct vnode *vn_p, *nvp;
	int error = 0;

	*vn_pp = NULL;
	vn_p = dnp->dn_vn;
	if (vn_p) { /* already has a vnode */
		*vn_pp = vn_p;
		/* vget takes a fresh reference and lock on the cached vnode */
		return(vget(vn_p, LK_EXCLUSIVE, p));
	}
	if (!(error = getnewvnode(VT_DEVFS, dnp->dn_dvm->mount,
			*(dnp->dn_ops), &vn_p))) {
		switch(dnp->dn_type) {
		case DEV_SLNK:
			vn_p->v_type = VLNK;
			break;
		case DEV_DIR:
			/* the plane root is its own parent */
			if (dnp->dn_typeinfo.Dir.parent == dnp) {
				vn_p->v_flag |= VROOT;
			}
			vn_p->v_type = VDIR;
			break;
		case DEV_BDEV:
		case DEV_CDEV:
			vn_p->v_type
				= (dnp->dn_type == DEV_BDEV) ? VBLK : VCHR;
			/* checkalias may hand back an existing alias vnode
			 * for the same dev_t; if so, use it instead */
			if ((nvp = checkalias(vn_p, dnp->dn_typeinfo.dev,
					dnp->dn_dvm->mount)) != NULL) {
				vput(vn_p);
				vn_p = nvp;
			}
			break;
		}
		vn_p->v_mount = dnp->dn_dvm->mount;/* XXX Duplicated */
		*vn_pp = vn_p;
		vn_p->v_data = (void *)dnp;
		dnp->dn_vn = vn_p;
		error = vn_lock(vn_p, LK_EXCLUSIVE | LK_RETRY, p);
	}
	return error;
}

/***********************************************************************\
* add a whole device, with no prototype.. make name element and node	*
* Used for adding the original device entries			*
\***********************************************************************/
/*proto*/
int
dev_add_entry(char *name, devnode_t * parent, int type, devnode_type_t * typeinfo,
	      devnode_t * proto, struct devfsmount *dvm, devdirent_t * *nm_pp)
{
	devnode_t *	dnp;
	int	error = 0;

	if ((error = dev_add_node(type, typeinfo, proto, &dnp,
			(parent?parent->dn_dvm:dvm))) != 0)
	{
		printf("devfs: %s: base node allocation failed (Errno=%d)\n",
			name,error);
		return error;
	}
	if ((error = dev_add_name(name ,parent ,NULL, dnp, nm_pp)) != 0)
	{
		devfs_dn_free(dnp); /* 1->0 for dir, 0->(-1) for other */
		printf("devfs: %s: name slot allocation failed (Errno=%d)\n",
		       name,error);

	}
	return error;
}

/* NOTE(review): the header name on the next #include was lost in text
 * extraction (angle-bracket argument stripped) — restore from the
 * original source before compiling. */
#include

/*
 * Function: devfs_make_node
 *
 * Purpose
 *   Create a device node with the given pathname in the devfs namespace.
 *
 * Parameters:
 *   dev 	- the dev_t value to associate
 *   chrblk	- block or character device (DEVFS_CHAR or DEVFS_BLOCK)
 *   uid, gid	- ownership
 *   perms	- permissions
 *   fmt, ...	- path format string with printf args to format the path name
 * Returns:
 *   A handle to a device node if successful, NULL otherwise.
 */
void *
devfs_make_node(dev_t dev, int chrblk, uid_t uid,
		gid_t gid, int perms, char *fmt, ...)
{
	devdirent_t *	new_dev = NULL;
	devnode_t *	dnp;	/* devnode for parent directory */
	devnode_type_t	typeinfo;

	char *name, *path, buf[256]; /* XXX */
	char * b_ptr = buf;
	boolean_t 	funnel_state;
	int 	i;
	va_list ap;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (!devfs_ready) {
		printf("devfs_make_node: not ready for devices!\n");
		goto out;
	}

	if (chrblk != DEVFS_CHAR && chrblk != DEVFS_BLOCK)
		goto out;

	/* format the pathname into buf; prf advances b_ptr
	 * NOTE(review): no bound check against sizeof buf — assumes
	 * callers pass short paths; confirm prf cannot overflow here */
	va_start(ap, fmt);
	prf(fmt, ap, TOSTR, (struct tty *)&b_ptr);
	va_end(ap);
	*b_ptr = 0;

	name = NULL;

	/* split buf at the last '/' into directory path and leaf name */
	for(i=strlen(buf); i>0; i--)
		if(buf[i] == '/') {
			name=&buf[i];
			buf[i]=0;
			break;
		}

	if (name) {
		*name++ = '\0';
		path = buf;
	} else {
		name = buf;
		path = "/";
	}

	DEVFS_LOCK(0);
	/* find/create directory path ie. mkdir -p */
	if (dev_finddir(path, NULL, CREATE, &dnp) == 0) {
		typeinfo.dev = dev;
		if (dev_add_entry(name, dnp,
				  (chrblk == DEVFS_CHAR) ? DEV_CDEV : DEV_BDEV,
				  &typeinfo, NULL, NULL, &new_dev) == 0) {
			new_dev->de_dnp->dn_gid = gid;
			new_dev->de_dnp->dn_uid = uid;
			new_dev->de_dnp->dn_mode |= perms;
			/* mirror the new entry into every mounted plane */
			devfs_propogate(dnp->dn_typeinfo.Dir.myname, new_dev);
		}
	}
	DEVFS_UNLOCK(0);

out:
	(void) thread_funnel_set(kernel_flock, funnel_state);
	return new_dev;
}

/*
 * Function: devfs_make_link
 *
 * Purpose:
 *   Create a link to a previously created device node.
 *
 * Returns:
 *   0 if successful, -1 if failed
 */
int
devfs_make_link(void *original, char *fmt, ...)
{
	devdirent_t *	new_dev = NULL;
	devdirent_t *	orig = (devdirent_t *) original;
	devnode_t *	dirnode;	/* devnode for parent directory */

	va_list ap;
	char *p, buf[256]; /* XXX */
	char * b_ptr = buf;
	int 	i;
	boolean_t 	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	if (!devfs_ready) {
		printf("devfs_make_link: not ready for devices!\n");
		goto out;
	}

	/* format the link pathname into buf (see devfs_make_node) */
	va_start(ap, fmt);
	prf(fmt, ap, TOSTR, (struct tty *)&b_ptr);
	va_end(ap);
	*b_ptr = 0;

	p = NULL;

	/* split into directory part (buf) and leaf name (p), if any */
	for(i=strlen(buf); i>0; i--)
		if(buf[i] == '/') {
			p=&buf[i];
			buf[i]=0;
			break;
		}
	DEVFS_LOCK(0);
	if (p) {
		*p++ = '\0';
		if (dev_finddir(buf, NULL, CREATE, &dirnode)
		    || dev_add_name(p, dirnode, NULL, orig->de_dnp, &new_dev))
			goto fail;
	} else {
		if (dev_finddir("", NULL, CREATE, &dirnode)
		    || dev_add_name(buf, dirnode, NULL, orig->de_dnp, &new_dev))
			goto fail;
	}
	devfs_propogate(dirnode->dn_typeinfo.Dir.myname, new_dev);
fail:
	DEVFS_UNLOCK(0);
out:
	(void) thread_funnel_set(kernel_flock, funnel_state);
	return ((new_dev != NULL) ? 0 : -1);
}

diff --git a/bsd/miscfs/devfs/devfs_vfsops.c b/bsd/miscfs/devfs/devfs_vfsops.c
new file mode 100644
index 000000000..4b11822bf
--- /dev/null
+++ b/bsd/miscfs/devfs/devfs_vfsops.c
@@ -0,0 +1,426 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright 1997,1998 Julian Elischer. All rights reserved. + * julian@freebsd.org + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
 *
 * devfs_vfsops.c
 *
 */
/*
 * HISTORY
 *  Dieter Siegmund (dieter@apple.com) Wed Jul 14 13:37:59 PDT 1999
 *  - modified devfs_statfs() to use devfs_stats to calculate the
 *    amount of memory used by devfs
 */


/* NOTE(review): the header names on the next includes were lost in
 * text extraction (angle-bracket arguments stripped) — restore from
 * the original source before compiling. */
#include
#include
#include
#include
#include
#include
#include

#include "devfs.h"
#include "devfsdefs.h"

static int devfs_statfs( struct mount *mp, struct statfs *sbp, struct proc *p);

static struct vfsconf * devfs_vfsp = 0;
static int kernel_mount = 0;


/*-
 * Called from the generic VFS startups.
 * This is the second stage of DEVFS initialisation.
 * The probed devices have already been loaded and the
 * basic structure of the DEVFS created.
 * We take the oportunity to mount the hidden DEVFS layer, so that
 * devices from devfs get sync'd.
 */
static int
devfs_init(struct vfsconf *vfsp)
{
	devfs_vfsp = vfsp; /* remember this for devfs_kernel_mount below */

	if (devfs_sinit())
		return (EOPNOTSUPP);
	printf("devfs enabled\n");
	/* pre-populate the standard character devices; major/minor
	 * numbers here must match the cdevsw table */
	devfs_make_node(makedev(0, 0), DEVFS_CHAR,
			UID_ROOT, GID_WHEEL, 0622, "console");
	devfs_make_node(makedev(2, 0), DEVFS_CHAR,
			UID_ROOT, GID_WHEEL, 0666, "tty");
	devfs_make_node(makedev(3, 0), DEVFS_CHAR,
			UID_ROOT, GID_KMEM, 0640, "mem");
	devfs_make_node(makedev(3, 1), DEVFS_CHAR,
			UID_ROOT, GID_KMEM, 0640, "kmem");
	devfs_make_node(makedev(3, 2), DEVFS_CHAR,
			UID_ROOT, GID_WHEEL, 0666, "null");
	devfs_make_node(makedev(3, 3), DEVFS_CHAR,
			UID_ROOT, GID_WHEEL, 0666, "zero");
	devfs_make_node(makedev(6, 0), DEVFS_CHAR,
			UID_ROOT, GID_WHEEL, 0600, "klog");
	return 0;
}

/*-
 *  mp	 - pointer to 'mount' structure
 *  path - addr in user space of mount point (ie /usr or whatever)
 *  data - addr in user space of mount params including the
 *         name of the block special file to treat as a filesystem.
 *         (NOT USED)
 *  ndp  - namei data pointer (NOT USED)
 *  p    - proc pointer
 * devfs is special in that it doesn't require any device to be mounted..
 * It makes up its data as it goes along.
 * it must be mounted during single user.. until it is, only std{in/out/err}
 * and the root filesystem are available.
 */
/*proto*/
int
devfs_mount(struct mount *mp, char *path, caddr_t data,
	    struct nameidata *ndp, struct proc *p)
{
	struct devfsmount *devfs_mp_p;	/* devfs specific mount info */
	int error;
	size_t size;

	/*-
	 *  If they just want to update, we don't need to do anything.
	 */
	if (mp->mnt_flag & MNT_UPDATE)
	{
		return 0;
	}

	/*-
	 *  Well, it's not an update, it's a real mount request.
	 *  Time to get dirty.
	 * HERE we should check to see if we are already mounted here.
	 */

	MALLOC(devfs_mp_p, struct devfsmount *, sizeof(struct devfsmount),
	       M_DEVFSMNT, M_WAITOK);
	if (devfs_mp_p == NULL)
		return (ENOMEM);
	bzero(devfs_mp_p,sizeof(*devfs_mp_p));
	devfs_mp_p->mount = mp;

	/*-
	 *  Fill out some fields
	 */
	mp->mnt_data = (qaddr_t)devfs_mp_p;
	mp->mnt_stat.f_type = mp->mnt_vfc->vfc_typenum;
	/* fsid from the mount's own address — unique per mount */
	mp->mnt_stat.f_fsid.val[0] = (int32_t)(void *)devfs_mp_p;
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_stat.f_type;
	mp->mnt_flag |= MNT_LOCAL;

	/* clone the backing tree into a fresh plane for this mount */
	DEVFS_LOCK(p);
	error = dev_dup_plane(devfs_mp_p);
	DEVFS_UNLOCK(p);
	if (error) {
		mp->mnt_data = (qaddr_t)0;
		FREE((caddr_t)devfs_mp_p, M_DEVFSMNT);
		return (error);
	}

	/*-
	 *  Copy in the name of the directory the filesystem
	 *  is to be mounted on.
	 *  And we clear the remainder of the character strings
	 *  to be tidy.
	 */

	/* kernel-initiated mounts pass a kernel-space path; the caller
	 * (devfs_kernel_mount) fills f_mntonname itself */
	if (!kernel_mount) {
		copyinstr(path, (caddr_t)mp->mnt_stat.f_mntonname,
			sizeof(mp->mnt_stat.f_mntonname)-1, &size);
		bzero(mp->mnt_stat.f_mntonname + size,
			sizeof(mp->mnt_stat.f_mntonname) - size);
	}
	bzero(mp->mnt_stat.f_mntfromname, MNAMELEN);
	bcopy("devfs",mp->mnt_stat.f_mntfromname, 5);
	DEVFS_INCR_MOUNTS();
	(void)devfs_statfs(mp, &mp->mnt_stat, p);
	return 0;
}


static int
devfs_start(struct mount *mp, int flags, struct proc *p)
{
	/* nothing to do after mount completes */
	return 0;
}

/*-
 *  Unmount the filesystem described by mp.
 */
static int
devfs_unmount( struct mount *mp, int mntflags, struct proc *p)
{
	struct devfsmount *devfs_mp_p = (struct devfsmount *)mp->mnt_data;
	int flags = 0;
	int error;

	if (mntflags & MNT_FORCE) {
		flags |= FORCECLOSE;
	}
	/* flush all vnodes of this mount; fails if any are busy
	 * (unless forced) */
	error = vflush(mp, NULLVP, flags);
	if (error)
		return error;

	DEVFS_LOCK(p);
	devfs_free_plane(devfs_mp_p);
	DEVFS_UNLOCK(p);
	FREE((caddr_t)devfs_mp_p, M_DEVFSMNT);
	DEVFS_DECR_MOUNTS();
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;

	return 0;
}

/* return the address of the root vnode  in *vpp */
static int
devfs_root(struct mount *mp, struct vnode **vpp)
{
	struct devfsmount *devfs_mp_p = (struct devfsmount *)(mp->mnt_data);
	int error;

	error = devfs_dntovn(devfs_mp_p->plane_root->de_dnp,vpp,
			     current_proc());
	return error;
}

static int
devfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t arg,
	       struct proc *p)
{
	/* quotas make no sense on a synthetic filesystem */
	return EOPNOTSUPP;
}

static int
devfs_statfs( struct mount *mp, struct statfs *sbp, struct proc *p)
{
	struct devfsmount *devfs_mp_p = (struct devfsmount *)mp->mnt_data;

	/*-
	 *  Fill in the stat block.
	 */
	sbp->f_type   = mp->mnt_stat.f_type;
	sbp->f_flags  = 0;		/* XXX */
	sbp->f_bsize  = 512;
	sbp->f_iosize = 512;
	/* "blocks used" is synthesized from devfs's own memory
	 * accounting, since there is no backing store */
	sbp->f_blocks = (devfs_stats.mounts * sizeof(struct devfsmount)
			 + devfs_stats.nodes * sizeof(devnode_t)
			 + devfs_stats.entries * sizeof(devdirent_t)
			 + devfs_stats.stringspace
			 ) / sbp->f_bsize;
	sbp->f_bfree  = 0;
	sbp->f_bavail = 0;
	sbp->f_files  = devfs_stats.nodes;
	sbp->f_ffree  = 0;
	sbp->f_fsid.val[0] = (int32_t)(void *)devfs_mp_p;
	sbp->f_fsid.val[1] = mp->mnt_stat.f_type;

	/*-
	 *  Copy the mounted on and mounted from names into
	 *  the passed in stat block, if it is not the one
	 *  in the mount structure.
	 */
	if (sbp != &mp->mnt_stat) {
		bcopy((caddr_t)mp->mnt_stat.f_mntonname,
		      (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
		bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
		      (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
	}
	return 0;
}

static int
devfs_sync(struct mount *mp, int waitfor,struct ucred *cred,struct proc *p)
{
	/* nothing to write back — devfs lives entirely in memory */
	return (0);
}


static int
devfs_vget(struct mount *mp, void * ino,struct vnode **vpp)
{
	return EOPNOTSUPP;
}

/*************************************************************
 * The concept of exporting a kernel generated devfs is stupid
 * So don't handle filehandles
 */

static int
devfs_fhtovp (struct mount *mp, struct fid *fhp, struct mbuf *nam,
	      struct vnode **vpp, int *exflagsp, struct ucred **credanonp)
{
	return (EINVAL);
}


static int
devfs_vptofh (struct vnode *vp, struct fid *fhp)
{
	return (EINVAL);
}

static int
devfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	return (EOPNOTSUPP);
}

/* NOTE(review): the header name on the next #include was lost in text
 * extraction (angle-bracket argument stripped) — restore from the
 * original source before compiling. */
#include

/*
 * Function: devfs_kernel_mount
 * Purpose:
 *   Mount devfs at the given mount point from within the kernel.
 */
int
devfs_kernel_mount(char * mntname)
{
	struct mount *mp;
	int error;
	struct proc *procp;
	struct nameidata nd;
	struct vnode * vp;

	/* devfs_init() must have run and recorded the vfsconf first */
	if (devfs_vfsp == NULL) {
		printf("devfs_kernel_mount: devfs_vfsp is NULL\n");
		return (EINVAL);
	}
	procp = current_proc();

	/*
	 * Get vnode to be covered
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	       mntname, procp);
	if ((error = namei(&nd))) {
		printf("devfs_kernel_mount: failed to find directory '%s', %d",
		       mntname, error);
		return (error);
	}
	vp = nd.ni_vp;
	if ((error = vinvalbuf(vp, V_SAVE, procp->p_ucred, procp, 0, 0))) {
		printf("devfs_kernel_mount: vinval failed: %d\n", error);
		vput(vp);
		return (error);
	}
	if (vp->v_type != VDIR) {
		printf("devfs_kernel_mount: '%s' is not a directory\n", mntname);
		vput(vp);
		return (ENOTDIR);
	}
	if (vp->v_mountedhere != NULL) {
		vput(vp);
		return (EBUSY);
	}

	/*
	 * Allocate and initialize the filesystem.
	 */
	/* hand-rolled equivalent of the vfs_mount() setup path, since
	 * this mount is initiated from inside the kernel */
	mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
	bzero((char *)mp, (u_long)sizeof(struct mount));
	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
	(void)vfs_busy(mp, LK_NOWAIT, 0, procp);
	LIST_INIT(&mp->mnt_vnodelist);
	mp->mnt_op = devfs_vfsp->vfc_vfsops;
	mp->mnt_vfc = devfs_vfsp;
	devfs_vfsp->vfc_refcount++;
	mp->mnt_flag = 0;
	mp->mnt_flag |= devfs_vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, devfs_vfsp->vfc_name, MFSNAMELEN);
	vp->v_mountedhere = mp;
	mp->mnt_vnodecovered = vp;
	mp->mnt_stat.f_owner = procp->p_ucred->cr_uid;
	(void) copystr(mntname, mp->mnt_stat.f_mntonname, MNAMELEN - 1, 0);

	/* tell devfs_mount() the path is kernel-space (skips copyinstr) */
	kernel_mount = 1;
	error = devfs_mount(mp, mntname, NULL, NULL, procp);
	kernel_mount = 0;
	if (error) {
		printf("devfs_kernel_mount: mount %s failed: %d", mntname, error);
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp, procp);
		_FREE_ZONE(mp, sizeof (struct mount), M_MOUNT);
		vput(vp);
		return (error);
	}
	printf("devfs on %s\n", mntname);

	/* publish the new mount on the global mount list */
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	VOP_UNLOCK(vp, 0, procp);
	vfs_unbusy(mp, procp);
	return (0);
}

/* devfs entry points registered with the VFS layer */
struct vfsops devfs_vfsops = {
	devfs_mount,
	devfs_start,
	devfs_unmount,
	devfs_root,
	devfs_quotactl,
	devfs_statfs,
	devfs_sync,
	devfs_vget,
	devfs_fhtovp,
	devfs_vptofh,
	devfs_init,
	devfs_sysctl,
};
diff --git a/bsd/miscfs/devfs/devfs_vnops.c b/bsd/miscfs/devfs/devfs_vnops.c
new file mode 100644
index 000000000..6c25d8044
--- /dev/null
+++ b/bsd/miscfs/devfs/devfs_vnops.c
@@ -0,0 +1,1606 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1997,1998 Julian Elischer.  All rights reserved.
 * julian@freebsd.org
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * devfs_vnops.c + */ + +/* + * HISTORY + * Clark Warner (warner_c@apple.com) Tue Feb 10 2000 + * - Added err_copyfile to the vnode operations table + * Dieter Siegmund (dieter@apple.com) Thu Apr 8 14:08:19 PDT 1999 + * - instead of duplicating specfs here, created a vnode-ops table + * that redirects most operations to specfs (as is done with ufs); + * - removed routines that made no sense + * - cleaned up reclaim: replaced devfs_vntodn() with a macro VTODN() + * - cleaned up symlink, link locking + * - added the devfs_lock to protect devfs data structures against + * driver's calling devfs_add_devswf()/etc. 
+ * Dieter Siegmund (dieter@apple.com) Wed Jul 14 13:37:59 PDT 1999 + * - free the devfs devnode in devfs_inactive(), not just in devfs_reclaim() + * to free up kernel memory as soon as it's available + * - got rid of devfsspec_{read, write} + * Dieter Siegmund (dieter@apple.com) Fri Sep 17 09:58:38 PDT 1999 + * - update the mod/access times + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "devfsdefs.h" + +/* + * Convert a component of a pathname into a pointer to a locked node. + * This is a very central and rather complicated routine. + * If the file system is not maintained in a strict tree hierarchy, + * this can result in a deadlock situation (see comments in code below). + * + * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on + * whether the name is to be looked up, created, renamed, or deleted. + * When CREATE, RENAME, or DELETE is specified, information usable in + * creating, renaming, or deleting a directory entry may be calculated. + * If flag has LOCKPARENT or'ed into it and the target of the pathname + * exists, lookup returns both the target and its parent directory locked. + * When creating or renaming and LOCKPARENT is specified, the target may + * not be ".". When deleting and LOCKPARENT is specified, the target may + * be "."., but the caller must check to ensure it does an vrele and DNUNLOCK + * instead of two DNUNLOCKs. 
 *
 * Overall outline of devfs_lookup:
 *
 *	check accessibility of directory
 *	null terminate the component (lookup leaves the whole string alone)
 *	look for name in cache, if found, then if at end of path
 *	  and deleting or creating, drop it, else return name
 *	search for name in directory, to found or notfound
 * notfound:
 *	if creating, return locked directory,
 *	else return error
 * found:
 *	if at end of path and deleting, return information to allow delete
 *	if at end of path and rewriting (RENAME and LOCKPARENT), lock target
 *	  node and return info to allow rewrite
 *	if not at end, add name to cache; if at end and neither creating
 *	  nor deleting, add name to cache
 * On return to lookup, remove the null termination we put in at the start.
 *
 * NOTE: (LOOKUP | LOCKPARENT) currently returns the parent node unlocked.
 */
static int
devfs_lookup(struct vop_lookup_args *ap)
        /*struct vop_lookup_args {
                struct vnode * a_dvp; directory vnode ptr
                struct vnode ** a_vpp; where to put the result
                struct componentname * a_cnp; the name we want
                };*/
{
	struct componentname *cnp = ap->a_cnp;
	struct vnode *dir_vnode = ap->a_dvp;
	struct vnode **result_vnode = ap->a_vpp;
	devnode_t *   dir_node;       /* the directory we are searching */
	devnode_t *   node = NULL;       /* the node we are searching for */
	devdirent_t * nodename;
	int flags = cnp->cn_flags;
	int op = cnp->cn_nameiop;       /* LOOKUP, CREATE, RENAME, or DELETE */
	int lockparent = flags & LOCKPARENT;
	int wantparent = flags & (LOCKPARENT|WANTPARENT);
	int error = 0;
	struct proc *p = cnp->cn_proc;
	char	heldchar;	/* the char at the end of the name componet */

	*result_vnode = NULL; /* safe not sorry */ /*XXX*/

	if (dir_vnode->v_usecount == 0)
	    printf("devfs_lookup: dir had no refs ");
	dir_node = VTODN(dir_vnode);

	/*
	 * Check accessiblity of directory.
	 */
	if (dir_node->dn_type != DEV_DIR) {
		return (ENOTDIR);
	}

	if ((error = VOP_ACCESS(dir_vnode, VEXEC, cnp->cn_cred, p)) != 0) {
		return (error);
	}

	/* temporarily terminate string component */
	heldchar = cnp->cn_nameptr[cnp->cn_namelen];
	cnp->cn_nameptr[cnp->cn_namelen] = '\0';
	DEVFS_LOCK(p);
	nodename = dev_findname(dir_node,cnp->cn_nameptr);
	if (nodename) {
		/* entry exists */
		node = nodename->de_dnp;
		node->dn_last_lookup = nodename;	/* for unlink */
		/* Do potential vnode allocation here inside the lock
		 * to make sure that our device node has a non-NULL dn_vn
		 * associated with it.  The device node might otherwise
		 * get deleted out from under us (see devfs_dn_free()).
		 */
		error = devfs_dntovn(node, result_vnode, p);
	}
	DEVFS_UNLOCK(p);
	/* restore saved character */
	cnp->cn_nameptr[cnp->cn_namelen] = heldchar;

	if (error)
		return (error);

	if (!nodename) { /* no entry */
		/* If it doesn't exist and we're not the last component,
		 * or we're at the last component, but we're not creating
		 * or renaming, return ENOENT.
		 */
		if (!(flags & ISLASTCN) || !(op == CREATE || op == RENAME)) {
			return ENOENT;
		}
		/*
		 * Access for write is interpreted as allowing
		 * creation of files in the directory.
		 */
		if ((error = VOP_ACCESS(dir_vnode, VWRITE,
				cnp->cn_cred, p)) != 0)
		{
			return (error);
		}
		/*
		 * We return with the directory locked, so that
		 * the parameters we set up above will still be
		 * valid if we actually decide to add a new entry.
		 * We return ni_vp == NULL to indicate that the entry
		 * does not currently exist; we leave a pointer to
		 * the (locked) directory vnode in namei_data->ni_dvp.
		 * The pathname buffer is saved so that the name
		 * can be obtained later.
		 *
		 * NB - if the directory is unlocked, then this
		 * information cannot be used.
		 */
		cnp->cn_flags |= SAVENAME;
		if (!lockparent)
			VOP_UNLOCK(dir_vnode, 0, p);
		return (EJUSTRETURN);
	}

	/*
	 * If deleting, and at end of pathname, return
	 * parameters which can be used to remove file.
	 * If the wantparent flag isn't set, we return only
	 * the directory (in namei_data->ni_dvp), otherwise we go
	 * on and lock the node, being careful with ".".
	 */
	if (op == DELETE && (flags & ISLASTCN)) {
		/*
		 * Write access to directory required to delete files.
		 */
		if ((error = VOP_ACCESS(dir_vnode, VWRITE,
				cnp->cn_cred, p)) != 0)
			return (error);
		/*
		 * we are trying to delete '.'.  What does this mean? XXX
		 */
		if (dir_node == node) {
			VREF(dir_vnode);
			*result_vnode = dir_vnode;
			return (0);
		}
#ifdef NOTYET
		/*
		 * If directory is "sticky", then user must own
		 * the directory, or the file in it, else she
		 * may not delete it (unless she's root). This
		 * implements append-only directories.
		 */
		if ((dir_node->mode & ISVTX) &&
		    cnp->cn_cred->cr_uid != 0 &&
		    cnp->cn_cred->cr_uid != dir_node->uid &&
		    cnp->cn_cred->cr_uid != node->uid) {
			VOP_UNLOCK(*result_vnode, 0, p);
			return (EPERM);
		}
#endif
		if (!lockparent)
			VOP_UNLOCK(dir_vnode, 0, p);
		return (0);
	}

	/*
	 * If rewriting (RENAME), return the vnode and the
	 * information required to rewrite the present directory
	 * Must get node of directory entry to verify it's a
	 * regular file, or empty directory.
	 */
	if (op == RENAME && wantparent && (flags & ISLASTCN)) {
		/*
		 * Are we allowed to change the holding directory?
		 */
		if ((error = VOP_ACCESS(dir_vnode, VWRITE,
				cnp->cn_cred, p)) != 0)
			return (error);
		/*
		 * Careful about locking second node.
		 * This can only occur if the target is ".".
		 */
		if (dir_node == node)
			return (EISDIR);
		/* hmm save the 'from' name (we need to delete it) */
		cnp->cn_flags |= SAVENAME;
		if (!lockparent)
			VOP_UNLOCK(dir_vnode, 0, p);
		return (0);
	}

	/*
	 * Step through the translation in the name.  We do not unlock the
	 * directory because we may need it again if a symbolic link
	 * is relative to the current directory.  Instead we save it
	 * unlocked as "saved_dir_node" XXX.  We must get the target
	 * node before unlocking
	 * the directory to insure that the node will not be removed
	 * before we get it.  We prevent deadlock by always fetching
	 * nodes from the root, moving down the directory tree. Thus
	 * when following backward pointers ".." we must unlock the
	 * parent directory before getting the requested directory.
	 * There is a potential race condition here if both the current
	 * and parent directories are removed before the lock for the
	 * node associated with ".." returns.  We hope that this occurs
	 * infrequently since we cannot avoid this race condition without
	 * implementing a sophisticated deadlock detection algorithm.
	 * Note also that this simple deadlock detection scheme will not
	 * work if the file system has any hard links other than ".."
	 * that point backwards in the directory structure.
	 */
	if (flags & ISDOTDOT) {
		VOP_UNLOCK(dir_vnode, 0, p);	/* race to get the node */
		if (lockparent && (flags & ISLASTCN))
			vn_lock(dir_vnode, LK_EXCLUSIVE | LK_RETRY, p);
	} else if (dir_node == node) {
#if 0
	        /*
		 * this next statement is wrong: we already did a vget in
		 * devfs_dntovn(); DWS 4/16/1999
		 */
		VREF(dir_vnode);	/* we want ourself, ie "." */
#endif
		*result_vnode = dir_vnode;
	} else {
		if (!lockparent || (flags & ISLASTCN))
			VOP_UNLOCK(dir_vnode, 0, p);
	}

	return (0);
}

static int
devfs_access(struct vop_access_args *ap)
        /*struct vop_access_args  {
                struct vnode *a_vp;
                int  a_mode;
                struct ucred *a_cred;
                struct proc *a_p;
        } */
{
	/*
 	 * mode is filled with a combination of VREAD, VWRITE,
 	 * and/or VEXEC bits turned on.  In an octal number these
 	 * are the Y in 0Y00.
+ */ + struct vnode *vp = ap->a_vp; + int mode = ap->a_mode; + struct ucred *cred = ap->a_cred; + devnode_t * file_node; + gid_t *gp; + int i; + struct proc *p = ap->a_p; + + file_node = VTODN(vp); + /* + * if we are not running as a process, we are in the + * kernel and we DO have permission + */ + if (p == NULL) + return 0; + + /* + * Access check is based on only one of owner, group, public. + * If not owner, then check group. If not a member of the + * group, then check public access. + */ + if (cred->cr_uid != file_node->dn_uid) + { + /* failing that.. try groups */ + mode >>= 3; + gp = cred->cr_groups; + for (i = 0; i < cred->cr_ngroups; i++, gp++) + { + if (file_node->dn_gid == *gp) + { + goto found; + } + } + /* failing that.. try general access */ + mode >>= 3; +found: + ; + } + if ((file_node->dn_mode & mode) == mode) + return (0); + /* + * Root gets to do anything. + * but only use suser prives as a last resort + * (Use of super powers is recorded in ap->a_p->p_acflag) + */ + if( suser(cred, &ap->a_p->p_acflag) == 0) /* XXX what if no proc? 
*/ + return 0; + return (EACCES); +} + +static int +devfs_getattr(struct vop_getattr_args *ap) + /*struct vop_getattr_args { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ +{ + struct vnode *vp = ap->a_vp; + struct vattr *vap = ap->a_vap; + devnode_t * file_node; + struct timeval tv; + + file_node = VTODN(vp); + tv = time; + dn_times(file_node, tv, tv); + vap->va_rdev = 0;/* default value only */ + vap->va_mode = file_node->dn_mode; + switch (file_node->dn_type) + { + case DEV_DIR: + vap->va_rdev = (dev_t)file_node->dn_dvm; + vap->va_mode |= (S_IFDIR); + break; + case DEV_CDEV: + vap->va_rdev = file_node->dn_typeinfo.dev; + vap->va_mode |= (S_IFCHR); + break; + case DEV_BDEV: + vap->va_rdev = file_node->dn_typeinfo.dev; + vap->va_mode |= (S_IFBLK); + break; + case DEV_SLNK: + vap->va_mode |= (S_IFLNK); + break; + } + vap->va_type = vp->v_type; + vap->va_nlink = file_node->dn_links; + vap->va_uid = file_node->dn_uid; + vap->va_gid = file_node->dn_gid; + vap->va_fsid = (int32_t)(void *)file_node->dn_dvm; + vap->va_fileid = (int32_t)(void *)file_node; + vap->va_size = file_node->dn_len; /* now a u_quad_t */ + /* this doesn't belong here */ + if (vp->v_type == VBLK) + vap->va_blocksize = BLKDEV_IOSIZE; + else if (vp->v_type == VCHR) + vap->va_blocksize = MAXPHYSIO; + else + vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize; + /* if the time is bogus, set it to the boot time */ + if (file_node->dn_ctime.tv_sec == 0) + file_node->dn_ctime.tv_sec = boottime.tv_sec; + if (file_node->dn_mtime.tv_sec == 0) + file_node->dn_mtime.tv_sec = boottime.tv_sec; + if (file_node->dn_atime.tv_sec == 0) + file_node->dn_atime.tv_sec = boottime.tv_sec; + vap->va_ctime = file_node->dn_ctime; + vap->va_mtime = file_node->dn_mtime; + vap->va_atime = file_node->dn_atime; + vap->va_gen = 0; + vap->va_flags = 0; + vap->va_bytes = file_node->dn_len; /* u_quad_t */ + vap->va_filerev = 0; /* XXX */ /* u_quad_t */ + vap->va_vaflags = 0; /* XXX */ + 
return 0; +} + +static int +devfs_setattr(struct vop_setattr_args *ap) + /*struct vop_setattr_args { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ +{ + struct vnode *vp = ap->a_vp; + struct vattr *vap = ap->a_vap; + struct ucred *cred = ap->a_cred; + struct proc *p = ap->a_p; + int error = 0; + gid_t *gp; + int i; + devnode_t * file_node; + struct timeval atimeval, mtimeval; + + if (vap->va_flags != VNOVAL) /* XXX needs to be implemented */ + return (EOPNOTSUPP); + + file_node = VTODN(vp); + + if ((vap->va_type != VNON) || + (vap->va_nlink != VNOVAL) || + (vap->va_fsid != VNOVAL) || + (vap->va_fileid != VNOVAL) || + (vap->va_blocksize != VNOVAL) || + (vap->va_rdev != VNOVAL) || + (vap->va_bytes != VNOVAL) || + (vap->va_gen != VNOVAL )) + { + return EINVAL; + } + + /* + * Go through the fields and update iff not VNOVAL. + */ + if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { + if (cred->cr_uid != file_node->dn_uid && + (error = suser(cred, &p->p_acflag)) && + ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || + (error = VOP_ACCESS(vp, VWRITE, cred, p)))) + return (error); + if (vap->va_atime.tv_sec != VNOVAL) + file_node->dn_flags |= DN_ACCESS; + if (vap->va_mtime.tv_sec != VNOVAL) + file_node->dn_flags |= DN_CHANGE | DN_UPDATE; + atimeval.tv_sec = vap->va_atime.tv_sec; + atimeval.tv_usec = vap->va_atime.tv_nsec / 1000; + mtimeval.tv_sec = vap->va_mtime.tv_sec; + mtimeval.tv_usec = vap->va_mtime.tv_nsec / 1000; + if (error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)) + return (error); + } + + /* + * Change the permissions.. must be root or owner to do this. + */ + if (vap->va_mode != (u_short)VNOVAL) { + if ((cred->cr_uid != file_node->dn_uid) + && (error = suser(cred, &p->p_acflag))) + return (error); + file_node->dn_mode &= ~07777; + file_node->dn_mode |= vap->va_mode & 07777; + } + + /* + * Change the owner.. must be root to do this. 
+ */ + if (vap->va_uid != (uid_t)VNOVAL) { + if (error = suser(cred, &p->p_acflag)) + return (error); + file_node->dn_uid = vap->va_uid; + } + + /* + * Change the group.. must be root or owner to do this. + * If we are the owner, we must be in the target group too. + * don't use suser() unless you have to as it reports + * whether you needed suser powers or not. + */ + if (vap->va_gid != (gid_t)VNOVAL) { + if (cred->cr_uid == file_node->dn_uid){ + gp = cred->cr_groups; + for (i = 0; i < cred->cr_ngroups; i++, gp++) { + if (vap->va_gid == *gp) + goto cando; + } + } + /* + * we can't do it with normal privs, + * do we have an ace up our sleeve? + */ + if (error = suser(cred, &p->p_acflag)) + return (error); +cando: + file_node->dn_gid = vap->va_gid; + } +#if 0 + /* + * Copied from somewhere else + * but only kept as a marker and reminder of the fact that + * flags should be handled some day + */ + if (vap->va_flags != VNOVAL) { + if (error = suser(cred, &p->p_acflag)) + return error; + if (cred->cr_uid == 0) + ; + else { + } + } +#endif + return error; +} + +static int +devfs_read(struct vop_read_args *ap) + /*struct vop_read_args { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ +{ + devnode_t * dn_p = VTODN(ap->a_vp); + + switch (ap->a_vp->v_type) { + case VDIR: { + dn_p->dn_flags |= DN_ACCESS; + return VOP_READDIR(ap->a_vp, ap->a_uio, ap->a_cred, + NULL, NULL, NULL); + } + default: { + printf("devfs_read(): bad file type %d", ap->a_vp->v_type); + return(EINVAL); + break; + } + } + return (0); /* not reached */ +} + +static int +devfs_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode * vp = ap->a_vp; + register devnode_t * dnp = VTODN(vp); + + simple_lock(&vp->v_interlock); + if (vp->v_usecount > 1) + dn_times(dnp, time, time); + simple_unlock(&vp->v_interlock); + return (0); +} + +static int +devfsspec_close(ap) + struct 
vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode * vp = ap->a_vp; + register devnode_t * dnp = VTODN(vp); + + simple_lock(&vp->v_interlock); + if (vp->v_usecount > 1) + dn_times(dnp, time, time); + simple_unlock(&vp->v_interlock); + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap)); +} + +static int +devfsspec_read(struct vop_read_args *ap) + /*struct vop_read_args { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ +{ + VTODN(ap->a_vp)->dn_flags |= DN_ACCESS; + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap)); +} + +static int +devfsspec_write(struct vop_write_args *ap) + /*struct vop_write_args { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ +{ + VTODN(ap->a_vp)->dn_flags |= DN_CHANGE | DN_UPDATE; + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Write data to a file or directory. + */ +static int +devfs_write(struct vop_write_args *ap) + /*struct vop_write_args { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ +{ + switch (ap->a_vp->v_type) { + case VDIR: + return(EISDIR); + default: + printf("devfs_write(): bad file type %d", ap->a_vp->v_type); + return (EINVAL); + } + return 0; /* not reached */ +} + +static int +devfs_remove(struct vop_remove_args *ap) + /*struct vop_remove_args { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; + } */ +{ + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + devnode_t * tp; + devnode_t * tdp; + devdirent_t * tnp; + int doingdirectory = 0; + int error = 0; + uid_t ouruid = cnp->cn_cred->cr_uid; + struct proc *p = cnp->cn_proc; + + /* + * Lock our directories and get our name pointers + * assume that the names are null terminated as they + * are the end of the path. 
Get pointers to all our + * devfs structures. + */ + tp = VTODN(vp); + tdp = VTODN(dvp); + /* + * Assuming we are atomic, dev_lookup left this for us + */ + tnp = tp->dn_last_lookup; + + /* + * Check we are doing legal things WRT the new flags + */ + if ((tp->dn_flags & (IMMUTABLE | APPEND)) + || (tdp->dn_flags & APPEND) /*XXX eh?*/ ) { + error = EPERM; + goto abort; + } + + /* + * Make sure that we don't try do something stupid + */ + if ((tp->dn_type) == DEV_DIR) { + /* + * Avoid ".", "..", and aliases of "." for obvious reasons. + */ + if ( (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') + || (cnp->cn_flags&ISDOTDOT) ) { + error = EINVAL; + goto abort; + } + doingdirectory++; + } + + /*********************************** + * Start actually doing things.... * + ***********************************/ + tdp->dn_flags |= DN_CHANGE | DN_UPDATE; + + /* + * own the parent directory, or the destination of the rename, + * otherwise the destination may not be changed (except by + * root). This implements append-only directories. + * XXX shoudn't this be in generic code? + */ + if ((tdp->dn_mode & S_ISTXT) + && ouruid != 0 + && ouruid != tdp->dn_uid + && ouruid != tp->dn_uid ) { + error = EPERM; + goto abort; + } + /* + * Target must be empty if a directory and have no links + * to it. Also, ensure source and target are compatible + * (both directories, or both not directories). 
+ */ + if (( doingdirectory) && (tp->dn_links > 2)) { + error = ENOTEMPTY; + goto abort; + } + DEVFS_LOCK(p); + dev_free_name(tnp); + DEVFS_UNLOCK(p); + abort: + if (dvp == vp) + vrele(vp); + else + vput(vp); + vput(dvp); + return (error); +} + +/* + */ +static int +devfs_link(struct vop_link_args *ap) + /*struct vop_link_args { + struct vnode *a_tdvp; + struct vnode *a_vp; + struct componentname *a_cnp; + } */ +{ + struct vnode *vp = ap->a_vp; + struct vnode *tdvp = ap->a_tdvp; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + devnode_t * fp; + devnode_t * tdp; + devdirent_t * tnp; + int error = 0; + struct timeval tv; + + /* + * First catch an arbitrary restriction for this FS + */ + if (cnp->cn_namelen > DEVMAXNAMESIZE) { + error = ENAMETOOLONG; + goto out1; + } + + /* + * Lock our directories and get our name pointers + * assume that the names are null terminated as they + * are the end of the path. Get pointers to all our + * devfs structures. + */ + tdp = VTODN(tdvp); + fp = VTODN(vp); + + if (tdvp->v_mount != vp->v_mount) { + error = EXDEV; + VOP_ABORTOP(tdvp, cnp); + goto out2; + } + if (tdvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) { + VOP_ABORTOP(tdvp, cnp); + goto out2; + } + + /* + * Check we are doing legal things WRT the new flags + */ + if (fp->dn_flags & (IMMUTABLE | APPEND)) { + VOP_ABORTOP(tdvp, cnp); + error = EPERM; + goto out1; + } + + /*********************************** + * Start actually doing things.... * + ***********************************/ + fp->dn_flags |= DN_CHANGE; + tv = time; + error = VOP_UPDATE(vp, &tv, &tv, 1); + if (!error) { + DEVFS_LOCK(p); + error = dev_add_name(cnp->cn_nameptr, tdp, NULL, fp, &tnp); + DEVFS_UNLOCK(p); + } +out1: + if (tdvp != vp) + VOP_UNLOCK(vp, 0, p); +out2: + vput(tdvp); + return (error); +} + +/* + * Check if source directory is in the path of the target directory. + * Target is supplied locked, source is unlocked. + * The target is always vput before returning. 
+ */ +int +devfs_checkpath(source, target) + devnode_t *source, *target; +{ + int error = 0; + devnode_t * ntmp; + devnode_t * tmp; + struct vnode *vp; + + vp = target->dn_vn; + tmp = target; + + do { + if (tmp == source) { + error = EINVAL; + break; + } + ntmp = tmp; + } while ((tmp = tmp->dn_typeinfo.Dir.parent) != ntmp); + + if (vp != NULL) + vput(vp); + return (error); +} + +/* + * Rename system call. Seems overly complicated to me... + * rename("foo", "bar"); + * is essentially + * unlink("bar"); + * link("foo", "bar"); + * unlink("foo"); + * but ``atomically''. + * + * When the target exists, both the directory + * and target vnodes are locked. + * the source and source-parent vnodes are referenced + * + * + * Basic algorithm is: + * + * 1) Bump link count on source while we're linking it to the + * target. This also ensure the inode won't be deleted out + * from underneath us while we work (it may be truncated by + * a concurrent `trunc' or `open' for creation). + * 2) Link source to destination. If destination already exists, + * delete it first. + * 3) Unlink source reference to node if still around. If a + * directory was moved and the parent of the destination + * is different from the source, patch the ".." entry in the + * directory. 
+ */ +static int +devfs_rename(struct vop_rename_args *ap) + /*struct vop_rename_args { + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; + } */ +{ + struct vnode *tvp = ap->a_tvp; + struct vnode *tdvp = ap->a_tdvp; + struct vnode *fvp = ap->a_fvp; + struct vnode *fdvp = ap->a_fdvp; + struct componentname *tcnp = ap->a_tcnp; + struct componentname *fcnp = ap->a_fcnp; + struct proc *p = fcnp->cn_proc; + devnode_t *fp, *fdp, *tp, *tdp; + devdirent_t *fnp,*tnp; + int doingdirectory = 0; + int error = 0; + struct timeval tv; + + /* + * First catch an arbitrary restriction for this FS + */ + if(tcnp->cn_namelen > DEVMAXNAMESIZE) { + error = ENAMETOOLONG; + goto abortit; + } + + /* + * Lock our directories and get our name pointers + * assume that the names are null terminated as they + * are the end of the path. Get pointers to all our + * devfs structures. + */ + tdp = VTODN(tdvp); + fdp = VTODN(fdvp); + fp = VTODN(fvp); + fnp = fp->dn_last_lookup; + tp = NULL; + tnp = NULL; + if (tvp) { + tp = VTODN(tvp); + tnp = tp->dn_last_lookup; + } + + /* + * trying to move it out of devfs? + * if we move a dir across mnt points. we need to fix all + * the mountpoint pointers! XXX + * so for now keep dirs within the same mount + */ + if ((fvp->v_mount != tdvp->v_mount) || + (tvp && (fvp->v_mount != tvp->v_mount))) { + error = EXDEV; +abortit: + VOP_ABORTOP(tdvp, tcnp); + if (tdvp == tvp) /* eh? */ + vrele(tdvp); + else + vput(tdvp); + if (tvp) + vput(tvp); + VOP_ABORTOP(fdvp, fcnp); /* XXX, why not in NFS? 
*/ + vrele(fdvp); + vrele(fvp); + return (error); + } + + /* + * Check we are doing legal things WRT the new flags + */ + if ((tp && (tp->dn_flags & (IMMUTABLE | APPEND))) + || (fp->dn_flags & (IMMUTABLE | APPEND)) + || (fdp->dn_flags & APPEND)) { + error = EPERM; + goto abortit; + } + + /* + * Make sure that we don't try do something stupid + */ + if ((fp->dn_type) == DEV_DIR) { + /* + * Avoid ".", "..", and aliases of "." for obvious reasons. + */ + if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') + || (fcnp->cn_flags&ISDOTDOT) + || (tcnp->cn_namelen == 1 && tcnp->cn_nameptr[0] == '.') + || (tcnp->cn_flags&ISDOTDOT) + || (tdp == fp )) { + error = EINVAL; + goto abortit; + } + doingdirectory++; + } + + /* + * If ".." must be changed (ie the directory gets a new + * parent) then the source directory must not be in the + * directory hierarchy above the target, as this would + * orphan everything below the source directory. Also + * the user must have write permission in the source so + * as to be able to change "..". + */ + if (doingdirectory && (tdp != fdp)) { + devnode_t * tmp, *ntmp; + error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_proc); + tmp = tdp; + do { + if(tmp == fp) { + /* XXX unlock stuff here probably */ + error = EINVAL; + goto out; + } + ntmp = tmp; + } while ((tmp = tmp->dn_typeinfo.Dir.parent) != ntmp); + } + + /*********************************** + * Start actually doing things.... * + ***********************************/ + fp->dn_flags |= DN_CHANGE; + tv = time; + if (error = VOP_UPDATE(fvp, &tv, &tv, 1)) { + VOP_UNLOCK(fvp, 0, p); + goto bad; + } + /* + * Check if just deleting a link name. + */ + if (fvp == tvp) { + if (fvp->v_type == VDIR) { + error = EINVAL; + goto abortit; + } + + /* Release destination completely. */ + VOP_ABORTOP(tdvp, tcnp); + vput(tdvp); + vput(tvp); + + /* Delete source. 
*/ + VOP_ABORTOP(fdvp, fcnp); /*XXX*/ + vrele(fdvp); + vrele(fvp); + dev_free_name(fnp); + return 0; + } + + vrele(fdvp); + + /* + * 1) Bump link count while we're moving stuff + * around. If we crash somewhere before + * completing our work, too bad :) + */ + fp->dn_links++; + /* + * If the target exists zap it (unless it's a non-empty directory) + * We could do that as well but won't + */ + if (tp) { + int ouruid = tcnp->cn_cred->cr_uid; + /* + * If the parent directory is "sticky", then the user must + * own the parent directory, or the destination of the rename, + * otherwise the destination may not be changed (except by + * root). This implements append-only directories. + * XXX shoudn't this be in generic code? + */ + if ((tdp->dn_mode & S_ISTXT) + && ouruid != 0 + && ouruid != tdp->dn_uid + && ouruid != tp->dn_uid ) { + error = EPERM; + goto bad; + } + /* + * Target must be empty if a directory and have no links + * to it. Also, ensure source and target are compatible + * (both directories, or both not directories). + */ + if (( doingdirectory) && (tp->dn_links > 2)) { + error = ENOTEMPTY; + goto bad; + } + dev_free_name(tnp); + tp = NULL; + } + dev_add_name(tcnp->cn_nameptr,tdp,NULL,fp,&tnp); + fnp->de_dnp = NULL; + fp->dn_links--; /* one less link to it.. 
*/ + dev_free_name(fnp); + fp->dn_links--; /* we added one earlier*/ + if (tdp) + vput(tdvp); + if (tp) + vput(fvp); + vrele(fvp); + return (error); + +bad: + if (tp) + vput(tvp); + vput(tdvp); +out: + if (vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p) == 0) { + fp->dn_links--; /* we added one earlier*/ + vput(fvp); + } else + vrele(fvp); + return (error); +} + +static int +devfs_symlink(struct vop_symlink_args *ap) + /*struct vop_symlink_args { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + char *a_target; + } */ +{ + struct componentname * cnp = ap->a_cnp; + struct vnode *vp = NULL; + int error = 0; + devnode_t * dir_p; + devnode_type_t typeinfo; + devdirent_t * nm_p; + devnode_t * dev_p; + struct vattr * vap = ap->a_vap; + struct vnode * * vpp = ap->a_vpp; + struct proc *p = cnp->cn_proc; + struct timeval tv; + + dir_p = VTODN(ap->a_dvp); + typeinfo.Slnk.name = ap->a_target; + typeinfo.Slnk.namelen = strlen(ap->a_target); + DEVFS_LOCK(p); + error = dev_add_entry(cnp->cn_nameptr, dir_p, DEV_SLNK, + &typeinfo, NULL, NULL, &nm_p); + DEVFS_UNLOCK(p); + if (error) { + goto failure; + } + + dev_p = nm_p->de_dnp; + dev_p->dn_uid = dir_p->dn_uid; + dev_p->dn_gid = dir_p->dn_gid; + dev_p->dn_mode = vap->va_mode; + dn_copy_times(dev_p, dir_p); + error = devfs_dntovn(dev_p, vpp, p); + if (error) + goto failure; + vp = *vpp; + vput(vp); + failure: + if ((cnp->cn_flags & SAVESTART) == 0) + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + vput(ap->a_dvp); + return error; +} + +/* + * Mknod vnode call + */ +/* ARGSUSED */ +int +devfs_mknod(ap) + struct vop_mknod_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + struct componentname * cnp = ap->a_cnp; + devnode_t * dev_p; + devdirent_t * devent; + devnode_t * dir_p; /* devnode for parent directory */ + struct vnode * dvp = ap->a_dvp; + int error = 0; + devnode_type_t typeinfo; + struct vattr * vap = 
ap->a_vap; + struct vnode ** vpp = ap->a_vpp; + struct proc * p = cnp->cn_proc; + + *vpp = NULL; + if (!vap->va_type == VBLK && !vap->va_type == VCHR) { + error = EINVAL; /* only support mknod of special files */ + goto failure; + } + dir_p = VTODN(dvp); + typeinfo.dev = vap->va_rdev; + DEVFS_LOCK(p); + error = dev_add_entry(cnp->cn_nameptr, dir_p, + (vap->va_type == VBLK) ? DEV_BDEV : DEV_CDEV, + &typeinfo, NULL, NULL, &devent); + DEVFS_UNLOCK(p); + if (error) { + goto failure; + } + dev_p = devent->de_dnp; + error = devfs_dntovn(dev_p, vpp, p); + if (error) + goto failure; + dev_p->dn_uid = cnp->cn_cred->cr_uid; + dev_p->dn_gid = dir_p->dn_gid; + dev_p->dn_mode = vap->va_mode; + failure: + if (*vpp) { + vput(*vpp); + *vpp = 0; + } + if ((cnp->cn_flags & SAVESTART) == 0) + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + vput(dvp); + return (error); +} + +/* + * Vnode op for readdir + */ +static int +devfs_readdir(struct vop_readdir_args *ap) + /*struct vop_readdir_args { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *eofflag; + int *ncookies; + u_int **cookies; + } */ +{ + struct vnode *vp = ap->a_vp; + struct uio *uio = ap->a_uio; + struct dirent dirent; + devnode_t * dir_node; + devdirent_t * name_node; + char *name; + int error = 0; + int reclen; + int nodenumber; + int startpos,pos; + struct proc * p = uio->uio_procp; + + /* set up refs to dir */ + dir_node = VTODN(vp); + if(dir_node->dn_type != DEV_DIR) + return(ENOTDIR); + + pos = 0; + startpos = uio->uio_offset; + DEVFS_LOCK(p); + name_node = dir_node->dn_typeinfo.Dir.dirlist; + nodenumber = 0; + dir_node->dn_flags |= DN_ACCESS; + + while ((name_node || (nodenumber < 2)) && (uio->uio_resid > 0)) + { + switch(nodenumber) + { + case 0: + dirent.d_fileno = (int32_t)(void *)dir_node; + name = "."; + dirent.d_namlen = 1; + dirent.d_type = DT_DIR; + break; + case 1: + if(dir_node->dn_typeinfo.Dir.parent) + dirent.d_fileno + = (int32_t)dir_node->dn_typeinfo.Dir.parent; + else + 
dirent.d_fileno = (u_int32_t)dir_node; + name = ".."; + dirent.d_namlen = 2; + dirent.d_type = DT_DIR; + break; + default: + dirent.d_fileno = (int32_t)(void *)name_node->de_dnp; + dirent.d_namlen = strlen(name_node->de_name); + name = name_node->de_name; + switch(name_node->de_dnp->dn_type) { + case DEV_BDEV: + dirent.d_type = DT_BLK; + break; + case DEV_CDEV: + dirent.d_type = DT_CHR; + break; + case DEV_DIR: + dirent.d_type = DT_DIR; + break; + case DEV_SLNK: + dirent.d_type = DT_LNK; + break; + default: + dirent.d_type = DT_UNKNOWN; + } + } +#define GENERIC_DIRSIZ(dp) \ + ((sizeof (struct dirent) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3)) + + reclen = dirent.d_reclen = GENERIC_DIRSIZ(&dirent); + + if(pos >= startpos) /* made it to the offset yet? */ + { + if (uio->uio_resid < reclen) /* will it fit? */ + break; + strcpy( dirent.d_name,name); + if ((error = uiomove ((caddr_t)&dirent, + dirent.d_reclen, uio)) != 0) + break; + } + pos += reclen; + if((nodenumber >1) && name_node) + name_node = name_node->de_next; + nodenumber++; + } + DEVFS_UNLOCK(p); + uio->uio_offset = pos; + + return (error); +} + + +/* + */ +static int +devfs_readlink(struct vop_readlink_args *ap) + /*struct vop_readlink_args { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + } */ +{ + struct vnode *vp = ap->a_vp; + struct uio *uio = ap->a_uio; + devnode_t * lnk_node; + int error = 0; + + /* set up refs to dir */ + lnk_node = VTODN(vp); + if(lnk_node->dn_type != DEV_SLNK) + return(EINVAL); + if ((error = VOP_ACCESS(vp, VREAD, ap->a_cred, NULL)) != 0) { /* XXX */ + return error; + } + error = uiomove(lnk_node->dn_typeinfo.Slnk.name, + lnk_node->dn_typeinfo.Slnk.namelen, uio); + return error; +} + +static int +devfs_abortop(struct vop_abortop_args *ap) + /*struct vop_abortop_args { + struct vnode *a_dvp; + struct componentname *a_cnp; + } */ +{ + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) { + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, 
M_NAMEI); + } + return 0; +} + + +static int +devfs_reclaim(struct vop_reclaim_args *ap) + /*struct vop_reclaim_args { + struct vnode *a_vp; + } */ +{ + struct vnode * vp = ap->a_vp; + devnode_t * dnp = VTODN(vp); + + if (dnp) { + /* + * do the same as devfs_inactive in case it is not called + * before us (can that ever happen?) + */ + dnp->dn_vn = NULL; + vp->v_data = NULL; + if (dnp->dn_delete) { + devnode_free(dnp); + } + } + return(0); +} + +/* + * Print out the contents of a /devfs vnode. + */ +static int +devfs_print(struct vop_print_args *ap) + /*struct vop_print_args { + struct vnode *a_vp; + } */ +{ + + return (0); +} + +/**************************************************************************\ +* pseudo ops * +\**************************************************************************/ + +/* + * + * struct vop_inactive_args { + * struct vnode *a_vp; + * struct proc *a_p; + * } + */ + +static int +devfs_inactive(struct vop_inactive_args *ap) +{ + struct vnode * vp = ap->a_vp; + devnode_t * dnp = VTODN(vp); + + if (dnp) { + dnp->dn_vn = NULL; + vp->v_data = NULL; + if (dnp->dn_delete) { + devnode_free(dnp); + } + } + VOP_UNLOCK(vp, 0, ap->a_p); + return (0); +} + +int +devfs_update(ap) + struct vop_update_args /* { + struct vnode *a_vp; + struct timeval *a_access; + struct timeval *a_modify; + int a_waitfor; + } */ *ap; +{ + register struct fs *fs; + int error; + devnode_t * ip; + + ip = VTODN(ap->a_vp); + if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) { + ip->dn_flags &= + ~(DN_ACCESS | DN_CHANGE | DN_MODIFIED | DN_UPDATE); + return (0); + } + if ((ip->dn_flags & + (DN_ACCESS | DN_CHANGE | DN_MODIFIED | DN_UPDATE)) == 0) + return (0); + dn_times(ip, time, time); + return (0); +} + +#define VOPFUNC int (*)(void *) + +/* The following ops are used by directories and symlinks */ +int (**devfs_vnodeop_p)(void *); +static struct vnodeopv_entry_desc devfs_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, 
(VOPFUNC)devfs_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)err_create }, /* create */ + { &vop_whiteout_desc, (VOPFUNC)err_whiteout }, /* whiteout */ + { &vop_mknod_desc, (VOPFUNC)devfs_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)nop_open }, /* open */ + { &vop_close_desc, (VOPFUNC)devfs_close }, /* close */ + { &vop_access_desc, (VOPFUNC)devfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)devfs_read }, /* read */ + { &vop_write_desc, (VOPFUNC)devfs_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)nop_lease }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)err_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)err_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)err_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)devfs_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)devfs_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)devfs_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)devfs_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)devfs_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)devfs_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)devfs_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)nop_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)nop_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)err_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)err_print }, /* print */ + { &vop_islocked_desc, 
(VOPFUNC)nop_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)err_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)err_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)err_valloc }, /* valloc */ + { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)err_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)devfs_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)err_bwrite }, + { &vop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)err_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc devfs_vnodeop_opv_desc = + { &devfs_vnodeop_p, devfs_vnodeop_entries }; + +/* The following ops are used by the device nodes */ +int (**devfs_spec_vnodeop_p)(void *); +static struct vnodeopv_entry_desc devfs_spec_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)spec_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)spec_open }, /* open */ + { &vop_close_desc, (VOPFUNC)devfsspec_close }, /* close */ + { &vop_access_desc, (VOPFUNC)devfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)devfsspec_read }, /* read */ + { &vop_write_desc, (VOPFUNC)devfsspec_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)spec_lease_check }, /* lease */ + { &vop_ioctl_desc, 
(VOPFUNC)spec_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)spec_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)spec_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)devfs_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)devfs_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)spec_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)nop_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)nop_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)spec_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)devfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)nop_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)spec_valloc }, /* valloc */ + { &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (VOPFUNC)nop_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)spec_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)devfs_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)vn_bwrite }, + { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */ + { &vop_pagein_desc, (VOPFUNC)err_pagein }, 
/* Pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ + { &vop_blktooff_desc, (VOPFUNC)spec_offtoblk }, /* blkofftoblk */ + { &vop_cmap_desc, (VOPFUNC)spec_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc devfs_spec_vnodeop_opv_desc = + { &devfs_spec_vnodeop_p, devfs_spec_vnodeop_entries }; + diff --git a/bsd/miscfs/devfs/devfsdefs.h b/bsd/miscfs/devfs/devfsdefs.h new file mode 100644 index 000000000..630795bb4 --- /dev/null +++ b/bsd/miscfs/devfs/devfsdefs.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1997,1998 Julian Elischer. All rights reserved. + * julian@freebsd.org + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * devfsdefs.h + */ + +/* + * HISTORY + * 8-April-1999 Dieter Siegmund (dieter@apple.com) + * Ported to from FreeBSD 3.1 + * Removed unnecessary/unused defines + * Renamed structures/elements to clarify usage in code. 
+ */ + + +#define DEVMAXNAMESIZE 32 /* XXX */ +#define DEVMAXPATHSIZE 128 /* XXX */ + +typedef enum { + DEV_DIR, + DEV_BDEV, + DEV_CDEV, + DEV_SLNK, +} devfstype_t; + +extern int (**devfs_vnodeop_p)(void *); /* our own vector array for dirs */ +extern int (**devfs_spec_vnodeop_p)(void *); /* our own vector array for devs */ +extern struct vfsops devfs_vfsops; + +typedef struct devnode devnode_t; +typedef struct devdirent devdirent_t; +typedef union devnode_type devnode_type_t; + +struct devfs_stats { + int nodes; + int entries; + int mounts; + int stringspace; +}; + +union devnode_type { + dev_t dev; + struct { + devdirent_t * dirlist; + devdirent_t * * dirlast; + devnode_t * parent; + devdirent_t * myname; /* my entry in .. */ + int entrycount; + }Dir; + struct { + char * name; /* must be allocated separately */ + int namelen; + }Slnk; +}; + +#define DN_ACCESS 0x0001 /* Access time update request. */ +#define DN_CHANGE 0x0002 /* Inode change time update request. */ +#define DN_UPDATE 0x0004 /* Modification time update request. */ +#define DN_MODIFIED 0x0008 /* Inode has been modified. */ +#define DN_RENAME 0x0010 /* Inode is being renamed. */ + +struct devnode +{ + devfstype_t dn_type; + int dn_flags; + u_short dn_mode; + u_short dn_uid; + u_short dn_gid; + struct timespec dn_atime;/* time of last access */ + struct timespec dn_mtime;/* time of last modification */ + struct timespec dn_ctime;/* time file changed */ + int (***dn_ops)(void *);/* yuk... pointer to pointer(s) to funcs */ + int dn_links;/* how many file links does this node have? */ + struct devfsmount * dn_dvm; /* the mount structure for this 'plane' */ + struct vnode * dn_vn; /* address of last vnode that represented us */ + int dn_len; /* of any associated info (e.g. 
dir data) */ + devdirent_t * dn_linklist;/* circular list of hardlinks to this node */ + devdirent_t * dn_last_lookup; /* name I was last looked up from */ + devnode_t * dn_nextsibling; /* the list of equivalent nodes */ + devnode_t * * dn_prevsiblingp;/* backpointer for the above */ + devnode_type_t dn_typeinfo; + int dn_delete; /* mark for deletion */ +}; + +struct devdirent +{ + /*-----------------------directory entry fields-------------*/ + char de_name[DEVMAXNAMESIZE]; + devnode_t * de_dnp; /* the "inode" (devnode) pointer */ + devnode_t * de_parent; /* backpointer to the directory itself */ + devdirent_t * de_next; /* next object in this directory */ + devdirent_t * *de_prevp; /* previous pointer in directory linked list */ + devdirent_t * de_nextlink; /* next hardlink to this node */ + devdirent_t * *de_prevlinkp; /* previous hardlink pointer for this node */ +}; + +extern devdirent_t * dev_root; +extern struct lock__bsd__ devfs_lock; +extern struct devfs_stats devfs_stats; + +/* + * Rules for front nodes: + * Dirs hava a strict 1:1 relationship with their OWN devnode + * Symlinks similarly + * Device Nodes ALWAYS point to the devnode that is linked + * to the Backing node. (with a ref count) + */ + +/* + * DEVFS specific per/mount information, used to link a monted fs to a + * particular 'plane' of front nodes. 
+ */ +struct devfsmount +{ + struct mount * mount; /* vfs mount struct for this fs */ + devdirent_t * plane_root;/* the root of this 'plane' */ +}; + +/* + * Prototypes for DEVFS virtual filesystem operations + */ +#include +#include + +//#define HIDDEN_MOUNTPOINT 1 + +/* misc */ +#define M_DEVFSNAME M_DEVFS +#define M_DEVFSNODE M_DEVFS +#define M_DEVFSMNT M_DEVFS + +static __inline__ void +getnanotime(struct timespec * t_p) +{ + struct timeval tv; + + microtime(&tv); + t_p->tv_sec = tv.tv_sec; + t_p->tv_nsec = tv.tv_usec * 1000; + return; +} + +#define VTODN(vp) ((devnode_t *)(vp)->v_data) +extern void cache_purge(struct vnode *vp); /* vfs_cache.c */ + +static __inline__ int +DEVFS_LOCK(struct proc * p) +{ + return (lockmgr(&devfs_lock, LK_EXCLUSIVE, NULL, p)); +} + +static __inline__ int +DEVFS_UNLOCK(struct proc * p) +{ + return (lockmgr(&devfs_lock, LK_RELEASE, NULL, p)); +} + +static __inline__ void +DEVFS_INCR_ENTRIES() +{ + devfs_stats.entries++; +} + +static __inline__ void +DEVFS_DECR_ENTRIES() +{ + devfs_stats.entries--; +} + +static __inline__ void +DEVFS_INCR_NODES() +{ + devfs_stats.nodes++; +} + +static __inline__ void +DEVFS_DECR_NODES() +{ + devfs_stats.nodes--; +} + +static __inline__ void +DEVFS_INCR_MOUNTS() +{ + devfs_stats.mounts++; +} + +static __inline__ void +DEVFS_DECR_MOUNTS() +{ + devfs_stats.mounts--; +} + +static __inline__ void +DEVFS_INCR_STRINGSPACE(int space) +{ + devfs_stats.stringspace += space; +} + +static __inline__ void +DEVFS_DECR_STRINGSPACE(int space) +{ + devfs_stats.stringspace -= space; + if (devfs_stats.stringspace < 0) { + printf("DEVFS_DECR_STRINGSPACE: (%d - %d < 0)\n", + devfs_stats.stringspace + space, space); + devfs_stats.stringspace = 0; + } +} + +static __inline__ void +dn_times(devnode_t * dnp, struct timeval t1, struct timeval t2) +{ + if (dnp->dn_flags & (DN_ACCESS | DN_CHANGE | DN_UPDATE)) { + if (dnp->dn_flags & DN_ACCESS) { + dnp->dn_atime.tv_sec = t1.tv_sec; + dnp->dn_atime.tv_nsec = t1.tv_usec * 1000; + 
} + if (dnp->dn_flags & DN_UPDATE) { + dnp->dn_mtime.tv_sec = t2.tv_sec; + dnp->dn_mtime.tv_nsec = t2.tv_usec * 1000; + } + if (dnp->dn_flags & DN_CHANGE) { + dnp->dn_ctime.tv_sec = time.tv_sec; + dnp->dn_ctime.tv_nsec = time.tv_usec * 1000; + } + dnp->dn_flags &= ~(DN_ACCESS | DN_CHANGE | DN_UPDATE); + } + return; +} + +static __inline__ void +dn_copy_times(devnode_t * target, devnode_t * source) +{ + target->dn_atime = source->dn_atime; + target->dn_mtime = source->dn_mtime; + target->dn_ctime = source->dn_ctime; + return; +} diff --git a/bsd/miscfs/devfs/index.html b/bsd/miscfs/devfs/index.html new file mode 100644 index 000000000..2c1626603 --- /dev/null +++ b/bsd/miscfs/devfs/index.html @@ -0,0 +1,22 @@ + +FTP Menu at ftp2.FreeBSD.ORG + +

FTP Menu

+
+ +[TXT] README
+ +[TXT] devfs_proto.h
+ +[TXT] devfs_tree.c
+ +[TXT] devfs_vfsops.c
+ +[TXT] devfs_vnops.c
+ +[TXT] devfsdefs.h
+ +[TXT] reproto.sh
+

+http-gw version 3.2 / 0 + (17.254.0.77) diff --git a/bsd/miscfs/devfs/reproto.sh b/bsd/miscfs/devfs/reproto.sh new file mode 100644 index 000000000..e994e0c2b --- /dev/null +++ b/bsd/miscfs/devfs/reproto.sh @@ -0,0 +1,42 @@ +#!/bin/sh +# +# This used to be a shell script, but had to become more sophisticated +# to allow for KNF function definitions. So rewrote in perl, but wrapped +# as a shell script. +# +exec /usr/bin/perl << *EOF* +open(PROTO, ">devfs_proto.h") || die "Cannot open devfs_proto.h\n"; + +print PROTO "/* THIS FILE HAS BEEN PRODUCED AUTOMATICALLY */\n"; + +while (\$file = <*.c>) { + if(open(F, \$file) == 0) { + warn "Cannot open \$file.\n"; + next; + } + + while() { + chop; + if (m|/\*proto\*/|) { + \$collecting = 1; + \$idx = 0; + } elsif (\$collecting) { + if (/^{/) { + \$text[\$idx - 1] .= ';'; + for (\$i = 0; \$i < \$idx; \$i++) { + print PROTO "\$text[\$i]"; + print PROTO \$i == 0? "\t": "\n"; + } + \$collecting = 0; + next; + } + \$text[\$idx++] = \$_; + } + } + close F; +} + +print PROTO "/* THIS FILE PRODUCED AUTOMATICALLY */\n" . 
+ "/* DO NOT EDIT (see reproto.sh) */\n"; + +*EOF* diff --git a/bsd/miscfs/fdesc/Makefile b/bsd/miscfs/fdesc/Makefile new file mode 100644 index 000000000..80e84ca5a --- /dev/null +++ b/bsd/miscfs/fdesc/Makefile @@ -0,0 +1,40 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = + +PRIVATE_DATAFILES = \ + fdesc.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = miscfs/fdesc + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = miscfs/fdesc + +INSTALL_MI_LIST = ${DATAFILES} ${PRIVATE_DATAFILES} + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/miscfs/fdesc/fdesc.h b/bsd/miscfs/fdesc/fdesc.h new file mode 100644 index 000000000..5de532434 --- /dev/null +++ b/bsd/miscfs/fdesc/fdesc.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fdesc.h 8.6 (Berkeley) 8/20/94 + * + * #Id: fdesc.h,v 1.8 1993/04/06 15:28:33 jsp Exp # + */ + +#ifdef KERNEL +struct fdescmount { + struct vnode *f_root; /* Root node */ +}; + +#define FD_ROOT 2 +#define FD_DEVFD 3 +#define FD_STDIN 4 +#define FD_STDOUT 5 +#define FD_STDERR 6 +#define FD_CTTY 7 +#define FD_DESC 8 +#define FD_MAX 12 + +typedef enum { + Froot, + Fdevfd, + Fdesc, + Flink, + Fctty +} fdntype; + +struct fdescnode { + LIST_ENTRY(fdescnode) fd_hash; /* Hash list */ + struct vnode *fd_vnode; /* Back ptr to vnode */ + fdntype fd_type; /* Type of this node */ + unsigned fd_fd; /* Fd to be dup'ed */ + char *fd_link; /* Link to fd/n */ + int fd_ix; /* filesystem index */ +}; + +#define VFSTOFDESC(mp) ((struct fdescmount *)((mp)->mnt_data)) +#define VTOFDESC(vp) ((struct fdescnode *)(vp)->v_data) + +extern dev_t devctty; +extern int fdesc_init __P((struct vfsconf *)); +extern int fdesc_root __P((struct mount *, struct vnode **)); +extern int fdesc_allocvp __P((fdntype, int, struct mount *, struct vnode **)); +extern int (**fdesc_vnodeop_p)(void *); +extern struct vfsops fdesc_vfsops; +#endif /* KERNEL */ diff --git a/bsd/miscfs/fdesc/fdesc_vfsops.c b/bsd/miscfs/fdesc/fdesc_vfsops.c new file mode 100644 index 000000000..31a987a43 --- /dev/null +++ b/bsd/miscfs/fdesc/fdesc_vfsops.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fdesc_vfsops.c 8.10 (Berkeley) 5/14/95 + * + */ + +/* + * /dev/fd Filesystem + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Mount the per-process file descriptors (/dev/fd) + */ +int +fdesc_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + int error = 0; + u_int size; + struct fdescmount *fmp; + struct vnode *rvp; + + /* + * Update is a no-op + */ + if (mp->mnt_flag & MNT_UPDATE) + return (EOPNOTSUPP); + + error = fdesc_allocvp(Froot, FD_ROOT, mp, &rvp); + if (error) + return (error); + + MALLOC(fmp, struct fdescmount *, sizeof(struct fdescmount), + M_UFSMNT, M_WAITOK); /* XXX */ + rvp->v_type = VDIR; + rvp->v_flag |= VROOT; + fmp->f_root = rvp; + /* XXX -- don't mark as local to work around fts() problems */ + /*mp->mnt_flag |= MNT_LOCAL;*/ + mp->mnt_data = (qaddr_t) fmp; + vfs_getnewfsid(mp); + + (void) copyinstr(path, 
mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + bzero(mp->mnt_stat.f_mntfromname, MNAMELEN); + bcopy("fdesc", mp->mnt_stat.f_mntfromname, sizeof("fdesc")); + return (0); +} + +int +fdesc_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + return (0); +} + +int +fdesc_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + int error; + int flags = 0; + struct vnode *rootvp = VFSTOFDESC(mp)->f_root; + + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + + /* + * Clear out buffer cache. I don't think we + * ever get anything cached at this level at the + * moment, but who knows... + */ + if (rootvp->v_usecount > 1) + return (EBUSY); + if (error = vflush(mp, rootvp, flags)) + return (error); + + /* + * Release reference on underlying root vnode + */ + vrele(rootvp); + /* + * And blow it away for future re-use + */ + vgone(rootvp); + /* + * Finally, throw away the fdescmount structure + */ + _FREE(mp->mnt_data, M_UFSMNT); /* XXX */ + mp->mnt_data = 0; + + return (0); +} + +int +fdesc_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct proc *p = current_proc(); /* XXX */ + struct vnode *vp; + + /* + * Return locked reference to root. + */ + vp = VFSTOFDESC(mp)->f_root; + VREF(vp); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + *vpp = vp; + return (0); +} + +int +fdesc_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + struct filedesc *fdp; + int lim; + int i; + int last; + int freefd; + + /* + * Compute number of free file descriptors. + * [ Strange results will ensue if the open file + * limit is ever reduced below the current number + * of open files... 
] + */ + lim = p->p_rlimit[RLIMIT_NOFILE].rlim_cur; + fdp = p->p_fd; + last = min(fdp->fd_nfiles, lim); + freefd = 0; + for (i = fdp->fd_freefile; i < last; i++) + if (fdp->fd_ofiles[i] == NULL && + !(fdp->fd_ofileflags[i] & UF_RESERVED)) + freefd++; + + /* + * Adjust for the fact that the fdesc array may not + * have been fully allocated yet. + */ + if (fdp->fd_nfiles < lim) + freefd += (lim - fdp->fd_nfiles); + + sbp->f_flags = 0; + sbp->f_bsize = DEV_BSIZE; + sbp->f_iosize = DEV_BSIZE; + sbp->f_blocks = 2; /* 1K to keep df happy */ + sbp->f_bfree = 0; + sbp->f_bavail = 0; + sbp->f_files = lim + 1; /* Allow for "." */ + sbp->f_ffree = freefd; /* See comments above */ + if (sbp != &mp->mnt_stat) { + sbp->f_type = mp->mnt_vfc->vfc_typenum; + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + return (0); +} + +int +fdesc_sync(mp, waitfor) + struct mount *mp; + int waitfor; +{ + + return (0); +} + +#define fdesc_fhtovp ((int (*) __P((struct mount *, struct fid *, \ + struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp) +#define fdesc_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \ + struct proc *)))eopnotsupp) +#define fdesc_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \ + size_t, struct proc *)))eopnotsupp) +#define fdesc_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \ + eopnotsupp) +#define fdesc_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp) + +struct vfsops fdesc_vfsops = { + fdesc_mount, + fdesc_start, + fdesc_unmount, + fdesc_root, + fdesc_quotactl, + fdesc_statfs, + fdesc_sync, + fdesc_vget, + fdesc_fhtovp, + fdesc_vptofh, + fdesc_init, + fdesc_sysctl, +}; diff --git a/bsd/miscfs/fdesc/fdesc_vnops.c b/bsd/miscfs/fdesc/fdesc_vnops.c new file mode 100644 index 000000000..d45582763 --- /dev/null +++ b/bsd/miscfs/fdesc/fdesc_vnops.c @@ 
-0,0 +1,990 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fdesc_vnops.c 8.17 (Berkeley) 5/22/95 + * + */ + +/* + * /dev/fd Filesystem + */ + +#include +#include +#include +#include +#include +#include /* boottime */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define cttyvp(p) ((p)->p_flag & P_CONTROLT ? 
(p)->p_session->s_ttyvp : NULL) + +#define FDL_WANT 0x01 +#define FDL_LOCKED 0x02 +static int fdcache_lock; + +dev_t devctty; + +#if (FD_STDIN != FD_STDOUT-1) || (FD_STDOUT != FD_STDERR-1) +FD_STDIN, FD_STDOUT, FD_STDERR must be a sequence n, n+1, n+2 +#endif + +#define NFDCACHE 4 + +#define FD_NHASH(ix) \ + (&fdhashtbl[(ix) & fdhash]) +LIST_HEAD(fdhashhead, fdescnode) *fdhashtbl; +u_long fdhash; + +/* + * Initialise cache headers + */ +fdesc_init(vfsp) + struct vfsconf *vfsp; +{ + + devctty = makedev(nchrdev, 0); + fdhashtbl = hashinit(NFDCACHE, M_CACHE, &fdhash); +} + +int +fdesc_allocvp(ftype, ix, mp, vpp) + fdntype ftype; + int ix; + struct mount *mp; + struct vnode **vpp; +{ + struct proc *p = current_proc(); /* XXX */ + struct fdhashhead *fc; + struct fdescnode *fd; + int error = 0; + + fc = FD_NHASH(ix); +loop: + for (fd = fc->lh_first; fd != 0; fd = fd->fd_hash.le_next) { + if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) { + if (vget(fd->fd_vnode, 0, p)) + goto loop; + *vpp = fd->fd_vnode; + return (error); + } + } + + /* + * otherwise lock the array while we call getnewvnode + * since that can block. + */ + if (fdcache_lock & FDL_LOCKED) { + fdcache_lock |= FDL_WANT; + sleep((caddr_t) &fdcache_lock, PINOD); + goto loop; + } + fdcache_lock |= FDL_LOCKED; + + MALLOC(fd, void *, sizeof(struct fdescnode), M_TEMP, M_WAITOK); + error = getnewvnode(VT_FDESC, mp, fdesc_vnodeop_p, vpp); + if (error) { + FREE(fd, M_TEMP); + goto out; + } + (*vpp)->v_data = fd; + fd->fd_vnode = *vpp; + fd->fd_type = ftype; + fd->fd_fd = -1; + fd->fd_link = 0; + fd->fd_ix = ix; + LIST_INSERT_HEAD(fc, fd, fd_hash); + +out: + fdcache_lock &= ~FDL_LOCKED; + + if (fdcache_lock & FDL_WANT) { + fdcache_lock &= ~FDL_WANT; + wakeup((caddr_t) &fdcache_lock); + } + + return (error); +} + +/* + * vp is the current namei directory + * ndp is the name to locate in that directory... 
+ */ +int +fdesc_lookup(ap) + struct vop_lookup_args /* { + struct vnode * a_dvp; + struct vnode ** a_vpp; + struct componentname * a_cnp; + } */ *ap; +{ + struct vnode **vpp = ap->a_vpp; + struct vnode *dvp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + char *pname = cnp->cn_nameptr; + struct proc *p = cnp->cn_proc; + int nfiles = p->p_fd->fd_nfiles; + unsigned fd; + int error; + struct vnode *fvp; + char *ln; + + VOP_UNLOCK(dvp, 0, p); + if (cnp->cn_namelen == 1 && *pname == '.') { + *vpp = dvp; + VREF(dvp); + vn_lock(dvp, LK_SHARED | LK_RETRY, p); + return (0); + } + + switch (VTOFDESC(dvp)->fd_type) { + default: + case Flink: + case Fdesc: + case Fctty: + error = ENOTDIR; + goto bad; + + case Froot: + if (cnp->cn_namelen == 2 && bcmp(pname, "fd", 2) == 0) { + error = fdesc_allocvp(Fdevfd, FD_DEVFD, dvp->v_mount, &fvp); + if (error) + goto bad; + *vpp = fvp; + fvp->v_type = VDIR; + vn_lock(fvp, LK_SHARED | LK_RETRY, p); + return (0); + } + + if (cnp->cn_namelen == 3 && bcmp(pname, "tty", 3) == 0) { + struct vnode *ttyvp = cttyvp(p); + if (ttyvp == NULL) { + error = ENXIO; + goto bad; + } + error = fdesc_allocvp(Fctty, FD_CTTY, dvp->v_mount, &fvp); + if (error) + goto bad; + *vpp = fvp; + fvp->v_type = VCHR; + vn_lock(fvp, LK_SHARED | LK_RETRY, p); + return (0); + } + + ln = 0; + switch (cnp->cn_namelen) { + case 5: + if (bcmp(pname, "stdin", 5) == 0) { + ln = "fd/0"; + fd = FD_STDIN; + } + break; + case 6: + if (bcmp(pname, "stdout", 6) == 0) { + ln = "fd/1"; + fd = FD_STDOUT; + } else + if (bcmp(pname, "stderr", 6) == 0) { + ln = "fd/2"; + fd = FD_STDERR; + } + break; + } + + if (ln) { + error = fdesc_allocvp(Flink, fd, dvp->v_mount, &fvp); + if (error) + goto bad; + VTOFDESC(fvp)->fd_link = ln; + *vpp = fvp; + fvp->v_type = VLNK; + vn_lock(fvp, LK_SHARED | LK_RETRY, p); + return (0); + } else { + error = ENOENT; + goto bad; + } + + /* FALL THROUGH */ + + case Fdevfd: + if (cnp->cn_namelen == 2 && bcmp(pname, "..", 2) == 0) { + if (error = 
fdesc_root(dvp->v_mount, vpp)) + goto bad; + return (0); + } + + fd = 0; + while (*pname >= '0' && *pname <= '9') { + fd = 10 * fd + *pname++ - '0'; + if (fd >= nfiles) + break; + } + + if (*pname != '\0') { + error = ENOENT; + goto bad; + } + + if (fd >= nfiles || + *fdfile(p, fd) == NULL || + (*fdflags(p, fd) & UF_RESERVED)) { + error = EBADF; + goto bad; + } + + error = fdesc_allocvp(Fdesc, FD_DESC+fd, dvp->v_mount, &fvp); + if (error) + goto bad; + VTOFDESC(fvp)->fd_fd = fd; + vn_lock(fvp, LK_SHARED | LK_RETRY, p); + *vpp = fvp; + return (0); + } + +bad:; + vn_lock(dvp, LK_SHARED | LK_RETRY, p); + *vpp = NULL; + return (error); +} + +int +fdesc_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + int error = 0; + + switch (VTOFDESC(vp)->fd_type) { + case Fdesc: + /* + * XXX Kludge: set p->p_dupfd to contain the value of the + * the file descriptor being sought for duplication. The error + * return ensures that the vnode for this device will be + * released by vn_open. Open will detect this special error and + * take the actions in dupfdopen. Other callers of vn_open or + * VOP_OPEN will simply report the error. + */ + ap->a_p->p_dupfd = VTOFDESC(vp)->fd_fd; /* XXX */ + error = ENODEV; + break; + + case Fctty: + error = cttyopen(devctty, ap->a_mode, 0, ap->a_p); + break; + } + + return (error); +} + +static int +fdesc_attr(fd, vap, cred, p) + int fd; + struct vattr *vap; + struct ucred *cred; + struct proc *p; +{ + struct file *fp; + struct stat stb; + int error; + + if (error = fdgetf(p, fd, &fp)) + return (error); + switch (fp->f_type) { + case DTYPE_VNODE: + error = VOP_GETATTR((struct vnode *) fp->f_data, vap, cred, p); + if (error == 0 && vap->va_type == VDIR) { + /* + * directories can cause loops in the namespace, + * so turn off the 'x' bits to avoid trouble. 
+ */ + vap->va_mode &= ~((VEXEC)|(VEXEC>>3)|(VEXEC>>6)); + } + break; + + case DTYPE_SOCKET: + error = soo_stat((struct socket *)fp->f_data, &stb); + if (error == 0) { + vattr_null(vap); + vap->va_type = VSOCK; + vap->va_mode = stb.st_mode; + vap->va_nlink = stb.st_nlink; + vap->va_uid = stb.st_uid; + vap->va_gid = stb.st_gid; + vap->va_fsid = stb.st_dev; + vap->va_fileid = stb.st_ino; + vap->va_size = stb.st_size; + vap->va_blocksize = stb.st_blksize; + vap->va_atime = stb.st_atimespec; + vap->va_mtime = stb.st_mtimespec; + vap->va_ctime = stb.st_ctimespec; + vap->va_gen = stb.st_gen; + vap->va_flags = stb.st_flags; + vap->va_rdev = stb.st_rdev; + vap->va_bytes = stb.st_blocks * stb.st_blksize; + } + break; + + default: + panic("fdesc attr"); + break; + } + + return (error); +} + +int +fdesc_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vattr *vap = ap->a_vap; + unsigned fd; + int error = 0; + + switch (VTOFDESC(vp)->fd_type) { + case Froot: + case Fdevfd: + case Flink: + case Fctty: + bzero((caddr_t) vap, sizeof(*vap)); + vattr_null(vap); + vap->va_fileid = VTOFDESC(vp)->fd_ix; + + vap->va_uid = 0; + vap->va_gid = 0; + vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; + vap->va_blocksize = DEV_BSIZE; + vap->va_atime.tv_sec = boottime.tv_sec; + vap->va_atime.tv_nsec = 0; + vap->va_mtime = vap->va_atime; + vap->va_ctime = vap->va_mtime; + vap->va_gen = 0; + vap->va_flags = 0; + vap->va_rdev = 0; + vap->va_bytes = 0; + + switch (VTOFDESC(vp)->fd_type) { + case Flink: + vap->va_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH; + vap->va_type = VLNK; + vap->va_nlink = 1; + vap->va_size = strlen(VTOFDESC(vp)->fd_link); + break; + + case Fctty: + vap->va_mode = S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH; + vap->va_type = VCHR; + vap->va_nlink = 1; + vap->va_size = 0; + vap->va_rdev = devctty; + break; + + default: + 
vap->va_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH; + vap->va_type = VDIR; + vap->va_nlink = 2; + vap->va_size = DEV_BSIZE; + break; + } + break; + + case Fdesc: + fd = VTOFDESC(vp)->fd_fd; + error = fdesc_attr(fd, vap, ap->a_cred, ap->a_p); + break; + + default: + panic("fdesc_getattr"); + break; + } + + if (error == 0) { + vp->v_type = vap->va_type; + } + + return (error); +} + +int +fdesc_setattr(ap) + struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct file *fp; + unsigned fd; + int error; + + /* + * Can't mess with the root vnode + */ + switch (VTOFDESC(ap->a_vp)->fd_type) { + case Fdesc: + break; + + case Fctty: + return (0); + + default: + return (EACCES); + } + + fd = VTOFDESC(ap->a_vp)->fd_fd; + if (error = fdgetf(ap->a_p, fd, &fp)) + return (error); + + /* + * Can setattr the underlying vnode, but not sockets! + */ + switch (fp->f_type) { + case DTYPE_VNODE: + error = VOP_SETATTR((struct vnode *) fp->f_data, ap->a_vap, ap->a_cred, ap->a_p); + break; + + case DTYPE_SOCKET: + error = 0; + break; + + default: + kprintf("fp->f_type = %d\n", fp->f_type); + error = EBADF; + break; + } + + return (error); +} + +#define UIO_MX 16 + +static struct dirtmp { + u_long d_fileno; + u_short d_reclen; + u_short d_namlen; + char d_name[8]; +} rootent[] = { + { FD_DEVFD, UIO_MX, 2, "fd" }, + { FD_STDIN, UIO_MX, 5, "stdin" }, + { FD_STDOUT, UIO_MX, 6, "stdout" }, + { FD_STDERR, UIO_MX, 6, "stderr" }, + { FD_CTTY, UIO_MX, 3, "tty" }, + { 0 } +}; + +int +fdesc_readdir(ap) + struct vop_readdir_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + u_long *a_cookies; + int a_ncookies; + } */ *ap; +{ + struct uio *uio = ap->a_uio; + struct proc *p = uio->uio_procp; + int i, error; + + /* + * We don't allow exporting fdesc mounts, and currently local + * requests do not need cookies. 
+ */ + if (ap->a_ncookies) + panic("fdesc_readdir: not hungry"); + + switch (VTOFDESC(ap->a_vp)->fd_type) { + case Fctty: + return (0); + + case Fdesc: + return (ENOTDIR); + + default: + break; + } + + if (VTOFDESC(ap->a_vp)->fd_type == Froot) { + struct dirent d; + struct dirent *dp = &d; + struct dirtmp *dt; + int fd; + + i = uio->uio_offset / UIO_MX; + error = 0; + + while (uio->uio_resid > 0) { + dt = &rootent[i]; + if (dt->d_fileno == 0) { + /**eofflagp = 1;*/ + break; + } + i++; + + switch (dt->d_fileno) { + case FD_CTTY: + if (cttyvp(uio->uio_procp) == NULL) + continue; + break; + + case FD_STDIN: + case FD_STDOUT: + case FD_STDERR: + fd = dt->d_fileno - FD_STDIN; + if (fd >= p->p_fd->fd_nfiles) + continue; + if (*fdfile(p, fd) == NULL && + !(*fdflags(p, fd) & + UF_RESERVED)) + continue; + break; + } + bzero((caddr_t) dp, UIO_MX); + dp->d_fileno = dt->d_fileno; + dp->d_namlen = dt->d_namlen; + dp->d_type = DT_UNKNOWN; + dp->d_reclen = dt->d_reclen; + bcopy(dt->d_name, dp->d_name, dp->d_namlen+1); + error = uiomove((caddr_t) dp, UIO_MX, uio); + if (error) + break; + } + uio->uio_offset = i * UIO_MX; + return (error); + } + + i = uio->uio_offset / UIO_MX; + error = 0; + while (uio->uio_resid > 0) { + if (i >= p->p_fd->fd_nfiles) + break; + + if (*fdfile(p, i) != NULL && !(*fdflags(p, i) & UF_RESERVED)) { + struct dirent d; + struct dirent *dp = &d; + + bzero((caddr_t) dp, UIO_MX); + + dp->d_namlen = sprintf(dp->d_name, "%d", i); + dp->d_reclen = UIO_MX; + dp->d_type = DT_UNKNOWN; + dp->d_fileno = i + FD_STDIN; + /* + * And ship to userland + */ + error = uiomove((caddr_t) dp, UIO_MX, uio); + if (error) + break; + } + i++; + } + + uio->uio_offset = i * UIO_MX; + return (error); +} + +int +fdesc_readlink(ap) + struct vop_readlink_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + int error; + + if (vp->v_type != VLNK) + return (EPERM); + + if (VTOFDESC(vp)->fd_type == Flink) { + char *ln = 
VTOFDESC(vp)->fd_link; + error = uiomove(ln, strlen(ln), ap->a_uio); + } else { + error = EOPNOTSUPP; + } + + return (error); +} + +int +fdesc_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + int error = EOPNOTSUPP; + + switch (VTOFDESC(ap->a_vp)->fd_type) { + case Fctty: + error = cttyread(devctty, ap->a_uio, ap->a_ioflag); + break; + + default: + error = EOPNOTSUPP; + break; + } + + return (error); +} + +int +fdesc_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + int error = EOPNOTSUPP; + + switch (VTOFDESC(ap->a_vp)->fd_type) { + case Fctty: + error = cttywrite(devctty, ap->a_uio, ap->a_ioflag); + break; + + default: + error = EOPNOTSUPP; + break; + } + + return (error); +} + +int +fdesc_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + int a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + int error = EOPNOTSUPP; + + switch (VTOFDESC(ap->a_vp)->fd_type) { + case Fctty: + error = cttyioctl(devctty, ap->a_command, ap->a_data, + ap->a_fflag, ap->a_p); + break; + + default: + error = EOPNOTSUPP; + break; + } + + return (error); +} + +int +fdesc_select(ap) + struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + int error = EOPNOTSUPP; + + switch (VTOFDESC(ap->a_vp)->fd_type) { + case Fctty: + error = cttyselect(devctty, ap->a_fflags, ap->a_p); + break; + + default: + error = EOPNOTSUPP; + break; + } + + return (error); +} + +int +fdesc_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + /* + * Clear out the v_type field to avoid + * nasty things happening in vgone(). 
+ */ + VOP_UNLOCK(vp, 0, ap->a_p); + vp->v_type = VNON; + return (0); +} + +int +fdesc_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct fdescnode *fd = VTOFDESC(vp); + + LIST_REMOVE(fd, fd_hash); + FREE(vp->v_data, M_TEMP); + vp->v_data = 0; + + return (0); +} + +/* + * Return POSIX pathconf information applicable to special devices. + */ +fdesc_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_MAX_CANON: + *ap->a_retval = MAX_CANON; + return (0); + case _PC_MAX_INPUT: + *ap->a_retval = MAX_INPUT; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_VDISABLE: + *ap->a_retval = _POSIX_VDISABLE; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Print out the contents of a /dev/fd vnode. 
+ */ +/* ARGSUSED */ +int +fdesc_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + printf("tag VT_NON, fdesc vnode\n"); + return (0); +} + +/*void*/ +int +fdesc_vfree(ap) + struct vop_vfree_args /* { + struct vnode *a_pvp; + ino_t a_ino; + int a_mode; + } */ *ap; +{ + + return (0); +} + +/* + * /dev/fd "should never get here" operation + */ +int +fdesc_badop() +{ + + panic("fdesc: bad op"); + /* NOTREACHED */ +} + +#define VOPFUNC int (*)(void *) + +#define fdesc_create ((int (*) __P((struct vop_create_args *)))eopnotsupp) +#define fdesc_mknod ((int (*) __P((struct vop_mknod_args *)))eopnotsupp) +#define fdesc_close ((int (*) __P((struct vop_close_args *)))nullop) +#define fdesc_access ((int (*) __P((struct vop_access_args *)))nullop) +#define fdesc_mmap ((int (*) __P((struct vop_mmap_args *)))eopnotsupp) +#define fdesc_revoke vop_revoke +#define fdesc_fsync ((int (*) __P((struct vop_fsync_args *)))nullop) +#define fdesc_seek ((int (*) __P((struct vop_seek_args *)))nullop) +#define fdesc_remove ((int (*) __P((struct vop_remove_args *)))eopnotsupp) +#define fdesc_link ((int (*) __P((struct vop_link_args *)))eopnotsupp) +#define fdesc_rename ((int (*) __P((struct vop_rename_args *)))eopnotsupp) +#define fdesc_mkdir ((int (*) __P((struct vop_mkdir_args *)))eopnotsupp) +#define fdesc_rmdir ((int (*) __P((struct vop_rmdir_args *)))eopnotsupp) +#define fdesc_symlink ((int (*) __P((struct vop_symlink_args *)))eopnotsupp) +#define fdesc_abortop ((int (*) __P((struct vop_abortop_args *)))nullop) +#define fdesc_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) +#define fdesc_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock) +#define fdesc_bmap ((int (*) __P((struct vop_bmap_args *)))fdesc_badop) +#define fdesc_strategy ((int (*) __P((struct vop_strategy_args *)))fdesc_badop) +#define fdesc_islocked \ + ((int (*) __P((struct vop_islocked_args *)))vop_noislocked) +#define fdesc_advlock ((int (*) __P((struct vop_advlock_args 
*)))eopnotsupp) +#define fdesc_blkatoff \ + ((int (*) __P((struct vop_blkatoff_args *)))eopnotsupp) +#define fdesc_valloc ((int(*) __P(( \ + struct vnode *pvp, \ + int mode, \ + struct ucred *cred, \ + struct vnode **vpp))) eopnotsupp) +#define fdesc_truncate \ + ((int (*) __P((struct vop_truncate_args *)))eopnotsupp) +#define fdesc_update ((int (*) __P((struct vop_update_args *)))eopnotsupp) +#define fdesc_bwrite ((int (*) __P((struct vop_bwrite_args *)))eopnotsupp) +#define fdesc_blktooff ((int (*) __P((struct vop_blktooff_args *)))eopnotsupp) +#define fdesc_offtoblk ((int (*) __P((struct vop_offtoblk_args *)))eopnotsupp) +#define fdesc_cmap ((int (*) __P((struct vop_cmap_args *)))eopnotsupp) + +int (**fdesc_vnodeop_p)(void *); +struct vnodeopv_entry_desc fdesc_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)fdesc_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)fdesc_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)fdesc_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)fdesc_open }, /* open */ + { &vop_close_desc, (VOPFUNC)fdesc_close }, /* close */ + { &vop_access_desc, (VOPFUNC)fdesc_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)fdesc_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)fdesc_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)fdesc_read }, /* read */ + { &vop_write_desc, (VOPFUNC)fdesc_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)fdesc_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)fdesc_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)fdesc_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)fdesc_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)fdesc_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)fdesc_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)fdesc_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)fdesc_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)fdesc_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)fdesc_mkdir }, /* 
mkdir */ + { &vop_rmdir_desc, (VOPFUNC)fdesc_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)fdesc_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)fdesc_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)fdesc_readlink },/* readlink */ + { &vop_abortop_desc, (VOPFUNC)fdesc_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)fdesc_inactive },/* inactive */ + { &vop_reclaim_desc, (VOPFUNC)fdesc_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)fdesc_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)fdesc_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)fdesc_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)fdesc_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)fdesc_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)fdesc_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)fdesc_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)fdesc_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)fdesc_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)fdesc_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)fdesc_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)fdesc_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)fdesc_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)fdesc_bwrite }, /* bwrite */ + { &vop_pagein_desc, (VOPFUNC)err_pagein }, /* pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)fdesc_blktooff }, /* blktooff */ + { &vop_blktooff_desc, (VOPFUNC)fdesc_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)fdesc_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } +}; +struct vnodeopv_desc fdesc_vnodeop_opv_desc = + { &fdesc_vnodeop_p, fdesc_vnodeop_entries }; diff --git a/bsd/miscfs/fifofs/fifo.h b/bsd/miscfs/fifofs/fifo.h new file mode 100644 index 000000000..b5d55b2e9 --- /dev/null +++ b/bsd/miscfs/fifofs/fifo.h @@ -0,0 +1,109 @@ +/* + 
* Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fifo.h 8.3 (Berkeley) 8/10/94 + */ + +/* + * Prototypes for fifo operations on vnodes. 
+ */ +int fifo_badop(), + fifo_ebadf(); + +int fifo_lookup __P((struct vop_lookup_args *)); +#define fifo_create ((int (*) __P((struct vop_create_args *)))fifo_badop) +#define fifo_mknod ((int (*) __P((struct vop_mknod_args *)))fifo_badop) +int fifo_open __P((struct vop_open_args *)); +int fifo_close __P((struct vop_close_args *)); +#define fifo_access ((int (*) __P((struct vop_access_args *)))fifo_ebadf) +#define fifo_getattr ((int (*) __P((struct vop_getattr_args *)))fifo_ebadf) +#define fifo_setattr ((int (*) __P((struct vop_setattr_args *)))fifo_ebadf) +int fifo_read __P((struct vop_read_args *)); +int fifo_write __P((struct vop_write_args *)); +#define fifo_lease_check ((int (*) __P((struct vop_lease_args *)))nullop) +int fifo_ioctl __P((struct vop_ioctl_args *)); +int fifo_select __P((struct vop_select_args *)); +#define fifo_revoke vop_revoke +#define fifo_mmap ((int (*) __P((struct vop_mmap_args *)))fifo_badop) +#define fifo_fsync ((int (*) __P((struct vop_fsync_args *)))nullop) +#define fifo_seek ((int (*) __P((struct vop_seek_args *)))fifo_badop) +#define fifo_remove ((int (*) __P((struct vop_remove_args *)))fifo_badop) +#define fifo_link ((int (*) __P((struct vop_link_args *)))fifo_badop) +#define fifo_rename ((int (*) __P((struct vop_rename_args *)))fifo_badop) +#define fifo_mkdir ((int (*) __P((struct vop_mkdir_args *)))fifo_badop) +#define fifo_rmdir ((int (*) __P((struct vop_rmdir_args *)))fifo_badop) +#define fifo_symlink ((int (*) __P((struct vop_symlink_args *)))fifo_badop) +#define fifo_readdir ((int (*) __P((struct vop_readdir_args *)))fifo_badop) +#define fifo_readlink ((int (*) __P((struct vop_readlink_args *)))fifo_badop) +#define fifo_abortop ((int (*) __P((struct vop_abortop_args *)))fifo_badop) +int fifo_inactive __P((struct vop_inactive_args *)); +#define fifo_reclaim ((int (*) __P((struct vop_reclaim_args *)))nullop) +#define fifo_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) +#define fifo_unlock ((int (*) __P((struct 
vop_unlock_args *)))vop_nounlock) +int fifo_bmap __P((struct vop_bmap_args *)); +#define fifo_strategy ((int (*) __P((struct vop_strategy_args *)))fifo_badop) +int fifo_print __P((struct vop_print_args *)); +#define fifo_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked) +int fifo_pathconf __P((struct vop_pathconf_args *)); +int fifo_advlock __P((struct vop_advlock_args *)); +#define fifo_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))fifo_badop) +#define fifo_valloc ((int (*) __P((struct vop_valloc_args *)))fifo_badop) +#define fifo_reallocblks \ + ((int (*) __P((struct vop_reallocblks_args *)))fifo_badop) +#define fifo_vfree ((int (*) __P((struct vop_vfree_args *)))fifo_badop) +#define fifo_truncate ((int (*) __P((struct vop_truncate_args *)))nullop) +#define fifo_update ((int (*) __P((struct vop_update_args *)))nullop) +#define fifo_bwrite ((int (*) __P((struct vop_bwrite_args *)))nullop) +#define fifo_blktooff ((int (*) __P((struct vop_blktooff_args *)))fifo_badop) + diff --git a/bsd/miscfs/fifofs/fifo_vnops.c b/bsd/miscfs/fifofs/fifo_vnops.c new file mode 100644 index 000000000..d4edca03e --- /dev/null +++ b/bsd/miscfs/fifofs/fifo_vnops.c @@ -0,0 +1,572 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1990, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fifo_vnops.c 8.4 (Berkeley) 8/10/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This structure is associated with the FIFO vnode and stores + * the state associated with the FIFO. + */ +struct fifoinfo { + struct socket *fi_readsock; + struct socket *fi_writesock; + long fi_readers; + long fi_writers; +}; + +#define VOPFUNC int (*)(void *) + +int (**fifo_vnodeop_p)(void *); +struct vnodeopv_entry_desc fifo_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)fifo_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)fifo_open }, /* open */ + { &vop_close_desc, (VOPFUNC)fifo_close }, /* close */ + { &vop_access_desc, (VOPFUNC)fifo_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)fifo_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)fifo_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)fifo_read }, /* read */ + { &vop_write_desc, (VOPFUNC)fifo_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)fifo_lease_check }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)fifo_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */ + { &vop_mmap_desc, 
(VOPFUNC)fifo_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)fifo_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)fifo_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)fifo_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)fifo_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)fifo_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)fifo_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)fifo_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)fifo_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)fifo_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)fifo_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)fifo_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)fifo_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)fifo_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)fifo_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)fifo_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)fifo_bwrite }, /* bwrite */ + { &vop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)err_cmap }, /* 
cmap */
	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
};

/* Registration record tying the fifo vnode-op vector to its entry table. */
struct vnodeopv_desc fifo_vnodeop_opv_desc =
	{ &fifo_vnodeop_p, fifo_vnodeop_entries };

/*
 * Trivial lookup routine that always fails.
 * A fifo is never a directory, so lookup below one is always ENOTDIR.
 */
/* ARGSUSED */
fifo_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode * a_dvp;
		struct vnode ** a_vpp;
		struct componentname * a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * Open called to set up a new instance of a fifo or
 * to find an active instance of a fifo.
 *
 * On first open, the fifo is implemented as a pair of connected
 * AF_LOCAL stream sockets (read side / write side) hung off
 * vp->v_fifoinfo.  Subsequent opens just bump the reader/writer
 * counts and perform the POSIX blocking/O_NONBLOCK semantics.
 */
/* ARGSUSED */
fifo_open(ap)
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct fifoinfo *fip;
	struct proc *p = ap->a_p;
	struct socket *rso, *wso;
	int error;

	if ((fip = vp->v_fifoinfo) == NULL) {
		/* First open: build the socket pair backing this fifo. */
		MALLOC_ZONE(fip, struct fifoinfo *,
			sizeof(*fip), M_VNODE, M_WAITOK);
		vp->v_fifoinfo = fip;
		/* Socket calls run under the network funnel. */
		thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
		if (error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0)) {
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			_FREE_ZONE(fip, sizeof *fip, M_VNODE);
			vp->v_fifoinfo = NULL;
			return (error);
		}
		fip->fi_readsock = rso;
		if (error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0)) {
			(void)soclose(rso);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			_FREE_ZONE(fip, sizeof *fip, M_VNODE);
			vp->v_fifoinfo = NULL;
			return (error);
		}
		fip->fi_writesock = wso;
		if (error = unp_connect2(wso, rso)) {
			(void)soclose(wso);
			(void)soclose(rso);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			_FREE_ZONE(fip, sizeof *fip, M_VNODE);
			vp->v_fifoinfo = NULL;
			return (error);
		}
		/*
		 * Data flows write-socket -> read-socket only; mark the
		 * unused directions shut.  With no readers/writers yet,
		 * both ends start "closed" until opens arrive below.
		 */
		wso->so_state |= SS_CANTRCVMORE;
		wso->so_snd.sb_lowat = PIPE_BUF;
		rso->so_state |= SS_CANTSENDMORE;
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		fip->fi_readers = fip->fi_writers = 0;
	}
	if (ap->a_mode & FREAD) {
		fip->fi_readers++;
		if (fip->fi_readers == 1) {
			/* First reader: re-enable writes, wake blocked writers. */
			fip->fi_writesock->so_state &= ~SS_CANTSENDMORE;
			if (fip->fi_writers > 0)
				wakeup((caddr_t)&fip->fi_writers);
		}
	}
	if (ap->a_mode & FWRITE) {
		fip->fi_writers++;
		if (fip->fi_writers == 1) {
			/* First writer: re-enable reads, wake blocked readers. */
			fip->fi_readsock->so_state &= ~SS_CANTRCVMORE;
			if (fip->fi_readers > 0)
				wakeup((caddr_t)&fip->fi_readers);
		}
	}
	if ((ap->a_mode & FREAD) && (ap->a_mode & O_NONBLOCK) == 0) {
		/* Blocking open for read: wait until a writer appears. */
		if (fip->fi_writers == 0) {
			VOP_UNLOCK(vp, 0, p);
			error = tsleep((caddr_t)&fip->fi_readers,
			    PCATCH | PSOCK, "fifoor", 0);
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
			if (error)
				goto bad;
			if (fip->fi_readers == 1) {
				if (fip->fi_writers > 0)
					wakeup((caddr_t)&fip->fi_writers);
			}
		}
	}
	if (ap->a_mode & FWRITE) {
		if (ap->a_mode & O_NONBLOCK) {
			/* POSIX: O_NONBLOCK write open with no reader fails. */
			if (fip->fi_readers == 0) {
				error = ENXIO;
				goto bad;
			}
		} else {
			/* Blocking open for write: wait until a reader appears. */
			if (fip->fi_readers == 0) {
				VOP_UNLOCK(vp, 0, p);
				error = tsleep((caddr_t)&fip->fi_writers,
				    PCATCH | PSOCK, "fifoow", 0);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
				if (error)
					goto bad;
				if (fip->fi_writers == 1) {
					if (fip->fi_readers > 0)
						wakeup((caddr_t)&fip->fi_readers);
				}
			}
		}
	}
	return (0);
bad:
	/* Undo the reader/writer accounting done above via VOP_CLOSE. */
	if (error)
		VOP_CLOSE(vp, ap->a_mode, ap->a_cred, p);
	return (error);
}

/*
 * Vnode op for read.
 * Drops the vnode lock and reads from the fifo's receive socket
 * under the network funnel, then relocks.
 */
/* ARGSUSED */
fifo_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct uio *uio = ap->a_uio;
	struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
	struct proc *p = uio->uio_procp;
	int error, startresid;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("fifo_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (ap->a_ioflag & IO_NDELAY)
		rso->so_state |= SS_NBIO;
	startresid = uio->uio_resid;
	VOP_UNLOCK(ap->a_vp, 0, p);
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	error = soreceive(rso, (struct sockaddr **)0, uio, (struct mbuf **)0,
	    (struct mbuf **)0, (int *)0);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
	/*
	 * Clear EOF indication after first such return.
	 */
	if (uio->uio_resid == startresid)
		rso->so_state &= ~SS_CANTRCVMORE;
	if (ap->a_ioflag & IO_NDELAY)
		rso->so_state &= ~SS_NBIO;
	return (error);
}

/*
 * Vnode op for write.
 * Drops the vnode lock and sends on the fifo's write socket
 * under the network funnel, then relocks.
 */
/* ARGSUSED */
fifo_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct socket *wso = ap->a_vp->v_fifoinfo->fi_writesock;
	struct proc *p = ap->a_uio->uio_procp;
	int error;

#if DIAGNOSTIC
	if (ap->a_uio->uio_rw != UIO_WRITE)
		panic("fifo_write mode");
#endif
	if (ap->a_ioflag & IO_NDELAY)
		wso->so_state |= SS_NBIO;
	VOP_UNLOCK(ap->a_vp, 0, p);
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	error = sosend(wso, (struct sockaddr *)0, ap->a_uio, 0, (struct mbuf *)0, 0);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (ap->a_ioflag & IO_NDELAY)
		wso->so_state &= ~SS_NBIO;
	return (error);
}

/*
 * Device ioctl operation
 */
/* ARGSUSED */
fifo_ioctl(ap)
	struct vop_ioctl_args /* {
		struct vnode *a_vp;
		int  a_command;
		caddr_t  a_data;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct file filetmp;
	int error;

	/* FIONBIO is handled at the file layer; nothing to do here. */
	if (ap->a_command == FIONBIO)
		return (0);
	/* Forward the ioctl to the backing socket(s) via a stack file. */
	if (ap->a_fflag & FREAD) {
		filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock;
		error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_p);
		if (error)
			return (error);
	}
	if (ap->a_fflag & FWRITE) {
		filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock;
		error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_p);
		if (error)
			return (error);
	}
	return (0);
}

/*
 * Select/poll support: delegate to the backing sockets,
 * returning as soon as either direction reports ready.
 */
/* ARGSUSED */
fifo_select(ap)
	struct vop_select_args /* {
		struct vnode *a_vp;
		int  a_which;
		int  a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct file filetmp;
	int ready;

	if (ap->a_fflags & FREAD) {
		filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock;
		ready = soo_select(&filetmp, ap->a_which, ap->a_p);
		if (ready)
			return (ready);
	}
	if (ap->a_fflags & FWRITE) {
		filetmp.f_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock;
		ready = soo_select(&filetmp, ap->a_which, ap->a_p);
		if (ready)
			return (ready);
	}
	return (0);
}

/* Inactive: nothing to tear down here, just release the vnode lock. */
int
fifo_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{

	VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
	return (0);
}

/*
 * This is a noop, simply returning what one has been given.
 */
fifo_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t  a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{

	if (ap->a_vpp != NULL)
		*ap->a_vpp = ap->a_vp;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn;
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	return (0);
}

/*
 * Device close routine
 *
 * Drops the reader/writer count for the closing descriptor; when the
 * last reader (resp. writer) goes away the opposite socket direction
 * is shut down.  The socket pair itself is torn down only on the
 * final close (v_usecount <= 1).
 */
/* ARGSUSED */
fifo_close(ap)
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct fifoinfo *fip = vp->v_fifoinfo;
	int error1, error2;

	if (ap->a_fflag & FREAD) {
		fip->fi_readers--;
		if (fip->fi_readers == 0){
			thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
			socantsendmore(fip->fi_writesock);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		}
	}
	if (ap->a_fflag & FWRITE) {
		fip->fi_writers--;
		if (fip->fi_writers == 0) {
			thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
			socantrcvmore(fip->fi_readsock);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		}
	}
	/* Other opens still reference this vnode; keep the sockets. */
	if (vp->v_usecount > 1)
		return (0);
	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	error1 = soclose(fip->fi_readsock);
	error2 = soclose(fip->fi_writesock);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	FREE_ZONE(fip, sizeof *fip, M_VNODE);
	vp->v_fifoinfo = NULL;
	if (error1)
		return (error1);
	return (error2);
}

/*
 * Print out the contents of a fifo vnode.
 */
fifo_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	printf("tag VT_NON");
	fifo_printinfo(ap->a_vp);
	printf("\n");
}

/*
 * Print out internal contents of a fifo vnode.
 */
fifo_printinfo(vp)
	struct vnode *vp;
{
	register struct fifoinfo *fip = vp->v_fifoinfo;

	printf(", fifo with %d readers and %d writers",
		fip->fi_readers, fip->fi_writers);
}

/*
 * Return POSIX pathconf information applicable to fifo's.
+ */ +fifo_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Fifo failed operation + */ +fifo_ebadf() +{ + + return (EBADF); +} + +/* + * Fifo advisory byte-level locks. + */ +/* ARGSUSED */ +fifo_advlock(ap) + struct vop_advlock_args /* { + struct vnode *a_vp; + caddr_t a_id; + int a_op; + struct flock *a_fl; + int a_flags; + } */ *ap; +{ + + return (EOPNOTSUPP); +} + +/* + * Fifo bad operation + */ +fifo_badop() +{ + + panic("fifo_badop called"); + /* NOTREACHED */ +} diff --git a/bsd/miscfs/kernfs/kernfs.h b/bsd/miscfs/kernfs/kernfs.h new file mode 100644 index 000000000..e40e53220 --- /dev/null +++ b/bsd/miscfs/kernfs/kernfs.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: kernfs.h,v 1.9 1995/03/29 22:08:22 briggs Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kernfs.h 8.5 (Berkeley) 6/15/94 + */ + +#define _PATH_KERNFS "/kern" /* Default mountpoint */ + +#ifdef KERNEL +struct kernfs_mount { + struct vnode *kf_root; /* Root node */ +}; + +struct kernfs_node { + struct kern_target *kf_kt; +}; + +#define VFSTOKERNFS(mp) ((struct kernfs_mount *)((mp)->mnt_data)) +#define VTOKERN(vp) ((struct kernfs_node *)(vp)->v_data) + +extern int (**kernfs_vnodeop_p)(void *); +extern struct vfsops kernfs_vfsops; +extern dev_t rrootdev; +#endif /* KERNEL */ diff --git a/bsd/miscfs/kernfs/kernfs_vfsops.c b/bsd/miscfs/kernfs/kernfs_vfsops.c new file mode 100644 index 000000000..76d28e5ff --- /dev/null +++ b/bsd/miscfs/kernfs/kernfs_vfsops.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: kernfs_vfsops.c,v 1.23 1995/03/09 12:05:52 mycroft Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kernfs_vfsops.c 8.5 (Berkeley) 6/15/94 + */ + +/* + * Kernel params Filesystem + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +dev_t rrootdev = NODEV; + +kernfs_init() +{ + +} + +void +kernfs_get_rrootdev() +{ + static int tried = 0; + int cmaj; + + if (tried) { + /* Already did it once. */ + return; + } + tried = 1; + + if (rootdev == NODEV) + return; + for (cmaj = 0; cmaj < nchrdev; cmaj++) { + rrootdev = makedev(cmaj, minor(rootdev)); + if (chrtoblk(rrootdev) == rootdev) + return; + } + rrootdev = NODEV; + printf("kernfs_get_rrootdev: no raw root device\n"); +} + +/* + * Mount the Kernel params filesystem + */ +kernfs_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + int error = 0; + size_t size; + struct kernfs_mount *fmp; + struct vnode *rvp; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_mount(mp = %x)\n", mp); +#endif + + /* + * Update is a no-op + */ + if (mp->mnt_flag & MNT_UPDATE) + return (EOPNOTSUPP); + + MALLOC(fmp, struct kernfs_mount *, sizeof(struct kernfs_mount), + M_MISCFSMNT, M_WAITOK); + if (error = getnewvnode(VT_KERNFS, mp, kernfs_vnodeop_p, &rvp)) { + FREE(fmp, M_MISCFSMNT); + return (error); + } + + rvp->v_type = VDIR; + rvp->v_flag |= VROOT; +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_mount: root vp = %x\n", rvp); +#endif + fmp->kf_root = rvp; + mp->mnt_flag 
|= MNT_LOCAL; + mp->mnt_data = (qaddr_t)fmp; + getnewfsid(mp, makefstype(MOUNT_KERNFS)); + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + bzero(mp->mnt_stat.f_mntfromname, MNAMELEN); + bcopy("kernfs", mp->mnt_stat.f_mntfromname, sizeof("kernfs")); +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_mount: at %s\n", mp->mnt_stat.f_mntonname); +#endif + + kernfs_get_rrootdev(); + return (0); +} + +kernfs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); +} + +kernfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + int error; + int flags = 0; + extern int doforce; + struct vnode *rootvp = VFSTOKERNFS(mp)->kf_root; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_unmount(mp = %x)\n", mp); +#endif + + if (mntflags & MNT_FORCE) { + /* kernfs can never be rootfs so don't check for it */ + if (!doforce) + return (EINVAL); + flags |= FORCECLOSE; + } + + /* + * Clear out buffer cache. I don't think we + * ever get anything cached at this level at the + * moment, but who knows... + */ + if (rootvp->v_usecount > 1) + return (EBUSY); +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_unmount: calling vflush\n"); +#endif + if (error = vflush(mp, rootvp, flags)) + return (error); + +#ifdef KERNFS_DIAGNOSTIC + vprint("kernfs root", rootvp); +#endif + /* + * Clean out the old root vnode for reuse. + */ + vrele(rootvp); + vgone(rootvp); + /* + * Finally, throw away the kernfs_mount structure + */ + free(mp->mnt_data, M_MISCFSMNT); + mp->mnt_data = 0; + return (0); +} + +kernfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct vnode *vp; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_root(mp = %x)\n", mp); +#endif + + /* + * Return locked reference to root. 
+ */ + vp = VFSTOKERNFS(mp)->kf_root; + VREF(vp); + VOP_LOCK(vp); + *vpp = vp; + return (0); +} + +kernfs_quotactl(mp, cmd, uid, arg, p) + struct mount *mp; + int cmd; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + + return (EOPNOTSUPP); +} + +kernfs_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_statfs(mp = %x)\n", mp); +#endif + +#ifdef COMPAT_09 + sbp->f_type = 7; +#else + sbp->f_type = 0; +#endif + sbp->f_bsize = DEV_BSIZE; + sbp->f_iosize = DEV_BSIZE; + sbp->f_blocks = 2; /* 1K to keep df happy */ + sbp->f_bfree = 0; + sbp->f_bavail = 0; + sbp->f_files = 0; + sbp->f_ffree = 0; + if (sbp != &mp->mnt_stat) { + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN); + sbp->f_fstypename[MFSNAMELEN] = '\0'; + return (0); +} + +kernfs_sync(mp, waitfor) + struct mount *mp; + int waitfor; +{ + + return (0); +} + +/* + * Kernfs flat namespace lookup. + * Currently unsupported. 
+ */ +kernfs_vget(mp, ino, vpp) + struct mount *mp; + ino_t ino; + struct vnode **vpp; +{ + + return (EOPNOTSUPP); +} + + +kernfs_fhtovp(mp, fhp, setgen, vpp) + struct mount *mp; + struct fid *fhp; + int setgen; + struct vnode **vpp; +{ + + return (EOPNOTSUPP); +} + +kernfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + + return (EOPNOTSUPP); +} + +struct vfsops kernfs_vfsops = { + MOUNT_KERNFS, + kernfs_mount, + kernfs_start, + kernfs_unmount, + kernfs_root, + kernfs_quotactl, + kernfs_statfs, + kernfs_sync, + kernfs_vget, + kernfs_fhtovp, + kernfs_vptofh, + kernfs_init, +}; diff --git a/bsd/miscfs/kernfs/kernfs_vnops.c b/bsd/miscfs/kernfs/kernfs_vnops.c new file mode 100644 index 000000000..d416d9879 --- /dev/null +++ b/bsd/miscfs/kernfs/kernfs_vnops.c @@ -0,0 +1,785 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: kernfs_vnops.c,v 1.35 1995/02/03 16:18:46 mycroft Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)kernfs_vnops.c 8.9 (Berkeley) 6/15/94 + */ + +/* + * Kernel parameter filesystem (/kern) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KSTRING 256 /* Largest I/O available via this filesystem */ +#define UIO_MX 32 + +#define READ_MODE (S_IRUSR|S_IRGRP|S_IROTH) +#define WRITE_MODE (S_IWUSR|S_IRUSR|S_IRGRP|S_IROTH) +#define DIR_MODE (S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH) + +struct kern_target { + u_char kt_type; + u_char kt_namlen; + char *kt_name; + void *kt_data; +#define KTT_NULL 1 +#define KTT_TIME 5 +#define KTT_INT 17 +#define KTT_STRING 31 +#define KTT_HOSTNAME 47 +#define KTT_AVENRUN 53 +#define KTT_DEVICE 71 +#define KTT_MSGBUF 89 + u_char kt_tag; + u_char kt_vtype; + mode_t kt_mode; +} kern_targets[] = { +/* NOTE: The name must be less than UIO_MX-16 chars in length */ +#define N(s) sizeof(s)-1, s + /* name data tag type ro/rw */ + { DT_DIR, N("."), 0, KTT_NULL, VDIR, DIR_MODE }, + { DT_DIR, N(".."), 0, KTT_NULL, VDIR, DIR_MODE }, + { DT_REG, N("boottime"), &boottime.tv_sec, KTT_INT, VREG, READ_MODE }, + { DT_REG, N("copyright"), copyright, KTT_STRING, VREG, READ_MODE }, + { DT_REG, N("hostname"), 0, KTT_HOSTNAME, VREG, WRITE_MODE }, + { DT_REG, N("hz"), &hz, KTT_INT, VREG, READ_MODE }, + { DT_REG, N("loadavg"), 0, KTT_AVENRUN, VREG, READ_MODE }, + { DT_REG, N("msgbuf"), 0, KTT_MSGBUF, VREG, READ_MODE }, + { DT_REG, N("pagesize"), &cnt.v_page_size, KTT_INT, VREG, READ_MODE }, + { DT_REG, N("physmem"), &physmem, KTT_INT, VREG, READ_MODE }, +#if 0 + { DT_DIR, N("root"), 0, KTT_NULL, VDIR, DIR_MODE }, +#endif + { DT_BLK, N("rootdev"), &rootdev, KTT_DEVICE, VBLK, READ_MODE }, + { DT_CHR, N("rrootdev"), &rrootdev, KTT_DEVICE, VCHR, READ_MODE }, + { DT_REG, N("time"), 0, KTT_TIME, VREG, READ_MODE }, + { DT_REG, N("version"), version, KTT_STRING, VREG, READ_MODE }, +#undef N +}; 
+static int nkern_targets = sizeof(kern_targets) / sizeof(kern_targets[0]); + +int +kernfs_xread(kt, off, bufp, len) + struct kern_target *kt; + int off; + char **bufp; + int len; +{ + + switch (kt->kt_tag) { + case KTT_TIME: { + struct timeval tv; + + microtime(&tv); + sprintf(*bufp, "%d %d\n", tv.tv_sec, tv.tv_usec); + break; + } + + case KTT_INT: { + int *ip = kt->kt_data; + + sprintf(*bufp, "%d\n", *ip); + break; + } + + case KTT_STRING: { + char *cp = kt->kt_data; + + *bufp = cp; + break; + } + + case KTT_MSGBUF: { + extern struct msgbuf *msgbufp; + long n; + + if (off >= MSG_BSIZE) + return (0); + n = msgbufp->msg_bufx + off; + if (n >= MSG_BSIZE) + n -= MSG_BSIZE; + len = min(MSG_BSIZE - n, MSG_BSIZE - off); + *bufp = msgbufp->msg_bufc + n; + return (len); + } + + case KTT_HOSTNAME: { + char *cp = hostname; + int xlen = hostnamelen; + + if (xlen >= (len-2)) + return (EINVAL); + + bcopy(cp, *bufp, xlen); + (*bufp)[xlen] = '\n'; + (*bufp)[xlen+1] = '\0'; + break; + } + + case KTT_AVENRUN: + averunnable.fscale = FSCALE; + sprintf(*bufp, "%ld %ld %ld %ld\n", + averunnable.ldavg[0], averunnable.ldavg[1], + averunnable.ldavg[2], averunnable.fscale); + break; + + default: + return (0); + } + + len = strlen(*bufp); + if (len <= off) + return (0); + *bufp += off; + return (len - off); +} + +int +kernfs_xwrite(kt, buf, len) + struct kern_target *kt; + char *buf; + int len; +{ + + switch (kt->kt_tag) { + case KTT_HOSTNAME: + if (buf[len-1] == '\n') + --len; + bcopy(buf, hostname, len); + hostname[len] = '\0'; + hostnamelen = len; + return (0); + + default: + return (EIO); + } +} + + +/* + * vp is the current namei directory + * ndp is the name to locate in that directory... 
+ */ +kernfs_lookup(ap) + struct vop_lookup_args /* { + struct vnode * a_dvp; + struct vnode ** a_vpp; + struct componentname * a_cnp; + } */ *ap; +{ + struct componentname *cnp = ap->a_cnp; + struct vnode **vpp = ap->a_vpp; + struct vnode *dvp = ap->a_dvp; + char *pname = cnp->cn_nameptr; + struct kern_target *kt; + struct vnode *fvp; + int error, i; + struct kernfs_node *kp; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_lookup(%x)\n", ap); + printf("kernfs_lookup(dp = %x, vpp = %x, cnp = %x)\n", dvp, vpp, ap->a_cnp); + printf("kernfs_lookup(%s)\n", pname); +#endif + + *vpp = NULLVP; + + if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) + return (EROFS); + + if (cnp->cn_namelen == 1 && *pname == '.') { + *vpp = dvp; + VREF(dvp); + /*VOP_LOCK(dvp);*/ + return (0); + } + +#if 0 + if (cnp->cn_namelen == 4 && bcmp(pname, "root", 4) == 0) { + *vpp = rootdir; + VREF(rootdir); + VOP_LOCK(rootdir); + return (0); + } +#endif + + for (kt = kern_targets, i = 0; i < nkern_targets; kt++, i++) { + if (cnp->cn_namelen == kt->kt_namlen && + bcmp(kt->kt_name, pname, cnp->cn_namelen) == 0) + goto found; + } + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_lookup: i = %d, failed", i); +#endif + + return (cnp->cn_nameiop == LOOKUP ? 
ENOENT : EROFS); + +found: + if (kt->kt_tag == KTT_DEVICE) { + dev_t *dp = kt->kt_data; + loop: + if (*dp == NODEV || !vfinddev(*dp, kt->kt_vtype, &fvp)) + return (ENOENT); + *vpp = fvp; + if (vget(fvp, LK_SHARED, current_proc())) + goto loop; + return (0); + } + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_lookup: allocate new vnode\n"); +#endif + MALLOC(kp, void *, sizeof(struct kernfs_node), M_TEMP, M_WAITOK); + if (error = getnewvnode(VT_KERNFS, dvp->v_mount, kernfs_vnodeop_p, + &fvp)) { + FREE(kp, M_TEMP); + return (error); + } + + fvp->v_data = kp; + VTOKERN(fvp)->kf_kt = kt; + fvp->v_type = kt->kt_vtype; + if (fvp->v_type == VREG) + ubc_info_init(fvp); + *vpp = fvp; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_lookup: newvp = %x\n", fvp); +#endif + return (0); +} + +kernfs_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + /* Only need to check access permissions. */ + return (0); +} + +int +kernfs_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + mode_t fmode = + (vp->v_flag & VROOT) ? 
DIR_MODE : VTOKERN(vp)->kf_kt->kt_mode; + + return (vaccess(fmode, (uid_t)0, (gid_t)0, ap->a_mode, ap->a_cred)); +} + +kernfs_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vattr *vap = ap->a_vap; + int error = 0; + char strbuf[KSTRING], *buf; + + bzero((caddr_t) vap, sizeof(*vap)); + vattr_null(vap); + vap->va_uid = 0; + vap->va_gid = 0; + vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; + vap->va_size = 0; + vap->va_blocksize = DEV_BSIZE; + microtime(&vap->va_atime); + vap->va_mtime = vap->va_atime; + vap->va_ctime = vap->va_ctime; + vap->va_gen = 0; + vap->va_flags = 0; + vap->va_rdev = 0; + vap->va_bytes = 0; + + if (vp->v_flag & VROOT) { +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_getattr: stat rootdir\n"); +#endif + vap->va_type = VDIR; + vap->va_mode = DIR_MODE; + vap->va_nlink = 2; + vap->va_fileid = 2; + vap->va_size = DEV_BSIZE; + } else { + struct kern_target *kt = VTOKERN(vp)->kf_kt; + int nbytes, total; +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_getattr: stat target %s\n", kt->kt_name); +#endif + vap->va_type = kt->kt_vtype; + vap->va_mode = kt->kt_mode; + vap->va_nlink = 1; + vap->va_fileid = 1 + (kt - kern_targets) / sizeof(*kt); + total = 0; + while (buf = strbuf, + nbytes = kernfs_xread(kt, total, &buf, sizeof(strbuf))) + total += nbytes; + vap->va_size = total; + } + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_getattr: return error %d\n", error); +#endif + return (error); +} + +kernfs_setattr(ap) + struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + /* + * Silently ignore attribute changes. + * This allows for open with truncate to have no + * effect until some data is written. I want to + * do it this way because all writes are atomic. 
+ */ + return (0); +} + +int +kernfs_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct uio *uio = ap->a_uio; + struct kern_target *kt; + char strbuf[KSTRING], *buf; + int off, len; + int error; + + if (vp->v_type == VDIR) + return (EOPNOTSUPP); + + kt = VTOKERN(vp)->kf_kt; + +#ifdef KERNFS_DIAGNOSTIC + printf("kern_read %s\n", kt->kt_name); +#endif + + off = uio->uio_offset; +#if 0 + while (buf = strbuf, +#else + if (buf = strbuf, +#endif + len = kernfs_xread(kt, off, &buf, sizeof(strbuf))) { + if (error = uiomove(buf, len, uio)) + return (error); + off += len; + } + return (0); +} + +int +kernfs_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct uio *uio = ap->a_uio; + struct kern_target *kt; + int error, xlen; + char strbuf[KSTRING]; + + if (vp->v_type == VDIR) + return (EOPNOTSUPP); + + kt = VTOKERN(vp)->kf_kt; + + if (uio->uio_offset != 0) + return (EINVAL); + + xlen = min(uio->uio_resid, KSTRING-1); + if (error = uiomove(strbuf, xlen, uio)) + return (error); + + if (uio->uio_resid != 0) + return (EIO); + + strbuf[xlen] = '\0'; + xlen = strlen(strbuf); + return (kernfs_xwrite(kt, strbuf, xlen)); +} + +kernfs_readdir(ap) + struct vop_readdir_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + u_long *a_cookies; + int a_ncookies; + } */ *ap; +{ + struct uio *uio = ap->a_uio; + struct kern_target *kt; + struct dirent d; + int i; + int error; + + if (ap->a_vp->v_type != VDIR) + return (ENOTDIR); + + /* + * We don't allow exporting kernfs mounts, and currently local + * requests do not need cookies. 
+ */ + if (ap->a_ncookies != NULL) + panic("kernfs_readdir: not hungry"); + + i = uio->uio_offset / UIO_MX; + error = 0; + for (kt = &kern_targets[i]; + uio->uio_resid >= UIO_MX && i < nkern_targets; kt++, i++) { + struct dirent *dp = &d; +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_readdir: i = %d\n", i); +#endif + + if (kt->kt_tag == KTT_DEVICE) { + dev_t *dp = kt->kt_data; + struct vnode *fvp; + + if (*dp == NODEV || !vfinddev(*dp, kt->kt_vtype, &fvp)) + continue; + } + + bzero((caddr_t)dp, UIO_MX); + dp->d_namlen = kt->kt_namlen; + bcopy(kt->kt_name, dp->d_name, kt->kt_namlen+1); + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_readdir: name = %s, len = %d\n", + dp->d_name, dp->d_namlen); +#endif + /* + * Fill in the remaining fields + */ + dp->d_reclen = UIO_MX; + dp->d_fileno = i + 3; + dp->d_type = kt->kt_type; + /* + * And ship to userland + */ + if (error = uiomove((caddr_t)dp, UIO_MX, uio)) + break; + } + + uio->uio_offset = i * UIO_MX; + + return (error); +} + +kernfs_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_inactive(%x)\n", vp); +#endif + /* + * Clear out the v_type field to avoid + * nasty things happening in vgone(). + */ + vp->v_type = VNON; + return (0); +} + +kernfs_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + +#ifdef KERNFS_DIAGNOSTIC + printf("kernfs_reclaim(%x)\n", vp); +#endif + if (vp->v_data) { + FREE(vp->v_data, M_TEMP); + vp->v_data = 0; + } + return (0); +} + +/* + * Return POSIX pathconf information applicable to special devices. 
+ */ +kernfs_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + register_t *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_MAX_CANON: + *ap->a_retval = MAX_CANON; + return (0); + case _PC_MAX_INPUT: + *ap->a_retval = MAX_INPUT; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_VDISABLE: + *ap->a_retval = _POSIX_VDISABLE; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Print out the contents of a /dev/fd vnode. + */ +/* ARGSUSED */ +kernfs_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + printf("tag VT_KERNFS, kernfs vnode\n"); + return (0); +} + +/*void*/ +kernfs_vfree(ap) + struct vop_vfree_args /* { + struct vnode *a_pvp; + ino_t a_ino; + int a_mode; + } */ *ap; +{ + + return (0); +} + +/* + * /dev/fd vnode unsupported operation + */ +kernfs_enotsupp() +{ + + return (EOPNOTSUPP); +} + +/* + * /dev/fd "should never get here" operation + */ +kernfs_badop() +{ + + panic("kernfs: bad op"); + /* NOTREACHED */ +} + +/* + * kernfs vnode null operation + */ +kernfs_nullop() +{ + + return (0); +} + +#define kernfs_create ((int (*) __P((struct vop_create_args *)))kernfs_enotsupp) +#define kernfs_mknod ((int (*) __P((struct vop_mknod_args *)))kernfs_enotsupp) +#define kernfs_close ((int (*) __P((struct vop_close_args *)))nullop) +#define kernfs_ioctl ((int (*) __P((struct vop_ioctl_args *)))kernfs_enotsupp) +#define kernfs_select ((int (*) __P((struct vop_select_args *)))kernfs_enotsupp) +#define kernfs_mmap ((int (*) __P((struct vop_mmap_args *)))kernfs_enotsupp) +#define kernfs_fsync ((int (*) __P((struct vop_fsync_args *)))nullop) +#define kernfs_seek ((int (*) __P((struct vop_seek_args *)))nullop) +#define kernfs_remove ((int (*) __P((struct vop_remove_args *)))kernfs_enotsupp) +#define kernfs_link 
((int (*) __P((struct vop_link_args *)))kernfs_enotsupp) +#define kernfs_rename ((int (*) __P((struct vop_rename_args *)))kernfs_enotsupp) +#define kernfs_mkdir ((int (*) __P((struct vop_mkdir_args *)))kernfs_enotsupp) +#define kernfs_rmdir ((int (*) __P((struct vop_rmdir_args *)))kernfs_enotsupp) +#define kernfs_symlink ((int (*) __P((struct vop_symlink_args *)))kernfs_enotsupp) +#define kernfs_readlink \ + ((int (*) __P((struct vop_readlink_args *)))kernfs_enotsupp) +#define kernfs_abortop ((int (*) __P((struct vop_abortop_args *)))nullop) +#define kernfs_lock ((int (*) __P((struct vop_lock_args *)))nullop) +#define kernfs_unlock ((int (*) __P((struct vop_unlock_args *)))nullop) +#define kernfs_bmap ((int (*) __P((struct vop_bmap_args *)))kernfs_badop) +#define kernfs_strategy ((int (*) __P((struct vop_strategy_args *)))kernfs_badop) +#define kernfs_islocked ((int (*) __P((struct vop_islocked_args *)))nullop) +#define kernfs_advlock ((int (*) __P((struct vop_advlock_args *)))kernfs_enotsupp) +#define kernfs_blkatoff \ + ((int (*) __P((struct vop_blkatoff_args *)))kernfs_enotsupp) +#define kernfs_valloc ((int(*) __P(( \ + struct vnode *pvp, \ + int mode, \ + struct ucred *cred, \ + struct vnode **vpp))) kernfs_enotsupp) +#define kernfs_truncate \ + ((int (*) __P((struct vop_truncate_args *)))kernfs_enotsupp) +#define kernfs_update ((int (*) __P((struct vop_update_args *)))kernfs_enotsupp) +#define kernfs_bwrite ((int (*) __P((struct vop_bwrite_args *)))kernfs_enotsupp) + +#define VOPFUNC int (*)(void *) + +int (**kernfs_vnodeop_p)(void *); +struct vnodeopv_entry_desc kernfs_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)kernfs_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)kernfs_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)kernfs_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)kernfs_open }, /* open */ + { &vop_close_desc, (VOPFUNC)kernfs_close }, /* close */ + { &vop_access_desc, 
(VOPFUNC)kernfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)kernfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)kernfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)kernfs_read }, /* read */ + { &vop_write_desc, (VOPFUNC)kernfs_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)kernfs_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)kernfs_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)kernfs_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)kernfs_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)kernfs_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)kernfs_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)kernfs_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)kernfs_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)kernfs_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)kernfs_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)kernfs_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)kernfs_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)kernfs_readlink },/* readlink */ + { &vop_abortop_desc, (VOPFUNC)kernfs_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)kernfs_inactive },/* inactive */ + { &vop_reclaim_desc, (VOPFUNC)kernfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)kernfs_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)kernfs_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)kernfs_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)kernfs_strategy },/* strategy */ + { &vop_print_desc, (VOPFUNC)kernfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)kernfs_islocked },/* islocked */ + { &vop_pathconf_desc, (VOPFUNC)kernfs_pathconf },/* pathconf */ + { &vop_advlock_desc, (VOPFUNC)kernfs_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)kernfs_blkatoff },/* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)kernfs_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)kernfs_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)kernfs_truncate },/* truncate */ + { 
&vop_update_desc, (VOPFUNC)kernfs_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)kernfs_bwrite }, /* bwrite */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc kernfs_vnodeop_opv_desc = + { &kernfs_vnodeop_p, kernfs_vnodeop_entries }; diff --git a/bsd/miscfs/nullfs/null.h b/bsd/miscfs/nullfs/null.h new file mode 100644 index 000000000..d709c61b7 --- /dev/null +++ b/bsd/miscfs/nullfs/null.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)null.h 8.3 (Berkeley) 8/20/94 + * + * null.h 8.2 (Berkeley) 1/21/94 + */ + +struct null_args { + char *target; /* Target of loopback */ +}; + +struct null_mount { + struct mount *nullm_vfs; + struct vnode *nullm_rootvp; /* Reference to root null_node */ +}; + +#ifdef KERNEL +/* + * A cache of vnode references + */ +struct null_node { + LIST_ENTRY(null_node) null_hash; /* Hash list */ + struct vnode *null_lowervp; /* VREFed once */ + struct vnode *null_vnode; /* Back pointer */ +}; + +extern int null_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp)); + +#define MOUNTTONULLMOUNT(mp) ((struct null_mount *)((mp)->mnt_data)) +#define VTONULL(vp) ((struct null_node *)(vp)->v_data) +#define NULLTOV(xp) ((xp)->null_vnode) +#ifdef NULLFS_DIAGNOSTIC +extern struct vnode *null_checkvp __P((struct vnode *vp, char *fil, int lno)); +#define NULLVPTOLOWERVP(vp) null_checkvp((vp), __FILE__, __LINE__) +#else +#define NULLVPTOLOWERVP(vp) (VTONULL(vp)->null_lowervp) +#endif + +extern int (**null_vnodeop_p)(void *); +extern struct vfsops null_vfsops; +#endif /* KERNEL */ diff --git a/bsd/miscfs/nullfs/null_subr.c b/bsd/miscfs/nullfs/null_subr.c new file mode 100644 index 000000000..133cdb932 --- /dev/null +++ b/bsd/miscfs/nullfs/null_subr.c @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)null_subr.c 8.7 (Berkeley) 5/14/95 + * + * null_subr.c 8.4 (Berkeley) 1/21/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */ +#define NNULLNODECACHE 16 + +/* + * Null layer cache: + * Each cache entry holds a reference to the lower vnode + * along with a pointer to the alias vnode. When an + * entry is added the lower vnode is VREF'd. When the + * alias is removed the lower vnode is vrele'd. + */ + +#define NULL_NHASH(vp) \ + (&null_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & null_node_hash]) +LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl; +u_long null_node_hash; + +/* + * Initialise cache headers + */ +nullfs_init() +{ + +#ifdef NULLFS_DIAGNOSTIC + printf("nullfs_init\n"); /* printed during system boot */ +#endif + null_node_hashtbl = hashinit(NNULLNODECACHE, M_CACHE, &null_node_hash); +} + +/* + * Return a VREF'ed alias for lower vnode if already exists, else 0. + */ +static struct vnode * +null_node_find(mp, lowervp) + struct mount *mp; + struct vnode *lowervp; +{ + struct proc *p = curproc; /* XXX */ + struct null_node_hashhead *hd; + struct null_node *a; + struct vnode *vp; + + /* + * Find hash base, and then search the (two-way) linked + * list looking for a null_node structure which is referencing + * the lower vnode. 
If found, the increment the null_node + * reference count (but NOT the lower vnode's VREF counter). + */ + hd = NULL_NHASH(lowervp); +loop: + for (a = hd->lh_first; a != 0; a = a->null_hash.le_next) { + if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) { + vp = NULLTOV(a); + /* + * We need vget for the VXLOCK + * stuff, but we don't want to lock + * the lower node. + */ + if (vget(vp, 0, p)) { + printf ("null_node_find: vget failed.\n"); + goto loop; + }; + return (vp); + } + } + + return NULL; +} + + +/* + * Make a new null_node node. + * Vp is the alias vnode, lofsvp is the lower vnode. + * Maintain a reference to (lowervp). + */ +static int +null_node_alloc(mp, lowervp, vpp) + struct mount *mp; + struct vnode *lowervp; + struct vnode **vpp; +{ + struct null_node_hashhead *hd; + struct null_node *xp; + struct vnode *othervp, *vp; + int error; + + MALLOC(xp, struct null_node *, sizeof(struct null_node), M_TEMP, M_WAITOK); + if (error = getnewvnode(VT_NULL, mp, null_vnodeop_p, vpp)) { + FREE(xp, M_TEMP); + return (error); + } + vp = *vpp; + + vp->v_type = lowervp->v_type; + xp->null_vnode = vp; + vp->v_data = xp; + xp->null_lowervp = lowervp; + /* + * Before we insert our new node onto the hash chains, + * check to see if someone else has beaten us to it. + */ + if (othervp = null_node_find(lowervp)) { + FREE(xp, M_TEMP); + vp->v_type = VBAD; /* node is discarded */ + vp->v_usecount = 0; /* XXX */ + vp->v_data = 0; /* prevent access to freed data */ + *vpp = othervp; + return 0; + }; + if (vp->v_type == VREG) + ubc_info_init(vp); + VREF(lowervp); /* Extra VREF will be vrele'd in null_node_create */ + hd = NULL_NHASH(lowervp); + LIST_INSERT_HEAD(hd, xp, null_hash); + return 0; +} + + +/* + * Try to find an existing null_node vnode refering + * to it, otherwise make a new null_node vnode which + * contains a reference to the lower vnode. 
+ */ +int +null_node_create(mp, lowervp, newvpp) + struct mount *mp; + struct vnode *lowervp; + struct vnode **newvpp; +{ + struct vnode *aliasvp; + + if (aliasvp = null_node_find(mp, lowervp)) { + /* + * null_node_find has taken another reference + * to the alias vnode. + */ +#ifdef NULLFS_DIAGNOSTIC + vprint("null_node_create: exists", NULLTOV(ap)); +#endif + /* VREF(aliasvp); --- done in null_node_find */ + } else { + int error; + + /* + * Get new vnode. + */ +#ifdef NULLFS_DIAGNOSTIC + printf("null_node_create: create new alias vnode\n"); +#endif + + /* + * Make new vnode reference the null_node. + */ + if (error = null_node_alloc(mp, lowervp, &aliasvp)) + return error; + + /* + * aliasvp is already VREF'd by getnewvnode() + */ + } + + vrele(lowervp); + +#if DIAGNOSTIC + if (lowervp->v_usecount < 1) { + /* Should never happen... */ + vprint ("null_node_create: alias ", aliasvp); + vprint ("null_node_create: lower ", lowervp); + panic ("null_node_create: lower has 0 usecount."); + }; +#endif + +#ifdef NULLFS_DIAGNOSTIC + vprint("null_node_create: alias", aliasvp); + vprint("null_node_create: lower", lowervp); +#endif + + *newvpp = aliasvp; + return (0); +} +#ifdef NULLFS_DIAGNOSTIC +struct vnode * +null_checkvp(vp, fil, lno) + struct vnode *vp; + char *fil; + int lno; +{ + struct null_node *a = VTONULL(vp); +#ifdef notyet + /* + * Can't do this check because vop_reclaim runs + * with a funny vop vector. 
+ */ + if (vp->v_op != null_vnodeop_p) { + printf ("null_checkvp: on non-null-node\n"); + while (null_checkvp_barrier) /*WAIT*/ ; + panic("null_checkvp"); + }; +#endif + if (a->null_lowervp == NULL) { + /* Should never happen */ + int i; u_long *p; + printf("vp = %x, ZERO ptr\n", vp); + for (p = (u_long *) a, i = 0; i < 8; i++) + printf(" %x", p[i]); + printf("\n"); + /* wait for debugger */ + while (null_checkvp_barrier) /*WAIT*/ ; + panic("null_checkvp"); + } + if (a->null_lowervp->v_usecount < 1) { + int i; u_long *p; + printf("vp = %x, unref'ed lowervp\n", vp); + for (p = (u_long *) a, i = 0; i < 8; i++) + printf(" %x", p[i]); + printf("\n"); + /* wait for debugger */ + while (null_checkvp_barrier) /*WAIT*/ ; + panic ("null with unref'ed lowervp"); + }; +#ifdef notyet + printf("null %x/%d -> %x/%d [%s, %d]\n", + NULLTOV(a), NULLTOV(a)->v_usecount, + a->null_lowervp, a->null_lowervp->v_usecount, + fil, lno); +#endif + return a->null_lowervp; +} +#endif diff --git a/bsd/miscfs/nullfs/null_vfsops.c b/bsd/miscfs/nullfs/null_vfsops.c new file mode 100644 index 000000000..ffcaa3d81 --- /dev/null +++ b/bsd/miscfs/nullfs/null_vfsops.c @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)null_vfsops.c 8.7 (Berkeley) 5/14/95 + * + * @(#)lofs_vfsops.c 1.2 (Berkeley) 6/18/92 + */ + +/* + * Null Layer + * (See null_vnops.c for a description of what this does.) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Mount null layer + */ +int +nullfs_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + int error = 0; + struct null_args args; + struct vnode *lowerrootvp, *vp; + struct vnode *nullm_rootvp; + struct null_mount *xmp; + u_int size; + +#ifdef NULLFS_DIAGNOSTIC + printf("nullfs_mount(mp = %x)\n", mp); +#endif + + /* + * Update is a no-op + */ + if (mp->mnt_flag & MNT_UPDATE) { + return (EOPNOTSUPP); + /* return VFS_MOUNT(MOUNTTONULLMOUNT(mp)->nullm_vfs, path, data, ndp, p);*/ + } + + /* + * Get argument + */ + if (error = copyin(data, (caddr_t)&args, sizeof(struct null_args))) + return (error); + + /* + * Find lower node + */ + NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT|LOCKLEAF, + UIO_USERSPACE, args.target, p); + if (error = namei(ndp)) + return (error); + + /* + * Sanity check on lower vnode + */ + lowerrootvp = ndp->ni_vp; + + vrele(ndp->ni_dvp); + ndp->ni_dvp = NULL; + + xmp = (struct null_mount *) _MALLOC(sizeof(struct null_mount), + M_UFSMNT, M_WAITOK); /* XXX */ + + /* + * Save reference to underlying FS + */ + xmp->nullm_vfs = lowerrootvp->v_mount; + + /* + * Save 
reference. Each mount also holds + * a reference on the root vnode. + */ + error = null_node_create(mp, lowerrootvp, &vp); + /* + * Unlock the node (either the lower or the alias) + */ + VOP_UNLOCK(vp, 0, p); + /* + * Make sure the node alias worked + */ + if (error) { + vrele(lowerrootvp); + FREE(xmp, M_UFSMNT); /* XXX */ + return (error); + } + + /* + * Keep a held reference to the root vnode. + * It is vrele'd in nullfs_unmount. + */ + nullm_rootvp = vp; + nullm_rootvp->v_flag |= VROOT; + xmp->nullm_rootvp = nullm_rootvp; + if (NULLVPTOLOWERVP(nullm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) + mp->mnt_flag |= MNT_LOCAL; + mp->mnt_data = (qaddr_t) xmp; + vfs_getnewfsid(mp); + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + (void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); +#ifdef NULLFS_DIAGNOSTIC + printf("nullfs_mount: lower %s, alias at %s\n", + mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname); +#endif + return (0); +} + +/* + * VFS start. Nothing needed here - the start routine + * on the underlying filesystem will have been called + * when that filesystem was mounted. + */ +int +nullfs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + return (0); + /* return VFS_START(MOUNTTONULLMOUNT(mp)->nullm_vfs, flags, p); */ +} + +/* + * Free reference to null layer + */ +int +nullfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct vnode *nullm_rootvp = MOUNTTONULLMOUNT(mp)->nullm_rootvp; + int error; + int flags = 0; + +#ifdef NULLFS_DIAGNOSTIC + printf("nullfs_unmount(mp = %x)\n", mp); +#endif + + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + + /* + * Clear out buffer cache. I don't think we + * ever get anything cached at this level at the + * moment, but who knows... 
+ */ +#if 0 + mntflushbuf(mp, 0); + if (mntinvalbuf(mp, 1)) + return (EBUSY); +#endif + if (nullm_rootvp->v_usecount > 1) + return (EBUSY); + if (error = vflush(mp, nullm_rootvp, flags)) + return (error); + +#ifdef NULLFS_DIAGNOSTIC + vprint("alias root of lower", nullm_rootvp); +#endif + /* + * Release reference on underlying root vnode + */ + vrele(nullm_rootvp); + /* + * And blow it away for future re-use + */ + vgone(nullm_rootvp); + /* + * Finally, throw away the null_mount structure + */ + FREE(mp->mnt_data, M_UFSMNT); /* XXX */ + mp->mnt_data = 0; + return 0; +} + +int +nullfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct proc *p = curproc; /* XXX */ + struct vnode *vp; + +#ifdef NULLFS_DIAGNOSTIC + printf("nullfs_root(mp = %x, vp = %x->%x)\n", mp, + MOUNTTONULLMOUNT(mp)->nullm_rootvp, + NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp) + ); +#endif + + /* + * Return locked reference to root. + */ + vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp; + VREF(vp); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + *vpp = vp; + return 0; +} + +int +nullfs_quotactl(mp, cmd, uid, arg, p) + struct mount *mp; + int cmd; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + return VFS_QUOTACTL(MOUNTTONULLMOUNT(mp)->nullm_vfs, cmd, uid, arg, p); +} + +int +nullfs_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + int error; + struct statfs mstat; + +#ifdef NULLFS_DIAGNOSTIC + printf("nullfs_statfs(mp = %x, vp = %x->%x)\n", mp, + MOUNTTONULLMOUNT(mp)->nullm_rootvp, + NULLVPTOLOWERVP(MOUNTTONULLMOUNT(mp)->nullm_rootvp) + ); +#endif + + bzero(&mstat, sizeof(mstat)); + + error = VFS_STATFS(MOUNTTONULLMOUNT(mp)->nullm_vfs, &mstat, p); + if (error) + return (error); + + /* now copy across the "interesting" information and fake the rest */ + sbp->f_type = mstat.f_type; + sbp->f_flags = mstat.f_flags; + sbp->f_bsize = mstat.f_bsize; + sbp->f_iosize = mstat.f_iosize; + sbp->f_blocks = mstat.f_blocks; + sbp->f_bfree = mstat.f_bfree; + 
sbp->f_bavail = mstat.f_bavail; + sbp->f_files = mstat.f_files; + sbp->f_ffree = mstat.f_ffree; + if (sbp != &mp->mnt_stat) { + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + return (0); +} + +int +nullfs_sync(mp, waitfor, cred, p) + struct mount *mp; + int waitfor; + struct ucred *cred; + struct proc *p; +{ + /* + * XXX - Assumes no data cached at null layer. + */ + return (0); +} + +int +nullfs_vget(mp, ino, vpp) + struct mount *mp; + ino_t ino; + struct vnode **vpp; +{ + + return VFS_VGET(MOUNTTONULLMOUNT(mp)->nullm_vfs, ino, vpp); +} + +int +nullfs_fhtovp(mp, fidp, nam, vpp, exflagsp, credanonp) + struct mount *mp; + struct fid *fidp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred**credanonp; +{ + + return VFS_FHTOVP(MOUNTTONULLMOUNT(mp)->nullm_vfs, fidp, nam, vpp, exflagsp,credanonp); +} + +int +nullfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + return VFS_VPTOFH(NULLVPTOLOWERVP(vp), fhp); +} + +int nullfs_init __P((struct vfsconf *)); + +#define nullfs_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \ + size_t, struct proc *)))eopnotsupp) + +struct vfsops null_vfsops = { + nullfs_mount, + nullfs_start, + nullfs_unmount, + nullfs_root, + nullfs_quotactl, + nullfs_statfs, + nullfs_sync, + nullfs_vget, + nullfs_fhtovp, + nullfs_vptofh, + nullfs_init, + nullfs_sysctl, +}; diff --git a/bsd/miscfs/nullfs/null_vnops.c b/bsd/miscfs/nullfs/null_vnops.c new file mode 100644 index 000000000..fecb1278a --- /dev/null +++ b/bsd/miscfs/nullfs/null_vnops.c @@ -0,0 +1,669 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * John Heidemann of the UCLA Ficus project. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)null_vnops.c 8.6 (Berkeley) 5/27/95 + * + * Ancestors: + * @(#)lofs_vnops.c 1.2 (Berkeley) 6/18/92 + * ...and... + * @(#)null_vnodeops.c 1.20 92/07/07 UCLA Ficus project + */ + +/* + * Null Layer + * + * (See mount_null(8) for more information.) + * + * The null layer duplicates a portion of the file system + * name space under a new name. In this respect, it is + * similar to the loopback file system. It differs from + * the loopback fs in two respects: it is implemented using + * a stackable layers techniques, and it's "null-node"s stack above + * all lower-layer vnodes, not just over directory vnodes. + * + * The null layer has two purposes. First, it serves as a demonstration + * of layering by proving a layer which does nothing. (It actually + * does everything the loopback file system does, which is slightly + * more than nothing.) Second, the null layer can serve as a prototype + * layer. Since it provides all necessary layer framework, + * new file system layers can be created very easily be starting + * with a null layer. + * + * The remainder of this man page examines the null layer as a basis + * for constructing new layers. 
 *
 *
 * INSTANTIATING NEW NULL LAYERS
 *
 * New null layers are created with mount_null(8).
 * Mount_null(8) takes two arguments, the pathname
 * of the lower vfs (target-pn) and the pathname where the null
 * layer will appear in the namespace (alias-pn).  After
 * the null layer is put into place, the contents
 * of target-pn subtree will be aliased under alias-pn.
 *
 *
 * OPERATION OF A NULL LAYER
 *
 * The null layer is the minimum file system layer,
 * simply bypassing all possible operations to the lower layer
 * for processing there.  The majority of its activity centers
 * on the bypass routine, through which nearly all vnode operations
 * pass.
 *
 * The bypass routine accepts arbitrary vnode operations for
 * handling by the lower layer.  It begins by examining vnode
 * operation arguments and replacing any null-nodes by their
 * lower-layer equivalents.  It then invokes the operation
 * on the lower layer.  Finally, it replaces the null-nodes
 * in the arguments and, if a vnode is returned by the operation,
 * stacks a null-node on top of the returned vnode.
 *
 * Although bypass handles most operations, vop_getattr, vop_lock,
 * vop_unlock, vop_inactive, vop_reclaim, and vop_print are not
 * bypassed. Vop_getattr must change the fsid being returned.
 * Vop_lock and vop_unlock must handle any locking for the
 * current vnode as well as pass the lock request down.
 * Vop_inactive and vop_reclaim are not bypassed so that
 * they can handle freeing null-layer specific data. Vop_print
 * is not bypassed to avoid excessive debugging information.
 * Also, certain vnode operations change the locking state within
 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
 * and symlink). Ideally these operations should not change the
 * lock state, but should be changed to let the caller of the
 * function unlock them.
Otherwise all intermediate vnode layers
 * (such as union, umapfs, etc) must catch these functions to do
 * the necessary locking at their layer.
 *
 *
 * INSTANTIATING VNODE STACKS
 *
 * Mounting associates the null layer with a lower layer,
 * in effect stacking two VFSes.  Vnode stacks are instead
 * created on demand as files are accessed.
 *
 * The initial mount creates a single vnode stack for the
 * root of the new null layer.  All other vnode stacks
 * are created as a result of vnode operations on
 * this or other null vnode stacks.
 *
 * New vnode stacks come into existence as a result of
 * an operation which returns a vnode.
 * The bypass routine stacks a null-node above the new
 * vnode before returning it to the caller.
 *
 * For example, imagine mounting a null layer with
 * "mount_null /usr/include /dev/layer/null".
 * Changing directory to /dev/layer/null will assign
 * the root null-node (which was created when the null layer was mounted).
 * Now consider opening "sys".  A vop_lookup would be
 * done on the root null-node.  This operation would bypass through
 * to the lower layer which would return a vnode representing
 * the UFS "sys".  Null_bypass then builds a null-node
 * aliasing the UFS "sys" and returns this to the caller.
 * Later operations on the null-node "sys" will repeat this
 * process when constructing other vnode stacks.
 *
 *
 * CREATING OTHER FILE SYSTEM LAYERS
 *
 * One of the easiest ways to construct new file system layers is to make
 * a copy of the null layer, rename all files and variables, and
 * then begin modifying the copy.  Sed can be used to easily rename
 * all variables.
 *
 * The umap layer is an example of a layer descended from the
 * null layer.
 *
 *
 * INVOKING OPERATIONS ON LOWER LAYERS
 *
 * There are two techniques to invoke operations on a lower layer
 * when the operation cannot be completely bypassed.  Each method
 * is appropriate in different situations.
In both cases, + * it is the responsibility of the aliasing layer to make + * the operation arguments "correct" for the lower layer + * by mapping an vnode arguments to the lower layer. + * + * The first approach is to call the aliasing layer's bypass routine. + * This method is most suitable when you wish to invoke the operation + * currently being hanldled on the lower layer. It has the advantage + * that the bypass routine already must do argument mapping. + * An example of this is null_getattrs in the null layer. + * + * A second approach is to directly invoked vnode operations on + * the lower layer with the VOP_OPERATIONNAME interface. + * The advantage of this method is that it is easy to invoke + * arbitrary operations on the lower layer. The disadvantage + * is that vnodes arguments must be manualy mapped. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +int null_bug_bypass = 0; /* for debugging: enables bypass printf'ing */ + +/* + * This is the 10-Apr-92 bypass routine. + * This version has been optimized for speed, throwing away some + * safety checks. It should still always work, but it's not as + * robust to programmer errors. + * Define SAFETY to include some error checking code. + * + * In general, we map all vnodes going down and unmap them on the way back. + * As an exception to this, vnodes can be marked "unmapped" by setting + * the Nth bit in operation's vdesc_flags. + * + * Also, some BSD vnode operations have the side effect of vrele'ing + * their arguments. With stacking, the reference counts are held + * by the upper node, not the lower one, so we must handle these + * side-effects here. This is not of concern in Sun-derived systems + * since there are no such side-effects. 
+ * + * This makes the following assumptions: + * - only one returned vpp + * - no INOUT vpp's (Sun's vop_open has one of these) + * - the vnode operation vector of the first vnode should be used + * to determine what implementation of the op should be invoked + * - all mapped vnodes are of our vnode-type (NEEDSWORK: + * problems on rmdir'ing mount points and renaming?) + */ +int +null_bypass(ap) + struct vop_generic_args /* { + struct vnodeop_desc *a_desc; + + } */ *ap; +{ + extern int (**null_vnodeop_p)(void *); /* not extern, really "forward" */ + register struct vnode **this_vp_p; + int error; + struct vnode *old_vps[VDESC_MAX_VPS]; + struct vnode **vps_p[VDESC_MAX_VPS]; + struct vnode ***vppp; + struct vnodeop_desc *descp = ap->a_desc; + int reles, i; + + if (null_bug_bypass) + printf ("null_bypass: %s\n", descp->vdesc_name); + +#ifdef SAFETY + /* + * We require at least one vp. + */ + if (descp->vdesc_vp_offsets == NULL || + descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET) + panic ("null_bypass: no vp's in map.\n"); +#endif + + /* + * Map the vnodes going in. + * Later, we'll invoke the operation based on + * the first mapped vnode's operation vector. + */ + reles = descp->vdesc_flags; + for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) { + if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) + break; /* bail out at end of list */ + vps_p[i] = this_vp_p = + VOPARG_OFFSETTO(struct vnode**,descp->vdesc_vp_offsets[i],ap); + /* + * We're not guaranteed that any but the first vnode + * are of our type. Check for and don't map any + * that aren't. (We must always map first vp or vclean fails.) + */ + if (i && (*this_vp_p == NULL || + (*this_vp_p)->v_op != null_vnodeop_p)) { + old_vps[i] = NULL; + } else { + old_vps[i] = *this_vp_p; + *(vps_p[i]) = NULLVPTOLOWERVP(*this_vp_p); + /* + * XXX - Several operations have the side effect + * of vrele'ing their vp's. We must account for + * that. (This should go away in the future.) 
+ */ + if (reles & 1) + VREF(*this_vp_p); + } + + } + + /* + * Call the operation on the lower layer + * with the modified argument structure. + */ + error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap); + + /* + * Maintain the illusion of call-by-value + * by restoring vnodes in the argument structure + * to their original value. + */ + reles = descp->vdesc_flags; + for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) { + if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) + break; /* bail out at end of list */ + if (old_vps[i]) { + *(vps_p[i]) = old_vps[i]; + if (reles & 1) + vrele(*(vps_p[i])); + } + } + + /* + * Map the possible out-going vpp + * (Assumes that the lower layer always returns + * a VREF'ed vpp unless it gets an error.) + */ + if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && + !(descp->vdesc_flags & VDESC_NOMAP_VPP) && + !error) { + /* + * XXX - even though some ops have vpp returned vp's, + * several ops actually vrele this before returning. + * We must avoid these ops. + * (This should go away when these ops are regularized.) + */ + if (descp->vdesc_flags & VDESC_VPP_WILLRELE) + goto out; + vppp = VOPARG_OFFSETTO(struct vnode***, + descp->vdesc_vpp_offset,ap); + error = null_node_create(old_vps[0]->v_mount, **vppp, *vppp); + } + + out: + return (error); +} + +/* + * We have to carry on the locking protocol on the null layer vnodes + * as we progress through the tree. We also have to enforce read-only + * if this layer is mounted read-only. 
+ */ +null_lookup(ap) + struct vop_lookup_args /* { + struct vnode * a_dvp; + struct vnode ** a_vpp; + struct componentname * a_cnp; + } */ *ap; +{ + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + int flags = cnp->cn_flags; + struct vop_lock_args lockargs; + struct vop_unlock_args unlockargs; + struct vnode *dvp, *vp; + int error; + + if ((flags & ISLASTCN) && (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) && + (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) + return (EROFS); + error = null_bypass(ap); + if (error == EJUSTRETURN && (flags & ISLASTCN) && + (ap->a_dvp->v_mount->mnt_flag & MNT_RDONLY) && + (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) + error = EROFS; + /* + * We must do the same locking and unlocking at this layer as + * is done in the layers below us. We could figure this out + * based on the error return and the LASTCN, LOCKPARENT, and + * LOCKLEAF flags. However, it is more expidient to just find + * out the state of the lower level vnodes and set ours to the + * same state. + */ + dvp = ap->a_dvp; + vp = *ap->a_vpp; + if (dvp == vp) + return (error); + if (!VOP_ISLOCKED(dvp)) { + unlockargs.a_vp = dvp; + unlockargs.a_flags = 0; + unlockargs.a_p = p; + vop_nounlock(&unlockargs); + } + if (vp != NULL && VOP_ISLOCKED(vp)) { + lockargs.a_vp = vp; + lockargs.a_flags = LK_SHARED; + lockargs.a_p = p; + vop_nolock(&lockargs); + } + return (error); +} + +/* + * Setattr call. Disallow write attempts if the layer is mounted read-only. 
+ */ +int +null_setattr(ap) + struct vop_setattr_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vattr *vap = ap->a_vap; + + if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || + vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || + vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && + (vp->v_mount->mnt_flag & MNT_RDONLY)) + return (EROFS); + if (vap->va_size != VNOVAL) { + switch (vp->v_type) { + case VDIR: + return (EISDIR); + case VCHR: + case VBLK: + case VSOCK: + case VFIFO: + return (0); + case VREG: + case VLNK: + default: + /* + * Disallow write attempts if the filesystem is + * mounted read-only. + */ + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + } + } + return (null_bypass(ap)); +} + +/* + * We handle getattr only to change the fsid. + */ +int +null_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + int error; + + if (error = null_bypass(ap)) + return (error); + /* Requires that arguments be restored. */ + ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; + return (0); +} + +int +null_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + mode_t mode = ap->a_mode; + + /* + * Disallow write attempts on read-only layers; + * unless the file is a socket, fifo, or a block or + * character device resident on the file system. 
+ */ + if (mode & VWRITE) { + switch (vp->v_type) { + case VDIR: + case VLNK: + case VREG: + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + break; + } + } + return (null_bypass(ap)); +} + +/* + * We need to process our own vnode lock and then clear the + * interlock flag as it applies only to our vnode, not the + * vnodes below us on the stack. + */ +int +null_lock(ap) + struct vop_lock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + + vop_nolock(ap); + if ((ap->a_flags & LK_TYPE_MASK) == LK_DRAIN) + return (0); + ap->a_flags &= ~LK_INTERLOCK; + return (null_bypass(ap)); +} + +/* + * We need to process our own vnode unlock and then clear the + * interlock flag as it applies only to our vnode, not the + * vnodes below us on the stack. + */ +int +null_unlock(ap) + struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + vop_nounlock(ap); + ap->a_flags &= ~LK_INTERLOCK; + return (null_bypass(ap)); +} + +int +null_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + /* + * Do nothing (and _don't_ bypass). + * Wait to vrele lowervp until reclaim, + * so that until then our null_node is in the + * cache and reusable. + * + * NEEDSWORK: Someday, consider inactive'ing + * the lowervp and then trying to reactivate it + * with capabilities (v_id) + * like they do in the name lookup cache code. + * That's too much work for now. + */ + VOP_UNLOCK(ap->a_vp, 0, ap->a_p); + return (0); +} + +int +null_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct null_node *xp = VTONULL(vp); + struct vnode *lowervp = xp->null_lowervp; + + /* + * Note: in vop_reclaim, vp->v_op == dead_vnodeop_p, + * so we can't call VOPs on ourself. + */ + /* After this assignment, this node will not be re-used. 
*/ + xp->null_lowervp = NULL; + LIST_REMOVE(xp, null_hash); + FREE(vp->v_data, M_TEMP); + vp->v_data = NULL; + vrele (lowervp); + return (0); +} + +int +null_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + printf ("\ttag VT_NULLFS, vp=%x, lowervp=%x\n", vp, NULLVPTOLOWERVP(vp)); + return (0); +} + +/* + * XXX - vop_strategy must be hand coded because it has no + * vnode in its arguments. + * This goes away with a merged VM/buffer cache. + */ +int +null_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + struct buf *bp = ap->a_bp; + int error; + struct vnode *savedvp; + + savedvp = bp->b_vp; + bp->b_vp = NULLVPTOLOWERVP(bp->b_vp); + + error = VOP_STRATEGY(bp); + + bp->b_vp = savedvp; + + return (error); +} + +/* + * XXX - like vop_strategy, vop_bwrite must be hand coded because it has no + * vnode in its arguments. + * This goes away with a merged VM/buffer cache. + */ +int +null_bwrite(ap) + struct vop_bwrite_args /* { + struct buf *a_bp; + } */ *ap; +{ + struct buf *bp = ap->a_bp; + int error; + struct vnode *savedvp; + + savedvp = bp->b_vp; + bp->b_vp = NULLVPTOLOWERVP(bp->b_vp); + + error = VOP_BWRITE(bp); + + bp->b_vp = savedvp; + + return (error); +} + +/* + * Global vfs data structures + */ + +#define VOPFUNC int (*)(void *) + +int (**null_vnodeop_p)(void *); +struct vnodeopv_entry_desc null_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)null_bypass }, + + { &vop_lookup_desc, (VOPFUNC)null_lookup }, + { &vop_setattr_desc, (VOPFUNC)null_setattr }, + { &vop_getattr_desc, (VOPFUNC)null_getattr }, + { &vop_access_desc, (VOPFUNC)null_access }, + { &vop_lock_desc, (VOPFUNC)null_lock }, + { &vop_unlock_desc, (VOPFUNC)null_unlock }, + { &vop_inactive_desc, (VOPFUNC)null_inactive }, + { &vop_reclaim_desc, (VOPFUNC)null_reclaim }, + { &vop_print_desc, (VOPFUNC)null_print }, + + { &vop_strategy_desc, (VOPFUNC)null_strategy }, + { &vop_bwrite_desc, 
(VOPFUNC)null_bwrite }, + + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc null_vnodeop_opv_desc = + { &null_vnodeop_p, null_vnodeop_entries }; diff --git a/bsd/miscfs/portal/portal.h b/bsd/miscfs/portal/portal.h new file mode 100644 index 000000000..44a251605 --- /dev/null +++ b/bsd/miscfs/portal/portal.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)portal.h 8.4 (Berkeley) 1/21/94 + */ + +struct portal_args { + char *pa_config; /* Config file */ + int pa_socket; /* Socket to server */ +}; + +struct portal_cred { + int pcr_flag; /* File open mode */ + uid_t pcr_uid; /* From ucred */ + short pcr_ngroups; /* From ucred */ + gid_t pcr_groups[NGROUPS]; /* From ucred */ +}; + +#ifdef KERNEL +struct portalmount { + struct vnode *pm_root; /* Root node */ + struct file *pm_server; /* Held reference to server socket */ +}; + +struct portalnode { + int pt_size; /* Length of Arg */ + char *pt_arg; /* Arg to send to server */ + int pt_fileid; /* cookie */ +}; + +#define VFSTOPORTAL(mp) ((struct portalmount *)((mp)->mnt_data)) +#define VTOPORTAL(vp) ((struct portalnode *)(vp)->v_data) + +#define PORTAL_ROOTFILEID 2 + +extern int (**portal_vnodeop_p)(void *); +extern struct vfsops portal_vfsops; +#endif /* KERNEL */ diff --git a/bsd/miscfs/portal/portal_vfsops.c b/bsd/miscfs/portal/portal_vfsops.c new file mode 100644 index 000000000..2cdb39e78 --- /dev/null +++ b/bsd/miscfs/portal/portal_vfsops.c @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)portal_vfsops.c 8.11 (Berkeley) 5/14/95 + * + * @(#)portal_vfsops.c 8.6 (Berkeley) 1/21/94 + */ + +/* + * Portal Filesystem + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int +portal_init(vfsp) + struct vfsconf *vfsp; +{ + + return (0); +} + +/* + * Mount the per-process file descriptors (/dev/fd) + */ +int +portal_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + struct file *fp; + struct portal_args args; + struct portalmount *fmp; + struct socket *so; + struct vnode *rvp; + u_int size; + int error; + struct portalnode *pnp; + + /* + * Update is a no-op + */ + if (mp->mnt_flag & MNT_UPDATE) + return (EOPNOTSUPP); + + if (error = copyin(data, (caddr_t) &args, sizeof(struct portal_args))) + return (error); + + if (error = getsock(p->p_fd, args.pa_socket, &fp)) + return (error); + so = (struct socket *) fp->f_data; + if (so->so_proto->pr_domain->dom_family != AF_UNIX) + return (ESOCKTNOSUPPORT); + + fmp = (struct portalmount *) _MALLOC(sizeof(struct portalmount), + M_UFSMNT, M_WAITOK); /* XXX */ + MALLOC(pnp, void *, sizeof(struct portalnode), M_TEMP, M_WAITOK); + error = getnewvnode(VT_PORTAL, mp, portal_vnodeop_p, &rvp); /* XXX */ + if (error) { + FREE(pnp, M_TEMP); + FREE(fmp, M_UFSMNT); + return (error); + } + 
rvp->v_data = pnp; + + rvp->v_type = VDIR; + rvp->v_flag |= VROOT; + VTOPORTAL(rvp)->pt_arg = 0; + VTOPORTAL(rvp)->pt_size = 0; + VTOPORTAL(rvp)->pt_fileid = PORTAL_ROOTFILEID; + fmp->pm_root = rvp; + fmp->pm_server = fp; + + (void)fref(fp); + + mp->mnt_flag |= MNT_LOCAL; + mp->mnt_data = (qaddr_t) fmp; + vfs_getnewfsid(mp); + + (void)copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + (void)copyinstr(args.pa_config, + mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); + +#ifdef notdef + bzero(mp->mnt_stat.f_mntfromname, MNAMELEN); + bcopy("portal", mp->mnt_stat.f_mntfromname, sizeof("portal")); +#endif + + return (0); +} + +int +portal_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); +} + +int +portal_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct vnode *rootvp = VFSTOPORTAL(mp)->pm_root; + int error, flags = 0; + + + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + + /* + * Clear out buffer cache. I don't think we + * ever get anything cached at this level at the + * moment, but who knows... + */ +#ifdef notyet + mntflushbuf(mp, 0); + if (mntinvalbuf(mp, 1)) + return (EBUSY); +#endif + if (rootvp->v_usecount > 1) + return (EBUSY); + if (error = vflush(mp, rootvp, flags)) + return (error); + + /* + * Release reference on underlying root vnode + */ + vrele(rootvp); + /* + * And blow it away for future re-use + */ + vgone(rootvp); + /* + * Shutdown the socket. This will cause the select in the + * daemon to wake up, and then the accept will get ECONNABORTED + * which it interprets as a request to go and bury itself. + */ + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + soshutdown((struct socket *) VFSTOPORTAL(mp)->pm_server->f_data, 2); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + /* + * Discard reference to underlying file. 
Must call closef because + * this may be the last reference. + */ + closef(VFSTOPORTAL(mp)->pm_server, (struct proc *) 0); + /* + * Finally, throw away the portalmount structure + */ + _FREE(mp->mnt_data, M_UFSMNT); /* XXX */ + mp->mnt_data = 0; + return (0); +} + +int +portal_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct proc *p = current_proc(); /* XXX */ + struct vnode *vp; + + /* + * Return locked reference to root. + */ + vp = VFSTOPORTAL(mp)->pm_root; + VREF(vp); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + *vpp = vp; + return (0); +} + +int +portal_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + + sbp->f_flags = 0; + sbp->f_bsize = DEV_BSIZE; + sbp->f_iosize = DEV_BSIZE; + sbp->f_blocks = 2; /* 1K to keep df happy */ + sbp->f_bfree = 0; + sbp->f_bavail = 0; + sbp->f_files = 1; /* Allow for "." */ + sbp->f_ffree = 0; /* See comments above */ + if (sbp != &mp->mnt_stat) { + sbp->f_type = mp->mnt_vfc->vfc_typenum; + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + return (0); +} + +#define portal_fhtovp ((int (*) __P((struct mount *, struct fid *, \ + struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp) +#define portal_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \ + struct proc *)))eopnotsupp) +#define portal_sync ((int (*) __P((struct mount *, int, struct ucred *, \ + struct proc *)))nullop) +#define portal_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \ + size_t, struct proc *)))eopnotsupp) +#define portal_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \ + eopnotsupp) +#define portal_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp) + +struct vfsops portal_vfsops = { + portal_mount, + portal_start, + portal_unmount, + portal_root, + portal_quotactl, + portal_statfs, + 
portal_sync, + portal_vget, + portal_fhtovp, + portal_vptofh, + portal_init, + portal_sysctl, +}; diff --git a/bsd/miscfs/portal/portal_vnops.c b/bsd/miscfs/portal/portal_vnops.c new file mode 100644 index 000000000..8059b1ca0 --- /dev/null +++ b/bsd/miscfs/portal/portal_vnops.c @@ -0,0 +1,792 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)portal_vnops.c 8.14 (Berkeley) 5/21/95 + * + * @(#)portal_vnops.c 8.8 (Berkeley) 1/21/94 + */ + +/* + * Portal Filesystem + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int portal_fileid = PORTAL_ROOTFILEID+1; + +static void +portal_closefd(p, fd) + struct proc *p; + int fd; +{ + int error; + struct { + int fd; + } ua; + int rc; + + ua.fd = fd; + error = close(p, &ua, &rc); + /* + * We should never get an error, and there isn't anything + * we could do if we got one, so just print a message. + */ + if (error) + printf("portal_closefd: error = %d\n", error); +} + +/* + * vp is the current namei directory + * cnp is the name to locate in that directory... + */ +int +portal_lookup(ap) + struct vop_lookup_args /* { + struct vnode * a_dvp; + struct vnode ** a_vpp; + struct componentname * a_cnp; + } */ *ap; +{ + struct componentname *cnp = ap->a_cnp; + struct vnode **vpp = ap->a_vpp; + struct vnode *dvp = ap->a_dvp; + char *pname = cnp->cn_nameptr; + struct portalnode *pt; + int error; + struct vnode *fvp = 0; + char *path; + int size; + + *vpp = NULLVP; + + if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) + return (EROFS); + + if (cnp->cn_namelen == 1 && *pname == '.') { + *vpp = dvp; + VREF(dvp); + /*VOP_LOCK(dvp);*/ + return (0); + } + + MALLOC(pt, void *, sizeof(struct portalnode), M_TEMP, M_WAITOK); + error = getnewvnode(VT_PORTAL, dvp->v_mount, portal_vnodeop_p, &fvp); + if (error) { + FREE(pt, M_TEMP); + goto bad; + } + fvp->v_type = VREG; + ubc_info_init(fvp); + fvp->v_data = pt; + + /* + * Save all of the remaining pathname and + * advance the namei next pointer to the end + * of the string. 
+ */ + for (size = 0, path = pname; *path; path++) + size++; + cnp->cn_consume = size - cnp->cn_namelen; + + MALLOC(pt->pt_arg, caddr_t, size+1, M_TEMP, M_WAITOK); + pt->pt_size = size+1; + bcopy(pname, pt->pt_arg, pt->pt_size); + pt->pt_fileid = portal_fileid++; + + *vpp = fvp; + /*VOP_LOCK(fvp);*/ + return (0); + +bad: + if (fvp) + vrele(fvp); + return (error); +} + +/* This should be called only from network funnel */ +static int +portal_connect(so, so2) + struct socket *so; + struct socket *so2; +{ + /* from unp_connect, bypassing the namei stuff... */ + struct socket *so3; + struct unpcb *unp2; + struct unpcb *unp3; + + if (so2 == 0) + return (ECONNREFUSED); + + if (so->so_type != so2->so_type) + return (EPROTOTYPE); + + if ((so2->so_options & SO_ACCEPTCONN) == 0) + return (ECONNREFUSED); + + if ((so3 = sonewconn(so2, 0)) == 0) + return (ECONNREFUSED); + + unp2 = sotounpcb(so2); + unp3 = sotounpcb(so3); + if (unp2->unp_addr) + unp3->unp_addr = m_copy(unp2->unp_addr, 0, (int)M_COPYALL); + + so2 = so3; + + + return (unp_connect2(so, so2)); +} + +int +portal_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct socket *so = 0; + struct portalnode *pt; + struct proc *p = ap->a_p; + struct vnode *vp = ap->a_vp; + int s; + struct uio auio; + struct iovec aiov[2]; + int res; + struct mbuf *cm = 0; + struct cmsghdr *cmsg; + int newfds; + int *ip; + int fd; + int error; + int len; + struct portalmount *fmp; + struct file *fp; + struct portal_cred pcred; + + /* + * Nothing to do when opening the root node. + */ + if (vp->v_flag & VROOT) + return (0); + + /* + * Can't be opened unless the caller is set up + * to deal with the side effects. Check for this + * by testing whether the p_dupfd has been set. + */ + if (p->p_dupfd >= 0) + return (ENODEV); + + pt = VTOPORTAL(vp); + fmp = VFSTOPORTAL(vp->v_mount); + + /* + * Create a new socket. 
+ */ + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + error = socreate(AF_UNIX, &so, SOCK_STREAM, 0); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + /* + * Reserve some buffer space + */ + res = pt->pt_size + sizeof(pcred) + 512; /* XXX */ + error = soreserve(so, res, res); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + /* + * Kick off connection + */ + error = portal_connect(so, (struct socket *)fmp->pm_server->f_data); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + /* + * Wait for connection to complete + */ + /* + * XXX: Since the mount point is holding a reference on the + * underlying server socket, it is not easy to find out whether + * the server process is still running. To handle this problem + * we loop waiting for the new socket to be connected (something + * which will only happen if the server is still running) or for + * the reference count on the server socket to drop to 1, which + * will happen if the server dies. Sleep for 5 second intervals + * and keep polling the reference count. XXX. 
+ */ + s = splnet(); + while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { + if (fcount(fmp->pm_server) == 1) { + error = ECONNREFUSED; + splx(s); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + (void) tsleep((caddr_t) &so->so_timeo, PSOCK, "portalcon", 5 * hz); + } + splx(s); + + if (so->so_error) { + error = so->so_error; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + /* + * Set miscellaneous flags + */ + so->so_rcv.sb_timeo = 0; + so->so_snd.sb_timeo = 0; + so->so_rcv.sb_flags |= SB_NOINTR; + so->so_snd.sb_flags |= SB_NOINTR; + + + pcred.pcr_flag = ap->a_mode; + pcred.pcr_uid = ap->a_cred->cr_uid; + pcred.pcr_ngroups = ap->a_cred->cr_ngroups; + bcopy(ap->a_cred->cr_groups, pcred.pcr_groups, NGROUPS * sizeof(gid_t)); + aiov[0].iov_base = (caddr_t) &pcred; + aiov[0].iov_len = sizeof(pcred); + aiov[1].iov_base = pt->pt_arg; + aiov[1].iov_len = pt->pt_size; + auio.uio_iov = aiov; + auio.uio_iovcnt = 2; + auio.uio_rw = UIO_WRITE; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_procp = p; + auio.uio_offset = 0; + auio.uio_resid = aiov[0].iov_len + aiov[1].iov_len; + + error = sosend(so, (struct sockaddr *) 0, &auio, + (struct mbuf *) 0, (struct mbuf *) 0, 0); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + len = auio.uio_resid = sizeof(int); + do { + struct mbuf *m = 0; + int flags = MSG_WAITALL; + error = soreceive(so, (struct sockaddr **) 0, &auio, + &m, &cm, &flags); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + /* + * Grab an error code from the mbuf. + */ + if (m) { + m = m_pullup(m, sizeof(int)); /* Needed? 
*/ + if (m) { + error = *(mtod(m, int *)); + m_freem(m); + } else { + error = EINVAL; + } + } else { + if (cm == 0) { + error = ECONNRESET; /* XXX */ +#ifdef notdef + break; +#endif + } + } + } while (cm == 0 && auio.uio_resid == len && !error); + + if (cm == 0) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + + if (auio.uio_resid) { + error = 0; +#ifdef notdef + error = EMSGSIZE; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; +#endif + } + + /* + * XXX: Break apart the control message, and retrieve the + * received file descriptor. Note that more than one descriptor + * may have been received, or that the rights chain may have more + * than a single mbuf in it. What to do? + */ + cmsg = mtod(cm, struct cmsghdr *); + newfds = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof (int); + if (newfds == 0) { + error = ECONNREFUSED; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto bad; + } + /* + * At this point the rights message consists of a control message + * header, followed by a data region containing a vector of + * integer file descriptors. The fds were allocated by the action + * of receiving the control message. + */ + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + ip = (int *) (cmsg + 1); + fd = *ip++; + if (newfds > 1) { + /* + * Close extra fds. + */ + int i; + printf("portal_open: %d extra fds\n", newfds - 1); + for (i = 1; i < newfds; i++) { + portal_closefd(p, *ip); + ip++; + } + } + + /* + * Check that the mode the file is being opened for is a subset + * of the mode of the existing descriptor. + */ + fp = *fdfile(p, fd); + if (((ap->a_mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) { + portal_closefd(p, fd); + error = EACCES; + goto bad; + } + + /* + * Save the dup fd in the proc structure then return the + * special error code (ENXIO) which causes magic things to + * happen in vn_open. The whole concept is, well, hmmm. 
+ */ + p->p_dupfd = fd; + error = ENXIO; + +bad:; + /* + * And discard the control message. + */ + if (cm) { + m_freem(cm); + } + + if (so) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + soshutdown(so, 2); + soclose(so); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + } + return (error); +} + +int +portal_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vattr *vap = ap->a_vap; + struct timeval tv; + + bzero(vap, sizeof(*vap)); + vattr_null(vap); + vap->va_uid = 0; + vap->va_gid = 0; + vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; + vap->va_size = DEV_BSIZE; + vap->va_blocksize = DEV_BSIZE; + microtime(&tv); + TIMEVAL_TO_TIMESPEC(&tv, &vap->va_atime); + vap->va_mtime = vap->va_atime; + vap->va_ctime = vap->va_ctime; + vap->va_gen = 0; + vap->va_flags = 0; + vap->va_rdev = 0; + /* vap->va_qbytes = 0; */ + vap->va_bytes = 0; + /* vap->va_qsize = 0; */ + if (vp->v_flag & VROOT) { + vap->va_type = VDIR; + vap->va_mode = S_IRUSR|S_IWUSR|S_IXUSR| + S_IRGRP|S_IWGRP|S_IXGRP| + S_IROTH|S_IWOTH|S_IXOTH; + vap->va_nlink = 2; + vap->va_fileid = 2; + } else { + vap->va_type = VREG; + vap->va_mode = S_IRUSR|S_IWUSR| + S_IRGRP|S_IWGRP| + S_IROTH|S_IWOTH; + vap->va_nlink = 1; + vap->va_fileid = VTOPORTAL(vp)->pt_fileid; + } + return (0); +} + +int +portal_setattr(ap) + struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + /* + * Can't mess with the root vnode + */ + if (ap->a_vp->v_flag & VROOT) + return (EACCES); + + return (0); +} + +/* + * Fake readdir, just return empty directory. + * It is hard to deal with '.' and '..' so don't bother. 
+ */ +int +portal_readdir(ap) + struct vop_readdir_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + u_long *a_cookies; + int a_ncookies; + } */ *ap; +{ + + /* + * We don't allow exporting portal mounts, and currently local + * requests do not need cookies. + */ + if (ap->a_ncookies) + panic("portal_readdir: not hungry"); + + return (0); +} + +int +portal_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + + VOP_UNLOCK(ap->a_vp, 0, ap->a_p); + return (0); +} + +int +portal_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct portalnode *pt = VTOPORTAL(ap->a_vp); + + if (pt->pt_arg) { + _FREE((caddr_t) pt->pt_arg, M_TEMP); + pt->pt_arg = 0; + } + FREE(ap->a_vp->v_data, M_TEMP); + ap->a_vp->v_data = 0; + + return (0); +} + +/* + * Return POSIX pathconf information applicable to special devices. + */ +portal_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_MAX_CANON: + *ap->a_retval = MAX_CANON; + return (0); + case _PC_MAX_INPUT: + *ap->a_retval = MAX_INPUT; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_VDISABLE: + *ap->a_retval = _POSIX_VDISABLE; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Print out the contents of a Portal vnode. 
+ */ +/* ARGSUSED */ +int +portal_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + printf("tag VT_PORTAL, portal vnode\n"); + return (0); +} + +/*void*/ +int +portal_vfree(ap) + struct vop_vfree_args /* { + struct vnode *a_pvp; + ino_t a_ino; + int a_mode; + } */ *ap; +{ + + return (0); +} + + +/* + * Portal vnode unsupported operation + */ +int +portal_enotsupp() +{ + + return (EOPNOTSUPP); +} + +/* + * Portal "should never get here" operation + */ +int +portal_badop() +{ + + panic("portal: bad op"); + /* NOTREACHED */ +} + +/* + * Portal vnode null operation + */ +int +portal_nullop() +{ + + return (0); +} + +#define portal_create ((int (*) __P((struct vop_create_args *)))portal_enotsupp) +#define portal_mknod ((int (*) __P((struct vop_mknod_args *)))portal_enotsupp) +#define portal_close ((int (*) __P((struct vop_close_args *)))nullop) +#define portal_access ((int (*) __P((struct vop_access_args *)))nullop) +#define portal_read ((int (*) __P((struct vop_read_args *)))portal_enotsupp) +#define portal_write ((int (*) __P((struct vop_write_args *)))portal_enotsupp) +#define portal_ioctl ((int (*) __P((struct vop_ioctl_args *)))portal_enotsupp) +#define portal_select ((int (*) __P((struct vop_select_args *)))portal_enotsupp) +#define portal_mmap ((int (*) __P((struct vop_mmap_args *)))portal_enotsupp) +#define portal_revoke vop_revoke +#define portal_fsync ((int (*) __P((struct vop_fsync_args *)))nullop) +#define portal_seek ((int (*) __P((struct vop_seek_args *)))nullop) +#define portal_remove ((int (*) __P((struct vop_remove_args *)))portal_enotsupp) +#define portal_link ((int (*) __P((struct vop_link_args *)))portal_enotsupp) +#define portal_rename ((int (*) __P((struct vop_rename_args *)))portal_enotsupp) +#define portal_mkdir ((int (*) __P((struct vop_mkdir_args *)))portal_enotsupp) +#define portal_rmdir ((int (*) __P((struct vop_rmdir_args *)))portal_enotsupp) +#define portal_symlink \ + ((int (*) __P((struct vop_symlink_args 
*)))portal_enotsupp) +#define portal_readlink \ + ((int (*) __P((struct vop_readlink_args *)))portal_enotsupp) +#define portal_abortop ((int (*) __P((struct vop_abortop_args *)))nullop) +#define portal_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) +#define portal_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock) +#define portal_bmap ((int (*) __P((struct vop_bmap_args *)))portal_badop) +#define portal_strategy \ + ((int (*) __P((struct vop_strategy_args *)))portal_badop) +#define portal_islocked \ + ((int (*) __P((struct vop_islocked_args *)))vop_noislocked) +#define fifo_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked) +#define portal_advlock \ + ((int (*) __P((struct vop_advlock_args *)))portal_enotsupp) +#define portal_blkatoff \ + ((int (*) __P((struct vop_blkatoff_args *)))portal_enotsupp) +#define portal_valloc ((int(*) __P(( \ + struct vnode *pvp, \ + int mode, \ + struct ucred *cred, \ + struct vnode **vpp))) portal_enotsupp) +#define portal_truncate \ + ((int (*) __P((struct vop_truncate_args *)))portal_enotsupp) +#define portal_update ((int (*) __P((struct vop_update_args *)))portal_enotsupp) +#define portal_copyfile ((int (*) __P((struct vop_copyfile *)))err_copyfile) +#define portal_bwrite ((int (*) __P((struct vop_bwrite_args *)))portal_enotsupp) +#define portal_blktooff \ + ((int (*) __P((struct vop_blktooff_args *)))portal_enotsupp) +#define portal_offtoblk \ + ((int (*) __P((struct vop_offtoblk_args *)))portal_enotsupp) +#define portal_cmap \ + ((int (*) __P((struct vop_cmap_args *)))portal_enotsupp) + +#define VOPFUNC int (*)(void *) + +int (**portal_vnodeop_p)(void *); +struct vnodeopv_entry_desc portal_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)portal_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)portal_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)portal_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)portal_open }, /* open 
*/ + { &vop_close_desc, (VOPFUNC)portal_close }, /* close */ + { &vop_access_desc, (VOPFUNC)portal_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)portal_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)portal_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)portal_read }, /* read */ + { &vop_write_desc, (VOPFUNC)portal_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)portal_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)portal_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)portal_mmap }, /* mmap */ + { &vop_revoke_desc, (VOPFUNC)portal_revoke }, /* revoke */ + { &vop_fsync_desc, (VOPFUNC)portal_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)portal_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)portal_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)portal_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)portal_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)portal_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)portal_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)portal_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)portal_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)portal_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)portal_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)portal_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)portal_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)portal_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)portal_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)portal_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)portal_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)portal_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)portal_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)portal_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)portal_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)portal_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)portal_valloc 
}, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)portal_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)portal_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)portal_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)portal_bwrite }, /* bwrite */ + { &vop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)portal_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)portal_blktooff }, /* blktooff */ + { &vop_blktooff_desc, (VOPFUNC)portal_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)portal_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc portal_vnodeop_opv_desc = + { &portal_vnodeop_p, portal_vnodeop_entries }; diff --git a/bsd/miscfs/procfs/procfs.h b/bsd/miscfs/procfs/procfs.h new file mode 100644 index 000000000..143366050 --- /dev/null +++ b/bsd/miscfs/procfs/procfs.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs.h,v 1.13 1995/03/29 22:08:30 briggs Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs.h 8.7 (Berkeley) 6/15/94 + */ + +/* + * The different types of node in a procfs filesystem + */ +typedef enum { + Proot, /* the filesystem root */ + Pcurproc, /* symbolic link for curproc */ + Pproc, /* a process-specific sub-directory */ + Pfile, /* the executable file */ + Pmem, /* the process's memory image */ + Pregs, /* the process's register set */ + Pfpregs, /* the process's FP register set */ + Pctl, /* process control */ + Pstatus, /* process status */ + Pnote, /* process notifier */ + Pnotepg /* process group notifier */ +} pfstype; + +/* + * control data for the proc file system. + */ +struct pfsnode { + struct pfsnode *pfs_next; /* next on list */ + struct vnode *pfs_vnode; /* vnode associated with this pfsnode */ + pfstype pfs_type; /* type of procfs node */ + pid_t pfs_pid; /* associated process */ + u_short pfs_mode; /* mode bits for stat() */ + u_long pfs_flags; /* open flags */ + u_long pfs_fileno; /* unique file id */ +}; + +#define PROCFS_NOTELEN 64 /* max length of a note (/proc/$pid/note) */ +#define PROCFS_CTLLEN 8 /* max length of a ctl msg (/proc/$pid/ctl */ + +/* + * Kernel stuff follows + */ +#ifdef KERNEL +#define CNEQ(cnp, s, len) \ + ((cnp)->cn_namelen == (len) && \ + (bcmp((s), (cnp)->cn_nameptr, (len)) == 0)) + +/* + * Format of a directory entry in /proc, ... 
+ * This must map onto struct dirent (see ) + */ +#define PROCFS_NAMELEN 8 +struct pfsdent { + u_int32_t d_fileno; + u_int16_t d_reclen; + u_int8_t d_type; + u_int8_t d_namlen; + char d_name[PROCFS_NAMELEN]; +}; +#define UIO_MX sizeof(struct pfsdent) +#define PROCFS_FILENO(pid, type) \ + (((type) < Pproc) ? \ + ((type) + 2) : \ + ((((pid)+1) << 4) + ((int) (type)))) + +/* + * Convert between pfsnode vnode + */ +#define VTOPFS(vp) ((struct pfsnode *)(vp)->v_data) +#define PFSTOV(pfs) ((pfs)->pfs_vnode) + +typedef struct vfs_namemap vfs_namemap_t; +struct vfs_namemap { + const char *nm_name; + int nm_val; +}; + +int vfs_getuserstr __P((struct uio *, char *, int *)); +vfs_namemap_t *vfs_findname __P((vfs_namemap_t *, char *, int)); + +#define PFIND(pid) ((pid) ? pfind(pid) : &kernel_proc) +int procfs_freevp __P((struct vnode *)); +int procfs_allocvp __P((struct mount *, struct vnode **, long, pfstype)); +struct vnode *procfs_findtextvp __P((struct proc *)); +int procfs_donote __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio)); +int procfs_doregs __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio)); +int procfs_dofpregs __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio)); +int procfs_domem __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio)); +int procfs_doctl __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio)); +int procfs_dostatus __P((struct proc *, struct proc *, struct pfsnode *pfsp, struct uio *uio)); + +/* functions to check whether or not files should be displayed */ +int procfs_validfile __P((struct proc *)); +int procfs_validfpregs __P((struct proc *)); +int procfs_validregs __P((struct proc *)); + +#define PROCFS_LOCKED 0x01 +#define PROCFS_WANT 0x02 + +extern int (**procfs_vnodeop_p)(void *); +extern struct vfsops procfs_vfsops; + +/* + * Prototypes for procfs vnode ops + */ +int procfs_badop(); /* varargs */ +int procfs_rw __P((struct 
vop_read_args *)); +int procfs_lookup __P((struct vop_lookup_args *)); +#define procfs_create ((int (*) __P((struct vop_create_args *))) procfs_badop) +#define procfs_mknod ((int (*) __P((struct vop_mknod_args *))) procfs_badop) +int procfs_open __P((struct vop_open_args *)); +int procfs_close __P((struct vop_close_args *)); +int procfs_access __P((struct vop_access_args *)); +int procfs_getattr __P((struct vop_getattr_args *)); +int procfs_setattr __P((struct vop_setattr_args *)); +#define procfs_read procfs_rw +#define procfs_write procfs_rw +int procfs_ioctl __P((struct vop_ioctl_args *)); +#define procfs_select ((int (*) __P((struct vop_select_args *))) procfs_badop) +#define procfs_mmap ((int (*) __P((struct vop_mmap_args *))) procfs_badop) +#define procfs_fsync ((int (*) __P((struct vop_fsync_args *))) procfs_badop) +#define procfs_seek ((int (*) __P((struct vop_seek_args *))) procfs_badop) +#define procfs_remove ((int (*) __P((struct vop_remove_args *))) procfs_badop) +#define procfs_link ((int (*) __P((struct vop_link_args *))) procfs_badop) +#define procfs_rename ((int (*) __P((struct vop_rename_args *))) procfs_badop) +#define procfs_mkdir ((int (*) __P((struct vop_mkdir_args *))) procfs_badop) +#define procfs_rmdir ((int (*) __P((struct vop_rmdir_args *))) procfs_badop) +#define procfs_symlink ((int (*) __P((struct vop_symlink_args *))) procfs_badop) +int procfs_readdir __P((struct vop_readdir_args *)); +int procfs_readlink __P((struct vop_readlink_args *)); +int procfs_abortop __P((struct vop_abortop_args *)); +int procfs_inactive __P((struct vop_inactive_args *)); +int procfs_reclaim __P((struct vop_reclaim_args *)); +#define procfs_lock ((int (*) __P((struct vop_lock_args *))) nullop) +#define procfs_unlock ((int (*) __P((struct vop_unlock_args *))) nullop) +int procfs_bmap __P((struct vop_bmap_args *)); +#define procfs_strategy ((int (*) __P((struct vop_strategy_args *))) procfs_badop) +int procfs_print __P((struct vop_print_args *)); +#define 
procfs_islocked ((int (*) __P((struct vop_islocked_args *))) nullop) +#define procfs_advlock ((int (*) __P((struct vop_advlock_args *))) procfs_badop) +#define procfs_blkatoff ((int (*) __P((struct vop_blkatoff_args *))) procfs_badop) +#define procfs_valloc ((int (*) __P((struct vop_valloc_args *))) procfs_badop) +#define procfs_vfree ((int (*) __P((struct vop_vfree_args *))) nullop) +#define procfs_truncate ((int (*) __P((struct vop_truncate_args *))) procfs_badop) +#define procfs_update ((int (*) __P((struct vop_update_args *))) nullop) +#endif /* KERNEL */ diff --git a/bsd/miscfs/procfs/procfs_ctl.c b/bsd/miscfs/procfs/procfs_ctl.c new file mode 100644 index 000000000..8b5854ed3 --- /dev/null +++ b/bsd/miscfs/procfs/procfs_ctl.c @@ -0,0 +1,315 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_ctl.c,v 1.12 1994/06/29 06:34:46 cgd Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)procfs_ctl.c 8.4 (Berkeley) 6/15/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * True iff process (p) is in trace wait state + * relative to process (curp) + */ +#define TRACE_WAIT_P(curp, p) \ + ((p)->p_stat == SSTOP && \ + (p)->p_pptr == (curp) && \ + ((p)->p_flag & P_TRACED)) + +#define PROCFS_CTL_ATTACH 1 +#define PROCFS_CTL_DETACH 2 +#define PROCFS_CTL_STEP 3 +#define PROCFS_CTL_RUN 4 +#define PROCFS_CTL_WAIT 5 + +static vfs_namemap_t ctlnames[] = { + /* special /proc commands */ + { "attach", PROCFS_CTL_ATTACH }, + { "detach", PROCFS_CTL_DETACH }, + { "step", PROCFS_CTL_STEP }, + { "run", PROCFS_CTL_RUN }, + { "wait", PROCFS_CTL_WAIT }, + { 0 }, +}; + +static vfs_namemap_t signames[] = { + /* regular signal names */ + { "hup", SIGHUP }, { "int", SIGINT }, + { "quit", SIGQUIT }, { "ill", SIGILL }, + { "trap", SIGTRAP }, { "abrt", SIGABRT }, + { "iot", SIGIOT }, { "emt", SIGEMT }, + { "fpe", SIGFPE }, { "kill", SIGKILL }, + { "bus", SIGBUS }, { "segv", SIGSEGV }, + { "sys", SIGSYS }, { "pipe", SIGPIPE }, + { "alrm", SIGALRM }, { "term", SIGTERM }, + { "urg", SIGURG }, { "stop", SIGSTOP }, + { "tstp", SIGTSTP }, { "cont", SIGCONT }, + { "chld", SIGCHLD }, { "ttin", SIGTTIN }, + { "ttou", SIGTTOU }, { "io", SIGIO }, + { "xcpu", SIGXCPU }, { "xfsz", SIGXFSZ }, + { "vtalrm", SIGVTALRM }, { "prof", SIGPROF }, + { "winch", SIGWINCH }, { "info", SIGINFO }, + { "usr1", SIGUSR1 }, { "usr2", SIGUSR2 }, + { 0 }, +}; + +static int +procfs_control(curp, p, op) + struct proc *curp; + struct proc *p; + int op; +{ + int error; + + /* + * Attach - attaches the target process for debugging + * by the calling process. + */ + if (op == PROCFS_CTL_ATTACH) { + /* check whether already being traced */ + if (p->p_flag & P_TRACED) + return (EBUSY); + + /* can't trace yourself! */ + if (p->p_pid == curp->p_pid) + return (EINVAL); + + /* + * Go ahead and set the trace flag. 
+ * Save the old parent (it's reset in + * _DETACH, and also in kern_exit.c:wait4() + * Reparent the process so that the tracing + * proc gets to see all the action. + * Stop the target. + */ + p->p_flag |= P_TRACED; + p->p_xstat = 0; /* XXX ? */ + if (p->p_pptr != curp) { + p->p_oppid = p->p_pptr->p_pid; + proc_reparent(p, curp); + } + psignal(p, SIGSTOP); + return (0); + } + + /* + * Target process must be stopped, owned by (curp) and + * be set up for tracing (P_TRACED flag set). + * Allow DETACH to take place at any time for sanity. + * Allow WAIT any time, of course. + */ + switch (op) { + case PROCFS_CTL_DETACH: + case PROCFS_CTL_WAIT: + break; + + default: + if (!TRACE_WAIT_P(curp, p)) + return (EBUSY); + } + + /* + * do single-step fixup if needed + */ + FIX_SSTEP(p); + + /* + * Don't deliver any signal by default. + * To continue with a signal, just send + * the signal name to the ctl file + */ + p->p_xstat = 0; + + switch (op) { + /* + * Detach. Cleans up the target process, reparent it if possible + * and set it running once more. + */ + case PROCFS_CTL_DETACH: + /* if not being traced, then this is a painless no-op */ + if ((p->p_flag & P_TRACED) == 0) + return (0); + + /* not being traced any more */ + p->p_flag &= ~P_TRACED; + + /* give process back to original parent */ + if (p->p_oppid != p->p_pptr->p_pid) { + struct proc *pp; + + pp = pfind(p->p_oppid); + if (pp) + proc_reparent(p, pp); + } + + p->p_oppid = 0; + p->p_flag &= ~P_WAITED; /* XXX ? */ + wakeup((caddr_t) curp); /* XXX for CTL_WAIT below ? */ + + break; + + /* + * Step. Let the target process execute a single instruction. + */ + case PROCFS_CTL_STEP: + if (error = process_sstep(p, 1)) + return (error); + break; + + /* + * Run. Let the target process continue running until a breakpoint + * or some other trap. + */ + case PROCFS_CTL_RUN: + break; + + /* + * Wait for the target process to stop. 
+ * If the target is not being traced then just wait + * to enter + */ + case PROCFS_CTL_WAIT: + error = 0; + if (p->p_flag & P_TRACED) { + while (error == 0 && + (p->p_stat != SSTOP) && + (p->p_flag & P_TRACED) && + (p->p_pptr == curp)) { + error = tsleep((caddr_t) p, + PWAIT|PCATCH, "procfsx", 0); + } + if (error == 0 && !TRACE_WAIT_P(curp, p)) + error = EBUSY; + } else { + while (error == 0 && p->p_stat != SSTOP) { + error = tsleep((caddr_t) p, + PWAIT|PCATCH, "procfs", 0); + } + } + return (error); + + default: + panic("procfs_control"); + } + + if (p->p_stat == SSTOP) + setrunnable(p); + return (0); +} + +int +procfs_doctl(curp, p, pfs, uio) + struct proc *curp; + struct pfsnode *pfs; + struct uio *uio; + struct proc *p; +{ + int xlen; + int error; + char msg[PROCFS_CTLLEN+1]; + vfs_namemap_t *nm; + + if (uio->uio_rw != UIO_WRITE) + return (EOPNOTSUPP); + + xlen = PROCFS_CTLLEN; + error = vfs_getuserstr(uio, msg, &xlen); + if (error) + return (error); + + /* + * Map signal names into signal generation + * or debug control. Unknown commands and/or signals + * return EOPNOTSUPP. + * + * Sending a signal while the process is being debugged + * also has the side effect of letting the target continue + * to run. There is no way to single-step a signal delivery. + */ + error = EOPNOTSUPP; + + nm = vfs_findname(ctlnames, msg, xlen); + if (nm) { + error = procfs_control(curp, p, nm->nm_val); + } else { + nm = vfs_findname(signames, msg, xlen); + if (nm) { + if (TRACE_WAIT_P(curp, p)) { + p->p_xstat = nm->nm_val; + FIX_SSTEP(p); + setrunnable(p); + } else { + psignal(p, nm->nm_val); + } + error = 0; + } + } + + return (error); +} diff --git a/bsd/miscfs/procfs/procfs_fpregs.c b/bsd/miscfs/procfs/procfs_fpregs.c new file mode 100644 index 000000000..58c8d5a41 --- /dev/null +++ b/bsd/miscfs/procfs/procfs_fpregs.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_fpregs.c,v 1.3 1994/06/29 06:34:48 cgd Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_fpregs.c 8.2 (Berkeley) 6/15/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int +procfs_dofpregs(curp, p, pfs, uio) + struct proc *curp; + struct proc *p; + struct pfsnode *pfs; + struct uio *uio; +{ +#if defined(PT_GETFPREGS) || defined(PT_SETFPREGS) + int error; + struct fpreg r; + char *kv; + int kl; + + kl = sizeof(r); + kv = (char *) &r; + + kv += uio->uio_offset; + kl -= uio->uio_offset; + if (kl > uio->uio_resid) + kl = uio->uio_resid; + + if (kl < 0) + error = EINVAL; + else + error = process_read_fpregs(p, &r); + if (error == 0) + error = uiomove(kv, kl, uio); + if (error == 0 && uio->uio_rw == UIO_WRITE) { + if (p->p_stat != SSTOP) + error = EBUSY; + else + error = process_write_fpregs(p, &r); + } + + uio->uio_offset = 0; + return (error); +#else + return (EINVAL); +#endif +} + +int +procfs_validfpregs(p) + struct proc *p; +{ + +#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS) + return ((p->p_flag & P_SYSTEM) == 0); 
+#else + return (0); +#endif +} diff --git a/bsd/miscfs/procfs/procfs_mem.c b/bsd/miscfs/procfs/procfs_mem.c new file mode 100644 index 000000000..37830f9c0 --- /dev/null +++ b/bsd/miscfs/procfs/procfs_mem.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_mem.c,v 1.7 1995/01/05 07:10:54 chopps Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 Sean Eric Fagan + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry and Sean Eric Fagan. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_mem.c 8.5 (Berkeley) 6/15/94 + */ + +/* + * This is a lightly hacked and merged version + * of sef's pread/pwrite functions + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +procfs_rwmem(p, uio) + struct proc *p; + struct uio *uio; +{ + int error; + int writing; + + writing = uio->uio_rw == UIO_WRITE; + + /* + * Only map in one page at a time. We don't have to, but it + * makes things easier. This way is trivial - right? 
+ */ + do { + vm_map_t map, tmap; + vm_object_t object; + vm_offset_t kva; + vm_offset_t uva; + int page_offset; /* offset into page */ + vm_offset_t pageno; /* page number */ + vm_map_entry_t out_entry; + vm_prot_t out_prot; + vm_page_t m; + boolean_t wired, single_use; + vm_offset_t off; + u_int len; + int fix_prot; + + uva = (vm_offset_t) uio->uio_offset; + if (uva > VM_MAXUSER_ADDRESS) { + error = 0; + break; + } + + /* + * Get the page number of this segment. + */ + pageno = trunc_page(uva); + page_offset = uva - pageno; + + /* + * How many bytes to copy + */ + len = min(PAGE_SIZE - page_offset, uio->uio_resid); + + /* + * The map we want... + */ + map = &p->p_vmspace->vm_map; + + /* + * Check the permissions for the area we're interested + * in. + */ + fix_prot = 0; + if (writing) + fix_prot = !vm_map_check_protection(map, pageno, + pageno + PAGE_SIZE, VM_PROT_WRITE); + + if (fix_prot) { + /* + * If the page is not writable, we make it so. + * XXX It is possible that a page may *not* be + * read/executable, if a process changes that! + * We will assume, for now, that a page is either + * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE. + */ + error = vm_map_protect(map, pageno, + pageno + PAGE_SIZE, VM_PROT_ALL, 0); + if (error) + break; + } + + /* + * Now we need to get the page. out_entry, out_prot, wired, + * and single_use aren't used. One would think the vm code + * would be a *bit* nicer... We use tmap because + * vm_map_lookup() can change the map argument. + */ + tmap = map; + error = vm_map_lookup(&tmap, pageno, + writing ? VM_PROT_WRITE : VM_PROT_READ, + &out_entry, &object, &off, &out_prot, + &wired, &single_use); + /* + * We're done with tmap now. + */ + if (!error) + vm_map_lookup_done(tmap, out_entry); + + /* + * Fault the page in... 
+ */ + if (!error && writing && object->shadow) { + m = vm_page_lookup(object, off); + if (m == 0 || (m->flags & PG_COPYONWRITE)) + error = vm_fault(map, pageno, + VM_PROT_WRITE, FALSE); + } + + /* Find space in kernel_map for the page we're interested in */ + if (!error) { + kva = VM_MIN_KERNEL_ADDRESS; + error = vm_map_find(kernel_map, object, off, &kva, + PAGE_SIZE, 1); + } + + if (!error) { + /* + * Neither vm_map_lookup() nor vm_map_find() appear + * to add a reference count to the object, so we do + * that here and now. + */ + vm_object_reference(object); + + /* + * Mark the page we just found as pageable. + */ + error = vm_map_pageable(kernel_map, kva, + kva + PAGE_SIZE, 0); + + /* + * Now do the i/o move. + */ + if (!error) + error = uiomove(kva + page_offset, len, uio); + + vm_map_remove(kernel_map, kva, kva + PAGE_SIZE); + } + if (fix_prot) + vm_map_protect(map, pageno, pageno + PAGE_SIZE, + VM_PROT_READ|VM_PROT_EXECUTE, 0); + } while (error == 0 && uio->uio_resid > 0); + + return (error); +} + +/* + * Copy data in and out of the target process. + * We do this by mapping the process's page into + * the kernel and then doing a uiomove direct + * from the kernel address space. + */ +int +procfs_domem(curp, p, pfs, uio) + struct proc *curp; + struct proc *p; + struct pfsnode *pfs; + struct uio *uio; +{ + + if (uio->uio_resid == 0) + return (0); + + return (procfs_rwmem(p, uio)); +} + +/* + * Given process (p), find the vnode from which + * it's text segment is being executed. + * + * It would be nice to grab this information from + * the VM system, however, there is no sure-fire + * way of doing that. Instead, fork(), exec() and + * wait() all maintain the p_textvp field in the + * process proc structure which contains a held + * reference to the exec'ed vnode. 
+ */ +struct vnode * +procfs_findtextvp(p) + struct proc *p; +{ + + return (p->p_textvp); +} + + +#ifdef probably_never +/* + * Given process (p), find the vnode from which + * it's text segment is being mapped. + * + * (This is here, rather than in procfs_subr in order + * to keep all the VM related code in one place.) + */ +struct vnode * +procfs_findtextvp(p) + struct proc *p; +{ + int error; + vm_object_t object; + vm_offset_t pageno; /* page number */ + + /* find a vnode pager for the user address space */ + + for (pageno = VM_MIN_ADDRESS; + pageno < VM_MAXUSER_ADDRESS; + pageno += PAGE_SIZE) { + vm_map_t map; + vm_map_entry_t out_entry; + vm_prot_t out_prot; + boolean_t wired, single_use; + vm_offset_t off; + + map = &p->p_vmspace->vm_map; + error = vm_map_lookup(&map, pageno, + VM_PROT_READ, + &out_entry, &object, &off, &out_prot, + &wired, &single_use); + + if (!error) { + vm_pager_t pager; + + printf("procfs: found vm object\n"); + vm_map_lookup_done(map, out_entry); + printf("procfs: vm object = %x\n", object); + + /* + * At this point, assuming no errors, object + * is the VM object mapping UVA (pageno). + * Ensure it has a vnode pager, then grab + * the vnode from that pager's handle. + */ + + pager = object->pager; + printf("procfs: pager = %x\n", pager); + if (pager) + printf("procfs: found pager, type = %d\n", pager->pg_type); + if (pager && pager->pg_type == PG_VNODE) { + struct vnode *vp; + + vp = (struct vnode *) pager->pg_handle; + printf("procfs: vp = 0x%x\n", vp); + return (vp); + } + } + } + + printf("procfs: text object not found\n"); + return (0); +} +#endif /* probably_never */ diff --git a/bsd/miscfs/procfs/procfs_note.c b/bsd/miscfs/procfs/procfs_note.c new file mode 100644 index 000000000..3f9d2c080 --- /dev/null +++ b/bsd/miscfs/procfs/procfs_note.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_note.c,v 1.8 1994/06/29 06:34:53 cgd Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_note.c 8.2 (Berkeley) 1/21/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +int +procfs_donote(curp, p, pfs, uio) + struct proc *curp; + struct proc *p; + struct pfsnode *pfs; + struct uio *uio; +{ + int xlen; + int error; + char note[PROCFS_NOTELEN+1]; + + if (uio->uio_rw != UIO_WRITE) + return (EINVAL); + + xlen = PROCFS_NOTELEN; + error = vfs_getuserstr(uio, note, &xlen); + if (error) + return (error); + + /* send to process's notify function */ + return (EOPNOTSUPP); +} diff --git a/bsd/miscfs/procfs/procfs_regs.c b/bsd/miscfs/procfs/procfs_regs.c new file mode 100644 index 000000000..eb0998939 --- /dev/null +++ b/bsd/miscfs/procfs/procfs_regs.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_regs.c,v 1.8 1994/06/29 06:34:54 cgd Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_regs.c 8.4 (Berkeley) 6/15/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int +procfs_doregs(curp, p, pfs, uio) + struct proc *curp; + struct proc *p; + struct pfsnode *pfs; + struct uio *uio; +{ +#if defined(PT_GETREGS) || defined(PT_SETREGS) + int error; + struct reg r; + char *kv; + int kl; + + kl = sizeof(r); + kv = (char *) &r; + + kv += uio->uio_offset; + kl -= uio->uio_offset; + if (kl > uio->uio_resid) + kl = uio->uio_resid; + + if (kl < 0) + error = EINVAL; + else + error = process_read_regs(p, &r); + if (error == 0) + error = uiomove(kv, kl, uio); + if (error == 0 && uio->uio_rw == UIO_WRITE) { + if (p->p_stat != SSTOP) + error = EBUSY; + else + error = process_write_regs(p, &r); + } + + uio->uio_offset = 0; + return (error); +#else + return (EINVAL); +#endif +} + +int +procfs_validregs(p) + struct proc *p; +{ + +#if defined(PT_SETREGS) || defined(PT_GETREGS) + return ((p->p_flag & P_SYSTEM) == 0); +#else + return (0); +#endif +} diff --git a/bsd/miscfs/procfs/procfs_status.c b/bsd/miscfs/procfs/procfs_status.c new file mode 100644 index 000000000..0d5def29d --- /dev/null +++ 
b/bsd/miscfs/procfs/procfs_status.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_status.c,v 1.9 1994/06/29 06:34:56 cgd Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_status.c 8.4 (Berkeley) 6/15/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int +procfs_dostatus(curp, p, pfs, uio) + struct proc *curp; + struct proc *p; + struct pfsnode *pfs; + struct uio *uio; +{ + struct session *sess; + struct tty *tp; + struct ucred *cr; + char *ps; + char *sep; + int pid, ppid, pgid, sid; + int i; + int xlen; + int error; + char psbuf[256]; /* XXX - conservative */ + + if (uio->uio_rw != UIO_READ) + return (EOPNOTSUPP); + + pid = p->p_pid; + ppid = p->p_pptr ? p->p_pptr->p_pid : 0, + pgid = p->p_pgrp->pg_id; + sess = p->p_pgrp->pg_session; + sid = sess->s_leader ? 
sess->s_leader->p_pid : 0; + +/* comm pid ppid pgid sid maj,min ctty,sldr start ut st wmsg uid groups ... */ + + ps = psbuf; + bcopy(p->p_comm, ps, MAXCOMLEN); + ps[MAXCOMLEN] = '\0'; + ps += strlen(ps); + ps += sprintf(ps, " %d %d %d %d ", pid, ppid, pgid, sid); + + if ((p->p_flag&P_CONTROLT) && (tp = sess->s_ttyp)) + ps += sprintf(ps, "%d,%d ", major(tp->t_dev), minor(tp->t_dev)); + else + ps += sprintf(ps, "%d,%d ", -1, -1); + + sep = ""; + if (sess->s_ttyvp) { + ps += sprintf(ps, "%sctty", sep); + sep = ","; + } + if (SESS_LEADER(p)) { + ps += sprintf(ps, "%ssldr", sep); + sep = ","; + } + if (*sep != ',') + ps += sprintf(ps, "noflags"); + + if (p->p_flag & P_INMEM) + ps += sprintf(ps, " %d,%d", + p->p_stats->p_start.tv_sec, + p->p_stats->p_start.tv_usec); + else + ps += sprintf(ps, " -1,-1"); + + { + struct timeval ut, st; + + calcru(p, &ut, &st, (void *) 0); + ps += sprintf(ps, " %d,%d %d,%d", + ut.tv_sec, + ut.tv_usec, + st.tv_sec, + st.tv_usec); + } + + ps += sprintf(ps, " %s", + (p->p_wchan && p->p_wmesg) ? p->p_wmesg : "nochan"); + + cr = p->p_ucred; + + ps += sprintf(ps, " %d", cr->cr_uid); + for (i = 0; i < cr->cr_ngroups; i++) + ps += sprintf(ps, ",%d", cr->cr_groups[i]); + ps += sprintf(ps, "\n"); + + xlen = ps - psbuf; + xlen -= uio->uio_offset; + ps = psbuf + uio->uio_offset; + xlen = imin(xlen, uio->uio_resid); + if (xlen <= 0) + error = 0; + else + error = uiomove(ps, xlen, uio); + + return (error); +} diff --git a/bsd/miscfs/procfs/procfs_subr.c b/bsd/miscfs/procfs/procfs_subr.c new file mode 100644 index 000000000..dc245204d --- /dev/null +++ b/bsd/miscfs/procfs/procfs_subr.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_subr.c,v 1.13 1994/06/29 06:34:57 cgd Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_subr.c 8.5 (Berkeley) 6/15/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct pfsnode *pfshead; +static int pfsvplock; + +/* + * allocate a pfsnode/vnode pair. the vnode is + * referenced, but not locked. + * + * the pid, pfs_type, and mount point uniquely + * identify a pfsnode. the mount point is needed + * because someone might mount this filesystem + * twice. + * + * all pfsnodes are maintained on a singly-linked + * list. new nodes are only allocated when they cannot + * be found on this list. entries on the list are + * removed when the vfs reclaim entry is called. + * + * a single lock is kept for the entire list. this is + * needed because the getnewvnode() function can block + * waiting for a vnode to become free, in which case there + * may be more than one process trying to get the same + * vnode. this lock is only taken if we are going to + * call getnewvnode, since the kernel itself is single-threaded. + * + * if an entry is found on the list, then call vget() to + * take a reference. 
this is done because there may be + * zero references to it and so it needs to removed from + * the vnode free list. + */ +int +procfs_allocvp(mp, vpp, pid, pfs_type) + struct mount *mp; + struct vnode **vpp; + long pid; + pfstype pfs_type; +{ + struct pfsnode *pfs; + struct vnode *vp; + struct pfsnode **pp; + int error; + +loop: + for (pfs = pfshead; pfs != 0; pfs = pfs->pfs_next) { + vp = PFSTOV(pfs); + if (pfs->pfs_pid == pid && + pfs->pfs_type == pfs_type && + vp->v_mount == mp) { + if (vget(vp, 0, current_proc())) + goto loop; + *vpp = vp; + return (0); + } + } + + /* + * otherwise lock the vp list while we call getnewvnode + * since that can block. + */ + if (pfsvplock & PROCFS_LOCKED) { + pfsvplock |= PROCFS_WANT; + sleep((caddr_t) &pfsvplock, PINOD); + goto loop; + } + pfsvplock |= PROCFS_LOCKED; + + MALLOC(pfs, void *, sizeof(struct pfsnode), M_TEMP, M_WAITOK); + if (error = getnewvnode(VT_PROCFS, mp, procfs_vnodeop_p, vpp)) { + FREE(pfs, M_TEMP); + goto out; + } + vp = *vpp; + vp->v_data = pfs; + + pfs->pfs_next = 0; + pfs->pfs_pid = (pid_t) pid; + pfs->pfs_type = pfs_type; + pfs->pfs_vnode = vp; + pfs->pfs_flags = 0; + pfs->pfs_fileno = PROCFS_FILENO(pid, pfs_type); + + switch (pfs_type) { + case Proot: /* /proc = dr-xr-xr-x */ + pfs->pfs_mode = (VREAD|VEXEC) | + (VREAD|VEXEC) >> 3 | + (VREAD|VEXEC) >> 6; + vp->v_type = VDIR; + vp->v_flag |= VROOT; + break; + + case Pcurproc: /* /proc/curproc = lr--r--r-- */ + pfs->pfs_mode = (VREAD) | + (VREAD >> 3) | + (VREAD >> 6); + vp->v_type = VLNK; + break; + + case Pproc: + pfs->pfs_mode = (VREAD|VEXEC) | + (VREAD|VEXEC) >> 3 | + (VREAD|VEXEC) >> 6; + vp->v_type = VDIR; + break; + + case Pfile: + case Pmem: + case Pregs: + case Pfpregs: + pfs->pfs_mode = (VREAD|VWRITE); + vp->v_type = VREG; + break; + + case Pctl: + case Pnote: + case Pnotepg: + pfs->pfs_mode = (VWRITE); + vp->v_type = VREG; + break; + + case Pstatus: + pfs->pfs_mode = (VREAD) | + (VREAD >> 3) | + (VREAD >> 6); + vp->v_type = VREG; + break; + + 
default: + panic("procfs_allocvp"); + } + + if (vp->v_type == VREG) + ubc_info_init(vp); + + /* add to procfs vnode list */ + for (pp = &pfshead; *pp; pp = &(*pp)->pfs_next) + continue; + *pp = pfs; + +out: + pfsvplock &= ~PROCFS_LOCKED; + + if (pfsvplock & PROCFS_WANT) { + pfsvplock &= ~PROCFS_WANT; + wakeup((caddr_t) &pfsvplock); + } + + return (error); +} + +int +procfs_freevp(vp) + struct vnode *vp; +{ + struct pfsnode **pfspp; + struct pfsnode *pfs = VTOPFS(vp); + + for (pfspp = &pfshead; *pfspp != 0; pfspp = &(*pfspp)->pfs_next) { + if (*pfspp == pfs) { + *pfspp = pfs->pfs_next; + break; + } + } + + FREE(vp->v_data, M_TEMP); + vp->v_data = 0; + return (0); +} + +int +procfs_rw(ap) + struct vop_read_args *ap; +{ + struct vnode *vp = ap->a_vp; + struct uio *uio = ap->a_uio; + struct proc *curp = uio->uio_procp; + struct pfsnode *pfs = VTOPFS(vp); + struct proc *p; + + p = PFIND(pfs->pfs_pid); + if (p == 0) + return (EINVAL); + + switch (pfs->pfs_type) { + case Pnote: + case Pnotepg: + return (procfs_donote(curp, p, pfs, uio)); + + case Pregs: + return (procfs_doregs(curp, p, pfs, uio)); + + case Pfpregs: + return (procfs_dofpregs(curp, p, pfs, uio)); + + case Pctl: + return (procfs_doctl(curp, p, pfs, uio)); + + case Pstatus: + return (procfs_dostatus(curp, p, pfs, uio)); + + case Pmem: + return (procfs_domem(curp, p, pfs, uio)); + + default: + return (EOPNOTSUPP); + } +} + +/* + * Get a string from userland into (buf). Strip a trailing + * nl character (to allow easy access from the shell). + * The buffer should be *buflenp + 1 chars long. vfs_getuserstr + * will automatically add a nul char at the end. + * + * Returns 0 on success or the following errors + * + * EINVAL: file offset is non-zero. 
+ * EMSGSIZE: message is longer than kernel buffer + * EFAULT: user i/o buffer is not addressable + */ +int +vfs_getuserstr(uio, buf, buflenp) + struct uio *uio; + char *buf; + int *buflenp; +{ + int xlen; + int error; + + if (uio->uio_offset != 0) + return (EINVAL); + + xlen = *buflenp; + + /* must be able to read the whole string in one go */ + if (xlen < uio->uio_resid) + return (EMSGSIZE); + xlen = uio->uio_resid; + + if (error = uiomove(buf, xlen, uio)) + return (error); + + /* allow multiple writes without seeks */ + uio->uio_offset = 0; + + /* cleanup string and remove trailing newline */ + buf[xlen] = '\0'; + xlen = strlen(buf); + if (xlen > 0 && buf[xlen-1] == '\n') + buf[--xlen] = '\0'; + *buflenp = xlen; + + return (0); +} + +vfs_namemap_t * +vfs_findname(nm, buf, buflen) + vfs_namemap_t *nm; + char *buf; + int buflen; +{ + + for (; nm->nm_name; nm++) + if (bcmp(buf, (char *) nm->nm_name, buflen+1) == 0) + return (nm); + + return (0); +} diff --git a/bsd/miscfs/procfs/procfs_vfsops.c b/bsd/miscfs/procfs/procfs_vfsops.c new file mode 100644 index 000000000..ef382ca6c --- /dev/null +++ b/bsd/miscfs/procfs/procfs_vfsops.c @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_vfsops.c,v 1.23 1995/03/09 12:05:54 mycroft Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_vfsops.c 8.5 (Berkeley) 6/15/94 + */ + +/* + * procfs VFS interface + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for PAGE_SIZE */ + +/* + * VFS Operations. + * + * mount system call + */ +/* ARGSUSED */ +procfs_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + size_t size; + + if (UIO_MX & (UIO_MX-1)) { + log(LOG_ERR, "procfs: invalid directory entry size"); + return (EINVAL); + } + + if (mp->mnt_flag & MNT_UPDATE) + return (EOPNOTSUPP); + + mp->mnt_flag |= MNT_LOCAL; + mp->mnt_data = 0; + getnewfsid(mp, makefstype(MOUNT_PROCFS)); + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + bzero(mp->mnt_stat.f_mntfromname, MNAMELEN); + bcopy("procfs", mp->mnt_stat.f_mntfromname, sizeof("procfs")); + return (0); +} + +/* + * unmount system call + */ +procfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + int error; + extern int doforce; + int flags = 0; + + if (mntflags & MNT_FORCE) { + /* procfs can never be rootfs so don't check for it */ + if (!doforce) + return (EINVAL); + flags |= FORCECLOSE; + } + + if (error = vflush(mp, 0, flags)) + return (error); + + return (0); +} + +procfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + + return 
(procfs_allocvp(mp, vpp, 0, Proot)); +} + +/* ARGSUSED */ +procfs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); +} + +/* + * Get file system statistics. + */ +procfs_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + +#ifdef COMPAT_09 + sbp->f_type = 10; +#else + sbp->f_type = 0; +#endif + sbp->f_bsize = PAGE_SIZE; + sbp->f_iosize = PAGE_SIZE; + sbp->f_blocks = 1; /* avoid divide by zero in some df's */ + sbp->f_bfree = 0; + sbp->f_bavail = 0; + sbp->f_files = maxproc; /* approx */ + sbp->f_ffree = maxproc - nprocs; /* approx */ + if (sbp != &mp->mnt_stat) { + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN); + sbp->f_fstypename[MFSNAMELEN] = '\0'; + return (0); +} + +procfs_quotactl(mp, cmds, uid, arg, p) + struct mount *mp; + int cmds; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + + return (EOPNOTSUPP); +} + +procfs_sync(mp, waitfor) + struct mount *mp; + int waitfor; +{ + + return (0); +} + +procfs_vget(mp, ino, vpp) + struct mount *mp; + ino_t ino; + struct vnode **vpp; +{ + + return (EOPNOTSUPP); +} + +procfs_fhtovp(mp, fhp, vpp) + struct mount *mp; + struct fid *fhp; + struct vnode **vpp; +{ + + return (EINVAL); +} + +procfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + + return (EINVAL); +} + +procfs_init() +{ + + return (0); +} + +struct vfsops procfs_vfsops = { + MOUNT_PROCFS, + procfs_mount, + procfs_start, + procfs_unmount, + procfs_root, + procfs_quotactl, + procfs_statfs, + procfs_sync, + procfs_vget, + procfs_fhtovp, + procfs_vptofh, + procfs_init, +}; diff --git a/bsd/miscfs/procfs/procfs_vnops.c b/bsd/miscfs/procfs/procfs_vnops.c new file mode 100644 index 000000000..6a9b52654 --- /dev/null +++ b/bsd/miscfs/procfs/procfs_vnops.c @@ -0,0 +1,918 @@ 
+/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: procfs_vnops.c,v 1.32 1995/02/03 16:18:55 mycroft Exp $ */ + +/* + * Copyright (c) 1993 Jan-Simon Pendry + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)procfs_vnops.c 8.8 (Berkeley) 6/15/94 + */ + +/* + * procfs vnode interface + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for PAGE_SIZE */ +#include +#include +#include + +/* + * Vnode Operations. + * + */ + +/* + * This is a list of the valid names in the + * process-specific sub-directories. 
It is + * used in procfs_lookup and procfs_readdir + */ +struct proc_target { + u_char pt_type; + u_char pt_namlen; + char *pt_name; + pfstype pt_pfstype; + int (*pt_valid) __P((struct proc *p)); +} proc_targets[] = { +#define N(s) sizeof(s)-1, s + /* name type validp */ + { DT_DIR, N("."), Pproc, NULL }, + { DT_DIR, N(".."), Proot, NULL }, + { DT_REG, N("file"), Pfile, procfs_validfile }, + { DT_REG, N("mem"), Pmem, NULL }, + { DT_REG, N("regs"), Pregs, procfs_validregs }, + { DT_REG, N("fpregs"), Pfpregs, procfs_validfpregs }, + { DT_REG, N("ctl"), Pctl, NULL }, + { DT_REG, N("status"), Pstatus, NULL }, + { DT_REG, N("note"), Pnote, NULL }, + { DT_REG, N("notepg"), Pnotepg, NULL }, +#undef N +}; +static int nproc_targets = sizeof(proc_targets) / sizeof(proc_targets[0]); + +static pid_t atopid __P((const char *, u_int)); + +/* + * set things up for doing i/o on + * the pfsnode (vp). (vp) is locked + * on entry, and should be left locked + * on exit. + * + * for procfs we don't need to do anything + * in particular for i/o. all that is done + * is to support exclusive open on process + * memory images. + */ +procfs_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct pfsnode *pfs = VTOPFS(ap->a_vp); + + switch (pfs->pfs_type) { + case Pmem: + if (PFIND(pfs->pfs_pid) == 0) + return (ENOENT); /* was ESRCH, jsp */ + + if ((pfs->pfs_flags & FWRITE) && (ap->a_mode & O_EXCL) || + (pfs->pfs_flags & O_EXCL) && (ap->a_mode & FWRITE)) + return (EBUSY); + + if (ap->a_mode & FWRITE) + pfs->pfs_flags = ap->a_mode & (FWRITE|O_EXCL); + + return (0); + + default: + break; + } + + return (0); +} + +/* + * close the pfsnode (vp) after doing i/o. + * (vp) is not locked on entry or exit. + * + * nothing to do for procfs other than undo + * any exclusive open flag (see _open above). 
+ */ +procfs_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct pfsnode *pfs = VTOPFS(ap->a_vp); + + switch (pfs->pfs_type) { + case Pmem: + if ((ap->a_fflag & FWRITE) && (pfs->pfs_flags & O_EXCL)) + pfs->pfs_flags &= ~(FWRITE|O_EXCL); + break; + } + + return (0); +} + +/* + * do an ioctl operation on pfsnode (vp). + * (vp) is not locked on entry or exit. + */ +procfs_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + u_long a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + return (ENOTTY); +} + +/* + * do block mapping for pfsnode (vp). + * since we don't use the buffer cache + * for procfs this function should never + * be called. in any case, it's not clear + * what part of the kernel ever makes use + * of this function. for sanity, this is the + * usual no-op bmap, although returning + * (EIO) would be a reasonable alternative. + */ +procfs_bmap(ap) + struct vop_bmap_args /* { + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + } */ *ap; +{ + + if (ap->a_vpp != NULL) + *ap->a_vpp = ap->a_vp; + if (ap->a_bnp != NULL) + *ap->a_bnp = ap->a_bn; + return (0); +} + +/* + * _inactive is called when the pfsnode + * is vrele'd and the reference count goes + * to zero. (vp) will be on the vnode free + * list, so to get it back vget() must be + * used. + * + * for procfs, check if the process is still + * alive and if it isn't then just throw away + * the vnode by calling vgone(). this may + * be overkill and a waste of time since the + * chances are that the process will still be + * there and PFIND is not free. + * + * (vp) is not locked on entry or exit. 
+ */ +procfs_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct pfsnode *pfs = VTOPFS(ap->a_vp); + + if (PFIND(pfs->pfs_pid) == 0) + vgone(ap->a_vp); + + return (0); +} + +/* + * _reclaim is called when getnewvnode() + * wants to make use of an entry on the vnode + * free list. at this time the filesystem needs + * to free any private data and remove the node + * from any private lists. + */ +procfs_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + return (procfs_freevp(ap->a_vp)); +} + +/* + * Return POSIX pathconf information applicable to special devices. + */ +procfs_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + register_t *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_MAX_CANON: + *ap->a_retval = MAX_CANON; + return (0); + case _PC_MAX_INPUT: + *ap->a_retval = MAX_INPUT; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_VDISABLE: + *ap->a_retval = _POSIX_VDISABLE; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * _print is used for debugging. + * just print a readable description + * of (vp). + */ +procfs_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct pfsnode *pfs = VTOPFS(ap->a_vp); + + printf("tag VT_PROCFS, type %s, pid %d, mode %x, flags %x\n", + pfs->pfs_type, pfs->pfs_pid, pfs->pfs_mode, pfs->pfs_flags); +} + +/* + * _abortop is called when operations such as + * rename and create fail. this entry is responsible + * for undoing any side-effects caused by the lookup. + * this will always include freeing the pathname buffer. 
+ */ +procfs_abortop(ap) + struct vop_abortop_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + } */ *ap; +{ + + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + return (0); +} + +/* + * generic entry point for unsupported operations + */ +procfs_badop() +{ + + return (EIO); +} + +/* + * Invent attributes for pfsnode (vp) and store + * them in (vap). + * Directories lengths are returned as zero since + * any real length would require the genuine size + * to be computed, and nothing cares anyway. + * + * this is relatively minimal for procfs. + */ +procfs_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct pfsnode *pfs = VTOPFS(ap->a_vp); + struct vattr *vap = ap->a_vap; + struct proc *procp; + int error; + + /* first check the process still exists */ + switch (pfs->pfs_type) { + case Proot: + case Pcurproc: + procp = 0; + break; + + default: + procp = PFIND(pfs->pfs_pid); + if (procp == 0) + return (ENOENT); + } + + error = 0; + + /* start by zeroing out the attributes */ + VATTR_NULL(vap); + + /* next do all the common fields */ + vap->va_type = ap->a_vp->v_type; + vap->va_mode = pfs->pfs_mode; + vap->va_fileid = pfs->pfs_fileno; + vap->va_flags = 0; + vap->va_blocksize = PAGE_SIZE; + vap->va_bytes = vap->va_size = 0; + + /* + * Make all times be current TOD. + * It would be possible to get the process start + * time from the p_stat structure, but there's + * no "file creation" time stamp anyway, and the + * p_stat structure is not addressible if u. gets + * swapped out for that process. + * + * XXX + * Note that microtime() returns a timeval, not a timespec. 
+ */ + microtime(&vap->va_ctime); + vap->va_atime = vap->va_mtime = vap->va_ctime; + + /* + * If the process has exercised some setuid or setgid + * privilege, then rip away read/write permission so + * that only root can gain access. + */ + switch (pfs->pfs_type) { + case Pmem: + case Pregs: + case Pfpregs: + if (procp->p_flag & P_SUGID) + vap->va_mode &= ~((VREAD|VWRITE)| + ((VREAD|VWRITE)>>3)| + ((VREAD|VWRITE)>>6)); + case Pctl: + case Pstatus: + case Pnote: + case Pnotepg: + vap->va_nlink = 1; + vap->va_uid = procp->p_ucred->cr_uid; + vap->va_gid = procp->p_ucred->cr_gid; + break; + } + + /* + * now do the object specific fields + * + * The size could be set from struct reg, but it's hardly + * worth the trouble, and it puts some (potentially) machine + * dependent data into this machine-independent code. If it + * becomes important then this function should break out into + * a per-file stat function in the corresponding .c file. + */ + + switch (pfs->pfs_type) { + case Proot: + /* + * Set nlink to 1 to tell fts(3) we don't actually know. 
+ */ + vap->va_nlink = 1; + vap->va_uid = 0; + vap->va_gid = 0; + vap->va_size = vap->va_bytes = DEV_BSIZE; + break; + + case Pcurproc: { + char buf[16]; /* should be enough */ + vap->va_nlink = 1; + vap->va_uid = 0; + vap->va_gid = 0; + vap->va_size = vap->va_bytes = + sprintf(buf, "%ld", (long)curproc->p_pid); + break; + } + + case Pproc: + vap->va_nlink = 2; + vap->va_uid = procp->p_ucred->cr_uid; + vap->va_gid = procp->p_ucred->cr_gid; + vap->va_size = vap->va_bytes = DEV_BSIZE; + break; + + case Pfile: + error = EOPNOTSUPP; + break; + + case Pmem: + vap->va_bytes = vap->va_size = + ctob(procp->p_vmspace->vm_tsize + + procp->p_vmspace->vm_dsize + + procp->p_vmspace->vm_ssize); + break; + +#if defined(PT_GETREGS) || defined(PT_SETREGS) + case Pregs: + vap->va_bytes = vap->va_size = sizeof(struct reg); + break; +#endif + +#if defined(PT_GETFPREGS) || defined(PT_SETFPREGS) + case Pfpregs: + vap->va_bytes = vap->va_size = sizeof(struct fpreg); + break; +#endif + + case Pctl: + case Pstatus: + case Pnote: + case Pnotepg: + break; + + default: + panic("procfs_getattr"); + } + + return (error); +} + +procfs_setattr(ap) + struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + /* + * just fake out attribute setting + * it's not good to generate an error + * return, otherwise things like creat() + * will fail when they try to set the + * file length to 0. worse, this means + * that echo $note > /proc/$pid/note will fail. + */ + + return (0); +} + +/* + * implement access checking. + * + * actually, the check for super-user is slightly + * broken since it will allow read access to write-only + * objects. this doesn't cause any particular trouble + * but does mean that the i/o entry points need to check + * that the operation really does make sense. 
+ */ +procfs_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vattr va; + int error; + + if (error = VOP_GETATTR(ap->a_vp, &va, ap->a_cred, ap->a_p)) + return (error); + + return (vaccess(va.va_mode, va.va_uid, va.va_gid, ap->a_mode, + ap->a_cred)); +} + +/* + * lookup. this is incredibly complicated in the + * general case, however for most pseudo-filesystems + * very little needs to be done. + * + * unless you want to get a migraine, just make sure your + * filesystem doesn't do any locking of its own. otherwise + * read and inwardly digest ufs_lookup(). + */ +procfs_lookup(ap) + struct vop_lookup_args /* { + struct vnode * a_dvp; + struct vnode ** a_vpp; + struct componentname * a_cnp; + } */ *ap; +{ + struct componentname *cnp = ap->a_cnp; + struct vnode **vpp = ap->a_vpp; + struct vnode *dvp = ap->a_dvp; + char *pname = cnp->cn_nameptr; + struct proc_target *pt; + struct vnode *fvp; + pid_t pid; + struct pfsnode *pfs; + struct proc *p; + int i; + + *vpp = NULL; + + if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME) + return (EROFS); + + if (cnp->cn_namelen == 1 && *pname == '.') { + *vpp = dvp; + VREF(dvp); + /*VOP_LOCK(dvp);*/ + return (0); + } + + pfs = VTOPFS(dvp); + switch (pfs->pfs_type) { + case Proot: + if (cnp->cn_flags & ISDOTDOT) + return (EIO); + + if (CNEQ(cnp, "curproc", 7)) + return (procfs_allocvp(dvp->v_mount, vpp, 0, Pcurproc)); + + pid = atopid(pname, cnp->cn_namelen); + if (pid == NO_PID) + break; + + p = PFIND(pid); + if (p == 0) + break; + + return (procfs_allocvp(dvp->v_mount, vpp, pid, Pproc)); + + case Pproc: + if (cnp->cn_flags & ISDOTDOT) + return (procfs_root(dvp->v_mount, vpp)); + + p = PFIND(pfs->pfs_pid); + if (p == 0) + break; + + for (pt = proc_targets, i = 0; i < nproc_targets; pt++, i++) { + if (cnp->cn_namelen == pt->pt_namlen && + bcmp(pt->pt_name, pname, cnp->cn_namelen) == 0 && + (pt->pt_valid == NULL || 
(*pt->pt_valid)(p))) + goto found; + } + break; + + found: + if (pt->pt_pfstype == Pfile) { + fvp = procfs_findtextvp(p); + /* We already checked that it exists. */ + VREF(fvp); + VOP_LOCK(fvp); + *vpp = fvp; + return (0); + } + + return (procfs_allocvp(dvp->v_mount, vpp, pfs->pfs_pid, + pt->pt_pfstype)); + + default: + return (ENOTDIR); + } + + return (cnp->cn_nameiop == LOOKUP ? ENOENT : EROFS); +} + +int +procfs_validfile(p) + struct proc *p; +{ + + return (procfs_findtextvp(p) != NULLVP); +} + +/* + * readdir returns directory entries from pfsnode (vp). + * + * the strategy here with procfs is to generate a single + * directory entry at a time (struct pfsdent) and then + * copy that out to userland using uiomove. a more efficent + * though more complex implementation, would try to minimize + * the number of calls to uiomove(). for procfs, this is + * hardly worth the added code complexity. + * + * this should just be done through read() + */ +procfs_readdir(ap) + struct vop_readdir_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + u_long *a_cookies; + int a_ncookies; + } */ *ap; +{ + struct uio *uio = ap->a_uio; + struct pfsdent d; + struct pfsdent *dp = &d; + struct pfsnode *pfs; + int error; + int count; + int i; + + /* + * We don't allow exporting procfs mounts, and currently local + * requests do not need cookies. + */ + if (ap->a_ncookies) + panic("procfs_readdir: not hungry"); + + pfs = VTOPFS(ap->a_vp); + + if (uio->uio_resid < UIO_MX) + return (EINVAL); + if (uio->uio_offset & (UIO_MX-1)) + return (EINVAL); + if (uio->uio_offset < 0) + return (EINVAL); + + error = 0; + count = 0; + i = uio->uio_offset / UIO_MX; + + switch (pfs->pfs_type) { + /* + * this is for the process-specific sub-directories. + * all that is needed to is copy out all the entries + * from the procent[] table (top of this file). 
+ */ + case Pproc: { + struct proc *p; + struct proc_target *pt; + + p = PFIND(pfs->pfs_pid); + if (p == NULL) + break; + + for (pt = &proc_targets[i]; + uio->uio_resid >= UIO_MX && i < nproc_targets; pt++, i++) { + if (pt->pt_valid && (*pt->pt_valid)(p) == 0) + continue; + + dp->d_reclen = UIO_MX; + dp->d_fileno = PROCFS_FILENO(pfs->pfs_pid, pt->pt_pfstype); + dp->d_namlen = pt->pt_namlen; + bcopy(pt->pt_name, dp->d_name, pt->pt_namlen + 1); + dp->d_type = pt->pt_type; + + if (error = uiomove((caddr_t)dp, UIO_MX, uio)) + break; + } + + break; + } + + /* + * this is for the root of the procfs filesystem + * what is needed is a special entry for "curproc" + * followed by an entry for each process on allproc +#ifdef PROCFS_ZOMBIE + * and zombproc. +#endif + */ + + case Proot: { +#ifdef PROCFS_ZOMBIE + int doingzomb = 0; +#endif + int pcnt = 0; + volatile struct proc *p = allproc.lh_first; + + again: + for (; p && uio->uio_resid >= UIO_MX; i++, pcnt++) { + bzero((char *) dp, UIO_MX); + dp->d_reclen = UIO_MX; + + switch (i) { + case 0: /* `.' */ + case 1: /* `..' 
*/ + dp->d_fileno = PROCFS_FILENO(0, Proot); + dp->d_namlen = i + 1; + bcopy("..", dp->d_name, dp->d_namlen); + dp->d_name[i + 1] = '\0'; + dp->d_type = DT_DIR; + break; + + case 2: + dp->d_fileno = PROCFS_FILENO(0, Pcurproc); + dp->d_namlen = 7; + bcopy("curproc", dp->d_name, 8); + dp->d_type = DT_LNK; + break; + + default: + while (pcnt < i) { + pcnt++; + p = p->p_list.le_next; + if (!p) + goto done; + } + dp->d_fileno = PROCFS_FILENO(p->p_pid, Pproc); + dp->d_namlen = sprintf(dp->d_name, "%ld", + (long)p->p_pid); + dp->d_type = DT_REG; + p = p->p_list.le_next; + break; + } + + if (error = uiomove((caddr_t)dp, UIO_MX, uio)) + break; + } + done: + +#ifdef PROCFS_ZOMBIE + if (p == 0 && doingzomb == 0) { + doingzomb = 1; + p = zombproc.lh_first; + goto again; + } +#endif + + break; + + } + + default: + error = ENOTDIR; + break; + } + + uio->uio_offset = i * UIO_MX; + + return (error); +} + +/* + * readlink reads the link of `curproc' + */ +procfs_readlink(ap) + struct vop_readlink_args *ap; +{ + struct uio *uio = ap->a_uio; + char buf[16]; /* should be enough */ + int len; + + if (VTOPFS(ap->a_vp)->pfs_fileno != PROCFS_FILENO(0, Pcurproc)) + return (EINVAL); + + len = sprintf(buf, "%ld", (long)curproc->p_pid); + + return (uiomove((caddr_t)buf, len, ap->a_uio)); +} + +/* + * convert decimal ascii to pid_t + */ +static pid_t +atopid(b, len) + const char *b; + u_int len; +{ + pid_t p = 0; + + while (len--) { + char c = *b++; + if (c < '0' || c > '9') + return (NO_PID); + p = 10 * p + (c - '0'); + if (p > PID_MAX) + return (NO_PID); + } + + return (p); +} + +/* + * procfs vnode operations. 
+ */ + +#define VOPFUNC int (*)(void *) + +int (**procfs_vnodeop_p)(void *); +struct vnodeopv_entry_desc procfs_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)procfs_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)procfs_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)procfs_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)procfs_open }, /* open */ + { &vop_close_desc, (VOPFUNC)procfs_close }, /* close */ + { &vop_access_desc, (VOPFUNC)procfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)procfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)procfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)procfs_read }, /* read */ + { &vop_write_desc, (VOPFUNC)procfs_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)procfs_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)procfs_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)procfs_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)procfs_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)procfs_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)procfs_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)procfs_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)procfs_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)procfs_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)procfs_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)procfs_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)procfs_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)procfs_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)procfs_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)procfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)procfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)procfs_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)procfs_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)procfs_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)procfs_strategy }, /* strategy */ + { 
&vop_print_desc, (VOPFUNC)procfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)procfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)procfs_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)procfs_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)procfs_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)procfs_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)procfs_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)procfs_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)procfs_update }, /* update */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ +{ (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc procfs_vnodeop_opv_desc = + { &procfs_vnodeop_p, procfs_vnodeop_entries }; diff --git a/bsd/miscfs/specfs/Makefile b/bsd/miscfs/specfs/Makefile new file mode 100644 index 000000000..52832cc71 --- /dev/null +++ b/bsd/miscfs/specfs/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + specdev.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = miscfs/specfs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = miscfs/specfs + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c new file mode 100644 index 000000000..5391c20aa --- /dev/null +++ b/bsd/miscfs/specfs/spec_vnops.c @@ -0,0 +1,840 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)spec_vnops.c 8.14 (Berkeley) 5/21/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +struct vnode *speclisth[SPECHSZ]; + +/* symbolic sleep message strings for devices */ +char devopn[] = "devopn"; +char devio[] = "devio"; +char devwait[] = "devwait"; +char devin[] = "devin"; +char devout[] = "devout"; +char devioc[] = "devioc"; +char devcls[] = "devcls"; + +#define VOPFUNC int (*)(void *) + +int (**spec_vnodeop_p)(void *); +struct vnodeopv_entry_desc spec_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)err_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)spec_open }, /* open */ + { &vop_close_desc, (VOPFUNC)spec_close }, /* close */ + { &vop_access_desc, (VOPFUNC)spec_access }, /* access */ + 
{ &vop_getattr_desc, (VOPFUNC)spec_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)spec_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)spec_read }, /* read */ + { &vop_write_desc, (VOPFUNC)spec_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)nop_lease }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)spec_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)err_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)err_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)err_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)err_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)err_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)nop_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)nop_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)nop_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)nop_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)spec_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)spec_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)nop_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)err_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)err_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)nop_truncate }, /* truncate */ + { 
&vop_update_desc, (VOPFUNC)nop_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)spec_bwrite }, /* bwrite */ + { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */ + { &vop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)spec_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)spec_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc spec_vnodeop_opv_desc = + { &spec_vnodeop_p, spec_vnodeop_entries }; + +/* + * Trivial lookup routine that always fails. + */ +int +spec_lookup(ap) + struct vop_lookup_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + } */ *ap; +{ + + *ap->a_vpp = NULL; + return (ENOTDIR); +} + +void +set_blocksize(struct vnode *vp, dev_t dev) +{ + int (*size)(); + int rsize; + + if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) { + rsize = (*size)(dev); + if (rsize <= 0) /* did size fail? */ + vp->v_specsize = DEV_BSIZE; + else + vp->v_specsize = rsize; + } + else + vp->v_specsize = DEV_BSIZE; +} + +void +set_fsblocksize(struct vnode *vp) +{ + + if (vp->v_type == VBLK) { + dev_t dev = (dev_t)vp->v_rdev; + int maj = major(dev); + + if ((u_int)maj >= nblkdev) + return; + + set_blocksize(vp, dev); + } + +} + + +/* + * Open a special file. + */ +/* ARGSUSED */ +spec_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct proc *p = ap->a_p; + struct vnode *bvp, *vp = ap->a_vp; + dev_t bdev, dev = (dev_t)vp->v_rdev; + int maj = major(dev); + int error; + + /* + * Don't allow open if fs is mounted -nodev. 
+ */ + if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) + return (ENXIO); + + switch (vp->v_type) { + + case VCHR: + if ((u_int)maj >= nchrdev) + return (ENXIO); + if (ap->a_cred != FSCRED && (ap->a_mode & FWRITE)) { + /* + * When running in very secure mode, do not allow + * opens for writing of any disk character devices. + */ + if (securelevel >= 2 && isdisk(dev, VCHR)) + return (EPERM); + /* + * When running in secure mode, do not allow opens + * for writing of /dev/mem, /dev/kmem, or character + * devices whose corresponding block devices are + * currently mounted. + */ + if (securelevel >= 1) { + if ((bdev = chrtoblk(dev)) != NODEV && + vfinddev(bdev, VBLK, &bvp) && + bvp->v_usecount > 0 && + (error = vfs_mountedon(bvp))) + return (error); + if (iskmemdev(dev)) + return (EPERM); + } + } + if (cdevsw[maj].d_type == D_TTY) + vp->v_flag |= VISTTY; + VOP_UNLOCK(vp, 0, p); + error = (*cdevsw[maj].d_open)(dev, ap->a_mode, S_IFCHR, p); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); + + case VBLK: + if ((u_int)maj >= nblkdev) + return (ENXIO); + /* + * When running in very secure mode, do not allow + * opens for writing of any disk block devices. + */ + if (securelevel >= 2 && ap->a_cred != FSCRED && + (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) + return (EPERM); + /* + * Do not allow opens of block devices that are + * currently mounted. 
+ */ + if (error = vfs_mountedon(vp)) + return (error); + error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p); + if (!error) { + set_blocksize(vp, dev); + } + return(error); + } + return (0); +} + +/* + * Vnode op for read + */ +/* ARGSUSED */ +spec_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct uio *uio = ap->a_uio; + struct proc *p = uio->uio_procp; + struct buf *bp; + daddr_t bn, nextbn; + long bsize, bscale; + int devBlockSize=0; + int n, on, majordev, (*ioctl)(); + int error = 0; + dev_t dev; + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_READ) + panic("spec_read mode"); + if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc()) + panic("spec_read proc"); +#endif + if (uio->uio_resid == 0) + return (0); + + switch (vp->v_type) { + + case VCHR: + VOP_UNLOCK(vp, 0, p); + error = (*cdevsw[major(vp->v_rdev)].d_read) + (vp->v_rdev, uio, ap->a_ioflag); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); + + case VBLK: + if (uio->uio_offset < 0) + return (EINVAL); + + dev = vp->v_rdev; + + devBlockSize = vp->v_specsize; + + if (devBlockSize > PAGE_SIZE) + return (EINVAL); + + bscale = PAGE_SIZE / devBlockSize; + bsize = bscale * devBlockSize; + + do { + on = uio->uio_offset % bsize; + + bn = (uio->uio_offset / devBlockSize) &~ (bscale - 1); + + if (vp->v_lastr + bscale == bn) { + nextbn = bn + bscale; + error = breadn(vp, bn, (int)bsize, &nextbn, + (int *)&bsize, 1, NOCRED, &bp); + } else + error = bread(vp, bn, (int)bsize, NOCRED, &bp); + + vp->v_lastr = bn; + n = bsize - bp->b_resid; + if ((on > n) || error) { + if (!error) + error = EINVAL; + brelse(bp); + return (error); + } + n = min((unsigned)(n - on), uio->uio_resid); + + error = uiomove((char *)bp->b_data + on, n, uio); + if (n + on == bsize) + bp->b_flags |= B_AGE; + brelse(bp); + } while (error == 0 && uio->uio_resid > 0 && n != 0); + 
return (error); + + default: + panic("spec_read type"); + } + /* NOTREACHED */ +} + +/* + * Vnode op for write + */ +/* ARGSUSED */ +spec_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct uio *uio = ap->a_uio; + struct proc *p = uio->uio_procp; + struct buf *bp; + daddr_t bn; + int bsize, blkmask, bscale; + register int io_sync; + register int io_size; + int devBlockSize=0; + register int n, on; + int error = 0; + dev_t dev; + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_WRITE) + panic("spec_write mode"); + if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc()) + panic("spec_write proc"); +#endif + + switch (vp->v_type) { + + case VCHR: + VOP_UNLOCK(vp, 0, p); + error = (*cdevsw[major(vp->v_rdev)].d_write) + (vp->v_rdev, uio, ap->a_ioflag); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); + + case VBLK: + if (uio->uio_resid == 0) + return (0); + if (uio->uio_offset < 0) + return (EINVAL); + + io_sync = (ap->a_ioflag & IO_SYNC); + io_size = uio->uio_resid; + + dev = (vp->v_rdev); + + devBlockSize = vp->v_specsize; + if (devBlockSize > PAGE_SIZE) + return(EINVAL); + + bscale = PAGE_SIZE / devBlockSize; + blkmask = bscale - 1; + bsize = bscale * devBlockSize; + + + do { + bn = (uio->uio_offset / devBlockSize) &~ blkmask; + on = uio->uio_offset % bsize; + + n = min((unsigned)(bsize - on), uio->uio_resid); + + if (n == bsize) + bp = getblk(vp, bn, bsize, 0, 0, BLK_WRITE); + else + error = bread(vp, bn, bsize, NOCRED, &bp); + + if (error) { + brelse(bp); + return (error); + } + n = min(n, bsize - bp->b_resid); + + error = uiomove((char *)bp->b_data + on, n, uio); + + bp->b_flags |= B_AGE; + + if (io_sync) + bwrite(bp); + else { + if ((n + on) == bsize) + bawrite(bp); + else + bdwrite(bp); + } + } while (error == 0 && uio->uio_resid > 0 && n != 0); + return (error); + + default: + panic("spec_write 
type"); + } + /* NOTREACHED */ +} + +/* + * Device ioctl operation. + */ +/* ARGSUSED */ +spec_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + int a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + dev_t dev = ap->a_vp->v_rdev; + + switch (ap->a_vp->v_type) { + + case VCHR: + return ((*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, + ap->a_fflag, ap->a_p)); + + case VBLK: + if (ap->a_command == 0 && (int)ap->a_data == B_TAPE) + if (bdevsw[major(dev)].d_type == D_TAPE) + return (0); + else + return (1); + return ((*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, + ap->a_fflag, ap->a_p)); + + default: + panic("spec_ioctl"); + /* NOTREACHED */ + } +} + +/* ARGSUSED */ +spec_select(ap) + struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register dev_t dev; + + switch (ap->a_vp->v_type) { + + default: + return (1); /* XXX */ + + case VCHR: + dev = ap->a_vp->v_rdev; + return (*cdevsw[major(dev)].d_select)(dev, ap->a_which, ap->a_p); + } +} +/* + * Synch buffers associated with a block device + */ +/* ARGSUSED */ +int +spec_fsync(ap) + struct vop_fsync_args /* { + struct vnode *a_vp; + struct ucred *a_cred; + int a_waitfor; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct buf *bp; + struct buf *nbp; + int s; + + if (vp->v_type == VCHR) + return (0); + /* + * Flush all dirty buffers associated with a block device. 
+ */ +loop: + s = splbio(); + for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { + nbp = bp->b_vnbufs.le_next; + if ((bp->b_flags & B_BUSY)) + continue; + if ((bp->b_flags & B_DELWRI) == 0) + panic("spec_fsync: not dirty"); + bremfree(bp); + bp->b_flags |= B_BUSY; + splx(s); + bawrite(bp); + goto loop; + } + if (ap->a_waitfor == MNT_WAIT) { + while (vp->v_numoutput) { + vp->v_flag |= VBWAIT; + tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "spec_fsync", 0); + } +#if DIAGNOSTIC + if (vp->v_dirtyblkhd.lh_first) { + vprint("spec_fsync: dirty", vp); + splx(s); + goto loop; + } +#endif + } + splx(s); + return (0); +} + +/* + * Just call the device strategy routine + */ +spec_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + (*bdevsw[major(ap->a_bp->b_dev)].d_strategy)(ap->a_bp); + return (0); +} + +/* + * This is a noop, simply returning what one has been given. + */ +spec_bmap(ap) + struct vop_bmap_args /* { + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + int *a_runp; + } */ *ap; +{ + + if (ap->a_vpp != NULL) + *ap->a_vpp = ap->a_vp; + if (ap->a_bnp != NULL) + *ap->a_bnp = ap->a_bn * (PAGE_SIZE / ap->a_vp->v_specsize); + if (ap->a_runp != NULL) + *ap->a_runp = (MAXPHYSIO / PAGE_SIZE) - 1; + return (0); +} + +/* + * This is a noop, simply returning what one has been given. 
+ */ +spec_cmap(ap) + struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_offset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; + } */ *ap; +{ + return (EOPNOTSUPP); +} + + +/* + * Device close routine + */ +/* ARGSUSED */ +spec_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + dev_t dev = vp->v_rdev; + int (*devclose) __P((dev_t, int, int, struct proc *)); + int mode, error; + + switch (vp->v_type) { + + case VCHR: + /* + * Hack: a tty device that is a controlling terminal + * has a reference from the session structure. + * We cannot easily tell that a character device is + * a controlling terminal, unless it is the closing + * process' controlling terminal. In that case, + * if the reference count is 2 (this last descriptor + * plus the session), release the reference from the session. + */ + if (vcount(vp) == 2 && ap->a_p && + vp == ap->a_p->p_session->s_ttyvp) { + vrele(vp); + ap->a_p->p_session->s_ttyvp = NULL; + } + /* + * If the vnode is locked, then we are in the midst + * of forcably closing the device, otherwise we only + * close on last reference. + */ + if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0) + return (0); + devclose = cdevsw[major(dev)].d_close; + mode = S_IFCHR; + break; + + case VBLK: + /* + * On last close of a block device (that isn't mounted) + * we must invalidate any in core blocks, so that + * we can, for instance, change floppy disks. + */ + if (error = vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0)) + return (error); + /* + * We do not want to really close the device if it + * is still in use unless we are trying to close it + * forcibly. 
Since every use (buffer, vnode, swap, cmap) + * holds a reference to the vnode, and because we mark + * any other vnodes that alias this device, when the + * sum of the reference counts on all the aliased + * vnodes descends to one, we are on last close. + */ + if (vcount(vp) > 1 && (vp->v_flag & VXLOCK) == 0) + return (0); + devclose = bdevsw[major(dev)].d_close; + mode = S_IFBLK; + break; + + default: + panic("spec_close: not special"); + } + + return ((*devclose)(dev, ap->a_fflag, mode, ap->a_p)); +} + +/* + * Print out the contents of a special device vnode. + */ +spec_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + printf("tag VT_NON, dev %d, %d\n", major(ap->a_vp->v_rdev), + minor(ap->a_vp->v_rdev)); +} + +/* + * Return POSIX pathconf information applicable to special devices. + */ +spec_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_MAX_CANON: + *ap->a_retval = MAX_CANON; + return (0); + case _PC_MAX_INPUT: + *ap->a_retval = MAX_INPUT; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_VDISABLE: + *ap->a_retval = _POSIX_VDISABLE; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +int +spec_devblocksize(ap) + struct vop_devblocksize_args /* { + struct vnode *a_vp; + int *a_retval; + } */ *ap; +{ + *ap->a_retval = (ap->a_vp->v_specsize); + return (0); +} + +/* + * Special device failed operation + */ +spec_ebadf() +{ + + return (EBADF); +} + +/* + * Special device bad operation + */ +spec_badop() +{ + + panic("spec_badop called"); + /* NOTREACHED */ +} + +/* Blktooff derives file offset from logical block number */ +int +spec_blktooff(ap) + struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; + } */ *ap; +{ 
+ register struct vnode *vp = ap->a_vp; + + switch (vp->v_type) { + case VCHR: + *ap->a_offset = (off_t)-1; /* failure */ + return (EOPNOTSUPP); + + case VBLK: + printf("spec_blktooff: not implemented for VBLK\n"); + *ap->a_offset = (off_t)-1; /* failure */ + return (EOPNOTSUPP); + + default: + panic("spec_blktooff type"); + } + /* NOTREACHED */ +} + +/* Offtoblk derives logical block number from file offset */ +int +spec_offtoblk(ap) + struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + + switch (vp->v_type) { + case VCHR: + *ap->a_lblkno = (daddr_t)-1; /* failure */ + return (EOPNOTSUPP); + + case VBLK: + printf("spec_offtoblk: not implemented for VBLK\n"); + *ap->a_lblkno = (daddr_t)-1; /* failure */ + return (EOPNOTSUPP); + + default: + panic("spec_offtoblk type"); + } + /* NOTREACHED */ +} diff --git a/bsd/miscfs/specfs/specdev.h b/bsd/miscfs/specfs/specdev.h new file mode 100644 index 000000000..fa1e5c2bc --- /dev/null +++ b/bsd/miscfs/specfs/specdev.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995, 1998 Apple Computer, Inc. All Rights Reserved. + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)specdev.h 8.6 (Berkeley) 5/21/95 + */ + +#ifndef _MISCFS_SPECFS_SPECDEV_H_ +#define _MISCFS_SPECFS_SPECDEV_H_ + +#include + +/* + * This structure defines the information maintained about + * special devices. It is allocated in checkalias and freed + * in vgone. + */ +struct specinfo { + struct vnode **si_hashchain; + struct vnode *si_specnext; + long si_flags; + dev_t si_rdev; + daddr_t si_size; /* block device size in bytes */ +}; +/* + * Exported shorthand + */ +#define v_rdev v_specinfo->si_rdev +#define v_hashchain v_specinfo->si_hashchain +#define v_specnext v_specinfo->si_specnext +#define v_specflags v_specinfo->si_flags +#define v_specsize v_specinfo->si_size + +/* + * Flags for specinfo + */ +#define SI_MOUNTEDON 0x0001 /* block special device is mounted on */ + +/* + * Special device management + */ +#define SPECHSZ 64 +#if ((SPECHSZ&(SPECHSZ-1)) == 0) +#define SPECHASH(rdev) (((rdev>>5)+(rdev))&(SPECHSZ-1)) +#else +#define SPECHASH(rdev) (((unsigned)((rdev>>5)+(rdev)))%SPECHSZ) +#endif + +extern struct vnode *speclisth[SPECHSZ]; + +/* + * Prototypes for special file operations on vnodes. 
+ */ +extern int (**spec_vnodeop_p)(void *); +struct nameidata; +struct componentname; +struct ucred; +struct flock; +struct buf; +struct uio; + +int spec_ebadf(); + +int spec_lookup __P((struct vop_lookup_args *)); +#define spec_create ((int (*) __P((struct vop_access_args *)))err_create) +#define spec_mknod ((int (*) __P((struct vop_access_args *)))err_mknod) +int spec_open __P((struct vop_open_args *)); +int spec_close __P((struct vop_close_args *)); +#define spec_access ((int (*) __P((struct vop_access_args *)))spec_ebadf) +#define spec_getattr ((int (*) __P((struct vop_getattr_args *)))spec_ebadf) +#define spec_setattr ((int (*) __P((struct vop_setattr_args *)))spec_ebadf) +int spec_read __P((struct vop_read_args *)); +int spec_write __P((struct vop_write_args *)); +#define spec_lease_check ((int (*) __P((struct vop_access_args *)))nop_lease) +int spec_ioctl __P((struct vop_ioctl_args *)); +int spec_select __P((struct vop_select_args *)); +#define spec_revoke ((int (*) __P((struct vop_access_args *)))nop_revoke) +#define spec_mmap ((int (*) __P((struct vop_access_args *)))err_mmap) +int spec_fsync __P((struct vop_fsync_args *)); +#define spec_seek ((int (*) __P((struct vop_access_args *)))err_seek) +#define spec_remove ((int (*) __P((struct vop_access_args *)))err_remove) +#define spec_link ((int (*) __P((struct vop_access_args *)))err_link) +#define spec_rename ((int (*) __P((struct vop_access_args *)))err_rename) +#define spec_mkdir ((int (*) __P((struct vop_access_args *)))err_mkdir) +#define spec_rmdir ((int (*) __P((struct vop_access_args *)))err_rmdir) +#define spec_symlink ((int (*) __P((struct vop_access_args *)))err_symlink) +#define spec_readdir ((int (*) __P((struct vop_access_args *)))err_readdir) +#define spec_readlink ((int (*) __P((struct vop_access_args *)))err_readlink) +#define spec_abortop ((int (*) __P((struct vop_access_args *)))err_abortop) +#define spec_inactive ((int (*) __P((struct vop_access_args *)))nop_inactive) +#define 
spec_reclaim ((int (*) __P((struct vop_access_args *)))nop_reclaim) +#define spec_lock ((int (*) __P((struct vop_access_args *)))nop_lock) +#define spec_unlock ((int (*) __P((struct vop_access_args *)))nop_unlock) +int spec_bmap __P((struct vop_bmap_args *)); +int spec_strategy __P((struct vop_strategy_args *)); +int spec_print __P((struct vop_print_args *)); +#define spec_islocked ((int (*) __P((struct vop_access_args *)))nop_islocked) +int spec_pathconf __P((struct vop_pathconf_args *)); +#define spec_advlock ((int (*) __P((struct vop_access_args *)))err_advlock) +#define spec_blkatoff ((int (*) __P((struct vop_access_args *)))err_blkatoff) +#define spec_valloc ((int (*) __P((struct vop_access_args *)))err_valloc) +#define spec_vfree ((int (*) __P((struct vop_access_args *)))err_vfree) +#define spec_truncate ((int (*) __P((struct vop_access_args *)))nop_truncate) +#define spec_update ((int (*) __P((struct vop_access_args *)))nop_update) +#define spec_reallocblks \ + ((int (*) __P((struct vop_reallocblks_args *)))err_reallocblks) +#define spec_bwrite ((int (*) __P((struct vop_bwrite_args *)))nop_bwrite) +int spec_devblocksize __P((struct vop_devblocksize_args *)); +int spec_blktooff __P((struct vop_blktooff_args *)); +int spec_offtoblk __P((struct vop_offtoblk_args *)); +int spec_cmap __P((struct vop_cmap_args *)); + +#endif /* _MISCFS_SPECFS_SPECDEV_H_ */ diff --git a/bsd/miscfs/synthfs/synthfs.h b/bsd/miscfs/synthfs/synthfs.h new file mode 100644 index 000000000..74fb3f1f2 --- /dev/null +++ b/bsd/miscfs/synthfs/synthfs.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, Apple Computer, Inc. All rights reserved. */ +/* + * Header file for synthfs data structures + * + * Change History: + * + * 17-Aug-1999 Pat Dirks New today. + * + */ + +#ifndef __SYNTHFS_H__ +#define __SYNTHFS_H__ + +#include +#include +#include +#include + + +#if DEBUG +extern void Debugger(const char *message); /* Private to pexpert... */ +#endif +__END_DECLS + +/* XXX Get rid of this as soon as sys/malloc.h can be updated to define a real M_SYNTHFS */ +#define M_SYNTHFS M_TEMP + +/* XXX Get rid of this as soon as sys/vnode.h can be updated to define a real VT_SYNTHFS */ +#define VT_SYNTHFS (VT_OTHER+1) + + +struct synthfs_mntdata +{ + struct mount *synthfs_mp; /* filesystem vfs structure */ + struct vnode *synthfs_rootvp; + dev_t synthfs_mounteddev; + unsigned long synthfs_nextid; + unsigned long synthfs_filecount; + unsigned long synthfs_dircount; + unsigned long synthfs_encodingsused; + LIST_HEAD(synthfs_fsvnodelist, vnode) synthfs_fsvnodes; +}; + +/* + * Various sorts of synthfs vnodes: + */ +enum synthfsnodetype { + SYNTHFS_DIRECTORY = 1, + SYNTHFS_FILE, + SYNTHFS_SYMLINK +}; + +struct synthfs_dir_node { + unsigned long d_entrycount; + TAILQ_HEAD(synthfs_d_subnodelist, synthfsnode) d_subnodes; + +}; + +struct synthfs_file_node { + off_t f_size; +}; + +struct synthfs_symlink_node { + int s_length; + char *s_symlinktarget; /* 
Dynamically allocated */ +}; + + +struct synthfsnode +{ + TAILQ_ENTRY(synthfsnode) s_sibling; /* synthfsnodes in a given directory */ + enum synthfsnodetype s_type; + struct synthfsnode *s_parent; + struct vnode *s_vp; + char *s_name; + struct lock__bsd__ s_lock; + unsigned long s_nodeflags; /* Internal synthfs flags: IN_CHANGED, IN_MODIFIED, etc. */ + unsigned long s_pflags; /* File system flags: IMMUTABLE, etc. */ + unsigned long s_nodeid; + unsigned long s_generation; + mode_t s_mode; + short s_linkcount; + uid_t s_uid; + gid_t s_gid; + dev_t s_rdev; + struct timeval s_createtime; + struct timeval s_accesstime; + struct timeval s_modificationtime; + struct timeval s_changetime; + struct timeval s_backuptime; + unsigned long s_flags; /* inode flags: IMMUTABLE, APPEND, etc. */ + unsigned long s_script; + unsigned long s_finderInfo[8]; + union { + struct synthfs_dir_node d; + struct synthfs_file_node f; + struct synthfs_symlink_node s; + } s_u; +}; + +#define ROOT_DIRID 2 +#define FIRST_SYNTHFS_ID 0x10 + +/* These flags are kept in flags. */ +#define IN_ACCESS 0x0001 /* Access time update request. */ +#define IN_CHANGE 0x0002 /* Change time update request. */ +#define IN_UPDATE 0x0004 /* Modification time update request. */ +#define IN_MODIFIED 0x0008 /* Node has been modified. */ +#define IN_RENAME 0x0010 /* Node is being renamed. */ +//#define IN_SHLOCK 0x0020 /* File has shared lock. */ +//#define IN_EXLOCK 0x0040 /* File has exclusive lock. 
*/ +//#define IN_ALLOCATING 0x1000 /* vnode is in transit, wait or ignore */ +//#define IN_WANT 0x2000 /* Its being waited for */ + +#define SYNTHFSTIMES(sp, t1, t2) { \ + if ((sp)->s_nodeflags & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) { \ + (sp)->s_nodeflags |= IN_MODIFIED; \ + if ((sp)->s_nodeflags & IN_ACCESS) { \ + (sp)->s_accesstime = *(t1); \ + }; \ + if ((sp)->s_nodeflags & IN_UPDATE) { \ + (sp)->s_modificationtime = *(t2); \ + } \ + if ((sp)->s_nodeflags & IN_CHANGE) { \ + (sp)->s_changetime = time; \ + }; \ + (sp)->s_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); \ + } \ +} + +#define ATTR_REF_DATA(attrrefptr) (((char *)(attrrefptr)) + ((attrrefptr)->attr_dataoffset)) + +#define STOV(SP) ((SP)->s_vp) + +#define VTOS(VP) ((struct synthfsnode *)((VP)->v_data)) + +#define VTOVFS(VP) ((VP)->v_mount) +#define STOVFS(HP) ((SP)->s_vp->v_mount) +#define SFSTOVFS(SFSMP) ((SFSMP)->sfs_mp) + +#define VTOSFS(VP) ((struct synthfs_mntdata *)((VP)->v_mount->mnt_data)) +#define STOTFS(SP) ((struct synthfs_mntdata *)(SP)->s_vp->v_mount->mnt_data) +#define VFSTOSFS(MP) ((struct synthfs_mntdata *)(MP)->mnt_data) + +#if DEBUG +#define DBG_TRACE(P) printf P; +#define DBG_INIT(P) printf P; +#define DBG_VOP(P) printf P; +//#define DBG_ASSERT(a) { if (!(a)) { panic("File "__FILE__", line %d: assertion '%s' failed.\n", __LINE__, #a); } } + #define DBG_ASSERT(a) { if (!(a)) { Debugger("Oops - File __FILE__ , line __LINE__: assertion '"#a"' failed."); } } +#else +#define DBG_TRACE(P) +#define DBG_INIT(P) +#define DBG_VOP(P) +#define DBG_ASSERT(a) +#endif + +extern int (**synthfs_vnodeop_p)(void *); + +__BEGIN_DECLS +int synthfs_mount __P((struct mount *, char *, caddr_t, struct nameidata *, struct proc *)); +int synthfs_start __P((struct mount *, int, struct proc *)); +int synthfs_unmount __P((struct mount *, int, struct proc *)); +int synthfs_root __P((struct mount *, struct vnode **)); +int synthfs_quotactl __P((struct mount *, int, uid_t, caddr_t, struct proc *)); +int 
synthfs_statfs __P((struct mount *, struct statfs *, struct proc *)); +int synthfs_sync __P((struct mount *, int, struct ucred *, struct proc *)); +int synthfs_vget __P((struct mount *, void *ino, struct vnode **)); +int synthfs_fhtovp __P((struct mount *, struct fid *, struct mbuf *, struct vnode **, int *, struct ucred **)); +int synthfs_vptofh __P((struct vnode *, struct fid *)); +int synthfs_init __P((struct vfsconf *)); +int synthfs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t, struct proc *)); + +int synthfs_create __P((struct vop_create_args *)); +int synthfs_open __P((struct vop_open_args *)); +int synthfs_mmap __P((struct vop_mmap_args *)); +int synthfs_access __P((struct vop_access_args *)); +int synthfs_getattr __P((struct vop_getattr_args *)); +int synthfs_setattr __P((struct vop_setattr_args *)); +int synthfs_rename __P((struct vop_rename_args *)); +int synthfs_select __P((struct vop_select_args *)); +int synthfs_remove __P((struct vop_remove_args *)); +int synthfs_mkdir __P((struct vop_mkdir_args *)); +int synthfs_rmdir __P((struct vop_rmdir_args *)); +int synthfs_symlink __P((struct vop_symlink_args *)); +int synthfs_readlink __P((struct vop_readlink_args *)); +int synthfs_readdir __P((struct vop_readdir_args *)); +int synthfs_cached_lookup __P((struct vop_cachedlookup_args *)); +int synthfs_lookup __P((struct vop_cachedlookup_args *)); +int synthfs_pathconf __P((struct vop_pathconf_args *)); +int synthfs_update __P((struct vop_update_args *)); + +int synthfs_lock __P((struct vop_lock_args *)); +int synthfs_unlock __P((struct vop_unlock_args *)); +int synthfs_islocked __P((struct vop_islocked_args *)); + +int synthfs_inactive __P((struct vop_inactive_args*)); +int synthfs_reclaim __P((struct vop_reclaim_args*)); + +void synthfs_setupuio __P((struct iovec *iov, struct uio *uio, void *buffer, size_t bufsize, enum uio_seg space, enum uio_rw direction, struct proc *p)); +int synthfs_new_directory __P((struct mount *mp, struct vnode *dp, 
const char *name, unsigned long nodeid, mode_t mode, struct proc *p, struct vnode **vpp)); +int synthfs_new_symlink __P((struct mount *mp, struct vnode *dp, const char *name, unsigned long nodeid, char *targetstring, struct proc *p, struct vnode **vpp)); +long synthfs_adddirentry __P((u_int32_t fileno, u_int8_t type, const char *name, struct uio *uio)); +int synthfs_remove_entry __P((struct vnode *vp)); +int synthfs_remove_directory __P((struct vnode *vp)); +int synthfs_remove_symlink __P((struct vnode *vp)); +int synthfs_move_rename_entry __P((struct vnode *source_vp, struct vnode *newparent_vp, char *newname)); +int synthfs_derive_vnode_path __P((struct vnode *vp, char *vnpath, size_t pathbuffersize)); + +#endif /* __SYNTHFS_H__ */ diff --git a/bsd/miscfs/synthfs/synthfs_util.c b/bsd/miscfs/synthfs/synthfs_util.c new file mode 100644 index 000000000..814536547 --- /dev/null +++ b/bsd/miscfs/synthfs/synthfs_util.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, Apple Computer, Inc. All rights reserved. 
*/ +/* + * Change History: + * + * 17-Aug-1999 Pat Dirks New today. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "synthfs.h" + +struct synthfs_direntry_head { + u_int32_t d_fileno; /* file number of entry */ + u_int16_t d_reclen; /* length of this record */ + u_int8_t d_type; /* file type, see below */ + u_int8_t d_namlen; /* length of string in d_name */ +}; + + +#define PATHSEPARATOR '/' +#define ROOTDIRID 2 + +void synthfs_setupuio(struct iovec *iov, + struct uio *uio, + void *buffer, + size_t bufsize, + enum uio_seg space, + enum uio_rw direction, + struct proc *p) { + iov->iov_base = (char *)buffer; + iov->iov_len = bufsize; + + uio->uio_iov = iov; + uio->uio_iovcnt = 1; + uio->uio_offset = 0; + uio->uio_resid = bufsize; + uio->uio_segflg = space; + uio->uio_rw = direction; + uio->uio_procp = p; +} + + +static int synthfs_insertnode(struct synthfsnode *newnode_sp, struct synthfsnode *parent_sp) { + struct timeval now; + + DBG_ASSERT(parent_sp->s_type == SYNTHFS_DIRECTORY); + + TAILQ_INSERT_TAIL(&parent_sp->s_u.d.d_subnodes, newnode_sp, s_sibling); + ++parent_sp->s_u.d.d_entrycount; + newnode_sp->s_parent = parent_sp; + + parent_sp->s_nodeflags |= IN_CHANGE | IN_MODIFIED; + now = time; + VOP_UPDATE(STOV(parent_sp), &now, &now, 0); + + return 0; +} + + + +static int synthfs_newnode(struct mount *mp, struct vnode *dp, const char *name, unsigned long nodeid, mode_t mode, struct proc *p, struct vnode **vpp) { + int result; + struct synthfsnode *sp; + struct vnode *vp; + struct timeval now; + char *nodename; + + /* Allocate the synthfsnode now to avoid blocking between the call + to getnewvnode(), below, and the initialization of v_data: */ + MALLOC(sp, struct synthfsnode *, sizeof(struct synthfsnode), M_SYNTHFS, M_WAITOK); + + if (name == NULL) { + MALLOC(nodename, char *, 1, M_TEMP, M_WAITOK); + 
nodename[0] = 0; + } else { + MALLOC(nodename, char *, strlen(name) + 1, M_TEMP, M_WAITOK); + strcpy(nodename, name); + }; + + /* + Note that getnewvnode() returns the vnode with a refcount of +1; + this routine returns the newly created vnode with this positive refcount. + */ + result = getnewvnode(VT_SYNTHFS, mp, synthfs_vnodeop_p, &vp); + if (result != 0) { + DBG_VOP(("getnewvnode failed with error code %d\n", result)); + FREE(nodename, M_TEMP); + FREE(sp, M_TEMP); + return result; + } + if (vp == NULL) { + DBG_VOP(("getnewvnod returned NULL without an error!\n")); + FREE(nodename, M_TEMP); + FREE(sp, M_TEMP); + return EINVAL; + } + + /* Initialize the relevant synthfsnode fields: */ + bzero(sp, sizeof(*sp)); + lockinit(&sp->s_lock, PINOD, "synthfsnode", 0, 0); + sp->s_nodeid = nodeid; + + /* Initialize all times from a consistent snapshot of the clock: */ + now = time; + sp->s_createtime = now; + sp->s_accesstime = now; + sp->s_modificationtime = now; + sp->s_changetime = now; + sp->s_name = nodename; + sp->s_mode = mode; + + sp->s_vp = vp; + vp->v_data = sp; + + vget(vp, LK_EXCLUSIVE, p); + + /* If there's a parent directory, update its subnode structures to insert this new node: */ + if (dp) { + result = synthfs_insertnode(sp, VTOS(dp)); + }; + + *vpp = vp; + + return result; +} + + + +int synthfs_remove_entry(struct vnode *vp) { + struct synthfsnode *sp = VTOS(vp); + struct synthfsnode *psp = sp->s_parent; + struct timeval now; + + if (psp) { + TAILQ_REMOVE(&psp->s_u.d.d_subnodes, sp, s_sibling); + --psp->s_u.d.d_entrycount; + + psp->s_nodeflags |= IN_CHANGE | IN_MODIFIED; + now = time; + VOP_UPDATE(STOV(psp), &now, &now, 0); + }; + + return 0; +} + + + +int synthfs_move_rename_entry(struct vnode *source_vp, struct vnode *newparent_vp, char *new_name) { + struct synthfsnode *source_sp = VTOS(source_vp); + struct synthfsnode *parent_sp = VTOS(newparent_vp); + char *new_name_ptr; + int result; + + if (parent_sp == source_sp->s_parent) return 0; + + /* Unlink 
the entry from its current place: */ + result = synthfs_remove_entry(source_vp); + if (result) return result; + + /* Change the name as necessary: */ + FREE(source_sp->s_name, M_TEMP); + if (new_name == NULL) { + MALLOC(new_name_ptr, char *, 1, M_TEMP, M_WAITOK); + new_name_ptr[0] = 0; + } else { + MALLOC(new_name_ptr, char *, strlen(new_name) + 1, M_TEMP, M_WAITOK); + strcpy(new_name_ptr, new_name); + }; + source_sp->s_name = new_name_ptr; + + /* Insert the entry in its new home: */ + return synthfs_insertnode(source_sp, parent_sp); +} + + + +int synthfs_new_directory(struct mount *mp, struct vnode *dp, const char *name, unsigned long nodeid, mode_t mode, struct proc *p, struct vnode **vpp) { + int result; + struct vnode *vp; + struct synthfsnode *sp; + + result = synthfs_newnode(mp, dp, name, nodeid, mode, p, &vp); + if (result) { + return result; + }; + sp = VTOS(vp); + sp->s_linkcount = 2; + + /* Initialize the relevant vnode fields: */ + vp->v_type = VDIR; + if (dp) { + ++VTOS(dp)->s_linkcount; /* Account for the [fictitious] ".." link */ + }; + + /* Set up the directory-specific fields: */ + sp->s_type = SYNTHFS_DIRECTORY; + sp->s_u.d.d_entrycount = 0; /* No entries in this directory yet */ + TAILQ_INIT(&sp->s_u.d.d_subnodes); /* No subnodes of this directory yet */ + + *vpp = vp; + + return 0; +} + + + +int synthfs_remove_directory(struct vnode *vp) { + struct synthfsnode *sp = VTOS(vp); + struct synthfsnode *psp = sp->s_parent; + + if (psp && (sp->s_type == SYNTHFS_DIRECTORY) && (psp != sp)) { + --psp->s_linkcount; /* account for the [fictitious] ".." 
link now removed */ + }; + + /* Do the standard cleanup involved in pruning an entry from the filesystem: */ + return synthfs_remove_entry(vp); /* Do whatever standard cleanup is required */ +} + + + +int synthfs_new_symlink( + struct mount *mp, + struct vnode *dp, + const char *name, + unsigned long nodeid, + char *targetstring, + struct proc *p, + struct vnode **vpp) { + + int result; + struct vnode *vp; + struct synthfsnode *sp; + + result = synthfs_newnode(mp, dp, name, nodeid, 0, p, &vp); + if (result) { + return result; + }; + sp = VTOS(vp); + sp->s_linkcount = 1; + + /* Initialize the relevant vnode fields: */ + vp->v_type = VLNK; + + /* Set up the symlink-specific fields: */ + sp->s_type = SYNTHFS_SYMLINK; + sp->s_u.s.s_length = strlen(targetstring); + MALLOC(sp->s_u.s.s_symlinktarget, char *, sp->s_u.s.s_length + 1, M_TEMP, M_WAITOK); + strcpy(sp->s_u.s.s_symlinktarget, targetstring); + + *vpp = vp; + + return 0; +} + + + +int synthfs_remove_symlink(struct vnode *vp) { + struct synthfsnode *sp = VTOS(vp); + + FREE(sp->s_u.s.s_symlinktarget, M_TEMP); + + /* Do the standard cleanup involved in pruning an entry from the filesystem: */ + return synthfs_remove_entry(vp); /* Do whatever standard cleanup is required */ +} + + + + + + +long synthfs_adddirentry(u_int32_t fileno, u_int8_t type, const char *name, struct uio *uio) { + struct synthfs_direntry_head direntry; + long namelength; + int padding; + long padtext = 0; + unsigned short direntrylength; + + namelength = ((name == NULL) ? 
0 : strlen(name)); + padding = (4 - (namelength & 3)) & 3; + direntrylength = sizeof(struct synthfs_direntry_head) + namelength + padding; + + direntry.d_fileno = fileno; + direntry.d_reclen = direntrylength; + direntry.d_type = type; + direntry.d_namlen = namelength; + + if (uio->uio_resid < direntry.d_reclen) { + direntrylength = 0; + } else { + uiomove((caddr_t)(&direntry), sizeof(direntry), uio); + if (name != NULL) { + uiomove((caddr_t)name, namelength, uio); + }; + if (padding > 0) { + uiomove((caddr_t)&padtext, padding, uio); + }; + }; + + return direntrylength; +} + + diff --git a/bsd/miscfs/synthfs/synthfs_vfsops.c b/bsd/miscfs/synthfs/synthfs_vfsops.c new file mode 100644 index 000000000..44a64a09f --- /dev/null +++ b/bsd/miscfs/synthfs/synthfs_vfsops.c @@ -0,0 +1,583 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998 Apple Computer, Inc. All Rights Reserved */ +/* + * Change History: + * + * 17-Aug-1999 Pat Dirks New today. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "synthfs.h" + +#define LOADABLE_FS 0 + +typedef int (*PFI)(); + +struct vfsops synthfs_vfsops = { + synthfs_mount, + synthfs_start, + synthfs_unmount, + synthfs_root, + synthfs_quotactl, + synthfs_statfs, + synthfs_sync, + synthfs_vget, + synthfs_fhtovp, + synthfs_vptofh, + synthfs_init, + synthfs_sysctl +}; + +#define ROOTMPMODE 0755 +#define ROOTPLACEHOLDERMODE 0700 +static char synthfs_fs_name[MFSNAMELEN] = "synthfs"; +static char synthfs_fake_mntfromname[] = ""; + + +extern struct vnodeopv_desc synthfs_vnodeop_opv_desc; + +/* The following refer to kernel global variables used in the loading/initialization: */ +extern int maxvfsslots; /* Total number of slots in the system's vfsconf table */ +extern int maxvfsconf; /* The highest fs type number [old-style ID] in use [dispite its name] */ +extern int vfs_opv_numops; /* The total number of defined vnode operations */ + +int vn_mkdir(struct proc *p, char *path, int mode); +int vn_symlink(struct proc *p, char *path, char *link); + + + + +#if LOADABLE_FS +void +synthfs_load(int loadArgument) { + struct vfsconf *newvfsconf = NULL; + int j; + int (***opv_desc_vector_p)() = NULL; + int (**opv_desc_vector)(); + struct vnodeopv_entry_desc *opve_descp; + int error = 0; + +#pragma unused(loadArgument) + + /* + * This routine is responsible for all the initialization that would + * ordinarily be done as part of the system startup; it calls synthfs_init + * to do the initialization that is strictly synthfs-specific. 
+ */ + + DBG_VOP(("load_synthfs: starting ...\n")); + + MALLOC(newvfsconf, void *, sizeof(struct vfsconf), M_SYNTHFS, M_WAITOK); + DBG_VOP(("load_synthfs: Allocated new vfsconf list entry, newvfsconf = 0x%08lx.\n", (unsigned long)newvfsconf)); + bzero(newvfsconf, sizeof(struct vfsconf)); + + if (newvfsconf) { + DBG_VOP(("load_synthfs: filling in newly allocated vfsconf entry at 0x%08lX.\n", (long)newvfsconf)); + newvfsconf->vfc_vfsops = &synthfs_vfsops; + strncpy(&newvfsconf->vfc_name[0], synthfs_fs_name, MFSNAMELEN); + newvfsconf->vfc_typenum = maxvfsconf++; + newvfsconf->vfc_refcount = 0; + newvfsconf->vfc_flags = 0; + newvfsconf->vfc_mountroot = NULL; /* Can't mount root of file system [yet] */ + + newvfsconf->vfc_next = NULL; + + /* Based on vfs_op_init and ... */ + opv_desc_vector_p = synthfs_vnodeop_opv_desc.opv_desc_vector_p; + + DBG_VOP(("load_synthfs: Allocating and initializing VNode ops vector...\n")); + + /* + * Allocate and init the vector. + * Also handle backwards compatibility. + */ + + MALLOC(*opv_desc_vector_p, PFI *, vfs_opv_numops*sizeof(PFI), M_SYNTHFS, M_WAITOK); + bzero (*opv_desc_vector_p, vfs_opv_numops*sizeof(PFI)); + opv_desc_vector = *opv_desc_vector_p; + for (j=0; synthfs_vnodeop_opv_desc.opv_desc_ops[j].opve_op; j++) { + opve_descp = &(synthfs_vnodeop_opv_desc.opv_desc_ops[j]); + + /* + * Sanity check: is this operation listed + * in the list of operations? We check this + * by seeing if its offest is zero. Since + * the default routine should always be listed + * first, it should be the only one with a zero + * offset. Any other operation with a zero + * offset is probably not listed in + * vfs_op_descs, and so is probably an error. + * + * A panic here means the layer programmer + * has committed the all-too common bug + * of adding a new operation to the layer's + * list of vnode operations but + * not adding the operation to the system-wide + * list of supported operations. 
+ */ + if (opve_descp->opve_op->vdesc_offset == 0 && + opve_descp->opve_op->vdesc_offset != VOFFSET(vop_default)) { + DBG_VOP(("load_synthfs: operation %s not listed in %s.\n", + opve_descp->opve_op->vdesc_name, + "vfs_op_descs")); + panic ("load_synthfs: bad operation"); + } + /* + * Fill in this entry. + */ + opv_desc_vector[opve_descp->opve_op->vdesc_offset] = + opve_descp->opve_impl; + } + + /* + * Finally, go back and replace unfilled routines + * with their default. (Sigh, an O(n^3) algorithm. I + * could make it better, but that'd be work, and n is small.) + */ + opv_desc_vector_p = synthfs_vnodeop_opv_desc.opv_desc_vector_p; + + /* + * Force every operations vector to have a default routine. + */ + opv_desc_vector = *opv_desc_vector_p; + if (opv_desc_vector[VOFFSET(vop_default)]==NULL) { + panic("load_vp;fs: operation vector without default routine."); + } + for (j = 0;jmnt_stat.f_fstypename, synthfs_fs_name, sizeof(mp->mnt_stat.f_fstypename)); + (void) copyinstr(path, mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname) - 1, &size); + strncpy(mp->mnt_stat.f_mntfromname, synthfs_fake_mntfromname, sizeof(mp->mnt_stat.f_mntfromname)); + priv_mnt_data->synthfs_mounteddev = (dev_t)0; + priv_mnt_data->synthfs_nextid = FIRST_SYNTHFS_ID; + priv_mnt_data->synthfs_filecount = 0; + priv_mnt_data->synthfs_dircount = 0; + priv_mnt_data->synthfs_encodingsused = 0x00000001; + + /* + Set up the root vnode for fast reference in the future. + Note that synthfs_new_directory() returns the vnode with a refcount of +2. + The root vnode's refcount is maintained unlocked but with a pos. ref count until unmount. 
+ */ + error = synthfs_new_directory(mp, NULL, "", ROOT_DIRID, (S_IRWXU|S_IRWXG|S_IROTH|S_IXOTH), p, &priv_mnt_data->synthfs_rootvp); + if (error) { + DBG_VOP(("Attempt to create root directory failed with error %d.\n", error)); + return error; + }; + priv_mnt_data->synthfs_rootvp->v_flag |= VROOT; + + priv_mnt_data->synthfs_mp = mp; + mp->mnt_data = (void *)priv_mnt_data; + + /* Drop the freshly acquired reference on the root, leaving v_usecount=1 to prevent + the vnode from beeing freed: */ + vput(priv_mnt_data->synthfs_rootvp); + + return (0); +} + + + +int +synthfs_mount(mp, path, data, ndp, p) + register struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + size_t size; + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname) - 1, &size); + synthfs_mount_fs(mp, path, data, ndp, p); +} + + + + + + +/* + * Initialize the filesystem + */ +int +synthfs_init(vfsp) + struct vfsconf *vfsp; +{ + DBG_VOP(("synthfs_init called.\n")); + return 0; +} + +int +synthfs_start(mp, flags, p) +struct mount * mp; +int flags; +struct proc * p; +{ + DBG_VOP(("synthfs_start called.\n")); + return 0; +} + +/* + * Return the root of a filesystem. 
+ */ +int +synthfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + unsigned long root_nodeid = ROOT_DIRID; + + DBG_VOP(("synthfs_root called.\n")); + + *vpp = VFSTOSFS(mp)->synthfs_rootvp; + return vget(VFSTOSFS(mp)->synthfs_rootvp, LK_EXCLUSIVE | LK_RETRY, current_proc()); +} + +int +synthfs_quotactl(mp, cmds, uid, arg, p) +struct mount *mp; +int cmds; +uid_t uid; +caddr_t arg; +struct proc * p; +{ + DBG_VOP(("synthfs_quotactl called.\n")); + return (0); +} + +/* + * unmount system call + */ +int +synthfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct synthfs_mntdata *synth; + struct vnode *root_vp; + int retval; + + DBG_VOP(("synthfs_unmount called.\n")); + synth = (struct synthfs_mntdata *)mp->mnt_data; + + root_vp = synth->synthfs_rootvp; + retval = vflush(mp, root_vp, (mntflags & MNT_FORCE) ? FORCECLOSE : 0); + if (retval && ((mntflags & MNT_FORCE) == 0)) goto Err_Exit; + + /* Free the root vnode. + Note that there's no need to vget() or vref() it before locking it here: + the ref. count has been maintained at +1 ever since mount time. */ + if (root_vp) { + retval = vn_lock(root_vp, LK_EXCLUSIVE | LK_RETRY, p); + if ((mntflags & MNT_FORCE) == 0) { + if (retval) goto Err_Exit; + + if (root_vp->v_usecount > 1) { + DBG_VOP(("synthfs ERROR: root vnode = %x, usecount = %d\n", (int)root_vp, synth->synthfs_rootvp->v_usecount)); + VOP_UNLOCK(root_vp, 0, p); + retval = EBUSY; + goto Err_Exit; + }; + }; + + synth->synthfs_rootvp = NULL; + + if (retval == 0) { + vput(root_vp); /* This drops synthfs's own refcount */ + vgone(root_vp); + }; + }; + + /* All vnodes should be gone, and no errors, clean up the last */ + + mp->mnt_data = NULL; + FREE(synth, M_SYNTHFS); + +Err_Exit: + + if (mntflags & MNT_FORCE) retval = 0; + + return(retval); +} + +/* + * Get file system statistics. 
+ */ +int +synthfs_statfs(mp, sbp, p) + struct mount *mp; + register struct statfs *sbp; + struct proc *p; +{ + DBG_VOP(("synthfs_statfs called.\n")); + + sbp->f_bsize = 512; + sbp->f_iosize = 512; + sbp->f_blocks = 1024; // lies, darn lies and virtual file systems + sbp->f_bfree = 0; // Nope, can't write here! + sbp->f_bavail = 0; + sbp->f_files = VFSTOSFS(mp)->synthfs_filecount + VFSTOSFS(mp)->synthfs_dircount; + sbp->f_ffree = 0; + strncpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, sizeof(sbp->f_mntonname)); + strncpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, sizeof(sbp->f_mntfromname)); + + return (0); +} + +/* + * synthfs doesn't have any data or backing store and you can't write into any of the synthfs + * structures, so don't do anything + */ +int +synthfs_sync(mp, waitfor, cred, p) + struct mount *mp; + int waitfor; + struct ucred *cred; + struct proc *p; +{ +// DBG_VOP(("synthfs_sync called\n")); + return 0; +} +/* + * Look up a synthfs node by node number. + */ +int +synthfs_vget(mp, ino, vpp) + struct mount *mp; + void *ino; + struct vnode **vpp; +{ + struct vnode *vp; + +// DBG_VOP(("synthfs_vget called\n")); + + /* Check for unmount in progress */ + if (mp->mnt_kern_flag & MNTK_UNMOUNT) { + *vpp = NULL; + return (EPERM); + } + +loop: + LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { + if (VTOS(vp)->s_nodeid == *((unsigned long *)ino)) { + if (vget(vp, LK_EXCLUSIVE, current_proc()) != 0) { + goto loop; + }; + *vpp = vp; + return 0; + }; + }; + *vpp = NULL; + return -1; +} + +/* + * fast filesystem related variables. 
+ */ +int +synthfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + DBG_VOP(("synthfs_sysctl called.\n")); + return (EOPNOTSUPP); +} + +/* + * File handle to vnode + * + */ +int +synthfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) + register struct mount *mp; + struct fid *fhp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred **credanonp; +{ + DBG_VOP(("synthfs_fhtovp called.\n")); + return EOPNOTSUPP; +} + +/* + * Vnode pointer to File handle + */ +/* ARGSUSED */ +int +synthfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + DBG_VOP(("synthfs_vptofh called.\n")); + return EOPNOTSUPP; +} + + + + + + +int +vn_mkdir(struct proc *p, char *path, int mode) { + struct nameidata nd; + struct vnode *vp; + struct vattr vattr; + int error; + + NDINIT(&nd, CREATE, LOCKPARENT, UIO_SYSSPACE, path, p); + if (error = namei(&nd)) { + DBG_VOP(("vn_mkdir: error from namei, error = %d.\n", error)); + return (error); + }; + vp = nd.ni_vp; + if (vp != NULL) { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vrele(vp); + DBG_VOP(("vn_mkdir: target already exists; returning EEXIST.\n")); + return (EEXIST); + } + VATTR_NULL(&vattr); + vattr.va_type = VDIR; + vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; + VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); + if (error) { + DBG_VOP(("vn_mkdir: error from VOP_MKDIR (%d).\n", error)); + } else { + vput(nd.ni_vp); + }; + return (error); +} + + + +int +vn_symlink(struct proc *p, char *path, char *link) { + struct nameidata nd; + struct vattr vattr; + int error; + + NDINIT(&nd, CREATE, LOCKPARENT, UIO_SYSSPACE, link, p); + if (error = namei(&nd)) return error; + + if (nd.ni_vp) { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + 
vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vrele(nd.ni_vp); + return EEXIST; + } + VATTR_NULL(&vattr); + vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask; + VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + return VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path); +} + + diff --git a/bsd/miscfs/synthfs/synthfs_vnops.c b/bsd/miscfs/synthfs/synthfs_vnops.c new file mode 100644 index 000000000..da66e50bd --- /dev/null +++ b/bsd/miscfs/synthfs/synthfs_vnops.c @@ -0,0 +1,1844 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer, Inc. All Rights Reserved. + * + * Modification History: + * + * 02-Feb-2000 Clark Warner Added copyfile to table + * 17-Aug-1999 Pat Dirks New today. 
 */

/* NOTE(review): the argument of every angle-bracket #include below was
   lost when this patch was extracted (the header names were stripped as
   markup).  Restore them from the original synthfs_vnops.c — do not guess. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include

#include "synthfs.h"

/* Compile-time switch for (unported) read/write support; must stay 0. */
#define RWSUPPORT 0

#if RWSUPPORT
#error NOT PORTED FOR UBC
/* when porting to UBC, do not just replace
 * vnode_uncache by ubc_uncache - there's more
 * to it than that!
 */
#include
#endif

/* external routines defined in vfs_cache.c */
extern void cache_purge (struct vnode *vp);
extern int cache_lookup (struct vnode *dvp, struct vnode **vpp, struct componentname *cnp);
extern void cache_enter (struct vnode *dvp, struct vnode *vpp, struct componentname *cnp);

//extern void vnode_uncache(struct vnode *);

extern int groupmember(gid_t gid, struct ucred* cred);

#define VOPFUNC int (*)(void *)

/* Global vfs data structures for synthfs. */
/* Opcode dispatch vector, filled in at VFS init from the table below. */
int (**synthfs_vnodeop_p) (void *);
/* VOP descriptor -> implementation map.  Unsupported operations route to
   the generic err_* stubs; no-ops route to nop_*. */
struct vnodeopv_entry_desc synthfs_vnodeop_entries[] = {
	{&vop_default_desc, (VOPFUNC)vn_default_error},
	{&vop_strategy_desc, (VOPFUNC)err_strategy},		/* strategy - not supported */
	{&vop_bwrite_desc, (VOPFUNC)err_bwrite},		/* bwrite - not supported */
	{&vop_lookup_desc, (VOPFUNC)synthfs_cached_lookup},	/* cached lookup */
	{&vop_create_desc, (VOPFUNC)synthfs_create},		/* create - DEBUGGER */
	{&vop_whiteout_desc, (VOPFUNC)err_whiteout},		/* whiteout - not supported */
	{&vop_mknod_desc, (VOPFUNC)err_mknod},			/* mknod - not supported */
	{&vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex},		/* mkcomplex - not supported */
	{&vop_open_desc, (VOPFUNC)synthfs_open},		/* open - DEBUGGER */
	{&vop_close_desc, (VOPFUNC)nop_close},			/* close - NOP */
	{&vop_access_desc, (VOPFUNC)synthfs_access},		/* access */
	{&vop_getattr_desc, (VOPFUNC)synthfs_getattr},		/* getattr */
	{&vop_setattr_desc, (VOPFUNC)synthfs_setattr},		/* setattr */
	{&vop_getattrlist_desc, (VOPFUNC)err_getattrlist},	/* getattrlist - not supported */
	{&vop_setattrlist_desc, (VOPFUNC)err_setattrlist},	/* setattrlist - not supported */
	{&vop_read_desc, (VOPFUNC)err_read},			/* read - not supported */
	{&vop_write_desc, (VOPFUNC)err_write},			/* write - not supported */
	{&vop_lease_desc, (VOPFUNC)err_lease},			/* lease - not supported */
	{&vop_ioctl_desc, (VOPFUNC)err_ioctl},			/* ioctl - not supported */
	{&vop_select_desc, (VOPFUNC)synthfs_select},		/* select */
	{&vop_exchange_desc, (VOPFUNC)err_exchange},		/* exchange - not supported */
	{&vop_revoke_desc, (VOPFUNC)nop_revoke},		/* revoke - NOP */
	{&vop_mmap_desc, (VOPFUNC)synthfs_mmap},		/* mmap - DEBUGGER */
	{&vop_fsync_desc, (VOPFUNC)nop_fsync},			/* fsync - NOP */
	{&vop_seek_desc, (VOPFUNC)nop_seek},			/* seek - NOP */
	{&vop_remove_desc, (VOPFUNC)synthfs_remove},		/* remove */
	{&vop_link_desc, (VOPFUNC)err_link},			/* link - not supported */
	{&vop_rename_desc, (VOPFUNC)synthfs_rename},		/* rename */
	{&vop_mkdir_desc, (VOPFUNC)synthfs_mkdir},		/* mkdir */
	{&vop_rmdir_desc, (VOPFUNC)synthfs_rmdir},		/* rmdir */
	{&vop_symlink_desc, (VOPFUNC)synthfs_symlink},		/* symlink */
	{&vop_readdir_desc, (VOPFUNC)synthfs_readdir},		/* readdir */
	{&vop_readdirattr_desc, (VOPFUNC)err_readdirattr},	/* readdirattr - not supported */
	{&vop_readlink_desc, (VOPFUNC)synthfs_readlink},	/* readlink */
	{&vop_abortop_desc, (VOPFUNC)nop_abortop},		/* abortop - NOP */
	{&vop_inactive_desc, (VOPFUNC)synthfs_inactive},	/* inactive */
	{&vop_reclaim_desc, (VOPFUNC)synthfs_reclaim},		/* reclaim */
	{&vop_lock_desc, (VOPFUNC)synthfs_lock},		/* lock */
	{&vop_unlock_desc, (VOPFUNC)synthfs_unlock},		/* unlock */
	{&vop_bmap_desc, (VOPFUNC)err_bmap},			/* bmap - not supported */
	{&vop_print_desc, (VOPFUNC)err_print},			/* print - not supported */
	{&vop_islocked_desc, (VOPFUNC)synthfs_islocked},	/* islocked */
	{&vop_pathconf_desc, (VOPFUNC)synthfs_pathconf},	/* pathconf */
	{&vop_advlock_desc, (VOPFUNC)err_advlock},		/* advlock - not supported */
	{&vop_blkatoff_desc, (VOPFUNC)err_blkatoff},		/* blkatoff - not supported */
	{&vop_valloc_desc, (VOPFUNC)err_valloc},		/* valloc - not supported */
	{&vop_reallocblks_desc, (VOPFUNC)err_reallocblks},	/* reallocblks - not supported */
	{&vop_vfree_desc, (VOPFUNC)err_vfree},			/* vfree - not supported */
	{&vop_truncate_desc, (VOPFUNC)err_truncate},		/* truncate - not supported */
	{&vop_allocate_desc, (VOPFUNC)err_allocate},		/* allocate - not supported */
	{&vop_update_desc, (VOPFUNC)synthfs_update},		/* update */
	{&vop_pgrd_desc, (VOPFUNC)err_pgrd},			/* pgrd - not supported */
	{&vop_pgwr_desc, (VOPFUNC)err_pgwr},			/* pgwr - not supported */
	{&vop_pagein_desc, (VOPFUNC)err_pagein},		/* pagein - not supported */
	{&vop_pageout_desc, (VOPFUNC)err_pageout},		/* pageout - not supported */
	{&vop_devblocksize_desc, (VOPFUNC)err_devblocksize},	/* devblocksize - not supported */
	{&vop_searchfs_desc, (VOPFUNC)err_searchfs},		/* searchfs - not supported */
	{&vop_copyfile_desc, (VOPFUNC)err_copyfile},		/* copyfile - not supported */
	{ &vop_blktooff_desc, (VOPFUNC)err_blktooff },		/* blktooff not supported */
	{ &vop_offtoblk_desc, (VOPFUNC)err_offtoblk },		/* offtoblk not supported */
	{ &vop_cmap_desc, (VOPFUNC)err_cmap },			/* cmap not supported */
	{(struct vnodeop_desc *) NULL, (int (*) ()) NULL}
};

/*
 * Oh what a tangled web we weave.  This structure will be used by
 * bsd/vfs/vfs_conf.c to actually do the initialization of synthfs_vnodeop_p
 */
struct vnodeopv_desc synthfs_vnodeop_opv_desc =
	{&synthfs_vnodeop_p, synthfs_vnodeop_entries};



/*
 * Create a regular file
#% create	dvp	L U U
#% create	vpp	- L -
#
 vop_create {
     IN WILLRELE struct vnode *dvp;
     OUT struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

     We are responsible for freeing the namei buffer, it is done in hfs_makenode(), unless there is
    a previous error.
+ +*/ + +int +synthfs_create(ap) +struct vop_create_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */ *ap; +{ +#if DEBUG + struct vnode *dvp = ap->a_dvp; + char debugmsg[255]; + + sprintf(debugmsg, "synthfs_create: attempt to create '%s' in '%s' ?!", ap->a_cnp->cn_nameptr, VTOS(dvp)->s_name); + Debugger(debugmsg); +#endif + + return EOPNOTSUPP; +} + + + +/* + * Open called. +#% open vp L L L +# + vop_open { + IN struct vnode *vp; + IN int mode; + IN struct ucred *cred; + IN struct proc *p; + */ + +int +synthfs_open(ap) +struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + + if (vp->v_type == VDIR) { + return 0; + } else { +#if DEBUG + struct synthfsnode *sp = VTOS(vp); + char debugmsg[255]; + + sprintf(debugmsg, "synthfs_open: attempt to open '/%s' ?!", sp->s_name); + Debugger(debugmsg); +#endif + }; + + return 0; +} + + + +/* + * Mmap a file + * + * NB Currently unsupported. 
# XXX - not used
#
 vop_mmap {
     IN struct vnode *vp;
     IN int fflags;
     IN struct ucred *cred;
     IN struct proc *p;

 */

/* ARGSUSED */

/* mmap is never expected on a synthfs vnode: trap on DEBUG, fail with EINVAL. */
int
synthfs_mmap(ap)
struct vop_mmap_args /* {
	struct vnode *a_vp;
	int a_fflags;
	struct ucred *a_cred;
	struct proc *a_p;
} */ *ap;
{
#if DEBUG
	struct vnode *vp = ap->a_vp;
	char debugmsg[255];

	sprintf(debugmsg, "synthfs_mmap: attempt to map '/%s' ?!", VTOS(vp)->s_name);
	Debugger(debugmsg);
#endif

	return EINVAL;
}



/*
#% access	vp	L L L
#
 vop_access {
     IN struct vnode *vp;
     IN int mode;
     IN struct ucred *cred;
     IN struct proc *p;

*/

/*
 * Classic BSD permission check (owner, then group, then other), preceded
 * by the read-only-filesystem and immutable-flag checks.  The order of
 * these checks matters and mirrors ufs_access.
 */
int
synthfs_access(ap)
struct vop_access_args /* {
	struct vnode *a_vp;
	int a_mode;
	struct ucred *a_cred;
	struct proc *a_p;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	struct ucred *cred = ap->a_cred;
	struct synthfsnode *sp = VTOS(vp);
	register gid_t *gp;
	mode_t mask;
	int retval = 0;
	int i;

	/*
	 * Disallow write attempts on read-only file systems;
	 * unless the file is a socket, fifo, or a block or
	 * character device resident on the file system.
	 */
	if (mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}

	/* If immutable bit set, nobody gets to write it. */
	if ((mode & VWRITE) && (sp->s_flags & IMMUTABLE))
		return (EPERM);

	/* Otherwise, user id 0 always gets access. */
	if (ap->a_cred->cr_uid == 0) {
		retval = 0;
		goto Exit;
	};

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == sp->s_uid) {
		if (mode & VEXEC)
			mask |= S_IXUSR;
		if (mode & VREAD)
			mask |= S_IRUSR;
		if (mode & VWRITE)
			mask |= S_IWUSR;
		retval = ((sp->s_mode & mask) == mask ? 0 : EACCES);
		goto Exit;
	}

	/* Otherwise, check the groups. */
	for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
		if (sp->s_gid == *gp) {
			if (mode & VEXEC)
				mask |= S_IXGRP;
			if (mode & VREAD)
				mask |= S_IRGRP;
			if (mode & VWRITE)
				mask |= S_IWGRP;
			retval = ((sp->s_mode & mask) == mask ? 0 : EACCES);
			goto Exit;
		}

	/* Otherwise, check everyone else. */
	if (mode & VEXEC)
		mask |= S_IXOTH;
	if (mode & VREAD)
		mask |= S_IROTH;
	if (mode & VWRITE)
		mask |= S_IWOTH;
	retval = ((sp->s_mode & mask) == mask ? 0 : EACCES);

Exit:
	return (retval);
}

/*
#% getattr	vp	= = =
#
 vop_getattr {
     IN struct vnode *vp;
     IN struct vattr *vap;
     IN struct ucred *cred;
     IN struct proc *p;

*/
/*
 * Fill in a vattr from the in-memory synthfsnode.  Directory sizes are
 * synthesized from the entry count (+2 for "." and ".."); timestamps are
 * converted from the node's timeval (usec) fields to timespec (nsec).
 */
int
synthfs_getattr(ap)
struct vop_getattr_args /* {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct proc *a_p;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct synthfsnode *sp = VTOS(vp);
	struct synthfs_mntdata *smp = VTOSFS(vp);	/* NOTE(review): unused here */

	vap->va_type = vp->v_type;
	vap->va_mode = sp->s_mode;
	vap->va_nlink = sp->s_linkcount;
	vap->va_uid = sp->s_uid;
	vap->va_gid = sp->s_gid;
	vap->va_fsid = VTOVFS(vp)->mnt_stat.f_fsid.val[0];
	vap->va_fileid = sp->s_nodeid;
	switch (vp->v_type) {
	case VDIR:
		vap->va_size = (sp->s_u.d.d_entrycount + 2) * sizeof(struct dirent);
		break;

	case VREG:
		vap->va_size = sp->s_u.f.f_size;
		break;

	case VLNK:
		vap->va_size = sp->s_u.s.s_length;
		break;

	default:
		vap->va_size = 0;
	};
	vap->va_blocksize = 512;
	vap->va_atime.tv_sec = sp->s_accesstime.tv_sec;
	vap->va_atime.tv_nsec = sp->s_accesstime.tv_usec * 1000;
	vap->va_mtime.tv_sec = sp->s_modificationtime.tv_sec;
	vap->va_mtime.tv_nsec = sp->s_modificationtime.tv_usec * 1000;
	vap->va_ctime.tv_sec = sp->s_changetime.tv_sec;
	vap->va_ctime.tv_nsec = sp->s_changetime.tv_usec * 1000;
	vap->va_gen = sp->s_generation;
	vap->va_flags = sp->s_flags;
	vap->va_rdev = sp->s_rdev;
	/* va_bytes = size rounded up to a whole number of 512-byte blocks */
	vap->va_bytes = vap->va_blocksize * ((vap->va_size + vap->va_blocksize - 1) / vap->va_blocksize);
	vap->va_filerev = 0;
	vap->va_vaflags = 0;

	return (0);
}



/*
 * Change the mode on a file or directory.
 * vnode vp must be locked on entry.
 */
int synthfs_chmod(struct vnode *vp, int mode, struct ucred *cred, struct proc *p)
{
	struct synthfsnode *sp = VTOS(vp);
	int result;

	/* Only the owner (or superuser) may chmod. */
	if ((cred->cr_uid != sp->s_uid) &&
	    (result = suser(cred, &p->p_acflag)))
		return result;
	if (cred->cr_uid) {
		/* Non-root: no sticky bit on non-directories, and setgid
		   only if the caller belongs to the target group. */
		if (vp->v_type != VDIR && (mode & S_ISTXT))
			return EFTYPE;
		if (!groupmember(sp->s_gid, cred) && (mode & S_ISGID))
			return (EPERM);
	}
	sp->s_mode &= ~ALLPERMS;
	sp->s_mode |= (mode & ALLPERMS);
	sp->s_nodeflags |= IN_CHANGE;
#if RWSUPPORT
	if ((vp->v_flag & VTEXT) && (sp->s_mode & S_ISTXT) == 0) (void) vnode_uncache(vp);
#endif

	return 0;
}



/*
 * Change the flags on a file or directory.
 * vnode vp must be locked on entry.
 */
int synthfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred, struct proc *p)
{
	struct synthfsnode *sp = VTOS(vp);
	int result;

	/* Only the owner (or superuser) may change flags. */
	if (cred->cr_uid != sp->s_uid &&
	    (result = suser(cred, &p->p_acflag)))
		return result;

	if (cred->cr_uid == 0) {
		/* Root may set anything, except clearing system immutable/
		   append flags at a raised securelevel. */
		if ((sp->s_flags & (SF_IMMUTABLE | SF_APPEND)) &&
		    securelevel > 0) {
			return EPERM;
		};
		sp->s_flags = flags;
	} else {
		/* Non-root may touch only the user-settable flags. */
		if (sp->s_flags & (SF_IMMUTABLE | SF_APPEND) ||
		    (flags & UF_SETTABLE) != flags) {
			return EPERM;
		};
		sp->s_flags &= SF_SETTABLE;
		sp->s_flags |= (flags & UF_SETTABLE);
	}
	sp->s_nodeflags |= IN_CHANGE;

	return 0;
}



/*
 * Perform chown operation on vnode vp;
 * vnode vp must be locked on entry.
 */
int synthfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred, struct proc *p)
{
	struct synthfsnode *sp = VTOS(vp);
	uid_t ouid;
	gid_t ogid;
	int result = 0;

	/* VNOVAL means "leave unchanged". */
	if (uid == (uid_t)VNOVAL) uid = sp->s_uid;
	if (gid == (gid_t)VNOVAL) gid = sp->s_gid;

	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((cred->cr_uid != sp->s_uid || uid != sp->s_uid ||
	    (gid != sp->s_gid && !groupmember((gid_t)gid, cred))) &&
	    (result = suser(cred, &p->p_acflag)))
		return result;

	ogid = sp->s_gid;
	ouid = sp->s_uid;

	sp->s_gid = gid;
	sp->s_uid = uid;

	/* Mark changed, and strip setuid/setgid on non-root ownership changes. */
	if (ouid != uid || ogid != gid) sp->s_nodeflags |= IN_CHANGE;
	if (ouid != uid && cred->cr_uid != 0) sp->s_mode &= ~S_ISUID;
	if (ogid != gid && cred->cr_uid != 0) sp->s_mode &= ~S_ISGID;

	return 0;
}



/*
 * Set attribute vnode op. called from several syscalls
#% setattr	vp	L L L
#
 vop_setattr {
     IN struct vnode *vp;
     IN struct vattr *vap;
     IN struct ucred *cred;
     IN struct proc *p;

 */

/*
 * Dispatches each settable attribute (flags, owner/group, size, times,
 * mode) to its helper, rejecting fields that can never be set.  Any
 * field left at VNOVAL is skipped.  Size changes require RWSUPPORT.
 */
int
synthfs_setattr(ap)
struct vop_setattr_args /* {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
struct proc *a_p;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct synthfsnode *sp = VTOS(vp);
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct timeval atimeval, mtimeval;
	int result;

	/*
	 * Check for unsettable attributes.
	 */
	if (((vap->va_type != VNON) && (vap->va_type != vp->v_type)) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
		result = EINVAL;
		goto Err_Exit;
	}

	if (vap->va_flags != VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) {
			result = EROFS;
			goto Err_Exit;
		};
		if ((result = synthfs_chflags(vp, vap->va_flags, cred, p))) {
			goto Err_Exit;
		};
		/* If the node just became immutable/append-only, stop here:
		   no further attribute changes are permitted. */
		if (vap->va_flags & (IMMUTABLE | APPEND)) {
			result = 0;
			goto Err_Exit;
		};
	}

	if (sp->s_flags & (IMMUTABLE | APPEND)) {
		result = EPERM;
		goto Err_Exit;
	};

	/*
	 * Go through the fields and update iff not VNOVAL.
	 */
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) {
			result = EROFS;
			goto Err_Exit;
		};
		if ((result = synthfs_chown(vp, vap->va_uid, vap->va_gid, cred, p))) {
			goto Err_Exit;
		};
	}
	if (vap->va_size != VNOVAL) {
		/*
		 * Disallow write attempts on read-only file systems;
		 * unless the file is a socket, fifo, or a block or
		 * character device resident on the file system.
		 */
		switch (vp->v_type) {
		case VDIR:
			result = EISDIR;
			goto Err_Exit;
		case VLNK:
		case VREG:
			if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) {
				result = EROFS;
				goto Err_Exit;
			};
			break;
		default:
			break;
		}
#if RWSUPPORT
		if ((result = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p))) {
			goto Err_Exit;
		};
#else
		result = EINVAL;
		goto Err_Exit;
#endif
	}

	sp = VTOS(vp);
	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) {
			result = EROFS;
			goto Err_Exit;
		};
		/* Owner may always set times; otherwise utimes(NULL)-style
		   requests (VA_UTIMES_NULL) need only write access. */
		if (cred->cr_uid != sp->s_uid &&
		    (result = suser(cred, &p->p_acflag)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (result = VOP_ACCESS(vp, VWRITE, cred, p)))) {
			goto Err_Exit;
		};
		if (vap->va_atime.tv_sec != VNOVAL)
			sp->s_nodeflags |= IN_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL)
			sp->s_nodeflags |= IN_CHANGE | IN_UPDATE;
		atimeval.tv_sec = vap->va_atime.tv_sec;
		atimeval.tv_usec = vap->va_atime.tv_nsec / 1000;
		mtimeval.tv_sec = vap->va_mtime.tv_sec;
		mtimeval.tv_usec = vap->va_mtime.tv_nsec / 1000;
		if ((result = VOP_UPDATE(vp, &atimeval, &mtimeval, 1))) {
			goto Err_Exit;
		};
	}

	result = 0;
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY) {
			result = EROFS;
			goto Err_Exit;
		};
		result = synthfs_chmod(vp, (int)vap->va_mode, cred, p);
	};

Err_Exit: ;

	DBG_VOP(("synthfs_setattr: returning %d...\n", result));

	return (result);
}



/*

#% rename	sourcePar_vp	U U U
#% rename	source_vp	U U U
#% rename	targetPar_vp	L U U
#% rename	target_vp	X U U
#
 vop_rename {
     IN WILLRELE struct vnode *sourcePar_vp;
     IN WILLRELE struct vnode *source_vp;
     IN struct componentname *source_cnp;
     IN WILLRELE struct vnode *targetPar_vp;
     IN WILLRELE struct vnode *target_vp;
     IN struct componentname *target_cnp;


 */

/*
 * On entry:
 *	source's parent directory is unlocked
 *	source file or directory is unlocked
 *	destination's parent
directory is locked
 *	destination file or directory is locked if it exists
 *
 * On exit:
 *	all denodes should be released
 *
 */

/*
 * NOTE(review): the lock/refcount disposition across the normal, bad:
 * and abortit: paths below follows the 4.4BSD VOP_RENAME contract
 * exactly; do not reorder the vput/vrele calls.
 */
int
synthfs_rename(ap)
struct vop_rename_args /* {
	struct vnode *a_fdvp;
	struct vnode *a_fvp;
	struct componentname *a_fcnp;
	struct vnode *a_tdvp;
	struct vnode *a_tvp;
	struct componentname *a_tcnp;
} */ *ap;
{
	struct vnode *target_vp = ap->a_tvp;
	struct vnode *targetPar_vp = ap->a_tdvp;
	struct vnode *source_vp = ap->a_fvp;
	struct vnode *sourcePar_vp = ap->a_fdvp;
	struct componentname *target_cnp = ap->a_tcnp;
	struct componentname *source_cnp = ap->a_fcnp;
	struct proc *p = source_cnp->cn_proc;
	struct synthfsnode *target_sp, *targetPar_sp, *source_sp, *sourcePar_sp;
	u_short doingdirectory = 0, oldparent = 0, newparent = 0;
	int retval = 0;
	struct timeval tv;

#if SYNTHFS_DIAGNOSTIC
	if ((target_cnp->cn_flags & HASBUF) == 0 ||
	    (source_cnp->cn_flags & HASBUF) == 0)
		panic("synthfs_rename: no name");
#endif

	DBG_ASSERT((ap->a_fdvp->v_type == VDIR) && (ap->a_tdvp->v_type == VDIR));
	target_sp = targetPar_sp = source_sp = sourcePar_sp = NULL;

	/*
	 * Check for cross-device rename.
	 */
	if ((source_vp->v_mount != targetPar_vp->v_mount) ||
	    (target_vp && (source_vp->v_mount != target_vp->v_mount))) {
		retval = EXDEV;
		goto abortit;
	}

	/*
	 * Check for access permissions
	 */
	if (target_vp && ((VTOS(target_vp)->s_pflags & (IMMUTABLE | APPEND)) ||
	    (VTOS(targetPar_vp)->s_pflags & APPEND))) {
		retval = EPERM;
		goto abortit;
	}

	if ((retval = vn_lock(source_vp, LK_EXCLUSIVE, p)))
		goto abortit;

	sourcePar_sp = VTOS(sourcePar_vp);
	source_sp = VTOS(source_vp);
	oldparent = sourcePar_sp->s_nodeid;
	if ((source_sp->s_pflags & (IMMUTABLE | APPEND)) || (sourcePar_sp->s_pflags & APPEND)) {
		VOP_UNLOCK(source_vp, 0, p);
		retval = EPERM;
		goto abortit;
	}

	/*
	 * Be sure we are not renaming ".", "..", or an alias of ".". This
	 * leads to a crippled directory tree.  It's pretty tough to do a
	 * "ls" or "pwd" with the "." directory entry missing, and "cd .."
	 * doesn't work if the ".." entry is missing.
	 */
	if (source_sp->s_type == SYNTHFS_DIRECTORY) {
		if ((source_cnp->cn_namelen == 1 && source_cnp->cn_nameptr[0] == '.')
		    || sourcePar_sp == source_sp
		    || (source_cnp->cn_flags & ISDOTDOT)
		    || (source_sp->s_nodeflags & IN_RENAME)) {
			VOP_UNLOCK(source_vp, 0, p);
			retval = EINVAL;
			goto abortit;
		}
		/* IN_RENAME guards against a concurrent rename of the same dir. */
		source_sp->s_nodeflags |= IN_RENAME;
		doingdirectory = TRUE;
	}

	/* Transit between abort and bad */

	targetPar_sp = VTOS(targetPar_vp);
	target_sp = target_vp ? VTOS(target_vp) : NULL;
	newparent = targetPar_sp->s_nodeid;

	retval = VOP_ACCESS(source_vp, VWRITE, target_cnp->cn_cred, target_cnp->cn_proc);
	if (doingdirectory && (newparent != oldparent)) {
		if (retval)	/* write access check above */
			goto bad;
	}

	/*
	 * If the destination exists, then be sure its type (file or dir)
	 * matches that of the source.  And, if it is a directory make sure
	 * it is empty.  Then delete the destination.
	 */
	if (target_vp) {
		/*
		 * If the parent directory is "sticky", then the user must
		 * own the parent directory, or the destination of the rename,
		 * otherwise the destination may not be changed (except by
		 * root). This implements append-only directories.
		 */
		if ((targetPar_sp->s_mode & S_ISTXT) && target_cnp->cn_cred->cr_uid != 0 &&
		    target_cnp->cn_cred->cr_uid != targetPar_sp->s_uid &&
		    target_sp->s_uid != target_cnp->cn_cred->cr_uid) {
			retval = EPERM;
			goto bad;
		}

		/*
		 * VOP_REMOVE will vput targetPar_vp so we better bump
		 * its ref count and relockit, always set target_vp to
		 * NULL afterwards to indicate that were done with it.
		 */
		VREF(targetPar_vp);
#if RWSUPPORT
		if (target_vp->v_type == VREG) {
			(void) vnode_uncache(target_vp);
		};
#endif
		cache_purge(target_vp);

		target_cnp->cn_flags &= ~SAVENAME;
		retval = VOP_REMOVE(targetPar_vp, target_vp, target_cnp);
		(void) vn_lock(targetPar_vp, LK_EXCLUSIVE | LK_RETRY, p);

		target_vp = NULL;
		target_sp = NULL;

		if (retval) goto bad;
	};


	if (newparent != oldparent)
		vn_lock(sourcePar_vp, LK_EXCLUSIVE | LK_RETRY, p);

	/* remove the existing entry from the namei cache: */
	if (source_vp->v_type == VREG) cache_purge(source_vp);

	retval = synthfs_move_rename_entry( source_vp, targetPar_vp, target_cnp->cn_nameptr);

	if (newparent != oldparent)
		VOP_UNLOCK(sourcePar_vp, 0, p);

	if (retval) goto bad;

	source_sp->s_nodeflags &= ~IN_RENAME;

	/*
	 * Timestamp both parent directories.
	 * Note that if this is a rename within the same directory,
	 * (where targetPar_hp == sourcePar_hp)
	 * the code below is still safe and correct.
	 */
	targetPar_sp->s_nodeflags |= IN_UPDATE;
	sourcePar_sp->s_nodeflags |= IN_UPDATE;
	tv = time;
	SYNTHFSTIMES(targetPar_sp, &tv, &tv);
	SYNTHFSTIMES(sourcePar_sp, &tv, &tv);

	vput(targetPar_vp);
	vrele(sourcePar_vp);
	vput(source_vp);

	return (retval);

bad:;
	/* Failure after source was locked: drop IN_RENAME and release
	   everything we still hold, respecting lock state. */
	if (retval && doingdirectory)
		source_sp->s_nodeflags &= ~IN_RENAME;

	if (targetPar_vp == target_vp)
		vrele(targetPar_vp);
	else
		vput(targetPar_vp);

	if (target_vp)
		vput(target_vp);

	vrele(sourcePar_vp);

	if (VOP_ISLOCKED(source_vp))
		vput(source_vp);
	else
		vrele(source_vp);

	return (retval);

abortit:;
	/* Early failure: abort both pending namei ops and release references. */
	VOP_ABORTOP(targetPar_vp, target_cnp);	/* XXX, why not in NFS? */

	if (targetPar_vp == target_vp)
		vrele(targetPar_vp);
	else
		vput(targetPar_vp);

	if (target_vp)
		vput(target_vp);

	VOP_ABORTOP(sourcePar_vp, source_cnp);	/* XXX, why not in NFS? */

	vrele(sourcePar_vp);
	vrele(source_vp);

	return (retval);
}



/*
 * Mkdir system call

#% mkdir	dvp	L U U
#% mkdir	vpp	- L -
#
 vop_mkdir {
     IN WILLRELE struct vnode *dvp;
     OUT struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;

    We are responsible for freeing the namei buffer, it is done in synthfs_makenode(), unless there is
    a previous error.

*/

/*
 * Create a new synthfs directory node under dvp and apply the requested
 * attributes.  On failure the partially-created node is torn down.
 */
int
synthfs_mkdir(ap)
struct vop_mkdir_args /* {
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
	struct vattr *a_vap;
} */ *ap;
{
	int retval;
	struct vnode *dvp = ap->a_dvp;
	struct componentname *cnp = ap->a_cnp;
	int mode = MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode);
	struct vnode *vp = NULL;

	*ap->a_vpp = NULL;

	retval = synthfs_new_directory(VTOVFS(dvp), dvp, cnp->cn_nameptr, VTOSFS(dvp)->synthfs_nextid++, mode, ap->a_cnp->cn_proc, &vp);
	if (retval) goto Error_Exit;

	retval = VOP_SETATTR(vp, ap->a_vap, cnp->cn_cred, cnp->cn_proc);
	if (retval != 0) goto Error_Exit;

	*ap->a_vpp = vp;

Error_Exit:;
	if (retval != 0) {
		if (vp) synthfs_remove_directory(vp);
		VOP_ABORTOP(dvp, cnp);
	}
	vput(dvp);

	return retval;
}



/*

#% remove	dvp	L U U
#% remove	vp	L U U
#
 vop_remove {
     IN WILLRELE struct vnode *dvp;
     IN WILLRELE struct vnode *vp;
     IN struct componentname *cnp;

 */

/*
 * Remove a node (file, symlink or directory) from its parent directory
 * and reclaim its resources.  Also handles rmdir via synthfs_rmdir.
 */
int
synthfs_remove(ap)
struct vop_remove_args /* {
	struct vnode *a_dvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct synthfsnode *sp = VTOS(vp);
	struct timeval tv;
	int retval = 0;

	if ((sp->s_flags & (IMMUTABLE | APPEND)) ||
	    (VTOS(dvp)->s_flags & APPEND)) {
		retval = EPERM;
		goto out;
	};

	/* This is sort of silly right now but someday it may make sense... */
	if (sp->s_nodeflags & IN_MODIFIED) {
		tv = time;
		VOP_UPDATE(vp, &tv, &tv, 0);
	};

	/* remove the entry from the namei cache: */
	cache_purge(vp);

	/* remove entry from tree and reclaim any resources consumed: */
	switch (sp->s_type) {
	case SYNTHFS_DIRECTORY:
		synthfs_remove_directory(vp);
		break;


	case SYNTHFS_SYMLINK:
		synthfs_remove_symlink(vp);
		break;

	case SYNTHFS_FILE:
		/* Fall through to default case */

	default:
		synthfs_remove_entry(vp);
	};

out:

	if (! retval)
		VTOS(dvp)->s_nodeflags |= IN_CHANGE | IN_UPDATE;

	/* dvp may equal vp; release accordingly per the VOP_REMOVE contract. */
	if (dvp == vp) {
		vrele(vp);
	} else {
		vput(vp);
	};

	vput(dvp);
	return (retval);
}



/*
#% rmdir	dvp	L U U
#% rmdir	vp	L U U
#
 vop_rmdir {
     IN WILLRELE struct vnode *dvp;
     IN WILLRELE struct vnode *vp;
     IN struct componentname *cnp;

 */

/* rmdir shares synthfs_remove's logic; the arg structs are layout-compatible. */
int
synthfs_rmdir(ap)
	struct vop_rmdir_args /* {
	struct vnode *a_dvp;
	struct vnode *a_vp;
	struct componentname *a_cnp;
} */ *ap;
{
	DBG_VOP(("synthfs_rmdir called\n"));
	return synthfs_remove((struct vop_remove_args *)ap);
}



/*
 * synthfs_select - just say OK.  Only possible op is readdir
 *
 * Locking policy: ignore
 */
int
synthfs_select(ap)
struct vop_select_args /* {
	struct vnode *a_vp;
	int a_which;
	int a_fflags;
	struct ucred *a_cred;
	struct proc *a_p;
} */ *ap;
{
	DBG_VOP(("synthfs_select called\n"));

	return (1);
}

/*
#
#% symlink	dvp	L U U
#% symlink	vpp	- U -
#
# XXX - note that the return vnode has already been vrele'ed
#	by the filesystem layer.  To use it you must use vget,
#	possibly with a further namei.
#
 vop_symlink {
     IN WILLRELE struct vnode *dvp;
     OUT WILLRELE struct vnode **vpp;
     IN struct componentname *cnp;
     IN struct vattr *vap;
     IN char *target;

    We are responsible for freeing the namei buffer, it is done in synthfs_makenode(), unless there is
    a previous error.
+ + +*/ + +int +synthfs_symlink(ap) + struct vop_symlink_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + char *a_target; + } */ *ap; +{ + struct vnode *dvp = ap->a_dvp; + struct vnode **vpp = ap->a_vpp; + struct componentname *cnp = ap->a_cnp; + int retval; + + *vpp = NULL; + + retval = synthfs_new_symlink(VTOVFS(dvp), dvp, cnp->cn_nameptr, VTOSFS(dvp)->synthfs_nextid++, ap->a_target, ap->a_cnp->cn_proc, vpp); + if (retval) goto Error_Exit; + + VOP_UNLOCK(*vpp, 0, cnp->cn_proc); + +Error_Exit:; + + if (retval != 0) { + VOP_ABORTOP(dvp, cnp); + } + vput(dvp); + + return (retval); +} + + + +/* +# +#% readlink vp L L L +# + vop_readlink { + IN struct vnode *vp; + INOUT struct uio *uio; + IN struct ucred *cred; + */ + +int +synthfs_readlink(ap) +struct vop_readlink_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct synthfsnode *sp = VTOS(vp); + struct uio *uio = ap->a_uio; + int retval; + unsigned long count; + + if (ap->a_uio->uio_offset > sp->s_u.s.s_length) { + return 0; + }; + + if (uio->uio_offset + uio->uio_resid <= sp->s_u.s.s_length) { + count = uio->uio_resid; + } else { + count = sp->s_u.s.s_length - uio->uio_offset; + }; + retval = uiomove((void *)((unsigned char *)sp->s_u.s.s_symlinktarget + uio->uio_offset), count, uio); + return (retval); + +} + + + + + + +/* +#% readdir vp L L L +# +vop_readdir { + IN struct vnode *vp; + INOUT struct uio *uio; + IN struct ucred *cred; + INOUT int *eofflag; + OUT int *ncookies; + INOUT u_long **cookies; +*/ + + +int +synthfs_readdir(ap) +struct vop_readdir_args /* { + struct vnode *vp; + struct uio *uio; + struct ucred *cred; + int *eofflag; + int *ncookies; + u_long **cookies; +} */ *ap; +{ + struct synthfsnode *sp = VTOS(ap->a_vp); + register struct uio *uio = ap->a_uio; + off_t diroffset; /* Offset into simulated directory file */ + struct synthfsnode *entry; + + 
DBG_VOP(("\tuio_offset = %d, uio_resid = %d\n", (int) uio->uio_offset, uio->uio_resid)); + + /* We assume it's all one big buffer... */ + if (uio->uio_iovcnt > 1) { + DBG_VOP(("\tuio->uio_iovcnt = %d?\n", uio->uio_iovcnt)); + return EINVAL; + }; + + /* + NFS cookies are not supported: + */ + if ((ap->a_cookies != NULL) || (ap->a_ncookies != NULL)) { + return EINVAL; + }; + + diroffset = 0; + + /* + * We must synthesize . and .. + */ + DBG_VOP(("\tstarting ... uio_offset = %d, uio_resid = %d\n", (int) uio->uio_offset, uio->uio_resid)); + if (uio->uio_offset == diroffset) + { + DBG_VOP(("\tAdding .\n")); + diroffset += synthfs_adddirentry(sp->s_nodeid, DT_DIR, ".", uio); + DBG_VOP(("\t after adding ., uio_offset = %d, uio_resid = %d\n", (int) uio->uio_offset, uio->uio_resid)); + } + if ((uio->uio_resid > 0) && (diroffset > uio->uio_offset)) { + /* Oops - we skipped over a partial entry: at best, diroffset should've just matched uio->uio_offset */ + return EINVAL; + }; + + if (uio->uio_offset == diroffset) + { + DBG_VOP(("\tAdding ..\n")); + if (sp->s_parent != NULL) { + diroffset += synthfs_adddirentry(sp->s_parent->s_nodeid, DT_DIR, "..", uio); + } else { + diroffset += synthfs_adddirentry(sp->s_nodeid, DT_DIR, "..", uio); + } + DBG_VOP(("\t after adding .., uio_offset = %d, uio_resid = %d\n", (int) uio->uio_offset, uio->uio_resid)); + } + if ((uio->uio_resid > 0) && (diroffset > uio->uio_offset)) { + /* Oops - we skipped over a partial entry: at best, diroffset should've just matched uio->uio_offset */ + return EINVAL; + }; + + /* OK, so much for the fakes. 
Now for the "real thing": */ + TAILQ_FOREACH(entry, &sp->s_u.d.d_subnodes, s_sibling) { + if (diroffset == uio->uio_offset) { + /* Return this entry */ + diroffset += synthfs_adddirentry(entry->s_nodeid, VTTOIF(STOV(entry)->v_type), entry->s_name, uio); + }; + if ((uio->uio_resid > 0) && (diroffset > uio->uio_offset)) { + /* Oops - we skipped over a partial entry: at best, diroffset should've just matched uio->uio_offset */ + return EINVAL; + }; + }; + + if (ap->a_eofflag) + *ap->a_eofflag = (entry == NULL); /* If we ran all the way through the list, there is no more */ + + return 0; +} + + + +/* + +#% lookup dvp L ? ? +#% lookup vpp - L - + + */ + +int +synthfs_cached_lookup(ap) + struct vop_cachedlookup_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + } */ *ap; +{ + struct vnode *dp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + u_long nameiop = cnp->cn_nameiop; + u_long flags = cnp->cn_flags; + boolean_t lockparent = (flags & LOCKPARENT); + struct proc *p = cnp->cn_proc; + struct ucred *cred = cnp->cn_cred; + struct vnode *target_vp = NULL; + u_int32_t target_vnode_id; /* Capability ID of the target vnode for .. 
unlock/relock handling check */ + struct vnode **vpp = ap->a_vpp; + int result = 0; + + DBG_VOP(("synthfs_cached_lookup called, name = %s, namelen = %ld\n", ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen)); + if (flags & LOCKPARENT) DBG_VOP(("\tLOCKPARENT is set\n")); + if (flags & ISLASTCN) DBG_VOP(("\tISLASTCN is set\n")); + + *vpp = NULL; + + if (dp->v_type != VDIR) { + result = ENOTDIR; + goto Err_Exit; + }; + + if ((flags & ISLASTCN) && + (VTOVFS(dp)->mnt_flag & MNT_RDONLY) && + ((nameiop == DELETE) || (nameiop == RENAME))) { + result = EROFS; + goto Err_Exit; + }; + + result = VOP_ACCESS(dp, VEXEC, cred, cnp->cn_proc); + if (result != 0) goto Err_Exit; + + /* + * Look up an entry in the namei cache + */ + + result = cache_lookup(dp, vpp, cnp); + if (result == 0) { + /* There was no entry in the cache for this parent vnode/name pair: + do the full-blown pathname lookup + */ + return synthfs_lookup(ap); + }; + if (result == ENOENT) return result; + + /* An entry matching the parent vnode/name was found in the cache: */ + + + target_vp = *vpp; + target_vnode_id = target_vp->v_id; + if (target_vp == dp) { + /* lookup on "." */ + VREF(target_vp); + result = 0; + } else if (flags & ISDOTDOT) { + /* + * Carefully now: trying to step from child to parent; + * must release lock on child before trying to lock parent + * vnode. + */ + VOP_UNLOCK(dp, 0, p); + result = vget(target_vp, LK_EXCLUSIVE, p); + if ((result == 0) && lockparent && (flags & ISLASTCN)) { + result = vn_lock(dp, LK_EXCLUSIVE, p); + } + } else { + result = vget(target_vp, LK_EXCLUSIVE, p); + if (!lockparent || (result != 0) || !(flags & ISLASTCN)) { + VOP_UNLOCK(dp, 0, p); + }; + }; + + /* + Check to make sure the target vnode ID didn't change while we + tried to lock it: + */ + if (result == 0) { + if (target_vnode_id == target_vp->v_id) { + return 0; /* THIS IS THE NORMAL EXIT PATH */ + }; + + /* The vnode ID didn't match anymore: we've got another vnode! 
*/ + vput(target_vp); + /* Unlock the parent vnode in the cases where it should've been left locked: */ + if (lockparent && (dp != target_vp) && (flags & ISLASTCN)) { + VOP_UNLOCK(dp, 0, p); + }; + }; + + /* One last try for a successful lookup through the complete lookup path: */ + result = vn_lock(dp, LK_EXCLUSIVE, p); + if (result == 0) { + return synthfs_lookup(ap); + }; + +Err_Exit:; + return result; +} + + + +int +synthfs_lookup(ap) + struct vop_cachedlookup_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + } */ *ap; +{ + struct vnode *dp = ap->a_dvp; + struct synthfsnode *dsp = VTOS(dp); + struct componentname *cnp = ap->a_cnp; + u_long nameiop = cnp->cn_nameiop; +// char *nameptr = cnp->cn_nameptr; + u_long flags = cnp->cn_flags; + long namelen = cnp->cn_namelen; + struct proc *p = cnp->cn_proc; + struct ucred *cred = cnp->cn_cred; + struct synthfsnode *entry; + struct vnode *target_vp = NULL; + int result = 0; + boolean_t found = FALSE; + boolean_t isDot = FALSE; + boolean_t isDotDot = FALSE; + struct vnode *starting_parent = dp; + + DBG_VOP(("synthfs_lookup called, name = %s, namelen = %ld\n", ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen)); + if (flags & LOCKPARENT) DBG_VOP(("\tLOCKPARENT is set\n")); + if (flags & ISLASTCN) DBG_VOP(("\tISLASTCN is set\n")); + + *ap->a_vpp = NULL; + + if (dp->v_type != VDIR) { + result = ENOTDIR; + goto Err_Exit; + }; + + if ((flags & ISLASTCN) && + (VTOVFS(dp)->mnt_flag & MNT_RDONLY) && + ((nameiop == DELETE) || (nameiop == RENAME))) { + result = EROFS; + goto Err_Exit; + }; + + result = VOP_ACCESS(dp, VEXEC, cred, cnp->cn_proc); + if (result != 0) goto Err_Exit; + + /* first check for "." and ".." */ + if (cnp->cn_nameptr[0] == '.') { + if (namelen == 1) { + /* + "." requested + */ + isDot = TRUE; + found = TRUE; + + target_vp = dp; + VREF(target_vp); + + result = 0; + + goto Std_Exit; + } else if ((namelen == 2) && (cnp->cn_nameptr[1] == '.')) { + /* + ".." 
requested + */ + isDotDot = TRUE; + found = TRUE; + + if ((dsp->s_parent != NULL) && (dsp->s_parent != VTOS(dp))) { + target_vp = STOV(dsp->s_parent); + /* + * Special case for ".." to prevent deadlock: + * always release the parent vnode BEFORE trying to acquire + * ITS parent. This avoids deadlocking with another lookup + * starting from the target_vp trying to vget() this directory. + */ + VOP_UNLOCK(dp, 0, p); + result = vget(target_vp, LK_EXCLUSIVE | LK_RETRY, p); + if (result != 0) { + vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p); + goto Err_Exit; + } + if ((flags & LOCKPARENT) && (flags & ISLASTCN)) { + result = vn_lock(dp, LK_EXCLUSIVE, p); + // vput(target_vp); /* XXX WHY WAS THIS HERE? */ + } + } else { + target_vp = dp; + /* dp is alread locked and ref'ed */ + result = 0; + } + + goto Std_Exit; + } + } + + /* finally, just look for entries by name (making sure the entry's length + matches the cnp's namelen... */ + TAILQ_FOREACH(entry, &dsp->s_u.d.d_subnodes, s_sibling) { + if ((bcmp(cnp->cn_nameptr, entry->s_name, (unsigned)namelen) == 0) && + (*(entry->s_name + namelen) == (char)0)) { + found = TRUE; + target_vp = STOV(entry); + result = vget(target_vp, LK_EXCLUSIVE | LK_RETRY, p); /* vget is not really needed because refcount is always > 0... */ + if (result != 0) { + vrele(target_vp); + goto Err_Exit; + }; + + /* The specified entry was found and successfully acquired: */ + goto Std_Exit; + }; + }; + + found = FALSE; + +Std_Exit:; + if (found) { + if ((nameiop == DELETE) && (flags & ISLASTCN)) { + /* + * Deleting entries requires write access: + */ + result = VOP_ACCESS(dp, VWRITE, cred, p); + if (result != 0) goto Err_Exit; + + /* + * If the parent directory is "sticky" then the user must own + * the directory, or the file in it, in order to be allowed to + * delete it (unless the user is root). 
This implements + * append-only directories + */ + if ((dsp->s_mode & S_ISVTX) && + (cred->cr_uid != 0) && + (cred->cr_uid != dsp->s_uid) && + (target_vp != NULL) && + (target_vp->v_type != VLNK) && + (VTOS(target_vp)->s_uid != cred->cr_uid)) { + vput(target_vp); + result = EPERM; + goto Err_Exit; + }; + }; + + if ((nameiop == RENAME) && (flags & WANTPARENT) && (flags * ISLASTCN)) { + result = VOP_ACCESS(dp, VWRITE, cred, p); + if (result != 0) goto Err_Exit; + + if (isDot) { + vrele(target_vp); + result = EISDIR; + goto Err_Exit; + }; + }; + } else { + /* The specified entry wasn't found: */ + result = ENOENT; + + if ((flags & ISLASTCN) && + ((nameiop == CREATE) || + (nameiop == RENAME) || + ((nameiop == DELETE) && (flags & DOWHITEOUT) && (flags & ISWHITEOUT)))) { + /* Write access is required to create entries in the directory: */ + result = VOP_ACCESS(dp, VWRITE, cred, p); + if (result != 0) goto Err_Exit; + + cnp->cn_flags |= SAVENAME; + + result = EJUSTRETURN; + } + }; + + /* XXX PPD Should we do something special in case LOCKLEAF isn't set? 
*/ + if (found && !isDot && !isDotDot && (!(flags & LOCKPARENT) || !(flags & ISLASTCN))) { + VOP_UNLOCK(dp, 0, p); + }; + + *ap->a_vpp = target_vp; + +Err_Exit:; + DBG_VOP(("synthfs_lookup: result = %d.\n", result)); + if (found) { + if (target_vp) { + if (VOP_ISLOCKED(target_vp)) { + DBG_VOP(("synthfs_lookup: target_vp = 0x%08X (locked).\n", (u_long)target_vp)); + } else { + DBG_VOP(("synthfs_lookup: target_vp = 0x%08X (NOT locked?).\n", (u_long)target_vp)); + }; + } else { + DBG_VOP(("synthfs_lookup: found = true but target_vp = NULL?\n")); + }; + } else { + DBG_VOP(("synthf_lookup: target not found.\n")); + }; + if (VOP_ISLOCKED(starting_parent)) { + DBG_VOP(("synthfs_lookup: dp = %08X; starting_parent = 0x%08X (LOCKED).\n", (u_long)dp, (u_long)starting_parent)); + } else { + DBG_VOP(("synthfs_lookup: dp = %08X; starting_parent = 0x%08X (UNLOCKED).\n", (u_long)dp, (u_long)starting_parent)); + }; + + return result; +} + + + +/* + +#% pathconf vp L L L +# + vop_pathconf { + IN struct vnode *vp; + IN int name; + OUT register_t *retval; +*/ +int +synthfs_pathconf(ap) +struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; +} */ *ap; +{ + DBG_VOP(("synthfs_pathconf called\n")); + + switch (ap->a_name) + { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_NAME_MAX: + *ap->a_retval = NAME_MAX; + return (0); + case _PC_PATH_MAX: + *ap->a_retval = PATH_MAX; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_NO_TRUNC: + *ap->a_retval = 1; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + + +/* + * Update the access, modified, and node change times as specified by the + * IACCESS, IUPDATE, and ICHANGE flags respectively. The IMODIFIED flag is + * used to specify that the node needs to be updated but that the times have + * already been set. 
The access and modified times are taken from the second + * and third parameters; the node change time is always taken from the current + * time. If waitfor is set, then wait for the disk write of the node to + * complete. + */ +/* +#% update vp L L L + IN struct vnode *vp; + IN struct timeval *access; + IN struct timeval *modify; + IN int waitfor; +*/ + +int +synthfs_update(ap) + struct vop_update_args /* { + struct vnode *a_vp; + struct timeval *a_access; + struct timeval *a_modify; + int a_waitfor; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct synthfsnode *sp = VTOS(vp); + + DBG_ASSERT(sp != NULL); + DBG_ASSERT(*((int*)&vp->v_interlock) == 0); + + if (((sp->s_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) != 0) && + !(VTOVFS(ap->a_vp)->mnt_flag & MNT_RDONLY)) { + if (sp->s_nodeflags & IN_ACCESS) sp->s_accesstime = *ap->a_access; + if (sp->s_nodeflags & IN_UPDATE) sp->s_modificationtime = *ap->a_modify; + if (sp->s_nodeflags & IN_CHANGE) sp->s_changetime = time; + }; + + /* After the updates are finished, clear the flags */ + sp->s_nodeflags &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + +// DBG_ASSERT(*((int*)&ap->a_vp->v_interlock) == 0); + return 0; +} + + + +/******************************************************************************************* + + Utility/housekeeping vnode operations: + + ******************************************************************************************/ + + +/* +#% lock vp U L U +# + vop_lock { + IN struct vnode *vp; + IN int flags; + IN struct proc *p; +*/ + +int +synthfs_lock(ap) +struct vop_lock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +} */ *ap; +{ + return lockmgr(&VTOS(ap->a_vp)->s_lock, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p); +} + +/* + * Unlock an synthfsnode. 
+#% unlock vp L U L +# + vop_unlock { + IN struct vnode *vp; + IN int flags; + IN struct proc *p; + + */ +int +synthfs_unlock(ap) +struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +} */ *ap; +{ + return lockmgr(&VTOS(ap->a_vp)->s_lock, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock, ap->a_p); +} + +/* + * Check for a locked synthfsnode. +#% islocked vp = = = +# + vop_islocked { + IN struct vnode *vp; + + */ +int +synthfs_islocked(ap) +struct vop_islocked_args /* { + struct vnode *a_vp; +} */ *ap; +{ + return lockstatus(&VTOS(ap->a_vp)->s_lock); +} + + + +/* +# +#% inactive vp L U U +# + vop_inactive { + IN struct vnode *vp; + IN struct proc *p; + +*/ + +int +synthfs_inactive(ap) +struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; +} */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct proc *p = ap->a_p; + struct synthfsnode *sp = VTOS(vp); + struct timeval tv; + + if (vp->v_usecount != 0) + DBG_VOP(("synthfs_inactive: bad usecount = %d\n", vp->v_usecount )); + + /* + * Ignore nodes related to stale file handles. + */ + if (vp->v_type == VNON) + goto out; + + /* This is sort of silly but might make sense in the future: */ + if (sp->s_nodeflags & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 0); + } + +out: + VOP_UNLOCK(vp, 0, p); + /* + * If we are done with the inode, reclaim it + * so that it can be reused immediately. + */ + if (vp->v_type == VNON) { + vrecycle(vp, (struct slock *)0, p); + }; + + return 0; +} + + + +/* + * synthfs_reclaim - Reclaim a vnode so that it can be used for other purposes. 
+ * + * Locking policy: ignored + */ +int +synthfs_reclaim(ap) + struct vop_reclaim_args /* { struct vnode *a_vp; struct proc *a_p; } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct synthfsnode *sp = VTOS(vp); + void *name = sp->s_name; + + sp->s_name = NULL; + FREE(name, M_TEMP); + + vp->v_data = NULL; + FREE((void *)sp, M_SYNTHFS); + + return (0); +} diff --git a/bsd/miscfs/umapfs/umap.h b/bsd/miscfs/umapfs/umap.h new file mode 100644 index 000000000..190f6cc50 --- /dev/null +++ b/bsd/miscfs/umapfs/umap.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: umap.h,v 1.4 1995/03/29 22:09:57 briggs Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * the UCLA Ficus project. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: @(#)null_vnops.c 1.5 (Berkeley) 7/10/92 + * @(#)umap.h 8.3 (Berkeley) 1/21/94 + */ + +#define MAPFILEENTRIES 64 +#define GMAPFILEENTRIES 16 +#define NOBODY 32767 +#define NULLGROUP 65534 + +struct umap_args { + char *target; /* Target of loopback */ + int nentries; /* # of entries in user map array */ + int gnentries; /* # of entries in group map array */ + uid_t (*mapdata)[2]; /* pointer to array of user mappings */ + gid_t (*gmapdata)[2]; /* pointer to array of group mappings */ +}; + +struct umap_mount { + struct mount *umapm_vfs; + struct vnode *umapm_rootvp; /* Reference to root umap_node */ + int info_nentries; /* number of uid mappings */ + int info_gnentries; /* number of gid mappings */ + uid_t info_mapdata[MAPFILEENTRIES][2]; /* mapping data for + user mapping in ficus */ + gid_t info_gmapdata[GMAPFILEENTRIES][2]; /*mapping data for + group mapping in ficus */ +}; + +#ifdef KERNEL +/* + * A cache of vnode references + */ +struct umap_node { + LIST_ENTRY(umap_node) umap_hash; /* Hash list */ + struct vnode *umap_lowervp; /* Aliased vnode - VREFed once */ + struct vnode *umap_vnode; /* Back pointer to vnode/umap_node */ +}; + +extern int umap_node_create __P((struct mount *mp, struct vnode *target, struct vnode **vpp)); +extern u_long umap_reverse_findid __P((u_long id, u_long map[][2], int nentries)); +extern void umap_mapids __P((struct mount *v_mount, struct ucred *credp)); + +#define MOUNTTOUMAPMOUNT(mp) ((struct umap_mount *)((mp)->mnt_data)) +#define VTOUMAP(vp) ((struct umap_node *)(vp)->v_data) +#define UMAPTOV(xp) ((xp)->umap_vnode) +#ifdef UMAPFS_DIAGNOSTIC +extern struct vnode *umap_checkvp __P((struct vnode *vp, char *fil, int lno)); +#define UMAPVPTOLOWERVP(vp) umap_checkvp((vp), __FILE__, __LINE__) +#else +#define UMAPVPTOLOWERVP(vp) (VTOUMAP(vp)->umap_lowervp) +#endif + +extern int (**umap_vnodeop_p)(void *); +extern struct vfsops umap_vfsops; +#endif /* KERNEL */ diff --git a/bsd/miscfs/umapfs/umap_subr.c 
b/bsd/miscfs/umapfs/umap_subr.c new file mode 100644 index 000000000..5afa9be51 --- /dev/null +++ b/bsd/miscfs/umapfs/umap_subr.c @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: umap_subr.c,v 1.4 1994/09/20 06:43:02 cgd Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Id: lofs_subr.c, v 1.11 1992/05/30 10:05:43 jsp Exp + * @(#)umap_subr.c 8.6 (Berkeley) 1/26/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LOG2_SIZEVNODE 7 /* log2(sizeof struct vnode) */ +#define NUMAPNODECACHE 16 + +/* + * Null layer cache: + * Each cache entry holds a reference to the target vnode + * along with a pointer to the alias vnode. When an + * entry is added the target vnode is VREF'd. When the + * alias is removed the target vnode is vrele'd. 
+ */ + +#define UMAP_NHASH(vp) \ + (&umap_node_hashtbl[(((u_long)vp)>>LOG2_SIZEVNODE) & umap_node_hash]) +LIST_HEAD(umap_node_hashhead, umap_node) *umap_node_hashtbl; +u_long umap_node_hash; + +/* + * Initialise cache headers + */ +umapfs_init() +{ + +#ifdef UMAPFS_DIAGNOSTIC + printf("umapfs_init\n"); /* printed during system boot */ +#endif + umap_node_hashtbl = hashinit(NUMAPNODECACHE, M_CACHE, &umap_node_hash); +} + +/* + * umap_findid is called by various routines in umap_vnodeops.c to + * find a user or group id in a map. + */ +static u_long +umap_findid(id, map, nentries) + u_long id; + u_long map[][2]; + int nentries; +{ + int i; + + /* Find uid entry in map */ + i = 0; + while ((ilh_first; a != 0; a = a->umap_hash.le_next) { + if (a->umap_lowervp == targetvp && + a->umap_vnode->v_mount == mp) { + vp = UMAPTOV(a); + /* + * We need vget for the VXLOCK + * stuff, but we don't want to lock + * the lower node. + */ + if (vget(vp, 0, current_proc())) { +#ifdef UMAPFS_DIAGNOSTIC + printf ("umap_node_find: vget failed.\n"); +#endif + goto loop; + } + return (vp); + } + } + +#ifdef UMAPFS_DIAGNOSTIC + printf("umap_node_find(%x, %x): NOT found\n", mp, targetvp); +#endif + + return (0); +} + +/* + * Make a new umap_node node. + * Vp is the alias vnode, lowervp is the target vnode. + * Maintain a reference to lowervp. 
+ */
+static int
+umap_node_alloc(mp, lowervp, vpp)
+	struct mount *mp;
+	struct vnode *lowervp;
+	struct vnode **vpp;
+{
+	struct umap_node_hashhead *hd;
+	struct umap_node *xp;
+	struct vnode *vp, *nvp;
+	int error;
+	extern int (**dead_vnodeop_p)(void *);
+	struct specinfo *sp = (struct specinfo *)0;
+
+	/* Device nodes need a specinfo; allocate it up front so failure is cheap: */
+	if (lowervp->v_type == VBLK || lowervp->v_type == VCHR)
+		MALLOC_ZONE(sp, struct specinfo *, sizeof(struct specinfo),
+			M_VNODE, M_WAITOK);
+
+	MALLOC(xp, struct umap_node *, sizeof(struct umap_node), M_TEMP,
+		M_WAITOK);
+	if (error = getnewvnode(VT_UMAP, mp, umap_vnodeop_p, &vp)) {
+		FREE(xp, M_TEMP);
+		if (sp)
+			FREE_ZONE(sp, sizeof (struct specinfo), M_VNODE);
+		return (error);
+	}
+	vp->v_type = lowervp->v_type;
+
+	if (vp->v_type == VBLK || vp->v_type == VCHR) {
+		vp->v_specinfo = sp;
+		vp->v_rdev = lowervp->v_rdev;
+	}
+
+	vp->v_data = xp;
+	xp->umap_vnode = vp;
+	xp->umap_lowervp = lowervp;
+	/*
+	 * Before we insert our new node onto the hash chains,
+	 * check to see if someone else has beaten us to it.
+	 * (umap_node_find takes the mount point as well as the lower
+	 * vnode; it was previously being called with only one argument.)
+	 */
+	if (nvp = umap_node_find(mp, lowervp)) {
+		*vpp = nvp;
+
+		/* free the substructures we've allocated. */
+		FREE(xp, M_TEMP);
+		if (sp) {
+			vp->v_specinfo = (struct specinfo *)0;
+			FREE_ZONE(sp, sizeof (struct specinfo), M_VNODE);
+		}
+
+		vp->v_type = VBAD;		/* node is discarded */
+		vp->v_op = dead_vnodeop_p;	/* so ops will still work */
+		vrele(vp);			/* get rid of it. */
+		return (0);
+	}
+
+	/*
+	 * XXX if it's a device node, it needs to be checkalias()ed.
+	 * however, for locking reasons, that's just not possible.
+	 * so we have to do most of the dirty work inline.  Note that
+	 * this is a limited case; we know that there's going to be
+	 * an alias, and we know that that alias will be a "real"
+	 * device node, i.e. not tagged VT_NON.
+ */ + if (vp->v_type == VBLK || vp->v_type == VCHR) { + struct vnode *cvp, **cvpp; + + cvpp = &speclisth[SPECHASH(vp->v_rdev)]; +loop: + for (cvp = *cvpp; cvp; cvp = cvp->v_specnext) { + if (vp->v_rdev != cvp->v_rdev || + vp->v_type != cvp->v_type) + continue; + + /* + * Alias, but not in use, so flush it out. + */ + if (cvp->v_usecount == 0) { + vgone(cvp); + goto loop; + } + if (vget(cvp, 0, current_proc())) /* can't lock; will die! */ + goto loop; + break; + } + + vp->v_hashchain = cvpp; + vp->v_specnext = *cvpp; + vp->v_specflags = 0; + *cvpp = vp; +#if DIAGNOSTIC + if (cvp == NULLVP) + panic("umap_node_alloc: no alias for device"); +#endif + vp->v_flag |= VALIASED; + cvp->v_flag |= VALIASED; + vrele(cvp); + } + /* XXX end of transmogrified checkalias() */ + + if (vp->v_type == VREG) + ubc_info_init(); + + *vpp = vp; + VREF(lowervp); /* Extra VREF will be vrele'd in umap_node_create */ + hd = UMAP_NHASH(lowervp); + LIST_INSERT_HEAD(hd, xp, umap_hash); + return (0); +} + + +/* + * Try to find an existing umap_node vnode refering + * to it, otherwise make a new umap_node vnode which + * contains a reference to the target vnode. + */ +int +umap_node_create(mp, targetvp, newvpp) + struct mount *mp; + struct vnode *targetvp; + struct vnode **newvpp; +{ + struct vnode *aliasvp; + + if (aliasvp = umap_node_find(mp, targetvp)) { + /* + * Take another reference to the alias vnode + */ +#ifdef UMAPFS_DIAGNOSTIC + vprint("umap_node_create: exists", ap->umap_vnode); +#endif + /* VREF(aliasvp); */ + } else { + int error; + + /* + * Get new vnode. + */ +#ifdef UMAPFS_DIAGNOSTIC + printf("umap_node_create: create new alias vnode\n"); +#endif + /* + * Make new vnode reference the umap_node. 
+ */ + if (error = umap_node_alloc(mp, targetvp, &aliasvp)) + return (error); + + /* + * aliasvp is already VREF'd by getnewvnode() + */ + } + + vrele(targetvp); + +#ifdef UMAPFS_DIAGNOSTIC + vprint("umap_node_create: alias", aliasvp); + vprint("umap_node_create: target", targetvp); +#endif + + *newvpp = aliasvp; + return (0); +} + +#ifdef UMAPFS_DIAGNOSTIC +int umap_checkvp_barrier = 1; +struct vnode * +umap_checkvp(vp, fil, lno) + struct vnode *vp; + char *fil; + int lno; +{ + struct umap_node *a = VTOUMAP(vp); +#if 0 + /* + * Can't do this check because vop_reclaim runs + * with funny vop vector. + */ + if (vp->v_op != umap_vnodeop_p) { + printf ("umap_checkvp: on non-umap-node\n"); + while (umap_checkvp_barrier) /*WAIT*/ ; + panic("umap_checkvp"); + } +#endif + if (a->umap_lowervp == NULL) { + /* Should never happen */ + int i; u_long *p; + printf("vp = %x, ZERO ptr\n", vp); + for (p = (u_long *) a, i = 0; i < 8; i++) + printf(" %x", p[i]); + printf("\n"); + /* wait for debugger */ + while (umap_checkvp_barrier) /*WAIT*/ ; + panic("umap_checkvp"); + } + if (a->umap_lowervp->v_usecount < 1) { + int i; u_long *p; + printf("vp = %x, unref'ed lowervp\n", vp); + for (p = (u_long *) a, i = 0; i < 8; i++) + printf(" %x", p[i]); + printf("\n"); + /* wait for debugger */ + while (umap_checkvp_barrier) /*WAIT*/ ; + panic ("umap with unref'ed lowervp"); + } +#if 0 + printf("umap %x/%d -> %x/%d [%s, %d]\n", + a->umap_vnode, a->umap_vnode->v_usecount, + a->umap_lowervp, a->umap_lowervp->v_usecount, + fil, lno); +#endif + return (a->umap_lowervp); +} +#endif + +/* umap_mapids maps all of the ids in a credential, both user and group. 
*/ + +void +umap_mapids(v_mount, credp) + struct mount *v_mount; + struct ucred *credp; +{ + int i, unentries, gnentries; + uid_t uid, *usermap; + gid_t gid, *groupmap; + + unentries = MOUNTTOUMAPMOUNT(v_mount)->info_nentries; + usermap = &(MOUNTTOUMAPMOUNT(v_mount)->info_mapdata[0][0]); + gnentries = MOUNTTOUMAPMOUNT(v_mount)->info_gnentries; + groupmap = &(MOUNTTOUMAPMOUNT(v_mount)->info_gmapdata[0][0]); + + /* Find uid entry in map */ + + uid = (uid_t) umap_findid(credp->cr_uid, usermap, unentries); + + if (uid != -1) + credp->cr_uid = uid; + else + credp->cr_uid = (uid_t) NOBODY; + +#ifdef notdef + /* cr_gid is the same as cr_groups[0] in 4BSD */ + + /* Find gid entry in map */ + + gid = (gid_t) umap_findid(credp->cr_gid, groupmap, gnentries); + + if (gid != -1) + credp->cr_gid = gid; + else + credp->cr_gid = NULLGROUP; +#endif + + /* Now we must map each of the set of groups in the cr_groups + structure. */ + + i = 0; + while (credp->cr_groups[i] != 0) { + gid = (gid_t) umap_findid(credp->cr_groups[i], + groupmap, gnentries); + + if (gid != -1) + credp->cr_groups[i++] = gid; + else + credp->cr_groups[i++] = NULLGROUP; + } +} diff --git a/bsd/miscfs/umapfs/umap_vfsops.c b/bsd/miscfs/umapfs/umap_vfsops.c new file mode 100644 index 000000000..2c60387fd --- /dev/null +++ b/bsd/miscfs/umapfs/umap_vfsops.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: umap_vfsops.c,v 1.7 1995/03/09 12:05:59 mycroft Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * the UCLA Ficus project. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)null_vfsops.c 1.5 (Berkeley) 7/10/92 + * @(#)umap_vfsops.c 8.3 (Berkeley) 1/21/94 + */ + +/* + * Umap Layer + * (See mount_umap(8) for a description of this layer.) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Mount umap layer + */ +int +umapfs_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + struct umap_args args; + struct vnode *lowerrootvp, *vp; + struct vnode *umapm_rootvp; + struct umap_mount *amp; + size_t size; + int error; + +#ifdef UMAPFS_DIAGNOSTIC + printf("umapfs_mount(mp = %x)\n", mp); +#endif + + /* + * Update is a no-op + */ + if (mp->mnt_flag & MNT_UPDATE) { + return (EOPNOTSUPP); + /* return (VFS_MOUNT(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, path, data, ndp, p));*/ + } + + /* + * Get argument + */ + if (error = copyin(data, (caddr_t)&args, sizeof(struct umap_args))) + return (error); + + /* + * Find lower node + */ + NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT|LOCKLEAF, + UIO_USERSPACE, args.target, p); + if (error = namei(ndp)) + return (error); + + /* + * Sanity check on lower vnode + */ + lowerrootvp = ndp->ni_vp; +#ifdef UMAPFS_DIAGNOSTIC + printf("vp = %x, check for VDIR...\n", lowerrootvp); +#endif + vrele(ndp->ni_dvp); + ndp->ni_dvp = 0; + + if (lowerrootvp->v_type != VDIR) { + vput(lowerrootvp); + return (EINVAL); + } + +#ifdef UMAPFS_DIAGNOSTIC + printf("mp = %x\n", mp); 
+#endif + +// amp = (struct umap_mount *) malloc(sizeof(struct umap_mount), +// M_UFSMNT, M_WAITOK); /* XXX */ + MALLOC(amp, struct umap_mount *, sizeof(struct umap_mount), + M_UFSMNT, M_WAITOK); + + /* + * Save reference to underlying FS + */ + amp->umapm_vfs = lowerrootvp->v_mount; + + /* + * Now copy in the number of entries and maps for umap mapping. + */ + amp->info_nentries = args.nentries; + amp->info_gnentries = args.gnentries; + error = copyin(args.mapdata, (caddr_t)amp->info_mapdata, + 2*sizeof(u_long)*args.nentries); + if (error) + return (error); + +#ifdef UMAP_DIAGNOSTIC + printf("umap_mount:nentries %d\n",args.nentries); + for (i = 0; i < args.nentries; i++) + printf(" %d maps to %d\n", amp->info_mapdata[i][0], + amp->info_mapdata[i][1]); +#endif + + error = copyin(args.gmapdata, (caddr_t)amp->info_gmapdata, + 2*sizeof(u_long)*args.nentries); + if (error) + return (error); + +#ifdef UMAP_DIAGNOSTIC + printf("umap_mount:gnentries %d\n",args.gnentries); + for (i = 0; i < args.gnentries; i++) + printf(" group %d maps to %d\n", + amp->info_gmapdata[i][0], + amp->info_gmapdata[i][1]); +#endif + + + /* + * Save reference. Each mount also holds + * a reference on the root vnode. + */ + error = umap_node_create(mp, lowerrootvp, &vp); + /* + * Unlock the node (either the lower or the alias) + */ + VOP_UNLOCK(vp); + /* + * Make sure the node alias worked + */ + if (error) { + vrele(lowerrootvp); + free(amp, M_UFSMNT); /* XXX */ + return (error); + } + + /* + * Keep a held reference to the root vnode. + * It is vrele'd in umapfs_unmount. 
+ */ + umapm_rootvp = vp; + umapm_rootvp->v_flag |= VROOT; + amp->umapm_rootvp = umapm_rootvp; + if (UMAPVPTOLOWERVP(umapm_rootvp)->v_mount->mnt_flag & MNT_LOCAL) + mp->mnt_flag |= MNT_LOCAL; + mp->mnt_data = (qaddr_t) amp; + getnewfsid(mp, makefstype(MOUNT_LOFS)); + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + (void) copyinstr(args.target, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); +#ifdef UMAPFS_DIAGNOSTIC + printf("umapfs_mount: lower %s, alias at %s\n", + mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname); +#endif + return (0); +} + +/* + * VFS start. Nothing needed here - the start routine + * on the underlying filesystem will have been called + * when that filesystem was mounted. + */ +int +umapfs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); + /* return (VFS_START(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, flags, p)); */ +} + +/* + * Free reference to umap layer + */ +int +umapfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct vnode *umapm_rootvp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp; + int error; + int flags = 0; + extern int doforce; + +#ifdef UMAPFS_DIAGNOSTIC + printf("umapfs_unmount(mp = %x)\n", mp); +#endif + + if (mntflags & MNT_FORCE) { + /* lofs can never be rootfs so don't check for it */ + if (!doforce) + return (EINVAL); + flags |= FORCECLOSE; + } + + /* + * Clear out buffer cache. I don't think we + * ever get anything cached at this level at the + * moment, but who knows... 
+ */ +#ifdef notyet + mntflushbuf(mp, 0); + if (mntinvalbuf(mp, 1)) + return (EBUSY); +#endif + if (umapm_rootvp->v_usecount > 1) + return (EBUSY); + if (error = vflush(mp, umapm_rootvp, flags)) + return (error); + +#ifdef UMAPFS_DIAGNOSTIC + vprint("alias root of lower", umapm_rootvp); +#endif + /* + * Release reference on underlying root vnode + */ + vrele(umapm_rootvp); + /* + * And blow it away for future re-use + */ + vgone(umapm_rootvp); + /* + * Finally, throw away the umap_mount structure + */ + free(mp->mnt_data, M_UFSMNT); /* XXX */ + mp->mnt_data = 0; + return (0); +} + +int +umapfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct vnode *vp; + +#ifdef UMAPFS_DIAGNOSTIC + printf("umapfs_root(mp = %x, vp = %x->%x)\n", mp, + MOUNTTOUMAPMOUNT(mp)->umapm_rootvp, + UMAPVPTOLOWERVP(MOUNTTOUMAPMOUNT(mp)->umapm_rootvp) + ); +#endif + + /* + * Return locked reference to root. + */ + vp = MOUNTTOUMAPMOUNT(mp)->umapm_rootvp; + VREF(vp); + VOP_LOCK(vp); + *vpp = vp; + return (0); +} + +int +umapfs_quotactl(mp, cmd, uid, arg, p) + struct mount *mp; + int cmd; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + + return (VFS_QUOTACTL(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, cmd, uid, arg, p)); +} + +int +umapfs_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + int error; + struct statfs mstat; + +#ifdef UMAPFS_DIAGNOSTIC + printf("umapfs_statfs(mp = %x, vp = %x->%x)\n", mp, + MOUNTTOUMAPMOUNT(mp)->umapm_rootvp, + UMAPVPTOLOWERVP(MOUNTTOUMAPMOUNT(mp)->umapm_rootvp) + ); +#endif + + bzero(&mstat, sizeof(mstat)); + + error = VFS_STATFS(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, &mstat, p); + if (error) + return (error); + + /* now copy across the "interesting" information and fake the rest */ + sbp->f_type = mstat.f_type; + sbp->f_flags = mstat.f_flags; + sbp->f_bsize = mstat.f_bsize; + sbp->f_iosize = mstat.f_iosize; + sbp->f_blocks = mstat.f_blocks; + sbp->f_bfree = mstat.f_bfree; + sbp->f_bavail = mstat.f_bavail; + sbp->f_files = 
mstat.f_files; + sbp->f_ffree = mstat.f_ffree; + if (sbp != &mp->mnt_stat) { + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN); + sbp->f_fstypename[MFSNAMELEN] = '\0'; + return (0); +} + +int +umapfs_sync(mp, waitfor, cred, p) + struct mount *mp; + int waitfor; + struct ucred *cred; + struct proc *p; +{ + + /* + * XXX - Assumes no data cached at umap layer. + */ + return (0); +} + +int +umapfs_vget(mp, ino, vpp) + struct mount *mp; + ino_t ino; + struct vnode **vpp; +{ + + return (VFS_VGET(MOUNTTOUMAPMOUNT(mp)->umapm_vfs, ino, vpp)); +} + +int +umapfs_fhtovp(mp, fidp, nam, vpp, exflagsp, credanonp) + struct mount *mp; + struct fid *fidp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred**credanonp; +{ + + return (EOPNOTSUPP); +} + +int +umapfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + + return (EOPNOTSUPP); +} + +int umapfs_init __P((void)); + +struct vfsops umap_vfsops = { + MOUNT_UMAP, + umapfs_mount, + umapfs_start, + umapfs_unmount, + umapfs_root, + umapfs_quotactl, + umapfs_statfs, + umapfs_sync, + umapfs_vget, + umapfs_fhtovp, + umapfs_vptofh, + umapfs_init, +}; diff --git a/bsd/miscfs/umapfs/umap_vnops.c b/bsd/miscfs/umapfs/umap_vnops.c new file mode 100644 index 000000000..47600b41d --- /dev/null +++ b/bsd/miscfs/umapfs/umap_vnops.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: umap_vnops.c,v 1.3 1994/08/19 11:25:42 mycroft Exp $ */ + +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software donated to Berkeley by + * the UCLA Ficus project. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)umap_vnops.c	8.3 (Berkeley) 1/5/94
 */

/*
 * Umap Layer
 */

/*
 * NOTE(review): the header names on the #include lines below were lost
 * in extraction -- restore them from the original umap_vnops.c.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include


int umap_bug_bypass = 0;   /* for debugging: enables bypass printf'ing */

/*
 * This is the 10-Apr-92 bypass routine.
 * See null_vnops.c:null_bypass for more details.
 *
 * Substitutes the lower-layer vnodes and uid/gid-mapped credential
 * copies into a generic vnode-operation argument block, invokes the
 * operation on the lower layer, then restores the original arguments
 * (the save/restore protocol below is strictly ordered -- do not
 * reorder statements).
 */
int
umap_bypass(ap)
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;

	} */ *ap;
{
	extern int (**umap_vnodeop_p)(void *);	/* not extern, really "forward" */
	struct ucred **credpp = 0, *credp = 0;
	struct ucred *savecredp, *savecompcredp = 0;
	struct ucred *compcredp = 0;
	struct vnode **this_vp_p;
	int error;
	struct vnode *old_vps[VDESC_MAX_VPS];	/* originals, for restore */
	struct vnode *vp1 = 0;			/* first vnode: supplies the maps */
	struct vnode **vps_p[VDESC_MAX_VPS];
	struct vnode ***vppp;
	struct vnodeop_desc *descp = ap->a_desc;
	int reles, i;
	struct componentname **compnamepp = 0;

	if (umap_bug_bypass)
		printf ("umap_bypass: %s\n", descp->vdesc_name);

#ifdef SAFETY
	/*
	 * We require at least one vp.
	 */
	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic ("umap_bypass: no vp's in map.\n");
#endif

	/*
	 * Map the vnodes going in.
	 * Later, we'll invoke the operation based on
	 * the first mapped vnode's operation vector.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		vps_p[i] = this_vp_p =
			VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[i], ap);

		/* remember the first vnode: its mount holds the id maps */
		if (i == 0) {
			vp1 = *vps_p[0];
		}

		/*
		 * We're not guaranteed that any but the first vnode
		 * are of our type.  Check for and don't map any
		 * that aren't.  (Must map first vp or vclean fails.)
		 */

		if (i && (*this_vp_p)->v_op != umap_vnodeop_p) {
			old_vps[i] = NULL;
		} else {
			old_vps[i] = *this_vp_p;
			*(vps_p[i]) = UMAPVPTOLOWERVP(*this_vp_p);
			/* the lower op consumes a reference for WILLRELE args */
			if (reles & 1)
				VREF(*this_vp_p);
		}

	}

	/*
	 * Fix the credentials. (That's the purpose of this layer.)
	 */

	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {

		credpp = VOPARG_OFFSETTO(struct ucred**,
		    descp->vdesc_cred_offset, ap);

		/* Save old values */

		savecredp = *credpp;
		if (savecredp != NOCRED)
			*credpp = crdup(savecredp);	/* map a private copy */
		credp = *credpp;

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user was %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp1->v_mount, credp);

		if (umap_bug_bypass && credp->cr_uid != 0)
			printf("umap_bypass: user now %d, group %d\n",
			    credp->cr_uid, credp->cr_gid);
	}

	/* BSD often keeps a credential in the componentname structure
	 * for speed.  If there is one, it better get mapped, too.
	 */

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {

		compnamepp = VOPARG_OFFSETTO(struct componentname**,
		    descp->vdesc_componentname_offset, ap);

		savecompcredp = (*compnamepp)->cn_cred;
		if (savecompcredp != NOCRED)
			(*compnamepp)->cn_cred = crdup(savecompcredp);
		compcredp = (*compnamepp)->cn_cred;

		if (umap_bug_bypass && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user was %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);

		/* Map all ids in the credential structure. */

		umap_mapids(vp1->v_mount, compcredp);

		if (umap_bug_bypass && compcredp->cr_uid != 0)
			printf("umap_bypass: component credit user now %d, group %d\n",
			    compcredp->cr_uid, compcredp->cr_gid);
	}

	/*
	 * Call the operation on the lower layer
	 * with the modified argument structure.
	 */
	error = VCALL(*(vps_p[0]), descp->vdesc_offset, ap);

	/*
	 * Maintain the illusion of call-by-value
	 * by restoring vnodes in the argument structure
	 * to their original value.
	 */
	reles = descp->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; reles >>= 1, i++) {
		if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
			break;   /* bail out at end of list */
		if (old_vps[i]) {
			*(vps_p[i]) = old_vps[i];
			if (reles & 1)
				vrele(*(vps_p[i]));
		};
	};

	/*
	 * Map the possible out-going vpp
	 * (Assumes that the lower layer always returns
	 * a VREF'ed vpp unless it gets an error.)
	 */
	if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET &&
	    !(descp->vdesc_flags & VDESC_NOMAP_VPP) &&
	    !error) {
		if (descp->vdesc_flags & VDESC_VPP_WILLRELE)
			goto out;
		vppp = VOPARG_OFFSETTO(struct vnode***,
		    descp->vdesc_vpp_offset, ap);
		error = umap_node_create(old_vps[0]->v_mount, **vppp, *vppp);
	};

 out:
	/*
	 * Free duplicate cred structure and restore old one.
 */
	if (descp->vdesc_cred_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && credp && credp->cr_uid != 0)
			printf("umap_bypass: returning-user was %d\n",
			    credp->cr_uid);

		if (savecredp != NOCRED) {
			if (credp != NOCRED)
				crfree(credp);	/* drop the mapped copy */
			*credpp = savecredp;
			if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
				printf("umap_bypass: returning-user now %d\n\n",
				    savecredp->cr_uid);
		}
	}

	if (descp->vdesc_componentname_offset != VDESC_NO_OFFSET) {
		if (umap_bug_bypass && compcredp && compcredp->cr_uid != 0)
			printf("umap_bypass: returning-component-user was %d\n",
			    compcredp->cr_uid);

		if (savecompcredp != NOCRED) {
			if (compcredp != NOCRED)
				crfree(compcredp);
			(*compnamepp)->cn_cred = savecompcredp;
			if (umap_bug_bypass && credpp && (*credpp)->cr_uid != 0)
				printf("umap_bypass: returning-component-user now %d\n",
				    savecompcredp->cr_uid);
		}
	}

	return (error);
}


/*
 * We handle getattr to change the fsid, and to reverse-map the uid
 * and gid returned by the lower layer into their local values.
 */
int
umap_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	uid_t uid;
	gid_t gid;
	int error, tmpid, nentries, gnentries;
	uid_t (*mapdata)[2];
	gid_t (*gmapdata)[2];
	struct vnode **vp1p;
	struct vnodeop_desc *descp = ap->a_desc;

	if (error = umap_bypass(ap))
		return (error);
	/* Requires that arguments be restored. */
	ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/*
	 * Umap needs to map the uid and gid returned by a stat
	 * into the proper values for this site.  This involves
	 * finding the returned uid in the mapping information,
	 * translating it into the uid on the other end,
	 * and filling in the proper field in the vattr
	 * structure pointed to by ap->a_vap.  The group
	 * is easier, since currently all groups will be
	 * translate to the NULLGROUP.
	 */

	/* Find entry in map */

	uid = ap->a_vap->va_uid;
	gid = ap->a_vap->va_gid;
	if (umap_bug_bypass)
		printf("umap_getattr: mapped uid = %d, mapped gid = %d\n", uid,
		    gid);

	vp1p = VOPARG_OFFSETTO(struct vnode**, descp->vdesc_vp_offsets[0], ap);
	nentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_nentries;
	mapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_mapdata);
	gnentries = MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gnentries;
	gmapdata = (MOUNTTOUMAPMOUNT((*vp1p)->v_mount)->info_gmapdata);

	/* Reverse map the uid for the vnode.  Since it's a reverse
	   map, we can't use umap_mapids() to do it. */

	tmpid = umap_reverse_findid(uid, mapdata, nentries);

	if (tmpid != -1) {
		ap->a_vap->va_uid = (uid_t) tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original uid = %d\n", uid);
	} else
		/* unmapped ids are presented as "nobody" */
		ap->a_vap->va_uid = (uid_t) NOBODY;

	/* Reverse map the gid for the vnode. */

	tmpid = umap_reverse_findid(gid, gmapdata, gnentries);

	if (tmpid != -1) {
		ap->a_vap->va_gid = (gid_t) tmpid;
		if (umap_bug_bypass)
			printf("umap_getattr: original gid = %d\n", gid);
	} else
		ap->a_vap->va_gid = (gid_t) NULLGROUP;

	return (0);
}

/*
 * Inactive is deliberately NOT bypassed to the lower layer.
 */
int
umap_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our umap_node is in the
	 * cache and reusable.
	 *
	 */
	return (0);
}

/*
 * Detach from the lower vnode and free this node's private data.
 */
int
umap_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct umap_node *xp = VTOUMAP(vp);
	struct vnode *lowervp = xp->umap_lowervp;

	/* After this assignment, this node will not be re-used.
*/ + xp->umap_lowervp = NULL; + LIST_REMOVE(xp, umap_hash); + FREE(vp->v_data, M_TEMP); + vp->v_data = NULL; + vrele(lowervp); + return (0); +} + +int +umap_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + struct buf *bp = ap->a_bp; + int error; + struct vnode *savedvp; + + savedvp = bp->b_vp; + bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp); + + error = VOP_STRATEGY(ap->a_bp); + + bp->b_vp = savedvp; + + return (error); +} + +int +umap_bwrite(ap) + struct vop_bwrite_args /* { + struct buf *a_bp; + } */ *ap; +{ + struct buf *bp = ap->a_bp; + int error; + struct vnode *savedvp; + + savedvp = bp->b_vp; + bp->b_vp = UMAPVPTOLOWERVP(bp->b_vp); + + error = VOP_BWRITE(ap->a_bp); + + bp->b_vp = savedvp; + + return (error); +} + + +int +umap_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + printf("\ttag VT_UMAPFS, vp=%x, lowervp=%x\n", vp, UMAPVPTOLOWERVP(vp)); + return (0); +} + +int +umap_rename(ap) + struct vop_rename_args /* { + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; + } */ *ap; +{ + int error; + struct componentname *compnamep; + struct ucred *compcredp, *savecompcredp; + struct vnode *vp; + + /* + * Rename is irregular, having two componentname structures. + * We need to map the cre in the second structure, + * and then bypass takes care of the rest. + */ + + vp = ap->a_fdvp; + compnamep = ap->a_tcnp; + compcredp = compnamep->cn_cred; + + savecompcredp = compcredp; + compcredp = compnamep->cn_cred = crdup(savecompcredp); + + if (umap_bug_bypass && compcredp->cr_uid != 0) + printf("umap_rename: rename component credit user was %d, group %d\n", + compcredp->cr_uid, compcredp->cr_gid); + + /* Map all ids in the credential structure. 
*/ + + umap_mapids(vp->v_mount, compcredp); + + if (umap_bug_bypass && compcredp->cr_uid != 0) + printf("umap_rename: rename component credit user now %d, group %d\n", + compcredp->cr_uid, compcredp->cr_gid); + + error = umap_bypass(ap); + + /* Restore the additional mapped componentname cred structure. */ + + crfree(compcredp); + compnamep->cn_cred = savecompcredp; + + return error; +} + +/* + * Global vfs data structures + */ +/* + * XXX - strategy, bwrite are hand coded currently. They should + * go away with a merged buffer/block cache. + * + */ + +#define VOPFUNC int (*)(void *) + +int (**umap_vnodeop_p)(void *); +struct vnodeopv_entry_desc umap_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)umap_bypass }, + { &vop_getattr_desc, (VOPFUNC)umap_getattr }, + { &vop_inactive_desc, (VOPFUNC)umap_inactive }, + { &vop_reclaim_desc, (VOPFUNC)umap_reclaim }, + { &vop_print_desc, (VOPFUNC)umap_print }, + { &vop_rename_desc, (VOPFUNC)umap_rename }, + { &vop_strategy_desc, (VOPFUNC)umap_strategy }, + { &vop_bwrite_desc, (VOPFUNC)umap_bwrite }, + + { (struct vnodeop_desc*) NULL, (int(*)()) NULL } +}; +struct vnodeopv_desc umap_vnodeop_opv_desc = + { &umap_vnodeop_p, umap_vnodeop_entries }; diff --git a/bsd/miscfs/union/Makefile b/bsd/miscfs/union/Makefile new file mode 100644 index 000000000..72ccd7707 --- /dev/null +++ b/bsd/miscfs/union/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + union.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = miscfs/union + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = miscfs/union + + +include $(MakeInc_rule) +include 
$(MakeInc_dir) + + diff --git a/bsd/miscfs/union/union.h b/bsd/miscfs/union/union.h new file mode 100644 index 000000000..9181e8d51 --- /dev/null +++ b/bsd/miscfs/union/union.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1994 The Regents of the University of California. + * Copyright (c) 1994 Jan-Simon Pendry. + * All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)union.h	8.9 (Berkeley) 12/10/94
 */

/* Mount-time arguments passed in from mount_union(8). */
struct union_args {
	char		*target;	/* Target of loopback  */
	int		mntflags;	/* Options on the mount */
};

/*
 * NOTE(review): the UNMNT_ABOVE comment below reads "below", which
 * looks like a copy/paste typo inherited from 4.4BSD -- confirm the
 * intended sense ("above") before changing it.
 */
#define UNMNT_ABOVE	0x0001		/* Target appears below mount point */
#define UNMNT_BELOW	0x0002		/* Target appears below mount point */
#define UNMNT_REPLACE	0x0003		/* Target replaces mount point */
#define UNMNT_OPMASK	0x0003

/* Per-mount private data for a union mount. */
struct union_mount {
	struct vnode	*um_uppervp;
	struct vnode	*um_lowervp;
	struct ucred	*um_cred;	/* Credentials of user calling mount */
	int		um_cmode;	/* cmask from mount process */
	int		um_op;		/* Operation mode */
};

#ifdef KERNEL

/*
 * DEFDIRMODE is the mode bits used to create a shadow directory.
 */
#define VRWXMODE (VREAD|VWRITE|VEXEC)
#define VRWMODE (VREAD|VWRITE)
#define UN_DIRMODE ((VRWXMODE)|(VRWXMODE>>3)|(VRWXMODE>>6))
#define UN_FILEMODE ((VRWMODE)|(VRWMODE>>3)|(VRWMODE>>6))

/*
 * A cache of vnode references
 */
struct union_node {
	LIST_ENTRY(union_node)	un_cache;	/* Hash chain */
	struct vnode		*un_vnode;	/* Back pointer */
	struct vnode	        *un_uppervp;	/* overlaying object */
	struct vnode	        *un_lowervp;	/* underlying object */
	struct vnode		*un_dirvp;	/* Parent dir of uppervp */
	struct vnode		*un_pvp;	/* Parent vnode */
	char			*un_path;	/* saved component name */
	int			un_hash;	/* saved un_path hash value */
	int			un_openl;	/* # of opens on lowervp */
	unsigned int		un_flags;	/* UN_* flags below */
	struct vnode		**un_dircache;	/* cached union stack */
	off_t			un_uppersz;	/* size of upper object */
	off_t			un_lowersz;	/* size of lower object */
#if DIAGNOSTIC
	pid_t			un_pid;
#endif
};

/* values for un_flags (UN_WANT/UN_LOCKED also flag unvplock buckets) */
#define UN_WANT		0x01
#define UN_LOCKED	0x02
#define UN_ULOCK	0x04		/* Upper node is locked */
#define UN_KLOCK	0x08		/* Keep upper node locked on vput */
#define UN_CACHED	0x10		/* In union cache */

extern int union_allocvp __P((struct vnode **, struct mount *,
				struct vnode *, struct vnode *,
				struct componentname *, struct vnode *,
				struct vnode *, int));
extern int union_copyfile __P((struct vnode *, struct vnode *,
					struct ucred *, struct proc *));
extern int union_copyup __P((struct union_node *, int, struct ucred *,
				struct proc *));
extern int union_dowhiteout __P((struct union_node *, struct ucred *,
					struct proc *));
extern int union_mkshadow __P((struct union_mount *, struct vnode *,
				struct componentname *, struct vnode **));
extern int union_mkwhiteout __P((struct union_mount *, struct vnode *,
				struct componentname *, char *));
extern int union_vn_create __P((struct vnode **, struct union_node *,
				struct proc *));
extern int union_cn_close __P((struct vnode *, int, struct ucred *,
				struct proc *));
extern void union_removed_upper __P((struct union_node *un));
extern struct vnode *union_lowervp __P((struct vnode *));
extern void union_newlower __P((struct union_node *, struct vnode *));
extern void union_newupper __P((struct union_node *, struct vnode *));
extern void union_newsize __P((struct vnode *, off_t, off_t));

/* conversion/access macros between mounts, vnodes and union nodes */
#define MOUNTTOUNIONMOUNT(mp) ((struct union_mount *)((mp)->mnt_data))
#define VTOUNION(vp) ((struct union_node *)(vp)->v_data)
#define UNIONTOV(un) ((un)->un_vnode)
#define LOWERVP(vp) (VTOUNION(vp)->un_lowervp)
#define UPPERVP(vp) (VTOUNION(vp)->un_uppervp)
#define OTHERVP(vp) (UPPERVP(vp) ? UPPERVP(vp) : LOWERVP(vp))

extern int (**union_vnodeop_p)(void *);
extern struct vfsops union_vfsops;
#endif /* KERNEL */
diff --git a/bsd/miscfs/union/union_subr.c b/bsd/miscfs/union/union_subr.c
new file mode 100644
index 000000000..8f5ce12ac
--- /dev/null
+++ b/bsd/miscfs/union/union_subr.c
@@ -0,0 +1,1119 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc.
All Rights Reserved */ +/* + * Copyright (c) 1994 Jan-Simon Pendry + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)union_subr.c 8.20 (Berkeley) 5/20/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if DIAGNOSTIC +#include +#endif + +/* must be power of two, otherwise change UNION_HASH() */ +#define NHASH 32 + +/* unsigned int ... */ +#define UNION_HASH(u, l) \ + (((((unsigned long) (u)) + ((unsigned long) l)) >> 8) & (NHASH-1)) + +static LIST_HEAD(unhead, union_node) unhead[NHASH]; +static int unvplock[NHASH]; + +int +union_init() +{ + int i; + + for (i = 0; i < NHASH; i++) + LIST_INIT(&unhead[i]); + bzero((caddr_t) unvplock, sizeof(unvplock)); +} + +static int +union_list_lock(ix) + int ix; +{ + + if (unvplock[ix] & UN_LOCKED) { + unvplock[ix] |= UN_WANT; + sleep((caddr_t) &unvplock[ix], PINOD); + return (1); + } + + unvplock[ix] |= UN_LOCKED; + + return (0); +} + +static void +union_list_unlock(ix) + int ix; +{ + + unvplock[ix] &= ~UN_LOCKED; + + if (unvplock[ix] & UN_WANT) { + unvplock[ix] &= ~UN_WANT; + wakeup((caddr_t) &unvplock[ix]); + } +} + +void +union_updatevp(un, uppervp, lowervp) + struct union_node *un; + struct vnode *uppervp; + struct vnode *lowervp; +{ + int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp); + int nhash = UNION_HASH(uppervp, lowervp); + int docache = (lowervp != NULLVP || uppervp != NULLVP); + int lhash, hhash, uhash; + + /* + * Ensure locking is ordered from lower to higher + * to avoid deadlocks. 
+ */ + if (nhash < ohash) { + lhash = nhash; + uhash = ohash; + } else { + lhash = ohash; + uhash = nhash; + } + + if (lhash != uhash) + while (union_list_lock(lhash)) + continue; + + while (union_list_lock(uhash)) + continue; + + if (ohash != nhash || !docache) { + if (un->un_flags & UN_CACHED) { + un->un_flags &= ~UN_CACHED; + LIST_REMOVE(un, un_cache); + } + } + + if (ohash != nhash) + union_list_unlock(ohash); + + if (un->un_lowervp != lowervp) { + if (un->un_lowervp) { + vrele(un->un_lowervp); + if (un->un_path) { + _FREE(un->un_path, M_TEMP); + un->un_path = 0; + } + if (un->un_dirvp) { + vrele(un->un_dirvp); + un->un_dirvp = NULLVP; + } + } + un->un_lowervp = lowervp; + un->un_lowersz = VNOVAL; + } + + if (un->un_uppervp != uppervp) { + if (un->un_uppervp) + vrele(un->un_uppervp); + + un->un_uppervp = uppervp; + un->un_uppersz = VNOVAL; + } + + if (docache && (ohash != nhash)) { + LIST_INSERT_HEAD(&unhead[nhash], un, un_cache); + un->un_flags |= UN_CACHED; + } + + union_list_unlock(nhash); +} + +void +union_newlower(un, lowervp) + struct union_node *un; + struct vnode *lowervp; +{ + + union_updatevp(un, un->un_uppervp, lowervp); +} + +void +union_newupper(un, uppervp) + struct union_node *un; + struct vnode *uppervp; +{ + + union_updatevp(un, uppervp, un->un_lowervp); +} + +/* + * Keep track of size changes in the underlying vnodes. + * If the size changes, then callback to the vm layer + * giving priority to the upper layer size. 
+ */ +void +union_newsize(vp, uppersz, lowersz) + struct vnode *vp; + off_t uppersz, lowersz; +{ + struct union_node *un; + off_t sz; + + /* only interested in regular files */ + if (vp->v_type != VREG) + return; + + un = VTOUNION(vp); + sz = VNOVAL; + + if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) { + un->un_uppersz = uppersz; + if (sz == VNOVAL) + sz = un->un_uppersz; + } + + if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) { + un->un_lowersz = lowersz; + if (sz == VNOVAL) + sz = un->un_lowersz; + } + + if (sz != VNOVAL) { +#ifdef UNION_DIAGNOSTIC + printf("union: %s size now %ld\n", + uppersz != VNOVAL ? "upper" : "lower", (long) sz); +#endif + if (UBCISVALID(vp)) + ubc_setsize(vp, sz); /* XXX check error */ + } +} + +/* + * allocate a union_node/vnode pair. the vnode is + * referenced and locked. the new vnode is returned + * via (vpp). (mp) is the mountpoint of the union filesystem, + * (dvp) is the parent directory where the upper layer object + * should exist (but doesn't) and (cnp) is the componentname + * information which is partially copied to allow the upper + * layer object to be created at a later time. (uppervp) + * and (lowervp) reference the upper and lower layer objects + * being mapped. either, but not both, can be nil. + * if supplied, (uppervp) is locked. + * the reference is either maintained in the new union_node + * object which is allocated, or they are vrele'd. + * + * all union_nodes are maintained on a singly-linked + * list. new nodes are only allocated when they cannot + * be found on this list. entries on the list are + * removed when the vfs reclaim entry is called. + * + * a single lock is kept for the entire list. this is + * needed because the getnewvnode() function can block + * waiting for a vnode to become free, in which case there + * may be more than one process trying to get the same + * vnode. this lock is only taken if we are going to + * call getnewvnode, since the kernel itself is single-threaded. 
+ * + * if an entry is found on the list, then call vget() to + * take a reference. this is done because there may be + * zero references to it and so it needs to removed from + * the vnode free list. + */ +int +union_allocvp(vpp, mp, undvp, dvp, cnp, uppervp, lowervp, docache) + struct vnode **vpp; + struct mount *mp; + struct vnode *undvp; /* parent union vnode */ + struct vnode *dvp; /* may be null */ + struct componentname *cnp; /* may be null */ + struct vnode *uppervp; /* may be null */ + struct vnode *lowervp; /* may be null */ + int docache; +{ + int error; + struct union_node *un; + struct union_node **pp; + struct vnode *xlowervp = NULLVP; + struct union_mount *um = MOUNTTOUNIONMOUNT(mp); + int hash; + int vflag; + int try; + struct union_node *unp; + + if (uppervp == NULLVP && lowervp == NULLVP) + panic("union: unidentifiable allocation"); + + if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) { + xlowervp = lowervp; + lowervp = NULLVP; + } + + /* detect the root vnode (and aliases) */ + vflag = 0; + if ((uppervp == um->um_uppervp) && + ((lowervp == NULLVP) || lowervp == um->um_lowervp)) { + if (lowervp == NULLVP) { + lowervp = um->um_lowervp; + if (lowervp != NULLVP) + VREF(lowervp); + } + vflag = VROOT; + } + +loop: + if (!docache) { + un = 0; + } else for (try = 0; try < 3; try++) { + switch (try) { + case 0: + if (lowervp == NULLVP) + continue; + hash = UNION_HASH(uppervp, lowervp); + break; + + case 1: + if (uppervp == NULLVP) + continue; + hash = UNION_HASH(uppervp, NULLVP); + break; + + case 2: + if (lowervp == NULLVP) + continue; + hash = UNION_HASH(NULLVP, lowervp); + break; + } + + while (union_list_lock(hash)) + continue; + + for (un = unhead[hash].lh_first; un != 0; + un = un->un_cache.le_next) { + if ((un->un_lowervp == lowervp || + un->un_lowervp == NULLVP) && + (un->un_uppervp == uppervp || + un->un_uppervp == NULLVP) && + (UNIONTOV(un)->v_mount == mp)) { + if (vget(UNIONTOV(un), 0, + cnp ? 
cnp->cn_proc : NULL)) { + union_list_unlock(hash); + goto loop; + } + break; + } + } + + union_list_unlock(hash); + + if (un) + break; + } + + if (un) { + /* + * Obtain a lock on the union_node. + * uppervp is locked, though un->un_uppervp + * may not be. this doesn't break the locking + * hierarchy since in the case that un->un_uppervp + * is not yet locked it will be vrele'd and replaced + * with uppervp. + */ + + if ((dvp != NULLVP) && (uppervp == dvp)) { + /* + * Access ``.'', so (un) will already + * be locked. Since this process has + * the lock on (uppervp) no other + * process can hold the lock on (un). + */ +#if DIAGNOSTIC + if ((un->un_flags & UN_LOCKED) == 0) + panic("union: . not locked"); + else if (current_proc() && un->un_pid != current_proc()->p_pid && + un->un_pid > -1 && current_proc()->p_pid > -1) + panic("union: allocvp not lock owner"); +#endif + } else { + if (un->un_flags & UN_LOCKED) { + vrele(UNIONTOV(un)); + un->un_flags |= UN_WANT; + sleep((caddr_t) &un->un_flags, PINOD); + goto loop; + } + un->un_flags |= UN_LOCKED; + +#if DIAGNOSTIC + if (current_proc()) + un->un_pid = current_proc()->p_pid; + else + un->un_pid = -1; +#endif + } + + /* + * At this point, the union_node is locked, + * un->un_uppervp may not be locked, and uppervp + * is locked or nil. + */ + + /* + * Save information about the upper layer. + */ + if (uppervp != un->un_uppervp) { + union_newupper(un, uppervp); + } else if (uppervp) { + vrele(uppervp); + } + + if (un->un_uppervp) { + un->un_flags |= UN_ULOCK; + un->un_flags &= ~UN_KLOCK; + } + + /* + * Save information about the lower layer. + * This needs to keep track of pathname + * and directory information which union_vn_create + * might need. 
+ */ + if (lowervp != un->un_lowervp) { + union_newlower(un, lowervp); + if (cnp && (lowervp != NULLVP)) { + un->un_hash = cnp->cn_hash; + MALLOC(un->un_path, caddr_t, cnp->cn_namelen+1, + M_TEMP, M_WAITOK); + bcopy(cnp->cn_nameptr, un->un_path, + cnp->cn_namelen); + un->un_path[cnp->cn_namelen] = '\0'; + VREF(dvp); + un->un_dirvp = dvp; + } + } else if (lowervp) { + vrele(lowervp); + } + *vpp = UNIONTOV(un); + return (0); + } + + if (docache) { + /* + * otherwise lock the vp list while we call getnewvnode + * since that can block. + */ + hash = UNION_HASH(uppervp, lowervp); + + if (union_list_lock(hash)) + goto loop; + } + + MALLOC(unp, void *, sizeof(struct union_node), M_TEMP, M_WAITOK); + error = getnewvnode(VT_UNION, mp, union_vnodeop_p, vpp); + if (error) { + FREE(unp, M_TEMP); + if (uppervp) { + if (dvp == uppervp) + vrele(uppervp); + else + vput(uppervp); + } + if (lowervp) + vrele(lowervp); + + goto out; + } + + (*vpp)->v_data = unp; + (*vpp)->v_flag |= vflag; + if (uppervp) + (*vpp)->v_type = uppervp->v_type; + else + (*vpp)->v_type = lowervp->v_type; + + if ((*vpp)->v_type == VREG) + ubc_info_init(*vpp); + + un = VTOUNION(*vpp); + un->un_vnode = *vpp; + un->un_uppervp = uppervp; + un->un_uppersz = VNOVAL; + un->un_lowervp = lowervp; + un->un_lowersz = VNOVAL; + un->un_pvp = undvp; + if (undvp != NULLVP) + VREF(undvp); + un->un_dircache = 0; + un->un_openl = 0; + un->un_flags = UN_LOCKED; + if (un->un_uppervp) + un->un_flags |= UN_ULOCK; +#if DIAGNOSTIC + if (current_proc()) + un->un_pid = current_proc()->p_pid; + else + un->un_pid = -1; +#endif + if (cnp && (lowervp != NULLVP)) { + un->un_hash = cnp->cn_hash; + un->un_path = _MALLOC(cnp->cn_namelen+1, M_TEMP, M_WAITOK); + bcopy(cnp->cn_nameptr, un->un_path, cnp->cn_namelen); + un->un_path[cnp->cn_namelen] = '\0'; + VREF(dvp); + un->un_dirvp = dvp; + } else { + un->un_hash = 0; + un->un_path = 0; + un->un_dirvp = 0; + } + + if (docache) { + LIST_INSERT_HEAD(&unhead[hash], un, un_cache); + un->un_flags |= 
UN_CACHED; + } + + if (xlowervp) + vrele(xlowervp); + +out: + if (docache) + union_list_unlock(hash); + + return (error); +} + +int +union_freevp(vp) + struct vnode *vp; +{ + struct union_node *un = VTOUNION(vp); + + if (un->un_flags & UN_CACHED) { + un->un_flags &= ~UN_CACHED; + LIST_REMOVE(un, un_cache); + } + + if (un->un_pvp != NULLVP) + vrele(un->un_pvp); + if (un->un_uppervp != NULLVP) + vrele(un->un_uppervp); + if (un->un_lowervp != NULLVP) + vrele(un->un_lowervp); + if (un->un_dirvp != NULLVP) + vrele(un->un_dirvp); + if (un->un_path) + _FREE(un->un_path, M_TEMP); + + FREE(vp->v_data, M_TEMP); + vp->v_data = 0; + + return (0); +} + +/* + * copyfile. copy the vnode (fvp) to the vnode (tvp) + * using a sequence of reads and writes. both (fvp) + * and (tvp) are locked on entry and exit. + */ +int +union_copyfile(fvp, tvp, cred, p) + struct vnode *fvp; + struct vnode *tvp; + struct ucred *cred; + struct proc *p; +{ + char *buf; + struct uio uio; + struct iovec iov; + int error = 0; + + /* + * strategy: + * allocate a buffer of size MAXPHYSIO. + * loop doing reads and writes, keeping track + * of the current uio offset. + * give up at the first sign of trouble. + */ + + uio.uio_procp = p; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_offset = 0; + + VOP_UNLOCK(fvp, 0, p); /* XXX */ + VOP_LEASE(fvp, p, cred, LEASE_READ); + vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ + VOP_UNLOCK(tvp, 0, p); /* XXX */ + VOP_LEASE(tvp, p, cred, LEASE_WRITE); + vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ + + buf = _MALLOC(MAXPHYSIO, M_TEMP, M_WAITOK); + + /* ugly loop follows... 
*/ + do { + off_t offset = uio.uio_offset; + + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + iov.iov_base = buf; + iov.iov_len = MAXPHYSIO; + uio.uio_resid = iov.iov_len; + uio.uio_rw = UIO_READ; + error = VOP_READ(fvp, &uio, 0, cred); + + if (error == 0) { + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + iov.iov_base = buf; + iov.iov_len = MAXPHYSIO - uio.uio_resid; + uio.uio_offset = offset; + uio.uio_rw = UIO_WRITE; + uio.uio_resid = iov.iov_len; + + if (uio.uio_resid == 0) + break; + + do { + error = VOP_WRITE(tvp, &uio, 0, cred); + } while ((uio.uio_resid > 0) && (error == 0)); + } + + } while (error == 0); + + _FREE(buf, M_TEMP); + return (error); +} + +/* + * (un) is assumed to be locked on entry and remains + * locked on exit. + */ +int +union_copyup(un, docopy, cred, p) + struct union_node *un; + int docopy; + struct ucred *cred; + struct proc *p; +{ + int error; + struct vnode *lvp, *uvp; + + error = union_vn_create(&uvp, un, p); + if (error) + return (error); + + /* at this point, uppervp is locked */ + union_newupper(un, uvp); + un->un_flags |= UN_ULOCK; + + lvp = un->un_lowervp; + + if (docopy) { + /* + * XX - should not ignore errors + * from VOP_CLOSE + */ + vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_OPEN(lvp, FREAD, cred, p); + if (error == 0) { + error = union_copyfile(lvp, uvp, cred, p); + VOP_UNLOCK(lvp, 0, p); + (void) VOP_CLOSE(lvp, FREAD, cred, p); + } +#ifdef UNION_DIAGNOSTIC + if (error == 0) + uprintf("union: copied up %s\n", un->un_path); +#endif + + } + un->un_flags &= ~UN_ULOCK; + VOP_UNLOCK(uvp, 0, p); + union_vn_close(uvp, FWRITE, cred, p); + vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY, p); + un->un_flags |= UN_ULOCK; + + /* + * Subsequent IOs will go to the top layer, so + * call close on the lower vnode and open on the + * upper vnode to ensure that the filesystem keeps + * its references counts right. This doesn't do + * the right thing with (cred) and (FREAD) though. + * Ignoring error returns is not right, either. 
+ */ + if (error == 0) { + int i; + + for (i = 0; i < un->un_openl; i++) { + (void) VOP_CLOSE(lvp, FREAD, cred, p); + (void) VOP_OPEN(uvp, FREAD, cred, p); + } + un->un_openl = 0; + } + + return (error); + +} + +static int +union_relookup(um, dvp, vpp, cnp, cn, path, pathlen) + struct union_mount *um; + struct vnode *dvp; + struct vnode **vpp; + struct componentname *cnp; + struct componentname *cn; + char *path; + int pathlen; +{ + int error; + + /* + * A new componentname structure must be faked up because + * there is no way to know where the upper level cnp came + * from or what it is being used for. This must duplicate + * some of the work done by NDINIT, some of the work done + * by namei, some of the work done by lookup and some of + * the work done by VOP_LOOKUP when given a CREATE flag. + * Conclusion: Horrible. + * + * The pathname buffer will be FREEed by VOP_MKDIR. + */ + cn->cn_namelen = pathlen; + cn->cn_pnbuf = _MALLOC_ZONE(cn->cn_namelen+1, M_NAMEI, M_WAITOK); + cn->cn_pnlen = cn->cn_namelen+1; + bcopy(path, cn->cn_pnbuf, cn->cn_namelen); + cn->cn_pnbuf[cn->cn_namelen] = '\0'; + + cn->cn_nameiop = CREATE; + cn->cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN); + cn->cn_proc = cnp->cn_proc; + if (um->um_op == UNMNT_ABOVE) + cn->cn_cred = cnp->cn_cred; + else + cn->cn_cred = um->um_cred; + cn->cn_nameptr = cn->cn_pnbuf; + cn->cn_hash = cnp->cn_hash; + cn->cn_consume = cnp->cn_consume; + + VREF(dvp); + error = relookup(dvp, vpp, cn); + if (!error) + vrele(dvp); + + return (error); +} + +/* + * Create a shadow directory in the upper layer. + * The new vnode is returned locked. + * + * (um) points to the union mount structure for access to the + * the mounting process's credentials. + * (dvp) is the directory in which to create the shadow directory. + * it is unlocked on entry and exit. + * (cnp) is the componentname to be created. + * (vpp) is the returned newly created shadow directory, which + * is returned locked. 
 + */ +int +union_mkshadow(um, dvp, cnp, vpp) + struct union_mount *um; + struct vnode *dvp; + struct componentname *cnp; + struct vnode **vpp; +{ + int error; + struct vattr va; + struct proc *p = cnp->cn_proc; + struct componentname cn; + + error = union_relookup(um, dvp, vpp, cnp, &cn, + cnp->cn_nameptr, cnp->cn_namelen); + if (error) + return (error); + + if (*vpp) { + VOP_ABORTOP(dvp, &cn); + VOP_UNLOCK(dvp, 0, p); + vrele(*vpp); + *vpp = NULLVP; + return (EEXIST); + } + + /* + * policy: when creating the shadow directory in the + * upper layer, create it owned by the user who did + * the mount, group from parent directory, and mode + * 777 modified by umask (ie mostly identical to the + * mkdir syscall). (jsp, kb) + */ + + VATTR_NULL(&va); + va.va_type = VDIR; + va.va_mode = um->um_cmode; + + /* VOP_LEASE: dvp is locked */ + VOP_LEASE(dvp, p, cn.cn_cred, LEASE_WRITE); + + error = VOP_MKDIR(dvp, vpp, &cn, &va); + return (error); +} + +/* + * Create a whiteout entry in the upper layer. + * + * (um) points to the union mount structure for access to + * the mounting process's credentials. + * (dvp) is the directory in which to create the whiteout. + * it is locked on entry and exit. + * (cnp) is the componentname to be created. 
 + */ +int +union_mkwhiteout(um, dvp, cnp, path) + struct union_mount *um; + struct vnode *dvp; + struct componentname *cnp; + char *path; +{ + int error; + struct vattr va; + struct proc *p = cnp->cn_proc; + struct vnode *wvp; + struct componentname cn; + + VOP_UNLOCK(dvp, 0, p); + error = union_relookup(um, dvp, &wvp, cnp, &cn, path, strlen(path)); + if (error) { + vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); + } + + if (wvp) { + VOP_ABORTOP(dvp, &cn); + vrele(dvp); + vrele(wvp); + return (EEXIST); + } + + /* VOP_LEASE: dvp is locked */ + VOP_LEASE(dvp, p, p->p_ucred, LEASE_WRITE); + + error = VOP_WHITEOUT(dvp, &cn, CREATE); + if (error) + VOP_ABORTOP(dvp, &cn); + + vrele(dvp); + + return (error); +} + +/* + * union_vn_create: creates and opens a new shadow file + * on the upper union layer. this function is similar + * in spirit to calling vn_open but it avoids calling namei(). + * the problem with calling namei is that a) it locks too many + * things, and b) it doesn't start at the "right" directory, + * whereas relookup is told where to start. + */ +int +union_vn_create(vpp, un, p) + struct vnode **vpp; + struct union_node *un; + struct proc *p; +{ + struct vnode *vp; + struct ucred *cred = p->p_ucred; + struct vattr vat; + struct vattr *vap = &vat; + int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL); + int error; + int cmode = UN_FILEMODE & ~p->p_fd->fd_cmask; + char *cp; + struct componentname cn; + + *vpp = NULLVP; + + /* + * Build a new componentname structure (for the same + * reasons outlined in union_mkshadow). + * The difference here is that the file is owned by + * the current user, rather than by the person who + * did the mount, since the current user needs to be + * able to write the file (that's why it is being + * copied in the first place). 
+ */ + cn.cn_namelen = strlen(un->un_path); + cn.cn_pnbuf = (caddr_t) _MALLOC_ZONE(cn.cn_namelen+1, + M_NAMEI, M_WAITOK); + cn.cn_pnlen = cn.cn_namelen+1; + bcopy(un->un_path, cn.cn_pnbuf, cn.cn_namelen+1); + cn.cn_nameiop = CREATE; + cn.cn_flags = (LOCKPARENT|HASBUF|SAVENAME|SAVESTART|ISLASTCN); + cn.cn_proc = p; + cn.cn_cred = p->p_ucred; + cn.cn_nameptr = cn.cn_pnbuf; + cn.cn_hash = un->un_hash; + cn.cn_consume = 0; + + VREF(un->un_dirvp); + if (error = relookup(un->un_dirvp, &vp, &cn)) + return (error); + vrele(un->un_dirvp); + + if (vp) { + VOP_ABORTOP(un->un_dirvp, &cn); + if (un->un_dirvp == vp) + vrele(un->un_dirvp); + else + vput(un->un_dirvp); + vrele(vp); + return (EEXIST); + } + + /* + * Good - there was no race to create the file + * so go ahead and create it. The permissions + * on the file will be 0666 modified by the + * current user's umask. Access to the file, while + * it is unioned, will require access to the top *and* + * bottom files. Access when not unioned will simply + * require access to the top-level file. + * TODO: confirm choice of access permissions. 
+ */ + VATTR_NULL(vap); + vap->va_type = VREG; + vap->va_mode = cmode; + VOP_LEASE(un->un_dirvp, p, cred, LEASE_WRITE); + if (error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap)) + return (error); + + if (error = VOP_OPEN(vp, fmode, cred, p)) { + vput(vp); + return (error); + } + + if (++vp->v_writecount <= 0) + panic("union: v_writecount"); + *vpp = vp; + return (0); +} + +int +union_vn_close(vp, fmode, cred, p) + struct vnode *vp; + int fmode; + struct ucred *cred; + struct proc *p; +{ + + if (fmode & FWRITE) + --vp->v_writecount; + return (VOP_CLOSE(vp, fmode, cred, p)); +} + +void +union_removed_upper(un) + struct union_node *un; +{ + struct proc *p = current_proc(); /* XXX */ + + union_newupper(un, NULLVP); + if (un->un_flags & UN_CACHED) { + un->un_flags &= ~UN_CACHED; + LIST_REMOVE(un, un_cache); + } + + if (un->un_flags & UN_ULOCK) { + un->un_flags &= ~UN_ULOCK; + VOP_UNLOCK(un->un_uppervp, 0, p); + } +} + +#if 0 +struct vnode * +union_lowervp(vp) + struct vnode *vp; +{ + struct union_node *un = VTOUNION(vp); + + if ((un->un_lowervp != NULLVP) && + (vp->v_type == un->un_lowervp->v_type)) { + if (vget(un->un_lowervp, 0, current_proc()) == 0) + return (un->un_lowervp); + } + + return (NULLVP); +} +#endif + +/* + * determine whether a whiteout is needed + * during a remove/rmdir operation. 
+ */ +int +union_dowhiteout(un, cred, p) + struct union_node *un; + struct ucred *cred; + struct proc *p; +{ + struct vattr va; + + if (un->un_lowervp != NULLVP) + return (1); + + if (VOP_GETATTR(un->un_uppervp, &va, cred, p) == 0 && + (va.va_flags & OPAQUE)) + return (1); + + return (0); +} + +static void +union_dircache_r(vp, vppp, cntp) + struct vnode *vp; + struct vnode ***vppp; + int *cntp; +{ + struct union_node *un; + + if (vp->v_op != union_vnodeop_p) { + if (vppp) { + VREF(vp); + *(*vppp)++ = vp; + if (--(*cntp) == 0) + panic("union: dircache table too small"); + } else { + (*cntp)++; + } + + return; + } + + un = VTOUNION(vp); + if (un->un_uppervp != NULLVP) + union_dircache_r(un->un_uppervp, vppp, cntp); + if (un->un_lowervp != NULLVP) + union_dircache_r(un->un_lowervp, vppp, cntp); +} + +struct vnode * +union_dircache(vp, p) + struct vnode *vp; + struct proc *p; +{ + int cnt; + struct vnode *nvp; + struct vnode **vpp; + struct vnode **dircache; + struct union_node *un; + int error; + + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + dircache = VTOUNION(vp)->un_dircache; + + nvp = NULLVP; + + if (dircache == 0) { + cnt = 0; + union_dircache_r(vp, 0, &cnt); + cnt++; + dircache = (struct vnode **) + _MALLOC(cnt * sizeof(struct vnode *), + M_TEMP, M_WAITOK); + vpp = dircache; + union_dircache_r(vp, &vpp, &cnt); + *vpp = NULLVP; + vpp = dircache + 1; + } else { + vpp = dircache; + do { + if (*vpp++ == VTOUNION(vp)->un_uppervp) + break; + } while (*vpp != NULLVP); + } + + if (*vpp == NULLVP) + goto out; + + vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, p); + VREF(*vpp); + error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0, *vpp, NULLVP, 0); + if (error) + goto out; + + VTOUNION(vp)->un_dircache = 0; + un = VTOUNION(nvp); + un->un_dircache = dircache; + +out: + VOP_UNLOCK(vp, 0, p); + return (nvp); +} diff --git a/bsd/miscfs/union/union_vfsops.c b/bsd/miscfs/union/union_vfsops.c new file mode 100644 index 000000000..2ddfa135b --- /dev/null +++ 
b/bsd/miscfs/union/union_vfsops.c @@ -0,0 +1,524 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1994, 1995 The Regents of the University of California. + * Copyright (c) 1994, 1995 Jan-Simon Pendry. + * All rights reserved. + * + * This code is derived from software donated to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)union_vfsops.c 8.20 (Berkeley) 5/20/95 + */ + +/* + * Union Layer + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Mount union filesystem + */ +int +union_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + int error = 0; + struct union_args args; + struct vnode *lowerrootvp = NULLVP; + struct vnode *upperrootvp = NULLVP; + struct union_mount *um = 0; + struct ucred *cred = 0; + struct ucred *scred; + struct vattr va; + char *cp; + int len; + u_int size; + +#ifdef UNION_DIAGNOSTIC + printf("union_mount(mp = %x)\n", mp); +#endif + + /* + * Update is a no-op + */ + if (mp->mnt_flag & MNT_UPDATE) { + /* + * Need to provide. + * 1. a way to convert between rdonly and rdwr mounts. + * 2. support for nfs exports. + */ + error = EOPNOTSUPP; + goto bad; + } + + /* + * Get argument + */ + if (error = copyin(data, (caddr_t)&args, sizeof(struct union_args))) + goto bad; + + lowerrootvp = mp->mnt_vnodecovered; + VREF(lowerrootvp); + + /* + * Find upper node. + */ + NDINIT(ndp, LOOKUP, FOLLOW|WANTPARENT, + UIO_USERSPACE, args.target, p); + + if (error = namei(ndp)) + goto bad; + + upperrootvp = ndp->ni_vp; + vrele(ndp->ni_dvp); + ndp->ni_dvp = NULL; + + if (upperrootvp->v_type != VDIR) { + error = EINVAL; + goto bad; + } + +// um = (struct union_mount *) malloc(sizeof(struct union_mount), +// M_UFSMNT, M_WAITOK); /* XXX */ + MALLOC(um, struct union_mount *, sizeof(struct union_mount), + M_UFSMNT, M_WAITOK); + + /* + * Keep a held reference to the target vnodes. + * They are vrele'd in union_unmount. + * + * Depending on the _BELOW flag, the filesystems are + * viewed in a different order. In effect, this is the + * same as providing a mount under option to the mount syscall. 
+ */ + + um->um_op = args.mntflags & UNMNT_OPMASK; + switch (um->um_op) { + case UNMNT_ABOVE: + um->um_lowervp = lowerrootvp; + um->um_uppervp = upperrootvp; + break; + + case UNMNT_BELOW: + um->um_lowervp = upperrootvp; + um->um_uppervp = lowerrootvp; + break; + + case UNMNT_REPLACE: + vrele(lowerrootvp); + lowerrootvp = NULLVP; + um->um_uppervp = upperrootvp; + um->um_lowervp = lowerrootvp; + break; + + default: + error = EINVAL; + goto bad; + } + + /* + * Unless the mount is readonly, ensure that the top layer + * supports whiteout operations + */ + if ((mp->mnt_flag & MNT_RDONLY) == 0) { + error = VOP_WHITEOUT(um->um_uppervp, (struct componentname *) 0, LOOKUP); + if (error) + goto bad; + } + + um->um_cred = p->p_ucred; + crhold(um->um_cred); + um->um_cmode = UN_DIRMODE &~ p->p_fd->fd_cmask; + + /* + * Depending on what you think the MNT_LOCAL flag might mean, + * you may want the && to be || on the conditional below. + * At the moment it has been defined that the filesystem is + * only local if it is all local, ie the MNT_LOCAL flag implies + * that the entire namespace is local. If you think the MNT_LOCAL + * flag implies that some of the files might be stored locally + * then you will want to change the conditional. + */ + if (um->um_op == UNMNT_ABOVE) { + if (((um->um_lowervp == NULLVP) || + (um->um_lowervp->v_mount->mnt_flag & MNT_LOCAL)) && + (um->um_uppervp->v_mount->mnt_flag & MNT_LOCAL)) + mp->mnt_flag |= MNT_LOCAL; + } + + /* + * Copy in the upper layer's RDONLY flag. This is for the benefit + * of lookup() which explicitly checks the flag, rather than asking + * the filesystem for it's own opinion. This means, that an update + * mount of the underlying filesystem to go from rdonly to rdwr + * will leave the unioned view as read-only. 
+ */ + mp->mnt_flag |= (um->um_uppervp->v_mount->mnt_flag & MNT_RDONLY); + + mp->mnt_data = (qaddr_t) um; + vfs_getnewfsid(mp); + + (void) copyinstr(path, mp->mnt_stat.f_mntonname, MNAMELEN - 1, &size); + bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size); + + switch (um->um_op) { + case UNMNT_ABOVE: + cp = ":"; + break; + case UNMNT_BELOW: + cp = ":"; + break; + case UNMNT_REPLACE: + cp = ""; + break; + } + len = strlen(cp); + bcopy(cp, mp->mnt_stat.f_mntfromname, len); + + cp = mp->mnt_stat.f_mntfromname + len; + len = MNAMELEN - len; + + (void) copyinstr(args.target, cp, len - 1, &size); + bzero(cp + size, len - size); + +#ifdef UNION_DIAGNOSTIC + printf("union_mount: from %s, on %s\n", + mp->mnt_stat.f_mntfromname, mp->mnt_stat.f_mntonname); +#endif + return (0); + +bad: + if (um) + _FREE(um, M_UFSMNT); + if (cred != NOCRED) + crfree(cred); + if (upperrootvp) + vrele(upperrootvp); + if (lowerrootvp) + vrele(lowerrootvp); + return (error); +} + +/* + * VFS start. Nothing needed here - the start routine + * on the underlying filesystem(s) will have been called + * when that filesystem was mounted. + */ +int +union_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); +} + +/* + * Free reference to union layer + */ +int +union_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct union_mount *um = MOUNTTOUNIONMOUNT(mp); + struct vnode *um_rootvp; + int error; + int freeing; + int flags = 0; + struct ucred *cred; + +#ifdef UNION_DIAGNOSTIC + printf("union_unmount(mp = %x)\n", mp); +#endif + + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + + if (error = union_root(mp, &um_rootvp)) + return (error); + + /* + * Keep flushing vnodes from the mount list. + * This is needed because of the un_pvp held + * reference to the parent vnode. + * If more vnodes have been freed on a given pass, + * the try again. 
The loop will iterate at most + * (d) times, where (d) is the maximum tree depth + * in the filesystem. + */ + for (freeing = 0; vflush(mp, um_rootvp, flags) != 0;) { + struct vnode *vp; + int n; + + /* count #vnodes held on mount list */ + for (n = 0, vp = mp->mnt_vnodelist.lh_first; + vp != NULLVP; + vp = vp->v_mntvnodes.le_next) + n++; + + /* if this is unchanged then stop */ + if (n == freeing) + break; + + /* otherwise try one more time */ + freeing = n; + } + + /* At this point the root vnode should have a single reference */ + if (um_rootvp->v_usecount > 1) { + vput(um_rootvp); + return (EBUSY); + } + +#ifdef UNION_DIAGNOSTIC + vprint("union root", um_rootvp); +#endif + /* + * Discard references to upper and lower target vnodes. + */ + if (um->um_lowervp) + vrele(um->um_lowervp); + vrele(um->um_uppervp); + cred = um->um_cred; + if (cred != NOCRED) { + um->um_cred = NOCRED; + crfree(cred); + } + /* + * Release reference on underlying root vnode + */ + vput(um_rootvp); + /* + * And blow it away for future re-use + */ + vgone(um_rootvp); + /* + * Finally, throw away the union_mount structure + */ + _FREE(mp->mnt_data, M_UFSMNT); /* XXX */ + mp->mnt_data = 0; + return (0); +} + +int +union_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct proc *p = current_proc(); /* XXX */ + struct union_mount *um = MOUNTTOUNIONMOUNT(mp); + int error; + int loselock; + + /* + * Return locked reference to root. 
+ */ + VREF(um->um_uppervp); + if ((um->um_op == UNMNT_BELOW) && + VOP_ISLOCKED(um->um_uppervp)) { + loselock = 1; + } else { + vn_lock(um->um_uppervp, LK_EXCLUSIVE | LK_RETRY, p); + loselock = 0; + } + if (um->um_lowervp) + VREF(um->um_lowervp); + error = union_allocvp(vpp, mp, + (struct vnode *) 0, + (struct vnode *) 0, + (struct componentname *) 0, + um->um_uppervp, + um->um_lowervp, + 1); + + if (error) { + if (loselock) + vrele(um->um_uppervp); + else + vput(um->um_uppervp); + if (um->um_lowervp) + vrele(um->um_lowervp); + } else { + if (loselock) + VTOUNION(*vpp)->un_flags &= ~UN_ULOCK; + } + + return (error); +} + +int +union_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + int error; + struct union_mount *um = MOUNTTOUNIONMOUNT(mp); + struct statfs mstat; + int lbsize; + +#ifdef UNION_DIAGNOSTIC + printf("union_statfs(mp = %x, lvp = %x, uvp = %x)\n", mp, + um->um_lowervp, + um->um_uppervp); +#endif + + bzero(&mstat, sizeof(mstat)); + + if (um->um_lowervp) { + error = VFS_STATFS(um->um_lowervp->v_mount, &mstat, p); + if (error) + return (error); + } + + /* now copy across the "interesting" information and fake the rest */ +#if 0 + sbp->f_type = mstat.f_type; + sbp->f_flags = mstat.f_flags; + sbp->f_bsize = mstat.f_bsize; + sbp->f_iosize = mstat.f_iosize; +#endif + lbsize = mstat.f_bsize; + sbp->f_blocks = mstat.f_blocks; + sbp->f_bfree = mstat.f_bfree; + sbp->f_bavail = mstat.f_bavail; + sbp->f_files = mstat.f_files; + sbp->f_ffree = mstat.f_ffree; + + error = VFS_STATFS(um->um_uppervp->v_mount, &mstat, p); + if (error) + return (error); + + sbp->f_flags = mstat.f_flags; + sbp->f_bsize = mstat.f_bsize; + sbp->f_iosize = mstat.f_iosize; + + /* + * if the lower and upper blocksizes differ, then frig the + * block counts so that the sizes reported by df make some + * kind of sense. none of this makes sense though. 
+ */ + + if (mstat.f_bsize != lbsize) + sbp->f_blocks = sbp->f_blocks * lbsize / mstat.f_bsize; + + /* + * The "total" fields count total resources in all layers, + * the "free" fields count only those resources which are + * free in the upper layer (since only the upper layer + * is writeable). + */ + sbp->f_blocks += mstat.f_blocks; + sbp->f_bfree = mstat.f_bfree; + sbp->f_bavail = mstat.f_bavail; + sbp->f_files += mstat.f_files; + sbp->f_ffree = mstat.f_ffree; + + if (sbp != &mp->mnt_stat) { + sbp->f_type = mp->mnt_vfc->vfc_typenum; + bcopy(&mp->mnt_stat.f_fsid, &sbp->f_fsid, sizeof(sbp->f_fsid)); + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + return (0); +} + +/* + * XXX - Assumes no data cached at union layer. + */ +#define union_sync ((int (*) __P((struct mount *, int, struct ucred *, \ + struct proc *)))nullop) + +#define union_fhtovp ((int (*) __P((struct mount *, struct fid *, \ + struct mbuf *, struct vnode **, int *, struct ucred **)))eopnotsupp) +int union_init __P((struct vfsconf *)); +#define union_quotactl ((int (*) __P((struct mount *, int, uid_t, caddr_t, \ + struct proc *)))eopnotsupp) +#define union_sysctl ((int (*) __P((int *, u_int, void *, size_t *, void *, \ + size_t, struct proc *)))eopnotsupp) +#define union_vget ((int (*) __P((struct mount *, ino_t, struct vnode **))) \ + eopnotsupp) +#define union_vptofh ((int (*) __P((struct vnode *, struct fid *)))eopnotsupp) + +struct vfsops union_vfsops = { + union_mount, + union_start, + union_unmount, + union_root, + union_quotactl, + union_statfs, + union_sync, + union_vget, + union_fhtovp, + union_vptofh, + union_init, + union_sysctl, +}; diff --git a/bsd/miscfs/union/union_vnops.c b/bsd/miscfs/union/union_vnops.c new file mode 100644 index 000000000..e67eb52df --- /dev/null +++ b/bsd/miscfs/union/union_vnops.c @@ -0,0 +1,1829 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993, 1994, 1995 Jan-Simon Pendry. + * Copyright (c) 1992, 1993, 1994, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Jan-Simon Pendry. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)union_vnops.c 8.32 (Berkeley) 6/23/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define FIXUP(un, p) { \ + if (((un)->un_flags & UN_ULOCK) == 0) { \ + union_fixup(un, p); \ + } \ +} + +static void +union_fixup(un, p) + struct union_node *un; + struct proc *p; +{ + + vn_lock(un->un_uppervp, LK_EXCLUSIVE | LK_RETRY, p); + un->un_flags |= UN_ULOCK; +} + +static int +union_lookup1(udvp, dvpp, vpp, cnp) + struct vnode *udvp; + struct vnode **dvpp; + struct vnode **vpp; + struct componentname *cnp; +{ + int error; + struct proc *p = cnp->cn_proc; + struct vnode *tdvp; + struct vnode *dvp; + struct mount *mp; + + dvp = *dvpp; + + /* + * If stepping up the directory tree, check for going + * back across the mount point, in which case do what + * lookup would do by stepping back down the mount + * hierarchy. 
+ */ + if (cnp->cn_flags & ISDOTDOT) { + while ((dvp != udvp) && (dvp->v_flag & VROOT)) { + /* + * Don't do the NOCROSSMOUNT check + * at this level. By definition, + * union fs deals with namespaces, not + * filesystems. + */ + tdvp = dvp; + *dvpp = dvp = dvp->v_mount->mnt_vnodecovered; + vput(tdvp); + VREF(dvp); + vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); + } + } + + error = VOP_LOOKUP(dvp, &tdvp, cnp); + if (error) + return (error); + + /* + * The parent directory will have been unlocked, unless lookup + * found the last component. In which case, re-lock the node + * here to allow it to be unlocked again (phew) in union_lookup. + */ + if (dvp != tdvp && !(cnp->cn_flags & ISLASTCN)) + vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); + + dvp = tdvp; + + /* + * Lastly check if the current node is a mount point in + * which case walk up the mount hierarchy making sure not to + * bump into the root of the mount tree (ie. dvp != udvp). + */ + while (dvp != udvp && (dvp->v_type == VDIR) && + (mp = dvp->v_mountedhere)) { + + if (vfs_busy(mp, 0, 0, p)) + continue; + + error = VFS_ROOT(mp, &tdvp); + vfs_unbusy(mp, p); + if (error) { + vput(dvp); + return (error); + } + + vput(dvp); + dvp = tdvp; + } + + *vpp = dvp; + return (0); +} + +int +union_lookup(ap) + struct vop_lookup_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + } */ *ap; +{ + int error; + int uerror, lerror; + struct vnode *uppervp, *lowervp; + struct vnode *upperdvp, *lowerdvp; + struct vnode *dvp = ap->a_dvp; + struct union_node *dun = VTOUNION(dvp); + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + int lockparent = cnp->cn_flags & LOCKPARENT; + int rdonly = cnp->cn_flags & RDONLY; + struct union_mount *um = MOUNTTOUNIONMOUNT(dvp->v_mount); + struct ucred *saved_cred; + int iswhiteout; + struct vattr va; + +#ifdef notyet + if (cnp->cn_namelen == 3 && + cnp->cn_nameptr[2] == '.' && + cnp->cn_nameptr[1] == '.' 
&& + cnp->cn_nameptr[0] == '.') { + dvp = *ap->a_vpp = LOWERVP(ap->a_dvp); + if (dvp == NULLVP) + return (ENOENT); + VREF(dvp); + vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p); + if (!lockparent || !(cnp->cn_flags & ISLASTCN)) + VOP_UNLOCK(ap->a_dvp, 0, p); + return (0); + } +#endif + + cnp->cn_flags |= LOCKPARENT; + + upperdvp = dun->un_uppervp; + lowerdvp = dun->un_lowervp; + uppervp = NULLVP; + lowervp = NULLVP; + iswhiteout = 0; + + /* + * do the lookup in the upper level. + * if that level comsumes additional pathnames, + * then assume that something special is going + * on and just return that vnode. + */ + if (upperdvp != NULLVP) { + FIXUP(dun, p); + uerror = union_lookup1(um->um_uppervp, &upperdvp, + &uppervp, cnp); + /*if (uppervp == upperdvp) + dun->un_flags |= UN_KLOCK;*/ + + if (cnp->cn_consume != 0) { + *ap->a_vpp = uppervp; + if (!lockparent) + cnp->cn_flags &= ~LOCKPARENT; + return (uerror); + } + if (uerror == ENOENT || uerror == EJUSTRETURN) { + if (cnp->cn_flags & ISWHITEOUT) { + iswhiteout = 1; + } else if (lowerdvp != NULLVP) { + lerror = VOP_GETATTR(upperdvp, &va, + cnp->cn_cred, cnp->cn_proc); + if (lerror == 0 && (va.va_flags & OPAQUE)) + iswhiteout = 1; + } + } + } else { + uerror = ENOENT; + } + + /* + * in a similar way to the upper layer, do the lookup + * in the lower layer. this time, if there is some + * component magic going on, then vput whatever we got + * back from the upper layer and return the lower vnode + * instead. + */ + if (lowerdvp != NULLVP && !iswhiteout) { + int nameiop; + + vn_lock(lowerdvp, LK_EXCLUSIVE | LK_RETRY, p); + + /* + * Only do a LOOKUP on the bottom node, since + * we won't be making changes to it anyway. 
+ */ + nameiop = cnp->cn_nameiop; + cnp->cn_nameiop = LOOKUP; + if (um->um_op == UNMNT_BELOW) { + saved_cred = cnp->cn_cred; + cnp->cn_cred = um->um_cred; + } + lerror = union_lookup1(um->um_lowervp, &lowerdvp, + &lowervp, cnp); + if (um->um_op == UNMNT_BELOW) + cnp->cn_cred = saved_cred; + cnp->cn_nameiop = nameiop; + + if (lowervp != lowerdvp) + VOP_UNLOCK(lowerdvp, 0, p); + + if (cnp->cn_consume != 0) { + if (uppervp != NULLVP) { + if (uppervp == upperdvp) + vrele(uppervp); + else + vput(uppervp); + uppervp = NULLVP; + } + *ap->a_vpp = lowervp; + if (!lockparent) + cnp->cn_flags &= ~LOCKPARENT; + return (lerror); + } + } else { + lerror = ENOENT; + if ((cnp->cn_flags & ISDOTDOT) && dun->un_pvp != NULLVP) { + lowervp = LOWERVP(dun->un_pvp); + if (lowervp != NULLVP) { + VREF(lowervp); + vn_lock(lowervp, LK_EXCLUSIVE | LK_RETRY, p); + lerror = 0; + } + } + } + + if (!lockparent) + cnp->cn_flags &= ~LOCKPARENT; + + /* + * at this point, we have uerror and lerror indicating + * possible errors with the lookups in the upper and lower + * layers. additionally, uppervp and lowervp are (locked) + * references to existing vnodes in the upper and lower layers. + * + * there are now three cases to consider. + * 1. if both layers returned an error, then return whatever + * error the upper layer generated. + * + * 2. if the top layer failed and the bottom layer succeeded + * then two subcases occur. + * a. the bottom vnode is not a directory, in which + * case just return a new union vnode referencing + * an empty top layer and the existing bottom layer. + * b. the bottom vnode is a directory, in which case + * create a new directory in the top-level and + * continue as in case 3. + * + * 3. if the top layer succeeded then return a new union + * vnode referencing whatever the new top layer and + * whatever the bottom layer returned. + */ + + *ap->a_vpp = NULLVP; + + /* case 1. */ + if ((uerror != 0) && (lerror != 0)) { + return (uerror); + } + + /* case 2. 
*/ + if (uerror != 0 /* && (lerror == 0) */ ) { + if (lowervp->v_type == VDIR) { /* case 2b. */ + dun->un_flags &= ~UN_ULOCK; + VOP_UNLOCK(upperdvp, 0, p); + uerror = union_mkshadow(um, upperdvp, cnp, &uppervp); + vn_lock(upperdvp, LK_EXCLUSIVE | LK_RETRY, p); + dun->un_flags |= UN_ULOCK; + + if (uerror) { + if (lowervp != NULLVP) { + vput(lowervp); + lowervp = NULLVP; + } + return (uerror); + } + } + } + + if (lowervp != NULLVP) + VOP_UNLOCK(lowervp, 0, p); + + error = union_allocvp(ap->a_vpp, dvp->v_mount, dvp, upperdvp, cnp, + uppervp, lowervp, 1); + + if (error) { + if (uppervp != NULLVP) + vput(uppervp); + if (lowervp != NULLVP) + vrele(lowervp); + } else { + if (*ap->a_vpp != dvp) + if (!lockparent || !(cnp->cn_flags & ISLASTCN)) + VOP_UNLOCK(dvp, 0, p); + } + + return (error); +} + +int +union_create(ap) + struct vop_create_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_dvp); + struct vnode *dvp = un->un_uppervp; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (dvp != NULLVP) { + int error; + struct vnode *vp; + struct mount *mp; + + FIXUP(un, p); + + VREF(dvp); + un->un_flags |= UN_KLOCK; + mp = ap->a_dvp->v_mount; + vput(ap->a_dvp); + error = VOP_CREATE(dvp, &vp, cnp, ap->a_vap); + if (error) + return (error); + + error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, cnp, vp, + NULLVP, 1); + if (error) + vput(vp); + return (error); + } + + vput(ap->a_dvp); + return (EROFS); +} + +int +union_whiteout(ap) + struct vop_whiteout_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + int a_flags; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_dvp); + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (un->un_uppervp == NULLVP) + return (EOPNOTSUPP); + + FIXUP(un, p); + return (VOP_WHITEOUT(un->un_uppervp, cnp, ap->a_flags)); +} + +int +union_mknod(ap) + struct 
vop_mknod_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_dvp); + struct vnode *dvp = un->un_uppervp; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (dvp != NULLVP) { + int error; + struct vnode *vp; + struct mount *mp; + + FIXUP(un, p); + + VREF(dvp); + un->un_flags |= UN_KLOCK; + mp = ap->a_dvp->v_mount; + vput(ap->a_dvp); + error = VOP_MKNOD(dvp, &vp, cnp, ap->a_vap); + if (error) + return (error); + + if (vp != NULLVP) { + error = union_allocvp(ap->a_vpp, mp, NULLVP, NULLVP, + cnp, vp, NULLVP, 1); + if (error) + vput(vp); + } + return (error); + } + + vput(ap->a_dvp); + return (EROFS); +} + +int +union_open(ap) + struct vop_open_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_vp); + struct vnode *tvp; + int mode = ap->a_mode; + struct ucred *cred = ap->a_cred; + struct proc *p = ap->a_p; + int error; + + /* + * If there is an existing upper vp then simply open that. + */ + tvp = un->un_uppervp; + if (tvp == NULLVP) { + /* + * If the lower vnode is being opened for writing, then + * copy the file contents to the upper vnode and open that, + * otherwise can simply open the lower vnode. 
+ */ + tvp = un->un_lowervp; + if ((ap->a_mode & FWRITE) && (tvp->v_type == VREG)) { + error = union_copyup(un, (mode&O_TRUNC) == 0, cred, p); + if (error == 0) + error = VOP_OPEN(un->un_uppervp, mode, cred, p); + return (error); + } + + /* + * Just open the lower vnode + */ + un->un_openl++; + vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_OPEN(tvp, mode, cred, p); + VOP_UNLOCK(tvp, 0, p); + + return (error); + } + + FIXUP(un, p); + + error = VOP_OPEN(tvp, mode, cred, p); + + return (error); +} + +int +union_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_vp); + struct vnode *vp; + + if ((vp = un->un_uppervp) == NULLVP) { +#ifdef UNION_DIAGNOSTIC + if (un->un_openl <= 0) + panic("union: un_openl cnt"); +#endif + --un->un_openl; + vp = un->un_lowervp; + } + + ap->a_vp = vp; + return (VCALL(vp, VOFFSET(vop_close), ap)); +} + +/* + * Check access permission on the union vnode. + * The access check being enforced is to check + * against both the underlying vnode, and any + * copied vnode. This ensures that no additional + * file permissions are given away simply because + * the user caused an implicit file copy. 
+ */ +int +union_access(ap) + struct vop_access_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_vp); + struct proc *p = ap->a_p; + int error = EACCES; + struct vnode *vp; + + if ((vp = un->un_uppervp) != NULLVP) { + FIXUP(un, p); + ap->a_vp = vp; + return (VCALL(vp, VOFFSET(vop_access), ap)); + } + + if ((vp = un->un_lowervp) != NULLVP) { + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + ap->a_vp = vp; + error = VCALL(vp, VOFFSET(vop_access), ap); + if (error == 0) { + struct union_mount *um = MOUNTTOUNIONMOUNT(vp->v_mount); + + if (um->um_op == UNMNT_BELOW) { + ap->a_cred = um->um_cred; + error = VCALL(vp, VOFFSET(vop_access), ap); + } + } + VOP_UNLOCK(vp, 0, p); + if (error) + return (error); + } + + return (error); +} + +/* + * We handle getattr only to change the fsid and + * track object sizes + */ +int +union_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + int error; + struct union_node *un = VTOUNION(ap->a_vp); + struct vnode *vp = un->un_uppervp; + struct proc *p = ap->a_p; + struct vattr *vap; + struct vattr va; + + + /* + * Some programs walk the filesystem hierarchy by counting + * links to directories to avoid stat'ing all the time. + * This means the link count on directories needs to be "correct". + * The only way to do that is to call getattr on both layers + * and fix up the link count. The link count will not necessarily + * be accurate but will be large enough to defeat the tree walkers. + */ + + vap = ap->a_vap; + + vp = un->un_uppervp; + if (vp != NULLVP) { + /* + * It's not clear whether VOP_GETATTR is to be + * called with the vnode locked or not. stat() calls + * it with (vp) locked, and fstat calls it with + * (vp) unlocked. + * In the mean time, compensate here by checking + * the union_node's lock flag. 
+ */ + if (un->un_flags & UN_LOCKED) + FIXUP(un, p); + + error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p); + if (error) + return (error); + union_newsize(ap->a_vp, vap->va_size, VNOVAL); + } + + if (vp == NULLVP) { + vp = un->un_lowervp; + } else if (vp->v_type == VDIR) { + vp = un->un_lowervp; + vap = &va; + } else { + vp = NULLVP; + } + + if (vp != NULLVP) { + error = VOP_GETATTR(vp, vap, ap->a_cred, ap->a_p); + if (error) + return (error); + union_newsize(ap->a_vp, VNOVAL, vap->va_size); + } + + if ((vap != ap->a_vap) && (vap->va_type == VDIR)) + ap->a_vap->va_nlink += vap->va_nlink; + + ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; + return (0); +} + +int +union_setattr(ap) + struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_vp); + struct proc *p = ap->a_p; + int error; + + /* + * Handle case of truncating lower object to zero size, + * by creating a zero length upper object. This is to + * handle the case of open with O_TRUNC and O_CREAT. + */ + if ((un->un_uppervp == NULLVP) && + /* assert(un->un_lowervp != NULLVP) */ + (un->un_lowervp->v_type == VREG)) { + error = union_copyup(un, (ap->a_vap->va_size != 0), + ap->a_cred, ap->a_p); + if (error) + return (error); + } + + /* + * Try to set attributes in upper layer, + * otherwise return read-only filesystem error. 
+ */ + if (un->un_uppervp != NULLVP) { + FIXUP(un, p); + error = VOP_SETATTR(un->un_uppervp, ap->a_vap, + ap->a_cred, ap->a_p); + if ((error == 0) && (ap->a_vap->va_size != VNOVAL)) + union_newsize(ap->a_vp, ap->a_vap->va_size, VNOVAL); + } else { + error = EROFS; + } + + return (error); +} + +int +union_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + int error; + struct proc *p = ap->a_uio->uio_procp; + struct vnode *vp = OTHERVP(ap->a_vp); + int dolock = (vp == LOWERVP(ap->a_vp)); + + if (dolock) + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + else + FIXUP(VTOUNION(ap->a_vp), p); + error = VOP_READ(vp, ap->a_uio, ap->a_ioflag, ap->a_cred); + if (dolock) + VOP_UNLOCK(vp, 0, p); + + /* + * XXX + * perhaps the size of the underlying object has changed under + * our feet. take advantage of the offset information present + * in the uio structure. + */ + if (error == 0) { + struct union_node *un = VTOUNION(ap->a_vp); + off_t cur = ap->a_uio->uio_offset; + + if (vp == un->un_uppervp) { + if (cur > un->un_uppersz) + union_newsize(ap->a_vp, cur, VNOVAL); + } else { + if (cur > un->un_lowersz) + union_newsize(ap->a_vp, VNOVAL, cur); + } + } + + return (error); +} + +int +union_write(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + int error; + struct vnode *vp; + struct union_node *un = VTOUNION(ap->a_vp); + struct proc *p = ap->a_uio->uio_procp; + + vp = UPPERVP(ap->a_vp); + if (vp == NULLVP) + panic("union: missing upper layer in write"); + + FIXUP(un, p); + error = VOP_WRITE(vp, ap->a_uio, ap->a_ioflag, ap->a_cred); + + /* + * the size of the underlying object may be changed by the + * write. 
+ */ + if (error == 0) { + off_t cur = ap->a_uio->uio_offset; + + if (cur > un->un_uppersz) + union_newsize(ap->a_vp, cur, VNOVAL); + } + + return (error); +} + +union_lease(ap) + struct vop_lease_args /* { + struct vnode *a_vp; + struct proc *a_p; + struct ucred *a_cred; + int a_flag; + } */ *ap; +{ + register struct vnode *ovp = OTHERVP(ap->a_vp); + + ap->a_vp = ovp; + return (VCALL(ovp, VOFFSET(vop_lease), ap)); +} + +int +union_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + int a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *ovp = OTHERVP(ap->a_vp); + + ap->a_vp = ovp; + return (VCALL(ovp, VOFFSET(vop_ioctl), ap)); +} + +int +union_select(ap) + struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *ovp = OTHERVP(ap->a_vp); + + ap->a_vp = ovp; + return (VCALL(ovp, VOFFSET(vop_select), ap)); +} + +int +union_revoke(ap) + struct vop_revoke_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + if (UPPERVP(vp)) + VOP_REVOKE(UPPERVP(vp), ap->a_flags); + if (LOWERVP(vp)) + VOP_REVOKE(LOWERVP(vp), ap->a_flags); + vgone(vp); +} + +int +union_mmap(ap) + struct vop_mmap_args /* { + struct vnode *a_vp; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *ovp = OTHERVP(ap->a_vp); + + ap->a_vp = ovp; + return (VCALL(ovp, VOFFSET(vop_mmap), ap)); +} + +int +union_fsync(ap) + struct vop_fsync_args /* { + struct vnode *a_vp; + struct ucred *a_cred; + int a_waitfor; + struct proc *a_p; + } */ *ap; +{ + int error = 0; + struct proc *p = ap->a_p; + struct vnode *targetvp = OTHERVP(ap->a_vp); + + if (targetvp != NULLVP) { + int dolock = (targetvp == LOWERVP(ap->a_vp)); + + if (dolock) + vn_lock(targetvp, LK_EXCLUSIVE | LK_RETRY, p); + else + FIXUP(VTOUNION(ap->a_vp), p); 
+ error = VOP_FSYNC(targetvp, ap->a_cred, ap->a_waitfor, p); + if (dolock) + VOP_UNLOCK(targetvp, 0, p); + } + + return (error); +} + +int +union_seek(ap) + struct vop_seek_args /* { + struct vnode *a_vp; + off_t a_oldoff; + off_t a_newoff; + struct ucred *a_cred; + } */ *ap; +{ + register struct vnode *ovp = OTHERVP(ap->a_vp); + + ap->a_vp = ovp; + return (VCALL(ovp, VOFFSET(vop_seek), ap)); +} + +int +union_remove(ap) + struct vop_remove_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; + } */ *ap; +{ + int error; + struct union_node *dun = VTOUNION(ap->a_dvp); + struct union_node *un = VTOUNION(ap->a_vp); + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (dun->un_uppervp == NULLVP) + panic("union remove: null upper vnode"); + + if (un->un_uppervp != NULLVP) { + struct vnode *dvp = dun->un_uppervp; + struct vnode *vp = un->un_uppervp; + + FIXUP(dun, p); + VREF(dvp); + dun->un_flags |= UN_KLOCK; + vput(ap->a_dvp); + FIXUP(un, p); + VREF(vp); + un->un_flags |= UN_KLOCK; + vput(ap->a_vp); + + if (union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc)) + cnp->cn_flags |= DOWHITEOUT; + error = VOP_REMOVE(dvp, vp, cnp); + if (!error) + union_removed_upper(un); + } else { + FIXUP(dun, p); + error = union_mkwhiteout( + MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount), + dun->un_uppervp, ap->a_cnp, un->un_path); + vput(ap->a_dvp); + vput(ap->a_vp); + } + + return (error); +} + +int +union_link(ap) + struct vop_link_args /* { + struct vnode *a_vp; + struct vnode *a_tdvp; + struct componentname *a_cnp; + } */ *ap; +{ + int error = 0; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + struct union_node *un; + struct vnode *vp; + struct vnode *tdvp; + + un = VTOUNION(ap->a_tdvp); + + if (ap->a_tdvp->v_op != ap->a_vp->v_op) { + vp = ap->a_vp; + } else { + struct union_node *tun = VTOUNION(ap->a_vp); + if (tun->un_uppervp == NULLVP) { + vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY, p); + if 
(un->un_uppervp == tun->un_dirvp) { + un->un_flags &= ~UN_ULOCK; + VOP_UNLOCK(un->un_uppervp, 0, p); + } + error = union_copyup(tun, 1, cnp->cn_cred, p); + if (un->un_uppervp == tun->un_dirvp) { + vn_lock(un->un_uppervp, + LK_EXCLUSIVE | LK_RETRY, p); + un->un_flags |= UN_ULOCK; + } + VOP_UNLOCK(ap->a_vp, 0, p); + } + vp = tun->un_uppervp; + } + + tdvp = un->un_uppervp; + if (tdvp == NULLVP) + error = EROFS; + + if (error) { + vput(ap->a_tdvp); + return (error); + } + + FIXUP(un, p); + VREF(tdvp); + un->un_flags |= UN_KLOCK; + vput(ap->a_tdvp); + + return (VOP_LINK(vp, tdvp, cnp)); +} + +int +union_rename(ap) + struct vop_rename_args /* { + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; + } */ *ap; +{ + int error; + + struct vnode *fdvp = ap->a_fdvp; + struct vnode *fvp = ap->a_fvp; + struct vnode *tdvp = ap->a_tdvp; + struct vnode *tvp = ap->a_tvp; + + if (fdvp->v_op == union_vnodeop_p) { /* always true */ + struct union_node *un = VTOUNION(fdvp); + if (un->un_uppervp == NULLVP) { + /* + * this should never happen in normal + * operation but might if there was + * a problem creating the top-level shadow + * directory. + */ + error = EXDEV; + goto bad; + } + + fdvp = un->un_uppervp; + VREF(fdvp); + vrele(ap->a_fdvp); + } + + if (fvp->v_op == union_vnodeop_p) { /* always true */ + struct union_node *un = VTOUNION(fvp); + if (un->un_uppervp == NULLVP) { + /* XXX: should do a copyup */ + error = EXDEV; + goto bad; + } + + if (un->un_lowervp != NULLVP) + ap->a_fcnp->cn_flags |= DOWHITEOUT; + + fvp = un->un_uppervp; + VREF(fvp); + vrele(ap->a_fvp); + } + + if (tdvp->v_op == union_vnodeop_p) { + struct union_node *un = VTOUNION(tdvp); + if (un->un_uppervp == NULLVP) { + /* + * this should never happen in normal + * operation but might if there was + * a problem creating the top-level shadow + * directory. 
+ */ + error = EXDEV; + goto bad; + } + + tdvp = un->un_uppervp; + VREF(tdvp); + un->un_flags |= UN_KLOCK; + vput(ap->a_tdvp); + } + + if (tvp != NULLVP && tvp->v_op == union_vnodeop_p) { + struct union_node *un = VTOUNION(tvp); + + tvp = un->un_uppervp; + if (tvp != NULLVP) { + VREF(tvp); + un->un_flags |= UN_KLOCK; + } + vput(ap->a_tvp); + } + + return (VOP_RENAME(fdvp, fvp, ap->a_fcnp, tdvp, tvp, ap->a_tcnp)); + +bad: + vrele(fdvp); + vrele(fvp); + vput(tdvp); + if (tvp != NULLVP) + vput(tvp); + + return (error); +} + +int +union_mkdir(ap) + struct vop_mkdir_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_dvp); + struct vnode *dvp = un->un_uppervp; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (dvp != NULLVP) { + int error; + struct vnode *vp; + + FIXUP(un, p); + VREF(dvp); + un->un_flags |= UN_KLOCK; + VOP_UNLOCK(ap->a_dvp, 0, p); + error = VOP_MKDIR(dvp, &vp, cnp, ap->a_vap); + if (error) { + vrele(ap->a_dvp); + return (error); + } + + error = union_allocvp(ap->a_vpp, ap->a_dvp->v_mount, ap->a_dvp, + NULLVP, cnp, vp, NULLVP, 1); + vrele(ap->a_dvp); + if (error) + vput(vp); + return (error); + } + + vput(ap->a_dvp); + return (EROFS); +} + +int +union_rmdir(ap) + struct vop_rmdir_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; + } */ *ap; +{ + int error; + struct union_node *dun = VTOUNION(ap->a_dvp); + struct union_node *un = VTOUNION(ap->a_vp); + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (dun->un_uppervp == NULLVP) + panic("union rmdir: null upper vnode"); + + if (un->un_uppervp != NULLVP) { + struct vnode *dvp = dun->un_uppervp; + struct vnode *vp = un->un_uppervp; + + FIXUP(dun, p); + VREF(dvp); + dun->un_flags |= UN_KLOCK; + vput(ap->a_dvp); + FIXUP(un, p); + VREF(vp); + un->un_flags |= UN_KLOCK; + vput(ap->a_vp); + + if 
(union_dowhiteout(un, cnp->cn_cred, cnp->cn_proc)) + cnp->cn_flags |= DOWHITEOUT; + error = VOP_RMDIR(dvp, vp, ap->a_cnp); + if (!error) + union_removed_upper(un); + } else { + FIXUP(dun, p); + error = union_mkwhiteout( + MOUNTTOUNIONMOUNT(UNIONTOV(dun)->v_mount), + dun->un_uppervp, ap->a_cnp, un->un_path); + vput(ap->a_dvp); + vput(ap->a_vp); + } + + return (error); +} + +int +union_symlink(ap) + struct vop_symlink_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + char *a_target; + } */ *ap; +{ + struct union_node *un = VTOUNION(ap->a_dvp); + struct vnode *dvp = un->un_uppervp; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + + if (dvp != NULLVP) { + int error; + struct vnode *vp; + struct mount *mp = ap->a_dvp->v_mount; + + FIXUP(un, p); + VREF(dvp); + un->un_flags |= UN_KLOCK; + vput(ap->a_dvp); + error = VOP_SYMLINK(dvp, &vp, cnp, ap->a_vap, ap->a_target); + *ap->a_vpp = NULLVP; + return (error); + } + + vput(ap->a_dvp); + return (EROFS); +} + +/* + * union_readdir works in concert with getdirentries and + * readdir(3) to provide a list of entries in the unioned + * directories. getdirentries is responsible for walking + * down the union stack. readdir(3) is responsible for + * eliminating duplicate names from the returned data stream. 
 */
int
union_readdir(ap)
	struct vop_readdir_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		u_long *a_cookies;
		int a_ncookies;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *uvp = un->un_uppervp;
	struct proc *p = ap->a_uio->uio_procp;

	/* No upper layer: nothing for this level to enumerate. */
	if (uvp == NULLVP)
		return (0);

	/* Forward the whole readdir to the upper-layer vnode. */
	FIXUP(un, p);
	ap->a_vp = uvp;
	return (VCALL(uvp, VOFFSET(vop_readdir), ap));
}

/*
 * Read a symlink through whichever layer backs this union node.
 * Only the upper vnode is held locked by the union layer, so a
 * lower-layer target must be locked/unlocked explicitly here.
 */
int
union_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int error;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_readlink), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Abort a pending name-lookup operation, forwarding it to the
 * backing layer.  The lower vnode is locked only if the union
 * node itself was locked on entry.
 */
int
union_abortop(ap)
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	int error;
	struct componentname *cnp = ap->a_cnp;
	struct proc *p = cnp->cn_proc;
	struct vnode *vp = OTHERVP(ap->a_dvp);
	struct union_node *un = VTOUNION(ap->a_dvp);
	int islocked = un->un_flags & UN_LOCKED;
	int dolock = (vp == LOWERVP(ap->a_dvp));

	if (islocked) {
		if (dolock)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		else
			FIXUP(VTOUNION(ap->a_dvp), p);
	}
	ap->a_dvp = vp;
	error = VCALL(vp, VOFFSET(vop_abortop), ap);
	if (islocked && dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Inactivate a union node.  The lower vnode reference is kept until
 * reclaim so the union node stays cached and reusable; only the
 * directory-merge cache built for readdir is released here.
 */
int
union_inactive(ap)
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	struct union_node *un = VTOUNION(vp);
	struct vnode **vpp;

	/*
	 * Do nothing (and _don't_ bypass).
	 * Wait to vrele lowervp until reclaim,
	 * so that until then our union_node is in the
	 * cache and reusable.
	 *
	 * NEEDSWORK: Someday, consider inactive'ing
	 * the lowervp and then trying to reactivate it
	 * with capabilities (v_id)
	 * like they do in the name lookup cache code.
	 * That's too much work for now.
	 */

	/* Release the NULLVP-terminated dircache vnode array, if any. */
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		_FREE(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}

	VOP_UNLOCK(vp, 0, p);

	if ((un->un_flags & UN_CACHED) == 0)
		vgone(vp);

	return (0);
}

/*
 * Reclaim a union node: release all per-node state via union_freevp().
 */
int
union_reclaim(ap)
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	union_freevp(ap->a_vp);

	return (0);
}

/*
 * Lock a union node.  Acquires the upper vnode's lock (tracked by
 * UN_ULOCK) and then the union node's own UN_LOCKED flag, sleeping
 * and retrying if another thread holds it.
 */
int
union_lock(ap)
	struct vop_lock_args *ap;
{
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	int flags = ap->a_flags;
	struct union_node *un;
	int error;


	vop_nolock(ap);
	/*
	 * Need to do real lockmgr-style locking here.
	 * in the mean time, draining won't work quite right,
	 * which could lead to a few race conditions.
	 * the following test was here, but is not quite right, we
	 * still need to take the lock:
	if ((flags & LK_TYPE_MASK) == LK_DRAIN)
		return (0);
	 */
	flags &= ~LK_INTERLOCK;

start:
	un = VTOUNION(vp);

	if (un->un_uppervp != NULLVP) {
		/* Take the upper vnode's lock once per union node (UN_ULOCK). */
		if (((un->un_flags & UN_ULOCK) == 0) &&
		    (vp->v_usecount != 0)) {
			error = vn_lock(un->un_uppervp, flags, p);
			if (error)
				return (error);
			un->un_flags |= UN_ULOCK;
		}
#if DIAGNOSTIC
		if (un->un_flags & UN_KLOCK) {
			vprint("union: dangling klock", vp);
			panic("union: dangling upper lock (%lx)", vp);
		}
#endif
	}

	if (un->un_flags & UN_LOCKED) {
#if DIAGNOSTIC
		if (current_proc() && un->un_pid == current_proc()->p_pid &&
		    un->un_pid > -1 && current_proc()->p_pid > -1)
			panic("union: locking against myself");
#endif
		/* Someone else holds the node; sleep until woken, then retry. */
		un->un_flags |= UN_WANT;
		tsleep((caddr_t)&un->un_flags, PINOD, "unionlk2", 0);
		goto start;
	}

#if DIAGNOSTIC
	/* Record the owning pid for the lock-against-myself diagnostics. */
	if (current_proc())
		un->un_pid = current_proc()->p_pid;
	else
		un->un_pid = -1;
#endif

	un->un_flags |= UN_LOCKED;
	return (0);
}

/*
 * When operations want to vput() a union node yet retain a lock on
 * the upper vnode (say, to do some further operations like link(),
 * mkdir(), ...), they set UN_KLOCK on the union node, then call
 * vput() which calls VOP_UNLOCK() and comes here. union_unlock()
 * unlocks the union node (leaving the upper vnode alone), clears the
 * KLOCK flag, and then returns to vput(). The caller then does whatever
 * is left to do with the upper vnode, and ensures that it gets unlocked.
 *
 * If UN_KLOCK isn't set, then the upper vnode is unlocked here.
 */
int
union_unlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct union_node *un = VTOUNION(ap->a_vp);
	struct proc *p = ap->a_p;

#if DIAGNOSTIC
	if ((un->un_flags & UN_LOCKED) == 0)
		panic("union: unlock unlocked node");
	if (current_proc() && un->un_pid != current_proc()->p_pid &&
	    current_proc()->p_pid > -1 && un->un_pid > -1)
		panic("union: unlocking other process's union node");
#endif

	un->un_flags &= ~UN_LOCKED;

	/* Release the upper vnode's lock unless the caller asked to keep it. */
	if ((un->un_flags & (UN_ULOCK|UN_KLOCK)) == UN_ULOCK)
		VOP_UNLOCK(un->un_uppervp, 0, p);

	un->un_flags &= ~(UN_ULOCK|UN_KLOCK);

	/* Wake anyone sleeping in union_lock() waiting for this node. */
	if (un->un_flags & UN_WANT) {
		un->un_flags &= ~UN_WANT;
		wakeup((caddr_t) &un->un_flags);
	}

#if DIAGNOSTIC
	un->un_pid = 0;
#endif
	vop_nounlock(ap);

	return (0);
}

/*
 * Map a logical block number: forwarded to the backing layer,
 * locking the lower vnode explicitly when it is the target.
 */
int
union_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
	} */ *ap;
{
	int error;
	struct proc *p = current_proc();	/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_bmap), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Map a file offset range to device blocks: same pass-through and
 * locking pattern as union_bmap above.
 */
int
union_cmap(ap)
	struct vop_cmap_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		size_t a_size;
		daddr_t *a_bpn;
		size_t *a_run;
		void *a_poff;
	} */ *ap;
{
	int error;
	struct proc *p = current_proc();	/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_cmap), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Debugging: print this union node and both backing vnodes.
 */
int
union_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	printf("\ttag VT_UNION, vp=%x, uppervp=%x, lowervp=%x\n",
			vp, UPPERVP(vp), LOWERVP(vp));
	if (UPPERVP(vp) != NULLVP)
		vprint("union: upper", UPPERVP(vp));
	if (LOWERVP(vp) != NULLVP)
		vprint("union: lower", LOWERVP(vp));

	return (0);
}

/*
 * Report whether this union node is locked (UN_LOCKED flag).
 */
int
union_islocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return ((VTOUNION(ap->a_vp)->un_flags & UN_LOCKED) ? 1 : 0);
}

/*
 * pathconf: forwarded to the backing layer with the usual
 * lower-vnode locking.
 */
int
union_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{
	int error;
	struct proc *p = current_proc();	/* XXX */
	struct vnode *vp = OTHERVP(ap->a_vp);
	int dolock = (vp == LOWERVP(ap->a_vp));

	if (dolock)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	else
		FIXUP(VTOUNION(ap->a_vp), p);
	ap->a_vp = vp;
	error = VCALL(vp, VOFFSET(vop_pathconf), ap);
	if (dolock)
		VOP_UNLOCK(vp, 0, p);

	return (error);
}

/*
 * Advisory locking: applied directly to the backing vnode.
 */
int
union_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{
	register struct vnode *ovp = OTHERVP(ap->a_vp);

	ap->a_vp = ovp;
	return (VCALL(ovp, VOFFSET(vop_advlock), ap));
}


/*
 * XXX - vop_strategy must be hand coded because it has no
 * vnode in its arguments.
 * This goes away with a merged VM/buffer cache.
+ */ +int +union_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + struct buf *bp = ap->a_bp; + int error; + struct vnode *savedvp; + + savedvp = bp->b_vp; + bp->b_vp = OTHERVP(bp->b_vp); + +#if DIAGNOSTIC + if (bp->b_vp == NULLVP) + panic("union_strategy: nil vp"); + if (((bp->b_flags & B_READ) == 0) && + (bp->b_vp == LOWERVP(savedvp))) + panic("union_strategy: writing to lowervp"); +#endif + + error = VOP_STRATEGY(bp); + bp->b_vp = savedvp; + + return (error); +} + +/* Pagein */ +union_pagein(ap) + struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + int error; + struct proc *p = current_proc(); + struct vnode *vp = OTHERVP(ap->a_vp); + + error = VOP_PAGEIN(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, + ap->a_size, ap->a_cred,ap->a_flags); + + /* + * XXX + * perhaps the size of the underlying object has changed under + * our feet. take advantage of the offset information present + * in the uio structure. 
+ */ + if (error == 0) { + struct union_node *un = VTOUNION(ap->a_vp); + off_t cur = ap->a_f_offset + (off_t)ap->a_pl_offset; + + if (vp == un->un_uppervp) { + if (cur > un->un_uppersz) + union_newsize(ap->a_vp, cur, VNOVAL); + } else { + if (cur > un->un_lowersz) + union_newsize(ap->a_vp, VNOVAL, cur); + } + } + + return (error); +} + +/* Pageout */ +union_pageout(ap) + struct vop_pageout_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + int error; + struct vnode *vp; + struct union_node *un = VTOUNION(ap->a_vp); + + vp = UPPERVP(ap->a_vp); + if (vp == NULLVP) + panic("union: missing upper layer in pageout"); + + error = VOP_PAGEOUT(vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, + ap->a_size, ap->a_cred,ap->a_flags); + + /* + * the size of the underlying object may be changed by the + * write. + */ + if (error == 0) { + off_t cur = ap->a_f_offset + (off_t)ap->a_pl_offset; + + if (cur > un->un_uppersz) + union_newsize(ap->a_vp, cur, VNOVAL); + } + + return (error); +} + +/* Blktooff derives file offset for the given logical block number */ +int +union_blktooff(ap) + struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; + } */ *ap; +{ + int error; + struct vnode *vp = OTHERVP(ap->a_vp); + + error = VOP_BLKTOOFF(vp, ap->a_lblkno, ap->a_offset); + + return(error); +} + +/* offtoblk derives file offset for the given logical block number */ +int +union_offtoblk(ap) + struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; + } */ *ap; +{ + int error; + struct vnode *vp = OTHERVP(ap->a_vp); + + error = VOP_OFFTOBLK(vp, ap->a_offset, ap->a_lblkno); + + return(error); +} + +#define VOPFUNC int (*)(void *) + +/* + * Global vfs data structures + */ +int (**union_vnodeop_p)(void *); +struct vnodeopv_entry_desc union_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { 
&vop_lookup_desc, (VOPFUNC)union_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)union_create }, /* create */ + { &vop_whiteout_desc, (VOPFUNC)union_whiteout }, /* whiteout */ + { &vop_mknod_desc, (VOPFUNC)union_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)union_open }, /* open */ + { &vop_close_desc, (VOPFUNC)union_close }, /* close */ + { &vop_access_desc, (VOPFUNC)union_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)union_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)union_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)union_read }, /* read */ + { &vop_write_desc, (VOPFUNC)union_write }, /* write */ + { &vop_lease_desc, (VOPFUNC)union_lease }, /* lease */ + { &vop_ioctl_desc, (VOPFUNC)union_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)union_select }, /* select */ + { &vop_revoke_desc, (VOPFUNC)union_revoke }, /* revoke */ + { &vop_mmap_desc, (VOPFUNC)union_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)union_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)union_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)union_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)union_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)union_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)union_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (VOPFUNC)union_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)union_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)union_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)union_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)union_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)union_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)union_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)union_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)union_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)union_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)union_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)union_print }, /* 
print */ + { &vop_islocked_desc, (VOPFUNC)union_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)union_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)union_advlock }, /* advlock */ +#ifdef notdef + { &vop_blkatoff_desc, (VOPFUNC)union_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)union_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)union_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)union_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)union_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)union_bwrite }, /* bwrite */ +#endif + { &vop_pagein_desc, (VOPFUNC)union_pagein }, /* Pagein */ + { &vop_pageout_desc, (VOPFUNC)union_pageout }, /* Pageout */ + { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (VOPFUNC)union_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (VOPFUNC)union_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (VOPFUNC)union_cmap }, /* cmap */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc union_vnodeop_opv_desc = + { &union_vnodeop_p, union_vnodeop_entries }; diff --git a/bsd/miscfs/volfs/volfs.h b/bsd/miscfs/volfs/volfs.h new file mode 100644 index 000000000..91395838e --- /dev/null +++ b/bsd/miscfs/volfs/volfs.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, Apple Computer, Inc. All rights reserved. */ +/* + * Header file for volfs + */ + +struct volfs_mntdata +{ + struct vnode *volfs_rootvp; + LIST_HEAD(volfs_fsvnodelist, vnode) volfs_fsvnodes; +}; + +/* + * Volfs vnodes exist only for the root, which allows for the enumeration + * of all volfs accessible filesystems, and for the filesystems which + * volfs handles. + */ +#define VOLFS_ROOT 1 /* This volfs vnode represents root of volfs */ +#define VOLFS_FSNODE 2 /* This volfs vnode represents a file system */ + +struct volfs_vndata +{ + int vnode_type; + struct lock__bsd__ lock; + unsigned int nodeID; /* the dev entry of a file system */ + struct mount * fs_mount; +}; + +#define MAXVLFSNAMLEN 24 /* max length is really 10, pad to 24 since + * some of the math depends on VLFSDIRENTLEN + * being a power of 2 */ +#define VLFSDIRENTLEN (MAXVLFSNAMLEN + sizeof(u_int32_t) + sizeof(u_int16_t) + sizeof(u_int8_t) + sizeof(u_int8_t)) + +#define ROOT_DIRID 2 + +extern int (**volfs_vnodeop_p)(void *); +__BEGIN_DECLS + +int volfs_mount __P((struct mount *, char *, caddr_t, struct nameidata *, + struct proc *)); +int volfs_start __P((struct mount *, int, struct proc *)); +int volfs_unmount __P((struct mount *, int, struct proc *)); +int volfs_root __P((struct mount *, struct vnode **)); +int volfs_quotactl __P((struct mount *, int, uid_t, caddr_t, + struct proc *)); +int volfs_statfs __P((struct mount *, struct statfs *, struct proc *)); +int volfs_sync __P((struct 
mount *, int, struct ucred *, struct proc *)); +int volfs_vget __P((struct mount *, void *ino_t, struct vnode **)); +int volfs_fhtovp __P((struct mount *, struct fid *, struct mbuf *, + struct vnode **, int *, struct ucred **)); +int volfs_vptofh __P((struct vnode *, struct fid *)); +int volfs_init __P((struct vfsconf *)); +int volfs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t, + struct proc *)); + +int volfs_reclaim __P((struct vop_reclaim_args*)); +int volfs_access __P((struct vop_access_args *)); +int volfs_getattr __P((struct vop_getattr_args *)); +int volfs_select __P((struct vop_select_args *)); +int volfs_rmdir __P((struct vop_rmdir_args *)); +int volfs_readdir __P((struct vop_readdir_args *)); +int volfs_lock __P((struct vop_lock_args *)); +int volfs_unlock __P((struct vop_unlock_args *)); +int volfs_islocked __P((struct vop_islocked_args *)); +int volfs_pathconf __P((struct vop_pathconf_args *)); +int volfs_lookup __P((struct vop_lookup_args *)); +__END_DECLS + +#define VTOVL(VP) ((struct volfs_vndata *)((VP)->v_data)) + +#define PRINTIT kprintf + +#if VOLFS_DEBUG + #define DBG_VOP_TEST_LOCKS 1 + #define DBG_FUNC_NAME(FSTR) static char *funcname = FSTR + #define DBG_PRINT_FUNC_NAME() PRINTIT("%s\n", funcname); + #define DBG_VOP_PRINT_FUNCNAME() PRINTIT("%s: ", funcname); + #define DBG_VOP_PRINT_CPN_INFO(CN) PRINTIT("name: %s",(CN)->cn_nameptr); + #define DBG_VOP(STR) PRINTIT STR; + #define DBG_VOP_PRINT_VNODE_INFO(VP) { if ((VP)) \ + { if ((VP)->v_tag == VT_NON) \ + PRINTIT("\tfs:%s id: %d v: 0x%x ", VTOVL(VP)->fs_mount->mnt_stat.f_fstypename, VTOVL(VP)->nodeID, (u_int)(VP)); \ + else PRINTIT("\t%s v: 0x%x ", (VP)->v_mount->mnt_stat.f_fstypename, (u_int)(VP)); \ + } else { PRINTIT("*** NULL NODE ***"); } } + +#else /* VOLFS_DEBUG */ + #define DBG_VOP_TEST_LOCKS 0 + #define DBG_FUNC_NAME(FSTR) + #define DBG_PRINT_FUNC_NAME() + #define DBG_VOP_PRINT_FUNCNAME() + #define DBG_VOP_PRINT_CPN_INFO(CN) + #define DBG_VOP(A) + #define 
DBG_VOP_PRINT_VNODE_INFO(VP) +#endif /* VOLFS_DEBUG */ + + +#if DBG_VOP_TEST_LOCKS + +#define VOPDBG_IGNORE 0 +#define VOPDBG_LOCKED 1 +#define VOPDBG_UNLOCKED -1 +#define VOPDBG_LOCKNOTNIL 2 +#define VOPDBG_SAME 3 + +#define VOPDBG_ZERO 0 +#define VOPDBG_POS 1 + + +#define MAXDBGLOCKS 15 + +typedef struct VopDbgStoreRec { + short id; + struct vnode *vp; + short inState; + short outState; + short errState; + int inValue; + int outValue; + } VopDbgStoreRec; + + +/* This sets up the test for the lock state of vnodes. The entry paramaters are: + * I = index of paramater + * VP = pointer to a vnode + * ENTRYSTATE = the inState of the lock + * EXITSTATE = the outState of the lock + * ERRORSTATE = the error state of the lock + * It initializes the structure, does some preliminary validity checks, but does nothing + * if the instate is set to be ignored. + */ + +#define DBG_VOP_LOCKS_DECL(I) VopDbgStoreRec VopDbgStore[I];short numOfLockSlots=I +#define DBG_VOP_LOCKS_INIT(I,VP,ENTRYSTATE,EXITSTATE,ERRORSTATE,CHECKFLAG) \ + if (I >= numOfLockSlots) { \ + PRINTIT("%s: DBG_VOP_LOCKS_INIT: Entry #%d greater than allocated slots!\n", funcname, I); \ + }; \ + VopDbgStore[I].id = I; \ + VopDbgStore[I].vp = (VP); \ + VopDbgStore[I].inState = ENTRYSTATE; \ + VopDbgStore[I].outState = EXITSTATE; \ + VopDbgStore[I].errState = ERRORSTATE; \ + VopDbgStore[I].inValue = 0; \ + VopDbgStore[I].outValue = 0; \ + if ((VopDbgStore[I].inState != VOPDBG_IGNORE)) { \ + if ((VP) == NULL) \ + PRINTIT ("%s: DBG_VOP_LOCK on start: Null vnode ptr\n", funcname); \ + else \ + VopDbgStore[I].inValue = lockstatus (&((struct volfs_vndata *)((VP)->v_data))->lock); \ + } \ + if ((VP) != NULL) \ + { \ + if (CHECKFLAG==VOPDBG_POS && (VP)->v_usecount <= 0) \ + PRINTIT("%s: BAD USECOUNT OF %d !!!!\n", funcname, (VP)->v_usecount); \ + else if ((VP)->v_usecount < 0) \ + PRINTIT("%s: BAD USECOUNT OF %d !!!!\n", funcname, (VP)->v_usecount); \ + } +#define DBG_VOP_UPDATE_VP(I, VP) \ + VopDbgStore[I].vp = (VP); + + 
+#define DBG_VOP_LOCKS_TEST(status) DbgVopTest (numOfLockSlots, status, VopDbgStore, funcname); + +#else /*DBG_VOP_TEST_LOCKS */ +#define DBG_VOP_LOCKS_DECL(A) +#define DBG_VOP_LOCKS_INIT(A,B,C,D,E,F) +#define DBG_VOP_LOCKS_TEST(a) +#define DBG_VOP_UPDATE_VP(I, VP) + +#endif /* DBG_VOP_TEST_LOCKS */ diff --git a/bsd/miscfs/volfs/volfs_vfsops.c b/bsd/miscfs/volfs/volfs_vfsops.c new file mode 100644 index 000000000..096873b7f --- /dev/null +++ b/bsd/miscfs/volfs/volfs_vfsops.c @@ -0,0 +1,481 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998 Apple Computer, Inc. All Rights Reserved */ +/* + * Change History: + * + * 29-May-1998 Pat Dirks Changed to cache pointer to root vnode until unmount. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "volfs.h" + +struct vfsops volfs_vfsops = { + volfs_mount, + volfs_start, + volfs_unmount, + volfs_root, + volfs_quotactl, + volfs_statfs, + volfs_sync, + volfs_vget, + volfs_fhtovp, + volfs_vptofh, + volfs_init, + volfs_sysctl +}; + +static char volfs_fs_name[MFSNAMELEN] = "volfs"; +extern struct vnodeopv_desc volfs_vnodeop_opv_desc; + +/* The following refer to kernel global variables used in the loading/initialization: */ +extern int maxvfsslots; /* Total number of slots in the system's vfsconf table */ +extern int maxvfsconf; /* The highest fs type number [old-style ID] in use [dispite its name] */ +extern int vfs_opv_numops; /* The total number of defined vnode operations */ +extern int kdp_flag; + +void +volfs_load(int loadArgument) { + struct vfsconf *vfsconflistentry; + int entriesRemaining; + struct vfsconf *newvfsconf = NULL; + struct vfsconf *lastentry = NULL; + int j; + int (***opv_desc_vector_p)(); + int (**opv_desc_vector)(); + struct vnodeopv_entry_desc *opve_descp; + +#pragma unused(loadArgument) + + /* + * This routine is responsible for all the initialization that would + * ordinarily be done as part of the system startup; it calls volfs_init + * to do the initialization that is strictly volfs-specific. + */ + + /* + prevvfsconf is supposed to be the entry preceding the new entry. + To make sure we can always get hooked in SOMEWHERE in the list, + start it out at the first entry of the list. This assumes the + first entry in the list will be non-empty and not volfs. + + This becomes irrelevant when volfs is compiled into the list. 
+ */ + DBG_VOP(("load_volfs: Scanning vfsconf list...\n")); + vfsconflistentry = vfsconf; + for (entriesRemaining = maxvfsslots; entriesRemaining > 0; --entriesRemaining) { + if (vfsconflistentry->vfc_vfsops != NULL) { + /* + * Check to see if we're reloading a new version of volfs during debugging + * and overwrite the previously assigned entry if we find one: + */ + if (strcmp(vfsconflistentry->vfc_name, volfs_fs_name) == 0) { + newvfsconf = vfsconflistentry; + break; + } else { + lastentry = vfsconflistentry; + }; + } else { + /* + * This is at least a POSSIBLE place to insert the new entry... + */ + newvfsconf = vfsconflistentry; + }; + ++vfsconflistentry; + }; + + if (newvfsconf) { + DBG_VOP(("load_volfs: filling in vfsconf entry at 0x%08lX; lastentry = 0x%08lX.\n", (long)newvfsconf, (long)lastentry)); + newvfsconf->vfc_vfsops = &volfs_vfsops; + strncpy(&newvfsconf->vfc_name[0], "volfs", MFSNAMELEN); + newvfsconf->vfc_typenum = maxvfsconf++; + newvfsconf->vfc_refcount = 0; + newvfsconf->vfc_flags = 0; + newvfsconf->vfc_mountroot = NULL; /* Can't mount root of file system [yet] */ + + /* Hook into the list: */ + newvfsconf->vfc_next = NULL; + if (lastentry) { + newvfsconf->vfc_next = lastentry->vfc_next; + lastentry->vfc_next = newvfsconf; + }; + + /* Based on vfs_op_init and ... */ + opv_desc_vector_p = volfs_vnodeop_opv_desc.opv_desc_vector_p; + + DBG_VOP(("load_volfs: Allocating and initializing VNode ops vector...\n")); + + /* + * Allocate and init the vector. + * Also handle backwards compatibility. + */ + MALLOC(*opv_desc_vector_p, PFI *, vfs_opv_numops*sizeof(PFI), M_TEMP, M_WAITOK); + + bzero (*opv_desc_vector_p, vfs_opv_numops*sizeof(PFI)); + + opv_desc_vector = *opv_desc_vector_p; + for (j=0; volfs_vnodeop_opv_desc.opv_desc_ops[j].opve_op; j++) { + opve_descp = &(volfs_vnodeop_opv_desc.opv_desc_ops[j]); + + /* + * Sanity check: is this operation listed + * in the list of operations? We check this + * by seeing if its offest is zero. 
Since + * the default routine should always be listed + * first, it should be the only one with a zero + * offset. Any other operation with a zero + * offset is probably not listed in + * vfs_op_descs, and so is probably an error. + * + * A panic here means the layer programmer + * has committed the all-too common bug + * of adding a new operation to the layer's + * list of vnode operations but + * not adding the operation to the system-wide + * list of supported operations. + */ + if (opve_descp->opve_op->vdesc_offset == 0 && + opve_descp->opve_op->vdesc_offset != VOFFSET(vop_default)) { + DBG_VOP(("load_volfs: operation %s not listed in %s.\n", + opve_descp->opve_op->vdesc_name, + "vfs_op_descs")); + panic ("load_volfs: bad operation"); + } + /* + * Fill in this entry. + */ + opv_desc_vector[opve_descp->opve_op->vdesc_offset] = + opve_descp->opve_impl; + } + + /* + * Finally, go back and replace unfilled routines + * with their default. (Sigh, an O(n^3) algorithm. I + * could make it better, but that'd be work, and n is small.) + */ + opv_desc_vector_p = volfs_vnodeop_opv_desc.opv_desc_vector_p; + + /* + * Force every operations vector to have a default routine. + */ + opv_desc_vector = *opv_desc_vector_p; + if (opv_desc_vector[VOFFSET(vop_default)]==NULL) { + panic("load_vp;fs: operation vector without default routine."); + } + for (j = 0;jvolfs_fsvnodes); + DBG_VOP(("LIST_INIT succeeded\n")); + + mp->mnt_data = (void *)priv_mnt_data; + strcpy(mp->mnt_stat.f_fstypename, "volfs"); + (void) copyinstr(path, mp->mnt_stat.f_mntonname, sizeof(mp->mnt_stat.f_mntonname) - 1, &size); + strcpy(mp->mnt_stat.f_mntfromname, ""); + + /* Set up the root vnode for fast reference in the future. + Note that the root is maintained unlocked but with a pos. ref count until unmount. 
*/ + + MALLOC(priv_vn_data, struct volfs_vndata *, sizeof(struct volfs_vndata), M_VOLFSNODE, M_WAITOK); + error = getnewvnode(VT_VOLFS, mp, volfs_vnodeop_p, &root_vp); + if (error != 0) + { + FREE(priv_mnt_data, M_VOLFSMNT); + FREE(priv_vn_data, M_VOLFSNODE); + DBG_VOP(("getnewvnode failed with error code %d\n", error)); + return(error); + } + root_vp->v_type = VDIR; + root_vp->v_flag |= VROOT; + lockinit(&priv_vn_data->lock, PINOD, "volfsnode", 0, 0); + priv_vn_data->vnode_type = VOLFS_ROOT; + priv_vn_data->nodeID = 0; + priv_vn_data->fs_mount = mp; + root_vp->v_data = priv_vn_data; + + priv_mnt_data->volfs_rootvp = root_vp; + + return (0); +} + +int +volfs_start(mp, flags, p) +struct mount * mp; +int flags; +struct proc * p; +{ + DBG_VOP(("volfs_start called\n")); + return (0); +} + +/* + * Return the root of a filesystem. For volfs the root vnode is a directory + * containing the list of all filesystems volfs can work with. + */ +int +volfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct volfs_mntdata *priv_data; + // struct volfs_vndata *priv_vn_data; + // int error; + + DBG_VOP(("volfs_root called\n")); + priv_data = (struct volfs_mntdata *)mp->mnt_data; + + if (priv_data->volfs_rootvp) { + vref(priv_data->volfs_rootvp); + VOP_LOCK(priv_data->volfs_rootvp, LK_EXCLUSIVE, current_proc()); + *vpp = priv_data->volfs_rootvp; + } else { + panic("volfs: root vnode missing!"); + }; + + DBG_VOP(("volfs_root returned with ")); + DBG_VOP_PRINT_VNODE_INFO(*vpp);DBG_VOP(("\n")); + + return(0); +} + +int +volfs_quotactl(mp, cmds, uid, arg, p) +struct mount *mp; +int cmds; +uid_t uid; +caddr_t arg; +struct proc * p; +{ + DBG_VOP(("volfs_quotactl called\n")); + return (0); +} + +/* + * unmount system call + */ +int +volfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + struct volfs_mntdata *priv_data; + struct vnode *root_vp; + int retval; + + DBG_VOP(("volfs_unmount called\n")); + priv_data = (struct volfs_mntdata 
*)mp->mnt_data; + + root_vp = priv_data->volfs_rootvp; + retval = vflush(mp, root_vp, 0); + if (retval) goto Err_Exit; + + /* Free the root vnode. + Note that there's no need to vget() or vref() it before locking it here: + the ref. count has been maintained at +1 ever since mount time. */ + if (root_vp) { + retval = vn_lock(root_vp, LK_EXCLUSIVE, p); + if (retval) goto Err_Exit; + if (root_vp->v_usecount > 1) { + DBG_VOP(("VOLFS ERROR: root vnode = %x, usecount = %d\n", (int)root_vp, priv_data->volfs_rootvp->v_usecount)); + VOP_UNLOCK(root_vp, 0, p); + retval = EBUSY; + goto Err_Exit; + }; + + priv_data->volfs_rootvp = NULL; + vput(root_vp); /* This drops volfs's own refcount */ + vgone(root_vp); + }; + + /* All vnodes should be gone, and no errors, clean up the last */ + /* XXX DBG_ASSERT(mp->mnt_vnodelist.lh_first == NULL); */ + /* XXX DBG_ASSERT(retval == 0); */ + + mp->mnt_data = NULL; + FREE(priv_data, M_VOLFSMNT); + +Err_Exit: + + return(retval); +} + +/* + * Get file system statistics. + */ +int +volfs_statfs(mp, sbp, p) + struct mount *mp; + register struct statfs *sbp; + struct proc *p; +{ + DBG_VOP(("volfs_statfs called\n")); + sbp->f_bsize = 512; + sbp->f_iosize = 512; + sbp->f_blocks = 1024; // lies, darn lies and virtual file systems + sbp->f_bfree = 0; // Nope, can't write here! + sbp->f_bavail = 0; + sbp->f_files = 0; // Hmmm...maybe later + sbp->f_ffree = 0; + return (0); +} + +/* + * volfs doesn't have any data and you can't write into any of the volfs + * structures, so don't do anything + */ +int +volfs_sync(mp, waitfor, cred, p) + struct mount *mp; + int waitfor; + struct ucred *cred; + struct proc *p; +{ +// DBG_VOP(("volfs_sync called\n")); + return 0; +} +/* + * Look up a FFS dinode number to find its incore vnode, otherwise read it + * in from disk. If it is in core, wait for the lock bit to clear, then + * return the inode locked. Detection and handling of mount points must be + * done by the calling routine. 
+ */ +int +volfs_vget(mp, ino, vpp) + struct mount *mp; + void *ino; + struct vnode **vpp; +{ +// DBG_VOP(("volfs_vget called\n")); + return(0); +} +/* + * File handle to vnode + * + * Have to be really careful about stale file handles: + * - check that the inode number is valid + * - call ffs_vget() to get the locked inode + * - check for an unallocated inode (i_mode == 0) + * - check that the given client host has export rights and return + * those rights via. exflagsp and credanonp + */ +int +volfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) + register struct mount *mp; + struct fid *fhp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred **credanonp; +{ + DBG_VOP(("volfs_fhtovp called\n")); + return(0); +} +/* + * Vnode pointer to File handle + */ +/* ARGSUSED */ +int +volfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + DBG_VOP(("volfs_vptofh called\n")); + return(0); +} +/* + * Initialize the filesystem + */ +int +volfs_init(vfsp) + struct vfsconf *vfsp; +{ + DBG_VOP(("volfs_init called\n")); + return (0); +} + +/* + * fast filesystem related variables. + */ +int +volfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + DBG_VOP(("volfs_sysctl called\n")); + return (EOPNOTSUPP); +} + diff --git a/bsd/miscfs/volfs/volfs_vnops.c b/bsd/miscfs/volfs/volfs_vnops.c new file mode 100644 index 000000000..dd54bb59a --- /dev/null +++ b/bsd/miscfs/volfs/volfs_vnops.c @@ -0,0 +1,1149 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer, Inc. All Rights Reserved. + * + * Modification History: + * + * 2/10/2000 Clark Warner Added copyfile + * 5/24/1999 Don Brady Fixed security hole in get_fsvnode. + * 11/18/1998 Don Brady Special case 2 to mean the root of a file system. + * 9/28/1998 Umesh Vaishampayan Use the default vnode ops. Cleanup + * header includes. + * 11/12/1998 Scott Roberts validfsnode only checks to see if the volfs mount flag is set + * 8/5/1998 Don Brady fix validfsnode logic to handle a "bad" VFS_GET + * 7/5/1998 Don Brady In volfs_reclaim set vp->v_data to NULL after private data is free (VFS expects a NULL). + * 4/5/1998 Don Brady Changed lockstatus calls to VOP_ISLOCKED (radar #2231108); + * 3/25/1998 Pat Dirks Added include for sys/attr.h, which is no longer included indirectly. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "volfs.h" + +/* + * volfs acts as a bridge between the requirements of the MacOS API and the Unix API. + * MacOS applications describe files by a triple. + * The Unix API describes files by pathname. Volfs is a virtual file system that sits over + * the HFS VFS interface and allows files to be described by a // + * pathname. 
+ * + * The root of the volfs filesystem consists of directories named the volume ID's of all the + * currently mounted filesystems which support the VFS vget() routine. Each of those directories + * supports the lookup by file ID of all files and directories within the filesystem. When a + * file or directory is resolved its vnode from that filesystem rather than a volfs vnode is returned + * allowing immediate access to the target file or directory. + * + * Readdir on the root of the volfs filesystem returns the list of available file systems. Readdir + * on a filesystem node, however, returns only . and .. since it is not practical to list all + * of the file ID's in a timely fashion and furthermore VFS does not provide a mechanism for + * enumerating all of the file id's. + * + * Volume ID's are taken from the low 32 bits of the f_fsid field, formatted as a base 10 ASCII + * string with no leading zeros (volume ID 1 is represented as "1"). + * + * File ID's are created in same manner, with their 32 bits formatted as a base 10 ASCII + * string with no leading zeros. + * + * Volfs does create a security hole since it is possible to bypass directory permissions higher + * in the namespace tree. This security hole is about the same as the one created by NFS which uses + * a similar mechanism. + */ + +#define VOPFUNC int (*)(void *) + +/* Global vfs data structures for volfs. 
*/ +int (**volfs_vnodeop_p) (void *); +struct vnodeopv_entry_desc volfs_vnodeop_entries[] = { + {&vop_default_desc, (VOPFUNC)vn_default_error}, + {&vop_strategy_desc, (VOPFUNC)err_strategy}, /* strategy */ + {&vop_bwrite_desc, (VOPFUNC)err_bwrite}, /* bwrite */ + {&vop_lookup_desc, (VOPFUNC)volfs_lookup}, /* lookup */ + {&vop_create_desc, (VOPFUNC)err_create}, /* create */ + {&vop_whiteout_desc, (VOPFUNC)err_whiteout}, /* whiteout */ + {&vop_mknod_desc, (VOPFUNC)err_mknod}, /* mknod */ + {&vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex}, /* mkcomplex */ + {&vop_open_desc, (VOPFUNC)nop_open}, /* open */ + {&vop_close_desc, (VOPFUNC)nop_close}, /* close */ + {&vop_access_desc, (VOPFUNC)volfs_access}, /* access */ + {&vop_getattr_desc, (VOPFUNC)volfs_getattr}, /* getattr */ + {&vop_setattr_desc, (VOPFUNC)err_setattr}, /* setattr */ + {&vop_getattrlist_desc, (VOPFUNC)err_getattrlist}, /* getattrlist */ + {&vop_setattrlist_desc, (VOPFUNC)err_setattrlist}, /* setattrlist */ + {&vop_read_desc, (VOPFUNC)err_read}, /* read */ + {&vop_write_desc, (VOPFUNC)err_write}, /* write */ + {&vop_lease_desc, (VOPFUNC)err_lease}, /* lease */ + {&vop_ioctl_desc, (VOPFUNC)err_ioctl}, /* ioctl */ + {&vop_select_desc, (VOPFUNC)volfs_select}, /* select */ + {&vop_exchange_desc, (VOPFUNC)err_exchange}, /* exchange */ + {&vop_revoke_desc, (VOPFUNC)nop_revoke}, /* revoke */ + {&vop_mmap_desc, (VOPFUNC)err_mmap}, /* mmap */ + {&vop_fsync_desc, (VOPFUNC)err_fsync}, /* fsync */ + {&vop_seek_desc, (VOPFUNC)nop_seek}, /* seek */ + {&vop_remove_desc, (VOPFUNC)err_remove}, /* remove */ + {&vop_link_desc, (VOPFUNC)err_link}, /* link */ + {&vop_rename_desc, (VOPFUNC)err_rename}, /* rename */ + {&vop_mkdir_desc, (VOPFUNC)err_mkdir}, /* mkdir */ + {&vop_rmdir_desc, (VOPFUNC)volfs_rmdir}, /* rmdir */ + {&vop_symlink_desc, (VOPFUNC)err_symlink}, /* symlink */ + {&vop_readdir_desc, (VOPFUNC)volfs_readdir}, /* readdir */ + {&vop_readdirattr_desc, (VOPFUNC)err_readdirattr}, /* readdirattr */ + 
{&vop_readlink_desc, (VOPFUNC)err_readlink}, /* readlink */ + {&vop_abortop_desc, (VOPFUNC)err_abortop}, /* abortop */ + {&vop_inactive_desc, (VOPFUNC)err_inactive}, /* inactive */ + {&vop_reclaim_desc, (VOPFUNC)volfs_reclaim}, /* reclaim */ + {&vop_lock_desc, (VOPFUNC)volfs_lock}, /* lock */ + {&vop_unlock_desc, (VOPFUNC)volfs_unlock}, /* unlock */ + {&vop_bmap_desc, (VOPFUNC)err_bmap}, /* bmap */ + {&vop_print_desc, (VOPFUNC)err_print}, /* print */ + {&vop_islocked_desc, (VOPFUNC)volfs_islocked}, /* islocked */ + {&vop_pathconf_desc, (VOPFUNC)volfs_pathconf}, /* pathconf */ + {&vop_advlock_desc, (VOPFUNC)err_advlock}, /* advlock */ + {&vop_blkatoff_desc, (VOPFUNC)err_blkatoff}, /* blkatoff */ + {&vop_valloc_desc, (VOPFUNC)err_valloc}, /* valloc */ + {&vop_reallocblks_desc, (VOPFUNC)err_reallocblks}, /* reallocblks */ + {&vop_vfree_desc, (VOPFUNC)err_vfree}, /* vfree */ + {&vop_truncate_desc, (VOPFUNC)err_truncate}, /* truncate */ + {&vop_allocate_desc, (VOPFUNC)err_allocate}, /* allocate */ + {&vop_update_desc, (VOPFUNC)err_update}, /* update */ + {&vop_pgrd_desc, (VOPFUNC)err_pgrd}, /* pgrd */ + {&vop_pgwr_desc, (VOPFUNC)err_pgwr}, /* pgwr */ + {&vop_pagein_desc, (VOPFUNC)err_pagein}, /* pagein */ + {&vop_pageout_desc, (VOPFUNC)err_pageout}, /* pageout */ + {&vop_devblocksize_desc, (VOPFUNC)err_devblocksize}, /* devblocksize */ + {&vop_searchfs_desc, (VOPFUNC)err_searchfs}, /* searchfs */ + {&vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + {&vop_blktooff_desc, (VOPFUNC)err_blktooff}, /* blktooff */ + {&vop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ + {&vop_cmap_desc, (VOPFUNC)err_cmap }, /* cmap */ + {(struct vnodeop_desc *) NULL, (int (*) ()) NULL} +}; + +/* + * Oh what a tangled web we weave. 
This structure will be used by + * bsd/vfs/vfs_conf.c to actually do the initialization of volfs_vnodeop_p + */ +struct vnodeopv_desc volfs_vnodeop_opv_desc = +{&volfs_vnodeop_p, volfs_vnodeop_entries}; + + +static int validfsnode(struct mount *fsnode); + +#if DBG_VOP_TEST_LOCKS +static void DbgVopTest (int max, int error, VopDbgStoreRec *VopDbgStore, char *funcname); +#endif /* DBG_VOP_TEST_LOCKS */ + + +/* + * volfs_reclaim - Reclaim a vnode so that it can be used for other purposes. + * + * Locking policy: ignored + */ +int +volfs_reclaim(ap) + struct vop_reclaim_args /* { struct vnode *a_vp; struct proc *a_p; } */ *ap; +{ + struct vnode *vp = ap->a_vp; + void *data = vp->v_data; + + DBG_FUNC_NAME("volfs_reclaim"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + + DBG_VOP_LOCKS_INIT(0, vp, VOPDBG_UNLOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_ZERO); + + vp->v_data = NULL; + FREE(data, M_VOLFSNODE); + + DBG_VOP_LOCKS_TEST(0); + return (0); +} + +/* + * volfs_access - same access policy for all vnodes and all users (file/directory vnodes + * for the actual file systems are handled by actual file system) + * + * Locking policy: a_vp locked on input and output + */ +int +volfs_access(ap) + struct vop_access_args /* { struct vnode *a_vp; int a_mode; struct + ucred *a_cred; struct proc *a_p; } */ *ap; +{ + int ret_err; + DBG_FUNC_NAME("volfs_access"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + + /* + * We don't need to check credentials! 
FS is read-only for everyone + */ + if (ap->a_mode == VREAD || ap->a_mode == VEXEC) + ret_err = 0; + else + ret_err = EACCES; + + DBG_VOP_LOCKS_TEST(ret_err); + return (ret_err); +} + +/* + * volfs_getattr - fill in the attributes for this vnode + * + * Locking policy: don't change anything + */ +int +volfs_getattr(ap) + struct vop_getattr_args /* { struct vnode *a_vp; struct vattr *a_vap; + struct ucred *a_cred; struct proc *a_p; } */ *ap; +{ + struct volfs_vndata *priv_data; + struct vnode *a_vp; + struct vattr *a_vap; + int numMounts = 0; + DBG_FUNC_NAME("volfs_getattr"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_SAME, VOPDBG_POS); + + a_vp = ap->a_vp; + a_vap = ap->a_vap; + + priv_data = a_vp->v_data; + + a_vap->va_type = VDIR; + a_vap->va_mode = 0444; /* Yup, hard - coded to read - only */ + a_vap->va_nlink = 2; + a_vap->va_uid = 0; /* Always owned by root */ + a_vap->va_gid = 0; /* Always part of group 0 */ + a_vap->va_fsid = (int) a_vp->v_mount->mnt_stat.f_fsid.val[0]; + a_vap->va_fileid = priv_data->nodeID; + + /* + * If it's the root vnode calculate its size based on the number of eligible + * file systems + */ + if (priv_data->vnode_type == VOLFS_ROOT) + { + register struct mount *mp, *nmp; + + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, ap->a_p)) { + nmp = mp->mnt_list.cqe_next; + continue; + } + + if (mp != a_vp->v_mount && validfsnode(mp)) + numMounts++; + + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, ap->a_p); + } + simple_unlock(&mountlist_slock); + + DBG_VOP(("found %d file systems that volfs can support\n", numMounts)); + a_vap->va_size = (numMounts + 2) * VLFSDIRENTLEN; + } + else + { + a_vap->va_size = 2 * VLFSDIRENTLEN; + } + DBG_VOP(("va_size = %d, VLFSDIRENTLEN = %ld\n", (int) 
a_vap->va_size, VLFSDIRENTLEN)); + a_vap->va_blocksize = 512; + + a_vap->va_atime.tv_sec = boottime.tv_sec; + a_vap->va_atime.tv_nsec = 0; + + a_vap->va_mtime.tv_sec = boottime.tv_sec; + a_vap->va_mtime.tv_nsec = 0; + + a_vap->va_ctime.tv_sec = boottime.tv_sec; + a_vap->va_ctime.tv_nsec = 0; + + a_vap->va_gen = 0; + a_vap->va_flags = 0; + a_vap->va_rdev = 0; + a_vap->va_bytes = a_vap->va_size; + a_vap->va_filerev = 0; + a_vap->va_vaflags = 0; + + DBG_VOP_LOCKS_TEST(0); + return (0); +} + +/* + * volfs_select - just say OK. Only possible op is readdir + * + * Locking policy: ignore + */ +int +volfs_select(ap) + struct vop_select_args /* { struct vnode *a_vp; int a_which; int + * a_fflags; struct ucred *a_cred; struct + proc *a_p; } */ *ap; +{ + DBG_VOP(("volfs_select called\n")); + + return (1); +} + +/* + * vofls_rmdir - not possible to remove directories in volfs + * + * Locking policy: a_dvp & a_vp - locked on entry, unlocked on exit + */ +int +volfs_rmdir(ap) + struct vop_rmdir_args /* { struct vnode *a_dvp; struct vnode *a_vp; + struct componentname *a_cnp; } */ *ap; +{ + DBG_VOP(("volfs_rmdir called\n")); + if (ap->a_dvp == ap->a_vp) { + (void) nop_rmdir(ap); + return (EINVAL); + } else + return (err_rmdir(ap)); +} + +/* + * volfs_readdir - Get directory entries + * + * Directory listings are only produced for the root volfs node. Filesystems + * just return . & .. + * Filesystems contained within the volfs root are named by the decimal + * equivalent of the f_fsid.val[0] from their mount structure (typically + * the device id of the volume). The maximum length for a name, then is + * 10 characters. 
+ * + * Locking policy: a_vp locked on entry and exit + */ +int +volfs_readdir(ap) + struct vop_readdir_args /* { struct vnode *a_vp; struct uio *a_uio; + * struct ucred *a_cred; int *a_eofflag; int + *ncookies; u_long **a_cookies; } */ *ap; +{ + struct volfs_vndata *priv_data; + register struct uio *uio = ap->a_uio; + int error = 0; + size_t count, lost; + int rec_offset; + struct dirent local_dir; + int i; + int starting_resid; + off_t off; + DBG_FUNC_NAME("volfs_readdir"); + DBG_VOP_LOCKS_DECL(1); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_LOCKED, VOPDBG_POS); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + + DBG_VOP(("\tuio_offset = %d, uio_resid = %d\n", (int) uio->uio_offset, uio->uio_resid)); + /* We assume it's all one big buffer... */ + if (uio->uio_iovcnt > 1) + DBG_VOP(("\tuio->uio_iovcnt = %d?\n", uio->uio_iovcnt)); + + off = uio->uio_offset; + priv_data = ap->a_vp->v_data; + starting_resid = uio->uio_resid; + count = uio->uio_resid; + + /* Make sure we don't return partial entries. */ + count -= (uio->uio_offset + count) & (VLFSDIRENTLEN - 1); + if (count <= 0) + { + DBG_VOP(("volfs_readdir: Not enough buffer to read in entries\n")); + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); + } + /* + * Make sure we're starting on a directory boundary + */ + if (off & (VLFSDIRENTLEN - 1)) + { + DBG_VOP_LOCKS_TEST(EINVAL); + return (EINVAL); + } + rec_offset = off / VLFSDIRENTLEN; + lost = uio->uio_resid - count; + uio->uio_resid = count; + uio->uio_iov->iov_len = count; + + local_dir.d_reclen = VLFSDIRENTLEN; + /* + * We must synthesize . and .. + */ + DBG_VOP(("\tstarting ... uio_offset = %d, uio_resid = %d\n", + (int) uio->uio_offset, uio->uio_resid)); + if (rec_offset == 0) + { + DBG_VOP(("\tAdding .\n")); + /* + * Synthesize . 
+ */ + local_dir.d_fileno = priv_data->nodeID; + local_dir.d_type = DT_DIR; + local_dir.d_namlen = 1; + local_dir.d_name[0] = '.'; + for (i = 1; i < MAXVLFSNAMLEN; i++) + local_dir.d_name[i] = 0; + error = uiomove((char *) &local_dir, VLFSDIRENTLEN, uio); + DBG_VOP(("\t after adding ., uio_offset = %d, uio_resid = %d\n", + (int) uio->uio_offset, uio->uio_resid)); + rec_offset++; + } + if (rec_offset == 1) + { + DBG_VOP(("\tAdding ..\n")); + /* + * Synthesize .. + * We only have two levels in the volfs hierarchy. Root's + * .. points to itself and the second level points to root, + * hence we've hardcoded d_fileno for .. here + */ + local_dir.d_fileno = ROOT_DIRID; + local_dir.d_type = DT_DIR; + local_dir.d_namlen = 2; + local_dir.d_name[0] = '.'; + local_dir.d_name[1] = '.'; + for (i = 2; i < MAXVLFSNAMLEN; i++) + local_dir.d_name[i] = 0; + error = uiomove((char *) &local_dir, VLFSDIRENTLEN, uio); + rec_offset++; + DBG_VOP(("\t after adding .., uio_offset = %d, uio_resid = %d\n", + (int) uio->uio_offset, uio->uio_resid)); + } + + /* + * OK, we've given them the . & .. entries. If this is a + * filesystem node then we've gone as far as we're going + * to go + */ + if (priv_data->vnode_type == VOLFS_FSNODE) + { + *ap->a_eofflag = 1; /* we got all the way to the end */ + DBG_VOP_LOCKS_TEST(error); + return (error); + } + + if (rec_offset > 1) { + register struct mount *mp, *nmp; + int validnodeindex; + struct proc *p = uio->uio_procp; + + validnodeindex = 1; /* we always have "." and ".." 
*/ + + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { + nmp = mp->mnt_list.cqe_next; + continue; + } + + if (mp != ap->a_vp->v_mount && validfsnode(mp)) + validnodeindex++; + + if (rec_offset == validnodeindex) + { + local_dir.d_fileno = mp->mnt_stat.f_fsid.val[0]; + local_dir.d_type = DT_DIR; + local_dir.d_reclen = VLFSDIRENTLEN; + DBG_VOP(("\tAdding dir entry %d for offset %d\n", mp->mnt_stat.f_fsid.val[0], rec_offset)); + local_dir.d_namlen = sprintf(&local_dir.d_name[0], "%d", mp->mnt_stat.f_fsid.val[0]); + error = uiomove((char *) &local_dir, VLFSDIRENTLEN, uio); + DBG_VOP(("\t after adding entry '%s', uio_offset = %d, uio_resid = %d\n", + &local_dir.d_name[0], (int) uio->uio_offset, uio->uio_resid)); + rec_offset++; + } + + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + } + simple_unlock(&mountlist_slock); + + if (mp == (void *) &mountlist) + *ap->a_eofflag = 1; /* we got all the way to the end */ + } + + uio->uio_resid += lost; + if (starting_resid == uio->uio_resid) + uio->uio_offset = 0; + + DBG_VOP(("\tExiting, uio_offset = %d, uio_resid = %d, ap->a_eofflag = %d\n", + (int) uio->uio_offset, uio->uio_resid, *ap->a_eofflag)); + + DBG_VOP_LOCKS_TEST(error); + return (error); +} + + +/* + * validfsnode - test to see if a file system supports VGET + * + * This can cause context switching, so caller should be lock safe + */ +static int +validfsnode(struct mount *fsnode) +{ + + /* + * Just check to see if the the mount flag is set, if it is we assume the + * file system supports all of volfs symantecs + */ + + if ((! (fsnode->mnt_kern_flag & MNTK_UNMOUNT)) && (fsnode->mnt_flag & MNT_DOVOLFS)) + return 1; + else + return 0; +} + +/* + * volfs_lock - Lock an inode. + * If its already locked, set the WANT bit and sleep. 
+ * + * Locking policy: handled by lockmgr + */ +int +volfs_lock(ap) + struct vop_lock_args /* { struct vnode *a_vp; int a_flags; struct + proc *a_p; } */ *ap; +{ + int retval; + struct volfs_vndata *priv_data; + DBG_FUNC_NAME("volfs_lock"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_UNLOCKED, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_ZERO); + + priv_data = (struct volfs_vndata *) ap->a_vp->v_data; + retval = lockmgr(&priv_data->lock, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p); + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + +/* + * volfs_unlock - Unlock an inode. + * + * Locking policy: handled by lockmgr + */ +int +volfs_unlock(ap) + struct vop_unlock_args /* { struct vnode *a_vp; int a_flags; struct + proc *a_p; } */ *ap; +{ + int retval; + struct volfs_vndata *priv_data; + DBG_FUNC_NAME("volfs_unlock"); + DBG_VOP_LOCKS_DECL(1); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP_PRINT_VNODE_INFO(ap->a_vp);DBG_VOP(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_LOCKED, VOPDBG_UNLOCKED, VOPDBG_LOCKED, VOPDBG_ZERO); + + priv_data = (struct volfs_vndata *) ap->a_vp->v_data; + retval = lockmgr(&priv_data->lock, ap->a_flags | LK_RELEASE, + &ap->a_vp->v_interlock, ap->a_p); + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + +/* + * volfs_islocked - Check for a locked inode. 
+ * + * Locking policy: ignore + */ +int +volfs_islocked(ap) + struct vop_islocked_args /* { struct vnode *a_vp; } */ *ap; +{ + int retval; + struct volfs_vndata *priv_data; + + DBG_FUNC_NAME("volfs_islocked"); + DBG_VOP_LOCKS_DECL(1); + //DBG_VOP_PRINT_FUNCNAME();DBG_VOP(("\n")); + + DBG_VOP_LOCKS_INIT(0,ap->a_vp, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_ZERO); + priv_data = (struct volfs_vndata *) ap->a_vp->v_data; + retval = lockstatus(&priv_data->lock); + + DBG_VOP_LOCKS_TEST(retval); + return (retval); +} + +/* + * volfs_pathconf - Return POSIX pathconf information applicable to ufs filesystems. + * + * Locking policy: a_vp locked on input and output + */ +int +volfs_pathconf(ap) + struct vop_pathconf_args /* { struct vnode *a_vp; int a_name; int + *a_retval; } */ *ap; +{ + DBG_VOP(("volfs_pathconf called\n")); + + switch (ap->a_name) + { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_NAME_MAX: + *ap->a_retval = NAME_MAX; + return (0); + case _PC_PATH_MAX: + *ap->a_retval = PATH_MAX; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_NO_TRUNC: + *ap->a_retval = 1; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * get_fsvnode - internal routine to create a vnode for a file system. 
Called with mount pointer, + * id of filesystem to lookup and pointer to vnode pointer to fill in + */ +static int +get_fsvnode(our_mount, id, ret_vnode) + struct mount *our_mount; + int id; + struct vnode **ret_vnode; +{ + register struct mount *mp; + struct mount *cur_mount; + struct vnode *cur_vnode; + struct volfs_vndata *cur_privdata; + int retval; + + //DBG_VOP(("volfs: get_fsvnode called\n")); + + /* + * OK, first look up the matching mount on the list of mounted file systems + */ + cur_mount = NULL; + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = mp->mnt_list.cqe_next) + { + if (validfsnode(mp) && mp->mnt_stat.f_fsid.val[0] == id) + { + cur_mount = mp; + break; + } + } + simple_unlock(&mountlist_slock); + + if (cur_mount == NULL) { + /* + * No mounted file system by the specified ID currently exists in the system. + * + * XXX We could deal with a vnode that is still hanging about for an FS that + * does not exists or has been unmounted now, or count on the update below + * to happen later... 
+ */ + *ret_vnode = NULL; + return ENOENT; + }; + + /* + * Now search the list attached to the mount structure to + * see if this vnode is already floating around + */ +search_vnodelist: + cur_vnode = our_mount->mnt_vnodelist.lh_first; + while (cur_vnode != NULL) + { + cur_privdata = (struct volfs_vndata *) cur_vnode->v_data; + if (cur_privdata->nodeID == id) + { + if (cur_privdata->fs_mount != cur_mount) { + DBG_VOP(("volfs get_fsvnode: Updating fs_mount for vnode 0x%08lX (id = %d) from 0x%08lX to 0x%08lX...\n", + (unsigned long)cur_vnode, + cur_privdata->nodeID, + (unsigned long)cur_privdata->fs_mount, + (unsigned long)cur_mount)); + cur_privdata->fs_mount = cur_mount; + }; + break; + } + cur_vnode = cur_vnode->v_mntvnodes.le_next; + } + + //DBG_VOP(("\tfinal cur_mount: 0x%x\n",cur_mount)); + if (cur_vnode) { + /* If vget returns an error, cur_vnode will not be what we think it is, try again */ + if (vget(cur_vnode, LK_EXCLUSIVE, current_proc()) != 0) { + goto search_vnodelist; + }; + } + else + { + MALLOC(cur_privdata, struct volfs_vndata *, + sizeof(struct volfs_vndata), M_VOLFSNODE, M_WAITOK); + retval = getnewvnode(VT_VOLFS, our_mount, volfs_vnodeop_p, &cur_vnode); + if (retval != 0) { + FREE(cur_privdata, M_VOLFSNODE); + return retval; + }; + + cur_privdata->vnode_type = VOLFS_FSNODE; + cur_privdata->nodeID = id; + + cur_privdata->fs_mount = cur_mount; + lockinit(&cur_privdata->lock, PINOD, "volfsnode", 0, 0); + lockmgr(&cur_privdata->lock, LK_EXCLUSIVE, (struct slock *)0, current_proc()); + cur_vnode->v_data = cur_privdata; + cur_vnode->v_type = VDIR; + DBG_VOP(("get_fsvnode returned with new node of ")); + DBG_VOP_PRINT_VNODE_INFO(cur_vnode);DBG_VOP(("\n")); + } + + *ret_vnode = cur_vnode; + + return (0); +} + + + +/* + * get_filevnode - returns the vnode for the given id within a filesystem. 
The parent vnode + * is a filesystem, id is the 32-bit id of the file/directory and ret_vnode is a pointer + * to a vnode pointer + */ +static int +get_filevnode(parent_fs, id, ret_vnode) + struct mount *parent_fs; + u_int id; + struct vnode **ret_vnode; +{ + int retval; + + DBG_VOP(("get_filevnode called for ID %d\n", id)); + + /* + * Special case 2 to mean the root of a file system + */ + if (id == 2) + retval = VFS_ROOT(parent_fs, ret_vnode); + else + retval = VFS_VGET(parent_fs, &id, ret_vnode); + + return (retval); +} + + +int +volfs_lookup(ap) + struct vop_lookup_args /* { struct vnode *a_dvp; struct vnode + **a_vpp; struct componentname *a_cnp; } */ *ap; +{ + struct volfs_vndata *priv_data; + char *cnp; + long namelen; + struct mount *parent_fs; + int unlocked_parent = 0; + int ret_err = ENOENT; + DBG_FUNC_NAME("volfs_lookup"); + DBG_VOP_LOCKS_DECL(2); + + DBG_VOP(("volfs_lookup called, name = %s, namelen = %ld\n", ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen)); + + DBG_VOP_LOCKS_INIT(0,ap->a_dvp, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_IGNORE, VOPDBG_POS); + DBG_VOP_LOCKS_INIT(1,*ap->a_vpp, VOPDBG_IGNORE, VOPDBG_LOCKED, VOPDBG_IGNORE, VOPDBG_POS); + DBG_VOP_PRINT_FUNCNAME();DBG_VOP(("\n")); + DBG_VOP(("\t"));DBG_VOP_PRINT_CPN_INFO(ap->a_cnp);DBG_VOP(("\n")); + if (ap->a_cnp->cn_flags & LOCKPARENT) + DBG_VOP(("\tLOCKPARENT is set\n")); + if (ap->a_cnp->cn_flags & ISLASTCN) + { + DBG_VOP(("\tISLASTCN is set\n")); + if (ap->a_cnp->cn_nameiop == DELETE || ap->a_cnp->cn_nameiop == RENAME) /* XXX PPD Shouldn't we check for CREATE, too? 
*/ + { + ret_err = EROFS; + goto Err_Exit; + } + } + priv_data = ap->a_dvp->v_data; + cnp = ap->a_cnp->cn_nameptr; + namelen = ap->a_cnp->cn_namelen; + +#if VOLFS_DEBUG + switch (priv_data->vnode_type) { + case VOLFS_ROOT: + DBG_VOP(("\tparent directory (vnode 0x%08lX) vnode_type is VOLFS_ROOT.\n", (unsigned long)ap->a_dvp)); + break; + + case VOLFS_FSNODE: + DBG_VOP(("\tparent directory (vnode 0x%08lX) vnode_type is VOLFS_FSNODE, nodeID = %d, fs_mount = 0x%08lX.\n", + (unsigned long)ap->a_dvp, + priv_data->nodeID, + (unsigned long)priv_data->fs_mount)); + + default: + DBG_VOP(("\tparent directory (vnode 0x%08lX) has unknown vnode_type (%d), nodeID = %d.\n", + (unsigned long)ap->a_dvp, + priv_data->vnode_type, + priv_data->nodeID)); + }; +#endif /* VOLFS_DEBUG */ + + /* first check for "." and ".." */ + if (cnp[0] == '.') + { + if (namelen == 1) + { + /* "." requested */ + *ap->a_vpp = ap->a_dvp; + VREF(*ap->a_vpp); + DBG_VOP_LOCKS_TEST(0); + return (0); + } + else if (cnp[1] == '.' && namelen == 2) + { + /* ".." requested */ + ret_err = volfs_root(ap->a_dvp->v_mount, ap->a_vpp); + } + } + + /* then look for special file system root symbol ('@') */ + else if (cnp[0] == '@') + { + if ((namelen == 1) && (priv_data->vnode_type != VOLFS_ROOT)) { + parent_fs = priv_data->fs_mount; + if (!(ap->a_cnp->cn_flags & LOCKPARENT) || !(ap->a_cnp->cn_flags & ISLASTCN)) { + VOP_UNLOCK(ap->a_dvp, 0, ap->a_cnp->cn_proc); + unlocked_parent = 1; + }; + ret_err = VFS_ROOT(parent_fs, ap->a_vpp); + } else { + DBG_VOP(("volfs_lookup: pathname = '@' but namelen = %ld and parent vnode_type = %d.\n", namelen, priv_data->vnode_type)); + *ap->a_vpp = NULL; + ret_err = ENOENT; + }; + } + + /* finally, just look for numeric ids... */ + else if (namelen <= 10 && cnp[0] > '0' && cnp[0] <= '9') /* 10 digits max lead digit must be 1 - 9 */ + { + char *check_ptr; + u_long id; + + id = strtol(cnp, &check_ptr, 10); + + /* + * strtol will leave us at the first non-numeric character. 
+ * we've checked to make sure the component name does + * begin with a numeric so check_ptr must wind up on + * the terminating null or there was other junk following the + * number + */ + if ((check_ptr - cnp) == namelen) + { + if (priv_data->vnode_type == VOLFS_ROOT) + ret_err = get_fsvnode(ap->a_dvp->v_mount, id, ap->a_vpp); + else { + parent_fs = priv_data->fs_mount; + if (!(ap->a_cnp->cn_flags & LOCKPARENT) || !(ap->a_cnp->cn_flags & ISLASTCN)) { + VOP_UNLOCK(ap->a_dvp, 0, ap->a_cnp->cn_proc); + unlocked_parent = 1; + }; + ret_err = get_filevnode(parent_fs, id, ap->a_vpp); + } + } + + } + + if (!unlocked_parent && (!(ap->a_cnp->cn_flags & LOCKPARENT) || !(ap->a_cnp->cn_flags & ISLASTCN))) { + VOP_UNLOCK(ap->a_dvp, 0, ap->a_cnp->cn_proc); + }; + + /* XXX PPD Should we do something special in case LOCKLEAF isn't set? */ + +Err_Exit: + + DBG_VOP_UPDATE_VP(1, *ap->a_vpp); + DBG_VOP_LOCKS_TEST(ret_err); + + return (ret_err); +} + +#if DBG_VOP_TEST_LOCKS + +#if 0 +static void DbgLookupTest( char *funcname, struct componentname *cnp, struct vnode *dvp, struct vnode *vp) +{ + int flags = cnp->cn_flags; + int nameiop = cnp->cn_nameiop; + + DBG_VOP (("%s: Action:", funcname)); + switch (nameiop) + { + case LOOKUP: + PRINTIT ("LOOKUP"); + break; + case CREATE: + PRINTIT ("CREATE"); + break; + case DELETE: + PRINTIT ("DELETE"); + break; + case RENAME: + PRINTIT ("RENAME"); + break; + default: + PRINTIT ("!!!UNKNOWN!!!!"); + break; + } + PRINTIT(" flags: 0x%x ",flags ); + if (flags & LOCKPARENT) + PRINTIT (" Lock Parent"); + if (flags & ISLASTCN) + PRINTIT (" Last Action"); + PRINTIT("\n"); + + if (dvp) + { + PRINTIT ("%s: Parent vnode exited ", funcname); + if (VOP_ISLOCKED(dvp)) + PRINTIT("LOCKED\n"); + else + PRINTIT("UNLOCKED\n"); + } + if (vp && vp==dvp) + { + PRINTIT ("%s: Found and Parent are the same\n", funcname); + } + else if (vp) + { + PRINTIT ("%s: Found vnode exited ", funcname); + if (VOP_ISLOCKED(vp)) + PRINTIT("LOCKED\n"); + else + PRINTIT("UNLOCKED\n"); 
+ } + else + PRINTIT ("%s: Found vnode exited NULL\n", funcname); + + +} +#endif + +static void DbgVopTest( int maxSlots, + int retval, + VopDbgStoreRec *VopDbgStore, + char *funcname) +{ + int index; + + for (index = 0; index < maxSlots; index++) + { + if (VopDbgStore[index].id != index) { + PRINTIT("%s: DBG_VOP_LOCK: invalid id field (%d) in target entry (#%d).\n", funcname, VopDbgStore[index].id, index); + return; + }; + + if ((VopDbgStore[index].vp != NULL) && + ((VopDbgStore[index].vp->v_data==NULL))) + continue; + + switch (VopDbgStore[index].inState) + { + case VOPDBG_IGNORE: + case VOPDBG_SAME: + /* Do Nothing !!! */ + break; + case VOPDBG_LOCKED: + case VOPDBG_UNLOCKED: + case VOPDBG_LOCKNOTNIL: + { + if (VopDbgStore[index].vp == NULL && (VopDbgStore[index].inState != VOPDBG_LOCKNOTNIL)) { + PRINTIT ("%s: InState check: Null vnode ptr in entry #%d\n", funcname, index); + } else if (VopDbgStore[index].vp != NULL) { + switch (VopDbgStore[index].inState) + { + case VOPDBG_LOCKED: + case VOPDBG_LOCKNOTNIL: + if (VopDbgStore[index].inValue == 0) + { + PRINTIT ("%s: %d Entry: not LOCKED:", funcname, index); DBG_VOP(("\n")); + } + break; + case VOPDBG_UNLOCKED: + if (VopDbgStore[index].inValue != 0) + { + PRINTIT ("%s: %d Entry: not UNLOCKED:", funcname, index); DBG_VOP(("\n")); + } + break; + } + } + break; + } + default: + PRINTIT ("%s: DBG_VOP_LOCK on entry: bad lock test value: %d\n", funcname, VopDbgStore[index].errState); + } + + + if (retval != 0) + { + switch (VopDbgStore[index].errState) + { + case VOPDBG_IGNORE: + /* Do Nothing !!! 
*/ + break; + case VOPDBG_LOCKED: + case VOPDBG_UNLOCKED: + case VOPDBG_SAME: + { + if (VopDbgStore[index].vp == NULL) { + PRINTIT ("%s: ErrState check: Null vnode ptr in entry #%d\n", funcname, index); + } else { + VopDbgStore[index].outValue = VOP_ISLOCKED(VopDbgStore[index].vp); + switch (VopDbgStore[index].errState) + { + case VOPDBG_LOCKED: + if (VopDbgStore[index].outValue == 0) + { + PRINTIT ("%s: %d Error: not LOCKED:", funcname, index); DBG_VOP(("\n")); + } + break; + case VOPDBG_UNLOCKED: + if (VopDbgStore[index].outValue != 0) + { + PRINTIT ("%s: %d Error: not UNLOCKED:", funcname, index); DBG_VOP(("\n")); + } + break; + case VOPDBG_SAME: + if (VopDbgStore[index].outValue != VopDbgStore[index].inValue) + PRINTIT ("%s: Error: In/Out locks are DIFFERENT: 0x%x, inis %d and out is %d\n", funcname, (u_int)VopDbgStore[index].vp, VopDbgStore[index].inValue, VopDbgStore[index].outValue); + break; + } + } + break; + } + case VOPDBG_LOCKNOTNIL: + if (VopDbgStore[index].vp != NULL) { + VopDbgStore[index].outValue = VOP_ISLOCKED(VopDbgStore[index].vp); + if (VopDbgStore[index].outValue == 0) + PRINTIT ("%s: Error: %d Not LOCKED: 0x%x\n", funcname, index, (u_int)VopDbgStore[index].vp); + } + break; + default: + PRINTIT ("%s: Error: bad lock test value: %d\n", funcname, VopDbgStore[index].errState); + } + } + else + { + switch (VopDbgStore[index].outState) + { + case VOPDBG_IGNORE: + /* Do Nothing !!! 
*/ + break; + case VOPDBG_LOCKED: + case VOPDBG_UNLOCKED: + case VOPDBG_SAME: + if (VopDbgStore[index].vp == NULL) { + PRINTIT ("%s: OutState: Null vnode ptr in entry #%d\n", funcname, index); + }; + if (VopDbgStore[index].vp != NULL) + { + VopDbgStore[index].outValue = VOP_ISLOCKED(VopDbgStore[index].vp); + switch (VopDbgStore[index].outState) + { + case VOPDBG_LOCKED: + if (VopDbgStore[index].outValue == 0) + { + PRINTIT ("%s: %d Out: not LOCKED:", funcname, index); DBG_VOP(("\n")); + } + break; + case VOPDBG_UNLOCKED: + if (VopDbgStore[index].outValue != 0) + { + PRINTIT ("%s: %d Out: not UNLOCKED:", funcname, index); DBG_VOP(("\n")); + } + break; + case VOPDBG_SAME: + if (VopDbgStore[index].outValue != VopDbgStore[index].inValue) + PRINTIT ("%s: Out: In/Out locks are DIFFERENT: 0x%x, inis %d and out is %d\n", funcname, (u_int)VopDbgStore[index].vp, VopDbgStore[index].inValue, VopDbgStore[index].outValue); + break; + } + } + break; + case VOPDBG_LOCKNOTNIL: + if (VopDbgStore[index].vp != NULL) { + if (&((struct volfs_vndata *)(VopDbgStore[index].vp->v_data))->lock == NULL) + PRINTIT ("%s: DBG_VOP_LOCK on out: Null lock on vnode 0x%x\n", funcname, (u_int)VopDbgStore[index].vp); + else { + VopDbgStore[index].outValue = VOP_ISLOCKED(VopDbgStore[index].vp); + if (VopDbgStore[index].outValue == 0) + { + PRINTIT ("%s: DBG_VOP_LOCK on out: Should be LOCKED:", funcname); DBG_VOP(("\n")); + } + } + } + break; + default: + PRINTIT ("%s: DBG_VOP_LOCK on out: bad lock test value: %d\n", funcname, VopDbgStore[index].outState); + } + } + + VopDbgStore[index].id = -1; /* Invalidate the entry to allow panic-free re-use */ + } +} + +#endif /* DBG_VOP_TEST_LOCKS */ + diff --git a/bsd/net/Makefile b/bsd/net/Makefile new file mode 100644 index 000000000..1cb5c616e --- /dev/null +++ b/bsd/net/Makefile @@ -0,0 +1,42 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export 
MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES= \ + bpf.h bpf_compat.h bpfdesc.h bridge.h dlil.h dlil_pvt.h \ + etherdefs.h ethernet.h hostcache.h if.h if_arp.h if_atm.h \ + if_dl.h if_gif.h if_llc.h if_media.h if_mib.h \ + if_ppp.h if_pppvar.h if_slvar.h if_sppp.h if_tun.h \ + if_tunvar.h if_types.h if_var.h if_vlan_var.h kext_net.h \ + ndrv.h net_osdep.h netisr.h pfkeyv2.h ppp_comp.h ppp_defs.h \ + radix.h raw_cb.h route.h slcompress.h slip.h tokendefs.h \ + tokensr.h zlib.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = net + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = net + + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/bsd/net/bpf.c b/bsd/net/bpf.c new file mode 100644 index 000000000..6363b4fb9 --- /dev/null +++ b/bsd/net/bpf.c @@ -0,0 +1,1426 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from the Stanford/CMU enet packet filter, + * (net/enet.c) distributed as part of 4.3BSD, and code contributed + * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence + * Berkeley Laboratory. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)bpf.c 8.2 (Berkeley) 3/28/94 + * + */ + +#include "bpfilter.h" + +#if NBPFILTER > 0 + +#ifndef __GNUC__ +#define inline +#else +#define inline __inline +#endif + +#include +#include +#include +#include +#include +#include +#include + + +#include + + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + + +#include +#include + +/* + * Older BSDs don't have kernel malloc. + */ +#if BSD < 199103 +extern bcopy(); +static caddr_t bpf_alloc(); + +#define BPF_BUFSIZE (MCLBYTES-8) +#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) +#else +#define BPF_BUFSIZE 4096 +#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) +#endif + +#define PRINET 26 /* interruptible */ + +/* + * The default read buffer size is patchable. 
 */
static int bpf_bufsize = BPF_BUFSIZE;

SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];
static int		bpf_dtab_init;
static int		nbpfilter = NBPFILTER;

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

d_open_t	bpfopen;
d_close_t	bpfclose;
d_read_t	bpfread;
d_write_t	bpfwrite;
d_ioctl_t	bpfioctl;

#define BPF_MAJOR	7

void bpf_mtap(struct ifnet *, struct mbuf *);

int	bpfopen(), bpfclose(), bpfread(), bpfwrite(), bpfioctl(),
	bpfpoll();

/* Character device switch entry for /dev/bpfN. */
static struct cdevsw bpf_cdevsw = {
	bpfopen, bpfclose, bpfread, bpfwrite,
	bpfioctl, nulldev, nulldev, NULL, bpfpoll,
	eno_mmap, eno_strat, eno_getc, eno_putc, 0
};

/*
 * Copy a packet written on a bpf device from user space into a freshly
 * allocated mbuf, and build the link-level sockaddr the output routine
 * needs.  On success *mp holds the mbuf and *datlen the payload length
 * (total length minus the link header).  Returns 0 or an errno.
 */
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12; 	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	/* Use a cluster when the packet will not fit in the mbuf itself. */
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		/* The link header goes into the sockaddr, not the mbuf. */
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Driver tap callback: feed the packet to bpf_mtap() while holding the
 * network funnel.
 */
int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(network_flock, TRUE);

	/*
	 * Do nothing if the BPF tap has been turned off.
	 * This is to protect from a potential race where this
	 * call blocks on the funnel lock.  And in the meantime
	 * BPF is turned off, which will clear if_bpf.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp, m);

	thread_funnel_set(network_flock, funnel_state);
	return 0;
}


/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	struct ifnet *ifp;

	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
	ifp = bp->bif_ifp;

	/* Ask the driver to start handing us packets in both directions. */
	if (ifp->if_set_bpf_tap)
		(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	struct bpf_d **p;
	struct bpf_if *bp;
	struct ifnet *ifp;

	/* Tell the driver to stop calling our tap callback first. */
	ifp = d->bd_bif->bif_ifp;
	if (ifp->if_set_bpf_tap)
		(*ifp->if_set_bpf_tap)(ifp, BPF_TAP_DISABLE, 0);

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		if (ifpromisc(bp->bif_ifp, 0))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= nbpfilter)
		return (ENXIO);

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d)) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EBUSY);
	}

	/* Mark "free" and do most initialization. */
	/* NOTE(review): bzero() clears bd_next, which marks the slot used
	 * (D_MARKUSED), despite what the comment above says. */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);

	s = splimp();
	d = &bpf_dtab[minor(dev)];
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	/* bpf_freed() releases the buffers and marks the slot free again. */
	bpf_freed(d);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	boolean_t funnel_state;
	struct bpf_d *d = (struct bpf_d *)arg;

	funnel_state = thread_funnel_set(network_flock, TRUE);
	d->bd_timedout = 1;
	wakeup(arg);
	(void) thread_funnel_set(network_flock, FALSE);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d;
	int error;
	int s;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize) {
		thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
		return (EINVAL);
	}

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	/* The hold buffer becomes the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
+ */ +static inline void +bpf_wakeup(d) + register struct bpf_d *d; +{ + wakeup((caddr_t)d); + if (d->bd_async && d->bd_sig && d->bd_sigio) + pgsigio(d->bd_sigio, d->bd_sig, 0); + +#if BSD >= 199103 + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&d->bd_sel); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + /* XXX */ + d->bd_sel.si_thread = 0; +#else + if (d->bd_selproc) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(d->bd_selproc, (int)d->bd_selcoll); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + d->bd_selcoll = 0; + d->bd_selproc = 0; + } +#endif +} + + int +bpfwrite(dev, uio, ioflag) + dev_t dev; + struct uio *uio; + int ioflag; +{ + register struct bpf_d *d; + + struct ifnet *ifp; + struct mbuf *m; + int error, s; + static struct sockaddr dst; + int datlen; + + + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + d = &bpf_dtab[minor(dev)]; + if (d->bd_bif == 0) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (ENXIO); + } + + ifp = d->bd_bif->bif_ifp; + + if (uio->uio_resid == 0) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (0); + } + + error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); + if (error) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + + if (datlen > ifp->if_mtu) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (EMSGSIZE); + } + + s = splnet(); + + error = dlil_output((u_long) ifp, m, + (caddr_t) 0, &dst, 0); + + /* + error = dlil_inject_if_output(m, DLIL_NULL_FILTER); + */ + + splx(s); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + /* + * The driver frees the mbuf. + */ + return (error); +} + +/* + * Reset a descriptor by flushing its packet buffer and clearing the + * receive and drop counts. Should be called at splimp. + */ +static void +reset_d(d) + struct bpf_d *d; +{ + if (d->bd_hbuf) { + /* Free the hold buffer. 
 */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d;
	int s, error = 0;

	thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
	d = &bpf_dtab[minor(dev)];

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is attached;
	 * the request is clamped to [BPF_MINBUFSIZE, BPF_MAXBUFSIZE].
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;
#if ISFB31
	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;
#endif
	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		/* A NULL program simply removes any installed filter. */
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT);
	/* Copy in from user space and validate before installing. */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			FREE((caddr_t)old, M_DEVBUF);

		return (0);
	}
	FREE((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found.
*/ + return (ENXIO); +} + +/* + * Convert an interface name plus unit number of an ifp to a single + * name which is returned in the ifr. + */ +static void +bpf_ifname(ifp, ifr) + struct ifnet *ifp; + struct ifreq *ifr; +{ + char *s = ifp->if_name; + char *d = ifr->ifr_name; + + while (*d++ = *s++) + continue; + d--; /* back to the null */ + /* XXX Assume that unit number is less than 10. */ + *d++ = ifp->if_unit + '0'; + *d = '\0'; +} + + + +/* + * Support for select() and poll() system calls + * + * Return true iff the specific operation will not block indefinitely. + * Otherwise, return false but make a note that a selwakeup() must be done. + */ +int +bpfpoll(dev, events, p) + register dev_t dev; + int events; + struct proc *p; +{ + register struct bpf_d *d; + register int s; + int revents = 0; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + /* + * An imitation of the FIONREAD ioctl code. + */ + d = &bpf_dtab[minor(dev)]; + + s = splimp(); + if (events & (POLLIN | POLLRDNORM)) + if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) + revents |= events & (POLLIN | POLLRDNORM); + else + selrecord(p, &d->bd_sel); + + splx(s); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (revents); +} + +/* + * Incoming linkage from device drivers. Process the packet pkt, of length + * pktlen, which is stored in a contiguous buffer. The packet is parsed + * by each process' filter, and if accepted, stashed into the corresponding + * buffer. + */ +void +bpf_tap(ifp, pkt, pktlen) + struct ifnet *ifp; + register u_char *pkt; + register u_int pktlen; +{ + struct bpf_if *bp; + register struct bpf_d *d; + register u_int slen; + /* + * Note that the ipl does not have to be raised at this point. + * The only problem that could arise here is that if two different + * interfaces shared any data. This is not the case. 
+ */ + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if ((bp = ifp->if_bpf)) { + for (d = bp->bif_dlist; d != 0; d = d->bd_next) { + ++d->bd_rcount; + slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); + if (slen != 0) + catchpacket(d, pkt, pktlen, slen, bcopy); + } + } + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); +} + +/* + * Copy data from an mbuf chain into a buffer. This code is derived + * from m_copydata in sys/uipc_mbuf.c. + */ +static void +bpf_mcopy(src_arg, dst_arg, len) + const void *src_arg; + void *dst_arg; + register size_t len; +{ + register const struct mbuf *m; + register u_int count; + u_char *dst; + + m = src_arg; + dst = dst_arg; + while (len > 0) { + if (m == 0) + panic("bpf_mcopy"); + count = min(m->m_len, len); + bcopy(mtod(m, void *), dst, count); + m = m->m_next; + dst += count; + len -= count; + } +} + +/* + * Incoming linkage from device drivers, when packet is in an mbuf chain. + */ +void +bpf_mtap(ifp, m) + struct ifnet *ifp; + struct mbuf *m; +{ + struct bpf_if *bp = ifp->if_bpf; + struct bpf_d *d; + u_int pktlen, slen; + struct mbuf *m0; + + pktlen = 0; + for (m0 = m; m0 != 0; m0 = m0->m_next) + pktlen += m0->m_len; + + for (d = bp->bif_dlist; d != 0; d = d->bd_next) { + ++d->bd_rcount; + slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); + if (slen != 0) + catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); + } +} + +/* + * Move the packet data from interface memory (pkt) into the + * store buffer. Return 1 if it's time to wakeup a listener (buffer full), + * otherwise 0. "copy" is the routine called to do the actual data + * transfer. bcopy is passed in to copy contiguous chunks, while + * bpf_mcopy is passed in to copy mbuf chains. In the latter case, + * pkt is really an mbuf. 
+ */ +static void +catchpacket(d, pkt, pktlen, snaplen, cpfn) + register struct bpf_d *d; + register u_char *pkt; + register u_int pktlen, snaplen; + register void (*cpfn) __P((const void *, void *, size_t)); +{ + register struct bpf_hdr *hp; + register int totlen, curlen; + register int hdrlen = d->bd_bif->bif_hdrlen; + /* + * Figure out how many bytes to move. If the packet is + * greater or equal to the snapshot length, transfer that + * much. Otherwise, transfer the whole packet (unless + * we hit the buffer size limit). + */ + totlen = hdrlen + min(snaplen, pktlen); + if (totlen > d->bd_bufsize) + totlen = d->bd_bufsize; + + /* + * Round up the end of the previous packet to the next longword. + */ + curlen = BPF_WORDALIGN(d->bd_slen); + if (curlen + totlen > d->bd_bufsize) { + /* + * This packet will overflow the storage buffer. + * Rotate the buffers if we can, then wakeup any + * pending reads. + */ + if (d->bd_fbuf == 0) { + /* + * We haven't completed the previous read yet, + * so drop the packet. + */ + ++d->bd_dcount; + return; + } + ROTATE_BUFFERS(d); + bpf_wakeup(d); + curlen = 0; + } + else if (d->bd_immediate) + /* + * Immediate mode is set. A packet arrived so any + * reads should be woken up. + */ + bpf_wakeup(d); + + /* + * Append the bpf header. + */ + hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); +#if BSD >= 199103 + microtime(&hp->bh_tstamp); +#elif defined(sun) + uniqtime(&hp->bh_tstamp); +#else + hp->bh_tstamp = time; +#endif + hp->bh_datalen = pktlen; + hp->bh_hdrlen = hdrlen; + /* + * Copy the packet data into the store buffer and update its length. + */ + (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); + d->bd_slen = curlen + totlen; +} + +/* + * Initialize all nonzero fields of a descriptor. 
+ */ +static int +bpf_allocbufs(d) + register struct bpf_d *d; +{ + d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); + if (d->bd_fbuf == 0) + return (ENOBUFS); + + d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); + if (d->bd_sbuf == 0) { + FREE(d->bd_fbuf, M_DEVBUF); + return (ENOBUFS); + } + d->bd_slen = 0; + d->bd_hlen = 0; + return (0); +} + +/* + * Free buffers currently in use by a descriptor. + * Called on close. + */ +static void +bpf_freed(d) + register struct bpf_d *d; +{ + /* + * We don't need to lock out interrupts since this descriptor has + * been detached from its interface and it yet hasn't been marked + * free. + */ + if (d->bd_sbuf != 0) { + FREE(d->bd_sbuf, M_DEVBUF); + if (d->bd_hbuf != 0) + FREE(d->bd_hbuf, M_DEVBUF); + if (d->bd_fbuf != 0) + FREE(d->bd_fbuf, M_DEVBUF); + } + if (d->bd_filter) + FREE((caddr_t)d->bd_filter, M_DEVBUF); + + D_MARKFREE(d); +} + +/* + * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) + * in the driver's softc; dlt is the link layer type; hdrlen is the fixed + * size of the link header (variable length headers not yet supported). + */ +void +bpfattach(ifp, dlt, hdrlen) + struct ifnet *ifp; + u_int dlt, hdrlen; +{ + struct bpf_if *bp; + int i; + bp = (struct bpf_if *) _MALLOC(sizeof(*bp), M_DEVBUF, M_DONTWAIT); + if (bp == 0) + panic("bpfattach"); + + bp->bif_dlist = 0; + bp->bif_ifp = ifp; + bp->bif_dlt = dlt; + + bp->bif_next = bpf_iflist; + bpf_iflist = bp; + + bp->bif_ifp->if_bpf = 0; + + /* + * Compute the length of the bpf header. This is not necessarily + * equal to SIZEOF_BPF_HDR because we want to insert spacing such + * that the network layer header begins on a longword boundary (for + * performance reasons and to alleviate alignment restrictions). + */ + bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; + + /* + * Mark all the descriptors free if this hasn't been done. 
 */
	/* One-time initialization of the global descriptor table. */
	if (!bpf_dtab_init) {
		for (i = 0; i < nbpfilter; ++i)
			D_MARKFREE(&bpf_dtab[i]);
		bpf_dtab_init = 1;
	}
#if 0
	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
#endif
}

/* devfs node handles, one per bpf minor device */
static void *bpf_devfs_token[NBPFILTER];

/* non-zero once bpf_init has registered the character device */
static int bpf_devsw_installed;

void bpf_init __P((void *unused));
/*
 * One-time driver initialization: register the bpf character device and
 * create the devfs nodes for each minor device.  If no major number can
 * be allocated, bpf is disabled by forcing nbpfilter to 0.
 */
void
bpf_init(unused)
	void *unused;
{
	int i;
	int maj;

	if (!bpf_devsw_installed ) {
		bpf_devsw_installed = 1;
		maj = cdevsw_add(BPF_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			printf("bpf_init: failed to allocate a major number!\n");
			nbpfilter = 0;
			return;
		}
		for (i = 0 ; i < nbpfilter; i++) {
			/*
			 * NOTE(review): "bpf%x" numbers the nodes in hex,
			 * so for nbpfilter > 10 the names diverge from the
			 * usual decimal bpfN convention — confirm intended.
			 */
			bpf_devfs_token[i] = devfs_make_node(makedev(maj, i),
				DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
				"bpf%x", i);
		}
	}
}

/*
SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
*/

#endif
diff --git a/bsd/net/bpf.h b/bsd/net/bpf.h
new file mode 100644
index 000000000..a930619f0
--- /dev/null
+++ b/bsd/net/bpf.h
@@ -0,0 +1,263 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from the Stanford/CMU enet packet filter, + * (net/enet.c) distributed as part of 4.3BSD, and code contributed + * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence + * Berkeley Laboratory. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.h	8.1 (Berkeley) 6/10/93
 *	@(#)bpf.h	1.34 (LBL)     6/16/96
 *
 */

#ifndef _NET_BPF_H_
#define _NET_BPF_H_

/* BSD style release date */
#define BPF_RELEASE 199606

typedef	int32_t	  bpf_int32;
typedef	u_int32_t bpf_u_int32;

/*
 * Alignment macros.  BPF_WORDALIGN rounds up to the next
 * even multiple of BPF_ALIGNMENT.
 */
#define BPF_ALIGNMENT sizeof(long)
#define BPF_WORDALIGN(x) (((x)+(BPF_ALIGNMENT-1))&~(BPF_ALIGNMENT-1))

/* Filter program and capture-buffer size limits. */
#define BPF_MAXINSNS 512
#define BPF_MAXBUFSIZE 0x8000
#define BPF_MINBUFSIZE 32


/*
 * Structure for BIOCSETF.
 */
struct bpf_program {
	u_int bf_len;
	struct bpf_insn *bf_insns;
};

/*
 * Struct returned by BIOCGSTATS.
 */
struct bpf_stat {
	u_int bs_recv;		/* number of packets received */
	u_int bs_drop;		/* number of packets dropped */
};

/*
 * Struct returned by BIOCVERSION.  This represents the version number of
 * the filter language described by the instruction encodings below.
 * bpf understands a program iff kernel_major == filter_major &&
 * kernel_minor >= filter_minor, that is, if the value returned by the
 * running kernel has the same major number and a minor number equal
 * to or less than the filter being downloaded.  Otherwise, the
 * results are undefined, meaning an error may be returned or packets
 * may be accepted haphazardly.
 * It has nothing to do with the source code version.
 */
struct bpf_version {
	u_short bv_major;
	u_short bv_minor;
};
/* Current version number of filter architecture. */
#define BPF_MAJOR_VERSION 1
#define BPF_MINOR_VERSION 1

/* BPF ioctl commands. */
#define	BIOCGBLEN	_IOR('B',102, u_int)
#define	BIOCSBLEN	_IOWR('B',102, u_int)
#define	BIOCSETF	_IOW('B',103, struct bpf_program)
#define	BIOCFLUSH	_IO('B',104)
#define BIOCPROMISC	_IO('B',105)
#define	BIOCGDLT	_IOR('B',106, u_int)
#define BIOCGETIF	_IOR('B',107, struct ifreq)
#define BIOCSETIF	_IOW('B',108, struct ifreq)
#define BIOCSRTIMEOUT	_IOW('B',109, struct timeval)
#define BIOCGRTIMEOUT	_IOR('B',110, struct timeval)
#define BIOCGSTATS	_IOR('B',111, struct bpf_stat)
#define BIOCIMMEDIATE	_IOW('B',112, u_int)
#define BIOCVERSION	_IOR('B',113, struct bpf_version)
#define BIOCGRSIG	_IOR('B',114, u_int)
#define BIOCSRSIG	_IOW('B',115, u_int)

/*
 * Structure prepended to each packet.
 */
struct bpf_hdr {
	struct timeval	bh_tstamp;	/* time stamp */
	bpf_u_int32	bh_caplen;	/* length of captured portion */
	bpf_u_int32	bh_datalen;	/* original length of packet */
	u_short		bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
/*
 * Because the structure above is not a multiple of 4 bytes, some compilers
 * will insist on inserting padding; hence, sizeof(struct bpf_hdr) won't work.
 * Only the kernel needs to know about it; applications use bh_hdrlen.
 */
#ifdef KERNEL
#define SIZEOF_BPF_HDR (sizeof(struct bpf_hdr) <= 20 ? 18 : \
    sizeof(struct bpf_hdr))
#endif

/*
 * Data-link level type codes.
 */
#define DLT_NULL	0	/* no link-layer encapsulation */
#define DLT_EN10MB	1	/* Ethernet (10Mb) */
#define DLT_EN3MB	2	/* Experimental Ethernet (3Mb) */
#define DLT_AX25	3	/* Amateur Radio AX.25 */
#define DLT_PRONET	4	/* Proteon ProNET Token Ring */
#define DLT_CHAOS	5	/* Chaos */
#define DLT_IEEE802	6	/* IEEE 802 Networks */
#define DLT_ARCNET	7	/* ARCNET */
#define DLT_SLIP	8	/* Serial Line IP */
#define DLT_PPP		9	/* Point-to-point Protocol */
#define DLT_FDDI	10	/* FDDI */
#define DLT_ATM_RFC1483	11	/* LLC/SNAP encapsulated atm */
#define DLT_RAW		12	/* raw IP */
#define DLT_SLIP_BSDOS	13	/* BSD/OS Serial Line IP */
#define DLT_PPP_BSDOS	14	/* BSD/OS Point-to-point Protocol */

/*
 * The instruction encodings.
 */
/* instruction classes */
#define BPF_CLASS(code) ((code) & 0x07)
#define		BPF_LD		0x00
#define		BPF_LDX		0x01
#define		BPF_ST		0x02
#define		BPF_STX		0x03
#define		BPF_ALU		0x04
#define		BPF_JMP		0x05
#define		BPF_RET		0x06
#define		BPF_MISC	0x07

/* ld/ldx fields */
#define BPF_SIZE(code)	((code) & 0x18)
#define		BPF_W		0x00
#define		BPF_H		0x08
#define		BPF_B		0x10
#define BPF_MODE(code)	((code) & 0xe0)
#define		BPF_IMM 	0x00
#define		BPF_ABS		0x20
#define		BPF_IND		0x40
#define		BPF_MEM		0x60
#define		BPF_LEN		0x80
#define		BPF_MSH		0xa0

/* alu/jmp fields */
#define BPF_OP(code)	((code) & 0xf0)
#define		BPF_ADD		0x00
#define		BPF_SUB		0x10
#define		BPF_MUL		0x20
#define		BPF_DIV		0x30
#define		BPF_OR		0x40
#define		BPF_AND		0x50
#define		BPF_LSH		0x60
#define		BPF_RSH		0x70
#define		BPF_NEG		0x80
#define		BPF_JA		0x00
#define		BPF_JEQ		0x10
#define		BPF_JGT		0x20
#define		BPF_JGE		0x30
#define		BPF_JSET	0x40
#define BPF_SRC(code)	((code) & 0x08)
#define		BPF_K		0x00
#define		BPF_X		0x08

/* ret - BPF_K and BPF_X also apply */
#define BPF_RVAL(code)	((code) & 0x18)
#define		BPF_A		0x10

/* misc */
#define BPF_MISCOP(code) ((code) & 0xf8)
#define		BPF_TAX		0x00
#define		BPF_TXA		0x80

/*
 * The instruction data structure.
 */
struct bpf_insn {
	u_short		code;
	u_char		jt;
	u_char		jf;
	bpf_u_int32	k;
};

/*
 * Macros for insn array initializers.
 */
#define BPF_STMT(code, k) { (u_short)(code), 0, 0, k }
#define BPF_JUMP(code, k, jt, jf) { (u_short)(code), jt, jf, k }

#ifdef KERNEL
int	 bpf_validate __P((struct bpf_insn *, int));
void	 bpf_tap __P((struct ifnet *, u_char *, u_int));
void	 bpf_mtap __P((struct ifnet *, struct mbuf *));
void	 bpfattach __P(( struct ifnet *, u_int, u_int));
void	 bpfilterattach __P((int));
u_int	 bpf_filter __P((struct bpf_insn *, u_char *, u_int, u_int));
#endif

/*
 * Number of scratch memory words (for BPF_LD|BPF_MEM and BPF_ST).
 */
#define BPF_MEMWORDS 16

#define BPF_TAP(x, y, z) bpf_tap(x,y,z)
#define BPF_MTAP(x, y) bpf_mtap(x, y)
#endif
diff --git a/bsd/net/bpf_compat.h b/bsd/net/bpf_compat.h
new file mode 100644
index 000000000..550bdd836
--- /dev/null
+++ b/bsd/net/bpf_compat.h
@@ -0,0 +1,70 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)bpf_compat.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NET_BPF_COMPAT_H_ +#define _NET_BPF_COMPAT_H_ + +/* + * Some hacks for compatibility across SunOS and 4.4BSD. We emulate malloc + * and free with mbuf clusters. 
 We store a pointer to the mbuf in the first
 * word of the mbuf and return 8 bytes past the start of data (for double
 * word alignment). We cannot just use offsets because clusters are not at
 * a fixed offset from the associated mbuf. Sorry for this kludge.
 */
/*
 * NOTE(review): this free() replacement is only valid for pointers that
 * came from the malloc() wrapper above — it reads the owning mbuf pointer
 * 8 bytes before cp, as described in the comment above.
 */
#define malloc(size, type, canwait) _MALLOC(size, type, canwait)
#define free(cp, type) m_free(*(struct mbuf **)(cp - 8))

#endif
diff --git a/bsd/net/bpf_filter.c b/bsd/net/bpf_filter.c
new file mode 100644
index 000000000..72976d9c8
--- /dev/null
+++ b/bsd/net/bpf_filter.c
@@ -0,0 +1,579 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)bpf_filter.c 8.1 (Berkeley) 6/10/93 + * + */ + +#include + +#ifdef sun +#include +#endif + +#if defined(sparc) || defined(mips) || defined(ibm032) || defined(__alpha__) +#define BPF_ALIGN +#endif + +#ifndef BPF_ALIGN +#define EXTRACT_SHORT(p) ((u_int16_t)ntohs(*(u_int16_t *)p)) +#define EXTRACT_LONG(p) (ntohl(*(u_int32_t *)p)) +#else +#define EXTRACT_SHORT(p)\ + ((u_int16_t)\ + ((u_int16_t)*((u_char *)p+0)<<8|\ + (u_int16_t)*((u_char *)p+1)<<0)) +#define EXTRACT_LONG(p)\ + ((u_int32_t)*((u_char *)p+0)<<24|\ + (u_int32_t)*((u_char *)p+1)<<16|\ + (u_int32_t)*((u_char *)p+2)<<8|\ + (u_int32_t)*((u_char *)p+3)<<0) +#endif + +#ifdef KERNEL +#include +#endif +#include +#ifdef KERNEL +#define MINDEX(m, k) \ +{ \ + register int len = m->m_len; \ + \ + while (k >= len) { \ + k -= len; \ + m = m->m_next; \ + if (m == 0) \ + return 0; \ + len = m->m_len; \ + } \ +} + +static u_int16_t m_xhalf __P((struct mbuf *m, bpf_u_int32 k, int *err)); +static u_int32_t m_xword __P((struct mbuf *m, bpf_u_int32 k, int *err)); + +static u_int32_t +m_xword(m, k, err) + register struct mbuf *m; + register bpf_u_int32 k; + register int *err; +{ + register size_t len; + register u_char *cp, *np; + register struct mbuf *m0; + + len = m->m_len; + while (k >= len) { + k -= len; + m = m->m_next; + if (m == 0) + goto bad; + len = m->m_len; + } + cp = mtod(m, u_char *) + k; + if (len - k >= 4) { + *err = 0; + return EXTRACT_LONG(cp); + } + m0 = m->m_next; + if (m0 == 0 || m0->m_len + len - k < 4) + goto bad; + *err = 0; + np = mtod(m0, u_char *); + switch (len - k) { + + case 1: + return + ((u_int32_t)cp[0] << 24) | + ((u_int32_t)np[0] << 16) | + ((u_int32_t)np[1] << 8) | + (u_int32_t)np[2]; + + case 2: + return + ((u_int32_t)cp[0] << 24) | + ((u_int32_t)cp[1] << 16) | + ((u_int32_t)np[0] << 8) | + (u_int32_t)np[1]; + + default: + return + ((u_int32_t)cp[0] << 24) | + ((u_int32_t)cp[1] << 16) | + ((u_int32_t)cp[2] << 8) | + (u_int32_t)np[0]; + } + bad: + *err = 1; + return 0; +} + +static 
u_int16_t +m_xhalf(m, k, err) + register struct mbuf *m; + register bpf_u_int32 k; + register int *err; +{ + register size_t len; + register u_char *cp; + register struct mbuf *m0; + + len = m->m_len; + while (k >= len) { + k -= len; + m = m->m_next; + if (m == 0) + goto bad; + len = m->m_len; + } + cp = mtod(m, u_char *) + k; + if (len - k >= 2) { + *err = 0; + return EXTRACT_SHORT(cp); + } + m0 = m->m_next; + if (m0 == 0) + goto bad; + *err = 0; + return (cp[0] << 8) | mtod(m0, u_char *)[0]; + bad: + *err = 1; + return 0; +} +#endif + +/* + * Execute the filter program starting at pc on the packet p + * wirelen is the length of the original packet + * buflen is the amount of data present + */ +u_int +bpf_filter(pc, p, wirelen, buflen) + register struct bpf_insn *pc; + register u_char *p; + u_int wirelen; + register u_int buflen; +{ + register u_int32_t A = 0, X = 0; + register bpf_u_int32 k; + int32_t mem[BPF_MEMWORDS]; + + if (pc == 0) + /* + * No filter means accept all. + */ + return (u_int)-1; + + --pc; + while (1) { + ++pc; + switch (pc->code) { + + default: +#ifdef KERNEL + return 0; +#else + abort(); +#endif + case BPF_RET|BPF_K: + return (u_int)pc->k; + + case BPF_RET|BPF_A: + return (u_int)A; + + case BPF_LD|BPF_W|BPF_ABS: + k = pc->k; + if (k > buflen || sizeof(int32_t) > buflen - k) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xword((struct mbuf *)p, k, &merr); + if (merr != 0) + return 0; + continue; +#else + return 0; +#endif + } +#if BPF_ALIGN + if (((intptr_t)(p + k) & 3) != 0) + A = EXTRACT_LONG(&p[k]); + else +#endif + A = ntohl(*(int32_t *)(p + k)); + continue; + + case BPF_LD|BPF_H|BPF_ABS: + k = pc->k; + if (k > buflen || sizeof(int16_t) > buflen - k) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xhalf((struct mbuf *)p, k, &merr); + continue; +#else + return 0; +#endif + } + A = EXTRACT_SHORT(&p[k]); + continue; + + case BPF_LD|BPF_B|BPF_ABS: + k = pc->k; + if (k >= buflen) { +#ifdef KERNEL + 
register struct mbuf *m; + + if (buflen != 0) + return 0; + m = (struct mbuf *)p; + MINDEX(m, k); + A = mtod(m, u_char *)[k]; + continue; +#else + return 0; +#endif + } + A = p[k]; + continue; + + case BPF_LD|BPF_W|BPF_LEN: + A = wirelen; + continue; + + case BPF_LDX|BPF_W|BPF_LEN: + X = wirelen; + continue; + + case BPF_LD|BPF_W|BPF_IND: + k = X + pc->k; + if (pc->k > buflen || X > buflen - pc->k || sizeof(int32_t) > buflen - k) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xword((struct mbuf *)p, k, &merr); + if (merr != 0) + return 0; + continue; +#else + return 0; +#endif + } +#if BPF_ALIGN + if (((intptr_t)(p + k) & 3) != 0) + A = EXTRACT_LONG(&p[k]); + else +#endif + A = ntohl(*(int32_t *)(p + k)); + continue; + + case BPF_LD|BPF_H|BPF_IND: + k = X + pc->k; + if (X > buflen || pc->k > buflen - X || sizeof(int16_t) > buflen - pc->k) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xhalf((struct mbuf *)p, k, &merr); + if (merr != 0) + return 0; + continue; +#else + return 0; +#endif + } + A = EXTRACT_SHORT(&p[k]); + continue; + + case BPF_LD|BPF_B|BPF_IND: + k = X + pc->k; + if (pc->k >= buflen || X >= buflen - pc->k) { +#ifdef KERNEL + register struct mbuf *m; + + if (buflen != 0) + return 0; + m = (struct mbuf *)p; + MINDEX(m, k); + A = mtod(m, char *)[k]; + continue; +#else + return 0; +#endif + } + A = p[k]; + continue; + + case BPF_LDX|BPF_MSH|BPF_B: + k = pc->k; + if (k >= buflen) { +#ifdef KERNEL + register struct mbuf *m; + + if (buflen != 0) + return 0; + m = (struct mbuf *)p; + MINDEX(m, k); + X = (mtod(m, char *)[k] & 0xf) << 2; + continue; +#else + return 0; +#endif + } + X = (p[pc->k] & 0xf) << 2; + continue; + + case BPF_LD|BPF_IMM: + A = pc->k; + continue; + + case BPF_LDX|BPF_IMM: + X = pc->k; + continue; + + case BPF_LD|BPF_MEM: + A = mem[pc->k]; + continue; + + case BPF_LDX|BPF_MEM: + X = mem[pc->k]; + continue; + + case BPF_ST: + mem[pc->k] = A; + continue; + + case BPF_STX: + mem[pc->k] = X; + 
continue; + + case BPF_JMP|BPF_JA: + pc += pc->k; + continue; + + case BPF_JMP|BPF_JGT|BPF_K: + pc += (A > pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_K: + pc += (A >= pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_K: + pc += (A == pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_K: + pc += (A & pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGT|BPF_X: + pc += (A > X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_X: + pc += (A >= X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_X: + pc += (A == X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_X: + pc += (A & X) ? pc->jt : pc->jf; + continue; + + case BPF_ALU|BPF_ADD|BPF_X: + A += X; + continue; + + case BPF_ALU|BPF_SUB|BPF_X: + A -= X; + continue; + + case BPF_ALU|BPF_MUL|BPF_X: + A *= X; + continue; + + case BPF_ALU|BPF_DIV|BPF_X: + if (X == 0) + return 0; + A /= X; + continue; + + case BPF_ALU|BPF_AND|BPF_X: + A &= X; + continue; + + case BPF_ALU|BPF_OR|BPF_X: + A |= X; + continue; + + case BPF_ALU|BPF_LSH|BPF_X: + A <<= X; + continue; + + case BPF_ALU|BPF_RSH|BPF_X: + A >>= X; + continue; + + case BPF_ALU|BPF_ADD|BPF_K: + A += pc->k; + continue; + + case BPF_ALU|BPF_SUB|BPF_K: + A -= pc->k; + continue; + + case BPF_ALU|BPF_MUL|BPF_K: + A *= pc->k; + continue; + + case BPF_ALU|BPF_DIV|BPF_K: + A /= pc->k; + continue; + + case BPF_ALU|BPF_AND|BPF_K: + A &= pc->k; + continue; + + case BPF_ALU|BPF_OR|BPF_K: + A |= pc->k; + continue; + + case BPF_ALU|BPF_LSH|BPF_K: + A <<= pc->k; + continue; + + case BPF_ALU|BPF_RSH|BPF_K: + A >>= pc->k; + continue; + + case BPF_ALU|BPF_NEG: + A = -A; + continue; + + case BPF_MISC|BPF_TAX: + X = A; + continue; + + case BPF_MISC|BPF_TXA: + A = X; + continue; + } + } +} + +#ifdef KERNEL +/* + * Return true if the 'fcode' is a valid filter program. + * The constraints are that each jump be forward and to a valid + * code. 
The code must terminate with either an accept or reject. + * 'valid' is an array for use by the routine (it must be at least + * 'len' bytes long). + * + * The kernel needs to be able to verify an application's filter code. + * Otherwise, a bogus program could easily crash the system. + */ +int +bpf_validate(f, len) + struct bpf_insn *f; + int len; +{ + register int i; + register struct bpf_insn *p; + + for (i = 0; i < len; ++i) { + /* + * Check that that jumps are forward, and within + * the code block. + */ + p = &f[i]; + if (BPF_CLASS(p->code) == BPF_JMP) { + register int from = i + 1; + + if (BPF_OP(p->code) == BPF_JA) { + if (from >= len || p->k >= len - from) + return 0; + } + else if (from >= len || p->jt >= len - from || p->jf >= len - from) + return 0; + } + /* + * Check that memory operations use valid addresses. + */ + if ((BPF_CLASS(p->code) == BPF_ST || + (BPF_CLASS(p->code) == BPF_LD && + (p->code & 0xe0) == BPF_MEM)) && + p->k >= BPF_MEMWORDS) + return 0; + /* + * Check for constant division by 0. + */ + if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0) + return 0; + } + return BPF_CLASS(f[len - 1].code) == BPF_RET; +} +#endif diff --git a/bsd/net/bpfdesc.h b/bsd/net/bpfdesc.h new file mode 100644 index 000000000..194640f3c --- /dev/null +++ b/bsd/net/bpfdesc.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from the Stanford/CMU enet packet filter, + * (net/enet.c) distributed as part of 4.3BSD, and code contributed + * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence + * Berkeley Laboratory. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpfdesc.h	8.1 (Berkeley) 6/10/93
 *
 */

#ifndef _NET_BPFDESC_H_
#define _NET_BPFDESC_H_

/* NOTE(review): the #include target was lost in patch extraction —
 * restore from the upstream file. */
#include

/*
 * Descriptor associated with each open bpf file.
 */
struct bpf_d {
	struct bpf_d	*bd_next;	/* Linked list of descriptors */
	/*
	 * Buffer slots: two mbuf clusters buffer the incoming packets.
	 *   The model has three slots.  Sbuf is always occupied.
	 *   sbuf (store) - Receive interrupt puts packets here.
	 *   hbuf (hold) - When sbuf is full, put cluster here and
	 *                 wakeup read (replace sbuf with fbuf).
	 *   fbuf (free) - When read is done, put cluster here.
	 * On receiving, if sbuf is full and fbuf is 0, packet is dropped.
	 */
	caddr_t		bd_sbuf;	/* store slot */
	caddr_t		bd_hbuf;	/* hold slot */
	caddr_t		bd_fbuf;	/* free slot */
	int 		bd_slen;	/* current length of store buffer */
	int 		bd_hlen;	/* current length of hold buffer */

	int		bd_bufsize;	/* absolute length of buffers */

	struct bpf_if *	bd_bif;		/* interface descriptor */
	u_long		bd_rtout;	/* Read timeout in 'ticks' */
	struct bpf_insn *bd_filter; 	/* filter code */
	u_long		bd_rcount;	/* number of packets received */
	u_long		bd_dcount;	/* number of packets dropped */

	u_char		bd_promisc;	/* true if listening promiscuously */
	u_char		bd_state;	/* idle, waiting, or timed out */
	u_char		bd_immediate;	/* true to return on packet arrival */
	int		bd_async;	/* non-zero if packet reception should generate signal */
	int		bd_sig;		/* signal to send upon packet reception */
#if ISFB31
	struct sigio *	bd_sigio;	/* information for async I/O */
#else
	pid_t		bd_sigio;	/* pid to signal instead of struct sigio */
#endif

#if BSD < 199103
	u_char		bd_selcoll;	/* true if selects collide */
	int		bd_timedout;	/* NOTE(review): usage not visible here — presumably set when a timed read expires; confirm in bpf.c */
	struct proc *	bd_selproc;	/* process that last selected us */
#else
	u_char		bd_pad;		/* explicit alignment */
	struct selinfo	bd_sel;		/* bsd select info */
#endif
};

/*
 * Descriptor associated with each attached hardware interface.
 */
struct bpf_if {
	struct bpf_if *bif_next;	/* list of all interfaces */
	struct bpf_d *bif_dlist;	/* descriptor list */
	u_int bif_dlt;			/* link layer type */
	u_int bif_hdrlen;		/* length of header (with padding) */
	struct ifnet *bif_ifp;		/* corresponding interface */
};

#endif
diff --git a/bsd/net/bridge.c b/bsd/net/bridge.c
new file mode 100644
index 000000000..53cf96617
--- /dev/null
+++ b/bsd/net/bridge.c
@@ -0,0 +1,679 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Luigi Rizzo + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* + * This code implements bridging in FreeBSD. It only acts on ethernet + * type of interfaces (others are still usable for routing). + * A bridging table holds the source MAC address/dest. interface for each + * known node. The table is indexed using an hash of the source address. + * + * Input packets are tapped near the end of the input routine in each + * driver (near the call to bpf_mtap, or before the call to ether_input) + * and analysed calling bridge_in(). Depending on the result, the packet + * can be forwarded to one or more output interfaces using bdg_forward(), + * and/or sent to the upper layer (e.g. in case of multicast). + * + * Output packets are intercepted near the end of ether_output(), + * the correct destination is selected calling bdg_dst_lookup(), + * and then forwarding is done using bdg_forward(). + * Bridging is controlled by the sysctl variable net.link.ether.bridge + * + * The arp code is also modified to let a machine answer to requests + * irrespective of the port the request came from. + * + * In case of loops in the bridging topology, the bridge detects this + * event and temporarily mutes output bridging on one of the ports. + * Periodically, interfaces are unmuted by bdg_timeout(). (For the + * mute flag i am temporarily using IFF_LINK2 but this has to + * change.) 
Muting is only implemented as a safety measure, and also as + * a mechanism to support a user-space implementation of the spanning + * tree algorithm. In the final release, unmuting will only occur + * because of explicit action of the user-level daemon. + * + * To build a bridging kernel, use the following option + * option BRIDGE + * and then at runtime set the sysctl variable to enable bridging. + * + * Only one interface is supposed to have addresses set (but + * there are no problems in practice if you set addresses for more + * than one interface). + * Bridging will act before routing, but nothing prevents a machine + * from doing both (modulo bugs in the implementation...). + * + * THINGS TO REMEMBER + * - bridging requires some (small) modifications to the interface + * driver. Currently (980911) the "ed", "de", "tx", "lnc" drivers + * have been modified and tested. "fxp", "ep", "fe" have been + * modified but not tested. See the "ed" and "de" drivers as + * examples on how to operate. + * - bridging is incompatible with multicast routing on the same + * machine. There is not an easy fix to this. + * - loop detection is still not very robust. + * - the interface of bdg_forward() could be improved. + */ + +#include +#include +#include +#include +#include /* for net/if.h */ +#include +#include + +#include +#include + +#include /* for struct arpcom */ +#include +#include +#include +#include /* for struct arpcom */ + +#include "opt_ipfw.h" +#include "opt_ipdn.h" + +#if defined(IPFIREWALL) && defined(DUMMYNET) +#include +#include +#include +#endif + +#include + +/* + * For debugging, you can use the following macros. + * remember, rdtsc() only works on Pentium-class machines + + quad_t ticks; + DDB(ticks = rdtsc();) + ... interesting code ... 
+ DDB(bdg_fw_ticks += (u_long)(rdtsc() - ticks) ; bdg_fw_count++ ;) + + * + */ + +#define DDB(x) x +#define DEB(x) + +/* + * System initialization + */ + +static void bdginit(void *); +static void flush_table(void); + +SYSINIT(interfaces, SI_SUB_PROTO_IF, SI_ORDER_FIRST, bdginit, NULL) + +static int bdg_ipfw = 0 ; +int do_bridge = 0; +bdg_hash_table *bdg_table = NULL ; + +/* + * we need additional info for the bridge. The bdg_ifp2sc[] array + * provides a pointer to this struct using the if_index. + * bdg_softc has a backpointer to the struct ifnet, the bridge + * flags, and a group (bridging occurs only between port of the + * same group). + */ +struct bdg_softc { + struct ifnet *ifp ; + /* ((struct arpcom *)ifp)->ac_enaddr is the eth. addr */ + int flags ; + int group ; +} ; + +static struct bdg_softc **ifp2sc = NULL ; + +#if 0 /* new code using ifp2sc */ +#define SAMEGROUP(ifp,src) (src == NULL || \ + ifp2sc[ifp->if_index]->group == ifp2sc[src->if_index]->group ) +#define MUTED(ifp) (ifp2sc[ifp->if_index]->flags & IFF_MUTE) +#define MUTE(ifp) ifp2sc[ifp->if_index]->flags |= IFF_MUTE +#define UNMUTE(ifp) ifp2sc[ifp->if_index]->flags &= ~IFF_MUTE +#else +#define SAMEGROUP(a,b) 1 +#define MUTED(ifp) (ifp->if_flags & IFF_MUTE) +#define MUTE(ifp) ifp->if_flags |= IFF_MUTE +#define UNMUTE(ifp) ifp->if_flags &= ~IFF_MUTE +#endif + +static int +sysctl_bdg SYSCTL_HANDLER_ARGS +{ + int error, oldval = do_bridge ; + + error = sysctl_handle_int(oidp, + oidp->oid_arg1, oidp->oid_arg2, req); + printf("called sysctl for bridge name %s arg2 %d val %d->%d\n", + oidp->oid_name, oidp->oid_arg2, + oldval, do_bridge); + if (bdg_table == NULL) + do_bridge = 0 ; + if (oldval != do_bridge) { + flush_table(); + } + return error ; +} + +SYSCTL_DECL(_net_link_ether); +SYSCTL_PROC(_net_link_ether, OID_AUTO, bridge, CTLTYPE_INT|CTLFLAG_RW, + &do_bridge, 0, &sysctl_bdg, "I", "Bridging"); + +SYSCTL_INT(_net_link_ether, OID_AUTO, bridge_ipfw, CTLFLAG_RW, &bdg_ipfw,0,""); +#if 1 /* diagnostic 
vars */
int bdg_in_count = 0 , bdg_in_ticks = 0 , bdg_fw_count = 0, bdg_fw_ticks = 0 ;
SYSCTL_INT(_net_link_ether, OID_AUTO, bdginc, CTLFLAG_RW, &bdg_in_count,0,"");
SYSCTL_INT(_net_link_ether, OID_AUTO, bdgint, CTLFLAG_RW, &bdg_in_ticks,0,"");
SYSCTL_INT(_net_link_ether, OID_AUTO, bdgfwc, CTLFLAG_RW, &bdg_fw_count,0,"");
SYSCTL_INT(_net_link_ether, OID_AUTO, bdgfwt, CTLFLAG_RW, &bdg_fw_ticks,0,"");
#endif
static struct bdg_stats bdg_stats ;
SYSCTL_STRUCT(_net_link_ether, PF_BDG, bdgstats,
    CTLFLAG_RD, &bdg_stats , bdg_stats, "bridge statistics");

static int bdg_loops ;  /* loop events seen since the last slow-timer reset */

/*
 * completely flush the bridge table.
 */
static void
flush_table()
{
    int s,i;

    if (bdg_table == NULL)
        return ;
    s = splimp();
    for (i=0; i< HASH_SIZE; i++)
        bdg_table[i].name= NULL; /* clear table */
    splx(s);
}

/*
 * wrapper for funnel: timeout() callbacks run outside the network
 * funnel, so acquire it around the real handler.
 */
void
bdg_timeout_funneled(void * dummy)
{
    boolean_t funnel_state;

    funnel_state = thread_funnel_set(network_flock, TRUE);
    bdg_timeout(dummy);
    funnel_state = thread_funnel_set(network_flock, FALSE);
}

/*
 * called periodically to flush entries etc.
 * Ages one quarter of the forwarding table per tick, and every 5th
 * run brings all ethernet interfaces UP/promiscuous and unmutes any
 * loop-muted ports.  Re-arms itself via timeout() every 2*hz ticks.
 */
static void
bdg_timeout(void *dummy)
{
    struct ifnet *ifp ;
    int s ;
    static int slowtimer = 0 ;
    boolean_t funnel_state;  /* XXX unused -- leftover from the funnel wrapper? */


    if (do_bridge) {
        static int age_index = 0 ; /* index of table position to age */
        int l = age_index + HASH_SIZE/4 ;
        /*
         * age entries in the forwarding table.
         */
        if (l > HASH_SIZE)
            l = HASH_SIZE ;
        for (; age_index < l ; age_index++)
            if (bdg_table[age_index].used)
                bdg_table[age_index].used = 0 ;
            else if (bdg_table[age_index].name) {
                /* printf("xx flushing stale entry %d\n", age_index); */
                bdg_table[age_index].name = NULL ;
            }
        if (age_index >= HASH_SIZE)
            age_index = 0 ;

        if (--slowtimer <= 0 ) {
            slowtimer = 5 ;

            for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) {
                if (ifp->if_type != IFT_ETHER)
                    continue ;
                if ( 0 == ( ifp->if_flags & IFF_UP) ) {
                    s = splimp();
                    if_up(ifp);
                    splx(s);
                }
                if ( 0 == ( ifp->if_flags & IFF_PROMISC) ) {
                    int ret ;
                    s = splimp();
                    ret = ifpromisc(ifp, 1);
                    splx(s);
                    printf(">> now %s%d flags 0x%x promisc %d\n",
                        ifp->if_name, ifp->if_unit,
                        ifp->if_flags, ret);
                }
                if (MUTED(ifp)) {
                    printf(">> unmuting %s%d\n", ifp->if_name, ifp->if_unit);
                    UNMUTE(ifp) ;
                }
            }
            bdg_loops = 0 ;
        }
    }
    timeout(bdg_timeout_funneled, (void *)0, 2*hz );

}

/*
 * local MAC addresses are held in a small array. This makes comparisons
 * much faster.
 */
unsigned char bdg_addresses[6*BDG_MAX_PORTS];
int bdg_ports ;

/*
 * initialization of bridge code.
 * Allocates the forwarding table and the per-port softc array, then
 * records the MAC address of every ethernet interface into
 * bdg_addresses[].  Kicks off the periodic bdg_timeout() and leaves
 * bridging disabled until the sysctl turns it on.
 */
static void
bdginit(dummy)
    void *dummy;
{
    int i ;
    struct ifnet *ifp;
    struct arpcom *ac ;
    u_char *eth_addr ;
    /*
     * initialization of bridge code
     */
    if (bdg_table == NULL)
        bdg_table = (struct hash_table *)
            _MALLOC(HASH_SIZE * sizeof(struct hash_table),
                M_IFADDR, M_WAITOK);
    flush_table();

    ifp2sc = _MALLOC(if_index * sizeof(struct bdg_softc *), M_IFADDR, M_WAITOK );
    bzero(ifp2sc, if_index * sizeof(struct bdg_softc *) );

    bzero(&bdg_stats, sizeof(bdg_stats) );
    bdg_ports = 0 ;
    eth_addr = bdg_addresses ;

    printf("BRIDGE 981214, have %d interfaces\n", if_index);
    /* NOTE(review): assumes the ifnet list has at least if_index entries;
     * ifp is not checked for NULL inside the loop -- TODO confirm. */
    for (i = 0 , ifp = ifnet.tqh_first ; i < if_index ;
            i++, ifp = ifp->if_link.tqe_next)
        if (ifp->if_type == IFT_ETHER) { /* ethernet ? */
            ac = (struct arpcom *)ifp;
            sprintf(bdg_stats.s[ifp->if_index].name,
                "%s%d", ifp->if_name, ifp->if_unit);
            printf("-- index %d %s type %d phy %d addrl %d addr %6D\n",
                ifp->if_index,
                bdg_stats.s[ifp->if_index].name,
                (int)ifp->if_type, (int) ifp->if_physical,
                (int)ifp->if_addrlen,
                ac->ac_enaddr, "." );
            bcopy(ac->ac_enaddr, eth_addr, 6);
            eth_addr += 6 ;

            ifp2sc[bdg_ports] = _MALLOC(sizeof(struct bdg_softc),
                M_IFADDR, M_WAITOK );
            ifp2sc[bdg_ports]->ifp = ifp ;
            ifp2sc[bdg_ports]->flags = 0 ;
            ifp2sc[bdg_ports]->group = 0 ;
            bdg_ports ++ ;
        }
    bdg_timeout(0);
    do_bridge=0;
}

/*
 * bridge_in() is invoked to perform bridging decision on input packets.
 * On Input:
 *   m        packet to be bridged. The mbuf need not to hold the
 *            whole packet, only the first 14 bytes suffice. We
 *            assume them to be contiguous. No alignment assumptions
 *            because they are not a problem on i386 class machines.
 *
 * On Return: destination of packet, one of
 *   BDG_BCAST    broadcast
 *   BDG_MCAST    multicast
 *   BDG_LOCAL    is only for a local address (do not forward)
 *   BDG_DROP     drop the packet
 *   ifp          ifp of the destination interface.
 *
 * Forwarding is not done directly to give a chance to some drivers
 * to fetch more of the packet, or simply drop it completely.
 */


struct ifnet *
bridge_in(struct mbuf *m)
{
    int index;
    struct ifnet *ifp = m->m_pkthdr.rcvif, *dst , *old ;
    int dropit = MUTED(ifp) ;
    struct ether_header *eh;

    eh = mtod(m, struct ether_header *);

    /*
     * hash the source address
     */
    index= HASH_FN(eh->ether_shost);
    bdg_table[index].used = 1 ;
    old = bdg_table[index].name ;
    if ( old ) { /* the entry is valid. */
        if (!BDG_MATCH( eh->ether_shost, bdg_table[index].etheraddr) ) {
            /* hash collision with a different MAC: invalidate the slot */
            printf("collision at %d\n", index);
            bdg_table[index].name = NULL ;
        } else if (old != ifp) {
            /*
             * found a loop. Either a machine has moved, or there
             * is a misconfiguration/reconfiguration of the network.
             * First, do not forward this packet!
             * Record the relocation anyways; then, if loops persist,
             * suspect a reconfiguration and disable forwarding
             * from the old interface.
             */
            bdg_table[index].name = ifp ; /* relocate address */
            printf("-- loop (%d) %6D to %s%d from %s%d (%s)\n",
                bdg_loops, eh->ether_shost, ".",
                ifp->if_name, ifp->if_unit,
                old->if_name, old->if_unit,
                old->if_flags & IFF_MUTE ? "muted":"ignore");
            dropit = 1 ;
            if ( !MUTED(old) ) {
                if (++bdg_loops > 10)
                    MUTE(old) ;
            }
        }
    }

    /*
     * now write the source address into the table
     */
    if (bdg_table[index].name == NULL) {
        DEB(printf("new addr %6D at %d for %s%d\n",
            eh->ether_shost, ".", index, ifp->if_name, ifp->if_unit);)
        bcopy(eh->ether_shost, bdg_table[index].etheraddr, 6);
        bdg_table[index].name = ifp ;
    }
    dst = bridge_dst_lookup(m);
    /* Return values:
     *   BDG_BCAST, BDG_MCAST, BDG_LOCAL, BDG_UNKNOWN, BDG_DROP, ifp.
     * For muted interfaces, the first 3 are changed in BDG_LOCAL,
     * and others to BDG_DROP. Also, for incoming packets, ifp is changed
     * to BDG_DROP in case ifp == src . These mods are not necessary
     * for outgoing packets from ether_output().
     */
    BDG_STAT(ifp, BDG_IN);
    switch ((int)dst) {
    case (int)BDG_BCAST:
    case (int)BDG_MCAST:
    case (int)BDG_LOCAL:
    case (int)BDG_UNKNOWN:
    case (int)BDG_DROP:
        BDG_STAT(ifp, dst);
        break ;
    default :
        if (dst == ifp || dropit )
            BDG_STAT(ifp, BDG_DROP);
        else
            BDG_STAT(ifp, BDG_FORWARD);
        break ;
    }

    if ( dropit ) {
        if (dst == BDG_BCAST || dst == BDG_MCAST || dst == BDG_LOCAL)
            return BDG_LOCAL ;
        else
            return BDG_DROP ;
    } else {
        return (dst == ifp ? BDG_DROP : dst ) ;
    }
}

/*
 * Forward to dst, excluding src port and (if not a single interface)
 * muted interfaces. The packet is freed if marked as such
 * and not for a local destination.
 * A cleaner implementation would be to make bdg_forward()
 * always consume the packet, leaving to the caller the task
 * to make a copy if it needs it. As it is now, bdg_forward()
 * can keep a copy alive in some cases.
 */
int
bdg_forward (struct mbuf **m0, struct ifnet *dst)
{
    struct ifnet *src = (*m0)->m_pkthdr.rcvif; /* could be NULL in output */
    struct ifnet *ifp ;
    struct ip *ip;
    int error=0, s ;
    int once = 0; /* execute the loop only once */
    int canfree = 1 ; /* can free the buf at the end */
    struct mbuf *m ;

    struct ether_header *eh = mtod(*m0, struct ether_header *); /* XXX */

    if (dst == BDG_DROP) { /* this should not happen */
        printf("xx bdg_forward for BDG_DROP)\n");
        m_freem(*m0) ;
        *m0 = NULL ;
        return 0;
    }
    if (dst == BDG_LOCAL) { /* this should not happen as well */
        printf("xx ouch, bdg_forward for local pkt\n");
        return 0;
    }
    if (dst == BDG_BCAST || dst == BDG_MCAST || dst == BDG_UNKNOWN) {
        /* flood: walk the whole interface list */
        ifp = ifnet.tqh_first ;
        once = 0 ;
        if (dst != BDG_UNKNOWN)
            canfree = 0 ;
    } else {
        /* single known destination port */
        ifp = dst ;
        once = 1 ; /* and also canfree */
    }
#if IPFIREWALL
    /*
     * do filtering in a very similar way to what is done
     * in ip_output. Only for IP packets, and only pass/fail/dummynet
     * is supported. The tricky thing is to make sure that enough of
     * the packet (basically, Eth+IP+TCP/UDP headers) is contiguous
     * so that calls to m_pullup in ip_fw_chk will not kill the
     * ethernet header.
     */
    if (ip_fw_chk_ptr) {
        u_int16_t dummy ;
        struct ip_fw_chain *rule;
        int off;

        m = *m0 ;
        if (m->m_type == MT_DUMMYNET) {
            /*
             * the packet was already tagged, so part of the
             * processing was already done, and we need to go down.
             */
            rule = (struct ip_fw_chain *)(m->m_data) ;
            (*m0) = m = m->m_next ;

            src = m->m_pkthdr.rcvif; /* could be NULL in output */
            eh = mtod(m, struct ether_header *); /* XXX */
            canfree = 1 ; /* for sure, a copy is not needed later. */
            goto forward; /* HACK! */
        } else
            rule = NULL ;
        if (bdg_ipfw == 0)
            goto forward ;
        if (src == NULL)
            goto forward ; /* do not apply to packets from ether_output */
        if (canfree == 0 ) /* need to make a copy */
            m = m_copypacket(*m0, M_DONTWAIT);
        if (m == NULL) {
            /* fail... */
            return 0 ;
        }

        dummy = 0 ;
        /*
         * before calling the firewall, swap fields the same as IP does.
         * here we assume the pkt is an IP one and the header is contiguous
         */
        eh = mtod(m, struct ether_header *);
        ip = (struct ip *)(eh + 1 ) ;
        NTOHS(ip->ip_len);
        NTOHS(ip->ip_id);
        NTOHS(ip->ip_off);

        /*
         * The third parameter to the firewall code is the dst. interface.
         * Since we apply checks only on input pkts we use NULL.
         */
        off = (*ip_fw_chk_ptr)(NULL, 0, NULL, &dummy, &m, &rule, NULL) ;
        if (m == NULL) { /* pkt discarded by firewall */
            if (canfree)
                *m0 = NULL ;
            return 0 ;
        }
        /*
         * on return, the mbuf pointer might have changed. Restore
         * *m0 (if it was the same as m), eh, ip and then
         * restore original ordering.
         */
        eh = mtod(m, struct ether_header *);
        ip = (struct ip *)(eh + 1 ) ;
        if (canfree) /* m was a reference to *m0, so update *m0 */
            *m0 = m ;
        HTONS(ip->ip_len);
        HTONS(ip->ip_id);
        HTONS(ip->ip_off);
        if (off == 0) {
            /* firewall says pass */
            if (canfree == 0)
                m_freem(m);
            goto forward ;
        }
#if DUMMYNET
        if (off & 0x10000) {
            /*
             * pass the pkt to dummynet. Need to include m, dst, rule.
             * Dummynet consumes the packet in all cases.
             */
            dummynet_io((off & 0xffff), DN_TO_BDG_FWD, m, dst, NULL, 0, rule);
            if (canfree) /* dummynet has consumed the original one */
                *m0 = NULL ;
            return 0 ;
        }
#endif
        /* if none of the above matches, we have to drop the pkt */
        if (m)
            m_freem(m);
        if (canfree && m != *m0) {
            m_freem(*m0);
            *m0 = NULL ;
        }
        return 0 ;
    }
forward:
#endif /* IPFIREWALL */
    /*
     * If the packet can be consumed and only one port is addressed,
     * hand the original mbuf straight to the output queue; otherwise
     * copies are made per-port below.
     */
    if (canfree && once)
        m = *m0 ;
    else
        m = NULL ;

    for ( ; ifp ; ifp = ifp->if_link.tqe_next ) {
        if (ifp != src && ifp->if_type == IFT_ETHER &&
                (ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING) &&
                SAMEGROUP(ifp, src) && !MUTED(ifp) ) {
            if (m == NULL) { /* do i need to make a copy ? */
                if (canfree && ifp->if_link.tqe_next == NULL) /* last one! */
                    m = *m0 ;
                else /* on a P5-90, m_packetcopy takes 540 ticks */
                    m = m_copypacket(*m0, M_DONTWAIT);
                if (m == NULL) {
                    printf("bdg_forward: sorry, m_copy failed!\n");
                    return ENOBUFS ;
                }
            }
            /*
             * execute last part of ether_output.
             */
            s = splimp();
            /*
             * Queue message on interface, and start output if interface
             * not yet active.
             */
            if (IF_QFULL(&ifp->if_snd)) {
                IF_DROP(&ifp->if_snd);
                MUTE(ifp); /* good measure... */
                splx(s);
                error = ENOBUFS ;
            } else {
                ifp->if_obytes += m->m_pkthdr.len ;
                if (m->m_flags & M_MCAST)
                    ifp->if_omcasts++;
                IF_ENQUEUE(&ifp->if_snd, m);
                if ((ifp->if_flags & IFF_OACTIVE) == 0)
                    (*ifp->if_start)(ifp);
                splx(s);
                if (m == *m0)
                    *m0 = NULL ; /* the packet is gone... */
                m = NULL ;
            }
            BDG_STAT(ifp, BDG_OUT);
        }
        if (once)
            break ;
    }

    /* cleanup any mbuf leftover. */
    if (m)
        m_freem(m);
    if (m == *m0)
        *m0 = NULL ;
    if (canfree && *m0) {
        m_freem(*m0);
        *m0 = NULL ;
    }
    return error ;
}
diff --git a/bsd/net/bridge.h b/bsd/net/bridge.h
new file mode 100644
index 000000000..7f348134e
--- /dev/null
+++ b/bsd/net/bridge.h
@@ -0,0 +1,168 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Luigi Rizzo + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
 IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#ifndef _NET_BRIDGE_H_
#define _NET_BRIDGE_H_

extern int do_bridge;
/*
 * the hash table for bridge
 */
typedef struct hash_table {
    struct ifnet *name ;         /* output port for this MAC; NULL = free slot */
    unsigned char etheraddr[6] ; /* the learned source MAC address */
    unsigned short used ;        /* set on lookup, cleared by the aging pass */
} bdg_hash_table ;

extern bdg_hash_table *bdg_table ;

#define BDG_MAX_PORTS 128
/* flat array of local MAC addresses, 6 bytes per port (see bdginit) */
extern unsigned char bdg_addresses[6*BDG_MAX_PORTS];
extern int bdg_ports ;

/*
 * out of the 6 bytes, the last ones are more "variable". Since
 * we are on a little endian machine, we have to do some gimmick...
 */
#define HASH_SIZE 8192 /* must be a power of 2 */
#define HASH_FN(addr) ( \
    ntohs( ((short *)addr)[1] ^ ((short *)addr)[2] ) & (HASH_SIZE -1))

#define IFF_MUTE IFF_LINK2 /* will need a separate flag... */

struct ifnet *bridge_in(struct mbuf *m);
/* bdg_forward frees the mbuf if necessary, returning null */
int bdg_forward (struct mbuf **m, struct ifnet *dst);

#ifdef __i386__
/* word-at-a-time MAC compare; relies on i386 tolerating unaligned loads */
#define BDG_MATCH(a,b) ( \
    ((unsigned short *)(a))[2] == ((unsigned short *)(b))[2] && \
    *((unsigned int *)(a)) == *((unsigned int *)(b)) )
#define IS_ETHER_BROADCAST(a) ( \
    *((unsigned int *)(a)) == 0xffffffff && \
    ((unsigned short *)(a))[2] == 0xffff )
#else
#warning... must complete these for the alpha etc.
+#define BDG_MATCH(a,b) (!bcmp(a, b, ETHER_ADDR_LEN) ) +#endif +/* + * The following constants are not legal ifnet pointers, and are used + * as return values from the classifier, bridge_dst_lookup() + * The same values are used as index in the statistics arrays, + * with BDG_FORWARD replacing specifically forwarded packets. + */ +#define BDG_BCAST ( (struct ifnet *)1 ) +#define BDG_MCAST ( (struct ifnet *)2 ) +#define BDG_LOCAL ( (struct ifnet *)3 ) +#define BDG_DROP ( (struct ifnet *)4 ) +#define BDG_UNKNOWN ( (struct ifnet *)5 ) +#define BDG_IN ( (struct ifnet *)7 ) +#define BDG_OUT ( (struct ifnet *)8 ) +#define BDG_FORWARD ( (struct ifnet *)9 ) + +#define PF_BDG 3 /* XXX superhack */ +/* + * statistics, passed up with sysctl interface and ns -p bdg + */ + +#define STAT_MAX (int)BDG_FORWARD +struct bdg_port_stat { + char name[16]; + u_long collisions; + u_long p_in[STAT_MAX+1]; +} ; + +struct bdg_stats { + struct bdg_port_stat s[16]; +} ; + + +#define BDG_STAT(ifp, type) bdg_stats.s[ifp->if_index].p_in[(int)type]++ + +#if KERNEL +/* + * Find the right pkt destination: + * BDG_BCAST is a broadcast + * BDG_MCAST is a multicast + * BDG_LOCAL is for a local address + * BDG_DROP must be dropped + * other ifp of the dest. interface (incl.self) + */ +static __inline +struct ifnet * +bridge_dst_lookup(struct mbuf *m) +{ + struct ether_header *eh = mtod(m, struct ether_header *); + struct ifnet *dst ; + int index ; + u_char *eth_addr = bdg_addresses ; + + if (IS_ETHER_BROADCAST(eh->ether_dhost)) + return BDG_BCAST ; + if (eh->ether_dhost[0] & 1) + return BDG_MCAST ; + /* + * Lookup local addresses in case one matches. 
+ */ + for (index = bdg_ports, eth_addr = bdg_addresses ; + index ; index--, eth_addr += 6 ) + if (BDG_MATCH(eth_addr, eh->ether_dhost) ) + return BDG_LOCAL ; + /* + * Look for a possible destination in table + */ + index= HASH_FN( eh->ether_dhost ); + dst = bdg_table[index].name; + if ( dst && BDG_MATCH( bdg_table[index].etheraddr, eh->ether_dhost) ) + return dst ; + else + return BDG_UNKNOWN ; +} + +#endif /* KERNEL */ + +#endif /* ! _NET_BRIDGE_H_ */ + diff --git a/bsd/net/bsd_comp.c b/bsd/net/bsd_comp.c new file mode 100644 index 000000000..d81e66f93 --- /dev/null +++ b/bsd/net/bsd_comp.c @@ -0,0 +1,1133 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Because this code is derived from the 4.3BSD compress source: + * + * + * Copyright (c) 1985, 1986 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * James A. Woods, derived from original work by Spencer Thomas + * and Joseph Orost. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * This version is for use with mbufs on BSD-derived systems. 
+ * + */ + +#include +#include +#include +#include +#include + +#define PACKETPTR struct mbuf * +#include + +#if DO_BSD_COMPRESS +/* + * PPP "BSD compress" compression + * The differences between this compression and the classic BSD LZW + * source are obvious from the requirement that the classic code worked + * with files while this handles arbitrarily long streams that + * are broken into packets. They are: + * + * When the code size expands, a block of junk is not emitted by + * the compressor and not expected by the decompressor. + * + * New codes are not necessarily assigned every time an old + * code is output by the compressor. This is because a packet + * end forces a code to be emitted, but does not imply that a + * new sequence has been seen. + * + * The compression ratio is checked at the first end of a packet + * after the appropriate gap. Besides simplifying and speeding + * things up, this makes it more likely that the transmitter + * and receiver will agree when the dictionary is cleared when + * compression is not going well. + */ + +/* + * A dictionary for doing BSD compress. 
 */
struct bsd_db {
    int totlen; /* length of this structure */
    u_int hsize; /* size of the hash table */
    u_char hshift; /* used in hash function */
    u_char n_bits; /* current bits/code */
    u_char maxbits; /* upper bound on n_bits (set from options in bsd_alloc) */
    u_char debug;   /* presumably enables debug output -- TODO confirm */
    u_char unit;    /* presumably the ppp unit number -- TODO confirm */
    u_int16_t seqno; /* sequence # of next packet */
    u_int hdrlen; /* header length to preallocate */
    u_int mru;    /* presumably max receive unit (decompress side) -- TODO confirm */
    u_int maxmaxcode; /* largest valid code */
    u_int max_ent; /* largest code in use */
    u_int in_count; /* uncompressed bytes, aged */
    u_int bytes_out; /* compressed bytes, aged */
    u_int ratio; /* recent compression ratio */
    u_int checkpoint; /* when to next check the ratio */
    u_int clear_count; /* times dictionary cleared */
    u_int incomp_count; /* incompressible packets */
    u_int incomp_bytes; /* incompressible bytes */
    u_int uncomp_count; /* uncompressed packets */
    u_int uncomp_bytes; /* uncompressed bytes */
    u_int comp_count; /* compressed packets */
    u_int comp_bytes; /* compressed bytes */
    u_int16_t *lens; /* array of lengths of codes */
    struct bsd_dict {
        union { /* hash value */
            u_int32_t fcode;
            struct {
#if BYTE_ORDER == LITTLE_ENDIAN
                u_int16_t prefix; /* preceding code */
                u_char suffix; /* last character of new code */
                u_char pad;
#else
                u_char pad;
                u_char suffix; /* last character of new code */
                u_int16_t prefix; /* preceding code */
#endif
            } hs;
        } f;
        u_int16_t codem1; /* output of hash table -1 */
        u_int16_t cptr; /* map code to hash table entry */
    } dict[1]; /* actually hsize entries; over-allocated in bsd_alloc() */
};

#define BSD_OVHD 2 /* BSD compress overhead/packet */
#define BSD_INIT_BITS BSD_MIN_BITS

/* Forward declarations for the handlers wired into ppp_bsd_compress below. */
static void bsd_clear __P((struct bsd_db *db));
static int bsd_check __P((struct bsd_db *db));
static void *bsd_alloc __P((u_char *options, int opt_len, int decomp));
static int bsd_init_comp_db __P((struct bsd_db *db, u_char *options, int opt_len,
    int unit, int hdrlen, int mru, int debug,
    int decomp));
static void *bsd_comp_alloc __P((u_char *options, int opt_len));
static void *bsd_decomp_alloc __P((u_char *options, int opt_len));
static void bsd_free __P((void *state));
static int bsd_comp_init __P((void *state, u_char *options, int opt_len,
    int unit, int hdrlen, int debug));
static int bsd_decomp_init __P((void *state, u_char *options, int opt_len,
    int unit, int hdrlen, int mru, int debug));
static int bsd_compress __P((void *state, struct mbuf **mret,
    struct mbuf *mp, int slen, int maxolen));
static void bsd_incomp __P((void *state, struct mbuf *dmsg));
static int bsd_decompress __P((void *state, struct mbuf *cmp,
    struct mbuf **dmpp));
static void bsd_reset __P((void *state));
static void bsd_comp_stats __P((void *state, struct compstat *stats));

/*
 * Procedures exported to if_ppp.c.
 */
struct compressor ppp_bsd_compress = {
    CI_BSD_COMPRESS, /* compress_proto */
    bsd_comp_alloc, /* comp_alloc */
    bsd_free, /* comp_free */
    bsd_comp_init, /* comp_init */
    bsd_reset, /* comp_reset */
    bsd_compress, /* compress */
    bsd_comp_stats, /* comp_stat */
    bsd_decomp_alloc, /* decomp_alloc */
    bsd_free, /* decomp_free */
    bsd_decomp_init, /* decomp_init */
    bsd_reset, /* decomp_reset */
    bsd_decompress, /* decompress */
    bsd_incomp, /* incomp */
    bsd_comp_stats, /* decomp_stat */
};

/*
 * the next two codes should not be changed lightly, as they must not
 * lie within the contiguous general code space.
+ */ +#define CLEAR 256 /* table clear output code */ +#define FIRST 257 /* first free entry */ +#define LAST 255 + +#define MAXCODE(b) ((1 << (b)) - 1) +#define BADCODEM1 MAXCODE(BSD_MAX_BITS) + +#define BSD_HASH(prefix,suffix,hshift) ((((u_int32_t)(suffix)) << (hshift)) \ + ^ (u_int32_t)(prefix)) +#define BSD_KEY(prefix,suffix) ((((u_int32_t)(suffix)) << 16) \ + + (u_int32_t)(prefix)) + +#define CHECK_GAP 10000 /* Ratio check interval */ + +#define RATIO_SCALE_LOG 8 +#define RATIO_SCALE (1<>RATIO_SCALE_LOG) + +/* + * clear the dictionary + */ +static void +bsd_clear(db) + struct bsd_db *db; +{ + db->clear_count++; + db->max_ent = FIRST-1; + db->n_bits = BSD_INIT_BITS; + db->ratio = 0; + db->bytes_out = 0; + db->in_count = 0; + db->checkpoint = CHECK_GAP; +} + +/* + * If the dictionary is full, then see if it is time to reset it. + * + * Compute the compression ratio using fixed-point arithmetic + * with 8 fractional bits. + * + * Since we have an infinite stream instead of a single file, + * watch only the local compression ratio. + * + * Since both peers must reset the dictionary at the same time even in + * the absence of CLEAR codes (while packets are incompressible), they + * must compute the same ratio. + */ +static int /* 1=output CLEAR */ +bsd_check(db) + struct bsd_db *db; +{ + u_int new_ratio; + + if (db->in_count >= db->checkpoint) { + /* age the ratio by limiting the size of the counts */ + if (db->in_count >= RATIO_MAX + || db->bytes_out >= RATIO_MAX) { + db->in_count -= db->in_count/4; + db->bytes_out -= db->bytes_out/4; + } + + db->checkpoint = db->in_count + CHECK_GAP; + + if (db->max_ent >= db->maxmaxcode) { + /* Reset the dictionary only if the ratio is worse, + * or if it looks as if it has been poisoned + * by incompressible data. + * + * This does not overflow, because + * db->in_count <= RATIO_MAX. 
+ */ + new_ratio = db->in_count << RATIO_SCALE_LOG; + if (db->bytes_out != 0) + new_ratio /= db->bytes_out; + + if (new_ratio < db->ratio || new_ratio < 1 * RATIO_SCALE) { + bsd_clear(db); + return 1; + } + db->ratio = new_ratio; + } + } + return 0; +} + +/* + * Return statistics. + */ +static void +bsd_comp_stats(state, stats) + void *state; + struct compstat *stats; +{ + struct bsd_db *db = (struct bsd_db *) state; + u_int out; + + stats->unc_bytes = db->uncomp_bytes; + stats->unc_packets = db->uncomp_count; + stats->comp_bytes = db->comp_bytes; + stats->comp_packets = db->comp_count; + stats->inc_bytes = db->incomp_bytes; + stats->inc_packets = db->incomp_count; + stats->ratio = db->in_count; + out = db->bytes_out; + if (stats->ratio <= 0x7fffff) + stats->ratio <<= 8; + else + out >>= 8; + if (out != 0) + stats->ratio /= out; +} + +/* + * Reset state, as on a CCP ResetReq. + */ +static void +bsd_reset(state) + void *state; +{ + struct bsd_db *db = (struct bsd_db *) state; + + db->seqno = 0; + bsd_clear(db); + db->clear_count = 0; +} + +/* + * Allocate space for a (de) compressor. 
 */
/*
 * Validate the CCP BSD-Compress option (options[0] = CI_BSD_COMPRESS,
 * options[1] = length, options[2] = version/bits packed per BSD_VERSION
 * and BSD_NBITS) and allocate a bsd_db sized for the requested code
 * width.  The lens[] code-length table is only needed (and only
 * allocated) for the decompressor (decomp != 0).
 * Returns NULL on bad options, unsupported bit width, or allocation
 * failure; otherwise an opaque state pointer for the other entry points.
 */
static void *
bsd_alloc(options, opt_len, decomp)
    u_char *options;
    int opt_len, decomp;
{
    int bits;
    u_int newlen, hsize, hshift, maxmaxcode;
    struct bsd_db *db;

    if (opt_len < CILEN_BSD_COMPRESS || options[0] != CI_BSD_COMPRESS
	|| options[1] != CILEN_BSD_COMPRESS
	|| BSD_VERSION(options[2]) != BSD_CURRENT_VERSION)
	return NULL;
    bits = BSD_NBITS(options[2]);
    /* hash table size/shift pairs for each supported code width;
     * the byte figures in the comments are total memory for both
     * directions at that width. */
    switch (bits) {
    case 9:			/* needs 82152 for both directions */
    case 10:			/* needs 84144 */
    case 11:			/* needs 88240 */
    case 12:			/* needs 96432 */
	hsize = 5003;
	hshift = 4;
	break;
    case 13:			/* needs 176784 */
	hsize = 9001;
	hshift = 5;
	break;
    case 14:			/* needs 353744 */
	hsize = 18013;
	hshift = 6;
	break;
    case 15:			/* needs 691440 */
	hsize = 35023;
	hshift = 7;
	break;
    case 16:			/* needs 1366160--far too much, */
	/* hsize = 69001; */	/* and 69001 is too big for cptr */
	/* hshift = 8; */	/* in struct bsd_db */
	/* break; */
    default:
	return NULL;
    }

    /* db->dict is a 1-element flexible-array-style tail, hence hsize-1 */
    maxmaxcode = MAXCODE(bits);
    newlen = sizeof(*db) + (hsize-1) * (sizeof(db->dict[0]));
    MALLOC(db, struct bsd_db *, newlen, M_DEVBUF, M_NOWAIT);
    if (!db)
	return NULL;
    /* zero the header only; the dictionary is initialized later by
     * bsd_init_comp_db() */
    bzero(db, sizeof(*db) - sizeof(db->dict));

    if (!decomp) {
	db->lens = NULL;
    } else {
	MALLOC(db->lens, u_int16_t *, (maxmaxcode+1) * sizeof(db->lens[0]),
	       M_DEVBUF, M_NOWAIT);
	if (!db->lens) {
	    FREE(db, M_DEVBUF);
	    return NULL;
	}
    }

    db->totlen = newlen;
    db->hsize = hsize;
    db->hshift = hshift;
    db->maxmaxcode = maxmaxcode;
    db->maxbits = bits;

    return (void *) db;
}

/*
 * Release a (de)compressor state allocated by bsd_alloc(),
 * including the decompressor's lens[] table when present.
 */
static void
bsd_free(state)
    void *state;
{
    struct bsd_db *db = (struct bsd_db *) state;

    if (db->lens)
	FREE(db->lens, M_DEVBUF);
    FREE(db, M_DEVBUF);
}

/* comp_alloc hook: compressor state (no lens[] table) */
static void *
bsd_comp_alloc(options, opt_len)
    u_char *options;
    int opt_len;
{
    return bsd_alloc(options, opt_len, 0);
}

/* decomp_alloc hook: decompressor state (with lens[] table) */
static void *
bsd_decomp_alloc(options, opt_len)
    u_char *options;
    int opt_len;
{
    return bsd_alloc(options, opt_len, 1);
}

/*
 * Initialize the database.
 *
 * Re-validates the negotiated CCP option against the width the db was
 * allocated for, seeds the per-code length table (decompressor only),
 * invalidates every hash bucket, and records the unit/header/MRU
 * parameters.  Returns 1 on success, 0 if the options do not match
 * this db (caller treats that as a negotiation failure).
 */
static int
bsd_init_comp_db(db, options, opt_len, unit, hdrlen, mru, debug, decomp)
    struct bsd_db *db;
    u_char *options;
    int opt_len, unit, hdrlen, mru, debug, decomp;
{
    int i;

    if (opt_len < CILEN_BSD_COMPRESS || options[0] != CI_BSD_COMPRESS
	|| options[1] != CILEN_BSD_COMPRESS
	|| BSD_VERSION(options[2]) != BSD_CURRENT_VERSION
	|| BSD_NBITS(options[2]) != db->maxbits
	|| (decomp && db->lens == NULL))
	return 0;

    if (decomp) {
	/* every literal code (0..LAST) expands to exactly one byte */
	i = LAST+1;
	while (i != 0)
	    db->lens[--i] = 1;
    }
    /* mark every hash bucket invalid */
    i = db->hsize;
    while (i != 0) {
	db->dict[--i].codem1 = BADCODEM1;
	db->dict[i].cptr = 0;
    }

    db->unit = unit;
    db->hdrlen = hdrlen;
    db->mru = mru;
    /* with DEBUG compiled in, debug output is unconditionally enabled;
     * otherwise it follows the caller's debug flag */
#ifndef DEBUG
    if (debug)
#endif
	db->debug = 1;

    bsd_reset(db);

    return 1;
}

/* comp_init hook: compressor has no MRU and never needs lens[] */
static int
bsd_comp_init(state, options, opt_len, unit, hdrlen, debug)
    void *state;
    u_char *options;
    int opt_len, unit, hdrlen, debug;
{
    return bsd_init_comp_db((struct bsd_db *) state, options, opt_len,
			    unit, hdrlen, 0, debug, 0);
}

/* decomp_init hook: decompressor needs the MRU to bound expansion */
static int
bsd_decomp_init(state, options, opt_len, unit, hdrlen, mru, debug)
    void *state;
    u_char *options;
    int opt_len, unit, hdrlen, mru, debug;
{
    return bsd_init_comp_db((struct bsd_db *) state, options, opt_len,
			    unit, hdrlen, mru, debug, 1);
}


/*
 * compress a packet
 * One change from the BSD compress command is that when the
 * code size expands, we do not output a bunch of padding.
 */
/*
 * NOTE(review): declared "static int bsd_compress" in the prototype
 * block above but defined here without "static"; in C the earlier
 * static declaration gives the definition internal linkage, so this
 * is an inconsistency of style, not of behavior.
 */
int	/* new slen */
bsd_compress(state, mret, mp, slen, maxolen)
    void *state;
    struct mbuf **mret;		/* return compressed mbuf chain here */
    struct mbuf *mp;		/* from here */
    int slen;			/* uncompressed length */
    int maxolen;		/* max compressed length */
{
    struct bsd_db *db = (struct bsd_db *) state;
    int hshift = db->hshift;
    u_int max_ent = db->max_ent;
    u_int n_bits = db->n_bits;
    u_int bitno = 32;		/* bits still free in accm */
    u_int32_t accm = 0, fcode;
    struct bsd_dict *dictp;
    u_char c;
    int hval, disp, ent, ilen;
    u_char *rptr, *wptr;
    u_char *cp_end;
    int olen;			/* output bytes emitted so far */
    struct mbuf *m;

/* Emit one output byte, growing the output mbuf chain on demand.
 * On allocation failure wptr goes NULL and further output is counted
 * (olen) but discarded; the caller's size check then rejects it.
 * Macro evaluates (v) once but is multi-statement: call with care. */
#define PUTBYTE(v) {					\
    ++olen;						\
    if (wptr) {						\
	*wptr++ = (v);					\
	if (wptr >= cp_end) {				\
	    m->m_len = wptr - mtod(m, u_char *);	\
	    MGET(m->m_next, M_DONTWAIT, MT_DATA);	\
	    m = m->m_next;				\
	    if (m) {					\
		m->m_len = 0;				\
		if (maxolen - olen > MLEN)		\
		    MCLGET(m, M_DONTWAIT);		\
		wptr = mtod(m, u_char *);		\
		cp_end = wptr + M_TRAILINGSPACE(m);	\
	    } else					\
		wptr = NULL;				\
	}						\
    }							\
}

/* Pack an n_bits-wide code into the big-endian bit accumulator and
 * flush whole bytes. */
#define OUTPUT(ent) {					\
    bitno -= n_bits;					\
    accm |= ((ent) << bitno);				\
    do {						\
	PUTBYTE(accm >> 24);				\
	accm <<= 8;					\
	bitno += 8;					\
    } while (bitno <= 24);				\
}

    /*
     * If the protocol is not in the range we're interested in,
     * just return without compressing the packet.  If it is,
     * the protocol becomes the first byte to compress.
     */
    rptr = mtod(mp, u_char *);
    ent = PPP_PROTOCOL(rptr);
    if (ent < 0x21 || ent > 0xf9) {
	*mret = NULL;
	return slen;
    }

    /* Don't generate compressed packets which are larger than
       the uncompressed packet. */
    if (maxolen > slen)
	maxolen = slen;

    /* Allocate one mbuf to start with.
     */
    MGET(m, M_DONTWAIT, MT_DATA);
    *mret = m;
    if (m != NULL) {
	m->m_len = 0;
	if (maxolen + db->hdrlen > MLEN)
	    MCLGET(m, M_DONTWAIT);
	/* NOTE(review): MCLGET success is not checked before the
	 * hdrlen offset below — presumably safe because hdrlen fits
	 * in a plain mbuf; confirm against callers. */
	m->m_data += db->hdrlen;
	wptr = mtod(m, u_char *);
	cp_end = wptr + M_TRAILINGSPACE(m);
    } else
	wptr = cp_end = NULL;

    /*
     * Copy the PPP header over, changing the protocol,
     * and install the 2-byte packet sequence number.
     */
    if (wptr) {
	*wptr++ = PPP_ADDRESS(rptr);	/* assumes the ppp header is */
	*wptr++ = PPP_CONTROL(rptr);	/* all in one mbuf */
	*wptr++ = 0;			/* change the protocol */
	*wptr++ = PPP_COMP;
	*wptr++ = db->seqno >> 8;
	*wptr++ = db->seqno;
    }
    ++db->seqno;

    olen = 0;
    rptr += PPP_HDRLEN;
    slen = mp->m_len - PPP_HDRLEN;
    ilen = slen + 1;		/* +1 counts the protocol byte in ent */
    for (;;) {
	if (slen <= 0) {
	    mp = mp->m_next;
	    if (!mp)
		break;
	    rptr = mtod(mp, u_char *);
	    slen = mp->m_len;
	    if (!slen)
		continue;   /* handle 0-length buffers */
	    ilen += slen;
	}

	slen--;
	c = *rptr++;
	fcode = BSD_KEY(ent, c);
	hval = BSD_HASH(ent, c, hshift);
	dictp = &db->dict[hval];

	/* Validate and then check the entry. */
	if (dictp->codem1 >= max_ent)
	    goto nomatch;
	if (dictp->f.fcode == fcode) {
	    ent = dictp->codem1+1;
	    continue;	/* found (prefix,suffix) */
	}

	/* continue probing until a match or invalid entry */
	disp = (hval == 0) ? 1 : hval;
	do {
	    hval += disp;
	    if (hval >= db->hsize)
		hval -= db->hsize;
	    dictp = &db->dict[hval];
	    if (dictp->codem1 >= max_ent)
		goto nomatch;
	} while (dictp->f.fcode != fcode);
	ent = dictp->codem1 + 1;	/* finally found (prefix,suffix) */
	continue;

    nomatch:
	OUTPUT(ent);		/* output the prefix */

	/* code -> hashtable */
	if (max_ent < db->maxmaxcode) {
	    struct bsd_dict *dictp2;
	    /* expand code size if needed */
	    if (max_ent >= MAXCODE(n_bits))
		db->n_bits = ++n_bits;

	    /* Invalidate old hash table entry using
	     * this code, and then take it over.
	     */
	    dictp2 = &db->dict[max_ent+1];
	    if (db->dict[dictp2->cptr].codem1 == max_ent)
		db->dict[dictp2->cptr].codem1 = BADCODEM1;
	    dictp2->cptr = hval;
	    dictp->codem1 = max_ent;
	    dictp->f.fcode = fcode;

	    db->max_ent = ++max_ent;
	}
	ent = c;
    }

    OUTPUT(ent);		/* output the last code */
    db->bytes_out += olen;
    db->in_count += ilen;
    if (bitno < 32)
	++db->bytes_out;	/* count complete bytes */

    if (bsd_check(db))
	OUTPUT(CLEAR);		/* do not count the CLEAR */

    /*
     * Pad dribble bits of last code with ones.
     * Do not emit a completely useless byte of ones.
     */
    if (bitno != 32)
	PUTBYTE((accm | (0xff << (bitno-8))) >> 24);

    if (m != NULL) {
	m->m_len = wptr - mtod(m, u_char *);
	m->m_next = NULL;
    }

    /*
     * Increase code size if we would have without the packet
     * boundary and as the decompressor will.
     */
    if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
	db->n_bits++;

    db->uncomp_bytes += ilen;
    ++db->uncomp_count;
    if (olen + PPP_HDRLEN + BSD_OVHD > maxolen) {
	/* throw away the compressed stuff if it is longer than uncompressed */
	if (*mret != NULL) {
	    m_freem(*mret);
	    *mret = NULL;
	}
	++db->incomp_count;
	db->incomp_bytes += ilen;
    } else {
	++db->comp_count;
	db->comp_bytes += olen + BSD_OVHD;
    }

    /* returned length tells the caller whether compression won */
    return olen + PPP_HDRLEN + BSD_OVHD;
#undef OUTPUT
#undef PUTBYTE
}


/*
 * Update the "BSD Compress" dictionary on the receiver for
 * incompressible data by pretending to compress the incoming data.
 */
static void
bsd_incomp(state, dmsg)
    void *state;
    struct mbuf *dmsg;
{
    struct bsd_db *db = (struct bsd_db *) state;
    u_int hshift = db->hshift;
    u_int max_ent = db->max_ent;
    u_int n_bits = db->n_bits;
    struct bsd_dict *dictp;
    u_int32_t fcode;
    u_char c;
    u_int32_t hval, disp;
    int slen, ilen;
    u_int bitno = 7;		/* simulated output bit count (rounds up) */
    u_char *rptr;
    u_int ent;

    /*
     * If the protocol is not in the range we're interested in,
     * just return without looking at the packet.
     If it is,
     * the protocol becomes the first byte to "compress".
     */
    rptr = mtod(dmsg, u_char *);
    ent = PPP_PROTOCOL(rptr);
    if (ent < 0x21 || ent > 0xf9)
	return;

    db->seqno++;
    ilen = 1;		/* count the protocol as 1 byte */
    rptr += PPP_HDRLEN;
    slen = dmsg->m_len - PPP_HDRLEN;
    for (;;) {
	if (slen <= 0) {
	    dmsg = dmsg->m_next;
	    if (!dmsg)
		break;
	    rptr = mtod(dmsg, u_char *);
	    slen = dmsg->m_len;
	    continue;
	}
	ilen += slen;

	/* same hash/probe scheme as bsd_compress(), but codes are
	 * only counted (bitno), never emitted */
	do {
	    c = *rptr++;
	    fcode = BSD_KEY(ent, c);
	    hval = BSD_HASH(ent, c, hshift);
	    dictp = &db->dict[hval];

	    /* validate and then check the entry */
	    if (dictp->codem1 >= max_ent)
		goto nomatch;
	    if (dictp->f.fcode == fcode) {
		ent = dictp->codem1+1;
		continue;   /* found (prefix,suffix) */
	    }

	    /* continue probing until a match or invalid entry */
	    disp = (hval == 0) ? 1 : hval;
	    do {
		hval += disp;
		if (hval >= db->hsize)
		    hval -= db->hsize;
		dictp = &db->dict[hval];
		if (dictp->codem1 >= max_ent)
		    goto nomatch;
	    } while (dictp->f.fcode != fcode);
	    ent = dictp->codem1+1;
	    continue;	/* finally found (prefix,suffix) */

	nomatch:		/* output (count) the prefix */
	    bitno += n_bits;

	    /* code -> hashtable */
	    if (max_ent < db->maxmaxcode) {
		struct bsd_dict *dictp2;
		/* expand code size if needed */
		if (max_ent >= MAXCODE(n_bits))
		    db->n_bits = ++n_bits;

		/* Invalidate previous hash table entry
		 * assigned this code, and then take it over.
		 */
		dictp2 = &db->dict[max_ent+1];
		if (db->dict[dictp2->cptr].codem1 == max_ent)
		    db->dict[dictp2->cptr].codem1 = BADCODEM1;
		dictp2->cptr = hval;
		dictp->codem1 = max_ent;
		dictp->f.fcode = fcode;

		db->max_ent = ++max_ent;
		/* keep lens[] in sync for the decompressor side */
		db->lens[max_ent] = db->lens[ent]+1;
	    }
	    ent = c;
	} while (--slen != 0);
    }
    bitno += n_bits;		/* output (count) the last code */
    db->bytes_out += bitno/8;
    db->in_count += ilen;
    (void)bsd_check(db);	/* keep checkpoint in step with the peer */

    ++db->incomp_count;
    db->incomp_bytes += ilen;
    ++db->uncomp_count;
    db->uncomp_bytes += ilen;

    /* Increase code size if we would have without the packet
     * boundary and as the decompressor will.
     */
    if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode)
	db->n_bits++;
}


/*
 * Decompress "BSD Compress".
 *
 * Because of patent problems, we return DECOMP_ERROR for errors
 * found by inspecting the input data and for system problems, but
 * DECOMP_FATALERROR for any errors which could possibly be said to
 * be being detected "after" decompression.  For DECOMP_ERROR,
 * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be
 * infringing a patent of Motorola's if we do, so we take CCP down
 * instead.
 *
 * Given that the frame has the correct sequence number and a good FCS,
 * errors such as invalid codes in the input most likely indicate a
 * bug, so we return DECOMP_FATALERROR for them in order to turn off
 * compression, even though they are detected by inspecting the input.
 */
/*
 * NOTE(review): like bsd_compress(), defined without "static" despite
 * the static prototype above; prior declaration gives it internal
 * linkage, so this is stylistic only.
 */
int
bsd_decompress(state, cmp, dmpp)
    void *state;
    struct mbuf *cmp, **dmpp;
{
    struct bsd_db *db = (struct bsd_db *) state;
    u_int max_ent = db->max_ent;
    u_int32_t accm = 0;
    u_int bitno = 32;		/* 1st valid bit in accm */
    u_int n_bits = db->n_bits;
    u_int tgtbitno = 32-n_bits;	/* bitno when we have a code */
    struct bsd_dict *dictp;
    int explen, i, seq, len;
    u_int incode, oldcode, finchar;
    u_char *p, *rptr, *wptr;
    struct mbuf *m, *dmp, *mret;
    int adrs, ctrl, ilen;
    int space, codelen, extra;

    /*
     * Save the address/control from the PPP header
     * and then get the sequence number.
     */
    *dmpp = NULL;
    rptr = mtod(cmp, u_char *);
    adrs = PPP_ADDRESS(rptr);
    ctrl = PPP_CONTROL(rptr);
    rptr += PPP_HDRLEN;
    len = cmp->m_len - PPP_HDRLEN;
    seq = 0;
    /* the 2-byte sequence number may straddle mbufs */
    for (i = 0; i < 2; ++i) {
	while (len <= 0) {
	    cmp = cmp->m_next;
	    if (cmp == NULL)
		return DECOMP_ERROR;
	    rptr = mtod(cmp, u_char *);
	    len = cmp->m_len;
	}
	seq = (seq << 8) + *rptr++;
	--len;
    }

    /*
     * Check the sequence number and give up if it differs from
     * the value we're expecting.
     */
    if (seq != db->seqno) {
	if (db->debug)
	    printf("bsd_decomp%d: bad sequence # %d, expected %d\n",
		   db->unit, seq, db->seqno - 1);
	return DECOMP_ERROR;
    }
    ++db->seqno;

    /*
     * Allocate one mbuf to start with.
     */
    MGETHDR(dmp, M_DONTWAIT, MT_DATA);
    if (dmp == NULL)
	return DECOMP_ERROR;
    mret = dmp;
    dmp->m_len = 0;
    dmp->m_next = NULL;
    MCLGET(dmp, M_DONTWAIT);
    /* NOTE(review): MCLGET success not checked before the hdrlen
     * offset — presumably hdrlen fits in a plain mbuf; confirm. */
    dmp->m_data += db->hdrlen;
    wptr = mtod(dmp, u_char *);
    space = M_TRAILINGSPACE(dmp) - PPP_HDRLEN + 1;

    /*
     * Fill in the ppp header, but not the last byte of the protocol
     * (that comes from the decompressed data).
     */
    wptr[0] = adrs;
    wptr[1] = ctrl;
    wptr[2] = 0;
    wptr += PPP_HDRLEN - 1;

    ilen = len;
    oldcode = CLEAR;
    explen = 0;
    for (;;) {
	if (len == 0) {
	    cmp = cmp->m_next;
	    if (!cmp)		/* quit at end of message */
		break;
	    rptr = mtod(cmp, u_char *);
	    len = cmp->m_len;
	    ilen += len;
	    continue;		/* handle 0-length buffers */
	}

	/*
	 * Accumulate bytes until we have a complete code.
	 * Then get the next code, relying on the 32-bit,
	 * unsigned accm to mask the result.
	 */
	bitno -= 8;
	accm |= *rptr++ << bitno;
	--len;
	if (tgtbitno < bitno)
	    continue;
	incode = accm >> tgtbitno;
	accm <<= n_bits;
	bitno += n_bits;

	if (incode == CLEAR) {
	    /*
	     * The dictionary must only be cleared at
	     * the end of a packet.  But there could be an
	     * empty mbuf at the end.
	     */
	    if (len > 0 || cmp->m_next != NULL) {
		while ((cmp = cmp->m_next) != NULL)
		    len += cmp->m_len;
		if (len > 0) {
		    m_freem(mret);
		    if (db->debug)
			printf("bsd_decomp%d: bad CLEAR\n", db->unit);
		    return DECOMP_FATALERROR;	/* probably a bug */
		}
	    }
	    bsd_clear(db);
	    explen = ilen = 0;
	    break;
	}

	/* reject codes the compressor cannot have emitted yet */
	if (incode > max_ent + 2 || incode > db->maxmaxcode
	    || (incode > max_ent && oldcode == CLEAR)) {
	    m_freem(mret);
	    if (db->debug) {
		printf("bsd_decomp%d: bad code 0x%x oldcode=0x%x ",
		       db->unit, incode, oldcode);
		printf("max_ent=0x%x explen=%d seqno=%d\n",
		       max_ent, explen, db->seqno);
	    }
	    return DECOMP_FATALERROR;	/* probably a bug */
	}

	/* Special case for KwKwK string.
	 */
	if (incode > max_ent) {
	    finchar = oldcode;
	    extra = 1;
	} else {
	    finchar = incode;
	    extra = 0;
	}

	codelen = db->lens[finchar];
	explen += codelen + extra;
	if (explen > db->mru + 1) {
	    m_freem(mret);
	    if (db->debug) {
		printf("bsd_decomp%d: ran out of mru\n", db->unit);
#ifdef DEBUG
		while ((cmp = cmp->m_next) != NULL)
		    len += cmp->m_len;
		printf(" len=%d, finchar=0x%x, codelen=%d, explen=%d\n",
		       len, finchar, codelen, explen);
#endif
	    }
	    return DECOMP_FATALERROR;
	}

	/*
	 * For simplicity, the decoded characters go in a single mbuf,
	 * so we allocate a single extra cluster mbuf if necessary.
	 */
	if ((space -= codelen + extra) < 0) {
	    dmp->m_len = wptr - mtod(dmp, u_char *);
	    MGET(m, M_DONTWAIT, MT_DATA);
	    if (m == NULL) {
		m_freem(mret);
		return DECOMP_ERROR;
	    }
	    m->m_len = 0;
	    m->m_next = NULL;
	    dmp->m_next = m;
	    MCLGET(m, M_DONTWAIT);
	    space = M_TRAILINGSPACE(m) - (codelen + extra);
	    if (space < 0) {
		/* now that's what I call *compression*. */
		m_freem(mret);
		return DECOMP_ERROR;
	    }
	    dmp = m;
	    wptr = mtod(dmp, u_char *);
	}

	/*
	 * Decode this code and install it in the decompressed buffer.
	 * Codes above LAST are dictionary entries; walk the prefix
	 * chain backwards, writing suffix bytes right-to-left.
	 */
	p = (wptr += codelen);
	while (finchar > LAST) {
	    dictp = &db->dict[db->dict[finchar].cptr];
#ifdef DEBUG
	    if (--codelen <= 0 || dictp->codem1 != finchar-1)
		goto bad;
#endif
	    *--p = dictp->f.hs.suffix;
	    finchar = dictp->f.hs.prefix;
	}
	*--p = finchar;

#ifdef DEBUG
	if (--codelen != 0)
	    printf("bsd_decomp%d: short by %d after code 0x%x, max_ent=0x%x\n",
		   db->unit, codelen, incode, max_ent);
#endif

	if (extra)		/* the KwKwK case again */
	    *wptr++ = finchar;

	/*
	 * If not first code in a packet, and
	 * if not out of code space, then allocate a new code.
	 *
	 * Keep the hash table correct so it can be used
	 * with uncompressed packets.
	 */
	if (oldcode != CLEAR && max_ent < db->maxmaxcode) {
	    struct bsd_dict *dictp2;
	    u_int32_t fcode;
	    u_int32_t hval, disp;

	    fcode = BSD_KEY(oldcode,finchar);
	    hval = BSD_HASH(oldcode,finchar,db->hshift);
	    dictp = &db->dict[hval];

	    /* look for a free hash table entry */
	    if (dictp->codem1 < max_ent) {
		disp = (hval == 0) ? 1 : hval;
		do {
		    hval += disp;
		    if (hval >= db->hsize)
			hval -= db->hsize;
		    dictp = &db->dict[hval];
		} while (dictp->codem1 < max_ent);
	    }

	    /*
	     * Invalidate previous hash table entry
	     * assigned this code, and then take it over
	     */
	    dictp2 = &db->dict[max_ent+1];
	    if (db->dict[dictp2->cptr].codem1 == max_ent) {
		db->dict[dictp2->cptr].codem1 = BADCODEM1;
	    }
	    dictp2->cptr = hval;
	    dictp->codem1 = max_ent;
	    dictp->f.fcode = fcode;

	    db->max_ent = ++max_ent;
	    db->lens[max_ent] = db->lens[oldcode]+1;

	    /* Expand code size if needed. */
	    if (max_ent >= MAXCODE(n_bits) && max_ent < db->maxmaxcode) {
		db->n_bits = ++n_bits;
		tgtbitno = 32-n_bits;
	    }
	}
	oldcode = incode;
    }
    dmp->m_len = wptr - mtod(dmp, u_char *);

    /*
     * Keep the checkpoint right so that incompressible packets
     * clear the dictionary at the right times.
+ */ + db->bytes_out += ilen; + db->in_count += explen; + if (bsd_check(db) && db->debug) { + printf("bsd_decomp%d: peer should have cleared dictionary\n", + db->unit); + } + + ++db->comp_count; + db->comp_bytes += ilen + BSD_OVHD; + ++db->uncomp_count; + db->uncomp_bytes += explen; + + *dmpp = mret; + return DECOMP_OK; + +#ifdef DEBUG + bad: + if (codelen <= 0) { + printf("bsd_decomp%d: fell off end of chain ", db->unit); + printf("0x%x at 0x%x by 0x%x, max_ent=0x%x\n", + incode, finchar, db->dict[finchar].cptr, max_ent); + } else if (dictp->codem1 != finchar-1) { + printf("bsd_decomp%d: bad code chain 0x%x finchar=0x%x ", + db->unit, incode, finchar); + printf("oldcode=0x%x cptr=0x%x codem1=0x%x\n", oldcode, + db->dict[finchar].cptr, dictp->codem1); + } + m_freem(mret); + return DECOMP_FATALERROR; +#endif /* DEBUG */ +} +#endif /* DO_BSD_COMPRESS */ diff --git a/bsd/net/dlil.c b/bsd/net/dlil.c new file mode 100644 index 000000000..fcc3f96a5 --- /dev/null +++ b/bsd/net/dlil.c @@ -0,0 +1,1997 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. + * + * Data Link Inteface Layer + * Author: Ted Walker + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +#define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0) +#define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2) +#define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8)) +#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8)) +#define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8)) + + +#define MAX_DL_TAGS 50 +#define MAX_DLIL_FILTERS 50 +#define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */ +#define MAX_LINKADDR 4 /* LONGWORDS */ +#define M_NKE M_IFADDR + +#define PFILT(x) ((struct dlil_filterq_entry *) (x))->variants.pr_filter +#define IFILT(x) ((struct dlil_filterq_entry *) (x))->variants.if_filter + +struct dl_tag_str { + struct ifnet *ifp; + struct if_proto *proto; + struct dlil_filterq_head *pr_flt_head; +}; + + +struct dlil_stats_str { + int inject_pr_in1; + int inject_pr_in2; + int inject_pr_out1; + int inject_pr_out2; + int inject_if_in1; + int inject_if_in2; + int inject_if_out1; + int inject_if_out2; +}; + + +struct dlil_filter_id_str { + int type; + struct dlil_filterq_head *head; + struct dlil_filterq_entry *filter_ptr; + struct ifnet *ifp; + struct if_proto *proto; +}; + + + +struct if_family_str { + TAILQ_ENTRY(if_family_str) if_fam_next; + u_long if_family; + int refcnt; + int flags; + +#define DLIL_SHUTDOWN 1 + + int (*add_if)(struct ifnet *ifp); + int (*del_if)(struct ifnet *ifp); + int (*add_proto)(struct ddesc_head_str *demux_desc_head, + struct if_proto *proto, u_long dl_tag); + int (*del_proto)(struct if_proto *proto, u_long dl_tag); + int (*ifmod_ioctl)(struct ifnet *ifp, u_long command, caddr_t data); + int (*shutdown)(); +}; + + + +struct dlil_stats_str dlil_stats; + +static +struct 
dlil_filter_id_str dlil_filters[MAX_DLIL_FILTERS+1]; + +static +struct dl_tag_str dl_tag_array[MAX_DL_TAGS+1]; + +static +TAILQ_HEAD(, if_family_str) if_family_head; + +static ifnet_inited = 0; + +int dlil_initialized = 0; +decl_simple_lock_data(, dlil_input_lock) +int dlil_input_thread_wakeup = 0; +int dlil_expand_mcl; +static struct mbuf *dlil_input_mbuf_head = NULL; +static struct mbuf *dlil_input_mbuf_tail = NULL; +static struct mbuf *dlil_input_loop_head = NULL; +static struct mbuf *dlil_input_loop_tail = NULL; + +static void dlil_input_thread(void); +extern void run_netisr(void); + + +/* + * Internal functions. + */ + +static +struct if_family_str *find_family_module(u_long if_family) +{ + struct if_family_str *mod = NULL; + + TAILQ_FOREACH(mod, &if_family_head, if_fam_next) { + if (mod->if_family == (if_family & 0xffff)) + break; + } + + return mod; +} + + +/* + * Public functions. + */ + +struct ifnet *ifbyfamily(u_long family, short unit) +{ + struct ifnet *ifp; + + TAILQ_FOREACH(ifp, &ifnet, if_link) + if ((family == ifp->if_family) && + (ifp->if_unit == unit)) + return ifp; + + return 0; +} + +struct if_proto *dlttoproto(dl_tag) + u_long dl_tag; +{ + return dl_tag_array[dl_tag].proto; +} + + +u_long ifptodlt(struct ifnet *ifp, u_long proto_family) +{ + struct if_proto *proto; + struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; + + + TAILQ_FOREACH(proto, tmp, next) + if (proto->ifp == ifp) + if (proto->protocol_family == proto_family) + return proto->dl_tag; + + return 0; +} + + +int dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag) +{ + struct ifnet *ifp; + + ifp = ifbyfamily(if_family, unit); + if (!ifp) + return ENOENT; + + *dl_tag = ifptodlt(ifp, proto_family); + if (*dl_tag == 0) + return EPROTONOSUPPORT; + else + return 0; +} + + +int dlil_get_next_dl_tag(u_long current_tag, struct dl_tag_attr_str *next) +{ + int i; + + for (i = (current_tag+1); i < MAX_DL_TAGS; i++) + if 
(dl_tag_array[i].ifp) { + next->dl_tag = i; + next->if_flags = dl_tag_array[i].ifp->if_flags; + next->if_unit = dl_tag_array[i].ifp->if_unit; + next->protocol_family = dl_tag_array[i].proto->protocol_family; + next->if_family = dl_tag_array[i].ifp->if_family; + return 0; + } + + /* + * If we got here, there are no more entries + */ + + return ENOENT; +} + + +void +dlil_init() +{ + int i; + + printf("dlil_init\n"); + + TAILQ_INIT(&if_family_head); + for (i=0; i < MAX_DL_TAGS; i++) + dl_tag_array[i].ifp = 0; + + for (i=0; i < MAX_DLIL_FILTERS; i++) + dlil_filters[i].type = 0; + + bzero(&dlil_stats, sizeof(dlil_stats)); + + simple_lock_init(&dlil_input_lock); + + /* + * Start up the dlil input thread once everything is initialized + */ + (void) kernel_thread(kernel_task, dlil_input_thread); +} + + +u_long get_new_filter_id() +{ + u_long i; + + for (i=1; i < MAX_DLIL_FILTERS; i++) + if (dlil_filters[i].type == 0) + return i; + + return 0; +} + + +int dlil_attach_interface_filter(struct ifnet *ifp, + struct dlil_if_flt_str *if_filter, + u_long *filter_id, + int insertion_point) +{ + int s; + int retval; + struct dlil_filterq_entry *tmp_ptr; + struct dlil_filterq_entry *if_filt; + struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + boolean_t funnel_state; + + + MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK); + bcopy((caddr_t) if_filter, (caddr_t) &tmp_ptr->variants.if_filter, + sizeof(struct dlil_if_flt_str)); + + funnel_state = thread_funnel_set(network_flock, TRUE); + + s = splnet(); + + if (insertion_point != DLIL_LAST_FILTER) { + TAILQ_FOREACH(if_filt, fhead, que) + if (insertion_point == if_filt->filter_id) { + TAILQ_INSERT_BEFORE(if_filt, tmp_ptr, que); + break; + } + } + else + TAILQ_INSERT_TAIL(fhead, tmp_ptr, que); + + if (*filter_id = get_new_filter_id()) { + dlil_filters[*filter_id].filter_ptr = tmp_ptr; + dlil_filters[*filter_id].head = (struct dlil_filterq_head *) &ifp->if_flt_head; + 
dlil_filters[*filter_id].type = DLIL_IF_FILTER; + dlil_filters[*filter_id].ifp = ifp; + tmp_ptr->filter_id = *filter_id; + tmp_ptr->type = DLIL_IF_FILTER; + retval = 0; + } + else { + kprintf("dlil_attach_interface_filter - can't alloc filter_id\n"); + TAILQ_REMOVE(fhead, tmp_ptr, que); + FREE(tmp_ptr, M_NKE); + retval = ENOMEM; + } + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return retval; +} + + +int dlil_attach_protocol_filter(u_long dl_tag, + struct dlil_pr_flt_str *pr_filter, + u_long *filter_id, + int insertion_point) +{ + struct dlil_filterq_entry *tmp_ptr; + struct dlil_filterq_entry *pr_filt; + int s; + int retval; + boolean_t funnel_state; + + if (dl_tag > MAX_DL_TAGS) + return ERANGE; + + if (dl_tag_array[dl_tag].ifp == 0) + return ENOENT; + + MALLOC(tmp_ptr, struct dlil_filterq_entry *, sizeof(*tmp_ptr), M_NKE, M_WAITOK); + bcopy((caddr_t) pr_filter, (caddr_t) &tmp_ptr->variants.pr_filter, + sizeof(struct dlil_pr_flt_str)); + + funnel_state = thread_funnel_set(network_flock, TRUE); + + s = splnet(); + if (insertion_point != DLIL_LAST_FILTER) { + TAILQ_FOREACH(pr_filt, dl_tag_array[dl_tag].pr_flt_head, que) + if (insertion_point == pr_filt->filter_id) { + TAILQ_INSERT_BEFORE(pr_filt, tmp_ptr, que); + break; + } + } + else + TAILQ_INSERT_TAIL(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que); + + + if (*filter_id = get_new_filter_id()) { + dlil_filters[*filter_id].filter_ptr = tmp_ptr; + dlil_filters[*filter_id].head = dl_tag_array[dl_tag].pr_flt_head; + dlil_filters[*filter_id].type = DLIL_PR_FILTER; + dlil_filters[*filter_id].proto = dl_tag_array[dl_tag].proto; + dlil_filters[*filter_id].ifp = dl_tag_array[dl_tag].ifp; + tmp_ptr->filter_id = *filter_id; + tmp_ptr->type = DLIL_PR_FILTER; + retval = 0; + } + else { + kprintf("dlil_attach_protocol_filter - can't alloc filter_id\n"); + TAILQ_REMOVE(dl_tag_array[dl_tag].pr_flt_head, tmp_ptr, que); + FREE(tmp_ptr, M_NKE); + retval = ENOMEM; + } + + splx(s); + 
thread_funnel_set(network_flock, funnel_state); + return retval; +} + + +int +dlil_detach_filter(u_long filter_id) +{ + struct dlil_filter_id_str *flt; + int s; + boolean_t funnel_state; + + if (filter_id > MAX_DLIL_FILTERS) { + kprintf("dlil_detach_filter - Bad filter_id value %d\n", filter_id); + return ERANGE; + } + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + flt = &dlil_filters[filter_id]; + if (flt->type == 0) { + kprintf("dlil_detach_filter - no such filter_id %d\n", filter_id); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + + if (flt->type == DLIL_IF_FILTER) { + if (IFILT(flt->filter_ptr).filter_detach) + (*IFILT(flt->filter_ptr).filter_detach)(IFILT(flt->filter_ptr).cookie); + } + else { + if (flt->type == DLIL_PR_FILTER) { + if (PFILT(flt->filter_ptr).filter_detach) + (*PFILT(flt->filter_ptr).filter_detach)(PFILT(flt->filter_ptr).cookie); + } + } + + TAILQ_REMOVE(flt->head, flt->filter_ptr, que); + FREE(flt->filter_ptr, M_NKE); + flt->type = 0; + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + + +void +dlil_input_thread_continue(void) +{ + while (1) { + struct mbuf *m, *m_loop; + int expand_mcl; + + simple_lock(&dlil_input_lock); + m = dlil_input_mbuf_head; + dlil_input_mbuf_head = NULL; + dlil_input_mbuf_tail = NULL; + m_loop = dlil_input_loop_head; + dlil_input_loop_head = NULL; + dlil_input_loop_tail = NULL; + simple_unlock(&dlil_input_lock); + + MBUF_LOCK(); + expand_mcl = dlil_expand_mcl; + dlil_expand_mcl = 0; + MBUF_UNLOCK(); + if (expand_mcl) { + caddr_t p; + MCLALLOC(p, M_WAIT); + MCLFREE(p); + } + + /* + * NOTE warning %%% attention !!!! + * We should think about putting some thread starvation safeguards if + * we deal with long chains of packets. 
+ */ + while (m) { + struct mbuf *m0 = m->m_nextpkt; + void *header = m->m_pkthdr.header; + + m->m_nextpkt = NULL; + m->m_pkthdr.header = NULL; + (void) dlil_input_packet(m->m_pkthdr.rcvif, m, header); + m = m0; + } + m = m_loop; + while (m) { + struct mbuf *m0 = m->m_nextpkt; + void *header = m->m_pkthdr.header; + struct ifnet *ifp = (struct ifnet *) m->m_pkthdr.aux; + + m->m_nextpkt = NULL; + m->m_pkthdr.header = NULL; + m->m_pkthdr.aux = NULL; + (void) dlil_input_packet(ifp, m, header); + m = m0; + } + + if (netisr != 0) + run_netisr(); + + if (dlil_input_mbuf_head == NULL && + dlil_input_loop_head == NULL && + netisr == 0) { + assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT); +#if defined (__i386__) + thread_block(0); +#else + thread_block(dlil_input_thread_continue); +#endif + /* NOTREACHED */ + } + } +} + +void dlil_input_thread(void) +{ + register thread_t self = current_thread(); + extern void stack_privilege(thread_t thread); + + printf("dlil_input_thread %x\n", self); + + /* + * Make sure that this thread + * always has a kernel stack, and + * bind it to the master cpu. + */ + stack_privilege(self); + + /* The dlil thread is always funneled */ + thread_funnel_set(network_flock, TRUE); + dlil_initialized = 1; + dlil_input_thread_continue(); +} + +int +dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail) +{ + /* WARNING + * Because of loopbacked multicast we cannot stuff the ifp in + * the rcvif of the packet header: loopback has its own dlil + * input queue + */ + + simple_lock(&dlil_input_lock); + if (ifp->if_type != IFT_LOOP) { + if (dlil_input_mbuf_head == NULL) + dlil_input_mbuf_head = m_head; + else if (dlil_input_mbuf_tail != NULL) + dlil_input_mbuf_tail->m_nextpkt = m_head; + dlil_input_mbuf_tail = m_tail ? m_tail : m_head; + } else { + if (dlil_input_loop_head == NULL) + dlil_input_loop_head = m_head; + else if (dlil_input_loop_tail != NULL) + dlil_input_loop_tail->m_nextpkt = m_head; + dlil_input_loop_tail = m_tail ? 
m_tail : m_head; + } + simple_unlock(&dlil_input_lock); + + wakeup((caddr_t)&dlil_input_thread_wakeup); + + return 0; +} + +int +dlil_input_packet(struct ifnet *ifp, struct mbuf *m, + char *frame_header) +{ + struct ifnet *orig_ifp = 0; + struct dlil_filterq_entry *tmp; + int retval; + struct if_proto *ifproto = 0; + struct if_proto *proto; + struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + + + KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START,0,0,0,0,0); + + /* + * Run interface filters + */ + + while (orig_ifp != ifp) { + orig_ifp = ifp; + + TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) { + if (IFILT(tmp).filter_if_input) { + retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie, + &ifp, + &m, + &frame_header); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + + if (ifp != orig_ifp) + break; + } + } + + ifp->if_lastchange = time; + + /* + * Call family demux module. If the demux module finds a match + * for the frame it will fill-in the ifproto pointer. + */ + + retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto ); + + if (m->m_flags & (M_BCAST|M_MCAST)) + ifp->if_imcasts++; + + if ((retval) && (retval != EJUSTRETURN) && (ifp->offercnt)) { + /* + * No match was found, look for any offers. + */ + struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; + TAILQ_FOREACH(proto, tmp, next) { + if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) { + ifproto = proto; + retval = 0; + break; + } + } + } + + if (retval) { + if (retval != EJUSTRETURN) { + m_freem(m); + return retval; + } + else + return 0; + } + else + if (ifproto == 0) { + printf("ERROR - dlil_input - if_demux didn't return an if_proto pointer\n"); + m_freem(m); + return 0; + } + +/* + * Call any attached protocol filters. 
+ */ + + TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) { + if (PFILT(tmp).filter_dl_input) { + retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie, + &m, + &frame_header, + &ifp); + + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + } + + + + retval = (*ifproto->dl_input)(m, frame_header, + ifp, ifproto->dl_tag, + TRUE); + + if (retval == EJUSTRETURN) + retval = 0; + else + if (retval) + m_freem(m); + + KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END,0,0,0,0,0); + return retval; +} + + + +void ether_input(ifp, eh, m) + struct ifnet *ifp; + struct ether_header *eh; + struct mbuf *m; + +{ + kprintf("Someone is calling ether_input!!\n"); + + dlil_input(ifp, m, NULL); +} + + +int +dlil_event(struct ifnet *ifp, struct kern_event_msg *event) +{ + struct dlil_filterq_entry *filt; + int retval = 0; + struct ifnet *orig_ifp = 0; + struct if_proto *proto; + struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + struct kev_msg kev_msg; + struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; + boolean_t funnel_state; + + + funnel_state = thread_funnel_set(network_flock, TRUE); + + while (orig_ifp != ifp) { + orig_ifp = ifp; + + TAILQ_FOREACH_REVERSE(filt, fhead, que, dlil_filterq_head) { + if (IFILT(filt).filter_if_event) { + retval = (*IFILT(filt).filter_if_event)(IFILT(filt).cookie, + &ifp, + &event); + + if (retval) { + (void) thread_funnel_set(network_flock, funnel_state); + if (retval == EJUSTRETURN) + return 0; + else + return retval; + } + } + + if (ifp != orig_ifp) + break; + } + } + + + /* + * Call Interface Module event hook, if any. 
+ */ + + if (ifp->if_event) { + retval = ifp->if_event(ifp, (caddr_t) event); + + if (retval) { + (void) thread_funnel_set(network_flock, funnel_state); + + if (retval == EJUSTRETURN) + return 0; + else + return retval; + } + } + + /* + * Call dl_event entry point for all protocols attached to this interface + */ + + TAILQ_FOREACH(proto, tmp, next) { + /* + * Call any attached protocol filters. + */ + + TAILQ_FOREACH_REVERSE(filt, &proto->pr_flt_head, que, dlil_filterq_head) { + if (PFILT(filt).filter_dl_event) { + retval = (*PFILT(filt).filter_dl_event)(PFILT(filt).cookie, + event); + + if (retval) { + (void) thread_funnel_set(network_flock, funnel_state); + if (retval == EJUSTRETURN) + return 0; + else + return retval; + } + } + } + + + /* + * Finally, call the dl_event entry point (if any) + */ + + if (proto->dl_event) + retval = (*proto->dl_event)(event, proto->dl_tag); + + if (retval == EJUSTRETURN) { + (void) thread_funnel_set(network_flock, funnel_state); + return 0; + } + } + + + /* + * Now, post this event to the Kernel Event message queue + */ + + kev_msg.vendor_code = event->vendor_code; + kev_msg.kev_class = event->kev_class; + kev_msg.kev_subclass = event->kev_subclass; + kev_msg.event_code = event->event_code; + kev_msg.dv[0].data_ptr = &event->event_data[0]; + kev_msg.dv[0].data_length = event->total_size - KEV_MSG_HEADER_SIZE; + kev_msg.dv[1].data_length = 0; + + kev_post_msg(&kev_msg); + + (void) thread_funnel_set(network_flock, funnel_state); + return 0; +} + + + +int +dlil_output(u_long dl_tag, + struct mbuf *m, + caddr_t route, + struct sockaddr *dest, + int raw + ) +{ + char *frame_type; + char *dst_linkaddr; + struct ifnet *orig_ifp = 0; + struct ifnet *ifp; + struct if_proto *proto; + struct dlil_filterq_entry *tmp; + int retval = 0; + char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4]; + char dst_linkaddr_buffer[MAX_LINKADDR * 4]; + struct dlil_filterq_head *fhead; + + + KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_START,0,0,0,0,0); + + /* + * 
Temporary hackery until all the existing protocols can become fully + * "dl_tag aware". Some things only have the ifp, so this handles that + * case for the time being. + */ + + if (dl_tag > MAX_DL_TAGS) { + /* dl_tag is really an ifnet pointer! */ + + ifp = (struct ifnet *) dl_tag; + dl_tag = ifp->if_data.default_proto; + if (dl_tag) + proto = dl_tag_array[dl_tag].proto; + else + retval = ENOENT; + } + else { + if ((dl_tag == 0) || (dl_tag_array[dl_tag].ifp == 0)) + retval = ENOENT; + else { + ifp = dl_tag_array[dl_tag].ifp; + proto = dl_tag_array[dl_tag].proto; + } + } + + if (retval) { + m_freem(m); + return retval; + } + + frame_type = frame_type_buffer; + dst_linkaddr = dst_linkaddr_buffer; + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + + if ((raw == 0) && (proto->dl_pre_output)) { + retval = (*proto->dl_pre_output)(ifp, &m, dest, route, + frame_type, dst_linkaddr, dl_tag); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + +/* + * Run any attached protocol filters. + */ + + if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) { + TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) { + if (PFILT(tmp).filter_dl_output) { + retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie, + &m, &ifp, &dest, dst_linkaddr, frame_type); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + } + } + + +/* + * Call framing module + */ + if ((raw == 0) && (ifp->if_framer)) { + retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else + { + m_freem(m); + return retval; + } + } + } + +#if BRIDGE + if (do_bridge) { + struct mbuf *m0 = m ; + + if (m->m_pkthdr.rcvif) + m->m_pkthdr.rcvif = NULL ; + ifp = bridge_dst_lookup(m); + bdg_forward(&m0, ifp); + if (m0) + m_freem(m0); + + return 0; + } +#endif + + +/* + * Let interface filters (if any) do their thing ... 
+ */ + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + if (TAILQ_EMPTY(fhead) == 0) { + while (orig_ifp != ifp) { + orig_ifp = ifp; + TAILQ_FOREACH(tmp, fhead, que) { + if (IFILT(tmp).filter_if_output) { + retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie, + &ifp, + &m); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + + } + + if (ifp != orig_ifp) + break; + } + } + } + +/* + * Finally, call the driver. + */ + + KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_START, 0,0,0,0,0); + retval = (*ifp->if_output)(ifp, m); + KERNEL_DEBUG(DBG_FNC_DLIL_IFOUT | DBG_FUNC_END, 0,0,0,0,0); + + KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END,0,0,0,0,0); + + if ((retval == 0) || (retval == EJUSTRETURN)) + return 0; + else + return retval; +} + + +int +dlil_ioctl(u_long proto_fam, + struct ifnet *ifp, + u_long ioctl_code, + caddr_t ioctl_arg) +{ + struct dlil_filterq_entry *tmp; + struct dlil_filterq_head *fhead; + int retval = EOPNOTSUPP; + int retval2 = EOPNOTSUPP; + u_long dl_tag; + struct if_family_str *if_family; + + + if (proto_fam) { + retval = dlil_find_dltag(ifp->if_family, ifp->if_unit, + proto_fam, &dl_tag); + + if (retval == 0) { + if (dl_tag_array[dl_tag].ifp != ifp) + return ENOENT; + +/* + * Run any attached protocol filters. 
+ */
+			TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) {
+				if (PFILT(tmp).filter_dl_ioctl) {
+					retval =
+					    (*PFILT(tmp).filter_dl_ioctl)(PFILT(tmp).cookie,
+					                                  dl_tag_array[dl_tag].ifp,
+					                                  ioctl_code,
+					                                  ioctl_arg);
+
+					if (retval) {
+						if (retval == EJUSTRETURN)
+							return 0;
+						else
+							return retval;
+					}
+				}
+			}
+
+			/* Then the protocol's own ioctl handler, if it registered one. */
+			if (dl_tag_array[dl_tag].proto->dl_ioctl)
+				retval =
+				    (*dl_tag_array[dl_tag].proto->dl_ioctl)(dl_tag,
+				                                            dl_tag_array[dl_tag].ifp,
+				                                            ioctl_code,
+				                                            ioctl_arg);
+			else
+				retval = EOPNOTSUPP;
+		}
+		else
+			retval = 0;
+	}
+
+	/*
+	 * A hard error (anything but "not supported") from the protocol
+	 * layer terminates the ioctl; EOPNOTSUPP falls through so the
+	 * interface filters, family module and driver still get a shot.
+	 */
+	if ((retval) && (retval != EOPNOTSUPP)) {
+		if (retval == EJUSTRETURN)
+			return 0;
+		else
+			return retval;
+	}
+
+	/* Give every attached interface filter a look at the ioctl. */
+	fhead = (struct dlil_filterq_head *) &ifp->if_flt_head;
+	TAILQ_FOREACH(tmp, fhead, que) {
+		if (IFILT(tmp).filter_if_ioctl) {
+			retval2 = (*IFILT(tmp).filter_if_ioctl)(IFILT(tmp).cookie, ifp,
+			                                        ioctl_code, ioctl_arg);
+			if (retval2) {
+				if (retval2 == EJUSTRETURN)
+					return 0;
+				else
+					return retval2;
+			}
+		}
+	}
+
+	/* Next, the interface family module's ioctl hook (if any). */
+	if_family = find_family_module(ifp->if_family);
+	if ((if_family) && (if_family->ifmod_ioctl)) {
+		retval2 = (*if_family->ifmod_ioctl)(ifp, ioctl_code, ioctl_arg);
+
+		if ((retval2) && (retval2 != EOPNOTSUPP)) {
+			if (retval2 == EJUSTRETURN)
+				return 0;
+			else
+				return retval2;	/* was "return retval": dropped the family module's error */
+		}
+
+		if (retval == EOPNOTSUPP)
+			retval = retval2;
+	}
+
+	/* Finally, the driver itself. */
+	if (ifp->if_ioctl)
+		retval2 = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg);
+
+	if (retval == EOPNOTSUPP)
+		return retval2;
+	else {
+		if (retval2 == EOPNOTSUPP)
+			return 0;
+		else
+			return retval2;
+	}
+}
+
+
+/*
+ * Attach a protocol to an interface: reserve a dl_tag/if_proto pair,
+ * hand the demux descriptors to the interface family module, and link
+ * the protocol into the interface's protocol list.  On success returns
+ * 0 with the new tag stored through *dl_tag; otherwise an errno.
+ * Runs funneled at splnet.
+ */
+int
+dlil_attach_protocol(struct dlil_proto_reg_str *proto,
+		     u_long *dl_tag)
+{
+	struct ifnet *ifp;
+	struct if_proto *ifproto;
+	u_long i;
+	struct if_family_str *if_family;
+	int error;
+	struct dlil_proto_head *tmp;
+	int s;
+	boolean_t funnel_state;
+
+
+	if ((proto->protocol_family == 0) || (proto->interface_family == 0))
+		return EINVAL;
+
+	funnel_state = thread_funnel_set(network_flock, TRUE);
+	s = splnet();
+	if_family = 
find_family_module(proto->interface_family); + if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) { + kprintf("dlil_attach_protocol -- no interface family module %d", + proto->interface_family); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + ifp = ifbyfamily(proto->interface_family, proto->unit_number); + if (!ifp) { + kprintf("dlil_attach_protocol -- no such interface %d unit %d\n", + proto->interface_family, proto->unit_number); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + if (dlil_find_dltag(proto->interface_family, proto->unit_number, + proto->protocol_family, &i) == 0) { + thread_funnel_set(network_flock, funnel_state); + return EEXIST; + } + + for (i=1; i < MAX_DL_TAGS; i++) + if (dl_tag_array[i].ifp == 0) + break; + + if (i >= MAX_DL_TAGS) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOMEM; + } + + /* + * Allocate and init a new if_proto structure + */ + + ifproto = _MALLOC(sizeof(struct if_proto), M_IFADDR, M_WAITOK); + if (!ifproto) { + printf("ERROR - DLIL failed if_proto allocation\n"); + thread_funnel_set(network_flock, funnel_state); + return ENOMEM; + } + + bzero(ifproto, sizeof(struct if_proto)); + + dl_tag_array[i].ifp = ifp; + dl_tag_array[i].proto = ifproto; + dl_tag_array[i].pr_flt_head = &ifproto->pr_flt_head; + ifproto->dl_tag = i; + *dl_tag = i; + + if (proto->default_proto) { + if (ifp->if_data.default_proto == 0) + ifp->if_data.default_proto = i; + else + printf("ERROR - dlil_attach_protocol -- Attempt to attach more than one default protocol\n"); + } + + ifproto->protocol_family = proto->protocol_family; + ifproto->dl_input = proto->input; + ifproto->dl_pre_output = proto->pre_output; + ifproto->dl_event = proto->event; + ifproto->dl_offer = proto->offer; + ifproto->dl_ioctl = proto->ioctl; + ifproto->ifp = ifp; + TAILQ_INIT(&ifproto->pr_flt_head); + + /* + * Call family module add_proto routine so it can refine the + * demux 
descriptors as it wishes. + */ + error = (*if_family->add_proto)(&proto->demux_desc_head, ifproto, *dl_tag); + if (error) { + dl_tag_array[*dl_tag].ifp = 0; + FREE(ifproto, M_IFADDR); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return error; + } + + + /* + * Add to if_proto list for this interface + */ + + tmp = (struct dlil_proto_head *) &ifp->proto_head; + TAILQ_INSERT_TAIL(tmp, ifproto, next); + ifp->refcnt++; + if (ifproto->dl_offer) + ifp->offercnt++; + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + + + +int +dlil_detach_protocol(u_long dl_tag) +{ + struct ifnet *ifp; + struct ifnet *orig_ifp=0; + struct if_proto *proto; + struct dlil_proto_head *tmp; + struct if_family_str *if_family; + struct dlil_filterq_entry *filter; + int s, retval; + struct dlil_filterq_head *fhead; + struct kev_msg ev_msg; + struct net_event_data ev_data; + boolean_t funnel_state; + + + if (dl_tag > MAX_DL_TAGS) + return ERANGE; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + s = splnet(); + if (dl_tag_array[dl_tag].ifp == 0) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + ifp = dl_tag_array[dl_tag].ifp; + proto = dl_tag_array[dl_tag].proto; + + if_family = find_family_module(ifp->if_family); + if (if_family == NULL) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + tmp = (struct dlil_proto_head *) &ifp->proto_head; + + /* + * Call family module del_proto + */ + + (*if_family->del_proto)(proto, dl_tag); + + + /* + * Remove and deallocate any attached protocol filters + */ + + while (filter = TAILQ_FIRST(&proto->pr_flt_head)) + dlil_detach_filter(filter->filter_id); + + if (proto->dl_offer) + ifp->offercnt--; + + dl_tag_array[dl_tag].ifp = 0; + + TAILQ_REMOVE(tmp, proto, next); + FREE(proto, M_IFADDR); + + if (--ifp->refcnt == 0) { + if (ifp->if_flags & IFF_UP) + printf("WARNING - dlil_detach_protocol - ifp refcnt 0, but IF still up\n"); + + 
TAILQ_REMOVE(&ifnet, ifp, if_link); + + (*if_family->del_if)(ifp); + + if (--if_family->refcnt == 0) { + if (if_family->shutdown) + (*if_family->shutdown)(); + + TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); + FREE(if_family, M_IFADDR); + } + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + while (orig_ifp != ifp) { + orig_ifp = ifp; + + TAILQ_FOREACH(filter, fhead, que) { + if (IFILT(filter).filter_if_free) { + retval = (*IFILT(filter).filter_if_free)(IFILT(filter).cookie, ifp); + if (retval) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; + } + } + if (ifp != orig_ifp) + break; + } + } + + (*ifp->if_free)(ifp); + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_IF_DETACHED; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + } + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + + + + + +int +dlil_if_attach(struct ifnet *ifp) +{ + u_long interface_family = ifp->if_family; + struct if_family_str *if_family; + struct dlil_proto_head *tmp; + int stat; + int s; + struct kev_msg ev_msg; + struct net_event_data ev_data; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + if (ifnet_inited == 0) { + TAILQ_INIT(&ifnet); + ifnet_inited = 1; + } + + if_family = find_family_module(interface_family); + + if ((!if_family) || (if_family->flags & DLIL_SHUTDOWN)) { + splx(s); + kprintf("Attempt to attach interface without family module - %d\n", + interface_family); + thread_funnel_set(network_flock, funnel_state); + return ENODEV; + } + + + /* + * Call the family module to fill in the appropriate fields in 
the + * ifnet structure. + */ + + stat = (*if_family->add_if)(ifp); + if (stat) { + splx(s); + kprintf("dlil_if_attach -- add_if failed with %d\n", stat); + thread_funnel_set(network_flock, funnel_state); + return stat; + } + + /* + * Add the ifp to the interface list. + */ + + tmp = (struct dlil_proto_head *) &ifp->proto_head; + TAILQ_INIT(tmp); + + ifp->if_data.default_proto = 0; + ifp->refcnt = 1; + ifp->offercnt = 0; + TAILQ_INIT(&ifp->if_flt_head); + old_if_attach(ifp); + if_family->refcnt++; + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_IF_ATTACHED; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + + +int +dlil_if_detach(struct ifnet *ifp) +{ + struct if_proto *proto; + struct dlil_filterq_entry *if_filter; + struct if_family_str *if_family; + struct dlil_filterq_head *fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + int s; + struct kev_msg ev_msg; + struct net_event_data ev_data; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + if (ifp->if_flags & IFF_UP) + printf("WARNING - dlil_if_detach called for UP interface\n"); + + if_family = find_family_module(ifp->if_family); + + if (!if_family) { + kprintf("Attempt to detach interface without family module - %s\n", + ifp->if_name); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENODEV; + } + + while (if_filter = TAILQ_FIRST(fhead)) + dlil_detach_filter(if_filter->filter_id); + + if (--ifp->refcnt == 0) { + TAILQ_REMOVE(&ifnet, ifp, if_link); + + (*if_family->del_if)(ifp); + + if 
(--if_family->refcnt == 0) { + if (if_family->shutdown) + (*if_family->shutdown)(); + + TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); + FREE(if_family, M_IFADDR); + } + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_IF_DETACHED; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + + ev_msg.dv[1].data_length = 0; + kev_post_msg(&ev_msg); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; + } + else + { + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_IF_DETACHING; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return DLIL_WAIT_FOR_FREE; + } +} + + +int +dlil_reg_if_modules(u_long interface_family, + struct dlil_ifmod_reg_str *ifmod) +{ + struct if_family_str *if_family; + int s; + boolean_t funnel_state; + + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + if (find_family_module(interface_family)) { + kprintf("Attempt to register dlil family module more than once - %d\n", + interface_family); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return EEXIST; + } + + if ((!ifmod->add_if) || (!ifmod->del_if) || + (!ifmod->add_proto) || (!ifmod->del_proto)) { + kprintf("dlil_reg_if_modules passed at least one null pointer\n"); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 
EINVAL; + } + + if_family = (struct if_family_str *) _MALLOC(sizeof(struct if_family_str), M_IFADDR, M_WAITOK); + if (!if_family) { + kprintf("dlil_reg_if_modules failed allocation\n"); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOMEM; + } + + bzero(if_family, sizeof(struct if_family_str)); + + if_family->if_family = interface_family & 0xffff; + if_family->shutdown = ifmod->shutdown; + if_family->add_if = ifmod->add_if; + if_family->del_if = ifmod->del_if; + if_family->add_proto = ifmod->add_proto; + if_family->del_proto = ifmod->del_proto; + if_family->ifmod_ioctl = ifmod->ifmod_ioctl; + if_family->refcnt = 1; + if_family->flags = 0; + + TAILQ_INSERT_TAIL(&if_family_head, if_family, if_fam_next); + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + +int dlil_dereg_if_modules(u_long interface_family) +{ + struct if_family_str *if_family; + int s; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + if_family = find_family_module(interface_family); + if (if_family == 0) { + splx(s); + thread_funnel_set(network_flock, funnel_state); + return ENOENT; + } + + if (--if_family->refcnt == 0) { + if (if_family->shutdown) + (*if_family->shutdown)(); + + TAILQ_REMOVE(&if_family_head, if_family, if_fam_next); + FREE(if_family, M_IFADDR); + } + else + if_family->flags |= DLIL_SHUTDOWN; + + splx(s); + thread_funnel_set(network_flock, funnel_state); + return 0; +} + + + + + +/* + * Old if_attach no-op'ed function defined here for temporary backwards compatibility + */ + +void if_attach(ifp) + struct ifnet *ifp; +{ + dlil_if_attach(ifp); +} + + + +int +dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id) +{ + struct ifnet *orig_ifp = 0; + struct ifnet *ifp; + struct if_proto *ifproto; + struct if_proto *proto; + struct dlil_filterq_entry *tmp; + int retval = 0; + struct dlil_filterq_head *fhead; + int match_found; + + + dlil_stats.inject_if_in1++; + if 
(from_id > MAX_DLIL_FILTERS) + return ERANGE; + + if (dlil_filters[from_id].type != DLIL_IF_FILTER) + return ENOENT; + + ifp = dlil_filters[from_id].ifp; + +/* + * Let interface filters (if any) do their thing ... + */ + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + match_found = 0; + + if (TAILQ_EMPTY(fhead) == 0) { + while (orig_ifp != ifp) { + orig_ifp = ifp; + TAILQ_FOREACH_REVERSE(tmp, fhead, que, dlil_filterq_head) { + if ((match_found) && (IFILT(tmp).filter_if_input)) { + retval = (*IFILT(tmp).filter_if_input)(IFILT(tmp).cookie, + &ifp, + &m, + &frame_header); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + + } + + if (ifp != orig_ifp) + break; + + if (from_id == tmp->filter_id) + match_found = 1; + } + } + } + + ifp->if_lastchange = time; + + /* + * Call family demux module. If the demux module finds a match + * for the frame it will fill-in the ifproto pointer. + */ + + retval = (*ifp->if_demux)(ifp, m, frame_header, &ifproto ); + + if (m->m_flags & (M_BCAST|M_MCAST)) + ifp->if_imcasts++; + + if ((retval) && (ifp->offercnt)) { + /* + * No match was found, look for any offers. + */ + struct dlil_proto_head *tmp = (struct dlil_proto_head *) &ifp->proto_head; + TAILQ_FOREACH(proto, tmp, next) { + if ((proto->dl_offer) && (proto->dl_offer(m, frame_header) == 0)) { + ifproto = proto; + retval = 0; + break; + } + } + } + + if (retval) { + if (retval != EJUSTRETURN) { + m_freem(m); + return retval; + } + else + return 0; + } + else + if (ifproto == 0) { + printf("ERROR - dlil_inject_if_input -- if_demux didn't return an if_proto pointer\n"); + m_freem(m); + return 0; + } + +/* + * Call any attached protocol filters. 
+ */ + TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) { + if (PFILT(tmp).filter_dl_input) { + retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie, + &m, + &frame_header, + &ifp); + + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + } + + + + retval = (*ifproto->dl_input)(m, frame_header, + ifp, ifproto->dl_tag, + FALSE); + + dlil_stats.inject_if_in2++; + if (retval == EJUSTRETURN) + retval = 0; + else + if (retval) + m_freem(m); + + return retval; + +} + + + + + +int +dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id) +{ + struct ifnet *orig_ifp = 0; + struct dlil_filterq_entry *tmp; + int retval; + struct if_proto *ifproto = 0; + int match_found; + struct ifnet *ifp; + + + dlil_stats.inject_pr_in1++; + if (from_id > MAX_DLIL_FILTERS) + return ERANGE; + + if (dlil_filters[from_id].type != DLIL_PR_FILTER) + return ENOENT; + + ifproto = dlil_filters[from_id].proto; + ifp = dlil_filters[from_id].ifp; + + +/* + * Call any attached protocol filters. 
+ */ + + match_found = 0; + TAILQ_FOREACH_REVERSE(tmp, &ifproto->pr_flt_head, que, dlil_filterq_head) { + if ((match_found) && (PFILT(tmp).filter_dl_input)) { + retval = (*PFILT(tmp).filter_dl_input)(PFILT(tmp).cookie, + &m, + &frame_header, + &ifp); + + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + + if (tmp->filter_id == from_id) + match_found = 1; + } + + + retval = (*ifproto->dl_input)(m, frame_header, + ifp, ifproto->dl_tag, + FALSE); + + if (retval == EJUSTRETURN) + retval = 0; + else + if (retval) + m_freem(m); + + dlil_stats.inject_pr_in2++; + return retval; +} + + + +int +dlil_inject_pr_output(struct mbuf *m, + struct sockaddr *dest, + int raw, + char *frame_type, + char *dst_linkaddr, + u_long from_id) +{ + struct ifnet *orig_ifp = 0; + struct ifnet *ifp; + struct dlil_filterq_entry *tmp; + int retval = 0; + char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4]; + char dst_linkaddr_buffer[MAX_LINKADDR * 4]; + struct dlil_filterq_head *fhead; + int match_found; + u_long dl_tag; + + + dlil_stats.inject_pr_out1++; + if (raw == 0) { + if (frame_type) + bcopy(frame_type, &frame_type_buffer[0], MAX_FRAME_TYPE_SIZE * 4); + else + return EINVAL; + + if (dst_linkaddr) + bcopy(dst_linkaddr, &dst_linkaddr_buffer, MAX_LINKADDR * 4); + else + return EINVAL; + } + + if (from_id > MAX_DLIL_FILTERS) + return ERANGE; + + if (dlil_filters[from_id].type != DLIL_PR_FILTER) + return ENOENT; + + ifp = dlil_filters[from_id].ifp; + dl_tag = dlil_filters[from_id].proto->dl_tag; + + + frame_type = frame_type_buffer; + dst_linkaddr = dst_linkaddr_buffer; + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + +/* + * Run any attached protocol filters. 
+ */ + match_found = 0; + + if (TAILQ_EMPTY(dl_tag_array[dl_tag].pr_flt_head) == 0) { + TAILQ_FOREACH(tmp, dl_tag_array[dl_tag].pr_flt_head, que) { + if ((match_found) && (PFILT(tmp).filter_dl_output)) { + retval = (*PFILT(tmp).filter_dl_output)(PFILT(tmp).cookie, + &m, &ifp, &dest, dst_linkaddr, frame_type); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + } + + if (tmp->filter_id == from_id) + match_found = 1; + } + } + + +/* + * Call framing module + */ + if ((raw == 0) && (ifp->if_framer)) { + retval = (*ifp->if_framer)(ifp, &m, dest, dst_linkaddr, frame_type); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else + { + m_freem(m); + return retval; + } + } + } + + +#if BRIDGE + if (do_bridge) { + struct mbuf *m0 = m ; + + if (m->m_pkthdr.rcvif) + m->m_pkthdr.rcvif = NULL ; + ifp = bridge_dst_lookup(m); + bdg_forward(&m0, ifp); + if (m0) + m_freem(m0); + + return 0; + } +#endif + + +/* + * Let interface filters (if any) do their thing ... + */ + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + if (TAILQ_EMPTY(fhead) == 0) { + while (orig_ifp != ifp) { + orig_ifp = ifp; + TAILQ_FOREACH(tmp, fhead, que) { + if (IFILT(tmp).filter_if_output) { + retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie, + &ifp, + &m); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + + } + + if (ifp != orig_ifp) + break; + } + } + } + +/* + * Finally, call the driver. 
+ */ + + retval = (*ifp->if_output)(ifp, m); + dlil_stats.inject_pr_out2++; + if ((retval == 0) || (retval == EJUSTRETURN)) + return 0; + else + return retval; +} + + +int +dlil_inject_if_output(struct mbuf *m, u_long from_id) +{ + struct ifnet *orig_ifp = 0; + struct ifnet *ifp; + struct dlil_filterq_entry *tmp; + int retval = 0; + struct dlil_filterq_head *fhead; + int match_found; + + + dlil_stats.inject_if_out1++; + if (from_id > MAX_DLIL_FILTERS) + return ERANGE; + + if (dlil_filters[from_id].type != DLIL_IF_FILTER) + return ENOENT; + + ifp = dlil_filters[from_id].ifp; + +/* + * Let interface filters (if any) do their thing ... + */ + + fhead = (struct dlil_filterq_head *) &ifp->if_flt_head; + match_found = 0; + + if (TAILQ_EMPTY(fhead) == 0) { + while (orig_ifp != ifp) { + orig_ifp = ifp; + TAILQ_FOREACH(tmp, fhead, que) { + if ((match_found) && (IFILT(tmp).filter_if_output)) { + retval = (*IFILT(tmp).filter_if_output)(IFILT(tmp).cookie, + &ifp, + &m); + if (retval) { + if (retval == EJUSTRETURN) + return 0; + else { + m_freem(m); + return retval; + } + } + + } + + if (ifp != orig_ifp) + break; + + if (from_id == tmp->filter_id) + match_found = 1; + } + } + } + +/* + * Finally, call the driver. + */ + + retval = (*ifp->if_output)(ifp, m); + dlil_stats.inject_if_out2++; + if ((retval == 0) || (retval == EJUSTRETURN)) + return 0; + else + return retval; +} diff --git a/bsd/net/dlil.h b/bsd/net/dlil.h new file mode 100644 index 000000000..63479e676 --- /dev/null +++ b/bsd/net/dlil.h @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. + * + * Data Link Inteface Layer + * Author: Ted Walker + */ + + +#ifndef DLIL_H +#define DLIL_H + +#if __STDC__ + +struct ifnet; +struct mbuf; +struct ether_header; + +#endif + + + +#define DLIL_LAST_FILTER -1 +#define DLIL_NULL_FILTER -2 + +#define DLIL_WAIT_FOR_FREE -2 + +#define DLIL_BLUEBOX 1 + + + +#include +#include +#include + +enum { + BPF_TAP_DISABLE, + BPF_TAP_INPUT, + BPF_TAP_OUTPUT, + BPF_TAP_INPUT_OUTPUT +}; + + +struct dl_tag_attr_str { + u_long dl_tag; + short if_flags; + short if_unit; + u_long if_family; + u_long protocol_family; +}; + + +struct dlil_pr_flt_str { + caddr_t cookie; + + int (*filter_dl_input)(caddr_t cookie, + struct mbuf **m, + char **frame_header, + struct ifnet **ifp); + + + int (*filter_dl_output)(caddr_t cookie, + struct mbuf **m, + struct ifnet **ifp, + struct sockaddr **dest, + char *dest_linkaddr, + char *frame_type); + + int (*filter_dl_event)(caddr_t cookie, + struct kern_event_msg *event_msg); + + int (*filter_dl_ioctl)(caddr_t cookie, + struct ifnet *ifp, + u_long ioctl_cmd, + caddr_t ioctl_arg); + + int (*filter_detach)(caddr_t cookie); +}; + +struct dlil_if_flt_str { + caddr_t cookie; + int (*filter_if_input)(caddr_t cookie, + struct ifnet **ifnet_ptr, + struct mbuf **mbuf_ptr, + char **frame_ptr); + + int (*filter_if_event)(caddr_t cookie, + struct ifnet **ifnet_ptr, + struct kern_event_msg **event_msg_ptr); + + int (*filter_if_output)(caddr_t cookie, + struct ifnet **ifnet_ptr, + 
struct mbuf **mbuf_ptr); + + + int (*filter_if_ioctl)(caddr_t cookie, + struct ifnet *ifnet_ptr, + u_long ioctl_code_ptr, + caddr_t ioctl_arg_ptr); + + int (*filter_if_free)(caddr_t cookie, + struct ifnet *ifnet_ptr); + + int (*filter_detach)(caddr_t cookie); +}; + + +#define DLIL_PR_FILTER 1 +#define DLIL_IF_FILTER 2 + + + +typedef int (*dl_input_func)(struct mbuf *m, char *frame_header, + struct ifnet *ifp, u_long dl_tag, int sync_ok); +typedef int (*dl_pre_output_func)(struct ifnet *ifp, + struct mbuf **m, + struct sockaddr *dest, + caddr_t route_entry, + char *frame_type, + char *dst_addr, + u_long dl_tag); + +typedef int (*dl_event_func)(struct kern_event_msg *event, + u_long dl_tag); + +typedef int (*dl_offer_func)(struct mbuf *m, char *frame_header); +typedef int (*dl_ioctl_func)(u_long dl_tag, + struct ifnet *ifp, + u_long ioctl_cmd, + caddr_t ioctl_arg); + + + +struct dlil_filterq_entry { + TAILQ_ENTRY(dlil_filterq_entry) que; + u_long filter_id; + int type; + union { + struct dlil_if_flt_str if_filter; + struct dlil_pr_flt_str pr_filter; + } variants; +}; + + +TAILQ_HEAD(dlil_filterq_head, dlil_filterq_entry); + + +struct if_proto { + TAILQ_ENTRY(if_proto) next; + u_long dl_tag; + struct dlil_filterq_head pr_flt_head; + struct ifnet *ifp; + dl_input_func dl_input; + dl_pre_output_func dl_pre_output; + dl_event_func dl_event; + dl_offer_func dl_offer; + dl_ioctl_func dl_ioctl; + u_long protocol_family; + +}; + +TAILQ_HEAD(dlil_proto_head, if_proto); + +struct dlil_tag_list_entry { + TAILQ_ENTRY(dlil_tag_list_entry) next; + struct ifnet *ifp; + u_long dl_tag; +}; + + +#define DLIL_DESC_RAW 1 +#define DLIL_DESC_802_2 2 +#define DLIL_DESC_802_2_SNAP 3 + +struct dlil_demux_desc { + TAILQ_ENTRY(dlil_demux_desc) next; + int type; + + u_char *native_type; + union { + struct { + u_long proto_id_length; /* IN LONGWORDS!!! 
*/ + u_char *proto_id; + u_char *proto_id_mask; + + } bitmask; + + struct { + u_char dsap; + u_char ssap; + u_char control_code; + u_char pad; + } desc_802_2; + + struct { + u_char dsap; + u_char ssap; + u_char control_code; + u_char org[3]; + u_short protocol_type; + } desc_802_2_SNAP; + } variants; +}; + +TAILQ_HEAD(ddesc_head_str, dlil_demux_desc); + + +struct dlil_proto_reg_str { + struct ddesc_head_str demux_desc_head; + u_long interface_family; + u_long protocol_family; + short unit_number; + int default_proto; /* 0 or 1 */ + dl_input_func input; + dl_pre_output_func pre_output; + dl_event_func event; + dl_offer_func offer; + dl_ioctl_func ioctl; +}; + + +int dlil_attach_interface_filter(struct ifnet *ifnet_ptr, + struct dlil_if_flt_str *interface_filter, + u_long *filter_id, + int insertion_point); + +int +dlil_input(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail); + +int +dlil_output(u_long dl_tag, + struct mbuf *m, + caddr_t route, + struct sockaddr *dest, + int raw); + + +int +dlil_ioctl(u_long proto_family, + struct ifnet *ifp, + u_long ioctl_code, + caddr_t ioctl_arg); + +int +dlil_attach_protocol(struct dlil_proto_reg_str *proto, + u_long *dl_tag); + +int +dlil_detach_protocol(u_long dl_tag); + +int +dlil_if_attach(struct ifnet *ifp); + +int +dlil_attach_protocol_filter(u_long dl_tag, + struct dlil_pr_flt_str *proto_filter, + u_long *filter_id, + int insertion_point); +int +dlil_detach_filter(u_long filter_id); + +struct dlil_ifmod_reg_str { + int (*add_if)(struct ifnet *ifp); + int (*del_if)(struct ifnet *ifp); + int (*add_proto)(struct ddesc_head_str *demux_desc_head, + struct if_proto *proto, u_long dl_tag); + int (*del_proto)(struct if_proto *proto, u_long dl_tag); + int (*ifmod_ioctl)(struct ifnet *ifp, u_long ioctl_cmd, caddr_t data); + int (*shutdown)(); +}; + + +int dlil_reg_if_modules(u_long interface_family, + struct dlil_ifmod_reg_str *ifmod_reg); + +int +dlil_inject_if_input(struct mbuf *m, char *frame_header, u_long from_id); 
+ +int +dlil_inject_pr_input(struct mbuf *m, char *frame_header, u_long from_id); + +int +dlil_inject_pr_output(struct mbuf *m, + struct sockaddr *dest, + int raw, + char *frame_type, + char *dst_linkaddr, + u_long from_id); + +int +dlil_inject_if_output(struct mbuf *m, u_long from_id); + +int +dlil_find_dltag(u_long if_family, short unit, u_long proto_family, u_long *dl_tag); + + +int +dlil_event(struct ifnet *ifp, struct kern_event_msg *event); + +int dlil_dereg_if_modules(u_long interface_family); + +int +dlil_if_detach(struct ifnet *ifp); + +#endif /* DLIL_H */ diff --git a/bsd/net/dlil_ethersubr.c b/bsd/net/dlil_ethersubr.c new file mode 100644 index 000000000..3d85f62f1 --- /dev/null +++ b/bsd/net/dlil_ethersubr.c @@ -0,0 +1,1227 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#include +#include +#endif +#if INET6 +#include +#include +#endif + + +#include +#include + +#include + + +#if LLC && CCITT +extern struct ifqueue pkintrq; +#endif + +/* General stuff from if_ethersubr.c - may not need some of it */ + +#include +#if NETAT +extern struct ifqueue atalkintrq; +#endif + + +#if BRIDGE +#include +#endif + +/* #include "vlan.h" */ +#if NVLAN > 0 +#include +#endif /* NVLAN > 0 */ + + +extern struct ifnet_blue *blue_if; +extern struct mbuf *splitter_input(struct mbuf *, struct ifnet *); + +static u_long lo_dlt = 0; +static ivedonethis = 0; +static int ether_resolvemulti __P((struct ifnet *, struct sockaddr **, + struct sockaddr *)); +u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +#define IFP2AC(IFP) ((struct arpcom *)IFP) + +/* This stuff is new */ + +#define DB_HEADER_SIZE 20 +struct en_desc { + short total_len; + u_short ethertype; + u_long dl_tag; + struct ifnet *ifp; + struct if_proto *proto; + u_long proto_id_length; + u_long proto_id_data[8]; /* probably less - proto-id and bitmasks */ +}; + +#define LITMUS_SIZE 16 +#define ETHER_DESC_BLK_SIZE 50 +#define MAX_INTERFACES 50 + +/* + * Statics for demux module + */ + +struct ether_desc_blk_str { + u_long n_blocks; + u_long *block_ptr; +}; + +struct dl_es_at_entry +{ + struct ifnet *ifp; + u_long dl_tag; + int ref_count; +}; + + +static struct ether_desc_blk_str ether_desc_blk[MAX_INTERFACES]; +static u_long litmus_mask[LITMUS_SIZE]; +static u_long litmus_length = 0; + + +/* + * Temp static for protocol registration XXX + */ + +#define MAX_EN_COUNT 30 + +static struct dl_es_at_entry en_at_array[MAX_EN_COUNT]; + +/* + * This could be done below in-line with heavy casting, but the pointer arithmetic is + * prone to error. 
+ */ + +static +int desc_in_bounds(block, current_ptr, offset_length) + u_int block; + char *current_ptr; + u_long offset_length; +{ + u_long end_of_block; + u_long current_ptr_tmp; + + current_ptr_tmp = (u_long) current_ptr; + end_of_block = (u_long) ether_desc_blk[block].block_ptr; + end_of_block += (ETHER_DESC_BLK_SIZE * ether_desc_blk[block].n_blocks); + if ((current_ptr_tmp + offset_length) < end_of_block) + return 1; + else + return 0; +} + + +/* + * Release all descriptor entries owned by this dl_tag (there may be several). + * Setting the dl_tag to 0 releases the entry. Eventually we should compact-out + * the unused entries. + */ +static +int ether_del_proto(struct if_proto *proto, u_long dl_tag) +{ + char *current_ptr = (char *) ether_desc_blk[proto->ifp->family_cookie].block_ptr; + struct en_desc *ed; + int i; + int found = 0; + + ed = (struct en_desc *) current_ptr; + + while(ed->total_len) { + if (ed->dl_tag == dl_tag) { + found = 1; + ed->dl_tag = 0; + } + + current_ptr += ed->total_len; + ed = (struct en_desc *) current_ptr; + } + } + + + +static +int ether_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag) +{ + char *current_ptr; + struct dlil_demux_desc *desc; + u_long id_length; /* IN LONGWORDS!!! 
*/ + struct en_desc *ed; + u_long *bitmask; + u_long *proto_id; + int i; + short total_length; + u_long block_count; + u_long *tmp; + + + TAILQ_FOREACH(desc, desc_head, next) { + switch (desc->type) + { + case DLIL_DESC_RAW: + id_length = desc->variants.bitmask.proto_id_length; + break; + + case DLIL_DESC_802_2: + id_length = 1; + break; + + case DLIL_DESC_802_2_SNAP: + id_length = 2; + break; + + default: + return EINVAL; + } + +restart: + block_count = ether_desc_blk[proto->ifp->family_cookie].n_blocks; + current_ptr = (char *) ether_desc_blk[proto->ifp->family_cookie].block_ptr; + ed = (struct en_desc *) current_ptr; + total_length = ((id_length << 2) * 2) + DB_HEADER_SIZE; + + while ((ed->total_len) && (desc_in_bounds(proto->ifp->family_cookie, + current_ptr, total_length))) { + if ((ed->dl_tag == 0) && (total_length <= ed->total_len)) + break; + else + current_ptr += *(short *)current_ptr; + + ed = (struct en_desc *) current_ptr; + } + + if (!desc_in_bounds(proto->ifp->family_cookie, current_ptr, total_length)) { + + tmp = _MALLOC((ETHER_DESC_BLK_SIZE * (block_count + 1)), + M_IFADDR, M_NOWAIT); + if (tmp == 0) { + /* + * Remove any previous descriptors set in the call. 
+ */ + ether_del_proto(proto, dl_tag); + return ENOMEM; + } + + bzero(tmp, ETHER_DESC_BLK_SIZE * (block_count + 1)); + bcopy(ether_desc_blk[proto->ifp->family_cookie].block_ptr, + tmp, (ETHER_DESC_BLK_SIZE * block_count)); + FREE(ether_desc_blk[proto->ifp->family_cookie].block_ptr, M_IFADDR); + ether_desc_blk[proto->ifp->family_cookie].n_blocks = block_count + 1; + ether_desc_blk[proto->ifp->family_cookie].block_ptr = tmp; + goto restart; + } + + if (ed->total_len == 0) + ed->total_len = total_length; + ed->ethertype = *((u_short *) desc->native_type); + + ed->dl_tag = dl_tag; + ed->proto = proto; + ed->proto_id_length = id_length; + ed->ifp = proto->ifp; + + switch (desc->type) + { + case DLIL_DESC_RAW: + bcopy(desc->variants.bitmask.proto_id, &ed->proto_id_data[0], (id_length << 2) ); + bcopy(desc->variants.bitmask.proto_id_mask, &ed->proto_id_data[id_length], + (id_length << 2)); + break; + + case DLIL_DESC_802_2: + ed->proto_id_data[0] = 0; + bcopy(&desc->variants.desc_802_2, &ed->proto_id_data[0], 3); + ed->proto_id_data[1] = 0xffffff00; + break; + + case DLIL_DESC_802_2_SNAP: + /* XXX Add verification of fixed values here */ + + ed->proto_id_data[0] = 0; + ed->proto_id_data[1] = 0; + bcopy(&desc->variants.desc_802_2_SNAP, &ed->proto_id_data[0], 8); + ed->proto_id_data[2] = 0xffffffff; + ed->proto_id_data[3] = 0xffffffff;; + break; + } + + if (id_length) { + proto_id = (u_long *) &ed->proto_id_data[0]; + bitmask = (u_long *) &ed->proto_id_data[id_length]; + for (i=0; i < (id_length); i++) { + litmus_mask[i] &= bitmask[i]; + litmus_mask[i] &= proto_id[i]; + } + if (id_length > litmus_length) + litmus_length = id_length; + } + } + + return 0; +} + + +static +int ether_shutdown() +{ + return 0; +} + + + + +/* + * Process a received Ethernet packet; + * the packet is in the mbuf chain m without + * the ether header, which is provided separately. 
+ */ +int +new_ether_input(m, frame_header, ifp, dl_tag, sync_ok) + struct mbuf *m; + char *frame_header; + struct ifnet *ifp; + u_long dl_tag; + int sync_ok; + +{ + register struct ether_header *eh = (struct ether_header *) frame_header; + register struct ifqueue *inq=0; + u_short ether_type; + int s; + u_int16_t ptype = -1; + unsigned char buf[18]; + +#if ISO || LLC || NETAT + register struct llc *l; +#endif + + +#if DLIL_BLUEBOX + + /* + * Y-adapter input processing: + * - Don't split if coming from a dummy if + * - If coming from a real if, if splitting enabled, + * then filter the incoming packet + */ + if (ifp != (struct ifnet *)blue_if) + { /* Is splitter turned on? */ + if (ifp->if_flags&IFF_SPLITTER) + { m->m_data -= sizeof(struct ether_header); + m->m_len += sizeof (struct ether_header); + m->m_pkthdr.len += sizeof(struct ether_header); + /* + * Check to see if destined for BlueBox or Rhapsody + * If NULL return, mbuf's been consumed by the BlueBox. + * Otherwise, send on to Rhapsody + */ + if ((m = splitter_input(m, ifp)) == NULL) + return EJUSTRETURN; + m->m_data += sizeof(struct ether_header); + m->m_len -= sizeof (struct ether_header); + m->m_pkthdr.len -= sizeof(struct ether_header); + } + } else + { /* Get the "real" IF */ + ifp = ((struct ndrv_cb *)(blue_if->ifb_so->so_pcb))->nd_if; + m->m_pkthdr.rcvif = ifp; + blue_if->pkts_looped_b2r++; + } + +#endif + if ((ifp->if_flags & IFF_UP) == 0) { + m_freem(m); + return EJUSTRETURN; + } + + ifp->if_lastchange = time; + + if (eh->ether_dhost[0] & 1) { + if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost, + sizeof(etherbroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + if (m->m_flags & (M_BCAST|M_MCAST)) + ifp->if_imcasts++; + + ether_type = ntohs(eh->ether_type); + +#if NVLAN > 0 + if (ether_type == vlan_proto) { + if (vlan_input(eh, m) < 0) + ifp->if_data.ifi_noproto++; + return EJUSTRETURN; + } +#endif /* NVLAN > 0 */ + + switch (ether_type) { +#if INET + 
case ETHERTYPE_IP: + if (ipflow_fastforward(m)) + return EJUSTRETURN; + ptype = mtod(m, struct ip *)->ip_p; + if ((sync_ok == 0) || + (ptype != IPPROTO_TCP && ptype != IPPROTO_UDP)) { + schednetisr(NETISR_IP); + } + + inq = &ipintrq; + break; + + case ETHERTYPE_ARP: + schednetisr(NETISR_ARP); + inq = &arpintrq; + break; +#endif +#if INET6 + case ETHERTYPE_IPV6: + schednetisr(NETISR_IPV6); + inq = &ip6intrq; + break; +#endif + + + default: { +#if NETAT + if (ether_type > ETHERMTU) + return ENOENT; + l = mtod(m, struct llc *); + switch (l->llc_dsap) { + case LLC_SNAP_LSAP: + + /* Temporary hack: check for AppleTalk and AARP packets */ + /* WARNING we're checking only on the "ether_type" (the 2 bytes + * of the SNAP header. This shouldn't be a big deal, + * AppleTalk pat_input is making sure we have the right packets + * because it needs to discrimante AARP from EtherTalk packets. + */ + + if (l->llc_ssap == LLC_SNAP_LSAP && + l->llc_un.type_snap.control == 0x03) { + +#ifdef APPLETALK_DEBUG + printf("new_ether_input: SNAP Cntrol type=0x%x Src=%s\n", + l->llc_un.type_snap.ether_type, + ether_sprintf(buf, &eh->ether_shost)); + printf(" Dst=%s\n", + ether_sprintf(buf, &eh->ether_dhost)); +#endif /* APPLETALK_DEBUG */ + + if ((l->llc_un.type_snap.ether_type == 0x809B) || + (l->llc_un.type_snap.ether_type == 0x80F3)) { + + + /* + * note: for AppleTalk we need to pass the enet header of the + * packet up stack. To do so, we made sure in that the FULL packet + * is copied in the mbuf by the mace driver, and only the m_data and + * length have been shifted to make IP and the other guys happy. 
+ */ + + m->m_data -= sizeof(*eh); + m->m_len += sizeof(*eh); + m->m_pkthdr.len += sizeof(*eh); +#ifdef APPLETALK_DEBUG + l == (struct llc *)(eh+1); + if (l->llc_un.type_snap.ether_type == 0x80F3) { + kprintf("new_ether_input: RCV AppleTalk type=0x%x Src=%s\n", + l->llc_un.type_snap.ether_type, + ether_sprintf(buf, &eh->ether_shost)); + kprintf(" Dst=%s\n", + ether_sprintf(buf, &eh->ether_dhost)); + } +#endif /* APPLETALK_DEBUG */ + schednetisr(NETISR_APPLETALK); + inq = &atalkintrq ; + + break; + } + } + + break; + + + default: + return ENOENT; + } + +#else /*NETAT*/ + return ENOENT; +#endif /* NETAT */ + + } + } + + if (inq == 0) + return ENOENT; + + s = splimp(); + if (IF_QFULL(inq)) { + IF_DROP(inq); + m_freem(m); + splx(s); + return EJUSTRETURN; + } else + IF_ENQUEUE(inq, m); + splx(s); + + if ((sync_ok) && + (ptype == IPPROTO_TCP || ptype == IPPROTO_UDP)) { + extern void ipintr(void); + + s = splnet(); + ipintr(); + splx(s); + } + + return 0; +} + + + + +int ether_demux(ifp, m, frame_header, proto) + struct ifnet *ifp; + struct mbuf *m; + char *frame_header; + struct if_proto **proto; + +{ + register struct ether_header *eh = (struct ether_header *)frame_header; + u_short ether_type; + char *current_ptr = (char *) ether_desc_blk[ifp->family_cookie].block_ptr; + struct dlil_demux_desc *desc; + register u_long temp; + u_long *data; + register struct if_proto *ifproto; + u_long i; + struct en_desc *ed; + + + if (eh->ether_dhost[0] & 1) { + if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost, + sizeof(etherbroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + + ether_type = ntohs(eh->ether_type); + + /* + * Search through the connected protocols for a match. 
+ */ + + + data = mtod(m, u_long *); + ed = (struct en_desc *) current_ptr; + while (desc_in_bounds(ifp->family_cookie, current_ptr, DB_HEADER_SIZE)) { + if (ed->total_len == 0) + break; + + if ((ed->dl_tag != 0) && (ed->ifp == ifp) && + ((ed->ethertype == ntohs(eh->ether_type)) || (ed->ethertype == 0))) { + if (ed->proto_id_length) { + for (i=0; i < (ed->proto_id_length); i++) { + temp = ntohs(data[i]) & ed->proto_id_data[ed->proto_id_length + i]; + if ((temp ^ ed->proto_id_data[i])) + break; + } + + if (i >= (ed->proto_id_length)) { + *proto = ed->proto; + return 0; + } + } + else { + *proto = ed->proto; + return 0; + } + } + current_ptr += ed->total_len; + ed = (struct en_desc *) current_ptr; + } + +/* + kprintf("ether_demux - No match for <%x><%x><%x><%x><%x><%x><%x<%x>\n", + eh->ether_type,data[0], data[1], data[2], data[3], data[4],data[5],data[6]); +*/ + + return ENOENT; +} + + + +/* + * Ethernet output routine. + * Encapsulate a packet of type family for the local net. + * Use trailer local net encapsulation if enough data in first + * packet leaves a multiple of 512 bytes of data in remainder. + * Assumes that ifp is actually pointer to arpcom structure. + */ +int +ether_frameout(ifp, m, ndest, edst, ether_type) + register struct ifnet *ifp; + struct mbuf **m; + struct sockaddr *ndest; + char *edst; + char *ether_type; +{ + register struct ether_header *eh; + int hlen; /* link layer header lenght */ + struct arpcom *ac = IFP2AC(ifp); + + + hlen = ETHER_HDR_LEN; + + /* + * If a simplex interface, and the packet is being sent to our + * Ethernet address or a broadcast address, loopback a copy. + * XXX To make a simplex device behave exactly like a duplex + * device, we should copy in the case of sending to our own + * ethernet address (thus letting the original actually appear + * on the wire). However, we don't do that here for security + * reasons and compatibility with the original behavior. 
+ */ + if ((ifp->if_flags & IFF_SIMPLEX) && + ((*m)->m_flags & M_LOOP)) { + if (lo_dlt == 0) + dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET, &lo_dlt); + + if (lo_dlt) { + if ((*m)->m_flags & M_BCAST) { + struct mbuf *n = m_copy(*m, 0, (int)M_COPYALL); + dlil_output(lo_dlt, n, 0, ndest, 0); + } + else + { + if (bcmp(edst, ac->ac_enaddr, ETHER_ADDR_LEN) == 0) { + dlil_output(lo_dlt, *m, 0, ndest, 0); + return EJUSTRETURN; + } + } + } + } + + + /* + * Add local net header. If no space in first mbuf, + * allocate another. + */ + M_PREPEND(*m, sizeof (struct ether_header), M_DONTWAIT); + if (*m == 0) { + return (EJUSTRETURN); + } + + + eh = mtod(*m, struct ether_header *); + (void)memcpy(&eh->ether_type, ether_type, + sizeof(eh->ether_type)); + (void)memcpy(eh->ether_dhost, edst, 6); + (void)memcpy(eh->ether_shost, ac->ac_enaddr, + sizeof(eh->ether_shost)); + +#if DLIL_BLUEBOX + /* + * We're already to send. Let's check for the blue box... + */ + if (ifp->if_flags&IFF_SPLITTER) + { + (*m)->m_flags |= 0x10; + if ((*m = splitter_input(*m, ifp)) == NULL) + return EJUSTRETURN; + else + return (0); + } + else +#endif + return 0; +} + + +static +int ether_add_if(struct ifnet *ifp) +{ + u_long i; + + ifp->if_framer = ether_frameout; + ifp->if_demux = ether_demux; + + for (i=0; i < MAX_INTERFACES; i++) + if (ether_desc_blk[i].n_blocks == 0) + break; + + if (i == MAX_INTERFACES) + return EOVERFLOW; + + ether_desc_blk[i].block_ptr = _MALLOC(ETHER_DESC_BLK_SIZE, M_IFADDR, M_NOWAIT); + if (ether_desc_blk[i].block_ptr == 0) + return ENOMEM; + + ether_desc_blk[i].n_blocks = 1; + bzero(ether_desc_blk[i].block_ptr, ETHER_DESC_BLK_SIZE); + + ifp->family_cookie = i; + + return 0; +} + +static +int ether_del_if(struct ifnet *ifp) +{ + if ((ifp->family_cookie < MAX_INTERFACES) && + (ether_desc_blk[ifp->family_cookie].n_blocks)) { + FREE(ether_desc_blk[ifp->family_cookie].block_ptr, M_IFADDR); + ether_desc_blk[ifp->family_cookie].n_blocks = 0; + return 0; + } + else + return ENOENT; +} 
+ + + + +int +ether_pre_output(ifp, m0, dst_netaddr, route, type, edst, dl_tag ) + struct ifnet *ifp; + struct mbuf **m0; + struct sockaddr *dst_netaddr; + caddr_t route; + char *type; + char *edst; + u_long dl_tag; +{ + struct rtentry *rt0 = (struct rtentry *) route; + int s; + register struct mbuf *m = *m0; + register struct rtentry *rt; + register struct ether_header *eh; + int off, len = m->m_pkthdr.len; + int hlen; /* link layer header lenght */ + struct arpcom *ac = IFP2AC(ifp); + + + + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + return ENETDOWN; + + rt = rt0; + if (rt) { + if ((rt->rt_flags & RTF_UP) == 0) { + rt0 = rt = rtalloc1(dst_netaddr, 1, 0UL); + if (rt0) + rt->rt_refcnt--; + else + return EHOSTUNREACH; + } + + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_gwroute == 0) + goto lookup; + if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { + rtfree(rt); rt = rt0; + lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1, + 0UL); + if ((rt = rt->rt_gwroute) == 0) + return (EHOSTUNREACH); + } + } + + + if (rt->rt_flags & RTF_REJECT) + if (rt->rt_rmx.rmx_expire == 0 || + time_second < rt->rt_rmx.rmx_expire) + return (rt == rt0 ? EHOSTDOWN : EHOSTUNREACH); + } + + hlen = ETHER_HDR_LEN; + + /* + * Tell ether_frameout it's ok to loop packet unless negated below. 
+ */ + m->m_flags |= M_LOOP; + + switch (dst_netaddr->sa_family) { + +#if INET + case AF_INET: + if (!arpresolve(ac, rt, m, dst_netaddr, edst, rt0)) + return (EJUSTRETURN); /* if not yet resolved */ + off = m->m_pkthdr.len - m->m_len; + *(u_short *)type = htons(ETHERTYPE_IP); + break; +#endif + +#if INET6 + case AF_INET6: + if (!nd6_storelladdr(&ac->ac_if, rt, m, dst_netaddr, (u_char *)edst)) { + /* this must be impossible, so we bark */ + kprintf("nd6_storelladdr failed\n"); + return(0); + } + off = m->m_pkthdr.len - m->m_len; + *(u_short *)type = htons(ETHERTYPE_IPV6); + break; +#endif + + + case AF_UNSPEC: + m->m_flags &= ~M_LOOP; + eh = (struct ether_header *)dst_netaddr->sa_data; + (void)memcpy(edst, eh->ether_dhost, 6); + *(u_short *)type = eh->ether_type; + break; + +#if NETAT + case AF_APPLETALK: + { + eh = (struct ether_header *)dst_netaddr->sa_data; + bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, 6); + + *(u_short *)type = m->m_pkthdr.len; + } + break; + +#endif /* NETAT */ + + default: + kprintf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit, + dst_netaddr->sa_family); + + return EAFNOSUPPORT; + } + + return (0); +} + + + + + +int +ether_ioctl(dl_tag, ifp, command, data) + u_long dl_tag; + struct ifnet *ifp; + int command; + caddr_t data; +{ + struct ifaddr *ifa = (struct ifaddr *) data; + struct ifreq *ifr = (struct ifreq *) data; + int error = 0; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(TRUE); + + switch (command) { + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP; + + switch (ifa->ifa_addr->sa_family) { + + case AF_INET: + + if (ifp->if_init) + ifp->if_init(ifp->if_softc); /* before arpwhohas */ + + + arp_ifinit(IFP2AC(ifp), ifa); + + break; + + default: + break; + } + + break; + + case SIOCGIFADDR: + { + struct sockaddr *sa; + + sa = (struct sockaddr *) & ifr->ifr_data; + bcopy(IFP2AC(ifp)->ac_enaddr, + (caddr_t) sa->sa_data, ETHER_ADDR_LEN); + } + break; + + case SIOCSIFMTU: + /* + * Set the interface MTU. 
+ */ + if (ifr->ifr_mtu > ETHERMTU) { + error = EINVAL; + } else { + ifp->if_mtu = ifr->ifr_mtu; + } + break; + } + + (void) thread_funnel_set(funnel_state); + + return (error); +} + + + + +/* + * Y-adapter filter check + * The rules here: + * For Rhap: return 1 + * For Both: return 0 + * Not for Rhap: return -1 + * Multicast/Broadcast => For Both + * Atalk address registered + * filter matches => For Rhap else Not For Rhap + * IP address registered + * filter matches => For Rhap else Not For Rhap + * For Rhap + * Note this is *not* a general filter mechanism in that we know + * what we *could* be looking for. + * WARNING: this is a big-endian routine. + * Note: ARP and AARP packets are implicitly accepted for "both" + */ +int +Filter_check(struct mbuf **m0) +{ register struct BlueFilter *bf; + register unsigned char *p; + register unsigned short *s; + register unsigned long *l; + int total, flags; + struct mbuf *m; + extern struct mbuf *m_pullup(struct mbuf *, int); + extern void kprintf( const char *, ...); +#define FILTER_LEN 32 + + m = *m0; + flags = m->m_flags; + if (FILTER_LEN > m->m_pkthdr.len) + return(1); + while ((FILTER_LEN > m->m_len) && m->m_next) { + total = m->m_len + (m->m_next)->m_len; + if ((m = m_pullup(m, min(FILTER_LEN, total))) == 0) + return(-1); + } + *m0 = m; + + p = mtod(m, unsigned char *); /* Point to destination media addr */ + if (p[0] & 0x01) /* Multicast/broadcast */ + return(0); + s = (unsigned short *)p; + bf = &RhapFilter[BFS_ATALK]; +#if 0 + kprintf("!PKT: %x, %x, %x\n", s[6], s[7], s[8]); +#endif + + if (bf->BF_flags) /* Filtering Appletalk */ + { + l = (unsigned long *)&s[8]; +#if 0 + kprintf("!AT: %x, %x, %x, %x, %x, %x\n", s[6], s[7], + *l, s[10], s[13], p[30]); +#endif + if (s[6] <= ETHERMTU) + { if (s[7] == 0xaaaa) /* Could be Atalk */ + { /* Verify SNAP header */ + if (*l == 0x03080007 && s[10] == 0x809b) + { if (s[13] == bf->BF_address && + p[30] == bf->BF_node) + return(1); + } else if (*l == 0x03000000 && s[10] == 
0x80f3) + /* AARP pkts aren't net-addressed */ + return(0); + return(0); + } else /* Not for us? */ + return(0); + } /* Fall through */ + } /* Fall through */ + bf++; /* Look for IP next */ + if (bf->BF_flags) /* Filtering IP */ + { + l = (unsigned long *)&s[15]; +#if 0 + kprintf("!IP: %x, %x\n", s[6], *l); +#endif + if (s[6] > ETHERMTU) + { if (s[6] == 0x800) /* Is IP */ + { /* Verify IP address */ + if (*l == bf->BF_address) + return(1); + else /* Not for us */ + return(0); + } else if (s[6] == 0x806) + /* ARP pkts aren't net-addressed */ + return(0); + } + } + return(0); /* No filters => Accept */ +} + + + +int ether_family_init() +{ + + int i; + + if (ivedonethis) + return 0; + + ivedonethis = 1; + + + if (dlil_reg_if_modules(APPLE_IF_FAM_ETHERNET, ether_add_if, ether_del_if, + ether_add_proto, ether_del_proto, + ether_shutdown)) { + printf("WARNING: ether_family_init -- Can't register if family modules\n"); + return EIO; + } + + for (i=0; i < (LITMUS_SIZE/4); i++) + litmus_mask[i] = 0xffffffff; + + for (i=0; i < MAX_INTERFACES; i++) + ether_desc_blk[i].n_blocks = 0; + + for (i=0; i < MAX_EN_COUNT; i++) + en_at_array[i].ifp = 0; + + return 0; +} + + + +u_long ether_attach_inet(struct ifnet *ifp) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + struct dlil_demux_desc desc2; +#if INET6 + struct dlil_demux_desc desc3; +#endif + u_long ip_dl_tag=0; + u_short en_native=ETHERTYPE_IP; + u_short arp_native=ETHERTYPE_ARP; +#if INET6 + u_short en_6native=ETHERTYPE_IPV6; +#endif + int stat; + int i; + + + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET, &ip_dl_tag); + if (stat == 0) + return ip_dl_tag; + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_RAW; + desc.variants.bitmask.proto_id_length = 0; + desc.variants.bitmask.proto_id = 0; + desc.variants.bitmask.proto_id_mask = 0; + desc.native_type = (char *) &en_native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number 
= ifp->if_unit; + reg.input = new_ether_input; + reg.pre_output = ether_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = ether_ioctl; + reg.default_proto = 1; + reg.protocol_family = PF_INET; + + desc2 = desc; + desc2.native_type = (char *) &arp_native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc2, next); + +#if INET6 + desc3 = desc; + desc3.native_type = (char *) &en_6native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc3, next); +#endif + + stat = dlil_attach_protocol(®, &ip_dl_tag); + if (stat) { + printf("WARNING: ether_attach_inet can't attach ip to interface\n"); + return stat; + } + + return ip_dl_tag; +} + +void ether_attach_at(struct ifnet *ifp, u_long *at_dl_tag, u_long *aarp_dl_tag) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + struct dlil_demux_desc desc2; + u_short native = 0; /* 802.2 frames use a length here */ + int stat; + int first_empty; + int i; + + + first_empty = MAX_EN_COUNT; + for (i=0; i < MAX_EN_COUNT; i++) { + if (en_at_array[i].ifp == 0) + first_empty = i; + + if (en_at_array[i].ifp == ifp) { + en_at_array[i].ref_count++; + *at_dl_tag = *aarp_dl_tag = en_at_array[i].dl_tag; + return; + } + } + + if (first_empty == MAX_EN_COUNT) + return; + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_802_2_SNAP; + desc.variants.desc_802_2_SNAP.dsap = LLC_SNAP_LSAP; + desc.variants.desc_802_2_SNAP.ssap = LLC_SNAP_LSAP; + desc.variants.desc_802_2_SNAP.control_code = 0x03; + desc.variants.desc_802_2_SNAP.org[0] = 0x08; + desc.variants.desc_802_2_SNAP.org[1] = 0x00; + desc.variants.desc_802_2_SNAP.org[2] = 0x07; + desc.variants.desc_802_2_SNAP.protocol_type = 0x809B; + desc.native_type = (char *) &native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = new_ether_input; + reg.pre_output = ether_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = ether_ioctl; + reg.default_proto = 0; + reg.protocol_family = 
PF_APPLETALK; + + desc2 = desc; + desc2.variants.desc_802_2_SNAP.protocol_type = 0x80F3; + desc2.variants.desc_802_2_SNAP.org[0] = 0; + desc2.variants.desc_802_2_SNAP.org[1] = 0; + desc2.variants.desc_802_2_SNAP.org[2] = 0; + + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc2, next); + + stat = dlil_attach_protocol(®, at_dl_tag); + if (stat) { + printf("WARNING: ether_attach_at can't attach at to interface\n"); + return; + } + + *aarp_dl_tag = *at_dl_tag; + + en_at_array[first_empty].ifp = ifp; + en_at_array[first_empty].dl_tag = *at_dl_tag; + en_at_array[first_empty].ref_count = 1; + +} /* ether_attach_at */ + + +void ether_detach_at(struct ifnet *ifp) +{ + int i; + + for (i=0; i < MAX_EN_COUNT; i++) { + if (en_at_array[i].ifp == ifp) + break; + } + + if (i < MAX_EN_COUNT) { + if (en_at_array[i].ref_count > 1) + en_at_array[i].ref_count--; + else { + if (en_at_array[i].ref_count == 1) { + dlil_detach_protocol(en_at_array[i].dl_tag); + en_at_array[i].ifp = 0; + } + } + } +} diff --git a/bsd/net/dlil_pvt.h b/bsd/net/dlil_pvt.h new file mode 100644 index 000000000..273614cf8 --- /dev/null +++ b/bsd/net/dlil_pvt.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef DLIL_PVT_H +#define DLIL_PVT_H + +#include +#include + +struct dlil_if_filterq_entry { + TAILQ_ENTRY(dlil_if_filterq_entry) que; + struct dlil_interface_filter_str if_filter; +}; + + +struct dlil_family_mod_str { + TAILQ_ENTRY(dlil_family_mod_str) dl_fam_next; + char *interface_family; + int (*add_if)(struct ifnet_ptr *ifp); + int (*del_if)(struct ifnet *ifp); + int (*add_proto)(TAILQ_HEAD(ddesc_head_name, dlil_demux_desc) demux_desc_head, + struct if_proto *proto); + int (*del_proto)(struct if_proto *proto); +} + + + +#endif diff --git a/bsd/net/ether_at_pr_module.c b/bsd/net/ether_at_pr_module.c new file mode 100644 index 000000000..8d2785094 --- /dev/null +++ b/bsd/net/ether_at_pr_module.c @@ -0,0 +1,469 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#if NETAT +extern struct ifqueue atalkintrq; +#endif + + +#if BRIDGE +#include +#endif + +/* #include "vlan.h" */ +#if NVLAN > 0 +#include +#endif /* NVLAN > 0 */ + +static +u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +#define IFP2AC(IFP) ((struct arpcom *)IFP) + + +struct dl_es_at_entry +{ + struct ifnet *ifp; + u_long dl_tag; + int ref_count; +}; + + +/* + * Temp static for protocol registration XXX + */ + +#define MAX_EN_COUNT 30 + +static struct dl_es_at_entry en_at_array[MAX_EN_COUNT]; + +/* + * Process a received Ethernet packet; + * the packet is in the mbuf chain m without + * the ether header, which is provided separately. + */ +int +at_ether_input(m, frame_header, ifp, dl_tag, sync_ok) + struct mbuf *m; + char *frame_header; + struct ifnet *ifp; + u_long dl_tag; + int sync_ok; + +{ + register struct ether_header *eh = (struct ether_header *) frame_header; + register struct ifqueue *inq=0; + u_short ether_type; + int s; + u_int16_t ptype = -1; + unsigned char buf[18]; + +#if NETAT + register struct llc *l; +#endif + + if ((ifp->if_flags & IFF_UP) == 0) { + m_freem(m); + return EJUSTRETURN; + } + + ifp->if_lastchange = time; + + if (eh->ether_dhost[0] & 1) { + if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost, + sizeof(etherbroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + if (m->m_flags & (M_BCAST|M_MCAST)) + ifp->if_imcasts++; + + ether_type = ntohs(eh->ether_type); + +#if NVLAN > 0 + if (ether_type == vlan_proto) { + if (vlan_input(eh, m) < 0) + ifp->if_data.ifi_noproto++; + return EJUSTRETURN; + } +#endif /* NVLAN > 0 */ + + if (ether_type > ETHERMTU) + return ENOENT; + +#if NETAT + l = mtod(m, struct llc *); + + switch (l->llc_dsap) { + case LLC_SNAP_LSAP: + + /* 
Temporary hack: check for AppleTalk and AARP packets */ + /* WARNING we're checking only on the "ether_type" (the 2 bytes + * of the SNAP header. This shouldn't be a big deal, + * AppleTalk pat_input is making sure we have the right packets + * because it needs to discrimante AARP from EtherTalk packets. + */ + + if (l->llc_ssap == LLC_SNAP_LSAP && + l->llc_un.type_snap.control == 0x03) { + +#ifdef APPLETALK_DEBUG + printf("new_ether_input: SNAP Cntrol type=0x%x Src=%s\n", + l->llc_un.type_snap.ether_type, + ether_sprintf(buf, &eh->ether_shost)); + printf(" Dst=%s\n", + ether_sprintf(buf, &eh->ether_dhost)); +#endif /* APPLETALK_DEBUG */ + + if ((l->llc_un.type_snap.ether_type == 0x809B) || + (l->llc_un.type_snap.ether_type == 0x80F3)) { + + + /* + * note: for AppleTalk we need to pass the enet header of the + * packet up stack. To do so, we made sure in that the FULL packet + * is copied in the mbuf by the mace driver, and only the m_data and + * length have been shifted to make IP and the other guys happy. 
+ */ + + m->m_data -= sizeof(*eh); + m->m_len += sizeof(*eh); + m->m_pkthdr.len += sizeof(*eh); +#ifdef APPLETALK_DEBUG + l == (struct llc *)(eh+1); + if (l->llc_un.type_snap.ether_type == 0x80F3) { + kprintf("new_ether_input: RCV AppleTalk type=0x%x Src=%s\n", + l->llc_un.type_snap.ether_type, + ether_sprintf(buf, &eh->ether_shost)); + kprintf(" Dst=%s\n", + ether_sprintf(buf, &eh->ether_dhost)); + } +#endif /* APPLETALK_DEBUG */ + schednetisr(NETISR_APPLETALK); + inq = &atalkintrq ; + + break; + } + } + + break; + + + default: + return ENOENT; + } + + + if (inq == 0) + return ENOENT; + + s = splimp(); + if (IF_QFULL(inq)) { + IF_DROP(inq); + m_freem(m); + splx(s); + return EJUSTRETURN; + } else + IF_ENQUEUE(inq, m); + splx(s); + return 0; +#else + return ENOENT; +#endif /* NETAT */ +} + + + +int +ether_pre_output(ifp, m0, dst_netaddr, route, type, edst, dl_tag ) + struct ifnet *ifp; + struct mbuf **m0; + struct sockaddr *dst_netaddr; + caddr_t route; + char *type; + char *edst; + u_long dl_tag; +{ + int s; + register struct mbuf *m = *m0; + register struct rtentry *rt; + register struct ether_header *eh; + int off, len = m->m_pkthdr.len; + int hlen; /* link layer header lenght */ + struct arpcom *ac = IFP2AC(ifp); + + + + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + return ENETDOWN; + + hlen = ETHER_HDR_LEN; + + /* + * Tell ether_frameout it's ok to loop packet unless negated below. 
+ */ + m->m_flags |= M_LOOP; + + switch (dst_netaddr->sa_family) { + case AF_UNSPEC: + m->m_flags &= ~M_LOOP; + eh = (struct ether_header *)dst_netaddr->sa_data; + (void)memcpy(edst, eh->ether_dhost, 6); + *(u_short *)type = eh->ether_type; + break; + + + case AF_APPLETALK: + { + eh = (struct ether_header *)dst_netaddr->sa_data; + bcopy((caddr_t)eh->ether_dhost, (caddr_t)edst, 6); + + *(u_short *)type = m->m_pkthdr.len; + } + break; + + + default: + kprintf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit, + dst_netaddr->sa_family); + + return EAFNOSUPPORT; + } + + return (0); +} + + + + + +int +ether_prmod_ioctl(dl_tag, ifp, command, data) + u_long dl_tag; + struct ifnet *ifp; + int command; + caddr_t data; +{ + struct ifaddr *ifa = (struct ifaddr *) data; + struct ifreq *ifr = (struct ifreq *) data; + int error = 0; + boolean_t funnel_state; + struct arpcom *ac = (struct arpcom *) ifp; + struct sockaddr_dl *sdl; + struct sockaddr_in *sin; + u_char *e_addr; + + + funnel_state = thread_funnel_set(network_flock, TRUE); + + switch (command) { + + case SIOCSIFADDR: + if ((ifp->if_flags & IFF_RUNNING) == 0) { + ifp->if_flags |= IFF_UP; + dlil_ioctl(0, ifp, SIOCSIFFLAGS, (caddr_t) 0); + } + + break; + + case SIOCGIFADDR: + { + struct sockaddr *sa; + + sa = (struct sockaddr *) & ifr->ifr_data; + bcopy(IFP2AC(ifp)->ac_enaddr, + (caddr_t) sa->sa_data, ETHER_ADDR_LEN); + } + break; + + case SIOCSIFMTU: + /* + * Set the interface MTU. 
+ */ + if (ifr->ifr_mtu > ETHERMTU) { + error = EINVAL; + } else { + ifp->if_mtu = ifr->ifr_mtu; + } + break; + + default: + return EOPNOTSUPP; + } + + (void) thread_funnel_set(network_flock, funnel_state); + + return (error); +} + + + +void +ether_attach_at(struct ifnet *ifp, u_long *at_dl_tag, u_long *aarp_dl_tag) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + struct dlil_demux_desc desc2; + u_short native = 0; /* 802.2 frames use a length here */ + int stat; + int first_empty; + int i; + + + first_empty = MAX_EN_COUNT; + for (i=0; i < MAX_EN_COUNT; i++) { + if (en_at_array[i].ifp == 0) + first_empty = i; + + if (en_at_array[i].ifp == ifp) { + en_at_array[i].ref_count++; + *at_dl_tag = *aarp_dl_tag = en_at_array[i].dl_tag; + return; + } + } + + if (first_empty == MAX_EN_COUNT) + return; + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_802_2_SNAP; + desc.variants.desc_802_2_SNAP.dsap = LLC_SNAP_LSAP; + desc.variants.desc_802_2_SNAP.ssap = LLC_SNAP_LSAP; + desc.variants.desc_802_2_SNAP.control_code = 0x03; + desc.variants.desc_802_2_SNAP.org[0] = 0x08; + desc.variants.desc_802_2_SNAP.org[1] = 0x00; + desc.variants.desc_802_2_SNAP.org[2] = 0x07; + desc.variants.desc_802_2_SNAP.protocol_type = 0x809B; + desc.native_type = (char *) &native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = at_ether_input; + reg.pre_output = ether_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = ether_prmod_ioctl; + reg.default_proto = 0; + reg.protocol_family = PF_APPLETALK; + + desc2 = desc; + desc2.variants.desc_802_2_SNAP.protocol_type = 0x80F3; + desc2.variants.desc_802_2_SNAP.org[0] = 0; + desc2.variants.desc_802_2_SNAP.org[1] = 0; + desc2.variants.desc_802_2_SNAP.org[2] = 0; + + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc2, next); + + stat = dlil_attach_protocol(®, at_dl_tag); + if (stat) { + printf("WARNING: ether_attach_at can't attach at to 
interface\n"); + return; + } + + *aarp_dl_tag = *at_dl_tag; + + en_at_array[first_empty].ifp = ifp; + en_at_array[first_empty].dl_tag = *at_dl_tag; + en_at_array[first_empty].ref_count = 1; + +} /* ether_attach_at */ + + +void +ether_detach_at(struct ifnet *ifp) +{ + int i; + + for (i=0; i < MAX_EN_COUNT; i++) { + if (en_at_array[i].ifp == ifp) + break; + } + + if (i < MAX_EN_COUNT) { + if (en_at_array[i].ref_count > 1) + en_at_array[i].ref_count--; + else { + if (en_at_array[i].ref_count == 1) { + dlil_detach_protocol(en_at_array[i].dl_tag); + en_at_array[i].ifp = 0; + } + } + } +} diff --git a/bsd/net/ether_if_module.c b/bsd/net/ether_if_module.c new file mode 100644 index 000000000..2d1a9f52a --- /dev/null +++ b/bsd/net/ether_if_module.c @@ -0,0 +1,618 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* +#if INET +#include +#include + +#include +#include +#endif +*/ + +#include + +#include + + +#if LLC && CCITT +extern struct ifqueue pkintrq; +#endif + +/* General stuff from if_ethersubr.c - may not need some of it */ + +#include +#if NETAT +extern struct ifqueue atalkintrq; +#endif + + +#if BRIDGE +#include +#endif + +/* #include "vlan.h" */ +#if NVLAN > 0 +#include +#endif /* NVLAN > 0 */ + +static u_long lo_dlt = 0; +static ivedonethis = 0; + +#define IFP2AC(IFP) ((struct arpcom *)IFP) + +u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + + +#define DB_HEADER_SIZE 20 +struct en_desc { + short total_len; + u_short ethertype; + u_long dl_tag; + struct ifnet *ifp; + struct if_proto *proto; + u_long proto_id_length; + u_long proto_id_data[8]; /* probably less - proto-id and bitmasks */ +}; + +#define LITMUS_SIZE 16 +#define ETHER_DESC_BLK_SIZE 50 +#define MAX_INTERFACES 50 + +/* + * Statics for demux module + */ + +struct ether_desc_blk_str { + u_long n_blocks; + u_long *block_ptr; +}; + +struct dl_es_at_entry +{ + struct ifnet *ifp; + u_long dl_tag; + int ref_count; +}; + + +static struct ether_desc_blk_str ether_desc_blk[MAX_INTERFACES]; +static u_long litmus_mask[LITMUS_SIZE]; +static u_long litmus_length = 0; + + +/* + * Temp static for protocol registration XXX + */ + +#define MAX_EN_COUNT 30 + +static struct dl_es_at_entry en_at_array[MAX_EN_COUNT]; + +/* + * This could be done below in-line with heavy casting, but the pointer arithmetic is + * prone to error. 
+ */ + +static +int desc_in_bounds(block, current_ptr, offset_length) + u_int block; + char *current_ptr; + u_long offset_length; +{ + u_long end_of_block; + u_long current_ptr_tmp; + + current_ptr_tmp = (u_long) current_ptr; + end_of_block = (u_long) ether_desc_blk[block].block_ptr; + end_of_block += (ETHER_DESC_BLK_SIZE * ether_desc_blk[block].n_blocks); + if ((current_ptr_tmp + offset_length) < end_of_block) + return 1; + else + return 0; +} + + +/* + * Release all descriptor entries owned by this dl_tag (there may be several). + * Setting the dl_tag to 0 releases the entry. Eventually we should compact-out + * the unused entries. + */ +static +int ether_del_proto(struct if_proto *proto, u_long dl_tag) +{ + char *current_ptr = (char *) ether_desc_blk[proto->ifp->family_cookie].block_ptr; + struct en_desc *ed; + int i; + int found = 0; + + ed = (struct en_desc *) current_ptr; + + while(ed->total_len) { + if (ed->dl_tag == dl_tag) { + found = 1; + ed->dl_tag = 0; + } + + current_ptr += ed->total_len; + ed = (struct en_desc *) current_ptr; + } + } + + + +static +int ether_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag) +{ + char *current_ptr; + struct dlil_demux_desc *desc; + u_long id_length; /* IN LONGWORDS!!! 
*/ + struct en_desc *ed; + u_long *bitmask; + u_long *proto_id; + int i; + short total_length; + u_long block_count; + u_long *tmp; + + + TAILQ_FOREACH(desc, desc_head, next) { + switch (desc->type) + { + case DLIL_DESC_RAW: + id_length = desc->variants.bitmask.proto_id_length; + break; + + case DLIL_DESC_802_2: + id_length = 1; + break; + + case DLIL_DESC_802_2_SNAP: + id_length = 2; + break; + + default: + return EINVAL; + } + +restart: + block_count = ether_desc_blk[proto->ifp->family_cookie].n_blocks; + current_ptr = (char *) ether_desc_blk[proto->ifp->family_cookie].block_ptr; + ed = (struct en_desc *) current_ptr; + total_length = ((id_length << 2) * 2) + DB_HEADER_SIZE; + + while ((ed->total_len) && (desc_in_bounds(proto->ifp->family_cookie, + current_ptr, total_length))) { + if ((ed->dl_tag == 0) && (total_length <= ed->total_len)) + break; + else + current_ptr += *(short *)current_ptr; + + ed = (struct en_desc *) current_ptr; + } + + if (!desc_in_bounds(proto->ifp->family_cookie, current_ptr, total_length)) { + + tmp = _MALLOC((ETHER_DESC_BLK_SIZE * (block_count + 1)), + M_IFADDR, M_NOWAIT); + if (tmp == 0) { + /* + * Remove any previous descriptors set in the call. 
+ */ + ether_del_proto(proto, dl_tag); + return ENOMEM; + } + + bzero(tmp, ETHER_DESC_BLK_SIZE * (block_count + 1)); + bcopy(ether_desc_blk[proto->ifp->family_cookie].block_ptr, + tmp, (ETHER_DESC_BLK_SIZE * block_count)); + FREE(ether_desc_blk[proto->ifp->family_cookie].block_ptr, M_IFADDR); + ether_desc_blk[proto->ifp->family_cookie].n_blocks = block_count + 1; + ether_desc_blk[proto->ifp->family_cookie].block_ptr = tmp; + goto restart; + } + + if (ed->total_len == 0) + ed->total_len = total_length; + ed->ethertype = *((u_short *) desc->native_type); + + ed->dl_tag = dl_tag; + ed->proto = proto; + ed->proto_id_length = id_length; + ed->ifp = proto->ifp; + + switch (desc->type) + { + case DLIL_DESC_RAW: + bcopy(desc->variants.bitmask.proto_id, &ed->proto_id_data[0], (id_length << 2) ); + bcopy(desc->variants.bitmask.proto_id_mask, &ed->proto_id_data[id_length], + (id_length << 2)); + break; + + case DLIL_DESC_802_2: + ed->proto_id_data[0] = 0; + bcopy(&desc->variants.desc_802_2, &ed->proto_id_data[0], 3); + ed->proto_id_data[1] = 0xffffff00; + break; + + case DLIL_DESC_802_2_SNAP: + /* XXX Add verification of fixed values here */ + + ed->proto_id_data[0] = 0; + ed->proto_id_data[1] = 0; + bcopy(&desc->variants.desc_802_2_SNAP, &ed->proto_id_data[0], 8); + ed->proto_id_data[2] = 0xffffffff; + ed->proto_id_data[3] = 0xffffffff;; + break; + } + + if (id_length) { + proto_id = (u_long *) &ed->proto_id_data[0]; + bitmask = (u_long *) &ed->proto_id_data[id_length]; + for (i=0; i < (id_length); i++) { + litmus_mask[i] &= bitmask[i]; + litmus_mask[i] &= proto_id[i]; + } + if (id_length > litmus_length) + litmus_length = id_length; + } + } + + return 0; +} + + +static +int ether_shutdown() +{ + return 0; +} + + + + + +int ether_demux(ifp, m, frame_header, proto) + struct ifnet *ifp; + struct mbuf *m; + char *frame_header; + struct if_proto **proto; + +{ + register struct ether_header *eh = (struct ether_header *)frame_header; + u_short ether_type; + char *current_ptr = 
(char *) ether_desc_blk[ifp->family_cookie].block_ptr; + struct dlil_demux_desc *desc; + register u_long temp; + u_long *data; + register struct if_proto *ifproto; + u_long i; + struct en_desc *ed; + + + if (eh->ether_dhost[0] & 1) { + if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost, + sizeof(etherbroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + + ether_type = ntohs(eh->ether_type); + + /* + * Search through the connected protocols for a match. + */ + + + data = mtod(m, u_long *); + ed = (struct en_desc *) current_ptr; + while (desc_in_bounds(ifp->family_cookie, current_ptr, DB_HEADER_SIZE)) { + if (ed->total_len == 0) + break; + + if ((ed->dl_tag != 0) && (ed->ifp == ifp) && + ((ed->ethertype == ntohs(eh->ether_type)) || (ed->ethertype == 0))) { + if (ed->proto_id_length) { + for (i=0; i < (ed->proto_id_length); i++) { + temp = ntohs(data[i]) & ed->proto_id_data[ed->proto_id_length + i]; + if ((temp ^ ed->proto_id_data[i])) + break; + } + + if (i >= (ed->proto_id_length)) { + *proto = ed->proto; + return 0; + } + } + else { + *proto = ed->proto; + return 0; + } + } + current_ptr += ed->total_len; + ed = (struct en_desc *) current_ptr; + } + +/* + kprintf("ether_demux - No match for <%x><%x><%x><%x><%x><%x><%x<%x>\n", + eh->ether_type,data[0], data[1], data[2], data[3], data[4],data[5],data[6]); +*/ + + return ENOENT; +} + + + +/* + * Ethernet output routine. + * Encapsulate a packet of type family for the local net. + * Use trailer local net encapsulation if enough data in first + * packet leaves a multiple of 512 bytes of data in remainder. + * Assumes that ifp is actually pointer to arpcom structure. 
+ */ +int +ether_frameout(ifp, m, ndest, edst, ether_type) + register struct ifnet *ifp; + struct mbuf **m; + struct sockaddr *ndest; + char *edst; + char *ether_type; +{ + register struct ether_header *eh; + int hlen; /* link layer header lenght */ + struct arpcom *ac = IFP2AC(ifp); + + + hlen = ETHER_HDR_LEN; + + /* + * If a simplex interface, and the packet is being sent to our + * Ethernet address or a broadcast address, loopback a copy. + * XXX To make a simplex device behave exactly like a duplex + * device, we should copy in the case of sending to our own + * ethernet address (thus letting the original actually appear + * on the wire). However, we don't do that here for security + * reasons and compatibility with the original behavior. + */ + if ((ifp->if_flags & IFF_SIMPLEX) && + ((*m)->m_flags & M_LOOP)) { + if (lo_dlt == 0) + dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET, &lo_dlt); + + if (lo_dlt) { + if ((*m)->m_flags & M_BCAST) { + struct mbuf *n = m_copy(*m, 0, (int)M_COPYALL); + dlil_output(lo_dlt, n, 0, ndest, 0); + } + else + { + if (bcmp(edst, ac->ac_enaddr, ETHER_ADDR_LEN) == 0) { + dlil_output(lo_dlt, *m, 0, ndest, 0); + return EJUSTRETURN; + } + } + } + } + + + /* + * Add local net header. If no space in first mbuf, + * allocate another. 
+ */ + M_PREPEND(*m, sizeof (struct ether_header), M_DONTWAIT); + if (*m == 0) { + return (EJUSTRETURN); + } + + + eh = mtod(*m, struct ether_header *); + (void)memcpy(&eh->ether_type, ether_type, + sizeof(eh->ether_type)); + (void)memcpy(eh->ether_dhost, edst, 6); + (void)memcpy(eh->ether_shost, ac->ac_enaddr, + sizeof(eh->ether_shost)); + + return 0; +} + + +static +int ether_add_if(struct ifnet *ifp) +{ + u_long i; + + ifp->if_framer = ether_frameout; + ifp->if_demux = ether_demux; + ifp->if_event = 0; + + for (i=0; i < MAX_INTERFACES; i++) + if (ether_desc_blk[i].n_blocks == 0) + break; + + if (i == MAX_INTERFACES) + return ENOMEM; + + ether_desc_blk[i].block_ptr = _MALLOC(ETHER_DESC_BLK_SIZE, M_IFADDR, M_NOWAIT); + if (ether_desc_blk[i].block_ptr == 0) + return ENOMEM; + + ether_desc_blk[i].n_blocks = 1; + bzero(ether_desc_blk[i].block_ptr, ETHER_DESC_BLK_SIZE); + + ifp->family_cookie = i; + + return 0; +} + +static +int ether_del_if(struct ifnet *ifp) +{ + if ((ifp->family_cookie < MAX_INTERFACES) && + (ether_desc_blk[ifp->family_cookie].n_blocks)) { + FREE(ether_desc_blk[ifp->family_cookie].block_ptr, M_IFADDR); + ether_desc_blk[ifp->family_cookie].n_blocks = 0; + return 0; + } + else + return ENOENT; +} + + + +int +ether_ifmod_ioctl(ifp, command, data) + struct ifnet *ifp; + u_long command; + caddr_t data; +{ + struct rslvmulti_req *rsreq = (struct rslvmulti_req *) data; + int error = 0; + struct sockaddr_dl *sdl; + struct sockaddr_in *sin; + u_char *e_addr; + + + switch (command) + { + case SIOCRSLVMULTI: + switch(rsreq->sa->sa_family) + { + case AF_UNSPEC: + /* AppleTalk uses AF_UNSPEC for multicast registration. + * No mapping needed. Just check that it's a valid MC address. + */ + e_addr = &rsreq->sa->sa_data[0]; + if ((e_addr[0] & 1) != 1) + return EADDRNOTAVAIL; + *rsreq->llsa = 0; + return EJUSTRETURN; + + + case AF_LINK: + /* + * No mapping needed. Just check that it's a valid MC address. 
+ */ + sdl = (struct sockaddr_dl *)rsreq->sa; + e_addr = LLADDR(sdl); + if ((e_addr[0] & 1) != 1) + return EADDRNOTAVAIL; + *rsreq->llsa = 0; + return EJUSTRETURN; + + default: + return EAFNOSUPPORT; + } + + default: + return EOPNOTSUPP; + } +} + + +int ether_family_init() +{ + int i; + struct dlil_ifmod_reg_str ifmod_reg; + + if (ivedonethis) + return 0; + + ivedonethis = 1; + + ifmod_reg.add_if = ether_add_if; + ifmod_reg.del_if = ether_del_if; + ifmod_reg.add_proto = ether_add_proto; + ifmod_reg.del_proto = ether_del_proto; + ifmod_reg.ifmod_ioctl = ether_ifmod_ioctl; + ifmod_reg.shutdown = ether_shutdown; + + if (dlil_reg_if_modules(APPLE_IF_FAM_ETHERNET, &ifmod_reg)) { + printf("WARNING: ether_family_init -- Can't register if family modules\n"); + return EIO; + } + + for (i=0; i < (LITMUS_SIZE/4); i++) + litmus_mask[i] = 0xffffffff; + + for (i=0; i < MAX_INTERFACES; i++) + ether_desc_blk[i].n_blocks = 0; + + for (i=0; i < MAX_EN_COUNT; i++) + en_at_array[i].ifp = 0; + + return 0; +} diff --git a/bsd/net/ether_inet6_pr_module.c b/bsd/net/ether_inet6_pr_module.c new file mode 100644 index 000000000..66aea6d16 --- /dev/null +++ b/bsd/net/ether_inet6_pr_module.c @@ -0,0 +1,423 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if INET6 +#include +#include +#endif + + + +#include +#include + +#include + + +#if LLC && CCITT +extern struct ifqueue pkintrq; +#endif + + +#if BRIDGE +#include +#endif + +/* #include "vlan.h" */ +#if NVLAN > 0 +#include +#endif /* NVLAN > 0 */ + + +extern struct ifnet_blue *blue_if; +extern struct mbuf *splitter_input(struct mbuf *, struct ifnet *); + +static u_long lo_dlt = 0; +static ivedonethis = 0; +static u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +#define IFP2AC(IFP) ((struct arpcom *)IFP) + + + + +/* + * Process a received Ethernet packet; + * the packet is in the mbuf chain m without + * the ether header, which is provided separately. + */ +int +inet6_ether_input(m, frame_header, ifp, dl_tag, sync_ok) + struct mbuf *m; + char *frame_header; + struct ifnet *ifp; + u_long dl_tag; + int sync_ok; + +{ + register struct ether_header *eh = (struct ether_header *) frame_header; + register struct ifqueue *inq=0; + u_short ether_type; + int s; + u_int16_t ptype = -1; + unsigned char buf[18]; + + + + if ((ifp->if_flags & IFF_UP) == 0) { + m_freem(m); + return EJUSTRETURN; + } + + ifp->if_lastchange = time; + + if (eh->ether_dhost[0] & 1) { + if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost, + sizeof(etherbroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + if (m->m_flags & (M_BCAST|M_MCAST)) + ifp->if_imcasts++; + + ether_type = ntohs(eh->ether_type); + + + switch (ether_type) { + + case ETHERTYPE_IPV6: + schednetisr(NETISR_IPV6); + inq = &ip6intrq; + break; + + default: { + return ENOENT; + } + } + + if (inq == 0) + return ENOENT; + + s = splimp(); + if (IF_QFULL(inq)) { + IF_DROP(inq); + m_freem(m); + splx(s); + return EJUSTRETURN; + } else + IF_ENQUEUE(inq, m); + 
splx(s); + + + return 0; +} + + + + +int +inet6_ether_pre_output(ifp, m0, dst_netaddr, route, type, edst, dl_tag ) + struct ifnet *ifp; + struct mbuf **m0; + struct sockaddr *dst_netaddr; + caddr_t route; + char *type; + char *edst; + u_long dl_tag; +{ + struct rtentry *rt0 = (struct rtentry *) route; + int s; + register struct mbuf *m = *m0; + register struct rtentry *rt; + register struct ether_header *eh; + int off, len = m->m_pkthdr.len; + int hlen; /* link layer header lenght */ + struct arpcom *ac = IFP2AC(ifp); + + + + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + return ENETDOWN; + + rt = rt0; + if (rt) { + if ((rt->rt_flags & RTF_UP) == 0) { + rt0 = rt = rtalloc1(dst_netaddr, 1, 0UL); + if (rt0) + rt->rt_refcnt--; + else + return EHOSTUNREACH; + } + + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_gwroute == 0) + goto lookup; + if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { + rtfree(rt); rt = rt0; + lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1, + 0UL); + if ((rt = rt->rt_gwroute) == 0) + return (EHOSTUNREACH); + } + } + + + if (rt->rt_flags & RTF_REJECT) + if (rt->rt_rmx.rmx_expire == 0 || + time_second < rt->rt_rmx.rmx_expire) + return (rt == rt0 ? EHOSTDOWN : EHOSTUNREACH); + } + + hlen = ETHER_HDR_LEN; + + /* + * Tell ether_frameout it's ok to loop packet unless negated below. 
+ */ + m->m_flags |= M_LOOP; + + switch (dst_netaddr->sa_family) { + + + case AF_INET6: + if (!nd6_storelladdr(&ac->ac_if, rt, m, dst_netaddr, (u_char *)edst)) { + /* this must be impossible, so we bark */ + printf("nd6_storelladdr failed\n"); + return(0); + } + off = m->m_pkthdr.len - m->m_len; + *(u_short *)type = htons(ETHERTYPE_IPV6); + break; + + default: + printf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit, + dst_netaddr->sa_family); + + return EAFNOSUPPORT; + } + + return (0); +} + + +int +ether_inet6_prmod_ioctl(dl_tag, ifp, command, data) + u_long dl_tag; + struct ifnet *ifp; + int command; + caddr_t data; +{ + struct ifaddr *ifa = (struct ifaddr *) data; + struct ifreq *ifr = (struct ifreq *) data; + struct rslvmulti_req *rsreq = (struct rslvmulti_req *) data; + int error = 0; + boolean_t funnel_state; + struct arpcom *ac = (struct arpcom *) ifp; + struct sockaddr_dl *sdl; + struct sockaddr_in *sin; + struct sockaddr_in6 *sin6; + + u_char *e_addr; + + + funnel_state = thread_funnel_set(TRUE); + + switch (command) { + case SIOCRSLVMULTI: { + switch(rsreq->sa->sa_family) { + + case AF_INET6: + sin6 = (struct sockaddr_in6 *)rsreq->sa; + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { + /* + * An IP6 address of 0 means listen to all + * of the Ethernet multicast address used for IP6. + * (This is used for multicast routers.) 
+ */ + ifp->if_flags |= IFF_ALLMULTI; + *rsreq->llsa = 0; + return 0; + } + MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR, + M_WAITOK); + sdl->sdl_len = sizeof *sdl; + sdl->sdl_family = AF_LINK; + sdl->sdl_index = ifp->if_index; + sdl->sdl_type = IFT_ETHER; + sdl->sdl_nlen = 0; + sdl->sdl_alen = ETHER_ADDR_LEN; + sdl->sdl_slen = 0; + e_addr = LLADDR(sdl); + ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr); + printf("ether_resolvemulti AF_INET6 Adding %x:%x:%x:%x:%x:%x\n", + e_addr[0], e_addr[1], e_addr[2], e_addr[3], e_addr[4], e_addr[5]); + *rsreq->llsa = (struct sockaddr *)sdl; + return 0; + + default: + /* + * Well, the text isn't quite right, but it's the name + * that counts... + */ + return EAFNOSUPPORT; + } + + } + case SIOCSIFADDR: + if ((ifp->if_flags & IFF_RUNNING) == 0) { + ifp->if_flags |= IFF_UP; + dlil_ioctl(0, ifp, SIOCSIFFLAGS, (caddr_t) 0); + } + + break; + + case SIOCGIFADDR: + { + struct sockaddr *sa; + + sa = (struct sockaddr *) & ifr->ifr_data; + bcopy(IFP2AC(ifp)->ac_enaddr, + (caddr_t) sa->sa_data, ETHER_ADDR_LEN); + } + break; + + case SIOCSIFMTU: + /* + * Set the interface MTU. 
+ */ + if (ifr->ifr_mtu > ETHERMTU) { + error = EINVAL; + } else { + ifp->if_mtu = ifr->ifr_mtu; + } + break; + + default: + return EOPNOTSUPP; + } + + (void) thread_funnel_set(funnel_state); + + return (error); +} + + + + + +u_long ether_attach_inet6(struct ifnet *ifp) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + u_long ip_dl_tag=0; + u_short en_6native=ETHERTYPE_IPV6; + int stat; + int i; + + + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET6, &ip_dl_tag); + if (stat == 0) + return ip_dl_tag; + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_RAW; + desc.variants.bitmask.proto_id_length = 0; + desc.variants.bitmask.proto_id = 0; + desc.variants.bitmask.proto_id_mask = 0; + desc.native_type = (char *) &en_6native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = inet6_ether_input; + reg.pre_output = inet6_ether_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = ether_inet6_prmod_ioctl; + reg.default_proto = 1; + reg.protocol_family = PF_INET6; + + stat = dlil_attach_protocol(®, &ip_dl_tag); + if (stat) { + printf("WARNING: ether_attach_inet6 can't attach ip to interface\n"); + return stat; + } + + return ip_dl_tag; +} diff --git a/bsd/net/ether_inet_pr_module.c b/bsd/net/ether_inet_pr_module.c new file mode 100644 index 000000000..dd029a0fa --- /dev/null +++ b/bsd/net/ether_inet_pr_module.c @@ -0,0 +1,468 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + + + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#if LLC && CCITT +extern struct ifqueue pkintrq; +#endif + + +#if BRIDGE +#include +#endif + +/* #include "vlan.h" */ +#if NVLAN > 0 +#include +#endif /* NVLAN > 0 */ + +static u_long lo_dlt = 0; +static ivedonethis = 0; +static u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +#define IFP2AC(IFP) ((struct arpcom *)IFP) + + + + +/* + * Process a received Ethernet packet; + * the packet is in the mbuf chain m without + * the ether header, which is provided separately. 
+ */ +int +inet_ether_input(m, frame_header, ifp, dl_tag, sync_ok) + struct mbuf *m; + char *frame_header; + struct ifnet *ifp; + u_long dl_tag; + int sync_ok; + +{ + register struct ether_header *eh = (struct ether_header *) frame_header; + register struct ifqueue *inq=0; + u_short ether_type; + int s; + u_int16_t ptype = -1; + unsigned char buf[18]; + +#if ISO || LLC || NETAT + register struct llc *l; +#endif + + if ((ifp->if_flags & IFF_UP) == 0) { + m_freem(m); + return EJUSTRETURN; + } + + ifp->if_lastchange = time; + + if (eh->ether_dhost[0] & 1) { + if (bcmp((caddr_t)etherbroadcastaddr, (caddr_t)eh->ether_dhost, + sizeof(etherbroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + } + if (m->m_flags & (M_BCAST|M_MCAST)) + ifp->if_imcasts++; + + ether_type = ntohs(eh->ether_type); + +#if NVLAN > 0 + if (ether_type == vlan_proto) { + if (vlan_input(eh, m) < 0) + ifp->if_data.ifi_noproto++; + return EJUSTRETURN; + } +#endif /* NVLAN > 0 */ + + switch (ether_type) { + + case ETHERTYPE_IP: + if (ipflow_fastforward(m)) + return EJUSTRETURN; + ptype = mtod(m, struct ip *)->ip_p; + if ((sync_ok == 0) || + (ptype != IPPROTO_TCP && ptype != IPPROTO_UDP)) { + schednetisr(NETISR_IP); + } + + inq = &ipintrq; + break; + + case ETHERTYPE_ARP: + schednetisr(NETISR_ARP); + inq = &arpintrq; + break; + + default: { + return ENOENT; + } + } + + if (inq == 0) + return ENOENT; + + s = splimp(); + if (IF_QFULL(inq)) { + IF_DROP(inq); + m_freem(m); + splx(s); + return EJUSTRETURN; + } else + IF_ENQUEUE(inq, m); + splx(s); + + if ((sync_ok) && + (ptype == IPPROTO_TCP || ptype == IPPROTO_UDP)) { + extern void ipintr(void); + + s = splnet(); + ipintr(); + splx(s); + } + + return 0; +} + + + + +int +inet_ether_pre_output(ifp, m0, dst_netaddr, route, type, edst, dl_tag ) + struct ifnet *ifp; + struct mbuf **m0; + struct sockaddr *dst_netaddr; + caddr_t route; + char *type; + char *edst; + u_long dl_tag; +{ + struct rtentry *rt0 = (struct rtentry *) route; + int 
s; + register struct mbuf *m = *m0; + register struct rtentry *rt; + register struct ether_header *eh; + int off, len = m->m_pkthdr.len; + int hlen; /* link layer header lenght */ + struct arpcom *ac = IFP2AC(ifp); + + + + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + return ENETDOWN; + + rt = rt0; + if (rt) { + if ((rt->rt_flags & RTF_UP) == 0) { + rt0 = rt = rtalloc1(dst_netaddr, 1, 0UL); + if (rt0) + rt->rt_refcnt--; + else + return EHOSTUNREACH; + } + + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_gwroute == 0) + goto lookup; + if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { + rtfree(rt); rt = rt0; + lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1, + 0UL); + if ((rt = rt->rt_gwroute) == 0) + return (EHOSTUNREACH); + } + } + + + if (rt->rt_flags & RTF_REJECT) + if (rt->rt_rmx.rmx_expire == 0 || + time_second < rt->rt_rmx.rmx_expire) + return (rt == rt0 ? EHOSTDOWN : EHOSTUNREACH); + } + + hlen = ETHER_HDR_LEN; + + /* + * Tell ether_frameout it's ok to loop packet unless negated below. 
+ */ + m->m_flags |= M_LOOP; + + switch (dst_netaddr->sa_family) { + + case AF_INET: + if (!arpresolve(ac, rt, m, dst_netaddr, edst, rt0)) + return (EJUSTRETURN); /* if not yet resolved */ + off = m->m_pkthdr.len - m->m_len; + *(u_short *)type = htons(ETHERTYPE_IP); + break; + + case AF_UNSPEC: + m->m_flags &= ~M_LOOP; + eh = (struct ether_header *)dst_netaddr->sa_data; + (void)memcpy(edst, eh->ether_dhost, 6); + *(u_short *)type = eh->ether_type; + break; + + default: + kprintf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit, + dst_netaddr->sa_family); + + return EAFNOSUPPORT; + } + + return (0); +} + + +int +ether_inet_prmod_ioctl(dl_tag, ifp, command, data) + u_long dl_tag; + struct ifnet *ifp; + int command; + caddr_t data; +{ + struct ifaddr *ifa = (struct ifaddr *) data; + struct ifreq *ifr = (struct ifreq *) data; + struct rslvmulti_req *rsreq = (struct rslvmulti_req *) data; + int error = 0; + boolean_t funnel_state; + struct arpcom *ac = (struct arpcom *) ifp; + struct sockaddr_dl *sdl; + struct sockaddr_in *sin; + u_char *e_addr; + + +#if 0 + /* No tneeded at soo_ioctlis already funnelled */ + funnel_state = thread_funnel_set(network_flock,TRUE); +#endif + + switch (command) { + case SIOCRSLVMULTI: { + switch(rsreq->sa->sa_family) { + + case AF_INET: + sin = (struct sockaddr_in *)rsreq->sa; + if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) + return EADDRNOTAVAIL; + MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR, + M_WAITOK); + sdl->sdl_len = sizeof *sdl; + sdl->sdl_family = AF_LINK; + sdl->sdl_index = ifp->if_index; + sdl->sdl_type = IFT_ETHER; + sdl->sdl_nlen = 0; + sdl->sdl_alen = ETHER_ADDR_LEN; + sdl->sdl_slen = 0; + e_addr = LLADDR(sdl); + ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr); + *rsreq->llsa = (struct sockaddr *)sdl; + return EJUSTRETURN; + + default: + /* + * Well, the text isn't quite right, but it's the name + * that counts... 
+ */ + return EAFNOSUPPORT; + } + + } + case SIOCSIFADDR: + if ((ifp->if_flags & IFF_RUNNING) == 0) { + ifp->if_flags |= IFF_UP; + dlil_ioctl(0, ifp, SIOCSIFFLAGS, (caddr_t) 0); + } + + switch (ifa->ifa_addr->sa_family) { + + case AF_INET: + + if (ifp->if_init) + ifp->if_init(ifp->if_softc); /* before arpwhohas */ + + // + // See if another station has *our* IP address. + // i.e.: There is an address conflict! If a + // conflict exists, a message is sent to the + // console. + // + if (IA_SIN(ifa)->sin_addr.s_addr != 0) + { + /* don't bother for 0.0.0.0 */ + ac->ac_ipaddr = IA_SIN(ifa)->sin_addr; + arpwhohas(ac, &IA_SIN(ifa)->sin_addr); + } + + arp_ifinit(IFP2AC(ifp), ifa); + + break; + + default: + break; + } + + break; + + case SIOCGIFADDR: + { + struct sockaddr *sa; + + sa = (struct sockaddr *) & ifr->ifr_data; + bcopy(IFP2AC(ifp)->ac_enaddr, + (caddr_t) sa->sa_data, ETHER_ADDR_LEN); + } + break; + + case SIOCSIFMTU: + /* + * Set the interface MTU. + */ + if (ifr->ifr_mtu > ETHERMTU) { + error = EINVAL; + } else { + ifp->if_mtu = ifr->ifr_mtu; + } + break; + + default: + return EOPNOTSUPP; + } + + //(void) thread_funnel_set(network_flock, FALSE); + + return (error); +} + + + + + +u_long +ether_attach_inet(struct ifnet *ifp) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + struct dlil_demux_desc desc2; + u_long ip_dl_tag=0; + u_short en_native=ETHERTYPE_IP; + u_short arp_native=ETHERTYPE_ARP; + int stat; + int i; + + + stat = dlil_find_dltag(ifp->if_family, ifp->if_unit, PF_INET, &ip_dl_tag); + if (stat == 0) + return ip_dl_tag; + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_RAW; + desc.variants.bitmask.proto_id_length = 0; + desc.variants.bitmask.proto_id = 0; + desc.variants.bitmask.proto_id_mask = 0; + desc.native_type = (char *) &en_native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = inet_ether_input; + reg.pre_output = 
inet_ether_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = ether_inet_prmod_ioctl; + reg.default_proto = 1; + reg.protocol_family = PF_INET; + + desc2 = desc; + desc2.native_type = (char *) &arp_native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc2, next); + + stat = dlil_attach_protocol(®, &ip_dl_tag); + if (stat) { + printf("WARNING: ether_attach_inet can't attach ip to interface\n"); + return stat; + } + + return ip_dl_tag; +} diff --git a/bsd/net/etherdefs.h b/bsd/net/etherdefs.h new file mode 100644 index 000000000..cd56710f9 --- /dev/null +++ b/bsd/net/etherdefs.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +/* + * Copyright (c) 1982, 1986 Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and that due credit is given + * to the University of California at Berkeley. The name of the University + * may not be used to endorse or promote products derived from this + * software without specific prior written permission. This software + * is provided ``as is'' without express or implied warranty. + * + * HISTORY + * 11-Jul-93 Mac Gillon (mgillon) at NeXT + * Integrated MULTICAST support + * + * 09-Apr-90 Bradley Taylor (btaylor) at NeXT, Inc. + * Created. Originally part of . + */ +#ifndef _ETHERDEFS_ +#define _ETHERDEFS_ + +#include +/* + * Ethernet address - 6 octets + */ +#define NUM_EN_ADDR_BYTES 6 + + +typedef struct ether_addr enet_addr_t; + +typedef struct ether_header ether_header_t; + +#define IFTYPE_ETHERNET "10MB Ethernet" + +#define ETHERHDRSIZE 14 +#define ETHERMAXPACKET (ETHERHDRSIZE + ETHERMTU) +#define ETHERMINPACKET 64 +#define ETHERCRC 4 + +/* + * Byte and bit in an enet_addr_t defining individual/group destination. + */ +#define EA_GROUP_BYTE 0 +#define EA_GROUP_BIT 0x01 + + +#endif /* _ETHERDEFS_ */ diff --git a/bsd/net/ethernet.h b/bsd/net/ethernet.h new file mode 100644 index 000000000..de84f184e --- /dev/null +++ b/bsd/net/ethernet.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Fundamental constants relating to ethernet. + * + */ + +#ifndef _NET_ETHERNET_H_ +#define _NET_ETHERNET_H_ + +/* + * The number of bytes in an ethernet (MAC) address. + */ +#define ETHER_ADDR_LEN 6 + +/* + * The number of bytes in the type field. + */ +#define ETHER_TYPE_LEN 2 + +/* + * The number of bytes in the trailing CRC field. + */ +#define ETHER_CRC_LEN 4 + +/* + * The length of the combined header. + */ +#define ETHER_HDR_LEN (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN) + +/* + * The minimum packet length. + */ +#define ETHER_MIN_LEN 64 + +/* + * The maximum packet length. + */ +#define ETHER_MAX_LEN 1518 + +/* + * A macro to validate a length with + */ +#define ETHER_IS_VALID_LEN(foo) \ + ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) + +/* + * Structure of a 10Mb/s Ethernet header. + */ +struct ether_header { + u_char ether_dhost[ETHER_ADDR_LEN]; + u_char ether_shost[ETHER_ADDR_LEN]; + u_short ether_type; +}; + +/* + * Structure of a 48-bit Ethernet address. + */ +struct ether_addr { + u_char octet[ETHER_ADDR_LEN]; +}; + +#define ether_addr_octet octet + +#define ETHERTYPE_PUP 0x0200 /* PUP protocol */ +#define ETHERTYPE_IP 0x0800 /* IP protocol */ +#define ETHERTYPE_ARP 0x0806 /* Addr. resolution protocol */ +#define ETHERTYPE_REVARP 0x8035 /* reverse Addr. 
resolution protocol */ +#define ETHERTYPE_VLAN 0x8100 /* IEEE 802.1Q VLAN tagging */ +#define ETHERTYPE_IPV6 0x86dd /* IPv6 */ +#define ETHERTYPE_LOOPBACK 0x9000 /* used to test interfaces */ +/* XXX - add more useful types here */ + +/* + * The ETHERTYPE_NTRAILER packet types starting at ETHERTYPE_TRAIL have + * (type-ETHERTYPE_TRAIL)*512 bytes of data followed + * by an ETHER type (as given above) and then the (variable-length) header. + */ +#define ETHERTYPE_TRAIL 0x1000 /* Trailer packet */ +#define ETHERTYPE_NTRAILER 16 + +#define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) +#define ETHERMIN (ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) + +#if KERNEL +struct ether_addr *ether_aton __P((char *)); +#endif + +#if !KERNEL +#include + +/* + * Ethernet address conversion/parsing routines. + */ +__BEGIN_DECLS + +int ether_hostton __P((char *, struct ether_addr *)); +int ether_line __P((char *, struct ether_addr *, char *)); +char *ether_ntoa __P((struct ether_addr *)); +int ether_ntohost __P((char *, struct ether_addr *)); +__END_DECLS +#endif /* !KERNEL */ + +#endif /* !_NET_ETHERNET_H_ */ diff --git a/bsd/net/hostcache.c b/bsd/net/hostcache.c new file mode 100644 index 000000000..f2a6ff463 --- /dev/null +++ b/bsd/net/hostcache.c @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1997 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "per-host cache structure"); + +static struct hctable hctable[AF_MAX]; +static int hc_timeout_interval = 120; +static int hc_maxidle = 1800; + +static int cmpsa(const struct sockaddr *sa1, const struct sockaddr *sa2); +static void hc_timeout(void *xhct); +static void maybe_bump_hash(struct hctable *hct); + +int +hc_init(int af, struct hccallback *hccb, int init_nelem, int primes) +{ + struct hctable *hct; + struct hchead *heads; + u_long nelem; + + hct = &hctable[af]; + nelem = init_nelem; + if (hct->hct_nentries) + return 0; + + if (primes) { + heads = phashinit(init_nelem, M_HOSTCACHE, &nelem); + } else { + int i; + MALLOC(heads, struct hchead *, nelem * sizeof *heads, + M_HOSTCACHE, M_WAITOK); + for (i = 0; i < nelem; i++) { + LIST_INIT(&heads[i]); + } + } + + hct->hct_heads = heads; + hct->hct_nentries = nelem; + hct->hct_primes = primes; + timeout(hc_timeout, hct, hc_timeout_interval * hz); + return 0; +} + +struct hcentry * +hc_get(struct sockaddr *sa) +{ + u_long hash; + struct hcentry *hc; + struct hctable *hct; + int s; + + hct = &hctable[sa->sa_family]; + if (hct->hct_nentries == 0) + return 0; + hash = hct->hct_cb->hccb_hash(sa, hct->hct_nentries); + hc = hct->hct_heads[hash].lh_first; + for (; hc; hc = hc->hc_link.le_next) { + if (cmpsa(hc->hc_host, sa) == 0) + break; + } + if (hc == 0) + return 0; + s = splnet(); + if (hc->hc_rt && 
(hc->hc_rt->rt_flags & RTF_UP) == 0) { + RTFREE(hc->hc_rt); + hc->hc_rt = 0; + } + if (hc->hc_rt == 0) { + hc->hc_rt = rtalloc1(hc->hc_host, 1, 0); + } + hc_ref(hc); + splx(s); + /* XXX move to front of list? */ + return hc; +} + +void +hc_ref(struct hcentry *hc) +{ + int s = splnet(); + if (hc->hc_refcnt++ == 0) { + hc->hc_hct->hct_idle--; + hc->hc_hct->hct_active++; + } + splx(s); +} + +void +hc_rele(struct hcentry *hc) +{ + int s = splnet(); +#ifdef DIAGNOSTIC + printf("hc_rele: %p: negative refcnt!\n", (void *)hc); +#endif + hc->hc_refcnt--; + if (hc->hc_refcnt == 0) { + hc->hc_hct->hct_idle++; + hc->hc_hct->hct_active--; + hc->hc_idlesince = mono_time; /* XXX right one? */ + } + splx(s); +} + +/* + * The user is expected to initialize hc_host with the address and everything + * else to the appropriate form of `0'. + */ +int +hc_insert(struct hcentry *hc) +{ + struct hcentry *hc2; + struct hctable *hct; + u_long hash; + int s; + + hct = &hctable[hc->hc_host->sa_family]; + hash = hct->hct_cb->hccb_hash(hc->hc_host, hct->hct_nentries); + + hc2 = hct->hct_heads[hash].lh_first; + for (; hc2; hc2 = hc2->hc_link.le_next) { + if (cmpsa(hc2->hc_host, hc->hc_host) == 0) + break; + } + if (hc2 != 0) + return EEXIST; + hc->hc_hct = hct; + s = splnet(); + LIST_INSERT_HEAD(&hct->hct_heads[hash], hc, hc_link); + hct->hct_idle++; + /* + * If the table is now more than 75% full, consider bumping it. + */ + if (100 * (hct->hct_idle + hct->hct_active) > 75 * hct->hct_nentries) + maybe_bump_hash(hct); + splx(s); + return 0; +} + +/* + * It's not clear to me how much sense this makes as an external interface, + * since it is expected that the deletion will normally be handled by + * the cache timeout. 
+ */ +int +hc_delete(struct hcentry *hc) +{ + struct hctable *hct; + int error, s; + + if (hc->hc_refcnt > 0) + return 0; + + hct = hc->hc_hct; + error = hct->hct_cb->hccb_delete(hc); + if (error) + return 0; + + s = splnet(); + LIST_REMOVE(hc, hc_link); + hc->hc_hct->hct_idle--; + splx(s); + FREE(hc, M_HOSTCACHE); + return 0; +} + +static void +hc_timeout(void *xhct) +{ + struct hcentry *hc; + struct hctable *hct; + int j, s; + time_t start; + + hct = xhct; + start = mono_time.tv_sec; /* for simplicity */ + + if (hct->hct_idle == 0) + return; + for (j = 0; j < hct->hct_nentries; j++) { + for (hc = hct->hct_heads[j].lh_first; hc; + hc = hc->hc_link.le_next) { + if (hc->hc_refcnt > 0) + continue; + if (hc->hc_idlesince.tv_sec + hc_maxidle <= start) { + if (hct->hct_cb->hccb_delete(hc)) + continue; + s = splnet(); + LIST_REMOVE(hc, hc_link); + hct->hct_idle--; + splx(s); + } + } + } + /* + * Fiddle something here based on tot_idle... + */ + timeout(hc_timeout, xhct, hc_timeout_interval * hz); +} + +static int +cmpsa(const struct sockaddr *sa1, const struct sockaddr *sa2) +{ + if (sa1->sa_len != sa2->sa_len) + return ((int)sa1->sa_len - sa2->sa_len); + return bcmp(sa1, sa2, sa1->sa_len); +} + +static void +maybe_bump_hash(struct hctable *hct) +{ + ; /* XXX fill me in */ +} diff --git a/bsd/net/hostcache.h b/bsd/net/hostcache.h new file mode 100644 index 000000000..616c2d0d8 --- /dev/null +++ b/bsd/net/hostcache.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1997 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _NET_HOSTCACHE_H +#define _NET_HOSTCACHE_H 1 + +/* + * This file defines the interface between network protocols and + * the cache of host-specific information maintained by the kernel. + * The generic interface takes care of inserting and deleting entries, + * maintaining mutual exclusion, and enforcing policy constraint on the + * size of the cache and the maximum age of its entries. + * It replaces an earlier scheme which overloaded the routing table + * for this purpose, and should be significantly more efficient + * at performing most operations. (It does keep a route to each + * entry in the cache.) Most protocols will want to define a + * structure which begins with `struct hcentry' so that they + * can keep additional, protocol-specific information in it. 
+ */ + +#include + +struct hcentry { + LIST_ENTRY(hcentry) hc_link; + struct timeval hc_idlesince; /* time last ref dropped */ + struct sockaddr *hc_host; /* address of this entry's host */ + struct rtentry *hc_rt; /* route to get there */ + /* struct nexthop *hc_nh; */ + int hc_refcnt; /* reference count */ + struct hctable *hc_hct; /* back ref to table */ +}; + +struct hccallback { + u_long (*hccb_hash)(struct sockaddr *, u_long); + int (*hccb_delete)(struct hcentry *); + u_long (*hccb_bump)(u_long); +}; + +LIST_HEAD(hchead, hcentry); + +struct hctable { + u_long hct_nentries; + u_long hct_active; + u_long hct_idle; + struct hchead *hct_heads; + struct hccallback *hct_cb; + int hct_primes; +}; + +#ifdef KERNEL + +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_HOSTCACHE); +#endif +/* + * The table-modification functions must be called from user mode, as + * they may block waiting for memory and/or locks. + */ +int hc_init(int af, struct hccallback *hccb, int init_nelem, int primes); +struct hcentry *hc_get(struct sockaddr *sa); +void hc_ref(struct hcentry *hc); +void hc_rele(struct hcentry *hc); +int hc_insert(struct hcentry *hc); +int hc_delete(struct hcentry *hc); +#endif /* KERNEL */ + +#endif /* _NET_HOSTCACHE_H */ diff --git a/bsd/net/if.c b/bsd/net/if.c new file mode 100644 index 000000000..088e52b99 --- /dev/null +++ b/bsd/net/if.c @@ -0,0 +1,1291 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if.c 8.3 (Berkeley) 1/4/94 + */ + +/* +#include "opt_compat.h" +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * System initialization + */ + +static int ifconf __P((u_long, caddr_t)); +static void if_qflush __P((struct ifqueue *)); +static void link_rtrequest __P((int, struct rtentry *, struct sockaddr *)); + +MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); +MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); + +int ifqmaxlen = IFQ_MAXLEN; +struct ifnethead ifnet; /* depend on static init XXX */ + +#if INET6 +/* + * XXX: declare here to avoid to include many inet6 related files.. + * should be more generalized? + */ +extern void nd6_setmtu __P((struct ifnet *)); +#endif + +/* + * Network interface utility routines. + * + * Routines with ifa_ifwith* names take sockaddr *'s as + * parameters. + * + * This routine assumes that it will be called at splimp() or higher. + */ +/* ARGSUSED*/ + + +int if_index = 0; +struct ifaddr **ifnet_addrs; +struct ifnet **ifindex2ifnet = NULL; + + +/* + * Attach an interface to the + * list of "active" interfaces. 
+ */ +void +old_if_attach(ifp) + struct ifnet *ifp; +{ + unsigned socksize, ifasize; + int namelen, masklen; + char workbuf[64]; + register struct sockaddr_dl *sdl; + register struct ifaddr *ifa; + static int if_indexlim = 8; + + + if (ifp->if_snd.ifq_maxlen == 0) + ifp->if_snd.ifq_maxlen = ifqmaxlen; + + TAILQ_INSERT_TAIL(&ifnet, ifp, if_link); + ifp->if_index = ++if_index; + /* + * XXX - + * The old code would work if the interface passed a pre-existing + * chain of ifaddrs to this code. We don't trust our callers to + * properly initialize the tailq, however, so we no longer allow + * this unlikely case. + */ + TAILQ_INIT(&ifp->if_addrhead); + LIST_INIT(&ifp->if_multiaddrs); + getmicrotime(&ifp->if_lastchange); + if (ifnet_addrs == 0 || if_index >= if_indexlim) { + unsigned n = (if_indexlim <<= 1) * sizeof(ifa); + struct ifaddr **q = (struct ifaddr **) + _MALLOC(n, M_IFADDR, M_WAITOK); + bzero((caddr_t)q, n); + if (ifnet_addrs) { + bcopy((caddr_t)ifnet_addrs, (caddr_t)q, n/2); + FREE((caddr_t)ifnet_addrs, M_IFADDR); + } + ifnet_addrs = (struct ifaddr **)q; + + /* grow ifindex2ifnet */ + n = if_indexlim * sizeof(struct ifnet *); + q = (caddr_t)_MALLOC(n, M_IFADDR, M_WAITOK); + bzero(q, n); + if (ifindex2ifnet) { + bcopy((caddr_t)ifindex2ifnet, q, n/2); + _FREE((caddr_t)ifindex2ifnet, M_IFADDR); + } + ifindex2ifnet = (struct ifnet **)q; + } + + ifindex2ifnet[if_index] = ifp; + + /* + * create a Link Level name for this device + */ + namelen = snprintf(workbuf, sizeof(workbuf), + "%s%d", ifp->if_name, ifp->if_unit); +#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m)) + masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen; + socksize = masklen + ifp->if_addrlen; +#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1))) + if (socksize < sizeof(*sdl)) + socksize = sizeof(*sdl); + socksize = ROUNDUP(socksize); + ifasize = sizeof(*ifa) + 2 * socksize; + ifa = (struct ifaddr *) _MALLOC(ifasize, M_IFADDR, M_WAITOK); + if (ifa) { + bzero((caddr_t)ifa, 
ifasize); + sdl = (struct sockaddr_dl *)(ifa + 1); + sdl->sdl_len = socksize; + sdl->sdl_family = AF_LINK; + bcopy(workbuf, sdl->sdl_data, namelen); + sdl->sdl_nlen = namelen; + sdl->sdl_index = ifp->if_index; + sdl->sdl_type = ifp->if_type; + ifnet_addrs[if_index - 1] = ifa; + ifa->ifa_ifp = ifp; + ifa->ifa_rtrequest = link_rtrequest; + ifa->ifa_addr = (struct sockaddr *)sdl; + sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); + ifa->ifa_netmask = (struct sockaddr *)sdl; + sdl->sdl_len = masklen; + while (namelen != 0) + sdl->sdl_data[--namelen] = 0xff; + TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); + } +} +/* + * Locate an interface based on a complete address. + */ +/*ARGSUSED*/ +struct ifaddr * +ifa_ifwithaddr(addr) + register struct sockaddr *addr; +{ + register struct ifnet *ifp; + register struct ifaddr *ifa; + +#define equal(a1, a2) \ + (bcmp((caddr_t)(a1), (caddr_t)(a2), ((struct sockaddr *)(a1))->sa_len) == 0) + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { + if (ifa->ifa_addr->sa_family != addr->sa_family) + continue; + if (equal(addr, ifa->ifa_addr)) + return (ifa); + if ((ifp->if_flags & IFF_BROADCAST) && ifa->ifa_broadaddr && + /* IP6 doesn't have broadcast */ + ifa->ifa_broadaddr->sa_len != 0 && + equal(ifa->ifa_broadaddr, addr)) + return (ifa); + } + return ((struct ifaddr *)0); +} +/* + * Locate the point to point interface with a given destination address. 
+ */ +/*ARGSUSED*/ +struct ifaddr * +ifa_ifwithdstaddr(addr) + register struct sockaddr *addr; +{ + register struct ifnet *ifp; + register struct ifaddr *ifa; + + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) + if (ifp->if_flags & IFF_POINTOPOINT) + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { + if (ifa->ifa_addr->sa_family != addr->sa_family) + continue; + if (ifa->ifa_dstaddr && equal(addr, ifa->ifa_dstaddr)) + return (ifa); + } + return ((struct ifaddr *)0); +} + +/* + * Find an interface on a specific network. If many, choice + * is most specific found. + */ +struct ifaddr * +ifa_ifwithnet(addr) + struct sockaddr *addr; +{ + register struct ifnet *ifp; + register struct ifaddr *ifa; + struct ifaddr *ifa_maybe = (struct ifaddr *) 0; + u_int af = addr->sa_family; + char *addr_data = addr->sa_data, *cplim; + + /* + * AF_LINK addresses can be looked up directly by their index number, + * so do that if we can. + */ + if (af == AF_LINK) { + register struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr; + if (sdl->sdl_index && sdl->sdl_index <= if_index) + return (ifnet_addrs[sdl->sdl_index - 1]); + } + + /* + * Scan though each interface, looking for ones that have + * addresses in this address family. + */ + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) { + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { + register char *cp, *cp2, *cp3; + + if (ifa->ifa_addr->sa_family != af) +next: continue; +#if 0 /* for maching gif tunnel dst as routing entry gateway */ + if (ifp->if_flags & IFF_POINTOPOINT) { + /* + * This is a bit broken as it doesn't + * take into account that the remote end may + * be a single node in the network we are + * looking for. + * The trouble is that we don't know the + * netmask for the remote end. 
+ */ + if (ifa->ifa_dstaddr != 0 + && equal(addr, ifa->ifa_dstaddr)) + return (ifa); + } else +#endif + { + /* + * if we have a special address handler, + * then use it instead of the generic one. + */ + if (ifa->ifa_claim_addr) { + if ((*ifa->ifa_claim_addr)(ifa, addr)) { + return (ifa); + } else { + continue; + } + } + + /* + * Scan all the bits in the ifa's address. + * If a bit dissagrees with what we are + * looking for, mask it with the netmask + * to see if it really matters. + * (A byte at a time) + */ + if (ifa->ifa_netmask == 0) + continue; + cp = addr_data; + cp2 = ifa->ifa_addr->sa_data; + cp3 = ifa->ifa_netmask->sa_data; + cplim = ifa->ifa_netmask->sa_len + + (char *)ifa->ifa_netmask; + while (cp3 < cplim) + if ((*cp++ ^ *cp2++) & *cp3++) + goto next; /* next address! */ + /* + * If the netmask of what we just found + * is more specific than what we had before + * (if we had one) then remember the new one + * before continuing to search + * for an even better one. + */ + if (ifa_maybe == 0 || + rn_refines((caddr_t)ifa->ifa_netmask, + (caddr_t)ifa_maybe->ifa_netmask)) + ifa_maybe = ifa; + } + } + } + return (ifa_maybe); +} + +/* + * Find an interface address specific to an interface best matching + * a given address. 
+ */ +struct ifaddr * +ifaof_ifpforaddr(addr, ifp) + struct sockaddr *addr; + register struct ifnet *ifp; +{ + register struct ifaddr *ifa; + register char *cp, *cp2, *cp3; + register char *cplim; + struct ifaddr *ifa_maybe = 0; + u_int af = addr->sa_family; + + if (af >= AF_MAX) + return (0); + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { + if (ifa->ifa_addr->sa_family != af) + continue; + if (ifa_maybe == 0) + ifa_maybe = ifa; + if (ifa->ifa_netmask == 0) { + if (equal(addr, ifa->ifa_addr) || + (ifa->ifa_dstaddr && equal(addr, ifa->ifa_dstaddr))) + return (ifa); + continue; + } + if (ifp->if_flags & IFF_POINTOPOINT) { + if (equal(addr, ifa->ifa_dstaddr)) + return (ifa); + } else { + cp = addr->sa_data; + cp2 = ifa->ifa_addr->sa_data; + cp3 = ifa->ifa_netmask->sa_data; + cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; + for (; cp3 < cplim; cp3++) + if ((*cp++ ^ *cp2++) & *cp3) + break; + if (cp3 == cplim) + return (ifa); + } + } + return (ifa_maybe); +} + +#include + +/* + * Default action when installing a route with a Link Level gateway. + * Lookup an appropriate real ifa to point to. + * This should be moved to /sys/net/link.c eventually. + */ +static void +link_rtrequest(cmd, rt, sa) + int cmd; + register struct rtentry *rt; + struct sockaddr *sa; +{ + register struct ifaddr *ifa; + struct sockaddr *dst; + struct ifnet *ifp; + + if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) || + ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0)) + return; + ifa = ifaof_ifpforaddr(dst, ifp); + if (ifa) { + IFAFREE(rt->rt_ifa); + rt->rt_ifa = ifa; + ifa->ifa_refcnt++; + if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) + ifa->ifa_rtrequest(cmd, rt, sa); + } +} + +/* + * Mark an interface down and notify protocols of + * the transition. + * NOTE: must be called at splnet or eqivalent. 
+ */ +void +if_unroute(ifp, flag, fam) + register struct ifnet *ifp; + int flag, fam; +{ + register struct ifaddr *ifa; + + ifp->if_flags &= ~flag; + getmicrotime(&ifp->if_lastchange); + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) + if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) + pfctlinput(PRC_IFDOWN, ifa->ifa_addr); + if_qflush(&ifp->if_snd); + rt_ifmsg(ifp); +} + +/* + * Mark an interface up and notify protocols of + * the transition. + * NOTE: must be called at splnet or eqivalent. + */ +void +if_route(ifp, flag, fam) + register struct ifnet *ifp; + int flag, fam; +{ + register struct ifaddr *ifa; + + ifp->if_flags |= flag; + getmicrotime(&ifp->if_lastchange); + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) + if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) + pfctlinput(PRC_IFUP, ifa->ifa_addr); + rt_ifmsg(ifp); +#if INET6 + in6_if_up(ifp); +#endif +} + +/* + * Mark an interface down and notify protocols of + * the transition. + * NOTE: must be called at splnet or eqivalent. + */ +void +if_down(ifp) + register struct ifnet *ifp; +{ + + if_unroute(ifp, IFF_UP, AF_UNSPEC); +} + +/* + * Mark an interface up and notify protocols of + * the transition. + * NOTE: must be called at splnet or eqivalent. + */ +void +if_up(ifp) + register struct ifnet *ifp; +{ + + if_route(ifp, IFF_UP, AF_UNSPEC); +} + +/* + * Flush an interface queue. + */ +static void +if_qflush(ifq) + register struct ifqueue *ifq; +{ + register struct mbuf *m, *n; + + n = ifq->ifq_head; + while ((m = n) != 0) { + n = m->m_act; + m_freem(m); + } + ifq->ifq_head = 0; + ifq->ifq_tail = 0; + ifq->ifq_len = 0; +} + + +/* + * Map interface name to + * interface structure pointer. 
+ */ +struct ifnet * +ifunit(name) + register char *name; +{ + char namebuf[IFNAMSIZ + 1]; + register char *cp, *cp2; + char *end; + register struct ifnet *ifp; + int unit; + unsigned len; + register char c = '\0'; + + /* + * Look for a non numeric part + */ + end = name + IFNAMSIZ; + cp2 = namebuf; + cp = name; + while ((cp < end) && (c = *cp)) { + if (c >= '0' && c <= '9') + break; + *cp2++ = c; + cp++; + } + if ((cp == end) || (c == '\0') || (cp == name)) + return ((struct ifnet *)0); + *cp2 = '\0'; + /* + * check we have a legal number (limit to 7 digits?) + */ + len = cp - name + 1; + for (unit = 0; + ((c = *cp) >= '0') && (c <= '9') && (unit < 1000000); cp++ ) + unit = (unit * 10) + (c - '0'); + if (*cp != '\0') + return 0; /* no trailing garbage allowed */ + /* + * Now search all the interfaces for this name/number + */ + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) { + if (bcmp(ifp->if_name, namebuf, len)) + continue; + if (unit == ifp->if_unit) + break; + } + return (ifp); +} + + +/* + * Map interface name in a sockaddr_dl to + * interface structure pointer. + */ +struct ifnet * +if_withname(sa) + struct sockaddr *sa; +{ + char ifname[IFNAMSIZ+1]; + struct sockaddr_dl *sdl = (struct sockaddr_dl *)sa; + + if ( (sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) || + (sdl->sdl_nlen > IFNAMSIZ) ) + return NULL; + + /* + * ifunit wants a null-terminated name. It may not be null-terminated + * in the sockaddr. We don't want to change the caller's sockaddr, + * and there might not be room to put the trailing null anyway, so we + * make a local copy that we know we can null terminate safely. + */ + + bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen); + ifname[sdl->sdl_nlen] = '\0'; + return ifunit(ifname); +} + + +/* + * Interface ioctls. 
+ */ +int +ifioctl(so, cmd, data, p) + struct socket *so; + u_long cmd; + caddr_t data; + struct proc *p; +{ + register struct ifnet *ifp; + register struct ifreq *ifr; + int error = 0; + struct kev_msg ev_msg; + short oif_flags; + struct net_event_data ev_data; + + switch (cmd) { + + case SIOCGIFCONF: + case OSIOCGIFCONF: + return (ifconf(cmd, data)); + } + ifr = (struct ifreq *)data; + ifp = ifunit(ifr->ifr_name); + if (ifp == 0) + return (ENXIO); + switch (cmd) { + + case SIOCGIFFLAGS: + ifr->ifr_flags = ifp->if_flags; + break; + + case SIOCGIFMETRIC: + ifr->ifr_metric = ifp->if_metric; + break; + + case SIOCGIFMTU: + ifr->ifr_mtu = ifp->if_mtu; + break; + + case SIOCGIFPHYS: + ifr->ifr_phys = ifp->if_physical; + break; + + case SIOCSIFFLAGS: + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + if (ifp->if_flags & IFF_UP && (ifr->ifr_flags & IFF_UP) == 0) { + int s = splimp(); + if_down(ifp); + splx(s); + } + if (ifr->ifr_flags & IFF_UP && (ifp->if_flags & IFF_UP) == 0) { + int s = splimp(); + if_up(ifp); + splx(s); + } + ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) | + (ifr->ifr_flags &~ IFF_CANTCHANGE); + + error = dlil_ioctl(so->so_proto->pr_domain->dom_family, + ifp, cmd, (caddr_t) data); + + if (error == 0) { + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_SIFFLAGS; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + ev_msg.dv[1].data_length = 0; + kev_post_msg(&ev_msg); + + } + getmicrotime(&ifp->if_lastchange); + break; + + case SIOCSIFMETRIC: + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + ifp->if_metric = ifr->ifr_metric; + + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + 
ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_SIFMETRICS; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + + ev_msg.dv[1].data_length = 0; + kev_post_msg(&ev_msg); + + getmicrotime(&ifp->if_lastchange); + break; + + case SIOCSIFPHYS: + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return error; + + error = dlil_ioctl(so->so_proto->pr_domain->dom_family, + ifp, cmd, (caddr_t) data); + + if (error == 0) { + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_SIFPHYS; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + ev_msg.dv[1].data_length = 0; + kev_post_msg(&ev_msg); + + getmicrotime(&ifp->if_lastchange); + } + return(error); + + case SIOCSIFMTU: + { + u_long oldmtu = ifp->if_mtu; + + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + if (ifp->if_ioctl == NULL) + return (EOPNOTSUPP); + /* + * 72 was chosen below because it is the size of a TCP/IP + * header (40) + the minimum mss (32). 
+ */ + if (ifr->ifr_mtu < 72 || ifr->ifr_mtu > 65535) + return (EINVAL); + + error = dlil_ioctl(so->so_proto->pr_domain->dom_family, + ifp, cmd, (caddr_t) data); + + if (error == 0) { + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + + ev_msg.event_code = KEV_DL_SIFMTU; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + ev_msg.dv[1].data_length = 0; + kev_post_msg(&ev_msg); + + getmicrotime(&ifp->if_lastchange); + } + /* + * If the link MTU changed, do network layer specific procedure. + */ +#ifdef INET6 + if (ifp->if_mtu != oldmtu) { + nd6_setmtu(ifp); + } +#endif + } + return(error); + + case SIOCADDMULTI: + case SIOCDELMULTI: + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + + /* Don't allow group membership on non-multicast interfaces. */ + if ((ifp->if_flags & IFF_MULTICAST) == 0) + return EOPNOTSUPP; + +#if 0 + /* + * Don't let users change protocols' entries. 
+ */ + if (ifr->ifr_addr.sa_family != AF_LINK) + return EINVAL; +#endif + if (cmd == SIOCADDMULTI) { + struct ifmultiaddr *ifma; + error = if_addmulti(ifp, &ifr->ifr_addr, &ifma); + ev_msg.event_code = KEV_DL_ADDMULTI; + } else { + error = if_delmulti(ifp, &ifr->ifr_addr); + ev_msg.event_code = KEV_DL_DELMULTI; + } + if (error == 0) { + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; + strncpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); + + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (unsigned long) ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; + ev_msg.dv[1].data_length = 0; + kev_post_msg(&ev_msg); + + getmicrotime(&ifp->if_lastchange); + } + return error; + + case SIOCSIFMEDIA: + case SIOCSIFGENERIC: + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + + error = dlil_ioctl(so->so_proto->pr_domain->dom_family, + ifp, cmd, (caddr_t) data); + + if (error == 0) + getmicrotime(&ifp->if_lastchange); + return error; + + case SIOCGIFMEDIA: + case SIOCGIFGENERIC: + + return dlil_ioctl(so->so_proto->pr_domain->dom_family, + ifp, cmd, (caddr_t) data); + + default: + oif_flags = ifp->if_flags; + if (so->so_proto == 0) + return (EOPNOTSUPP); +#if !COMPAT_43 + return ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, + data, + ifp, p)); +#else + { + int ocmd = cmd; + + switch (cmd) { + + case SIOCSIFDSTADDR: + case SIOCSIFADDR: + case SIOCSIFBRDADDR: + case SIOCSIFNETMASK: +#if BYTE_ORDER != BIG_ENDIAN + if (ifr->ifr_addr.sa_family == 0 && + ifr->ifr_addr.sa_len < 16) { + ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len; + ifr->ifr_addr.sa_len = 16; + } +#else + if (ifr->ifr_addr.sa_len == 0) + ifr->ifr_addr.sa_len = 16; +#endif + /* Fall through! 
*/ + break; + + case OSIOCGIFADDR: + cmd = SIOCGIFADDR; + break; + + case OSIOCGIFDSTADDR: + cmd = SIOCGIFDSTADDR; + break; + + case OSIOCGIFBRDADDR: + cmd = SIOCGIFBRDADDR; + break; + + case OSIOCGIFNETMASK: + cmd = SIOCGIFNETMASK; + } + + error = ((*so->so_proto->pr_usrreqs->pru_control)(so, + cmd, + data, + ifp, p)); + switch (ocmd) { + + case OSIOCGIFADDR: + case OSIOCGIFDSTADDR: + case OSIOCGIFBRDADDR: + case OSIOCGIFNETMASK: + *(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family; + } + + + } + + if (error == EOPNOTSUPP) + error = dlil_ioctl(so->so_proto->pr_domain->dom_family, + ifp, cmd, (caddr_t) data); + +#if INET6 + if ((oif_flags ^ ifp->if_flags) & IFF_UP) { + if (ifp->if_flags & IFF_UP) { + int s = splimp(); + in6_if_up(ifp); + splx(s); + } + } +#endif +#endif + + } + return (error); +} + +/* + * Set/clear promiscuous mode on interface ifp based on the truth value + * of pswitch. The calls are reference counted so that only the first + * "on" request actually has an effect, as does the final "off" request. + * Results are undefined if the "off" and "on" requests are not matched. + */ +int +ifpromisc(ifp, pswitch) + struct ifnet *ifp; + int pswitch; +{ + struct ifreq ifr; + int error; + + if (pswitch) { + /* + * If the device is not configured up, we cannot put it in + * promiscuous mode. + */ + if ((ifp->if_flags & IFF_UP) == 0) + return (ENETDOWN); + if (ifp->if_pcount++ != 0) + return (0); + ifp->if_flags |= IFF_PROMISC; + log(LOG_INFO, "%s%d: promiscuous mode enabled\n", + ifp->if_name, ifp->if_unit); + } else { + if (--ifp->if_pcount > 0) + return (0); + ifp->if_flags &= ~IFF_PROMISC; + } + ifr.ifr_flags = ifp->if_flags; + error = dlil_ioctl(0, ifp, SIOCSIFFLAGS, (caddr_t)&ifr); + if (error == 0) + rt_ifmsg(ifp); + return error; +} + +/* + * Return interface configuration + * of system. List may be used + * in later ioctl's (above) to get + * other information. 
+ */ +/*ARGSUSED*/ +static int +ifconf(cmd, data) + u_long cmd; + caddr_t data; +{ + register struct ifconf *ifc = (struct ifconf *)data; + register struct ifnet *ifp = ifnet.tqh_first; + register struct ifaddr *ifa; + struct ifreq ifr, *ifrp; + int space = ifc->ifc_len, error = 0; + + ifrp = ifc->ifc_req; + for (; space > sizeof (ifr) && ifp; ifp = ifp->if_link.tqe_next) { + char workbuf[64]; + int ifnlen; + + ifnlen = snprintf(workbuf, sizeof(workbuf), + "%s%d", ifp->if_name, ifp->if_unit); + if(ifnlen + 1 > sizeof ifr.ifr_name) { + error = ENAMETOOLONG; + } else { + strcpy(ifr.ifr_name, workbuf); + } + + if ((ifa = ifp->if_addrhead.tqh_first) == 0) { + bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr)); + error = copyout((caddr_t)&ifr, (caddr_t)ifrp, + sizeof (ifr)); + if (error) + break; + space -= sizeof (ifr), ifrp++; + } else + for ( ; space > sizeof (ifr) && ifa; + ifa = ifa->ifa_link.tqe_next) { + register struct sockaddr *sa = ifa->ifa_addr; +#if COMPAT_43 + if (cmd == OSIOCGIFCONF) { + struct osockaddr *osa = + (struct osockaddr *)&ifr.ifr_addr; + ifr.ifr_addr = *sa; + osa->sa_family = sa->sa_family; + error = copyout((caddr_t)&ifr, (caddr_t)ifrp, + sizeof (ifr)); + ifrp++; + } else +#endif + if (sa->sa_len <= sizeof(*sa)) { + ifr.ifr_addr = *sa; + error = copyout((caddr_t)&ifr, (caddr_t)ifrp, + sizeof (ifr)); + ifrp++; + } else { + space -= sa->sa_len - sizeof(*sa); + if (space < sizeof (ifr)) + break; + error = copyout((caddr_t)&ifr, (caddr_t)ifrp, + sizeof (ifr.ifr_name)); + if (error == 0) + error = copyout((caddr_t)sa, + (caddr_t)&ifrp->ifr_addr, sa->sa_len); + ifrp = (struct ifreq *) + (sa->sa_len + (caddr_t)&ifrp->ifr_addr); + } + if (error) + break; + space -= sizeof (ifr); + } + } + ifc->ifc_len -= space; + return (error); +} + +/* + * Just like if_promisc(), but for all-multicast-reception mode. 
+ */ +int +if_allmulti(ifp, onswitch) + struct ifnet *ifp; + int onswitch; +{ + int error = 0; + int s = splimp(); + + if (onswitch) { + if (ifp->if_amcount++ == 0) { + ifp->if_flags |= IFF_ALLMULTI; + error = dlil_ioctl(0, ifp, SIOCSIFFLAGS, (caddr_t) 0); + } + } else { + if (ifp->if_amcount > 1) { + ifp->if_amcount--; + } else { + ifp->if_amcount = 0; + ifp->if_flags &= ~IFF_ALLMULTI; + error = dlil_ioctl(0, ifp, SIOCSIFFLAGS, (caddr_t) 0); + } + } + splx(s); + + if (error == 0) + rt_ifmsg(ifp); + return error; +} + +/* + * Add a multicast listenership to the interface in question. + * The link layer provides a routine which converts + */ +int +if_addmulti(ifp, sa, retifma) + struct ifnet *ifp; /* interface to manipulate */ + struct sockaddr *sa; /* address to add */ + struct ifmultiaddr **retifma; +{ + struct sockaddr *llsa = 0; + struct sockaddr *dupsa; + int error, s; + struct ifmultiaddr *ifma; + struct rslvmulti_req rsreq; + + /* + * If the matching multicast address already exists + * then don't add a new one, just add a reference + */ + for (ifma = ifp->if_multiaddrs.lh_first; ifma; + ifma = ifma->ifma_link.le_next) { + if (equal(sa, ifma->ifma_addr)) { + ifma->ifma_refcount++; + if (retifma) + *retifma = ifma; + return 0; + } + } + + /* + * Give the link layer a chance to accept/reject it, and also + * find out which AF_LINK address this maps to, if it isn't one + * already. 
+ */ + + rsreq.sa = sa; + rsreq.llsa = &llsa; + + error = dlil_ioctl(sa->sa_family, ifp, SIOCRSLVMULTI, (caddr_t) &rsreq); + + if (error) + return error; + + + MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, M_WAITOK); + MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, M_WAITOK); + bcopy(sa, dupsa, sa->sa_len); + + ifma->ifma_addr = dupsa; + ifma->ifma_lladdr = llsa; + ifma->ifma_ifp = ifp; + ifma->ifma_refcount = 1; + ifma->ifma_protospec = 0; + rt_newmaddrmsg(RTM_NEWMADDR, ifma); + + /* + * Some network interfaces can scan the address list at + * interrupt time; lock them out. + */ + s = splimp(); + LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); + splx(s); + if (retifma) + *retifma = ifma; + + if (llsa != 0) { + for (ifma = ifp->if_multiaddrs.lh_first; ifma; + ifma = ifma->ifma_link.le_next) { + if (equal(ifma->ifma_addr, llsa)) + break; + } + if (ifma) { + ifma->ifma_refcount++; + } else { + MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, + M_IFMADDR, M_WAITOK); + MALLOC(dupsa, struct sockaddr *, llsa->sa_len, + M_IFMADDR, M_WAITOK); + bcopy(llsa, dupsa, llsa->sa_len); + ifma->ifma_addr = dupsa; + ifma->ifma_lladdr = 0; + ifma->ifma_ifp = ifp; + ifma->ifma_refcount = 1; + s = splimp(); + LIST_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link); + splx(s); + } + } + /* + * We are certain we have added something, so call down to the + * interface to let them know about it. + */ + s = splimp(); + + dlil_ioctl(0, ifp, SIOCADDMULTI, (caddr_t) 0); + splx(s); + + return 0; +} + +/* + * Remove a reference to a multicast address on this interface. Yell + * if the request does not match an existing membership. 
+ */
+int
+if_delmulti(ifp, sa)
+	struct ifnet *ifp;
+	struct sockaddr *sa;
+{
+	struct ifmultiaddr *ifma;
+	int s;
+
+	for (ifma = ifp->if_multiaddrs.lh_first; ifma;
+	     ifma = ifma->ifma_link.le_next)
+		if (equal(sa, ifma->ifma_addr))
+			break;
+	if (ifma == 0)
+		return ENOENT;
+
+	if (ifma->ifma_refcount > 1) {
+		ifma->ifma_refcount--;
+		return 0;
+	}
+
+	rt_newmaddrmsg(RTM_DELMADDR, ifma);
+	sa = ifma->ifma_lladdr;
+	s = splimp();
+	LIST_REMOVE(ifma, ifma_link);
+	splx(s);
+#if INET6	/* XXX: for IPv6 multicast routers */
+	if (ifma->ifma_addr->sa_family == AF_INET6 ) {
+		struct sockaddr_in6 *sin6;
+		/*
+		 * An IP6 address of all 0 means stop listening
+		 * to all of Ethernet multicast addresses.
+		 */
+		sin6 = (struct sockaddr_in6 *)ifma->ifma_addr;
+		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
+			ifp->if_flags &= ~IFF_ALLMULTI;
+	}
+#endif /* INET6 */
+	FREE(ifma->ifma_addr, M_IFMADDR);
+	FREE(ifma, M_IFMADDR);
+	if (sa == 0)
+		return 0;
+
+	/*
+	 * Now look for the link-layer address which corresponds to
+	 * this network address.  It had been squirreled away in
+	 * ifma->ifma_lladdr for this purpose (so we don't have
+	 * to call SIOCRSLVMULTI again), and we saved that
+	 * value in sa above.  If some nasty deleted the
+	 * link-layer address out from underneath us, we can deal because
+	 * the address we stored is not the same as the one which was
+	 * in the record for the link-layer address.  (So we don't complain
+	 * in that case.)
+ */ + for (ifma = ifp->if_multiaddrs.lh_first; ifma; + ifma = ifma->ifma_link.le_next) + if (equal(sa, ifma->ifma_addr)) + break; + if (ifma == 0) + return 0; + + if (ifma->ifma_refcount > 1) { + ifma->ifma_refcount--; + return 0; + } + + s = splimp(); + LIST_REMOVE(ifma, ifma_link); + dlil_ioctl(0, ifp, SIOCDELMULTI, (caddr_t) 0); + splx(s); + FREE(ifma->ifma_addr, M_IFMADDR); + FREE(sa, M_IFMADDR); + FREE(ifma, M_IFMADDR); + + return 0; +} + +struct ifmultiaddr * +ifmaof_ifpforaddr(sa, ifp) + struct sockaddr *sa; + struct ifnet *ifp; +{ + struct ifmultiaddr *ifma; + + for (ifma = ifp->if_multiaddrs.lh_first; ifma; + ifma = ifma->ifma_link.le_next) + if (equal(ifma->ifma_addr, sa)) + break; + + return ifma; +} + +SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); +SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); + + +/* + * Shutdown all network activity. Used boot() when halting + * system. + */ +int if_down_all(void) +{ + struct ifnet *ifp; + int s; + + s = splnet(); + TAILQ_FOREACH(ifp, &ifnet, if_link) + if_down(ifp); + + splx(s); + return(0); /* Sheesh */ +} diff --git a/bsd/net/if.h b/bsd/net/if.h new file mode 100644 index 000000000..e601dbf4d --- /dev/null +++ b/bsd/net/if.h @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)if.h	8.1 (Berkeley) 6/10/93
+ */
+
+#ifndef _NET_IF_H_
+#define	_NET_IF_H_
+
+
+/*
+ * Define Data-Link event subclass, and associated
+ * events.
+ */
+
+#define KEV_DL_SUBCLASS 2
+
+#define KEV_DL_SIFFLAGS	    1
+#define KEV_DL_SIFMETRICS   2
+#define KEV_DL_SIFMTU	    3
+#define KEV_DL_SIFPHYS	    4
+#define KEV_DL_SIFMEDIA	    5
+#define KEV_DL_SIFGENERIC   6
+#define KEV_DL_ADDMULTI	    7
+#define KEV_DL_DELMULTI	    8
+#define KEV_DL_IF_ATTACHED  9
+#define KEV_DL_IF_DETACHING 10
+#define KEV_DL_IF_DETACHED  11
+#define KEV_DL_LINK_OFF	    12
+#define KEV_DL_LINK_ON	    13
+
+/*
+ * does not depend on most other systems.  This
+ * helps userland compatibility.
(struct timeval ifi_lastchange) + */ + +#include +#include + + +#define IFF_UP 0x1 /* interface is up */ +#define IFF_BROADCAST 0x2 /* broadcast address valid */ +#define IFF_DEBUG 0x4 /* turn on debugging */ +#define IFF_LOOPBACK 0x8 /* is a loopback net */ +#define IFF_POINTOPOINT 0x10 /* interface is point-to-point link */ +#define IFF_NOTRAILERS 0x20 /* obsolete: avoid use of trailers */ +#define IFF_RUNNING 0x40 /* resources allocated */ +#define IFF_NOARP 0x80 /* no address resolution protocol */ +#define IFF_PROMISC 0x100 /* receive all packets */ +#define IFF_ALLMULTI 0x200 /* receive all multicast packets */ +#define IFF_OACTIVE 0x400 /* transmission in progress */ +#define IFF_SIMPLEX 0x800 /* can't hear own transmissions */ +#define IFF_LINK0 0x1000 /* per link layer defined bit */ +#define IFF_LINK1 0x2000 /* per link layer defined bit */ +#define IFF_LINK2 0x4000 /* per link layer defined bit */ +#define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */ +#define IFF_MULTICAST 0x8000 /* supports multicast */ +#define IFF_SPLITTER IFF_LINK2 /* Y splitter in force */ + + + +#define IFEF_DVR_REENTRY_OK 0x20 /* When set, driver may be reentered from its own thread */ + + +/* flags set internally only: */ +#define IFF_CANTCHANGE \ + (IFF_BROADCAST|IFF_POINTOPOINT|IFF_RUNNING|IFF_OACTIVE|\ + IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI) + +#define IFQ_MAXLEN 50 +#define IFNET_SLOWHZ 1 /* granularity is 1 second */ + +/* + * Message format for use in obtaining information about interfaces + * from getkerninfo and the routing socket + */ +struct if_msghdr { + u_short ifm_msglen; /* to skip over non-understood messages */ + u_char ifm_version; /* future binary compatability */ + u_char ifm_type; /* message type */ + int ifm_addrs; /* like rtm_addrs */ + int ifm_flags; /* value of if_flags */ + u_short ifm_index; /* index for associated ifp */ + struct if_data ifm_data;/* statistics and other data about if */ +}; + +/* + * Message format for use in 
obtaining information about interface addresses + * from getkerninfo and the routing socket + */ +struct ifa_msghdr { + u_short ifam_msglen; /* to skip over non-understood messages */ + u_char ifam_version; /* future binary compatability */ + u_char ifam_type; /* message type */ + int ifam_addrs; /* like rtm_addrs */ + int ifam_flags; /* value of ifa_flags */ + u_short ifam_index; /* index for associated ifp */ + int ifam_metric; /* value of ifa_metric */ +}; + +/* + * Message format for use in obtaining information about multicast addresses + * from the routing socket + */ +struct ifma_msghdr { + u_short ifmam_msglen; /* to skip over non-understood messages */ + u_char ifmam_version; /* future binary compatability */ + u_char ifmam_type; /* message type */ + int ifmam_addrs; /* like rtm_addrs */ + int ifmam_flags; /* value of ifa_flags */ + u_short ifmam_index; /* index for associated ifp */ +}; + +/* + * Interface request structure used for socket + * ioctl's. All interface ioctl's must have parameter + * definitions which begin with ifr_name. The + * remainder may be interface specific. + */ +#define IF_NAMESIZE IFNAMSIZ +struct ifreq { +#define IFNAMSIZ 16 + char ifr_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + union { + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + short ifru_flags; + int ifru_metric; + int ifru_mtu; + int ifru_phys; + int ifru_media; + caddr_t ifru_data; + } ifr_ifru; +#define ifr_addr ifr_ifru.ifru_addr /* address */ +#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */ +#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ +#define ifr_flags ifr_ifru.ifru_flags /* flags */ +#define ifr_metric ifr_ifru.ifru_metric /* metric */ +#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ +#define ifr_phys ifr_ifru.ifru_phys /* physical wire */ +#define ifr_media ifr_ifru.ifru_media /* physical media */ +#define ifr_data ifr_ifru.ifru_data /* for use by interface */ +}; + +#define _SIZEOF_ADDR_IFREQ(ifr) \ + ((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \ + (sizeof(struct ifreq) - sizeof(struct sockaddr) + \ + (ifr).ifr_addr.sa_len) : sizeof(struct ifreq)) + +struct ifaliasreq { + char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct sockaddr ifra_addr; + struct sockaddr ifra_broadaddr; + struct sockaddr ifra_mask; +}; + +struct rslvmulti_req { + struct sockaddr *sa; + struct sockaddr **llsa; +}; + +struct ifmediareq { + char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + int ifm_current; /* current media options */ + int ifm_mask; /* don't care mask */ + int ifm_status; /* media status */ + int ifm_active; /* active options */ + int ifm_count; /* # entries in ifm_ulist array */ + int *ifm_ulist; /* media words */ +}; +/* + * Structure used in SIOCGIFCONF request. + * Used to retrieve interface configuration + * for machine (useful for programs which + * must know all networks accessible). 
+ */ +struct ifconf { + int ifc_len; /* size of associated buffer */ + union { + caddr_t ifcu_buf; + struct ifreq *ifcu_req; + } ifc_ifcu; +#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ +#define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */ +}; + +/* + * Structure for SIOC[AGD]LIFADDR + */ +struct if_laddrreq { + char iflr_name[IFNAMSIZ]; + unsigned int flags; +#define IFLR_PREFIX 0x8000 /* in: prefix given out: kernel fills id */ + unsigned int prefixlen; /* in/out */ + struct sockaddr_storage addr; /* in/out */ + struct sockaddr_storage dstaddr; /* out */ +}; + + +#ifdef KERNEL +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_IFADDR); +MALLOC_DECLARE(M_IFMADDR); +#endif +#endif + +#ifndef KERNEL +struct if_nameindex { + unsigned int if_index; /* 1, 2, ... */ + char *if_name; /* null terminated name: "le0", ... */ +}; + +__BEGIN_DECLS +unsigned int if_nametoindex __P((const char *)); +char *if_indextoname __P((unsigned int, char *)); +struct if_nameindex *if_nameindex __P((void)); +void if_freenameindex __P((struct if_nameindex *)); +__END_DECLS +#endif + +/* XXX - this should go away soon */ +#ifdef KERNEL +#include +#endif + +#endif /* !_NET_IF_H_ */ diff --git a/bsd/net/if_arp.h b/bsd/net/if_arp.h new file mode 100644 index 000000000..2c3b44705 --- /dev/null +++ b/bsd/net/if_arp.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_arp.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NET_IF_ARP_H_ +#define _NET_IF_ARP_H_ + +/* + * Address Resolution Protocol. + * + * See RFC 826 for protocol description. ARP packets are variable + * in size; the arphdr structure defines the fixed-length portion. + * Protocol type values are the same as those for 10 Mb/s Ethernet. + * It is followed by the variable-sized fields ar_sha, arp_spa, + * arp_tha and arp_tpa in that order, according to the lengths + * specified. Field names used correspond to RFC 826. + */ +struct arphdr { + u_short ar_hrd; /* format of hardware address */ +#define ARPHRD_ETHER 1 /* ethernet hardware format */ +#define ARPHRD_FRELAY 15 /* frame relay hardware format */ + u_short ar_pro; /* format of protocol address */ + u_char ar_hln; /* length of hardware address */ + u_char ar_pln; /* length of protocol address */ + u_short ar_op; /* one of: */ +#define ARPOP_REQUEST 1 /* request to resolve address */ +#define ARPOP_REPLY 2 /* response to previous request */ +#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */ +#define ARPOP_REVREPLY 4 /* response giving protocol address */ +#define ARPOP_INVREQUEST 8 /* request to identify peer */ +#define ARPOP_INVREPLY 9 /* response identifying peer */ +/* + * The remaining fields are variable in size, + * according to the sizes above. 
+ */ +#ifdef COMMENT_ONLY + u_char ar_sha[]; /* sender hardware address */ + u_char ar_spa[]; /* sender protocol address */ + u_char ar_tha[]; /* target hardware address */ + u_char ar_tpa[]; /* target protocol address */ +#endif +}; + +/* + * ARP ioctl request + */ +struct arpreq { + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ +}; +/* arp_flags and at_flags field values */ +#define ATF_INUSE 0x01 /* entry in use */ +#define ATF_COM 0x02 /* completed entry (enaddr valid) */ +#define ATF_PERM 0x04 /* permanent entry */ +#define ATF_PUBL 0x08 /* publish entry (respond for other host) */ +#define ATF_USETRAILERS 0x10 /* has requested trailers */ + + +/* + * Ethernet multicast address structure. There is one of these for each + * multicast address or range of multicast addresses that we are supposed + * to listen to on a particular interface. They are kept in a linked list, + * rooted in the interface's arpcom structure. (This really has nothing to + * do with ARP, or with the Internet address family, but this appears to be + * the minimally-disrupting place to put it.) + */ +struct ether_multi { + u_char enm_addrlo[6]; /* low or only address of range */ + u_char enm_addrhi[6]; /* high or only address of range */ + struct arpcom *enm_ac; /* back pointer to arpcom */ + u_int enm_refcount; /* no. claims to this addr/range */ + struct ether_multi *enm_next; /* ptr to next ether_multi */ +}; + +/* + * Structure used by macros below to remember position when stepping through + * all of the ether_multi records. + */ +struct ether_multistep { + struct ether_multi *e_enm; +}; + +#ifdef KERNEL +/* + * Structure shared between the ethernet driver modules and + * the address resolution code. For example, each ec_softc or il_softc + * begins with this structure. + */ +struct arpcom { + /* + * The ifnet struct _must_ be at the head of this structure. 
+ */ + struct ifnet ac_if; /* network-visible interface */ + u_char ac_enaddr[6]; /* ethernet hardware address */ + struct in_addr ac_ipaddr; /* copy of ip address- XXX */ + struct ether_multi *ac_multiaddrs; /* list of ether multicast addrs */ + int ac_multicnt; /* length of ac_multiaddrs list */ +}; + + +#endif + +#endif /* !_NET_IF_ARP_H_ */ diff --git a/bsd/net/if_atm.h b/bsd/net/if_atm.h new file mode 100644 index 000000000..2d79db332 --- /dev/null +++ b/bsd/net/if_atm.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_atm.h,v 1.7 1996/11/09 23:02:27 chuck Exp $ */ + +/* + * + * Copyright (c) 1996 Charles D. Cranor and Washington University. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by Charles D. Cranor and
+ *      Washington University.
+ * 4. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * net/if_atm.h
+ */
+
+
+#ifndef NO_ATM_PVCEXT
+/*
+ * ATM_PVCEXT enables PVC extension: VP/VC shaping
+ * and PVC shadow interfaces.
+ */ +#define ATM_PVCEXT /* enable pvc extention */ +#endif + +#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__) +#define RTALLOC1(A,B) rtalloc1((A),(B)) +#elif defined(__FreeBSD__) +#define RTALLOC1(A,B) rtalloc1((A),(B),0UL) +#endif + +/* + * pseudo header for packet transmission + */ +struct atm_pseudohdr { + u_int8_t atm_ph[4]; /* flags+VPI+VCI1(msb)+VCI2(lsb) */ +}; + +#define ATM_PH_FLAGS(X) ((X)->atm_ph[0]) +#define ATM_PH_VPI(X) ((X)->atm_ph[1]) +#define ATM_PH_VCI(X) ((((X)->atm_ph[2]) << 8) | ((X)->atm_ph[3])) +#define ATM_PH_SETVCI(X,V) { \ + (X)->atm_ph[2] = ((V) >> 8) & 0xff; \ + (X)->atm_ph[3] = ((V) & 0xff); \ +} + +#define ATM_PH_AAL5 0x01 /* use AAL5? (0 == aal0) */ +#define ATM_PH_LLCSNAP 0x02 /* use the LLC SNAP encoding (iff aal5) */ + +#if ATM_PVCEXT +#define ATM_PH_INERNAL 0x20 /* reserve for kernel internal use */ +#endif +#define ATM_PH_DRIVER7 0x40 /* reserve for driver's use */ +#define ATM_PH_DRIVER8 0x80 /* reserve for driver's use */ + +#define ATMMTU 9180 /* ATM MTU size for IP */ + /* XXX: could be 9188 with LLC/SNAP according + to comer */ + +/* user's ioctl hook for raw atm mode */ +#define SIOCRAWATM _IOWR('a', 122, int) /* set driver's raw mode */ + +/* atm_pseudoioctl: turns on and off RX VCIs [for internal use only!] */ +struct atm_pseudoioctl { + struct atm_pseudohdr aph; + void *rxhand; +}; +#define SIOCATMENA _IOWR('a', 123, struct atm_pseudoioctl) /* enable */ +#define SIOCATMDIS _IOWR('a', 124, struct atm_pseudoioctl) /* disable */ + +#if ATM_PVCEXT + +/* structure to control PVC transmitter */ +struct pvctxreq { + /* first entry must be compatible with struct ifreq */ + char pvc_ifname[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + struct atm_pseudohdr pvc_aph; /* (flags) + vpi:vci */ + struct atm_pseudohdr pvc_joint; /* for vp shaping: another vc + to share the shaper */ + int pvc_pcr; /* peak cell rate (shaper value) */ +}; + +/* use ifioctl for now */ +#define SIOCSPVCTX _IOWR('i', 95, struct pvctxreq) +#define SIOCGPVCTX _IOWR('i', 96, struct pvctxreq) +#define SIOCSPVCSIF _IOWR('i', 97, struct ifreq) +#define SIOCGPVCSIF _IOWR('i', 98, struct ifreq) + +#ifdef KERNEL +#define ATM_PH_PVCSIF ATM_PH_INERNAL /* pvc shadow interface */ +#endif +#endif /* ATM_PVCEXT */ + +/* + * XXX forget all the garbage in if_llc.h and do it the easy way + */ + +#define ATMLLC_HDR "\252\252\3\0\0\0" +struct atmllc { + u_int8_t llchdr[6]; /* aa.aa.03.00.00.00 */ + u_int8_t type[2]; /* "ethernet" type */ +}; + +/* ATM_LLC macros: note type code in host byte order */ +#define ATM_LLC_TYPE(X) (((X)->type[0] << 8) | ((X)->type[1])) +#define ATM_LLC_SETTYPE(X,V) { \ + (X)->type[1] = ((V) >> 8) & 0xff; \ + (X)->type[0] = ((V) & 0xff); \ +} + +#ifdef KERNEL +void atm_ifattach __P((struct ifnet *)); +void atm_input __P((struct ifnet *, struct atm_pseudohdr *, + struct mbuf *, void *)); +int atm_output __P((struct ifnet *, struct mbuf *, struct sockaddr *, + struct rtentry *)); +#endif +#if ATM_PVCEXT +char *shadow2if __P((char *)); +#ifdef KERNEL +struct ifnet *pvc_attach __P((struct ifnet *)); +int pvc_setaph __P((struct ifnet *, struct atm_pseudohdr *)); +#endif +#endif diff --git a/bsd/net/if_atmsubr.c b/bsd/net/if_atmsubr.c new file mode 100644 index 000000000..eb7bdb7f5 --- /dev/null +++ b/bsd/net/if_atmsubr.c @@ -0,0 +1,652 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_atmsubr.c,v 1.10 1997/03/11 23:19:51 chuck Exp $ */ + +/* + * + * Copyright (c) 1996 Charles D. Cranor and Washington University. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Charles D. Cranor and + * Washington University. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * if_atmsubr.c + */ + +#include "opt_inet.h" +#include "opt_natm.h" + +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include +#include +#include + +#include +#include +#include /* XXX: for ETHERTYPE_* */ +#if defined(INET) || defined(INET6) +#include +#endif +#if NATM +#include +#endif + +#ifndef ETHERTYPE_IPV6 +#define ETHERTYPE_IPV6 0x86dd +#endif + +#define senderr(e) { error = (e); goto bad;} + +/* + * atm_output: ATM output routine + * inputs: + * "ifp" = ATM interface to output to + * "m0" = the packet to output + * "dst" = the sockaddr to send to (either IP addr, or raw VPI/VCI) + * "rt0" = the route to use + * returns: error code [0 == ok] + * + * note: special semantic: if (dst == NULL) then we assume "m" already + * has an atm_pseudohdr on it and just send it directly. + * [for native mode ATM output] if dst is null, then + * rt0 must also be NULL. 
+ */ + +int +atm_output(ifp, m0, dst, rt0) + register struct ifnet *ifp; + struct mbuf *m0; + struct sockaddr *dst; + struct rtentry *rt0; +{ + u_int16_t etype = 0; /* if using LLC/SNAP */ + int s, error = 0, sz; + struct atm_pseudohdr atmdst, *ad; + register struct mbuf *m = m0; + register struct rtentry *rt; + struct atmllc *atmllc; + struct atmllc *llc_hdr = NULL; + u_int32_t atm_flags; + + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + senderr(ENETDOWN); + + /* + * check route + */ + if ((rt = rt0) != NULL) { + + if ((rt->rt_flags & RTF_UP) == 0) { /* route went down! */ + if ((rt0 = rt = RTALLOC1(dst, 0)) != NULL) + rt->rt_refcnt--; + else + senderr(EHOSTUNREACH); + } + + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_gwroute == 0) + goto lookup; + if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { + rtfree(rt); rt = rt0; + lookup: rt->rt_gwroute = RTALLOC1(rt->rt_gateway, 0); + if ((rt = rt->rt_gwroute) == 0) + senderr(EHOSTUNREACH); + } + } + + /* XXX: put RTF_REJECT code here if doing ATMARP */ + + } + + /* + * check for non-native ATM traffic (dst != NULL) + */ + if (dst) { + switch (dst->sa_family) { +#if defined(INET) || defined(INET6) + case AF_INET: + case AF_INET6: + if (!atmresolve(rt, m, dst, &atmdst)) { + m = NULL; + /* XXX: atmresolve already free'd it */ + senderr(EHOSTUNREACH); + /* XXX: put ATMARP stuff here */ + /* XXX: watch who frees m on failure */ + } + if (dst->sa_family == AF_INET6) + etype = htons(ETHERTYPE_IPV6); + else + etype = htons(ETHERTYPE_IP); + break; +#endif /* INET || INET6 */ + + case AF_UNSPEC: + /* + * XXX: bpfwrite or output from a pvc shadow if. 
+ * assuming dst contains 12 bytes (atm pseudo + * header (4) + LLC/SNAP (8)) + */ + bcopy(dst->sa_data, &atmdst, sizeof(atmdst)); + llc_hdr = (struct atmllc *)(dst->sa_data + sizeof(atmdst)); + break; + + default: +#if defined(__NetBSD__) || defined(__OpenBSD__) + printf("%s: can't handle af%d\n", ifp->if_xname, + dst->sa_family); +#elif defined(__FreeBSD__) || defined(__bsdi__) + printf("%s%d: can't handle af%d\n", ifp->if_name, + ifp->if_unit, dst->sa_family); +#endif + senderr(EAFNOSUPPORT); + } + + /* + * must add atm_pseudohdr to data + */ + sz = sizeof(atmdst); + atm_flags = ATM_PH_FLAGS(&atmdst); + if (atm_flags & ATM_PH_LLCSNAP) sz += 8; /* sizeof snap == 8 */ + M_PREPEND(m, sz, M_DONTWAIT); + if (m == 0) + senderr(ENOBUFS); + ad = mtod(m, struct atm_pseudohdr *); + *ad = atmdst; + if (atm_flags & ATM_PH_LLCSNAP) { + atmllc = (struct atmllc *)(ad + 1); + if (llc_hdr == NULL) { + bcopy(ATMLLC_HDR, atmllc->llchdr, + sizeof(atmllc->llchdr)); + ATM_LLC_SETTYPE(atmllc, etype); + /* note: already in network order */ + } + else + bcopy(llc_hdr, atmllc, sizeof(struct atmllc)); + } + } + + /* + * Queue message on interface, and start output if interface + * not yet active. + */ + s = splimp(); + if (IF_QFULL(&ifp->if_snd)) { + IF_DROP(&ifp->if_snd); + splx(s); + senderr(ENOBUFS); + } + ifp->if_obytes += m->m_pkthdr.len; + IF_ENQUEUE(&ifp->if_snd, m); + if ((ifp->if_flags & IFF_OACTIVE) == 0) + (*ifp->if_start)(ifp); + splx(s); + return (error); + +bad: + if (m) + m_freem(m); + return (error); +} + +/* + * Process a received ATM packet; + * the packet is in the mbuf chain m. 
+ */ +void +atm_input(ifp, ah, m, rxhand) + struct ifnet *ifp; + register struct atm_pseudohdr *ah; + struct mbuf *m; + void *rxhand; +{ + register struct ifqueue *inq; + u_int16_t etype = ETHERTYPE_IP; /* default */ + int s; + + if ((ifp->if_flags & IFF_UP) == 0) { + m_freem(m); + return; + } + ifp->if_ibytes += m->m_pkthdr.len; + +#if ATM_PVCEXT + if (ATM_PH_FLAGS(ah) & ATM_PH_PVCSIF) { + /* + * when PVC shadow interface is used, pointer to + * the shadow interface is passed as rxhand. + * override the receive interface of the packet. + */ + m->m_pkthdr.rcvif = (struct ifnet *)rxhand; + rxhand = NULL; + } +#endif /* ATM_PVCEXT */ + + if (rxhand) { +#if NATM + struct natmpcb *npcb = rxhand; + s = splimp(); /* in case 2 atm cards @ diff lvls */ + npcb->npcb_inq++; /* count # in queue */ + splx(s); + schednetisr(NETISR_NATM); + inq = &natmintrq; + m->m_pkthdr.rcvif = rxhand; /* XXX: overload */ +#else + printf("atm_input: NATM detected but not configured in kernel\n"); + m_freem(m); + return; +#endif + } else { + /* + * handle LLC/SNAP header, if present + */ + if (ATM_PH_FLAGS(ah) & ATM_PH_LLCSNAP) { + struct atmllc *alc; + if (m->m_len < sizeof(*alc) && + (m = m_pullup(m, sizeof(*alc))) == 0) + return; /* failed */ + alc = mtod(m, struct atmllc *); + if (bcmp(alc, ATMLLC_HDR, 6)) { +#if defined(__NetBSD__) || defined(__OpenBSD__) + printf("%s: recv'd invalid LLC/SNAP frame [vp=%d,vc=%d]\n", + ifp->if_xname, ATM_PH_VPI(ah), ATM_PH_VCI(ah)); +#elif defined(__FreeBSD__) || defined(__bsdi__) + printf("%s%d: recv'd invalid LLC/SNAP frame [vp=%d,vc=%d]\n", + ifp->if_name, ifp->if_unit, ATM_PH_VPI(ah), ATM_PH_VCI(ah)); +#endif + m_freem(m); + return; + } + etype = ATM_LLC_TYPE(alc); + m_adj(m, sizeof(*alc)); + } + + switch (etype) { +#if INET + case ETHERTYPE_IP: + schednetisr(NETISR_IP); + inq = &ipintrq; + break; +#endif +#if INET6 + case ETHERTYPE_IPV6: + schednetisr(NETISR_IPV6); + inq = &ip6intrq; + break; +#endif + default: + m_freem(m); + return; + } + } + + s = 
splimp(); + if (IF_QFULL(inq)) { + IF_DROP(inq); + m_freem(m); + } else + IF_ENQUEUE(inq, m); + splx(s); +} + +/* + * Perform common duties while attaching to interface list + */ +void +atm_ifattach(ifp) + register struct ifnet *ifp; +{ + register struct ifaddr *ifa; + register struct sockaddr_dl *sdl; + + ifp->if_type = IFT_ATM; + ifp->if_addrlen = 0; + ifp->if_hdrlen = 0; + ifp->if_mtu = ATMMTU; + ifp->if_output = atm_output; + +#if defined(__NetBSD__) || defined(__OpenBSD__) + for (ifa = ifp->if_addrlist.tqh_first; ifa != 0; + ifa = ifa->ifa_list.tqe_next) +#elif defined(__FreeBSD__) && (__FreeBSD__ > 2) + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) +#elif defined(__FreeBSD__) || defined(__bsdi__) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#endif + if ((sdl = (struct sockaddr_dl *)ifa->ifa_addr) && + sdl->sdl_family == AF_LINK) { + sdl->sdl_type = IFT_ATM; + sdl->sdl_alen = ifp->if_addrlen; +#ifdef notyet /* if using ATMARP, store hardware address using the next line */ + bcopy(ifp->hw_addr, LLADDR(sdl), ifp->if_addrlen); +#endif + break; + } + +} + +#if ATM_PVCEXT +/* + * ATM PVC shadow interface: a trick to assign a shadow interface + * to a PVC. + * with shadow interface, each PVC looks like an individual + * Point-to-Point interface. + * as oposed to the NBMA model, a shadow interface is inherently + * multicast capable (no LANE/MARS required). 
+ */ +struct pvcsif { + struct ifnet sif_shadow; /* shadow ifnet structure per pvc */ + struct atm_pseudohdr sif_aph; /* flags + vpi:vci */ + struct ifnet *sif_ifp; /* pointer to the genuine interface */ +}; + +static int pvc_output __P((struct ifnet *, struct mbuf *, + struct sockaddr *, struct rtentry *)); +static int pvc_ioctl __P((struct ifnet *, u_long, caddr_t)); + +/* + * create and attach per pvc shadow interface + * (currently detach is not supported) + */ +static int pvc_number = 0; + +struct ifnet * +pvc_attach(ifp) + struct ifnet *ifp; +{ + struct pvcsif *pvcsif; + struct ifnet *shadow; + struct ifaddr *ifa; + struct sockaddr_dl *sdl; + int s; + + MALLOC(pvcsif, struct pvcsif *, sizeof(struct pvcsif), + M_DEVBUF, M_WAITOK); + bzero(pvcsif, sizeof(struct pvcsif)); + + pvcsif->sif_ifp = ifp; + shadow = &pvcsif->sif_shadow; + + shadow->if_name = "pvc"; + shadow->if_family = APPLE_IF_FAM_PVC; + shadow->if_unit = pvc_number++; + shadow->if_flags = ifp->if_flags | (IFF_POINTOPOINT | IFF_MULTICAST); + shadow->if_ioctl = pvc_ioctl; + shadow->if_output = pvc_output; + shadow->if_start = NULL; + shadow->if_mtu = ifp->if_mtu; + shadow->if_type = ifp->if_type; + shadow->if_addrlen = ifp->if_addrlen; + shadow->if_hdrlen = ifp->if_hdrlen; + shadow->if_softc = pvcsif; + shadow->if_snd.ifq_maxlen = 50; /* dummy */ + + s = splimp(); + if_attach(shadow); + +#if defined(__NetBSD__) || defined(__OpenBSD__) + for (ifa = shadow->if_addrlist.tqh_first; ifa != 0; + ifa = ifa->ifa_list.tqe_next) +#elif defined(__FreeBSD__) && (__FreeBSD__ > 2) + for (ifa = shadow->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) +#elif defined(__FreeBSD__) || defined(__bsdi__) + for (ifa = shadow->if_addrlist; ifa; ifa = ifa->ifa_next) +#endif + if ((sdl = (struct sockaddr_dl *)ifa->ifa_addr) && + sdl->sdl_family == AF_LINK) { + sdl->sdl_type = IFT_ATM; + sdl->sdl_alen = shadow->if_addrlen; + break; + } + splx(s); + + return (shadow); +} + +/* + * pvc_output relays the packet to 
atm_output along with vpi:vci info. + */ +static int +pvc_output(shadow, m, dst, rt) + struct ifnet *shadow; + struct mbuf *m; + struct sockaddr *dst; + struct rtentry *rt; +{ + struct pvcsif *pvcsif; + struct sockaddr dst_addr; + struct atmllc *atmllc; + u_int16_t etype = 0; + int error = 0; + + if ((shadow->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + senderr(ENETDOWN); + + pvcsif = shadow->if_softc; + if (ATM_PH_VCI(&pvcsif->sif_aph) == 0) + senderr(ENETDOWN); + + /* + * create a dummy sockaddr: (using bpfwrite interface) + * put atm pseudo header and llc/snap into sa_data (12 bytes) + * and mark it as AF_UNSPEC. + */ + if (dst) { + switch (dst->sa_family) { +#if defined(INET) || defined(INET6) + case AF_INET: + case AF_INET6: + if (dst->sa_family == AF_INET6) + etype = htons(ETHERTYPE_IPV6); + else + etype = htons(ETHERTYPE_IP); + break; +#endif + + default: + printf("%s%d: can't handle af%d\n", shadow->if_name, + shadow->if_unit, dst->sa_family); + senderr(EAFNOSUPPORT); + } + } + + dst_addr.sa_family = AF_UNSPEC; + bcopy(&pvcsif->sif_aph, dst_addr.sa_data, + sizeof(struct atm_pseudohdr)); + atmllc = (struct atmllc *) + (dst_addr.sa_data + sizeof(struct atm_pseudohdr)); + bcopy(ATMLLC_HDR, atmllc->llchdr, sizeof(atmllc->llchdr)); + ATM_LLC_SETTYPE(atmllc, etype); /* note: already in network order */ + + return atm_output(pvcsif->sif_ifp, m, &dst_addr, rt); + +bad: + if (m) + m_freem(m); + return (error); +} + +static int +pvc_ioctl(shadow, cmd, data) + struct ifnet *shadow; + u_long cmd; + caddr_t data; +{ + struct ifnet *ifp; + struct pvcsif *pvcsif; + struct ifreq *ifr = (struct ifreq *) data; + void (*ifa_rtrequest)(int, struct rtentry *, struct sockaddr *) = NULL; + int error = 0; + + pvcsif = (struct pvcsif *)shadow->if_softc; + ifp = pvcsif->sif_ifp; + if (ifp == 0 || ifp->if_ioctl == 0) + return (EOPNOTSUPP); + + /* + * pre process + */ + switch (cmd) { + case SIOCGPVCSIF: + snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), + "%s%d", 
ifp->if_name, ifp->if_unit); + return (0); + + case SIOCGPVCTX: + do { + struct pvctxreq *pvcreq = (struct pvctxreq *)data; + + snprintf(pvcreq->pvc_ifname, + sizeof(pvcreq->pvc_ifname), "%s%d", + ifp->if_name, ifp->if_unit); + pvcreq->pvc_aph = pvcsif->sif_aph; + } while (0); + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifr == 0) + return (EAFNOSUPPORT); /* XXX */ + switch (ifr->ifr_addr.sa_family) { +#if INET + case AF_INET: + return (0); +#endif +#if INET6 + case AF_INET6: + return (0); +#endif + default: + return (EAFNOSUPPORT); + } + break; + case SIOCSIFADDR: + if (ifp->if_flags & IFF_UP) { + /* real if is already up */ + shadow->if_flags = ifp->if_flags | + (IFF_POINTOPOINT|IFF_MULTICAST); + return (0); + } + /* + * XXX: save the rtrequest field since the atm driver + * overwrites this field. + */ + ifa_rtrequest = ((struct ifaddr *)data)->ifa_rtrequest; + break; + + case SIOCSIFFLAGS: + if ((shadow->if_flags & IFF_UP) == 0) { + /* + * interface down. don't pass this to + * the real interface. + */ + return (0); + } + if (shadow->if_flags & IFF_UP) { + /* + * interface up. if the real if is already up, + * nothing to do. + */ + if (ifp->if_flags & IFF_UP) { + shadow->if_flags = ifp->if_flags | + (IFF_POINTOPOINT|IFF_MULTICAST); + return (0); + } + } + break; + } + + /* + * pass the ioctl to the genuine interface + */ + error = (*ifp->if_ioctl)(ifp, cmd, data); + + /* + * post process + */ + switch (cmd) { + case SIOCSIFMTU: + shadow->if_mtu = ifp->if_mtu; + break; + case SIOCSIFADDR: + /* restore rtrequest */ + ((struct ifaddr *)data)->ifa_rtrequest = ifa_rtrequest; + /* fall into... 
*/ + case SIOCSIFFLAGS: + /* update if_flags */ + shadow->if_flags = ifp->if_flags + | (IFF_POINTOPOINT|IFF_MULTICAST); + break; + } + + return (error); +} + +int pvc_setaph(shadow, aph) + struct ifnet *shadow; + struct atm_pseudohdr *aph; +{ + struct pvcsif *pvcsif; + + pvcsif = shadow->if_softc; + bcopy(aph, &pvcsif->sif_aph, sizeof(struct atm_pseudohdr)); + return (0); +} + +#endif /* ATM_PVCEXT */ diff --git a/bsd/net/if_blue.c b/bsd/net/if_blue.c new file mode 100644 index 000000000..55458ec4b --- /dev/null +++ b/bsd/net/if_blue.c @@ -0,0 +1,677 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997, 1998 Apple Computer, Inc. 
All Rights Reserved */ +/* + * @(#)if_blue.c 1.1 (MacOSX) 6/10/43 + * Justin Walker, 9970520 + * First wave - splitter and notification support for the Blue Box + * 980130 - Second wave - Performance improvements, reorg and cleanup + */ + +#include +#if KDEBUG + +#define DBG_SPLT_BFCHK DRVDBG_CODE(DBG_DRVSPLT, 0) +#define DBG_SPLT_APPND DRVDBG_CODE(DBG_DRVSPLT, 1) +#define DBG_SPLT_MBUF DRVDBG_CODE(DBG_DRVSPLT, 2) +#define DBG_SPLT_DUP DRVDBG_CODE(DBG_DRVSPLT, 3) +#define DBG_SPLT_PAD DRVDBG_CODE(DBG_DRVSPLT, 4) + +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include "if_blue.h" +#include "ndrv.h" + +#if INET +#include +#include +#endif +#include + +#if NS +#include +#include +#endif + +#if ISO +#include +#include +#include +#include +#endif + +#if LLC +#include +#include +#endif + +#include +#include +#include +#include + +/* Dummy IFs to differentiate source of looped packets */ +struct ifnet rhap_if_s; +struct ifnet *rhap_if = &rhap_if_s; +struct ifnet_blue *blue_if; +struct sockaddr_dl ndrvsrc = {sizeof (struct sockaddr_dl), AF_NDRV}; + +struct ifqueue blueq; + +extern int if_register(register struct BlueFilter *f +#ifdef BF_if + , + register struct ifnet *ifp +#endif + ); + +/* + * Blue Box support: + * 1st cut: the Y splitter + * A process turns on the splitter by opening the "raw" device + * (socket() for AF_NDRV) and issuing an SIOCSSPLITTER ioctl. + * Incoming packets are routed into MacOSX as well as to the requesting + * interface. + * Outbound packets are sent, and are examined to see if they should go + * back up (loopback, sort of). 
Packets that are looped back include: + * broadcast + * multicast + */ +int +new_splitter(register struct socket *so) +{ register struct ifnet_blue *ifb; + register struct ndrv_cb *np; + register struct ifnet *ifp; + struct BlueFilter filter; + int retval; + + if ((ifb = _MALLOC(sizeof (struct ifnet_blue), M_PCB, M_WAITOK)) + == NULL) + { +#if BLUE_DEBUG + kprintf("Can't create new splitter\n"); +#endif + return(ENOBUFS); + } + bzero(ifb, sizeof(struct ifnet_blue)); + np = (struct ndrv_cb *)so->so_pcb; +#if BLUE_DEBUG + kprintf("NEW SPLT: %x, %x\n", so, np); + if (np) + printf("SIG: %x, ifp: %x\n", np->nd_signature, np->nd_if); +#endif + if (np == NULL) + return(EINVAL); /* XXX */ + if (np->nd_signature != NDRV_SIGNATURE) + return(EINVAL); /* XXX */ + if ((ifp = np->nd_if) == NULL) + return(EINVAL); /* XXX */ + if (ifp->if_flags & IFF_SPLITTER) + return(EBUSY); + if ((ifp->if_flags&IFF_UP) == 0) + return(ENXIO); + /* + * Bump the receive sockbuf size - need a big buffer + * to offset the scheduling latencies of the system + * Try to get something if our grandiose design fails. + */ + if (sbreserve(&so->so_rcv, 131072) == 0) + { if (sbreserve(&so->so_rcv, 65536) == 0 && + sbreserve(&so->so_rcv, 32768) == 0 && + sbreserve(&so->so_rcv, 16384) == 0) + return(ENOBUFS); + } + ifp->if_flags |= IFF_SPLITTER; + /* + * Register each IP address associated with this ifnet + * This takes care of addresses registered prior to startup + * of the BlueBox. 
+ * TODO: Appletalk sockaddrs + */ +#define IFA2IN(ifa) \ + ((struct in_addr) \ + ((struct sockaddr_in *)(ifa->ifa_addr))->sin_addr).s_addr + { struct ifaddr *ifa; + + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) + { if (ifa->ifa_addr->sa_family == AF_INET) + { filter.BF_flags = (BF_ALLOC|BF_IP); + filter.BF_address = IFA2IN(ifa); +#if BLUE_DEBUG + kprintf("[1] IP registering [%x] %x\n", + filter.BF_flags, + (unsigned int)filter.BF_address); +#endif + retval = if_register(&filter); +#if BLUE_DEBUG + if (retval) + kprintf("if_register(IP) returns %d\n", + retval); +#endif + } + } + } + + blue_if = (struct ifnet_blue *)ifb; + ifb->blue_pid = ((struct proc *)current_proc())->p_pid; + ifb->ifb_so = so; + ifp->if_Y = (void *)ifb; + return(0); +} + +/* + * Determine if destined for BlueBox or not. Called from ether_output() + * and ether_input(). + * Returns NULL if we ate the packet, otherwise, the mbuf to continue with. + */ +struct mbuf * +splitter_input(register struct mbuf *m, register struct ifnet *ifp) +{ register struct ifnet_blue *ifb; +#if 0 + register int s, flags; +#else + register int flags; +#endif + int rv; + register struct mbuf *m0 = NULL; + struct mbuf *m1; + extern struct mbuf *m_dup(struct mbuf *, int); + extern int BlueFilter_check(struct mbuf **, struct ifnet_blue *); + extern void blue_notify(struct mbuf *); + extern int blue_notify1(struct mbuf *); + + if ((ifb = (struct ifnet_blue *)ifp->if_Y) == NULL) + { ifp->if_flags &= ~IFF_SPLITTER; + return(m); + } + flags = m->m_flags; + m1 = m; + /* Check filters */ + if ((rv = BlueFilter_check(&m1, ifb)) == -1) + return(m1); /* Not for BB, MacOSX will want to see it. */ + m = m1; + if (rv == 0) /* It's for both - dup the packet */ + { m0 = m_dup(m, M_DONTWAIT); + if (m0 == NULL) + { blue_if->no_bufs1++; + return(m); /* Give it to MacOSX */ + } + } else + { /* Oy, veh! The depths to which we stoop! 
*/ + /* We'll just assume M_PKTHDR is set */ + if (m->m_next == 0 && (m->m_flags & M_EXT) + && m->m_pkthdr.len <= MHLEN) + { m0 = m_dup(m, M_DONTWAIT); + if (m0) + { m_freem(m); + m = NULL; + } else + m0 = m; + } else + m0 = m; + } + if (flags & 0x10) + blue_if->pkts_looped_r2b++; + +#if 0 + schednetisr(NETISR_BLUE); + s = splimp(); + if (IF_QFULL(&blueq)) { + IF_DROP(&blueq); + m_freem(m0); + } else + IF_ENQUEUE(&blueq, m0); + splx(s); +#else + blue_notify1(m0); + sorwakeup(blue_if->ifb_so); + blue_if->sig_sent++; +#endif + /* If we eat the packet (rv==1) return NULL */ + return(rv == 0 ? m : NULL); +} + +void +blue_notify() +{ register int do_notify = 0; + register int s; + register struct mbuf *m; + extern int blue_notify1(struct mbuf *); + + /* + * Move the packets from the blue queue to the indicated socket + * If we haven't told anyone yet, send a signal. + */ + for (;;) + { s = splimp(); + IF_DEQUEUE(&blueq, m); + splx(s); + if (m == 0) + break; + + do_notify = blue_notify1(m); + } + if (do_notify) + sorwakeup(blue_if->ifb_so); /* Start by using SIGIO */ +} + +int +blue_notify1(register struct mbuf *m) +{ register int rv; + + /* move packet from if queue to socket */ + /* !!!Fix this to work generically!!! 
*/ + ndrvsrc.sdl_type = IFT_ETHER; + ndrvsrc.sdl_nlen = 0; + ndrvsrc.sdl_alen = 6; + ndrvsrc.sdl_slen = 0; + bcopy(m->m_data+6, &ndrvsrc.sdl_data, 6); + + if (sbappendaddr(&(blue_if->ifb_so->so_rcv), + (struct sockaddr *)&ndrvsrc, m, + (struct mbuf *)0) == 0) + { register struct mbuf *n; + + KERNEL_DEBUG(DBG_SPLT_APPND | DBG_FUNC_NONE, + blue_if->ifb_so->so_rcv.sb_cc, + blue_if->ifb_so->so_rcv.sb_hiwat, + blue_if->ifb_so->so_rcv.sb_mbcnt, + blue_if->ifb_so->so_rcv.sb_mbmax, + blue_if->ifb_so->so_rcv.sb_lowat ); + if (m->m_flags & M_PKTHDR) + KERNEL_DEBUG(DBG_SPLT_MBUF, 0, m->m_pkthdr.len, + m->m_flags, 0, 0); + for (n = m; n; n = n->m_next) + KERNEL_DEBUG(DBG_SPLT_MBUF, 1, + (int)n, (int)n->m_next, n->m_len, + n->m_flags); + m_freem(m); + blue_if->full_sockbuf++; + rv = 1; + } else + { register struct mbuf *n; + + KERNEL_DEBUG(DBG_SPLT_APPND | DBG_FUNC_NONE, + blue_if->ifb_so->so_rcv.sb_cc, + blue_if->ifb_so->so_rcv.sb_hiwat, + blue_if->ifb_so->so_rcv.sb_mbcnt, + blue_if->ifb_so->so_rcv.sb_mbmax, + blue_if->ifb_so->so_rcv.sb_lowat ); + if (m->m_flags & M_PKTHDR) + KERNEL_DEBUG(DBG_SPLT_MBUF, 2, m->m_pkthdr.len, + m->m_flags, 0, 0); + for (n = m; n; n = n->m_next) + KERNEL_DEBUG(DBG_SPLT_MBUF, 3, + (int)n, (int)n->m_next, n->m_len, + n->m_flags); + blue_if->pkts_up++; + rv = 0; + } + return(rv); +} + +/* + * Check the incoming packet against the registered filters + * Rules (the rules are subtly different for input to the + * y-adapter customer and the "real" stacks): + * For BB: return 1 + * For Both: return 0 + * Not For BB: return -1 + * Multicast/Broadcast => For Both + * Hack: + * if no registered filters, For Both + * Atalk filter registered + * filter matches => For BB else Not For BB + * IP filter registered + * filter matches => For BB else Not For BB + * Not For BB + * WARNING: this is a big-endian routine. + * WARNING 2: m_pullup can give you a new mbuf! 
+ */ +int +BlueFilter_check(struct mbuf **m0, register struct ifnet_blue *ifb) +{ register struct BlueFilter *bf; + register unsigned char *p; + register unsigned short *s; + register unsigned long *l; + int total, flags; + register struct mbuf *m; + extern struct mbuf *m_pullup(struct mbuf *, int); +#define FILTER_LEN 32 + + KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_START, 0, 0, 0, 0, 0 ); + + m = *m0; + if (FILTER_LEN > m->m_pkthdr.len) + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 0, 0, 0, 0, 0 ); + return(-1); + } + flags = m->m_flags; + while ((FILTER_LEN > m->m_len) && m->m_next) { + total = m->m_len + (m->m_next)->m_len; + if ((m = m_pullup(m, min(FILTER_LEN, total))) == 0) + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 1, flags, total, 0, 0); + return(-1); + } + } + *m0 = m; /* Update, just in case */ + + p = mtod(m, unsigned char *); /* Point to destination media addr */ + if (p[0] & 0x01) /* Multicast/broadcast */ + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 2, 0, 0, 0, 0 ); + return(0); + } + s = (unsigned short *)p; + bf = &ifb->filter[BFS_ATALK]; + if (!bf->BF_flags && !bf[1].BF_flags) /* Hack for Developer Release Blue Box */ + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 3, 0, 0, 0, 0 ); + return(0); + } +#if BLUE_DEBUG + kprintf("PKT: %x, %x, %x\n", s[6], s[7], s[8]); +#endif + if (bf->BF_flags) /* Filtering Appletalk */ + { l = (unsigned long *)&s[8]; +#if BLUE_DEBUG + kprintf("AT: %x, %x, %x, %x, %x, %x\n", s[6], s[7], + *l, s[10], s[13], p[30]); +#endif + if (s[6] <= ETHERMTU) + { if (s[7] == 0xaaaa) /* Could be Atalk */ + { /* Verify SNAP header */ + if (*l == 0x03080007 && s[10] == 0x809b) + { if ((bf->BF_flags&BF_VALID) == 0 || + (s[13] == bf->BF_address && + p[30] == bf->BF_node)) + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 4, + s[13], p[30], 0, 0 ); + return(1); + } + } else if (*l == 0x03000000 && s[10] == 0x80f3) + /* AARP pkts aren't net-addressed */ + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 5, 0, 0, 0, 0 ); + return(0); + } 
+ /* Not for us */ + KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 6, s[13], p[30], 0, 0 ); + return(-1); + } else /* Not for us? */ + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 7, s[7], 0, 0, 0 ); + return(-1); + } + } /* Fall through */ + } /* Fall through */ + bf++; /* Look for IP next */ + if (bf->BF_flags) /* Filtering IP */ + { + l = (unsigned long *)&s[15]; +#if BLUE_DEBUG + kprintf("IP: %x, %x\n", s[6], *l); +#endif + if (s[6] > ETHERMTU) + { if (s[6] == 0x800) /* Is IP */ + { /* Verify IP address */ + if ((bf->BF_flags&BF_VALID) == 0 || + *l == bf->BF_address) + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 8, *l, 0, 0, 0 ); + return(1); + } else /* Not for us */ + { KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 9, *l, 0, 0, 0 ); + return(-1); + } + } else if (s[6] == 0x806) + { /* ARP pkts aren't net-addressed */ + KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 10, 0, 0, 0, 0 ); + return(0); + } + } + } + KERNEL_DEBUG(DBG_SPLT_BFCHK | DBG_FUNC_END, 11, s[6], 0, 0, 0 ); + return(-1); +} + +int +splitter_ctl(register struct socket *so, register int cmd, + register caddr_t data, register struct ifnet *ifp) +{ register struct ndrv_cb *np = sotondrvcb(so); + register struct ifnet_blue *ifb; + register struct BlueFilter *bf = (struct BlueFilter *)data, *bf1; + u_long at_dl_tag; + + + if ((ifb = np->nd_if->if_Y) == NULL) + return(ENXIO); + + if (cmd == SIOCSSPLTFILT) + { +#if BLUE_DEBUG +kprintf("Filter: %s, %x, %x, %x\n", bf->ifr_name, bf->BF_flags, bf->BF_address, + bf->BF_node); +#endif + if (bf->BF_flags & BF_ATALK) + bf1 = &ifb->filter[BFS_ATALK]; + else if (bf->BF_flags & BF_IP) + bf1 = &ifb->filter[BFS_IP]; + else + return(EINVAL); + if (bf->BF_flags&BF_ALLOC) + { if ((bf1->BF_flags&(BF_ALLOC|BF_VALID)) == + (BF_ALLOC|BF_VALID)) + return(EBUSY); + *bf1 = *bf; + bf1->BF_flags |= BF_VALID; + } else if (bf->BF_flags&BF_DEALLOC) + { if (bf1->BF_flags&BF_ALLOC) + bf1->BF_flags = 0; + else + return(EINVAL); + } + /* Register AppleTalk Tags if not registered */ + + 
ether_attach_at(ifp, &at_dl_tag, + &at_dl_tag); + + + } else if (cmd == SIOCZSPLTSTAT) + { ifb->pkts_up = 0; + ifb->pkts_out = 0; + ifb->pkts_looped_r2b = 0; + ifb->pkts_looped_b2r = 0; + ifb->no_bufs1 = 0; + ifb->no_bufs2 = 0; + ifb->full_sockbuf = 0; + } else if (cmd == SIOCGSPLTSTAT) + { register struct Ystats *ys = (struct Ystats *)data; + ys->YS_blue_pid = ifb->blue_pid; + ys->YS_filter[BFS_ATALK] = ifb->filter[BFS_ATALK]; + ys->YS_filter[BFS_IP] = ifb->filter[BFS_IP]; + ys->YS_pkts_up = ifb->pkts_up; + ys->YS_pkts_out = ifb->pkts_out; + ys->YS_pkts_looped_b2r = ifb->pkts_looped_b2r; + ys->YS_pkts_looped_r2b = ifb->pkts_looped_r2b; + ys->YS_no_bufs1 = ifb->no_bufs1; + ys->YS_no_bufs2 = ifb->no_bufs2; + ys->YS_full_sockbuf = ifb->full_sockbuf; + } else + return(EINVAL); + return(0); +} + +void +splitter_close(register struct ndrv_cb *np) +{ extern struct ifnet_blue *blue_if; + extern void ndrv_flushq(struct ifqueue *); + + if (blue_if) + { /* If we're the guy holding the Y-adapter, clean it up */ + if (blue_if->blue_pid == + ((struct proc *)current_proc())->p_pid) + { if (np->nd_if) + { np->nd_if->if_flags &= ~IFF_SPLITTER; + np->nd_if->if_Y = 0; + } + + BFIx = 0; + /* Clean out the filter supply */ + bzero(RhapFilter, + sizeof(struct BlueFilter) * BFCount); + blue_if->ifb_so = 0; + blue_if->filter[0].BF_flags = 0; + blue_if->filter[1].BF_flags = 0; + ndrv_flushq(&blueq); + if (np->nd_laddr) + { FREE((caddr_t) np->nd_laddr, M_IFADDR); + np->nd_laddr = 0; + } + } + } + remque((queue_t)np); + FREE((caddr_t)np, M_PCB); +} + +/* + * Dup the mbuf chain passed in. The whole thing. No cute additional cruft. + * And really copy the thing. That way, we don't "precompute" checksums + * for unsuspecting consumers. + * Assumption: m->m_nextpkt == 0. + * Trick: for small packets, don't dup into a cluster. That way received + * packets don't take up too much room in the sockbuf (cf. sbspace()). 
+ */ +int MDFail; + +struct mbuf * +m_dup(register struct mbuf *m, int how) +{ register struct mbuf *n, **np; + struct mbuf *top; + int copyhdr = 0; + + KERNEL_DEBUG(DBG_SPLT_DUP | DBG_FUNC_START, m->m_flags, m->m_len, + m->m_pkthdr.len, 0, 0 ); + np = ⊤ + top = 0; + if (m->m_flags & M_PKTHDR) + copyhdr = 1; + + /* + * Quick check: if we have one mbuf and its data fits in an + * mbuf with packet header, just copy and go. + */ + if (m->m_next == NULL) + { /* Then just move the data into an mbuf and be done... */ + if (copyhdr) + { if (m->m_pkthdr.len <= MHLEN) + { if ((n = m_gethdr(how, m->m_type)) == NULL) + return(NULL); + bcopy(m->m_data, n->m_data, m->m_pkthdr.len); + n->m_pkthdr.len = m->m_pkthdr.len; + n->m_len = m->m_len; + KERNEL_DEBUG(DBG_SPLT_DUP | DBG_FUNC_END, 2, + m->m_pkthdr.len, m->m_flags, + n->m_flags, 0 ); + return(n); + } + } else if (m->m_len <= MLEN) + { if ((n = m_get(how, m->m_type)) == NULL) + return(NULL); + bcopy(m->m_data, n->m_data, m->m_len); + n->m_len = m->m_len; + KERNEL_DEBUG(DBG_SPLT_DUP | DBG_FUNC_END, 3, m->m_len, + m->m_flags, n->m_flags, 0 ); + return(n); + } + } + while (m) + { +#if BLUE_DEBUG + kprintf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len, + m->m_data); +#endif + if (copyhdr) + n = m_gethdr(how, m->m_type); + else + n = m_get(how, m->m_type); + if (n == 0) + goto nospace; + if (m->m_flags & M_EXT) + { MCLGET(n, how); + if ((n->m_flags & M_EXT) == 0) + goto nospace; + } + *np = n; + if (copyhdr) + { /* Don't use M_COPY_PKTHDR: preserve m_data */ + n->m_pkthdr = m->m_pkthdr; + n->m_pkthdr.aux = (struct mbuf *)NULL; /*###LD080800 Avoid problems with IPsec */ + + n->m_flags |= (m->m_flags & M_COPYFLAGS); + copyhdr = 0; + if ((n->m_flags & M_EXT) == 0) + n->m_data = n->m_pktdat; + } + n->m_len = m->m_len; + /* + * Get the dup on the same bdry as the original + * Assume that the two mbufs have the same offset to data area + * (up to word bdries) + */ + bcopy(mtod(m, caddr_t), mtod(n, caddr_t), (unsigned)n->m_len); + m = 
m->m_next; + np = &n->m_next; +#if BLUE_DEBUG + kprintf(">%x: %x, %x, %x\n", n, n->m_flags, n->m_len, + n->m_data); +#endif + } + + if (top == 0) + MDFail++; + KERNEL_DEBUG(DBG_SPLT_DUP | DBG_FUNC_END, 0, (int)top, 0, 0, 0 ); + return (top); + nospace: + m_freem(top); + MDFail++; + KERNEL_DEBUG(DBG_SPLT_DUP | DBG_FUNC_END, 1, 0, 0, 0, 0 ); + return (0); +} diff --git a/bsd/net/if_blue.h b/bsd/net/if_blue.h new file mode 100644 index 000000000..fcc906e35 --- /dev/null +++ b/bsd/net/if_blue.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997, 1998 Apple Computer, Inc. All Rights Reserved */ +/* + * @(#)if_blue.h 1.1 (MacOSX) 6/10/43 + * Justin Walker + * 970520 - First version + * 980130 - Second version - performance improvements + */ + +#ifndef _IF_BLUE_H +#define _IF_BLUE_H + +#define BLUE_DEBUG 0 + +/* + * Y-adapter filter mechanism. + * Specifies the Atalk or IP network address of this node. + * If BF_ALLOC is set and BF_VALID is not, the corresponding + * protocol type should be captured. 
+ */ +struct BlueFilter +{ +#define IFNAMSIZ 16 + char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + short BF_flags; + unsigned long BF_address; /* IP address or Atalk Network # */ + unsigned char BF_node; /* Atalk node # */ +#ifdef notyet + struct ifnet *BF_if; /* Destination of "passed" pkts */ +#endif +}; + +#define BF_ALLOC 0x01 /* Entry in use */ +#define BF_DEALLOC 0x02 /* Clear matching entry */ +#define BF_VALID 0x04 /* Address is valid */ +#define BF_ATALK 0x08 /* Appletalk network address */ +#define BF_IP 0x10 /* IP network address */ + +struct Ystats +{ char YS_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct BlueFilter YS_filter[2]; + pid_t YS_blue_pid; + int YS_pkts_up; + int YS_pkts_out; + int YS_pkts_looped_r2b; + int YS_pkts_looped_b2r; + int YS_no_bufs1; + int YS_no_bufs2; + int YS_full_sockbuf; +}; + +struct ifnet_blue +{ struct ifnet ifb_ifn; + struct socket *ifb_so; + pid_t blue_pid; + int sig_to_send; + int sig_sent; /* Set when new pkt arrives; cleared when mt */ + struct BlueFilter filter[2]; /* Only need to check IP, A/talk */ + /* Stats */ + int pkts_up; + int pkts_out; + int pkts_looped_r2b; + int pkts_looped_b2r; + int no_bufs1; /* splitter_input got null mbuf */ + int no_bufs2; /* ndrv_output couldn't dup mbuf */ + int full_sockbuf; +}; + +/* Preallocate slots in blue_if to simplify filtering */ +#define BFS_ATALK 0x0 /* The Atalk filter */ +#define BFS_IP 0x1 /* The IP filter */ + +#define SIOCSSPLITTER _IOW('i', 123, struct ifreq) /* set 'splitter' */ +#define SIOCGSPLITTER _IOR('i', 122, struct ifreq) /* get 'splitter' */ +#define SIOCGSPLTSTAT _IOWR('i', 121, struct Ystats) +#define SIOCSSPLTFILT _IOW('i', 120, struct BlueFilter) +#define SIOCZSPLTSTAT _IO('i', 119) /* Clear stats */ + +/* + * Config structure for the Y adapter - NYI + */ +struct if_splitter +{ char ifs_on; /* 1=>on */ + char ifs_qmax; /* !0 => maxqlen */ + short ifs_wait; /* Time to wait for signal */ + short ifs_sig; /* Signal to send */ + short ifs_pad; /* 
Extra space */ +}; + +#ifdef KERNEL +extern struct ifqueue blueq; /* Place to put incoming BB packets */ + +#define BFCount 10 +extern struct BlueFilter RhapFilter[]; /* Filters for MacOSX side */ +extern int BFIx; +#endif +#endif /* _IF_BLUE_H */ diff --git a/bsd/net/if_disc.c b/bsd/net/if_disc.c new file mode 100644 index 000000000..5a95376fb --- /dev/null +++ b/bsd/net/if_disc.c @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * From: @(#)if_loop.c 8.1 (Berkeley) 6/10/93 + */ + +/* + * Discard interface driver for protocol testing and timing. + * (Based on the loopback.) 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "bpfilter.h" +#include "opt_inet.h" + +#if TINY_DSMTU +#define DSMTU (1024+512) +#else +#define DSMTU 65532 +#endif + +static void discattach __P((void *dummy)); +PSEUDO_SET(discattach, if_disc); + +static struct ifnet discif; +static int discoutput(struct ifnet *, struct mbuf *, struct sockaddr *, + struct rtentry *); +static void discrtrequest(int cmd, struct rtentry *rt, struct sockaddr *sa); +static int discioctl(struct ifnet *, u_long, caddr_t); + +/* ARGSUSED */ +static void +discattach(dummy) + void *dummy; +{ + register struct ifnet *ifp = &discif; + + ifp->if_name = "ds"; + ifp->if_family = APPLE_IF_FAM_DISC; + ifp->if_mtu = DSMTU; + ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST; + ifp->if_ioctl = discioctl; + ifp->if_output = discoutput; + ifp->if_type = IFT_LOOP; + ifp->if_hdrlen = 0; + ifp->if_addrlen = 0; + if_attach(ifp); +#if NBPFILTER > 0 + bpfattach(ifp, DLT_NULL, sizeof(u_int)); +#endif +} + +static int +discoutput(ifp, m, dst, rt) + struct ifnet *ifp; + register struct mbuf *m; + struct sockaddr *dst; + register struct rtentry *rt; +{ + if ((m->m_flags & M_PKTHDR) == 0) + panic("discoutput no HDR"); +#if NBPFILTER > 0 + /* BPF write needs to be handled specially */ + if (dst->sa_family == AF_UNSPEC) { + dst->sa_family = *(mtod(m, int *)); + m->m_len -= sizeof(int); + m->m_pkthdr.len -= sizeof(int); + m->m_data += sizeof(int); + } + + if (discif.if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer a to it). 
+ */ + struct mbuf m0; + u_int af = dst->sa_family; + + m0.m_next = m; + m0.m_len = 4; + m0.m_data = (char *)⁡ + + bpf_mtap(&discif, &m0); + } +#endif + m->m_pkthdr.rcvif = ifp; + + ifp->if_opackets++; + ifp->if_obytes += m->m_pkthdr.len; + + m_freem(m); + return 0; +} + +/* ARGSUSED */ +static void +discrtrequest(cmd, rt, sa) + int cmd; + struct rtentry *rt; + struct sockaddr *sa; +{ + if (rt) + rt->rt_rmx.rmx_mtu = DSMTU; +} + +/* + * Process an ioctl request. + */ +/* ARGSUSED */ +static int +discioctl(ifp, cmd, data) + register struct ifnet *ifp; + u_long cmd; + caddr_t data; +{ + register struct ifaddr *ifa; + register struct ifreq *ifr = (struct ifreq *)data; + register int error = 0; + + switch (cmd) { + + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP; + ifa = (struct ifaddr *)data; + if (ifa != 0) + ifa->ifa_rtrequest = discrtrequest; + /* + * Everything else is done at a higher level. + */ + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifr == 0) { + error = EAFNOSUPPORT; /* XXX */ + break; + } + switch (ifr->ifr_addr.sa_family) { + +#if INET + case AF_INET: + break; +#endif + + default: + error = EAFNOSUPPORT; + break; + } + break; + + case SIOCSIFMTU: + ifp->if_mtu = ifr->ifr_mtu; + break; + + default: + error = EINVAL; + } + return (error); +} diff --git a/bsd/net/if_dl.h b/bsd/net/if_dl.h new file mode 100644 index 000000000..ff1f3a03b --- /dev/null +++ b/bsd/net/if_dl.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_dl.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NET_IF_DL_H_ +#define _NET_IF_DL_H_ + +/* + * A Link-Level Sockaddr may specify the interface in one of two + * ways: either by means of a system-provided index number (computed + * anew and possibly differently on every reboot), or by a human-readable + * string such as "il0" (for managerial convenience). + * + * Census taking actions, such as something akin to SIOCGCONF would return + * both the index and the human name. + * + * High volume transactions (such as giving a link-level ``from'' address + * in a recvfrom or recvmsg call) may be likely only to provide the indexed + * form, (which requires fewer copy operations and less space). + * + * The form and interpretation of the link-level address is purely a matter + * of convention between the device driver and its consumers; however, it is + * expected that all drivers for an interface of a given if_type will agree. + */ + +/* + * Structure of a Link-Level sockaddr: + */ +struct sockaddr_dl { + u_char sdl_len; /* Total length of sockaddr */ + u_char sdl_family; /* AF_DLI */ + u_short sdl_index; /* if != 0, system given index for interface */ + u_char sdl_type; /* interface type */ + u_char sdl_nlen; /* interface name length, no trailing 0 reqd. 
*/ + u_char sdl_alen; /* link level address length */ + u_char sdl_slen; /* link layer selector length */ + char sdl_data[12]; /* minimum work area, can be larger; + contains both if name and ll address */ +}; + +#define LLADDR(s) ((caddr_t)((s)->sdl_data + (s)->sdl_nlen)) + +#ifndef KERNEL + +#include + +__BEGIN_DECLS +void link_addr __P((const char *, struct sockaddr_dl *)); +char *link_ntoa __P((const struct sockaddr_dl *)); +__END_DECLS + +#endif /* !KERNEL */ + +#endif diff --git a/bsd/net/if_dummy.c b/bsd/net/if_dummy.c new file mode 100644 index 000000000..ef184de9e --- /dev/null +++ b/bsd/net/if_dummy.c @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * derived from + * @(#)if_loop.c 8.1 (Berkeley) 6/10/93 + * Id: if_loop.c,v 1.22 1996/06/19 16:24:10 wollman Exp + */ + +/* + * Loopback interface driver for protocol testing and timing. 
+ */ +#if BSD310 +#include "opt_inet.h" +#endif +#include "dummy.h" +#if NDUMMY > 0 + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#include +#endif + +#if IPX +#include +#include +#endif + +#if INET6 +#ifndef INET +#include +#endif +#include +#include +#endif + +#if NS +#include +#include +#endif + +#if ISO +#include +#include +#endif + +#if NETATALK +#include +#include +#include +#endif NETATALK + +#include "bpfilter.h" + +static int dummyioctl __P((struct ifnet *, u_long, caddr_t)); +int dummyoutput __P((struct ifnet *, register struct mbuf *, struct sockaddr *, + register struct rtentry *)); +static void dummyrtrequest __P((int, struct rtentry *, struct sockaddr *)); + +static void dummyattach __P((void *)); +PSEUDO_SET(dummyattach, if_dummy); + +#if TINY_DUMMYMTU +#define DUMMYMTU (1024+512) +#else +#define DUMMYMTU 16384 +#endif +#define HAVE_OLD_BPF 1 + +static struct ifnet dummyif[NDUMMY]; + +/* ARGSUSED */ +static void +dummyattach(dummy) + void *dummy; +{ + register struct ifnet *ifp; + register int i = 0; + + for (i = 0; i < NDUMMY; i++) { + ifp = &dummyif[i]; +#if defined(__NetBSD__) || defined(__OpenBSD__) + sprintf(ifp->if_xname, "dummy%d", i); +#else + ifp->if_name = "dummy"; + ifp->if_unit = i; +#endif +#ifndef __bsdi__ + ifp->if_softc = NULL; +#endif + ifp->if_mtu = DUMMYMTU; + /* Change to BROADCAST experimentaly to announce its prefix. 
*/ + ifp->if_flags = /* IFF_LOOPBACK */ IFF_BROADCAST | IFF_MULTICAST; + ifp->if_ioctl = dummyioctl; + ifp->if_output = dummyoutput; + ifp->if_type = IFT_DUMMY; + ifp->if_hdrlen = 0; + ifp->if_addrlen = 0; + if_attach(ifp); +#if NBPFILTER > 0 +#ifdef HAVE_OLD_BPF + bpfattach(ifp, DLT_NULL, sizeof(u_int)); +#else + bpfattach(&ifp->if_bpf, ifp, DLT_NULL, sizeof(u_int)); +#endif +#endif + } +} + +int +dummyoutput(ifp, m, dst, rt) + struct ifnet *ifp; + register struct mbuf *m; + struct sockaddr *dst; + register struct rtentry *rt; +{ + int s, isr; + register struct ifqueue *ifq = 0; + + if ((m->m_flags & M_PKTHDR) == 0) + panic("dummyoutput no HDR"); +#if NBPFILTER > 0 + /* BPF write needs to be handled specially */ + if (dst->sa_family == AF_UNSPEC) { + dst->sa_family = *(mtod(m, int *)); + m->m_len -= sizeof(int); + m->m_pkthdr.len -= sizeof(int); + m->m_data += sizeof(int); + } + + if (ifp->if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer a to it). + */ + struct mbuf m0; + u_int af = dst->sa_family; + + m0.m_next = m; + m0.m_len = 4; + m0.m_data = (char *)⁡ + +#ifdef HAVE_OLD_BPF + bpf_mtap(ifp, &m0); +#else + bpf_mtap(ifp->if_bpf, &m0); +#endif + } +#endif + m->m_pkthdr.rcvif = ifp; + + if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) { + m_freem(m); + return (rt->rt_flags & RTF_BLACKHOLE ? 0 : + rt->rt_flags & RTF_HOST ? 
EHOSTUNREACH : ENETUNREACH); + } + ifp->if_opackets++; + ifp->if_obytes += m->m_pkthdr.len; + switch (dst->sa_family) { + +#if INET + case AF_INET: + ifq = &ipintrq; + isr = NETISR_IP; + break; +#endif +#if IPX + case AF_IPX: + ifq = &ipxintrq; + isr = NETISR_IPX; + break; +#endif +#if INET6 + case AF_INET6: + ifq = &ip6intrq; + isr = NETISR_IPV6; + break; +#endif +#if NS + case AF_NS: + ifq = &nsintrq; + isr = NETISR_NS; + break; +#endif +#if ISO + case AF_ISO: + ifq = &clnlintrq; + isr = NETISR_ISO; + break; +#endif +#if NETATALK + case AF_APPLETALK: + ifq = &atintrq2; + isr = NETISR_ATALK; + break; +#endif NETATALK + default: + printf("%s: can't handle af%d\n", + if_name(ifp), dst->sa_family); + m_freem(m); + return (EAFNOSUPPORT); + } + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + splx(s); + return (ENOBUFS); + } + IF_ENQUEUE(ifq, m); + schednetisr(isr); + ifp->if_ipackets++; + ifp->if_ibytes += m->m_pkthdr.len; + splx(s); + return (0); +} + +/* ARGSUSED */ +static void +dummyrtrequest(cmd, rt, sa) + int cmd; + struct rtentry *rt; + struct sockaddr *sa; +{ + if (rt) { + rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; /* for ISO */ + /* + * For optimal performance, the send and receive buffers + * should be at least twice the MTU plus a little more for + * overhead. + */ + rt->rt_rmx.rmx_recvpipe = + rt->rt_rmx.rmx_sendpipe = 3 * DUMMYMTU; + } +} + +/* + * Process an ioctl request. + */ +/* ARGSUSED */ +static int +dummyioctl(ifp, cmd, data) + register struct ifnet *ifp; + u_long cmd; + caddr_t data; +{ + register struct ifaddr *ifa; + register struct ifreq *ifr = (struct ifreq *)data; + register int error = 0; + + switch (cmd) { + + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP | IFF_RUNNING; + ifa = (struct ifaddr *)data; + ifa->ifa_rtrequest = dummyrtrequest; + /* + * Everything else is done at a higher level. 
+ */ + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifr == 0) { + error = EAFNOSUPPORT; /* XXX */ + break; + } + switch (ifr->ifr_addr.sa_family) { + +#if INET + case AF_INET: + break; +#endif +#if INET6 + case AF_INET6: + break; +#endif + + default: + error = EAFNOSUPPORT; + break; + } + break; + + case SIOCSIFMTU: + ifp->if_mtu = ifr->ifr_mtu; + break; + + case SIOCSIFFLAGS: + break; + + default: + error = EINVAL; + } + return (error); +} +#endif /* NDUMMY > 0 */ diff --git a/bsd/net/if_ether.c b/bsd/net/if_ether.c new file mode 100644 index 000000000..5c8af20b3 --- /dev/null +++ b/bsd/net/if_ether.c @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ diff --git a/bsd/net/if_ethersubr.c b/bsd/net/if_ethersubr.c new file mode 100644 index 000000000..ab0c05d0a --- /dev/null +++ b/bsd/net/if_ethersubr.c @@ -0,0 +1,474 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_ethersubr.c 8.1 (Berkeley) 6/10/93 + */ + +#if NOTFB31 +#include "opt_atalk.h" +#include "opt_inet.h" +#include "opt_ipx.h" +#include "opt_bdg.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#include +#include +#endif + +#if IPX +#include +#include +#endif + +#if NS +#include +#include +ushort ns_nettype; +int ether_outputdebug = 0; +int ether_inputdebug = 0; +#endif + +#if ISO +#include +#include +#include +#include +#endif + +/*#if LLC +#include +#include +#endif*/ + +#include + +#if LLC && CCITT +extern struct ifqueue pkintrq; +#endif + +#if BRIDGE +#include +#endif + +/* #include "vlan.h" */ +#if NVLAN > 0 +#include +#endif /* NVLAN > 0 */ + +static int ether_resolvemulti __P((struct ifnet *, struct sockaddr **, + struct sockaddr *)); +extern u_char etherbroadcastaddr[]; +#define senderr(e) do { error = (e); goto bad;} while (0) +#define IFP2AC(IFP) ((struct arpcom *)IFP) + +/* + * Perform common duties while attaching to interface list + */ + + +void +ether_ifattach(ifp) + register struct ifnet *ifp; +{ + register 
struct ifaddr *ifa; + register struct sockaddr_dl *sdl; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + printf("ether_ifattach called for %s\n", ifp->if_name); + ether_family_init(); + + ifp->if_name = "en"; + ifp->if_family = APPLE_IF_FAM_ETHERNET; + ifp->if_type = IFT_ETHER; + ifp->if_addrlen = 6; + ifp->if_hdrlen = 14; + ifp->if_mtu = ETHERMTU; + ifp->if_resolvemulti = ether_resolvemulti; + if (ifp->if_baudrate == 0) + ifp->if_baudrate = 10000000; + + dlil_if_attach(ifp); + ifa = ifnet_addrs[ifp->if_index - 1]; + if (ifa == 0) { + printf("ether_ifattach: no lladdr!\n"); + (void) thread_funnel_set(network_flock, funnel_state); + return; + } + sdl = (struct sockaddr_dl *)ifa->ifa_addr; + sdl->sdl_type = IFT_ETHER; + sdl->sdl_alen = ifp->if_addrlen; + bcopy((IFP2AC(ifp))->ac_enaddr, LLADDR(sdl), ifp->if_addrlen); +#ifdef INET6 + in6_ifattach_getifid(ifp); +#endif + (void) thread_funnel_set(network_flock, funnel_state); +} + +SYSCTL_DECL(_net_link); +SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW, 0, "Ethernet"); + +int +ether_resolvemulti(ifp, llsa, sa) + struct ifnet *ifp; + struct sockaddr **llsa; + struct sockaddr *sa; +{ + struct sockaddr_dl *sdl; + struct sockaddr_in *sin; + u_char *e_addr; +#if INET6 + struct sockaddr_in6 *sin6; +#endif + + + switch(sa->sa_family) { + case AF_UNSPEC: + /* AppleTalk uses AF_UNSPEC for multicast registration. + * No mapping needed. Just check that it's a valid MC address. + */ + e_addr = &sa->sa_data[0]; + if ((e_addr[0] & 1) != 1) + return EADDRNOTAVAIL; + *llsa = 0; + return 0; + + case AF_LINK: + /* + * No mapping needed. Just check that it's a valid MC address. 
+ */ + sdl = (struct sockaddr_dl *)sa; + e_addr = LLADDR(sdl); + if ((e_addr[0] & 1) != 1) + return EADDRNOTAVAIL; + *llsa = 0; + return 0; + +#if INET + case AF_INET: + sin = (struct sockaddr_in *)sa; + if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) + return EADDRNOTAVAIL; + MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR, + M_WAITOK); + sdl->sdl_len = sizeof *sdl; + sdl->sdl_family = AF_LINK; + sdl->sdl_index = ifp->if_index; + sdl->sdl_type = IFT_ETHER; + sdl->sdl_nlen = 0; + sdl->sdl_alen = ETHER_ADDR_LEN; + sdl->sdl_slen = 0; + e_addr = LLADDR(sdl); + ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr); + *llsa = (struct sockaddr *)sdl; + return 0; +#endif +#if INET6 + case AF_INET6: + sin6 = (struct sockaddr_in6 *)sa; + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { + /* + * An IP6 address of 0 means listen to all + * of the Ethernet multicast address used for IP6. + * (This is used for multicast routers.) + */ + ifp->if_flags |= IFF_ALLMULTI; + *llsa = 0; + return 0; + } + MALLOC(sdl, struct sockaddr_dl *, sizeof *sdl, M_IFMADDR, + M_WAITOK); + sdl->sdl_len = sizeof *sdl; + sdl->sdl_family = AF_LINK; + sdl->sdl_index = ifp->if_index; + sdl->sdl_type = IFT_ETHER; + sdl->sdl_nlen = 0; + sdl->sdl_alen = ETHER_ADDR_LEN; + sdl->sdl_slen = 0; + e_addr = LLADDR(sdl); + ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr); + kprintf("ether_resolvemulti Adding %x:%x:%x:%x:%x:%x\n", + e_addr[0], e_addr[1], e_addr[2], e_addr[3], e_addr[4], e_addr[5]); + *llsa = (struct sockaddr *)sdl; + return 0; +#endif + + default: + /* + * Well, the text isn't quite right, but it's the name + * that counts... + */ + return EAFNOSUPPORT; + } +} + + + + + +u_char ether_ipmulticast_min[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +u_char ether_ipmulticast_max[6] = { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xff }; +/* + * Add an Ethernet multicast address or range of addresses to the list for a + * given interface. 
+ */ +int +ether_addmulti(ifr, ac) + struct ifreq *ifr; + register struct arpcom *ac; +{ + register struct ether_multi *enm; + struct sockaddr_in *sin; + u_char addrlo[6]; + u_char addrhi[6]; + int s = splimp(); + + switch (ifr->ifr_addr.sa_family) { + + case AF_UNSPEC: + bcopy(ifr->ifr_addr.sa_data, addrlo, 6); + bcopy(addrlo, addrhi, 6); + break; + +#if INET + case AF_INET: + sin = (struct sockaddr_in *)&(ifr->ifr_addr); + if (sin->sin_addr.s_addr == INADDR_ANY) { + /* + * An IP address of INADDR_ANY means listen to all + * of the Ethernet multicast addresses used for IP. + * (This is for the sake of IP multicast routers.) + */ + bcopy(ether_ipmulticast_min, addrlo, 6); + bcopy(ether_ipmulticast_max, addrhi, 6); + } + else { + ETHER_MAP_IP_MULTICAST(&sin->sin_addr, addrlo); + bcopy(addrlo, addrhi, 6); + } + break; +#endif + + default: + splx(s); + return (EAFNOSUPPORT); + } + + /* + * Verify that we have valid Ethernet multicast addresses. + */ + if ((addrlo[0] & 0x01) != 1 || (addrhi[0] & 0x01) != 1) { + splx(s); + return (EINVAL); + } + /* + * See if the address range is already in the list. + */ + ETHER_LOOKUP_MULTI(addrlo, addrhi, ac, enm); + if (enm != NULL) { + /* + * Found it; just increment the reference count. + */ + ++enm->enm_refcount; + splx(s); + return (0); + } + /* + * New address or range; malloc a new multicast record + * and link it into the interface's multicast list. + */ + enm = (struct ether_multi *)_MALLOC(sizeof(*enm), M_IFMADDR, M_WAITOK); + if (enm == NULL) { + splx(s); + return (ENOBUFS); + } + bcopy(addrlo, enm->enm_addrlo, 6); + bcopy(addrhi, enm->enm_addrhi, 6); + enm->enm_ac = ac; + enm->enm_refcount = 1; + enm->enm_next = ac->ac_multiaddrs; + ac->ac_multiaddrs = enm; + splx(s); + /* + * Return ENETRESET to inform the driver that the list has changed + * and its reception filter should be adjusted accordingly. + */ + return (ENETRESET); +} + +/* + * Delete a multicast address record. 
+ */ +int +ether_delmulti(ifr, ac, ret_mca) + struct ifreq *ifr; + register struct arpcom *ac; + struct ether_addr * ret_mca; +{ + register struct ether_multi *enm; + register struct ether_multi **p; + struct sockaddr_in *sin; + u_char addrlo[6]; + u_char addrhi[6]; + int s = splimp(); + + switch (ifr->ifr_addr.sa_family) { + + case AF_UNSPEC: + bcopy(ifr->ifr_addr.sa_data, addrlo, 6); + bcopy(addrlo, addrhi, 6); + break; + +#if INET + case AF_INET: + sin = (struct sockaddr_in *)&(ifr->ifr_addr); + if (sin->sin_addr.s_addr == INADDR_ANY) { + /* + * An IP address of INADDR_ANY means stop listening + * to the range of Ethernet multicast addresses used + * for IP. + */ + bcopy(ether_ipmulticast_min, addrlo, 6); + bcopy(ether_ipmulticast_max, addrhi, 6); + } + else { + ETHER_MAP_IP_MULTICAST(&sin->sin_addr, addrlo); + bcopy(addrlo, addrhi, 6); + } + break; +#endif + + default: + splx(s); + return (EAFNOSUPPORT); + } + + /* + * Look up the address in our list. + */ + ETHER_LOOKUP_MULTI(addrlo, addrhi, ac, enm); + if (enm == NULL) { + splx(s); + return (ENXIO); + } + if (--enm->enm_refcount != 0) { + /* + * Still some claims to this record. + */ + splx(s); + return (0); + } + + /* save the low and high address of the range before deletion */ + if (ret_mca) { + *ret_mca = *((struct ether_addr *)addrlo); + *(ret_mca + 1) = *((struct ether_addr *)addrhi); + } + + /* + * No remaining claims to this record; unlink and free it. + */ + for (p = &enm->enm_ac->ac_multiaddrs; + *p != enm; + p = &(*p)->enm_next) + continue; + *p = (*p)->enm_next; + FREE(enm, M_IFMADDR); + splx(s); + /* + * Return ENETRESET to inform the driver that the list has changed + * and its reception filter should be adjusted accordingly. + */ + return (ENETRESET); +} + +/* + * Convert Ethernet address to printable (loggable) representation. 
+ */ +static u_char digits[] = "0123456789abcdef"; +char * +ether_sprintf(p, ap) + register u_char *p; + register u_char *ap; +{ register char *cp; + register i; + + for (cp = p, i = 0; i < 6; i++) { + *cp++ = digits[*ap >> 4]; + *cp++ = digits[*ap++ & 0xf]; + *cp++ = ':'; + } + *--cp = 0; + return (p); +} diff --git a/bsd/net/if_faith.c b/bsd/net/if_faith.c new file mode 100644 index 000000000..6aafc2ecf --- /dev/null +++ b/bsd/net/if_faith.c @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: if_faith.c,v 1.11 2000/02/22 14:01:46 itojun Exp $ */ + +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * derived from + * @(#)if_loop.c 8.1 (Berkeley) 6/10/93 + * Id: if_loop.c,v 1.22 1996/06/19 16:24:10 wollman Exp + */ + +/* + * Loopback interface driver for protocol testing and timing. 
+ */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include "opt_inet.h" +#endif + +#include "faith.h" +#if NFAITH > 0 + +#include +#include +#include +#include +#include +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined(__APPLE__) +#include +#else +#include +#endif +#include +#if defined(__bsdi__) || defined(__NetBSD__) +#include +#endif + +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#include +#endif + +#if INET6 +#ifndef INET +#include +#endif +#include +#include +#endif + +#include +#include "faith.h" +#include "bpfilter.h" + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ < 3 +static int faithioctl __P((struct ifnet *, int, caddr_t)); +#else +static int faithioctl __P((struct ifnet *, u_long, caddr_t)); +#endif +int faith_pre_output __P((struct ifnet *, register struct mbuf **, struct sockaddr *, + register struct rtentry *, char *, char *, u_long)); +static void faithrtrequest __P((int, struct rtentry *, struct sockaddr *)); + +#if defined(__FreeBSD__) || defined (__APPLE__) +void faithattach __P((void *)); +#else +void faithattach __P((int)); +#endif + +#define HAVE_OLD_BPF 1 + +static struct ifnet faithif[NFAITH]; +static struct if_proto *faith_array[NFAITH]; +static int faith_count = 0; + +#define FAITHMTU 1500 + +static +int faith_add_if(struct ifnet *ifp) +{ + ifp->if_demux = 0; + ifp->if_framer = 0; + return 0; +} + +static +int faith_del_if(struct ifnet *ifp) +{ + return 0; +} + +static +int faith_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag) +{ + int i; + + for (i=0; i < faith_count; i++) + if (faith_array[i] == 0) { + faith_array[faith_count] = proto; + return 0; + } + + if ((i == faith_count) && (faith_count == NFAITH)) + panic("faith_add_proto -- Too many attachments\n"); + + faith_array[faith_count++] = proto; + + return (0); +} + +static +int faith_del_proto(struct if_proto *proto, u_long dl_tag) +{ + int i; + + + for (i=0; i < faith_count; 
i++) + if (faith_array[i] == proto) { + faith_array[i] = 0; + return 0; + } + + return ENOENT; +} + +int faith_shutdown() +{ + return 0; +} + +void faith_reg_if_mods() +{ + struct dlil_ifmod_reg_str faith_ifmod; + + faith_ifmod.add_if = faith_add_if; + faith_ifmod.del_if = faith_del_if; + faith_ifmod.add_proto = faith_add_proto; + faith_ifmod.del_proto = faith_del_proto; + faith_ifmod.ifmod_ioctl = 0; + faith_ifmod.shutdown = faith_shutdown; + + + if (dlil_reg_if_modules(APPLE_IF_FAM_FAITH, &faith_ifmod)) + panic("Couldn't register faith modules\n"); + +} + +u_long faith_attach_inet(struct ifnet *ifp) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + u_long dl_tag=0; + short native=0; + int stat; + int i; + + for (i=0; i < faith_count; i++) { + if (faith_array[i] && (faith_array[i]->ifp == ifp) && + (faith_array[i]->protocol_family == PF_INET)) { +#if 0 + kprintf("faith_array for %s%d found dl_tag=%d\n", + ifp->if_name, ifp->if_unit, faith_array[i]->dl_tag); +#endif + return faith_array[i]->dl_tag; + + } + } + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_RAW; + desc.variants.bitmask.proto_id_length = 0; + desc.variants.bitmask.proto_id = 0; + desc.variants.bitmask.proto_id_mask = 0; + desc.native_type = (char *) &native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = 0; + reg.pre_output = faith_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = faithioctl; + reg.default_proto = 0; + reg.protocol_family = PF_INET; + + stat = dlil_attach_protocol(®, &dl_tag); + if (stat) { + panic("faith_attach_inet can't attach interface\n"); + } + + return dl_tag; +} + +void +faithattach(faith) + void *faith; +{ + struct ifnet *ifp; + int i; + + faith_reg_if_mods(); /* DLIL modules */ + + for (i = 0; i < NFAITH; i++) { + ifp = &faithif[i]; + bzero(ifp, sizeof(faithif[i])); + ifp->if_name = "faith"; + ifp->if_unit = i; + ifp->if_family = 
APPLE_IF_FAM_FAITH; + ifp->if_mtu = FAITHMTU; + /* Change to BROADCAST experimentaly to announce its prefix. */ + ifp->if_flags = /* IFF_LOOPBACK */ IFF_BROADCAST | IFF_MULTICAST; + ifp->if_ioctl = faithioctl; + ifp->if_output = NULL; + ifp->if_type = IFT_FAITH; + ifp->if_hdrlen = 0; + ifp->if_addrlen = 0; + dlil_if_attach(ifp); +#if NBPFILTER > 0 +#ifdef HAVE_OLD_BPF + bpfattach(ifp, DLT_NULL, sizeof(u_int)); +#else + bpfattach(&ifp->if_bpf, ifp, DLT_NULL, sizeof(u_int)); +#endif +#endif + } +} + +int +faith_pre_output(ifp, m0, dst, rt, frame_type, dst_addr, dl_tag) + struct ifnet *ifp; + register struct mbuf **m0; + struct sockaddr *dst; + register struct rtentry *rt; + char *frame_type; + char *dst_addr; + u_long dl_tag; +{ + int s, isr; + register struct ifqueue *ifq = 0; + register struct mbuf *m = *m0; + + if ((m->m_flags & M_PKTHDR) == 0) + panic("faithoutput no HDR"); +#if NBPFILTER > 0 + /* BPF write needs to be handled specially */ + if (dst && dst->sa_family == AF_UNSPEC) { + dst->sa_family = *(mtod(m, int *)); + m->m_len -= sizeof(int); + m->m_pkthdr.len -= sizeof(int); + m->m_data += sizeof(int); + } + + if (ifp->if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a faith header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer a to it). + */ + struct mbuf m0; + u_int af = dst->sa_family; + + m0.m_next = m; + m0.m_len = 4; + m0.m_data = (char *)⁡ + +#if HAVE_OLD_BPF + bpf_mtap(ifp, &m0); +#else + bpf_mtap(ifp->if_bpf, &m0); +#endif + } +#endif + + if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) { + m_freem(m); + return (EJUSTRETURN); +// return (rt->rt_flags & RTF_BLACKHOLE ? 0 : +// rt->rt_flags & RTF_HOST ? 
EHOSTUNREACH : ENETUNREACH); + } + ifp->if_opackets++; + ifp->if_obytes += m->m_pkthdr.len; + switch (dst->sa_family) { +#if INET + case AF_INET: + ifq = &ipintrq; + isr = NETISR_IP; + break; +#endif +#if INET6 + case AF_INET6: + ifq = &ip6intrq; + isr = NETISR_IPV6; + break; +#endif + default: + kprintf("faith_pre_output: m=%x family is unknown...(0x%x\n", m, dst->sa_family); + m_freem(m); + return EAFNOSUPPORT; + } + + /* XXX do we need more sanity checks? */ + + m->m_pkthdr.rcvif = ifp; + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + splx(s); + return (EJUSTRETURN); + } + IF_ENQUEUE(ifq, m); + schednetisr(isr); + ifp->if_ipackets++; + ifp->if_ibytes += m->m_pkthdr.len; + splx(s); + return (EJUSTRETURN); +} + +/* ARGSUSED */ +static void +faithrtrequest(cmd, rt, sa) + int cmd; + struct rtentry *rt; + struct sockaddr *sa; +{ + if (rt) { + rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; /* for ISO */ + /* + * For optimal performance, the send and receive buffers + * should be at least twice the MTU plus a little more for + * overhead. + */ + rt->rt_rmx.rmx_recvpipe = + rt->rt_rmx.rmx_sendpipe = 3 * FAITHMTU; + } +} + +/* + * Process an ioctl request. + */ +/* ARGSUSED */ +static int +faithioctl(ifp, cmd, data) + register struct ifnet *ifp; +#if defined(__FreeBSD__) && __FreeBSD__ < 3 + int cmd; +#else + u_long cmd; +#endif + caddr_t data; +{ + register struct ifaddr *ifa; + register struct ifreq *ifr = (struct ifreq *)data; + register int error = 0; + + switch (cmd) { + + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP | IFF_RUNNING; + ifa = (struct ifaddr *)data; + ifa->ifa_rtrequest = faithrtrequest; + /* + * Everything else is done at a higher level. 
+ */ + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifr == 0) { + error = EAFNOSUPPORT; /* XXX */ + break; + } + switch (ifr->ifr_addr.sa_family) { +#if INET + case AF_INET: + break; +#endif +#if INET6 + case AF_INET6: + break; +#endif + + default: + error = EAFNOSUPPORT; + break; + } + break; + +#ifdef SIOCSIFMTU +#ifndef __OpenBSD__ + case SIOCSIFMTU: + ifp->if_mtu = ifr->ifr_mtu; + break; +#endif +#endif + + case SIOCSIFFLAGS: + break; + + default: + error = EINVAL; + } + return (error); +} +#endif /* NFAITH > 0 */ diff --git a/bsd/net/if_fddisubr.c b/bsd/net/if_fddisubr.c new file mode 100644 index 000000000..9f893cba4 --- /dev/null +++ b/bsd/net/if_fddisubr.c @@ -0,0 +1,653 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995, 1996 + * Matt Thomas . All rights reserved. + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * from: if_ethersubr.c,v 1.5 1994/12/13 22:31:45 wollman Exp + */ + +#include "opt_atalk.h" +#include "opt_inet.h" +#include "opt_ipx.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#endif +#if defined(__FreeBSD__) +#include +#else +#include +#endif + +#if IPX +#include +#include +#endif + +#if NS +#include +#include +#endif + +#if DECNET +#include +#endif + +#if ISO +#include +#include +#include +#include +#endif + +#if LLC +#include +#include +#endif + + +#if LLC && CCITT +extern struct ifqueue pkintrq; +#endif + +#include "bpfilter.h" + +#define senderr(e) { error = (e); goto bad;} + +/* + * This really should be defined in if_llc.h but in case it isn't. + */ +#ifndef llc_snap +#define llc_snap llc_un.type_snap +#endif + +#if defined(__bsdi__) || defined(__NetBSD__) +#define RTALLOC1(a, b) rtalloc1(a, b) +#define ARPRESOLVE(a, b, c, d, e, f) arpresolve(a, b, c, d, e) +#elif defined(__FreeBSD__) +#define RTALLOC1(a, b) rtalloc1(a, b, 0UL) +#define ARPRESOLVE(a, b, c, d, e, f) arpresolve(a, b, c, d, e, f) +#endif +/* + * FDDI output routine. + * Encapsulate a packet of type family for the local net. + * Use trailer local net encapsulation if enough data in first + * packet leaves a multiple of 512 bytes of data in remainder. + * Assumes that ifp is actually pointer to arpcom structure. 
+ */ +int +fddi_output(ifp, m0, dst, rt0) + register struct ifnet *ifp; + struct mbuf *m0; + struct sockaddr *dst; + struct rtentry *rt0; +{ + u_int16_t type; + int s, loop_copy = 0, error = 0; + u_char edst[6]; + register struct mbuf *m = m0; + register struct rtentry *rt; + register struct fddi_header *fh; + struct arpcom *ac = (struct arpcom *)ifp; + + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + senderr(ENETDOWN); + getmicrotime(&ifp->if_lastchange); +#if !defined(__bsdi__) || _BSDI_VERSION >= 199401 + if (rt = rt0) { + if ((rt->rt_flags & RTF_UP) == 0) { + if (rt0 = rt = RTALLOC1(dst, 1)) + rt->rt_refcnt--; + else + senderr(EHOSTUNREACH); + } + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_gwroute == 0) + goto lookup; + if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { + rtfree(rt); rt = rt0; + lookup: rt->rt_gwroute = RTALLOC1(rt->rt_gateway, 1); + if ((rt = rt->rt_gwroute) == 0) + senderr(EHOSTUNREACH); + } + } + if (rt->rt_flags & RTF_REJECT) + if (rt->rt_rmx.rmx_expire == 0 || + time_second < rt->rt_rmx.rmx_expire) + senderr(rt == rt0 ? 
EHOSTDOWN : EHOSTUNREACH); + } +#endif + switch (dst->sa_family) { + +#if INET + case AF_INET: { +#if !defined(__bsdi__) || _BSDI_VERSION >= 199401 + if (!ARPRESOLVE(ac, rt, m, dst, edst, rt0)) + return (0); /* if not yet resolved */ +#else + int usetrailers; + if (!arpresolve(ac, m, &((struct sockaddr_in *)dst)->sin_addr, edst, &usetrailers)) + return (0); /* if not yet resolved */ +#endif + type = htons(ETHERTYPE_IP); + break; + } +#endif +#if IPX + case AF_IPX: + type = htons(ETHERTYPE_IPX); + bcopy((caddr_t)&(((struct sockaddr_ipx *)dst)->sipx_addr.x_host), + (caddr_t)edst, sizeof (edst)); + break; +#endif + +#if NS + case AF_NS: + type = htons(ETHERTYPE_NS); + bcopy((caddr_t)&(((struct sockaddr_ns *)dst)->sns_addr.x_host), + (caddr_t)edst, sizeof (edst)); + break; +#endif +#if ISO + case AF_ISO: { + int snpalen; + struct llc *l; + register struct sockaddr_dl *sdl; + + if (rt && (sdl = (struct sockaddr_dl *)rt->rt_gateway) && + sdl->sdl_family == AF_LINK && sdl->sdl_alen > 0) { + bcopy(LLADDR(sdl), (caddr_t)edst, sizeof(edst)); + } else if (error = + iso_snparesolve(ifp, (struct sockaddr_iso *)dst, + (char *)edst, &snpalen)) + goto bad; /* Not Resolved */ + /* If broadcasting on a simplex interface, loopback a copy */ + if (*edst & 1) + m->m_flags |= (M_BCAST|M_MCAST); + M_PREPEND(m, 3, M_DONTWAIT); + if (m == NULL) + return (0); + type = 0; + l = mtod(m, struct llc *); + l->llc_dsap = l->llc_ssap = LLC_ISO_LSAP; + l->llc_control = LLC_UI; + IFDEBUG(D_ETHER) + int i; + printf("unoutput: sending pkt to: "); + for (i=0; i<6; i++) + printf("%x ", edst[i] & 0xff); + printf("\n"); + ENDDEBUG + } break; +#endif /* ISO */ +#if LLC +/* case AF_NSAP: */ + case AF_CCITT: { + register struct sockaddr_dl *sdl = + (struct sockaddr_dl *) rt -> rt_gateway; + + if (sdl && sdl->sdl_family != AF_LINK && sdl->sdl_alen <= 0) + goto bad; /* Not a link interface ? Funny ... 
*/ + bcopy(LLADDR(sdl), (char *)edst, sizeof(edst)); + if (*edst & 1) + loop_copy = 1; + type = 0; +#if LLC_DEBUG + { + int i; + register struct llc *l = mtod(m, struct llc *); + + printf("fddi_output: sending LLC2 pkt to: "); + for (i=0; i<6; i++) + printf("%x ", edst[i] & 0xff); + printf(" len 0x%x dsap 0x%x ssap 0x%x control 0x%x\n", + type & 0xff, l->llc_dsap & 0xff, l->llc_ssap &0xff, + l->llc_control & 0xff); + + } +#endif /* LLC_DEBUG */ + } break; +#endif /* LLC */ + + case AF_UNSPEC: + { + struct ether_header *eh; + loop_copy = -1; + eh = (struct ether_header *)dst->sa_data; + (void)memcpy((caddr_t)edst, (caddr_t)eh->ether_dhost, sizeof (edst)); + if (*edst & 1) + m->m_flags |= (M_BCAST|M_MCAST); + type = eh->ether_type; + break; + } + +#if NBPFILTER > 0 + case AF_IMPLINK: + { + fh = mtod(m, struct fddi_header *); + error = EPROTONOSUPPORT; + switch (fh->fddi_fc & (FDDIFC_C|FDDIFC_L|FDDIFC_F)) { + case FDDIFC_LLC_ASYNC: { + /* legal priorities are 0 through 7 */ + if ((fh->fddi_fc & FDDIFC_Z) > 7) + goto bad; + break; + } + case FDDIFC_LLC_SYNC: { + /* FDDIFC_Z bits reserved, must be zero */ + if (fh->fddi_fc & FDDIFC_Z) + goto bad; + break; + } + case FDDIFC_SMT: { + /* FDDIFC_Z bits must be non zero */ + if ((fh->fddi_fc & FDDIFC_Z) == 0) + goto bad; + break; + } + default: { + /* anything else is too dangerous */ + goto bad; + } + } + error = 0; + if (fh->fddi_dhost[0] & 1) + m->m_flags |= (M_BCAST|M_MCAST); + goto queue_it; + } +#endif + default: + printf("%s%d: can't handle af%d\n", ifp->if_name, ifp->if_unit, + dst->sa_family); + senderr(EAFNOSUPPORT); + } + + if (type != 0) { + register struct llc *l; + M_PREPEND(m, sizeof (struct llc), M_DONTWAIT); + if (m == 0) + senderr(ENOBUFS); + l = mtod(m, struct llc *); + l->llc_control = LLC_UI; + l->llc_dsap = l->llc_ssap = LLC_SNAP_LSAP; + l->llc_snap.org_code[0] = l->llc_snap.org_code[1] = l->llc_snap.org_code[2] = 0; + (void)memcpy((caddr_t) &l->llc_snap.ether_type, (caddr_t) &type, + 
sizeof(u_int16_t)); + } + + /* + * Add local net header. If no space in first mbuf, + * allocate another. + */ + M_PREPEND(m, sizeof (struct fddi_header), M_DONTWAIT); + if (m == 0) + senderr(ENOBUFS); + fh = mtod(m, struct fddi_header *); + fh->fddi_fc = FDDIFC_LLC_ASYNC|FDDIFC_LLC_PRIO4; + (void)memcpy((caddr_t)fh->fddi_dhost, (caddr_t)edst, sizeof (edst)); + queue_it: + (void)memcpy((caddr_t)fh->fddi_shost, (caddr_t)ac->ac_enaddr, + sizeof(fh->fddi_shost)); + + /* + * If a simplex interface, and the packet is being sent to our + * Ethernet address or a broadcast address, loopback a copy. + * XXX To make a simplex device behave exactly like a duplex + * device, we should copy in the case of sending to our own + * ethernet address (thus letting the original actually appear + * on the wire). However, we don't do that here for security + * reasons and compatibility with the original behavior. + */ + if ((ifp->if_flags & IFF_SIMPLEX) && + (loop_copy != -1)) { + if ((m->m_flags & M_BCAST) || loop_copy) { + struct mbuf *n = m_copy(m, 0, (int)M_COPYALL); + + (void) if_simloop(ifp, + n, dst, sizeof(struct fddi_header)); + } else if (bcmp(fh->fddi_dhost, + fh->fddi_shost, sizeof(fh->fddi_shost)) == 0) { + (void) if_simloop(ifp, + m, dst, sizeof(struct fddi_header)); + return(0); /* XXX */ + } + } + + s = splimp(); + /* + * Queue message on interface, and start output if interface + * not yet active. + */ + if (IF_QFULL(&ifp->if_snd)) { + IF_DROP(&ifp->if_snd); + splx(s); + senderr(ENOBUFS); + } + ifp->if_obytes += m->m_pkthdr.len; + IF_ENQUEUE(&ifp->if_snd, m); + if ((ifp->if_flags & IFF_OACTIVE) == 0) + (*ifp->if_start)(ifp); + splx(s); + if (m->m_flags & M_MCAST) + ifp->if_omcasts++; + return (error); + +bad: + if (m) + m_freem(m); + return (error); +} + +/* + * Process a received FDDI packet; + * the packet is in the mbuf chain m without + * the fddi header, which is provided separately. 
+ */ +void +fddi_input(ifp, fh, m) + struct ifnet *ifp; + register struct fddi_header *fh; + struct mbuf *m; +{ + register struct ifqueue *inq; + register struct llc *l; + int s; + + if ((ifp->if_flags & IFF_UP) == 0) { + m_freem(m); + return; + } + getmicrotime(&ifp->if_lastchange); + ifp->if_ibytes += m->m_pkthdr.len + sizeof (*fh); + if (fh->fddi_dhost[0] & 1) { + if (bcmp((caddr_t)fddibroadcastaddr, (caddr_t)fh->fddi_dhost, + sizeof(fddibroadcastaddr)) == 0) + m->m_flags |= M_BCAST; + else + m->m_flags |= M_MCAST; + ifp->if_imcasts++; + } else if ((ifp->if_flags & IFF_PROMISC) + && bcmp(((struct arpcom *)ifp)->ac_enaddr, (caddr_t)fh->fddi_dhost, + sizeof(fh->fddi_dhost)) != 0) { + m_freem(m); + return; + } + +#ifdef M_LINK0 + /* + * If this has a LLC priority of 0, then mark it so upper + * layers have a hint that it really came via a FDDI/Ethernet + * bridge. + */ + if ((fh->fddi_fc & FDDIFC_LLC_PRIO7) == FDDIFC_LLC_PRIO0) + m->m_flags |= M_LINK0; +#endif + + l = mtod(m, struct llc *); + switch (l->llc_dsap) { +#if defined(INET) || NS || IPX || defined(NETATALK) + case LLC_SNAP_LSAP: + { + u_int16_t type; + if (l->llc_control != LLC_UI || l->llc_ssap != LLC_SNAP_LSAP) + goto dropanyway; + + if (l->llc_snap.org_code[0] != 0 || l->llc_snap.org_code[1] != 0|| l->llc_snap.org_code[2] != 0) + goto dropanyway; + type = ntohs(l->llc_snap.ether_type); + m_adj(m, 8); + switch (type) { +#if INET + case ETHERTYPE_IP: + if (ipflow_fastforward(m)) + return; + schednetisr(NETISR_IP); + inq = &ipintrq; + break; + + case ETHERTYPE_ARP: +#if !defined(__bsdi__) || _BSDI_VERSION >= 199401 + schednetisr(NETISR_ARP); + inq = &arpintrq; + break; +#else + arpinput((struct arpcom *)ifp, m); + return; +#endif +#endif +#if IPX + case ETHERTYPE_IPX: + schednetisr(NETISR_IPX); + inq = &ipxintrq; + break; +#endif +#if NS + case ETHERTYPE_NS: + schednetisr(NETISR_NS); + inq = &nsintrq; + break; +#endif +#if DECNET + case ETHERTYPE_DECNET: + schednetisr(NETISR_DECNET); + inq = &decnetintrq; 
+ break; +#endif + + default: + /* printf("fddi_input: unknown protocol 0x%x\n", type); */ + ifp->if_noproto++; + goto dropanyway; + } + break; + } +#endif /* INET || NS */ +#if ISO + case LLC_ISO_LSAP: + switch (l->llc_control) { + case LLC_UI: + /* LLC_UI_P forbidden in class 1 service */ + if ((l->llc_dsap == LLC_ISO_LSAP) && + (l->llc_ssap == LLC_ISO_LSAP)) { + /* LSAP for ISO */ + m->m_data += 3; /* XXX */ + m->m_len -= 3; /* XXX */ + m->m_pkthdr.len -= 3; /* XXX */ + M_PREPEND(m, sizeof *fh, M_DONTWAIT); + if (m == 0) + return; + *mtod(m, struct fddi_header *) = *fh; + IFDEBUG(D_ETHER) + printf("clnp packet"); + ENDDEBUG + schednetisr(NETISR_ISO); + inq = &clnlintrq; + break; + } + goto dropanyway; + + case LLC_XID: + case LLC_XID_P: + if(m->m_len < 6) + goto dropanyway; + l->llc_window = 0; + l->llc_fid = 9; + l->llc_class = 1; + l->llc_dsap = l->llc_ssap = 0; + /* Fall through to */ + case LLC_TEST: + case LLC_TEST_P: + { + struct sockaddr sa; + register struct ether_header *eh; + struct arpcom *ac = (struct arpcom *) ifp; + int i; + u_char c = l->llc_dsap; + + l->llc_dsap = l->llc_ssap; + l->llc_ssap = c; + if (m->m_flags & (M_BCAST | M_MCAST)) + bcopy((caddr_t)ac->ac_enaddr, + (caddr_t)eh->ether_dhost, 6); + sa.sa_family = AF_UNSPEC; + sa.sa_len = sizeof(sa); + eh = (struct ether_header *)sa.sa_data; + for (i = 0; i < 6; i++) { + eh->ether_shost[i] = fh->fddi_dhost[i]; + eh->ether_dhost[i] = fh->fddi_shost[i]; + } + eh->ether_type = 0; + ifp->if_output(ifp, m, &sa, NULL); + return; + } + default: + m_freem(m); + return; + } + break; +#endif /* ISO */ +#if LLC + case LLC_X25_LSAP: + { + M_PREPEND(m, sizeof(struct sdl_hdr) , M_DONTWAIT); + if (m == 0) + return; + if ( !sdl_sethdrif(ifp, fh->fddi_shost, LLC_X25_LSAP, + fh->fddi_dhost, LLC_X25_LSAP, 6, + mtod(m, struct sdl_hdr *))) + panic("ETHER cons addr failure"); + mtod(m, struct sdl_hdr *)->sdlhdr_len = m->m_pkthdr.len - sizeof(struct sdl_hdr); +#if LLC_DEBUG + printf("llc packet\n"); +#endif /* 
LLC_DEBUG */ + schednetisr(NETISR_CCITT); + inq = &llcintrq; + break; + } +#endif /* LLC */ + + default: + /* printf("fddi_input: unknown dsap 0x%x\n", l->llc_dsap); */ + ifp->if_noproto++; + dropanyway: + m_freem(m); + return; + } + + s = splimp(); + if (IF_QFULL(inq)) { + IF_DROP(inq); + m_freem(m); + } else + IF_ENQUEUE(inq, m); + splx(s); +} +/* + * Perform common duties while attaching to interface list + */ +#ifdef __NetBSD__ +#define ifa_next ifa_list.tqe_next +#endif + +void +fddi_ifattach(ifp) + register struct ifnet *ifp; +{ + register struct ifaddr *ifa; + register struct sockaddr_dl *sdl; + + ifp->if_type = IFT_FDDI; + ifp->if_addrlen = 6; + ifp->if_hdrlen = 21; + ifp->if_mtu = FDDIMTU; + ifp->if_baudrate = 100000000; +#if IFF_NOTRAILERS + ifp->if_flags |= IFF_NOTRAILERS; +#endif +#if defined(__FreeBSD__) + ifa = ifnet_addrs[ifp->if_index - 1]; + sdl = (struct sockaddr_dl *)ifa->ifa_addr; + sdl->sdl_type = IFT_FDDI; + sdl->sdl_alen = ifp->if_addrlen; + bcopy(((struct arpcom *)ifp)->ac_enaddr, LLADDR(sdl), ifp->if_addrlen); +#elif defined(__NetBSD__) + LIST_INIT(&((struct arpcom *)ifp)->ac_multiaddrs); + for (ifa = ifp->if_addrlist.tqh_first; ifa != NULL; ifa = ifa->ifa_list.tqe_next) +#else + for (ifa = ifp->if_addrlist; ifa != NULL; ifa = ifa->ifa_next) +#endif +#if !defined(__FreeBSD__) + if ((sdl = (struct sockaddr_dl *)ifa->ifa_addr) && + sdl->sdl_family == AF_LINK) { + sdl->sdl_type = IFT_FDDI; + sdl->sdl_alen = ifp->if_addrlen; + bcopy((caddr_t)((struct arpcom *)ifp)->ac_enaddr, + LLADDR(sdl), ifp->if_addrlen); + break; + } +#endif +} diff --git a/bsd/net/if_gif.c b/bsd/net/if_gif.c new file mode 100644 index 000000000..aa0d3df0a --- /dev/null +++ b/bsd/net/if_gif.c @@ -0,0 +1,694 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: if_gif.c,v 1.15 2000/02/22 14:01:46 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * gif.c + */ +#if BSD310 +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#include +#include +#endif /* INET */ + +#if INET6 +#ifndef INET +#include +#endif +#include +#include +#include +#include +#include +#endif /* INET6 */ + +#include +#include +#include + +#include "gif.h" +#include "bpfilter.h" + +#include + +#if NGIF > 0 + +void gifattach __P((void *)); +int gif_pre_output __P((struct ifnet *, register struct mbuf **, struct sockaddr *, + register struct rtentry *, char *, char *, u_long)); + +/* + * gif global variable definitions + */ +int ngif = NGIF; /* number of interfaces */ +struct gif_softc *gif = 0; +static struct if_proto *gif_array[NGIF]; +static gif_count = 0 ; +#ifndef MAX_GIF_NEST +/* + * This macro controls the upper limitation on nesting of gif tunnels. + * Since, setting a large value to this macro with a careless configuration + * may introduce system crash, we don't allow any nestings by default. + * If you need to configure nested gif tunnels, you can define this macro + * in your kernel configuration file. However, if you do so, please be + * careful to configure the tunnels so that it won't make a loop. 
+ */ +#define MAX_GIF_NEST 1 +#endif +static int max_gif_nesting = MAX_GIF_NEST; + + + +#if 0 +int gif_demux(ifp, m, frame_header, proto) + struct ifnet *ifp; + struct mbuf *m; + char *frame_header; + struct if_proto **proto; +{ + int i; + return 0; +} + +int gif_framer(ifp, m, dest, dest_linkaddr, frame_type) + struct ifnet *ifp; + struct mbuf **m; + struct sockaddr *dest; + char *dest_linkaddr; + char *frame_type; + +{ + char *to_ptr; + + return 0; +} +#endif +static +int gif_add_if(struct ifnet *ifp) +{ + ifp->if_demux = 0; + ifp->if_framer = 0; + return 0; +} + +static +int gif_del_if(struct ifnet *ifp) +{ + return 0; +} + +static +int gif_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag) +{ + int i; + + for (i=0; i < gif_count; i++) + if (gif_array[i] == 0) { + gif_array[gif_count] = proto; + return 0; + } + + if ((i == gif_count) && (gif_count == NGIF)) + panic("gif_add_proto -- Too many attachments\n"); + + gif_array[gif_count++] = proto; + + return (0); +} + +static +int gif_del_proto(struct if_proto *proto, u_long dl_tag) +{ + int i; + + for (i=0; i < gif_count; i++) + if (gif_array[i] == proto) { + gif_array[i] = 0; + return 0; + } + + return ENOENT; +} + +int gif_shutdown() +{ + return 0; +} + +void gif_reg_if_mods() +{ + struct dlil_ifmod_reg_str gif_ifmod; + + gif_ifmod.add_if = gif_add_if; + gif_ifmod.del_if = gif_del_if; + gif_ifmod.add_proto = gif_add_proto; + gif_ifmod.del_proto = gif_del_proto; + gif_ifmod.ifmod_ioctl = 0; + gif_ifmod.shutdown = gif_shutdown; + + if (dlil_reg_if_modules(APPLE_IF_FAM_GIF, &gif_ifmod)) + panic("Couldn't register gif modules\n"); + +} + +u_long gif_attach_inet(struct ifnet *ifp) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + u_long dl_tag=0; + short native=0; + int stat; + int i; + + for (i=0; i < gif_count; i++) { + if (gif_array[i] && (gif_array[i]->ifp == ifp) && + (gif_array[i]->protocol_family == PF_INET)) { +#if 0 + kprintf("gif_attach for %s%d found 
dl_tag=%d\n", + ifp->if_name, ifp->if_unit, gif_array[i]->dl_tag); +#endif + return gif_array[i]->dl_tag; + + } + } + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_RAW; + desc.variants.bitmask.proto_id_length = 0; + desc.variants.bitmask.proto_id = 0; + desc.variants.bitmask.proto_id_mask = 0; + desc.native_type = (char *) &native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = gif_input; + reg.pre_output = gif_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = gif_ioctl; + reg.default_proto = 0; + reg.protocol_family = PF_INET; + + stat = dlil_attach_protocol(®, &dl_tag); + if (stat) { + panic("gif_attach_inet can't attach interface\n"); + } + + return dl_tag; +} + +void +gifattach(dummy) + void *dummy; +{ + register struct gif_softc *sc; + register int i; + + gif_reg_if_mods(); /* DLIL modules */ + + gif = sc = _MALLOC (ngif * sizeof(struct gif_softc), M_DEVBUF, M_WAIT); + bzero(sc, ngif * sizeof(struct gif_softc)); + for (i = 0; i < ngif; sc++, i++) { + sc->gif_if.if_name = "gif"; + sc->gif_if.if_unit = i; + sc->gif_if.if_family = APPLE_IF_FAM_GIF; + sc->gif_if.if_mtu = GIF_MTU; + sc->gif_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST; + sc->gif_if.if_ioctl = gif_ioctl; + sc->gif_if.if_output = NULL; + sc->gif_if.if_type = IFT_GIF; + dlil_if_attach(&sc->gif_if); +#if 0 + kprintf("gifattach: Attaching gif%d sc=%x gif_if=%x\n", i, sc, &sc->gif_if); +#endif +#if NBPFILTER > 0 +#ifdef HAVE_OLD_BPF + bpfattach(&sc->gif_if, DLT_NULL, sizeof(u_int)); +#else + bpfattach(&sc->gif_if.if_bpf, &sc->gif_if, DLT_NULL, sizeof(u_int)); +#endif +#endif + } +} + +#ifdef __FreeBSD__ +PSEUDO_SET(gifattach, if_gif); +#endif + +int +gif_pre_output(ifp, m0, dst, rt, frame, address, dl_tag) + struct ifnet *ifp; + struct mbuf **m0; + struct sockaddr *dst; + struct rtentry *rt; /* added in net2 */ + char *frame; + char *address; + u_long dl_tag; +{ + register struct gif_softc 
*sc = (struct gif_softc*)ifp; + register struct mbuf * m = *m0; + int error = 0; + static int called = 0; /* XXX: MUTEX */ + + /* + * gif may cause infinite recursion calls when misconfigured. + * We'll prevent this by introducing upper limit. + * XXX: this mechanism may introduce another problem about + * mutual exclusion of the variable CALLED, especially if we + * use kernel thread. + */ + if (++called > max_gif_nesting) { + log(LOG_NOTICE, + "gif_output: recursively called too many times(%d)\n", + called); + m_freem(m); + error = EIO; /* is there better errno? */ + goto end; + } + + getmicrotime(&ifp->if_lastchange); + m->m_flags &= ~(M_BCAST|M_MCAST); + if (!(ifp->if_flags & IFF_UP) || +#if 0 + sc->gif_flags & GIFF_INUSE || +#endif + sc->gif_psrc == NULL || sc->gif_pdst == NULL) { + m_freem(m); + error = ENETDOWN; + printf("gif_output: packed discarded ENETDOWN\n"); + goto end; + } + +#if NBPFILTER > 0 + if (ifp->if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer a to it). 
+ */ + struct mbuf m0; + u_int af = dst->sa_family; + + m0.m_next = m; + m0.m_len = 4; + m0.m_data = (char *)⁡ + +#ifdef HAVE_OLD_BPF + bpf_mtap(ifp, &m0); +#else + bpf_mtap(ifp->if_bpf, &m0); +#endif + } +#endif + ifp->if_opackets++; + ifp->if_obytes += m->m_pkthdr.len; +#if 0 + s = splnet(); + sc->gif_flags |= GIFF_INUSE; +#endif + + switch (sc->gif_psrc->sa_family) { +#if INET + case AF_INET: + error = in_gif_output(ifp, dst->sa_family, m, rt); + if (error) + printf("in_gif_output returned error=%d\n", error); + break; +#endif +#if INET6 + case AF_INET6: + error = in6_gif_output(ifp, dst->sa_family, m, rt); + if (error) + printf("in6_gif_output returned error=%d\n", error); + break; +#endif + default: + m_freem(m); + error = ENETDOWN; + } +#if 0 + sc->gif_flags &= ~GIFF_INUSE; + splx(s); +#endif + + end: + called = 0; /* reset recursion counter */ + if (error) ifp->if_oerrors++; + return EJUSTRETURN; +} + +void +gif_input(m, af, gifp) + struct mbuf *m; + int af; + struct ifnet *gifp; +{ + int s, isr; + register struct ifqueue *ifq = 0; + + if (gifp == NULL) { + /* just in case */ + m_freem(m); + return; + } + + if (m->m_pkthdr.rcvif) + m->m_pkthdr.rcvif = gifp; + +#if NBPFILTER > 0 + if (gifp->if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer a to it). + */ + struct mbuf m0; + u_int af = AF_INET6; + + m0.m_next = m; + m0.m_len = 4; + m0.m_data = (char *)⁡ + +#ifdef HAVE_OLD_BPF + bpf_mtap(gifp, &m0); +#else + bpf_mtap(gifp->if_bpf, &m0); +#endif + } +#endif /*NBPFILTER > 0*/ + + /* + * Put the packet to the network layer input queue according to the + * specified address family. + * Note: older versions of gif_input directly called network layer + * input functions, e.g. ip6_input, here. 
We changed the policy to + * prevent too many recursive calls of such input functions, which + * might cause kernel panic. But the change may introduce another + * problem; if the input queue is full, packets are discarded. + * We believed it rarely occurs and changed the policy. If we find + * it occurs more times than we thought, we may change the policy + * again. + */ + switch (af) { +#if INET + case AF_INET: + ifq = &ipintrq; + isr = NETISR_IP; + break; +#endif +#if INET6 + case AF_INET6: + ifq = &ip6intrq; + isr = NETISR_IPV6; + break; +#endif + default: + m_freem(m); + return; + } + + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); /* update statistics */ + m_freem(m); + splx(s); + return; + } + IF_ENQUEUE(ifq, m); + /* we need schednetisr since the address family may change */ + schednetisr(isr); + gifp->if_ipackets++; + gifp->if_ibytes += m->m_pkthdr.len; + splx(s); + + return; +} + +/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? */ +int +gif_ioctl(ifp, cmd, data) + struct ifnet *ifp; + u_long cmd; + caddr_t data; +{ + struct gif_softc *sc = (struct gif_softc*)ifp; + struct ifreq *ifr = (struct ifreq*)data; + int error = 0, size; + struct sockaddr *dst, *src; + int i; + struct gif_softc *sc2; + + switch (cmd) { + case SIOCSIFADDR: + break; + + case SIOCSIFDSTADDR: + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + /* Called from if_addmulti() with data == NULL if __FreeBSD__ >= 3 */ +#if !defined(__APPLE__) + switch (ifr->ifr_addr.sa_family) { +#ifdef INET + case AF_INET: /* IP supports Multicast */ + break; +#endif /* INET */ +#ifdef INET6 + case AF_INET6: /* IP6 supports Multicast */ + break; +#endif /* INET6 */ + default: /* Other protocols doesn't support Multicast */ + error = EAFNOSUPPORT; + break; + } +#endif /*not FreeBSD3*/ + break; + +#ifdef SIOCSIFMTU /* xxx */ +#ifndef __OpenBSD__ + case SIOCGIFMTU: + break; + case SIOCSIFMTU: + { +#ifdef __bsdi__ + short mtu; + mtu = *(short *)ifr->ifr_data; +#else + u_long mtu; + mtu = 
ifr->ifr_mtu; +#endif + if (mtu < GIF_MTU_MIN || mtu > GIF_MTU_MAX) { + return (EINVAL); + } + ifp->if_mtu = mtu; + } + break; +#endif +#endif /* SIOCSIFMTU */ + + case SIOCSIFPHYADDR: +#if INET6 + case SIOCSIFPHYADDR_IN6: +#endif /* INET6 */ + /* can't configure same pair of address onto two gif */ + src = (struct sockaddr *) + &(((struct in_aliasreq *)data)->ifra_addr); + dst = (struct sockaddr *) + &(((struct in_aliasreq *)data)->ifra_dstaddr); + for (i = 0; i < ngif; i++) { + sc2 = gif + i; + if (sc2 == sc) + continue; + if (!sc2->gif_pdst || !sc2->gif_psrc) + continue; + if (sc2->gif_pdst->sa_family == dst->sa_family && + sc2->gif_pdst->sa_len == dst->sa_len && + bcmp(sc2->gif_pdst, dst, dst->sa_len) == 0 && + sc2->gif_psrc->sa_family == src->sa_family && + sc2->gif_psrc->sa_len == src->sa_len && + bcmp(sc2->gif_psrc, src, src->sa_len) == 0) { + error = EADDRNOTAVAIL; + goto bad; + } + } + + switch (ifr->ifr_addr.sa_family) { +#if INET + case AF_INET: + return in_gif_ioctl(ifp, cmd, data); +#endif /* INET */ +#if INET6 + case AF_INET6: + return in6_gif_ioctl(ifp, cmd, data); +#endif /* INET6 */ + default: + error = EPROTOTYPE; + goto bad; + break; + } + break; + + case SIOCGIFPSRCADDR: +#if INET6 + case SIOCGIFPSRCADDR_IN6: +#endif /* INET6 */ + if (sc->gif_psrc == NULL) { + error = EADDRNOTAVAIL; + goto bad; + } + src = sc->gif_psrc; + switch (sc->gif_psrc->sa_family) { +#if INET + case AF_INET: + dst = &ifr->ifr_addr; + size = sizeof(struct sockaddr_in); + break; +#endif /* INET */ +#if INET6 + case AF_INET6: + dst = (struct sockaddr *) + &(((struct in6_ifreq *)data)->ifr_addr); + size = sizeof(struct sockaddr_in6); + break; +#endif /* INET6 */ + default: + error = EADDRNOTAVAIL; + goto bad; + } + bcopy((caddr_t)src, (caddr_t)dst, size); + break; + + case SIOCGIFPDSTADDR: +#if INET6 + case SIOCGIFPDSTADDR_IN6: +#endif /* INET6 */ + if (sc->gif_pdst == NULL) { + error = EADDRNOTAVAIL; + goto bad; + } + src = sc->gif_pdst; + switch 
(sc->gif_pdst->sa_family) { +#if INET + case AF_INET: + dst = &ifr->ifr_addr; + size = sizeof(struct sockaddr_in); + break; +#endif /* INET */ +#if INET6 + case AF_INET6: + dst = (struct sockaddr *) + &(((struct in6_ifreq *)data)->ifr_addr); + size = sizeof(struct sockaddr_in6); + break; +#endif /* INET6 */ + default: + error = EADDRNOTAVAIL; + goto bad; + } + bcopy((caddr_t)src, (caddr_t)dst, size); + break; + + case SIOCSIFFLAGS: + if (sc->gif_psrc == NULL) + break; + switch (sc->gif_psrc->sa_family) { +#if INET + case AF_INET: + return in_gif_ioctl(ifp, cmd, data); +#endif /* INET */ +#if INET6 + case AF_INET6: + return in6_gif_ioctl(ifp, cmd, data); +#endif /* INET6 */ + default: + error = EPROTOTYPE; + goto bad; + break; + } + break; + + default: + error = EINVAL; + break; + } + bad: + return error; +} +#endif /*NGIF > 0*/ diff --git a/bsd/net/if_gif.h b/bsd/net/if_gif.h new file mode 100644 index 000000000..3e5d4996b --- /dev/null +++ b/bsd/net/if_gif.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: if_gif.h,v 1.7 2000/02/22 14:01:46 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * if_gif.h + */ + +#ifndef _NET_IF_GIF_H_ +#define _NET_IF_GIF_H_ + +#include +/* xxx sigh, why route have struct route instead of pointer? 
*/ + +struct encaptab; + +struct gif_softc { + struct ifnet gif_if; /* common area */ + struct sockaddr *gif_psrc; /* Physical src addr */ + struct sockaddr *gif_pdst; /* Physical dst addr */ + union { + struct route gifscr_ro; /* xxx */ +#if INET6 + struct route_in6 gifscr_ro6; /* xxx */ +#endif + } gifsc_gifscr; + int gif_flags; + const struct encaptab *encap_cookie; + short gif_oflags; /* copy of ifp->if_flags */ +}; + +#define gif_ro gifsc_gifscr.gifscr_ro +#if INET6 +#define gif_ro6 gifsc_gifscr.gifscr_ro6 +#endif + +#define GIFF_INUSE 0x1 /* gif is in use */ + +#define GIF_MTU (1280) /* Default MTU */ +#define GIF_MTU_MIN (1280) /* Minimum MTU */ +#define GIF_MTU_MAX (8192) /* Maximum MTU */ + +#define GIF_TTL 30 +#define GIF_HLIM 30 + +extern int ngif; +extern struct gif_softc *gif; + +/* Prototypes */ +void gif_input __P((struct mbuf *, int, struct ifnet *)); +int gif_output __P((struct ifnet *, struct mbuf *, + struct sockaddr *, struct rtentry *)); +int gif_ioctl __P((struct ifnet *, u_long, caddr_t)); + +#endif /* _NET_IF_GIF_H_ */ diff --git a/bsd/net/if_llc.h b/bsd/net/if_llc.h new file mode 100644 index 000000000..2d8fe4f03 --- /dev/null +++ b/bsd/net/if_llc.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_llc.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NET_IF_LLC_H_ +#define _NET_IF_LLC_H_ + +/* + * IEEE 802.2 Link Level Control headers, for use in conjunction with + * 802.{3,4,5} media access control methods. + * + * Headers here do not use bit fields due to shortcomings in many + * compilers. + */ + +struct llc { + u_char llc_dsap; + u_char llc_ssap; + union { + struct { + u_char control; + u_char format_id; + u_char class; + u_char window_x2; + } type_u; + struct { + u_char num_snd_x2; + u_char num_rcv_x2; + } type_i; + struct { + u_char control; + u_char num_rcv_x2; + } type_s; + struct { + u_char control; + struct frmrinfo { + u_char rej_pdu_0; + u_char rej_pdu_1; + u_char frmr_control; + u_char frmr_control_ext; + u_char frmr_cause; + } frmrinfo; + } type_frmr; + struct { + u_char control; + u_char org_code[3]; + u_short ether_type; + } type_snap; + struct { + u_char control; + u_char control_ext; + } type_raw; + } llc_un; +}; +#define llc_control llc_un.type_u.control +#define llc_control_ext llc_un.type_raw.control_ext +#define llc_fid llc_un.type_u.format_id +#define llc_class llc_un.type_u.class +#define llc_window llc_un.type_u.window_x2 +#define llc_frmrinfo llc_un.type_frmr.frmrinfo +#define llc_frmr_pdu0 llc_un.type_frmr.frmrinfo.rej_pdu_0 +#define llc_frmr_pdu1 llc_un.type_frmr.frmrinfo.rej_pdu_1 +#define llc_frmr_control llc_un.type_frmr.frmrinfo.frmr_control +#define llc_frmr_control_ext 
llc_un.type_frmr.frmrinfo.frmr_control_ext +#define llc_frmr_cause llc_un.type_frmr.frmrinfo.frmr_cause + +/* + * Don't use sizeof(struct llc_un) for LLC header sizes + */ +#define LLC_ISFRAMELEN 4 +#define LLC_UFRAMELEN 3 +#define LLC_FRMRLEN 7 + +/* + * Unnumbered LLC format commands + */ +#define LLC_UI 0x3 +#define LLC_UI_P 0x13 +#define LLC_DISC 0x43 +#define LLC_DISC_P 0x53 +#define LLC_UA 0x63 +#define LLC_UA_P 0x73 +#define LLC_TEST 0xe3 +#define LLC_TEST_P 0xf3 +#define LLC_FRMR 0x87 +#define LLC_FRMR_P 0x97 +#define LLC_DM 0x0f +#define LLC_DM_P 0x1f +#define LLC_XID 0xaf +#define LLC_XID_P 0xbf +#define LLC_SABME 0x6f +#define LLC_SABME_P 0x7f + +/* + * Supervisory LLC commands + */ +#define LLC_RR 0x01 +#define LLC_RNR 0x05 +#define LLC_REJ 0x09 + +/* + * Info format - dummy only + */ +#define LLC_INFO 0x00 + +/* + * ISO PDTR 10178 contains among others + */ +#define LLC_X25_LSAP 0x7e +#define LLC_SNAP_LSAP 0xaa +#define LLC_ISO_LSAP 0xfe + +#endif diff --git a/bsd/net/if_loop.c b/bsd/net/if_loop.c new file mode 100644 index 000000000..422303b87 --- /dev/null +++ b/bsd/net/if_loop.c @@ -0,0 +1,614 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)if_loop.c 8.1 (Berkeley) 6/10/93 + */ + +/* + * Loopback interface driver for protocol testing and timing. + */ +#include "loop.h" +#if NLOOP > 0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if INET +#include +#include +#endif + +#if IPX +#include +#include +#endif + +#if INET6 +#ifndef INET +#include +#endif +#include +#include +#endif + +#if NS +#include +#include +#endif + +#if ISO +#include +#include +#endif + +#include + +#if NETAT +extern struct ifqueue atalkintrq; +#endif + +#include "bpfilter.h" +#if NBPFILTER > 0 +#include +#endif + +#define NLOOP_ATTACHMENTS (NLOOP * 12) + +struct lo_statics_str { + int bpf_mode; + int (*bpf_callback)(struct ifnet *, struct mbuf *); +}; + +static struct if_proto *lo_array[NLOOP_ATTACHMENTS]; +static struct lo_statics_str lo_statics[NLOOP]; +static lo_count = 0; + + +#ifdef TINY_LOMTU +#define LOMTU (1024+512) +#else +#define LOMTU 16384 +#endif + +struct ifnet loif[NLOOP]; + +void lo_reg_if_mods(); + + + + +int lo_demux(ifp, m, frame_header, proto) + struct ifnet *ifp; + struct mbuf *m; + char *frame_header; + struct if_proto **proto; +{ + int i; + struct if_proto **proto_ptr; + + proto_ptr = mtod(m, struct if_proto **); + *proto = *proto_ptr; + m_adj(m, sizeof(u_long)); + return 0; +} + + +int lo_framer(ifp, m, dest, dest_linkaddr, frame_type) + struct ifnet *ifp; + struct mbuf **m; + struct sockaddr *dest; + char *dest_linkaddr; + char *frame_type; + +{ + char *to_ptr; + + M_PREPEND(*m, (4 * sizeof(u_long)), M_WAITOK); + to_ptr = mtod(*m, char *); + bcopy(dest_linkaddr, to_ptr, (4 * sizeof(u_long))); + return 0; +} + +static +int lo_add_if(struct ifnet *ifp) +{ + ifp->if_demux = lo_demux; + ifp->if_framer = lo_framer; + ifp->if_event = 0; + return 0; +} + +static +int lo_del_if(struct ifnet *ifp) +{ + return 0; +} + + + + +static +int lo_add_proto(struct ddesc_head_str *desc_head, struct if_proto *proto, u_long dl_tag) +{ + 
int i; + + for (i=0; i < lo_count; i++) + if (lo_array[i] == 0) { + lo_array[lo_count] = proto; + return 0; + } + + if ((i == lo_count) && (lo_count == NLOOP_ATTACHMENTS)) + panic("lo_add_proto -- Too many attachments\n"); + + lo_array[lo_count++] = proto; + return 0; +} + + +static +int lo_del_proto(struct if_proto *proto, u_long dl_tag) +{ + int i; + + for (i=0; i < lo_count; i++) + if (lo_array[i] == proto) { + lo_array[i] = 0; + return 0; + } + + return ENOENT; +} + +static int +lo_output(ifp, m) + struct ifnet *ifp; + register struct mbuf *m; +{ u_int *prepend_ptr; + u_int af; + u_long saved_header[3]; + + if ((m->m_flags & M_PKTHDR) == 0) + panic("lo_output: no HDR"); + + /* + * Don't overwrite the rcvif field if it is in use. + * This is used to match multicast packets, sent looping + * back, with the appropriate group record on input. + */ + if (m->m_pkthdr.rcvif == NULL) + m->m_pkthdr.rcvif = ifp; + prepend_ptr = mtod(m, u_int *); + af = *prepend_ptr; + m_adj(m, sizeof(u_int)); + + +#if NBPFILTER > 0 + if (lo_statics[ifp->if_unit].bpf_mode != BPF_TAP_DISABLE) { + struct mbuf m0, *n; + + bcopy(mtod(m, caddr_t), &saved_header[0], (3 * sizeof(u_long))); + m_adj(m, (3 * sizeof(u_long))); + + n = m; + if (ifp->if_bpf->bif_dlt == DLT_NULL) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer a to it). 
+ */ + m0.m_next = m; + m0.m_len = 4; + m0.m_data = (char *)⁡ + n = &m0; + } + + (*lo_statics[ifp->if_unit].bpf_callback)(ifp, n); + + M_PREPEND(m, (3 * sizeof(u_long)), M_WAITOK); + bcopy(&saved_header[0], mtod(m, caddr_t), (3 * sizeof(u_long))); + + } +#endif + + ifp->if_ibytes += m->m_pkthdr.len; + ifp->if_obytes += m->m_pkthdr.len; + + ifp->if_opackets++; + ifp->if_ipackets++; + + /* WARNING + * This won't work for loopbacked multicast + */ + m->m_pkthdr.header = mtod(m, char *); + m->m_pkthdr.aux = ifp; /* HACKERY */ + return dlil_input(ifp, m, m); +} + + +/* + * This is a common pre-output route used by INET, AT, etc. This could + * (should?) be split into separate pre-output routines for each protocol. + */ + +static int +lo_pre_output(ifp, m, dst, route, frame_type, dst_addr, dl_tag) + struct ifnet *ifp; + register struct mbuf **m; + struct sockaddr *dst; + void *route; + char *frame_type; + char *dst_addr; + u_long dl_tag; + +{ + int s, isr; + register struct ifqueue *ifq = 0; + u_long *prepend_ptr; + register struct rtentry *rt = (struct rtentry *) route; + + prepend_ptr = (u_long *) dst_addr; + if (((*m)->m_flags & M_PKTHDR) == 0) + panic("looutput no HDR"); + + if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) { + if (rt->rt_flags & RTF_BLACKHOLE) { + m_freem(*m); + return EJUSTRETURN; + } + else + return ((rt->rt_flags & RTF_HOST) ? 
EHOSTUNREACH : ENETUNREACH); + } + + switch (dst->sa_family) { +#if INET + case AF_INET: + ifq = &ipintrq; + isr = NETISR_IP; + break; +#endif +#if INET6 + case AF_INET6: + ifq = &ip6intrq; + isr = NETISR_IPV6; + break; +#endif +#if IPX + case AF_IPX: + ifq = &ipxintrq; + isr = NETISR_IPX; + break; +#endif +#if NS + case AF_NS: + ifq = &nsintrq; + isr = NETISR_NS; + break; +#endif +#if ISO + case AF_ISO: + ifq = &clnlintrq; + isr = NETISR_ISO; + break; +#endif +#if NETAT + case AF_APPLETALK: + ifq = &atalkintrq; + isr = NETISR_APPLETALK; + break; +#endif NETAT + default: + return (EAFNOSUPPORT); + } + + *prepend_ptr++ = dst->sa_family; /* For lo_output(BPF) */ + *prepend_ptr++ = dlttoproto(dl_tag); /* For lo_demux */ + *prepend_ptr++ = (u_long) ifq; /* For lo_input */ + *prepend_ptr = isr; /* For lo_input */ + + return 0; +} + + + + +/* + * lo_input - This should work for all attached protocols that use the + * ifq/schednetisr input mechanism. + */ + + +int +lo_input(m, fh, ifp, dl_tag, sync_ok) + register struct mbuf *m; + char *fh; + struct ifnet *ifp; + u_long dl_tag; + int sync_ok; + +{ + u_long *prepend_ptr; + int s, isr; + register struct ifqueue *ifq = 0; + + prepend_ptr = mtod(m, u_long *); + ifq = (struct ifqueue *) *prepend_ptr++; + isr = *prepend_ptr; + m_adj(m, (2 * sizeof(u_long))); + + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + splx(s); + return (EJUSTRETURN); + } + + IF_ENQUEUE(ifq, m); + schednetisr(isr); + splx(s); + return (0); +} + + + + +/* ARGSUSED */ +static void +lortrequest(cmd, rt, sa) + int cmd; + struct rtentry *rt; + struct sockaddr *sa; +{ + if (rt) { + rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; /* for ISO */ + /* + * For optimal performance, the send and receive buffers + * should be at least twice the MTU plus a little more for + * overhead. + */ + rt->rt_rmx.rmx_recvpipe = + rt->rt_rmx.rmx_sendpipe = 3 * LOMTU; + } +} + +/* + * Process an ioctl request. 
+ */ +/* ARGSUSED */ +static int +loioctl(dl_tag, ifp, cmd, data) + u_long dl_tag; + register struct ifnet *ifp; + u_long cmd; + void *data; +{ + register struct ifaddr *ifa; + register struct ifreq *ifr = (struct ifreq *)data; + register int error = 0; + + switch (cmd) { + + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP | IFF_RUNNING; + ifa = (struct ifaddr *)data; + ifa->ifa_rtrequest = lortrequest; + /* + * Everything else is done at a higher level. + */ + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + if (ifr == 0) { + error = EAFNOSUPPORT; /* XXX */ + break; + } + switch (ifr->ifr_addr.sa_family) { + +#if INET + case AF_INET: + break; +#endif +#if INET6 + case AF_INET6: + break; +#endif + + default: + error = EAFNOSUPPORT; + break; + } + break; + + case SIOCSIFMTU: + ifp->if_mtu = ifr->ifr_mtu; + break; + + case SIOCSIFFLAGS: + break; + + default: + error = EOPNOTSUPP; + } + return (error); +} +#endif /* NLOOP > 0 */ + + +int lo_shutdown() +{ + return 0; +} + + +void lo_reg_if_mods() +{ + struct dlil_ifmod_reg_str lo_ifmod; + + lo_ifmod.add_if = lo_add_if; + lo_ifmod.del_if = lo_del_if; + lo_ifmod.add_proto = lo_add_proto; + lo_ifmod.del_proto = lo_del_proto; + lo_ifmod.ifmod_ioctl = 0; + lo_ifmod.shutdown = lo_shutdown; + + if (dlil_reg_if_modules(APPLE_IF_FAM_LOOPBACK, &lo_ifmod)) + panic("Couldn't register lo modules\n"); +} + + +u_long lo_attach_inet(struct ifnet *ifp) +{ + struct dlil_proto_reg_str reg; + struct dlil_demux_desc desc; + u_long dl_tag=0; + short native=0; + int stat; + int i; + + for (i=0; i < lo_count; i++) { + if ((lo_array[i]) && (lo_array[i]->ifp == ifp)) { + if (lo_array[i]->protocol_family == PF_INET) + return lo_array[i]->dl_tag; + } + } + + TAILQ_INIT(®.demux_desc_head); + desc.type = DLIL_DESC_RAW; + desc.variants.bitmask.proto_id_length = 0; + desc.variants.bitmask.proto_id = 0; + desc.variants.bitmask.proto_id_mask = 0; + desc.native_type = (char *) &native; + TAILQ_INSERT_TAIL(®.demux_desc_head, &desc, next); + 
reg.interface_family = ifp->if_family; + reg.unit_number = ifp->if_unit; + reg.input = lo_input; + reg.pre_output = lo_pre_output; + reg.event = 0; + reg.offer = 0; + reg.ioctl = loioctl; + reg.default_proto = 0; + reg.protocol_family = PF_INET; + + stat = dlil_attach_protocol(®, &dl_tag); + if (stat) { + panic("lo_attach_inet can't attach interface\n"); + } + + return dl_tag; +} + + +int lo_set_bpf_tap(struct ifnet *ifp, int mode, int (*bpf_callback)(struct ifnet *, struct mbuf *)) +{ + + /* + * NEED MUTEX HERE XXX + */ + if (mode == BPF_TAP_DISABLE) { + lo_statics[ifp->if_unit].bpf_mode = mode; + lo_statics[ifp->if_unit].bpf_callback = bpf_callback; + } + else { + lo_statics[ifp->if_unit].bpf_callback = bpf_callback; + lo_statics[ifp->if_unit].bpf_mode = mode; + } + + return 0; +} + + +/* ARGSUSED */ +void +loopattach(dummy) + void *dummy; +{ + register struct ifnet *ifp; + register int i = 0; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + lo_reg_if_mods(); + + for (ifp = loif; i < NLOOP; ifp++) { + lo_statics[i].bpf_callback = 0; + lo_statics[i].bpf_mode = BPF_TAP_DISABLE; + ifp->if_name = "lo"; + ifp->if_family = APPLE_IF_FAM_LOOPBACK; + ifp->if_unit = i++; + ifp->if_mtu = LOMTU; + ifp->if_flags = IFF_LOOPBACK | IFF_MULTICAST; + ifp->if_ioctl = 0; + ifp->if_set_bpf_tap = lo_set_bpf_tap; + ifp->if_output = lo_output; + ifp->if_type = IFT_LOOP; + dlil_if_attach(ifp); +#if NBPFILTER > 0 + bpfattach(ifp, DLT_NULL, sizeof(u_int)); +#endif + } + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +} diff --git a/bsd/net/if_media.c b/bsd/net/if_media.c new file mode 100644 index 000000000..afbf3ef82 --- /dev/null +++ b/bsd/net/if_media.c @@ -0,0 +1,504 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_media.c,v 1.1 1997/03/17 02:55:15 thorpej Exp $ */ + +/* + * Copyright (c) 1997 + * Jonathan Stone and Jason R. Thorpe. All rights reserved. + * + * This software is derived from information provided by Matt Thomas. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Jonathan Stone + * and Jason R. Thorpe for the NetBSD Project. + * 4. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * BSD/OS-compatible network interface media selection. + * + * Where it is safe to do so, this code strays slightly from the BSD/OS + * design. Software which uses the API (device drivers, basically) + * shouldn't notice any difference. + * + * Many thanks to Matt Thomas for providing the information necessary + * to implement this interface. + */ + +#include +#include +#include +#include +#include + +#include +#include + +/* + * Compile-time options: + * IFMEDIA_DEBUG: + * turn on implementation-level debug printfs. + * Useful for debugging newly-ported drivers. + */ + +static struct ifmedia_entry *ifmedia_match __P((struct ifmedia *ifm, + int flags, int mask)); + +#ifdef IFMEDIA_DEBUG +int ifmedia_debug = 0; +static void ifmedia_printword __P((int)); +#endif + +/* + * Initialize if_media struct for a specific interface instance. 
+ */ +void +ifmedia_init(ifm, dontcare_mask, change_callback, status_callback) + struct ifmedia *ifm; + int dontcare_mask; + ifm_change_cb_t change_callback; + ifm_stat_cb_t status_callback; +{ + + LIST_INIT(&ifm->ifm_list); + ifm->ifm_cur = NULL; + ifm->ifm_media = 0; + ifm->ifm_mask = dontcare_mask; /* IF don't-care bits */ + ifm->ifm_change = change_callback; + ifm->ifm_status = status_callback; +} + +/* + * Add a media configuration to the list of supported media + * for a specific interface instance. + */ +void +ifmedia_add(ifm, mword, data, aux) + struct ifmedia *ifm; + int mword; + int data; + void *aux; +{ + register struct ifmedia_entry *entry; + +#ifdef IFMEDIA_DEBUG + if (ifmedia_debug) { + if (ifm == NULL) { + printf("ifmedia_add: null ifm\n"); + return; + } + printf("Adding entry for "); + ifmedia_printword(mword); + } +#endif + + entry = _MALLOC(sizeof(*entry), M_IFADDR, M_NOWAIT); + if (entry == NULL) + panic("ifmedia_add: can't malloc entry"); + + entry->ifm_media = mword; + entry->ifm_data = data; + entry->ifm_aux = aux; + + LIST_INSERT_HEAD(&ifm->ifm_list, entry, ifm_list); +} + +/* + * Add an array of media configurations to the list of + * supported media for a specific interface instance. + */ +void +ifmedia_list_add(ifm, lp, count) + struct ifmedia *ifm; + struct ifmedia_entry *lp; + int count; +{ + int i; + + for (i = 0; i < count; i++) + ifmedia_add(ifm, lp[i].ifm_media, lp[i].ifm_data, + lp[i].ifm_aux); +} + +/* + * Set the default active media. + * + * Called by device-specific code which is assumed to have already + * selected the default media in hardware. We do _not_ call the + * media-change callback. 
+ */ +void +ifmedia_set(ifm, target) + struct ifmedia *ifm; + int target; + +{ + struct ifmedia_entry *match; + + match = ifmedia_match(ifm, target, ifm->ifm_mask); + + if (match == NULL) { + printf("ifmedia_set: no match for 0x%x/0x%x\n", + target, ~ifm->ifm_mask); + panic("ifmedia_set"); + } + ifm->ifm_cur = match; + +#ifdef IFMEDIA_DEBUG + if (ifmedia_debug) { + printf("ifmedia_set: target "); + ifmedia_printword(target); + printf("ifmedia_set: setting to "); + ifmedia_printword(ifm->ifm_cur->ifm_media); + } +#endif +} + +/* + * Device-independent media ioctl support function. + */ +int +ifmedia_ioctl(ifp, ifr, ifm, cmd) + struct ifnet *ifp; + struct ifreq *ifr; + struct ifmedia *ifm; + u_long cmd; +{ + struct ifmedia_entry *match; + struct ifmediareq *ifmr = (struct ifmediareq *) ifr; + int error = 0, sticky; + + if (ifp == NULL || ifr == NULL || ifm == NULL) + return(EINVAL); + + switch (cmd) { + + /* + * Set the current media. + */ + case SIOCSIFMEDIA: + { + struct ifmedia_entry *oldentry; + int oldmedia; + int newmedia = ifr->ifr_media; + + match = ifmedia_match(ifm, newmedia, ifm->ifm_mask); + if (match == NULL) { +#ifdef IFMEDIA_DEBUG + if (ifmedia_debug) { + printf( + "ifmedia_ioctl: no media found for 0x%x\n", + newmedia); + } +#endif + return (ENXIO); + } + + /* + * If no change, we're done. + * XXX Automedia may involve software intervention. + * Keep going in case the connected media changed. + * Similarly, if best match changed (kernel debugger?). + */ + if ((IFM_SUBTYPE(newmedia) != IFM_AUTO) && + (newmedia == ifm->ifm_media) && + (match == ifm->ifm_cur)) + return 0; + + /* + * We found a match, now make the driver switch to it. + * Make sure to preserve our old media type in case the + * driver can't switch. 
+ */ +#ifdef IFMEDIA_DEBUG + if (ifmedia_debug) { + printf("ifmedia_ioctl: switching %s to ", + ifp->if_xname); + ifmedia_printword(match->ifm_media); + } +#endif + oldentry = ifm->ifm_cur; + oldmedia = ifm->ifm_media; + ifm->ifm_cur = match; + ifm->ifm_media = newmedia; + error = (*ifm->ifm_change)(ifp); + if (error) { + ifm->ifm_cur = oldentry; + ifm->ifm_media = oldmedia; + } + break; + } + + /* + * Get list of available media and current media on interface. + */ + case SIOCGIFMEDIA: + { + struct ifmedia_entry *ep; + int *kptr, count; + + kptr = NULL; /* XXX gcc */ + + ifmr->ifm_active = ifmr->ifm_current = ifm->ifm_cur ? + ifm->ifm_cur->ifm_media : IFM_NONE; + ifmr->ifm_mask = ifm->ifm_mask; + ifmr->ifm_status = 0; + (*ifm->ifm_status)(ifp, ifmr); + + count = 0; + ep = ifm->ifm_list.lh_first; + + if (ifmr->ifm_count != 0) { + kptr = (int *) _MALLOC(ifmr->ifm_count * sizeof(int), + M_TEMP, M_WAITOK); + + /* + * Get the media words from the interface's list. + */ + for (; ep != NULL && count < ifmr->ifm_count; + ep = ep->ifm_list.le_next, count++) + kptr[count] = ep->ifm_media; + + if (ep != NULL) + error = E2BIG; /* oops! */ + } + + /* + * If there are more interfaces on the list, count + * them. This allows the caller to set ifmr->ifm_count + * to 0 on the first call to know how much space to + * allocate. + */ + for (; ep != NULL; ep = ep->ifm_list.le_next) + count++; + + /* + * We do the copyout on E2BIG, because that's + * just our way of telling userland that there + * are more. This is the behavior I've observed + * under BSD/OS 3.0 + */ + sticky = error; + if ((error == 0 || error == E2BIG) && ifmr->ifm_count != 0) { + error = copyout((caddr_t)kptr, + (caddr_t)ifmr->ifm_ulist, + ifmr->ifm_count * sizeof(int)); + } + + if (error == 0) + error = sticky; + + if (ifmr->ifm_count != 0) + FREE(kptr, M_TEMP); + + ifmr->ifm_count = count; + break; + } + + default: + return (EINVAL); + } + + return (error); +} + +/* + * Find media entry matching a given ifm word. 
+ * + */ +static struct ifmedia_entry * +ifmedia_match(ifm, target, mask) + struct ifmedia *ifm; + int target; + int mask; +{ + struct ifmedia_entry *match, *next; + + match = NULL; + mask = ~mask; + + for (next = ifm->ifm_list.lh_first; next != NULL; + next = next->ifm_list.le_next) { + if ((next->ifm_media & mask) == (target & mask)) { +#if defined(IFMEDIA_DEBUG) || defined(DIAGNOSTIC) + if (match) { + printf("ifmedia_match: multiple match for " + "0x%x/0x%x\n", target, mask); + } +#endif + match = next; + } + } + + return match; +} + +#ifdef IFMEDIA_DEBUG +struct ifmedia_description ifm_type_descriptions[] = + IFM_TYPE_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_ethernet_descriptions[] = + IFM_SUBTYPE_ETHERNET_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_ethernet_option_descriptions[] = + IFM_SUBTYPE_ETHERNET_OPTION_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_tokenring_descriptions[] = + IFM_SUBTYPE_TOKENRING_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_tokenring_option_descriptions[] = + IFM_SUBTYPE_TOKENRING_OPTION_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_fddi_descriptions[] = + IFM_SUBTYPE_FDDI_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_fddi_option_descriptions[] = + IFM_SUBTYPE_FDDI_OPTION_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_80211_descriptions[] = + IFM_SUBTYPE_IEEE80211_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_80211_option_descriptions[] = + IFM_SUBTYPE_IEEE80211_OPTION_DESCRIPTIONS; + +struct ifmedia_description ifm_subtype_shared_descriptions[] = + IFM_SUBTYPE_SHARED_DESCRIPTIONS; + +struct ifmedia_description ifm_shared_option_descriptions[] = + IFM_SHARED_OPTION_DESCRIPTIONS; + +struct ifmedia_type_to_subtype { + struct ifmedia_description *subtypes; + struct ifmedia_description *options; +}; + +/* must be in the same order as IFM_TYPE_DESCRIPTIONS */ +struct ifmedia_type_to_subtype ifmedia_types_to_subtypes[] = { + { + 
&ifm_subtype_ethernet_descriptions[0], + &ifm_subtype_ethernet_option_descriptions[0] + }, + { + &ifm_subtype_tokenring_descriptions[0], + &ifm_subtype_tokenring_option_descriptions[0] + }, + { + &ifm_subtype_fddi_descriptions[0], + &ifm_subtype_fddi_option_descriptions[0] + }, + { + &ifm_subtype_80211_descriptions[0], + &ifm_subtype_80211_option_descriptions[0] + }, +}; + +/* + * print a media word. + */ +static void +ifmedia_printword(ifmw) + int ifmw; +{ + struct ifmedia_description *desc; + struct ifmedia_type_to_subtype *ttos; + int seen_option = 0; + + /* Find the top-level interface type. */ + for (desc = ifm_type_descriptions, ttos = ifmedia_types_to_subtypes; + desc->ifmt_string != NULL; desc++, ttos++) + if (IFM_TYPE(ifmw) == desc->ifmt_word) + break; + if (desc->ifmt_string == NULL) { + printf("\n"); + return; + } + printf(desc->ifmt_string); + + /* + * Check for the shared subtype descriptions first, then the + * type-specific ones. + */ + for (desc = ifm_subtype_shared_descriptions; + desc->ifmt_string != NULL; desc++) + if (IFM_SUBTYPE(ifmw) == desc->ifmt_word) + goto got_subtype; + + for (desc = ttos->subtypes; desc->ifmt_string != NULL; desc++) + if (IFM_SUBTYPE(ifmw) == desc->ifmt_word) + break; + if (desc->ifmt_string == NULL) { + printf(" \n"); + return; + } + + got_subtype: + printf(" %s", desc->ifmt_string); + + /* + * Look for shared options. + */ + for (desc = ifm_shared_option_descriptions; + desc->ifmt_string != NULL; desc++) { + if (ifmw & desc->ifmt_word) { + if (seen_option == 0) + printf(" <"); + printf("%s%s", seen_option++ ? "," : "", + desc->ifmt_string); + } + } + + /* + * Look for subtype-specific options. + */ + for (desc = ttos->options; desc->ifmt_string != NULL; desc++) { + if (ifmw & desc->ifmt_word) { + if (seen_option == 0) + printf(" <"); + printf("%s%s", seen_option++ ? "," : "", + desc->ifmt_string); + } + } + printf("%s\n", seen_option ? 
">" : ""); +} +#endif /* IFMEDIA_DEBUG */ diff --git a/bsd/net/if_media.h b/bsd/net/if_media.h new file mode 100644 index 000000000..827081bfd --- /dev/null +++ b/bsd/net/if_media.h @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_media.h,v 1.3 1997/03/26 01:19:27 thorpej Exp $ */ + +/* + * Copyright (c) 1997 + * Jonathan Stone and Jason R. Thorpe. All rights reserved. + * + * This software is derived from information provided by Matt Thomas. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Jonathan Stone + * and Jason R. Thorpe for the NetBSD Project. + * 4. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NET_IF_MEDIA_H_ +#define _NET_IF_MEDIA_H_ + +/* + * Prototypes and definitions for BSD/OS-compatible network interface + * media selection. + * + * Where it is safe to do so, this code strays slightly from the BSD/OS + * design. Software which uses the API (device drivers, basically) + * shouldn't notice any difference. + * + * Many thanks to Matt Thomas for providing the information necessary + * to implement this interface. + */ + +#ifdef KERNEL + +#include + +/* + * Driver callbacks for media status and change requests. + */ +typedef int (*ifm_change_cb_t) __P((struct ifnet *ifp)); +typedef void (*ifm_stat_cb_t) __P((struct ifnet *ifp, struct ifmediareq *req)); + +/* + * In-kernel representation of a single supported media type. 
+ */ +struct ifmedia_entry { + LIST_ENTRY(ifmedia_entry) ifm_list; + int ifm_media; /* description of this media attachment */ + int ifm_data; /* for driver-specific use */ + void *ifm_aux; /* for driver-specific use */ +}; + +/* + * One of these goes into a network interface's softc structure. + * It is used to keep general media state. + */ +struct ifmedia { + int ifm_mask; /* mask of changes we don't care about */ + int ifm_media; /* current user-set media word */ + struct ifmedia_entry *ifm_cur; /* currently selected media */ + LIST_HEAD(, ifmedia_entry) ifm_list; /* list of all supported media */ + ifm_change_cb_t ifm_change; /* media change driver callback */ + ifm_stat_cb_t ifm_status; /* media status driver callback */ +}; + +/* Initialize an interface's struct if_media field. */ +void ifmedia_init __P((struct ifmedia *ifm, int dontcare_mask, + ifm_change_cb_t change_callback, ifm_stat_cb_t status_callback)); + +/* Add one supported medium to a struct ifmedia. */ +void ifmedia_add __P((struct ifmedia *ifm, int mword, int data, void *aux)); + +/* Add an array (of ifmedia_entry) media to a struct ifmedia. */ +void ifmedia_list_add(struct ifmedia *mp, struct ifmedia_entry *lp, + int count); + +/* Set default media type on initialization. */ +void ifmedia_set __P((struct ifmedia *ifm, int mword)); + +/* Common ioctl function for getting/setting media, called by driver. 
*/ +int ifmedia_ioctl __P((struct ifnet *ifp, struct ifreq *ifr, + struct ifmedia *ifm, u_long cmd)); + +#endif /*KERNEL */ + +/* + * if_media Options word: + * Bits Use + * ---- ------- + * 0-4 Media subtype + * 5-7 Media type + * 8-15 Type specific options + * 16-19 RFU + * 20-27 Shared (global) options + * 28-31 Instance + */ + +/* + * Ethernet + */ +#define IFM_ETHER 0x00000020 +#define IFM_10_T 3 /* 10BaseT - RJ45 */ +#define IFM_10_2 4 /* 10Base2 - Thinnet */ +#define IFM_10_5 5 /* 10Base5 - AUI */ +#define IFM_100_TX 6 /* 100BaseTX - RJ45 */ +#define IFM_100_FX 7 /* 100BaseFX - Fiber */ +#define IFM_100_T4 8 /* 100BaseT4 - 4 pair cat 3 */ +#define IFM_100_VG 9 /* 100VG-AnyLAN */ +#define IFM_100_T2 10 /* 100BaseT2 */ +#define IFM_1000_SX 11 /* 1000BaseSX - multi-mode fiber */ +#define IFM_10_STP 12 /* 10BaseT over shielded TP */ +#define IFM_10_FL 13 /* 10BaseFL - Fiber */ +#define IFM_1000_LX 14 /* 1000baseLX - single-mode fiber */ +#define IFM_1000_CX 15 /* 1000baseCX - 150ohm STP */ +#define IFM_1000_TX 16 /* 1000baseTX - 4 pair cat 5 */ +#define IFM_HPNA_1 17 /* HomePNA 1.0 (1Mb/s) */ + +/* + * Token ring + */ +#define IFM_TOKEN 0x00000040 +#define IFM_TOK_STP4 3 /* Shielded twisted pair 4m - DB9 */ +#define IFM_TOK_STP16 4 /* Shielded twisted pair 16m - DB9 */ +#define IFM_TOK_UTP4 5 /* Unshielded twisted pair 4m - RJ45 */ +#define IFM_TOK_UTP16 6 /* Unshielded twisted pair 16m - RJ45 */ +#define IFM_TOK_ETR 0x00000200 /* Early token release */ +#define IFM_TOK_SRCRT 0x00000400 /* Enable source routing features */ +#define IFM_TOK_ALLR 0x00000800 /* All routes / Single route bcast */ + +/* + * FDDI + */ +#define IFM_FDDI 0x00000060 +#define IFM_FDDI_SMF 3 /* Single-mode fiber */ +#define IFM_FDDI_MMF 4 /* Multi-mode fiber */ +#define IFM_FDDI_UTP 5 /* CDDI / UTP */ +#define IFM_FDDI_DA 0x00000100 /* Dual attach / single attach */ + +/* + * IEEE 802.11 Wireless + */ +#define IFM_IEEE80211 0x00000080 +#define IFM_IEEE80211_FH1 3 /* Frequency Hopping 1Mbps 
*/ +#define IFM_IEEE80211_FH2 4 /* Frequency Hopping 2Mbps */ +#define IFM_IEEE80211_DS2 5 /* Direct Sequence 2Mbps */ +#define IFM_IEEE80211_DS5 6 /* Direct Sequence 5Mbps*/ +#define IFM_IEEE80211_DS11 7 /* Direct Sequence 11Mbps*/ +#define IFM_IEEE80211_DS1 8 /* Direct Sequence 1Mbps */ +#define IFM_IEEE80211_ADHOC 0x00000100 /* Operate in Adhoc mode */ + +/* + * Shared media sub-types + */ +#define IFM_AUTO 0 /* Autoselect best media */ +#define IFM_MANUAL 1 /* Jumper/dipswitch selects media */ +#define IFM_NONE 2 /* Deselect all media */ + +/* + * Shared options + */ +#define IFM_FDX 0x00100000 /* Force full duplex */ +#define IFM_HDX 0x00200000 /* Force half duplex */ +#define IFM_FLOW 0x00400000 /* enable hardware flow control */ +#define IFM_FLAG0 0x01000000 /* Driver defined flag */ +#define IFM_FLAG1 0x02000000 /* Driver defined flag */ +#define IFM_FLAG2 0x04000000 /* Driver defined flag */ +#define IFM_LOOP 0x08000000 /* Put hardware in loopback */ + +/* + * Masks + */ +#define IFM_NMASK 0x000000e0 /* Network type */ +#define IFM_TMASK 0x0000001f /* Media sub-type */ +#define IFM_IMASK 0xf0000000 /* Instance */ +#define IFM_ISHIFT 28 /* Instance shift */ +#define IFM_OMASK 0x0000ff00 /* Type specific options */ +#define IFM_GMASK 0x0ff00000 /* Global options */ + +/* + * Status bits + */ +#define IFM_AVALID 0x00000001 /* Active bit valid */ +#define IFM_ACTIVE 0x00000002 /* Interface attached to working net */ + +/* + * Macros to extract various bits of information from the media word. + */ +#define IFM_TYPE(x) ((x) & IFM_NMASK) +#define IFM_SUBTYPE(x) ((x) & IFM_TMASK) +#define IFM_INST(x) (((x) & IFM_IMASK) >> IFM_ISHIFT) + +/* + * NetBSD extension not defined in the BSDI API. This is used in various + * places to get the canonical description for a given type/subtype. + * + * NOTE: all but the top-level type descriptions must contain NO whitespace! + * Otherwise, parsing these in ifconfig(8) would be a nightmare. 
+ */ +struct ifmedia_description { + int ifmt_word; /* word value; may be masked */ + const char *ifmt_string; /* description */ +}; + +#define IFM_TYPE_DESCRIPTIONS { \ + { IFM_ETHER, "Ethernet" }, \ + { IFM_TOKEN, "Token ring" }, \ + { IFM_FDDI, "FDDI" }, \ + { IFM_IEEE80211, "IEEE802.11" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_ETHERNET_DESCRIPTIONS { \ + { IFM_10_T, "10baseT/UTP" }, \ + { IFM_10_2, "10base2/BNC" }, \ + { IFM_10_5, "10base5/AUI" }, \ + { IFM_100_TX, "100baseTX" }, \ + { IFM_100_FX, "100baseFX" }, \ + { IFM_100_T4, "100baseT4" }, \ + { IFM_100_VG, "100baseVG" }, \ + { IFM_100_T2, "100baseT2" }, \ + { IFM_1000_SX, "1000baseSX" }, \ + { IFM_10_STP, "10baseSTP" }, \ + { IFM_10_FL, "10baseFL" }, \ + { IFM_1000_LX, "1000baseLX" }, \ + { IFM_1000_CX, "1000baseCX" }, \ + { IFM_1000_TX, "1000baseTX" }, \ + { IFM_HPNA_1, "HomePNA1" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_ETHERNET_ALIASES { \ + { IFM_10_T, "UTP" }, \ + { IFM_10_T, "10UTP" }, \ + { IFM_10_2, "BNC" }, \ + { IFM_10_2, "10BNC" }, \ + { IFM_10_5, "AUI" }, \ + { IFM_10_5, "10AUI" }, \ + { IFM_100_TX, "100TX" }, \ + { IFM_100_FX, "100FX" }, \ + { IFM_100_T4, "100T4" }, \ + { IFM_100_VG, "100VG" }, \ + { IFM_100_T2, "100T2" }, \ + { IFM_1000_SX, "1000SX" }, \ + { IFM_10_STP, "STP" }, \ + { IFM_10_STP, "10STP" }, \ + { IFM_10_FL, "FL" }, \ + { IFM_10_FL, "10FL" }, \ + { IFM_1000_LX, "1000LX" }, \ + { IFM_1000_CX, "1000CX" }, \ + { IFM_1000_TX, "1000TX" }, \ + { IFM_HPNA_1, "HPNA1" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_ETHERNET_OPTION_DESCRIPTIONS { \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_TOKENRING_DESCRIPTIONS { \ + { IFM_TOK_STP4, "DB9/4Mbit" }, \ + { IFM_TOK_STP16, "DB9/16Mbit" }, \ + { IFM_TOK_UTP4, "UTP/4Mbit" }, \ + { IFM_TOK_UTP16, "UTP/16Mbit" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_TOKENRING_ALIASES { \ + { IFM_TOK_STP4, "4STP" }, \ + { IFM_TOK_STP16, "16STP" }, \ + { IFM_TOK_UTP4, "4UTP" }, \ + { IFM_TOK_UTP16, "16UTP" }, \ + { 0, NULL }, \ +} + 
+#define IFM_SUBTYPE_TOKENRING_OPTION_DESCRIPTIONS { \ + { IFM_TOK_ETR, "EarlyTokenRelease" }, \ + { IFM_TOK_SRCRT, "SourceRouting" }, \ + { IFM_TOK_ALLR, "AllRoutes" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_FDDI_DESCRIPTIONS { \ + { IFM_FDDI_SMF, "Single-mode" }, \ + { IFM_FDDI_MMF, "Multi-mode" }, \ + { IFM_FDDI_UTP, "UTP" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_FDDI_ALIASES { \ + { IFM_FDDI_SMF, "SMF" }, \ + { IFM_FDDI_MMF, "MMF" }, \ + { IFM_FDDI_UTP, "CDDI" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_FDDI_OPTION_DESCRIPTIONS { \ + { IFM_FDDI_DA, "Dual-attach" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_IEEE80211_DESCRIPTIONS { \ + { IFM_IEEE80211_FH1, "FH1" }, \ + { IFM_IEEE80211_FH2, "FH2" }, \ + { IFM_IEEE80211_DS1, "DS1" }, \ + { IFM_IEEE80211_DS2, "DS2" }, \ + { IFM_IEEE80211_DS5, "DS5" }, \ + { IFM_IEEE80211_DS11, "DS11" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_IEEE80211_OPTION_DESCRIPTIONS { \ + { IFM_IEEE80211_ADHOC, "adhoc" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_SHARED_DESCRIPTIONS { \ + { IFM_AUTO, "autoselect" }, \ + { IFM_MANUAL, "manual" }, \ + { IFM_NONE, "none" }, \ + { 0, NULL }, \ +} + +#define IFM_SUBTYPE_SHARED_ALIASES { \ + { IFM_AUTO, "auto" }, \ + { 0, NULL }, \ +} + +#define IFM_SHARED_OPTION_DESCRIPTIONS { \ + { IFM_FDX, "full-duplex" }, \ + { IFM_HDX, "half-duplex" }, \ + { IFM_FLOW, "flow-control" }, \ + { IFM_FLAG0, "flag0" }, \ + { IFM_FLAG1, "flag1" }, \ + { IFM_FLAG2, "flag2" }, \ + { IFM_LOOP, "hw-loopback" }, \ + { 0, NULL }, \ +} + +#endif /* _NET_IF_MEDIA_H_ */ diff --git a/bsd/net/if_mib.c b/bsd/net/if_mib.c new file mode 100644 index 000000000..c57aa756b --- /dev/null +++ b/bsd/net/if_mib.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +#if NETMIBS + +/* + * A sysctl(3) MIB for generic interface information. This information + * is exported in the net.link.generic branch, which has the following + * structure: + * + * net.link.generic .system - system-wide control variables + * and statistics (node) + * .ifdata..general + * - what's in `struct ifdata' + * plus some other info + * .ifdata..linkspecific + * - a link-type-specific data + * structure (as might be used + * by an SNMP agent + * + * Perhaps someday we will make addresses accessible via this interface + * as well (then there will be four such...). The reason that the + * index comes before the last element in the name is because it + * seems more orthogonal that way, particularly with the possibility + * of other per-interface data living down here as well (e.g., integrated + * services stuff). + */ + +SYSCTL_DECL(_net_link_generic); +SYSCTL_NODE(_net_link_generic, IFMIB_SYSTEM, system, CTLFLAG_RW, 0, + "Variables global to all interfaces"); +SYSCTL_INT(_net_link_generic_system, IFMIB_IFCOUNT, ifcount, CTLFLAG_RD, + &if_index, 0, "Number of configured interfaces"); + +static int +sysctl_ifdata SYSCTL_HANDLER_ARGS /* XXX bad syntax! 
*/ +{ + int *name = (int *)arg1; + int error, ifnlen; + u_int namelen = arg2; + struct ifnet *ifp; + char workbuf[64]; + struct ifmibdata ifmd; + + if (namelen != 2) + return EINVAL; + + if (name[0] <= 0 || name[0] > if_index) + return ENOENT; + + ifp = ifnet_addrs[name[0] - 1]->ifa_ifp; + + switch(name[1]) { + default: + return ENOENT; + + case IFDATA_GENERAL: + /* + ifnlen = snprintf(workbuf, sizeof(workbuf), + "%s%d", ifp->if_name, ifp->if_unit); + if(ifnlen + 1 > sizeof ifmd.ifmd_name) { + return ENAMETOOLONG; + } else { + strcpy(ifmd.ifmd_name, workbuf); + } + */ + +#define COPY(fld) ifmd.ifmd_##fld = ifp->if_##fld + COPY(pcount); + COPY(flags); + COPY(data); +#undef COPY + ifmd.ifmd_snd_len = ifp->if_snd.ifq_len; + ifmd.ifmd_snd_maxlen = ifp->if_snd.ifq_maxlen; + ifmd.ifmd_snd_drops = ifp->if_snd.ifq_drops; + + error = SYSCTL_OUT(req, &ifmd, sizeof ifmd); + if (error || !req->newptr) + return error; + + error = SYSCTL_IN(req, &ifmd, sizeof ifmd); + if (error) + return error; + +#define DONTCOPY(fld) ifmd.ifmd_data.ifi_##fld = ifp->if_data.ifi_##fld + DONTCOPY(type); + DONTCOPY(physical); + DONTCOPY(addrlen); + DONTCOPY(hdrlen); + DONTCOPY(mtu); + DONTCOPY(metric); + DONTCOPY(baudrate); +#undef DONTCOPY +#define COPY(fld) ifp->if_##fld = ifmd.ifmd_##fld + COPY(data); + ifp->if_snd.ifq_maxlen = ifmd.ifmd_snd_maxlen; + ifp->if_snd.ifq_drops = ifmd.ifmd_snd_drops; +#undef COPY + break; + + case IFDATA_LINKSPECIFIC: + error = SYSCTL_OUT(req, ifp->if_linkmib, ifp->if_linkmiblen); + if (error || !req->newptr) + return error; + + error = SYSCTL_IN(req, ifp->if_linkmib, ifp->if_linkmiblen); + if (error) + return error; + + } + return 0; +} + +SYSCTL_NODE(_net_link_generic, IFMIB_IFDATA, ifdata, CTLFLAG_RW, + sysctl_ifdata, "Interface table"); + +#endif diff --git a/bsd/net/if_mib.h b/bsd/net/if_mib.h new file mode 100644 index 000000000..6e51fb2ab --- /dev/null +++ b/bsd/net/if_mib.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _NET_IF_MIB_H +#define _NET_IF_MIB_H 1 + +struct ifmibdata { + char ifmd_name[IFNAMSIZ]; /* name of interface */ + int ifmd_pcount; /* number of promiscuous listeners */ + int ifmd_flags; /* interface flags */ + int ifmd_snd_len; /* instantaneous length of send queue */ + int ifmd_snd_maxlen; /* maximum length of send queue */ + int ifmd_snd_drops; /* number of drops in send queue */ + int ifmd_filler[4]; /* for future expansion */ + struct if_data ifmd_data; /* generic information and statistics */ +}; + +/* + * sysctl MIB tags at the net.link.generic level + */ +#define IFMIB_SYSTEM 1 /* non-interface-specific */ +#define IFMIB_IFDATA 2 /* per-interface data table */ + +/* + * MIB tags for the various net.link.generic.ifdata tables + */ +#define IFDATA_GENERAL 1 /* generic stats for all kinds of ifaces */ +#define IFDATA_LINKSPECIFIC 2 /* specific to the type of interface */ + +/* + * MIB tags at the net.link.generic.system level + */ +#define IFMIB_IFCOUNT 1 /* number of interfaces configured */ + +/* + * MIB tags as the net.link level + * All of the other values are IFT_* names defined in if_types.h. + */ +#define NETLINK_GENERIC 0 /* functions not specific to a type of iface */ + +/* + * The reason why the IFDATA_LINKSPECIFIC stuff is not under the + * net.link. branches is twofold: + * 1) It's easier to code this way, and doesn't require duplication. + * 2) The fourth level under net.link. is ; that is to say, + * the net.link. 
tree instruments the adaptation layers between + * and a particular protocol family (e.g., net.link.ether.inet + * instruments ARP). This does not really leave room for anything else + * that needs to have a well-known number. + */ + +/* + * Link-specific MIB structures for various link types. + */ + +/* For IFT_ETHER, IFT_ISO88023, and IFT_STARLAN, as used by RFC 1650 */ +struct ifmib_iso_8802_3 { + u_int32_t dot3StatsAlignmentErrors; + u_int32_t dot3StatsFCSErrors; + u_int32_t dot3StatsSingleCollisionFrames; + u_int32_t dot3StatsMultipleCollisionFrames; + u_int32_t dot3StatsSQETestErrors; + u_int32_t dot3StatsDeferredTransmissions; + u_int32_t dot3StatsLateCollisions; + u_int32_t dot3StatsExcessiveCollisions; + u_int32_t dot3StatsInternalMacTransmitErrors; + u_int32_t dot3StatsCarrierSenseErrors; + u_int32_t dot3StatsFrameTooLongs; + u_int32_t dot3StatsInternalMacReceiveErrors; + u_int32_t dot3StatsEtherChipSet; + /* Matt Thomas wants this one, not included in RFC 1650: */ + u_int32_t dot3StatsMissedFrames; + + u_int32_t dot3StatsCollFrequencies[16]; /* NB: index origin */ + + u_int32_t dot3Compliance; +#define DOT3COMPLIANCE_STATS 1 +#define DOT3COMPLIANCE_COLLS 2 +}; + +/* + * Chipset identifiers are normally part of the vendor's enterprise MIB. + * However, we don't want to be trying to represent arbitrary-length + * OBJECT IDENTIFIERs here (ick!), and the right value is not necessarily + * obvious to the driver implementor. So, we define our own identification + * mechanism here, and let the agent writer deal with the translation. + */ +#define DOT3CHIPSET_VENDOR(x) ((x) >> 16) +#define DOT3CHIPSET_PART(x) ((x) & 0xffff) +#define DOT3CHIPSET(v,p) (((v) << 16) + ((p) & 0xffff)) + +/* Driver writers! Add your vendors here! */ +enum dot3Vendors { + dot3VendorAMD = 1, + dot3VendorIntel = 2, + dot3VendorNational = 4, + dot3VendorFujitsu = 5, + dot3VendorDigital = 6, + dot3VendorWesternDigital = 7 +}; + +/* Driver writers! Add your chipsets here! 
*/ +enum { + dot3ChipSetAMD7990 = 1, + dot3ChipSetAMD79900 = 2, + dot3ChipSetAMD79C940 = 3 +}; + +enum { + dot3ChipSetIntel82586 = 1, + dot3ChipSetIntel82596 = 2, + dot3ChipSetIntel82557 = 3 +}; + +enum { + dot3ChipSetNational8390 = 1, + dot3ChipSetNationalSonic = 2 +}; + +enum { + dot3ChipSetFujitsu86950 = 1 +}; + +enum { + dot3ChipSetDigitalDC21040 = 1, + dot3ChipSetDigitalDC21140 = 2, + dot3ChipSetDigitalDC21041 = 3, + dot3ChipSetDigitalDC21140A = 4, + dot3ChipSetDigitalDC21142 = 5 +}; + +enum { + dot3ChipSetWesternDigital83C690 = 1, + dot3ChipSetWesternDigital83C790 = 2 +}; +/* END of Ethernet-link MIB stuff */ + +/* + * Put other types of interface MIBs here, or in interface-specific + * header files if convenient ones already exist. + */ +#endif /* _NET_IF_MIB_H */ diff --git a/bsd/net/if_ppp.h b/bsd/net/if_ppp.h new file mode 100644 index 000000000..50e6969ec --- /dev/null +++ b/bsd/net/if_ppp.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * if_ppp.h - Point-to-Point Protocol definitions. + * + * Copyright (c) 1989 Carnegie Mellon University. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by Carnegie Mellon University. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +#ifndef _IF_PPP_H_ +#define _IF_PPP_H_ + +/* XXX this used to be self-contained. */ +#include +#include + +/* + * Packet sizes + */ +#define PPP_MTU 1500 /* Default MTU (size of Info field) */ +#define PPP_MAXMRU 65000 /* Largest MRU we allow */ +#define PPP_MAXMTU 16384 /* Largest MTU we allow */ + +/* + * Bit definitions for flags. + */ +#define SC_COMP_PROT 0x00000001 /* protocol compression (output) */ +#define SC_COMP_AC 0x00000002 /* header compression (output) */ +#define SC_COMP_TCP 0x00000004 /* TCP (VJ) compression (output) */ +#define SC_NO_TCP_CCID 0x00000008 /* disable VJ connection-id comp. */ +#define SC_REJ_COMP_AC 0x00000010 /* reject adrs/ctrl comp. on input */ +#define SC_REJ_COMP_TCP 0x00000020 /* reject TCP (VJ) comp. 
on input */ +#define SC_CCP_OPEN 0x00000040 /* Look at CCP packets */ +#define SC_CCP_UP 0x00000080 /* May send/recv compressed packets */ +#define SC_DEBUG 0x00010000 /* enable debug messages */ +#define SC_LOG_INPKT 0x00020000 /* log contents of good pkts recvd */ +#define SC_LOG_OUTPKT 0x00040000 /* log contents of pkts sent */ +#define SC_LOG_RAWIN 0x00080000 /* log all chars received */ +#define SC_LOG_FLUSH 0x00100000 /* log all chars flushed */ +#define SC_RCV_B7_0 0x01000000 /* have rcvd char with bit 7 = 0 */ +#define SC_RCV_B7_1 0x02000000 /* have rcvd char with bit 7 = 1 */ +#define SC_RCV_EVNP 0x04000000 /* have rcvd char with even parity */ +#define SC_RCV_ODDP 0x08000000 /* have rcvd char with odd parity */ +#define SC_MASK 0x0fff00ff /* bits that user can change */ + +/* + * State bits in sc_flags, not changeable by user. + */ +#define SC_TIMEOUT 0x00000400 /* timeout is currently pending */ +#define SC_VJ_RESET 0x00000800 /* need to reset VJ decomp */ +#define SC_COMP_RUN 0x00001000 /* compressor has been initiated */ +#define SC_DECOMP_RUN 0x00002000 /* decompressor has been initiated */ +#define SC_DC_ERROR 0x00004000 /* non-fatal decomp error detected */ +#define SC_DC_FERROR 0x00008000 /* fatal decomp error detected */ +#define SC_TBUSY 0x10000000 /* xmitter doesn't need a packet yet */ +#define SC_PKTLOST 0x20000000 /* have lost or dropped a packet */ +#define SC_FLUSH 0x40000000 /* flush input until next PPP_FLAG */ +#define SC_ESCAPED 0x80000000 /* saw a PPP_ESCAPE */ + +/* + * Ioctl definitions. + */ + +struct npioctl { + int protocol; /* PPP procotol, e.g. PPP_IP */ + enum NPmode mode; +}; + +/* Structure describing a CCP configuration option, for PPPIOCSCOMPRESS */ +struct ppp_option_data { + u_char *ptr; + u_int length; + int transmit; +}; + +struct ifpppstatsreq { + char ifr_name[IFNAMSIZ]; + struct ppp_stats stats; +}; + +struct ifpppcstatsreq { + char ifr_name[IFNAMSIZ]; + struct ppp_comp_stats stats; +}; + +/* + * Ioctl definitions. 
+ */ + +#define PPPIOCGFLAGS _IOR('t', 90, int) /* get configuration flags */ +#define PPPIOCSFLAGS _IOW('t', 89, int) /* set configuration flags */ +#define PPPIOCGASYNCMAP _IOR('t', 88, int) /* get async map */ +#define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */ +#define PPPIOCGUNIT _IOR('t', 86, int) /* get ppp unit number */ +#define PPPIOCGRASYNCMAP _IOR('t', 85, int) /* get receive async map */ +#define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */ +#define PPPIOCGMRU _IOR('t', 83, int) /* get max receive unit */ +#define PPPIOCSMRU _IOW('t', 82, int) /* set max receive unit */ +#define PPPIOCSMAXCID _IOW('t', 81, int) /* set VJ max slot ID */ +#define PPPIOCGXASYNCMAP _IOR('t', 80, ext_accm) /* get extended ACCM */ +#define PPPIOCSXASYNCMAP _IOW('t', 79, ext_accm) /* set extended ACCM */ +#define PPPIOCXFERUNIT _IO('t', 78) /* transfer PPP unit */ +#define PPPIOCSCOMPRESS _IOW('t', 77, struct ppp_option_data) +#define PPPIOCGNPMODE _IOWR('t', 76, struct npioctl) /* get NP mode */ +#define PPPIOCSNPMODE _IOW('t', 75, struct npioctl) /* set NP mode */ +#define PPPIOCGIDLE _IOR('t', 74, struct ppp_idle) /* get idle time */ +#define PPPIOCSPASS _IOW('t', 71, struct bpf_program) /* set pass filter */ +#define PPPIOCSACTIVE _IOW('t', 70, struct bpf_program) /* set active filt */ + +/* PPPIOC[GS]MTU are alternatives to SIOC[GS]IFMTU, used under Ultrix */ +#define PPPIOCGMTU _IOR('t', 73, int) /* get interface MTU */ +#define PPPIOCSMTU _IOW('t', 72, int) /* set interface MTU */ + +/* + * These two are interface ioctls so that pppstats can do them on + * a socket without having to open the serial device. 
+ */ +#define SIOCGPPPSTATS _IOWR('i', 123, struct ifpppstatsreq) +#define SIOCGPPPCSTATS _IOWR('i', 122, struct ifpppcstatsreq) + +#if !defined(ifr_mtu) +#define ifr_mtu ifr_ifru.ifru_metric +#endif + +#endif /* _IF_PPP_H_ */ diff --git a/bsd/net/if_pppvar.h b/bsd/net/if_pppvar.h new file mode 100644 index 000000000..a302b64ad --- /dev/null +++ b/bsd/net/if_pppvar.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * if_pppvar.h - private structures and declarations for PPP. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. 
+ * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + * + * Copyright (c) 1989 Carnegie Mellon University. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by Carnegie Mellon University. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +/* + * Supported network protocols. These values are used for + * indexing sc_npmode. + */ +#define NP_IP 0 /* Internet Protocol */ +#define NUM_NP 1 /* Number of NPs. */ + +/* + * Structure describing each ppp unit. 
+ */ +struct ppp_softc { + struct ifnet sc_if; /* network-visible interface */ +/*hi*/ u_int sc_flags; /* control/status bits; see if_ppp.h */ + struct callout_handle sc_ch; /* Used for scheduling timeouts */ + void *sc_devp; /* pointer to device-dep structure */ + void (*sc_start) __P((struct ppp_softc *)); /* start output proc */ + void (*sc_ctlp) __P((struct ppp_softc *)); /* rcvd control pkt */ + void (*sc_relinq) __P((struct ppp_softc *)); /* relinquish ifunit */ + void (*sc_setmtu) __P((struct ppp_softc *)); /* set mtu */ + short sc_mru; /* max receive unit */ + pid_t sc_xfer; /* used in transferring unit */ +/*hi*/ struct ifqueue sc_rawq; /* received packets */ +/*net*/ struct ifqueue sc_inq; /* queue of input packets for daemon */ +/*net*/ struct ifqueue sc_fastq; /* interactive output packet q */ + struct mbuf *sc_npqueue; /* output packets not to be sent yet */ + struct mbuf **sc_npqtail; /* ptr to last next ptr in npqueue */ + struct pppstat sc_stats; /* count of bytes/pkts sent/rcvd */ + enum NPmode sc_npmode[NUM_NP]; /* what to do with each NP */ + struct compressor *sc_xcomp; /* transmit compressor */ + void *sc_xc_state; /* transmit compressor state */ + struct compressor *sc_rcomp; /* receive decompressor */ + void *sc_rc_state; /* receive decompressor state */ + time_t sc_last_sent; /* time (secs) last NP pkt sent */ + time_t sc_last_recv; /* time (secs) last NP pkt rcvd */ +#if PPP_FILTER + struct bpf_program sc_pass_filt; /* filter for packets to pass */ + struct bpf_program sc_active_filt; /* filter for "non-idle" packets */ +#endif /* PPP_FILTER */ +#if VJC + struct slcompress *sc_comp; /* vjc control buffer */ +#endif + + /* Device-dependent part for async lines. 
*/ + ext_accm sc_asyncmap; /* async control character map */ + u_long sc_rasyncmap; /* receive async control char map */ + struct mbuf *sc_outm; /* mbuf chain currently being output */ + struct mbuf *sc_m; /* pointer to input mbuf chain */ + struct mbuf *sc_mc; /* pointer to current input mbuf */ + char *sc_mp; /* ptr to next char in input mbuf */ + short sc_ilen; /* length of input packet so far */ + u_short sc_fcs; /* FCS so far (input) */ + u_short sc_outfcs; /* FCS so far for output packet */ + u_char sc_rawin[16]; /* chars as received */ + int sc_rawin_count; /* # in sc_rawin */ +}; + +extern struct ppp_softc ppp_softc[NPPP]; + +struct ppp_softc *pppalloc __P((pid_t pid)); +void pppdealloc __P((struct ppp_softc *sc)); +int pppioctl __P((struct ppp_softc *sc, u_long cmd, caddr_t data, + int flag, struct proc *p)); +int pppoutput __P((struct ifnet *ifp, struct mbuf *m0, + struct sockaddr *dst, struct rtentry *rtp)); +void ppp_restart __P((struct ppp_softc *sc)); +void ppppktin __P((struct ppp_softc *sc, struct mbuf *m, int lost)); +struct mbuf *ppp_dequeue __P((struct ppp_softc *sc)); diff --git a/bsd/net/if_sl.c b/bsd/net/if_sl.c new file mode 100644 index 000000000..1ee15c39a --- /dev/null +++ b/bsd/net/if_sl.c @@ -0,0 +1,897 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1987, 1989, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_sl.c 8.9 (Berkeley) 1/9/95 + */ + +/* + * Serial Line interface + * + * Rick Adams + * Center for Seismic Studies + * 1300 N 17th Street, Suite 1450 + * Arlington, Virginia 22209 + * (703)276-7900 + * rick@seismo.ARPA + * seismo!rick + * + * Pounded on heavily by Chris Torek (chris@mimsy.umd.edu, umcp-cs!chris). + * N.B.: this belongs in netinet, not net, the way it stands now. + * Should have a link-layer type designation, but wouldn't be + * backwards-compatible. + * + * Converted to 4.3BSD Beta by Chris Torek. + * Other changes made at Berkeley, based in part on code by Kirk Smith. + * W. Jolitz added slip abort. + * + * Hacked almost beyond recognition by Van Jacobson (van@helios.ee.lbl.gov). + * Added priority queuing for "interactive" traffic; hooks for TCP + * header compression; ICMP filtering (at 2400 baud, some cretin + * pinging you can use up all your bandwidth). Made low clist behavior + * more robust and slightly less likely to hang serial line. + * Sped up a bunch of things. + * + * Note that splimp() is used throughout to block both (tty) input + * interrupts and network activity; thus, splimp must be >= spltty. + */ + +#include "sl.h" +#if NSL > 0 + +#include "bpfilter.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if INET +#include +#include +#include +#include +#else +Huh? 
Slip without inet? +#endif + +#include +#include +#include + +#if NBPFILTER > 0 +#include +#include +#endif + +/* + * SLMAX is a hard limit on input packet size. To simplify the code + * and improve performance, we require that packets fit in an mbuf + * cluster, and if we get a compressed packet, there's enough extra + * room to expand the header into a max length tcp/ip header (128 + * bytes). So, SLMAX can be at most + * MCLBYTES - 128 + * + * SLMTU is a hard limit on output packet size. To insure good + * interactive response, SLMTU wants to be the smallest size that + * amortizes the header cost. (Remember that even with + * type-of-service queuing, we have to wait for any in-progress + * packet to finish. I.e., we wait, on the average, 1/2 * mtu / + * cps, where cps is the line speed in characters per second. + * E.g., 533ms wait for a 1024 byte MTU on a 9600 baud line. The + * average compressed header size is 6-8 bytes so any MTU > 90 + * bytes will give us 90% of the line bandwidth. A 100ms wait is + * tolerable (500ms is not), so want an MTU around 296. (Since TCP + * will send 256 byte segments (to allow for 40 byte headers), the + * typical packet size on the wire will be around 260 bytes). In + * 4.3tahoe+ systems, we can set an MTU in a route so we do that & + * leave the interface MTU relatively high (so we don't IP fragment + * when acting as a gateway to someone using a stupid MTU). + * + * Similar considerations apply to SLIP_HIWAT: It's the amount of + * data that will be queued 'downstream' of us (i.e., in clists + * waiting to be picked up by the tty output interrupt). If we + * queue a lot of data downstream, it's immune to our t.o.s. queuing. + * E.g., if SLIP_HIWAT is 1024, the interactive traffic in mixed + * telnet/ftp will see a 1 sec wait, independent of the mtu (the + * wait is dependent on the ftp window size but that's typically + * 1k - 4k). 
So, we want SLIP_HIWAT just big enough to amortize + * the cost (in idle time on the wire) of the tty driver running + * off the end of its clists & having to call back slstart for a + * new packet. For a tty interface with any buffering at all, this + * cost will be zero. Even with a totally brain dead interface (like + * the one on a typical workstation), the cost will be <= 1 character + * time. So, setting SLIP_HIWAT to ~100 guarantees that we'll lose + * at most 1% while maintaining good interactive response. + */ +#if NBPFILTER > 0 +#define BUFOFFSET (128+sizeof(struct ifnet **)+SLIP_HDRLEN) +#else +#define BUFOFFSET (128+sizeof(struct ifnet **)) +#endif +#define SLMAX (MCLBYTES - BUFOFFSET) +#define SLBUFSIZE (SLMAX + BUFOFFSET) +#define SLMTU 296 +#define SLIP_HIWAT roundup(50,CBSIZE) + +/* + * SLIP ABORT ESCAPE MECHANISM: + * (inspired by HAYES modem escape arrangement) + * 1sec escape 1sec escape 1sec escape { 1sec escape 1sec escape } + * within window time signals a "soft" exit from slip mode by remote end + * if the IFF_DEBUG flag is on. + */ +#define ABT_ESC '\033' /* can't be t_intr - distant host must know it*/ +#define ABT_IDLE 1 /* in seconds - idle before an escape */ +#define ABT_COUNT 3 /* count of escapes for abort */ +#define ABT_WINDOW (ABT_COUNT*2+2) /* in seconds - time to count */ + +struct sl_softc sl_softc[NSL]; + +#define FRAME_END 0xc0 /* Frame End */ +#define FRAME_ESCAPE 0xdb /* Frame Esc */ +#define TRANS_FRAME_END 0xdc /* transposed frame end */ +#define TRANS_FRAME_ESCAPE 0xdd /* transposed frame esc */ + +static int slinit __P((struct sl_softc *)); +static struct mbuf *sl_btom __P((struct sl_softc *, int)); + +/* + * Called from boot code to establish sl interfaces. 
+ */ +void +slattach() +{ + register struct sl_softc *sc; + register int i = 0; + + for (sc = sl_softc; i < NSL; sc++) { + sc->sc_if.if_name = "sl"; + sc->sc_if.if_family = APPLE_IF_FAM_SLIP; + sc->sc_if.if_next = NULL; + sc->sc_if.if_unit = i++; + sc->sc_if.if_mtu = SLMTU; + sc->sc_if.if_flags = + IFF_POINTOPOINT | SC_AUTOCOMP | IFF_MULTICAST; + sc->sc_if.if_type = IFT_SLIP; + sc->sc_if.if_ioctl = slioctl; + sc->sc_if.if_output = sloutput; + sc->sc_if.if_snd.ifq_maxlen = 50; + sc->sc_fastq.ifq_maxlen = 32; + if_attach(&sc->sc_if); +#if NBPFILTER > 0 + bpfattach(&sc->sc_bpf, &sc->sc_if, DLT_SLIP, SLIP_HDRLEN); +#endif + } +} + +static int +slinit(sc) + register struct sl_softc *sc; +{ + register caddr_t p; + + if (sc->sc_ep == (u_char *) 0) { + MCLALLOC(p, M_WAIT); + if (p) + sc->sc_ep = (u_char *)p + SLBUFSIZE; + else { + printf("sl%d: can't allocate buffer\n", sc - sl_softc); + sc->sc_if.if_flags &= ~IFF_UP; + return (0); + } + } + sc->sc_buf = sc->sc_ep - SLMAX; + sc->sc_mp = sc->sc_buf; + sl_compress_init(&sc->sc_comp, -1); + return (1); +} + +/* + * Line specific open routine. + * Attach the given tty to the first available sl unit. + */ +/* ARGSUSED */ +int +slopen(dev, tp) + dev_t dev; + register struct tty *tp; +{ + struct proc *p = curproc; /* XXX */ + register struct sl_softc *sc; + register int nsl; + int error; + int s; + + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + + if (tp->t_line == SLIPDISC) + return (0); + + for (nsl = NSL, sc = sl_softc; --nsl >= 0; sc++) + if (sc->sc_ttyp == NULL) { + if (slinit(sc) == 0) + return (ENOBUFS); + tp->t_sc = (caddr_t)sc; + sc->sc_ttyp = tp; + sc->sc_if.if_baudrate = tp->t_ospeed; + ttyflush(tp, FREAD | FWRITE); + + /* + * make sure tty output queue is large enough + * to hold a full-sized packet (including frame + * end, and a possible extra frame end). full-sized + * packet occupies a max of 2*SLMTU bytes (because + * of possible escapes), and add two on for frame + * ends. 
+ */ + s = spltty(); + if (tp->t_outq.c_cn < 2*SLMTU+2) { + sc->sc_oldbufsize = tp->t_outq.c_cn; + sc->sc_oldbufquot = tp->t_outq.c_cq != 0; + + clfree(&tp->t_outq); + error = clalloc(&tp->t_outq, 3*SLMTU, 0); + if (error) { + splx(s); + return(error); + } + } else + sc->sc_oldbufsize = sc->sc_oldbufquot = 0; + splx(s); + + return (0); + } + return (ENXIO); +} + +/* + * Line specific close routine. + * Detach the tty from the sl unit. + */ +void +slclose(tp) + struct tty *tp; +{ + register struct sl_softc *sc; + int s; + + ttywflush(tp); + s = splimp(); /* actually, max(spltty, splnet) */ + tp->t_line = 0; + tp->t_state = 0; + sc = (struct sl_softc *)tp->t_sc; + if (sc != NULL) { + if_down(&sc->sc_if); + sc->sc_ttyp = NULL; + tp->t_sc = NULL; + MCLFREE((caddr_t)(sc->sc_ep - SLBUFSIZE)); + sc->sc_ep = 0; + sc->sc_mp = 0; + sc->sc_buf = 0; + } + /* if necessary, install a new outq buffer of the appropriate size */ + if (sc->sc_oldbufsize != 0) { + clfree(&tp->t_outq); + clalloc(&tp->t_outq, sc->sc_oldbufsize, sc->sc_oldbufquot); + } + splx(s); +} + +/* + * Line specific (tty) ioctl routine. + * Provide a way to get the sl unit number. + */ +/* ARGSUSED */ +int +sltioctl(tp, cmd, data, flag) + struct tty *tp; + u_long cmd; + caddr_t data; + int flag; +{ + struct sl_softc *sc = (struct sl_softc *)tp->t_sc; + + switch (cmd) { + case SLIOCGUNIT: + *(int *)data = sc->sc_if.if_unit; + break; + + default: + return (-1); + } + return (0); +} + +/* + * Queue a packet. Start transmission if not active. + * Compression happens in slstart; if we do it here, IP TOS + * will cause us to not compress "background" packets, because + * ordering gets trashed. It can be done for all packets in slstart. 
+ */ +int +sloutput(ifp, m, dst, rtp) + struct ifnet *ifp; + register struct mbuf *m; + struct sockaddr *dst; + struct rtentry *rtp; +{ + register struct sl_softc *sc = &sl_softc[ifp->if_unit]; + register struct ip *ip; + register struct ifqueue *ifq; + int s; + + /* + * `Cannot happen' (see slioctl). Someday we will extend + * the line protocol to support other address families. + */ + if (dst->sa_family != AF_INET) { + printf("sl%d: af%d not supported\n", sc->sc_if.if_unit, + dst->sa_family); + m_freem(m); + sc->sc_if.if_noproto++; + return (EAFNOSUPPORT); + } + + if (sc->sc_ttyp == NULL) { + m_freem(m); + return (ENETDOWN); /* sort of */ + } + if ((sc->sc_ttyp->t_state & TS_CARR_ON) == 0 && + (sc->sc_ttyp->t_cflag & CLOCAL) == 0) { + m_freem(m); + return (EHOSTUNREACH); + } + ifq = &sc->sc_if.if_snd; + ip = mtod(m, struct ip *); + if (sc->sc_if.if_flags & SC_NOICMP && ip->ip_p == IPPROTO_ICMP) { + m_freem(m); + return (ENETRESET); /* XXX ? */ + } + if (ip->ip_tos & IPTOS_LOWDELAY) + ifq = &sc->sc_fastq; + s = splimp(); + if (sc->sc_oqlen && sc->sc_ttyp->t_outq.c_cc == sc->sc_oqlen) { + /* if output's been stalled for too long, and restart */ + timersub(&time, &sc->sc_if.if_lastchange, &tv); + if (tv.tv_sec > 0) { + sc->sc_otimeout++; + slstart(sc->sc_ttyp); + } + } + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + splx(s); + sc->sc_if.if_oerrors++; + return (ENOBUFS); + } + IF_ENQUEUE(ifq, m); + sc->sc_if.if_lastchange = time; + if ((sc->sc_oqlen = sc->sc_ttyp->t_outq.c_cc) == 0) + slstart(sc->sc_ttyp); + splx(s); + return (0); +} + +/* + * Start output on interface. Get another datagram + * to send from the interface queue and map it to + * the interface before starting output. 
+ */
+void
+slstart(tp)
+	register struct tty *tp;
+{
+	register struct sl_softc *sc = (struct sl_softc *)tp->t_sc;
+	register struct mbuf *m;
+	register u_char *cp;
+	register struct ip *ip;
+	int s;
+	struct mbuf *m2;
+#if NBPFILTER > 0
+	u_char bpfbuf[SLMTU + SLIP_HDRLEN];
+	register int len;
+#endif
+
+	/* Loop until the tty output queue fills or the IP queues drain. */
+	for (;;) {
+		/*
+		 * If there is more in the output queue, just send it now.
+		 * We are being called in lieu of ttstart and must do what
+		 * it would.
+		 */
+		if (tp->t_outq.c_cc != 0) {
+			(*tp->t_oproc)(tp);
+			if (tp->t_outq.c_cc > SLIP_HIWAT)
+				return;
+		}
+		/*
+		 * This happens briefly when the line shuts down.
+		 */
+		if (sc == NULL)
+			return;
+
+		/*
+		 * Do not remove the packet from the IP queue if it
+		 * doesn't look like the packet will fit into the
+		 * current serial output queue, with a packet full of
+		 * escapes this could be as bad as SLMTU*2+2.
+		 */
+		if (tp->t_outq.c_cn - tp->t_outq.c_cc < 2*SLMTU+2)
+			return;
+
+		/*
+		 * Get a packet and send it to the interface.
+		 * The fast (interactive) queue is always drained first.
+		 */
+		s = splimp();
+		IF_DEQUEUE(&sc->sc_fastq, m);
+		if (m)
+			sc->sc_if.if_omcasts++;		/* XXX */
+		else
+			IF_DEQUEUE(&sc->sc_if.if_snd, m);
+		splx(s);
+		if (m == NULL)
+			return;
+
+		/*
+		 * We do the header compression here rather than in sloutput
+		 * because the packets will be out of order if we are using TOS
+		 * queueing, and the connection id compression will get
+		 * munged when this happens.
+		 */
+#if NBPFILTER > 0
+		if (sc->sc_bpf) {
+			/*
+			 * We need to save the TCP/IP header before it's
+			 * compressed.  To avoid complicated code, we just
+			 * copy the entire packet into a stack buffer (since
+			 * this is a serial line, packets should be short
+			 * and/or the copy should be negligible cost compared
+			 * to the packet transmission time).
+			 */
+			register struct mbuf *m1 = m;
+			register u_char *cp = bpfbuf + SLIP_HDRLEN;
+
+			len = 0;
+			do {
+				register int mlen = m1->m_len;
+
+				bcopy(mtod(m1, caddr_t), cp, mlen);
+				cp += mlen;
+				len += mlen;
+			} while (m1 = m1->m_next);	/* assignment intended */
+		}
+#endif
+		/* VJ-compress outbound TCP if enabled; the type bits are
+		 * OR'ed into the first byte of the (compressed) header. */
+		if ((ip = mtod(m, struct ip *))->ip_p == IPPROTO_TCP) {
+			if (sc->sc_if.if_flags & SC_COMPRESS)
+				*mtod(m, u_char *) |= sl_compress_tcp(m, ip,
+				    &sc->sc_comp, 1);
+		}
+#if NBPFILTER > 0
+		if (sc->sc_bpf) {
+			/*
+			 * Put the SLIP pseudo-"link header" in place.  The
+			 * compressed header is now at the beginning of the
+			 * mbuf.
+			 */
+			bpfbuf[SLX_DIR] = SLIPDIR_OUT;
+			bcopy(mtod(m, caddr_t), &bpfbuf[SLX_CHDR], CHDR_LEN);
+			BPF_TAP(sc->sc_bpf, bpfbuf, len + SLIP_HDRLEN);
+		}
+#endif
+		sc->sc_if.if_lastchange = time;
+
+		/*
+		 * The extra FRAME_END will start up a new packet, and thus
+		 * will flush any accumulated garbage.  We do this whenever
+		 * the line may have been idle for some time.
+		 */
+		if (tp->t_outq.c_cc == 0) {
+			++sc->sc_if.if_obytes;
+			(void) putc(FRAME_END, &tp->t_outq);
+		}
+
+		/* Copy the mbuf chain into the clist, byte-stuffing the
+		 * two reserved framing characters as we go. */
+		while (m) {
+			register u_char *ep;
+
+			cp = mtod(m, u_char *); ep = cp + m->m_len;
+			while (cp < ep) {
+				/*
+				 * Find out how many bytes in the string we can
+				 * handle without doing something special.
+				 */
+				register u_char *bp = cp;
+
+				while (cp < ep) {
+					switch (*cp++) {
+					case FRAME_ESCAPE:
+					case FRAME_END:
+						--cp;
+						goto out;
+					}
+				}
+				out:
+				if (cp > bp) {
+					/*
+					 * Put n characters at once
+					 * into the tty output queue.
+					 */
+					if (b_to_q((u_char *)bp, cp - bp,
+					    &tp->t_outq))
+						break;
+					sc->sc_if.if_obytes += cp - bp;
+				}
+				/*
+				 * If there are characters left in the mbuf,
+				 * the first one must be special..
+				 * Put it out in a different form.
+				 */
+				if (cp < ep) {
+					if (putc(FRAME_ESCAPE, &tp->t_outq))
+						break;
+					if (putc(*cp++ == FRAME_ESCAPE ?
+					   TRANS_FRAME_ESCAPE : TRANS_FRAME_END,
+					   &tp->t_outq)) {
+						/* second putc failed: back out
+						 * the escape so the stream
+						 * stays consistent */
+						(void) unputc(&tp->t_outq);
+						break;
+					}
+					sc->sc_if.if_obytes += 2;
+				}
+			}
+			MFREE(m, m2);
+			m = m2;
+		}
+
+		if (putc(FRAME_END, &tp->t_outq)) {
+			/*
+			 * Not enough room.  Remove a char to make room
+			 * and end the packet normally.
+			 * If you get many collisions (more than one or two
+			 * a day) you probably do not have enough clists
+			 * and you should increase "nclist" in param.c.
+			 */
+			(void) unputc(&tp->t_outq);
+			(void) putc(FRAME_END, &tp->t_outq);
+			sc->sc_if.if_collisions++;
+		} else {
+			++sc->sc_if.if_obytes;
+			sc->sc_if.if_opackets++;
+		}
+	}
+}
+
+/*
+ * Copy data buffer to mbuf chain; add ifnet pointer.
+ */
+static struct mbuf *
+sl_btom(sc, len)
+	register struct sl_softc *sc;
+	register int len;
+{
+	register struct mbuf *m;
+
+	MGETHDR(m, M_DONTWAIT, MT_DATA);
+	if (m == NULL)
+		return (NULL);
+
+	/*
+	 * If we have more than MHLEN bytes, it's cheaper to
+	 * queue the cluster we just filled & allocate a new one
+	 * for the input buffer.  Otherwise, fill the mbuf we
+	 * allocated above.  Note that code in the input routine
+	 * guarantees that packet will fit in a cluster.
+	 */
+	if (len >= MHLEN) {
+		MCLGET(m, M_DONTWAIT);
+		if ((m->m_flags & M_EXT) == 0) {
+			/*
+			 * we couldn't get a cluster - if memory's this
+			 * low, it's time to start dropping packets.
+			 */
+			(void) m_free(m);
+			return (NULL);
+		}
+		/* Hand the filled receive buffer to the mbuf and keep the
+		 * freshly-allocated cluster as the new input buffer. */
+		sc->sc_ep = mtod(m, u_char *) + SLBUFSIZE;
+		m->m_data = (caddr_t)sc->sc_buf;
+		m->m_ext.ext_buf = (caddr_t)((long)sc->sc_buf &~ MCLOFSET);
+	} else
+		bcopy((caddr_t)sc->sc_buf, mtod(m, caddr_t), len);
+
+	m->m_len = len;
+	m->m_pkthdr.len = len;
+	m->m_pkthdr.rcvif = &sc->sc_if;
+	return (m);
+}
+
+/*
+ * tty interface receiver interrupt.
+ */
+void
+slinput(c, tp)
+	register int c;
+	register struct tty *tp;
+{
+	register struct sl_softc *sc;
+	register struct mbuf *m;
+	register int len;
+	int s;
+#if NBPFILTER > 0
+	u_char chdr[CHDR_LEN];
+#endif
+
+	tk_nin++;
+	sc = (struct sl_softc *)tp->t_sc;
+	if (sc == NULL)
+		return;
+	/* A line error or carrier loss poisons the current frame; the
+	 * SC_ERROR flag makes the next FRAME_END discard it. */
+	if (c & TTY_ERRORMASK || ((tp->t_state & TS_CARR_ON) == 0 &&
+	    (tp->t_cflag & CLOCAL) == 0)) {
+		sc->sc_flags |= SC_ERROR;
+		return;
+	}
+	c &= TTY_CHARMASK;
+
+	++sc->sc_if.if_ibytes;
+
+	/* When debugging is enabled, a burst of abort-escape characters
+	 * within the window tears the line down (see slclose). */
+	if (sc->sc_if.if_flags & IFF_DEBUG) {
+
+		if (c == ABT_ESC) {
+			/*
+			 * If we have a previous abort, see whether
+			 * this one is within the time limit.
+			 */
+			if (sc->sc_abortcount &&
+			    time.tv_sec >= sc->sc_starttime + ABT_WINDOW)
+				sc->sc_abortcount = 0;
+			/*
+			 * If we see an abort after "idle" time, count it;
+			 * record when the first abort escape arrived.
+			 */
+			if (time.tv_sec >= sc->sc_lasttime + ABT_IDLE) {
+				if (++sc->sc_abortcount == 1)
+					sc->sc_starttime = time.tv_sec;
+				if (sc->sc_abortcount >= ABT_COUNT) {
+					slclose(tp);
+					return;
+				}
+			}
+		} else
+			sc->sc_abortcount = 0;
+		sc->sc_lasttime = time.tv_sec;
+	}
+
+	switch (c) {
+
+	case TRANS_FRAME_ESCAPE:
+		/* Un-stuff: preceding FRAME_ESCAPE means literal 0xdb. */
+		if (sc->sc_escape)
+			c = FRAME_ESCAPE;
+		break;
+
+	case TRANS_FRAME_END:
+		/* Un-stuff: preceding FRAME_ESCAPE means literal 0xc0. */
+		if (sc->sc_escape)
+			c = FRAME_END;
+		break;
+
+	case FRAME_ESCAPE:
+		sc->sc_escape = 1;
+		return;
+
+	case FRAME_END:
+		/* End of frame: hand the accumulated buffer up. */
+		if(sc->sc_flags & SC_ERROR) {
+			sc->sc_flags &= ~SC_ERROR;
+			goto newpack;
+		}
+		len = sc->sc_mp - sc->sc_buf;
+		if (len < 3)
+			/* less than min length packet - ignore */
+			goto newpack;
+
+#if NBPFILTER > 0
+		if (sc->sc_bpf) {
+			/*
+			 * Save the compressed header, so we
+			 * can tack it on later.  Note that we
+			 * will end up copying garbage in some
+			 * cases but this is okay.  We remember
+			 * where the buffer started so we can
+			 * compute the new header length.
+			 */
+			bcopy(sc->sc_buf, chdr, CHDR_LEN);
+		}
+#endif
+
+		if ((c = (*sc->sc_buf & 0xf0)) != (IPVERSION << 4)) {
+			if (c & 0x80)
+				c = TYPE_COMPRESSED_TCP;
+			else if (c == TYPE_UNCOMPRESSED_TCP)
+				*sc->sc_buf &= 0x4f; /* XXX */
+			/*
+			 * We've got something that's not an IP packet.
+			 * If compression is enabled, try to decompress it.
+			 * Otherwise, if `auto-enable' compression is on and
+			 * it's a reasonable packet, decompress it and then
+			 * enable compression.  Otherwise, drop it.
+			 */
+			if (sc->sc_if.if_flags & SC_COMPRESS) {
+				len = sl_uncompress_tcp(&sc->sc_buf, len,
+				    (u_int)c, &sc->sc_comp);
+				if (len <= 0)
+					goto error;
+			} else if ((sc->sc_if.if_flags & SC_AUTOCOMP) &&
+			    c == TYPE_UNCOMPRESSED_TCP && len >= 40) {
+				len = sl_uncompress_tcp(&sc->sc_buf, len,
+				    (u_int)c, &sc->sc_comp);
+				if (len <= 0)
+					goto error;
+				sc->sc_if.if_flags |= SC_COMPRESS;
+			} else
+				goto error;
+		}
+#if NBPFILTER > 0
+		if (sc->sc_bpf) {
+			/*
+			 * Put the SLIP pseudo-"link header" in place.
+			 * We couldn't do this any earlier since
+			 * decompression probably moved the buffer
+			 * pointer.  Then, invoke BPF.
+			 */
+			register u_char *hp = sc->sc_buf - SLIP_HDRLEN;
+
+			hp[SLX_DIR] = SLIPDIR_IN;
+			bcopy(chdr, &hp[SLX_CHDR], CHDR_LEN);
+			BPF_TAP(sc->sc_bpf, hp, len + SLIP_HDRLEN);
+		}
+#endif
+		m = sl_btom(sc, len);
+		if (m == NULL)
+			goto error;
+
+		sc->sc_if.if_ipackets++;
+		sc->sc_if.if_lastchange = time;
+		s = splimp();
+		if (IF_QFULL(&ipintrq)) {
+			IF_DROP(&ipintrq);
+			sc->sc_if.if_ierrors++;
+			sc->sc_if.if_iqdrops++;
+			m_freem(m);
+		} else {
+			IF_ENQUEUE(&ipintrq, m);
+			schednetisr(NETISR_IP);
+		}
+		splx(s);
+		goto newpack;
+	}
+	/* Ordinary data byte: append to the input buffer if it fits. */
+	if (sc->sc_mp < sc->sc_ep) {
+		*sc->sc_mp++ = c;
+		sc->sc_escape = 0;
+		return;
+	}
+
+	/* can't put lower; would miss an extra frame */
+	sc->sc_flags |= SC_ERROR;
+
+error:
+	sc->sc_if.if_ierrors++;
+newpack:
+	sc->sc_mp = sc->sc_buf = sc->sc_ep - SLMAX;
+	sc->sc_escape = 0;
+}
+
+/*
+ * Process an ioctl request.
+ */
+int
+slioctl(ifp, cmd, data)
+	register struct ifnet *ifp;
+	u_long cmd;
+	caddr_t data;
+{
+	register struct ifaddr *ifa = (struct ifaddr *)data;
+	register struct ifreq *ifr;
+	register int s = splimp(), error = 0;
+
+	switch (cmd) {
+
+	case SIOCSIFADDR:
+		/* Only AF_INET addresses are accepted; setting one
+		 * marks the interface up. */
+		if (ifa->ifa_addr->sa_family == AF_INET)
+			ifp->if_flags |= IFF_UP;
+		else
+			error = EAFNOSUPPORT;
+		break;
+
+	case SIOCSIFDSTADDR:
+		if (ifa->ifa_addr->sa_family != AF_INET)
+			error = EAFNOSUPPORT;
+		break;
+
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+		/* Multicast list changes are accepted for AF_INET but
+		 * otherwise ignored (SLIP has no multicast). */
+		ifr = (struct ifreq *)data;
+		if (ifr == 0) {
+			error = EAFNOSUPPORT;		/* XXX */
+			break;
+		}
+		switch (ifr->ifr_addr.sa_family) {
+
+#if INET
+		case AF_INET:
+			break;
+#endif
+
+		default:
+			error = EAFNOSUPPORT;
+			break;
+		}
+		break;
+
+	default:
+		error = EINVAL;
+	}
+	splx(s);
+	return (error);
+}
+#endif
diff --git a/bsd/net/if_slvar.h b/bsd/net/if_slvar.h
new file mode 100644
index 000000000..25786201c
--- /dev/null
+++ b/bsd/net/if_slvar.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)if_slvar.h 8.3 (Berkeley) 2/1/94 + * + */ + +#ifndef _NET_IF_SLVAR_H_ +#define _NET_IF_SLVAR_H_ + +#include +#include +#include + + +/* + * Definitions for SLIP interface data structures + * + * (This exists so programs like slstats can get at the definition + * of sl_softc.) + */ +struct sl_softc { + struct ifnet sc_if; /* network-visible interface */ + struct ifqueue sc_fastq; /* interactive output queue */ + struct tty *sc_ttyp; /* pointer to tty structure */ + u_char *sc_mp; /* pointer to next available buf char */ + u_char *sc_ep; /* pointer to last available buf char */ + u_char *sc_buf; /* input buffer */ + u_int sc_flags; /* see below */ + u_int sc_escape; /* =1 if last char input was FRAME_ESCAPE */ + long sc_lasttime; /* last time a char arrived */ + long sc_abortcount; /* number of abort escape chars */ + long sc_starttime; /* time of first abort in window */ + u_int sc_keepalive; /* time to decide link hang */ + u_int sc_outfill; /* time to send FRAME_END when output idle */ + /* + * Handles for scheduling outfill and + * keepalive timeouts. + */ +#if FB3x + struct callout_handle sc_ofhandle; + struct callout_handle sc_kahandle; +#endif + struct slcompress sc_comp; /* tcp compression data */ +}; + +/* internal flags */ +#define SC_ERROR 0x0001 /* had an input error */ +#define SC_OUTWAIT 0x0002 /* waiting for output fill */ +#define SC_KEEPALIVE 0x0004 /* input keepalive */ +#define SC_STATIC 0x0008 /* it is static unit */ + +/* visible flags */ +#define SC_COMPRESS IFF_LINK0 /* compress TCP traffic */ +#define SC_NOICMP IFF_LINK1 /* suppress ICMP traffic */ +#define SC_AUTOCOMP IFF_LINK2 /* auto-enable TCP compression */ + + +#endif diff --git a/bsd/net/if_sppp.h b/bsd/net/if_sppp.h new file mode 100644 index 000000000..713ef0832 --- /dev/null +++ b/bsd/net/if_sppp.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Defines for synchronous PPP/Cisco link level subroutines. + * + * Copyright (C) 1994 Cronyx Ltd. + * Author: Serge Vakulenko, + * + * Heavily revamped to conform to RFC 1661. + * Copyright (C) 1997, Joerg Wunsch. + * + * This software is distributed with NO WARRANTIES, not even the implied + * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Authors grant any other persons or organizations permission to use + * or modify this software as long as this message is kept with the software, + * all derivative works or modified versions. 
+ * + * From: Version 2.0, Fri Oct 6 20:39:21 MSK 1995 + * + */ + +#ifndef _NET_IF_SPPP_H_ +#define _NET_IF_SPPP_H_ 1 + +#define IDX_LCP 0 /* idx into state table */ + +struct slcp { + u_long opts; /* LCP options to send (bitfield) */ + u_long magic; /* local magic number */ + u_long mru; /* our max receive unit */ + u_long their_mru; /* their max receive unit */ + u_long protos; /* bitmask of protos that are started */ + u_char echoid; /* id of last keepalive echo request */ + /* restart max values, see RFC 1661 */ + int timeout; + int max_terminate; + int max_configure; + int max_failure; +}; + +#define IDX_IPCP 1 /* idx into state table */ + +struct sipcp { + u_long opts; /* IPCP options to send (bitfield) */ + u_int flags; +#define IPCP_HISADDR_SEEN 1 /* have seen his address already */ +#define IPCP_MYADDR_DYN 2 /* my address is dynamically assigned */ +#define IPCP_MYADDR_SEEN 4 /* have seen his address already */ +}; + +#define AUTHNAMELEN 32 +#define AUTHKEYLEN 16 + +struct sauth { + u_short proto; /* authentication protocol to use */ + u_short flags; +#define AUTHFLAG_NOCALLOUT 1 /* do not require authentication on */ + /* callouts */ +#define AUTHFLAG_NORECHALLENGE 2 /* do not re-challenge CHAP */ + u_char name[AUTHNAMELEN]; /* system identification name */ + u_char secret[AUTHKEYLEN]; /* secret password */ + u_char challenge[AUTHKEYLEN]; /* random challenge */ +}; + +#define IDX_PAP 2 +#define IDX_CHAP 3 + +#define IDX_COUNT (IDX_CHAP + 1) /* bump this when adding cp's! */ + +/* + * Don't change the order of this. Ordering the phases this way allows + * for a comparision of ``pp_phase >= PHASE_AUTHENTICATE'' in order to + * know whether LCP is up. 
+ */ +enum ppp_phase { + PHASE_DEAD, PHASE_ESTABLISH, PHASE_TERMINATE, + PHASE_AUTHENTICATE, PHASE_NETWORK +}; + +struct sppp { + /* NB: pp_if _must_ be first */ + struct ifnet pp_if; /* network interface data */ + struct ifqueue pp_fastq; /* fast output queue */ + struct ifqueue pp_cpq; /* PPP control protocol queue */ + struct sppp *pp_next; /* next interface in keepalive list */ + u_int pp_flags; /* use Cisco protocol instead of PPP */ + u_short pp_alivecnt; /* keepalive packets counter */ + u_short pp_loopcnt; /* loopback detection counter */ + u_long pp_seq; /* local sequence number */ + u_long pp_rseq; /* remote sequence number */ + enum ppp_phase pp_phase; /* phase we're currently in */ + int state[IDX_COUNT]; /* state machine */ + u_char confid[IDX_COUNT]; /* id of last configuration request */ + int rst_counter[IDX_COUNT]; /* restart counter */ + int fail_counter[IDX_COUNT]; /* negotiation failure counter */ + struct callout_handle ch[IDX_COUNT]; /* per-proto and if callouts */ + struct callout_handle pap_my_to_ch; /* PAP needs one more... */ + struct slcp lcp; /* LCP params */ + struct sipcp ipcp; /* IPCP params */ + struct sauth myauth; /* auth params, i'm peer */ + struct sauth hisauth; /* auth params, i'm authenticator */ + /* + * These functions are filled in by sppp_attach(), and are + * expected to be used by the lower layer (hardware) drivers + * in order to communicate the (un)availability of the + * communication link. Lower layer drivers that are always + * ready to communicate (like hardware HDLC) can shortcut + * pp_up from pp_tls, and pp_down from pp_tlf. + */ + void (*pp_up)(struct sppp *sp); + void (*pp_down)(struct sppp *sp); + /* + * These functions need to be filled in by the lower layer + * (hardware) drivers if they request notification from the + * PPP layer whether the link is actually required. They + * correspond to the tls and tlf actions. 
+ */ + void (*pp_tls)(struct sppp *sp); + void (*pp_tlf)(struct sppp *sp); + /* + * These (optional) functions may be filled by the hardware + * driver if any notification of established connections + * (currently: IPCP up) is desired (pp_con) or any internal + * state change of the interface state machine should be + * signaled for monitoring purposes (pp_chg). + */ + void (*pp_con)(struct sppp *sp); + void (*pp_chg)(struct sppp *sp, int new_state); + /* These two fields are for use by the lower layer */ + void *pp_lowerp; + int pp_loweri; +}; + +#define PP_KEEPALIVE 0x01 /* use keepalive protocol */ +#define PP_CISCO 0x02 /* use Cisco protocol instead of PPP */ + /* 0x04 was PP_TIMO */ +#define PP_CALLIN 0x08 /* we are being called */ +#define PP_NEEDAUTH 0x10 /* remote requested authentication */ + + +#define PP_MTU 1500 /* default/minimal MRU */ +#define PP_MAX_MRU 2048 /* maximal MRU we want to negotiate */ + +/* + * Definitions to pass struct sppp data down into the kernel using the + * SIOC[SG]IFGENERIC ioctl interface. + * + * In order to use this, create a struct spppreq, fill in the cmd + * field with SPPPIOGDEFS, and put the address of this structure into + * the ifr_data portion of a struct ifreq. Pass this struct to a + * SIOCGIFGENERIC ioctl. Then replace the cmd field by SPPPIOCDEFS, + * modify the defs field as desired, and pass the struct ifreq now + * to a SIOCSIFGENERIC ioctl. 
+ */ + +#define SPPPIOGDEFS ((caddr_t)(('S' << 24) + (1 << 16) + sizeof(struct sppp))) +#define SPPPIOSDEFS ((caddr_t)(('S' << 24) + (2 << 16) + sizeof(struct sppp))) + +struct spppreq { + int cmd; + struct sppp defs; +}; + +#ifdef KERNEL +void sppp_attach (struct ifnet *ifp); +void sppp_detach (struct ifnet *ifp); +void sppp_input (struct ifnet *ifp, struct mbuf *m); +int sppp_ioctl (struct ifnet *ifp, u_long cmd, void *data); +struct mbuf *sppp_dequeue (struct ifnet *ifp); +struct mbuf *sppp_pick(struct ifnet *ifp); +int sppp_isempty (struct ifnet *ifp); +void sppp_flush (struct ifnet *ifp); +#endif + +#endif /* _NET_IF_SPPP_H_ */ diff --git a/bsd/net/if_spppsubr.c b/bsd/net/if_spppsubr.c new file mode 100644 index 000000000..01f2f3e67 --- /dev/null +++ b/bsd/net/if_spppsubr.c @@ -0,0 +1,4295 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Synchronous PPP/Cisco link level subroutines. + * Keepalive protocol implemented in both Cisco and PPP modes. + * + * Copyright (C) 1994-1996 Cronyx Engineering Ltd. 
+ * Author: Serge Vakulenko, + * + * Heavily revamped to conform to RFC 1661. + * Copyright (C) 1997, Joerg Wunsch. + * + * This software is distributed with NO WARRANTIES, not even the implied + * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Authors grant any other persons or organisations permission to use + * or modify this software as long as this message is kept with the software, + * all derivative works or modified versions. + * + * From: Version 2.4, Thu Apr 30 17:17:21 MSD 1997 + * + */ + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include "opt_inet.h" +#include "opt_ipx.h" +#endif + +#ifdef NetBSD1_3 +# if NetBSD1_3 > 6 +# include "opt_inet.h" +# include "opt_iso.h" +# endif +#endif + +#include +#include +#include +#include +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include +#endif +#include +#include + + +#if defined (__OpenBSD__) +#include +#else +#include +#endif + +#include +#include +#include +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include +#endif +#if defined (__NetBSD__) || defined (__OpenBSD__) +#include /* XXX for softnet */ +#endif + +#include + +#if INET +#include +#include +#include +#include +#include +# if defined (__FreeBSD__) || defined (__OpenBSD__) +# include +# else +# include +# endif +#else +# error Huh? sppp without INET? +#endif + +#if IPX +#include +#include +#endif + +#if NS +#include +#include +#endif + +#if ISO +#include +#include +#include +#include +#endif + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +# define UNTIMEOUT(fun, arg, handle) untimeout(fun, arg, handle) +# define TIMEOUT(fun, arg1, arg2, handle) handle = timeout(fun, arg1, arg2) +# define IOCTL_CMD_T u_long +#else +# define UNTIMEOUT(fun, arg, handle) untimeout(fun, arg) +# define TIMEOUT(fun, arg1, arg2, handle) timeout(fun, arg1, arg2) +# define IOCTL_CMD_T int +#endif + +#define MAXALIVECNT 3 /* max. 
alive packets */ + +/* + * Interface flags that can be set in an ifconfig command. + * + * Setting link0 will make the link passive, i.e. it will be marked + * as being administrative openable, but won't be opened to begin + * with. Incoming calls will be answered, or subsequent calls with + * -link1 will cause the administrative open of the LCP layer. + * + * Setting link1 will cause the link to auto-dial only as packets + * arrive to be sent. + * + * Setting IFF_DEBUG will syslog the option negotiation and state + * transitions at level kern.debug. Note: all logs consistently look + * like + * + * : + * + * with being something like "bppp0", and + * being one of "lcp", "ipcp", "cisco", "chap", "pap", etc. + */ + +#define IFF_PASSIVE IFF_LINK0 /* wait passively for connection */ +#define IFF_AUTO IFF_LINK1 /* auto-dial on output */ + +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_IP 0x0021 /* Internet Protocol */ +#define PPP_ISO 0x0023 /* ISO OSI Protocol */ +#define PPP_XNS 0x0025 /* Xerox NS Protocol */ +#define PPP_IPX 0x002b /* Novell IPX Protocol */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#define PPP_CHAP 0xc223 /* Challenge-Handshake Auth Protocol */ +#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */ + +#define CONF_REQ 1 /* PPP configure request */ +#define CONF_ACK 2 /* PPP configure acknowledge */ +#define CONF_NAK 3 /* PPP configure negative ack */ +#define CONF_REJ 4 /* PPP configure reject */ +#define TERM_REQ 5 /* PPP terminate request */ +#define TERM_ACK 6 /* PPP terminate acknowledge */ +#define CODE_REJ 7 /* PPP code reject */ +#define PROTO_REJ 8 /* PPP protocol reject */ +#define ECHO_REQ 9 /* PPP echo request */ +#define ECHO_REPLY 10 /* PPP echo reply */ +#define DISC_REQ 11 /* PPP discard request */ + +#define LCP_OPT_MRU 1 /* maximum receive unit */ +#define 
LCP_OPT_ASYNC_MAP 2 /* async control character map */ +#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */ +#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */ +#define LCP_OPT_MAGIC 5 /* magic number */ +#define LCP_OPT_RESERVED 6 /* reserved */ +#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */ +#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */ + +#define IPCP_OPT_ADDRESSES 1 /* both IP addresses; deprecated */ +#define IPCP_OPT_COMPRESSION 2 /* IP compression protocol (VJ) */ +#define IPCP_OPT_ADDRESS 3 /* local IP address */ + +#define PAP_REQ 1 /* PAP name/password request */ +#define PAP_ACK 2 /* PAP acknowledge */ +#define PAP_NAK 3 /* PAP fail */ + +#define CHAP_CHALLENGE 1 /* CHAP challenge request */ +#define CHAP_RESPONSE 2 /* CHAP challenge response */ +#define CHAP_SUCCESS 3 /* CHAP response ok */ +#define CHAP_FAILURE 4 /* CHAP response failed */ + +#define CHAP_MD5 5 /* hash algorithm - MD5 */ + +#define CISCO_MULTICAST 0x8f /* Cisco multicast address */ +#define CISCO_UNICAST 0x0f /* Cisco unicast address */ +#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */ +#define CISCO_ADDR_REQ 0 /* Cisco address request */ +#define CISCO_ADDR_REPLY 1 /* Cisco address reply */ +#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */ + +/* states are named and numbered according to RFC 1661 */ +#define STATE_INITIAL 0 +#define STATE_STARTING 1 +#define STATE_CLOSED 2 +#define STATE_STOPPED 3 +#define STATE_CLOSING 4 +#define STATE_STOPPING 5 +#define STATE_REQ_SENT 6 +#define STATE_ACK_RCVD 7 +#define STATE_ACK_SENT 8 +#define STATE_OPENED 9 + +struct ppp_header { + u_char address; + u_char control; + u_short protocol; +}; +#define PPP_HEADER_LEN sizeof (struct ppp_header) + +struct lcp_header { + u_char type; + u_char ident; + u_short len; +}; +#define LCP_HEADER_LEN sizeof (struct lcp_header) + +struct cisco_packet { + u_long type; + u_long par1; + u_long par2; + u_short rel; + u_short time0; + u_short time1; 
+}; +#define CISCO_PACKET_LEN 18 + +/* + * We follow the spelling and capitalization of RFC 1661 here, to make + * it easier comparing with the standard. Please refer to this RFC in + * case you can't make sense out of these abbreviation; it will also + * explain the semantics related to the various events and actions. + */ +struct cp { + u_short proto; /* PPP control protocol number */ + u_char protoidx; /* index into state table in struct sppp */ + u_char flags; +#define CP_LCP 0x01 /* this is the LCP */ +#define CP_AUTH 0x02 /* this is an authentication protocol */ +#define CP_NCP 0x04 /* this is a NCP */ +#define CP_QUAL 0x08 /* this is a quality reporting protocol */ + const char *name; /* name of this control protocol */ + /* event handlers */ + void (*Up)(struct sppp *sp); + void (*Down)(struct sppp *sp); + void (*Open)(struct sppp *sp); + void (*Close)(struct sppp *sp); + void (*TO)(void *sp); + int (*RCR)(struct sppp *sp, struct lcp_header *h, int len); + void (*RCN_rej)(struct sppp *sp, struct lcp_header *h, int len); + void (*RCN_nak)(struct sppp *sp, struct lcp_header *h, int len); + /* actions */ + void (*tlu)(struct sppp *sp); + void (*tld)(struct sppp *sp); + void (*tls)(struct sppp *sp); + void (*tlf)(struct sppp *sp); + void (*scr)(struct sppp *sp); +}; + +static struct sppp *spppq; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static struct callout_handle keepalive_ch; +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#define SPP_FMT "%s%d: " +#define SPP_ARGS(ifp) (ifp)->if_name, (ifp)->if_unit +#else +#define SPP_FMT "%s: " +#define SPP_ARGS(ifp) (ifp)->if_xname +#endif + +/* + * The following disgusting hack gets around the problem that IP TOS + * can't be set yet. We want to put "interactive" traffic on a high + * priority queue. To decide if traffic is interactive, we check that + * a) it is TCP and b) one of its ports is telnet, rlogin or ftp control. + * + * XXX is this really still necessary? 
 - joerg -
 */

/*
 * TCP ports regarded as interactive, indexed by the port's low three
 * bits: 513 = rlogin, 21 = ftp control, 23 = telnet.  Used by
 * INTERACTIVE() to sort such traffic into the fast output queue.
 */
static u_short interactive_ports[8] = {
	0, 513, 0, 0,
	0, 21, 0, 23,
};
#define INTERACTIVE(p) (interactive_ports[(p) & 7] == (p))

/* almost every function needs these */
#define STDDCL \
	struct ifnet *ifp = &sp->pp_if; \
	int debug = ifp->if_flags & IFF_DEBUG

/* Output entry point, installed as ifp->if_output by sppp_attach(). */
static int sppp_output(struct ifnet *ifp, struct mbuf *m,
		       struct sockaddr *dst, struct rtentry *rt);

/* Cisco HDLC keepalive handling. */
static void sppp_cisco_send(struct sppp *sp, int type, long par1, long par2);
static void sppp_cisco_input(struct sppp *sp, struct mbuf *m);

/* Generic control-protocol machinery (RFC 1661-style automaton). */
static void sppp_cp_input(const struct cp *cp, struct sppp *sp,
			  struct mbuf *m);
static void sppp_cp_send(struct sppp *sp, u_short proto, u_char type,
			 u_char ident, u_short len, void *data);
/* static void sppp_cp_timeout(void *arg); */
static void sppp_cp_change_state(const struct cp *cp, struct sppp *sp,
				 int newstate);
static void sppp_auth_send(const struct cp *cp,
			   struct sppp *sp, unsigned int type, unsigned int id,
			   ...);

/* State-machine events shared by all control protocols. */
static void sppp_up_event(const struct cp *cp, struct sppp *sp);
static void sppp_down_event(const struct cp *cp, struct sppp *sp);
static void sppp_open_event(const struct cp *cp, struct sppp *sp);
static void sppp_close_event(const struct cp *cp, struct sppp *sp);
static void sppp_to_event(const struct cp *cp, struct sppp *sp);

/* Do-nothing callback for descriptor slots with no action. */
static void sppp_null(struct sppp *sp);

/* LCP (link control protocol) callbacks. */
static void sppp_lcp_init(struct sppp *sp);
static void sppp_lcp_up(struct sppp *sp);
static void sppp_lcp_down(struct sppp *sp);
static void sppp_lcp_open(struct sppp *sp);
static void sppp_lcp_close(struct sppp *sp);
static void sppp_lcp_TO(void *sp);
static int sppp_lcp_RCR(struct sppp *sp, struct lcp_header *h, int len);
static void sppp_lcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
static void sppp_lcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
static void sppp_lcp_tlu(struct sppp *sp);
static void sppp_lcp_tld(struct sppp *sp);
static void sppp_lcp_tls(struct sppp *sp);
static void sppp_lcp_tlf(struct sppp *sp);
static void sppp_lcp_scr(struct sppp *sp);
static void sppp_lcp_check_and_close(struct sppp *sp);
static int sppp_ncp_check(struct sppp *sp);

/* IPCP (IP control protocol) callbacks. */
static void sppp_ipcp_init(struct sppp *sp);
static void sppp_ipcp_up(struct sppp *sp);
static void sppp_ipcp_down(struct sppp *sp);
static void sppp_ipcp_open(struct sppp *sp);
static void sppp_ipcp_close(struct sppp *sp);
static void sppp_ipcp_TO(void *sp);
static int sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len);
static void sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len);
static void sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len);
static void sppp_ipcp_tlu(struct sppp *sp);
static void sppp_ipcp_tld(struct sppp *sp);
static void sppp_ipcp_tls(struct sppp *sp);
static void sppp_ipcp_tlf(struct sppp *sp);
static void sppp_ipcp_scr(struct sppp *sp);

/* PAP (password authentication protocol) callbacks. */
static void sppp_pap_input(struct sppp *sp, struct mbuf *m);
static void sppp_pap_init(struct sppp *sp);
static void sppp_pap_open(struct sppp *sp);
static void sppp_pap_close(struct sppp *sp);
static void sppp_pap_TO(void *sp);
static void sppp_pap_my_TO(void *sp);
static void sppp_pap_tlu(struct sppp *sp);
static void sppp_pap_tld(struct sppp *sp);
static void sppp_pap_scr(struct sppp *sp);

/* CHAP (challenge-handshake authentication protocol) callbacks. */
static void sppp_chap_input(struct sppp *sp, struct mbuf *m);
static void sppp_chap_init(struct sppp *sp);
static void sppp_chap_open(struct sppp *sp);
static void sppp_chap_close(struct sppp *sp);
static void sppp_chap_TO(void *sp);
static void sppp_chap_tlu(struct sppp *sp);
static void sppp_chap_tld(struct sppp *sp);
static void sppp_chap_scr(struct sppp *sp);

/* Miscellaneous helpers: naming for logs, parameter/address handling. */
static const char *sppp_auth_type_name(u_short proto, u_char type);
static const char *sppp_cp_type_name(u_char type);
static const char *sppp_dotted_quad(u_long addr);
static const char *sppp_ipcp_opt_name(u_char opt);
static const char *sppp_lcp_opt_name(u_char opt);
static const char *sppp_phase_name(enum ppp_phase phase);
static const char *sppp_proto_name(u_short proto);
static const char *sppp_state_name(int state);
static int sppp_params(struct sppp *sp, u_long cmd, void *data);
static int sppp_strnlen(u_char *p, int max);
static void sppp_get_ip_addrs(struct sppp *sp, u_long *src, u_long *dst,
			      u_long *srcmask);
static void sppp_keepalive(void *dummy);
static void sppp_phase_network(struct sppp *sp);
static void sppp_print_bytes(const u_char *p, u_short len);
static void sppp_print_string(const char *p, u_short len);
static void sppp_qflush(struct ifqueue *ifq);
static void sppp_set_ip_addr(struct sppp *sp, u_long src);

/* our control protocol descriptors */
static const struct cp lcp = {
	PPP_LCP, IDX_LCP, CP_LCP, "lcp",
	sppp_lcp_up, sppp_lcp_down, sppp_lcp_open, sppp_lcp_close,
	sppp_lcp_TO, sppp_lcp_RCR, sppp_lcp_RCN_rej, sppp_lcp_RCN_nak,
	sppp_lcp_tlu, sppp_lcp_tld, sppp_lcp_tls, sppp_lcp_tlf,
	sppp_lcp_scr
};

static const struct cp ipcp = {
	PPP_IPCP, IDX_IPCP, CP_NCP, "ipcp",
	sppp_ipcp_up, sppp_ipcp_down, sppp_ipcp_open, sppp_ipcp_close,
	sppp_ipcp_TO, sppp_ipcp_RCR, sppp_ipcp_RCN_rej, sppp_ipcp_RCN_nak,
	sppp_ipcp_tlu, sppp_ipcp_tld, sppp_ipcp_tls, sppp_ipcp_tlf,
	sppp_ipcp_scr
};

/* The auth protocols have no RCR/RCN handlers; unused slots are 0. */
static const struct cp pap = {
	PPP_PAP, IDX_PAP, CP_AUTH, "pap",
	sppp_null, sppp_null, sppp_pap_open, sppp_pap_close,
	sppp_pap_TO, 0, 0, 0,
	sppp_pap_tlu, sppp_pap_tld, sppp_null, sppp_null,
	sppp_pap_scr
};

static const struct cp chap = {
	PPP_CHAP, IDX_CHAP, CP_AUTH, "chap",
	sppp_null, sppp_null, sppp_chap_open, sppp_chap_close,
	sppp_chap_TO, 0, 0, 0,
	sppp_chap_tlu, sppp_chap_tld, sppp_null, sppp_null,
	sppp_chap_scr
};

/* Descriptor table, indexed by the IDX_* protocol index. */
static const struct cp *cps[IDX_COUNT] = {
	&lcp,			/* IDX_LCP */
	&ipcp,			/* IDX_IPCP */
	&pap,			/* IDX_PAP */
	&chap,			/* IDX_CHAP */
};


 /*
  * Exported functions, comprising our interface to the lower layer.
  */

/*
 * Process the received packet.
 */
void
sppp_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ppp_header *h;
	struct ifqueue *inq = 0;	/* protocol input queue, 0 = consumed/dropped */
	int s;
	struct sppp *sp = (struct sppp *)ifp;
	int debug = ifp->if_flags & IFF_DEBUG;

	if (ifp->if_flags & IFF_UP)
		/* Count received bytes, add FCS and one flag */
		ifp->if_ibytes += m->m_pkthdr.len + 3;

	if (m->m_pkthdr.len <= PPP_HEADER_LEN) {
		/* Too small packet, drop it. */
		if (debug)
			log(LOG_DEBUG,
			    SPP_FMT "input packet is too small, %d bytes\n",
			    SPP_ARGS(ifp), m->m_pkthdr.len);
	  drop:
		++ifp->if_ierrors;
		++ifp->if_iqdrops;
		m_freem (m);
		return;
	}

	/* Get PPP header. */
	h = mtod (m, struct ppp_header*);
	m_adj (m, PPP_HEADER_LEN);

	/* Dispatch on the HDLC address byte: PPP vs. Cisco framing. */
	switch (h->address) {
	case PPP_ALLSTATIONS:
		if (h->control != PPP_UI)
			goto invalid;
		if (sp->pp_flags & PP_CISCO) {
			/* PPP frame received while configured for Cisco mode. */
			/* NOTE(review): format string carries no %-directives
			 * for the trailing args here and below -- confirm
			 * against the original sources. */
			if (debug)
				log(LOG_DEBUG,
				    SPP_FMT "PPP packet in Cisco mode "
				    "\n",
				    SPP_ARGS(ifp),
				    h->address, h->control, ntohs(h->protocol));
			goto drop;
		}
		switch (ntohs (h->protocol)) {
		default:
			/* Unknown protocol: answer with LCP protocol-reject
			 * once LCP is up. */
			if (sp->state[IDX_LCP] == STATE_OPENED)
				sppp_cp_send (sp, PPP_LCP, PROTO_REJ,
					++sp->pp_seq, m->m_pkthdr.len + 2,
					&h->protocol);
			if (debug)
				log(LOG_DEBUG,
				    SPP_FMT "invalid input protocol "
				    "\n",
				    SPP_ARGS(ifp),
				    h->address, h->control, ntohs(h->protocol));
			++ifp->if_noproto;
			goto drop;
		case PPP_LCP:
			sppp_cp_input(&lcp, sp, m);
			m_freem (m);
			return;
		case PPP_PAP:
			/* Authentication traffic only once past LCP. */
			if (sp->pp_phase >= PHASE_AUTHENTICATE)
				sppp_pap_input(sp, m);
			m_freem (m);
			return;
		case PPP_CHAP:
			if (sp->pp_phase >= PHASE_AUTHENTICATE)
				sppp_chap_input(sp, m);
			m_freem (m);
			return;
#if INET
		case PPP_IPCP:
			if (sp->pp_phase == PHASE_NETWORK)
				sppp_cp_input(&ipcp, sp, m);
			m_freem (m);
			return;
		case PPP_IP:
			/* Payload IP only after IPCP reached Opened. */
			if (sp->state[IDX_IPCP] == STATE_OPENED) {
				schednetisr (NETISR_IP);
				inq = &ipintrq;
			}
			break;
#endif
#if IPX
		case PPP_IPX:
			/* IPX IPXCP not implemented yet */
			if (sp->pp_phase == PHASE_NETWORK) {
				schednetisr (NETISR_IPX);
				inq = &ipxintrq;
			}
			break;
#endif
#if NS
		case PPP_XNS:
			/* XNS IDPCP not implemented yet */
			if (sp->pp_phase == PHASE_NETWORK) {
				schednetisr (NETISR_NS);
				inq = &nsintrq;
			}
			break;
#endif
#if ISO
		case PPP_ISO:
			/* OSI NLCP not implemented yet */
			if (sp->pp_phase == PHASE_NETWORK) {
				schednetisr (NETISR_ISO);
				inq = &clnlintrq;
			}
			break;
#endif
		}
		break;
	case CISCO_MULTICAST:
	case CISCO_UNICAST:
		/* Don't check the control field here (RFC 1547). */
		if (! (sp->pp_flags & PP_CISCO)) {
			if (debug)
				log(LOG_DEBUG,
				    SPP_FMT "Cisco packet in PPP mode "
				    "\n",
				    SPP_ARGS(ifp),
				    h->address, h->control, ntohs(h->protocol));
			goto drop;
		}
		switch (ntohs (h->protocol)) {
		default:
			++ifp->if_noproto;
			goto invalid;
		case CISCO_KEEPALIVE:
			sppp_cisco_input ((struct sppp*) ifp, m);
			m_freem (m);
			return;
#if INET
		case ETHERTYPE_IP:
			schednetisr (NETISR_IP);
			inq = &ipintrq;
			break;
#endif
#if IPX
		case ETHERTYPE_IPX:
			schednetisr (NETISR_IPX);
			inq = &ipxintrq;
			break;
#endif
#if NS
		case ETHERTYPE_NS:
			schednetisr (NETISR_NS);
			inq = &nsintrq;
			break;
#endif
		}
		break;
	default:        /* Invalid PPP packet. */
	  invalid:
		if (debug)
			log(LOG_DEBUG,
			    SPP_FMT "invalid input packet "
			    "\n",
			    SPP_ARGS(ifp),
			    h->address, h->control, ntohs(h->protocol));
		goto drop;
	}

	/* Only hand the packet up if the interface is up and a queue was
	 * selected above. */
	if (! (ifp->if_flags & IFF_UP) || ! inq)
		goto drop;

	/* Check queue. */
	s = splimp();
	if (IF_QFULL (inq)) {
		/* Queue overflow. */
		IF_DROP(inq);
		splx(s);
		if (debug)
			log(LOG_DEBUG, SPP_FMT "protocol queue overflow\n",
			    SPP_ARGS(ifp));
		goto drop;
	}
	IF_ENQUEUE(inq, m);
	splx(s);
}

/*
 * Enqueue transmit packet.
+ */ +static int +sppp_output(struct ifnet *ifp, struct mbuf *m, + struct sockaddr *dst, struct rtentry *rt) +{ + struct sppp *sp = (struct sppp*) ifp; + struct ppp_header *h; + struct ifqueue *ifq; + int s, rv = 0; + int debug = ifp->if_flags & IFF_DEBUG; + + s = splimp(); + + if ((ifp->if_flags & IFF_UP) == 0 || + (ifp->if_flags & (IFF_RUNNING | IFF_AUTO)) == 0) { + m_freem (m); + splx (s); + return (ENETDOWN); + } + + if ((ifp->if_flags & (IFF_RUNNING | IFF_AUTO)) == IFF_AUTO) { + /* + * Interface is not yet running, but auto-dial. Need + * to start LCP for it. + */ + ifp->if_flags |= IFF_RUNNING; + splx(s); + lcp.Open(sp); + s = splimp(); + } + + ifq = &ifp->if_snd; +#if INET + if (dst->sa_family == AF_INET) { + /* XXX Check mbuf length here? */ + struct ip *ip = mtod (m, struct ip*); + struct tcphdr *tcp = (struct tcphdr*) ((long*)ip + ip->ip_hl); + + /* + * When using dynamic local IP address assignment by using + * 0.0.0.0 as a local address, the first TCP session will + * not connect because the local TCP checksum is computed + * using 0.0.0.0 which will later become our real IP address + * so the TCP checksum computed at the remote end will + * become invalid. So we + * - don't let packets with src ip addr 0 thru + * - we flag TCP packets with src ip 0 as an error + */ + + if(ip->ip_src.s_addr == INADDR_ANY) /* -hm */ + { + m_freem(m); + splx(s); + if(ip->ip_p == IPPROTO_TCP) + return(EADDRNOTAVAIL); + else + return(0); + } + + /* + * Put low delay, telnet, rlogin and ftp control packets + * in front of the queue. + */ + if (IF_QFULL (&sp->pp_fastq)) + ; + else if (ip->ip_tos & IPTOS_LOWDELAY) + ifq = &sp->pp_fastq; + else if (m->m_len < sizeof *ip + sizeof *tcp) + ; + else if (ip->ip_p != IPPROTO_TCP) + ; + else if (INTERACTIVE (ntohs (tcp->th_sport))) + ifq = &sp->pp_fastq; + else if (INTERACTIVE (ntohs (tcp->th_dport))) + ifq = &sp->pp_fastq; + } +#endif + + /* + * Prepend general data packet PPP header. For now, IP only. 
+ */ + M_PREPEND (m, PPP_HEADER_LEN, M_DONTWAIT); + if (! m) { + if (debug) + log(LOG_DEBUG, SPP_FMT "no memory for transmit header\n", + SPP_ARGS(ifp)); + ++ifp->if_oerrors; + splx (s); + return (ENOBUFS); + } + /* + * May want to check size of packet + * (albeit due to the implementation it's always enough) + */ + h = mtod (m, struct ppp_header*); + if (sp->pp_flags & PP_CISCO) { + h->address = CISCO_UNICAST; /* unicast address */ + h->control = 0; + } else { + h->address = PPP_ALLSTATIONS; /* broadcast address */ + h->control = PPP_UI; /* Unnumbered Info */ + } + + switch (dst->sa_family) { +#if INET + case AF_INET: /* Internet Protocol */ + if (sp->pp_flags & PP_CISCO) + h->protocol = htons (ETHERTYPE_IP); + else { + /* + * Don't choke with an ENETDOWN early. It's + * possible that we just started dialing out, + * so don't drop the packet immediately. If + * we notice that we run out of buffer space + * below, we will however remember that we are + * not ready to carry IP packets, and return + * ENETDOWN, as opposed to ENOBUFS. + */ + h->protocol = htons(PPP_IP); + if (sp->state[IDX_IPCP] != STATE_OPENED) + rv = ENETDOWN; + } + break; +#endif +#if NS + case AF_NS: /* Xerox NS Protocol */ + h->protocol = htons ((sp->pp_flags & PP_CISCO) ? + ETHERTYPE_NS : PPP_XNS); + break; +#endif +#if IPX + case AF_IPX: /* Novell IPX Protocol */ + h->protocol = htons ((sp->pp_flags & PP_CISCO) ? + ETHERTYPE_IPX : PPP_IPX); + break; +#endif +#if ISO + case AF_ISO: /* ISO OSI Protocol */ + if (sp->pp_flags & PP_CISCO) + goto nosupport; + h->protocol = htons (PPP_ISO); + break; +nosupport: +#endif + default: + m_freem (m); + ++ifp->if_oerrors; + splx (s); + return (EAFNOSUPPORT); + } + + /* + * Queue message on interface, and start output if interface + * not yet active. + */ + if (IF_QFULL (ifq)) { + IF_DROP (&ifp->if_snd); + m_freem (m); + ++ifp->if_oerrors; + splx (s); + return (rv? rv: ENOBUFS); + } + IF_ENQUEUE (ifq, m); + if (! 
(ifp->if_flags & IFF_OACTIVE)) + (*ifp->if_start) (ifp); + + /* + * Count output packets and bytes. + * The packet length includes header, FCS and 1 flag, + * according to RFC 1333. + */ + ifp->if_obytes += m->m_pkthdr.len + 3; + splx (s); + return (0); +} + +void +sppp_attach(struct ifnet *ifp) +{ + struct sppp *sp = (struct sppp*) ifp; + + /* Initialize keepalive handler. */ + if (! spppq) + TIMEOUT(sppp_keepalive, 0, hz * 10, keepalive_ch); + + /* Insert new entry into the keepalive list. */ + sp->pp_next = spppq; + spppq = sp; + + sp->pp_if.if_mtu = PP_MTU; + sp->pp_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST; + sp->pp_if.if_type = IFT_PPP; + sp->pp_if.if_output = sppp_output; +#if 0 + sp->pp_flags = PP_KEEPALIVE; +#endif + sp->pp_fastq.ifq_maxlen = 32; + sp->pp_cpq.ifq_maxlen = 20; + sp->pp_loopcnt = 0; + sp->pp_alivecnt = 0; + sp->pp_seq = 0; + sp->pp_rseq = 0; + sp->pp_phase = PHASE_DEAD; + sp->pp_up = lcp.Up; + sp->pp_down = lcp.Down; + + sppp_lcp_init(sp); + sppp_ipcp_init(sp); + sppp_pap_init(sp); + sppp_chap_init(sp); +} + +void +sppp_detach(struct ifnet *ifp) +{ + struct sppp **q, *p, *sp = (struct sppp*) ifp; + int i; + + /* Remove the entry from the keepalive list. */ + for (q = &spppq; (p = *q); q = &p->pp_next) + if (p == sp) { + *q = p->pp_next; + break; + } + + /* Stop keepalive handler. */ + if (! spppq) + UNTIMEOUT(sppp_keepalive, 0, keepalive_ch); + + for (i = 0; i < IDX_COUNT; i++) + UNTIMEOUT((cps[i])->TO, (void *)sp, sp->ch[i]); + UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch); +} + +/* + * Flush the interface output queue. + */ +void +sppp_flush(struct ifnet *ifp) +{ + struct sppp *sp = (struct sppp*) ifp; + + sppp_qflush (&sp->pp_if.if_snd); + sppp_qflush (&sp->pp_fastq); + sppp_qflush (&sp->pp_cpq); +} + +/* + * Check if the output queue is empty. 
+ */ +int +sppp_isempty(struct ifnet *ifp) +{ + struct sppp *sp = (struct sppp*) ifp; + int empty, s; + + s = splimp(); + empty = !sp->pp_fastq.ifq_head && !sp->pp_cpq.ifq_head && + !sp->pp_if.if_snd.ifq_head; + splx(s); + return (empty); +} + +/* + * Get next packet to send. + */ +struct mbuf * +sppp_dequeue(struct ifnet *ifp) +{ + struct sppp *sp = (struct sppp*) ifp; + struct mbuf *m; + int s; + + s = splimp(); + /* + * Process only the control protocol queue until we have at + * least one NCP open. + * + * Do always serve all three queues in Cisco mode. + */ + IF_DEQUEUE(&sp->pp_cpq, m); + if (m == NULL && + (sppp_ncp_check(sp) || (sp->pp_flags & PP_CISCO) != 0)) { + IF_DEQUEUE(&sp->pp_fastq, m); + if (m == NULL) + IF_DEQUEUE (&sp->pp_if.if_snd, m); + } + splx(s); + return m; +} + +/* + * Pick the next packet, do not remove it from the queue. + */ +struct mbuf * +sppp_pick(struct ifnet *ifp) +{ + struct sppp *sp = (struct sppp*)ifp; + struct mbuf *m; + int s; + + s= splimp (); + + m = sp->pp_cpq.ifq_head; + if (m == NULL && + (sp->pp_phase == PHASE_NETWORK || + (sp->pp_flags & PP_CISCO) != 0)) + if ((m = sp->pp_fastq.ifq_head) == NULL) + m = sp->pp_if.if_snd.ifq_head; + splx (s); + return (m); +} + +/* + * Process an ioctl request. Called on low priority level. + */ +int +sppp_ioctl(struct ifnet *ifp, IOCTL_CMD_T cmd, void *data) +{ + struct ifreq *ifr = (struct ifreq*) data; + struct sppp *sp = (struct sppp*) ifp; + int s, rv, going_up, going_down, newmode; + + s = splimp(); + rv = 0; + switch (cmd) { + case SIOCAIFADDR: + case SIOCSIFDSTADDR: + break; + + case SIOCSIFADDR: + if_up(ifp); + /* fall through... 
*/ + + case SIOCSIFFLAGS: + going_up = ifp->if_flags & IFF_UP && + (ifp->if_flags & IFF_RUNNING) == 0; + going_down = (ifp->if_flags & IFF_UP) == 0 && + ifp->if_flags & IFF_RUNNING; + newmode = ifp->if_flags & (IFF_AUTO | IFF_PASSIVE); + if (newmode == (IFF_AUTO | IFF_PASSIVE)) { + /* sanity */ + newmode = IFF_PASSIVE; + ifp->if_flags &= ~IFF_AUTO; + } + + if (going_up || going_down) + lcp.Close(sp); + if (going_up && newmode == 0) { + /* neither auto-dial nor passive */ + ifp->if_flags |= IFF_RUNNING; + if (!(sp->pp_flags & PP_CISCO)) + lcp.Open(sp); + } else if (going_down) { + sppp_flush(ifp); + ifp->if_flags &= ~IFF_RUNNING; + } + + break; + +#ifdef SIOCSIFMTU +#ifndef ifr_mtu +#define ifr_mtu ifr_metric +#endif + case SIOCSIFMTU: + if (ifr->ifr_mtu < 128 || ifr->ifr_mtu > sp->lcp.their_mru) + return (EINVAL); + ifp->if_mtu = ifr->ifr_mtu; + break; +#endif +#ifdef SLIOCSETMTU + case SLIOCSETMTU: + if (*(short*)data < 128 || *(short*)data > sp->lcp.their_mru) + return (EINVAL); + ifp->if_mtu = *(short*)data; + break; +#endif +#ifdef SIOCGIFMTU + case SIOCGIFMTU: + ifr->ifr_mtu = ifp->if_mtu; + break; +#endif +#ifdef SLIOCGETMTU + case SLIOCGETMTU: + *(short*)data = ifp->if_mtu; + break; +#endif + case SIOCADDMULTI: + case SIOCDELMULTI: + break; + + case SIOCGIFGENERIC: + case SIOCSIFGENERIC: + rv = sppp_params(sp, cmd, data); + break; + + default: + rv = ENOTTY; + } + splx(s); + return rv; +} + + + /* + * Cisco framing implementation. + */ + +/* + * Handle incoming Cisco keepalive protocol packets. 
 */
static void
sppp_cisco_input(struct sppp *sp, struct mbuf *m)
{
	STDDCL;
	struct cisco_packet *h;
	u_long me, mymask;

	/* Reject runts before touching any field. */
	if (m->m_pkthdr.len < CISCO_PACKET_LEN) {
		if (debug)
			log(LOG_DEBUG,
			    SPP_FMT "cisco invalid packet length: %d bytes\n",
			    SPP_ARGS(ifp), m->m_pkthdr.len);
		return;
	}
	h = mtod (m, struct cisco_packet*);
	if (debug)
		log(LOG_DEBUG,
		    SPP_FMT "cisco input: %d bytes "
		    "<0x%lx 0x%lx 0x%lx 0x%x 0x%x-0x%x>\n",
		    SPP_ARGS(ifp), m->m_pkthdr.len,
		    (u_long)ntohl (h->type), (u_long)h->par1, (u_long)h->par2, (u_int)h->rel,
		    (u_int)h->time0, (u_int)h->time1);
	switch (ntohl (h->type)) {
	default:
		if (debug)
			addlog(SPP_FMT "cisco unknown packet type: 0x%lx\n",
			       SPP_ARGS(ifp), (u_long)ntohl (h->type));
		break;
	case CISCO_ADDR_REPLY:
		/* Reply on address request, ignore */
		break;
	case CISCO_KEEPALIVE_REQ:
		/* Peer is alive: reset the dead-peer counter and record
		 * the peer's sequence number. */
		sp->pp_alivecnt = 0;
		sp->pp_rseq = ntohl (h->par1);
		if (sp->pp_seq == sp->pp_rseq) {
			/* Local and remote sequence numbers are equal.
			 * Probably, the line is in loopback mode. */
			if (sp->pp_loopcnt >= MAXALIVECNT) {
				printf (SPP_FMT "loopback\n",
					SPP_ARGS(ifp));
				sp->pp_loopcnt = 0;
				if (ifp->if_flags & IFF_UP) {
					if_down (ifp);
					sppp_qflush (&sp->pp_cpq);
				}
			}
			++sp->pp_loopcnt;

			/* Generate new local sequence number */
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
			sp->pp_seq = random();
#else
			sp->pp_seq ^= time.tv_sec ^ time.tv_usec;
#endif
			break;
		}
		/* Sequence numbers differ: not looped back. */
		sp->pp_loopcnt = 0;
		if (! (ifp->if_flags & IFF_UP) &&
		    (ifp->if_flags & IFF_RUNNING)) {
			if_up(ifp);
			printf (SPP_FMT "up\n", SPP_ARGS(ifp));
		}
		break;
	case CISCO_ADDR_REQ:
		/* Answer with our address/netmask, but only if we have one. */
		sppp_get_ip_addrs(sp, &me, 0, &mymask);
		if (me != 0L)
			sppp_cisco_send(sp, CISCO_ADDR_REPLY, me, mymask);
		break;
	}
}

/*
 * Send Cisco keepalive packet.
+ */ +static void +sppp_cisco_send(struct sppp *sp, int type, long par1, long par2) +{ + STDDCL; + struct ppp_header *h; + struct cisco_packet *ch; + struct mbuf *m; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + struct timeval tv; +#else + u_long t = (time.tv_sec - boottime.tv_sec) * 1000; +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + getmicrouptime(&tv); +#endif + + MGETHDR (m, M_DONTWAIT, MT_DATA); + if (! m) + return; + m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + CISCO_PACKET_LEN; + m->m_pkthdr.rcvif = 0; + + h = mtod (m, struct ppp_header*); + h->address = CISCO_MULTICAST; + h->control = 0; + h->protocol = htons (CISCO_KEEPALIVE); + + ch = (struct cisco_packet*) (h + 1); + ch->type = htonl (type); + ch->par1 = htonl (par1); + ch->par2 = htonl (par2); + ch->rel = -1; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + ch->time0 = htons ((u_short) (tv.tv_sec >> 16)); + ch->time1 = htons ((u_short) tv.tv_sec); +#else + ch->time0 = htons ((u_short) (t >> 16)); + ch->time1 = htons ((u_short) t); +#endif + + if (debug) + log(LOG_DEBUG, + SPP_FMT "cisco output: <0x%lx 0x%lx 0x%lx 0x%x 0x%x-0x%x>\n", + SPP_ARGS(ifp), (u_long)ntohl (ch->type), (u_long)ch->par1, + (u_long)ch->par2, (u_int)ch->rel, (u_int)ch->time0, (u_int)ch->time1); + + if (IF_QFULL (&sp->pp_cpq)) { + IF_DROP (&sp->pp_fastq); + IF_DROP (&ifp->if_snd); + m_freem (m); + } else + IF_ENQUEUE (&sp->pp_cpq, m); + if (! (ifp->if_flags & IFF_OACTIVE)) + (*ifp->if_start) (ifp); + ifp->if_obytes += m->m_pkthdr.len + 3; +} + + /* + * PPP protocol implementation. + */ + +/* + * Send PPP control protocol packet. + */ +static void +sppp_cp_send(struct sppp *sp, u_short proto, u_char type, + u_char ident, u_short len, void *data) +{ + STDDCL; + struct ppp_header *h; + struct lcp_header *lh; + struct mbuf *m; + + if (len > MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN) + len = MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN; + MGETHDR (m, M_DONTWAIT, MT_DATA); + if (! 
m) + return; + m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + LCP_HEADER_LEN + len; + m->m_pkthdr.rcvif = 0; + + h = mtod (m, struct ppp_header*); + h->address = PPP_ALLSTATIONS; /* broadcast address */ + h->control = PPP_UI; /* Unnumbered Info */ + h->protocol = htons (proto); /* Link Control Protocol */ + + lh = (struct lcp_header*) (h + 1); + lh->type = type; + lh->ident = ident; + lh->len = htons (LCP_HEADER_LEN + len); + if (len) + bcopy (data, lh+1, len); + + if (debug) { + log(LOG_DEBUG, SPP_FMT "%s output <%s id=0x%x len=%d", + SPP_ARGS(ifp), + sppp_proto_name(proto), + sppp_cp_type_name (lh->type), lh->ident, + ntohs (lh->len)); + if (len) + sppp_print_bytes ((u_char*) (lh+1), len); + addlog(">\n"); + } + if (IF_QFULL (&sp->pp_cpq)) { + IF_DROP (&sp->pp_fastq); + IF_DROP (&ifp->if_snd); + m_freem (m); + ++ifp->if_oerrors; + } else + IF_ENQUEUE (&sp->pp_cpq, m); + if (! (ifp->if_flags & IFF_OACTIVE)) + (*ifp->if_start) (ifp); + ifp->if_obytes += m->m_pkthdr.len + 3; +} + +/* + * Handle incoming PPP control protocol packets. 
 */
static void
sppp_cp_input(const struct cp *cp, struct sppp *sp, struct mbuf *m)
{
	STDDCL;
	struct lcp_header *h;
	int len = m->m_pkthdr.len;
	int rv;
	u_char *p;

	/* Need at least a full LCP header. */
	if (len < 4) {
		if (debug)
			log(LOG_DEBUG,
			    SPP_FMT "%s invalid packet length: %d bytes\n",
			    SPP_ARGS(ifp), cp->name, len);
		return;
	}
	h = mtod (m, struct lcp_header*);
	if (debug) {
		log(LOG_DEBUG,
		    SPP_FMT "%s input(%s): <%s id=0x%x len=%d",
		    SPP_ARGS(ifp), cp->name,
		    sppp_state_name(sp->state[cp->protoidx]),
		    sppp_cp_type_name (h->type), h->ident, ntohs (h->len));
		if (len > 4)
			sppp_print_bytes ((u_char*) (h+1), len-4);
		addlog(">\n");
	}
	/* Trust the header's length field if it is smaller. */
	if (len > ntohs (h->len))
		len = ntohs (h->len);
	p = (u_char *)(h + 1);
	/* RFC 1661 receive-event automaton, keyed on the code field. */
	switch (h->type) {
	case CONF_REQ:
		if (len < 4) {
			if (debug)
				addlog(SPP_FMT "%s invalid conf-req length %d\n",
				       SPP_ARGS(ifp), cp->name,
				       len);
			++ifp->if_ierrors;
			break;
		}
		/* handle states where RCR doesn't get a SCA/SCN */
		switch (sp->state[cp->protoidx]) {
		case STATE_CLOSING:
		case STATE_STOPPING:
			return;
		case STATE_CLOSED:
			sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident,
				     0, 0);
			return;
		}
		/* RCR callback parses options; nonzero = request acceptable. */
		rv = (cp->RCR)(sp, h, len);
		switch (sp->state[cp->protoidx]) {
		case STATE_OPENED:
			(cp->tld)(sp);
			(cp->scr)(sp);
			/* fall through... */
		case STATE_ACK_SENT:
		case STATE_REQ_SENT:
			sppp_cp_change_state(cp, sp, rv?
					     STATE_ACK_SENT: STATE_REQ_SENT);
			break;
		case STATE_STOPPED:
			sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
			(cp->scr)(sp);
			sppp_cp_change_state(cp, sp, rv?
					     STATE_ACK_SENT: STATE_REQ_SENT);
			break;
		case STATE_ACK_RCVD:
			if (rv) {
				/* Both sides agreed: this-layer-up. */
				sppp_cp_change_state(cp, sp, STATE_OPENED);
				if (debug)
					log(LOG_DEBUG, SPP_FMT "%s tlu\n",
					    SPP_ARGS(ifp),
					    cp->name);
				(cp->tlu)(sp);
			} else
				sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
			break;
		default:
			printf(SPP_FMT "%s illegal %s in state %s\n",
			       SPP_ARGS(ifp), cp->name,
			       sppp_cp_type_name(h->type),
			       sppp_state_name(sp->state[cp->protoidx]));
			++ifp->if_ierrors;
		}
		break;
	case CONF_ACK:
		/* Only honor an ack that matches our outstanding request id. */
		if (h->ident != sp->confid[cp->protoidx]) {
			if (debug)
				addlog(SPP_FMT "%s id mismatch 0x%x != 0x%x\n",
				       SPP_ARGS(ifp), cp->name,
				       h->ident, sp->confid[cp->protoidx]);
			++ifp->if_ierrors;
			break;
		}
		switch (sp->state[cp->protoidx]) {
		case STATE_CLOSED:
		case STATE_STOPPED:
			sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
			break;
		case STATE_CLOSING:
		case STATE_STOPPING:
			break;
		case STATE_REQ_SENT:
			sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
			sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
			break;
		case STATE_OPENED:
			(cp->tld)(sp);
			/* fall through */
		case STATE_ACK_RCVD:
			(cp->scr)(sp);
			sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
			break;
		case STATE_ACK_SENT:
			sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
			sppp_cp_change_state(cp, sp, STATE_OPENED);
			if (debug)
				log(LOG_DEBUG, SPP_FMT "%s tlu\n",
				    SPP_ARGS(ifp), cp->name);
			(cp->tlu)(sp);
			break;
		default:
			printf(SPP_FMT "%s illegal %s in state %s\n",
			       SPP_ARGS(ifp), cp->name,
			       sppp_cp_type_name(h->type),
			       sppp_state_name(sp->state[cp->protoidx]));
			++ifp->if_ierrors;
		}
		break;
	case CONF_NAK:
	case CONF_REJ:
		if (h->ident != sp->confid[cp->protoidx]) {
			if (debug)
				addlog(SPP_FMT "%s id mismatch 0x%x != 0x%x\n",
				       SPP_ARGS(ifp), cp->name,
				       h->ident, sp->confid[cp->protoidx]);
			++ifp->if_ierrors;
			break;
		}
		/* Let the protocol adjust its option set, then retransmit. */
		if (h->type == CONF_NAK)
			(cp->RCN_nak)(sp, h, len);
		else /* CONF_REJ */
			(cp->RCN_rej)(sp, h, len);

		switch (sp->state[cp->protoidx]) {
		case STATE_CLOSED:
		case STATE_STOPPED:
			sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
			break;
		case STATE_REQ_SENT:
		case STATE_ACK_SENT:
			sp->rst_counter[cp->protoidx] = sp->lcp.max_configure;
			(cp->scr)(sp);
			break;
		case STATE_OPENED:
			(cp->tld)(sp);
			/* fall through */
		case STATE_ACK_RCVD:
			sppp_cp_change_state(cp, sp, STATE_ACK_SENT);
			(cp->scr)(sp);
			break;
		case STATE_CLOSING:
		case STATE_STOPPING:
			break;
		default:
			printf(SPP_FMT "%s illegal %s in state %s\n",
			       SPP_ARGS(ifp), cp->name,
			       sppp_cp_type_name(h->type),
			       sppp_state_name(sp->state[cp->protoidx]));
			++ifp->if_ierrors;
		}
		break;

	case TERM_REQ:
		switch (sp->state[cp->protoidx]) {
		case STATE_ACK_RCVD:
		case STATE_ACK_SENT:
			sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
			/* fall through */
		case STATE_CLOSED:
		case STATE_STOPPED:
		case STATE_CLOSING:
		case STATE_STOPPING:
		case STATE_REQ_SENT:
		  sta:
			/* Send Terminate-Ack packet. */
			if (debug)
				log(LOG_DEBUG, SPP_FMT "%s send terminate-ack\n",
				    SPP_ARGS(ifp), cp->name);
			sppp_cp_send(sp, cp->proto, TERM_ACK, h->ident, 0, 0);
			break;
		case STATE_OPENED:
			/* Peer is shutting down: this-layer-down, then ack. */
			(cp->tld)(sp);
			sp->rst_counter[cp->protoidx] = 0;
			sppp_cp_change_state(cp, sp, STATE_STOPPING);
			goto sta;
			break;
		default:
			printf(SPP_FMT "%s illegal %s in state %s\n",
			       SPP_ARGS(ifp), cp->name,
			       sppp_cp_type_name(h->type),
			       sppp_state_name(sp->state[cp->protoidx]));
			++ifp->if_ierrors;
		}
		break;
	case TERM_ACK:
		switch (sp->state[cp->protoidx]) {
		case STATE_CLOSED:
		case STATE_STOPPED:
		case STATE_REQ_SENT:
		case STATE_ACK_SENT:
			break;
		case STATE_CLOSING:
			sppp_cp_change_state(cp, sp, STATE_CLOSED);
			(cp->tlf)(sp);
			break;
		case STATE_STOPPING:
			sppp_cp_change_state(cp, sp, STATE_STOPPED);
			(cp->tlf)(sp);
			break;
		case STATE_ACK_RCVD:
			sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
			break;
		case STATE_OPENED:
			(cp->tld)(sp);
			(cp->scr)(sp);
			sppp_cp_change_state(cp, sp, STATE_ACK_RCVD);
			break;
		default:
			printf(SPP_FMT "%s illegal %s in state %s\n",
			       SPP_ARGS(ifp), cp->name,
			       sppp_cp_type_name(h->type),
			       sppp_state_name(sp->state[cp->protoidx]));
			++ifp->if_ierrors;
		}
		break;
	case CODE_REJ:
	case PROTO_REJ:
		/* XXX catastrophic rejects (RXJ-) aren't handled yet. */
		log(LOG_INFO,
		    SPP_FMT "%s: ignoring RXJ (%s) for proto 0x%x, "
		    "danger will robinson\n",
		    SPP_ARGS(ifp), cp->name,
		    sppp_cp_type_name(h->type), ntohs(*((u_short *)p)));
		switch (sp->state[cp->protoidx]) {
		case STATE_CLOSED:
		case STATE_STOPPED:
		case STATE_REQ_SENT:
		case STATE_ACK_SENT:
		case STATE_CLOSING:
		case STATE_STOPPING:
		case STATE_OPENED:
			break;
		case STATE_ACK_RCVD:
			sppp_cp_change_state(cp, sp, STATE_REQ_SENT);
			break;
		default:
			printf(SPP_FMT "%s illegal %s in state %s\n",
			       SPP_ARGS(ifp), cp->name,
			       sppp_cp_type_name(h->type),
			       sppp_state_name(sp->state[cp->protoidx]));
			++ifp->if_ierrors;
		}
		break;
	case DISC_REQ:
		/* Discard-Request is only defined for LCP. */
		if (cp->proto != PPP_LCP)
			goto illegal;
		/* Discard the packet. */
		break;
	case ECHO_REQ:
		if (cp->proto != PPP_LCP)
			goto illegal;
		if (sp->state[cp->protoidx] != STATE_OPENED) {
			if (debug)
				addlog(SPP_FMT "lcp echo req but lcp closed\n",
				       SPP_ARGS(ifp));
			++ifp->if_ierrors;
			break;
		}
		/* Need the 4-byte magic number after the header. */
		if (len < 8) {
			if (debug)
				addlog(SPP_FMT "invalid lcp echo request "
				       "packet length: %d bytes\n",
				       SPP_ARGS(ifp), len);
			break;
		}
		if (ntohl (*(long*)(h+1)) == sp->lcp.magic) {
			/* Line loopback mode detected. */
			printf(SPP_FMT "loopback\n", SPP_ARGS(ifp));
			if_down (ifp);
			sppp_qflush (&sp->pp_cpq);

			/* Shut down the PPP link. */
			/* XXX */
			lcp.Down(sp);
			lcp.Up(sp);
			break;
		}
		/* Substitute our magic and bounce the request back. */
		*(long*)(h+1) = htonl (sp->lcp.magic);
		if (debug)
			addlog(SPP_FMT "got lcp echo req, sending echo rep\n",
			       SPP_ARGS(ifp));
		sppp_cp_send (sp, PPP_LCP, ECHO_REPLY, h->ident, len-4, h+1);
		break;
	case ECHO_REPLY:
		if (cp->proto != PPP_LCP)
			goto illegal;
		/* Only accept a reply to the echo we actually sent. */
		if (h->ident != sp->lcp.echoid) {
			++ifp->if_ierrors;
			break;
		}
		if (len < 8) {
			if (debug)
				addlog(SPP_FMT "lcp invalid echo reply "
				       "packet length: %d bytes\n",
				       SPP_ARGS(ifp), len);
			break;
		}
		if (debug)
			addlog(SPP_FMT "lcp got echo rep\n",
			       SPP_ARGS(ifp));
		/* A non-looped reply proves the peer is alive. */
		if (ntohl (*(long*)(h+1)) != sp->lcp.magic)
			sp->pp_alivecnt = 0;
		break;
	default:
		/* Unknown packet type -- send Code-Reject packet. */
	  illegal:
		if (debug)
			addlog(SPP_FMT "%s send code-rej for 0x%x\n",
			       SPP_ARGS(ifp), cp->name, h->type);
		sppp_cp_send(sp, cp->proto, CODE_REJ, ++sp->pp_seq,
			     m->m_pkthdr.len, h);
		++ifp->if_ierrors;
	}
}


/*
 * The generic part of all Up/Down/Open/Close/TO event handlers.
 * Basically, the state transition handling in the automaton.
+ */ +static void +sppp_up_event(const struct cp *cp, struct sppp *sp) +{ + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "%s up(%s)\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx])); + + switch (sp->state[cp->protoidx]) { + case STATE_INITIAL: + sppp_cp_change_state(cp, sp, STATE_CLOSED); + break; + case STATE_STARTING: + sp->rst_counter[cp->protoidx] = sp->lcp.max_configure; + (cp->scr)(sp); + sppp_cp_change_state(cp, sp, STATE_REQ_SENT); + break; + default: + printf(SPP_FMT "%s illegal up in state %s\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx])); + } +} + +static void +sppp_down_event(const struct cp *cp, struct sppp *sp) +{ + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "%s down(%s)\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx])); + + switch (sp->state[cp->protoidx]) { + case STATE_CLOSED: + case STATE_CLOSING: + sppp_cp_change_state(cp, sp, STATE_INITIAL); + break; + case STATE_STOPPED: + sppp_cp_change_state(cp, sp, STATE_STARTING); + (cp->tls)(sp); + break; + case STATE_STOPPING: + case STATE_REQ_SENT: + case STATE_ACK_RCVD: + case STATE_ACK_SENT: + sppp_cp_change_state(cp, sp, STATE_STARTING); + break; + case STATE_OPENED: + (cp->tld)(sp); + sppp_cp_change_state(cp, sp, STATE_STARTING); + break; + default: + printf(SPP_FMT "%s illegal down in state %s\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx])); + } +} + + +static void +sppp_open_event(const struct cp *cp, struct sppp *sp) +{ + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "%s open(%s)\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx])); + + switch (sp->state[cp->protoidx]) { + case STATE_INITIAL: + sppp_cp_change_state(cp, sp, STATE_STARTING); + (cp->tls)(sp); + break; + case STATE_STARTING: + break; + case STATE_CLOSED: + sp->rst_counter[cp->protoidx] = sp->lcp.max_configure; + (cp->scr)(sp); + sppp_cp_change_state(cp, sp, STATE_REQ_SENT); + break; + case 
STATE_STOPPED: + case STATE_STOPPING: + case STATE_REQ_SENT: + case STATE_ACK_RCVD: + case STATE_ACK_SENT: + case STATE_OPENED: + break; + case STATE_CLOSING: + sppp_cp_change_state(cp, sp, STATE_STOPPING); + break; + } +} + + +static void +sppp_close_event(const struct cp *cp, struct sppp *sp) +{ + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "%s close(%s)\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx])); + + switch (sp->state[cp->protoidx]) { + case STATE_INITIAL: + case STATE_CLOSED: + case STATE_CLOSING: + break; + case STATE_STARTING: + sppp_cp_change_state(cp, sp, STATE_INITIAL); + (cp->tlf)(sp); + break; + case STATE_STOPPED: + sppp_cp_change_state(cp, sp, STATE_CLOSED); + break; + case STATE_STOPPING: + sppp_cp_change_state(cp, sp, STATE_CLOSING); + break; + case STATE_OPENED: + (cp->tld)(sp); + /* fall through */ + case STATE_REQ_SENT: + case STATE_ACK_RCVD: + case STATE_ACK_SENT: + sp->rst_counter[cp->protoidx] = sp->lcp.max_terminate; + sppp_cp_send(sp, cp->proto, TERM_REQ, ++sp->pp_seq, 0, 0); + sppp_cp_change_state(cp, sp, STATE_CLOSING); + break; + } +} + +static void +sppp_to_event(const struct cp *cp, struct sppp *sp) +{ + STDDCL; + int s; + + s = splimp(); + if (debug) + log(LOG_DEBUG, SPP_FMT "%s TO(%s) rst_counter = %d\n", + SPP_ARGS(ifp), cp->name, + sppp_state_name(sp->state[cp->protoidx]), + sp->rst_counter[cp->protoidx]); + + if (--sp->rst_counter[cp->protoidx] < 0) + /* TO- event */ + switch (sp->state[cp->protoidx]) { + case STATE_CLOSING: + sppp_cp_change_state(cp, sp, STATE_CLOSED); + (cp->tlf)(sp); + break; + case STATE_STOPPING: + sppp_cp_change_state(cp, sp, STATE_STOPPED); + (cp->tlf)(sp); + break; + case STATE_REQ_SENT: + case STATE_ACK_RCVD: + case STATE_ACK_SENT: + sppp_cp_change_state(cp, sp, STATE_STOPPED); + (cp->tlf)(sp); + break; + } + else + /* TO+ event */ + switch (sp->state[cp->protoidx]) { + case STATE_CLOSING: + case STATE_STOPPING: + sppp_cp_send(sp, cp->proto, TERM_REQ, ++sp->pp_seq, + 0, 
0); + TIMEOUT(cp->TO, (void *)sp, sp->lcp.timeout, + sp->ch[cp->protoidx]); + break; + case STATE_REQ_SENT: + case STATE_ACK_RCVD: + (cp->scr)(sp); + /* sppp_cp_change_state() will restart the timer */ + sppp_cp_change_state(cp, sp, STATE_REQ_SENT); + break; + case STATE_ACK_SENT: + (cp->scr)(sp); + TIMEOUT(cp->TO, (void *)sp, sp->lcp.timeout, + sp->ch[cp->protoidx]); + break; + } + + splx(s); +} + +/* + * Change the state of a control protocol in the state automaton. + * Takes care of starting/stopping the restart timer. + */ +void +sppp_cp_change_state(const struct cp *cp, struct sppp *sp, int newstate) +{ + sp->state[cp->protoidx] = newstate; + + UNTIMEOUT(cp->TO, (void *)sp, sp->ch[cp->protoidx]); + switch (newstate) { + case STATE_INITIAL: + case STATE_STARTING: + case STATE_CLOSED: + case STATE_STOPPED: + case STATE_OPENED: + break; + case STATE_CLOSING: + case STATE_STOPPING: + case STATE_REQ_SENT: + case STATE_ACK_RCVD: + case STATE_ACK_SENT: + TIMEOUT(cp->TO, (void *)sp, sp->lcp.timeout, + sp->ch[cp->protoidx]); + break; + } +} + /* + *--------------------------------------------------------------------------* + * * + * The LCP implementation. * + * * + *--------------------------------------------------------------------------* + */ +static void +sppp_lcp_init(struct sppp *sp) +{ + sp->lcp.opts = (1 << LCP_OPT_MAGIC); + sp->lcp.magic = 0; + sp->state[IDX_LCP] = STATE_INITIAL; + sp->fail_counter[IDX_LCP] = 0; + sp->lcp.protos = 0; + sp->lcp.mru = sp->lcp.their_mru = PP_MTU; + + /* + * Initialize counters and timeout values. Note that we don't + * use the 3 seconds suggested in RFC 1661 since we are likely + * running on a fast link. XXX We should probably implement + * the exponential backoff option. Note that these values are + * relevant for all control protocols, not just LCP only. 
+ */ + sp->lcp.timeout = 1 * hz; + sp->lcp.max_terminate = 2; + sp->lcp.max_configure = 10; + sp->lcp.max_failure = 10; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + callout_handle_init(&sp->ch[IDX_LCP]); +#endif +} + +static void +sppp_lcp_up(struct sppp *sp) +{ + STDDCL; + + /* + * If this interface is passive or dial-on-demand, and we are + * still in Initial state, it means we've got an incoming + * call. Activate the interface. + */ + if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) != 0) { + if (debug) + log(LOG_DEBUG, + SPP_FMT "Up event", SPP_ARGS(ifp)); + ifp->if_flags |= IFF_RUNNING; + if (sp->state[IDX_LCP] == STATE_INITIAL) { + if (debug) + addlog("(incoming call)\n"); + sp->pp_flags |= PP_CALLIN; + lcp.Open(sp); + } else if (debug) + addlog("\n"); + } + + sppp_up_event(&lcp, sp); +} + +static void +sppp_lcp_down(struct sppp *sp) +{ + STDDCL; + + sppp_down_event(&lcp, sp); + + /* + * If this is neither a dial-on-demand nor a passive + * interface, simulate an ``ifconfig down'' action, so the + * administrator can force a redial by another ``ifconfig + * up''. XXX For leased line operation, should we immediately + * try to reopen the connection here? 
+ */ + if ((ifp->if_flags & (IFF_AUTO | IFF_PASSIVE)) == 0) { + log(LOG_INFO, + SPP_FMT "Down event, taking interface down.\n", + SPP_ARGS(ifp)); + if_down(ifp); + } else { + if (debug) + log(LOG_DEBUG, + SPP_FMT "Down event (carrier loss)\n", + SPP_ARGS(ifp)); + } + sp->pp_flags &= ~PP_CALLIN; + if (sp->state[IDX_LCP] != STATE_INITIAL) + lcp.Close(sp); + ifp->if_flags &= ~IFF_RUNNING; +} + +static void +sppp_lcp_open(struct sppp *sp) +{ + /* + * If we are authenticator, negotiate LCP_AUTH + */ + if (sp->hisauth.proto != 0) + sp->lcp.opts |= (1 << LCP_OPT_AUTH_PROTO); + else + sp->lcp.opts &= ~(1 << LCP_OPT_AUTH_PROTO); + sp->pp_flags &= ~PP_NEEDAUTH; + sppp_open_event(&lcp, sp); +} + +static void +sppp_lcp_close(struct sppp *sp) +{ + sppp_close_event(&lcp, sp); +} + +static void +sppp_lcp_TO(void *cookie) +{ + sppp_to_event(&lcp, (struct sppp *)cookie); +} + +/* + * Analyze a configure request. Return true if it was agreeable, and + * caused action sca, false if it has been rejected or nak'ed, and + * caused action scn. (The return value is used to make the state + * transition decision in the state automaton.) + */ +static int +sppp_lcp_RCR(struct sppp *sp, struct lcp_header *h, int len) +{ + STDDCL; + u_char *buf, *r, *p; + int origlen, rlen; + u_long nmagic; + u_short authproto; + + len -= 4; + origlen = len; + buf = r = _MALLOC(len, M_TEMP, M_NOWAIT); + if (! buf) + return (0); + + if (debug) + log(LOG_DEBUG, SPP_FMT "lcp parse opts: ", + SPP_ARGS(ifp)); + + /* pass 1: check for things that need to be rejected */ + p = (void*) (h+1); + for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) { + if (debug) + addlog(" %s ", sppp_lcp_opt_name(*p)); + switch (*p) { + case LCP_OPT_MAGIC: + /* Magic number. */ + /* fall through, both are same length */ + case LCP_OPT_ASYNC_MAP: + /* Async control character map. */ + if (len >= 6 || p[1] == 6) + continue; + if (debug) + addlog("[invalid] "); + break; + case LCP_OPT_MRU: + /* Maximum receive unit. 
*/ + if (len >= 4 && p[1] == 4) + continue; + if (debug) + addlog("[invalid] "); + break; + case LCP_OPT_AUTH_PROTO: + if (len < 4) { + if (debug) + addlog("[invalid] "); + break; + } + authproto = (p[2] << 8) + p[3]; + if (authproto == PPP_CHAP && p[1] != 5) { + if (debug) + addlog("[invalid chap len] "); + break; + } + if (sp->myauth.proto == 0) { + /* we are not configured to do auth */ + if (debug) + addlog("[not configured] "); + break; + } + /* + * Remote want us to authenticate, remember this, + * so we stay in PHASE_AUTHENTICATE after LCP got + * up. + */ + sp->pp_flags |= PP_NEEDAUTH; + continue; + default: + /* Others not supported. */ + if (debug) + addlog("[rej] "); + break; + } + /* Add the option to rejected list. */ + bcopy (p, r, p[1]); + r += p[1]; + rlen += p[1]; + } + if (rlen) { + if (debug) + addlog(" send conf-rej\n"); + sppp_cp_send (sp, PPP_LCP, CONF_REJ, h->ident, rlen, buf); + return 0; + } else if (debug) + addlog("\n"); + + /* + * pass 2: check for option values that are unacceptable and + * thus require to be nak'ed. + */ + if (debug) + log(LOG_DEBUG, SPP_FMT "lcp parse opt values: ", + SPP_ARGS(ifp)); + + p = (void*) (h+1); + len = origlen; + for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) { + if (debug) + addlog(" %s ", sppp_lcp_opt_name(*p)); + switch (*p) { + case LCP_OPT_MAGIC: + /* Magic number -- extract. */ + nmagic = (u_long)p[2] << 24 | + (u_long)p[3] << 16 | p[4] << 8 | p[5]; + if (nmagic != sp->lcp.magic) { + if (debug) + addlog("0x%lx ", nmagic); + continue; + } + /* + * Local and remote magics equal -- loopback? + */ + if (sp->pp_loopcnt >= MAXALIVECNT*5) { + printf (SPP_FMT "loopback\n", + SPP_ARGS(ifp)); + sp->pp_loopcnt = 0; + if (ifp->if_flags & IFF_UP) { + if_down(ifp); + sppp_qflush(&sp->pp_cpq); + /* XXX ? */ + lcp.Down(sp); + lcp.Up(sp); + } + } else if (debug) + addlog("[glitch] "); + ++sp->pp_loopcnt; + /* + * We negate our magic here, and NAK it. If + * we see it later in an NAK packet, we + * suggest a new one. 
+ */ + nmagic = ~sp->lcp.magic; + /* Gonna NAK it. */ + p[2] = nmagic >> 24; + p[3] = nmagic >> 16; + p[4] = nmagic >> 8; + p[5] = nmagic; + break; + + case LCP_OPT_ASYNC_MAP: + /* Async control character map -- check to be zero. */ + if (! p[2] && ! p[3] && ! p[4] && ! p[5]) { + if (debug) + addlog("[empty] "); + continue; + } + if (debug) + addlog("[non-empty] "); + /* suggest a zero one */ + p[2] = p[3] = p[4] = p[5] = 0; + break; + + case LCP_OPT_MRU: + /* + * Maximum receive unit. Always agreeable, + * but ignored by now. + */ + sp->lcp.their_mru = p[2] * 256 + p[3]; + if (debug) + addlog("%lu ", sp->lcp.their_mru); + continue; + + case LCP_OPT_AUTH_PROTO: + authproto = (p[2] << 8) + p[3]; + if (sp->myauth.proto != authproto) { + /* not agreed, nak */ + if (debug) + addlog("[mine %s != his %s] ", + sppp_proto_name(sp->hisauth.proto), + sppp_proto_name(authproto)); + p[2] = sp->myauth.proto >> 8; + p[3] = sp->myauth.proto; + break; + } + if (authproto == PPP_CHAP && p[4] != CHAP_MD5) { + if (debug) + addlog("[chap not MD5] "); + p[4] = CHAP_MD5; + break; + } + continue; + } + /* Add the option to nak'ed list. */ + bcopy (p, r, p[1]); + r += p[1]; + rlen += p[1]; + } + if (rlen) { + if (++sp->fail_counter[IDX_LCP] >= sp->lcp.max_failure) { + if (debug) + addlog(" max_failure (%d) exceeded, " + "send conf-rej\n", + sp->lcp.max_failure); + sppp_cp_send(sp, PPP_LCP, CONF_REJ, h->ident, rlen, buf); + } else { + if (debug) + addlog(" send conf-nak\n"); + sppp_cp_send (sp, PPP_LCP, CONF_NAK, h->ident, rlen, buf); + } + return 0; + } else { + if (debug) + addlog(" send conf-ack\n"); + sp->fail_counter[IDX_LCP] = 0; + sp->pp_loopcnt = 0; + sppp_cp_send (sp, PPP_LCP, CONF_ACK, + h->ident, origlen, h+1); + } + + FREE(buf, M_TEMP); + return (rlen == 0); +} + +/* + * Analyze the LCP Configure-Reject option list, and adjust our + * negotiation. 
+ */ +static void +sppp_lcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len) +{ + STDDCL; + u_char *buf, *p; + + len -= 4; + buf = MALLOC (len, M_TEMP, M_NOWAIT); + if (!buf) + return; + + if (debug) + log(LOG_DEBUG, SPP_FMT "lcp rej opts: ", + SPP_ARGS(ifp)); + + p = (void*) (h+1); + for (; len > 1 && p[1]; len -= p[1], p += p[1]) { + if (debug) + addlog(" %s ", sppp_lcp_opt_name(*p)); + switch (*p) { + case LCP_OPT_MAGIC: + /* Magic number -- can't use it, use 0 */ + sp->lcp.opts &= ~(1 << LCP_OPT_MAGIC); + sp->lcp.magic = 0; + break; + case LCP_OPT_MRU: + /* + * Should not be rejected anyway, since we only + * negotiate a MRU if explicitly requested by + * peer. + */ + sp->lcp.opts &= ~(1 << LCP_OPT_MRU); + break; + case LCP_OPT_AUTH_PROTO: + /* + * Peer doesn't want to authenticate himself, + * deny unless this is a dialout call, and + * AUTHFLAG_NOCALLOUT is set. + */ + if ((sp->pp_flags & PP_CALLIN) == 0 && + (sp->hisauth.flags & AUTHFLAG_NOCALLOUT) != 0) { + if (debug) + addlog("[don't insist on auth " + "for callout]"); + sp->lcp.opts &= ~(1 << LCP_OPT_AUTH_PROTO); + break; + } + if (debug) + addlog("[access denied]\n"); + lcp.Close(sp); + break; + } + } + if (debug) + addlog("\n"); + FREE(buf, M_TEMP); + return; +} + +/* + * Analyze the LCP Configure-NAK option list, and adjust our + * negotiation. 
+ */ +static void +sppp_lcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len) +{ + STDDCL; + u_char *buf, *p; + u_long magic; + + len -= 4; + buf = MALLOC (len, M_TEMP, M_NOWAIT); + if (!buf) + return; + + if (debug) + log(LOG_DEBUG, SPP_FMT "lcp nak opts: ", + SPP_ARGS(ifp)); + + p = (void*) (h+1); + for (; len > 1 && p[1]; len -= p[1], p += p[1]) { + if (debug) + addlog(" %s ", sppp_lcp_opt_name(*p)); + switch (*p) { + case LCP_OPT_MAGIC: + /* Magic number -- renegotiate */ + if ((sp->lcp.opts & (1 << LCP_OPT_MAGIC)) && + len >= 6 && p[1] == 6) { + magic = (u_long)p[2] << 24 | + (u_long)p[3] << 16 | p[4] << 8 | p[5]; + /* + * If the remote magic is our negated one, + * this looks like a loopback problem. + * Suggest a new magic to make sure. + */ + if (magic == ~sp->lcp.magic) { + if (debug) + addlog("magic glitch "); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + sp->lcp.magic = random(); +#else + sp->lcp.magic = time.tv_sec + time.tv_usec; +#endif + } else { + sp->lcp.magic = magic; + if (debug) + addlog("%lu ", magic); + } + } + break; + case LCP_OPT_MRU: + /* + * Peer wants to advise us to negotiate an MRU. + * Agree on it if it's reasonable, or use + * default otherwise. + */ + if (len >= 4 && p[1] == 4) { + u_int mru = p[2] * 256 + p[3]; + if (debug) + addlog("%d ", mru); + if (mru < PP_MTU || mru > PP_MAX_MRU) + mru = PP_MTU; + sp->lcp.mru = mru; + sp->lcp.opts |= (1 << LCP_OPT_MRU); + } + break; + case LCP_OPT_AUTH_PROTO: + /* + * Peer doesn't like our authentication method, + * deny. + */ + if (debug) + addlog("[access denied]\n"); + lcp.Close(sp); + break; + } + } + if (debug) + addlog("\n"); + FREE(buf, M_TEMP); + return; +} + +static void +sppp_lcp_tlu(struct sppp *sp) +{ + STDDCL; + int i; + u_long mask; + + /* XXX ? */ + if (! (ifp->if_flags & IFF_UP) && + (ifp->if_flags & IFF_RUNNING)) { + /* Coming out of loopback mode. 
*/ + if_up(ifp); + printf (SPP_FMT "up\n", SPP_ARGS(ifp)); + } + + for (i = 0; i < IDX_COUNT; i++) + if ((cps[i])->flags & CP_QUAL) + (cps[i])->Open(sp); + + if ((sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0 || + (sp->pp_flags & PP_NEEDAUTH) != 0) + sp->pp_phase = PHASE_AUTHENTICATE; + else + sp->pp_phase = PHASE_NETWORK; + + if (debug) + log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp), + sppp_phase_name(sp->pp_phase)); + + /* + * Open all authentication protocols. This is even required + * if we already proceeded to network phase, since it might be + * that remote wants us to authenticate, so we might have to + * send a PAP request. Undesired authentication protocols + * don't do anything when they get an Open event. + */ + for (i = 0; i < IDX_COUNT; i++) + if ((cps[i])->flags & CP_AUTH) + (cps[i])->Open(sp); + + if (sp->pp_phase == PHASE_NETWORK) { + /* Notify all NCPs. */ + for (i = 0; i < IDX_COUNT; i++) + if ((cps[i])->flags & CP_NCP) + (cps[i])->Open(sp); + } + + /* Send Up events to all started protos. */ + for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1) + if (sp->lcp.protos & mask && ((cps[i])->flags & CP_LCP) == 0) + (cps[i])->Up(sp); + + /* notify low-level driver of state change */ + if (sp->pp_chg) + sp->pp_chg(sp, (int)sp->pp_phase); + + if (sp->pp_phase == PHASE_NETWORK) + /* if no NCP is starting, close down */ + sppp_lcp_check_and_close(sp); +} + +static void +sppp_lcp_tld(struct sppp *sp) +{ + STDDCL; + int i; + u_long mask; + + sp->pp_phase = PHASE_TERMINATE; + + if (debug) + log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp), + sppp_phase_name(sp->pp_phase)); + + /* + * Take upper layers down. We send the Down event first and + * the Close second to prevent the upper layers from sending + * ``a flurry of terminate-request packets'', as the RFC + * describes it. 
+ */ + for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1) + if (sp->lcp.protos & mask && ((cps[i])->flags & CP_LCP) == 0) { + (cps[i])->Down(sp); + (cps[i])->Close(sp); + } +} + +static void +sppp_lcp_tls(struct sppp *sp) +{ + STDDCL; + + sp->pp_phase = PHASE_ESTABLISH; + + if (debug) + log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp), + sppp_phase_name(sp->pp_phase)); + + /* Notify lower layer if desired. */ + if (sp->pp_tls) + (sp->pp_tls)(sp); + else + (sp->pp_up)(sp); +} + +static void +sppp_lcp_tlf(struct sppp *sp) +{ + STDDCL; + + sp->pp_phase = PHASE_DEAD; + if (debug) + log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp), + sppp_phase_name(sp->pp_phase)); + + /* Notify lower layer if desired. */ + if (sp->pp_tlf) + (sp->pp_tlf)(sp); + else + (sp->pp_down)(sp); +} + +static void +sppp_lcp_scr(struct sppp *sp) +{ + char opt[6 /* magicnum */ + 4 /* mru */ + 5 /* chap */]; + int i = 0; + u_short authproto; + + if (sp->lcp.opts & (1 << LCP_OPT_MAGIC)) { + if (! sp->lcp.magic) +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + sp->lcp.magic = random(); +#else + sp->lcp.magic = time.tv_sec + time.tv_usec; +#endif + opt[i++] = LCP_OPT_MAGIC; + opt[i++] = 6; + opt[i++] = sp->lcp.magic >> 24; + opt[i++] = sp->lcp.magic >> 16; + opt[i++] = sp->lcp.magic >> 8; + opt[i++] = sp->lcp.magic; + } + + if (sp->lcp.opts & (1 << LCP_OPT_MRU)) { + opt[i++] = LCP_OPT_MRU; + opt[i++] = 4; + opt[i++] = sp->lcp.mru >> 8; + opt[i++] = sp->lcp.mru; + } + + if (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) { + authproto = sp->hisauth.proto; + opt[i++] = LCP_OPT_AUTH_PROTO; + opt[i++] = authproto == PPP_CHAP? 5: 4; + opt[i++] = authproto >> 8; + opt[i++] = authproto; + if (authproto == PPP_CHAP) + opt[i++] = CHAP_MD5; + } + + sp->confid[IDX_LCP] = ++sp->pp_seq; + sppp_cp_send (sp, PPP_LCP, CONF_REQ, sp->confid[IDX_LCP], i, &opt); +} + +/* + * Check the open NCPs, return true if at least one NCP is open. 
+ */ +static int +sppp_ncp_check(struct sppp *sp) +{ + int i, mask; + + for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1) + if (sp->lcp.protos & mask && (cps[i])->flags & CP_NCP) + return 1; + return 0; +} + +/* + * Re-check the open NCPs and see if we should terminate the link. + * Called by the NCPs during their tlf action handling. + */ +static void +sppp_lcp_check_and_close(struct sppp *sp) +{ + + if (sp->pp_phase < PHASE_NETWORK) + /* don't bother, we are already going down */ + return; + + if (sppp_ncp_check(sp)) + return; + + lcp.Close(sp); +} + /* + *--------------------------------------------------------------------------* + * * + * The IPCP implementation. * + * * + *--------------------------------------------------------------------------* + */ + +static void +sppp_ipcp_init(struct sppp *sp) +{ + sp->ipcp.opts = 0; + sp->ipcp.flags = 0; + sp->state[IDX_IPCP] = STATE_INITIAL; + sp->fail_counter[IDX_IPCP] = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + callout_handle_init(&sp->ch[IDX_IPCP]); +#endif +} + +static void +sppp_ipcp_up(struct sppp *sp) +{ + sppp_up_event(&ipcp, sp); +} + +static void +sppp_ipcp_down(struct sppp *sp) +{ + sppp_down_event(&ipcp, sp); +} + +static void +sppp_ipcp_open(struct sppp *sp) +{ + STDDCL; + u_long myaddr, hisaddr; + + sp->ipcp.flags &= ~(IPCP_HISADDR_SEEN|IPCP_MYADDR_SEEN|IPCP_MYADDR_DYN); + + sppp_get_ip_addrs(sp, &myaddr, &hisaddr, 0); + /* + * If we don't have his address, this probably means our + * interface doesn't want to talk IP at all. (This could + * be the case if somebody wants to speak only IPX, for + * example.) Don't open IPCP in this case. + */ + if (hisaddr == 0L) { + /* XXX this message should go away */ + if (debug) + log(LOG_DEBUG, SPP_FMT "ipcp_open(): no IP interface\n", + SPP_ARGS(ifp)); + return; + } + + if (myaddr == 0L) { + /* + * I don't have an assigned address, so i need to + * negotiate my address. 
+ */ + sp->ipcp.flags |= IPCP_MYADDR_DYN; + sp->ipcp.opts |= (1 << IPCP_OPT_ADDRESS); + } else + sp->ipcp.flags |= IPCP_MYADDR_SEEN; + sppp_open_event(&ipcp, sp); +} + +static void +sppp_ipcp_close(struct sppp *sp) +{ + sppp_close_event(&ipcp, sp); + if (sp->ipcp.flags & IPCP_MYADDR_DYN) + /* + * My address was dynamic, clear it again. + */ + sppp_set_ip_addr(sp, 0L); +} + +static void +sppp_ipcp_TO(void *cookie) +{ + sppp_to_event(&ipcp, (struct sppp *)cookie); +} + +/* + * Analyze a configure request. Return true if it was agreeable, and + * caused action sca, false if it has been rejected or nak'ed, and + * caused action scn. (The return value is used to make the state + * transition decision in the state automaton.) + */ +static int +sppp_ipcp_RCR(struct sppp *sp, struct lcp_header *h, int len) +{ + u_char *buf, *r, *p; + struct ifnet *ifp = &sp->pp_if; + int rlen, origlen, debug = ifp->if_flags & IFF_DEBUG; + u_long hisaddr, desiredaddr; + int gotmyaddr = 0; + + len -= 4; + origlen = len; + /* + * Make sure to allocate a buf that can at least hold a + * conf-nak with an `address' option. We might need it below. + */ + buf = r = MALLOC ((len < 6? 6: len), M_TEMP, M_NOWAIT); + if (! buf) + return (0); + + /* pass 1: see if we can recognize them */ + if (debug) + log(LOG_DEBUG, SPP_FMT "ipcp parse opts: ", + SPP_ARGS(ifp)); + p = (void*) (h+1); + for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) { + if (debug) + addlog(" %s ", sppp_ipcp_opt_name(*p)); + switch (*p) { +#ifdef notyet + case IPCP_OPT_COMPRESSION: + if (len >= 6 && p[1] >= 6) { + /* correctly formed compress option */ + continue; + } + if (debug) + addlog("[invalid] "); + break; +#endif + case IPCP_OPT_ADDRESS: + if (len >= 6 && p[1] == 6) { + /* correctly formed address option */ + continue; + } + if (debug) + addlog("[invalid] "); + break; + default: + /* Others not supported. */ + if (debug) + addlog("[rej] "); + break; + } + /* Add the option to rejected list. 
*/ + bcopy (p, r, p[1]); + r += p[1]; + rlen += p[1]; + } + if (rlen) { + if (debug) + addlog(" send conf-rej\n"); + sppp_cp_send (sp, PPP_IPCP, CONF_REJ, h->ident, rlen, buf); + return 0; + } else if (debug) + addlog("\n"); + + /* pass 2: parse option values */ + sppp_get_ip_addrs(sp, 0, &hisaddr, 0); + if (debug) + log(LOG_DEBUG, SPP_FMT "ipcp parse opt values: ", + SPP_ARGS(ifp)); + p = (void*) (h+1); + len = origlen; + for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) { + if (debug) + addlog(" %s ", sppp_ipcp_opt_name(*p)); + switch (*p) { +#ifdef notyet + case IPCP_OPT_COMPRESSION: + continue; +#endif + case IPCP_OPT_ADDRESS: + /* This is the address he wants in his end */ + desiredaddr = p[2] << 24 | p[3] << 16 | + p[4] << 8 | p[5]; + if (desiredaddr == hisaddr || + (hisaddr == 1 && desiredaddr != 0)) { + /* + * Peer's address is same as our value, + * or we have set it to 0.0.0.1 to + * indicate that we do not really care, + * this is agreeable. Gonna conf-ack + * it. + */ + if (debug) + addlog("%s [ack] ", + sppp_dotted_quad(hisaddr)); + /* record that we've seen it already */ + sp->ipcp.flags |= IPCP_HISADDR_SEEN; + continue; + } + /* + * The address wasn't agreeable. This is either + * he sent us 0.0.0.0, asking to assign him an + * address, or he send us another address not + * matching our value. Either case, we gonna + * conf-nak it with our value. + * XXX: we should "rej" if hisaddr == 0 + */ + if (debug) { + if (desiredaddr == 0) + addlog("[addr requested] "); + else + addlog("%s [not agreed] ", + sppp_dotted_quad(desiredaddr)); + + p[2] = hisaddr >> 24; + p[3] = hisaddr >> 16; + p[4] = hisaddr >> 8; + p[5] = hisaddr; + } + break; + } + /* Add the option to nak'ed list. 
*/ + bcopy (p, r, p[1]); + r += p[1]; + rlen += p[1]; + } + + /* + * If we are about to conf-ack the request, but haven't seen + * his address so far, gonna conf-nak it instead, with the + * `address' option present and our idea of his address being + * filled in there, to request negotiation of both addresses. + * + * XXX This can result in an endless req - nak loop if peer + * doesn't want to send us his address. Q: What should we do + * about it? XXX A: implement the max-failure counter. + */ + if (rlen == 0 && !(sp->ipcp.flags & IPCP_HISADDR_SEEN) && !gotmyaddr) { + buf[0] = IPCP_OPT_ADDRESS; + buf[1] = 6; + buf[2] = hisaddr >> 24; + buf[3] = hisaddr >> 16; + buf[4] = hisaddr >> 8; + buf[5] = hisaddr; + rlen = 6; + if (debug) + addlog("still need hisaddr "); + } + + if (rlen) { + if (debug) + addlog(" send conf-nak\n"); + sppp_cp_send (sp, PPP_IPCP, CONF_NAK, h->ident, rlen, buf); + } else { + if (debug) + addlog(" send conf-ack\n"); + sppp_cp_send (sp, PPP_IPCP, CONF_ACK, + h->ident, origlen, h+1); + } + + FREE(buf, M_TEMP); + return (rlen == 0); +} + +/* + * Analyze the IPCP Configure-Reject option list, and adjust our + * negotiation. + */ +static void +sppp_ipcp_RCN_rej(struct sppp *sp, struct lcp_header *h, int len) +{ + u_char *buf, *p; + struct ifnet *ifp = &sp->pp_if; + int debug = ifp->if_flags & IFF_DEBUG; + + len -= 4; + buf = MALLOC (len, M_TEMP, M_NOWAIT); + if (!buf) + return; + + if (debug) + log(LOG_DEBUG, SPP_FMT "ipcp rej opts: ", + SPP_ARGS(ifp)); + + p = (void*) (h+1); + for (; len > 1 && p[1]; len -= p[1], p += p[1]) { + if (debug) + addlog(" %s ", sppp_ipcp_opt_name(*p)); + switch (*p) { + case IPCP_OPT_ADDRESS: + /* + * Peer doesn't grok address option. This is + * bad. XXX Should we better give up here? + * XXX We could try old "addresses" option... 
+ */ + sp->ipcp.opts &= ~(1 << IPCP_OPT_ADDRESS); + break; +#ifdef notyet + case IPCP_OPT_COMPRESS: + sp->ipcp.opts &= ~(1 << IPCP_OPT_COMPRESS); + break; +#endif + } + } + if (debug) + addlog("\n"); + FREE(buf, M_TEMP); + return; +} + +/* + * Analyze the IPCP Configure-NAK option list, and adjust our + * negotiation. + */ +static void +sppp_ipcp_RCN_nak(struct sppp *sp, struct lcp_header *h, int len) +{ + u_char *buf, *p; + struct ifnet *ifp = &sp->pp_if; + int debug = ifp->if_flags & IFF_DEBUG; + u_long wantaddr; + + len -= 4; + buf = MALLOC (len, M_TEMP, M_NOWAIT); + if (!buf) + return; + + if (debug) + log(LOG_DEBUG, SPP_FMT "ipcp nak opts: ", + SPP_ARGS(ifp)); + + p = (void*) (h+1); + for (; len > 1 && p[1]; len -= p[1], p += p[1]) { + if (debug) + addlog(" %s ", sppp_ipcp_opt_name(*p)); + switch (*p) { + case IPCP_OPT_ADDRESS: + /* + * Peer doesn't like our local IP address. See + * if we can do something for him. We'll drop + * him our address then. + */ + if (len >= 6 && p[1] == 6) { + wantaddr = p[2] << 24 | p[3] << 16 | + p[4] << 8 | p[5]; + sp->ipcp.opts |= (1 << IPCP_OPT_ADDRESS); + if (debug) + addlog("[wantaddr %s] ", + sppp_dotted_quad(wantaddr)); + /* + * When doing dynamic address assignment, + * we accept his offer. Otherwise, we + * ignore it and thus continue to negotiate + * our already existing value. + * XXX: Bogus, if he said no once, he'll + * just say no again, might as well die. + */ + if (sp->ipcp.flags & IPCP_MYADDR_DYN) { + sppp_set_ip_addr(sp, wantaddr); + if (debug) + addlog("[agree] "); + sp->ipcp.flags |= IPCP_MYADDR_SEEN; + } + } + break; +#ifdef notyet + case IPCP_OPT_COMPRESS: + /* + * Peer wants different compression parameters. 
+ */ + break; +#endif + } + } + if (debug) + addlog("\n"); + FREE(buf, M_TEMP); + return; +} + +static void +sppp_ipcp_tlu(struct sppp *sp) +{ + /* we are up - notify isdn daemon */ + if (sp->pp_con) + sp->pp_con(sp); +} + +static void +sppp_ipcp_tld(struct sppp *sp) +{ +} + +static void +sppp_ipcp_tls(struct sppp *sp) +{ + /* indicate to LCP that it must stay alive */ + sp->lcp.protos |= (1 << IDX_IPCP); +} + +static void +sppp_ipcp_tlf(struct sppp *sp) +{ + /* we no longer need LCP */ + sp->lcp.protos &= ~(1 << IDX_IPCP); + sppp_lcp_check_and_close(sp); +} + +static void +sppp_ipcp_scr(struct sppp *sp) +{ + char opt[6 /* compression */ + 6 /* address */]; + u_long ouraddr; + int i = 0; + +#ifdef notyet + if (sp->ipcp.opts & (1 << IPCP_OPT_COMPRESSION)) { + opt[i++] = IPCP_OPT_COMPRESSION; + opt[i++] = 6; + opt[i++] = 0; /* VJ header compression */ + opt[i++] = 0x2d; /* VJ header compression */ + opt[i++] = max_slot_id; + opt[i++] = comp_slot_id; + } +#endif + + if (sp->ipcp.opts & (1 << IPCP_OPT_ADDRESS)) { + sppp_get_ip_addrs(sp, &ouraddr, 0, 0); + opt[i++] = IPCP_OPT_ADDRESS; + opt[i++] = 6; + opt[i++] = ouraddr >> 24; + opt[i++] = ouraddr >> 16; + opt[i++] = ouraddr >> 8; + opt[i++] = ouraddr; + } + + sp->confid[IDX_IPCP] = ++sp->pp_seq; + sppp_cp_send(sp, PPP_IPCP, CONF_REQ, sp->confid[IDX_IPCP], i, &opt); +} + + + /* + *--------------------------------------------------------------------------* + * * + * The CHAP implementation. * + * * + *--------------------------------------------------------------------------* + */ + +/* + * The authentication protocols don't employ a full-fledged state machine as + * the control protocols do, since they do have Open and Close events, but + * not Up and Down, nor are they explicitly terminated. 
Also, use of the + * authentication protocols may be different in both directions (this makes + * sense, think of a machine that never accepts incoming calls but only + * calls out, it doesn't require the called party to authenticate itself). + * + * Our state machine for the local authentication protocol (we are requesting + * the peer to authenticate) looks like: + * + * RCA- + * +--------------------------------------------+ + * V scn,tld| + * +--------+ Close +---------+ RCA+ + * | |<----------------------------------| |------+ + * +--->| Closed | TO* | Opened | sca | + * | | |-----+ +-------| |<-----+ + * | +--------+ irc | | +---------+ + * | ^ | | ^ + * | | | | | + * | | | | | + * | TO-| | | | + * | |tld TO+ V | | + * | | +------->+ | | + * | | | | | | + * | +--------+ V | | + * | | |<----+<--------------------+ | + * | | Req- | scr | + * | | Sent | | + * | | | | + * | +--------+ | + * | RCA- | | RCA+ | + * +------+ +------------------------------------------+ + * scn,tld sca,irc,ict,tlu + * + * + * with: + * + * Open: LCP reached authentication phase + * Close: LCP reached terminate phase + * + * RCA+: received reply (pap-req, chap-response), acceptable + * RCN: received reply (pap-req, chap-response), not acceptable + * TO+: timeout with restart counter >= 0 + * TO-: timeout with restart counter < 0 + * TO*: reschedule timeout for CHAP + * + * scr: send request packet (none for PAP, chap-challenge) + * sca: send ack packet (pap-ack, chap-success) + * scn: send nak packet (pap-nak, chap-failure) + * ict: initialize re-challenge timer (CHAP only) + * + * tlu: this-layer-up, LCP reaches network phase + * tld: this-layer-down, LCP enters terminate phase + * + * Note that in CHAP mode, after sending a new challenge, while the state + * automaton falls back into Req-Sent state, it doesn't signal a tld + * event to LCP, so LCP remains in network phase. 
Only after not getting + * any response (or after getting an unacceptable response), CHAP closes, + * causing LCP to enter terminate phase. + * + * With PAP, there is no initial request that can be sent. The peer is + * expected to send one based on the successful negotiation of PAP as + * the authentication protocol during the LCP option negotiation. + * + * Incoming authentication protocol requests (remote requests + * authentication, we are peer) don't employ a state machine at all, + * they are simply answered. Some peers [Ascend P50 firmware rev + * 4.50] react allergically when sending IPCP requests while they are + * still in authentication phase (thereby violating the standard that + * demands that these NCP packets are to be discarded), so we keep + * track of the peer demanding us to authenticate, and only proceed to + * phase network once we've seen a positive acknowledge for the + * authentication. + */ + +/* + * Handle incoming CHAP packets. + */ +void +sppp_chap_input(struct sppp *sp, struct mbuf *m) +{ + STDDCL; + struct lcp_header *h; + int len, x; + u_char *value, *name, digest[AUTHKEYLEN], dsize; + int value_len, name_len; + MD5_CTX ctx; + + len = m->m_pkthdr.len; + if (len < 4) { + if (debug) + log(LOG_DEBUG, + SPP_FMT "chap invalid packet length: %d bytes\n", + SPP_ARGS(ifp), len); + return; + } + h = mtod (m, struct lcp_header*); + if (len > ntohs (h->len)) + len = ntohs (h->len); + + switch (h->type) { + /* challenge, failure and success are his authproto */ + case CHAP_CHALLENGE: + value = 1 + (u_char*)(h+1); + value_len = value[-1]; + name = value + value_len; + name_len = len - value_len - 5; + if (name_len < 0) { + if (debug) { + log(LOG_DEBUG, + SPP_FMT "chap corrupted challenge " + "<%s id=0x%x len=%d", + SPP_ARGS(ifp), + sppp_auth_type_name(PPP_CHAP, h->type), + h->ident, ntohs(h->len)); + if (len > 4) + sppp_print_bytes((u_char*) (h+1), len-4); + addlog(">\n"); + } + break; + } + + if (debug) { + log(LOG_DEBUG, + SPP_FMT "chap input 
<%s id=0x%x len=%d name=", + SPP_ARGS(ifp), + sppp_auth_type_name(PPP_CHAP, h->type), h->ident, + ntohs(h->len)); + sppp_print_string((char*) name, name_len); + addlog(" value-size=%d value=", value_len); + sppp_print_bytes(value, value_len); + addlog(">\n"); + } + + /* Compute reply value. */ + MD5Init(&ctx); + MD5Update(&ctx, &h->ident, 1); + MD5Update(&ctx, sp->myauth.secret, + sppp_strnlen(sp->myauth.secret, AUTHKEYLEN)); + MD5Update(&ctx, value, value_len); + MD5Final(digest, &ctx); + dsize = sizeof digest; + + sppp_auth_send(&chap, sp, CHAP_RESPONSE, h->ident, + sizeof dsize, (const char *)&dsize, + sizeof digest, digest, + (size_t)sppp_strnlen(sp->myauth.name, AUTHNAMELEN), + sp->myauth.name, + 0); + break; + + case CHAP_SUCCESS: + if (debug) { + log(LOG_DEBUG, SPP_FMT "chap success", + SPP_ARGS(ifp)); + if (len > 4) { + addlog(": "); + sppp_print_string((char*)(h + 1), len - 4); + } + addlog("\n"); + } + x = splimp(); + sp->pp_flags &= ~PP_NEEDAUTH; + if (sp->myauth.proto == PPP_CHAP && + (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) && + (sp->lcp.protos & (1 << IDX_CHAP)) == 0) { + /* + * We are authenticator for CHAP but didn't + * complete yet. Leave it to tlu to proceed + * to network phase. 
+ */ + splx(x); + break; + } + splx(x); + sppp_phase_network(sp); + break; + + case CHAP_FAILURE: + if (debug) { + log(LOG_INFO, SPP_FMT "chap failure", + SPP_ARGS(ifp)); + if (len > 4) { + addlog(": "); + sppp_print_string((char*)(h + 1), len - 4); + } + addlog("\n"); + } else + log(LOG_INFO, SPP_FMT "chap failure\n", + SPP_ARGS(ifp)); + /* await LCP shutdown by authenticator */ + break; + + /* response is my authproto */ + case CHAP_RESPONSE: + value = 1 + (u_char*)(h+1); + value_len = value[-1]; + name = value + value_len; + name_len = len - value_len - 5; + if (name_len < 0) { + if (debug) { + log(LOG_DEBUG, + SPP_FMT "chap corrupted response " + "<%s id=0x%x len=%d", + SPP_ARGS(ifp), + sppp_auth_type_name(PPP_CHAP, h->type), + h->ident, ntohs(h->len)); + if (len > 4) + sppp_print_bytes((u_char*)(h+1), len-4); + addlog(">\n"); + } + break; + } + if (h->ident != sp->confid[IDX_CHAP]) { + if (debug) + log(LOG_DEBUG, + SPP_FMT "chap dropping response for old ID " + "(got %d, expected %d)\n", + SPP_ARGS(ifp), + h->ident, sp->confid[IDX_CHAP]); + break; + } + if (name_len != sppp_strnlen(sp->hisauth.name, AUTHNAMELEN) + || bcmp(name, sp->hisauth.name, name_len) != 0) { + log(LOG_INFO, SPP_FMT "chap response, his name ", + SPP_ARGS(ifp)); + sppp_print_string(name, name_len); + addlog(" != expected "); + sppp_print_string(sp->hisauth.name, + sppp_strnlen(sp->hisauth.name, AUTHNAMELEN)); + addlog("\n"); + } + if (debug) { + log(LOG_DEBUG, SPP_FMT "chap input(%s) " + "<%s id=0x%x len=%d name=", + SPP_ARGS(ifp), + sppp_state_name(sp->state[IDX_CHAP]), + sppp_auth_type_name(PPP_CHAP, h->type), + h->ident, ntohs (h->len)); + sppp_print_string((char*)name, name_len); + addlog(" value-size=%d value=", value_len); + sppp_print_bytes(value, value_len); + addlog(">\n"); + } + if (value_len != AUTHKEYLEN) { + if (debug) + log(LOG_DEBUG, + SPP_FMT "chap bad hash value length: " + "%d bytes, should be %d\n", + SPP_ARGS(ifp), value_len, + AUTHKEYLEN); + break; + } + + 
MD5Init(&ctx); + MD5Update(&ctx, &h->ident, 1); + MD5Update(&ctx, sp->hisauth.secret, + sppp_strnlen(sp->hisauth.secret, AUTHKEYLEN)); + MD5Update(&ctx, sp->myauth.challenge, AUTHKEYLEN); + MD5Final(digest, &ctx); + +#define FAILMSG "Failed..." +#define SUCCMSG "Welcome!" + + if (value_len != sizeof digest || + bcmp(digest, value, value_len) != 0) { + /* action scn, tld */ + sppp_auth_send(&chap, sp, CHAP_FAILURE, h->ident, + sizeof(FAILMSG) - 1, (u_char *)FAILMSG, + 0); + chap.tld(sp); + break; + } + /* action sca, perhaps tlu */ + if (sp->state[IDX_CHAP] == STATE_REQ_SENT || + sp->state[IDX_CHAP] == STATE_OPENED) + sppp_auth_send(&chap, sp, CHAP_SUCCESS, h->ident, + sizeof(SUCCMSG) - 1, (u_char *)SUCCMSG, + 0); + if (sp->state[IDX_CHAP] == STATE_REQ_SENT) { + sppp_cp_change_state(&chap, sp, STATE_OPENED); + chap.tlu(sp); + } + break; + + default: + /* Unknown CHAP packet type -- ignore. */ + if (debug) { + log(LOG_DEBUG, SPP_FMT "chap unknown input(%s) " + "<0x%x id=0x%xh len=%d", + SPP_ARGS(ifp), + sppp_state_name(sp->state[IDX_CHAP]), + h->type, h->ident, ntohs(h->len)); + if (len > 4) + sppp_print_bytes((u_char*)(h+1), len-4); + addlog(">\n"); + } + break; + + } +} + +static void +sppp_chap_init(struct sppp *sp) +{ + /* Chap doesn't have STATE_INITIAL at all. 
*/ + sp->state[IDX_CHAP] = STATE_CLOSED; + sp->fail_counter[IDX_CHAP] = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + callout_handle_init(&sp->ch[IDX_CHAP]); +#endif +} + +static void +sppp_chap_open(struct sppp *sp) +{ + if (sp->myauth.proto == PPP_CHAP && + (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0) { + /* we are authenticator for CHAP, start it */ + chap.scr(sp); + sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure; + sppp_cp_change_state(&chap, sp, STATE_REQ_SENT); + } + /* nothing to be done if we are peer, await a challenge */ +} + +static void +sppp_chap_close(struct sppp *sp) +{ + if (sp->state[IDX_CHAP] != STATE_CLOSED) + sppp_cp_change_state(&chap, sp, STATE_CLOSED); +} + +static void +sppp_chap_TO(void *cookie) +{ + struct sppp *sp = (struct sppp *)cookie; + STDDCL; + int s; + + s = splimp(); + if (debug) + log(LOG_DEBUG, SPP_FMT "chap TO(%s) rst_counter = %d\n", + SPP_ARGS(ifp), + sppp_state_name(sp->state[IDX_CHAP]), + sp->rst_counter[IDX_CHAP]); + + if (--sp->rst_counter[IDX_CHAP] < 0) + /* TO- event */ + switch (sp->state[IDX_CHAP]) { + case STATE_REQ_SENT: + chap.tld(sp); + sppp_cp_change_state(&chap, sp, STATE_CLOSED); + break; + } + else + /* TO+ (or TO*) event */ + switch (sp->state[IDX_CHAP]) { + case STATE_OPENED: + /* TO* event */ + sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure; + /* fall through */ + case STATE_REQ_SENT: + chap.scr(sp); + /* sppp_cp_change_state() will restart the timer */ + sppp_cp_change_state(&chap, sp, STATE_REQ_SENT); + break; + } + + splx(s); +} + +static void +sppp_chap_tlu(struct sppp *sp) +{ + STDDCL; + int i, x; + + i = 0; + sp->rst_counter[IDX_CHAP] = sp->lcp.max_configure; + + /* + * Some broken CHAP implementations (Conware CoNet, firmware + * 4.0.?) don't want to re-authenticate their CHAP once the + * initial challenge-response exchange has taken place. + * Provide for an option to avoid rechallenges. 
+ */ + if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0) { + /* + * Compute the re-challenge timeout. This will yield + * a number between 300 and 810 seconds. + */ + i = 300 + ((unsigned)(random() & 0xff00) >> 7); + TIMEOUT(chap.TO, (void *)sp, i * hz, sp->ch[IDX_CHAP]); + } + + if (debug) { + log(LOG_DEBUG, + SPP_FMT "chap %s, ", + SPP_ARGS(ifp), + sp->pp_phase == PHASE_NETWORK? "reconfirmed": "tlu"); + if ((sp->hisauth.flags & AUTHFLAG_NORECHALLENGE) == 0) + addlog("next re-challenge in %d seconds\n", i); + else + addlog("re-challenging supressed\n"); + } + + x = splimp(); + /* indicate to LCP that we need to be closed down */ + sp->lcp.protos |= (1 << IDX_CHAP); + + if (sp->pp_flags & PP_NEEDAUTH) { + /* + * Remote is authenticator, but his auth proto didn't + * complete yet. Defer the transition to network + * phase. + */ + splx(x); + return; + } + splx(x); + + /* + * If we are already in phase network, we are done here. This + * is the case if this is a dummy tlu event after a re-challenge. + */ + if (sp->pp_phase != PHASE_NETWORK) + sppp_phase_network(sp); +} + +static void +sppp_chap_tld(struct sppp *sp) +{ + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "chap tld\n", SPP_ARGS(ifp)); + UNTIMEOUT(chap.TO, (void *)sp, sp->ch[IDX_CHAP]); + sp->lcp.protos &= ~(1 << IDX_CHAP); + + lcp.Close(sp); +} + +static void +sppp_chap_scr(struct sppp *sp) +{ + u_long *ch, seed; + u_char clen; + + /* Compute random challenge. 
*/ + ch = (u_long *)sp->myauth.challenge; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + read_random(&seed, sizeof seed); +#else + { + struct timeval tv; + microtime(&tv); + seed = tv.tv_sec ^ tv.tv_usec; + } +#endif + ch[0] = seed ^ random(); + ch[1] = seed ^ random(); + ch[2] = seed ^ random(); + ch[3] = seed ^ random(); + clen = AUTHKEYLEN; + + sp->confid[IDX_CHAP] = ++sp->pp_seq; + + sppp_auth_send(&chap, sp, CHAP_CHALLENGE, sp->confid[IDX_CHAP], + sizeof clen, (const char *)&clen, + (size_t)AUTHKEYLEN, sp->myauth.challenge, + (size_t)sppp_strnlen(sp->myauth.name, AUTHNAMELEN), + sp->myauth.name, + 0); +} + /* + *--------------------------------------------------------------------------* + * * + * The PAP implementation. * + * * + *--------------------------------------------------------------------------* + */ +/* + * For PAP, we need to keep a little state also if we are the peer, not the + * authenticator. This is since we don't get a request to authenticate, but + * have to repeatedly authenticate ourself until we got a response (or the + * retry counter is expired). + */ + +/* + * Handle incoming PAP packets. 
*/ +static void +sppp_pap_input(struct sppp *sp, struct mbuf *m) +{ + STDDCL; + struct lcp_header *h; + int len, x; + u_char *name, *passwd, mlen; + int name_len, passwd_len; + + len = m->m_pkthdr.len; + if (len < 5) { + if (debug) + log(LOG_DEBUG, + SPP_FMT "pap invalid packet length: %d bytes\n", + SPP_ARGS(ifp), len); + return; + } + h = mtod (m, struct lcp_header*); + if (len > ntohs (h->len)) + len = ntohs (h->len); + switch (h->type) { + /* PAP request is my authproto */ + case PAP_REQ: + name = 1 + (u_char*)(h+1); + name_len = name[-1]; + passwd = name + name_len + 1; + if (name_len > len - 6 || + (passwd_len = passwd[-1]) > len - 6 - name_len) { + if (debug) { + log(LOG_DEBUG, SPP_FMT "pap corrupted input " + "<%s id=0x%x len=%d", + SPP_ARGS(ifp), + sppp_auth_type_name(PPP_PAP, h->type), + h->ident, ntohs(h->len)); + if (len > 4) + sppp_print_bytes((u_char*)(h+1), len-4); + addlog(">\n"); + } + break; + } + if (debug) { + log(LOG_DEBUG, SPP_FMT "pap input(%s) " + "<%s id=0x%x len=%d name=", + SPP_ARGS(ifp), + sppp_state_name(sp->state[IDX_PAP]), + sppp_auth_type_name(PPP_PAP, h->type), + h->ident, ntohs(h->len)); + sppp_print_string((char*)name, name_len); + addlog(" passwd="); + sppp_print_string((char*)passwd, passwd_len); + addlog(">\n"); + } + if (name_len > AUTHNAMELEN || + passwd_len > AUTHKEYLEN || + bcmp(name, sp->hisauth.name, name_len) != 0 || + bcmp(passwd, sp->hisauth.secret, passwd_len) != 0) { + /* action scn, tld */ + mlen = sizeof(FAILMSG) - 1; + sppp_auth_send(&pap, sp, PAP_NAK, h->ident, + sizeof mlen, (const char *)&mlen, + sizeof(FAILMSG) - 1, (u_char *)FAILMSG, + 0); + pap.tld(sp); + break; + } + /* action sca, perhaps tlu */ + if (sp->state[IDX_PAP] == STATE_REQ_SENT || + sp->state[IDX_PAP] == STATE_OPENED) { + mlen = sizeof(SUCCMSG) - 1; + sppp_auth_send(&pap, sp, PAP_ACK, h->ident, + sizeof mlen, (const char *)&mlen, + sizeof(SUCCMSG) - 1, (u_char *)SUCCMSG, + 0); + } + if (sp->state[IDX_PAP] == STATE_REQ_SENT) { + 
sppp_cp_change_state(&pap, sp, STATE_OPENED); + pap.tlu(sp); + } + break; + + /* ack and nak are his authproto */ + case PAP_ACK: + UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch); + if (debug) { + log(LOG_DEBUG, SPP_FMT "pap success", + SPP_ARGS(ifp)); + name_len = *((char *)h); + if (len > 5 && name_len) { + addlog(": "); + sppp_print_string((char*)(h+1), name_len); + } + addlog("\n"); + } + x = splimp(); + sp->pp_flags &= ~PP_NEEDAUTH; + if (sp->myauth.proto == PPP_PAP && + (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) && + (sp->lcp.protos & (1 << IDX_PAP)) == 0) { + /* + * We are authenticator for PAP but didn't + * complete yet. Leave it to tlu to proceed + * to network phase. + */ + splx(x); + break; + } + splx(x); + sppp_phase_network(sp); + break; + + case PAP_NAK: + UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch); + if (debug) { + log(LOG_INFO, SPP_FMT "pap failure", + SPP_ARGS(ifp)); + name_len = *((char *)h); + if (len > 5 && name_len) { + addlog(": "); + sppp_print_string((char*)(h+1), name_len); + } + addlog("\n"); + } else + log(LOG_INFO, SPP_FMT "pap failure\n", + SPP_ARGS(ifp)); + /* await LCP shutdown by authenticator */ + break; + + default: + /* Unknown PAP packet type -- ignore. */ + if (debug) { + log(LOG_DEBUG, SPP_FMT "pap corrupted input " + "<0x%x id=0x%x len=%d", + SPP_ARGS(ifp), + h->type, h->ident, ntohs(h->len)); + if (len > 4) + sppp_print_bytes((u_char*)(h+1), len-4); + addlog(">\n"); + } + break; + + } +} + +static void +sppp_pap_init(struct sppp *sp) +{ + /* PAP doesn't have STATE_INITIAL at all. 
*/ + sp->state[IDX_PAP] = STATE_CLOSED; + sp->fail_counter[IDX_PAP] = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + callout_handle_init(&sp->ch[IDX_PAP]); + callout_handle_init(&sp->pap_my_to_ch); +#endif +} + +static void +sppp_pap_open(struct sppp *sp) +{ + if (sp->hisauth.proto == PPP_PAP && + (sp->lcp.opts & (1 << LCP_OPT_AUTH_PROTO)) != 0) { + /* we are authenticator for PAP, start our timer */ + sp->rst_counter[IDX_PAP] = sp->lcp.max_configure; + sppp_cp_change_state(&pap, sp, STATE_REQ_SENT); + } + if (sp->myauth.proto == PPP_PAP) { + /* we are peer, send a request, and start a timer */ + pap.scr(sp); + TIMEOUT(sppp_pap_my_TO, (void *)sp, sp->lcp.timeout, + sp->pap_my_to_ch); + } +} + +static void +sppp_pap_close(struct sppp *sp) +{ + if (sp->state[IDX_PAP] != STATE_CLOSED) + sppp_cp_change_state(&pap, sp, STATE_CLOSED); +} + +/* + * That's the timeout routine if we are authenticator. Since the + * authenticator is basically passive in PAP, we can't do much here. + */ +static void +sppp_pap_TO(void *cookie) +{ + struct sppp *sp = (struct sppp *)cookie; + STDDCL; + int s; + + s = splimp(); + if (debug) + log(LOG_DEBUG, SPP_FMT "pap TO(%s) rst_counter = %d\n", + SPP_ARGS(ifp), + sppp_state_name(sp->state[IDX_PAP]), + sp->rst_counter[IDX_PAP]); + + if (--sp->rst_counter[IDX_PAP] < 0) + /* TO- event */ + switch (sp->state[IDX_PAP]) { + case STATE_REQ_SENT: + pap.tld(sp); + sppp_cp_change_state(&pap, sp, STATE_CLOSED); + break; + } + else + /* TO+ event, not very much we could do */ + switch (sp->state[IDX_PAP]) { + case STATE_REQ_SENT: + /* sppp_cp_change_state() will restart the timer */ + sppp_cp_change_state(&pap, sp, STATE_REQ_SENT); + break; + } + + splx(s); +} + +/* + * That's the timeout handler if we are peer. Since the peer is active, + * we need to retransmit our PAP request since it is apparently lost. + * XXX We should impose a max counter. 
+ */ +static void +sppp_pap_my_TO(void *cookie) +{ + struct sppp *sp = (struct sppp *)cookie; + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "pap peer TO\n", + SPP_ARGS(ifp)); + + pap.scr(sp); +} + +static void +sppp_pap_tlu(struct sppp *sp) +{ + STDDCL; + int x; + + sp->rst_counter[IDX_PAP] = sp->lcp.max_configure; + + if (debug) + log(LOG_DEBUG, SPP_FMT "%s tlu\n", + SPP_ARGS(ifp), pap.name); + + x = splimp(); + /* indicate to LCP that we need to be closed down */ + sp->lcp.protos |= (1 << IDX_PAP); + + if (sp->pp_flags & PP_NEEDAUTH) { + /* + * Remote is authenticator, but his auth proto didn't + * complete yet. Defer the transition to network + * phase. + */ + splx(x); + return; + } + splx(x); + sppp_phase_network(sp); +} + +static void +sppp_pap_tld(struct sppp *sp) +{ + STDDCL; + + if (debug) + log(LOG_DEBUG, SPP_FMT "pap tld\n", SPP_ARGS(ifp)); + UNTIMEOUT(pap.TO, (void *)sp, sp->ch[IDX_PAP]); + UNTIMEOUT(sppp_pap_my_TO, (void *)sp, sp->pap_my_to_ch); + sp->lcp.protos &= ~(1 << IDX_PAP); + + lcp.Close(sp); +} + +static void +sppp_pap_scr(struct sppp *sp) +{ + u_char idlen, pwdlen; + + sp->confid[IDX_PAP] = ++sp->pp_seq; + pwdlen = sppp_strnlen(sp->myauth.secret, AUTHKEYLEN); + idlen = sppp_strnlen(sp->myauth.name, AUTHNAMELEN); + + sppp_auth_send(&pap, sp, PAP_REQ, sp->confid[IDX_PAP], + sizeof idlen, (const char *)&idlen, + (size_t)idlen, sp->myauth.name, + sizeof pwdlen, (const char *)&pwdlen, + (size_t)pwdlen, sp->myauth.secret, + 0); +} + /* + * Random miscellaneous functions. + */ + +/* + * Send a PAP or CHAP proto packet. + * + * Varadic function, each of the elements for the ellipsis is of type + * ``size_t mlen, const u_char *msg''. Processing will stop iff + * mlen == 0. + * NOTE: never declare variadic functions with types subject to type + * promotion (i.e. u_char). This is asking for big trouble depending + * on the architecture you are on... 
+ */ + +static void +sppp_auth_send(const struct cp *cp, struct sppp *sp, + unsigned int type, unsigned int id, + ...) +{ + STDDCL; + struct ppp_header *h; + struct lcp_header *lh; + struct mbuf *m; + u_char *p; + int len; + unsigned int mlen; + const char *msg; + va_list ap; + + MGETHDR (m, M_DONTWAIT, MT_DATA); + if (! m) + return; + m->m_pkthdr.rcvif = 0; + + h = mtod (m, struct ppp_header*); + h->address = PPP_ALLSTATIONS; /* broadcast address */ + h->control = PPP_UI; /* Unnumbered Info */ + h->protocol = htons(cp->proto); + + lh = (struct lcp_header*)(h + 1); + lh->type = type; + lh->ident = id; + p = (u_char*) (lh+1); + + va_start(ap, id); + len = 0; + + while ((mlen = (unsigned int)va_arg(ap, size_t)) != 0) { + msg = va_arg(ap, const char *); + len += mlen; + if (len > MHLEN - PPP_HEADER_LEN - LCP_HEADER_LEN) { + va_end(ap); + m_freem(m); + return; + } + + bcopy(msg, p, mlen); + p += mlen; + } + va_end(ap); + + m->m_pkthdr.len = m->m_len = PPP_HEADER_LEN + LCP_HEADER_LEN + len; + lh->len = htons (LCP_HEADER_LEN + len); + + if (debug) { + log(LOG_DEBUG, SPP_FMT "%s output <%s id=0x%x len=%d", + SPP_ARGS(ifp), cp->name, + sppp_auth_type_name(cp->proto, lh->type), + lh->ident, ntohs(lh->len)); + if (len) + sppp_print_bytes((u_char*) (lh+1), len); + addlog(">\n"); + } + if (IF_QFULL (&sp->pp_cpq)) { + IF_DROP (&sp->pp_fastq); + IF_DROP (&ifp->if_snd); + m_freem (m); + ++ifp->if_oerrors; + } else + IF_ENQUEUE (&sp->pp_cpq, m); + if (! (ifp->if_flags & IFF_OACTIVE)) + (*ifp->if_start) (ifp); + ifp->if_obytes += m->m_pkthdr.len + 3; +} + +/* + * Flush interface queue. + */ +static void +sppp_qflush(struct ifqueue *ifq) +{ + struct mbuf *m, *n; + + n = ifq->ifq_head; + while ((m = n)) { + n = m->m_act; + m_freem (m); + } + ifq->ifq_head = 0; + ifq->ifq_tail = 0; + ifq->ifq_len = 0; +} + +/* + * Send keepalive packets, every 10 seconds. 
+ */ +static void +sppp_keepalive(void *dummy) +{ + struct sppp *sp; + int s; + + s = splimp(); + for (sp=spppq; sp; sp=sp->pp_next) { + struct ifnet *ifp = &sp->pp_if; + + /* Keepalive mode disabled or channel down? */ + if (! (sp->pp_flags & PP_KEEPALIVE) || + ! (ifp->if_flags & IFF_RUNNING)) + continue; + + /* No keepalive in PPP mode if LCP not opened yet. */ + if (! (sp->pp_flags & PP_CISCO) && + sp->pp_phase < PHASE_AUTHENTICATE) + continue; + + if (sp->pp_alivecnt == MAXALIVECNT) { + /* No keepalive packets got. Stop the interface. */ + printf (SPP_FMT "down\n", SPP_ARGS(ifp)); + if_down (ifp); + sppp_qflush (&sp->pp_cpq); + if (! (sp->pp_flags & PP_CISCO)) { + /* XXX */ + /* Shut down the PPP link. */ + lcp.Down(sp); + /* Initiate negotiation. XXX */ + lcp.Up(sp); + } + } + if (sp->pp_alivecnt <= MAXALIVECNT) + ++sp->pp_alivecnt; + if (sp->pp_flags & PP_CISCO) + sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq, + sp->pp_rseq); + else if (sp->pp_phase >= PHASE_AUTHENTICATE) { + long nmagic = htonl (sp->lcp.magic); + sp->lcp.echoid = ++sp->pp_seq; + sppp_cp_send (sp, PPP_LCP, ECHO_REQ, + sp->lcp.echoid, 4, &nmagic); + } + } + splx(s); + TIMEOUT(sppp_keepalive, 0, hz * 10, keepalive_ch); +} + +/* + * Get both IP addresses. + */ +static void +sppp_get_ip_addrs(struct sppp *sp, u_long *src, u_long *dst, u_long *srcmask) +{ + struct ifnet *ifp = &sp->pp_if; + struct ifaddr *ifa; + struct sockaddr_in *si, *sm; + u_long ssrc, ddst; + + sm = NULL; + ssrc = ddst = 0L; + /* + * Pick the first AF_INET address from the list, + * aliases don't make any sense on a p2p link anyway. 
+ */ + si = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) +#elif defined(__NetBSD__) || defined (__OpenBSD__) + for (ifa = ifp->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) +#else + for (ifa = ifp->if_addrlist; + ifa; + ifa = ifa->ifa_next) +#endif + if (ifa->ifa_addr->sa_family == AF_INET) { + si = (struct sockaddr_in *)ifa->ifa_addr; + sm = (struct sockaddr_in *)ifa->ifa_netmask; + if (si) + break; + } + if (ifa) { + if (si && si->sin_addr.s_addr) { + ssrc = si->sin_addr.s_addr; + if (srcmask) + *srcmask = ntohl(sm->sin_addr.s_addr); + } + + si = (struct sockaddr_in *)ifa->ifa_dstaddr; + if (si && si->sin_addr.s_addr) + ddst = si->sin_addr.s_addr; + } + + if (dst) *dst = ntohl(ddst); + if (src) *src = ntohl(ssrc); +} + +/* + * Set my IP address. Must be called at splimp. + */ +static void +sppp_set_ip_addr(struct sppp *sp, u_long src) +{ + STDDCL; + struct ifaddr *ifa; + struct sockaddr_in *si; + + /* + * Pick the first AF_INET address from the list, + * aliases don't make any sense on a p2p link anyway. 
+ */ + si = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) +#elif defined(__NetBSD__) || defined (__OpenBSD__) + for (ifa = ifp->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) +#else + for (ifa = ifp->if_addrlist; + ifa; + ifa = ifa->ifa_next) +#endif + { + if (ifa->ifa_addr->sa_family == AF_INET) + { + si = (struct sockaddr_in *)ifa->ifa_addr; + if (si) + break; + } + } + + if (ifa && si) + { + int error; +#if __NetBSD_Version__ >= 103080000 + struct sockaddr_in new_sin = *si; + + new_sin.sin_addr.s_addr = htonl(src); + error = in_ifinit(ifp, ifatoia(ifa), &new_sin, 1); + if(debug && error) + { + log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: in_ifinit " + " failed, error=%d\n", SPP_ARGS(ifp), error); + } +#else + /* delete old route */ + error = rtinit(ifa, (int)RTM_DELETE, RTF_HOST); + if(debug && error) + { + log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: rtinit DEL failed, error=%d\n", + SPP_ARGS(ifp), error); + } + + /* set new address */ + si->sin_addr.s_addr = htonl(src); + + /* add new route */ + error = rtinit(ifa, (int)RTM_ADD, RTF_HOST); + if (debug && error) + { + log(LOG_DEBUG, SPP_FMT "sppp_set_ip_addr: rtinit ADD failed, error=%d", + SPP_ARGS(ifp), error); + } +#endif + } +} + +static int +sppp_params(struct sppp *sp, u_long cmd, void *data) +{ + u_long subcmd; + struct ifreq *ifr = (struct ifreq *)data; + struct spppreq spr; + + /* + * ifr->ifr_data is supposed to point to a struct spppreq. + * Check the cmd word first before attempting to fetch all the + * data. + */ + if ((subcmd = fuword(ifr->ifr_data)) == -1) + return EFAULT; + + if (copyin((caddr_t)ifr->ifr_data, &spr, sizeof spr) != 0) + return EFAULT; + + switch (subcmd) { + case SPPPIOGDEFS: + if (cmd != SIOCGIFGENERIC) + return EINVAL; + /* + * We copy over the entire current state, but clean + * out some of the stuff we don't wanna pass up. + * Remember, SIOCGIFGENERIC is unprotected, and can be + * called by any user. 
No need to ever get PAP or + * CHAP secrets back to userland anyway. + */ + bcopy(sp, &spr.defs, sizeof(struct sppp)); + bzero(spr.defs.myauth.secret, AUTHKEYLEN); + bzero(spr.defs.myauth.challenge, AUTHKEYLEN); + bzero(spr.defs.hisauth.secret, AUTHKEYLEN); + bzero(spr.defs.hisauth.challenge, AUTHKEYLEN); + return copyout(&spr, (caddr_t)ifr->ifr_data, sizeof spr); + + case SPPPIOSDEFS: + if (cmd != SIOCSIFGENERIC) + return EINVAL; + /* + * We have a very specific idea of which fields we allow + * being passed back from userland, so to not clobber our + * current state. For one, we only allow setting + * anything if LCP is in dead phase. Once the LCP + * negotiations started, the authentication settings must + * not be changed again. (The administrator can force an + * ifconfig down in order to get LCP back into dead + * phase.) + * + * Also, we only allow for authentication parameters to be + * specified. + * + * XXX Should allow to set or clear pp_flags. + * + * Finally, if the respective authentication protocol to + * be used is set differently than 0, but the secret is + * passed as all zeros, we don't trash the existing secret. + * This allows an administrator to change the system name + * only without clobbering the secret (which he didn't get + * back in a previous SPPPIOGDEFS call). However, the + * secrets are cleared if the authentication protocol is + * reset to 0. 
+ */ + if (sp->pp_phase != PHASE_DEAD) + return EBUSY; + + if ((spr.defs.myauth.proto != 0 && spr.defs.myauth.proto != PPP_PAP && + spr.defs.myauth.proto != PPP_CHAP) || + (spr.defs.hisauth.proto != 0 && spr.defs.hisauth.proto != PPP_PAP && + spr.defs.hisauth.proto != PPP_CHAP)) + return EINVAL; + + if (spr.defs.myauth.proto == 0) + /* resetting myauth */ + bzero(&sp->myauth, sizeof sp->myauth); + else { + /* setting/changing myauth */ + sp->myauth.proto = spr.defs.myauth.proto; + bcopy(spr.defs.myauth.name, sp->myauth.name, AUTHNAMELEN); + if (spr.defs.myauth.secret[0] != '\0') + bcopy(spr.defs.myauth.secret, sp->myauth.secret, + AUTHKEYLEN); + } + if (spr.defs.hisauth.proto == 0) + /* resetting hisauth */ + bzero(&sp->hisauth, sizeof sp->hisauth); + else { + /* setting/changing hisauth */ + sp->hisauth.proto = spr.defs.hisauth.proto; + sp->hisauth.flags = spr.defs.hisauth.flags; + bcopy(spr.defs.hisauth.name, sp->hisauth.name, AUTHNAMELEN); + if (spr.defs.hisauth.secret[0] != '\0') + bcopy(spr.defs.hisauth.secret, sp->hisauth.secret, + AUTHKEYLEN); + } + break; + + default: + return EINVAL; + } + + return 0; +} + +static void +sppp_phase_network(struct sppp *sp) +{ + STDDCL; + int i; + u_long mask; + + sp->pp_phase = PHASE_NETWORK; + + if (debug) + log(LOG_DEBUG, SPP_FMT "phase %s\n", SPP_ARGS(ifp), + sppp_phase_name(sp->pp_phase)); + + /* Notify NCPs now. */ + for (i = 0; i < IDX_COUNT; i++) + if ((cps[i])->flags & CP_NCP) + (cps[i])->Open(sp); + + /* Send Up events to all NCPs. 
*/ + for (i = 0, mask = 1; i < IDX_COUNT; i++, mask <<= 1) + if (sp->lcp.protos & mask && ((cps[i])->flags & CP_NCP)) + (cps[i])->Up(sp); + + /* if no NCP is starting, all this was in vain, close down */ + sppp_lcp_check_and_close(sp); +} + + +static const char * +sppp_cp_type_name(u_char type) +{ + static char buf[12]; + switch (type) { + case CONF_REQ: return "conf-req"; + case CONF_ACK: return "conf-ack"; + case CONF_NAK: return "conf-nak"; + case CONF_REJ: return "conf-rej"; + case TERM_REQ: return "term-req"; + case TERM_ACK: return "term-ack"; + case CODE_REJ: return "code-rej"; + case PROTO_REJ: return "proto-rej"; + case ECHO_REQ: return "echo-req"; + case ECHO_REPLY: return "echo-reply"; + case DISC_REQ: return "discard-req"; + } + snprintf (buf, sizeof(buf), "0x%x", type); + return buf; +} + +static const char * +sppp_auth_type_name(u_short proto, u_char type) +{ + static char buf[12]; + switch (proto) { + case PPP_CHAP: + switch (type) { + case CHAP_CHALLENGE: return "challenge"; + case CHAP_RESPONSE: return "response"; + case CHAP_SUCCESS: return "success"; + case CHAP_FAILURE: return "failure"; + } + case PPP_PAP: + switch (type) { + case PAP_REQ: return "req"; + case PAP_ACK: return "ack"; + case PAP_NAK: return "nak"; + } + } + snprintf (buf, sizeof(buf), "0x%x", type); + return buf; +} + +static const char * +sppp_lcp_opt_name(u_char opt) +{ + static char buf[12]; + switch (opt) { + case LCP_OPT_MRU: return "mru"; + case LCP_OPT_ASYNC_MAP: return "async-map"; + case LCP_OPT_AUTH_PROTO: return "auth-proto"; + case LCP_OPT_QUAL_PROTO: return "qual-proto"; + case LCP_OPT_MAGIC: return "magic"; + case LCP_OPT_PROTO_COMP: return "proto-comp"; + case LCP_OPT_ADDR_COMP: return "addr-comp"; + } + snprintf (buf, sizeof(buf), "0x%x", opt); + return buf; +} + +static const char * +sppp_ipcp_opt_name(u_char opt) +{ + static char buf[12]; + switch (opt) { + case IPCP_OPT_ADDRESSES: return "addresses"; + case IPCP_OPT_COMPRESSION: return "compression"; + case 
IPCP_OPT_ADDRESS: return "address"; + } + snprintf (buf, sizeof(buf), "0x%x", opt); + return buf; +} + +static const char * +sppp_state_name(int state) +{ + switch (state) { + case STATE_INITIAL: return "initial"; + case STATE_STARTING: return "starting"; + case STATE_CLOSED: return "closed"; + case STATE_STOPPED: return "stopped"; + case STATE_CLOSING: return "closing"; + case STATE_STOPPING: return "stopping"; + case STATE_REQ_SENT: return "req-sent"; + case STATE_ACK_RCVD: return "ack-rcvd"; + case STATE_ACK_SENT: return "ack-sent"; + case STATE_OPENED: return "opened"; + } + return "illegal"; +} + +static const char * +sppp_phase_name(enum ppp_phase phase) +{ + switch (phase) { + case PHASE_DEAD: return "dead"; + case PHASE_ESTABLISH: return "establish"; + case PHASE_TERMINATE: return "terminate"; + case PHASE_AUTHENTICATE: return "authenticate"; + case PHASE_NETWORK: return "network"; + } + return "illegal"; +} + +static const char * +sppp_proto_name(u_short proto) +{ + static char buf[12]; + switch (proto) { + case PPP_LCP: return "lcp"; + case PPP_IPCP: return "ipcp"; + case PPP_PAP: return "pap"; + case PPP_CHAP: return "chap"; + } + snprintf(buf, sizeof(buf), "0x%x", (unsigned)proto); + return buf; +} + +static void +sppp_print_bytes(const u_char *p, u_short len) +{ + addlog(" %02x", *p++); + while (--len > 0) + addlog("-%02x", *p++); +} + +static void +sppp_print_string(const char *p, u_short len) +{ + u_char c; + + while (len-- > 0) { + c = *p++; + /* + * Print only ASCII chars directly. RFC 1994 recommends + * using only them, but we don't rely on it. 
*/ + if (c < ' ' || c > '~') + addlog("\\x%x", c); + else + addlog("%c", c); + } +} + +static const char * +sppp_dotted_quad(u_long addr) +{ + static char s[16]; + sprintf(s, "%d.%d.%d.%d", + (int)((addr >> 24) & 0xff), + (int)((addr >> 16) & 0xff), + (int)((addr >> 8) & 0xff), + (int)(addr & 0xff)); + return s; +} + +static int +sppp_strnlen(u_char *p, int max) +{ + int len; + + for (len = 0; len < max && *p; ++p) + ++len; + return len; +} + +/* a dummy, used to drop uninteresting events */ +static void +sppp_null(struct sppp *unused) +{ + /* do just nothing */ +} diff --git a/bsd/net/if_tun.c b/bsd/net/if_tun.c new file mode 100644 index 000000000..833251c07 --- /dev/null +++ b/bsd/net/if_tun.c @@ -0,0 +1,765 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */ + +/* + * Copyright (c) 1988, Julian Onions + * Nottingham University 1987. + * + * This source may be freely distributed, however I would be interested + * in any changes that are made. 
+ * + * This driver takes packets off the IP i/f and hands them up to a + * user process to have its wicked way with. This driver has it's + * roots in a similar driver written by Phil Cockcroft (formerly) at + * UCL. This driver is based much more on read/write/poll mode of + * operation though. + */ + +#include "tun.h" +#if NTUN > 0 + +#include "opt_devfs.h" +#include "opt_inet.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if DEVFS +#include +#endif /*DEVFS*/ +#include +#include +#include + +#include +#include +#include +#include + +#if INET +#include +#include +#endif + +#if INET6 +#include +#include +#include +#endif /* INET6 */ + +#if NS +#include +#include +#endif + +#include "bpfilter.h" +#if NBPFILTER > 0 +#include +#endif + +#include +#include + +static void tunattach __P((void *)); +PSEUDO_SET(tunattach, if_tun); + +#define TUNDEBUG if (tundebug) printf +static int tundebug = 0; +SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, ""); + +static struct tun_softc tunctl[NTUN]; + +static int tunoutput __P((struct ifnet *, struct mbuf *, struct sockaddr *, + struct rtentry *rt)); +static int tunifioctl __P((struct ifnet *, u_long, caddr_t)); +static int tuninit __P((int, int, u_char)); + +static d_open_t tunopen; +static d_close_t tunclose; +static d_read_t tunread; +static d_write_t tunwrite; +static d_ioctl_t tunioctl; +static d_poll_t tunpoll; + +#define CDEV_MAJOR 52 +static struct cdevsw tun_cdevsw = { + tunopen, tunclose, tunread, tunwrite, + tunioctl, nullstop, noreset, nodevtotty, + tunpoll, nommap, nostrategy, "tun", NULL, -1 +}; + + +static int tun_devsw_installed; +#if DEVFS +static void *tun_devfs_token[NTUN]; +#endif + +#define minor_val(n) ((((n) & ~0xff) << 8) | ((n) & 0xff)) +#define dev_val(n) (((n) >> 8) | ((n) & 0xff)) + +static void +tunattach(dummy) + void *dummy; +{ + register int i; + struct ifnet *ifp; + dev_t dev; + + if ( 
tun_devsw_installed ) + return; + dev = makedev(CDEV_MAJOR, 0); + cdevsw_add(&dev, &tun_cdevsw, NULL); + tun_devsw_installed = 1; + for ( i = 0; i < NTUN; i++ ) { +#if DEVFS + tun_devfs_token[i] = devfs_add_devswf(&tun_cdevsw, minor_val(i), + DV_CHR, UID_UUCP, + GID_DIALER, 0600, + "tun%d", i); +#endif + tunctl[i].tun_flags = TUN_INITED; + + ifp = &tunctl[i].tun_if; + ifp->if_unit = i; + ifp->if_name = "tun"; + ifp->if_family = APPLE_IF_FAM_TUN; + ifp->if_mtu = TUNMTU; + ifp->if_ioctl = tunifioctl; + ifp->if_output = tunoutput; + ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; + ifp->if_type = IFT_PPP; /* necessary init value for IPv6 lladdr auto conf */ + ifp->if_snd.ifq_maxlen = ifqmaxlen; + if_attach(ifp); +#if NBPFILTER > 0 + bpfattach(ifp, DLT_NULL, sizeof(u_int)); +#endif + } +} + +/* + * tunnel open - must be superuser & the device must be + * configured in + */ +static int +tunopen(dev, flag, mode, p) + dev_t dev; + int flag, mode; + struct proc *p; +{ + struct ifnet *ifp; + struct tun_softc *tp; + register int unit, error; + + error = suser(p->p_ucred, &p->p_acflag); + if (error) + return (error); + + if ((unit = dev_val(minor(dev))) >= NTUN) + return (ENXIO); + tp = &tunctl[unit]; + if (tp->tun_flags & TUN_OPEN) + return EBUSY; + ifp = &tp->tun_if; + tp->tun_flags |= TUN_OPEN; + TUNDEBUG("%s%d: open\n", ifp->if_name, ifp->if_unit); + return (0); +} + +/* + * tunclose - close the device - mark i/f down & delete + * routing info + */ +static int +tunclose(dev, foo, bar, p) + dev_t dev; + int foo; + int bar; + struct proc *p; +{ + register int unit = dev_val(minor(dev)), s; + struct tun_softc *tp = &tunctl[unit]; + struct ifnet *ifp = &tp->tun_if; + struct mbuf *m; + + tp->tun_flags &= ~TUN_OPEN; + + /* + * junk all pending output + */ + do { + s = splimp(); + IF_DEQUEUE(&ifp->if_snd, m); + splx(s); + if (m) + m_freem(m); + } while (m); + + if (ifp->if_flags & IFF_UP) { + s = splimp(); + if_down(ifp); + if (ifp->if_flags & IFF_RUNNING) { + /* find internet 
addresses and delete routes */ + register struct ifaddr *ifa; + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { + switch (ifa->ifa_addr->sa_family) { +#if INET + case AF_INET: +#endif +#if INET6 + case AF_INET6: +#endif + rtinit(ifa, (int)RTM_DELETE, + tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0); + break; + } + } + } + splx(s); + } + ifp->if_flags &= ~IFF_RUNNING; + funsetown(tp->tun_sigio); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&tp->tun_rsel); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + TUNDEBUG ("%s%d: closed\n", ifp->if_name, ifp->if_unit); + return (0); +} + +static int +tuninit(unit, cmd, af) + int unit; + int cmd; + u_char af; +{ + struct tun_softc *tp = &tunctl[unit]; + struct ifnet *ifp = &tp->tun_if; + register struct ifaddr *ifa; + + TUNDEBUG("%s%d: tuninit\n", ifp->if_name, ifp->if_unit); + + ifp->if_flags |= IFF_UP | IFF_RUNNING; + getmicrotime(&ifp->if_lastchange); + + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) { +#if INET + if (ifa->ifa_addr->sa_family == AF_INET) { + struct sockaddr_in *si; + + si = (struct sockaddr_in *)ifa->ifa_addr; + if (si && si->sin_addr.s_addr) + tp->tun_flags |= TUN_IASET; + + si = (struct sockaddr_in *)ifa->ifa_dstaddr; + if (si && si->sin_addr.s_addr) + tp->tun_flags |= TUN_DSTADDR; + } +#endif + } + return 0; +} + +/* + * Process an ioctl request. 
+ */ +int +tunifioctl(ifp, cmd, data) + struct ifnet *ifp; + u_long cmd; + caddr_t data; +{ + register struct ifreq *ifr = (struct ifreq *)data; + int error = 0, s; + + s = splimp(); + switch(cmd) { + case SIOCSIFADDR: + tuninit(ifp->if_unit); + TUNDEBUG("%s%d: address set\n", + ifp->if_name, ifp->if_unit); + break; + case SIOCSIFDSTADDR: +#if 0 +#if defined(INET6) && defined(__FreeBSD__) && __FreeBSD__ >= 3 + if (found_first_ifid == 0) + in6_ifattach_noifid(ifp); +#endif /* defined(INET6) && defined(__FreeBSD__) && __FreeBSD__ >= 3 */ +#endif + tuninit(ifp->if_unit, cmd, ifr->ifr_addr.sa_family); + break; + case SIOCSIFMTU: + ifp->if_mtu = ifr->ifr_mtu; + TUNDEBUG("%s%d: mtu set\n", + ifp->if_name, ifp->if_unit); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + break; + + case SIOCSIFFLAGS: + if ((ifp->if_flags & IFF_UP) != 0) + ifp->if_flags |= IFF_RUNNING; + else if ((ifp->if_flags & IFF_UP) == 0) + ifp->if_flags &= ~IFF_RUNNING; + break; + + default: + error = EINVAL; + } + splx(s); + return (error); +} + +/* + * tunoutput - queue packets from higher level ready to put out. + */ +/* Packet data format between tun and ppp is changed to enable checking of + * Address Family of sending packet. When INET6 is defined, 4byte AF field + * is appended to packet data as following. + * + * 0 1 2 3 4 5 6 7 8 ..... + * ------------------------------ + * | af | packet data ..... + * ------------------------------ + * ^^^^^^^^^^^^^ + * Newly added part. The size is sizeof(u_long). + * + * However, this is not adopted for tun -> ppp AF_INET packet for + * backword compatibility, because the ppp process may be an existing + * ip only supporting one. + * Also in ppp->tun case, when af value is unknown, (af > 255) is checked and + * if it is true, AF_INET is assumed. (the 4byte may be the head of + * AF_INET packet. Despite the byte order, the value must always be + * greater than 255, because of ip_len field or (ip_v and ip_hl) + * field. (Idea from Mr. 
Noritoshi Demize) + */ +int +tunoutput(ifp, m0, dst, rt) + struct ifnet *ifp; + struct mbuf *m0; + struct sockaddr *dst; + struct rtentry *rt; +{ + struct tun_softc *tp = &tunctl[ifp->if_unit]; + int s; + + TUNDEBUG ("%s%d: tunoutput\n", ifp->if_name, ifp->if_unit); + + if ((tp->tun_flags & TUN_READY) != TUN_READY) { + TUNDEBUG ("%s%d: not ready 0%o\n", ifp->if_name, + ifp->if_unit, tp->tun_flags); + m_freem (m0); + return EHOSTDOWN; + } + +#if NBPFILTER > 0 + /* BPF write needs to be handled specially */ + if (dst->sa_family == AF_UNSPEC) { + dst->sa_family = *(mtod(m0, int *)); + m0->m_len -= sizeof(int); + m0->m_pkthdr.len -= sizeof(int); + m0->m_data += sizeof(int); + } + + if (ifp->if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer to it). + */ + struct mbuf m; + u_int af = dst->sa_family; + + m.m_next = m0; + m.m_len = 4; + m.m_data = (char *)&af; + + bpf_mtap(ifp, &m); + } +#endif + + switch(dst->sa_family) { +#if defined(INET) || defined(INET6) +#if INET6 + case AF_INET6: + M_PREPEND(m0, sizeof(u_long) /* af field passed to upper */, + M_DONTWAIT); + if (m0 == 0) + return (ENOBUFS); + *mtod(m0, u_long *) = (u_long)dst->sa_family; + /* FALLTHROUGH */ +#endif /* INET6 */ +#if INET + case AF_INET: +#endif /* INET */ +#endif /* INET || INET6 */ + s = splimp(); + if (IF_QFULL(&ifp->if_snd)) { + IF_DROP(&ifp->if_snd); + m_freem(m0); + splx(s); + ifp->if_collisions++; + return (ENOBUFS); + } + ifp->if_obytes += m0->m_pkthdr.len; + IF_ENQUEUE(&ifp->if_snd, m0); + splx(s); + ifp->if_opackets++; + break; + default: + m_freem(m0); + return EAFNOSUPPORT; + } + + if (tp->tun_flags & TUN_RWAIT) { + tp->tun_flags &= ~TUN_RWAIT; + wakeup((caddr_t)tp); + } + if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) + pgsigio(tp->tun_sigio, SIGIO, 0); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); 
+ selwakeup(&tp->tun_rsel); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return 0; +} + +/* + * the cdevsw interface is now pretty minimal. + */ +static int +tunioctl(dev, cmd, data, flag, p) + dev_t dev; + u_long cmd; + caddr_t data; + int flag; + struct proc *p; +{ + int unit = dev_val(minor(dev)), s; + struct tun_softc *tp = &tunctl[unit]; + struct tuninfo *tunp; + + switch (cmd) { + case TUNSIFINFO: + tunp = (struct tuninfo *)data; + tp->tun_if.if_mtu = tunp->mtu; + tp->tun_if.if_type = tunp->type; + tp->tun_if.if_baudrate = tunp->baudrate; + break; + case TUNGIFINFO: + tunp = (struct tuninfo *)data; + tunp->mtu = tp->tun_if.if_mtu; + tunp->type = tp->tun_if.if_type; + tunp->baudrate = tp->tun_if.if_baudrate; + break; + case TUNSDEBUG: + tundebug = *(int *)data; + break; + case TUNGDEBUG: + *(int *)data = tundebug; + break; + case FIONBIO: + break; + case FIOASYNC: + if (*(int *)data) + tp->tun_flags |= TUN_ASYNC; + else + tp->tun_flags &= ~TUN_ASYNC; + break; + case FIONREAD: + s = splimp(); + if (tp->tun_if.if_snd.ifq_head) { + struct mbuf *mb = tp->tun_if.if_snd.ifq_head; + for( *(int *)data = 0; mb != 0; mb = mb->m_next) + *(int *)data += mb->m_len; + } else + *(int *)data = 0; + splx(s); + break; + case FIOSETOWN: + return (fsetown(*(int *)data, &tp->tun_sigio)); + + case FIOGETOWN: + *(int *)data = fgetown(tp->tun_sigio); + return (0); + + /* This is deprecated, FIOSETOWN should be used instead. */ + case TIOCSPGRP: + return (fsetown(-(*(int *)data), &tp->tun_sigio)); + + /* This is deprecated, FIOGETOWN should be used instead. */ + case TIOCGPGRP: + *(int *)data = -fgetown(tp->tun_sigio); + return (0); + + default: + return (ENOTTY); + } + return (0); +} + +/* + * The cdevsw read interface - reads a packet at a time, or at + * least as much of a packet as can be read. 
+ */ +static int +tunread(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + int unit = dev_val(minor(dev)); + struct tun_softc *tp = &tunctl[unit]; + struct ifnet *ifp = &tp->tun_if; + struct mbuf *m, *m0; + int error=0, len, s; + + TUNDEBUG ("%s%d: read\n", ifp->if_name, ifp->if_unit); + if ((tp->tun_flags & TUN_READY) != TUN_READY) { + TUNDEBUG ("%s%d: not ready 0%o\n", ifp->if_name, + ifp->if_unit, tp->tun_flags); + return EHOSTDOWN; + } + + tp->tun_flags &= ~TUN_RWAIT; + + s = splimp(); + do { + IF_DEQUEUE(&ifp->if_snd, m0); + if (m0 == 0) { + if (flag & IO_NDELAY) { + splx(s); + return EWOULDBLOCK; + } + tp->tun_flags |= TUN_RWAIT; + if( error = tsleep((caddr_t)tp, PCATCH | (PZERO + 1), + "tunread", 0)) { + splx(s); + return error; + } + } + } while (m0 == 0); + splx(s); + + while (m0 && uio->uio_resid > 0 && error == 0) { + len = min(uio->uio_resid, m0->m_len); + if (len == 0) + break; + error = uiomove(mtod(m0, caddr_t), len, uio); + MFREE(m0, m); + m0 = m; + } + + if (m0) { + TUNDEBUG("Dropping mbuf\n"); + m_freem(m0); + } + return error; +} + +/* + * the cdevsw write interface - an atomic write is a packet - or else! + */ +/* See top of tunoutput() about interface change between ppp process and + * tun. 
*/ +static int +tunwrite(dev, uio, flag) + dev_t dev; + struct uio *uio; + int flag; +{ + int unit = dev_val(minor(dev)); + struct ifnet *ifp = &tunctl[unit].tun_if; + struct mbuf *top, **mp, *m; + int error=0, s, tlen, mlen; + u_long af; + u_int netisr_af; + struct ifqueue *afintrq = NULL; + + TUNDEBUG("%s%d: tunwrite\n", ifp->if_name, ifp->if_unit); + + if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) { + TUNDEBUG("%s%d: len=%d!\n", ifp->if_name, ifp->if_unit, + uio->uio_resid); + return EIO; + } + tlen = uio->uio_resid; + + /* get a header mbuf */ + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == NULL) + return ENOBUFS; + if (tlen > MHLEN) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + return ENOBUFS; + } + mlen = m->m_ext.ext_size; + } else + mlen = MHLEN; + + top = 0; + mp = &top; + while (error == 0 && uio->uio_resid > 0) { + m->m_len = min(mlen, uio->uio_resid); + error = uiomove(mtod (m, caddr_t), m->m_len, uio); + *mp = m; + mp = &m->m_next; + if (uio->uio_resid > 0) { + MGET (m, M_DONTWAIT, MT_DATA); + if (m == 0) { + error = ENOBUFS; + break; + } + mlen = MLEN; + } + } + /* Change for checking Address Family of sending packet. */ + af = *mtod(top, u_long *); + switch (af) { +#if INET + case AF_INET: + netisr_af = NETISR_IP; + afintrq = &ipintrq; + break; +#endif /* INET */ +#if INET6 + case AF_INET6: + netisr_af = NETISR_IPV6; + afintrq = &ip6intrq; + break; +#endif /* INET6 */ + default: + if (af > 255) { /* see description at the top of tunoutput */ + af = AF_INET; + netisr_af = NETISR_IP; + afintrq = &ipintrq; + goto af_decided; + } + error = EAFNOSUPPORT; + break; + } + m_adj(top, sizeof(u_long)); /* remove af field passed from upper */ + tlen -= sizeof(u_long); + af_decided: + if (error) { + if (top) + m_freem (top); + return error; + } + + top->m_pkthdr.len = tlen; + top->m_pkthdr.rcvif = ifp; + +#if NBPFILTER > 0 + if (ifp->if_bpf) { + /* + * We need to prepend the address family as + * a four byte field. 
Cons up a dummy header + * to pacify bpf. This is safe because bpf + * will only read from the mbuf (i.e., it won't + * try to free it or keep a pointer to it). + */ + struct mbuf m; + + m.m_next = top; + m.m_len = 4; + m.m_data = (char *)&af; + + bpf_mtap(ifp, &m); + } +#endif + + /* just for safety */ + if (!afintrq) + return EAFNOSUPPORT; + + s = splimp(); + if (IF_QFULL (afintrq)) { + IF_DROP(afintrq); + splx(s); + ifp->if_collisions++; + m_freem(top); + return ENOBUFS; + } + IF_ENQUEUE(afintrq, top); + splx(s); + ifp->if_ibytes += tlen; + ifp->if_ipackets++; + schednetisr(netisr_af); + return error; +} + +/* + * tunpoll - the poll interface, this is only useful on reads + * really. The write detect always returns true, write never blocks + * anyway, it either accepts the packet or drops it. + */ +static int +tunpoll(dev, events, p) + dev_t dev; + int events; + struct proc *p; +{ + int unit = dev_val(minor(dev)), s; + struct tun_softc *tp = &tunctl[unit]; + struct ifnet *ifp = &tp->tun_if; + int revents = 0; + + s = splimp(); + TUNDEBUG("%s%d: tunpoll\n", ifp->if_name, ifp->if_unit); + + if (events & (POLLIN | POLLRDNORM)) + if (ifp->if_snd.ifq_len > 0) { + TUNDEBUG("%s%d: tunpoll q=%d\n", ifp->if_name, + ifp->if_unit, ifp->if_snd.ifq_len); + revents |= events & (POLLIN | POLLRDNORM); + } else { + TUNDEBUG("%s%d: tunpoll waiting\n", ifp->if_name, + ifp->if_unit); + selrecord(p, &tp->tun_rsel); + } + + if (events & (POLLOUT | POLLWRNORM)) + revents |= events & (POLLOUT | POLLWRNORM); + + splx(s); + return (revents); +} + + +#endif /* NTUN */ diff --git a/bsd/net/if_tun.h b/bsd/net/if_tun.h new file mode 100644 index 000000000..8ccbda90e --- /dev/null +++ b/bsd/net/if_tun.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_tun.h,v 1.5 1994/06/29 06:36:27 cgd Exp $ */ + +/* + * Copyright (c) 1988, Julian Onions + * Nottingham University 1987. + * + * This source may be freely distributed, however I would be interested + * in any changes that are made. + * + * This driver takes packets off the IP i/f and hands them up to a + * user process to have its wicked way with. This driver has it's + * roots in a similar driver written by Phil Cockcroft (formerly) at + * UCL. This driver is based much more on read/write/select mode of + * operation though. + * + */ + +#ifndef _NET_IF_TUN_H_ +#define _NET_IF_TUN_H_ + +/* Refer to if_tunvar.h for the softc stuff */ + +/* Maximum transmit packet size (default) */ +#define TUNMTU 1500 + +/* Maximum receive packet size (hard limit) */ +#define TUNMRU 16384 + +struct tuninfo { + int baudrate; /* linespeed */ + short mtu; /* maximum transmission unit */ + u_char type; /* ethernet, tokenring, etc. 
*/ + u_char dummy; /* place holder */ +}; + +/* ioctl's for get/set debug */ +#define TUNSDEBUG _IOW('t', 90, int) +#define TUNGDEBUG _IOR('t', 89, int) +#define TUNSIFINFO _IOW('t', 91, struct tuninfo) +#define TUNGIFINFO _IOR('t', 92, struct tuninfo) + +#endif /* !_NET_IF_TUN_H_ */ diff --git a/bsd/net/if_tunvar.h b/bsd/net/if_tunvar.h new file mode 100644 index 000000000..ebf00d705 --- /dev/null +++ b/bsd/net/if_tunvar.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1998 Brian Somers + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _NET_IF_TUNVAR_H_ +#define _NET_IF_TUNVAR_H_ + +struct tun_softc { + u_short tun_flags; /* misc flags */ +#define TUN_OPEN 0x0001 +#define TUN_INITED 0x0002 +#define TUN_RCOLL 0x0004 +#define TUN_IASET 0x0008 +#define TUN_DSTADDR 0x0010 +#define TUN_RWAIT 0x0040 +#define TUN_ASYNC 0x0080 + +#define TUN_READY (TUN_OPEN | TUN_INITED) + + struct ifnet tun_if; /* the interface */ + struct sigio *tun_sigio; /* information for async I/O */ + struct selinfo tun_rsel; /* read select */ + struct selinfo tun_wsel; /* write select (not used) */ +}; + +#endif /* !_NET_IF_TUNVAR_H_ */ diff --git a/bsd/net/if_types.h b/bsd/net/if_types.h new file mode 100644 index 000000000..38d8eaac9 --- /dev/null +++ b/bsd/net/if_types.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_types.h 8.2 (Berkeley) 4/20/94 + */ + +#ifndef _NET_IF_TYPES_H_ +#define _NET_IF_TYPES_H_ + +/* + * Interface types for benefit of parsing media address headers. + * This list is derived from the SNMP list of ifTypes, currently + * documented in RFC1573. + */ + +#define IFT_OTHER 0x1 /* none of the following */ +#define IFT_1822 0x2 /* old-style arpanet imp */ +#define IFT_HDH1822 0x3 /* HDH arpanet imp */ +#define IFT_X25DDN 0x4 /* x25 to imp */ +#define IFT_X25 0x5 /* PDN X25 interface (RFC877) */ +#define IFT_ETHER 0x6 /* Ethernet CSMACD */ +#define IFT_ISO88023 0x7 /* CMSA CD */ +#define IFT_ISO88024 0x8 /* Token Bus */ +#define IFT_ISO88025 0x9 /* Token Ring */ +#define IFT_ISO88026 0xa /* MAN */ +#define IFT_STARLAN 0xb +#define IFT_P10 0xc /* Proteon 10MBit ring */ +#define IFT_P80 0xd /* Proteon 80MBit ring */ +#define IFT_HY 0xe /* Hyperchannel */ +#define IFT_FDDI 0xf +#define IFT_LAPB 0x10 +#define IFT_SDLC 0x11 +#define IFT_T1 0x12 +#define IFT_CEPT 0x13 /* E1 - european T1 */ +#define IFT_ISDNBASIC 0x14 +#define IFT_ISDNPRIMARY 0x15 +#define IFT_PTPSERIAL 0x16 /* Proprietary PTP serial */ +#define IFT_PPP 0x17 /* RFC 1331 */ +#define IFT_LOOP 0x18 /* loopback */ +#define IFT_EON 0x19 /* ISO over IP */ +#define IFT_XETHER 0x1a /* obsolete 3MB experimental ethernet */ +#define IFT_NSIP 0x1b /* XNS over IP */ +#define IFT_SLIP 0x1c /* IP over generic TTY */ +#define IFT_ULTRA 0x1d /* Ultra Technologies */ +#define 
IFT_DS3 0x1e /* Generic T3 */ +#define IFT_SIP 0x1f /* SMDS */ +#define IFT_FRELAY 0x20 /* Frame Relay DTE only */ +#define IFT_RS232 0x21 +#define IFT_PARA 0x22 /* parallel-port */ +#define IFT_ARCNET 0x23 +#define IFT_ARCNETPLUS 0x24 +#define IFT_ATM 0x25 /* ATM cells */ +#define IFT_MIOX25 0x26 +#define IFT_SONET 0x27 /* SONET or SDH */ +#define IFT_X25PLE 0x28 +#define IFT_ISO88022LLC 0x29 +#define IFT_LOCALTALK 0x2a +#define IFT_SMDSDXI 0x2b +#define IFT_FRELAYDCE 0x2c /* Frame Relay DCE */ +#define IFT_V35 0x2d +#define IFT_HSSI 0x2e +#define IFT_HIPPI 0x2f +#define IFT_MODEM 0x30 /* Generic Modem */ +#define IFT_AAL5 0x31 /* AAL5 over ATM */ +#define IFT_SONETPATH 0x32 +#define IFT_SONETVT 0x33 +#define IFT_SMDSICIP 0x34 /* SMDS InterCarrier Interface */ +#define IFT_PROPVIRTUAL 0x35 /* Proprietary Virtual/internal */ +#define IFT_PROPMUX 0x36 /* Proprietary Multiplexing */ +#define IFT_GIF 0x37 +#define IFT_DUMMY 0x38 +#define IFT_PVC 0x39 +#define IFT_FAITH 0x3a +#define IFT_STF 0x3b + +#endif diff --git a/bsd/net/if_var.h b/bsd/net/if_var.h new file mode 100644 index 000000000..a34df56d6 --- /dev/null +++ b/bsd/net/if_var.h @@ -0,0 +1,474 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * From: @(#)if.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NET_IF_VAR_H_ +#define _NET_IF_VAR_H_ + +#define APPLE_IF_FAM_LOOPBACK 1 +#define APPLE_IF_FAM_ETHERNET 2 +#define APPLE_IF_FAM_SLIP 3 +#define APPLE_IF_FAM_TUN 4 +#define APPLE_IF_FAM_VLAN 5 +#define APPLE_IF_FAM_PPP 6 +#define APPLE_IF_FAM_PVC 7 +#define APPLE_IF_FAM_DISC 8 +#define APPLE_IF_FAM_MDECAP 9 +#define APPLE_IF_FAM_GIF 10 +#define APPLE_IF_FAM_FAITH 11 + + +/* + * Structures defining a network interface, providing a packet + * transport mechanism (ala level 0 of the PUP protocols). + * + * Each interface accepts output datagrams of a specified maximum + * length, and provides higher level routines with input datagrams + * received from its medium. + * + * Output occurs when the routine if_output is called, with three parameters: + * (*ifp->if_output)(ifp, m, dst, rt) + * Here m is the mbuf chain to be sent and dst is the destination address. + * The output routine encapsulates the supplied datagram if necessary, + * and then transmits it on its medium. + * + * On input, each interface unwraps the data received by it, and either + * places it on the input queue of a internetwork datagram routine + * and posts the associated software interrupt, or passes the datagram to a raw + * packet input routine. + * + * Routines exist for locating interfaces by their addresses + * or for locating a interface on a certain network, as well as more general + * routing and gateway routines maintaining information used to locate + * interfaces. These routines live in the files if.c and route.c + */ + +#ifdef __STDC__ +/* + * Forward structure declarations for function prototypes [sic]. + */ +struct mbuf; +struct proc; +struct rtentry; +struct socket; +struct sockaddr_dl; +#endif + +#define IFNAMSIZ 16 + +#include /* get TAILQ macros */ + +struct tqdummy { +}; + +TAILQ_HEAD(tailq_head, tqdummy); + + +/* This belongs up in socket.h or socketvar.h, depending on how far the + * event bubbles up. 
+ */ + +struct net_event_data { + u_long if_family; + u_long if_unit; + char if_name[IFNAMSIZ]; +}; + + + +TAILQ_HEAD(ifnethead, ifnet); /* we use TAILQs so that the order of */ +TAILQ_HEAD(ifaddrhead, ifaddr); /* instantiation is preserved in the list */ +LIST_HEAD(ifmultihead, ifmultiaddr); + + +/* + * Structure describing information about an interface + * which may be of interest to management entities. + */ +struct if_data { + /* generic interface information */ + u_char ifi_type; /* ethernet, tokenring, etc */ + u_char ifi_typelen; /* Length of frame type id */ + u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ + u_char ifi_addrlen; /* media address length */ + u_char ifi_hdrlen; /* media header length */ + u_char ifi_recvquota; /* polling quota for receive intrs */ + u_char ifi_xmitquota; /* polling quota for xmit intrs */ + u_long ifi_mtu; /* maximum transmission unit */ + u_long ifi_metric; /* routing metric (external only) */ + u_long ifi_baudrate; /* linespeed */ + /* volatile statistics */ + u_long ifi_ipackets; /* packets received on interface */ + u_long ifi_ierrors; /* input errors on interface */ + u_long ifi_opackets; /* packets sent on interface */ + u_long ifi_oerrors; /* output errors on interface */ + u_long ifi_collisions; /* collisions on csma interfaces */ + u_long ifi_ibytes; /* total number of octets received */ + u_long ifi_obytes; /* total number of octets sent */ + u_long ifi_imcasts; /* packets received via multicast */ + u_long ifi_omcasts; /* packets sent via multicast */ + u_long ifi_iqdrops; /* dropped on input, this interface */ + u_long ifi_noproto; /* destined for unsupported protocol */ + u_long ifi_recvtiming; /* usec spent receiving when timing */ + u_long ifi_xmittiming; /* usec spent xmitting when timing */ + struct timeval ifi_lastchange; /* time of last administrative change */ + u_long default_proto; /* Default dl_tag when none is specified + * on dlil_output */ + u_long ifi_hwassist; /* HW offload 
capabilities */ + u_long ifi_reserved1; /* for future use */ + u_long ifi_reserved2; /* for future use */ +}; + + +/* + * Structure defining a queue for a network interface. + */ +struct ifqueue { + struct mbuf *ifq_head; + struct mbuf *ifq_tail; + int ifq_len; + int ifq_maxlen; + int ifq_drops; +}; + +/* + * Structure defining a network interface. + * + * (Would like to call this struct ``if'', but C isn't PL/1.) + */ +struct ifnet { + void *if_softc; /* pointer to driver state */ + char *if_name; /* name, e.g. ``en'' or ``lo'' */ + TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */ + struct ifaddrhead if_addrhead; /* linked list of addresses per if */ + struct tailq_head proto_head; /* Head for if_proto structures */ + int if_pcount; /* number of promiscuous listeners */ + struct bpf_if *if_bpf; /* packet filter structure */ + u_short if_index; /* numeric abbreviation for this if */ + short if_unit; /* sub-unit for lower level driver */ + short if_timer; /* time 'til if_watchdog called */ + short if_flags; /* up/down, broadcast, etc. 
*/ + int if_ipending; /* interrupts pending */ + void *if_linkmib; /* link-type-specific MIB data */ + size_t if_linkmiblen; /* length of above data */ + struct if_data if_data; + +/* New with DLIL */ + int refcnt; + int offercnt; + int (*if_output)(struct ifnet *ifnet_ptr, struct mbuf *m); + int (*if_ioctl)(struct ifnet *ifnet_ptr, u_long ioctl_code, void *ioctl_arg); + int (*if_set_bpf_tap)(struct ifnet *ifp, int mode, + int (*bpf_callback)(struct ifnet *, struct mbuf *)); + int (*if_free)(struct ifnet *ifnet_ptr); + int (*if_demux)(struct ifnet *ifnet_ptr, struct mbuf *mbuf_ptr, + char *frame_ptr, void *if_proto_ptr); + + int (*if_event)(struct ifnet *ifnet_ptr, caddr_t event_ptr); + + int (*if_framer)(struct ifnet *ifp, + struct mbuf **m, + struct sockaddr *dest, + char *dest_linkaddr, + char *frame_type); + + u_long if_family; /* ulong assigned by Apple */ + struct tailq_head if_flt_head; + +/* End DLIL specific */ + +/* #if defined(ppc) */ + void *if_Y; /* For Y-adapter connection */ +/* #endif */ + void *if_private; /* private to interface */ +/* procedure handles */ +#if __APPLE__ + long if_eflags; /* autoaddr, autoaddr done, etc. 
*/ +#else + int (*if_done) /* output complete routine */ + __P((struct ifnet *)); /* (XXX not used; fake prototype) */ +#endif + + + struct ifmultihead if_multiaddrs; /* multicast addresses configured */ + int if_amcount; /* number of all-multicast requests */ +/* procedure handles */ + int (*if_poll_recv) /* polled receive routine */ + __P((struct ifnet *, int *)); + int (*if_poll_xmit) /* polled transmit routine */ + __P((struct ifnet *, int *)); + void (*if_poll_intren) /* polled interrupt reenable routine */ + __P((struct ifnet *)); + void (*if_poll_slowinput) /* input routine for slow devices */ + __P((struct ifnet *, struct mbuf *)); + void (*if_init) /* Init routine */ + __P((void *)); + int (*if_resolvemulti) /* validate/resolve multicast */ + __P((struct ifnet *, struct sockaddr **, struct sockaddr *)); + struct ifqueue if_snd; /* output queue */ + struct ifqueue *if_poll_slowq; /* input queue for slow devices */ + u_long family_cookie; + struct ifprefix *if_prefixlist; /* linked list of prefixes per if */ + void *reserved1; /* for future use */ + void *reserved2; /* for future use */ +}; +typedef void if_init_f_t __P((void *)); + +#define if_mtu if_data.ifi_mtu +#define if_type if_data.ifi_type +#define if_typelen if_data.ifi_typelen +#define if_physical if_data.ifi_physical +#define if_addrlen if_data.ifi_addrlen +#define if_hdrlen if_data.ifi_hdrlen +#define if_metric if_data.ifi_metric +#define if_baudrate if_data.ifi_baudrate +#define if_hwassist if_data.ifi_hwassist +#define if_ipackets if_data.ifi_ipackets +#define if_ierrors if_data.ifi_ierrors +#define if_opackets if_data.ifi_opackets +#define if_oerrors if_data.ifi_oerrors +#define if_collisions if_data.ifi_collisions +#define if_ibytes if_data.ifi_ibytes +#define if_obytes if_data.ifi_obytes +#define if_imcasts if_data.ifi_imcasts +#define if_omcasts if_data.ifi_omcasts +#define if_iqdrops if_data.ifi_iqdrops +#define if_noproto if_data.ifi_noproto +#define if_lastchange if_data.ifi_lastchange 
+#define if_recvquota if_data.ifi_recvquota +#define if_xmitquota if_data.ifi_xmitquota +#define if_rawoutput(if, m, sa) if_output(if, m, sa, (struct rtentry *)0) + +/* + * Bit values in if_ipending + */ +#define IFI_RECV 1 /* I want to receive */ +#define IFI_XMIT 2 /* I want to transmit */ + +/* + * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq) + * are queues of messages stored on ifqueue structures + * (defined above). Entries are added to and deleted from these structures + * by these macros, which should be called with ipl raised to splimp(). + */ +#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) +#define IF_DROP(ifq) ((ifq)->ifq_drops++) +#define IF_ENQUEUE(ifq, m) { \ + (m)->m_nextpkt = 0; \ + if ((ifq)->ifq_tail == 0) \ + (ifq)->ifq_head = m; \ + else \ + (ifq)->ifq_tail->m_nextpkt = m; \ + (ifq)->ifq_tail = m; \ + (ifq)->ifq_len++; \ +} +#define IF_PREPEND(ifq, m) { \ + (m)->m_nextpkt = (ifq)->ifq_head; \ + if ((ifq)->ifq_tail == 0) \ + (ifq)->ifq_tail = (m); \ + (ifq)->ifq_head = (m); \ + (ifq)->ifq_len++; \ +} +#define IF_DEQUEUE(ifq, m) { \ + (m) = (ifq)->ifq_head; \ + if (m) { \ + if (((ifq)->ifq_head = (m)->m_nextpkt) == 0) \ + (ifq)->ifq_tail = 0; \ + (m)->m_nextpkt = 0; \ + (ifq)->ifq_len--; \ + } \ +} + +#ifdef KERNEL +#define IF_ENQ_DROP(ifq, m) if_enq_drop(ifq, m) + +#if defined(__GNUC__) && defined(MT_HEADER) +static __inline int +if_queue_drop(struct ifqueue *ifq, struct mbuf *m) +{ + IF_DROP(ifq); + return 0; +} + +static __inline int +if_enq_drop(struct ifqueue *ifq, struct mbuf *m) +{ + if (IF_QFULL(ifq) && + !if_queue_drop(ifq, m)) + return 0; + IF_ENQUEUE(ifq, m); + return 1; +} +#else + +#ifdef MT_HEADER +int if_enq_drop __P((struct ifqueue *, struct mbuf *)); +#endif + +#endif +#endif /* KERNEL */ + +/* + * The ifaddr structure contains information about one address + * of an interface. 
They are maintained by the different address families, + * are allocated and attached when an address is set, and are linked + * together so all addresses for an interface can be located. + */ +struct ifaddr { + struct sockaddr *ifa_addr; /* address of interface */ + struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */ +#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ + struct sockaddr *ifa_netmask; /* used to determine subnet */ + struct ifnet *ifa_ifp; /* back-pointer to interface */ + TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */ + void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */ + __P((int, struct rtentry *, struct sockaddr *)); + u_short ifa_flags; /* mostly rt_flags for cloning */ + short ifa_refcnt; /* references to this structure */ + int ifa_metric; /* cost of going out this interface */ +#ifdef notdef + struct rtentry *ifa_rt; /* XXXX for ROUTETOIF ????? */ +#endif + u_long ifa_dlt; + int (*ifa_claim_addr) /* check if an addr goes to this if */ + __P((struct ifaddr *, struct sockaddr *)); + +}; +#define IFA_ROUTE RTF_UP /* route installed */ + +/* + * The prefix structure contains information about one prefix + * of an interface. They are maintained by the different address families, + * are allocated and attached when a prefix or an address is set, + * and are linked together so all prefixes for an interface can be located. + */ +struct ifprefix { + struct sockaddr *ifpr_prefix; /* prefix of interface */ + struct ifnet *ifpr_ifp; /* back-pointer to interface */ + struct ifprefix *ifpr_next; + u_char ifpr_plen; /* prefix length in bits */ + u_char ifpr_type; /* protocol dependent prefix type */ +}; + +/* + * Multicast address structure. This is analogous to the ifaddr + * structure except that it keeps track of multicast addresses. + * Also, the reference count here is a count of requests for this + * address, not a count of pointers to this structure. 
+ */ +struct ifmultiaddr { + LIST_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */ + struct sockaddr *ifma_addr; /* address this membership is for */ + struct sockaddr *ifma_lladdr; /* link-layer translation, if any */ + struct ifnet *ifma_ifp; /* back-pointer to interface */ + u_int ifma_refcount; /* reference count */ + void *ifma_protospec; /* protocol-specific state, if any */ +}; + +#if KERNEL +#define IFAFREE(ifa) \ + do { \ + if ((ifa)->ifa_refcnt <= 0) \ + ifafree(ifa); \ + else \ + (ifa)->ifa_refcnt--; \ + } while (0) + +extern struct ifnethead ifnet; +extern struct ifnet **ifindex2ifnet; +extern int ifqmaxlen; +extern struct ifnet loif[]; +extern int if_index; +extern struct ifaddr **ifnet_addrs; + +int if_addmulti __P((struct ifnet *, struct sockaddr *, + struct ifmultiaddr **)); +int if_allmulti __P((struct ifnet *, int)); +void if_attach __P((struct ifnet *)); +int if_delmulti __P((struct ifnet *, struct sockaddr *)); +void if_down __P((struct ifnet *)); +void if_route __P((struct ifnet *, int flag, int fam)); +void if_unroute __P((struct ifnet *, int flag, int fam)); +void if_up __P((struct ifnet *)); +/*void ifinit __P((void));*/ /* declared in systm.h for main() */ +int ifioctl __P((struct socket *, u_long, caddr_t, struct proc *)); +int ifpromisc __P((struct ifnet *, int)); +struct ifnet *ifunit __P((char *)); +struct ifnet *if_withname __P((struct sockaddr *)); + +int if_poll_recv_slow __P((struct ifnet *ifp, int *quotap)); +void if_poll_xmit_slow __P((struct ifnet *ifp, int *quotap)); +void if_poll_throttle __P((void)); +void if_poll_unthrottle __P((void *)); +void if_poll_init __P((void)); +void if_poll __P((void)); + +struct ifaddr *ifa_ifwithaddr __P((struct sockaddr *)); +struct ifaddr *ifa_ifwithdstaddr __P((struct sockaddr *)); +struct ifaddr *ifa_ifwithnet __P((struct sockaddr *)); +struct ifaddr *ifa_ifwithroute __P((int, struct sockaddr *, + struct sockaddr *)); +struct ifaddr *ifaof_ifpforaddr __P((struct sockaddr *, struct ifnet 
*)); +void ifafree __P((struct ifaddr *)); + +struct ifmultiaddr *ifmaof_ifpforaddr __P((struct sockaddr *, + struct ifnet *)); +int if_simloop __P((struct ifnet *ifp, struct mbuf *m, + struct sockaddr *dst, int hlen)); + +#endif /* KERNEL */ + + +#endif /* !_NET_IF_VAR_H_ */ diff --git a/bsd/net/if_vlan.c b/bsd/net/if_vlan.c new file mode 100644 index 000000000..e4a3585a5 --- /dev/null +++ b/bsd/net/if_vlan.c @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1998 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. 
makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* + * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. + * Might be extended some day to also handle IEEE 802.1p priority + * tagging. This is sort of sneaky in the implementation, since + * we need to pretend to be enough of an Ethernet implementation + * to make arp work. The way we do this is by telling everyone + * that we are an Ethernet, and then catch the packets that + * ether_output() left on our output queue when it calls + * if_start(), rewrite them for use by the real outgoing interface, + * and ask it to send them. + * + * + * XXX It's incorrect to assume that we must always kludge up + * headers on the physical device's behalf: some devices support + * VLAN tag insertion and extraction in firmware. For these cases, + * one can change the behavior of the vlan interface by setting + * the LINK0 flag on it (that is setting the vlan interface's LINK0 + * flag, _not_ the parent's LINK0 flag; we try to leave the parent + * alone). 
If the interface has the LINK0 flag set, then it will + * not modify the ethernet header on output because the parent + * can do that for itself. On input, the parent can call vlan_input_tag() + * directly in order to supply us with an incoming mbuf and the vlan + * tag value that goes with it. + */ + +#include "vlan.h" +#if NVLAN > 0 +#include "opt_inet.h" +#include "bpfilter.h" + +#include +#include +#include +#include +#include +#include +#include + +#if NBPFILTER > 0 +#include +#endif +#include +#include +#include +#include +#include +#include + +#if INET +#include +#include +#endif + +SYSCTL_DECL(_net_link); +SYSCTL_NODE(_net_link, IFT_8021_VLAN, vlan, CTLFLAG_RW, 0, "IEEE 802.1Q VLAN"); +SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0, "for consistency"); + +u_int vlan_proto = ETHERTYPE_VLAN; +SYSCTL_INT(_net_link_vlan_link, VLANCTL_PROTO, proto, CTLFLAG_RW, &vlan_proto, + 0, "Ethernet protocol used for VLAN encapsulation"); + +static struct ifvlan ifv_softc[NVLAN]; + +static void vlan_start(struct ifnet *ifp); +static void vlan_ifinit(void *foo); +static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr); +static int vlan_setmulti(struct ifnet *ifp); +static int vlan_unconfig(struct ifnet *ifp); +static int vlan_config(struct ifvlan *ifv, struct ifnet *p); + +/* + * Program our multicast filter. What we're actually doing is + * programming the multicast filter of the parent. This has the + * side effect of causing the parent interface to receive multicast + * traffic that it doesn't really want, which ends up being discarded + * later by the upper protocol layers. Unfortunately, there's no way + * to avoid this: there really is only one physical interface. + */ +static int vlan_setmulti(struct ifnet *ifp) +{ + struct ifnet *ifp_p; + struct ifmultiaddr *ifma, *rifma = NULL; + struct ifvlan *sc; + struct vlan_mc_entry *mc = NULL; + struct sockaddr_dl sdl; + int error; + + /* Find the parent. 
*/ + sc = ifp->if_softc; + ifp_p = sc->ifv_p; + + sdl.sdl_len = ETHER_ADDR_LEN; + sdl.sdl_family = AF_LINK; + + /* First, remove any existing filter entries. */ + while(sc->vlan_mc_listhead.slh_first != NULL) { + mc = sc->vlan_mc_listhead.slh_first; + bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN); + error = if_delmulti(ifp_p, (struct sockaddr *)&sdl); + if (error) + return(error); + SLIST_REMOVE_HEAD(&sc->vlan_mc_listhead, mc_entries); + FREE(mc, M_DEVBUF); + } + + /* Now program new ones. */ + for (ifma = ifp->if_multiaddrs.lh_first; + ifma != NULL;ifma = ifma->ifma_link.le_next) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + mc = _MALLOC(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT); + bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), + (char *)&mc->mc_addr, ETHER_ADDR_LEN); + SLIST_INSERT_HEAD(&sc->vlan_mc_listhead, mc, mc_entries); + error = if_addmulti(ifp_p, (struct sockaddr *)&sdl, &rifma); + if (error) + return(error); + } + + return(0); +} + +static void +vlaninit(void *dummy) +{ + int i; + + for (i = 0; i < NVLAN; i++) { + struct ifnet *ifp = &ifv_softc[i].ifv_if; + + ifp->if_softc = &ifv_softc[i]; + ifp->if_name = "vlan"; + ifp->if_family = APPLE_IF_FAM_VLAN; + ifp->if_unit = i; + /* NB: flags are not set here */ + ifp->if_linkmib = &ifv_softc[i].ifv_mib; + ifp->if_linkmiblen = sizeof ifv_softc[i].ifv_mib; + /* NB: mtu is not set here */ + + ifp->if_init = vlan_ifinit; + ifp->if_start = vlan_start; + ifp->if_ioctl = vlan_ioctl; + ifp->if_output = ether_output; + ifp->if_snd.ifq_maxlen = ifqmaxlen; + if_attach(ifp); + ether_ifattach(ifp); +#if NBPFILTER > 0 + bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); +#endif + /* Now undo some of the damage... 
*/ + ifp->if_data.ifi_type = IFT_8021_VLAN; + ifp->if_data.ifi_hdrlen = EVL_ENCAPLEN; + ifp->if_resolvemulti = 0; + } +} +PSEUDO_SET(vlaninit, if_vlan); + +static void +vlan_ifinit(void *foo) +{ + return; +} + +static void +vlan_start(struct ifnet *ifp) +{ + struct ifvlan *ifv; + struct ifnet *p; + struct ether_vlan_header *evl; + struct mbuf *m; + + ifv = ifp->if_softc; + p = ifv->ifv_p; + + ifp->if_flags |= IFF_OACTIVE; + for (;;) { + IF_DEQUEUE(&ifp->if_snd, m); + if (m == 0) + break; +#if NBPFILTER > 0 + if (ifp->if_bpf) + bpf_mtap(ifp, m); +#endif /* NBPFILTER > 0 */ + + /* + * If the LINK0 flag is set, it means the underlying interface + * can do VLAN tag insertion itself and doesn't require us to + * create a special header for it. In this case, we just pass + * the packet along. However, we need some way to tell the + * interface where the packet came from so that it knows how + * to find the VLAN tag to use, so we set the rcvif in the + * mbuf header to our ifnet. + * + * Note: we also set the M_PROTO1 flag in the mbuf to let + * the parent driver know that the rcvif pointer is really + * valid. We need to do this because sometimes mbufs will + * be allocated by other parts of the system that contain + * garbage in the rcvif pointer. Using the M_PROTO1 flag + * lets the driver perform a proper sanity check and avoid + * following potentially bogus rcvif pointers off into + * never-never land. + */ + if (ifp->if_flags & IFF_LINK0) { + m->m_pkthdr.rcvif = ifp; + m->m_flags |= M_PROTO1; + } else { + M_PREPEND(m, EVL_ENCAPLEN, M_DONTWAIT); + if (m == 0) + continue; + /* M_PREPEND takes care of m_len, m_pkthdr.len for us */ + + /* + * Transform the Ethernet header into an Ethernet header + * with 802.1Q encapsulation. 
+ */ + bcopy(mtod(m, char *) + EVL_ENCAPLEN, mtod(m, char *), + sizeof(struct ether_header)); + evl = mtod(m, struct ether_vlan_header *); + evl->evl_proto = evl->evl_encap_proto; + evl->evl_encap_proto = htons(vlan_proto); + evl->evl_tag = htons(ifv->ifv_tag); +#ifdef DEBUG + printf("vlan_start: %*D\n", sizeof *evl, + (char *)evl, ":"); +#endif + } + + /* + * Send it, precisely as ether_output() would have. + * We are already running at splimp. + */ + if (IF_QFULL(&p->if_snd)) { + IF_DROP(&p->if_snd); + /* XXX stats */ + ifp->if_oerrors++; + m_freem(m); + continue; + } + IF_ENQUEUE(&p->if_snd, m); + if ((p->if_flags & IFF_OACTIVE) == 0) { + p->if_start(p); + ifp->if_opackets++; + } + } + ifp->if_flags &= ~IFF_OACTIVE; + + return; +} + +void +vlan_input_tag(struct ether_header *eh, struct mbuf *m, u_int16_t t) +{ + int i; + struct ifvlan *ifv; + + for (i = 0; i < NVLAN; i++) { + ifv = &ifv_softc[i]; + if (ifv->ifv_tag == t) + break; + } + + if (i >= NVLAN || (ifv->ifv_if.if_flags & IFF_UP) == 0) { + m_freem(m); + ifv->ifv_p->if_data.ifi_noproto++; + return; + } + + /* + * Having found a valid vlan interface corresponding to + * the given source interface and vlan tag, run the + * real packet through ether_input(). + */ + m->m_pkthdr.rcvif = &ifv->ifv_if; + +#if NBPFILTER > 0 + if (ifv->ifv_if.if_bpf) { + /* + * Do the usual BPF fakery. Note that we don't support + * promiscuous mode here, since it would require the + * drivers to know about VLANs and we're not ready for + * that yet. 
+ */ + struct mbuf m0; + m0.m_next = m; + m0.m_len = sizeof(struct ether_header); + m0.m_data = (char *)eh; + bpf_mtap(&ifv->ifv_if, &m0); + } +#endif + ifv->ifv_if.if_ipackets++; + ether_input(&ifv->ifv_if, eh, m); + return; +} + +int +vlan_input(struct ether_header *eh, struct mbuf *m) +{ + int i; + struct ifvlan *ifv; + + for (i = 0; i < NVLAN; i++) { + ifv = &ifv_softc[i]; + if (m->m_pkthdr.rcvif == ifv->ifv_p + && (EVL_VLANOFTAG(ntohs(*mtod(m, u_int16_t *))) + == ifv->ifv_tag)) + break; + } + + if (i >= NVLAN || (ifv->ifv_if.if_flags & IFF_UP) == 0) { + m_freem(m); + return -1; /* so ether_input can take note */ + } + + /* + * Having found a valid vlan interface corresponding to + * the given source interface and vlan tag, remove the + * encapsulation, and run the real packet through + * ether_input() a second time (it had better be + * reentrant!). + */ + m->m_pkthdr.rcvif = &ifv->ifv_if; + eh->ether_type = mtod(m, u_int16_t *)[1]; + m->m_data += EVL_ENCAPLEN; + m->m_len -= EVL_ENCAPLEN; + m->m_pkthdr.len -= EVL_ENCAPLEN; + +#if NBPFILTER > 0 + if (ifv->ifv_if.if_bpf) { + /* + * Do the usual BPF fakery. Note that we don't support + * promiscuous mode here, since it would require the + * drivers to know about VLANs and we're not ready for + * that yet. + */ + struct mbuf m0; + m0.m_next = m; + m0.m_len = sizeof(struct ether_header); + m0.m_data = (char *)eh; + bpf_mtap(&ifv->ifv_if, &m0); + } +#endif + ifv->ifv_if.if_ipackets++; + ether_input(&ifv->ifv_if, eh, m); + return 0; +} + +static int +vlan_config(struct ifvlan *ifv, struct ifnet *p) +{ + struct ifaddr *ifa1, *ifa2; + struct sockaddr_dl *sdl1, *sdl2; + + if (p->if_data.ifi_type != IFT_ETHER) + return EPROTONOSUPPORT; + if (ifv->ifv_p) + return EBUSY; + ifv->ifv_p = p; + if (p->if_data.ifi_hdrlen == sizeof(struct ether_vlan_header)) + ifv->ifv_if.if_mtu = p->if_mtu; + else + ifv->ifv_if.if_mtu = p->if_data.ifi_mtu - EVL_ENCAPLEN; + + /* + * Preserve the state of the LINK0 flag for ourselves. 
+ */ + ifv->ifv_if.if_flags = (p->if_flags & ~(IFF_LINK0)); + + /* + * Set up our ``Ethernet address'' to reflect the underlying + * physical interface's. + */ + ifa1 = ifnet_addrs[ifv->ifv_if.if_index - 1]; + ifa2 = ifnet_addrs[p->if_index - 1]; + sdl1 = (struct sockaddr_dl *)ifa1->ifa_addr; + sdl2 = (struct sockaddr_dl *)ifa2->ifa_addr; + sdl1->sdl_type = IFT_ETHER; + sdl1->sdl_alen = ETHER_ADDR_LEN; + bcopy(LLADDR(sdl2), LLADDR(sdl1), ETHER_ADDR_LEN); + bcopy(LLADDR(sdl2), ifv->ifv_ac.ac_enaddr, ETHER_ADDR_LEN); + return 0; +} + +static int +vlan_unconfig(struct ifnet *ifp) +{ + struct ifaddr *ifa; + struct sockaddr_dl *sdl; + struct vlan_mc_entry *mc; + struct ifvlan *ifv; + struct ifnet *p; + int error; + + ifv = ifp->if_softc; + p = ifv->ifv_p; + + /* + * Since the interface is being unconfigured, we need to + * empty the list of multicast groups that we may have joined + * while we were alive and remove them from the parent's list + * as well. + */ + while(ifv->vlan_mc_listhead.slh_first != NULL) { + struct sockaddr_dl sdl; + + sdl.sdl_len = ETHER_ADDR_LEN; + sdl.sdl_family = AF_LINK; + mc = ifv->vlan_mc_listhead.slh_first; + bcopy((char *)&mc->mc_addr, LLADDR(&sdl), ETHER_ADDR_LEN); + error = if_delmulti(p, (struct sockaddr *)&sdl); + error = if_delmulti(ifp, (struct sockaddr *)&sdl); + if (error) + return(error); + SLIST_REMOVE_HEAD(&ifv->vlan_mc_listhead, mc_entries); + FREE(mc, M_DEVBUF); + } + + /* Disconnect from parent. */ + ifv->ifv_p = NULL; + ifv->ifv_if.if_mtu = ETHERMTU; + + /* Clear our MAC address. 
*/ + ifa = ifnet_addrs[ifv->ifv_if.if_index - 1]; + sdl = (struct sockaddr_dl *)ifa->ifa_addr; + sdl->sdl_type = IFT_ETHER; + sdl->sdl_alen = ETHER_ADDR_LEN; + bzero(LLADDR(sdl), ETHER_ADDR_LEN); + bzero(ifv->ifv_ac.ac_enaddr, ETHER_ADDR_LEN); + + return 0; +} + +static int +vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct ifaddr *ifa; + struct ifnet *p; + struct ifreq *ifr; + struct ifvlan *ifv; + struct vlanreq vlr; + int error = 0; + + ifr = (struct ifreq *)data; + ifa = (struct ifaddr *)data; + ifv = ifp->if_softc; + + switch (cmd) { + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP; + + switch (ifa->ifa_addr->sa_family) { +#if INET + case AF_INET: + arp_ifinit(&ifv->ifv_ac, ifa); + break; +#endif + default: + break; + } + break; + + case SIOCGIFADDR: + { + struct sockaddr *sa; + + sa = (struct sockaddr *) &ifr->ifr_data; + bcopy(((struct arpcom *)ifp->if_softc)->ac_enaddr, + (caddr_t) sa->sa_data, ETHER_ADDR_LEN); + } + break; + + case SIOCSIFMTU: + /* + * Set the interface MTU. + * This is bogus. The underlying interface might support + * jumbo frames. 
+ */ + if (ifr->ifr_mtu > ETHERMTU) { + error = EINVAL; + } else { + ifp->if_mtu = ifr->ifr_mtu; + } + break; + + case SIOCSETVLAN: + error = copyin(ifr->ifr_data, &vlr, sizeof vlr); + if (error) + break; + if (vlr.vlr_parent[0] == '\0') { + vlan_unconfig(ifp); + if_down(ifp); + ifp->if_flags = 0; + break; + } + p = ifunit(vlr.vlr_parent); + if (p == 0) { + error = ENOENT; + break; + } + error = vlan_config(ifv, p); + if (error) + break; + ifv->ifv_tag = vlr.vlr_tag; + break; + + case SIOCGETVLAN: + bzero(&vlr, sizeof vlr); + if (ifv->ifv_p) { + snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), + "%s%d", ifv->ifv_p->if_name, ifv->ifv_p->if_unit); + vlr.vlr_tag = ifv->ifv_tag; + } + error = copyout(&vlr, ifr->ifr_data, sizeof vlr); + break; + + case SIOCSIFFLAGS: + /* + * We don't support promiscuous mode + * right now because it would require help from the + * underlying drivers, which hasn't been implemented. + */ + if (ifr->ifr_flags & (IFF_PROMISC)) { + ifp->if_flags &= ~(IFF_PROMISC); + error = EINVAL; + } + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + error = vlan_setmulti(ifp); + break; + default: + error = EINVAL; + } + return error; +} + +#endif /* NVLAN > 0 */ diff --git a/bsd/net/if_vlan_var.h b/bsd/net/if_vlan_var.h new file mode 100644 index 000000000..51656d442 --- /dev/null +++ b/bsd/net/if_vlan_var.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1998 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. 
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _NET_IF_VLAN_VAR_H_ +#define _NET_IF_VLAN_VAR_H_ 1 + +#ifdef KERNEL +struct ifvlan { + struct arpcom ifv_ac; /* make this an interface */ + struct ifnet *ifv_p; /* parent interface of this vlan */ + struct ifv_linkmib { + int ifvm_parent; + u_int16_t ifvm_proto; /* encapsulation ethertype */ + u_int16_t ifvm_tag; /* tag to apply on packets leaving if */ + } ifv_mib; +}; +#define ifv_if ifv_ac.ac_if +#define ifv_tag ifv_mib.ifvm_tag +#endif /* KERNEL */ + +struct ether_vlan_header { + u_char evl_dhost[ETHER_ADDR_LEN]; + u_char evl_shost[ETHER_ADDR_LEN]; + u_int16_t evl_encap_proto; + u_int16_t evl_tag; + u_int16_t evl_proto; +}; + +#define EVL_VLANOFTAG(tag) ((tag) & 4095) +#define EVL_PRIOFTAG(tag) (((tag) >> 13) & 7) +#define EVL_ENCAPLEN 4 /* length in octets of encapsulation */ + +/* When these sorts of interfaces get their own identifier... */ +#define IFT_8021_VLAN IFT_PROPVIRTUAL + +/* sysctl(3) tags, for compatibility purposes */ +#define VLANCTL_PROTO 1 +#define VLANCTL_MAX 2 + +/* + * Configuration structure for SIOCSETVLAN and SIOCGETVLAN ioctls. 
+ */ +struct vlanreq { + char vlr_parent[IFNAMSIZ]; + u_short vlr_tag; +}; +#define SIOCSETVLAN SIOCSIFGENERIC +#define SIOCGETVLAN SIOCGIFGENERIC + +#ifdef KERNEL +/* shared with if_ethersubr.c: */ +extern u_int vlan_proto; +extern int vlan_input(struct ether_header *eh, struct mbuf *m); +#endif + +#endif /* _NET_IF_VLAN_VAR_H_ */ diff --git a/bsd/net/kext_net.c b/bsd/net/kext_net.c new file mode 100644 index 000000000..d96e427d0 --- /dev/null +++ b/bsd/net/kext_net.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (C) 1999 Apple Computer, Inc. */ + +/* + * Support for Network Kernel Extensions: Socket Filters + * + * Justin C. Walker, 990319 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kext_net.h" + +/* List of kernel extensions (networking) known to kernel */ +struct nf_list nf_list; + +/* + * Register a global filter for the specified protocol + * Make a few checks and then insert the new descriptor in the + * filter list and, if global, in its protosw's chain. 
+ */ +int +register_sockfilter(struct NFDescriptor *nfp, struct NFDescriptor *nfp1, + struct protosw *pr, int flags) +{ int s; + static int NF_initted = 0; + + if (nfp == NULL) + return(EINVAL); + + s = splhigh(); + if (!NF_initted) + { NF_initted = 1; + TAILQ_INIT(&nf_list); + } + + /* + * Install the extension: + * First, put it in the global list of all filters + * Then, if global, install in the protosw's list + */ + TAILQ_INSERT_TAIL(&nf_list, nfp, nf_list); + if (nfp->nf_flags & NFD_GLOBAL) + { if (flags & NFF_BEFORE) + { if (nfp1 == NULL) + { TAILQ_INSERT_HEAD(&pr->pr_sfilter, + nfp, nf_next); + } else + TAILQ_INSERT_BEFORE(nfp1, nfp, nf_next); + } else /* Default: AFTER */ + { if (nfp1 == NULL) + { TAILQ_INSERT_TAIL(&pr->pr_sfilter, + nfp, nf_next); + } else + TAILQ_INSERT_AFTER(&pr->pr_sfilter, nfp1, + nfp, nf_next); + } + } + splx(s); + return(0); +} + +unregister_sockfilter(struct NFDescriptor *nfp, struct protosw *pr, int flags) +{ int s; + + s = splhigh(); + TAILQ_REMOVE(&nf_list, nfp, nf_list); + /* Only globals are attached to the protosw entry */ + if (nfp->nf_flags & NFD_GLOBAL) + TAILQ_REMOVE(&pr->pr_sfilter, nfp, nf_next); + splx(s); + return(0); +} + +struct NFDescriptor * +find_nke(unsigned int handle) +{ struct NFDescriptor *nfp; + + nfp = nf_list.tqh_first; + while (nfp) + { if (nfp->nf_handle == handle) + return(nfp); + nfp = nfp->nf_list.tqe_next; + } + return(NULL); +} + +/* + * Insert a previously registered, non-global, NKE into the list of + * active NKEs for this socket. Then invoke its "attach/create" entry. + * Assumed called with protection in place (spl/mutex/whatever) + * XXX: How to which extension is not found, on error. + */ +int +nke_insert(struct socket *so, struct so_nke *np) +{ int s, error; + struct kextcb *kp, *kp1; + struct NFDescriptor *nf1, *nf2 = NULL; + + if (np->nke_where != NULL) + { if ((nf2 = find_nke(np->nke_where)) == NULL) + { /* ??? 
*/ + return(ENXIO);/* XXX */ + } + } + + if ((nf1 = find_nke(np->nke_handle)) == NULL) + { /* ??? */ + return(ENXIO);/* XXX */ + } + + kp = so->so_ext; + kp1 = NULL; + if (np->nke_flags & NFF_BEFORE) + { if (nf2) + { while (kp) + { if (kp->e_nfd == nf2) + break; + kp1 = kp; + kp = kp->e_next; + } + if (kp == NULL) + return(ENXIO);/* XXX */ + } + } else + { if (nf2) + { while (kp) + { if (kp->e_nfd == nf2) + break; + kp1 = kp; + kp = kp->e_next; + } + if (kp == NULL) + return(ENXIO);/* XXX */ + } + kp1 = kp; + } + /* + * Here with kp1 pointing to the insertion point. + * If null, this is first entry. + * Now, create and insert the descriptor. + */ + + MALLOC(kp, struct kextcb *, sizeof(*kp), M_TEMP, M_WAITOK); + if (kp == NULL) + return(ENOBUFS); /* so_free will clean up */ + bzero(kp, sizeof (*kp)); + if (kp1 == NULL) + { kp->e_next = so->so_ext; + so->so_ext = kp; + } else + { kp->e_next = kp1->e_next; + kp1->e_next = kp; + } + kp->e_fcb = NULL; + kp->e_nfd = nf1; + kp->e_soif = nf1->nf_soif; + kp->e_sout = nf1->nf_soutil; + /* + * Ignore return value for create + * Everyone gets a chance at startup + */ + if (kp->e_soif && kp->e_soif->sf_socreate) + (*kp->e_soif->sf_socreate)(so, so->so_proto, kp); + return(0); +} diff --git a/bsd/net/kext_net.h b/bsd/net/kext_net.h new file mode 100644 index 000000000..a048de3a7 --- /dev/null +++ b/bsd/net/kext_net.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (C) 1999 Apple Computer, Inc. */ +/* + * Support for network filter kernel extensions + * Justin C. Walker, 990319 + */ +#ifndef NET_KEXT_NET_H +#define NET_KEXT_NET_H + +#include +#include + +struct mbuf; +struct socket; +struct uio; +struct sockbuf; +struct sockaddr; +struct kextcb; +struct protosw; +struct sockif; +struct sockutil; +struct sockopt; + +/* + * This structure gives access to the functionality of the filter. + * The kextcb provides the link from the socket structure. 
 */
/*
 * One NFDescriptor describes one installed network filter (NKE).
 * It lives on two lists at once (per-protosw chain and the global
 * descriptor list) and carries the dispatch vectors that are copied
 * into each socket's kextcb at socreate() time.
 */
struct NFDescriptor
{	TAILQ_ENTRY(NFDescriptor) nf_next;	/* protosw chain */
	TAILQ_ENTRY(NFDescriptor) nf_list;	/* descriptor list */
	unsigned int nf_handle;			/* Identifier */
	int nf_flags;				/* NFD_* bits below */
	/*
	 * Dispatch for PF_FILTER control.
	 * NOTE(review): K&R-style empty parameter lists — the real
	 * signatures are not visible here; confirm before calling.
	 */
	int (*nf_connect)();			/* Make contact */
	void (*nf_disconnect)();		/* Break contact */
	int (*nf_read)();			/* Get data from filter */
	int (*nf_write)();			/* Send data to filter */
	int (*nf_get)();			/* Get filter config */
	int (*nf_set)();			/* Set filter config */
	/*
	 * Socket function dispatch vectors - copied to kextcb
	 * during socreate()
	 */
	struct sockif *nf_soif;			/* Socket functions */
	struct sockutil *nf_soutil;		/* Sockbuf utility functions */
};

/* nf_flags values */
#define NFD_GLOBAL	0x01
#define NFD_PROG	0x02
#define NFD_VISIBLE	0x80000000

/* Insertion-position flags for so_nke.nke_flags */
#define NFF_BEFORE	0x01
#define NFF_AFTER	0x02

#ifdef KERNEL
/* How to register: filter, insert location, target protosw, flags */
extern int register_sockfilter(struct NFDescriptor *,
			       struct NFDescriptor *,
			       struct protosw *, int);
/* How to unregister: filter, original protosw, flags */
extern int unregister_sockfilter(struct NFDescriptor *, struct protosw *, int);

TAILQ_HEAD(nf_list, NFDescriptor);

extern struct nf_list nf_list;		/* Global list of installed filters */
#endif

/* Filter callback return conventions */
#define NKE_OK 0
#define NKE_REMOVE -1

/*
 * Interface structure for inserting an installed socket NKE into an
 * existing socket.
 * 'handle' is the NKE to be inserted, 'where' is an insertion point,
 * and flags dictate the position of the to-be-inserted NKE relative to
 * the 'where' NKE.  If the latter is NULL, the flags indicate "first"
 * or "last"
 */
struct so_nke
{	unsigned int nke_handle;
	unsigned int nke_where;
	int nke_flags; /* NFF_BEFORE, NFF_AFTER: net/kext_net.h */
};

/*
 * sockif:
 * Contains socket interface:
 *	dispatch vector abstracting the interface between protocols and
 *	the socket layer.
 * TODO: add sf_sosense()
 */
/*
 * Every entry takes the owning kextcb as its final argument, so a
 * filter can locate its per-socket state.  Entries are invoked from
 * the corresponding socket-layer routine named in each comment.
 */
struct sockif
{	int (*sf_soabort)(struct socket *, struct kextcb *);
	int (*sf_soaccept)(struct socket *, struct sockaddr **,
			   struct kextcb *);
	int (*sf_sobind)(struct socket *, struct sockaddr *, struct kextcb *);
	int (*sf_soclose)(struct socket *, struct kextcb *);
	int (*sf_soconnect)(struct socket *, struct sockaddr *,
			    struct kextcb *);
	int (*sf_soconnect2)(struct socket *, struct socket *,
			     struct kextcb *);
	int (*sf_socontrol)(struct socket *, struct sockopt *,
			    struct kextcb *);
	int (*sf_socreate)(struct socket *, struct protosw *, struct kextcb *);
	int (*sf_sodisconnect)(struct socket *, struct kextcb *);
	int (*sf_sofree)(struct socket *, struct kextcb *);
	int (*sf_sogetopt)(struct socket *, int, int, struct mbuf **,
			   struct kextcb *);
	int (*sf_sohasoutofband)(struct socket *, struct kextcb *);
	int (*sf_solisten)(struct socket *, struct kextcb *);
	int (*sf_soreceive)(struct socket *, struct sockaddr **, struct uio **,
			    struct mbuf **, struct mbuf **, int *,
			    struct kextcb *);
	int (*sf_sorflush)(struct socket *, struct kextcb *);
	int (*sf_sosend)(struct socket *, struct sockaddr **, struct uio **,
			 struct mbuf **, struct mbuf **, int *,
			 struct kextcb *);
	int (*sf_sosetopt)(struct socket *, int, int, struct mbuf *,
			   struct kextcb *);
	int (*sf_soshutdown)(struct socket *, int, struct kextcb *);
	/* Calls sorwakeup() */
	int (*sf_socantrcvmore)(struct socket *, struct kextcb *);
	/* Calls sowwakeup() */
	int (*sf_socantsendmore)(struct socket *, struct kextcb *);
	/* Calls soqinsque(), sorwakeup(), sowwakeup() */
	int (*sf_soisconnected)(struct socket *, struct kextcb *);
	int (*sf_soisconnecting)(struct socket *, struct kextcb *);
	/* Calls sowwakeup(), sorwakeup() */
	int (*sf_soisdisconnected)(struct socket *, struct kextcb *);
	/* Calls sowwakeup(), sorwakeup() */
	int (*sf_soisdisconnecting)(struct socket *, struct kextcb *);
	/* Calls soreserve(), soqinsque(),
	   soqremque(), sorwakeup() */
	struct socket *(*sf_sonewconn1)(struct socket *, int, struct kextcb *);
	int (*sf_soqinsque)(struct socket *, struct socket *, int,
			    struct kextcb *);
	int (*sf_soqremque)(struct socket *, int, struct kextcb *);
	int (*sf_soreserve)(struct socket *, u_long, u_long, struct kextcb *);
	int (*sf_sowakeup)(struct socket *, struct sockbuf *,
			   struct kextcb *);
};


/*
 * sockutil:
 * Contains the utility functions for socket layer access
 * (sockbuf manipulation hooks; same kextcb-last convention as sockif).
 */
struct sockutil
{	/* Sleeps if locked */
	int (*su_sb_lock)(struct sockbuf *, struct kextcb *);
	/* Conditionally calls sbappendrecord, Calls sbcompress */
	int (*su_sbappend)(struct sockbuf *, struct mbuf *, struct kextcb *);
	/* Calls sbspace(), sballoc() */
	int (*su_sbappendaddr)(struct sockbuf *, struct sockaddr *,
			       struct mbuf *, struct mbuf *, struct kextcb *);
	/* Calls sbspace(), sballoc() */
	int (*su_sbappendcontrol)(struct sockbuf *, struct mbuf *,
				  struct mbuf *, struct kextcb *);
	/* Calls sballoc(), sbcompress() */
	int (*su_sbappendrecord)(struct sockbuf *, struct mbuf *,
				 struct kextcb *);
	/* Calls sballoc() */
	int (*su_sbcompress)(struct sockbuf *, struct mbuf *, struct mbuf *,
			     struct kextcb *);
	/* Calls sbfree() */
	int (*su_sbdrop)(struct sockbuf *, int, struct kextcb *);
	/* Calls sbfree() */
	int (*su_sbdroprecord)(struct sockbuf *, struct kextcb *);
	/* Calls sbdrop() */
	int (*su_sbflush)(struct sockbuf *, struct kextcb *);
	/* Calls sballoc(), sbcompress() */
	int (*su_sbinsertoob)(struct sockbuf *, struct mbuf *,
			      struct kextcb *);
	/* Calls sbflush() */
	int (*su_sbrelease)(struct sockbuf *, struct kextcb *);
	int (*su_sbreserve)(struct sockbuf *, u_long, struct kextcb *);
	/* Calls tsleep() */
	int (*su_sbwait)(struct sockbuf *, struct kextcb *);
};

#endif
diff --git a/bsd/net/ndrv.c b/bsd/net/ndrv.c
new file mode 100644
index 000000000..820b8cc7b
--- /dev/null
+++ b/bsd/net/ndrv.c
@@ -0,0 +1,831 @@
/*
 * Copyright (c)
2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997, 1998 Apple Computer, Inc. All Rights Reserved */ +/* + * @(#)ndrv.c 1.1 (MacOSX) 6/10/43 + * Justin Walker, 970604 + * AF_NDRV support + * 980130 - Cleanup, reorg, performance improvemements + * 000816 - Removal of Y adapter cruft + */ + +/* + * PF_NDRV allows raw access to a specified network device, directly + * with a socket. Expected use involves a socket option to request + * protocol packets. This lets ndrv_output() call dlil_output(), and + * lets DLIL find the proper recipient for incoming packets. + * The purpose here is for user-mode protocol implementation. + * Note that "pure raw access" will still be accomplished with BPF. + * + * In addition to the former use, when combined with socket NKEs, + * PF_NDRV permits a fairly flexible mechanism for implementing + * strange protocol support. One of the main ones will be the + * BlueBox/Classic Shared IP Address support. 
 */

/*
 * NOTE(review): the angle-bracketed header names in the #include
 * directives below were lost when this text was extracted; restore
 * them from the original bsd/net/ndrv.c before compiling.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include

#include
#include
#include
#include
#include
#include
#include
#include "ndrv.h"

#if INET
#include
#include
#endif
#include

#if NS
#include
#include
#endif

#if ISO
#include
#include
#include
#include
#endif

#if LLC
#include
#include
#endif

#include

/* Forward declarations; defined later in this file */
int ndrv_do_detach(struct ndrv_cb *);
int ndrv_do_disconnect(struct ndrv_cb *);

/* Default sockbuf reservations, in bytes (see NDRVSNDQ/NDRVRCVQ in ndrv.h) */
unsigned long ndrv_sendspace = NDRVSNDQ;
unsigned long ndrv_recvspace = NDRVRCVQ;
struct ndrv_cb ndrvl;		/* Head of controlblock list */

/* To handle input, need to map tag to ndrv_cb */
struct ndrv_tag_map
{	unsigned int tm_tag;		/* Tag in use; 0 marks a free slot */
	struct ndrv_cb *tm_np;		/* Owning device */
	struct dlil_demux_desc *tm_dm;	/* Our local copy */
};

/* Grow-on-demand tag table; extended in TAG_MAP_COUNT-entry chunks */
struct ndrv_tag_map *ndrv_tags;
#define TAG_MAP_COUNT 10
int tag_map_count;

struct domain ndrvdomain;
extern struct protosw ndrvsw[];


/*
 * Protocol init function for NDRV protocol
 * Init the control block list (circular, doubly linked; ndrvl is the
 * list head and points at itself when the list is empty).
 */
void
ndrv_init()
{
	ndrvl.nd_next = ndrvl.nd_prev = &ndrvl;
}

/*
 * Protocol output - Called to output a raw network packet directly
 * to the driver.
+ */ +int +ndrv_output(register struct mbuf *m, register struct socket *so) +{ register struct ndrv_cb *np = sotondrvcb(so); + register struct ifnet *ifp = np->nd_if; + int s, error; + extern void kprintf(const char *, ...); + +#if NDRV_DEBUG + kprintf("NDRV output: %x, %x, %x\n", m, so, np); +#endif + + /* + * No header is a format error + */ + if ((m->m_flags&M_PKTHDR) == 0) + return(EINVAL); + + /* + * Can't do multicast accounting because we don't know + * (a) if our interface does multicast; and + * (b) what a multicast address looks like + */ + s = splimp(); + + /* + * Can't call DLIL to do the job - we don't have a tag + * and we aren't really a protocol + */ + + (*ifp->if_output)(ifp, m); + splx(s); + return (0); +} + +int +ndrv_input(struct mbuf *m, + char *frame_header, + struct ifnet *ifp, + u_long dl_tag, + int sync_ok) +{ int s; + struct socket *so; + struct sockaddr_dl ndrvsrc = {sizeof (struct sockaddr_dl), AF_NDRV}; + register struct ndrv_cb *np; + extern struct ndrv_cb *ndrv_find_tag(unsigned int); + + + /* move packet from if queue to socket */ + /* Should be media-independent */ + ndrvsrc.sdl_type = IFT_ETHER; + ndrvsrc.sdl_nlen = 0; + ndrvsrc.sdl_alen = 6; + ndrvsrc.sdl_slen = 0; + bcopy(frame_header, &ndrvsrc.sdl_data, 6); + + s = splnet(); + np = ndrv_find_tag(dl_tag); + if (np == NULL) + { splx(s); + return(ENOENT); + } + so = np->nd_socket; + if (sbappendaddr(&(so->so_rcv), (struct sockaddr *)&ndrvsrc, + m, (struct mbuf *)0) == 0) + { /* yes, sbappendaddr returns zero if the sockbuff is full... 
*/ + splx(s); + return(ENOMEM); + } else + sorwakeup(so); + splx(s); + return(0); +} + +int +ndrv_ioctl(unsigned long dl_tag, + struct ifnet *ifp, + unsigned long command, + caddr_t data) +{ + if (ifp) + return((*ifp->if_ioctl)(ifp, command, data)); +} + +int +ndrv_control(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p) +{ + return (0); +} + +/* + * Allocate an ndrv control block and some buffer space for the socket + */ +int +ndrv_attach(struct socket *so, int proto, struct proc *p) +{ int error; + register struct ndrv_cb *np = sotondrvcb(so); + + if ((so->so_state & SS_PRIV) == 0) + return(EPERM); + +#if NDRV_DEBUG + kprintf("NDRV attach: %x, %x, %x\n", so, proto, np); +#endif + MALLOC(np, struct ndrv_cb *, sizeof(*np), M_PCB, M_WAITOK); +#if NDRV_DEBUG + kprintf("NDRV attach: %x, %x, %x\n", so, proto, np); +#endif + if ((so->so_pcb = (caddr_t)np)) + bzero(np, sizeof(*np)); + else + return(ENOBUFS); + if ((error = soreserve(so, ndrv_sendspace, ndrv_recvspace))) + return(error); + TAILQ_INIT(&np->nd_dlist); + np->nd_signature = NDRV_SIGNATURE; + np->nd_socket = so; + np->nd_proto.sp_family = so->so_proto->pr_domain->dom_family; + np->nd_proto.sp_protocol = proto; + insque((queue_t)np, (queue_t)&ndrvl); + return(0); +} + +/* + * Destroy state just before socket deallocation. + * Flush data or not depending on the options. + */ + +int +ndrv_detach(struct socket *so) +{ + register struct ndrv_cb *np = sotondrvcb(so); + + if (np == 0) + return EINVAL; + return ndrv_do_detach(np); +} + + +/* + * If a socket isn't bound to a single address, + * the ndrv input routine will hand it anything + * within that protocol family (assuming there's + * nothing else around it should go to). + * + * Don't expect this to be used. 
+ */ + +int ndrv_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + register struct ndrv_cb *np = sotondrvcb(so); + + if (np == 0) + return EINVAL; + + if (np->nd_faddr) + return EISCONN; + + bcopy((caddr_t) nam, (caddr_t) np->nd_faddr, sizeof(struct sockaddr_ndrv)); + soisconnected(so); + return 0; +} + +/* + * This is the "driver open" hook - we 'bind' to the + * named driver. + * Here's where we latch onto the driver and make it ours. + */ +int +ndrv_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ register struct sockaddr_ndrv *sa = (struct sockaddr_ndrv *) nam; + register char *dname; + register struct ndrv_cb *np; + register struct ifnet *ifp; + extern int name_cmp(struct ifnet *, char *); + + if TAILQ_EMPTY(&ifnet) + return(EADDRNOTAVAIL); /* Quick sanity check */ + np = sotondrvcb(so); + if (np == 0) + return EINVAL; + + if (np->nd_laddr) + return EINVAL; /* XXX */ + + /* I think we just latch onto a copy here; the caller frees */ + np->nd_laddr = _MALLOC(sizeof(struct sockaddr_ndrv), M_IFADDR, M_WAITOK); + if (np->nd_laddr == NULL) + return(ENOMEM); + bcopy((caddr_t) sa, (caddr_t) np->nd_laddr, sizeof(struct sockaddr_ndrv)); + dname = sa->snd_name; + if (*dname == '\0') + return(EINVAL); +#if NDRV_DEBUG + kprintf("NDRV bind: %x, %x, %s\n", so, np, dname); +#endif + /* Track down the driver and its ifnet structure. + * There's no internal call for this so we have to dup the code + * in if.c/ifconf() + */ + TAILQ_FOREACH(ifp, &ifnet, if_link) { + if (name_cmp(ifp, dname) == 0) + break; + } + + if (ifp == NULL) + return(EADDRNOTAVAIL); + np->nd_if = ifp; + return(0); +} + +int +ndrv_disconnect(struct socket *so) +{ + register struct ndrv_cb *np = sotondrvcb(so); + + if (np == 0) + return EINVAL; + + if (np->nd_faddr == 0) + return ENOTCONN; + + ndrv_do_disconnect(np); + return 0; +} + +/* + * Mark the connection as being incapable of further input. 
+ */ +int +ndrv_shutdown(struct socket *so) +{ + socantsendmore(so); + return 0; +} + +/* + * Ship a packet out. The ndrv output will pass it + * to the appropriate driver. The really tricky part + * is the destination address... + */ +int +ndrv_send(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p) +{ + int error; + + if (control) + return EOPNOTSUPP; + + error = ndrv_output(m, so); + m = NULL; + return error; +} + + +int +ndrv_abort(struct socket *so) +{ + register struct ndrv_cb *np = sotondrvcb(so); + + if (np == 0) + return EINVAL; + + ndrv_do_disconnect(np); + return 0; +} + +int +ndrv_sense(struct socket *so, struct stat *sb) +{ + /* + * stat: don't bother with a blocksize. + */ + return (0); +} + +int +ndrv_sockaddr(struct socket *so, struct sockaddr **nam) +{ + register struct ndrv_cb *np = sotondrvcb(so); + int len; + + if (np == 0) + return EINVAL; + + if (np->nd_laddr == 0) + return EINVAL; + + len = np->nd_laddr->snd_len; + bcopy((caddr_t)np->nd_laddr, *nam, + (unsigned)len); + return 0; +} + + +int +ndrv_peeraddr(struct socket *so, struct sockaddr **nam) +{ + register struct ndrv_cb *np = sotondrvcb(so); + int len; + + if (np == 0) + return EINVAL; + + if (np->nd_faddr == 0) + return ENOTCONN; + + len = np->nd_faddr->snd_len; + bcopy((caddr_t)np->nd_faddr, *nam, + (unsigned)len); + return 0; +} + + +/* Control input */ + +void +ndrv_ctlinput(int dummy1, struct sockaddr *dummy2, void *dummy3) +{ +} + +/* Control output */ + +int +ndrv_ctloutput(struct socket *so, struct sockopt *sopt) +{ register struct ndrv_cb *np = sotondrvcb(so); + struct ndrv_descr nd; + int count = 0, error = 0; + int ndrv_getspec(struct ndrv_cb *, + struct sockopt *, + struct ndrv_descr *); + int ndrv_setspec(struct ndrv_cb *, struct ndrv_descr *); + int ndrv_delspec(struct ndrv_cb *, struct ndrv_descr *); + + if (sopt->sopt_name != NDRV_DMXSPECCNT) + error = sooptcopyin(sopt, &nd, sizeof nd, sizeof nd); + if (error == 
0) + { switch(sopt->sopt_name) + { case NDRV_DMXSPEC: /* Get/Set(Add) spec list */ + if (sopt->sopt_dir == SOPT_GET) + error = ndrv_getspec(np, sopt, &nd); + else + error = ndrv_setspec(np, &nd); + break; + case NDRV_DELDMXSPEC: /* Delete specified specs */ + error = ndrv_delspec(np, &nd); + break; + case NDRV_DMXSPECCNT: /* How many are in the list */ + count = np->nd_descrcnt; + error = sooptcopyout(sopt, &count, sizeof count); + break; + } + } +#ifdef NDRV_DEBUG + log(LOG_WARNING, "NDRV CTLOUT: %x returns %d\n", sopt->sopt_name, + error); +#endif + return(error); +} + +/* Drain the queues */ +void +ndrv_drain() +{ +} + +/* Sysctl hook for NDRV */ +int +ndrv_sysctl() +{ + return(0); +} + +int +ndrv_do_detach(register struct ndrv_cb *np) +{ register struct socket *so = np->nd_socket; + int ndrv_dump_descr(struct ndrv_cb *); + +#if NDRV_DEBUG + kprintf("NDRV detach: %x, %x\n", so, np); +#endif + if (!TAILQ_EMPTY(&np->nd_dlist)) + ndrv_dump_descr(np); + + remque((queue_t)np); + FREE((caddr_t)np, M_PCB); + so->so_pcb = 0; + sofree(so); + return(0); +} + +int +ndrv_do_disconnect(register struct ndrv_cb *np) +{ +#if NDRV_DEBUG + kprintf("NDRV disconnect: %x\n", np); +#endif + if (np->nd_faddr) + { m_freem(dtom(np->nd_faddr)); + np->nd_faddr = 0; + } + if (np->nd_socket->so_state & SS_NOFDREF) + ndrv_do_detach(np); + soisdisconnected(np->nd_socket); + return(0); +} + +/* + * Try to compare a device name (q) with one of the funky ifnet + * device names (ifp). 
+ */ +int name_cmp(register struct ifnet *ifp, register char *q) +{ register char *r; + register int len; + char buf[IFNAMSIZ]; + static char *sprint_d(); + + r = buf; + len = strlen(ifp->if_name); + strncpy(r, ifp->if_name, IFNAMSIZ); + r += len; + (void)sprint_d(ifp->if_unit, r, IFNAMSIZ-(r-buf)); +#if NDRV_DEBUG + kprintf("Comparing %s, %s\n", buf, q); +#endif + return(strncmp(buf, q, IFNAMSIZ)); +} + +/* Hackery - return a string version of a decimal number */ +static char * +sprint_d(n, buf, buflen) + u_int n; + char *buf; + int buflen; +{ char dbuf[IFNAMSIZ]; + register char *cp = dbuf+IFNAMSIZ-1; + + *cp = 0; + do { buflen--; + cp--; + *cp = "0123456789"[n % 10]; + n /= 10; + } while (n != 0 && buflen > 0); + strncpy(buf, cp, IFNAMSIZ-buflen); + return (cp); +} + +/* + * When closing, dump any enqueued mbufs. + */ +void +ndrv_flushq(register struct ifqueue *q) +{ register struct mbuf *m; + register int s; + for (;;) + { s = splimp(); + IF_DEQUEUE(q, m); + if (m == NULL) + break; + IF_DROP(q); + splx(s); + if (m) + m_freem(m); + } + splx(s); +} + +int +ndrv_getspec(struct ndrv_cb *np, + struct sockopt *sopt, + struct ndrv_descr *nd) +{ struct dlil_demux_desc *mp, *mp1; + int i, k, error = 0; + + /* Compute # structs to copy */ + i = k = min(np->nd_descrcnt, + (nd->nd_len / sizeof (struct dlil_demux_desc))); + mp = (struct dlil_demux_desc *)nd->nd_buf; + TAILQ_FOREACH(mp1, &np->nd_dlist, next) + { if (k-- == 0) + break; + error = copyout(mp1, mp++, sizeof (struct dlil_demux_desc)); + if (error) + break; + } + if (error == 0) + { nd->nd_len = i * (sizeof (struct dlil_demux_desc)); + error = sooptcopyout(sopt, nd, sizeof (*nd)); + } + return(error); +} + +/* + * Install a protocol descriptor, making us a protocol handler. + * We expect the client to handle all output tasks (we get fully + * formed frames from the client and hand them to the driver + * directly). The reason we register is to get those incoming + * frames. 
We do it as a protocol handler because the network layer + * already knows how find the ones we want, so there's no need to + * duplicate effort. + * Since this mechanism is mostly for user mode, most of the procedures + * to be registered will be null. + * Note that we jam the pair (PF_XXX, native_type) into the native_type + * field of the demux descriptor. Yeah, it's a hack. + */ +int +ndrv_setspec(struct ndrv_cb *np, struct ndrv_descr *nd) +{ struct dlil_demux_desc *mp, *mp1; + int i = 0, error = 0, j; + unsigned long value; + int *native_values; + struct dlil_proto_reg_str proto_spec; + int ndrv_add_descr(struct ndrv_cb *, struct dlil_proto_reg_str *); + + bzero((caddr_t)&proto_spec, sizeof (proto_spec)); + i = nd->nd_len / (sizeof (struct dlil_demux_desc)); /* # elts */ + MALLOC(native_values,int *, i * sizeof (int), M_TEMP, M_WAITOK); + mp = (struct dlil_demux_desc *)nd->nd_buf; + for (j = 0; j++ < i;) + { MALLOC(mp1, struct dlil_demux_desc *, + sizeof (struct dlil_demux_desc), M_PCB, M_WAITOK); + if (mp1 == NULL) + { error = ENOBUFS; + break; + } + error = copyin(mp++, mp1, sizeof (struct dlil_demux_desc)); + if (error) + break; + TAILQ_INSERT_TAIL(&np->nd_dlist, mp1, next); + value = (unsigned long)mp1->native_type; + native_values[j] = (unsigned short)value; + mp1->native_type = (char *)&native_values[j]; + proto_spec.protocol_family = (unsigned char)(value>>16); /* Oy! */ + proto_spec.interface_family = np->nd_if->if_family; + proto_spec.unit_number = np->nd_if->if_unit; + /* Our input */ + proto_spec.input = ndrv_input; + proto_spec.pre_output = NULL; + /* No event/offer functionality needed */ + proto_spec.event = NULL; + proto_spec.offer = NULL; + proto_spec.ioctl = ndrv_ioctl; /* ??? */ + /* What exactly does this do again? 
*/ + proto_spec.default_proto = 0; + + np->nd_descrcnt++; + } + if (error) + { struct dlil_demux_desc *mp2; + + TAILQ_FOREACH(mp2, &np->nd_dlist, next) + FREE(mp2, M_PCB); + } else + error = ndrv_add_descr(np, &proto_spec); +#ifdef NDRV_DEBUG + log(LOG_WARNING, "NDRV ADDSPEC: got error %d\n", error); +#endif + FREE(native_values, M_TEMP); + return(error); +} + +int +ndrv_delspec(struct ndrv_cb *np, struct ndrv_descr *nd) +{ struct dlil_demux_desc *mp; + + return(EINVAL); +} + +struct ndrv_cb * +ndrv_find_tag(unsigned int tag) +{ struct ndrv_tag_map *tmp; + int i; + + tmp = ndrv_tags; + for (i=0; i++ < tag_map_count; tmp++) + if (tmp->tm_tag == tag) + return(tmp->tm_np); + return(NULL); +} + +int +ndrv_add_tag(struct ndrv_cb *np, unsigned int tag, + struct dlil_demux_desc *mp) +{ struct ndrv_tag_map *tmp; + int i; + + tmp = ndrv_tags; + for (i=0; i++ < tag_map_count; tmp++) + if (tmp->tm_tag == 0) + { tmp->tm_tag = tag; + tmp->tm_np = np; +#ifdef NDRV_DEBUG + log(LOG_WARNING, "NDRV ADDING TAG %d\n", tag); +#endif + return(0); + } + + /* Oops - ran out of space. Realloc */ + i = tag_map_count + TAG_MAP_COUNT; + MALLOC(tmp, struct ndrv_tag_map *, i * sizeof (struct ndrv_tag_map), + M_PCB, M_WAITOK); + if (tmp == NULL) + return(ENOMEM); + /* Clear tail of new table, except for the slot we are creating ... */ + bzero((caddr_t)&tmp[tag_map_count+1], + (TAG_MAP_COUNT-1) * sizeof (struct ndrv_tag_map)); + /* ...and then copy in the original piece */ + if (tag_map_count) + bcopy(ndrv_tags, tmp, + tag_map_count * sizeof (struct ndrv_tag_map)); + /* ...and then install the new tag... */ + tmp[tag_map_count].tm_tag = tag; + tmp[tag_map_count].tm_np = np; + tag_map_count = i; + if (tag_map_count) + FREE(ndrv_tags, M_PCB); + ndrv_tags = tmp; +#ifdef NDRV_DEBUG + log(LOG_WARNING, "NDRV ADDING TAG %d (new chunk)\n", tag); +#endif + return(0); +} + +/* + * Attach the proto spec list, and record the tags. 
+ */ +int +ndrv_add_descr(struct ndrv_cb *np, struct dlil_proto_reg_str *proto_spec) +{ unsigned long dl_tag; + int error; + struct dlil_demux_desc *mp; + + /* Attach to our device to get requested packets */ + TAILQ_INIT(&proto_spec->demux_desc_head); + error = dlil_attach_protocol(proto_spec, &dl_tag); + + if (error == 0) + error = ndrv_add_tag(np, dl_tag, mp); + + return(error); +} + +int +ndrv_dump_descr(struct ndrv_cb *np) +{ struct dlil_demux_desc *dm1, *dm2; + struct ndrv_tag_map *tmp; + int i, error = 0; + + if (dm1 = TAILQ_FIRST(&np->nd_dlist)) + { for (i = 0, tmp = &ndrv_tags[0]; i++ < tag_map_count; tmp++) + if (tmp->tm_np == np) + { error = dlil_detach_protocol(tmp->tm_tag); + while (dm1) + { dm2 = TAILQ_NEXT(dm1, next); + FREE(dm1, M_PCB); + dm1 = dm2; + } + log(LOG_WARNING, + "Detached tag %d (error %d)\n", + tmp->tm_tag, error); + tmp->tm_np = 0; + tmp->tm_tag = 0; + } + } + return(0); +} + +void ndrv_dominit() +{ + static int ndrv_dominited = 0; + + if (ndrv_dominited == 0) { + net_add_proto(&ndrvsw[0], &ndrvdomain); + + ndrv_dominited = 1; + } +} + +struct pr_usrreqs ndrv_usrreqs = { + ndrv_abort, pru_accept_notsupp, ndrv_attach, ndrv_bind, + ndrv_connect, pru_connect2_notsupp, ndrv_control, ndrv_detach, + ndrv_disconnect, pru_listen_notsupp, ndrv_peeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, ndrv_send, ndrv_sense, ndrv_shutdown, + ndrv_sockaddr, sosend, soreceive, sopoll +}; + +struct protosw ndrvsw[] = +{ { SOCK_RAW, &ndrvdomain, 0, PR_ATOMIC|PR_ADDR, + 0, ndrv_output, ndrv_ctlinput, ndrv_ctloutput, + 0, ndrv_init, 0, 0, + ndrv_drain, ndrv_sysctl, &ndrv_usrreqs + } +}; + +struct domain ndrvdomain = +{ AF_NDRV, "NetDriver", ndrv_dominit, NULL, NULL, + NULL, + NULL, NULL, 0, 0, 0, 0 +}; diff --git a/bsd/net/ndrv.h b/bsd/net/ndrv.h new file mode 100644 index 000000000..38b75b558 --- /dev/null +++ b/bsd/net/ndrv.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1997, 1998 Apple Computer, Inc. All Rights Reserved */
/*
 * @(#)ndrv.h	1.1 (MacOSX) 6/10/43
 * Justin Walker - 970604
 */

#ifndef _NET_NDRV_H
#define _NET_NDRV_H

/* Address for an AF_NDRV socket: just an interface name */
struct sockaddr_ndrv
{	unsigned char snd_len;
	unsigned char snd_family;
	unsigned char snd_name[IFNAMSIZ]; /* from if.h */
};

/*
 * The cb is plugged into the socket (so_pcb), and the ifnet structure
 * of BIND is plugged in here.
 * For now, it looks like a raw_cb up front...
 */
struct ndrv_cb
{	struct ndrv_cb *nd_next;	/* Doubly-linked list */
	struct ndrv_cb *nd_prev;
	struct socket *nd_socket;	/* Back to the socket */
	unsigned int nd_signature;	/* Just double-checking */
	struct sockaddr_ndrv *nd_faddr;	/* foreign address, if connected */
	struct sockaddr_ndrv *nd_laddr;	/* local address, set by bind */
	struct sockproto nd_proto;	/* proto family, protocol */
	int nd_descrcnt;		/* # elements in nd_dlist */
	TAILQ_HEAD(dlist, dlil_demux_desc) nd_dlist; /* Descr.
						        list */
	struct ifnet *nd_if;		/* interface bound to, if any */
};

#define	sotondrvcb(so)		((struct ndrv_cb *)(so)->so_pcb)
#define NDRV_SIGNATURE	0x4e445256 /* "NDRV" */

/* Nominal allocated space for NDRV sockets */
#define NDRVSNDQ	 8192
#define NDRVRCVQ	 8192

/*
 * Support for user-mode protocol handlers
 */

/* Arg to socket options */
struct ndrv_descr
{	unsigned int nd_len;		/* Length of descriptor buffer, in bytes */
	unsigned char *nd_buf;		/* Descriptor buffer */
};

/* Socket-option names for ndrv_ctloutput() */
#define NDRV_DMXSPEC	0x01	/* Get/Set (Add) a list of protocol specs */
#define NDRV_DELDMXSPEC	0x02	/* Delete a list of protocol specs */
#define NDRV_DMXSPECCNT	0x03	/* Return number of active protocol specs */

#if KERNEL
extern struct ndrv_cb ndrvl;		/* Head of controlblock list */
#endif
#endif	/* _NET_NDRV_H */
diff --git a/bsd/net/net_osdep.c b/bsd/net/net_osdep.c
new file mode 100644
index 000000000..6807504ea
--- /dev/null
+++ b/bsd/net/net_osdep.c
@@ -0,0 +1,106 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
 */

/*
 * NOTE(review): the angle-bracketed header names in the #include
 * directives below were lost when this text was extracted; restore
 * them from the original bsd/net/net_osdep.c before compiling.
 */
#include
#include
#include
#include
#include
#include
#include
#if !defined(__FreeBSD__) || __FreeBSD__ < 3
#include
#endif
#include
#include
#include

#include
#include
#include
#include
#include

#if 0
#ifdef INET
#include
#include
#include
#include
#include
#endif /* INET */

#if INET6
#ifndef INET
#include
#endif
#include
#include
#include
#include
#include
#endif /* INET6 */
#endif

#if !(defined(__NetBSD__) || defined(__OpenBSD__)) || defined(__APPLE__)
/*
 * Format "name" + "unit" (e.g. "en0") for an interface, for platforms
 * whose struct ifnet lacks if_xname.
 * NOTE(review): returns a pointer to a single static buffer — the
 * result is overwritten by the next call and is not reentrant.
 */
const char *
if_name(ifp)
	struct ifnet *ifp;
{
	static char nam[IFNAMSIZ + 10];	/*enough?*/

#ifdef __bsdi__
	sprintf(nam, "%s%d", ifp->if_name, ifp->if_unit);
#else
	snprintf(nam, sizeof(nam), "%s%d", ifp->if_name, ifp->if_unit);
#endif
	return nam;
}
#endif
diff --git a/bsd/net/net_osdep.h b/bsd/net/net_osdep.h
new file mode 100644
index 000000000..eeb7c2f02
--- /dev/null
+++ b/bsd/net/net_osdep.h
@@ -0,0 +1,169 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * glue for kernel code programming differences. 
+ */ + +/* + * OS dependencies: + * + * - privileged process + * NetBSD, FreeBSD 3 + * struct proc *p; + * if (p && !suser(p->p_ucred, &p->p_acflag)) + * privileged; + * OpenBSD, BSDI [34], FreeBSD 2 + * struct socket *so; + * if (so->so_state & SS_PRIV) + * privileged; + * - foo_control + * NetBSD, FreeBSD 3 + * needs to give struct proc * as argument + * OpenBSD, BSDI [34], FreeBSD 2 + * do not need struct proc * + * - bpf: + * OpenBSD, NetBSD, BSDI [34] + * need caddr_t * (= if_bpf **) and struct ifnet * + * FreeBSD 2, FreeBSD 3 + * need only struct ifnet * as argument + * - struct ifnet + * use queue.h? member names if name + * --- --- --- + * FreeBSD 2 no old standard if_name+unit + * FreeBSD 3 yes strange if_name+unit + * OpenBSD yes standard if_xname + * NetBSD yes standard if_xname + * BSDI [34] no old standard if_name+unit + * - usrreq + * NetBSD, OpenBSD, BSDI [34], FreeBSD 2 + * single function with PRU_xx, arguments are mbuf + * FreeBSD 3 + * separates functions, non-mbuf arguments + * - {set,get}sockopt + * NetBSD, OpenBSD, BSDI [34], FreeBSD 2 + * manipulation based on mbuf + * FreeBSD 3 + * non-mbuf manipulation using sooptcopy{in,out}() + * - timeout() and untimeout() + * NetBSD, OpenBSD, BSDI [34], FreeBSD 2 + * timeout() is a void function + * FreeBSD 3 + * timeout() is non-void, must keep returned value for untimeuot() + * - sysctl + * NetBSD, OpenBSD + * foo_sysctl() + * BSDI [34] + * foo_sysctl() but with different style + * FreeBSD 2, FreeBSD 3 + * linker hack + * + * - if_ioctl + * NetBSD, FreeBSD 3, BSDI [34] + * 2nd argument is u_long cmd + * FreeBSD 2 + * 2nd argument is int cmd + * - if attach routines + * NetBSD + * void xxattach(int); + * FreeBSD 2, FreeBSD 3 + * void xxattach(void *); + * PSEUDO_SET(xxattach, if_xx); + * + * - ovbcopy() + * in NetBSD 1.4 or later, ovbcopy() is not supplied in the kernel. + * bcopy() is safe against overwrites. + * - splnet() + * NetBSD 1.4 or later requires splsoftnet(). 
+ * other operating systems use splnet(). + * + * - dtom() + * NEVER USE IT! + * + * - struct ifnet for loopback interface + * BSDI3: struct ifnet loif; + * BSDI4: struct ifnet *loifp; + * NetBSD, OpenBSD, FreeBSD2: struct ifnet loif[NLOOP]; + * + * odd thing is that many of them refers loif as ifnet *loif, + * not loif[NLOOP], from outside of if_loop.c. + */ + +#ifndef __NET_NET_OSDEP_H_DEFINED_ +#define __NET_NET_OSDEP_H_DEFINED_ +#ifdef KERNEL + +#if defined(__NetBSD__) || defined(__OpenBSD__) +#define if_name(ifp) ((ifp)->if_xname) +#else +struct ifnet; +extern char *if_name __P((struct ifnet *)); +#endif + +#if defined (__APPLE__) +#define HAVE_OLD_BPF +#endif + +//#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#if defined (__APPLE__) +#define ifa_list ifa_link +#define if_addrlist if_addrhead +#define if_list if_link +#endif + +#if defined(__NetBSD__) && __NetBSD_Version__ >= 104000000 +#define ovbcopy bcopy +#endif + +#if defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) +#define HAVE_NRL_INPCB +#endif + +#endif /*_KERNEL*/ +#endif /*__NET_NET_OSDEP_H_DEFINED_ */ diff --git a/bsd/net/netisr.c b/bsd/net/netisr.c new file mode 100644 index 000000000..2ae5be8eb --- /dev/null +++ b/bsd/net/netisr.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +/* HISTORY + * 18-May-90 Avadis Tevanian (avie) at NeXT + * Changed to use sensible priorities (higher numbers -> higher pri). + * + * 1-Feb-88 David Golub (dbg) at Carnegie-Mellon University + * Goofed... netisr thread must run at splnet, because the routines + * it calls expect to be called from the softnet interrupt (at + * splnet). + * + * 19-Nov-87 David Golub (dbg) at Carnegie-Mellon University + * Created. + * + */ + +/* + * netisr.c + * + * Kernel thread for network code. 
+ */ + + +#include +#include +#include + +#include +#include + +volatile int netisr; + + +void run_netisr(void) +{ + spl_t spl = splnet(); + + while (netisr != 0) { +#ifdef NIMP +#if NIMP > 0 + if (netisr & (1< 0 */ +#endif /* NIMP */ + +#if INET + if (netisr & (1<sadb_ext_len) +#define PFKEY_ADDR_PREFIX(ext) \ + (((struct sadb_address *)(ext))->sadb_address_prefixlen) +#define PFKEY_ADDR_PROTO(ext) \ + (((struct sadb_address *)(ext))->sadb_address_proto) +#define PFKEY_ADDR_SADDR(ext) \ + ((struct sockaddr *)((caddr_t)(ext) + sizeof(struct sadb_address))) + +#if 1 +/* in 64bits */ +#define PFKEY_UNUNIT64(a) ((a) << 3) +#define PFKEY_UNIT64(a) ((a) >> 3) +#else +#define PFKEY_UNUNIT64(a) (a) +#define PFKEY_UNIT64(a) (a) +#endif + +#ifndef KERNEL +extern void pfkey_sadump __P((struct sadb_msg *)); +extern void pfkey_spdump __P((struct sadb_msg *)); + +struct sockaddr; +int ipsec_check_keylen __P((u_int, u_int, u_int)); +u_int pfkey_set_softrate __P((u_int, u_int)); +u_int pfkey_get_softrate __P((u_int)); +int pfkey_send_getspi __P((int, u_int, u_int, struct sockaddr *, + struct sockaddr *, u_int32_t, u_int32_t, u_int32_t, u_int32_t)); +int pfkey_send_update __P((int, u_int, u_int, struct sockaddr *, + struct sockaddr *, u_int32_t, u_int32_t, u_int, + caddr_t, u_int, u_int, u_int, u_int, u_int, u_int32_t, u_int64_t, + u_int64_t, u_int64_t, u_int32_t)); +int pfkey_send_add __P((int, u_int, u_int, struct sockaddr *, + struct sockaddr *, u_int32_t, u_int32_t, u_int, + caddr_t, u_int, u_int, u_int, u_int, u_int, u_int32_t, u_int64_t, + u_int64_t, u_int64_t, u_int32_t)); +int pfkey_send_delete __P((int, u_int, u_int, + struct sockaddr *, struct sockaddr *, u_int32_t)); +int pfkey_send_get __P((int, u_int, u_int, + struct sockaddr *, struct sockaddr *, u_int32_t)); +int pfkey_send_register __P((int, u_int)); +int pfkey_recv_register __P((int)); +int pfkey_send_flush __P((int, u_int)); +int pfkey_send_dump __P((int, u_int)); +int pfkey_send_promisc_toggle __P((int, int)); 
+int pfkey_send_spdadd __P((int, struct sockaddr *, u_int, + struct sockaddr *, u_int, u_int, caddr_t, int, u_int32_t)); +int pfkey_send_spdupdate __P((int, struct sockaddr *, u_int, + struct sockaddr *, u_int, u_int, caddr_t, int, u_int32_t)); +int pfkey_send_spddelete __P((int, struct sockaddr *, u_int, + struct sockaddr *, u_int, u_int, caddr_t, int, u_int32_t)); +int pfkey_send_spddelete2 __P((int, u_int32_t)); +int pfkey_send_spdget __P((int, u_int32_t)); +int pfkey_send_spdsetidx __P((int, struct sockaddr *, u_int, + struct sockaddr *, u_int, u_int, caddr_t, int, u_int32_t)); +int pfkey_send_spdflush __P((int)); +int pfkey_send_spddump __P((int)); + +int pfkey_open __P((void)); +void pfkey_close __P((int)); +struct sadb_msg *pfkey_recv __P((int)); +int pfkey_send __P((int, struct sadb_msg *, int)); +int pfkey_align __P((struct sadb_msg *, caddr_t *)); +int pfkey_check __P((caddr_t *)); + +#endif /*!KERNEL*/ + +#endif /* __PFKEY_V2_H */ + +#endif /* _NET_PFKEYV2_H_ */ diff --git a/bsd/net/ppp_comp.h b/bsd/net/ppp_comp.h new file mode 100644 index 000000000..2d4e31e97 --- /dev/null +++ b/bsd/net/ppp_comp.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * ppp_comp.h - Definitions for doing PPP packet compression. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. + * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + * + */ + +#ifndef _NET_PPP_COMP_H +#define _NET_PPP_COMP_H + +/* + * The following symbols control whether we include code for + * various compression methods. + */ +#ifndef DO_BSD_COMPRESS +#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */ +#endif +#ifndef DO_DEFLATE +#define DO_DEFLATE 1 /* by default, include Deflate */ +#endif +#define DO_PREDICTOR_1 0 +#define DO_PREDICTOR_2 0 + +/* + * Structure giving methods for compression/decompression. 
+ */ +#if PACKETPTR +struct compressor { + int compress_proto; /* CCP compression protocol number */ + + /* Allocate space for a compressor (transmit side) */ + void *(*comp_alloc) __P((u_char *options, int opt_len)); + /* Free space used by a compressor */ + void (*comp_free) __P((void *state)); + /* Initialize a compressor */ + int (*comp_init) __P((void *state, u_char *options, int opt_len, + int unit, int hdrlen, int debug)); + /* Reset a compressor */ + void (*comp_reset) __P((void *state)); + /* Compress a packet */ + int (*compress) __P((void *state, PACKETPTR *mret, + PACKETPTR mp, int orig_len, int max_len)); + /* Return compression statistics */ + void (*comp_stat) __P((void *state, struct compstat *stats)); + + /* Allocate space for a decompressor (receive side) */ + void *(*decomp_alloc) __P((u_char *options, int opt_len)); + /* Free space used by a decompressor */ + void (*decomp_free) __P((void *state)); + /* Initialize a decompressor */ + int (*decomp_init) __P((void *state, u_char *options, int opt_len, + int unit, int hdrlen, int mru, int debug)); + /* Reset a decompressor */ + void (*decomp_reset) __P((void *state)); + /* Decompress a packet. */ + int (*decompress) __P((void *state, PACKETPTR mp, + PACKETPTR *dmpp)); + /* Update state for an incompressible packet received */ + void (*incomp) __P((void *state, PACKETPTR mp)); + /* Return decompression statistics */ + void (*decomp_stat) __P((void *state, struct compstat *stats)); +}; +#endif /* PACKETPTR */ + +/* + * Return values for decompress routine. + * We need to make these distinctions so that we can disable certain + * useful functionality, namely sending a CCP reset-request as a result + * of an error detected after decompression. This is to avoid infringing + * a patent held by Motorola. + * Don't you just lurve software patents. + */ +#define DECOMP_OK 0 /* everything went OK */ +#define DECOMP_ERROR 1 /* error detected before decomp. 
*/ +#define DECOMP_FATALERROR 2 /* error detected after decomp. */ + +/* + * CCP codes. + */ +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 + +/* + * Max # bytes for a CCP option + */ +#define CCP_MAX_OPTION_LENGTH 32 + +/* + * Parts of a CCP packet. + */ +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 + +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 + +/* + * Definitions for BSD-Compress. + */ +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ + +/* Macros for handling the 3rd byte of the BSD-Compress config option. */ +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) + +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ + +/* + * Definitions for Deflate. + */ +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 8 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + DEFLATE_MIN_SIZE) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - DEFLATE_MIN_SIZE) << 4) \ + + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 + +/* + * Definitions for other, as yet unsupported, compression methods. 
+ */ +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ + +#endif /* _NET_PPP_COMP_H */ diff --git a/bsd/net/ppp_deflate.c b/bsd/net/ppp_deflate.c new file mode 100644 index 000000000..7d28a5b41 --- /dev/null +++ b/bsd/net/ppp_deflate.c @@ -0,0 +1,699 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * ppp_deflate.c - interface the zlib procedures for Deflate compression + * and decompression (as used by gzip) to the PPP code. + * This version is for use with mbufs on BSD-derived systems. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. 
The Australian National University + * makes no representations about the suitability of this software for + * any purpose. + * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + */ + +#include +#include +#include +#include +#include +#include + +#define PACKETPTR struct mbuf * +#include + +#if DO_DEFLATE + +#define DEFLATE_DEBUG 1 + +/* + * State for a Deflate (de)compressor. 
+ */ +struct deflate_state { + int seqno; + int w_size; + int unit; + int hdrlen; + int mru; + int debug; + z_stream strm; + struct compstat stats; +}; + +#define DEFLATE_OVHD 2 /* Deflate overhead/packet */ + +static void *z_alloc __P((void *, u_int items, u_int size)); +static void z_free __P((void *, void *ptr)); +static void *z_comp_alloc __P((u_char *options, int opt_len)); +static void *z_decomp_alloc __P((u_char *options, int opt_len)); +static void z_comp_free __P((void *state)); +static void z_decomp_free __P((void *state)); +static int z_comp_init __P((void *state, u_char *options, int opt_len, + int unit, int hdrlen, int debug)); +static int z_decomp_init __P((void *state, u_char *options, int opt_len, + int unit, int hdrlen, int mru, int debug)); +static int z_compress __P((void *state, struct mbuf **mret, + struct mbuf *mp, int slen, int maxolen)); +static void z_incomp __P((void *state, struct mbuf *dmsg)); +static int z_decompress __P((void *state, struct mbuf *cmp, + struct mbuf **dmpp)); +static void z_comp_reset __P((void *state)); +static void z_decomp_reset __P((void *state)); +static void z_comp_stats __P((void *state, struct compstat *stats)); + +/* + * Procedures exported to if_ppp.c. 
+ */ +struct compressor ppp_deflate = { + CI_DEFLATE, /* compress_proto */ + z_comp_alloc, /* comp_alloc */ + z_comp_free, /* comp_free */ + z_comp_init, /* comp_init */ + z_comp_reset, /* comp_reset */ + z_compress, /* compress */ + z_comp_stats, /* comp_stat */ + z_decomp_alloc, /* decomp_alloc */ + z_decomp_free, /* decomp_free */ + z_decomp_init, /* decomp_init */ + z_decomp_reset, /* decomp_reset */ + z_decompress, /* decompress */ + z_incomp, /* incomp */ + z_comp_stats, /* decomp_stat */ +}; + +struct compressor ppp_deflate_draft = { + CI_DEFLATE_DRAFT, /* compress_proto */ + z_comp_alloc, /* comp_alloc */ + z_comp_free, /* comp_free */ + z_comp_init, /* comp_init */ + z_comp_reset, /* comp_reset */ + z_compress, /* compress */ + z_comp_stats, /* comp_stat */ + z_decomp_alloc, /* decomp_alloc */ + z_decomp_free, /* decomp_free */ + z_decomp_init, /* decomp_init */ + z_decomp_reset, /* decomp_reset */ + z_decompress, /* decompress */ + z_incomp, /* incomp */ + z_comp_stats, /* decomp_stat */ +}; + +/* + * Space allocation and freeing routines for use by zlib routines. + */ +void * +z_alloc(notused, items, size) + void *notused; + u_int items, size; +{ + void *ptr; + + MALLOC(ptr, void *, items * size, M_DEVBUF, M_NOWAIT); + return ptr; +} + +void +z_free(notused, ptr) + void *notused; + void *ptr; +{ + FREE(ptr, M_DEVBUF); +} + +/* + * Allocate space for a compressor. 
+ */ +static void * +z_comp_alloc(options, opt_len) + u_char *options; + int opt_len; +{ + struct deflate_state *state; + int w_size; + + if (opt_len != CILEN_DEFLATE + || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) + || options[1] != CILEN_DEFLATE + || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL + || options[3] != DEFLATE_CHK_SEQUENCE) + return NULL; + w_size = DEFLATE_SIZE(options[2]); + if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) + return NULL; + + MALLOC(state, struct deflate_state *, sizeof(struct deflate_state), + M_DEVBUF, M_NOWAIT); + if (state == NULL) + return NULL; + + state->strm.next_in = NULL; + state->strm.zalloc = z_alloc; + state->strm.zfree = z_free; + if (deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION, DEFLATE_METHOD_VAL, + -w_size, 8, Z_DEFAULT_STRATEGY) != Z_OK) { + FREE(state, M_DEVBUF); + return NULL; + } + + state->w_size = w_size; + bzero(&state->stats, sizeof(state->stats)); + return (void *) state; +} + +static void +z_comp_free(arg) + void *arg; +{ + struct deflate_state *state = (struct deflate_state *) arg; + + deflateEnd(&state->strm); + FREE(state, M_DEVBUF); +} + +static int +z_comp_init(arg, options, opt_len, unit, hdrlen, debug) + void *arg; + u_char *options; + int opt_len, unit, hdrlen, debug; +{ + struct deflate_state *state = (struct deflate_state *) arg; + + if (opt_len < CILEN_DEFLATE + || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) + || options[1] != CILEN_DEFLATE + || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL + || DEFLATE_SIZE(options[2]) != state->w_size + || options[3] != DEFLATE_CHK_SEQUENCE) + return 0; + + state->seqno = 0; + state->unit = unit; + state->hdrlen = hdrlen; + state->debug = debug; + + deflateReset(&state->strm); + + return 1; +} + +static void +z_comp_reset(arg) + void *arg; +{ + struct deflate_state *state = (struct deflate_state *) arg; + + state->seqno = 0; + deflateReset(&state->strm); +} + +int +z_compress(arg, mret, mp, orig_len, 
maxolen) + void *arg; + struct mbuf **mret; /* compressed packet (out) */ + struct mbuf *mp; /* uncompressed packet (in) */ + int orig_len, maxolen; +{ + struct deflate_state *state = (struct deflate_state *) arg; + u_char *rptr, *wptr; + int proto, olen, wspace, r, flush; + struct mbuf *m; + + /* + * Check that the protocol is in the range we handle. + */ + rptr = mtod(mp, u_char *); + proto = PPP_PROTOCOL(rptr); + if (proto > 0x3fff || proto == 0xfd || proto == 0xfb) { + *mret = NULL; + return orig_len; + } + + /* Allocate one mbuf initially. */ + if (maxolen > orig_len) + maxolen = orig_len; + MGET(m, M_DONTWAIT, MT_DATA); + *mret = m; + if (m != NULL) { + m->m_len = 0; + if (maxolen + state->hdrlen > MLEN) + MCLGET(m, M_DONTWAIT); + wspace = M_TRAILINGSPACE(m); + if (state->hdrlen + PPP_HDRLEN + 2 < wspace) { + m->m_data += state->hdrlen; + wspace -= state->hdrlen; + } + wptr = mtod(m, u_char *); + + /* + * Copy over the PPP header and store the 2-byte sequence number. + */ + wptr[0] = PPP_ADDRESS(rptr); + wptr[1] = PPP_CONTROL(rptr); + wptr[2] = PPP_COMP >> 8; + wptr[3] = PPP_COMP; + wptr += PPP_HDRLEN; + wptr[0] = state->seqno >> 8; + wptr[1] = state->seqno; + wptr += 2; + state->strm.next_out = wptr; + state->strm.avail_out = wspace - (PPP_HDRLEN + 2); + } else { + state->strm.next_out = NULL; + state->strm.avail_out = 1000000; + wptr = NULL; + wspace = 0; + } + ++state->seqno; + + rptr += (proto > 0xff)? 2: 3; /* skip 1st proto byte if 0 */ + state->strm.next_in = rptr; + state->strm.avail_in = mtod(mp, u_char *) + mp->m_len - rptr; + mp = mp->m_next; + flush = (mp == NULL)? Z_PACKET_FLUSH: Z_NO_FLUSH; + olen = 0; + for (;;) { + r = deflate(&state->strm, flush); + if (r != Z_OK) { + printf("z_compress: deflate returned %d (%s)\n", + r, (state->strm.msg? 
state->strm.msg: "")); + break; + } + if (flush != Z_NO_FLUSH && state->strm.avail_out != 0) + break; /* all done */ + if (state->strm.avail_in == 0 && mp != NULL) { + state->strm.next_in = mtod(mp, u_char *); + state->strm.avail_in = mp->m_len; + mp = mp->m_next; + if (mp == NULL) + flush = Z_PACKET_FLUSH; + } + if (state->strm.avail_out == 0) { + if (m != NULL) { + m->m_len = wspace; + olen += wspace; + MGET(m->m_next, M_DONTWAIT, MT_DATA); + m = m->m_next; + if (m != NULL) { + m->m_len = 0; + if (maxolen - olen > MLEN) + MCLGET(m, M_DONTWAIT); + state->strm.next_out = mtod(m, u_char *); + state->strm.avail_out = wspace = M_TRAILINGSPACE(m); + } + } + if (m == NULL) { + state->strm.next_out = NULL; + state->strm.avail_out = 1000000; + } + } + } + if (m != NULL) + olen += (m->m_len = wspace - state->strm.avail_out); + + /* + * See if we managed to reduce the size of the packet. + */ + if (m != NULL && olen < orig_len) { + state->stats.comp_bytes += olen; + state->stats.comp_packets++; + } else { + if (*mret != NULL) { + m_freem(*mret); + *mret = NULL; + } + state->stats.inc_bytes += orig_len; + state->stats.inc_packets++; + olen = orig_len; + } + state->stats.unc_bytes += orig_len; + state->stats.unc_packets++; + + return olen; +} + +static void +z_comp_stats(arg, stats) + void *arg; + struct compstat *stats; +{ + struct deflate_state *state = (struct deflate_state *) arg; + u_int out; + + *stats = state->stats; + stats->ratio = stats->unc_bytes; + out = stats->comp_bytes + stats->inc_bytes; + if (stats->ratio <= 0x7ffffff) + stats->ratio <<= 8; + else + out >>= 8; + if (out != 0) + stats->ratio /= out; +} + +/* + * Allocate space for a decompressor. 
+ */ +static void * +z_decomp_alloc(options, opt_len) + u_char *options; + int opt_len; +{ + struct deflate_state *state; + int w_size; + + if (opt_len != CILEN_DEFLATE + || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) + || options[1] != CILEN_DEFLATE + || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL + || options[3] != DEFLATE_CHK_SEQUENCE) + return NULL; + w_size = DEFLATE_SIZE(options[2]); + if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE) + return NULL; + + MALLOC(state, struct deflate_state *, sizeof(struct deflate_state), + M_DEVBUF, M_NOWAIT); + if (state == NULL) + return NULL; + + state->strm.next_out = NULL; + state->strm.zalloc = z_alloc; + state->strm.zfree = z_free; + if (inflateInit2(&state->strm, -w_size) != Z_OK) { + FREE(state, M_DEVBUF); + return NULL; + } + + state->w_size = w_size; + bzero(&state->stats, sizeof(state->stats)); + return (void *) state; +} + +static void +z_decomp_free(arg) + void *arg; +{ + struct deflate_state *state = (struct deflate_state *) arg; + + inflateEnd(&state->strm); + FREE(state, M_DEVBUF); +} + +static int +z_decomp_init(arg, options, opt_len, unit, hdrlen, mru, debug) + void *arg; + u_char *options; + int opt_len, unit, hdrlen, mru, debug; +{ + struct deflate_state *state = (struct deflate_state *) arg; + + if (opt_len < CILEN_DEFLATE + || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) + || options[1] != CILEN_DEFLATE + || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL + || DEFLATE_SIZE(options[2]) != state->w_size + || options[3] != DEFLATE_CHK_SEQUENCE) + return 0; + + state->seqno = 0; + state->unit = unit; + state->hdrlen = hdrlen; + state->debug = debug; + state->mru = mru; + + inflateReset(&state->strm); + + return 1; +} + +static void +z_decomp_reset(arg) + void *arg; +{ + struct deflate_state *state = (struct deflate_state *) arg; + + state->seqno = 0; + inflateReset(&state->strm); +} + +/* + * Decompress a Deflate-compressed packet. 
+ * + * Because of patent problems, we return DECOMP_ERROR for errors + * found by inspecting the input data and for system problems, but + * DECOMP_FATALERROR for any errors which could possibly be said to + * be being detected "after" decompression. For DECOMP_ERROR, + * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be + * infringing a patent of Motorola's if we do, so we take CCP down + * instead. + * + * Given that the frame has the correct sequence number and a good FCS, + * errors such as invalid codes in the input most likely indicate a + * bug, so we return DECOMP_FATALERROR for them in order to turn off + * compression, even though they are detected by inspecting the input. + */ +int +z_decompress(arg, mi, mop) + void *arg; + struct mbuf *mi, **mop; +{ + struct deflate_state *state = (struct deflate_state *) arg; + struct mbuf *mo, *mo_head; + u_char *rptr, *wptr; + int rlen, olen, ospace; + int seq, i, flush, r, decode_proto; + u_char hdr[PPP_HDRLEN + DEFLATE_OVHD]; + + *mop = NULL; + rptr = mtod(mi, u_char *); + rlen = mi->m_len; + for (i = 0; i < PPP_HDRLEN + DEFLATE_OVHD; ++i) { + while (rlen <= 0) { + mi = mi->m_next; + if (mi == NULL) + return DECOMP_ERROR; + rptr = mtod(mi, u_char *); + rlen = mi->m_len; + } + hdr[i] = *rptr++; + --rlen; + } + + /* Check the sequence number. */ + seq = (hdr[PPP_HDRLEN] << 8) + hdr[PPP_HDRLEN+1]; + if (seq != state->seqno) { + if (state->debug) + printf("z_decompress%d: bad seq # %d, expected %d\n", + state->unit, seq, state->seqno); + return DECOMP_ERROR; + } + ++state->seqno; + + /* Allocate an output mbuf. */ + MGETHDR(mo, M_DONTWAIT, MT_DATA); + if (mo == NULL) + return DECOMP_ERROR; + mo_head = mo; + mo->m_len = 0; + mo->m_next = NULL; + MCLGET(mo, M_DONTWAIT); + ospace = M_TRAILINGSPACE(mo); + if (state->hdrlen + PPP_HDRLEN < ospace) { + mo->m_data += state->hdrlen; + ospace -= state->hdrlen; + } + + /* + * Fill in the first part of the PPP header. 
The protocol field + * comes from the decompressed data. + */ + wptr = mtod(mo, u_char *); + wptr[0] = PPP_ADDRESS(hdr); + wptr[1] = PPP_CONTROL(hdr); + wptr[2] = 0; + + /* + * Set up to call inflate. We set avail_out to 1 initially so we can + * look at the first byte of the output and decide whether we have + * a 1-byte or 2-byte protocol field. + */ + state->strm.next_in = rptr; + state->strm.avail_in = rlen; + mi = mi->m_next; + flush = (mi == NULL)? Z_PACKET_FLUSH: Z_NO_FLUSH; + rlen += PPP_HDRLEN + DEFLATE_OVHD; + state->strm.next_out = wptr + 3; + state->strm.avail_out = 1; + decode_proto = 1; + olen = PPP_HDRLEN; + + /* + * Call inflate, supplying more input or output as needed. + */ + for (;;) { + r = inflate(&state->strm, flush); + if (r != Z_OK) { +#if !DEFLATE_DEBUG + if (state->debug) +#endif + printf("z_decompress%d: inflate returned %d (%s)\n", + state->unit, r, (state->strm.msg? state->strm.msg: "")); + m_freem(mo_head); + return DECOMP_FATALERROR; + } + if (flush != Z_NO_FLUSH && state->strm.avail_out != 0) + break; /* all done */ + if (state->strm.avail_in == 0 && mi != NULL) { + state->strm.next_in = mtod(mi, u_char *); + state->strm.avail_in = mi->m_len; + rlen += mi->m_len; + mi = mi->m_next; + if (mi == NULL) + flush = Z_PACKET_FLUSH; + } + if (state->strm.avail_out == 0) { + if (decode_proto) { + state->strm.avail_out = ospace - PPP_HDRLEN; + if ((wptr[3] & 1) == 0) { + /* 2-byte protocol field */ + wptr[2] = wptr[3]; + --state->strm.next_out; + ++state->strm.avail_out; + --olen; + } + decode_proto = 0; + } else { + mo->m_len = ospace; + olen += ospace; + MGET(mo->m_next, M_DONTWAIT, MT_DATA); + mo = mo->m_next; + if (mo == NULL) { + m_freem(mo_head); + return DECOMP_ERROR; + } + MCLGET(mo, M_DONTWAIT); + state->strm.next_out = mtod(mo, u_char *); + state->strm.avail_out = ospace = M_TRAILINGSPACE(mo); + } + } + } + if (decode_proto) { + m_freem(mo_head); + return DECOMP_ERROR; + } + olen += (mo->m_len = ospace - state->strm.avail_out); +#if 
DEFLATE_DEBUG + if (state->debug && olen > state->mru + PPP_HDRLEN) + printf("ppp_deflate%d: exceeded mru (%d > %d)\n", + state->unit, olen, state->mru + PPP_HDRLEN); +#endif + + state->stats.unc_bytes += olen; + state->stats.unc_packets++; + state->stats.comp_bytes += rlen; + state->stats.comp_packets++; + + *mop = mo_head; + return DECOMP_OK; +} + +/* + * Incompressible data has arrived - add it to the history. + */ +static void +z_incomp(arg, mi) + void *arg; + struct mbuf *mi; +{ + struct deflate_state *state = (struct deflate_state *) arg; + u_char *rptr; + int rlen, proto, r; + + /* + * Check that the protocol is one we handle. + */ + rptr = mtod(mi, u_char *); + proto = PPP_PROTOCOL(rptr); + if (proto > 0x3fff || proto == 0xfd || proto == 0xfb) + return; + + ++state->seqno; + + /* + * Iterate through the mbufs, adding the characters in them + * to the decompressor's history. For the first mbuf, we start + * at the either the 1st or 2nd byte of the protocol field, + * depending on whether the protocol value is compressible. + */ + rlen = mi->m_len; + state->strm.next_in = rptr + 3; + state->strm.avail_in = rlen - 3; + if (proto > 0xff) { + --state->strm.next_in; + ++state->strm.avail_in; + } + for (;;) { + r = inflateIncomp(&state->strm); + if (r != Z_OK) { + /* gak! */ +#if !DEFLATE_DEBUG + if (state->debug) +#endif + printf("z_incomp%d: inflateIncomp returned %d (%s)\n", + state->unit, r, (state->strm.msg? state->strm.msg: "")); + return; + } + mi = mi->m_next; + if (mi == NULL) + break; + state->strm.next_in = mtod(mi, u_char *); + state->strm.avail_in = mi->m_len; + rlen += mi->m_len; + } + + /* + * Update stats. 
+ */ + state->stats.inc_bytes += rlen; + state->stats.inc_packets++; + state->stats.unc_bytes += rlen; + state->stats.unc_packets++; +} + +#endif /* DO_DEFLATE */ diff --git a/bsd/net/ppp_defs.h b/bsd/net/ppp_defs.h new file mode 100644 index 000000000..952a8c283 --- /dev/null +++ b/bsd/net/ppp_defs.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * ppp_defs.h - PPP definitions. + * + * Copyright (c) 1994 The Australian National University. + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation is hereby granted, provided that the above copyright + * notice appears in all copies. This software is provided without any + * warranty, express or implied. The Australian National University + * makes no representations about the suitability of this software for + * any purpose. 
+ * + * IN NO EVENT SHALL THE AUSTRALIAN NATIONAL UNIVERSITY BE LIABLE TO ANY + * PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF + * THE AUSTRALIAN NATIONAL UNIVERSITY HAVE BEEN ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + * + * THE AUSTRALIAN NATIONAL UNIVERSITY SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE AUSTRALIAN NATIONAL UNIVERSITY HAS NO + * OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, + * OR MODIFICATIONS. + * + */ + +#ifndef _PPP_DEFS_H_ +#define _PPP_DEFS_H_ + +/* + * The basic PPP frame. + */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ +#define PPP_MRU 1500 /* default MRU = max length of info field */ + +#define PPP_ADDRESS(p) (((u_char *)(p))[0]) +#define PPP_CONTROL(p) (((u_char *)(p))[1]) +#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) + +/* + * Significant octet values. + */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ + +/* + * Protocol field values. 
+ */ +#define PPP_IP 0x21 /* Internet Protocol */ +#define PPP_XNS 0x25 /* Xerox NS */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX Datagram (RFC1552) */ +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#define PPP_COMP 0xfd /* compressed packet */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol (RFC1552) */ +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ + +/* + * Values for FCS calculations. + */ +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) + +/* + * Extended asyncmap - allows any character to be escaped. + */ +typedef u_int32_t ext_accm[8]; + +/* + * What to do with network protocol (NP) packets. + */ +enum NPmode { + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ +}; + +/* + * Statistics. 
+ */ +struct pppstat { + unsigned int ppp_ibytes; /* bytes received */ + unsigned int ppp_ipackets; /* packets received */ + unsigned int ppp_ierrors; /* receive errors */ + unsigned int ppp_obytes; /* bytes sent */ + unsigned int ppp_opackets; /* packets sent */ + unsigned int ppp_oerrors; /* transmit errors */ +}; + +struct vjstat { + unsigned int vjs_packets; /* outbound packets */ + unsigned int vjs_compressed; /* outbound compressed packets */ + unsigned int vjs_searches; /* searches for connection state */ + unsigned int vjs_misses; /* times couldn't find conn. state */ + unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ + unsigned int vjs_compressedin; /* inbound compressed packets */ + unsigned int vjs_errorin; /* inbound unknown type packets */ + unsigned int vjs_tossed; /* inbound packets tossed because of error */ +}; + +struct ppp_stats { + struct pppstat p; /* basic PPP statistics */ + struct vjstat vj; /* VJ header compression statistics */ +}; + +struct compstat { + unsigned int unc_bytes; /* total uncompressed bytes */ + unsigned int unc_packets; /* total uncompressed packets */ + unsigned int comp_bytes; /* compressed bytes */ + unsigned int comp_packets; /* compressed packets */ + unsigned int inc_bytes; /* incompressible bytes */ + unsigned int inc_packets; /* incompressible packets */ + unsigned int ratio; /* recent compression ratio << 8 */ +}; + +struct ppp_comp_stats { + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ +}; + +/* + * The following structure records the time in seconds since + * the last NP packet was sent or received. 
+ */ +struct ppp_idle { + time_t xmit_idle; /* time since last NP packet sent */ + time_t recv_idle; /* time since last NP packet received */ +}; + +#ifndef __P +#ifdef __STDC__ +#define __P(x) x +#else +#define __P(x) () +#endif +#endif + +#endif /* _PPP_DEFS_H_ */ diff --git a/bsd/net/radix.c b/bsd/net/radix.c new file mode 100644 index 000000000..1ec496334 --- /dev/null +++ b/bsd/net/radix.c @@ -0,0 +1,1045 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)radix.c 8.4 (Berkeley) 11/2/94 + */ + +/* + * Routines to build and maintain radix trees for routing lookups. 
+ */ +#ifndef _RADIX_H_ +#include <sys/param.h> +#ifdef KERNEL +#include <sys/systm.h> +#include <sys/malloc.h> +#define M_DONTWAIT M_NOWAIT +#include <sys/domain.h> +#else +#include <stdlib.h> +#endif +#include <sys/syslog.h> +#include <net/radix.h> +#endif + +static int rn_walktree_from __P((struct radix_node_head *h, void *a, + void *m, walktree_f_t *f, void *w)); +static int rn_walktree __P((struct radix_node_head *, walktree_f_t *, void *)); +static struct radix_node + *rn_insert __P((void *, struct radix_node_head *, int *, + struct radix_node [2])), + *rn_newpair __P((void *, int, struct radix_node[2])), + *rn_search __P((void *, struct radix_node *)), + *rn_search_m __P((void *, struct radix_node *, void *)); + +static int max_keylen; +static struct radix_mask *rn_mkfreelist; +static struct radix_node_head *mask_rnhead; +static char *addmask_key; +static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1}; +static char *rn_zeros, *rn_ones; + +#define rn_masktop (mask_rnhead->rnh_treetop) +#undef Bcmp +#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l)) + +static int rn_lexobetter __P((void *m_arg, void *n_arg)); +static struct radix_mask * + rn_new_radix_mask __P((struct radix_node *tt, + struct radix_mask *next)); +static int rn_satsifies_leaf __P((char *trial, struct radix_node *leaf, + int skip)); + +/* + * The data structure for the keys is a radix tree with one way + * branching removed. The index rn_b at an internal node n represents a bit + * position to be tested. The tree is arranged so that all descendants + * of a node n have keys whose bits all agree up to position rn_b - 1. + * (We say the index of n is rn_b.) + * + * There is at least one descendant which has a one bit at position rn_b, + * and at least one with a zero there. + * + * A route is determined by a pair of key and mask. We require that the + * bit-wise logical and of the key and mask to be the key.
+ * We define the index of a route to associated with the mask to be + * the first bit number in the mask where 0 occurs (with bit number 0 + * representing the highest order bit). + * + * We say a mask is normal if every bit is 0, past the index of the mask. + * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b, + * and m is a normal mask, then the route applies to every descendant of n. + * If the index(m) < rn_b, this implies the trailing last few bits of k + * before bit b are all 0, (and hence consequently true of every descendant + * of n), so the route applies to all descendants of the node as well. + * + * Similar logic shows that a non-normal mask m such that + * index(m) <= index(n) could potentially apply to many children of n. + * Thus, for each non-host route, we attach its mask to a list at an internal + * node as high in the tree as we can go. + * + * The present version of the code makes use of normal routes in short- + * circuiting an explict mask and compare operation when testing whether + * a key satisfies a normal route, and also in remembering the unique leaf + * that governs a subtree. 
+ */ + +static struct radix_node * +rn_search(v_arg, head) + void *v_arg; + struct radix_node *head; +{ + register struct radix_node *x; + register caddr_t v; + + for (x = head, v = v_arg; x->rn_b >= 0;) { + if (x->rn_bmask & v[x->rn_off]) + x = x->rn_r; + else + x = x->rn_l; + } + return (x); +} + +static struct radix_node * +rn_search_m(v_arg, head, m_arg) + struct radix_node *head; + void *v_arg, *m_arg; +{ + register struct radix_node *x; + register caddr_t v = v_arg, m = m_arg; + + for (x = head; x->rn_b >= 0;) { + if ((x->rn_bmask & m[x->rn_off]) && + (x->rn_bmask & v[x->rn_off])) + x = x->rn_r; + else + x = x->rn_l; + } + return x; +} + +int +rn_refines(m_arg, n_arg) + void *m_arg, *n_arg; +{ + register caddr_t m = m_arg, n = n_arg; + register caddr_t lim, lim2 = lim = n + *(u_char *)n; + int longer = (*(u_char *)n++) - (int)(*(u_char *)m++); + int masks_are_equal = 1; + + if (longer > 0) + lim -= longer; + while (n < lim) { + if (*n & ~(*m)) + return 0; + if (*n++ != *m++) + masks_are_equal = 0; + } + while (n < lim2) + if (*n++) + return 0; + if (masks_are_equal && (longer < 0)) + for (lim2 = m - longer; m < lim2; ) + if (*m++) + return 1; + return (!masks_are_equal); +} + +struct radix_node * +rn_lookup(v_arg, m_arg, head) + void *v_arg, *m_arg; + struct radix_node_head *head; +{ + register struct radix_node *x; + caddr_t netmask = 0; + + if (m_arg) { + if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0) + return (0); + netmask = x->rn_key; + } + x = rn_match(v_arg, head); + if (x && netmask) { + while (x && x->rn_mask != netmask) + x = x->rn_dupedkey; + } + return x; +} + +static int +rn_satsifies_leaf(trial, leaf, skip) + char *trial; + register struct radix_node *leaf; + int skip; +{ + register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask; + char *cplim; + int length = min(*(u_char *)cp, *(u_char *)cp2); + + if (cp3 == 0) + cp3 = rn_ones; + else + length = min(length, *(u_char *)cp3); + cplim = cp + length; cp3 += skip; cp2 
+= skip; + for (cp += skip; cp < cplim; cp++, cp2++, cp3++) + if ((*cp ^ *cp2) & *cp3) + return 0; + return 1; +} + +struct radix_node * +rn_match(v_arg, head) + void *v_arg; + struct radix_node_head *head; +{ + caddr_t v = v_arg; + register struct radix_node *t = head->rnh_treetop, *x; + register caddr_t cp = v, cp2; + caddr_t cplim; + struct radix_node *saved_t, *top = t; + int off = t->rn_off, vlen = *(u_char *)cp, matched_off; + register int test, b, rn_b; + + /* + * Open code rn_search(v, top) to avoid overhead of extra + * subroutine call. + */ + for (; t->rn_b >= 0; ) { + if (t->rn_bmask & cp[t->rn_off]) + t = t->rn_r; + else + t = t->rn_l; + } + /* + * See if we match exactly as a host destination + * or at least learn how many bits match, for normal mask finesse. + * + * It doesn't hurt us to limit how many bytes to check + * to the length of the mask, since if it matches we had a genuine + * match and the leaf we have is the most specific one anyway; + * if it didn't match with a shorter length it would fail + * with a long one. This wins big for class B&C netmasks which + * are probably the most common case... + */ + if (t->rn_mask) + vlen = *(u_char *)t->rn_mask; + cp += off; cp2 = t->rn_key + off; cplim = v + vlen; + for (; cp < cplim; cp++, cp2++) + if (*cp != *cp2) + goto on1; + /* + * This extra grot is in case we are explicitly asked + * to look up the default. Ugh! + */ + if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey) + t = t->rn_dupedkey; + return t; +on1: + test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */ + for (b = 7; (test >>= 1) > 0;) + b--; + matched_off = cp - v; + b += matched_off << 3; + rn_b = -1 - b; + /* + * If there is a host route in a duped-key chain, it will be first. + */ + if ((saved_t = t)->rn_mask == 0) + t = t->rn_dupedkey; + for (; t; t = t->rn_dupedkey) + /* + * Even if we don't match exactly as a host, + * we may match if the leaf we wound up at is + * a route to a net. 
+ */ + if (t->rn_flags & RNF_NORMAL) { + if (rn_b <= t->rn_b) + return t; + } else if (rn_satsifies_leaf(v, t, matched_off)) + return t; + t = saved_t; + /* start searching up the tree */ + do { + register struct radix_mask *m; + t = t->rn_p; + m = t->rn_mklist; + if (m) { + /* + * If non-contiguous masks ever become important + * we can restore the masking and open coding of + * the search and satisfaction test and put the + * calculation of "off" back before the "do". + */ + do { + if (m->rm_flags & RNF_NORMAL) { + if (rn_b <= m->rm_b) + return (m->rm_leaf); + } else { + off = min(t->rn_off, matched_off); + x = rn_search_m(v, t, m->rm_mask); + while (x && x->rn_mask != m->rm_mask) + x = x->rn_dupedkey; + if (x && rn_satsifies_leaf(v, x, off)) + return x; + } + m = m->rm_mklist; + } while (m); + } + } while (t != top); + return 0; +} + +#ifdef RN_DEBUG +int rn_nodenum; +struct radix_node *rn_clist; +int rn_saveinfo; +int rn_debug = 1; +#endif + +static struct radix_node * +rn_newpair(v, b, nodes) + void *v; + int b; + struct radix_node nodes[2]; +{ + register struct radix_node *tt = nodes, *t = tt + 1; + t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7); + t->rn_l = tt; t->rn_off = b >> 3; + tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t; + tt->rn_flags = t->rn_flags = RNF_ACTIVE; +#ifdef RN_DEBUG + tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++; + tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt; +#endif + return t; +} + +static struct radix_node * +rn_insert(v_arg, head, dupentry, nodes) + void *v_arg; + struct radix_node_head *head; + int *dupentry; + struct radix_node nodes[2]; +{ + caddr_t v = v_arg; + struct radix_node *top = head->rnh_treetop; + int head_off = top->rn_off, vlen = (int)*((u_char *)v); + register struct radix_node *t = rn_search(v_arg, top); + register caddr_t cp = v + head_off; + register int b; + struct radix_node *tt; + /* + * Find first bit at which v and t->rn_key differ + */ + { + register caddr_t cp2 = t->rn_key + head_off; 
+ register int cmp_res; + caddr_t cplim = v + vlen; + + while (cp < cplim) + if (*cp2++ != *cp++) + goto on1; + *dupentry = 1; + return t; +on1: + *dupentry = 0; + cmp_res = (cp[-1] ^ cp2[-1]) & 0xff; + for (b = (cp - v) << 3; cmp_res; b--) + cmp_res >>= 1; + } + { + register struct radix_node *p, *x = top; + cp = v; + do { + p = x; + if (cp[x->rn_off] & x->rn_bmask) + x = x->rn_r; + else x = x->rn_l; + } while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */ +#ifdef RN_DEBUG + if (rn_debug) + log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p); +#endif + t = rn_newpair(v_arg, b, nodes); tt = t->rn_l; + if ((cp[p->rn_off] & p->rn_bmask) == 0) + p->rn_l = t; + else + p->rn_r = t; + x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */ + if ((cp[t->rn_off] & t->rn_bmask) == 0) { + t->rn_r = x; + } else { + t->rn_r = tt; t->rn_l = x; + } +#ifdef RN_DEBUG + if (rn_debug) + log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p); +#endif + } + return (tt); +} + +struct radix_node * +rn_addmask(n_arg, search, skip) + int search, skip; + void *n_arg; +{ + caddr_t netmask = (caddr_t)n_arg; + register struct radix_node *x; + register caddr_t cp, cplim; + register int b = 0, mlen, j; + int maskduplicated, m0, isnormal; + struct radix_node *saved_x; + static int last_zeroed = 0; + + if ((mlen = *(u_char *)netmask) > max_keylen) + mlen = max_keylen; + if (skip == 0) + skip = 1; + if (mlen <= skip) + return (mask_rnhead->rnh_nodes); + if (skip > 1) + Bcopy(rn_ones + 1, addmask_key + 1, skip - 1); + if ((m0 = mlen) > skip) + Bcopy(netmask + skip, addmask_key + skip, mlen - skip); + /* + * Trim trailing zeroes. 
+ */ + for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) + cp--; + mlen = cp - addmask_key; + if (mlen <= skip) { + if (m0 >= last_zeroed) + last_zeroed = mlen; + return (mask_rnhead->rnh_nodes); + } + if (m0 < last_zeroed) + Bzero(addmask_key + m0, last_zeroed - m0); + *addmask_key = last_zeroed = mlen; + x = rn_search(addmask_key, rn_masktop); + if (Bcmp(addmask_key, x->rn_key, mlen) != 0) + x = 0; + if (x || search) + return (x); + R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x)); + if ((saved_x = x) == 0) + return (0); + Bzero(x, max_keylen + 2 * sizeof (*x)); + netmask = cp = (caddr_t)(x + 2); + Bcopy(addmask_key, cp, mlen); + x = rn_insert(cp, mask_rnhead, &maskduplicated, x); + if (maskduplicated) { + log(LOG_ERR, "rn_addmask: mask impossibly already in tree"); + Free(saved_x); + return (x); + } + /* + * Calculate index of mask, and check for normalcy. + */ + cplim = netmask + mlen; isnormal = 1; + for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) + cp++; + if (cp != cplim) { + for (j = 0x80; (j & *cp) != 0; j >>= 1) + b++; + if (*cp != normal_chars[b] || cp != (cplim - 1)) + isnormal = 0; + } + b += (cp - netmask) << 3; + x->rn_b = -1 - b; + if (isnormal) + x->rn_flags |= RNF_NORMAL; + return (x); +} + +static int /* XXX: arbitrary ordering for non-contiguous masks */ +rn_lexobetter(m_arg, n_arg) + void *m_arg, *n_arg; +{ + register u_char *mp = m_arg, *np = n_arg, *lim; + + if (*mp > *np) + return 1; /* not really, but need to check longer one first */ + if (*mp == *np) + for (lim = mp + *mp; mp < lim;) + if (*mp++ > *np++) + return 1; + return 0; +} + +static struct radix_mask * +rn_new_radix_mask(tt, next) + register struct radix_node *tt; + register struct radix_mask *next; +{ + register struct radix_mask *m; + + MKGet(m); + if (m == 0) { + log(LOG_ERR, "Mask for route not entered\n"); + return (0); + } + Bzero(m, sizeof *m); + m->rm_b = tt->rn_b; + m->rm_flags = tt->rn_flags; + if (tt->rn_flags & 
RNF_NORMAL) + m->rm_leaf = tt; + else + m->rm_mask = tt->rn_mask; + m->rm_mklist = next; + tt->rn_mklist = m; + return m; +} + +struct radix_node * +rn_addroute(v_arg, n_arg, head, treenodes) + void *v_arg, *n_arg; + struct radix_node_head *head; + struct radix_node treenodes[2]; +{ + caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg; + register struct radix_node *t, *x = 0, *tt; + struct radix_node *saved_tt, *top = head->rnh_treetop; + short b = 0, b_leaf = 0; + int keyduplicated; + caddr_t mmask; + struct radix_mask *m, **mp; + + /* + * In dealing with non-contiguous masks, there may be + * many different routes which have the same mask. + * We will find it useful to have a unique pointer to + * the mask to speed avoiding duplicate references at + * nodes and possibly save time in calculating indices. + */ + if (netmask) { + if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0) + return (0); + b_leaf = x->rn_b; + b = -1 - x->rn_b; + netmask = x->rn_key; + } + /* + * Deal with duplicated keys: attach node to previous instance + */ + saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes); + if (keyduplicated) { + for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) { + if (tt->rn_mask == netmask) + return (0); + if (netmask == 0 || + (tt->rn_mask && + ((b_leaf < tt->rn_b) || /* index(netmask) > node */ + rn_refines(netmask, tt->rn_mask) || + rn_lexobetter(netmask, tt->rn_mask)))) + break; + } + /* + * If the mask is not duplicated, we wouldn't + * find it among possible duplicate key entries + * anyway, so the above test doesn't hurt. + * + * We sort the masks for a duplicated key the same way as + * in a masklist -- most specific to least specific. + * This may require the unfortunate nuisance of relocating + * the head of the list. 
+ */ + if (tt == saved_tt) { + struct radix_node *xx = x; + /* link in at head of list */ + (tt = treenodes)->rn_dupedkey = t; + tt->rn_flags = t->rn_flags; + tt->rn_p = x = t->rn_p; + t->rn_p = tt; /* parent */ + if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt; + saved_tt = tt; x = xx; + } else { + (tt = treenodes)->rn_dupedkey = t->rn_dupedkey; + t->rn_dupedkey = tt; + tt->rn_p = t; /* parent */ + if (tt->rn_dupedkey) /* parent */ + tt->rn_dupedkey->rn_p = tt; /* parent */ + } +#ifdef RN_DEBUG + t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++; + tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt; +#endif + tt->rn_key = (caddr_t) v; + tt->rn_b = -1; + tt->rn_flags = RNF_ACTIVE; + } + /* + * Put mask in tree. + */ + if (netmask) { + tt->rn_mask = netmask; + tt->rn_b = x->rn_b; + tt->rn_flags |= x->rn_flags & RNF_NORMAL; + } + t = saved_tt->rn_p; + if (keyduplicated) + goto on2; + b_leaf = -1 - t->rn_b; + if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r; + /* Promote general routes from below */ + if (x->rn_b < 0) { + for (mp = &t->rn_mklist; x; x = x->rn_dupedkey) + if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) { + *mp = m = rn_new_radix_mask(x, 0); + if (m) + mp = &m->rm_mklist; + } + } else if (x->rn_mklist) { + /* + * Skip over masks whose index is > that of new node + */ + for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) + if (m->rm_b >= b_leaf) + break; + t->rn_mklist = m; *mp = 0; + } +on2: + /* Add new route to highest possible ancestor's list */ + if ((netmask == 0) || (b > t->rn_b )) + return tt; /* can't lift at all */ + b_leaf = tt->rn_b; + do { + x = t; + t = t->rn_p; + } while (b <= t->rn_b && x != top); + /* + * Search through routes associated with node to + * insert new route according to index. + * Need same criteria as when sorting dupedkeys to avoid + * double loop on deletion. 
+ */ + for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) { + if (m->rm_b < b_leaf) + continue; + if (m->rm_b > b_leaf) + break; + if (m->rm_flags & RNF_NORMAL) { + mmask = m->rm_leaf->rn_mask; + if (tt->rn_flags & RNF_NORMAL) { + log(LOG_ERR, + "Non-unique normal route, mask not entered"); + return tt; + } + } else + mmask = m->rm_mask; + if (mmask == netmask) { + m->rm_refs++; + tt->rn_mklist = m; + return tt; + } + if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask)) + break; + } + *mp = rn_new_radix_mask(tt, *mp); + return tt; +} + +struct radix_node * +rn_delete(v_arg, netmask_arg, head) + void *v_arg, *netmask_arg; + struct radix_node_head *head; +{ + register struct radix_node *t, *p, *x, *tt; + struct radix_mask *m, *saved_m, **mp; + struct radix_node *dupedkey, *saved_tt, *top; + caddr_t v, netmask; + int b, head_off, vlen; + + v = v_arg; + netmask = netmask_arg; + x = head->rnh_treetop; + tt = rn_search(v, x); + head_off = x->rn_off; + vlen = *(u_char *)v; + saved_tt = tt; + top = x; + if (tt == 0 || + Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off)) + return (0); + /* + * Delete our route from mask lists. 
+ */ + if (netmask) { + if ((x = rn_addmask(netmask, 1, head_off)) == 0) + return (0); + netmask = x->rn_key; + while (tt->rn_mask != netmask) + if ((tt = tt->rn_dupedkey) == 0) + return (0); + } + if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0) + goto on1; + if (tt->rn_flags & RNF_NORMAL) { + if (m->rm_leaf != tt || m->rm_refs > 0) { + log(LOG_ERR, "rn_delete: inconsistent annotation\n"); + return 0; /* dangling ref could cause disaster */ + } + } else { + if (m->rm_mask != tt->rn_mask) { + log(LOG_ERR, "rn_delete: inconsistent annotation\n"); + goto on1; + } + if (--m->rm_refs >= 0) + goto on1; + } + b = -1 - tt->rn_b; + t = saved_tt->rn_p; + if (b > t->rn_b) + goto on1; /* Wasn't lifted at all */ + do { + x = t; + t = t->rn_p; + } while (b <= t->rn_b && x != top); + for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) + if (m == saved_m) { + *mp = m->rm_mklist; + MKFree(m); + break; + } + if (m == 0) { + log(LOG_ERR, "rn_delete: couldn't find our annotation\n"); + if (tt->rn_flags & RNF_NORMAL) + return (0); /* Dangling ref to us */ + } +on1: + /* + * Eliminate us from tree + */ + if (tt->rn_flags & RNF_ROOT) + return (0); +#ifdef RN_DEBUG + /* Get us out of the creation list */ + for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {} + if (t) t->rn_ybro = tt->rn_ybro; +#endif + t = tt->rn_p; + dupedkey = saved_tt->rn_dupedkey; + if (dupedkey) { + /* + * at this point, tt is the deletion target and saved_tt + * is the head of the dupekey chain + */ + if (tt == saved_tt) { + /* remove from head of chain */ + x = dupedkey; x->rn_p = t; + if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x; + } else { + /* find node in front of tt on the chain */ + for (x = p = saved_tt; p && p->rn_dupedkey != tt;) + p = p->rn_dupedkey; + if (p) { + p->rn_dupedkey = tt->rn_dupedkey; + if (tt->rn_dupedkey) /* parent */ + tt->rn_dupedkey->rn_p = p; /* parent */ + } else log(LOG_ERR, "rn_delete: couldn't find us\n"); + } + t = tt + 1; + if (t->rn_flags & RNF_ACTIVE) 
{ +#ifndef RN_DEBUG + *++x = *t; p = t->rn_p; +#else + b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p; +#endif + if (p->rn_l == t) p->rn_l = x; else p->rn_r = x; + x->rn_l->rn_p = x; x->rn_r->rn_p = x; + } + goto out; + } + if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l; + p = t->rn_p; + if (p->rn_r == t) p->rn_r = x; else p->rn_l = x; + x->rn_p = p; + /* + * Demote routes attached to us. + */ + if (t->rn_mklist) { + if (x->rn_b >= 0) { + for (mp = &x->rn_mklist; (m = *mp);) + mp = &m->rm_mklist; + *mp = t->rn_mklist; + } else { + /* If there are any key,mask pairs in a sibling + duped-key chain, some subset will appear sorted + in the same order attached to our mklist */ + for (m = t->rn_mklist; m && x; x = x->rn_dupedkey) + if (m == x->rn_mklist) { + struct radix_mask *mm = m->rm_mklist; + x->rn_mklist = 0; + if (--(m->rm_refs) < 0) + MKFree(m); + m = mm; + } + if (m) + log(LOG_ERR, + "rn_delete: Orphaned Mask %p at %p\n", + (void *)m, (void *)x); + } + } + /* + * We may be holding an active internal node in the tree. + */ + x = tt + 1; + if (t != x) { +#ifndef RN_DEBUG + *t = *x; +#else + b = t->rn_info; *t = *x; t->rn_info = b; +#endif + t->rn_l->rn_p = t; t->rn_r->rn_p = t; + p = x->rn_p; + if (p->rn_l == x) p->rn_l = t; else p->rn_r = t; + } +out: + tt->rn_flags &= ~RNF_ACTIVE; + tt[1].rn_flags &= ~RNF_ACTIVE; + return (tt); +} + +/* + * This is the same as rn_walktree() except for the parameters and the + * exit. + */ +static int +rn_walktree_from(h, a, m, f, w) + struct radix_node_head *h; + void *a, *m; + walktree_f_t *f; + void *w; +{ + int error; + struct radix_node *base, *next; + u_char *xa = (u_char *)a; + u_char *xm = (u_char *)m; + register struct radix_node *rn, *last = 0 /* shut up gcc */; + int stopping = 0; + int lastb; + + /* + * rn_search_m is sort-of-open-coded here. 
+ */ + /* printf("about to search\n"); */ + for (rn = h->rnh_treetop; rn->rn_b >= 0; ) { + last = rn; + /* printf("rn_b %d, rn_bmask %x, xm[rn_off] %x\n", + rn->rn_b, rn->rn_bmask, xm[rn->rn_off]); */ + if (!(rn->rn_bmask & xm[rn->rn_off])) { + break; + } + if (rn->rn_bmask & xa[rn->rn_off]) { + rn = rn->rn_r; + } else { + rn = rn->rn_l; + } + } + /* printf("done searching\n"); */ + + /* + * Two cases: either we stepped off the end of our mask, + * in which case last == rn, or we reached a leaf, in which + * case we want to start from the last node we looked at. + * Either way, last is the node we want to start from. + */ + rn = last; + lastb = rn->rn_b; + + /* printf("rn %p, lastb %d\n", rn, lastb);*/ + + /* + * This gets complicated because we may delete the node + * while applying the function f to it, so we need to calculate + * the successor node in advance. + */ + while (rn->rn_b >= 0) + rn = rn->rn_l; + + while (!stopping) { + /* printf("node %p (%d)\n", rn, rn->rn_b); */ + base = rn; + /* If at right child go back up, otherwise, go right */ + while (rn->rn_p->rn_r == rn && !(rn->rn_flags & RNF_ROOT)) { + rn = rn->rn_p; + + /* if went up beyond last, stop */ + if (rn->rn_b < lastb) { + stopping = 1; + /* printf("up too far\n"); */ + } + } + + /* Find the next *leaf* since next node might vanish, too */ + for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;) + rn = rn->rn_l; + next = rn; + /* Process leaves */ + while ((rn = base) != 0) { + base = rn->rn_dupedkey; + /* printf("leaf %p\n", rn); */ + if (!(rn->rn_flags & RNF_ROOT) + && (error = (*f)(rn, w))) + return (error); + } + rn = next; + + if (rn->rn_flags & RNF_ROOT) { + /* printf("root, stopping"); */ + stopping = 1; + } + + } + return 0; +} + +static int +rn_walktree(h, f, w) + struct radix_node_head *h; + walktree_f_t *f; + void *w; +{ + int error; + struct radix_node *base, *next; + register struct radix_node *rn = h->rnh_treetop; + /* + * This gets complicated because we may delete the node + * while applying 
the function f to it, so we need to calculate + * the successor node in advance. + */ + /* First time through node, go left */ + while (rn->rn_b >= 0) + rn = rn->rn_l; + for (;;) { + base = rn; + /* If at right child go back up, otherwise, go right */ + while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0) + rn = rn->rn_p; + /* Find the next *leaf* since next node might vanish, too */ + for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;) + rn = rn->rn_l; + next = rn; + /* Process leaves */ + while ((rn = base)) { + base = rn->rn_dupedkey; + if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w))) + return (error); + } + rn = next; + if (rn->rn_flags & RNF_ROOT) + return (0); + } + /* NOTREACHED */ +} + +int +rn_inithead(head, off) + void **head; + int off; +{ + register struct radix_node_head *rnh; + register struct radix_node *t, *tt, *ttt; + if (*head) + return (1); + R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh)); + if (rnh == 0) + return (0); + Bzero(rnh, sizeof (*rnh)); + *head = rnh; + t = rn_newpair(rn_zeros, off, rnh->rnh_nodes); + ttt = rnh->rnh_nodes + 2; + t->rn_r = ttt; + t->rn_p = t; + tt = t->rn_l; + tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE; + tt->rn_b = -1 - off; + *ttt = *tt; + ttt->rn_key = rn_ones; + rnh->rnh_addaddr = rn_addroute; + rnh->rnh_deladdr = rn_delete; + rnh->rnh_matchaddr = rn_match; + rnh->rnh_lookup = rn_lookup; + rnh->rnh_walktree = rn_walktree; + rnh->rnh_walktree_from = rn_walktree_from; + rnh->rnh_treetop = t; + return (1); +} + +void +rn_init() +{ + char *cp, *cplim; +#ifdef KERNEL + struct domain *dom; + + for (dom = domains; dom; dom = dom->dom_next) + if (dom->dom_maxrtkey > max_keylen) + max_keylen = dom->dom_maxrtkey; +#endif + if (max_keylen == 0) { + log(LOG_ERR, + "rn_init: radix functions require max_keylen be set\n"); + return; + } + R_Malloc(rn_zeros, char *, 3 * max_keylen); + if (rn_zeros == NULL) + panic("rn_init"); + Bzero(rn_zeros, 3 * max_keylen); + rn_ones = cp = rn_zeros + max_keylen; + 
addmask_key = cplim = rn_ones + max_keylen; + while (cp < cplim) + *cp++ = -1; + if (rn_inithead((void **)&mask_rnhead, 0) == 0) + panic("rn_init 2"); +} diff --git a/bsd/net/radix.h b/bsd/net/radix.h new file mode 100644 index 000000000..be6c18d35 --- /dev/null +++ b/bsd/net/radix.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)radix.h 8.2 (Berkeley) 10/31/94 + */ + +#ifndef _RADIX_H_ +#define _RADIX_H_ + +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_RTABLE); +#endif + +/* + * Radix search tree node layout. 
+ */ + +struct radix_node { + struct radix_mask *rn_mklist; /* list of masks contained in subtree */ + struct radix_node *rn_p; /* parent */ + short rn_b; /* bit offset; -1-index(netmask) */ + char rn_bmask; /* node: mask for bit test*/ + u_char rn_flags; /* enumerated next */ +#define RNF_NORMAL 1 /* leaf contains normal route */ +#define RNF_ROOT 2 /* leaf is root leaf for tree */ +#define RNF_ACTIVE 4 /* This node is alive (for rtfree) */ + union { + struct { /* leaf only data: */ + caddr_t rn_Key; /* object of search */ + caddr_t rn_Mask; /* netmask, if present */ + struct radix_node *rn_Dupedkey; + } rn_leaf; + struct { /* node only data: */ + int rn_Off; /* where to start compare */ + struct radix_node *rn_L;/* progeny */ + struct radix_node *rn_R;/* progeny */ + } rn_node; + } rn_u; +#ifdef RN_DEBUG + int rn_info; + struct radix_node *rn_twin; + struct radix_node *rn_ybro; +#endif +}; + +#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey +#define rn_key rn_u.rn_leaf.rn_Key +#define rn_mask rn_u.rn_leaf.rn_Mask +#define rn_off rn_u.rn_node.rn_Off +#define rn_l rn_u.rn_node.rn_L +#define rn_r rn_u.rn_node.rn_R + +/* + * Annotations to tree concerning potential routes applying to subtrees. + */ + +struct radix_mask { + short rm_b; /* bit offset; -1-index(netmask) */ + char rm_unused; /* cf. rn_bmask */ + u_char rm_flags; /* cf. 
rn_flags */ + struct radix_mask *rm_mklist; /* more masks to try */ + union { + caddr_t rmu_mask; /* the mask */ + struct radix_node *rmu_leaf; /* for normal routes */ + } rm_rmu; + int rm_refs; /* # of references to this struct */ +}; + +#define rm_mask rm_rmu.rmu_mask +#define rm_leaf rm_rmu.rmu_leaf /* extra field would make 32 bytes */ + +#define MKGet(m) {\ + if (rn_mkfreelist) {\ + m = rn_mkfreelist; \ + rn_mkfreelist = (m)->rm_mklist; \ + } else \ + R_Malloc(m, struct radix_mask *, sizeof (*(m))); }\ + +#define MKFree(m) { (m)->rm_mklist = rn_mkfreelist; rn_mkfreelist = (m);} + +typedef int walktree_f_t __P((struct radix_node *, void *)); + +struct radix_node_head { + struct radix_node *rnh_treetop; + int rnh_addrsize; /* permit, but not require fixed keys */ + int rnh_pktsize; /* permit, but not require fixed keys */ + struct radix_node *(*rnh_addaddr) /* add based on sockaddr */ + __P((void *v, void *mask, + struct radix_node_head *head, struct radix_node nodes[])); + struct radix_node *(*rnh_addpkt) /* add based on packet hdr */ + __P((void *v, void *mask, + struct radix_node_head *head, struct radix_node nodes[])); + struct radix_node *(*rnh_deladdr) /* remove based on sockaddr */ + __P((void *v, void *mask, struct radix_node_head *head)); + struct radix_node *(*rnh_delpkt) /* remove based on packet hdr */ + __P((void *v, void *mask, struct radix_node_head *head)); + struct radix_node *(*rnh_matchaddr) /* locate based on sockaddr */ + __P((void *v, struct radix_node_head *head)); + struct radix_node *(*rnh_lookup) /* locate based on sockaddr */ + __P((void *v, void *mask, struct radix_node_head *head)); + struct radix_node *(*rnh_matchpkt) /* locate based on packet hdr */ + __P((void *v, struct radix_node_head *head)); + int (*rnh_walktree) /* traverse tree */ + __P((struct radix_node_head *head, walktree_f_t *f, void *w)); + int (*rnh_walktree_from) /* traverse tree below a */ + __P((struct radix_node_head *head, void *a, void *m, + walktree_f_t *f, 
void *w)); + void (*rnh_close) /* do something when the last ref drops */ + __P((struct radix_node *rn, struct radix_node_head *head)); + struct radix_node rnh_nodes[3]; /* empty tree for common case */ +}; + +#ifndef KERNEL +#define Bcmp(a, b, n) bcmp(((char *)(a)), ((char *)(b)), (n)) +#define Bcopy(a, b, n) bcopy(((char *)(a)), ((char *)(b)), (unsigned)(n)) +#define Bzero(p, n) bzero((char *)(p), (int)(n)); +#define R_Malloc(p, t, n) (p = (t) malloc((unsigned int)(n))) +#define Free(p) free((char *)p); +#else +#define Bcmp(a, b, n) bcmp(((caddr_t)(a)), ((caddr_t)(b)), (unsigned)(n)) +#define Bcopy(a, b, n) bcopy(((caddr_t)(a)), ((caddr_t)(b)), (unsigned)(n)) +#define Bzero(p, n) bzero((caddr_t)(p), (unsigned)(n)); +#define R_Malloc(p, t, n) (p = (t) _MALLOC((unsigned long)(n), M_RTABLE, M_DONTWAIT)) +#define Free(p) FREE((caddr_t)p, M_RTABLE); +#endif /*KERNEL*/ + +void rn_init __P((void)); +int rn_inithead __P((void **, int)); +int rn_refines __P((void *, void *)); +struct radix_node + *rn_addmask __P((void *, int, int)), + *rn_addroute __P((void *, void *, struct radix_node_head *, + struct radix_node [2])), + *rn_delete __P((void *, void *, struct radix_node_head *)), + *rn_lookup __P((void *v_arg, void *m_arg, + struct radix_node_head *head)), + *rn_match __P((void *, struct radix_node_head *)); + + +#endif /* _RADIX_H_ */ diff --git a/bsd/net/raw_cb.c b/bsd/net/raw_cb.c new file mode 100644 index 000000000..105ef13c8 --- /dev/null +++ b/bsd/net/raw_cb.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)raw_cb.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include + +#include + +/* + * Routines to manage the raw protocol control blocks. + * + * TODO: + * hash lookups by protocol family/protocol + address family + * take care of unique address problems per AF? + * redo address binding to allow wildcards + */ + +struct rawcb_list_head rawcb_list; + +static u_long raw_sendspace = RAWSNDQ; +static u_long raw_recvspace = RAWRCVQ; + +/* + * Allocate a control block and a nominal amount + * of buffer space for the socket. + */ +int +raw_attach(so, proto) + register struct socket *so; + int proto; +{ + register struct rawcb *rp = sotorawcb(so); + int error; + + /* + * It is assumed that raw_attach is called + * after space has been allocated for the + * rawcb. + */ + if (rp == 0) + return (ENOBUFS); + error = soreserve(so, raw_sendspace, raw_recvspace); + if (error) + return (error); + rp->rcb_socket = so; + rp->rcb_proto.sp_family = so->so_proto->pr_domain->dom_family; + rp->rcb_proto.sp_protocol = proto; + LIST_INSERT_HEAD(&rawcb_list, rp, list); + return (0); +} + +/* + * Detach the raw connection block and discard + * socket resources. 
+ */ +void +raw_detach(rp) + register struct rawcb *rp; +{ + struct socket *so = rp->rcb_socket; + + so->so_pcb = 0; + sofree(so); + LIST_REMOVE(rp, list); +#ifdef notdef + if (rp->rcb_laddr) + m_freem(dtom(rp->rcb_laddr)); + rp->rcb_laddr = 0; +#endif + FREE((caddr_t)(rp), M_PCB); +} + +/* + * Disconnect and possibly release resources. + */ +void +raw_disconnect(rp) + struct rawcb *rp; +{ + +#ifdef notdef + if (rp->rcb_faddr) + m_freem(dtom(rp->rcb_faddr)); + rp->rcb_faddr = 0; +#endif + if (rp->rcb_socket->so_state & SS_NOFDREF) + raw_detach(rp); +} + +#ifdef notdef +#include + +int +raw_bind(so, nam) + register struct socket *so; + struct mbuf *nam; +{ + struct sockaddr *addr = mtod(nam, struct sockaddr *); + register struct rawcb *rp; + + if (ifnet == 0) + return (EADDRNOTAVAIL); + rp = sotorawcb(so); + nam = m_copym(nam, 0, M_COPYALL, M_WAITOK); + rp->rcb_laddr = mtod(nam, struct sockaddr *); + return (0); +} +#endif diff --git a/bsd/net/raw_cb.h b/bsd/net/raw_cb.h new file mode 100644 index 000000000..541ade793 --- /dev/null +++ b/bsd/net/raw_cb.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)raw_cb.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NET_RAW_CB_H_ +#define _NET_RAW_CB_H_ + +#include + +/* + * Raw protocol interface control block. Used + * to tie a socket to the generic raw interface. + */ +struct rawcb { + LIST_ENTRY(rawcb) list; + struct socket *rcb_socket; /* back pointer to socket */ + struct sockaddr *rcb_faddr; /* destination address */ + struct sockaddr *rcb_laddr; /* socket's address */ + struct sockproto rcb_proto; /* protocol family, protocol */ +}; + +#define sotorawcb(so) ((struct rawcb *)(so)->so_pcb) + +/* + * Nominal space allocated to a raw socket. + */ +#define RAWSNDQ 8192 +#define RAWRCVQ 8192 + +#ifdef KERNEL +extern LIST_HEAD(rawcb_list_head, rawcb) rawcb_list; + +int raw_attach __P((struct socket *, int)); +void raw_ctlinput __P((int, struct sockaddr *, void *)); +void raw_detach __P((struct rawcb *)); +void raw_disconnect __P((struct rawcb *)); +void raw_init __P((void)); +void raw_input __P((struct mbuf *, + struct sockproto *, struct sockaddr *, struct sockaddr *)); + +extern struct pr_usrreqs raw_usrreqs; +#endif + +#endif diff --git a/bsd/net/raw_usrreq.c b/bsd/net/raw_usrreq.c new file mode 100644 index 000000000..b2f44395a --- /dev/null +++ b/bsd/net/raw_usrreq.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)raw_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * Initialize raw connection block q. + */ +void +raw_init() +{ + LIST_INIT(&rawcb_list); +} + + +/* + * Raw protocol input routine. Find the socket + * associated with the packet(s) and move them over. If + * nothing exists for this packet, drop it. + */ +/* + * Raw protocol interface. + */ +void +raw_input(m0, proto, src, dst) + struct mbuf *m0; + register struct sockproto *proto; + struct sockaddr *src, *dst; +{ + register struct rawcb *rp; + register struct mbuf *m = m0; + register int sockets = 0; + struct socket *last; + + last = 0; + LIST_FOREACH(rp, &rawcb_list, list) { + if (rp->rcb_proto.sp_family != proto->sp_family) + continue; + if (rp->rcb_proto.sp_protocol && + rp->rcb_proto.sp_protocol != proto->sp_protocol) + continue; + /* + * We assume the lower level routines have + * placed the address in a canonical format + * suitable for a structure comparison. + * + * Note that if the lengths are not the same + * the comparison will fail at the first byte. 
+ */ +#define equal(a1, a2) \ + (bcmp((caddr_t)(a1), (caddr_t)(a2), a1->sa_len) == 0) + if (rp->rcb_laddr && !equal(rp->rcb_laddr, dst)) + continue; + if (rp->rcb_faddr && !equal(rp->rcb_faddr, src)) + continue; + if (last) { + struct mbuf *n; + n = m_copy(m, 0, (int)M_COPYALL); + if (n) { + if (sbappendaddr(&last->so_rcv, src, + n, (struct mbuf *)0) == 0) + /* should notify about lost packet */ + m_freem(n); + else { + sorwakeup(last); + sockets++; + } + } + } + last = rp->rcb_socket; + } + if (last) { + if (sbappendaddr(&last->so_rcv, src, + m, (struct mbuf *)0) == 0) + m_freem(m); + else { + sorwakeup(last); + sockets++; + } + } else + m_freem(m); +} + +/*ARGSUSED*/ +void +raw_ctlinput(cmd, arg, dummy) + int cmd; + struct sockaddr *arg; + void *dummy; +{ + + if (cmd < 0 || cmd > PRC_NCMDS) + return; + /* INCOMPLETE */ +} + +static int +raw_uabort(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return EINVAL; + raw_disconnect(rp); + sofree(so); + soisdisconnected(so); + return 0; +} + +/* pru_accept is EOPNOTSUPP */ + +static int +raw_uattach(struct socket *so, int proto, struct proc *p) +{ + struct rawcb *rp = sotorawcb(so); + int error; + + if (rp == 0) + return EINVAL; +#if ISFB31 + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; +#else + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); +#endif + + return raw_attach(so, proto); +} + +static int +raw_ubind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + return EINVAL; +} + +static int +raw_uconnect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + return EINVAL; +} + +/* pru_connect2 is EOPNOTSUPP */ +/* pru_control is EOPNOTSUPP */ + +static int +raw_udetach(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return EINVAL; + + raw_detach(rp); + return 0; +} + +static int +raw_udisconnect(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return EINVAL; + if (rp->rcb_faddr == 
0) { + return ENOTCONN; + } + raw_disconnect(rp); + soisdisconnected(so); + return 0; +} + +/* pru_listen is EOPNOTSUPP */ + +static int +raw_upeeraddr(struct socket *so, struct sockaddr **nam) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return EINVAL; + if (rp->rcb_faddr == 0) { + return ENOTCONN; + } + *nam = dup_sockaddr(rp->rcb_faddr, 1); + return 0; +} + +/* pru_rcvd is EOPNOTSUPP */ +/* pru_rcvoob is EOPNOTSUPP */ + +static int +raw_usend(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *nam, struct mbuf *control, struct proc *p) +{ + int error; + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) { + error = EINVAL; + goto release; + } + + if (flags & PRUS_OOB) { + error = EOPNOTSUPP; + goto release; + } + + if (control && control->m_len) { + error = EOPNOTSUPP; + goto release; + } + if (nam) { + if (rp->rcb_faddr) { + error = EISCONN; + goto release; + } + rp->rcb_faddr = nam; + } else if (rp->rcb_faddr == 0) { + error = ENOTCONN; + goto release; + } + error = (*so->so_proto->pr_output)(m, so); + m = NULL; + if (nam) + rp->rcb_faddr = 0; +release: + if (m != NULL) + m_freem(m); + return (error); +} + +/* pru_sense is null */ + +static int +raw_ushutdown(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return EINVAL; + socantsendmore(so); + return 0; +} + +static int +raw_usockaddr(struct socket *so, struct sockaddr **nam) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return EINVAL; + if (rp->rcb_laddr == 0) + return EINVAL; + *nam = dup_sockaddr(rp->rcb_laddr, 1); + return 0; +} + +struct pr_usrreqs raw_usrreqs = { + raw_uabort, pru_accept_notsupp, raw_uattach, raw_ubind, raw_uconnect, + pru_connect2_notsupp, pru_control_notsupp, raw_udetach, + raw_udisconnect, pru_listen_notsupp, raw_upeeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, raw_usend, pru_sense_null, raw_ushutdown, + raw_usockaddr, sosend, soreceive, sopoll +}; diff --git a/bsd/net/route.c b/bsd/net/route.c new file mode 100644 
index 000000000..22d8678d3 --- /dev/null +++ b/bsd/net/route.c @@ -0,0 +1,1088 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)route.c 8.2 (Berkeley) 11/15/93 + */ + +#if NOTFB31 +#include "opt_inet.h" +#include "opt_mrouting.h" +#endif + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#define SA(p) ((struct sockaddr *)(p)) + +struct route_cb route_cb; +static struct rtstat rtstat; +struct radix_node_head *rt_tables[AF_MAX+1]; + +static int rttrash; /* routes not in table but not freed */ + +static void rt_maskedcopy __P((struct sockaddr *, + struct sockaddr *, struct sockaddr *)); +static void rtable_init __P((void **)); + +static void +rtable_init(table) + void **table; +{ + struct domain *dom; + for (dom = domains; dom; dom = dom->dom_next) + if (dom->dom_rtattach) + dom->dom_rtattach(&table[dom->dom_family], + dom->dom_rtoffset); +} + +void +route_init() +{ + rn_init(); /* initialize all zeroes, all ones, mask table */ + rtable_init((void **)rt_tables); +} + +/* + * Packet routing routines. 
+ */ +void +rtalloc(ro) + register struct route *ro; +{ + if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP)) + return; /* XXX */ + ro->ro_rt = rtalloc1(&ro->ro_dst, 1, 0UL); +} + +void +rtalloc_ign(ro, ignore) + register struct route *ro; + u_long ignore; +{ + if (ro->ro_rt && ro->ro_rt->rt_ifp && (ro->ro_rt->rt_flags & RTF_UP)) + return; /* XXX */ + ro->ro_rt = rtalloc1(&ro->ro_dst, 1, ignore); +} + +/* + * Look up the route that matches the address given + * Or, at least try.. Create a cloned route if needed. + */ +struct rtentry * +rtalloc1(dst, report, ignflags) + register struct sockaddr *dst; + int report; + u_long ignflags; +{ + register struct radix_node_head *rnh = rt_tables[dst->sa_family]; + register struct rtentry *rt; + register struct radix_node *rn; + struct rtentry *newrt = 0; + struct rt_addrinfo info; + u_long nflags; + int s = splnet(), err = 0, msgtype = RTM_MISS; + + /* + * Look up the address in the table for that Address Family + */ + if (rnh && (rn = rnh->rnh_matchaddr((caddr_t)dst, rnh)) && + ((rn->rn_flags & RNF_ROOT) == 0)) { + /* + * If we find it and it's not the root node, then + * get a reference on the rtentry associated. + */ + newrt = rt = (struct rtentry *)rn; + nflags = rt->rt_flags & ~ignflags; + if (report && (nflags & (RTF_CLONING | RTF_PRCLONING))) { + /* + * We are apparently adding (report = 0 in delete). + * If it requires that it be cloned, do so. + * (This implies it wasn't a HOST route.) + */ + err = rtrequest(RTM_RESOLVE, dst, SA(0), + SA(0), 0, &newrt); + if (err) { + /* + * If the cloning didn't succeed, maybe + * what we have will do. Return that. + */ + newrt = rt; + rt->rt_refcnt++; + goto miss; + } + if ((rt = newrt) && (rt->rt_flags & RTF_XRESOLVE)) { + /* + * If the new route specifies it be + * externally resolved, then go do that. 
+ */ + msgtype = RTM_RESOLVE; + goto miss; + } + } else + rt->rt_refcnt++; + } else { + /* + * Either we hit the root or couldn't find any match, + * Which basically means + * "caint get there frm here" + */ + rtstat.rts_unreach++; + miss: if (report) { + /* + * If required, report the failure to the supervising + * Authorities. + * For a delete, this is not an error. (report == 0) + */ + bzero((caddr_t)&info, sizeof(info)); + info.rti_info[RTAX_DST] = dst; + rt_missmsg(msgtype, &info, 0, err); + } + } + splx(s); + return (newrt); +} + +/* + * Remove a reference count from an rtentry. + * If the count gets low enough, take it out of the routing table + */ +void +rtfree(rt) + register struct rtentry *rt; +{ + /* + * find the tree for that address family + */ + register struct radix_node_head *rnh = + rt_tables[rt_key(rt)->sa_family]; + register struct ifaddr *ifa; + + if (rt == 0 || rnh == 0) + panic("rtfree"); + + /* + * decrement the reference count by one and if it reaches 0, + * and there is a close function defined, call the close function + */ + rt->rt_refcnt--; + if(rnh->rnh_close && rt->rt_refcnt == 0) { + rnh->rnh_close((struct radix_node *)rt, rnh); + } + + /* + * If we are no longer "up" (and ref == 0) + * then we can free the resources associated + * with the route. + */ + if (rt->rt_refcnt <= 0 && (rt->rt_flags & RTF_UP) == 0) { + if (rt->rt_nodes->rn_flags & (RNF_ACTIVE | RNF_ROOT)) + panic ("rtfree 2"); + /* + * the rtentry must have been removed from the routing table + * so it is represented in rttrash.. remove that now. + */ + rttrash--; + +#ifdef DIAGNOSTIC + if (rt->rt_refcnt < 0) { + printf("rtfree: %p not freed (neg refs)\n", rt); + return; + } +#endif + + /* + * release references on items we hold them on.. + * e.g other routes and ifaddrs. + */ + if((ifa = rt->rt_ifa)) + IFAFREE(ifa); + if (rt->rt_parent) { + RTFREE(rt->rt_parent); + } + + /* + * The key is separatly alloc'd so free it (see rt_setgate()). 
+ * This also frees the gateway, as they are always malloc'd + * together. + */ + Free(rt_key(rt)); + + /* + * and the rtentry itself of course + */ + Free(rt); + } +} + +void +ifafree(ifa) + register struct ifaddr *ifa; +{ + if (ifa == NULL) + panic("ifafree"); + if (ifa->ifa_refcnt == 0) + FREE(ifa, M_IFADDR); + else + ifa->ifa_refcnt--; +} + +/* + * Force a routing table entry to the specified + * destination to go through the given gateway. + * Normally called as a result of a routing redirect + * message from the network layer. + * + * N.B.: must be called at splnet + * + */ +void +rtredirect(dst, gateway, netmask, flags, src, rtp) + struct sockaddr *dst, *gateway, *netmask, *src; + int flags; + struct rtentry **rtp; +{ + register struct rtentry *rt; + int error = 0; + short *stat = 0; + struct rt_addrinfo info; + struct ifaddr *ifa; + + /* verify the gateway is directly reachable */ + if ((ifa = ifa_ifwithnet(gateway)) == 0) { + error = ENETUNREACH; + goto out; + } + rt = rtalloc1(dst, 0, 0UL); + /* + * If the redirect isn't from our current router for this dst, + * it's either old or wrong. If it redirects us to ourselves, + * we have a routing loop, perhaps as a result of an interface + * going down recently. + */ +#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) + if (!(flags & RTF_DONE) && rt && + (!equal(src, rt->rt_gateway) || rt->rt_ifa != ifa)) + error = EINVAL; + else if (ifa_ifwithaddr(gateway)) + error = EHOSTUNREACH; + if (error) + goto done; + /* + * Create a new entry if we just got back a wildcard entry + * or the lookup failed. This is necessary for hosts + * which use routing redirects generated by smart gateways + * to dynamically build the routing tables. + */ + if ((rt == 0) || (rt_mask(rt) && rt_mask(rt)->sa_len < 2)) + goto create; + /* + * Don't listen to the redirect if it's + * for a route to an interface. 
+ */ + if (rt->rt_flags & RTF_GATEWAY) { + if (((rt->rt_flags & RTF_HOST) == 0) && (flags & RTF_HOST)) { + /* + * Changing from route to net => route to host. + * Create new route, rather than smashing route to net. + */ + create: + flags |= RTF_GATEWAY | RTF_DYNAMIC; + error = rtrequest((int)RTM_ADD, dst, gateway, + netmask, flags, + (struct rtentry **)0); + stat = &rtstat.rts_dynamic; + } else { + /* + * Smash the current notion of the gateway to + * this destination. Should check about netmask!!! + */ + rt->rt_flags |= RTF_MODIFIED; + flags |= RTF_MODIFIED; + stat = &rtstat.rts_newgateway; + /* + * add the key and gateway (in one malloc'd chunk). + */ + rt_setgate(rt, rt_key(rt), gateway); + } + } else + error = EHOSTUNREACH; +done: + if (rt) { + if (rtp && !error) + *rtp = rt; + else + rtfree(rt); + } +out: + if (error) + rtstat.rts_badredirect++; + else if (stat != NULL) + (*stat)++; + bzero((caddr_t)&info, sizeof(info)); + info.rti_info[RTAX_DST] = dst; + info.rti_info[RTAX_GATEWAY] = gateway; + info.rti_info[RTAX_NETMASK] = netmask; + info.rti_info[RTAX_AUTHOR] = src; + rt_missmsg(RTM_REDIRECT, &info, flags, error); +} + +/* +* Routing table ioctl interface. +*/ +int +rtioctl(req, data, p) + int req; + caddr_t data; + struct proc *p; +{ +#if INET + /* Multicast goop, grrr... */ +#if MROUTING + return mrt_ioctl(req, data); +#else + return mrt_ioctl(req, data, p); +#endif +#else /* INET */ + return ENXIO; +#endif /* INET */ +} + +struct ifaddr * +ifa_ifwithroute(flags, dst, gateway) + int flags; + struct sockaddr *dst, *gateway; +{ + register struct ifaddr *ifa; + if ((flags & RTF_GATEWAY) == 0) { + /* + * If we are adding a route to an interface, + * and the interface is a pt to pt link + * we should search for the destination + * as our clue to the interface. Otherwise + * we can use the local address. 
+ */ + ifa = 0; + if (flags & RTF_HOST) { + ifa = ifa_ifwithdstaddr(dst); + } + if (ifa == 0) + ifa = ifa_ifwithaddr(gateway); + } else { + /* + * If we are adding a route to a remote net + * or host, the gateway may still be on the + * other end of a pt to pt link. + */ + ifa = ifa_ifwithdstaddr(gateway); + } + if (ifa == 0) + ifa = ifa_ifwithnet(gateway); + if (ifa == 0) { + struct rtentry *rt = rtalloc1(dst, 0, 0UL); + if (rt == 0) + return (0); + rt->rt_refcnt--; + if ((ifa = rt->rt_ifa) == 0) + return (0); + } + if (ifa->ifa_addr->sa_family != dst->sa_family) { + struct ifaddr *oifa = ifa; + ifa = ifaof_ifpforaddr(dst, ifa->ifa_ifp); + if (ifa == 0) + ifa = oifa; + } + return (ifa); +} + +#define ROUNDUP(a) (a>0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long)) + +static int rt_fixdelete __P((struct radix_node *, void *)); +static int rt_fixchange __P((struct radix_node *, void *)); + +struct rtfc_arg { + struct rtentry *rt0; + struct radix_node_head *rnh; +}; + +/* + * Do appropriate manipulations of a routing tree given + * all the bits of info needed + */ +int +rtrequest(req, dst, gateway, netmask, flags, ret_nrt) + int req, flags; + struct sockaddr *dst, *gateway, *netmask; + struct rtentry **ret_nrt; +{ + int s = splnet(); int error = 0; + register struct rtentry *rt; + register struct radix_node *rn; + register struct radix_node_head *rnh; + struct ifaddr *ifa; + struct sockaddr *ndst; +#define senderr(x) { error = x ; goto bad; } + + /* + * Find the correct routing tree to use for this Address Family + */ + if ((rnh = rt_tables[dst->sa_family]) == 0) + senderr(ESRCH); + /* + * If we are adding a host route then we don't want to put + * a netmask in the tree + */ + if (flags & RTF_HOST) + netmask = 0; + switch (req) { + case RTM_DELETE: + /* + * Remove the item from the tree and return it. + * Complain if it is not there and do no more processing. 
+ */ + if ((rn = rnh->rnh_deladdr(dst, netmask, rnh)) == 0) + senderr(ESRCH); + if (rn->rn_flags & (RNF_ACTIVE | RNF_ROOT)) + panic ("rtrequest delete"); + rt = (struct rtentry *)rn; + + /* + * Now search what's left of the subtree for any cloned + * routes which might have been formed from this node. + */ + if ((rt->rt_flags & RTF_PRCLONING) && netmask) { + rnh->rnh_walktree_from(rnh, dst, netmask, + rt_fixdelete, rt); + } + + /* + * Remove any external references we may have. + * This might result in another rtentry being freed if + * we held its last reference. + */ + if (rt->rt_gwroute) { + rt = rt->rt_gwroute; + RTFREE(rt); + (rt = (struct rtentry *)rn)->rt_gwroute = 0; + } + + /* + * NB: RTF_UP must be set during the search above, + * because we might delete the last ref, causing + * rt to get freed prematurely. + * eh? then why not just add a reference? + * I'm not sure how RTF_UP helps matters. (JRE) + */ + rt->rt_flags &= ~RTF_UP; + + /* + * give the protocol a chance to keep things in sync. + */ + if ((ifa = rt->rt_ifa) && ifa->ifa_rtrequest) + ifa->ifa_rtrequest(RTM_DELETE, rt, SA(0)); + + /* + * one more rtentry floating around that is not + * linked to the routing table. + */ + rttrash++; + + /* + * If the caller wants it, then it can have it, + * but it's up to it to free the rtentry as we won't be + * doing it. 
+ */ + if (ret_nrt) + *ret_nrt = rt; + else if (rt->rt_refcnt <= 0) { + rt->rt_refcnt++; + rtfree(rt); + } + break; + + case RTM_RESOLVE: + if (ret_nrt == 0 || (rt = *ret_nrt) == 0) + senderr(EINVAL); + ifa = rt->rt_ifa; + flags = rt->rt_flags & + ~(RTF_CLONING | RTF_PRCLONING | RTF_STATIC); + flags |= RTF_WASCLONED; + gateway = rt->rt_gateway; + if ((netmask = rt->rt_genmask) == 0) + flags |= RTF_HOST; + goto makeroute; + + case RTM_ADD: + if ((flags & RTF_GATEWAY) && !gateway) + panic("rtrequest: GATEWAY but no gateway"); + + if ((ifa = ifa_ifwithroute(flags, dst, gateway)) == 0) + senderr(ENETUNREACH); + + makeroute: + R_Malloc(rt, struct rtentry *, sizeof(*rt)); + if (rt == 0) + senderr(ENOBUFS); + Bzero(rt, sizeof(*rt)); + rt->rt_flags = RTF_UP | flags; + /* + * Add the gateway. Possibly re-malloc-ing the storage for it + * also add the rt_gwroute if possible. + */ + if (error = rt_setgate(rt, dst, gateway)) { + Free(rt); + senderr(error); + } + + /* + * point to the (possibly newly malloc'd) dest address. + */ + ndst = rt_key(rt); + + /* + * make sure it contains the value we want (masked if needed). + */ + if (netmask) { + rt_maskedcopy(dst, ndst, netmask); + } else + Bcopy(dst, ndst, dst->sa_len); + + /* + * Note that we now have a reference to the ifa. + * This moved from below so that rnh->rnh_addaddr() can + * examine the ifa and ifa->ifa_ifp if it so desires. + */ + ifa->ifa_refcnt++; + rt->rt_ifa = ifa; + rt->rt_ifp = ifa->ifa_ifp; + rt->rt_dlt = ifa->ifa_dlt; + rn = rnh->rnh_addaddr((caddr_t)ndst, (caddr_t)netmask, + rnh, rt->rt_nodes); + if (rn == 0) { + struct rtentry *rt2; + /* + * Uh-oh, we already have one of these in the tree. + * We do a special hack: if the route that's already + * there was generated by the protocol-cloning + * mechanism, then we just blow it away and retry + * the insertion of the new one. 
+ */ + rt2 = rtalloc1(dst, 0, RTF_PRCLONING); + if (rt2 && rt2->rt_parent) { + rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt2), + rt2->rt_gateway, + rt_mask(rt2), rt2->rt_flags, 0); + RTFREE(rt2); + rn = rnh->rnh_addaddr((caddr_t)ndst, + (caddr_t)netmask, + rnh, rt->rt_nodes); + } else if (rt2) { + /* undo the extra ref we got */ + RTFREE(rt2); + } + } + + /* + * If it still failed to go into the tree, + * then un-make it (this should be a function) + */ + if (rn == 0) { + if (rt->rt_gwroute) + rtfree(rt->rt_gwroute); + if (rt->rt_ifa) { + IFAFREE(rt->rt_ifa); + } + Free(rt_key(rt)); + Free(rt); + senderr(EEXIST); + } + + rt->rt_parent = 0; + + /* + * If we got here from RESOLVE, then we are cloning + * so clone the rest, and note that we + * are a clone (and increment the parent's references) + */ + if (req == RTM_RESOLVE) { + rt->rt_rmx = (*ret_nrt)->rt_rmx; /* copy metrics */ + if ((*ret_nrt)->rt_flags & RTF_PRCLONING) { + rt->rt_parent = (*ret_nrt); + (*ret_nrt)->rt_refcnt++; + } + } + + /* + * if this protocol has something to add to this then + * allow it to do that as well. + */ + if (ifa->ifa_rtrequest) + ifa->ifa_rtrequest(req, rt, SA(ret_nrt ? *ret_nrt : 0)); + + /* + * We repeat the same procedure from rt_setgate() here because + * it doesn't fire when we call it there because the node + * hasn't been added to the tree yet. + */ + if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) { + struct rtfc_arg arg; + arg.rnh = rnh; + arg.rt0 = rt; + rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt), + rt_fixchange, &arg); + } + + /* + * actually return a resultant rtentry and + * give the caller a single reference. + */ + if (ret_nrt) { + *ret_nrt = rt; + rt->rt_refcnt++; + } + break; + } +bad: + splx(s); + return (error); +} + +/* + * Called from rtrequest(RTM_DELETE, ...) to fix up the route's ``family'' + * (i.e., the routes related to it by the operation of cloning). 
This + * routine is iterated over all potential former-child-routes by way of + * rnh->rnh_walktree_from() above, and those that actually are children of + * the late parent (passed in as VP here) are themselves deleted. + */ +static int +rt_fixdelete(rn, vp) + struct radix_node *rn; + void *vp; +{ + struct rtentry *rt = (struct rtentry *)rn; + struct rtentry *rt0 = vp; + + if (rt->rt_parent == rt0 && !(rt->rt_flags & RTF_PINNED)) { + return rtrequest(RTM_DELETE, rt_key(rt), + (struct sockaddr *)0, rt_mask(rt), + rt->rt_flags, (struct rtentry **)0); + } + return 0; +} + +/* + * This routine is called from rt_setgate() to do the analogous thing for + * adds and changes. There is the added complication in this case of a + * middle insert; i.e., insertion of a new network route between an older + * network route and (cloned) host routes. For this reason, a simple check + * of rt->rt_parent is insufficient; each candidate route must be tested + * against the (mask, value) of the new route (passed as before in vp) + * to see if the new route matches it. Unfortunately, this has the obnoxious + * property of also triggering for insertion /above/ a pre-existing network + * route and clones. Sigh. This may be fixed some day. + * + * XXX - it may be possible to do fixdelete() for changes and reserve this + * routine just for adds. I'm not sure why I thought it was necessary to do + * changes this way. 
+ */ +#ifdef DEBUG +static int rtfcdebug = 0; +#endif + +static int +rt_fixchange(rn, vp) + struct radix_node *rn; + void *vp; +{ + struct rtentry *rt = (struct rtentry *)rn; + struct rtfc_arg *ap = vp; + struct rtentry *rt0 = ap->rt0; + struct radix_node_head *rnh = ap->rnh; + u_char *xk1, *xm1, *xk2; + int i, len; + +#ifdef DEBUG + if (rtfcdebug) + printf("rt_fixchange: rt %p, rt0 %p\n", rt, rt0); +#endif + + if (!rt->rt_parent || (rt->rt_flags & RTF_PINNED)) { +#ifdef DEBUG + if(rtfcdebug) printf("no parent or pinned\n"); +#endif + return 0; + } + + if (rt->rt_parent == rt0) { +#ifdef DEBUG + if(rtfcdebug) printf("parent match\n"); +#endif + return rtrequest(RTM_DELETE, rt_key(rt), + (struct sockaddr *)0, rt_mask(rt), + rt->rt_flags, (struct rtentry **)0); + } + + /* + * There probably is a function somewhere which does this... + * if not, there should be. + */ + len = imin(((struct sockaddr *)rt_key(rt0))->sa_len, + ((struct sockaddr *)rt_key(rt))->sa_len); + + xk1 = (u_char *)rt_key(rt0); + xm1 = (u_char *)rt_mask(rt0); + xk2 = (u_char *)rt_key(rt); + + for (i = rnh->rnh_treetop->rn_off; i < len; i++) { + if ((xk2[i] & xm1[i]) != xk1[i]) { +#ifdef DEBUG + if(rtfcdebug) printf("no match\n"); +#endif + return 0; + } + } + + /* + * OK, this node is a clone, and matches the node currently being + * changed/added under the node's mask. So, get rid of it. + */ +#ifdef DEBUG + if(rtfcdebug) printf("deleting\n"); +#endif + return rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0, + rt_mask(rt), rt->rt_flags, (struct rtentry **)0); +} + +int +rt_setgate(rt0, dst, gate) + struct rtentry *rt0; + struct sockaddr *dst, *gate; +{ + caddr_t new, old; + int dlen = ROUNDUP(dst->sa_len), glen = ROUNDUP(gate->sa_len); + register struct rtentry *rt = rt0; + struct radix_node_head *rnh = rt_tables[dst->sa_family]; + + /* + * A host route with the destination equal to the gateway + * will interfere with keeping LLINFO in the routing + * table, so disallow it. 
+ */ + if (((rt0->rt_flags & (RTF_HOST|RTF_GATEWAY|RTF_LLINFO)) == + (RTF_HOST|RTF_GATEWAY)) && + (dst->sa_len == gate->sa_len) && + (bcmp(dst, gate, dst->sa_len) == 0)) { + /* + * The route might already exist if this is an RTM_CHANGE + * or a routing redirect, so try to delete it. + */ + if (rt_key(rt0)) + rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt0), + rt0->rt_gateway, rt_mask(rt0), rt0->rt_flags, 0); + return EADDRNOTAVAIL; + } + + /* + * Both dst and gateway are stored in the same malloc'd chunk + * (If I ever get my hands on....) + * if we need to malloc a new chunk, then keep the old one around + * till we don't need it any more. + */ + if (rt->rt_gateway == 0 || glen > ROUNDUP(rt->rt_gateway->sa_len)) { + old = (caddr_t)rt_key(rt); + R_Malloc(new, caddr_t, dlen + glen); + if (new == 0) + return ENOBUFS; + rt->rt_nodes->rn_key = new; + } else { + /* + * otherwise just overwrite the old one + */ + new = rt->rt_nodes->rn_key; + old = 0; + } + + /* + * copy the new gateway value into the memory chunk + */ + Bcopy(gate, (rt->rt_gateway = (struct sockaddr *)(new + dlen)), glen); + + /* + * if we are replacing the chunk (or it's new) we need to + * replace the dst as well + */ + if (old) { + Bcopy(dst, new, dlen); + Free(old); + } + + /* + * If there is already a gwroute, it's now almost definitely wrong + * so drop it. + */ + if (rt->rt_gwroute) { + rt = rt->rt_gwroute; RTFREE(rt); + rt = rt0; rt->rt_gwroute = 0; + } + /* + * Cloning loop avoidance: + * In the presence of protocol-cloning and bad configuration, + * it is possible to get stuck in bottomless mutual recursion + * (rtrequest rt_setgate rtalloc1). We avoid this by not allowing + * protocol-cloning to operate for gateways (which is probably the + * correct choice anyway), and avoid the resulting reference loops + * by disallowing any route to run through itself as a gateway. + * This is obviously mandatory when we get rt->rt_output(). 
+ */ + if (rt->rt_flags & RTF_GATEWAY) { + rt->rt_gwroute = rtalloc1(gate, 1, RTF_PRCLONING); + if (rt->rt_gwroute == rt) { + RTFREE(rt->rt_gwroute); + rt->rt_gwroute = 0; + return EDQUOT; /* failure */ + } + } + + /* + * This isn't going to do anything useful for host routes, so + * don't bother. Also make sure we have a reasonable mask + * (we don't yet have one during adds). + */ + if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) { + struct rtfc_arg arg; + arg.rnh = rnh; + arg.rt0 = rt; + rnh->rnh_walktree_from(rnh, rt_key(rt), rt_mask(rt), + rt_fixchange, &arg); + } + + return 0; +} + +static void +rt_maskedcopy(src, dst, netmask) + struct sockaddr *src, *dst, *netmask; +{ + register u_char *cp1 = (u_char *)src; + register u_char *cp2 = (u_char *)dst; + register u_char *cp3 = (u_char *)netmask; + u_char *cplim = cp2 + *cp3; + u_char *cplim2 = cp2 + *cp1; + + *cp2++ = *cp1++; *cp2++ = *cp1++; /* copies sa_len & sa_family */ + cp3 += 2; + if (cplim > cplim2) + cplim = cplim2; + while (cp2 < cplim) + *cp2++ = *cp1++ & *cp3++; + if (cp2 < cplim2) + bzero((caddr_t)cp2, (unsigned)(cplim2 - cp2)); +} + +/* + * Set up a routing table entry, normally + * for an interface. + */ +int +rtinit(ifa, cmd, flags) + register struct ifaddr *ifa; + int cmd, flags; +{ + register struct rtentry *rt; + register struct sockaddr *dst; + register struct sockaddr *deldst; + struct mbuf *m = 0; + struct rtentry *nrt = 0; + int error; + + dst = flags & RTF_HOST ? ifa->ifa_dstaddr : ifa->ifa_addr; + /* + * If it's a delete, check that if it exists, it's on the correct + * interface or we might scrub a route to another ifa which would + * be confusing at best and possibly worse. + */ + if (cmd == RTM_DELETE) { + /* + * It's a delete, so it should already exist.. 
+ * If it's a net, mask off the host bits + * (Assuming we have a mask) + */ + if ((flags & RTF_HOST) == 0 && ifa->ifa_netmask) { + m = m_get(M_WAIT, MT_SONAME); + deldst = mtod(m, struct sockaddr *); + rt_maskedcopy(dst, deldst, ifa->ifa_netmask); + dst = deldst; + } + /* + * Get an rtentry that is in the routing tree and + * contains the correct info. (if this fails, can't get there). + * We set "report" to FALSE so that if it doesn't exist, + * it doesn't report an error or clone a route, etc. etc. + */ + rt = rtalloc1(dst, 0, 0UL); + if (rt) { + /* + * Ok so we found the rtentry. it has an extra reference + * for us at this stage. we won't need that so + * lop that off now. + */ + rt->rt_refcnt--; + if (rt->rt_ifa != ifa) { + /* + * If the interface in the rtentry doesn't match + * the interface we are using, then we don't + * want to delete it, so return an error. + * This seems to be the only point of + * this whole RTM_DELETE clause. + */ + if (m) + (void) m_free(m); + return (flags & RTF_HOST ? EHOSTUNREACH + : ENETUNREACH); + } + } + /* XXX */ +#if 0 + else { + /* + * One would think that as we are deleting, and we know + * it doesn't exist, we could just return at this point + * with an "ELSE" clause, but apparently not.. + */ + return (flags & RTF_HOST ? EHOSTUNREACH + : ENETUNREACH); + } +#endif + } + /* + * Do the actual request + */ + error = rtrequest(cmd, dst, ifa->ifa_addr, ifa->ifa_netmask, + flags | ifa->ifa_flags, &nrt); + if (m) + (void) m_free(m); + /* + * If we are deleting, and we found an entry, then + * it's been removed from the tree.. now throw it away. + */ + if (cmd == RTM_DELETE && error == 0 && (rt = nrt)) { + /* + * notify any listenning routing agents of the change + */ + rt_newaddrmsg(cmd, ifa, error, nrt); + if (rt->rt_refcnt <= 0) { + rt->rt_refcnt++; + rtfree(rt); + } + } + + /* + * We are adding, and we have a returned routing entry. + * We need to sanity check the result. 
+ */ + if (cmd == RTM_ADD && error == 0 && (rt = nrt)) { + /* + * We just wanted to add it.. we don't actually need a reference + */ + rt->rt_refcnt--; + /* + * If it came back with an unexpected interface, then it must + * have already existed or something. (XXX) + */ + if (rt->rt_ifa != ifa) { + printf("rtinit: wrong ifa (%p) was (%p)\n", ifa, + rt->rt_ifa); + /* + * Ask that the protocol in question + * remove anything it has associated with + * this route and ifaddr. + */ + if (rt->rt_ifa->ifa_rtrequest) + rt->rt_ifa->ifa_rtrequest(RTM_DELETE, rt, SA(0)); + /* + * Remove the referenve to the it's ifaddr. + */ + IFAFREE(rt->rt_ifa); + /* + * And substitute in references to the ifaddr + * we are adding. + */ + rt->rt_ifa = ifa; + rt->rt_ifp = ifa->ifa_ifp; + rt->rt_dlt = ifa->ifa_dlt; + rt->rt_rmx.rmx_mtu = ifa->ifa_ifp->if_mtu; + ifa->ifa_refcnt++; + /* + * Now ask the protocol to check if it needs + * any special processing in its new form. + */ + if (ifa->ifa_rtrequest) + ifa->ifa_rtrequest(RTM_ADD, rt, SA(0)); + } + /* + * notify any listenning routing agents of the change + */ + rt_newaddrmsg(cmd, ifa, error, nrt); + } + return (error); +} diff --git a/bsd/net/route.h b/bsd/net/route.h new file mode 100644 index 000000000..0e2c09f91 --- /dev/null +++ b/bsd/net/route.h @@ -0,0 +1,321 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1980, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)route.h 8.3 (Berkeley) 4/19/94 + */ + +#ifndef _NET_ROUTE_H_ +#define _NET_ROUTE_H_ + +/* + * Kernel resident routing tables. + * + * The routing tables are initialized when interface addresses + * are set by making entries for all directly connected interfaces. + */ + +/* + * A route consists of a destination address and a reference + * to a routing entry. These are often held by protocols + * in their control blocks, e.g. inpcb. + */ +struct route { + struct rtentry *ro_rt; + struct sockaddr ro_dst; +}; + +/* + * These numbers are used by reliable protocols for determining + * retransmission behavior and are included in the routing structure. + */ +struct rt_metrics { + u_long rmx_locks; /* Kernel must leave these values alone */ + u_long rmx_mtu; /* MTU for this path */ + u_long rmx_hopcount; /* max hops expected */ + u_long rmx_expire; /* lifetime for route, e.g. redirect */ + u_long rmx_recvpipe; /* inbound delay-bandwidth product */ + u_long rmx_sendpipe; /* outbound delay-bandwidth product */ + u_long rmx_ssthresh; /* outbound gateway buffer limit */ + u_long rmx_rtt; /* estimated round trip time */ + u_long rmx_rttvar; /* estimated rtt variance */ + u_long rmx_pksent; /* packets sent using this route */ + u_long rmx_filler[4]; /* will be used for T/TCP later */ +}; + +/* + * rmx_rtt and rmx_rttvar are stored as microseconds; + * RTTTOPRHZ(rtt) converts to a value suitable for use + * by a protocol slowtimo counter. 
+ */ +#define RTM_RTTUNIT 1000000 /* units for rtt, rttvar, as units per sec */ +#define RTTTOPRHZ(r) ((r) / (RTM_RTTUNIT / PR_SLOWHZ)) + +/* + * XXX kernel function pointer `rt_output' is visible to applications. + */ +struct mbuf; + +/* + * We distinguish between routes to hosts and routes to networks, + * preferring the former if available. For each route we infer + * the interface to use from the gateway address supplied when + * the route was entered. Routes that forward packets through + * gateways are marked so that the output routines know to address the + * gateway rather than the ultimate destination. + */ +#ifndef RNF_NORMAL +#include +#endif +struct rtentry { + struct radix_node rt_nodes[2]; /* tree glue, and other values */ +#define rt_key(r) ((struct sockaddr *)((r)->rt_nodes->rn_key)) +#define rt_mask(r) ((struct sockaddr *)((r)->rt_nodes->rn_mask)) + struct sockaddr *rt_gateway; /* value */ + short rt_filler; /* was short flags field */ + short rt_refcnt; /* # held references */ + u_long rt_flags; /* up/down?, host/net */ + struct ifnet *rt_ifp; /* the answer: interface to use */ + u_long rt_dlt; /* DLIL dl_tag */ + struct ifaddr *rt_ifa; /* the answer: interface to use */ + struct sockaddr *rt_genmask; /* for generation of cloned routes */ + caddr_t rt_llinfo; /* pointer to link level info cache */ + struct rt_metrics rt_rmx; /* metrics used by rx'ing protocols */ + struct rtentry *rt_gwroute; /* implied entry for gatewayed routes */ + int (*rt_output) __P((struct ifnet *, struct mbuf *, + struct sockaddr *, struct rtentry *)); + /* output routine for this (rt,if) */ + struct rtentry *rt_parent; /* cloning parent of this route */ + void *rt_filler2; /* more filler */ +}; + +/* + * Following structure necessary for 4.3 compatibility; + * We should eventually move it to a compat file. 
+ */ +struct ortentry { + u_long rt_hash; /* to speed lookups */ + struct sockaddr rt_dst; /* key */ + struct sockaddr rt_gateway; /* value */ + short rt_flags; /* up/down?, host/net */ + short rt_refcnt; /* # held references */ + u_long rt_use; /* raw # packets forwarded */ + struct ifnet *rt_ifp; /* the answer: interface to use */ +}; + +#define rt_use rt_rmx.rmx_pksent + +#define RTF_UP 0x1 /* route usable */ +#define RTF_GATEWAY 0x2 /* destination is a gateway */ +#define RTF_HOST 0x4 /* host entry (net otherwise) */ +#define RTF_REJECT 0x8 /* host or net unreachable */ +#define RTF_DYNAMIC 0x10 /* created dynamically (by redirect) */ +#define RTF_MODIFIED 0x20 /* modified dynamically (by redirect) */ +#define RTF_DONE 0x40 /* message confirmed */ +/* 0x80 unused */ +#define RTF_CLONING 0x100 /* generate new routes on use */ +#define RTF_XRESOLVE 0x200 /* external daemon resolves name */ +#define RTF_LLINFO 0x400 /* generated by link layer (e.g. ARP) */ +#define RTF_STATIC 0x800 /* manually added */ +#define RTF_BLACKHOLE 0x1000 /* just discard pkts (during updates) */ +#define RTF_PROTO2 0x4000 /* protocol specific routing flag */ +#define RTF_PROTO1 0x8000 /* protocol specific routing flag */ + +#define RTF_PRCLONING 0x10000 /* protocol requires cloning */ +#define RTF_WASCLONED 0x20000 /* route generated through cloning */ +#define RTF_PROTO3 0x40000 /* protocol specific routing flag */ +/* 0x80000 unused */ +#define RTF_PINNED 0x100000 /* future use */ +#define RTF_LOCAL 0x200000 /* route represents a local address */ +#define RTF_BROADCAST 0x400000 /* route represents a bcast address */ +#define RTF_MULTICAST 0x800000 /* route represents a mcast address */ + /* 0x1000000 and up unassigned */ + +/* + * Routing statistics. 
+ */ +struct rtstat { + short rts_badredirect; /* bogus redirect calls */ + short rts_dynamic; /* routes created by redirects */ + short rts_newgateway; /* routes modified by redirects */ + short rts_unreach; /* lookups which failed */ + short rts_wildcard; /* lookups satisfied by a wildcard */ +}; +/* + * Structures for routing messages. + */ +struct rt_msghdr { + u_short rtm_msglen; /* to skip over non-understood messages */ + u_char rtm_version; /* future binary compatibility */ + u_char rtm_type; /* message type */ + u_short rtm_index; /* index for associated ifp */ + int rtm_flags; /* flags, incl. kern & message, e.g. DONE */ + int rtm_addrs; /* bitmask identifying sockaddrs in msg */ + pid_t rtm_pid; /* identify sender */ + int rtm_seq; /* for sender to identify action */ + int rtm_errno; /* why failed */ + int rtm_use; /* from rtentry */ + u_long rtm_inits; /* which metrics we are initializing */ + struct rt_metrics rtm_rmx; /* metrics themselves */ +}; + +#define RTM_VERSION 5 /* Up the ante and ignore older versions */ + +/* + * Message types. + */ +#define RTM_ADD 0x1 /* Add Route */ +#define RTM_DELETE 0x2 /* Delete Route */ +#define RTM_CHANGE 0x3 /* Change Metrics or flags */ +#define RTM_GET 0x4 /* Report Metrics */ +#define RTM_LOSING 0x5 /* Kernel Suspects Partitioning */ +#define RTM_REDIRECT 0x6 /* Told to use different route */ +#define RTM_MISS 0x7 /* Lookup failed on this address */ +#define RTM_LOCK 0x8 /* fix specified metrics */ +#define RTM_OLDADD 0x9 /* caused by SIOCADDRT */ +#define RTM_OLDDEL 0xa /* caused by SIOCDELRT */ +#define RTM_RESOLVE 0xb /* req to resolve dst to LL addr */ +#define RTM_NEWADDR 0xc /* address being added to iface */ +#define RTM_DELADDR 0xd /* address being removed from iface */ +#define RTM_IFINFO 0xe /* iface going up/down etc. 
*/ +#define RTM_NEWMADDR 0xf /* mcast group membership being added to if */ +#define RTM_DELMADDR 0x10 /* mcast group membership being deleted */ + +/* + * Bitmask values for rtm_inits and rmx_locks. + */ +#define RTV_MTU 0x1 /* init or lock _mtu */ +#define RTV_HOPCOUNT 0x2 /* init or lock _hopcount */ +#define RTV_EXPIRE 0x4 /* init or lock _expire */ +#define RTV_RPIPE 0x8 /* init or lock _recvpipe */ +#define RTV_SPIPE 0x10 /* init or lock _sendpipe */ +#define RTV_SSTHRESH 0x20 /* init or lock _ssthresh */ +#define RTV_RTT 0x40 /* init or lock _rtt */ +#define RTV_RTTVAR 0x80 /* init or lock _rttvar */ + +/* + * Bitmask values for rtm_addrs. + */ +#define RTA_DST 0x1 /* destination sockaddr present */ +#define RTA_GATEWAY 0x2 /* gateway sockaddr present */ +#define RTA_NETMASK 0x4 /* netmask sockaddr present */ +#define RTA_GENMASK 0x8 /* cloning mask sockaddr present */ +#define RTA_IFP 0x10 /* interface name sockaddr present */ +#define RTA_IFA 0x20 /* interface addr sockaddr present */ +#define RTA_AUTHOR 0x40 /* sockaddr for author of redirect */ +#define RTA_BRD 0x80 /* for NEWADDR, broadcast or p-p dest addr */ + +/* + * Index offsets for sockaddr array for alternate internal encoding. 
+ */ +#define RTAX_DST 0 /* destination sockaddr present */ +#define RTAX_GATEWAY 1 /* gateway sockaddr present */ +#define RTAX_NETMASK 2 /* netmask sockaddr present */ +#define RTAX_GENMASK 3 /* cloning mask sockaddr present */ +#define RTAX_IFP 4 /* interface name sockaddr present */ +#define RTAX_IFA 5 /* interface addr sockaddr present */ +#define RTAX_AUTHOR 6 /* sockaddr for author of redirect */ +#define RTAX_BRD 7 /* for NEWADDR, broadcast or p-p dest addr */ +#define RTAX_MAX 8 /* size of array to allocate */ + +struct rt_addrinfo { + int rti_addrs; + struct sockaddr *rti_info[RTAX_MAX]; +}; + +struct route_cb { + int ip_count; + int ip6_count; + int ipx_count; + int ns_count; + int iso_count; + int any_count; +}; + +#ifdef KERNEL +#define RTFREE(rt) \ + if ((rt)->rt_refcnt <= 1) \ + rtfree(rt); \ + else \ + (rt)->rt_refcnt--; +#define RTHOLD(rt) { \ + if (++(rt)->rt_refcnt <= 0) \ + panic("RTHOLD"); \ +} + +extern struct route_cb route_cb; +extern struct radix_node_head *rt_tables[AF_MAX+1]; + +struct ifmultiaddr; +struct proc; + +void route_init __P((void)); +void rt_ifmsg __P((struct ifnet *)); +void rt_missmsg __P((int, struct rt_addrinfo *, int, int)); +void rt_newaddrmsg __P((int, struct ifaddr *, int, struct rtentry *)); +void rt_newmaddrmsg __P((int, struct ifmultiaddr *)); +int rt_setgate __P((struct rtentry *, + struct sockaddr *, struct sockaddr *)); +void rtalloc __P((struct route *)); +void rtalloc_ign __P((struct route *, unsigned long)); +struct rtentry * + rtalloc1 __P((struct sockaddr *, int, unsigned long)); +void rtfree __P((struct rtentry *)); +int rtinit __P((struct ifaddr *, int, int)); +int rtioctl __P((int, caddr_t, struct proc *)); +void rtredirect __P((struct sockaddr *, struct sockaddr *, + struct sockaddr *, int, struct sockaddr *, struct rtentry **)); +int rtrequest __P((int, struct sockaddr *, + struct sockaddr *, struct sockaddr *, int, struct rtentry **)); +#endif + +#endif diff --git a/bsd/net/rtsock.c b/bsd/net/rtsock.c 
new file mode 100644 index 000000000..8ac37c98d --- /dev/null +++ b/bsd/net/rtsock.c @@ -0,0 +1,1095 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)rtsock.c 8.5 (Berkeley) 11/2/94 + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables"); + +static struct sockaddr route_dst = { 2, PF_ROUTE, }; +static struct sockaddr route_src = { 2, PF_ROUTE, }; +static struct sockaddr sa_zero = { sizeof(sa_zero), AF_INET, }; +static struct sockproto route_proto = { PF_ROUTE, }; + + + +struct walkarg { + int w_tmemsize; + int w_op, w_arg; + caddr_t w_tmem; + struct sysctl_req *w_req; +}; + +static struct mbuf * + rt_msg1 __P((int, struct rt_addrinfo *)); +static int rt_msg2 __P((int, + struct rt_addrinfo *, caddr_t, struct walkarg *)); +static int rt_xaddrs __P((caddr_t, caddr_t, struct rt_addrinfo *)); +static int sysctl_dumpentry __P((struct radix_node *rn, void *vw)); +static int sysctl_iflist __P((int af, struct walkarg *w)); +static int route_output __P((struct mbuf *, struct socket *)); +static void 
rt_setmetrics __P((u_long, struct rt_metrics *, struct rt_metrics *)); +static void rt_setif __P((struct rtentry *, struct sockaddr *, struct sockaddr *, + struct sockaddr *)); + +/* Sleazy use of local variables throughout file, warning!!!! */ +#define dst info.rti_info[RTAX_DST] +#define gate info.rti_info[RTAX_GATEWAY] +#define netmask info.rti_info[RTAX_NETMASK] +#define genmask info.rti_info[RTAX_GENMASK] +#define ifpaddr info.rti_info[RTAX_IFP] +#define ifaaddr info.rti_info[RTAX_IFA] +#define brdaddr info.rti_info[RTAX_BRD] + +/* + * It really doesn't make any sense at all for this code to share much + * with raw_usrreq.c, since its functionality is so restricted. XXX + */ +static int +rts_abort(struct socket *so) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_abort(so); + splx(s); + return error; +} + +/* pru_accept is EOPNOTSUPP */ + +static int +rts_attach(struct socket *so, int proto, struct proc *p) +{ + struct rawcb *rp; + int s, error; + + if (sotorawcb(so) != 0) + return EISCONN; /* XXX panic? */ + MALLOC(rp, struct rawcb *, sizeof *rp, M_PCB, M_WAITOK); /* XXX */ + if (rp == 0) + return ENOBUFS; + bzero(rp, sizeof *rp); + + /* + * The splnet() is necessary to block protocols from sending + * error notifications (like RTM_REDIRECT or RTM_LOSING) while + * this PCB is extant but incompletely initialized. + * Probably we should try to do more of this work beforehand and + * eliminate the spl. 
+ */ + s = splnet(); + so->so_pcb = (caddr_t)rp; + error = raw_usrreqs.pru_attach(so, proto, p); + rp = sotorawcb(so); + if (error) { + splx(s); + FREE(rp, M_PCB); + return error; + } + switch(rp->rcb_proto.sp_protocol) { + case AF_INET: + route_cb.ip_count++; + break; + case AF_INET6: + route_cb.ip6_count++; + break; + case AF_IPX: + route_cb.ipx_count++; + break; + case AF_NS: + route_cb.ns_count++; + break; + case AF_ISO: + route_cb.iso_count++; + break; + } + rp->rcb_faddr = &route_src; + route_cb.any_count++; + soisconnected(so); + so->so_options |= SO_USELOOPBACK; + splx(s); + return 0; +} + +static int +rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */ + splx(s); + return error; +} + +static int +rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */ + splx(s); + return error; +} + +/* pru_connect2 is EOPNOTSUPP */ +/* pru_control is EOPNOTSUPP */ + +static int +rts_detach(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + int s, error; + + s = splnet(); + if (rp != 0) { + switch(rp->rcb_proto.sp_protocol) { + case AF_INET: + route_cb.ip_count--; + break; + case AF_INET6: + route_cb.ip6_count--; + break; + case AF_IPX: + route_cb.ipx_count--; + break; + case AF_NS: + route_cb.ns_count--; + break; + case AF_ISO: + route_cb.iso_count--; + break; + } + route_cb.any_count--; + } + error = raw_usrreqs.pru_detach(so); + splx(s); + return error; +} + +static int +rts_disconnect(struct socket *so) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_disconnect(so); + splx(s); + return error; +} + +/* pru_listen is EOPNOTSUPP */ + +static int +rts_peeraddr(struct socket *so, struct sockaddr **nam) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_peeraddr(so, nam); + splx(s); + return error; +} + +/* pru_rcvd 
is EOPNOTSUPP */ +/* pru_rcvoob is EOPNOTSUPP */ + +static int +rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, + struct mbuf *control, struct proc *p) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_send(so, flags, m, nam, control, p); + splx(s); + return error; +} + +/* pru_sense is null */ + +static int +rts_shutdown(struct socket *so) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_shutdown(so); + splx(s); + return error; +} + +static int +rts_sockaddr(struct socket *so, struct sockaddr **nam) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_sockaddr(so, nam); + splx(s); + return error; +} + +static struct pr_usrreqs route_usrreqs = { + rts_abort, pru_accept_notsupp, rts_attach, rts_bind, rts_connect, + pru_connect2_notsupp, pru_control_notsupp, rts_detach, rts_disconnect, + pru_listen_notsupp, rts_peeraddr, pru_rcvd_notsupp, pru_rcvoob_notsupp, + rts_send, pru_sense_null, rts_shutdown, rts_sockaddr, + sosend, soreceive, sopoll +}; + +/*ARGSUSED*/ +static int +route_output(m, so) + register struct mbuf *m; + struct socket *so; +{ + register struct rt_msghdr *rtm = 0; + register struct rtentry *rt = 0; + struct rtentry *saved_nrt = 0; + struct radix_node_head *rnh; + struct rt_addrinfo info; + int len, error = 0; + struct ifnet *ifp = 0; + struct ifaddr *ifa = 0; + struct proc *curproc = current_proc(); + +#define senderr(e) { error = e; goto flush;} + if (m == 0 || ((m->m_len < sizeof(long)) && + (m = m_pullup(m, sizeof(long))) == 0)) + return (ENOBUFS); + if ((m->m_flags & M_PKTHDR) == 0) + panic("route_output"); + len = m->m_pkthdr.len; + if (len < sizeof(*rtm) || + len != mtod(m, struct rt_msghdr *)->rtm_msglen) { + dst = 0; + senderr(EINVAL); + } + R_Malloc(rtm, struct rt_msghdr *, len); + if (rtm == 0) { + dst = 0; + senderr(ENOBUFS); + } + m_copydata(m, 0, len, (caddr_t)rtm); + if (rtm->rtm_version != RTM_VERSION) { + dst = 0; + senderr(EPROTONOSUPPORT); + } + rtm->rtm_pid = 
curproc->p_pid; + info.rti_addrs = rtm->rtm_addrs; + if (rt_xaddrs((caddr_t)(rtm + 1), len + (caddr_t)rtm, &info)) { + dst = 0; + senderr(EINVAL); + } + if (dst == 0 || (dst->sa_family >= AF_MAX) + || (gate != 0 && (gate->sa_family >= AF_MAX))) + senderr(EINVAL); + if (genmask) { + struct radix_node *t; + t = rn_addmask((caddr_t)genmask, 0, 1); + if (t && Bcmp(genmask, t->rn_key, *(u_char *)genmask) == 0) + genmask = (struct sockaddr *)(t->rn_key); + else + senderr(ENOBUFS); + } + switch (rtm->rtm_type) { + + case RTM_ADD: + if (gate == 0) + senderr(EINVAL); + error = rtrequest(RTM_ADD, dst, gate, netmask, + rtm->rtm_flags, &saved_nrt); + if (error == 0 && saved_nrt) { + /* + * If the route request specified an interface with + * IFA and/or IFP, we set the requested interface on + * the route with rt_setif. It would be much better + * to do this inside rtrequest, but that would + * require passing the desired interface, in some + * form, to rtrequest. Since rtrequest is called in + * so many places (roughly 40 in our source), adding + * a parameter is to much for us to swallow; this is + * something for the FreeBSD developers to tackle. + * Instead, we let rtrequest compute whatever + * interface it wants, then come in behind it and + * stick in the interface that we really want. This + * works reasonably well except when rtrequest can't + * figure out what interface to use (with + * ifa_withroute) and returns ENETUNREACH. Ideally + * it shouldn't matter if rtrequest can't figure out + * the interface if we're going to explicitly set it + * ourselves anyway. But practically we can't + * recover here because rtrequest will not do any of + * the work necessary to add the route if it can't + * find an interface. As long as there is a default + * route that leads to some interface, rtrequest will + * find an interface, so this problem should be + * rarely encountered. 
+ * dwiggins@bbn.com + */ + + rt_setif(saved_nrt, ifpaddr, ifaaddr, gate); + rt_setmetrics(rtm->rtm_inits, + &rtm->rtm_rmx, &saved_nrt->rt_rmx); + saved_nrt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits); + saved_nrt->rt_rmx.rmx_locks |= + (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks); + saved_nrt->rt_refcnt--; + saved_nrt->rt_genmask = genmask; + } + break; + + case RTM_DELETE: + error = rtrequest(RTM_DELETE, dst, gate, netmask, + rtm->rtm_flags, &saved_nrt); + if (error == 0) { + if ((rt = saved_nrt)) + rt->rt_refcnt++; + goto report; + } + break; + + case RTM_GET: + case RTM_CHANGE: + case RTM_LOCK: + if ((rnh = rt_tables[dst->sa_family]) == 0) { + senderr(EAFNOSUPPORT); + } else if (rt = (struct rtentry *) + rnh->rnh_lookup(dst, netmask, rnh)) + rt->rt_refcnt++; + else + senderr(ESRCH); + switch(rtm->rtm_type) { + + case RTM_GET: + report: + dst = rt_key(rt); + gate = rt->rt_gateway; + netmask = rt_mask(rt); + genmask = rt->rt_genmask; + if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) { + ifp = rt->rt_ifp; + if (ifp) { + ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr; + ifaaddr = rt->rt_ifa->ifa_addr; + rtm->rtm_index = ifp->if_index; + } else { + ifpaddr = 0; + ifaaddr = 0; + } + } + len = rt_msg2(rtm->rtm_type, &info, (caddr_t)0, + (struct walkarg *)0); + if (len > rtm->rtm_msglen) { + struct rt_msghdr *new_rtm; + R_Malloc(new_rtm, struct rt_msghdr *, len); + if (new_rtm == 0) + senderr(ENOBUFS); + Bcopy(rtm, new_rtm, rtm->rtm_msglen); + Free(rtm); rtm = new_rtm; + } + (void)rt_msg2(rtm->rtm_type, &info, (caddr_t)rtm, + (struct walkarg *)0); + rtm->rtm_flags = rt->rt_flags; + rtm->rtm_rmx = rt->rt_rmx; + rtm->rtm_addrs = info.rti_addrs; + break; + + case RTM_CHANGE: + if (gate && (error = rt_setgate(rt, rt_key(rt), gate))) + senderr(error); + + /* + * If they tried to change things but didn't specify + * the required gateway, then just use the old one. + * This can happen if the user tries to change the + * flags on the default route without changing the + * default gateway. 
Changing flags still doesn't work. + */ + if ((rt->rt_flags & RTF_GATEWAY) && !gate) + gate = rt->rt_gateway; + + rt_setif(rt, ifpaddr, ifaaddr, gate); + + rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, + &rt->rt_rmx); + if (genmask) + rt->rt_genmask = genmask; + /* + * Fall into + */ + case RTM_LOCK: + rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits); + rt->rt_rmx.rmx_locks |= + (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks); + break; + } + break; + + default: + senderr(EOPNOTSUPP); + } + +flush: + if (rtm) { + if (error) + rtm->rtm_errno = error; + else + rtm->rtm_flags |= RTF_DONE; + } + if (rt) + rtfree(rt); + { + register struct rawcb *rp = 0; + /* + * Check to see if we don't want our own messages. + */ + if ((so->so_options & SO_USELOOPBACK) == 0) { + if (route_cb.any_count <= 1) { + if (rtm) + Free(rtm); + m_freem(m); + return (error); + } + /* There is another listener, so construct message */ + rp = sotorawcb(so); + } + if (rtm) { + m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm); + Free(rtm); + } + if (rp) + rp->rcb_proto.sp_family = 0; /* Avoid us */ + if (dst) + route_proto.sp_protocol = dst->sa_family; + raw_input(m, &route_proto, &route_src, &route_dst); + if (rp) + rp->rcb_proto.sp_family = PF_ROUTE; + } + return (error); +} + +static void +rt_setmetrics(which, in, out) + u_long which; + register struct rt_metrics *in, *out; +{ +#define metric(f, e) if (which & (f)) out->e = in->e; + metric(RTV_RPIPE, rmx_recvpipe); + metric(RTV_SPIPE, rmx_sendpipe); + metric(RTV_SSTHRESH, rmx_ssthresh); + metric(RTV_RTT, rmx_rtt); + metric(RTV_RTTVAR, rmx_rttvar); + metric(RTV_HOPCOUNT, rmx_hopcount); + metric(RTV_MTU, rmx_mtu); + metric(RTV_EXPIRE, rmx_expire); +#undef metric +} + +/* + * Set route's interface given ifpaddr, ifaaddr, and gateway. 
+ */ +static void +rt_setif(rt, Ifpaddr, Ifaaddr, Gate) + struct rtentry *rt; + struct sockaddr *Ifpaddr, *Ifaaddr, *Gate; +{ + struct ifaddr *ifa = 0; + struct ifnet *ifp = 0; + + /* new gateway could require new ifaddr, ifp; + flags may also be different; ifp may be specified + by ll sockaddr when protocol address is ambiguous */ + if (Ifpaddr && (ifa = ifa_ifwithnet(Ifpaddr)) && + (ifp = ifa->ifa_ifp) && (Ifaaddr || Gate)) + ifa = ifaof_ifpforaddr(Ifaaddr ? Ifaaddr : Gate, + ifp); + else if (Ifpaddr && (ifp = if_withname(Ifpaddr)) ) { + ifa = Gate ? ifaof_ifpforaddr(Gate, ifp) : + TAILQ_FIRST(&ifp->if_addrhead); + } + else if ((Ifaaddr && (ifa = ifa_ifwithaddr(Ifaaddr))) || + (Gate && (ifa = ifa_ifwithroute(rt->rt_flags, + rt_key(rt), Gate)))) + ifp = ifa->ifa_ifp; + if (ifa) { + register struct ifaddr *oifa = rt->rt_ifa; + if (oifa != ifa) { + if (oifa && oifa->ifa_rtrequest) + oifa->ifa_rtrequest(RTM_DELETE, + rt, Gate); + IFAFREE(rt->rt_ifa); + rt->rt_ifa = ifa; + ifa->ifa_refcnt++; + rt->rt_ifp = ifp; + rt->rt_rmx.rmx_mtu = ifp->if_mtu; + if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest) + rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, Gate); + } else + goto call_ifareq; + return; + } + call_ifareq: + /* XXX: to reset gateway to correct value, at RTM_CHANGE */ + if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest) + rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, Gate); +} + + +#define ROUNDUP(a) \ + ((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long)) +#define ADVANCE(x, n) (x += ROUNDUP((n)->sa_len)) + + +/* + * Extract the addresses of the passed sockaddrs. + * Do a little sanity checking so as to avoid bad memory references. + * This data is derived straight from userland. 
 */
static int
rt_xaddrs(cp, cplim, rtinfo)
	register caddr_t cp, cplim;
	register struct rt_addrinfo *rtinfo;
{
	register struct sockaddr *sa;
	register int i;

	bzero(rtinfo->rti_info, sizeof(rtinfo->rti_info));
	/* Walk the RTA_* bitmask in RTAX_* order; stop at end of buffer. */
	for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
		if ((rtinfo->rti_addrs & (1 << i)) == 0)
			continue;
		sa = (struct sockaddr *)cp;
		/*
		 * It won't fit.
		 */
		if ( (cp + sa->sa_len) > cplim ) {
			return (EINVAL);
		}

		/*
		 * there are no more.. quit now
		 * If there are more bits, they are in error.
		 * I've seen this. route(1) can evidently generate these.
		 * This causes kernel to core dump.
		 * for compatibility, If we see this, point to a safe address.
		 */
		if (sa->sa_len == 0) {
			rtinfo->rti_info[i] = &sa_zero;
			return (0); /* should be EINVAL but for compat */
		}

		/* accept it */
		rtinfo->rti_info[i] = sa;
		ADVANCE(cp, sa);	/* step past sa, rounded to long alignment */
	}
	return (0);
}

/*
 * Build an mbuf containing a routing message of the given type, with a
 * zeroed header sized for the type followed by each sockaddr present in
 * rtinfo (long-aligned).  Also sets rtinfo->rti_addrs as a side effect.
 * Returns NULL if no mbuf is available or the payload did not fit.
 */
static struct mbuf *
rt_msg1(type, rtinfo)
	int type;
	register struct rt_addrinfo *rtinfo;
{
	register struct rt_msghdr *rtm;
	register struct mbuf *m;
	register int i;
	register struct sockaddr *sa;
	int len, dlen;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return (m);
	/* Header length depends on the message type being fabricated. */
	switch (type) {

	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof(struct ifa_msghdr);
		break;

	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		len = sizeof(struct ifma_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof(struct if_msghdr);
		break;

	default:
		len = sizeof(struct rt_msghdr);
	}
	/* All header types must fit in a single mbuf header. */
	if (len > MHLEN)
		panic("rt_msg1");
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = 0;
	rtm = mtod(m, struct rt_msghdr *);
	bzero((caddr_t)rtm, len);
	for (i = 0; i < RTAX_MAX; i++) {
		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;
		rtinfo->rti_addrs |= (1 << i);
		dlen = ROUNDUP(sa->sa_len);
		m_copyback(m, len, dlen, (caddr_t)sa);
		len += dlen;
	}
	/* m_copyback failed to grow the chain; give up rather than send short. */
	if (m->m_pkthdr.len != len) {
		m_freem(m);
		return (NULL);
	}
	rtm->rtm_msglen = len;
	rtm->rtm_version = RTM_VERSION;
	rtm->rtm_type = type;
	return (m);
}

/*
 * Size (and optionally serialize into cp) a routing message of the given
 * type for rtinfo.  With cp == NULL it only computes the length; when the
 * caller is a sysctl walker (w != NULL) it additionally (re)allocates
 * w->w_tmem to that length and serializes into it on a second pass.
 * Returns the total message length.
 * NOTE(review): unlike rt_msg1 there is no RTM_NEWMADDR/RTM_DELMADDR case
 * here, so those types get a full rt_msghdr-sized header -- presumably
 * intentional since they are only built via rt_msg1; confirm.
 */
static int
rt_msg2(type, rtinfo, cp, w)
	int type;
	register struct rt_addrinfo *rtinfo;
	caddr_t cp;
	struct walkarg *w;
{
	register int i;
	int len, dlen, second_time = 0;
	caddr_t cp0;

	rtinfo->rti_addrs = 0;
again:
	switch (type) {

	case RTM_DELADDR:
	case RTM_NEWADDR:
		len = sizeof(struct ifa_msghdr);
		break;

	case RTM_IFINFO:
		len = sizeof(struct if_msghdr);
		break;

	default:
		len = sizeof(struct rt_msghdr);
	}
	cp0 = cp;
	if (cp0)
		cp += len;	/* reserve room for the fixed header */
	for (i = 0; i < RTAX_MAX; i++) {
		register struct sockaddr *sa;

		if ((sa = rtinfo->rti_info[i]) == 0)
			continue;
		rtinfo->rti_addrs |= (1 << i);
		dlen = ROUNDUP(sa->sa_len);
		if (cp) {
			bcopy((caddr_t)sa, cp, (unsigned)dlen);
			cp += dlen;
		}
		len += dlen;
	}
	/* Sysctl path: grow the scratch buffer, then loop once to serialize. */
	if (cp == 0 && w != NULL && !second_time) {
		register struct walkarg *rw = w;

		if (rw->w_req) {
			if (rw->w_tmemsize < len) {
				if (rw->w_tmem)
					FREE(rw->w_tmem, M_RTABLE);
				rw->w_tmem = (caddr_t)
					_MALLOC(len, M_RTABLE, M_NOWAIT);
				if (rw->w_tmem)
					rw->w_tmemsize = len;
			}
			if (rw->w_tmem) {
				cp = rw->w_tmem;
				second_time = 1;
				goto again;
			}
		}
	}
	if (cp) {
		register struct rt_msghdr *rtm = (struct rt_msghdr *)cp0;

		rtm->rtm_version = RTM_VERSION;
		rtm->rtm_type = type;
		rtm->rtm_msglen = len;
	}
	return (len);
}

/*
 * This routine is called to generate a message from the routing
 * socket indicating that a redirect has occurred, a routing lookup
 * has failed, or that a protocol has detected timeouts to a particular
 * destination.
 */
void
rt_missmsg(type, rtinfo, flags, error)
	int type, flags, error;
	register struct rt_addrinfo *rtinfo;
{
	register struct rt_msghdr *rtm;
	register struct mbuf *m;
	struct sockaddr *sa = rtinfo->rti_info[RTAX_DST];

	/* Nobody listening on routing sockets: don't bother building it. */
	if (route_cb.any_count == 0)
		return;
	m = rt_msg1(type, rtinfo);
	if (m == 0)
		return;		/* mbuf shortage: message silently dropped */
	rtm = mtod(m, struct rt_msghdr *);
	rtm->rtm_flags = RTF_DONE | flags;
	rtm->rtm_errno = error;
	rtm->rtm_addrs = rtinfo->rti_addrs;
	/* Deliver only to listeners of the destination's address family. */
	route_proto.sp_protocol = sa ? sa->sa_family : 0;
	raw_input(m, &route_proto, &route_src, &route_dst);
}

/*
 * This routine is called to generate a message from the routing
 * socket indicating that the status of a network interface has changed.
 */
void
rt_ifmsg(ifp)
	register struct ifnet *ifp;
{
	register struct if_msghdr *ifm;
	struct mbuf *m;
	struct rt_addrinfo info;

	if (route_cb.any_count == 0)
		return;
	/* RTM_IFINFO carries no sockaddrs; info is passed zeroed. */
	bzero((caddr_t)&info, sizeof(info));
	m = rt_msg1(RTM_IFINFO, &info);
	if (m == 0)
		return;
	ifm = mtod(m, struct if_msghdr *);
	ifm->ifm_index = ifp->if_index;
	ifm->ifm_flags = (u_short)ifp->if_flags;
	ifm->ifm_data = ifp->if_data;
	ifm->ifm_addrs = 0;
	/* protocol 0: deliver to all routing-socket listeners */
	route_proto.sp_protocol = 0;
	raw_input(m, &route_proto, &route_src, &route_dst);
}

/*
 * This is called to generate messages from the routing socket
 * indicating a network interface has had addresses associated with it.
 * if we ever reverse the logic and replace messages TO the routing
 * socket indicate a request to configure interfaces, then it will
 * be unnecessary as the routing socket will automatically generate
 * copies of it.
 */
void
rt_newaddrmsg(cmd, ifa, error, rt)
	int cmd, error;
	register struct ifaddr *ifa;
	register struct rtentry *rt;
{
	struct rt_addrinfo info;
	struct sockaddr *sa = 0;
	int pass;
	struct mbuf *m = 0;
	struct ifnet *ifp = ifa->ifa_ifp;

	if (route_cb.any_count == 0)
		return;
	/*
	 * Two messages per call, ordered so the address message surrounds
	 * the route message correctly: NEWADDR before RTM_ADD on add,
	 * RTM_DELETE before DELADDR on delete.
	 */
	for (pass = 1; pass < 3; pass++) {
		bzero((caddr_t)&info, sizeof(info));
		if ((cmd == RTM_ADD && pass == 1) ||
		    (cmd == RTM_DELETE && pass == 2)) {
			register struct ifa_msghdr *ifam;
			int ncmd = cmd == RTM_ADD ? RTM_NEWADDR : RTM_DELADDR;

			/* ifaaddr/ifpaddr/netmask/brdaddr are file-scope
			 * macros over info.rti_info[] slots. */
			ifaaddr = sa = ifa->ifa_addr;
			ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr;
			netmask = ifa->ifa_netmask;
			brdaddr = ifa->ifa_dstaddr;
			if ((m = rt_msg1(ncmd, &info)) == NULL)
				continue;
			ifam = mtod(m, struct ifa_msghdr *);
			ifam->ifam_index = ifp->if_index;
			ifam->ifam_metric = ifa->ifa_metric;
			ifam->ifam_flags = ifa->ifa_flags;
			ifam->ifam_addrs = info.rti_addrs;
		}
		if ((cmd == RTM_ADD && pass == 2) ||
		    (cmd == RTM_DELETE && pass == 1)) {
			register struct rt_msghdr *rtm;

			if (rt == 0)
				continue;	/* no route to report */
			netmask = rt_mask(rt);
			dst = sa = rt_key(rt);
			gate = rt->rt_gateway;
			if ((m = rt_msg1(cmd, &info)) == NULL)
				continue;
			rtm = mtod(m, struct rt_msghdr *);
			rtm->rtm_index = ifp->if_index;
			rtm->rtm_flags |= rt->rt_flags;
			rtm->rtm_errno = error;
			rtm->rtm_addrs = info.rti_addrs;
		}
		route_proto.sp_protocol = sa ? sa->sa_family : 0;
		raw_input(m, &route_proto, &route_src, &route_dst);
	}
}

/*
 * This is the analogue to the rt_newaddrmsg which performs the same
 * function but for multicast group memberships. This is easier since
 * there is no route state to worry about.
+ */ +void +rt_newmaddrmsg(cmd, ifma) + int cmd; + struct ifmultiaddr *ifma; +{ + struct rt_addrinfo info; + struct mbuf *m = 0; + struct ifnet *ifp = ifma->ifma_ifp; + struct ifma_msghdr *ifmam; + + if (route_cb.any_count == 0) + return; + + bzero((caddr_t)&info, sizeof(info)); + ifaaddr = ifma->ifma_addr; + ifpaddr = ifp->if_addrhead.tqh_first->ifa_addr; + /* + * If a link-layer address is present, present it as a ``gateway'' + * (similarly to how ARP entries, e.g., are presented). + */ + gate = ifma->ifma_lladdr; + if ((m = rt_msg1(cmd, &info)) == NULL) + return; + ifmam = mtod(m, struct ifma_msghdr *); + ifmam->ifmam_index = ifp->if_index; + ifmam->ifmam_addrs = info.rti_addrs; + route_proto.sp_protocol = ifma->ifma_addr->sa_family; + raw_input(m, &route_proto, &route_src, &route_dst); +} + +/* + * This is used in dumping the kernel table via sysctl(). + */ +int +sysctl_dumpentry(rn, vw) + struct radix_node *rn; + void *vw; +{ + register struct walkarg *w = vw; + register struct rtentry *rt = (struct rtentry *)rn; + int error = 0, size; + struct rt_addrinfo info; + + if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg)) + return 0; + bzero((caddr_t)&info, sizeof(info)); + dst = rt_key(rt); + gate = rt->rt_gateway; + netmask = rt_mask(rt); + genmask = rt->rt_genmask; + size = rt_msg2(RTM_GET, &info, 0, w); + if (w->w_req && w->w_tmem) { + register struct rt_msghdr *rtm = (struct rt_msghdr *)w->w_tmem; + + rtm->rtm_flags = rt->rt_flags; + rtm->rtm_use = rt->rt_use; + rtm->rtm_rmx = rt->rt_rmx; + rtm->rtm_index = rt->rt_ifp->if_index; + rtm->rtm_errno = rtm->rtm_pid = rtm->rtm_seq = 0; + rtm->rtm_addrs = info.rti_addrs; + error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size); + return (error); + } + return (error); +} + +int +sysctl_iflist(af, w) + int af; + register struct walkarg *w; +{ + register struct ifnet *ifp; + register struct ifaddr *ifa; + struct rt_addrinfo info; + int len, error = 0; + + bzero((caddr_t)&info, sizeof(info)); + for (ifp = 
ifnet.tqh_first; ifp; ifp = ifp->if_link.tqe_next) { + if (w->w_arg && w->w_arg != ifp->if_index) + continue; + ifa = ifp->if_addrhead.tqh_first; + ifpaddr = ifa->ifa_addr; + len = rt_msg2(RTM_IFINFO, &info, (caddr_t)0, w); + ifpaddr = 0; + if (w->w_req && w->w_tmem) { + register struct if_msghdr *ifm; + + ifm = (struct if_msghdr *)w->w_tmem; + ifm->ifm_index = ifp->if_index; + ifm->ifm_flags = (u_short)ifp->if_flags; + ifm->ifm_data = ifp->if_data; + ifm->ifm_addrs = info.rti_addrs; + error = SYSCTL_OUT(w->w_req,(caddr_t)ifm, len); + if (error) + return (error); + } + while ((ifa = ifa->ifa_link.tqe_next) != 0) { + if (af && af != ifa->ifa_addr->sa_family) + continue; + ifaaddr = ifa->ifa_addr; + netmask = ifa->ifa_netmask; + brdaddr = ifa->ifa_dstaddr; + len = rt_msg2(RTM_NEWADDR, &info, 0, w); + if (w->w_req && w->w_tmem) { + register struct ifa_msghdr *ifam; + + ifam = (struct ifa_msghdr *)w->w_tmem; + ifam->ifam_index = ifa->ifa_ifp->if_index; + ifam->ifam_flags = ifa->ifa_flags; + ifam->ifam_metric = ifa->ifa_metric; + ifam->ifam_addrs = info.rti_addrs; + error = SYSCTL_OUT(w->w_req, w->w_tmem, len); + if (error) + return (error); + } + } + ifaaddr = netmask = brdaddr = 0; + } + return (0); +} + + +static int +sysctl_rtsock SYSCTL_HANDLER_ARGS +{ + int *name = (int *)arg1; + u_int namelen = arg2; + register struct radix_node_head *rnh; + int i, s, error = EINVAL; + u_char af; + struct walkarg w; + + name ++; + namelen--; + if (req->newptr) + return (EPERM); + if (namelen != 3) + return (EINVAL); + af = name[0]; + Bzero(&w, sizeof(w)); + w.w_op = name[1]; + w.w_arg = name[2]; + w.w_req = req; + + s = splnet(); + switch (w.w_op) { + + case NET_RT_DUMP: + case NET_RT_FLAGS: + for (i = 1; i <= AF_MAX; i++) + if ((rnh = rt_tables[i]) && (af == 0 || af == i) && + (error = rnh->rnh_walktree(rnh, + sysctl_dumpentry, &w))) + break; + break; + + case NET_RT_IFLIST: + error = sysctl_iflist(af, &w); + } + splx(s); + if (w.w_tmem) + FREE(w.w_tmem, M_RTABLE); + return 
(error); +} + +SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, ""); + + + +/* + * Definitions of protocols supported in the ROUTE domain. + */ + +struct domain routedomain; /* or at least forward */ + + +static struct protosw routesw[] = { +{ SOCK_RAW, &routedomain, 0, PR_ATOMIC|PR_ADDR, + 0, route_output, raw_ctlinput, 0, + 0, + raw_init, 0, 0, 0, + 0, &route_usrreqs, 0, 0 +} +}; + +struct domain routedomain = + { PF_ROUTE, "route", route_init, 0, 0, + routesw}; + +DOMAIN_SET(route); + +#if MIP6 +#include +#endif diff --git a/bsd/net/rtsock_mip.c b/bsd/net/rtsock_mip.c new file mode 100644 index 000000000..6a6662206 --- /dev/null +++ b/bsd/net/rtsock_mip.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME$ */ + +/* to be included from net/rtsock.c - ugly but necessary for portability */ +/* + * Mobile IPv6 addition. + * Send a routing message to all routing socket listeners. 
+ */ +void +rt_mip6msg(cmd, ifp, rt) + int cmd; + struct ifnet *ifp; + register struct rtentry *rt; +{ + struct rt_addrinfo info; + struct sockaddr *sa = 0; + struct mbuf *m = 0; + register struct rt_msghdr *rtm; + +#ifdef MIP6_DEBUG + printf("route_cb.any_count = %d\n", route_cb.any_count); +#endif + bzero((caddr_t)&info, sizeof(info)); + + if (rt == 0 || ifp == 0) + return; + netmask = rt_mask(rt); + dst = sa = rt_key(rt); + gate = rt->rt_gateway; + genmask = rt->rt_genmask; + if ((m = rt_msg1(cmd, &info)) == NULL) { +#ifdef MIP6_DEBUG + printf("failure... \n"); +#endif + return; + } + rtm = mtod(m, struct rt_msghdr *); + rtm->rtm_index = ifp->if_index; + rtm->rtm_flags |= rt->rt_flags; + rtm->rtm_rmx = rt->rt_rmx; + rtm->rtm_addrs = info.rti_addrs; + rtm->rtm_flags |= RTF_DONE; + + route_proto.sp_protocol = sa ? sa->sa_family : 0; +#ifdef __bsdi__ + raw_input(m, NULL, &route_proto, &route_src, &route_dst); +#else + raw_input(m, &route_proto, &route_src, &route_dst); +#endif +} diff --git a/bsd/net/slcompress.c b/bsd/net/slcompress.c new file mode 100644 index 000000000..7875c4e9d --- /dev/null +++ b/bsd/net/slcompress.c @@ -0,0 +1,623 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)slcompress.c 8.2 (Berkeley) 4/16/94 + */ + +/* + * Routines to compress and uncompess tcp packets (for transmission + * over low speed serial lines. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * - Initial distribution. + * + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#ifndef SL_NO_STATS +#define INCR(counter) ++comp->counter; +#else +#define INCR(counter) +#endif + +#define BCMP(p1, p2, n) bcmp((char *)(p1), (char *)(p2), (int)(n)) +#define BCOPY(p1, p2, n) bcopy((char *)(p1), (char *)(p2), (int)(n)) +#ifndef KERNEL +#define ovbcopy bcopy +#endif + +void +sl_compress_init(comp, max_state) + struct slcompress *comp; + int max_state; +{ + register u_int i; + register struct cstate *tstate = comp->tstate; + + if (max_state == -1) { + max_state = MAX_STATES - 1; + bzero((char *)comp, sizeof(*comp)); + } else { + /* Don't reset statistics */ + bzero((char *)comp->tstate, sizeof(comp->tstate)); + bzero((char *)comp->rstate, sizeof(comp->rstate)); + } + for (i = max_state; i > 0; --i) { + tstate[i].cs_id = i; + tstate[i].cs_next = &tstate[i - 1]; + } + tstate[0].cs_next = &tstate[max_state]; + tstate[0].cs_id = 0; + comp->last_cs = &tstate[0]; + comp->last_recv = 255; + comp->last_xmit = 255; + comp->flags = SLF_TOSS; +} + + +/* ENCODE encodes a number that is known to be non-zero. ENCODEZ + * checks for zero (since zero has to be encoded in the long, 3 byte + * form). 
+ */ +#define ENCODE(n) { \ + if ((u_int16_t)(n) >= 256) { \ + *cp++ = 0; \ + cp[1] = (n); \ + cp[0] = (n) >> 8; \ + cp += 2; \ + } else { \ + *cp++ = (n); \ + } \ +} +#define ENCODEZ(n) { \ + if ((u_int16_t)(n) >= 256 || (u_int16_t)(n) == 0) { \ + *cp++ = 0; \ + cp[1] = (n); \ + cp[0] = (n) >> 8; \ + cp += 2; \ + } else { \ + *cp++ = (n); \ + } \ +} + +#define DECODEL(f) { \ + if (*cp == 0) {\ + (f) = htonl(ntohl(f) + ((cp[1] << 8) | cp[2])); \ + cp += 3; \ + } else { \ + (f) = htonl(ntohl(f) + (u_int32_t)*cp++); \ + } \ +} + +#define DECODES(f) { \ + if (*cp == 0) {\ + (f) = htons(ntohs(f) + ((cp[1] << 8) | cp[2])); \ + cp += 3; \ + } else { \ + (f) = htons(ntohs(f) + (u_int32_t)*cp++); \ + } \ +} + +#define DECODEU(f) { \ + if (*cp == 0) {\ + (f) = htons((cp[1] << 8) | cp[2]); \ + cp += 3; \ + } else { \ + (f) = htons((u_int32_t)*cp++); \ + } \ +} + +u_int +sl_compress_tcp(m, ip, comp, compress_cid) + struct mbuf *m; + register struct ip *ip; + struct slcompress *comp; + int compress_cid; +{ + register struct cstate *cs = comp->last_cs->cs_next; + register u_int hlen = ip->ip_hl; + register struct tcphdr *oth; + register struct tcphdr *th; + register u_int deltaS, deltaA; + register u_int changes = 0; + u_char new_seq[16]; + register u_char *cp = new_seq; + + /* + * Bail if this is an IP fragment or if the TCP packet isn't + * `compressible' (i.e., ACK isn't set or some other control bit is + * set). (We assume that the caller has already made sure the + * packet is IP proto TCP). + */ + if ((ip->ip_off & htons(0x3fff)) || m->m_len < 40) + return (TYPE_IP); + + th = (struct tcphdr *)&((int32_t *)ip)[hlen]; + if ((th->th_flags & (TH_SYN|TH_FIN|TH_RST|TH_ACK)) != TH_ACK) + return (TYPE_IP); + /* + * Packet is compressible -- we're going to send either a + * COMPRESSED_TCP or UNCOMPRESSED_TCP packet. Either way we need + * to locate (or create) the connection state. 
Special case the + * most recently used connection since it's most likely to be used + * again & we don't have to do any reordering if it's used. + */ + INCR(sls_packets) + if (ip->ip_src.s_addr != cs->cs_ip.ip_src.s_addr || + ip->ip_dst.s_addr != cs->cs_ip.ip_dst.s_addr || + *(int32_t *)th != ((int32_t *)&cs->cs_ip)[cs->cs_ip.ip_hl]) { + /* + * Wasn't the first -- search for it. + * + * States are kept in a circularly linked list with + * last_cs pointing to the end of the list. The + * list is kept in lru order by moving a state to the + * head of the list whenever it is referenced. Since + * the list is short and, empirically, the connection + * we want is almost always near the front, we locate + * states via linear search. If we don't find a state + * for the datagram, the oldest state is (re-)used. + */ + register struct cstate *lcs; + register struct cstate *lastcs = comp->last_cs; + + do { + lcs = cs; cs = cs->cs_next; + INCR(sls_searches) + if (ip->ip_src.s_addr == cs->cs_ip.ip_src.s_addr + && ip->ip_dst.s_addr == cs->cs_ip.ip_dst.s_addr + && *(int32_t *)th == + ((int32_t *)&cs->cs_ip)[cs->cs_ip.ip_hl]) + goto found; + } while (cs != lastcs); + + /* + * Didn't find it -- re-use oldest cstate. Send an + * uncompressed packet that tells the other side what + * connection number we're using for this conversation. + * Note that since the state list is circular, the oldest + * state points to the newest and we only need to set + * last_cs to update the lru linkage. + */ + INCR(sls_misses) + comp->last_cs = lcs; + hlen += th->th_off; + hlen <<= 2; + if (hlen > m->m_len) + return TYPE_IP; + goto uncompressed; + + found: + /* + * Found it -- move to the front on the connection list. + */ + if (cs == lastcs) + comp->last_cs = lcs; + else { + lcs->cs_next = cs->cs_next; + cs->cs_next = lastcs->cs_next; + lastcs->cs_next = cs; + } + } + + /* + * Make sure that only what we expect to change changed. 
The first + * line of the `if' checks the IP protocol version, header length & + * type of service. The 2nd line checks the "Don't fragment" bit. + * The 3rd line checks the time-to-live and protocol (the protocol + * check is unnecessary but costless). The 4th line checks the TCP + * header length. The 5th line checks IP options, if any. The 6th + * line checks TCP options, if any. If any of these things are + * different between the previous & current datagram, we send the + * current datagram `uncompressed'. + */ + oth = (struct tcphdr *)&((int32_t *)&cs->cs_ip)[hlen]; + deltaS = hlen; + hlen += th->th_off; + hlen <<= 2; + if (hlen > m->m_len) + return TYPE_IP; + + if (((u_int16_t *)ip)[0] != ((u_int16_t *)&cs->cs_ip)[0] || + ((u_int16_t *)ip)[3] != ((u_int16_t *)&cs->cs_ip)[3] || + ((u_int16_t *)ip)[4] != ((u_int16_t *)&cs->cs_ip)[4] || + th->th_off != oth->th_off || + (deltaS > 5 && + BCMP(ip + 1, &cs->cs_ip + 1, (deltaS - 5) << 2)) || + (th->th_off > 5 && + BCMP(th + 1, oth + 1, (th->th_off - 5) << 2))) + goto uncompressed; + + /* + * Figure out which of the changing fields changed. The + * receiver expects changes in the order: urgent, window, + * ack, seq (the order minimizes the number of temporaries + * needed in this section of code). + */ + if (th->th_flags & TH_URG) { + deltaS = ntohs(th->th_urp); + ENCODEZ(deltaS); + changes |= NEW_U; + } else if (th->th_urp != oth->th_urp) + /* argh! URG not set but urp changed -- a sensible + * implementation should never do this but RFC793 + * doesn't prohibit the change so we have to deal + * with it. 
*/ + goto uncompressed; + + deltaS = (u_int16_t)(ntohs(th->th_win) - ntohs(oth->th_win)); + if (deltaS) { + ENCODE(deltaS); + changes |= NEW_W; + } + + deltaA = ntohl(th->th_ack) - ntohl(oth->th_ack); + if (deltaA) { + if (deltaA > 0xffff) + goto uncompressed; + ENCODE(deltaA); + changes |= NEW_A; + } + + deltaS = ntohl(th->th_seq) - ntohl(oth->th_seq); + if (deltaS) { + if (deltaS > 0xffff) + goto uncompressed; + ENCODE(deltaS); + changes |= NEW_S; + } + + switch(changes) { + + case 0: + /* + * Nothing changed. If this packet contains data and the + * last one didn't, this is probably a data packet following + * an ack (normal on an interactive connection) and we send + * it compressed. Otherwise it's probably a retransmit, + * retransmitted ack or window probe. Send it uncompressed + * in case the other side missed the compressed version. + */ + if (ip->ip_len != cs->cs_ip.ip_len && + ntohs(cs->cs_ip.ip_len) == hlen) + break; + + /* (fall through) */ + + case SPECIAL_I: + case SPECIAL_D: + /* + * actual changes match one of our special case encodings -- + * send packet uncompressed. + */ + goto uncompressed; + + case NEW_S|NEW_A: + if (deltaS == deltaA && + deltaS == ntohs(cs->cs_ip.ip_len) - hlen) { + /* special case for echoed terminal traffic */ + changes = SPECIAL_I; + cp = new_seq; + } + break; + + case NEW_S: + if (deltaS == ntohs(cs->cs_ip.ip_len) - hlen) { + /* special case for data xfer */ + changes = SPECIAL_D; + cp = new_seq; + } + break; + } + + deltaS = ntohs(ip->ip_id) - ntohs(cs->cs_ip.ip_id); + if (deltaS != 1) { + ENCODEZ(deltaS); + changes |= NEW_I; + } + if (th->th_flags & TH_PUSH) + changes |= TCP_PUSH_BIT; + /* + * Grab the cksum before we overwrite it below. Then update our + * state with this packet's header. + */ + deltaA = ntohs(th->th_sum); + BCOPY(ip, &cs->cs_ip, hlen); + + /* + * We want to use the original packet as our compressed packet. + * (cp - new_seq) is the number of bytes we need for compressed + * sequence numbers. 
In addition we need one byte for the change + * mask, one for the connection id and two for the tcp checksum. + * So, (cp - new_seq) + 4 bytes of header are needed. hlen is how + * many bytes of the original packet to toss so subtract the two to + * get the new packet size. + */ + deltaS = cp - new_seq; + cp = (u_char *)ip; + if (compress_cid == 0 || comp->last_xmit != cs->cs_id) { + comp->last_xmit = cs->cs_id; + hlen -= deltaS + 4; + cp += hlen; + *cp++ = changes | NEW_C; + *cp++ = cs->cs_id; + } else { + hlen -= deltaS + 3; + cp += hlen; + *cp++ = changes; + } + m->m_len -= hlen; + m->m_data += hlen; + *cp++ = deltaA >> 8; + *cp++ = deltaA; + BCOPY(new_seq, cp, deltaS); + INCR(sls_compressed) + return (TYPE_COMPRESSED_TCP); + + /* + * Update connection state cs & send uncompressed packet ('uncompressed' + * means a regular ip/tcp packet but with the 'conversation id' we hope + * to use on future compressed packets in the protocol field). + */ +uncompressed: + BCOPY(ip, &cs->cs_ip, hlen); + ip->ip_p = cs->cs_id; + comp->last_xmit = cs->cs_id; + return (TYPE_UNCOMPRESSED_TCP); +} + + +int +sl_uncompress_tcp(bufp, len, type, comp) + u_char **bufp; + int len; + u_int type; + struct slcompress *comp; +{ + u_char *hdr, *cp; + int hlen, vjlen; + + cp = bufp? *bufp: NULL; + vjlen = sl_uncompress_tcp_core(cp, len, len, type, comp, &hdr, &hlen); + if (vjlen < 0) + return (0); /* error */ + if (vjlen == 0) + return (len); /* was uncompressed already */ + + cp += vjlen; + len -= vjlen; + + /* + * At this point, cp points to the first byte of data in the + * packet. If we're not aligned on a 4-byte boundary, copy the + * data down so the ip & tcp headers will be aligned. Then back up + * cp by the tcp/ip header length to make room for the reconstructed + * header (we assume the packet we were handed has enough space to + * prepend 128 bytes of header). 
+ */ + if ((intptr_t)cp & 3) { + if (len > 0) + (void) ovbcopy(cp, (caddr_t)((intptr_t)cp &~ 3), len); + cp = (u_char *)((intptr_t)cp &~ 3); + } + cp -= hlen; + len += hlen; + BCOPY(hdr, cp, hlen); + + *bufp = cp; + return (len); +} + +/* + * Uncompress a packet of total length total_len. The first buflen + * bytes are at buf; this must include the entire (compressed or + * uncompressed) TCP/IP header. This procedure returns the length + * of the VJ header, with a pointer to the uncompressed IP header + * in *hdrp and its length in *hlenp. + */ +int +sl_uncompress_tcp_core(buf, buflen, total_len, type, comp, hdrp, hlenp) + u_char *buf; + int buflen, total_len; + u_int type; + struct slcompress *comp; + u_char **hdrp; + u_int *hlenp; +{ + register u_char *cp; + register u_int hlen, changes; + register struct tcphdr *th; + register struct cstate *cs; + register struct ip *ip; + register u_int16_t *bp; + register u_int vjlen; + + switch (type) { + + case TYPE_UNCOMPRESSED_TCP: + ip = (struct ip *) buf; + if (ip->ip_p >= MAX_STATES) + goto bad; + cs = &comp->rstate[comp->last_recv = ip->ip_p]; + comp->flags &=~ SLF_TOSS; + ip->ip_p = IPPROTO_TCP; + /* + * Calculate the size of the TCP/IP header and make sure that + * we don't overflow the space we have available for it. + */ + hlen = ip->ip_hl << 2; + if (hlen + sizeof(struct tcphdr) > buflen) + goto bad; + hlen += ((struct tcphdr *)&((char *)ip)[hlen])->th_off << 2; + if (hlen > MAX_HDR || hlen > buflen) + goto bad; + BCOPY(ip, &cs->cs_ip, hlen); + cs->cs_hlen = hlen; + INCR(sls_uncompressedin) + *hdrp = (u_char *) &cs->cs_ip; + *hlenp = hlen; + return (0); + + default: + goto bad; + + case TYPE_COMPRESSED_TCP: + break; + } + /* We've got a compressed packet. */ + INCR(sls_compressedin) + cp = buf; + changes = *cp++; + if (changes & NEW_C) { + /* Make sure the state index is in range, then grab the state. + * If we have a good state index, clear the 'discard' flag. 
*/ + if (*cp >= MAX_STATES) + goto bad; + + comp->flags &=~ SLF_TOSS; + comp->last_recv = *cp++; + } else { + /* this packet has an implicit state index. If we've + * had a line error since the last time we got an + * explicit state index, we have to toss the packet. */ + if (comp->flags & SLF_TOSS) { + INCR(sls_tossed) + return (-1); + } + } + cs = &comp->rstate[comp->last_recv]; + hlen = cs->cs_ip.ip_hl << 2; + th = (struct tcphdr *)&((u_char *)&cs->cs_ip)[hlen]; + th->th_sum = htons((*cp << 8) | cp[1]); + cp += 2; + if (changes & TCP_PUSH_BIT) + th->th_flags |= TH_PUSH; + else + th->th_flags &=~ TH_PUSH; + + switch (changes & SPECIALS_MASK) { + case SPECIAL_I: + { + register u_int i = ntohs(cs->cs_ip.ip_len) - cs->cs_hlen; + th->th_ack = htonl(ntohl(th->th_ack) + i); + th->th_seq = htonl(ntohl(th->th_seq) + i); + } + break; + + case SPECIAL_D: + th->th_seq = htonl(ntohl(th->th_seq) + ntohs(cs->cs_ip.ip_len) + - cs->cs_hlen); + break; + + default: + if (changes & NEW_U) { + th->th_flags |= TH_URG; + DECODEU(th->th_urp) + } else + th->th_flags &=~ TH_URG; + if (changes & NEW_W) + DECODES(th->th_win) + if (changes & NEW_A) + DECODEL(th->th_ack) + if (changes & NEW_S) + DECODEL(th->th_seq) + break; + } + if (changes & NEW_I) { + DECODES(cs->cs_ip.ip_id) + } else + cs->cs_ip.ip_id = htons(ntohs(cs->cs_ip.ip_id) + 1); + + /* + * At this point, cp points to the first byte of data in the + * packet. Fill in the IP total length and update the IP + * header checksum. 
+ */ + vjlen = cp - buf; + buflen -= vjlen; + if (buflen < 0) + /* we must have dropped some characters (crc should detect + * this but the old slip framing won't) */ + goto bad; + + total_len += cs->cs_hlen - vjlen; + cs->cs_ip.ip_len = htons(total_len); + + /* recompute the ip header checksum */ + bp = (u_int16_t *) &cs->cs_ip; + cs->cs_ip.ip_sum = 0; + for (changes = 0; hlen > 0; hlen -= 2) + changes += *bp++; + changes = (changes & 0xffff) + (changes >> 16); + changes = (changes & 0xffff) + (changes >> 16); + cs->cs_ip.ip_sum = ~ changes; + + *hdrp = (u_char *) &cs->cs_ip; + *hlenp = cs->cs_hlen; + return vjlen; + +bad: + comp->flags |= SLF_TOSS; + INCR(sls_errorin) + return (-1); +} diff --git a/bsd/net/slcompress.h b/bsd/net/slcompress.h new file mode 100644 index 000000000..742a22a22 --- /dev/null +++ b/bsd/net/slcompress.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Definitions for tcp compression routines. + * + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Van Jacobson (van@helios.ee.lbl.gov), Dec 31, 1989: + * - Initial distribution. 
+ */ + +#ifndef _NET_SLCOMPRESS_H_ +#define _NET_SLCOMPRESS_H_ + +#include + +#define MAX_STATES 16 /* must be > 2 and < 256 */ +#define MAX_HDR MLEN /* XXX 4bsd-ism: should really be 128 */ + +/* + * Compressed packet format: + * + * The first octet contains the packet type (top 3 bits), TCP + * 'push' bit, and flags that indicate which of the 4 TCP sequence + * numbers have changed (bottom 5 bits). The next octet is a + * conversation number that associates a saved IP/TCP header with + * the compressed packet. The next two octets are the TCP checksum + * from the original datagram. The next 0 to 15 octets are + * sequence number changes, one change per bit set in the header + * (there may be no changes and there are two special cases where + * the receiver implicitly knows what changed -- see below). + * + * There are 5 numbers which can change (they are always inserted + * in the following order): TCP urgent pointer, window, + * acknowledgement, sequence number and IP ID. (The urgent pointer + * is different from the others in that its value is sent, not the + * change in value.) Since typical use of SLIP links is biased + * toward small packets (see comments on MTU/MSS below), changes + * use a variable length coding with one octet for numbers in the + * range 1 - 255 and 3 octets (0, MSB, LSB) for numbers in the + * range 256 - 65535 or 0. (If the change in sequence number or + * ack is more than 65535, an uncompressed packet is sent.) + */ + +/* + * Packet types (must not conflict with IP protocol version) + * + * The top nibble of the first octet is the packet type. There are + * three possible types: IP (not proto TCP or tcp with one of the + * control flags set); uncompressed TCP (a normal IP/TCP packet but + * with the 8-bit protocol field replaced by an 8-bit connection id -- + * this type of packet syncs the sender & receiver); and compressed + * TCP (described above). 
+ * + * LSB of 4-bit field is TCP "PUSH" bit (a worthless anachronism) and + * is logically part of the 4-bit "changes" field that follows. Top + * three bits are actual packet type. For backward compatibility + * and in the interest of conserving bits, numbers are chosen so the + * IP protocol version number (4) which normally appears in this nibble + * means "IP packet". + */ + +/* packet types */ +#define TYPE_IP 0x40 +#define TYPE_UNCOMPRESSED_TCP 0x70 +#define TYPE_COMPRESSED_TCP 0x80 +#define TYPE_ERROR 0x00 + +/* Bits in first octet of compressed packet */ +#define NEW_C 0x40 /* flag bits for what changed in a packet */ +#define NEW_I 0x20 +#define NEW_S 0x08 +#define NEW_A 0x04 +#define NEW_W 0x02 +#define NEW_U 0x01 + +/* reserved, special-case values of above */ +#define SPECIAL_I (NEW_S|NEW_W|NEW_U) /* echoed interactive traffic */ +#define SPECIAL_D (NEW_S|NEW_A|NEW_W|NEW_U) /* unidirectional data */ +#define SPECIALS_MASK (NEW_S|NEW_A|NEW_W|NEW_U) + +#define TCP_PUSH_BIT 0x10 + + +/* + * "state" data for each active tcp conversation on the wire. This is + * basically a copy of the entire IP/TCP header from the last packet + * we saw from the conversation together with a small identifier + * the transmit & receive ends of the line use to locate saved header. + */ +struct cstate { + struct cstate *cs_next; /* next most recently used cstate (xmit only) */ + u_int16_t cs_hlen; /* size of hdr (receive only) */ + u_char cs_id; /* connection # associated with this state */ + u_char cs_filler; + union { + char csu_hdr[MAX_HDR]; + struct ip csu_ip; /* ip/tcp hdr from most recent packet */ + } slcs_u; +}; +#define cs_ip slcs_u.csu_ip +#define cs_hdr slcs_u.csu_hdr + +/* + * all the state data for one serial line (we need one of these + * per line). + */ +struct slcompress { + struct cstate *last_cs; /* most recently used tstate */ + u_char last_recv; /* last rcvd conn. id */ + u_char last_xmit; /* last sent conn. 
id */ + u_int16_t flags; +#ifndef SL_NO_STATS + int sls_packets; /* outbound packets */ + int sls_compressed; /* outbound compressed packets */ + int sls_searches; /* searches for connection state */ + int sls_misses; /* times couldn't find conn. state */ + int sls_uncompressedin; /* inbound uncompressed packets */ + int sls_compressedin; /* inbound compressed packets */ + int sls_errorin; /* inbound unknown type packets */ + int sls_tossed; /* inbound packets tossed because of error */ +#endif + struct cstate tstate[MAX_STATES]; /* xmit connection states */ + struct cstate rstate[MAX_STATES]; /* receive connection states */ +}; +/* flag values */ +#define SLF_TOSS 1 /* tossing rcvd frames because of input err */ + +void sl_compress_init __P((struct slcompress *, int)); +u_int sl_compress_tcp __P((struct mbuf *, + struct ip *, struct slcompress *, int)); +int sl_uncompress_tcp __P((u_char **, int, u_int, struct slcompress *)); +int sl_uncompress_tcp_core __P((u_char *, int, int, u_int, + struct slcompress *, u_char **, u_int *)); + +#endif /* !_NET_SLCOMPRESS_H_ */ diff --git a/bsd/net/slip.h b/bsd/net/slip.h new file mode 100644 index 000000000..b46086862 --- /dev/null +++ b/bsd/net/slip.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)slip.h 8.1 (Berkeley) 2/12/94 + */ + +#ifndef _NET_SLIP_H_ +#define _NET_SLIP_H_ + +/* Ioctls operating on SLIP ttys. */ +#define SLIOCGUNIT _IOR('t', 88, int) /* get slip unit number */ +#define SLIOCSKEEPAL _IOW('t', 84, int) /* set keepalive */ +#define SLIOCSOUTFILL _IOW('t', 83, int) /* set out fill time */ +#define SLIOCGKEEPAL _IOR('t', 82, int) /* get keepalive time */ +#define SLIOCGOUTFILL _IOR('t', 81, int) /* get out fill time */ +#define SLIOCSUNIT _IOW('t', 80, int) /* set slip unit number */ + +/* + * Definitions of the pseudo-link-level header attached to slip + * packets grabbed by the packet filter (bpf) traffic monitor. + */ +#define SLIP_HDRLEN 16 /* BPF SLIP header length */ + +/* Offsets into BPF SLIP header. */ +#define SLX_DIR 0 /* direction; see below */ +#define SLX_CHDR 1 /* compressed header data */ +#define CHDR_LEN 15 /* length of compressed header data */ + +#define SLIPDIR_IN 0 /* incoming */ +#define SLIPDIR_OUT 1 /* outgoing */ + +#endif /* !_NET_SLIP_H */ diff --git a/bsd/net/tokendefs.h b/bsd/net/tokendefs.h new file mode 100644 index 000000000..2e0be8ead --- /dev/null +++ b/bsd/net/tokendefs.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. + * + * tokendefs.h - Token-Ring MAC header definitions. + * + * HISTORY + * 8-Oct-92 Joel Greenblatt at NeXT + * created + */ +#ifndef _NET_TOKENDEFS_H_ +#define _NET_TOKENDEFS_H_ + + +#include + +/* + * Token ring address - 6 octets + */ +#define NUM_TR_ADDR_BYTES 6 + +struct token_addr { + u_char token_addr_octet[NUM_TR_ADDR_BYTES]; +}; + +#define ta_byte token_addr_octet + +typedef struct token_addr token_addr_t; + +/* + * MAC header size + */ +#define MAC_HDR_MIN (1+1+6+6) /* MAC hdr size w/o ri field */ +#define MAC_HDR_MAX (MAC_HDR_MIN + RISIZE) /* MAC hdr size w/max ri */ + +/* + * The maximum size of the MAC information field as spec'd by ISO 8802/5. + */ +#define MAC_INFO_4MB 4472 /* max size of mac info field -- 4 Mbs */ +#define MAC_INFO_16MB 17800 /* max size of mac info field -- 16 Mbs */ + +/* + * Maximum DMA packet sizes for 4 & 16 Mbit assuming no CRC. + */ +#define MAC_DMA_MAX_4MB (MAC_HDR_MAX + MAC_INFO_4MB) +#define MAC_DMA_MAX_16MB (MAC_HDR_MAX + MAC_INFO_16MB) + +/* + * Routing control field. 
+ */ +typedef struct { + +#if __BIG_ENDIAN__ + unsigned char bcast : 3, /* broadcast */ + len : 5; /* length */ + unsigned char dir : 1, /* direction */ + longf : 3, /* longest frame */ + rsrvd : 4; /* reserved */ + +#elif __LITTLE_ENDIAN__ + unsigned char len : 5, /* length */ + bcast : 3; /* broadcast */ + unsigned char rsrvd : 4, /* reserved */ + longf : 3, /* longest frame */ + dir : 1; /* direction */ +#else + error +#endif +} routing_ctl_t; + +/* bcast field ... */ +#define BI_SPECIFIC 0 /* b'0xx': non-broadcast (specific route) */ +#define BI_AR_BCAST 4 /* b'10x': all-routes broadcast */ +#define BI_SR_BCAST 6 /* b'11x': single-route broadcast */ + +/* + * longf field + */ +#define LF_S516 0 +#define LF_S1500 1 +#define LF_S2052 2 +#define LF_S4472 3 +#define LF_S8144 4 +#define LF_S11407 5 +#define LF_S17800 6 +#define LF_BCAST 7 /* All-routes broadcast */ + +#define LFB_4MB LF_S4472 /* encoded max info -- 4 Mb */ +#define LFB_16MB LF_S17800 /* encoded max info -- 16 Mb */ + +/* + * Source Routing field (2-18 bytes, must be even) + */ + +#define RISIZE 18 /* max size (bytes) of 802.5 routing field */ + +typedef struct { + routing_ctl_t rc; + u_char sn[RISIZE-sizeof(routing_ctl_t)]; +} sroute_t; + +/* + * Token Ring MAC header (IEEE 802.5, ISO 8802/5) + */ + +#define TR_DA_SIZE 6 + +typedef struct { + u_char ac; /* PPPTMRRR; PPP = token priority */ + u_char fc; /* FFrrZZZZ; FF = frame type */ +#define TR_FC_MASK 0xc0 /* mask for frame-type */ +#define TR_FC_MAC 0x00 /* frame-type = mac frame */ +#define TR_FC_DATA 0x40 /* frame-type = non-mac (data frame) */ + u_char da[TR_DA_SIZE]; /* destination address */ + u_char sa[TR_DA_SIZE]; /* source address */ +#define TR_RII 0x80 /* routing info indicator bit */ + sroute_t ri; /* routing information field */ +} tokenHeader_t; + +/* + * token-ring netif definitions + */ +#define IFTYPE_TOKENRING "4/16Mb Token-Ring" /* netif type-string */ + +/* + * Error codes + */ +#define TRINGDOWN ENETDOWN /* interface down */ 
+#define TNOBUFS ENOBUFS /* transmit queue full error */ +#define TBADDA EINVAL /* bad dest addr */ +#define TBADFSIZE EMSGSIZE /* bad frame size */ + +#endif /* ! _NET_TOKENDEFS_H_ */ diff --git a/bsd/net/tokensr.h b/bsd/net/tokensr.h new file mode 100644 index 000000000..8e9909acb --- /dev/null +++ b/bsd/net/tokensr.h @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1993 NeXT Computer, Inc. All rights reserved. + * + * tokensr.h - Token-ring IEEE 802.5 source routing utility functions. + * + * We currently make these functions static inlines. These should + * be considered for movement to a library and made public (after + * sanitizing API). + * + * HISTORY + * + * 22-Jul-94 John Immordino (jimmord) at NeXT + * Converted static array of source routes to a hash table. + * Loosely coupled hash table entries to arp table entries, ie. + * when hash table is full, delete the first entry for which there + * is no arp entry before inserting the next source route. + * + * 26-Apr-94 John Immordino (jimmord) at NeXT + * Cleaned up. 
Fixed byte-swap problems, converted all addresses to + * character arrays, etc. + * + * 07-Apr-93 Joel Greenblatt at NeXT + * Created + * + */ + +#ifdef DRIVER_PRIVATE + +#ifndef _TOKENSR_ +#define _TOKENSR_ + +#include +#include +#include +#include +#include +#include +#include /* Not an Obj-C header */ + +/* + * Virtual driver parameters + * Used by if_vtrXX modules + */ +typedef struct { + int vunit; + int vflags; + int vmtu; + int vtokpri; +} vparms_t; + + +/* + * Source routing table entry + * Note: ipAddr must be the first element in order for our hash table + * code to work properly. + */ +typedef struct { + unsigned long ipAddr; /* IP address of this entry - */ + /* needed for our temporary */ + /* arp table lookup scheme */ + sroute_t ri; /* routing information field */ +} srtable_t; + + +/* + * Encoded source-routing broadcast type (used as parameter to + * source routing routines). + */ +typedef enum { + SRB_OFF, /* no source-route broadcast */ + SRB_AR, /* all-routes broadcast */ + SRB_SR, /* single-route broadcast */ + SRB_INVALID /* invalid entry */ +} srbcast_t; + +/* + * ARP code taken from bsd/netinet/if_ether.c. Need this in order + * to perform lookups of IP addresses to determine which source route + * entry to remove from the table. The first source route entry without + * a corresponding ARP entry will be removed. + */ +#ifdef GATEWAY +#define ARPTAB_BSIZ 16 /* bucket size */ +#define ARPTAB_NB 37 /* number of buckets */ +#else +#define ARPTAB_BSIZ 9 /* bucket size */ +#define ARPTAB_NB 19 /* number of buckets */ +#endif + +extern struct arptab arptab[]; + +#define ARPTAB_HASH(a) \ + ((u_long)(a) % ARPTAB_NB) + +/* + * Change to permit multiple heterogenous interfaces to co-exist. 
+ */ +#define ARPTAB_LOOK(at,addr,ifp) { \ + register n; \ + at = &arptab[ARPTAB_HASH(addr) * ARPTAB_BSIZ]; \ + for (n = 0 ; n < ARPTAB_BSIZ ; n++,at++) \ + if (at->at_iaddr.s_addr == addr && \ + (!(ifp) || at->at_if == (ifp))) \ + break; \ + if (n >= ARPTAB_BSIZ) \ + at = 0; \ +} + + +/* + * Initialize callers source routing table. + */ +static __inline__ +void init_src_routing(NXHashTable **sourceRouteTable) +{ + extern NXHashTablePrototype SRTablePrototype; + *sourceRouteTable = NXCreateHashTable(SRTablePrototype, 0, NULL); +} + +/* + * Search for a source route (given a destination address). + */ +static __inline__ +sroute_t *find_sr(NXHashTable *sourceRouteTable, unsigned long idst) +{ + srtable_t *sourceRouteEntry = NXHashGet(sourceRouteTable, + (const void *)&idst); + if (sourceRouteEntry) { + return &sourceRouteEntry->ri; + } + return NULL; +} + +/* + * Add an entry to the callers source routing table. + */ +static __inline__ +void add_sr(netif_t netif, NXHashTable *sourceRouteTable, unsigned long ipAddr, + sroute_t *rip, unsigned long srLimit) +{ + srtable_t *sourceRouteEntry; + struct ifnet *ifp = (struct ifnet *)netif; + + if ((rip->rc.len > 18)|| (rip->rc.len < 2) || (rip->rc.len & 1)) + return; + + /* + * See if the entry is already in the table + */ + sourceRouteEntry = NXHashGet(sourceRouteTable,&ipAddr); + if (sourceRouteEntry) { + bcopy(rip, &sourceRouteEntry->ri, rip->rc.len); + sourceRouteEntry->ri.rc.bcast = 0; /* make non-bcast */ + sourceRouteEntry->ri.rc.dir = ~sourceRouteEntry->ri.rc.dir; + return; + } + + /* + * See if there's room in the table for another entry. + */ + if (NXCountHashTable(sourceRouteTable) >= srLimit) { + BOOL dumpedOne = NO; + NXHashState state = NXInitHashState(sourceRouteTable); + + /* + * Need to delete an entry. + */ + while (NXNextHashState(sourceRouteTable, &state, + (void **)&sourceRouteEntry)) { + + struct arptab *at; + + /* + * Look for an entry without a corresponding entry in the + * arp table. 
+ */ + ARPTAB_LOOK(at, sourceRouteEntry->ipAddr, ifp); + if (at == NULL) { + /* + * Found one - try to remove it + */ + sourceRouteEntry = + NXHashRemove(sourceRouteTable, + (const void *)&sourceRouteEntry->ipAddr); + if (sourceRouteEntry) { + kfree(sourceRouteEntry,sizeof(srtable_t)); + dumpedOne = YES; + break; + } + } + } + if (dumpedOne == NO) { + printf("add_sr: source route table overflow\n"); + return; + } + } + + sourceRouteEntry = (srtable_t *)kalloc(sizeof(srtable_t)); + + sourceRouteEntry->ipAddr = ipAddr; + bcopy(rip, &sourceRouteEntry->ri, rip->rc.len); + sourceRouteEntry->ri.rc.bcast = 0; /* make non-bcast */ + sourceRouteEntry->ri.rc.dir = ~sourceRouteEntry->ri.rc.dir; + + sourceRouteEntry = + NXHashInsert(sourceRouteTable,(const void *)&sourceRouteEntry->ipAddr); + if (sourceRouteEntry) /* shouldn't happen */ + kfree(sourceRouteEntry,sizeof(srtable_t)); +} + +/* + * Find & return the source route to the callers address. + */ +static __inline__ +void get_src_route(NXHashTable *sourceRouteTable, unsigned long idst, + unsigned char *da, tokenHeader_t *th) +{ + sroute_t *sourceRoute; + + if (da[0] & 0x80) + return; /* don't handle group addresses */ + + /* + * Find source route in srtable and copy to caller's + * tokenHeader_t (or turn off sri bit). + */ + sourceRoute = find_sr(sourceRouteTable, idst); + if (sourceRoute) { + bcopy(sourceRoute, &th->ri, sourceRoute->rc.len); + th->sa[0] |= TR_RII; + } + else + th->sa[0] &= ~TR_RII; /* turn off source routing bit */ +} + +/* + * Save the source route in the callers MAC header. + */ +static __inline__ +void save_src_route(netif_t netif, NXHashTable *sourceRouteTable, + unsigned long ipAddr, tokenHeader_t *th, unsigned long srLimit) +{ + /* + * If frame has a routing field > 2 then save it (i.e. it's been + * thru at least one bridge). 
+ */ + if ((th->sa[0] & TR_RII) && (th->ri.rc.len > 2)) + add_sr(netif, sourceRouteTable, ipAddr, &th->ri, srLimit); +} + + +/* + * Returns length of the source routing field in callers MAC header. + * Returns -1 if the header is invalid. + */ +static __inline__ +int get_ri_len(tokenHeader_t *th) +{ + int ri_len = 0; + sroute_t *rif = (sroute_t *)&th->ri; + + if (th->sa[0] & 0x80) { + ri_len = (int)rif->rc.len; + if ((ri_len & 1) || (ri_len < 2) || (ri_len > 18)) { + ri_len = -1; + } + } + return ri_len; +} + +/* + * Returns the length of an 802.5 MAC header (including routing field). + */ +static __inline__ +int get_8025_hdr_len(tokenHeader_t *th) +{ + int ri_len; + + ri_len = get_ri_len(th); + if (ri_len < 0) + return ri_len; // bad header + + return ri_len + MAC_HDR_MIN; +} + +/* + * Returns 1 if mac address is any type of broadcast, zero otherwise. + */ +static __inline__ +int check_mac_bcast(tokenHeader_t *th) +{ + if (th->da[0] & 0x80) + return 1; // group address (I/G bit) + return 0; +} + +/* + * Build a broadcast routing field in the callers MAC header. + */ +static __inline__ +void make_sr_bcast(tokenHeader_t *th, srbcast_t type) +{ + if ((type == SRB_OFF) || (type >= SRB_INVALID)) { + th->sa[0] &= ~TR_RII; + return; + } + + th->sa[0] |= TR_RII; /* turn on rii bit to ind. src rtng field */ + + /* + * Build the routing control field for the requested + * broadcast type. + */ + if (type == SRB_AR) + th->ri.rc.bcast = BI_AR_BCAST; + else + th->ri.rc.bcast = BI_SR_BCAST; + + th->ri.rc.len = 2; + th->ri.rc.dir = 0; + th->ri.rc.longf = LF_BCAST; + th->ri.rc.rsrvd = 0; +} + +/* + * Make the callers MAC header a reply to sender. + */ +static __inline__ +void make_mac_reply(tokenHeader_t *th) +{ + + /* + * Copy source address to destination address. Turn off RII bit in + * the destination address. 
+ */ + bcopy(th->sa, th->da, sizeof(th->da)); + th->da[0] &= ~TR_RII; + + /* + * Convert the source routing field to a reply (flip direction + * bit & turn off broadcast bits). + */ + if (th->sa[0] & TR_RII) { + th->ri.rc.dir = ~th->ri.rc.dir; + th->ri.rc.bcast = 0; + } +} + + +#endif /* _TOKENSR_ */ + +#endif /* DRIVER_PRIVATE */ diff --git a/bsd/net/zlib.c b/bsd/net/zlib.c new file mode 100644 index 000000000..8ae3ac084 --- /dev/null +++ b/bsd/net/zlib.c @@ -0,0 +1,5383 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This file is derived from various .h and .c files from the zlib-1.0.4 + * distribution by Jean-loup Gailly and Mark Adler, with some additions + * by Paul Mackerras to aid in implementing Deflate compression and + * decompression for PPP packets. See zlib.h for conditions of + * distribution and use. 
+ * + * Changes that have been made include: + * - added Z_PACKET_FLUSH (see zlib.h for details) + * - added inflateIncomp and deflateOutputPending + * - allow strm->next_out to be NULL, meaning discard the output + * + */ + +/* + * ==FILEVERSION 971210== + * + * This marker is used by the Linux installation script to determine + * whether an up-to-date version of this file is already installed. + */ + +#define NO_DUMMY_DECL +#define NO_ZCFUNCS +#define MY_ZCALLOC + +#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL)) +#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */ +#endif + + +/* +++ zutil.h */ +/* zutil.h -- internal interface and configuration of the compression library + * Copyright (C) 1995-1996 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. 
+ */ + +/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */ + +#ifndef _Z_UTIL_H +#define _Z_UTIL_H + + +#include + + +#if defined(KERNEL) +/* Assume this is a *BSD or SVR4 kernel */ +//#include +#include +//#include +# define HAVE_MEMCPY +# define memcpy(d, s, n) bcopy((s), (d), (n)) +# define memset(d, v, n) bzero((d), (n)) +# define memcmp bcmp + +#ifdef STDC +//# include +//# include +#endif + +#endif /* _KERNEL || KERNEL */ + +#ifndef local +# define local static +#endif +/* compile with -Dlocal if your debugger can't find static symbols */ + +typedef unsigned char uch; +typedef uch FAR uchf; +typedef unsigned short ush; +typedef ush FAR ushf; +typedef unsigned long ulg; + +extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */ +/* (size given to avoid silly warnings with Visual C++) */ + +#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] + +#define ERR_RETURN(strm,err) \ + return (strm->msg = (char*)ERR_MSG(err), (err)) +/* To be used only when the state is known to be valid */ + + /* common constants */ + +#ifndef DEF_WBITS +# define DEF_WBITS MAX_WBITS +#endif +/* default windowBits for decompression. 
MAX_WBITS is for compression only */ + +#if MAX_MEM_LEVEL >= 8 +# define DEF_MEM_LEVEL 8 +#else +# define DEF_MEM_LEVEL MAX_MEM_LEVEL +#endif +/* default memLevel */ + +#define STORED_BLOCK 0 +#define STATIC_TREES 1 +#define DYN_TREES 2 +/* The three kinds of block type */ + +#define MIN_MATCH 3 +#define MAX_MATCH 258 +/* The minimum and maximum match lengths */ + +#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ + + /* target dependencies */ + +#ifdef MSDOS +# define OS_CODE 0x00 +# ifdef __TURBOC__ +# include +# else /* MSC or DJGPP */ +# include +# endif +#endif + +#ifdef OS2 +# define OS_CODE 0x06 +#endif + +#ifdef WIN32 /* Window 95 & Windows NT */ +# define OS_CODE 0x0b +#endif + +#if defined(VAXC) || defined(VMS) +# define OS_CODE 0x02 +# define FOPEN(name, mode) \ + fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") +#endif + +#ifdef AMIGA +# define OS_CODE 0x01 +#endif + +#if defined(ATARI) || defined(atarist) +# define OS_CODE 0x05 +#endif + +#ifdef MACOS +# define OS_CODE 0x07 +#endif + +#ifdef __50SERIES /* Prime/PRIMOS */ +# define OS_CODE 0x0F +#endif + +#ifdef TOPS20 +# define OS_CODE 0x0a +#endif + +#if defined(_BEOS_) || defined(RISCOS) +# define fdopen(fd,mode) NULL /* No fdopen() */ +#endif + + /* Common defaults */ + +#ifndef OS_CODE +# define OS_CODE 0x03 /* assume Unix */ +#endif + +#ifndef FOPEN +# define FOPEN(name, mode) fopen((name), (mode)) +#endif + + /* functions */ + +#ifdef HAVE_STRERROR + extern char *strerror OF((int)); +# define zstrerror(errnum) strerror(errnum) +#else +# define zstrerror(errnum) "" +#endif + +#if defined(pyr) +# define NO_MEMCPY +#endif +#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER) + /* Use our own functions for small and medium model with MSC <= 5.0. + * You may have to use the same strategy for Borland C (untested). 
+ */ +# define NO_MEMCPY +#endif +#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) +# define HAVE_MEMCPY +#endif +#ifdef HAVE_MEMCPY +# ifdef SMALL_MEDIUM /* MSDOS small or medium model */ +# define zmemcpy _fmemcpy +# define zmemcmp _fmemcmp +# define zmemzero(dest, len) _fmemset(dest, 0, len) +# else +# define zmemcpy memcpy +# define zmemcmp memcmp +# define zmemzero(dest, len) memset(dest, 0, len) +# endif +#else + extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len)); + extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len)); + extern void zmemzero OF((Bytef* dest, uInt len)); +#endif + +/* Diagnostic functions */ +#ifdef DEBUG_ZLIB +# include +# ifndef verbose +# define verbose 0 +# endif + extern void z_error OF((char *m)); +# define Assert(cond,msg) {if(!(cond)) z_error(msg);} +# define Trace(x) fprintf x +# define Tracev(x) {if (verbose) fprintf x ;} +# define Tracevv(x) {if (verbose>1) fprintf x ;} +# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} +# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + + +typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len)); + +voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size)); +void zcfree OF((voidpf opaque, voidpf ptr)); + +#define ZALLOC(strm, items, size) \ + (*((strm)->zalloc))((strm)->opaque, (items), (size)) +#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) +#define TRY_FREE(s, p) {if (p) ZFREE(s, p);} + +#endif /* _Z_UTIL_H */ +/* --- zutil.h */ + +/* +++ deflate.h */ +/* deflate.h -- internal compression state + * Copyright (C) 1995-1996 Jean-loup Gailly + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. 
It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */ + +#ifndef _DEFLATE_H +#define _DEFLATE_H + +/* #include "zutil.h" */ + +/* =========================================================================== + * Internal compression state. + */ + +#define LENGTH_CODES 29 +/* number of length codes, not counting the special END_BLOCK code */ + +#define LITERALS 256 +/* number of literal bytes 0..255 */ + +#define L_CODES (LITERALS+1+LENGTH_CODES) +/* number of Literal or Length codes, including the END_BLOCK code */ + +#define D_CODES 30 +/* number of distance codes */ + +#define BL_CODES 19 +/* number of codes used to transfer the bit lengths */ + +#define HEAP_SIZE (2*L_CODES+1) +/* maximum heap size */ + +#define MAX_BITS 15 +/* All codes must not exceed MAX_BITS bits */ + +#define INIT_STATE 42 +#define BUSY_STATE 113 +#define FINISH_STATE 666 +/* Stream status */ + + +/* Data structure describing a single value and its code string. */ +typedef struct ct_data_s { + union { + ush freq; /* frequency count */ + ush code; /* bit string */ + } fc; + union { + ush dad; /* father node in Huffman tree */ + ush len; /* length of bit string */ + } dl; +} FAR ct_data; + +#define Freq fc.freq +#define Code fc.code +#define Dad dl.dad +#define Len dl.len + +typedef struct static_tree_desc_s static_tree_desc; + +typedef struct tree_desc_s { + ct_data *dyn_tree; /* the dynamic tree */ + int max_code; /* largest code with non zero frequency */ + static_tree_desc *stat_desc; /* the corresponding static tree */ +} FAR tree_desc; + +typedef ush Pos; +typedef Pos FAR Posf; +typedef unsigned IPos; + +/* A Pos is an index in the character window. We use short instead of int to + * save space in the various tables. IPos is used only for parameter passing. 
+ */ + +typedef struct deflate_state { + z_streamp strm; /* pointer back to this zlib stream */ + int status; /* as the name implies */ + Bytef *pending_buf; /* output still pending */ + ulg pending_buf_size; /* size of pending_buf */ + Bytef *pending_out; /* next pending byte to output to the stream */ + int pending; /* nb of bytes in the pending buffer */ + int noheader; /* suppress zlib header and adler32 */ + Byte data_type; /* UNKNOWN, BINARY or ASCII */ + Byte method; /* STORED (for zip only) or DEFLATED */ + int last_flush; /* value of flush param for previous deflate call */ + + /* used by deflate.c: */ + + uInt w_size; /* LZ77 window size (32K by default) */ + uInt w_bits; /* log2(w_size) (8..16) */ + uInt w_mask; /* w_size - 1 */ + + Bytef *window; + /* Sliding window. Input bytes are read into the second half of the window, + * and move to the first half later to keep a dictionary of at least wSize + * bytes. With this organization, matches are limited to a distance of + * wSize-MAX_MATCH bytes, but this ensures that IO is always + * performed with a length multiple of the block size. Also, it limits + * the window size to 64K, which is quite useful on MSDOS. + * To do: use the user input buffer as sliding window. + */ + + ulg window_size; + /* Actual size of window: 2*wSize, except when the user input buffer + * is directly used as sliding window. + */ + + Posf *prev; + /* Link to older string with same hash index. To limit the size of this + * array to 64K, this link is maintained only for the last 32K strings. + * An index in this array is thus a window index modulo 32K. + */ + + Posf *head; /* Heads of the hash chains or NIL. */ + + uInt ins_h; /* hash index of string to be inserted */ + uInt hash_size; /* number of elements in hash table */ + uInt hash_bits; /* log2(hash_size) */ + uInt hash_mask; /* hash_size-1 */ + + uInt hash_shift; + /* Number of bits by which ins_h must be shifted at each input + * step. 
It must be such that after MIN_MATCH steps, the oldest + * byte no longer takes part in the hash key, that is: + * hash_shift * MIN_MATCH >= hash_bits + */ + + long block_start; + /* Window position at the beginning of the current output block. Gets + * negative when the window is moved backwards. + */ + + uInt match_length; /* length of best match */ + IPos prev_match; /* previous match */ + int match_available; /* set if previous match exists */ + uInt strstart; /* start of string to insert */ + uInt match_start; /* start of matching string */ + uInt lookahead; /* number of valid bytes ahead in window */ + + uInt prev_length; + /* Length of the best match at previous step. Matches not greater than this + * are discarded. This is used in the lazy match evaluation. + */ + + uInt max_chain_length; + /* To speed up deflation, hash chains are never searched beyond this + * length. A higher limit improves compression ratio but degrades the + * speed. + */ + + uInt max_lazy_match; + /* Attempt to find a better match only when the current match is strictly + * smaller than this value. This mechanism is used only for compression + * levels >= 4. + */ +# define max_insert_length max_lazy_match + /* Insert new strings in the hash table only if the match length is not + * greater than this length. This saves time but degrades compression. + * max_insert_length is used only for compression levels <= 3. 
+ */ + + int level; /* compression level (1..9) */ + int strategy; /* favor or force Huffman coding*/ + + uInt good_match; + /* Use a faster search when the previous match is longer than this */ + + int nice_match; /* Stop searching when current match exceeds this */ + + /* used by trees.c: */ + /* Didn't use ct_data typedef below to supress compiler warning */ + struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ + struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ + struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ + + struct tree_desc_s l_desc; /* desc. for literal tree */ + struct tree_desc_s d_desc; /* desc. for distance tree */ + struct tree_desc_s bl_desc; /* desc. for bit length tree */ + + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ + int heap_len; /* number of elements in the heap */ + int heap_max; /* element of largest frequency */ + /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. + * The same heap array is used to build all trees. + */ + + uch depth[2*L_CODES+1]; + /* Depth of each subtree used as tie breaker for trees of equal frequency + */ + + uchf *l_buf; /* buffer for literals or lengths */ + + uInt lit_bufsize; + /* Size of match buffer for literals/lengths. There are 4 reasons for + * limiting lit_bufsize to 64K: + * - frequencies can be kept in 16 bit counters + * - if compression is not successful for the first block, all input + * data is still in the window so we can still emit a stored block even + * when input comes from standard input. (This can also be done for + * all blocks if lit_bufsize is not greater than 32K.) + * - if compression is not successful for a file smaller than 64K, we can + * even emit a stored file instead of a stored block (saving 5 bytes). + * This is applicable only for zip (not gzip or zlib). 
+ * - creating new Huffman trees less frequently may not provide fast + * adaptation to changes in the input data statistics. (Take for + * example a binary file with poorly compressible code followed by + * a highly compressible string table.) Smaller buffer sizes give + * fast adaptation but have of course the overhead of transmitting + * trees more frequently. + * - I can't count above 4 + */ + + uInt last_lit; /* running index in l_buf */ + + ushf *d_buf; + /* Buffer for distances. To simplify the code, d_buf and l_buf have + * the same number of elements. To use different lengths, an extra flag + * array would be necessary. + */ + + ulg opt_len; /* bit length of current block with optimal trees */ + ulg static_len; /* bit length of current block with static trees */ + ulg compressed_len; /* total bit length of compressed file */ + uInt matches; /* number of string matches in current block */ + int last_eob_len; /* bit length of EOB code for last block */ + +#ifdef DEBUG_ZLIB + ulg bits_sent; /* bit length of the compressed data */ +#endif + + ush bi_buf; + /* Output buffer. bits are inserted starting at the bottom (least + * significant bits). + */ + int bi_valid; + /* Number of valid bits in bi_buf. All bits above the last valid bit + * are always zero. + */ + +} FAR deflate_state; + +/* Output a byte on the stream. + * IN assertion: there is enough room in pending_buf. + */ +#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);} + + +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) +/* Minimum amount of lookahead, except at the end of the input file. + * See deflate.c for comments about the MIN_MATCH+1. + */ + +#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) +/* In order to simplify the code, particularly on 16 bit machines, match + * distances are limited to MAX_DIST instead of WSIZE. 
+ */ + + /* in trees.c */ +void _tr_init OF((deflate_state *s)); +int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); +ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, + int eof)); +void _tr_align OF((deflate_state *s)); +void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, + int eof)); +void _tr_stored_type_only OF((deflate_state *)); + +#endif +/* --- deflate.h */ + +/* +++ deflate.c */ +/* deflate.c -- compress data using the deflation algorithm + * Copyright (C) 1995-1996 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * ALGORITHM + * + * The "deflation" process depends on being able to identify portions + * of the input text which are identical to earlier input (within a + * sliding window trailing behind the input currently being processed). + * + * The most straightforward technique turns out to be the fastest for + * most input files: try all possible matches and select the longest. + * The key feature of this algorithm is that insertions into the string + * dictionary are very simple and thus fast, and deletions are avoided + * completely. Insertions are performed at each input character, whereas + * string matches are performed only when the previous match ends. So it + * is preferable to spend more time in matches to allow very fast string + * insertions and avoid deletions. The matching algorithm for small + * strings is inspired from that of Rabin & Karp. A brute force approach + * is used to find longer strings when a small match has been found. + * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze + * (by Leonid Broukhis). + * A previous version of this file used a more sophisticated algorithm + * (by Fiala and Greene) which is guaranteed to run in linear amortized + * time, but has a larger average cost, uses more memory and is patented. 
+ * However the F&G algorithm may be faster for some highly redundant + * files if the parameter max_chain_length (described below) is too large. + * + * ACKNOWLEDGEMENTS + * + * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and + * I found it in 'freeze' written by Leonid Broukhis. + * Thanks to many people for bug reports and testing. + * + * REFERENCES + * + * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". + * Available in ftp://ds.internic.net/rfc/rfc1951.txt + * + * A description of the Rabin and Karp algorithm is given in the book + * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. + * + * Fiala,E.R., and Greene,D.H. + * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 + * + */ + +/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */ + +/* #include "deflate.h" */ + +char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. + */ + +/* =========================================================================== + * Function prototypes. + */ +typedef enum { + need_more, /* block not completed, need more input or more output */ + block_done, /* block flush performed */ + finish_started, /* finish started, need only more output at next deflate */ + finish_done /* finish done, accept no more input or output */ +} block_state; + +typedef block_state (*compress_func) OF((deflate_state *s, int flush)); +/* Compression function. Returns the block state after the call. 
*/ + +local void fill_window OF((deflate_state *s)); +local block_state deflate_stored OF((deflate_state *s, int flush)); +local block_state deflate_fast OF((deflate_state *s, int flush)); +local block_state deflate_slow OF((deflate_state *s, int flush)); +local void lm_init OF((deflate_state *s)); +local void putShortMSB OF((deflate_state *s, uInt b)); +local void flush_pending OF((z_streamp strm)); +local int read_buf OF((z_streamp strm, charf *buf, unsigned size)); +#ifdef ASMV + void match_init OF((void)); /* asm code initialization */ + uInt longest_match OF((deflate_state *s, IPos cur_match)); +#else +local uInt longest_match OF((deflate_state *s, IPos cur_match)); +#endif + +#ifdef DEBUG_ZLIB +local void check_match OF((deflate_state *s, IPos start, IPos match, + int length)); +#endif + +/* =========================================================================== + * Local data + */ + +#define NIL 0 +/* Tail of hash chains */ + +#ifndef TOO_FAR +# define TOO_FAR 4096 +#endif +/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ + +#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) +/* Minimum amount of lookahead, except at the end of the input file. + * See deflate.c for comments about the MIN_MATCH+1. + */ + +/* Values for max_lazy_match, good_match and max_chain_length, depending on + * the desired pack level (0..9). The values given below have been tuned to + * exclude worst case performance for pathological files. Better values may be + * found for specific files. 
+ */ +typedef struct config_s { + ush good_length; /* reduce lazy search above this match length */ + ush max_lazy; /* do not perform lazy search above this match length */ + ush nice_length; /* quit search above this match length */ + ush max_chain; + compress_func func; +} config; + +local config configuration_table[10] = { +/* good lazy nice chain */ +/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ +/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */ +/* 2 */ {4, 5, 16, 8, deflate_fast}, +/* 3 */ {4, 6, 32, 32, deflate_fast}, + +/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ +/* 5 */ {8, 16, 32, 32, deflate_slow}, +/* 6 */ {8, 16, 128, 128, deflate_slow}, +/* 7 */ {8, 32, 128, 256, deflate_slow}, +/* 8 */ {32, 128, 258, 1024, deflate_slow}, +/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */ + +/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 + * For deflate_fast() (levels <= 3) good is ignored and lazy has a different + * meaning. + */ + +#define EQUAL 0 +/* result of memcmp for equal strings */ + +#ifndef NO_DUMMY_DECL +struct static_tree_desc_s {int dummy;}; /* for buggy compilers */ +#endif + +/* =========================================================================== + * Update a hash value with the given input byte + * IN assertion: all calls to to UPDATE_HASH are made with consecutive + * input characters, so that a running hash key can be computed from the + * previous key instead of complete recalculation each time. + */ +#define UPDATE_HASH(s,h,c) (h = (((h)<hash_shift) ^ (c)) & s->hash_mask) + + +/* =========================================================================== + * Insert string str in the dictionary and set match_head to the previous head + * of the hash chain (the most recent string with same hash key). Return + * the previous length of the hash chain. 
+ * IN assertion: all calls to to INSERT_STRING are made with consecutive + * input characters and the first MIN_MATCH bytes of str are valid + * (except for the last MIN_MATCH-1 bytes of the input file). + */ +#define INSERT_STRING(s, str, match_head) \ + (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ + s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \ + s->head[s->ins_h] = (Pos)(str)) + +/* =========================================================================== + * Initialize the hash table (avoiding 64K overflow for 16 bit systems). + * prev[] will be initialized on the fly. + */ +#define CLEAR_HASH(s) \ + s->head[s->hash_size-1] = NIL; \ + zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); + +/* ========================================================================= */ +int deflateInit_(strm, level, version, stream_size) + z_streamp strm; + int level; + const char *version; + int stream_size; +{ + return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, + Z_DEFAULT_STRATEGY, version, stream_size); + /* To do: ignore strm->next_in if we use it as window */ +} + +/* ========================================================================= */ +int deflateInit2_(strm, level, method, windowBits, memLevel, strategy, + version, stream_size) + z_streamp strm; + int level; + int method; + int windowBits; + int memLevel; + int strategy; + const char *version; + int stream_size; +{ + deflate_state *s; + int noheader = 0; + static char* my_version = ZLIB_VERSION; + + ushf *overlay; + /* We overlay pending_buf and d_buf+l_buf. This works since the average + * output size for (length,distance) codes is <= 24 bits. 
+ */ + + if (version == Z_NULL || version[0] != my_version[0] || + stream_size != sizeof(z_stream)) { + return Z_VERSION_ERROR; + } + if (strm == Z_NULL) return Z_STREAM_ERROR; + + strm->msg = Z_NULL; +#ifndef NO_ZCFUNCS + if (strm->zalloc == Z_NULL) { + strm->zalloc = zcalloc; + strm->opaque = (voidpf)0; + } + if (strm->zfree == Z_NULL) strm->zfree = zcfree; +#endif + + if (level == Z_DEFAULT_COMPRESSION) level = 6; + + if (windowBits < 0) { /* undocumented feature: suppress zlib header */ + noheader = 1; + windowBits = -windowBits; + } + if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || + windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || + strategy < 0 || strategy > Z_HUFFMAN_ONLY) { + return Z_STREAM_ERROR; + } + s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); + if (s == Z_NULL) return Z_MEM_ERROR; + strm->state = (struct internal_state FAR *)s; + s->strm = strm; + + s->noheader = noheader; + s->w_bits = windowBits; + s->w_size = 1 << s->w_bits; + s->w_mask = s->w_size - 1; + + s->hash_bits = memLevel + 7; + s->hash_size = 1 << s->hash_bits; + s->hash_mask = s->hash_size - 1; + s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); + + s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); + s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); + s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); + + s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ + + overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); + s->pending_buf = (uchf *) overlay; + s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); + + if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || + s->pending_buf == Z_NULL) { + strm->msg = (char*)ERR_MSG(Z_MEM_ERROR); + deflateEnd (strm); + return Z_MEM_ERROR; + } + s->d_buf = overlay + s->lit_bufsize/sizeof(ush); + s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; + + s->level = level; + s->strategy = strategy; + s->method = 
(Byte)method; + + return deflateReset(strm); +} + +/* ========================================================================= */ +int deflateSetDictionary (strm, dictionary, dictLength) + z_streamp strm; + const Bytef *dictionary; + uInt dictLength; +{ + deflate_state *s; + uInt length = dictLength; + uInt n; + IPos hash_head = 0; + + if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL) + return Z_STREAM_ERROR; + + s = (deflate_state *) strm->state; + if (s->status != INIT_STATE) return Z_STREAM_ERROR; + + strm->adler = adler32(strm->adler, dictionary, dictLength); + + if (length < MIN_MATCH) return Z_OK; + if (length > MAX_DIST(s)) { + length = MAX_DIST(s); +#ifndef USE_DICT_HEAD + dictionary += dictLength - length; /* use the tail of the dictionary */ +#endif + } + zmemcpy((charf *)s->window, dictionary, length); + s->strstart = length; + s->block_start = (long)length; + + /* Insert all strings in the hash table (except for the last two bytes). + * s->lookahead stays null, so s->ins_h will be recomputed at the next + * call of fill_window. + */ + s->ins_h = s->window[0]; + UPDATE_HASH(s, s->ins_h, s->window[1]); + for (n = 0; n <= length - MIN_MATCH; n++) { + INSERT_STRING(s, n, hash_head); + } + if (hash_head) hash_head = 0; /* to make compiler happy */ + return Z_OK; +} + +/* ========================================================================= */ +int deflateReset (strm) + z_streamp strm; +{ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL || + strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR; + + strm->total_in = strm->total_out = 0; + strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ + strm->data_type = Z_UNKNOWN; + + s = (deflate_state *)strm->state; + s->pending = 0; + s->pending_out = s->pending_buf; + + if (s->noheader < 0) { + s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */ + } + s->status = s->noheader ? 
BUSY_STATE : INIT_STATE; + strm->adler = 1; + s->last_flush = Z_NO_FLUSH; + + _tr_init(s); + lm_init(s); + + return Z_OK; +} + +/* ========================================================================= */ +int deflateParams(strm, level, strategy) + z_streamp strm; + int level; + int strategy; +{ + deflate_state *s; + compress_func func; + int err = Z_OK; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = (deflate_state *) strm->state; + + if (level == Z_DEFAULT_COMPRESSION) { + level = 6; + } + if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { + return Z_STREAM_ERROR; + } + func = configuration_table[s->level].func; + + if (func != configuration_table[level].func && strm->total_in != 0) { + /* Flush the last buffer: */ + err = deflate(strm, Z_PARTIAL_FLUSH); + } + if (s->level != level) { + s->level = level; + s->max_lazy_match = configuration_table[level].max_lazy; + s->good_match = configuration_table[level].good_length; + s->nice_match = configuration_table[level].nice_length; + s->max_chain_length = configuration_table[level].max_chain; + } + s->strategy = strategy; + return err; +} + +/* ========================================================================= + * Put a short in the pending buffer. The 16-bit value is put in MSB order. + * IN assertion: the stream state is correct and there is enough room in + * pending_buf. + */ +local void putShortMSB (s, b) + deflate_state *s; + uInt b; +{ + put_byte(s, (Byte)(b >> 8)); + put_byte(s, (Byte)(b & 0xff)); +} + +/* ========================================================================= + * Flush as much pending output as possible. All deflate() output goes + * through this function so some applications may wish to modify it + * to avoid allocating a large strm->next_out buffer and copying into it. + * (See also read_buf()). 
+ */ +local void flush_pending(strm) + z_streamp strm; +{ + deflate_state *s = (deflate_state *) strm->state; + unsigned len = s->pending; + + if (len > strm->avail_out) len = strm->avail_out; + if (len == 0) return; + + if (strm->next_out != Z_NULL) { + zmemcpy(strm->next_out, s->pending_out, len); + strm->next_out += len; + } + s->pending_out += len; + strm->total_out += len; + strm->avail_out -= len; + s->pending -= len; + if (s->pending == 0) { + s->pending_out = s->pending_buf; + } +} + +/* ========================================================================= */ +int deflate (strm, flush) + z_streamp strm; + int flush; +{ + int old_flush; /* value of flush param for previous deflate call */ + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL || + flush > Z_FINISH || flush < 0) { + return Z_STREAM_ERROR; + } + s = (deflate_state *) strm->state; + + if ((strm->next_in == Z_NULL && strm->avail_in != 0) || + (s->status == FINISH_STATE && flush != Z_FINISH)) { + ERR_RETURN(strm, Z_STREAM_ERROR); + } + if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); + + s->strm = strm; /* just in case */ + old_flush = s->last_flush; + s->last_flush = flush; + + /* Write the zlib header */ + if (s->status == INIT_STATE) { + + uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; + uInt level_flags = (s->level-1) >> 1; + + if (level_flags > 3) level_flags = 3; + header |= (level_flags << 6); + if (s->strstart != 0) header |= PRESET_DICT; + header += 31 - (header % 31); + + s->status = BUSY_STATE; + putShortMSB(s, header); + + /* Save the adler32 of the preset dictionary: */ + if (s->strstart != 0) { + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + } + strm->adler = 1L; + } + + /* Flush as much pending output as possible */ + if (s->pending != 0) { + flush_pending(strm); + if (strm->avail_out == 0) { + /* Since avail_out is 0, deflate will be called again with + * more output space, but possibly with both 
pending and + * avail_in equal to zero. There won't be anything to do, + * but this is not an error situation so make sure we + * return OK instead of BUF_ERROR at next call of deflate: + */ + s->last_flush = -1; + return Z_OK; + } + + /* Make sure there is something to do and avoid duplicate consecutive + * flushes. For repeated and useless calls with Z_FINISH, we keep + * returning Z_STREAM_END instead of Z_BUFF_ERROR. + */ + } else if (strm->avail_in == 0 && flush <= old_flush && + flush != Z_FINISH) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* User must not provide more input after the first FINISH: */ + if (s->status == FINISH_STATE && strm->avail_in != 0) { + ERR_RETURN(strm, Z_BUF_ERROR); + } + + /* Start a new block or continue the current one. + */ + if (strm->avail_in != 0 || s->lookahead != 0 || + (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { + block_state bstate; + + bstate = (*(configuration_table[s->level].func))(s, flush); + + if (bstate == finish_started || bstate == finish_done) { + s->status = FINISH_STATE; + } + if (bstate == need_more || bstate == finish_started) { + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ + } + return Z_OK; + /* If flush != Z_NO_FLUSH && avail_out == 0, the next call + * of deflate should use the same flush parameter to make sure + * that the flush is complete. So we don't have to output an + * empty block here, this will be done at next call. This also + * ensures that for a very small output buffer, we emit at most + * one empty block. + */ + } + if (bstate == block_done) { + if (flush == Z_PARTIAL_FLUSH) { + _tr_align(s); + } else if (flush == Z_PACKET_FLUSH) { + /* Output just the 3-bit `stored' block type value, + but not a zero length. */ + _tr_stored_type_only(s); + } else { /* FULL_FLUSH or SYNC_FLUSH */ + _tr_stored_block(s, (char*)0, 0L, 0); + /* For a full flush, this empty block will be recognized + * as a special marker by inflate_sync(). 
+ */ + if (flush == Z_FULL_FLUSH) { + CLEAR_HASH(s); /* forget history */ + } + } + flush_pending(strm); + if (strm->avail_out == 0) { + s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ + return Z_OK; + } + } + } + Assert(strm->avail_out > 0, "bug2"); + + if (flush != Z_FINISH) return Z_OK; + if (s->noheader) return Z_STREAM_END; + + /* Write the zlib trailer (adler32) */ + putShortMSB(s, (uInt)(strm->adler >> 16)); + putShortMSB(s, (uInt)(strm->adler & 0xffff)); + flush_pending(strm); + /* If avail_out is zero, the application will call deflate again + * to flush the rest. + */ + s->noheader = -1; /* write the trailer only once! */ + return s->pending != 0 ? Z_OK : Z_STREAM_END; +} + +/* ========================================================================= */ +int deflateEnd (strm) + z_streamp strm; +{ + int status; + deflate_state *s; + + if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; + s = (deflate_state *) strm->state; + + status = s->status; + if (status != INIT_STATE && status != BUSY_STATE && + status != FINISH_STATE) { + return Z_STREAM_ERROR; + } + + /* Deallocate in reverse order of allocations: */ + TRY_FREE(strm, s->pending_buf); + TRY_FREE(strm, s->head); + TRY_FREE(strm, s->prev); + TRY_FREE(strm, s->window); + + ZFREE(strm, s); + strm->state = Z_NULL; + + return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; +} + +/* ========================================================================= + * Copy the source state to the destination state. 
+ */ +int deflateCopy (dest, source) + z_streamp dest; + z_streamp source; +{ + deflate_state *ds; + deflate_state *ss; + ushf *overlay; + + if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL) + return Z_STREAM_ERROR; + ss = (deflate_state *) source->state; + + zmemcpy(dest, source, sizeof(*dest)); + + ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); + if (ds == Z_NULL) return Z_MEM_ERROR; + dest->state = (struct internal_state FAR *) ds; + zmemcpy(ds, ss, sizeof(*ds)); + ds->strm = dest; + + ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); + ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); + ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); + overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); + ds->pending_buf = (uchf *) overlay; + + if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || + ds->pending_buf == Z_NULL) { + deflateEnd (dest); + return Z_MEM_ERROR; + } + /* ??? following zmemcpy doesn't work for 16-bit MSDOS */ + zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); + zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); + zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); + zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); + + ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); + ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); + ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; + + ds->l_desc.dyn_tree = ds->dyn_ltree; + ds->d_desc.dyn_tree = ds->dyn_dtree; + ds->bl_desc.dyn_tree = ds->bl_tree; + + return Z_OK; +} + +/* =========================================================================== + * Return the number of bytes of output which are immediately available + * for output from the decompressor. 
+ */ +int deflateOutputPending (strm) + z_streamp strm; +{ + if (strm == Z_NULL || strm->state == Z_NULL) return 0; + + return ((deflate_state *)(strm->state))->pending; +} + +/* =========================================================================== + * Read a new buffer from the current input stream, update the adler32 + * and total number of bytes read. All deflate() input goes through + * this function so some applications may wish to modify it to avoid + * allocating a large strm->next_in buffer and copying from it. + * (See also flush_pending()). + */ +local int read_buf(strm, buf, size) + z_streamp strm; + charf *buf; + unsigned size; +{ + unsigned len = strm->avail_in; + + if (len > size) len = size; + if (len == 0) return 0; + + strm->avail_in -= len; + + if (!((deflate_state *)(strm->state))->noheader) { + strm->adler = adler32(strm->adler, strm->next_in, len); + } + zmemcpy(buf, strm->next_in, len); + strm->next_in += len; + strm->total_in += len; + + return (int)len; +} + +/* =========================================================================== + * Initialize the "longest match" routines for a new zlib stream + */ +local void lm_init (s) + deflate_state *s; +{ + s->window_size = (ulg)2L*s->w_size; + + CLEAR_HASH(s); + + /* Set the default configuration parameters: + */ + s->max_lazy_match = configuration_table[s->level].max_lazy; + s->good_match = configuration_table[s->level].good_length; + s->nice_match = configuration_table[s->level].nice_length; + s->max_chain_length = configuration_table[s->level].max_chain; + + s->strstart = 0; + s->block_start = 0L; + s->lookahead = 0; + s->match_length = s->prev_length = MIN_MATCH-1; + s->match_available = 0; + s->ins_h = 0; +#ifdef ASMV + match_init(); /* initialize the asm code */ +#endif +} + +/* =========================================================================== + * Set match_start to the longest match starting at the given string and + * return its length. 
Matches shorter or equal to prev_length are discarded, + * in which case the result is equal to prev_length and match_start is + * garbage. + * IN assertions: cur_match is the head of the hash chain for the current + * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 + * OUT assertion: the match length is not greater than s->lookahead. + */ +#ifndef ASMV +/* For 80x86 and 680x0, an optimized version will be provided in match.asm or + * match.S. The code will be functionally equivalent. + */ +local uInt longest_match(s, cur_match) + deflate_state *s; + IPos cur_match; /* current match */ +{ + unsigned chain_length = s->max_chain_length;/* max hash chain length */ + register Bytef *scan = s->window + s->strstart; /* current string */ + register Bytef *match; /* matched string */ + register int len; /* length of current match */ + int best_len = s->prev_length; /* best match length so far */ + int nice_match = s->nice_match; /* stop if match long enough */ + IPos limit = s->strstart > (IPos)MAX_DIST(s) ? + s->strstart - (IPos)MAX_DIST(s) : NIL; + /* Stop when cur_match becomes <= limit. To simplify the code, + * we prevent matches with the string of window index 0. + */ + Posf *prev = s->prev; + uInt wmask = s->w_mask; + +#ifdef UNALIGNED_OK + /* Compare two bytes at a time. Note: this is not always beneficial. + * Try with and without -DUNALIGNED_OK to check. + */ + register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; + register ush scan_start = *(ushf*)scan; + register ush scan_end = *(ushf*)(scan+best_len-1); +#else + register Bytef *strend = s->window + s->strstart + MAX_MATCH; + register Byte scan_end1 = scan[best_len-1]; + register Byte scan_end = scan[best_len]; +#endif + + /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. + * It is easy to get rid of this optimization if necessary. 
+ */ + Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); + + /* Do not waste too much time if we already have a good match: */ + if (s->prev_length >= s->good_match) { + chain_length >>= 2; + } + /* Do not look for matches beyond the end of the input. This is necessary + * to make deflate deterministic. + */ + if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead; + + Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + + do { + Assert(cur_match < s->strstart, "no future"); + match = s->window + cur_match; + + /* Skip to next match if the match length cannot increase + * or if the match length is less than 2: + */ +#if (defined(UNALIGNED_OK) && MAX_MATCH == 258) + /* This code assumes sizeof(unsigned short) == 2. Do not use + * UNALIGNED_OK if your compiler uses a different size. + */ + if (*(ushf*)(match+best_len-1) != scan_end || + *(ushf*)match != scan_start) continue; + + /* It is not necessary to compare scan[2] and match[2] since they are + * always equal when the other bytes match, given that the hash keys + * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at + * strstart+3, +5, ... up to strstart+257. We check for insufficient + * lookahead only every 4th comparison; the 128th check will be made + * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is + * necessary to put more guard bytes at the end of the window, or + * to check more often for insufficient lookahead. 
+ */ + Assert(scan[2] == match[2], "scan[2]?"); + scan++, match++; + do { + } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + *(ushf*)(scan+=2) == *(ushf*)(match+=2) && + scan < strend); + /* The funny "do {}" generates better code on most compilers */ + + /* Here, scan <= window+strstart+257 */ + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + if (*scan == *match) scan++; + + len = (MAX_MATCH - 1) - (int)(strend-scan); + scan = strend - (MAX_MATCH-1); + +#else /* UNALIGNED_OK */ + + if (match[best_len] != scan_end || + match[best_len-1] != scan_end1 || + *match != *scan || + *++match != scan[1]) continue; + + /* The check at best_len-1 can be removed because it will be made + * again later. (This heuristic is not always a win.) + * It is not necessary to compare scan[2] and match[2] since they + * are always equal when the other bytes match, given that + * the hash keys are equal and that HASH_BITS >= 8. + */ + scan += 2, match++; + Assert(*scan == *match, "match[2]?"); + + /* We check for insufficient lookahead only every 8th comparison; + * the 256th check will be made at strstart+258. 
+ */ + do { + } while (*++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + *++scan == *++match && *++scan == *++match && + scan < strend); + + Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + + len = MAX_MATCH - (int)(strend - scan); + scan = strend - MAX_MATCH; + +#endif /* UNALIGNED_OK */ + + if (len > best_len) { + s->match_start = cur_match; + best_len = len; + if (len >= nice_match) break; +#ifdef UNALIGNED_OK + scan_end = *(ushf*)(scan+best_len-1); +#else + scan_end1 = scan[best_len-1]; + scan_end = scan[best_len]; +#endif + } + } while ((cur_match = prev[cur_match & wmask]) > limit + && --chain_length != 0); + + if ((uInt)best_len <= s->lookahead) return best_len; + return s->lookahead; +} +#endif /* ASMV */ + +#ifdef DEBUG_ZLIB +/* =========================================================================== + * Check that the match at match_start is indeed a match. + */ +local void check_match(s, start, match, length) + deflate_state *s; + IPos start, match; + int length; +{ + /* check that the match is indeed a match */ + if (zmemcmp((charf *)s->window + match, + (charf *)s->window + start, length) != EQUAL) { + fprintf(stderr, " start %u, match %u, length %d\n", + start, match, length); + do { + fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); + } while (--length != 0); + z_error("invalid match"); + } + if (z_verbose > 1) { + fprintf(stderr,"\\[%d,%d]", start-match, length); + do { putc(s->window[start++], stderr); } while (--length != 0); + } +} +#else +# define check_match(s, start, match, length) +#endif + +/* =========================================================================== + * Fill the window when the lookahead becomes insufficient. + * Updates strstart and lookahead. 
+ * + * IN assertion: lookahead < MIN_LOOKAHEAD + * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD + * At least one byte has been read, or avail_in == 0; reads are + * performed for at least two bytes (required for the zip translate_eol + * option -- not supported here). + */ +local void fill_window(s) + deflate_state *s; +{ + register unsigned n, m; + register Posf *p; + unsigned more; /* Amount of free space at the end of the window. */ + uInt wsize = s->w_size; + + do { + more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); + + /* Deal with !@#$% 64K limit: */ + if (more == 0 && s->strstart == 0 && s->lookahead == 0) { + more = wsize; + + } else if (more == (unsigned)(-1)) { + /* Very unlikely, but possible on 16 bit machine if strstart == 0 + * and lookahead == 1 (input done one byte at time) + */ + more--; + + /* If the window is almost full and there is insufficient lookahead, + * move the upper half to the lower one to make room in the upper half. + */ + } else if (s->strstart >= wsize+MAX_DIST(s)) { + + zmemcpy((charf *)s->window, (charf *)s->window+wsize, + (unsigned)wsize); + s->match_start -= wsize; + s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ + s->block_start -= (long) wsize; + + /* Slide the hash table (could be avoided with 32 bit values + at the expense of memory usage). We slide even when level == 0 + to keep the hash table consistent if we switch back to level > 0 + later. (Using level 0 permanently is not an optimal usage of + zlib, so we don't care about this pathological case.) + */ + n = s->hash_size; + p = &s->head[n]; + do { + m = *--p; + *p = (Pos)(m >= wsize ? m-wsize : NIL); + } while (--n); + + n = wsize; + p = &s->prev[n]; + do { + m = *--p; + *p = (Pos)(m >= wsize ? m-wsize : NIL); + /* If n is not on any hash chain, prev[n] is garbage but + * its value will never be used. 
+ */ + } while (--n); + more += wsize; + } + if (s->strm->avail_in == 0) return; + + /* If there was no sliding: + * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && + * more == window_size - lookahead - strstart + * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) + * => more >= window_size - 2*WSIZE + 2 + * In the BIG_MEM or MMAP case (not yet supported), + * window_size == input_size + MIN_LOOKAHEAD && + * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. + * Otherwise, window_size == 2*WSIZE so more >= 2. + * If there was sliding, more >= WSIZE. So in all cases, more >= 2. + */ + Assert(more >= 2, "more < 2"); + + n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead, + more); + s->lookahead += n; + + /* Initialize the hash value now that we have some input: */ + if (s->lookahead >= MIN_MATCH) { + s->ins_h = s->window[s->strstart]; + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + } + /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, + * but this is not important since only literal bytes will be emitted. + */ + + } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); +} + +/* =========================================================================== + * Flush the current block, with given end-of-file flag. + * IN assertion: strstart is set to the end of the current match. + */ +#define FLUSH_BLOCK_ONLY(s, eof) { \ + _tr_flush_block(s, (s->block_start >= 0L ? \ + (charf *)&s->window[(unsigned)s->block_start] : \ + (charf *)Z_NULL), \ + (ulg)((long)s->strstart - s->block_start), \ + (eof)); \ + s->block_start = s->strstart; \ + flush_pending(s->strm); \ + Tracev((stderr,"[FLUSH]")); \ +} + +/* Same but force premature exit if necessary. */ +#define FLUSH_BLOCK(s, eof) { \ + FLUSH_BLOCK_ONLY(s, eof); \ + if (s->strm->avail_out == 0) return (eof) ? 
finish_started : need_more; \ +} + +/* =========================================================================== + * Copy without compression as much as possible from the input stream, return + * the current block state. + * This function does not insert new strings in the dictionary since + * uncompressible data is probably not useful. This function is used + * only for the level=0 compression option. + * NOTE: this function should be optimized to avoid extra copying from + * window to pending_buf. + */ +local block_state deflate_stored(s, flush) + deflate_state *s; + int flush; +{ + /* Stored blocks are limited to 0xffff bytes, pending_buf is limited + * to pending_buf_size, and each stored block has a 5 byte header: + */ + ulg max_block_size = 0xffff; + ulg max_start; + + if (max_block_size > s->pending_buf_size - 5) { + max_block_size = s->pending_buf_size - 5; + } + + /* Copy as much as possible from input to output: */ + for (;;) { + /* Fill the window as much as possible: */ + if (s->lookahead <= 1) { + + Assert(s->strstart < s->w_size+MAX_DIST(s) || + s->block_start >= (long)s->w_size, "slide too late"); + + fill_window(s); + if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more; + + if (s->lookahead == 0) break; /* flush the current block */ + } + Assert(s->block_start >= 0L, "block gone"); + + s->strstart += s->lookahead; + s->lookahead = 0; + + /* Emit a stored block if pending_buf will be full: */ + max_start = s->block_start + max_block_size; + if (s->strstart == 0 || (ulg)s->strstart >= max_start) { + /* strstart == 0 is possible when wraparound on 16-bit machine */ + s->lookahead = (uInt)(s->strstart - max_start); + s->strstart = (uInt)max_start; + FLUSH_BLOCK(s, 0); + } + /* Flush if we may have to slide, otherwise block_start may become + * negative and the data will be gone: + */ + if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { + FLUSH_BLOCK(s, 0); + } + } + FLUSH_BLOCK(s, flush == Z_FINISH); + return flush == Z_FINISH ? 
finish_done : block_done; +} + +/* =========================================================================== + * Compress as much as possible from the input stream, return the current + * block state. + * This function does not perform lazy evaluation of matches and inserts + * new strings in the dictionary only for unmatched strings or for short + * matches. It is used only for the fast compression options. + */ +local block_state deflate_fast(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head = NIL; /* head of the hash chain */ + int bflush; /* set if current block must be flushed */ + + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + * At this point we have always match_length < MIN_MATCH + */ + if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). 
+ */ + if (s->strategy != Z_HUFFMAN_ONLY) { + s->match_length = longest_match (s, hash_head); + } + /* longest_match() sets match_start */ + } + if (s->match_length >= MIN_MATCH) { + check_match(s, s->strstart, s->match_start, s->match_length); + + bflush = _tr_tally(s, s->strstart - s->match_start, + s->match_length - MIN_MATCH); + + s->lookahead -= s->match_length; + + /* Insert new strings in the hash table only if the match length + * is not too large. This saves time but degrades compression. + */ + if (s->match_length <= s->max_insert_length && + s->lookahead >= MIN_MATCH) { + s->match_length--; /* string at strstart already in hash table */ + do { + s->strstart++; + INSERT_STRING(s, s->strstart, hash_head); + /* strstart never exceeds WSIZE-MAX_MATCH, so there are + * always MIN_MATCH bytes ahead. + */ + } while (--s->match_length != 0); + s->strstart++; + } else { + s->strstart += s->match_length; + s->match_length = 0; + s->ins_h = s->window[s->strstart]; + UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); +#if MIN_MATCH != 3 + Call UPDATE_HASH() MIN_MATCH-3 more times +#endif + /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not + * matter since it will be recomputed at next deflate call. + */ + } + } else { + /* No match, output a literal byte */ + Tracevv((stderr,"%c", s->window[s->strstart])); + bflush = _tr_tally (s, 0, s->window[s->strstart]); + s->lookahead--; + s->strstart++; + } + if (bflush) FLUSH_BLOCK(s, 0); + } + FLUSH_BLOCK(s, flush == Z_FINISH); + return flush == Z_FINISH ? finish_done : block_done; +} + +/* =========================================================================== + * Same as above, but achieves better compression. We use a lazy + * evaluation for matches: a match is finally adopted only if there is + * no better match at the next window position. 
+ */ +local block_state deflate_slow(s, flush) + deflate_state *s; + int flush; +{ + IPos hash_head = NIL; /* head of hash chain */ + int bflush; /* set if current block must be flushed */ + + /* Process the input block. */ + for (;;) { + /* Make sure that we always have enough lookahead, except + * at the end of the input file. We need MAX_MATCH bytes + * for the next match, plus MIN_MATCH bytes to insert the + * string following the next match. + */ + if (s->lookahead < MIN_LOOKAHEAD) { + fill_window(s); + if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { + return need_more; + } + if (s->lookahead == 0) break; /* flush the current block */ + } + + /* Insert the string window[strstart .. strstart+2] in the + * dictionary, and set hash_head to the head of the hash chain: + */ + if (s->lookahead >= MIN_MATCH) { + INSERT_STRING(s, s->strstart, hash_head); + } + + /* Find the longest match, discarding those <= prev_length. + */ + s->prev_length = s->match_length, s->prev_match = s->match_start; + s->match_length = MIN_MATCH-1; + + if (hash_head != NIL && s->prev_length < s->max_lazy_match && + s->strstart - hash_head <= MAX_DIST(s)) { + /* To simplify the code, we prevent matches with the string + * of window index 0 (in particular we have to avoid a match + * of the string with itself at the start of the input file). + */ + if (s->strategy != Z_HUFFMAN_ONLY) { + s->match_length = longest_match (s, hash_head); + } + /* longest_match() sets match_start */ + + if (s->match_length <= 5 && (s->strategy == Z_FILTERED || + (s->match_length == MIN_MATCH && + s->strstart - s->match_start > TOO_FAR))) { + + /* If prev_match is also MIN_MATCH, match_start is garbage + * but we will ignore the current match anyway. 
+ */ + s->match_length = MIN_MATCH-1; + } + } + /* If there was a match at the previous step and the current + * match is not better, output the previous match: + */ + if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { + uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; + /* Do not insert strings in hash table beyond this. */ + + check_match(s, s->strstart-1, s->prev_match, s->prev_length); + + bflush = _tr_tally(s, s->strstart -1 - s->prev_match, + s->prev_length - MIN_MATCH); + + /* Insert in hash table all strings up to the end of the match. + * strstart-1 and strstart are already inserted. If there is not + * enough lookahead, the last two strings are not inserted in + * the hash table. + */ + s->lookahead -= s->prev_length-1; + s->prev_length -= 2; + do { + if (++s->strstart <= max_insert) { + INSERT_STRING(s, s->strstart, hash_head); + } + } while (--s->prev_length != 0); + s->match_available = 0; + s->match_length = MIN_MATCH-1; + s->strstart++; + + if (bflush) FLUSH_BLOCK(s, 0); + + } else if (s->match_available) { + /* If there was no match at the previous position, output a + * single literal. If there was a match but the current match + * is longer, truncate the previous match to a single literal. + */ + Tracevv((stderr,"%c", s->window[s->strstart-1])); + if (_tr_tally (s, 0, s->window[s->strstart-1])) { + FLUSH_BLOCK_ONLY(s, 0); + } + s->strstart++; + s->lookahead--; + if (s->strm->avail_out == 0) return need_more; + } else { + /* There is no previous match to compare with, wait for + * the next step to decide. + */ + s->match_available = 1; + s->strstart++; + s->lookahead--; + } + } + Assert (flush != Z_NO_FLUSH, "no flush?"); + if (s->match_available) { + Tracevv((stderr,"%c", s->window[s->strstart-1])); + _tr_tally (s, 0, s->window[s->strstart-1]); + s->match_available = 0; + } + FLUSH_BLOCK(s, flush == Z_FINISH); + return flush == Z_FINISH ? 
finish_done : block_done; +} +/* --- deflate.c */ + +/* +++ trees.c */ +/* trees.c -- output deflated data using Huffman coding + * Copyright (C) 1995-1996 Jean-loup Gailly + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* + * ALGORITHM + * + * The "deflation" process uses several Huffman trees. The more + * common source values are represented by shorter bit sequences. + * + * Each code tree is stored in a compressed form which is itself + * a Huffman encoding of the lengths of all the code strings (in + * ascending order by source values). The actual code strings are + * reconstructed from the lengths in the inflate process, as described + * in the deflate specification. + * + * REFERENCES + * + * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". + * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc + * + * Storer, James A. + * Data Compression: Methods and Theory, pp. 49-50. + * Computer Science Press, 1988. ISBN 0-7167-8156-5. + * + * Sedgewick, R. + * Algorithms, p290. + * Addison-Wesley, 1983. ISBN 0-201-06672-6. 
+ */ + +/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */ + +/* #include "deflate.h" */ + +#ifdef DEBUG_ZLIB +# include +#endif + +/* =========================================================================== + * Constants + */ + +#define MAX_BL_BITS 7 +/* Bit length codes must not exceed MAX_BL_BITS bits */ + +#define END_BLOCK 256 +/* end of block literal code */ + +#define REP_3_6 16 +/* repeat previous bit length 3-6 times (2 bits of repeat count) */ + +#define REPZ_3_10 17 +/* repeat a zero length 3-10 times (3 bits of repeat count) */ + +#define REPZ_11_138 18 +/* repeat a zero length 11-138 times (7 bits of repeat count) */ + +local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ + = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; + +local int extra_dbits[D_CODES] /* extra bits for each distance code */ + = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +local int extra_blbits[BL_CODES]/* extra bits for each bit length code */ + = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; + +local uch bl_order[BL_CODES] + = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; +/* The lengths of the bit length codes are sent in order of decreasing + * probability, to avoid transmitting the lengths for unused bit length codes. + */ + +#define Buf_size (8 * 2*sizeof(char)) +/* Number of bits used within bi_buf. (bi_buf might be implemented on + * more than 16 bits on some systems.) + */ + +/* =========================================================================== + * Local data. These are initialized only once. + */ + +local ct_data static_ltree[L_CODES+2]; +/* The static literal tree. Since the bit lengths are imposed, there is no + * need for the L_CODES extra codes used during heap construction. However + * The codes 286 and 287 are needed to build a canonical tree (see _tr_init + * below). + */ + +local ct_data static_dtree[D_CODES]; +/* The static distance tree. 
(Actually a trivial tree since all codes use + * 5 bits.) + */ + +local uch dist_code[512]; +/* distance codes. The first 256 values correspond to the distances + * 3 .. 258, the last 256 values correspond to the top 8 bits of + * the 15 bit distances. + */ + +local uch length_code[MAX_MATCH-MIN_MATCH+1]; +/* length code for each normalized match length (0 == MIN_MATCH) */ + +local int base_length[LENGTH_CODES]; +/* First normalized length for each code (0 = MIN_MATCH) */ + +local int base_dist[D_CODES]; +/* First normalized distance for each code (0 = distance of 1) */ + +struct static_tree_desc_s { + ct_data *static_tree; /* static tree or NULL */ + intf *extra_bits; /* extra bits for each code or NULL */ + int extra_base; /* base index for extra_bits */ + int elems; /* max number of elements in the tree */ + int max_length; /* max bit length for the codes */ +}; + +local static_tree_desc static_l_desc = +{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; + +local static_tree_desc static_d_desc = +{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; + +local static_tree_desc static_bl_desc = +{(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; + +/* =========================================================================== + * Local (static) routines in this file. 
+ */ + +local void tr_static_init OF((void)); +local void init_block OF((deflate_state *s)); +local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); +local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); +local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); +local void build_tree OF((deflate_state *s, tree_desc *desc)); +local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); +local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); +local int build_bl_tree OF((deflate_state *s)); +local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, + int blcodes)); +local void compress_block OF((deflate_state *s, ct_data *ltree, + ct_data *dtree)); +local void set_data_type OF((deflate_state *s)); +local unsigned bi_reverse OF((unsigned value, int length)); +local void bi_windup OF((deflate_state *s)); +local void bi_flush OF((deflate_state *s)); +local void copy_block OF((deflate_state *s, charf *buf, unsigned len, + int header)); + +#ifndef DEBUG_ZLIB +# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) + /* Send a code of the given tree. c and tree must not have side effects */ + +#else /* DEBUG_ZLIB */ +# define send_code(s, c, tree) \ + { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ + send_bits(s, tree[c].Code, tree[c].Len); } +#endif + +#define d_code(dist) \ + ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)]) +/* Mapping from a distance to a distance code. dist is the distance - 1 and + * must not have side effects. dist_code[256] and dist_code[257] are never + * used. + */ + +/* =========================================================================== + * Output a short LSB first on the stream. + * IN assertion: there is enough room in pendingBuf. 
+ */ +#define put_short(s, w) { \ + put_byte(s, (uch)((w) & 0xff)); \ + put_byte(s, (uch)((ush)(w) >> 8)); \ +} + +/* =========================================================================== + * Send a value on a given number of bits. + * IN assertion: length <= 16 and value fits in length bits. + */ +#ifdef DEBUG_ZLIB +local void send_bits OF((deflate_state *s, int value, int length)); + +local void send_bits(s, value, length) + deflate_state *s; + int value; /* value to send */ + int length; /* number of bits */ +{ + Tracevv((stderr," l %2d v %4x ", length, value)); + Assert(length > 0 && length <= 15, "invalid length"); + s->bits_sent += (ulg)length; + + /* If not enough room in bi_buf, use (valid) bits from bi_buf and + * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) + * unused bits in value. + */ + if (s->bi_valid > (int)Buf_size - length) { + s->bi_buf |= (value << s->bi_valid); + put_short(s, s->bi_buf); + s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); + s->bi_valid += length - Buf_size; + } else { + s->bi_buf |= value << s->bi_valid; + s->bi_valid += length; + } +} +#else /* !DEBUG_ZLIB */ + +#define send_bits(s, value, length) \ +{ int len = length;\ + if (s->bi_valid > (int)Buf_size - len) {\ + int val = value;\ + s->bi_buf |= (val << s->bi_valid);\ + put_short(s, s->bi_buf);\ + s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ + s->bi_valid += len - Buf_size;\ + } else {\ + s->bi_buf |= (value) << s->bi_valid;\ + s->bi_valid += len;\ + }\ +} +#endif /* DEBUG_ZLIB */ + + +#define MAX(a,b) (a >= b ? a : b) +/* the arguments must not have side effects */ + +/* =========================================================================== + * Initialize the various 'constant' tables. In a multi-threaded environment, + * this function may be called by two threads concurrently, but this is + * harmless since both invocations do exactly the same thing. 
+ */ +local void tr_static_init() +{ + static int static_init_done = 0; + int n; /* iterates over tree elements */ + int bits; /* bit counter */ + int length; /* length value */ + int code; /* code value */ + int dist; /* distance index */ + ush bl_count[MAX_BITS+1]; + /* number of codes at each bit length for an optimal tree */ + + if (static_init_done) return; + + /* Initialize the mapping length (0..255) -> length code (0..28) */ + length = 0; + for (code = 0; code < LENGTH_CODES-1; code++) { + base_length[code] = length; + for (n = 0; n < (1< dist code (0..29) */ + dist = 0; + for (code = 0 ; code < 16; code++) { + base_dist[code] = dist; + for (n = 0; n < (1<>= 7; /* from now on, all distances are divided by 128 */ + for ( ; code < D_CODES; code++) { + base_dist[code] = dist << 7; + for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { + dist_code[256 + dist++] = (uch)code; + } + } + Assert (dist == 256, "tr_static_init: 256+dist != 512"); + + /* Construct the codes of the static literal tree */ + for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; + n = 0; + while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; + while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; + while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; + while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; + /* Codes 286 and 287 do not exist, but we must include them in the + * tree construction to get a canonical Huffman tree (longest code + * all ones) + */ + gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); + + /* The static distance tree is trivial: */ + for (n = 0; n < D_CODES; n++) { + static_dtree[n].Len = 5; + static_dtree[n].Code = bi_reverse((unsigned)n, 5); + } + static_init_done = 1; +} + +/* =========================================================================== + * Initialize the tree data structures for a new zlib stream. 
+ */ +void _tr_init(s) + deflate_state *s; +{ + tr_static_init(); + + s->compressed_len = 0L; + + s->l_desc.dyn_tree = s->dyn_ltree; + s->l_desc.stat_desc = &static_l_desc; + + s->d_desc.dyn_tree = s->dyn_dtree; + s->d_desc.stat_desc = &static_d_desc; + + s->bl_desc.dyn_tree = s->bl_tree; + s->bl_desc.stat_desc = &static_bl_desc; + + s->bi_buf = 0; + s->bi_valid = 0; + s->last_eob_len = 8; /* enough lookahead for inflate */ +#ifdef DEBUG_ZLIB + s->bits_sent = 0L; +#endif + + /* Initialize the first block of the first file: */ + init_block(s); +} + +/* =========================================================================== + * Initialize a new block. + */ +local void init_block(s) + deflate_state *s; +{ + int n; /* iterates over tree elements */ + + /* Initialize the trees. */ + for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; + for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; + for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; + + s->dyn_ltree[END_BLOCK].Freq = 1; + s->opt_len = s->static_len = 0L; + s->last_lit = s->matches = 0; +} + +#define SMALLEST 1 +/* Index within the heap array of least frequent node in the Huffman tree */ + + +/* =========================================================================== + * Remove the smallest element from the heap and recreate the heap with + * one less element. Updates heap and heap_len. + */ +#define pqremove(s, tree, top) \ +{\ + top = s->heap[SMALLEST]; \ + s->heap[SMALLEST] = s->heap[s->heap_len--]; \ + pqdownheap(s, tree, SMALLEST); \ +} + +/* =========================================================================== + * Compares to subtrees, using the tree depth as tie breaker when + * the subtrees have equal frequency. This minimizes the worst case length. 
+ */ +#define smaller(tree, n, m, depth) \ + (tree[n].Freq < tree[m].Freq || \ + (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) + +/* =========================================================================== + * Restore the heap property by moving down the tree starting at node k, + * exchanging a node with the smallest of its two sons if necessary, stopping + * when the heap property is re-established (each father smaller than its + * two sons). + */ +local void pqdownheap(s, tree, k) + deflate_state *s; + ct_data *tree; /* the tree to restore */ + int k; /* node to move down */ +{ + int v = s->heap[k]; + int j = k << 1; /* left son of k */ + while (j <= s->heap_len) { + /* Set j to the smallest of the two sons: */ + if (j < s->heap_len && + smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { + j++; + } + /* Exit if v is smaller than both sons */ + if (smaller(tree, v, s->heap[j], s->depth)) break; + + /* Exchange v with the smallest son */ + s->heap[k] = s->heap[j]; k = j; + + /* And continue down the tree, setting j to the left son of k */ + j <<= 1; + } + s->heap[k] = v; +} + +/* =========================================================================== + * Compute the optimal bit lengths for a tree and update the total bit length + * for the current block. + * IN assertion: the fields freq and dad are set, heap[heap_max] and + * above are the tree nodes sorted by increasing frequency. + * OUT assertions: the field len is set to the optimal bit length, the + * array bl_count contains the frequencies for each bit length. + * The length opt_len is updated; static_len is also updated if stree is + * not null. 
+ */ +local void gen_bitlen(s, desc) + deflate_state *s; + tree_desc *desc; /* the tree descriptor */ +{ + ct_data *tree = desc->dyn_tree; + int max_code = desc->max_code; + ct_data *stree = desc->stat_desc->static_tree; + intf *extra = desc->stat_desc->extra_bits; + int base = desc->stat_desc->extra_base; + int max_length = desc->stat_desc->max_length; + int h; /* heap index */ + int n, m; /* iterate over the tree elements */ + int bits; /* bit length */ + int xbits; /* extra bits */ + ush f; /* frequency */ + int overflow = 0; /* number of elements with bit length too large */ + + for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; + + /* In a first pass, compute the optimal bit lengths (which may + * overflow in the case of the bit length tree). + */ + tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ + + for (h = s->heap_max+1; h < HEAP_SIZE; h++) { + n = s->heap[h]; + bits = tree[tree[n].Dad].Len + 1; + if (bits > max_length) bits = max_length, overflow++; + tree[n].Len = (ush)bits; + /* We overwrite tree[n].Dad which is no longer needed */ + + if (n > max_code) continue; /* not a leaf node */ + + s->bl_count[bits]++; + xbits = 0; + if (n >= base) xbits = extra[n-base]; + f = tree[n].Freq; + s->opt_len += (ulg)f * (bits + xbits); + if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits); + } + if (overflow == 0) return; + + Trace((stderr,"\nbit length overflow\n")); + /* This happens for example on obj2 and pic of the Calgary corpus */ + + /* Find the first bit length which could increase: */ + do { + bits = max_length-1; + while (s->bl_count[bits] == 0) bits--; + s->bl_count[bits]--; /* move one leaf down the tree */ + s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ + s->bl_count[max_length]--; + /* The brother of the overflow item also moves one step up, + * but this does not affect bl_count[max_length] + */ + overflow -= 2; + } while (overflow > 0); + + /* Now recompute all bit lengths, scanning in increasing 
frequency. + * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all + * lengths instead of fixing only the wrong ones. This idea is taken + * from 'ar' written by Haruhiko Okumura.) + */ + for (bits = max_length; bits != 0; bits--) { + n = s->bl_count[bits]; + while (n != 0) { + m = s->heap[--h]; + if (m > max_code) continue; + if (tree[m].Len != (unsigned) bits) { + Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); + s->opt_len += ((long)bits - (long)tree[m].Len) + *(long)tree[m].Freq; + tree[m].Len = (ush)bits; + } + n--; + } + } +} + +/* =========================================================================== + * Generate the codes for a given tree and bit counts (which need not be + * optimal). + * IN assertion: the array bl_count contains the bit length statistics for + * the given tree and the field len is set for all tree elements. + * OUT assertion: the field code is set for all tree elements of non + * zero code length. + */ +local void gen_codes (tree, max_code, bl_count) + ct_data *tree; /* the tree to decorate */ + int max_code; /* largest code with non zero frequency */ + ushf *bl_count; /* number of codes at each bit length */ +{ + ush next_code[MAX_BITS+1]; /* next code value for each bit length */ + ush code = 0; /* running code value */ + int bits; /* bit index */ + int n; /* code index */ + + /* The distribution counts are first used to generate the code values + * without bit reversal. + */ + for (bits = 1; bits <= MAX_BITS; bits++) { + next_code[bits] = code = (code + bl_count[bits-1]) << 1; + } + /* Check that the bit counts in bl_count are consistent. The last code + * must be all ones. 
+ */ + Assert (code + bl_count[MAX_BITS]-1 == (1<dyn_tree; + ct_data *stree = desc->stat_desc->static_tree; + int elems = desc->stat_desc->elems; + int n, m; /* iterate over heap elements */ + int max_code = -1; /* largest code with non zero frequency */ + int node; /* new node being created */ + + /* Construct the initial heap, with least frequent element in + * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. + * heap[0] is not used. + */ + s->heap_len = 0, s->heap_max = HEAP_SIZE; + + for (n = 0; n < elems; n++) { + if (tree[n].Freq != 0) { + s->heap[++(s->heap_len)] = max_code = n; + s->depth[n] = 0; + } else { + tree[n].Len = 0; + } + } + + /* The pkzip format requires that at least one distance code exists, + * and that at least one bit should be sent even if there is only one + * possible code. So to avoid special checks later on we force at least + * two codes of non zero frequency. + */ + while (s->heap_len < 2) { + node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); + tree[node].Freq = 1; + s->depth[node] = 0; + s->opt_len--; if (stree) s->static_len -= stree[node].Len; + /* node is 0 or 1 so it does not have extra bits */ + } + desc->max_code = max_code; + + /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, + * establish sub-heaps of increasing lengths: + */ + for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); + + /* Construct the Huffman tree by repeatedly combining the least two + * frequent nodes. 
+ */ + node = elems; /* next internal node of the tree */ + do { + pqremove(s, tree, n); /* n = node of least frequency */ + m = s->heap[SMALLEST]; /* m = node of next least frequency */ + + s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ + s->heap[--(s->heap_max)] = m; + + /* Create a new node father of n and m */ + tree[node].Freq = tree[n].Freq + tree[m].Freq; + s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1); + tree[n].Dad = tree[m].Dad = (ush)node; +#ifdef DUMP_BL_TREE + if (tree == s->bl_tree) { + fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", + node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); + } +#endif + /* and insert the new node in the heap */ + s->heap[SMALLEST] = node++; + pqdownheap(s, tree, SMALLEST); + + } while (s->heap_len >= 2); + + s->heap[--(s->heap_max)] = s->heap[SMALLEST]; + + /* At this point, the fields freq and dad are set. We can now + * generate the bit lengths. + */ + gen_bitlen(s, (tree_desc *)desc); + + /* The field len is now set, we can generate the bit codes */ + gen_codes ((ct_data *)tree, max_code, s->bl_count); +} + +/* =========================================================================== + * Scan a literal or distance tree to determine the frequencies of the codes + * in the bit length tree. 
+ */ +local void scan_tree (s, tree, max_code) + deflate_state *s; + ct_data *tree; /* the tree to be scanned */ + int max_code; /* and its largest code of non zero frequency */ +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + if (nextlen == 0) max_count = 138, min_count = 3; + tree[max_code+1].Len = (ush)0xffff; /* guard */ + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; nextlen = tree[n+1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + s->bl_tree[curlen].Freq += count; + } else if (curlen != 0) { + if (curlen != prevlen) s->bl_tree[curlen].Freq++; + s->bl_tree[REP_3_6].Freq++; + } else if (count <= 10) { + s->bl_tree[REPZ_3_10].Freq++; + } else { + s->bl_tree[REPZ_11_138].Freq++; + } + count = 0; prevlen = curlen; + if (nextlen == 0) { + max_count = 138, min_count = 3; + } else if (curlen == nextlen) { + max_count = 6, min_count = 3; + } else { + max_count = 7, min_count = 4; + } + } +} + +/* =========================================================================== + * Send a literal or distance tree in compressed form, using the codes in + * bl_tree. 
+ */ +local void send_tree (s, tree, max_code) + deflate_state *s; + ct_data *tree; /* the tree to be scanned */ + int max_code; /* and its largest code of non zero frequency */ +{ + int n; /* iterates over all tree elements */ + int prevlen = -1; /* last emitted length */ + int curlen; /* length of current code */ + int nextlen = tree[0].Len; /* length of next code */ + int count = 0; /* repeat count of the current code */ + int max_count = 7; /* max repeat count */ + int min_count = 4; /* min repeat count */ + + /* tree[max_code+1].Len = -1; */ /* guard already set */ + if (nextlen == 0) max_count = 138, min_count = 3; + + for (n = 0; n <= max_code; n++) { + curlen = nextlen; nextlen = tree[n+1].Len; + if (++count < max_count && curlen == nextlen) { + continue; + } else if (count < min_count) { + do { send_code(s, curlen, s->bl_tree); } while (--count != 0); + + } else if (curlen != 0) { + if (curlen != prevlen) { + send_code(s, curlen, s->bl_tree); count--; + } + Assert(count >= 3 && count <= 6, " 3_6?"); + send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); + + } else if (count <= 10) { + send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); + + } else { + send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); + } + count = 0; prevlen = curlen; + if (nextlen == 0) { + max_count = 138, min_count = 3; + } else if (curlen == nextlen) { + max_count = 6, min_count = 3; + } else { + max_count = 7, min_count = 4; + } + } +} + +/* =========================================================================== + * Construct the Huffman tree for the bit lengths and return the index in + * bl_order of the last bit length code to send. 
+ */ +local int build_bl_tree(s) + deflate_state *s; +{ + int max_blindex; /* index of last bit length code of non zero freq */ + + /* Determine the bit length frequencies for literal and distance trees */ + scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); + scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); + + /* Build the bit length tree: */ + build_tree(s, (tree_desc *)(&(s->bl_desc))); + /* opt_len now includes the length of the tree representations, except + * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + */ + + /* Determine the number of bit length codes to send. The pkzip format + * requires that at least 4 bit length codes be sent. (appnote.txt says + * 3 but the actual value used is 4.) + */ + for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { + if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; + } + /* Update opt_len to include the bit length tree and counts */ + s->opt_len += 3*(max_blindex+1) + 5+5+4; + Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", + s->opt_len, s->static_len)); + + return max_blindex; +} + +/* =========================================================================== + * Send the header for a block using dynamic Huffman trees: the counts, the + * lengths of the bit length codes, the literal tree and the distance tree. + * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
+ */ +local void send_all_trees(s, lcodes, dcodes, blcodes) + deflate_state *s; + int lcodes, dcodes, blcodes; /* number of codes for each tree */ +{ + int rank; /* index in bl_order */ + + Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); + Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, + "too many codes"); + Tracev((stderr, "\nbl counts: ")); + send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ + send_bits(s, dcodes-1, 5); + send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ + for (rank = 0; rank < blcodes; rank++) { + Tracev((stderr, "\nbl code %2d ", bl_order[rank])); + send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); + } + Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); + + send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ + Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); + + send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ + Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); +} + +/* =========================================================================== + * Send a stored block + */ +void _tr_stored_block(s, buf, stored_len, eof) + deflate_state *s; + charf *buf; /* input block */ + ulg stored_len; /* length of input block */ + int eof; /* true if this is the last block for a file */ +{ + send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */ + s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; + s->compressed_len += (stored_len + 4) << 3; + + copy_block(s, buf, (unsigned)stored_len, 1); /* with header */ +} + +/* Send just the `stored block' type code without any length bytes or data. 
 */
void _tr_stored_type_only(s)
    deflate_state *s;
{
    /* Emit only the 3-bit stored-block type code (last-block bit clear)
     * and pad to a byte boundary; no LEN/NLEN words and no data follow.
     */
    send_bits(s, (STORED_BLOCK << 1), 3);
    bi_windup(s);
    s->compressed_len = (s->compressed_len + 3) & ~7L;
}


/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 * The current inflate code requires 9 bits of lookahead. If the
 * last two codes for the previous block (real code plus EOB) were coded
 * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
 * the last real code. In this case we send two empty static blocks instead
 * of one. (There are no problems if the previous block is stored or fixed.)
 * To simplify the code, we assume the worst case of last real code encoded
 * on one bit only.
 */
void _tr_align(s)
    deflate_state *s;
{
    send_bits(s, STATIC_TREES<<1, 3);
    send_code(s, END_BLOCK, static_ltree);
    s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
    bi_flush(s);
    /* Of the 10 bits for the empty block, we have already sent
     * (10 - bi_valid) bits. The lookahead for the last real code (before
     * the EOB of the previous block) was thus at least one plus the length
     * of the EOB plus what we have just sent of the empty static block.
     */
    if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
        /* Not enough lookahead yet: emit a second empty static block */
        send_bits(s, STATIC_TREES<<1, 3);
        send_code(s, END_BLOCK, static_ltree);
        s->compressed_len += 10L;
        bi_flush(s);
    }
    s->last_eob_len = 7;
}

/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and output the encoded block to the zip file. This function
 * returns the total compressed length for the file so far.
 */
ulg _tr_flush_block(s, buf, stored_len, eof)
    deflate_state *s;
    charf *buf;       /* input block, or NULL if too old */
    ulg stored_len;   /* length of input block */
    int eof;          /* true if this is the last block for a file */
{
    ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
    int max_blindex = 0;  /* index of last bit length code of non zero freq */

    /* Build the Huffman trees unless a stored block is forced */
    if (s->level > 0) {

        /* Check if the file is ascii or binary */
        if (s->data_type == Z_UNKNOWN) set_data_type(s);

        /* Construct the literal and distance trees */
        build_tree(s, (tree_desc *)(&(s->l_desc)));
        Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
                s->static_len));

        build_tree(s, (tree_desc *)(&(s->d_desc)));
        Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
                s->static_len));
        /* At this point, opt_len and static_len are the total bit lengths of
         * the compressed block data, excluding the tree representations.
         */

        /* Build the bit length tree for the above two trees, and get the index
         * in bl_order of the last bit length code to send.
         */
        max_blindex = build_bl_tree(s);

        /* Determine the best encoding. Compute first the block length in bytes*/
        opt_lenb = (s->opt_len+3+7)>>3;
        static_lenb = (s->static_len+3+7)>>3;

        Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
                opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
                s->last_lit));

        if (static_lenb <= opt_lenb) opt_lenb = static_lenb;

    } else {
        Assert(buf != (char*)0, "lost buf");
        opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
    }

    /* If compression failed and this is the first and last block,
     * and if the .zip file can be seeked (to rewrite the local header),
     * the whole file is transformed into a stored file:
     */
#ifdef STORED_FILE_OK
#  ifdef FORCE_STORED_FILE
    if (eof && s->compressed_len == 0L) { /* force stored file */
#  else
    if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
#  endif
        /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
        if (buf == (charf*)0) error ("block vanished");

        copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
        s->compressed_len = stored_len << 3;
        s->method = STORED;
    } else
#endif /* STORED_FILE_OK */

#ifdef FORCE_STORED
    if (buf != (char*)0) { /* force stored block */
#else
    if (stored_len+4 <= opt_lenb && buf != (char*)0) {
                       /* 4: two words for the lengths */
#endif
        /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
         * Otherwise we can't have processed more than WSIZE input bytes since
         * the last block flush, because compression would have been
         * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
         * transform a block into a stored block.
         */
        _tr_stored_block(s, buf, stored_len, eof);

#ifdef FORCE_STATIC
    } else if (static_lenb >= 0) { /* force static trees */
#else
    } else if (static_lenb == opt_lenb) {
#endif
        /* Static trees are no worse than the dynamic ones: cheaper header */
        send_bits(s, (STATIC_TREES<<1)+eof, 3);
        compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
        s->compressed_len += 3 + s->static_len;
    } else {
        /* Dynamic trees win: send tree descriptions then the data */
        send_bits(s, (DYN_TREES<<1)+eof, 3);
        send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
                       max_blindex+1);
        compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
        s->compressed_len += 3 + s->opt_len;
    }
    Assert (s->compressed_len == s->bits_sent, "bad compressed size");
    init_block(s);

    if (eof) {
        bi_windup(s);
        s->compressed_len += 7;  /* align on byte boundary */
    }
    Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
           s->compressed_len-7*eof));

    return s->compressed_len >> 3;
}

/* ===========================================================================
 * Save the match info and tally the frequency counts. Return true if
 * the current block must be flushed.
 */
int _tr_tally (s, dist, lc)
    deflate_state *s;
    unsigned dist;  /* distance of matched string */
    unsigned lc;    /* match length-MIN_MATCH or unmatched char (if dist==0) */
{
    /* Record the (distance, length/literal) pair in the pending buffers */
    s->d_buf[s->last_lit] = (ush)dist;
    s->l_buf[s->last_lit++] = (uch)lc;
    if (dist == 0) {
        /* lc is the unmatched char */
        s->dyn_ltree[lc].Freq++;
    } else {
        s->matches++;
        /* Here, lc is the match length - MIN_MATCH */
        dist--;             /* dist = match distance - 1 */
        Assert((ush)dist < (ush)MAX_DIST(s) &&
               (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
               (ush)d_code(dist) < (ush)D_CODES,  "_tr_tally: bad match");

        s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
        s->dyn_dtree[d_code(dist)].Freq++;
    }

    /* Try to guess if it is profitable to stop the current block here */
    if (s->level > 2 && (s->last_lit & 0xfff) == 0) {
        /* Compute an upper bound for the compressed length */
        ulg out_length = (ulg)s->last_lit*8L;
        ulg in_length = (ulg)((long)s->strstart - s->block_start);
        int dcode;
        for (dcode = 0; dcode < D_CODES; dcode++) {
            out_length += (ulg)s->dyn_dtree[dcode].Freq *
                (5L+extra_dbits[dcode]);
        }
        out_length >>= 3;
        Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
               s->last_lit, in_length, out_length,
               100L - out_length*100L/in_length));
        if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
    }
    return (s->last_lit == s->lit_bufsize-1);
    /* We avoid equality with lit_bufsize because of wraparound at 64K
     * on 16 bit machines and because stored blocks are restricted to
     * 64K-1 bytes.
     */
}

/* ===========================================================================
 * Send the block data compressed using the given Huffman trees
 */
local void compress_block(s, ltree, dtree)
    deflate_state *s;
    ct_data *ltree; /* literal tree */
    ct_data *dtree; /* distance tree */
{
    unsigned dist;      /* distance of matched string */
    int lc;             /* match length or unmatched char (if dist == 0) */
    unsigned lx = 0;    /* running index in l_buf */
    unsigned code;      /* the code to send */
    int extra;          /* number of extra bits to send */

    if (s->last_lit != 0) do {
        dist = s->d_buf[lx];
        lc = s->l_buf[lx++];
        if (dist == 0) {
            send_code(s, lc, ltree); /* send a literal byte */
            Tracecv(isgraph(lc), (stderr," '%c' ", lc));
        } else {
            /* Here, lc is the match length - MIN_MATCH */
            code = length_code[lc];
            send_code(s, code+LITERALS+1, ltree); /* send the length code */
            extra = extra_lbits[code];
            if (extra != 0) {
                lc -= base_length[code];
                send_bits(s, lc, extra);       /* send the extra length bits */
            }
            dist--; /* dist is now the match distance - 1 */
            code = d_code(dist);
            Assert (code < D_CODES, "bad d_code");

            send_code(s, code, dtree);       /* send the distance code */
            extra = extra_dbits[code];
            if (extra != 0) {
                dist -= base_dist[code];
                send_bits(s, dist, extra);   /* send the extra distance bits */
            }
        } /* literal or match pair ? */

        /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
        Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");

    } while (lx < s->last_lit);

    send_code(s, END_BLOCK, ltree);
    s->last_eob_len = ltree[END_BLOCK].Len;
}

/* ===========================================================================
 * Set the data type to ASCII or BINARY, using a crude approximation:
 * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
 * IN assertion: the fields freq of dyn_ltree are set and the total of all
 * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
 */
local void set_data_type(s)
    deflate_state *s;
{
    int n = 0;
    unsigned ascii_freq = 0;
    unsigned bin_freq = 0;
    /* Byte values 0..6 and 128..LITERALS-1 count as binary, 7..127 as ascii */
    while (n < 7)        bin_freq += s->dyn_ltree[n++].Freq;
    while (n < 128)    ascii_freq += s->dyn_ltree[n++].Freq;
    while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
    /* binary iff bin_freq > 20%-ish of ascii_freq (ascii_freq/4) */
    s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
}

/* ===========================================================================
 * Reverse the first len bits of a code, using straightforward code (a faster
 * method would use a table)
 * IN assertion: 1 <= len <= 15
 */
local unsigned bi_reverse(code, len)
    unsigned code; /* the value to invert */
    int len;       /* its bit length */
{
    register unsigned res = 0;
    do {
        res |= code & 1;
        code >>= 1, res <<= 1;
    } while (--len > 0);
    return res >> 1;
}

/* ===========================================================================
 * Flush the bit buffer, keeping at most 7 bits in it.
 */
local void bi_flush(s)
    deflate_state *s;
{
    if (s->bi_valid == 16) {
        put_short(s, s->bi_buf);
        s->bi_buf = 0;
        s->bi_valid = 0;
    } else if (s->bi_valid >= 8) {
        put_byte(s, (Byte)s->bi_buf);
        s->bi_buf >>= 8;
        s->bi_valid -= 8;
    }
}

/* ===========================================================================
 * Flush the bit buffer and align the output on a byte boundary
 */
local void bi_windup(s)
    deflate_state *s;
{
    if (s->bi_valid > 8) {
        put_short(s, s->bi_buf);
    } else if (s->bi_valid > 0) {
        put_byte(s, (Byte)s->bi_buf);
    }
    s->bi_buf = 0;
    s->bi_valid = 0;
#ifdef DEBUG_ZLIB
    s->bits_sent = (s->bits_sent+7) & ~7;
#endif
}

/* ===========================================================================
 * Copy a stored block, storing first the length and its
 * one's complement if requested.
 */
local void copy_block(s, buf, len, header)
    deflate_state *s;
    charf    *buf;    /* the input data */
    unsigned len;     /* its length */
    int      header;  /* true if block header must be written */
{
    bi_windup(s);        /* align on byte boundary */
    s->last_eob_len = 8; /* enough lookahead for inflate */

    if (header) {
        /* LEN followed by its one's complement NLEN, per the stored-block
         * format in the deflate specification. */
        put_short(s, (ush)len);
        put_short(s, (ush)~len);
#ifdef DEBUG_ZLIB
        s->bits_sent += 2*16;
#endif
    }
#ifdef DEBUG_ZLIB
    s->bits_sent += (ulg)len<<3;
#endif
    /* bundle up the put_byte(s, *buf++) calls */
    zmemcpy(&s->pending_buf[s->pending], buf, len);
    s->pending += len;
}
/* --- trees.c */

/* +++ inflate.c */
/* inflate.c -- zlib interface to inflate modules
 * Copyright (C) 1995-1996 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* #include "zutil.h" */

/* +++ infblock.h */
/* infblock.h -- header to use infblock.c
 * Copyright (C) 1995-1996 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* WARNING: this file should *not* be used by applications. It is
   part of the implementation of the compression library and is
   subject to change. Applications should only use zlib.h.
+ */ + +struct inflate_blocks_state; +typedef struct inflate_blocks_state FAR inflate_blocks_statef; + +extern inflate_blocks_statef * inflate_blocks_new OF(( + z_streamp z, + check_func c, /* check function */ + uInt w)); /* window size */ + +extern int inflate_blocks OF(( + inflate_blocks_statef *, + z_streamp , + int)); /* initial return code */ + +extern void inflate_blocks_reset OF(( + inflate_blocks_statef *, + z_streamp , + uLongf *)); /* check value on output */ + +extern int inflate_blocks_free OF(( + inflate_blocks_statef *, + z_streamp , + uLongf *)); /* check value on output */ + +extern void inflate_set_dictionary OF(( + inflate_blocks_statef *s, + const Bytef *d, /* dictionary */ + uInt n)); /* dictionary length */ + +extern int inflate_addhistory OF(( + inflate_blocks_statef *, + z_streamp)); + +extern int inflate_packet_flush OF(( + inflate_blocks_statef *)); +/* --- infblock.h */ + +#ifndef NO_DUMMY_DECL +struct inflate_blocks_state {int dummy;}; /* for buggy compilers */ +#endif + +/* inflate private state */ +struct internal_state { + + /* mode */ + enum { + METHOD, /* waiting for method byte */ + FLAG, /* waiting for flag byte */ + DICT4, /* four dictionary check bytes to go */ + DICT3, /* three dictionary check bytes to go */ + DICT2, /* two dictionary check bytes to go */ + DICT1, /* one dictionary check byte to go */ + DICT0, /* waiting for inflateSetDictionary */ + BLOCKS, /* decompressing blocks */ + CHECK4, /* four check bytes to go */ + CHECK3, /* three check bytes to go */ + CHECK2, /* two check bytes to go */ + CHECK1, /* one check byte to go */ + DONE, /* finished check, done */ + BAD} /* got an error--stay here */ + mode; /* current inflate mode */ + + /* mode dependent information */ + union { + uInt method; /* if FLAGS, method byte */ + struct { + uLong was; /* computed check value */ + uLong need; /* stream check value */ + } check; /* if CHECK, check values to compare */ + uInt marker; /* if BAD, inflateSync's marker bytes count 
*/ + } sub; /* submode */ + + /* mode independent information */ + int nowrap; /* flag for no wrapper */ + uInt wbits; /* log2(window size) (8..15, defaults to 15) */ + inflate_blocks_statef + *blocks; /* current inflate_blocks state */ + +}; + + +int inflateReset(z) +z_streamp z; +{ + uLong c; + + if (z == Z_NULL || z->state == Z_NULL) + return Z_STREAM_ERROR; + z->total_in = z->total_out = 0; + z->msg = Z_NULL; + z->state->mode = z->state->nowrap ? BLOCKS : METHOD; + inflate_blocks_reset(z->state->blocks, z, &c); + Trace((stderr, "inflate: reset\n")); + return Z_OK; +} + + +int inflateEnd(z) +z_streamp z; +{ + uLong c; + + if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL) + return Z_STREAM_ERROR; + if (z->state->blocks != Z_NULL) + inflate_blocks_free(z->state->blocks, z, &c); + ZFREE(z, z->state); + z->state = Z_NULL; + Trace((stderr, "inflate: end\n")); + return Z_OK; +} + + +int inflateInit2_(z, w, version, stream_size) +z_streamp z; +int w; +const char *version; +int stream_size; +{ + if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || + stream_size != sizeof(z_stream)) + return Z_VERSION_ERROR; + + /* initialize state */ + if (z == Z_NULL) + return Z_STREAM_ERROR; + z->msg = Z_NULL; +#ifndef NO_ZCFUNCS + if (z->zalloc == Z_NULL) + { + z->zalloc = zcalloc; + z->opaque = (voidpf)0; + } + if (z->zfree == Z_NULL) z->zfree = zcfree; +#endif + if ((z->state = (struct internal_state FAR *) + ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL) + return Z_MEM_ERROR; + z->state->blocks = Z_NULL; + + /* handle undocumented nowrap option (no zlib header or check) */ + z->state->nowrap = 0; + if (w < 0) + { + w = - w; + z->state->nowrap = 1; + } + + /* set window size */ + if (w < 8 || w > 15) + { + inflateEnd(z); + return Z_STREAM_ERROR; + } + z->state->wbits = (uInt)w; + + /* create inflate_blocks state */ + if ((z->state->blocks = + inflate_blocks_new(z, z->state->nowrap ? 
Z_NULL : adler32, (uInt)1 << w)) + == Z_NULL) + { + inflateEnd(z); + return Z_MEM_ERROR; + } + Trace((stderr, "inflate: allocated\n")); + + /* reset state */ + inflateReset(z); + return Z_OK; +} + + +int inflateInit_(z, version, stream_size) +z_streamp z; +const char *version; +int stream_size; +{ + return inflateInit2_(z, DEF_WBITS, version, stream_size); +} + + +#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;} +#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++) + +int inflate(z, f) +z_streamp z; +int f; +{ + int r; + uInt b; + + if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0) + return Z_STREAM_ERROR; + r = Z_BUF_ERROR; + while (1) switch (z->state->mode) + { + case METHOD: + NEEDBYTE + if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED) + { + z->state->mode = BAD; + z->msg = (char*)"unknown compression method"; + z->state->sub.marker = 5; /* can't try inflateSync */ + break; + } + if ((z->state->sub.method >> 4) + 8 > z->state->wbits) + { + z->state->mode = BAD; + z->msg = (char*)"invalid window size"; + z->state->sub.marker = 5; /* can't try inflateSync */ + break; + } + z->state->mode = FLAG; + case FLAG: + NEEDBYTE + b = NEXTBYTE; + if (((z->state->sub.method << 8) + b) % 31) + { + z->state->mode = BAD; + z->msg = (char*)"incorrect header check"; + z->state->sub.marker = 5; /* can't try inflateSync */ + break; + } + Trace((stderr, "inflate: zlib header ok\n")); + if (!(b & PRESET_DICT)) + { + z->state->mode = BLOCKS; + break; + } + z->state->mode = DICT4; + case DICT4: + NEEDBYTE + z->state->sub.check.need = (uLong)NEXTBYTE << 24; + z->state->mode = DICT3; + case DICT3: + NEEDBYTE + z->state->sub.check.need += (uLong)NEXTBYTE << 16; + z->state->mode = DICT2; + case DICT2: + NEEDBYTE + z->state->sub.check.need += (uLong)NEXTBYTE << 8; + z->state->mode = DICT1; + case DICT1: + NEEDBYTE + z->state->sub.check.need += (uLong)NEXTBYTE; + z->adler = z->state->sub.check.need; + z->state->mode = DICT0; + return 
Z_NEED_DICT; + case DICT0: + z->state->mode = BAD; + z->msg = (char*)"need dictionary"; + z->state->sub.marker = 0; /* can try inflateSync */ + return Z_STREAM_ERROR; + case BLOCKS: + r = inflate_blocks(z->state->blocks, z, r); + if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0) + r = inflate_packet_flush(z->state->blocks); + if (r == Z_DATA_ERROR) + { + z->state->mode = BAD; + z->state->sub.marker = 0; /* can try inflateSync */ + break; + } + if (r != Z_STREAM_END) + return r; + r = Z_OK; + inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was); + if (z->state->nowrap) + { + z->state->mode = DONE; + break; + } + z->state->mode = CHECK4; + case CHECK4: + NEEDBYTE + z->state->sub.check.need = (uLong)NEXTBYTE << 24; + z->state->mode = CHECK3; + case CHECK3: + NEEDBYTE + z->state->sub.check.need += (uLong)NEXTBYTE << 16; + z->state->mode = CHECK2; + case CHECK2: + NEEDBYTE + z->state->sub.check.need += (uLong)NEXTBYTE << 8; + z->state->mode = CHECK1; + case CHECK1: + NEEDBYTE + z->state->sub.check.need += (uLong)NEXTBYTE; + + if (z->state->sub.check.was != z->state->sub.check.need) + { + z->state->mode = BAD; + z->msg = (char*)"incorrect data check"; + z->state->sub.marker = 5; /* can't try inflateSync */ + break; + } + Trace((stderr, "inflate: zlib check ok\n")); + z->state->mode = DONE; + case DONE: + return Z_STREAM_END; + case BAD: + return Z_DATA_ERROR; + default: + return Z_STREAM_ERROR; + } + + empty: + if (f != Z_PACKET_FLUSH) + return r; + z->state->mode = BAD; + z->msg = (char *)"need more for packet flush"; + z->state->sub.marker = 0; /* can try inflateSync */ + return Z_DATA_ERROR; +} + + +int inflateSetDictionary(z, dictionary, dictLength) +z_streamp z; +const Bytef *dictionary; +uInt dictLength; +{ + uInt length = dictLength; + + if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0) + return Z_STREAM_ERROR; + + if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR; + z->adler = 1L; + + if 
(length >= ((uInt)1<state->wbits)) + { + length = (1<state->wbits)-1; + dictionary += dictLength - length; + } + inflate_set_dictionary(z->state->blocks, dictionary, length); + z->state->mode = BLOCKS; + return Z_OK; +} + +/* + * This subroutine adds the data at next_in/avail_in to the output history + * without performing any output. The output buffer must be "caught up"; + * i.e. no pending output (hence s->read equals s->write), and the state must + * be BLOCKS (i.e. we should be willing to see the start of a series of + * BLOCKS). On exit, the output will also be caught up, and the checksum + * will have been updated if need be. + */ + +int inflateIncomp(z) +z_stream *z; +{ + if (z->state->mode != BLOCKS) + return Z_DATA_ERROR; + return inflate_addhistory(z->state->blocks, z); +} + + +int inflateSync(z) +z_streamp z; +{ + uInt n; /* number of bytes to look at */ + Bytef *p; /* pointer to bytes */ + uInt m; /* number of marker bytes found in a row */ + uLong r, w; /* temporaries to save total_in and total_out */ + + /* set up */ + if (z == Z_NULL || z->state == Z_NULL) + return Z_STREAM_ERROR; + if (z->state->mode != BAD) + { + z->state->mode = BAD; + z->state->sub.marker = 0; + } + if ((n = z->avail_in) == 0) + return Z_BUF_ERROR; + p = z->next_in; + m = z->state->sub.marker; + + /* search */ + while (n && m < 4) + { + if (*p == (Byte)(m < 2 ? 
0 : 0xff)) + m++; + else if (*p) + m = 0; + else + m = 4 - m; + p++, n--; + } + + /* restore */ + z->total_in += p - z->next_in; + z->next_in = p; + z->avail_in = n; + z->state->sub.marker = m; + + /* return no joy or set up to restart on a new block */ + if (m != 4) + return Z_DATA_ERROR; + r = z->total_in; w = z->total_out; + inflateReset(z); + z->total_in = r; z->total_out = w; + z->state->mode = BLOCKS; + return Z_OK; +} + +#undef NEEDBYTE +#undef NEXTBYTE +/* --- inflate.c */ + +/* +++ infblock.c */ +/* infblock.c -- interpret and process block types to last block + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* #include "zutil.h" */ +/* #include "infblock.h" */ + +/* +++ inftrees.h */ +/* inftrees.h -- header to use inftrees.c + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +/* Huffman code lookup table entry--this entry is four bytes for machines + that have 16-bit pointers (e.g. PC's in the small or medium model). 
*/ + +typedef struct inflate_huft_s FAR inflate_huft; + +struct inflate_huft_s { + union { + struct { + Byte Exop; /* number of extra bits or operation */ + Byte Bits; /* number of bits in this code or subcode */ + } what; + Bytef *pad; /* pad structure to a power of 2 (4 bytes for */ + } word; /* 16-bit, 8 bytes for 32-bit machines) */ + union { + uInt Base; /* literal, length base, or distance base */ + inflate_huft *Next; /* pointer to next level of table */ + } more; +}; + +#ifdef DEBUG_ZLIB + extern uInt inflate_hufts; +#endif + +extern int inflate_trees_bits OF(( + uIntf *, /* 19 code lengths */ + uIntf *, /* bits tree desired/actual depth */ + inflate_huft * FAR *, /* bits tree result */ + z_streamp )); /* for zalloc, zfree functions */ + +extern int inflate_trees_dynamic OF(( + uInt, /* number of literal/length codes */ + uInt, /* number of distance codes */ + uIntf *, /* that many (total) code lengths */ + uIntf *, /* literal desired/actual bit depth */ + uIntf *, /* distance desired/actual bit depth */ + inflate_huft * FAR *, /* literal/length tree result */ + inflate_huft * FAR *, /* distance tree result */ + z_streamp )); /* for zalloc, zfree functions */ + +extern int inflate_trees_fixed OF(( + uIntf *, /* literal desired/actual bit depth */ + uIntf *, /* distance desired/actual bit depth */ + inflate_huft * FAR *, /* literal/length tree result */ + inflate_huft * FAR *)); /* distance tree result */ + +extern int inflate_trees_free OF(( + inflate_huft *, /* tables to free */ + z_streamp )); /* for zfree function */ + +/* --- inftrees.h */ + +/* +++ infcodes.h */ +/* infcodes.h -- header to use infcodes.c + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. 
+ */ + +struct inflate_codes_state; +typedef struct inflate_codes_state FAR inflate_codes_statef; + +extern inflate_codes_statef *inflate_codes_new OF(( + uInt, uInt, + inflate_huft *, inflate_huft *, + z_streamp )); + +extern int inflate_codes OF(( + inflate_blocks_statef *, + z_streamp , + int)); + +extern void inflate_codes_free OF(( + inflate_codes_statef *, + z_streamp )); + +/* --- infcodes.h */ + +/* +++ infutil.h */ +/* infutil.h -- types and macros common to blocks and codes + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. + */ + +#ifndef _INFUTIL_H +#define _INFUTIL_H + +typedef enum { + TYPE, /* get type bits (3, including end bit) */ + LENS, /* get lengths for stored */ + STORED, /* processing stored block */ + TABLE, /* get table lengths */ + BTREE, /* get bit lengths tree for a dynamic block */ + DTREE, /* get length, distance trees for a dynamic block */ + CODES, /* processing fixed or dynamic block */ + DRY, /* output remaining window bytes */ + DONEB, /* finished last block, done */ + BADB} /* got a data error--stuck here */ +inflate_block_mode; + +/* inflate blocks semi-private state */ +struct inflate_blocks_state { + + /* mode */ + inflate_block_mode mode; /* current inflate_block mode */ + + /* mode dependent information */ + union { + uInt left; /* if STORED, bytes left to copy */ + struct { + uInt table; /* table lengths (14 bits) */ + uInt index; /* index into blens (or border) */ + uIntf *blens; /* bit lengths of codes */ + uInt bb; /* bit length tree depth */ + inflate_huft *tb; /* bit length decoding tree */ + } trees; /* if DTREE, decoding info for trees */ + struct { + inflate_huft *tl; + inflate_huft *td; /* trees to free */ + inflate_codes_statef + *codes; + } decode; /* if 
CODES, current state */ + } sub; /* submode */ + uInt last; /* true if this block is the last block */ + + /* mode independent information */ + uInt bitk; /* bits in bit buffer */ + uLong bitb; /* bit buffer */ + Bytef *window; /* sliding window */ + Bytef *end; /* one byte after sliding window */ + Bytef *read; /* window read pointer */ + Bytef *write; /* window write pointer */ + check_func checkfn; /* check function */ + uLong check; /* check on output */ + +}; + + +/* defines for inflate input/output */ +/* update pointers and return */ +#define UPDBITS {s->bitb=b;s->bitk=k;} +#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;} +#define UPDOUT {s->write=q;} +#define UPDATE {UPDBITS UPDIN UPDOUT} +#define LEAVE {UPDATE return inflate_flush(s,z,r);} +/* get bytes and bits */ +#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;} +#define NEEDBYTE {if(n)r=Z_OK;else LEAVE} +#define NEXTBYTE (n--,*p++) +#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<>=(j);k-=(j);} +/* output bytes */ +#define WAVAIL (uInt)(qread?s->read-q-1:s->end-q) +#define LOADOUT {q=s->write;m=(uInt)WAVAIL;} +#define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}} +#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT} +#define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;} +#define OUTBYTE(a) {*q++=(Byte)(a);m--;} +/* load local pointers */ +#define LOAD {LOADIN LOADOUT} + +/* masks for lower bits (size given to avoid silly warnings with Visual C++) */ +extern uInt inflate_mask[17]; + +/* copy as much as possible from the sliding window to the output area */ +extern int inflate_flush OF(( + inflate_blocks_statef *, + z_streamp , + int)); + +#ifndef NO_DUMMY_DECL +struct internal_state {int dummy;}; /* for buggy compilers */ +#endif + +#endif +/* --- infutil.h */ + +#ifndef NO_DUMMY_DECL +struct inflate_codes_state {int dummy;}; /* for buggy compilers */ +#endif + +/* Table for deflate from PKZIP's 
appnote.txt. */
/* Order in which the bit length code lengths are transmitted, per the
 * deflate format (most useful codes first so trailing zeros can be omitted).
 */
local const uInt border[] = { /* Order of the bit length code lengths */
        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

/*
   Notes beyond the 1.93a appnote.txt:

   1. Distance pointers never point before the beginning of the output
      stream.
   2. Distance pointers can point back across blocks, up to 32k away.
   3. There is an implied maximum of 7 bits for the bit length table and
      15 bits for the actual data.
   4. If only one code exists, then it is encoded using one bit.  (Zero
      would be more efficient, but perhaps a little confusing.)  If two
      codes exist, they are coded using one bit each (0 and 1).
   5. There is no way of sending zero distance codes--a dummy must be
      sent if there are none.  (History: a pre 2.0 version of PKZIP would
      store blocks with no distance codes, but this was discovered to be
      too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
      zero distance codes, which is sent as one code of zero bits in
      length.
   6. There are up to 286 literal/length codes.  Code 256 represents the
      end-of-block.  Note however that the static length tree defines
      288 codes just to fill out the Huffman codes.  Codes 286 and 287
      cannot be used though, since there is no length base or extra bits
      defined for them.  Similarily, there are up to 30 distance codes.
      However, static trees define 32 codes (all 5 bits) to fill out the
      Huffman codes, but the last two had better not show up in the data.
   7. Unzip can check dynamic Huffman blocks for complete code sets.
      The exception is that a single code would not be complete (see #4).
   8. The five bits following the block type is really the number of
      literal codes sent minus 257.
   9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
      (1+6+6).  Therefore, to output three times the length, you output
      three codes (1+1+1), whereas to output four times the same length,
      you only need two codes (1+3).  Hmm.
  10.
In the tree reconstruction algorithm, Code = Code + Increment + only if BitLength(i) is not zero. (Pretty obvious.) + 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19) + 12. Note: length code 284 can represent 227-258, but length code 285 + really is 258. The last length deserves its own, short code + since it gets used a lot in very redundant files. The length + 258 is special since 258 - 3 (the min match length) is 255. + 13. The literal/length and distance code bit lengths are read as a + single stream of lengths. It is possible (and advantageous) for + a repeat code (16, 17, or 18) to go across the boundary between + the two sets of lengths. + */ + + +void inflate_blocks_reset(s, z, c) +inflate_blocks_statef *s; +z_streamp z; +uLongf *c; +{ + if (s->checkfn != Z_NULL) + *c = s->check; + if (s->mode == BTREE || s->mode == DTREE) + ZFREE(z, s->sub.trees.blens); + if (s->mode == CODES) + { + inflate_codes_free(s->sub.decode.codes, z); + inflate_trees_free(s->sub.decode.td, z); + inflate_trees_free(s->sub.decode.tl, z); + } + s->mode = TYPE; + s->bitk = 0; + s->bitb = 0; + s->read = s->write = s->window; + if (s->checkfn != Z_NULL) + z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0); + Trace((stderr, "inflate: blocks reset\n")); +} + + +inflate_blocks_statef *inflate_blocks_new(z, c, w) +z_streamp z; +check_func c; +uInt w; +{ + inflate_blocks_statef *s; + + if ((s = (inflate_blocks_statef *)ZALLOC + (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL) + return s; + if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL) + { + ZFREE(z, s); + return Z_NULL; + } + s->end = s->window + w; + s->checkfn = c; + s->mode = TYPE; + Trace((stderr, "inflate: blocks allocated\n")); + inflate_blocks_reset(s, z, &s->check); + return s; +} + + +#ifdef DEBUG_ZLIB + extern uInt inflate_hufts; +#endif +int inflate_blocks(s, z, r) +inflate_blocks_statef *s; +z_streamp z; +int r; +{ + uInt t; /* temporary storage */ + uLong b; /* bit buffer */ + uInt k; /* bits in bit buffer 
*/ + Bytef *p; /* input data pointer */ + uInt n; /* bytes available there */ + Bytef *q; /* output window write pointer */ + uInt m; /* bytes to end of window or read pointer */ + + /* copy input/output information to locals (UPDATE macro restores) */ + LOAD + + /* process input based on current state */ + while (1) switch (s->mode) + { + case TYPE: + NEEDBITS(3) + t = (uInt)b & 7; + s->last = t & 1; + switch (t >> 1) + { + case 0: /* stored */ + Trace((stderr, "inflate: stored block%s\n", + s->last ? " (last)" : "")); + DUMPBITS(3) + t = k & 7; /* go to byte boundary */ + DUMPBITS(t) + s->mode = LENS; /* get length of stored block */ + break; + case 1: /* fixed */ + Trace((stderr, "inflate: fixed codes block%s\n", + s->last ? " (last)" : "")); + { + uInt bl, bd; + inflate_huft *tl, *td; + + inflate_trees_fixed(&bl, &bd, &tl, &td); + s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z); + if (s->sub.decode.codes == Z_NULL) + { + r = Z_MEM_ERROR; + LEAVE + } + s->sub.decode.tl = Z_NULL; /* don't try to free these */ + s->sub.decode.td = Z_NULL; + } + DUMPBITS(3) + s->mode = CODES; + break; + case 2: /* dynamic */ + Trace((stderr, "inflate: dynamic codes block%s\n", + s->last ? " (last)" : "")); + DUMPBITS(3) + s->mode = TABLE; + break; + case 3: /* illegal */ + DUMPBITS(3) + s->mode = BADB; + z->msg = (char*)"invalid block type"; + r = Z_DATA_ERROR; + LEAVE + } + break; + case LENS: + NEEDBITS(32) + if ((((~b) >> 16) & 0xffff) != (b & 0xffff)) + { + s->mode = BADB; + z->msg = (char*)"invalid stored block lengths"; + r = Z_DATA_ERROR; + LEAVE + } + s->sub.left = (uInt)b & 0xffff; + b = k = 0; /* dump bits */ + Tracev((stderr, "inflate: stored length %u\n", s->sub.left)); + s->mode = s->sub.left ? STORED : (s->last ? 
DRY : TYPE); + break; + case STORED: + if (n == 0) + LEAVE + NEEDOUT + t = s->sub.left; + if (t > n) t = n; + if (t > m) t = m; + zmemcpy(q, p, t); + p += t; n -= t; + q += t; m -= t; + if ((s->sub.left -= t) != 0) + break; + Tracev((stderr, "inflate: stored end, %lu total out\n", + z->total_out + (q >= s->read ? q - s->read : + (s->end - s->read) + (q - s->window)))); + s->mode = s->last ? DRY : TYPE; + break; + case TABLE: + NEEDBITS(14) + s->sub.trees.table = t = (uInt)b & 0x3fff; +#ifndef PKZIP_BUG_WORKAROUND + if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29) + { + s->mode = BADB; + z->msg = (char*)"too many length or distance symbols"; + r = Z_DATA_ERROR; + LEAVE + } +#endif + t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f); + if (t < 19) + t = 19; + if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL) + { + r = Z_MEM_ERROR; + LEAVE + } + DUMPBITS(14) + s->sub.trees.index = 0; + Tracev((stderr, "inflate: table sizes ok\n")); + s->mode = BTREE; + case BTREE: + while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10)) + { + NEEDBITS(3) + s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7; + DUMPBITS(3) + } + while (s->sub.trees.index < 19) + s->sub.trees.blens[border[s->sub.trees.index++]] = 0; + s->sub.trees.bb = 7; + t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb, + &s->sub.trees.tb, z); + if (t != Z_OK) + { + ZFREE(z, s->sub.trees.blens); + r = t; + if (r == Z_DATA_ERROR) + s->mode = BADB; + LEAVE + } + s->sub.trees.index = 0; + Tracev((stderr, "inflate: bits tree ok\n")); + s->mode = DTREE; + case DTREE: + while (t = s->sub.trees.table, + s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f)) + { + inflate_huft *h; + uInt i, j, c; + + t = s->sub.trees.bb; + NEEDBITS(t) + h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]); + t = h->word.what.Bits; + c = h->more.Base; + if (c < 16) + { + DUMPBITS(t) + s->sub.trees.blens[s->sub.trees.index++] = c; + } + else /* c == 16..18 */ + { + i = c == 18 ? 
7 : c - 14; + j = c == 18 ? 11 : 3; + NEEDBITS(t + i) + DUMPBITS(t) + j += (uInt)b & inflate_mask[i]; + DUMPBITS(i) + i = s->sub.trees.index; + t = s->sub.trees.table; + if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) || + (c == 16 && i < 1)) + { + inflate_trees_free(s->sub.trees.tb, z); + ZFREE(z, s->sub.trees.blens); + s->mode = BADB; + z->msg = (char*)"invalid bit length repeat"; + r = Z_DATA_ERROR; + LEAVE + } + c = c == 16 ? s->sub.trees.blens[i - 1] : 0; + do { + s->sub.trees.blens[i++] = c; + } while (--j); + s->sub.trees.index = i; + } + } + inflate_trees_free(s->sub.trees.tb, z); + s->sub.trees.tb = Z_NULL; + { + uInt bl, bd; + inflate_huft *tl, *td; + inflate_codes_statef *c; + + bl = 9; /* must be <= 9 for lookahead assumptions */ + bd = 6; /* must be <= 9 for lookahead assumptions */ + t = s->sub.trees.table; +#ifdef DEBUG_ZLIB + inflate_hufts = 0; +#endif + t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f), + s->sub.trees.blens, &bl, &bd, &tl, &td, z); + ZFREE(z, s->sub.trees.blens); + if (t != Z_OK) + { + if (t == (uInt)Z_DATA_ERROR) + s->mode = BADB; + r = t; + LEAVE + } + Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n", + inflate_hufts, sizeof(inflate_huft))); + if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL) + { + inflate_trees_free(td, z); + inflate_trees_free(tl, z); + r = Z_MEM_ERROR; + LEAVE + } + s->sub.decode.codes = c; + s->sub.decode.tl = tl; + s->sub.decode.td = td; + } + s->mode = CODES; + case CODES: + UPDATE + if ((r = inflate_codes(s, z, r)) != Z_STREAM_END) + return inflate_flush(s, z, r); + r = Z_OK; + inflate_codes_free(s->sub.decode.codes, z); + inflate_trees_free(s->sub.decode.td, z); + inflate_trees_free(s->sub.decode.tl, z); + LOAD + Tracev((stderr, "inflate: codes end, %lu total out\n", + z->total_out + (q >= s->read ? 
q - s->read : + (s->end - s->read) + (q - s->window)))); + if (!s->last) + { + s->mode = TYPE; + break; + } + if (k > 7) /* return unused byte, if any */ + { + Assert(k < 16, "inflate_codes grabbed too many bytes") + k -= 8; + n++; + p--; /* can always return one */ + } + s->mode = DRY; + case DRY: + FLUSH + if (s->read != s->write) + LEAVE + s->mode = DONEB; + case DONEB: + r = Z_STREAM_END; + LEAVE + case BADB: + r = Z_DATA_ERROR; + LEAVE + default: + r = Z_STREAM_ERROR; + LEAVE + } +} + + +int inflate_blocks_free(s, z, c) +inflate_blocks_statef *s; +z_streamp z; +uLongf *c; +{ + inflate_blocks_reset(s, z, c); + ZFREE(z, s->window); + ZFREE(z, s); + Trace((stderr, "inflate: blocks freed\n")); + return Z_OK; +} + + +void inflate_set_dictionary(s, d, n) +inflate_blocks_statef *s; +const Bytef *d; +uInt n; +{ + zmemcpy((charf *)s->window, d, n); + s->read = s->write = s->window + n; +} + +/* + * This subroutine adds the data at next_in/avail_in to the output history + * without performing any output. The output buffer must be "caught up"; + * i.e. no pending output (hence s->read equals s->write), and the state must + * be BLOCKS (i.e. we should be willing to see the start of a series of + * BLOCKS). On exit, the output will also be caught up, and the checksum + * will have been updated if need be. + */ +int inflate_addhistory(s, z) +inflate_blocks_statef *s; +z_stream *z; +{ + uLong b; /* bit buffer */ /* NOT USED HERE */ + uInt k; /* bits in bit buffer */ /* NOT USED HERE */ + uInt t; /* temporary storage */ + Bytef *p; /* input data pointer */ + uInt n; /* bytes available there */ + Bytef *q; /* output window write pointer */ + uInt m; /* bytes to end of window or read pointer */ + + if (s->read != s->write) + return Z_STREAM_ERROR; + if (s->mode != TYPE) + return Z_DATA_ERROR; + + /* we're ready to rock */ + LOAD + /* while there is input ready, copy to output buffer, moving + * pointers as needed. 
+ */ + while (n) { + t = n; /* how many to do */ + /* is there room until end of buffer? */ + if (t > m) t = m; + /* update check information */ + if (s->checkfn != Z_NULL) + s->check = (*s->checkfn)(s->check, q, t); + zmemcpy(q, p, t); + q += t; + p += t; + n -= t; + z->total_out += t; + s->read = q; /* drag read pointer forward */ +/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */ + if (q == s->end) { + s->read = q = s->window; + m = WAVAIL; + } + } + UPDATE + return Z_OK; +} + + +/* + * At the end of a Deflate-compressed PPP packet, we expect to have seen + * a `stored' block type value but not the (zero) length bytes. + */ +int inflate_packet_flush(s) + inflate_blocks_statef *s; +{ + if (s->mode != LENS) + return Z_DATA_ERROR; + s->mode = TYPE; + return Z_OK; +} +/* --- infblock.c */ + +/* +++ inftrees.c */ +/* inftrees.c -- generate Huffman trees for efficient decoding + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* #include "zutil.h" */ +/* #include "inftrees.h" */ + +char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler "; +/* + If you use the zlib library in a product, an acknowledgment is welcome + in the documentation of your product. If for some reason you cannot + include such an acknowledgment, I would appreciate that you keep this + copyright string in the executable of your product. 
+ */ + +#ifndef NO_DUMMY_DECL +struct internal_state {int dummy;}; /* for buggy compilers */ +#endif + +/* simplify the use of the inflate_huft type with some defines */ +#define base more.Base +#define next more.Next +#define exop word.what.Exop +#define bits word.what.Bits + + +local int huft_build OF(( + uIntf *, /* code lengths in bits */ + uInt, /* number of codes */ + uInt, /* number of "simple" codes */ + const uIntf *, /* list of base values for non-simple codes */ + const uIntf *, /* list of extra bits for non-simple codes */ + inflate_huft * FAR*,/* result: starting table */ + uIntf *, /* maximum lookup bits (returns actual) */ + z_streamp )); /* for zalloc function */ + +local voidpf falloc OF(( + voidpf, /* opaque pointer (not used) */ + uInt, /* number of items */ + uInt)); /* size of item */ + +/* Tables for deflate from PKZIP's appnote.txt. */ +local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, + 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; + /* see note #13 above about 258 */ +local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */ + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */ +local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */ + 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, + 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, + 8193, 12289, 16385, 24577}; +local const uInt cpdext[30] = { /* Extra bits for distance codes */ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, + 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, + 12, 12, 13, 13}; + +/* + Huffman code decoding is performed using a multi-level table lookup. + The fastest way to decode is to simply build a lookup table whose + size is determined by the longest code. 
However, the time it takes
+   to build this table can also be a factor if the data being decoded
+   is not very long.  The most common codes are necessarily the
+   shortest codes, so those codes dominate the decoding time, and hence
+   the speed.  The idea is you can have a shorter table that decodes the
+   shorter, more probable codes, and then point to subsidiary tables for
+   the longer codes.  The time it costs to decode the longer codes is
+   then traded against the time it takes to make longer tables.
+
+   The results of this trade are in the variables lbits and dbits
+   below.  lbits is the number of bits the first level table for literal/
+   length codes can decode in one step, and dbits is the same thing for
+   the distance codes.  Subsequent tables are also less than or equal to
+   those sizes.  These values may be adjusted either when all of the
+   codes are shorter than that, in which case the longest code length in
+   bits is used, or when the shortest code is *longer* than the requested
+   table size, in which case the length of the shortest code in bits is
+   used.
+
+   There are two different values for the two tables, since they code a
+   different number of possibilities each.  The literal/length table
+   codes 286 possible values, or in a flat code, a little over eight
+   bits.  The distance table codes 30 possible values, or a little less
+   than five bits, flat.  The optimum values for speed end up being
+   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
+   The optimum values may differ though from machine to machine, and
+   possibly even between compilers.  Your mileage may vary.
+ */
+
+
+/* If BMAX needs to be larger than 16, then h and x[] should be uLong.
*/ +#define BMAX 15 /* maximum bit length of any code */ +#define N_MAX 288 /* maximum number of codes in any set */ + +#ifdef DEBUG_ZLIB + uInt inflate_hufts; +#endif + +local int huft_build(b, n, s, d, e, t, m, zs) +uIntf *b; /* code lengths in bits (all assumed <= BMAX) */ +uInt n; /* number of codes (assumed <= N_MAX) */ +uInt s; /* number of simple-valued codes (0..s-1) */ +const uIntf *d; /* list of base values for non-simple codes */ +const uIntf *e; /* list of extra bits for non-simple codes */ +inflate_huft * FAR *t; /* result: starting table */ +uIntf *m; /* maximum lookup bits, returns actual */ +z_streamp zs; /* for zalloc function */ +/* Given a list of code lengths and a maximum table size, make a set of + tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR + if the given code set is incomplete (the tables are still built in this + case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of + lengths), or Z_MEM_ERROR if not enough memory. */ +{ + + uInt a; /* counter for codes of length k */ + uInt c[BMAX+1]; /* bit length count table */ + uInt f; /* i repeats in table every f entries */ + int g; /* maximum code length */ + int h; /* table level */ + register uInt i; /* counter, current code */ + register uInt j; /* counter */ + register int k; /* number of bits in current code */ + int l; /* bits per table (returned in m) */ + register uIntf *p; /* pointer into c[], b[], or v[] */ + inflate_huft *q; /* points to current table */ + struct inflate_huft_s r; /* table entry for structure assignment */ + inflate_huft *u[BMAX]; /* table stack */ + uInt v[N_MAX]; /* values in order of bit length */ + register int w; /* bits before this table == (l * h) */ + uInt x[BMAX+1]; /* bit offsets, then code stack */ + uIntf *xp; /* pointer into x */ + int y; /* number of dummy codes added */ + uInt z; /* number of entries in current table */ + + + /* Generate counts for each bit length */ + p = c; +#define C0 *p++ = 0; +#define C2 C0 
C0 C0 C0 +#define C4 C2 C2 C2 C2 + C4 /* clear c[]--assume BMAX+1 is 16 */ + p = b; i = n; + do { + c[*p++]++; /* assume all entries <= BMAX */ + } while (--i); + if (c[0] == n) /* null input--all zero length codes */ + { + *t = (inflate_huft *)Z_NULL; + *m = 0; + return Z_OK; + } + + + /* Find minimum and maximum length, bound *m by those */ + l = *m; + for (j = 1; j <= BMAX; j++) + if (c[j]) + break; + k = j; /* minimum code length */ + if ((uInt)l < j) + l = j; + for (i = BMAX; i; i--) + if (c[i]) + break; + g = i; /* maximum code length */ + if ((uInt)l > i) + l = i; + *m = l; + + + /* Adjust last length count to fill out codes, if needed */ + for (y = 1 << j; j < i; j++, y <<= 1) + if ((y -= c[j]) < 0) + return Z_DATA_ERROR; + if ((y -= c[i]) < 0) + return Z_DATA_ERROR; + c[i] += y; + + + /* Generate starting offsets into the value table for each length */ + x[1] = j = 0; + p = c + 1; xp = x + 2; + while (--i) { /* note that i == g from above */ + *xp++ = (j += *p++); + } + + + /* Make a table of values in order of bit lengths */ + p = b; i = 0; + do { + if ((j = *p++) != 0) + v[x[j]++] = i; + } while (++i < n); + n = x[g]; /* set n to length of v */ + + + /* Generate the Huffman codes and for each, make the table entries */ + x[0] = i = 0; /* first Huffman code is zero */ + p = v; /* grab values in bit order */ + h = -1; /* no tables yet--level -1 */ + w = -l; /* bits decoded == (l * h) */ + u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */ + q = (inflate_huft *)Z_NULL; /* ditto */ + z = 0; /* ditto */ + + /* go through the bit lengths (k already is bits in shortest code) */ + for (; k <= g; k++) + { + a = c[k]; + while (a--) + { + /* here i is the Huffman code of length k bits for value *p */ + /* make tables up to required level */ + while (k > w + l) + { + h++; + w += l; /* previous table always l bits */ + + /* compute minimum size table less than or equal to l bits */ + z = g - w; + z = z > (uInt)l ? 
l : z; /* table size upper limit */ + if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */ + { /* too few codes for k-w bit table */ + f -= a + 1; /* deduct codes from patterns left */ + xp = c + k; + if (j < z) + while (++j < z) /* try smaller tables up to z bits */ + { + if ((f <<= 1) <= *++xp) + break; /* enough codes to use up j bits */ + f -= *xp; /* else deduct codes from patterns */ + } + } + z = 1 << j; /* table entries for j-bit table */ + + /* allocate and link in new table */ + if ((q = (inflate_huft *)ZALLOC + (zs,z + 1,sizeof(inflate_huft))) == Z_NULL) + { + if (h) + inflate_trees_free(u[0], zs); + return Z_MEM_ERROR; /* not enough memory */ + } +#ifdef DEBUG_ZLIB + inflate_hufts += z + 1; +#endif + *t = q + 1; /* link to list for huft_free() */ + *(t = &(q->next)) = Z_NULL; + u[h] = ++q; /* table starts after link */ + + /* connect to last table, if there is one */ + if (h) + { + x[h] = i; /* save pattern for backing up */ + r.bits = (Byte)l; /* bits to dump before this table */ + r.exop = (Byte)j; /* bits in this table */ + r.next = q; /* pointer to this table */ + j = i >> (w - l); /* (get around Turbo C bug) */ + u[h-1][j] = r; /* connect to last table */ + } + } + + /* set up table entry in r */ + r.bits = (Byte)(k - w); + if (p >= v + n) + r.exop = 128 + 64; /* out of values--invalid code */ + else if (*p < s) + { + r.exop = (Byte)(*p < 256 ? 
0 : 32 + 64); /* 256 is end-of-block */ + r.base = *p++; /* simple code is just the value */ + } + else + { + r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */ + r.base = d[*p++ - s]; + } + + /* fill code-like entries with r */ + f = 1 << (k - w); + for (j = i >> w; j < z; j += f) + q[j] = r; + + /* backwards increment the k-bit code i */ + for (j = 1 << (k - 1); i & j; j >>= 1) + i ^= j; + i ^= j; + + /* backup over finished tables */ + while ((i & ((1 << w) - 1)) != x[h]) + { + h--; /* don't need to update q */ + w -= l; + } + } + } + + + /* Return Z_BUF_ERROR if we were given an incomplete table */ + return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK; +} + + +int inflate_trees_bits(c, bb, tb, z) +uIntf *c; /* 19 code lengths */ +uIntf *bb; /* bits tree desired/actual depth */ +inflate_huft * FAR *tb; /* bits tree result */ +z_streamp z; /* for zfree function */ +{ + int r; + + r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z); + if (r == Z_DATA_ERROR) + z->msg = (char*)"oversubscribed dynamic bit lengths tree"; + else if (r == Z_BUF_ERROR || *bb == 0) + { + inflate_trees_free(*tb, z); + z->msg = (char*)"incomplete dynamic bit lengths tree"; + r = Z_DATA_ERROR; + } + return r; +} + + +int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z) +uInt nl; /* number of literal/length codes */ +uInt nd; /* number of distance codes */ +uIntf *c; /* that many (total) code lengths */ +uIntf *bl; /* literal desired/actual bit depth */ +uIntf *bd; /* distance desired/actual bit depth */ +inflate_huft * FAR *tl; /* literal/length tree result */ +inflate_huft * FAR *td; /* distance tree result */ +z_streamp z; /* for zfree function */ +{ + int r; + + /* build literal/length tree */ + r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z); + if (r != Z_OK || *bl == 0) + { + if (r == Z_DATA_ERROR) + z->msg = (char*)"oversubscribed literal/length tree"; + else if (r != Z_MEM_ERROR) + { + inflate_trees_free(*tl, z); + z->msg = (char*)"incomplete 
literal/length tree"; + r = Z_DATA_ERROR; + } + return r; + } + + /* build distance tree */ + r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z); + if (r != Z_OK || (*bd == 0 && nl > 257)) + { + if (r == Z_DATA_ERROR) + z->msg = (char*)"oversubscribed distance tree"; + else if (r == Z_BUF_ERROR) { +#ifdef PKZIP_BUG_WORKAROUND + r = Z_OK; + } +#else + inflate_trees_free(*td, z); + z->msg = (char*)"incomplete distance tree"; + r = Z_DATA_ERROR; + } + else if (r != Z_MEM_ERROR) + { + z->msg = (char*)"empty distance tree with lengths"; + r = Z_DATA_ERROR; + } + inflate_trees_free(*tl, z); + return r; +#endif + } + + /* done */ + return Z_OK; +} + + +/* build fixed tables only once--keep them here */ +local int fixed_built = 0; +#define FIXEDH 530 /* number of hufts used by fixed tables */ +local inflate_huft fixed_mem[FIXEDH]; +local uInt fixed_bl; +local uInt fixed_bd; +local inflate_huft *fixed_tl; +local inflate_huft *fixed_td; + + +local voidpf falloc(q, n, s) +voidpf q; /* opaque pointer */ +uInt n; /* number of items */ +uInt s; /* size of item */ +{ + Assert(s == sizeof(inflate_huft) && n <= *(intf *)q, + "inflate_trees falloc overflow"); + *(intf *)q -= n+s-s; /* s-s to avoid warning */ + return (voidpf)(fixed_mem + *(intf *)q); +} + + +int inflate_trees_fixed(bl, bd, tl, td) +uIntf *bl; /* literal desired/actual bit depth */ +uIntf *bd; /* distance desired/actual bit depth */ +inflate_huft * FAR *tl; /* literal/length tree result */ +inflate_huft * FAR *td; /* distance tree result */ +{ + /* build fixed tables if not already (multiple overlapped executions ok) */ + if (!fixed_built) + { + int k; /* temporary variable */ + unsigned c[288]; /* length list for huft_build */ + z_stream z; /* for falloc function */ + int f = FIXEDH; /* number of hufts left in fixed_mem */ + + /* set up fake z_stream for memory routines */ + z.zalloc = falloc; + z.zfree = Z_NULL; + z.opaque = (voidpf)&f; + + /* literal table */ + for (k = 0; k < 144; k++) + c[k] = 8; + for (; 
k < 256; k++) + c[k] = 9; + for (; k < 280; k++) + c[k] = 7; + for (; k < 288; k++) + c[k] = 8; + fixed_bl = 7; + huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z); + + /* distance table */ + for (k = 0; k < 30; k++) + c[k] = 5; + fixed_bd = 5; + huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z); + + /* done */ + Assert(f == 0, "invalid build of fixed tables"); + fixed_built = 1; + } + *bl = fixed_bl; + *bd = fixed_bd; + *tl = fixed_tl; + *td = fixed_td; + return Z_OK; +} + + +int inflate_trees_free(t, z) +inflate_huft *t; /* table to free */ +z_streamp z; /* for zfree function */ +/* Free the malloc'ed tables built by huft_build(), which makes a linked + list of the tables it made, with the links in a dummy first entry of + each table. */ +{ + register inflate_huft *p, *q, *r; + + /* Reverse linked list */ + p = Z_NULL; + q = t; + while (q != Z_NULL) + { + r = (q - 1)->next; + (q - 1)->next = p; + p = q; + q = r; + } + /* Go through linked list, freeing from the malloced (t[-1]) address. */ + while (p != Z_NULL) + { + q = (--p)->next; + ZFREE(z,p); + p = q; + } + return Z_OK; +} +/* --- inftrees.c */ + +/* +++ infcodes.c */ +/* infcodes.c -- process literals and length/distance pairs + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* #include "zutil.h" */ +/* #include "inftrees.h" */ +/* #include "infblock.h" */ +/* #include "infcodes.h" */ +/* #include "infutil.h" */ + +/* +++ inffast.h */ +/* inffast.h -- header to use inffast.c + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* WARNING: this file should *not* be used by applications. It is + part of the implementation of the compression library and is + subject to change. Applications should only use zlib.h. 
+ */ + +extern int inflate_fast OF(( + uInt, + uInt, + inflate_huft *, + inflate_huft *, + inflate_blocks_statef *, + z_streamp )); +/* --- inffast.h */ + +/* simplify the use of the inflate_huft type with some defines */ +#define base more.Base +#define next more.Next +#define exop word.what.Exop +#define bits word.what.Bits + +/* inflate codes private state */ +struct inflate_codes_state { + + /* mode */ + enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ + START, /* x: set up for LEN */ + LEN, /* i: get length/literal/eob next */ + LENEXT, /* i: getting length extra (have base) */ + DIST, /* i: get distance next */ + DISTEXT, /* i: getting distance extra */ + COPY, /* o: copying bytes in window, waiting for space */ + LIT, /* o: got literal, waiting for output space */ + WASH, /* o: got eob, possibly still output waiting */ + END, /* x: got eob and all data flushed */ + BADCODE} /* x: got error */ + mode; /* current inflate_codes mode */ + + /* mode dependent information */ + uInt len; + union { + struct { + inflate_huft *tree; /* pointer into tree */ + uInt need; /* bits needed */ + } code; /* if LEN or DIST, where in tree */ + uInt lit; /* if LIT, literal */ + struct { + uInt get; /* bits to get for extra */ + uInt dist; /* distance back to copy from */ + } copy; /* if EXT or COPY, where and how much */ + } sub; /* submode */ + + /* mode independent information */ + Byte lbits; /* ltree bits decoded per branch */ + Byte dbits; /* dtree bits decoder per branch */ + inflate_huft *ltree; /* literal/length/eob tree */ + inflate_huft *dtree; /* distance tree */ + +}; + + +inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z) +uInt bl, bd; +inflate_huft *tl; +inflate_huft *td; /* need separate declaration for Borland C++ */ +z_streamp z; +{ + inflate_codes_statef *c; + + if ((c = (inflate_codes_statef *) + ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL) + { + c->mode = START; + c->lbits = (Byte)bl; + c->dbits = (Byte)bd; + c->ltree = tl; 
+ c->dtree = td; + Tracev((stderr, "inflate: codes new\n")); + } + return c; +} + + +int inflate_codes(s, z, r) +inflate_blocks_statef *s; +z_streamp z; +int r; +{ + uInt j; /* temporary storage */ + inflate_huft *t; /* temporary pointer */ + uInt e; /* extra bits or operation */ + uLong b; /* bit buffer */ + uInt k; /* bits in bit buffer */ + Bytef *p; /* input data pointer */ + uInt n; /* bytes available there */ + Bytef *q; /* output window write pointer */ + uInt m; /* bytes to end of window or read pointer */ + Bytef *f; /* pointer to copy strings from */ + inflate_codes_statef *c = s->sub.decode.codes; /* codes state */ + + /* copy input/output information to locals (UPDATE macro restores) */ + LOAD + + /* process input and output based on current state */ + while (1) switch (c->mode) + { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */ + case START: /* x: set up for LEN */ +#ifndef SLOW + if (m >= 258 && n >= 10) + { + UPDATE + r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z); + LOAD + if (r != Z_OK) + { + c->mode = r == Z_STREAM_END ? WASH : BADCODE; + break; + } + } +#endif /* !SLOW */ + c->sub.code.need = c->lbits; + c->sub.code.tree = c->ltree; + c->mode = LEN; + case LEN: /* i: get length/literal/eob next */ + j = c->sub.code.need; + NEEDBITS(j) + t = c->sub.code.tree + ((uInt)b & inflate_mask[j]); + DUMPBITS(t->bits) + e = (uInt)(t->exop); + if (e == 0) /* literal */ + { + c->sub.lit = t->base; + Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? 
+ "inflate: literal '%c'\n" : + "inflate: literal 0x%02x\n", t->base)); + c->mode = LIT; + break; + } + if (e & 16) /* length */ + { + c->sub.copy.get = e & 15; + c->len = t->base; + c->mode = LENEXT; + break; + } + if ((e & 64) == 0) /* next table */ + { + c->sub.code.need = e; + c->sub.code.tree = t->next; + break; + } + if (e & 32) /* end of block */ + { + Tracevv((stderr, "inflate: end of block\n")); + c->mode = WASH; + break; + } + c->mode = BADCODE; /* invalid code */ + z->msg = (char*)"invalid literal/length code"; + r = Z_DATA_ERROR; + LEAVE + case LENEXT: /* i: getting length extra (have base) */ + j = c->sub.copy.get; + NEEDBITS(j) + c->len += (uInt)b & inflate_mask[j]; + DUMPBITS(j) + c->sub.code.need = c->dbits; + c->sub.code.tree = c->dtree; + Tracevv((stderr, "inflate: length %u\n", c->len)); + c->mode = DIST; + case DIST: /* i: get distance next */ + j = c->sub.code.need; + NEEDBITS(j) + t = c->sub.code.tree + ((uInt)b & inflate_mask[j]); + DUMPBITS(t->bits) + e = (uInt)(t->exop); + if (e & 16) /* distance */ + { + c->sub.copy.get = e & 15; + c->sub.copy.dist = t->base; + c->mode = DISTEXT; + break; + } + if ((e & 64) == 0) /* next table */ + { + c->sub.code.need = e; + c->sub.code.tree = t->next; + break; + } + c->mode = BADCODE; /* invalid code */ + z->msg = (char*)"invalid distance code"; + r = Z_DATA_ERROR; + LEAVE + case DISTEXT: /* i: getting distance extra */ + j = c->sub.copy.get; + NEEDBITS(j) + c->sub.copy.dist += (uInt)b & inflate_mask[j]; + DUMPBITS(j) + Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist)); + c->mode = COPY; + case COPY: /* o: copying bytes in window, waiting for space */ +#ifndef __TURBOC__ /* Turbo C bug for following expression */ + f = (uInt)(q - s->window) < c->sub.copy.dist ? 
+ s->end - (c->sub.copy.dist - (q - s->window)) : + q - c->sub.copy.dist; +#else + f = q - c->sub.copy.dist; + if ((uInt)(q - s->window) < c->sub.copy.dist) + f = s->end - (c->sub.copy.dist - (uInt)(q - s->window)); +#endif + while (c->len) + { + NEEDOUT + OUTBYTE(*f++) + if (f == s->end) + f = s->window; + c->len--; + } + c->mode = START; + break; + case LIT: /* o: got literal, waiting for output space */ + NEEDOUT + OUTBYTE(c->sub.lit) + c->mode = START; + break; + case WASH: /* o: got eob, possibly more output */ + FLUSH + if (s->read != s->write) + LEAVE + c->mode = END; + case END: + r = Z_STREAM_END; + LEAVE + case BADCODE: /* x: got error */ + r = Z_DATA_ERROR; + LEAVE + default: + r = Z_STREAM_ERROR; + LEAVE + } +} + + +void inflate_codes_free(c, z) +inflate_codes_statef *c; +z_streamp z; +{ + ZFREE(z, c); + Tracev((stderr, "inflate: codes free\n")); +} +/* --- infcodes.c */ + +/* +++ infutil.c */ +/* inflate_util.c -- data and routines common to blocks and codes + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* #include "zutil.h" */ +/* #include "infblock.h" */ +/* #include "inftrees.h" */ +/* #include "infcodes.h" */ +/* #include "infutil.h" */ + +#ifndef NO_DUMMY_DECL +struct inflate_codes_state {int dummy;}; /* for buggy compilers */ +#endif + +/* And'ing with mask[n] masks the lower n bits */ +uInt inflate_mask[17] = { + 0x0000, + 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff, + 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff +}; + + +/* copy as much as possible from the sliding window to the output area */ +int inflate_flush(s, z, r) +inflate_blocks_statef *s; +z_streamp z; +int r; +{ + uInt n; + Bytef *p; + Bytef *q; + + /* local copies of source and destination pointers */ + p = z->next_out; + q = s->read; + + /* compute number of bytes to copy as far as end of window */ + n = (uInt)((q <= s->write ? 
s->write : s->end) - q);
+  if (n > z->avail_out) n = z->avail_out;
+  if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+  /* update counters */
+  z->avail_out -= n;
+  z->total_out += n;
+
+  /* update check information */
+  if (s->checkfn != Z_NULL)
+    z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+  /* copy as far as end of window */
+  if (p != Z_NULL) {
+    zmemcpy(p, q, n);
+    p += n;
+  }
+  q += n;
+
+  /* see if more to copy at beginning of window */
+  if (q == s->end)
+  {
+    /* wrap pointers */
+    q = s->window;
+    if (s->write == s->end)
+      s->write = s->window;
+
+    /* compute bytes to copy */
+    n = (uInt)(s->write - q);
+    if (n > z->avail_out) n = z->avail_out;
+    if (n && r == Z_BUF_ERROR) r = Z_OK;
+
+    /* update counters */
+    z->avail_out -= n;
+    z->total_out += n;
+
+    /* update check information */
+    if (s->checkfn != Z_NULL)
+      z->adler = s->check = (*s->checkfn)(s->check, q, n);
+
+    /* copy */
+    if (p != Z_NULL) {
+      zmemcpy(p, q, n);
+      p += n;
+    }
+    q += n;
+  }
+
+  /* update pointers */
+  z->next_out = p;
+  s->read = q;
+
+  /* done */
+  return r;
+}
+/* --- infutil.c */
+
+/* +++ inffast.c */
+/* inffast.c -- process literals and length/distance pairs fast
+ * Copyright (C) 1995-1996 Mark Adler
+ * For conditions of distribution and use, see copyright notice in zlib.h
+ */
+
+/* #include "zutil.h" */
+/* #include "inftrees.h" */
+/* #include "infblock.h" */
+/* #include "infcodes.h" */
+/* #include "infutil.h" */
+/* #include "inffast.h" */
+
+#ifndef NO_DUMMY_DECL
+struct inflate_codes_state {int dummy;}; /* for buggy compilers */
+#endif
+
+/* simplify the use of the inflate_huft type with some defines */
+#define base more.Base
+#define next more.Next
+#define exop word.what.Exop
+#define bits word.what.Bits
+
+/* macros for bit input with no checking and for returning unused bytes */
+#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
+#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
+
+/* Called with number of bytes left to write in window at least 258
+   (the maximum string length) and number
of input bytes available + at least ten. The ten bytes are six bytes for the longest length/ + distance pair plus four bytes for overloading the bit buffer. */ + +int inflate_fast(bl, bd, tl, td, s, z) +uInt bl, bd; +inflate_huft *tl; +inflate_huft *td; /* need separate declaration for Borland C++ */ +inflate_blocks_statef *s; +z_streamp z; +{ + inflate_huft *t; /* temporary pointer */ + uInt e; /* extra bits or operation */ + uLong b; /* bit buffer */ + uInt k; /* bits in bit buffer */ + Bytef *p; /* input data pointer */ + uInt n; /* bytes available there */ + Bytef *q; /* output window write pointer */ + uInt m; /* bytes to end of window or read pointer */ + uInt ml; /* mask for literal/length tree */ + uInt md; /* mask for distance tree */ + uInt c; /* bytes to copy */ + uInt d; /* distance back to copy from */ + Bytef *r; /* copy source pointer */ + + /* load input, output, bit values */ + LOAD + + /* initialize masks */ + ml = inflate_mask[bl]; + md = inflate_mask[bd]; + + /* do until not enough input or output space for fast loop */ + do { /* assume called with m >= 258 && n >= 10 */ + /* get literal/length code */ + GRABBITS(20) /* max bits for literal/length code */ + if ((e = (t = tl + ((uInt)b & ml))->exop) == 0) + { + DUMPBITS(t->bits) + Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? 
+ "inflate: * literal '%c'\n" : + "inflate: * literal 0x%02x\n", t->base)); + *q++ = (Byte)t->base; + m--; + continue; + } + do { + DUMPBITS(t->bits) + if (e & 16) + { + /* get extra bits for length */ + e &= 15; + c = t->base + ((uInt)b & inflate_mask[e]); + DUMPBITS(e) + Tracevv((stderr, "inflate: * length %u\n", c)); + + /* decode distance base of block to copy */ + GRABBITS(15); /* max bits for distance code */ + e = (t = td + ((uInt)b & md))->exop; + do { + DUMPBITS(t->bits) + if (e & 16) + { + /* get extra bits to add to distance base */ + e &= 15; + GRABBITS(e) /* get extra bits (up to 13) */ + d = t->base + ((uInt)b & inflate_mask[e]); + DUMPBITS(e) + Tracevv((stderr, "inflate: * distance %u\n", d)); + + /* do the copy */ + m -= c; + if ((uInt)(q - s->window) >= d) /* offset before dest */ + { /* just copy */ + r = q - d; + *q++ = *r++; c--; /* minimum count is three, */ + *q++ = *r++; c--; /* so unroll loop a little */ + } + else /* else offset after destination */ + { + e = d - (uInt)(q - s->window); /* bytes from offset to end */ + r = s->end - e; /* pointer to offset */ + if (c > e) /* if source crosses, */ + { + c -= e; /* copy to end of window */ + do { + *q++ = *r++; + } while (--e); + r = s->window; /* copy rest from start of window */ + } + } + do { /* copy all or what's left */ + *q++ = *r++; + } while (--c); + break; + } + else if ((e & 64) == 0) + e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop; + else + { + z->msg = (char*)"invalid distance code"; + UNGRAB + UPDATE + return Z_DATA_ERROR; + } + } while (1); + break; + } + if ((e & 64) == 0) + { + if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0) + { + DUMPBITS(t->bits) + Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ? 
+ "inflate: * literal '%c'\n" : + "inflate: * literal 0x%02x\n", t->base)); + *q++ = (Byte)t->base; + m--; + break; + } + } + else if (e & 32) + { + Tracevv((stderr, "inflate: * end of block\n")); + UNGRAB + UPDATE + return Z_STREAM_END; + } + else + { + z->msg = (char*)"invalid literal/length code"; + UNGRAB + UPDATE + return Z_DATA_ERROR; + } + } while (1); + } while (m >= 258 && n >= 10); + + /* not enough input or output--restore pointers and return */ + UNGRAB + UPDATE + return Z_OK; +} +/* --- inffast.c */ + +/* +++ zutil.c */ +/* zutil.c -- target dependent utility functions for the compression library + * Copyright (C) 1995-1996 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */ + +#ifdef DEBUG_ZLIB +#include <stdio.h> +#endif + +/* #include "zutil.h" */ + +#ifndef NO_DUMMY_DECL +struct internal_state {int dummy;}; /* for buggy compilers */ +#endif + +#ifndef STDC +extern void exit OF((int)); +#endif + +static const char *z_errmsg[10] = { +"need dictionary", /* Z_NEED_DICT 2 */ +"stream end", /* Z_STREAM_END 1 */ +"", /* Z_OK 0 */ +"file error", /* Z_ERRNO (-1) */ +"stream error", /* Z_STREAM_ERROR (-2) */ +"data error", /* Z_DATA_ERROR (-3) */ +"insufficient memory", /* Z_MEM_ERROR (-4) */ +"buffer error", /* Z_BUF_ERROR (-5) */ +"incompatible version",/* Z_VERSION_ERROR (-6) */ +""}; + + +const char *zlibVersion() +{ + return ZLIB_VERSION; +} + +#ifdef DEBUG_ZLIB +void z_error (m) + char *m; +{ + fprintf(stderr, "%s\n", m); + exit(1); +} +#endif + +#ifndef HAVE_MEMCPY + +void zmemcpy(dest, source, len) + Bytef* dest; + Bytef* source; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = *source++; /* ???
to be unrolled */ + } while (--len != 0); +} + +int zmemcmp(s1, s2, len) + Bytef* s1; + Bytef* s2; + uInt len; +{ + uInt j; + + for (j = 0; j < len; j++) { + if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; + } + return 0; +} + +void zmemzero(dest, len) + Bytef* dest; + uInt len; +{ + if (len == 0) return; + do { + *dest++ = 0; /* ??? to be unrolled */ + } while (--len != 0); +} +#endif + +#ifdef __TURBOC__ +#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__) +/* Small and medium model in Turbo C are for now limited to near allocation + * with reduced MAX_WBITS and MAX_MEM_LEVEL + */ +# define MY_ZCALLOC + +/* Turbo C malloc() does not allow dynamic allocation of 64K bytes + * and farmalloc(64K) returns a pointer with an offset of 8, so we + * must fix the pointer. Warning: the pointer must be put back to its + * original form in order to free it, use zcfree(). + */ + +#define MAX_PTR 10 +/* 10*64K = 640K */ + +local int next_ptr = 0; + +typedef struct ptr_table_s { + voidpf org_ptr; + voidpf new_ptr; +} ptr_table; + +local ptr_table table[MAX_PTR]; +/* This table is used to remember the original form of pointers + * to large buffers (64K). Such pointers are normalized with a zero offset. + * Since MSDOS is not a preemptive multitasking OS, this table is not + * protected from concurrent access. This hack doesn't work anyway on + * a protected system like OS/2. Use Microsoft C instead. + */ + +voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) +{ + voidpf buf = opaque; /* just to make some compilers happy */ + ulg bsize = (ulg)items*size; + + /* If we allocate less than 65520 bytes, we assume that farmalloc + * will return a usable pointer which doesn't have to be normalized. 
+ */ + if (bsize < 65520L) { + buf = farmalloc(bsize); + if (*(ush*)&buf != 0) return buf; + } else { + buf = farmalloc(bsize + 16L); + } + if (buf == NULL || next_ptr >= MAX_PTR) return NULL; + table[next_ptr].org_ptr = buf; + + /* Normalize the pointer to seg:0 */ + *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; + *(ush*)&buf = 0; + table[next_ptr++].new_ptr = buf; + return buf; +} + +void zcfree (voidpf opaque, voidpf ptr) +{ + int n; + if (*(ush*)&ptr != 0) { /* object < 64K */ + farfree(ptr); + return; + } + /* Find the original pointer */ + for (n = 0; n < next_ptr; n++) { + if (ptr != table[n].new_ptr) continue; + + farfree(table[n].org_ptr); + while (++n < next_ptr) { + table[n-1] = table[n]; + } + next_ptr--; + return; + } + ptr = opaque; /* just to make some compilers happy */ + Assert(0, "zcfree: ptr not found"); +} +#endif +#endif /* __TURBOC__ */ + + +#if defined(M_I86) && !defined(__32BIT__) +/* Microsoft C in 16-bit mode */ + +# define MY_ZCALLOC + +#if (!defined(_MSC_VER) || (_MSC_VER < 600)) +# define _halloc halloc +# define _hfree hfree +#endif + +voidpf zcalloc (voidpf opaque, unsigned items, unsigned size) +{ + if (opaque) opaque = 0; /* to make compiler happy */ + return _halloc((long)items, size); +} + +void zcfree (voidpf opaque, voidpf ptr) +{ + if (opaque) opaque = 0; /* to make compiler happy */ + _hfree(ptr); +} + +#endif /* MSC */ + + +#ifndef MY_ZCALLOC /* Any system without a special alloc function */ + +#ifndef STDC +extern voidp calloc OF((uInt items, uInt size)); +extern void free OF((voidpf ptr)); +#endif + +voidpf zcalloc (opaque, items, size) + voidpf opaque; + unsigned items; + unsigned size; +{ + if (opaque) items += size - size; /* make compiler happy */ + return (voidpf)calloc(items, size); +} + +void zcfree (opaque, ptr) + voidpf opaque; + voidpf ptr; +{ + _FREE(ptr); + if (opaque) return; /* make compiler happy */ +} + +#endif /* MY_ZCALLOC */ +/* --- zutil.c */ + +/* +++ adler32.c */ +/* adler32.c -- compute the 
Adler-32 checksum of a data stream + * Copyright (C) 1995-1996 Mark Adler + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */ + +/* #include "zlib.h" */ + +#define BASE 65521L /* largest prime smaller than 65536 */ +#define NMAX 5552 +/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ + +#define DO1(buf,i) {s1 += buf[i]; s2 += s1;} +#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); +#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); +#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); +#define DO16(buf) DO8(buf,0); DO8(buf,8); + +/* ========================================================================= */ +uLong adler32(adler, buf, len) + uLong adler; + const Bytef *buf; + uInt len; +{ + unsigned long s1 = adler & 0xffff; + unsigned long s2 = (adler >> 16) & 0xffff; + int k; + + if (buf == Z_NULL) return 1L; + + while (len > 0) { + k = len < NMAX ? len : NMAX; + len -= k; + while (k >= 16) { + DO16(buf); + buf += 16; + k -= 16; + } + if (k != 0) do { + s1 += *buf++; + s2 += s1; + } while (--k); + s1 %= BASE; + s2 %= BASE; + } + return (s2 << 16) | s1; +} +/* --- adler32.c */ diff --git a/bsd/net/zlib.h b/bsd/net/zlib.h new file mode 100644 index 000000000..cd0433447 --- /dev/null +++ b/bsd/net/zlib.h @@ -0,0 +1,1013 @@ +/* $Id: zlib.h,v 1.5 2000/09/14 20:34:49 lindak Exp $ */ + +/* + * This file is derived from zlib.h and zconf.h from the zlib-1.0.4 + * distribution by Jean-loup Gailly and Mark Adler, with some additions + * by Paul Mackerras to aid in implementing Deflate compression and + * decompression for PPP packets. + */ + +/* + * ==FILEVERSION 971127== + * + * This marker is used by the Linux installation script to determine + * whether an up-to-date version of this file is already installed. + */ + + +/* +++ zlib.h */ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.0.4, Jul 24th, 1996. 
+ + Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + Jean-loup Gailly Mark Adler + gzip@prep.ai.mit.edu madler@alumni.caltech.edu + + + The data format used by the zlib library is described by RFCs (Request for + Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt + (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). +*/ + +#ifndef _ZLIB_H +#define _ZLIB_H + +#if __cplusplus +extern "C" { +#endif + + +/* +++ zconf.h */ +/* zconf.h -- configuration of the zlib compression library + * Copyright (C) 1995-1996 Jean-loup Gailly. + * For conditions of distribution and use, see copyright notice in zlib.h + */ + +/* From: zconf.h,v 1.20 1996/07/02 15:09:28 me Exp $ */ + +#ifndef _ZCONF_H +#define _ZCONF_H + +/* + * If you *really* need a unique prefix for all types and library functions, + * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. 
+ */ +#if Z_PREFIX +# define deflateInit_ z_deflateInit_ +# define deflate z_deflate +# define deflateEnd z_deflateEnd +# define inflateInit_ z_inflateInit_ +# define inflate z_inflate +# define inflateEnd z_inflateEnd +# define deflateInit2_ z_deflateInit2_ +# define deflateSetDictionary z_deflateSetDictionary +# define deflateCopy z_deflateCopy +# define deflateReset z_deflateReset +# define deflateParams z_deflateParams +# define inflateInit2_ z_inflateInit2_ +# define inflateSetDictionary z_inflateSetDictionary +# define inflateSync z_inflateSync +# define inflateReset z_inflateReset +# define compress z_compress +# define uncompress z_uncompress +# define adler32 z_adler32 +# define crc32 z_crc32 +# define get_crc_table z_get_crc_table + +# define Byte z_Byte +# define uInt z_uInt +# define uLong z_uLong +# define Bytef z_Bytef +# define charf z_charf +# define intf z_intf +# define uIntf z_uIntf +# define uLongf z_uLongf +# define voidpf z_voidpf +# define voidp z_voidp +#endif + +#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32) +# define WIN32 +#endif +#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386) +# ifndef __32BIT__ +# define __32BIT__ +# endif +#endif +#if defined(__MSDOS__) && !defined(MSDOS) +# define MSDOS +#endif + +/* + * Compile with -DMAXSEG_64K if the alloc function cannot allocate more + * than 64k bytes at a time (needed on systems with 16-bit int). 
+ */ +#if defined(MSDOS) && !defined(__32BIT__) +# define MAXSEG_64K +#endif +#if MSDOS +# define UNALIGNED_OK +#endif + +#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC) +# define STDC +#endif +#if (defined(__STDC__) || defined(__cplusplus)) && !defined(STDC) +# define STDC +#endif + +#ifndef STDC +# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ +# define const +# endif +#endif + +/* Some Mac compilers merge all .h files incorrectly: */ +#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__) +# define NO_DUMMY_DECL +#endif + +/* Maximum value for memLevel in deflateInit2 */ +#ifndef MAX_MEM_LEVEL +# ifdef MAXSEG_64K +# define MAX_MEM_LEVEL 8 +# else +# define MAX_MEM_LEVEL 9 +# endif +#endif + +/* Maximum value for windowBits in deflateInit2 and inflateInit2 */ +#ifndef MAX_WBITS +# define MAX_WBITS 15 /* 32K LZ77 window */ +#endif + +/* The memory requirements for deflate are (in bytes): + 1 << (windowBits+2) + 1 << (memLevel+9) + that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + plus a few kilobytes for small objects. For example, if you want to reduce + the default memory requirements from 256K to 128K, compile with + make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + Of course this will generally degrade compression (there's no free lunch). + + The memory requirements for inflate are (in bytes) 1 << windowBits + that is, 32K for windowBits=15 (default value) plus a few kilobytes + for small objects. +*/ + + /* Type declarations */ + +#ifndef OF /* function prototypes */ +# ifdef STDC +# define OF(args) args +# else +# define OF(args) () +# endif +#endif + +/* The following definitions for FAR are needed only for MSDOS mixed + * model programming (small or medium model with some far allocations). + * This was tested only with MSC; for other MSDOS compilers you may have + * to define NO_MEMCPY in zutil.h. 
If you don't need the mixed model, + * just define FAR to be empty. + */ +#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__) + /* MSC small or medium model */ +# define SMALL_MEDIUM +# ifdef _MSC_VER +# define FAR __far +# else +# define FAR far +# endif +#endif +#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__)) +# ifndef __32BIT__ +# define SMALL_MEDIUM +# define FAR __far +# endif +#endif +#ifndef FAR +# define FAR +#endif + +typedef unsigned char Byte; /* 8 bits */ +typedef unsigned int uInt; /* 16 bits or more */ +typedef unsigned long uLong; /* 32 bits or more */ + +#if defined(__BORLANDC__) && defined(SMALL_MEDIUM) + /* Borland C/C++ ignores FAR inside typedef */ +# define Bytef Byte FAR +#else + typedef Byte FAR Bytef; +#endif +typedef char FAR charf; +typedef int FAR intf; +typedef uInt FAR uIntf; +typedef uLong FAR uLongf; + +#ifdef STDC + typedef void FAR *voidpf; + typedef void *voidp; +#else + typedef Byte FAR *voidpf; + typedef Byte *voidp; +#endif + + +/* Compile with -DZLIB_DLL for Windows DLL support */ +#if (defined(_WINDOWS) || defined(WINDOWS)) && defined(ZLIB_DLL) +# include <windows.h> +# define EXPORT WINAPI +#else +# define EXPORT +#endif + +#endif /* _ZCONF_H */ +/* --- zconf.h */ + +#define ZLIB_VERSION "1.0.4P" + +/* + The 'zlib' compression library provides in-memory compression and + decompression functions, including integrity checks of the uncompressed + data. This version of the library supports only one compression method + (deflation) but other algorithms may be added later and will have the same + stream interface. + + For compression the application must provide the output buffer and + may optionally provide the input buffer for optimization. For decompression, + the application must provide the input buffer and may optionally provide + the output buffer for optimization.
+ + Compression can be done in a single step if the buffers are large + enough (for example if an input file is mmap'ed), or can be done by + repeated calls of the compression function. In the latter case, the + application must provide more input and/or consume the output + (providing more output space) before each call. + + The library does not install any signal handler. It is recommended to + add at least a handler for SIGSEGV when decompressing; the library checks + the consistency of the input data whenever possible but may go nuts + for some forms of corrupted input. +*/ + +typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); +typedef void (*free_func) OF((voidpf opaque, voidpf address)); + +struct internal_state; + +typedef struct z_stream_s { + Bytef *next_in; /* next input byte */ + uInt avail_in; /* number of bytes available at next_in */ + uLong total_in; /* total nb of input bytes read so far */ + + Bytef *next_out; /* next output byte should be put there */ + uInt avail_out; /* remaining free space at next_out */ + uLong total_out; /* total nb of bytes output so far */ + + char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ + + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree; /* used to free the internal state */ + voidpf opaque; /* private data object passed to zalloc and zfree */ + + int data_type; /* best guess about the data type: ascii or binary */ + uLong adler; /* adler32 value of the uncompressed data */ + uLong reserved; /* reserved for future use */ +} z_stream; + +typedef z_stream FAR *z_streamp; + +/* + The application must update next_in and avail_in when avail_in has + dropped to zero. It must update next_out and avail_out when avail_out + has dropped to zero. The application must initialize zalloc, zfree and + opaque before calling the init function. 
All other fields are set by the + compression library and must not be updated by the application. + + The opaque value provided by the application will be passed as the first + parameter for calls of zalloc and zfree. This can be useful for custom + memory management. The compression library attaches no meaning to the + opaque value. + + zalloc must return Z_NULL if there is not enough memory for the object. + On 16-bit systems, the functions zalloc and zfree must be able to allocate + exactly 65536 bytes, but will not be required to allocate more than this + if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, + pointers returned by zalloc for objects of exactly 65536 bytes *must* + have their offset normalized to zero. The default allocation function + provided by this library ensures this (see zutil.c). To reduce memory + requirements and avoid any allocation of 64K objects, at the expense of + compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). + + The fields total_in and total_out can be used for statistics or + progress reports. After compression, total_in holds the total size of + the uncompressed data and may be saved for use in the decompressor + (particularly if the decompressor wants to decompress everything in + a single step). +*/ + + /* constants */ + +#define Z_NO_FLUSH 0 +#define Z_PARTIAL_FLUSH 1 +#define Z_PACKET_FLUSH 2 +#define Z_SYNC_FLUSH 3 +#define Z_FULL_FLUSH 4 +#define Z_FINISH 5 +/* Allowed flush values; see deflate() below for details */ + +#define Z_OK 0 +#define Z_STREAM_END 1 +#define Z_NEED_DICT 2 +#define Z_ERRNO (-1) +#define Z_STREAM_ERROR (-2) +#define Z_DATA_ERROR (-3) +#define Z_MEM_ERROR (-4) +#define Z_BUF_ERROR (-5) +#define Z_VERSION_ERROR (-6) +/* Return codes for the compression/decompression functions. Negative + * values are errors, positive values are used for special but normal events. 
+ */ + +#define Z_NO_COMPRESSION 0 +#define Z_BEST_SPEED 1 +#define Z_BEST_COMPRESSION 9 +#define Z_DEFAULT_COMPRESSION (-1) +/* compression levels */ + +#define Z_FILTERED 1 +#define Z_HUFFMAN_ONLY 2 +#define Z_DEFAULT_STRATEGY 0 +/* compression strategy; see deflateInit2() below for details */ + +#define Z_BINARY 0 +#define Z_ASCII 1 +#define Z_UNKNOWN 2 +/* Possible values of the data_type field */ + +#define Z_DEFLATED 8 +/* The deflate compression method (the only one supported in this version) */ + +#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ + +#define zlib_version zlibVersion() +/* for compatibility with versions < 1.0.2 */ + + /* basic functions */ + +extern const char * EXPORT zlibVersion OF((void)); +/* The application can compare zlibVersion and ZLIB_VERSION for consistency. + If the first character differs, the library code actually used is + not compatible with the zlib.h header file used by the application. + This check is automatically made by deflateInit and inflateInit. + */ + +/* +extern int EXPORT deflateInit OF((z_streamp strm, int level)); + + Initializes the internal stream state for compression. The fields + zalloc, zfree and opaque must be initialized before by the caller. + If zalloc and zfree are set to Z_NULL, deflateInit updates them to + use default allocation functions. + + The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + 1 gives best speed, 9 gives best compression, 0 gives no compression at + all (the input data is simply copied a block at a time). + Z_DEFAULT_COMPRESSION requests a default compromise between speed and + compression (currently equivalent to level 6). + + deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if level is not a valid compression level, + Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + with the version assumed by the caller (ZLIB_VERSION). 
+ msg is set to null if there is no error message. deflateInit does not + perform any compression: this will be done by deflate(). +*/ + + +extern int EXPORT deflate OF((z_streamp strm, int flush)); +/* + Performs one or both of the following actions: + + - Compress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in and avail_in are updated and + processing will resume at this point for the next call of deflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. This action is forced if the parameter flush is non zero. + Forcing flush frequently degrades the compression ratio, so this parameter + should be set only when necessary (in interactive applications). + Some output may be provided even if flush is not set. + + Before the call of deflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming + more output, and updating avail_in or avail_out accordingly; avail_out + should never be zero before the call. The application can consume the + compressed output when it wants, for example when the output buffer is full + (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK + and with zero avail_out, it must be called again after making room in the + output buffer because there might be more output pending. + + If the parameter flush is set to Z_PARTIAL_FLUSH, the current compression + block is terminated and flushed to the output buffer so that the + decompressor can get all input data available so far. For method 9, a future + variant on method 8, the current block will be flushed but not terminated. 
+ Z_SYNC_FLUSH has the same effect as partial flush except that the compressed + output is byte aligned (the compressor can clear its internal bit buffer) + and the current block is always terminated; this can be useful if the + compressor has to be restarted from scratch after an interruption (in which + case the internal state of the compressor may be lost). + If flush is set to Z_FULL_FLUSH, the compression block is terminated, a + special marker is output and the compression dictionary is discarded; this + is useful to allow the decompressor to synchronize if one compressed block + has been damaged (see inflateSync below). Flushing degrades compression and + so should be used only when necessary. Using Z_FULL_FLUSH too often can + seriously degrade the compression. If deflate returns with avail_out == 0, + this function must be called again with the same value of the flush + parameter and more output space (updated avail_out), until the flush is + complete (deflate returns with non-zero avail_out). + + If the parameter flush is set to Z_PACKET_FLUSH, the compression + block is terminated, and a zero-length stored block is output, + omitting the length bytes (the effect of this is that the 3-bit type + code 000 for a stored block is output, and the output is then + byte-aligned). This is designed for use at the end of a PPP packet. + + If the parameter flush is set to Z_FINISH, pending input is processed, + pending output is flushed and deflate returns with Z_STREAM_END if there + was enough output space; if deflate returns with Z_OK, this function must be + called again with Z_FINISH and more output space (updated avail_out) but no + more input data, until it returns with Z_STREAM_END or an error. After + deflate has returned Z_STREAM_END, the only possible operations on the + stream are deflateReset or deflateEnd. + + Z_FINISH can be used immediately after deflateInit if all the compression + is to be done in a single step. 
In this case, avail_out must be at least + 0.1% larger than avail_in plus 12 bytes. If deflate does not return + Z_STREAM_END, then it must be called again as described above. + + deflate() may update data_type if it can make a good guess about + the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered + binary. This field is only for information purposes and does not affect + the compression algorithm in any manner. + + deflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if all input has been + consumed and all output has been produced (only when flush is set to + Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible. +*/ + + +extern int EXPORT deflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any + pending output. + + deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + stream state was inconsistent, Z_DATA_ERROR if the stream was freed + prematurely (some input or output was discarded). In the error case, + msg may be set but then points to a static string (which must not be + deallocated). +*/ + + +/* +extern int EXPORT inflateInit OF((z_streamp strm)); + + Initializes the internal stream state for decompression. The fields + zalloc, zfree and opaque must be initialized before by the caller. If + zalloc and zfree are set to Z_NULL, inflateInit updates them to use default + allocation functions. + + inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_VERSION_ERROR if the zlib library version is incompatible + with the version assumed by the caller. msg is set to null if there is no + error message. inflateInit does not perform any decompression: this will be + done by inflate(). 
+*/ + +#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL)) +#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */ +#endif + +extern int EXPORT inflate OF((z_streamp strm, int flush)); +/* + Performs one or both of the following actions: + + - Decompress more input starting at next_in and update next_in and avail_in + accordingly. If not all input can be processed (because there is not + enough room in the output buffer), next_in is updated and processing + will resume at this point for the next call of inflate(). + + - Provide more output starting at next_out and update next_out and avail_out + accordingly. inflate() provides as much output as possible, until there + is no more input data or no more space in the output buffer (see below + about the flush parameter). + + Before the call of inflate(), the application should ensure that at least + one of the actions is possible, by providing more input and/or consuming + more output, and updating the next_* and avail_* values accordingly. + The application can consume the uncompressed output when it wants, for + example when the output buffer is full (avail_out == 0), or after each + call of inflate(). If inflate returns Z_OK and with zero avail_out, it + must be called again after making room in the output buffer because there + might be more output pending. + + If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH, + inflate flushes as much output as possible to the output buffer. The + flushing behavior of inflate is not specified for values of the flush + parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the + current implementation actually flushes as much output as possible + anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data + has been consumed, it is expecting to see the length field of a stored + block; if not, it returns Z_DATA_ERROR. + + inflate() should normally be called until it returns Z_STREAM_END or an + error. 
However if all decompression is to be performed in a single step + (a single call of inflate), the parameter flush should be set to + Z_FINISH. In this case all pending input is processed and all pending + output is flushed; avail_out must be large enough to hold all the + uncompressed data. (The size of the uncompressed data may have been saved + by the compressor for this purpose.) The next operation on this stream must + be inflateEnd to deallocate the decompression state. The use of Z_FINISH + is never required, but can be used to inform inflate that a faster routine + may be used for the single inflate() call. + + inflate() returns Z_OK if some progress has been made (more input + processed or more output produced), Z_STREAM_END if the end of the + compressed data has been reached and all uncompressed output has been + produced, Z_NEED_DICT if a preset dictionary is needed at this point (see + inflateSetDictionary below), Z_DATA_ERROR if the input data was corrupted, + Z_STREAM_ERROR if the stream structure was inconsistent (for example if + next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, + Z_BUF_ERROR if no progress is possible or if there was not enough room in + the output buffer when Z_FINISH is used. In the Z_DATA_ERROR case, the + application may then call inflateSync to look for a good compression block. + In the Z_NEED_DICT case, strm->adler is set to the Adler32 value of the + dictionary chosen by the compressor. +*/ + + +extern int EXPORT inflateEnd OF((z_streamp strm)); +/* + All dynamically allocated data structures for this stream are freed. + This function discards any unprocessed input and does not flush any + pending output. + + inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state + was inconsistent. In the error case, msg may be set but then points to a + static string (which must not be deallocated). 
+*/ + + /* Advanced functions */ + +/* + The following functions are needed only in some special applications. +*/ + +/* +extern int EXPORT deflateInit2 OF((z_streamp strm, + int level, + int method, + int windowBits, + int memLevel, + int strategy)); + + This is another version of deflateInit with more compression options. The + fields next_in, zalloc, zfree and opaque must be initialized before by + the caller. + + The method parameter is the compression method. It must be Z_DEFLATED in + this version of the library. (Method 9 will allow a 64K history buffer and + partial block flushes.) + + The windowBits parameter is the base two logarithm of the window size + (the size of the history buffer). It should be in the range 8..15 for this + version of the library (the value 16 will be allowed for method 9). Larger + values of this parameter result in better compression at the expense of + memory usage. The default value is 15 if deflateInit is used instead. + + The memLevel parameter specifies how much memory should be allocated + for the internal compression state. memLevel=1 uses minimum memory but + is slow and reduces compression ratio; memLevel=9 uses maximum memory + for optimal speed. The default value is 8. See zconf.h for total memory + usage as a function of windowBits and memLevel. + + The strategy parameter is used to tune the compression algorithm. Use the + value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a + filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no + string match). Filtered data consists mostly of small values with a + somewhat random distribution. In this case, the compression algorithm is + tuned to compress them better. The effect of Z_FILTERED is to force more + Huffman coding and less string matching; it is somewhat intermediate + between Z_DEFAULT and Z_HUFFMAN_ONLY. 
The strategy parameter only affects + the compression ratio but not the correctness of the compressed output even + if it is not set appropriately. + + If next_in is not null, the library will use this buffer to hold also + some history information; the buffer must either hold the entire input + data, or have at least 1<<(windowBits+1) bytes and be writable. If next_in + is null, the library will allocate its own history buffer (and leave next_in + null). next_out need not be provided here but must be provided by the + application for the next call of deflate(). + + If the history buffer is provided by the application, next_in must + never be changed by the application since the compressor maintains + information inside this buffer from call to call; the application + must provide more input only by increasing avail_in. next_in is always + reset by the library in this case. + + deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was + not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as + an invalid method). msg is set to null if there is no error message. + deflateInit2 does not perform any compression: this will be done by + deflate(). +*/ + +extern int EXPORT deflateSetDictionary OF((z_streamp strm, + const Bytef *dictionary, + uInt dictLength)); +/* + Initializes the compression dictionary (history buffer) from the given + byte sequence without producing any compressed output. This function must + be called immediately after deflateInit or deflateInit2, before any call + of deflate. The compressor and decompressor must use exactly the same + dictionary (see inflateSetDictionary). + The dictionary should consist of strings (byte sequences) that are likely + to be encountered later in the data to be compressed, with the most commonly + used strings preferably put towards the end of the dictionary.
Using a + dictionary is most useful when the data to be compressed is short and + can be predicted with good accuracy; the data can then be compressed better + than with the default empty dictionary. In this version of the library, + only the last 32K bytes of the dictionary are used. + Upon return of this function, strm->adler is set to the Adler32 value + of the dictionary; the decompressor may later use this value to determine + which dictionary has been used by the compressor. (The Adler32 value + applies to the whole dictionary even if only a subset of the dictionary is + actually used by the compressor.) + + deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + parameter is invalid (such as NULL dictionary) or the stream state + is inconsistent (for example if deflate has already been called for this + stream). deflateSetDictionary does not perform any compression: this will + be done by deflate(). +*/ + +extern int EXPORT deflateCopy OF((z_streamp dest, + z_streamp source)); +/* + Sets the destination stream as a complete copy of the source stream. If + the source stream is using an application-supplied history buffer, a new + buffer is allocated for the destination stream. The compressed output + buffer is always application-supplied. It's the responsibility of the + application to provide the correct values of next_out and avail_out for the + next call of deflate. + + This function can be useful when several compression strategies will be + tried, for example when there are several ways of pre-processing the input + data with a filter. The streams that will be discarded should then be freed + by calling deflateEnd. Note that deflateCopy duplicates the internal + compression state which can be quite large, so this strategy is slow and + can consume lots of memory. + + deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + (such as zalloc being NULL). 
msg is left unchanged in both source and + destination. +*/ + +extern int EXPORT deflateReset OF((z_streamp strm)); +/* + This function is equivalent to deflateEnd followed by deflateInit, + but does not free and reallocate all the internal compression state. + The stream will keep the same compression level and any other attributes + that may have been set by deflateInit2. + + deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent (such as zalloc or state being NULL). +*/ + +extern int EXPORT deflateParams OF((z_streamp strm, int level, int strategy)); +/* + Dynamically update the compression level and compression strategy. + This can be used to switch between compression and straight copy of + the input data, or to switch to a different kind of input data requiring + a different strategy. If the compression level is changed, the input + available so far is compressed with the old level (and may be flushed); + the new level will take effect only at the next call of deflate(). + + Before the call of deflateParams, the stream state must be set as for + a call of deflate(), since the currently available input may have to + be compressed and flushed. In particular, strm->avail_out must be non-zero. + + deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source + stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR + if strm->avail_out was zero. +*/ + +extern int EXPORT deflateOutputPending OF((z_streamp strm)); +/* + Returns the number of bytes of output which are immediately + available from the compressor (i.e. without any further input + or flush). +*/ + +/* +extern int EXPORT inflateInit2 OF((z_streamp strm, + int windowBits)); + + This is another version of inflateInit with more compression options. The + fields next_out, zalloc, zfree and opaque must be initialized before by + the caller. 
+ + The windowBits parameter is the base two logarithm of the maximum window + size (the size of the history buffer). It should be in the range 8..15 for + this version of the library (the value 16 will be allowed soon). The + default value is 15 if inflateInit is used instead. If a compressed stream + with a larger window size is given as input, inflate() will return with + the error code Z_DATA_ERROR instead of trying to allocate a larger window. + + If next_out is not null, the library will use this buffer for the history + buffer; the buffer must either be large enough to hold the entire output + data, or have at least 1< +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef notdefn +struct adsp_debug adsp_dtable[1025]; +int ad_entry = 0; +#endif + +extern atlock_t adspgen_lock; + +adspAllocateCCB(gref) + register gref_t *gref; /* READ queue */ +{ + gbuf_t *ccb_mp; + register CCBPtr sp; + + if (!(ccb_mp = gbuf_alloc(sizeof(CCB), PRI_LO))) { + return (0); + } + bzero((caddr_t) gbuf_rptr(ccb_mp), sizeof(CCB)); + gbuf_wset(ccb_mp,sizeof(CCB)); + gref->info = (caddr_t) ccb_mp; + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + + sp->pid = gref->pid; /* save the caller process pointer */ + sp->gref = gref; /* save a back pointer to the WRITE queue */ + sp->sp_mp = ccb_mp; /* and its message block */ + ATLOCKINIT(sp->lock); + ATLOCKINIT(sp->lockClose); + ATLOCKINIT(sp->lockRemove); + return 1; +} + +adspRelease(gref) + register gref_t *gref; /* READ queue */ +{ + register CCBPtr sp; + int s, l; + + ATDISABLE(l, adspgen_lock); + if (gref->info) { + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + ATDISABLE(s, sp->lock); + ATENABLE(s, adspgen_lock); + /* Tells completion routine of close */ + /* packet to remove us. 
*/ + + if (sp->state == sPassive || sp->state == sClosed || + sp->state == sOpening || sp->state == sListening) { + ATENABLE(l, sp->lock); + if (sp->state == sListening) + CompleteQueue(&sp->opb, errAborted); + sp->removing = 1; /* Prevent allowing another dspClose. */ + DoClose(sp, errAborted, 0); /* will remove CCB */ + return 0; + } else { /* sClosing & sOpen */ + sp->state = sClosing; + } + ATENABLE(l, sp->lock); + + if (CheckOkToClose(sp)) { /* going to close */ + sp->sendCtl = B_CTL_CLOSE; /* Send close advice */ + } else { + CheckSend(sp); /* try one more time to send out data */ + if (sp->state != sClosed) + sp->sendCtl = B_CTL_CLOSE; /* Setup to send close advice */ + } + CheckSend(sp); /* and force out the close */ + ATDISABLE(s, sp->lock); + sp->removing = 1; /* Prevent allowing another dspClose. */ + sp->state = sClosed; + ATENABLE(s, sp->lock); + DoClose(sp, errAborted, 0); /* to closed and remove CCB */ + } else + ATENABLE(l, adspgen_lock); +} + + + + +adspWriteHandler(gref, mp) + gref_t *gref; /* WRITE queue */ + gbuf_t *mp; +{ + + register ioc_t *iocbp; + register struct adspcmd *ap; + int error, flag; + void *sp; + + switch(gbuf_type(mp)) { + case MSG_DATA: + if (gref->info == 0) { + gbuf_freem(mp); + return(STR_IGNORE); + } + /* + * Fill in the global stuff + */ + ap = (struct adspcmd *)gbuf_rptr(mp); + ap->gref = gref; + ap->ioc = 0; + ap->mp = mp; + sp = (void *)gbuf_rptr(((gbuf_t *)gref->info)); + switch(ap->csCode) { + case dspWrite: + if ((error = adspWrite(sp, ap))) + gbuf_freem(mp); + return(STR_IGNORE); + case dspAttention: + if ((error = adspAttention(sp, ap))) + gbuf_freem(mp); + return(STR_IGNORE); + } + case MSG_IOCTL: + if (gref->info == 0) { + adspioc_ack(EPROTO, mp, gref); + return(STR_IGNORE); + } + iocbp = (ioc_t *) gbuf_rptr(mp); + if (ADSP_IOCTL(iocbp->ioc_cmd)) { + iocbp->ioc_count = sizeof(*ap) - 1; + if (gbuf_cont(mp) == 0) { + adspioc_ack(EINVAL, mp, gref); + return(STR_IGNORE); + } + ap = (struct adspcmd *) 
gbuf_rptr(gbuf_cont(mp)); + ap->gref = gref; + ap->ioc = (caddr_t) mp; + ap->mp = gbuf_cont(mp); /* request head */ + ap->ioResult = 0; + + if ((gref->info == 0) && ((iocbp->ioc_cmd != ADSPOPEN) && + (iocbp->ioc_cmd != ADSPCLLISTEN))) { + ap->ioResult = errState; + + adspioc_ack(EINVAL, mp, gref); + return(STR_IGNORE); + } + } + sp = (void *)gbuf_rptr(((gbuf_t *)gref->info)); + switch(iocbp->ioc_cmd) { + case ADSPOPEN: + case ADSPCLLISTEN: + ap->socket = ((CCBPtr)sp)->localSocket; + flag = (adspMode(ap) == ocAccept) ? 1 : 0; + if (flag && ap->socket) { + if (adspDeassignSocket((CCBPtr)sp) >= 0) + ap->socket = 0; + } + if ((ap->socket == 0) && + ((ap->socket = + (at_socket)adspAssignSocket(gref, flag)) == 0)) { + adspioc_ack(EADDRNOTAVAIL, mp, gref); + return(STR_IGNORE); + } + ap->csCode = iocbp->ioc_cmd == ADSPOPEN ? dspInit : dspCLInit; + if ((error = adspInit(sp, ap)) == 0) { + switch(ap->csCode) { + case dspInit: + /* and open the connection */ + ap->csCode = dspOpen; + error = adspOpen(sp, ap); + break; + case dspCLInit: + /* ADSPCLLISTEN */ + ap->csCode = dspCLListen; + error = adspCLListen(sp, ap); + break; + } + } + if (error) + adspioc_ack(error, mp, gref); /* if this failed req complete */ + return(STR_IGNORE); + case ADSPCLOSE: + ap->csCode = dspClose; + if ((error = adspClose(sp, ap))) { + adspioc_ack(error, mp, gref); + break; + } + break; + case ADSPCLREMOVE: + ap->csCode = dspCLRemove; + error = adspClose(sp, ap); + adspioc_ack(error, mp, gref); + return(STR_IGNORE); + case ADSPCLDENY: + ap->csCode = dspCLDeny; + if ((error = adspCLDeny(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + case ADSPSTATUS: + ap->csCode = dspStatus; + if ((error = adspStatus(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + case ADSPREAD: + ap->csCode = dspRead; + if ((error = adspRead(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + case ADSPATTENTION: + ap->csCode = dspAttention; + if ((error = 
adspReadAttention(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + case ADSPOPTIONS: + ap->csCode = dspOptions; + if ((error = adspOptions(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + case ADSPRESET: + ap->csCode = dspReset; + if ((error = adspReset(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + case ADSPNEWCID: + ap->csCode = dspNewCID; + if ((error = adspNewCID(sp, ap))) { + adspioc_ack(error, mp, gref); + } + return(STR_IGNORE); + default: + return(STR_PUTNEXT); /* pass it on down */ + } + return(STR_IGNORE); + case MSG_PROTO: + default: + gbuf_freem(mp); + } +} + + +adspReadHandler(gref, mp) + gref_t *gref; + gbuf_t *mp; +{ + int error; + + switch(gbuf_type(mp)) { + case MSG_DATA: + if ((error = adspPacket(gref, mp))) { + gbuf_freem(mp); + } + break; + + case MSG_IOCTL: + default: + return(STR_PUTNEXT); + break; + } + return(STR_IGNORE); +} + +/* + * adsp_sendddp() + * + * Description: + * This procedure formats a DDP datagram header and calls the + * DDP module to queue it for routing and transmission according to + * the DDP parameters. We always take control of the datagram; + * if there is an error we free it, otherwise we pass it to the next + * layer. We don't need to set the src address fields because the + * DDP layer fills these in for us. + * + * Calling Sequence: + * ret_status = adsp_sendddp(q, sp, mp, length, dstnetaddr, ddptype); + * + * Formal Parameters: + * sp Caller stream pointer + * mp gbuf_t chain containing the datagram to transmit + * The first mblk contains the ADSP header and space + * for the DDP header. + * length size of data portion of datagram + * dstnetaddr address of 4-byte destination internet address + * ddptype DDP protocol to assign to the datagram + * + * Completion Status: + * 0 Procedure successfully completed. + * EMSGSIZE Specified datagram length is too big.
+ * + * Side Effects: + * NONE + */ + +adsp_sendddp(sp, mp, length, dstnetaddr, ddptype) + CCBPtr sp; + gbuf_t *mp; + int length; + AddrUnion *dstnetaddr; + int ddptype; +{ + DDPX_FRAME *ddp; + gbuf_t *mlist = mp; + + if (mp == 0) + return EINVAL; + + if (length > DDP_DATA_SIZE) { + gbuf_freel(mlist); + return EMSGSIZE; + } + + while (mp) { + + if (length == 0) + length = gbuf_msgsize(mp) - DDPL_FRAME_LEN; + /* Set up the DDP header */ + + ddp = (DDPX_FRAME *) gbuf_rptr(mp); + UAS_ASSIGN(ddp->ddpx_length, (length + DDPL_FRAME_LEN)); + UAS_ASSIGN(ddp->ddpx_cksm, 0); + if (sp) { + if (sp->useCheckSum) + UAS_ASSIGN(ddp->ddpx_cksm, 1); + } + + NET_ASSIGN(ddp->ddpx_dnet, dstnetaddr->a.net); + ddp->ddpx_dnode = dstnetaddr->a.node; + ddp->ddpx_source = sp ? sp->localSocket : ddp->ddpx_dest; + ddp->ddpx_dest = dstnetaddr->a.socket; + + ddp->ddpx_type = ddptype; + length = 0; + mp = gbuf_next(mp); + + } + + DDP_OUTPUT(mlist); + return 0; +} + +void NotifyUser(sp) + register CCBPtr sp; + +{ +/* + pidsig(sp->pid, SIGIO); +*/ +} + +void UrgentUser(sp) + register CCBPtr sp; +{ +/* + pidsig(sp->pid, SIGURG); +*/ +} diff --git a/bsd/netat/adsp.h b/bsd/netat/adsp.h new file mode 100644 index 000000000..ad59eef29 --- /dev/null +++ b/bsd/netat/adsp.h @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ORIGINS: 82 + * + * (C) COPYRIGHT Apple Computer, Inc. 1992-1996 + * All Rights Reserved + * + */ + +#ifndef _NETAT_ADSP_H_ +#define _NETAT_ADSP_H_ + +/* ADSP flags for read, write, and close routines */ + +#define ADSP_EOM 0x01 /* Sent or received EOM with data */ +#define ADSP_FLUSH 0x02 /* Send all data in send queue */ +#define ADSP_WAIT 0x04 /* Graceful close, wait 'til snd queue emptys */ + + +/* ADSP events to be fielded by the user event handler */ + +#define ADSP_EV_ATTN 0x02 /* Attention data recvd. */ +#define ADSP_EV_RESET 0x04 /* Forward reset recvd. */ +#define ADSP_EV_CLOSE 0x08 /* Close advice recvd. 
*/ + + +/* ADSP packet control codes */ + +#define ADSP_PROBEACK 0 /* Probe or acknowledgement */ +#define ADSP_OPENCONREQUEST 1 /* Open connection request */ +#define ADSP_OPENCONACK 2 /* Open connection acknowledgement */ +#define ADSP_OPENCONREQACK 3 /* Open connection request + ack */ +#define ADSP_OPENCONDENIAL 4 /* Open connection denial */ +#define ADSP_CLOSEADVICE 5 /* Close connection advice */ +#define ADSP_FORWARDRESET 6 /* Forward reset */ +#define ADSP_FORWARDRESETACK 7 /* Forward reset acknowledgement */ +#define ADSP_RETRANSADVICE 8 /* Retransmit advice */ + + +/* Miscellaneous constants */ + +#define ADSP_MAXDATA 572 /* Maximum data bytes in ADSP packet */ +#define ADSP_MAXATTNDATA 570 /* Maximum data bytes in attn msg */ +#define ADSP_DDPTYPE 7 /* DDP protocol type for ADSP */ +#define ADSP_VERSION 0x0100 /* ADSP version */ + + +/* Some additional ADSP error codes */ + +#define EQUEWASEMP 10001 +#define EONEENTQUE 10002 +#define EQUEBLOCKED 10003 +#define EFWDRESET 10004 +#define EENDOFMSG 10005 +#define EADDRNOTINUSE 10006 + + + +/* Tuning Parameter Block */ + +struct tpb { + unsigned Valid : 1; /* Tuning parameter block is valid */ + unsigned short TransThresh; /* Transmit threshold */ + unsigned TransTimerIntrvl; /* Transmit timer interval */ + unsigned short SndWdwCloThresh; /* Send window closing threshold */ + unsigned SndWdwCloIntrvl; /* Send window closed interval */ + unsigned char SndWdwCloBckoff; /* Send window closed backoff rate */ + unsigned ReTransIntrvl; /* Retransmit interval */ + unsigned char ReTransBckoff; /* Retransmit backoff rate */ + unsigned RestartIntrvl; /* Restart sender interval */ + unsigned char RestartBckoff; /* Restart sender backoff rate */ + unsigned SndQBufSize; /* Send queue buffer size */ + unsigned short RcvQMaxSize; /* Maximum size of the receive queue */ + unsigned short RcvQCpyThresh; /* Receive queue copy threshold */ + unsigned FwdRstIntrvl; /* Forward reset interval */ + unsigned char FwdRstBckoff; /* 
Forward reset backoff rate */ + unsigned AttnIntrvl; /* Retransmit attn msg interval */ + unsigned char AttnBckoff; /* Retransmit attn msg backoff rate */ + unsigned OpenIntrvl; /* Retransmit open request interval */ + unsigned char OpenMaxRetry; /* Open request maximum retrys */ + unsigned char RetransThresh; /* Retransmit advice threshold */ + unsigned ProbeRetryMax; /* Maximum number of probes */ + unsigned SndByteCntMax; /* Maximum number bytes in send queue */ +}; + + +/* Tuning Parameter Tags */ + +#define ADSP_TRANSTHRESH 1 /* Transmit threshold */ +#define ADSP_TRANSTIMERINTRVL 2 /* Transmit timer interval */ +#define ADSP_SNDWDWCLOTHRESH 3 /* Send window closing threshold */ +#define ADSP_SNDWDWCLOINTRVL 4 /* Send window closed interval */ +#define ADSP_SNDWDWCLOBCKOFF 5 /* Send window closed backoff rate */ +#define ADSP_RETRANSINTRVL 6 /* Retransmit interval */ +#define ADSP_RETRANSBCKOFF 7 /* Retransmit backoff rate */ +#define ADSP_RESTARTINTRVL 8 /* Restart sender interval */ +#define ADSP_RESTARTBCKOFF 9 /* Restart sender backoff rate */ +#define ADSP_SNDQBUFSIZE 10 /* Send queue buffer size */ +#define ADSP_RCVQMAXSIZE 11 /* Receive queue maximum size */ +#define ADSP_RCVQCPYTHRESH 12 /* Receive queue copy threshold */ +#define ADSP_FWDRSTINTRVL 13 /* Forward reset retransmit interval */ +#define ADSP_FWDRSTBCKOFF 14 /* Forward reset backoff rate */ +#define ADSP_ATTNINTRVL 15 /* Rexmit attention message interval */ +#define ADSP_ATTNBCKOFF 16 /* Attention message backoff rate */ +#define ADSP_OPENINTRVL 17 /* Retransmit open request interval */ +#define ADSP_OPENMAXRETRY 18 /* Open request max retrys */ +#define ADSP_RETRANSTHRESH 19 /* Retransmit advice threshold */ +#define ADSP_PROBERETRYMAX 20 +#define ADSP_SNDBYTECNTMAX 21 + +#define TuneParamCnt 21 /* The number of tuning parameters */ + +/* Connection Status Tags */ + +#define ADSP_STATE 1 /* The connection state */ +#define ADSP_SNDSEQ 2 /* Send sequence number */ +#define ADSP_FIRSTRTMTSEQ 
3 /* First retransmit sequence number */ +#define ADSP_SNDWDWSEQ 4 /* Send window sequence number */ +#define ADSP_RCVSEQ 5 /* Receive sequence number */ +#define ADSP_ATTNSNDSEQ 6 /* Attn msg send sequence number */ +#define ADSP_ATTNRCVSEQ 7 /* Attn msg receive sequence number */ +#define ADSP_RCVWDW 8 /* Receive window size */ +#define ADSP_ATTNMSGWAIT 9 /* Attn msg is in the receive queue */ + +#define ConStatTagCnt 9 /* Number of connection status tags */ + +#define ADSP_INVALID 0 /* Invalid connection control block */ +#define ADSP_LISTEN 1 /* Waiting for an open con req */ +#define ADSP_OPENING 2 /* No state info, sending open req */ +#define ADSP_MYHALFOPEN 4 /* His state info, sending open req */ +#define ADSP_HISHALFOPEN 8 /* He has my state info, sndng op req */ +#define ADSP_OPEN 16 /* Connection is operational */ +#define ADSP_TORNDOWN 32 /* Probe timer has expired 4 times */ +#define ADSP_CLOSING 64 /* Client close, emptying send Queues */ +#define ADSP_CLOSED 128 /* Close adv rcvd, emptying rcv Queues */ + +/* Management Counters */ + +#define ADSP_ATTNACKRCVD 1 /* Attn msg ack received */ +#define ADSP_ATTNACKACPTD 2 /* Attn msg ack accepted */ +#define ADSP_PROBERCVD 3 /* Probe received */ +#define ADSP_ACKRCVD 4 /* Explicit ack msg received */ +#define ADSP_FWDRSTRCVD 5 /* Forward reset received */ +#define ADSP_FWDRSTACPTD 6 /* Forward reset accepted */ +#define ADSP_FWDRSTACKRCVD 7 /* Forward reset ack received */ +#define ADSP_FWDRSTACKACPTD 8 /* Forward reset ack accepted */ +#define ADSP_ATTNRCVD 9 /* Attn msg received */ +#define ADSP_ATTNACPTD 10 /* Attn msg accepted */ +#define ADSP_DATARCVD 11 /* Data msg received */ +#define ADSP_DATAACPTD 12 /* Data msg Accepted */ +#define ADSP_ACKFIELDCHKD 13 /* Ack field checked */ +#define ADSP_ACKNRSFIELDACPTD 14 /* Next receive seq field accepted */ +#define ADSP_ACKSWSFIELDACPTD 15 /* Send window seq field accepted */ +#define ADSP_ACKREQSTD 16 /* Ack requested */ +#define ADSP_LOWMEM 17 /* Low 
memory */ +#define ADSP_OPNREQEXP 18 /* Open request timer expired */ +#define ADSP_PROBEEXP 19 /* Probe timer expired */ +#define ADSP_FWDRSTEXP 20 /* Forward reset timer expired */ +#define ADSP_ATTNEXP 21 /* Attention timer expired */ +#define ADSP_TRANSEXP 22 /* Transmit timer expired */ +#define ADSP_RETRANSEXP 23 /* Retransmit timer expired */ +#define ADSP_SNDWDWCLOEXP 24 /* Send window closed timer expired */ +#define ADSP_RESTARTEXP 25 /* Restart sender timer expired */ +#define ADSP_RESLOWEXP 26 /* Resources are low timer expired */ +#define ADSP_RETRANSRCVD 27 /* Retransmit advice received */ + +#define InfoTagCnt 27 + +/* Length of the parameter and status lists */ + +#define ADSP_DEFLEN (TuneParamCnt * 6 + 1) +#define ADSP_STALEN (ConStatTagCnt * 6 + 1) +#define ADSP_INFOLEN (InfoTagCnt * 6 + 1) + +/* from h/ADSP.h */ + +/* result codes */ + +#define controlErr -17 /*I/O System Errors*/ + +#define errENOBUFS -1281 +#define errRefNum -1280 /* bad connection refNum */ +#define errAborted -1279 /* control call was aborted */ +#define errState -1278 /* bad connection state for this operation */ +#define errOpening -1277 /* open connection request failed */ +#define errAttention -1276 /* attention message too long */ +#define errFwdReset -1275 /* read terminated by forward reset */ +#define errDSPQueueSize -1274 /* DSP Read/Write Queue Too small */ +#define errOpenDenied -1273 /* open connection request was denied */ + +/* control codes */ + +#define dspInit 255 /* create a new connection end */ +#define dspRemove 254 /* remove a connection end */ +#define dspOpen 253 /* open a connection */ +#define dspClose 252 /* close a connection */ +#define dspCLInit 251 /* create a connection listener */ +#define dspCLRemove 250 /* remove a connection listener */ +#define dspCLListen 249 /* post a listener request */ +#define dspCLDeny 248 /* deny an open connection request */ +#define dspStatus 247 /* get status of connection end */ +#define dspRead 246 /* read data 
from the connection */ +#define dspWrite 245 /* write data on the connection */ +#define dspAttention 244 /* send an attention message */ +#define dspOptions 243 /* set connection end options */ +#define dspReset 242 /* forward reset the connection */ +#define dspNewCID 241 /* generate a cid for a connection end */ + + +/* connection opening modes */ + +#define ocRequest 1 /* request a connection with remote */ +#define ocPassive 2 /* wait for a connection request from remote */ +#define ocAccept 3 /* accept request as delivered by listener */ +#define ocEstablish 4 /* consider connection to be open */ + + +/* connection end states */ + +#define sListening 1 /* for connection listeners */ +#define sPassive 2 /* waiting for a connection request from remote */ +#define sOpening 3 /* requesting a connection with remote */ +#define sOpen 4 /* connection is open */ +#define sClosing 5 /* connection is being torn down */ +#define sClosed 6 /* connection end state is closed */ + + + +/* client event flags */ + +#define eClosed 0x80 /* received connection closed advice */ +#define eTearDown 0x40 /* connection closed due to broken connection */ +#define eAttention 0x20 /* received attention message */ +#define eFwdReset 0x10 /* received forward reset advice */ + +/* miscellaneous constants */ + +#define attnBufSize 570 /* size of client attention buffer */ +#define minDSPQueueSize 100 /* Minimum size of receive or send Queue */ +#define defaultDSPQS 16384 /* random guess */ +#define RecvQSize defaultDSPQS +#define SendQSize defaultDSPQS + +/* *** Seems to be a problem in Mac OS X too *** */ +/* Solaris defines u as (curproc->p_user) +#if defined(u) +#undef u +#endif +*/ + +typedef long (*ProcPtr)(); +typedef ProcPtr *ProcHandle; +typedef char *Ptr; +typedef Ptr *Handle; + +/* connection control block */ + +struct TRCCB { + u_char *ccbLink; /* link to next ccb */ + u_short refNum; /* user reference number */ + u_short state; /* state of the connection end */ + u_char 
userFlags; /* flags for unsolicited connection events */ + u_char localSocket; /* socket number of this connection end */ + at_inet_t remoteAddress; /* internet address of remote end */ + u_short attnCode; /* attention code received */ + u_short attnSize; /* size of received attention data */ + u_char *attnPtr; /* ptr to received attention data */ + u_char reserved[220]; /* for adsp internal use */ +}; + +typedef struct TRCCB TRCCB; +typedef TRCCB *TPCCB; + +/* init connection end parameters */ + +struct TRinitParams { + TPCCB ccbPtr; /* pointer to connection control block */ + ProcPtr userRoutine; /* client routine to call on event */ + u_char *sendQueue; /* client passed send queue buffer */ + u_char *recvQueue; /* client passed receive queue buffer */ + u_char *attnPtr; /* client passed receive attention buffer */ + u_short sendQSize; /* size of send queue (0..64K bytes) */ + u_short recvQSize; /* size of receive queue (0..64K bytes) */ + u_char localSocket; /* local socket number */ +}; + +typedef struct TRinitParams TRinitParams; + +/* open connection parameters */ + +struct TRopenParams { + u_short localCID; /* local connection id */ + u_short remoteCID; /* remote connection id */ + at_inet_t remoteAddress; /* address of remote end */ + at_inet_t filterAddress; /* address filter */ + unsigned long sendSeq; /* local send sequence number */ + u_long recvSeq; /* receive sequence number */ + u_long attnSendSeq; /* attention send sequence number */ + u_long attnRecvSeq; /* attention receive sequence number */ + u_short sendWindow; /* send window size */ + u_char ocMode; /* open connection mode */ + u_char ocInterval; /* open connection request retry interval */ + u_char ocMaximum; /* open connection request retry maximum */ +}; + +typedef struct TRopenParams TRopenParams; + +/* close connection parameters */ + +struct TRcloseParams { + u_char abort; /* abort connection immediately if non-zero */ +}; + +typedef struct TRcloseParams TRcloseParams; + +/* client 
status parameter block */ + +struct TRstatusParams { + TPCCB ccbPtr; /* pointer to ccb */ + u_short sendQPending; /* pending bytes in send queue */ + u_short sendQFree; /* available buffer space in send queue */ + u_short recvQPending; /* pending bytes in receive queue */ + u_short recvQFree; /* available buffer space in receive queue */ +}; + +typedef struct TRstatusParams TRstatusParams; + +/* read/write parameter block */ + +struct TRioParams { + u_short reqCount; /* requested number of bytes */ + u_short actCount; /* actual number of bytes */ + u_char *dataPtr; /* pointer to data buffer */ + u_char eom; /* indicates logical end of message */ + u_char flush; /* send data now */ + u_char dummy[2]; /*### LD */ +}; + +typedef struct TRioParams TRioParams; + +/* attention parameter block */ + +struct TRattnParams { + u_short attnCode; /* client attention code */ + u_short attnSize; /* size of attention data */ + u_char *attnData; /* pointer to attention data */ + u_char attnInterval; /* retransmit timer in 10-tick intervals */ + u_char dummy[3]; /* ### LD */ +}; + +typedef struct TRattnParams TRattnParams; + +/* client send option parameter block */ + +struct TRoptionParams { + u_short sendBlocking; /* quantum for data packets */ + u_char sendTimer; /* send timer in 10-tick intervals */ + u_char rtmtTimer; /* retransmit timer in 10-tick intervals */ + u_char badSeqMax; /* threshold for sending retransmit advice */ + u_char useCheckSum; /* use ddp packet checksum */ + u_short filler; /* ### LD */ + int newPID; /* ### Temp for backward compatibility 02/11/94 */ +}; + +typedef struct TRoptionParams TRoptionParams; + +/* new cid parameters */ + +struct TRnewcidParams { + u_short newcid; /* new connection id returned */ +}; + +typedef struct TRnewcidParams TRnewcidParams; + +union adsp_command { + TRinitParams initParams; /* dspInit, dspCLInit */ + TRopenParams openParams; /* dspOpen, dspCLListen, dspCLDeny */ + TRcloseParams closeParams; /* dspClose, dspRemove */ + 
TRioParams ioParams; /* dspRead, dspWrite, dspAttnRead */ + TRattnParams attnParams; /* dspAttention */ + TRstatusParams statusParams; /* dspStatus */ + TRoptionParams optionParams; /* dspOptions */ + TRnewcidParams newCIDParams; /* dspNewCID */ +}; + +/* ADSP CntrlParam ioQElement */ + +struct DSPParamBlock { + struct QElem *qLink; + short qType; + short ioTrap; + Ptr ioCmdAddr; + ProcPtr ioCompletion; + short ioResult; + char *ioNamePtr; + short ioVRefNum; + short ioCRefNum; /* adsp driver refNum */ + short csCode; /* adsp driver control code */ + long qStatus; /* adsp internal use */ + u_short ccbRefNum; /* connection end refNum */ + union adsp_command u; +}; + +typedef struct DSPParamBlock DSPParamBlock; +typedef DSPParamBlock *DSPPBPtr; + +struct adspcmd { + struct adspcmd *qLink; + u_int ccbRefNum; + caddr_t ioc; +#ifdef KERNEL + gref_t *gref; + gbuf_t *mp; +#else + void *gref; + void *mp; +#endif + short ioResult; + u_short ioDirection; + short csCode; + u_short socket; + union adsp_command u; +}; + +/* from h/adsp_frames.h */ + +#ifdef NOT_USED +/* + * LAP Frame Information + */ + +typedef struct { + u_char lap_dest; + u_char lap_src; + u_char lap_type; + u_char lap_data[1]; +} LAP_FRAME; + +#define LAP_FRAME_LEN 3 + +#define MAX_FRAME_SIZE 603 + +#define LAP_DDP 0x01 +#define LAP_DDPX 0x02 + +typedef struct { + ua_short ddp_length; /* length of ddp fields */ + u_char ddp_dest; /* destination socket */ + u_char ddp_source; /* source socket */ + u_char ddp_type; /* protocol type */ + u_char ddp_data[1]; /* data field */ +} DDP_FRAME; + +#define DDPS_FRAME_LEN 5 +#endif NOT_USED + +typedef struct { + ua_short ddpx_length; /* length and hop count */ + ua_short ddpx_cksm; /* checksum */ + at_net ddpx_dnet; /* destination network number */ + at_net ddpx_snet; /* source network number */ + u_char ddpx_dnode; /* destination node */ + u_char ddpx_snode; /* source node */ + u_char ddpx_dest; /* destination socket */ + u_char ddpx_source; /* source socket */ + u_char 
ddpx_type; /* protocol type */ + u_char ddpx_data[1]; /* data field */ +} DDPX_FRAME; + +#define DDPL_FRAME_LEN 13 + +#ifdef NOT_USED +typedef struct { + u_char nbp_ctrl_cnt; /* control and tuple count */ + u_char nbp_id; /* enquiry/reply id */ + u_char nbp_data[1]; /* tuple space */ +} NBP_FRAME; + +#define NBP_TYPE_MASK 0xf0 /* mask of ctrl_cnt field */ +#define NBP_CNT_MASK 0x0f /* mask for number of tuples */ +#define NBP_BROADCAST 0x10 /* internet lookup */ +#define NBP_LOOKUP 0x20 /* lookup request */ +#define NBP_REPLY 0x30 /* response to lookup */ + +typedef struct { + u_char atp_control; /* control field */ + u_char atp_map; /* bitmap for acknowledgement */ + ua_short atp_tid; /* transaction id */ + union + { + u_char b[4]; /* user u_chars */ + ua_long dw; + } atp_ub; + u_char atp_data[1]; /* data field */ +} ATP_FRAME; + +#define ATP_FRAME_LEN 8 + +#define ATP_TREQ 0x40 /* transaction request */ +#define ATP_TRESP 0x80 /* response packet */ +#define ATP_TREL 0xc0 /* transaction release packet */ +#define ATP_XO 0x20 /* exactly once flag */ +#define ATP_EOM 0x10 /* end of message flag */ +#define ATP_STS 0x08 /* send transaction status */ + +#define ATP_TYPE(x) ((x)->atp_control & 0xc0) + +typedef struct { + at_net net1; + u_char zonename[33]; +} ZIP_1; + +typedef struct { + at_net net1; + at_net net2; + u_char zonename[33]; +} ZIP_2; + +typedef struct { + u_char zip_command; /* zip command number */ + u_char flags; /* Bit-mapped */ + union + { + ZIP_1 o; /* Packet has one net number */ + ZIP_2 r; /* Packet has cable range */ + } u; +} ZIP_FRAME; + +/* Flags in the ZIP GetNetInfo & NetInfoReply buffer */ + +#define ZIPF_BROADCAST 0x80 +#define ZIPF_ZONE_INVALID 0x80 +#define ZIPF_USE_BROADCAST 0x40 +#define ZIPF_ONE_ZONE 0x20 + +#define ZIP_QUERY 1 /* ZIP Commands in zip frames */ +#define ZIP_REPLY 2 +#define ZIP_TAKEDOWN 3 +#define ZIP_BRINGUP 4 +#define ZIP_GETNETINFO 5 +#define ZIP_NETINFOREPLY 6 +#define ZIP_NOTIFY 7 + +#define ZIP_GETMYZONE 7 /* ZIP 
commands in atp user u_chars[0] */ +#define ZIP_GETZONELIST 8 +#define ZIP_GETLOCALZONES 9 +#define ZIP_GETYOURZONE 10 + +/* + * Response to Responder Request type #1. + * + * The first 4 u_chars are actually the 4 ATP user u_chars + * Following this structure are 4 PASCAL strings: + * System Version String. (max 127) + * Finder Version String. (max 127) + * LaserWriter Version String. (max 127) + * AppleShare Version String. (max 24) + */ +typedef struct +{ + u_char UserU_Chars[2]; + ua_short ResponderVersion; + ua_short AtalkVersion; + u_char ROMVersion; + u_char SystemType; + u_char SystemClass; + u_char HdwrConfig; + ua_short ROM85Version; + u_char ResponderLevel; + u_char ResponderLink; + u_char data[1]; +} RESPONDER_FRAME; +#endif /* NOT_USED */ + +/* + * ADSP Frame + */ +typedef struct { + ua_short CID; + ua_long pktFirstByteSeq; + ua_long pktNextRecvSeq; + ua_short pktRecvWdw; + u_char descriptor; /* Bit-Mapped */ + u_char data[1]; +} ADSP_FRAME, *ADSP_FRAMEPtr; + +#define ADSP_FRAME_LEN 13 + +#define ADSP_CONTROL_BIT 0x80 +#define ADSP_ACK_REQ_BIT 0x40 +#define ADSP_EOM_BIT 0x20 +#define ADSP_ATTENTION_BIT 0x10 +#define ADSP_CONTROL_MASK 0x0F + +#define ADSP_CTL_PROBE 0x00 /* Probe or acknowledgement */ +#define ADSP_CTL_OREQ 0x01 /* Open Connection Request */ +#define ADSP_CTL_OACK 0x02 /* Open Request acknowledgment */ +#define ADSP_CTL_OREQACK 0x03 /* Open Request and acknowledgement */ +#define ADSP_CTL_ODENY 0x04 /* Open Request denial */ +#define ADSP_CTL_CLOSE 0x05 /* Close connection advice */ +#define ADSP_CTL_FRESET 0x06 /* Forward Reset */ +#define ADSP_CTL_FRESET_ACK 0x07 /* Forward Reset Acknowledgement */ +#define ADSP_CTL_RETRANSMIT 0x08 /* Retransmit advice */ + +typedef struct { + ua_short version; /* Must be in network byte order */ + ua_short dstCID; /* */ + ua_long pktAttnRecvSeq; /* Must be in network byte order */ +} ADSP_OPEN_DATA, *ADSP_OPEN_DATAPtr; + +#define ADSP_OPEN_FRAME_LEN 8 + +#define ADSP_MAX_DATA_LEN 572 + +/* from 
h/adsp_ioctl.h */ + +/* + * Defines that correspond to atlog.h in the N & C Appletalk + * sources. + */ + +#define AT_MID_ADSP 212 + +/* Streams ioctl definitions */ + +#define ADSP_IOCTL(i) ((i>>8) == AT_MID_ADSP) +#define ADSPATTNREAD ((AT_MID_ADSP<<8) | 254) /* read attention data */ +#define ADSPOPEN ((AT_MID_ADSP<<8) | 253) /* open a connection */ +#define ADSPCLOSE ((AT_MID_ADSP<<8) | 252) /* close a connection */ +#define ADSPCLINIT ((AT_MID_ADSP<<8) | 251) /* create a conn listener */ +#define ADSPCLREMOVE ((AT_MID_ADSP<<8) | 250) /* remove a conn listener */ +#define ADSPCLLISTEN ((AT_MID_ADSP<<8) | 249) /* post a listener request */ +#define ADSPCLDENY ((AT_MID_ADSP<<8) | 248) /* deny an open connection request */ +#define ADSPSTATUS ((AT_MID_ADSP<<8) | 247) /* get status of conn end */ +#define ADSPREAD ((AT_MID_ADSP<<8) | 246) /* read data from conn */ +#define ADSPWRITE ((AT_MID_ADSP<<8) | 245) /* write data on the conn */ +#define ADSPATTENTION ((AT_MID_ADSP<<8) | 244) /* send attention message */ +#define ADSPOPTIONS ((AT_MID_ADSP<<8) | 243) /* set conn end options */ +#define ADSPRESET ((AT_MID_ADSP<<8) | 242) /* forward reset connection */ +#define ADSPNEWCID ((AT_MID_ADSP<<8) | 241) /* generate a cid conn end */ +#define ADSPBINDREQ ((AT_MID_ADSP<<8) | 240) +#define ADSPGETSOCK ((AT_MID_ADSP<<8) | 239) +#define ADSPGETPEER ((AT_MID_ADSP<<8) | 238) + +#ifdef KERNEL + +/* from h/adsp_adsp.h */ + +/* Definitions from strgeneric.h (on AIX?) 
*/ +#define STR_IGNORE 0 +#define STR_PUTNEXT 1 +#define STR_PUTBACK 2 +#define STR_QTIME (HZ >> 3) + +extern int adspInit(); +extern int adspOpen(); +extern int adspCLListen(); +extern int adspClose(); +extern int adspCLDeny(); +extern int adspStatus(); +extern int adspRead(); +extern int adspWrite(); +extern int adspAttention(); +extern int adspOptions(); +extern int adspReset(); +extern int adspNewCID(); +extern int adspPacket(); + + +struct adsp_debug { + int ad_time; + int ad_seq; + int ad_caller; + int ad_descriptor; + int ad_bits; + short ad_sendCnt; + short ad_sendMax; + int ad_maxSendSeq; + int ad_sendWdwSeq; +}; + +#endif +#endif /* _NETAT_ADSP_H_ */ diff --git a/bsd/netat/adsp_CLDeny.c b/bsd/netat/adsp_CLDeny.c new file mode 100644 index 000000000..9d6a9b7e7 --- /dev/null +++ b/bsd/netat/adsp_CLDeny.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * CLDeny.c + * + * From Mike Shoemaker 9/6/90 + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * dspCLDeny + * + * INPUTS: + * --> ccbRefNum refnum of connection listener + * --> remoteCID connection identifier of remote connection end + * --> remoteAddress internet address of remote connection end + * + * OUTPUTS: + * none + * + * ERRORS: + * errRefNum bad connection refnum + * errState not a connection listener + * errAborted request aborted by a Remove call + */ +int adspCLDeny(sp, pb) /* (DSPPBPtr pb) */ + struct adspcmd *pb; + CCBPtr sp; +{ + gbuf_t *mp; + ADSP_FRAMEPtr adspp; + ADSP_OPEN_DATAPtr adspop; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + mp = gbuf_alloc(AT_WR_OFFSET + DDPL_FRAME_LEN + ADSP_FRAME_LEN + ADSP_OPEN_FRAME_LEN, + PRI_LO); + gbuf_rinc(mp,AT_WR_OFFSET); + gbuf_wset(mp,DDPL_FRAME_LEN); + adspp = (ADSP_FRAMEPtr)gbuf_wptr(mp); + gbuf_winc(mp,ADSP_FRAME_LEN); + bzero((caddr_t) gbuf_rptr(mp),DDPL_FRAME_LEN + ADSP_FRAME_LEN + ADSP_OPEN_FRAME_LEN); + adspp->descriptor = ADSP_CONTROL_BIT | ADSP_CTL_ODENY; + adspop = (ADSP_OPEN_DATAPtr)gbuf_wptr(mp); + gbuf_winc(mp,ADSP_OPEN_FRAME_LEN); + UAS_ASSIGN(adspop->dstCID, pb->u.openParams.remoteCID); + UAS_ASSIGN(adspop->version, 0x100); + adsp_sendddp(sp, mp, + DDPL_FRAME_LEN + ADSP_FRAME_LEN + ADSP_OPEN_FRAME_LEN, + &pb->u.openParams.remoteAddress, DDP_ADSP); + adspioc_ack(0, pb->ioc, pb->gref); + return 0; +} diff --git a/bsd/netat/adsp_CLListen.c b/bsd/netat/adsp_CLListen.c new file mode 100644 index 000000000..f4f33f25d --- /dev/null +++ b/bsd/netat/adsp_CLListen.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* dspCLListen.c + * + * From Mike Shoemaker v01.02 04/19/90 mbs + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * dspCLListen + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * --> filterAddress filter for incoming open connection requests + * + * OUTPUTS: + * <-- remoteCID connection identifier of remote connection end + * <-- remoteAddress internet address of remote connection end + * <-- sendSeq initial send sequence number to use + * <-- sendWindow initial size of remote end's receive buffer + * <-- attnSendSeq initial attention send sequence number to use + * + * ERRORS: + * errRefNum bad connection refnum + * errState not a connection listener + * errAborted request aborted by a Remove call + */ +int adspCLListen(sp, pb) /* (DSPPBPtr pb) */ + register CCBPtr sp; + register struct adspcmd *pb; +{ + register struct adspcmd *clpb; + gbuf_t *mp; + int s; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + if (sp->state != sListening) { /* But this isn't a connection listener! 
*/ + pb->ioResult = errState; + return EALREADY; + } + + if (mp = gbuf_copym(pb->mp)) { /* keep a copy of the parameter block */ + pb->ioResult = 1; /* not done */ + adspioc_ack(0, pb->ioc, pb->gref); /* release user ioctl block */ + clpb = (struct adspcmd *)gbuf_rptr(mp); + clpb->ioc = 0; + clpb->mp = mp; + ATDISABLE(s, sp->lock); + if (qAddToEnd(&sp->opb, clpb)){ /* Add to list of listeners */ + ATENABLE(s, sp->lock); + return EFAULT; /* bogus, but discriminate from other errors */ + } + ATENABLE(s, sp->lock); + } else { + pb->ioResult = errDSPQueueSize; + return ENOBUFS; + } + return 0; + +} diff --git a/bsd/netat/adsp_Close.c b/bsd/netat/adsp_Close.c new file mode 100644 index 000000000..d7e276e70 --- /dev/null +++ b/bsd/netat/adsp_Close.c @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1995-1998 Apple Computer, Inc. + * All Rights Reserved. 
+ */ + +/* dspClose.c + * From Mike Shoemaker v01.16 06/29/90 mbs + */ +/* + * Change log: + * 06/29/95 - Modified to handle flow control for writing (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +extern atlock_t adspall_lock; + +static void qRemove(CCBPtr, CCBPtr); + + +/* + * CheckOkToClose + * + * Check to see if it is OK to close this connection cleanly. + * + * INPUTS: + * Stream pointer + * OUTPUTS: + * True if no outstanding transactions and we can close cleanly + */ +int CheckOkToClose(sp) /* (CCBPtr sp) */ + CCBPtr sp; +{ + + if (sp->sData) /* Outstanding data ? */ + return 0; + + if (sp->sapb) /* Outstanding send attention ? */ + return 0; + + if (sp->frpb) /* Outstanding forward reset ? */ + return 0; + + if (sp->sendAttnAck) + return 0; + + if (sp->sendDataAck) + return 0; + + /* + * Must be OK to close + */ + sp->sendCtl |= B_CTL_CLOSE; /* So, need to send close advice */ + sp->callSend = 1; + + return 1; /* It's OK to close */ +} + + +/* + * CompleteQueue + * + * Given the address of the head of a queue of DSP parameter blocks, zero + * the queue, and complete each item on the queue with the given result + * code. 
+ * + * INPUTS: + * qhead Address of ptr to first queue element + * code The result code + * OUTPUTS: + * none + */ +int CompleteQueue(qhead, code) /* (DSPPBPtr FPTR qhead, OSErr code) */ + struct adspcmd **qhead; + int code; +{ + register struct adspcmd *p; + register struct adspcmd *n; + register gref_t *gref; + register int total = 0; + CCBPtr sp = 0; + int s; + + n = *qhead; /* Get first item */ + *qhead = 0; /* Zero out the queue */ + if (n) { + gref = n->gref; + if (gref->info) { + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + atalk_flush(sp->gref); + ATDISABLE(s, sp->lock); + } + } + + while (p = n) { /* while items left */ + n = (struct adspcmd *)(p->qLink); /* Save next guy */ + p->ioResult = code; + if (sp) { + completepb(sp, p); /* complete the copy of the request */ + total++; + } else + gbuf_freem(p->mp); + } /* while */ + if (sp) + ATENABLE(s, sp->lock); + return(total); +} + +/* + * RemoveCCB + * + * Called from do close to free up the user's CCB. So, we remove the + * CCB from the list of CCB's. 
+ * + * INPUTS: + * sp pointer to ccb + * pb a remove param block to complete when done + * OUTPUTS: + * none + */ + +void RemoveCCB(sp, pb) /* (CCBPtr sp, DSPPBPtr pb) */ + CCBPtr sp; + struct adspcmd *pb; +{ + gref_t *gref; + + if (sp->gref == 0) + return; + /* + * Unlink CCB from list + */ + qRemove(AT_ADSP_STREAMS, sp); /* remove sp from active streams queue */ + + if (pb) { + pb->ioResult = 0; + if (pb->ioc) /* is this a current or queued request */ + adspioc_ack(0, pb->ioc, pb->gref); /* current */ + else { + completepb(sp, pb); /* queued */ + } + + if (sp->opb && (pb != sp->opb)) { /* if the pb requested is not the */ + pb = sp->opb; /* waiting open pb, complete it too */ + sp->opb = 0; + pb->ioResult = 0; + completepb(sp, pb); + } else { + sp->opb = 0; + } + } + gref = sp->gref; + sp->gref = 0; + if (gref->info == (char *)sp->sp_mp) { /* queue head is still valid */ + unsigned char skt; + + if ((skt = sp->localSocket) != 0) { + if (adspDeassignSocket(sp) == 0) + ddp_notify_nbp(skt, sp->pid, DDP_ADSP); + } + + if (gref->info) { + gbuf_freem((gbuf_t *)gref->info); /* free the CCB */ + gref->info = 0; + } + } else + gbuf_freem(sp->sp_mp); /* our head is already gone, be sure + * to release our resources too */ +} + +int AbortIO(sp, err) + CCBPtr sp; + short err; +{ + register int total = 0; /* must start at zero: accumulated with += below */ + + if (sp->gref == 0) + return 0; + /* + * Complete all outstanding transactions. + */ + total += CompleteQueue(&sp->sapb, err); /* Abort outstanding send attentions */ + CompleteQueue(&sp->frpb, err); /* Abort outstanding forward resets */ + + if (sp->sbuf_mb) { /* clear the send queue */ + gbuf_freel(sp->sbuf_mb); + sp->sbuf_mb = 0; + } + + if (sp->csbuf_mb) { + gbuf_freem(sp->csbuf_mb); + sp->csbuf_mb = 0; + } + sp->sData = 0; + + return(total); +} + +/* + * DoClose + * + * Called from several places (probe timeout, recv close advice, + * dspRemove, etc.) to change state of connection to closed and + * complete all outstanding I/O. 
+ * + * Will also remove the CCB if there is a dsp remove pending. + * + * INPUTS: + * sp An ADSP stream + * OUTPUTS: + * none + */ +void DoClose(sp, err, force_abort) /* (CCBPtr sp, OSErr err) */ + register CCBPtr sp; + int err; +{ + register struct adspcmd *pb, *np; + register gbuf_t *mp; + int aborted_count; + + dPrintf(D_M_ADSP, D_L_TRACE, ("DoClose: pid=%d,e=%d,a=%d,s=%d,r=%d\n", + sp->pid, err, force_abort, sp->localSocket, sp->removing)); + sp->userFlags |= eClosed; /* Set flag */ + sp->state = sClosed; + sp->openState = O_STATE_NOTHING; + + /* + * Clean up any timer elements + */ + RemoveTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer); + RemoveTimerElem(&adspGlobal.fastTimers, &sp->FlushTimer); + RemoveTimerElem(&adspGlobal.fastTimers, &sp->RetryTimer); + RemoveTimerElem(&adspGlobal.fastTimers, &sp->AttnTimer); + RemoveTimerElem(&adspGlobal.fastTimers, &sp->ResetTimer); + + aborted_count = AbortIO(sp, err); + np = sp->opb; /* Get list of close/removes to complete */ + sp->opb = 0; /* set this list null */ + + while (pb = np) { /* Handle all of the close/remove param blks */ + np = (struct adspcmd *)pb->qLink; /* Get next guy (if any) */ + pb->qLink = 0; + pb->ioResult = err; + completepb(sp, pb); + } + if (sp->removing && (force_abort >= 0)) { /* Abort outstanding receives */ + aborted_count += CompleteQueue(&sp->rpb, err); + + if (sp->deferred_mb) { + gbuf_freel(sp->deferred_mb); + sp->deferred_mb = 0; + } + if (sp->attn_mb) { + gbuf_freem(sp->attn_mb); + sp->attn_mb = 0; + } + if (sp->rbuf_mb) { /* clear the rcv queue */ + gbuf_freem(sp->rbuf_mb); + sp->rbuf_mb = 0; + } + if (sp->crbuf_mb) { + gbuf_freem(sp->crbuf_mb); + sp->crbuf_mb = 0; + } + sp->rData = 0; + + /* if our connection has been timed out */ + /* and the user wasn't notified of the TearDown */ + /* because of pending requests on this socket */ + /* then fake a read completion to force the notification */ + + if (force_abort && aborted_count == 0) { + if (mp = gbuf_alloc(sizeof(struct 
adspcmd), PRI_HI)) { + pb = (struct adspcmd *)gbuf_rptr(mp); + gbuf_wset(mp,sizeof(struct adspcmd)); + + bzero((caddr_t) pb, sizeof(struct adspcmd)); + pb->mp = mp; + pb->csCode = dspRead; + pb->ioResult = errAborted; + completepb(sp, pb); /* send fake read completion */ + } + } + sp->removing = 0; + RemoveCCB(sp, 0); /* Will call completion routine */ + } + sp->userFlags &= ~eClosed; +} + + +/* + * dspClose + * + * Also called for dspRemove and dspCLRemove. + * Must handle case of multiple close calls being issued (without + * abort bit set) Can only allow one pending remove though. + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * --> abort abort the connection + * + * OUTPUTS: + * none + * + * ERRORS: + * errRefNum Bad connection Refnum + */ +int adspClose(sp, pb) /* (DSPPBPtr pb) */ + register CCBPtr sp; + register struct adspcmd *pb; +{ + int s; + register gbuf_t *mp; + + /* Must execute nearly all of this with ints off because user could + * be issuing a second dspRemove while the first is pending. Until + * we can detect this, we must not allow interrupts. + * Also, we can't handle the case where a close was issued earlier, + * and now this is the remove. If the write completion for the + * close advice packet occurs in the middle of this, we might + * foul up. + */ + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + /* + * Handle dspCLRemove + */ + if (pb->csCode == (short)dspCLRemove) { /* Remove connection listener */ + if (sp->state != (short)sListening) { /* But it's not a listener! */ + pb->ioResult = errState; + return EINVAL; + } + CompleteQueue(&sp->opb, errAborted); /* Complete all dspListens */ + RemoveCCB(sp, pb); /* Will call completion routine */ + return 0; + } + + + /* + * Either dspClose or dspRemove + */ + + if (sp->removing) { /* Don't allow dspRemove or dspClose */ + /* after one dspRemove has been issued. 
*/ + pb->ioResult = errState; + return EINVAL; + } + + + /* + * The previous Macintosh ADSP allowed you to call close on a + * connection that was in the process of opening or passively + * waiting for an open request. It is also legal to close a + * connection that is already closed. No error will be generated. + * + * It is also legal to issue a second close call while the first + * is still pending. + */ + if (pb->csCode == (short)dspClose) { + ATDISABLE(s, sp->lock); + if ((sp->state == (short)sPassive) || (sp->state == (short)sOpening)) { + sp->state = sClosed; + ATENABLE(s, sp->lock); + DoClose(sp, errAborted, 0); + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; + } + + if (sp->state == (word)sClosed) { /* Ok to close a closed connection */ + ATENABLE(s, sp->lock); + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; + } + if ((sp->state != (word)sOpen) && (sp->state != (word)sClosing)) { + ATENABLE(s, sp->lock); + pb->ioResult = errState; + return EINVAL; + } + + sp->state = sClosing; /* No matter what, we're closing */ + ATENABLE(s, sp->lock); + } /* dspClose */ + + else { /* dspRemove */ + ATDISABLE(s, sp->lock); + sp->removing = 1; /* Prevent allowing another dspClose. */ + /* Tells completion routine of close */ + /* packet to remove us. */ + + if (sp->state == sPassive || sp->state == sClosed || + sp->state == sOpening) { + sp->state = sClosed; + ATENABLE(s, sp->lock); + DoClose(sp, errAborted, 0); /* Will remove CCB! 
*/ + return 0; + } else { /* sClosing & sOpen */ + sp->state = sClosing; + ATENABLE(s, sp->lock); + } + + } /* dspRemove */ + + if (pb->u.closeParams.abort || CheckOkToClose(sp)) /* going to close */ + { + AbortIO(sp, errAborted); + sp->sendCtl = B_CTL_CLOSE; /* Send close advice */ + } + + pb->ioResult = 1; + if ( (mp = gbuf_copym(pb->mp)) ) { /* duplicate user request */ + adspioc_ack(0, pb->ioc, pb->gref); /* release user */ + pb = (struct adspcmd *)gbuf_rptr(mp); /* get new parameter block */ + pb->ioc = 0; + pb->mp = mp; + ATDISABLE(s, sp->lock); + qAddToEnd(&sp->opb, pb); /* and save it */ + ATENABLE(s, sp->lock); + } else { + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); /* release user, and keep no copy + * for kernel bookkeeping, yetch! + */ + } + CheckSend(sp); + + return 0; +} + +static void qRemove(qptr, elem) + register CCBPtr qptr; + register CCBPtr elem; +{ + int s; + + ATDISABLE(s, adspall_lock); + while(qptr->ccbLink) { + if ((DSPPBPtr)(qptr->ccbLink) == (DSPPBPtr)elem) { + qptr->ccbLink = elem->ccbLink; + elem->ccbLink = 0; + ATENABLE(s, adspall_lock); + return; + } + qptr = qptr->ccbLink; + } + ATENABLE(s, adspall_lock); +} + +int RxClose(sp) + register CCBPtr sp; +{ + register gbuf_t *mp; + register struct adspcmd *pb; + int s, l; + + ATDISABLE(l, sp->lockClose); + ATDISABLE(s, sp->lock); + if ((sp->state == sClosing) || (sp->state == sClosed)) { + ATENABLE(s, sp->lock); + ATENABLE(l, sp->lockClose); + return 0; + } + sp->state = sClosed; + ATENABLE(s, sp->lock); + CheckReadQueue(sp); /* try to deliver all remaining data */ + + if ( (mp = gbuf_alloc(sizeof(struct adspcmd), PRI_HI)) ) { + pb = (struct adspcmd *)gbuf_rptr(mp); + gbuf_wset(mp,sizeof(struct adspcmd)); + pb->ioc = 0; + pb->mp = mp; + + pb->csCode = dspClose; + pb->ioResult = 0; + completepb(sp, pb); /* send close completion */ + } + +if ((sp->userFlags & eClosed) == 0) + DoClose(sp, errAborted, -1); /* abort send requests and timers */ + + ATENABLE(l, sp->lockClose); + 
return 0; +} diff --git a/bsd/netat/adsp_Control.c b/bsd/netat/adsp_Control.c new file mode 100644 index 000000000..69221b4fb --- /dev/null +++ b/bsd/netat/adsp_Control.c @@ -0,0 +1,546 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1995-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* Control.c + * From Mike Shoemaker v01.25 07/02/90 for MacOS + * 09/07/95 - Modified for performance (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* # of additional ticks to add to any timer that we're queuing up. 
For + * very short delays (1 and 2), the timer fires before the transmit + * even takes place */ +#define TX_DLY 2 + +int adsp_window = 1; + +/* + * CalcRecvWdw + * + * INPUTS: + * sp ADSP Stream + * OUTPUTS: + * # of bytes in avail in local receive queue + */ +int CalcRecvWdw(sp) /* (CCBPtr sp) */ + CCBPtr sp; +{ + int bytes; + + bytes = calcRecvQ(sp); + bytes = sp->rbuflen - bytes; /* get what is left */ + + if (bytes <= 16) { /* %%% this should be zero */ + sp->rbufFull = 1; /* Save flag that our recv buf is full */ + return 0; + } + else + return ((bytes+bytes+bytes) >> 2) + 1; /* %%% */ +} + +calcRecvQ(sp) + CCBPtr sp; +{ + int bytes = 0; +#ifdef AT_Socket + register struct mbuf *m, *p; + + if (((sp->gref)->so)->so_rcv.sb_mb) + for (p = ((sp->gref)->so)->so_rcv.sb_mb; p; p = p->m_nextpkt) + for (m = p; m; m = m->m_next) + bytes += m->m_len; +#else + register gbuf_t *mb; + + if (sp->rData) { /* There is data in buffer */ + if (mb = sp->rbuf_mb) { + do { + bytes += gbuf_msgsize(mb); + mb = gbuf_next(mb); + } while (mb); + } + if (mb = sp->crbuf_mb) + bytes += gbuf_msgsize(mb); + } +#endif + return bytes; +} + +/* + * CheckSend + * + * Check to see if the transmit PB is available and if there is anything + * to transmit. Start off any pending transmit. 
+ * + * Normally called from the write completion routine + * + * INPUTS: + * sp Connection control block + * OUTPUTS: + * true if sent a packet + */ +void CheckSend(sp) /* (CCBPtr sp) */ + register CCBPtr sp; +{ + int i; + int attnMsg; /* True if attention message */ + int s; + register gbuf_t *mp; /* send message block */ +#ifdef notdef + register gbuf_t *tmp; + u_char current; +#endif + char *dp; /* a data pointer */ + int use_attention_code; + int len; /* length used in allocd mblk */ + int datalen; /* amount of data attached to mblk */ + gbuf_t *mprev, *mlist = 0; + +top: + + if (sp->state == sClosed) + return; + + /* get a message block to hold DDP and + * ADSP headers + 2 bytes of attention + * code if necessary */ + if ((mp = gbuf_alloc(AT_WR_OFFSET + DDPL_FRAME_LEN + ADSP_FRAME_LEN + ADSP_OPEN_FRAME_LEN + 2, + PRI_LO)) == 0) { + if (mlist) + gbuf_freel(mlist); + return; /* can't get buffers... do nothing! */ + } + ATDISABLE(s, sp->lock); + sp->callSend = 0; /* Clear flag */ + use_attention_code = 0; + len = 0; + datalen = 0; + + gbuf_rinc(mp,AT_WR_OFFSET); + gbuf_wset(mp,DDPL_FRAME_LEN); /* leave room for DDP header */ + + if (sp->sendCtl) { + short mask; + + i = sp->sendCtl; /* get local copy bitmap of */ + /* which ctl packets to send. 
*/ + attnMsg = 0; + + if (i & 0x1E) /* One of the open ctrl packets */ + { + + /* point past ADSP header (no attention) */ + dp = ((char *) gbuf_wptr(mp)) + ADSP_FRAME_LEN; + UAL_ASSIGN(sp->f.pktFirstByteSeq, netdw(sp->firstRtmtSeq)); + + UAS_ASSIGN(sp->of.version, netw(0x0100)); /* Fill in open connection parms */ + UAS_ASSIGN(sp->of.dstCID, sp->remCID); /* Destination CID */ + UAL_ASSIGN(sp->of.pktAttnRecvSeq, netdw(sp->attnRecvSeq)); + bcopy((caddr_t) &sp->of, (caddr_t) dp, ADSP_OPEN_FRAME_LEN); + len += ADSP_OPEN_FRAME_LEN; + + if (i & B_CTL_OREQ) { + UAS_ASSIGN(sp->f.CID, sp->locCID); + mask = B_CTL_OREQ; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_OREQ; + } else if (i & B_CTL_OACK) { + UAS_ASSIGN(sp->f.CID, sp->locCID); + mask = B_CTL_OACK; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_OACK; + } else if (i & B_CTL_OREQACK) { + UAS_ASSIGN(sp->f.CID, sp->locCID); + mask = B_CTL_OREQACK; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_OREQACK; + } else /* Deny */ + { + UAS_ASSIGN(sp->f.CID, 0); + mask = B_CTL_ODENY; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_ODENY; + UAL_ASSIGN(sp->f.pktFirstByteSeq, 0); + } + + if (i & (B_CTL_OREQ | B_CTL_OREQACK)) + /* Need to start up a timer for it */ + { + /* It's possible that we've received a duplicate + * open request. In this case, there will already be + * a timer queued up for the request+ack + * packet we sent the first time. So remove the timer + * and start another. 
+ */ + RemoveTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer); + InsertTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer, + sp->openInterval+1); + } + } else { + /* seq # of next byte to send */ + UAL_ASSIGN(sp->f.pktFirstByteSeq, netdw(sp->sendSeq)); + + if (i & B_CTL_CLOSE) { + sp->state = sClosed; /* Now we're closed */ + mask = B_CTL_CLOSE; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_CLOSE; + } else if (i & B_CTL_PROBE) { + mask = B_CTL_PROBE; + sp->f.descriptor = + ADSP_CONTROL_BIT | ADSP_CTL_PROBE | ADSP_ACK_REQ_BIT; + } else if (i & B_CTL_FRESET) { + mask = B_CTL_FRESET; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_FRESET; + InsertTimerElem(&adspGlobal.fastTimers, + &sp->ResetTimer, sp->rtmtInterval+TX_DLY); + } else if (i & B_CTL_FRESETACK) { + mask = B_CTL_FRESETACK; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_FRESET_ACK; + } + else if (i & B_CTL_RETRANSMIT) { + mask = B_CTL_RETRANSMIT; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_CTL_RETRANSMIT; + } + else { + dPrintf(D_M_ADSP, D_L_ERROR, ("CheckSend: Control bit error\n")); + } + } /* non open control packet */ + + sp->sendCtl &= ~mask; + goto sendit; + } /* send control packet */ + + if (sp->sendAttnData) /* Send attn ready to go? */ + { + sp->sendAttnData = 0; /* Clear Flags */ + if (sp->sapb) { + sp->sendAttnAck = 0; /* This will also do an Attn Ack */ + + attnMsg = 1; + sp->f.descriptor = ADSP_ATTENTION_BIT | ADSP_ACK_REQ_BIT; + if (gbuf_cont(sp->sapb->mp)) { + gbuf_cont(mp) = gbuf_dupm(gbuf_cont(sp->sapb->mp)); + /* Major hack here. The ADSP Attn code is butted up against + * the end of the adsp packet header, and the length is + * increased by 2. (There is a pad field behind the adsp + * header in the CCB just for this purpose.) 
+ */ + } + use_attention_code++; + + sp->f.data[0] = high(sp->sapb->u.attnParams.attnCode); + sp->f.data[1] = low(sp->sapb->u.attnParams.attnCode); + InsertTimerElem(&adspGlobal.fastTimers, &sp->AttnTimer, + sp->rtmtInterval+TX_DLY); + goto sendit; + } + } /* attn data */ + + if (sp->sendAttnAck) /* Send attn ack ready to go? */ + { + attnMsg = 1; + sp->f.descriptor = ADSP_CONTROL_BIT | ADSP_ATTENTION_BIT; + sp->sendAttnAck = 0; + goto sendit; + } /* attn ack */ + + if ((sp->state == sOpen || sp->state == sClosing) && /* Correct state */ + (!sp->waitingAck) && /* not waiting for an ACK */ + (sp->sData) && /* have data to send */ + (GTE(sp->sendWdwSeq,sp->sendSeq)) && /* he has room to accept it */ + (sp->pktSendCnt < sp->pktSendMax)) /* haven't sent too many pkts + * in a row. */ + { + attnMsg = 0; + if (datalen = attachData(sp, mp)) /* attach data to mp */ + goto sendit; /* if successful, sendit */ + } + + if (sp->sendDataAck) { + UAL_ASSIGN(sp->f.pktFirstByteSeq, netdw(sp->sendSeq)); /* seq # of next byte */ + attnMsg = 0; + sp->f.descriptor = ADSP_CONTROL_BIT; + goto sendit; + } + + /* + * Nothing left to do... 
+ */ + if (mp) + gbuf_freem(mp); + ATENABLE(s, sp->lock); + if (mlist) + adsp_sendddp(sp, mlist, 0, &sp->remoteAddress, DDP_ADSP); + return; + +sendit: + + if (attnMsg) { + UAL_ASSIGN(sp->f.pktFirstByteSeq, netdw(sp->attnSendSeq)); + UAL_ASSIGN(sp->f.pktNextRecvSeq, netdw(sp->attnRecvSeq)); + UAS_ASSIGN(sp->f.pktRecvWdw, 0); /* Always zero in attn pkt */ + } else { + sp->sendDataAck = 0; + UAL_ASSIGN(sp->f.pktNextRecvSeq, netdw(sp->recvSeq)); + UAS_ASSIGN(sp->f.pktRecvWdw, netw(CalcRecvWdw(sp))); + } + if (use_attention_code) { + bcopy((caddr_t) &sp->f, (caddr_t) gbuf_wptr(mp), ADSP_FRAME_LEN + 2); + len += ADSP_FRAME_LEN + 2; + } else { + bcopy((caddr_t) &sp->f, (caddr_t) gbuf_wptr(mp), ADSP_FRAME_LEN); + len += ADSP_FRAME_LEN; + } + gbuf_winc(mp,len); /* update mblk length */ + if (mlist) + gbuf_next(mprev) = mp; + else + mlist = mp; + mprev = mp; + + if (sp->state == sClosed) { /* must have sent a close advice */ + /* send header + data */ + ATENABLE(s, sp->lock); + adsp_sendddp(sp, mlist, 0, &sp->remoteAddress, DDP_ADSP); + DoClose(sp, 0, -1); /* complete close! */ + return; + } + ATENABLE(s, sp->lock); + if (sp->state == sClosing) /* See if we were waiting on this write */ + CheckOkToClose(sp); + goto top; +} + +/* + * completepb delivers a paramater block with all its appropriate fields + * set back to the user. + * + * The assumptions here are that the PB is not linked to any queue, + * that the fields including ioResult are set, and that the + * kernel is no longer interested in the mblks that may or + * maynot be linked to this pb. 
+ */ +void completepb(sp, pb) + register CCBPtr sp; + register struct adspcmd *pb; +{ + if (sp->gref && (sp->gref->info == (caddr_t)sp->sp_mp)) { + if (gbuf_len(pb->mp) > sizeof(struct adspcmd)) + gbuf_wset(pb->mp,sizeof(struct adspcmd)); + SndMsgUp(sp->gref, pb->mp); + NotifyUser(sp); + } else + gbuf_freem(pb->mp); +} + +attachData(sp, mp) + register CCBPtr sp; + register gbuf_t *mp; +{ + int seq; + int cnt; + char eom = 0; + int bsize; + int diff; + char sendAckReq; + int partial = 0; /* flag for a partial send */ + int tcnt = 0; + register gbuf_t *smp; /* send data message block */ + register gbuf_t *psmp; /* previous message block */ + + sendAckReq = 0; + + if (LT(sp->sendSeq, sp->firstRtmtSeq)) /* Sanity check on send seq */ + sp->sendSeq = sp->firstRtmtSeq; /* seq must be oldest in buffer. */ + + /* This test and assignment was necessary because the retry VBL could + * have fired and reset send Seq to first Rtmt Seq, and then an + * expected ACK comes in that bumps first Rtmt Seq up. Then we + * have the problem that send Seq is less than first Rtmt Seq. + * The easiest fix to this timing dilemma seems to be to reset + * sendSeq to first Rtmt Seq if we're sending the first packet. + */ + UAL_ASSIGN(sp->f.pktFirstByteSeq, netdw(sp->sendSeq)); + + if (smp = sp->sbuf_mb) /* Get oldest header */ + eom = 1; + else if (smp = sp->csbuf_mb) + eom = 0; + + if (smp == 0) { /* this shouldn't happen... */ + sp->sData = 0; + return 0; + } + /* + * Must find next byte to transmit + */ + seq = sp->firstRtmtSeq; /* Seq # of oldest in buffer */ + while ((diff = (sp->sendSeq - seq)) >= ((bsize = gbuf_msgsize(smp)) + eom)) { + seq += bsize + eom; /* update sequence # */ + if (gbuf_next(smp)) { /* if another send buffer */ + smp = gbuf_next(smp); + eom = 1; + } else if (smp == sp->csbuf_mb) { /* seen the current one? 
*/ + smp = 0; + break; + } else if (sp->csbuf_mb) { /* look at it */ + smp = sp->csbuf_mb; + eom = 0; + } else { /* no more buffers */ + smp = 0; + break; + } + } /* while */ + + if (smp) { + if (gbuf_next(smp) == 0) /* last block */ + sendAckReq = 1; + cnt = bsize - diff; /* # of bytes in this block */ + } else + cnt = 0; + + /* + * Check to see if the number of bytes is less than the 'send + * Blocking' setting. If so, then we won't send this data unless + * we're flushing. So we set up a timer to force a flush later. + */ + if ((cnt < sp->sendBlocking) && !sp->writeFlush) { + InsertTimerElem(&adspGlobal.fastTimers, &sp->FlushTimer, + sp->sendInterval); + return 0; /* no data to send */ + } + + if (cnt > ADSP_MAX_DATA_LEN) { /* truncate to one packet */ + cnt = ADSP_MAX_DATA_LEN; + eom = 0; + sendAckReq = 0; /* Won't send ack because end of data */ + partial++; + } + + if (smp) { + /* trim extra bytes off the beginning of the "block" before the copy */ + while (diff) { + if (gbuf_len(smp) > diff) + break; + else + diff -= gbuf_len(smp); + smp = gbuf_cont(smp); + } + if((gbuf_cont(mp) = gbuf_dupm(smp)) == 0) /* copy the data */ + return 0; + smp = gbuf_cont(mp); /* use the new message blocks */ + gbuf_rinc(smp,diff); /* and get to the first byte of data to send */ + } + /* + * Check to see if this many bytes will close the other end's + * receive window. If so, we need to send an ack request along + * with this. sendWdwSeq is the seq # of the last byte that + * the remote has room for + */ + if ((diff = sp->sendWdwSeq + 1 - sp->sendSeq) <= cnt) { + if (diff < cnt) { /* Won't fit exactly */ + eom = 0; /* so can't send EOM */ + cnt = diff; + partial++; + } + sendAckReq = 1; /* Make him tell us new recv. window */ + sp->noXmitFlow = 1; /* Don't do flow control calc. 
*/ + } + + /* trim extra bytes off the tail of the "block" after the copy */ + if (partial && smp) { + psmp = smp; + tcnt = cnt; + while (tcnt && smp) { /* while there are message blocks and data */ + if (tcnt >= gbuf_len(smp)) { + tcnt -= gbuf_len(smp); + if (tcnt) { + psmp = smp; + smp = gbuf_cont(smp); + } else { + if (psmp != smp) { /* not the first item on the list */ + gbuf_cont(psmp) = 0; + gbuf_freem(smp); + smp = psmp; + } else { + gbuf_freem(gbuf_cont(smp)); + gbuf_cont(smp) = 0; + } + break; + } + } else { + gbuf_wset(smp,tcnt); + if (gbuf_cont(smp)) { + gbuf_freem(gbuf_cont(smp)); + gbuf_cont(smp) = 0; + } + break; + } + } + } + + sp->sendSeq += cnt + eom; /* Update sendSeq field */ + + if (GT(sp->sendSeq, sp->maxSendSeq)) /* Keep track of >st ever sent */ + sp->maxSendSeq = sp->sendSeq; + + if (eom) + sp->f.descriptor = ADSP_EOM_BIT; + else + sp->f.descriptor = 0; + + if (sendAckReq || (++sp->pktSendCnt >= sp->pktSendMax)) { + /* Last packet in a series */ + sp->f.descriptor |= ADSP_ACK_REQ_BIT; /* We want an ack to this */ + sp->waitingAck = 1; /* Flag that we're waiting */ + sp->sendStamp = SysTicks(); /* Save time we sent request */ + sp->timerSeq = sp->sendSeq; /* Save seq # we want acked */ + InsertTimerElem(&adspGlobal.fastTimers, &sp->RetryTimer, + sp->rtmtInterval+TX_DLY); + } + return cnt + eom; +} + + + diff --git a/bsd/netat/adsp_Init.c b/bsd/netat/adsp_Init.c new file mode 100644 index 000000000..b6ab21997 --- /dev/null +++ b/bsd/netat/adsp_Init.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* dspInit.c + * + * From Mike Shoemaker v01.20 06/29/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern atlock_t adspgen_lock; + +/* + * InitContinue + * + * Handle 2nd half of code for dsp init. We could be called directly by + * the dsp Init routine, or if a socket has to be opened, we get called + * by the completion routine of the dsp open socket. + * + * INPUTS: + * sp The stream we're initing (not yet on list of streams) + * pb The user's dsp Init param block + * soc The socket we're going to use + * OUTPUTS: + * none +*/ +static void InitContinue(sp, pb) /* (CCBPtr sp, DSPPBPtr pb, int soc) */ + CCBPtr sp; + struct adspcmd *pb; +{ + int s; + + /* Save connection's socket # in CCB */ + sp->localSocket = pb->socket; + + /* + * Link the new ccb onto queue. Must be done with interrupts off. + */ + ATDISABLE(s, adspgen_lock); + qAddToEnd(AT_ADSP_STREAMS, sp); /* Put on linked list of connections */ + ATENABLE(s, adspgen_lock); + return; +} + +/* + * dspInit + * + * Create and initialize a connection end. return ccbRefNum so that client can + * reference this ccb in later calls. 
The caller provides a pointer to + * ccb which belongs to adsp until the connection end is removed. + * + * If we have to open a socket, we'll have to do an async open socket, and + * finish up in the completion routine + * + * INPUTS: + * --> ccbPtr Pointer to connection control block + * --> adspcmdPtr Pointer to user request block + * + * OUTPUTS: + * <-- ccbRefNum refnum assigned to this connection. + * + * ERRORS: + * EADDRINUSE or 0 + */ +int adspInit(sp, ap) /* (DSPPBPtr pb) */ + CCBPtr sp; + struct adspcmd *ap; +{ + /* + * Set connection end defaults + */ + sp->badSeqMax = 3; /* # of out-of-sequence packets received */ + /* until a retransmit advice packet is sent */ + sp->probeInterval = 6 * 30; /* 30 second probe interval */ + sp->rtmtInterval = 6 * 5; /* Just a guess --- 5 seconds */ + sp->sendBlocking = 16; + sp->sendInterval = 6; + sp->badSeqMax = 3; /* This is the default */ + + sp->ProbeTimer.type = kProbeTimerType; + sp->FlushTimer.type = kFlushTimerType; + sp->RetryTimer.type = kRetryTimerType; + sp->AttnTimer.type = kAttnTimerType; + sp->ResetTimer.type = kResetTimerType; + + if (ap->csCode == dspInit) { /* Only do this if not connection Listener */ + /* + * Initialize send and receive queue. Make sure they are the + * right size + */ + sp->rbuflen = RecvQSize; + sp->rbuf_mb = 0; + sp->sbuflen = SendQSize; + sp->sbuf_mb = 0; + sp->csbuf_mb = 0; + + /* + * Initialize send and receive defaults + */ + + sp->attn_mb = 0; + sp->state = sClosed; /* Set state for connection end */ + /* end dspInit */ + } else { + + /* dspCLInit */ + sp->state = sListening; /* Set state for conn end */ + } /* end dspCLInit */ + /* + * User opens the socket, so continue with the init stuff + */ + InitContinue(sp, ap); + return(0); +} + + +/* + * AdspBad + * + * + * INPUTS: + * --> ap Parameter block + * + */ +int AdspBad(ap) /* (DSPPBPtr pb) */ + struct adspcmd *ap; +{ + dPrintf(D_M_ADSP, D_L_ERROR, + ("Hey! 
Do you have the right AuthToolbox?")); + ap->ioResult = controlErr; /* Unknown csCode in the param block */ + return EINVAL; +} diff --git a/bsd/netat/adsp_InitGlobals.c b/bsd/netat/adsp_InitGlobals.c new file mode 100644 index 000000000..e15b0b74b --- /dev/null +++ b/bsd/netat/adsp_InitGlobals.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* InitGlobals.c + * + * From v01.08 06/06/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * InitGlobals + * + * INPUTS: + * none + * OUTPUTS: + * none + */ +void InitGlobals() +{ + + adspGlobal.lastCID = (random() & 0xffff); + adspGlobal.inTimer = 0; + TimerTick(); /* start the ADSP timer */ + +} + + +/* + * CleanupGlobals + * + * INPUTS: + * none + * OUTPUTS: + * none + */ + +void CleanupGlobals() +{ + TimerStop(); +} diff --git a/bsd/netat/adsp_NewCID.c b/bsd/netat/adsp_NewCID.c new file mode 100644 index 000000000..0e15ee0a8 --- /dev/null +++ b/bsd/netat/adsp_NewCID.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * dspNewCID.c + * + * From v01.04 04/20/90 mbs + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * dspNewCID + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * + * OUTPUTS: + * <-- newCID new connection identifier + * + * ERRORS: + * errRefNum bad connection refnum + * errState connection is not closed + */ +int adspNewCID(sp, pb) /* (DSPPBPtr pb) */ + CCBPtr sp; + struct adspcmd *pb; +{ + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + if (sp->state != sClosed) { /* Can only assign to a closed connection */ + pb->ioResult = errState; + return EINVAL; + } + + /* + * Assign a unique connection ID to this ccb + */ + sp->locCID = pb->u.newCIDParams.newcid = NextCID(); + + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; +} diff --git a/bsd/netat/adsp_Open.c b/bsd/netat/adsp_Open.c new file mode 100644 index 000000000..d73e2ee94 --- /dev/null +++ b/bsd/netat/adsp_Open.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* adspOpen.c v01.20 + * + * From v01.20 08/23/90 Mike Shoemaker for MacOS + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern atlock_t adspgen_lock; + +/* + * NextCID + * + * Create a unique connection ID. + * + * INPUTS: + * none + * OUTPUTS: + * unique connection ID + */ +unsigned short NextCID() +{ + int s; + unsigned short num; + register CCB *queue; + + while (1) { + ATDISABLE(s, adspgen_lock); /* Disable interrupts */ + num = ++adspGlobal.lastCID; + /* qfind_w below is in 68K assembly */ + /* point to the first element */ + queue = (CCB *)AT_ADSP_STREAMS; + while (queue) { + /* and scan .. */ + if (queue->locCID == num) + break; + queue = queue->ccbLink; + } + ATENABLE(s, adspgen_lock); + if (queue == (CCBPtr)NULL) + break; + } + return num; +} + +static byte xlateStateTbl[4] = /* The value to be given to the CCB's state. */ +{ /* indexed by ocMode */ + sOpening, /* ocRequest */ + sPassive, /* ocPassive */ + sOpening, /* ocAccept */ + sOpen /* ocEstablish */ +}; +static byte xlateOpenTbl[4] = /* Value to use for open state. 
*/ +{ /* indexed by ocMode */ + O_STATE_OPENWAIT, /* ocRequest */ + O_STATE_LISTEN, /* ocPassive */ + O_STATE_ESTABLISHED, /* ocAccept */ + O_STATE_OPEN /* ocEstablish */ +}; + +/* + * adspOpen + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * --> remoteCID connection id of remote connection end + * --> remoteAddress internet address of remote connection end + * --> filterAddress filter for incoming open connection requests + * --> sendSeq initial send sequence number to use + * --> sendWindow initial size of remote end's receive buffer + * --> recvSeq initial receive sequence number to use + * --> attnSendSeq initial attention send sequence number + * --> attnRecvSeq initial receive sequence number + * --> ocMode connection opening mode + * --> ocMaximum maximum retries of open connection request + * + * OUTPUTS: + * <-- localCID connection identifier of this connection end + * <-- remoteCID connection id of remote connection end + * <-- remoteAddress + * <-- sendSeq + * <-- sendWindow + * <-- attnSendSeq + * + * ERRORS: + * errRefNum bad connection refnum + * errState connection end must be closed + * errOpening open connection attempt failed + * errAborted request aborted by a remove or close call + */ +int adspOpen(sp, pb) /* (DSPPBPtr pb) */ + register CCBPtr sp; + register struct adspcmd *pb; +{ + extern int adsp_pidM[]; + + int ocMode; + register gbuf_t *mp; + + if (sp == 0) { + pb->ioResult = errRefNum; /* Unknown refnum */ + return EINVAL; + } + + if ((sp->state != sClosed) || + (sp->removing)) { /* The CCB must be closed */ + pb->ioResult = errState; + return EALREADY; + } + + ocMode = pb->u.openParams.ocMode; /* get a local copy of open mode */ + if (ocMode == ocRequest) + adsp_pidM[pb->socket] = 0; + + /* + * Save parameters. 
Fill in defaults if zero + */ + if (pb->u.openParams.ocInterval) + sp->openInterval = pb->u.openParams.ocInterval; + else + sp->openInterval = ocIntervalDefault; + + if (pb->u.openParams.ocMaximum) + sp->openRetrys = pb->u.openParams.ocMaximum; + else + sp->openRetrys = ocMaximumDefault; + + sp->remoteAddress = *((AddrUnionPtr)&pb->u.openParams.remoteAddress); + /* Not used for passive */ + /* + * Clear out send/receive buffers. + */ + if (sp->sbuf_mb) { /* clear the send queue */ + gbuf_freel(sp->sbuf_mb); + sp->sbuf_mb = 0; + } + if (sp->csbuf_mb) { + gbuf_freem(sp->csbuf_mb); + sp->csbuf_mb = 0; + } + if (sp->rbuf_mb) { /* clear the receive queue */ + gbuf_freel(sp->rbuf_mb); + sp->rbuf_mb = 0; + } + if (sp->crbuf_mb) { + gbuf_freem(sp->crbuf_mb); + sp->crbuf_mb = 0; + } + + sp->rData = 0; /* Flag both buffers as empty */ + sp->sData = 0; + sp->recvQPending = 0; /* No bytes in receive queue */ + + /* + * Clear all of those pesky flags + */ + sp->userFlags = 0; + sp->sendDataAck = 0; + sp->sendAttnAck = 0; + sp->sendAttnData = 0; + sp->callSend = 0; + sp->removing = 0; + sp->writeFlush = 0; + + /* + * Reset round-trip timers + */ + sp->roundTrip = sp->rtmtInterval; + sp->deviation = 0; + + /* + * Reset stuff for retransmit advice packet + */ + sp->badSeqCnt = 0; + /* + * Reset flow control variables + */ + sp->pktSendMax = 1; /* Slow start says we should set this to 1 */ + sp->pktSendCnt = 0; + sp->rbufFull = 0; + sp->resentData = 0; + sp->noXmitFlow = 0; + sp->waitingAck = 0; + + /* + * Copy required information out of parameter block + */ + if (ocMode == ocAccept || ocMode == ocEstablish) { + sp->remCID = pb->u.openParams.remoteCID; + sp->sendSeq = sp->firstRtmtSeq = pb->u.openParams.sendSeq; + sp->sendWdwSeq = sp->sendSeq + pb->u.openParams.sendWindow; + sp->attnSendSeq = pb->u.openParams.attnSendSeq; + } else { /* accept or establish */ + sp->remCID = 0; + sp->sendSeq = 0; + sp->sendWdwSeq = 0; + sp->attnSendSeq = 0; + } + + if (ocMode == ocEstablish) { /* 
Only set these if establish mode */ + sp->recvSeq = pb->u.openParams.recvSeq; + sp->attnRecvSeq = pb->u.openParams.attnRecvSeq; + UAS_ASSIGN(sp->f.CID, sp->locCID); /* Preset the CID in the ADSP header */ + /* This is done elsewhere for all other modes */ + InsertTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer, + sp->probeInterval); + } else { /* establish */ + /* All other modes need a CID assigned */ + sp->locCID = NextCID(); + sp->recvSeq = 0; + sp->attnRecvSeq = 0; + } + + /* + * Now set the state variables for this CCB. + */ + + sp->openState = xlateOpenTbl[ocMode-ocRequest]; + sp->state = xlateStateTbl[ocMode-ocRequest]; + + if (ocMode == ocEstablish) { /* For establish call, we're done */ + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; + } + + pb->qLink = 0; /* Clear link field before putting on queue */ + mp = gbuf_copym(pb->mp); /* Save parameter block to match later */ + + if (mp == 0) { + pb->ioResult = errDSPQueueSize; + return ENOBUFS; + } + pb->ioResult = 1; /* not open -> not done */ + adspioc_ack(0, pb->ioc, pb->gref); /* release user */ + sp->opb = (struct adspcmd *)gbuf_rptr(mp); + sp->opb->ioc = 0; /* unlink saved pb from ioctl block */ + sp->opb->mp = mp; + + /* + * For request & accept, need to send a packet + */ + if ((ocMode == ocRequest) || (ocMode == ocAccept)) { + sp->sendCtl |= (1 << (ocMode == ocRequest ? + ADSP_CTL_OREQ : ADSP_CTL_OREQACK)); + CheckSend(sp); + } + return 0; +} + +int adspMode(pb) + register struct adspcmd *pb; +{ + return pb->u.openParams.ocMode; +} diff --git a/bsd/netat/adsp_Options.c b/bsd/netat/adsp_Options.c new file mode 100644 index 000000000..5e302bcaa --- /dev/null +++ b/bsd/netat/adsp_Options.c @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * dspOptions.c + * + * From v01.06 04/19/90 mbs + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * dspOptions + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * --> sendBlocking send blocking threshold + * --> sendTimer send timer interval + * --> rtmtTimer retransmit timer interval + * --> badSeqMax retransmit advice send threshold + * --> useCheckSum generate DDP checksum on internet packets + * + * OUTPUTS: + * none + * + * ERRORS: + * errRefNum bad connection refnum +*/ +int adspOptions(sp, pb) /* (DSPPBPtr pb) */ + CCBPtr sp; + struct adspcmd *pb; +{ + short err; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + if (pb->u.optionParams.sendBlocking) + sp->sendBlocking = pb->u.optionParams.sendBlocking; + + if (pb->u.optionParams.sendTimer) + sp->sendInterval = pb->u.optionParams.sendTimer; + + /* No longer allowed to set retransmit timer as of ADSP 1.5 */ + /* Use it to specify a command blocking request specific to MacOS + * emulation. 
*/ + if (pb->u.optionParams.rtmtTimer) + sp->delay = pb->u.optionParams.rtmtTimer; + KERNEL_DEBUG(DBG_ADSP_MISC, 0, sp, sp->delay, pb, pb->u.optionParams.rtmtTimer); + + if (pb->u.optionParams.badSeqMax) + sp->badSeqMax = pb->u.optionParams.badSeqMax; + + sp->useCheckSum = pb->u.optionParams.useCheckSum; + if (pb->u.optionParams.newPID) + sp->pid = pb->u.optionParams.newPID; + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; + +} diff --git a/bsd/netat/adsp_Packet.c b/bsd/netat/adsp_Packet.c new file mode 100644 index 000000000..2b9a8f0ff --- /dev/null +++ b/bsd/netat/adsp_Packet.c @@ -0,0 +1,836 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Packet.c + * + * v01.23 All incoming packets come here first 06/21/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include + +extern at_ifaddr_t *ifID_home; + +/* + * GleanSession + * + * We just got a packet for this session, glean its address & + * reset probe timer + * + * INPUTS: + * Session + * OUTPUTS: + * none + */ +static void GleanSession(sp) /* (CCBPtr sp) */ + CCBPtr sp; +{ + if (sp->openState == O_STATE_OPEN) { + /* This is true for both state = sOpen & sClosing */ + RemoveTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer); + InsertTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer, + sp->probeInterval); + sp->probeCntr = 4; + } + +} + + +/* + * The same code handles incoming Open Connection Request, + * Open Request + Ack, Open Connection Ack, Open Connection Denial + * + * We could be in four different states, LISTEN, OPENWAIT, ESTABLISHED, + * OPEN. + */ + +/* + * + * Ok, there are 16 combinations. 8 are do-nothings, 2 have to be + * special cased (Open Deny and Req+Ack on Open session) + * + * Build a table of actions: + * Ignore? + * What to match on (local socket, whole address, DestCID, SrcCID) + * What to send (Ack or Req+Ack) + * Next State (both the ccb state and the open state) + */ + +/* + * + */ +typedef struct { + u_char match; /* Characteristics that have to match + * (Bit-Mapped, see below) */ + char action; /* What to do if CCB matches */ + char send; /* What to send in response + * (Bit mapped, same as sendCtl field of + * CCB) */ + char openState; /* Next Open state */ + char state; /* Next ccb state. 
*/ + char pad; /* Too bad we need this to make structure + * even size */ +} TBL, *TBLPtr; + +#define M_LSOC 0x01 /* bit 0 - Match on local socket */ +#define M_ADDR 0x02 /* bit 1 - Match on whole address */ +#define M_DCID 0x04 /* bit 2 - Match on DestCID */ +#define M_SCID 0x08 /* bit 3 - Match SrcCID */ +#define M_DCIDZERO 0x10 /* bit 4 - Dest CID must be 0 */ +#define M_SCIDZERO 0x20 /* bit 5 - Src CID must be 0 */ +#define M_FILTER 0x40 /* bit 6 - Match address filter */ +#define M_IGNORE 0x80 /* bit 7 - Ignore */ + +#define A_COMPLETE 0x01 /* Complete open parameter block */ +#define A_SAVEPARMS 0x02 /* Save connection parameters */ +#define A_OREQACKOPEN 0x04 /* special case for open Req+Ack on + * OPEN session */ +#define A_GLEAN 0x08 /* We'll be talking back to this guy */ +#define A_DENY 0x10 /* We've been denied! */ + + +/* + * So here's our table + */ + +static TBL tbl[16] = { + +/* + * For Open Request ($81) + * + * LISTENING + * Match on destination socket + * Match on address filter + * Dest CID must be 0 + * Glean connection + * Save Open Connection parameters + * Send OREQACK + * Change state to ESTABLISHED + */ + { M_LSOC + M_DCIDZERO + M_FILTER, + A_SAVEPARMS + A_GLEAN, + B_CTL_OREQACK, + O_STATE_ESTABLISHED, + sOpening, + 0 + }, + +/* + * + * OPENWAIT + * Match on Remote Address & destination socket + * Dest CID must be 0 + * Save Open Connection parameters + * Send Ack + * Change state to ESTABLISHED + */ + { M_LSOC + M_ADDR + M_DCIDZERO, + A_SAVEPARMS + A_GLEAN, + B_CTL_OACK, + O_STATE_ESTABLISHED, + sOpening, + 0 + }, +/* + * + * ESTABLISHED + * Match on Remote Address & SrcCID + * Dest CID must be 0 + * Send Req + Ack + */ + { M_ADDR + M_SCID + M_DCIDZERO, + A_GLEAN, + B_CTL_OACK, + O_STATE_ESTABLISHED, + sOpening, + 0 + }, +/* + * OPEN + * Ignore + */ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, + +/* + * + * For Open Ack ($82) + * + * LISTENING + * Ignore + */ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, +/* + * + * OPENWAIT + * Ignore + */ + 
{ M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, +/* + * + * ESTABLISHED + * Match on SrcCID & DestCID & Address & Local Socket + * Complete Listen or Connect PB + * OPEN + */ + { M_ADDR + M_DCID + M_SCID + M_LSOC, + A_COMPLETE + A_GLEAN, + 0, + O_STATE_OPEN, + sOpen, + 0 + }, +/* + * + * OPEN + * Ignore +*/ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, + +/* + * + * For Open Request + Ack ($83) + * + * LISTENING + * Ignore +*/ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, +/* + * + * OPENWAIT + * Match on DestCID & socket + * Do not test remote address -- our open req could have + * been passed to another address by a connection server + * Save Open Connection parameters + * Complete Connect parameter block + * Send Ack + * OPEN + */ + { M_DCID + M_LSOC, + A_COMPLETE + A_SAVEPARMS + A_GLEAN, + B_CTL_OACK, + O_STATE_OPEN, + sOpen, + 0 + }, +/* + * + * ESTABLISHED + * Ignore + */ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, +/* + * + * OPEN + * Match on Remote Address & SrcCID & DestCID & Local Socket + * If we've never gotten any data + * Send Ack & Retransmit + */ + { M_ADDR + M_DCID + M_SCID + M_LSOC, + A_OREQACKOPEN + A_GLEAN, + B_CTL_OACK, + O_STATE_OPEN, + sOpen, + 0 + }, + +/* + * + * + * For Open Deny ($84) + * + * LISTENING + * Ignore + */ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, +/* + * + * OPENWAIT + * Match on DestCID & Address + * Source CID must be 0 + * Complete with error + */ + { M_SCIDZERO + M_DCID + M_ADDR, + A_DENY, + 0, + O_STATE_NOTHING, + sClosed, + 0 + }, +/* + * + * ESTABLISHED + * Ignore + */ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + }, /* %%% No we probably don't want to ignore in this case */ +/* + * + * OPEN + * Ignore + */ + { M_IGNORE, + 0, + 0, + 0, + 0, + 0 + } +}; + +extern at_ifaddr_t *ifID_table[]; + +/* + * Used to search down queue of sessions for a session waiting for an + * open request. 
+ */ +typedef struct { + AddrUnion addr; + word dstCID; + word srcCID; + byte socket; + byte descriptor; + byte idx; /* Index into state tables */ + TBLPtr t; /* Ptr to entry in table above */ +} MATCH, *MATCHPtr; + +/* + * MatchStream + * + * Called by Rx connection to find which stream (if any) should get this open + * request/ack/req+ack/deny packet. + * + */ + +static boolean +MatchStream(sp, m) /* (CCBPtr sp, MATCHPtr m) */ + CCBPtr sp; + MATCHPtr m; +{ + unsigned char match; + struct adspcmd *opb; + + if (sp->openState < O_STATE_LISTEN || + sp->openState > O_STATE_OPEN) + return 0; + + + m->t = &tbl[sp->openState - O_STATE_LISTEN + m->idx]; + + match = m->t->match; /* Get match criteria */ + + if (match & M_IGNORE) /* Ignore this combination */ + return 0; + + if (match & M_LSOC) { /* Match on Local socket */ + if (sp->localSocket != m->socket) + return 0; + } + + if (match & M_ADDR) { /* Match on Address */ + AddrUnion addr; + addr = m->addr; /* Make local copy for efficiency */ + if (sp->remoteAddress.a.node != addr.a.node) + return 0; + if (sp->remoteAddress.a.socket != addr.a.socket) + return 0; + if (sp->remoteAddress.a.net && addr.a.net && + (sp->remoteAddress.a.net != addr.a.net)) + return 0; + + /* + * Handle special case to reject self-sent open request + */ + if ((m->srcCID == sp->locCID) && + (addr.a.node == ifID_home->ifThisNode.s_node) && + /* *** was (addr.a.node == ddpcfg.node_addr.node) && *** */ + ((addr.a.net == 0) || + (ifID_home->ifThisNode.s_net == 0) || + (ifID_home->ifThisNode.s_net == addr.a.net)) ) + /* *** was + (NET_VALUE(ddpcfg.node_addr.net) == 0) || + (NET_VALUE(ddpcfg.node_addr.net) == NET_VALUE(addr.a.net))) ) + *** */ + /* CID's match, and */ + /* If nodeID matches, and */ + /* network matches, */ + return 0; /* then came from us! 
*/ + } + + if (match & M_DCID) { /* Match on DestCID */ + if (sp->locCID != m->dstCID) + return 0; + } + + if (match & M_SCID) { /* Match on SourceCID */ + if (sp->remCID != m->srcCID) + return 0; + } + + if (match & M_DCIDZERO) { /* Destination CID must be 0 */ + if (m->dstCID != 0) + return 0; + } + + if (match & M_SCIDZERO) /* Source CID must be 0 */ + { + if (m->srcCID != 0) + return 0; + } + + if (match & M_FILTER) { /* Check address filter? */ + if ((opb = sp->opb)) /* There should be a param block... */ + { + AddrUnion addr; + addr = m->addr; /* Make local copy for efficiency */ + if ((opb->u.openParams.filterAddress.net && + addr.a.net && + opb->u.openParams.filterAddress.net != addr.a.net) || + (opb->u.openParams.filterAddress.node != 0 && + opb->u.openParams.filterAddress.node != addr.a.node)|| + (opb->u.openParams.filterAddress.socket != 0 && + opb->u.openParams.filterAddress.socket != addr.a.socket)) + return 0; + } + } + + return 1; +} + +/* + * MatchListener + * + * Called by rx connection to see which connection listener (if any) should + * get this incoming open connection request. 
+ * + */ + +static boolean MatchListener(sp, m) /* (CCBPtr sp, MATCHPtr m) */ + CCBPtr sp; + MATCHPtr m; +{ + + if ((sp->state == (word)sListening) && /* This CCB is a listener */ + (sp->localSocket == m->socket)) /* on the right socket */ + return 1; + + return 0; +} + +/* + * RXConnection + * + * We just received one of the 4 Open Connection packets + * Interrupts are masked OFF at this point + * + * INPUTS: + * spPtr Place to put ptr to stream (if we found one -- not + * for listeners) + * f Pointer to ADSP header for packet, data follows behind it + * len # of byte in ADSP header + data + * addr Who sent the packet + * dsoc Where they sent it to + * + * OUTPUTS: + * Returns 1 if packet was ignored + */ +static int RXConnection(gref, spPtr, f, len, addr, dsoc) + /* (CCBPtr *spPtr, ADSP_FRAMEPtr f, word len, AddrUnion addr, byte dsoc) */ + gref_t *gref; /* READ queue */ + CCBPtr *spPtr; + ADSP_FRAMEPtr f; + int len; + AddrUnion addr; + unsigned char dsoc; +{ + CCBPtr sp; + ADSP_OPEN_DATAPtr op; + struct adspcmd *pb; + MATCH m; + gbuf_t *mp; + ADSP_FRAMEPtr adspp; + ADSP_OPEN_DATAPtr adspop; + int s; + + op = (ADSP_OPEN_DATAPtr)&f->data[0]; /* Point to Open-Connection parms */ + len -= ADSP_FRAME_LEN; + + if (len < (sizeof(ADSP_OPEN_DATA))) /* Packet too small */ + return 1; + + + if (UAS_VALUE(op->version) != netw(0x0100)) { /* Check version num (on even-byte) */ + /* + * The open request has been denied. Try to send him a denial. 
+ */ + + mp = gbuf_alloc(AT_WR_OFFSET + DDPL_FRAME_LEN + ADSP_FRAME_LEN + ADSP_OPEN_FRAME_LEN, + PRI_LO); + gbuf_rinc(mp,AT_WR_OFFSET); + gbuf_wset(mp,DDPL_FRAME_LEN); + adspp = (ADSP_FRAMEPtr)gbuf_wptr(mp); + gbuf_winc(mp,ADSP_FRAME_LEN); + bzero((caddr_t) gbuf_rptr(mp),DDPL_FRAME_LEN + ADSP_FRAME_LEN + + ADSP_OPEN_FRAME_LEN); + adspp->descriptor = ADSP_CONTROL_BIT | ADSP_CTL_ODENY; + adspop = (ADSP_OPEN_DATAPtr)gbuf_wptr(mp); + gbuf_winc(mp,ADSP_OPEN_FRAME_LEN); + UAS_UAS(adspop->dstCID, f->CID); + UAS_ASSIGN(adspop->version, 0x100); + adsp_sendddp(0, mp, DDPL_FRAME_LEN + ADSP_FRAME_LEN + + ADSP_OPEN_FRAME_LEN, &addr, DDP_ADSP); + + return 0; + } + m.addr = addr; + m.socket = dsoc; + m.descriptor = f->descriptor; + m.srcCID = UAS_VALUE(f->CID); + m.dstCID = UAS_VALUE(op->dstCID); /* On even-byte boundry */ + m.idx = ((f->descriptor & ADSP_CONTROL_MASK) - 1) * 4; + + /* + * See if we can find a stream that knows what to do with this packet + */ + if ((sp = (CCBPtr)qfind_m(AT_ADSP_STREAMS, &m, (ProcPtr)MatchStream)) == 0) + { + struct adspcmd *p; + struct adspcmd *n; + /* + * No match, so look for connection listeners if this is an + * open request + */ + if ((f->descriptor & ADSP_CONTROL_MASK) != (byte)ADSP_CTL_OREQ) + return 1; + + if ((sp = (CCBPtr)qfind_m(AT_ADSP_STREAMS, &m, + (ProcPtr)MatchListener)) == 0) + return 1; + + ATDISABLE(s, sp->lock); + p = (struct adspcmd *)&sp->opb; + while (n = (struct adspcmd *)p->qLink) /* Hunt down list of listens */ + { + /* Check address filter */ + if (((n->u.openParams.filterAddress.net == 0) || + (addr.a.net == 0) || + (n->u.openParams.filterAddress.net == addr.a.net)) && + + ((n->u.openParams.filterAddress.node == 0) || + (n->u.openParams.filterAddress.node == addr.a.node)) && + + ((n->u.openParams.filterAddress.socket == 0) || + (n->u.openParams.filterAddress.socket == addr.a.socket))) { + p->qLink = n->qLink; /* Unlink this param block */ + n->u.openParams.remoteCID = m.srcCID; + 
*((AddrUnionPtr)&n->u.openParams.remoteAddress) = addr; + n->u.openParams.sendSeq = netdw(UAL_VALUE(f->pktNextRecvSeq)); + n->u.openParams.sendWindow = netw(UAS_VALUE(f->pktRecvWdw)); + n->u.openParams.attnSendSeq = netdw(UAL_VALUE(op->pktAttnRecvSeq)); + n->ioResult = 0; + ATENABLE(s, sp->lock); + completepb(sp, n); /* complete copy of request */ + /* complete(n, 0); */ + return 0; + } /* found CLListen */ + + p = n; /* down the list we go... */ + + } /* while */ + + ATENABLE(s, sp->lock); + return 1; + } + + *spPtr = sp; /* Save ptr to stream we just found */ + + ATDISABLE(s, sp->lock); + sp->openState = m.t->openState; /* Move to next state (may be same) */ + sp->state = m.t->state; /* Move to next state (may be same) */ + + if (m.t->action & A_SAVEPARMS) { /* Need to Save open-conn parms */ + sp->firstRtmtSeq = sp->sendSeq = netdw(UAL_VALUE(f->pktNextRecvSeq)); + sp->sendWdwSeq = netdw(UAL_VALUE(f->pktNextRecvSeq)) + netw(UAS_VALUE(f->pktRecvWdw)) - 1; + sp->attnSendSeq = netdw(UAL_VALUE(op->pktAttnRecvSeq)); /* on even boundry */ + + + sp->remCID = UAS_VALUE(f->CID); /* Save Source CID as RemCID */ + UAS_UAS(sp->of.dstCID, f->CID); /* Save CID in open ctl packet */ + + sp->remoteAddress = addr; /* Save his address */ + + } + ATENABLE(s, sp->lock); + + if (m.t->action & A_DENY) { /* We've been denied ! 
*/ + DoClose(sp, errOpenDenied, -1); + } + + if (m.t->action & A_OREQACKOPEN) { + /* Special case for OREQACK */ + /* on an open session */ + RemoveTimerElem(&adspGlobal.fastTimers, &sp->RetryTimer); + sp->sendSeq = sp->firstRtmtSeq; + sp->pktSendCnt = 0; + sp->waitingAck = 0; + sp->callSend = 1; + } + + if (m.t->send) { /* Need to send a response */ + sp->sendCtl |= m.t->send; + sp->callSend = 1; + } + + if (m.t->action & A_COMPLETE) { /* Need to complete open param blk */ + RemoveTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer); + + if (pb = sp->opb) { + sp->opb = 0; + pb->u.openParams.localCID = sp->locCID; + pb->u.openParams.remoteCID = sp->remCID; + pb->u.openParams.remoteAddress = + *((at_inet_t *)&sp->remoteAddress); + pb->u.openParams.sendSeq = sp->sendSeq; + pb->u.openParams.sendWindow = sp->sendWdwSeq - sp->sendSeq; + pb->u.openParams.attnSendSeq = sp->attnSendSeq; + pb->ioResult = 0; + completepb(sp, pb); /* complete(pb, 0); */ + return 0; + } + /* Start probe timer */ + InsertTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer, + sp->probeInterval); + } + return 0; +} + +/* + * ADSPPacket + * + * When a packet is received by the protocol stack with DDP type equal + * to ADSP, then execution comes here + * + * DS is set to ATALK's DGROUP + * + * This routine, or one of its children MUST call glean packet + * + * INPUTS: + * Pointer to DDP header + * OUTPUTS: + * none + * + * Note that the incoming message block (mp) is usually discarded, either + * by the "ignored" path, or via the "checksend" path. The only case + * where the message is NOT freed is via the RxData case in the + * non control packet switch. I zero mp after the RxData case succeeds + * so that mp will not be freed. 
+ */ +int adspPacket(gref, mp) + /* (bytePtr data, word len, AddrUnion a, byte dsoc) */ + gref_t *gref; + gbuf_t *mp; +{ + unsigned char *bp; + int len; + AddrUnion a; + int dsoc; + int s; + register DDPX_FRAME *ddp; /* DDP frame pointer */ + register ADSP_FRAMEPtr f; /* Frame */ + CCBPtr sp; + + sp = 0; /* No stream */ + bp = (unsigned char *)gbuf_rptr(mp); + ddp = (DDPX_FRAME *)bp; + if (ddp->ddpx_type != DDP_ADSP) + return -1; + f = (ADSP_FRAMEPtr)(bp + DDPL_FRAME_LEN); + + len = UAS_VALUE(ddp->ddpx_length) & 0x3ff; /* (ten bits of length) */ + len -= DDPL_FRAME_LEN; + if (len < (sizeof(ADSP_FRAME) - 1)) /* Packet too small */ + return -1; /* mark the failure */ + + a.a.net = NET_VALUE(ddp->ddpx_snet); + a.a.node = ddp->ddpx_snode; + a.a.socket = ddp->ddpx_source; + + dsoc = ddp->ddpx_dest; + + if (sp = (CCBPtr)FindSender(f, a)) + GleanSession(sp); + + if (f->descriptor & ADSP_ATTENTION_BIT) { /* ATTN packet */ + if (sp && RXAttention(sp, mp, f, len)) + goto ignore; + else + mp = 0; /* attention data is being held */ + } /* ATTENTION BIT */ + + else if (f->descriptor & ADSP_CONTROL_BIT) { /* Control packet */ + switch (f->descriptor & ADSP_CONTROL_MASK) { + case ADSP_CTL_PROBE: /* Probe or acknowledgement */ + if (sp) + CheckRecvSeq(sp, f); + break; + + case ADSP_CTL_OREQ: /* Open Connection Request */ + case ADSP_CTL_OREQACK: /* Open Request and acknowledgement */ + case ADSP_CTL_OACK: /* Open Request acknowledgment */ + case ADSP_CTL_ODENY: /* Open Request denial */ + if (RXConnection(gref, &sp, f, len, a, dsoc)) + goto ignore; + break; + + case ADSP_CTL_CLOSE: /* Close connection advice */ + if (sp) { + /* This pkt may also ack some data we sent */ + CheckRecvSeq(sp, f); + RxClose(sp); + sp = 0; + } else + goto ignore; + break; + + case ADSP_CTL_FRESET: /* Forward Reset */ + /* May I rot in hell for the code below... 
*/ + if (sp && (CheckRecvSeq(sp, f), RXFReset(sp, f))) + goto ignore; + break; + + case ADSP_CTL_FRESET_ACK: /* Forward Reset Acknowledgement */ + if (sp && (CheckRecvSeq(sp, f), RXFResetAck(sp, f))) + goto ignore; + break; + + case ADSP_CTL_RETRANSMIT: /* Retransmit advice */ + if (sp) { + /* This pkt may also ack some data we sent */ + CheckRecvSeq(sp, f); + RemoveTimerElem(&adspGlobal.fastTimers, &sp->RetryTimer); + ATDISABLE(s, sp->lock); + sp->sendSeq = sp->firstRtmtSeq; + sp->pktSendCnt = 0; + sp->waitingAck = 0; + sp->callSend = 1; + ATENABLE(s, sp->lock); + } else + goto ignore; + break; + + default: + goto ignore; + } /* switch */ + } /* Control packet */ + + else { /* Data Packet */ + if ((sp == 0) || RXData(sp, mp, f, len)) + goto ignore; + else + mp = 0; /* RXData used up the data, DONT free it! */ + } /* Data Packet */ + + if (mp) + gbuf_freem(mp); + +checksend: /* incoming data was not ignored */ + if (sp && sp->callSend) /* If we have a stream & we need to send */ + CheckSend(sp); + + return 0; + +ignore: + gbuf_freem(mp); + return 0; +} diff --git a/bsd/netat/adsp_Read.c b/bsd/netat/adsp_Read.c new file mode 100644 index 000000000..fba5e4191 --- /dev/null +++ b/bsd/netat/adsp_Read.c @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * dspRead.c + * + * From v01.17 08/22/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * CheckReadQueue + * + * Checks to see if there is any data in the receive queue. If there + * is data, a pb and the data are queued to the user. + * + * + */ +extern int adsp_check; + +int CheckReadQueue(sp) /* (CCBPtr sp) */ + register CCBPtr sp; +{ + register struct adspcmd *pb; + int s; + unsigned short cnt; + char eom = 0; + register gbuf_t *mp; + register gbuf_t *tmp; + gref_t *gref; + + dPrintf(D_M_ADSP, D_L_TRACE, ("CheckReadQueue: sp=0x%x\n", (unsigned)sp)); + KERNEL_DEBUG(DBG_ADSP_READ, 0, sp, sp->rbuf_mb, sp->rpb, sp->delay); + trace_mbufs(D_M_ADSP_LOW, " bCQR m", sp->rbuf_mb); + ATDISABLE(s, sp->lock); + + while (sp->rData && (pb = sp->rpb)) { /* have data */ + dPrintf(D_M_ADSP, D_L_TRACE, + (" pb=0x%x, gref=0x%x, ioc=0x%x, reqCount=%d (have data)\n", + pb, pb->gref, pb->ioc, pb->u.ioParams.reqCount)); + KERNEL_DEBUG(DBG_ADSP_READ, 1, pb, pb->gref, pb->ioc, pb->u.ioParams.reqCount); + if (pb->u.ioParams.reqCount == 0) { + pb->ioResult = 0; + sp->rpb = pb->qLink; + if (pb->ioc) { + KERNEL_DEBUG(DBG_ADSP_READ, 2, pb, pb->gref, pb->ioc, 0); + adspioc_ack(0, pb->ioc, pb->gref); + } else { + KERNEL_DEBUG(DBG_ADSP_READ, 3, 
pb, pb->gref, 0, 0); + completepb(sp, pb); + } + continue; + } + + /* take the first packet off of sp->rbuf_mb or sp->crbuf_mb */ + if (mp = sp->rbuf_mb) { /* Get header for oldest data */ + KERNEL_DEBUG(DBG_ADSP_READ, 4, pb, mp, gbuf_msgsize(mp), gbuf_next(mp)); + sp->rbuf_mb = gbuf_next(mp); + gbuf_next(mp) = 0; + eom = 1; + } else if (mp = sp->crbuf_mb) { + KERNEL_DEBUG(DBG_ADSP_READ, 5, pb, mp, gbuf_msgsize(mp), gbuf_next(mp)); + sp->crbuf_mb = 0; + eom = 0; + } + + /* Get the first (reqCount-actCount) bytes and tack them onto + the end of pb->mp. If eom is set, put the remainder of the + data onto the front of sp->rbuf_mb, otherwise sp->crbuf_mb. */ + cnt = gbuf_msgsize(mp); /* # of data bytes in it. */ + if (cnt > (unsigned short)(pb->u.ioParams.reqCount - pb->u.ioParams.actCount)) { + cnt = pb->u.ioParams.reqCount - pb->u.ioParams.actCount; + /* m_split returns the tail */ + if (!(tmp = (gbuf_t *)m_split(mp, cnt, M_DONTWAIT))) { + cnt = 0; + tmp = mp; + } + if (eom) { + gbuf_next(tmp) = sp->rbuf_mb; + sp->rbuf_mb = tmp; + eom = 0; + } else + sp->crbuf_mb = tmp; + } + if (cnt) { + pb->u.ioParams.actCount += cnt; + gbuf_linkb(pb->mp, mp); + } + + pb->u.ioParams.eom = eom; + /* + * Now clean up receive buffer to remove all of the data + * we just copied + */ + if ((sp->rbuf_mb == 0) && + (sp->crbuf_mb == 0)) /* no more data blocks */ + sp->rData = 0; + /* + * If we've filled the parameter block, unlink it from read + * queue and complete it. We also need to do this if the connection + * is closed && there is no more stuff to read. + */ + if (eom || (pb->u.ioParams.actCount >= pb->u.ioParams.reqCount) || + ((sp->state == sClosed) && (!sp->rData)) ) { + /* end of message, message is full, connection + * is closed and all data has been delivered, + * or we are not to "delay" data delivery. 
+ */ + pb->ioResult = 0; + sp->rpb = pb->qLink; /* dequeue request */ + if (pb->ioc) { /* data to be delivered at the time of the */ + mp = gbuf_cont(pb->mp); /* ioctl call */ + gbuf_cont(pb->mp) = 0; + gref = (gref_t *)pb->gref; + adspioc_ack(0, pb->ioc, pb->gref); + dPrintf(D_M_ADSP, D_L_TRACE, (" (pb->ioc) mp=%x\n", mp)); + KERNEL_DEBUG(DBG_ADSP_READ, 0x0A, pb, mp, + gbuf_next(mp), gbuf_cont(mp)); + SndMsgUp(gref, mp); + dPrintf(D_M_ADSP, D_L_TRACE, + (" (data) size req=%d\n", pb->u.ioParams.actCount)); + KERNEL_DEBUG(DBG_ADSP_READ, 0x0B, pb, pb->ioc, + pb->u.ioParams.reqCount, pb->u.ioParams.actCount); + } else { /* complete an queued async request */ + KERNEL_DEBUG(DBG_ADSP_READ, 0x0C, pb, sp, + pb->u.ioParams.actCount, sp->delay); + completepb(sp, pb); + } + } + } /* while */ + + if (pb = sp->rpb) { /* if there is an outstanding request */ + dPrintf(D_M_ADSP, D_L_TRACE, + (" pb=0x%x, ioc=0x%x, reqCount=%d (no more data)\n", + pb, pb->ioc, pb->u.ioParams.reqCount)); + KERNEL_DEBUG(DBG_ADSP_READ, 0x0D, pb, pb->ioc, + pb->u.ioParams.reqCount, pb->u.ioParams.actCount); + + if (sp->state == sClosed) { + while (pb) { + KERNEL_DEBUG(DBG_ADSP_READ, 0x0E, pb, sp, pb->ioc, 0); + pb->ioResult = 0; + pb->u.ioParams.actCount = 0; + pb->u.ioParams.eom = 0; + sp->rpb = pb->qLink; + if (pb->ioc) { + adspioc_ack(0, pb->ioc, pb->gref); + } else { + completepb(sp, pb); + } + pb = sp->rpb; + } + } else if (pb->ioc) { /* if request not complete and this + * is an active ioctl, release user */ + sp->rpb = pb->qLink; + pb->ioResult = 1; + tmp = gbuf_cont(pb->mp); /* detatch perhaps delayed data */ + gbuf_cont(pb->mp) = 0; + if (mp = gbuf_copym(pb->mp)) { /* otherwise, duplicate user request */ + KERNEL_DEBUG(DBG_ADSP_READ, 0x0F, pb, sp, pb->mp, 0); + adspioc_ack(0, pb->ioc, pb->gref); /* release user */ + pb = (struct adspcmd *)gbuf_rptr(mp); /* get new parameter block */ + pb->ioc = 0; + pb->mp = mp; + gbuf_cont(pb->mp) = tmp; /* reattach data */ + pb->qLink = sp->rpb; /* requeue 
the duplicate at the head */ + sp->rpb = pb; + } else { /* there is no data left, but no space + * to duplicate the parameter block, so + * put what must be a non EOM message + * back on the current receive queue, and + * error out the user + */ + KERNEL_DEBUG(DBG_ADSP_READ, 0x10, pb, sp, pb->mp, 0); + if (tmp) { + sp->crbuf_mb = tmp; + sp->rData = 1; + } + pb->ioResult = errDSPQueueSize; + adspioc_ack(ENOBUFS, pb->ioc, pb->gref); + } + } + } + /* + * The receive window has opened. If was previously closed, then we + * need to notify the other guy that we now have room to receive more + * data. But, in order to cut down on lots of small data packets, + * we'll wait until the recieve buffer is /14 empy before telling + * him that there's room in our receive buffer. + */ + if (sp->rbufFull && (CalcRecvWdw(sp) > (sp->rbuflen >> 2))) { + sp->rbufFull = 0; + sp->sendDataAck = 1; + sp->callSend = 1; + } + ATENABLE(s, sp->lock); + + KERNEL_DEBUG(DBG_ADSP_READ, 0x11, sp, 0, 0, 0); + trace_mbufs(D_M_ADSP_LOW, " eCQR m", sp->rbuf_mb); + return 0; +} + +/* + * CheckAttn + * + * Checks to see if there is any attention data and passes the data back + * in the passed in pb. + * + * INPUTS: + * sp + * pb + * + * OUTPUTS: + * + */ +int CheckAttn(sp, pb) /* (CCBPtr sp) */ + register CCBPtr sp; + register struct adspcmd *pb; +{ + int s; + gbuf_t *mp; + gref_t *gref; + + dPrintf(D_M_ADSP, D_L_TRACE, + ("CheckAttn: sp=0x%x, pb=0x%x\n", (unsigned)sp, (unsigned)pb)); + + ATDISABLE(s, sp->lock); + if (mp = sp->attn_mb) { + + /* + * Deliver the attention data to the user. + */ + gref = (gref_t *)pb->gref; + pb->u.attnParams.attnSize = sp->attnSize; + pb->u.attnParams.attnCode = sp->attnCode; + if (!sp->attnSize) { + gbuf_freem(mp); + mp = 0; + } + sp->userFlags &= ~eAttention; + /* + * Now clean up receive buffer to remove all of the data + * we just copied + */ + sp->attn_mb = 0; + pb->ioResult = 0; + } else { + /* + * No data... 
+ */ + pb->u.attnParams.attnSize = 0; + pb->u.attnParams.attnCode = 0; + pb->ioResult = 1; /* not done */ + } + adspioc_ack(0, pb->ioc, pb->gref); + if (mp) { + SndMsgUp(gref, mp); + } + ATENABLE(s, sp->lock); + return 0; +} + +/* + * adspRead + * + * INPUTS: + * --> sp stream pointer + * --> pb user request parameter block + * + * OUTPUTS: + * <-- actCount actual number of bytes read + * <-- eom one if end-of-message, zero otherwise + * + * ERRORS: + * errRefNum bad connection refnum + * errState + * errFwdReset read terminated by forward reset + * errAborted request aborted by Remove or Close call + */ +int adspRead(sp, pb) /* (DSPPBPtr pb) */ + register CCBPtr sp; + register struct adspcmd *pb; +{ + register gbuf_t *mp; + int s; + + dPrintf(D_M_ADSP, D_L_TRACE, + ("adspRead: sp=0x%x, pb=0x%x\n", (unsigned)sp, (unsigned)pb)); + + KERNEL_DEBUG(DBG_ADSP_READ, 0x12, sp, pb, sp->state, sp->rData); + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + /* + * It's OK to read on a closed, or closing session + */ + ATDISABLE(s, sp->lock); + if (sp->state != sOpen && sp->state != sClosing && sp->state != sClosed) { + ATENABLE(s, sp->lock); + pb->ioResult = errState; + return EINVAL; + } + if (sp->rData && (sp->rpb == 0)) { /* if data, and no queue of pbs */ + qAddToEnd(&sp->rpb, pb); /* deliver data to user directly */ + ATENABLE(s, sp->lock); + CheckReadQueue(sp); + } else if ((pb->u.ioParams.reqCount == 0) && (sp->rpb == 0)) { + /* empty read */ + ATENABLE(s, sp->lock); + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; + } else { + pb->ioResult = 1; + if (mp = gbuf_copym(pb->mp)) { /* otherwise, duplicate user request */ + adspioc_ack(0, pb->ioc, pb->gref); /* release user */ + pb = (struct adspcmd *)gbuf_rptr(mp); /* get new parameter block */ + pb->ioc = 0; + pb->mp = mp; + qAddToEnd(&sp->rpb, pb); /* and queue it for later */ + ATENABLE(s, sp->lock); + } else { + ATENABLE(s, sp->lock); + pb->ioResult = errDSPQueueSize; + return 
ENOBUFS; + } + } + + if (sp->callSend) { + CheckSend(sp); /* If recv window opened, we might */ + /* send an unsolicited ACK. */ + } + return 0; +} + +/* + * dspReadAttention + * + * INPUTS: + * --> sp stream pointer + * --> pb user request parameter block + * + * OUTPUTS: + * <-- NONE + * + * ERRORS: + * errRefNum bad connection refnum + * errState connection is not in the right state + */ +int adspReadAttention(sp, pb) /* (DSPPBPtr pb) */ + register CCBPtr sp; + register struct adspcmd *pb; +{ + dPrintf(D_M_ADSP, D_L_TRACE, + ("adspReadAttention: sp=0x%x, pb=0x%x\n", (unsigned)sp, (unsigned)pb)); + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + /* + * It's OK to read on a closed, or closing session + */ + if (sp->state != sOpen && sp->state != sClosing && sp->state != sClosed) { + pb->ioResult = errState; + return EINVAL; + } + + CheckAttn(sp, pb); /* Anything in the attention queue */ + CheckReadQueue(sp); /* check to see if receive window has opened */ + if (sp->callSend) { + CheckSend(sp); /* If recv window opened, we might */ + /* send an unsolicited ACK. */ + } + return 0; +} /* adspReadAttention */ diff --git a/bsd/netat/adsp_RxAttn.c b/bsd/netat/adsp_RxAttn.c new file mode 100644 index 000000000..8f62c8c1a --- /dev/null +++ b/bsd/netat/adsp_RxAttn.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * RxAttn.c + * + * From v01.12 06/12/90 mbs + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * Used to search down queue of sessions for a session that matches + * sender and source connection ID +*/ +typedef struct +{ + AddrUnion addr; + word srcCID; +} MATCH_SENDER, *MATCH_SENDERPtr; + +/* + * MatchSender + * + */ + +static boolean MatchSender(sp, m) /* (CCBPtr sp, MATCH_SENDERPtr m) */ + CCBPtr sp; + MATCH_SENDERPtr m; +{ + + if (sp->state != sOpen && sp->state != sClosing) + return 0; + + if (sp->remCID != m->srcCID) + return 0; + + if (sp->remoteAddress.a.node != m->addr.a.node) + return 0; + if (sp->remoteAddress.a.socket != m->addr.a.socket) + return 0; + if (sp->remoteAddress.a.net && m->addr.a.net && + (sp->remoteAddress.a.net != m->addr.a.net)) + return 0; + + return 1; +} + + +/* + * FindSender + * + * Given an ADSP Packet, find the stream it is associated with. + * + * This should only be used for ADSP Packets that could be received + * by an OPEN connection. 
+ * + * INPUTS: + * Pointer to ADSP header & address of sender + * OUTPUTS: + * Pointer to stream if found, else 0 + */ +CCBPtr FindSender(f, a) /* (ADSP_FRAMEPtr f, AddrUnion a) */ + ADSP_FRAMEPtr f; + AddrUnion a; +{ + MATCH_SENDER m; + + m.addr = a; + m.srcCID = UAS_VALUE(f->CID); + return (CCBPtr)qfind_m(AT_ADSP_STREAMS, &m, (ProcPtr)MatchSender); +} + +/* + * RXAttention + * + * We just got an Attention Packet. + * See if it came from anybody we know. + * Then check to see if it is an attention data packet or acknowledgement + * + * Interrupts are masked OFF at this point. + * + * INPUTS: + * stream pointer + * Pointer to ADSP header, + * Length of header plus data + * OUTPUTS: + * Returns 1 if packet was ignored + */ +int RXAttention(sp, mp, f, len) /* (CCBPtr sp, ADSP_FRAMEPtr f, word len) */ + CCBPtr sp; + gbuf_t *mp; + ADSP_FRAMEPtr f; + int len; +{ + int offset; + struct adspcmd *pb; + long diff; + + if (UAS_VALUE(f->pktRecvWdw)) /* This field must be 0 in attn pkts */ + return 1; + + if ((f->descriptor == + (char)(ADSP_ATTENTION_BIT | ADSP_ACK_REQ_BIT)) && /* Attention Data */ + ((sp->userFlags & eAttention) == 0)) /* & he read the previous */ + { + diff = netdw(UAL_VALUE(f->pktFirstByteSeq)) - sp->attnRecvSeq; + if (diff > 0) /* Hey, he missed one */ + return 1; + + if (diff == 0) /* This is the one we expected */ + { + len -= ADSP_FRAME_LEN; /* remove adsp header */ + if (len < 2) /* Poorly formed attn packet */ + return 1; + sp->attnCode = (f->data[0] << 8) + f->data[1]; /* Save attn code */ + sp->attn_mb = mp; + offset = ((unsigned char *)&f->data[2]) - (unsigned char *)gbuf_rptr(mp); + gbuf_rinc(mp,offset); + sp->attnPtr = (unsigned char *)gbuf_rptr(mp); + mp = 0; /* mp has been queued don't free it */ + + /* Interrupts are off here, or otherwise we have to do + * these three operations automically. 
+ */ + sp->attnSize = len - 2; /* Tell user how many bytes */ + ++sp->attnRecvSeq; + /* Set flag saying we got attn message */ + sp->userFlags |= eAttention; + UrgentUser(sp); /* Notify user */ + /* BEFORE sending acknowledge */ + } /* in sequence */ + + sp->sendAttnAck = 1; /* send attention ack for dupl. & + * expected data */ + sp->callSend = 1; + } /* Attn Data */ + + /* + * Interrupts are OFF here, otherwise we have to do this atomically + */ + /* Check to see if this acknowledges anything */ + if ((sp->attnSendSeq + 1) == netdw(UAL_VALUE(f->pktNextRecvSeq))) { + sp->attnSendSeq++; + if ((pb = sp->sapb) == 0) { /* We never sent data ? !!! */ + if (mp) + gbuf_freem(mp); + return 0; + } + + sp->sapb = (struct adspcmd *)pb->qLink; /* Unlink from queue */ + + /* Remove timer */ + RemoveTimerElem(&adspGlobal.fastTimers, &sp->AttnTimer); + + pb->ioResult = 0; + if (gbuf_cont(pb->mp)) { + gbuf_freem(gbuf_cont(pb->mp)); /* free the data */ + gbuf_cont(pb->mp) = 0; + } + completepb(sp, pb); /* Done with the send attention */ + + if (sp->sapb) { /* Another send attention pending? */ + sp->sendAttnData = 1; + sp->callSend = 1; + } else { + if (sp->state == sClosing) /* this ack may allow us to close... */ + CheckOkToClose(sp); + } + } + if (mp) + gbuf_freem(mp); + return 0; +} diff --git a/bsd/netat/adsp_RxData.c b/bsd/netat/adsp_RxData.c new file mode 100644 index 000000000..94d7f64d9 --- /dev/null +++ b/bsd/netat/adsp_RxData.c @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * RxData.c + * + * From v01.28 Handle an incoming Data Packet 06/21/90 mbs + */ +/* + * Change log: + * 06/29/95 - Modified to handle flow control for writing (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +gbuf_t *releaseData(mp, len) + gbuf_t *mp; + int len; +{ + register gbuf_t *tmp; + register int cnt; + int freeit; + + dPrintf(D_M_ADSP, D_L_TRACE, + ("releaseData: mbuf=0x%x, len=%d\n", (unsigned)mp, len)); + + KERNEL_DEBUG(DBG_ADSP_RCV, 0, mp, len, 0, 0); + + do { + freeit = 1; /* assume we use the whole mblk */ + if ((cnt = gbuf_len(mp)) > len) { + freeit = 0; /* using only part of the mblk */ + cnt = len; + } + gbuf_rinc(mp,cnt); + len -= cnt; + tmp = mp; + mp = gbuf_cont(mp); + if (freeit) { + gbuf_freeb(tmp); + } else + return tmp; /* if we don't use the whole block */ + /* pass back the partial gbuf_t pointer */ + } while (len && mp); + return mp; +} + +/* + * CheckRecvSeq + * + * We just got a non-attention packet. Check the pktNextRecvSeq field + * to see if it acknowledges any of our sent data. + * + * If any data was acked, check to see if we have anything to fill the + * newly opened up remote receive window. 
Otherwise, if the ACK request + * bit was set, we need to send an Ack Packet + * + * Always called as the result of receiving a packet. Interrupts + * are completely masked when this routine is called. + * + * INPUTS: + * sp stream + * f pointer to ASDP header + * OUTPUTS: + * none + */ +void CheckRecvSeq(sp, f) /* (CCBPtr sp, ADSP_FRAMEPtr f) */ + register CCBPtr sp; + register ADSP_FRAMEPtr f; +{ + int s; + int pktNextRecvSeq; + int sendWdwSeq; + int eom; + int hlen; + register gbuf_t *mp; + + ATDISABLE(s, sp->lock); + if (f->descriptor & ADSP_ACK_REQ_BIT) { /* He wants an Ack */ + sp->sendDataAck = 1; + sp->callSend = 1; + } + + pktNextRecvSeq = netdw(UAL_VALUE(f->pktNextRecvSeq)); /* Local copy */ + + /* + * Make sure the sequence number corresponds to reality -- i.e. for + * unacknowledged data that we have sent + */ + + if (GT(pktNextRecvSeq, sp->maxSendSeq)) /* We've never sent this seq #! */ + goto noack; + + if (GTE(pktNextRecvSeq, sp->timerSeq) && sp->waitingAck) { + /* This acks our Ack Request */ + sp->waitingAck = 0; /* Allow sending more */ + sp->pktSendCnt = 0; /* Reset packet count */ + /* Remove retry timer */ + RemoveTimerElem(&adspGlobal.fastTimers, &sp->RetryTimer); + + if (!sp->resentData) { /* Data sent without retries */ + short diff; /* Signed!! */ + /* All timings done in 6th second base */ + /* The contortions here are to prevent C from promoting + * everything to longs and then using a library routine + * to do the division. As 16-bit words, a DIVU instruction + * is used. 
+ */ + + diff = (((word)(SysTicks() - sp->sendStamp)) / (word)10) - + sp->roundTrip + 1; + + sp->roundTrip += diff >> 3; /* Update average */ + + if (diff < 0) /* Take absolute value */ + diff = -diff; + sp->deviation += (diff - sp->deviation) >> 2; /* Update deviation*/ + + sp->rtmtInterval = sp->roundTrip + + ((short)2 * (short)sp->deviation); + + if (!sp->noXmitFlow && + sp->pktSendMax < 50) /* Bump # of sequential */ + sp->pktSendMax++; /* Packets we'll send */ + + sp->noXmitFlow = 0; + } + else + sp->resentData = 0; + + } /* Acked our data */ + + if (LTE(pktNextRecvSeq, + sp->firstRtmtSeq)) /* Was duplicate ack, so ignore */ + goto noack; + + if (!sp->sData) /* If nothing in send queue, ignore */ + goto noack; + + + do { /* This acks bytes in our buffer */ + if (mp = sp->sbuf_mb) { /* Get ptr to oldest data header */ + sp->sbuf_mb = gbuf_next(mp); /* unlink it from send queue */ + eom = 1; + } else { + mp = sp->csbuf_mb; + sp->csbuf_mb = 0; + eom = 0; + } + + if (mp == 0) { /* shouldn't happen! */ + sp->sData = 0; + goto noack; + } + /* + * Does this ack the entire data block we're now pointing at? + */ + if (LTE((sp->firstRtmtSeq + eom + (hlen = gbuf_msgsize(mp))), + pktNextRecvSeq)) { + + gbuf_freem(mp); + + /* Update seq # of oldest byte in bfr */ + sp->firstRtmtSeq += eom + hlen; + + if ((sp->sbuf_mb == 0) && (sp->csbuf_mb == 0)) { + /* If this was only block, then ... */ + sp->sData = 0; /* ... no data in queue */ + sp->writeFlush = 0; + if (sp->state == sClosing) /* this may allow us to close... 
*/ + CheckOkToClose(sp); + atalk_enablew(sp->gref); + break; + } + } /* whole data block acked */ + else /* Only some of the data was acked */ + { + short acked; + + acked = (pktNextRecvSeq - sp->firstRtmtSeq); + mp = releaseData(mp, acked); + if (eom) { + if (mp) { + gbuf_next(mp) = sp->sbuf_mb; + sp->sbuf_mb = mp; + } + } else + sp->csbuf_mb = mp; + + sp->firstRtmtSeq = pktNextRecvSeq; /* Update seq # oldest byte */ + break; + } + } while (LT(sp->firstRtmtSeq, pktNextRecvSeq)); + + if (sp->sData) /* We've got stuff to send */ + sp->callSend = 1; + +noack: + sendWdwSeq = netw(UAS_VALUE(f->pktRecvWdw)) - 1 + pktNextRecvSeq; + + if (GT(sendWdwSeq, sp->sendWdwSeq)) /* Don't make send window smaller */ + { + sp->callSend = 1; /* His recv wdw opened, so see */ + /* if we can send more data */ + sp->sendWdwSeq = sendWdwSeq; + } + ATENABLE(s, sp->lock); +} + +/* + * RXData + * + * We just got a Data Packet + * See if it came from anybody we know. + * + * Called from ADSP Packet with interrupts masked completely OFF + * *** In MacOSX interrupts do not seem to be off! *** + * + * INPUTS: + * Stream pointer + * gbuf_t pointer + * Pointer to ADSP header, (part of the mblk pointer to by mp) + * Length of header plus data + * OUTPUTS: + * Returns 1 if packet was ignored + */ +int RXData(sp, mp, f, len) /* (CCBPtr sp, ADSP_FRAMEPtr f, word len) */ + CCBPtr sp; + register gbuf_t *mp; + ADSP_FRAMEPtr f; + int len; +{ + int s, offset; + int PktFirstByteSeq; + short cnt; + char eom; + + len -= ADSP_FRAME_LEN; + + /* Does packet have eom bit set? */ + eom = (f->descriptor & ADSP_EOM_BIT) ? 
1 : 0; + + dPrintf(D_M_ADSP, D_L_TRACE, + ("RXData: sp=0x%x, mbuf=0x%x, f=0x%x, len=%d, eom=%d\n", + (unsigned)sp, (unsigned)mp, (unsigned)f, len, eom)); + + KERNEL_DEBUG(DBG_ADSP_RCV, 1, sp, mp, len, eom); + + trace_mbufs(D_M_ADSP, " mp", mp); + + PktFirstByteSeq = netdw(UAL_VALUE(f->pktFirstByteSeq)); /* Local copy */ + + ATDISABLE(s, sp->lock); + if (GT(PktFirstByteSeq, sp->recvSeq)) /* missed a packet (out of order) */ + { + if (sp->badSeqCnt++ > sp->badSeqCnt) /* Need to send rexmit advice */ + sp->sendCtl |= B_CTL_RETRANSMIT; + ATENABLE(s, sp->lock); + CheckRecvSeq(sp, f); /* Will set send ACK flag if requested */ + CheckReadQueue(sp); + gbuf_freem(mp); + + KERNEL_DEBUG(DBG_ADSP_RCV, 2, sp, 0, 0, 0); + trace_mbufs(D_M_ADSP, " exRXD m", sp->rbuf_mb); + dPrintf(D_M_ADSP, D_L_TRACE, (" End RXData - missed a packet\n")); + + return 0; + } + + if (LTE(PktFirstByteSeq + len + eom, sp->recvSeq)) { /* duplicate data? */ + ATENABLE(s, sp->lock); + CheckRecvSeq(sp, f); /* Will set send ACK flag if requested */ + CheckReadQueue(sp); + gbuf_freem(mp); + + KERNEL_DEBUG(DBG_ADSP_RCV, 3, sp, 0, 0, 0); + trace_mbufs(D_M_ADSP, " exRXD m", sp->rbuf_mb); + dPrintf(D_M_ADSP, D_L_TRACE, (" End RXData - duplicate data\n")); + + return 0; + } + + sp->badSeqCnt = 0; /* reset out of sequence pckt counter */ + + cnt = sp->recvSeq - PktFirstByteSeq; /* # bytes we've seen already */ + + offset = ((unsigned char *)&f->data[cnt]) - (unsigned char *)gbuf_rptr(mp); + gbuf_rinc(mp,offset); + /* point recv mblk to data (past headers) */ + + len -= cnt; /* # of new data bytes */ + + cnt = len; /* # bytes left to deal with */ + + if (!sp->rData) /* Recv bfr is empty */ + { + sp->rData = 1; /* Not empty any more */ + + if ((sp->rpb)->ioc == mp) { + dPrintf(D_M_ADSP, D_L_TRACE, + ("RXData: (pb->ioc == mp) no stored data\n")); + KERNEL_DEBUG(DBG_ADSP_RCV, 4, sp, sp->rpb, 0, 0); + } + if (eom) + sp->rbuf_mb = mp; + else + sp->crbuf_mb = mp; + } /* Recv queue is empty */ + + /* + * Else, there's 
already stored data. + */ + else { + gbuf_t *rmp; + /* + * Is this a new "message?" + */ + if (eom) { + if (sp->crbuf_mb) { + gbuf_linkb(sp->crbuf_mb, mp); + mp = sp->crbuf_mb; + sp->crbuf_mb = 0; + } + if (rmp = sp->rbuf_mb) { + /* + * Add it to the end + */ + while(gbuf_next(rmp)) + rmp = gbuf_next(rmp); + gbuf_next(rmp) = mp; + } else + sp->rbuf_mb = mp; + } else if (sp->crbuf_mb) + gbuf_linkb(sp->crbuf_mb, mp); + else + sp->crbuf_mb = mp; + } + sp->recvSeq += (cnt + eom); /* We've got these bytes */ + + /* %%% We really should call check recv seq first, but let's + * continue to do it down here. We really want to service the + * received packet first, and maybe reenable scc ints before + * doing anything that might take a long while + */ + + ATENABLE(s, sp->lock); + CheckRecvSeq(sp, f); /* Will set send ACK flag if requested */ + CheckReadQueue(sp); + KERNEL_DEBUG(DBG_ADSP_RCV, 5, sp, sp->rbuf_mb, 0, 0); + trace_mbufs(D_M_ADSP, " eRXD m", sp->rbuf_mb); + return 0; +} /* RXData */ diff --git a/bsd/netat/adsp_Status.c b/bsd/netat/adsp_Status.c new file mode 100644 index 000000000..46880508b --- /dev/null +++ b/bsd/netat/adsp_Status.c @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * dspStatus.c + * + * From Mike Shoemaker v01.04 06/15/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * calcSendFree + * + * INPUTS: + * sp ADSP Stream + * OUTPUTS: + * # of bytes avail in local send queue + */ +int CalcSendQFree(sp) /* (CCBPtr sp) */ + CCBPtr sp; +{ + int bytes; + + bytes = calcSendQ(sp); + bytes = sp->sbuflen - bytes; + + if (bytes < 0) + return 0; + return bytes; +} + +calcSendQ(sp) + CCBPtr sp; +{ + register gbuf_t *mp; + int bytes = 0; + + if (sp->sData) { /* There is data in buffer */ + if (mp = sp->sbuf_mb) { + do { + bytes += gbuf_msgsize(mp); + mp = gbuf_next(mp); + } while (mp); + } + if (mp = sp->csbuf_mb) + bytes += gbuf_msgsize(mp); + } + return bytes; +} + +/* + * dspStatus + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * + * OUTPUTS: + * <-- statusCCB Pointer to the connection control block + * <-- sendQPending bytes waiting to be sent or acknowledged + * <-- sendQFree available buffer in bytes of send queue + * <-- recvQPending bytes waiting to be read from queue + * <-- recvQFree available buffer in bytes of receive queue + * + * ERRORS: + * errRefNum bad connection refnum + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * +*/ +int adspStatus(sp, pb) /* (DSPPBPtr pb) */ + CCBPtr sp; + register struct adspcmd *pb; +{ + short err; + short bytes; + int s; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + pb->u.statusParams.ccbPtr = (TPCCB)sp; + ATDISABLE(s, sp->lock); + + /* + * pending bytes in send queue + */ + if (sp->sData) + bytes = calcSendQ(sp); + else + bytes = 0; + 
pb->u.statusParams.sendQPending = bytes; + + /* available buffer space in send queue */ + pb->u.statusParams.sendQFree = CalcSendQFree(sp); + + /* + * pending bytes in recv queue + */ + if (sp->rData) + bytes = calcRecvQ(sp); + else + bytes = 0; + pb->u.statusParams.recvQPending = bytes; + + /* available buffer space in receive queue */ + pb->u.statusParams.recvQFree = CalcRecvWdw(sp); + + ATENABLE(s, sp->lock); + pb->ioResult = 0; + adspioc_ack(0, pb->ioc, pb->gref); + return 0; + +} diff --git a/bsd/netat/adsp_Timer.c b/bsd/netat/adsp_Timer.c new file mode 100644 index 000000000..fe092e0b1 --- /dev/null +++ b/bsd/netat/adsp_Timer.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* + * Timer.c + * + * From v01.12 06/22/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * TrashSession + * + * Cleanly abort a session that might be open. Called if probe timer expires, + * or from AppleTalk event handler (close or network gone away) + * + * Only call if the session is active (I.e. not for closed or listeners) + * + * INPUTS: + * session pointer + * OUTPUTS: + * none + */ +void TrashSession(sp) /* (CCBPtr sp) */ + CCBPtr sp; +{ + int s; + + ATDISABLE(s, sp->lock); + sp->userFlags |= eTearDown; + sp->removing = 1; + sp->state = sClosed; + ATENABLE(s, sp->lock); + + DoClose(sp, errAborted, 1); +} + + +/* + * DoTimerElem + * + * INPUTS: + * + * OUTPUTS: + * + */ +void DoTimerElem(t) /* (TimerElemPtr t) */ + TimerElemPtr t; +{ + CCBPtr sp; + int s; + + sp = (CCBPtr)((Ptr)t - t->type); /* Recover stream pointer for this guy */ + ATDISABLE(s, sp->lock); + + if (t->type == kFlushTimerType) { /* flush write data time just fired */ + if (sp->sData) { /* If there's any data, flush it. */ + sp->writeFlush = 1; + goto send; + } + } else if (t->type == kRetryTimerType) { + if (sp->waitingAck) { + + sp->waitingAck = 0; + sp->sendSeq = sp->firstRtmtSeq; + sp->pktSendCnt = 0; + sp->resentData = 1; /* Had to resend data */ + sp->noXmitFlow = 1; /* Don't incr. max packets. 
*/ + + if ((sp->pktSendMax /= 2) == 0) /* Back off on max # packets + * sent */ + sp->pktSendMax = 1; + + if ((sp->roundTrip *= 2) > sp->probeInterval) + sp->roundTrip = sp->probeInterval; + sp->rtmtInterval = sp->roundTrip + ((short)2 * + (short)sp->deviation); + goto send; + } + } else if (t->type == kAttnTimerType) { + if (sp->sapb) { /* Unacknowledged attn pkt */ + sp->sendAttnData = 1; + goto send; + } + } else if (t->type == kResetTimerType) { + if (sp->frpb) { /* Unacknowledged forward reset */ + sp->sendCtl |= B_CTL_FRESET; + goto send; + } + } else if (t->type == kProbeTimerType) { + if (sp->state == sOpen || sp->state == sClosing) { + if (--sp->probeCntr == 0) { /* Connection died */ + ATENABLE(s, sp->lock); + TrashSession(sp); + return; + } else { + InsertTimerElem(&adspGlobal.slowTimers, &sp->ProbeTimer, + sp->probeInterval); + sp->sendCtl |= B_CTL_PROBE; + goto send; + } + } else if (sp->state == sOpening) { + if ((sp->openState == O_STATE_OPENWAIT) || + (sp->openState == O_STATE_ESTABLISHED)) + { + if (--sp->openRetrys == 0) { /* Oops, didn't open */ + sp->state = sClosed; + ATENABLE(s, sp->lock); + DoClose(sp, errOpening, 1); + return; + } /* open failed */ + else /* Send packet again */ + { + sp->sendCtl |= (sp->openState == O_STATE_OPENWAIT) ? 
+ B_CTL_OREQ : B_CTL_OREQACK; + goto send; + } + } /* we're opening */ + } + } + + else { + dPrintf(D_M_ADSP, D_L_ERROR, ("DoTimerElem:Unknown timer type!\n")); + } + + ATENABLE(s, sp->lock); + return; + +send: + ATENABLE(s, sp->lock); + CheckSend(sp); +} + +void TimerTick_funnel() +{ + thread_funnel_set(network_flock, TRUE); + TimerTick(); + thread_funnel_set(network_flock, FALSE); +} + +static StopTimer; + +/* + * TimerTick + * + * Called 6 times per second + * INPUTS: + * + * OUTPUTS: + * + */ +void TimerTick() /* (void) */ +{ + + if (StopTimer) { + return; + } + TimerQueueTick(&adspGlobal.slowTimers); + TimerQueueTick(&adspGlobal.fastTimers); + timeout(TimerTick_funnel, (caddr_t)0, HZ/6); +} + +void TimerStop() +{ + StopTimer = 1; + untimeout(TimerTick_funnel, (caddr_t) 0); +} diff --git a/bsd/netat/adsp_TimerElem.c b/bsd/netat/adsp_TimerElem.c new file mode 100644 index 000000000..d7f61ac14 --- /dev/null +++ b/bsd/netat/adsp_TimerElem.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * TimerElem.c + * + * From v01.00 04/15/90 mbs + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +atlock_t adsptmr_lock; + +extern void DoTimerElem(); /* (TimerElemPtr t); + * External routine called to + * process each one. */ + +/* + * InsertTimerElem + * + * INPUTS: + * qhead Address of ptr to first item in list + * t timer element to link in + * vbl timer value to use + * OUTPUTS: + * void + */ +void InsertTimerElem(qhead, t, val) + /* (TimerElemPtr *qhead, TimerElemPtr t, word val) */ + TimerElemPtr *qhead, t; + int val; +{ + TimerElemPtr p; /* parent pointer */ + TimerElemPtr n; /* current */ + int s; + + ATDISABLE(s, adsptmr_lock); + + if (t->onQ) { + /* + * someone else beat us to the punch and put this + * element back on the queue, just return in this case + */ + ATENABLE(s, adsptmr_lock); + return; + } + p = (TimerElemPtr)qhead; + + while (n = p->link) { + if (val <= n->timer) /* Do we go in front of this? 
*/ + { + n->timer -= val; /* Yes, adjust his delta */ + break; /* and go link us in */ + } + val -= n->timer; /* No, subtract off delta from our value */ + p = n; + } /* while */ + + /* It must go after item pointed to by p and in front of item + * pointed to by n */ + + t->onQ = 1; /* we're linked in now */ + p->link = t; /* parent points to us */ + t->timer = val; /* this is our value */ + t->link = n; /* we point to n */ + + ATENABLE(s, adsptmr_lock); +} + + +/* + * RemoveTimerElem + * + * INPUTS: + * qhead Address of ptr to first item in list + * t timer element to link in + * OUTPUTS: + * void + */ +void RemoveTimerElem(qhead, t) /* (TimerElemPtr *qhead, TimerElemPtr t) */ + TimerElemPtr *qhead, t; +{ + TimerElemPtr p; /* parent pointer */ + TimerElemPtr n; /* current */ + int s; + + ATDISABLE(s, adsptmr_lock); + + if ( !t->onQ) { + /* + * someone else beat us to the punch and took this + * element off of the queue, just return in this case + */ + ATENABLE(s, adsptmr_lock); + return; + } + p = (TimerElemPtr)qhead; + + while (n = p->link) /* Get next item in queue */ + { + if (n == t) /* Is it us? */ + { + if (p->link = n->link) /* Link our parent to our child */ + { + n->link->timer += t->timer; /* and update child's timer */ + } + n->onQ = 0; /* Not on linked list anymore */ + break; + } + p = n; + } /* while */ + + ATENABLE(s, adsptmr_lock); +} + + +/* + * TimerQueueTick + * + * INPUTS: + * qhead Address of ptr to first item in list + * + * OUTPUTS: + * void + */ +void TimerQueueTick(qhead) /* (TimerElemPtr *qhead) */ + TimerElemPtr *qhead; +{ + TimerElemPtr p; /* parent pointer */ + TimerElemPtr n; /* current */ + int s; + + ATDISABLE(s, adsptmr_lock); + + p = (TimerElemPtr)qhead; + if (p->link) /* Is anything on queue? 
*/ + p->link->timer--; /* Yes, decrement by a tick */ + else + goto done; /* No, we're outta' here */ + + while ((n = p->link) && + (n->timer == 0)) /* Next guy needs to be serviced */ + { + p->link = n->link; /* Unlink us */ + n->onQ = 0; + + ATENABLE(s, adsptmr_lock); + DoTimerElem(n); + ATDISABLE(s, adsptmr_lock); + + p = (TimerElemPtr)qhead; + } /* while */ + +done: + ATENABLE(s, adsptmr_lock); +} diff --git a/bsd/netat/adsp_Write.c b/bsd/netat/adsp_Write.c new file mode 100644 index 000000000..9a170cf50 --- /dev/null +++ b/bsd/netat/adsp_Write.c @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* dspWrite.c + * From Mike Shoemaker v01.13 06/21/90 mbs for MacOS + */ +/* + * Change log: + * 06/29/95 - Modified to handle flow control for writing (Tuyen Nguyen) + * 09/07/95 - Modified for performance (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +void completepb(); + +/* + * FillSendQueue + * + * INPUTS: + * sp stream + * OUTPUTS: + * none + */ +int FillSendQueue(sp, pb) /* (CCBPtr sp) */ + register CCBPtr sp; + register struct adspcmd *pb; /* The write PB we're playing with */ +{ + gbuf_t *mb, *nmb; + int eom; /* True if should set eom in header */ + int cnt; /* # of bytes in this write */ + int err = 0; + int s; + + cnt = pb->u.ioParams.reqCount - pb->u.ioParams.actCount; + eom = pb->u.ioParams.eom ? F_EOM : 0; + + if (cnt == 0 && eom == 0) /* Nothing to do here, complete it */ + goto unlink; + + /* The 1st mbuf in the pb->mp chain (mb) is the adspcmd structure. + The 2nd mbuf (nmb) will be the beginning of the data. */ + mb = pb->mp; + nmb = gbuf_cont(mb); + if (gbuf_len(mb) > sizeof(struct adspcmd)) { + if ((nmb = gbuf_dupb(mb)) == 0) { + gbuf_wset(mb,sizeof(struct adspcmd)); + err = errDSPQueueSize; + goto unlink; + } + gbuf_wset(mb,sizeof(struct adspcmd)); + gbuf_rinc(nmb,sizeof(struct adspcmd)); + gbuf_cont(nmb) = gbuf_cont(mb); + } else if (nmb == 0) { + if ((nmb = gbuf_alloc(1, PRI_LO)) == 0) { + err = errENOBUFS; + goto unlink; + } + } + gbuf_cont(mb) = 0; + + ATDISABLE(s, sp->lock); + sp->sData = 1; /* note that there is data to send */ + if ((mb = sp->csbuf_mb)) { /* add to the current message */ + gbuf_linkb(mb, nmb); + } else + sp->csbuf_mb = nmb; /* mark the buffer we are currently filling */ + if (eom) { + if ((mb = sp->sbuf_mb)) { + while (gbuf_next(mb)) + mb = gbuf_next(mb); + gbuf_next(mb) = sp->csbuf_mb; /* add the current item */ + } else + sp->sbuf_mb = sp->csbuf_mb; + sp->csbuf_mb = 0; /* if its done, no current buffer */ + } + pb->u.ioParams.actCount += cnt; /* Update count field in param blk */ + ATENABLE(s, sp->lock); + + if (pb->u.ioParams.actCount == pb->u.ioParams.reqCount) { + /* Write is complete 
*/ +unlink: + if (pb->u.ioParams.flush) /* flush the send Q? */ + sp->writeFlush = 1; + + pb->ioResult = err; + if (err) + atalk_notify(sp->gref, EIO); + gbuf_freem(pb->mp); + } + + return 0; +} /* FillSendQueue */ + +/* + * dspWrite + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * --> reqCount requested number of bytes to write + * --> dataPtr pointer to buffer for reading bytes into + * --> eom one if end-of-message, zero otherwise + * + * OUTPUTS: + * <-- actCount actual number of bytes written + * + * ERRORS: + * errRefNum bad connection refnum + * errState connection is not open + * errAborted request aborted by Remove or Close call + */ +int adspWrite(sp, pb) /* (DSPPBPtr pb) */ + CCBPtr sp; + struct adspcmd *pb; +{ + int s; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; /* no stream, so drop the message */ + } + + ATDISABLE(s, sp->lock); + if (sp->state != sOpen) { /* Not allowed */ + pb->ioResult = errState; + ATENABLE(s, sp->lock); + atalk_notify(sp->gref, ENOTCONN); + gbuf_freem(pb->mp); + return 0; + } + + pb->u.ioParams.actCount = 0; /* Set # of bytes so far to zero */ + ATENABLE(s, sp->lock); + + FillSendQueue(sp, pb); /* Copy from write param block to send queue */ + + CheckSend(sp); /* See if we should send anything */ + return 0; +} + +#ifdef notdef +int adsp_check = 1; + +CheckQueue(sp) + CCBPtr sp; +{ + register gbuf_t *mp, *tmp; + unsigned char current; + int current_valid = 0; + + if (adsp_check == 0) + return; + if (mp = sp->sbuf_mb) { + current = *mp->b_rptr; + current_valid = 1; + while (mp) { + tmp = mp; + while (tmp) { + current = CheckData(tmp->b_rptr, tmp->b_wptr - tmp->b_rptr, + current); + tmp = tmp->b_cont; + } + mp = mp->b_next; + } + } + if (mp = sp->csbuf_mb) { + if (current_valid == 0) + current = *mp->b_rptr; + tmp = mp; + while (tmp) { + current = CheckData(tmp->b_rptr, tmp->b_wptr - tmp->b_rptr, + current); + tmp = tmp->b_cont; + } + } +} + + +int adsp_bad_block_count; +char *adsp_bad_block; + 
+CheckData(block, size, current) + char *block; + int size; + u_char current; +{ + register int anError = 0; + register int i; + + for (i = 0; i < size; i++) { + if ((block[i] & 0xff) != (current & 0xff)) { + if (!anError) { + adsp_bad_block = block; + } + anError++; + } + current++; + } + + if (anError) { + adsp_bad_block_count++; + } + return current; +} +#endif diff --git a/bsd/netat/adsp_attention.c b/bsd/netat/adsp_attention.c new file mode 100644 index 000000000..18ce61f65 --- /dev/null +++ b/bsd/netat/adsp_attention.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * dspAttention.c + * + * From Mike Shoemaker v01.05 03/16/90 mbs + */ +/* + * Change log: + * 06/29/95 - Modified to handle flow control for writing (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * dspAttention + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * --> attnCode client attention code + * --> attnSize size in bytes of attention data + * --> attnData pointer to attention data + * --> attnInterval attention retransmit interval + * (ignored by ADSP 1.5 & up) + * + * OUTPUTS: + * none + * + * ERRORS: + * errRefNum bad connection refnum + * errState connection is not open + * errAttention attention message too long + * errAborted request aborted by Remove or Close call + */ +int adspAttention(sp, pb) /* (DSPPBPtr pb) */ + register struct adspcmd *pb; + register CCBPtr sp; +{ + int s; + register gbuf_t *mp, *nmp; + unsigned char uerr; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + if (sp->state != sOpen) { /* If we're not open, tell user to go away */ + pb->ioResult = errState; + uerr = ENOTCONN; +l_err: + atalk_notify(sp->gref, uerr); + gbuf_freem(pb->mp); + return 0; + } + + if (pb->u.attnParams.attnSize > attnBufSize) /* If data too big, bye-bye */ + { + pb->ioResult = errAttention; + uerr = ERANGE; + goto l_err; + } + + /* The 1st mbuf in the pb->mp chain (mp) is the adspcmd structure. + The 2nd mbuf (nmp) will be the beginning of the data. */ + mp = pb->mp; + if (pb->u.attnParams.attnSize) { + nmp = gbuf_cont(mp); + if (gbuf_len(mp) > sizeof(struct adspcmd)) { + if ((nmp = gbuf_dupb(mp)) == 0) { + gbuf_wset(mp, sizeof(struct adspcmd)); + uerr = ENOBUFS; + goto l_err; + } + gbuf_wset(mp, sizeof(struct adspcmd)); + gbuf_rinc(nmp, sizeof(struct adspcmd)); + gbuf_cont(nmp) = gbuf_cont(mp); + gbuf_cont(mp) = nmp; + } + } + pb->ioDirection = 1; /* outgoing attention data */ + ATDISABLE(s, sp->lock); + if (sp->sapb) { /* Pending attentions already? 
*/ + qAddToEnd(&sp->sapb, pb); /* Just add to end of queue */ + ATENABLE(s, sp->lock); + } else { + sp->sendAttnData = 1; /* Start off this attention */ + pb->qLink = 0; + sp->sapb = pb; + ATENABLE(s, sp->lock); + CheckSend(sp); + } + pb->ioResult = 1; /* indicate that the IO is not complete */ + return 0; +} diff --git a/bsd/netat/adsp_internal.h b/bsd/netat/adsp_internal.h new file mode 100644 index 000000000..9b7c4e83c --- /dev/null +++ b/bsd/netat/adsp_internal.h @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _NETAT_ADSP_INTERNAL_H_ +#define _NETAT_ADSP_INTERNAL_H_ + +#ifdef KERNEL + +/* from h/adsp_portab.h */ + +/* TypeDefs for the basic data bytes. 
*/ + +typedef unsigned char byte, *bytePtr; + +#ifdef NOT_USED +typedef char int8; +typedef short int16; +typedef int int32; +#endif + +typedef unsigned char boolean; + +typedef unsigned short word; + +typedef unsigned int dword; + +#define BYTE_AT(x) (*((byte PTR)(x))) +#define WORD_AT(x) (*((word PTR)(x))) +#define DWORD_AT(x) (*((dword PTR)(x))) + +#define high(x) ((byte)((x) >> 8)) +#define low(x) ((byte)(x)) +#define hlword(h, l) (((byte)(l)) | (((byte)(h)) << 8)) + +#define offsetof(typ,id) (size_t)&(((typ*)0)->id) + +/* + * On a Mac, there is no need to byte-swap data on the network, so + * these macros do nothing + */ + +#define netw(x) x +#define netdw(x) x + +typedef struct +{ + at_net network; /* network number */ + byte nodeid; /* node number */ + byte socket; /* socket number */ +} AddrBlk, *AddrBlkPtr; + +typedef union +{ + at_inet_t a; +} AddrUnion, *AddrUnionPtr; + +/* End Portab.h */ + +/* from h/adsp_internal.h */ + +#undef T_IDLE + +/* +* Default Behavior for ADSP +*/ +#define ocIntervalDefault 6 +#define ocMaximumDefault 10 +#define probeIntervalDefault 180 + +/* +* MACROS for comparing 32-bit sequence numbers +*/ +#define GT(x,y) (((long)(x-y)) > (long) 0) +#define LT(x,y) (((long)(x-y)) < (long) 0) +#define GTE(x,y) (((long)(x-y)) >= (long) 0) +#define LTE(x,y) (((long)(x-y)) <= (long) 0) +#define BETWEEN(x,y,z) (LTE(x,y) && LTE(y,z)) + +/* + * Use the kernel tick counter for SysTicks. 
+ */ + +#define SysTicks() lbolt + +/* + * Timer element used for handling timings + */ +typedef struct timerelem { + struct timerelem *link; + short timer; + char type; + unsigned onQ:1; /* Bit-fields are faster than booleans */ +} TimerElem; + +typedef TimerElem *TimerElemPtr; + +/* + * For AppleTalk Phase 2 event queue + */ +typedef struct { + Ptr qLink; + unsigned short qType; + ProcPtr callAddr; +} LAPEventElem; + +typedef LAPEventElem *LAPEventElemPtr; + +/* + * The Event types we're passed when an AppleTalk transition occurs + */ +#define AOpenTransition 0 +#define ACloseTransition 2 +#define ANetworkTransition 5 + +/* + * The element we're passed when a NetworkTransaction event occurs + */ +typedef struct TNetworkTransition { + Ptr private; /* pointer used internally by NetShare */ + ProcPtr netValidProc; /* pointer to the network valid procedure */ +} TNetworkTransition, *TPNetworkTransition; + +typedef long (*NetworkTransitionProcPtr)(); + /* (TPNetworkTransition nettrans, + unsigned long thenet); */ +/* + * This is the connection control block + */ +typedef struct ccb { + /*---These fields may not change order or size-----------*/ + + struct ccb *ccbLink; /* link to next ccb */ + unsigned short state; /* state of the connection end */ + unsigned char userFlags; /* flags for unsolicited connection events */ + unsigned char localSocket; /* socket number of this connection end */ + AddrUnion remoteAddress; /* internet address of remote end */ + unsigned short attnCode; /* attention code received */ + unsigned short attnSize; /* size of received attention data */ + unsigned char *attnPtr; /* ptr to received attention data */ + unsigned short recvQPending; /* # bytes in receive queue %%% */ + /*------------------------------------------------------ */ + + struct adspcmd *opb; /* Outstanding open/close/remove/listens */ + struct adspcmd *spb; /* Outstanding Sends */ + struct adspcmd *sapb; /* Outstanding Send Attentions */ + struct adspcmd *frpb; /* 
Outstanding Forward Resets */ + struct adspcmd *rpb; /* Outstanding Read Requests */ + + struct ccb *otccbLink; /* link to next ccb */ + int pid; /* Process ID for CCB owner */ + + unsigned short remCID; /* Remote Connection ID */ + unsigned short locCID; /* Local Connection ID */ + int sendSeq; /* Seq number of next char to send to remote */ + int firstRtmtSeq; /* oldest seq # in local send queue */ + int sendWdwSeq; /* Seq # of last char remote has bfr for */ + int recvSeq; /* Seq of # of next char expected from rmte */ + int recvWdw; /* # of bytes local end has buffer space for */ + int attnSendSeq; /* Seq # of next attn pkt to send to remote */ + int attnRecvSeq; /* Seq # of next packet local end expects */ + int maxSendSeq; /* Highest seq # we ever sent on connection */ + + /* These must be in the first 255 bytes of the CCB */ + TimerElem ProbeTimer; /* Timer element for probes (and open) */ + TimerElem FlushTimer; /* Timer element for flushing data */ + TimerElem RetryTimer; /* Timer element for retransmissions */ + TimerElem AttnTimer; /* Timer element for attention packets */ + TimerElem ResetTimer; /* Timer element for forward resets */ + + short openInterval; /* Interval between open connection packets */ + short probeInterval; /* Interval between probes */ + short sendInterval; /* Interval before automatic flush */ + short rtmtInterval; /* Rexmit interval (dynamically determined) */ + + short sendCtl; /* Send control message bits */ + short sendBlocking; /* Flush unsent data if > than sendBlocking */ + short openRetrys; /* # of retrys for Connect & Accept */ + short rbuflen; /* Total size of receive buffer */ + short sbuflen; /* Total size of receive buffer */ + char pad; + char lockFlag; + char badSeqMax; /* retransmit advice send threshold */ + char badSeqCnt; /* # of of out-of-order packets received */ + char useCheckSum; /* true to use DDP checksums */ + char openState; /* Used for opening a connection (see below) */ + + gbuf_t *rbuf_mb; /* message 
block for the recv buffer */ + gbuf_t *crbuf_mb; + gbuf_t *sbuf_mb; /* message block for the send buffer */ + gbuf_t *csbuf_mb; + gbuf_t *attn_mb; /* message block for the attention buffer */ + gbuf_t *deferred_mb; /* message block deferred for later processing */ + +#ifdef NOT_USED + char ioDone; /* flag for when the adsp header is busy */ +#endif + char probeCntr; /* # of probes we can miss (counts down) */ + char pktSendMax; /* Max # of packets to send without an ack */ + char pktSendCnt; /* # of packets sent so far */ + + int sendStamp; /* Time of last ackRequest */ + int timerSeq; /* Seq # of char corresponding to above time stamp */ + short roundTrip; /* Average Round-Trip time (in 6ths of a second) */ + short deviation; /* deviation from roundTrip time */ + + unsigned sData:1; /* There's data in the send queue */ + unsigned waitingAck:1; /* We're waiting for an ack packet */ + unsigned rData:1; /* There's data in the receive queue */ + unsigned resentData:1; /* True when we resend data due to timeout */ + unsigned sendDataAck:1; /* True if he requested an ack */ + unsigned sendAttnAck:1; /* Must send attn acknowlege */ + unsigned sendAttnData:1; /* Must send attn data */ + unsigned callSend:1; /* Must call CheckSend() */ + unsigned rbufFull:1; /* We've closed our receive window. */ + unsigned noXmitFlow:1; /* True stops incrementing # of xmit + * packets to send in a row after receiving + * an ack packet. */ + unsigned secureCCB:1; /* True if this is a secure connection */ + unsigned removing:1; /* There is a dspRemove pending */ + unsigned writeFlush:1; /* Flush send queue even if # bytes to + * send is less than send blocking. 
*/ + unsigned delay:1; /* do not complete commands until user + * *** NO LONGER USED IN KERNEL *** */ + ADSP_FRAME f; /* Used to send every packet */ + ADSP_OPEN_DATA of; /* Holds the data for the open exchange */ + gref_t *gref; /* The queue associated with the CCB */ + gbuf_t *sp_mp; + atlock_t lock; + atlock_t lockClose; + atlock_t lockRemove; +} CCB, *CCBPtr; + + +/* + * Change order and die !!! --- See the receive open packet code + */ +#define O_STATE_NOTHING 0 /* Not opening */ +#define O_STATE_LISTEN 1 /* Listening for open request */ +#define O_STATE_OPENWAIT 2 /* Sent Req, waiting for Ack to open + * request */ +#define O_STATE_ESTABLISHED 3 /* Got Req, send Req+Ack,waiting Ack */ +#define O_STATE_OPEN 4 /* Connection is open */ + +/* +* These bits are used in the sendCtl field to indicate what needs to be sent +*/ +#define B_CTL_PROBE 0x0001 +#define B_CTL_OREQ 0x0002 +#define B_CTL_OACK 0x0004 +#define B_CTL_OREQACK 0x0008 +#define B_CTL_ODENY 0x0010 +#define B_CTL_CLOSE 0x0020 +#define B_CTL_FRESET 0x0040 +#define B_CTL_FRESETACK 0x0080 +#define B_CTL_RETRANSMIT 0x0100 + + +#define kProbeTimerType offsetof(CCB, ProbeTimer) +#define kFlushTimerType offsetof(CCB, FlushTimer) +#define kRetryTimerType offsetof(CCB, RetryTimer) +#define kAttnTimerType offsetof(CCB, AttnTimer) +#define kResetTimerType offsetof(CCB, ResetTimer) + +/* + * Used to manage the send receive queue + */ +typedef struct { + short len; /* # of bytes in this fragment */ + char flags; /* See #define's below */ + char data[1]; +} HDR, *HDRPtr; + +#define HDR_LEN 3 /* Yes, I know it really is 4 bytes long... */ + +#define F_GAP 0x03 +#define F_EOM 0x04 +#define F_WRAP 0x08 +#define F_VALID 0x10 +#define F_ENCRYPTED 0x20 /* %%% Needed ??? */ +#define F_LAST 0x40 /* This is last block in buffer */ + + +/* %%% Are these two used anymore? */ +#define sbufPtr(y) (&sp->sbuf[((y) < sp->sbuflen) ? (y) : ((y) - sp->sbuflen)]) +#define rbufPtr(y) (&sp->rbuf[((y) < sp->rbuflen) ? 
(y) : ((y) - sp->rbuflen)]) + +/* End Internal.h */ + +/* fron h/adsp_supp.h */ + +void CallUserRoutine(); /* (CCB FPTR sp); */ + + +/* + * Add queue element to end of queue. Pass Address of ptr to + * 1st element of queue + */ +int qAddToEnd(); /* (void FPTR FPTR qhead, void FPTR qelem); */ + +/* + * Hunt down a linked list of queue elements looking for an element with + * 'data' at 'offset' bytes into the queue element. + */ +void *qfind_b(); /* (void *qhead, word offset, word data); */ +void *qfind_w(); /* (void *qhead, word offset, word data); */ +void *qfind_p(); /* (void *qhead, word offset, void *ptr); */ +void *qfind_o(); /* (void *qhead, word offset, void *ptr); */ +void *qfind_m(); /* (void *qhead, void *match, + ProcPtr compare_fnx); */ + + +/* + * Routines to handle sorted timer queues + */ +void InsertTimerElem(); /* (TimerElemPtr *qhead, TimerElemPtr t, + word val); */ +void RemoveTimerElem(); /* (TimerElemPtr *qhead, TimerElemPtr t); */ +void TimerQueueTick(); /* (TimerElemPtr *qhead);*/ + +/* from h/adsp_global.h */ + +typedef struct { + void *ccbList; /* Ptr to list of connection control blocks */ + + TimerElemPtr slowTimers; /* The probe timer list */ + TimerElemPtr fastTimers; /* The fast timer list */ + + unsigned short lastCID; /* Last connection ID assigned */ + char inTimer; /* We're inside timer routine */ +} GLOBAL; + +extern GLOBAL adspGlobal; + +/* Address of ptr to list of ccb's */ +#define AT_ADSP_STREAMS ((CCB **)&(adspGlobal.ccbList)) + +#endif /* KERNEL */ + +#endif /* _NETAT_ADSP_INTERNAL_H_ */ diff --git a/bsd/netat/adsp_misc.c b/bsd/netat/adsp_misc.c new file mode 100644 index 000000000..8ee925709 --- /dev/null +++ b/bsd/netat/adsp_misc.c @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * These function replace the Mk68 assembly routines found in qAddToEnd.s and + * q????.s + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ +extern atlock_t adspgen_lock; + + +struct qlink { + struct qlink *qlinkp; +}; + +/* ---------------------------------------------------------------------- + * void qAddToEnd(void *qhead, void *qelem) + * + * INPUTS: + * Ptr to ptr to 1st item in queue + * Ptr to item to add to end of queue + * OUTPUTS: + * none + * + * Assumptions: The link field is the FIRST field of the qelem structure. + * ---------------------------------------------------------------------- + */ +int qAddToEnd(qhead, qelem) + struct qlink **qhead; + struct qlink *qelem; +{ + /* define our own type to access the next field. NOTE THAT THE "NEXT" + * FIELD IS ASSUMED TO BE THE FIRST FIELD OF THE STRUCTURE + */ + + register struct qlink *q; + + /* Scan the linked list to the end and update the previous + * element next field. (do that protocted). 
+ */ + + q = *qhead; + if (q) { + while (q->qlinkp) { + /* are we about to link to ourself */ + if (q == qelem) + goto breakit; + q = q->qlinkp; + } + q->qlinkp = qelem; + } + else { + *qhead = qelem; + } + qelem->qlinkp = (struct qlink *) 0; +breakit: +#ifdef NOTDEF + DPRINTF("%s: qhead=%x added elem=%x\n","qAddToEnd", qhead, qelem); +#endif + return 0; +} + + + +/* ---------------------------------------------------------------------- + * qfind_m + * void* qfind_m(void *qhead, void NPTR match, ProcPtr compare_fnx) + * + * Hunt down a linked list of queue elements calling the compare + * function on each item. When the compare function returns true, + * return ptr to the queue element. + * + * + * INPUTS: + * qhead Address of ptr to first item in queue + * match + * compare_fnx + * OUTPUTS: + * D0 & A0 Ptr to queue element or NIL + * REGISTERS: + * D0,D1,A0,A1 + * ---------------------------------------------------------------------- + */ +void* qfind_m(qhead, match, compare_fnx) + CCBPtr qhead; + void *match; + ProcPtr compare_fnx; +{ + int s; + CCBPtr queue_item = qhead; + + ATDISABLE(s, adspgen_lock); + while (queue_item) { + if ((*compare_fnx)(queue_item,match)) + break; + + queue_item = queue_item->ccbLink; + } + ATENABLE(s, adspgen_lock); + + return (queue_item); +} diff --git a/bsd/netat/adsp_reset.c b/bsd/netat/adsp_reset.c new file mode 100644 index 000000000..bcb738907 --- /dev/null +++ b/bsd/netat/adsp_reset.c @@ -0,0 +1,243 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Reset.c + * + * From v01.15 07/11/90 mbs + */ +/* + * Change log: + * 06/29/95 - Modified to handle flow control for writing (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * RXFReset + * + * We just got a Forward Reset Packet. + * + * Called with interrupts OFF + * + * INPUTS: + * stream pointer + * Pointer to ADSP header, + * OUTPUTS: + * Returns 1 if packet was ignored + */ +int RXFReset(sp, f) /* (CCBPtr sp, ADSP_FRAMEPtr f) */ + CCBPtr sp; + ADSP_FRAMEPtr f; +{ + unsigned int pktFirstByteSeq; + unsigned int hi; + register gbuf_t *mp; + register struct adspcmd *pb; + int s; + + ATDISABLE(s, sp->lock); + pktFirstByteSeq = netdw(UAL_VALUE(f->pktFirstByteSeq)); + + hi = sp->recvSeq + CalcRecvWdw(sp); + + /* + * Must do this with interrupts OFF + */ + if (BETWEEN(sp->recvSeq, pktFirstByteSeq, hi)) /* Is this acceptable? 
*/ + { + sp->recvSeq = pktFirstByteSeq; + while (mp = sp->rbuf_mb) { /* clear the receive queue */ + sp->rbuf_mb = gbuf_next(mp); + gbuf_freem(mp); + } + if (sp->crbuf_mb) { + gbuf_freem(sp->crbuf_mb); + sp->crbuf_mb = 0; + } + sp->rData = 0; + sp->rbufFull = 0; + sp->userFlags |= eFwdReset; /* Set forward reset received Flag */ + + mp = gbuf_alloc(sizeof(struct adspcmd), PRI_HI); + pb = (struct adspcmd *)gbuf_rptr(mp); + gbuf_winc(mp,sizeof(struct adspcmd)); + pb->ioc = 0; + pb->mp = mp; + + pb->csCode = dspReset; + pb->ioResult = 0; + completepb(sp, pb); + sp->userFlags &= ~eFwdReset; + } + + if (LTE(pktFirstByteSeq, hi)) { + sp->sendCtl |= B_CTL_FRESETACK; /* Ack it if it's OK, or a duplicate */ + sp->callSend = 1; + } + + ATENABLE(s, sp->lock); + return 0; +} + + +/* + * RXFResetAck + * + * We just got a Forward Reset Acknowledgement packet + * + * Called with interrupts OFF + * + * INPUTS: + * stream pointer + * Pointer to ADSP header, + * OUTPUTS: + * Returns 1 if packet was ignored + */ +int RXFResetAck(sp, f) /* (CCBPtr sp, ADSP_FRAMEPtr f) */ + CCBPtr sp; + ADSP_FRAMEPtr f; +{ + unsigned int PktNextRecvSeq; + int s; + + if (sp->frpb == 0) /* Not expecting frwd reset Ack packet */ + return 1; + + ATDISABLE(s, sp->lock); + PktNextRecvSeq = netdw(UAL_VALUE(f->pktNextRecvSeq)); + + if (BETWEEN(sp->sendSeq, PktNextRecvSeq, sp->sendWdwSeq+1)) { + struct adspcmd *pb; + + RemoveTimerElem(&adspGlobal.fastTimers, &sp->ResetTimer); + /* Remove timer */ + + /* + * Interrupts are OFF here while we muck with the linked list + */ + pb = sp->frpb; /* Unlink copy of user's parameter block */ + sp->frpb = (struct adspcmd *)pb->qLink; + + pb->ioResult = 0; + completepb(sp, pb); /* complete(pb, 0); */ + + if (sp->state == sClosing) /* this ack may allow us to close... */ + CheckOkToClose(sp); + + if (sp->frpb) /* Another to send? 
*/ + { + sp->callSend = 1; + sp->sendCtl |= B_CTL_FRESET; + } + } + + ATENABLE(s, sp->lock); + return 0; +} + + +/* + * dspReset + * + * INPUTS: + * --> ccbRefNum refnum of connection end + * + * OUTPUTS: + * none + * + * ERRORS: + * errRefNum bad connection refnum + * errState connection is not open + * errAborted request aborted by Remove or Close call + */ +int adspReset(sp, pb) /* (DSPPBPtr pb) */ + CCBPtr sp; + struct adspcmd *pb; +{ + int s; + register gbuf_t *mp; + register struct adspcmd *rpb; + + if (sp == 0) { + pb->ioResult = errRefNum; + return EINVAL; + } + + if (sp->state != sOpen) { + pb->ioResult = errState; + return EINVAL; + } + + ATDISABLE(s, sp->lock); + + while (mp = sp->sbuf_mb) { /* clear the send queue */ + sp->sbuf_mb = gbuf_next(mp); + gbuf_freem(mp); + } + if (sp->csbuf_mb) { + gbuf_freem(sp->csbuf_mb); + sp->csbuf_mb = 0; + } + sp->sData = 0; + sp->writeFlush = 0; + sp->sendCtl |= B_CTL_FRESET; + + sp->firstRtmtSeq = sp->sendSeq; /* Reset sequence #'s */ + if (mp = gbuf_copym(pb->mp)) { /* copy the parameter block */ + adspioc_ack(0, pb->ioc, pb->gref); /* release user */ + rpb = (struct adspcmd *)gbuf_rptr(mp); + rpb->ioc = 0; /* unlink copy */ + rpb->mp = mp; + + qAddToEnd(&sp->frpb, rpb); + /* Hold on to pb (will be completed when */ + /* forward reset ack is received). */ + } else { /* assume it will work... but keep no + * bookkeeping for it. yetch! */ + adspioc_ack(0, pb->ioc, pb->gref); + } + ATENABLE(s, sp->lock); + + CheckSend(sp); + return STR_IGNORE; + +} diff --git a/bsd/netat/adsp_stream.c b/bsd/netat/adsp_stream.c new file mode 100644 index 000000000..8dd2ec374 --- /dev/null +++ b/bsd/netat/adsp_stream.c @@ -0,0 +1,655 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* + * 09/07/95 - Modified for performance (Tuyen Nguyen) + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, April 9, 1997 by Tuyen Nguyen for MacOSX. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void SndMsgUp(); +void adsp_rput(); +static void adsp_iocack(); +static void adsp_iocnak(); +void adsp_dequeue_ccb(); +unsigned char adspAssignSocket(); +int adspallocate(), adsprelease(); +int adspInited = 0; + +atlock_t adspall_lock; +atlock_t adspgen_lock; +GLOBAL adspGlobal; + +/**********/ + +int adsp_pidM[256]; +char adsp_inputC[256]; +CCB *adsp_inputQ[256]; + +extern at_ifaddr_t *ifID_home; + +CCB *ccb_used_list; + +void adsp_input(mp) + gbuf_t *mp; +{ + gref_t *gref; + CCBPtr sp; + at_ddp_t *p; + int s, l; + gbuf_t *mb; + + switch (gbuf_type(mp)) { + case MSG_DATA: + p = (at_ddp_t *)gbuf_rptr(mp); + ATDISABLE(s, adspall_lock); + sp = adsp_inputQ[p->dst_socket]; + if ((sp == 0) || (sp->gref==0) || (sp->state==sClosed)) + { + ATENABLE(s, adspall_lock); + gbuf_freem(mp); + return; + } + else if 
(sp->otccbLink != 0) { + do { + if ((sp->remoteAddress.a.node == p->src_node) + && (sp->remoteAddress.a.socket == p->src_socket) + && (sp->remoteAddress.a.net == NET_VALUE(p->src_net))) + break; + } while ((sp = sp->otccbLink) != 0); + if (sp == 0) + { + ATENABLE(s, adspall_lock); + gbuf_freem(mp); + return; + } + } + if (sp->lockFlag) { + gbuf_next(mp) = 0; + if (sp->deferred_mb) { + for (mb=sp->deferred_mb; gbuf_next(mb); mb=gbuf_next(mb)) ; + gbuf_next(mb) = mp; + } else + sp->deferred_mb = mp; + ATENABLE(s, adspall_lock); + return; + } + ATDISABLE(l, sp->lockRemove); + sp->lockFlag = 1; + ATENABLE(l, adspall_lock); + while (mp) { + adsp_rput(sp->gref, mp); + if ((mp = sp->deferred_mb) != 0) { + sp->deferred_mb = gbuf_next(mp); + gbuf_next(mp) = 0; + } + } + sp->lockFlag = 0; + ATENABLE(s, sp->lockRemove); + return; + + case MSG_IOCACK: + case MSG_IOCNAK: + gref = (gref_t *)((ioc_t *)gbuf_rptr(mp))->ioc_private; + break; + + case MSG_IOCTL: +#ifdef APPLETALK_DEBUG + kprintf("unexpected MSG_IOCTL in adsp_input()"); +#endif + /* fall through */ + + default: + gbuf_freem(mp); + return; + } + + adsp_rput(gref, mp); +} + +/**********/ +int adsp_readable(gref) + gref_t *gref; +{ + int rc; + CCBPtr sp; + + if (gref->info == 0) + /* + * we don't have the structure we need to determine + * if there's data available... we return readable in + * this case to keep from hanging up in the select + * a subsequent read will run into the same missing data + * structure and return an error... the ATselect code does + * this if it can't retrieve the 'gref' structure from the + * file table for the fd specified + */ + return(1); + + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + rc = sp->rData; + + return rc; +} + +int adsp_writeable(gref) + gref_t *gref; +{ + int s, rc; + CCBPtr sp; + + if (gref->info == 0) + /* + * we don't have the structure we need to determine + * if there's room available... 
we return writeable in + * this case to keep from hanging up in the select + * a subsequent write will run into the same missing data + * structure and return an error... the ATselect code does + * this if it can't retrieve the 'gref' structure from the + * file table for the fd specified + */ + return(1); + + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + ATDISABLE(s, sp->lock); + rc = CalcSendQFree(sp); + ATENABLE(s, sp->lock); + + return rc; +} + +static void adsp_init() +{ + adspInited++; + InitGlobals(); + ccb_used_list = 0; + bzero(adsp_pidM, sizeof(adsp_pidM)); + bzero(adsp_inputC, sizeof(adsp_inputC)); + bzero(adsp_inputQ, sizeof(adsp_inputQ)); +} + +/* + * Description: + * ADSP open and close routines. These routines + * initalize and release the ADSP structures. They do not + * have anything to do with "connections" + */ + +int adsp_open(gref) + gref_t *gref; +{ + register CCBPtr sp; + int s; + + if (!adspInited) + adsp_init(); + + if (!adspAllocateCCB(gref)) + return(ENOBUFS); /* can't get buffers */ + + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + gref->readable = adsp_readable; + gref->writeable = adsp_writeable; + ATDISABLE(s, adspall_lock); + if ((sp->otccbLink = ccb_used_list) != 0) + sp->otccbLink->ccbLink = sp; + ccb_used_list = sp; + ATENABLE(s, adspall_lock); + return 0; +} + +int adsp_close(gref) + gref_t *gref; +{ + int s, l; + unsigned char localSocket; + + /* make sure we've not yet removed the CCB (e.g., due to TrashSession) */ + ATDISABLE(l, adspgen_lock); + if (gref->info) { + CCBPtr sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + ATDISABLE(s, sp->lock); + ATENABLE(s, adspgen_lock); + localSocket = sp->localSocket; + ATENABLE(l, sp->lock); + if (localSocket) + adspRelease(gref); + else + { + adsp_dequeue_ccb(sp); + gbuf_freeb((gbuf_t *)gref->info); + } + } else + ATENABLE(l, adspgen_lock); + return 0; +} + + +/* + * Name: + * adsp_rput + * + * Description: + * ADSP streams read put and service routines. 
+ */ + +void adsp_rput(gref, mp) + gref_t *gref; /* READ queue */ + gbuf_t *mp; +{ + switch (gbuf_type(mp)) { + case MSG_HANGUP: + case MSG_IOCACK: + case MSG_IOCNAK: + switch (adspReadHandler(gref, mp)) { + case STR_PUTNEXT: + atalk_putnext(gref, mp); + break; + case STR_IGNORE: + break; + } + break; + case MSG_ERROR: +#ifdef APPLETALK_DEBUG + kprintf("adsp_rput received MSG_ERROR"); +#endif + /* fall through */ + default: + CheckReadQueue(gbuf_rptr(((gbuf_t *)gref->info))); + CheckSend(gbuf_rptr(((gbuf_t *)gref->info))); + + switch (gbuf_type(mp)) { + case MSG_IOCTL: + case MSG_DATA: + case MSG_PROTO: + if (adspReadHandler(gref, mp) == STR_PUTNEXT) + atalk_putnext(gref, mp); + break; + default: + atalk_putnext(gref, mp); + break; + } + } +} + +/* + * Name: + * adsp_wput + * + * Description: + * ADSP streams write put and service routines. + * + */ + +int adsp_wput(gref, mp) + gref_t *gref; /* WRITE queue */ + gbuf_t *mp; +{ + int rc; + int s; + gbuf_t *xm; + ioc_t *iocbp; + CCBPtr sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + + if (gbuf_type(mp) == MSG_IOCTL) { + iocbp = (ioc_t *)gbuf_rptr(mp); + switch (iocbp->ioc_cmd) { + case ADSPBINDREQ: + { + unsigned char v; + + if (gbuf_cont(mp) == NULL) { + iocbp->ioc_rval = -1; + adsp_iocnak(gref, mp, EINVAL); + } + v = *(unsigned char *)gbuf_rptr(gbuf_cont(mp)); + ATDISABLE(s, adspall_lock); + if ( (v != 0) + && ((v > DDP_SOCKET_LAST) || (v < 2) + || ddp_socket_inuse(v, DDP_ADSP))) { + ATENABLE(s, adspall_lock); + iocbp->ioc_rval = -1; + adsp_iocnak(gref, mp, EINVAL); + } + else { + if (v == 0) { + ATENABLE(s, adspall_lock); + if ((v = adspAssignSocket(gref, 0)) == 0) { + iocbp->ioc_rval = -1; + adsp_iocnak(gref, mp, EINVAL); + return 0; + } + } else { + adsp_inputC[v] = 1; + adsp_inputQ[v] = sp; + adsp_pidM[v] = sp->pid; + ATENABLE(s, adspall_lock); + adsp_dequeue_ccb(sp); + } + *(unsigned char *)gbuf_rptr(gbuf_cont(mp)) = v; + sp->localSocket = v; + iocbp->ioc_rval = 0; + adsp_iocack(gref, mp); + } + return 0; + } 
+ + case ADSPGETSOCK: + case ADSPGETPEER: + { + at_inet_t *addr; + + if (((xm = gbuf_cont(mp)) == NULL) + && ((xm = gbuf_alloc(sizeof(at_inet_t), PRI_MED)) == NULL)) { + iocbp->ioc_rval = -1; + adsp_iocnak(gref, mp, ENOBUFS); + return 0; + } + gbuf_cont(mp) = xm; + gbuf_wset(xm,sizeof(at_inet_t)); + addr = (at_inet_t *)gbuf_rptr(xm); + if (iocbp->ioc_cmd == ADSPGETSOCK) { + /* Obtain Network and Node Id's from DDP */ + /* *** was ddp_get_cfg() *** */ + addr->net = ifID_home->ifThisNode.s_net; + addr->node = ifID_home->ifThisNode.s_node; + addr->socket = (sp)? sp->localSocket: 0; + } else + if (sp) + *addr = sp->remoteAddress.a; + else { + addr->net = 0; + addr->node = 0; + addr->socket = 0; + } + iocbp->ioc_rval = 0; + adsp_iocack(gref, mp); + return 0; + } + case DDP_IOC_GET_CFG: + /* respond to an DDP_IOC_GET_CFG sent on an adsp fd */ + if (((xm = gbuf_cont(mp)) == NULL) && + (xm = gbuf_alloc(sizeof(at_inet_t), PRI_MED)) == NULL) { + iocbp->ioc_rval = -1; + adsp_iocnak(gref, mp, ENOBUFS); + return 0; + } + gbuf_cont(mp) = xm; + gbuf_wset(xm, sizeof(ddp_addr_t)); + /* Obtain Network and Node Id's from DDP */ + { + /* *** was ddp_get_cfg() *** */ + ddp_addr_t *cfgp = + (ddp_addr_t *)gbuf_rptr(gbuf_cont(mp)); + cfgp->inet.net = ifID_home->ifThisNode.s_net; + cfgp->inet.node = ifID_home->ifThisNode.s_node; + cfgp->inet.socket = (sp)? 
sp->localSocket: 0; + cfgp->ddptype = DDP_ADSP; + } + iocbp->ioc_rval = 0; + adsp_iocack(gref, mp); + return 0; + } /* switch */ + } + + if (!gref->info) + gbuf_freem(mp); + else { + ATDISABLE(s, sp->lockClose); + rc = adspWriteHandler(gref, mp); + ATENABLE(s, sp->lockClose); + + switch (rc) { + case STR_PUTNEXT: + if (gbuf_type(mp) == MSG_IOCTL) { + iocbp = (ioc_t *)gbuf_rptr(mp); + iocbp->ioc_private = (void *)gref; + } + DDP_OUTPUT(mp); + break; + case STR_IGNORE: + case STR_IGNORE+99: + break; + default: + gbuf_freem(mp); + break; + } + } + + return 0; +} /* adsp_wput */ + +void adspioc_ack(errno, m, gref) + int errno; + gbuf_t *m; + gref_t *gref; +{ + ioc_t *iocbp; + + if (m == NULL) + return; + iocbp = (ioc_t *) gbuf_rptr(m); + + iocbp->ioc_error = errno; /* set the errno */ + iocbp->ioc_count = gbuf_msgsize(gbuf_cont(m)); + if (gbuf_type(m) == MSG_IOCTL) /* if an ioctl, this is an ack */ + gbuf_set_type(m, MSG_IOCACK); /* and ALWAYS update the user */ + /* ioctl structure */ + trace_mbufs(D_M_ADSP,"A ", m); + SndMsgUp(gref, m); +} + +static void adsp_iocack(gref, m) + gref_t *gref; + register gbuf_t *m; +{ + if (gbuf_type(m) == MSG_IOCTL) + gbuf_set_type(m, MSG_IOCACK); + + if (gbuf_cont(m)) + ((ioc_t *)gbuf_rptr(m))->ioc_count = gbuf_msgsize(gbuf_cont(m)); + else + ((ioc_t *)gbuf_rptr(m))->ioc_count = 0; + + SndMsgUp(gref, m); +} + + +static void adsp_iocnak(gref, m, err) + gref_t *gref; + register gbuf_t *m; + register int err; +{ + if (gbuf_type(m) == MSG_IOCTL) + gbuf_set_type(m, MSG_IOCNAK); + ((ioc_t *)gbuf_rptr(m))->ioc_count = 0; + + if (err == 0) + err = ENXIO; + ((ioc_t *)gbuf_rptr(m))->ioc_error = err; + + if (gbuf_cont(m)) { + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + } + SndMsgUp(gref, m); +} + +unsigned char +adspAssignSocket(gref, flag) + gref_t *gref; + int flag; +{ + unsigned char sVal, sMax, sMin, sSav, inputC; + CCBPtr sp; + int s; + + sMax = flag ? 
DDP_SOCKET_LAST-46 : DDP_SOCKET_LAST-6; + sMin = DDP_SOCKET_1st_DYNAMIC-64; + + ATDISABLE(s, adspall_lock); + for (inputC=255, sVal=sMax; sVal >= sMin; sVal--) { + if (!ddp_socket_inuse(sVal, DDP_ADSP)) + break; + else if (flag) { + if (adsp_inputC[sVal] && + /* meaning that raw DDP doesn't have it */ + (adsp_inputC[sVal] < inputC) + && (adsp_inputQ[sVal]->state == sOpen)) { + inputC = adsp_inputC[sVal]; + sSav = sVal; + } + } + } + if (sVal < sMin) { + if (!flag || (inputC == 255)) { + ATENABLE(s, adspall_lock); + return 0; + } + sVal = sSav; + } + sp = (CCBPtr)gbuf_rptr(((gbuf_t *)gref->info)); + ATENABLE(s, adspall_lock); + adsp_dequeue_ccb(sp); + ATDISABLE(s, adspall_lock); + adsp_inputC[sVal]++; + sp->otccbLink = adsp_inputQ[sVal]; + adsp_inputQ[sVal] = sp; + if (!flag) + adsp_pidM[sVal] = sp->pid; + ATENABLE(s, adspall_lock); + return sVal; +} + +int +adspDeassignSocket(sp) + CCBPtr sp; +{ + unsigned char sVal; + CCBPtr curr_sp; + CCBPtr prev_sp; + int pid = 0; + int s, l; + + dPrintf(D_M_ADSP, D_L_TRACE, ("adspDeassignSocket: pid=%d,s=%d\n", + sp->pid, sp->localSocket)); + ATDISABLE(s, adspall_lock); + sVal = sp->localSocket; + if ((curr_sp = adsp_inputQ[sVal]) != 0) { + prev_sp = 0; + while (curr_sp != sp) { + prev_sp = curr_sp; + curr_sp = curr_sp->otccbLink; + } + if (curr_sp) { + ATDISABLE(l, sp->lockRemove); + if (prev_sp) + prev_sp->otccbLink = sp->otccbLink; + else + adsp_inputQ[sVal] = sp->otccbLink; + ATENABLE(l, sp->lockRemove); + if (adsp_inputQ[sVal]) + adsp_inputC[sVal]--; + else { + pid = adsp_pidM[sVal]; + adsp_inputC[sVal] = 0; + adsp_pidM[sVal] = 0; + } + sp->ccbLink = 0; + sp->otccbLink = 0; + sp->localSocket = 0; + ATENABLE(s, adspall_lock); + return pid ? 
0 : 1; + } + } + ATENABLE(s, adspall_lock); + + dPrintf(D_M_ADSP, D_L_ERROR, + ("adspDeassignSocket: closing, no CCB block, trouble ahead\n")); + return -1; +} /* adspDeassignSocket */ + +/* + * remove CCB from the use list + */ +void +adsp_dequeue_ccb(sp) + CCB *sp; +{ + int s; + + ATDISABLE(s, adspall_lock); + if (sp == ccb_used_list) { + if ((ccb_used_list = sp->otccbLink) != 0) + sp->otccbLink->ccbLink = 0; + } else if (sp->ccbLink) { + if ((sp->ccbLink->otccbLink = sp->otccbLink) != 0) + sp->otccbLink->ccbLink = sp->ccbLink; + } + + sp->otccbLink = 0; + sp->ccbLink = 0; + ATENABLE(s, adspall_lock); +} + +void SndMsgUp(gref, mp) + gref_t *gref; /* WRITE queue */ + gbuf_t *mp; +{ +/* + dPrintf(D_M_ADSP, D_L_TRACE, + ("SndMsgUp: gref=0x%x, mbuf=0x%x\n", (unsigned)gref, (unsigned)mp)); + trace_mbufs(D_M_ADSP, " m", mp); +*/ + atalk_putnext(gref, mp); +} diff --git a/bsd/netat/appletalk.h b/bsd/netat/appletalk.h new file mode 100644 index 000000000..d450f039a --- /dev/null +++ b/bsd/netat/appletalk.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	ORIGINS: 82
 *
 *	(C) COPYRIGHT Apple Computer, Inc. 1992-1996
 *	All Rights Reserved
 *
 */

/* Miscellaneous definitions for AppleTalk used by all protocol
 * modules.
 */

#ifndef _NETAT_APPLETALK_H_
#define _NETAT_APPLETALK_H_

/* NOTE(review): the two include targets below were lost when this text
 * was extracted (angle-bracketed names stripped); presumably
 * <sys/types.h> and <sys/uio.h> (struct iovec is used below) --
 * confirm against the original file. */
#include
#include

/*
  Non-aligned types are used in packet headers.
*/

/* New fundamental types: non-aligned variations of u_short and u_long */
typedef u_char ua_short[2];		/* Unaligned short */
typedef u_char ua_long[4];		/* Unaligned long */

/* Two at_net typedefs; the first is aligned the other isn't */
typedef u_short at_net_al;		/* Aligned AppleTalk network number */
typedef ua_short at_net_unal;		/* Unaligned AppleTalk network number */

/* Miscellaneous types */
typedef u_char at_node;			/* AppleTalk node number */
typedef u_char at_socket;		/* AppleTalk socket number */

typedef at_net_unal at_net;	/* Default: Unaligned AppleTalk network number */
struct atalk_addr {
	u_char	atalk_unused;
	at_net	atalk_net;
	at_node	atalk_node;
};

/* Macros to manipulate unaligned fields */
#define UAS_ASSIGN(x,s)	*(unsigned short *) &(x[0]) = (unsigned short) (s)
#define UAS_UAS(x,y)	*(unsigned short *) &(x[0]) = *(unsigned short *) &(y[0])
#define UAS_VALUE(x)	(*(unsigned short *) &(x[0]))
#define UAL_ASSIGN(x,l)	*(unsigned long *) &(x[0]) = (unsigned long) (l)
#define UAL_UAL(x,y)	*(unsigned long *) &(x[0]) = *(unsigned long *) &(y[0])
#define UAL_VALUE(x)	(*(unsigned long *) &(x[0]))

/* Macros to manipulate at_net variables */
#define NET_ASSIGN(x,s)	*(unsigned short *)&(x[0]) = (unsigned short)(s)
#define NET_NET(x, y)	*(unsigned short *)&(x[0]) = *(unsigned short *)&(y[0])
#define NET_VALUE(x)	(*(unsigned short *) &(x[0]))
#define ATALK_ASSIGN(a, net, node, unused ) \
  a.atalk_unused = unused; a.atalk_node = node; NET_ASSIGN(a.atalk_net, net)

#define NET_EQUAL(a, b) (NET_VALUE(a) == NET_VALUE(b))
#define NET_NOTEQ(a, b) (NET_VALUE(a) != NET_VALUE(b))
#define NET_EQUAL0(a) (NET_VALUE(a) == 0)
#define NET_NOTEQ0(a) (NET_VALUE(a) != 0)


/*
  AppleTalk Internet Address
*/

typedef struct at_inet {
	u_short net;			/* Network Address */
	u_char  node;			/* Node number */
	u_char  socket;			/* Socket number */
} at_inet_t;

/*
  DDP Address for OT
*/

typedef struct ddp_addr {
	at_inet_t	inet;
	u_short		ddptype;
} ddp_addr_t;

/*
  AppleTalk address
*/

struct at_addr {
	u_short	s_net;			/* 16-bit network address */
	u_char	s_node;			/* 8-bit node # (1-0xfd) */
};

/*
  Appletalk sockaddr definition
*/
struct sockaddr_at {
	u_char		sat_len;	/* total length */
	u_char		sat_family;	/* address family (AF_APPLETALK) */
	u_char		sat_port;	/* 8-bit "socket number" */
	struct at_addr	sat_addr;	/* 16-bit "net" and 8-bit "node" */
	char		sat_zero[8];	/* used for netrange in netatalk */
};

#define ATADDR_ANYNET	(u_short)0x0000
#define ATADDR_ANYNODE	(u_char)0x00
#define ATADDR_ANYPORT	(u_char)0x00

#define ATADDR_BCASTNODE (u_char)0xff	/* There is no BCAST for NET */

/* make sure the net, node and socket numbers are in legal range :
 *
 * Net#		0		Local Net
 *		1 - 0xfffe	Legal net nos
 *		0xffff		Reserved by Apple for future use.
 * Node#	0		Illegal
 *		1 - 0x7f	Legal (user node id's)
 *		0x80 - 0xfe	Legal (server node id's; 0xfe illegal in
 *				Phase II nodes)
 *		0xff		Broadcast
 * Socket#	0		Illegal
 *		1 - 0xfe	Legal
 *		0xff		Illegal
 */
#define valid_at_addr(addr) \
	((!(addr) || (addr)->net == 0xffff || (addr)->node == 0 || \
	  (addr)->socket == 0 || (addr)->socket == 0xff)? 0: 1)

/*** * ETHERTYPE_ definitions are in netinet/if_ether.h *** */
#define ETHERTYPE_AT	0x809B		/* AppleTalk protocol */
#define ETHERTYPE_AARP	0x80F3		/* AppleTalk ARP */

/*
  DDP protocol types
*/

#define DDP_RTMP	0x01
#define DDP_NBP		0x02
#define DDP_ATP		0x03
#define DDP_ECHO	0x04
#define DDP_RTMP_REQ	0x05
#define DDP_ZIP		0x06
#define DDP_ADSP	0x07

/*
  Protocols for the socket API
*/

#define ATPROTO_NONE	0		/* no corresponding DDP type exists */

#define ATPROTO_ATP	DDP_ATP		/* must match DDP type */
#define ATPROTO_ADSP	DDP_ADSP	/* must match DDP type */

#define ATPROTO_DDP	249		/* *** to be eliminated eventually *** */
#define ATPROTO_LAP	250		/* *** to be eliminated eventually *** */

#define ATPROTO_AURP	251		/* no corresponding DDP type exists */
#define ATPROTO_ASP	252		/* no corresponding DDP type exists */
#define ATPROTO_AFP	253		/* no corresponding DDP type exists */

#define ATPROTO_RAW	255		/* no corresponding DDP type exists */

/*
  Options for use with [gs]etsockopt at the DDP level.
  First word of comment is data type; bool is stored in int.
*/
#define DDP_CHKSUM_ON	1	/* int; default = FALSE;
				   DDP checksums should be used */
#define DDP_HDRINCL	2	/* int; default = FALSE;
				   header is included with data */
#define DDP_GETSOCKNAME	3	/* used to get ddp_addr_t */
#define DDP_SLFSND_ON	4	/* int; default = FALSE;
				   packets sent to the cable-multicast address
				   on this socket will be looped back */
#define DDP_STRIPHDR	5	/* int; default = FALSE;
				   drop DDP header on receive (raw) */

/*
  AppleTalk protocol retry and timeout
*/

typedef struct at_retry {
	short	interval;		/* Retry interval in seconds */
	short	retries;		/* Maximum number of retries */
	u_char	backoff;		/* Retry backoff, must be 1 through 4 */
} at_retry_t;

/*
  Basic NBP Definitions needed for AppleTalk framework
*/

#define MAX_ZONES 50

#define NBP_NVE_STR_SIZE 32		/* Maximum NBP tuple string size */
typedef struct at_nvestr {
	u_char	len;
	u_char	str[NBP_NVE_STR_SIZE];
} at_nvestr_t;

/* Entity Name */
typedef struct at_entity {
	at_nvestr_t	object;
	at_nvestr_t	type;
	at_nvestr_t	zone;
} at_entity_t;

#define NBP_TUPLE_SIZE ((3*NBP_NVE_STR_SIZE)+3)
				/* 3 for field lengths + 3*32 for three names */
typedef struct at_nbptuple {
	at_inet_t	enu_addr;
	u_char		enu_enum;
	at_entity_t	enu_entity;
} at_nbptuple_t;

/*
  Basic ATP Definitions needed for LibcAT
*/

#define ATP_TRESP_MAX	8	/* Maximum number of Tresp pkts */

/* Response buffer structure for atp_sendreq() and atp_sendrsp() */
typedef struct at_resp {
	u_char	bitmap;			/* Bitmap of responses */
	u_char	filler[3];		/* Force 68K to RISC alignment */
	struct iovec resp[ATP_TRESP_MAX]; /* Buffer for response data */
	long	userdata[ATP_TRESP_MAX]; /* Buffer for response user data */
} at_resp_t;

/*
  Needed for ASP and ADSP
*/

typedef struct {
	int	maxlen;			/* max buffer length */
	int	len;			/* length of data */
	char	*buf;			/* pointer to buffer */
} strbuf_t;

#define IFID_HOME	1	/* home port in ifID_table */

#define ATALK_VALUE(a)	((*(u_long *) &(a))&0x00ffffff)
#define ATALK_EQUAL(a, b) (ATALK_VALUE(a) == ATALK_VALUE(b))

#define VERSION_LENGTH	80	/* length of version string */

/* structure containing general information regarding the state of
 * the Appletalk networking
 */
typedef struct at_state {
	unsigned int	flags;		/* various init flags */
} at_state_t;

/* at_state_t 'flags' defines */
#define AT_ST_STARTED		0x0001
#define AT_ST_MULTIHOME		0x0080	/* set if multihome mode */
#define AT_ST_ROUTER		0x0100	/* set if we are a router */
#define AT_ST_IF_CHANGED	0x0200	/* set when state of any I/F
					   changes (for SNMP) */
#define AT_ST_RT_CHANGED	0x0400	/* route table changed (for SNMP)*/
#define AT_ST_ZT_CHANGED	0x0800	/* zone table changed (for SNMP) */
#define AT_ST_NBP_CHANGED	0x1000	/* if nbp table changed (for SNMP)*/

#ifdef KERNEL
extern at_state_t at_state;		/* global state of AT network */

#define ROUTING_MODE	(at_state.flags & AT_ST_ROUTER)
#define MULTIHOME_MODE	(at_state.flags & AT_ST_MULTIHOME)
#define MULTIPORT_MODE	(ROUTING_MODE || MULTIHOME_MODE)
#endif /* KERNEL */

/* defines originally from h/at_elap.h */
#define AT_ADDR		0
#define ET_ADDR		1
#define AT_ADDR_NO_LOOP	2	/* disables packets from looping back */

#endif /* _NETAT_APPLETALK_H_ */
diff --git a/bsd/netat/asp.h b/bsd/netat/asp.h
new file mode 100644
index 000000000..780dffd57
--- /dev/null
+++ b/bsd/netat/asp.h
@@ -0,0 +1,216 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	ORIGINS: 82
 *
 *	(C) COPYRIGHT Apple Computer, Inc. 1992-1996
 *	All Rights Reserved
 *
 */

/* AppleTalk Session Protocol (ASP): wire function codes, ioctl codes,
 * error codes, and (kernel-only) the ASP session control block. */

#ifndef _NETAT_ASP_H_
#define _NETAT_ASP_H_

#define ASP_Version	0x100

/* ASP function codes carried in the first byte of the ASP word */
#define ASPFUNC_CloseSess	1
#define ASPFUNC_Command		2
#define ASPFUNC_GetStatus	3
#define ASPFUNC_OpenSess	4
#define ASPFUNC_Tickle		5
#define ASPFUNC_Write		6
#define ASPFUNC_WriteContinue	7
#define ASPFUNC_Attention	8
#define ASPFUNC_CmdReply	9

#define ASPIOC			210	/* AT_MID_ASP */
#define ASPIOC_ClientBind	((ASPIOC<<8) | 1)
#define ASPIOC_CloseSession	((ASPIOC<<8) | 2)
#define ASPIOC_GetLocEntity	((ASPIOC<<8) | 3)
#define ASPIOC_GetRemEntity	((ASPIOC<<8) | 4)
#define ASPIOC_GetSession	((ASPIOC<<8) | 5)
#define ASPIOC_GetStatus	((ASPIOC<<8) | 6)
#define ASPIOC_ListenerBind	((ASPIOC<<8) | 7)
#define ASPIOC_OpenSession	((ASPIOC<<8) | 8)
#define ASPIOC_StatusBlock	((ASPIOC<<8) | 9)
#define ASPIOC_SetPid		((ASPIOC<<8) |10)
#define ASPIOC_GetSessId	((ASPIOC<<8) |11)
#define ASPIOC_EnableSelect	((ASPIOC<<8) |12)	/* not needed */
#define ASPIOC_Look		((ASPIOC<<8) |13)

#define MOREDATA	1

/* The following ASP error codes are defined in Inside AppleTalk: */

#define ASPERR_NoError		0
#define ASPERR_BadVersNum	-1066
#define ASPERR_BufTooSmall	-1067
#define ASPERR_NoMoreSessions	-1068
#define ASPERR_NoServers	-1069
#define ASPERR_ParamErr		-1070
#define ASPERR_ServerBusy	-1071
#define ASPERR_SessClosed	-1072
#define ASPERR_SizeErr		-1073
#define ASPERR_TooManyClients	-1074
#define ASPERR_NoAck		-1075

/* These ASP error codes were apparently defined later: */

#define ASPERR_NoSuchDevice	-1058
#define ASPERR_BindErr		-1059
#define ASPERR_CmdReply		-1060
#define ASPERR_CmdRequest	-1061
#define ASPERR_SystemErr	-1062
#define ASPERR_ProtoErr		-1063
#define ASPERR_NoSuchEntity	-1064
#define ASPERR_RegisterErr	-1065

/* argument block for ASPIOC_GetStatus */
typedef struct {
	at_inet_t SLSEntityIdentifier;
	at_retry_t Retry;
	int StatusBufferSize;
} asp_status_cmd_t;

/* argument block for ASPIOC_OpenSession */
typedef struct {
	at_inet_t SLSEntityIdentifier;
	at_retry_t Retry;
	unsigned short TickleInterval;
	unsigned short SessionTimer;
} asp_open_cmd_t;

typedef struct {
	int Primitive;
	int CmdResult;
	unsigned short ReqRefNum;
	unsigned short Filler;
} asp_cmdreply_req_t;

typedef struct {
	int Primitive;
	int CmdResult;
} asp_cmdreply_ind_t;

typedef struct {
	int Primitive;
	unsigned short ReqRefNum;
	unsigned char ReqType;
	unsigned char Filler;
} asp_command_ind_t;

union asp_primitives {
	int Primitive;
	asp_cmdreply_ind_t CmdReplyInd;
	asp_cmdreply_req_t CmdReplyReq;
	asp_command_ind_t CommandInd;
};

#ifdef KERNEL

/* ASP session state machine values (asp_scb.state) */
#define ASPSTATE_Close			0
#define ASPSTATE_Idle			1
#define ASPSTATE_WaitingForGetStatusRsp	2
#define ASPSTATE_WaitingForOpenSessRsp	3
#define ASPSTATE_WaitingForCommandRsp	4
#define ASPSTATE_WaitingForWriteContinue 5
#define ASPSTATE_WaitingForWriteRsp	6
#define ASPSTATE_WaitingForWriteContinueRsp 7
#define ASPSTATE_WaitingForCloseSessRsp	8
#ifdef NOT_USED
#define ASPSTATE_WaitingForCfgAck	9
#endif

/*
 * ATP state block
 *
 * NOTE: the first four members' positions are a layout contract shared
 * with asp_scb (below) -- do not reorder.
 */
typedef struct {
	gref_t *atp_gref;	/* gref must be the first entry */
	int pid;		/* process id, must be the second entry */
	gbuf_t *atp_msgq;	/* data msg, must be the third entry */
	unsigned char dflag;	/* structure flag, must be the fourth entry */
	unsigned char filler[3];
} atp_state_t;

/*
 * ASP word
 */
typedef struct {
	unsigned char func;
	unsigned char param1;
	unsigned short param2;
} asp_word_t;

/*
 * ASP session control block
 *
 * NOTE: the first four members' positions mirror atp_state_t -- do not
 * reorder.
 */
typedef struct asp_scb {
	gref_t *gref;		/* read queue pointer, must be the first entry */
	int pid;		/* process id, must be the second entry */
	atp_state_t *atp_state;	/* atp state info, must be the third entry */
	unsigned char dflag;	/* structure flag, must be the fourth entry */
	unsigned char state;
	unsigned char sess_id;
	unsigned char tmo_delta;
	unsigned char tmo_cnt;
	unsigned char rem_socket;
	unsigned char rem_node;
	unsigned char magic_num;
	unsigned short snd_seq_num;
	unsigned short rcv_seq_num;
	unsigned short filler;
	unsigned short tickle_tid;
	unsigned short tickle_interval;
	unsigned short session_timer;
	unsigned short attn_tid;
	unsigned char attn_flag;
	unsigned char req_flag;
	gbuf_t *req_msgq;
	unsigned short wrt_seq_num;
	unsigned char get_wait;
	unsigned char ioc_wait;
	at_retry_t cmd_retry;
	at_inet_t loc_addr;
	at_inet_t rem_addr;
	at_inet_t svc_addr;
	gbuf_t *sess_ioc;
	gbuf_t *stat_msg;
	void (*tmo_func)();
	struct asp_scb *next_tmo;
	struct asp_scb *prev_tmo;
	struct asp_scb *sess_scb;
	struct asp_scb *next_scb;
	struct asp_scb *prev_scb;
	unsigned char sel_on;	/* not needed */
	unsigned char user;
	unsigned char rcv_cnt;
	unsigned char snd_stop;
	unsigned char reply_socket;
	unsigned char if_num;
	unsigned char pad[2];
	atlock_t lock;
	atlock_t delay_lock;
	atevent_t event;
	atevent_t delay_event;
} asp_scb_t;

#endif /* KERNEL */
#endif /* _NETAT_ASP_H_ */
diff --git a/bsd/netat/asp_proto.c b/bsd/netat/asp_proto.c
new file mode 100644
index 000000000..a93025454
--- /dev/null
+++ b/bsd/netat/asp_proto.c
@@ -0,0 +1,2298 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").
You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1995 Apple Computer, Inc.
 *
 * Change Log:
 *	Created February 20, 1995 by Tuyen Nguyen
 *	Modified for MP, 1996 by Tuyen Nguyen
 *	Modified, March 17, 1997 by Tuyen Nguyen for MacOSX.
 */

/* NOTE(review): all include targets below were lost when this text was
 * extracted (angle-bracketed names stripped); restore from the original
 * source file. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include

#include
#include
#include
#include
#include
#include
#include
#include

/* debug aid: break out of a loop that iterates suspiciously long */
static int loop_cnt;
#define CHK_LOOP(str) { \
  if (loop_cnt++ > 100) { \
     kprintf("%s", str); \
     break; \
  } \
}

#define atpBDSsize (sizeof(struct atpBDS)*ATP_TRESP_MAX)
#define aspCMDsize (atpBDSsize+sizeof(struct atp_set_default)+TOTAL_ATP_HDR_SIZE)
#define SCBS_PER_BLK 16
#define TICKS_PER_SEC HZ
#define SESS_TMO_RES 2
#define DEF_SESS_TMO 120
/* advance a 16-bit sequence number with wraparound */
#define NEXT_SEQ_NUM(x) (x = (x == 65535) ? 0 : (x + 1))
#define MAX_RCV_CNT 5
/* true when addr does not match the session's remote address */
#define BAD_REMADDR(addr) \
	( (*(long *)&scb->rem_addr != *(long *)&addr) \
	  && ((scb->rem_addr.net != addr.net) \
	      || (scb->rem_addr.node != addr.node)) )

int ASPputmsg();
int ASPgetmsg();
void asp_init();
void asp_ack_reply();
void asp_nak_reply();
void asp_clock();
void asp_clock_funnel();
int asp_open();
int asp_close();
int asp_wput();
void atp_retry_req();
StaticProc asp_scb_t *asp_find_scb();
StaticProc asp_scb_t *asp_scb_alloc();

StaticProc void asp_putnext();
StaticProc void asp_iocack();
StaticProc void asp_iocnak();
StaticProc void asp_dequeue_scb();
StaticProc void asp_scb_free();
StaticProc void asp_timout();
StaticProc void asp_untimout();
StaticProc void asp_hangup();
StaticProc void asp_send_tickle();
StaticProc void asp_send_tickle_funnel();
StaticProc void asp_accept();
StaticProc int asp_send_req();

extern at_ifaddr_t *ifID_home;
extern int atp_pidM[];
extern gref_t *atp_inputQ[];
gbuf_t *scb_resource_m = 0;
unsigned char asp_inpC[256];	/* per-socket count of attached scbs */
asp_scb_t *asp_scbQ[256];	/* per-socket list of session control blocks */

static at_retry_t asp_def_retry = {2, -1, 1};
static unsigned char scb_tmo_cnt;
asp_scb_t *scb_used_list;	/* scbs allocated but not yet socket-bound */
static asp_scb_t *scb_tmo_list;
asp_scb_t *scb_free_list;
atlock_t aspall_lock, asptmo_lock;

/* a stream is readable when a session-level message is queued */
int
asp_readable(gref)
	gref_t *gref;
{
	return (((asp_scb_t *)gref->info)->sess_ioc ? 1 : 0);
}

/* reset module-global scb bookkeeping */
void
asp_init()
{
	scb_tmo_cnt = 1;
	scb_tmo_list = 0;
	scb_used_list = 0;
	scb_free_list = 0;
	bzero(asp_inpC, sizeof(asp_inpC));
	bzero(asp_scbQ, sizeof(asp_scbQ));
}

/*
 * the open routine allocates a state structure
 */
int asp_open(gref)
	gref_t *gref;
{
	int s;
	asp_scb_t *scb;

	/*
	 * if no asp structure available, return failure
	 */
	if ((scb = asp_scb_alloc()) == 0)
		return ENOBUFS;

	/*
	 * initialize the gref data structure
	 */
	gref->info = (void *)scb;
	gref->readable = asp_readable;

	/*
	 * initialize the scb data structure
	 */
	scb->dflag = 1;
	scb->magic_num = 222;
	scb->state = ASPSTATE_Idle;
	scb->pid = gref->pid;
	scb->gref = gref;
	scb->session_timer = DEF_SESS_TMO;
	scb->cmd_retry = asp_def_retry;
	ATDISABLE(s, aspall_lock);
	if ((scb->next_scb = scb_used_list) != 0)
		scb->next_scb->prev_scb = scb;
	scb_used_list = scb;
	ATENABLE(s, aspall_lock);

	/*
	 * return success
	 */
	dPrintf(D_M_ASP, D_L_INFO, ("asp_open: pid=%d\n", scb->pid));
	return 0;
} /* asp_open */

/*
 * the close routine frees all the data structures
 */
int
asp_close(gref)
	gref_t *gref;
{
	int s;
	unsigned char sock_num;
	asp_scb_t *scb, *new_scb;
	gbuf_t *m;

	scb = (asp_scb_t *)gref->info;
	dPrintf(D_M_ASP, D_L_INFO, ("asp_close: loc=%d\n",
		scb->loc_addr.socket));

	if (scb->pid && scb->sess_ioc && (scb->dflag != 1)) {
		/*
		 * send the CloseSess response to peer
		 */
		if (gbuf_type(scb->sess_ioc) != MSG_PROTO) {
			ATDISABLE(s, scb->lock);
			m = scb->sess_ioc;
			scb->sess_ioc = gbuf_next(m);
			ATENABLE(s, scb->lock);
			atp_send_rsp(scb->gref, m, TRUE);
		}
	}

	if (scb->atp_state) {
		sock_num = scb->loc_addr.socket;
		ATDISABLE(s, aspall_lock);
		if ((scb->dflag != 1) && scb->stat_msg) {
			untimeout(atp_retry_req, scb->stat_msg);
			gbuf_freem(scb->stat_msg);
			scb->stat_msg = 0;
		}
		if (asp_scbQ[sock_num]->next_scb == 0) {
			/* last scb on this socket: tear the socket down */
			asp_scbQ[sock_num] = 0;
			asp_inpC[sock_num] = 0;
			ATENABLE(s, aspall_lock);
			dPrintf(D_M_ASP, D_L_INFO,
				(" : atp_close(), loc=%d\n", scb->loc_addr.socket));
			atp_close(gref, 0);
		} else {
			/* unlink scb; if it headed the list, promote the
			 * next scb to own the ATP input queue */
			asp_inpC[sock_num]--;
			if (scb == asp_scbQ[sock_num]) {
				new_scb = scb->next_scb;
				new_scb->prev_scb = 0;
				asp_scbQ[sock_num] = new_scb;
				new_scb->atp_state->atp_gref = new_scb->gref;
				new_scb->atp_state->pid = new_scb->pid;
				atp_inputQ[sock_num] = new_scb->gref;
			} else {
				if ((scb->prev_scb->next_scb = scb->next_scb) != 0)
					scb->next_scb->prev_scb = scb->prev_scb;
			}
			scb->next_scb = 0;
			ATENABLE(s, aspall_lock);
		}
	} else
		asp_dequeue_scb(scb);

	/*
	 * free all allocated blocks if any
	 */
	ATDISABLE(s, scb->lock);
	if (scb->stat_msg) {
		gbuf_freem(scb->stat_msg);
		scb->stat_msg = 0;
	}
	if (scb->sess_ioc) {
		gbuf_freel(scb->sess_ioc);
		scb->sess_ioc = 0;
	}
	if (scb->req_msgq) {
		gbuf_freel(scb->req_msgq);
		scb->req_msgq = 0;
	}

	scb->rem_addr.node = 0;
	ATENABLE(s, scb->lock);

	/*
	 * stop all timers
	 */
	scb->tmo_cnt = 0;
	asp_untimout(asp_hangup, scb);
	untimeout(asp_send_tickle_funnel, (void *)scb); /* added for 2225395 */

	/*
	 * free the asp session control block
	 */
	scb->state = ASPSTATE_Close;
	asp_scb_free(scb);
	return 0;
} /* asp_close */

/* debug helper: printable name for an ASPSTATE_* value */
static char *aspStateStr(state)
	int state;
{
	return ((state==ASPSTATE_Close)? "Close":
		(state==ASPSTATE_Idle)? "Idle":
		(state==ASPSTATE_WaitingForGetStatusRsp)? "GetStatusRsp":
		(state==ASPSTATE_WaitingForOpenSessRsp)? "OpenSessRsp":
		(state==ASPSTATE_WaitingForCommandRsp)? "CmdRsp":
		(state==ASPSTATE_WaitingForWriteContinue)? "WriteCont":
		(state==ASPSTATE_WaitingForWriteRsp)? "WriteRsp":
		(state==ASPSTATE_WaitingForWriteContinueRsp)? "WriteContRsp":
		(state==ASPSTATE_WaitingForCloseSessRsp)? "CloseSessRsp":
		"unknown");
}

/* debug helper: printable name for an ASPFUNC_* value */
static char *aspCmdStr(aspCmd)
	int aspCmd;
{
return ((aspCmd==ASPFUNC_CloseSess)? "CloseSess":
	(aspCmd==ASPFUNC_Command)? "Command":
	(aspCmd==ASPFUNC_GetStatus)? "GetStatus":
	(aspCmd==ASPFUNC_OpenSess)? "OpenSess":
	(aspCmd==ASPFUNC_Tickle)? "Tickle":
	(aspCmd==ASPFUNC_Write)? "Write":
	(aspCmd==ASPFUNC_WriteContinue)? "WriteContinue":
	(aspCmd==ASPFUNC_Attention)? "Attention":
	(aspCmd==ASPFUNC_CmdReply)? "CmdReply": "unknown");
}

/* debug helper: printable name for an ASPIOC_* value */
static char *aspIOCStr(aspIOC)
	int aspIOC;
{
return (
	(aspIOC==ASPIOC_ClientBind)? "ClientBind":
	(aspIOC==ASPIOC_CloseSession)? "CloseSession":
	(aspIOC==ASPIOC_GetLocEntity)? "GetLocEntity":
	(aspIOC==ASPIOC_GetRemEntity)? "GetRemEntity":
	(aspIOC==ASPIOC_GetSession)? "GetSession":
	(aspIOC==ASPIOC_GetStatus)? "GetStatus":
	(aspIOC==ASPIOC_ListenerBind)? "ListenerBind":
	(aspIOC==ASPIOC_OpenSession)? "OpenSession":
	(aspIOC==ASPIOC_StatusBlock)? "StatusBlock":
	(aspIOC==ASPIOC_SetPid)? "SetPid":
	(aspIOC==ASPIOC_GetSessId)? "GetSessId":
	(aspIOC==ASPIOC_EnableSelect)? "EnableSelect":
	(aspIOC==ASPIOC_Look)? "Look":
	"unknown"
	);
}

#ifdef AT_MBUF_TRACE

static char mbuf_str[100];
/* format current mbuf-type counters into a static buffer */
char *mbuf_totals()
{
	sprintf(mbuf_str,
		/*
		"dat = %d, prot = %d, ioc = %d, err = %d, hu = %d, ack = %d, nak = %d, ctl = %d",
		*/
		"dat = %d, prot = %d, ioc = %d, ctl = %d",
		mbstat.m_mtypes[MSG_DATA], mbstat.m_mtypes[MSG_PROTO], mbstat.m_mtypes[MSG_IOCTL],
		/*
		mbstat.m_mtypes[MSG_ERROR], mbstat.m_mtypes[MSG_HANGUP], mbstat.m_mtypes[MSG_IOCACK],
		mbstat.m_mtypes[MSG_IOCNAK],
		*/
		mbstat.m_mtypes[MSG_CTL]);
	return(&mbuf_str[0]);
}

/* trace entry: count mbufs (i) and chains (j) hanging off m */
void trace_beg(str, m)
	char *str;
	gbuf_t *m;
{
	int i = 0, j = 0;
	gbuf_t *mdata, *mchain;

	if (m)
		for (i = 0, j = 0, mdata = m, mchain = m; mdata; i++) {
			mdata = gbuf_cont(mdata);
			if (!mdata && mchain) {
				mdata = gbuf_next(mchain);
				mchain = mdata;
				j++;
			}
		}
	dPrintf(D_M_ASP, D_L_TRACE,
		("%s: %s, m# = %d, c# = %d\n", str, mbuf_totals(), i, j));
}

/* trace exit: log mbuf totals */
void trace_end(str)
	char *str;
{
	dPrintf(D_M_ASP, D_L_TRACE,
		(" %s: %s\n", str, mbuf_totals()));
}
#endif AT_MBUF_TRACE

/*
 * the write routine
 *
 * Handles the ASPIOC_* ioctls for an ASP stream.  Non-ASP streams
 * (scb->dflag == 0) are passed through to ATP.  Always returns 0;
 * failures are reported via ioctl ack/nak messages.
 */
int asp_wput(gref, m)
	gref_t *gref;
	gbuf_t *m;
{
	int s, err;
	unsigned char sockSav, sock_num;
	gbuf_t *mioc, *mdata;
	ioc_t *iocbp;
	asp_scb_t *scb, *server_scb, *curr_scb;
	at_inet_t *addr;
	asp_word_t aw;
	union asp_primitives *primitives;
	asp_status_cmd_t *status_cmd;
	asp_open_cmd_t *open_cmd;
	at_retry_t Retry;

	scb = (asp_scb_t *)gref->info;
	if (scb->dflag == 0) {
		/* not an ASP stream: hand straight to ATP */
		atp_wput(gref, m);
		return 0;
	}

	if (gbuf_type(m) != MSG_IOCTL) {
		dPrintf(D_M_ASP, D_L_WARNING,
			("asp_wput: UNKNOWN message, type=%d\n",
			gbuf_type(m)));
		gbuf_freem(m);
		return 0;
	}

	mioc = m;
	iocbp = (ioc_t *)gbuf_rptr(mioc);

	dPrintf(D_M_ASP_LOW, D_L_INFO,
		("asp_wput: %s, loc=%d, state=%s\n",
		aspIOCStr(iocbp->ioc_cmd), scb->loc_addr.socket,
		aspStateStr(scb->state)));

	switch (iocbp->ioc_cmd) {
	case ASPIOC_CloseSession:
		if ((scb->state == ASPSTATE_Close) || (scb->rem_addr.node == 0))
			break;

		/* NOTE(review): Retry.backoff is left uninitialized here --
		 * verify asp_send_req() ignores it for this request */
		Retry.retries = 3;
		Retry.interval = 1;
		aw.func = ASPFUNC_CloseSess;
		aw.param1 = scb->sess_id;
		aw.param2 = 0;
		iocbp->ioc_private = (void *)scb;
		scb->ioc_wait = (unsigned char)(iocbp->ioc_cmd & 0xff);
		iocbp->ioc_cmd = AT_ATP_ISSUE_REQUEST;
		asp_send_req(gref, mioc, &scb->rem_addr, &Retry, &aw,
			0, ASPSTATE_WaitingForCloseSessRsp, 0x01);
		return 0;

	case ASPIOC_ClientBind:
		/*
		 * open an ATP channel
		 */
		if ((err = atp_open(gref, 0)) != 0) {
			asp_iocnak(gref, mioc, err);
			return 0;
		}
		scb->atp_state = (atp_state_t *)gref->info;
		scb->atp_state->pid = scb->pid;
		/*
		 * bind to any available socket
		 */
		scb->dflag = 2;
		sockSav = scb->dflag;
		if ((sock_num = (at_socket)atp_bind(gref, 0, &sockSav)) == 0) {
			scb->atp_state = (atp_state_t *)0;
			atp_close(gref, 0);
			gref->info = (void *)scb;
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		gref->info = (void *)scb;
		asp_dequeue_scb(scb);
		scb->atp_state->dflag = scb->dflag;
		scb->loc_addr.socket = sock_num;
		asp_scbQ[sock_num] = scb;
		asp_inpC[sock_num]++;
		atp_pidM[sock_num] = 0;
		break;

	case ASPIOC_ListenerBind:
		/*
		 * open an ATP channel
		 */
		if ((err = atp_open(gref, 0)) != 0) {
			asp_iocnak(gref, mioc, err);
			return 0;
		}
		scb->atp_state = (atp_state_t *)gref->info;
		scb->atp_state->pid = scb->pid;
		/*
		 * bind to any available socket
		 */
		if ((sock_num = (at_socket)atp_bind(gref, 0, 0)) == 0) {
			scb->atp_state = (atp_state_t *)0;
			atp_close(gref, 0);
			gref->info = (void *)scb;
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		gref->info = (void *)scb;
		asp_dequeue_scb(scb);
		scb->atp_state->dflag = scb->dflag;
		scb->loc_addr.socket = sock_num;
		asp_scbQ[sock_num] = scb;
		asp_inpC[sock_num]++;
		if (gbuf_cont(mioc))
			*(at_inet_t *)gbuf_rptr(gbuf_cont(mioc)) = scb->loc_addr;
		break;

	case ASPIOC_GetLocEntity:
		if ((gbuf_cont(mioc) == 0) || (scb->atp_state == 0)) {
			asp_iocnak(gref, mioc, EPROTO);
			return 0;
		}
		*(at_inet_t *)gbuf_rptr(gbuf_cont(mioc)) = scb->loc_addr;
		break;

	case ASPIOC_GetRemEntity:
		if ((gbuf_cont(mioc) == 0) || (scb->atp_state == 0)) {
			asp_iocnak(gref, mioc, EPROTO);
			return 0;
		}
		*(at_inet_t *)gbuf_rptr(gbuf_cont(mioc)) = scb->rem_addr;
		break;

	case ASPIOC_GetSession:
		if ((mdata = gbuf_cont(mioc)) == 0) {
			asp_iocnak(gref, mioc, EPROTO);
			return 0;
		}
		addr = (at_inet_t *)gbuf_rptr(mdata);
		scb->tickle_interval = (unsigned short)addr->node;
		scb->session_timer = addr->net;
		server_scb = asp_scbQ[addr->socket];
/*### LD 10/28/97: changed to make sure we're not accessing a null server_scb */
		if (server_scb == 0) {
			asp_iocnak(gref, mioc, EPROTO);
			return 0;
		}
		if (server_scb->sess_ioc == 0) {
			asp_iocnak(gref, mioc, EPROTO);
			return 0;
		}

		/*
		 * open an ATP channel
		 */
		if ((err = atp_open(gref, 0)) != 0) {
			gref->info = (void *)scb;
			asp_iocnak(gref, mioc, err);
			return 0;
		}
		scb->atp_state = (atp_state_t *)gref->info;
		scb->atp_state->pid = scb->pid;
		/*
		 * bind to any available socket
		 */
		scb->dflag = 3;
		sockSav = scb->dflag;
		if ((sock_num = (at_socket)atp_bind(gref, 0, &sockSav)) == 0) {
			/* bind failed: share the socket named in sockSav by
			 * appending this scb to that socket's list */
			atp_close(gref, 0);
			asp_dequeue_scb(scb);
			ATDISABLE(s, aspall_lock);
			sock_num = sockSav;
			scb->loc_addr.socket = sock_num;
			for (curr_scb = asp_scbQ[sock_num];
			     curr_scb->next_scb; curr_scb = curr_scb->next_scb) ;
			scb->prev_scb = curr_scb;
			curr_scb->next_scb = scb;
			scb->atp_state = curr_scb->atp_state;
			ATENABLE(s, aspall_lock);
		} else {
			asp_dequeue_scb(scb);
			ATDISABLE(s, aspall_lock);
			scb->loc_addr.socket = sock_num;
			asp_scbQ[sock_num] = scb;
			scb->atp_state->dflag = scb->dflag;
			ATENABLE(s, aspall_lock);
		}
		gref->info = (void *)scb;
		asp_inpC[sock_num]++;
		gbuf_cont(mioc) = 0;
		asp_accept(server_scb, scb, mdata);
		break;

	case ASPIOC_GetStatus:
		if ((mdata = gbuf_cont(mioc)) == 0) {
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		gbuf_cont(mioc) = 0;
		status_cmd = (asp_status_cmd_t *)gbuf_rptr(mdata);
		aw.func = ASPFUNC_GetStatus;
		aw.param1 = 0;
		aw.param2 = 0;
		scb->ioc_wait = (unsigned char)(iocbp->ioc_cmd & 0xff);
		iocbp->ioc_cmd = AT_ATP_ISSUE_REQUEST_DEF;
		asp_send_req(gref, mioc, &status_cmd->SLSEntityIdentifier,
			&status_cmd->Retry, &aw, 1, ASPSTATE_WaitingForGetStatusRsp, 0xff);
		gbuf_freeb(mdata);
		return 0;

	case ASPIOC_OpenSession:
		if ((mdata = gbuf_cont(mioc)) == 0) {
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		gbuf_cont(mioc) = 0;
		open_cmd = (asp_open_cmd_t *)gbuf_rptr(mdata);
		scb->svc_addr = open_cmd->SLSEntityIdentifier;
		scb->rem_addr = scb->svc_addr;
		scb->rem_node = scb->rem_addr.node;
		scb->rem_addr.node = 0;
		scb->tickle_interval = open_cmd->TickleInterval;
		scb->session_timer = open_cmd->SessionTimer;
		aw.func = ASPFUNC_OpenSess;
		aw.param1 = scb->loc_addr.socket;
		aw.param2 = ASP_Version;
		scb->ioc_wait = (unsigned char)(iocbp->ioc_cmd & 0xff);
		iocbp->ioc_cmd = AT_ATP_ISSUE_REQUEST_DEF;
		asp_send_req(gref, mioc, &open_cmd->SLSEntityIdentifier,
			&open_cmd->Retry, &aw, 1, ASPSTATE_WaitingForOpenSessRsp, 0x01);
		gbuf_freeb(mdata);
		return 0;

	case ASPIOC_StatusBlock:
		/*
		 * save the server status block
		 */
		if (scb->stat_msg)
			gbuf_freem(scb->stat_msg);
		scb->stat_msg = gbuf_cont(mioc);
		gbuf_cont(mioc) = 0;
		break;

	/* *** Does scb->pid get used in a packet header,
	       and if so is it in ASP, or in ATP?
	       If not, do we need this call for anything?
	       (cap does currently use it in _ANS code.)
	   *** */
	case ASPIOC_SetPid:
		if (gbuf_cont(mioc) == 0) {
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		scb->pid = *(int *)gbuf_rptr(gbuf_cont(mioc));
		break;

	case ASPIOC_GetSessId:
		if (gbuf_cont(mioc) == 0) {
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		*(gref_t **)gbuf_rptr(gbuf_cont(mioc)) = gref;
		break;

	case ASPIOC_Look:
		if (gbuf_cont(mioc) == 0) {
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		/* report what kind of message (if any) is waiting:
		 * 0 = reply, 1 = command, -1 = nothing queued */
		if (scb->sess_ioc) {
			primitives = (union asp_primitives *)gbuf_rptr(scb->sess_ioc);
			if (primitives->Primitive == ASPFUNC_CmdReply)
				*(int *)gbuf_rptr(gbuf_cont(mioc)) = 0;
			else
				*(int *)gbuf_rptr(gbuf_cont(mioc)) = 1;
		} else
			*(int *)gbuf_rptr(gbuf_cont(mioc)) = -1;
		break;

	case DDP_IOC_GET_CFG:
	{
		struct atp_state *atp = (struct atp_state *)gref->info;
		/* NOTE(review): no cast here -- atp_msgq is a gbuf_t *
		 * being treated as the real atp_state pointer; verify this
		 * deliberate type pun against atp.h */
		if (atp->dflag)
			atp = atp->atp_msgq;

		if (gbuf_cont(mioc) == 0) {
			asp_iocnak(gref, mioc, EINVAL);
			return 0;
		}
		/* *** borrowed from ddp_proto.c to handle DDP_IOC_GET_CFG
		   on atp fd *** */
		scb->state = ASPSTATE_Idle;
		{
			/* *** was ddp_get_cfg() *** */
			ddp_addr_t *cfgp =
				(ddp_addr_t *)gbuf_rptr(gbuf_cont(mioc));
			cfgp->inet.net = ifID_home->ifThisNode.s_net;
			cfgp->inet.node = ifID_home->ifThisNode.s_node;
			cfgp->inet.socket = atp->atp_socket_no;
			cfgp->ddptype = DDP_ATP;
		}
		gbuf_wset(gbuf_cont(mioc), sizeof(at_inet_t));
	}
	break;

	default:
		asp_iocnak(gref, mioc, EINVAL);
		return 0;
	}

	asp_iocack(gref, mioc);
	return 0;
} /* asp_wput */

/*
 * send request routine
 */
StaticProc
int +asp_send_req(gref, mioc, dest, retry, awp, xo, state, bitmap) + gref_t *gref; + gbuf_t *mioc; + at_inet_t *dest; + at_retry_t *retry; + asp_word_t *awp; + unsigned char xo; + unsigned char state; + unsigned char bitmap; +{ + int i; + gbuf_t *mdata; + ioc_t *iocbp; + struct atp_set_default *sd; + at_ddp_t *ddp; + at_atp_t *atp; + struct atpBDS *atpBDS; + asp_scb_t *scb = (asp_scb_t *)gref->info; + + /* + * allocate an ATP buffer for the request + */ + if ((gbuf_cont(mioc) = gbuf_alloc(aspCMDsize, PRI_MED)) == 0) { + if (awp->func == ASPFUNC_Tickle) + gbuf_freem(mioc); + else + asp_iocnak(gref, mioc, ENOBUFS); + dPrintf(D_M_ASP, D_L_WARNING, + ("asp_send_req: ENOBUFS, loc=%d\n", scb->loc_addr.socket)); + + return -1; + } + mdata = gbuf_cont(mioc); + iocbp = (ioc_t *)gbuf_rptr(mioc); + + /* + * build the request + */ + atpBDS = (struct atpBDS *)gbuf_rptr(mdata); + gbuf_wset(mdata,atpBDSsize); + for (i=0; i < ATP_TRESP_MAX; i++) { + *(unsigned long *)atpBDS[i].bdsBuffAddr = 1; + *(unsigned short *)atpBDS[i].bdsBuffSz = ATP_DATA_SIZE; + } + sd = (struct atp_set_default *)gbuf_wptr(mdata); + gbuf_winc(mdata,sizeof(struct atp_set_default)); + sd->def_retries = (retry->retries == -1) ? 
+ ATP_INFINITE_RETRIES : retry->retries; + sd->def_rate = retry->interval*TICKS_PER_SEC; + sd->def_BDSlen = atpBDSsize; + ddp = (at_ddp_t *)gbuf_wptr(mdata); + NET_ASSIGN(ddp->src_net, scb->loc_addr.net); + ddp->src_node = scb->loc_addr.node; + NET_ASSIGN(ddp->dst_net, dest->net); + ddp->dst_node = dest->node; + ddp->dst_socket = dest->socket; + UAS_ASSIGN(ddp->checksum, 0); + atp = ATP_ATP_HDR(gbuf_wptr(mdata)); + atp->xo = xo; + atp->xo_relt = xo; + atp->bitmap = bitmap; + gbuf_winc(mdata,TOTAL_ATP_HDR_SIZE); + *(asp_word_t *)atp->user_bytes = *awp; + iocbp->ioc_count = gbuf_len(mdata); + iocbp->ioc_rval = 0; + + /* + * send the request + */ + scb->state = state; + dPrintf(D_M_ASP, D_L_INFO, + ("asp_send_req: %s, loc=%d, rem= %d, len=%d, state=%s\n", + aspCmdStr(awp->func), + scb->loc_addr.socket, ddp->dst_socket, iocbp->ioc_count, + aspStateStr(scb->state))); + + atp_send_req(gref, mioc); + return 0; +} + +/* + * send tickle routine - funnelled version + */ +StaticProc void +asp_send_tickle_funnel(scb) + asp_scb_t *scb; +{ + thread_funnel_set(network_flock, TRUE); + asp_send_tickle(scb); + thread_funnel_set(network_flock, FALSE); +} + + +/* + * send tickle routine + */ +StaticProc void +asp_send_tickle(scb) + asp_scb_t *scb; +{ + gbuf_t *mioc; + at_retry_t retry; + asp_word_t aw; + at_inet_t *dest; + + + /* + * make sure the connection is still there + */ + if (scb->rem_addr.node == 0) { + return; + } + + if ((mioc = gbuf_alloc(sizeof(ioc_t), PRI_HI)) == 0) { + dPrintf(D_M_ASP, D_L_WARNING, + ("asp_send_tickle: ENOBUFS 0, loc=%d, rem=%d\n", + scb->loc_addr.socket,scb->rem_addr.socket)); + timeout(asp_send_tickle_funnel, (void *)scb, 10); + return; + } + gbuf_wset(mioc,sizeof(ioc_t)); + gbuf_set_type(mioc, MSG_IOCTL); + + dest = scb->svc_addr.node ? 
+ (at_inet_t *)&scb->svc_addr : (at_inet_t *)&scb->rem_addr; + retry.interval = scb->tickle_interval; + retry.retries = -1; + retry.backoff = 1; + aw.func = ASPFUNC_Tickle; + aw.param1 = scb->sess_id; + aw.param2 = 0; + ((ioc_t *)gbuf_rptr(mioc))->ioc_cr = (void *)scb; + ((ioc_t *)gbuf_rptr(mioc))->ioc_cmd = AT_ATP_ISSUE_REQUEST_TICKLE; + + if (asp_send_req(scb->gref, mioc, dest, &retry, &aw, 0, scb->state, 0)) { + dPrintf(D_M_ASP, D_L_WARNING, + ("asp_send_tickle: ENOBUFS 1, loc=%d, rem=%d\n", + scb->loc_addr.socket,scb->rem_addr.socket)); + + timeout(asp_send_tickle_funnel, (void *)scb, 10); + return; + } +} + +/* + * accept connection routine + */ +StaticProc void +asp_accept(scb, sess_scb, m) + asp_scb_t *scb; + asp_scb_t *sess_scb; + gbuf_t *m; +{ + int s; + gbuf_t *mdata; + at_ddp_t *ddp; + at_atp_t *atp; + asp_word_t *awp; + at_inet_t rem_addr; + + mdata = scb->sess_ioc; + ddp = (at_ddp_t *)gbuf_rptr(mdata); + atp = (at_atp_t *)(gbuf_rptr(mdata) + DDP_X_HDR_SIZE); + rem_addr.net = NET_VALUE(ddp->src_net); + rem_addr.node = ddp->src_node; + rem_addr.socket = ddp->src_socket; + awp = (asp_word_t *)atp->user_bytes; + + sess_scb->loc_addr.net = NET_VALUE(ddp->dst_net); + sess_scb->loc_addr.node = ddp->dst_node; + NET_ASSIGN(ddp->src_net, sess_scb->loc_addr.net); + ddp->src_node = sess_scb->loc_addr.node; + NET_ASSIGN(ddp->dst_net, rem_addr.net); + ddp->dst_node = rem_addr.node; + ddp->dst_socket = rem_addr.socket; + + sess_scb->sess_id = sess_scb->loc_addr.socket; + sess_scb->rem_socket = rem_addr.socket; + sess_scb->rem_addr = rem_addr; + sess_scb->rem_addr.socket = awp->param1; + sess_scb->reply_socket = sess_scb->rem_addr.socket; + awp->func = sess_scb->loc_addr.socket; + awp->param1 = sess_scb->sess_id; + awp->param2 = 0; + gbuf_freeb(m); + ATDISABLE(s, scb->lock); + scb->sess_ioc = gbuf_next(mdata); + ATENABLE(s, scb->lock); + gbuf_next(mdata) = 0; + asp_timout(asp_hangup, sess_scb, sess_scb->session_timer); + atp_send_rsp(scb->gref, mdata, TRUE); + 
asp_send_tickle(sess_scb); + dPrintf(D_M_ASP, D_L_INFO, + ("asp_accept: ACCEPT connect request, loc=%d, rem=%x.%x.%d\n", + sess_scb->loc_addr.socket, + sess_scb->rem_addr.net, + sess_scb->rem_addr.node,sess_scb->rem_addr.socket)); +} /* asp_accept */ + +/* + * timer routine - funneled version + */ +void asp_clock_funnel(arg) + void *arg; +{ + thread_funnel_set(network_flock, TRUE); + asp_clock(arg); + thread_funnel_set(network_flock, FALSE); +} + +/* + * timer routine + */ +void asp_clock(arg) + void *arg; +{ + int s; + asp_scb_t *scb; + void (*tmo_func)(); + + ATDISABLE(s, asptmo_lock); + if (scb_tmo_list) + scb_tmo_list->tmo_delta--; + while (((scb = scb_tmo_list) != 0) && (scb_tmo_list->tmo_delta == 0)) { + if ((scb_tmo_list = scb->next_tmo) != 0) + scb_tmo_list->prev_tmo = 0; + if ((tmo_func = scb->tmo_func) != 0) { + scb->tmo_func = 0; + ATENABLE(s, asptmo_lock); + (*tmo_func)(scb); + ATDISABLE(s, asptmo_lock); + } + } + ATENABLE(s, asptmo_lock); + + if (++scb_tmo_cnt == 0) scb_tmo_cnt++; + timeout(asp_clock_funnel, (void *)arg, (1<ioc_cmd == AT_ATP_ISSUE_REQUEST_TICKLE) { + /* + * ignore the ack for the tickle request + */ + scb = (asp_scb_t *)iocbp->ioc_cr; + scb->tickle_tid = (unsigned short)iocbp->ioc_rval; + gbuf_freem(mioc); + return; + } + + scb = (asp_scb_t *)gref->info; + if (scb == 0) { + gbuf_freem(mioc); + return; + } + + if (iocbp->ioc_cmd == AT_ATP_GET_POLL) { + /* + * if no data, just drop the request + */ + if ((mdata = gbuf_cont(mioc)) == 0) { + gbuf_freeb(mioc); + return; + } + + gbuf_set_type(mioc, MSG_IOCTL); + ddp = (at_ddp_t *)gbuf_rptr(mdata); + gbuf_rinc(mdata,DDP_X_HDR_SIZE); + atp = (at_atp_t *)gbuf_rptr(mdata); + gbuf_rinc(mdata,ATP_HDR_SIZE); + rem_addr.net = NET_VALUE(ddp->src_net); + rem_addr.node = ddp->src_node; + rem_addr.socket = ddp->src_socket; + awp = (asp_word_t *)atp->user_bytes; + + if (scb->next_scb) { + /* + * find the responsible scb + */ + if ((scb = asp_find_scb(scb->loc_addr.socket, &rem_addr)) == 0) { + 
gbuf_freem(mioc); + return; + } + } + dPrintf(D_M_ASP, D_L_INFO, + ("asp_ack_reply: %s, loc=%d, rem=%x.%x.%d\n", + aspCmdStr(awp->func),scb->loc_addr.socket, + NET_VALUE(ddp->src_net) ,ddp->src_node,ddp->src_socket)); + + if (scb->rem_addr.node) + asp_untimout(asp_hangup, scb); + + switch (awp->func) { + case ASPFUNC_GetStatus: + /* + * ignore if this is not a server socket + */ + mx = 0; + if ((scb->dflag != 1) || (scb->stat_msg + && ((mx = gbuf_dupb(scb->stat_msg)) == 0))) + break; + gbuf_freeb(mioc); + + /* + * send the status block + */ + if (gbuf_cont(mdata)) { + gbuf_freem(gbuf_cont(mdata)); + gbuf_cont(mdata) = 0; + } + gbuf_rdec(mdata,TOTAL_ATP_HDR_SIZE); + if ((m = gbuf_alloc( (TOTAL_ATP_HDR_SIZE+atpBDSsize), PRI_MED)) == 0) { + gbuf_freem(mdata); + gbuf_freeb(mx); + goto l_done; + } + bcopy(gbuf_rptr(mdata), gbuf_rptr(m), TOTAL_ATP_HDR_SIZE); + gbuf_freeb(mdata); + mdata = m; + ddp = (at_ddp_t *)gbuf_rptr(mdata); + gbuf_wset(mdata,DDP_X_HDR_SIZE); + atp = (at_atp_t *)gbuf_wptr(mdata); + gbuf_winc(mdata,ATP_HDR_SIZE); + awp = (asp_word_t *)atp->user_bytes; + NET_NET(ddp->src_net, ddp->dst_net); + ddp->src_node = ddp->dst_node; + NET_ASSIGN(ddp->dst_net, rem_addr.net); + ddp->dst_node = rem_addr.node; + ddp->dst_socket = rem_addr.socket; + UAS_ASSIGN(ddp->checksum, 0); + atpBDS = (struct atpBDS *)gbuf_wptr(mdata); + msize = mx ? gbuf_msgsize(mx) : 0; + for (nbds=0; (nbds < ATP_TRESP_MAX) && (msize > 0); nbds++) { + len = msize < ATP_DATA_SIZE ? 
msize : ATP_DATA_SIZE; + msize -= ATP_DATA_SIZE; + *(long *)atpBDS[nbds].bdsUserData = 0; + UAL_ASSIGN(atpBDS[nbds].bdsBuffAddr, 1); + UAS_ASSIGN(atpBDS[nbds].bdsBuffSz, len); + } + UAS_ASSIGN(atpBDS[0].bdsDataSz, nbds); + gbuf_winc(mdata,atpBDSsize); + gbuf_cont(mdata) = mx; + atp_send_rsp(gref, mdata, FALSE); + goto l_done; + + case ASPFUNC_OpenSess: + /* + * ignore if server is not ready + */ + if ((scb->dflag != 1) || (scb->stat_msg == 0)) + break; + gbuf_freeb(mioc); + + if (gbuf_cont(mdata)) { + gbuf_freem(gbuf_cont(mdata)); + gbuf_cont(mdata) = 0; + } + gbuf_rdec(mdata,TOTAL_ATP_HDR_SIZE); + gbuf_wset(mdata,TOTAL_ATP_HDR_SIZE); + if (awp->param2 != ASP_Version) { + /* + * bad version number, send the OpenSession response + */ + awp->func = 0; + awp->param1 = 0; + awp->param2 = (unsigned short)ASPERR_BadVersNum; + dPrintf(D_M_ASP, D_L_INFO, + (" : version=%d\n", + ASPERR_BadVersNum)); + + NET_NET(ddp->src_net, ddp->dst_net); + ddp->src_node = ddp->dst_node; + NET_ASSIGN(ddp->dst_net, rem_addr.net); + ddp->dst_node = rem_addr.node; + ddp->dst_socket = rem_addr.socket; + atp_send_rsp(gref, mdata, FALSE); + return; + } + + /* + * queue the connection request + */ + ATDISABLE(s, scb->lock); + gbuf_next(mdata) = 0; + if ((m = scb->sess_ioc) == 0) { + scb->sess_ioc = mdata; + if (scb->get_wait) + thread_wakeup(&scb->event); + else + atalk_notify_sel(gref); + } else { + while (gbuf_next(m)) + m = gbuf_next(m); + gbuf_next(m) = mdata; + } + ATENABLE(s, scb->lock); + dPrintf(D_M_ASP, D_L_INFO, + (" : QUEUE connect request\n")); + + return; + + case ASPFUNC_Command: + case ASPFUNC_Write: + if ( (scb->sess_id != awp->param1) + || (scb->rcv_seq_num != awp->param2) + || BAD_REMADDR(rem_addr) ) { + char era[8], ra[8]; + sprintf(era,"%d.%d", scb->rem_addr.node,scb->rem_addr.socket); + sprintf(ra,"%d.%d", rem_addr.node,rem_addr.socket); + dPrintf(D_M_ASP, D_L_WARNING, + (" : DROP, id=%d,esn=%d,sn=%d,erem=%s,rem=%s\n", + scb->sess_id,scb->rcv_seq_num,awp->param2,era,ra)); + 
gbuf_cont(mioc) = 0; + gbuf_rdec(mdata,TOTAL_ATP_HDR_SIZE); + atp_drop_req(gref, mdata); + break; + } + scb->reply_socket = rem_addr.socket; + if (awp->func == ASPFUNC_Write) + scb->wrt_seq_num = scb->rcv_seq_num; + NEXT_SEQ_NUM(scb->rcv_seq_num); + gbuf_set_type(mioc, MSG_PROTO); + gbuf_wset(mioc,sizeof(asp_command_ind_t)); + command_ind = (asp_command_ind_t *)gbuf_rptr(mioc); + command_ind->Primitive = (int)awp->func; + command_ind->ReqRefNum = + *(unsigned short *)atp->tid; + command_ind->ReqType = awp->func; + + mdata = gbuf_strip(mdata); + gbuf_cont(mioc) = mdata; + ATDISABLE(s, scb->lock); + if (scb->req_flag) { + if ((mx = scb->req_msgq) != 0) { + while (gbuf_next(mx)) + mx = gbuf_next(mx); + gbuf_next(mx) = mioc; + } else + scb->req_msgq = mioc; + ATENABLE(s, scb->lock); + } else { + scb->req_flag = 1; + ATENABLE(s, scb->lock); + asp_putnext(scb->gref, mioc); + } + goto l_done; + + case ASPFUNC_WriteContinue: + if ( (scb->sess_id != awp->param1) + || (scb->snd_seq_num != awp->param2) + || BAD_REMADDR(rem_addr) ) { + break; + } + scb->reply_socket = rem_addr.socket; + gbuf_set_type(mioc, MSG_PROTO); + gbuf_wset(mioc,sizeof(asp_command_ind_t)); + command_ind = (asp_command_ind_t *)gbuf_rptr(mioc); + command_ind->Primitive = (int)awp->func; + command_ind->ReqRefNum = + *(unsigned short *)atp->tid; + command_ind->ReqType = awp->func; + + mdata = gbuf_strip(mdata); + gbuf_cont(mioc) = mdata; + asp_putnext(scb->gref, mioc); + goto l_done; + + case ASPFUNC_Tickle: + if (scb->stat_msg) { + sess_scb = asp_scbQ[awp->param1]; + if (sess_scb && sess_scb->next_scb) + sess_scb = asp_find_scb( + sess_scb->loc_addr.socket, &rem_addr); + if (sess_scb) { + if (sess_scb->rem_addr.node) + asp_untimout(asp_hangup, sess_scb); + if (sess_scb->rem_addr.node) + asp_timout(asp_hangup, sess_scb, sess_scb->session_timer); + } + } + dPrintf(D_M_ASP, D_L_INFO, + (" : Tickle, %d -> %d, id=%d\n", + ddp->src_socket,ddp->dst_socket,awp->param1)); + break; + + case ASPFUNC_CloseSess: + if ( 
(scb->sess_id != awp->param1) + || (scb->state == ASPSTATE_Close) + || (scb->state == ASPSTATE_WaitingForCloseSessRsp) + || (scb->rem_addr.net != rem_addr.net) + || (scb->rem_addr.node != rem_addr.node) ) { + dPrintf(D_M_ASP, D_L_INFO, + (" : CLOSE retry, loc=%d, rem=%x.%x.%d\n", + scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, + scb->rem_addr.socket)); + + break; + } + gbuf_freeb(mioc); + + /* + * build the CloseSess response to be sent to peer + * when the session is closed by the user. + */ + if (gbuf_cont(mdata)) { + gbuf_freem(gbuf_cont(mdata)); + gbuf_cont(mdata) = 0; + } + gbuf_rdec(mdata,TOTAL_ATP_HDR_SIZE); + gbuf_wset(mdata,TOTAL_ATP_HDR_SIZE); + NET_NET(ddp->src_net, ddp->dst_net); + ddp->src_node = ddp->dst_node; + NET_ASSIGN(ddp->dst_net, rem_addr.net); + ddp->dst_node = rem_addr.node; + ddp->dst_socket = rem_addr.socket; + awp->func = 0; + awp->param1 = 0; + awp->param2 = 0; + dPrintf(D_M_ASP,D_L_INFO, + (" : CLOSE, loc=%d, rem=%x.%x.%d\n", + scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, + scb->rem_addr.socket)); + + gbuf_next(mdata) = 0; + ATDISABLE(s, scb->lock); + if (scb->sess_ioc) + gbuf_freel(scb->sess_ioc); + scb->sess_ioc = mdata; + scb->state = ASPSTATE_Close; + ATENABLE(s, scb->lock); + + /* + * notify upstream of the CloseSess from peer + */ + asp_hangup(scb); + return; + + case ASPFUNC_Attention: + if ( (scb->sess_id != awp->param1) + || (scb->rem_addr.net != rem_addr.net) + || (scb->rem_addr.node != rem_addr.node) ) { + break; + } + gbuf_set_type(mioc, MSG_PROTO); + gbuf_wset(mioc,sizeof(asp_command_ind_t)); + command_ind = (asp_command_ind_t *)gbuf_rptr(mioc); + command_ind->Primitive = (int)awp->func; + command_ind->ReqRefNum = + *(unsigned short *)atp->tid; + command_ind->ReqType = awp->func; + scb->attn_tid = *(unsigned short *)atp->tid; + scb->attn_flag = 1; + gbuf_rdec(mdata,2); /* attention code */ + + mdata = gbuf_strip(mdata); + gbuf_cont(mioc) = mdata; + asp_putnext(scb->gref, mioc); + goto 
l_done; + + default: + dPrintf(D_M_ASP, D_L_WARNING, + (" : UNKNOWN func, func=%d\n", + awp->func)); + + break; + } + } + + else if (iocbp->ioc_cmd == AT_ATP_REQUEST_COMPLETE) { + if (scb->next_scb) { + /* + * find the responsible scb + */ + scb = (asp_scb_t *)iocbp->ioc_private; + if ((scb == 0) || (scb->magic_num != 222)) { + dPrintf(D_M_ASP, D_L_ERROR, + ("asp_ack_reply: CAN'T find scb 1\n")); + gbuf_freem(mioc); + return; + } + } + dPrintf(D_M_ASP, D_L_INFO, + ("asp_ack_reply: RSP, loc=%d, rem=%x.%x.%d, state=%s\n", + scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, + scb->rem_addr.socket, + aspStateStr(scb->state))); + + switch (scb->state) { + case ASPSTATE_Close: + case ASPSTATE_Idle: + scb->rem_addr.node = 0; + gbuf_freem(mioc); + ATDISABLE(s, scb->lock); + if (scb->get_wait) + thread_wakeup(&scb->event); + else + atalk_notify_sel(gref); + ATENABLE(s, scb->lock); + return; + + case ASPSTATE_WaitingForGetStatusRsp: + scb->ioc_wait = 0; + scb->state = ASPSTATE_Idle; + mx = gbuf_cont(mioc); + gbuf_cont(mioc) = 0; + mdata = gbuf_cont(mx); + gbuf_cont(mx) = 0; + iocbp->ioc_cmd = ASPIOC_GetStatus; + iocbp->ioc_count = 0; + iocbp->ioc_rval = mdata ? 
gbuf_msgsize(mdata) : 0; + gbuf_freeb(mx); + atalk_putnext(gref, mioc); + atalk_putnext(gref, mdata); + return; + + case ASPSTATE_WaitingForOpenSessRsp: + scb->ioc_wait = 0; + scb->state = ASPSTATE_Idle; + mx = gbuf_cont(mioc); + gbuf_cont(mioc) = 0; + if (gbuf_cont(mx)) { + gbuf_freem(gbuf_cont(mx)); + gbuf_cont(mx) = 0; + } + iocbp->ioc_cmd = ASPIOC_OpenSession; + iocbp->ioc_rval = 0; + iocbp->ioc_count = 0; + atpBDS = (struct atpBDS *)gbuf_rptr(mx); + awp = (asp_word_t *)atpBDS->bdsUserData; + if (awp->param2) { + gbuf_freeb(mx); + asp_iocnak(gref, mioc, ECONNREFUSED); + } else { + scb->rem_addr.node = scb->rem_node; + scb->rem_addr.socket = awp->func; + scb->sess_id = awp->param1; + gbuf_freeb(mx); + atalk_putnext(gref, mioc); + asp_timout(asp_hangup, scb, scb->session_timer); + asp_send_tickle(scb); + dPrintf(D_M_ASP, D_L_INFO, + ("asp_ack_reply: CONNECT, loc=%d, rem=%x.%x.%d\n", + scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, + scb->rem_addr.socket)); + } + return; + + case ASPSTATE_WaitingForCommandRsp: + case ASPSTATE_WaitingForWriteRsp: + case ASPSTATE_WaitingForWriteContinueRsp: + if (scb->rem_addr.node) + asp_untimout(asp_hangup, scb); + NEXT_SEQ_NUM(scb->snd_seq_num); + scb->state = ASPSTATE_Idle; + gbuf_set_type(mioc, MSG_PROTO); + mx = gbuf_cont(mioc); + mdata = gbuf_cont(mx); + gbuf_cont(mioc) = mdata; + atpBDS = (struct atpBDS *)gbuf_rptr(mx); + cmdreply_ind = (asp_cmdreply_ind_t *)gbuf_rptr(mioc); + cmdreply_ind->Primitive = ASPFUNC_CmdReply; + cmdreply_ind->CmdResult = *(int *)atpBDS->bdsUserData; + gbuf_wset(mioc,sizeof(asp_cmdreply_ind_t)); + gbuf_freeb(mx); + asp_putnext(scb->gref, mioc); + goto l_done; + + case ASPSTATE_WaitingForCloseSessRsp: + scb->ioc_wait = 0; + scb->state = ASPSTATE_Close; + scb->rem_addr.node = 0; + iocbp->ioc_cmd = ASPIOC_CloseSession; + iocbp->ioc_rval = 0; + if (gbuf_cont(mioc)) { + gbuf_freem(gbuf_cont(mioc)); + gbuf_cont(mioc) = 0; + } + atalk_putnext(scb->gref, mioc); + atp_cancel_req(scb->gref, 
(unsigned int)scb->tickle_tid); + scb->tickle_tid = 0; + return; + + default: + dPrintf(D_M_ASP, D_L_WARNING, + (" : UNKNOWN state, state=%s\n", + aspStateStr(scb->state))); + break; + } + } + + else { + if (scb->next_scb) { + /* + * find the responsible scb + */ + scb = (asp_scb_t *)iocbp->ioc_cr; + if ((scb == 0) || (scb->magic_num != 222)) { + dPrintf(D_M_ASP, D_L_ERROR, + ("asp_ack_reply: CAN'T find scb 2\n")); + gbuf_freem(mioc); + return; + } + } + + switch (scb->state) { + case ASPSTATE_Close: + scb->rem_addr.node = 0; + break; + } + } + + if (mioc != 0) + gbuf_freem(mioc); + +l_done: + if (scb->rem_addr.node) + asp_timout(asp_hangup, scb, scb->session_timer); +} /* asp_ack_reply */ + +/* + * NAK reply routine + */ +void +asp_nak_reply(gref, mioc) + register gref_t *gref; + register gbuf_t *mioc; +{ + register asp_scb_t *scb; + register ioc_t *iocbp; + + iocbp = (ioc_t *)gbuf_rptr(mioc); + + if (iocbp->ioc_cmd == AT_ATP_ISSUE_REQUEST_TICKLE) { + /* + * no tickle, close session + */ + scb = (asp_scb_t *)iocbp->ioc_cr; + gbuf_freem(mioc); + asp_hangup(scb); + dPrintf(D_M_ASP, D_L_WARNING, + ("tickle_nak: loc=%d, rem=%x.%x.%d, state=%s\n", + scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, + scb->rem_addr.socket, + aspStateStr(scb->state))); + + return; + } + + scb = (asp_scb_t *)gref->info; + if (scb == 0) { + gbuf_freem(mioc); + return; + } + + if (iocbp->ioc_cmd == AT_ATP_REQUEST_COMPLETE) { + if (scb->next_scb) { + /* + * find the responsible scb + */ + scb = (asp_scb_t *)iocbp->ioc_private; + if ((scb == 0) || (scb->magic_num != 222)) { + dPrintf(D_M_ASP, D_L_ERROR, + ("asp_nak_reply: CAN'T find scb 1\n")); + gbuf_freem(mioc); + return; + } + } + dPrintf(D_M_ASP, D_L_WARNING, + ("asp_nak_reply: RSP, loc=%d, rem=%x.%x.%d, state=%s\n", + scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, + scb->rem_addr.socket, + aspStateStr(scb->state))); + + switch (scb->state) { + case ASPSTATE_WaitingForGetStatusRsp: + iocbp->ioc_cmd = 
ASPIOC_GetStatus; + break; + + case ASPSTATE_WaitingForOpenSessRsp: + iocbp->ioc_cmd = ASPIOC_OpenSession; + break; + + case ASPSTATE_WaitingForCommandRsp: + case ASPSTATE_WaitingForWriteRsp: + case ASPSTATE_WaitingForWriteContinueRsp: + scb->state = ASPSTATE_Idle; + + /* last remaining use of MSG_ERROR */ + gbuf_set_type(mioc, MSG_ERROR); + *gbuf_rptr(mioc) = (u_char)EPROTO; + gbuf_wset(mioc, 1); + if (gbuf_cont(mioc)) { + gbuf_freem(gbuf_cont(mioc)); + gbuf_cont(mioc) = 0; + } + + asp_putnext(scb->gref, mioc); + return; + + case ASPSTATE_WaitingForCloseSessRsp: + scb->state = ASPSTATE_Close; + /* fall through */ + case ASPSTATE_Close: /* new for PR-2296832 */ + scb->rem_addr.node = 0; + iocbp->ioc_cmd = ASPIOC_CloseSession; + iocbp->ioc_rval = 0; + if (gbuf_cont(mioc)) { + gbuf_freem(gbuf_cont(mioc)); + gbuf_cont(mioc) = 0; + } + gbuf_set_type(mioc, MSG_IOCACK); + atalk_putnext(scb->gref, mioc); + return; + + default: + gbuf_freem(mioc); + return; + } + scb->state = ASPSTATE_Idle; + atalk_putnext(gref, mioc); + } + + else { + if (scb->next_scb) { + /* + * find the responsible scb + */ + scb = (asp_scb_t *)iocbp->ioc_cr; + if ((scb == 0) || (scb->magic_num != 222)) { + dPrintf(D_M_ASP, D_L_ERROR, + ("asp_nak_reply: CAN'T find scb 2\n")); + gbuf_freem(mioc); + return; + } + } + + switch (scb->state) { + case ASPSTATE_Close: + scb->rem_addr.node = 0; + break; + } + + gbuf_freem(mioc); + } +} /* asp_nak_reply */ + +/* + * delete scb from the use list + */ +StaticProc void +asp_dequeue_scb(scb) + asp_scb_t *scb; +{ + int s; + + ATDISABLE(s, aspall_lock); + if (scb == scb_used_list) { + if ((scb_used_list = scb->next_scb) != 0) + scb->next_scb->prev_scb = 0; + } else { + if ((scb->prev_scb->next_scb = scb->next_scb) != 0) + scb->next_scb->prev_scb = scb->prev_scb; + } + ATENABLE(s, aspall_lock); + + scb->next_scb = 0; + scb->prev_scb = 0; +} + +/* + * find scb routine + */ +StaticProc asp_scb_t * +asp_find_scb(sock_num, rem_addr) + unsigned char sock_num; + at_inet_t 
*rem_addr; +{ + int s; + asp_scb_t *scb; + asp_scb_t *alt_scb = 0; + + ATDISABLE(s, aspall_lock); + for (scb = asp_scbQ[sock_num]; scb; scb = scb->next_scb) { + if ((scb->rem_addr.net == rem_addr->net) + && (scb->rem_addr.node == rem_addr->node)) { + if ((scb->rem_addr.socket == rem_addr->socket) + || (scb->rem_socket == rem_addr->socket)) + break; + else if (alt_scb == 0) + alt_scb = scb; + } + } + + if ((scb == 0) && ((scb = alt_scb) == 0)) { + dPrintf(D_M_ASP, D_L_ERROR, + ("asp_find_scb: CAN'T find scb, loc=%d, rem=%x.%x.%d\n", + sock_num, + rem_addr->net, + rem_addr->node, + rem_addr->socket)); + } + ATENABLE(s, aspall_lock); + + return scb; +} + +/* + * timout routine + */ +StaticProc void +asp_timout(func, scb, seconds) + void (*func)(); + register asp_scb_t *scb; + int seconds; +{ + int s; + unsigned char sum; + register asp_scb_t *curr_scb, *prev_scb; + + if (scb->tmo_func) + return; + + scb->tmo_func = func; + scb->tmo_delta = (seconds>>SESS_TMO_RES); + scb->tmo_cnt = scb_tmo_cnt; + + ATDISABLE(s, asptmo_lock); + if (scb_tmo_list == 0) { + scb->next_tmo = scb->prev_tmo = 0; + scb_tmo_list = scb; + ATENABLE(s, asptmo_lock); + return; + } + + prev_scb = 0; + curr_scb = scb_tmo_list; + sum = 0; + + while (1) { + sum += curr_scb->tmo_delta; + if (sum > scb->tmo_delta) { + sum -= curr_scb->tmo_delta; + scb->tmo_delta -= sum; + curr_scb->tmo_delta -= scb->tmo_delta; + break; + } + prev_scb = curr_scb; + if ((curr_scb = curr_scb->next_tmo) == 0) { + scb->tmo_delta -= sum; + break; + } + } + + if (prev_scb) { + scb->prev_tmo = prev_scb; + if ((scb->next_tmo = prev_scb->next_tmo) != 0) + prev_scb->next_tmo->prev_tmo = scb; + prev_scb->next_tmo = scb; + } else { + scb->prev_tmo = 0; + scb->next_tmo = scb_tmo_list; + scb_tmo_list->prev_tmo = scb; + scb_tmo_list = scb; + } + ATENABLE(s, asptmo_lock); +} + +/* + * untimout routine + */ +StaticProc void +asp_untimout(func, scb) + void (*func)(); + register asp_scb_t *scb; +{ + int s; + + if ((scb->tmo_cnt == 
scb_tmo_cnt) || (scb->tmo_func == 0)) + return; + + ATDISABLE(s, asptmo_lock); + if (scb_tmo_list == scb) { + if ((scb_tmo_list = scb->next_tmo) != 0) { + scb_tmo_list->prev_tmo = 0; + scb->next_tmo->tmo_delta += scb->tmo_delta; + } + } else if (scb->prev_tmo) { + if ((scb->prev_tmo->next_tmo = scb->next_tmo) != 0) { + scb->next_tmo->prev_tmo = scb->prev_tmo; + scb->next_tmo->tmo_delta += scb->tmo_delta; + } + scb->prev_tmo = 0; + } + scb->tmo_func = 0; + ATENABLE(s, asptmo_lock); +} + +/* + * hangup routine + */ +StaticProc void +asp_hangup(scb) + asp_scb_t *scb; +{ + int s; + + /* + * set the state to Close + */ + ATDISABLE(s, scb->lock); + scb->state = ASPSTATE_Close; + if (scb->tickle_tid) { + atp_cancel_req(scb->gref, (unsigned int)scb->tickle_tid); + scb->tickle_tid = 0; + } + + /* + * notify upstream of the hangup + */ + if (scb->rem_addr.node) { + if (scb->get_wait) { + thread_wakeup(&scb->event); + ATENABLE(s, scb->lock); + } else { + ATENABLE(s, scb->lock); + atalk_notify_sel(scb->gref); + } + } else + ATENABLE(s, scb->lock); +} + +StaticProc void +asp_iocack(gref, mioc) + gref_t *gref; + gbuf_t *mioc; +{ + if (gbuf_cont(mioc)) + ((ioc_t *)gbuf_rptr(mioc))->ioc_count = gbuf_msgsize(gbuf_cont(mioc)); + else + ((ioc_t *)gbuf_rptr(mioc))->ioc_count = 0; + + gbuf_set_type(mioc, MSG_IOCACK); + atalk_putnext(gref, mioc); +} + +StaticProc void +asp_iocnak(gref, mioc, err) + gref_t *gref; + gbuf_t *mioc; + int err; +{ + ((ioc_t *)gbuf_rptr(mioc))->ioc_count = 0; + if (err == 0) + err = ENXIO; + ((ioc_t *)gbuf_rptr(mioc))->ioc_error = err; + ((ioc_t *)gbuf_rptr(mioc))->ioc_rval = -1; + if (gbuf_cont(mioc)) { + gbuf_freem(gbuf_cont(mioc)); + gbuf_cont(mioc) = 0; + } + + gbuf_set_type(mioc, MSG_IOCNAK); + atalk_putnext(gref, mioc); +} + +/* + * the alloc scb routine + */ +StaticProc asp_scb_t * +asp_scb_alloc() +{ + int s, i; + gbuf_t *m; + asp_scb_t *scb, *scb_array; + + ATDISABLE(s, aspall_lock); + if (scb_free_list == 0) { + if ((m = 
gbuf_alloc(SCBS_PER_BLK*sizeof(asp_scb_t), PRI_MED)) == 0) + { + ATENABLE(s, aspall_lock); + return (asp_scb_t *)0; + } + bzero((char *)gbuf_rptr(m), SCBS_PER_BLK*sizeof(asp_scb_t)); + gbuf_cont(m) = scb_resource_m; + scb_resource_m = m; + scb_array = (asp_scb_t *)gbuf_rptr(m); + for (i=0; i < SCBS_PER_BLK-1; i++) + scb_array[i].next_scb = (asp_scb_t *)&scb_array[i+1]; + scb_array[i].next_scb = 0; + scb_free_list = (asp_scb_t *)&scb_array[0]; + } + + scb = scb_free_list; + scb_free_list = scb->next_scb; + ATENABLE(s, aspall_lock); + ATLOCKINIT(scb->lock); + ATLOCKINIT(scb->delay_lock); + ATEVENTINIT(scb->event); + ATEVENTINIT(scb->delay_event); + + return scb; +} + +/* + * the free scb routine + */ +StaticProc void +asp_scb_free(scb) + asp_scb_t *scb; +{ + int s; + + bzero((char *)scb, sizeof(asp_scb_t)); + ATDISABLE(s, aspall_lock); + scb->next_scb = scb_free_list; + scb_free_list = scb; + ATENABLE(s, aspall_lock); +} + +/* + * routine to pass up receive data + */ +StaticProc void +asp_putnext(gref, mproto) + gref_t *gref; + gbuf_t *mproto; +{ + int s; + gbuf_t *m; + asp_scb_t *scb; + + scb = (asp_scb_t *)gref->info; + + /* + * queue the message. + */ + ATDISABLE(s, scb->lock); + gbuf_next(mproto) = 0; + if ((m = scb->sess_ioc) == 0) + scb->sess_ioc = mproto; + else { + while (gbuf_next(m)) + m = gbuf_next(m); + gbuf_next(m) = mproto; + } + scb->rcv_cnt++; + if (scb->rcv_cnt >= MAX_RCV_CNT) + scb->snd_stop = 1; + + if (scb->get_wait) { + thread_wakeup(&scb->event); + ATENABLE(s, scb->lock); + } else if (mproto == scb->sess_ioc) { + ATENABLE(s, scb->lock); + atalk_notify_sel(gref); + } else + ATENABLE(s, scb->lock); +} /* asp_putnext */ + +/* + * The following two routines are direct entries from system + * calls to allow fast sending and recving of ASP data. 
+ */ + +/* in ASPputmsg we expect: + + ASPFUNC_CmdReply + ASPFUNC_Attention + ASPFUNC_Command + ASPFUNC_Write + ASPFUNC_WriteContinue +*/ + +int ASPputmsg(gref, ctlptr, datptr, flags, errp) + gref_t *gref; + strbuf_t *ctlptr; + strbuf_t *datptr; + int flags; + int *errp; +{ + int s, i, err, len; + gbuf_t *mioc, *mdata, *mx; + ioc_t *iocbp; + strbuf_t ctlbuf; + strbuf_t datbuf; + asp_scb_t *scb; + int nbds, result, msize, Primitive; + unsigned char *wptr; + struct atp_set_default *sd; + at_ddp_t *ddp; + at_atp_t *atp; + struct atpBDS *atpBDS; + asp_word_t *awp; + union asp_primitives *primitives; + unsigned short tid; + + if ((scb = (asp_scb_t *)gref->info) == 0) { + dPrintf(D_M_ASP, D_L_ERROR, + ("ASPputmsg: stale handle=0x%x, pid=%d\n", + (u_int) gref, gref->pid)); + + *errp = EINVAL; + return -1; + } + + if (scb->state == ASPSTATE_Close) + return 0; + if (scb->snd_stop) { + *errp = EAGAIN; + return -1; + } + + /* + * copy in the control and data info + */ + if ((err = copyin((caddr_t)ctlptr, + (caddr_t)&ctlbuf, sizeof(ctlbuf))) != 0) + goto l_err; + if ((err = copyin((caddr_t)datptr, + (caddr_t)&datbuf, sizeof(datbuf))) != 0) + goto l_err; + + /* + * allocate buffer and copy in the control content + */ + if (!(mioc = gbuf_alloc_wait(ctlbuf.len, TRUE))) { + /* error return should not be possible */ + err = ENOBUFS; + goto l_err; + } + gbuf_set_type(mioc, MSG_IOCTL); /* for later, in ATP */ + gbuf_wset(mioc, ctlbuf.len); + if ((err = copyin((caddr_t)ctlbuf.buf, + (caddr_t)gbuf_rptr(mioc), ctlbuf.len)) != 0) { + gbuf_freem(mioc); + goto l_err; + } + + iocbp = (ioc_t *)gbuf_rptr(mioc); + primitives = (union asp_primitives *)gbuf_rptr(mioc); + Primitive = primitives->Primitive; + dPrintf(D_M_ASP, D_L_INFO, + ("ASPputmsg: %s\n", aspCmdStr(Primitive))); + + /* + * allocate buffer and copy in the data content + */ + len = (Primitive == ASPFUNC_CmdReply) ? 
0 : aspCMDsize; + if (!(mdata = gbuf_alloc_wait(datbuf.len+len, TRUE))) { + /* error return should not be possible */ + err = ENOBUFS; + gbuf_freem(mioc); + goto l_err; + } + gbuf_wset(mdata,(datbuf.len+len)); + gbuf_cont(mioc) = mdata; + if ((err = copyin((caddr_t)datbuf.buf, + (caddr_t)(gbuf_rptr(mdata)+len), datbuf.len)) != 0) { + gbuf_freem(mioc); + goto l_err; + } + + switch (Primitive) { + + case ASPFUNC_Command: + case ASPFUNC_Write: + case ASPFUNC_WriteContinue: + case ASPFUNC_Attention: + /* + * build the command/write/write_continue request + */ + wptr = gbuf_rptr(mdata); + atpBDS = (struct atpBDS *)wptr; + wptr += atpBDSsize; + for (i=0; i < ATP_TRESP_MAX; i++) { + *(unsigned long *)atpBDS[i].bdsBuffAddr = 1; + *(unsigned short *)atpBDS[i].bdsBuffSz = ATP_DATA_SIZE; + } + sd = (struct atp_set_default *)wptr; + wptr += sizeof(struct atp_set_default); + sd->def_retries = (scb->cmd_retry.retries == -1) ? + ATP_INFINITE_RETRIES : scb->cmd_retry.retries; + sd->def_rate = scb->cmd_retry.interval*TICKS_PER_SEC; + sd->def_BDSlen = atpBDSsize; + ddp = (at_ddp_t *)wptr; + NET_ASSIGN(ddp->src_net, scb->loc_addr.net); + ddp->src_node = scb->loc_addr.node; + NET_ASSIGN(ddp->dst_net, scb->rem_addr.net); + ddp->dst_node = scb->rem_addr.node; + ddp->dst_socket = scb->rem_addr.socket; + UAS_ASSIGN(ddp->checksum, 0); + atp = ATP_ATP_HDR(wptr); + wptr += TOTAL_ATP_HDR_SIZE; + atp->xo = 1; + atp->xo_relt = 1; + atp->bitmap = 0xff; + awp = (asp_word_t *)atp->user_bytes; + awp->func = (unsigned char)Primitive; + awp->param1 = scb->sess_id; + awp->param2 = scb->snd_seq_num; + iocbp->ioc_private = (void *)scb; + iocbp->ioc_count = gbuf_len(mdata); + iocbp->ioc_rval = 0; + iocbp->ioc_cmd = AT_ATP_ISSUE_REQUEST_DEF; + + /* + * send the command/write/write_continue/attention request + */ + ATDISABLE(s, scb->lock); + switch (awp->func) { + case ASPFUNC_Command: + scb->state = ASPSTATE_WaitingForCommandRsp; + break; + case ASPFUNC_Write: + scb->state = ASPSTATE_WaitingForWriteRsp; + 
break; + case ASPFUNC_WriteContinue: + scb->state = ASPSTATE_WaitingForWriteContinueRsp; + awp->param2 = scb->wrt_seq_num; + break; + case ASPFUNC_Attention: + scb->state = ASPSTATE_WaitingForCommandRsp; + atp->xo = 0; + atp->xo_relt = 0; + atp->bitmap = 0x01; + gbuf_wdec(mdata,2); + awp->param2 = *(unsigned short *)gbuf_wptr(mdata); + break; + } + ATENABLE(s, scb->lock); + dPrintf(D_M_ASP,D_L_INFO, + ("ASPputmsg: %s, loc=%d, rem=%x.%x.%d\n", + (awp->func == ASPFUNC_Command ? "CommandReq" : + awp->func == ASPFUNC_Write ? "WriteReq" : + awp->func == ASPFUNC_WriteContinue ? "WriteContinue" : + "AttentionReq"),scb->loc_addr.socket, + NET_VALUE(ddp->dst_net),ddp->dst_node,ddp->dst_socket)); + atp_send_req(gref, mioc); + return 0; + + case ASPFUNC_CmdReply: + + ATDISABLE(s, scb->lock); + if (scb->req_msgq) { + mx = scb->req_msgq; + scb->req_msgq = gbuf_next(mx); + gbuf_next(mx) = 0; + ATENABLE(s, scb->lock); + asp_putnext(scb->gref, mx); + } else { + scb->req_flag = 0; + ATENABLE(s, scb->lock); + } + result = primitives->CmdReplyReq.CmdResult; + tid = primitives->CmdReplyReq.ReqRefNum; + + /* Re-use the original mioc mbuf to send the response. */ + gbuf_rinc(mioc,sizeof(void *)); + gbuf_wset(mioc,0); + ddp = (at_ddp_t *)gbuf_wptr(mioc); + gbuf_winc(mioc,DDP_X_HDR_SIZE); + atp = (at_atp_t *)gbuf_wptr(mioc); + gbuf_winc(mioc,ATP_HDR_SIZE); + NET_ASSIGN(ddp->src_net, scb->loc_addr.net); + ddp->src_node = scb->loc_addr.node; + NET_ASSIGN(ddp->dst_net, scb->rem_addr.net); + ddp->dst_node = scb->rem_addr.node; + ddp->dst_socket = scb->reply_socket; + ddp->type = DDP_ATP; + UAS_ASSIGN(ddp->checksum, 0); + UAS_ASSIGN(atp->tid, tid); + if (scb->attn_flag && (tid == scb->attn_tid)) { + scb->attn_flag = 0; + atp->xo = 0; + atp->xo_relt = 0; + } else { + atp->xo = 1; + atp->xo_relt = 1; + } + atpBDS = (struct atpBDS *)gbuf_wptr(mioc); + msize = mdata ? gbuf_msgsize(mdata) : 0; + for (nbds=0; (nbds < ATP_TRESP_MAX) && (msize > 0); nbds++) { + len = msize < ATP_DATA_SIZE ? 
msize : ATP_DATA_SIZE; + msize -= ATP_DATA_SIZE; + *(long *)atpBDS[nbds].bdsUserData = 0; + UAL_ASSIGN(atpBDS[nbds].bdsBuffAddr, 1); + UAS_ASSIGN(atpBDS[nbds].bdsBuffSz, len); + } + UAS_ASSIGN(atpBDS[0].bdsDataSz, nbds); + *(long *)atpBDS[0].bdsUserData = (long)result; + *(long *)atp->user_bytes = (long)result; + gbuf_winc(mioc,atpBDSsize); + dPrintf(D_M_ASP, D_L_INFO, + ("ASPputmsg: ATP CmdReplyReq, loc=%d, state=%s, msgsize = %d, result = %d, tid = %d\n", + scb->loc_addr.socket, aspStateStr(scb->state), + (mdata ? gbuf_msgsize(mdata) : 0), result, tid)); + atp_send_rsp(gref, mioc, TRUE); + return 0; + } + + /* Not an expected ASPFUNC */ + gbuf_freem(mioc); + err = EOPNOTSUPP; + +l_err: + *errp = err; + return -1; +} /* ASPputmsg */ + +int +ASPgetmsg(gref, ctlptr, datptr, flags, errp) + gref_t *gref; + strbuf_t *ctlptr; + strbuf_t *datptr; + int *flags; + int *errp; +{ + int err, s, len, sum, rval; + gbuf_t *mproto, *mdata; + strbuf_t ctlbuf; + strbuf_t datbuf; + asp_scb_t *scb; + unsigned char get_wait; + + if ((scb = (asp_scb_t *)gref->info) == 0) { + dPrintf(D_M_ASP, D_L_ERROR, + ("ASPgetmsg: stale handle=0x%x, pid=%d\n", + (u_int) gref, gref->pid)); + + *errp = EINVAL; + return -1; + } + + ATDISABLE(s, scb->lock); + if (scb->state == ASPSTATE_Close) { + ATENABLE(s, scb->lock); + return 0; + } + + /* + * get receive data + */ + while ((mproto = scb->sess_ioc) == 0) { + scb->get_wait = 1; + err = tsleep(&scb->event, PSOCK | PCATCH, "aspgetmsg", 0); + if (err != 0) { + scb->get_wait = 0; + ATENABLE(s, scb->lock); + *errp = err; + return -1; + } + if (scb->state == ASPSTATE_Close) { + scb->get_wait = 0; + ATENABLE(s, scb->lock); + return 0; + } + } + get_wait = scb->get_wait; + scb->get_wait = 0; + if ((ctlptr == 0) && (datptr == 0)) { + ATENABLE(s, scb->lock); + return 0; + } + scb->sess_ioc = gbuf_next(mproto); + mdata = gbuf_cont(mproto); + ATENABLE(s, scb->lock); + + /* last remaining use of MSG_ERROR */ + if (gbuf_type(mproto) == MSG_ERROR) { + err = 
(int)gbuf_rptr(mproto)[0]; + goto l_err; + } + + /* + * copy in the control and data info + */ + if ((err = copyin((caddr_t)ctlptr, + (caddr_t)&ctlbuf, sizeof(ctlbuf))) != 0) + goto l_err; + if ((err = copyin((caddr_t)datptr, + (caddr_t)&datbuf, sizeof(datbuf))) != 0) + goto l_err; + if ((datbuf.maxlen < 0) || (datbuf.maxlen < gbuf_msgsize(mdata))) { + ATDISABLE(s, scb->lock); + gbuf_next(mproto) = scb->sess_ioc; + scb->sess_ioc = mproto; + ATENABLE(s, scb->lock); + return MOREDATA; + } + + if (get_wait == 0) { + /* + * this is a hack to support the select() call. + * we're not supposed to dequeue messages in the Streams + * head's read queue this way; but there is no better way. + */ + ATDISABLE(s, scb->lock); + if (scb->sess_ioc == 0) { + ATENABLE(s, scb->lock); + } else { + ATENABLE(s, scb->lock); + atalk_notify_sel(gref); + } + } + + /* + * copy out the control content and info + */ + ctlbuf.len = gbuf_len(mproto); + if ((err = copyout((caddr_t)gbuf_rptr(mproto), + (caddr_t)ctlbuf.buf, ctlbuf.len)) != 0) + goto l_err; + if ((err = copyout((caddr_t)&ctlbuf, + (caddr_t)ctlptr, sizeof(ctlbuf))) != 0) + goto l_err; + + /* + * copy out the data content and info + */ + for (rval=0, sum=0; mdata && (rval == 0); mdata = gbuf_cont(mdata)) { + len = gbuf_len(mdata); + if (len) { + if ((len + sum) > datbuf.maxlen) { + len = datbuf.maxlen - sum; + rval = MOREDATA; + } + if ((err = copyout((caddr_t)gbuf_rptr(mdata), + (caddr_t)&datbuf.buf[sum], len)) != 0) + goto l_err; + sum += len; + } + } + datbuf.len = sum; + if ((err = copyout((caddr_t)&datbuf, + (caddr_t)datptr, sizeof(datbuf))) != 0) + goto l_err; + +#ifdef APPLETALK_DEBUG + if (mproto == 0) + kprintf("ASPgetmsg: null mproto!!!\n"); +#endif + + gbuf_freem(mproto); + + ATDISABLE(s, scb->lock); + if (scb->sess_ioc) + scb->rcv_cnt--; + else { + scb->rcv_cnt = 0; + scb->snd_stop = 0; + } + ATENABLE(s, scb->lock); + return rval; + +l_err: + dPrintf(D_M_ASP, D_L_ERROR, + ("ASPgetmsg: err=%d, loc=%d, rem=%x.%x.%d, 
state=%s\n", + err, scb->loc_addr.socket, + scb->rem_addr.net, + scb->rem_addr.node, scb->rem_addr.socket, + aspStateStr(scb->state))); + ATDISABLE(s, scb->lock); + gbuf_next(mproto) = scb->sess_ioc; + scb->sess_ioc = mproto; + ATENABLE(s, scb->lock); + *errp = err; + return -1; +} diff --git a/bsd/netat/at.c b/bsd/netat/at.c new file mode 100644 index 000000000..f1a750bbf --- /dev/null +++ b/bsd/netat/at.c @@ -0,0 +1,681 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. 
+ */ + +/* at.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +extern int at_ioctl(struct atpcb *, u_long, caddr_t); +extern int routerStart(at_kern_err_t *); +extern void elap_offline(at_ifaddr_t *); +extern at_ifaddr_t *find_ifID(char *); +extern at_nvestr_t *getRTRLocalZone(zone_usage_t *); +extern int setLocalZones(at_nvestr_t *, int); + +extern int xpatcnt; +extern at_ifaddr_t at_interfaces[]; +extern at_ifaddr_t *ifID_home; +extern TAILQ_HEAD(name_registry, _nve_) name_registry; +extern int nve_lock; + +struct etalk_addr etalk_multicast_addr = { + {0x09, 0x00, 0x07, 0xff, 0xff, 0xff}}; +struct etalk_addr ttalk_multicast_addr = { + {0xC0, 0x00, 0x40, 0x00, 0x00, 0x00}}; + +/* called only in router mode */ +static int set_zones(ifz) + zone_usage_t *ifz; + +/* 1. adds zone to table + 2. looks up each route entry from zone list + 3. sets zone bit in each route entry + + returns 0 if successful + errno if error occurred +*/ +{ + int i; + at_ifaddr_t *ifID; + short zno; + RT_entry *rte; + + zno = zt_add_zone(ifz->zone_name.str, ifz->zone_name.len); + + if (zno == ZT_MAXEDOUT) { + dPrintf(D_M_ELAP, D_L_ERROR, ("set_zones: error: table full\n")); + return(ENOSPC); + } + if (ifz->zone_home) { + ifID_home->ifZoneName = ifz->zone_name; + ifID_home->ifDefZone = zno; + } + + for (i=0; izone_iflist.at_if[i][0]) { + if ((ifID = find_ifID(ifz->zone_iflist.at_if[i]))) { + rte = rt_blookup(ifID->ifThisCableEnd); + if (!rte) { + dPrintf(D_M_ELAP, D_L_ERROR, + ("set_zones: error: can't find route\n")); + } else { + zt_set_zmap(zno, rte->ZoneBitMap); + + /* if first zone for this I/F, + make default */ + if (!ifID->ifDefZone) + ifID->ifDefZone = zno; + } + } + } + } + + return(0); +} /* set_zones */ + +/* + * Generic internet control operations (ioctl's). 
+ * ifp is 0 if not an interface-specific ioctl. + */ + +int at_control(so, cmd, data, ifp) + struct socket *so; + u_long cmd; + caddr_t data; + struct ifnet *ifp; +{ + struct ifreq *ifr = (struct ifreq *)data; + int pat_id = 0, error = 0; + struct proc *p = current_proc(); + at_ifaddr_t *ifID = 0; + struct ifaddr *ifa; + struct sockaddr_dl *sdl; + + if (cmd == 0x2000ff99) { + /* *** this is a temporary hack to get at_send_to_dev() to + work with BSD-style sockets instead of the special purpose + system calls, ATsocket() and ATioctl(). + *** */ + if ((error = at_ioctl((struct atpcb *)so->so_pcb, cmd, data))) { + if (((struct atpcb *)so->so_pcb)->proto != ATPROTO_LAP) { + ((struct atpcb *)so->so_pcb)->proto = ATPROTO_LAP; + error = at_ioctl((struct atpcb *)so->so_pcb, cmd, data); + } + } + return(error); + + /* *** processing should be + return(EINVAL); + *** */ + } + /* + * Find address for this interface, if it exists. + */ + if (ifp) + for (pat_id = 0; pat_id < xpatcnt; pat_id++) + if (at_interfaces[pat_id].aa_ifp == ifp) { + ifID = &at_interfaces[pat_id]; + break; + } + + switch (cmd) { + + case AIOCGETSTATE: + { + at_state_t *global_state = (at_state_t *)data; + + *global_state = at_state; + return(0); + break; + } + + case AIOCGETIFCFG: + { + at_if_cfg_t *cfgp = (at_if_cfg_t *)data; + + ifID = 0; + if ((at_state.flags & AT_ST_STARTED) && + ifID_home) { + if (strlen(cfgp->ifr_name)) { + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (!strncmp(ifID->ifName, cfgp->ifr_name, + strlen(ifID->ifName))) + break; + } + } else { + ifID = ifID_home; + strncpy(cfgp->ifr_name, ifID->ifName, + sizeof(ifID->ifName)); + } + if (ifID && ifID->ifState != LAP_OFFLINE) { + cfgp->flags = ifID->ifFlags; + /* put the IF state into the low order + bits of flags */ + cfgp->flags |= (ifID->ifState & LAP_STATE_MASK); + cfgp->node = ifID->ifThisNode; + cfgp->router = ifID->ifARouter; + cfgp->netStart = ifID->ifThisCableStart; + cfgp->netEnd = ifID->ifThisCableEnd; + cfgp->zonename = 
ifID->ifZoneName; + return(0); + } else + return(EINVAL); + } else + return(ENOTREADY); + break; + } + + case AIOCSETDEFZONE: + { + at_def_zone_t *defzonep = (at_def_zone_t *)data; + + /* check for root access */ + if (error = suser(p->p_ucred, &p->p_acflag)) + return(EACCES); + + ifID = 0; + if ((at_state.flags & AT_ST_STARTED) && ifID_home) { + if (strlen(defzonep->ifr_name)) { + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (!strncmp(ifID->ifName, defzonep->ifr_name, + strlen(ifID->ifName))) + break; + } + } else { + ifID = ifID_home; + strncpy(defzonep->ifr_name, ifID->ifName, + sizeof(ifID->ifName)); + } + + /* In routing mode the default zone is only set for the + default interface. */ + if (ROUTING_MODE && (ifID != ifID_home)) + return(EINVAL); + + if (ifID && ifID->ifState != LAP_OFFLINE) { + if (zonename_equal(&ifID->ifZoneName, + &defzonep->zonename)) + return(0); + else { + /* check the zone name */ + if (MULTIPORT_MODE) { + short zno; + char ifs_in_zone[IF_TOTAL_MAX]; + + if (!(zno = zt_find_zname(&defzonep->zonename))) + return(EINVAL); + + getIfUsage(zno-1, ifs_in_zone); + if (!ifs_in_zone[ifID->ifPort]) + return(EINVAL); + ifID->ifDefZone = zno+1; + } else { + int i; + at_nvestr_t *zone; + + for (i = 0, zone = getSPLocalZone(i); + zone; + i++, zone = getSPLocalZone(i)) { + if (zonename_equal(zone, + &defzonep->zonename)) + break; + } + if (!zone) + return(EINVAL); + } + ifID->ifZoneName = defzonep->zonename; + (void)regDefaultZone(ifID); + return(0); + } + } else + return(EINVAL); + } else + return(ENOTREADY); + break; + } + + case AIOCREGLOCALZN: + { + at_nvestr_t *zone = (at_nvestr_t *)data; + + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) + return(ENOTREADY); + + if (MULTIPORT_MODE) + return(EINVAL); + + return(setLocalZones(zone, zone->len)); + + break; + } + case AIOCSETZNUSAGE: + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) + return(ENOTREADY); + + if (!ROUTING_MODE) + return(EINVAL); + + return(set_zones((zone_usage_t 
*)data)); + + break; + + case AIOCGETZNUSAGE: + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) + return(ENOTREADY); + + if (!MULTIPORT_MODE) + return(EINVAL); + + if (getRTRLocalZone((zone_usage_t *)data)) + return(0); + else + return(ENOENT); + break; + + case AIOCNBPREG: + { + at_nbp_reg_t *nbpP = (at_nbp_reg_t *)data; + nve_entry_t nve; + int error; + + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) + return(ENOTREADY); + + /* multihoming mode */ + if (MULTIHOME_MODE) { + return(nbp_mh_reg(nbpP)); + } + + /* single port mode or router mode */ + if (nbp_fillin_nve(&nbpP->name, &nve) != 0) { + /* bad tuple... */ + return(EINVAL); + } + + /* In routing mode when the zone is specified, we need to + find an interface on which the specified zone is seeded, so + that the zone multicast will be plausible. */ + if (ROUTING_MODE && !(DEFAULT_ZONE(&nve.zone))) { + /* find first segment (interface) which is seeded for + this zone */ + int finished = FALSE; + int zno; + char ifs_in_zone[IF_TOTAL_MAX]; + if (!(zno = zt_find_zname(&nve.zone))) { + return(EINVAL); + } + getIfUsage(zno-1, ifs_in_zone); + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (!ifs_in_zone[ifID->ifPort]) + /* zone doesn't match */ + continue; + else + finished = TRUE; + } + if (!finished) + return(EINVAL); + } else + ifID = ifID_home; + + nve.address.net = ifID->ifThisNode.s_net; + nve.address.node = ifID->ifThisNode.s_node; + nve.address.socket = nbpP->addr.socket; + nve.ddptype = nbpP->ddptype; + + if (nbp_find_nve(&nve)) + return(EADDRNOTAVAIL); + + /* Normal case; no tuple found for this name, so insert + * this tuple in the registry and return ok response. 
+ */ + ATDISABLE(nve_lock, NVE_LOCK); + if ((error = nbp_new_nve_entry(&nve, ifID)) == 0) { + nbpP->addr.net = ifID->ifThisNode.s_net; + nbpP->addr.node = ifID->ifThisNode.s_node; + nbpP->unique_nbp_id = nve.unique_nbp_id; + } + ATENABLE(nve_lock, NVE_LOCK); + + return(error); + break; + } + + case AIOCNBPREMOVE: + { + at_nbp_reg_t *nbpP = (at_nbp_reg_t *)data; + nve_entry_t *nve_entry, nve; + + if (!(at_state.flags & AT_ST_STARTED)) + return(ENOTREADY); + + /* delete by id */ + if (nbpP->unique_nbp_id) { + ATDISABLE(nve_lock, NVE_LOCK); + TAILQ_FOREACH(nve_entry, &name_registry, nve_link) { + if (nve_entry->unique_nbp_id == nbpP->unique_nbp_id) { + /* Found a match! */ + nbp_delete_entry(nve_entry); + ATENABLE(nve_lock, NVE_LOCK); + return(0); + } + } + ATENABLE(nve_lock, NVE_LOCK); + return(EADDRNOTAVAIL); + } + + /* delete by entity */ + if (nbp_fillin_nve(&nbpP->name, &nve) != 0) { + /* bad tuple... */ + return(EINVAL); + } + + if (MULTIHOME_MODE && DEFAULT_ZONE(&nbpP->name.zone)) { + /* if mhome & *, remove nve from all default zones */ + int found = FALSE; /* if any found & deleted */ + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + nve.zone = ifID->ifZoneName; + nve.zone_hash = nbp_strhash(&nve.zone); + if ((nve_entry = nbp_find_nve(&nve)) == NULL) + continue; + + ATDISABLE(nve_lock, NVE_LOCK); + nbp_delete_entry(nve_entry); + ATENABLE(nve_lock, NVE_LOCK); + found = TRUE; + } + if (found) + return(0); + else + return(EADDRNOTAVAIL); + } + + if ((nve_entry = nbp_find_nve(&nve)) == NULL) + /* Can't find the tuple we're looking for, send error*/ + return(EADDRNOTAVAIL); + + /* Normal case; tuple found for this name, so delete + * the entry from the registry and return ok response. 
+ */ + ATDISABLE(nve_lock, NVE_LOCK); + nbp_delete_entry(nve_entry); + ATENABLE(nve_lock, NVE_LOCK); + return(0); + + break; + } + + case AIOCSETROUTER: + { + at_router_params_t *rt = (at_router_params_t *)data; + + /* check for root access */ + if (error = suser(p->p_ucred, &p->p_acflag)) + return(EACCES); + + /* when in routing/multihome mode the AIOCSETROUTER IOCTL + is done first */ + if (at_state.flags & AT_ST_STARTED) + return(EALREADY); + + /* Setup the routing & zip table size for the router */ + if (rt->rtmp_table_sz >= RT_MIN && rt->rtmp_table_sz <= RT_MAX) + RT_maxentry = rt->rtmp_table_sz; + else + RT_maxentry = RT_DEFAULT; + + if (rt->zone_table_sz >= ZT_MIN && rt->zone_table_sz <= ZT_MAX) + ZT_maxentry = rt->zone_table_sz; + else + ZT_maxentry = ZT_DEFAULT; + + if (rt_table_init() == ENOBUFS) + return(ENOBUFS); + + if (rt->router_mix) + RouterMix = (int)rt->router_mix; + else + RouterMix = RT_MIX_DEFAULT; + + add_ddp_handler(RTMP_SOCKET, rtmp_router_input); + + if (rt->multihome) + at_state.flags |= AT_ST_MULTIHOME; + else + at_state.flags |= AT_ST_ROUTER; + break; + } + case AIOCSTARTROUTER: + { + at_kern_err_t *keP = (at_kern_err_t *)data; + + /* check for root access */ + if (suser(p->p_ucred, &p->p_acflag)) + return(EACCES); + + if (!(at_state.flags & AT_ST_STARTED)) + return(ENOTREADY); + + bzero(keP, sizeof(at_kern_err_t)); + error = routerStart(keP); + + break; + } + case AIOCGETROUTER: + { + at_router_params_t *rt = (at_router_params_t *)data; + + if (!(at_state.flags & AT_ST_STARTED)) + return(ENOTREADY); + + rt->multihome = (MULTIHOME_MODE)? 1: 0; + rt->rtmp_table_sz = RT_maxentry; + rt->zone_table_sz = ZT_maxentry; + rt->router_mix = RouterMix; + + break; + } + case AIOCSTOPATALK: + { + int *count_only = (int *)data, + ret; + + /* check for root access */ + if (error = suser(p->p_ucred, &p->p_acflag)) + return(EACCES); + + ret = ddp_shutdown(*count_only); + if (*count_only) { + *count_only = ret; + return(0); + } else + return((ret == 0)? 
0 : EBUSY); + break; + } + + case SIOCSIFADDR: + /* check for root access */ + if (error = suser(p->p_ucred, &p->p_acflag)) + error = EACCES; + else if (ifID) + error = EEXIST; + else { + int s; + if (xpatcnt == 0) { + at_state.flags |= AT_ST_STARTED; + ddp_brt_init(); + } + + /* *** find an empty entry *** */ + ifID = &at_interfaces[xpatcnt]; + bzero((caddr_t)ifID, sizeof(at_ifaddr_t)); + strncpy(ifID->ifName, ifr->ifr_name, sizeof(ifID->ifName)); + + ifID->aa_ifp = ifp; + ifa = &ifID->aa_ifa; + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) + if ((sdl = (struct sockaddr_dl *)ifa->ifa_addr) && + (sdl->sdl_family == AF_LINK)) { + bcopy(LLADDR(sdl), ifID->xaddr, sizeof(ifID->xaddr)); +#ifdef APPLETALK_DEBUG + kprintf("SIOCSIFADDR: local enet address is %x.%x.%x.%x.%x.%x\n", + ifID->xaddr[0], ifID->xaddr[1], + ifID->xaddr[2], ifID->xaddr[3], + ifID->xaddr[4], ifID->xaddr[5]); +#endif + break; + } + + /* attach the AppleTalk address to the ifnet structure */ + ifa = &ifID->aa_ifa; + ifa->ifa_addr = (struct sockaddr *)&ifID->ifNodeAddress; + ifID->ifNodeAddress.sat_len = sizeof(struct sockaddr_at); + ifID->ifNodeAddress.sat_family = AF_APPLETALK; + /* the address itself will be filled in when ifThisNode + is set */ + s = splnet(); + TAILQ_INSERT_TAIL(&ifp->if_addrhead, ifa, ifa_link); + splx(s); + + switch (ifp->if_type) { + case IFT_ETHER: + ether_attach_at(ifp, &ifID->at_dl_tag, + &ifID->aarp_dl_tag); + error = 0; + ifID->cable_multicast_addr = etalk_multicast_addr; + + xpatcnt++; + break; + case IFT_FDDI: + ifID->cable_multicast_addr = etalk_multicast_addr; + ddp_bit_reverse(&ifID->cable_multicast_addr); + xpatcnt++; + break; + case IFT_ISO88025: /* token ring */ + ifID->cable_multicast_addr = ttalk_multicast_addr; + ddp_bit_reverse(&ifID->cable_multicast_addr); + + xpatcnt++; + break; + default: + error = EINVAL; + } + } + break; + + /* complete the initialization started in SIOCSIFADDR */ + case AIOCSIFADDR: + { + at_if_cfg_t *cfgp = (at_if_cfg_t *)data; + + if 
(!(at_state.flags & AT_ST_STARTED)) + return(ENOTREADY); + + if (!(ifID = find_ifID(cfgp->ifr_name))) + return(EINVAL); + + return(lap_online(ifID, cfgp)); + break; + } + +#ifdef NOT_YET + /* *** this can't be added until AT can handle dynamic addition and + deletion of interfaces *** */ + case SIOCDIFADDR: + /* check for root access */ + if (error = suser(p->p_ucred, &p->p_acflag)) + error = EACCES; + else if (!ifID) + error = EINVAL; + else + elap_offline(ifID); + break; +#endif + + case SIOCSETOT: { + int s; + struct atpcb *at_pcb, *clonedat_pcb; + int cloned_fd = *(int *)data; + + s = splnet(); /* XXX */ + at_pcb = sotoatpcb(so); + + /* let's make sure it's either -1 or a valid file descriptor */ + if (cloned_fd != -1) { + struct socket *cloned_so; + struct file *cloned_fp; + error = getsock(p->p_fd, cloned_fd, &cloned_fp); + if (error){ + splx(s); /* XXX */ + break; + } + cloned_so = (struct socket *)cloned_fp->f_data; + clonedat_pcb = sotoatpcb(cloned_so); + } else { + clonedat_pcb = NULL; + } + + if (clonedat_pcb == NULL) { + at_pcb->ddp_flags |= DDPFLG_STRIPHDR; + } else { + at_pcb->ddp_flags = clonedat_pcb->ddp_flags; + } + splx(s); /* XXX */ + break; + } + + default: + if (ifp == 0 || ifp->if_ioctl == 0) + return (EOPNOTSUPP); + return dlil_ioctl(0, ifp, cmd, (caddr_t) data); + } + + return(error); +} diff --git a/bsd/netat/at_aarp.h b/bsd/netat/at_aarp.h new file mode 100644 index 000000000..4490830d0 --- /dev/null +++ b/bsd/netat/at_aarp.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _NETAT_AT_AARP_H_ +#define _NETAT_AT_AARP_H_ +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + */ + +/* "@(#)at_aarp.h: 2.0, 1.6; 10/4/93; Copyright 1988-89, Apple Computer, Inc." */ + +/* This is a header file for AARP. + * + * Author: R. C. Venkatraman + * Date : 3/2/88 + * + */ + +/* AARP packet */ + +typedef struct { + u_short hardware_type; + u_short stack_type; /* indicates appletalk or xns*/ + u_char hw_addr_len; /* len of hardware addr, e.g + * ethernet addr len, in bytes + */ + u_char stack_addr_len; /* protocol stack addr len, + * e.g., appletalk addr len + * in bytes + */ + u_short aarp_cmd; + struct etalk_addr src_addr; + struct atalk_addr src_at_addr; + struct etalk_addr dest_addr; + struct atalk_addr dest_at_addr; /* desired or dest. at addr */ +} aarp_pkt_t; + + +/* Constants currently defined in AARP */ + +#define AARP_AT_TYPE 0x80F3 /* indicates aarp packet */ +#define AARP_ETHER_HW_TYPE 0x1 +#define AARP_AT_PROTO 0x809B /* indicates stack type */ +#define AARP_ETHER_ADDR_LEN 6 /* in bytes */ +#define AARP_AT_ADDR_LEN 4 /* in bytes */ + +/* AARP cmd definitions */ + +#define AARP_REQ_CMD 0x1 /* address lookup request */ +#define AARP_RESP_CMD 0x2 /* address match response */ +#define AARP_PROBE_CMD 0x3 /* new kid probing... 
*/ + +/* AARP timer and retry counts */ + +#define AARP_MAX_PROBE_RETRIES 20 +#define AARP_PROBE_TIMER_INT HZ/30 /* HZ defines in param.h */ +#define AARP_MAX_REQ_RETRIES 10 +#define AARP_REQ_TIMER_INT HZ/30 +#define AARP_MAX_NODES_TRIED 200 /* max no. of addresses tried */ + /* on the same net before */ + /* giving up on the net# */ +#define AARP_MAX_NETS_TRIED 10 /* max no. of net nos tried */ + /* before giving up on startup*/ + +/* Probe states */ + +#define PROBE_IDLE 0x1 /* There is no node addr */ +#define PROBE_TENTATIVE 0x2 /* probing */ +#define PROBE_DONE 0x3 /* an appletalk addr has been */ + /* assigned for the given node*/ +/* Errors returned by AARP routines */ +#define AARP_ERR_NOT_OURS 1 /* not our appletalk address */ + +/*************************************************/ +/* Declarations for AARP Address Map Table (AMT) */ +/*************************************************/ + +typedef struct { + struct atalk_addr dest_at_addr; + struct etalk_addr dest_addr; + char dummy[2]; /* pad out to struct size of 32 */ + time_t last_time; /* the last time that this addr + * was used. Read in lbolt + * whenever the addr is used. 
+ */ + int no_of_retries; /* number of times we've xmitted */ + gbuf_t *m; /* ptr to msg blk to be sent out */ + at_ifaddr_t *elapp; + int error; + void *tmo; +} aarp_amt_t; + +#define AMT_BSIZ 4 /* bucket size */ +#define AMT_NB 64 /* number of buckets */ +#define AMTSIZE (AMT_BSIZ * AMT_NB) + +typedef struct { + aarp_amt_t et_aarp_amt[AMTSIZE]; +} aarp_amt_array; + +#define AMT_HASH(a) \ + ((NET_VALUE(((struct atalk_addr *)&a)->atalk_net) + ((struct atalk_addr *)&a)->atalk_node) % AMT_NB) + +#define AMT_LOOK(at, at_addr, elapp) { \ + register n; \ + at = &aarp_table[elapp->ifPort]->et_aarp_amt[AMT_HASH(at_addr) * AMT_BSIZ]; \ + for (n = 0 ; ; at++) { \ + if (ATALK_EQUAL(at->dest_at_addr, at_addr)) \ + break; \ + if (++n >= AMT_BSIZ) { \ + at = NULL; \ + break; \ + } \ + } \ + } + +#define NEW_AMT(at, at_addr, elapp) { \ + register n; \ + register aarp_amt_t *myat; \ + myat = at = &aarp_table[elapp->ifPort]->et_aarp_amt[AMT_HASH(at_addr) * AMT_BSIZ]; \ + for (n = 0 ; ; at++) { \ + if (at->last_time == 0) \ + break; \ + if (++n >= AMT_BSIZ) { \ + at = aarp_lru_entry(myat); \ + break; \ + } \ + } \ + } + +#define AARP_NET_MCAST(p, elapp) \ + (NET_VALUE((p)->dst_net) == elapp->ifThisNode.s_net) \ + ) /* network-wide broadcast */ + +#define AARP_CABLE_MCAST(p) \ + (NET_VALUE((p)->dst_net) == 0x0000 \ + ) + +#define AARP_BROADCAST(p, elapp) \ + (((p)->dst_node == 0xff) && \ + ( \ + (NET_VALUE((p)->dst_net) == 0x0000) || \ + (NET_VALUE((p)->dst_net) == elapp->ifThisNode.s_net)) \ + ) /* is this some kind of a broadcast address (?) 
*/ + + +#define ETHER_ADDR_EQUAL(addr1p, addr2p) \ + (( \ + ((addr1p)->etalk_addr_octet[0]==(addr2p)->etalk_addr_octet[0]) && \ + ((addr1p)->etalk_addr_octet[1]==(addr2p)->etalk_addr_octet[1]) && \ + ((addr1p)->etalk_addr_octet[2]==(addr2p)->etalk_addr_octet[2]) && \ + ((addr1p)->etalk_addr_octet[3]==(addr2p)->etalk_addr_octet[3]) && \ + ((addr1p)->etalk_addr_octet[4]==(addr2p)->etalk_addr_octet[4]) && \ + ((addr1p)->etalk_addr_octet[5]==(addr2p)->etalk_addr_octet[5]) \ + ) ? 1 : 0 \ + ) + +#ifdef KERNEL + +int aarp_chk_addr(at_ddp_t *, at_ifaddr_t *); +int aarp_rcv_pkt(aarp_pkt_t *, at_ifaddr_t *); + +#endif /* KERNEL */ + +#endif /* _NETAT_AT_AARP_H_ */ diff --git a/bsd/netat/at_config.h b/bsd/netat/at_config.h new file mode 100644 index 000000000..9514c64a7 --- /dev/null +++ b/bsd/netat/at_config.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988-1993 Apple Computer, Inc. + */ + +#ifndef _NETAT_AT_CONFIG_H_ +#define _NETAT_AT_CONFIG_H_ + +/* originally from if_cnt.h + * + * defines for if_stat struct. 
+ * note: set IF_TYPE_CNT to number of types supported and make sure + * that defines for those type are LESS than this value + */ + +#define IF_TYPENO_CNT 1 /* number of different types we support */ + /* *** this value was 5, but for now, let's + just start with ethernet *** */ + +/* maximum number of I/F's allowed */ +/* *** "17" corresponds to Shiner *** */ +#define IF_TOTAL_MAX 17 /* max count of any combination of I/F's */ + +typedef struct if_types { + int iftype, + max_interfaces; +} if_types_t; + + /* GET_ZONES defines */ +#define GET_ALL_ZONES 0 +#define GET_LOCAL_ZONES_ONLY 1 + +typedef struct if_zone_info { + at_nvestr_t zone_name; /* the zone name & len */ + unsigned zone_ifs[IF_TYPENO_CNT]; /* bitmapped I/F usage for zone */ + unsigned zone_home; /* TRUE for home zone */ +} if_zone_info_t; + +typedef union if_zone_nve { + at_nvestr_t ifnve; + int zone; +} if_zone_nve_t; + +/* this struct used to obtain local zones for specific + ifID's from the kernel and to set default zones for + specific ifID numbers */ +typedef struct if_zone { + if_zone_nve_t ifzn; + char usage[IF_TOTAL_MAX]; /* I/F usage (1 set if + I/F in this zone */ + int index; /* zone index in ZT_table */ +} if_zone_t; + + +#endif /* _NETAT_AT_CONFIG_H_ */ diff --git a/bsd/netat/at_ddp_brt.h b/bsd/netat/at_ddp_brt.h new file mode 100644 index 000000000..7cc33f78f --- /dev/null +++ b/bsd/netat/at_ddp_brt.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + */ + +#ifndef _NETAT_AT_DDP_BRT_H_ +#define _NETAT_AT_DDP_BRT_H_ + +typedef struct { + int age_flag; + at_ifaddr_t *ifID; + struct etalk_addr et_addr; + at_net_al net; +} ddp_brt_t; + +#define BRT_SWEEP_INT (10 * PR_SLOWHZ) +#define BRT_BSIZ 4 /* bucket size */ +#define BRT_NB 16 /* number of buckets */ +#define BRTSIZE (BRT_BSIZ * BRT_NB) + +/* age_flag values */ +#define BRT_EMPTY 0 /* the BRT entry is empty */ + /* (or aged out). */ +#define BRT_VALID 1 /* BRT entry contains valid */ + /* tuple */ +#define BRT_GETTING_OLD 2 /* BRT entry is a candidate */ + /* for aging */ + +#define BRT_HASH(a) ((a) % BRT_NB) + +#define BRT_LOOK(brt, dst_net) { \ + register n; \ + brt = &at_ddp_brt[BRT_HASH(dst_net) * BRT_BSIZ]; \ + for (n = 0 ; ; brt++) { \ + if (brt->net == dst_net) \ + break; \ + if (++n >= BRT_BSIZ) { \ + brt = NULL; \ + break; \ + } \ + } \ + } + +#define NEW_BRT(brt, net) { \ + register n; \ + brt = &at_ddp_brt[BRT_HASH(net) * BRT_BSIZ]; \ + for (n = 0 ; ; brt++) { \ + if (brt->age_flag == BRT_EMPTY) \ + break; \ + if (++n >= BRT_BSIZ) { \ + brt = NULL; \ + break; \ + } \ + } \ + } + +/* Best Router Cache */ +extern ddp_brt_t at_ddp_brt[BRTSIZE]; + +#endif /* _NETAT_AT_DDP_BRT_H_ */ + diff --git a/bsd/netat/at_pat.h b/bsd/netat/at_pat.h new file mode 100644 index 000000000..97a535b2e --- /dev/null +++ b/bsd/netat/at_pat.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + */ + +#ifndef _NETAT_AT_PAT_H_ +#define _NETAT_AT_PAT_H_ + +/* This is header for the PAT module. This contains a table of pointers that + * should get initialized with the BNET stuff and the ethernet driver. The + * number of interfaces supported should be communicated. Should include + * mbuf.h, if.h, socket.h + * + * Author: R. C. Venkatraman + * Date : 2/29/88 + */ + +typedef struct { + unsigned char dst[6]; + unsigned char src[6]; + unsigned short len; +} enet_header_t; + +typedef struct { + unsigned char dst_sap; + unsigned char src_sap; + unsigned char control; + unsigned char protocol[5]; +} llc_header_t; + +#define ENET_LLC_SIZE (sizeof(enet_header_t)+sizeof(llc_header_t)) +#define SNAP_UI 0x03 /* bits 11000000 reversed!! 
*/ +#define SNAP_AT_SAP 0xaa +#define SNAP_PROTO_AT {0x08, 0x00, 0x07, 0x80, 0x9B} +#define SNAP_PROTO_AARP {0x00, 0x00, 0x00, 0x80, 0xF3} +#define SNAP_HDR_AT {SNAP_AT_SAP, SNAP_AT_SAP, SNAP_UI, SNAP_PROTO_AT} +#define SNAP_HDR_AARP {SNAP_AT_SAP, SNAP_AT_SAP, SNAP_UI, SNAP_PROTO_AARP} + +#define LLC_PROTO_EQUAL(a1, a2) \ + ((*((unsigned long *)(a1)) == *((unsigned long *)(a2))) && \ + (a1[4] == a2[4]) \ + ) +#endif /* _NETAT_AT_PAT_H_ */ diff --git a/bsd/netat/at_pcb.c b/bsd/netat/at_pcb.c new file mode 100644 index 000000000..7c120650e --- /dev/null +++ b/bsd/netat/at_pcb.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1997-1999 Apple Computer, Inc. + * All Rights Reserved. + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)at_pcb.c 8.2 (Berkeley) 1/4/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct atpcb ddp_head; +extern struct atpcb *atp_inputQ[]; +extern CCB *adsp_inputQ[]; +extern at_ifaddr_t *ifID_home; +extern struct { + void (*func)(); + } ddp_handler[]; + +int DDP_chksum_on = FALSE; +int DDP_slfsnd_on = FALSE; + +zone_t atpcb_zone; + +void at_memzone_init() +{ + vm_size_t str_size; + + str_size = (vm_size_t)sizeof(struct atpcb); + atpcb_zone = (zone_t)zinit(str_size, 1000*str_size, 8192, "atpcb zone"); +} + +int at_pcballoc(so, head) + struct socket *so; + struct atpcb *head; +{ + register struct atpcb *pcb; + + pcb = (struct atpcb *)zalloc(atpcb_zone); + if (pcb == NULL) + return (ENOBUFS); + bzero((caddr_t)pcb, sizeof(*pcb)); + + /* set the flags to the system defaults */ + if (DDP_chksum_on) + pcb->ddp_flags |= DDPFLG_CHKSUM; + else + pcb->ddp_flags &= ~DDPFLG_CHKSUM; + if (DDP_slfsnd_on) + pcb->ddp_flags |= DDPFLG_SLFSND; + else + pcb->ddp_flags &= ~DDPFLG_SLFSND; + + pcb->atpcb_head = head; + pcb->atpcb_socket = so; + if (head) + insque((queue_t)pcb, (queue_t)head); + so->so_pcb = (caddr_t)pcb; + + return (0); +} + +int at_pcbdetach(pcb) + struct atpcb *pcb; +{ + struct socket *so = pcb->atpcb_socket; + + /* Notify NBP that we are closing this DDP socket */ + if (pcb->lport) { + ddp_notify_nbp(pcb->lport, pcb->pid, pcb->ddptype); + pcb->lport = 0; + } + + so->so_pcb = 0; + if ((pcb->atpcb_next) && (pcb->atpcb_prev)) + remque((queue_t)pcb); + zfree(atpcb_zone, (vm_offset_t)pcb); + sofree(so); + return(0); +} + +int ddp_socket_inuse(ddpsock, proto) + u_char ddpsock, proto; +{ + struct atpcb *pcb; + + if ((!proto || (proto == DDP_ATP)) && atp_inputQ[ddpsock]) + return TRUE; + if ((!proto || (proto == DDP_ADSP)) && adsp_inputQ[ddpsock]) + return TRUE; + 
if (ddp_handler[ddpsock].func) + return TRUE; + for (pcb = ddp_head.atpcb_next; pcb != &ddp_head; + pcb = pcb->atpcb_next) { + if (pcb->lport == ddpsock && + (!pcb->ddptype || !proto || (pcb->ddptype == proto))) + return TRUE; + } + return FALSE; +} + +int at_pcbbind(pcb, nam) + register struct atpcb *pcb; + struct sockaddr *nam; +{ + register struct socket *so = pcb->atpcb_socket; + register struct sockaddr_at *local = (struct sockaddr_at *) nam; + u_char ddpsock = local->sat_port; + + if ((!ifID_home) || (local->sat_family != AF_APPLETALK)) + return(EADDRNOTAVAIL); + + if (pcb->lport != ATADDR_ANYPORT || + pcb->laddr.s_node != ATADDR_ANYNODE || + pcb->laddr.s_net != ATADDR_ANYNET) + return(EINVAL); + + /* Request for dynamic socket? */ + if (ddpsock == 0) { + /* Search table for free one */ + /* *** borrow IP algorithm, instead? *** */ + for (ddpsock = DDP_SOCKET_LAST; + ddpsock >= (DDP_SOCKET_1st_DYNAMIC + 1); + /* sip has 1st */ + ddpsock--) { + if (! ddp_socket_inuse(ddpsock, pcb->ddptype)) + break; + } + if (ddpsock < (DDP_SOCKET_1st_DYNAMIC + 1)) + return(EADDRNOTAVAIL); /* Error if no free sockets */ + } else { + /* Asking to open a socket by its number. + Check if its legal & free. 
*/ + if (ddpsock > DDP_SOCKET_LAST) + return(EINVAL); + if (ddp_socket_inuse(ddpsock, pcb->ddptype)) + return(EADDRNOTAVAIL); + } + + pcb->lport = ddpsock; + /* if address is specified, make sure address matches one of the + interfaces configured for AppleTalk */ + if (local->sat_addr.s_net || local->sat_addr.s_node) { + if (MULTIHOME_MODE) { + at_ifaddr_t *ifID; + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (ifID->ifThisNode.s_net == local->sat_addr.s_net && + ifID->ifThisNode.s_node == local->sat_addr.s_node) { + pcb->laddr = local->sat_addr; + return(0); + } + } + return(EINVAL); + } else { + /* for single-port and router modes if the local address is + specified, it must match the default interface, which is + what will be put into packets' source address anyway */ + if (ifID_home->ifThisNode.s_net == local->sat_addr.s_net && + ifID_home->ifThisNode.s_node == local->sat_addr.s_node) { + pcb->laddr = local->sat_addr; + return(0); + } + return(EINVAL); + + } + } + return(0); +} diff --git a/bsd/netat/at_pcb.h b/bsd/netat/at_pcb.h new file mode 100644 index 000000000..489d35961 --- /dev/null +++ b/bsd/netat/at_pcb.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1999 Apple Computer, Inc. + * All Rights Reserved. + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* at_pcb.h */ + +/* + * Common structure pcb for internet protocol implementation. + * Here are stored pointers to local and foreign host table + * entries, local and foreign socket numbers, and pointers + * up (to a socket structure) and down (to a protocol-specific) + * control block. + */ +struct atpcb { + struct atpcb *atpcb_next, /* pointers to other pcb's */ + *atpcb_prev, + *atpcb_head; /* pointer back to chain of atpcb's + for this protocol */ + struct socket *atpcb_socket; /* back pointer to socket */ + u_char ddptype, /* DDP type */ + lport, /* local DDP socket */ + rport; /* remote DDP socket */ + struct at_addr laddr, /* local net and node */ + raddr; /* remote net and node */ + int ddp_flags; /* generic IP/datagram flags */ + caddr_t at_ppcb; /* pointer to per-protocol pcb */ + + /* from the gref structure */ + + void *info; + gbuf_t *ichead; + gbuf_t *ictail; + gbuf_t *rdhead; + gbuf_t *rdtail; + unsigned char proto; /* old-style ATPROTO_* */ + unsigned char errno; + unsigned short sevents; + int pid; + atlock_t lock; + atevent_t event; + atevent_t iocevent; + int (*writeable)(); + int (*readable)(); + struct selinfo si; /* BSD 4.4 selinfo structure for + selrecord/selwakeup */ +}; + +#define sotoatpcb(so)((struct atpcb *)(so)->so_pcb) + +/* ddp_flags */ +#define DDPFLG_CHKSUM 0x01 /* DDP checksums to be used on this connection */ +#define DDPFLG_SLFSND 0x02 /* packets sent to the cable-multicast address + on this 
socket should be looped back */ +#define DDPFLG_HDRINCL 0x08 /* user supplies entire DDP header */ +#define DDPFLG_STRIPHDR 0x200 /* drop DDP header on receive (raw) */ + +#ifdef KERNEL +typedef struct atpcb gref_t; + +int at_pcballoc __P((struct socket *, struct atpcb *)); +int at_pcbdetach __P((struct atpcb *)); +int at_pcbbind __P((struct atpcb *, struct sockaddr *)); +#endif diff --git a/bsd/netat/at_proto.c b/bsd/netat/at_proto.c new file mode 100644 index 000000000..3a8cb6055 --- /dev/null +++ b/bsd/netat/at_proto.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. 
+ */ + +/* at_proto.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +struct domain atalkdomain; + +extern int ddp_pru_abort(struct socket *so); + +extern int ddp_pru_attach(struct socket *so, int proto, + struct proc *p); +extern int ddp_pru_bind(struct socket *so, struct sockaddr *nam, + struct proc *p); +extern int ddp_pru_connect(struct socket *so, struct sockaddr *nam, + struct proc *p); + +extern int ddp_pru_control(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p); +extern int ddp_pru_detach(struct socket *so); +extern int ddp_pru_disconnect(struct socket *so); + +extern int ddp_pru_peeraddr(struct socket *so, + struct sockaddr **nam); + +extern int ddp_pru_send(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p); + +extern int ddp_pru_shutdown(struct socket *so); +extern int ddp_pru_sockaddr(struct socket *so, + struct sockaddr **nam); + +/* + * Dummy usrreqs struct created by Ted for FreeBSD 3.x integration. + * Fill in supported functions as appropriate. 
+ */ +struct pr_usrreqs ddp_usrreqs = { + ddp_pru_abort, pru_accept_notsupp, ddp_pru_attach, ddp_pru_bind, + ddp_pru_connect, pru_connect2_notsupp, ddp_pru_control, ddp_pru_detach, + ddp_pru_disconnect, pru_listen_notsupp, ddp_pru_peeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, ddp_pru_send, pru_sense_null, ddp_pru_shutdown, + ddp_pru_sockaddr, sosend, soreceive, sopoll +}; + +struct protosw atalksw[] = { + { SOCK_RAW, &atalkdomain, /*protocol*/ 0, PR_ATOMIC|PR_ADDR, + /*input*/ 0, /*output*/ 0, /*clinput*/ 0, ddp_ctloutput, + /*ousrreq*/ 0, + ddp_init, /*fastto*/ 0, /*slowto*/ 0, /*drain*/ 0, + /*sysctl*/ 0, &ddp_usrreqs + } +}; + +struct domain atalkdomain = +{ AF_APPLETALK, "appletalk", 0, 0, 0, + atalksw, 0, + 0, 0, 0, + DDP_X_HDR_SIZE, 0 +}; + +SYSCTL_NODE(_net, PF_APPLETALK, appletalk, CTLFLAG_RW, 0, "AppleTalk Family"); + diff --git a/bsd/netat/at_snmp.h b/bsd/netat/at_snmp.h new file mode 100644 index 000000000..fbabd69c4 --- /dev/null +++ b/bsd/netat/at_snmp.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _NETAT_AT_SNMP_H_ +#define _NETAT_AT_SNMP_H_ + +#define MAX_PHYS_ADDR_SIZE 6 /* maximum physical addr size */ +#define MAX_IFS 25 /* max # interfaces */ +#define IF_NAME_SIZE 6 /* max name length of I/F name */ +#define DDP_ADDR_SIZE 3 +#define ZONE_SIZE NBP_NVE_STR_SIZE +#define OBJECT_SIZE NBP_NVE_STR_SIZE +#define TYPE_SIZE NBP_NVE_STR_SIZE +#define PORT_DESC_SIZE 50 +#define UPDATE_IF_CHANGED 1 /* for ioctls */ +#define UPDATE_UNCONDITIONAL 2 +#define SNMP_NBP_HEADER_SIZE (sizeof(snmpNbpTable_t) - sizeof(snmpNbpEntry_t)) + +typedef struct snmpIfCfg { + u_short ifc_aarpSize; /* AARP table size for this I/F */ + u_short ifc_addrSize; /* Mac address size in bytes */ + u_short ifc_type; /* port type */ + at_net_al ifc_start; /* net range start */ + at_net_al ifc_end; /* net range end */ + struct at_addr ifc_ddpAddr; /* ddp address of port */ + u_short ifc_status; /* port status */ + u_short ifc_netCfg; + u_short ifc_zoneCfg; + at_nvestr_t ifc_zoneName; + u_short ifc_index; + char ifc_name[IF_NAME_SIZE]; /* I/F name (e.g. 
ent0 */ +} snmpIfCfg_t; + + +typedef struct snmpCfg { + int pad; /* pad for UPDATE flag when ioctl issued */ + u_int cfg_flags; /* at_state flags */ + int cfg_ifCnt; /* # I/F's up */ + snmpIfCfg_t cfg_ifCfg[MAX_IFS]; +} snmpCfg_t; + +typedef struct snmpAarpEnt { + struct at_addr ap_ddpAddr; + u_char ap_physAddr[MAX_PHYS_ADDR_SIZE]; +}snmpAarpEnt_t; + +typedef struct snmpAarp { /* aarp info for 1 I/F */ + time_t at_time; /* the time() we got this table */ + int at_ifno; /* the (SNMP) I/F number of this table */ + int at_maxSize; /* allocated size of as_table in entries */ + int at_usedSize; /* size of at_table used portion */ + snmpAarpEnt_t *at_table; +} snmpAarp_t; + +typedef struct snmpFlags { + int lap_changed; /* set when any I/F state changes */ + int rtmpAdded; /* set when rtmp entry ADDED */ + int zipAdded; /* set when zip entry ADDED */ +} snmpFlags_t; + +typedef struct snmpNbpEntry { + at_nvestr_t nbpe_object; + at_nvestr_t nbpe_type; +}snmpNbpEntry_t; + +typedef struct snmpNbpTable { + int nbpt_entries; + at_nvestr_t nbpt_zone; + snmpNbpEntry_t nbpt_table[1]; +}snmpNbpTable_t; + + + + +typedef struct snmpStats { + + /* ddp group */ + u_int dd_outReq; + u_int dd_outShort; + u_int dd_outLong; + u_int dd_inTotal; + u_int dd_fwdReq; + u_int dd_inLocal; + u_int dd_noHandler; + u_int dd_noRoutes; + u_int dd_tooShort; + u_int dd_tooLong; + u_int dd_inBcastErr; + u_int dd_shortErr; + u_int dd_hopCount; + u_int dd_checkSum; + + /* ATEcho group */ + + u_int ec_echoReq; + u_int ec_echoReply; +} snmpStats_t; + +#define SNMP_TYPE_OTHER 1 +#define SNMP_TYPE_LOCAL 2 +#define SNMP_TYPE_ETHER1 3 +#define SNMP_TYPE_ETHER2 4 +#define SNMP_TYPE_TOKEN 5 +#define SNMP_TYPE_IP 6 +#define SNMP_TYPE_SERIALPPP 7 +#define SNMP_TYPE_SERIALNONSTD 8 +#define SNMP_TYPE_VIRTUAL 9 +#define SNMP_CFG_CONFIGURED 1 +#define SNMP_CFG_GARNERED 2 +#define SNMP_CFG_GUESSED 3 +#define SNMP_CFG_UNCONFIG 4 + +#define SNMP_OBJ_TYPE_AARP 0x0100 +#define SNMP_OBJ_TYPE_ECHO 0x0200 +#define 
SNMP_OBJ_TYPE_PORT 0x0300 +#define SNMP_OBJ_TYPE_DDP 0x0400 +#define SNMP_OBJ_TYPE_RTMP 0x0500 +#define SNMP_OBJ_TYPE_ZIP 0x0600 +#define SNMP_OBJ_TYPE_NBP 0x0700 +#define SNMP_OBJ_TYPE_MASK 0x0f00 + +#define AARPIFINDEX 2 + SNMP_OBJ_TYPE_AARP +#define AARPNETADDRESS 3 + SNMP_OBJ_TYPE_AARP +#define AARPPHYSADDRESS 4 + SNMP_OBJ_TYPE_AARP +#define ATECHOREPLIES 6 + SNMP_OBJ_TYPE_AARP +#define ATECHOREQUESTS 7 + SNMP_OBJ_TYPE_AARP +#define ATPORTDESCR 8 + SNMP_OBJ_TYPE_PORT +#define ATPORTIFINDEX 10 + SNMP_OBJ_TYPE_PORT +#define ATPORTINDEX 11 + SNMP_OBJ_TYPE_PORT +#define ATPORTNETADDRESS 12 + SNMP_OBJ_TYPE_PORT +#define ATPORTNETCONFIG 13 + SNMP_OBJ_TYPE_PORT +#define ATPORTNETEND 14 + SNMP_OBJ_TYPE_PORT +#define ATPORTNETSTART 15 + SNMP_OBJ_TYPE_PORT +#define ATPORTSTATUS 16 + SNMP_OBJ_TYPE_PORT +#define ATPORTTYPE 18 + SNMP_OBJ_TYPE_PORT +#define ATPORTZONE 19 + SNMP_OBJ_TYPE_PORT +#define ATPORTZONECONFIG 20 + SNMP_OBJ_TYPE_PORT +#define DDPBROADCASTERRORS 21 + SNMP_OBJ_TYPE_DDP +#define DDPCHECKSUMERRORS 22 + SNMP_OBJ_TYPE_DDP +#define DDPFORWREQUESTS 23 + SNMP_OBJ_TYPE_DDP +#define DDPHOPCOUNTERRORS 24 + SNMP_OBJ_TYPE_DDP +#define DDPINLOCALDATAGRAMS 25 + SNMP_OBJ_TYPE_DDP +#define DDPINRECEIVES 26 + SNMP_OBJ_TYPE_DDP +#define DDPNOPROTOCOLHANDLERS 27 + SNMP_OBJ_TYPE_DDP +#define DDPOUTLONGS 28 + SNMP_OBJ_TYPE_DDP +#define DDPOUTNOROUTES 29 + SNMP_OBJ_TYPE_DDP +#define DDPOUTREQUESTS 30 + SNMP_OBJ_TYPE_DDP +#define DDPOUTSHORTS 31 + SNMP_OBJ_TYPE_DDP +#define DDPSHORTDDPERRORS 32 + SNMP_OBJ_TYPE_DDP +#define DDPTOOLONGERRORS 33 + SNMP_OBJ_TYPE_DDP +#define DDPTOOSHORTERRORS 34 + SNMP_OBJ_TYPE_DDP +#define KIPBCASTADDR 35 +#define KIPCORE 36 +#define KIPENTRY 37 +#define KIPHOPCOUNT 38 +#define KIPNETEND 39 +#define KIPNETSTART 40 +#define KIPNEXTHOP 41 +#define KIPSHARE 42 +#define KIPSTATE 43 +#define KIPTABLE 44 +#define KIPTYPE 45 +#define LLAPCOLLISIONS 46 +#define LLAPDEFERS 47 +#define LLAPENTRY 48 +#define LLAPFCSERRORS 49 +#define LLAPIFINDEX 50 
+#define LLAPINERRORS 51 +#define LLAPINLENGTHERRORS 52 +#define LLAPINNOHANDLERS 53 +#define LLAPINPKTS 54 +#define LLAPNODATAERRORS 55 +#define LLAPOUTPKTS 56 +#define LLAPRANDOMCTSERRORS 57 +#define NBPINDEX 60 + SNMP_OBJ_TYPE_NBP +#define NBPOBJECT 61 + SNMP_OBJ_TYPE_NBP +#define NBPSTATE 62 + SNMP_OBJ_TYPE_NBP +#define NBPTABLE 63 + SNMP_OBJ_TYPE_NBP +#define NBPTYPE 64 + SNMP_OBJ_TYPE_NBP +#define NBPZONE 65 + SNMP_OBJ_TYPE_NBP +#define RTMPHOPS 67 + SNMP_OBJ_TYPE_RTMP +#define RTMPNEXTHOP 68 + SNMP_OBJ_TYPE_RTMP +#define RTMPPORT 69 + SNMP_OBJ_TYPE_RTMP +#define RTMPRANGEEND 70 + SNMP_OBJ_TYPE_RTMP +#define RTMPRANGESTART 71 + SNMP_OBJ_TYPE_RTMP +#define RTMPSTATE 72 + SNMP_OBJ_TYPE_RTMP +#define RTMPTYPE 74 + SNMP_OBJ_TYPE_RTMP +#define ZIPZONEINDEX 77 + SNMP_OBJ_TYPE_ZIP +#define ZIPZONENAME 78 + SNMP_OBJ_TYPE_ZIP +#define ZIPZONENETEND 79 + SNMP_OBJ_TYPE_ZIP +#define ZIPZONENETSTART 80 + SNMP_OBJ_TYPE_ZIP +#define ZIPZONESTATE 81 + SNMP_OBJ_TYPE_ZIP + +#define SNMP_TYPE(var,type) ((((var) & SNMP_OBJ_TYPE_MASK) == (type))) + +#endif /* _NETAT_AT_SNMP_H_ */ diff --git a/bsd/netat/at_var.h b/bsd/netat/at_var.h new file mode 100644 index 000000000..072377c0a --- /dev/null +++ b/bsd/netat/at_var.h @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. + */ + +#include + +/* at_var.h */ + +/* at_var.h contains definitions formerly found in: at/at_lap.h & at/elap.h */ + +/* multicast tracking */ +#define MAX_MCASTS 25 /* #multicast addrs tracked per i/f */ +#define MCAST_TRACK_ADD 1 +#define MCAST_TRACK_DELETE 2 +#define MCAST_TRACK_CHECK 3 + +#define ETHERNET_ADDR_LEN 6 +#define IFNAMESIZ 16 + +/* maximum number of I/F's allowed */ +#define IF_TOTAL_MAX 17 /* max count of any combination of I/F's */ + /* 17 == (1+(4*4)); 9 and 13 would also be + reasonable values */ + +#define FDDI_OR_TOKENRING(i) ((i == IFT_FDDI) || (i == IFT_ISO88025)) + +typedef struct etalk_addr { + u_char etalk_addr_octet[ETHERNET_ADDR_LEN]; +} etalk_addr_t; + +typedef char if_name_t[IFNAMESIZ]; +typedef struct at_ifname_list { + if_name_t at_if[IF_TOTAL_MAX]; +} at_ifnames_t; + +typedef struct at_if_statstics { + u_long fwdBytes; /* bytes received & forwarded */ + u_long fwdPkts; /* pkts received & forwarded */ + u_long droppedBytes; /* bytes received & dropped */ + u_long droppedPkts; /* pkts received & dropped */ + u_long outBytes; /* bytes sent */ + u_long outPkts; /* pkts sent */ + u_long routes; /* count of routes in rtmptable */ +} at_if_statistics_t; + +typedef struct { + u_int unknown_mblks; /* number of unknown streams msgs */ + u_int rcv_bytes; /* number of data bytes received */ + u_int rcv_packets; /* number of packets received */ + u_int xmit_bytes; /* number of data bytes xmited */ + u_int 
xmit_packets; /* number of packets xmited */ +} at_elap_stats_t; + +typedef struct { + char ifr_name[IFNAMESIZ]; + u_int flags; /* misc. port flags, + (ELAP_CFG_xxx on input + ifFlags on output) */ + struct at_addr node; /* Our node number. */ + struct at_addr router; /* Our router. */ + u_short netStart; /* network start range */ + u_short netEnd; /* network ending range */ + at_nvestr_t zonename; +} at_if_cfg_t; + +typedef struct { + at_entity_t name; + at_inet_t addr; /* net and node are ignored, except in + multihoming mode where "addr" is used + to specify the interface. */ + u_char ddptype; + long unique_nbp_id; +} at_nbp_reg_t; + +typedef struct { + char ifr_name[IFNAMESIZ]; + at_nvestr_t zonename; +} at_def_zone_t; + +typedef struct zone_usage { + int zone_index; /* index in local_zones */ + at_nvestr_t zone_name; /* the zone name & len */ + int zone_home; /* used only to set zones in + router mode */ + at_ifnames_t zone_iflist; /* list of interfaces for + this zone. */ + char usage[IF_TOTAL_MAX]; /* I/F usage (set if + I/F in this zone) */ +} zone_usage_t; + +typedef struct { + short multihome; + short rtmp_table_sz; + short zone_table_sz; + short router_mix; +} at_router_params_t; + +typedef struct at_kern_err { + int error; /* kernel error # (KE_xxx) */ + int port1; + int port2; + char name1[IFNAMESIZ]; + char name2[IFNAMESIZ]; + u_short net; + u_char node; + u_short netr1b, netr1e; /* net range 1 begin & end */ + u_short netr2b, netr2e; /* net range 2 begin & end */ + u_char rtmp_id; +} at_kern_err_t; + +#define KE_CONF_RANGE 1 +#define KE_CONF_SEED_RNG 2 +#define KE_CONF_SEED1 3 +#define KE_CONF_SEED_NODE 4 +#define KE_NO_ZONES_FOUND 5 +#define KE_NO_SEED 6 +#define KE_INVAL_RANGE 7 +#define KE_SEED_STARTUP 8 +#define KE_BAD_VER 9 +#define KE_RTMP_OVERFLOW 10 +#define KE_ZIP_OVERFLOW 11 + +/* + * Interface address, AppleTalk version. One of these structures + * is allocated for each AppleTalk address on an interface. 
+ * + * The ifaddr structure contains the protocol-independent part + * of the structure and is assumed to be first, as it is in + * "struct in_ifaddr", defined in bsd/netinet/in_var.h. + */ +typedef struct at_ifaddr { + struct ifaddr aa_ifa; +#define aa_ifp aa_ifa.ifa_ifp +#define aa_flags aa_ifa.ifa_flags + + TAILQ_ENTRY(at_ifaddr) aa_link; /* tailq macro glue */ + + u_long at_dl_tag; /* DLIL tag to be used in packet output */ + u_long aarp_dl_tag; /* DLIL tag for Appletalk ARP */ + + /* from pat_unit_t */ + unsigned char mcast[MAX_MCASTS]; + char xaddr[ETHERNET_ADDR_LEN]; + + /* from elap_specifics_t */ + at_elap_stats_t stats; + + /* The DDP sets these values: */ + u_char ifState; /* State of the interface LAP_* */ + u_short ifThisCableStart; + u_short ifThisCableEnd; + struct at_addr ifARouter; + u_char ifRouterState; + u_int ifFlags; /* Flags, see AT_IFF_* */ + struct sockaddr_at ifNodeAddress; +#define ifThisNode ifNodeAddress.sat_addr + /* AppleTalk node ID is ifNodeAddress.sat_addr*/ + + /* for use by ZIP */ + u_char ifNumRetries; + at_nvestr_t ifZoneName; + + /* Added for routing support */ + int ifPort; /* the unique ddp logical port + number, also index into + at_interfaces[] and ifID_table[] */ + char ifName[IFNAMESIZ]; + /* added to support LAP_IOC_GET_IFID */ + u_short ifDefZone; /* Default Zone index in ZoneTable; used + only in routing/multihome modes to be + able to answer a ZIP GetNetInfo request */ + char ifZipNeedQueries; + /* ZIP/RTMP Query flag */ + char ifRoutingState; /* Port (as a router) state */ + at_if_statistics_t + ifStatistics; /* statistics */ + /* end of elap_if structure */ + + u_short flags; /* port specific flags */ + struct etalk_addr ZoneMcastAddr; + /* zone multicast addr */ + struct etalk_addr cable_multicast_addr; + /* AppleTalk broadcast addr */ + + struct at_addr initial_addr; /* temporary value used during startup */ + at_nvestr_t startup_zone; + int startup_error, /* to get error code back from + ZIPwakeup() / 
AARPwakeup() */ + startup_inprogress; /* to decide whether it's the + middle of an elap_online operation */ + +} at_ifaddr_t; + +#define LAP_OFFLINE 0 /* LAP_OFFLINE MUST be 0 */ +#define LAP_ONLINE 1 +#define LAP_ONLINE_FOR_ZIP 2 +#define LAP_ONLINE_ZONELESS 3 /* for non-home router ports */ + +#define NO_ROUTER 1 /* there's no router around */ +#define ROUTER_WARNING 2 /* there's a router around that */ + /* we are ignoring, warning has */ + /* been issued to the user */ +#define ROUTER_AROUND 3 /* A router is around and we've */ + /* noted its presence */ +#define ROUTER_UPDATED 4 /* for mh tracking of routers. Value decremented + with rtmp aging timer, a value of 4 allows a + minimum of 40 secs to laps before we decide + to revert to cable multicasts */ + +/* AppleTalk IOCTLs */ + + +#define AIOCSTOPATALK _IOWR('a', 1, int) /* stop AppleTalk */ +#define AIOCGETIFCFG _IOWR('a', 2, at_if_cfg_t) /* get AT interface cfg */ +#define AIOCNBPREG _IOWR('a', 3, at_nbp_reg_t) /* NBP register */ +#define AIOCNBPREMOVE _IOW('a', 4, at_nbp_reg_t) /* NBP remove */ +#define AIOCGETSTATE _IOR('a', 5, at_state_t) /* get AT global state */ +#define AIOCSETDEFZONE _IOW('a', 6, at_def_zone_t) + /* in single-port, router, and multihome modes, set default zone */ +#define AIOCSETROUTER _IOW('a', 7, at_router_params_t) +#define AIOCGETROUTER _IOR('a', 8, at_router_params_t) +#define AIOCSIFADDR _IOW('a', 9, at_if_cfg_t) /* init AT interface */ +#define AIOCSTARTROUTER _IOR('a',10, at_kern_err_t) /* start AT routing */ +#define AIOCREGLOCALZN _IOW('a',11, at_nvestr_t) + /* in single-port mode, register local zone in kernel table for + future use in error checking NBP registration */ +#define AIOCSETZNUSAGE _IOW('a',12, zone_usage_t) + /* in router mode, set up each zone for interfaces being seeded */ +#define AIOCGETZNUSAGE _IOWR('a',13, zone_usage_t) + /* in router and multihome modes, given a zone index, report zone name + and interfaces corresponding to that zone */ + +/* values 
for ifFlags */ +#define LAP_STATE_MASK 0xf /* low order bits used to report + IF state, by AIOCGETIFCFG */ +#define AT_IFF_DEFAULT 0x40000 +#define AT_IFF_AURP 0x20000 +#define RTR_NXNET_PORT 0x10000000 /* Non Extended net port */ +#define RTR_XNET_PORT 0x20000000 /* Extended net port */ +#define RTR_SEED_PORT 0x40000000 /* Seed port require config net values*/ + +/* elap_cfg 'flags' defines */ +#define ELAP_CFG_ZONELESS 0x01 /* true if we shouldn't set a zone + (to avoid generating a zip_getnetinfo + when routing) */ +#define ELAP_CFG_HOME 0x02 /* designate home port (one allowed) */ +#define ELAP_CFG_SEED 0x08 /* set if it's a seed port */ + +#ifdef KERNEL +extern TAILQ_HEAD(at_ifQueueHd, at_ifaddr) at_ifQueueHd; + +int at_control __P((struct socket *, u_long, caddr_t, struct ifnet *)); +int ddp_usrreq __P((struct socket *, int, struct mbuf *, struct mbuf *, + struct mbuf *)); +int ddp_ctloutput __P((struct socket *, struct sockopt *)); +void ddp_init __P((void)); +void ddp_slowtimo __P((void)); +#endif diff --git a/bsd/netat/atalk.exp b/bsd/netat/atalk.exp new file mode 100644 index 000000000..8bad4de70 --- /dev/null +++ b/bsd/netat/atalk.exp @@ -0,0 +1,9 @@ +#!/unix +* +ATsocket syscall +ATgetmsg syscall +ATputmsg syscall +ATPsndreq syscall +ATPsndrsp syscall +ATPgetreq syscall +ATPgetrsp syscall diff --git a/bsd/netat/atalk.imp b/bsd/netat/atalk.imp new file mode 100644 index 000000000..d36d8e6e9 --- /dev/null +++ b/bsd/netat/atalk.imp @@ -0,0 +1,9 @@ +#!/unix +* +sys_ATsocket +sys_ATgetmsg +sys_ATputmsg +sys_ATPsndreq +sys_ATPsndrsp +sys_ATPgetreq +sys_ATPgetrsp diff --git a/bsd/netat/atp.h b/bsd/netat/atp.h new file mode 100644 index 000000000..72dd34ae0 --- /dev/null +++ b/bsd/netat/atp.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ORIGINS: 82 + * + * (C) COPYRIGHT Apple Computer, Inc. 1992-1996 + * All Rights Reserved + * + */ + +/* Definitions for ATP protocol and streams module, per + * AppleTalk Transaction Protocol documentation from + * `Inside AppleTalk', July 14, 1986. + */ + +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + * + * The information contained herein is subject to change without + * notice and should not be construed as a commitment by Apple + * Computer, Inc. Apple Computer, Inc. assumes no responsibility + * for any errors that may appear. + * + * Confidential and Proprietary to Apple Computer, Inc. 
+ */ + +#ifndef _NETAT_ATP_H_ +#define _NETAT_ATP_H_ + +/* ATP function codes */ + +#define ATP_CMD_TREQ 0x01 /* TRequest packet */ +#define ATP_CMD_TRESP 0x02 /* TResponse packet */ +#define ATP_CMD_TREL 0x03 /* TRelease packet */ + +/* Miscellaneous definitions */ + +#define ATP_DEF_RETRIES 8 /* Default for maximum retry count */ +#define ATP_DEF_INTERVAL 2 /* Default for retry interval in seconds */ + +#define ATP_TRESP_MAX 8 /* Maximum number of Tresp pkts */ + +#define ATP_HDR_SIZE 8 /* Size of the ATP header */ +#define ATP_DATA_SIZE 578 /* Maximum size of the ATP data area */ + +/* Consts for asynch support */ +#define ATP_ASYNCH_REQ 1 +#define ATP_ASYNCH_RESP 2 + +/* Timer values for XO release timers */ +#define ATP_XO_DEF_REL_TIME 0 +#define ATP_XO_30SEC 0 +#define ATP_XO_1MIN 1 +#define ATP_XO_2MIN 2 +#define ATP_XO_4MIN 3 +#define ATP_XO_8MIN 4 + +typedef struct { + unsigned cmd : 2, + xo : 1, + eom : 1, + sts : 1, + xo_relt : 3; + u_char bitmap; + ua_short tid; + ua_long user_bytes; + u_char data[ATP_DATA_SIZE]; +} at_atp_t; + +#define ATP_ATP_HDR(c) ((at_atp_t *)(&((at_ddp_t *)(c))->data[0])) + +#define TOTAL_ATP_HDR_SIZE (ATP_HDR_SIZE+DDP_X_HDR_SIZE) +#define ATP_CLEAR_CONTROL(c) (*(char *)(c) = 0) + +/* ATP ioctl interface */ + +/* Structure for the atp_set_default call */ + +#define ATP_INFINITE_RETRIES 0xffffffff /* means retry forever + * in the def_retries field + */ + +struct atp_set_default { + u_int def_retries; /* number of retries for a request */ + u_int def_rate; /* retry rate (in seconds/100) NB: the + * system may not be able to resolve + * delays of 100th of a second but will + * instead make a 'best effort' + */ + struct atpBDS *def_bdsp; /* BDS structure associated with this req */ + u_int def_BDSlen; /* size of BDS structure */ +}; + + +/* Return header from requests */ + +struct atp_result { + u_short count; /* the number of packets */ + u_short hdr; /* offset to header in buffer */ + u_short offset[8]; /* offset to the Nth packet 
in the buffer */ + u_short len[8]; /* length of the Nth packet */ +}; + +struct atpBDS { + ua_short bdsBuffSz; + ua_long bdsBuffAddr; + ua_short bdsDataSz; + unsigned char bdsUserData[4]; +}; + + +typedef struct { + u_short at_atpreq_type; + at_inet_t at_atpreq_to; + u_char at_atpreq_treq_user_bytes[4]; + u_char *at_atpreq_treq_data; + u_short at_atpreq_treq_length; + u_char at_atpreq_treq_bitmap; + u_char at_atpreq_xo; + u_char at_atpreq_xo_relt; + u_short at_atpreq_retry_timeout; + u_short at_atpreq_maximum_retries; + u_char at_atpreq_tresp_user_bytes[ATP_TRESP_MAX][4]; + u_char *at_atpreq_tresp_data[ATP_TRESP_MAX]; + u_short at_atpreq_tresp_lengths[ATP_TRESP_MAX]; + u_long at_atpreq_debug[4]; + u_short at_atpreq_tid; + u_char at_atpreq_tresp_bitmap; + u_char at_atpreq_tresp_eom_seqno; + u_char at_atpreq_got_trel; +} at_atpreq; + + +/* The ATP module ioctl commands */ + +#define AT_ATP_CANCEL_REQUEST (('|'<<8)|1) +#define AT_ATP_ISSUE_REQUEST (('|'<<8)|2) /* ALO */ +#define AT_ATP_ISSUE_REQUEST_DEF (('|'<<8)|3) /* XO */ +#define AT_ATP_ISSUE_REQUEST_DEF_NOTE (('|'<<8)|4) /* XO & nowait -- not needed*/ +#define AT_ATP_ISSUE_REQUEST_NOTE (('|'<<8)|5) /* ALO & nowait */ +#define AT_ATP_GET_POLL (('|'<<8)|6) +#define AT_ATP_RELEASE_RESPONSE (('|'<<8)|7) +#define AT_ATP_REQUEST_COMPLETE (('|'<<8)|8) +#define AT_ATP_SEND_FULL_RESPONSE (('|'<<8)|9) /* not used */ +#define AT_ATP_BIND_REQ (('|'<<8)|10) +#define AT_ATP_GET_CHANID (('|'<<8)|11) +#define AT_ATP_PEEK (('|'<<8)|12) +#define AT_ATP_ISSUE_REQUEST_TICKLE (('|'<<8)|13) /* ALO & nowait */ + +/* These macros don't really depend here, but since they're used only by the + * old ATP and old PAP, they're put here. Unisoft PAP includes this file. 
+ */ +#define R16(x) UAS_VALUE(x) +#define W16(x,v) UAS_ASSIGN(x, v) +#define C16(x,v) UAS_UAS(x, v) + +/* + * these are the dispatch codes for + * the new atp_control system call + */ +#define ATP_SENDREQUEST 0 +#define ATP_GETRESPONSE 1 +#define ATP_SENDRESPONSE 2 +#define ATP_GETREQUEST 3 + +#ifdef KERNEL + + + +/* + * Stuff for accessing protocol headers + */ +#define AT_DDP_HDR(m) ((at_ddp_t *)(gbuf_rptr(m))) +#define AT_ATP_HDR(m) ((at_atp_t *)(&((at_ddp_t *)(gbuf_rptr(m)))->data[0])) + +/* + * Masks for accessing/manipulating the bitmap field in atp headers + */ + +#ifdef ATP_DECLARE +unsigned char atp_mask [] = { + 0x01, 0x02, 0x04, 0x08, + 0x10, 0x20, 0x40, 0x80, +}; + +unsigned char atp_lomask [] = { + 0x00, 0x01, 0x03, 0x07, + 0x0f, 0x1f, 0x3f, 0x7f, + 0xff +}; +#else +extern unsigned char atp_mask []; +extern unsigned char atp_lomask []; +#endif /* ATP_DECLARE */ + +/* + * doubly linked queue types and primitives + */ + +#define ATP_Q_ENTER(hdr, object, entry) { \ + if ((hdr).head) { \ + (hdr).head->entry.prev = (object); \ + (object)->entry.next = (hdr).head; \ + } else { \ + (hdr).tail = (object); \ + (object)->entry.next = NULL; \ + } \ + (object)->entry.prev = NULL; \ + (hdr).head = (object); \ + } + +#define ATP_Q_APPEND(hdr, object, entry) { \ + if ((hdr).head) { \ + (hdr).tail->entry.next = (object); \ + (object)->entry.prev = (hdr).tail; \ + } else { \ + (hdr).head = (object); \ + (object)->entry.prev = NULL; \ + } \ + (object)->entry.next = NULL; \ + (hdr).tail = (object); \ + } + +#define ATP_Q_REMOVE(hdr, object, entry) { \ + if ((object)->entry.prev) { \ + (object)->entry.prev->entry.next = (object)->entry.next;\ + } else { \ + (hdr).head = (object)->entry.next; \ + } \ + if ((object)->entry.next) { \ + (object)->entry.next->entry.prev = (object)->entry.prev;\ + } else { \ + (hdr).tail = (object)->entry.prev; \ + } \ + } + +struct atp_rcb_qhead { + struct atp_rcb *head; + struct atp_rcb *tail; +}; + +struct atp_rcb_q { + struct atp_rcb 
*prev; + struct atp_rcb *next; +}; + +struct atp_trans_qhead { + struct atp_trans *head; + struct atp_trans *tail; +}; + +struct atp_trans_q { + struct atp_trans *prev; + struct atp_trans *next; +}; + +/* + * Locally saved remote node address + */ + +struct atp_socket { + u_short net; + at_node node; + at_socket socket; +}; + +/* + * transaction control block (local context at requester end) + */ + +struct atp_trans { + struct atp_trans_q tr_list; /* trans list */ + struct atp_state *tr_queue; /* state data structure */ + gbuf_t *tr_xmt; /* message being sent */ + gbuf_t *tr_rcv[8]; /* message being rcvd */ + unsigned int tr_retry; /* # retries left */ + unsigned int tr_timeout; /* timer interval */ + char tr_state; /* current state */ + char tr_rsp_wait; /* waiting for transaction response */ + char filler[2]; + unsigned char tr_xo; /* execute once transaction */ + unsigned char tr_bitmap; /* requested bitmask */ + unsigned short tr_tid; /* transaction id */ + struct atp_socket tr_socket; /* the remote socket id */ + struct atp_trans_q tr_snd_wait; /* list of transactions waiting + for space to send a msg */ + at_socket tr_local_socket; + at_node tr_local_node; + at_net tr_local_net; + gbuf_t *tr_bdsp; /* bds structure pointer */ + unsigned int tr_tmo_delta; + void (*tr_tmo_func)(); + struct atp_trans *tr_tmo_next; + struct atp_trans *tr_tmo_prev; + atlock_t tr_lock; + atevent_t tr_event; +}; + +#define TRANS_TIMEOUT 0 /* waiting for a reply */ +#define TRANS_REQUEST 1 /* waiting to send a request */ +#define TRANS_RELEASE 2 /* waiting to send a release */ +#define TRANS_DONE 3 /* done - waiting for poll to complete */ +#define TRANS_FAILED 4 /* done - waiting for poll to report failure */ + +/* + * reply control block (local context at repling end) + */ + +struct atp_rcb { + struct atp_rcb_q rc_list; /* rcb list */ + struct atp_rcb_q rc_tlist; + struct atp_state *rc_queue; /* state data structure */ + gbuf_t *rc_xmt; /* replys being sent */ + gbuf_t *rc_ioctl; /* 
waiting ioctl */ + char rc_snd[8]; /* replys actually to be sent */ + int rc_pktcnt; /* no of pkts in this trans */ + short rc_state; /* current state */ + unsigned char rc_xo; /* execute once transaction */ + at_node rc_local_node; + at_net rc_local_net; + short rc_rep_waiting; /* in the reply wait list */ + int rc_timestamp; /* reply timer */ + unsigned char rc_bitmap; /* replied bitmask */ + unsigned char rc_not_sent_bitmap; /* replied bitmask */ + unsigned short rc_tid; /* transaction id */ + struct atp_socket rc_socket; /* the remote socket id */ +}; + +#define RCB_UNQUEUED 0 /* newly allocated, not q'd */ +#define RCB_RESPONDING 2 /* waiting all of response from process*/ +#define RCB_RESPONSE_FULL 3 /* got all of response */ +#define RCB_RELEASED 4 /* got our release */ +#define RCB_PENDING 5 /* a no wait rcb is full */ +#define RCB_NOTIFIED 6 +#define RCB_SENDING 7 /* we're currently xmitting this trans */ + +/* + * socket state (per module data structure) + */ + +struct atp_state { + gref_t *atp_gref; /* must be the first entry */ + int atp_pid; /* process id, must be the second entry */ + gbuf_t *atp_msgq; /* data msg, must be the third entry */ + unsigned char dflag; /* structure flag, must be the fourth entry */ + unsigned char filler; + short atp_socket_no; + short atp_flags; /* general flags */ + struct atp_trans_qhead atp_trans_wait; /* pending transaction list */ + struct atp_state *atp_trans_waiting; /* list of atps waiting for a + free transaction */ + unsigned int atp_retry; /* retry count */ + unsigned int atp_timeout; /* retry timeout */ + struct atp_state *atp_rcb_waiting; + struct atp_rcb_qhead atp_rcb; /* active rcbs */ + struct atp_rcb_qhead atp_attached; /* rcb's waiting to be read */ + atlock_t atp_lock; + atevent_t atp_event; + atlock_t atp_delay_lock; + atevent_t atp_delay_event; +}; + + +/* + * atp_state flag definitions + */ +#define ATP_CLOSING 0x08 /* atp stream in process of closing */ + + +/* + * tcb/rcb/state allocation queues + 
*/ + +/* + * Size defines; must be outside following #ifdef to permit + * debugging code to reference independent of ATP_DECLARE + */ +#define NATP_RCB 512 /* the number of ATP RCBs at once */ +#define NATP_STATE 192 /* the number of ATP sockets open at once */ + /* note: I made NATP_STATE == NSOCKETS */ + +#ifdef ATP_DECLARE +struct atp_trans *atp_trans_free_list = NULL; /* free transactions */ +struct atp_rcb *atp_rcb_free_list = NULL; /* free rcbs */ +static struct atp_state *atp_free_list = NULL; /* free atp states */ +static struct atp_rcb atp_rcb_data[NATP_RCB]; +static struct atp_state atp_state_data[NATP_STATE]; + +#else +extern struct atp_trans *atp_trans_free_list; /* free transactions */ +extern struct atp_rcb *atp_rcb_free_list; /* free rcbs */ +extern struct atp_state *atp_free_list; /* free atp states */ +extern struct atp_rcb atp_rcb_data[]; +extern struct atp_state atp_state_data[]; + +extern void atp_req_timeout(); +extern void atp_rcb_timer(); +extern void atp_x_done(); +extern struct atp_rcb *atp_rcb_alloc(); +extern struct atp_trans *atp_trans_alloc(); +#endif /* ATP_DECLARE */ + +/* prototypes */ +void atp_send_req(gref_t *, gbuf_t *); +void atp_drop_req(gref_t *, gbuf_t *); +void atp_send_rsp(gref_t *, gbuf_t *, int); +void atp_wput(gref_t *, gbuf_t *); +void atp_rput(gref_t *, gbuf_t *); +void atp_retry_req(gbuf_t *); +void atp_stop(gbuf_t *, int); +void atp_cancel_req(gref_t *, unsigned short); +int atp_open(gref_t *, int); +int atp_bind(gref_t *, unsigned int, unsigned char *); +int atp_close(gref_t *, int); +gbuf_t *atp_build_release(struct atp_trans *); +void atp_req_timeout(struct atp_trans *); +void atp_free(struct atp_trans *); +void atp_x_done(struct atp_trans *); +void atp_send(struct atp_trans *); +void atp_rsp_ind(struct atp_trans *, gbuf_t *); +void atp_trans_free(struct atp_trans *); +void atp_reply(struct atp_rcb *); +void atp_rcb_free(struct atp_rcb *); +void atp_send_replies(struct atp_state *, struct atp_rcb *); +void 
atp_dequeue_atp(struct atp_state *); +int atp_iocack(struct atp_state *, gbuf_t *); +void atp_req_ind(struct atp_state *, gbuf_t *); +int atp_iocnak(struct atp_state *, gbuf_t *, int); +void atp_trp_timer(void *, int); +void atp_timout(void (*func)(), struct atp_trans *, int); +void atp_untimout(void (*func)(), struct atp_trans *); +int atp_tid(struct atp_state *); + +#endif /* KERNEL */ +#endif /* _NETAT_ATP_H_ */ diff --git a/bsd/netat/atp_alloc.c b/bsd/netat/atp_alloc.c new file mode 100644 index 000000000..fc2d97724 --- /dev/null +++ b/bsd/netat/atp_alloc.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Modified for MP, 1996 by Tuyen Nguyen */ +/* + * tcb (transaction) allocation routine. If no transaction data structure + * is available then put the module on a queue of modules waiting + * for transaction structures. When a tcb is available it will be + * removed from this list and its write queue will be scheduled. + * Version 1.4 of atp_alloc.c on 89/02/09 17:53:01 + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/*### MacOSX MCLBYTE is 2048, not 4096 like AIX */ +#define TRPS_PER_BLK 16 + +gbuf_t *atp_resource_m = 0; +extern atlock_t atpgen_lock; + +struct atp_trans *atp_trans_alloc(atp) +struct atp_state *atp; +{ + int s; + int i; + gbuf_t *m; + register struct atp_trans *trp, *trp_array; + + ATDISABLE(s, atpgen_lock); + if (atp_trans_free_list == 0) { + ATENABLE(s, atpgen_lock); + if ((m = gbuf_alloc(TRPS_PER_BLK*sizeof(struct atp_trans),PRI_HI)) == 0) + return (struct atp_trans *)0; + bzero(gbuf_rptr(m), TRPS_PER_BLK*sizeof(struct atp_trans)); + trp_array = (struct atp_trans *)gbuf_rptr(m); + for (i=0; i < TRPS_PER_BLK-1; i++) + trp_array[i].tr_list.next = (struct atp_trans *)&trp_array[i+1]; + ATDISABLE(s, atpgen_lock); + gbuf_cont(m) = atp_resource_m; + atp_resource_m = m; + trp_array[i].tr_list.next = atp_trans_free_list; + atp_trans_free_list = (struct atp_trans *)&trp_array[0]; + } + + trp = atp_trans_free_list; + atp_trans_free_list = trp->tr_list.next; + ATENABLE(s, atpgen_lock); + trp->tr_queue = atp; + trp->tr_state = TRANS_TIMEOUT; + trp->tr_local_node = 0; + ATLOCKINIT(trp->tr_lock); + ATEVENTINIT(trp->tr_event); + + dPrintf(D_M_ATP_LOW, D_L_TRACE, + ("atp_trans_alloc(0x%x): alloc'd trp 0x%x\n", + (u_int) atp, (u_int) trp)); + return trp; +} /* atp_trans_alloc */ + +/* + * tcb free routine - if modules are waiting schedule them + * always called at 'lock' + */ + +void atp_trans_free(trp) +register struct atp_trans *trp; +{ + int s; + + ATDISABLE(s, atpgen_lock); + trp->tr_queue = 0; + trp->tr_list.next = atp_trans_free_list; + atp_trans_free_list = trp; + ATENABLE(s, atpgen_lock); +} + +/* + * This routine allocates a rcb, if none are available it makes sure the + * the write service routine will be called when one is + * always called at 'lock' + */ + +struct atp_rcb 
*atp_rcb_alloc(atp) +struct atp_state *atp; +{ + register struct atp_rcb *rcbp; + int s; + + ATDISABLE(s, atpgen_lock); + if ((rcbp = atp_rcb_free_list) != NULL) { + atp_rcb_free_list = rcbp->rc_list.next; + rcbp->rc_queue = atp; + rcbp->rc_pktcnt = 0; + rcbp->rc_local_node = 0; + } + ATENABLE(s, atpgen_lock); + dPrintf(D_M_ATP_LOW, D_L_TRACE, + ("atp_rcb_alloc: allocated rcbp 0x%x\n", (u_int) rcbp)); + return(rcbp); +} + +/* + * Here we free rcbs, if required reschedule other people waiting for them + * always called at 'lock' + */ + +void atp_rcb_free(rcbp) +register struct atp_rcb *rcbp; +{ + register struct atp_state *atp; + register int i; + register int rc_state; + int s; + + dPrintf(D_M_ATP_LOW, D_L_TRACE, + ("atp_rcb_free: freeing rcbp 0x%x\n", (u_int) rcbp)); + ATDISABLE(s, atpgen_lock); + atp = rcbp->rc_queue; + if ((rc_state = rcbp->rc_state) == -1) { + ATENABLE(s, atpgen_lock); + dPrintf(D_M_ATP, D_L_WARNING, + ("atp_rcb_free(%d): tid=%d,loc=%d,rem=%d\n", + 0, rcbp->rc_tid, + rcbp->rc_socket.socket, atp->atp_socket_no)); + return; + } + rcbp->rc_state = -1; + rcbp->rc_xo = 0; + rcbp->rc_queue = 0; + + if (rcbp->rc_timestamp) { + extern struct atp_rcb_qhead atp_need_rel; + + rcbp->rc_timestamp = 0; + ATP_Q_REMOVE(atp_need_rel, rcbp, rc_tlist); + rcbp->rc_tlist.prev = NULL; + rcbp->rc_tlist.next = NULL; + } + + if (rcbp->rc_xmt) { + gbuf_freem(rcbp->rc_xmt); /* *** bad free is the second mbuf in this chain *** */ + rcbp->rc_xmt = NULL; + for (i=0; i < rcbp->rc_pktcnt; i++) + rcbp->rc_snd[i] = 0; + } + if (rc_state != RCB_UNQUEUED) { + if (rc_state == RCB_PENDING) { + ATP_Q_REMOVE(atp->atp_attached, rcbp, rc_list); + } else { + ATP_Q_REMOVE(atp->atp_rcb, rcbp, rc_list); + } + } + if (rcbp->rc_ioctl) { + gbuf_freem(rcbp->rc_ioctl); + rcbp->rc_ioctl = NULL; + } + rcbp->rc_list.next = atp_rcb_free_list; + atp_rcb_free_list = rcbp; + ATENABLE(s, atpgen_lock); +} diff --git a/bsd/netat/atp_misc.c b/bsd/netat/atp_misc.c new file mode 100644 index 
000000000..b332e1ebf --- /dev/null +++ b/bsd/netat/atp_misc.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern atlock_t atpgen_lock; +void atp_free(); +void atp_send(struct atp_trans *); + +/* + * The request timer retries a request, if all retries are used up + * it returns a NAK + */ + +void +atp_req_timeout(trp) +register struct atp_trans *trp; +{ + int s; + register gbuf_t *m; + gref_t *gref; + struct atp_state *atp; + struct atp_trans *ctrp; + + if ((atp = trp->tr_queue) == 0) + return; + ATDISABLE(s, atp->atp_lock); + if (atp->atp_flags & ATP_CLOSING) { + ATENABLE(s, atp->atp_lock); + return; + } + for (ctrp = atp->atp_trans_wait.head; ctrp; ctrp = ctrp->tr_list.next) { + if (ctrp == trp) + break; + } + if (ctrp != trp) { + ATENABLE(s, atp->atp_lock); + return; + } + + if ((m = gbuf_cont(trp->tr_xmt)) == NULL) + m = trp->tr_xmt; /* issued via the new interface */ + + if (trp->tr_retry == 0) { + trp->tr_state = TRANS_FAILED; + if (m == trp->tr_xmt) { + trp->tr_xmt = NULL; +l_notify: + gbuf_wset(m,1); + *gbuf_rptr(m) = 99; + gbuf_set_type(m, MSG_DATA); + gref = trp->tr_queue->atp_gref; + ATENABLE(s, atp->atp_lock); + atalk_putnext(gref, m); + + return; + } + dPrintf(D_M_ATP_LOW,D_L_INFO, ("atp_req_timeout: skt=%d\n", + trp->tr_local_socket)); + m = trp->tr_xmt; + switch(((ioc_t *)(gbuf_rptr(trp->tr_xmt)))->ioc_cmd) { + case AT_ATP_ISSUE_REQUEST: + trp->tr_xmt = NULL; + if (trp->tr_queue->dflag) + ((ioc_t *)gbuf_rptr(m))->ioc_cmd = AT_ATP_REQUEST_COMPLETE; + else if (trp->tr_bdsp == NULL) { + ATENABLE(s, atp->atp_lock); + gbuf_freem(m); + if (trp->tr_rsp_wait) + thread_wakeup(&trp->tr_event); + break; + } + ATENABLE(s, atp->atp_lock); + atp_iocnak(trp->tr_queue, m, ETIMEDOUT); + atp_free(trp); + return; + + case AT_ATP_ISSUE_REQUEST_NOTE: + case AT_ATP_ISSUE_REQUEST_TICKLE: + trp->tr_xmt = gbuf_cont(m); + gbuf_cont(m) = NULL; + goto l_notify; + } + } else { + 
(AT_ATP_HDR(m))->bitmap = trp->tr_bitmap; + + if (trp->tr_retry != (unsigned int) ATP_INFINITE_RETRIES) + trp->tr_retry--; + ATENABLE(s, atp->atp_lock); + atp_send(trp); + } +} + + +/* + * atp_free frees up a request, cleaning up the queues and freeing + * the request packet + * always called at 'lock' + */ + +void atp_free(trp) +register struct atp_trans *trp; +{ + register struct atp_state *atp; + register int i; + int s; + + dPrintf(D_M_ATP_LOW, D_L_TRACE, + ("atp_free: freeing trp 0x%x\n", (u_int) trp)); + ATDISABLE(s, atpgen_lock); + if (trp->tr_tmo_func) + atp_untimout(atp_req_timeout, trp); + atp = trp->tr_queue; + + ATP_Q_REMOVE(atp->atp_trans_wait, trp, tr_list); + + if (trp->tr_xmt) { + gbuf_freem(trp->tr_xmt); + trp->tr_xmt = NULL; + } + for (i = 0; i < 8; i++) { + if (trp->tr_rcv[i]) { + gbuf_freem(trp->tr_rcv[i]); + trp->tr_rcv[i] = NULL; + } + } + if (trp->tr_bdsp) { + gbuf_freem(trp->tr_bdsp); + trp->tr_bdsp = NULL; + } + + ATENABLE(s, atpgen_lock); + atp_trans_free(trp); + +} /* atp_free */ + + +/* + * atp_send transmits a request packet by queuing it (if it isn't already) and + * scheduling the queue + */ + +void atp_send(trp) +register struct atp_trans *trp; +{ + gbuf_t *m; + struct atp_state *atp; + + dPrintf(D_M_ATP_LOW, D_L_OUTPUT, ("atp_send: trp=0x%x, loc=%d\n", + (u_int) trp->tr_queue, trp->tr_local_socket)); + + if ((atp = trp->tr_queue) != 0) { + if (trp->tr_state == TRANS_TIMEOUT) { + if ((m = gbuf_cont(trp->tr_xmt)) == NULL) + m = trp->tr_xmt; + + /* + * Now either release the transaction or start the timer + */ + if (!trp->tr_retry && !trp->tr_bitmap && !trp->tr_xo) { + m = (gbuf_t *)gbuf_copym(m); + atp_x_done(trp); + } else { + m = (gbuf_t *)gbuf_dupm(m); + + atp_timout(atp_req_timeout, trp, trp->tr_timeout); + } + + if (m) { + trace_mbufs(D_M_ATP_LOW, " m", m); + DDP_OUTPUT(m); + } + } + } +} + + +/* + * atp_reply sends all the available messages in the bitmap again + * by queueing us to the write service routine + */ + +void 
atp_reply(rcbp) +register struct atp_rcb *rcbp; +{ + register struct atp_state *atp; + register int i; + int s; + + if ((atp = rcbp->rc_queue) != 0) { + ATDISABLE(s, atp->atp_lock); + for (i = 0; i < rcbp->rc_pktcnt; i++) { + if (rcbp->rc_bitmap&atp_mask[i]) + rcbp->rc_snd[i] = 1; + else + rcbp->rc_snd[i] = 0; + } + if (rcbp->rc_rep_waiting == 0) { + rcbp->rc_state = RCB_SENDING; + rcbp->rc_rep_waiting = 1; + ATENABLE(s, atp->atp_lock); + atp_send_replies(atp, rcbp); + } else + ATENABLE(s, atp->atp_lock); + } +} + + +/* + * The rcb timer just frees the rcb, this happens when we missed a release for XO + */ + +void atp_rcb_timer() +{ + int s; + register struct atp_rcb *rcbp; + register struct atp_rcb *next_rcbp; + extern struct atp_rcb_qhead atp_need_rel; + extern struct atp_trans *trp_tmo_rcb; + +l_again: + ATDISABLE(s, atpgen_lock); + for (rcbp = atp_need_rel.head; rcbp; rcbp = next_rcbp) { + next_rcbp = rcbp->rc_tlist.next; + + if (abs(time.tv_sec - rcbp->rc_timestamp) > 30) { + ATENABLE(s, atpgen_lock); + atp_rcb_free(rcbp); + goto l_again; + } + } + ATENABLE(s, atpgen_lock); + atp_timout(atp_rcb_timer, trp_tmo_rcb, 10 * HZ); +} + +atp_iocack(atp, m) +struct atp_state *atp; +register gbuf_t *m; +{ + if (gbuf_type(m) == MSG_IOCTL) + gbuf_set_type(m, MSG_IOCACK); + if (gbuf_cont(m)) + ((ioc_t *)gbuf_rptr(m))->ioc_count = gbuf_msgsize(gbuf_cont(m)); + else + ((ioc_t *)gbuf_rptr(m))->ioc_count = 0; + + if (atp->dflag) + asp_ack_reply(atp->atp_gref, m); + else + atalk_putnext(atp->atp_gref, m); +} + +atp_iocnak(atp, m, err) +struct atp_state *atp; +register gbuf_t *m; +register int err; +{ + if (gbuf_type(m) == MSG_IOCTL) + gbuf_set_type(m, MSG_IOCNAK); + ((ioc_t *)gbuf_rptr(m))->ioc_count = 0; + ((ioc_t *)gbuf_rptr(m))->ioc_error = err ? 
err : ENXIO; + ((ioc_t *)gbuf_rptr(m))->ioc_rval = -1; + if (gbuf_cont(m)) { + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + } + + if (atp->dflag) + asp_nak_reply(atp->atp_gref, m); + else + atalk_putnext(atp->atp_gref, m); +} + +/* + * Generate a transaction id for a socket + */ +static int lasttid; +atp_tid(atp) +register struct atp_state *atp; +{ + register int i; + register struct atp_trans *trp; + int s; + + ATDISABLE(s, atpgen_lock); + for (i = lasttid;;) { + i = (i+1)&0xffff; + + for (trp = atp->atp_trans_wait.head; trp; trp = trp->tr_list.next) { + if (trp->tr_tid == i) + break; + } + if (trp == NULL) { + lasttid = i; + ATENABLE(s, atpgen_lock); + return(i); + } + } +} diff --git a/bsd/netat/atp_open.c b/bsd/netat/atp_open.c new file mode 100644 index 000000000..9c8b95e9f --- /dev/null +++ b/bsd/netat/atp_open.c @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ +#define ATP_DECLARE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * The init routine creates all the free lists + * Version 1.4 of atp_open.c on 89/02/09 17:53:11 + */ + +int atp_inited = 0; +struct atp_rcb_qhead atp_need_rel; +atlock_t atpall_lock; +atlock_t atptmo_lock; +atlock_t atpgen_lock; + +/**********/ +int atp_pidM[256]; +gref_t *atp_inputQ[256]; +struct atp_state *atp_used_list; + +int atp_input(mp) + gbuf_t *mp; +{ + register gref_t *gref; + + switch (gbuf_type(mp)) { + case MSG_DATA: + gref = atp_inputQ[((at_ddp_t *)gbuf_rptr(mp))->dst_socket]; + if ((gref == 0) || (gref == (gref_t *)1)) { + dPrintf(D_M_ATP, D_L_WARNING, ("atp_input: no socket, skt=%d\n", + ((at_ddp_t *)gbuf_rptr(mp))->dst_socket)); + gbuf_freem(mp); + return 0; + } + break; + + case MSG_IOCACK: + case MSG_IOCNAK: + gref = (gref_t *)((ioc_t *)gbuf_rptr(mp))->ioc_private; + break; + + case MSG_IOCTL: + default: + dPrintf(D_M_ATP, D_L_WARNING, ("atp_input: unknown msg, type=%d\n", + gbuf_type(mp))); + gbuf_freem(mp); + return 0; + } + + atp_rput(gref, mp); + return 0; +} + +/**********/ +void atp_init() +{ + int i; + + if (!atp_inited) { + atp_inited = 1; + atp_used_list = 0; + for (i = 0; i < NATP_RCB; i++) { + atp_rcb_data[i].rc_list.next = atp_rcb_free_list; + atp_rcb_free_list = &atp_rcb_data[i]; + } + for (i = 0; i < NATP_STATE; i++) { + atp_state_data[i].atp_trans_waiting = atp_free_list; + atp_free_list = &atp_state_data[i]; + } + atp_need_rel.head = NULL; + atp_need_rel.tail = NULL; + + bzero(atp_inputQ, sizeof(atp_inputQ)); + bzero(atp_pidM, sizeof(atp_pidM)); + asp_init(); + } +} + +/* + * The open routine allocates a state structure + */ + +/*ARGSUSED*/ +int atp_open(gref, flag) + gref_t *gref; + int flag; +{ + register struct atp_state *atp; + register int s; + + /* + * If no atp structure available return failure + 
*/ + + ATDISABLE(s, atpall_lock); + if ((atp = atp_free_list) == NULL) { + ATENABLE(s, atpall_lock); + return(EAGAIN); + } + + /* + * Update free list + */ + + atp_free_list = atp->atp_trans_waiting; + ATENABLE(s, atpall_lock); + + /* + * Initialize the data structure + */ + + atp->dflag = 0; + atp->atp_trans_wait.head = NULL; + atp->atp_trans_waiting = NULL; + atp->atp_gref = gref; + atp->atp_retry = 10; + atp->atp_timeout = HZ/8; + atp->atp_rcb_waiting = NULL; + atp->atp_rcb.head = NULL; + atp->atp_flags = T_MPSAFE; + atp->atp_socket_no = -1; + atp->atp_pid = gref->pid; + atp->atp_msgq = 0; + ATLOCKINIT(atp->atp_lock); + ATLOCKINIT(atp->atp_delay_lock); + ATEVENTINIT(atp->atp_event); + ATEVENTINIT(atp->atp_delay_event); + gref->info = (void *)atp; + + /* + * Return success + */ + + if (flag) { + ATDISABLE(s, atpall_lock); + if ((atp->atp_trans_waiting = atp_used_list) != 0) + atp->atp_trans_waiting->atp_rcb_waiting = atp; + atp_used_list = atp; + ATENABLE(s, atpall_lock); + } + return(0); +} + +/* + * The close routine frees all the data structures + */ + +/*ARGSUSED*/ +int atp_close(gref, flag) + gref_t *gref; + int flag; +{ + extern void atp_req_timeout(); + register struct atp_state *atp; + register struct atp_trans *trp; + register struct atp_rcb *rcbp; + register int s; + int socket; + pid_t pid; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + if (atp->atp_msgq) { + gbuf_freem(atp->atp_msgq); + atp->atp_msgq = 0; + } + + ATDISABLE(s, atp->atp_lock); + atp->atp_flags |= ATP_CLOSING; + socket = atp->atp_socket_no; + if (socket != -1) + atp_inputQ[socket] = (gref_t *)1; + + /* + * blow away all pending timers + */ + for (trp = atp->atp_trans_wait.head; trp; trp = trp->tr_list.next) + atp_untimout(atp_req_timeout, trp); + + /* + * Release pending transactions + rcbs + */ + while ((trp = atp->atp_trans_wait.head)) + atp_free(trp); + while ((rcbp = atp->atp_rcb.head)) + atp_rcb_free(rcbp); + while ((rcbp = 
atp->atp_attached.head)) + atp_rcb_free(rcbp); + ATENABLE(s, atp->atp_lock); + + if (flag && (socket == -1)) + atp_dequeue_atp(atp); + + /* + * free the state variable + */ + ATDISABLE(s, atpall_lock); + atp->atp_socket_no = -1; + atp->atp_trans_waiting = atp_free_list; + atp_free_list = atp; + ATENABLE(s, atpall_lock); + + if (socket != -1) { + pid = (pid_t)atp_pidM[socket]; + atp_pidM[socket] = 0; + atp_inputQ[socket] = NULL; + if (pid) + ddp_notify_nbp(socket, pid, DDP_ATP); + } + + return 0; +} diff --git a/bsd/netat/atp_read.c b/bsd/netat/atp_read.c new file mode 100644 index 000000000..200577dbf --- /dev/null +++ b/bsd/netat/atp_read.c @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static void atp_trans_complete(); +void atp_x_done(); +void atp_x_done_funnel(); +extern void atp_req_timeout(); + +/* + * Decide what to do about received messages + * Version 1.7 of atp_read.c on 89/02/09 17:53:16 + */ + +void atp_treq_event(gref) +register gref_t *gref; +{ + register gbuf_t *m; + register struct atp_state *atp; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + + if (atp->dflag) { + if ((m = gbuf_alloc(sizeof(ioc_t), PRI_HI)) != NULL) { + gbuf_set_type(m, MSG_IOCTL); + gbuf_wset(m,sizeof(ioc_t)); + ((ioc_t *)gbuf_rptr(m))->ioc_cmd = AT_ATP_GET_POLL; + atp_wput(gref, m); + } + } + else if ((m = gbuf_alloc(1, PRI_HI)) != NULL) { + *gbuf_rptr(m) = 0; + gbuf_wset(m,1); + atalk_putnext(gref, m); + } + + if (m == 0) + timeout(atp_treq_event, gref, 10); + (void) thread_funnel_set(network_flock, FALSE); +} + +void atp_rput(gref, m) +gref_t *gref; +gbuf_t *m; +{ + register at_atp_t *athp; + register struct atp_state *atp; + register int s, s_gen; + gbuf_t *m_asp = NULL; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + + switch(gbuf_type(m)) { + case MSG_DATA: + /* + * Decode the message, make sure it is an atp + * message + */ + if (((AT_DDP_HDR(m))->type != DDP_ATP) || + (atp->atp_flags & ATP_CLOSING)) { + gbuf_freem(m); + dPrintf(D_M_ATP_LOW, (D_L_INPUT|D_L_ERROR), + ("atp_rput: dropping MSG, not atp\n")); + break; + } + + athp = AT_ATP_HDR(m); + dPrintf(D_M_ATP_LOW, D_L_INPUT, + ("atp_rput MSG_DATA: %s (%d)\n", + (athp->cmd == ATP_CMD_TRESP)? "TRESP": + (athp->cmd == ATP_CMD_TREL)? "TREL": + (athp->cmd == ATP_CMD_TREQ)? 
"TREQ": "unknown", + athp->cmd)); + trace_mbufs(D_M_ATP_LOW, " r", m); + + switch (athp->cmd) { + + case ATP_CMD_TRESP: + { + register struct atp_trans *trp; + register int seqno; + register at_ddp_t *ddp; + + /* + * we just got a response, find the trans record + */ + + ATDISABLE(s, atp->atp_lock); + for (trp = atp->atp_trans_wait.head; trp; trp = trp->tr_list.next) { + if (trp->tr_tid == UAS_VALUE(athp->tid)) + break; + } + + /* + * If we can't find one then ignore the message + */ + seqno = athp->bitmap; + if (trp == NULL) { + ATENABLE(s, atp->atp_lock); + ddp = AT_DDP_HDR(m); + dPrintf(D_M_ATP_LOW, (D_L_INPUT|D_L_ERROR), + ("atp_rput: dropping TRESP, no trp,tid=%d,loc=%d,rem=%d.%d,seqno=%d\n", + UAS_VALUE(athp->tid), + ddp->dst_socket,ddp->src_node,ddp->src_socket,seqno)); + gbuf_freem(m); + return; + } + + /* + * If no longer valid, drop it + */ + if (trp->tr_state == TRANS_FAILED) { + ATENABLE(s, atp->atp_lock); + ddp = AT_DDP_HDR(m); + dPrintf(D_M_ATP_LOW, (D_L_INPUT|D_L_ERROR), + ("atp_rput: dropping TRESP, failed trp,tid=%d,loc=%d,rem=%d.%d\n", + UAS_VALUE(athp->tid), + ddp->dst_socket, ddp->src_node, ddp->src_socket)); + gbuf_freem(m); + return; + } + + /* + * If we have already received it, ignore it + */ + if (!(trp->tr_bitmap&atp_mask[seqno]) || trp->tr_rcv[seqno]) { + ATENABLE(s, atp->atp_lock); + ddp = AT_DDP_HDR(m); + dPrintf(D_M_ATP_LOW, (D_L_INPUT|D_L_ERROR), + ("atp_rput: dropping TRESP, duplicate,tid=%d,loc=%d,rem=%d.%d,seqno=%d\n", + UAS_VALUE(athp->tid), + ddp->dst_socket, ddp->src_node, ddp->src_socket, seqno)); + gbuf_freem(m); + return; + } + + /* + * Update the received packet bitmap + */ + if (athp->eom) + trp->tr_bitmap &= atp_lomask[seqno]; + else + trp->tr_bitmap &= ~atp_mask[seqno]; + + /* + * Save the message in the trans record + */ + trp->tr_rcv[seqno] = m; + + /* + * If it isn't the first message then + * can the header + */ + if (seqno) + gbuf_rinc(m,DDP_X_HDR_SIZE); + + /* + * If we now have all the responses then return + * the 
message to the user + */ + if (trp->tr_bitmap == 0) { + ATENABLE(s, atp->atp_lock); + + /* + * Cancel the request timer and any + * pending transmits + */ + atp_untimout(atp_req_timeout, trp); + + /* + * Send the results back to the user + */ + atp_x_done(trp); + return; + } + if (athp->sts) { + /* + * If they want treq again, send them + */ + ATENABLE(s, atp->atp_lock); + atp_untimout(atp_req_timeout, trp); + atp_send(trp); + return; + } + ATENABLE(s, atp->atp_lock); + return; + } + + case ATP_CMD_TREL: + { register struct atp_rcb *rcbp; + register at_ddp_t *ddp; + + /* + * Search for a matching transaction + */ + ddp = AT_DDP_HDR(m); + + ATDISABLE(s, atp->atp_lock); + for (rcbp = atp->atp_rcb.head; rcbp; rcbp = rcbp->rc_list.next) { + if (rcbp->rc_tid == UAS_VALUE(athp->tid) && + rcbp->rc_socket.node == ddp->src_node && + rcbp->rc_socket.net == NET_VALUE(ddp->src_net) && + rcbp->rc_socket.socket == ddp->src_socket) { + /* + * Mark the rcb released + */ + rcbp->rc_not_sent_bitmap = 0; + if (rcbp->rc_state == RCB_SENDING) + rcbp->rc_state = RCB_RELEASED; + else + { + ddp = 0; + atp_rcb_free(rcbp); + ATENABLE(s, atp->atp_lock); + } + break; + } + } + + if (ddp) + ATENABLE(s, atp->atp_lock); + gbuf_freem(m); + return; + } + + + case ATP_CMD_TREQ: + { register struct atp_rcb *rcbp; + register at_ddp_t *ddp; + gbuf_t *m2; + + /* + * If it is a request message, first + * check to see + * if matches something in our active + * request queue + */ + ddp = AT_DDP_HDR(m); + + ATDISABLE(s, atp->atp_lock); + for (rcbp = atp->atp_rcb.head; rcbp; rcbp = rcbp->rc_list.next) { + if (rcbp->rc_tid == UAS_VALUE(athp->tid) && + rcbp->rc_socket.node == ddp->src_node && + rcbp->rc_socket.net == NET_VALUE(ddp->src_net) && + rcbp->rc_socket.socket == ddp->src_socket) + break; + } + /* + * If this is a new req then do + * something with it + */ + if (rcbp == NULL) { + /* + * see if it matches something in the + * attached request queue + * if it does, just release the message + * and go on 
about our buisness + */ + /* we just did this, why do again? -jjs 4-10-95 */ + for (rcbp = atp->atp_attached.head; rcbp; rcbp = rcbp->rc_list.next) { + if (rcbp->rc_tid == UAS_VALUE(athp->tid) && + rcbp->rc_socket.node == ddp->src_node && + rcbp->rc_socket.net == NET_VALUE(ddp->src_net) && + rcbp->rc_socket.socket == ddp->src_socket) { + ATENABLE(s, atp->atp_lock); + gbuf_freem(m); + dPrintf(D_M_ATP_LOW, D_L_INPUT, + ("atp_rput: dropping TREQ, matches req queue\n")); + return; + } + } + + /* + * assume someone is interested in + * in an asynchronous incoming request + */ + ATENABLE(s, atp->atp_lock); + if ((rcbp = atp_rcb_alloc(atp)) == NULL) { + gbuf_freem(m); + return; + } + rcbp->rc_state = RCB_UNQUEUED; + ATDISABLE(s, atp->atp_lock); + + rcbp->rc_local_node = ddp->dst_node; + NET_NET(rcbp->rc_local_net, ddp->dst_net); + rcbp->rc_socket.socket = ddp->src_socket; + rcbp->rc_socket.node = ddp->src_node; + rcbp->rc_socket.net = NET_VALUE(ddp->src_net); + rcbp->rc_tid = UAS_VALUE(athp->tid); + rcbp->rc_bitmap = athp->bitmap; + rcbp->rc_not_sent_bitmap = athp->bitmap; + rcbp->rc_xo = athp->xo; + /* + * if async then send it as + * data + * otherwise, it is a synchronous ioctl so + * complete it + */ + if (atp->dflag) { /* for ASP? 
*/ + if ((m2 = gbuf_alloc(sizeof(ioc_t), PRI_HI))) { + gbuf_set_type(m2, MSG_DATA); + gbuf_wset(m2,sizeof(ioc_t)); + ((ioc_t *)gbuf_rptr(m2))->ioc_cmd = AT_ATP_GET_POLL; + m_asp = m2; + } + } else if ((m2 = gbuf_alloc(1, PRI_HI))) { + *gbuf_rptr(m2) = 0; + gbuf_wset(m2,1); + atalk_putnext(gref, m2); + } + if (m2 == 0) { + dPrintf(D_M_ATP,D_L_WARNING, + ("atp_rput: out of buffer for TREQ\n")); + timeout(atp_treq_event, gref, 10); + } + rcbp->rc_ioctl = m; + + /* + * move it to the attached list + */ + dPrintf(D_M_ATP_LOW, D_L_INPUT, + ("atp_rput: moving to attached list\n")); + rcbp->rc_state = RCB_PENDING; + ATP_Q_APPEND(atp->atp_attached, rcbp, rc_list); + if (m_asp != NULL) { + ATENABLE(s, atp->atp_lock); + atp_req_ind(atp, m_asp); + return; + } + } else { + dPrintf(D_M_ATP_LOW, D_L_INPUT, + ("atp_rput: found match, state:%d\n", + rcbp->rc_state)); + + /* + * Otherwise we have found a matching request + * look for what to do + */ + switch (rcbp->rc_state) { + case RCB_RESPONDING: + case RCB_RESPONSE_FULL: + /* + * If it is one we have in progress + * (either have all the responses + * or are waiting for them) + * update the bitmap and resend + * the replies + */ + ATDISABLE(s_gen, atpgen_lock); + if (rcbp->rc_timestamp) { + rcbp->rc_timestamp = time.tv_sec; + if (rcbp->rc_timestamp == 0) + rcbp->rc_timestamp = 1; + } + ATENABLE(s_gen, atpgen_lock); + rcbp->rc_bitmap = athp->bitmap; + rcbp->rc_not_sent_bitmap = athp->bitmap; + ATENABLE(s, atp->atp_lock); + gbuf_freem(m); + atp_reply(rcbp); + return; + + case RCB_RELEASED: + default: + /* + * If we have a release or + * we haven't sent any data yet + * ignore the request + */ + ATENABLE(s, atp->atp_lock); + gbuf_freem(m); + return; + } + } + ATENABLE(s, atp->atp_lock); + return; + } + + default: + gbuf_freem(m); + break; + } + break; + + case MSG_IOCACK: + if (atp->dflag) + asp_ack_reply(gref, m); + else + atalk_putnext(gref, m); + break; + + case MSG_IOCNAK: + if (atp->dflag) + asp_nak_reply(gref, m); + else + 
atalk_putnext(gref, m); + break; + + default: + gbuf_freem(m); + } +} /* atp_rput */ + +void +atp_x_done_funnel(trp) +register struct atp_trans *trp; +{ + thread_funnel_set(network_flock, TRUE); + atp_x_done(trp); + (void) thread_funnel_set(network_flock, FALSE); + +} + +void +atp_x_done(trp) +register struct atp_trans *trp; +{ + struct atp_state *atp; + gbuf_t *m; + + + if ( !trp->tr_xo) + atp_trans_complete(trp); + else { + /* + * If execute once send a release + */ + if ((m = (gbuf_t *)atp_build_release(trp)) != NULL) { + AT_DDP_HDR(m)->src_socket = ((struct atp_state *) + trp->tr_queue)->atp_socket_no; + DDP_OUTPUT(m); + /* + * Now send back the transaction reply to the process + * or notify the process if required + */ + atp_trans_complete(trp); + } else { + + atp = trp->tr_queue; + trp->tr_state = TRANS_RELEASE; + timeout(atp_x_done_funnel, trp, 10); + } + } +} + +static void +atp_trans_complete(trp) +register struct atp_trans *trp; +{ register gbuf_t *m; + register int type; + struct atp_state *atp; + + /* we could gbuf_freem(trp->tr_xmt) here if were not planning to + re-use the mbuf later */ + m = trp->tr_xmt; + trp->tr_xmt = NULL; + trp->tr_state = TRANS_DONE; + + if (gbuf_cont(m) == NULL) /* issued via the new interface */ + type = AT_ATP_ISSUE_REQUEST_NOTE; + else { + type = ((ioc_t *)(gbuf_rptr(m)))->ioc_cmd; + /* + * free any data following the ioctl blk + */ + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + } + dPrintf(D_M_ATP_LOW, D_L_INPUT, ("atp_trans_comp: trp=0x%x type = %s\n", + (u_int) trp, + (type==AT_ATP_ISSUE_REQUEST)? "AT_ATP_ISSUE_REQUEST": + (type==AT_ATP_ISSUE_REQUEST_NOTE)? 
"AT_ATP_ISSUE_REQUEST_NOTE" : + "unknown")); + + switch(type) { + case AT_ATP_ISSUE_REQUEST: + atp = trp->tr_queue; + if (atp->dflag) { + ((ioc_t *)gbuf_rptr(m))->ioc_count = 0; + ((ioc_t *)gbuf_rptr(m))->ioc_error = 0; + ((ioc_t *)gbuf_rptr(m))->ioc_rval = trp->tr_tid; + ((ioc_t *)gbuf_rptr(m))->ioc_cmd = AT_ATP_REQUEST_COMPLETE; + gbuf_set_type(m, MSG_IOCTL); + atp_rsp_ind(trp, m); + } else { + if (trp->tr_bdsp == NULL) { + gbuf_freem(m); + if (trp->tr_rsp_wait) + thread_wakeup(&trp->tr_event); + } else { + gbuf_set_type(m, MSG_IOCACK); + ((ioc_t *)gbuf_rptr(m))->ioc_count = 0; + ((ioc_t *)gbuf_rptr(m))->ioc_error = 0; + ((ioc_t *)gbuf_rptr(m))->ioc_rval = 0; + atalk_putnext(trp->tr_queue->atp_gref, m); + } + } + break; + + case AT_ATP_ISSUE_REQUEST_NOTE: + gbuf_wset(m,1); + *gbuf_rptr(m) = 1; + gbuf_set_type(m, MSG_DATA); + atalk_putnext(trp->tr_queue->atp_gref, m); + break; + } +} /* atp_trans_complete */ diff --git a/bsd/netat/atp_write.c b/bsd/netat/atp_write.c new file mode 100644 index 000000000..e0d84e9e1 --- /dev/null +++ b/bsd/netat/atp_write.c @@ -0,0 +1,1893 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ +#define RESOLVE_DBG + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int loop_cnt; /* for debugging loops */ +#define CHK_LOOP(str) { \ + if (loop_cnt++ > 100) { \ + kprintf("%s", str); \ + break; \ + } \ +} + +static void atp_pack_bdsp(struct atp_trans *, struct atpBDS *); +static int atp_unpack_bdsp(struct atp_state *, gbuf_t *, struct atp_rcb *, + int, int); +void atp_retry_req(), atp_trp_clock(), asp_clock(), asp_clock_funnel(), atp_trp_clock_funnel();; + +extern struct atp_rcb_qhead atp_need_rel; +extern int atp_inited; +extern struct atp_state *atp_used_list; +extern asp_scb_t *scb_free_list; +extern atlock_t atpgen_lock; +extern atlock_t atpall_lock; +extern atlock_t atptmo_lock; + +extern gbuf_t *scb_resource_m; +extern gbuf_t *atp_resource_m; +extern gref_t *atp_inputQ[]; +extern int atp_pidM[]; +extern at_ifaddr_t *ifID_home; + +static struct atp_trans *trp_tmo_list; +struct atp_trans *trp_tmo_rcb; + +/* first bds entry gives number of bds entries in total (hack) */ +#define get_bds_entries(m) \ + ((gbuf_len(m) > TOTAL_ATP_HDR_SIZE)? 
\ + (UAS_VALUE(((struct atpBDS *)(AT_ATP_HDR(m)->data))->bdsDataSz)): 0) + +#define atpBDSsize (sizeof(struct atpBDS)*ATP_TRESP_MAX) + +void atp_link() +{ + trp_tmo_list = 0; + trp_tmo_rcb = atp_trans_alloc(0); + atp_timout(atp_rcb_timer, trp_tmo_rcb, 10 * HZ); + atp_trp_clock((void *)&atp_inited); + asp_clock((void *)&atp_inited); +} + +void atp_unlink() +{ + untimeout(asp_clock_funnel, (void *)&atp_inited); + untimeout(atp_trp_clock_funnel, (void *)&atp_inited); + atp_untimout(atp_rcb_timer, trp_tmo_rcb); + trp_tmo_list = 0; + +#ifdef BAD_IDEA + /* allocated in asp_scb_alloc(), which is called + by asp_open() */ + if (scb_resource_m) { + gbuf_freem(scb_resource_m); + scb_resource_m = 0; + scb_free_list = 0; + } + /* allocated in atp_trans_alloc() */ + if (atp_resource_m) { + gbuf_freem(atp_resource_m); + atp_resource_m = 0; + atp_trans_free_list = 0; + } +#endif +} + +/* + * write queue put routine .... filter out other than IOCTLs + * Version 1.8 of atp_write.c on 89/02/09 17:53:26 + */ + +void +atp_wput(gref, m) + register gref_t *gref; + register gbuf_t *m; +{ + register ioc_t *iocbp; + int i, xcnt, s; + struct atp_state *atp; + struct atp_trans *trp; + struct atp_rcb *rcbp; + at_socket skt; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + + switch(gbuf_type(m)) { + case MSG_DATA: + if (atp->atp_msgq) { + gbuf_freem(m); + dPrintf(D_M_ATP, D_L_WARNING, + ("atp_wput: atp_msgq discarded\n")); + } else + atp->atp_msgq = m; + break; + + case MSG_IOCTL: + /* Need to ensure that all copyin/copyout calls are made at + * put routine time which should be in the user context. (true when + * we are the stream head). The service routine can be called on an + * unpredictable context and copyin/copyout calls will get wrong results + * or even panic the kernel. 
+ */ + iocbp = (ioc_t *)gbuf_rptr(m); + + switch (iocbp->ioc_cmd) { + case AT_ATP_BIND_REQ: + if (gbuf_cont(m) == NULL) { + iocbp->ioc_rval = -1; + atp_iocnak(atp, m, EINVAL); + return; + } + skt = *(at_socket *)gbuf_rptr(gbuf_cont(m)); + if ((skt = (at_socket)atp_bind(gref, (unsigned int)skt, 0)) == 0) + atp_iocnak(atp, m, EINVAL); + else { + *(at_socket *)gbuf_rptr(gbuf_cont(m)) = skt; + iocbp->ioc_rval = 0; + atp_iocack(atp, m); + atp_dequeue_atp(atp); + } + return; + + case AT_ATP_GET_CHANID: + if (gbuf_cont(m) == NULL) { + iocbp->ioc_rval = -1; + atp_iocnak(atp, m, EINVAL); + return; + } + *(gref_t **)gbuf_rptr(gbuf_cont(m)) = gref; + atp_iocack(atp, m); + return; + + /* not the close and not the tickle(?) */ + case AT_ATP_ISSUE_REQUEST_DEF: + case AT_ATP_ISSUE_REQUEST_DEF_NOTE: { + gbuf_t *bds, *tmp, *m2; + struct atp_rcb *rcbp; + at_ddp_t *ddp; + at_atp_t *athp; + + if ((tmp = gbuf_cont(m)) != 0) { + if ((bds = gbuf_dupb(tmp)) == NULL) { + atp_iocnak(atp, m, ENOBUFS); + return; + } + gbuf_rinc(tmp,atpBDSsize); + gbuf_wset(bds,atpBDSsize); + iocbp->ioc_count -= atpBDSsize; + gbuf_cont(tmp) = bds; + } + + /* + * send a response to a transaction + * first check it out + */ + if (iocbp->ioc_count < TOTAL_ATP_HDR_SIZE) { + atp_iocnak(atp, m, EINVAL); + break; + } + + /* + * remove the response from the message + */ + m2 = gbuf_cont(m); + gbuf_cont(m) = NULL; + iocbp->ioc_count = 0; + ddp = AT_DDP_HDR(m2); + athp = AT_ATP_HDR(m2); + if (atp->atp_msgq) { + gbuf_cont(m2) = atp->atp_msgq; + atp->atp_msgq = 0; + } + + ATDISABLE(s, atp->atp_lock); + /* + * search for the corresponding rcb + */ + for (rcbp = atp->atp_rcb.head; rcbp; rcbp = rcbp->rc_list.next) { + if (rcbp->rc_tid == UAS_VALUE(athp->tid) && + rcbp->rc_socket.node == ddp->dst_node && + rcbp->rc_socket.net == NET_VALUE(ddp->dst_net) && + rcbp->rc_socket.socket == ddp->dst_socket) + break; + } + ATENABLE(s, atp->atp_lock); + + /* + * If it has already been sent then return an error + */ + if ((rcbp && 
rcbp->rc_state != RCB_NOTIFIED) || + (rcbp == NULL && athp->xo)) { + atp_iocnak(atp, m, ENOENT); + gbuf_freem(m2); + return; + } + if (rcbp == NULL) { /* a response for an ALO transaction */ + if ((rcbp = atp_rcb_alloc(atp)) == NULL) { + atp_iocnak(atp, m, ENOBUFS); + gbuf_freem(m2); + return; + } + rcbp->rc_ioctl = 0; + rcbp->rc_socket.socket = ddp->dst_socket; + rcbp->rc_socket.node = ddp->dst_node; + rcbp->rc_socket.net = NET_VALUE(ddp->dst_net); + rcbp->rc_tid = UAS_VALUE(athp->tid); + rcbp->rc_bitmap = 0xff; + rcbp->rc_xo = 0; + ATDISABLE(s, atp->atp_lock); + rcbp->rc_state = RCB_SENDING; + ATP_Q_APPEND(atp->atp_rcb, rcbp, rc_list); + ATENABLE(s, atp->atp_lock); + } + xcnt = get_bds_entries(m2); + if ((i = atp_unpack_bdsp(atp, m2, rcbp, xcnt, FALSE))) { + if ( !rcbp->rc_xo) + atp_rcb_free(rcbp); + atp_iocnak(atp, m, i); + return; + } + atp_send_replies(atp, rcbp); + + /* + * send the ack back to the responder + */ + atp_iocack(atp, m); + return; + } + + case AT_ATP_GET_POLL: { + if (gbuf_cont(m)) { + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + iocbp->ioc_count = 0; + } + + /* + * search for a waiting request + */ + ATDISABLE(s, atp->atp_lock); + if ((rcbp = atp->atp_attached.head)) { + /* + * Got one, move it to the active response Q + */ + gbuf_cont(m) = rcbp->rc_ioctl; + rcbp->rc_ioctl = NULL; + if (rcbp->rc_xo) { + ATP_Q_REMOVE(atp->atp_attached, rcbp, rc_list); + rcbp->rc_state = RCB_NOTIFIED; + ATP_Q_APPEND(atp->atp_rcb, rcbp, rc_list); + } else { + /* detach rcbp from attached queue, + * and free any outstanding resources + */ + atp_rcb_free(rcbp); + } + ATENABLE(s, atp->atp_lock); + atp_iocack(atp, m); + } else { + /* + * None available - can out + */ + ATENABLE(s, atp->atp_lock); + atp_iocnak(atp, m, EAGAIN); + } + break; + } + + case AT_ATP_CANCEL_REQUEST: { + /* + * Cancel a pending request + */ + if (iocbp->ioc_count != sizeof(int)) { + atp_iocnak(atp, m, EINVAL); + break; + } + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); 
+ gbuf_cont(m) = NULL; + ATDISABLE(s, atp->atp_lock); + for (trp = atp->atp_trans_wait.head; trp; trp = trp->tr_list.next) { + if (trp->tr_tid == i) + break; + } + if (trp == NULL) { + ATENABLE(s, atp->atp_lock); + atp_iocnak(atp, m, ENOENT); + } else { + ATENABLE(s, atp->atp_lock); + atp_free(trp); + atp_iocack(atp, m); + } + break; + } + + case AT_ATP_PEEK: { + unsigned char event; + if (atalk_peek(gref, &event) == -1) + atp_iocnak(atp, m, EAGAIN); + else { + *gbuf_rptr(gbuf_cont(m)) = event; + atp_iocack(atp, m); + } + break; + } + + case DDP_IOC_GET_CFG: +#ifdef APPLETALK_DEBUG + kprintf("atp_wput: DDP_IOC_GET_CFG\n"); +#endif + if (gbuf_cont(m) == 0) { + atp_iocnak(atp, m, EINVAL); + break; + } + { + /* *** was ddp_get_cfg() *** */ + ddp_addr_t *cfgp = + (ddp_addr_t *)gbuf_rptr(gbuf_cont(m)); + cfgp->inet.net = ifID_home->ifThisNode.s_net; + cfgp->inet.node = ifID_home->ifThisNode.s_node; + cfgp->inet.socket = atp->atp_socket_no; + cfgp->ddptype = DDP_ATP; +#ifdef NOT_YET + cfgp->inet.net = atp->atp_gref->laddr.s_net; + cfgp->inet.node = atp->atp_gref->laddr.s_node; + cfgp->inet.socket = atp->atp_gref->lport; + cfgp->ddptype = atp->atp_gref->ddptype; +#endif + } + gbuf_wset(gbuf_cont(m), sizeof(ddp_addr_t)); + atp_iocack(atp, m); + break; + + default: + /* + * Otherwise pass it on, if possible + */ + iocbp->ioc_private = (void *)gref; + DDP_OUTPUT(m); + break; + } + break; + + default: + gbuf_freem(m); + break; + } +} /* atp_wput */ + +gbuf_t *atp_build_release(trp) +register struct atp_trans *trp; +{ + register gbuf_t *m; + register at_ddp_t *ddp; + register at_atp_t *athp; + + /* + * Now try and allocate enough space to send the message + * if none is available the caller will schedule + * a timeout so we can retry for more space soon + */ + if ((m = (gbuf_t *)gbuf_alloc(AT_WR_OFFSET+ATP_HDR_SIZE, PRI_HI)) != NULL) { + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,TOTAL_ATP_HDR_SIZE); + ddp = AT_DDP_HDR(m); + ddp->type = DDP_ATP; + UAS_ASSIGN(ddp->checksum, 0); + 
ddp->dst_socket = trp->tr_socket.socket; + ddp->dst_node = trp->tr_socket.node; + NET_ASSIGN(ddp->dst_net, trp->tr_socket.net); + ddp->src_node = trp->tr_local_node; + NET_NET(ddp->src_net, trp->tr_local_net); + + /* + * clear the cmd/xo/eom/sts/unused fields + */ + athp = AT_ATP_HDR(m); + ATP_CLEAR_CONTROL(athp); + athp->cmd = ATP_CMD_TREL; + UAS_ASSIGN(athp->tid, trp->tr_tid); + } + + return (m); +} + +void atp_send_replies(atp, rcbp) + register struct atp_state *atp; + register struct atp_rcb *rcbp; +{ register gbuf_t *m; + register int i, len; + int s_gen, s, cnt; + unsigned char *m0_rptr = NULL, *m0_wptr = NULL; + register at_atp_t *athp; + register struct atpBDS *bdsp; + register gbuf_t *m2, *m1, *m0; + gbuf_t *mprev, *mlist = 0; + at_socket src_socket = (at_socket)atp->atp_socket_no; + gbuf_t *rc_xmt[ATP_TRESP_MAX]; + struct ddp_atp { + char ddp_atp_hdr[TOTAL_ATP_HDR_SIZE]; + }; + + ATDISABLE(s, atp->atp_lock); + if (rcbp->rc_queue != atp) { + ATENABLE(s, atp->atp_lock); + return; + } + if (rcbp->rc_not_sent_bitmap == 0) + goto nothing_to_send; + + dPrintf(D_M_ATP_LOW, D_L_OUTPUT, ("atp_send_replies\n")); + /* + * Do this for each message that hasn't been sent + */ + cnt = rcbp->rc_pktcnt; + for (i = 0; i < cnt; i++) { + rc_xmt[i] = 0; + if (rcbp->rc_snd[i]) { + if ((rc_xmt[i] = + gbuf_alloc(AT_WR_OFFSET+TOTAL_ATP_HDR_SIZE,PRI_MED)) + == NULL) { + for (cnt = 0; cnt < i; cnt++) + if (rc_xmt[cnt]) + gbuf_freeb(rc_xmt[cnt]); + goto nothing_to_send; + } + } + } + + m = rcbp->rc_xmt; + m0 = gbuf_cont(m); + if (m0) { + m0_rptr = gbuf_rptr(m0); + m0_wptr = gbuf_wptr(m0); + } + if (gbuf_len(m) > TOTAL_ATP_HDR_SIZE) + bdsp = (struct atpBDS *)(AT_ATP_HDR(m)->data); + else + bdsp = 0; + + for (i = 0; i < cnt; i++) { + if (rcbp->rc_snd[i] == 0) { + if ((len = UAS_VALUE(bdsp->bdsBuffSz))) + gbuf_rinc(m0,len); + + } else { + m2 = rc_xmt[i]; + gbuf_rinc(m2,AT_WR_OFFSET); + gbuf_wset(m2,TOTAL_ATP_HDR_SIZE); + *(struct ddp_atp *)(gbuf_rptr(m2))= *(struct ddp_atp 
*)(gbuf_rptr(m)); + athp = AT_ATP_HDR(m2); + ATP_CLEAR_CONTROL(athp); + athp->cmd = ATP_CMD_TRESP; + athp->bitmap = i; + if (i == (cnt - 1)) + athp->eom = 1; /* for the last fragment */ + if (bdsp) + UAL_UAL(athp->user_bytes, bdsp->bdsUserData); + + if (bdsp) + if (len = UAS_VALUE(bdsp->bdsBuffSz)) { /* copy in data */ + if (m0 && gbuf_len(m0)) { + if ((m1 = gbuf_dupb(m0)) == NULL) { + for (i = 0; i < cnt; i++) + if (rc_xmt[i]) + gbuf_freem(rc_xmt[i]); + gbuf_rptr(m0) = m0_rptr; + gbuf_wset(m0,(m0_wptr-m0_rptr)); + goto nothing_to_send; + } + gbuf_wset(m1,len); + gbuf_rinc(m0,len); + if ((len = gbuf_len(m0)) < 0) { + gbuf_rdec(m0,len); + gbuf_wdec(m1,len); + if (!append_copy((struct mbuf *)m1, + (struct mbuf *)gbuf_cont(m0), FALSE)) { + for (i = 0; i < cnt; i++) + if (rc_xmt[i]) + gbuf_freem(rc_xmt[i]); + gbuf_rptr(m0) = m0_rptr; + gbuf_wset(m0,(m0_wptr-m0_rptr)); + goto nothing_to_send; + } + } else + gbuf_cont(m1) = 0; + gbuf_cont(m2) = m1; + } + } + + AT_DDP_HDR(m2)->src_socket = src_socket; + dPrintf(D_M_ATP_LOW, D_L_OUTPUT, + ("atp_send_replies: %d, socket=%d, size=%d\n", + i, atp->atp_socket_no, gbuf_msgsize(gbuf_cont(m2)))); + + if (mlist) + gbuf_next(mprev) = m2; + else + mlist = m2; + mprev = m2; + + rcbp->rc_snd[i] = 0; + rcbp->rc_not_sent_bitmap &= ~atp_mask[i]; + if (rcbp->rc_not_sent_bitmap == 0) + break; + } + /* + * on to the next frag + */ + bdsp++; + } + if (m0) { + gbuf_rptr(m0) = m0_rptr; + gbuf_wset(m0,(m0_wptr-m0_rptr)); + } + + if (mlist) { + ATENABLE(s, atp->atp_lock); + DDP_OUTPUT(mlist); + ATDISABLE(s, atp->atp_lock); + } + +nothing_to_send: + /* + * If all replies from this reply block have been sent then + * remove it from the queue and mark it so + */ + if (rcbp->rc_queue != atp) { + ATENABLE(s, atp->atp_lock); + return; + } + rcbp->rc_rep_waiting = 0; + + /* + * If we are doing execute once re-set the rcb timeout + * each time we send back any part of the response. 
Note + * that this timer is started when an initial request is + * received. Each response reprimes the timer. Duplicate + * requests do not reprime the timer. + * + * We have sent all of a response so free the + * resources. + */ + if (rcbp->rc_xo && rcbp->rc_state != RCB_RELEASED) { + ATDISABLE(s_gen, atpgen_lock); + if (rcbp->rc_timestamp == 0) { + rcbp->rc_timestamp = time.tv_sec; + if (rcbp->rc_timestamp == 0) + rcbp->rc_timestamp = 1; + ATP_Q_APPEND(atp_need_rel, rcbp, rc_tlist); + } + rcbp->rc_state = RCB_RESPONSE_FULL; + ATENABLE(s_gen, atpgen_lock); + } else + atp_rcb_free(rcbp); + ATENABLE(s, atp->atp_lock); +} /* atp_send_replies */ + + +static void +atp_pack_bdsp(trp, bdsp) + register struct atp_trans *trp; + register struct atpBDS *bdsp; +{ + register gbuf_t *m = NULL; + register int i, datsize = 0; + struct atpBDS *bdsbase = bdsp; + + dPrintf(D_M_ATP, D_L_INFO, ("atp_pack_bdsp: socket=%d\n", + trp->tr_queue->atp_socket_no)); + + for (i = 0; i < ATP_TRESP_MAX; i++, bdsp++) { + short bufsize = UAS_VALUE(bdsp->bdsBuffSz); + long bufaddr = UAL_VALUE(bdsp->bdsBuffAddr); + + if ((m = trp->tr_rcv[i]) == NULL) + break; + + /* discard ddp hdr on first packet */ + if (i == 0) + gbuf_rinc(m,DDP_X_HDR_SIZE); + + /* this field may contain control information even when + no data is present */ + UAL_UAL(bdsp->bdsUserData, + (((at_atp_t *)(gbuf_rptr(m)))->user_bytes)); + gbuf_rinc(m, ATP_HDR_SIZE); + + if ((bufsize != 0) && (bufaddr != 0)) { + /* user expects data back */ + short tmp = 0; + register char *buf = (char *)bufaddr; + + while (m) { + short len = (short)(gbuf_len(m)); + if (len) { + if (len > bufsize) + len = bufsize; + copyout((caddr_t)gbuf_rptr(m), + (caddr_t)&buf[tmp], + len); + bufsize -= len; + tmp =+ len; + } + m = gbuf_cont(m); + } + + UAS_ASSIGN(bdsp->bdsDataSz, tmp); + datsize += (int)tmp; + } + gbuf_freem(trp->tr_rcv[i]); + trp->tr_rcv[i] = NULL; + } + + /* report the number of packets */ + UAS_ASSIGN(((struct atpBDS *)bdsbase)->bdsBuffSz, i); + 
+ dPrintf(D_M_ATP, D_L_INFO, (" : size=%d\n", + datsize)); +} /* atp_pack_bdsp */ + + +static int +atp_unpack_bdsp(atp, m, rcbp, cnt, wait) + struct atp_state *atp; + gbuf_t *m; /* ddp, atp and bdsp gbuf_t */ + register struct atp_rcb *rcbp; + register int cnt, wait; +{ + register struct atpBDS *bdsp; + register gbuf_t *m2, *m1, *m0; + register at_atp_t *athp; + register int i, len, s_gen; + at_socket src_socket; + struct ddp_atp { + char ddp_atp_hdr[TOTAL_ATP_HDR_SIZE]; + }; + gbuf_t *mprev, *mlist = 0; + gbuf_t *rc_xmt[ATP_TRESP_MAX]; + unsigned char *m0_rptr, *m0_wptr; + + /* + * get the user data structure pointer + */ + bdsp = (struct atpBDS *)(AT_ATP_HDR(m)->data); + + /* + * Guard against bogus count argument. + */ + if ((unsigned) cnt > ATP_TRESP_MAX) { + dPrintf(D_M_ATP, D_L_ERROR, + ("atp_unpack_bdsp: bad bds count 0x%x\n", cnt)); + gbuf_freem(m); + return(EINVAL); + } + if ((src_socket = (at_socket)atp->atp_socket_no) == 0xFF) { + /* comparison was to -1, however src_socket is a u_char */ + gbuf_freem(m); + return EPIPE; + } + + m0 = gbuf_cont(m); + rcbp->rc_xmt = m; + rcbp->rc_pktcnt = cnt; + rcbp->rc_state = RCB_SENDING; + rcbp->rc_not_sent_bitmap = 0; + + if (cnt <= 1) { + /* + * special case this to + * improve AFP write transactions to the server + */ + rcbp->rc_pktcnt = 1; + if ((m2 = gbuf_alloc_wait(AT_WR_OFFSET+TOTAL_ATP_HDR_SIZE, + wait)) == NULL) + return 0; + gbuf_rinc(m2,AT_WR_OFFSET); + gbuf_wset(m2,TOTAL_ATP_HDR_SIZE); + *(struct ddp_atp *)(gbuf_rptr(m2))= *(struct ddp_atp *)(gbuf_rptr(m)); + athp = AT_ATP_HDR(m2); + ATP_CLEAR_CONTROL(athp); + athp->cmd = ATP_CMD_TRESP; + athp->bitmap = 0; + athp->eom = 1; /* there's only 1 fragment */ + + /* *** why only if cnt > 0? 
*** */ + if (cnt > 0) + UAL_UAL(athp->user_bytes, bdsp->bdsUserData); + if (m0) + if (!append_copy((struct mbuf *)m2, + (struct mbuf *)m0, wait)) { + gbuf_freeb(m2); + return 0; + } + /* + * send the message and mark it as sent + */ + AT_DDP_HDR(m2)->src_socket = src_socket; + dPrintf(D_M_ATP_LOW, D_L_INFO, + ("atp_unpack_bdsp %d, socket=%d, size=%d, cnt=%d\n", + 0,atp->atp_socket_no,gbuf_msgsize(gbuf_cont(m2)),cnt)); + mlist = m2; + goto l_send; + } + + for (i = 0; i < cnt; i++) { + /* all hdrs, packet data and dst addr storage */ + if ((rc_xmt[i] = + gbuf_alloc_wait(AT_WR_OFFSET+TOTAL_ATP_HDR_SIZE, + wait)) == NULL) { + for (cnt = 0; cnt < i; cnt++) + if (rc_xmt[cnt]) + gbuf_freeb(rc_xmt[cnt]); + return 0; + } + } + if (m0) { + m0_rptr = gbuf_rptr(m0); + m0_wptr = gbuf_wptr(m0); + } + + for (i = 0; i < cnt; i++) { + m2 = rc_xmt[i]; + gbuf_rinc(m2,AT_WR_OFFSET); + gbuf_wset(m2,TOTAL_ATP_HDR_SIZE); + *(struct ddp_atp *)(gbuf_rptr(m2))= *(struct ddp_atp *)(gbuf_rptr(m)); + athp = AT_ATP_HDR(m2); + ATP_CLEAR_CONTROL(athp); + athp->cmd = ATP_CMD_TRESP; + athp->bitmap = i; + if (i == (cnt - 1)) + athp->eom = 1; /* for the last fragment */ + UAL_UAL(athp->user_bytes, bdsp->bdsUserData); + + if ((len = UAS_VALUE(bdsp->bdsBuffSz))) { /* copy in data */ + if (m0 && gbuf_len(m0)) { + if ((m1 = gbuf_dupb_wait(m0, wait)) == NULL) { + for (i = 0; i < cnt; i++) + if (rc_xmt[i]) + gbuf_freem(rc_xmt[i]); + gbuf_rptr(m0) = m0_rptr; + gbuf_wset(m0,(m0_wptr-m0_rptr)); + return 0; + } + gbuf_wset(m1,len); /* *** m1 is first len bytes of m0? 
*** */ + gbuf_rinc(m0,len); + if ((len = gbuf_len(m0)) < 0) { + gbuf_rdec(m0,len); + gbuf_wdec(m1,len); + if (!append_copy((struct mbuf *)m1, + (struct mbuf *)gbuf_cont(m0), wait)) { + for (i = 0; i < cnt; i++) + if (rc_xmt[i]) + gbuf_freem(rc_xmt[i]); + gbuf_rptr(m0) = m0_rptr; + gbuf_wset(m0,(m0_wptr-m0_rptr)); + return 0; + } + } else + gbuf_cont(m1) = 0; + gbuf_cont(m2) = m1; + } + } + + AT_DDP_HDR(m2)->src_socket = src_socket; + dPrintf(D_M_ATP_LOW,D_L_INFO, + ("atp_unpack_bdsp %d, socket=%d, size=%d, cnt=%d\n", + i,atp->atp_socket_no,gbuf_msgsize(gbuf_cont(m2)),cnt)); + if (mlist) + gbuf_next(mprev) = m2; + else + mlist = m2; + mprev = m2; + /* + * on to the next frag + */ + bdsp++; + } + if (m0) { + gbuf_rptr(m0) = m0_rptr; + gbuf_wset(m0,(m0_wptr-m0_rptr)); + } + /* + * send the message + */ +l_send: + if (rcbp->rc_xo) { + ATDISABLE(s_gen, atpgen_lock); + if (rcbp->rc_timestamp == 0) { + if ((rcbp->rc_timestamp = time.tv_sec) == 0) + rcbp->rc_timestamp = 1; + ATP_Q_APPEND(atp_need_rel, rcbp, rc_tlist); + } + ATENABLE(s_gen, atpgen_lock); + } + + DDP_OUTPUT(mlist); + return 0; +} /* atp_unpack_bdsp */ + +#define ATP_SOCKET_LAST (DDP_SOCKET_LAST-6) +#define ATP_SOCKET_FIRST (DDP_SOCKET_1st_DYNAMIC-64) +static unsigned int sNext = 0; + +int atp_bind(gref, sVal, flag) + gref_t *gref; + unsigned int sVal; + unsigned char *flag; +{ + extern unsigned char asp_inpC[]; + extern asp_scb_t *asp_scbQ[]; + unsigned char inpC, sNextUsed = 0; + unsigned int sMin, sMax, sSav; + struct atp_state *atp; + int s; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + + sMax = ATP_SOCKET_LAST; + sMin = ATP_SOCKET_FIRST; + ATDISABLE(s, atpgen_lock); + if (flag && (*flag == 3)) { + sMin += 40; + if (sMin < sNext) { + sMin = sNext; + sNextUsed = 1; + } + } + + if ( (sVal != 0) && + ((sVal > sMax) || (sVal < 2) || (sVal == 6) || + (ddp_socket_inuse(sVal, DDP_ATP) && + (atp_inputQ[sVal] != (gref_t *)1)))) { + ATENABLE(s, 
atpgen_lock); + return 0; + } + + if (sVal == 0) { + inpC = 255; +again: + for (sVal=sMin; sVal <= sMax; sVal++) { + if (!ddp_socket_inuse(sVal, DDP_ATP) || + atp_inputQ[sVal] == (gref_t *)1) + break; + else if (flag && (*flag == 3) && asp_scbQ[sVal]) { + if ((asp_scbQ[sVal]->dflag == *flag) + && (asp_inpC[sVal] < inpC) ) { + inpC = asp_inpC[sVal]; + sSav = sVal; + } + } + } + if (sVal > sMax) { + if (flag && (*flag == 3)) { + if (sNextUsed) { + sNextUsed = 0; + sMax = sNext - 1; + sMin = ATP_SOCKET_FIRST+40; + goto again; + } + sNext = 0; + *flag = (unsigned char)sSav; + } + ATENABLE(s, atpgen_lock); + return 0; + } + } + atp->atp_socket_no = (short)sVal; + atp_inputQ[sVal] = gref; + if (flag == 0) + atp_pidM[sVal] = atp->atp_pid; + else if (*flag == 3) { + sNext = sVal + 1; + if (sNext > ATP_SOCKET_LAST) + sNext = 0; + } + + ATENABLE(s, atpgen_lock); + return (int)sVal; +} + +void atp_req_ind(atp, mioc) + register struct atp_state *atp; + register gbuf_t *mioc; +{ + register struct atp_rcb *rcbp; + int s; + + if ((rcbp = atp->atp_attached.head) != 0) { + gbuf_cont(mioc) = rcbp->rc_ioctl; + rcbp->rc_ioctl = NULL; + ATDISABLE(s, atp->atp_lock); + if (rcbp->rc_xo) { + ATP_Q_REMOVE(atp->atp_attached, rcbp, rc_list); + rcbp->rc_state = RCB_NOTIFIED; + ATP_Q_APPEND(atp->atp_rcb, rcbp, rc_list); + } else + atp_rcb_free(rcbp); + ATENABLE(s, atp->atp_lock); + if (gbuf_cont(mioc)) + ((ioc_t *)gbuf_rptr(mioc))->ioc_count = gbuf_msgsize(gbuf_cont(mioc)); + else + ((ioc_t *)gbuf_rptr(mioc))->ioc_count = 0; + asp_ack_reply(atp->atp_gref, mioc); + } else + gbuf_freeb(mioc); +} + +void atp_rsp_ind(trp, mioc) + register struct atp_trans *trp; + register gbuf_t *mioc; +{ + register struct atp_state *atp = trp->tr_queue; + register int err; + gbuf_t *xm = 0; + + err = 0; + { + switch (trp->tr_state) { + case TRANS_DONE: + if (asp_pack_bdsp(trp, &xm) < 0) + err = EFAULT; + gbuf_cont(mioc) = trp->tr_xmt; + trp->tr_xmt = NULL; + break; + + case TRANS_FAILED: + err = ETIMEDOUT; + 
break; + + default: + err = ENOENT; + break; + } + atp_free(trp); + + if (err) { + dPrintf(D_M_ATP, D_L_ERROR, + ("atp_rsp_ind: TRANSACTION error\n")); + atp_iocnak(atp, mioc, err); + } else { + gbuf_cont(gbuf_cont(mioc)) = xm; + atp_iocack(atp, mioc); + } + } +} + +void atp_cancel_req(gref, tid) + gref_t *gref; + unsigned short tid; +{ + int s; + struct atp_state *atp; + struct atp_trans *trp; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + + ATDISABLE(s, atp->atp_lock); + for (trp = atp->atp_trans_wait.head; trp; trp = trp->tr_list.next) { + if (trp->tr_tid == tid) + break; + } + ATENABLE(s, atp->atp_lock); + if (trp != NULL) + atp_free(trp); +} + +/* + * remove atp from the use list + */ +void +atp_dequeue_atp(atp) + struct atp_state *atp; +{ + int s; + + ATDISABLE(s, atpall_lock); + if (atp == atp_used_list) { + if ((atp_used_list = atp->atp_trans_waiting) != 0) + atp->atp_trans_waiting->atp_rcb_waiting = 0; + } else if (atp->atp_rcb_waiting) { + if ((atp->atp_rcb_waiting->atp_trans_waiting + = atp->atp_trans_waiting) != 0) + atp->atp_trans_waiting->atp_rcb_waiting = atp->atp_rcb_waiting; + } + + atp->atp_trans_waiting = 0; + atp->atp_rcb_waiting = 0; + ATENABLE(s, atpall_lock); +} + +void +atp_timout(func, trp, ticks) + void (*func)(); + struct atp_trans *trp; + int ticks; +{ + int s; + unsigned int sum; + struct atp_trans *curr_trp, *prev_trp; + + ATDISABLE(s, atptmo_lock); + if (trp->tr_tmo_func) { + ATENABLE(s, atptmo_lock); + return; + } + + trp->tr_tmo_func = func; + trp->tr_tmo_delta = 1+(ticks>>5); + + if (trp_tmo_list == 0) { + trp->tr_tmo_next = trp->tr_tmo_prev = 0; + trp_tmo_list = trp; + ATENABLE(s, atptmo_lock); + return; + } + + prev_trp = 0; + curr_trp = trp_tmo_list; + sum = 0; + + while (1) { + sum += curr_trp->tr_tmo_delta; + if (sum > trp->tr_tmo_delta) { + sum -= curr_trp->tr_tmo_delta; + trp->tr_tmo_delta -= sum; + curr_trp->tr_tmo_delta -= trp->tr_tmo_delta; + break; + } + prev_trp 
= curr_trp; + if ((curr_trp = curr_trp->tr_tmo_next) == 0) { + trp->tr_tmo_delta -= sum; + break; + } + } + + if (prev_trp) { + trp->tr_tmo_prev = prev_trp; + if ((trp->tr_tmo_next = prev_trp->tr_tmo_next) != 0) + prev_trp->tr_tmo_next->tr_tmo_prev = trp; + prev_trp->tr_tmo_next = trp; + } else { + trp->tr_tmo_prev = 0; + trp->tr_tmo_next = trp_tmo_list; + trp_tmo_list->tr_tmo_prev = trp; + trp_tmo_list = trp; + } + ATENABLE(s, atptmo_lock); +} + +void +atp_untimout(func, trp) + void (*func)(); + struct atp_trans *trp; +{ + int s; + + ATDISABLE(s, atptmo_lock); + if (trp->tr_tmo_func == 0) { + ATENABLE(s, atptmo_lock); + return; + } + + if (trp_tmo_list == trp) { + if ((trp_tmo_list = trp->tr_tmo_next) != 0) { + trp_tmo_list->tr_tmo_prev = 0; + trp->tr_tmo_next->tr_tmo_delta += trp->tr_tmo_delta; + } + } else { + if ((trp->tr_tmo_prev->tr_tmo_next = trp->tr_tmo_next) != 0) { + trp->tr_tmo_next->tr_tmo_prev = trp->tr_tmo_prev; + trp->tr_tmo_next->tr_tmo_delta += trp->tr_tmo_delta; + } + } + trp->tr_tmo_func = 0; + ATENABLE(s, atptmo_lock); +} + +void +atp_trp_clock_funnel(arg) + void *arg; +{ + thread_funnel_set(network_flock, TRUE); + atp_trp_clock(arg); + thread_funnel_set(network_flock, FALSE); +} + +void +atp_trp_clock(arg) + void *arg; +{ + int s; + struct atp_trans *trp; + void (*tr_tmo_func)(); + + ATDISABLE(s, atptmo_lock); + if (trp_tmo_list) + trp_tmo_list->tr_tmo_delta--; + while (((trp = trp_tmo_list) != 0) && (trp_tmo_list->tr_tmo_delta == 0)) { + if ((trp_tmo_list = trp->tr_tmo_next) != 0) + trp_tmo_list->tr_tmo_prev = 0; + if ((tr_tmo_func = trp->tr_tmo_func) != 0) { + trp->tr_tmo_func = 0; + ATENABLE(s, atptmo_lock); + (*tr_tmo_func)(trp); + ATDISABLE(s, atptmo_lock); + } + } + ATENABLE(s, atptmo_lock); + + timeout(atp_trp_clock_funnel, (void *)arg, (1<<5)); +} + +void +atp_send_req(gref, mioc) + gref_t *gref; + gbuf_t *mioc; +{ + register struct atp_state *atp; + register struct atp_trans *trp; + register ioc_t *iocbp; + register at_atp_t *athp; + 
register at_ddp_t *ddp; + gbuf_t *m, *m2, *bds; + struct atp_set_default *sdb; + int s, old; + unsigned int timer; + + atp = (struct atp_state *)((struct atp_state *)gref->info)->atp_msgq; + iocbp = (ioc_t *)gbuf_rptr(mioc); + + if ((trp = atp_trans_alloc(atp)) == NULL) { +l_retry: + ((asp_scb_t *)gref->info)->stat_msg = mioc; + iocbp->ioc_private = (void *)gref; + timeout(atp_retry_req, mioc, 10); + return; + } + + m2 = gbuf_cont(mioc); + if ((bds = gbuf_dupb(m2)) == NULL) { + atp_trans_free(trp); + goto l_retry; + } + gbuf_rinc(m2,atpBDSsize); + gbuf_wset(bds,atpBDSsize); + iocbp->ioc_count -= atpBDSsize; + gbuf_cont(m2) = NULL; + + old = iocbp->ioc_cmd; + iocbp->ioc_cmd = AT_ATP_ISSUE_REQUEST; + sdb = (struct atp_set_default *)gbuf_rptr(m2); + + /* + * The at_snd_req library routine multiplies seconds by 100. + * We need to divide by 100 in order to obtain the timer. + */ + if ((timer = (sdb->def_rate * HZ)/100) == 0) + timer = HZ; + iocbp->ioc_count -= sizeof(struct atp_set_default); + gbuf_rinc(m2,sizeof(struct atp_set_default)); + + trp->tr_retry = sdb->def_retries; + trp->tr_timeout = timer; + trp->tr_bdsp = bds; + trp->tr_tid = atp_tid(atp); + trp->tr_xmt = mioc; + + /* + * Now fill in the header (and remember the bits + * we need to know) + */ + athp = AT_ATP_HDR(m2); + athp->cmd = ATP_CMD_TREQ; + UAS_ASSIGN(athp->tid, trp->tr_tid); + athp->eom = 0; + athp->sts = 0; + trp->tr_xo = athp->xo; + trp->tr_bitmap = athp->bitmap; + ddp = AT_DDP_HDR(m2); + ddp->type = DDP_ATP; + ddp->src_socket = (at_socket)atp->atp_socket_no; + trp->tr_socket.socket = ddp->dst_socket; + trp->tr_socket.node = ddp->dst_node; + trp->tr_socket.net = NET_VALUE(ddp->dst_net); + trp->tr_local_socket = atp->atp_socket_no; + trp->tr_local_node = ddp->src_node; + NET_NET(trp->tr_local_net, ddp->src_net); + +#ifdef NOT_YET + /* save the local information in the gref */ + atp->atp_gref->laddr.s_net = NET_VALUE(ddp->src_net); + atp->atp_gref->laddr.s_node = ddp->src_node; + 
atp->atp_gref->lport = ddp->src_node; + atp->atp_gref->ddptype = DDP_ATP; +#endif + + /* + * Put us in the transaction waiting queue + */ + ATDISABLE(s, atp->atp_lock); + ATP_Q_APPEND(atp->atp_trans_wait, trp, tr_list); + ATENABLE(s, atp->atp_lock); + + /* + * Send the message and set the timer + */ + m = (gbuf_t *)copy_pkt(m2, sizeof(llc_header_t)); + if (!trp->tr_retry && !trp->tr_bitmap && !trp->tr_xo) + atp_x_done(trp); /* no reason to tie up resources */ + else + atp_timout(atp_req_timeout, trp, trp->tr_timeout); + if (m) { + trace_mbufs(D_M_ATP_LOW, " s", m); + DDP_OUTPUT(m); + } +} /* atp_send_req */ + +void atp_retry_req(m) + gbuf_t *m; +{ + gref_t *gref; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + gref = (gref_t *)((ioc_t *)gbuf_rptr(m))->ioc_private; + if (gref->info) { + ((asp_scb_t *)gref->info)->stat_msg = 0; + atp_send_req(gref, m); + } + (void) thread_funnel_set(network_flock, FALSE); +} + +void atp_send_rsp(gref, m, wait) + gref_t *gref; + gbuf_t *m; + int wait; +{ + register struct atp_state *atp; + register struct atp_rcb *rcbp; + register at_atp_t *athp; + register at_ddp_t *ddp; + int s, xcnt; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + ddp = AT_DDP_HDR(m); + athp = AT_ATP_HDR(m); + + /* + * search for the corresponding rcb + */ + ATDISABLE(s, atp->atp_lock); + for (rcbp = atp->atp_rcb.head; rcbp; rcbp = rcbp->rc_list.next) { + if ( (rcbp->rc_tid == UAS_VALUE(athp->tid)) && + (rcbp->rc_socket.node == ddp->dst_node) && + (rcbp->rc_socket.net == NET_VALUE(ddp->dst_net)) && + (rcbp->rc_socket.socket == ddp->dst_socket) ) + break; + } + + /* + * If it has already been sent then drop the request + */ + if ((rcbp && (rcbp->rc_state != RCB_NOTIFIED)) || + (rcbp == NULL && athp->xo) ) { + ATENABLE(s, atp->atp_lock); + gbuf_freem(m); + return; + } + ATENABLE(s, atp->atp_lock); + + if (rcbp == NULL) { /* a response is being sent for an ALO transaction 
*/ + if ((rcbp = atp_rcb_alloc(atp)) == NULL) { + gbuf_freem(m); + return; + } + rcbp->rc_ioctl = 0; + rcbp->rc_socket.socket = ddp->dst_socket; + rcbp->rc_socket.node = ddp->dst_node; + rcbp->rc_socket.net = NET_VALUE(ddp->dst_net); + rcbp->rc_tid = UAS_VALUE(athp->tid); + rcbp->rc_bitmap = 0xff; + rcbp->rc_xo = 0; + rcbp->rc_state = RCB_RESPONSE_FULL; + ATDISABLE(s, atp->atp_lock); + ATP_Q_APPEND(atp->atp_rcb, rcbp, rc_list); + ATENABLE(s, atp->atp_lock); + } + else if (ddp->src_node == 0) { + NET_NET(ddp->src_net, rcbp->rc_local_net); + ddp->src_node = rcbp->rc_local_node; + } + + xcnt = get_bds_entries(m); + s = atp_unpack_bdsp(atp, m, rcbp, xcnt, wait); + if (s == 0) + atp_send_replies(atp, rcbp); +} /* atp_send_rsp */ + +int asp_pack_bdsp(trp, xm) + register struct atp_trans *trp; + gbuf_t **xm; +{ + register struct atpBDS *bdsp; + register gbuf_t *m, *m2; + register int i; + gbuf_t *m_prev, *m_head = 0; + + dPrintf(D_M_ATP, D_L_INFO, ("asp_pack_bdsp: socket=%d\n", + trp->tr_queue->atp_socket_no)); + + if ((m2 = trp->tr_bdsp) == NULL) + return 0; + trp->tr_bdsp = NULL; + bdsp = (struct atpBDS *)gbuf_rptr(m2); + + for (i = 0; (i < ATP_TRESP_MAX && + bdsp < (struct atpBDS *)(gbuf_wptr(m2))); i++) { + if ((m = trp->tr_rcv[i]) == NULL) + break; + if (i == 0) { + /* discard ddp hdr on first packet */ + gbuf_rinc(m,DDP_X_HDR_SIZE); + } + + UAL_UAL(bdsp->bdsUserData, (((at_atp_t *)(gbuf_rptr(m)))->user_bytes)); + gbuf_rinc(m, ATP_HDR_SIZE); + + if (UAL_VALUE(bdsp->bdsBuffAddr)) { + short tmp; + + /* user expects data back */ + m = gbuf_strip(m); + if (m_head == 0) + m_head = m; + else + gbuf_cont(m_prev) = m; + if (m) { + tmp = (short)gbuf_len(m); + while (gbuf_cont(m)) { + m = gbuf_cont(m); + tmp += (short)(gbuf_len(m)); + } + m_prev = m; + } + UAS_ASSIGN(bdsp->bdsDataSz, tmp); + } + trp->tr_rcv[i] = NULL; + bdsp++; + + } + /* + * report the number of packets + */ + UAS_ASSIGN(((struct atpBDS *)gbuf_rptr(m2))->bdsBuffSz, i); + + if (trp->tr_xmt) /* an ioctl block 
is still held? */ + gbuf_cont(trp->tr_xmt) = m2; + else + trp->tr_xmt = m2; + + if (m_head) + *xm = m_head; + else + *xm = 0; + + dPrintf(D_M_ATP, D_L_INFO, (" : size=%d\n", + gbuf_msgsize(*xm))); + + return 0; +} + +/* + * The following routines are direct entries from system + * calls to allow fast sending and recving of ATP data. + */ + +int +_ATPsndreq(fd, buf, len, nowait, err, proc) + int fd; + unsigned char *buf; + int len; + int nowait; + int *err; + void *proc; +{ + gref_t *gref; + int s, rc; + unsigned short tid; + unsigned int timer; + register struct atp_state *atp; + register struct atp_trans *trp; + register ioc_t *iocbp; + register at_atp_t *athp; + register at_ddp_t *ddp; + struct atp_set_default *sdb; + gbuf_t *m2, *m, *mioc; + char bds[atpBDSsize]; + + if ((*err = atalk_getref(0, fd, &gref, proc)) != 0) + return -1; + + if ((gref == 0) || ((atp = (struct atp_state *)gref->info) == 0) + || (atp->atp_flags & ATP_CLOSING)) { + dPrintf(D_M_ATP, D_L_ERROR, ("ATPsndreq: stale handle=0x%x, pid=%d\n", + (u_int) gref, gref->pid)); + + *err = EINVAL; + return -1; + } + + while ((mioc = gbuf_alloc(sizeof(ioc_t), PRI_MED)) == 0) { + ATDISABLE(s, atp->atp_delay_lock); + rc = tsleep(&atp->atp_delay_event, PSOCK | PCATCH, "atpmioc", 10); + ATENABLE(s, atp->atp_delay_lock); + if (rc != 0) { + *err = rc; + return -1; + } + + } + gbuf_wset(mioc,sizeof(ioc_t)); + len -= atpBDSsize; + while ((m2 = gbuf_alloc(len, PRI_MED)) == 0) { + ATDISABLE(s, atp->atp_delay_lock); + rc = tsleep(&atp->atp_delay_event, PSOCK | PCATCH, "atpm2", 10); + ATENABLE(s, atp->atp_delay_lock); + if (rc != 0) { + gbuf_freeb(mioc); + *err = rc; + return -1; + } + } + gbuf_wset(m2, len); + gbuf_cont(mioc) = m2; + if (((*err = copyin((caddr_t)buf, (caddr_t)bds, atpBDSsize)) != 0) + || ((*err = copyin((caddr_t)&buf[atpBDSsize], + (caddr_t)gbuf_rptr(m2), len)) != 0)) { + gbuf_freem(mioc); + return -1; + } + gbuf_set_type(mioc, MSG_IOCTL); + iocbp = (ioc_t *)gbuf_rptr(mioc); + iocbp->ioc_count = 
len; + iocbp->ioc_cmd = nowait ? AT_ATP_ISSUE_REQUEST_NOTE : AT_ATP_ISSUE_REQUEST; + sdb = (struct atp_set_default *)gbuf_rptr(m2); + + /* + * The at_snd_req library routine multiplies seconds by 100. + * We need to divide by 100 in order to obtain the timer. + */ + if ((timer = (sdb->def_rate * HZ)/100) == 0) + timer = HZ; + iocbp->ioc_count -= sizeof(struct atp_set_default); + gbuf_rinc(m2,sizeof(struct atp_set_default)); + + /* + * allocate and set up the transaction record + */ + while ((trp = atp_trans_alloc(atp)) == 0) { + ATDISABLE(s, atp->atp_delay_lock); + rc = tsleep(&atp->atp_delay_event, PSOCK | PCATCH, "atptrp", 10); + ATENABLE(s, atp->atp_delay_lock); + if (rc != 0) { + gbuf_freem(mioc); + *err = rc; + return -1; + } + } + trp->tr_retry = sdb->def_retries; + trp->tr_timeout = timer; + trp->tr_bdsp = NULL; + trp->tr_tid = atp_tid(atp); + tid = trp->tr_tid; + + /* + * remember the IOCTL packet so we can ack it + * later + */ + trp->tr_xmt = mioc; + + /* + * Now fill in the header (and remember the bits + * we need to know) + */ + athp = AT_ATP_HDR(m2); + athp->cmd = ATP_CMD_TREQ; + UAS_ASSIGN(athp->tid, trp->tr_tid); + athp->eom = 0; + athp->sts = 0; + trp->tr_xo = athp->xo; + trp->tr_bitmap = athp->bitmap; + ddp = AT_DDP_HDR(m2); + ddp->type = DDP_ATP; + ddp->src_socket = (at_socket)atp->atp_socket_no; + ddp->src_node = 0; + trp->tr_socket.socket = ddp->dst_socket; + trp->tr_socket.node = ddp->dst_node; + trp->tr_socket.net = NET_VALUE(ddp->dst_net); + trp->tr_local_socket = atp->atp_socket_no; + +#ifdef NOT_YET + /* save the local information in the gref */ + atp->atp_gref->laddr.s_net = NET_VALUE(ddp->src_net); + atp->atp_gref->laddr.s_node = ddp->src_node; + atp->atp_gref->lport = ddp->src_node; + atp->atp_gref->ddptype = DDP_ATP; +#endif + + /* + * Put us in the transaction waiting queue + */ + ATDISABLE(s, atp->atp_lock); + ATP_Q_APPEND(atp->atp_trans_wait, trp, tr_list); + ATENABLE(s, atp->atp_lock); + + /* + * Send the message and set the timer 
+ */ + m = (gbuf_t *)copy_pkt(m2, sizeof(llc_header_t)); + if ( !trp->tr_retry && !trp->tr_bitmap && !trp->tr_xo) + atp_x_done(trp); /* no reason to tie up resources */ + else + atp_timout(atp_req_timeout, trp, trp->tr_timeout); + if (m) + DDP_OUTPUT(m); + + if (nowait) + return (int)tid; + + /* + * wait for the transaction to complete + */ + ATDISABLE(s, trp->tr_lock); + while ((trp->tr_state != TRANS_DONE) && (trp->tr_state != TRANS_FAILED)) { + trp->tr_rsp_wait = 1; + rc = tsleep(&trp->tr_event, PSOCK | PCATCH, "atpsndreq", 0); + if (rc != 0) { + trp->tr_rsp_wait = 0; + ATENABLE(s, trp->tr_lock); + *err = rc; + return -1; + } + } + trp->tr_rsp_wait = 0; + ATENABLE(s, trp->tr_lock); + + if (trp->tr_state == TRANS_FAILED) { + /* + * transaction timed out, return error + */ + atp_free(trp); + *err = ETIMEDOUT; + return -1; + } + + /* + * copy out the recv data + */ + atp_pack_bdsp(trp, bds); + + /* + * copyout the result info + */ + copyout((caddr_t)bds, (caddr_t)buf, atpBDSsize); + + atp_free(trp); + + return (int)tid; +} /* _ATPsndreq */ + +int +_ATPsndrsp(fd, respbuff, resplen, datalen, err, proc) + int fd; + unsigned char *respbuff; + int resplen; + int datalen; + int *err; + void *proc; +{ + gref_t *gref; + int s, rc; + long bufaddr; + gbuf_t *m, *mdata; + register short len; + register int size; + register struct atp_state *atp; + register struct atpBDS *bdsp; + register char *buf; + + if ((*err = atalk_getref(0, fd, &gref, proc)) != 0) + return -1; + + if ((gref == 0) || ((atp = (struct atp_state *)gref->info) == 0) + || (atp->atp_flags & ATP_CLOSING)) { + dPrintf(D_M_ATP, D_L_ERROR, ("ATPsndrsp: stale handle=0x%x, pid=%d\n", + (u_int) gref, gref->pid)); + + *err = EINVAL; + return -1; + } + + /* + * allocate buffer and copy in the response info + */ + while ((m = gbuf_alloc(resplen, PRI_MED)) == 0) { + ATDISABLE(s, atp->atp_delay_lock); + rc = tsleep(&atp->atp_delay_event, PSOCK | PCATCH, "atprspinfo", 10); + ATENABLE(s, atp->atp_delay_lock); + if (rc != 0) 
{ + *err = rc; + return -1; + } + } + if ((*err = copyin((caddr_t)respbuff, (caddr_t)gbuf_rptr(m), resplen)) != 0) { + gbuf_freeb(m); + return -1; + } + gbuf_wset(m,resplen); + ((at_ddp_t *)gbuf_rptr(m))->src_node = 0; + bdsp = (struct atpBDS *)(gbuf_rptr(m) + TOTAL_ATP_HDR_SIZE); + if ((resplen == TOTAL_ATP_HDR_SIZE) || ((len = UAS_VALUE(bdsp->bdsDataSz)) == 1)) + len = 0; + else + len = 16 * sizeof(gbuf_t); + + /* + * allocate buffer and copy in the response data + */ + while ((mdata = gbuf_alloc(datalen+len, PRI_MED)) == 0) { + ATDISABLE(s, atp->atp_delay_lock); + rc = tsleep(&atp->atp_delay_event, PSOCK | PCATCH, "atprspdata", 10); + ATENABLE(s, atp->atp_delay_lock); + if (rc != 0) { + gbuf_freem(m); + *err = rc; + return -1; + } + } + gbuf_cont(m) = mdata; + for (size=0; bdsp < (struct atpBDS *)gbuf_wptr(m); bdsp++) { + if ((bufaddr = UAL_VALUE(bdsp->bdsBuffAddr)) != 0) { + len = UAS_VALUE(bdsp->bdsBuffSz); + buf = (char *)bufaddr; + if ((*err = copyin((caddr_t)buf, + (caddr_t)&gbuf_rptr(mdata)[size], len)) != 0) { + gbuf_freem(m); + return -1; + } + size += len; + } + } + gbuf_wset(mdata,size); + + atp_send_rsp(gref, m, TRUE); + return 0; +} + +int +_ATPgetreq(fd, buf, buflen, err, proc) + int fd; + unsigned char *buf; + int buflen; + int *err; + void *proc; +{ + gref_t *gref; + register struct atp_state *atp; + register struct atp_rcb *rcbp; + register gbuf_t *m, *m_head; + int s, size, len; + + if ((*err = atalk_getref(0, fd, &gref, proc)) != 0) + return -1; + + if ((gref == 0) || ((atp = (struct atp_state *)gref->info) == 0) + || (atp->atp_flags & ATP_CLOSING)) { + dPrintf(D_M_ATP, D_L_ERROR, ("ATPgetreq: stale handle=0x%x, pid=%d\n", + (u_int) gref, gref->pid)); + *err = EINVAL; + return -1; + } + + ATDISABLE(s, atp->atp_lock); + if ((rcbp = atp->atp_attached.head) != NULL) { + /* + * Got one, move it to the active response Q + */ + m_head = rcbp->rc_ioctl; + rcbp->rc_ioctl = NULL; + + if (rcbp->rc_xo) { + ATP_Q_REMOVE(atp->atp_attached, rcbp, rc_list); + 
rcbp->rc_state = RCB_NOTIFIED; + ATP_Q_APPEND(atp->atp_rcb, rcbp, rc_list); + } else { + /* detach rcbp from attached queue, + * and free any outstanding resources + */ + atp_rcb_free(rcbp); + } + ATENABLE(s, atp->atp_lock); + + /* + * copyout the request data, including the protocol header + */ + for (size=0, m=m_head; m; m = gbuf_cont(m)) { + if ((len = gbuf_len(m)) > buflen) + len = buflen; + copyout((caddr_t)gbuf_rptr(m), (caddr_t)&buf[size], len); + size += len; + if ((buflen -= len) == 0) + break; + } + gbuf_freem(m_head); + + return size; + } + ATENABLE(s, atp->atp_lock); + + return -1; +} + +int +_ATPgetrsp(fd, bdsp, err, proc) + int fd; + struct atpBDS *bdsp; + int *err; + void *proc; +{ + gref_t *gref; + register struct atp_state *atp; + register struct atp_trans *trp; + int s, tid; + char bds[atpBDSsize]; + + if ((*err = atalk_getref(0, fd, &gref, proc)) != 0) + return -1; + + if ((gref == 0) || ((atp = (struct atp_state *)gref->info) == 0) + || (atp->atp_flags & ATP_CLOSING)) { + dPrintf(D_M_ATP, D_L_ERROR, ("ATPgetrsp: stale handle=0x%x, pid=%d\n", + (u_int) gref, gref->pid)); + *err = EINVAL; + return -1; + } + + ATDISABLE(s, atp->atp_lock); + for (trp = atp->atp_trans_wait.head; trp; trp = trp->tr_list.next) { + dPrintf(D_M_ATP, D_L_INFO, + ("ATPgetrsp: atp:0x%x, trp:0x%x, state:%d\n", + (u_int) atp, (u_int) trp, trp->tr_state)); + + switch (trp->tr_state) { + case TRANS_DONE: + ATENABLE(s, atp->atp_lock); + if ((*err = copyin((caddr_t)bdsp, + (caddr_t)bds, sizeof(bds))) != 0) + return -1; + atp_pack_bdsp(trp, bds); + tid = (int)trp->tr_tid; + atp_free(trp); + copyout((caddr_t)bds, (caddr_t)bdsp, sizeof(bds)); + return tid; + + case TRANS_FAILED: + /* + * transaction timed out, return error + */ + ATENABLE(s, atp->atp_lock); + atp_free(trp); + *err = ETIMEDOUT; + return -1; + + default: + continue; + } + } + ATENABLE(s, atp->atp_lock); + + *err = EINVAL; + return -1; +} + +void +atp_drop_req(gref, m) + gref_t *gref; + gbuf_t *m; +{ + int s; + struct 
atp_state *atp; + struct atp_rcb *rcbp; + at_atp_t *athp; + at_ddp_t *ddp; + + atp = (struct atp_state *)gref->info; + if (atp->dflag) + atp = (struct atp_state *)atp->atp_msgq; + ddp = AT_DDP_HDR(m); + athp = AT_ATP_HDR(m); + + /* + * search for the corresponding rcb + */ + ATDISABLE(s, atp->atp_lock); + for (rcbp = atp->atp_rcb.head; rcbp; rcbp = rcbp->rc_list.next) { + if ( (rcbp->rc_tid == UAS_VALUE(athp->tid)) && + (rcbp->rc_socket.node == ddp->src_node) && + (rcbp->rc_socket.net == NET_VALUE(ddp->src_net)) && + (rcbp->rc_socket.socket == ddp->src_socket) ) + break; + } + + /* + * drop the request + */ + if (rcbp) + atp_rcb_free(rcbp); + ATENABLE(s, atp->atp_lock); + + gbuf_freem(m); +} diff --git a/bsd/netat/aurp.h b/bsd/netat/aurp.h new file mode 100644 index 000000000..3b64639c8 --- /dev/null +++ b/bsd/netat/aurp.h @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 23, 1996, by Justin C. 
Walker
 *
 * File: aurp.h
 */

#ifndef _NETAT_AURP_H_
#define _NETAT_AURP_H_

/*
 * AURP device ioctl (I_STR) 'subcommands'
 */
#define AUC_CFGTNL	0	/* Configure Tunnels */
#define AUC_SHTDOWN	1	/* Shutdown AURP */
#define AUC_EXPNET	2	/* Configure exported networks */
#define AUC_HIDENET	3	/* Configure hidden networks */
#define AUC_UDPPORT	4	/* UDP port number */
#define AUC_NETLIST	5	/* List of remote endpoints */
#define AUC_TIMER	6	/* Configured update interval timer */
#define AUC_ADDNET	7	/* Add remote endpoints */
#define AUC_ACCEPTALL	8	/* Accept all nets */
#define AUC_MAX		9	/* Maximum numbers of access nets */

/* Default AURP-over-UDP port */
#define AURP_SOCKNUM		387
#define AURP_MAXNETACCESS	64

#ifdef KERNEL

/* Function codes exchanged with DDP via ddp_AURPfuncx() */
#define AURPCODE_REG		0
#define AURPCODE_RTMPPKT	1
#define AURPCODE_DATAPKT	2
#define AURPCODE_AURPPROTO	3
#define AURPCODE_DEBUGINFO	10
#ifdef NOT_USED
#define AURPCODE_RTINFO		11	/* was used to set up pointers to the
					   routing table, the zone table, and
					   several functions */
#endif
#define AURPCODE_RTUPDATE	12

/*
 * Connection states; the data-sender and data-receiver sides of a
 * tunnel each keep their own state (snd_state / rcv_state below).
 */
#define AURPSTATE_Unconnected		0
#define AURPSTATE_Connected		1
#define AURPSTATE_WaitingForOpenRsp	2
#define AURPSTATE_WaitingForRIRsp	3
#define AURPSTATE_WaitingForTickleAck	4
#define AURPSTATE_WaitingForRIAck1	5
#define AURPSTATE_WaitingForRIAck2	6
#define AURPSTATE_WaitingForRIAck3	7

/* AURP packet command codes (aurp_hdr_t.command_code) */
#define AURPCMD_RIReq		1
#define AURPCMD_RIRsp		2
#define AURPCMD_RIAck		3
#define AURPCMD_RIUpd		4
#define AURPCMD_RDReq		5
#define AURPCMD_ZReq		6
#define AURPCMD_ZRsp		7
#define AURPCMD_OpenReq		8
#define AURPCMD_OpenRsp		9
#define AURPCMD_Tickle		14
#define AURPCMD_TickleAck	15

/* Subcodes used with zone-information commands */
#define AURPSUBCODE_ZoneInfo1		1
#define AURPSUBCODE_ZoneInfo2		2
#define AURPSUBCODE_GetZoneNets		3
#define AURPSUBCODE_GetDomainZoneList	4

/* Routing-table events passed to AURPrtupdate() */
#define AURPEV_Null		0
#define AURPEV_NetAdded		1
#define AURPEV_NetDeleted	2
#define AURPEV_NetRouteChange	3
#define AURPEV_NetDistChange	4
#define AURPEV_NetZoneChange	5

/* Protocol constants: version, retry counts and intervals (seconds) */
#define AURP_Version		1
#define AURP_ProbeRetryInterval	300
#define AURP_MaxTickleRetry	4
#define AURP_TickleRetryInterval 30
#define AURP_MaxRetry		10
#define AURP_RetryInterval	3
#define AURP_UpdateRate		1
#define AURP_UDType		0
#define AURP_UDNode		1
#define AURP_UDSize		2
#define AURP_FirstSeqNum	1
#define AURP_LastSeqNum		65535
#define AURP_MaxPktSize		1400
#define AURP_MaxNetAccess	64
#define AURP_NetHiden		0x01	/* [sic] "hidden" flag stored in RT_entry.AURPFlag */

/* Error codes carried in RDReq/OpenRsp payloads (negative shorts) */
#define AURPERR_NormalConnectionClose	-1
#define AURPERR_RoutingLoopDetected	-2
#define AURPERR_ConnectionOutOfSync	-3
#define AURPERR_OptionNegotiationError	-4
#define AURPERR_InvalidVersionNumber	-5
#define AURPERR_InsufficientResources	-6
#define AURPERR_AuthenticationError	-7

/*
 * Header flag bits.  NOTE(review): values are reused across different
 * packet contexts (e.g. AURPFLG_NA == AURPFLG_RMA == AURPFLG_SZI ==
 * 0x4000) — presumably each group applies to a different command code;
 * confirm against the AURP spec before treating them as one namespace.
 */
#define AURPFLG_NA	0x4000
#define AURPFLG_ND	0x2000
#define AURPFLG_NDC	0x1000
#define AURPFLG_ZC	0x0800
#define AURPFLG_RMA	0x4000
#define AURPFLG_HCRA	0x2000
#define AURPFLG_SZI	0x4000
#define AURPFLG_LAST	0x8000

/*
 * AURP state block: one per tunnel peer, tracking both the data-sender
 * and data-receiver half-connections.
 */
typedef struct {
	unsigned char	get_zi;		/* get zone info flag */
	unsigned char	rem_node;	/* node id of a tunnel peer */
	unsigned char	tickle_retry;	/* tickle retry count */
	unsigned char	rcv_retry;	/* data receiver retry count */
	unsigned char	snd_state;	/* data sender state */
	unsigned char	rcv_state;	/* data receiver state */
	unsigned char	filler[2];
	unsigned short	rcv_update_rate;
	unsigned short	snd_next_entry;	/* next entry in RT */
	unsigned short	rcv_env;	/* peer option flags (from OpenRsp) */
	unsigned short	snd_sui;	/* peer option flags (from OpenReq) */
	unsigned short	rcv_connection_id;	/* data receiver connection id */
	unsigned short	snd_connection_id;	/* data sender connection id */
	unsigned short	rcv_sequence_number;	/* data receiver sequence number */
	unsigned short	snd_sequence_number;	/* data sender sequence number */
	int	rcv_tmo;	/* receiver retry timer armed flag */
	int	snd_tmo;	/* sender retry timer armed flag */
	gbuf_t	*rsp_m;		/* pending RI response chain */
	gbuf_t	*upd_m;		/* pending RI update chain */
} aurp_state_t;

/*
 * AURP protocol header (precedes every tunnelled AURP packet)
 */
typedef struct {
	unsigned short	connection_id;
	unsigned short	sequence_number;
	unsigned short	command_code;
	unsigned short	flags;
} aurp_hdr_t;

#ifdef KERNEL

#ifdef AURP_SUPPORT
/* Module-wide state, defined in aurp_gdata.c */
extern atlock_t		aurpgen_lock;
extern gref_t		*aurp_gref;
extern unsigned char	dst_addr_cnt;
extern unsigned char	net_access_cnt;
extern unsigned char	net_export;
extern unsigned short	rcv_connection_id;
extern int		net_port;
extern int		update_tmo;
extern aurp_state_t	aurp_state[];
extern unsigned short	net_access[];
#endif

/* Simple mbuf queue (head/tail/count) */
struct myq
{	struct mbuf	*q_head;
	struct mbuf	*q_tail;
	int		q_cnt;
};

#define LOCK_DECL(x)	atlock_t x

/*
 * Quandary: if we use a single socket, we have to rebind on each call.
 * If we use separate sockets per tunnel endpoint, we have to examine
 * each one on wakeup.  What to do; what to do?
 */
struct aurp_global_t
{	int		src_addr;	/* What's our IP address? */
	int		udp_port;	/* Local UDP port */
	unsigned short	net_access[AURP_MAXNETACCESS];
	long		dst_addr[256];	/* Tunnel 'other ends', passed in from user */
	int		pid;		/* Who are we? */
	struct socket	*tunnel;	/* IP socket for all IP endpoints */
	int		event;		/* AE_* event bits posted by aurp_wakeup() */
	int		event_anchor;	/* Sleep queue anchor */
	atlock_t	glock;		/* aurp_global lock */
	struct uio	auio;		/* Dummy uio struct for soreceive() */
	/* Statistics */
	unsigned int	toosmall;	/* size less than domain header, from UDP */
	unsigned int	no_mbufs;	/* gbuf_to_mbuf failed */
	unsigned int	no_gbufs;	/* mbuf_to_gbuf failed */
	unsigned int	shutdown;	/* shutdown flag */
	unsigned int	running;	/* running flag */
};

#define AE_ATALK	0x01	/* A/talk input event */
#define AE_UDPIP	0x02	/* UDP/IP input event */
#define AE_SHUTDOWN	0x04	/* Shutdown AURP process */

void aurp_wakeup __P((struct socket *, caddr_t, int));
struct mbuf *at_gbuf_to_mbuf __P((gbuf_t *));
gbuf_t *at_mbuf_to_gbuf __P((struct mbuf *, int));
int at_insert __P((gbuf_t *m, unsigned int type, unsigned int node));
int ddp_AURPfuncx __P((int code, void *param, unsigned char node));
int AURPinit __P((void));
int aurpd_start __P((void));
void atalk_to_ip __P((gbuf_t *m));
void AURPaccess __P((void));
void AURPshutdown __P((void));
void AURPiocack __P((gref_t *gref, gbuf_t *m));
void AURPiocnak __P((gref_t *gref, gbuf_t *m, int error));
void AURPsndZReq __P((aurp_state_t *state));
void AURPsndZRsp __P((aurp_state_t *state, gbuf_t *dat_m, int flag));
void AURPsndRIUpd __P((aurp_state_t *state));
void AURPsndRIReq __P((aurp_state_t *state));
void AURPsndRIAck __P((aurp_state_t *state, gbuf_t *m, unsigned short flags));
void AURPsndOpenReq __P((aurp_state_t *state));
void AURPsndRDReq __P((aurp_state_t *state));
void AURPrcvZReq __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvZRsp __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvRIUpd __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvRIReq __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvRIAck __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvRIRsp __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvOpenReq __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvOpenRsp __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvTickle __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvTickleAck __P((aurp_state_t *state, gbuf_t *m));
void AURPrcvRDReq __P((aurp_state_t *state, gbuf_t *m));
void AURPfreemsg __P((gbuf_t *m));
void AURPrtupdate __P((RT_entry *entry, unsigned char ev));
void AURPsend __P((gbuf_t *mdata, int type, int node));
void AURPcleanup __P((aurp_state_t *state));
void AURPpurgeri __P((unsigned char node));
int AURPgetri __P((short next_entry, unsigned char *buf, short *len));
int AURPsetri __P((unsigned char node, gbuf_t *m));
int AURPupdateri __P((unsigned char node, gbuf_t *m));

#endif /* KERNEL */

/* AURP header for IP tunneling */
typedef struct aurp_domain
{	char	dst_length;
	char	dst_authority;
	short	dst_distinguisher;
	long	dst_address;
	char	src_length;
	char	src_authority;
	short	src_distinguisher;
	long	src_address;
	short	version;
	short	reserved;
	short	type;
} aurp_domain_t;

/* AURP/domain header constants */
#define AUD_Version	0x1
#define AUD_Atalk	0x2
#define AUD_AURP	0x3

/* IP domain identifier constants */
#define IP_LENGTH		7
#define IP_AUTHORITY		1
#define IP_DISTINGUISHER	0
/* Need this because the )(*&^%$#@ compiler rounds up the size */
#define IP_DOMAINSIZE		22

/****### LD 9/26/97*/
extern struct aurp_global_t aurp_global;
#endif /* KERNEL */
#endif /* _NETAT_AURP_H_ */
+ * + * File: aurpd.c + */ + +/* + * Kernel process to implement the AURP daemon: + * manage tunnels to remote AURP servers across IP networks + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define M_RCVBUF (64 * 1024) +#define M_SNDBUF (64 * 1024) + +static int ip_to_atalk(struct sockaddr_in *fp, register gbuf_t *p_mbuf); +static int aurp_bindrp(struct socket *so); + +struct aurp_global_t aurp_global; + +/* + * Initialize the aurp pipe - + * -Create, initialize, and start the aurpd kernel process; we need + * a process to permit queueing between the socket and the stream, + * which is necessary for orderly access to the socket structure. + * -The user process (aurpd) is there to 'build' the AURP + * stream, act as a 'logging agent' (:-}), and hold open the stream + * during its use. + * -Data and AURP packets from the DDP stream will be fed into the + * UDP tunnel (AURPsend()) + * -Data and AURP packets from the UDP tunnel will be fed into the + * DDP stream (ip_to_atalk(), via the kernel process). + */ +int +aurpd_start() +{ + register int error; + register struct socket *so; + struct mbuf *m; + int maxbuf; + struct sockopt sopt; + + if (suser(current_proc()->p_ucred, ¤t_proc()->p_acflag) != 0 ) + return(EPERM); + + /* + * Set up state prior to starting kernel process so we can back out + * (error return) if something goes wrong. 
+ */ + bzero((char *)&aurp_global.tunnel, sizeof(aurp_global.tunnel)); + /*lock_alloc(&aurp_global.glock, LOCK_ALLOC_PIN, AURP_EVNT_LOCK, -1);*/ + ATLOCKINIT(aurp_global.glock); + ATEVENTINIT(aurp_global.event_anchor); + + /* open udp socket */ + if (aurp_global.udp_port == 0) + aurp_global.udp_port = AURP_SOCKNUM; + error = socreate(AF_INET, &aurp_global.tunnel, SOCK_DGRAM, + IPPROTO_UDP); + if (error) + { dPrintf(D_M_AURP, D_L_FATAL, ("AURP: Can't get socket (%d)\n", + error)); + return(error); + } + + so = aurp_global.tunnel; + + if ((error = aurp_bindrp(so)) != 0) + { dPrintf(D_M_AURP, D_L_FATAL, + ("AURP: Can't bind to port %d (error %d)\n", + aurp_global.udp_port, error)); + soclose(so); + return(error); + } + + sblock(&so->so_rcv, M_WAIT); + sblock(&so->so_snd, M_WAIT); + + /* + * Set socket Receive buffer size + */ + m = m_get(M_WAIT, MT_SOOPTS); + if (m == NULL) { + error = ENOBUFS; + goto out; + } else { + maxbuf = M_RCVBUF; + sopt.sopt_val = &maxbuf; + sopt.sopt_valsize = sizeof(maxbuf); + sopt.sopt_level = SOL_SOCKET; + sopt.sopt_name = SO_RCVBUF; + sopt.sopt_dir = SOPT_SET; + if ((error = sosetopt(so, &sopt)) != 0) + goto out; + } + + /* + * Set socket Send buffer size + */ + m = m_get(M_WAIT, MT_SOOPTS); + if (m == NULL) { + error = ENOBUFS; + goto out; + } else { + + maxbuf = M_SNDBUF; + sopt.sopt_val = &maxbuf; + sopt.sopt_valsize = sizeof(maxbuf); + sopt.sopt_level = SOL_SOCKET; + sopt.sopt_name = SO_SNDBUF; + sopt.sopt_dir = SOPT_SET; + if ((error = sosetopt(so, &sopt)) != 0) + goto out; + } + + so->so_upcall = aurp_wakeup; + so->so_upcallarg = (caddr_t)AE_UDPIP; /* Yuck */ + so->so_state |= SS_NBIO; + so->so_rcv.sb_flags |=(SB_SEL|SB_NOINTR); + so->so_snd.sb_flags |=(SB_SEL|SB_NOINTR); + +out: + sbunlock(&so->so_snd); + sbunlock(&so->so_rcv); + + return(error); +} + +int +AURPgetmsg(err) + int *err; +{ register struct socket *so; + register int s, events; + + so = aurp_global.tunnel; + *err = 0; + + for (;;) + { gbuf_t *from, *p_mbuf; + int flags 
/*
 * AURPgetmsg: main receive loop of the AURP daemon.
 *
 * Sleeps on aurp_global.event_anchor until aurp_wakeup() posts an
 * AE_* event bit, then drains the UDP tunnel socket with soreceive()
 * and hands each packet to ip_to_atalk().  Returns -1 (via *err) only
 * on shutdown or a fatal tsleep error; *err carries the errno
 * (ESHUTDOWN for an orderly shutdown).
 */
int
AURPgetmsg(err)
	int *err;
{	register struct socket *so;
	register int s, events;

	so = aurp_global.tunnel;
	*err = 0;

	for (;;)
	{	gbuf_t *from, *p_mbuf;
		int flags = MSG_DONTWAIT;
		struct uio auio;

		/*
		 * Wait for a package to arrive.  This will be from the
		 * IP side - sowakeup() calls aurp_wakeup()
		 * when a packet arrives
		 */

		/* sample-and-clear the event bits under the global lock */
		ATDISABLE(s, aurp_global.glock);
		events = aurp_global.event;
		if (((*err == 0) || (*err == EWOULDBLOCK)) && events == 0)
		{
			*err = tsleep(&aurp_global.event_anchor, PSOCK | PCATCH, "AURPgetmsg", 0);
			events = aurp_global.event;
			aurp_global.event = 0;
		}
		ATENABLE(s, aurp_global.glock);

		/*
		 * Shut down if we have the AE_SHUTDOWN event or if we got
		 * a system error other than EWOULDBLOCK, such as EINTR.
		 */
		if (((*err != EWOULDBLOCK) && (*err != 0)) || events & AE_SHUTDOWN)
		{
			dPrintf(D_M_AURP, D_L_SHUTDN_INFO,
				("AURPgetmsg: AE_SHUTDOWN detected--starting shutdown sequence\n"));
			aurp_global.shutdown = 1;
			/*
			 * NOTE(review): busy-wait spin until concurrent
			 * atalk_to_ip() senders drain (running drops to 0);
			 * burns CPU while waiting — presumably short-lived,
			 * but confirm no sender can block while 'running'.
			 */
			while (aurp_global.running)
				;
			/*lock_free(&aurp_global.glock);*/
			aurp_global.tunnel = 0;
			aurp_global.event = 0;
			aurp_global.shutdown = 0;
			soclose(so);
			if (*err == 0)
				*err = ESHUTDOWN;
			dPrintf(D_M_AURP, D_L_SHUTDN_INFO,
				("AURPgetmsg: shutdown completed\n"));
			return -1;
		}

		/*
		 * Set up the nominal uio structure -
		 * give it no iov's, point off to non-existent user space,
		 * but make sure the 'resid' count means something.
		 */

		auio.uio_iov = NULL;
		auio.uio_iovcnt = 0;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_offset = 0;			/* XXX */

		/* Keep up an even flow... */
		for (;;)
		{
/*
 * This should be large enough to encompass a full DDP packet plus
 * domain header.
 */
#define A_LARGE_SIZE 700

			flags = MSG_DONTWAIT;
			auio.uio_resid = A_LARGE_SIZE;
			/*
			 * 'from' receives the sender's sockaddr (cast:
			 * gbuf_t* and struct sockaddr* are interchangeable
			 * here by project convention — see ip_to_atalk).
			 */
			*err = soreceive(so, (struct sockaddr **)&from, &auio, &p_mbuf, 0, &flags);
			dPrintf(D_M_AURP, D_L_VERBOSE,
				("AURPgetmsg: soreceive returned %d, aurp_global.event==0x%x\n", *err, events));
			/* soreceive() sets *mp to zero! at start */
			if (p_mbuf)
				ip_to_atalk(from, p_mbuf);
			if (*err || (p_mbuf == NULL)) {
				/*
				 * An error occurred in soreceive(),
				 * so clear the data input event flag
				 * and break out of this inner loop.
				 *
				 * XXX Note that clearing AE_UDPIP here could
				 * cause us to lose an AE_UDPIP event that
				 * was posted in aurp_global.event between
				 * the soreceive() above and the code here.
				 * The protocol should recover from this
				 * lost event, though, since the next
				 * request (a tickle, for example) from
				 * the other end of the tunnel will cause
				 * another AE_UDPIP event to be posted,
				 * which will wake us from the sleep at
				 * the top of the outer loop.
				 */
				int s;
				ATDISABLE(s, aurp_global.glock);
				aurp_global.event &= ~AE_UDPIP;
				ATENABLE(s, aurp_global.glock);
				dPrintf(D_M_AURP, D_L_WARNING, ("AURPgetmsg: spurious soreceive, err==%d, p_mbuf==0x%x\n", *err, (unsigned int) p_mbuf));
				break;
			}
		}
	}
	return -1;
}

/*
 * Wakeup the sleeping giant - we've put a message on his queue(s).
 * The arg indicates what queue has been updated.
 *
 * This conforms to the so_upcall function pointer member of struct sockbuf.
 * 'p' smuggles an AE_* event bit through the caddr_t argument
 * (set up as so_upcallarg in aurpd_start); the int cast recovers it.
 */
void aurp_wakeup(struct socket *so, register caddr_t p, int state)
{
	register int s;
	register int bit;

	bit = (int) p;
	/* post the event bit under the global lock, then wake the daemon */
	ATDISABLE(s, aurp_global.glock);
	aurp_global.event |= bit;
	ATENABLE(s, aurp_global.glock);

	dPrintf(D_M_AURP, D_L_STATE_CHG,
		("aurp_wakeup: bit 0x%x, aurp_global.event now 0x%x\n",
		bit, aurp_global.event));

	thread_wakeup(&aurp_global.event_anchor);
}
+ */ +static int +aurp_bindrp(struct socket *so) +{ + struct sockaddr_in sin; + struct proc *p = current_proc(); + gbuf_t *m; + int error; + + + bzero(&sin, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = htons(aurp_global.src_addr); + sin.sin_port = htons(aurp_global.udp_port); + sin.sin_len = sizeof(struct sockaddr_in); + + sblock(&so->so_rcv, M_WAIT); + sblock(&so->so_snd, M_WAIT); + so->so_state |= SS_PRIV; + error = (*so->so_proto->pr_usrreqs->pru_bind)(so, (struct sockaddr *) &sin, p); + sbunlock(&so->so_snd); + sbunlock(&so->so_rcv); + + return (error); +} + +/* + * receive from UDP + * fp is the 'source address' mbuf; p_mbuf is the data mbuf. + * Use the source address to find the 'node number' (index of the address), + * and pass that to the next stage. + */ +int ip_to_atalk(register struct sockaddr_in *rem_addr, register gbuf_t *p_mbuf) +{ + register aurp_domain_t *domain; + unsigned char node; + + + /* determine the node where the packet came from */ + for (node=1; node <= dst_addr_cnt; node++) { + if (aurp_global.dst_addr[node] == *(long *)&rem_addr->sin_addr) + break; + } + if (node > dst_addr_cnt) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrecv: invalid node, %d.%lx\n", + rem_addr->sin_port, + rem_addr->sin_addr.s_addr)); + + gbuf_freem(p_mbuf); + FREE(rem_addr, M_SONAME); + return -1; + } + + /* validate the domain */ + domain = (aurp_domain_t *)gbuf_rptr(p_mbuf); + if ( (domain->dst_length != IP_LENGTH) || + (domain->dst_authority != IP_AUTHORITY) || + (domain->version != AUD_Version) || + ((domain->type != AUD_Atalk) && (domain->type != AUD_AURP)) ) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrecv: invalid domain, %d.%lx\n", + rem_addr->sin_port, + rem_addr->sin_addr.s_addr)); + + gbuf_freem(p_mbuf); + FREE(rem_addr, M_SONAME); + return -1; + } + + /* Remove domain header */ + p_mbuf->m_pkthdr.len -= IP_DOMAINSIZE; + gbuf_rinc(p_mbuf,IP_DOMAINSIZE); + gbuf_set_type(p_mbuf, MSG_DATA); + + /* forward the packet to the local AppleTalk 
stack */ + + at_insert(p_mbuf, domain->type, node); + FREE(rem_addr, M_SONAME); + return 0; +} + +/* + * send to UDP + * The real work has been done already. Here, we just cobble together + * a sockaddr for the destination and call sosend(). + */ +void +atalk_to_ip(register gbuf_t *m) +{ register aurp_domain_t *domain; + int error; + int flags = MSG_DONTWAIT; + struct sockaddr_in rem_addr; + int s; + + m->m_type = MT_HEADER; + m->m_pkthdr.len = gbuf_msgsize(m); + m->m_pkthdr.rcvif = 0; + + bzero((char *) &rem_addr, sizeof(rem_addr)); + rem_addr.sin_family = PF_INET; + rem_addr.sin_port = aurp_global.udp_port; + rem_addr.sin_len = sizeof (struct sockaddr_in); + domain = (aurp_domain_t *)gbuf_rptr(m); + *(long *) &rem_addr.sin_addr = domain->dst_address; + + ATDISABLE(s, aurp_global.glock); + aurp_global.running++; + ATENABLE(s, aurp_global.glock); + if (aurp_global.shutdown) { + gbuf_freem(m); + ATDISABLE(s, aurp_global.glock); + aurp_global.running--; + ATENABLE(s, aurp_global.glock); + dPrintf(D_M_AURP, D_L_SHUTDN_INFO, + ("atalk_to_ip: detected aurp_global.shutdown state\n")); + return; + } + dPrintf(D_M_AURP, D_L_VERBOSE, ("atalk_to_ip: calling sosend\n")); + error = sosend(aurp_global.tunnel, (struct sockaddr *) &rem_addr, NULL, m, NULL, flags); + if (error) + { /*log error*/ + dPrintf(D_M_AURP, D_L_ERROR, ("AURP: sosend error (%d)\n", + error)); + } + + ATDISABLE(s, aurp_global.glock); + aurp_global.running--; + ATENABLE(s, aurp_global.glock); + return; +} + diff --git a/bsd/netat/aurp_cfg.c b/bsd/netat/aurp_cfg.c new file mode 100644 index 000000000..bc7de2587 --- /dev/null +++ b/bsd/netat/aurp_cfg.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + * File: cfg.c + */ +#define RESOLVE_DBG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern atlock_t aurpgen_lock; +static int aurp_inited = 0; +static char aurp_minor_no[4]; + +int aurp_open(gref) + gref_t *gref; +{ + extern void AURPcmdx(); + int i; + + if (!aurp_inited) { + aurp_inited = 1; + ATLOCKINIT(aurpgen_lock); + } + + for (i=1; i < sizeof(aurp_minor_no); i++) { + if (aurp_minor_no[i] == 0) { + aurp_minor_no[i] = (char )i; + break; + } + } + if (i == sizeof(aurp_minor_no)) + return EAGAIN; + if (i == 1) { + aurp_gref = gref; + if (ddp_AURPfuncx(AURPCODE_REG, AURPcmdx, 0)) { + aurp_gref = 0; + aurp_minor_no[i] = 0; + return EPROTO; + } + } + + gref->info = (void *)&aurp_minor_no[i]; + return 0; +} + +int aurp_close(gref) + gref_t *gref; +{ + if (*(char *)gref->info == 1) { + aurp_gref = 0; + aurp_inited = 0; + ddp_AURPfuncx(AURPCODE_REG, 0, 0); + } + + *(char *)gref->info = 0; + gref->info = 0; + return 0; +} diff --git a/bsd/netat/aurp_gdata.c b/bsd/netat/aurp_gdata.c new file mode 100644 index 000000000..fa15fbcf4 --- /dev/null +++ 
b/bsd/netat/aurp_gdata.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + * File: gdata.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +atlock_t aurpgen_lock; +gref_t *aurp_gref; +unsigned char dst_addr_cnt; +unsigned char net_access_cnt; +unsigned char net_export; +unsigned short rcv_connection_id; +int net_port; +int update_tmo; +aurp_state_t aurp_state[256]; +unsigned short net_access[AURP_MaxNetAccess]; diff --git a/bsd/netat/aurp_misc.c b/bsd/netat/aurp_misc.c new file mode 100644 index 000000000..662499730 --- /dev/null +++ b/bsd/netat/aurp_misc.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
/* Send a successful reply to an ioctl command back up the control stream. */
void AURPiocack(gref, m)
	gref_t *gref;
	gbuf_t *m;
{
	/* send ok reply to ioctl command */
	gbuf_set_type(m, MSG_IOCACK);
	atalk_putnext(gref, m);
}

/*
 * Send an error reply to an ioctl command back up the control stream,
 * dropping any attached data and stamping the error code into the
 * ioc block.
 */
void AURPiocnak(gref, m, error)
	gref_t *gref;
	gbuf_t *m;
	int error;
{
	ioc_t *iocbp = (ioc_t *)gbuf_rptr(m);

	/* send error reply to ioctl command */
	if (gbuf_cont(m)) {
		gbuf_freem(gbuf_cont(m));
		gbuf_cont(m) = 0;
	}
	iocbp->ioc_error = error;
	iocbp->ioc_count = 0;
	iocbp->ioc_rval = -1;
	gbuf_set_type(m, MSG_IOCNAK);
	atalk_putnext(gref, m);
}

/*
 * Periodic update timer callback: for every tunnel peer, push pending
 * zone requests and RI updates, then re-arm itself.  Runs under the
 * network funnel (timeout callbacks arrive outside it).
 */
void AURPupdate(arg)
	void *arg;
{
	unsigned char node;
	boolean_t funnel_state;
	aurp_state_t *state;

	funnel_state = thread_funnel_set(network_flock, TRUE);

	state = (aurp_state_t *)&aurp_state[1];

	/* module torn down since the timer was armed: do nothing */
	if (aurp_gref == 0) {
		(void) thread_funnel_set(network_flock, FALSE);
		return;
	}
	/*
	 * for every tunnel peer, do the following periodically:
	 * 1. send zone requests to determine zone names of networks
	 *    that still do not have associated zone names.
	 * 2. send any RI update that are pending
	 */
	for (node = 1; node <= dst_addr_cnt; node++, state++) {
		AURPsndZReq(state);
		AURPsndRIUpd(state);
	}

	/* restart the periodic update timer */
	timeout(AURPupdate, arg, AURP_UpdateRate*10*HZ);
	update_tmo = 1;

	(void) thread_funnel_set(network_flock, FALSE);
}

/* Free an entire chain of messages linked through gbuf_next. */
void AURPfreemsg(m)
	gbuf_t *m;
{
	gbuf_t *tmp_m;

	while ((tmp_m = m) != 0) {
		m = gbuf_next(m);
		gbuf_next(tmp_m) = 0;
		gbuf_freem(tmp_m);
	}
}

/*
 * Start AURP: arm the periodic update timer, clear the AURP flags on
 * every routing-table entry, and send an OpenReq to each configured
 * tunnel peer.  Always returns 0.
 */
int AURPinit()
{
	unsigned char node;
	aurp_state_t *state = (aurp_state_t *)&aurp_state[1];
	short entry_num;
	RT_entry *entry = (RT_entry *)RT_table;

	/* start the periodic update timer */
	timeout(AURPupdate, 0, AURP_UpdateRate*10*HZ);
	update_tmo = 1;

	/* initialize AURP flags for entries in the RT table */
	for (entry_num = 0; entry_num < RT_maxentry; entry_num++, entry++)
		entry->AURPFlag = 0;

	/* initiate connections to peers */
	for (node = 1; node <= dst_addr_cnt; node++, state++) {
		bzero((char *)state, sizeof(*state));
		state->rem_node = node;
		state->snd_state = AURPSTATE_Unconnected;
		state->rcv_state = AURPSTATE_Unconnected;
		dPrintf(D_M_AURP, D_L_STARTUP_INFO,
			("AURPinit: sending OpenReq to node %u\n", node));
		AURPsndOpenReq(state);
	}

	return 0;
}

/* Release any pending response/update message chains held in a peer state. */
void AURPcleanup(state)
	aurp_state_t *state;
{
	if (state->rsp_m) {
		gbuf_freem(state->rsp_m);
		state->rsp_m = 0;
	}

	if (state->upd_m) {
		gbuf_freem(state->upd_m);
		state->upd_m = 0;
	}
}

/*
 * Stop AURP: cancel the update timer, notify every peer that the
 * router is going down, and post AE_SHUTDOWN to the daemon loop.
 *
 * NOTE(review): untimeout(AURPupdate, 0) only matches a timeout armed
 * with arg==0 (as AURPinit does); AURPupdate re-arms with its own
 * 'arg', so the cancel presumably relies on arg always being 0 —
 * confirm no caller arms it with a nonzero arg.
 */
void AURPshutdown()
{
	unsigned char node;
	aurp_state_t *state = (aurp_state_t *)&aurp_state[1];

	/* cancel the periodic update timer */
	untimeout(AURPupdate, 0);
	update_tmo = 0;

	/* notify tunnel peers of router going-down */
	for (node = 1; node <= dst_addr_cnt; node++, state++) {
		AURPcleanup(state);
		AURPsndRDReq(state);
	}

	/* bring down the router */
	aurp_wakeup(NULL, (caddr_t) AE_SHUTDOWN, 0);
}

/*
 * Apply the configured export/hide policy to the routing table:
 * when net_export is set, everything is hidden by default and the
 * nets listed in net_access[] are exposed; otherwise everything is
 * exposed and the listed nets are hidden.
 */
void AURPaccess()
{
	unsigned char i;
	short entry_num;
	RT_entry *entry;

	entry = (RT_entry *)RT_table;
	for (entry_num = 0; entry_num < RT_maxentry; entry_num++, entry++)
		entry->AURPFlag = net_export ? AURP_NetHiden : 0;

	for (i = 0; i < net_access_cnt; i++) {
		/* export or hide networks as configured */
		if ((entry = rt_blookup(net_access[i])) != 0)
			entry->AURPFlag = net_export ? 0 : AURP_NetHiden;
	}
}
/*
 * funnel version of AURPsndOpenReq: timeout callbacks run outside the
 * network funnel, so this wrapper takes it around the real work.
 * NOTE(review): unlike AURPupdate, the previous funnel state is not
 * saved/restored — presumably timeouts always arrive unfunnelled;
 * confirm.
 */
void AURPsndOpenReq_funnel(state)
	aurp_state_t *state;
{
	thread_funnel_set(network_flock, TRUE);
	AURPsndOpenReq(state);
	thread_funnel_set(network_flock, FALSE);
}

/*
 * Send (or retry) an OpenReq to a tunnel peer and arm the retry timer.
 * Gives up and resets the receiver side to Unconnected after
 * AURP_MaxRetry attempts.  Re-uses the previous connection id on a
 * retry; otherwise allocates the next one from the global counter.
 */
void AURPsndOpenReq(state)
	aurp_state_t *state;
{
	int msize;
	gbuf_t *m;
	aurp_hdr_t *hdrp;

	/* module not up: nothing to do */
	if (aurp_gref == 0) {
		return;
	}
	/* a retry only makes sense while still waiting for the OpenRsp */
	if (state->rcv_retry && (state->rcv_state != AURPSTATE_WaitingForOpenRsp)) {
		return;
	}

	/* stop trying if the retry count exceeds the maximum value */
	if (++state->rcv_retry > AURP_MaxRetry) {
		dPrintf(D_M_AURP, D_L_WARNING,
			("AURPsndOpenReq: no response, node %u\n",
			state->rem_node));
		state->rcv_state = AURPSTATE_Unconnected;
		state->rcv_tmo = 0;
		state->rcv_retry = 0;
		return;
	}

	/* header + 2-byte version + 1-byte option count */
	msize = sizeof(aurp_hdr_t) + 3;
	if ((m = (gbuf_t *)gbuf_alloc(msize, PRI_MED)) != 0) {
		gbuf_wset(m, msize);

		/* construct the open request packet */
		hdrp = (aurp_hdr_t *)gbuf_rptr(m);
		if (state->rcv_retry > 1)
			hdrp->connection_id = state->rcv_connection_id;
		else {
			/* new attempt: allocate the next nonzero connection id */
			if (++rcv_connection_id == 0)
				rcv_connection_id = 1;
			hdrp->connection_id = rcv_connection_id;
		}
		hdrp->sequence_number = 0;
		hdrp->command_code = AURPCMD_OpenReq;
		hdrp->flags = (AURPFLG_NA | AURPFLG_ND | AURPFLG_NDC | AURPFLG_ZC);
		*(short *)(hdrp+1) = AURP_Version;
		((char *)(hdrp+1))[2] = 0;	/* option count */

		/* update state info */
		state->rcv_connection_id = hdrp->connection_id;
		state->rcv_state = AURPSTATE_WaitingForOpenRsp;

		/* send the packet */
		dPrintf(D_M_AURP, D_L_TRACE,
			("AURPsndOpenReq: sending AURPCMD_OpenReq, node %u\n",
			state->rem_node));
		AURPsend(m, AUD_AURP, state->rem_node);
	}

	/* start the retry timer */
	timeout(AURPsndOpenReq_funnel, state, AURP_RetryInterval*HZ);
	state->rcv_tmo = 1;
}

/*
 * Handle an incoming OpenReq: validate the protocol version, reply
 * with an OpenRsp carrying either the update rate or an error code,
 * (re)establish the sender-side connection, and kick off our own
 * receiver-side open if it is not yet connected.  Consumes m (it is
 * rewritten in place as the response and sent back).
 */
void AURPrcvOpenReq(state, m)
	aurp_state_t *state;
	gbuf_t *m;
{
	short rc, version;
	aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m);
	unsigned short sui = hdrp->flags;

	/* make sure we're in a valid state to accept it */
	if ((update_tmo == 0) || ((state->snd_state != AURPSTATE_Unconnected) &&
			(state->snd_state != AURPSTATE_Connected))) {
		dPrintf(D_M_AURP, D_L_WARNING,
			("AURPrcvOpenReq: unexpected request, update_tmo=0x%x, snd_state=%u\n", (unsigned int) update_tmo, state->snd_state));
		gbuf_freem(m);
		return;
	}

	/* check for the correct version number */
	version = *(short *)(hdrp+1);
	if (version != AURP_Version) {
		dPrintf(D_M_AURP, D_L_WARNING,
			("AURPrcvOpenReq: invalid version number %d, expected %d\n", version, AURP_Version));
		rc = AURPERR_InvalidVersionNumber;
	} else
		rc = (short)AURP_UpdateRate;

	/* construct the open response packet (in the request's buffer) */
	gbuf_wset(m, sizeof(aurp_hdr_t)+sizeof(short));
	hdrp->command_code = AURPCMD_OpenRsp;
	hdrp->flags = 0;
	*(short *)(hdrp+1) = rc;
	((char *)(hdrp+1))[2] = 0;	/* option count */

	/*
	 * reset if we're in the Connected state and this is
	 * a completely new open request
	 */
	if ((state->snd_state == AURPSTATE_Connected) &&
		((state->snd_connection_id != hdrp->connection_id) ||
		(state->snd_sequence_number != AURP_FirstSeqNum))) {
		extern void AURPsndTickle();
		if (state->rcv_state == AURPSTATE_Connected) {
			state->rcv_state = AURPSTATE_Unconnected;
			untimeout(AURPsndTickle, state);
		}
		state->snd_state = AURPSTATE_Unconnected;
		AURPcleanup(state);
		AURPpurgeri(state->rem_node);
	}

	/* update state info */
	if (state->snd_state == AURPSTATE_Unconnected) {
		state->snd_state = AURPSTATE_Connected;
		state->snd_sui = sui;
		state->snd_connection_id = hdrp->connection_id;
		state->snd_sequence_number = AURP_FirstSeqNum;
	}

	/* send the packet */
	AURPsend(m, AUD_AURP, state->rem_node);

	/* open connection for the data receiver side if not yet connected */
	if (state->rcv_state == AURPSTATE_Unconnected) {
		state->rcv_retry = 0;
		state->tickle_retry = 0;
		state->rcv_sequence_number = 0;
		AURPsndOpenReq(state);
	}
}

/*
 * Handle an incoming OpenRsp: validate connection id, cancel the
 * OpenReq retry timer, record the negotiated update rate and option
 * flags, move the receiver side to Connected, start the tickle timer
 * and request the peer's routing information.  Consumes m.
 */
void AURPrcvOpenRsp(state, m)
	aurp_state_t *state;
	gbuf_t *m;
{
	extern void AURPsndTickle();
	short rc;
	aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m);

	/* make sure we're in a valid state to accept it */
	if (state->rcv_state != AURPSTATE_WaitingForOpenRsp) {
		dPrintf(D_M_AURP, D_L_WARNING,
			("AURPrcvOpenRsp: unexpected response\n"));
		gbuf_freem(m);
		return;
	}

	/* check for the correct connection id */
	if (hdrp->connection_id != state->rcv_connection_id) {
		dPrintf(D_M_AURP, D_L_WARNING,
			("AURPrcvOpenRsp: invalid connection id, r=%d, m=%d\n",
			hdrp->connection_id, state->rcv_connection_id));
		gbuf_freem(m);
		return;
	}

	/* cancel the retry timer */
	untimeout(AURPsndOpenReq_funnel, state);
	state->rcv_tmo = 0;
	state->rcv_retry = 0;

	/* update state info */
	state->rcv_sequence_number = AURP_FirstSeqNum;
	state->rcv_env = hdrp->flags;

	/* check for error: a negative rc is an AURPERR_* code */
	rc = *(short *)(hdrp+1);
	gbuf_freem(m);
	if (rc < 0) {
		dPrintf(D_M_AURP, D_L_WARNING,
			("AURPrcvOpenRsp: error=%d\n", rc));
		return;
	}

	/* update state info */
	state->rcv_update_rate = (unsigned short)rc;
	state->rcv_state = AURPSTATE_Connected;
	dPrintf(D_M_AURP, D_L_TRACE, ("AURPrcvOpenRsp: moved rcv_state to AURPSTATE_Connected\n"));

	/* start tickle */
	timeout(AURPsndTickle, state, AURP_TickleRetryInterval*HZ);

	/* get routing info */
	AURPsndRIReq(state);
}
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ * + * File: rd.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* */ +void AURPsndRDReq(state) + aurp_state_t *state; +{ + int msize; + gbuf_t *m; + aurp_hdr_t *hdrp; + + if ((state->rcv_state == AURPSTATE_Unconnected) || + (state->snd_state == AURPSTATE_Unconnected)) + return; + + /* update state info */ + state->rcv_state = AURPSTATE_Unconnected; + state->snd_state = AURPSTATE_Unconnected; + + /* notify tunnel peer of router going-down for the data receiver side */ + msize = sizeof(aurp_hdr_t) + sizeof(short); + if ((m = (gbuf_t *)gbuf_alloc(msize, PRI_MED)) != 0) { + gbuf_wset(m,msize); + + /* construct the router down packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->rcv_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_RDReq; + hdrp->flags = 0; + *(short *)(hdrp+1) = AURPERR_NormalConnectionClose; + + /* send the packet */ + AURPsend(m, AUD_AURP, state->rem_node); + } + + /* notify tunnel peer of router going-down for the data sender side */ + msize = sizeof(aurp_hdr_t) + sizeof(short); + if ((m = (gbuf_t *)gbuf_alloc(msize, PRI_MED)) != 0) { + gbuf_wset(m,msize); + + /* construct the router down packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->snd_connection_id; + hdrp->sequence_number = state->snd_sequence_number; + hdrp->command_code = AURPCMD_RDReq; + hdrp->flags = 0; + *(short *)(hdrp+1) = AURPERR_NormalConnectionClose; + + /* send the packet */ + AURPsend(m, AUD_AURP, state->rem_node); + } +} + +/* */ +void AURPrcvRDReq(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + /* update state info */ + state->rcv_state = AURPSTATE_Unconnected; + state->snd_state = AURPSTATE_Unconnected; + AURPcleanup(state); + + /* purge all routes associated with the tunnel peer going-down */ + AURPpurgeri(state->rem_node); + + /* respond to 
the going-down peer with an RI Ack packet */ + AURPsndRIAck(state, m, 0); +} diff --git a/bsd/netat/aurp_ri.c b/bsd/netat/aurp_ri.c new file mode 100644 index 000000000..b0a492ca6 --- /dev/null +++ b/bsd/netat/aurp_ri.c @@ -0,0 +1,866 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ * + * File: ri.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* */ +void AURPsndRIAck(state, m, flags) + aurp_state_t *state; + gbuf_t *m; + unsigned short flags; +{ + unsigned short sequence_number; + aurp_hdr_t *hdrp; + int msize = sizeof(aurp_hdr_t); + + if (m) { + sequence_number = ((aurp_hdr_t *)gbuf_rptr(m))->sequence_number; + gbuf_wset(m,sizeof(aurp_hdr_t)); + } else { + sequence_number = state->rcv_sequence_number; + if ((m = (gbuf_t *)gbuf_alloc(msize, PRI_MED)) == 0) + return; + gbuf_wset(m,msize); + } + + /* construct the RI Ack packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->rcv_connection_id; + hdrp->sequence_number = sequence_number; + hdrp->command_code = AURPCMD_RIAck; + hdrp->flags = flags; + + /* send the packet */ + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndRIAck: node=%d\n", + state->rem_node)); + AURPsend(m, AUD_AURP, state->rem_node); +} + +/* funneled version of AURPsndRIReq */ +void AURPsndRIReq_funnel(state) + aurp_state_t *state; +{ + thread_funnel_set(network_flock, TRUE); + AURPsndRIReq(state); + thread_funnel_set(network_flock, FALSE); +} + +/* */ +void AURPsndRIReq(state) + aurp_state_t *state; +{ + int msize; + gbuf_t *m; + aurp_hdr_t *hdrp; + + + if (state->rcv_state == AURPSTATE_Unconnected) { + return; + } + if (state->rcv_tmo && (state->rcv_state != AURPSTATE_WaitingForRIRsp)) { + return; + } + + msize = sizeof(aurp_hdr_t); + if ((m = (gbuf_t *)gbuf_alloc(msize, PRI_MED)) != 0) { + gbuf_wset(m,msize); + + /* construct the RI request packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->rcv_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_RIReq; + hdrp->flags = 0; + + /* update state info */ + state->rcv_state = AURPSTATE_WaitingForRIRsp; + + /* send the packet */ + 
dPrintf(D_M_AURP, D_L_INFO, ("AURPsndRIReq: node=%d\n", + state->rem_node)); + AURPsend(m, AUD_AURP, state->rem_node); + } + + /* start the retry timer */ + timeout(AURPsndRIReq_funnel, state, AURP_RetryInterval*HZ); + state->rcv_tmo = 1; +} + +/* funneled version of AURPsndRIRsp */ +void AURPsndRIRsp_funnel(state) + aurp_state_t *state; +{ + thread_funnel_set(network_flock, TRUE); + AURPsndRIRsp(state); + thread_funnel_set(network_flock, FALSE); +} + +/* */ +void AURPsndRIRsp(state) + aurp_state_t *state; +{ + gbuf_t *m; + aurp_hdr_t *hdrp; + short len = 0; + int s, msize = 0; + + ATDISABLE(s, aurpgen_lock); + + /* make sure we're in a valid state to send RI response */ + if ((state->snd_state == AURPSTATE_Unconnected) || + (state->snd_state == AURPSTATE_WaitingForRIAck2)) { + ATENABLE(s, aurpgen_lock); + return; + } + + /* update state info */ + state->snd_state = AURPSTATE_WaitingForRIAck1; + + if (state->rsp_m == 0) { + ATENABLE(s, aurpgen_lock); + msize = sizeof(aurp_hdr_t); + if ((m = (gbuf_t *)gbuf_alloc(msize+AURP_MaxPktSize, PRI_MED)) == 0) { + timeout(AURPsndRIRsp_funnel, state, AURP_RetryInterval*HZ); + state->snd_tmo = 1; + return; + } + gbuf_wset(m,msize); + state->rsp_m = m; + + /* construct the RI response packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->snd_connection_id; + hdrp->sequence_number = state->snd_sequence_number; + hdrp->command_code = AURPCMD_RIRsp; + hdrp->flags = 0; + + /* get routing info of the local networks */ + state->snd_next_entry = AURPgetri( + state->snd_next_entry, gbuf_wptr(m), &len); + gbuf_winc(m,len); + + /* set the last flag if this is the last response packet */ + if (!state->snd_next_entry) + hdrp->flags = AURPFLG_LAST; + } + + /* keep a copy of the packet for retry */ + m = (gbuf_t *)gbuf_dupb(state->rsp_m); + + /* start the retry timer */ + timeout(AURPsndRIRsp_funnel, state, AURP_RetryInterval*HZ); + state->snd_tmo = 1; + + if (msize == 0) + ATENABLE(s, aurpgen_lock); + + /* send the 
packet */ + if (m) { + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndRIRsp: len=%d\n", len)); + AURPsend(m, AUD_AURP, state->rem_node); + } + +} + +void AURPsndRIUpd_funnel(state) + aurp_state_t *state; +{ + thread_funnel_set(network_flock, TRUE); + AURPsndRIUpd(state); + thread_funnel_set(network_flock, FALSE); +} + +/* */ +void AURPsndRIUpd(state) + aurp_state_t *state; +{ + gbuf_t *m; + aurp_hdr_t *hdrp; + short len = 0; + int s, msize = 0; + + ATDISABLE(s, aurpgen_lock); + + /* make sure we're in a valid state to send update */ + if (state->snd_next_entry || (state->upd_m == 0) || + (state->snd_state == AURPSTATE_Unconnected) || + (state->snd_state == AURPSTATE_WaitingForRIAck1)) { + ATENABLE(s, aurpgen_lock); + return; + } + + /* update state info */ + state->snd_state = AURPSTATE_WaitingForRIAck2; + + if (state->snd_tmo == 0) { + ATENABLE(s, aurpgen_lock); + msize = sizeof(aurp_hdr_t); + m = state->upd_m; + len = gbuf_len(m); + gbuf_rdec(m,msize); + + /* construct the RI update packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->snd_connection_id; + hdrp->sequence_number = state->snd_sequence_number; + hdrp->command_code = AURPCMD_RIUpd; + hdrp->flags = 0; + } + + /* keep a copy of the packet for retry */ + m = (gbuf_t *)gbuf_dupb(state->upd_m); + + /* start the retry timer */ + timeout(AURPsndRIUpd_funnel, state, AURP_RetryInterval*HZ); + state->snd_tmo = 1; + + if (msize == 0) + ATENABLE(s, aurpgen_lock); + + /* send the packet */ + if (m) { + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndRIUpd: len=%d\n", len)); + AURPsend(m, AUD_AURP, state->rem_node); + } + +} + +/* */ +void AURPrcvRIReq(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + int s; + + ATDISABLE(s, aurpgen_lock); + + /* make sure we're in a valid state to accept it */ + if ((state->snd_state == AURPSTATE_Unconnected) || + (state->snd_state == AURPSTATE_WaitingForRIAck2)) { + ATENABLE(s, aurpgen_lock); + dPrintf(D_M_AURP, D_L_WARNING, 
("AURPrcvRIReq: unexpected request\n")); + gbuf_freem(m); + return; + } + + /* check for the correct connection id */ + if (hdrp->connection_id != state->snd_connection_id) { + ATENABLE(s, aurpgen_lock); + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvRIReq: invalid connection id, r=%d, m=%d\n", + hdrp->connection_id, state->snd_connection_id)); + gbuf_freem(m); + return; + } + + if (state->snd_state != AURPSTATE_WaitingForRIAck1) { + state->snd_next_entry = 0; + if (state->rsp_m) { + gbuf_freem(state->rsp_m); + state->rsp_m = 0; + } + ATENABLE(s, aurpgen_lock); + AURPsndRIRsp(state); + } else + ATENABLE(s, aurpgen_lock); + + gbuf_freem(m); +} + +/* */ +void AURPrcvRIRsp(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + int s; + + ATDISABLE(s, aurpgen_lock); + + /* make sure we're in a valid state to accept it */ + if (state->rcv_state != AURPSTATE_WaitingForRIRsp) { + ATENABLE(s, aurpgen_lock); + dPrintf(D_M_AURP, D_L_WARNING, ("AURPrcvRIRsp: unexpected response\n")); + gbuf_freem(m); + return; + } + + /* check for the correct connection id */ + if (hdrp->connection_id != state->rcv_connection_id) { + ATENABLE(s, aurpgen_lock); + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvRIRsp: invalid connection id, r=%d, m=%d\n", + hdrp->connection_id, state->rcv_connection_id)); + gbuf_freem(m); + return; + } + + /* check for the correct sequence number */ + if (hdrp->sequence_number != state->rcv_sequence_number) { + ATENABLE(s, aurpgen_lock); + if ( ((state->rcv_sequence_number == AURP_FirstSeqNum) && + (hdrp->sequence_number == AURP_LastSeqNum)) || + (hdrp->sequence_number == (state->rcv_sequence_number-1)) ) { + AURPsndRIAck(state, m, AURPFLG_SZI); + } else { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvRIRsp: invalid sequence number, r=%d, m=%d\n", + hdrp->sequence_number, state->rcv_sequence_number)); + gbuf_freem(m); + } + return; + } + gbuf_rinc(m,sizeof(*hdrp)); + if (hdrp->flags & AURPFLG_LAST) + state->rcv_state = 
AURPSTATE_Connected; + ATENABLE(s, aurpgen_lock); + + dPrintf(D_M_AURP, D_L_INFO, ("AURPrcvRIRsp: len=%ld\n", gbuf_len(m))); + + /* cancel the retry timer */ + untimeout(AURPsndRIReq_funnel, state); + state->rcv_tmo = 0; + + /* send RI ack */ + AURPsndRIAck(state, 0, AURPFLG_SZI); + + /* update state info */ + if (++state->rcv_sequence_number == 0) + state->rcv_sequence_number = AURP_FirstSeqNum; + + /* process routing info of the tunnel peer */ + if (AURPsetri(state->rem_node, m)) { + dPrintf(D_M_AURP, D_L_ERROR, ("AURPrcvRIRsp: AURPsetri() error\n")); + } + gbuf_freem(m); + + /* set the get zone flag to get zone info later if required */ + if (state->rcv_state == AURPSTATE_Connected) + state->get_zi = 1; +} + +/* */ +void AURPrcvRIUpd(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + + /* make sure we're in a valid state to accept it */ + if (state->rcv_state == AURPSTATE_Unconnected) { + dPrintf(D_M_AURP, D_L_WARNING, ("AURPrcvRIUpd: unexpected response\n")); + gbuf_freem(m); + return; + } + + /* check for the correct connection id */ + if (hdrp->connection_id != state->rcv_connection_id) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvRIUpd: invalid connection id, r=%d, m=%d\n", + hdrp->connection_id, state->rcv_connection_id)); + gbuf_freem(m); + return; + } + + /* check for the correct sequence number */ + if (hdrp->sequence_number != state->rcv_sequence_number) { + if ( ((state->rcv_sequence_number == AURP_FirstSeqNum) && + (hdrp->sequence_number == AURP_LastSeqNum)) || + (hdrp->sequence_number == (state->rcv_sequence_number-1)) ) { + AURPsndRIAck(state, m, AURPFLG_SZI); + } else { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvRIUpd: invalid sequence number, r=%d, m=%d\n", + hdrp->sequence_number, state->rcv_sequence_number)); + gbuf_freem(m); + } + return; + } + gbuf_rinc(m,sizeof(*hdrp)); + + dPrintf(D_M_AURP, D_L_INFO, ("AURPrcvRIUpd: len=%ld\n", gbuf_len(m))); + + /* send RI ack */ + AURPsndRIAck(state, 0, 
AURPFLG_SZI); + + /* update state info */ + if (++state->rcv_sequence_number == 0) + state->rcv_sequence_number = AURP_FirstSeqNum; + + /* process update routing info of the tunnel peer */ + if (AURPupdateri(state->rem_node, m)) { + dPrintf(D_M_AURP, D_L_ERROR, ("AURPrcvRIUpd: AURPupdateri() error\n")); + } + + /* set the get zone flag to get zone info later if required */ + state->get_zi = 1; + + gbuf_freem(m); +} + +/* */ +void AURPrcvRIAck(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + gbuf_t *dat_m; + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + unsigned char snd_state; + int s; + int flag; + + dPrintf(D_M_AURP, D_L_INFO, ("AURPrcvRIAck: state=%d\n", + state->snd_state)); + ATDISABLE(s, aurpgen_lock); + + /* make sure we're in a valid state to accept it */ + snd_state = state->snd_state; + if (((snd_state == AURPSTATE_WaitingForRIAck1) || + (snd_state == AURPSTATE_WaitingForRIAck2)) && + (hdrp->sequence_number == state->snd_sequence_number)) { + + if (snd_state == AURPSTATE_WaitingForRIAck1) { + /* ack from the tunnel peer to our RI response */ + untimeout(AURPsndRIRsp_funnel, state); + dat_m = state->rsp_m; + state->rsp_m = 0; + flag = 1; + } else { + /* ack from the tunnel peer to our RI update */ + untimeout(AURPsndRIUpd_funnel, state); + dat_m = state->upd_m; + state->upd_m = 0; + flag = 2; + } + state->snd_tmo = 0; + gbuf_rinc(dat_m,sizeof(aurp_hdr_t)); + + /* increment the sequence number */ + if (++state->snd_sequence_number == 0) + state->snd_sequence_number = AURP_FirstSeqNum; + + /* update state info */ + state->snd_state = AURPSTATE_Connected; + ATENABLE(s, aurpgen_lock); + + if (state->snd_next_entry) /* more RI responses to send? 
*/ + AURPsndRIRsp(state); + + /* check to see if we need to send ZI responses */ + if (hdrp->flags & AURPFLG_SZI) + AURPsndZRsp(state, dat_m, flag); + else if (dat_m) + gbuf_freem(dat_m); + } else + ATENABLE(s, aurpgen_lock); + + gbuf_freem(m); +} + +/* */ +int AURPgetri(next_entry, buf, len) + short next_entry; + unsigned char *buf; + short *len; +{ + short entry_num = next_entry; + RT_entry *entry = (RT_entry *)&RT_table[next_entry]; + + for (*len=0; entry_num < RT_maxentry; entry_num++,entry++) { + if ((net_port != entry->NetPort) && + !(entry->AURPFlag & AURP_NetHiden)) { + if ((entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT) { + if (entry->NetStart) { + /* route info for extended network */ + *(short *)buf = entry->NetStart; + buf += sizeof(short); + *buf++ = 0x80 | (entry->NetDist & 0x1F); + *(short *)buf = entry->NetStop; + buf += sizeof(short); + *buf++ = 0; + *len += 6; + } else { + /* route info for non-extended network */ + *(short *)buf = entry->NetStop; + buf += sizeof(short); + *buf++ = (entry->NetDist & 0x1F); + *len += 3; + } + } + } + if (*len > AURP_MaxPktSize) + break; + } + + return (entry_num == RT_maxentry) ? 
0 : entry_num; +} + +/* */ +int AURPsetri(node, m) + unsigned char node; + gbuf_t *m; +{ + int tuples_cnt; + unsigned char *tuples_ptr; + RT_entry new_rt, *curr_rt; + + new_rt.NextIRNet = 0; + new_rt.NextIRNode = node; + new_rt.NetPort = net_port; + + /* + * Process all the tuples against our routing table + */ + tuples_ptr = (char *)gbuf_rptr(m); + tuples_cnt = (gbuf_len(m))/3; + + while (tuples_cnt--) { + new_rt.NetDist = TUPLEDIST(tuples_ptr) + 1; + new_rt.EntryState = RTE_STATE_GOOD; + new_rt.NetStart = TUPLENET(tuples_ptr); + tuples_ptr += 3; + if (tuples_ptr[-1] & 0x80) { + new_rt.NetStop = TUPLENET((tuples_ptr)); + tuples_ptr += 3; + tuples_cnt--; + } else { + new_rt.NetStop = new_rt.NetStart; + new_rt.NetStart = 0; + } + if ((new_rt.NetStop == 0) || (new_rt.NetStop < new_rt.NetStart)) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPsetri: %d, invalid tuple received [%d-%d]\n", + net_port, new_rt.NetStart, new_rt.NetStop)); + continue; + } + + if ((curr_rt = rt_blookup(new_rt.NetStop)) != 0) { /* found? 
*/ + /* ignore loop if present */ + if (curr_rt->NetPort != net_port) + continue; + + if (new_rt.NetDist < 16) { + /* + * check if the definition of the route has changed + */ + if ((new_rt.NetStop != curr_rt->NetStop) || + (new_rt.NetStart != curr_rt->NetStart)) { + if ((new_rt.NetStop == curr_rt->NetStop) && + (new_rt.NetStop == curr_rt->NetStart) && + (new_rt.NetStart == 0)) { + new_rt.NetStart = new_rt.NetStop; + } else if ((new_rt.NetStop == curr_rt->NetStop) && + (new_rt.NetStart == new_rt.NetStop) && + (curr_rt->NetStart == 0)) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPsetri: [%d-%d] has changed to [%d-%d], Dist=%d\n", + curr_rt->NetStart, curr_rt->NetStop, + new_rt.NetStart, new_rt.NetStop, new_rt.NetDist)); + new_rt.NetStart = 0; + } else { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPsetri: Net Conflict, Curr=[%d-%d], New=[%d-%d]\n", + curr_rt->NetStart,curr_rt->NetStop, + new_rt.NetStart,new_rt.NetStop)); + zt_remove_zones(curr_rt->ZoneBitMap); + rt_delete(curr_rt->NetStop, curr_rt->NetStart); + continue; + } + } + } + + if ((new_rt.NetDist <= curr_rt->NetDist) && + (new_rt.NetDist < 16)) { + /* + * found a shorter or more recent route, + * replace with the new entry + */ + curr_rt->NetDist = new_rt.NetDist; + curr_rt->NextIRNode = new_rt.NextIRNode; + dPrintf(D_M_AURP_LOW,D_L_INFO, + ("AURPsetri: shorter route found [%d-%d], update\n", + new_rt.NetStart,new_rt.NetStop)); + } + + } else { /* no entry found */ + if (new_rt.NetDist < 16) { + new_rt.EntryState = RTE_STATE_GOOD; + dPrintf(D_M_AURP, D_L_INFO, + ("AURPsetri: new_rt [%d-%d], tuple #%d\n", + new_rt.NetStart, new_rt.NetStop, tuples_cnt)); + if (rt_insert(new_rt.NetStop, new_rt.NetStart, + new_rt.NextIRNet, new_rt.NextIRNode, + new_rt.NetDist, new_rt.NetPort, + new_rt.EntryState) == (RT_entry *)0) { + dPrintf(D_M_AURP,D_L_ERROR, + ("AURPsetri: RTMP table full [%d-%d]\n", + new_rt.NetStart,new_rt.NetStop)); + return -1; + } + } + } + } /* end of main while */ + + return 0; +} + +/* */ +int 
AURPupdateri(node, m) + unsigned char node; + gbuf_t *m; +{ + char ev, ev_len; + RT_entry new_rt, *old_rt; + + while (gbuf_len(m) > 0) { + ev = *gbuf_rptr(m); /* event code */ + gbuf_rinc(m,1); + if (gbuf_rptr(m)[2] & 0x80) { + /* event tuple for extended network */ + new_rt.NetStart = *(unsigned short *)gbuf_rptr(m); + new_rt.NetStop = *(unsigned short *)&gbuf_rptr(m)[3]; + new_rt.NetDist = gbuf_rptr(m)[2] & 0x7f; + ev_len = 5; + } else { + /* event tuple for non-extended network */ + new_rt.NetStart = 0; + new_rt.NetStop = *(unsigned short *)gbuf_rptr(m); + new_rt.NetDist = gbuf_rptr(m)[2]; + ev_len = 3; + } + + switch (ev) { + case AURPEV_Null: + break; + + case AURPEV_NetAdded: + gbuf_rinc(m,ev_len); + new_rt.NextIRNet = 0; + new_rt.NextIRNode = node; + new_rt.NetPort = net_port; + if ((new_rt.NetDist == 0) || (new_rt.NetStop == 0) || + (new_rt.NetStop < new_rt.NetStart)) { + dPrintf(D_M_AURP,D_L_WARNING, + ("AURPupdateri: %d, invalid NetAdded received [%d-%d]\n", + net_port, new_rt.NetStart, new_rt.NetStop)); + break; + } + + if ((old_rt = rt_blookup(new_rt.NetStop)) != 0) { /* found? */ + if (old_rt->NetPort == net_port) { + /* + * process this event as if it was an NDC event; + * update the route's distance + */ + old_rt->NetDist = new_rt.NetDist; + } + } else { +l_add: if ((new_rt.NetDist < 16) && (new_rt.NetDist != NOTIFY_N_DIST)) { + new_rt.EntryState = RTE_STATE_GOOD; + dPrintf(D_M_AURP, D_L_INFO, + ("AURPupdateri: NetAdded [%d-%d]\n", + new_rt.NetStart, new_rt.NetStop)); + if (rt_insert(new_rt.NetStop, new_rt.NetStart, + new_rt.NextIRNet, new_rt.NextIRNode, + new_rt.NetDist, new_rt.NetPort, + new_rt.EntryState) == (RT_entry *)0) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPupdateri: RTMP table full [%d-%d]\n", + new_rt.NetStart,new_rt.NetStop)); + return 0; + } + } + } + break; + + case AURPEV_NetDeleted: + case AURPEV_NetRouteChange: + gbuf_rinc(m,ev_len); +l_delete: if ((old_rt = rt_blookup(new_rt.NetStop)) != 0) { /* found? 
*/ + if (old_rt->NetPort == net_port) { + zt_remove_zones(old_rt->ZoneBitMap); + rt_delete(old_rt->NetStop, old_rt->NetStart); + } + } + break; + + case AURPEV_NetDistChange: + gbuf_rinc(m,ev_len); + if (new_rt.NetDist == 15) + goto l_delete; /* process this event as if was an ND event */ + if ((old_rt = rt_blookup(new_rt.NetStop)) != 0) { /* found? */ + if (old_rt->NetPort == net_port) { + /* + * update the route's distance + */ + old_rt->NetDist = new_rt.NetDist; + } + } else + goto l_add; /* process this event as if was an NA event */ + break; + + case AURPEV_NetZoneChange: + break; + } + } + + return 0; +} + +/* */ +void AURPpurgeri(node) + unsigned char node; +{ + short entry_num; + RT_entry *entry = (RT_entry *)RT_table; + + /* + * purge all routes associated with the tunnel peer + */ + for (entry_num=0; entry_num < RT_maxentry; entry_num++,entry++) { + if ((net_port == entry->NetPort) && (node == entry->NextIRNode)) { + zt_remove_zones(entry->ZoneBitMap); + rt_delete(entry->NetStop, entry->NetStart); + } + } +} + +/* */ +void AURPrtupdate(entry, ev) + RT_entry *entry; + unsigned char ev; +{ + unsigned char i, node, ev_len, ev_tuple[6]; + gbuf_t *m; + aurp_state_t *state = (aurp_state_t *)&aurp_state[1]; + int s, msize = sizeof(aurp_hdr_t); + + dPrintf(D_M_AURP, D_L_TRACE, ("AURPrtupdate: event=%d, net=[%d-%d]\n", + ev, entry->NetStart, entry->NetStop)); + + /* + * check that the network can be exported; if not, + * we must not make it visible beyond the local networks + */ + if (net_export) { + for (i=0; i < net_access_cnt; i++) { + if ((net_access[i] == entry->NetStart) || + (net_access[i] == entry->NetStop)) + break; + } + if (i == net_access_cnt) + return; + } else { + for (i=0; i < net_access_cnt; i++) { + if ((net_access[i] == entry->NetStart) || + (net_access[i] == entry->NetStop)) + return; + } + } + + /* + * create the update event tuple + */ + ev_tuple[0] = ev; /* event code */ + if (entry->NetStart) { + *(unsigned short *)&ev_tuple[1] = 
entry->NetStart; + ev_tuple[3] = 0x80 | (entry->NetDist & 0x1F); + *(unsigned short *)&ev_tuple[4] = entry->NetStop; + ev_len = 6; + } else { + *(unsigned short *)&ev_tuple[1] = entry->NetStop; + ev_tuple[3] = (entry->NetDist & 0x1F); + ev_len = 4; + } + + for (node=1; node <= dst_addr_cnt; node++, state++) { + if ((ev == AURPEV_NetAdded) && + (!(state->snd_sui & AURPFLG_NA))) continue; + if ((ev == AURPEV_NetDeleted) && + (!(state->snd_sui & AURPFLG_ND))) continue; + if ((ev == AURPEV_NetDistChange) && + (!(state->snd_sui & AURPFLG_NDC))) continue; + ATDISABLE(s, aurpgen_lock); + if ((state->snd_state != AURPSTATE_Unconnected) && + (state->snd_state != AURPSTATE_WaitingForRIAck2)) { + if ((m = state->upd_m) == 0) { + /* + * we don't have the RI update buffer yet, allocate one + */ + ATENABLE(s, aurpgen_lock); + if ((m = (gbuf_t *)gbuf_alloc(msize+AURP_MaxPktSize, PRI_HI)) == 0) + continue; + ATDISABLE(s, aurpgen_lock); + state->upd_m = m; + gbuf_rinc(m,msize); + gbuf_wset(m,0); + } + + /* + * add the update event tuple to the RI update buffer; + * the RI update buffer will be sent when the periodic update + * timer expires + */ + bcopy(ev_tuple, gbuf_wptr(m), ev_len); + gbuf_winc(m,ev_len); + + /* + * if the RI update buffer is full, send the RI update now + */ + if (gbuf_len(m) > (AURP_MaxPktSize-6)) { + ATENABLE(s, aurpgen_lock); + AURPsndRIUpd(state); + continue; + } + } + ATENABLE(s, aurpgen_lock); + } +} diff --git a/bsd/netat/aurp_rx.c b/bsd/netat/aurp_rx.c new file mode 100644 index 000000000..5d5c43a3a --- /dev/null +++ b/bsd/netat/aurp_rx.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified for Kernel execution, May, 1996, Justin C. Walker + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + * File: rx.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * Not using the stream queue for data; keep this around to handle + * requests from the user proc (mostly setup). 
+ */ +int +aurp_wput(gref, m) + gref_t *gref; + gbuf_t *m; +{ + register ioc_t *iocbp; + register gbuf_t *mdata; + register int temp, error; + + switch (gbuf_type(m)) { + + case MSG_IOCTL: + iocbp = (ioc_t *)gbuf_rptr(m); + switch (iocbp->ioc_cmd) { + case AUC_CFGTNL: /* set up a tunnel, init the AURP daemon */ + mdata = gbuf_cont(m); + temp = (int)(*gbuf_rptr(mdata)); + if (temp != dst_addr_cnt) { + AURPiocnak(gref, m, ENOSPC); + return 0; + } + if ((error = aurpd_start()) != 0) { + AURPiocnak(gref, m, error); + return 0; + } + if (AURPinit()) { + AURPiocnak(gref, m, ENOMEM); + return 0; + } + ddp_AURPfuncx(AURPCODE_AURPPROTO, 0, 0); + AURPaccess(); + break; + + case AUC_SHTDOWN: /* shutdown AURP operation */ + AURPshutdown(); + break; + + case AUC_EXPNET: /* configure networks to be exported */ + case AUC_HIDENET: /* configure networks to be hiden */ + mdata = gbuf_cont(m); + net_access_cnt = (gbuf_len(mdata))/sizeof(short); + if ((net_access_cnt==0) || (net_access_cnt>AURP_MaxNetAccess)) { + AURPiocnak(gref, m, EINVAL); + return 0; + } + bcopy(gbuf_rptr(mdata), net_access, + gbuf_len(mdata)); + if (iocbp->ioc_cmd == AUC_EXPNET) + net_export = 1; + break; + + case AUC_UDPPORT: + aurp_global.udp_port = *(char *)gbuf_rptr(mdata); + break; + + case AUC_NETLIST: + mdata = gbuf_cont(m); + /* + * Compute # addrs, Save for later check + * We cheat with a shift. + */ + dst_addr_cnt = ((gbuf_len(mdata)) >> 2)-1; + bcopy(gbuf_rptr(mdata), &aurp_global.dst_addr, + gbuf_len(mdata)); + aurp_global.src_addr = aurp_global.dst_addr[0]; + aurp_global.dst_addr[0] = 0; + break; + + default: + AURPiocnak(gref, m, EINVAL); + return 0; + } + AURPiocack(gref, m); + break; + + default: + dPrintf(D_M_AURP, D_L_WARNING, + ("aurp_wput: bad msg type=%d\n", gbuf_type(m))); + gbuf_freem(m); + break; + } + + return 0; +} + +/* + * Insert an appletalk packet into the appletalk stack. + * If it's an AURP data packet, just send it up; if it's AURP protocol, + * switch out here. 
+ */ + +int +at_insert(m, type, node) + register gbuf_t *m; + register unsigned int type, node; +{ + register aurp_hdr_t *hdrp; + register aurp_state_t *state; + + if (type == AUD_Atalk) + /* non-AURP proto packet */ + ddp_AURPfuncx(AURPCODE_DATAPKT, m, node); + else + { /* AURP proto packet */ + state = (aurp_state_t *)&aurp_state[node]; + state->tickle_retry = 0; + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + + switch (hdrp->command_code) { + case AURPCMD_RIUpd: + AURPrcvRIUpd(state, m); break; + + case AURPCMD_RIReq: + AURPrcvRIReq(state, m); break; + + case AURPCMD_RIRsp: + AURPrcvRIRsp(state, m); break; + + case AURPCMD_RIAck: + AURPrcvRIAck(state, m); break; + + case AURPCMD_ZReq: + AURPrcvZReq(state, m); break; + + case AURPCMD_ZRsp: + AURPrcvZRsp(state, m); break; + + case AURPCMD_OpenReq: + AURPrcvOpenReq(state, m); break; + + case AURPCMD_OpenRsp: + AURPrcvOpenRsp(state, m); break; + + case AURPCMD_Tickle: + AURPrcvTickle(state, m); break; + + case AURPCMD_TickleAck: + AURPrcvTickleAck(state, m); break; + + case AURPCMD_RDReq: + AURPrcvRDReq(state, m); break; + + default: + dPrintf(D_M_AURP, D_L_WARNING, + ("at_insert: bad proto cmd=%d\n", + hdrp->command_code)); + gbuf_freem(m); + } + } + + return 0; +} diff --git a/bsd/netat/aurp_tickle.c b/bsd/netat/aurp_tickle.c new file mode 100644 index 000000000..ce8772d0f --- /dev/null +++ b/bsd/netat/aurp_tickle.c @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + * File: tickle.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* */ +void AURPsndTickle(state) + aurp_state_t *state; +{ + int msize; + gbuf_t *m; + aurp_hdr_t *hdrp; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + if (state->rcv_state == AURPSTATE_Unconnected) { + (void) thread_funnel_set(network_flock, FALSE); + return; + } + /* stop trying if the retry count exceeds the maximum retry value */ + if (++state->tickle_retry > AURP_MaxTickleRetry) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPsndTickle: no response, %d\n", state->rem_node)); + /* + * the tunnel peer seems to have disappeared, update state info + */ + state->snd_state = AURPSTATE_Unconnected; + state->rcv_state = AURPSTATE_Unconnected; + state->tickle_retry = 0; + AURPcleanup(state); + + /* purge all routes associated with the tunnel peer */ + AURPpurgeri(state->rem_node); + (void) thread_funnel_set(network_flock, FALSE); + return; + } + + if (state->tickle_retry > 1) { + msize = sizeof(aurp_hdr_t); + if ((m = (gbuf_t *)gbuf_alloc(msize, PRI_MED)) != 0) { + gbuf_wset(m,msize); + + /* construct the tickle packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + 
hdrp->connection_id = state->rcv_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_Tickle; + hdrp->flags = 0; + + /* send the packet */ + AURPsend(m, AUD_AURP, state->rem_node); + } + } + + /* start the retry timer */ + timeout(AURPsndTickle, state, AURP_TickleRetryInterval*HZ); + + (void) thread_funnel_set(network_flock, FALSE); +} + +/* */ +void AURPrcvTickle(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + + /* make sure we're in a valid state to accept it */ + if (state->snd_state == AURPSTATE_Unconnected) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvTickle: unexpected request\n")); + gbuf_freem(m); + return; + } + + /* construct the tickle ack packet */ + gbuf_wset(m,sizeof(aurp_hdr_t)); + hdrp->command_code = AURPCMD_TickleAck; + hdrp->flags = 0; + + /* send the packet */ + AURPsend(m, AUD_AURP, state->rem_node); +} + +/* */ +void AURPrcvTickleAck(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + + /* make sure we're in a valid state to accept it */ + if (state->rcv_state == AURPSTATE_Unconnected) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvTickleAck: unexpected response\n")); + gbuf_freem(m); + return; + } + + /* check for the correct connection id */ + if (hdrp->connection_id != state->rcv_connection_id) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvTickleAck: invalid connection id, r=%d, m=%d\n", + hdrp->connection_id, state->rcv_connection_id)); + gbuf_freem(m); + return; + } + gbuf_freem(m); + + /* update state info */ + state->tickle_retry = 0; +} diff --git a/bsd/netat/aurp_tx.c b/bsd/netat/aurp_tx.c new file mode 100644 index 000000000..c6c4ce658 --- /dev/null +++ b/bsd/netat/aurp_tx.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + * File: tx.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * Any AURP protocol or appletalk data (ddp) packets flowing through + * are inserted into the kernel aurpd process's (atalk) input queue. + * Assume here that we deal with single packets, i.e., someone earlier + * in the food chain has broken up packet chains. 
+ */ +void AURPsend(mdata, type, node) + gbuf_t *mdata; + int type, node; +{ + struct aurp_domain *domain; + gbuf_t *m; + int msize = AT_WR_OFFSET+32+IP_DOMAINSIZE; + + /* Add the domain header */ + if ((m = gbuf_alloc(msize, PRI_MED)) == 0) { + gbuf_freem(mdata); + dPrintf(D_M_AURP, D_L_WARNING, ("AURPsend: gbuf_alloc failed\n")); + return; + } + gbuf_wset(m,msize); + gbuf_rinc(m,AT_WR_OFFSET+32); + gbuf_cont(m) = mdata; + domain = (struct aurp_domain *)gbuf_rptr(m); + domain->dst_length = IP_LENGTH; + domain->dst_authority = IP_AUTHORITY; + domain->dst_distinguisher = IP_DISTINGUISHER; + domain->src_length = IP_LENGTH; + domain->src_authority = IP_AUTHORITY; + domain->src_distinguisher = IP_DISTINGUISHER; + domain->src_address = aurp_global.src_addr; + domain->version = AUD_Version; + domain->reserved = 0; + domain->type = type; + domain->dst_address = aurp_global.dst_addr[node]; + atalk_to_ip(m); +} + +/* + * Called from within ddp (via ddp_AURPsendx) to handle data (DDP) packets + * sent from the AppleTalk stack, routing updates, and routing info + * initialization. 
+ */ +void AURPcmdx(code, mdata, param) + int code; + gbuf_t *mdata; + int param; +{ + unsigned char node; + gbuf_t *mdata_next; + + if (mdata == 0) + return; + if (aurp_gref == 0) { + if (code != AURPCODE_DEBUGINFO) + AURPfreemsg(mdata); + return; + } + + switch (code) { + case AURPCODE_DATAPKT: /* data packet */ + node = (unsigned char)param; + if (gbuf_next(mdata)) { + mdata_next = gbuf_next(mdata); + gbuf_next(mdata) = 0; + AURPsend(mdata, AUD_Atalk, node); + do { + mdata = mdata_next; + mdata_next = gbuf_next(mdata); + gbuf_next(mdata) = 0; + /* Indicate non-AURP packet, node id of peer */ + AURPsend(mdata, AUD_Atalk, node); + } while (mdata_next); + } else + AURPsend(mdata, AUD_Atalk, node); + break; + + case AURPCODE_RTUPDATE: + AURPrtupdate((RT_entry *)mdata, param); + break; + + case AURPCODE_DEBUGINFO: /* debug info */ + dbgBits = *(dbgBits_t *)mdata; + net_port = param; + break; + + default: + dPrintf(D_M_AURP, D_L_ERROR, ("AURPcmdx: bad code, %d\n", code)); + } +} diff --git a/bsd/netat/aurp_zi.c b/bsd/netat/aurp_zi.c new file mode 100644 index 000000000..33d051d03 --- /dev/null +++ b/bsd/netat/aurp_zi.c @@ -0,0 +1,613 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 Apple Computer, Inc. + * + * Created April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + * File: zi.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static int AURPgetzi(int, unsigned char *, short *, gbuf_t *, int); +static void AURPsetzi(unsigned char, gbuf_t *, short, short); + +/* */ +void AURPsndZReq(state) + aurp_state_t *state; +{ + gbuf_t *m; + int msize; + aurp_hdr_t *hdrp; + short *net, nets_cnt, net_sent=0, entry_num=0; + RT_entry *entry = RT_table; + + if (!state->get_zi || (state->rcv_state == AURPSTATE_Unconnected)) + return; + +l_more: + msize = sizeof(aurp_hdr_t); + if ((m = (gbuf_t *)gbuf_alloc(msize+AURP_MaxPktSize, PRI_MED)) == 0) { + dPrintf(D_M_AURP, D_L_WARNING, ("AURPsndZReq: node=%d, out of mblk\n", + state->rem_node)); + return; + } + gbuf_wset(m,msize); + + /* construct the ZI request packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->rcv_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_ZReq; + hdrp->flags = 0; + *(short *)(hdrp+1) = AURPSUBCODE_ZoneInfo1; + gbuf_winc(m,sizeof(short)); + + net = (short *)gbuf_wptr(m); + nets_cnt = 0; + + while (entry_num < RT_maxentry) { + /* + * scan the router table, and build the ZI request packet + * with the right entries, i.e., + * - entry in use and not of the net_port + * - with no zones and in an active state + * - talking to the right router + */ + if ( (entry->NetPort == net_port) && entry->NetStop && + ((entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT) && + (!RT_ALL_ZONES_KNOWN(entry)) ) { + *net++ = (entry->NetStart) ? 
entry->NetStart : entry->NetStop; + nets_cnt++; + } + + if (nets_cnt >= 640) { + /* query only 640 networks per packet */ + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndZReq: node=%d\n", + state->rem_node)); + gbuf_winc(m,(nets_cnt * sizeof(short))); + AURPsend(m, AUD_AURP, state->rem_node); + net_sent = 1; + goto l_more; + } + + entry_num++; + entry++; + } + + if (nets_cnt) { + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndZReq: node=%d\n", + state->rem_node)); + gbuf_winc(m,(nets_cnt * sizeof(short))); + AURPsend(m, AUD_AURP, state->rem_node); + net_sent = 1; + } else + gbuf_freeb(m); + + if (!net_sent) + state->get_zi = 0; +} + +/* */ +void AURPsndZRsp(state, dat_m, flag) + aurp_state_t *state; + gbuf_t *dat_m; + int flag; +{ + short len; + int msize, next_entry = 0; + gbuf_t *m; + aurp_hdr_t *hdrp; + + if ((state->snd_state == AURPSTATE_Unconnected) || (dat_m == 0)) + return; + msize = sizeof(aurp_hdr_t); + + do { + if ((m = (gbuf_t *)gbuf_alloc(msize+AURP_MaxPktSize, PRI_MED)) == 0) { + dPrintf(D_M_AURP, D_L_WARNING, ("AURPsndZRsp: node=%d, out of mblk\n", + state->rem_node)); + return; + } + gbuf_wset(m,msize); + + /* construct the ZI response packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->snd_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_ZRsp; + hdrp->flags = 0; + + /* get zone info of the local networks */ + next_entry = AURPgetzi(next_entry, gbuf_wptr(m), &len, dat_m, flag); + gbuf_winc(m,len); + + /* send the packet */ + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndZRsp: len=%d\n", len)); + AURPsend(m, AUD_AURP, state->rem_node); + + } while (next_entry); + + gbuf_freem(dat_m); +} + +/* */ +void AURPsndGZN(state, dat_m) + aurp_state_t *state; + gbuf_t *dat_m; +{ + short zname_len; + int msize; + gbuf_t *m; + aurp_hdr_t *hdrp; + + if (state->snd_state == AURPSTATE_Unconnected) + return; + + msize = sizeof(aurp_hdr_t); + if ((m = (gbuf_t *)gbuf_alloc(msize+AURP_MaxPktSize, PRI_MED)) == 0) { + dPrintf(D_M_AURP, D_L_WARNING, 
("AURPsndGZN: node=%d, out of mblk\n", + state->rem_node)); + return; + } + gbuf_wset(m,msize); + + /* construct the GZN response packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->snd_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_ZRsp; + hdrp->flags = 0; + *(short *)(gbuf_wptr(m)) = AURPSUBCODE_GetZoneNets; + gbuf_winc(m,sizeof(short)); + zname_len = gbuf_len(dat_m); + bcopy(gbuf_rptr(dat_m), gbuf_wptr(m), zname_len); + gbuf_winc(m,zname_len); + *(short *)(gbuf_wptr(m)) = -1; /* number of tuples - proto not supported */ + gbuf_winc(m,sizeof(short)); + + /* send the packet */ + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndGZN: count=%d\n", -1)); + AURPsend(m, AUD_AURP, state->rem_node); +} + +/* */ +void AURPsndGDZL(state, dat_m) + aurp_state_t *state; + gbuf_t *dat_m; +{ + int msize; + gbuf_t *m; + aurp_hdr_t *hdrp; + + if (state->snd_state == AURPSTATE_Unconnected) + return; + + msize = sizeof(aurp_hdr_t); + if ((m = (gbuf_t *)gbuf_alloc(msize+AURP_MaxPktSize, PRI_MED)) == 0) { + dPrintf(D_M_AURP, D_L_WARNING, ("AURPsndGDZL: node=%d, out of mblk\n", + state->rem_node)); + return; + } + gbuf_wset(m,msize); + + /* construct the GDZL response packet */ + hdrp = (aurp_hdr_t *)gbuf_rptr(m); + hdrp->connection_id = state->snd_connection_id; + hdrp->sequence_number = 0; + hdrp->command_code = AURPCMD_ZRsp; + hdrp->flags = 0; + *(short *)(gbuf_wptr(m)) = AURPSUBCODE_GetDomainZoneList; + gbuf_winc(m,sizeof(short)); + *(short *)(gbuf_wptr(m)) = -1; /* start index - proto not supported */ + gbuf_winc(m,sizeof(short)); + + /* send the packet */ + dPrintf(D_M_AURP, D_L_INFO, ("AURPsndGDZL: index=%d\n", -1)); + AURPsend(m, AUD_AURP, state->rem_node); +} + +/* */ +void AURPrcvZReq(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + short sub_code; + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + + /* make sure we're in a valid state to accept it */ + if (state->snd_state == AURPSTATE_Unconnected) { + dPrintf(D_M_AURP, 
D_L_WARNING, ("AURPrcvZReq: unexpected response\n")); + gbuf_freem(m); + return; + } + + /* check for the correct connection id */ + if (hdrp->connection_id != state->snd_connection_id) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvZReq: invalid connection id, r=%d, m=%d\n", + hdrp->connection_id, state->snd_connection_id)); + gbuf_freem(m); + return; + } + + gbuf_rinc(m,sizeof(*hdrp)); + sub_code = *(short *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(short)); + + dPrintf(D_M_AURP, D_L_INFO, ("AURPrcvZReq: len=%ld\n", gbuf_len(m))); + + switch (sub_code) { + case AURPSUBCODE_ZoneInfo1: + AURPsndZRsp(state, m, 0); + return; + + case AURPSUBCODE_GetZoneNets: + AURPsndGZN(state, m); + break; + + case AURPSUBCODE_GetDomainZoneList: + AURPsndGDZL(state, m); + break; + } + + gbuf_freem(m); +} + +/* */ +void AURPrcvZRsp(state, m) + aurp_state_t *state; + gbuf_t *m; +{ + short sub_code, tuples_cnt; + aurp_hdr_t *hdrp = (aurp_hdr_t *)gbuf_rptr(m); + + /* make sure we're in a valid state to accept it */ + if (state->rcv_state == AURPSTATE_Unconnected) { + dPrintf(D_M_AURP, D_L_WARNING, ("AURPrcvZRsp: unexpected response\n")); + gbuf_freem(m); + return; + } + + /* check for the correct connection id */ + if (hdrp->connection_id != state->rcv_connection_id) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPrcvZRsp: invalid connection id, r=%d, m=%d\n", + hdrp->connection_id, state->rcv_connection_id)); + gbuf_freem(m); + return; + } + + gbuf_rinc(m,sizeof(*hdrp)); + sub_code = *(short *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(short)); + + dPrintf(D_M_AURP, D_L_INFO, ("AURPrcvZRsp: len=%ld\n", gbuf_len(m))); + + switch (sub_code) { + case AURPSUBCODE_ZoneInfo1: + case AURPSUBCODE_ZoneInfo2: + tuples_cnt = *(short *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(short)); + AURPsetzi(state->rem_node, m, sub_code, tuples_cnt); + break; + + case AURPSUBCODE_GetZoneNets: + break; + + case AURPSUBCODE_GetDomainZoneList: + break; + } + + gbuf_freem(m); +} + +/* */ +static int +AURPgetzi(next_entry, buf, len, dat_m, 
flag) + int next_entry; + unsigned char *buf; + short *len; + gbuf_t *dat_m; + int flag; +{ + static int i_sav=ZT_BYTES-1, j_sav=0, idx_sav=-1; + unsigned char ev, zname_len, *zmap, *zname_base, *zname_sav, *tuples_ptr; + unsigned short net_num, *net, zname_offset; + short *sub_codep, *tuples_cntp, tuples_cnt, dat_len; + int i, j, idx, nets_cnt; + RT_entry *entry; + + /* + * XXX CHS June-98: The compiler complains that some of these + * XXX variables may be used before they're set. I don't think + * XXX that's actually the case, but to check, I'll assign them + * XXX with some test value, and add asserts to check them at + * XXX run-time. The asserts won't be compiled in for production. + */ + zname_sav = tuples_ptr = (unsigned char *) 0xdeadbeef; /* XXX */ + net = (unsigned short *) 0xdeadbeef; /* XXX */ + net_num = 0xdead; /* XXX */ + nets_cnt = 0xfeedface; /* XXX */ + + sub_codep = (short *)buf; + buf += sizeof(short); + tuples_cntp = (short *)buf; + buf += sizeof(short); + *len = sizeof(short) + sizeof(short); + zname_base = buf + sizeof(short); + dat_len = 0; + + /* set the subcode in the ZI response packet */ + *sub_codep = next_entry ? 
AURPSUBCODE_ZoneInfo2 : AURPSUBCODE_ZoneInfo1; + + switch (flag) { + case 0: /* zone info in response to ZI request */ + net = (unsigned short *)gbuf_rptr(dat_m); + nets_cnt = (gbuf_len(dat_m))/2; + break; + case 1: /* zone info in response to Ack of RI response */ + tuples_ptr = gbuf_rptr(dat_m); + nets_cnt = (gbuf_len(dat_m))/3; + next_entry = 0; + break; + case 2: /* zone info in response to Ack of RI update */ + tuples_ptr = gbuf_rptr(dat_m); + nets_cnt = (gbuf_len(dat_m))/4; + next_entry = 0; + break; + } + + /* + * for each network, find all the zones that it belongs to + */ + assert(nets_cnt != 0xfeedface); /* XXX */ + for (tuples_cnt=0; next_entry < nets_cnt; next_entry++) { + switch(flag) { + case 0: + assert(net != 0xdeadbeef); /* XXX */ + net_num = net[next_entry]; + break; + case 1: + assert(tuples_ptr != 0xdeadbeef); /* XXX */ + net_num = *(unsigned short *)tuples_ptr; + tuples_ptr += 3; + gbuf_rinc(dat_m,3); + if (tuples_ptr[-1] & 0x80) { + tuples_ptr += 3; + gbuf_rinc(dat_m,3); + next_entry++; + } + break; + case 2: + if (gbuf_len(dat_m) <= 0) { + next_entry = nets_cnt; + goto l_done; + } + assert(tuples_ptr != 0xdeadbeef); /* XXX */ + ev = *tuples_ptr++; + net_num = *(unsigned short *)tuples_ptr; + tuples_ptr += 3; + gbuf_rinc(dat_m,4); + if (tuples_ptr[-1] & 0x80) { + tuples_ptr += 2; + gbuf_rinc(dat_m,2); + } + if (ev != AURPEV_NetAdded) + continue; + break; + } + + /* + * find the RT entry associated with the network + */ + assert(net_num != 0xdead); /* XXX */ + if ((entry = rt_blookup(net_num)) == 0) { + dPrintf(D_M_AURP, D_L_WARNING, ("AURPgetzi: invalid net, %d\n", + net_num)); + continue; + } + if ( ((entry->EntryState & 0x0F) < RTE_STATE_SUSPECT) || + !RT_ALL_ZONES_KNOWN(entry) || + (entry->AURPFlag & AURP_NetHiden) ) { + dPrintf(D_M_AURP_LOW, D_L_INFO, ("AURPgetzi: zombie net, net=%d\n", + net_num)); + continue; + } + + if (entry->NetStart == 0) { + if ((idx = zt_ent_zindex(entry->ZoneBitMap)) == 0) + continue; + idx--; /* index in the zone 
table */ + zname_len = ZT_table[idx].Zone.len; + if (zname_len) { + assert(net_num != 0xdead); /* XXX */ + *(unsigned short *)buf = net_num; + buf += sizeof(short); + if (idx == idx_sav) { + /* use the optimized format */ + assert(zname_sav != 0xdeadbeef); /* XXX */ + zname_offset = zname_sav - zname_base; + *(unsigned short *)buf = (0x8000 | zname_offset); + buf += sizeof(short); + dat_len += 4; + } else { + /* use the long format */ + zname_sav = buf; + *buf++ = zname_len; + bcopy(ZT_table[idx].Zone.str, buf, zname_len); + buf += zname_len; + dat_len += (3 + zname_len); + } + tuples_cnt++; + idx_sav = idx; + } + + } else { + zmap = entry->ZoneBitMap; + for (i=i_sav; i >=0; i--) { + if (!zmap[i]) + continue; + + for (j=j_sav; j < 8; j++) { + if (!((zmap[i] << j) & 0x80)) + continue; + + idx = i*8 + j; /* index in the zone table */ + zname_len = ZT_table[idx].Zone.len; + if (zname_len) { + if ((dat_len+3+zname_len) > AURP_MaxPktSize) { + i_sav = i; + j_sav = j; + goto l_done; + } + + assert(net_num != 0xdead); /* XXX */ + *(unsigned short *)buf = net_num; + buf += sizeof(short); + if (idx == idx_sav) { + /* use the optimized format */ + assert(zname_sav != 0xdeadbeef);/*XXX*/ + zname_offset = zname_sav - zname_base; + *(unsigned short *)buf = (0x8000 | zname_offset); + buf += sizeof(short); + dat_len += 4; + } else { + /* use the long format */ + zname_sav = buf; + *buf++ = zname_len; + bcopy(ZT_table[idx].Zone.str, buf, zname_len); + buf += zname_len; + dat_len += (3 + zname_len); + } + tuples_cnt++; + idx_sav = idx; + } + } + } + } + if ((dat_len+3+32) > AURP_MaxPktSize) { + next_entry++; + break; + } + } + i_sav = ZT_BYTES-1; + j_sav = 0; + +l_done: + *len += dat_len; + if (next_entry == nets_cnt) + next_entry = 0; + + /* set the subcode in the ZI response packet */ + if (next_entry) + *sub_codep = AURPSUBCODE_ZoneInfo2; + + /* set the tuples count in the ZI response packet */ + *tuples_cntp = tuples_cnt; + + idx_sav = -1; + return next_entry; +} + +/* */ 
+static void +AURPsetzi(node, m, sub_code, tuples_cnt) + unsigned char node; + gbuf_t *m; + short sub_code; + short tuples_cnt; +{ + int rc, tuple_fmt; + unsigned short net_num, zname_offset; + unsigned char *buf = gbuf_rptr(m), *zname_base; + RT_entry *entry; + at_nvestr_t *zname; + + /* compute the base of the zone names of the optimized tuples */ + zname_base = buf + sizeof(short); + + /* process all tuples */ + while (tuples_cnt-- > 0) { + net_num = *(unsigned short *)buf; + buf += sizeof(short); + if (*buf & 0x80) { + /* optimized-format tuple */ + zname_offset = (*(unsigned short *)buf) & 0x7fff; + buf += sizeof(short); + zname = (at_nvestr_t *)(zname_base + zname_offset); + tuple_fmt = 0; + dPrintf(D_M_AURP_LOW, D_L_INFO, + ("AURPsetzi: optimized fmt, net=%d. zlen=%d, zoffset=%d\n ", + net_num, zname->len, zname_offset)); + } else { + /* long-format tuple */ + zname = (at_nvestr_t *)buf; + tuple_fmt = 1; + dPrintf(D_M_AURP_LOW, D_L_INFO, + ("AURPsetzi: long fmt, net=%d, zlen=%d\n ", + net_num, zname->len)); + } + + /* + * find the RT entry associated with the specified network + */ + if ((entry = rt_blookup(net_num)) == 0) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPsetzi: invalid net, net=%d\n", net_num)); + } else { /* entry found */ + if (entry->EntryState >= RTE_STATE_SUSPECT) { + if ((rc = zt_add_zonename(zname)) == ZT_MAXEDOUT) { + dPrintf(D_M_AURP, D_L_WARNING, + ("AURPsetzi: ZT_table full\n")); + } else { + zt_set_zmap(rc, entry->ZoneBitMap); + RT_SET_ZONE_KNOWN(entry); + } + } + } + if (tuple_fmt) + buf += zname->len+1; + } +} diff --git a/bsd/netat/ddp.c b/bsd/netat/ddp.c new file mode 100644 index 000000000..a05405e14 --- /dev/null +++ b/bsd/netat/ddp.c @@ -0,0 +1,1406 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987, 1988, 1989 Apple Computer, Inc. + * + * + * Modified for MP, 1996 by Tuyen Nguyen + * Added AURP support, April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +#define RESOLVE_DBG /* define debug globals in debug.h */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* globals */ + +/* Queue of LAP interfaces which have registered themselves with DDP */ +struct at_ifQueueHd at_ifQueueHd; + +extern at_state_t at_state; +extern TAILQ_HEAD(name_registry, _nve_) name_registry; + +snmpStats_t snmpStats; /* snmp ddp & echo stats */ + +extern at_ddp_stats_t at_ddp_stats; /* DDP statistics */ +extern struct atpcb ddp_head; +extern at_ifaddr_t *ifID_home, *ifID_table[]; +extern aarp_amt_array *aarp_table[]; +extern at_ifaddr_t at_interfaces[]; + +/* routing mode special */ +void (*ddp_AURPsendx)(); +at_ifaddr_t *aurp_ifID = 0; +extern pktsIn,pktsOut; +int pktsDropped,pktsHome; +atlock_t ddpall_lock; +atlock_t ddpinp_lock; + +extern int 
*atp_pidM; +extern int *adsp_pidM; +extern struct atpcb *atp_inputQ[]; +extern CCB *adsp_inputQ[]; + +at_ifaddr_t *forUs(at_ddp_t *); + +void ddp_input(), ddp_notify_nbp(); + +extern void routing_needed(); +extern void ddp_brt_sweep(); + +struct { + void (*func)(); +} ddp_handler[256]; + +void init_ddp_handler() +{ + bzero(ddp_handler, sizeof(ddp_handler)); +} + +void add_ddp_handler(ddp_socket, input_func) + u_char ddp_socket; + void (*input_func)(); +{ + ddp_handler[ddp_socket].func = input_func; +} + +void +ddp_slowtimo() +{ + ddp_brt_sweep(); +} + +/* + * Raw DDP socket option processing. + */ +int ddp_ctloutput(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + struct atpcb *at_pcb = sotoatpcb(so); + int optval, error = 0; + + if (sopt->sopt_level != ATPROTO_NONE) + return (EINVAL); + + switch (sopt->sopt_dir) { + + case SOPT_GET: + switch (sopt->sopt_name) { + case DDP_HDRINCL: + optval = at_pcb->ddp_flags & DDPFLG_HDRINCL; + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + case DDP_CHKSUM_ON: + optval = at_pcb->ddp_flags & DDPFLG_CHKSUM; + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + case DDP_STRIPHDR: + optval = at_pcb->ddp_flags & DDPFLG_STRIPHDR; + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + case DDP_SLFSND_ON: + optval = at_pcb->ddp_flags & DDPFLG_SLFSND; + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + case DDP_GETSOCKNAME: + { + ddp_addr_t addr; + addr.inet.net = at_pcb->laddr.s_net; + addr.inet.node = at_pcb->laddr.s_node; + addr.inet.socket = at_pcb->lport; + addr.ddptype = at_pcb->ddptype; + error = sooptcopyout(sopt, &addr, sizeof addr); + } + break; + default: + error = ENOPROTOOPT; + break; + } + break; + case SOPT_SET: + switch (sopt->sopt_name) { + case DDP_HDRINCL: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval) + at_pcb->ddp_flags |= DDPFLG_HDRINCL; + else + at_pcb->ddp_flags &= ~DDPFLG_HDRINCL; + break; + case 
DDP_CHKSUM_ON: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval) + at_pcb->ddp_flags |= DDPFLG_CHKSUM; + else + at_pcb->ddp_flags &= ~DDPFLG_CHKSUM; + break; + case DDP_STRIPHDR: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval) + at_pcb->ddp_flags |= DDPFLG_STRIPHDR; + else + at_pcb->ddp_flags &= ~DDPFLG_STRIPHDR; + break; + case DDP_SLFSND_ON: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval) + at_pcb->ddp_flags |= DDPFLG_SLFSND; + else + at_pcb->ddp_flags &= ~DDPFLG_SLFSND; + break; + default: + error = ENOPROTOOPT; + break; + } + break; + } + + return(error); +} /* ddp_cloutput */ + +/****************************************************************/ +/* */ +/* */ +/* Support Routines */ +/* */ +/* */ +/****************************************************************/ + +/* + * Name: + * ddp_checksum + * + * Description: + * This procedure determines the checksum of an extended DDP datagram. + * Add the unsigned bytes into an unsigned 16-bit accumulator. + * After each add, rotate the sign bit into the low order bit of + * the accumulator. When done, if the checksum is 0, changed into 0xFFFF. + * + * Calling sequence: + * checksum = ddp_checksum(mp, offset) + * + * Parameters: + * mp pointer to the datagram gbuf_t + * offset offset to start at in first gbuf_t block + * + * Return value: + * The DDP checksum. + * + */ + +u_short ddp_checksum(mp, offset) + register gbuf_t *mp; + register int offset; +{ + register u_char *data; + register int length; + register u_short checksum; + + checksum = 0; + + do { + if (offset >= gbuf_len(mp)) + offset -= gbuf_len(mp); + else { + data = ((unsigned char *) gbuf_rptr(mp)) + offset; + length = gbuf_len(mp) - offset; + offset = 0; + /* Portable checksum from 3.0 */ + while (length--) { + checksum += *data++; + checksum = (checksum & 0x8000) ? 
+ ((checksum << 1) | 1) : (checksum << 1); + } + } + } while ( (mp = gbuf_cont(mp)) ); + + if (checksum == 0) + checksum = 0xffff; + + return(checksum); +} + +/* + * ddp_add_if() + * + * Description: + * This procedure is called by each LAP interface when it wants to place + * itself online. The LAP interfaces passes in a pointer to its at_if + * struct, which is added to DDP's list of active interfaces (at_ifQueueHd). + * When DDP wants to transmit a packet, it searches this list for the + * interface to use. + * + * If AT_IFF_DEFAULT is set, then this interface is to be brought online + * as the interface DDP socket addresses are tied to. Of course there can + * be only one default interface; we return an error if it's already set. + * + * Calling Sequence: + * ret_status = ddp_add_if(ifID) + * + * Formal Parameters: + * ifID pointer to LAP interface's at_if struct. + * + * Completion Status: + * 0 Procedure successfully completed. + * EALREADY This interface is already online, or there is + * already a default interface. + * ENOBUFS Cannot allocate input queue + * + */ +int ddp_add_if(ifID) +register at_ifaddr_t *ifID; +{ + int port = -1; + + dPrintf(D_M_DDP, D_L_STARTUP, + ("ddp_add_if: called, ifID:0x%x\n", (u_int) ifID)); + + if (ifID->ifFlags & AT_IFF_DEFAULT) { + if (ifID_home) + return(EEXIST); /* home port already set */ + else { + port = IFID_HOME; + ifID_home = ifID; + } + } else { + for (port=IFID_HOME+1; portifPort = port; /* set ddp port # in ifID */ + + /* Add this interface to the list of online interfaces */ + TAILQ_INSERT_TAIL(&at_ifQueueHd, ifID, aa_link); + + return (0); +} /* ddp_add_if */ + +/* + * ddp_rem_if() + * + * Description: + * This procedure is called by each LAP interface when it wants to take + * itself offline. The LAP interfaces passes in a pointer to its at_if + * struct; DDP's list of active interfaces (at_ifQueueHd) is searched and + * this interface is removed from the list. 
DDP can still transmit + * packets as long as this interface is not the default interface; the + * sender will just get ENETUNREACH errors when it tries to send to an + * interface that went offline. However, if the default interface is + * taken offline, we no longer have a node ID to use as a source address + * and DDP must return ENETDOWN when a caller tries to send a packet. + * + * Formal Parameters: + * ifID pointer to LAP interface's at_if struct. + */ + +void ddp_rem_if(ifID) + register at_ifaddr_t *ifID; +{ + struct ifaddr *ifa = &ifID->aa_ifa; + + /* un-do processing done in SIOCSIFADDR */ + if (ifa->ifa_addr) { + int s = splnet(); + TAILQ_REMOVE(&ifID->aa_ifp->if_addrhead, ifa, ifa_link); + ifa->ifa_addr = NULL; + splx(s); + } + if (ifID->at_dl_tag) { +/* dlil_detach_protocol(ifID->at_dl_tag); */ + ether_detach_at(ifID->aa_ifp); + ifID->at_dl_tag = 0; + } + + /* un-do processing done in ddp_add_if() */ + if (ifID->ifPort) { + if (aarp_table[ifID->ifPort]) { + FREE(aarp_table[ifID->ifPort], M_RTABLE); + aarp_table[ifID->ifPort] = NULL; + } + + at_state.flags |= AT_ST_IF_CHANGED; + ifID->aa_ifp = NULL; + + trackrouter_rem_if(ifID); + TAILQ_REMOVE(&at_ifQueueHd, ifID, aa_link); + ifID_table[ifID->ifPort] = NULL; + ifID->ifName[0] = '\0'; + ifID->ifPort = 0; + } + + /* *** deallocate ifID, eventually *** */ +} /* ddp_rem_if */ + +/* + * The user may have registered an NVE with the NBP on a socket. When the + * socket is closed, the NVE should be deleted from NBP's name table. The + * user should delete the NVE before the socket is shut down, but there + * may be circumstances when he can't. So, whenever a DDP socket is closed, + * this routine is used to notify NBP of the socket closure. This would + * help NBP get rid of all NVE's registered on the socket. + */ + +/* *** Do we still need to do this? 
*** */ +int ot_ddp_check_socket(socket, pid) + unsigned char socket; + int pid; +{ + int cnt = 0; + gref_t *gref; + + dPrintf(D_M_DDP, D_L_INFO, ("ot_ddp_check_socket: %d\n", socket)); + for (gref = ddp_head.atpcb_next; gref != &ddp_head; gref = gref->atpcb_next) + if (gref->lport == socket && gref->pid == pid) + cnt++; + if ((atp_inputQ[socket] != NULL) && (atp_inputQ[socket] != (gref_t *)1) + && (atp_pidM[socket] == pid)) + cnt++; + if ((adsp_inputQ[socket] != NULL) && (adsp_pidM[socket] == pid)) + cnt++; + + return(cnt); +} + +void ddp_notify_nbp(socket, pid, ddptype) + unsigned char socket; + int pid; + unsigned char ddptype; /* not used */ +{ + extern int nve_lock; + nve_entry_t *nve_entry; + + if (at_state.flags & AT_ST_STARTED) { + /* *** NBP_CLOSE_NOTE processing (from ddp_nbp.c) *** */ + ATDISABLE(nve_lock, NVE_LOCK); + TAILQ_FOREACH(nve_entry, &name_registry, nve_link) { + if ((at_socket)socket == nve_entry->address.socket && + /* *** check complete address and ddptype here *** */ + pid == nve_entry->pid && + ot_ddp_check_socket(nve_entry->address.socket, + nve_entry->pid) < 2) { + nbp_delete_entry(nve_entry); + } + } + ATENABLE(nve_lock, NVE_LOCK); + } +} /* ddp_notify_nbp */ + +static void fillin_pkt_chain(m) + gbuf_t *m; +{ + gbuf_t *tmp_m = m; + register at_ddp_t + *ddp = (at_ddp_t *)gbuf_rptr(m), + *tmp_ddp; + u_short tmp; + + if (UAS_VALUE(ddp->checksum)) { + tmp = ddp_checksum(m, 4); + UAS_ASSIGN(ddp->checksum, tmp); + } + + for (tmp_m=gbuf_next(tmp_m); tmp_m; tmp_m=gbuf_next(tmp_m)) { + tmp_ddp = (at_ddp_t *)gbuf_rptr(tmp_m); + tmp_ddp->length = gbuf_msgsize(tmp_m); + tmp_ddp->hopcount = + tmp_ddp->unused = 0; + NET_NET(tmp_ddp->src_net, ddp->src_net); + tmp_ddp->src_node = ddp->src_node; + tmp_ddp->src_socket = ddp->src_socket; + if (UAS_VALUE(tmp_ddp->checksum)) { + tmp = ddp_checksum(tmp_m, 4); + UAS_ASSIGN(tmp_ddp->checksum, tmp); + } + } +} + +/* There are various ways a packet may go out.... 
it may be sent out + * directly to destination node, or sent to a random router or sent + * to a router whose entry exists in Best Router Cache. Following are + * constants used WITHIN this routine to keep track of choice of destination + */ +#define DIRECT_ADDR 1 +#define BRT_ENTRY 2 +#define BRIDGE_ADDR 3 + +/* + * ddp_output() + * + * Remarks : + * Called to queue a atp/ddp data packet on the network interface. + * It returns 0 normally, and an errno in case of error. + * The mbuf chain pointed to by *mp is consumed on success, and + * freed in case of error. + * + */ +int ddp_output(mp, src_socket, src_addr_included) + register gbuf_t **mp; + at_socket src_socket; + int src_addr_included; +{ + register at_ifaddr_t *ifID = ifID_home, *ifIDTmp = NULL; + register at_ddp_t *ddp; + register ddp_brt_t *brt; + register at_net_al dst_net; + register int len; + struct atalk_addr at_dest; + at_ifaddr_t *ARouterIf = NULL; + int loop = 0; + int error = 0; + int addr_type; + u_char addr_flag; + char *addr = NULL; + register gbuf_t *m; + + KERNEL_DEBUG(DBG_AT_DDP_OUTPUT | DBG_FUNC_START, 0, + 0,0,0,0); + + snmpStats.dd_outReq++; + + m = *mp; + ddp = (at_ddp_t *)gbuf_rptr(m); + + if ((ddp->dst_socket > (unsigned) (DDP_SOCKET_LAST + 1)) || + (ddp->dst_socket < DDP_SOCKET_1st_RESERVED)) { + dPrintf(D_M_DDP, D_L_ERROR, + ("Illegal destination socket on outgoing packet (0x%x)", + ddp->dst_socket)); + at_ddp_stats.xmit_bad_addr++; + error = ENOTSOCK; + gbuf_freel(*mp); + goto exit_ddp_output; + } + if ((len = gbuf_msgsize(*mp)) > DDP_DATAGRAM_SIZE) { + /* the packet is too large */ + dPrintf(D_M_DDP, D_L_ERROR, + ("Outgoing packet too long (len=%d bytes)", len)); + at_ddp_stats.xmit_bad_length++; + error = EMSGSIZE; + gbuf_freel(*mp); + goto exit_ddp_output; + } + at_ddp_stats.xmit_bytes += len; + at_ddp_stats.xmit_packets++; + + ddp->length = len; + ddp->hopcount = + ddp->unused = 0; + + /* If this packet is for the same node, loop it back + * up... 
Note that for LocalTalk, dst_net zero means "THIS_NET", so + * address 0.nn is eligible for loopback. For Extended EtherTalk, + * dst_net 0 can be used only for cable-wide or zone-wide + * broadcasts (0.ff) and as such, address of the form 0.nn is NOT + * eligible for loopback. + */ + dst_net = NET_VALUE(ddp->dst_net); + + /* If our packet is destined for the 'virtual' bridge + * address of NODE==0xFE, replace that address with a + * real bridge address. + */ + if ((ddp->dst_node == 0xfe) && + ((dst_net == ATADDR_ANYNET) || + (dst_net >= ifID_home->ifThisCableStart && + dst_net <= ifID_home->ifThisCableEnd))) { + /* if there's a router that's not us, it's in ifID_home */ + NET_ASSIGN(ddp->dst_net, ifID_home->ifARouter.s_net); + dst_net = ifID_home->ifARouter.s_net; + ddp->dst_node = ifID_home->ifARouter.s_node; + } + + if (MULTIHOME_MODE && (ifIDTmp = forUs(ddp))) { + ifID = ifIDTmp; + loop = TRUE; + dPrintf(D_M_DDP_LOW, D_L_USR1, + ("ddp_out: for us if:%s\n", ifIDTmp->ifName)); + } + + if (!loop) + loop = ((ddp->dst_node == ifID->ifThisNode.s_node) && + (dst_net == ifID->ifThisNode.s_net) + ); + if (loop) { + gbuf_t *mdata, *mdata_next; + + if (!MULTIHOME_MODE || !src_addr_included) { + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + ddp->src_node = ifID->ifThisNode.s_node; + } + ddp->src_socket = src_socket; + + dPrintf(D_M_DDP_LOW, D_L_OUTPUT, + ("ddp_output: loop to %d:%d port=%d\n", + NET_VALUE(ddp->dst_net), + ddp->dst_node, + ifID->ifPort)); + + fillin_pkt_chain(*mp); + + dPrintf(D_M_DDP, D_L_VERBOSE, + ("Looping back packet from skt 0x%x to skt 0x%x\n", + ddp->src_socket, ddp->dst_socket)); + + for (mdata = *mp; mdata; mdata = mdata_next) { + mdata_next = gbuf_next(mdata); + gbuf_next(mdata) = 0; + ddp_input(mdata, ifID); + } + goto exit_ddp_output; + } + if ((ddp->dst_socket == ZIP_SOCKET) && + (zip_type_packet(*mp) == ZIP_GETMYZONE)) { + ddp->src_socket = src_socket; + error = zip_handle_getmyzone(ifID, *mp); + gbuf_freel(*mp); + goto exit_ddp_output; 
+ } + /* + * find out the interface on which the packet should go out + */ + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if ((ifID->ifThisNode.s_net == dst_net) || (dst_net == 0)) + /* the message is either going out (i) on the same + * NETWORK in case of LocalTalk, or (ii) on the same + * CABLE in case of Extended AppleTalk (EtherTalk). + */ + break; + + if ((ifID->ifThisCableStart <= dst_net) && + (ifID->ifThisCableEnd >= dst_net) + ) + /* We're on EtherTalk and the message is going out to + * some other network on the same cable. + */ + break; + + if (ARouterIf == NULL && ATALK_VALUE(ifID->ifARouter)) + ARouterIf = ifID; + } + dPrintf(D_M_DDP_LOW, D_L_USR1, + ("ddp_output: after search ifid:0x%x %s ifID_home:0x%x\n", + (u_int)ifID, ifID ? ifID->ifName : "", + (u_int)ifID_home)); + + if (ifID) { + /* located the interface where the packet should + * go.... the "first-hop" destination address + * must be the same as real destination address. + */ + addr_type = DIRECT_ADDR; + } else { + /* no, the destination network number does + * not match known network numbers. If we have + * heard from this network recently, BRT table + * may have address of a router we could use! + */ + if (!MULTIPORT_MODE) { + BRT_LOOK (brt, dst_net); + if (brt) { + /* Bingo... BRT has an entry for this network. + * Use the link address as is. + */ + dPrintf(D_M_DDP, D_L_VERBOSE, + ("Found BRT entry to send to net 0x%x", dst_net)); + at_ddp_stats.xmit_BRT_used++; + addr_type = BRT_ENTRY; + ifID = brt->ifID; + } else { + /* No BRT entry available for dest network... do we + * know of any router at all?? 
+ */ + if ((ifID = ARouterIf) != NULL) + addr_type = BRIDGE_ADDR; + else { + dPrintf(D_M_DDP, D_L_WARNING, + ("Found no interface to send pkt")); + at_ddp_stats.xmit_bad_addr++; + error = ENETUNREACH; + gbuf_freel(*mp); + goto exit_ddp_output; + } + } + } + else { /* We are in multiport mode, so we can bypass all the rest + * and directly ask for the routing of the packet + */ + at_ddp_stats.xmit_BRT_used++; + + ifID = ifID_home; + if (!src_addr_included) { + ddp->src_node = ifID->ifThisNode.s_node; + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + } + ddp->src_socket = src_socket; + routing_needed(*mp, ifID, TRUE); + + goto exit_ddp_output; + } + } + /* by the time we land here, we know the interface on + * which this packet is going out.... ifID. + */ + if (ifID->ifState == LAP_OFFLINE) { + gbuf_freel(*mp); + goto exit_ddp_output; + } + + switch (addr_type) { + case DIRECT_ADDR : +/* + at_dest.atalk_unused = 0; +*/ + NET_ASSIGN(at_dest.atalk_net, dst_net); + at_dest.atalk_node = ddp->dst_node; + addr_flag = AT_ADDR; + addr = (char *)&at_dest; + break; + case BRT_ENTRY : + addr_flag = ET_ADDR; + addr = (char *)&brt->et_addr; + break; + case BRIDGE_ADDR : + NET_ASSIGN(at_dest.atalk_net, ifID->ifARouter.s_net); + at_dest.atalk_node = ifID->ifARouter.s_node; + addr_flag = AT_ADDR; + addr = (char *)&at_dest; + break; + + } + /* Irrespective of the interface on which + * the packet is going out, we always put the + * same source address on the packet (unless multihoming mode). 
+ */ + if (MULTIHOME_MODE) { + if (!src_addr_included) { + ddp->src_node = ifID->ifThisNode.s_node; + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + } + } + else { + ddp->src_node = ifID_home->ifThisNode.s_node; + NET_ASSIGN(ddp->src_net, ifID_home->ifThisNode.s_net); + } + ddp->src_socket = src_socket; + + dPrintf(D_M_DDP_LOW, D_L_OUTPUT, + ("ddp_output: going out to %d:%d skt%d on %s\n", + dst_net, ddp->dst_node, ddp->dst_socket, ifID->ifName)); + + fillin_pkt_chain(*mp); + + { /* begin block */ + struct etalk_addr dest_addr; + struct atalk_addr dest_at_addr; + int loop = TRUE; /* flag to aarp to loopback (default) */ + + m = *mp; + + /* the incoming frame is of the form {flag, address, ddp...} + * where "flag" indicates whether the address is an 802.3 + * (link) address, or an appletalk address. If it's an + * 802.3 address, the packet can just go out to the network + * through PAT, if it's an appletalk address, AT->802.3 address + * resolution needs to be done. + * If 802.3 address is known, strip off the flag and 802.3 + * address, and prepend 802.2 and 802.3 headers. 
+ */ + + if (addr == NULL) { + addr_flag = *(u_char *)gbuf_rptr(m); + gbuf_rinc(m,1); + } + + switch (addr_flag) { + case AT_ADDR_NO_LOOP : + loop = FALSE; + /* pass thru */ + case AT_ADDR : + if (addr == NULL) { + dest_at_addr = *(struct atalk_addr *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(struct atalk_addr)); + } else + dest_at_addr = *(struct atalk_addr *)addr; + break; + case ET_ADDR : + if (addr == NULL) { + dest_addr = *(struct etalk_addr *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(struct etalk_addr)); + } else + dest_addr = *(struct etalk_addr *)addr; + break; + default : + dPrintf(D_M_DDP_LOW,D_L_ERROR, + ("ddp_output: Unknown addr_flag = 0x%x\n", addr_flag)); + gbuf_freel(m); /* unknown address type, chuck it */ + goto exit_ddp_output; + } + + m = gbuf_strip(m); + + /* At this point, rptr points to ddp header for sure */ + if (ifID->ifState == LAP_ONLINE_FOR_ZIP) { + /* see if this is a ZIP packet that we need + * to let through even though network is + * not yet alive!! + */ + if (zip_type_packet(m) == 0) { + gbuf_freel(m); + goto exit_ddp_output; + } + } + + ifID->stats.xmit_packets++; + ifID->stats.xmit_bytes += gbuf_msgsize(m); + snmpStats.dd_outLong++; + + switch (addr_flag) { + case AT_ADDR_NO_LOOP : + case AT_ADDR : + /* + * we don't want elap to be looking into ddp header, so + * it doesn't know net#, consequently can't do + * AMT_LOOKUP. That task left to aarp now. 
+ */ + aarp_send_data(m,ifID,&dest_at_addr, loop); + break; + case ET_ADDR : + pat_output(ifID, m, &dest_addr, 0); + break; + } + } /* end block */ + exit_ddp_output: + KERNEL_DEBUG(DBG_AT_DDP_OUTPUT | DBG_FUNC_END, 0, + error, 0, 0, 0); + return(error); +} /* ddp_output */ + +void ddp_input(mp, ifID) + register gbuf_t *mp; + register at_ifaddr_t *ifID; +{ + register at_ddp_t *ddp; /* DDP header */ + register int msgsize; + register at_socket socket; + register int len; + register at_net_al dst_net; + + KERNEL_DEBUG(DBG_AT_DDP_INPUT | DBG_FUNC_START, 0, + ifID, mp, gbuf_len(mp),0); + + /* Makes sure we know the default interface before starting to + * accept incomming packets. If we don't we may end up with a + * null ifID_table[0] and have impredicable results (specially + * in router mode. This is a transitory state (because we can + * begin to receive packet while we're not completly set up yet. + */ + + if (ifID_home == (at_ifaddr_t *)NULL) { + dPrintf(D_M_DDP, D_L_ERROR, + ("dropped incoming packet ifID_home not set yet\n")); + gbuf_freem(mp); + goto out; /* return */ + } + + /* + * if a DDP packet has been broadcast, we're going to get a copy of + * it here; if it originated at user level via a write on a DDP + * socket; when it gets here, the first block in the chain will be + * empty since it only contained the lap level header which will be + * stripped in the lap level immediately below ddp + */ + + if ((mp = (gbuf_t *)ddp_compress_msg(mp)) == NULL) { + dPrintf(D_M_DDP, D_L_ERROR, + ("dropped short incoming ET packet (len %d)", 0)); + snmpStats.dd_inTotal++; + at_ddp_stats.rcv_bad_length++; + goto out; /* return; */ + } + msgsize = gbuf_msgsize(mp); + + at_ddp_stats.rcv_bytes += msgsize; + at_ddp_stats.rcv_packets++; + + /* if the interface pointer is 0, the packet has been + * looped back by 'write' half of DDP. It is of the + * form {extended ddp,...}. The packet is meant to go + * up to some socket on the same node. 
+ */ + if (!ifID) /* if loop back is specified */ + ifID = ifID_home; /* that means the home port */ + + /* the incoming datagram has extended DDP header and is of + * the form {ddp,...}. + */ + if (msgsize < DDP_X_HDR_SIZE) { + dPrintf(D_M_DDP, D_L_ERROR, + ("dropped short incoming ET packet (len %d)", msgsize)); + at_ddp_stats.rcv_bad_length++; + gbuf_freem(mp); + goto out; /* return; */ + } + /* + * At this point, the message is always of the form + * {extended ddp, ... }. + */ + ddp = (at_ddp_t *)gbuf_rptr(mp); + len = ddp->length; + + if (msgsize != len) { + if ((unsigned) msgsize > len) { + if (len < DDP_X_HDR_SIZE) { + dPrintf(D_M_DDP, D_L_ERROR, + ("Length problems, ddp length %d, buffer length %d", + len, msgsize)); + snmpStats.dd_tooLong++; + at_ddp_stats.rcv_bad_length++; + gbuf_freem(mp); + goto out; /* return; */ + } + /* + * shave off the extra bytes from the end of message + */ + mp = ddp_adjmsg(mp, -(msgsize - len)) ? mp : 0; + if (mp == 0) + goto out; /* return; */ + } else { + dPrintf(D_M_DDP, D_L_ERROR, + ("Length problems, ddp length %d, buffer length %d", + len, msgsize)); + snmpStats.dd_tooShort++; + at_ddp_stats.rcv_bad_length++; + gbuf_freem(mp); + goto out; /* return; */ + } + } + socket = ddp->dst_socket; + + /* + * We want everything in router mode, specially socket 254 for nbp so we need + * to bypass this test when we are a router. 
+ */ + + if (!MULTIPORT_MODE && (socket > DDP_SOCKET_LAST || + socket < DDP_SOCKET_1st_RESERVED)) { + dPrintf(D_M_DDP, D_L_WARNING, + ("Bad dst socket on incoming packet (0x%x)", + ddp->dst_socket)); + at_ddp_stats.rcv_bad_socket++; + gbuf_freem(mp); + goto out; /* return; */ + } + /* + * if the checksum is true, then upstream wants us to calc + */ + if (UAS_VALUE(ddp->checksum) && + (UAS_VALUE(ddp->checksum) != ddp_checksum(mp, 4))) { + dPrintf(D_M_DDP, D_L_WARNING, + ("Checksum error on incoming pkt, calc 0x%x, exp 0x%x", + ddp_checksum(mp, 4), UAS_VALUE(ddp->checksum))); + snmpStats.dd_checkSum++; + at_ddp_stats.rcv_bad_checksum++; + gbuf_freem(mp); + goto out; /* return; */ + } + +/*############### routing input checking */ + +/* Router mode special: we send "up-stack" packets for this node or coming from any + * other ports, but for the reserved atalk sockets (RTMP, ZIP, NBP [and EP]) + * BTW, the way we know it's for the router and not the home port is that the + * MAC (ethernet) address is always the one of the interface we're on, but + * the AppleTalk address must be the one of the home port. If it's a multicast + * or another AppleTalk address, this is the router job's to figure out where it's + * going to go. + */ + /* *** a duplicate should be sent to any other client that is listening + for packets of this type on a raw DDP socket *** */ + if (ddp_handler[socket].func) { + dPrintf(D_M_DDP,D_L_INPUT, + ("ddp_input: skt %d hdnlr:0x%x\n", + (u_int) socket, ddp_handler[socket].func)); + pktsHome++; + snmpStats.dd_inLocal++; + + (*ddp_handler[socket].func)(mp, ifID); + goto out; /* return; */ + } + dst_net = NET_VALUE(ddp->dst_net); + if ( + /* exact match */ + forUs(ddp) || + /* any node, wildcard or matching net */ + ((ddp->dst_node == 255) && + (((dst_net >= ifID_home->ifThisCableStart) && + (dst_net <= ifID_home->ifThisCableEnd)) || + dst_net == 0)) || + /* this node is not online yet(?) 
*/ + (ifID->ifRoutingState < PORT_ONLINE) + ) { + gref_t *gref; + pktsHome++; + snmpStats.dd_inLocal++; + + if (ddp->type == DDP_ATP) { + if (atp_inputQ[socket] && (atp_inputQ[socket] != (gref_t *)1)) { + /* if there's an ATP pcb */ + atp_input(mp); + goto out; /* return; */ + } + } else if (ddp->type == DDP_ADSP) { + if (adsp_inputQ[socket]) { + /* if there's an ADSP pcb */ + adsp_input(mp); + goto out; /* return; */ + } + } + + /* otherwise look for a DDP pcb; + ATP / raw-DDP and ADSP / raw-DDP are possible */ + for (gref = ddp_head.atpcb_next; gref != &ddp_head; + gref = gref->atpcb_next) + if (gref->lport == socket) { + dPrintf(D_M_DDP, D_L_INPUT, + ("ddp_input: streamq, skt %d\n", socket)); + if (gref->atpcb_socket) { + struct sockaddr_at ddp_in; + ddp_in.sat_len = sizeof(ddp_in); + ddp_in.sat_family = AF_APPLETALK; + ddp_in.sat_addr.s_net = NET_VALUE(ddp->src_net); + ddp_in.sat_addr.s_node = ddp->src_node; + ddp_in.sat_port = ddp->src_socket; + + /* strip off DDP header if so indicated by + sockopt */ + if (gref->ddp_flags & DDPFLG_STRIPHDR) { + mp = m_pullup((struct mbuf *)mp, + DDP_X_HDR_SIZE); + if (mp) { + gbuf_rinc(mp, DDP_X_HDR_SIZE); + } else { + /* this should never happen because + msgsize was checked earlier */ + at_ddp_stats.rcv_bad_length++; + goto out; /* return */ + } + } + + if (sbappendaddr(&((gref->atpcb_socket)->so_rcv), + (struct sockaddr *)&ddp_in, + mp, 0) == 0) + gbuf_freem(mp); + else + sorwakeup(gref->atpcb_socket); + } else { + atalk_putnext(gref, mp); + } + goto out; /* return */ + } + + at_ddp_stats.rcv_bad_socket++; + gbuf_freem(mp); + snmpStats.dd_noHandler++; + dPrintf(D_M_DDP, D_L_WARNING, + ("ddp_input: dropped pkt for socket %d\n", socket)); + } else { + dPrintf(D_M_DDP, D_L_ROUTING, + ("ddp_input: routing_needed from port=%d sock=%d\n", + ifID->ifPort, ddp->dst_socket)); + + snmpStats.dd_fwdReq++; + if (((pktsIn-pktsHome+200) >= RouterMix) && ((++pktsDropped % 5) == 0)) { + at_ddp_stats.rcv_dropped_nobuf++; + gbuf_freem(mp); 
+ } + else { + routing_needed(mp, ifID, FALSE); + } + } +out: + KERNEL_DEBUG(DBG_AT_DDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); +} /* ddp_input */ + + +/* + * ddp_router_output() + * + * Remarks : + * This is a modified version of ddp_output for router use. + * The main difference is that the interface on which the packet needs + * to be sent is specified and a *destination* AppleTalk address is passed + * as an argument, this address may or may not be the same as the destination + * address found in the ddp packet... This is the trick about routing, the + * AppleTalk destination of the packet may not be the same as the Enet address + * we send the packet too (ie, we may pass the baby to another router). + * + */ +int ddp_router_output(mp, ifID, addr_type, router_net, router_node, enet_addr) + gbuf_t *mp; + at_ifaddr_t *ifID; + int addr_type; + at_net_al router_net; + at_node router_node; + etalk_addr_t *enet_addr; +{ + register at_ddp_t *ddp; + struct atalk_addr at_dest; + int addr_flag; + char *addr = NULL; + register gbuf_t *m; + + if (!ifID) { + dPrintf(D_M_DDP, D_L_WARNING, ("BAD BAD ifID\n")); + gbuf_freel(mp); + return(EPROTOTYPE); + } + ddp = (at_ddp_t *)gbuf_rptr(mp); + + if (ifID->ifFlags & AT_IFF_AURP) { /* AURP link? 
*/ + if (ddp_AURPsendx) { + fillin_pkt_chain(mp); + if (router_node == 255) + router_node = 0; + ddp_AURPsendx(AURPCODE_DATAPKT, mp, router_node); + return 0; + } else { + gbuf_freel(mp); + return EPROTOTYPE; + } + } + + /* keep some of the tests for now ####### */ + + if (gbuf_msgsize(mp) > DDP_DATAGRAM_SIZE) { + /* the packet is too large */ + dPrintf(D_M_DDP, D_L_WARNING, + ("ddp_router_output: Packet too large size=%d\n", + gbuf_msgsize(mp))); + gbuf_freel(mp); + return (EMSGSIZE); + } + + switch (addr_type) { + + case AT_ADDR : + + /* + * Check for packet destined to the home stack + */ + + if ((ddp->dst_node == ifID->ifThisNode.s_node) && + (NET_VALUE(ddp->dst_net) == ifID->ifThisNode.s_net)) { + dPrintf(D_M_DDP_LOW, D_L_ROUTING, + ("ddp_r_output: sending back home from port=%d socket=%d\n", + ifID->ifPort, ddp->dst_socket)); + + UAS_ASSIGN(ddp->checksum, 0); + ddp_input(mp, ifID); + return(0); + } + + NET_ASSIGN(at_dest.atalk_net, router_net); + at_dest.atalk_node = router_node; + + addr_flag = AT_ADDR_NO_LOOP; + addr = (char *)&at_dest; + dPrintf(D_M_DDP_LOW, D_L_ROUTING_AT, + ("ddp_r_output: AT_ADDR out port=%d net %d:%d via rte %d:%d", + ifID->ifPort, NET_VALUE(ddp->dst_net), ddp->dst_node, router_net, + router_node)); + break; + + case ET_ADDR : + addr_flag = ET_ADDR; + addr = (char *)enet_addr; + dPrintf(D_M_DDP_LOW, D_L_ROUTING, + ("ddp_r_output: ET_ADDR out port=%d net %d:%d\n", + ifID->ifPort, NET_VALUE(ddp->dst_net), ddp->dst_node)); + break; + } + + if (ifID->ifState == LAP_OFFLINE) { + gbuf_freel(mp); + return 0; + } + + fillin_pkt_chain(mp); + + { /* begin block */ + struct etalk_addr dest_addr; + struct atalk_addr dest_at_addr; + int loop = TRUE; /* flag to aarp to loopback (default) */ + + m = mp; + + /* the incoming frame is of the form {flag, address, ddp...} + * where "flag" indicates whether the address is an 802.3 + * (link) address, or an appletalk address. 
If it's an + * 802.3 address, the packet can just go out to the network + * through PAT, if it's an appletalk address, AT->802.3 address + * resolution needs to be done. + * If 802.3 address is known, strip off the flag and 802.3 + * address, and prepend 802.2 and 802.3 headers. + */ + + if (addr == NULL) { + addr_flag = *(u_char *)gbuf_rptr(m); + gbuf_rinc(m,1); + } + + switch (addr_flag) { + case AT_ADDR_NO_LOOP : + loop = FALSE; + /* pass thru */ + case AT_ADDR : + if (addr == NULL) { + dest_at_addr = *(struct atalk_addr *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(struct atalk_addr)); + } else + dest_at_addr = *(struct atalk_addr *)addr; + break; + case ET_ADDR : + if (addr == NULL) { + dest_addr = *(struct etalk_addr *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(struct etalk_addr)); + } else + dest_addr = *(struct etalk_addr *)addr; + break; + default : + dPrintf(D_M_DDP_LOW,D_L_ERROR, + ("ddp_router_output: Unknown addr_flag = 0x%x\n", addr_flag)); + + gbuf_freel(m); /* unknown address type, chuck it */ + return 0; + } + + m = gbuf_strip(m); + + /* At this point, rptr points to ddp header for sure */ + if (ifID->ifState == LAP_ONLINE_FOR_ZIP) { + /* see if this is a ZIP packet that we need + * to let through even though network is + * not yet alive!! + */ + if (zip_type_packet(m) == 0) { + gbuf_freel(m); + return 0; + } + } + + ifID->stats.xmit_packets++; + ifID->stats.xmit_bytes += gbuf_msgsize(m); + snmpStats.dd_outLong++; + + switch (addr_flag) { + case AT_ADDR_NO_LOOP : + case AT_ADDR : + /* + * we don't want elap to be looking into ddp header, so + * it doesn't know net#, consequently can't do + * AMT_LOOKUP. That task left to aarp now. 
+ */ + aarp_send_data(m,ifID,&dest_at_addr, loop); + break; + case ET_ADDR : + pat_output(ifID, m, &dest_addr, 0); + break; + } + } /* end block */ + + return(0); +} /* ddp_router_output */ + +/*****************************************/ + +void rt_delete(NetStop, NetStart) + unsigned short NetStop; + unsigned short NetStart; +{ + RT_entry *found; + int s; + + ATDISABLE(s, ddpinp_lock); + if ((found = rt_bdelete(NetStop, NetStart)) != 0) { + bzero(found, sizeof(RT_entry)); + found->right = RT_table_freelist; + RT_table_freelist = found; + } + ATENABLE(s, ddpinp_lock); +} + +int ddp_AURPfuncx(code, param, node) + int code; + void *param; + unsigned char node; +{ + extern void rtmp_timeout(); + extern void rtmp_send_port(); + at_ifaddr_t *ifID; + int k; + + switch (code) { + case AURPCODE_DATAPKT: /* data packet */ + if (aurp_ifID) { + dPrintf(D_M_DDP, D_L_TRACE, ("ddp_AURPfuncx: data, 0x%x, %d\n", + (u_int) aurp_ifID, node)); + + ddp_input((gbuf_t *)param, aurp_ifID); + } else + gbuf_freem((gbuf_t *)param); + break; + + case AURPCODE_REG: /* register/deregister */ + if (!ROUTING_MODE) + return -1; + ddp_AURPsendx = (void(*)())param; + + if (param) { + /* register AURP callback function */ + if (aurp_ifID) + return 0; + for (k=(IFID_HOME+1); k < IF_TOTAL_MAX; k++) { + if (ifID_table[k] == 0) { + aurp_ifID = &at_interfaces[k]; + aurp_ifID->ifFlags = RTR_XNET_PORT; + ddp_add_if(aurp_ifID); + aurp_ifID->ifState = LAP_ONLINE; + aurp_ifID->ifRoutingState = PORT_ONLINE; + dPrintf(D_M_DDP, D_L_TRACE, + ("ddp_AURPfuncx: on, 0x%x\n", + (u_int) aurp_ifID)); + + ddp_AURPsendx(AURPCODE_DEBUGINFO, + &dbgBits, aurp_ifID->ifPort); + return 0; + } + } + return -1; + + } else { + /* deregister AURP callback function */ + if (aurp_ifID) { + rtmp_purge(aurp_ifID); + ddp_rem_if(aurp_ifID); + aurp_ifID->ifState = LAP_OFFLINE; + aurp_ifID->ifRoutingState = PORT_OFFLINE; + dPrintf(D_M_DDP, D_L_TRACE, + ("ddp_AURPfuncx: off, 0x%x\n", (u_int) aurp_ifID)); + aurp_ifID = 0; + } + } + break; + + 
case AURPCODE_AURPPROTO: /* proto type - AURP */ + if (aurp_ifID) { + aurp_ifID->ifFlags |= AT_IFF_AURP; + } + break; + } + + return 0; +} + + +/* checks to see if address of packet is for one of our interfaces + returns *ifID if it's for us, NULL if not +*/ +at_ifaddr_t *forUs(ddp) + register at_ddp_t *ddp; +{ + at_ifaddr_t *ifID; + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if ((ddp->dst_node == ifID->ifThisNode.s_node) && + (NET_VALUE(ddp->dst_net) == ifID->ifThisNode.s_net) + ) { + dPrintf(D_M_DDP_LOW, D_L_ROUTING, + ("pkt was for port %d\n", ifID->ifPort)); + + return(ifID); + } + } + + return((at_ifaddr_t *)NULL); +} /* forUs */ diff --git a/bsd/netat/ddp.h b/bsd/netat/ddp.h new file mode 100644 index 000000000..142068932 --- /dev/null +++ b/bsd/netat/ddp.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ORIGINS: 82 + * + * (C) COPYRIGHT Apple Computer, Inc. 
1992-1996 + * All Rights Reserved + * + */ + +#ifndef _NETAT_DDP_H_ +#define _NETAT_DDP_H_ + +/* Header and data sizes */ + +#define DDP_HDR_SIZE 5 /* DDP (short) header size */ +#define DDP_X_HDR_SIZE 13 /* DDP extended header size */ +#define DDP_DATA_SIZE 586 /* Maximum DataGram data size */ +#define DDP_DATAGRAM_SIZE 599 /* Maximum DataGram size */ + +/* DDP socket definitions */ + +#define DDP_SOCKET_1st_RESERVED 1 /* First in reserved range */ +#define DDP_SOCKET_1st_EXPERIMENTAL 64 /* First in experimental range */ +#define DDP_SOCKET_1st_DYNAMIC 128 /* First in dynamic range */ +#define DDP_SOCKET_LAST 253 /* Last socket in any range */ + +/* DDP type used to replace "0" on packets sent out, for compatibility + with Open Transport */ +#define DEFAULT_OT_DDPTYPE 11 + +/* DDP well-known sockets */ + +#define RTMP_SOCKET 1 /* RTMP socket number */ +#define NBP_SOCKET 2 /* NIS socket number */ +#define EP_SOCKET 4 /* EP socket number */ +#define ZIP_SOCKET 6 /* ZIP socket number */ + +/* DDP extended header packet format */ + +typedef struct { + unsigned unused:2, + hopcount:4, + length:10; /* Datagram length */ + ua_short checksum; /* Checksum */ + at_net dst_net; /* Destination network number */ + at_net src_net; /* Source network number */ + at_node dst_node; /* Destination node ID */ + at_node src_node; /* Source node ID */ + at_socket dst_socket; /* Destination socket number */ + at_socket src_socket; /* Source socket number */ + u_char type; /* Protocol type */ + char data[DDP_DATA_SIZE]; +} at_ddp_t; + + +#define DDPLEN_ASSIGN(ddp, len) ddp->length = len +#define DDPLEN_VALUE(ddp) ddp->length + + +/* DDP module statistics and configuration */ + +typedef struct at_ddp_stats { + /* General */ + + /* Receive stats */ + u_int rcv_bytes; + u_int rcv_packets; + u_int rcv_bad_length; + u_int rcv_unreg_socket; + u_int rcv_bad_socket; + u_int rcv_bad_checksum; + u_int rcv_dropped_nobuf; + + /* Transmit stats */ + u_int xmit_bytes; + u_int xmit_packets; + u_int 
xmit_BRT_used; + u_int xmit_bad_length; + u_int xmit_bad_addr; + u_int xmit_dropped_nobuf; +} at_ddp_stats_t; + +/* DDP streams module ioctls */ + +#define AT_MID_DDP 203 + +#define DDP_IOC_MYIOCTL(i) ((i>>8) == AT_MID_DDP) +#define DDP_IOC_GET_CFG ((AT_MID_DDP<<8) | 1) + +#ifdef NOT_USED +#define DDP_IOC_BIND_SOCK ((AT_MID_DDP<<8) | 2) +#define DDP_IOC_GET_STATS ((AT_MID_DDP<<8) | 3) +#define DDP_IOC_LSTATUS_TABLE ((AT_MID_DDP<<8) | 4) +#define DDP_IOC_ULSTATUS_TABLE ((AT_MID_DDP<<8) | 5) +#define DDP_IOC_RSTATUS_TABLE ((AT_MID_DDP<<8) | 6) +#define DDP_IOC_SET_WROFF ((AT_MID_DDP<<8) | 7 ) +#define DDP_IOC_SET_OPTS ((AT_MID_DDP<<8) | 8 ) +#define DDP_IOC_GET_OPTS ((AT_MID_DDP<<8) | 9 ) +#define DDP_IOC_GET_SOCK ((AT_MID_DDP<<8) | 10) +#define DDP_IOC_GET_PEER ((AT_MID_DDP<<8) | 11) +#define DDP_IOC_SET_PEER ((AT_MID_DDP<<8) | 12) +#define DDP_IOC_SET_PROTO ((AT_MID_DDP<<8) | 13) +#endif + +#ifdef KERNEL + +#define DDP_MIN_NETWORK 0x0001 +#define DDP_MAX_NETWORK 0xfffe +#define DDP_STARTUP_LOW 0xff00 +#define DDP_STARTUP_HIGH DDP_MAX_NETWORK + +typedef struct { + void **inputQ; + int *pidM; + char **socketM; + char *dbgBits; +} proto_reg_t; + +/* *** note: this counts on the src address always being that of the + home port + *** */ +#define FROM_US(ddp) ((NET_VALUE(ddp->src_net) ==\ + ifID_home->ifThisNode.s_net) && \ + ifID_home->ifThisNode.s_node == ddp->src_node) + +#define RT_LOOKUP_OKAY(ifID, ddp) \ + ((ROUTING_MODE && ifID->ifRoutingState==PORT_ONLINE) || \ + (MULTIHOME_MODE && FROM_US(ddp))) + +#ifdef NOT_YET +/* from sys_glue.c */ +int ddp_adjmsg(gbuf_t *m, int len); +gbuf_t *ddp_growmsg(gbuf_t *mp, int len); + +/* from ddp.c */ +int ddp_add_if(at_ifaddr_t *ifID); +int ddp_rem_if(at_ifaddr_t *ifID); +int ddp_bind_socket(ddp_socket_t *socketp); +int ddp_close_socket(ddp_socket_t *socketp); +int ddp_output(gbuf_t **mp, at_socket src_socket, int src_addr_included); +void ddp_input(gbuf_t *mp, at_ifaddr_t *ifID); +int ddp_router_output( + gbuf_t *mp, + 
at_ifaddr_t *ifID, + int addr_type, + at_net_al router_net, + at_node router_node, + etalk_addr_t *enet_addr); + +/* from ddp_proto.c */ +int ddp_close(gref_t *gref); +void ddp_putmsg(gref_t *gref, gbuf_t *mp); +gbuf_t *ddp_compress_msg(gbuf_t *mp); +void ddp_stop(gbuf_t *mioc, gref_t *gref); + +/* in ddp_lap.c */ +void ddp_bit_reverse(unsigned char *); + +#endif /* NOT_YET */ + +/* in ddp_lap.c */ +int ddp_shutdown(int); + +#endif /* KERNEL */ +#endif /* _NETAT_DDP_H_ */ diff --git a/bsd/netat/ddp.save b/bsd/netat/ddp.save new file mode 100644 index 000000000..4fa9acea6 --- /dev/null +++ b/bsd/netat/ddp.save @@ -0,0 +1,903 @@ +/* From ddp.c: + + ddp_shrink_hdr() + ddp_extend_hdr() + + Saved from xnu/bsd/bsd/netat/ddp.c on 4/14/99. +*/ + +#ifdef NOT_USED +/* This routine shrinks the ddp header from long to short, + * It also prepends ALAP header and fills up some of the + * fields as appropriate. + */ +static at_ddp_short_t *ddp_shrink_hdr (mp) +register gbuf_t *mp; +{ + register at_ddp_t *ddp; + register at_ddp_short_t *ddp_short; + register at_llap_hdr_t *llap; + gbuf_t *newmp; + + if ((newmp = (gbuf_t *)gbuf_copym((gbuf_t *) mp)) == (gbuf_t *)NULL) + return ((at_ddp_short_t *)NULL); + gbuf_freem(mp); + mp = newmp; + + ddp = (at_ddp_t *)gbuf_rptr(mp); + gbuf_rinc(mp,((DDP_X_HDR_SIZE - DDP_HDR_SIZE) - LLAP_HDR_SIZE)); + llap = (at_llap_hdr_t *)gbuf_rptr(mp); + ddp_short = (at_ddp_short_t *)(gbuf_rptr(mp) + LLAP_HDR_SIZE); + + llap->destination = ddp->dst_node; + llap->type = LLAP_TYPE_DDP; + ddp_short->length = ddp->length - (DDP_X_HDR_SIZE - DDP_HDR_SIZE); + ddp_short->unused = 0; + return ((at_ddp_short_t *)mp); +} + + +/* mp points to message of the form {llap, short ddp, ...}. + * Get rid of llap, extend ddp header to make it of the form + * {extended ddp, ... 
} + */ +static gbuf_t *ddp_extend_hdr(mp) +register gbuf_t *mp; +{ + register at_llap_hdr_t *llap; + register at_ddp_short_t *ddp_short; + register at_ddp_t *ddp; + char buf[DDP_HDR_SIZE + LLAP_HDR_SIZE]; + gbuf_t *m1, *m2; + + /* We need to remove the llap header from the packet and extend the + * short DDP header in to a long one. 5 bytes of additional space + * is required in effect, but we can not afford to put these 5 bytes + * in a separate buffer, since the ddp buffer would end up being + * fragmented into two pieces, which is a no-no. So, we first get + * rid of the llap and ddp short headers and then add the extended + * header. + */ + + /* Assuming that the llap and ddp short headers are placed next + * to each other in the same buffer + */ + bcopy(gbuf_rptr(mp), buf, LLAP_HDR_SIZE + DDP_HDR_SIZE); + m1 = ddp_adjmsg(mp, LLAP_HDR_SIZE+DDP_HDR_SIZE) ? mp : 0; + + /* If the message did not contain any ddp data bytes, then m would + * be NULL at this point... and we can't just grow a NULL message, + * we need to ALLOC a new one. + */ + if (m1) { + if ((m2 = (gbuf_t *)ddp_growmsg(m1, -DDP_X_HDR_SIZE)) == NULL) { + dPrintf(D_M_DDP, D_L_WARNING, + ("Dropping packet - no bufs to extend hdr")); + at_ddp_stats.rcv_dropped_nobuf++; + gbuf_freem(m1); + return(NULL); + } + } else + /* Original message mp has already been freed by ddp_adjmsg if we + * managed to arrive here... this case occurs only when the + * message mp did not contain any ddp data bytes, only lap and + * ddp headers + */ + if ((m2 = gbuf_alloc(AT_WR_OFFSET+DDP_X_HDR_SIZE, PRI_MED)) == NULL) { + dPrintf(D_M_DDP,D_L_WARNING, + ("Packet (no data) dropped - no bufs to extend hdr")); + at_ddp_stats.rcv_dropped_nobuf++; + return(NULL); + } else { + gbuf_rinc(m2,AT_WR_OFFSET); + gbuf_wset(m2,DDP_X_HDR_SIZE); + } + + /* By the time we arrive here, m2 points to message of the form + * {Extended DDP, ... } + * mp and m1 are either non-existent or irrelevant. 
+ */ + ddp = (at_ddp_t *)gbuf_rptr(m2); + llap = (at_llap_hdr_t *)buf; + ddp_short = (at_ddp_short_t *)(buf + LLAP_HDR_SIZE); + + ddp->unused = ddp->hopcount = 0; + ddp->length = ddp_short->length + DDP_X_HDR_SIZE - DDP_HDR_SIZE; + UAS_ASSIGN(ddp->checksum, 0); + NET_NET(ddp->dst_net, ifID_home->ifThisNode.atalk_net); + NET_NET(ddp->src_net, ifID_home->ifThisNode.atalk_net); + ddp->src_node = llap->source; + ddp->dst_node = llap->destination; + ddp->dst_socket = ddp_short->dst_socket; + ddp->src_socket = ddp_short->src_socket; + ddp->type = ddp_short->type; + return (m2); +} +#endif + +From sys_dep.c: + +#ifdef _AIX /* This AIX code (to the end of this file) is no longer supported. */ + +int ATsocket(proto) /* AIX version */ + int proto; +{ + int err, rc = -1; + + if (sys_ATsocket) + rc = (*sys_ATsocket)(proto, &err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +int ATgetmsg(fd, ctlptr, datptr, flags) /* AIX version */ + int fd; + void *ctlptr; + void *datptr; + int *flags; +{ + int err, rc = -1; + + if (sys_ATgetmsg) + rc = (*sys_ATgetmsg)(fd, ctlptr, datptr, flags, &err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +int ATputmsg(fd, ctlptr, datptr, flags) /* AIX version */ + int fd; + void *ctlptr; + void *datptr; + int flags; +{ + int err, rc = -1; + + if (sys_ATputmsg) + rc = (*sys_ATputmsg)(fd, ctlptr, datptr, flags, &err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +int ATPsndreq(fd, buf, len, nowait) /* AIX version */ + int fd; + unsigned char *buf; + int len; + int nowait; +{ + int err, rc = -1; + + if (sys_ATPsndreq) + rc = (*sys_ATPsndreq)(fd, buf, len, nowait, &err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +int ATPsndrsp(fd, respbuff, resplen, datalen) /* AIX version */ + int fd; + unsigned char *respbuff; + int resplen; + int datalen; +{ + int err, rc = -1; + + if (sys_ATPsndrsp) + rc = (*sys_ATPsndrsp)(fd, respbuff, resplen, datalen, 
&err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +int ATPgetreq(fd, buf, buflen) /* AIX version */ + int fd; + unsigned char *buf; + int buflen; +{ + int err, rc = -1; + + if (sys_ATPgetreq) + rc = (*sys_ATPgetreq)(fd, buf, buflen, &err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +int ATPgetrsp(fd, bdsp) /* AIX version */ + int fd; + unsigned char *bdsp; +{ + int err, rc = -1; + + if (sys_ATPgetrsp) + rc = (*sys_ATPgetrsp)(fd, bdsp, &err, 0); + else + err = ENXIO; + if (err) + setuerror(err); + return rc; +} + +void *atalk_kalloc(size) /* AIX version */ + int size; +{ + return (void *)xmalloc(size, 2, pinned_heap); +} + +void atalk_kfree(buf) /* AIX version */ + void *buf; +{ + xmfree(buf, pinned_heap); +} + +int atalk_closeref(fp, grefp) /* AIX version */ + struct file *fp; + gref_t **grefp; +{ + *grefp = (gref_t *)fp->f_data; + fp->f_data = 0; + return 0; +} + +int atalk_openref(gref, retfd, proc) /* AIX version */ + gref_t *gref; + int *retfd; + void *proc; +{ +extern int _ATrw(), _ATioctl(), _ATselect(), _ATclose(), _ATstat(); +static struct fileops fileops = {_ATrw, _ATioctl, _ATselect, _ATclose, _ATstat}; + int err, fd; + struct file *fp; + void *crp; + + crp = (void *)crref(); +#ifdef _AIX + if ((err = ufdcreate(FREAD|FWRITE, + &fileops, 0, DTYPE_OTHER, &fd, crp)) != 0) +#else + if ((err = ufdcreate(FREAD|FWRITE, + &fileops, 0, DTYPE_ATALK, &fd, crp)) != 0) +#endif + return err; + *retfd = fd; + fp = U.U_ufd[fd].fp; + fp->f_data = (void *)gref; + gref->next = (void *)fp; + return 0; +} + +int atalk_getref(fp, fd, grefp, proc) /* AIX version */ + struct file *fp; + int fd; + gref_t **grefp; + struct proc *proc; +{ + if (fp == 0) { + if ((fd < 0) || (fd > U.U_maxofile) || ((fp = U.U_ufd[fd].fp) == 0)) { + *grefp = (gref_t *)0; + return EBADF; + } + } + if ((*grefp = (gref_t *)fp->f_data) == 0) + return EBADF; + return 0; +} + +gbuf_t *gbuf_alloc(size, pri) /* AIX version */ + int size; + int pri; +{ + 
gbuf_t *m; + + m = (size > MHLEN) ? (gbuf_t *)m_getclustm(M_DONTWAIT, MSG_DATA, size) + : (gbuf_t *)m_gethdr(M_DONTWAIT, MSG_DATA); +#ifdef APPLETALK_DEBUG + kprintf("gbuf_alloc: for size = %d m=%x\n", size, m); +#endif + gbuf_next(m) = 0; + gbuf_cont(m) = 0; + gbuf_wset(m,0); + return m; +} + +void gbuf_freeb(m) /* AIX version */ + gbuf_t *m; +{ + if (m) + m_free(m); +} + +static struct trb *trb_freehead = 0; +static struct trb *trb_freetail = 0; +static struct trb *trb_pendhead = 0; +static int trb_cnt = 0; +static atlock_t trb_lock; + +static void atalk_rem_timeoutcf() /* AIX version */ +{ + register int s; + register struct trb *trb; + register struct trb *tmp_freehead, *tmp_pendhead; + + ATDISABLE(s, trb_lock); + tmp_freehead = trb_freehead; + trb_freehead = 0; + tmp_pendhead = trb_pendhead; + trb_pendhead = 0; + trb_cnt = 0; + ATENABLE(s, trb_lock); + while ((trb = tmp_pendhead) != 0) { + tmp_pendhead = trb->to_next; + while (tstop(trb)); + tfree(trb); + } + while ((trb = tmp_freehead) != 0) { + tmp_freehead = trb->to_next; + tfree(trb); + } + dPrintf(D_M_ATP,D_L_ERROR, "atalk: timer stopped!\n",0,0,0,0,0); +} + +static void atalk_timeoutcf(cnt) /* AIX version */ + int cnt; +{ + register int i; + register struct trb *trb; + + if (trb_freehead == 0) { + for (i=0; i < cnt-1; i++) { + trb = (struct trb *)talloc(); + trb->to_next = trb_freehead; + trb_freehead = trb; + if (!i) trb_freetail = trb; + trb_cnt++; + } + } + ATLOCKINIT(trb_lock); +} + +static void atalk_clock(trb) /* AIX version */ + register struct trb *trb; +{ + register int s; + register struct trb *next; + void (*tof)(); + void *arg; + + ATDISABLE(s, trb_lock); + if (trb_pendhead && trb->func) { + /* + * remove the timeout from the pending queue + */ + if (trb_pendhead == trb) + trb_pendhead = trb->to_next; + else { + for (next=trb_pendhead; next->to_next; next=next->to_next) { + if (next->to_next == trb) { + next->to_next = trb->to_next; + trb->func = 0; + break; + } + } + if (trb->func) { + 
dPrintf(D_M_ATP,D_L_WARNING, + "atalk_clock: %d,%x,%x\n", trb_cnt,trb,trb_pendhead,0,0); + /* + * we have not found the trb in the pending list - something + * has gone wrong here. maybe the trb has been returned to + * the free list; in which case, we should simply ignore + * this timeout event! + */ + for (next=trb_freehead; next; next=next->to_next) { + if (next == trb) + { + ATENABLE(s, trb_lock); + return; + } + } + /* + * the trb is not in the free list either - something has + * really gone wacky here! all we can do now is put the + * trb back into the free list and hope that it will be ok. + */ + trb->to_next = 0; + if (trb_freehead) + trb_freetail->to_next = trb; + else + trb_freehead = trb; + trb_freetail = trb; + trb_cnt++; + ATENABLE(s, trb_lock); + return; + } + } + + /* + * process the timeout + */ + trb->func = 0; + trb->to_next = 0; + tof = trb->tof; + trb->tof = 0; + arg = (void *)trb->func_data; + trb->func_data = 999; + if (trb_freehead) + trb_freetail->to_next = trb; + else + trb_freehead = trb; + trb_freetail = trb; + trb_cnt++; + ATENABLE(s, trb_lock); + if (tof) { + dPrintf(D_M_ATP,D_L_VERBOSE, "atalk_clock: func=%x, arg=%x, %d\n", + tof,arg,trb_cnt,0,0); + (*tof)(arg); + } else { + dPrintf(D_M_ATP,D_L_ERROR, "atalk_clock: func=%x, arg=%x, %d\n", + tof,arg,trb_cnt,0,0); + } + } else + ATENABLE(s, trb_lock); +} + +void *atalk_timeout(func, arg, ticks) /* AIX version */ + void (*func)(); + void *arg; + int ticks; +{ + register int s; + register struct trb *trb; + + dPrintf(D_M_ATP,D_L_VERBOSE, + "atalk_timeout: func=%x,arg=%x,time=%d, %d,%x\n", func,arg,ticks,trb_cnt,trb_pendhead); + /* + * set up the timeout request + */ + ATDISABLE(s, trb_lock); + if ((trb = trb_freehead) == 0) { + ATENABLE(s, trb_lock); + dPrintf(D_M_ATP,D_L_WARNING, + "atalk_timeout: NO TRB! 
time=%d, %d\n", ticks,trb_cnt,0,0,0); + return 0; + } + trb_freehead = trb->to_next; + trb->to_next = trb_pendhead; + trb_pendhead = trb; + trb_cnt--; + trb->timeout.it_value.tv_sec = ticks / HZ; + trb->timeout.it_value.tv_nsec = (ticks % HZ) * (NS_PER_SEC / HZ); + trb->knext = 0; + trb->kprev = 0; + trb->flags = 0; + trb->tof = func; + trb->func = (void (*)())atalk_clock; + trb->func_data = (ulong)arg; + trb->ipri = PL_IMP; + trb->id = -1; + + /* + * start the timeout + */ + ATENABLE(s, trb_lock); + tstart(trb); + return (void *)trb; +} + +void atalk_untimeout(func, arg, trb) /* AIX version */ + void (*func)(); + void *arg; + register struct trb *trb; +{ + register int s; + register struct trb *next; + + dPrintf(D_M_ATP,D_L_VERBOSE, + "atalk_untimeout: func=%x,arg=%x, %d\n", func,arg,trb_cnt,0,0); + + ATDISABLE(s, trb_lock); + if (trb == 0) { + for (trb=trb_pendhead; trb; trb=trb->to_next) { + if ((func == trb->tof) && (arg == (void *)trb->func_data)) + break; + } + } + if (trb && (trb->func == (void (*)())atalk_clock) + && (func == trb->tof) && (arg == (void *)trb->func_data)) { + trb->func_data = 999; + if (!(trb->flags & T_PENDING)) + { + trb->tof = 0; + ATENABLE(s, trb_lock); + return; + } + trb->func = 0; + while (tstop(trb)); + if (trb_pendhead == trb) + trb_pendhead = trb->to_next; + else { + for (next=trb_pendhead; next->to_next != trb; next=next->to_next) { + if (next->to_next == 0) { + ATENABLE(s, trb_lock); + dPrintf(D_M_ATP,D_L_WARNING, + "atalk_untimeout: UNKNOWN TRB %x...\n",trb,0,0,0,0); + return; + } + } + next->to_next = trb->to_next; + } + trb->to_next = 0; + trb_freetail->to_next = trb; + trb_freetail = trb; + trb_cnt++; + } + ATENABLE(s, trb_lock); +} + +int config_atalk(dev, cmd, uiop) /* AIX only */ +dev_t dev; +int cmd; +void *uiop; +{ + static int loaded = 0; + int err, nest; + + err = 0; + nest = lockl(&kernel_lock, LOCK_SHORT); + + if (cmd == CFG_INIT) { + if (loaded) + goto out; + vm_protect(0, 4096, 3); + atalk_timeoutcf(256); + 
atalk_load(); + loaded = 1; + + } else if (cmd == CFG_TERM) { + if (!loaded) + goto out; + atalk_rem_timeoutcf(); + atalk_unload(); + loaded = 0; + + } else + err = EINVAL; + +out: + if (nest != LOCK_NEST) + unlockl(&kernel_lock); + return(err); +} + +#endif + +From sys_glue.c: + +#ifdef _AIX /* AIX code, to the end of this file, is no longer supported. */ + +int _ATselect(fp, corl, reqevents, retevents, notify) /* AIX version */ + void *fp; + int corl; + unsigned short reqevents; + unsigned short *retevents; + void (*notify)(); +{ + int s, err, rc = 0; + gref_t *gref; + unsigned short sevents = 0; + + if ((err = atalk_getref(fp, 0, &gref, 0)) != 0) + return err; + + ATDISABLE(s, gref->lock); + if (reqevents & POLLIN) { + if (gref->rdhead || (gref->readable && (*gref->readable)(gref))) + sevents |= POLLIN; + } + + if (reqevents & POLLOUT) { + if (gref->writeable) { + if ((*gref->writeable)(gref)) + sevents |= POLLOUT; + } else + sevents |= POLLOUT; + } + + if ((sevents == 0) && ((reqevents & POLLSYNC) == 0)) { + if (rc = selreg(corl, 99, gref, reqevents, notify)) { + ATENABLE(s, gref->lock); + goto l_done; + } + + if (reqevents & POLLIN) { + if (gref->rdhead || (gref->readable && (*gref->readable)(gref))) + sevents |= POLLIN; + else + gref->sevents |= POLLIN; + } + + if (reqevents & POLLOUT) { + if (gref->writeable) { + if ((*gref->writeable)(gref)) + sevents |= POLLOUT; + else + gref->sevents |= POLLOUT; + } else + sevents |= POLLOUT; + } + } + ATENABLE(s, gref->lock); + *retevents = sevents; + +l_done: + return rc; +} +#endif /* end AIX section */ + +From drv_dep.c: + + + + +#ifdef _AIX +/* AIX section to end of file (not supported) */ + +/* from beginning of file ... 
*/ +#include +#include +static struct ns_8022 elap_link; /* The SNAP header description */ +static struct ns_user elap_user; /* The interface to the demuxer */ + +int +pat_ifpresent(name) /* AIX */ + char *name; +{ + return (int)ifunit(name); +} + +int +pat_output(pat_id, mlist, dst_addr, type) /* AIX */ + int pat_id; + gbuf_t *mlist; + unsigned char *dst_addr; + int type; +{ + int len; + pat_unit_t *patp; + gbuf_t *m, *m_prev, *new_mlist, *m_temp; + struct ndd *nddp; + short size; + enet_header_t *enet_header; + llc_header_t *llc_header; + + patp = (pat_unit_t *)&pat_units[pat_id]; + if (patp->state != PAT_ONLINE) { + gbuf_freel(mlist); + return ENOTREADY; + } + + if (patp->xtype == IFTYPE_NULLTALK) { + gbuf_freel(mlist); + return 0; + } + + nddp = (void *)patp->nddp; + new_mlist = 0; + + for (m = mlist; m; m = mlist) { + mlist = gbuf_next(m); + gbuf_next(m) = 0; + + gbuf_prepend(m,ENET_LLC_SIZE); + if (m == 0) { + if (mlist) + gbuf_freel(mlist); + if (new_mlist) + gbuf_freel(new_mlist); + return 0; + } + + enet_header = (enet_header_t *)gbuf_rptr(m); + bcopy(dst_addr, enet_header->dst, sizeof(enet_header->dst)); + bcopy(patp->xaddr, enet_header->src, sizeof(enet_header->src)); + size = gbuf_msgsize(m); + enet_header->len = size - sizeof(enet_header_t); + llc_header = (llc_header_t *)(gbuf_rptr(m)+sizeof(enet_header_t)); + *llc_header = (type == AARP_AT_TYPE) ? 
snap_hdr_aarp : snap_hdr_at; + + m->m_pkthdr.len = size; + m->m_pkthdr.rcvif = 0; + + if (new_mlist) + gbuf_next(m_prev) = m; + else + new_mlist = m; + m_prev = m; + pktsOut++; + } + + if (new_mlist) + (*nddp->ndd_output)(nddp, new_mlist); + + return 0; +} + +int +pat_online (ifName, ifType) /* AIX */ + char *ifName; + char *ifType; +{ + void pat_input(); + int pat_id; + pat_unit_t *patp; + struct ndd *nddp; + char ns_name[8]; + + if ((pat_id = pat_ID(ifName)) == -1) + return (-1); + patp = &pat_units[pat_id]; + + if (patp->xtype == IFTYPE_ETHERTALK) { + ns_name[0] = ifName[0]; + ns_name[1] = 'n'; + strcpy(&ns_name[2], &ifName[1]); + } else if (patp->xtype == IFTYPE_NULLTALK) { + patp->xaddrlen = 6; + bzero(patp->xaddr, patp->xaddrlen); + if (ifType) + *ifType = patp->xtype; + patp->nddp = (void *)0; + patp->state = PAT_ONLINE; + at_statep->flags |= AT_ST_IF_CHANGED; + return (pat_id); + } else + return -1; + + if (ns_alloc(ns_name, &nddp)) + return -1; + + bzero(&elap_user, sizeof(elap_user)); + elap_user.isr = pat_input; + elap_user.pkt_format = NS_HANDLE_HEADERS|NS_INCLUDE_MAC; + + elap_link.filtertype = NS_8022_LLC_DSAP_SNAP; + elap_link.orgcode[0] = 0; + elap_link.orgcode[2] = 0; + elap_link.dsap = DSAP_SNAP; + elap_link.ethertype = 0x80f3; /* AARP SNAP code */ + if (ns_add_filter(nddp, &elap_link, sizeof(elap_link), &elap_user)) + return -1; + + elap_link.orgcode[0] = 0x08; + elap_link.orgcode[2] = 0x07; + elap_link.ethertype = 0x809b; /* DDP SNAP code */ + if (ns_add_filter(nddp, &elap_link, sizeof(elap_link), &elap_user)) { + elap_link.orgcode[0] = 0; + elap_link.orgcode[2] = 0; + elap_link.ethertype = 0x80f3; /* AARP SNAP code */ + (void)ns_del_filter(nddp, &elap_link, sizeof(elap_link)); + return -1; + } + + patp->xaddrlen = nddp->ndd_addrlen; + bcopy(nddp->ndd_physaddr, patp->xaddr, patp->xaddrlen); + + if (ifType) + *ifType = patp->xtype; + + patp->nddp = (void *)nddp; + patp->state = PAT_ONLINE; + at_statep->flags |= AT_ST_IF_CHANGED; + + return 
(pat_id); +} + +void +pat_offline(pat_id) /* AIX */ + int pat_id; +{ + pat_unit_t *patp = &pat_units[pat_id]; + + if (patp->state == PAT_ONLINE) { + if (patp->xtype != IFTYPE_NULLTALK) { + elap_link.filtertype = NS_8022_LLC_DSAP_SNAP; + elap_link.orgcode[0] = 0; + elap_link.orgcode[2] = 0; + elap_link.dsap = DSAP_SNAP; + elap_link.ethertype = 0x80f3; /* AARP SNAP code */ + (void)ns_del_filter(patp->nddp, &elap_link, sizeof(elap_link)); + elap_link.orgcode[0] = 0x08; + elap_link.orgcode[2] = 0x07; + elap_link.ethertype = 0x809b; /* DDP SNAP code */ + (void)ns_del_filter(patp->nddp, &elap_link, sizeof(elap_link)); + ns_free(patp->nddp); + } + at_statep->flags |= AT_ST_IF_CHANGED; + bzero(patp, sizeof(pat_unit_t)); + } +} + +int +pat_mcast(pat_id, control, data) /* AIX */ + int pat_id; + int control; + unsigned char *data; +{ + struct ndd *nddp; + + nddp = (struct ndd *)pat_units[pat_id].nddp; + return (*nddp->ndd_ctl)(nddp, (control == PAT_REG_MCAST) ? + NDD_ENABLE_ADDRESS : NDD_DISABLE_ADDRESS, + data, nddp->ndd_addrlen); +} + +void +pat_input(nddp, m, unused) /* AIX */ + struct ndd *nddp; + gbuf_t *m; + void *unused; +{ + extern int ddprunning_flag; + llc_header_t *llc_header; + int pat_id; + pat_unit_t *patp; + char src[6]; + enet_header_t *enet_header = (enet_header_t *)gbuf_rptr(m); + + for (pat_id=0, patp = &pat_units[pat_id]; + pat_id < xpatcnt; pat_id++, patp++) { + if ((patp->state == PAT_ONLINE) && (patp->nddp == nddp)) + break; + } + if (pat_id == xpatcnt) { + gbuf_freem(m); + return; + } + + /* Ignore multicast packets from local station */ + if (patp->xtype == IFTYPE_ETHERTALK) { + bcopy((char *)enet_header->src, src, sizeof(src)); + if ((enet_header->dst[0] & 1) && + (bcmp(src, patp->xaddr, sizeof(src)) == 0)) { + gbuf_freem(m); + return; + } + llc_header = (llc_header_t *)(enet_header+1); + } + + gbuf_rinc(m,(ENET_LLC_SIZE)); + (void)fetch_and_add((atomic_p)&ddprunning_flag, 1); + pktsIn++; + if (LLC_PROTO_EQUAL(llc_header->protocol,snap_proto_aarp)) { 
+ patp->aarp_func(gbuf_rptr(m), patp->context); + gbuf_freem(m); + } else if (LLC_PROTO_EQUAL(llc_header->protocol,snap_proto_ddp)) { + /* if we're a router take all pkts */ + if (!ROUTING_MODE) { + if (patp->addr_check(gbuf_rptr(m), patp->context) + == AARP_ERR_NOT_OURS) { + gbuf_freem(m); + (void)fetch_and_add((atomic_p)&ddprunning_flag, -1); + return; + } + } + gbuf_set_type(m, MSG_DATA); + elap_input(m, patp->context, src); + } else + gbuf_freem(m); + (void)fetch_and_add((atomic_p)&ddprunning_flag, -1); +} +#endif /* AIX */ diff --git a/bsd/netat/ddp_aarp.c b/bsd/netat/ddp_aarp.c new file mode 100644 index 000000000..1fbb21e43 --- /dev/null +++ b/bsd/netat/ddp_aarp.c @@ -0,0 +1,984 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1988, 1989, 1997, 1998 Apple Computer, Inc. + * + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +/* at_aarp.c: 2.0, 1.17; 10/4/93; Apple Computer, Inc. */; + +/* This file is at_aarp.c and it contains all the routines used by AARP. This + * is part of the LAP layer. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static int probing; +/* Following two variables are used to keep track of how many dynamic addresses + * we have tried out at startup. + */ +int no_of_nodes_tried; /* no of node addresses we've tried + * so far, within a network number + */ +int no_of_nets_tried; /* no. of network numbers tried + */ + +struct etalk_addr et_zeroaddr = { + {0, 0, 0, 0, 0, 0}}; + +aarp_amt_t probe_cb; +aarp_amt_array *aarp_table[IF_TOTAL_MAX]; + +int aarp_init1(), aarp_init2(); +int aarp_send_data(); +int aarp_sched_probe(); + +StaticProc int aarp_req_cmd_in(); +StaticProc int aarp_resp_cmd_in(); +StaticProc int aarp_probe_cmd_in(); +StaticProc int aarp_send_resp(); +StaticProc int aarp_send_req(); +StaticProc int aarp_send_probe(); +StaticProc aarp_amt_t *aarp_lru_entry(); +StaticProc int aarp_glean_info(); +StaticProc int aarp_delete_amt_info(); +StaticProc void aarp_build_pkt(); +StaticProc int aarp_sched_req(); +StaticProc int aarp_get_rand_node(); +StaticProc int aarp_get_next_node(); +StaticProc int aarp_get_rand_net(); +atlock_t arpinp_lock; + +extern void AARPwakeup(aarp_amt_t *); +extern int pat_output(at_ifaddr_t *, gbuf_t *, unsigned char *, int); + +/**************************************************************************** + * aarp_init() + * + ****************************************************************************/ + +int aarp_init1(elapp) + register at_ifaddr_t *elapp; +{ + elapp->ifThisNode.s_net = 0; + elapp->ifThisNode.s_node = 0; + + if (probing != PROBE_TENTATIVE) /* How do I set the initial probe */ + probing = PROBE_IDLE; /* state ???*/ + else { + dPrintf(D_M_AARP,D_L_ERROR, + ("aarp_init: error :probing == PROBE_TENTATIVE\n")); + return(-1); + } + + /* pick a random addr or start with what we have from 
initial_node addr */ + if (elapp->initial_addr.s_net == 0 && elapp->initial_addr.s_node == 0) { + dPrintf(D_M_AARP, D_L_INFO, + ("aarp_init: pick up a new node number\n")); + aarp_get_rand_node(elapp); + aarp_get_rand_net(elapp); + } + probe_cb.elapp = elapp; + probe_cb.no_of_retries = 0; + probe_cb.error = 0; + + no_of_nodes_tried = 0; /* haven't tried any addresses yet */ + no_of_nets_tried = 0; + + if (aarp_send_probe() == -1) { + probing = PROBE_IDLE; /* not probing any more */ + dPrintf(D_M_AARP, D_L_ERROR, + ("aarp_init: aarp_send_probe returns error\n")); + return(-1); + } + return(ENOTREADY); +} + +int aarp_init2(elapp) + register at_ifaddr_t *elapp; +{ + if (probe_cb.error != 0) { + probing = PROBE_IDLE; /* not probing any more */ + dPrintf(D_M_AARP, D_L_ERROR, + ("aarp_init: probe_cb.error creates error =%d\n", + probe_cb.error)); + return(-1); + } + + if (aarp_table[elapp->ifPort]) + bzero ((caddr_t)&aarp_table[elapp->ifPort]->et_aarp_amt[0], + sizeof(aarp_amt_array)); + else + return(-1); + + elapp->ifThisNode = elapp->initial_addr; + probing = PROBE_DONE; + + return(0); +} + +/**************************************************************************** + * aarp_rcv_pkt() + * + * remarks : + * (1) The caller must take care of freeing the real storage (gbuf) + * (2) The incoming packet is of the form {802.3, 802.2, aarp}. 
+ * + ****************************************************************************/ +int aarp_rcv_pkt(pkt, elapp) + aarp_pkt_t *pkt; + at_ifaddr_t *elapp; +{ + switch (pkt->aarp_cmd) { + case AARP_REQ_CMD: + return (aarp_req_cmd_in (pkt, elapp)); + case AARP_RESP_CMD: + return (aarp_resp_cmd_in (pkt, elapp)); + case AARP_PROBE_CMD: + return (aarp_probe_cmd_in (pkt, elapp)); + default: + return (-1); + }/* end of switch*/ +} + +/**************************************************************************** + * aarp_req_cmd_in() + * + ****************************************************************************/ +StaticProc int aarp_req_cmd_in (pkt, elapp) +aarp_pkt_t *pkt; +at_ifaddr_t *elapp; +{ +/* + kprintf("aarp_req_cmd_in: ifThisNode=%d:%d srcNode=%d:%d dstNode=%d:%d\n", + elapp->ifThisNode.s_net, + elapp->ifThisNode.s_node, + NET_VALUE(pkt->src_at_addr.atalk_net), + pkt->src_at_addr.atalk_node, + NET_VALUE(pkt->dest_at_addr.atalk_net), + pkt->dest_at_addr.atalk_node); +*/ + if ((probing == PROBE_DONE) && + (NET_VALUE(pkt->dest_at_addr.atalk_net) == elapp->ifThisNode.s_net) && + (pkt->dest_at_addr.atalk_node == elapp->ifThisNode.s_node)) { + if (aarp_send_resp(elapp, pkt) == -1) + return(-1); + } + /* now to glean some info */ + aarp_glean_info(pkt, elapp); + return (0); +} + + + +/**************************************************************************** + * aarp_resp_cmd_in() + * + ****************************************************************************/ +StaticProc int aarp_resp_cmd_in (pkt, elapp) + aarp_pkt_t *pkt; + at_ifaddr_t *elapp; +{ + register aarp_amt_t *amt_ptr; + gbuf_t *m; + + switch (probing) { + case PROBE_TENTATIVE : + if ((NET_VALUE(pkt->src_at_addr.atalk_net) == + probe_cb.elapp->initial_addr.s_net) && + (pkt->src_at_addr.atalk_node == + probe_cb.elapp->initial_addr.s_node)) { + + /* this is a response to AARP_PROBE_CMD. There's + * someone out there with the address we desire + * for ourselves. 
+ */ + untimeout(aarp_sched_probe, 0); + probe_cb.no_of_retries = 0; + aarp_get_next_node(probe_cb.elapp); + no_of_nodes_tried++; + + if (no_of_nodes_tried == AARP_MAX_NODES_TRIED) { + aarp_get_rand_net(probe_cb.elapp); + aarp_get_rand_node(probe_cb.elapp); + no_of_nodes_tried = 0; + no_of_nets_tried++; + } + if (no_of_nets_tried == AARP_MAX_NETS_TRIED) { + /* We have tried enough nodes and nets, give up. + */ + probe_cb.error = EADDRNOTAVAIL; + AARPwakeup(&probe_cb); + return(0); + } + if (aarp_send_probe() == -1) { + /* expecting aarp_send_probe to fill in + * probe_cb.error + */ + AARPwakeup(&probe_cb); + return(-1); + } + } else { + /* hmmmm! got a response packet while still probing + * for AT address and the AT dest address doesn't + * match!! + * What should I do here?? kkkkkkkkk + */ + return(-1); + } + break; + + case PROBE_DONE : + AMT_LOOK(amt_ptr, pkt->src_at_addr, elapp); + if (amt_ptr == NULL) + return(-1); + if (amt_ptr->tmo) { + untimeout(aarp_sched_req, amt_ptr); + amt_ptr->tmo = 0; + } + + if (amt_ptr->m == NULL) { + /* this may be because of a belated response to + * aarp reaquest. Based on an earlier response, we + * might have already sent the packet out, so + * there's nothing to send now. This is okay, no + * error. + */ + return(0); + } + amt_ptr->dest_addr = pkt->src_addr; + if (FDDI_OR_TOKENRING(elapp->aa_ifp->if_type)) + ddp_bit_reverse(&amt_ptr->dest_addr); + m = amt_ptr->m; + amt_ptr->m = NULL; + pat_output(amt_ptr->elapp, m, + (unsigned char *)&amt_ptr->dest_addr, 0); + break; + default : + /* probing in a weird state?? 
*/ + return(-1); + } + return(0); +} + + + +/**************************************************************************** + * aarp_probe_cmd_in() + * + ****************************************************************************/ +StaticProc int aarp_probe_cmd_in (pkt, elapp) +register aarp_pkt_t *pkt; +at_ifaddr_t *elapp; +{ + register aarp_amt_t *amt_ptr; + + switch (probing) { + case PROBE_TENTATIVE : + if ((elapp == probe_cb.elapp) && + (NET_VALUE(pkt->src_at_addr.atalk_net) == + probe_cb.elapp->initial_addr.s_net) && + (pkt->src_at_addr.atalk_node == + probe_cb.elapp->initial_addr.s_node)) { + /* some bozo is probing for address I want... and I + * can't tell him to shove off! + */ + untimeout(aarp_sched_probe, 0); + probe_cb.no_of_retries = 0; + aarp_get_next_node(probe_cb.elapp); + no_of_nodes_tried++; + + if (no_of_nodes_tried == AARP_MAX_NODES_TRIED) { + aarp_get_rand_net(probe_cb.elapp); + aarp_get_rand_node(probe_cb.elapp); + no_of_nodes_tried = 0; + no_of_nets_tried++; + } + if (no_of_nets_tried == AARP_MAX_NETS_TRIED) { + /* We have tried enough nodes and nets, give up. + */ + probe_cb.error = EADDRNOTAVAIL; + AARPwakeup(&probe_cb); + return(0); + } + if (aarp_send_probe() == -1) { + /* expecting aarp_send_probe to fill in + * probe_cb.error + */ + AARPwakeup(&probe_cb); + return(-1); + } + } else { + /* somebody's probing... none of my business yet, so + * just ignore the packet + */ + return (0); + } + break; + + case PROBE_DONE : + if ((NET_VALUE(pkt->src_at_addr.atalk_net) == elapp->ifThisNode.s_net) && + (pkt->src_at_addr.atalk_node == elapp->ifThisNode.s_node)) { + if (aarp_send_resp(elapp, pkt) == -1) + return (-1); + return (0); + } + AMT_LOOK(amt_ptr, pkt->src_at_addr, elapp); + + if (amt_ptr) + aarp_delete_amt_info(amt_ptr); + break; + default : + /* probing in a weird state?? 
*/ + return (-1); + } + return (0); +} + + + +/**************************************************************************** + * aarp_chk_addr() + ****************************************************************************/ +int aarp_chk_addr(ddp_hdrp, elapp) + at_ddp_t *ddp_hdrp; + at_ifaddr_t *elapp; +{ + if ((ddp_hdrp->dst_node == elapp->ifThisNode.s_node) && + (NET_VALUE(ddp_hdrp->dst_net) == elapp->ifThisNode.s_net)) { + return(0); /* exact match in address */ + } + + if (AARP_BROADCAST(ddp_hdrp, elapp)) { + return(0); /* some kind of broadcast address */ + } + return (AARP_ERR_NOT_OURS); /* not for us */ +} + + + +/**************************************************************************** + * aarp_send_data() + * + * remarks : + * 1. The message coming in would be of the form {802.3, 802.2, ddp,...} + * + * 2. The message coming in would be freed here if transmission goes + * through okay. If an error is returned by aarp_send_data, the caller + * can assume that the message is not freed. The exception to + * this scenario is the prepended atalk_addr field. This field + * will ALWAYS be removed. If the message is dropped, + * it's not an "error". 
+ * + ****************************************************************************/ + +int aarp_send_data(m, elapp, dest_at_addr, loop) + register gbuf_t *m; + register at_ifaddr_t *elapp; + struct atalk_addr *dest_at_addr; + int loop; /* if true, loopback broadcasts */ +{ + register aarp_amt_t *amt_ptr; + register at_ddp_t *ddp_hdrp; + int error; + int s; + + if (gbuf_len(m) <= 0) + ddp_hdrp = (at_ddp_t *)gbuf_rptr(gbuf_cont(m)); + else + ddp_hdrp = (at_ddp_t *)gbuf_rptr(m); + + if ((ddp_hdrp->dst_node == ddp_hdrp->src_node) && + (NET_VALUE(ddp_hdrp->dst_net) == NET_VALUE(ddp_hdrp->src_net))) { + /* + * we're sending to ourselves + * so loop it back upstream + */ + ddp_input(m, elapp); + return(0); + } + ATDISABLE(s, arpinp_lock); + AMT_LOOK(amt_ptr, *dest_at_addr, elapp); + + + if (amt_ptr) { + if (amt_ptr->m) { + /* + * there's already a packet awaiting transmission, so + * drop this one and let the upper layer retransmit + * later. + */ + ATENABLE(s, arpinp_lock); + gbuf_freel(m); + return (0); + } + ATENABLE(s, arpinp_lock); + return (pat_output(elapp, m, + (unsigned char *)&amt_ptr->dest_addr, 0)); + } + /* + * either this is a packet to be broadcasted, or the address + * resolution needs to be done + */ + if (AARP_BROADCAST(ddp_hdrp, elapp)) { + gbuf_t *newm = 0; + struct etalk_addr *dest_addr; + + ATENABLE(s, arpinp_lock); + dest_addr = &elapp->cable_multicast_addr; + if (loop) + newm = (gbuf_t *)gbuf_dupm(m); + + if ( !(error = pat_output(elapp, m, + (unsigned char *)dest_addr, 0))) { + /* + * The message transmitted successfully; + * Also loop a copy back up since this + * is a broadcast message. + */ + if (loop) { + if (newm == NULL) + return (error); + ddp_input(newm, elapp); + } /* endif loop */ + } else { + if (newm) + gbuf_freem(newm); + } + return (error); + } + NEW_AMT(amt_ptr, *dest_at_addr,elapp); + + if (amt_ptr->m) { + /* + * no non-busy slots available in the cache, so + * drop this one and let the upper layer retransmit + * later. 
+ */ + ATENABLE(s, arpinp_lock); + gbuf_freel(m); + return (0); + } + amt_ptr->dest_at_addr = *dest_at_addr; + amt_ptr->dest_at_addr.atalk_unused = 0; + + amt_ptr->last_time = time.tv_sec; + amt_ptr->m = m; + amt_ptr->elapp = elapp; + amt_ptr->no_of_retries = 0; + ATENABLE(s, arpinp_lock); + + if ((error = aarp_send_req(amt_ptr))) { + aarp_delete_amt_info(amt_ptr); + return(error); + } + return(0); +} + + + +/**************************************************************************** + * aarp_send_resp() + * + * remarks : + * The pkt being passed here is only to "look at". It should neither + * be used for transmission, nor freed. Its contents also must not be + * altered. + * + ****************************************************************************/ +StaticProc int aarp_send_resp(elapp, pkt) + register at_ifaddr_t *elapp; + aarp_pkt_t *pkt; +{ + register aarp_pkt_t *new_pkt; + register gbuf_t *m; + + if ((m = gbuf_alloc(AT_WR_OFFSET+sizeof(aarp_pkt_t), PRI_MED)) == NULL) { + return (-1); + } + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,0); + + new_pkt = (aarp_pkt_t *)gbuf_rptr(m); + aarp_build_pkt(new_pkt, elapp); + + new_pkt->aarp_cmd = AARP_RESP_CMD; + new_pkt->dest_addr = pkt->src_addr; + + new_pkt->dest_at_addr = pkt->src_at_addr; + new_pkt->dest_at_addr.atalk_unused = 0; + + ATALK_ASSIGN(new_pkt->src_at_addr, elapp->ifThisNode.s_net, + elapp->ifThisNode.s_node, 0); + + gbuf_winc(m,sizeof(aarp_pkt_t)); + if (FDDI_OR_TOKENRING(elapp->aa_ifp->if_type)) + ddp_bit_reverse(&new_pkt->dest_addr); + + if (pat_output(elapp, m, (unsigned char *)&new_pkt->dest_addr, + AARP_AT_TYPE)) + return(-1); + return(0); +} + + + +/**************************************************************************** + * aarp_send_req() + * + ****************************************************************************/ + +StaticProc int aarp_send_req (amt_ptr) +register aarp_amt_t *amt_ptr; +{ + register aarp_pkt_t *pkt; + register gbuf_t *m; + int error; + + if ((m = 
gbuf_alloc(AT_WR_OFFSET+sizeof(aarp_pkt_t), PRI_MED)) == NULL) { + return (ENOBUFS); + } + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,0); + + pkt = (aarp_pkt_t *)gbuf_rptr(m); + aarp_build_pkt(pkt, amt_ptr->elapp); + + pkt->aarp_cmd = AARP_REQ_CMD; + pkt->dest_addr = et_zeroaddr; + pkt->dest_at_addr = amt_ptr->dest_at_addr; + pkt->dest_at_addr.atalk_unused = 0; + ATALK_ASSIGN(pkt->src_at_addr, amt_ptr->elapp->ifThisNode.s_net, + amt_ptr->elapp->ifThisNode.s_node, 0); + gbuf_winc(m,sizeof(aarp_pkt_t)); + + amt_ptr->no_of_retries++; + timeout(aarp_sched_req, amt_ptr, AARP_REQ_TIMER_INT); + amt_ptr->tmo = 1; + error = pat_output(amt_ptr->elapp, m, + (unsigned char *)&amt_ptr->elapp->cable_multicast_addr, AARP_AT_TYPE); + if (error) + { + untimeout(aarp_sched_req, amt_ptr); + amt_ptr->tmo = 0; + return(error); + } + + return(0); +} + + + +/**************************************************************************** + * aarp_send_probe() + * + ****************************************************************************/ +StaticProc int aarp_send_probe() +{ + register aarp_pkt_t *pkt; + register gbuf_t *m; + + if ((m = gbuf_alloc(AT_WR_OFFSET+sizeof(aarp_pkt_t), PRI_MED)) == NULL) { + probe_cb.error = ENOBUFS; + return (-1); + } + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,0); + pkt = (aarp_pkt_t *)gbuf_rptr(m); + aarp_build_pkt(pkt, probe_cb.elapp); + + pkt->aarp_cmd = AARP_PROBE_CMD; + pkt->dest_addr = et_zeroaddr; + + ATALK_ASSIGN(pkt->src_at_addr, probe_cb.elapp->initial_addr.s_net, + probe_cb.elapp->initial_addr.s_node, 0); + + ATALK_ASSIGN(pkt->dest_at_addr, probe_cb.elapp->initial_addr.s_net, + probe_cb.elapp->initial_addr.s_node, 0); + + gbuf_winc(m,sizeof(aarp_pkt_t)); + + probe_cb.error = pat_output(probe_cb.elapp, m, + (unsigned char *)&probe_cb.elapp->cable_multicast_addr, AARP_AT_TYPE); + if (probe_cb.error) { + return(-1); + } + + probing = PROBE_TENTATIVE; + probe_cb.no_of_retries++; + timeout(aarp_sched_probe, 0, AARP_PROBE_TIMER_INT); + + return(0); +} + + + 
+/**************************************************************************** + * aarp_lru_entry() + * + ****************************************************************************/ + +StaticProc aarp_amt_t *aarp_lru_entry(at) +register aarp_amt_t *at; +{ + register aarp_amt_t *at_ret; + register int i; + + at_ret = at; + + for (i = 1, at++; i < AMT_BSIZ; i++, at++) { + if (at->last_time < at_ret->last_time && (at->m == NULL)) + at_ret = at; + } + return(at_ret); +} + + + +/**************************************************************************** + * aarp_glean_info() + * + ****************************************************************************/ + +StaticProc int aarp_glean_info(pkt, elapp) +register aarp_pkt_t *pkt; +at_ifaddr_t *elapp; +{ + register aarp_amt_t *amt_ptr; + int s; + + ATDISABLE(s, arpinp_lock); + AMT_LOOK(amt_ptr, pkt->src_at_addr, elapp); + + if (amt_ptr == NULL) { + /* + * amt entry for this address doesn't exist, add it to the cache + */ + NEW_AMT(amt_ptr, pkt->src_at_addr,elapp); + + if (amt_ptr->m) + { + ATENABLE(s, arpinp_lock); + return(0); /* no non-busy slots available in the cache */ + } + amt_ptr->dest_at_addr = pkt->src_at_addr; + amt_ptr->dest_at_addr.atalk_unused = 0; + + amt_ptr->last_time = (int)random(); + } + /* + * update the ethernet address + * in either case + */ + amt_ptr->dest_addr = pkt->src_addr; + if (FDDI_OR_TOKENRING(elapp->aa_ifp->if_type)) + ddp_bit_reverse(&amt_ptr->dest_addr); + ATENABLE(s, arpinp_lock); + return(1); +} + + +/**************************************************************************** + * aarp_delete_amt_info() + * + ****************************************************************************/ + +StaticProc int aarp_delete_amt_info(amt_ptr) +register aarp_amt_t *amt_ptr; +{ + register s; + register gbuf_t *m; + ATDISABLE(s, arpinp_lock); + amt_ptr->last_time = 0; + ATALK_ASSIGN(amt_ptr->dest_at_addr, 0, 0, 0); + amt_ptr->no_of_retries = 0; + + if (amt_ptr->m) { + m = amt_ptr->m; + 
amt_ptr->m = NULL; + ATENABLE(s, arpinp_lock); + gbuf_freel(m); + } + else + ATENABLE(s, arpinp_lock); + return(0); +} + + + +/**************************************************************************** + * aarp_sched_probe() + * + ****************************************************************************/ + +int aarp_sched_probe() +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + if (probe_cb.no_of_retries != AARP_MAX_PROBE_RETRIES) { + if (aarp_send_probe() == -1) + AARPwakeup(&probe_cb); + } else { + probe_cb.error = 0; + AARPwakeup(&probe_cb); + } + + (void) thread_funnel_set(network_flock, FALSE); + return(0); +} + + + +/**************************************************************************** + * aarp_build_pkt() + * + ****************************************************************************/ + +StaticProc void aarp_build_pkt(pkt, elapp) + register aarp_pkt_t *pkt; + at_ifaddr_t *elapp; +{ + pkt->hardware_type = AARP_ETHER_HW_TYPE; + pkt->stack_type = AARP_AT_PROTO; + pkt->hw_addr_len = ETHERNET_ADDR_LEN; + pkt->stack_addr_len = AARP_AT_ADDR_LEN; + bcopy(elapp->xaddr, pkt->src_addr.etalk_addr_octet, sizeof(elapp->xaddr)); + if (FDDI_OR_TOKENRING(elapp->aa_ifp->if_type)) + ddp_bit_reverse(pkt->src_addr.etalk_addr_octet); +} + +/**************************************************************************** + * aarp_sched_req() + * + ****************************************************************************/ + +StaticProc int aarp_sched_req(amt_ptr) + register aarp_amt_t *amt_ptr; +{ + int s; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + ATDISABLE(s, arpinp_lock); + if (amt_ptr->tmo == 0) + { + ATENABLE(s, arpinp_lock); + (void) thread_funnel_set(network_flock, FALSE); + return(0); + } + if (amt_ptr->no_of_retries < AARP_MAX_REQ_RETRIES) { + ATENABLE(s, arpinp_lock); + if (aarp_send_req(amt_ptr) == 0) { + (void) thread_funnel_set(network_flock, FALSE); + return(0); + } 
+ ATDISABLE(s, arpinp_lock); + } + ATENABLE(s, arpinp_lock); + aarp_delete_amt_info(amt_ptr); + + (void) thread_funnel_set(network_flock, FALSE); + + return(0); +} + + + +/**************************************************************************** + * aarp_get_rand_node() + * + ****************************************************************************/ +StaticProc int aarp_get_rand_node(elapp) +at_ifaddr_t *elapp; +{ + register u_char node; + + /* + * generate a starting node number in the range 1 thru 0xfd. + * we use this as the starting probe point for a given net + * To generate a different node number each time we call + * aarp_get_next_node + */ + node = ((u_char)(random() & 0xff)) % 0xfd + 2; + + elapp->initial_addr.s_node = node; + return(0); +} + + + +StaticProc int aarp_get_next_node(elapp) +at_ifaddr_t *elapp; +{ + register u_char node = elapp->initial_addr.s_node; + + /* + * return the next node number in the range 1 thru 0xfd. + */ + node = (node == 0xfd) ? (1) : (node+1); + + elapp->initial_addr.s_node = node; + return(0); +} + + + + + +/**************************************************************************** + * aarp_get_rand_net() + * + ****************************************************************************/ +StaticProc int aarp_get_rand_net(elapp) +register at_ifaddr_t *elapp; +{ + register at_net_al last_net, new_net; + + if (elapp->ifThisCableStart) { + last_net = elapp->initial_addr.s_net; + /* + * the range of network numbers valid for this + * cable is known. Try to choose a number from + * this range only. + */ + new_net= ((at_net_al)random() & 0xffff); + /* two-byte random number generated... 
now fit it in + * the prescribed range + */ + new_net = new_net % (unsigned) (elapp->ifThisCableEnd - + elapp->ifThisCableStart + 1) + + elapp->ifThisCableStart; + + if (new_net == last_net) { + if (new_net == elapp->ifThisCableEnd) + new_net = elapp->ifThisCableStart; + else + new_net++; + } + elapp->initial_addr.s_net = new_net; + } else { + /* The range of valid network numbers for this cable + * is not known... choose a network number from + * startup range. + */ + last_net = (elapp->initial_addr.s_net & 0x00ff); + new_net = (at_net_al)random() & 0x00ff; + + if (new_net == last_net) + new_net++; + if (new_net == 0xff) + new_net = 0; + elapp->initial_addr.s_net = (DDP_STARTUP_LOW | new_net); + } + return(0); +} + + +int getAarpTableSize(elapId) + int elapId; /* elap_specifics array index (should be + * changed when we add a non-ethernet type + * of I/F to the mix. Unused for now. + */ +{ + return(AMTSIZE); +} + +int getPhysAddrSize(elapId) + int elapId; /* elap_specifics array index (should be + * changed when we add a non-ethernet type + * of I/F to the mix. Unused for now. + */ +{ + return(ETHERNET_ADDR_LEN); +} + +#define ENTRY_SIZE sizeof(struct atalk_addr) + sizeof(struct etalk_addr) + +snmpAarpEnt_t *getAarp(elapId) + int *elapId; /* I/F table to retrieve & table + size entries on return */ + +/* gets aarp table for specified interface and builds + a table in SNMP expected format. Returns pointer to said + table and sets elapId to byte size of used portion of table +*/ +{ + int i, cnt=0; + aarp_amt_t *amtp; + static snmpAarpEnt_t snmp[AMTSIZE]; + snmpAarpEnt_t *snmpp; + + + if (*elapId <0 || *elapId >= IF_TOTAL_MAX) + return NULL; + + + for (i=0, amtp = &(aarp_table[*elapId]->et_aarp_amt[0]), snmpp = snmp; + i < AMTSIZE; i++,amtp++) { + + /* last_time will be 0 if entry was never used */ + if (amtp->last_time) { + /* copy just network & mac address. 
+ * For speed, we assume that the atalk_addr + * & etalk_addr positions in the aarp_amt_t struct + * has not changed and copy both at once + */ + bcopy(&amtp->dest_at_addr, &snmpp->ap_ddpAddr, ENTRY_SIZE); + snmpp++; + cnt++; + + } + } + *elapId = cnt; + return(snmp); +} +/*#endif *//* COMMENTED_OUT */ + diff --git a/bsd/netat/ddp_aep.c b/bsd/netat/ddp_aep.c new file mode 100644 index 000000000..77e78451e --- /dev/null +++ b/bsd/netat/ddp_aep.c @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +extern snmpStats_t snmpStats; + +/****************************************************************/ +/* */ +/* */ +/* Echo Protocol */ +/* */ +/* */ +/****************************************************************/ + +void ep_input (mp, ifID) + gbuf_t *mp; + register at_ifaddr_t *ifID; +{ + register at_ddp_t *ddp; + + snmpStats.ec_echoReq++; + ddp = (at_ddp_t *)gbuf_rptr(mp); + + /* ep packets that have a source broadcast can cause + * possible broadcast storms, prevent that here + */ + if ( NET_VALUE(ddp->src_net) == 0 || ddp->src_node == 255) { + gbuf_freem(mp); + return; + } + + /* + * Check if this AEP message is for us or need to be forwarded + */ + if (!ROUTING_MODE || + (ifID->ifThisNode.s_net == NET_VALUE(ddp->dst_net)) + && (ifID->ifThisNode.s_node == ddp->dst_node)) { + + dPrintf(D_M_AEP, D_L_INFO, ("aep_input: received for this port from %d:%d\n", + NET_VALUE(ddp->src_net), ddp->src_node)); + + if (ddp->type == DDP_ECHO && + ddp->data[0] == EP_REQUEST) { + ddp->data[0] = EP_REPLY; + NET_NET(ddp->dst_net, ddp->src_net); + ddp->dst_node = ddp->src_node; + ddp->dst_socket = ddp->src_socket; + /* send the packet out.... */ + snmpStats.ec_echoReply++; + (void)ddp_output(&mp, (at_socket)EP_SOCKET, FALSE); + } else + gbuf_freem(mp); + } + else { + dPrintf(D_M_AEP, D_L_INFO, + ("aep_input: calling routing needed from %d:%d to %d:%d\n", + NET_VALUE(ddp->src_net), ddp->src_node, NET_VALUE(ddp->dst_net), + ddp->dst_node)); + routing_needed(mp, ifID, TRUE); + } + + return; +} diff --git a/bsd/netat/ddp_brt.c b/bsd/netat/ddp_brt.c new file mode 100644 index 000000000..22f0d103d --- /dev/null +++ b/bsd/netat/ddp_brt.c @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + * + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +#ifndef lint +/* static char sccsid[] = "@(#)ddp_brt.c: 2.0, 1.7; 10/4/93; Copyright 1988-89, Apple Computer, Inc."; */ +#endif /* lint */ + +/* + * Title: ddp_brt.c + * + * Facility: Best Router Caching. + * + * Author: Kumar Vora, Creation Date: June-15-1989 + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +/* Best Router Cache */ +ddp_brt_t at_ddp_brt[BRTSIZE]; +int ddp_brt_sweep_timer; + +void ddp_glean(mp, ifID, src_addr) + register gbuf_t *mp; + register at_ifaddr_t *ifID; + struct etalk_addr *src_addr; +{ + register at_net_al src_net; + + /* NOT assuming that the incoming packet is in one contiguous + * buffer. + */ + + { + /* The interface is ethertalk, so the message is + * of the form {802.3, 802.2, ddp.... }. Extract the + * 802.3 source address if necessary. 
Assuming, + * however, that 802.3 and 802.2 headers are in + * one contiguous piece. + */ + { register at_ddp_t *dgp; + + dgp = (at_ddp_t *)(gbuf_rptr(mp)); + src_net = NET_VALUE(dgp->src_net); + } + if (src_net >= ifID->ifThisCableStart && src_net <= ifID->ifThisCableEnd) + /* the packet has come from a net on this cable, + * no need to glean router info. + */ + return; + + if (src_addr != NULL) + { register ddp_brt_t *brt; + + BRT_LOOK (brt, src_net); + if (brt == NULL) { + /* There's no BRT entry corresponding to this + * net. Allocate a new entry. + */ + NEW_BRT(brt, src_net); + if (brt == NULL) + /* No space available in the BRT; + * can't glean info. + */ + return; + brt->net = src_net; + } + /* + * update the router info in either case + */ + brt->et_addr = *src_addr; + brt->age_flag = BRT_VALID; + brt->ifID = ifID; + } + } +} + +void ddp_brt_init() +{ + bzero(at_ddp_brt, sizeof(at_ddp_brt)); + ddp_brt_sweep_timer = 1; +#ifdef NOT_USED + timeout(ddp_brt_sweep_funnel, (long)0, BRT_SWEEP_INT * SYS_HZ); +#endif +} + +void ddp_brt_shutdown() +{ +#ifdef NOT_USED + bzero(at_ddp_brt, sizeof(at_ddp_brt)); + if (ddp_brt_sweep_timer) + untimeout(ddp_brt_sweep_funnel, 0); +#endif + ddp_brt_sweep_timer = 0; +} + +/* funneled version */ +void ddp_brt_sweep_funnel() +{ + thread_funnel_set(network_flock, TRUE); + ddp_brt_sweep(); + thread_funnel_set(network_flock, FALSE); +} + +void ddp_brt_sweep() +{ + register ddp_brt_t *brt; + register int i; + + if (ddp_brt_sweep_timer) + if (++ddp_brt_sweep_timer > BRT_SWEEP_INT) { + ddp_brt_sweep_timer = 1; + + brt = at_ddp_brt; + for (i = 0; i < BRTSIZE; i++, brt++) { + switch (brt->age_flag) { + case BRT_EMPTY : + break; + case BRT_VALID : + brt->age_flag = BRT_GETTING_OLD; + break; + case BRT_GETTING_OLD : + bzero(brt, sizeof(ddp_brt_t)); + break; + default : + ATTRACE(AT_MID_DDP,AT_SID_RESOURCE, AT_LV_ERROR, FALSE, + "ddp_brt_sweep : corrupt age flag %d", + brt->age_flag, 0,0); + break; + } + } + } +#ifdef NOT_USED + /* set up 
the next sweep... */ + timeout(ddp_brt_sweep_funnel, (long)0, BRT_SWEEP_INT * SYS_HZ); +#endif + +} + + diff --git a/bsd/netat/ddp_lap.c b/bsd/netat/ddp_lap.c new file mode 100644 index 000000000..626d22f09 --- /dev/null +++ b/bsd/netat/ddp_lap.c @@ -0,0 +1,1695 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989, 1993-1998 Apple Computer, Inc. + */ + +/* at_elap.c: 2.0, 1.29; 10/4/93; Apple Computer, Inc. */ + +/* This is the file which implements all the streams driver + * functionality required for EtherTalk. + */ + +/* revision history + + 03-14-94 jjs Changed all functions which assumed only one port would + ever be used. Added validate_msg_size, changed elap_online + to work with the h/w name only (e.g. 'et2'). + + Modified for MP, 1996 by Tuyen Nguyen + Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ +*/ + +#define RESOLVE_DBG /* for debug.h global resolution */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include /* rtmp+zip table structs */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* globals */ + +at_ifaddr_t at_interfaces[IF_TOTAL_MAX]; + /* index for at_interfaces is not important */ +at_ifaddr_t *ifID_table[IF_TOTAL_MAX]; + /* the table of ifID structures, one per interface + (not just ethernet), + * NOTE: for MH, entry 0 in this table is + * now defined to be the default I/F + */ +at_ifaddr_t *ifID_home; + /* always ifID_table[IFID_HOME] for now, but will be used for + dynamic "home port" assignment, later */ + +at_state_t at_state; /* global state of AT network */ +snmpFlags_t snmpFlags; + +int xpatcnt = 0; + +/* snmp defines */ +#define MAX_BUFSIZE 8192 +#define MAX_RTMP (MAX_BUFSIZE/sizeof(RT_entry)-1) +#define MAX_NBP \ + ((MAX_BUFSIZE - SNMP_NBP_HEADER_SIZE)/sizeof(snmpNbpEntry_t)-1) +#define MAX_NBP_BYTES (MAX_NBP * sizeof(snmpNbpEntry_t)) +#define MAX_ZIP (MAX_BUFSIZE/sizeof(ZT_entry)-1) +#define MAX_RTMP_BYTES (MAX_RTMP * sizeof(RT_entry)) +#define MAX_ZIP_BYTES (MAX_ZIP * sizeof(ZT_entry)) + +/* externs */ +extern TAILQ_HEAD(name_registry, _nve_) name_registry; +extern snmpStats_t snmpStats; +extern atlock_t ddpinp_lock; +extern atlock_t arpinp_lock; +extern short appletalk_inited; +extern int adspInited; +extern struct atpcb ddp_head; +extern gref_t *atp_inputQ[]; +extern struct atp_state *atp_used_list; +extern asp_scb_t *asp_scbQ[]; +extern asp_scb_t *scb_used_list; +extern CCB *adsp_inputQ[]; +extern CCB *ccb_used_list; +extern at_ddp_stats_t at_ddp_stats; + +/* protos */ +extern snmpAarpEnt_t * getAarp(int *); +extern void nbp_shutdown(), routershutdown(), ddp_brt_shutdown(); +extern void 
ddp_brt_init(), rtmp_init(), rtmp_input(); +extern rtmp_router_start(at_kern_err_t *); +static void getIfNames(at_ifnames_t *); +static void add_route(); +static int set_zones(); +void elap_offline(); +static int elap_online1(), re_aarp(); +int at_reg_mcast(), at_unreg_mcast(); +void AARPwakeup(), ZIPwakeup(); +static void elap_hangup(); +static getSnmpCfg(); + +at_ifaddr_t *find_ifID(if_name) + char *if_name; +{ + int pat_id; + + if (strlen(if_name)) + for (pat_id=0; pat_id < xpatcnt; pat_id++) { + if (!strcmp(at_interfaces[pat_id].ifName, if_name)) + return(&at_interfaces[pat_id]); + } + + return((at_ifaddr_t *)NULL); +} + +static int validate_msg_size(m, gref, elapp) + register gbuf_t *m; + gref_t *gref; + at_ifaddr_t **elapp; + +/* checks ioctl message type for minimum expected message size & + sends error back if size invalid +*/ +{ + register ioc_t *iocbp; + register at_if_cfg_t *cfgp; + int i = 0, size = 1; + + *elapp = NULL; + iocbp = (ioc_t *) gbuf_rptr(m); + + dPrintf(D_M_ELAP, D_L_INFO, ("validate_msg_size: ioc_cmd = %d\n", + iocbp->ioc_cmd)); + switch (iocbp->ioc_cmd) { + case LAP_IOC_ADD_ROUTE: + size = sizeof(RT_entry); + break; + case LAP_IOC_GET_ROUTE: + size = sizeof(RT_entry); + break; + case LAP_IOC_GET_ZONE: + size = sizeof(ZT_entryno); + break; + case LAP_IOC_SNMP_GET_CFG: + case LAP_IOC_SNMP_GET_AARP: + case LAP_IOC_SNMP_GET_ZIP: + case LAP_IOC_SNMP_GET_RTMP: + case LAP_IOC_SNMP_GET_NBP: + size = sizeof(int); + break; + + case ELAP_IOC_GET_STATS: + case LAP_IOC_SNMP_GET_DDP: + size = 0; + break; + + default: + dPrintf(D_M_ELAP, D_L_ERROR, ("validate_msg_size: unknown ioctl\n")); + goto error; + } + + if (size == 0) { /* a non-data ioctl */ + return(0); + } + + if (gbuf_cont(m) != NULL) + i = gbuf_len(gbuf_cont(m)); + if (iocbp->ioc_count < size || (gbuf_cont(m) == NULL) || i < size) { + dPrintf(D_M_ELAP, D_L_ERROR, + ("ioctl msg error:s:%d c:%d bcont:%c delta:%d\n", + size, iocbp->ioc_count, + gbuf_cont(m)? 
'Y' : 'N', i)); + goto error; + } + else + return(0); +error: + ioc_ack(EMSGSIZE, m, gref); + return (EMSGSIZE); +} /* validate_msg_size */ + +int lap_online(elapp, cfgp) + at_ifaddr_t *elapp; + at_if_cfg_t *cfgp; +{ + int error; + + if (elapp->ifState != LAP_OFFLINE) { + return(EALREADY); + } + + elapp->flags = 0; + if (cfgp->flags & ELAP_CFG_HOME) { + if (ifID_home) { + /* only 1 home allowed! */ + return(EEXIST); + } + dPrintf(D_M_ELAP, D_L_STARTUP, + ("elap_wput home I/F:%s\n", cfgp->ifr_name)); + elapp->flags |= ELAP_CFG_HOME; + } + + if (MULTIPORT_MODE) { + elapp->flags |= ELAP_CFG_ZONELESS; + if (ROUTING_MODE && cfgp->netStart) + elapp->flags |= ELAP_CFG_SEED; + } + + if (!DEFAULT_ZONE(&cfgp->zonename) && + (elapp->flags & ELAP_CFG_HOME) || MULTIHOME_MODE) { + elapp->startup_zone = cfgp->zonename; + } + + if (elapp->flags & ELAP_CFG_SEED) { + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, + ("elap_wput: found to be seed\n")); + elapp->ifThisCableStart = cfgp->netStart; + elapp->ifThisCableEnd = cfgp->netEnd; + } + else { + dPrintf(D_M_ELAP,D_L_ERROR, + ("elap_wput: we believe we're not seed\n")); + /* from ELAP_IOC_SET_CFG */ + if (ATALK_VALUE(cfgp->node)) { + u_short initial_net; + u_char initial_node; + + initial_node = cfgp->node.s_node; + initial_net = cfgp->node.s_net; + if ((initial_node<0xfe) && (initial_node>0) && + !((initial_net == 0) || + ((initial_net >= DDP_STARTUP_LOW)&& + (initial_net <= DDP_STARTUP_HIGH)))) { + + elapp->initial_addr = cfgp->node; + } + } + } + + elapp->startup_error = 0; + elapp->startup_inprogress = FALSE; + if ((error = elap_online1(elapp))) + ddp_rem_if(elapp); + else + if (!(MULTIPORT_MODE) && + elapp->ifZoneName.len == 1 && + elapp->ifZoneName.str[0] == '*' && + !DEFAULT_ZONE(&cfgp->zonename)) { + nbp_add_multicast(&cfgp->zonename, elapp); + } + return(error); +} /* lap_online */ + +/*********************************************************************** + * elap_wput() + * + 
**********************************************************************/ +int elap_wput(gref, m) + gref_t *gref; + register gbuf_t *m; +{ + at_ifaddr_t *elapp; + register ioc_t *iocbp; + register at_if_cfg_t *cfgp; + at_elap_stats_t *statsp; + int error, i; + int (*func)(); + gbuf_t *tmpm; + at_ifaddr_t *patp; + + + switch (gbuf_type(m)) { + case MSG_DATA: + gbuf_freem(m); + dPrintf(D_M_ELAP,D_L_ERROR, + ("Output data to control channel is ignored\n")); + break; + + case MSG_IOCTL: + iocbp = (ioc_t *) gbuf_rptr(m); + + if (validate_msg_size(m, gref, &elapp)) + break; + + if (elapp) + cfgp = (at_if_cfg_t*) gbuf_rptr(gbuf_cont(m)); + + if (LAP_IOC_MYIOCTL(iocbp->ioc_cmd) || + ELAP_IOC_MYIOCTL(iocbp->ioc_cmd)) { + + switch (iocbp->ioc_cmd) { + case ELAP_IOC_GET_STATS: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_GET_STATS\n"); +#endif + if ( (gbuf_cont(m) == NULL) + || (elapp = find_ifID(gbuf_rptr(gbuf_cont(m)))) == NULL) { + ioc_ack(EINVAL, m, gref); + break; + } + gbuf_freem(gbuf_cont(m)); + if ((gbuf_cont(m) =gbuf_alloc(sizeof(at_elap_stats_t), + PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, m, gref); + break; + } + statsp = ((at_elap_stats_t *)gbuf_rptr(gbuf_cont(m))); + *statsp = elapp->stats; + gbuf_wset(gbuf_cont(m),sizeof(at_elap_stats_t)); + iocbp->ioc_count = sizeof(at_elap_stats_t); + ioc_ack(0, m, gref); + break; + + case LAP_IOC_ADD_ROUTE: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_ADD_ROUTE\n"); +#endif + add_route((RT_entry *)gbuf_rptr(gbuf_cont(m))); + ioc_ack(0, m, gref); + break; + + case LAP_IOC_GET_ZONE: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_GET_ZONE\n"); +#endif + /* return next ZT_entryno from ZT_table + a pointer to the struct ZT_entryno is passed down from + user space and the first byte is cast to a int, if + this int is non-zero, then the first ZT_entry is + returned and subsequent calls with a zero value + will return the next entry in the table. 
The next + read after the last valid entry will return EINVAL + */ + { + ZT_entryno *pZTe; + + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + + pZTe = zt_getNextZone(i); + if (pZTe) { + if ((gbuf_cont(m) = gbuf_alloc(sizeof(ZT_entryno), PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, m, gref); + break; + } + *(ZT_entryno *)gbuf_rptr(gbuf_cont(m)) = *pZTe; + gbuf_wset(gbuf_cont(m),sizeof(ZT_entryno)); + iocbp->ioc_count = sizeof(ZT_entryno); + ioc_ack(0, m, gref); + } + else + ioc_ack(EINVAL, m, gref); + } + break; + + case LAP_IOC_GET_ROUTE: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_GET_ROUTE\n"); +#endif + /* return next RT_entry from RT_table + * a pointer to the struct RT_entry is + * passed down from user space and the first + * byte is cast to a int, if this int is + * non-zero, then the first RT_entry is + * returned and subsequent calls with a + * zero value will return the next entry in + * the table. The next read after the last + * valid entry will return EINVAL + */ + { + RT_entry *pRT; + + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + + pRT = rt_getNextRoute(i); + if (pRT) { + if ((gbuf_cont(m) = gbuf_alloc(sizeof(RT_entry), PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, m, gref); + break; + } + *(RT_entry *)gbuf_rptr(gbuf_cont(m)) = *pRT; + gbuf_wset(gbuf_cont(m),sizeof(RT_entry)); + iocbp->ioc_count = sizeof(RT_entry); + ioc_ack(0, m, gref); + } + else + ioc_ack(EINVAL, m, gref); + } + break; + + case LAP_IOC_SNMP_GET_DDP: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_SNMP_GET_DDP\n"); +#endif + if (!(at_state.flags & AT_ST_STARTED)) { + ioc_ack(ENOTREADY, m, gref); + break; + } + if ((gbuf_cont(m) = gbuf_alloc(sizeof(snmpStats_t), + PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, m, gref); + break; + } + + *(snmpStats_t *)gbuf_rptr(gbuf_cont(m)) = snmpStats; + gbuf_wset(gbuf_cont(m),sizeof(snmpStats)); + iocbp->ioc_count = sizeof(snmpStats); + ioc_ack(0, m, gref); + break; + case 
LAP_IOC_SNMP_GET_CFG: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_SNMP_GET_CFG\n"); +#endif + { + int i,size; + snmpCfg_t snmp; + + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + if (!(at_state.flags & AT_ST_STARTED)) { + /* if stack down */ + iocbp->ioc_count = 0; + ioc_ack(ENOTREADY, m, gref); + dPrintf(D_M_ELAP_LOW, D_L_INFO, + ("elap_wput: cfg req, stack down\n")); + break; + } + if (i == UPDATE_IF_CHANGED && + !(at_state.flags & AT_ST_IF_CHANGED)) { + iocbp->ioc_count = 0; + ioc_ack(0, m, gref); + dPrintf(D_M_ELAP_LOW, D_L_INFO, + ("elap_wput: cfg req, unchanged\n")); + break; + } + dPrintf(D_M_ELAP_LOW, D_L_INFO, + ("elap_wput: cfg req, changed\n")); + + if (getSnmpCfg(&snmp)) { + dPrintf(D_M_ELAP,D_L_ERROR, + ("elap_wput:SNMP_GET_CFG error\n")); + ioc_ack(EOPNOTSUPP, m, gref); + break; + } + /* send up only used part of table */ + size = sizeof(snmp) - + sizeof(snmpIfCfg_t) * (MAX_IFS - snmp.cfg_ifCnt); + + if ((gbuf_cont(m) = gbuf_alloc(size, PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, m, gref); + break; + } + bcopy(&snmp,gbuf_rptr(gbuf_cont(m)),size); + gbuf_wset(gbuf_cont(m),size); + iocbp->ioc_count = size; + at_state.flags &= ~AT_ST_IF_CHANGED; + ioc_ack(0, m, gref); + } + break; + + case LAP_IOC_SNMP_GET_AARP: + { + snmpAarpEnt_t *snmpp; + int bytes; +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_SNMP_GET_AARP\n"); +#endif + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + dPrintf(D_M_ELAP,D_L_INFO, + ("elap_wput:calling getarp,i=%d\n", i)); + snmpp = getAarp(&i); + bytes = i * sizeof(snmpAarpEnt_t); + dPrintf(D_M_ELAP,D_L_INFO, + ("elap_wput:getarp returned, i=%d,bytes=%d\n", + i, bytes)); + if (snmpp) { + if ((gbuf_cont(m) = gbuf_alloc(bytes, PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, m, gref); + break; + } + bcopy(snmpp, gbuf_rptr(gbuf_cont(m)), bytes); + gbuf_wset(gbuf_cont(m),bytes); + iocbp->ioc_count = bytes; + ioc_ack(0, m, gref); + } + else + ioc_ack(EOPNOTSUPP, m, gref); 
+ } + break; + + case LAP_IOC_SNMP_GET_ZIP: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_SNMP_GET_ZIP\n"); +#endif + { /* matching brace NOT in this case */ + register int i,j; + register int size, total, tabsize; + gbuf_t *mn; /* new gbuf */ + gbuf_t *mo; /* old gbuf */ + gbuf_t *mt; /* temp */ + snmpNbpTable_t *nbp; + + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + if (!(at_state.flags & AT_ST_STARTED)) { + ioc_ack(ENOTREADY, m, gref); + break; + } + if (i == UPDATE_IF_CHANGED && + !(at_state.flags & AT_ST_ZT_CHANGED)) { + iocbp->ioc_count = 0; + ioc_ack(0, m, gref); + break; + } + mo=(gbuf_t*)NULL; + tabsize = getZipTableSize(); + + /* retrieve table into multiple gbufs */ + for (i =0; i + MAX_ZIP ? MAX_ZIP : tabsize - i; + size = j < MAX_ZIP ? sizeof(ZT_entry)*j : MAX_ZIP_BYTES; + if ((mn = gbuf_alloc(size, PRI_MED)) == NULL) { + if (gbuf_cont(m)) + gbuf_freem(gbuf_cont(m)); + ioc_ack(ENOBUFS, m, gref); + break; + } + if (!mo) { /* if first new one */ + mt = mn; + total = size; + } + else { + gbuf_cont(mo) = mn; + total += size; + } + mo = mn; + getZipTable((ZT_entry*)gbuf_rptr(mn),i,j); + gbuf_wset(mn,size); + } + if ((gbuf_cont(m) = gbuf_alloc(sizeof(int), PRI_MED)) == NULL) { + if (mt) + gbuf_freem(mt); + iocbp->ioc_count = 0; + ioc_ack(ENOBUFS, m, gref); + break; + } + if (!tabsize) { + dPrintf(D_M_ELAP,D_L_WARNING, + ("elap_wput:snmp: empty zip table\n")); + total = 0; + } + *(int*)gbuf_rptr(gbuf_cont(m)) = total; /* return table size */ + gbuf_wset(gbuf_cont(m),sizeof(int)); + iocbp->ioc_count = sizeof(int); + ioc_ack(0, m, gref); + if (tabsize) + atalk_putnext(gref,mt); /* send up table */ + at_state.flags &= ~AT_ST_ZT_CHANGED; + break; + + case LAP_IOC_SNMP_GET_RTMP: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_SNMP_GET_RTMP\n"); +#endif + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + if (!(at_state.flags & AT_ST_STARTED)) { + ioc_ack(ENOTREADY, m, gref); + break; + } + 
if (i == UPDATE_IF_CHANGED && + !(at_state.flags & AT_ST_RT_CHANGED)) { + iocbp->ioc_count = 0; + ioc_ack(0, m, gref); + break; + } + + mo=(gbuf_t*)NULL; + tabsize = getRtmpTableSize(); + + /* retrieve table into multiple gbufs */ + for (i =0; i<tabsize; i+=j) { + j = tabsize - i > MAX_RTMP ? MAX_RTMP : tabsize - i; + size = j < MAX_RTMP ? sizeof(RT_entry)*j : MAX_RTMP_BYTES; + if ((mn = gbuf_alloc(size, PRI_MED)) == NULL) { + if (gbuf_cont(m)) + gbuf_freem(gbuf_cont(m)); + ioc_ack(ENOBUFS, m, gref); + break; + } + if (!mo) { /* if first new one */ + mt = mn; + total = size; + } + else { + gbuf_cont(mo) = mn; + total += size; + } + mo = mn; + getRtmpTable((RT_entry*)gbuf_rptr(mn),i,j); + gbuf_wset(mn,size); + } + if ((gbuf_cont(m) = gbuf_alloc(sizeof(int), PRI_MED)) == NULL) { + if (mt) + gbuf_freem(mt); + iocbp->ioc_count = 0; + ioc_ack(ENOBUFS, m, gref); + break; + } + if (!tabsize) + total = 0; + *(int*)gbuf_rptr(gbuf_cont(m)) = total; /* return table size */ + gbuf_wset(gbuf_cont(m),sizeof(int)); + iocbp->ioc_count = sizeof(int); + ioc_ack(0, m, gref); + if (tabsize) + atalk_putnext(gref,mt); /* send up table */ + at_state.flags &= ~AT_ST_RT_CHANGED; + break; + + case LAP_IOC_SNMP_GET_NBP: +#ifdef APPLETALK_DEBUG + kprintf("LAP_IOC_SNMP_GET_NBP\n"); +#endif + i = *(int *)gbuf_rptr(gbuf_cont(m)); + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = NULL; + if (!(at_state.flags & AT_ST_STARTED)) { + ioc_ack(ENOTREADY, m, gref); + break; + } + if (i == UPDATE_IF_CHANGED && + !(at_state.flags & AT_ST_NBP_CHANGED)) { + iocbp->ioc_count = 0; + ioc_ack(0, m, gref); + dPrintf(D_M_ELAP_LOW, D_L_INFO, + ("elap_wput: nbp req denied, no change\n")); + break; + } + + mo=(gbuf_t*)NULL; + tabsize = getNbpTableSize(); + + /* retrieve table into multiple gbufs */ + for (i =0; i<tabsize; i+=j) { + j = tabsize - i > MAX_NBP ? MAX_NBP : tabsize - i; + size = j < MAX_NBP ? 
sizeof(snmpNbpEntry_t)*j : MAX_NBP_BYTES; + if (!i) + size += SNMP_NBP_HEADER_SIZE; + if ((mn = gbuf_alloc(size, PRI_MED)) == NULL) { + if (gbuf_cont(m)) + gbuf_freem(gbuf_cont(m)); + ioc_ack(ENOBUFS, m, gref); + break; + } + if (!mo) { /* if first new one */ + mt = mn; + total = size; + nbp = (snmpNbpTable_t*)gbuf_rptr(mn); + nbp->nbpt_entries = tabsize; + nbp->nbpt_zone = ifID_home->ifZoneName; + getNbpTable(nbp->nbpt_table,i,j); + } + else { + gbuf_cont(mo) = mn; + total += size; + getNbpTable((snmpNbpEntry_t *)gbuf_rptr(mn),i,j); + } + mo = mn; + gbuf_wset(mn,size); + } + if ((gbuf_cont(m) = gbuf_alloc(sizeof(int), PRI_MED)) == NULL) { + if (mt) + gbuf_freem(mt); + iocbp->ioc_count = 0; + ioc_ack(ENOBUFS, m, gref); + break; + } + if (!tabsize) + total = 0; + *(int*)gbuf_rptr(gbuf_cont(m)) = total; /* return table size */ + gbuf_wset(gbuf_cont(m),sizeof(int)); + iocbp->ioc_count = sizeof(int); + ioc_ack(0, m, gref); + if (tabsize) + atalk_putnext(gref,mt); /* send up table */ + at_state.flags &= ~AT_ST_NBP_CHANGED; + break; + } + + default: +#ifdef APPLETALK_DEBUG + kprintf("unknown ioctl %d\n", iocbp->ioc_cmd); +#endif + ioc_ack(ENOTTY, m, gref); + dPrintf(D_M_ELAP, D_L_WARNING, + ("elap_wput: unknown ioctl (%d)\n", iocbp->ioc_cmd)); + + if (elapp) + elapp->stats.unknown_mblks++; + break; + } + } + break; + + default: + gbuf_freem(m); + break; + } + + return 0; +} /* elap_wput */ + + +/* Called directly by ddp/zip. + */ +elap_dataput(m, elapp, addr_flag, addr) + register gbuf_t *m; + register at_ifaddr_t *elapp; + u_char addr_flag; + char *addr; +{ + register int size; + int error; + extern int zip_type_packet(); + struct etalk_addr dest_addr; + struct atalk_addr dest_at_addr; + extern gbuf_t *growmsg(); + int loop = TRUE; + /* flag to aarp to loopback (default) */ + + /* the incoming frame is of the form {flag, address, ddp...} + * where "flag" indicates whether the address is an 802.3 + * (link) address, or an appletalk address. 
If it's an + * 802.3 address, the packet can just go out to the network + * through PAT, if it's an appletalk address, AT->802.3 address + * resolution needs to be done. + * If 802.3 address is known, strip off the flag and 802.3 + * address, and prepend 802.2 and 802.3 headers. + */ + + if (addr == NULL) { + addr_flag = *(u_char *)gbuf_rptr(m); + gbuf_rinc(m,1); + } + + switch (addr_flag) { + case AT_ADDR_NO_LOOP : + loop = FALSE; + /* pass thru */ + case AT_ADDR : + if (addr == NULL) { + dest_at_addr = *(struct atalk_addr *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(struct atalk_addr)); + } else + dest_at_addr = *(struct atalk_addr *)addr; + break; + case ET_ADDR : + if (addr == NULL) { + dest_addr = *(struct etalk_addr *)gbuf_rptr(m); + gbuf_rinc(m,sizeof(struct etalk_addr)); + } else + dest_addr = *(struct etalk_addr *)addr; + break; + default : + gbuf_freel(m); /* unknown address type, chuck it */ + return(EINVAL); + } + + m = gbuf_strip(m); + + /* At this point, rptr points to ddp header for sure */ + if (elapp->ifState == LAP_OFFLINE) { + gbuf_freel(m); + return(ENETDOWN); + } + + if (elapp->ifState == LAP_ONLINE_FOR_ZIP) { + /* see if this is a ZIP packet that we need + * to let through even though network is + * not yet alive!! + */ + if (zip_type_packet(m) == 0) { + gbuf_freel(m); + return(ENETDOWN); + } + } + + elapp->stats.xmit_packets++; + size = gbuf_msgsize(m); + elapp->stats.xmit_bytes += size; + snmpStats.dd_outLong++; + + switch (addr_flag) { + case AT_ADDR_NO_LOOP : + case AT_ADDR : + /* + * we don't want elap to be looking into ddp header, so + * it doesn't know net#, consequently can't do + * AMT_LOOKUP. That task left to aarp now. 
+ */ + error = aarp_send_data(m,elapp,&dest_at_addr, loop); + break; + case ET_ADDR : + error = pat_output(elapp, m, &dest_addr, 0); + break; + } + return (error); +} /* elap_dataput */ + +/************************************************************************ + * elap_online() + * + ************************************************************************/ + +static int elap_online1(elapp) + at_ifaddr_t *elapp; +{ + int errno; + + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, ("elap_online:%s elapp:0x%x\n", + (elapp->ifName) ? &elapp->ifName[0] : "NULL interface", (u_int) elapp)); + if (elapp->ifState != LAP_OFFLINE || elapp->startup_inprogress == TRUE) + return (EALREADY); + + at_state.flags |= AT_ST_IF_CHANGED; + + if (elapp->flags & ELAP_CFG_HOME) /* tell ddp_add_if if this is home */ + elapp->ifFlags |= AT_IFF_DEFAULT; + + /* Get DDP started */ + if ((errno = ddp_add_if(elapp))) + return(errno); + + /* set up multicast address for cable-wide broadcasts */ + (void)at_reg_mcast(elapp, (caddr_t)&elapp->cable_multicast_addr); + + elapp->startup_inprogress = TRUE; + if (! 
(elapp->startup_error = re_aarp(elapp))) + (void)tsleep(&elapp->startup_inprogress, PSOCK | PCATCH, + "elap_online1", 0); + + /* then later, after some timeouts AARPwakeup() is called */ + + return(elapp->startup_error); +} /* elap_online1 */ + +static int re_aarp(elapp) + at_ifaddr_t *elapp; +{ + int errno; + + /* We now call aarp_init() to assign an appletalk node addr */ + errno = aarp_init1(elapp); + /* aarp_init1() returns either -1 or ENOTREADY */ + if (errno == ENOTREADY) + return(0); + else { + dPrintf(D_M_ELAP, D_L_STATE_CHG, + ("elap_online aarp_init for %s\n", elapp->ifName)); + (void)at_unreg_mcast(elapp, (caddr_t)&elapp->cable_multicast_addr); + ddp_rem_if(elapp); + elapp->ifState = LAP_OFFLINE; + return(EADDRNOTAVAIL); + } +} + +/* called from AARPwakeup */ +static void elap_online2(elapp) + at_ifaddr_t *elapp; +{ + if (MULTIPORT_MODE) { + dPrintf(D_M_ELAP,D_L_STARTUP_INFO, + ("elap_online: re_aarp, we know it's a router...\n")); + + if (elapp->flags & ELAP_CFG_SEED) { + /* add route table entry (zones to be added later) */ + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, + ("elap_online: rt_insert Cable %d-%d port =%d as SEED\n", + elapp->ifThisCableStart, elapp->ifThisCableEnd, elapp->ifPort)); + rt_insert(elapp->ifThisCableEnd, + elapp->ifThisCableStart, + 0,0,0, + elapp->ifPort, + RTE_STATE_PERMANENT | RTE_STATE_ZKNOWN | RTE_STATE_GOOD + ); + /* LD 081694: set the RTR_SEED_PORT flag for seed ports */ + elapp->ifFlags |= RTR_SEED_PORT; + } + else + dPrintf(D_M_ELAP,D_L_STARTUP_INFO, + ("elap_online: it's a router, but non seed\n")); + } + + if (elapp->flags & ELAP_CFG_ZONELESS) { + /* ELAP_CFG_ZONELESS tells us that it is a router or in + multihome mode, so we don't want to do the GetNetInfo + exchange with the router. 
*/ + + elapp->ifState = LAP_ONLINE_ZONELESS; + elapp->startup_inprogress = FALSE; + thread_wakeup(&elapp->startup_inprogress); + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, ("elap_online: ack 3\n")); + return; + } + + /* if we don't already have a zone and a multicast address */ + if (*(int *)&elapp->ZoneMcastAddr == 0 || elapp->ifZoneName.len == 0) { + /* hzonehash is a global containing the nbp hash for the startup_zone */ + sethzonehash(elapp); + + /* Get ZIP rolling to get zone multicast address, etc. */ + elapp->ifState = LAP_ONLINE_FOR_ZIP; + (void)zip_control(elapp, ZIP_ONLINE); + /* zip_control (w. control == ZIP_ONLINE) always returns ENOTREADY */ + + /* later, after some timeouts ZIPwakeup() is called. */ + } else { + /* otherwise, we have the zone and the multicast already, + so don't bother with another ZIP GetNetInfo request */ + ZIPwakeup(elapp, 0); + } +} /* elap_online2 */ + +/* called from rtmp_router_start */ +int elap_online3(elapp) + at_ifaddr_t *elapp; +{ + elapp->startup_inprogress = TRUE; + + /* just reset the net range */ + elapp->initial_addr.s_net = 0; + elapp->initial_addr.s_node = 0; + dPrintf(D_M_ELAP_LOW, D_L_STARTUP_INFO, + ("elap_online: goto re_aarp port=%d\n", elapp->ifPort)); + + if ((elapp->startup_error = re_aarp(elapp))) + return(elapp->startup_error); + + /* then later, after some timeouts AARPwakeup() is called */ + + (void)tsleep(&elapp->startup_inprogress, PSOCK | PCATCH, + "elap_online3", 0); + return(elapp->startup_error); +} /* elap_online3 */ + +/**************************************************************************** + * elap_offline() + * + ****************************************************************************/ + +void elap_offline(elapp) + register at_ifaddr_t *elapp; + +{ + void zip_sched_getnetinfo(); /* forward reference */ + int errno; + int s; + + dPrintf(D_M_ELAP, D_L_SHUTDN_INFO, ("elap_offline:%s\n", elapp->ifName)); + if (elapp->ifState != LAP_OFFLINE) { + + /* Since AppleTalk is going away, remove the 
cable + * multicast address and turn the interface off so that all + * AppleTalk packets are dropped in the driver itself. + * Get rid of the zone multicast address prior to going Offline. + */ + (void)at_unreg_mcast(elapp, (caddr_t)&elapp->ZoneMcastAddr); + (void)at_unreg_mcast(elapp, (caddr_t)&elapp->cable_multicast_addr); + elapp->ifState = LAP_OFFLINE; + + ATDISABLE(s, ddpinp_lock); + if (MULTIPORT_MODE) + RT_DELETE(elapp->ifThisCableEnd, + elapp->ifThisCableStart); + ATENABLE(s, ddpinp_lock); + + /* make sure no zip timeouts are left running */ + untimeout(zip_sched_getnetinfo, elapp); + } + ddp_rem_if(elapp); +} /* elap_offline */ + + +static void add_route(rt) +RT_entry *rt; + +/* support ioctl to manually add routes to table. + this is really only for testing +*/ +{ + rt_insert( rt->NetStop, rt->NetStart, rt->NextIRNet, + rt->NextIRNode, rt->NetDist, rt->NetPort, + rt->EntryState); + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, ("adding route: %ud:%ud dist:%ud\n", + rt->NetStart, rt->NetStop,rt->NetDist)); +} + +/* + * ddp_start() + * + * Initialization that takes place each time AppleTalk is restarted. + * + */ +void ddp_start() +{ + TAILQ_INIT(&at_ifQueueHd); + TAILQ_INIT(&name_registry); + bzero(at_interfaces, sizeof(at_interfaces)); + bzero(ifID_table, sizeof(ifID_table)); + bzero(&at_ddp_stats, sizeof(at_ddp_stats_t)); + rtmp_init(); /* initialize trackedrouters */ + + add_ddp_handler(RTMP_SOCKET, rtmp_input); + ifID_home = (at_ifaddr_t *)NULL; + xpatcnt = 0; +} + +int ddp_shutdown(count_only) + int count_only; +{ + at_ifaddr_t *ifID; + asp_scb_t *scb, *scb_next; + struct atp_state *atp, *atp_next; + CCB *sp, *sp_next; + gref_t *gref; + int i, s, + active_skts = 0; /* count of active pids for non-socketized + AppleTalk protocols */ + extern int aarp_sched_probe(); + + + /* Network is shutting down... send error messages up on each open + * socket. 
+ *** For now, for ASP, ATP and ADSP, attempt to notify open + sockets, but return EBUSY and don't complete shutdown. *** + */ + + s = splimp(); /* *** previously contained mismatched locking + that was ifdef'ed to splimp() *** */ + if (!count_only) + nbp_shutdown(); /* clear all known NVE */ + + /* ASP */ + for (scb = scb_used_list; scb; ) { + scb_next = scb->next_scb; + active_skts++; + if (!count_only) { + dPrintf(D_M_ASP, D_L_TRACE, ("asp pid=%d\n", scb->pid)); + atalk_notify(scb->gref, ESHUTDOWN); + } + scb = scb_next; + } + for (i = 0; i < 256 ; i++) { + if ((scb = asp_scbQ[i])) + do { + scb_next = scb->next_scb; + active_skts++; + if (!count_only) { + dPrintf(D_M_ASP, D_L_TRACE, + ("asp pid=%d\n", scb->pid)); + atalk_notify(scb->gref, ESHUTDOWN); + } + scb = scb_next; + } while (scb); + } + + /* ATP */ + for (atp = atp_used_list; atp; ) { + atp_next = atp->atp_trans_waiting; + active_skts++; + if (!count_only) { + dPrintf(D_M_ATP, D_L_TRACE, ("atp pid=%d\n", atp->atp_pid)); + atalk_notify(atp->atp_gref, ESHUTDOWN); + } + atp = atp_next; + } + for (i = 0; i < 256; i++) { + if ((gref = atp_inputQ[i]) && (gref != (gref_t *)1)) { + atp = (struct atp_state *)gref->info; + if (!atp->dflag) { + active_skts++; + if (!count_only) { + dPrintf(D_M_ATP, D_L_TRACE, + ("atp pid=%d\n", atp->atp_pid)); + atalk_notify(atp->atp_gref, ESHUTDOWN); + } + } + } + } + + /* ADSP */ + for (sp = ccb_used_list; sp ; ) { + sp_next = sp->otccbLink; + active_skts++; + if (!count_only) { + dPrintf(D_M_ADSP, D_L_TRACE, ("adsp pid=%d\n", sp->pid)); + atalk_notify(sp->gref, ESHUTDOWN); + } + sp = sp_next; + } + for (i = 0; i < 256 ; i++) { + if ((sp = adsp_inputQ[i])) + do { + sp_next = sp->otccbLink; + active_skts++; + if (!count_only) { + dPrintf(D_M_ADSP, D_L_TRACE, + ("adsp pid=%d\n", sp->pid)); + atalk_notify(sp->gref, ESHUTDOWN); + } + sp = sp_next; + } while (sp); + } + + /* DDP */ + for (gref = ddp_head.atpcb_next; gref != &ddp_head; + gref = gref->atpcb_next) { + if (count_only) { + 
active_skts++; + } else { + dPrintf(D_M_DDP,D_L_TRACE, ("ddp pid=%d\n", gref->pid)); + atalk_notify(gref, ESHUTDOWN); + } + } + if (count_only || active_skts) { + splx(s); + return(active_skts); + + } + /* if there are no interfaces in the process of going online, continue shutting down DDP */ + for (i = 0; i < IF_TOTAL_MAX; i++) { + if (at_interfaces[i].startup_inprogress == TRUE) + return(1); + } + if (MULTIPORT_MODE) { + rtmp_shutdown(); + /* free memory allocated for the rtmp/zip tables */ + if (ZT_table) { + FREE(ZT_table, M_RTABLE); + ZT_table = (ZT_entry *)NULL; + } + if (RT_table) { + FREE(RT_table, M_RTABLE); + RT_table = (RT_entry *)NULL; + } + } + + at_state.flags = 0; /* make sure inits are done on restart */ + + /* from original ddp_shutdown() */ + routershutdown(); + ddp_brt_shutdown(); + + if (adspInited) { + CleanupGlobals(); + adspInited = 0; + } + dPrintf(D_M_DDP, D_L_VERBOSE, ("DDP shutdown completed")); + + /* + * make sure we don't have a probe timeout hanging around + * it's going to try and make use of an entry in at_interfaces + * which is going to be zero'd out by the call to ddp_start a + * little further down + */ + untimeout(aarp_sched_probe, 0); + + /* *** after an SIOCSIFADDR and before an AIOCSIFADDR, + this is the only place to find the ifID *** */ + for (i = 0; i < IF_TOTAL_MAX; i++) { + ifID = &at_interfaces[i]; + /* do LAP_IOC_OFFLINE processing */ + elap_offline(ifID); + } + ddp_start(); + + splx(s); + return(0); +} /* ddp_shutdown */ + +int routerStart(keP) + at_kern_err_t *keP; +{ + register at_ifaddr_t *ifID; + int error; + + if (! 
ifID_home) + return(EINVAL); + + /* + * this will cause the ports to glean from the net the relevant + * information before forwarding + */ + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, + ("routerStart Port %d (%s) set to activating\n", + ifID->ifPort, ifID->ifName)); + ifID->ifRoutingState = PORT_ACTIVATING; + ifID->ifFlags |= RTR_XNET_PORT; + } + + /* + * The next step is to check the information for each port before + * declaring the ports up and forwarding + */ + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, + ("router_start: waiting 20 sec before starting up\n")); + + /* sleep for 20 seconds */ + if ((error = + /* *** eventually this will be the ifID for the interface + being brought up in router mode *** */ + tsleep(&ifID_home->startup_inprogress, + PSOCK | PCATCH, "routerStart", 20 * SYS_HZ)) + != EWOULDBLOCK) { +/* + if (!error) + panic("routerStart: spurious interrupt"); +*/ + return(error); + } + + return(rtmp_router_start(keP)); + /* was timeout(rtmp_router_start, 0, 20 * SYS_HZ); */ +} /* routerStart */ + +void ZIPwakeup(elapp, ZipError) + at_ifaddr_t *elapp; + int ZipError; +{ + int s, error = ZipError; + + ATDISABLE(s, ddpinp_lock); + if ( (elapp != NULL) && elapp->startup_inprogress) { + ATENABLE(s, ddpinp_lock); + + /* was ZIPContinue */ + /* was elapp_online() with jump to ZIP_sleep */ + + /* instead of the goto ZIP_sleep ... 
*/ + switch (ZipError) { + case 0 : /* success */ + elapp->ifState = LAP_ONLINE; + break; + case ZIP_RE_AARP : + /* instead of goto re_aarp; */ + /* We now call aarp_init() to assign an + appletalk node addr */ + if ((elapp->startup_error = re_aarp(elapp))) { + elapp->startup_inprogress = FALSE; + thread_wakeup(&elapp->startup_inprogress); + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, + ("elap_online: ack 2\n")); + } + break; + default : + break; + } + if (ZipError != ZIP_RE_AARP) { + elapp->startup_error = error; + elapp->startup_inprogress = FALSE; + thread_wakeup(&elapp->startup_inprogress); + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, + ("elap_online: ifZipError=%d\n", error)); + } + } else + ATENABLE(s, ddpinp_lock); +} /* ZIPwakeup */ + +void AARPwakeup(probe_cb) + aarp_amt_t *probe_cb; +{ + int s; + int errno; + at_ifaddr_t *elapp; + + ATDISABLE(s, arpinp_lock); + elapp = probe_cb->elapp; + if ( (elapp != NULL) && elapp->startup_inprogress ) { + ATENABLE(s, arpinp_lock); + + /* was AARPContinue */ + errno = aarp_init2(elapp); + /* aarp_init2() returns either -1 or 0 */ + if (errno != 0) { + dPrintf(D_M_ELAP, D_L_STATE_CHG, + ("elap_online aarp_init for %s\n", + elapp->ifName)); + (void)at_unreg_mcast(elapp, (caddr_t)&elapp->ZoneMcastAddr); + (void)at_unreg_mcast(elapp, (caddr_t)&elapp->cable_multicast_addr); + elapp->ifState = LAP_OFFLINE; + ddp_rem_if(elapp); + elapp->startup_error = EADDRNOTAVAIL; + elapp->startup_inprogress = FALSE; + thread_wakeup(&elapp->startup_inprogress); + dPrintf(D_M_ELAP, D_L_STARTUP_INFO, ("elap_online: ack 2\n")); + } else { + dPrintf(D_M_ELAP,D_L_STARTUP_INFO, + ("elap_online: aarp_init returns zero\n")); + elap_online2(elapp); + } + } else + ATENABLE(s, arpinp_lock); +} /* AARPwakeup */ + +void ddp_bit_reverse(addr) + unsigned char *addr; +{ +static unsigned char reverse_data[] = { + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 
0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff + }; + + unsigned char k; + + for (k=0; k < 6; k++) + addr[k] = reverse_data[addr[k]]; +} + +static int elap_trackMcast(patp, func, addr) + at_ifaddr_t *patp; + int func; + caddr_t addr; +{ + int i, loc=-1; + u_char c; + switch(patp->aa_ifp->if_type) { + case IFT_ETHER: + case IFT_FDDI: + /* set addr to point to unique part of addr */ + c = addr[5]; + + /* first try to find match */ + /* *** save just one byte of the multicast address? 
*** */ + for (i=0; i< MAX_MCASTS; i++) + if (c == patp->mcast[i]) { + loc = i; + break; + } + + switch (func) { + case MCAST_TRACK_DELETE: + if (loc >= 0) + patp->mcast[loc] = 0; + + break; + case MCAST_TRACK_ADD: + dPrintf(D_M_PAT_LOW, D_L_USR2, ("mctrack:add loc:%d\n", i)); + if (loc >= 0) { + dPrintf(D_M_PAT_LOW, D_L_USR2, ("mctrack:add, addr was there\n")); + return(1); + break; /* already there */ + } + for (i=0; i< MAX_MCASTS; i++) + if (patp->mcast[i] == 0) { + loc = i; + break; + } + dPrintf(D_M_PAT_LOW, D_L_USR2, ("mctrack:add1 loc:%d\n", i)); + if (loc >= 0) { + patp->mcast[loc] = c; + dPrintf(D_M_PAT_LOW, D_L_USR2, ("mctrack:add, adding(%x)\n", + (*(int*)addr)&0xffffff)); + } + else { + /*errno = ENOMEM; */ /*LD 5/7/97 nobody is using that */ + return(-1); + } + break; + case MCAST_TRACK_CHECK: + if (loc >= 0) { + dPrintf(D_M_PAT_LOW, D_L_USR2, ("mctrack:check, addr was there\n")); + return(0); + } + else { + dPrintf(D_M_PAT_LOW, D_L_USR2, ("mctrack:add, addr was NOT there\n")); + return(-1); + } + + default: + /*errno = EINVAL;*/ /*LD 5/7/97 nobody is using that */ + return(-1); + } + + case IFT_ISO88025: /* token ring */ + /* we would use the lowest byte of the addr argument as a value + to shift left a 1 to form the mcast mask for TR. 
We'll do this + when the time comes + */ + default: + ; + } + return(0); +} + + +static getSnmpCfg(snmp) + snmpCfg_t *snmp; +{ + int i; + at_ifaddr_t *elapp; + snmpIfCfg_t *ifc; + + snmp->cfg_ifCnt = 0; + + bzero(snmp,sizeof(snmpCfg_t)); + for (i=0, elapp=at_interfaces,ifc=snmp->cfg_ifCfg; + i<IF_TOTAL_MAX; i++, elapp++, ifc++) { + if (elapp->ifState != LAP_OFFLINE) { + snmp->cfg_ifCnt++; + strncpy(ifc->ifc_name,elapp->ifName, sizeof(ifc->ifc_name)); + ifc->ifc_aarpSize = getAarpTableSize(i); + ifc->ifc_addrSize = getPhysAddrSize(i); + switch (elapp->aa_ifp->if_type) { + case IFT_ETHER: + ifc->ifc_type = SNMP_TYPE_ETHER2; + break; + case IFT_ISO88025: /* token ring */ + ifc->ifc_type = SNMP_TYPE_TOKEN; + break; + case IFT_FDDI: + default: + ifc->ifc_type = SNMP_TYPE_OTHER; + break; + } + ifc->ifc_start = elapp->ifThisCableStart; + ifc->ifc_end = elapp->ifThisCableEnd; + ifc->ifc_ddpAddr= elapp->ifThisNode; + ifc->ifc_status = elapp->ifState == LAP_ONLINE ? 1 : 2; + ifc->ifc_zoneName.len = 0; + if (elapp->ifZoneName.len != 0) { + ifc->ifc_zoneName = elapp->ifZoneName; + } + else if (elapp->ifDefZone) { + ifc->ifc_zoneName = ZT_table[elapp->ifDefZone-1].Zone; + } + else /* temp, debug only */ + ifc->ifc_zoneName = ZT_table[0].Zone; + if (ROUTING_MODE) { + if (elapp->ifFlags & RTR_SEED_PORT) { + ifc->ifc_netCfg = SNMP_CFG_CONFIGURED; + ifc->ifc_zoneCfg = SNMP_CFG_CONFIGURED; + } + else { + ifc->ifc_netCfg = SNMP_CFG_GARNERED; + ifc->ifc_zoneCfg = SNMP_CFG_GARNERED; + } + } + else { /* single-port mode */ + if (elapp->ifRouterState == ROUTER_AROUND) { + ifc->ifc_netCfg = SNMP_CFG_GARNERED; + } + else { + ifc->ifc_netCfg = SNMP_CFG_GUESSED; + ifc->ifc_zoneCfg = SNMP_CFG_UNCONFIG; + } + } + } + } + snmp->cfg_flags = at_state.flags; + + + return(0); +} + +int at_reg_mcast(ifID, data) + at_ifaddr_t *ifID; + caddr_t data; +{ + struct ifnet *nddp = ifID->aa_ifp; + struct sockaddr sa; + + if (*(int *)data) { + if (!nddp) { + dPrintf(D_M_PAT, D_L_STARTUP, ("pat_mcast: BAD ndpp\n")); + return(-1); + } + + if 
(elap_trackMcast(ifID, MCAST_TRACK_ADD, data) == 1) + return(0); + + /* this is for ether_output */ + sa.sa_family = AF_UNSPEC; + sa.sa_len = 2 + sizeof(struct etalk_addr); + bcopy (data, &sa.sa_data[0], sizeof(struct etalk_addr)); + + dPrintf(D_M_PAT, D_L_STARTUP, + ("pat_mcast: adding multicast %08x%04x ifID:0x%x\n", + *(unsigned*)data, (*(unsigned *)(data+2))&0x0000ffff, + (unsigned)ifID)); + + if (if_addmulti(nddp, &sa, 0)) + return -1; + } + return 0; + +} + +int at_unreg_mcast(ifID, data) + at_ifaddr_t *ifID; + caddr_t data; +{ + struct ifnet *nddp = ifID->aa_ifp; + struct sockaddr sa; + + if (*(int *)data) { + if (!nddp) { + dPrintf(D_M_PAT, D_L_STARTUP, ("pat_mcast: BAD ndpp\n")); + return(-1); + } + + elap_trackMcast(ifID, MCAST_TRACK_DELETE, data); + + /* this is for ether_output */ + sa.sa_family = AF_UNSPEC; + sa.sa_len = 2 + sizeof(struct etalk_addr); + bcopy (data, &sa.sa_data[0], sizeof(struct etalk_addr)); + + dPrintf(D_M_PAT, D_L_STARTUP, + ("pat_mcast: deleting multicast %08x%04x ifID:0x%x\n", + *(unsigned*)data, (*(unsigned *)(data+2))&0x0000ffff, + (unsigned)ifID)); + bzero(data, sizeof(struct etalk_addr)); + + if (if_delmulti(nddp, &sa)) + return -1; + } + return 0; +} +#ifdef NOT_YET +/* *** at_reg_mcast() and at_unreg_mcast() should be replaced as soon as the + new code to allow an AF_LINK address family multicast to be (un)registered + using the SIOCADDMULTI / SIOCDELMULTI ioctls has been completed. + + The issue is that the "struct sockaddr_dl" needed for the AF_LINK does not + fit in the "struct ifreq" that is used for these ioctls, and we do not want + Blue/Classic, which currently uses AF_UNSPEC, to use a different address + family multicast address than Mac OS X uses. 
+ *** */ + +int at_reg_mcast(ifID, data) + at_ifaddr_t *ifID; + caddr_t data; +{ + struct ifnet *nddp = ifID->aa_ifp; + struct sockaddr_dl sdl; + + if (*(int *)data) { + if (!nddp) { + dPrintf(D_M_PAT, D_L_STARTUP, ("pat_mcast: BAD ndpp\n")); + return(-1); + } + if (elap_trackMcast(ifID, MCAST_TRACK_ADD, data) == 1) + return(0); + + sdl.sdl_len = sizeof(struct sockaddr_dl); + sdl.sdl_family = AF_LINK; + sdl.sdl_index = 0; + sdl.sdl_type = nddp->if_type; + sdl.sdl_alen = nddp->if_addrlen; + sdl.sdl_slen = 0; + sdl.sdl_nlen = sprintf(sdl.sdl_data, "%s%d", + nddp->if_name , nddp->if_unit); + bcopy(data, LLADDR(&sdl), sdl.sdl_alen); + + dPrintf(D_M_PAT, D_L_STARTUP, + ("pat_mcast: adding multicast %08x%04x ifID:0x%x\n", + *(unsigned*)data, (*(unsigned *)(data+2))&0x0000ffff, + (unsigned)ifID)); + + if (if_addmulti(nddp, (struct sockaddr *)&sdl, 0)) + return -1; + } + + return 0; +} + +int at_unreg_mcast(ifID, data) + at_ifaddr_t *ifID; + caddr_t data; +{ + struct ifnet *nddp = ifID->aa_ifp; + struct sockaddr_dl sdl; + + if (*(int *)data) { + if (!nddp) { + dPrintf(D_M_PAT, D_L_STARTUP, ("pat_mcast: BAD ndpp\n")); + return(-1); + } + + elap_trackMcast(ifID, MCAST_TRACK_DELETE, data); + + sdl.sdl_len = sizeof(struct sockaddr_dl); + sdl.sdl_family = AF_LINK; + sdl.sdl_index = 0; + sdl.sdl_type = nddp->if_type; + sdl.sdl_alen = nddp->if_addrlen; + sdl.sdl_slen = 0; + sdl.sdl_nlen = sprintf(sdl.sdl_data, "%s%d", + nddp->if_name , nddp->if_unit); + + dPrintf(D_M_PAT, D_L_STARTUP, + ("pat_mcast: deleting multicast %08x%04x ifID:0x%x\n", + *(unsigned*)data, (*(unsigned *)(data+2))&0x0000ffff, + (unsigned)ifID)); + bzero(data, ETHERNET_ADDR_LEN); + + if (if_delmulti(nddp, (struct sockaddr *)&sdl)) + return(-1); + } + + return 0; +} + +#endif diff --git a/bsd/netat/ddp_nbp.c b/bsd/netat/ddp_nbp.c new file mode 100644 index 000000000..dd77e5fd5 --- /dev/null +++ b/bsd/netat/ddp_nbp.c @@ -0,0 +1,1565 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989, 1997, 1998 Apple Computer, Inc. + * + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* router */ +#include +#include +#include + +/* reaching for DDP and NBP headers in the datagram */ +#define DATA_DDP(mp) ((at_ddp_t *)(gbuf_rptr(mp))) +#define DATA_NBP(mp) ((at_nbp_t *)((DATA_DDP(mp))->data)) + +/* Get to the nve_entry_t part ofthe buffer */ +#define NVE_ENTRY(mp) (nve_entry_t *)(gbuf_rptr(mp)) + +#ifndef MIN +#define MIN(a,b) ((a)>(b)?(b):(a)) +#endif + +#define errno nbperrno +#define NBP_DEBUG 0 + + /* externs */ +extern at_ifaddr_t *ifID_table[]; +extern at_ifaddr_t *ifID_home; + +TAILQ_HEAD(name_registry, _nve_) name_registry; + +atlock_t nve_lock; + +/* statics */ +static int errno; +static gbuf_t *lzones=0; /* head of local zones list */ +static int lzonecnt=0; /* # zones stored in lzones */ +static u_int hzonehash=0; /* hash val of home zone */ +static int nve_lock_pri; + +static int nbp_lkup_reply(nbp_req_t *, nve_entry_t *); +static int nbp_strcmp(at_nvestr_t *, at_nvestr_t *, u_char); +static int nbp_setup_resp(nbp_req_t *, int); +static int nbp_send_resp(nbp_req_t *); +static int nbp_validate_n_hash(nbp_req_t *, int, int); +static nve_entry_t *nbp_search_nve(); +static int isZoneLocal(at_nvestr_t *); + +/* macros */ +#define NVE_LOCK nve_lock + +/* prototypes */ +void nbp_delete_entry(); +extern int at_reg_mcast(); +extern at_nvestr_t *getRTRLocalZone(zone_usage_t *); +extern void nbp_add_multicast( at_nvestr_t *, at_ifaddr_t *); + +static long nbp_id_count = 0; + +void sethzonehash(elapp) + at_ifaddr_t *elapp; +{ + if (elapp->startup_zone.len) { + hzonehash = nbp_strhash(&elapp->startup_zone); + } +} + +void nbp_shutdown() +{ + /* delete all NVE's and release buffers */ + register nve_entry_t *nve_entry, *next_nve; + + ATDISABLE(nve_lock_pri,NVE_LOCK); + 
TAILQ_FOREACH(nve_entry, &name_registry, nve_link) { + nbp_delete_entry(nve_entry); + } + ATENABLE(nve_lock_pri,NVE_LOCK); + + if (lzones) { + gbuf_freem(lzones); + lzonecnt = 0; + lzones = NULL; + } +} /* nbp_shutdown */ + +static +u_char *nbp2zone(nbp, maxp) + at_nbp_t *nbp; + u_char *maxp; +{ + + u_char *p; + + p = (u_char*)&nbp->tuple[0].enu_entity; /* p -> object */ + if (p >= maxp) return NULL; + p += (*p +1); /* p -> type */ + if (p >= maxp) return NULL; + p += (*p +1); /* p -> zone */ + if (p >= maxp) return NULL; + if ((p + *p) >= maxp) return NULL; + return(p); +} + +void nbp_input(m, ifID) + register gbuf_t *m; + register at_ifaddr_t *ifID; + +{ + register at_ddp_t *ddp = DATA_DDP(m); + register at_nbp_t *nbp = DATA_NBP(m); + register nve_entry_t *nve_entry, *next_nve; + register RT_entry *rt; + register int ddpSent = FALSE; /* true if we re-sent this pkt (don't free) */ + struct etalk_addr mcastAddr; + nbp_req_t nbp_req; + u_char *p; + + /* from original nbp_input() when this function was nbp_handler() */ + if ((gbuf_type(m) != MT_DATA && gbuf_type(m) != MSG_DATA) || + ddp->type != DDP_NBP) { + gbuf_freem(m); + return; + } + + /* Some initializations */ + nbp_req.response = NULL; + nbp_req.request = m; + nbp_req.space_unused = nbp_req.flags = 0; + + dPrintf(D_M_NBP_LOW, D_L_USR1, + ("nbp_input control:%d tuplecount:%d id:%d\n", + nbp->control, nbp->tuple_count, nbp->at_nbp_id)); + switch (nbp->control) { + case NBP_LKUP : + { + at_net_al dst_net; + + dst_net = NET_VALUE(ddp->dst_net); + dPrintf(D_M_NBP_LOW, D_L_USR2, (" LKUP %s\n", + ifID != ifID_home ? 
"non-home" : "home")); + if ( ROUTING_MODE && (NET_VALUE(ddp->dst_net) != 0) + && ((dst_net < ifID->ifThisCableStart) + || (dst_net > ifID->ifThisCableEnd)) ) { + routing_needed(m, ifID, TRUE); + ddpSent = TRUE; + break; + } + } + + if (nbp_validate_n_hash (&nbp_req, TRUE, FALSE) == 0) { + nbp_req.func = nbp_lkup_reply; + (void) nbp_search_nve(&nbp_req, ifID); + if (nbp_req.response) { + nbp_send_resp(&nbp_req); + } + } +#ifdef NBP_DEBUG + { + char zone[35],object[35],type[35]; + strncpy(zone,nbp_req.nve.zone.str, nbp_req.nve.zone.len); + strncpy(object,nbp_req.nve.object.str, nbp_req.nve.object.len); + strncpy(type,nbp_req.nve.type.str, nbp_req.nve.type.len); + object[nbp_req.nve.object.len] = '\0'; + zone[nbp_req.nve.zone.len] = '\0'; + type[nbp_req.nve.type.len] = '\0'; + if (ifID != ifID_home) + dPrintf(D_M_NBP_LOW,D_L_USR2, + ("nbp_LKUP for:%s:%s@%s", object, type, zone)); + } +#endif /* NBP_DEBUG */ + + break; + case NBP_FWDRQ: + { + register int zhome=0; + /* true if home zone == destination zone */ + register int zno, i; + register gbuf_t *m2; + register error_found =0; + register at_ifaddr_t *ifIDorig; + + if (!ROUTING_MODE) /* for routers only! */ + break; + + ifIDorig = ifID; + ifID= NULL; + for (i = 0 ; i < RT_maxentry; i++) { + rt = &RT_table[i]; + if ((rt->EntryState & RTE_STATE_PERMANENT) && + NET_VALUE(ddp->dst_net) >= rt->NetStart && + NET_VALUE(ddp->dst_net) <= rt->NetStop + ) { + /* sanity check */ + if (rt->NetPort >= IF_TOTAL_MAX) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input:FWDREQ: bad port# from RT_table\n")); + error_found = TRUE; + break; + } + ifID = ifID_table[rt->NetPort]; + if (!ifID) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input:FWDREQ: ifID %s\n", + !ifID ? 
"not found" : "invalid")); + error_found = TRUE; + break; + } + if (ifID->ifState == LAP_OFFLINE) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input:FWDREQ: ifID offline (port %d)\n", + rt->NetPort)); + error_found = TRUE; + break; + } + break; + } + } + if (error_found) /* the port is not correct */ + break; + + if (!ifID) { /* this packet is not for us, let the routing engine handle it */ + routing_needed(m, ifIDorig, TRUE); + ddpSent= TRUE; + break; + } + + /* + * At this point, we have a valid Forward request for one of our + * directly connected port. Convert it to a NBP Lookup + */ + + nbp->control = NBP_LKUP; + NET_ASSIGN(ddp->dst_net, 0); + ddp->dst_node = 255; + + + /*### LD 01/18/94 Check if the dest is also the home zone. */ + + p = nbp2zone(nbp, gbuf_wptr(m)); + if ((p == NULL) || !(zno = zt_find_zname(p))) { + dPrintf(D_M_NBP,D_L_WARNING, + ("nbp_input: FWDRQ:zone not found\n")); + break; + } + if (isZoneLocal((at_nvestr_t*)p)) + zhome = TRUE; /* one of our ports is in destination zone */ + if (!zt_get_zmcast(ifID, p, &mcastAddr)) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input: FDWREQ:zt_get_zmcast error\n")); + break; + } + + + if (zhome) { /*### LD 01/18/95 In case our home is here, call back nbp */ + + if (!(m2 = (gbuf_t *)gbuf_copym((gbuf_t *)m))) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input: FWDRQ:gbuf_copym failed\n")); + break; + } + + ddp = DATA_DDP(m2); + nbp = DATA_NBP(m2); + nbp->control = NBP_LKUP; + NET_ASSIGN(ddp->dst_net, 0); + ddp->dst_node = 255; + dPrintf(D_M_NBP,D_L_INFO, + ("nbp_input: FWDRQ:loop back for us\n")); + nbp_input(m2, ifID_home); + } + + if (FDDI_OR_TOKENRING(ifID->aa_ifp->if_type)) + ddp_bit_reverse(&mcastAddr); + ddp_router_output(m, ifID, ET_ADDR,NULL,NULL, &mcastAddr); + ddpSent = TRUE; + } + break; + + case NBP_BRRQ: + { + register int zno; /* zone table entry numb */ + register int ztind; /* zone bitmap index into RT_entry */ + register int ztbit; /* zone bit to check within above index */ + register int zhome=0; /* true 
if home zone == destination zone */
+		register int i;
+		register gbuf_t *m2, *m3;
+		register int fromUs = FALSE;
+		register at_socket ourSkt;	/* originating skt */
+
+		/* for router & MH local only */
+		if ((!(MULTIHOME_MODE && FROM_US(ddp))) && !ROUTING_MODE) {
+			dPrintf(D_M_NBP,D_L_USR2,
+				("nbp_input: BRREQ:non router or MH local\n"));
+
+			break;
+		}
+		p = nbp2zone(nbp, gbuf_wptr(m));
+		if ((p == NULL) || !(zno = zt_find_zname(p))) {
+			break;
+		}
+		if (MULTIHOME_MODE && ifID->ifRouterState == NO_ROUTER) {
+			((at_nvestr_t*)p)->len = 1;
+			((at_nvestr_t*)p)->str[0] = '*';
+		}
+		if (isZoneLocal((at_nvestr_t*)p)) {
+			zhome = TRUE;	/* one of our ports is in destination zone */
+		}
+		if (FROM_US(ddp)){	/* save, before we munge it */
+			fromUs = TRUE;
+			ourSkt = ddp->src_socket;
+			dPrintf(D_M_NBP,D_L_USR2,
+				("nbp_input:BRRQ from us net:%d\n",
+				(int)NET_VALUE(ddp->src_net)));
+		}
+		/* from ZT_CLR_ZMAP */
+		i = zno - 1;
+		ztind = i >> 3;
+		ztbit = 0x80 >> (i % 8);
+		for (i=0,rt=RT_table; i<RT_maxentry; i++,rt++) {
+			if (!(rt->ZoneBitMap[ztind] & ztbit))	/* if zone not in route, skip*/
+				continue;
+/*			dPrintf(D_M_NBP, D_L_USR3,
+				("nbp_input: BRREQ: port:%d, entry %d\n",
+				rt->NetPort, i));
+*/
+
+			ifID = ifID_table[rt->NetPort];
+			if (!ifID) {
+				dPrintf(D_M_NBP, D_L_ERROR,
+					("nbp_input:BRRQ: ifID %s\n",
+					!ifID ?
"not found" : "invalid")); + break; + } + + ddp = DATA_DDP(m); + ddp->src_node = ifID->ifThisNode.s_node; + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + ddp->src_socket = NBP_SOCKET; + if (!(m2 = (gbuf_t *)gbuf_copym((gbuf_t *)m))) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input: BRREQ:gbuf_copym failed\n")); + break; + } + + ddp = DATA_DDP(m2); + nbp = DATA_NBP(m2); +/* nbp->tuple[0].enu_addr.socket = NBP_SOCKET; */ + if (MULTIHOME_MODE && fromUs ) { + /* set the return address of the lookup to that of the + interface it's going out on so that replies come back + on that net */ + dPrintf(D_M_NBP,D_L_USR3, + ("nbp_input: BRREQ: src changed to %d.%d.%d\n", + ifID->ifThisNode.s_net, + ifID->ifThisNode.s_node, ourSkt)); + nbp->tuple[0].enu_addr.net = ifID->ifThisNode.s_net; + nbp->tuple[0].enu_addr.node = ifID->ifThisNode.s_node; + nbp->tuple[0].enu_addr.socket = ourSkt; + ddp->src_socket = NBP_SOCKET; + } + else + dPrintf(D_M_NBP, D_L_USR3, + ("nbp_input: BRREQ: not from us\n")); + + dPrintf(D_M_NBP, D_L_USR3, + ("nbp_input dist:%d\n", rt->NetDist)); + if (rt->NetDist == 0) { /* if direct connect, *we* do the LKUP */ + nbp->control = NBP_LKUP; + NET_ASSIGN(ddp->dst_net, 0); + ddp->dst_node = 255; + if (!zt_get_zmcast(ifID, p, &mcastAddr)) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input: BRRQ:zt_get_zmcast error\n")); + break; + } + if (FDDI_OR_TOKENRING(ifID->aa_ifp->if_type)) + ddp_bit_reverse(&mcastAddr); + ddp_router_output(m2, ifID, ET_ADDR, NULL, NULL, &mcastAddr); + } + else { /* else fwd to router */ + ddp->dst_node = 0; + if (rt->NetStart == 0) /* if Ltalk */ + NET_ASSIGN(ddp->dst_net, rt->NetStop); + else + NET_ASSIGN(ddp->dst_net, rt->NetStart); + nbp->control = NBP_FWDRQ; + ddp_router_output(m2, ifID, AT_ADDR, + rt->NextIRNet, rt->NextIRNode, + NULL); + } + } + if (!zhome) + break; + + if (!(m3 = (gbuf_t *)gbuf_copym((gbuf_t *)m))) { + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input: BRREQ:gbuf_copym failed\n")); + break; + } + + ddp = DATA_DDP(m3); + nbp = 
DATA_NBP(m3); + + nbp->control = NBP_LKUP; + NET_ASSIGN(ddp->dst_net, 0); + ddp->dst_node = 255; + dPrintf(D_M_NBP,D_L_INFO, ("nbp_input: BRRQ:loop back for us\n")); + nbp_input(m3, ifID_home); + break; + } + + case NBP_LKUP_REPLY: + + if (!ROUTING_MODE) /* for routers only! */ + break; + + dPrintf(D_M_NBP,D_L_WARNING, + ("nbp_input: routing needed for LKUP_REPLY: from %d.%d\n", + NET_VALUE(ddp->src_net), ddp->src_node)); + routing_needed(m, ifID, TRUE); + ddpSent = TRUE; + break; + + default : + dPrintf(D_M_NBP,D_L_ERROR, + ("nbp_input: unhandled pkt: type:%d\n", nbp->control)); + + routing_needed(m, ifID, TRUE); + ddpSent = TRUE; + break; + } /* switch control */ + + if (!ddpSent) + gbuf_freem(m); + return; +} /* nbp_input */ + +static int nbp_validate_n_hash (nbp_req, wild_ok, checkLocal) + register nbp_req_t *nbp_req; + register int wild_ok; + register int checkLocal; /* if true check if local zone */ +{ + register at_nvestr_t *object, *type, *zone; + at_nbptuple_t *tuple; + register int i, part_wild; + + tuple = DATA_NBP(nbp_req->request)->tuple; + nbp_req->flags = 0; +#ifdef COMMENTED_OUT + { + int net,node,skt; + net = tuple->enu_addr.net; + node = tuple->enu_addr.node; + skt = tuple->enu_addr.socket; + dPrintf(D_M_NBP_LOW,D_L_USR4, + ("nbp_validate: tuple addr:%d:%d:%d\n",net,node,skt)); + } +#endif /* COMMENTED_OUT */ + + /* tuple is in the compressed (no "filler") format */ + object = (at_nvestr_t *)&tuple->enu_entity; + type = (at_nvestr_t *)(&object->str[object->len]); + zone = (at_nvestr_t *)(&type->str[type->len]); + + if (object->len > NBP_NVE_STR_SIZE || type->len > NBP_NVE_STR_SIZE || + zone->len > NBP_NVE_STR_SIZE) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_val_n_hash: bad str len\n")); + errno = EINVAL; + return (-1); + } + +#ifdef NBP_DEBUG + { + char xzone[35],xobject[35],xtype[35]; + strncpy(xzone,zone->str, zone->len); + strncpy(xobject,object->str, object->len); + strncpy(xtype,type->str, type->len); + xobject[object->len] = '\0'; + 
+		xzone[zone->len] = '\0';
+		xtype[type->len] = '\0';
+		dPrintf(D_M_NBP_LOW, D_L_USR4,
+			("nbp_validate: looking for %s:%s@%s\n",
+			xobject, xtype, xzone));
+	}
+#endif /* NBP_DEBUG */
+	/* Is this request for our zone ?? */
+	nbp_req->nve.zone.len = zone->len;
+	nbp_req->nve.zone_hash = 0;
+	bcopy(zone->str,nbp_req->nve.zone.str, zone->len);
+
+	if (checkLocal && !isZoneLocal(zone)) {
+		char str[35];
+		strncpy(str,zone->str,zone->len);
+		str[zone->len] = '\0';
+		dPrintf(D_M_NBP_LOW,D_L_WARNING,
+			("nbp_val_n_hash bad zone: %s\n", str));
+		errno = EINVAL;
+		return(-1);
+	}
+
+	if (!DEFAULT_ZONE(zone)) {
+		nbp_req->nve.zone_hash = nbp_strhash(& nbp_req->nve.zone);
+	}
+
+	nbp_req->nve.address = tuple->enu_addr;
+	nbp_req->nve.object.len = object->len;
+	nbp_req->nve.object_hash = 0;
+	if (object->len == 1 && (object->str[0] == NBP_ORD_WILDCARD ||
+		object->str[0] == NBP_SPL_WILDCARD)) {
+		if (wild_ok)
+			nbp_req->flags |= NBP_WILD_OBJECT;
+		else {
+			dPrintf(D_M_NBP_LOW, D_L_WARNING,
+				("nbp_val_n_hash: wild not okay\n"));
+			errno = EINVAL;
+			return (-1);
+		}
+	} else{
+		for (i = part_wild = 0; (unsigned) i < object->len; i++) {
+			if (object->str[i] == NBP_SPL_WILDCARD)
+				if (wild_ok)
+					if (part_wild) {
+						dPrintf(D_M_NBP_LOW, D_L_WARNING,
+							("nbp_val_n_hash: too many parts wild\n"));
+						errno = EINVAL;
+						return (-1);
+					} else
+						part_wild++;
+				else {
+					dPrintf(D_M_NBP_LOW, D_L_WARNING,
+						("nbp_val_n_hash: wild not okay2\n"));
+					errno = EINVAL;
+					return (-1);
+				}
+			nbp_req->nve.object.str[i] = object->str[i];
+		}
+		if (!part_wild)
+			nbp_req->nve.object_hash =
+				nbp_strhash(&nbp_req->nve.object);
+	}
+
+	nbp_req->nve.type.len = type->len;
+	nbp_req->nve.type_hash = 0;
+	if (type->len == 1 && (type->str[0] == NBP_ORD_WILDCARD ||
+		type->str[0] == NBP_SPL_WILDCARD)) {
+		if (wild_ok)
+			nbp_req->flags |= NBP_WILD_TYPE;
+		else {
+			dPrintf(D_M_NBP_LOW, D_L_WARNING,
+				("nbp_val_n_hash: wild not okay3\n"));
+			errno = EINVAL;
+			return (-1);
+		}
+	} else {
+		for (i = part_wild = 0; (unsigned)
i < type->len; i++) {
+			if (type->str[i] == NBP_SPL_WILDCARD)
+				if (wild_ok)
+					if (part_wild) {
+						dPrintf(D_M_NBP_LOW, D_L_WARNING,
+							("nbp_val_n_hash: too many parts wild2\n"));
+						errno = EINVAL;
+						return (-1);
+					} else
+						part_wild++;
+				else {
+					errno = EINVAL;
+					return (-1);
+				}
+			nbp_req->nve.type.str[i] = type->str[i];
+		}
+		if (!part_wild)
+			nbp_req->nve.type_hash =
+				nbp_strhash(&nbp_req->nve.type);
+	}
+#ifdef NBP_DEBUG
+	{
+		char zone[35],object[35],type[35];
+		strncpy(zone,nbp_req->nve.zone.str, nbp_req->nve.zone.len);
+		strncpy(object,nbp_req->nve.object.str, nbp_req->nve.object.len);
+		strncpy(type,nbp_req->nve.type.str, nbp_req->nve.type.len);
+		object[nbp_req->nve.object.len] = '\0';
+		zone[nbp_req->nve.zone.len] = '\0';
+		type[nbp_req->nve.type.len] = '\0';
+		dPrintf(D_M_NBP_LOW,D_L_USR4,
+			("nbp_validate: after hash: %s:%s@%s\n",
+			object, type, zone));
+	}
+#endif /* NBP_DEBUG */
+	return(0);
+} /* nbp_validate_n_hash */
+
+
+/* Upshifts in place */
+static void nbp_upshift (str, count)
+register u_char *str;
+register int count;
+{
+	register int i, j;
+	register u_char ch;
+	static unsigned char lower_case[] =
+		{0x8a, 0x8c, 0x8d, 0x8e, 0x96, 0x9a, 0x9f, 0xbe,
+		0xbf, 0xcf, 0x9b, 0x8b, 0x88, 0};
+	static unsigned char upper_case[] =
+		{0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0xae,
+		0xaf, 0xce, 0xcd, 0xcc, 0xcb, 0};
+
+	for (j=0 ; j<count ; j++) {
+		ch = str[j];
+		if (ch >= 'a' && ch <= 'z')
+			str[j] = ch + 'A' - 'a';
+		else if (ch & 0x80)
+			for (i=0; lower_case[i]; i++)
+				if (ch == lower_case[i])
+					str[j] = upper_case[i];
+	}
+}
+
+
+u_int nbp_strhash (nvestr)
+	register at_nvestr_t *nvestr;
+{
+	/* upshift while hashing */
+	register u_int hash = 0;
+	register int i, len;
+	union {
+		u_char h_4char[4];
+		int h_int;
+	} un;
+
+	for (i=0; (unsigned) i < nvestr->len; i+=sizeof(int)) {
+		len = MIN((nvestr->len-i), sizeof(int));
+		if (len == sizeof(int))
+			bcopy(&(nvestr->str[i]), &un, sizeof(un));
+		else {
+			un.h_int = -1;
+			for ( ; (unsigned) i < nvestr->len; i++)
+				un.h_4char[i % sizeof(int)] =
nvestr->str[i]; + } + nbp_upshift (un.h_4char, len); + hash ^= un.h_int; + } + + return (hash); +} /* nbp_strhash */ + +static nve_entry_t *nbp_search_nve (nbp_req, ifID) + register nbp_req_t *nbp_req; + register at_ifaddr_t *ifID; /* NULL ok */ +{ + register nve_entry_t *nve_entry; + +#ifdef NBP_DEBUG + { + char zone[35],object[35],type[35]; + strncpy(zone,nbp_req->nve.zone.str, nbp_req->nve.zone.len); + strncpy(object,nbp_req->nve.object.str, nbp_req->nve.object.len); + strncpy(type,nbp_req->nve.type.str, nbp_req->nve.type.len); + object[nbp_req->nve.object.len] = '\0'; + zone[nbp_req->nve.zone.len] = '\0'; + type[nbp_req->nve.type.len] = '\0'; + dPrintf(D_M_NBP_LOW, D_L_USR4, + ("nbp_search: looking for %s:%s@%s resp:0x%x\n",object,type,zone, + (u_int) nbp_req->response)); + } +#endif /* NBP_DEBUG */ + ATDISABLE(nve_lock_pri,NVE_LOCK); + TAILQ_FOREACH(nve_entry, &name_registry, nve_link) { + if ((nbp_req->nve.zone_hash) && + ((nbp_req->nve.zone_hash != + nve_entry->zone_hash) && + (nbp_req->nve.zone_hash != hzonehash) + ) + ) { + dPrintf(D_M_NBP_LOW,D_L_USR4, + ("nbp_search: no match for zone, req hash:%x\n", + nbp_req->nve.zone_hash)); + continue; + } + else { /* for this entry's zone OR no zone in request or entry */ + /* only in singleport mode (!MULTIPORT_MODE) with + empty PRAM can an entry have '*' for it's zone + */ + at_nvestr_t *ezone=&nve_entry->zone; + at_nvestr_t *rzone=&nbp_req->nve.zone; + if (!DEFAULT_ZONE(rzone) && !DEFAULT_ZONE(ezone)) { + if (nbp_strcmp (rzone, ezone, 0) != 0) + continue; + } + else { + if (MULTIHOME_MODE && ifID && + (nve_entry->address.net != + ifID->ifThisNode.s_net)) { + dPrintf(D_M_NBP, D_L_USR4, + ("nbp search ifID (%d) & req net (%d) not eq\n", + nve_entry->address.net, + ifID->ifThisNode.s_net)); + continue; + } + if (ifID) + dPrintf(D_M_NBP, D_L_USR4, + ("nbp search ifID (%d) & req net (%d) equal\n", + nve_entry->address.net, + ifID->ifThisNode.s_net)); + } + + } + if (!(nbp_req->flags & NBP_WILD_OBJECT)) { + if 
((nbp_req->nve.object_hash) && + (nbp_req->nve.object_hash != + nve_entry->object_hash)) + continue; + else { + if (nbp_strcmp (&nbp_req->nve.object, + &nve_entry->object, + NBP_SPL_WILDCARD) != 0) + continue; + } + } + + + if (!(nbp_req->flags & NBP_WILD_TYPE)) { + if ((nbp_req->nve.type_hash) && + (nbp_req->nve.type_hash !=nve_entry->type_hash)) + continue; + else { + if (nbp_strcmp (&nbp_req->nve.type, + &nve_entry->type, + NBP_SPL_WILDCARD) != 0) + continue; + } + } + + /* Found a match! */ +#ifdef NBP_DEBUG + { + char zone[35],object[35],type[35]; + + strncpy(zone,nbp_req->nve.zone.str, nbp_req->nve.zone.len); + strncpy(object,nbp_req->nve.object.str, nbp_req->nve.object.len); + strncpy(type,nbp_req->nve.type.str, nbp_req->nve.type.len); + object[nbp_req->nve.object.len] = '\0'; + zone[nbp_req->nve.zone.len] = '\0'; + type[nbp_req->nve.type.len] = '\0'; + dPrintf(D_M_NBP_LOW, D_L_USR2, + ("nbp_search: found %s:%s@%s net:%d\n", + object, type, zone, (int)nve_entry->address.net)); + } +#endif /* NBP_DEBUG */ + if (nbp_req->func != NULL) { + if ((*(nbp_req->func))(nbp_req, nve_entry) != 0) { + /* errno expected to be set by func */ + ATENABLE(nve_lock_pri,NVE_LOCK); + return (NULL); + } + } else { + ATENABLE(nve_lock_pri,NVE_LOCK); + return (nve_entry); + } + } + ATENABLE(nve_lock_pri,NVE_LOCK); + + errno = 0; + return (NULL); +} /* nbp_search_nve */ + +static int nbp_lkup_reply (nbp_req, nve_entry) +register nbp_req_t *nbp_req; +register nve_entry_t *nve_entry; +{ + register at_nbptuple_t *tuple; + register int tuple_size, buf_len; + register int obj_len, type_len; + u_char *p; + + /* size of the current tuple we want to write... */ + tuple_size = nve_entry->object.len + 1 + /* object */ + nve_entry->type.len + 1 + /* type */ + 2 + /* zone */ + sizeof (at_inet_t) + 1; /* addr + enum */ + + buf_len = ((nbp_req->flags & NBP_WILD_MASK) ? 
DDP_DATA_SIZE:tuple_size); + if (nbp_req->response == NULL) { + if (nbp_setup_resp (nbp_req, buf_len) != 0) + /* errno expected to be set by nbp_setup_resp() */ + return (-1); + } + + if ((nbp_req->space_unused < tuple_size) || + (DATA_NBP(nbp_req->response)->tuple_count == NBP_TUPLE_MAX)) { + if (nbp_send_resp (nbp_req) != 0) + return (-1); + if (nbp_setup_resp (nbp_req, buf_len) != 0) + return (-1); + } + + /* At this point, we have a response buffer that can accommodate the + * tuple we want to write. Write it! + */ + tuple = (at_nbptuple_t *)gbuf_wptr(nbp_req->response); + tuple->enu_addr = nve_entry->address; + tuple->enu_enum = nve_entry->enumerator; + + /* tuple is in the compressed (no "filler") format */ + p = (u_char *)&tuple->enu_entity.object; + obj_len = nve_entry->object.len + 1; + bcopy(&nve_entry->object, p, obj_len); + p += obj_len; + type_len = nve_entry->type.len + 1; + bcopy(&nve_entry->type, p, type_len); + p += type_len; + p[0] = (u_char)1; + p[1] = '*'; + + nbp_req->space_unused -= tuple_size; + gbuf_winc(nbp_req->response, tuple_size); + + /* increment the tuple count in header by 1 */ + DATA_NBP(nbp_req->response)->tuple_count++; + + return (0); +} + + +static int nbp_strcmp (str1, str2, embedded_wildcard) +register at_nvestr_t *str1, *str2; +register u_char embedded_wildcard; /* If str1 may contain a character + * that's to be treated as an + * embedded wildcard, this character + * is it. Making this special case + * since for zone names, squiggly + * equal is not to be treated as a + * wildcard. + */ +{ + u_char ch1,ch2; + register int i1, i2; + register int reverse = 0; + register int left_index; + + /* Embedded wildcard, if any, could only be in the first string (str1). + * returns 0 if two strings are equal (modulo case), -1 otherwise + */ + + if (str1->len == 0 || str2->len == 0) { + return (-1); + } + + /* Wildcards are not allowed in str2. 
+ * + * If str1 could potentially contain an embedded wildcard, since the + * embedded wildcard matches ZERO OR MORE characters, str1 can not be + * more than 1 character longer than str2. + * + * If str1 is not supposed to have embedded wildcards, the two strs + * must be of equal length. + */ + if ((embedded_wildcard && (str2->len < (unsigned) (str1->len-1))) || + (!embedded_wildcard && (str2->len != str1->len))) { + return (-1); + } + + for (i1 = i2 = left_index = 0; (unsigned) i1 < str1->len ;) { + ch1 = str1->str[i1]; + ch2 = str2->str[i2]; + + if (embedded_wildcard && (ch1==embedded_wildcard)) { + /* hit the embedded wild card... start comparing from + * the other end of the string. + */ + reverse++; + /* But, if embedded wildcard was the last character of + * the string, the two strings match, so return okay. + */ + if (i1 == str1->len-1) { + return (0); + } + + i1 = str1->len - 1; + i2 = str2->len - 1; + + continue; + } + + nbp_upshift(&ch1, 1); + nbp_upshift(&ch2, 1); + + if (ch1 != ch2) { + return (-1); + } + + if (reverse) { + i1--; i2--; + if (i1 == left_index) { + return (0); + } + } else { + i1++; i2++; left_index++; + } + } + return (0); +} + + +static void nbp_setup_hdr (nbp_req) +register nbp_req_t *nbp_req; +{ + register at_ddp_t *ddp; + register at_nbp_t *nbp; + + ddp = DATA_DDP(nbp_req->response); + nbp = DATA_NBP(nbp_req->response); + + ddp->type = DDP_NBP; + UAS_ASSIGN(ddp->checksum, 0); + ddp->unused = ddp->hopcount = 0; + + switch(DATA_NBP(nbp_req->request)->control) { + case NBP_LKUP : + ddp->dst_socket = nbp_req->nve.address.socket; + ddp->dst_node = nbp_req->nve.address.node; + NET_ASSIGN(ddp->dst_net, nbp_req->nve.address.net); + nbp->control = NBP_LKUP_REPLY; + break; + } + nbp->at_nbp_id = DATA_NBP(nbp_req->request)->at_nbp_id; + return; +} + + +static int nbp_setup_resp (nbp_req, tuples_size) +register nbp_req_t *nbp_req; +register int tuples_size; +{ + int buf_size = tuples_size + DDP_X_HDR_SIZE + NBP_HDR_SIZE; + nbp_req->response = 
gbuf_alloc(AT_WR_OFFSET+buf_size, PRI_MED); + if (nbp_req->response == NULL) { + errno = ENOBUFS; + return(-1); + } + gbuf_rinc(nbp_req->response, AT_WR_OFFSET); + gbuf_wset(nbp_req->response, DDP_X_HDR_SIZE + NBP_HDR_SIZE); + nbp_setup_hdr(nbp_req); + + DATA_NBP(nbp_req->response)->tuple_count = 0; + nbp_req->space_unused = tuples_size; + + return (0); +} /* nbp_setup_resp */ + + +static int nbp_send_resp (nbp_req) +register nbp_req_t *nbp_req; +{ + int status; + + status = ddp_output(&nbp_req->response, (at_socket)NBP_SOCKET, FALSE); + nbp_req->response = NULL; + errno = status; + return(errno?-1:0); +} + +void nbp_add_multicast(zone, ifID) + at_nvestr_t *zone; + at_ifaddr_t *ifID; +{ + char data[ETHERNET_ADDR_LEN]; + int i; + + if (zone->str[0] == '*') + return; + + { + char str[35]; + strncpy(str,zone->str,zone->len); + str[zone->len] = '\0'; + dPrintf(D_M_NBP_LOW, D_L_USR3, + ("nbp_add_multi getting mc for %s\n", str)); + } + zt_get_zmcast(ifID, zone, data); + if (FDDI_OR_TOKENRING(ifID->aa_ifp->if_type)) + ddp_bit_reverse(data); + dPrintf(D_M_NBP_LOW,D_L_USR3, + ("nbp_add_multi adding 0x%x%x port:%d ifID:0x%x if:%s\n", + *(unsigned*)data, (*(unsigned *)(data+2))&0x0000ffff, + i, (u_int) ifID, ifID->ifName)); + + bcopy((caddr_t)data, (caddr_t)&ifID->ZoneMcastAddr, ETHERNET_ADDR_LEN); + (void)at_reg_mcast(ifID, (caddr_t)&ifID->ZoneMcastAddr); +} + + +getNbpTableSize() + +/* for SNMP, returns size in # of entries */ +{ + register nve_entry_t *nve; + register int i=0; + + ATDISABLE(nve_lock_pri,NVE_LOCK); + for (nve = TAILQ_FIRST(&name_registry); nve; nve = TAILQ_NEXT(nve, nve_link), i++) + i++; + ATENABLE(nve_lock_pri,NVE_LOCK); + return(i); +} + +getNbpTable(p, s, c) + snmpNbpEntry_t *p; + int s; /* starting entry */ + int c; /* # entries to copy */ + +/* for SNMP, returns section of nbp table */ +{ + register nve_entry_t *nve; + register int i=0; + static int nextNo=0; /* entry that *next points to */ + static nve_entry_t *next = (nve_entry_t*)NULL; + + if (s 
&& next && nextNo == s) { + nve = next; + i = nextNo; + } + else + nve = TAILQ_FIRST(&name_registry); + + ATDISABLE(nve_lock_pri,NVE_LOCK); + for ( ; nve && c ; nve = TAILQ_NEXT(nve, nve_link), p++,i++) { + if (i>= s) { + p->nbpe_object = nve->object; + p->nbpe_type = nve->type; + c--; + } + } + ATENABLE(nve_lock_pri,NVE_LOCK); + if (nve) { + next = nve; + nextNo = i; + } else { + next = (nve_entry_t*)NULL; + nextNo = 0; + } +} + + +#define ZONES_PER_BLK 31 /* 31 fits within a 1k blk) */ +#define ZONE_BLK_SIZE ZONES_PER_BLK * sizeof(at_nvestr_t) + +int setLocalZones(newzones, size) + at_nvestr_t *newzones; + int size; +/* updates list of zones which are local to all active ports + missing zones are not deleted, only missing zones are added. +*/ +{ + int bytesread=0; /* #bytes read from tuple */ + int i=0, dupe; + gbuf_t *m; + at_nvestr_t *pnve, *pnew = newzones; + + if (!lzones) { + if(!(lzones = gbuf_alloc(ZONE_BLK_SIZE, PRI_MED))) + return(ENOBUFS); + gbuf_wset(lzones,0); + } + while (bytesread < size) { /* for each new zone */ + { + char str[35]; + strncpy(str,pnew->str,pnew->len); + str[pnew->len] = '\0'; + } + m = lzones; + pnve = (at_nvestr_t*)gbuf_rptr(m); + dupe = 0; + for (i=0; ilen != pnve->len) + continue; + if (pnew->len > NBP_NVE_STR_SIZE) { + return(0); + } + if (!strncmp(pnew->str, pnve->str, pnew->len)) { + dupe=1; + continue; + } + } + if (!dupe) { + /* add new zone */ + if (lzonecnt && !(lzonecnt%ZONES_PER_BLK)) { + if(!(gbuf_cont(m) = gbuf_alloc(ZONE_BLK_SIZE, PRI_MED))) + return(ENOBUFS); + gbuf_wset(gbuf_cont(m),0); + pnve = (at_nvestr_t*)gbuf_rptr(gbuf_cont(m)); + } + strncpy(pnve->str,pnew->str,pnew->len); + pnve->len = pnew->len; + lzonecnt++; + } + bytesread += (pnew->len+1); + pnew = (at_nvestr_t*) (((char *)pnew) + pnew->len + 1); + } + /* showLocalZones1(); */ + return(0); +} + +/********** +showLocalZones1() +{ + int i; + at_nvestr_t *pnve; + gbuf_t *m; + char str[35]; + + for (i=0; ; i++) { + if (!(pnve = getLocalZone(i))) { + break; + 
} + strncpy(str,pnve->str,pnve->len); + str[pnve->len] = '\0'; + } +} + +*********/ + +isZoneLocal(zone) +at_nvestr_t *zone; +{ + at_nvestr_t *pnve; + int i; + if (DEFAULT_ZONE(zone)) + return(1); + for (i=0; ; i++) { + if (!(pnve = getLocalZone(i))) + break; + if (!nbp_strcmp(pnve,zone,0)) + return(1); + } + return(0); +} + + +#define NULL_PNVESTR (at_nvestr_t *) 0 + +at_nvestr_t *getLocalZone(zno) + int zno; /* zone number in virtual list to + return, 0 for first zone */ +/* returns pointer to a new local zone number zno, + returns null when no zones left. +*/ +{ + zone_usage_t ifz; + ifz.zone_index = zno; + if (MULTIPORT_MODE) + return(getRTRLocalZone(&ifz)); + else + return(getSPLocalZone(zno)); +} + + +at_nvestr_t *getSPLocalZone(zno) + int zno; /* zone number in virtual list to + return, 0 for first zone */ +/* single port mode version */ +{ + int curz=0; /* current zone */ + gbuf_t *m; + at_nvestr_t *pnve; + + if (lzones) { + m = lzones; + pnve = (at_nvestr_t*)gbuf_rptr(m); + } + else + return(NULL_PNVESTR); + if ( zno>=lzonecnt ) + return(NULL_PNVESTR); + for (curz=0; curzlen > NBP_NVE_STR_SIZE) { + return(NULL_PNVESTR); + } + } + else + return(NULL_PNVESTR); + } + return(pnve); +} + +/* The following functions are used in name registration and removal */ + +int nbp_fillin_nve(entity, nve) + at_entity_t *entity; + nve_entry_t *nve; +{ + register int i; + + if (entity->object.len > NBP_NVE_STR_SIZE || + entity->type.len > NBP_NVE_STR_SIZE || + entity->zone.len > NBP_NVE_STR_SIZE) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_fillin_nve: bad str len\n")); + errno = EINVAL; + return (-1); + } + + nve->zone = entity->zone; + nve->zone_hash = 0; + if (!isZoneLocal(&entity->zone)) { + errno = EINVAL; + return(-1); + } + /* if there's no zone, '*' gets filled in when entry is created */ + if (!DEFAULT_ZONE(&entity->zone)) + nve->zone_hash = nbp_strhash(&nve->zone); + + nve->object = entity->object; + nve->object_hash = 0; + if (entity->object.len == 1 && + 
(entity->object.str[0] == NBP_ORD_WILDCARD || + entity->object.str[0] == NBP_SPL_WILDCARD)) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_fillin_nve: wildcard\n")); + errno = EINVAL; + return (-1); + } + for (i = 0; i < entity->object.len; i++) { + if (entity->object.str[i] == NBP_SPL_WILDCARD) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_fillin_nve: wildcard2\n")); + errno = EINVAL; + return (-1); + } + } + nve->object_hash = nbp_strhash(&nve->object); + + nve->type = entity->type; + nve->type_hash = 0; + if (entity->type.len == 1 && + (entity->type.str[0] == NBP_ORD_WILDCARD || + entity->type.str[0] == NBP_SPL_WILDCARD)) { + errno = EINVAL; + return (-1); + } + for (i = 0; i < entity->type.len; i++) { + if (entity->type.str[i] == NBP_SPL_WILDCARD) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_fillin_nve: wildcard3\n")); + errno = EINVAL; + return (-1); + } + } + nve->type_hash = nbp_strhash(&nve->type); + + return(0); +} /* nbp_fillin_nve */ + +nve_entry_t *nbp_find_nve(nve) + nve_entry_t *nve; +{ + register nve_entry_t *nve_entry; + + ATDISABLE(nve_lock_pri,NVE_LOCK); + TAILQ_FOREACH(nve_entry, &name_registry, nve_link) { + if (nve->zone_hash && + ((nve->zone_hash != nve_entry->zone_hash) && + (nve->zone_hash != hzonehash))) { + dPrintf(D_M_NBP_LOW,D_L_USR4, + ("nbp_find_nve: no match for zone, req hash:%x\n", + nve->zone_hash)); + continue; + } + + if ((nve->object_hash) && + (nve->object_hash != nve_entry->object_hash)) + continue; + + if ((nve->type_hash) && + (nve->type_hash != nve_entry->type_hash)) + continue; + + /* Found a match! 
*/ + ATENABLE(nve_lock_pri,NVE_LOCK); + return (nve_entry); + } + ATENABLE(nve_lock_pri,NVE_LOCK); + + return (NULL); +} /* nbp_find_nve */ + +static int nbp_enum_gen (nve_entry) + register nve_entry_t *nve_entry; +{ + register int new_enum = 0; + register nve_entry_t *ne; + + ATDISABLE(nve_lock_pri,NVE_LOCK); +re_do: + TAILQ_FOREACH(ne, &name_registry, nve_link) { + if ((*(int *)&ne->address == *(int *)&nve_entry->address) && + (ne->enumerator == new_enum)) { + if (new_enum == 255) { + ATENABLE(nve_lock_pri,NVE_LOCK); + return(EADDRNOTAVAIL); + } else { + new_enum++; + goto re_do; + } + } + } + + ATENABLE(nve_lock_pri,NVE_LOCK); + nve_entry->enumerator = new_enum; + return (0); +} + +int nbp_new_nve_entry(nve_entry, ifID) + nve_entry_t *nve_entry; + at_ifaddr_t *ifID; +{ + gbuf_t *tag; + nve_entry_t *new_entry; + at_nvestr_t *zone; + int error; + + if (!(valid_at_addr((at_inet_t *)&nve_entry->address))) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_new_nve_entry: valid_at_addr\n")); + return(EINVAL); + } + if ((error = nbp_enum_gen(nve_entry))) + return(error); + + nve_entry->unique_nbp_id = ++nbp_id_count; + + /* Got an nve entry on hand.... allocate a buffer, copy the entry + * on to it and stick it in the registry. 
+ */ + if ((tag = gbuf_alloc(sizeof(nve_entry_t), PRI_HI)) == NULL){ + return(ENOBUFS); + } + gbuf_wset(tag, sizeof(nve_entry_t)); + new_entry = (nve_entry_t *)gbuf_rptr(tag); + bcopy(nve_entry, new_entry, sizeof(nve_entry_t)); + + if (DEFAULT_ZONE(&nve_entry->zone)) { + /* put actual zone name in entry instead of "*" */ + /* if single port mode and no zone name, then a router + is down, so use pram zone name hint from elap cfg */ + if (!MULTIPORT_MODE && ifID_home->ifZoneName.str[0] == '*') { + zone = &ifID_home->startup_zone; + } else { + zone = &ifID_home->ifZoneName; + } + new_entry->zone = *zone; + if ( new_entry->zone.len == 0 ) { + new_entry->zone.str[0] = '*'; + new_entry->zone.len = 1; + } + new_entry->zone_hash = nbp_strhash(&new_entry->zone); + } + new_entry->tag = tag; + new_entry->pid = current_proc()->p_pid; + + ATDISABLE(nve_lock_pri,NVE_LOCK); + TAILQ_INSERT_TAIL(&name_registry, new_entry, nve_link); + ATENABLE(nve_lock_pri,NVE_LOCK); + at_state.flags |= AT_ST_NBP_CHANGED; + +#ifdef NBP_DEBUG + { + char zone[35],object[35],type[35]; + strncpy(zone,new_entry->zone.str, new_entry->zone.len); + strncpy(object,new_entry->object.str, new_entry->object.len); + strncpy(type,new_entry->type.str, new_entry->type.len); + object[new_entry->object.len] = '\0'; + zone[new_entry->zone.len] = '\0'; + type[new_entry->type.len] = '\0'; + dPrintf(D_M_NBP_LOW, D_L_USR4, + ("nbp_insert: adding %s:%s@%s addr:%d.%d ", + object, type, zone, + new_entry->address.net, new_entry->address.node)); + } +#endif /* NBP_DEBUG */ + + nbp_add_multicast(&new_entry->zone, ifID); + return (0); +} /* nbp_new_nve_entry */ + +void nbp_delete_entry (nve_entry) + nve_entry_t *nve_entry; +{ + TAILQ_REMOVE(&name_registry, nve_entry, nve_link); + gbuf_freem(nve_entry->tag); + at_state.flags |= AT_ST_NBP_CHANGED; +} + +/* Registration of an NBP entity in multihoming mode, from AIOCNBPREG + in at.c */ +int nbp_mh_reg(nbpP) + at_nbp_reg_t *nbpP; +{ + nve_entry_t nve; + at_ifaddr_t *ifID = 0; + 
int registered = 0; + int finished = FALSE; + + if (nbp_fillin_nve(&nbpP->name, &nve) != 0) { + /* bad tuple... */ + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_mh_reg: bad tuple\n")); + return(EINVAL); + } + nve.address = nbpP->addr; + nve.ddptype = nbpP->ddptype; + + if (DEFAULT_ZONE(&nbpP->name.zone)) { + /* multihoming mode with the default zone specified */ + + /* now find the matching interfaces */ + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (nbpP->addr.net || nbpP->addr.node) { + /* if address is specified */ + if ((nbpP->addr.net != ifID->ifThisNode.s_net || + nbpP->addr.node != ifID->ifThisNode.s_node)) + continue; + else + /* the address was specified, and + we found the matching interface */ + finished = TRUE; + } else { + /* address is not specified, so fill in + the address for the interface */ + nve.address.net = ifID->ifThisNode.s_net; + nve.address.node = ifID->ifThisNode.s_node; + } + nve.zone = ifID->ifZoneName; + nve.zone_hash = nbp_strhash(&nve.zone); + if (nbp_find_nve(&nve)) + continue; + if (nbp_new_nve_entry(&nve, ifID) == 0) + registered++; + } + if (registered && !nbpP->addr.net && !nbpP->addr.node) { + nbpP->addr.net = ifID_home->ifThisNode.s_net; + nbpP->addr.node = ifID_home->ifThisNode.s_node; + } + } else { + /* multihoming mode with a specific zone specified */ + /* see which segments (interfaces) are seeded for this zone */ + int zno; + char ifs_in_zone[IF_TOTAL_MAX]; + if (!(zno = zt_find_zname(&nve.zone))) { + dPrintf(D_M_NBP_LOW, D_L_WARNING, + ("nbp_mh_reg: didn't find zone name\n")); + return(EINVAL); + } + getIfUsage(zno-1, ifs_in_zone); + + /* now find the first matching interface */ + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (!ifs_in_zone[ifID->ifPort]) + /* zone doesn't match */ + continue; + else + /* the zone matches, so unless the + address is specified and doesn't + match, we only need to do this once */ + finished = TRUE; + + if (nbpP->addr.net || nbpP->addr.node) { + /* address is specified */ + 
finished = FALSE; + if ((nbpP->addr.net != ifID->ifThisNode.s_net || + nbpP->addr.node != ifID->ifThisNode.s_node)) + continue; + else + /* the address was specified, and + we found the matching interface */ + finished = TRUE; + } else { + /* address is not specified, so fill in + the address for the interface */ + nve.address.net = ifID->ifThisNode.s_net; + nve.address.node = ifID->ifThisNode.s_node; + } + if (nbp_find_nve(&nve)) + continue; + if (nbp_new_nve_entry(&nve, ifID) == 0) + registered++; + } + if (registered && !nbpP->addr.net && !nbpP->addr.node) { + nbpP->addr.net = ifID->ifThisNode.s_net; + nbpP->addr.node = ifID->ifThisNode.s_node; + } + } + nbpP->unique_nbp_id = (registered > 1)? 0: nve.unique_nbp_id; + + if (registered) + return(0); + else + return(EADDRNOTAVAIL); + +} /* nbp_mh_reg */ diff --git a/bsd/netat/ddp_proto.c b/bsd/netat/ddp_proto.c new file mode 100644 index 000000000..e9073ff41 --- /dev/null +++ b/bsd/netat/ddp_proto.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989, 1997, 1998 Apple Computer, Inc. 
+ * + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +/* ddp_proto.c: 2.0, 1.23; 10/18/93; Apple Computer, Inc. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +extern at_ifaddr_t *ifID_home; + +void ddp_putmsg(gref, mp) + gref_t *gref; + gbuf_t *mp; +{ + u_char socket; + register ioc_t *iocbp; + register int error; + at_ddp_t *ddp; + + switch(gbuf_type(mp)) { + case MSG_DATA : + /* If this message is going out on a socket that's not bound, + * nail it. + */ + ddp = (at_ddp_t *)gbuf_rptr(mp); + if ((ddp->type == DDP_ATP) || (ddp->type == DDP_ADSP)) { + if ((gref == 0) || (gref->lport == 0)) { + int src_addr_included = + ((ddp->type==DDP_ATP) && ddp->src_node)? 1 : 0; + (void)ddp_output(&mp, ddp->src_socket, + src_addr_included); + return; + } + } + + if (gref && (gref->lport == 0)) { + gbuf_freel(mp); + atalk_notify(gref, ENOTCONN); + return; + } + if ((error = ddp_output(&mp, gref->lport, 0)) != 0) { + if (gref) + atalk_notify(gref, error); + } + return; + + case MSG_IOCTL : + iocbp = (ioc_t *)gbuf_rptr(mp); + if (DDP_IOC_MYIOCTL(iocbp->ioc_cmd)) { + switch(iocbp->ioc_cmd) { + case DDP_IOC_GET_CFG : + /* Note that DDP_IOC_GET_CFG / AppleTalk ddp_config() + fills in the net and node of the ddp_addr_t param + with the net and node of the default interface, + not the net and node that has been bound, as + getsockname() and sockopt DDP_GETSOCKNAME do. 
+ */ +#ifdef APPLETALK_DEBUG + kprintf("ddp_putmsg: DDP_IOC_GET_CFG\n"); +#endif + if (gbuf_cont(mp)) + gbuf_freem(gbuf_cont(mp)); + if ((gbuf_cont(mp) = + gbuf_alloc(sizeof(at_inet_t), + PRI_MED)) == NULL) { + ioc_ack(ENOBUFS, mp, gref); + break; + } + { + /* *** was ddp_get_cfg() *** */ + ddp_addr_t *cfgp = + (ddp_addr_t *)gbuf_rptr(gbuf_cont(mp)); + cfgp->inet.net = ifID_home->ifThisNode.s_net; + cfgp->inet.node = ifID_home->ifThisNode.s_node; +#ifdef NOT_YET + cfgp->inet.net = gref->laddr.s_net; + cfgp->inet.node = gref->laddr.s_node; +#endif + cfgp->inet.socket = gref->lport; + cfgp->ddptype = gref->ddptype; + } + gbuf_wset(gbuf_cont(mp), sizeof(ddp_addr_t)); + iocbp->ioc_count = sizeof(ddp_addr_t); + ioc_ack(0, mp, gref); + break; + default: + ioc_ack(EINVAL, mp, gref); + break; + } + } else { + /* Unknown ioctl */ + ioc_ack(EINVAL, mp, gref); + } + break; + default : +#ifdef APPLETALK_DEBUG + kprintf("unexpected message type in ddp_putmsg: %d/n", + gbuf_type(mp)); +#endif + gbuf_freem(mp); + break; + } + return; +} /* ddp_putmsg */ + +gbuf_t *ddp_compress_msg(mp) +register gbuf_t *mp; +{ + register gbuf_t *tmp; + + while (gbuf_len(mp) == 0) { + tmp = mp; + mp = gbuf_cont(mp); + gbuf_freeb(tmp); + + if (mp == NULL) + break; + } + return (mp); +} diff --git a/bsd/netat/ddp_r_rtmp.c b/bsd/netat/ddp_r_rtmp.c new file mode 100644 index 000000000..4c74b7fae --- /dev/null +++ b/bsd/netat/ddp_r_rtmp.c @@ -0,0 +1,1629 @@ +/* + * Copyright (c) 1994, 1996-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*-------------------------------------------------------------------------- + * Router RTMP protocol functions: + * + * This file contains Routing specifics to handle RTMP packets and + * the maintenance of the routing table through.... + * + * The entry point for the rtmp input in ddp is valid only when we're + * running in router mode. + * + * + * 0.01 03/22/94 Laurent Dumont Creation + * Modified for MP, 1996 by Tuyen Nguyen + * Added AURP support, April 8, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ * + *------------------------------------------------------------------------- + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void (*ddp_AURPsendx)(); +extern at_ifaddr_t *aurp_ifID; +extern at_ifaddr_t *ifID_table[]; +extern at_ifaddr_t *ifID_home; + +/*DEBUG ONLY */ +static int dump_counter =0; +/*DEBUG ONLY */ + +static at_kern_err_t ke; + /* Used to record error discovered in rtmp_update() */ +gbuf_t *rtmp_prep_new_packet(); + +void rtmp_timeout(); +void rtmp_send_port(); +void rtmp_send_port_funnel(); +void rtmp_dropper(); +void rtmp_shutdown(); +static void rtmp_update(); +static void rtmp_request(); +extern int elap_online3(); + +extern pktsIn, pktsOut, pktsDropped, pktsHome; +extern short ErrorRTMPoverflow, ErrorZIPoverflow; +extern atlock_t ddpinp_lock; + +/* + * rtmp_router_input: function called by DDP (in router mode) to handle + * all incoming RTMP packets. Listen to the RTMP socket + * for all the connected ports. + * Switch to the relevant rtmp functions. + */ + +void rtmp_router_input(mp, ifID) + register gbuf_t *mp; + register at_ifaddr_t *ifID; +{ + register at_ddp_t *ddp = (at_ddp_t *)gbuf_rptr(mp); + /* NOTE: there is an assumption here that the + * DATA follows the header. 
*/ + + register at_net_al OurNet; + register at_node OurNode; + register at_net_al DstNet; + register at_node DstNode; + short tuples; + RT_entry *Entry; + + if (!ifID || (ifID->ifRoutingState < PORT_ACTIVATING)) { + gbuf_freem(mp); + return; + } + + + OurNet = ifID->ifThisNode.s_net; + OurNode = ifID->ifThisNode.s_node; + + + if (gbuf_type(mp) != MSG_DATA) { + + /* If this is a M_ERROR message, DDP is shutting down, + * nothing to do here...If it's something else, we don't + * understand what it is + */ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_router_input: Not an M_DATA type\n")); + gbuf_freem(mp); + return; + } + + DstNet = NET_VALUE(ddp->dst_net); + DstNode = ddp->dst_node; + + /* check the kind of RTMP packet we received */ + + switch (ddp->type) { + + case DDP_RTMP: + + tuples = gbuf_len(mp) - DDP_X_HDR_SIZE - RTMP_IDLENGTH; + /* + * we need to make sure that the size of 'tuples' is + * not less than or equal to 0 due to a bad packet + */ + if (tuples <= 0) { + gbuf_freem(mp); + break; + } + + if (tuples % 3) {/* not a valid RTMP data packet */ + gbuf_freem(mp); + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_input: bad number of tuple in RTMP packet\n")); + return; + } + + tuples = tuples / 3; + + rtmp_update(ifID, (at_rtmp *)ddp->data, tuples); + gbuf_freem(mp); + + break; + + case DDP_RTMP_REQ: + + /* we should treat requests a bit differently. + * - if the request if not for the port, route it and also respond + * for this port if not locally connected. + * - if the request for this port, then just respond to it. 
+ */ + + if (!ROUTING_MODE) { + gbuf_freem(mp); + return; + } + if (DstNode == 255) { + if (((DstNet >= CableStart) && (DstNet <= CableStop)) || + DstNet == 0) { + rtmp_request(ifID, ddp); + gbuf_freem(mp); + return; + } + else { + /* check if directly connected port */ + if ((Entry = rt_blookup(DstNet)) && + (Entry->NetDist == 0)) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_router_input: request for %d.%d, port %d\n", + DstNet, DstNode, Entry->NetPort)); + rtmp_request(ifID_table[Entry->NetPort], ddp); + gbuf_freem(mp); + return; + } + else { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_router_input: RTMP packet received for %d.%d, also forward\n", + NET_VALUE(ddp->dst_net),ddp->dst_node)); + routing_needed(mp, ifID, TRUE); + return; + } + } + } + else { + + if ((DstNode == OurNode) && (DstNet == OurNet)) { + rtmp_request(ifID, ddp); + gbuf_freem(mp); + return; + } + else { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_router_input: RTMP packet received for %d.%d, forward\n", + NET_VALUE(ddp->dst_net), ddp->dst_node)); + routing_needed(mp, ifID, TRUE); + } + } + + break; + + default: + + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_input: RTMP packet type=%d, route it\n", ddp->type)); + routing_needed(mp, ifID, TRUE); + break; + + } +} /* rtmp_router_input */ + +/* + * rtmp_update: + * + */ + +static void rtmp_update(ifID, rtmp, tuple_nb) + register at_ifaddr_t *ifID; + register at_rtmp *rtmp; + register short tuple_nb; +{ + register int PortFlags = ifID->ifFlags; + register at_rtmp_tuple *FirstTuple = (at_rtmp_tuple *)&rtmp->at_rtmp_id[1]; + register at_rtmp_tuple *SecondTuple = (at_rtmp_tuple *)&rtmp->at_rtmp_id[4]; + RT_entry NewRoute, *CurrentRoute; + + register u_char SenderNodeID = rtmp->at_rtmp_id[0]; + char *TuplePtr; + short state; + + + /* Make sure this an AppleTalk node sending us the RTMP packet */ + + if (rtmp->at_rtmp_id_length != 8) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update : RTMP ID not as expected Net=%d L=x%x\n", + 
NET_VALUE(rtmp->at_rtmp_this_net), rtmp->at_rtmp_id_length)); + return; + } + + /* + * If the port is activating, only take the Network range from the + * the RTMP packet received. + * Check if there is a conflict with our seed infos. + */ + + if (ifID->ifRoutingState == PORT_ACTIVATING) { + if (PortFlags & RTR_XNET_PORT) { + if ((PortFlags & RTR_SEED_PORT) && + ((CableStart != TUPLENET(FirstTuple)) || + (CableStop != TUPLENET(SecondTuple)))) { + ifID->ifRoutingState = PORT_ERR_SEED; + ke.error = KE_CONF_SEED_RNG; + ke.port1 = ifID->ifPort; + strncpy(ke.name1, ifID->ifName, sizeof(ke.name1)); + ke.net = NET_VALUE(rtmp->at_rtmp_this_net); + ke.node = SenderNodeID; + ke.netr1b = TUPLENET(FirstTuple); + ke.netr1e = TUPLENET(SecondTuple); + ke.netr2b = CableStart; + ke.netr2e = CableStop; + RouterError(ifID->ifPort, ERTR_SEED_CONFLICT); + return; + } + CableStart = TUPLENET(FirstTuple); + CableStop = TUPLENET(SecondTuple); +/* + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_update: Port #%d activating, set Cable %d-%d\n", + ifID->ifPort, CableStart, CableStop)); +*/ + } + else { /* non extended cable */ + if ((PortFlags & RTR_SEED_PORT) && + (ifID->ifThisCableEnd != NET_VALUE(rtmp->at_rtmp_this_net))) { + ke.error = KE_CONF_SEED1; + ke.port1 = ifID->ifPort; + strncpy(ke.name1, ifID->ifName,sizeof(ke.name1)); + ke.net = NET_VALUE(rtmp->at_rtmp_this_net); + ke.node = SenderNodeID; + ke.netr1e = ifID->ifThisCableEnd; + ifID->ifRoutingState = PORT_ERR_SEED; + RouterError(ifID->ifPort, ERTR_SEED_CONFLICT); + return; + } + CableStop = NET_VALUE(rtmp->at_rtmp_this_net); + CableStart = 0; + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_update: Port #%d NONX activating, set Cable %d-%d\n", + ifID->ifPort, CableStart, CableStop)); + } + } + + /* + * Perform a few sanity checks on the received RTMP data packet + */ + + if ((PortFlags & RTR_XNET_PORT) && (tuple_nb >= 2)) { + + /* The first tuple must be extended */ + + if (! 
TUPLERANGE(FirstTuple)) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: bad range value in 1st tuple =%d\n", + TUPLERANGE(FirstTuple))); + return; + } + + if (PortFlags & RTR_SEED_PORT) + if ((TUPLENET(FirstTuple) != CableStart) || + (TUPLENET(SecondTuple) != CableStop)) { + dPrintf(D_M_RTMP, D_L_WARNING, ("rtmp_update: conflict on Seed Port\n")); + ifID->ifRoutingState = PORT_ERR_CABLER; + ke.error = KE_CONF_SEED_NODE; + ke.port1 = ifID->ifPort; + strncpy(ke.name1, ifID->ifName,sizeof(ke.name1)); + ke.net = NET_VALUE(rtmp->at_rtmp_this_net); + ke.node = SenderNodeID; + ke.netr1b = TUPLENET(FirstTuple); + ke.netr1e = TUPLENET(SecondTuple); + ke.netr2b = CableStart; + ke.netr2e = CableStop; + RouterError(ifID->ifPort, ERTR_CABLE_CONFLICT); + return; + } + + /* check that the tuple matches the range */ + + if ((TUPLENET(SecondTuple) < TUPLENET(FirstTuple)) || + (TUPLENET(FirstTuple) == 0) || + (TUPLENET(FirstTuple) >= DDP_STARTUP_LOW) || + (TUPLENET(SecondTuple) == 0) || + (TUPLENET(SecondTuple) >= DDP_STARTUP_LOW)) { + + /* + * IS THIS NON-FATAL????? + */ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: STARTUP RANGE!!! 
1st %d-%d\n", + TUPLENET(FirstTuple), TUPLENET(SecondTuple))); + ifID->ifRoutingState = PORT_ERR_STARTUP; + ke.error = KE_SEED_STARTUP; + ke.port1 = ifID->ifPort; + strncpy(ke.name1, ifID->ifName,sizeof(ke.name1)); + ke.net = NET_VALUE(rtmp->at_rtmp_this_net); + ke.node = SenderNodeID; + RouterError(ifID->ifPort, ERTR_CABLE_STARTUP); + return; + } + + if (TUPLEDIST(FirstTuple) != 0) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: Invalid distance in 1st tuple\n")); + return; + } + + if (rtmp->at_rtmp_id[6] != RTMP_VERSION_NUMBER) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: Invalid RTMP version = x%x\n", + rtmp->at_rtmp_id[6])); + return; + } + + } + else { /* non extended interface or problem in tuple*/ + + if (PortFlags & RTR_XNET_PORT) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: invalid number of tuple for X-net\n")); + return; + } + + if (TUPLENET(FirstTuple) == 0) { /* non extended RTMP data */ + + if (rtmp->at_rtmp_id[3] > RTMP_VERSION_NUMBER) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: Invalid non extended RTMP version\n")); + return; + } + + } + else { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: version 1.0 non Xtended net not supported\n")); + ifID->ifRoutingState = PORT_ERR_BADRTMP; + ke.error = KE_BAD_VER; + ke.rtmp_id = rtmp->at_rtmp_id[6]; + ke.net = NET_VALUE(rtmp->at_rtmp_this_net); + ke.node = SenderNodeID; + RouterError(ifID->ifPort, ERTR_RTMP_BAD_VERSION); + return; + } + } + + NewRoute.NextIRNet = NET_VALUE(rtmp->at_rtmp_this_net); + NewRoute.NextIRNode = SenderNodeID; + NewRoute.NetPort = ifID->ifPort; + + /* + * Process the case where a non-seed port needs to acquire the right + * information. 
+ */ + + if (!(PortFlags & RTR_SEED_PORT) && (ifID->ifRoutingState == PORT_ACTIVATING)) { + dPrintf(D_M_RTMP_LOW, D_L_INFO, + ("rtmp_update: Port# %d, set non seed cable %d-%d\n", + ifID->ifPort, TUPLENET(FirstTuple), TUPLENET(SecondTuple))); + + if (PortFlags & RTR_XNET_PORT) { + NewRoute.NetStart = TUPLENET(FirstTuple); + NewRoute.NetStop = TUPLENET(SecondTuple); + ifID->ifThisCableStart = TUPLENET(FirstTuple); + ifID->ifThisCableEnd = TUPLENET(SecondTuple); + + } + else { + + NewRoute.NetStart = 0; + NewRoute.NetStop = NET_VALUE(rtmp->at_rtmp_this_net); + ifID->ifThisCableStart = NET_VALUE(rtmp->at_rtmp_this_net); + ifID->ifThisCableEnd = NET_VALUE(rtmp->at_rtmp_this_net); + } + /* + * Now, check if we already know this route, or we need to add it + * (or modify it in the table accordingly) + */ + + if ((CurrentRoute = rt_blookup(NewRoute.NetStop)) && + (CurrentRoute->NetStop == NewRoute.NetStop) && + (CurrentRoute->NetStart == NewRoute.NetStart)) { +/*LD 7/31/95 tempo########*/ + if (NewRoute.NetPort != CurrentRoute->NetPort) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: port# %d, not the port we waited for %d\n", + ifID->ifPort, CurrentRoute->NetPort)); + /* propose to age the entry we know... */ + + state = CurrentRoute->EntryState & 0x0F; + /* if entry has been updated recently, just clear the UPDATED + bit. 
if bit not set, then we can age the entry */ + if (state) + if (CurrentRoute->EntryState & RTE_STATE_UPDATED) { + CurrentRoute->EntryState &= ~RTE_STATE_UPDATED; + } + else { + state = state >> 1 ; /* decrement state */ + } + + CurrentRoute->EntryState = (CurrentRoute->EntryState & 0xF0) | state; + } + } + + else { /* add the new route */ + + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_update: P# %d, 1st tuple route not known, add %d-%d\n", + ifID->ifPort, NewRoute.NetStart, NewRoute.NetStop)); + + NewRoute.EntryState = RTE_STATE_GOOD|RTE_STATE_UPDATED; + NewRoute.NetDist = 0; + + if (rt_insert(NewRoute.NetStop, NewRoute.NetStart, 0, + 0, NewRoute.NetDist, NewRoute.NetPort, + NewRoute.EntryState) == (RT_entry *)NULL) + + ErrorRTMPoverflow = 1; + } + + } + + if (ifID->ifRoutingState == PORT_ACTIVATING) { + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_update: port activating, ignoring remaining tuples\n")); + return; + } + + /* + * Process all the tuples against our routing table + */ + + TuplePtr = (char *)FirstTuple; + + while (tuple_nb-- > 0) { + + if (TUPLEDIST(TuplePtr) == NOTIFY_N_DIST) { + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_update: Port# %d, Tuple with Notify Neighbour\n", + ifID->ifPort)); + NewRoute.NetDist = NOTIFY_N_DIST; + NewRoute.EntryState = RTE_STATE_BAD; + } + else { + NewRoute.NetDist = TUPLEDIST(TuplePtr) + 1; + NewRoute.EntryState = RTE_STATE_GOOD; + NewRoute.EntryState = RTE_STATE_GOOD|RTE_STATE_UPDATED; + } + + + if (TUPLERANGE(TuplePtr)) { /* Extended Tuple */ + + + NewRoute.NetStart = TUPLENET(TuplePtr); + TuplePtr += 3; + NewRoute.NetStop = TUPLENET((TuplePtr)); + TuplePtr += 3; + tuple_nb--; + + if ((NewRoute.NetDist == 0) || + (NewRoute.NetStart == 0) || + (NewRoute.NetStop == 0) || + (NewRoute.NetStop < NewRoute.NetStart) || + (NewRoute.NetStart >= DDP_STARTUP_LOW) || + (NewRoute.NetStop >= DDP_STARTUP_LOW)) { + + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: P# %d, non valid xtuple received [%d-%d]\n", + ifID->ifPort, NewRoute.NetStart, 
NewRoute.NetStop)); + + continue; + } + + } + else { /* Non Extended Tuple */ + + NewRoute.NetStart = 0; + NewRoute.NetStop = TUPLENET(TuplePtr); + + TuplePtr += 3; + + if ((NewRoute.NetDist == 0) || + (NewRoute.NetStop == 0) || + (NewRoute.NetStop >= DDP_STARTUP_LOW)) { + + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: P# %d, non valid tuple received [%d]\n", + ifID->ifPort, NewRoute.NetStop)); + + continue; + } + } + + if ((CurrentRoute = rt_blookup(NewRoute.NetStop))) { + /* found something... */ + + if (NewRoute.NetDist < 16 || + NewRoute.NetDist == NOTIFY_N_DIST ) { + + /* + * Check if the definition of the route changed + */ + + if (NewRoute.NetStop != CurrentRoute->NetStop || + NewRoute.NetStart != CurrentRoute->NetStart) { + + if (NewRoute.NetStop == CurrentRoute->NetStop && + NewRoute.NetStop == CurrentRoute->NetStart && + NewRoute.NetStart == 0) + + NewRoute.NetStart = NewRoute.NetStop; + + else if (NewRoute.NetStop == CurrentRoute->NetStop && + NewRoute.NetStart == NewRoute.NetStop && + CurrentRoute->NetStart == 0) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: Range %d-%d has changed to %d-%d Dist=%d\n", + CurrentRoute->NetStart, CurrentRoute->NetStop, + NewRoute.NetStart, NewRoute.NetStop, NewRoute.NetDist)); + NewRoute.NetStart = 0; + } + + else { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_update: Net Conflict Cur=%d, New=%d\n", + CurrentRoute->NetStop, NewRoute.NetStop)); + CurrentRoute->EntryState = + (CurrentRoute->EntryState & 0xF0) | RTE_STATE_BAD; + continue; + + } + } + + /* + * If we don't know the associated zones + */ + + if (!RT_ALL_ZONES_KNOWN(CurrentRoute)) { + + dPrintf(D_M_RTMP_LOW, D_L_INFO, + ("rtmp_update: Zone unknown for %d-%d state=0x%x\n", + CurrentRoute->NetStart, CurrentRoute->NetStop, + CurrentRoute->EntryState)); + + /* set the flag in the ifID structure telling + * that a scheduling of Zip Query is needed. 
+ */ + + ifID->ifZipNeedQueries = 1; + continue; + } + + if (((CurrentRoute->EntryState & 0x0F) <= RTE_STATE_SUSPECT) && + NewRoute.NetDist != NOTIFY_N_DIST) { + + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_update: update suspect entry %d-%d State=%d\n", + NewRoute.NetStart, NewRoute.NetStop, + (CurrentRoute->EntryState & 0x0F))); + + if (NewRoute.NetDist <= CurrentRoute->NetDist) { + CurrentRoute->NetDist = NewRoute.NetDist; + CurrentRoute->NetPort = NewRoute.NetPort; + CurrentRoute->NextIRNode = NewRoute.NextIRNode; + CurrentRoute->NextIRNet = NewRoute.NextIRNet; + CurrentRoute->EntryState = + (CurrentRoute->EntryState & 0xF0) | + (RTE_STATE_GOOD|RTE_STATE_UPDATED); + } + continue; + } + else { + + if (NewRoute.NetDist == NOTIFY_N_DIST) { + + CurrentRoute->EntryState = + (CurrentRoute->EntryState & 0xF0) | RTE_STATE_SUSPECT; + CurrentRoute->NetDist = NOTIFY_N_DIST; + continue; + } + } + + } + + + if ((NewRoute.NetDist <= CurrentRoute->NetDist) && (NewRoute.NetDist <16)) { + + /* Found a shorter or more recent Route, + * Replace with the New entryi + */ + + CurrentRoute->NetDist = NewRoute.NetDist; + CurrentRoute->NetPort = NewRoute.NetPort; + CurrentRoute->NextIRNode = NewRoute.NextIRNode; + CurrentRoute->NextIRNet = NewRoute.NextIRNet; + CurrentRoute->EntryState |= RTE_STATE_UPDATED; + + /* Can we consider now that the entry is updated? 
*/ + dPrintf(D_M_RTMP_LOW, D_L_INFO, + ("rtmp_update: Shorter route found %d-%d, update\n", + NewRoute.NetStart, NewRoute.NetStop)); + + if (ddp_AURPsendx && (aurp_ifID->ifFlags & AT_IFF_AURP)) + ddp_AURPsendx(AURPCODE_RTUPDATE, + (void *)&NewRoute, AURPEV_NetDistChange); + } + } + else { /* no entry found */ + + if (NewRoute.NetDist < 16 && NewRoute.NetDist != NOTIFY_N_DIST && + NewRoute.NextIRNet >= ifID->ifThisCableStart && + NewRoute.NextIRNet <= ifID->ifThisCableEnd) { + + NewRoute.EntryState = (RTE_STATE_GOOD|RTE_STATE_UPDATED); + + dPrintf(D_M_RTMP_LOW, D_L_INFO, + ("rtmp_update: NewRoute %d-%d Tuple #%d\n", + NewRoute.NetStart, NewRoute.NetStop, tuple_nb)); + + ifID->ifZipNeedQueries = 1; + + if (rt_insert(NewRoute.NetStop, NewRoute.NetStart, NewRoute.NextIRNet, + NewRoute.NextIRNode, NewRoute.NetDist, NewRoute.NetPort, + NewRoute.EntryState) == (RT_entry *)NULL) + ErrorRTMPoverflow = 1; + + else if (ddp_AURPsendx && (aurp_ifID->ifFlags & AT_IFF_AURP)) + ddp_AURPsendx(AURPCODE_RTUPDATE, + (void *)&NewRoute, AURPEV_NetAdded); + } + } + + } /* end of main while */ + ifID->ifRouterState = ROUTER_UPDATED; + if (ifID->ifZipNeedQueries) + zip_send_queries(ifID, 0, 0xFF); + +/* + timeout(rtmp_timeout, (caddr_t) ifID, 20*SYS_HZ); +*/ +} /* rtmp_update */ + +/* The RTMP validity timer expired, we need to update the + * state of each routing entry in the table + * because there is only one validity timer and it is always running, + * we can't just age all the entries automatically, as we might be + * aging entries that were just updated. So, when an entry is updated, + * the RTE_STATE_UPDATED bit is set and when the aging routine is called + * it just resets this bit if it is set, only if it is not set will the + * route actually be aged. + * Note there are 4 states for an entry, the state is decremented until + * it reaches the bad state. 
At this point, the entry is removed + * + * RTE_STATE_GOOD : The entry was valid (will be SUSPECT) + * RTE_STATE_SUSPECT: The entry was suspect (can still be used for routing) + * RTE_STATE_BAD : The entry was bad and is now deleted + * RTE_STATE_UNUSED : Unused or removed entry in the table + */ + +void rtmp_timeout(ifID) +register at_ifaddr_t *ifID; +{ + register u_char state; + register unsigned int s; + short i; + RT_entry *en = &RT_table[0]; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + if (ifID->ifRoutingState < PORT_ONLINE) { + (void) thread_funnel_set(network_flock, FALSE); + return; + } + + /* for multihoming mode, we use ifRouterState to tell if there + is a router out there, so we know when to use cable multicast */ + if (ifID->ifRouterState > NO_ROUTER) + ifID->ifRouterState--; + + ATDISABLE(s, ddpinp_lock); + for (i = 0 ; i < RT_maxentry; i++,en++) { + + /* we want to age "learned" nets, not directly connected ones */ + state = en->EntryState & 0x0F; + + + if (state > RTE_STATE_UNUSED && + !(en->EntryState & RTE_STATE_PERMANENT) && en->NetStop && + en->NetDist && en->NetPort == ifID->ifPort) { + + /* if entry has been updated recently, just clear the UPDATED + bit. if bit not set, then we can age the entry */ + if (en->EntryState & RTE_STATE_UPDATED) { + en->EntryState &= ~RTE_STATE_UPDATED; + continue; + } + else + state = state >> 1 ; /* decrement state */ + + if (state == RTE_STATE_UNUSED) {/* was BAD, needs to delete */ + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_timeout: Bad State for %d-%d (e#%d): remove\n", + en->NetStart, en->NetStop, i)); + + if (ddp_AURPsendx && (aurp_ifID->ifFlags & AT_IFF_AURP)) + ddp_AURPsendx(AURPCODE_RTUPDATE, + (void *)en, AURPEV_NetDeleted); + + /* then clear the bit in the table concerning this entry. 
+ If the zone Count reaches zero, remove the entry */ + + zt_remove_zones(en->ZoneBitMap); + + RT_DELETE(en->NetStop, en->NetStart); + } + else { + en->EntryState = (en->EntryState & 0xF0) | state; + dPrintf(D_M_RTMP, D_L_INFO, ("Change State for %d-%d to %d (e#%d)\n", + en->NetStart, en->NetStop, state, i)); + } + } + } + ATENABLE(s, ddpinp_lock); + timeout(rtmp_timeout, (caddr_t) ifID, 20*SYS_HZ); + + (void) thread_funnel_set(network_flock, FALSE); +} + +/* + * rtmp_prep_new_packet: allocate a ddp packet for RTMP use (reply to a RTMP request or + * Route Data Request, or generation of RTMP data packets. + * The ddp header is filled with relevant information, as well as + * the beginning of the rtmp packet with the following info: + * Router's net number (2bytes) + * ID Length = 8 (1byte) + * Router's node ID (1byte) + * Extended Range Start (2bytes) + * Range + dist (0x80) (1byte) + * Extended Range End (2bytes) + * Rtmp version (0x82) (1byte) + * + */ + +gbuf_t *rtmp_prep_new_packet (ifID, DstNet, DstNode, socket) +register at_ifaddr_t *ifID; +register at_net DstNet; +register u_char DstNode; +register char socket; + +{ + gbuf_t *m; + register at_ddp_t *ddp; + register char * rtmp_data; + + if ((m = gbuf_alloc(AT_WR_OFFSET+1024, PRI_HI)) == NULL) { + dPrintf(D_M_RTMP, D_L_WARNING, ("rtmp_new_packet: Can't allocate mblock\n")); + return ((gbuf_t *)NULL); + } + + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,DDP_X_HDR_SIZE + 10); + ddp = (at_ddp_t *)(gbuf_rptr(m)); + + /* + * Prepare the DDP header of the new packet + */ + + + ddp->unused = ddp->hopcount = 0; + + UAS_ASSIGN(ddp->checksum, 0); + + NET_NET(ddp->dst_net, DstNet); + ddp->dst_node = DstNode; + ddp->dst_socket = socket; + + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + ddp->src_node = ifID->ifThisNode.s_node; + ddp->src_socket = RTMP_SOCKET; + ddp->type = DDP_RTMP; + + /* + * Prepare the RTMP header (Router Net, ID, Node and Net Tuple + * (this works only if we are on an extended net) + */ + + 
rtmp_data = ddp->data; + + *rtmp_data++ = (ifID->ifThisNode.s_net & 0xff00) >> 8; + *rtmp_data++ = ifID->ifThisNode.s_net & 0x00ff ; + *rtmp_data++ = 8; + *rtmp_data++ = (u_char)ifID->ifThisNode.s_node; + *rtmp_data++ = (CableStart & 0xff00) >> 8; + *rtmp_data++ = CableStart & 0x00ff ; + *rtmp_data++ = 0x80; /* first tuple, so distance is always zero */ + *rtmp_data++ = (CableStop & 0xff00) >> 8; + *rtmp_data++ = CableStop & 0x00ff ; + *rtmp_data++ = RTMP_VERSION_NUMBER; + + return (m); + + +} +int rtmp_r_find_bridge(ifID, orig_ddp) +register at_ifaddr_t *ifID; +register at_ddp_t *orig_ddp; + +{ + gbuf_t *m; + register int size, status; + register at_ddp_t *ddp; + register char * rtmp_data; + RT_entry *Entry; + + + /* find the bridge for the querried net */ + + Entry = rt_blookup(NET_VALUE(orig_ddp->dst_net)); + + if (Entry == NULL) { + dPrintf(D_M_RTMP, D_L_WARNING, ("rtmp_r_find_bridge: no info for net %d\n", + NET_VALUE(orig_ddp->dst_net))); + return (1); + } + + + size = DDP_X_HDR_SIZE + 10 ; + if ((m = gbuf_alloc(AT_WR_OFFSET+size, PRI_HI)) == NULL) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_r_find_bridge: Can't allocate mblock\n")); + return (ENOBUFS); + } + + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,size); + ddp = (at_ddp_t *)(gbuf_rptr(m)); + + /* + * Prepare the DDP header of the new packet + */ + + ddp->unused = ddp->hopcount = 0; + + DDPLEN_ASSIGN(ddp, size); + UAS_ASSIGN(ddp->checksum, 0); + + NET_NET(ddp->dst_net, orig_ddp->src_net); + ddp->dst_node = orig_ddp->src_node; + ddp->dst_socket = orig_ddp->src_socket; + + NET_ASSIGN(ddp->src_net, Entry->NextIRNet); + ddp->src_node = Entry->NextIRNode; + ddp->src_socket = RTMP_SOCKET; + ddp->type = DDP_RTMP; + + /* + * Prepare the RTMP header (Router Net, ID, Node and Net Tuple + * (this works only if we are on an extended net) + */ + + rtmp_data = ddp->data; + + *rtmp_data++ = (Entry->NextIRNet & 0xff00) >> 8; + *rtmp_data++ = Entry->NextIRNet & 0x00ff ; + *rtmp_data++ = 8; + *rtmp_data++ = 
(u_char)Entry->NextIRNode; + *rtmp_data++ = (Entry->NetStart & 0xff00) >> 8; + *rtmp_data++ = Entry->NetStart & 0x00ff ; + *rtmp_data++ = 0x80; /* first tuple, so distance is always zero */ + *rtmp_data++ = (Entry->NetStop & 0xff00) >> 8; + *rtmp_data++ = Entry->NetStop & 0x00ff ; + *rtmp_data++ = RTMP_VERSION_NUMBER; + + + dPrintf(D_M_RTMP, D_L_INFO, ("rtmp_r_find_bridge: for net %d send back router %d.%d\n", + NET_VALUE(orig_ddp->dst_net), Entry->NextIRNet, Entry->NextIRNode)); + if (status = ddp_router_output(m, ifID, AT_ADDR, NET_VALUE(orig_ddp->src_net), + orig_ddp->src_node, 0)){ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_r_find_bridge: ddp_router_output failed status=%d\n", status)); + return (status); + } + return (0); +} + +/* + * rtmp_send_table: + * Send the routing table entries in RTMP data packets. + * Use split horizon if specified. The Data packets are sent + * as full DDP packets, if the last packet is full an empty + * packet is sent to tell the recipients that this is the end of + * the table... 
+ * + */ +static int rtmp_send_table(ifID, DestNet, DestNode, split_hz, socket, + n_neighbors) + register at_ifaddr_t *ifID; /* interface/port params */ + register at_net DestNet; /* net where to send the table */ + register u_char DestNode; /* node where to send to table */ + short split_hz; /* use split horizon */ + char socket; /* the destination socket to send to */ + short n_neighbors; /* used to send packets telling we are going down */ +{ + + RT_entry *Entry; + char *Buff_ptr; + u_char NewDist; + gbuf_t *m; + short size,status ; + register at_ddp_t *ddp; + register short EntNb = 0, sent_tuple = 0; + register unsigned int s; + + if (ifID->ifRoutingState < PORT_ONLINE) { + dPrintf(D_M_RTMP, D_L_INFO, + ("rtmp_send_table: port %d activating, we don't send anything!\n", + ifID->ifPort)); + return (0); + } + + /* prerare tuples and packets for DDP*/ + /* if split horizon, do not send tuples we can reach on the port we + * want to send too + */ + + Entry = &RT_table[0]; + size = 0; + if (!(m = rtmp_prep_new_packet(ifID, DestNet, DestNode, socket))) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_send_table: rtmp_prep_new_packet failed\n")); + return(ENOBUFS); + } + + ddp = (at_ddp_t *)(gbuf_rptr(m)); + Buff_ptr = (char *)((char *)ddp + DDP_X_HDR_SIZE + 10); + + ATDISABLE(s, ddpinp_lock); + while (EntNb < RT_maxentry) { + + if (Entry->NetStop && ((Entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT)) { + if (!(split_hz && ifID->ifPort == Entry->NetPort)) { + sent_tuple++; + + if (((Entry->EntryState & 0x0F) < RTE_STATE_SUSPECT) || n_neighbors) + NewDist = NOTIFY_N_DIST; + else + NewDist = Entry->NetDist & 0x1F; + + if (Entry->NetStart) { /* Extended */ + *Buff_ptr++ = (Entry->NetStart & 0xFF00) >> 8; + *Buff_ptr++ = (Entry->NetStart & 0x00FF); + *Buff_ptr++ = 0x80 | NewDist; + *Buff_ptr++ = (Entry->NetStop & 0xFF00) >> 8; + *Buff_ptr++ = (Entry->NetStop & 0x00FF); + *Buff_ptr++ = RTMP_VERSION_NUMBER; + size += 6; + } + else { /* non extended tuple */ + *Buff_ptr++ = 
(Entry->NetStop & 0xFF00) >> 8; + *Buff_ptr++ = (Entry->NetStop & 0x00FF); + *Buff_ptr++ = NewDist; + size += 3; + } + } + } + + if (size > (DDP_DATA_SIZE-20)) { + DDPLEN_ASSIGN(ddp, size + DDP_X_HDR_SIZE + 10); + gbuf_winc(m,size); + ATENABLE(s, ddpinp_lock); + if (status = ddp_router_output(m, ifID, AT_ADDR, + NET_VALUE(DestNet),DestNode, 0)){ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_send_table: ddp_router_output failed status=%d\n", + status)); + return (status); + } + if ((m = rtmp_prep_new_packet (ifID, DestNet, DestNode, socket)) == NULL){ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_send_table: rtmp_prep_new_poacket failed status=%d\n", + status)); + return (ENOBUFS); + } + ddp = (at_ddp_t *)(gbuf_rptr(m)); + Buff_ptr = (char *)((char *)ddp + DDP_X_HDR_SIZE + 10); + + dPrintf(D_M_RTMP_LOW, D_L_OUTPUT, + ("rtmp_s_tble: Send %d tuples on port %d\n", + sent_tuple, ifID->ifPort)); + sent_tuple = 0; + size = 0; + ATDISABLE(s, ddpinp_lock); + } + + Entry++; + EntNb++; + } + ATENABLE(s, ddpinp_lock); + + /* + * If we have some remaining entries to send, send them now. 
+ * otherwise, the last packet we sent was full, we need to send an empty one + */ + + DDPLEN_ASSIGN(ddp, size + DDP_X_HDR_SIZE + 10); + gbuf_winc(m,size); + if ((status = + ddp_router_output(m, ifID, AT_ADDR, NET_VALUE(DestNet),DestNode, 0))){ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_send_table: ddp_router_output failed status=%d\n", status)); + return (status); + } + dPrintf(D_M_RTMP_LOW, D_L_OUTPUT, + ("rtmp_s_tble: LAST Packet split=%d with %d tuples sent on port %d\n", + split_hz, sent_tuple, ifID->ifPort)); + + return (0); +} + +/* + * rtmp_request: respond to the 3 types of RTMP requests RTMP may receive + * RTMP func =1 : respond with an RTMP Reponse Packet + * RTMP func =2 : respond with the routing table RTMP packet with split horizon + * RTMP func =3 : respond with the routing table RTMP packet no split horizon + * + * see Inside AppleTalk around page 5-18 for "details" + */ + +static void rtmp_request(ifID, ddp) + register at_ifaddr_t *ifID; + register at_ddp_t *ddp; +{ + + short split_horizon = FALSE; + short code; + short error; + + /* We ignore the request if we're activating on that port */ + + if (ifID->ifRoutingState < PORT_ONLINE) + return; + + /* check RTMP function code */ + + code = ddp->data[0]; + + switch (code) { + + case RTMP_REQ_FUNC1: /* RTMP Find Bridge */ + + /* RTMP Request Packet: we send a response with the next IRrange */ + dPrintf(D_M_RTMP, D_L_INPUT, + ( "rtmp_request: find bridge for net %d port %d node %d.%d\n", + NET_VALUE(ddp->dst_net), ifID->ifPort, + NET_VALUE(ddp->src_net), ddp->src_node)); + + if ((error = rtmp_r_find_bridge (ifID, ddp))) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_request: Code 1 ddp_r_output failed error=%d\n", + error)); + return; + } + + break; + + case RTMP_REQ_FUNC2: + + split_horizon = TRUE; + + case RTMP_REQ_FUNC3: + + /* RTMP Route Request Packet */ + + dPrintf(D_M_RTMP, D_L_INPUT, + ("rtmp_request: received code=%d from %d.%d for %d.%d\n", + code, NET_VALUE(ddp->src_net), ddp->src_node, + 
NET_VALUE(ddp->dst_net), ddp->dst_node)); + + rtmp_send_table(ifID, ddp->src_net, ddp->src_node, + split_horizon, ddp->src_socket, 0); + + break; + + default: + + /* unknown type of request */ + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_request : invalid type of request =%d\n", + code)); + break; + } + +} + +/* funnel version of rtmp_send_port */ +void rtmp_send_port_funnel(ifID) + register at_ifaddr_t *ifID; +{ + thread_funnel_set(network_flock, TRUE); + rtmp_send_port(ifID); + thread_funnel_set(network_flock, FALSE); +} + + +/* + * rtmp_send_all_ports : send the routing table on all connected ports + * check for the port status and if ok, send the + * rtmp tuples to the broadcast address for the port + * usually called on timeout every 10 seconds. + */ + +void rtmp_send_port(ifID) + register at_ifaddr_t *ifID; +{ + at_net DestNet; + + NET_ASSIGN(DestNet, 0); + + if (ifID && ifID->ifRoutingState == PORT_ONLINE) { + dPrintf(D_M_RTMP_LOW, D_L_OUTPUT, + ("rtmp_send_port: do stuff for port=%d\n", + ifID->ifPort)); + if (ifID->ifZipNeedQueries) + zip_send_queries(ifID, 0, 0xFF); + if (!ROUTING_MODE) { + return; + } + rtmp_send_table(ifID, DestNet, 0xFF, 1, RTMP_SOCKET, 0); + } + + if (ifID == ifID_home) + dPrintf(D_M_RTMP_LOW, D_L_VERBOSE, + ("I:%5d O:%5d H:%5d dropped:%d\n", + pktsIn, pktsOut, pktsHome, pktsDropped)); + + dPrintf(D_M_RTMP_LOW, D_L_TRACE, + ("rtmp_send_port: func=0x%x, ifID=0x%x\n", + (u_int) rtmp_send_port, (u_int) ifID)); + timeout (rtmp_send_port_funnel, (caddr_t)ifID, 10 * SYS_HZ); + +} + +/* rtmp_dropper: check the number of packet received every x secondes. 
+ * the actual packet dropping is done in ddp_input + */ + +void rtmp_dropper() +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + pktsIn = pktsOut = pktsHome = pktsDropped = 0; + timeout(rtmp_dropper, NULL, 2*SYS_HZ); + + (void) thread_funnel_set(network_flock, FALSE); +} + +/* + * rtmp_router_start: perform the sanity checks before declaring the router up + * and running. This function looks for discrepency between the net infos + * for the different ports and seed problems. + * If everything is fine, the state of each port is brought to PORT_ONLINE.\ + * ### LD 01/09/95 Changed to correct Zone problem on non seed ports. + */ + +int rtmp_router_start(keP) + at_kern_err_t *keP; /* used to report errors (if any) */ +{ + int err = 0; + register at_ifaddr_t *ifID, *ifID2; + register short Index, router_starting_timer = 0; + register RT_entry *Entry; + register at_net_al netStart, netStop; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + /* clear the static structure used to record routing errors */ + bzero(&ke, sizeof(ke)); + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + + /* if non seed, need to acquire the right node address */ + + if ((ifID->ifFlags & RTR_SEED_PORT) == 0) { + if ((ifID->ifThisCableStart == 0 && ifID->ifThisCableEnd == 0) || + (ifID->ifThisCableStart >= DDP_STARTUP_LOW && + ifID->ifThisCableEnd <= DDP_STARTUP_HIGH)) { + + if (ifID->ifThisCableEnd == 0) { + keP->error = KE_NO_SEED; + keP->port1 = ifID->ifPort; + strncpy(keP->name1, ifID->ifName,sizeof(keP->name1)); + } + else { + keP->error = KE_INVAL_RANGE; + keP->port1 = ifID->ifPort; + strncpy(keP->name1, ifID->ifName,sizeof(keP->name1)); + keP->netr1b = ifID->ifThisCableStart; + keP->netr1e = ifID->ifThisCableEnd; + } + ifID->ifRoutingState = PORT_ERR_STARTUP; + RouterError(ifID->ifPort, ERTR_CABLE_STARTUP); + + goto error; + } + + /* we are non seed, so try to acquire the zones for that guy */ + 
ifID->ifZipNeedQueries = 1; + + dPrintf(D_M_RTMP, D_L_STARTUP, + ("rtmp_router_start: call elap_online for Non Seed port #%d cable =%d-%d\n", + ifID->ifPort, CableStart, CableStop)); + if ((err = elap_online3(ifID))) + goto error; + } + } + + /* Check if we have a problem with the routing table size */ + + if (ErrorRTMPoverflow) { + keP->error = KE_RTMP_OVERFLOW; + goto error; + } + + + /* Now, check that we don't have a conflict in between our interfaces */ + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + + /* check if the RoutingState != PORT_ONERROR */ + if (ifID->ifRoutingState < PORT_ACTIVATING) { + goto error; + } + + if ((ifID->ifThisCableStart == 0 && ifID->ifThisCableEnd == 0) || + (ifID->ifThisCableStart >= DDP_STARTUP_LOW && + ifID->ifThisCableEnd <= DDP_STARTUP_HIGH)) { + + if (ifID->ifThisCableEnd == 0) { + keP->error = KE_NO_SEED; + keP->port1 = ifID->ifPort; + strncpy(keP->name1, ifID->ifName,sizeof(keP->name1)); + } + else { + keP->error = KE_INVAL_RANGE; + keP->port1 = ifID->ifPort; + strncpy(keP->name1, ifID->ifName,sizeof(keP->name1)); + keP->netr1b = ifID->ifThisCableStart; + keP->netr1e = ifID->ifThisCableEnd; + } + + ifID->ifRoutingState = PORT_ERR_STARTUP; + RouterError(ifID->ifPort, ERTR_CABLE_STARTUP); + + goto error; + } + + /* check the interface address against all other ifs */ + + netStart = ifID->ifThisCableStart; + netStop = ifID->ifThisCableEnd; + + for (ifID2 = TAILQ_NEXT(ifID, aa_link); ifID2; + ifID2 = TAILQ_NEXT(ifID2, aa_link)) { + + if (((netStart >= ifID2->ifThisCableStart) && + (netStart <= ifID2->ifThisCableEnd)) || + ((netStop >= ifID2->ifThisCableStart) && + (netStop <= ifID2->ifThisCableEnd)) || + ((ifID2->ifThisCableStart >= netStart) && + (ifID2->ifThisCableStart <= netStop)) || + ((ifID2->ifThisCableEnd >= netStart) && + (ifID2->ifThisCableEnd <= netStop)) ) { + + keP->error = KE_CONF_RANGE; + keP->port1 = ifID->ifPort; + strncpy(keP->name1, ifID->ifName,sizeof(keP->name1)); + keP->port2 = ifID2->ifPort; + 
strncpy(keP->name2, ifID2->ifName,sizeof(keP->name2)); + keP->netr1b = ifID->ifThisCableStart; + keP->netr1e = ifID->ifThisCableEnd; + ifID->ifRoutingState = PORT_ERR_CABLER; + RouterError(ifID->ifPort, ERTR_CABLE_CONFLICT); + goto error; + } + + } + + /* ### LD 01/04/94: We need to fill in the next IR info in the routing table */ + Entry = rt_blookup(ifID->ifThisCableEnd); + + if (Entry == NULL) { + dPrintf(D_M_RTMP, D_L_ERROR, + ("rtmp_router_start: we don't know our cable range port=%d\n", + ifID->ifPort)); + + goto error; + } + + /* + * Note: At this point, non seed ports may not be aware of their Default zone + */ + + if (!(ifID->ifFlags & RTR_SEED_PORT)) { + ifID->ifDefZone = 0; + Entry->EntryState |= (RTE_STATE_GOOD|RTE_STATE_UPDATED); + } + + ifID->ifRoutingState = PORT_ONLINE; + ifID->ifState = LAP_ONLINE; + + /* set the right net and node for each port */ + Entry->NextIRNet = ifID->ifThisNode.s_net; + Entry->NextIRNode= ifID->ifThisNode.s_node; + + dPrintf(D_M_RTMP, D_L_STARTUP, + ("rtmp_router_start: bring port=%d [%d.%d]... on line\n", + ifID->ifPort, ifID->ifThisNode.s_net, + ifID->ifThisNode.s_node)); + + } + + /* + * Everything is fine, we can begin to babble on the net... 
+ */ + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (ifID->ifRoutingState == PORT_ONLINE) { + rtmp_send_port(ifID); + timeout(rtmp_timeout, (caddr_t)ifID, (50+ifID->ifPort) * SYS_HZ); + if (ifID->ifRoutingState < PORT_ACTIVATING) { + goto error; + } + } + } + + /* Check if we have a problem with the routing or zip table size */ + + if (ErrorRTMPoverflow) { + keP->error = KE_RTMP_OVERFLOW; + goto error; + } + if (ErrorZIPoverflow) { + keP->error = KE_ZIP_OVERFLOW; + goto error; + } + + /* sleep for 10 seconds */ + if ((err = + /* *** eventually this will be the ifID for the interface + being brought up in router mode *** */ + /* *** router sends rtmp packets every 10 seconds *** */ + tsleep(&ifID_home->startup_inprogress, + PSOCK | PCATCH, "router_start1", (10+1) * SYS_HZ)) + != EWOULDBLOCK) { + goto error; + } + +startZoneInfo: + err = 0; + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + + if (ifID->ifRoutingState < PORT_ACTIVATING) { + goto error; + } + + if ((ifID->ifZipNeedQueries) + && (ifID->ifFlags & RTR_SEED_PORT) == 0) { + dPrintf(D_M_RTMP, D_L_STARTUP, + ("rtmp_router_start: send Zip Queries for Port %d\n", + ifID->ifPort)); + zip_send_queries(ifID, 0, 0xFF); + + if (router_starting_timer >= 10) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmp_router_start: no received response to ZipNeedQueries\n")); + keP->error = KE_NO_ZONES_FOUND; + keP->port1 = ifID->ifPort; + strncpy(keP->name1, ifID->ifName,sizeof(keP->name1)); + keP->netr1b = ifID->ifThisCableStart; + keP->netr1e = ifID->ifThisCableEnd; + ifID->ifRoutingState = PORT_ERR_CABLER; + RouterError(ifID->ifPort, ERTR_CABLE_CONFLICT); + goto error; + } + + dPrintf(D_M_RTMP, D_L_STARTUP, + ("rtmp_router_start: waiting for zone info to complete\n")); + /* sleep for 10 seconds */ + if ((err = + /* *** eventually this will be the ifID for the + interface being brought up in router mode *** */ + tsleep(&ifID_home->startup_inprogress, + PSOCK | PCATCH, "router_start2", 10 * SYS_HZ)) + != EWOULDBLOCK) { + goto 
error; + } + + err = 0; + router_starting_timer++; + goto startZoneInfo; + } + + } + + /* At This Point, check if we know the default zones for non seed port */ + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + + if (ifID->ifRoutingState < PORT_ACTIVATING) + goto error; + + if (!(ifID->ifFlags & RTR_SEED_PORT)) { + Entry = rt_blookup(ifID->ifThisCableEnd); + + if (Entry == NULL) { + dPrintf(D_M_RTMP, D_L_ERROR, + ("rtmp_router_start: (2)we don't know our cable range port=%d\n", + ifID->ifPort)); + goto error; + } + + dPrintf(D_M_RTMP, D_L_STARTUP, + ("rtmp_router_start: if %s set to permanent\n", + ifID->ifName)); + Entry->NetDist = 0; /* added 4-29-96 jjs, prevent direct + nets from showing non-zero + distance */ + /* upgrade the non seed ports. */ + Entry->EntryState |= RTE_STATE_PERMANENT; + + Index = zt_ent_zindex(Entry->ZoneBitMap); + if (Index <= 0) { + dPrintf(D_M_RTMP, D_L_ERROR, + ("rtmp_router_start: still don't know default zone for port %d\n", + ifID->ifPort)); + } else { + ifID->ifDefZone = Index; + if ((ifID == ifID_home) || MULTIHOME_MODE) { + ifID->ifZoneName = ZT_table[Index-1].Zone; + (void)regDefaultZone(ifID); + } + } + } + } + + /* Check if we have a problem with the routing or zip table size */ + + if (ErrorRTMPoverflow) { + keP->error = KE_RTMP_OVERFLOW; + goto error; + } + if (ErrorZIPoverflow) { + keP->error = KE_ZIP_OVERFLOW; + goto error; + } + + /* + * Handle the Home Port specifics + */ + + /* set the router address as being us no matter what*/ + ifID_home->ifARouter = ifID_home->ifThisNode; + ifID_home->ifRouterState = ROUTER_UPDATED; + + /* prepare the packet dropper timer */ + timeout (rtmp_dropper, NULL, 1*SYS_HZ); + + (void) thread_funnel_set(network_flock, funnel_state); + return(0); + +error: + dPrintf(D_M_RTMP,D_L_ERROR, + ("rtmp_router_start: error type=%d occured on port %d\n", + ifID->ifRoutingState, ifID->ifPort)); + + /* if there's no keP->error, copy the local ke structure, + since the error occured asyncronously */ + if 
((!keP->error) && ke.error) + bcopy(&ke, keP, sizeof(ke)); + rtmp_shutdown(); + + /* to return the error in keP, the ioctl has to return 0 */ + (void) thread_funnel_set(network_flock, funnel_state); + + return((keP->error)? 0: err); +} /* rtmp_router_start */ + + +void rtmp_shutdown() +{ + register at_ifaddr_t *ifID; + register short i; + at_net DestNet; + + NET_ASSIGN(DestNet, 0); + + dPrintf(D_M_RTMP, D_L_SHUTDN, + ("rtmp_shutdown:stop sending to all ports\n")); + + untimeout(rtmp_dropper, (caddr_t)0); + untimeout(rtmp_router_start, 1); /* added for 2225395 */ + untimeout(rtmp_router_start, 3); /* added for 2225395 */ + + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (ifID->ifRoutingState > PORT_OFFLINE ) { + if (ifID->ifRoutingState == PORT_ONLINE) { + untimeout(rtmp_send_port_funnel, (caddr_t)ifID); + untimeout(rtmp_timeout, (caddr_t) ifID); + } + /* + * it's better to notify the neighbour routers that we are going down + */ + if (ROUTING_MODE) + rtmp_send_table(ifID, DestNet, 0xFF, TRUE, + RTMP_SOCKET, TRUE); + + ifID->ifRoutingState = PORT_OFFLINE; + + dPrintf(D_M_RTMP, D_L_SHUTDN, + ("rtmp_shutdown: routing on port=%d... off line\nStats:\n", + ifID->ifPort)); + dPrintf(D_M_RTMP, D_L_SHUTDN, + ("fwdBytes : %ld\nfwdPackets : %ld\ndroppedBytes : %ld\ndroppedPkts : %ld\n", + ifID->ifStatistics.fwdBytes, ifID->ifStatistics.fwdPkts, + ifID->ifStatistics.droppedBytes, ifID->ifStatistics.droppedPkts)); + + } + } + +} + +/* + * Remove all entries associated with the specified port. 
+ */ +void rtmp_purge(ifID) + at_ifaddr_t *ifID; +{ + u_char state; + int i, s; + RT_entry *en = &RT_table[0]; + + ATDISABLE(s, ddpinp_lock); + for (i=0; i < RT_maxentry; i++) { + state = en->EntryState & 0x0F; + if ((state > RTE_STATE_UNUSED) && (state != RTE_STATE_PERMANENT) + && en->NetStop && en->NetDist && (en->NetPort == ifID->ifPort)) { + zt_remove_zones(en->ZoneBitMap); + RT_DELETE(en->NetStop, en->NetStart); + } + en++; + } + ATENABLE(s, ddpinp_lock); +} diff --git a/bsd/netat/ddp_r_zip.c b/bsd/netat/ddp_r_zip.c new file mode 100644 index 000000000..573fda9e5 --- /dev/null +++ b/bsd/netat/ddp_r_zip.c @@ -0,0 +1,1962 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988-1998 Apple Computer, Inc. + */ +/* + * 0.01 05/12/94 Laurent Dumont Creation + * + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ +/* + * + * Router ZIP protocol functions: + * + * This file contains Routing specifics to handle ZIP requests and responses + * sent and received by a router node. 
+ * + * The entry point for the zip input in ddp is valid only when we're + * running in router mode. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void zip_reply_to_getmyzone(); +extern int at_reg_mcast(), at_unreg_mcast(); + +/* globals */ +extern at_ifaddr_t *ifID_table[], *ifID_home; +extern short ErrorZIPoverflow; + +/********************************************************************** + * Remarks : + * ZIP is implemented as a "peer" of DDP, so the packets coming in + * to ZIP have the same headers as those coming in to DDP {ddp...}. + * Same applies to outgoing packets. Also, unlike DDP, ZIP assumes + * that an incoming packet is in a contiguous gbuf_t. + * + **********************************************************************/ + +static int netinfo_reply_pending; +static void zip_netinfo_reply(at_x_zip_t *, at_ifaddr_t *); +static void zip_getnetinfo(at_ifaddr_t *); +static void zip_getnetinfo_funnel(at_ifaddr_t *); +static void send_phony_reply(gbuf_t *); + +/* + * zip_send_getnetinfo_reply: we received a GetNetInfo packet, we need to reply + * with the right information for the port. 
+ */ +static void zip_send_getnetinfo_reply(m, ifID) + register gbuf_t *m; + register at_ifaddr_t *ifID; +{ + at_nvestr_t *zname; + gbuf_t *m_sent; + at_ddp_t *ddp, *ddp_sent; + short ZoneNameProvided = FALSE; + short RequestIsBroadcasted = FALSE; + u_short znumber, len, packet_length, size, status; + RT_entry *Entry; + char GNIReply[128]; + + ddp = (at_ddp_t *)gbuf_rptr(m); + + /* access the Zone Name info part of the GetNetInfo Request */ + + zname = (at_nvestr_t *)(gbuf_rptr(m) + DDP_X_HDR_SIZE + 6); + + if (zname->len > ZIP_MAX_ZONE_LENGTH) { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_s_gni_r: zone len too long l=%d ddplen=%d\n", + zname->len, DDPLEN_VALUE(ddp))); + return; + } + + + if (zname->len) + ZoneNameProvided = TRUE; + + GNIReply[0] = ZIP_NETINFO_REPLY; + GNIReply[1] = ZIP_ZONENAME_INVALID; + + /* check if we are the originator is in the cable range for this interface */ + + if ((NET_VALUE(ddp->src_net) < CableStart || NET_VALUE(ddp->src_net) > CableStop) && + (NET_VALUE(ddp->dst_net) == 0 && ddp->dst_node == 0xff)) { + RequestIsBroadcasted = TRUE; + } + Entry = rt_blookup(CableStop); + + if (Entry != NULL && RT_ALL_ZONES_KNOWN(Entry)) { /* this net is well known... 
*/ + + GNIReply[2] = (Entry->NetStart & 0xFF00) >> 8; + GNIReply[3] = (Entry->NetStart & 0x00FF); + GNIReply[4] = (Entry->NetStop & 0xFF00) >> 8; + GNIReply[5] = (Entry->NetStop & 0x00FF); + + /* copy the zone name found in the request */ + + GNIReply[6] = zname->len; + bcopy(&zname->str, &GNIReply[7], zname->len); + + + if (znumber = zt_find_zname(zname)) { + + if (ZT_ISIN_ZMAP((znumber), Entry->ZoneBitMap)) { + + GNIReply[1] = 0; /* Zone Valid */ + + if (len = zt_get_zmcast(ifID, zname, &GNIReply[8+zname->len])) + GNIReply[7+zname->len] = len; + else { + GNIReply[1] |= ZIP_USE_BROADCAST; + GNIReply[7+zname->len] = 0; /* multicast address length */ + } + packet_length = 8 + zname->len + len; + } + } + + } + + else { /* should not happen, we are supposed to know our net */ + dPrintf(D_M_ZIP, D_L_WARNING, ("zip_s_gni_r: Don't know about our zone infos!!!\n")); + return; + } + + if (zt_ent_zcount(Entry) == 1) + GNIReply[1] |= ZIP_ONE_ZONE; + + if (GNIReply[1] & ZIP_ZONENAME_INVALID) { + + short Index = ifID->ifDefZone; + + if (Index <= 0 || Index >= ZT_MAXEDOUT) { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_s_gni_r: Invalid starting index =%d port%d\n", + Index, ifID->ifPort)); + return; + } + + + Index--; + + if (len = zt_get_zmcast(ifID, &ZT_table[Index].Zone, &GNIReply[8+zname->len])) + GNIReply[7+zname->len] = len; + else { + GNIReply[1] |= ZIP_USE_BROADCAST; + GNIReply[7+zname->len] = 0; /* multicast address length */ + } + + packet_length = 7 + zname->len + len; + + /* in the case the zone name asked for in the request was invalid, we need + * to copy the good default zone for this net + */ + + GNIReply[packet_length + 1] = ZT_table[Index].Zone.len; + bcopy(&ZT_table[Index].Zone.str, &GNIReply[packet_length + 2], + ZT_table[Index].Zone.len); + packet_length = packet_length +2 + ZT_table[Index].Zone.len; + } + + + /* + * we're finally ready to send out the GetNetInfo Reply + * + */ + + + size = DDP_X_HDR_SIZE + packet_length; + if ((m_sent = 
gbuf_alloc(AT_WR_OFFSET+size, PRI_HI)) == NULL) { + return; /* was return(ENOBUFS); */ + } + + gbuf_rinc(m_sent,AT_WR_OFFSET); + gbuf_wset(m_sent,size); + ddp_sent = (at_ddp_t *)(gbuf_rptr(m_sent)); + + /* Prepare the DDP header */ + + ddp_sent->unused = ddp_sent->hopcount = 0; + UAS_ASSIGN(ddp->checksum, 0); + DDPLEN_ASSIGN(ddp_sent, size); + NET_ASSIGN(ddp_sent->src_net, ifID->ifThisNode.s_net); + ddp_sent->src_node = ifID->ifThisNode.s_node; + ddp_sent->src_socket = ZIP_SOCKET; + ddp_sent->dst_socket = ddp->src_socket; + + if (RequestIsBroadcasted) { /* if this was a broadcast, must respond from that */ + + NET_ASSIGN(ddp_sent->dst_net, 0); + ddp_sent->dst_node = 0xFF; + } + else { + + NET_NET(ddp_sent->dst_net, ddp->src_net); + ddp_sent->dst_node = ddp->src_node; + } + ddp_sent->type = DDP_ZIP; + + bcopy(&GNIReply, &ddp_sent->data, packet_length); + + dPrintf(D_M_ZIP_LOW, D_L_ROUTING, + ("zip_s_gni_r: send to %d:%d port#%d pack_len=%d\n", + NET_VALUE(ddp_sent->dst_net), ddp_sent->dst_node, + ifID->ifPort, packet_length)); + if ((status = + ddp_router_output(m_sent, ifID, AT_ADDR, + NET_VALUE(ddp_sent->dst_net), ddp_sent->dst_node, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_s_gni_r: ddp_router_output returns =%d\n", status)); + return; /* was return(status); */ + } +} /* zip_send_getnetinfo_reply */ + + +/* + * build_ZIP_reply_packet: is used to create and send a DDP packet and use the + * provided buffer as a ZIP reply. This is used by zip_send_ext_reply_to_query + * and zip_send_reply_to_query for sending their replies to ZIP queries. 
+ */ +gbuf_t *prep_ZIP_reply_packet(m, ifID) + register gbuf_t *m; /* this is the original zip query */ + register at_ifaddr_t *ifID; +{ + register gbuf_t *m_sent; + register at_ddp_t *ddp, *src_ddp; + + /* access the source Net and Node informations */ + + src_ddp = (at_ddp_t *)gbuf_rptr(m); + + if ((m_sent = gbuf_alloc (AT_WR_OFFSET+1024, PRI_HI)) == NULL) { + return((gbuf_t *)NULL); + } + gbuf_rinc(m_sent,AT_WR_OFFSET); + gbuf_wset(m_sent,DDP_X_HDR_SIZE); + ddp = (at_ddp_t *)(gbuf_rptr(m_sent)); + + /* Prepare the DDP header */ + + ddp->unused = ddp->hopcount = 0; + UAS_ASSIGN(ddp->checksum, 0); + + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + ddp->src_node = ifID->ifThisNode.s_node; + ddp->src_socket = ZIP_SOCKET; + + ddp->dst_socket = src_ddp->src_socket; + NET_NET(ddp->dst_net, src_ddp->src_net); + ddp->dst_node = src_ddp->src_node; + + ddp->type = DDP_ZIP; + + return(m_sent); +} +/* + * zip_send_ext_reply_to_query: this function deals with ZIP Queries for extended nets. + * When we recognize an extended net (that might have several zone name associated with + * it), we send A SEPARATE ZIP reply for that network. This is called from the + * regular zip_send_reply_to_query, that just deals with non-ext nets. 
+ */ + +static void zip_send_ext_reply_to_query(mreceived, ifID, Entry, NetAsked) + register gbuf_t *mreceived; + register at_ifaddr_t *ifID; + RT_entry *Entry; /* info about the network we're looking for */ + u_short NetAsked; +{ + register gbuf_t *m; + register at_ddp_t *ddp; + short i, j, reply_length, Index, zone_count, status; + u_char *zmap; + char *ReplyBuff, *ZonesInPacket; + + zone_count = zt_ent_zcount(Entry); + zmap = Entry->ZoneBitMap; + i = ZT_BYTES -1; + + +newPacket: + + if (!(m = prep_ZIP_reply_packet (mreceived, ifID))) { + return; /* was return(ENOBUFS); */ + } + + ddp = (at_ddp_t *)(gbuf_rptr(m)); + ReplyBuff = (char *)(ddp->data); + + + *ReplyBuff++ = 8; /* ZIP function = 8 [extended reply] */ + + ZonesInPacket= ReplyBuff; + *ZonesInPacket= 0; + ReplyBuff ++; + reply_length = 2; /* 1st byte is ZIP reply code, 2nd is network count */ + j= 0; + + /* For all zones, we check if they belong to the map for that Network */ + + for (; i >= 0; i--) { + + /* find the zones defined in this entry bitmap */ + + if (zmap[i]) { + for (; j < 8 ; j++) + if (zmap[i] << j & 0x80) { /* bingo */ + + Index = i*8 + j; /* zone index in zone table */ + + if (reply_length + 3 + ZT_table[Index].Zone.len > DDP_DATA_SIZE) { + + /* we need to send the packet before, this won't fit... 
*/ + + zone_count -= *ZonesInPacket; + + DDPLEN_ASSIGN(ddp, reply_length + DDP_X_HDR_SIZE); + gbuf_winc(m,reply_length); + if ((status = + ddp_router_output(m, ifID, AT_ADDR, + NET_VALUE(ddp->dst_net), ddp->dst_node, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_s_ext_repl: ddp_router_output returns =%d\n", + status)); + return; /* was return (status); */ + } + + goto newPacket; + + } + /* this should fit in this packet, build the NetNumber, ZoneLen, + * ZoneName triple + */ + + if (ZT_table[Index].Zone.len) { + *ZonesInPacket += 1; /* bump NetCount field */ + *ReplyBuff++ = (NetAsked & 0xFF00) >> 8; + *ReplyBuff++ = (NetAsked & 0x00FF) ; + *ReplyBuff++ = ZT_table[Index].Zone.len; + + bcopy(&ZT_table[Index].Zone.str, ReplyBuff, + ZT_table[Index].Zone.len); + + ReplyBuff += ZT_table[Index].Zone.len; + reply_length += ZT_table[Index].Zone.len +3; + } + + } + } + j= 0; /* reset the bit count */ + } + + /* if we have some zone info in a half-empty packet, send it now. + * Remember, for extended nets we send *at least* one Reply + */ + + if (zone_count) { + DDPLEN_ASSIGN(ddp, reply_length + DDP_X_HDR_SIZE); + gbuf_winc(m,reply_length); + if ((status = + ddp_router_output(m, ifID, AT_ADDR, + NET_VALUE(ddp->dst_net), ddp->dst_node, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_s_ext_reply: ddp_router_output returns =%d\n", status)); + return; /* was return (status); */ + } + } + else /* free the buffer not used */ + + gbuf_freem(m); +} /* zip_send_ext_reply_to_query */ + +/* + * zip_send_reply_to_query: we received a ZIPQuery packet, we need to reply + * with the right information for the nets requested (if we have + * the right information. 
+ */ +static void zip_send_reply_to_query(mreceived, ifID) + register gbuf_t *mreceived; + register at_ifaddr_t *ifID; +{ + register gbuf_t *m; + register at_ddp_t *ddp, *ddp_received; + RT_entry *Entry; + short i, reply_length, Index, status; + u_char network_count; + u_short *NetAsked; + char *ReplyBuff, *ZonesInPacket; + + ddp_received = (at_ddp_t *)gbuf_rptr(mreceived); + + /* access the number of nets requested in the Query */ + network_count = *((char *)(ddp_received->data) + 1); + NetAsked = (u_short *)(ddp_received->data+ 2); + + /* check the validity of the Query packet */ + + if (DDPLEN_VALUE(ddp_received) != + (2 + network_count * 2 + DDP_X_HDR_SIZE)) { + + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_s_reply_to_q: bad length netcount=%d len=%d\n", + network_count, DDPLEN_VALUE(ddp))); + return; /* was return(1); */ + } + + /* walk the Query Network list */ + /* we want to build a response with the network number followed by the zone name + * length and the zone name. If there is more than one zone per network asked, + * we repeat the network number and stick the zone length and zone name. + * We need to be carefull with the max DDP size for data. If we see that a new + * NetNum, ZoneLen, ZoneName sequence won't fit, we send the previous packet and + * begin to build a new one. + */ + +newPacket: + + if (!(m = prep_ZIP_reply_packet (mreceived, ifID))) { + return; /* was return(ENOBUFS); */ + } + + ddp = (at_ddp_t *)(gbuf_rptr(m)); + ReplyBuff = (char *)(ddp->data); + + *ReplyBuff++ = 2; /* ZIP function = 2 [Non extended reply] */ + ZonesInPacket = ReplyBuff; + *ZonesInPacket = 0; + ReplyBuff++; + reply_length = 2; /* 1st byte is ZIP reply code, 2nd is network count */ + + for (i = 0 ; i < network_count ; i ++, NetAsked++) { + Entry = rt_blookup(*NetAsked); + + if (Entry != NULL && ((Entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT) && + RT_ALL_ZONES_KNOWN(Entry)) { /* this net is well known... 
*/ + + if (Entry->NetStart == 0) { /* asking for a NON EXTENDED network */ + + if ( (Index = zt_ent_zindex(Entry->ZoneBitMap)) == 0) + continue; + + Index--; + + if (reply_length + 3 + ZT_table[Index].Zone.len > DDP_DATA_SIZE) { + + /* we need to send the packet before, this won't fit... */ + + DDPLEN_ASSIGN(ddp, reply_length + DDP_X_HDR_SIZE); + gbuf_winc(m,reply_length); + + if ((status = + ddp_router_output(m, ifID, AT_ADDR, + NET_VALUE(ddp->dst_net), + ddp->dst_node, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_s_reply: ddp_router_output returns =%d\n", + status)); + return; /* was return (status); */ + } + + /* this is not nice, I know, but we reenter the loop with + * a packet is sent with the next network field in the Query + */ + + network_count -= i; + goto newPacket; + + } + + /* this should fit in this packet, build the NetNumber, ZoneLen, + * ZoneName triple + */ + + if (ZT_table[Index].Zone.len) { + ZonesInPacket += 1; /* bump NetCount field */ + *ReplyBuff++ = (*NetAsked & 0xFF00) >> 8; + *ReplyBuff++ = (*NetAsked & 0x00FF) ; + *ReplyBuff++ = ZT_table[Index].Zone.len; + bcopy(&ZT_table[Index].Zone.str, ReplyBuff, + ZT_table[Index].Zone.len); + + ReplyBuff += ZT_table[Index].Zone.len; + + reply_length += ZT_table[Index].Zone.len + 3; + + + } + + + } + else { /* extended network, check for multiple zone name attached + * and build a separate packet for each extended network requested + */ + + zip_send_ext_reply_to_query(mreceived, ifID, Entry, *NetAsked); + + } + } + } + + /* If we have a non extended packet (code 2) with some stuff in it, + * we need to send it now + */ + + if ( reply_length > 2) { + DDPLEN_ASSIGN(ddp, reply_length + DDP_X_HDR_SIZE); + gbuf_winc(m,reply_length); + if ((status = + ddp_router_output(m, ifID, AT_ADDR, + NET_VALUE(ddp->dst_net), + ddp->dst_node, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_send_reply: ddp_router_output returns =%d\n", status)); + return; /* was return (status); */ + } + } + else /* free the buffer not used 
*/ + gbuf_freem(m); +} /* zip_send_reply_to_query */ + +/*********************************************************************** + * zip_router_input() + * + **********************************************************************/ + +void zip_router_input (m, ifID) + register gbuf_t *m; + register at_ifaddr_t *ifID; +{ + register at_ddp_t *ddp; + register at_atp_t *atp; + register at_zip_t *zip; + register u_long user_bytes; + register u_short user_byte; + + /* variables for ZipNotify processing */ + register char old_zone_len; + register char new_zone_len; + register char *old_zone; + char *new_zone; + void zip_sched_getnetinfo(); /* forward reference */ + + if (gbuf_type(m) != MSG_DATA) { + /* If this is a M_ERROR message, DDP is shutting down, + * nothing to do here...If it's something else, we don't + * understand what it is + */ + dPrintf(D_M_ZIP, D_L_WARNING, ("zip_router_input: not an M_DATA message\n")); + gbuf_freem(m); + return; + } + + if (!ifID) { + dPrintf(D_M_ZIP, D_L_WARNING, ("zip_router_input: BAD ifID\n")); + gbuf_freem(m); + return; + } + + /* + * The ZIP listener receives two types of requests: + * + * ATP requests: GetZoneList, GetLocalZone, or GetMyZone + * ZIP requests: Netinfo, Query, Reply, takedown, bringup + */ + + ddp = (at_ddp_t *)gbuf_rptr(m); + + if (ddp->type == DDP_ZIP) { + zip = (at_zip_t *)(gbuf_rptr(m) + DDP_X_HDR_SIZE); + dPrintf(D_M_ZIP_LOW, D_L_INPUT, + ("zip_input: received a ZIP_DDP command=%d\n", + zip->command)); + switch (zip->command) { + case ZIP_QUERY : /* we received a Zip Query request */ + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_input: Received a Zip Query in from %d.%d\n", + NET_VALUE(ddp->src_net), ddp->src_node)); + + if (!RT_LOOKUP_OKAY(ifID, ddp)) { + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_input:: refused ZIP_QUERY from %d:%d\n", + NET_VALUE(ddp->src_net), ddp->src_node)); + } + else + zip_send_reply_to_query(m, ifID); + gbuf_freem(m); + break; + + case ZIP_REPLY : /* we received a Zip Query Reply packet */ + case 
ZIP_EXTENDED_REPLY: + if (ifID->ifRoutingState == PORT_OFFLINE) { + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_input: Received a Zip Reply in user mode\n")); + } + else + zip_reply_received(m, ifID, zip->command); + gbuf_freem(m); + break; + + case ZIP_TAKEDOWN : + /* we received a Zip Takedown packet */ + dPrintf(D_M_ZIP, D_L_WARNING, ("zip_input: Received a Zip takedown!!!\n")); + gbuf_freem(m); + break; + + case ZIP_BRINGUP : + /* we received a Zip BringUp packet */ + dPrintf(D_M_ZIP, D_L_WARNING, ("zip_input: Received a Zip BringUp!!!\n")); + gbuf_freem(m); + break; + + case ZIP_GETNETINFO: /* we received a GetNetInfo request */ + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_input: Received a GetNetInfo Req in from %d.%d\n", + NET_VALUE(ddp->src_net), ddp->src_node)); + if (RT_LOOKUP_OKAY(ifID, ddp)) { + dPrintf(D_M_ZIP, D_L_OUTPUT, + ("zip_input: we, as node %d:%d send GNI reply to %d:%d\n", + ifID->ifThisNode.s_net, ifID->ifThisNode.s_node, + NET_VALUE(ddp->src_net), ddp->src_node)); + zip_send_getnetinfo_reply(m, ifID); + } + gbuf_freem(m); + break; + + + case ZIP_NETINFO_REPLY : + + /* If we are not waiting for a GetNetInfo reply + * to arrive, this must be a broadcast + * message for someone else on the zone, so + * no need to even look at it! 
+ */ + if (!ROUTING_MODE && + ((NET_VALUE(ddp->src_net) != ifID->ifThisNode.s_net) || + (ddp->src_node != ifID->ifThisNode.s_node)) && netinfo_reply_pending) + { + extern void trackrouter(); + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_input: Received a GetNetInfo Reply from %d.%d\n", + NET_VALUE(ddp->src_net), ddp->src_node)); + trackrouter(ifID, NET_VALUE(ddp->src_net), ddp->src_node); + zip_netinfo_reply((at_x_zip_t *)zip, ifID); + } + + gbuf_freem(m); + break; + + case ZIP_NOTIFY : + /* processing of ZipNotify message : first, change + * our zone name, then if NIS is open, let NBP demon + process know of this change...(just forward the + * Notify packet + */ + /* First, check if this is really a packet for us */ + old_zone = &zip->data[4]; + if (!zonename_equal(&ifID->ifZoneName, + (at_nvestr_t *)old_zone)) { + /* the old zone name in the packet is not the + * same as ours, so this packet couldn't be + * for us. + */ + gbuf_freem(m); + break; + + } + old_zone_len = *old_zone; + new_zone_len = zip->data[4 + old_zone_len + 1]; + new_zone = old_zone + old_zone_len; + + /* Reset the zone multicast address */ + (void)at_unreg_mcast(ifID, (caddr_t)&ifID->ZoneMcastAddr); + bzero((caddr_t)&ifID->ZoneMcastAddr, ETHERNET_ADDR_LEN); + + /* change the zone name - copy both the length and the string */ + bcopy((caddr_t)new_zone, (caddr_t)&ifID->ifZoneName, + new_zone_len+1); + + /* add the new zone to the list of local zones */ + if (!MULTIPORT_MODE && !DEFAULT_ZONE(&ifID->ifZoneName)) + (void)setLocalZones(&ifID->ifZoneName, + (ifID->ifZoneName.len+1)); + + /* Before trying to request our new multicast address, + * wait a while... someone might have alredy requested + * it, so we may see some broadcast messages flying + * by... Set up the structures so that it appears that + * we have already requested the NetInfo. 
+ */ + ifID->ifNumRetries = ZIP_NETINFO_RETRIES; + netinfo_reply_pending = 1; + timeout(zip_sched_getnetinfo, (caddr_t) ifID, + 2*ZIP_TIMER_INT); + + gbuf_freem(m); + break; + default : + routing_needed(m, ifID, TRUE); + break; + } + } + else if (ddp->type == DDP_ATP && + RT_LOOKUP_OKAY(ifID, ddp)) { + if (gbuf_len(m) > DDP_X_HDR_SIZE) + atp = (at_atp_t *)(gbuf_rptr(m)+DDP_X_HDR_SIZE); + else + atp = (at_atp_t *)(gbuf_rptr(gbuf_cont(m))); + + /* Get the user bytes in network order */ + + user_bytes = UAL_VALUE(atp->user_bytes); + user_byte = user_bytes >> 24; /* Get the zeroth byte */ + + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_input: received a ZIP_ATP command=%d\n", user_byte)); + + switch (user_byte) { + case ZIP_GETMYZONE: + zip_reply_to_getmyzone(ifID, m); + gbuf_freem(m); + break; + + case ZIP_GETZONELIST: + zip_reply_to_getzonelist(ifID, m); + gbuf_freem(m); + break; + + case ZIP_GETLOCALZONES: + zip_reply_to_getlocalzones(ifID, m); + gbuf_freem(m); + break; + + default: + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_input: received unknown ZIP_ATP command=%d\n", user_byte)); + routing_needed(m, ifID, TRUE); + break; + } + } else { + gbuf_freem(m); + } + return; +} /* zip_router_input */ + +/*********************************************************************** + * zonename_equal() + * + * Remarks : + * + **********************************************************************/ +int zonename_equal (zone1, zone2) + register at_nvestr_t *zone1, *zone2; +{ + register char c1, c2; + char upshift8(); + register int i; + + if (zone1->len != zone2->len) + return(0); + + for (i=0; i< (int) zone1->len; i++) { + c1 = zone1->str[i]; + c2 = zone2->str[i]; + if (c1 >= 'a' && c1 <= 'z') + c1 += 'A' - 'a'; + if (c2 >= 'a' && c2 <= 'z') + c2 += 'A' - 'a'; + if (c1 & 0x80) + c1 = upshift8(c1); + if (c2 & 0x80) + c2 = upshift8(c2); + if (c1 != c2) + return(0); + } + return(1); +} + + +char upshift8 (ch) + register char ch; +{ + register int i; + + static unsigned char lower_case[] = + 
{0x8a, 0x8c, 0x8d, 0x8e, 0x96, 0x9a, 0x9f, 0xbe, + 0xbf, 0xcf, 0x9b, 0x8b, 0x88, 0}; + static unsigned char upper_case[] = + {0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0xae, + 0xaf, 0xce, 0xcd, 0xcc, 0xcb, 0}; + + for (i=0; lower_case[i]; i++) + if (ch == lower_case[i]) + return (upper_case[i]); + + return(ch); +} + + +/*********************************************************************** + * zip_netinfo_reply () + * + * Remarks : + * + **********************************************************************/ +static void zip_netinfo_reply (netinfo, ifID) + register at_x_zip_t *netinfo; + register at_ifaddr_t *ifID; +{ + u_char mcast_len; + void zip_sched_getnetinfo(); /* forward reference */ + register at_net_al this_net; + char *default_zone; + register u_char zone_name_len; + + /* There may be multiple zones on the cable.... we need to + * worry about whether or not this packet is addressed + * to us. + */ + /* *** Do we really need to check this? *** */ + if (!zonename_equal((at_nvestr_t *)netinfo->data, &ifID->ifZoneName)) + return; + + ifID->ifThisCableStart = NET_VALUE(netinfo->cable_range_start); + ifID->ifThisCableEnd = NET_VALUE(netinfo->cable_range_end); + dPrintf(D_M_ZIP, D_L_OUTPUT, ("Zip_netinfo_reply: Set cable to %d-%d\n", + ifID->ifThisCableStart, ifID->ifThisCableEnd)); + + /* The packet is in response to our request */ + untimeout (zip_sched_getnetinfo, (caddr_t) ifID); + netinfo_reply_pending = 0; + zone_name_len = netinfo->data[0]; + mcast_len = netinfo->data[zone_name_len + 1]; + + if (netinfo->flags & ZIP_ZONENAME_INVALID) { + /* copy out the default zone name from packet */ + default_zone = (char *)&netinfo->data[zone_name_len+1+mcast_len+1]; + bcopy((caddr_t)default_zone, (caddr_t)&ifID->ifZoneName, + *default_zone + 1); + } + + /* add the new zone to the list of local zones */ + if (!MULTIPORT_MODE && !DEFAULT_ZONE(&ifID->ifZoneName)) + (void)setLocalZones(&ifID->ifZoneName, (ifID->ifZoneName.len+1)); + + /* get the multicast address out 
of the GetNetInfo reply, if there is one */ + if (!(netinfo->flags & ZIP_USE_BROADCAST)) { + /* If ZIP_USE_BROADCAST is set, we will use the cable + broadcast address as the multicast address, however + the cable multicast address has already been registered. + */ + /* This packet contains a multicast address, so + * send to elap to register it. + */ + if (FDDI_OR_TOKENRING(ifID->aa_ifp->if_type)) + ddp_bit_reverse(&netinfo->data[zone_name_len + 2]); + + bcopy((caddr_t)&netinfo->data[zone_name_len + 2], + (caddr_t)&ifID->ZoneMcastAddr, ETHERNET_ADDR_LEN); + (void)at_reg_mcast(ifID, (caddr_t)&ifID->ZoneMcastAddr); + } + + this_net = ifID->ifThisNode.s_net; + if ((this_net >= ifID->ifThisCableStart) && + (this_net <= ifID->ifThisCableEnd)) { + /* ThisNet is in the range of valid network numbers + * for the cable. Do nothing. + */ + } else { + /* ThisNet is not in the range of valid network + * numbers for the cable. This may be either because + * the chosen number was from start-up range, or + * because the user has a misconception of where the + * machine is!! Since ThisCableRange is set up, next + * time aarp is invoked, it would select address in + * the right range. + */ + + /* to reset initial_net and initial_node to zero, so + * that aarp is forced to choose new values + */ + ifID->initial_addr.s_net = 0; + ifID->initial_addr.s_node = 0; + + /* Wake up elap_online sleeping on this interface. 
*/ + ZIPwakeup(ifID, ZIP_RE_AARP); + return; + } + + ZIPwakeup(ifID, 0); /* no error */ + return; +} /* zip_netinfo_reply */ + + +/********************************************************************** + * zip_control() + * + **********************************************************************/ +int zip_control (ifID, control) + register at_ifaddr_t *ifID; + int control; +{ + dPrintf(D_M_ZIP, D_L_INFO, ("zip_control called port=%d control=%d\n", + ifID->ifPort, control)); + switch (control) { + case ZIP_ONLINE : + case ZIP_LATE_ROUTER : + ifID->ifNumRetries = 0; + /* Get the desired zone name from elap and put it in + * ifID for zip_getnetinfo() to use. + */ + if (ifID->startup_zone.len) + ifID->ifZoneName = ifID->startup_zone; + zip_getnetinfo(ifID); + break; + case ZIP_NO_ROUTER : + ifID->ifZoneName.len = 1; + ifID->ifZoneName.str[0] = '*'; + ifID->ifZoneName.str[1] = '\0'; + break; + default : + break; + } + return (0); +} + +/* funnel version of zip_getnetinfo */ +static void zip_getnetinfo_funnel(ifID) + register at_ifaddr_t *ifID; +{ + thread_funnel_set(network_flock, TRUE); + zip_getnetinfo(ifID); + thread_funnel_set(network_flock, FALSE); +} + + +/********************************************************************** + * zip_getnetinfo() + * + **********************************************************************/ +static void zip_getnetinfo (ifID) + register at_ifaddr_t *ifID; +{ + register at_x_zip_t *zip; + gbuf_t *m; + register at_ddp_t *ddp; + void zip_sched_getnetinfo(); + register struct atalk_addr *at_dest; + register int size; + + size = DDP_X_HDR_SIZE + ZIP_X_HDR_SIZE + ifID->ifZoneName.len + 1 + + sizeof(struct atalk_addr) + 1; + if ((m = gbuf_alloc (AT_WR_OFFSET+size, PRI_HI)) == NULL) { + /* This time, we're unable to allocate buffer to + * send a packet out, so schedule to send a packet + * out later, and exit. 
+ */ + dPrintf(D_M_ZIP, D_L_WARNING, ("zip_getnetinfo: no buffer, call later port=%d\n", + ifID->ifPort)); + timeout (zip_getnetinfo_funnel, (caddr_t) ifID, ZIP_TIMER_INT/10); + return; + } + + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,0); + *(u_char *)gbuf_rptr(m) = AT_ADDR; + at_dest = (struct atalk_addr *)(gbuf_rptr(m) + 1); + ddp = (at_ddp_t *)(gbuf_rptr(m) + sizeof(struct atalk_addr) + 1); + zip = (at_x_zip_t *)ddp->data; + gbuf_winc(m,size); + + zip->command = ZIP_GETNETINFO; + zip->flags = 0; + NET_ASSIGN(zip->cable_range_start, 0); + NET_ASSIGN(zip->cable_range_end, 0); + if (ifID->ifZoneName.len) /* has to match reply exactly */ + bcopy((caddr_t)&ifID->ifZoneName, (caddr_t)zip->data, + ifID->ifZoneName.len + 1); + else + zip->data[0] = 0; /* No zone name is availbale */ + + /* let the lap fields be uninitialized, 'cause it doesn't + * matter. + */ + DDPLEN_ASSIGN(ddp, size - (sizeof(struct atalk_addr) + 1)); + UAS_ASSIGN(ddp->checksum, 0); + ddp->hopcount = ddp->unused = 0; + NET_ASSIGN(ddp->dst_net, 0); /* cable-wide broadcast */ + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + /* By this time, AARP is done */ + + ddp->dst_node = 0xff; + ddp->src_node = ifID->ifThisNode.s_node; + ddp->dst_socket = ZIP_SOCKET; + ddp->src_socket = ZIP_SOCKET; + ddp->type = DDP_ZIP; + + at_dest->atalk_unused = 0; + NET_NET(at_dest->atalk_net, ddp->dst_net); + at_dest->atalk_node = ddp->dst_node; + + dPrintf(D_M_ZIP, D_L_INPUT, ("zip_getnetinfo: called for port=%d\n", + ifID->ifPort)); + + if (elap_dataput(m, ifID, 0, NULL)) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_getnetinfo: error sending zip_getnetinfo\n")); + return; + } + + ifID->ifNumRetries++; + netinfo_reply_pending = 1; + + timeout (zip_sched_getnetinfo, (caddr_t) ifID, ZIP_TIMER_INT); +} /* zip_getnetinfo */ + + +/********************************************************************** + * zip_sched_getnetinfo() + * + **********************************************************************/ + +void 
zip_sched_getnetinfo (ifID) + register at_ifaddr_t *ifID; +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + if (ifID->ifNumRetries >= ZIP_NETINFO_RETRIES) { + /* enough packets sent.... give up! */ + /* we didn't get any response from the net, so + * assume there's no router around and the given + * zone name, if any, is not valid. Change the + * zone name to "*". + */ + ifID->ifZoneName.len = 1; + ifID->ifZoneName.str[0] = '*'; + ifID->ifZoneName.str[1] = '\0'; + /* Should NBP be notified of this "new" zone name?? */ + netinfo_reply_pending = 0; + + ifID->ifRouterState = NO_ROUTER; + ifID->ifARouter.s_net = 0; + ifID->ifARouter.s_node = 0; + + dPrintf(D_M_ZIP, D_L_INFO, ("zip_sched_getnetinfo: Reset Cable Range\n")); + + ifID->ifThisCableStart = DDP_MIN_NETWORK; + ifID->ifThisCableEnd = DDP_MAX_NETWORK; + + if (ifID->ifState == LAP_ONLINE_FOR_ZIP) + ZIPwakeup (ifID, 0); /* no error */ + } else + zip_getnetinfo(ifID); + + (void) thread_funnel_set(network_flock, FALSE); +} + + +/********************************************************************** + * zip_type_packet() + * + * Remarks: + * This routine checks whether or not the packet contained in "m" + * is an (outgoing) ZIP packet. If not, it returns 0. If it is a + * ZIP packet, it returns the ZIP packet type (ZIP command). "m" + * points to a packet with extended DDP header. The rest of the + * DDP data may or may not be in the first gbuf. 
+ * + **********************************************************************/ +int zip_type_packet (m) + register gbuf_t *m; +{ + register at_atp_t *atp; + register at_ddp_t *ddp; + register at_zip_t *zip; + register u_long user_bytes; + register int user_byte; + + ddp = (at_ddp_t *)gbuf_rptr(m); + if (ddp->dst_socket == ZIP_SOCKET) { + switch (ddp->type) { + case DDP_ZIP : + if (gbuf_len(m) > DDP_X_HDR_SIZE) + zip = (at_zip_t *)(gbuf_rptr(m) + + DDP_X_HDR_SIZE); + else + zip=(at_zip_t *)(gbuf_rptr(gbuf_cont(m))); + return ((int)zip->command); + case DDP_ATP : + if (gbuf_len(m) > DDP_X_HDR_SIZE) + atp = (at_atp_t *)(gbuf_rptr(m)+DDP_X_HDR_SIZE); + else + atp = (at_atp_t *)(gbuf_rptr(gbuf_cont(m))); + /* Get the user bytes in network order */ + user_bytes = UAL_VALUE(atp->user_bytes); + user_byte = user_bytes >> 24; /* Get the zeroth byte */ + if ((user_byte == ZIP_GETMYZONE) || + (user_byte == ZIP_GETZONELIST) || + (user_byte == ZIP_GETLOCALZONES)) + return (user_byte); + else + return (0); + default : + return (0); + } + } else + return (0); +} + +/********************************************************************** + * zip_handle_getmyzone() + * + * Remarks: + * Routine to handle ZIP GetMyZone request locally. It generates + * a phony response to the outgoing ATP request and sends it up. + * + * 07/12/94 : remark2 only called from ddp.c / ddp_output + * should only be called from the home port, but + * when we are a router we should know the infos for all + * anyway, so reply locally with what we have in stock... 
+ * + **********************************************************************/ + +int zip_handle_getmyzone(ifID, m) + register at_ifaddr_t *ifID; + register gbuf_t *m; +{ + at_atp_t *atp; + register at_ddp_t *ddp; + register at_ddp_t *r_ddp; + register at_atp_t *r_atp; + gbuf_t *rm; /* reply message */ + register int size; + u_long ulongtmp; + + dPrintf(D_M_ZIP, D_L_INFO, + ("zip_handle_getmyzone: local reply for port=%d\n", + ifID->ifPort)); + + size = DDP_X_HDR_SIZE + ATP_HDR_SIZE + 1 + ifID->ifZoneName.len; + /* space for two headers and the zone name */ + if ((rm = gbuf_alloc(AT_WR_OFFSET+size, PRI_HI)) == NULL) { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_handle_getmyzone: no buffer, port=%d\n", + ifID->ifPort)); + return (ENOBUFS); + } + + gbuf_rinc(rm,AT_WR_OFFSET); + gbuf_wset(rm,0); + r_ddp = (at_ddp_t *)(gbuf_rptr(rm)); + r_atp = (at_atp_t *)r_ddp->data; + gbuf_winc(rm,size); + + ddp = (at_ddp_t *)gbuf_rptr(m); + if (gbuf_len(m) > DDP_X_HDR_SIZE) + atp = (at_atp_t *)(gbuf_rptr(m) + DDP_X_HDR_SIZE); + else + atp = (at_atp_t *)(gbuf_rptr(gbuf_cont(m))); + + /* fill up the ddp header for reply */ + DDPLEN_ASSIGN(r_ddp, size); + r_ddp->hopcount = r_ddp->unused = 0; + UAS_ASSIGN(r_ddp->checksum, 0); + NET_ASSIGN(r_ddp->dst_net, ifID->ifThisNode.s_net); + NET_NET(r_ddp->src_net, ddp->dst_net); + r_ddp->dst_node = ifID->ifThisNode.s_node; + r_ddp->src_node = ddp->dst_node; + r_ddp->dst_socket = ddp->src_socket; + r_ddp->src_socket = ZIP_SOCKET; + r_ddp->type = DDP_ATP; + + /* fill up the atp header */ + r_atp->cmd = ATP_CMD_TRESP; + r_atp->xo = 0; + r_atp->eom = 1; + r_atp->sts = 0; + r_atp->xo_relt = 0; + r_atp->bitmap = 0; + UAS_UAS(r_atp->tid, atp->tid); + ulongtmp = 1; + ulongtmp = htonl(ulongtmp); + UAL_ASSIGN(r_atp->user_bytes, ulongtmp); /* no of zones */ + + /* fill up atp data part */ + bcopy((caddr_t) &ifID->ifZoneName, (caddr_t) r_atp->data, ifID->ifZoneName.len+1); + + /* all set to send the packet back up */ + + timeout(send_phony_reply, (caddr_t) rm, 
HZ/20); + return (0); +} + +static void +send_phony_reply(rm) + gbuf_t *rm; +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + ddp_input(rm, ifID_home); + + (void) thread_funnel_set(network_flock, FALSE); + return; +} + + +/* + * zip_prep_query_packet: build the actual ddp packet for the zip query + */ + +gbuf_t *zip_prep_query_packet(ifID, RouterNet, RouterNode) + at_ifaddr_t *ifID; + at_net_al RouterNet; /* we want to send the Zip Query to that router */ + at_node RouterNode; +{ + + register gbuf_t *m; + register at_ddp_t *ddp; + + if ((m = gbuf_alloc (AT_WR_OFFSET+1024, PRI_HI)) == NULL) { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_send_query_packet: no buffer, port=%d\n", + ifID->ifPort)); + return((gbuf_t *)NULL); + } + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,0); + + ddp = (at_ddp_t *)(gbuf_rptr(m)); + + /* Prepare the DDP header */ + + ddp->unused = ddp->hopcount = 0; + UAS_ASSIGN(ddp->checksum, 0); + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + ddp->src_node = ifID->ifThisNode.s_node; + ddp->src_socket = ZIP_SOCKET; + + ddp->dst_socket = ZIP_SOCKET; + NET_ASSIGN(ddp->dst_net, RouterNet); + ddp->dst_node = RouterNode; + + ddp->type = DDP_ZIP; + + return (m); +} /* zip_prep_query_packet */ + + +/* + * zip_send_queries: this function send queries for the routing table entries that + * need to know their zones. It scans the routing table for entries with unknown + * zones and build Query packets accordingly. + * Note: this is called on a per port basis. 
+ */ + +void zip_send_queries(ifID, RouterNet, RouterNode) + register at_ifaddr_t *ifID; + at_net_al RouterNet; /* we want to send the Zip Query to that router */ + at_node RouterNode; +{ + RT_entry *Entry = &RT_table[0]; + register gbuf_t *m; + register at_ddp_t *ddp; + int status; + short Query_index, EntryNumber = 0 ; + register u_char port = ifID->ifPort; + char *QueryBuff, *ZoneCount; + short zip_sent = FALSE; + +newPacket: + + if (!(m = zip_prep_query_packet(ifID, RouterNet, RouterNode))) { + return; /* was return (ENOBUFS); */ + } + + ddp = (at_ddp_t *)(gbuf_rptr(m)); + QueryBuff = (char *)ddp->data; + + *QueryBuff++ = ZIP_QUERY; + ZoneCount = QueryBuff; /* network count */ + *ZoneCount = 0; + QueryBuff++; + Query_index = 2; + + + while (EntryNumber < RT_maxentry) { + + /* scan the table, and build the packet with the right entries: + * - entry in use and on the right Port + * - with unknwon zones and in an active state + * - talking to the right router + */ + + if ((Query_index) > 2*254 +2) { + + /* we need to send the packet now, but we can't have more than 256 + * requests for networks: the Netcount field is a 8bit in the zip query + * packet format as defined in Inside Atalk + */ + + dPrintf(D_M_ZIP_LOW, D_L_OUTPUT, + ("zip_send_query: FULL query for %d nets on port#%d.(len=%d)\n", + *ZoneCount, port, Query_index)); + zip_sent = TRUE; + + gbuf_winc(m,DDP_X_HDR_SIZE + Query_index); + DDPLEN_ASSIGN(ddp, DDP_X_HDR_SIZE + Query_index); + + if ((status = + ddp_router_output(m, ifID, AT_ADDR, + RouterNet, RouterNode, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_send_query: ddp_router_output returns =%d\n", status)); + return; /* was return (status); */ + } + + goto newPacket; + } + + + if (((Entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT) && + (Entry->NetStop) && (Entry->NetPort == port) && + (!RT_ALL_ZONES_KNOWN(Entry))){ + + /* we're ready to had that to our list of stuff to send */ + + if (Entry->NetStart) { /* extended net*/ + + *QueryBuff++ = 
(Entry->NetStart & 0xFF00) >> 8; + *QueryBuff++ = (Entry->NetStart & 0x00FF); + + } + else { + *QueryBuff++ = (Entry->NetStop & 0xFF00) >> 8; + *QueryBuff++ = (Entry->NetStop & 0x00FF); + } + + Query_index += 2; + *ZoneCount += 1;/* bump the number of network requested */ + + } + + Entry++; + EntryNumber++; + + } + + dPrintf(D_M_ZIP_LOW, D_L_OUTPUT, + ("zip_send_query: query for %d nets on port#%d.(len=%d)\n", + *ZoneCount, port, Query_index)); + + if (*ZoneCount) { /* non-full Query needs to be sent */ + zip_sent = TRUE; + gbuf_winc(m,DDP_X_HDR_SIZE + Query_index); + DDPLEN_ASSIGN(ddp, DDP_X_HDR_SIZE + Query_index); + + if ((status = + ddp_router_output(m, ifID, AT_ADDR, + RouterNet, RouterNode, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_send_query: ddp_router_output returns =%d\n", + status)); + return; /* was return (status); */ + } + } + else + gbuf_freem(m); + + if (!zip_sent) /* we didn't need to send anything for that port */ + ifID->ifZipNeedQueries = 0; +} /* zip_send_queries */ + +/* zip_reply_received: we recieved the reply to one of our query, update the + * zone bitmap and stuffs with was we received. + * we receive two types of replies: non extended and extended. + * For extended replies, the network count is the Total of zones for that net. 
+ */ + +zip_reply_received(m, ifID, reply_type) + register gbuf_t *m; + register at_ifaddr_t *ifID; + int reply_type; +{ + register at_nvestr_t *zname; + RT_entry *Entry = &RT_table[0]; + register at_ddp_t *ddp; + at_net_al Network; + u_short payload_len, result; + u_char network_count; + char *PacketPtr; + + ddp = (at_ddp_t *)gbuf_rptr(m); + + /* access the number of nets provided in the ZIP Reply */ + + network_count = *(u_char *)(gbuf_rptr(m) + DDP_X_HDR_SIZE + 1); + + PacketPtr = (char *)(gbuf_rptr(m) + DDP_X_HDR_SIZE + 2); + + payload_len = DDPLEN_VALUE(ddp) - (DDP_X_HDR_SIZE + 2); + + dPrintf(D_M_ZIP_LOW, D_L_INPUT, ("zip_reply_received from %d:%d type=%d netcount=%d\n", + NET_VALUE(ddp->src_net), ddp->src_node, reply_type, network_count)); + + + while (payload_len > 0 && network_count >0) { + + Network = *(at_net_al *)PacketPtr; + PacketPtr += 2; + zname = (at_nvestr_t *)PacketPtr; + if (payload_len) + payload_len = payload_len -(zname->len + 3); + + if (zname->len <= 0) { /* not valid, we got a problem here... 
*/ + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_reply_received: Problem zlen=0 for net=%d from %d:%d type=%d netcnt=%d\n", + Network, NET_VALUE(ddp->src_net), ddp->src_node, reply_type, network_count)); + payload_len =0; + continue; + } + + + Entry = rt_blookup(Network); + + if (Entry != NULL) { + + if (Entry->EntryState >= RTE_STATE_SUSPECT) { + + result = zt_add_zonename(zname); + + if (result == ZT_MAXEDOUT) { + + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_reply_received: ZTable full from %d:%d on zone '%s'\n", + NET_VALUE(ddp->src_net), ddp->src_node, zname->str)); + ErrorZIPoverflow = 1; + return(1); + } + + zt_set_zmap(result, Entry->ZoneBitMap); + + RT_SET_ZONE_KNOWN(Entry); + + } + else { + dPrintf(D_M_ZIP, D_L_INPUT, + ("zip_reply_received: entry %d-%d not updated, cause state=%d\n", + Entry->NetStart, Entry->NetStop, Entry->EntryState)); + } + } + else { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_reply_received: network %d not found in RT\n", Network)); + } + + + /* now bump the PacketPtr pointer */ + PacketPtr += zname->len + 1; + network_count--; + } + + if ((reply_type == ZIP_REPLY) && network_count > 0) { + if (Entry) + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_reply_received: Problem decoding zone (after net:%d-%d)\n", + Entry->NetStart, Entry->NetStop)); + ifID->ifZipNeedQueries = 1; + } + else { + ifID->ifZipNeedQueries = 0; + if (Entry) + dPrintf(D_M_ZIP_LOW, D_L_INFO, + ("zip_reply_received: entry %d-%d all zones known\n", + Entry->NetStart, Entry->NetStop)); + } +} + +/* + * zip_reply_to_getmyzone: replies to ZIP GetMyZone received from the Net + */ + +static void zip_reply_to_getmyzone (ifID, m) + register at_ifaddr_t *ifID; + register gbuf_t *m; +{ + at_atp_t *atp; + register at_ddp_t *ddp; + register at_ddp_t *r_ddp; + register at_atp_t *r_atp; + register gbuf_t *rm; /* reply message */ + register int size, Index, status; + char *data_ptr; + RT_entry *Entry; + u_long ulongtmp; + + size = DDP_X_HDR_SIZE + ATP_HDR_SIZE + 1 + ifID->ifZoneName.len; + /* space for 
two headers and the zone name */ + if ((rm = gbuf_alloc(AT_WR_OFFSET+size, PRI_HI)) == NULL) { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_reply_to_getmyzone: no buffer, port=%d\n", ifID->ifPort)); + return; /* was return (ENOBUFS); */ + } + gbuf_rinc(rm,AT_WR_OFFSET); + gbuf_wset(rm,size); + r_ddp = (at_ddp_t *)(gbuf_rptr(rm)); + r_atp = (at_atp_t *)r_ddp->data; + + ddp = (at_ddp_t *)gbuf_rptr(m); + if (gbuf_len(m) > DDP_X_HDR_SIZE) + atp = (at_atp_t *)(gbuf_rptr(m) + DDP_X_HDR_SIZE); + else + atp = (at_atp_t *)(gbuf_rptr(gbuf_cont(m))); + + /* fill up the ddp header for reply */ + DDPLEN_ASSIGN(r_ddp, size); + r_ddp->hopcount = r_ddp->unused = 0; + UAS_ASSIGN(r_ddp->checksum, 0); + + NET_ASSIGN(r_ddp->src_net, ifID->ifThisNode.s_net); + NET_NET(r_ddp->dst_net, ddp->src_net); + + r_ddp->src_node = ifID->ifThisNode.s_node; + r_ddp->dst_node = ddp->src_node; + + r_ddp->dst_socket = ddp->src_socket; + r_ddp->src_socket = ZIP_SOCKET; + r_ddp->type = DDP_ATP; + + /* fill up the atp header */ + r_atp->cmd = ATP_CMD_TRESP; + r_atp->xo = 0; + r_atp->eom = 1; + r_atp->sts = 0; + r_atp->xo_relt = 0; + r_atp->bitmap = 0; + UAS_UAS(r_atp->tid, atp->tid); + ulongtmp = 1; + ulongtmp = htonl(ulongtmp); + UAL_ASSIGN(r_atp->user_bytes, ulongtmp); /* no of zones */ + + data_ptr = (char *)r_atp->data; + + /* + * fill up atp data part with the zone name if we can find it... + */ + + Entry = rt_blookup(NET_VALUE(ddp->src_net)); + if (Entry != NULL && ((Entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT) && + RT_ALL_ZONES_KNOWN(Entry)) { /* this net is well known... 
*/ + + Index = zt_ent_zindex(Entry->ZoneBitMap) -1; + + *data_ptr = ZT_table[Index].Zone.len; + bcopy((caddr_t) &ZT_table[Index].Zone.str, (caddr_t) ++data_ptr, + ZT_table[Index].Zone.len); + + /* all set to send the packet back up */ + dPrintf(D_M_ZIP_LOW, D_L_OUTPUT, + ("zip_reply_to_GMZ: ddp_router_output to %d:%d port %d\n", + NET_VALUE(r_ddp->dst_net), r_ddp->dst_node, ifID->ifPort)); + + if ((status = + ddp_router_output(rm, ifID, AT_ADDR, + NET_VALUE(r_ddp->dst_net), r_ddp->dst_node, 0))) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_reply_to_GMZ: ddp_r_output returns =%d\n", status)); + return; /* was return (status); */ + } + } + else + gbuf_freem(rm); +} + +/* + * zip_reply_to_getzonelist: replies to ZIP GetZoneList requested from the Net + */ + +zip_reply_to_getzonelist (ifID, m) + register at_ifaddr_t *ifID; + register gbuf_t *m; +{ + at_atp_t *atp; + register at_ddp_t *ddp; + register at_ddp_t *r_ddp; + register at_atp_t *r_atp; + register gbuf_t *rm; /* reply message */ + register int size, status; + register short Index=0, StartPoint, ZLength, PacketLen=0; + u_long ulongtmp= 0; + char *Reply; + + ddp = (at_ddp_t *)gbuf_rptr(m); + if (gbuf_len(m) > DDP_X_HDR_SIZE) + atp = (at_atp_t *)(gbuf_rptr(m) + DDP_X_HDR_SIZE); + else + atp = (at_atp_t *)(gbuf_rptr(gbuf_cont(m))); + + + /* space for two headers and the zone name */ + + if ((rm = gbuf_alloc(AT_WR_OFFSET+1024, PRI_HI)) == NULL) { + return (ENOBUFS); + } + + gbuf_rinc(rm,AT_WR_OFFSET); + gbuf_wset(rm,0); + r_ddp = (at_ddp_t *)(gbuf_rptr(rm)); + r_atp = (at_atp_t *)r_ddp->data; + + /* fill up the ddp header for reply */ + + r_ddp->hopcount = r_ddp->unused = 0; + UAS_ASSIGN(r_ddp->checksum, 0); + NET_ASSIGN(r_ddp->src_net, ifID->ifThisNode.s_net); + NET_NET(r_ddp->dst_net, ddp->src_net); + r_ddp->src_node = ifID->ifThisNode.s_node; + r_ddp->dst_node = ddp->src_node; + r_ddp->dst_socket = ddp->src_socket; + r_ddp->src_socket = ZIP_SOCKET; + r_ddp->type = DDP_ATP; + + /* fill up the atp header */ + + 
r_atp->cmd = ATP_CMD_TRESP; + r_atp->xo = 0; + r_atp->eom = 1; + r_atp->sts = 0; + r_atp->xo_relt = 0; + r_atp->bitmap = 0; + UAS_UAS(r_atp->tid, atp->tid); + + Reply = (char *)r_atp->data; + + /* get the start index from the ATP request */ + + StartPoint = (UAL_VALUE(atp->user_bytes) & 0xffff) -1; + + /* find the next zone to send */ + + while ((Index < ZT_maxentry) && StartPoint > 0) { + if (ZT_table[Index].Zone.len) + StartPoint--; + Index++; + } + + + dPrintf(D_M_ZIP_LOW, D_L_OUTPUT, ("zip_reply_to_GZL: Index=%d\n", Index)); + /* + * fill up atp data part with the zone name if we can find it... + */ + + while (Index < ZT_maxentry) { + + ZLength = ZT_table[Index].Zone.len; + + if (ZT_table[Index].ZoneCount && ZLength) { + + + if (PacketLen + 8 + ZLength+1 > DDP_DATA_SIZE) /* packet full */ + break; + + *Reply++ = ZLength; + bcopy((caddr_t) &ZT_table[Index].Zone.str, + Reply, ZLength); + Reply += ZLength; + PacketLen += ZLength + 1; + ulongtmp++; + } + Index++; + } + + if (Index >= ZT_maxentry) /* this is the end of the list */ + + ulongtmp += 0x01000000; + + + UAL_ASSIGN(r_atp->user_bytes, ulongtmp); /* # of zones and flag*/ + + size = DDP_X_HDR_SIZE + ATP_HDR_SIZE + PacketLen; + gbuf_winc(rm,size); + DDPLEN_ASSIGN(r_ddp, size); + + /* all set to send the packet back up */ + + dPrintf(D_M_ZIP_LOW, D_L_OUTPUT, + ("zip_r_GZL: send packet to %d:%d port %d atp_len =%d\n", + NET_VALUE(r_ddp->dst_net), r_ddp->dst_node, ifID->ifPort, PacketLen)); + + + if (status= ddp_router_output(rm, ifID, AT_ADDR, + NET_VALUE(r_ddp->dst_net), r_ddp->dst_node, 0)) { + dPrintf(D_M_ZIP, D_L_ERROR, ("zip_reply_to_GZL: ddp_router_output returns=%d\n", + status)); + return (status); + } + return (0); + +} + +/* + * zip_reply_to_getlocalzones: replies to ZIP GetLocalZones requested from the Net + */ + +int zip_reply_to_getlocalzones (ifID, m) + register at_ifaddr_t *ifID; + register gbuf_t *m; +{ + at_atp_t *atp; + register at_ddp_t *ddp; + register at_ddp_t *r_ddp; + register at_atp_t 
*r_atp; + register gbuf_t *rm; /* reply message */ + int size, status; + short Index, Index_wanted, ZLength; + short i,j, packet_len; + short zCount, ZoneCount, ZonesInPacket; + char *zmap, last_flag = 0; + RT_entry *Entry; + char *Reply; + + u_long ulongtmp = 0; + + Index = Index_wanted = ZLength = i = j = packet_len = zCount = ZoneCount = + ZonesInPacket = 0; + + ddp = (at_ddp_t *)gbuf_rptr(m); + if (gbuf_len(m) > DDP_X_HDR_SIZE) + atp = (at_atp_t *)(gbuf_rptr(m) + DDP_X_HDR_SIZE); + else + atp = (at_atp_t *)(gbuf_rptr(gbuf_cont(m))); + + /* space for two headers and the zone name */ + + if ((rm = gbuf_alloc(AT_WR_OFFSET+1024, PRI_HI)) == NULL) { + return (ENOBUFS); + } + + gbuf_rinc(rm,AT_WR_OFFSET); + gbuf_wset(rm,0); + r_ddp = (at_ddp_t *)(gbuf_rptr(rm)); + r_atp = (at_atp_t *)r_ddp->data; + + Reply = (char *)r_atp->data; + + + /* get the start index from the ATP request */ + + Index_wanted = (UAL_VALUE(atp->user_bytes) & 0xffff) -1; + + dPrintf(D_M_ZIP_LOW, D_L_INFO, + ("zip_r_GLZ: for station %d:%d Index_wanted = %d\n", + NET_VALUE(ddp->src_net), ddp->src_node, Index_wanted)); + + Entry = rt_blookup(NET_VALUE(ddp->src_net)); + + if (Entry != NULL && ((Entry->EntryState & 0x0F) >= RTE_STATE_SUSPECT) && + RT_ALL_ZONES_KNOWN(Entry)) { /* this net is well known... 
*/ + + ZoneCount = zt_ent_zcount(Entry) ; + + dPrintf(D_M_ZIP_LOW, D_L_INFO, + ("zip_reply_GLZ: for %d:%d ZoneCount=%d\n", + NET_VALUE(ddp->src_net), ddp->src_node, ZoneCount)); + + zmap = &Entry->ZoneBitMap[0]; + + /* + * first of all, we want to find the "first next zone" in the bitmap, + * to do so, we need to scan the bitmap and add the number of valid + * zones we find until we reach the next zone to be sent in the reply + */ + + if (ZoneCount > Index_wanted) { + + ZoneCount -= Index_wanted; + + /* find the starting point in the bitmap according to index */ + + for (i = 0; Index_wanted >= 0 && i < ZT_BYTES; i++) + if (zmap[i]) { + if (Index_wanted < 8) { + /* how many zones in the bitmap byte */ + for (j = 0, zCount =0; j < 8 ; j++) + if ((zmap[i] << j) & 0x80) + zCount++; + if (Index_wanted < zCount) { + for (j = 0 ; Index_wanted > 0 && j < 8 ; j++) + if ((zmap[i] << j) & 0x80) + Index_wanted--; + break; + } + else + Index_wanted -= zCount; + } + else + for (j = 0 ; j < 8 ; j++) + if ((zmap[i] << j) & 0x80) + Index_wanted--; + } + + /* + * now, we point to the begining of our next zones in the bitmap + */ + + while (i < ZT_BYTES) { + + if (zmap[i]) { + for (; j < 8 ; j++) + if ((zmap[i] << j) & 0x80) { + Index = i*8 + j; /* get the index in ZT */ + + ZLength = ZT_table[Index].Zone.len; + + if (ZT_table[Index].ZoneCount && ZLength) { + if (packet_len + ATP_HDR_SIZE + ZLength + 1 > + DDP_DATA_SIZE) + goto FullPacket; + + *Reply++ = ZLength; + bcopy((caddr_t) &ZT_table[Index].Zone.str, + Reply, ZLength); + Reply += ZLength; + packet_len += ZLength + 1; + ZonesInPacket ++; + dPrintf(D_M_ZIP_LOW, D_L_INFO, + ("zip_reply_GLZ: add z#%d to packet (l=%d)\n", + Index, packet_len)); + } + else { + dPrintf(D_M_ZIP, D_L_WARNING, + ("zip_reply_GLZ: no len for index=%d\n", + Index)); + } + } + } + i++; + j = 0; + } + } + else /* set the "last flag" bit in the reply */ + last_flag = 1; + } + else /* set the "last flag" bit in the reply */ + last_flag = 1; + +FullPacket: + + 
if (ZonesInPacket == ZoneCount) + last_flag = 1; + + + /* fill up the ddp header for reply */ + + r_ddp->hopcount = r_ddp->unused = 0; + UAS_ASSIGN(r_ddp->checksum, 0); + + NET_ASSIGN(r_ddp->src_net, ifID->ifThisNode.s_net); + NET_NET(r_ddp->dst_net, ddp->src_net); + + r_ddp->src_node = ifID->ifThisNode.s_node; + r_ddp->dst_node = ddp->src_node; + + r_ddp->dst_socket = ddp->src_socket; + r_ddp->src_socket = ZIP_SOCKET; + r_ddp->type = DDP_ATP; + + /* fill up the atp header */ + r_atp->cmd = ATP_CMD_TRESP; + r_atp->xo = 0; + r_atp->eom = 1; + r_atp->sts = 0; + r_atp->xo_relt = 0; + r_atp->bitmap = 0; + UAS_UAS(r_atp->tid, atp->tid); + ulongtmp = ((last_flag << 24) & 0xFF000000) + ZonesInPacket; /* # of zones and flag*/ + UAL_ASSIGN(r_atp->user_bytes, ulongtmp); + size = DDP_X_HDR_SIZE + ATP_HDR_SIZE + packet_len; + gbuf_winc(rm,size); + DDPLEN_ASSIGN(r_ddp, size); + + /* all set to send the packet back up */ + + dPrintf(D_M_ZIP_LOW, D_L_OUTPUT, + ("zip_r_GLZ: send packet to %d:%d port %d atp_len =%d\n", + NET_VALUE(r_ddp->dst_net), r_ddp->dst_node, ifID->ifPort, packet_len)); + + if (status= ddp_router_output(rm, ifID, AT_ADDR, + NET_VALUE(r_ddp->dst_net), r_ddp->dst_node, 0)) { + dPrintf(D_M_ZIP, D_L_ERROR, + ("zip_reply_to_GLZ: ddp_router_output returns =%d\n", + status)); + return (status); + } + return (0); +} /* zip_reply_to_getlocalzones */ + +int regDefaultZone(ifID) + at_ifaddr_t *ifID; +{ + int i; + char data[ETHERNET_ADDR_LEN]; + + if (!ifID) + return(-1); + + zt_get_zmcast(ifID, &ifID->ifZoneName, data); + if (FDDI_OR_TOKENRING(ifID->aa_ifp->if_type)) + ddp_bit_reverse(data); + bcopy((caddr_t)data, (caddr_t)&ifID->ZoneMcastAddr, ETHERNET_ADDR_LEN); + (void)at_reg_mcast(ifID, (caddr_t)&ifID->ZoneMcastAddr); + return(0); +} diff --git a/bsd/netat/ddp_rtmp.c b/bsd/netat/ddp_rtmp.c new file mode 100644 index 000000000..59e7a25fd --- /dev/null +++ b/bsd/netat/ddp_rtmp.c @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993-1998 Apple Computer, Inc. + * All Rights Reserved. + */ + +/* + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void rtmp_router_input(); + +/****************************************************************/ +/* */ +/* */ +/* RTMP Protocol */ +/* */ +/* */ +/****************************************************************/ + + +/* rtmp.c: , 1.6; 2/26/93; Apple Computer, Inc." 
*/ + + +#define NROUTERS2TRAK 8 +#define FIFTYSECS 10 +#define NODE(r) ((r)->ifARouter.s_node) +#define NET(r) ((r)->ifARouter.s_net) +#define INUSE(r) (NODE(r)) + +void ddp_age_router(); + +static struct routerinfo { + struct at_addr ifARouter; + at_ifaddr_t *ifID; + void *tmo; +} trackedrouters[NROUTERS2TRAK]; + +void trackrouter_rem_if(ifID) + register at_ifaddr_t *ifID; +{ + register i; + register struct routerinfo *router; + + for (i = NROUTERS2TRAK; --i >= 0;) { + router = &trackedrouters[i]; + if (trackedrouters[i].ifID == ifID) { + untimeout(ddp_age_router, (caddr_t)router); + break; + } + } +} + + +void routershutdown() +{ + register i; + + for (i = NROUTERS2TRAK; --i >= 0;) { + register struct routerinfo *router; + + router = &trackedrouters[i]; + if (INUSE(router)) { + untimeout(ddp_age_router, (caddr_t) router); + bzero((caddr_t) router, sizeof(struct routerinfo)); + } + } +} + +int router_added = 0; +int router_killed = 0; + + + +void trackrouter(ifID, net, node) + register at_ifaddr_t *ifID; + register unsigned short net; + register unsigned char node; +{ + register struct routerinfo *unused = NULL; + register i; + + for (i = NROUTERS2TRAK; --i >= 0;) { + register struct routerinfo *router; + + router = &trackedrouters[(i + node) & (NROUTERS2TRAK-1)]; + if ((NODE(router) == node) && (NET(router) == net)) { + untimeout(ddp_age_router, (caddr_t) router); + timeout(ddp_age_router, (caddr_t) router, 50*SYS_HZ); + unused = NULL; + break; + } + else if (!INUSE(router) && !unused) + unused = router; + } + if (unused) { + router_added++; + + unused->ifID = ifID; + NET(unused) = net; + NODE(unused) = node; + timeout(ddp_age_router, (caddr_t) unused, 50*SYS_HZ); + if (NET(ifID) == 0 && NODE(ifID) == 0) { + NET(ifID) = net; + NODE(ifID) = node; + ifID->ifRouterState = ROUTER_AROUND; + } + } +} + +/* + * This is the timeout function that is called after 50 seconds, + * if no router packets come in. 
That way we won't send extended + * frames to something that is not there. Untimeout is called if + * an RTMP packet comes in so this routine will not be called. + */ +void ddp_age_router(deadrouter) + register struct routerinfo *deadrouter; +{ + register at_ifaddr_t *ourrouter = deadrouter->ifID; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + + dPrintf(D_M_RTMP, D_L_INFO, + ("ddp_age_router called deadrouter=%d:%d\n", NODE(deadrouter), NET(deadrouter))); + + router_killed++; + + if (NODE(ourrouter) == NODE(deadrouter) && + NET(ourrouter) == NET(deadrouter)) { + register unsigned long atrandom = random(); + register struct routerinfo *newrouter; + register i; + + bzero((caddr_t) deadrouter, sizeof(struct routerinfo)); + for (i = NROUTERS2TRAK; --i >= 0;) { + newrouter = &trackedrouters[(i + atrandom) & (NROUTERS2TRAK-1)]; + if (INUSE(newrouter)) + break; + else + newrouter = NULL; + } + if (newrouter) { + NET(ourrouter) = NET(newrouter); + NODE(ourrouter) = NODE(newrouter); + } + else { + /* from gorouterless() */ + ATTRACE(AT_MID_DDP, AT_SID_TIMERS, AT_LV_WARNING, FALSE, + "ddp_age_router entry : ARouter = 0x%x, RouterState = 0x%x", + ATALK_VALUE(ourrouter->ifARouter), ourrouter->ifRouterState, 0); + + switch (ourrouter->ifRouterState) { + case ROUTER_AROUND : + ourrouter->ifARouter.s_net = 0; + ourrouter->ifARouter.s_node = 0; + dPrintf(D_M_RTMP,D_L_INFO, + ("rtmp.c Gorouterless!!!!!!!!\n")); + ourrouter->ifThisCableStart = DDP_MIN_NETWORK; + ourrouter->ifThisCableEnd = DDP_MAX_NETWORK; + ourrouter->ifRouterState = NO_ROUTER; + zip_control(ourrouter, ZIP_NO_ROUTER); + break; + case ROUTER_WARNING : + /* there was a router that we were ignoring... + * now, even that's gone. But we want to tackle the + * case where another router may come up after all + * of them have died... 
+ */ + ourrouter->ifRouterState = NO_ROUTER; + break; + } + } + } else + bzero((caddr_t) deadrouter, sizeof(struct routerinfo)); + + (void) thread_funnel_set(network_flock, FALSE); + +} /* ddp_age_router */ + +void rtmp_input (mp, ifID) + register gbuf_t *mp; + register at_ifaddr_t *ifID; +{ + register at_net_al this_net; + register at_net_al range_start, range_end; + register at_ddp_t *ddp = (at_ddp_t *)gbuf_rptr(mp); + /* NOTE: there is an assumption here that the + * DATA follows the header. */ + register at_rtmp *rtmp = (at_rtmp *)ddp->data; + + if (gbuf_type(mp) != MSG_DATA) { + /* If this is a M_ERROR message, DDP is shutting down, + * nothing to do here...If it's something else, we don't + * understand what it is + */ + gbuf_freem(mp); + return; + } + + if (!ifID) { + gbuf_freem(mp); + return; + } + if (gbuf_len(mp) < (DDP_X_HDR_SIZE + sizeof(at_rtmp))) { + gbuf_freem(mp); + return; + } + this_net = ifID->ifThisNode.s_net; + if (rtmp->at_rtmp_id_length != 8) { + gbuf_freem(mp); + return; + } + + { + at_rtmp_tuple *tp; + tp = ((at_rtmp_tuple *)&rtmp->at_rtmp_id[1]); + range_start = NET_VALUE(tp->at_rtmp_net); + tp = ((at_rtmp_tuple *)&rtmp->at_rtmp_id[4]); + range_end = NET_VALUE(tp->at_rtmp_net); + + if (ifID->ifRouterState == ROUTER_AROUND) { + if ((ifID->ifThisCableStart == range_start) && + (ifID->ifThisCableEnd == range_end)) { + trackrouter(ifID, + NET_VALUE(rtmp->at_rtmp_this_net), + rtmp->at_rtmp_id[0] + ); + } + } else { + /* There was no router around earlier, one + * probably just came up. 
+ */ + if ((this_net >= DDP_STARTUP_LOW) && + (this_net <= DDP_STARTUP_HIGH)) { + /* we're operating in the startup range, + * ignore the presence of router + */ + if (ifID->ifRouterState == NO_ROUTER) { + dPrintf(D_M_RTMP, D_L_STARTUP, + ("Warning: new router came up: invalid startup net/node\n")); + trackrouter(ifID, + NET_VALUE(rtmp->at_rtmp_this_net), + rtmp->at_rtmp_id[0] + ); + ifID->ifRouterState = ROUTER_WARNING; + } + } else { + /* our address + * is not in startup range; Is our + * address good for the cable?? + */ + if ((this_net >= range_start) && + (this_net <= range_end)) { + /* Our address is in the range + * valid for this cable... Note + * the router address and then + * get ZIP rolling to get the + * zone info. + */ + ifID->ifThisCableStart = range_start; + ifID->ifThisCableEnd = range_end; + trackrouter(ifID, + NET_VALUE(rtmp->at_rtmp_this_net), + rtmp->at_rtmp_id[0] + ); + zip_control(ifID, ZIP_LATE_ROUTER); + } else { + /* Our address is not in the + * range valid for this cable.. + * ignore presence of the + * router + */ + if (ifID->ifRouterState == NO_ROUTER) { + dPrintf(D_M_RTMP,D_L_ERROR, + ("Warning: new router came up: invalid net/node\n")); + trackrouter(ifID, + NET_VALUE(rtmp->at_rtmp_this_net), + rtmp->at_rtmp_id[0] + ); + ifID->ifRouterState = ROUTER_WARNING; + } + } + } + } + } + + gbuf_freem(mp); + return; +} + + +void rtmp_init() +{ + bzero((caddr_t)trackedrouters, sizeof(struct routerinfo)*NROUTERS2TRAK); +} + + diff --git a/bsd/netat/ddp_rtmptable.c b/bsd/netat/ddp_rtmptable.c new file mode 100644 index 000000000..a197b7d6c --- /dev/null +++ b/bsd/netat/ddp_rtmptable.c @@ -0,0 +1,1145 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*---------------------------------------------------------------------------- + * + * RTMP & ZIP routing tables access routines + * + * This code implement b-tree search and manipulation of + * of the RTMP routing table and ZIP zone table. + * + * The RTMP routing table is a data block divided in several routing + * entries sorted during insertion in a b-tree form. We use a table and + * not dynamically allocated entries because it allow us to scan the whole + * table when RTMP packets are generated. The routing table entries are sorted + * by there NetStop value (because non extended nets have a NetStart value of + * zero. From any point in the tree, the left side contains Network ranges + * smaller or equal to the current Node, and the right tree points to higher + * values network ranges. + * + * + * 0.01 3/16/94 LD Creation + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + * + *---------------------------------------------------------------------------- + * + * Copyright (c) 1994, 1996, 1997, 1998 Apple Computer, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +RT_entry *RT_table_freelist; /* start of free entry list */ +RT_entry RT_table_start; /* start of the actual entry table */ +RT_entry *RT_table; /* the routing table */ +ZT_entry *ZT_table; /* the Zone Information Protocol table */ +short RT_maxentry; /* Number of entry in RTMP table */ +short ZT_maxentry; /* Number of entry in ZIP table */ + +char errstr[512]; /* used to display meaningfull router errors*/ + +extern at_ifaddr_t *ifID_table[]; +extern at_ifaddr_t *ifID_home; +extern snmpStats_t snmpStats; +extern atlock_t ddpinp_lock; + +short ErrorRTMPoverflow = 0; /* flag if RTMP table is too small for this net */ +short ErrorZIPoverflow = 0; /* flag if ZIP table is too small for this net */ + + /* prototypes */ +void getIfUsage( int, at_ifnames_t *); + +/* + * This a temporary function : just to display the router error + */ + +void RouterError(port, err_number) +short port, err_number; + +{ + switch (err_number) { + + case ERTR_SEED_CONFLICT: + dPrintf(D_M_RTMP, D_L_ERROR, + ("**** RTR Error on port# %d SEED_CONFLICT\n", port)); + break; + + case ERTR_CABLE_CONFLICT: + dPrintf(D_M_RTMP, D_L_ERROR, + ("**** RTR Error on port# %d CABLE_CONFLICT\n", port)); + break; + + case ERTR_RTMP_BAD_VERSION: + dPrintf(D_M_RTMP, D_L_ERROR, + ("**** RTR Error on port# %d RTMP_BAD_VERSION\n", port)); + break; + + case ERTR_CABLE_STARTUP: + dPrintf(D_M_RTMP, D_L_ERROR, + ("**** RTR Error on port# %d RTMP_CABLE_STARTUP\n", + port)); + break; + + default: + dPrintf(D_M_RTMP, D_L_ERROR, + ("**** RTR Error on port# %d WHAT IN THE WORLD IS THIS ONE? 
code=%d\n", + port, err_number)); + break; + } + dPrintf(D_M_RTMP, D_L_ERROR, ("Explanation: %s\n", errstr)); +} + + +/* + * this function just look for a NetNumber in the routing table, + * no check is done for the validity of the entry + */ + +RT_entry *rt_blookup (NetNumber) +at_net_al NetNumber; +{ + + RT_entry *ptree = &RT_table_start; + at_net_al LowEnd; + register unsigned int s; +/* + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : Lookup for Net=%d\n", + "rt_blookup", NetNumber)); +*/ + ATDISABLE(s, ddpinp_lock); + while (ptree) { + + if (NetNumber > ptree->NetStop) { +/* + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : Go Right from #%d\n", + "rt_blookup", ptree->NextIRNet)); +*/ + ptree = ptree->right; + continue; + } + else { + if (ptree->NetStart) + LowEnd = ptree->NetStart; + else + LowEnd = ptree->NetStop; + + if (NetNumber < LowEnd ) { +/* + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : Go Left from #%d\n", + "rt_blookup", ptree->NextIRNet)); +*/ + ptree = ptree->left; + continue; + } + ATENABLE(s, ddpinp_lock); + + /* we're in the range (either extended or not) + * return the entry found. + */ + +/* dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : found %04d-%04d Port=%d State=0x%x\n", + "rt_blookup", ptree->NetStart, ptree->NetStop, ptree->NetPort, + ptree->EntryState)); +*/ + + return (ptree); + } + } + ATENABLE(s, ddpinp_lock); + + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : %04d : NOT FOUND\n", + "rt_blookup", NetNumber)); + return ((RT_entry *)NULL); +} + + +/* Routing table btree insert routine + * Uses a RT_entry parameter as the input, the insert is sorted in + * the tree on the NetStop field. Provision is made for non extented + * net (ie NetStart = 0). + * The function returns the element where the new entry was inserted, or + * NULL if the insert didn't work. (In this cas there is a problem with + * the tree coherency... 
+ * + */ + + +RT_entry *rt_binsert (NewEntry) +RT_entry *NewEntry; +{ + RT_entry *ptree = &RT_table_start; + + register at_net_al NetStart = NewEntry->NetStart; + register at_net_al NetStop = NewEntry->NetStop; + + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("rt_binsert: for Net %d-%d state=x%x NextIR %d:%d\n", + NetStart, NetStop, NewEntry->EntryState,NewEntry->NextIRNet, NewEntry->NextIRNode)); + + if (ptree == (RT_entry *)NULL) { + *ptree = *NewEntry; + at_state.flags |= AT_ST_RT_CHANGED; + return (NewEntry); + } + + + while (ptree) { + + if (NetStop > ptree->NetStop) { /* walk the right sub-tree */ + if (ptree->right) + ptree = ptree->right; + else { + ptree->right = NewEntry; + at_state.flags |= AT_ST_RT_CHANGED; + return (ptree); + } + } + else { /* walk the left sub-tree */ + if (ptree->left) + ptree = ptree->left; + else { + ptree->left = NewEntry; + at_state.flags |= AT_ST_RT_CHANGED; + return (ptree); + } + } + + } + + dPrintf(D_M_RTMP, D_L_WARNING, ("%s : ERROR NOT INSERTED Net %d-%d\n", + "rt_binsert", NetStart, NetStop)); + return ((RT_entry *)NULL); +} + +RT_entry *rt_insert(NStop, NStart, NxNet, NxNode, NtDist, NtPort, EntS) + at_net_al NStop, NStart, NxNet; + at_node NxNode; + u_char NtDist, NtPort, EntS; +{ + RT_entry *New; + if ((New = RT_table_freelist)) { + RT_table_freelist = RT_table_freelist->right; + } else + return ((RT_entry *)NULL); + New->right = NULL; + New->NetStop = NStop; + New->NetStart = NStart; + New->NextIRNet = NxNet; + New->NextIRNode = NxNode; + New->NetDist = NtDist; + New->NetPort = NtPort; + New->EntryState = EntS; + bzero(New->ZoneBitMap, sizeof(New->ZoneBitMap)); + at_state.flags |= AT_ST_RT_CHANGED; + return(rt_binsert(New)); +} + +/* + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : %04d : NOT FOUND\n", + "rt_blookup", NetNumber)); + * Routing table btree deletion routine + * + */ + +RT_entry *rt_bdelete (NetStop, NetStart) + at_net_al NetStop, NetStart; +{ + + RT_entry *rt_found, *pprevious, *pnext, *pnextl, *psub; + at_net_al 
LowEnd; + + rt_found = &RT_table_start; + + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("%s : Delete %d-%d\n", + "rt_bdelete", NetStart, NetStop)); + + while (rt_found) { + + if (NetStop > rt_found->NetStop) { + pprevious = rt_found; + rt_found = rt_found->right; + continue; + } + else { + + /* non extended nets cases */ + + if (rt_found->NetStart) + LowEnd = rt_found->NetStart; + else + LowEnd = rt_found->NetStop; + + if (NetStop < LowEnd) { + pprevious = rt_found; + rt_found = rt_found->left; + continue; + } + + /* we're in the range (either extended or not) + * return the entry found. + */ + + break; + } + } + + dPrintf(D_M_RTMP, D_L_ROUTING, ("%s : Delete %d-%d found to delete %d-%d\n", + "rt_bdelete", NetStart, NetStop, rt_found->NetStart,rt_found->NetStop)); + + if (rt_found) { + + + + /* we found the entry, now reorg the sub-trees + * spanning from our node. + */ + + if ((pnext = rt_found->right)) { + + /* Tree pruning: take the left branch of the current + * node and place it at the lowest left branch + * of the current right branch + */ + + psub = pnext; + + /* walk the Right/Left sub tree from current node */ + + while ((pnextl = psub->left)) + psub = pnextl; + + /* plug the old left tree to the new ->Right leftmost node */ + + psub->left = rt_found->left; + + + } else { /* only left sub-tree, simple case */ + + pnext = rt_found->left; + } + + /* Now, plug the current node sub tree to the good pointer of + * our parent node. + */ + + + if (pprevious->left == rt_found) + pprevious->left = pnext; + else + pprevious->right = pnext; + + /* clean-up entry and add to the free-list */ + + at_state.flags |= AT_ST_RT_CHANGED; + return(rt_found); + } + + else { /* Trying to delete something that doesn't exist? 
*/ + + dPrintf(D_M_RTMP, D_L_WARNING, ("%s : %d NOT Removed\n", + "rt_bdelete", NetStop)); + + return ((RT_entry *)NULL); + } + + +} + + +RT_entry *rt_sortedshow(parent) +RT_entry *parent; +{ + RT_entry *me; + + me = parent; + + if (parent == NULL) { + me = &RT_table_start; + while (me) + if (me->left) { + parent = me; + me = me->left; + } +/* parent = parent->parent; */ + } + return (parent); +} + +/* + * debug only: display the contents of the routing table + */ + +void rt_show () +{ + RT_entry *ptree; + int i=0; + + ptree = &RT_table[0]; + + while (ptree && i < 600 ) { + if (ptree->NetStop) { + dPrintf(D_M_RTMP_LOW, D_L_VERBOSE, + ("%4d-%4d IR=%d:%d Dist=%d\n", + ptree->NetStop, ptree->NetStart, ptree->NextIRNet, + ptree->NextIRNode, (short)ptree->NetDist)); + } else { + dPrintf(D_M_RTMP_LOW, D_L_VERBOSE, + ("%04d : * FREE ENTRY\n", i)); + } + ptree++; + i++; + } +} + +/* + * prepare the indexing of the free entries in the RTMP table + */ + +rt_table_init() +{ + short i; + + if ((RT_table = (RT_entry *)_MALLOC(sizeof(RT_entry)*RT_maxentry, + M_RTABLE, M_WAITOK)) == NULL) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmptable: Can't allocate RT_table\n")); + return (ENOMEM); + } + if ((ZT_table = (ZT_entry *)_MALLOC(sizeof(ZT_entry)*ZT_maxentry, + M_RTABLE, M_WAITOK)) == NULL) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("rtmptable: Can't allocate ZT_table\n")); + return (ENOMEM); + } + dPrintf(D_M_RTMP, D_L_STARTUP, ("rt_table_init called\n")); + bzero(&RT_table[0], sizeof(RT_entry)* RT_maxentry); + for (i= 1 ; i < RT_maxentry ; i++) { + (&RT_table[i-1])->right = &RT_table[i]; + } + RT_table_freelist = &RT_table[0]; + + at_state.flags |= AT_ST_RT_CHANGED; + at_state.flags |= AT_ST_ZT_CHANGED; + bzero(&RT_table_start, sizeof(RT_entry)); + + /* also clean up the ZIP table */ + + bzero(&ZT_table[0], sizeof(ZT_entry)* ZT_maxentry); + ErrorRTMPoverflow = 0; + ErrorZIPoverflow = 0; + return(0); +} + +/* + * zt_add_zone: add a zone name in the zone table. 
+ */ + +zt_add_zone(name, length) +char *name; +short length; +{ + at_nvestr_t zname; + bcopy(name, &zname.str, length); + zname.len = length; + return (zt_add_zonename(&zname)); +} + +/* + * zt_add_zonename: add a zone name in the zone table. + */ + +int zt_add_zonename(zname) +at_nvestr_t *zname; +{ + register short res,i; + register unsigned int s; + + if (res = zt_find_zname(zname)) + return(res); + + ATDISABLE(s, ddpinp_lock); + for (i = 0; i < ZT_maxentry ; i++) { + if (ZT_table[i].ZoneCount == 0 && ZT_table[i].Zone.len == 0) {/* free entry */ + ZT_table[i].Zone = *zname; + dPrintf(D_M_RTMP, D_L_VERBOSE, ("zt_add_zonename: zone #%d %s len=%d\n", + i, ZT_table[i].Zone.str, ZT_table[i].Zone.len)); + at_state.flags |= AT_ST_ZT_CHANGED; + ATENABLE(s, ddpinp_lock); + return(i+1); + } + } + ATENABLE(s, ddpinp_lock); + /* table full... */ + return (ZT_MAXEDOUT); +} + +/* Adjust zone counts for a removed network entry. + * If the ZoneCount of a zone reaches zero, delete the zone from the zone table + */ +void zt_remove_zones(zmap) +u_char *zmap; +{ + + register u_short i,j, Index; + + for (i=0; i< ZT_BYTES ; i++) { + + if (zmap[i]) { + for (j=0; j < 8 ; j++) + if ((zmap[i] << j) & 0x80) { + Index = i*8 + j; /* get the index in ZT */ + /* 1-23-97 this routine caused a crash once, presumably + zmap bits beyond ZT_table size got set somehow. 
+ prevent that here + */ + if (Index >= ZT_maxentry) { + dPrintf(D_M_RTMP, D_L_ERROR, + ("zt_remove_zones: index (%d) GT ZT_maxentry (%d) (zmap:%d)\n", + Index,ZT_maxentry,i)); + return; + } + dPrintf(D_M_RTMP, D_L_VERBOSE, + ("zt_remove_zones: zone #%d %s was=%d\n", Index, + ZT_table[Index].Zone.str, ZT_table[Index].ZoneCount)); + if (ZT_table[Index].ZoneCount > 0) + ZT_table[Index].ZoneCount--; + if (ZT_table[Index].ZoneCount == 0) + ZT_table[Index].Zone.len = 0; + at_state.flags |= AT_ST_ZT_CHANGED; + } + } + } +} + + + +/* + * zt_compute_hash: compute hash index from the zone name string + */ + +short zt_compute_hash(zname) +at_nvestr_t *zname; +{ + register u_short checksum=0, i; + register char c1; + + /* apply the upper name + DDP checksum algorithm */ + + for (i= 0 ; i < zname->len; i++) { + + /* upperize the character */ + + c1 = zname->str[i]; + if (c1 >= 'a' && c1 <= 'z') + c1 += 'A' - 'a'; + if (c1 & 0x80) + c1 = upshift8(c1); + + /* DDP Checksum */ + + checksum += c1; + checksum = ((checksum & 0x8000) ? + (checksum << 1 | 1) : (checksum << 1)); + } + + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("zt_comphash: value computed for zone=%s h=%d\n", + zname->str, checksum)); + + if (checksum) + return (checksum); + else + return (0xffff); + +} + +/* + * zt_upper_zname: translate the name string into uppercase + */ + +void zt_upper_zname(zname) +at_nvestr_t *zname; +{ + register short i; + register char c1; + + for (i= 0 ; i < zname->len; i++) { + + c1 = zname->str[i]; + if (c1 >= 'a' && c1 <= 'z') + c1 += 'A' - 'a'; + if (c1 & 0x80) + c1 = upshift8(c1); + + zname->str[i] = c1; + } +} + +/* + * zt_get_zmcast: calcularte the zone multicast address for a + * given zone name. 
+ * Returns the result in "buffer" + */ + +zt_get_zmcast(ifID, zname, buffer) + at_ifaddr_t *ifID; /* we want to know the media type */ + at_nvestr_t *zname; /* source name for multicast address */ + char *buffer; /* resulting Zone Multicast address */ +{ + u_short h; + + h = zt_compute_hash(zname); + +/* + * Find a nice way to decide if it is TokenRing or Ethernet for + * the Multicast address computation.... + */ + + if (ifID->aa_ifp->if_type != IFT_ISO88025) { /* token ring */ + + /* Ethernet case */ + + buffer[0] = 0x09; + buffer[1] = 0x00; + buffer[2] = 0x07; + /* no router, use cable multicast */ + if (MULTIHOME_MODE && ifID->ifRouterState == NO_ROUTER ) { + buffer[3] = buffer[4] = buffer[5] = 0xff; + } + else { + buffer[3] = 0x00; + buffer[4] = 0x00; + buffer[5] = h % 0xFD; + } + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, ("zt_get_multi: computed for h=%d %x %x\n", + h, *(u_int *)&buffer[0], *(u_short *)&buffer[4])); + + return(6); /* returns Multicast address length */ + + } + else { + /* assume it is token ring: note for the magic number computation, + * first see Inside Mac Page 3-10, there is 20 multicast addresses + * for TLAP, and they are from 0xC000 0000 0008 00 to 0xC000 0200 0000 00 + */ + buffer[0] = 0xC0; + buffer[1] = 0x00; + *(u_int *)&buffer[2] = 1 << ((h % 19) + 11); + dPrintf(D_M_RTMP, D_L_WARNING,("zt_get_multi: BROAD not found forr h=%d \n", + h)); + return(6); + } + + + +} + +/* + * zt_ent_zindex: return the first zone index found in the zone map + * return the entry number+1 in the Zone Table, or zero if not found + */ + +int zt_ent_zindex(zmap) +u_char *zmap; +{ + u_short i,j; + + + for (i = 0 ; i < ZT_BYTES ; i++) + + if (zmap[i]) + for (j = 0 ; j < 8 ; j++) + if ((zmap[i] << j) & 0x80) + return (8*i + j +1); + + return (0); +} +/* + * zt_ent_zcount: count the number of actives zone for a routing entry + */ + +zt_ent_zcount(ent) +RT_entry *ent; +{ + register u_char *zmap; + register u_short i,j; + register int zone_count = 0 ; + register 
unsigned int s; + + ATDISABLE(s, ddpinp_lock); + + if (!RT_ALL_ZONES_KNOWN(ent)) { + ATENABLE(s, ddpinp_lock); + return (0); + } + zmap = ent->ZoneBitMap; + + for (i = 0 ; i < ZT_BYTES ; i++) { + + if (*zmap) + + for (j = 0 ; j < 8 ; j++) + if ((*zmap << j) & 0x80) + zone_count++; + zmap++; + } + + ATENABLE(s, ddpinp_lock); + return (zone_count); +} + +/* + * zt_find_zname: match a zone name in the zone table and return the entry if found + */ +zt_find_zname(zname) +at_nvestr_t *zname; +{ + register short i, j, found; + register char c1, c2; + register unsigned int s; + + + if (!zname->len) + return(0); + + ATDISABLE(s, ddpinp_lock); + for (i = 0 ; i < ZT_maxentry ; i++) { + if (!ZT_table[i].ZoneCount || zname->len != ZT_table[i].Zone.len) + continue; + + found = 1; /* did we get the right one? */ + + for (j = 0 ; j < zname->len ; j++) { + c1 = zname->str[j]; + c2 = ZT_table[i].Zone.str[j]; + if (c1 >= 'a' && c1 <= 'z') + c1 += 'A' - 'a'; + if (c2 >= 'a' && c2 <= 'z') + c2 += 'A' - 'a'; + if (c1 & 0x80) + c1 = upshift8(c1); + if (c2 & 0x80) + c2 = upshift8(c2); + if (c1 != c2) { + found = 0; + break; + } + } + + if (found) { + ATENABLE(s, ddpinp_lock); + return (i+1); + } + } + + ATENABLE(s, ddpinp_lock); + return(0); +} + + +/* + * zt_set_zmap: set a bit for the corresponding zone map in an entry bitmap + */ +void zt_set_zmap(znum, zmap) + u_short znum; + char *zmap; +{ + register u_short num = znum -1; + register unsigned int s; + + ATDISABLE(s, ddpinp_lock); + if (!(zmap[num >> 3] & 0x80 >> (num % 8))) { + zmap[num >> 3] |= 0x80 >> (num % 8); + ZT_table[num].ZoneCount++; + } + ATENABLE(s, ddpinp_lock); +} + + +/* + * zt_clr_zmap: clear a bit for the corresponding zone map in an entry bitmap + */ +void zt_clr_zmap(znum, zmap) + u_short znum; + char *zmap; +{ + register u_short num = znum -1; + register unsigned int s; + + ATDISABLE(s, ddpinp_lock); + if (zmap[num >> 3] & 0x80 >> (num % 8)) { + zmap[num >> 3] ^= 0x80 >> (num % 8); + ZT_table[num].ZoneCount--; + } 
+ ATENABLE(s, ddpinp_lock); +} + + +/* + * routing_needed : + * This function performs the actual lookup and forward of packets + * send to the box for routing. + * + * The destination network is looked up in our tables, and if we + * know the next IR to send the packet to, we forward the packet + * on the right port. + * + * If the destination is unknown, we simply dump the packet. + */ + +void routing_needed(mp, ifID, bypass) + gbuf_t *mp; + at_ifaddr_t *ifID; + char bypass; /* set by special socket handlers */ +{ + + register at_ddp_t *ddp; + register int msgsize; + register RT_entry *Entry; + register gbuf_t *tmp_m; + + /* first check the interface is up and forwarding */ + + if (!ifID) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("routing_needed: non valid IFID!\n")); + gbuf_freel(mp); + return; + } + if ((ifID->ifRoutingState < PORT_ONLINE)) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("routing_needed: port %d not online yet\n", + ifID->ifPort)); + gbuf_freel(mp); + return; + } + + ddp = (at_ddp_t *)gbuf_rptr(mp); + msgsize = DDPLEN_VALUE(ddp); + for (tmp_m = gbuf_next(mp); tmp_m; tmp_m = gbuf_next(tmp_m)) + msgsize += DDPLEN_VALUE(((at_ddp_t *)gbuf_rptr(tmp_m))); + + if (ddp->hopcount++ > 15) { + dPrintf(D_M_RTMP, D_L_WARNING, + ("routing_needed: drop packet for %d:%d, hopcount too high\n", + NET_VALUE(ddp->dst_net), ddp->dst_node)); + gbuf_freel(mp); + snmpStats.dd_hopCount++; + return; /* was return(1); */ + } + + if ((Entry = rt_blookup(NET_VALUE(ddp->dst_net)))) { + + dPrintf(D_M_RTMP_LOW, D_L_ROUTING, + ("routing_needed: FOUND for %d.%d p=%d to %d.%d \n", + NET_VALUE(ddp->dst_net), ddp->dst_node, ifID->ifPort, + Entry->NextIRNet, Entry->NextIRNode)); + + /* somehow, come to that point... 
*/ + + ifID->ifStatistics.fwdPkts++; + ifID->ifStatistics.fwdBytes += msgsize; + + if (Entry->NetDist) /* net not directly connected */ + ddp_router_output(mp, ifID_table[Entry->NetPort], AT_ADDR, + Entry->NextIRNet, Entry->NextIRNode, 0); + else {/* we are directly on this net */ + + /* we want to avoid duplicating broadcast packet on the same net, + * but special sockets handlers are ok to do that (mainly for + * for loopback purpose). So, if the "bypass" flag is set, we don't + * check for that test... [Problem was "movietalk"]. + */ + + if (bypass || ifID_table[Entry->NetPort] != ifID) + ddp_router_output(mp, ifID_table[Entry->NetPort], AT_ADDR, + NET_VALUE(ddp->dst_net), ddp->dst_node, 0); + else { + dPrintf(D_M_RTMP, D_L_ROUTING, + ("routing_needed: bad loopback for add %d.%d from port %d (%d.%d)\n", + NET_VALUE(ddp->dst_net), ddp->dst_node, ifID->ifPort, + NET_VALUE(ddp->src_net), ddp->src_node)); + ifID->ifStatistics.droppedPkts++; + ifID->ifStatistics.droppedBytes += msgsize; + + gbuf_freel(mp); + return; /* was return (2); */ + } + + + } + } + else { + dPrintf(D_M_RTMP, D_L_ROUTING, + ("routing_needed: NOT FOUND for add %d.%d from port %d our %d.%d\n", + NET_VALUE(ddp->dst_net), ddp->dst_node, ifID->ifPort, + ifID_home->ifThisNode.s_net, + ifID_home->ifThisNode.s_node)); + + ifID->ifStatistics.droppedPkts++; + ifID->ifStatistics.droppedBytes += msgsize; + snmpStats.dd_noRoutes++; + + gbuf_freel(mp); + return; /* was return (2); */ + + } + /* return(0); */ +} /* routing_needed */ + +ZT_entryno *zt_getNextZone(first) + int first; + /* a call made with first = TRUE returns the first valid entry in + the ZT_table, if first != TRUE, then then each call returns the + next valid entry in the table. 
The next call after the last + valid entry was read returns NULL + */ +{ + int i; + static int idx=0; + static ZT_entryno zte; + + if (!ZT_table) + return NULL; + + if (first) + idx=0; + + for (i=idx; iEntryState != RTE_STATE_UNUSED) { + size = i; + return(i); + } + return(0); +} + +getZipTableSize() +{ + register int i; + register ZT_entry *zt; + static int size=0; + + if (!(at_state.flags & AT_ST_ZT_CHANGED)) + return(size); + + for (i=ZT_maxentry,zt = &ZT_table[ZT_maxentry-1]; i; i--,zt--) + if (zt->ZoneCount) { + size = i; + return(i); + } + return(0); +} + +getRtmpTable(d,s,c) + RT_entry *d; /* destination */ + int s; /* starting entry */ + int c; /* # entries to copy */ +{ + register int i,n=0; + register RT_entry *rt; + + for(i=s,rt=&RT_table[s]; iEntryState != RTE_STATE_UNUSED) { + *d++ = *rt; + n++; + } +} + +getZipTable(d,s,c) + ZT_entry *d; /* destination */ + int s; /* starting entry */ + int c; /* # entries to copy */ +{ + + bcopy(&ZT_table[s], d, c*sizeof(ZT_entry)); +} + +at_nvestr_t *getRTRLocalZone(ifz) + zone_usage_t *ifz; +{ + char *zmap; + RT_entry *route; + int i, j, index; + int zcnt=0; /* zone we're pointing to in the list */ + char zonesChecked[ZT_BYTES]; + at_ifaddr_t *ifID; + + if (ifz->zone_index < 0) { + return((at_nvestr_t*)NULL); + } + bzero(zonesChecked,sizeof(zonesChecked)); + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (!(route = rt_blookup(ifID->ifThisNode.s_net))) { + return((at_nvestr_t*)NULL); + } + zmap=route->ZoneBitMap; + dPrintf(D_M_RTMP_LOW, D_L_USR1, + ("getRTRLocal: i/f %s, net:%d\n",ifID->ifName, + ifID->ifThisNode.s_net)); + for (i = 0 ; i < ZT_BYTES; i++) { + if (zmap[i]) { + for (j = 0; j < 8 ; j++) + if ( (zmap[i] & (0x80 >> j)) && + !(zonesChecked[i] & (0x80 >> j)) + ) { + zonesChecked[i] |= (0x80 >> j); + if (ifz->zone_index == zcnt) { + index = i * 8 + j; + getIfUsage(index, &ifz->zone_iflist); + ifz->zone_name = ZT_table[index].Zone; + dPrintf(D_M_RTMP_LOW, D_L_USR1, + ("getRTRLocal:zmap:%8x zcnt:%d\n", + 
*(int*)zmap, zcnt)); + ifz->zone_index = index+1; + return(&ZT_table[index].Zone); + } + zcnt++; + } + } + } + } + dPrintf(D_M_RTMP_LOW, D_L_USR1, + ("getRTRLocal: returning NULL last ent:%d net:%d zmap:%08x\n", + (ifID ? ifID->ifPort : 0), + (ifID ? ifID->ifThisNode.s_net : 0),*(int*)zmap)); + ifz->zone_name.len = 0; + return((at_nvestr_t*)NULL); +} /* getRTRLocalZone */ + +void getIfUsage(zone, ifs_in_zone) + int zone; + at_ifnames_t *ifs_in_zone; + +/* sets a "1" in each element of the char array for each I/F in the + requested zone. The char array has a 1:1 correspondence with the + ifID_table. Zone is assumed to be valid and local, so if we're in + single port mode, we'll set the home port and thats it. +*/ +{ + u_int zmi; /* zone map index for zone */ + u_char zmb; /* zone map bit mask for zone */ + RT_entry *route; + int cnt=0; + at_ifaddr_t *ifID; + + if (!MULTIPORT_MODE) { + strncpy(ifs_in_zone->at_if[cnt], ifID_home->ifName, + IFNAMESIZ); + return; + } + bzero(ifs_in_zone, sizeof(at_ifnames_t)); + zmi = zone>>3; + zmb = 0x80>>(zone % 8); + dPrintf(D_M_NBP_LOW, D_L_USR3, ("get_ifs znum:%d zmi%d zmb:%x\n", + zone, zmi, zmb)); + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (!(route = rt_blookup(ifID->ifThisNode.s_net))) + return; + if (route->ZoneBitMap[zmi] & zmb) { + dPrintf(D_M_NBP_LOW, D_L_USR3, ("zone in port %d \n", + route->NetPort)); + strncpy(ifs_in_zone->at_if[cnt], + ifID_table[route->NetPort]->ifName, IFNAMESIZ); + cnt++; + } + } + return; +} /* getIfUsage */ diff --git a/bsd/netat/ddp_sip.c b/bsd/netat/ddp_sip.c new file mode 100644 index 000000000..766eabace --- /dev/null +++ b/bsd/netat/ddp_sip.c @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + * + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +#ifndef lint +/* static char sccsid[] = "@(#)sip.c: 2.0, 1.3; 10/18/93; Copyright 1988-89, Apple Computer, Inc."; */ +#endif /* lint */ + +/****************************************************************/ +/* */ +/* */ +/* S I P */ +/* System Information Protocol */ +/* */ +/* */ +/****************************************************************/ + +/* System Information Protocol -- implemented to handle Responder + * Queries. The queries are ATP requests, but the ATP responses are faked + * here in a DDP level handler routine. The responder socket is always + * the 1st socket in the dynamic socket range (128) and it is assumed + * that the node will be registered on that socket. + * + * In A/UX implementation, this implies that /etc/appletalk program will + * register the node name on socket DDP_SOCKET_1st_DYNAMIC (128). 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include /* nbp.h needs the gbuf definiton */ +#include +#include +#include +#include + +#define SIP_SYSINFO_CMD 1 +#define SIP_DATALINK_CMD 6 + +#define SIP_GOOD_RESPONSE 0x1 +#define SIP_BAD_RESPONSE 0xff + +#define SIP_DRIVER_VERSION 0x0001 +#define SIP_RESPONDER_VERSION 0x0001 + +typedef struct { + u_char response; + u_char unused; + u_short responder_version; +} sip_userbytes_t; + +void sip_input(mp, ifID) + gbuf_t *mp; + int *ifID; /* not used */ +{ + /* Packets arriving here are actually ATP packets, but since + * A/UX only send dummy responses, we're implementing responder as + * a DDP handler + */ + register at_ddp_t *ddp; + register at_atp_t *atp; + register gbuf_t *tmp; + u_char *resp; + sip_userbytes_t ubytes; + + ddp = (at_ddp_t *)gbuf_rptr(mp); + + /* Make sure the packet we got is an ATP packet */ + if (ddp->type != DDP_ATP) { + gbuf_freem(mp); + return; + } + + /* assuming that the whole packet is in one contiguous buffer */ + atp = (at_atp_t *)ddp->data; + + switch(UAL_VALUE(atp->user_bytes)) { + case SIP_SYSINFO_CMD : + /* Sending a response with "AppleTalk driver version" (u_short) + * followed by 14 zeros will pacify the interpoll. + * What? You don't understand what it means to send 14 zeroes? + * Tsk, tsk, look up SIP protocol specs for details!! 
+ */ + if ((tmp = (gbuf_t *)ddp_growmsg(mp, 16)) == NULL) { + /* dont have buffers */ + gbuf_freem(mp); + return; + } + if (tmp == mp) + /* extra space allocated on the same buffer block */ + resp = atp->data; + else + resp = (u_char *)gbuf_rptr(tmp); + bzero(resp, 16); + *(u_short *)resp = SIP_DRIVER_VERSION; + + ubytes.response = SIP_GOOD_RESPONSE; + ubytes.unused = 0; + ubytes.responder_version = SIP_RESPONDER_VERSION; + break; + case SIP_DATALINK_CMD : + /* In this case, the magic spell is to send 2 zeroes after + * the "AppleTalk driver version". + */ + if ((tmp = (gbuf_t *)ddp_growmsg(mp, 4)) == NULL) { + /* dont have buffers */ + gbuf_freem(mp); + return; + } + if (tmp == mp) + /* extra space allocated on the same buffer block */ + resp = atp->data; + else + resp = (u_char *)gbuf_rptr(tmp); + bzero(resp, 16); + *(u_short *)resp = SIP_DRIVER_VERSION; + + ubytes.response = SIP_GOOD_RESPONSE; + ubytes.unused = 0; + ubytes.responder_version = SIP_RESPONDER_VERSION; + break; + default : + /* bad request, send a bad command response back */ + ubytes.response = SIP_BAD_RESPONSE; + ubytes.unused = 0; + ubytes.responder_version = SIP_RESPONDER_VERSION; + } + + NET_NET(ddp->dst_net, ddp->src_net); + ddp->dst_node = ddp->src_node; + ddp->dst_socket = ddp->src_socket; + bcopy((caddr_t) &ubytes, (caddr_t) atp->user_bytes, sizeof(ubytes)); + atp->cmd = ATP_CMD_TRESP; + atp->eom = 1; + atp->sts = 0; + atp->bitmap = 0; + + (void)ddp_output(&mp, DDP_SOCKET_1st_DYNAMIC, FALSE); + return; +} /* sip_input */ + diff --git a/bsd/netat/ddp_usrreq.c b/bsd/netat/ddp_usrreq.c new file mode 100644 index 000000000..b46a541c7 --- /dev/null +++ b/bsd/netat/ddp_usrreq.c @@ -0,0 +1,351 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. + */ + +/* ddp_usrreq.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int at_control(), at_memzone_init(); +extern void nbp_input(), ep_input(), zip_router_input(), + sip_input(), add_ddp_handler(), init_ddp_handler(), + ddp_start(), ddp_input(), appletalk_hack_start(); +extern u_short ddp_checksum(); +extern at_ifaddr_t *forUs(); +extern struct mbuf *m_dup(struct mbuf *, int); + +extern at_ifaddr_t *ifID_home; +extern int xpatcnt; + +struct atpcb ddp_head; +u_long ddp_sendspace = 600, /* *** what should this value be? 
*** */ + ddp_recvspace = 50 * (600 + sizeof(struct sockaddr_at)); + +int ddp_pru_control(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p) +{ + return(at_control(so, cmd, data, ifp)); +} + + +int ddp_pru_attach(struct socket *so, int proto, + struct proc *p) +{ + int s, error = 0; + at_ddp_t *ddp = NULL; + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + s = splnet(); + error = at_pcballoc(so, &ddp_head); + splx(s); + if (error) + return error; + error = soreserve(so, ddp_sendspace, ddp_recvspace); + pcb = (struct atpcb *)((so)->so_pcb); + pcb->pid = current_proc()->p_pid; + pcb->ddptype = (u_char) proto; /* set in socreate() */ + pcb->proto = ATPROTO_DDP; + + return error; +} + + +int ddp_pru_disconnect(struct socket *so) +{ + + int s, error = 0; + at_ddp_t *ddp = NULL; + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + if (pcb == NULL) + return (EINVAL); + + if ((so->so_state & SS_ISCONNECTED) == 0) + return ENOTCONN; + + soisdisconnected(so); + s = splnet(); + at_pcbdetach(pcb); + splx(s); + + return error; +} + + +int ddp_pru_abort(struct socket *so) +{ + int s; + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + if (pcb == NULL) + return (EINVAL); + + soisdisconnected(so); + s = splnet(); + at_pcbdetach(pcb); + splx(s); + + return 0; +} + +int ddp_pru_detach(struct socket *so) +{ + int s; + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + if (pcb == NULL) + return (EINVAL); + + s = splnet(); + at_pcbdetach(pcb); + splx(s); + return 0; +} + +int ddp_pru_shutdown(struct socket *so) +{ + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + if (pcb == NULL) + return (EINVAL); + + socantsendmore(so); + return 0; +} + + +int ddp_pru_bind(struct socket *so, struct sockaddr *nam, + struct proc *p) +{ + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + if (pcb == NULL) + return (EINVAL); + + return (at_pcbbind(pcb, nam)); +} + + +int ddp_pru_send(struct socket *so, int flags, struct mbuf *m, + struct 
sockaddr *addr, struct mbuf *control, + struct proc *p) +{ + at_ddp_t *ddp = NULL; + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + + if (pcb == NULL) + return (EINVAL); + + if (!(pcb->ddp_flags & DDPFLG_HDRINCL)) { + /* prepend a DDP header */ + M_PREPEND(m, DDP_X_HDR_SIZE, M_WAIT); + ddp = mtod(m, at_ddp_t *); + } + + if (so->so_state & SS_ISCONNECTED) { + if (addr) + return EISCONN; + + if (ddp) { + NET_ASSIGN(ddp->dst_net, pcb->raddr.s_net); + ddp->dst_node = pcb->raddr.s_node; + ddp->dst_socket = pcb->rport; + } + } else { + if (addr == NULL) + return ENOTCONN; + + if (ddp) { + struct sockaddr_at *dst = + (struct sockaddr_at *) addr; + NET_ASSIGN(ddp->dst_net, dst->sat_addr.s_net); + ddp->dst_node = dst->sat_addr.s_node; + ddp->dst_socket = dst->sat_port; + } + } + if (ddp) { + ddp->length = m->m_pkthdr.len; + UAS_ASSIGN(ddp->checksum, + (pcb->ddp_flags & DDPFLG_CHKSUM)? 1: 0); + ddp->type = (pcb->ddptype)? pcb->ddptype: DEFAULT_OT_DDPTYPE; +#ifdef NOT_YET + NET_ASSIGN(ddp->src_net, pcb->laddr.s_net); + ddp->src_node = pcb->laddr.s_node; + ddp->src_socket = pcb->lport; +#endif + } else { + ddp = mtod(m, at_ddp_t *); + } + if (NET_VALUE(ddp->dst_net) == ATADDR_ANYNET && + ddp->dst_node == ATADDR_BCASTNODE && + (pcb->ddp_flags & DDPFLG_SLFSND)) { + struct mbuf *n; + + if ((n = m_dup(m, M_DONTWAIT))) { + at_ifaddr_t + *ifID = ifID_home, + *ifIDTmp = (at_ifaddr_t *)NULL; + + /* as in ddp_output() loop processing, fill in the + rest of the header */ + ddp = mtod(n, at_ddp_t *); + if (MULTIHOME_MODE && (ifIDTmp = forUs(ddp))) + ifID = ifIDTmp; + NET_ASSIGN(ddp->src_net, ifID->ifThisNode.s_net); + ddp->src_node = ifID->ifThisNode.s_node; + ddp->src_socket = pcb->lport; + if (UAS_VALUE(ddp->checksum)) + UAS_ASSIGN(ddp->checksum, ddp_checksum(m, 4)); + ddp_input(n, ifID); + } + } + return(ddp_output(&m, pcb->lport, FALSE)); +} /* ddp_pru_send */ + +int ddp_pru_sockaddr(struct socket *so, + struct sockaddr **nam) +{ + int s; + struct atpcb *pcb; + struct 
sockaddr_at *sat; + + MALLOC(sat, struct sockaddr_at *, sizeof *sat, M_SONAME, M_WAITOK); + bzero((caddr_t)sat, sizeof(*sat)); + + s = splnet(); + if ((pcb = sotoatpcb(so)) == NULL) { + splx(s); + FREE(sat, M_SONAME); + return(EINVAL); + } + + sat->sat_family = AF_APPLETALK; + sat->sat_len = sizeof(*sat); + sat->sat_port = pcb->lport; + sat->sat_addr = pcb->laddr; + splx(s); + + *nam = (struct sockaddr *)sat; + return(0); +} + + +int ddp_pru_peeraddr(struct socket *so, + struct sockaddr **nam) +{ + int s; + struct atpcb *pcb; + struct sockaddr_at *sat; + + MALLOC(sat, struct sockaddr_at *, sizeof *sat, M_SONAME, M_WAITOK); + bzero((caddr_t)sat, sizeof(*sat)); + + s = splnet(); + if ((pcb = sotoatpcb(so)) == NULL) { + splx(s); + FREE(sat, M_SONAME); + return(EINVAL); + } + + sat->sat_family = AF_APPLETALK; + sat->sat_len = sizeof(*sat); + sat->sat_port = pcb->rport; + sat->sat_addr = pcb->raddr; + splx(s); + + *nam = (struct sockaddr *)sat; + return(0); +} + + +int ddp_pru_connect(struct socket *so, struct sockaddr *nam, + struct proc *p) +{ + struct atpcb *pcb = (struct atpcb *)((so)->so_pcb); + struct sockaddr_at *faddr = (struct sockaddr_at *) nam; + + if (pcb != NULL) + return (EINVAL); + + if (xpatcnt == 0) + return (EADDRNOTAVAIL); + + if (faddr->sat_family != AF_APPLETALK) + return (EAFNOSUPPORT); + + pcb->raddr = faddr->sat_addr; + soisconnected(so); + return 0; +} + + + +/* + * One-time AppleTalk initialization + */ +void ddp_init() +{ + at_memzone_init(); + ddp_head.atpcb_next = ddp_head.atpcb_prev = &ddp_head; + init_ddp_handler(); + + /* Initialize protocols implemented in the kernel */ + add_ddp_handler(EP_SOCKET, ep_input); + add_ddp_handler(ZIP_SOCKET, zip_router_input); + add_ddp_handler(NBP_SOCKET, nbp_input); + add_ddp_handler(DDP_SOCKET_1st_DYNAMIC, sip_input); + + ddp_start(); + + appletalk_hack_start(); +} /* ddp_init */ + diff --git a/bsd/netat/debug.h b/bsd/netat/debug.h new file mode 100644 index 000000000..1a802b0e9 --- /dev/null +++ 
b/bsd/netat/debug.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989, 1997 Apple Computer, Inc. + */ + +/* netat/debug.h */ + +#ifndef _NETAT_DEBUG_H_ +#define _NETAT_DEBUG_H_ + +#define D_L_FATAL 0x00000001 +#define D_L_ERROR 0x00000002 +#define D_L_WARNING 0x00000004 +#define D_L_INFO 0x00000008 +#define D_L_VERBOSE 0x00000010 +#define D_L_STARTUP 0x00000020 +#define D_L_STARTUP_LOW 0x00000040 +#define D_L_SHUTDN 0x00000080 +#define D_L_SHUTDN_LOW 0x00000100 +#define D_L_INPUT 0x00000200 +#define D_L_OUTPUT 0x00000400 +#define D_L_STATS 0x00000800 +#define D_L_STATE_CHG 0x00001000 /* re-aarp, ifState etc. 
*/ +#define D_L_ROUTING 0x00002000 +#define D_L_DNSTREAM 0x00004000 +#define D_L_UPSTREAM 0x00008000 +#define D_L_STARTUP_INFO 0x00010000 +#define D_L_SHUTDN_INFO 0x00020000 +#define D_L_ROUTING_AT 0x00040000 /* atalk address routing */ +#define D_L_USR1 0x01000000 +#define D_L_USR2 0x02000000 +#define D_L_USR3 0x04000000 +#define D_L_USR4 0x08000000 +#define D_L_TRACE 0x10000000 + + +#define D_M_PAT 0x00000001 +#define D_M_PAT_LOW 0x00000002 +#define D_M_ELAP 0x00000004 +#define D_M_ELAP_LOW 0x00000008 +#define D_M_DDP 0x00000010 +#define D_M_DDP_LOW 0x00000020 +#define D_M_NBP 0x00000040 +#define D_M_NBP_LOW 0x00000080 +#define D_M_ZIP 0x00000100 +#define D_M_ZIP_LOW 0x00000200 +#define D_M_RTMP 0x00000400 +#define D_M_RTMP_LOW 0x00000800 +#define D_M_ATP 0x00001000 +#define D_M_ATP_LOW 0x00002000 +#define D_M_ADSP 0x00004000 +#define D_M_ADSP_LOW 0x00008000 +#define D_M_AEP 0x00010000 +#define D_M_AARP 0x00020000 +#define D_M_ASP 0x00040000 +#define D_M_ASP_LOW 0x00080000 +#define D_M_AURP 0x00100000 +#define D_M_AURP_LOW 0x00200000 +#define D_M_TRACE 0x10000000 + + /* macros for working with atp data at the lap level. + * These are for tracehook performance measurements only!!! + * It is assumed that the ddp & atp headers are at the top of the + * mblk, occupy contiguous memory and the atp headers are of the + * extended type only. 
+ */ + +typedef struct dbgBits { + unsigned long dbgMod; /* debug module bitmap (used in dPrintf) */ + unsigned long dbgLev; /* debug level bitmap */ +} dbgBits_t; + +extern dbgBits_t dbgBits; + + /* macros for debugging */ +#ifdef DEBUG +#define dPrintf(mod, lev, p) \ + if (((mod) & dbgBits.dbgMod) && ((lev) & dbgBits.dbgLev)) {\ + kprintf p; \ + } +#else +#define dPrintf(mod, lev, p) +#endif + +/* 8/5/98 LD: Adds MacOSX kernel debugging facility */ +/* note: kdebug must be added to the "RELEASE" config in conf/MASTER.ppc */ + +#include +#if KDEBUG +/* + Strings for the "trace/codes" file: + +0x02650004 AT_DDPinput + +0x02680000 AT_ADSP_Misc +0x02680004 AT_ADSP_RxData +0x02680008 AT_ADSP_SndData +0x0268000C AT_ADSP_Read +0x02680010 AT_ADSP_Write +0x02680014 AT_ADSP_mbuf +0x02680018 AT_ADSP_putnext +0x0268001c AT_ADSP_ATrw + +*/ + +/* usage: + KERNEL_DEBUG(DBG_AT_DDP_INPUT | DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_AT_DDP_INPUT, 0,0,0,0,0); + KERNEL_DEBUG(DBG_AT_DDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); +*/ + +#define DBG_AT_DDP_INPUT NETDBG_CODE(DBG_NETDDP, 1) +#define DBG_AT_DDP_OUTPUT NETDBG_CODE(DBG_NETDDP, 2) + +#define DBG_ADSP_MISC NETDBG_CODE(DBG_NETADSP, 0) +#define DBG_ADSP_RCV NETDBG_CODE(DBG_NETADSP, 1) +#define DBG_ADSP_SND NETDBG_CODE(DBG_NETADSP, 2) +#define DBG_ADSP_READ NETDBG_CODE(DBG_NETADSP, 3) +#define DBG_ADSP_WRITE NETDBG_CODE(DBG_NETADSP, 4) +#define DBG_ADSP_MBUF NETDBG_CODE(DBG_NETADSP, 5) +#define DBG_ADSP_PNEXT NETDBG_CODE(DBG_NETADSP, 6) +#define DBG_ADSP_ATRW NETDBG_CODE(DBG_NETADSP, 7) +#endif + +#define trace_mbufs(pri, str, start)\ +{ if (start)\ +{ int i; gbuf_t *tmp;\ + for (tmp=start, i=0; tmp && i < 10; tmp = gbuf_cont(tmp), i++) {\ + dPrintf(pri, D_L_TRACE, ("%s=0x%x, len=%d %s\n",\ + str, tmp, gbuf_len(tmp),\ + (((struct mbuf *)tmp)->m_flags & M_EXT)?"CL":""));\ + KERNEL_DEBUG(DBG_ADSP_MBUF, 0, tmp, gbuf_len(tmp), gbuf_next(tmp), \ + ((struct mbuf *)tmp)->m_flags & M_EXT);\ +}}} + +/* from h/atlog.h */ + +/* These pointers 
are non-NULL if logging or tracing are activated. */ +#ifndef LOG_DRIVER +extern char *log_errp; +extern char *log_trcp; +#endif /* LOG_DRIVER */ + +/* ATTRACE() macro. Use this routine for calling + * streams tracing and logging. If `log' is TRUE, then + * this event will also be logged if logging is on. + */ +#if !defined(lint) && defined(AT_DEBUG) +#define ATTRACE(mid,sid,level,log,fmt,arg1,arg2,arg3) \ + if (log_trcp || (log && log_errp)) { \ + strlog(mid,sid,level,SL_TRACE | \ + (log ? SL_ERROR : 0) | \ + (level <= AT_LV_FATAL ? SL_FATAL : 0), \ + fmt,arg1,arg2,arg3); \ + } +#else +#define ATTRACE(mid,sid,level,log,fmt,arg1,arg2,arg3) \ +/* printf(fmt, arg1, arg2, arg3); */ + +#endif + + +/* Levels for AppleTalk tracing */ + +#define AT_LV_FATAL 1 +#define AT_LV_ERROR 3 +#define AT_LV_WARNING 5 +#define AT_LV_INFO 7 +#define AT_LV_VERBOSE 9 + + +/* Sub-ids for AppleTalk tracing, add more if you can't figure + * out where your event belongs. + */ + +#define AT_SID_INPUT 1 /* Network incoming packets */ +#define AT_SID_OUTPUT 2 /* Network outgoing packets */ +#define AT_SID_TIMERS 3 /* Protocol timers */ +#define AT_SID_FLOWCTRL 4 /* Protocol flow control */ +#define AT_SID_USERREQ 5 /* User requests */ +#define AT_SID_RESOURCE 6 /* Resource limitations */ + + + +/* Module ID's for AppleTalk subsystems */ + +#define AT_MID(n) (200+n) + +/* +#define AT_MID_MISC AT_MID(0) not used +#define AT_MID_LLAP AT_MID(1) not_used +#define AT_MID_ELAP 202 moved to lap.h +#define AT_MID_DDP 203 moved to ddp.h +#define AT_MID_RTMP AT_MID(4) not used +#define AT_MID_NBP AT_MID(5) not used +#define AT_MID_EP AT_MID(6) not used +#define AT_MID_ATP AT_MID(7) not used +#define AT_MID_ZIP AT_MID(8) not needed +#define AT_MID_PAP AT_MID(9) not used +#define AT_MID_ASP AT_MID(10) redefined in adsp.h +#define AT_MID_AFP AT_MID(11) not used +#define AT_MID_ADSP 212 moved to adsp.h +#define AT_MID_NBPD AT_MID(13) not used +#define AT_MID_LAP 214 moved to lap.h +#define AT_MID_LAST 214 
+*/ + +#ifdef AT_MID_STRINGS +static char *at_mid_strings[] = { + "misc", + "LLAP", + "ELAP", + "DDP", + "RTMP", + "NBP", + "EP", + "ATP", + "ZIP", + "PAP", + "ASP", + "AFP", + "ADSP", + "NBPD", + "LAP" +}; +#endif + + +#ifndef SL_FATAL +/* Don't define these if they're already defined */ + +/* Flags for log messages */ + +#define SL_FATAL 01 /* indicates fatal error */ +#define SL_NOTIFY 02 /* logger must notify administrator */ +#define SL_ERROR 04 /* include on the error log */ +#define SL_TRACE 010 /* include on the trace log */ + +#endif + +#endif /* _NETAT_DEBUG_H_ */ + diff --git a/bsd/netat/drv_dep.c b/bsd/netat/drv_dep.c new file mode 100644 index 000000000..96fa0d78a --- /dev/null +++ b/bsd/netat/drv_dep.c @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1994 Apple Computer, Inc. + * All Rights Reserved. + * + * Tuyen A. Nguyen. (December 5, 1994) + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DSAP_SNAP 0xaa + +extern void gref_init(), atp_init(), atp_link(), atp_unlink(); + +extern int adspInited; + +static llc_header_t snap_hdr_at = SNAP_HDR_AT; +static llc_header_t snap_hdr_aarp = SNAP_HDR_AARP; +static unsigned char snap_proto_ddp[5] = SNAP_PROTO_AT; +static unsigned char snap_proto_aarp[5] = SNAP_PROTO_AARP; + +int pktsIn, pktsOut; + +struct ifqueue atalkintrq; /* appletalk and aarp packet input queue */ + +short appletalk_inited = 0; + +extern atlock_t + ddpall_lock, ddpinp_lock, arpinp_lock, refall_lock, nve_lock, + aspall_lock, asptmo_lock, atpall_lock, atptmo_lock, atpgen_lock; + +extern int (*sys_ATsocket )(), (*sys_ATgetmsg)(), (*sys_ATputmsg)(); +extern int (*sys_ATPsndreq)(), (*sys_ATPsndrsp)(); +extern int (*sys_ATPgetreq)(), (*sys_ATPgetrsp)(); + +void atalk_load() +{ + extern int _ATsocket(), _ATgetmsg(), _ATputmsg(); + extern int _ATPsndreq(), _ATPsndrsp(), _ATPgetreq(), _ATPgetrsp(); + + sys_ATsocket = _ATsocket; + sys_ATgetmsg = _ATgetmsg; + sys_ATputmsg = _ATputmsg; + sys_ATPsndreq = _ATPsndreq; + sys_ATPsndrsp = _ATPsndrsp; + sys_ATPgetreq = _ATPgetreq; + sys_ATPgetrsp = _ATPgetrsp; + + ATLOCKINIT(ddpall_lock); + ATLOCKINIT(ddpinp_lock); + ATLOCKINIT(arpinp_lock); + ATLOCKINIT(refall_lock); + ATLOCKINIT(aspall_lock); + ATLOCKINIT(asptmo_lock); + ATLOCKINIT(atpall_lock); + ATLOCKINIT(atptmo_lock); + ATLOCKINIT(atpgen_lock); + ATLOCKINIT(nve_lock); + + atp_init(); + atp_link(); + adspInited = 0; + +/* adsp_init(); + for 2225395 + this happens in adsp_open and is undone on ADSP_UNLINK +*/ +} /* atalk_load */ + +/* Undo everything atalk_load() did. 
*/ +void atalk_unload() /* not currently used */ +{ + extern gbuf_t *scb_resource_m; + extern gbuf_t *atp_resource_m; + + sys_ATsocket = 0; + sys_ATgetmsg = 0; + sys_ATputmsg = 0; + sys_ATPsndreq = 0; + sys_ATPsndrsp = 0; + sys_ATPgetreq = 0; + sys_ATPgetrsp = 0; + + atp_unlink(); + +#ifdef NOT_YET + if (scb_resource_m) { + gbuf_freem(scb_resource_m); + scb_resource_m = 0; + scb_free_list = 0; + } + /* allocated in atp_trans_alloc() */ + if (atp_resource_m) { + gbuf_freem(atp_resource_m); + atp_resource_m = 0; + atp_trans_free_list = 0; + } +#endif + + appletalk_inited = 0; +} /* atalk_unload */ + +void appletalk_hack_start() +{ + if (!appletalk_inited) { + atalk_load(); + atalkintrq.ifq_maxlen = IFQ_MAXLEN; + appletalk_inited = 1; + } +} /* appletalk_hack_start */ + +int pat_output(patp, mlist, dst_addr, type) + at_ifaddr_t *patp; + struct mbuf *mlist; /* packet chain */ + unsigned char *dst_addr; + int type; +{ + struct mbuf *m, *m1; + llc_header_t *llc_header; + struct sockaddr dst; + + if (! patp->aa_ifp) { + for (m = mlist; m; m = mlist) { + mlist = m->m_nextpkt; + m->m_nextpkt = 0; + m_freem(m); + } + return ENOTREADY; + } + + /* this is for ether_output */ + dst.sa_family = AF_APPLETALK; + dst.sa_len = 2 + sizeof(struct etalk_addr); + bcopy (dst_addr, &dst.sa_data[0], sizeof(struct etalk_addr)); + + /* packet chains are used on output and can be tested using aufs */ + for (m = mlist; m; m = mlist) { + mlist = m->m_nextpkt; + m->m_nextpkt = 0; + + M_PREPEND(m, sizeof(llc_header_t), M_DONTWAIT) + if (m == 0) { + continue; + } + + llc_header = mtod(m, llc_header_t *); + *llc_header = + (type == AARP_AT_TYPE) ? snap_hdr_aarp : snap_hdr_at; + + for (m->m_pkthdr.len = 0, m1 = m; m1; m1 = m1->m_next) + m->m_pkthdr.len += m1->m_len; + m->m_pkthdr.rcvif = 0; + + /* *** Note: AT is sending out mbufs of type MSG_DATA, + not MT_DATA. 
*** */ +#ifdef APPLETALK_DEBUG + if (m->m_next && + !((m->m_next)->m_flags & M_EXT)) + kprintf("po: mlen= %d, m2len= %d\n", m->m_len, + (m->m_next)->m_len); +#endif + dlil_output(patp->at_dl_tag, m, NULL, &dst, 0); + + pktsOut++; + } + + return 0; +} /* pat_output */ + +void atalkintr() +{ + struct mbuf *m, *m1, *mlist = NULL; + struct ifnet *ifp; + int s; + llc_header_t *llc_header; + at_ifaddr_t *ifID; + char src[6]; + enet_header_t *enet_header; + +next: + s = splimp(); + IF_DEQUEUE(&atalkintrq, m); + splx(s); + + if (m == 0) + return; + + for ( ; m ; m = mlist) { + mlist = m->m_nextpkt; +#ifdef APPLETALK_DEBUG + /* packet chains are not yet in use on input */ + if (mlist) kprintf("atalkintr: packet chain\n"); +#endif + m->m_nextpkt = 0; + + if (!appletalk_inited) { + m_freem(m); + continue; + } + + if ((m->m_flags & M_PKTHDR) == 0) { +#ifdef APPLETALK_DEBUG + kprintf("atalkintr: no HDR on packet received"); +#endif + m_freem(m); + continue; + } + + /* make sure the interface this packet was received on is configured + for AppleTalk */ + ifp = m->m_pkthdr.rcvif; + TAILQ_FOREACH(ifID, &at_ifQueueHd, aa_link) { + if (ifID->aa_ifp && (ifID->aa_ifp == ifp)) + break; + } + /* if we didn't find a matching interface */ + if (!ifID) { + m_freem(m); + continue; /* was EAFNOSUPPORT */ + } + + /* make sure the entire packet header is in the current mbuf */ + if (m->m_len < ENET_LLC_SIZE && + (m = m_pullup(m, ENET_LLC_SIZE)) == 0) { +#ifdef APPLETALK_DEBUG + kprintf("atalkintr: packet too small\n"); +#endif + m_freem(m); + continue; + } + enet_header = mtod(m, enet_header_t *); + + /* Ignore multicast packets from local station */ + /* *** Note: code for IFTYPE_TOKENTALK may be needed here. *** */ + if (ifID->aa_ifp->if_type == IFT_ETHER) { + bcopy((char *)enet_header->src, src, sizeof(src)); + +#ifdef COMMENT /* In order to receive packets from the Blue Box, we cannot + reject packets whose source address matches our local address. 
+	 */
+		if ((enet_header->dst[0] & 1) &&
+		    (bcmp(src, ifID->xaddr, sizeof(src)) == 0)) {
+			/* Packet rejected: think it's a local mcast. */
+			m_freem(m);
+			continue; /* was EAFNOSUPPORT */
+		}
+#endif /* COMMENT */
+
+		llc_header = (llc_header_t *)(enet_header+1);
+
+		/* advance the mbuf pointers past the ethernet header */
+		m->m_data += ENET_LLC_SIZE;
+		m->m_len -= ENET_LLC_SIZE;
+
+		pktsIn++;
+
+		if (LLC_PROTO_EQUAL(llc_header->protocol,snap_proto_aarp)) {
+			(void)aarp_rcv_pkt(mtod(m, aarp_pkt_t *), ifID);
+			m_freem(m);
+		}
+		else if (LLC_PROTO_EQUAL(llc_header->protocol, snap_proto_ddp)) {
+			/* if we're a router take all pkts */
+			if (!ROUTING_MODE) {
+				if (aarp_chk_addr(mtod(m, at_ddp_t *), ifID)
+				    == AARP_ERR_NOT_OURS) {
+#ifdef APPLETALK_DEBUG
+					kprintf("pat_input: Packet Rejected: not for us? dest=%x.%x.%x.%x.%x.%x LLC_PROTO= %02x%02x\n",
+						enet_header->dst[0], enet_header->dst[1],
+						enet_header->dst[2], enet_header->dst[3],
+						enet_header->dst[4], enet_header->dst[5],
+						llc_header->protocol[3],
+						llc_header->protocol[4]);
+#endif
+					m_freem(m);
+					continue; /* was EAFNOSUPPORT */
+				}
+			}
+			MCHTYPE(m, MSG_DATA); /* set the mbuf type */
+
+			ifID->stats.rcv_packets++;
+			for (m1 = m; m1; m1 = m1->m_next)
+				ifID->stats.rcv_bytes += m1->m_len;
+
+			if (!MULTIPORT_MODE)
+				ddp_glean(m, ifID, src);
+
+			ddp_input(m, ifID);
+		} else {
+#ifdef APPLETALK_DEBUG
+			kprintf("pat_input: Packet Rejected: wrong LLC_PROTO = %02x%02x\n",
+				llc_header->protocol[3],
+				llc_header->protocol[4]);
+#endif
+			m_freem(m);
+		}
+	}
+	goto next;
+} /* atalkintr */
diff --git a/bsd/netat/ep.h b/bsd/netat/ep.h
new file mode 100644
index 000000000..569e591d8
--- /dev/null
+++ b/bsd/netat/ep.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * ORIGINS: 82 + * + * (C) COPYRIGHT Apple Computer, Inc. 1992-1996 + * All Rights Reserved + * + */ + +#ifndef _NETAT_EP_H_ +#define _NETAT_EP_H_ + +#define EP_REQUEST 1 /* Echo request packet */ +#define EP_REPLY 2 /* Echo reply packet */ + +/* Misc. definitions */ + +#define EP_DATA_SIZE 585 /* Maximum size of EP data */ + +#endif /* _NETAT_EP_H_ */ diff --git a/bsd/netat/lap.h b/bsd/netat/lap.h new file mode 100644 index 000000000..196fe8775 --- /dev/null +++ b/bsd/netat/lap.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + */ + +/* Definitions for generic access to AppleTalk link level protocols. + */ + +#ifndef _NETAT_LAP_H_ +#define _NETAT_LAP_H_ + +#define AT_MID_ELAP 202 + +/* elap ioctl's */ + +#define ELAP_IOC_MYIOCTL(i) ((i>>8) == AT_MID_ELAP) +#define ELAP_IOC_GET_STATS ((AT_MID_ELAP<<8) | 2) + +#define AT_MID_LAP 214 + +/* Generic LAP ioctl's. Each LAP may implement other ioctl's specific to + * its functionality. + */ +#define LAP_IOC_MYIOCTL(i) ((i>>8) == AT_MID_LAP) +#define LAP_IOC_ADD_ROUTE ((AT_MID_LAP<<8) | 9) +#define LAP_IOC_GET_ZONE ((AT_MID_LAP<<8) | 12) +#define LAP_IOC_GET_ROUTE ((AT_MID_LAP<<8) | 13) +#define LAP_IOC_SNMP_GET_CFG ((AT_MID_LAP<<8) | 21) +#define LAP_IOC_SNMP_GET_AARP ((AT_MID_LAP<<8) | 22) +#define LAP_IOC_SNMP_GET_RTMP ((AT_MID_LAP<<8) | 23) +#define LAP_IOC_SNMP_GET_ZIP ((AT_MID_LAP<<8) | 24) +#define LAP_IOC_SNMP_GET_DDP ((AT_MID_LAP<<8) | 25) +#define LAP_IOC_SNMP_GET_NBP ((AT_MID_LAP<<8) | 26) +#define LAP_IOC_SNMP_GET_PORTS ((AT_MID_LAP<<8) | 27) + +#ifdef NOT_USED + +#define ELAP_IOC_GET_CFG ((AT_MID_ELAP<<8) | 1) /* not used */ +#define ELAP_IOC_SET_CFG ((AT_MID_ELAP<<8) | 3) /* not used */ +#define ELAP_IOC_SET_ZONE ((AT_MID_ELAP<<8) | 4) /* not used */ +#define ELAP_IOC_SWITCHZONE ((AT_MID_ELAP<<8) | 5) /* not used */ + +#define LAP_IOC_ONLINE ((AT_MID_LAP<<8) | 1) /* not used */ +#define LAP_IOC_OFFLINE ((AT_MID_LAP<<8) | 2) /* not used */ +#define LAP_IOC_GET_IFS_STAT ((AT_MID_LAP<<8) | 3) /* not used */ +#define LAP_IOC_ADD_ZONE ((AT_MID_LAP<<8) | 4) /* not used */ +#define LAP_IOC_ROUTER_START ((AT_MID_LAP<<8) | 5) /* not used */ +#define LAP_IOC_ROUTER_SHUTDOWN ((AT_MID_LAP<<8) | 6) /* not used */ +#define LAP_IOC_ROUTER_INIT ((AT_MID_LAP<<8) | 7) /* not used */ +#define LAP_IOC_GET_IFID ((AT_MID_LAP<<8) | 8) /* not used */ 
+#define LAP_IOC_GET_DBG ((AT_MID_LAP<<8) | 10) /* not used */ +#define LAP_IOC_SET_DBG ((AT_MID_LAP<<8) | 11) /* not used */ +#define LAP_IOC_ADD_IFNAME ((AT_MID_LAP<<8) | 14) /* not used */ +#define LAP_IOC_DO_DEFER ((AT_MID_LAP<<8) | 15) /* not used */ +#define LAP_IOC_DO_DELAY ((AT_MID_LAP<<8) | 16) /* not used */ +#define LAP_IOC_SHUT_DOWN ((AT_MID_LAP<<8) | 17) /* not used */ +#define LAP_IOC_CHECK_STATE ((AT_MID_LAP<<8) | 18) /* not used */ +#define LAP_IOC_DEL_IFNAME ((AT_MID_LAP<<8) | 19) /* not used */ +#define LAP_IOC_SET_MIX ((AT_MID_LAP<<8) | 20) /* not used */ +#define LAP_IOC_SET_LOCAL_ZONES ((AT_MID_LAP<<8) | 28) /* not used */ +#define LAP_IOC_GET_LOCAL_ZONE ((AT_MID_LAP<<8) | 29) /* not used */ +#define LAP_IOC_IS_ZONE_LOCAL ((AT_MID_LAP<<8) | 30) /* not used */ +#define LAP_IOC_GET_MODE ((AT_MID_LAP<<8) | 31) /* not used */ +#define LAP_IOC_GET_IF_NAMES ((AT_MID_LAP<<8) | 32) /* not used */ +#define LAP_IOC_GET_DEFAULT_ZONE ((AT_MID_LAP<<8) | 33) /* not used */ +#define LAP_IOC_SET_DEFAULT_ZONES ((AT_MID_LAP<<8) | 34) /* not used */ + +#endif /* NOT_USED */ + +#endif /* _NETAT_LAP_H_ */ + diff --git a/bsd/netat/nbp.h b/bsd/netat/nbp.h new file mode 100644 index 000000000..d6386b349 --- /dev/null +++ b/bsd/netat/nbp.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * + * ORIGINS: 82 + * + * (C) COPYRIGHT Apple Computer, Inc. 1992-1996 + * All Rights Reserved + * + */ +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + * + * The information contained herein is subject to change without + * notice and should not be construed as a commitment by Apple + * Computer, Inc. Apple Computer, Inc. assumes no responsibility + * for any errors that may appear. + * + * Confidential and Proprietary to Apple Computer, Inc. + */ +/* + * Title: nbp.h + * + * Facility: Include file for NBP kernel module. + * + * Author: Kumar Vora, Creation Date: May-1-1989 + * + * History: + * X01-001 Kumar Vora May-1-1989 + * Initial Creation. 
+ */ + +#ifndef _NETAT_NBP_H_ +#define _NETAT_NBP_H_ + +/* NBP packet types */ + +#define NBP_BRRQ 0x01 /* Broadcast request */ +#define NBP_LKUP 0x02 /* Lookup */ +#define NBP_LKUP_REPLY 0x03 /* Lookup reply */ +#define NBP_FWDRQ 0x04 /* Forward Request (router only) */ + +/* *** the following may be discontinued in the near future *** */ + +#define NBP_CONFIRM 0x09 /* Confirm, not sent on wire */ + +#ifdef NOT_USED +#define NBP_REGISTER 0x07 /* Register a name */ +#define NBP_DELETE 0x08 /* Delete a name */ +#define NBP_STATUS_REPLY 0x0a /* Status on register/delete */ +#define NBP_CLOSE_NOTE 0x0b /* Close notification from DDP */ +#endif + +/* *** **************************************************** *** */ + +/* Protocol defaults */ + +#define NBP_RETRY_COUNT 8 /* Maximum repeats */ +#define NBP_RETRY_INTERVAL 1 /* Retry timeout */ + +/* Special (partial) wildcard character */ +#define NBP_SPL_WILDCARD 0xC5 +#define NBP_ORD_WILDCARD '=' + +/* Packet definitions */ + +#define NBP_TUPLE_MAX 15 /* Maximum number of tuples in one DDP packet */ +#define NBP_HDR_SIZE 2 + +typedef struct at_nbp { + unsigned control : 4, + tuple_count : 4; + u_char at_nbp_id; + at_nbptuple_t tuple[NBP_TUPLE_MAX]; +} at_nbp_t; + +#define DEFAULT_ZONE(zone) (!(zone)->len || ((zone)->len == 1 && (zone)->str[0] == '*')) + +#ifdef KERNEL + +/* Struct for name registry */ +typedef struct _nve_ { + TAILQ_ENTRY(_nve_) nve_link; /* tailq macro glue */ + gbuf_t *tag; /*pointer to the parent gbuf_t*/ + /* *** there's no reason why tag has to + be an mbuf *** */ + at_nvestr_t zone; + u_int zone_hash; + at_nvestr_t object; + u_int object_hash; + at_nvestr_t type; + u_int type_hash; + at_inet_t address; + u_char ddptype; + u_char enumerator; + int pid; + long unique_nbp_id; /* long to be compatible with OT */ +} nve_entry_t; + +#define NBP_WILD_OBJECT 0x01 +#define NBP_WILD_TYPE 0x02 +#define NBP_WILD_MASK 0x03 + +typedef struct nbp_req { + int (*func)(); + gbuf_t *response; /* the response datagram 
*/ + int space_unused; /* Space available in the resp */ + /* packet. */ + gbuf_t *request; /* The request datagram */ + /* Saved for return address */ + nve_entry_t nve; + u_char flags; /* Flags to indicate whether or */ + /* not the request tuple has */ + /* wildcards in it */ +} nbp_req_t; + +extern int nbp_insert_entry(nve_entry_t *); +extern u_int nbp_strhash (at_nvestr_t *); +extern nve_entry_t *nbp_find_nve(nve_entry_t *); +extern int nbp_fillin_nve(); + +extern at_nvestr_t *getSPLocalZone(int); +extern at_nvestr_t *getLocalZone(int); + +#endif /* KERNEL */ +#endif /* _NETAT_NBP_H_ */ diff --git a/bsd/netat/pap.h b/bsd/netat/pap.h new file mode 100644 index 000000000..df5a08d0c --- /dev/null +++ b/bsd/netat/pap.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ORIGINS: 82 + * + * (C) COPYRIGHT Apple Computer, Inc. 1992-1996 + * All Rights Reserved + * + */ + +/* Definitions for ATP protocol and streams module, per + * AppleTalk Transaction Protocol documentation from + * `Inside AppleTalk', July 14, 1986. 
+ */ + +#ifndef _NETAT_PAP_H_ +#define _NETAT_PAP_H_ + +#define AT_PAP_DATA_SIZE 512 /* Maximum PAP data size */ +#define AT_PAP_STATUS_SIZE 255 /* Maximum PAP status length */ +#define PAP_TIMEOUT 120 + +/* PAP packet types */ + +#define AT_PAP_TYPE_OPEN_CONN 0x01 /* Open-Connection packet */ +#define AT_PAP_TYPE_OPEN_CONN_REPLY 0x02 /* Open-Connection-Reply packet */ +#define AT_PAP_TYPE_SEND_DATA 0x03 /* Send-Data packet */ +#define AT_PAP_TYPE_DATA 0x04 /* Data packet */ +#define AT_PAP_TYPE_TICKLE 0x05 /* Tickle packet */ +#define AT_PAP_TYPE_CLOSE_CONN 0x06 /* Close-Connection packet */ +#define AT_PAP_TYPE_CLOSE_CONN_REPLY 0x07 /* Close-Connection-Reply pkt */ +#define AT_PAP_TYPE_SEND_STATUS 0x08 /* Send-Status packet */ +#define AT_PAP_TYPE_SEND_STS_REPLY 0x09 /* Send-Status-Reply packet */ +#define AT_PAP_TYPE_READ_LW 0x0A /* Read LaserWriter Message */ + + +/* PAP packet structure */ + +typedef struct { + u_char at_pap_connection_id; + u_char at_pap_type; + u_char at_pap_sequence_number[2]; + u_char at_pap_responding_socket; + u_char at_pap_flow_quantum; + u_char at_pap_wait_time_or_result[2]; + u_char at_pap_buffer[AT_PAP_DATA_SIZE]; +} at_pap; + + +/* ioctl definitions */ + +#define AT_PAP_SETHDR (('~'<<8)|0) +#define AT_PAP_READ (('~'<<8)|1) +#define AT_PAP_WRITE (('~'<<8)|2) +#define AT_PAP_WRITE_EOF (('~'<<8)|3) +#define AT_PAP_WRITE_FLUSH (('~'<<8)|4) +#define AT_PAP_READ_IGNORE (('~'<<8)|5) +#define AT_PAPD_SET_STATUS (('~'<<8)|40) +#define AT_PAPD_GET_NEXT_JOB (('~'<<8)|41) + +extern char at_pap_status[]; +extern char *pap_status (); + +#define NPAPSERVERS 10 /* the number of active PAP servers/node */ +#define NPAPSESSIONS 40 /* the number of active PAP sockets/node */ + +#define AT_PAP_HDR_SIZE (DDP_X_HDR_SIZE + ATP_HDR_SIZE) + +#define ATP_DDP_HDR(c) ((at_ddp_t *)(c)) + +#define PAP_SOCKERR "Unable to open PAP socket" +#define P_NOEXIST "Printer not found" +#define P_UNREACH "Unable to establish PAP session" + +struct pap_state { + u_char 
pap_inuse; /* true if this one is allocated */ + u_char pap_tickle; /* true if we are tickling the other end */ + u_char pap_request; /* bitmap from a received request */ + u_char pap_eof; /* true if we have received an EOF */ + u_char pap_eof_sent; /* true if we have sent an EOF */ + u_char pap_sent; /* true if we have sent anything (and + therefore may have to send an eof + on close) */ + u_char pap_error; /* error message from read request */ + u_char pap_timer; /* a timeout is pending */ + u_char pap_closing; /* the link is closing and/or closed */ + u_char pap_request_count; /* number of outstanding requests */ + u_char pap_req_timer; /* the request timer is running */ + u_char pap_ending; /* we are waiting for atp to flush */ + u_char pap_read_ignore; /* we are in 'read with ignore' mode */ + + u_char pap_req_socket; + at_inet_t pap_to; + int pap_flow; + + u_short pap_send_count; /* the sequence number to send on the + next send data request */ + u_short pap_rcv_count; /* the sequence number expected to + receive on the next request */ + u_short pap_tid; /* ATP transaction ID for responses */ + u_char pap_connID; /* our connection ID */ + + int pap_ignore_id; /* the transaction ID for read ignore */ + int pap_tickle_id; /* the transaction ID for tickles */ +}; + +#endif /* _NETAT_PAP_H_ */ diff --git a/bsd/netat/routing_tables.h b/bsd/netat/routing_tables.h new file mode 100644 index 000000000..e7a15ad78 --- /dev/null +++ b/bsd/netat/routing_tables.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This include file defines the RTMP table and ZIP table + * for the AppleTalk AIX router + * + * + * 0.01 03/16/94 LD Creation + * 0.10 08/19/94 LD merged + * + */ + +#ifndef _NETAT_ROUTING_TABLES_H_ +#define _NETAT_ROUTING_TABLES_H_ + +/* RTMP table entry state bitmap (EntryState) values */ + +#define RTE_STATE_UNUSED 0 /* this entry is not in used */ +#define RTE_STATE_BAD 2 /* The route is almost ready to be removed */ +#define RTE_STATE_SUSPECT 4 /* didn't received an update for route */ +#define RTE_STATE_GOOD 8 /* this route is 100% valid */ +#define RTE_STATE_ZKNOWN 16 /* we know the zones for this entry */ +#define RTE_STATE_UPDATED 32 /* set when updated from received rtmp table */ +#define RTE_STATE_BKUP 64 /* for future use : AURP */ +#define RTE_STATE_PERMANENT 128 /* This is a directly attached route */ + +#define PORT_ONLINE 32 /* router port in forwarding state */ +#define PORT_SEEDING 31 /* router port seeding */ +#define PORT_ACTIVATING 16 /* router port waiting for net infos */ +#define PORT_ERR_NOZONE 6 /* router no zones for non seed port*/ +#define PORT_ERR_BADRTMP 5 /* router problem bad rtmp version*/ +#define PORT_ERR_STARTUP 4 /* router problem cable in start range*/ +#define PORT_ERR_CABLER 3 /* router problem bad cable range*/ +#define PORT_ERR_SEED 2 /* router startup seeding problem */ +#define PORT_ONERROR 1 /* router port with generic problem*/ +#define PORT_OFFLINE 0 /* router port disabled/not ready */ + +#define ZT_MAX 1024 /* Don't 
allow more zones than that */ +#define ZT_MIN 32 /* Minimum for a good behaviour*/ +#define ZT_DEFAULT 512 /* Minimum for a good behaviour*/ +#define RT_MAX 4096 /* Don't allow more entries than that */ +#define RT_MIN 128 /* Minimum for a good behaviour*/ +#define RT_DEFAULT 1024 /* Minimum for a good behaviour*/ +#define ZT_BYTES (ZT_MAX/8) /* Bytes in Zone Bitmap */ +#define ZT_MAXEDOUT ZT_MAX+1 /* reached the entry limit.. */ +#define RT_MIX_DEFAULT 2000 /* default for nbr of ppsec */ + + +#define NOTIFY_N_DIST 31 /* Notify Neighbor distance (when shutdown or so) */ + +/* Useful macros to access the RTMP tuple fields */ + +#define TUPLENET(x) NET_VALUE(((at_rtmp_tuple *)(x))->at_rtmp_net) +#define TUPLEDIST(x) ((((at_rtmp_tuple *)(x))->at_rtmp_data) & RTMP_DISTANCE) +#define TUPLERANGE(x) ((((at_rtmp_tuple *)(x))->at_rtmp_data) & RTMP_RANGE_FLAG) + +#define CableStart ifID->ifThisCableStart +#define CableStop ifID->ifThisCableEnd + +#define RTMP_IDLENGTH 4 /* RTMP packet Node header length */ + + +#define RTMP_VERSION_NUMBER 0x82 /* V2 only version of RTMP supported */ + +#define ERTR_SEED_CONFLICT 0x101 /* Conflict between port information and net + * value received for the port (fatal for Rtr) + */ +#define ERTR_CABLE_CONFLICT 0x102 /* Conflict between port information and net + * information received in a RTMP packet + */ + +#define ERTR_RTMP_BAD_VERSION 0x103 /* We received a non phase-II RTMP packet + * that's bad... We can't deal with it + */ + +#define ERTR_CABLE_STARTUP 0x104 /* the cable range we're on happen to + * be in the startup range. 
Shouldn't + */ + +#define ERTR_CABLE_NOZONE 0x105 /* We haven't found any zones for that port + * after all the timeout expired + */ + + +/* RTMP table entry */ + +typedef struct rt_entry { + + struct rt_entry *left; /* btree left pointer */ + struct rt_entry *right; /* btree right pointer */ + + at_net_al NetStop; /* Last net # in the range, or network # if + non extended network */ + at_net_al NetStart; /* Starting network number in the range, 0 + non extended network */ + at_net_al NextIRNet; /* Network number of next Internet Router */ + at_node NextIRNode; /* Node ID of next Router */ + u_char ZoneBitMap[ZT_BYTES]; /* One bit per Zone defined for this entry */ + u_char NetDist; /* Distance in hops of the destination net */ + u_char NetPort; /* Physical port number to forward to */ + u_char EntryState; /* State of the entry bitmap field */ + u_char RTMPFlag; + u_char AURPFlag; + +} RT_entry; + + +/* ZIP Table entry */ + +typedef struct { + + u_short ZoneCount; /* Count of reference to zone entry */ + at_nvestr_t Zone; /* zone name as a Network Visible Entity */ + +} ZT_entry; + +/* for zone retrieval to user space only */ +typedef struct { + unsigned short entryno; /* zone table entry number (1st = 0) */ + ZT_entry zt; /* the zone table entry */ +} ZT_entryno; + +#ifdef KERNEL + +/* Macros for Routing table B-tree easy access */ + +#define RT_DELETE(NetStop, NetStart) {\ + RT_entry *found; \ + if ((found = rt_bdelete(NetStop, NetStart))) { \ + memset(found, '\0', sizeof(RT_entry)); \ + found->right = RT_table_freelist; \ + RT_table_freelist = found; \ + } \ +} + +/* Set/Reset and test the All zones known bit in for the entry field */ + +#define RT_ALL_ZONES_KNOWN(entry) ((entry)->EntryState & RTE_STATE_ZKNOWN) +#define RT_SET_ZONE_KNOWN(entry) ((entry)->EntryState |= RTE_STATE_ZKNOWN) +#define RT_CLR_ZONE_KNOWN(entry) ((entry)->EntryState ^= RTE_STATE_ZKNOWN) + +/* + * check if a zone number is in a given zone map + */ +#define ZT_ISIN_ZMAP(znum, zmap) 
 ((zmap)[(znum-1) >> 3] & 0x80 >> (znum-1) % 8)
+
+/* remove a zone from the zone bitmap, and check if the zone
+ * is still in use by someone else.
+ */
+
+#define ZT_CLR_ZMAP(num, zmap) { \
+	if ((zmap)[(num-1) >> 3] & 0x80 >> (num-1) % 8) { \
+		(zmap)[(num-1) >> 3] ^= 0x80 >> (num-1) % 8; \
+		ZT_table[(num-1)].ZoneCount-- ; \
+	} \
+}
+
+/* set a bit in an entry bit map */
+
+#define ZT_SET_ZMAP(num, zmap) { \
+	if (!(zmap[(num-1) >> 3] & 0x80 >> (num-1) % 8)) { \
+		zmap[(num-1) >> 3] |= 0x80 >> (num-1) % 8; \
+		ZT_table[(num-1)].ZoneCount++ ; \
+	} \
+}
+
+extern int regDefaultZone(at_ifaddr_t *);
+extern int zonename_equal(at_nvestr_t *, at_nvestr_t *);
+
+extern RT_entry *RT_table_freelist;
+extern RT_entry RT_table_start;
+extern RT_entry *RT_table;
+extern RT_entry *rt_binsert();
+extern RT_entry *rt_insert();
+extern RT_entry *rt_bdelete();
+extern RT_entry *rt_blookup(int);
+extern RT_entry *rt_getNextRoute(int);
+
+extern ZT_entry *ZT_table;
+extern short RT_maxentry;
+extern short ZT_maxentry;
+
+extern volatile int RouterMix;
+
+extern int zt_add_zone(char *, short);
+extern int zt_add_zonename(at_nvestr_t *);
+extern int zt_ent_zindex(u_char *);
+extern ZT_entryno *zt_getNextZone(int);
+extern void zt_remove_zones(u_char *);
+extern void zt_set_zmap(u_short, char *);
+extern void rtmp_router_input();
+
+#endif /* KERNEL */
+
+#endif /* _NETAT_ROUTING_TABLES_H_ */
diff --git a/bsd/netat/rtmp.h b/bsd/netat/rtmp.h
new file mode 100644
index 000000000..7d9a7113c
--- /dev/null
+++ b/bsd/netat/rtmp.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1988, 1989 Apple Computer, Inc. + */ + +#ifndef _NETAT_RTMP_H_ +#define _NETAT_RTMP_H_ + +/* Changed 03-22-94 for router support LD */ + +/* RTMP function codes */ +#define RTMP_REQ_FUNC1 0x01 /* RTMP request function code=1 */ +#define RTMP_REQ_FUNC2 0x02 /* Route Data Req with Split Horizon */ +#define RTMP_REQ_FUNC3 0x03 /* Route Data Req no Split Horizon */ + + +#define RTMP_ROUTER_AGE 50 /* Number of seconds to age router */ + +/* RTMP response and data packet format */ + +typedef struct { + at_net at_rtmp_this_net; + u_char at_rtmp_id_length; + u_char at_rtmp_id[1]; +} at_rtmp; + +/* RTMP network/distance data tuples */ + +#define RTMP_TUPLE_SIZE 3 + +/* Extended AppleTalk tuple can be thought of as two of + * these tuples back to back. + */ + +#define RTMP_RANGE_FLAG 0x80 +#define RTMP_DISTANCE 0x0f + +typedef struct { + at_net at_rtmp_net; + unsigned char at_rtmp_data; +} at_rtmp_tuple; + +#endif /* _NETAT_RTMP_H_ */ diff --git a/bsd/netat/sys_dep.c b/bsd/netat/sys_dep.c new file mode 100644 index 000000000..a5d17dc84 --- /dev/null +++ b/bsd/netat/sys_dep.c @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1998 Apple Computer, Inc. + * + * Change Log: + * Created February 20, 1995 by Tuyen Nguyen + * Modified for MP, 1996 by Tuyen Nguyen + * Modified, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +int (*sys_ATsocket)() = 0; +int (*sys_ATgetmsg)() = 0; +int (*sys_ATputmsg)() = 0; +int (*sys_ATPsndreq)() = 0; +int (*sys_ATPsndrsp)() = 0; +int (*sys_ATPgetreq)() = 0; +int (*sys_ATPgetrsp)() = 0; + +extern at_state_t at_state; /* global state of AT network */ +extern at_ifaddr_t *ifID_home; /* default interface */ + +int ATsocket(proc, uap, retval) + void *proc; + struct { + int proto; + } *uap; + int *retval; +{ + int err; + + if (sys_ATsocket) { + /* required check for all AppleTalk system calls */ + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = (*sys_ATsocket)(uap->proto, &err, proc); + } + } else { + *retval = -1; + err = ENXIO; + } + return err; +} + +int ATgetmsg(proc, uap, retval) + void *proc; + struct { + int fd; + void *ctlptr; + void *datptr; + int *flags; + } *uap; + int *retval; +{ + int err; + + if (sys_ATgetmsg) { + /* required check for all AppleTalk system 
calls */ + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = + (*sys_ATgetmsg)(uap->fd, uap->ctlptr, uap->datptr, + uap->flags, &err, proc); + } + } else { + *retval = -1; + err = ENXIO; + } + return err; +} + +int ATputmsg(proc, uap, retval) + void *proc; + struct { + int fd; + void *ctlptr; + void *datptr; + int flags; + } *uap; + int *retval; +{ + int err; + + if (sys_ATputmsg) { + /* required check for all AppleTalk system calls */ + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = + (*sys_ATputmsg)(uap->fd, uap->ctlptr, uap->datptr, + uap->flags, &err, proc); + } + } else { + *retval = -1; + err = ENXIO; + } + return err; +} + +int ATPsndreq(proc, uap, retval) + void *proc; + struct { + int fd; + unsigned char *buf; + int len; + int nowait; + } *uap; + int *retval; +{ + int err; + + if (sys_ATPsndreq) { + /* required check for all AppleTalk system calls */ + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = + (*sys_ATPsndreq)(uap->fd, uap->buf, uap->len, + uap->nowait, &err, proc); + } + } else { + *retval = -1; + err= ENXIO; + } + return err; +} + +int ATPsndrsp(proc, uap, retval) + void *proc; + struct { + int fd; + unsigned char *respbuff; + int resplen; + int datalen; + } *uap; + int *retval; +{ + int err; + + if (sys_ATPsndrsp) { + /* required check for all AppleTalk system calls */ + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = + (*sys_ATPsndrsp)(uap->fd, uap->respbuff, + uap->resplen, uap->datalen, &err, proc); + } + } else { + *retval = -1; + err = ENXIO; + } + return err; +} + +int ATPgetreq(proc, uap, retval) + void *proc; + struct { + int fd; + unsigned char *buf; + int buflen; + } *uap; + int *retval; +{ + int err; + + if (sys_ATPgetreq) { + /* required check for all AppleTalk system calls */ + 
if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = + (*sys_ATPgetreq)(uap->fd, uap->buf, uap->buflen, + &err, proc); + } + } else { + *retval = -1; + err = ENXIO; + } + return err; +} + +int ATPgetrsp(proc, uap, retval) + void *proc; + struct { + int fd; + unsigned char *bdsp; + } *uap; + int *retval; +{ + int err = 0; + + if (sys_ATPgetrsp) { + /* required check for all AppleTalk system calls */ + if (!(at_state.flags & AT_ST_STARTED) || !ifID_home) { + *retval = -1; + err = ENOTREADY; + } else { + *retval = + (*sys_ATPgetrsp)(uap->fd, uap->bdsp, &err, proc); + } + } else { + *retval = -1; + err = ENXIO; + } + return err; +} + +int atalk_closeref(fp, grefp) + struct file *fp; + gref_t **grefp; +{ + if ((*grefp = (gref_t *)fp->f_data)) { + fp->f_data = 0; +/* + kprintf("atalk_closeref: fp = 0x%x, gref = 0x%x\n", (u_int)fp, + (u_int)*grefp); +*/ + return(0); + } + return(EBADF); +} + +int atalk_openref(gref, retfd, proc) + gref_t *gref; + int *retfd; + struct proc *proc; +{ + extern int _ATread(), _ATwrite(),_ATioctl(), _ATselect(), _ATclose(); + static struct fileops fileops = + {_ATread, _ATwrite, _ATioctl, _ATselect, _ATclose}; + int err, fd; + struct file *fp; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + if ((err = falloc(proc, &fp, &fd)) != 0) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return err; + } + + fp->f_flag = FREAD|FWRITE; + /*##### LD 5/7/96 Warning: we don't have a "DTYPE_OTHER" for + * MacOSX, so defines DTYPE_ATALK as DTYPE_SOCKET... 
+ */ + fp->f_type = DTYPE_ATALK+1; + fp->f_ops = &fileops; + *fdflags(proc, fd) &= ~UF_RESERVED; + *retfd = fd; + fp->f_data = (void *)gref; +/* + kprintf("atalk_openref: fp = 0x%x, gref = 0x%x\n", (u_int)fp, (u_int)gref); +*/ + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return 0; +} + +/* go from file descriptor to gref, which has been saved in fp->f_data */ +int atalk_getref(fp, fd, grefp, proc) +struct file *fp; +int fd; +gref_t **grefp; +struct proc *proc; +{ + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (fp == 0) { + int error = fdgetf(proc, fd, &fp); + + if (error) { + + *grefp = (gref_t *) 0; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return EBADF; + } + } + if ((*grefp = (gref_t *)fp->f_data) == 0) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return EBADF; + } + + if ((*grefp)->errno) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return (int)(*grefp)->errno; + } + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + return 0; +} diff --git a/bsd/netat/sys_glue.c b/bsd/netat/sys_glue.c new file mode 100644 index 000000000..9d0e95620 --- /dev/null +++ b/bsd/netat/sys_glue.c @@ -0,0 +1,1244 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995 Apple Computer, Inc. + * + * Change Log: + * Created, March 17, 1997 by Tuyen Nguyen for MacOSX. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +extern struct atpcb ddp_head; + +extern void + ddp_putmsg(gref_t *gref, gbuf_t *m), + elap_wput(gref_t *gref, gbuf_t *m), + atp_wput(gref_t *gref, gbuf_t *m), + asp_wput(gref_t *gref, gbuf_t *m), +#ifdef AURP_SUPPORT + aurp_wput(gref_t *gref, gbuf_t *m), +#endif + adsp_wput(gref_t *gref, gbuf_t *m); + +void atalk_putnext(gref_t *gref, gbuf_t *m); +static int gref_close(gref_t *gref); + +SYSCTL_DECL(_net_appletalk); +dbgBits_t dbgBits; +SYSCTL_STRUCT(_net_appletalk, OID_AUTO, debug, CTLFLAG_WR, + &dbgBits, dbgBits, "AppleTalk Debug Flags"); +volatile int RouterMix = RT_MIX_DEFAULT; /* default for nbr of ppsec */ +SYSCTL_INT(_net_appletalk, OID_AUTO, routermix, CTLFLAG_WR, + &RouterMix, 0, "Appletalk RouterMix"); +at_ddp_stats_t at_ddp_stats; /* DDP statistics */ +SYSCTL_STRUCT(_net_appletalk, OID_AUTO, ddpstats, CTLFLAG_RD, + &at_ddp_stats, at_ddp_stats, "AppleTalk DDP Stats"); + +atlock_t refall_lock; + +static void gref_wput(gref, m) + gref_t *gref; + gbuf_t *m; +{ + switch (gref->proto) { + case ATPROTO_DDP: + ddp_putmsg(gref, m); break; + case ATPROTO_LAP: + elap_wput(gref, m); break; + case ATPROTO_ATP: + atp_wput(gref, m); break; + case ATPROTO_ASP: + asp_wput(gref, m); break; +#ifdef AURP_SUPPORT + case ATPROTO_AURP: + aurp_wput(gref, m); break; +#endif + case ATPROTO_ADSP: + adsp_wput(gref, m); break; + case ATPROTO_NONE: + if (gbuf_type(m) == MSG_IOCTL) { + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = 0; + ((ioc_t *)gbuf_rptr(m))->ioc_rval 
= -1; + ((ioc_t *)gbuf_rptr(m))->ioc_error = EPROTO; + gbuf_set_type(m, MSG_IOCNAK); + atalk_putnext(gref, m); + } else + gbuf_freem(m); + break; + default: + gbuf_freem(m); + break; + } +} + +int _ATsocket(proto, err, proc) + int proto; + int *err; + void *proc; +{ + int fd; + gref_t *gref; + + /* make sure the specified protocol id is valid */ + switch (proto) { + + /* ATPROTO_DDP and ATPROTO_LAP have been replaced with + BSD-style socket interface. */ + + case ATPROTO_ATP: + case ATPROTO_ASP: + case ATPROTO_AURP: + case ATPROTO_ADSP: + break; + default: + *err = EPROTOTYPE; +#ifdef APPLETALK_DEBUG + kprintf("_ATsocket: error EPROTOTYPE =%d\n", *err); +#endif + return -1; + } + + /* allocate a protocol channel */ + if ((*err = gref_alloc(&gref)) != 0) { +#ifdef APPLETALK_DEBUG + kprintf("_ATsocket: error gref_open =%d\n", *err); +#endif + return -1; + } + gref->proto = proto; + gref->pid = ((struct proc *)proc)->p_pid; + + /* open the specified protocol */ + switch (gref->proto) { + + /* ATPROTO_DDP and ATPROTO_LAP have been replaced with + BSD-style socket interface. 
*/ + + case ATPROTO_ATP: + *err = atp_open(gref, 1); break; + case ATPROTO_ASP: + *err = asp_open(gref); break; +#ifdef AURP_SUPPORT + case ATPROTO_AURP: + *err = aurp_open(gref); break; +#endif + case ATPROTO_ADSP: + *err = adsp_open(gref); break; + } + + /* create the descriptor for the channel */ + if (*err) { +#ifdef APPLETALK_DEBUG + kprintf("_ATsocket: open failed for %d proto; err = %d\n", + gref->proto, *err); +#endif + gref->proto = ATPROTO_NONE; + } + if (*err || (*err = atalk_openref(gref, &fd, proc))) { +#ifdef APPLETALK_DEBUG + kprintf("_ATsocket: error atalk_openref =%d\n", *err); +#endif + (void)gref_close(gref); + return -1; + } +/* + kprintf("_ATsocket: proto=%d return=%d fd=%d\n", proto, *err, fd); +*/ + return fd; +} /* _ATsocket */ + +int _ATgetmsg(fd, ctlptr, datptr, flags, err, proc) + int fd; + strbuf_t *ctlptr; + strbuf_t *datptr; + int *flags; + int *err; + void *proc; +{ + int rc = -1; + gref_t *gref; + + if ((*err = atalk_getref(0, fd, &gref, proc)) == 0) { + switch (gref->proto) { + case ATPROTO_ASP: + rc = ASPgetmsg(gref, ctlptr, datptr, flags, err); + break; + case ATPROTO_AURP: +#ifdef AURP_SUPPORT + rc = AURPgetmsg(err); + break; +#endif + default: + *err = EPROTONOSUPPORT; + break; + } + } + +/* kprintf("_ATgetmsg: return=%d\n", *err);*/ + return rc; +} + +int _ATputmsg(fd, ctlptr, datptr, flags, err, proc) + int fd; + strbuf_t *ctlptr; + strbuf_t *datptr; + int flags; + int *err; + void *proc; +{ + int rc = -1; + gref_t *gref; + + if ((*err = atalk_getref(0, fd, &gref, proc)) == 0) { + switch (gref->proto) { + case ATPROTO_ASP: + rc = ASPputmsg(gref, ctlptr, datptr, flags, err); break; + default: + *err = EPROTONOSUPPORT; break; + } + } + +/* kprintf("_ATputmsg: return=%d\n", *err); */ + return rc; +} + +int _ATclose(fp, proc) + struct file *fp; + struct proc *proc; +{ + int err; + gref_t *gref; + + + + if ((err = atalk_closeref(fp, &gref)) == 0) { + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + (void)gref_close(gref); + 
thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + } + + return err; +} + +int _ATrw(fp, rw, uio, ext) + void *fp; + enum uio_rw rw; + struct uio *uio; + int ext; +{ + int s, err, len, clen = 0, res; + gref_t *gref; + gbuf_t *m, *mhead, *mprev; + + if ((err = atalk_getref(fp, 0, &gref, 0)) != 0) + return err; + + if ((len = uio->uio_resid) == 0) + return 0; + + ATDISABLE(s, gref->lock); + + if (rw == UIO_READ) { + KERNEL_DEBUG(DBG_ADSP_ATRW, 0, gref, len, gref->rdhead, 0); + while ((gref->errno == 0) && ((mhead = gref->rdhead) == 0)) { + gref->sevents |= POLLMSG; + err = tsleep(&gref->event, PSOCK | PCATCH, "AT read", 0); + gref->sevents &= ~POLLMSG; + if (err != 0) { + ATENABLE(s, gref->lock); + return err; + } + KERNEL_DEBUG(DBG_ADSP_ATRW, 1, gref, gref->rdhead, mhead, gbuf_next(mhead)); + } + + if (gref->errno) { + ATENABLE(s, gref->lock); + return EPIPE; + } + if ((gref->rdhead = gbuf_next(mhead)) == 0) + gref->rdtail = 0; + + KERNEL_DEBUG(DBG_ADSP_ATRW, 2, gref, gref->rdhead, mhead, gbuf_next(mhead)); + + ATENABLE(s, gref->lock); + +//##### LD TEST 08/05 +// simple_lock(&gref->lock); + + gbuf_next(mhead) = 0; + + for (mprev=0, m=mhead; m && len; len-=clen) { + if ((clen = gbuf_len(m)) > 0) { + if (clen > len) + clen = len; + uio->uio_rw = UIO_READ; + if ((res = uiomove((caddr_t)gbuf_rptr(m), + clen, uio))) { + KERNEL_DEBUG(DBG_ADSP_ATRW, 3, m, clen, + len, gbuf_cont(m)); + break; + } + if (gbuf_len(m) > len) { + gbuf_rinc(m,clen); + break; + } + } + mprev = m; + m = gbuf_cont(m); + } + if (m) { + KERNEL_DEBUG(DBG_ADSP_ATRW, 4, m, gbuf_len(m), mprev, gref->rdhead); + if (mprev) + gbuf_cont(mprev) = 0; + else + mhead = 0; + ATDISABLE(s, gref->lock); + if (gref->rdhead == 0) + gref->rdtail = m; + gbuf_next(m) = gref->rdhead; + gref->rdhead = m; + ATENABLE(s, gref->lock); + } + if (mhead) + gbuf_freem(mhead); +//### LD TEST +// simple_unlock(&gref->lock); + } else { + if (gref->writeable) { + while (!(*gref->writeable)(gref)) { + /* flow control on, wait to be 
enabled to write */ + gref->sevents |= POLLSYNC; + err = tsleep(&gref->event, PSOCK | PCATCH, "AT write", 0); + gref->sevents &= ~POLLSYNC; + if (err != 0) { + ATENABLE(s, gref->lock); + return err; + } + } + } + + ATENABLE(s, gref->lock); + + /* allocate a buffer to copy in the write data */ + if ((m = gbuf_alloc(AT_WR_OFFSET+len, PRI_MED)) == 0) + return ENOBUFS; + gbuf_rinc(m,AT_WR_OFFSET); + gbuf_wset(m,len); + + /* copy in the write data */ + uio->uio_rw = UIO_WRITE; + if ((res = uiomove((caddr_t)gbuf_rptr(m), len, uio))) { +#ifdef APPLETALK_DEBUG + kprintf("_ATrw: UIO_WRITE: res=%d\n", res); +#endif + gbuf_freeb(m); + return EIO; + } + + /* forward the write data to the appropriate protocol module */ + gref_wput(gref, m); + } + + return 0; +} /* _ATrw */ + +int _ATread(fp, uio, cred) + void *fp; + struct uio *uio; + void *cred; +{ + int stat; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + stat = _ATrw(fp, UIO_READ, uio, 0); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return stat; +} + +int _ATwrite(fp, uio, cred) + void *fp; + struct uio *uio; + void *cred; +{ + int stat; + + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + stat = _ATrw(fp, UIO_WRITE, uio, 0); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + return stat; +} + +/* Most of the processing from _ATioctl, so that it can be called + from the new ioctl code */ +int at_ioctl(gref, cmd, arg) + gref_t *gref; + register caddr_t arg; +{ + int s, err = 0, len; + gbuf_t *m, *mdata; + ioc_t *ioc; + ioccmd_t ioccmd; + + /* error if not for us */ + if ((cmd & 0xffff) != 0xff99) + return EOPNOTSUPP; + + /* copy in ioc command info */ +/* + kprintf("at_ioctl: arg ioccmd.ic_cmd=%x ic_len=%x gref->lock=%x, gref->event=%x\n", + ((ioccmd_t *)arg)->ic_cmd, ((ioccmd_t *)arg)->ic_len, + gref->lock, gref->event); +*/ + if ((err = copyin((caddr_t)arg, + (caddr_t)&ioccmd, sizeof(ioccmd_t))) != 0) { +#ifdef APPLETALK_DEBUG + kprintf("at_ioctl: err = %d, copyin(%x, %x, %d)\n", 
err, + (caddr_t)arg, (caddr_t)&ioccmd, sizeof(ioccmd_t)); +#endif + return err; + } + + /* allocate a buffer to create an ioc command */ + if ((m = gbuf_alloc(sizeof(ioc_t), PRI_HI)) == 0) + return ENOBUFS; + gbuf_wset(m,sizeof(ioc_t)); + gbuf_set_type(m, MSG_IOCTL); + + /* create the ioc command */ + if (ioccmd.ic_len) { + if ((gbuf_cont(m) = gbuf_alloc(ioccmd.ic_len, PRI_HI)) == 0) { + gbuf_freem(m); +#ifdef APPLETALK_DEBUG + kprintf("at_ioctl: gbuf_alloc err=%d\n",ENOBUFS); +#endif + return ENOBUFS; + } + gbuf_wset(gbuf_cont(m),ioccmd.ic_len); + if ((err = copyin((caddr_t)ioccmd.ic_dp, + (caddr_t)gbuf_rptr(gbuf_cont(m)), ioccmd.ic_len)) != 0) { + gbuf_freem(m); + return err; + } + } + ioc = (ioc_t *)gbuf_rptr(m); + ioc->ioc_cmd = ioccmd.ic_cmd; + ioc->ioc_count = ioccmd.ic_len; + ioc->ioc_error = 0; + ioc->ioc_rval = 0; + + /* send the ioc command to the appropriate recipient */ + gref_wput(gref, m); + + /* wait for the ioc ack */ + ATDISABLE(s, gref->lock); + while ((m = gref->ichead) == 0) { + gref->sevents |= POLLPRI; +#ifdef APPLETALK_DEBUG + kprintf("sleep gref = 0x%x\n", (unsigned)gref); +#endif + err = tsleep(&gref->iocevent, PSOCK | PCATCH, "AT ioctl", 0); + gref->sevents &= ~POLLPRI; + if (err != 0) { + ATENABLE(s, gref->lock); +#ifdef APPLETALK_DEBUG + kprintf("at_ioctl: EINTR\n"); +#endif + return err; + } + } + + /* PR-2224797 */ + if (gbuf_next(m) == m) /* error case */ + gbuf_next(m) = 0; + + gref->ichead = gbuf_next(m); + + ATENABLE(s, gref->lock); + +#ifdef APPLETALK_DEBUG + kprintf("at_ioctl: woke up from ioc sleep gref = 0x%x\n", + (unsigned)gref); +#endif + /* process the ioc response */ + ioc = (ioc_t *)gbuf_rptr(m); + if ((err = ioc->ioc_error) == 0) { + ioccmd.ic_timout = ioc->ioc_rval; + ioccmd.ic_len = 0; + mdata = gbuf_cont(m); + if (mdata && ioccmd.ic_dp) { + ioccmd.ic_len = gbuf_msgsize(mdata); + for (len=0; mdata; mdata=gbuf_cont(mdata)) { + if ((err = copyout((caddr_t)gbuf_rptr(mdata), + (caddr_t)&ioccmd.ic_dp[len], gbuf_len(mdata))) 
< 0) { +#ifdef APPLETALK_DEBUG + kprintf("at_ioctl: len=%d error copyout=%d from=%x to=%x gbuf_len=%x\n", + len, err, (caddr_t)gbuf_rptr(mdata), + (caddr_t)&ioccmd.ic_dp[len], gbuf_len(mdata)); +#endif + goto l_done; + } + len += gbuf_len(mdata); + } + } + if ((err = copyout((caddr_t)&ioccmd, + (caddr_t)arg, sizeof(ioccmd_t))) != 0) { +#ifdef APPLETALK_DEBUG + kprintf("at_ioctl: error copyout2=%d from=%x to=%x len=%d\n", + err, &ioccmd, arg, sizeof(ioccmd_t)); +#endif + goto l_done; + } + } + +l_done: + gbuf_freem(m); + /*kprintf("at_ioctl: I_done=%d\n", err);*/ + return err; +} /* at_ioctl */ + +int _ATioctl(fp, cmd, arg, proc) + void *fp; + u_long cmd; + register caddr_t arg; + void *proc; +{ + int err; + gref_t *gref; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if ((err = atalk_getref(fp, 0, &gref, 0)) != 0) { +#ifdef APPLETALK_DEBUG + kprintf("_ATioctl: atalk_getref err = %d\n", err); +#endif + } + else + err = at_ioctl(gref, cmd, arg); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + return err; +} + +int _ATselect(fp, which, proc) + struct file *fp; + int which; + struct proc *proc; +{ + int s, err, rc = 0; + gref_t *gref; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + err = atalk_getref(fp, 0, &gref, 0); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + if (err != 0) + rc = 1; + else { + ATDISABLE(s, gref->lock); + if (which == FREAD) { + if (gref->rdhead || (gref->readable && (*gref->readable)(gref))) + rc = 1; + else { + gref->sevents |= POLLIN; + selrecord(proc, &gref->si); + } + } + else if (which == POLLOUT) { + if (gref->writeable) { + if ((*gref->writeable)(gref)) + rc = 1; + else { + gref->sevents |= POLLOUT; + selrecord(proc, &gref->si); + } + } else + rc = 1; + } + ATENABLE(s, gref->lock); + } + + return rc; +} + +void atalk_putnext(gref, m) + gref_t *gref; + gbuf_t *m; +{ + int s; + + ATDISABLE(s, gref->lock); + + /* *** potential leak? 
*** */ + gbuf_next(m) = 0; + + switch (gbuf_type(m)) { + case MSG_IOCACK: + case MSG_IOCNAK: + if (gref->ichead) + gbuf_next(gref->ichead) = m; + else { + gref->ichead = m; + if (gref->sevents & POLLPRI) { +#ifdef APPLETALK_DEBUG + kprintf("wakeup gref = 0x%x\n", (unsigned)gref); +#endif + thread_wakeup(&gref->iocevent); + } + } + break; + case MSG_ERROR: + /* *** this processing was moved to atalk_notify *** */ + panic("atalk_putnext receved MSG_ERROR"); + break; + default: + if (gref->errno) + gbuf_freem(m); + else + if (gref->rdhead) { + gbuf_next(gref->rdtail) = m; + gref->rdtail = m; + } else { + gref->rdhead = m; + if (gref->sevents & POLLMSG) { + gref->sevents &= ~POLLMSG; + thread_wakeup(&gref->event); + } + if (gref->sevents & POLLIN) { + gref->sevents &= ~POLLIN; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&gref->si); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } + gref->rdtail = m; + } + } /* switch gbuf_type(m) */ + + ATENABLE(s, gref->lock); +} /* atalk_putnext */ + +void atalk_enablew(gref) + gref_t *gref; +{ + if (gref->sevents & POLLSYNC) + thread_wakeup(&gref->event); +} + +void atalk_flush(gref) + gref_t *gref; +{ + int s; + + ATDISABLE(s, gref->lock); + if (gref->rdhead) { + gbuf_freel(gref->rdhead); + gref->rdhead = 0; + } + if (gref->ichead) { + gbuf_freel(gref->ichead); + gref->ichead = 0; + } + ATENABLE(s, gref->lock); +} + +/* + * Notify an appletalk user of an asynchronous error; + * just wake up so that he can collect error status. 
+ */ +void atalk_notify(gref, errno) + register gref_t *gref; + int errno; +{ + int s; + ATDISABLE(s, gref->lock); + + if (gref->atpcb_socket) { + /* For DDP -- + This section is patterned after udp_notify() in + netinet/udp_usrreq.c + */ + gref->atpcb_socket->so_error = errno; + sorwakeup(gref->atpcb_socket); + sowwakeup(gref->atpcb_socket); + } else { + /* for ATP, ASP, and ADSP */ + if (gref->errno == 0) { + gref->errno = errno; + /* clear out data waiting to be read */ + if (gref->rdhead) { + gbuf_freel(gref->rdhead); + gref->rdhead = 0; + } + /* blocked read */ + if (gref->sevents & POLLMSG) { + gref->sevents &= ~POLLMSG; + thread_wakeup(&gref->event); + } + /* select */ + if (gref->sevents & POLLIN) { + gref->sevents &= ~POLLIN; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&gref->si); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } + } + } + ATENABLE(s, gref->lock); +} /* atalk_notify */ + +void atalk_notify_sel(gref) + gref_t *gref; +{ + int s; + + ATDISABLE(s, gref->lock); + if (gref->sevents & POLLIN) { + gref->sevents &= ~POLLIN; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + selwakeup(&gref->si); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } + ATENABLE(s, gref->lock); +} + +int atalk_peek(gref, event) + gref_t *gref; + unsigned char *event; +{ + int s, rc; + + ATDISABLE(s, gref->lock); + if (gref->rdhead) { + *event = *gbuf_rptr(gref->rdhead); + rc = 0; + } else + rc = -1; + ATENABLE(s, gref->lock); + + return rc; +} + +static gbuf_t *trace_msg; + +void atalk_settrace(str, p1, p2, p3, p4, p5) + char *str; +{ + int len; + gbuf_t *m, *nextm; + char trace_buf[256]; + + sprintf(trace_buf, str, p1, p2, p3, p4, p5); + len = strlen(trace_buf); +#ifdef APPLETALK_DEBUG + kprintf("atalk_settrace: gbufalloc size=%d\n", len+1); +#endif + if ((m = gbuf_alloc(len+1, PRI_MED)) == 0) + return; + gbuf_wset(m,len); + strcpy(gbuf_rptr(m), trace_buf); + if (trace_msg) { + for (nextm=trace_msg; gbuf_cont(nextm); 
nextm=gbuf_cont(nextm)) ; + gbuf_cont(nextm) = m; + } else + trace_msg = m; +} + +void atalk_gettrace(m) + gbuf_t *m; +{ + if (trace_msg) { + gbuf_cont(m) = trace_msg; + trace_msg = 0; + } +} + +#define GREF_PER_BLK 32 +static gref_t *gref_free_list = 0; + +int gref_alloc(grefp) + gref_t **grefp; +{ + extern gbuf_t *atp_resource_m; + int i, s; + gbuf_t *m; + gref_t *gref, *gref_array; + + *grefp = (gref_t *)NULL; + + ATDISABLE(s, refall_lock); + if (gref_free_list == 0) { + ATENABLE(s, refall_lock); +#ifdef APPLETALK_DEBUG + kprintf("gref_alloc: gbufalloc size=%d\n", GREF_PER_BLK*sizeof(gref_t)); +#endif + if ((m = gbuf_alloc(GREF_PER_BLK*sizeof(gref_t),PRI_HI)) == 0) + return ENOBUFS; + bzero(gbuf_rptr(m), GREF_PER_BLK*sizeof(gref_t)); + gref_array = (gref_t *)gbuf_rptr(m); + for (i=0; i < GREF_PER_BLK-1; i++) + gref_array[i].atpcb_next = (gref_t *)&gref_array[i+1]; + ATDISABLE(s, refall_lock); + gbuf_cont(m) = atp_resource_m; + atp_resource_m = m; + gref_array[i].atpcb_next = gref_free_list; + gref_free_list = (gref_t *)&gref_array[0]; + } + + gref = gref_free_list; + gref_free_list = gref->atpcb_next; + ATENABLE(s, refall_lock); + ATLOCKINIT(gref->lock); +//### LD Test 08/05/98 +// simple_lock_init(&gref->lock); + ATEVENTINIT(gref->event); + ATEVENTINIT(gref->iocevent); + + /* *** just for now *** */ + gref->atpcb_socket = (struct socket *)NULL; + + *grefp = gref; + return 0; +} /* gref_alloc */ + +static int gref_close(gref) + gref_t *gref; +{ + int s, rc; + + switch (gref->proto) { + + /* ATPROTO_DDP and ATPROTO_LAP have been replaced with + BSD-style socket interface. 
*/ + + case ATPROTO_ATP: + rc = atp_close(gref, 1); break; + case ATPROTO_ASP: + rc = asp_close(gref); break; +#ifdef AURP_SUPPORT + case ATPROTO_AURP: + rc = aurp_close(gref); break; + break; +#endif + case ATPROTO_ADSP: + rc = adsp_close(gref); break; + default: + rc = 0; + break; + } + + if (rc == 0) { + atalk_flush(gref); + selthreadclear(&gref->si); + + /* from original gref_free() */ + ATDISABLE(s, refall_lock); + bzero((char *)gref, sizeof(gref_t)); + gref->atpcb_next = gref_free_list; + gref_free_list = gref; + ATENABLE(s, refall_lock); + } + + return rc; +} + +/* + Buffer Routines + + *** Some to be replaced with mbuf routines, some to be re-written + as mbuf routines (and moved to kern/uicp_mbuf.c or sys/mbuf.h?). + *** + +*/ + +/* + * LD 5/12/97 Added for MacOSX, defines a m_clattach function that: + * "Allocates an mbuf structure and attaches an external cluster." + */ + +struct mbuf *m_clattach(extbuf, extfree, extsize, extarg, wait) + caddr_t extbuf; + int (*extfree)(); + int extsize; + int extarg; + int wait; +{ + struct mbuf *m; + + if ((m = m_gethdr(wait, MSG_DATA)) == NULL) + return (NULL); + + m->m_ext.ext_buf = extbuf; + m->m_ext.ext_free = extfree; + m->m_ext.ext_size = extsize; + m->m_ext.ext_arg = extarg; + m->m_ext.ext_refs.forward = + m->m_ext.ext_refs.backward = &m->m_ext.ext_refs; + m->m_data = extbuf; + m->m_flags |= M_EXT; + + return (m); +} + +/* + Used as the "free" routine for over-size clusters allocated using + m_lgbuf_alloc(). +*/ + +void m_lgbuf_free(buf, size, arg) + void *buf; + int size, arg; /* not needed, but they're in m_free() */ +{ + FREE(buf, M_MCLUST); +} + +/* + Used to allocate an mbuf when there is the possibility that it may + need to be larger than the size of a standard cluster. 
+*/ + +struct mbuf *m_lgbuf_alloc(size, wait) + int size, wait; +{ + struct mbuf *m; + + /* If size is too large, allocate a cluster, otherwise, use the + standard mbuf allocation routines.*/ + if (size > MCLBYTES) { + void *buf; + if (NULL == + (buf = (void *)_MALLOC(size, M_MCLUST, + (wait)? M_WAITOK: M_NOWAIT))) { + return(NULL); + } + if (NULL == + (m = m_clattach(buf, m_lgbuf_free, size, 0, + (wait)? M_WAIT: M_DONTWAIT))) { + m_lgbuf_free(buf); + return(NULL); + } + } else { + m = m_gethdr(((wait)? M_WAIT: M_DONTWAIT), MSG_DATA); + if (m && (size > MHLEN)) { + MCLGET(m, ((wait)? M_WAIT: M_DONTWAIT)); + if (!(m->m_flags & M_EXT)) { + (void)m_free(m); + return(NULL); + } + } + } + + return(m); +} /* m_lgbuf_alloc */ + +/* + gbuf_alloc() is a wrapper for m_lgbuf_alloc(), which is used to + allocate an mbuf when there is the possibility that it may need + to be larger than the size of a standard cluster. + + gbuf_alloc() sets the mbuf lengths, unlike the standard mbuf routines. +*/ + +gbuf_t *gbuf_alloc_wait(size, wait) + int size, wait; +{ + gbuf_t *m = (gbuf_t *)m_lgbuf_alloc(size, wait); + + /* Standard mbuf allocation routines assume that the caller + will set the size. */ + if (m) { + (struct mbuf *)m->m_pkthdr.len = size; + (struct mbuf *)m->m_len = size; + } + + return(m); +} + +int gbuf_msgsize(m) + gbuf_t *m; +{ + int size; + + for (size=0; m; m=gbuf_cont(m)) + size += gbuf_len(m); + return size; +} + +int append_copy(m1, m2, wait) + struct mbuf *m1, *m2; + int wait; +{ + if ((!(m1->m_flags & M_EXT)) && (!(m2->m_flags & M_EXT)) && + (m_trailingspace(m1) >= m2->m_len)) { + /* splat the data from one into the other */ + bcopy(mtod(m2, caddr_t), mtod(m1, caddr_t) + m1->m_len, + (u_int)m2->m_len); + m1->m_len += m2->m_len; + if (m1->m_flags & M_PKTHDR) + m1->m_pkthdr.len += m2->m_len; + return 1; + } + if ((m1->m_next = m_copym(m2, 0, m2->m_len, + (wait)? 
M_WAIT: M_DONTWAIT)) == NULL) + return 0; + return 1; +} /* append_copy */ + +/* + Copy an mbuf chain, referencing existing external storage, if any. + Leave space for a header in the new chain, if the space has been + left in the origin chain. +*/ +struct mbuf *copy_pkt(mlist, pad) + struct mbuf *mlist; /* the mbuf chain to be copied */ + int pad; /* hint as to how long the header might be + If pad is < 0, leave the same amount of space + as there was in the original. */ +{ + struct mbuf *new_m; + int len; + + if (pad < 0) + len = m_leadingspace(mlist); + else + len = min(pad, m_leadingspace(mlist)); + + /* preserve space for the header at the beginning of the mbuf */ + if (len) { + mlist->m_data -= (len); + mlist->m_len += (len); + if (mlist->m_flags & M_PKTHDR) + mlist->m_pkthdr.len += (len); + new_m = m_copym(mlist, 0, M_COPYALL, M_DONTWAIT); + m_adj(mlist, len); + m_adj(new_m, len); + } else + new_m = m_copym(mlist, 0, M_COPYALL, M_DONTWAIT); + + return(new_m); +} + +void gbuf_linkb(m1, m2) + gbuf_t *m1; + gbuf_t *m2; +{ + while (gbuf_cont(m1) != 0) + m1 = gbuf_cont(m1); + gbuf_cont(m1) = m2; +} + +void gbuf_linkpkt(m1, m2) + gbuf_t *m1; + gbuf_t *m2; +{ + while (gbuf_next(m1) != 0) + m1 = gbuf_next(m1); + gbuf_next(m1) = m2; +} + +int gbuf_freel(m) + gbuf_t *m; +{ + gbuf_t *tmp_m; + + while ((tmp_m = m) != 0) { + m = gbuf_next(m); + gbuf_next(tmp_m) = 0; + gbuf_freem(tmp_m); + } + return (0); +} + +/* free empty mbufs at the front of the chain */ +gbuf_t *gbuf_strip(m) + gbuf_t *m; +{ + gbuf_t *tmp_m; + + while (m && gbuf_len(m) == 0) { + tmp_m = m; + m = gbuf_cont(m); + gbuf_freeb(tmp_m); + } + return(m); +} + +/**************************************/ + +int ddp_adjmsg(m, len) + gbuf_t *m; + int len; +{ + int buf_len; + gbuf_t *curr_m, *prev_m; + + if (m == (gbuf_t *)0) + return 0; + + if (len > 0) { + for (curr_m=m; curr_m;) { + buf_len = gbuf_len(curr_m); + if (len < buf_len) { + gbuf_rinc(curr_m,len); + return 1; + } + len -= buf_len; + 
gbuf_rinc(curr_m,buf_len); + if ((curr_m = gbuf_cont(curr_m)) == 0) { + gbuf_freem(m); + return 0; + } + } + + } else if (len < 0) { + len = -len; +l_cont: prev_m = 0; + for (curr_m=m; gbuf_cont(curr_m); + prev_m=curr_m, curr_m=gbuf_cont(curr_m)) ; + buf_len = gbuf_len(curr_m); + if (len < buf_len) { + gbuf_wdec(curr_m,len); + return 1; + } + if (prev_m == 0) + return 0; + gbuf_cont(prev_m) = 0; + gbuf_freeb(curr_m); + len -= buf_len; + goto l_cont; + + } else + return 1; +} + +/* + * The message chain, m is grown in size by len contiguous bytes. + * If len is non-negative, len bytes are added to the + * end of the gbuf_t chain. If len is negative, the + * bytes are added to the front. ddp_growmsg only adds bytes to + * message blocks of the same type. + * It returns a pointer to the new gbuf_t on sucess, 0 on failure. + */ + +gbuf_t *ddp_growmsg(mp, len) + gbuf_t *mp; + int len; +{ + gbuf_t *m, *d; + + if ((m = mp) == (gbuf_t *) 0) + return ((gbuf_t *) 0); + + if (len <= 0) { + len = -len; + if ((d = gbuf_alloc(len, PRI_MED)) == 0) + return ((gbuf_t *) 0); + gbuf_set_type(d, gbuf_type(m)); + gbuf_wset(d,len); + /* link in new gbuf_t */ + gbuf_cont(d) = m; + return (d); + + } else { + register int count; + /* + * Add to tail. + */ + if ((count = gbuf_msgsize(m)) < 0) + return ((gbuf_t *) 0); + /* find end of chain */ + for ( ; m; m = gbuf_cont(m)) { + if (gbuf_len(m) >= count) + break; + count -= gbuf_len(m); + } + /* m now points to gbuf_t to add to */ + if ((d = gbuf_alloc(len, PRI_MED)) == 0) + return ((gbuf_t *) 0); + gbuf_set_type(d, gbuf_type(m)); + /* link in new gbuf_t */ + gbuf_cont(d) = gbuf_cont(m); + gbuf_cont(m) = d; + gbuf_wset(d,len); + return (d); + } +} + +/* + * return the MSG_IOCACK/MSG_IOCNAK. Note that the same message + * block is used as the vehicle, and that if there is an error return, + * then linked blocks are lopped off. BEWARE of multiple references. + * Used by other appletalk modules, so it is not static! 
+ */ + +void ioc_ack(errno, m, gref) + int errno; + register gbuf_t *m; + register gref_t *gref; +{ + ioc_t *iocbp = (ioc_t *)gbuf_rptr(m); + + /*kprintf("ioc_ack: m=%x gref=%x errno=%d\n", m, gref, errno);*/ + if ((iocbp->ioc_error = errno) != 0) + { /* errno != 0, then there is an error, get rid of linked blocks! */ + + if (gbuf_cont(m)) { + gbuf_freem(gbuf_cont(m)); + gbuf_cont(m) = 0; + } + gbuf_set_type(m, MSG_IOCNAK); + iocbp->ioc_count = 0; /* only make zero length if error */ + iocbp->ioc_rval = -1; + } else + gbuf_set_type(m, MSG_IOCACK); + + atalk_putnext(gref, m); +} + diff --git a/bsd/netat/sysglue.h b/bsd/netat/sysglue.h new file mode 100644 index 000000000..0885bb142 --- /dev/null +++ b/bsd/netat/sysglue.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Title: sysglue.h - AppleTalk protocol to Unix System V/streams interface + * + * Facility: AppleTalk Protocol Execution Environment + * + * Author: Gregory Burns, Creation Date: Jun-3-1988 + * + * History: + * X01-001 Gregory Burns 3-Jun-1988 + * Initial Creation. 
#ifndef _NETAT_SYSGLUE_H_
#define _NETAT_SYSGLUE_H_

/*
  The following is originally from netat/h/localglue.h, which was
  included in netat/h/sysglue.h:
*/

/* User-level ioctl carrier (streams I_STR style). */
typedef struct {
	int  ic_cmd;	/* ioctl command */
	int  ic_timout;	/* timeout, in seconds */
	int  ic_len;	/* length of data pointed to by ic_dp */
	char *ic_dp;	/* data for/from the command */
} ioccmd_t;

/* In-kernel ioctl message header, carried in the first gbuf_t block. */
typedef struct {
	int   ioc_cmd;		/* command */
	void  *ioc_cr;		/* credentials */
	int   ioc_id;		/* message id */
	int   ioc_count;	/* byte count of attached data */
	int   ioc_error;	/* error code for the reply */
	int   ioc_rval;		/* return value for the reply */
	void  *ioc_private;	/* module private data */
	int   ioc_filler[4];
} ioc_t;

/*
 * Want these definitions outside the KERNEL define for admin
 * program access.
 */
#ifdef _AIX
#define MSG_DATA	0x00
#define MSG_PROTO	0x01
#define MSG_IOCTL	0x0e
#define MSG_ERROR	0x8a
#define MSG_HANGUP	0x89
#define MSG_IOCACK	0x81
#define MSG_IOCNAK	0x82
#define MSG_CTL		0x0d
#else
/* ### LD 5/3/97 MacOSX porting note:
 * Cannot use MSG_DATA = 0, because MT_FREE is defined as 0
 * and the sanity check in m_free cause a panic.
 */

#define MSG_DATA	(MT_MAX - 1)
#define MSG_PROTO	(MT_MAX - 2)
#define MSG_IOCTL	(MT_MAX - 3)
#define MSG_ERROR	(MT_MAX - 4)
#define MSG_HANGUP	(MT_MAX - 5)
#define MSG_IOCACK	(MT_MAX - 6)
#define MSG_IOCNAK	(MT_MAX - 7)
#define MSG_CTL		(MT_MAX - 8)
#endif

#ifdef KERNEL

#define SYS_HZ HZ	/* Number of clock (SYS_SETTIMER) ticks per second */
#define HZ hz		/* HZ ticks definition used throughout AppleTalk */

/* returned when the operation is not possible at this
 * time (ie when starting up or shutting down.
 * right now, uses ESHUTDOWN because ENOTREADY is not defined
 * in MacOSX. Need to find a better Error code ###LD
 */
#define ENOTREADY	ESHUTDOWN
#define ENOMSG		EOPNOTSUPP
#define EPROTO		EPROTOTYPE

/* T_MPSAFE is used only in atp_open. I suspect it's a
 * trick to accelerate local atp transactions.
 */
#define T_MPSAFE	0

#define INTERRUPTIBLE	1
#define POLLIN		0x0001
#define POLLOUT		0x0002
#define POLLPRI		0x0004
#define POLLMSG		0x0080	/* fix: was defined twice in this list */
#define POLLSYNC	0x8000

/*
 * Define a new Data Type for file. it was DTYPE_OTHER for
 * AIX, for MacOSX there is no such define so defines
 * DTYPE_ATALK
 */

#define DTYPE_ATALK	-1

#define AT_WR_OFFSET	38
#ifndef EVENT_NULL
#define EVENT_NULL	-1
#define LOCK_HANDLER	2
#endif
typedef int atevent_t;

typedef simple_lock_t atlock_t;
typedef int *atomic_p;
#define ATLOCKINIT(a)	(a = (atlock_t) EVENT_NULL)
#define ATDISABLE(l, a)	(l = splimp())
#define ATENABLE(l, a)	splx(l)
#define ATEVENTINIT(a)	(a = (atevent_t) EVENT_NULL)
#define DDP_OUTPUT(m)	ddp_putmsg(0,m)
#define StaticProc	static

#define PRI_LO		1
#define PRI_MED		2
#define PRI_HI		3

/* A "gbuf" (streams-style message block) is just a BSD mbuf. */
typedef struct mbuf gbuf_t;

/* prototypes for the gbuf routines */

struct mbuf *m_lgbuf_alloc(int size, int wait);
gbuf_t *gbuf_alloc_wait(int size, int wait);
gbuf_t *gbuf_copym(gbuf_t *mlist);	/* NOTE(review): also re-defined as a macro below */
gbuf_t *gbuf_strip(gbuf_t *m);
int gbuf_freel(gbuf_t *m);
void gbuf_linkb(gbuf_t *m1, gbuf_t *m2);
void gbuf_linkpkt(gbuf_t *m1, gbuf_t *m2);
int gbuf_msgsize(gbuf_t *m);

/* streams-message accessors mapped onto mbuf fields */
#define gbuf_cont(m)	m->m_next
#define gbuf_next(m)	m->m_nextpkt
#define gbuf_rptr(m)	m->m_data
#define gbuf_rinc(m,len)	{m->m_data += len; m->m_len -= len;}
#define gbuf_rdec(m,len)	{m->m_data -= len; m->m_len += len;}
#define gbuf_wptr(m)	(m->m_data + m->m_len)
#define gbuf_winc(m,len)	(m->m_len += len)
#define gbuf_wdec(m,len)	(m->m_len -= len)
#define gbuf_wset(m,len)	(m->m_len = len)
#define gbuf_type(m)	m->m_type
#define gbuf_len(m)	m->m_len

#define gbuf_alloc(size, pri)	(gbuf_alloc_wait(size, FALSE))
#define gbuf_copym(mlist)	((gbuf_t *)copy_pkt(mlist, -1))

#define gbuf_prepend(m,len)	M_PREPEND(m,len,M_DONTWAIT)
#define gbuf_freem(mlist)	m_freem((struct mbuf *)mlist)
#define gbuf_freeb(m)	(void)m_free((struct mbuf *)m)
#define gbuf_set_type(m, mtype)	MCHTYPE(m, mtype)

/* Duplicate a single mbuf, attaching existing external storage. */
#define gbuf_dupb_wait(m, wait)	((gbuf_t *)m_copym(m, 0, gbuf_len(m), (wait)? M_WAIT: M_DONTWAIT))
#define gbuf_dupb(m)	(gbuf_dupb_wait(m, FALSE))
/* Duplicate an mbuf chain, attaching existing external storage. */
#define gbuf_dupm(mlist)	((gbuf_t *)copy_pkt(mlist, -1))
	/* *** was ((gbuf_t *)m_copym(mlist, 0, M_COPYALL, M_DONTWAIT)) *** */

#undef timeoutcf
#undef timeout
#undef untimeout

#endif /* KERNEL */
#endif /* _NETAT_SYSGLUE_H_ */
/* ZIP packet types (DDP-based messages) */

#define ZIP_QUERY		1 /* ZIP zone query packet */
#define ZIP_REPLY		2 /* ZIP query reply packet */
#define ZIP_TAKEDOWN		3 /* ZIP takedown packet */
#define ZIP_BRINGUP		4 /* ZIP bringup packet */
#define ZIP_GETNETINFO		5 /* ZIP DDP get net info packet */
#define ZIP_NETINFO_REPLY	6 /* ZIP GetNetInfo Reply */
#define ZIP_NOTIFY		7 /* Notification of zone name change */
#define ZIP_EXTENDED_REPLY	8 /* ZIP extended query reply packet */

/* ATP-based ZIP commands — separate numbering space from the DDP
 * types above (7 and 8 intentionally reused). */
#define ZIP_GETMYZONE		7 /* ZIP ATP get my zone packet */
#define ZIP_GETZONELIST		8 /* ZIP ATP get zone list packet */
#define ZIP_GETLOCALZONES	9 /* ZIP ATP get cable list packet*/

#define ZIP_HDR_SIZE		2
#define ZIP_DATA_SIZE		584


#define	ZIP_MAX_ZONE_LENGTH	32 /* Max length for a Zone Name */

/* On-the-wire ZIP packet: command, flags, then payload. */
typedef struct at_zip {
	u_char	command;
	u_char	flags;
	char	data[ZIP_DATA_SIZE];
} at_zip_t;

/* Extract the ZIP packet from the data portion of a DDP packet. */
#define	ZIP_ZIP(c)	((at_zip_t *)(&((at_ddp_t *)(c))->data[0]))

/* Extended ZIP reply: carries the cable range before the zone data. */
typedef struct {
	char		command;
	char		flags;
	at_net		cable_range_start;
	at_net		cable_range_end;
	u_char		data[1];	/* variable-length zone tuples follow */
} at_x_zip_t;

#define	ZIP_X_HDR_SIZE	6

/* flags for ZipNetInfoReply packet */
#define	ZIP_ZONENAME_INVALID	0x80
#define	ZIP_USE_BROADCAST	0x40
#define	ZIP_ONE_ZONE		0x20

#define	ZIP_NETINFO_RETRIES	3
#define	ZIP_TIMER_INT		HZ	/* HZ defined in param.h */

/* ZIP control codes */
#define	ZIP_ONLINE		1
#define	ZIP_LATE_ROUTER		2
#define	ZIP_NO_ROUTER		3

#define	ZIP_RE_AARP		-1

#endif /* _NETAT_ZIP_H_ */
\ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + dll.h hd_var.h hdlc.h llc_var.h pk.h pk_var.h \ + x25.h x25_sockaddr.h x25acct.h x25err.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = netccitt + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = netccitt + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/netccitt/ccitt_proto.c b/bsd/netccitt/ccitt_proto.c new file mode 100644 index 000000000..cf5f8edd6 --- /dev/null +++ b/bsd/netccitt/ccitt_proto.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998-1999 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ccitt_proto.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include + +#include + +#include + +/* + * Definitions of protocols supported in the CCITT domain. 
/*
 * Definitions of protocols supported in the CCITT domain:
 * optional LLC and HDLC link layers plus the X.25 packet level.
 */

extern struct domain ccittdomain;
#define DOMAIN &ccittdomain

/* K&R-style forward declarations for the protosw entry points */
#if LLC
int llc_output();
void llc_ctlinput(), llc_init(), llc_timer();
#endif
#if HDLC
int hd_output();
void hd_ctlinput(), hd_init(), hd_timer();
#endif
int pk_usrreq(), pk_ctloutput();
void pk_timer(), pk_init(), pk_input(), pk_ctlinput();

/* Protocol switch table for the CCITT domain. Positional
 * initializers: type, domain, protocol, flags, input, output,
 * ctlinput, ctloutput, usrreq, init, fasttimo, slowtimo, drain. */
struct protosw ccittsw[] = {
#if LLC
	{ 0, DOMAIN, IEEEPROTO_802LLC,0,
		0, llc_output, llc_ctlinput, 0,
		0,
		llc_init, 0, llc_timer, 0,
	},
#endif
#if HDLC
	{ 0, DOMAIN, CCITTPROTO_HDLC,0,
		0, hd_output, hd_ctlinput, 0,
		0,
		hd_init, 0, hd_timer, 0,
	},
#endif
	{ SOCK_STREAM, DOMAIN, CCITTPROTO_X25, PR_CONNREQUIRED|PR_ATOMIC|PR_WANTRCVD,
		pk_input, 0, pk_ctlinput, pk_ctloutput,
		pk_usrreq,
		pk_init, 0, pk_timer, 0,
	}
};

#if 0
/*need to look at other init functions, use net_add_proto() to assure
  things are init'd properly*/
LINK_PROTOS(ccittsw);
#endif

/* No init or rights functions; a routing init function; no header sizes.
 * NOTE(review): the initializer below names link_ccittsw_protos, which
 * LINK_PROTOS(ccittsw) would generate — but that macro invocation is
 * disabled by the #if 0 above; verify this symbol resolves at link time. */
struct domain ccittdomain =
	{ AF_CCITT, "ccitt", link_ccittsw_protos, 0, 0, ccittsw,
	  &ccittsw[sizeof(ccittsw)/sizeof(ccittsw[0])], 0,
	  rn_inithead, 32, sizeof (struct sockaddr_x25) };
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dll.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * We define the additional PRC_* codes in here + */ +#ifdef KERNEL +#ifndef PRC_IFUP +#define PRC_IFUP 3 +#endif +#define PRC_CONNECT_INDICATION 8 +#define PRC_CONNECT_REQUEST 9 +#define PRC_DISCONNECT_REQUEST 10 +#define PRC_DISCONNECT_INDICATION 11 +#define PRC_RESET_REQUEST 12 +#endif + +/* + * Data link layer configuration --- basically a copy of the relevant parts + * of x25config, implemented to become a little bit more network + * layer independent. (Probably only used for casting et al.) 
/*
 * Data link layer configuration --- basically a copy of the relevant parts
 * of x25config, implemented to become a little bit more network
 * layer independent. (Probably only used for casting et al.)
 */
struct dllconfig {
	u_short dllcfg_unused0:4,
		dllcfg_unused1:4,
		dllcfg_trace:1,		/* link level tracing flag */
		dllcfg_window:7;	/* link level window size */
	u_short dllcfg_xchxid:1,	/* exchange XID (not yet) */
		dllcfg_unused2:7;	/* here be dragons */
};

/* Control info passed between layers: the union carries either
 * the up-facing (config + LSAP) or down-facing (pcb + route +
 * link config) view; the dlcti_* macros below select the member. */
struct dll_ctlinfo {
	union {
		struct {
			struct dllconfig *dctli_up_cfg;
			u_char dctli_up_lsap;
		} CTLI_UP;
		struct {
			caddr_t dctli_down_pcb;
			struct rtentry *dctli_down_rt;
			struct dllconfig *dctli_down_llconf;
		} CTLI_DOWN;
	} CTLIun;
};
#define dlcti_cfg	CTLIun.CTLI_UP.dctli_up_cfg
#define dlcti_lsap	CTLIun.CTLI_UP.dctli_up_lsap
#define dlcti_pcb	CTLIun.CTLI_DOWN.dctli_down_pcb
#define dlcti_rt	CTLIun.CTLI_DOWN.dctli_down_rt
#define dlcti_conf	CTLIun.CTLI_DOWN.dctli_down_llconf
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
/*
 * Decode and print one HDLC frame for link-level tracing.
 *
 * hdp:       HDLC control block for the link.
 * direction: RX for received frames, 2 for retransmitted, anything
 *            else is treated as normal transmit.
 *            NOTE(review): `direction` has no declaration in the K&R
 *            parameter list below, so it defaults to int; the magic 2
 *            has no named constant — confirm against callers.
 * frame:     the frame to decode.
 *
 * Printing is gated on the per-link xc_ltrace flag; when HDLCDEBUG is
 * built in, every frame is also saved to the in-memory trace ring.
 */
hd_trace (hdp, direction, frame)
struct hdcb *hdp;
register struct Hdlc_frame *frame;
{
	register char *s;
	register int nr, pf, ns, i;
	struct Hdlc_iframe *iframe = (struct Hdlc_iframe *) frame;

#ifdef HDLCDEBUG
	hd_savetrace (hdp, direction, frame);
#endif
	if (hdp -> hd_xcp -> xc_ltrace) {
		if (direction == RX)
			printf ("F-In: ");
		else if (direction == 2)
			printf ("F-Xmt: ");
		else
			printf ("F-Out: ");

		nr = iframe -> nr;
		pf = iframe -> pf;
		ns = iframe -> ns;

		switch (hd_decode (hdp, frame)) {
		case SABM:
			printf ("SABM : PF=%d\n", pf);
			break;

		case DISC:
			printf ("DISC : PF=%d\n", pf);
			break;

		case DM:
			printf ("DM : PF=%d\n", pf);
			break;

		case FRMR:
		{
			register struct Frmr_frame *f = (struct Frmr_frame *)frame;

			/* dump the first 5 raw bytes, then the decoded
			 * frame-reject information field */
			printf ("FRMR : PF=%d, TEXT=", pf);
			for (s = (char *) frame, i = 0; i < 5; ++i, ++s)
				printf ("%x ", (int) * s & 0xff);
			printf ("\n");
			printf ("control=%x v(s)=%d v(r)=%d w%d x%d y%d z%d\n",
				f->frmr_control, f->frmr_ns, f->frmr_nr,
				f->frmr_w, f->frmr_x, f->frmr_y, f->frmr_z);
			break;
		}

		case UA:
			printf ("UA : PF=%d\n", pf);
			break;

		case RR:
			printf ("RR : N(R)=%d, PF=%d\n", nr, pf);
			break;

		case RNR:
			printf ("RNR : N(R)=%d, PF=%d\n", nr, pf);
			break;

		case REJ:
			printf ("REJ : N(R)=%d, PF=%d\n", nr, pf);
			break;

		case IFRAME:
		{
			register struct mbuf *m;
			register int len = 0;

			/* total payload length = sum of the mbuf chain
			 * minus the HDLC header */
			for(m = dtom (frame); m; m = m -> m_next)
				len += m -> m_len;
			len -= HDHEADERLN;
			printf ("IFRAME : N(R)=%d, PF=%d, N(S)=%d, DATA(%d)=",
				nr, pf, ns, len);
			for (s = (char *)iframe->i_field, i = 0; i < 3; ++i, ++s)
				printf ("%x ", (int) *s & 0xff);
			printf ("\n");
			break;
		}

		default:
			printf ("ILLEGAL: ");
			for (s = (char *) frame, i = 0; i < 5; ++i, ++s)
				printf ("%x ", (int) *s & 0xff);
			printf ("\n");
		}

	}
}
/*
 * Dump the saved HDLC frame trace ring for one link (HDLCDEBUG only).
 * Freezes the ring while dumping, forces tracing on so hd_trace
 * prints, then restores the previous trace setting and unfreezes.
 * Entries are printed oldest-first and freed as they are dumped, so
 * each frame can only ever be dumped once.
 */
hd_dumptrace (hdp)
struct hdcb *hdp;
{
	register int i, ltrace;
	register struct hdlctrace *htp;

	freezetrace = 1;
	hd_status (hdp);
	printf ("retransmit queue:");
	for (i = 0; i < 8; i++)
		printf (" %x", hdp -> hd_retxq[i]);
	printf ("\n");
	/* temporarily enable tracing so hd_trace will print */
	ltrace = hdp -> hd_xcp -> xc_ltrace;
	hdp -> hd_xcp -> xc_ltrace = 1;
	for (i = 0; i < NTRACE; i++) {
		/* walk the ring starting at the oldest entry */
		htp = &hdtrace[(lasttracelogged + i) % NTRACE];
		if (htp->ht_hdp != hdp || htp->ht_frame == 0)
			continue;
		printf ("%d/%d ", htp->ht_time.tv_sec & 0xff,
			htp->ht_time.tv_usec / 10000);
		hd_trace (htp->ht_hdp, htp->ht_dir,
			mtod (htp->ht_frame, struct Hdlc_frame *));
		m_freem (htp->ht_frame);
		htp->ht_frame = 0;
	}
	hdp -> hd_xcp -> xc_ltrace = ltrace;
	freezetrace = 0;
}
+ * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)hd_input.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +static frame_reject(); +static rej_routine(); +static free_iframes(); +/* + * HDLC INPUT INTERFACE + * + * This routine is called when the HDLC physical device has + * completed reading a frame. 
/*
 * HDLC INPUT INTERFACE
 *
 * Software-interrupt handler: drains the HDLC input queue, maps each
 * frame's receive interface to its HDLC control block (with a one-entry
 * cache for the common single-link case), and hands the frame to
 * process_rxframe. Frames that are too short, from an unknown
 * interface, or not queued upward are freed here.
 */

hdintr ()
{
	register struct mbuf *m;
	register struct hdcb *hdp;
	register struct ifnet *ifp;
	register int s;
	static struct ifnet *lastifp;	/* one-entry ifp -> hdcb cache */
	static struct hdcb *lasthdp;

	for (;;) {
		/* dequeue at splimp to exclude the driver's enqueue */
		s = splimp ();
		IF_DEQUEUE (&hdintrq, m);
		splx (s);
		if (m == 0)
			break;
		if (m->m_len < HDHEADERLN) {
			printf ("hdintr: packet too short (len=%d)\n",
				m->m_len);
			m_freem (m);
			continue;
		}
		if ((m->m_flags & M_PKTHDR) == 0)
			panic("hdintr");
		ifp = m->m_pkthdr.rcvif;

		/*
		 * look up the appropriate hdlc control block
		 */

		if (ifp == lastifp)
			hdp = lasthdp;
		else {
			for (hdp = hdcbhead; hdp; hdp = hdp->hd_next)
				if (hdp->hd_ifp == ifp)
					break;
			if (hdp == 0) {
				/* NOTE(review): %x for a pointer — %p today */
				printf ("hdintr: unknown interface %x\n", ifp);
				m_freem (m);
				continue;
			}
			lastifp = ifp;
			lasthdp = hdp;
		}

		/* Process_rxframe returns FALSE if the frame was NOT queued
		   for the next higher layers. */
		if (process_rxframe (hdp, m) == FALSE)
			m_freem (m);
	}
}
/*
 * Run one received HDLC frame through the LAPB link state machine.
 * The switch key is (frame type + current link state), so each case
 * labels one (event, state) pair. Returns TRUE only when the frame
 * was queued to the packet level (IFRAME in ABM); the caller frees
 * the mbuf otherwise. Several cases fall through deliberately to
 * share a common action — marked below.
 */
process_rxframe (hdp, fbuf)
register struct hdcb *hdp;
register struct mbuf *fbuf;
{
	register int queued = FALSE, frametype, pf;
	register struct Hdlc_frame *frame;

	frame = mtod (fbuf, struct Hdlc_frame *);
	pf = ((struct Hdlc_iframe *) frame) -> pf;

	hd_trace (hdp, RX, frame);
	/* ignore frames not addressed to either station address */
	if (frame -> address != ADDRESS_A && frame -> address != ADDRESS_B)
		return (queued);

	switch ((frametype = hd_decode (hdp, frame)) + hdp->hd_state) {
	case DM + DISC_SENT:
	case UA + DISC_SENT:
		/*
		 * Link now closed. Leave timer running
		 * so hd_timer() can periodically check the
		 * status of interface driver flag bit IFF_UP.
		 */
		hdp->hd_state = DISCONNECTED;
		break;

	case DM + INIT:
	case UA + INIT:
		/*
		 * This is a non-standard state change needed for DCEs
		 * that do dynamic link selection. We can't go into the
		 * usual "SEND DM" state because a DM is a SARM in LAP.
		 */
		hd_writeinternal (hdp, SABM, POLLOFF);
		hdp->hd_state = SABM_SENT;
		SET_TIMER (hdp);
		break;

	case SABM + DM_SENT:
	case SABM + WAIT_SABM:
		hd_writeinternal (hdp, UA, pf);
		/* FALLTHROUGH: both sides now agree, bring link up */
	case UA + SABM_SENT:
	case UA + WAIT_UA:
		KILL_TIMER (hdp);
		hd_initvars (hdp);
		hdp->hd_state = ABM;
		hd_message (hdp, "Link level operational");
		/* Notify the packet level - to send RESTART. */
		(void) pk_ctlinput (PRC_LINKUP, hdp->hd_pkp);
		break;

	case SABM + SABM_SENT:
		/* Got a SABM collision. Acknowledge the remote's SABM
		   via UA but still wait for UA. */
		hd_writeinternal (hdp, UA, pf);
		break;

	case SABM + ABM:
		/* Request to reset the link from the remote. */
		KILL_TIMER (hdp);
		hd_message (hdp, "Link reset");
#ifdef HDLCDEBUG
		hd_dumptrace (hdp);
#endif
		hd_flush (hdp->hd_ifp);
		hd_writeinternal (hdp, UA, pf);
		hd_initvars (hdp);
		(void) pk_ctlinput (PRC_LINKRESET, hdp->hd_pkp);
		hdp->hd_resets++;
		break;

	case SABM + WAIT_UA:
		hd_writeinternal (hdp, UA, pf);
		break;

	case DM + ABM:
		hd_message (hdp, "DM received: link down");
#ifdef HDLCDEBUG
		hd_dumptrace (hdp);
#endif
		(void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp);
		hd_flush (hdp->hd_ifp);
		/* FALLTHROUGH: in every DM case we try to re-establish */
	case DM + DM_SENT:
	case DM + WAIT_SABM:
	case DM + WAIT_UA:
		hd_writeinternal (hdp, SABM, pf);
		hdp->hd_state = SABM_SENT;
		SET_TIMER (hdp);
		break;

	case DISC + INIT:
	case DISC + DM_SENT:
	case DISC + SABM_SENT:
		/* Note: This is a non-standard state change. */
		hd_writeinternal (hdp, UA, pf);
		hd_writeinternal (hdp, SABM, POLLOFF);
		hdp->hd_state = SABM_SENT;
		SET_TIMER (hdp);
		break;

	case DISC + WAIT_UA:
		hd_writeinternal (hdp, DM, pf);
		SET_TIMER (hdp);
		hdp->hd_state = DM_SENT;
		break;

	case DISC + ABM:
		hd_message (hdp, "DISC received: link down");
		(void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp);
		/* FALLTHROUGH: acknowledge the disconnect */
	case DISC + WAIT_SABM:
		hd_writeinternal (hdp, UA, pf);
		hdp->hd_state = DM_SENT;
		SET_TIMER (hdp);
		break;

	case UA + ABM:
		hd_message (hdp, "UA received: link down");
		(void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp);
		/* FALLTHROUGH */
	case UA + WAIT_SABM:
		hd_writeinternal (hdp, DM, pf);
		hdp->hd_state = DM_SENT;
		SET_TIMER (hdp);
		break;

	case FRMR + DM_SENT:
		hd_writeinternal (hdp, SABM, pf);
		hdp->hd_state = SABM_SENT;
		SET_TIMER (hdp);
		break;

	case FRMR + WAIT_SABM:
		hd_writeinternal (hdp, DM, pf);
		hdp->hd_state = DM_SENT;
		SET_TIMER (hdp);
		break;

	case FRMR + ABM:
		hd_message (hdp, "FRMR received: link down");
		(void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp);
#ifdef HDLCDEBUG
		hd_dumptrace (hdp);
#endif
		hd_flush (hdp->hd_ifp);
		hd_writeinternal (hdp, SABM, pf);
		hdp->hd_state = WAIT_UA;
		SET_TIMER (hdp);
		break;

	case RR + ABM:
	case RNR + ABM:
	case REJ + ABM:
		process_sframe (hdp, (struct Hdlc_sframe *)frame, frametype);
		break;

	case IFRAME + ABM:
		queued = process_iframe (hdp, fbuf, (struct Hdlc_iframe *)frame);
		break;

	case IFRAME + SABM_SENT:
	case RR + SABM_SENT:
	case RNR + SABM_SENT:
	case REJ + SABM_SENT:
		hd_writeinternal (hdp, DM, POLLON);
		hdp->hd_state = DM_SENT;
		SET_TIMER (hdp);
		break;

	case IFRAME + WAIT_SABM:
	case RR + WAIT_SABM:
	case RNR + WAIT_SABM:
	case REJ + WAIT_SABM:
		hd_writeinternal (hdp, FRMR, POLLOFF);
		SET_TIMER (hdp);
		break;

	case ILLEGAL + SABM_SENT:
		hdp->hd_unknown++;
		hd_writeinternal (hdp, DM, POLLOFF);
		hdp->hd_state = DM_SENT;
		SET_TIMER (hdp);
		break;

	case ILLEGAL + ABM:
		hd_message (hdp, "Unknown frame received: link down");
		(void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp);
		/* FALLTHROUGH */
	case ILLEGAL + WAIT_SABM:
		hdp->hd_unknown++;
#ifdef HDLCDEBUG
		hd_dumptrace (hdp);
#endif
		hd_writeinternal (hdp, FRMR, POLLOFF);
		hdp->hd_state = WAIT_SABM;
		SET_TIMER (hdp);
		break;
	}

	return (queued);
}
(hdp, "Unknown frame received: link down");
		(void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp);
		/* FALLTHROUGH: the unknown frame is also counted and FRMR'd. */
	case ILLEGAL + WAIT_SABM:
		hdp->hd_unknown++;
#ifdef HDLCDEBUG
		hd_dumptrace (hdp);
#endif
		hd_writeinternal (hdp, FRMR, POLLOFF);
		hdp->hd_state = WAIT_SABM;
		SET_TIMER (hdp);
		break;
	}

	return (queued);
}

/*
 * Process a received information frame (iframe).  The frame's N(R) is
 * validated first; then its N(S) is checked against our receive state
 * variable V(R) and against the window.  A frame that passes all the
 * checks has its HDLC header stripped and is handed up to the X.25
 * packet level via pk_input().  Returns TRUE when the frame was queued
 * upward, FALSE when it was rejected or discarded.
 */
process_iframe (hdp, fbuf, frame)
register struct hdcb *hdp;
struct mbuf *fbuf;
register struct Hdlc_iframe *frame;
{
	register int    nr = frame -> nr,
	                ns = frame -> ns,
	                pf = frame -> pf;
	register int    queued = FALSE;

	/*
	 * Validate the iframe's N(R) value.  Its N(R) value must be in
	 * sync with our V(S) value and our "last received nr".
	 */

	if (valid_nr (hdp, nr, FALSE) == FALSE) {
		frame_reject (hdp, Z, frame);
		return (queued);
	}


	/*
	 * This section tests the IFRAME for proper sequence.  That is,
	 * its sequence number N(S) MUST be equal to V(R).
	 * (NOTE(review): the original comment said V(S); the code below
	 * compares against hd_vr, the receive state variable.)
	 */

	if (ns != hdp->hd_vr) {
		hdp->hd_invalid_ns++;
		if (pf || (hdp->hd_condition & REJ_CONDITION) == 0) {
			hdp->hd_condition |= REJ_CONDITION;

			/*
			 * Flush the transmit queue. This is ugly but we
			 * have no choice. A reject response must be
			 * immediately sent to the DCE. Failure to do so
			 * may result in another out of sequence iframe
			 * arriving (and thus sending another reject)
			 * before the first reject is transmitted. This
			 * will cause the DCE to receive two or more
			 * rejects back to back, which must never happen.
			 */
			hd_flush (hdp->hd_ifp);
			hd_writeinternal (hdp, REJ, pf);
		}
		return (queued);
	}
	hdp->hd_condition &= ~REJ_CONDITION;

	/*
	 * This section finally tests the IFRAME's sequence number against
	 * the window size (K) and the sequence number of the last frame
	 * we have acknowledged.  If the IFRAME is completely correct then
	 * it is queued for the packet level.
	 */

	if (ns != (hdp -> hd_lasttxnr + hdp -> hd_xcp -> xc_lwsize) % MODULUS) {
		hdp -> hd_vr = (hdp -> hd_vr + 1) % MODULUS;
		if (pf == 1) {
			/* Must generate a RR or RNR with final bit on. */
			hd_writeinternal (hdp, RR, POLLON);
		} else

			/*
			 * Hopefully we can piggyback the RR, if not we will
			 * generate a RR when T3 timer expires.
			 */
			if (hdp -> hd_rrtimer == 0)
				hdp->hd_rrtimer = hd_t3;

		/* Forward iframe to packet level of X.25. */
		fbuf -> m_data += HDHEADERLN;
		fbuf -> m_len -= HDHEADERLN;
		fbuf -> m_pkthdr.len -= HDHEADERLN;
		fbuf -> m_pkthdr.rcvif = (struct ifnet *)hdp -> hd_pkp;
#ifdef BSD4_3
		fbuf->m_act = 0;	/* probably not necessary */
#else
		{
			/*
			 * Mark the record boundary: clear m_act on every
			 * link of the chain, then set it non-zero on the
			 * last mbuf.
			 */
			register struct mbuf *m;

			for (m = fbuf; m -> m_next; m = m -> m_next)
				m -> m_act = (struct mbuf *) 0;
			m -> m_act = (struct mbuf *) 1;
		}
#endif
		pk_input (fbuf);
		queued = TRUE;
		hd_start (hdp);
	} else {

		/*
		 * Here if the remote station has transmitted more iframes
		 * than the number which have been acknowledged plus K.
		 */
		hdp->hd_invalid_ns++;
		frame_reject (hdp, W, frame);
	}
	return (queued);
}

/*
 * This routine is used to determine if a value (the middle parameter)
 * is between two other values. The low value is the first parameter
 * the high value is the last parameter. The routine checks the middle
 * value to see if it is within the range of the first and last values.
 * The reason we need this routine is the values are modulo some base
 * hence a simple test for greater or less than is not sufficient.
 */

bool
range_check (rear, value, front)
int rear,
    value,
    front;
{
	register bool   result = FALSE;

	if (front > rear)
		result = (rear <= value) && (value <= front);
	else
		/* The window wraps around the modulus. */
		result = (rear <= value) || (value <= front);

	return (result);
}

/*
 * This routine handles all the frame reject conditions which can
 * arise as a result of secondary processing.
The frame reject + * condition Y (frame length error) are handled elsewhere. + */ + +static +frame_reject (hdp, rejectcode, frame) +struct hdcb *hdp; +struct Hdlc_iframe *frame; +{ + register struct Frmr_frame *frmr = &hd_frmr; + + frmr -> frmr_control = ((struct Hdlc_frame *) frame) -> control; + + frmr -> frmr_ns = frame -> ns; + frmr -> frmr_f1_0 = 0; + frmr -> frmr_nr = frame -> nr; + frmr -> frmr_f2_0 = 0; + + frmr -> frmr_0000 = 0; + frmr -> frmr_w = frmr -> frmr_x = frmr -> frmr_y = + frmr -> frmr_z = 0; + switch (rejectcode) { + case Z: + frmr -> frmr_z = 1;/* invalid N(R). */ + break; + + case Y: + frmr -> frmr_y = 1;/* iframe length error. */ + break; + + case X: + frmr -> frmr_x = 1;/* invalid information field. */ + frmr -> frmr_w = 1; + break; + + case W: + frmr -> frmr_w = 1;/* invalid N(S). */ + } + + hd_writeinternal (hdp, FRMR, POLLOFF); + + hdp->hd_state = WAIT_SABM; + SET_TIMER (hdp); +} + +/* + * This procedure is invoked when ever we receive a supervisor + * frame such as RR, RNR and REJ. All processing for these + * frames is done here. + */ + +process_sframe (hdp, frame, frametype) +register struct hdcb *hdp; +register struct Hdlc_sframe *frame; +int frametype; +{ + register int nr = frame -> nr, pf = frame -> pf, pollbit = 0; + + if (valid_nr (hdp, nr, pf) == TRUE) { + switch (frametype) { + case RR: + hdp->hd_condition &= ~REMOTE_RNR_CONDITION; + break; + + case RNR: + hdp->hd_condition |= REMOTE_RNR_CONDITION; + hdp->hd_retxcnt = 0; + break; + + case REJ: + hdp->hd_condition &= ~REMOTE_RNR_CONDITION; + rej_routine (hdp, nr); + } + + if (pf == 1) { + hdp->hd_retxcnt = 0; + hdp->hd_condition &= ~TIMER_RECOVERY_CONDITION; + + if (frametype == RR && hdp->hd_lastrxnr == hdp->hd_vs + && hdp->hd_timer == 0 && hdp->hd_txq.head == 0) + hd_writeinternal(hdp, RR, pf); + else + /* If any iframes have been queued because of the + timer condition, transmit then now. 
*/ + if (hdp->hd_condition & REMOTE_RNR_CONDITION) { + /* Remote is busy or timer condition, so only + send one. */ + if (hdp->hd_vs != hdp->hd_retxqi) + hd_send_iframe (hdp, hdp->hd_retxq[hdp->hd_vs], pollbit); + } + else /* Flush the retransmit list first. */ + while (hdp->hd_vs != hdp->hd_retxqi) + hd_send_iframe (hdp, hdp->hd_retxq[hdp->hd_vs], POLLOFF); + } + + hd_start (hdp); + } else + frame_reject (hdp, Z, (struct Hdlc_iframe *)frame); /* Invalid N(R). */ +} + +/* + * This routine tests the validity of the N(R) which we have received. + * If it is ok, then all the iframes which it acknowledges (if any) + * will be freed. + */ + +bool +valid_nr (hdp, nr, finalbit) +register struct hdcb *hdp; +register int finalbit; +{ + /* Make sure it really does acknowledge something. */ + if (hdp->hd_lastrxnr == nr) + return (TRUE); + + /* + * This section validates the frame's N(R) value. It's N(R) value + * must be in syncronization with our V(S) value and our "last + * received nr" variable. If it is correct then we are able to send + * more IFRAME's, else frame reject condition is entered. + */ + + if (range_check (hdp->hd_lastrxnr, nr, hdp->hd_vs) == FALSE) { + if ((hdp->hd_condition & TIMER_RECOVERY_CONDITION) && + range_check (hdp->hd_vs, nr, hdp->hd_xx) == TRUE) + hdp->hd_vs = nr; + + else { + hdp->hd_invalid_nr++; + return (FALSE); + } + } + + /* + * If we get to here, we do have a valid frame but it might be out + * of sequence. However, we should still accept the receive state + * number N(R) since it has already passed our previous test and it + * does acknowledge frames which we are sending. + */ + + KILL_TIMER (hdp); + free_iframes (hdp, &nr, finalbit);/* Free all acknowledged iframes */ + if (nr != hdp->hd_vs) + SET_TIMER (hdp); + + return (TRUE); +} + +/* + * This routine determines how many iframes need to be retransmitted. + * It then resets the Send State Variable V(S) to accomplish this. 
+ */ + +static +rej_routine (hdp, rejnr) +register struct hdcb *hdp; +register int rejnr; +{ + register int anchor; + + /* + * Flush the output queue. Any iframes queued for + * transmission will be out of sequence. + */ + + hd_flush (hdp->hd_ifp); + + /* + * Determine how many frames should be re-transmitted. In the case + * of a normal REJ this should be 1 to K. In the case of a timer + * recovery REJ (ie. a REJ with the Final Bit on) this could be 0. + */ + + anchor = hdp->hd_vs; + if (hdp->hd_condition & TIMER_RECOVERY_CONDITION) + anchor = hdp->hd_xx; + + anchor = (anchor - rejnr + 8) % MODULUS; + + if (anchor > 0) { + + /* There is at least one iframe to retransmit. */ + KILL_TIMER (hdp); + hdp->hd_vs = rejnr; + + while (hdp->hd_vs != hdp->hd_retxqi) + hd_send_iframe (hdp, hdp->hd_retxq[hdp->hd_vs], POLLOFF); + + } + hd_start (hdp); +} + +/* + * This routine frees iframes from the retransmit queue. It is called + * when a previously written iframe is acknowledged. + */ + +static +free_iframes (hdp, nr, finalbit) +register struct hdcb *hdp; +int *nr; +register int finalbit; + +{ + register int i, k; + + /* + * We need to do the following because of a funny quirk in the + * protocol. This case occures when in Timer recovery condition + * we get a N(R) which acknowledges all the outstanding iframes + * but with the Final Bit off. In this case we need to save the last + * iframe for possible retransmission even though it has already been + * acknowledged! + */ + + if ((hdp->hd_condition & TIMER_RECOVERY_CONDITION) && *nr == hdp->hd_xx && finalbit == 0) { + *nr = (*nr - 1 + 8) % MODULUS; +/* printf ("QUIRK\n"); */ + } + + k = (*nr - hdp->hd_lastrxnr + 8) % MODULUS; + + /* Loop here freeing all acknowledged iframes. 
*/ + for (i = 0; i < k; ++i) { + m_freem (hdp->hd_retxq[hdp->hd_lastrxnr]); + hdp->hd_retxq[hdp->hd_lastrxnr] = 0; + hdp->hd_lastrxnr = (hdp->hd_lastrxnr + 1) % MODULUS; + } + +} diff --git a/bsd/netccitt/hd_output.c b/bsd/netccitt/hd_output.c new file mode 100644 index 000000000..17fd5dedd --- /dev/null +++ b/bsd/netccitt/hd_output.c @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)hd_output.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* + * HDLC OUTPUT INTERFACE + * + * This routine is called when the X.25 packet layer output routine + * has a information frame (iframe) to write. It is also called + * by the input and control routines of the HDLC layer. 
+ */ + +hd_output (hdp, m0) +register struct hdcb *hdp; +struct mbuf *m0; +{ + struct x25config *xcp; + register struct mbuf *m = m0; + int len; + + if (m == NULL) + panic ("hd_output"); + if ((m->m_flags & M_PKTHDR) == 0) + panic ("hd_output 2"); + + if (hdp->hd_state != ABM) { + m_freem (m); + return; + } + + /* + * Make room for the hdlc header either by prepending + * another mbuf, or by adjusting the offset and length + * of the first mbuf in the mbuf chain. + */ + + M_PREPEND(m, HDHEADERLN, M_DONTWAIT); + if (m == NULL) + return; + for (len = 0; m; m = m->m_next) + len += m->m_len; + m = m0; + m->m_pkthdr.len = len; + + hd_append (&hdp->hd_txq, m); + hd_start (hdp); +} + +hd_start (hdp) +register struct hdcb *hdp; +{ + register struct mbuf *m; + + /* + * The iframe is only transmitted if all these conditions are FALSE. + * The iframe remains queued (hdp->hd_txq) however and will be + * transmitted as soon as these conditions are cleared. + */ + + while (!(hdp->hd_condition & (TIMER_RECOVERY_CONDITION | REMOTE_RNR_CONDITION | REJ_CONDITION))) { + if (hdp->hd_vs == (hdp->hd_lastrxnr + hdp->hd_xcp->xc_lwsize) % MODULUS) { + + /* We have now exceeded the maximum number of + outstanding iframes. Therefore, we must wait + until at least one is acknowledged if this + condition is not turned off before we are + requested to write another iframe. */ + hdp->hd_window_condition++; + break; + } + + /* hd_remove top iframe from transmit queue. */ + if ((m = hd_remove (&hdp->hd_txq)) == NULL) + break; + + hd_send_iframe (hdp, m, POLLOFF); + } +} + +/* + * This procedure is passed a buffer descriptor for an iframe. It builds + * the rest of the control part of the frame and then writes it out. It + * also starts the acknowledgement timer and keeps the iframe in the + * Retransmit queue (Retxq) just in case we have to do this again. + * + * Note: This routine is also called from hd_input.c when retransmission + * of old frames is required. 
+ */ + +hd_send_iframe (hdp, buf, poll_bit) +register struct hdcb *hdp; +register struct mbuf *buf; +int poll_bit; +{ + register struct Hdlc_iframe *iframe; + struct mbuf *m; + + KILL_TIMER (hdp); + + if (buf == 0) { + printf ("hd_send_iframe: zero arg\n"); +#ifdef HDLCDEBUG + hd_status (hdp); + hd_dumptrace (hdp); +#endif + hdp->hd_vs = (hdp->hd_vs + 7) % MODULUS; + return; + } + iframe = mtod (buf, struct Hdlc_iframe *); + + iframe -> hdlc_0 = 0; + iframe -> nr = hdp->hd_vr; + iframe -> pf = poll_bit; + iframe -> ns = hdp->hd_vs; + iframe -> address = ADDRESS_B; + hdp->hd_lasttxnr = hdp->hd_vr; + hdp->hd_rrtimer = 0; + + if (hdp->hd_vs == hdp->hd_retxqi) { + /* Check for retransmissions. */ + /* Put iframe only once in the Retransmission queue. */ + hdp->hd_retxq[hdp->hd_retxqi] = buf; + hdp->hd_retxqi = (hdp->hd_retxqi + 1) % MODULUS; + hdp->hd_iframes_out++; + } + + hdp->hd_vs = (hdp->hd_vs + 1) % MODULUS; + + hd_trace (hdp, TX, (struct Hdlc_frame *)iframe); + + /* Write buffer on device. */ + m = hdp->hd_dontcopy ? buf : m_copy(buf, 0, (int)M_COPYALL); + if (m == 0) { + printf("hdlc: out of mbufs\n"); + return; + } + (*hdp->hd_output)(hdp, m); + SET_TIMER (hdp); +} + +hd_ifoutput(hdp, m) +register struct mbuf *m; +register struct hdcb *hdp; +{ + /* + * Queue message on interface, and start output if interface + * not yet active. + */ + register struct ifnet *ifp = hdp->hd_ifp; + int s = splimp(); + + if (IF_QFULL(&ifp->if_snd)) { + IF_DROP(&ifp->if_snd); + /* printf("%s%d: HDLC says OK to send but queue full, may hang\n", + ifp->if_name, ifp->if_unit);*/ + m_freem(m); + } else { + IF_ENQUEUE(&ifp->if_snd, m); + if ((ifp->if_flags & IFF_OACTIVE) == 0) + (*ifp->if_start)(ifp); + } + splx(s); +} + + +/* + * This routine gets control when the timer expires because we have not + * received an acknowledgement for a iframe. 
+ */ + +hd_resend_iframe (hdp) +register struct hdcb *hdp; +{ + + if (hdp->hd_retxcnt++ < hd_n2) { + if (!(hdp->hd_condition & TIMER_RECOVERY_CONDITION)) { + hdp->hd_xx = hdp->hd_vs; + hdp->hd_condition |= TIMER_RECOVERY_CONDITION; + } + + hdp->hd_vs = hdp->hd_lastrxnr; + hd_send_iframe (hdp, hdp->hd_retxq[hdp->hd_vs], POLLON); + } else { + /* At this point we have not received a RR even after N2 + retries - attempt to reset link. */ + + hd_initvars (hdp); + hd_writeinternal (hdp, SABM, POLLOFF); + hdp->hd_state = WAIT_UA; + SET_TIMER (hdp); + hd_message (hdp, "Timer recovery failed: link down"); + (void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp); + } +} diff --git a/bsd/netccitt/hd_subr.c b/bsd/netccitt/hd_subr.c new file mode 100644 index 000000000..b65f6bf37 --- /dev/null +++ b/bsd/netccitt/hd_subr.c @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)hd_subr.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +hd_init () +{ + + hdintrq.ifq_maxlen = IFQ_MAXLEN; +} + +hd_ctlinput (prc, addr) +int prc; +struct sockaddr *addr; +{ + register struct x25config *xcp = (struct x25config *)addr; + register struct hdcb *hdp; + register struct ifaddr *ifa; + struct ifnet *ifp; + caddr_t pk_newlink(); + + if (addr->sa_family != AF_CCITT) + return (EAFNOSUPPORT); + if (xcp->xc_lptype != HDLCPROTO_LAPB) + return (EPROTONOSUPPORT); + ifa = ifa_ifwithaddr(addr); + if (ifa == 0 || ifa->ifa_addr->sa_family != AF_CCITT || + (ifp = ifa->ifa_ifp) == 0) + panic ("hd_ctlinput"); + for (hdp = hdcbhead; hdp; hdp = hdp->hd_next) + if (hdp->hd_ifp == ifp) + break; + + if (hdp == 0) { /* new interface */ + int error; + int hd_ifoutput(), hd_output(); + + /* an hdcb is now too big to fit in an mbuf */ + MALLOC(hdp, struct hdcb *, sizeof (*hdp), M_PCB, M_NOWAIT); + if (hdp == 0) + return (ENOBUFS); + bzero((caddr_t)hdp, sizeof(*hdp)); + hdp->hd_pkp = + (caddr_t) pk_newlink ((struct x25_ifaddr *) ifa, + (caddr_t) hdp); + ((struct x25_ifaddr *)ifa)->ia_pkcb = + (struct pkcb *) hdp->hd_pkp; + if (hdp -> hd_pkp == 0) { + FREE(hdp, M_PCB); + return (ENOBUFS); + } + hdp->hd_ifp = ifp; + hdp->hd_ifa = ifa; + hdp->hd_xcp = xcp; + hdp->hd_state = INIT; + hdp->hd_output = hd_ifoutput; + hdp->hd_next = hdcbhead; + hdcbhead = hdp; + } else if (hdp->hd_pkp == 0) { /* interface got reconfigured */ + hdp->hd_pkp = + (caddr_t) pk_newlink ((struct x25_ifaddr *) ifa, + (caddr_t) hdp); + ((struct x25_ifaddr *)ifa)->ia_pkcb = + (struct pkcb *) hdp->hd_pkp; + if (hdp -> hd_pkp == 0) { + FREE(hdp, M_PCB); + return (ENOBUFS); + } + } + + switch (prc) { + case PRC_IFUP: + if (xcp->xc_lwsize == 0 || + xcp->xc_lwsize > MAX_WINDOW_SIZE) + xcp->xc_lwsize = MAX_WINDOW_SIZE; + if (hdp->hd_state == INIT) + SET_TIMER 
(hdp); + break; + + case PRC_IFDOWN: + if (hdp->hd_state == ABM) + hd_message (hdp, "Operator shutdown: link closed"); + (void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp); + + /* fall thru to ... */ + + case PRC_DISCONNECT_REQUEST: + /* drop reference to pkcb --- it's dead meat */ + hdp->hd_pkp = (caddr_t) 0; + ((struct x25_ifaddr *)ifa)->ia_pkcb = (struct pkcb *) 0; + + hd_writeinternal (hdp, DISC, POLLON); + hdp->hd_state = DISC_SENT; + SET_TIMER (hdp); + } + return (0); +} + +hd_initvars (hdp) +register struct hdcb *hdp; +{ + register struct mbuf *m; + register int i; + + /* Clear Transmit queue. */ + while ((m = hd_remove (&hdp->hd_txq)) != NULL) + m_freem (m); + + /* Clear Retransmit queue. */ + i = hdp->hd_lastrxnr; + while (i != hdp->hd_retxqi) { + m_freem (hdp->hd_retxq[i]); + i = (i + 1) % MODULUS; + } + hdp->hd_retxqi = 0; + + hdp->hd_vs = hdp->hd_vr = 0; + hdp->hd_lasttxnr = hdp->hd_lastrxnr = 0; + hdp->hd_rrtimer = 0; + KILL_TIMER(hdp); + hdp->hd_retxcnt = 0; + hdp->hd_condition = 0; +} + +hd_decode (hdp, frame) +register struct hdcb *hdp; +struct Hdlc_frame *frame; +{ + register int frametype = ILLEGAL; + register struct Hdlc_iframe *iframe = (struct Hdlc_iframe *) frame; + register struct Hdlc_sframe *sframe = (struct Hdlc_sframe *) frame; + register struct Hdlc_uframe *uframe = (struct Hdlc_uframe *) frame; + + if (iframe -> hdlc_0 == 0) { + frametype = IFRAME; + hdp->hd_iframes_in++; + } + + else if (sframe -> hdlc_01 == 1) { + /* Supervisory format. */ + switch (sframe -> s2) { + case 0: + frametype = RR; + hdp->hd_rrs_in++; + break; + + case 1: + frametype = RNR; + hdp->hd_rnrs_in++; + break; + + case 2: + frametype = REJ; + hdp->hd_rejs_in++; + } + } + else if (uframe -> hdlc_11 == 3) { + /* Unnumbered format. 
*/ + switch (uframe -> m3) { + case 0: + frametype = DM; + break; + + case 1: + frametype = SABM; + break; + + case 2: + frametype = DISC; + break; + + case 3: + frametype = UA; + break; + + case 4: + frametype = FRMR; + hdp->hd_frmrs_in++; + } + } + return (frametype); +} + +/* + * This routine is called when the HDLC layer internally generates a + * command or response for the remote machine ( eg. RR, UA etc. ). + * Only supervisory or unnumbered frames are processed. + */ + +hd_writeinternal (hdp, frametype, pf) +register struct hdcb *hdp; +register int frametype, pf; +{ + register struct mbuf *buf; + struct Hdlc_frame *frame; + register struct Hdlc_sframe *sframe; + register struct Hdlc_uframe *uframe; + + MGETHDR (buf, M_DONTWAIT, MT_HEADER); + if (buf == 0) + return; + frame = mtod (buf, struct Hdlc_frame *); + sframe = mtod (buf, struct Hdlc_sframe *); + uframe = mtod (buf, struct Hdlc_uframe *); + + /* Assume a response - address structure for DTE */ + frame -> address = ADDRESS_A; + buf -> m_len = 2; + buf -> m_act = buf -> m_next = NULL; + + switch (frametype) { + case RR: + frame -> control = RR_CONTROL; + hdp->hd_rrs_out++; + break; + + case RNR: + frame -> control = RNR_CONTROL; + hdp->hd_rnrs_out++; + break; + + case REJ: + frame -> control = REJ_CONTROL; + hdp->hd_rejs_out++; + break; + + case SABM: + frame -> control = SABM_CONTROL; + frame -> address = ADDRESS_B; + break; + + case DISC: + if ((hdp->hd_ifp->if_flags & IFF_UP) == 0) { + hdp->hd_state = DISCONNECTED; + (void) m_freem (buf); + hd_flush (hdp->hd_ifp); + return; + } + frame -> control = DISC_CONTROL; + frame -> address = ADDRESS_B; + break; + + case DM: + frame -> control = DM_CONTROL; + break; + + case UA: + frame -> control = UA_CONTROL; + break; + + case FRMR: + frame -> control = FRMR_CONTROL; + bcopy ((caddr_t)&hd_frmr, (caddr_t)frame -> info, 3); + buf -> m_len = 5; + hdp->hd_frmrs_out++; + + } + + if (sframe -> hdlc_01 == 1) { + /* Supervisory format - RR, REJ, or RNR. 
*/ + sframe -> nr = hdp->hd_vr; + sframe -> pf = pf; + hdp->hd_lasttxnr = hdp->hd_vr; + hdp->hd_rrtimer = 0; + } + else + uframe -> pf = pf; + + hd_trace (hdp, TX, frame); + buf -> m_pkthdr.len = buf -> m_len; + (*hdp->hd_output) (hdp, buf); +} + +struct mbuf * +hd_remove (q) +struct hdtxq *q; +{ + register struct mbuf *m; + + m = q -> head; + if (m) { + if ((q -> head = m -> m_act) == NULL) + q -> tail = NULL; + m -> m_act = 0; + } + return (m); +} + +hd_append (q, m) +register struct hdtxq *q; +register struct mbuf *m; +{ + + m -> m_act = NULL; + if (q -> tail == NULL) + q -> head = m; + else + q -> tail -> m_act = m; + q -> tail = m; +} + +hd_flush (ifp) +struct ifnet *ifp; +{ + register struct mbuf *m; + register int s; + + while (1) { + s = splimp (); + IF_DEQUEUE (&ifp->if_snd, m); + splx (s); + if (m == 0) + break; + m_freem (m); + } +} + +hd_message (hdp, msg) +struct hdcb *hdp; +char *msg; +{ + char *format_ntn (); + + if (hdcbhead -> hd_next) + printf ("HDLC(%s): %s\n", format_ntn (hdp->hd_xcp), msg); + else + printf ("HDLC: %s\n", msg); +} + +#ifdef HDLCDEBUG +hd_status (hdp) +struct hdcb *hdp; +{ + printf ("HDLC STATUS:\n V(S)=%d, V(R)=%d, retxqi=%d,\n", + hdp->hd_vs, hdp->hd_vr, hdp->hd_retxqi); + + printf ("Last_rx_nr=%d, Last_tx_nr=%d,\n Condition=%d, Xx=%d\n", + hdp->hd_lastrxnr, hdp->hd_lasttxnr, hdp->hd_condition, hdp->hd_xx); +} +#endif diff --git a/bsd/netccitt/hd_timer.c b/bsd/netccitt/hd_timer.c new file mode 100644 index 000000000..17ccb091a --- /dev/null +++ b/bsd/netccitt/hd_timer.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)hd_timer.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* + * these can be patched with adb if the + * default values are inappropriate + */ + +int hd_t1 = T1; +int hd_t3 = T3; +int hd_n2 = N2; + +/* + * HDLC TIMER + * + * This routine is called every 500ms by the kernel. Decrement timer by this + * amount - if expired then process the event. 
+ */ + +hd_timer () +{ + register struct hdcb *hdp; + register int s = splimp (); + + for (hdp = hdcbhead; hdp; hdp = hdp->hd_next) { + if (hdp->hd_rrtimer && (--hdp->hd_rrtimer == 0)) { + if (hdp->hd_lasttxnr != hdp->hd_vr) + hd_writeinternal (hdp, RR, POLLOFF); + } + + if (!(hdp->hd_timer && --hdp->hd_timer == 0)) + continue; + + switch (hdp->hd_state) { + case INIT: + case DISC_SENT: + hd_writeinternal (hdp, DISC, POLLON); + break; + + case ABM: + if (hdp->hd_lastrxnr != hdp->hd_vs) { /* XXX */ + hdp->hd_timeouts++; + hd_resend_iframe (hdp); + } + break; + + case WAIT_SABM: + hd_writeinternal (hdp, FRMR, POLLOFF); + if (++hdp->hd_retxcnt == hd_n2) { + hdp->hd_retxcnt = 0; + hd_writeinternal (hdp, SABM, POLLOFF); + hdp->hd_state = WAIT_UA; + } + break; + + case DM_SENT: + if (++hdp->hd_retxcnt == hd_n2) { + /* Notify the packet level. */ + (void) pk_ctlinput (PRC_LINKDOWN, hdp->hd_pkp); + hdp->hd_retxcnt = 0; + hdp->hd_state = SABM_SENT; + hd_writeinternal (hdp, SABM, POLLOFF); + } else + hd_writeinternal (hdp, DM, POLLOFF); + break; + + case WAIT_UA: + if (++hdp->hd_retxcnt == hd_n2) { + hdp->hd_retxcnt = 0; + hd_writeinternal (hdp, DM, POLLOFF); + hdp->hd_state = DM_SENT; + } else + hd_writeinternal (hdp, SABM, POLLOFF); + break; + + case SABM_SENT: + /* Do this indefinitely. */ + hd_writeinternal (hdp, SABM, POLLON); + break; + + case DISCONNECTED: + /* + * Poll the interface driver flags waiting + * for the IFF_UP bit to come on. + */ + if (hdp->hd_ifp->if_flags & IFF_UP) + hdp->hd_state = INIT; + + } + SET_TIMER (hdp); + } + + splx (s); +} diff --git a/bsd/netccitt/hd_var.h b/bsd/netccitt/hd_var.h new file mode 100644 index 000000000..1bbe85a58 --- /dev/null +++ b/bsd/netccitt/hd_var.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
/*
 *
 * hdlc control block
 *
 */

/* Queue of outbound mbuf chains awaiting transmission on one link. */
struct hdtxq {
	struct mbuf *head;
	struct mbuf *tail;
};

/*
 * Per-link HDLC (LAPB) control block.  One of these exists for every
 * attached HDLC interface; they are chained through hd_next from
 * hdcbhead and scanned by hd_timer().
 */
struct hdcb {
	struct hdcb *hd_next;	/* pointer to next hdlc control block */
	char hd_state;		/* link state */
	char hd_vs;		/* send state variable */
	char hd_vr;		/* receive state variable */
	char hd_lastrxnr;	/* last received N(R) */
	char hd_lasttxnr;	/* last transmitted N(R) */
	char hd_condition;	/* exception-condition bit flags (below) */
#define TIMER_RECOVERY_CONDITION 0x01
#define REJ_CONDITION 0x02
#define REMOTE_RNR_CONDITION 0X04
	char hd_retxcnt;	/* consecutive retransmissions; limit is hd_n2 */
	char hd_xx;		/* NOTE(review): purpose not evident from this file */
	struct hdtxq hd_txq;	/* pending transmit queue */
	struct mbuf *hd_retxq[MODULUS];	/* unacknowledged I-frames, one slot per seq no. */
	char hd_retxqi;		/* index into hd_retxq */
	char hd_rrtimer;	/* RR generation timer (decremented by hd_timer()) */
	char hd_timer;		/* main link timer; 0 means stopped */
#define SET_TIMER(hdp) hdp->hd_timer = hd_t1
#define KILL_TIMER(hdp) hdp->hd_timer = 0
	char hd_dontcopy;	/* if-driver doesn't free I-frames */
	struct ifnet *hd_ifp;	/* device's network visible interface */
	struct ifaddr *hd_ifa;	/* device's X.25 network address */
	struct x25config *hd_xcp;
	caddr_t hd_pkp;		/* Level III junk */
	int (*hd_output)();	/* separate entry for HDLC direct output */

	/* link statistics */

	long hd_iframes_in;
	long hd_iframes_out;
	long hd_rrs_in;
	long hd_rrs_out;
	short hd_rejs_in;
	short hd_rejs_out;
	long hd_window_condition;
	short hd_invalid_ns;
	short hd_invalid_nr;
	short hd_timeouts;
	short hd_resets;
	short hd_unknown;
	short hd_frmrs_in;
	short hd_frmrs_out;
	short hd_rnrs_in;
	short hd_rnrs_out;
};

#ifdef KERNEL
/*
 * NOTE(review): these are tentative definitions (not extern) in a
 * header — classic common-linkage BSD style; every kernel object that
 * includes this file shares a single copy.
 */
struct hdcb *hdcbhead;	/* head of linked list of hdcb's */
struct Frmr_frame hd_frmr;	/* rejected frame diagnostic info */
struct ifqueue hdintrq;	/* hdlc packet input queue */

int hd_t1;	/* timer T1 value */
int hd_t3;	/* RR send timer */
int hd_n2;	/* frame retransmission limit */
#endif
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)hdlc.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef ORDER4 +#define FALSE 0 +#define TRUE 1 +typedef u_char octet; +typedef char bool; + +/* + * HDLC Packet format definitions + * This will eventually have to be rewritten without reference + * to bit fields, to be compliant with ANSI C and alignment safe. + */ + +#if BYTE_ORDER == BIG_ENDIAN +#define ORDER4(a, b, c, d) a , b , c , d +#define ORDER5(a, b, c, d, e) a , b , c , d , e +#endif + +#if BYTE_ORDER == LITTLE_ENDIAN +#define ORDER4(a, b, c, d) d , c , b , a +#define ORDER5(a, b, c, d, e) e , d , c , b , a +#endif +#endif + +#define MAX_INFO_LEN 4096+3+4 +#define ADDRESS_A 3 /* B'00000011' */ +#define ADDRESS_B 1 /* B'00000001' */ + +struct Hdlc_iframe { + octet address; + octet ORDER4(nr:3, pf:1, ns:3, hdlc_0:1); + octet i_field[MAX_INFO_LEN]; +}; + +struct Hdlc_sframe { + octet address; + octet ORDER4(nr:3, pf:1, s2:2, hdlc_01:2); +}; + +struct Hdlc_uframe { + octet address; + octet ORDER4(m3:3, pf:1, m2:2, hdlc_11:2); +}; + +struct Frmr_frame { + octet address; + octet control; + octet frmr_control; + octet ORDER4(frmr_nr:3, frmr_f1_0:1, frmr_ns:3, frmr_f2_0:1); + octet ORDER5(frmr_0000:4, frmr_z:1, frmr_y:1, frmr_x:1, frmr_w:1); +}; + +#define HDHEADERLN 2 +#define MINFRLN 2 /* Minimum frame length. 
*/ + +struct Hdlc_frame { + octet address; + octet control; + octet info[3]; /* min for FRMR */ +}; + +#define SABM_CONTROL 057 /* B'00101111' */ +#define UA_CONTROL 0143 /* B'01100011' */ +#define DISC_CONTROL 0103 /* B'01000011' */ +#define DM_CONTROL 017 /* B'00001111' */ +#define FRMR_CONTROL 0207 /* B'10000111' */ +#define RR_CONTROL 01 /* B'00000001' */ +#define RNR_CONTROL 05 /* B'00000101' */ +#define REJ_CONTROL 011 /* B'00001001' */ + +#define POLLOFF 0 +#define POLLON 1 + +/* Define Link State constants. */ + +#define INIT 0 +#define DM_SENT 1 +#define SABM_SENT 2 +#define ABM 3 +#define WAIT_SABM 4 +#define WAIT_UA 5 +#define DISC_SENT 6 +#define DISCONNECTED 7 +#define MAXSTATE 8 + +/* The following constants are used in a switch statement to process + frames read from the communications line. */ + +#define SABM 0 * MAXSTATE +#define DM 1 * MAXSTATE +#define DISC 2 * MAXSTATE +#define UA 3 * MAXSTATE +#define FRMR 4 * MAXSTATE +#define RR 5 * MAXSTATE +#define RNR 6 * MAXSTATE +#define REJ 7 * MAXSTATE +#define IFRAME 8 * MAXSTATE +#define ILLEGAL 9 * MAXSTATE + +#define T1 (3 * PR_SLOWHZ) /* IFRAME TIMEOUT - 3 seconds */ +#define T3 (T1 / 2) /* RR generate timeout - 1.5 seconds */ +#define N2 10 +#define MODULUS 8 +#define MAX_WINDOW_SIZE 7 + +#define Z 0 +#define Y 1 +#define X 2 +#define W 3 +#define A 4 + +#define TX 0 +#define RX 1 + +bool range_check (); +bool valid_nr (); +struct mbuf *hd_remove (); diff --git a/bsd/netccitt/if_x25subr.c b/bsd/netccitt/if_x25subr.c new file mode 100644 index 000000000..4edf1b77a --- /dev/null +++ b/bsd/netccitt/if_x25subr.c @@ -0,0 +1,820 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_x25subr.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#if INET +#include +#include +#endif + +#if NS +#include +#include +#endif + +#if ISO +int tp_incoming(); +#include +#include +#include +#endif + +extern struct ifnet loif; +struct llinfo_x25 llinfo_x25 = {&llinfo_x25, &llinfo_x25}; +#ifndef _offsetof +#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m)) +#endif +struct sockaddr *x25_dgram_sockmask; +struct sockaddr_x25 x25_dgmask = { + _offsetof(struct sockaddr_x25, x25_udata[1]), /* _len */ + 0, /* _family */ + 0, /* _net */ + { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, /* _addr */ + {0}, /* opts */ + -1, /* _udlen */ + {-1} /* _udata */ +}; + +struct if_x25stats { + int ifx_wrongplen; + int ifx_nophdr; +} if_x25stats; +int x25_autoconnect = 0; + +#define senderr(x) {error = x; goto bad;} +/* + * Ancillary routines + */ +static struct llinfo_x25 * +x25_lxalloc(rt) +register struct rtentry *rt; +{ + register struct llinfo_x25 *lx; + register struct sockaddr *dst = rt_key(rt); + register struct ifaddr *ifa; + + MALLOC(lx, struct llinfo_x25 *, sizeof (*lx), M_PCB, M_NOWAIT); + if (lx == 0) + return lx; + Bzero(lx, sizeof(*lx)); + lx->lx_rt = rt; + lx->lx_family = dst->sa_family; + RTHOLD(rt); + if (rt->rt_llinfo) + insque(lx, 
/*
 * x25_lxfree():
 *	Release one llinfo_x25 link-layer entry: disconnect any attached
 *	virtual circuit, unhook the entry from its rtentry, drop the
 *	route reference taken by x25_lxalloc(), and free the storage.
 */
x25_lxfree(lx)
register struct llinfo_x25 *lx;
{
	register struct rtentry *rt = lx->lx_rt;
	register struct pklcd *lcp = lx->lx_lcd;

	if (lcp) {
		lcp->lcd_upper = 0;	/* detach upcall before tearing the VC down */
		pk_disconnect(lcp);
	}
	/*
	 * If this entry heads the route's list and its successor still
	 * belongs to the same route, promote the successor; otherwise
	 * the route keeps no llinfo.  NOTE(review): the else arm clears
	 * rt_llinfo even when it did not point at lx — presumably safe
	 * given how entries are chained, but verify against x25_lxalloc.
	 */
	if ((rt->rt_llinfo == (caddr_t)lx) && (lx->lx_next->lx_rt == rt))
		rt->rt_llinfo = (caddr_t)lx->lx_next;
	else
		rt->rt_llinfo = 0;
	RTFREE(rt);
	remque(lx);
	FREE(lx, M_PCB);
}
/*
 * Process a x25 packet as datagram;
 */
x25_ifinput(lcp, m)
struct pklcd *lcp;
register struct mbuf *m;
{
	struct llinfo_x25 *lx = (struct llinfo_x25 *)lcp->lcd_upnext;
	register struct ifnet *ifp;
	struct ifqueue *inq;
	extern struct timeval time;
	int s, len, isr;	/* NOTE(review): `len' is declared but never used */

	/* No data, or the circuit left data-transfer: treat as a
	 * (possibly failed) connection event instead. */
	if (m == 0 || lcp->lcd_state != DATA_TRANSFER) {
		x25_connect_callback(lcp, 0);
		return;
	}
	pk_flowcontrol(lcp, 0, 1); /* Generate RR */
	ifp = m->m_pkthdr.rcvif;
	ifp->if_lastchange = time;
	/* Only MT_DATA mbufs proceed; anything else is dropped. */
	switch (m->m_type) {
	default:
		if (m)
			m_freem(m);
		return;

	case MT_DATA:
		/* FALLTHROUGH */;
	}
	/* Dispatch to the network-layer input queue recorded when the
	 * circuit was bound (see x25_lxalloc). */
	switch (lx->lx_family) {
#if INET
	case AF_INET:
		isr = NETISR_IP;
		inq = &ipintrq;
		break;

#endif
#if NS
	case AF_NS:
		isr = NETISR_NS;
		inq = &nsintrq;
		break;

#endif
#if ISO
	case AF_ISO:
		isr = NETISR_ISO;
		inq = &clnlintrq;
		break;
#endif
	default:
		m_freem(m);
		ifp->if_noproto++;
		return;
	}
	s = splimp();
	schednetisr(isr);
	if (IF_QFULL(inq)) {
		IF_DROP(inq);
		m_freem(m);
	} else {
		IF_ENQUEUE(inq, m);
		ifp->if_ibytes += m->m_pkthdr.len;
	}
	splx(s);
}
/*
 * x25_dgram_incoming():
 *	Accept (or refuse) an incoming X.25 call intended for datagram
 *	forwarding.  Looks up the caller's address in the routing table,
 *	chases the inverse-route link (rt_llinfo, maintained by
 *	x25_rtinvert) to the real host route, validates it, then
 *	confirms the call and attaches the circuit to that route.
 *	m0 carries the calling sockaddr_x25; its continuation is freed.
 */
x25_dgram_incoming(lcp, m0)
register struct pklcd *lcp;
struct mbuf *m0;
{
	register struct rtentry *rt, *nrt;
	register struct mbuf *m = m0->m_next; /* m0 has calling sockaddr_x25 */
	void x25_rtrequest();

	rt = rtalloc1(SA(&lcp->lcd_faddr), 0);
	if (rt == 0) {
refuse:	lcp->lcd_upper = 0;
	pk_close(lcp);
	return;
	}
	rt->rt_refcnt--;	/* hold no long-term reference on the inverse route */
	/* Must be one of our inverse (PROTO2) datagram routes. */
	if ((nrt = RT(rt->rt_llinfo)) == 0 || rt_mask(rt) != x25_dgram_sockmask)
		goto refuse;
	if ((nrt->rt_flags & RTF_UP) == 0) {
		/* Cached host route went down: re-resolve via the gateway. */
		rt->rt_llinfo = (caddr_t)rtalloc1(rt->rt_gateway, 0);
		rtfree(nrt);
		if ((nrt = RT(rt->rt_llinfo)) == 0)
			goto refuse;
		nrt->rt_refcnt--;
	}
	/* Only routes managed by this module may receive circuits. */
	if (nrt->rt_ifa == 0 || nrt->rt_ifa->ifa_rtrequest != x25_rtrequest)
		goto refuse;
	lcp->lcd_send(lcp); /* confirm call */
	x25_rtattach(lcp, nrt);
	m_freem(m);
}
+ */ + if ((rt->rt_ifp != ifp) || + (rt->rt_flags & (RTF_CLONING | RTF_GATEWAY)) || + ((lx = (struct llinfo_x25 *)rt->rt_llinfo) == 0)) { + senderr(ENETUNREACH); + } +if ((m->m_flags & M_PKTHDR) == 0) { + if_x25stats.ifx_nophdr++; + m = m_gethdr(M_NOWAIT, MT_HEADER); + if (m == 0) + senderr(ENOBUFS); + m->m_pkthdr.len = plen; + m->m_next = m0; +} +if (plen != m->m_pkthdr.len) { + if_x25stats.ifx_wrongplen++; + m->m_pkthdr.len = plen; +} +next_circuit: + lcp = lx->lx_lcd; + if (lcp == 0) { + lx->lx_lcd = lcp = pk_attach((struct socket *)0); + if (lcp == 0) + senderr(ENOBUFS); + lcp->lcd_upper = x25_connect_callback; + lcp->lcd_upnext = (caddr_t)lx; + lcp->lcd_packetsize = lx->lx_ia->ia_xc.xc_psize; + lcp->lcd_flags = X25_MBS_HOLD; + } + switch (lcp->lcd_state) { + case READY: + if (dst->sa_family == AF_INET && + ifp->if_type == IFT_X25DDN && + rt->rt_gateway->sa_family != AF_CCITT) + x25_ddnip_to_ccitt(dst, rt); + if (rt->rt_gateway->sa_family != AF_CCITT) { + if ((rt->rt_flags & RTF_XRESOLVE) == 0) + senderr(EHOSTUNREACH); + } else if (x25_autoconnect) + error = pk_connect(lcp, + (struct sockaddr_x25 *)rt->rt_gateway); + if (error) + senderr(error); + /* FALLTHROUGH */ + case SENT_CALL: + case DATA_TRANSFER: + if (sbspace(&lcp->lcd_sb) < 0) { + lx = lx->lx_next; + if (lx->lx_rt != rt) + senderr(ENOSPC); + goto next_circuit; + } + if (lx->lx_ia) + lcp->lcd_dg_timer = + lx->lx_ia->ia_xc.xc_dg_idletimo; + pk_send(lcp, m); + break; + default: + /* + * We count on the timer routine to close idle + * connections, if there are not enough circuits to go + * around. + * + * So throw away data for now. + * After we get it all working, we'll rewrite to handle + * actively closing connections (other than by timers), + * when circuits get tight. + * + * In the DDN case, the imp itself closes connections + * under heavy load. + */ + error = ENOBUFS; + bad: + if (m) + m_freem(m); + } + return (error); +} + +/* + * Simpleminded timer routine. 
/*
 * x25_iftimeout():
 *	Idle-circuit reaper for one interface.  Scans every logical
 *	channel of each PK control block bound to ifp and, for open
 *	datagram circuits whose idle timer just expired, invokes the
 *	upper-layer callback with a nil mbuf (which tears the circuit
 *	down — see x25_connect_callback / x25_ifinput).
 *	NOTE(review): the loop condition `--lcpp > pkcb->pk_chan' stops
 *	before pk_chan[0]; presumably channel 0 is reserved — confirm
 *	against pk_attach.
 */
x25_iftimeout(ifp)
struct ifnet *ifp;
{
	register struct pkcb *pkcb = 0;
	register struct pklcd **lcpp, *lcp;
	int s = splimp();	/* mask network interrupts during the scan */

	FOR_ALL_PKCBS(pkcb)
		if (pkcb->pk_ia->ia_ifp == ifp)
			for (lcpp = pkcb->pk_chan + pkcb->pk_maxlcn;
			     --lcpp > pkcb->pk_chan;)
				if ((lcp = *lcpp) &&
				    lcp->lcd_state == DATA_TRANSFER &&
				    (lcp->lcd_flags & X25_DG_CIRCUIT) &&
				    (lcp->lcd_dg_timer && --lcp->lcd_dg_timer == 0)) {
					lcp->lcd_upper(lcp, 0);
				}
	splx(s);
}
int x25_dont_rtinvert = 0;	/* debug switch: suppress inverse-route maintenance */

/*
 * x25_rtinvert():
 *	Maintain the inverse mapping from an X.121 (AF_CCITT) gateway
 *	address back to the host route that uses it, so incoming calls
 *	can be matched to a destination (see x25_dgram_incoming).
 *	cmd is RTM_ADD or RTM_DELETE; sa is the CCITT gateway address;
 *	rt is the forward host route.
 */
x25_rtinvert(cmd, sa, rt)
register struct sockaddr *sa;
register struct rtentry *rt;
{
	struct rtentry *rt2 = 0;
	/*
	 * rt_gateway contains PID indicating which proto
	 * family on the other end, so will be different
	 * from general host route via X.25.
	 */
	if (rt->rt_ifp->if_type == IFT_X25DDN || x25_dont_rtinvert)
		return;
	if (sa->sa_family != AF_CCITT)
		return;
	if (cmd != RTM_DELETE) {
		/* Add an RTF_PROTO2 route keyed by the X.121 address whose
		 * llinfo points back at the forward route. */
		rtrequest(RTM_ADD, sa, rt_key(rt), x25_dgram_sockmask,
		    RTF_PROTO2, &rt2);
		if (rt2) {
			rt2->rt_llinfo = (caddr_t) rt;
			RTHOLD(rt);
		}
		return;
	}
	rt2 = rt;
	/* Deletion: verify the inverse route really points at us
	 * before removing it. */
	if ((rt = rtalloc1(sa, 0)) == 0 ||
	    (rt->rt_flags & RTF_PROTO2) == 0 ||
	    rt->rt_llinfo != (caddr_t)rt2) {
		printf("x25_rtchange: inverse route foulup\n");
		return;
	} else
		rt2->rt_refcnt--;
	rtrequest(RTM_DELETE, sa, rt_key(rt2), x25_dgram_sockmask,
	    0, (struct rtentry **) 0);
}

/* Template CCITT sockaddr used to blank a destination before filling it. */
static struct sockaddr_x25 blank_x25 = {sizeof blank_x25, AF_CCITT};
/*
 * IP to X25 address routine copyright ACC, used by permission.
 */
union imp_addr {
	struct in_addr ip;
	struct imp {		/* overlay giving the historic DDN IMP fields */
		u_char s_net;
		u_char s_host;
		u_char s_lh;
		u_char s_impno;
	} imp;
};
/*
 * x25_ddnip_to_ccitt():
 *	Translate a DDN IP address into the corresponding X.121 address,
 *	writing the digits into the route's gateway sockaddr_x25.
 *	The IMP number and port are extracted from the IP address
 *	according to its class (A/B/C), then encoded as either a
 *	"physical" (flag digit 0) or "logical" (flag digit 1) DDN
 *	X.121 address.  (Per the comment above, this mapping is
 *	preserved for historical DDN compatibility only.)
 */
x25_ddnip_to_ccitt(src, rt)
struct sockaddr_in *src;
register struct rtentry *rt;
{
	register struct sockaddr_x25 *dst = (struct sockaddr_x25 *)rt->rt_gateway;
	union imp_addr imp_addr;
	int imp_no, imp_port, temp;
	char *x25addr = dst->x25_addr;


	imp_addr.ip = src->sin_addr;
	*dst = blank_x25;	/* reset the gateway sockaddr before filling digits */
	if ((imp_addr.imp.s_net & 0x80) == 0x00) {	/* class A */
		imp_no = imp_addr.imp.s_impno;
		imp_port = imp_addr.imp.s_host;
	} else if ((imp_addr.imp.s_net & 0xc0) == 0x80) {	/* class B */
		imp_no = imp_addr.imp.s_impno;
		imp_port = imp_addr.imp.s_lh;
	} else {		/* class C */
		imp_no = imp_addr.imp.s_impno / 32;
		imp_port = imp_addr.imp.s_impno % 32;
	}

	x25addr[0] = 12; /* length */
	/* DNIC is cleared by struct copy above */

	if (imp_port < 64) {	/* Physical:  0000 0 IIIHH00 [SS] *//* s_impno
				 *  -> III, s_host -> HH */
		x25addr[5] = 0;	/* set flag bit */
		x25addr[6] = imp_no / 100;
		x25addr[7] = (imp_no % 100) / 10;
		x25addr[8] = imp_no % 10;
		x25addr[9] = imp_port / 10;
		x25addr[10] = imp_port % 10;
	} else {		/* Logical:   0000 1 RRRRR00 [SS] *//* s
				 * _host * 256 + s_impno -> RRRRR */
		temp = (imp_port << 8) + imp_no;
		x25addr[5] = 1;
		x25addr[6] = temp / 10000;
		x25addr[7] = (temp % 10000) / 1000;
		x25addr[8] = (temp % 1000) / 100;
		x25addr[9] = (temp % 100) / 10;
		x25addr[10] = temp % 10;
	}
}
+ */ + int imp_no, imp_port, temp; + union imp_addr imp_addr; + { + /* + * First determine our IP addr for network + */ + register struct in_ifaddr *ina; + extern struct in_ifaddr *in_ifaddr; + + for (ina = in_ifaddr; ina; ina = ina->ia_next) + if (ina->ia_ifp == ia->ia_ifp) { + my_addr = ina->ia_addr.sin_addr; + break; + } + } + { + + register char *x25addr = dst->x25_addr; + + switch (x25addr[5] & 0x0f) { + case 0: /* Physical: 0000 0 IIIHH00 [SS] */ + imp_no = + ((int) (x25addr[6] & 0x0f) * 100) + + ((int) (x25addr[7] & 0x0f) * 10) + + ((int) (x25addr[8] & 0x0f)); + + + imp_port = + ((int) (x25addr[9] & 0x0f) * 10) + + ((int) (x25addr[10] & 0x0f)); + break; + case 1: /* Logical: 0000 1 RRRRR00 [SS] */ + temp = ((int) (x25addr[6] & 0x0f) * 10000) + + ((int) (x25addr[7] & 0x0f) * 1000) + + ((int) (x25addr[8] & 0x0f) * 100) + + ((int) (x25addr[9] & 0x0f) * 10) + + ((int) (x25addr[10] & 0x0f)); + + imp_port = temp >> 8; + imp_no = temp & 0xff; + break; + default: + return (0L); + } + imp_addr.ip = my_addr; + if ((imp_addr.imp.s_net & 0x80) == 0x00) { + /* class A */ + imp_addr.imp.s_host = imp_port; + imp_addr.imp.s_impno = imp_no; + imp_addr.imp.s_lh = 0; + } else if ((imp_addr.imp.s_net & 0xc0) == 0x80) { + /* class B */ + imp_addr.imp.s_lh = imp_port; + imp_addr.imp.s_impno = imp_no; + } else { + /* class C */ + imp_addr.imp.s_impno = (imp_no << 5) + imp_port; + } + } + sin.sin_addr = imp_addr.ip; + sa = (struct sockaddr *)&sin; + } else { + /* + * This uses the X25 routing table to do inverse + * lookup of x25 address to sockaddr. + */ + if (rt = rtalloc1(SA(dst), 0)) { + sa = rt->rt_gateway; + rt->rt_refcnt--; + } + } + /* + * Call to rtalloc1 will create rtentry for reverse path + * to callee by virtue of cloning magic and will allocate + * space for local control block. 
int x25_startproto = 1;	/* nonzero: register datagram listeners at init time */

/*
 * pk_init():
 *	Packet-level initialization hook: register listeners for the
 *	standard datagram protocol identifiers (0xcc = RFC 877 IP,
 *	0x81 — see x25_dgprototab) unless x25_startproto is cleared.
 */
pk_init()
{
	/*
	 * warning, sizeof (struct sockaddr_x25) > 32,
	 * but contains no data of interest beyond 32
	 */
	if (x25_startproto) {
		pk_protolisten(0xcc, 1, x25_dgram_incoming);
		pk_protolisten(0x81, 1, x25_dgram_incoming);
	}
}

/* Table mapping X.25 call-user-data protocol ids (SPI) to the input
 * routine that should accept calls carrying that id. */
struct x25_dgproto {
	u_char spi;		/* subsequent protocol identifier (first CUD octet) */
	u_char spilen;		/* number of significant CUD octets */
	int (*f)();		/* listener to invoke for matching calls */
} x25_dgprototab[] = {
#if (ISO) && (TPCONS)
{ 0x0, 0, tp_incoming},
#endif
{ 0xcc, 1, x25_dgram_incoming},
{ 0xcd, 1, x25_dgram_incoming},
{ 0x81, 1, x25_dgram_incoming},
};

/*
 * pk_user_protolisten():
 *	User-driven (un)registration of a protocol listener.
 *	info[0] selects the SPI (must appear in x25_dgprototab);
 *	info[1] nonzero means start listening, zero means find the
 *	matching listener and disconnect it.  Returns 0 or ESRCH, or
 *	pk_protolisten's result.
 */
pk_user_protolisten(info)
register u_char *info;
{
	register struct x25_dgproto *dp = x25_dgprototab
		+ ((sizeof x25_dgprototab) / (sizeof *dp));
	register struct pklcd *lcp;

	/* Search the table backwards for the requested SPI. */
	while (dp > x25_dgprototab)
		if ((--dp)->spi == info[0])
			goto gotspi;
	return ESRCH;

gotspi:	if (info[1])
		return pk_protolisten(dp->spi, dp->spilen, dp->f);
	for (lcp = pk_listenhead; lcp; lcp = lcp->lcd_listen)
		if (lcp->lcd_laddr.x25_udlen == dp->spilen &&
		    Bcmp(&dp->spi, lcp->lcd_laddr.x25_udata, dp->spilen) == 0) {
			pk_disconnect(lcp);
			return 0;
		}
	return ESRCH;
}
/*
 * x25_rtattach():
 *	Attach virtual circuit lcp0 to routing entry rt for datagram
 *	forwarding.  If the route already has a circuit: a READY (idle)
 *	one is drained into lcp0 and closed; an active one causes a
 *	fresh llinfo_x25 entry to be chained instead.  Finally lcp0 is
 *	wired up with x25_ifinput as its receive upcall.
 *	NOTE(review): returns ENOBUFS on allocation failure but falls
 *	off the end otherwise (implicit int, K&R) — callers appear to
 *	ignore the return value; confirm before relying on it.
 */
x25_rtattach(lcp0, rt)
register struct pklcd *lcp0;
struct rtentry *rt;
{
	register struct llinfo_x25 *lx = (struct llinfo_x25 *)rt->rt_llinfo;
	register struct pklcd *lcp;
	register struct mbuf *m;
	if (lcp = lx->lx_lcd) { /* adding an additional VC */
		if (lcp->lcd_state == READY) {
			/* Old circuit is idle: move its queued output to
			 * the new circuit and close it. */
			transfer_sockbuf(&lcp->lcd_sb, pk_output, lcp0);
			lcp->lcd_upper = 0;
			pk_close(lcp);
		} else {
			/* Old circuit is busy: chain a new llinfo entry. */
			lx = x25_lxalloc(rt);
			if (lx == 0)
				return ENOBUFS;
		}
	}
	lx->lx_lcd = lcp = lcp0;
	lcp->lcd_upper = x25_ifinput;	/* deliver incoming data as datagrams */
	lcp->lcd_upnext = (caddr_t)lx;
}
100644 index 000000000..aacbce91c --- /dev/null +++ b/bsd/netccitt/llc_input.c @@ -0,0 +1,489 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Dirk Husemann and the Computer Science Department (IV) of + * the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)llc_input.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +/* + * This module implements LLC as specified by ISO 8802-2. + */ + + +/* + * llcintr() handles all LLC frames (except ISO CLNS ones for the time being) + * and tries to pass them on to the appropriate network layer entity. 
+ */ +void +llcintr() +{ + register struct mbuf *m; + register int i; + register int frame_kind; + register u_char cmdrsp; + struct llc_linkcb *linkp; + struct rtentry *sirt; + struct npaidbentry *sapinfo; + struct sdl_hdr *sdlhdr; + struct llc *frame; + char *c; + long expected_len; + + struct ifnet *ifp; + struct rtentry *llrt; + struct rtentry *nlrt; + + for (;;) { + i = splimp(); + IF_DEQUEUE(&llcintrq, m); + splx(i); + if (m == 0) + break; +#if DIAGNOSTIC + if ((m->m_flags & M_PKTHDR) == 0) + panic("llcintr no HDR"); +#endif + /* + * Get ifp this packet was received on + */ + ifp = m->m_pkthdr.rcvif; + + sdlhdr = mtod(m, struct sdl_hdr *); + + /* + * [Copied from net/ip_input.c] + * + * Check that the amount of data in the buffers is + * at least as much as the LLC header tells us. + * Trim mbufs if longer than expected. + * Drop packets if shorter than we think they are. + * + * Layout of mbuf chain at this point: + * + * +-------------------------------+----+ -\ + * | sockaddr_dl src - sdlhdr_src | 20 | \ + * +-------------------------------+----+ | + * | sockaddr_dl dst - sdlhdr_dst | 20 | > sizeof(struct sdl_hdr) == 44 + * +-------------------------------+----+ | + * | LLC frame len - sdlhdr_len | 04 | / + * +-------------------------------+----+ -/ + * / + * | m_next + * \ + * +----------------------------+----+ -\ + * | llc DSAP | 01 | \ + * +----------------------------+----+ | + * | llc SSAP | 01 | | + * +----------------------------+----+ > sdlhdr_len + * | llc control | 01 | | + * +----------------------------+----+ | + * | ... 
| | / + * -/ + * + * Thus the we expect to have exactly + * (sdlhdr->sdlhdr_len+sizeof(struct sdl_hdr)) in the mbuf chain + */ + expected_len = sdlhdr->sdlhdr_len + sizeof(struct sdl_hdr); + + if (m->m_pkthdr.len < expected_len) { + m_freem(m); + continue; + } + if (m->m_pkthdr.len > expected_len) { + if (m->m_len == m->m_pkthdr.len) { + m->m_len = expected_len; + m->m_pkthdr.len = expected_len; + } else + m_adj(m, expected_len - m->m_pkthdr.len); + } + + /* + * Get llc header + */ + if (m->m_len > sizeof(struct sdl_hdr)) + frame = mtod((struct mbuf *)((struct sdl_hdr*)(m+1)), + struct llc *); + else frame = mtod(m->m_next, struct llc *); + if (frame == (struct llc *) NULL) + panic("llcintr no llc header"); + + /* + * Now check for bogus I/S frame, i.e. those with a control + * field telling us they're an I/S frame yet their length + * is less than the established I/S frame length (DSAP + SSAP + + * control + N(R)&P/F = 4) --- we drop those suckers + */ + if (((frame->llc_control & 0x03) != 0x03) + && ((expected_len - sizeof(struct sdl_hdr)) < LLC_ISFRAMELEN)) { + m_freem(m); + printf("llc: hurz error\n"); + continue; + } + + /* + * Get link control block for the addressed link connection. + * If there is none we take care of it later on. + */ + cmdrsp = (frame->llc_ssap & 0x01); + frame->llc_ssap &= ~0x01; + if (llrt = rtalloc1((struct sockaddr *)&sdlhdr->sdlhdr_src, 0)) + llrt->rt_refcnt--; +#ifdef notyet + else llrt = npaidb_enter(&sdlhdr->sdlhdr_src, 0, 0, 0); +#endif /* notyet */ + else { + /* + * We cannot do anything currently here as we + * don't `know' this link --- drop it + */ + m_freem(m); + continue; + } + linkp = ((struct npaidbentry *)(llrt->rt_llinfo))->np_link; + nlrt = ((struct npaidbentry *)(llrt->rt_llinfo))->np_rt; + + /* + * If the link is not existing right now, we can try and look up + * the SAP info block. 
+ */ + if ((linkp == 0) && frame->llc_ssap) + sapinfo = llc_getsapinfo(frame->llc_dsap, ifp); + + /* + * Handle XID and TEST frames + * XID: if DLSAP == 0, return type-of-services + * window-0 + * DLSAP-0 + * format-identifier-? + * if DLSAP != 0, locate sapcb and return + * type-of-services + * SAP-window + * format-identifier-? + * TEST: swap (snpah_dst, snpah_src) and return frame + * + * Also toggle the CMD/RESP bit + * + * Is this behaviour correct? Check ISO 8802-2 (90)! + */ + frame_kind = llc_decode(frame, (struct llc_linkcb *)0); + switch(frame_kind) { + case LLCFT_XID: + if (linkp || sapinfo) { + if (linkp) + frame->llc_window = linkp->llcl_window; + else frame->llc_window = sapinfo->si_window; + frame->llc_fid = 9; /* XXX */ + frame->llc_class = sapinfo->si_class; + frame->llc_ssap = frame->llc_dsap; + } else { + frame->llc_window = 0; + frame->llc_fid = 9; + frame->llc_class = 1; + frame->llc_dsap = frame->llc_ssap = 0; + } + + /* fall thru to */ + case LLCFT_TEST: + sdl_swapaddr(&(mtod(m, struct sdl_hdr *)->sdlhdr_dst), + &(mtod(m, struct sdl_hdr *)->sdlhdr_src)); + + /* Now set the CMD/RESP bit */ + frame->llc_ssap |= (cmdrsp == 0x0 ? 0x1 : 0x0); + + /* Ship it out again */ + (*ifp->if_output)(ifp, m, + (struct sockaddr *) &(mtod(m, struct sdl_hdr *)->sdlhdr_dst), + (struct rtentry *) 0); + continue; + } + + /* + * Create link control block in case it is not existing + */ + if (linkp == 0 && sapinfo) { + if ((linkp = llc_newlink(&sdlhdr->sdlhdr_src, ifp, nlrt, + (nlrt == 0) ? 
0 : nlrt->rt_llinfo, + llrt)) == 0) { + printf("llcintr: couldn't create new link\n"); + m_freem(m); + continue; + } + ((struct npaidbentry *)llrt->rt_llinfo)->np_link = linkp; + } else if (linkp == 0) { + /* The link is not known to us, drop the frame and continue */ + m_freem(m); + continue; + } + + /* + * Drop SNPA header and get rid of empty mbuf at the + * front of the mbuf chain (I don't like 'em) + */ + m_adj(m, sizeof(struct sdl_hdr)); + /* + * LLC_UFRAMELEN is sufficient, m_pullup() will pull up + * the min(m->m_len, maxprotohdr_len [=40]) thus doing + * the trick ... + */ + if ((m = m_pullup(m, LLC_UFRAMELEN))) + /* + * Pass it on thru the elements of procedure + */ + llc_input(linkp, m, cmdrsp); + } + return; +} + +/* + * llc_input() --- We deal with the various incoming frames here. + * Basically we (indirectly) call the appropriate + * state handler function that's pointed to by + * llcl_statehandler. + * + * The statehandler returns an action code --- + * further actions like + * o notify network layer + * o block further sending + * o deblock link + * o ... + * are then enacted accordingly. + */ +llc_input(struct llc_linkcb *linkp, struct mbuf *m, u_char cmdrsp) +{ + int frame_kind; + int pollfinal; + int action = 0; + struct llc *frame; + struct ifnet *ifp = linkp->llcl_if; + + if ((frame = mtod(m, struct llc *)) == (struct llc *) 0) { + m_freem(m); + return 0; + } + pollfinal = ((frame->llc_control & 0x03) == 0x03) ? 
+ LLCGBITS(frame->llc_control, u_pf) : + LLCGBITS(frame->llc_control_ext, s_pf); + + /* + * first decode the frame + */ + frame_kind = llc_decode(frame, linkp); + + switch (action = llc_statehandler(linkp, frame, frame_kind, cmdrsp, + pollfinal)) { + case LLC_DATA_INDICATION: + m_adj(m, LLC_ISFRAMELEN); + if (m = m_pullup(m, NLHDRSIZEGUESS)) { + m->m_pkthdr.rcvif = (struct ifnet *)linkp->llcl_nlnext; + (*linkp->llcl_sapinfo->si_input)(m); + } + break; + } + + /* release mbuf if not an info frame */ + if (action != LLC_DATA_INDICATION && m) + m_freem(m); + + /* try to get frames out ... */ + llc_start(linkp); + + return 0; +} + +/* + * This routine is called by configuration setup. It sets up a station control + * block and notifies all registered upper level protocols. + */ +caddr_t +llc_ctlinput(int prc, struct sockaddr *addr, caddr_t info) +{ + struct ifnet *ifp; + struct ifaddr *ifa; + struct dll_ctlinfo *ctlinfo = (struct dll_ctlinfo *)info; + u_char sap; + struct dllconfig *config; + caddr_t pcb; + struct rtentry *nlrt; + struct rtentry *llrt; + struct llc_linkcb *linkp; + register int i; + + /* info must point to something valid at all times */ + if (info == 0) + return 0; + + if (prc == PRC_IFUP || prc == PRC_IFDOWN) { + /* we use either this set ... */ + ifa = ifa_ifwithaddr(addr); + ifp = ifa ? 
ifa->ifa_ifp : 0; + if (ifp == 0) + return 0; + + sap = ctlinfo->dlcti_lsap; + config = ctlinfo->dlcti_cfg; + pcb = (caddr_t) 0; + nlrt = (struct rtentry *) 0; + } else { + /* or this one */ + sap = 0; + config = (struct dllconfig *) 0; + pcb = ctlinfo->dlcti_pcb; + nlrt = ctlinfo->dlcti_rt; + + if ((llrt = rtalloc1(nlrt->rt_gateway, 0))) + llrt->rt_refcnt--; + else return 0; + + linkp = ((struct npaidbentry *)llrt->rt_llinfo)->np_link; + } + + switch (prc) { + case PRC_IFUP: + (void) llc_setsapinfo(ifp, addr->sa_family, sap, config); + return 0; + + case PRC_IFDOWN: { + register struct llc_linkcb *linkp; + register struct llc_linkcb *nlinkp; + register int i; + + /* + * All links are accessible over the doubly linked list llccb_q + */ + if (!LQEMPTY) { + /* + * A for-loop is not that great an idea as the linkp + * will get deleted by llc_timer() + */ + linkp = LQFIRST; + while (LQVALID(linkp)) { + nlinkp = LQNEXT(linkp); + if (linkp->llcl_if = ifp) { + i = splimp(); + (void)llc_statehandler(linkp, (struct llc *)0, + NL_DISCONNECT_REQUEST, + 0, 1); + splx(i); + } + linkp = nlinkp; + } + } + } + + case PRC_CONNECT_REQUEST: + if (linkp == 0) { + if ((linkp = llc_newlink((struct sockaddr_dl *) nlrt->rt_gateway, + nlrt->rt_ifp, nlrt, + pcb, llrt)) == 0) + return (0); + ((struct npaidbentry *)llrt->rt_llinfo)->np_link = linkp; + i = splimp(); + (void)llc_statehandler(linkp, (struct llc *) 0, + NL_CONNECT_REQUEST, 0, 1); + splx(i); + } + return ((caddr_t)linkp); + + case PRC_DISCONNECT_REQUEST: + if (linkp == 0) + panic("no link control block!"); + + i = splimp(); + (void)llc_statehandler(linkp, (struct llc *) 0, + NL_DISCONNECT_REQUEST, 0, 1); + splx(i); + + /* + * The actual removal of the link control block is done by the + * cleaning neutrum (i.e. llc_timer()). 
+ */ + break; + + case PRC_RESET_REQUEST: + if (linkp == 0) + panic("no link control block!"); + + i = splimp(); + (void)llc_statehandler(linkp, (struct llc *) 0, + NL_RESET_REQUEST, 0, 1); + splx(i); + + break; + + } + + return 0; +} diff --git a/bsd/netccitt/llc_output.c b/bsd/netccitt/llc_output.c new file mode 100644 index 000000000..951429296 --- /dev/null +++ b/bsd/netccitt/llc_output.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Dirk Husemann and the Computer Science Department (IV) of + * the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)llc_output.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +/* + * llc_output() --- called by an upper layer (network layer) entity whenever + * there is an INFO frame to be transmitted. We enqueue the + * info frame and call llc_start() to do the actual sending. 
+ */ + +llc_output(struct llc_linkcb *linkp, struct mbuf *m) +{ + register int i; + + i = splimp(); + LLC_ENQUEUE(linkp, m); + llc_start(linkp); + splx(i); + +} + + +/* + * llc_start() --- We try to subsequently dequeue all the frames available and + * send them out. + */ +void +llc_start(struct llc_linkcb *linkp) +{ + register int i; + register struct mbuf *m; + int action; + + while ((LLC_STATEEQ(linkp, NORMAL) || LLC_STATEEQ(linkp, BUSY) || + LLC_STATEEQ(linkp, REJECT)) && + (linkp->llcl_slotsfree > 0) && + (LLC_GETFLAG(linkp, REMOTE_BUSY) == 0)) { + LLC_DEQUEUE(linkp, m); + if (m == NULL) + break; + LLC_SETFRAME(linkp, m); + (void)llc_statehandler(linkp, (struct llc *) 0, NL_DATA_REQUEST, + 0, 0); + } +} + + +/* + * llc_send() --- Handles single frames. If dealing with INFO frames we need to + * prepend the LLC header, otherwise we just allocate an mbuf. + * In both cases the actual send is done by llc_rawsend(). + */ +llc_send(struct llc_linkcb *linkp, int frame_kind, int cmdrsp, int pollfinal) +{ + register struct mbuf *m = (struct mbuf *)0; + register struct llc *frame; + + if (frame_kind == LLCFT_INFO) + m = linkp->llcl_output_buffers[llc_seq2slot(linkp, + linkp->llcl_vs)]; + LLC_GETHDR(frame, m); + + /* pass it on to llc_rawsend() */ + llc_rawsend(linkp, m, frame, frame_kind, linkp->llcl_vs, cmdrsp, pollfinal); + + if (frame_kind == LLCFT_INFO) + LLC_INC(linkp->llcl_vs); + + return 0; +} + +/* + * llc_resend() --- llc_resend() retransmits all unacknowledged INFO frames. 
+ */ +llc_resend(struct llc_linkcb *linkp, int cmdrsp, int pollfinal) +{ + register struct llc *frame; + register struct mbuf *m; + register int seq, slot; + + if (linkp->llcl_slotsfree < linkp->llcl_window) + /* assert lock between nr_received & V(S) */ + if (linkp->llcl_nr_received != linkp->llcl_vs) + panic("llc: V(S) != N(R) received\n"); + + for (slot = llc_seq2slot(linkp, linkp->llcl_vs); + slot != linkp->llcl_freeslot; + LLC_INC(linkp->llcl_vs), + slot = llc_seq2slot(linkp, linkp->llcl_vs)) { + m = linkp->llcl_output_buffers[slot]; + LLC_GETHDR(frame, m); + llc_rawsend(linkp, m, frame, LLCFT_INFO, linkp->llcl_vs, + cmdrsp, pollfinal); + pollfinal = 0; + } + + return 0; +} + +/* + * llc_rawsend() --- constructs an LLC frame and sends it out via the + * associated interface of the link control block. + * + * We need to make sure that outgoing frames have the correct length, + * in particular the 4 byte ones (RR, RNR, REJ) as LLC_GETHDR() will + * set the mbuf len to 3 as default len for non INFO frames ... + * + * Frame kind Length (w/o MAC header, {D,S}SAP incl.) 
+ * -------------------------------------------------------------- + * DISC, SABME, UA, DM 3 bytes ({D,S}SAP + CONTROL) + * RR, RNR, REJ 4 bytes ({D,S}SAP + CONTROL0 + CONTROL1) + * XID 6 bytes ({D,S}SAP + CONTROL0 + FI,CLASS,WINDOW) + * FRMR 7 bytes ({D,S}SAP + CONTROL0 + REJ CONTROL,V(S),V(R),CAUSE) + * INFO 4 -- MTU + * UI, TEST 3 -- MTU + * + */ +#define LLC_SETLEN(m, l) (m)->m_pkthdr.len = (m)->m_len = (l) + +llc_rawsend(struct llc_linkcb *linkp, struct mbuf *m, struct llc *frame, + int frame_kind, int vs, int cmdrsp, int pollfinal) +{ + register short adjust = LLC_UFRAMELEN; + struct ifnet *ifp; + + switch (frame_kind) { + /* supervisory and information frames */ + case LLCFT_INFO: + frame->llc_control = LLC_INFO; + LLCSBITS(frame->llc_control, i_ns, vs); + LLCSBITS(frame->llc_control_ext, i_nr, linkp->llcl_vr); + adjust = LLC_ISFRAMELEN; + break; + case LLCFT_RR: + frame->llc_control = LLC_RR; + LLC_SETLEN(m, LLC_ISFRAMELEN); + LLCSBITS(frame->llc_control_ext, s_nr, linkp->llcl_vr); + adjust = LLC_ISFRAMELEN; + break; + case LLCFT_RNR: + frame->llc_control = LLC_RNR; + LLC_SETLEN(m, LLC_ISFRAMELEN); + LLCSBITS(frame->llc_control_ext, s_nr, linkp->llcl_vr); + adjust = LLC_ISFRAMELEN; + break; + case LLCFT_REJ: + frame->llc_control = LLC_REJ; + LLC_SETLEN(m, LLC_ISFRAMELEN); + LLCSBITS(frame->llc_control_ext, s_nr, linkp->llcl_vr); + adjust = LLC_ISFRAMELEN; + break; + /* unnumbered frames */ + case LLCFT_DM: + frame->llc_control = LLC_DM; + break; + case LLCFT_SABME: + frame->llc_control = LLC_SABME; + break; + case LLCFT_DISC: + frame->llc_control = LLC_DISC; + break; + case LLCFT_UA: + frame->llc_control = LLC_UA; + break; + case LLCFT_UI: + frame->llc_control = LLC_UI; + break; + case LLCFT_FRMR: + frame->llc_control = LLC_FRMR; + /* get more space --- FRMR frame are longer then usual */ + LLC_SETLEN(m, LLC_FRMRLEN); + bcopy((caddr_t) &linkp->llcl_frmrinfo, + (caddr_t) &frame->llc_frmrinfo, + sizeof(struct frmrinfo)); + break; + default: + /* + * We don't 
send {XID, TEST} frames + */ + if (m) + m_freem(m); + return; + } + + /* + * Fill in DSAP/SSAP + */ + frame->llc_dsap = frame->llc_ssap = LLSAPADDR(&linkp->llcl_addr); + frame->llc_ssap |= cmdrsp; + + /* + * Check for delayed action pending. ISO 8802-2, 7.9.2 (5) + * and ISO 8802-2, 7.9.2.3 (32), (34), (36) pertain to this + * piece of code --- hopefully we got it right here (i.e. + * in the spirit of (32), (34), and (36) ... + */ + switch (frame_kind) { + case LLCFT_RR: + case LLCFT_RNR: + case LLCFT_REJ: + case LLCFT_INFO: + switch (LLC_GETFLAG(linkp, DACTION)) { + case LLC_DACKCMD: + case LLC_DACKRSP: + LLC_STOPTIMER(linkp, DACTION); + break; + case LLC_DACKCMDPOLL: + if (cmdrsp == LLC_CMD) { + pollfinal = 1; + LLC_STOPTIMER(linkp, DACTION); + } + break; + case LLC_DACKRSPFINAL: + if (cmdrsp == LLC_RSP) { + pollfinal = 1; + LLC_STOPTIMER(linkp, DACTION); + } + break; + } + break; + } + + if (adjust == LLC_UFRAMELEN) + LLCSBITS(frame->llc_control, u_pf, pollfinal); + else LLCSBITS(frame->llc_control_ext, s_pf, pollfinal); + + /* + * Get interface to send frame onto + */ + ifp = linkp->llcl_if; + if (frame_kind == LLCFT_INFO) { + /* + * send out a copy of the frame, retain the + * original + */ + (*ifp->if_output)(ifp, m_copy(m, 0, (int)M_COPYALL), + rt_key(linkp->llcl_nlrt), + linkp->llcl_nlrt); + /* + * Account for the LLC header and let it ``disappear'' + * as the raw info frame payload is what we hold in + * the output_buffers of the link. + */ + m_adj(m, LLC_ISFRAMELEN); + } else (*ifp->if_output)(ifp, m, + rt_key(linkp->llcl_nlrt), + linkp->llcl_nlrt); +} + diff --git a/bsd/netccitt/llc_subr.c b/bsd/netccitt/llc_subr.c new file mode 100644 index 000000000..c8c56d1cf --- /dev/null +++ b/bsd/netccitt/llc_subr.c @@ -0,0 +1,2379 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Dirk Husemann and the Computer Science Department (IV) of + * the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)llc_subr.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +/* + * Frame names for diagnostic messages + */ +char *frame_names[] = { "INFO", "RR", "RNR", "REJ", "DM", "SABME", "DISC", + "UA", "FRMR", "UI", "XID", "TEST", "ILLEGAL", "TIMER", "N2xT1"}; + + +/* + * Trace level + */ +int llc_tracelevel = LLCTR_URGENT; + +/* + * Values for accessing various bitfields + */ +struct bitslice llc_bitslice[] = { +/* mask, shift value */ + { 0x1, 0x0 }, + { 0xfe, 0x1 }, + { 0x3, 0x0 }, + { 0xc, 0x2 }, + { 0x10, 0x4 }, + { 0xe0, 0x5 }, + { 0x1f, 0x0 } +}; + +/* + * We keep the link control blocks on a doubly linked list - + * primarily for checking in llc_time() + */ + +struct llccb_q llccb_q = { &llccb_q, &llccb_q }; + +/* + * Flag for signalling wether route tree for AF_LINK has been + * initialized yet. + */ + +int af_link_rts_init_done = 0; + + +/* + * Functions dealing with struct sockaddr_dl */ + +/* Compare sdl_a w/ sdl_b */ + +sdl_cmp(struct sockaddr_dl *sdl_a, struct sockaddr_dl *sdl_b) +{ + if (LLADDRLEN(sdl_a) != LLADDRLEN(sdl_b)) + return(1); + return(bcmp((caddr_t) sdl_a->sdl_data, (caddr_t) sdl_b->sdl_data, + LLADDRLEN(sdl_a))); +} + +/* Copy sdl_f to sdl_t */ + +sdl_copy(struct sockaddr_dl *sdl_f, struct sockaddr_dl *sdl_t) +{ + bcopy((caddr_t) sdl_f, (caddr_t) sdl_t, sdl_f->sdl_len); +} + +/* Swap sdl_a w/ sdl_b */ + +sdl_swapaddr(struct sockaddr_dl *sdl_a, struct sockaddr_dl *sdl_b) +{ + struct sockaddr_dl sdl_tmp; + + sdl_copy(sdl_a, &sdl_tmp); + sdl_copy(sdl_b, sdl_a); + sdl_copy(&sdl_tmp, sdl_b); +} + +/* Fetch the sdl of the associated if */ + +struct sockaddr_dl * +sdl_getaddrif(struct ifnet *ifp) +{ + register struct ifaddr *ifa; + + for(ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) + if (ifa->ifa_addr->sa_family == AF_LINK ) + return((struct sockaddr_dl *)(ifa->ifa_addr)); + + 
return((struct sockaddr_dl *)0); +} + +/* Check addr of interface with the one given */ + +sdl_checkaddrif(struct ifnet *ifp, struct sockaddr_dl *sdl_c) +{ + register struct ifaddr *ifa; + + for(ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) + if ((ifa->ifa_addr->sa_family == AF_LINK ) && + !sdl_cmp((struct sockaddr_dl *)(ifa->ifa_addr), sdl_c)) + return(1); + + return(0); +} + +/* Build an sdl from MAC addr, DLSAP addr, and interface */ + +sdl_setaddrif(struct ifnet *ifp, u_char *mac_addr, u_char dlsap_addr, + u_char mac_len, struct sockaddr_dl *sdl_to) +{ + register struct sockaddr_dl *sdl_tmp; + + if ((sdl_tmp = sdl_getaddrif(ifp)) ) { + sdl_copy(sdl_tmp, sdl_to); + bcopy((caddr_t) mac_addr, (caddr_t) LLADDR(sdl_to), mac_len); + *(LLADDR(sdl_to)+mac_len) = dlsap_addr; + sdl_to->sdl_alen = mac_len+1; + return(1); + } else return(0); +} + +/* Fill out the sdl header aggregate */ + +sdl_sethdrif(struct ifnet *ifp, u_char *mac_src, u_char dlsap_src, u_char *mac_dst, + u_char dlsap_dst, u_char mac_len, struct sdl_hdr *sdlhdr_to) +{ + if ( !sdl_setaddrif(ifp, mac_src, dlsap_src, mac_len, + &sdlhdr_to->sdlhdr_src) || + !sdl_setaddrif(ifp, mac_dst, dlsap_dst, mac_len, + &sdlhdr_to->sdlhdr_dst) ) + return(0); + else return(1); +} + +static struct sockaddr_dl sap_saddr; +static struct sockaddr_dl sap_sgate = { + sizeof(struct sockaddr_dl), /* _len */ + AF_LINK /* _af */ +}; + +/* + * Set sapinfo for SAP address, llcconfig, af, and interface + */ +struct npaidbentry * +llc_setsapinfo(struct ifnet *ifp, u_char af, u_char sap, struct dllconfig *llconf) +{ + struct protosw *pp; + struct sockaddr_dl *ifdl_addr; + struct rtentry *sirt = (struct rtentry *)0; + struct npaidbentry *sapinfo; + u_char saploc; + int size = sizeof(struct npaidbentry); + + USES_AF_LINK_RTS; + + /* + * We rely/assume that only STREAM protocols will make use of + * connection oriented LLC2. If this will one day not be the + * case this will obviously fail. 
+ */ + pp = pffindtype (af, SOCK_STREAM); + if (pp == 0 || pp->pr_input == 0 || pp->pr_ctlinput == 0) { + printf("network level protosw error"); + return 0; + } + + /* + * We need a way to jot down the LLC2 configuration for + * a certain LSAP address. To do this we enter + * a "route" for the SAP. + */ + ifdl_addr = sdl_getaddrif(ifp); + sdl_copy(ifdl_addr, &sap_saddr); + sdl_copy(ifdl_addr, &sap_sgate); + saploc = LLSAPLOC(&sap_saddr, ifp); + sap_saddr.sdl_data[saploc] = sap; + sap_saddr.sdl_alen++; + + /* now enter it */ + rtrequest(RTM_ADD, (struct sockaddr *)&sap_saddr, + (struct sockaddr *)&sap_sgate, 0, 0, &sirt); + if (sirt == 0) + return 0; + + /* Plug in config information in rt->rt_llinfo */ + +// sirt->rt_llinfo = malloc(size , M_PCB, M_WAITOK); + MALLOC(sirt->rt_llinfo, caddr_t, size, M_PCB, M_WAITOK); + sapinfo = (struct npaidbentry *) sirt->rt_llinfo; + if (sapinfo) { + bzero ((caddr_t)sapinfo, size); + /* + * For the time being we support LLC CLASS II here + * only + */ + sapinfo->si_class = LLC_CLASS_II; + sapinfo->si_window = llconf->dllcfg_window; + sapinfo->si_trace = llconf->dllcfg_trace; + if (sapinfo->si_trace) + llc_tracelevel--; + else llc_tracelevel++; + sapinfo->si_input = pp->pr_input; + sapinfo->si_ctlinput = (caddr_t (*)())pp->pr_ctlinput; + + return (sapinfo); + } + + return 0; +} + +/* + * Get sapinfo for SAP address and interface + */ +struct npaidbentry * +llc_getsapinfo(u_char sap, struct ifnet *ifp) +{ + struct sockaddr_dl *ifdl_addr; + struct sockaddr_dl si_addr; + struct rtentry *sirt; + u_char saploc; + + USES_AF_LINK_RTS; + + ifdl_addr = sdl_getaddrif(ifp); + sdl_copy(ifdl_addr, &si_addr); + saploc = LLSAPLOC(&si_addr, ifp); + si_addr.sdl_data[saploc] = sap; + si_addr.sdl_alen++; + + if ((sirt = rtalloc1((struct sockaddr *)&si_addr, 0))) + sirt->rt_refcnt--; + else return(0); + + return((struct npaidbentry *)sirt->rt_llinfo); +} + +/* + * llc_seq2slot() --- We only allocate enough memory to hold the window. 
This + * introduces the necessity to keep track of two ``pointers'' + * + * o llcl_freeslot the next free slot to be used + * this one advances modulo llcl_window + * o llcl_projvs the V(S) associated with the next frame + * to be set via llcl_freeslot + * this one advances modulo LLC_MAX_SEQUENCE + * + * A new frame is inserted at llcl_output_buffers[llcl_freeslot], after + * which both llcl_freeslot and llcl_projvs are incremented. + * + * The slot sl(sn) for any given sequence number sn is given by + * + * sl(sn) = (llcl_freeslot + llcl_window - 1 - (llcl_projvs + + * LLC_MAX_SEQUENCE- sn) % LLC_MAX_SEQUENCE) % + * llcl_window + * + * i.e. we first calculate the number of frames we need to ``go back'' + * from the current one (really the next one, but that doesn't matter as + * llcl_projvs is likewise of by plus one) and subtract that from the + * pointer to the most recently taken frame (llcl_freeslot - 1). + */ + +short +llc_seq2slot(struct llc_linkcb *linkp, short seqn) +{ + register sn = 0; + + sn = (linkp->llcl_freeslot + linkp->llcl_window - + (linkp->llcl_projvs + LLC_MAX_SEQUENCE - seqn) % + LLC_MAX_SEQUENCE) % linkp->llcl_window; + + return sn; +} + +/* + * LLC2 link state handler + * + * There is in most cases one function per LLC2 state. The LLC2 standard + * ISO 8802-2 allows in some cases for ambiguities, i.e. we have the choice + * to do one thing or the other. Right now I have just chosen one but have also + * indicated the spot by "multiple possibilities". One could make the behavior + * in those cases configurable, allowing the superuser to enter a profile word + * (32/64 bits, whatever is needed) that would suit her needs [I quite like + * that idea, perhaps I'll get around to it]. + * + * [Preceeding each state handler function is the description as taken from + * ISO 8802-2, section 7.9.2.1] + */ + +/* + * ADM --- The connection component is in the asynchronous disconnected mode. 
+ * It can accept an SABME PDU from a remote LLC SSAP or, at the request + * of the service access point user, can initiate an SABME PDU + * transmission to a remote LLC DSAP, to establish a data link + * connection. It also responds to a DISC command PDU and to any + * command PDU with the P bit set to ``1''. + */ +int +llc_state_ADM(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case NL_CONNECT_REQUEST: + llc_send(linkp, LLCFT_SABME, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_SETFLAG(linkp, S, 0); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, SETUP); + break; + case LLCFT_SABME + LLC_CMD: + /* + * ISO 8802-2, table 7-1, ADM state says to set + * the P flag, yet this will cause an SABME [P] to be + * answered with an UA only, not an UA [F], all + * other `disconnected' states set the F flag, so ... + */ + LLC_SETFLAG(linkp, F, pollfinal); + LLC_NEWSTATE(linkp, CONN); + action = LLC_CONNECT_INDICATION; + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_DM, LLC_RSP, pollfinal); + break; + default: + if (cmdrsp == LLC_CMD && pollfinal == 1) + llc_send(linkp, LLCFT_DM, LLC_RSP, 1); + /* remain in ADM state */ + } + + return action; +} + +/* + * CONN --- The local connection component has received an SABME PDU from a + * remote LLC SSAP, and it is waiting for the local user to accept or + * refuse the connection. 
+ */ +int +llc_state_CONN(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case NL_CONNECT_RESPONSE: + llc_send(linkp, LLCFT_UA, LLC_RSP, LLC_GETFLAG(linkp, F)); + LLC_RESETCOUNTER(linkp); + LLC_SETFLAG(linkp, P, 0); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0); + LLC_NEWSTATE(linkp, NORMAL); + break; + case NL_DISCONNECT_REQUEST: + llc_send(linkp, LLCFT_DM, LLC_RSP, LLC_GETFLAG(linkp, F)); + LLC_NEWSTATE(linkp, ADM); + break; + case LLCFT_SABME + LLC_CMD: + LLC_SETFLAG(linkp, F, pollfinal); + break; + case LLCFT_DM + LLC_RSP: + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + /* all other frames effect nothing here */ + } + + return action; +} + +/* + * RESET_WAIT --- The local connection component is waiting for the local user + * to indicate a RESET_REQUEST or a DISCONNECT_REQUEST. + */ +int +llc_state_RESET_WAIT(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case NL_RESET_REQUEST: + if (LLC_GETFLAG(linkp, S) == 0) { + llc_send(linkp, LLCFT_SABME, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, RESET); + } else { + llc_send(linkp, LLCFT_UA, LLC_RSP, + LLC_GETFLAG(linkp, F)); + LLC_RESETCOUNTER(linkp); + LLC_SETFLAG(linkp, P, 0); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_RESET_CONFIRM; + } + break; + case NL_DISCONNECT_REQUEST: + if (LLC_GETFLAG(linkp, S) == 0) { + llc_send(linkp, LLCFT_DISC, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, D_CONN); + } else { + llc_send(linkp, LLCFT_DM, LLC_RSP, + LLC_GETFLAG(linkp, F)); + LLC_NEWSTATE(linkp, ADM); + } + break; + case LLCFT_DM + LLC_RSP: + LLC_NEWSTATE(linkp, ADM); + action = 
LLC_DISCONNECT_INDICATION; + break; + case LLCFT_SABME + LLC_CMD: + LLC_SETFLAG(linkp, S, 1); + LLC_SETFLAG(linkp, F, pollfinal); + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_DM, LLC_RSP, pollfinal); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + } + + return action; +} + +/* + * RESET_CHECK --- The local connection component is waiting for the local user + * to accept or refuse a remote reset request. + */ +int +llc_state_RESET_CHECK(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case NL_RESET_RESPONSE: + llc_send(linkp, LLCFT_UA, LLC_RSP, LLC_GETFLAG(linkp, F)); + LLC_RESETCOUNTER(linkp); + LLC_SETFLAG(linkp, P, 0); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0); + LLC_NEWSTATE(linkp, NORMAL); + break; + case NL_DISCONNECT_REQUEST: + llc_send(linkp, LLCFT_DM, LLC_RSP, LLC_GETFLAG(linkp, F)); + LLC_NEWSTATE(linkp, ADM); + break; + case LLCFT_DM + LLC_RSP: + action = LLC_DISCONNECT_INDICATION; + break; + case LLCFT_SABME + LLC_CMD: + LLC_SETFLAG(linkp, F, pollfinal); + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_DM, LLC_RSP, pollfinal); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + } + + return action; +} + +/* + * SETUP --- The connection component has transmitted an SABME command PDU to a + * remote LLC DSAP and is waiting for a reply. 
+ */ +int +llc_state_SETUP(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case LLCFT_SABME + LLC_CMD: + LLC_RESETCOUNTER(linkp); + llc_send(linkp, LLCFT_UA, LLC_RSP, pollfinal); + LLC_SETFLAG(linkp, S, 1); + break; + case LLCFT_UA + LLC_RSP: + if (LLC_GETFLAG(linkp, P) == pollfinal) { + LLC_STOP_ACK_TIMER(linkp); + LLC_RESETCOUNTER(linkp); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_CONNECT_CONFIRM; + } + break; + case LLC_ACK_TIMER_EXPIRED: + if (LLC_GETFLAG(linkp, S) == 1) { + LLC_SETFLAG(linkp, P, 0); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0), + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_CONNECT_CONFIRM; + } else if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_SABME, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry++; + } else { + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + } + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_DM, LLC_RSP, pollfinal); + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + case LLCFT_DM + LLC_RSP: + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + } + + return action; +} + +/* + * RESET --- As a result of a service access point user request or the receipt + * of a FRMR response PDU, the local connection component has sent an + * SABME command PDU to the remote LLC DSAP to reset the data link + * connection and is waiting for a reply. 
+ */ +int +llc_state_RESET(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case LLCFT_SABME + LLC_CMD: + LLC_RESETCOUNTER(linkp); + LLC_SETFLAG(linkp, S, 1); + llc_send(linkp, LLCFT_UA, LLC_RSP, pollfinal); + break; + case LLCFT_UA + LLC_RSP: + if (LLC_GETFLAG(linkp, P) == pollfinal) { + LLC_STOP_ACK_TIMER(linkp); + LLC_RESETCOUNTER(linkp); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_RESET_CONFIRM; + } + break; + case LLC_ACK_TIMER_EXPIRED: + if (LLC_GETFLAG(linkp, S) == 1) { + LLC_SETFLAG(linkp, P, 0); + LLC_SETFLAG(linkp, REMOTE_BUSY, 0); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_RESET_CONFIRM; + } else if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_SABME, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry++; + } else { + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + } + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_DM, LLC_RSP, pollfinal); + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + case LLCFT_DM + LLC_RSP: + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + } + + return action; +} + +/* + * D_CONN --- At the request of the service access point user, the local LLC + * has sent a DISC command PDU to the remote LLC DSAP and is waiting + * for a reply. 
+ */ +int +llc_state_D_CONN(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case LLCFT_SABME + LLC_CMD: + llc_send(linkp, LLCFT_DM, LLC_RSP, pollfinal); + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + break; + case LLCFT_UA + LLC_RSP: + if (LLC_GETFLAG(linkp, P) == pollfinal) { + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + } + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_UA, LLC_RSP, pollfinal); + break; + case LLCFT_DM + LLC_RSP: + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + break; + case LLC_ACK_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_DISC, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry++; + } else LLC_NEWSTATE(linkp, ADM); + break; + } + + return action; +} + +/* + * ERROR --- The local connection component has detected an error in a received + * PDU and has sent a FRMR response PDU. It is waiting for a reply from + * the remote connection component. 
+ */ +int +llc_state_ERROR(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case LLCFT_SABME + LLC_CMD: + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, RESET_CHECK); + action = LLC_RESET_INDICATION_REMOTE; + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_UA, LLC_RSP, pollfinal); + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + case LLCFT_DM + LLC_RSP: + LLC_STOP_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + case LLCFT_FRMR + LLC_RSP: + LLC_STOP_ACK_TIMER(linkp); + LLC_SETFLAG(linkp, S, 0); + LLC_NEWSTATE(linkp, RESET_WAIT); + action = LLC_FRMR_RECEIVED; + break; + case LLC_ACK_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_FRMR, LLC_RSP, 0); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry++; + } else { + LLC_SETFLAG(linkp, S, 0); + LLC_NEWSTATE(linkp, RESET_WAIT); + action = LLC_RESET_INDICATION_LOCAL; + } + break; + default: + if (cmdrsp == LLC_CMD){ + llc_send(linkp, LLCFT_FRMR, LLC_RSP, pollfinal); + LLC_START_ACK_TIMER(linkp); + } + break; + + } + + return action; +} + +/* + * NORMAL, BUSY, REJECT, AWAIT, AWAIT_BUSY, and AWAIT_REJECT all share + * a common core state handler. 
+ */ +int +llc_state_NBRAcore(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = 0; + + switch(frame_kind + cmdrsp) { + case NL_DISCONNECT_REQUEST: + llc_send(linkp, LLCFT_DISC, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_STOP_ALL_TIMERS(linkp); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, D_CONN); + break; + case NL_RESET_REQUEST: + llc_send(linkp, LLCFT_SABME, LLC_CMD, pollfinal); + LLC_SETFLAG(linkp, P, pollfinal); + LLC_STOP_ALL_TIMERS(linkp); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_SETFLAG(linkp, S, 0); + LLC_NEWSTATE(linkp, RESET); + break; + case LLCFT_SABME + LLC_CMD: + LLC_SETFLAG(linkp, F, pollfinal); + LLC_STOP_ALL_TIMERS(linkp); + LLC_NEWSTATE(linkp, RESET_CHECK); + action = LLC_RESET_INDICATION_REMOTE; + break; + case LLCFT_DISC + LLC_CMD: + llc_send(linkp, LLCFT_UA, LLC_RSP, pollfinal); + LLC_STOP_ALL_TIMERS(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + case LLCFT_FRMR + LLC_RSP: + LLC_STOP_ALL_TIMERS(linkp); + LLC_SETFLAG(linkp, S, 0); + LLC_NEWSTATE(linkp, RESET_WAIT); + action = LLC_FRMR_RECEIVED; + break; + case LLCFT_DM + LLC_RSP: + LLC_STOP_ALL_TIMERS(linkp); + LLC_NEWSTATE(linkp, ADM); + action = LLC_DISCONNECT_INDICATION; + break; + case LLC_INVALID_NR + LLC_CMD: + case LLC_INVALID_NS + LLC_CMD: + LLC_SETFRMR(linkp, frame, cmdrsp, + (frame_kind == LLC_INVALID_NR ? 
LLC_FRMR_Z : + (LLC_FRMR_V | LLC_FRMR_W))); + llc_send(linkp, LLCFT_FRMR, LLC_RSP, pollfinal); + LLC_STOP_ALL_TIMERS(linkp); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, ERROR); + action = LLC_FRMR_SENT; + break; + case LLC_INVALID_NR + LLC_RSP: + case LLC_INVALID_NS + LLC_RSP: + case LLCFT_UA + LLC_RSP: + case LLC_BAD_PDU: { + char frmrcause = 0; + + switch (frame_kind) { + case LLC_INVALID_NR: frmrcause = LLC_FRMR_Z; break; + case LLC_INVALID_NS: frmrcause = LLC_FRMR_V | LLC_FRMR_W; break; + default: frmrcause = LLC_FRMR_W; + } + LLC_SETFRMR(linkp, frame, cmdrsp, frmrcause); + llc_send(linkp, LLCFT_FRMR, LLC_RSP, 0); + LLC_STOP_ALL_TIMERS(linkp); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, ERROR); + action = LLC_FRMR_SENT; + break; + } + default: + if (cmdrsp == LLC_RSP && pollfinal == 1 && + LLC_GETFLAG(linkp, P) == 0) { + LLC_SETFRMR(linkp, frame, cmdrsp, LLC_FRMR_W); + LLC_STOP_ALL_TIMERS(linkp); + LLC_START_ACK_TIMER(linkp); + linkp->llcl_retry = 0; + LLC_NEWSTATE(linkp, ERROR); + action = LLC_FRMR_SENT; + } + break; + case LLC_P_TIMER_EXPIRED: + case LLC_ACK_TIMER_EXPIRED: + case LLC_REJ_TIMER_EXPIRED: + case LLC_BUSY_TIMER_EXPIRED: + if (linkp->llcl_retry >= llc_n2) { + LLC_STOP_ALL_TIMERS(linkp); + LLC_SETFLAG(linkp, S, 0); + LLC_NEWSTATE(linkp, RESET_WAIT); + action = LLC_RESET_INDICATION_LOCAL; + } + break; + } + + return action; +} + +/* + * NORMAL --- A data link connection exists between the local LLC service access + * point and the remote LLC service access point. Sending and + * reception of information and supervisory PDUs can be performed. 
+ */ +int +llc_state_NORMAL(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = LLC_PASSITON; + + switch(frame_kind + cmdrsp) { + case NL_DATA_REQUEST: + if (LLC_GETFLAG(linkp, REMOTE_BUSY) == 0) { +#ifdef not_now + if (LLC_GETFLAG(linkp, P) == 0) { + /* multiple possibilities */ + llc_send(linkp, LLCFT_INFO, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + if (LLC_TIMERXPIRED(linkp, ACK) != LLC_TIMER_RUNNING) + LLC_START_ACK_TIMER(linkp); + } else { +#endif + /* multiple possibilities */ + llc_send(linkp, LLCFT_INFO, LLC_CMD, 0); + if (LLC_TIMERXPIRED(linkp, ACK) != LLC_TIMER_RUNNING) + LLC_START_ACK_TIMER(linkp); +#ifdef not_now + } +#endif + action = 0; + } + break; + case LLC_LOCAL_BUSY_DETECTED: + if (LLC_GETFLAG(linkp, P) == 0) { + /* multiple possibilities --- action-wise */ + /* multiple possibilities --- CMD/RSP-wise */ + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_START_P_TIMER(linkp); + LLC_SETFLAG(linkp, DATA, 0); + LLC_NEWSTATE(linkp, BUSY); + action = 0; + } else { + /* multiple possibilities --- CMD/RSP-wise */ + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_SETFLAG(linkp, DATA, 0); + LLC_NEWSTATE(linkp, BUSY); + action = 0; + } + break; + case LLC_INVALID_NS + LLC_CMD: + case LLC_INVALID_NS + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_REJ, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_START_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } else if (pollfinal == 0 && p == 1) { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_START_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } else if ((pollfinal == 0 && p == 0) || + (pollfinal == 1 && p == 1 && cmdrsp == LLC_RSP)) { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_START_P_TIMER(linkp); + 
LLC_START_REJ_TIMER(linkp); + if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else action = 0; + LLC_NEWSTATE(linkp, REJECT); + } + break; + } + case LLCFT_INFO + LLC_CMD: + case LLCFT_INFO + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + LLC_INC(linkp->llcl_vr); + LLC_SENDACKNOWLEDGE(linkp, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = LLC_DATA_INDICATION; + } else if (pollfinal == 0 && p == 1) { + LLC_INC(linkp->llcl_vr); + LLC_SENDACKNOWLEDGE(linkp, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = LLC_DATA_INDICATION; + } else if ((pollfinal == 0 && p == 0 && cmdrsp == LLC_CMD) || + (pollfinal == p && cmdrsp == LLC_RSP)) { + LLC_INC(linkp->llcl_vr); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_SENDACKNOWLEDGE(linkp, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (cmdrsp == LLC_RSP && pollfinal == 1) + LLC_CLEAR_REMOTE_BUSY(linkp, action); + action = LLC_DATA_INDICATION; + } + break; + } + case LLCFT_RR + LLC_CMD: + case LLCFT_RR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + LLC_SENDACKNOWLEDGE(linkp, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if ((pollfinal == 0) || + (cmdrsp == LLC_RSP && pollfinal == 1 && p == 1)) { + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case LLCFT_RNR + LLC_CMD: + case LLCFT_RNR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } else if ((pollfinal 
== 0) || + (cmdrsp == LLC_RSP && pollfinal == 1 && p == 1)) { + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } + break; + } + case LLCFT_REJ + LLC_CMD: + case LLCFT_REJ + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + llc_resend(linkp, LLC_RSP, 1); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (pollfinal == 0 && p == 1) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if ((pollfinal == 0 && p == 0 && cmdrsp == LLC_CMD) || + (pollfinal == p && cmdrsp == LLC_RSP)) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_START_P_TIMER(linkp); + llc_resend(linkp, LLC_CMD, 1); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case NL_INITIATE_PF_CYCLE: + if (LLC_GETFLAG(linkp, P) == 0) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + action = 0; + } + break; + case LLC_P_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + LLC_NEWSTATE(linkp, AWAIT); + action = 0; + } + break; + case LLC_ACK_TIMER_EXPIRED: + case LLC_BUSY_TIMER_EXPIRED: + if ((LLC_GETFLAG(linkp, P) == 0) + && (linkp->llcl_retry < llc_n2)) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + LLC_NEWSTATE(linkp, AWAIT); + action = 0; + } + break; + } + if (action == LLC_PASSITON) + action = llc_state_NBRAcore(linkp, frame, frame_kind, + cmdrsp, pollfinal); + + return action; +} + +/* + * BUSY --- A data link connection exists between the local LLC service access + * point and the remote LLC service access point. I PDUs may be sent. 
+ * Local conditions make it likely that the information feld of + * received I PDUs will be ignored. Supervisory PDUs may be both sent + * and received. + */ +int +llc_state_BUSY(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = LLC_PASSITON; + + switch(frame_kind + cmdrsp) { + case NL_DATA_REQUEST: + if (LLC_GETFLAG(linkp, REMOTE_BUSY) == 0) + if (LLC_GETFLAG(linkp, P) == 0) { + llc_send(linkp, LLCFT_INFO, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + if (LLC_TIMERXPIRED(linkp, ACK) != LLC_TIMER_RUNNING) + LLC_START_ACK_TIMER(linkp); + action = 0; + } else { + llc_send(linkp, LLCFT_INFO, LLC_CMD, 0); + if (LLC_TIMERXPIRED(linkp, ACK) != LLC_TIMER_RUNNING) + LLC_START_ACK_TIMER(linkp); + action = 0; + } + break; + case LLC_LOCAL_BUSY_CLEARED: { + register int p = LLC_GETFLAG(linkp, P); + register int df = LLC_GETFLAG(linkp, DATA); + + switch (df) { + case 1: + if (p == 0) { + /* multiple possibilities */ + llc_send(linkp, LLCFT_REJ, LLC_CMD, 1); + LLC_START_REJ_TIMER(linkp); + LLC_START_P_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } else { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 0); + LLC_START_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } + break; + case 0: + if (p == 0) { + /* multiple possibilities */ + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_NEWSTATE(linkp, NORMAL); + action = 0; + } else { + llc_send(linkp, LLCFT_RR, LLC_CMD, 0); + LLC_NEWSTATE(linkp, NORMAL); + action = 0; + } + break; + case 2: + if (p == 0) { + /* multiple possibilities */ + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } else { + llc_send(linkp, LLCFT_RR, LLC_CMD, 0); + LLC_NEWSTATE(linkp, REJECT); + action =0; + } + break; + } + break; + } + case LLC_INVALID_NS + LLC_CMD: + case LLC_INVALID_NS + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = 
LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (LLC_GETFLAG(linkp, DATA) == 0) + LLC_SETFLAG(linkp, DATA, 1); + action = 0; + } else if ((cmdrsp == LLC_CMD && pollfinal == 0 && p == 0) || + (cmdrsp == LLC_RSP && pollfinal == p)) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (LLC_GETFLAG(linkp, DATA) == 0) + LLC_SETFLAG(linkp, DATA, 1); + if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else action = 0; + } else if (pollfinal == 0 && p == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (LLC_GETFLAG(linkp, DATA) == 0) + LLC_SETFLAG(linkp, DATA, 1); + action = 0; + } + break; + } + case LLCFT_INFO + LLC_CMD: + case LLCFT_INFO + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + LLC_INC(linkp->llcl_vr); + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (LLC_GETFLAG(linkp, DATA) == 2) + LLC_STOP_REJ_TIMER(linkp); + LLC_SETFLAG(linkp, DATA, 0); + action = LLC_DATA_INDICATION; + } else if ((cmdrsp == LLC_CMD && pollfinal == 0 && p == 0) || + (cmdrsp == LLC_RSP && pollfinal == p)) { + LLC_INC(linkp->llcl_vr); + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (LLC_GETFLAG(linkp, DATA) == 2) + LLC_STOP_REJ_TIMER(linkp); + if (cmdrsp == LLC_RSP && pollfinal == 1) + LLC_CLEAR_REMOTE_BUSY(linkp, action); + action = LLC_DATA_INDICATION; + } else if (pollfinal == 0 && p == 1) { + LLC_INC(linkp->llcl_vr); + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (LLC_GETFLAG(linkp, DATA) == 2) + LLC_STOP_REJ_TIMER(linkp); + LLC_SETFLAG(linkp, DATA, 0); + action = 
LLC_DATA_INDICATION; + } + break; + } + case LLCFT_RR + LLC_CMD: + case LLCFT_RR + LLC_RSP: + case LLCFT_RNR + LLC_CMD: + case LLCFT_RNR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (frame_kind == LLCFT_RR) { + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else { + LLC_SET_REMOTE_BUSY(linkp, action); + } + } else if (pollfinal = 0 || + (cmdrsp == LLC_RSP && pollfinal == 1)) { + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (frame_kind == LLCFT_RR) { + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else { + LLC_SET_REMOTE_BUSY(linkp, action); + } + } + break; + } + case LLCFT_REJ + LLC_CMD: + case LLCFT_REJ + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if ((cmdrsp == LLC_CMD && pollfinal == 0 && p == 0) || + (cmdrsp == LLC_RSP && pollfinal == p)) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (pollfinal == 0 && p == 1) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case NL_INITIATE_PF_CYCLE: + if (LLC_GETFLAG(linkp, P) == 0) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + action = 0; + } + break; + case LLC_P_TIMER_EXPIRED: + /* multiple possibilities */ + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + 
linkp->llcl_retry++; + LLC_NEWSTATE(linkp, AWAIT_BUSY); + action = 0; + } + break; + case LLC_ACK_TIMER_EXPIRED: + case LLC_BUSY_TIMER_EXPIRED: + if (LLC_GETFLAG(linkp, P) == 0 && linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + LLC_NEWSTATE(linkp, AWAIT_BUSY); + action = 0; + } + break; + case LLC_REJ_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) + if (LLC_GETFLAG(linkp, P) == 0) { + /* multiple possibilities */ + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + LLC_SETFLAG(linkp, DATA, 1); + LLC_NEWSTATE(linkp, AWAIT_BUSY); + action = 0; + } else{ + LLC_SETFLAG(linkp, DATA, 1); + LLC_NEWSTATE(linkp, BUSY); + action = 0; + } + + break; + } + if (action == LLC_PASSITON) + action = llc_state_NBRAcore(linkp, frame, frame_kind, + cmdrsp, pollfinal); + + return action; +} + +/* + * REJECT --- A data link connection exists between the local LLC service + * access point and the remote LLC service access point. The local + * connection component has requested that the remote connection + * component resend a specific I PDU that the local connection + * componnent has detected as being out of sequence. Both I PDUs and + * supervisory PDUs may be sent and received. 
+ */ +int +llc_state_REJECT(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = LLC_PASSITON; + + switch(frame_kind + cmdrsp) { + case NL_DATA_REQUEST: + if (LLC_GETFLAG(linkp, P) == 0) { + llc_send(linkp, LLCFT_INFO, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + if (LLC_TIMERXPIRED(linkp, ACK) != LLC_TIMER_RUNNING) + LLC_START_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } else { + llc_send(linkp, LLCFT_INFO, LLC_CMD, 0); + if (LLC_TIMERXPIRED(linkp, ACK) != LLC_TIMER_RUNNING) + LLC_START_ACK_TIMER(linkp); + LLC_NEWSTATE(linkp, REJECT); + action = 0; + } + break; + case NL_LOCAL_BUSY_DETECTED: + if (LLC_GETFLAG(linkp, P) == 0) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_SETFLAG(linkp, DATA, 2); + LLC_NEWSTATE(linkp, BUSY); + action = 0; + } else { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_SETFLAG(linkp, DATA, 2); + LLC_NEWSTATE(linkp, BUSY); + action = 0; + } + break; + case LLC_INVALID_NS + LLC_CMD: + case LLC_INVALID_NS + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = 0; + } else if (pollfinal == 0 || + (cmdrsp == LLC_RSP && pollfinal == 1 && p == 1)) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else action = 0; + } + break; + } + case LLCFT_INFO + LLC_CMD: + case LLCFT_INFO + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + LLC_INC(linkp->llcl_vr); + LLC_SENDACKNOWLEDGE(linkp, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_STOP_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, NORMAL); + action = 
LLC_DATA_INDICATION; + } else if ((cmdrsp = LLC_RSP && pollfinal == p) || + (cmdrsp == LLC_CMD && pollfinal == 0 && p == 0)) { + LLC_INC(linkp->llcl_vr); + LLC_SENDACKNOWLEDGE(linkp, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + if (cmdrsp == LLC_RSP && pollfinal == 1) + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_STOP_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_DATA_INDICATION; + } else if (pollfinal == 0 && p == 1) { + LLC_INC(linkp->llcl_vr); + LLC_SENDACKNOWLEDGE(linkp, LLC_CMD, 0); + LLC_STOP_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_DATA_INDICATION; + } + break; + } + case LLCFT_RR + LLC_CMD: + case LLCFT_RR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + LLC_SENDACKNOWLEDGE(linkp, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (pollfinal == 0 || + (cmdrsp == LLC_RSP && pollfinal == 1 && p == 1)) { + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case LLCFT_RNR + LLC_CMD: + case LLCFT_RNR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } else if (pollfinal == 0 || + (cmdrsp == LLC_RSP && pollfinal == 1 && p == 1)) { + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = 0; + } + break; + } + case LLCFT_REJ + LLC_CMD: + case LLCFT_REJ + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + 
llc_resend(linkp, LLC_RSP, 1); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if ((cmdrsp == LLC_CMD && pollfinal == 0 && p == 0) || + (cmdrsp == LLC_RSP && pollfinal == p)) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_UPDATE_P_FLAG(linkp, cmdrsp, pollfinal); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (pollfinal == 0 && p == 1) { + linkp->llcl_vs = nr; + LLC_UPDATE_NR_RECEIVED(linkp, nr); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case NL_INITIATE_PF_CYCLE: + if (LLC_GETFLAG(linkp, P) == 0) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + action = 0; + } + break; + case LLC_REJ_TIMER_EXPIRED: + if (LLC_GETFLAG(linkp, P) == 0 && linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_START_REJ_TIMER(linkp); + linkp->llcl_retry++; + action = 0; + } + case LLC_P_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_START_REJ_TIMER(linkp); + linkp->llcl_retry++; + LLC_NEWSTATE(linkp, AWAIT_REJECT); + action = 0; + } + break; + case LLC_ACK_TIMER_EXPIRED: + case LLC_BUSY_TIMER_EXPIRED: + if (LLC_GETFLAG(linkp, P) == 0 && linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_START_REJ_TIMER(linkp); + linkp->llcl_retry++; + /* + * I cannot locate the description of RESET_V(S) + * in ISO 8802-2, table 7-1, state REJECT, last event, + * and assume they meant to set V(S) to 0 ... + */ + linkp->llcl_vs = 0; /* XXX */ + LLC_NEWSTATE(linkp, AWAIT_REJECT); + action = 0; + } + + break; + } + if (action == LLC_PASSITON) + action = llc_state_NBRAcore(linkp, frame, frame_kind, + cmdrsp, pollfinal); + + return action; +} + +/* + * AWAIT --- A data link connection exists between the local LLC service access + * point and the remote LLC service access point. 
The local LLC is + * performing a timer recovery operation and has sent a command PDU + * with the P bit set to ``1'', and is awaiting an acknowledgement + * from the remote LLC. I PDUs may be received but not sent. + * Supervisory PDUs may be both sent and received. + */ +int +llc_state_AWAIT(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = LLC_PASSITON; + + switch(frame_kind + cmdrsp) { + case LLC_LOCAL_BUSY_DETECTED: + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_SETFLAG(linkp, DATA, 0); + LLC_NEWSTATE(linkp, AWAIT_BUSY); + action = 0; + break; + case LLC_INVALID_NS + LLC_CMD: + case LLC_INVALID_NS + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_REJ, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_START_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, AWAIT_REJECT); + action = 0; + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + llc_resend(linkp, LLC_CMD, 0); + LLC_START_REJ_TIMER(linkp); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, REJECT); + } else if (pollfinal == 0) { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_START_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, AWAIT_REJECT); + action = 0; + } + break; + } + case LLCFT_INFO + LLC_RSP: + case LLCFT_INFO + LLC_CMD: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + LLC_INC(linkp->llcl_vr); + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = LLC_DATA_INDICATION; + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + llc_resend(linkp, 
LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_DATA_INDICATION; + } else if (pollfinal == 0) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = LLC_DATA_INDICATION; + } + break; + } + case LLCFT_RR + LLC_CMD: + case LLCFT_RR + LLC_RSP: + case LLCFT_REJ + LLC_CMD: + case LLCFT_REJ + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, NORMAL); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case LLCFT_RNR + LLC_CMD: + case LLCFT_RNR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (pollfinal == 1 && cmdrsp == LLC_CMD) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } else if (pollfinal == 1 && cmdrsp == LLC_RSP) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + LLC_SET_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, NORMAL); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } + break; + } + case LLC_P_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + action = 0; + } + break; + } + if (action == LLC_PASSITON) + action = llc_state_NBRAcore(linkp, frame, frame_kind, + cmdrsp, pollfinal); + 
+ return action; +} + +/* + * AWAIT_BUSY --- A data link connection exists between the local LLC service + * access point and the remote LLC service access point. The + * local LLC is performing a timer recovery operation and has + * sent a command PDU with the P bit set to ``1'', and is + * awaiting an acknowledgement from the remote LLC. I PDUs may + * not be sent. Local conditions make it likely that the + * information feld of receoved I PDUs will be ignored. + * Supervisory PDUs may be both sent and received. + */ +int +llc_state_AWAIT_BUSY(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = LLC_PASSITON; + + switch(frame_kind + cmdrsp) { + case LLC_LOCAL_BUSY_CLEARED: + switch (LLC_GETFLAG(linkp, DATA)) { + case 1: + llc_send(linkp, LLCFT_REJ, LLC_CMD, 0); + LLC_START_REJ_TIMER(linkp); + LLC_NEWSTATE(linkp, AWAIT_REJECT); + action = 0; + break; + case 0: + llc_send(linkp, LLCFT_RR, LLC_CMD, 0); + LLC_NEWSTATE(linkp, AWAIT); + action = 0; + break; + case 2: + llc_send(linkp, LLCFT_RR, LLC_CMD, 0); + LLC_NEWSTATE(linkp, AWAIT_REJECT); + action = 0; + break; + } + break; + case LLC_INVALID_NS + LLC_CMD: + case LLC_INVALID_NS + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SETFLAG(linkp, DATA, 1); + action = 0; + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + /* optionally */ + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + LLC_SETFLAG(linkp, DATA, 1); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + llc_resend(linkp, LLC_CMD, 0); + LLC_NEWSTATE(linkp, BUSY); + } else if (pollfinal == 0) { + /* optionally */ + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SETFLAG(linkp, DATA, 1); + action = 0; + } 
+ } + case LLCFT_INFO + LLC_CMD: + case LLCFT_INFO + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_INC(linkp->llcl_vr); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SETFLAG(linkp, DATA, 0); + action = LLC_DATA_INDICATION; + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_INC(linkp->llcl_vr); + LLC_START_P_TIMER(linkp); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_SETFLAG(linkp, DATA, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + llc_resend(linkp, LLC_CMD, 0); + LLC_NEWSTATE(linkp, BUSY); + action = LLC_DATA_INDICATION; + } else if (pollfinal == 0) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_INC(linkp->llcl_vr); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SETFLAG(linkp, DATA, 0); + action = LLC_DATA_INDICATION; + } + break; + } + case LLCFT_RR + LLC_CMD: + case LLCFT_REJ + LLC_CMD: + case LLCFT_RR + LLC_RSP: + case LLCFT_REJ + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, BUSY); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case LLCFT_RNR + LLC_CMD: + case LLCFT_RNR + LLC_RSP: { + register int p = LLC_GETFLAG(linkp, P); + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && 
pollfinal == 1) { + llc_send(linkp, LLCFT_RNR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + LLC_SET_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, BUSY); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } + break; + } + case LLC_P_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_RNR, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + action = 0; + } + break; + } + if (action == LLC_PASSITON) + action = llc_state_NBRAcore(linkp, frame, frame_kind, + cmdrsp, pollfinal); + + return action; +} + +/* + * AWAIT_REJECT --- A data link connection exists between the local LLC service + * access point and the remote LLC service access point. The + * local connection component has requested that the remote + * connection component re-transmit a specific I PDU that the + * local connection component has detected as being out of + * sequence. Before the local LLC entered this state it was + * performing a timer recovery operation and had sent a + * command PDU with the P bit set to ``1'', and is still + * awaiting an acknowledgment from the remote LLC. I PDUs may + * be received but not transmitted. Supervisory PDUs may be + * both transmitted and received. 
+ */ +int +llc_state_AWAIT_REJECT(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + int action = LLC_PASSITON; + + switch(frame_kind + cmdrsp) { + case LLC_LOCAL_BUSY_DETECTED: + llc_send(linkp, LLCFT_RNR, LLC_CMD, 0); + LLC_SETFLAG(linkp, DATA, 2); + LLC_NEWSTATE(linkp, AWAIT_BUSY); + action = 0; + break; + case LLC_INVALID_NS + LLC_CMD: + case LLC_INVALID_NS + LLC_RSP: { + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = 0; + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + llc_resend(linkp, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, REJECT); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + action = 0; + } + break; + } + case LLCFT_INFO + LLC_CMD: + case LLCFT_INFO + LLC_RSP: { + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + LLC_INC(linkp->llcl_vr); + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_STOP_REJ_TIMER(linkp); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_NEWSTATE(linkp, AWAIT); + action = LLC_DATA_INDICATION; + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_INC(linkp->llcl_vr); + LLC_STOP_P_TIMER(linkp); + LLC_STOP_REJ_TIMER(linkp); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + llc_resend(linkp, LLC_CMD, 0); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, NORMAL); + action = LLC_DATA_INDICATION; + } else if (pollfinal == 0) { + LLC_INC(linkp->llcl_vr); + llc_send(linkp, LLCFT_RR, LLC_CMD, 0); + LLC_STOP_REJ_TIMER(linkp); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_NEWSTATE(linkp, AWAIT); + action = LLC_DATA_INDICATION; + } + break; + } + case LLCFT_RR + LLC_CMD: + case LLCFT_REJ + LLC_CMD: + case LLCFT_RR + LLC_RSP: + case LLCFT_REJ 
+ LLC_RSP: { + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + llc_resend(linkp, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, REJECT); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_CLEAR_REMOTE_BUSY(linkp, action); + } + break; + } + case LLCFT_RNR + LLC_CMD: + case LLCFT_RNR + LLC_RSP: { + register int nr = LLCGBITS(frame->llc_control_ext, s_nr); + + if (cmdrsp == LLC_CMD && pollfinal == 1) { + llc_send(linkp, LLCFT_RR, LLC_RSP, 1); + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } else if (cmdrsp == LLC_RSP && pollfinal == 1) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + linkp->llcl_vs = nr; + LLC_STOP_P_TIMER(linkp); + LLC_SET_REMOTE_BUSY(linkp, action); + LLC_NEWSTATE(linkp, REJECT); + } else if (pollfinal == 0) { + LLC_UPDATE_NR_RECEIVED(linkp, nr); + LLC_SET_REMOTE_BUSY(linkp, action); + } + break; + } + case LLC_P_TIMER_EXPIRED: + if (linkp->llcl_retry < llc_n2) { + llc_send(linkp, LLCFT_REJ, LLC_CMD, 1); + LLC_START_P_TIMER(linkp); + linkp->llcl_retry++; + action = 0; + } + break; + } + if (action == LLC_PASSITON) + action = llc_state_NBRAcore(linkp, frame, frame_kind, + cmdrsp, pollfinal); + + return action; +} + + +/* + * llc_statehandler() --- Wrapper for llc_state_*() functions. + * Deals with action codes and checks for + * ``stuck'' links. + */ + +int +llc_statehandler(struct llc_linkcb *linkp, struct llc *frame, int frame_kind, + int cmdrsp, int pollfinal) +{ + register int action = 0; + + /* + * To check for ``zombie'' links each time llc_statehandler() gets called + * the AGE timer of linkp is reset. 
If it expires llc_timer() will + * take care of the link --- i.e. kill it 8=) + */ + LLC_STARTTIMER(linkp, AGE); + + /* + * Now call the current statehandler function. + */ + action = (*linkp->llcl_statehandler)(linkp, frame, frame_kind, + cmdrsp, pollfinal); +once_more_and_again: + switch (action) { + case LLC_CONNECT_INDICATION: { + int naction; + + LLC_TRACE(linkp, LLCTR_INTERESTING, "CONNECT INDICATION"); + linkp->llcl_nlnext = + (*linkp->llcl_sapinfo->si_ctlinput) + (PRC_CONNECT_INDICATION, + (struct sockaddr *) &linkp->llcl_addr, (caddr_t) linkp); + if (linkp->llcl_nlnext == 0) + naction = NL_DISCONNECT_REQUEST; + else naction = NL_CONNECT_RESPONSE; + action = (*linkp->llcl_statehandler)(linkp, frame, naction, 0, 0); + goto once_more_and_again; + } + case LLC_CONNECT_CONFIRM: + /* llc_resend(linkp, LLC_CMD, 0); */ + llc_start(linkp); + break; + case LLC_DISCONNECT_INDICATION: + LLC_TRACE(linkp, LLCTR_INTERESTING, "DISCONNECT INDICATION"); + (*linkp->llcl_sapinfo->si_ctlinput) + (PRC_DISCONNECT_INDICATION, + (struct sockaddr *) &linkp->llcl_addr, linkp->llcl_nlnext); + break; + /* internally visible only */ + case LLC_RESET_CONFIRM: + case LLC_RESET_INDICATION_LOCAL: + /* + * not much we can do here, the state machine either makes it or + * brakes it ... 
+ */ + break; + case LLC_RESET_INDICATION_REMOTE: + LLC_TRACE(linkp, LLCTR_SHOULDKNOW, "RESET INDICATION (REMOTE)"); + action = (*linkp->llcl_statehandler)(linkp, frame, + NL_RESET_RESPONSE, 0, 0); + goto once_more_and_again; + case LLC_FRMR_SENT: + LLC_TRACE(linkp, LLCTR_URGENT, "FRMR SENT"); + break; + case LLC_FRMR_RECEIVED: + LLC_TRACE(linkp, LLCTR_URGEN, "FRMR RECEIVED"); + action = (*linkp->llcl_statehandler)(linkp, frame, + NL_RESET_REQUEST, 0, 0); + + goto once_more_and_again; + case LLC_REMOTE_BUSY: + LLC_TRACE(linkp, LLCTR_SHOULDKNOW, "REMOTE BUSY"); + break; + case LLC_REMOTE_NOT_BUSY: + LLC_TRACE(linkp, LLCTR_SHOULDKNOW, "REMOTE BUSY CLEARED"); + /* + * try to get queued frames out + */ + llc_start(linkp); + break; + } + + /* + * Only LLC_DATA_INDICATION is for the time being + * passed up to the network layer entity. + * The remaining action codes are for the time + * being visible internally only. + * However, this can/may be changed if necessary. + */ + + return action; +} + + +/* + * Core LLC2 routines + */ + +/* + * The INIT call. This routine is called once after the system is booted. + */ + +llc_init() +{ + llcintrq.ifq_maxlen = IFQ_MAXLEN; +} + + +/* + * In case of a link reset we need to shuffle the frames queued inside the + * LLC2 window. 
+ */ + +void +llc_resetwindow(struct llc_linkcb *linkp) +{ + register struct mbuf *mptr = (struct mbuf *) 0; + register struct mbuf *anchor = (struct mbuf *)0; + register short i; + + /* Pick up all queued frames and collect them in a linked mbuf list */ + if (linkp->llcl_slotsfree != linkp->llcl_window) { + i = llc_seq2slot(linkp, linkp->llcl_nr_received); + anchor = mptr = linkp->llcl_output_buffers[i]; + for (; i != linkp->llcl_freeslot; + i = llc_seq2slot(linkp, i+1)) { + if (linkp->llcl_output_buffers[i]) { + mptr->m_nextpkt = linkp->llcl_output_buffers[i]; + mptr = mptr->m_nextpkt; + } else panic("LLC2 window broken"); + } + } + /* clean closure */ + if (mptr) + mptr->m_nextpkt = (struct mbuf *) 0; + + /* Now --- plug 'em in again */ + if (anchor != (struct mbuf *)0) { + for (i = 0, mptr = anchor; mptr != (struct mbuf *) 0; i++) { + linkp->llcl_output_buffers[i] = mptr; + mptr = mptr->m_nextpkt; + linkp->llcl_output_buffers[i]->m_nextpkt = (struct mbuf *)0; + } + linkp->llcl_freeslot = i; + } else linkp->llcl_freeslot = 0; + + /* We're resetting the link, the next frame to be acknowledged is 0 */ + linkp->llcl_nr_received = 0; + + /* set distance between LLC2 sequence number and the top of window to 0 */ + linkp->llcl_projvs = linkp->llcl_freeslot; + + return; +} + +/* + * llc_newlink() --- We allocate enough memory to contain a link control block + * and initialize it properly. We don't intiate the actual + * setup of the LLC2 link here. 
+ */ +struct llc_linkcb * +llc_newlink(struct sockaddr_dl *dst, struct ifnet *ifp, struct rtentry *nlrt, + caddr_t nlnext, struct rtentry *llrt) +{ + struct llc_linkcb *nlinkp; + u_char sap = LLSAPADDR(dst); + short llcwindow; + + + /* allocate memory for link control block */ + MALLOC(nlinkp, struct llc_linkcb *, sizeof(struct llc_linkcb), + M_PCB, M_NOWAIT); + if (nlinkp == 0) + return (NULL); + bzero((caddr_t)nlinkp, sizeof(struct llc_linkcb)); + + /* copy link address */ + sdl_copy(dst, &nlinkp->llcl_addr); + + /* hold on to the network layer route entry */ + nlinkp->llcl_nlrt = nlrt; + + /* likewise the network layer control block */ + nlinkp->llcl_nlnext = nlnext; + + /* jot down the link layer route entry */ + nlinkp->llcl_llrt = llrt; + + /* reset writeq */ + nlinkp->llcl_writeqh = nlinkp->llcl_writeqt = NULL; + + /* setup initial state handler function */ + nlinkp->llcl_statehandler = llc_state_ADM; + + /* hold on to interface pointer */ + nlinkp->llcl_if = ifp; + + /* get service access point information */ + nlinkp->llcl_sapinfo = llc_getsapinfo(sap, ifp); + + /* get window size from SAP info block */ + if ((llcwindow = nlinkp->llcl_sapinfo->si_window) == 0) + llcwindow = LLC_MAX_WINDOW; + + /* allocate memory for window buffer */ + MALLOC(nlinkp->llcl_output_buffers, struct mbuf **, + llcwindow*sizeof(struct mbuf *), M_PCB, M_NOWAIT); + if (nlinkp->llcl_output_buffers == 0) { + FREE(nlinkp, M_PCB); + return(NULL); + } + bzero((caddr_t)nlinkp->llcl_output_buffers, + llcwindow*sizeof(struct mbuf *)); + + /* set window size & slotsfree */ + nlinkp->llcl_slotsfree = nlinkp->llcl_window = llcwindow; + + /* enter into linked listed of link control blocks */ + insque(nlinkp, &llccb_q); + + return(nlinkp); +} + +/* + * llc_dellink() --- farewell to link control block + */ +llc_dellink(struct llc_linkcb *linkp) +{ + register struct mbuf *m; + register struct mbuf *n; + register struct npaidbentry *sapinfo = linkp->llcl_sapinfo; + register i; + + /* notify upper 
layer of imminent death */ + if (linkp->llcl_nlnext && sapinfo->si_ctlinput) + (*sapinfo->si_ctlinput) + (PRC_DISCONNECT_INDICATION, + (struct sockaddr *)&linkp->llcl_addr, linkp->llcl_nlnext); + + /* pull the plug */ + if (linkp->llcl_llrt) + ((struct npaidbentry *)(linkp->llcl_llrt->rt_llinfo))->np_link + = (struct llc_linkcb *) 0; + + /* leave link control block queue */ + remque(linkp); + + /* drop queued packets */ + for (m = linkp->llcl_writeqh; m;) { + n = m->m_act; + m_freem(m); + m = n; + } + + /* drop packets in the window */ + for(i = 0; i < linkp->llcl_window; i++) + if (linkp->llcl_output_buffers[i]) + m_freem(linkp->llcl_output_buffers[i]); + + /* return the window space */ + FREE((caddr_t)linkp->llcl_output_buffers, M_PCB); + + /* return the control block space --- now it's gone ... */ + FREE((caddr_t)linkp, M_PCB); +} + +llc_decode(struct llc* frame, struct llc_linkcb * linkp) +{ + register int ft = LLC_BAD_PDU; + + if ((frame->llc_control & 01) == 0) { + ft = LLCFT_INFO; + /* S or U frame ? */ + } else switch (frame->llc_control) { + + /* U frames */ + case LLC_UI: + case LLC_UI_P: ft = LLC_UI; break; + case LLC_DM: + case LLC_DM_P: ft =LLCFT_DM; break; + case LLC_DISC: + case LLC_DISC_P: ft = LLCFT_DISC; break; + case LLC_UA: + case LLC_UA_P: ft = LLCFT_UA; break; + case LLC_SABME: + case LLC_SABME_P: ft = LLCFT_SABME; break; + case LLC_FRMR: + case LLC_FRMR_P: ft = LLCFT_FRMR; break; + case LLC_XID: + case LLC_XID_P: ft = LLCFT_XID; break; + case LLC_TEST: + case LLC_TEST_P: ft = LLCFT_TEST; break; + + /* S frames */ + case LLC_RR: ft = LLCFT_RR; break; + case LLC_RNR: ft = LLCFT_RNR; break; + case LLC_REJ: ft = LLCFT_REJ; break; + } /* switch */ + + if (linkp) { + switch (ft) { + case LLCFT_INFO: + if (LLCGBITS(frame->llc_control, i_ns) != linkp->llcl_vr) { + ft = LLC_INVALID_NS; + break; + } + /* fall thru --- yeeeeeee */ + case LLCFT_RR: + case LLCFT_RNR: + case LLCFT_REJ: + /* splash! 
*/ + if (LLC_NR_VALID(linkp, LLCGBITS(frame->llc_control_ext, + s_nr)) == 0) + ft = LLC_INVALID_NR; + break; + } + } + + return ft; +} + +/* + * llc_anytimersup() --- Checks if at least one timer is still up and running. + */ +int +llc_anytimersup(struct llc_linkcb * linkp) +{ + register int i; + + FOR_ALL_LLC_TIMERS(i) + if (linkp->llcl_timers[i] > 0) + break; + if (i == LLC_AGE_SHIFT) + return 0; + else return 1; +} + +/* + * llc_link_dump() - dump link info + */ + +#define SAL(s) ((struct sockaddr_dl *)&(s)->llcl_addr) +#define CHECK(l, s) if (LLC_STATEEQ(l, s)) return #s + +char *timer_names[] = {"ACK", "P", "BUSY", "REJ", "AGE"}; + +char * +llc_getstatename(struct llc_linkcb *linkp) +{ + CHECK(linkp, ADM); + CHECK(linkp, CONN); + CHECK(linkp, RESET_WAIT); + CHECK(linkp, RESET_CHECK); + CHECK(linkp, SETUP); + CHECK(linkp, RESET); + CHECK(linkp, D_CONN); + CHECK(linkp, ERROR); + CHECK(linkp, NORMAL); + CHECK(linkp, BUSY); + CHECK(linkp, REJECT); + CHECK(linkp, AWAIT); + CHECK(linkp, AWAIT_BUSY); + CHECK(linkp, AWAIT_REJECT); + + return "UNKNOWN - eh?"; +} + +void +llc_link_dump(struct llc_linkcb* linkp, const char *message) +{ + register int i; + register char *state; + + /* print interface */ + printf("if %s%d\n", linkp->llcl_if->if_name, linkp->llcl_if->if_unit); + + /* print message */ + printf(">> %s <<\n", message); + + /* print MAC and LSAP */ + printf("llc addr "); + for (i = 0; i < (SAL(linkp)->sdl_alen)-2; i++) + printf("%x:", (char)*(LLADDR(SAL(linkp))+i) & 0xff); + printf("%x,", (char)*(LLADDR(SAL(linkp))+i) & 0xff); + printf("%x\n", (char)*(LLADDR(SAL(linkp))+i+1) & 0xff); + + /* print state we're in and timers */ + printf("state %s, ", llc_getstatename(linkp)); + for (i = LLC_ACK_SHIFT; i < LLC_AGE_SHIFT; i++) + printf("%s-%c %d/", timer_names[i], + (linkp->llcl_timerflags & (1<llcl_timers[i]); + printf("%s-%c %d\n", timer_names[i], (linkp->llcl_timerflags & (1<llcl_timers[i]); + + /* print flag values */ + printf("flags P %d/F %d/S %d/DATA 
%d/REMOTE_BUSY %d\n", + LLC_GETFLAG(linkp, P), LLC_GETFLAG(linkp, S), + LLC_GETFLAG(linkp, DATA), LLC_GETFLAG(linkp, REMOTE_BUSY)); + + /* print send and receive state variables, ack, and window */ + printf("V(R) %d/V(S) %d/N(R) received %d/window %d/freeslot %d\n", + linkp->llcl_vs, linkp->llcl_vr, linkp->llcl_nr_received, + linkp->llcl_window, linkp->llcl_freeslot); + + /* further expansions can follow here */ + +} + +void +llc_trace(struct llc_linkcb *linkp, int level, const char *message) +{ + if (linkp->llcl_sapinfo->si_trace && level > llc_tracelevel) + llc_link_dump(linkp, message); + + return; +} diff --git a/bsd/netccitt/llc_timer.c b/bsd/netccitt/llc_timer.c new file mode 100644 index 000000000..6014a31ff --- /dev/null +++ b/bsd/netccitt/llc_timer.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * Dirk Husemann and the Computer Science Department (IV) of + * the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)llc_timer.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + + +/* + * Various timer values. They can be adjusted + * by patching the binary with adb if necessary. + */ +/* ISO 8802-2 timers */ +int llc_n2 = LLC_N2_VALUE; +int llc_ACK_timer = LLC_ACK_TIMER; +int llc_P_timer = LLC_P_TIMER; +int llc_BUSY_timer = LLC_BUSY_TIMER; +int llc_REJ_timer = LLC_REJ_TIMER; +/* Implementation specific timers */ +int llc_AGE_timer = LLC_AGE_TIMER; +int llc_DACTION_timer = LLC_DACTION_TIMER; + +/* + * The timer routine. We are called every 500ms by the kernel. + * Handle the various virtual timers. + */ + +void +llc_timer() +{ + register struct llc_linkcb *linkp; + register struct llc_linkcb *nlinkp; + register int timer; + register int action; + register int s = splimp(); + + /* + * All links are accessible over the doubly linked list llccb_q + */ + if (!LQEMPTY) { + /* + * A for-loop is not that great an idea as the linkp + * might get deleted if the age timer has expired ... 
+ */ + linkp = LQFIRST; + while (LQVALID(linkp)) { + nlinkp = LQNEXT(linkp); + /* + * Check implementation specific timers first + */ + /* The delayed action/acknowledge idle timer */ + switch (LLC_TIMERXPIRED(linkp, DACTION)) { + case LLC_TIMER_RUNNING: + LLC_AGETIMER(linkp, DACTION); + break; + case LLC_TIMER_EXPIRED: { + register int cmdrsp; + register int pollfinal; + + switch (LLC_GETFLAG(linkp, DACTION)) { + case LLC_DACKCMD: + cmdrsp = LLC_CMD, pollfinal = 0; + break; + case LLC_DACKCMDPOLL: + cmdrsp = LLC_CMD, pollfinal = 1; + break; + case LLC_DACKRSP: + cmdrsp = LLC_RSP, pollfinal = 0; + break; + case LLC_DACKRSPFINAL: + cmdrsp = LLC_RSP, pollfinal = 1; + break; + } + llc_send(linkp, LLCFT_RR, cmdrsp, pollfinal); + LLC_STOPTIMER(linkp, DACTION); + break; + } + } + /* The link idle timer */ + switch (LLC_TIMERXPIRED(linkp, AGE)) { + case LLC_TIMER_RUNNING: + LLC_AGETIMER(linkp, AGE); + break; + case LLC_TIMER_EXPIRED: + /* + * Only crunch the link when really no + * timers are running any more. + */ + if (llc_anytimersup(linkp) == 0) { + llc_dellink(linkp); + LLC_STOPTIMER(linkp, AGE); + goto gone; + } else { + LLC_STARTTIMER(linkp, AGE); + } + break; + } + /* + * Now, check all the ISO 8802-2 timers + */ + FOR_ALL_LLC_TIMERS(timer) { + action = 0; + if ((linkp->llcl_timerflags & (1<llcl_timers[timer] == 0)) { + switch (timer) { + case LLC_ACK_SHIFT: + action = LLC_ACK_TIMER_EXPIRED; + break; + case LLC_P_SHIFT: + action = LLC_P_TIMER_EXPIRED; + break; + case LLC_BUSY_SHIFT: + action = LLC_BUSY_TIMER_EXPIRED; + break; + case LLC_REJ_SHIFT: + action = LLC_REJ_TIMER_EXPIRED; + break; + } + linkp->llcl_timerflags &= ~(1<llcl_timers[timer] > 0) + linkp->llcl_timers[timer]--; + } + +gone: linkp = nlinkp; + } + } + splx (s); +} diff --git a/bsd/netccitt/llc_var.h b/bsd/netccitt/llc_var.h new file mode 100644 index 000000000..0bb1d3948 --- /dev/null +++ b/bsd/netccitt/llc_var.h @@ -0,0 +1,680 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Dirk Husemann and the Computer Science Department (IV) of + * the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)llc_var.h 8.1 (Berkeley) 6/10/93 + */ + +#ifdef __STDC__ +/* + * Forward structure declarations for function prototypes [sic]. 
+ */ +struct llc; +#endif + +#define NPAIDB_LINK 0 + +struct npaidbentry { + union { + /* MAC,DLSAP -> CONS */ + struct { + struct llc_linkcb *NE_link; + struct rtentry *NE_rt; + } NE; + /* SAP info for unconfigured incoming calls */ + struct { + u_short SI_class; +#define LLC_CLASS_I 0x1 +#define LLC_CLASS_II 0x3 +#define LLC_CLASS_III 0x4 /* Future */ +#define LLC_CLASS_IV 0x7 /* Future */ + u_short SI_window; + u_short SI_trace; + u_short SI_xchxid; + void (*SI_input) + __P((struct mbuf *)); + caddr_t (*SI_ctlinput) + __P((int, struct sockaddr *, caddr_t)); + } SI; + } NESIun; +}; +#define np_link NESIun.NE.NE_link +#define np_rt NESIun.NE.NE_rt +#define si_class NESIun.SI.SI_class +#define si_window NESIun.SI.SI_window +#define si_trace NESIun.SI.SI_trace +#define si_xchxid NESIun.SI.SI_xchxid +#define si_input NESIun.SI.SI_input +#define si_ctlinput NESIun.SI.SI_ctlinput + +#define NPDL_SAPNETMASK 0x7e + +/* + * Definitions for accessing bitfields/bitslices inside + * LLC2 headers + */ +struct bitslice { + unsigned int bs_mask; + unsigned int bs_shift; +}; + + +#define i_z 0 +#define i_ns 1 +#define i_pf 0 +#define i_nr 1 +#define s_oz 2 +#define s_selector 3 +#define s_pf 0 +#define s_nr 1 +#define u_bb 2 +#define u_select_other 3 +#define u_pf 4 +#define u_select 5 +#define f_vs 1 +#define f_cr 0 +#define f_vr 1 +#define f_wxyzv 6 + +#define LLCGBITS(Arg, Index) (((Arg) & llc_bitslice[(Index)].bs_mask) >> llc_bitslice[(Index)].bs_shift) +#define LLCSBITS(Arg, Index, Val) (Arg) |= (((Val) << llc_bitslice[(Index)].bs_shift) & llc_bitslice[(Index)].bs_mask) +#define LLCCSBITS(Arg, Index, Val) (Arg) = (((Val) << llc_bitslice[(Index)].bs_shift) & llc_bitslice[(Index)].bs_mask) + +extern struct bitslice llc_bitslice[]; + +#define LLC_CMD 0 +#define LLC_RSP 1 +#define LLC_MAXCMDRSP 2 + +/* + * LLC events --- These events may either be frames received from the + * remote LLC DSAP, request from the network layer user, + * timer events from llc_timer(), or diagnostic 
events from + * llc_input(). + */ + +/* LLC frame types */ +#define LLCFT_INFO 0 * LLC_MAXCMDRSP +#define LLCFT_RR 1 * LLC_MAXCMDRSP +#define LLCFT_RNR 2 * LLC_MAXCMDRSP +#define LLCFT_REJ 3 * LLC_MAXCMDRSP +#define LLCFT_DM 4 * LLC_MAXCMDRSP +#define LLCFT_SABME 5 * LLC_MAXCMDRSP +#define LLCFT_DISC 6 * LLC_MAXCMDRSP +#define LLCFT_UA 7 * LLC_MAXCMDRSP +#define LLCFT_FRMR 8 * LLC_MAXCMDRSP +#define LLCFT_UI 9 * LLC_MAXCMDRSP +#define LLCFT_XID 10 * LLC_MAXCMDRSP +#define LLCFT_TEST 11 * LLC_MAXCMDRSP + +/* LLC2 timer events */ +#define LLC_ACK_TIMER_EXPIRED 12 * LLC_MAXCMDRSP +#define LLC_P_TIMER_EXPIRED 13 * LLC_MAXCMDRSP +#define LLC_REJ_TIMER_EXPIRED 14 * LLC_MAXCMDRSP +#define LLC_BUSY_TIMER_EXPIRED 15 * LLC_MAXCMDRSP + +/* LLC2 diagnostic events */ +#define LLC_INVALID_NR 16 * LLC_MAXCMDRSP +#define LLC_INVALID_NS 17 * LLC_MAXCMDRSP +#define LLC_BAD_PDU 18 * LLC_MAXCMDRSP +#define LLC_LOCAL_BUSY_DETECTED 19 * LLC_MAXCMDRSP +#define LLC_LOCAL_BUSY_CLEARED 20 * LLC_MAXCMDRSP + +/* Network layer user requests */ +/* + * NL_CONNECT_REQUEST --- The user has requested that a data link connection + * be established with a remote LLC DSAP. + */ +#define NL_CONNECT_REQUEST 21 * LLC_MAXCMDRSP +/* + * NL_CONNECT_RESPONSE --- The user has accepted the data link connection. + */ +#define NL_CONNECT_RESPONSE 22 * LLC_MAXCMDRSP +/* + * NL_RESET_REQUEST --- The user has requested that the data link with the + * remote LLC DSAP be reset. + */ +#define NL_RESET_REQUEST 23 * LLC_MAXCMDRSP +/* + * NL_RESET_RESPONSE --- The user has accepted the reset of the data link + * connection. + */ +#define NL_RESET_RESPONSE 24 * LLC_MAXCMDRSP +/* + * NL_DISCONNECT_REQUEST --- The user has requested that the data link + * connection with remote LLC DSAP be terminated. + */ +#define NL_DISCONNECT_REQUEST 25 * LLC_MAXCMDRSP +/* + * NL_DATA_REQUEST --- The user has requested that a data unit be sent ot the + * remote LLC DSAP. 
+ */ +#define NL_DATA_REQUEST 26 * LLC_MAXCMDRSP +/* + * NL_INITIATE_PF_CYCLE --- The local LLC wants to initiate a P/F cycle. + */ +#define NL_INITIATE_PF_CYCLE 27 * LLC_MAXCMDRSP +/* + * NL_LOCAL_BUSY_DETECTED --- The local entity has encountered a busy condition + */ +#define NL_LOCAL_BUSY_DETECTED 28 * LLC_MAXCMDRSP + +#define LLCFT_NONE 255 + +/* return message from state handlers */ + +/* + * LLC_CONNECT_INDICATION --- Inform the user that a connection has been + * requested by a remote LLC SSAP. + */ +#define LLC_CONNECT_INDICATION 1 +/* + * LLC_CONNECT_CONFIRM --- The connection service component indicates that the + * remote network entity has accepted the connection. + */ +#define LLC_CONNECT_CONFIRM 2 +/* + * LLC_DISCONNECT_INDICATION --- Inform the user that the remote network + * entity has intiated disconnection of the data + * link connection. + */ +#define LLC_DISCONNECT_INDICATION 3 +/* + * LLC_RESET_CONFIRM --- The connection service component indicates that the + * remote network entity has accepted the reset. + */ +#define LLC_RESET_CONFIRM 4 +/* + * LLC_RESET_INDICATION_REMOTE --- The remote network entity or remote peer + * has initiated a reset of the data link + * connection. + */ +#define LLC_RESET_INDICATION_REMOTE 5 +/* + * LLC_RESET_INDICATION_LOCAL --- The local LLC has determined that the data + * link connection is in need of + * reinitialization. + */ +#define LLC_RESET_INDICATION_LOCAL 6 +/* + * LLC_FRMR_RECEIVED --- The local connection service component has received a + * FRMR response PDU. + */ +#define LLC_FRMR_RECEIVED 7 +/* + * LLC_FRMR_SENT --- The local connection component has received an ivalid + * PDU, and has sent a FRMR response PDU. + */ +#define LLC_FRMR_SENT 8 +/* + * LLC_DATA_INDICATION --- The connection service component passes the data + * unit from the received I PDU to the user. + */ +#define LLC_DATA_INDICATION 9 +/* + * LLC_REMOTE_NOT_BUSY --- The remote LLC DSAP is no longer busy. 
The local + * connection service component will now accept a + * DATA_REQUEST. + */ +#define LLC_REMOTE_NOT_BUSY 10 +/* + * LLC_REMOTE_BUSY --- The remote LLC DSAP is busy. The local connection + * service component will not accept a DATA_REQUEST. + */ +#define LLC_REMOTE_BUSY 11 + +/* Internal return code */ +#define LLC_PASSITON 255 + +#define INFORMATION_CONTROL 0x00 +#define SUPERVISORY_CONTROL 0x02 +#define UNUMBERED_CONTROL 0x03 + +/* + * Other necessary definitions + */ + +#define LLC_MAX_SEQUENCE 128 +#define LLC_MAX_WINDOW 127 +#define LLC_WINDOW_SIZE 7 + +/* + * Don't we love this one? CCITT likes its bits 8=) + */ +#define NLHDRSIZEGUESS 3 + +/* + * LLC control block + */ + +struct llc_linkcb { + struct llccb_q { + struct llccb_q *q_forw; /* admin chain */ + struct llccb_q *q_backw; + } llcl_q; + struct npaidbentry *llcl_sapinfo; /* SAP information */ + struct sockaddr_dl llcl_addr; /* link snpa address */ + struct rtentry *llcl_nlrt; /* layer 3 -> LLC */ + struct rtentry *llcl_llrt; /* LLC -> layer 3 */ + struct ifnet *llcl_if; /* our interface */ + caddr_t llcl_nlnext; /* cb for network layer */ + struct mbuf *llcl_writeqh; /* Write queue head */ + struct mbuf *llcl_writeqt; /* Write queue tail */ + struct mbuf **llcl_output_buffers; + short llcl_timers[6]; /* timer array */ + long llcl_timerflags; /* flags signalling running timers */ + int (*llcl_statehandler) + __P((struct llc_linkcb *, struct llc *, int, int, int)); + int llcl_P_flag; + int llcl_F_flag; + int llcl_S_flag; + int llcl_DATA_flag; + int llcl_REMOTE_BUSY_flag; + int llcl_DACTION_flag; /* delayed action */ + int llcl_retry; + /* + * The following components deal --- in one way or the other --- + * with the LLC2 window. 
Indicated by either [L] or [W] is the + * domain of the specific component: + * + * [L] The domain is 0--LLC_MAX_WINDOW + * [W] The domain is 0--llcl_window + */ + short llcl_vr; /* next to receive [L] */ + short llcl_vs; /* next to send [L] */ + short llcl_nr_received; /* next frame to b ack'd [L] */ + short llcl_freeslot; /* next free slot [W] */ + short llcl_projvs; /* V(S) associated with freeslot */ + short llcl_slotsfree; /* free slots [W] */ + short llcl_window; /* window size */ + /* + * In llcl_frmrinfo we jot down the last frmr info field, which we + * need to do as we need to be able to resend it in the ERROR state. + */ + struct frmrinfo llcl_frmrinfo; /* last FRMR info field */ +}; +#define llcl_frmr_pdu0 llcl_frmrinfo.rej_pdu_0 +#define llcl_frmr_pdu1 llcl_frmrinfo.rej_pdu_1 +#define llcl_frmr_control llcl_frmrinfo.frmr_control +#define llcl_frmr_control_ext llcl_frmrinfo.frmr_control_ext +#define llcl_frmr_cause llcl_frmrinfo.frmr_cause + +#define LQNEXT(l) (struct llc_linkcb *)((l)->llcl_q.q_forw) +#define LQEMPTY (llccb_q.q_forw == &llccb_q) +#define LQFIRST (struct llc_linkcb *)(llccb_q.q_forw) +#define LQVALID(l) (!((struct llccb_q *)(l) == &llccb_q)) + +#define LLC_ENQUEUE(l, m) if ((l)->llcl_writeqh == NULL) { \ + (l)->llcl_writeqh = (m); \ + (l)->llcl_writeqt = (m); \ + } else { \ + (l)->llcl_writeqt->m_nextpkt = (m); \ + (l)->llcl_writeqt = (m); \ + } + +#define LLC_DEQUEUE(l, m) if ((l)->llcl_writeqh == NULL) \ + (m) = NULL; \ + else { \ + (m) = (l)->llcl_writeqh; \ + (l)->llcl_writeqh = (l)->llcl_writeqh->m_nextpkt; \ + } + +#define LLC_SETFRAME(l, m) { \ + if ((l)->llcl_slotsfree > 0) { \ + (l)->llcl_slotsfree--; \ + (l)->llcl_output_buffers[(l)->llcl_freeslot] = (m); \ + (l)->llcl_freeslot = ((l)->llcl_freeslot+1) % (l)->llcl_window; \ + LLC_INC((l)->llcl_projvs); \ + } \ + } + +/* + * handling of sockaddr_dl's + */ + +#define LLADDRLEN(s) ((s)->sdl_alen + (s)->sdl_nlen) +#define LLSAPADDR(s) ((s)->sdl_data[LLADDRLEN(s)-1] & 0xff) 
+#define LLSAPLOC(s, if) ((s)->sdl_nlen + (if)->if_addrlen) + +struct sdl_hdr { + struct sockaddr_dl sdlhdr_dst; + struct sockaddr_dl sdlhdr_src; + long sdlhdr_len; +}; + +#define LLC_GETHDR(f,m) { \ + struct mbuf *_m = (struct mbuf *) (m); \ + if (_m) { \ + M_PREPEND(_m, LLC_ISFRAMELEN, M_DONTWAIT); \ + bzero(mtod(_m, caddr_t), LLC_ISFRAMELEN); \ + } else { \ + MGETHDR (_m, M_DONTWAIT, MT_HEADER); \ + if (_m != NULL) { \ + _m->m_pkthdr.len = _m->m_len = LLC_UFRAMELEN; \ + _m->m_next = _m->m_act = NULL; \ + bzero(mtod(_m, caddr_t), LLC_UFRAMELEN); \ + } else return; \ + } \ + (m) = _m; \ + (f) = mtod(m, struct llc *); \ + } + +#define LLC_NEWSTATE(l, LLCstate) (l)->llcl_statehandler = llc_state_##LLCstate +#define LLC_STATEEQ(l, LLCstate) ((l)->llcl_statehandler == llc_state_##LLCstate ? 1 : 0) + +#define LLC_ACK_SHIFT 0 +#define LLC_P_SHIFT 1 +#define LLC_BUSY_SHIFT 2 +#define LLC_REJ_SHIFT 3 +#define LLC_AGE_SHIFT 4 +#define LLC_DACTION_SHIFT 5 + +#define LLC_TIMER_NOTRUNNING 0 +#define LLC_TIMER_RUNNING 1 +#define LLC_TIMER_EXPIRED 2 + +#define LLC_STARTTIMER(l, LLCtimer) { \ + (l)->llcl_timers[LLC_##LLCtimer##_SHIFT] = llc_##LLCtimer##_timer; \ + (l)->llcl_timerflags |= (1<llcl_timers[LLC_##LLCtimer##_SHIFT] = 0; \ + (l)->llcl_timerflags &= ~(1<llcl_timers[LLC_##LLCtimer##_SHIFT] > 0) \ + (l)->llcl_timers[LLC_##LLCtimer##_SHIFT]--; + +#define LLC_TIMERXPIRED(l, LLCtimer) \ + (((l)->llcl_timerflags & (1<llcl_timers[LLC_##LLCtimer##_SHIFT] == 0 ) ? 
\ + LLC_TIMER_EXPIRED : LLC_TIMER_RUNNING) : LLC_TIMER_NOTRUNNING) + +#define FOR_ALL_LLC_TIMERS(t) for ((t) = LLC_ACK_SHIFT; (t) < LLC_AGE_SHIFT; (t)++) + +#define LLC_SETFLAG(l, LLCflag, v) (l)->llcl_##LLCflag##_flag = (v) +#define LLC_GETFLAG(l, LLCflag) (l)->llcl_##LLCflag##_flag + +#define LLC_RESETCOUNTER(l) { \ + (l)->llcl_vs = (l)->llcl_vr = (l)->llcl_retry = 0; \ + llc_resetwindow((l)); \ + } + +/* + * LLC2 macro definitions + */ + + +#define LLC_START_ACK_TIMER(l) LLC_STARTTIMER((l), ACK) +#define LLC_STOP_ACK_TIMER(l) LLC_STOPTIMER((l), ACK) +#define LLC_START_REJ_TIMER(l) LLC_STARTTIMER((l), REJ) +#define LLC_STOP_REJ_TIMER(l) LLC_STOPTIMER((l), REJ) +#define LLC_START_P_TIMER(l) { \ + LLC_STARTTIMER((l), P); \ + if (LLC_GETFLAG((l), P) == 0) \ + (l)->llcl_retry = 0; \ + LLC_SETFLAG((l), P, 1); \ + } +#define LLC_STOP_P_TIMER(l) { \ + LLC_STOPTIMER((l), P); \ + LLC_SETFLAG((l), P, 0); \ + } +#define LLC_STOP_ALL_TIMERS(l) { \ + LLC_STOPTIMER((l), ACK); \ + LLC_STOPTIMER((l), REJ); \ + LLC_STOPTIMER((l), BUSY); \ + LLC_STOPTIMER((l), P); \ + } + + +#define LLC_INC(i) (i) = ((i)+1) % LLC_MAX_SEQUENCE + +#define LLC_NR_VALID(l, nr) ((l)->llcl_vs < (l)->llcl_nr_received ? \ + (((nr) >= (l)->llcl_nr_received) || \ + ((nr) <= (l)->llcl_vs) ? 1 : 0) : \ + (((nr) <= (l)->llcl_vs) && \ + ((nr) >= (l)->llcl_nr_received) ? 
1 : 0)) + +#define LLC_UPDATE_P_FLAG(l, cr, pf) { \ + if ((cr) == LLC_RSP && (pf) == 1) { \ + LLC_SETFLAG((l), P, 0); \ + LLC_STOPTIMER((l), P); \ + } \ + } + +#define LLC_UPDATE_NR_RECEIVED(l, nr) { \ + while ((l)->llcl_nr_received != (nr)) { \ + struct mbuf *_m; \ + register short seq; \ + if ((_m = (l)->llcl_output_buffers[seq = llc_seq2slot((l), (l)->llcl_nr_received)])) \ + m_freem(_m); \ + (l)->llcl_output_buffers[seq] = NULL; \ + LLC_INC((l)->llcl_nr_received); \ + (l)->llcl_slotsfree++; \ + } \ + (l)->llcl_retry = 0; \ + if ((l)->llcl_slotsfree < (l)->llcl_window) { \ + LLC_START_ACK_TIMER(l); \ + } else LLC_STOP_ACK_TIMER(l); \ + LLC_STARTTIMER((l), DACTION); \ + } + +#define LLC_SET_REMOTE_BUSY(l,a) { \ + if (LLC_GETFLAG((l), REMOTE_BUSY) == 0) { \ + LLC_SETFLAG((l), REMOTE_BUSY, 1); \ + LLC_STARTTIMER((l), BUSY); \ + (a) = LLC_REMOTE_BUSY; \ + } else { \ + (a) = 0; \ + } \ + } +#define LLC_CLEAR_REMOTE_BUSY(l,a) { \ + if (LLC_GETFLAG((l), REMOTE_BUSY) == 1) { \ + LLC_SETFLAG((l), REMOTE_BUSY, 1); \ + LLC_STOPTIMER((l), BUSY); \ + if (LLC_STATEEQ((l), NORMAL) || \ + LLC_STATEEQ((l), REJECT) || \ + LLC_STATEEQ((l), BUSY)) \ + llc_resend((l), LLC_CMD, 0); \ + (a) = LLC_REMOTE_NOT_BUSY; \ + } else { \ + (a) = 0; \ + } \ + } + +#define LLC_DACKCMD 0x1 +#define LLC_DACKCMDPOLL 0x2 +#define LLC_DACKRSP 0x3 +#define LLC_DACKRSPFINAL 0x4 + +#define LLC_SENDACKNOWLEDGE(l, cmd, pf) { \ + if ((cmd) == LLC_CMD) { \ + LLC_SETFLAG((l), DACTION, ((pf) == 0 ? LLC_DACKCMD : LLC_DACKCMDPOLL)); \ + } else { \ + LLC_SETFLAG((l), DACTION, ((pf) == 0 ? 
LLC_DACKRSP : LLC_DACKRSPFINAL)); \ + } \ + } + +#define LLC_FRMR_W (1<<0) +#define LLC_FRMR_X (1<<1) +#define LLC_FRMR_Y (1<<2) +#define LLC_FRMR_Z (1<<3) +#define LLC_FRMR_V (1<<4) + +#define LLC_SETFRMR(l, f, cr, c) { \ + if ((f)->llc_control & 0x3) { \ + (l)->llcl_frmr_pdu0 = (f)->llc_control; \ + (l)->llcl_frmr_pdu1 = 0; \ + } else { \ + (l)->llcl_frmr_pdu0 = (f)->llc_control; \ + (l)->llcl_frmr_pdu1 = (f)->llc_control_ext; \ + } \ + LLCCSBITS((l)->llcl_frmr_control, f_vs, (l)->llcl_vs); \ + LLCCSBITS((l)->llcl_frmr_control_ext, f_cr, (cr)); \ + LLCSBITS((l)->llcl_frmr_control_ext, f_vr, (l)->llcl_vr); \ + LLCCSBITS((l)->llcl_frmr_cause, f_wxyzv, (c)); \ + } + +/* + * LLC tracing levels: + * LLCTR_INTERESTING interesting event, we might care to know about + * it, but then again, we might not ... + * LLCTR_SHOULDKNOW we probably should know about this event + * LLCTR_URGENT something has gone utterly wrong ... + */ +#define LLCTR_INTERESTING 1 +#define LLCTR_SHOULDKNOW 2 +#define LLCTR_URGENT 3 + +#ifdef LLCDEBUG +#define LLC_TRACE(lp, l, msg) llc_trace((lp), (l), (msg)) +#else /* LLCDEBUG */ +#define LLC_TRACE(lp, l, msg) /* NOOP */ +#endif /* LLCDEBUG */ + +#define LLC_N2_VALUE 15 /* up to 15 retries */ +#define LLC_ACK_TIMER 10 /* 5 secs */ +#define LLC_P_TIMER 4 /* 2 secs */ +#define LLC_BUSY_TIMER 12 /* 6 secs */ +#define LLC_REJ_TIMER 12 /* 6 secs */ +#define LLC_AGE_TIMER 40 /* 20 secs */ +#define LLC_DACTION_TIMER 2 /* 1 secs */ + +#if defined (KERNEL) && defined(LLC) +extern int llc_n2; +extern int llc_ACK_timer; +extern int llc_P_timer; +extern int llc_REJ_timer; +extern int llc_BUSY_timer; +extern int llc_AGE_timer; +extern int llc_DACTION_timer; + +extern int af_link_rts_init_done; + +#define USES_AF_LINK_RTS { \ + if (!af_link_rts_init_done) { \ + rn_inithead((void **)&rt_tables[AF_LINK], 32); \ + af_link_rts_init_done++; \ + } \ + } + +struct ifqueue llcintrq; + +extern struct llccb_q llccb_q; +extern char *frame_names[]; + +/* + * Function 
prototypes + */ +int sdl_cmp __P((struct sockaddr_dl *, struct sockaddr_dl *)); +int sdl_copy __P((struct sockaddr_dl *, struct sockaddr_dl *)); +int sdl_swapaddr __P((struct sockaddr_dl *, struct sockaddr_dl *)); +int sdl_checkaddrif __P((struct ifnet *, struct sockaddr_dl *)); +int sdl_setaddrif __P((struct ifnet *, u_char *, u_char, u_char, + struct sockaddr_dl *)); +int sdl_sethdrif __P((struct ifnet *, u_char *, u_char, u_char *, u_char, u_char, + struct sdl_hdr *)); +struct npaidbentry *llc_setsapinfo __P((struct ifnet *, u_char, u_char, + struct dllconfig *)); +struct npaidbentry *llc_getsapinfo __P((u_char, struct ifnet *)); +struct rtentry *npaidb_enrich __P((short, caddr_t, struct sockaddr_dl *)); +int npaidb_destroy __P((struct rtentry *)); +short llc_seq2slot __P((struct llc_linkcb *, short)); +int llc_state_ADM __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_CONN __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_RESET_WAIT __P((struct llc_linkcb *, struct llc *, + int, int, int)); +int llc_state_RESET_CHECK __P((struct llc_linkcb *, struct llc *, + int, int, int)); +int llc_state_SETUP __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_RESET __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_D_CONN __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_ERROR __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_NBRAcore __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_NORMAL __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_BUSY __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_REJECT __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_AWAIT __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_AWAIT_BUSY __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_state_AWAIT_REJECT __P((struct llc_linkcb *, struct llc *, 
int, int, int)); +int llc_statehandler __P((struct llc_linkcb *, struct llc *, int, int, int)); +int llc_init __P((void)); +struct llc_linkcb *llc_newlink __P((struct sockaddr_dl *, struct ifnet *, + struct rtentry *, caddr_t, struct rtentry *)); +int llc_dellink __P((struct llc_linkcb *)); +int llc_anytimersup __P((struct llc_linkcb *)); +char * llc_getstatename __P((struct llc_linkcb *)); +void llc_link_dump __P((struct llc_linkcb *, const char *)); +void llc_trace __P((struct llc_linkcb *, int, const char *)); +void llc_resetwindow __P((struct llc_linkcb *)); +int llc_decode __P((struct llc *, struct llc_linkcb *)); +void llc_timer __P((void)); +void llcintr __P((void)); +int llc_input __P((struct llc_linkcb *, struct mbuf *, u_char)); +caddr_t llc_ctlinput __P((int, struct sockaddr *, caddr_t)); +int llc_output __P((struct llc_linkcb *, struct mbuf *)); +void llc_start __P((struct llc_linkcb *)); +int llc_send __P((struct llc_linkcb *, int, int, int)); +int llc_resend __P((struct llc_linkcb *, int, int)); +int llc_rawsend __P((struct llc_linkcb *, struct mbuf *, struct llc *, int, int, + int, int)); +int cons_rtrequest __P((int, struct rtentry *, struct sockaddr *)); +int x25_llcglue __P((int, struct sockaddr *)); + +#endif + + diff --git a/bsd/netccitt/pk.h b/bsd/netccitt/pk.h new file mode 100644 index 000000000..0a17e3367 --- /dev/null +++ b/bsd/netccitt/pk.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * + * X.25 Packet Level Definitions: + * + */ + +/* Packet type identifier field defintions. */ + +#define X25_CALL 11 +#define X25_CALL_ACCEPTED 15 +#define X25_CLEAR 19 +#define X25_CLEAR_CONFIRM 23 +#define X25_DATA 0 +#define X25_INTERRUPT 35 +#define X25_INTERRUPT_CONFIRM 39 + +#define X25_RR 1 +#define X25_RNR 5 +#define X25_REJECT 9 +#define X25_RESET 27 +#define X25_RESET_CONFIRM 31 +#define X25_DIAGNOSTIC 241 + +#define X25_RESTART 251 +#define X25_RESTART_CONFIRM 255 + +/* Restart cause field definitions. */ + +#define X25_RESTART_DTE_ORIGINATED 0 +#define X25_RESTART_LOCAL_PROCEDURE_ERROR 1 +#define X25_RESTART_NETWORK_CONGESTION 3 +#define X25_RESTART_NETWORK_OPERATIONAL 7 +#define X25_RESTART_DTE_ORIGINATED2 128 + + +/* Miscellaneous definitions. 
*/ + +#define DATA_PACKET_DESIGNATOR 0x01 +#define RR_OR_RNR_PACKET_DESIGNATOR 0x02 +#define RR_PACKET_DESIGNATOR 0x04 + +#define DEFAULT_WINDOW_SIZE 2 +#define MODULUS 8 + +#define ADDRLN 1 +#define MAXADDRLN 15 +#define FACILITIESLN 1 +#define MAXFACILITIESLN 10 +#define MAXUSERDATA 16 +#define MAXCALLINFOLN 1+15+1+10+16 + +#define PACKET_OK 0 +#define IGNORE_PACKET 1 +#define ERROR_PACKET 2 + +typedef char bool; +#define FALSE 0 +#define TRUE 1 + +/* + * X.25 Packet format definitions + * This will eventually have to be rewritten without reference + * to bit fields, to be ansi C compliant and allignment safe. + */ + +typedef u_char octet; + +struct x25_calladdr { + octet addrlens; + octet address_field[MAXADDRLN]; +}; + +struct x25_packet { + octet bits; + octet logical_channel_number; + octet packet_type; + octet packet_data; +}; +#define packet_cause packet_data + +struct data_packet { + octet bits; +}; + +#define FACILITIES_REVERSE_CHARGE 0x1 +#define FACILITIES_THROUGHPUT 0x2 +#define FACILITIES_PACKETSIZE 0x42 +#define FACILITIES_WINDOWSIZE 0x43 + +#define PKHEADERLN 3 + +#define DP(xp) (((struct data_packet *)&(xp) -> packet_type) -> bits) +#define PS(xp) X25GBITS(DP(xp), p_s) +#define PR(xp) X25GBITS(DP(xp), p_r) +#define MBIT(xp) X25GBITS(DP(xp), m_bit) +#define SPR(xp, v) X25SBITS(DP(xp), p_r, (v)) +#define SPS(xp, v) X25SBITS(DP(xp), p_s, (v)) +#define SMBIT(xp, v) X25SBITS(DP(xp), m_bit, (v)) + +#define LCN(xp) (xp -> logical_channel_number + \ + (X25GBITS(xp -> bits, lc_group_number) ? (X25GBITS(xp -> bits, lc_group_number) << 8) : 0)) +#define SET_LCN(xp, lcn) ((xp -> logical_channel_number = lcn), \ + (X25SBITS(xp -> bits, lc_group_number, lcn > 255 ? lcn >> 8 : 0))) + +struct mbuf *pk_template (); + +/* Define X.25 packet level states. */ + +/* Call setup and clearing substates. 
*/ + +#define LISTEN 0 +#define READY 1 +#define RECEIVED_CALL 2 +#define SENT_CALL 3 +#define DATA_TRANSFER 4 +#define RECEIVED_CLEAR 5 +#define SENT_CLEAR 6 + +/* DTE states. */ + +#define DTE_WAITING 7 +#define DTE_RECEIVED_RESTART 8 +#define DTE_SENT_RESTART 9 +#define DTE_READY 0 + +/* Cleaning out ... */ + +#define LCN_ZOMBIE 10 + +#define MAXSTATES 11 + +/* + * The following definitions are used in a switch statement after + * determining the packet type. These values are returned by the + * pk_decode procedure. + */ + +#define CALL 0 * MAXSTATES +#define CALL_ACCEPTED 1 * MAXSTATES +#define CLEAR 2 * MAXSTATES +#define CLEAR_CONF 3 * MAXSTATES +#define DATA 4 * MAXSTATES +#define INTERRUPT 5 * MAXSTATES +#define INTERRUPT_CONF 6 * MAXSTATES +#define RR 7 * MAXSTATES +#define RNR 8 * MAXSTATES +#define RESET 9 * MAXSTATES +#define RESET_CONF 10 * MAXSTATES +#define RESTART 11 * MAXSTATES +#define RESTART_CONF 12 * MAXSTATES +#define REJECT 13 * MAXSTATES +#define DIAG_TYPE 14 * MAXSTATES +#define INVALID_PACKET 15 * MAXSTATES +#define DELETE_PACKET INVALID_PACKET + +/* + * The following definitions are used by the restart procedures + * for noting wether the PLE is supposed to behave as DTE or DCE + * (essentially necessary for operation over LLC2) + */ +#define DTE_DXERESOLVING 0x0001 +#define DTE_PLAYDTE 0x0002 +#define DTE_PLAYDCE 0x0004 +#define DTE_CONNECTPENDING 0x0010 +#define DTE_PRETENDDTE 0x0020 + +#define MAXRESTARTCOLLISIONS 10 diff --git a/bsd/netccitt/pk_acct.c b/bsd/netccitt/pk_acct.c new file mode 100644 index 000000000..c5aa584b3 --- /dev/null +++ b/bsd/netccitt/pk_acct.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_acct.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + + +struct vnode *pkacctp; +/* + * Turn on packet accounting + */ + +pk_accton (path) + char *path; +{ + register struct vnode *vp = NULL; + struct nameidata nd; + struct vnode *oacctp = pkacctp; + struct proc *p = current_proc(); + int error; + + if (path == 0) + goto close; + NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, path, p); + if (error = vn_open (&nd, FWRITE, 0644)) + return (error); + vp = nd.ni_vp; + VOP_UNLOCK(vp); + if (vp -> v_type != VREG) { + vrele (vp); + return (EACCES); + } + pkacctp = vp; + if (oacctp) { + close: + error = vn_close (oacctp, FWRITE, p -> p_ucred, p); + } + return (error); +} + +/* + * Write a record on the accounting file. 
/*
 * pk_acct() -- write one accounting record for logical channel `lcp'
 * to the accounting vnode (pkacctp), if accounting is enabled.
 * No record is written when accounting is off or the channel has no
 * remote address.
 */
pk_acct (lcp)
register struct pklcd *lcp;
{
	register struct vnode *vp;
	register struct sockaddr_x25 *sa;
	register char *src, *dst;
	register int len;
	register long etime;	/* NOTE(review): never used -- candidate for removal */
	/* Scratch record; being static, assumes callers are serialized
	 * (single kernel thread / spl protection) -- TODO confirm. */
	static struct x25acct acbuf;

	if ((vp = pkacctp) == 0)
		return;
	bzero ((caddr_t)&acbuf, sizeof (acbuf));
	/* Prefer the bound remote address; otherwise fall back to the
	 * calling address (an incoming call) and flag the record. */
	if (lcp -> lcd_ceaddr != 0)
		sa = lcp -> lcd_ceaddr;
	else if (lcp -> lcd_craddr != 0) {
		sa = lcp -> lcd_craddr;
		acbuf.x25acct_callin = 1;
	} else
		return;

	if (sa -> x25_opts.op_flags & X25_REVERSE_CHARGE)
		acbuf.x25acct_revcharge = 1;
	acbuf.x25acct_stime = lcp -> lcd_stime;
	/* etime field records the call duration, not an absolute time. */
	acbuf.x25acct_etime = time.tv_sec - acbuf.x25acct_stime;
	acbuf.x25acct_uid = current_proc() -> p_cred -> p_ruid;
	acbuf.x25acct_psize = sa -> x25_opts.op_psize;
	acbuf.x25acct_net = sa -> x25_net;
	/*
	 * Convert address to bcd: pack two ASCII digits per octet,
	 * high nibble first.  OR-ing the low nibble is safe because
	 * acbuf was zeroed above.
	 */
	src = sa -> x25_addr;
	dst = acbuf.x25acct_addr;
	for (len = 0; *src; len++)
		if (len & 01)
			*dst++ |= *src++ & 0xf;
		else
			*dst = *src++ << 4;
	acbuf.x25acct_addrlen = len;

	bcopy (sa -> x25_udata, acbuf.x25acct_udata,
		sizeof (acbuf.x25acct_udata));
	acbuf.x25acct_txcnt = lcp -> lcd_txcnt;
	acbuf.x25acct_rxcnt = lcp -> lcd_rxcnt;

	/* Append the record; write errors are deliberately ignored. */
	(void) vn_rdwr(UIO_WRITE, vp, (caddr_t)&acbuf, sizeof (acbuf),
		(off_t)0, UIO_SYSSPACE, IO_UNIT|IO_APPEND,
		current_proc() -> p_ucred, (int *)0,
		(struct proc *)0);
}
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
/*
 * mbuf_cache() -- keep a ring of copies of recently seen mbuf chains
 * for debugging.  If the configured ring size (mbc_size) differs from
 * the active one (mbc_oldsize), the ring is first reallocated; then a
 * copy of `m' is stored in the next slot, replacing the oldest entry.
 */
mbuf_cache(c, m)
register struct mbuf_cache *c;
struct mbuf *m;
{
	register struct mbuf **mp;

	if (c->mbc_size != c->mbc_oldsize) {
		/* Resize the ring.  new_size is in bytes: one mbuf
		 * pointer per slot (sizeof(m) == sizeof(struct mbuf *)). */
		unsigned zero_size, copy_size;
		unsigned new_size = c->mbc_size * sizeof(m);
		caddr_t cache = (caddr_t)c->mbc_cache;	/* old ring */

		if (new_size) {
			MALLOC(c->mbc_cache, struct mbuf **, new_size, M_MBUF, M_NOWAIT);
			if (c->mbc_cache == 0) {
				/* Allocation failed: keep the old ring. */
				c->mbc_cache = (struct mbuf **)cache;
				return;
			}
			c->mbc_num %= c->mbc_size;
		} else
			c->mbc_cache = 0;
		if (c->mbc_size < c->mbc_oldsize) {
			/* Shrinking: free the cached chains that no
			 * longer fit; the surviving prefix is copied
			 * below. */
			register struct mbuf **mplim;
			mp = c->mbc_size + (struct mbuf **)cache;
			mplim = c->mbc_oldsize + (struct mbuf **)cache;
			while (mp < mplim)
				m_freem(*mp++);
			zero_size = 0;
		} else
			/* Growing: the new tail slots must be zeroed. */
			zero_size = (c->mbc_size - c->mbc_oldsize) * sizeof(m);
		copy_size = new_size - zero_size;
		c->mbc_oldsize = c->mbc_size;
		if (copy_size)
			bcopy(cache, (caddr_t)c->mbc_cache, copy_size);
		if (cache)
			FREE(cache, M_MBUF);
		if (zero_size)
			bzero(copy_size + (caddr_t)c->mbc_cache, zero_size);
	}
	if (c->mbc_size == 0)
		return;
	/* Store a copy of `m' in the next ring slot, recycling
	 * whatever chain was there before. */
	mp = c->mbc_cache + c->mbc_num;
	c->mbc_num = (1 + c->mbc_num) % c->mbc_size;
	if (*mp)
		m_freem(*mp);
	if (*mp = m_copym(m, 0, M_COPYALL, M_DONTWAIT))
		/* 0x08: preserve this m_flags bit on the copy --
		 * NOTE(review): magic constant; name the flag. */
		(*mp)->m_flags |= m->m_flags & 0x08;
}
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (C) Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1992 + * Copyright (c) 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_input.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +struct pkcb_q pkcb_q = {&pkcb_q, &pkcb_q}; + +/* + * ccittintr() is the generic interrupt handler for HDLC, LLC2, and X.25. This + * allows to have kernel running X.25 but no HDLC or LLC2 or both (in case we + * employ boards that do all the stuff themselves, e.g. ADAX X.25 or TPS ISDN.) 
+ */ +void +ccittintr () +{ + extern struct ifqueue pkintrq; + extern struct ifqueue hdintrq; + extern struct ifqueue llcintrq; + +#if HDLC + if (hdintrq.ifq_len) + hdintr (); +#endif +#if LLC + if (llcintrq.ifq_len) + llcintr (); +#endif + if (pkintrq.ifq_len) + pkintr (); +} + +struct pkcb * +pk_newlink (ia, llnext) +struct x25_ifaddr *ia; +caddr_t llnext; +{ + register struct x25config *xcp = &ia -> ia_xc; + register struct pkcb *pkp; + register struct pklcd *lcp; + register struct protosw *pp; + unsigned size; + + pp = pffindproto (AF_CCITT, (int) xcp -> xc_lproto, 0); + if (pp == 0 || pp -> pr_output == 0) { + pk_message (0, xcp, "link level protosw error"); + return ((struct pkcb *)0); + } + /* + * Allocate a network control block structure + */ + size = sizeof (struct pkcb); +// pkp = (struct pkcb *) malloc (size, M_PCB, M_WAITOK); + MALLOC(pkp, struct pkcb *, size, M_PCB, M_WAITOK); + if (pkp == 0) + return ((struct pkcb *)0); + bzero ((caddr_t) pkp, size); + pkp -> pk_lloutput = pp -> pr_output; + pkp -> pk_llctlinput = (caddr_t (*)()) pp -> pr_ctlinput; + pkp -> pk_xcp = xcp; + pkp -> pk_ia = ia; + pkp -> pk_state = DTE_WAITING; + pkp -> pk_llnext = llnext; + insque (pkp, &pkcb_q); + + /* + * set defaults + */ + + if (xcp -> xc_pwsize == 0) + xcp -> xc_pwsize = DEFAULT_WINDOW_SIZE; + if (xcp -> xc_psize == 0) + xcp -> xc_psize = X25_PS128; + /* + * Allocate logical channel descriptor vector + */ + + (void) pk_resize (pkp); + return (pkp); +} + + +pk_dellink (pkp) +register struct pkcb *pkp; +{ + register int i; + register struct protosw *pp; + + /* + * Essentially we have the choice to + * (a) go ahead and let the route be deleted and + * leave the pkcb associated with that route + * as it is, i.e. the connections stay open + * (b) do a pk_disconnect() on all channels associated + * with the route via the pkcb and then proceed. 
/*
 * pk_resize() -- (re)allocate the logical channel descriptor vector
 * pk_chan[] so it matches the configured maximum LCN (xc_maxlcn).
 * Slot 0 is the link's own control channel (lcn 0); its descriptor
 * is preserved across a resize.  Returns 0 on success or ENOBUFS.
 */
pk_resize (pkp)
register struct pkcb *pkp;
{
	struct pklcd *dev_lcp = 0;
	struct x25config *xcp = pkp -> pk_xcp;
	if (pkp -> pk_chan &&
	    (pkp -> pk_maxlcn != xcp -> xc_maxlcn)) {
		/* Vector exists but is the wrong size: restart the
		 * packet level, save the lcn-0 descriptor, free the
		 * old vector. */
		pk_restart (pkp, X25_RESTART_NETWORK_CONGESTION);
		dev_lcp = pkp -> pk_chan[0];
		FREE((caddr_t) pkp -> pk_chan, M_IFADDR);
		pkp -> pk_chan = 0;
	}
	if (pkp -> pk_chan == 0) {
		unsigned size;
		pkp -> pk_maxlcn = xcp -> xc_maxlcn;
		size = (pkp -> pk_maxlcn + 1) * sizeof (struct pklcd *);
		MALLOC(pkp->pk_chan, struct pklcd **, size, M_IFADDR, M_WAITOK);
		if (pkp -> pk_chan) {
			bzero ((caddr_t) pkp -> pk_chan, size);
			/*
			 * Allocate a logical channel descriptor for lcn 0
			 * (unless one was carried over from the old vector).
			 */
			if (dev_lcp == 0 &&
			    (dev_lcp = pk_attach ((struct socket *)0)) == 0)
				return (ENOBUFS);
			dev_lcp -> lcd_state = READY;
			dev_lcp -> lcd_pkp = pkp;
			pkp -> pk_chan[0] = dev_lcp;
		} else {
			/* Vector allocation failed; drop any saved
			 * lcn-0 descriptor so it is not leaked. */
			if (dev_lcp)
				pk_close (dev_lcp);
			return (ENOBUFS);
		}
	}
	return 0;
}
PACKET INPUT + * + * This procedure is called by a link level procedure whenever + * an information frame is received. It decodes the packet and + * demultiplexes based on the logical channel number. + * + * We change the original conventions of the UBC code here -- + * since there may be multiple pkcb's for a given interface + * of type 802.2 class 2, we retrieve which one it is from + * m_pkthdr.rcvif (which has been overwritten by lower layers); + * That field is then restored for the benefit of upper layers which + * may make use of it, such as CLNP. + * + */ + +#define RESTART_DTE_ORIGINATED(xp) (((xp) -> packet_cause == X25_RESTART_DTE_ORIGINATED) || \ + ((xp) -> packet_cause >= X25_RESTART_DTE_ORIGINATED2)) + +pk_input (m) +register struct mbuf *m; +{ + register struct x25_packet *xp; + register struct pklcd *lcp; + register struct socket *so = 0; + register struct pkcb *pkp; + int ptype, lcn, lcdstate = LISTEN; + + if (pk_input_cache.mbc_size || pk_input_cache.mbc_oldsize) + mbuf_cache (&pk_input_cache, m); + if ((m -> m_flags & M_PKTHDR) == 0) + panic ("pkintr"); + + if ((pkp = (struct pkcb *) m -> m_pkthdr.rcvif) == 0) + return; + xp = mtod (m, struct x25_packet *); + ptype = pk_decode (xp); + lcn = LCN(xp); + lcp = pkp -> pk_chan[lcn]; + + /* + * If the DTE is in Restart state, then it will ignore data, + * interrupt, call setup and clearing, flow control and reset + * packets. 
+ */ + if (lcn < 0 || lcn > pkp -> pk_maxlcn) { + pk_message (lcn, pkp -> pk_xcp, "illegal lcn"); + m_freem (m); + return; + } + + pk_trace (pkp -> pk_xcp, m, "P-In"); + + if (pkp -> pk_state != DTE_READY && ptype != RESTART && ptype != RESTART_CONF) { + m_freem (m); + return; + } + if (lcp) { + so = lcp -> lcd_so; + lcdstate = lcp -> lcd_state; + } else { + if (ptype == CLEAR) { /* idle line probe (Datapac specific) */ + /* send response on lcd 0's output queue */ + lcp = pkp -> pk_chan[0]; + lcp -> lcd_template = pk_template (lcn, X25_CLEAR_CONFIRM); + pk_output (lcp); + m_freem (m); + return; + } + if (ptype != CALL) + ptype = INVALID_PACKET; + } + + if (lcn == 0 && ptype != RESTART && ptype != RESTART_CONF) { + pk_message (0, pkp -> pk_xcp, "illegal ptype (%d, %s) on lcn 0", + ptype, pk_name[ptype / MAXSTATES]); + if (pk_bad_packet) + m_freem (pk_bad_packet); + pk_bad_packet = m; + return; + } + + m -> m_pkthdr.rcvif = pkp -> pk_ia -> ia_ifp; + + switch (ptype + lcdstate) { + /* + * Incoming Call packet received. + */ + case CALL + LISTEN: + pk_incoming_call (pkp, m); + break; + + /* + * Call collision: Just throw this "incoming call" away since + * the DCE will ignore it anyway. + */ + case CALL + SENT_CALL: + pk_message ((int) lcn, pkp -> pk_xcp, + "incoming call collision"); + break; + + /* + * Call confirmation packet received. This usually means our + * previous connect request is now complete. + */ + case CALL_ACCEPTED + SENT_CALL: + MCHTYPE(m, MT_CONTROL); + pk_call_accepted (lcp, m); + break; + + /* + * This condition can only happen if the previous state was + * SENT_CALL. Just ignore the packet, eventually a clear + * confirmation should arrive. + */ + case CALL_ACCEPTED + SENT_CLEAR: + break; + + /* + * Clear packet received. This requires a complete tear down + * of the virtual circuit. Free buffers and control blocks. + * and send a clear confirmation. 
+ */ + case CLEAR + READY: + case CLEAR + RECEIVED_CALL: + case CLEAR + SENT_CALL: + case CLEAR + DATA_TRANSFER: + lcp -> lcd_state = RECEIVED_CLEAR; + lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_CLEAR_CONFIRM); + pk_output (lcp); + pk_clearcause (pkp, xp); + if (lcp -> lcd_upper) { + MCHTYPE(m, MT_CONTROL); + lcp -> lcd_upper (lcp, m); + } + pk_close (lcp); + lcp = 0; + break; + + /* + * Clear collision: Treat this clear packet as a confirmation. + */ + case CLEAR + SENT_CLEAR: + pk_close (lcp); + break; + + /* + * Clear confirmation received. This usually means the virtual + * circuit is now completely removed. + */ + case CLEAR_CONF + SENT_CLEAR: + pk_close (lcp); + break; + + /* + * A clear confirmation on an unassigned logical channel - just + * ignore it. Note: All other packets on an unassigned channel + * results in a clear. + */ + case CLEAR_CONF + READY: + case CLEAR_CONF + LISTEN: + break; + + /* + * Data packet received. Pass on to next level. Move the Q and M + * bits into the data portion for the next level. + */ + case DATA + DATA_TRANSFER: + if (lcp -> lcd_reset_condition) { + ptype = DELETE_PACKET; + break; + } + + /* + * Process the P(S) flow control information in this Data packet. + * Check that the packets arrive in the correct sequence and that + * they are within the "lcd_input_window". Input window rotation is + * initiated by the receive interface. 
+ */ + + if (PS(xp) != ((lcp -> lcd_rsn + 1) % MODULUS) || + PS(xp) == ((lcp -> lcd_input_window + lcp -> lcd_windowsize) % MODULUS)) { + m_freem (m); + pk_procerror (RESET, lcp, "p(s) flow control error", 1); + break; + } + lcp -> lcd_rsn = PS(xp); + + if (pk_ack (lcp, PR(xp)) != PACKET_OK) { + m_freem (m); + break; + } + m -> m_data += PKHEADERLN; + m -> m_len -= PKHEADERLN; + m -> m_pkthdr.len -= PKHEADERLN; + + lcp -> lcd_rxcnt++; + if (lcp -> lcd_flags & X25_MBS_HOLD) { + register struct mbuf *n = lcp -> lcd_cps; + int mbit = MBIT(xp); + octet q_and_d_bits; + + if (n) { + n -> m_pkthdr.len += m -> m_pkthdr.len; + while (n -> m_next) + n = n -> m_next; + n -> m_next = m; + m = lcp -> lcd_cps; + + if (lcp -> lcd_cpsmax && + n -> m_pkthdr.len > lcp -> lcd_cpsmax) { + pk_procerror (RESET, lcp, + "C.P.S. overflow", 128); + return; + } + q_and_d_bits = 0xc0 & *(octet *) xp; + xp = (struct x25_packet *) + (mtod (m, octet *) - PKHEADERLN); + *(octet *) xp |= q_and_d_bits; + } + if (mbit) { + lcp -> lcd_cps = m; + pk_flowcontrol (lcp, 0, 1); + return; + } + lcp -> lcd_cps = 0; + } + if (so == 0) + break; + if (lcp -> lcd_flags & X25_MQBIT) { + octet t = (X25GBITS(xp -> bits, q_bit)) ? t = 0x80 : 0; + + if (MBIT(xp)) + t |= 0x40; + m -> m_data -= 1; + m -> m_len += 1; + m -> m_pkthdr.len += 1; + *mtod (m, octet *) = t; + } + + /* + * Discard Q-BIT packets if the application + * doesn't want to be informed of M and Q bit status + */ + if (X25GBITS(xp -> bits, q_bit) + && (lcp -> lcd_flags & X25_MQBIT) == 0) { + m_freem (m); + /* + * NB. This is dangerous: sending a RR here can + * cause sequence number errors if a previous data + * packet has not yet been passed up to the application + * (RR's are normally generated via PRU_RCVD). + */ + pk_flowcontrol (lcp, 0, 1); + } else { + sbappendrecord (&so -> so_rcv, m); + sorwakeup (so); + } + break; + + /* + * Interrupt packet received. 
+ */ + case INTERRUPT + DATA_TRANSFER: + if (lcp -> lcd_reset_condition) + break; + lcp -> lcd_intrdata = xp -> packet_data; + lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_INTERRUPT_CONFIRM); + pk_output (lcp); + m -> m_data += PKHEADERLN; + m -> m_len -= PKHEADERLN; + m -> m_pkthdr.len -= PKHEADERLN; + MCHTYPE(m, MT_OOBDATA); + if (so) { + if (so -> so_options & SO_OOBINLINE) + sbinsertoob (&so -> so_rcv, m); + else + m_freem (m); + sohasoutofband (so); + } + break; + + /* + * Interrupt confirmation packet received. + */ + case INTERRUPT_CONF + DATA_TRANSFER: + if (lcp -> lcd_reset_condition) + break; + if (lcp -> lcd_intrconf_pending == TRUE) + lcp -> lcd_intrconf_pending = FALSE; + else + pk_procerror (RESET, lcp, "unexpected packet", 43); + break; + + /* + * Receiver ready received. Rotate the output window and output + * any data packets waiting transmission. + */ + case RR + DATA_TRANSFER: + if (lcp -> lcd_reset_condition || + pk_ack (lcp, PR(xp)) != PACKET_OK) { + ptype = DELETE_PACKET; + break; + } + if (lcp -> lcd_rnr_condition == TRUE) + lcp -> lcd_rnr_condition = FALSE; + pk_output (lcp); + break; + + /* + * Receiver Not Ready received. Packets up to the P(R) can be + * be sent. Condition is cleared with a RR. + */ + case RNR + DATA_TRANSFER: + if (lcp -> lcd_reset_condition || + pk_ack (lcp, PR(xp)) != PACKET_OK) { + ptype = DELETE_PACKET; + break; + } + lcp -> lcd_rnr_condition = TRUE; + break; + + /* + * Reset packet received. Set state to FLOW_OPEN. The Input and + * Output window edges ar set to zero. Both the send and receive + * numbers are reset. A confirmation is returned. + */ + case RESET + DATA_TRANSFER: + if (lcp -> lcd_reset_condition) + /* Reset collision. Just ignore packet. 
*/ + break; + + pk_resetcause (pkp, xp); + lcp -> lcd_window_condition = lcp -> lcd_rnr_condition = + lcp -> lcd_intrconf_pending = FALSE; + lcp -> lcd_output_window = lcp -> lcd_input_window = + lcp -> lcd_last_transmitted_pr = 0; + lcp -> lcd_ssn = 0; + lcp -> lcd_rsn = MODULUS - 1; + + lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_RESET_CONFIRM); + pk_output (lcp); + + pk_flush (lcp); + if (so == 0) + break; + wakeup ((caddr_t) & so -> so_timeo); + sorwakeup (so); + sowwakeup (so); + break; + + /* + * Reset confirmation received. + */ + case RESET_CONF + DATA_TRANSFER: + if (lcp -> lcd_reset_condition) { + lcp -> lcd_reset_condition = FALSE; + pk_output (lcp); + } + else + pk_procerror (RESET, lcp, "unexpected packet", 32); + break; + + case DATA + SENT_CLEAR: + ptype = DELETE_PACKET; + case RR + SENT_CLEAR: + case RNR + SENT_CLEAR: + case INTERRUPT + SENT_CLEAR: + case INTERRUPT_CONF + SENT_CLEAR: + case RESET + SENT_CLEAR: + case RESET_CONF + SENT_CLEAR: + /* Just ignore p if we have sent a CLEAR already. + */ + break; + + /* + * Restart sets all the permanent virtual circuits to the "Data + * Transfer" stae and all the switched virtual circuits to the + * "Ready" state. + */ + case RESTART + READY: + switch (pkp -> pk_state) { + case DTE_SENT_RESTART: + /* + * Restart collision. 
+ * If case the restart cause is "DTE originated" we + * have a DTE-DTE situation and are trying to resolve + * who is going to play DTE/DCE [ISO 8208:4.2-4.5] + */ + if (RESTART_DTE_ORIGINATED(xp)) { + pk_restart (pkp, X25_RESTART_DTE_ORIGINATED); + pk_message (0, pkp -> pk_xcp, + "RESTART collision"); + if ((pkp -> pk_restartcolls++) > MAXRESTARTCOLLISIONS) { + pk_message (0, pkp -> pk_xcp, + "excessive RESTART collisions"); + pkp -> pk_restartcolls = 0; + } + break; + } + pkp -> pk_state = DTE_READY; + pkp -> pk_dxerole |= DTE_PLAYDTE; + pkp -> pk_dxerole &= ~DTE_PLAYDCE; + pk_message (0, pkp -> pk_xcp, + "Packet level operational"); + pk_message (0, pkp -> pk_xcp, + "Assuming DTE role"); + if (pkp -> pk_dxerole & DTE_CONNECTPENDING) + pk_callcomplete (pkp); + break; + + default: + pk_restart (pkp, -1); + pk_restartcause (pkp, xp); + pkp -> pk_chan[0] -> lcd_template = pk_template (0, + X25_RESTART_CONFIRM); + pk_output (pkp -> pk_chan[0]); + pkp -> pk_state = DTE_READY; + pkp -> pk_dxerole |= RESTART_DTE_ORIGINATED(xp) ? DTE_PLAYDCE : + DTE_PLAYDTE; + if (pkp -> pk_dxerole & DTE_PLAYDTE) { + pkp -> pk_dxerole &= ~DTE_PLAYDCE; + pk_message (0, pkp -> pk_xcp, + "Assuming DTE role"); + } else { + pkp -> pk_dxerole &= ~DTE_PLAYDTE; + pk_message (0, pkp -> pk_xcp, + "Assuming DCE role"); + } + if (pkp -> pk_dxerole & DTE_CONNECTPENDING) + pk_callcomplete (pkp); + } + break; + + /* + * Restart confirmation received. All logical channels are set + * to READY. + */ + case RESTART_CONF + READY: + switch (pkp -> pk_state) { + case DTE_SENT_RESTART: + pkp -> pk_state = DTE_READY; + pkp -> pk_dxerole |= DTE_PLAYDTE; + pkp -> pk_dxerole &= ~DTE_PLAYDCE; + pk_message (0, pkp -> pk_xcp, + "Packet level operational"); + pk_message (0, pkp -> pk_xcp, + "Assuming DTE role"); + if (pkp -> pk_dxerole & DTE_CONNECTPENDING) + pk_callcomplete (pkp); + break; + + default: + /* Restart local procedure error. 
*/ + pk_restart (pkp, X25_RESTART_LOCAL_PROCEDURE_ERROR); + pkp -> pk_state = DTE_SENT_RESTART; + pkp -> pk_dxerole &= ~(DTE_PLAYDTE | DTE_PLAYDCE); + } + break; + + default: + if (lcp) { + pk_procerror (CLEAR, lcp, "unknown packet error", 33); + pk_message (lcn, pkp -> pk_xcp, + "\"%s\" unexpected in \"%s\" state", + pk_name[ptype/MAXSTATES], pk_state[lcdstate]); + } else + pk_message (lcn, pkp -> pk_xcp, + "packet arrived on unassigned lcn"); + break; + } + if (so == 0 && lcp && lcp -> lcd_upper && lcdstate == DATA_TRANSFER) { + if (ptype != DATA && ptype != INTERRUPT) + MCHTYPE(m, MT_CONTROL); + lcp -> lcd_upper (lcp, m); + } else if (ptype != DATA && ptype != INTERRUPT) + m_freem (m); +} + +static +prune_dnic (from, to, dnicname, xcp) +char *from, *to, *dnicname; +register struct x25config *xcp; +{ + register char *cp1 = from, *cp2 = from; + if (xcp -> xc_prepnd0 && *cp1 == '0') { + from = ++cp1; + goto copyrest; + } + if (xcp -> xc_nodnic) { + for (cp1 = dnicname; *cp2 = *cp1++;) + cp2++; + cp1 = from; + } +copyrest: + for (cp1 = dnicname; *cp2 = *cp1++;) + cp2++; +} +/* static */ +pk_simple_bsd (from, to, lower, len) +register octet *from, *to; +register len, lower; +{ + register int c; + while (--len >= 0) { + c = *from; + if (lower & 0x01) + *from++; + else + c >>= 4; + c &= 0x0f; c |= 0x30; *to++ = c; lower++; + } + *to = 0; +} + +/*static octet * */ +pk_from_bcd (a, iscalling, sa, xcp) +register struct x25_calladdr *a; +int iscalling; +register struct sockaddr_x25 *sa; +register struct x25config *xcp; +{ + octet buf[MAXADDRLN+1]; + octet *cp; + unsigned count; + + bzero ((caddr_t) sa, sizeof (*sa)); + sa -> x25_len = sizeof (*sa); + sa -> x25_family = AF_CCITT; + if (iscalling) { + cp = a -> address_field + (X25GBITS(a -> addrlens, called_addrlen) / 2); + count = X25GBITS(a -> addrlens, calling_addrlen); + pk_simple_bsd (cp, buf, X25GBITS(a -> addrlens, called_addrlen), count); + } else { + count = X25GBITS(a -> addrlens, called_addrlen); + pk_simple_bsd 
(a -> address_field, buf, 0, count); + } + if (xcp -> xc_addr.x25_net && (xcp -> xc_nodnic || xcp -> xc_prepnd0)) { + octet dnicname[sizeof (long) * NBBY/3 + 2]; + + sprintf ((char *) dnicname, "%d", xcp -> xc_addr.x25_net); + prune_dnic ((char *) buf, sa -> x25_addr, dnicname, xcp); + } else + bcopy ((caddr_t) buf, (caddr_t) sa -> x25_addr, count + 1); +} + +static +save_extra (m0, fp, so) +struct mbuf *m0; +octet *fp; +struct socket *so; +{ + register struct mbuf *m; + struct cmsghdr cmsghdr; + if (m = m_copy (m, 0, (int)M_COPYALL)) { + int off = fp - mtod (m0, octet *); + int len = m -> m_pkthdr.len - off + sizeof (cmsghdr); + cmsghdr.cmsg_len = len; + cmsghdr.cmsg_level = AF_CCITT; + cmsghdr.cmsg_type = PK_FACILITIES; + m_adj (m, off); + M_PREPEND (m, sizeof (cmsghdr), M_DONTWAIT); + if (m == 0) + return; + bcopy ((caddr_t)&cmsghdr, mtod (m, caddr_t), sizeof (cmsghdr)); + MCHTYPE(m, MT_CONTROL); + sbappendrecord (&so -> so_rcv, m); + } +} + +/* + * This routine handles incoming call packets. It matches the protocol + * field on the Call User Data field (usually the first four bytes) with + * sockets awaiting connections. + */ + +pk_incoming_call (pkp, m0) +struct mbuf *m0; +struct pkcb *pkp; +{ + register struct pklcd *lcp = 0, *l; + register struct sockaddr_x25 *sa; + register struct x25_calladdr *a; + register struct socket *so = 0; + struct x25_packet *xp = mtod (m0, struct x25_packet *); + struct mbuf *m; + struct x25config *xcp = pkp -> pk_xcp; + int len = m0 -> m_pkthdr.len; + int udlen; + char *errstr = "server unavailable"; + octet *u, *facp; + int lcn = LCN(xp); + + /* First, copy the data from the incoming call packet to a X25 address + descriptor. 
It is to be regretted that you have + to parse the facilities into a sockaddr to determine + if reverse charging is being requested */ + if ((m = m_get (M_DONTWAIT, MT_SONAME)) == 0) + return; + sa = mtod (m, struct sockaddr_x25 *); + a = (struct x25_calladdr *) &xp -> packet_data; + facp = u = (octet *) (a -> address_field + + ((X25GBITS(a -> addrlens, called_addrlen) + X25GBITS(a -> addrlens, calling_addrlen) + 1) / 2)); + u += *u + 1; + udlen = min (16, ((octet *) xp) + len - u); + if (udlen < 0) + udlen = 0; + pk_from_bcd (a, 1, sa, pkp -> pk_xcp); /* get calling address */ + pk_parse_facilities (facp, sa); + bcopy ((caddr_t) u, sa -> x25_udata, udlen); + sa -> x25_udlen = udlen; + + /* + * Now, loop through the listen sockets looking for a match on the + * PID. That is the first few octets of the user data field. + * This is the closest thing to a port number for X.25 packets. + * It does provide a way of multiplexing services at the user level. + */ + + for (l = pk_listenhead; l; l = l -> lcd_listen) { + struct sockaddr_x25 *sxp = l -> lcd_ceaddr; + + if (bcmp (sxp -> x25_udata, u, sxp -> x25_udlen)) + continue; + if (sxp -> x25_net && + sxp -> x25_net != xcp -> xc_addr.x25_net) + continue; + /* + * don't accept incoming calls with the D-Bit on + * unless the server agrees + */ + if (X25GBITS(xp -> bits, d_bit) && !(sxp -> x25_opts.op_flags & X25_DBIT)) { + errstr = "incoming D-Bit mismatch"; + break; + } + /* + * don't accept incoming collect calls unless + * the server sets the reverse charging option. + */ + if ((sxp -> x25_opts.op_flags & (X25_OLDSOCKADDR|X25_REVERSE_CHARGE)) == 0 && + sa -> x25_opts.op_flags & X25_REVERSE_CHARGE) { + errstr = "incoming collect call refused"; + break; + } + if (l -> lcd_so) { + if (so = sonewconn (l -> lcd_so, SS_ISCONNECTED)) + lcp = (struct pklcd *) so -> so_pcb; + } else + lcp = pk_attach ((struct socket *) 0); + if (lcp == 0) { + /* + * Insufficient space or too many unaccepted + * connections. 
Just throw the call away. + */ + errstr = "server malfunction"; + break; + } + lcp -> lcd_upper = l -> lcd_upper; + lcp -> lcd_upnext = l -> lcd_upnext; + lcp -> lcd_lcn = lcn; + lcp -> lcd_state = RECEIVED_CALL; + sa -> x25_opts.op_flags |= (sxp -> x25_opts.op_flags & + ~X25_REVERSE_CHARGE) | l -> lcd_flags; + pk_assoc (pkp, lcp, sa); + lcp -> lcd_faddr = *sa; + lcp -> lcd_laddr.x25_udlen = sxp -> x25_udlen; + lcp -> lcd_craddr = &lcp -> lcd_faddr; + lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_CALL_ACCEPTED); + if (lcp -> lcd_flags & X25_DBIT) { + if (X25GBITS(xp -> bits, d_bit)) + X25SBITS(mtod (lcp -> lcd_template, + struct x25_packet *) -> bits, d_bit, 1); + else + lcp -> lcd_flags &= ~X25_DBIT; + } + if (so) { + pk_output (lcp); + soisconnected (so); + if (so -> so_options & SO_OOBINLINE) + save_extra (m0, facp, so); + } else if (lcp -> lcd_upper) { + (*lcp -> lcd_upper) (lcp, m0); + } + (void) m_free (m); + return; + } + + /* + * If the call fails for whatever reason, we still need to build a + * skeleton LCD in order to be able to properly receive the CLEAR + * CONFIRMATION. 
+ */ +#ifdef WATERLOO /* be explicit */ + if (l == 0 && bcmp (sa -> x25_udata, "ean", 3) == 0) + pk_message (lcn, pkp -> pk_xcp, "host=%s ean%c: %s", + sa -> x25_addr, sa -> x25_udata[3] & 0xff, errstr); + else if (l == 0 && bcmp (sa -> x25_udata, "\1\0\0\0", 4) == 0) + pk_message (lcn, pkp -> pk_xcp, "host=%s x29d: %s", + sa -> x25_addr, errstr); + else +#endif + pk_message (lcn, pkp -> pk_xcp, "host=%s pid=%x %x %x %x: %s", + sa -> x25_addr, sa -> x25_udata[0] & 0xff, + sa -> x25_udata[1] & 0xff, sa -> x25_udata[2] & 0xff, + sa -> x25_udata[3] & 0xff, errstr); + if ((lcp = pk_attach ((struct socket *)0)) == 0) { + (void) m_free (m); + return; + } + lcp -> lcd_lcn = lcn; + lcp -> lcd_state = RECEIVED_CALL; + pk_assoc (pkp, lcp, sa); + (void) m_free (m); + pk_clear (lcp, 0, 1); +} + +pk_call_accepted (lcp, m) +struct pklcd *lcp; +struct mbuf *m; +{ + register struct x25_calladdr *ap; + register octet *fcp; + struct x25_packet *xp = mtod (m, struct x25_packet *); + int len = m -> m_len; + + lcp -> lcd_state = DATA_TRANSFER; + if (lcp -> lcd_so) + soisconnected (lcp -> lcd_so); + if ((lcp -> lcd_flags & X25_DBIT) && (X25GBITS(xp -> bits, d_bit) == 0)) + lcp -> lcd_flags &= ~X25_DBIT; + if (len > 3) { + ap = (struct x25_calladdr *) &xp -> packet_data; + fcp = (octet *) ap -> address_field + (X25GBITS(ap -> addrlens, calling_addrlen) + + X25GBITS(ap -> addrlens, called_addrlen) + 1) / 2; + if (fcp + *fcp <= ((octet *) xp) + len) + pk_parse_facilities (fcp, lcp -> lcd_ceaddr); + } + pk_assoc (lcp -> lcd_pkp, lcp, lcp -> lcd_ceaddr); + if (lcp -> lcd_so == 0 && lcp -> lcd_upper) + lcp -> lcd_upper (lcp, m); +} + +pk_parse_facilities (fcp, sa) +register octet *fcp; +register struct sockaddr_x25 *sa; +{ + register octet *maxfcp; + + maxfcp = fcp + *fcp; + fcp++; + while (fcp < maxfcp) { + /* + * Ignore national DCE or DTE facilities + */ + if (*fcp == 0 || *fcp == 0xff) + break; + switch (*fcp) { + case FACILITIES_WINDOWSIZE: + sa -> x25_opts.op_wsize = fcp[1]; + fcp += 3; 
+ break; + + case FACILITIES_PACKETSIZE: + sa -> x25_opts.op_psize = fcp[1]; + fcp += 3; + break; + + case FACILITIES_THROUGHPUT: + sa -> x25_opts.op_speed = fcp[1]; + fcp += 2; + break; + + case FACILITIES_REVERSE_CHARGE: + if (fcp[1] & 01) + sa -> x25_opts.op_flags |= X25_REVERSE_CHARGE; + /* + * Datapac specific: for a X.25(1976) DTE, bit 2 + * indicates a "hi priority" (eg. international) call. + */ + if (fcp[1] & 02 && sa -> x25_opts.op_psize == 0) + sa -> x25_opts.op_psize = X25_PS128; + fcp += 2; + break; + + default: +/*printf("unknown facility %x, class=%d\n", *fcp, (*fcp & 0xc0) >> 6);*/ + switch ((*fcp & 0xc0) >> 6) { + case 0: /* class A */ + fcp += 2; + break; + + case 1: + fcp += 3; + break; + + case 2: + fcp += 4; + break; + + case 3: + fcp++; + fcp += *fcp; + } + } + } +} diff --git a/bsd/netccitt/pk_llcsubr.c b/bsd/netccitt/pk_llcsubr.c new file mode 100644 index 000000000..54fd65e71 --- /dev/null +++ b/bsd/netccitt/pk_llcsubr.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Dirk Husemann, Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Dirk Husemann and the Computer Science Department (IV) of + * the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_llcsubr.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +/* + * Routing support for X.25 + * + * We distinguish between two cases: + * RTF_HOST: + * rt_key(rt) X.25 address of host + * rt_gateway SNPA (MAC+DLSAP) address of host + * rt_llinfo pkcb for rt_key(rt) + * + * RTF_GATEWAY + * rt_key(rt) X.25 address of host or suitably masked network + * rt_gateway X.25 address of next X.25 gateway (switch) + * rt_llinfo rtentry for rt_gateway address + * ought to be of type RTF_HOST + * + * + * Mapping of X.121 to pkcbs: + * + * HDLC uses the DTE-DCE model of X.25, therefore we need a many-to-one + * relationship, i.e.: + * + * {X.121_a, X.121_b, X.121_c, ..., X.121_i} -> pkcb_0 + * + * LLC2 utilizes the DTE-DTE model of X.25, resulting effectively in a + * one-to-one relationship, i.e.: + * + * {X.121_j} -> pkcb_1a + * {X.121_k} -> pkcb_1b + * ... + * {X.121_q} -> pkcb_1q + * + * It might make sense to allow a many-to-one relation for LLC2 also, + * + * {X.121_r, X.121_s, X.121_t, X.121_u} -> pkcb_2a + * + * This would make addresses X.121_[r-u] essentially aliases of one + * address ({X.121_[r-u]} would constitute a representative set). 
+ * + * Each one-to-one relation must obviously be entered individually with + * a route add command, whereas a many-to-one relationship can be + * either entered individually or generated by using a netmask. + * + * To facilitate dealings the many-to-one case for LLC2 can only be + * established via a netmask. + * + */ + +#define XTRACTPKP(rt) ((rt)->rt_flags & RTF_GATEWAY ? \ + ((rt)->rt_llinfo ? \ + (struct pkcb *) ((struct rtentry *)((rt)->rt_llinfo))->rt_llinfo : \ + (struct pkcb *) NULL) : \ + (struct pkcb *)((rt)->rt_llinfo)) + +#define equal(a1, a2) (bcmp((caddr_t)(a1), \ + (caddr_t)(a2), \ + (a1)->sa_len) == 0) +#define XIFA(rt) ((struct x25_ifaddr *)((rt)->rt_ifa)) +#define SA(s) ((struct sockaddr *)s) + +int +cons_rtrequest(int cmd, struct rtentry *rt, struct sockaddr *dst) +{ + register struct pkcb *pkp; + register int i; + register char one_to_one; + struct pkcb *pk_newlink(); + struct rtentry *npaidb_enter(); + + pkp = XTRACTPKP(rt); + + switch(cmd) { + case RTM_RESOLVE: + case RTM_ADD: + if (pkp) + return(EEXIST); + + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_llinfo) + RTFREE((struct rtentry *)rt->rt_llinfo); + rt->rt_llinfo = (caddr_t) rtalloc1(rt->rt_gateway, 1); + return(0); + } + /* + * Assumptions: (1) ifnet structure is filled in + * (2) at least the pkcb created via + * x25config (ifconfig?) has been + * set up already. + * (3) HDLC interfaces have an if_type of + * IFT_X25{,DDN}, LLC2 interfaces + * anything else (any better way to + * do this?) + * + */ + if (!rt->rt_ifa) + return (ENETDOWN); + + /* + * We differentiate between dealing with a many-to-one + * (HDLC: DTE-DCE) and a one-to-one (LLC2: DTE-DTE) + * relationship (by looking at the if type). + * + * Only in case of the many-to-one relationship (HDLC) + * we set the ia->ia_pkcb pointer to the pkcb allocated + * via pk_newlink() as we will use just that one pkcb for + * future route additions (the rtentry->rt_llinfo pointer + * points to the pkcb allocated for that route). 
+ * + * In case of the one-to-one relationship (LLC2) we + * create a new pkcb (via pk_newlink()) for each new rtentry. + * + * NOTE: Only in case of HDLC does ia->ia_pkcb point + * to a pkcb, in the LLC2 case it doesn't (as we don't + * need it here)! + */ + one_to_one = ISISO8802(rt->rt_ifp); + + if (!(pkp = XIFA(rt)->ia_pkcb) && !one_to_one) + XIFA(rt)->ia_pkcb = pkp = + pk_newlink(XIFA(rt), (caddr_t) 0); + else if (one_to_one && + !equal(rt->rt_gateway, rt->rt_ifa->ifa_addr)) { + pkp = pk_newlink(XIFA(rt), (caddr_t) 0); + /* + * We also need another route entry for mapping + * MAC+LSAP->X.25 address + */ + pkp->pk_llrt = npaidb_enter(rt->rt_gateway, rt_key(rt), rt, 0); + } + if (pkp) { + if (!pkp->pk_rt) + pkp->pk_rt = rt; + pkp->pk_refcount++; + } + rt->rt_llinfo = (caddr_t) pkp; + + return(0); + + case RTM_DELETE: + { + /* + * The pkp might be empty if we are dealing + * with an interface route entry for LLC2, in this + * case we don't need to do anything ... + */ + if (pkp) { + if ( rt->rt_flags & RTF_GATEWAY ) { + if (rt->rt_llinfo) + RTFREE((struct rtentry *)rt->rt_llinfo); + return(0); + } + + if (pkp->pk_llrt) + npaidb_destroy(pkp->pk_llrt); + + pk_dellink (pkp); + + return(0); + } + } + } +} + +/* + * Network Protocol Addressing Information DataBase (npaidb) + * + * To speed up locating the entity dealing with an LLC packet use is made + * of a routing tree. This npaidb routing tree is handled + * by the normal rn_*() routines just like (almost) any other routing tree. + * + * The mapping being done by the npaidb_*() routines is as follows: + * + * Key: MAC,LSAP (enhancing struct sockaddr_dl) + * Gateway: sockaddr_x25 (i.e. 
X.25 address - X.121 or NSAP) + * Llinfo: npaidbentry { + * struct llc_linkcb *npaidb_linkp; + * struct rtentry *npaidb_rt; + * } + * + * Using the npaidbentry provided by llinfo we can then access + * + * o the pkcb by using (struct pkcb *) (npaidb_rt->rt_llinfo) + * o the linkcb via npaidb_linkp + * + * The following functions are provided + * + * o npaidb_enter(struct sockaddr_dl *sdl, struct sockaddr_x25 *sx25, + * struct struct llc_linkcb *link, struct rtentry *rt) + * + * o npaidb_enrich(short type, caddr_t info) + * + */ + +struct sockaddr_dl npdl_netmask = { + sizeof(struct sockaddr_dl), /* _len */ + 0, /* _family */ + 0, /* _index */ + 0, /* _type */ + -1, /* _nlen */ + -1, /* _alen */ + -1, /* _slen */ + { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, /* _data */ +}; +struct sockaddr npdl_dummy; + +int npdl_datasize = sizeof(struct sockaddr_dl)- + ((int)((caddr_t)&((struct sockaddr_dl *)0)->sdl_data[0])); + +struct rtentry * +npaidb_enter(struct sockaddr_dl *key, struct sockaddr *value, + struct rtentry *rt, struct llc_linkcb *link) +{ + struct rtentry *nprt; register int i; + + USES_AF_LINK_RTS; + + if ((nprt = rtalloc1(SA(key), 0)) == 0) { + register u_int size = sizeof(struct npaidbentry); + register u_char saploc = LLSAPLOC(key, rt->rt_ifp); + + /* + * set up netmask: LLC2 packets have the lowest bit set in + * response packets (e.g. 0x7e for command packets, 0x7f for + * response packets), to facilitate the lookup we use a netmask + * of 11111110 for the SAP position. The remaining positions + * are zeroed out. 
+ */ + npdl_netmask.sdl_data[saploc] = NPDL_SAPNETMASK; + bzero((caddr_t)&npdl_netmask.sdl_data[saploc+1], + npdl_datasize-saploc-1); + + if (value == 0) + value = &npdl_dummy; + + /* now enter it */ + rtrequest(RTM_ADD, SA(key), SA(value), + SA(&npdl_netmask), 0, &nprt); + + /* and reset npdl_netmask */ + for (i = saploc; i < npdl_datasize; i++) + npdl_netmask.sdl_data[i] = -1; + +// nprt->rt_llinfo = malloc(size , M_PCB, M_WAITOK); + MALLOC(nprt->rt_llinfo, caddr_t, size , M_PCB, M_WAITOK); + if (nprt->rt_llinfo) { + bzero (nprt->rt_llinfo, size); + ((struct npaidbentry *) (nprt->rt_llinfo))->np_rt = rt; + } + } else nprt->rt_refcnt--; + return nprt; +} + +struct rtentry * +npaidb_enrich(short type, caddr_t info, struct sockaddr_dl *sdl) +{ + struct rtentry *rt; + + USES_AF_LINK_RTS; + + if (rt = rtalloc1((struct sockaddr *)sdl, 0)) { + rt->rt_refcnt--; + switch (type) { + case NPAIDB_LINK: + ((struct npaidbentry *)(rt->rt_llinfo))->np_link = + (struct llc_linkcb *) info; + break; + } + return rt; + } + + return ((struct rtentry *) 0); + +} + +npaidb_destroy(struct rtentry *rt) +{ + USES_AF_LINK_RTS; + + if (rt->rt_llinfo) + FREE((caddr_t) rt->rt_llinfo, M_PCB); + return(rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), + 0, 0)); +} + + +#if LLC +/* + * Glue between X.25 and LLC2 + */ +int +x25_llcglue(int prc, struct sockaddr *addr) +{ + register struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)addr; + register struct x25_ifaddr *x25ifa; + struct dll_ctlinfo ctlinfo; + + if((x25ifa = (struct x25_ifaddr *)ifa_ifwithaddr(addr)) == 0) + return 0; + + ctlinfo.dlcti_cfg = + (struct dllconfig *)(((struct sockaddr_x25 *)(&x25ifa->ia_xc))+1); + ctlinfo.dlcti_lsap = LLC_X25_LSAP; + + return ((int)llc_ctlinput(prc, addr, (caddr_t)&ctlinfo)); +} +#endif /* LLC */ diff --git a/bsd/netccitt/pk_output.c b/bsd/netccitt/pk_output.c new file mode 100644 index 000000000..2fa3124fe --- /dev/null +++ b/bsd/netccitt/pk_output.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 
2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (C) Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1992 + * Copyright (c) 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_output.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +struct mbuf_cache pk_output_cache = {0 }, pk_input_cache; +struct mbuf *nextpk (); + +pk_output (lcp) +register struct pklcd *lcp; +{ + register struct x25_packet *xp; + register struct mbuf *m; + register struct pkcb *pkp = lcp -> lcd_pkp; + + if (lcp == 0 || pkp == 0) { + printf ("pk_output: zero arg\n"); + return; + } + + while ((m = nextpk (lcp)) != NULL) { + xp = mtod (m, struct x25_packet *); + + switch (pk_decode (xp) + lcp -> lcd_state) { + /* + * All the work is already done - just set the state and + * pass to peer. 
+ */ + case CALL + READY: + lcp -> lcd_state = SENT_CALL; + lcp -> lcd_timer = pk_t21; + break; + + /* + * Just set the state to allow packet to flow and send the + * confirmation. + */ + case CALL_ACCEPTED + RECEIVED_CALL: + lcp -> lcd_state = DATA_TRANSFER; + break; + + /* + * Just set the state. Keep the LCD around till the clear + * confirmation is returned. + */ + case CLEAR + RECEIVED_CALL: + case CLEAR + SENT_CALL: + case CLEAR + DATA_TRANSFER: + lcp -> lcd_state = SENT_CLEAR; + lcp -> lcd_retry = 0; + /* fall through */ + + case CLEAR + SENT_CLEAR: + lcp -> lcd_timer = pk_t23; + lcp -> lcd_retry++; + break; + + case CLEAR_CONF + RECEIVED_CLEAR: + case CLEAR_CONF + SENT_CLEAR: + case CLEAR_CONF + READY: + lcp -> lcd_state = READY; + break; + + case DATA + DATA_TRANSFER: + SPS(xp, lcp -> lcd_ssn); + lcp -> lcd_input_window = + (lcp -> lcd_rsn + 1) % MODULUS; + SPR(xp, lcp -> lcd_input_window); + lcp -> lcd_last_transmitted_pr = lcp -> lcd_input_window; + lcp -> lcd_ssn = (lcp -> lcd_ssn + 1) % MODULUS; + if (lcp -> lcd_ssn == ((lcp -> lcd_output_window + lcp -> lcd_windowsize) % MODULUS)) + lcp -> lcd_window_condition = TRUE; + break; + + case INTERRUPT + DATA_TRANSFER: +#ifdef ancient_history + xp -> packet_data = 0; +#endif + lcp -> lcd_intrconf_pending = TRUE; + break; + + case INTERRUPT_CONF + DATA_TRANSFER: + break; + + case RR + DATA_TRANSFER: + case RNR + DATA_TRANSFER: + lcp -> lcd_input_window = + (lcp -> lcd_rsn + 1) % MODULUS; + SPR(xp, lcp -> lcd_input_window); + lcp -> lcd_last_transmitted_pr = lcp -> lcd_input_window; + break; + + case RESET + DATA_TRANSFER: + lcp -> lcd_reset_condition = TRUE; + break; + + case RESET_CONF + DATA_TRANSFER: + lcp -> lcd_reset_condition = FALSE; + break; + + /* + * A restart should be only generated internally. Therefore + * all logic for restart is in the pk_restart routine. + */ + case RESTART + READY: + lcp -> lcd_timer = pk_t20; + break; + + /* + * Restarts are all handled internally. 
Therefore all the + * logic for the incoming restart packet is handled in the + * pk_input routine. + */ + case RESTART_CONF + READY: + break; + + default: + m_freem (m); + return; + } + + /* Trace the packet. */ + pk_trace (pkp -> pk_xcp, m, "P-Out"); + + /* Pass the packet on down to the link layer */ + if (pk_input_cache.mbc_size || pk_input_cache.mbc_oldsize) { + m->m_flags |= 0x08; + mbuf_cache(&pk_input_cache, m); + } + (*pkp -> pk_lloutput) (pkp -> pk_llnext, m, pkp -> pk_rt); + } +} + +/* + * This procedure returns the next packet to send or null. A + * packet is composed of one or more mbufs. + */ + +struct mbuf * +nextpk (lcp) +struct pklcd *lcp; +{ + register struct mbuf *m, *n; + struct socket *so = lcp -> lcd_so; + register struct sockbuf *sb = & (so ? so -> so_snd : lcp -> lcd_sb); + + if (lcp -> lcd_template) { + m = lcp -> lcd_template; + lcp -> lcd_template = NULL; + } else { + if (lcp -> lcd_rnr_condition || lcp -> lcd_window_condition || + lcp -> lcd_reset_condition) + return (NULL); + + if ((m = sb -> sb_mb) == 0) + return (NULL); + + sb -> sb_mb = m -> m_nextpkt; + m->m_act = 0; + for (n = m; n; n = n -> m_next) + sbfree (sb, n); + } + return (m); +} diff --git a/bsd/netccitt/pk_subr.c b/bsd/netccitt/pk_subr.c new file mode 100644 index 000000000..cb6cec959 --- /dev/null +++ b/bsd/netccitt/pk_subr.c @@ -0,0 +1,1214 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (C) Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1992 + * Copyright (c) 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_subr.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +int pk_sendspace = 1024 * 2 + 8; +int pk_recvspace = 1024 * 2 + 8; + +struct pklcd_q pklcd_q = {&pklcd_q, &pklcd_q}; + +struct x25bitslice x25_bitslice[] = { +/* mask, shift value */ + { 0xf0, 0x4 }, + { 0xf, 0x0 }, + { 0x80, 0x7 }, + { 0x40, 0x6 }, + { 0x30, 0x4 }, + { 0xe0, 0x5 }, + { 0x10, 0x4 }, + { 0xe, 0x1 }, + { 0x1, 0x0 } +}; + + +/* + * Attach X.25 protocol to socket, allocate logical channel descripter + * and buffer space, and enter LISTEN state if we are to accept + * IN-COMMING CALL packets. 
+ * + */ + +struct pklcd * +pk_attach (so) +struct socket *so; +{ + register struct pklcd *lcp; + register int error = ENOBUFS; + int pk_output (); + + MALLOC(lcp, struct pklcd *, sizeof (*lcp), M_PCB, M_NOWAIT); + if (lcp) { + bzero ((caddr_t)lcp, sizeof (*lcp)); + insque (&lcp -> lcd_q, &pklcd_q); + lcp -> lcd_state = READY; + lcp -> lcd_send = pk_output; + if (so) { + error = soreserve (so, pk_sendspace, pk_recvspace); + lcp -> lcd_so = so; + if (so -> so_options & SO_ACCEPTCONN) + lcp -> lcd_state = LISTEN; + } else + sbreserve (&lcp -> lcd_sb, pk_sendspace); + } + if (so) { + so -> so_pcb = (caddr_t) lcp; + so -> so_error = error; + } + return (lcp); +} + +/* + * Disconnect X.25 protocol from socket. + */ + +pk_disconnect (lcp) +register struct pklcd *lcp; +{ + register struct socket *so = lcp -> lcd_so; + register struct pklcd *l, *p; + + switch (lcp -> lcd_state) { + case LISTEN: + for (p = 0, l = pk_listenhead; l && l != lcp; p = l, l = l -> lcd_listen); + if (p == 0) { + if (l != 0) + pk_listenhead = l -> lcd_listen; + } + else + if (l != 0) + p -> lcd_listen = l -> lcd_listen; + pk_close (lcp); + break; + + case READY: + pk_acct (lcp); + pk_close (lcp); + break; + + case SENT_CLEAR: + case RECEIVED_CLEAR: + break; + + default: + pk_acct (lcp); + if (so) { + soisdisconnecting (so); + sbflush (&so -> so_rcv); + } + pk_clear (lcp, 241, 0); /* Normal Disconnect */ + + } +} + +/* + * Close an X.25 Logical Channel. Discard all space held by the + * connection and internal descriptors. Wake up any sleepers. + */ + +pk_close (lcp) +struct pklcd *lcp; +{ + register struct socket *so = lcp -> lcd_so; + + /* + * If the X.25 connection is torn down due to link + * level failure (e.g. LLC2 FRMR) and at the same the user + * level is still filling up the socket send buffer that + * send buffer is locked. An attempt to sbflush () that send + * buffer will lead us into - no, not temptation but - panic! 
+ * So - we'll just check wether the send buffer is locked + * and if that's the case we'll mark the lcp as zombie and + * have the pk_timer () do the cleaning ... + */ + + if (so && so -> so_snd.sb_flags & SB_LOCK) + lcp -> lcd_state = LCN_ZOMBIE; + else + pk_freelcd (lcp); + + if (so == NULL) + return; + + so -> so_pcb = 0; + soisdisconnected (so); + /* sofree (so); /* gak!!! you can't do that here */ +} + +/* + * Create a template to be used to send X.25 packets on a logical + * channel. It allocates an mbuf and fills in a skeletal packet + * depending on its type. This packet is passed to pk_output where + * the remainer of the packet is filled in. +*/ + +struct mbuf * +pk_template (lcn, type) +int lcn, type; +{ + register struct mbuf *m; + register struct x25_packet *xp; + + MGETHDR (m, M_DONTWAIT, MT_HEADER); + if (m == 0) + panic ("pk_template"); + m -> m_act = 0; + + /* + * Efficiency hack: leave a four byte gap at the beginning + * of the packet level header with the hope that this will + * be enough room for the link level to insert its header. + */ + m -> m_data += max_linkhdr; + m -> m_pkthdr.len = m -> m_len = PKHEADERLN; + + xp = mtod (m, struct x25_packet *); + *(long *)xp = 0; /* ugly, but fast */ +/* xp -> q_bit = 0;*/ + X25SBITS(xp -> bits, fmt_identifier, 1); +/* xp -> lc_group_number = 0;*/ + + SET_LCN(xp, lcn); + xp -> packet_type = type; + + return (m); +} + +/* + * This routine restarts all the virtual circuits. Actually, + * the virtual circuits are not "restarted" as such. Instead, + * any active switched circuit is simply returned to READY + * state. + */ + +pk_restart (pkp, restart_cause) +register struct pkcb *pkp; +int restart_cause; +{ + register struct mbuf *m; + register struct pklcd *lcp; + register int i; + + /* Restart all logical channels. 
*/ + if (pkp -> pk_chan == 0) + return; + + /* + * Don't do this if we're doing a restart issued from + * inside pk_connect () --- which is only done if and + * only if the X.25 link is down, i.e. a RESTART needs + * to be done to get it up. + */ + if (!(pkp -> pk_dxerole & DTE_CONNECTPENDING)) { + for (i = 1; i <= pkp -> pk_maxlcn; ++i) + if ((lcp = pkp -> pk_chan[i]) != NULL) { + if (lcp -> lcd_so) { + lcp -> lcd_so -> so_error = ENETRESET; + pk_close (lcp); + } else { + pk_flush (lcp); + lcp -> lcd_state = READY; + if (lcp -> lcd_upper) + lcp -> lcd_upper (lcp, 0); + } + } + } + + if (restart_cause < 0) + return; + + pkp -> pk_state = DTE_SENT_RESTART; + pkp -> pk_dxerole &= ~(DTE_PLAYDCE | DTE_PLAYDTE); + lcp = pkp -> pk_chan[0]; + m = lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_RESTART); + m -> m_pkthdr.len = m -> m_len += 2; + mtod (m, struct x25_packet *) -> packet_data = 0; /* DTE only */ + mtod (m, octet *)[4] = restart_cause; + pk_output (lcp); +} + + +/* + * This procedure frees up the Logical Channel Descripter. + */ + +pk_freelcd (lcp) +register struct pklcd *lcp; +{ + if (lcp == NULL) + return; + + if (lcp -> lcd_lcn > 0) + lcp -> lcd_pkp -> pk_chan[lcp -> lcd_lcn] = NULL; + + pk_flush (lcp); + remque (&lcp -> lcd_q); + FREE((caddr_t)lcp, M_PCB); +} + +static struct x25_ifaddr * +pk_ifwithaddr (sx) + struct sockaddr_x25 *sx; +{ + struct ifnet *ifp; + struct ifaddr *ifa; + register struct x25_ifaddr *ia; + char *addr = sx -> x25_addr; + + for (ifp = ifnet; ifp; ifp = ifp -> if_next) + for (ifa = ifp -> if_addrlist; ifa; ifa = ifa -> ifa_next) + if (ifa -> ifa_addr -> sa_family == AF_CCITT) { + ia = (struct x25_ifaddr *)ifa; + if (bcmp (addr, ia -> ia_xc.xc_addr.x25_addr, + 16) == 0) + return (ia); + + } + return ((struct x25_ifaddr *)0); +} + + +/* + * Bind a address and protocol value to a socket. The important + * part is the protocol value - the first four characters of the + * Call User Data field. 
+ */ + +#define XTRACTPKP(rt) ((rt) -> rt_flags & RTF_GATEWAY ? \ + ((rt) -> rt_llinfo ? \ + (struct pkcb *) ((struct rtentry *)((rt) -> rt_llinfo)) -> rt_llinfo : \ + (struct pkcb *) NULL) : \ + (struct pkcb *)((rt) -> rt_llinfo)) + +pk_bind (lcp, nam) +struct pklcd *lcp; +struct mbuf *nam; +{ + register struct pklcd *pp; + register struct sockaddr_x25 *sa; + + if (nam == NULL) + return (EADDRNOTAVAIL); + if (lcp -> lcd_ceaddr) /* XXX */ + return (EADDRINUSE); + if (pk_checksockaddr (nam)) + return (EINVAL); + sa = mtod (nam, struct sockaddr_x25 *); + + /* + * If the user wishes to accept calls only from a particular + * net (net != 0), make sure the net is known + */ + + if (sa -> x25_addr[0]) { + if (!pk_ifwithaddr (sa)) + return (ENETUNREACH); + } else if (sa -> x25_net) { + if (!ifa_ifwithnet ((struct sockaddr *)sa)) + return (ENETUNREACH); + } + + /* + * For ISO's sake permit default listeners, but only one such . . . + */ + for (pp = pk_listenhead; pp; pp = pp -> lcd_listen) { + register struct sockaddr_x25 *sa2 = pp -> lcd_ceaddr; + if ((sa2 -> x25_udlen == sa -> x25_udlen) && + (sa2 -> x25_udlen == 0 || + (bcmp (sa2 -> x25_udata, sa -> x25_udata, + min (sa2 -> x25_udlen, sa -> x25_udlen)) == 0))) + return (EADDRINUSE); + } + lcp -> lcd_laddr = *sa; + lcp -> lcd_ceaddr = &lcp -> lcd_laddr; + return (0); +} + +/* + * Include a bound control block in the list of listeners. + */ +pk_listen (lcp) +register struct pklcd *lcp; +{ + register struct pklcd **pp; + + if (lcp -> lcd_ceaddr == 0) + return (EDESTADDRREQ); + + lcp -> lcd_state = LISTEN; + /* + * Add default listener at end, any others at start. + */ + if (lcp -> lcd_ceaddr -> x25_udlen == 0) { + for (pp = &pk_listenhead; *pp; ) + pp = &((*pp) -> lcd_listen); + *pp = lcp; + } else { + lcp -> lcd_listen = pk_listenhead; + pk_listenhead = lcp; + } + return (0); +} +/* + * Include a listening control block for the benefit of other protocols. 
+ */ +pk_protolisten (spi, spilen, callee) +int (*callee) (); +{ + register struct pklcd *lcp = pk_attach ((struct socket *)0); + register struct mbuf *nam; + register struct sockaddr_x25 *sa; + int error = ENOBUFS; + + if (lcp) { + if (nam = m_getclr (MT_SONAME, M_DONTWAIT)) { + sa = mtod (nam, struct sockaddr_x25 *); + sa -> x25_family = AF_CCITT; + sa -> x25_len = nam -> m_len = sizeof (*sa); + sa -> x25_udlen = spilen; + sa -> x25_udata[0] = spi; + lcp -> lcd_upper = callee; + lcp -> lcd_flags = X25_MBS_HOLD; + if ((error = pk_bind (lcp, nam)) == 0) + error = pk_listen (lcp); + (void) m_free (nam); + } + if (error) + pk_freelcd (lcp); + } + return error; /* Hopefully Zero !*/ +} + +/* + * Associate a logical channel descriptor with a network. + * Fill in the default network specific parameters and then + * set any parameters explicitly specified by the user or + * by the remote DTE. + */ + +pk_assoc (pkp, lcp, sa) +register struct pkcb *pkp; +register struct pklcd *lcp; +register struct sockaddr_x25 *sa; +{ + + lcp -> lcd_pkp = pkp; + lcp -> lcd_packetsize = pkp -> pk_xcp -> xc_psize; + lcp -> lcd_windowsize = pkp -> pk_xcp -> xc_pwsize; + lcp -> lcd_rsn = MODULUS - 1; + pkp -> pk_chan[lcp -> lcd_lcn] = lcp; + + if (sa -> x25_opts.op_psize) + lcp -> lcd_packetsize = sa -> x25_opts.op_psize; + else + sa -> x25_opts.op_psize = lcp -> lcd_packetsize; + if (sa -> x25_opts.op_wsize) + lcp -> lcd_windowsize = sa -> x25_opts.op_wsize; + else + sa -> x25_opts.op_wsize = lcp -> lcd_windowsize; + sa -> x25_net = pkp -> pk_xcp -> xc_addr.x25_net; + lcp -> lcd_flags |= sa -> x25_opts.op_flags; + lcp -> lcd_stime = time.tv_sec; +} + +pk_connect (lcp, sa) +register struct pklcd *lcp; +register struct sockaddr_x25 *sa; +{ + register struct pkcb *pkp; + register struct rtentry *rt; + register struct rtentry *nrt; + + struct rtentry *npaidb_enter (); + struct pkcb *pk_newlink (); + + if (sa -> x25_addr[0] == '\0') + return (EDESTADDRREQ); + + /* + * Is the destination address 
known? + */ + if (!(rt = rtalloc1 ((struct sockaddr *)sa, 1))) + return (ENETUNREACH); + + if (!(pkp = XTRACTPKP(rt))) + pkp = pk_newlink ((struct x25_ifaddr *) (rt -> rt_ifa), + (caddr_t) 0); + + /* + * Have we entered the LLC address? + */ + if (nrt = npaidb_enter (rt -> rt_gateway, rt_key (rt), rt, 0)) + pkp -> pk_llrt = nrt; + + /* + * Have we allocated an LLC2 link yet? + */ + if (pkp -> pk_llnext == (caddr_t)0 && pkp -> pk_llctlinput) { + struct dll_ctlinfo ctlinfo; + + ctlinfo.dlcti_rt = rt; + ctlinfo.dlcti_pcb = (caddr_t) pkp; + ctlinfo.dlcti_conf = + (struct dllconfig *) (&((struct x25_ifaddr *)(rt -> rt_ifa)) -> ia_xc); + pkp -> pk_llnext = + (pkp -> pk_llctlinput) (PRC_CONNECT_REQUEST, 0, &ctlinfo); + } + + if (pkp -> pk_state != DTE_READY && pkp -> pk_state != DTE_WAITING) + return (ENETDOWN); + if ((lcp -> lcd_lcn = pk_getlcn (pkp)) == 0) + return (EMFILE); + + lcp -> lcd_faddr = *sa; + lcp -> lcd_ceaddr = & lcp -> lcd_faddr; + pk_assoc (pkp, lcp, lcp -> lcd_ceaddr); + + /* + * If the link is not up yet, initiate an X.25 RESTART + */ + if (pkp -> pk_state == DTE_WAITING) { + pkp -> pk_dxerole |= DTE_CONNECTPENDING; + pk_ctlinput (PRC_LINKUP, (struct sockaddr *)0, pkp); + if (lcp -> lcd_so) + soisconnecting (lcp -> lcd_so); + return 0; + } + + if (lcp -> lcd_so) + soisconnecting (lcp -> lcd_so); + lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_CALL); + pk_callrequest (lcp, lcp -> lcd_ceaddr, pkp -> pk_xcp); + return (*pkp -> pk_ia -> ia_start) (lcp); +} + +/* + * Complete all pending X.25 call requests --- this gets called after + * the X.25 link has been restarted. 
+ */ +#define RESHUFFLELCN(maxlcn, lcn) ((maxlcn) - (lcn) + 1) + +pk_callcomplete (pkp) + register struct pkcb *pkp; +{ + register struct pklcd *lcp; + register int i; + register int ni; + + + if (pkp -> pk_dxerole & DTE_CONNECTPENDING) + pkp -> pk_dxerole &= ~DTE_CONNECTPENDING; + else return; + + if (pkp -> pk_chan == 0) + return; + + /* + * We pretended to be a DTE for allocating lcns, if + * it turns out that we are in reality performing as a + * DCE we need to reshuffle the lcps. + * + * /+---------------+-------- - + * / | a (maxlcn-1) | \ + * / +---------------+ \ + * +--- * | b (maxlcn-2) | \ + * | \ +---------------+ \ + * r | \ | c (maxlcn-3) | \ + * e | \+---------------+ | + * s | | . | + * h | | . | m + * u | | . | a + * f | | . | x + * f | | . | l + * l | /+---------------+ | c + * e | / | c' ( 3 ) | | n + * | / +---------------+ | + * +--> * | b' ( 2 ) | / + * \ +---------------+ / + * \ | a' ( 1 ) | / + * \+---------------+ / + * | 0 | / + * +---------------+-------- - + * + */ + if (pkp -> pk_dxerole & DTE_PLAYDCE) { + /* Sigh, reshuffle it */ + for (i = pkp -> pk_maxlcn; i > 0; --i) + if (pkp -> pk_chan[i]) { + ni = RESHUFFLELCN(pkp -> pk_maxlcn, i); + pkp -> pk_chan[ni] = pkp -> pk_chan[i]; + pkp -> pk_chan[i] = NULL; + pkp -> pk_chan[ni] -> lcd_lcn = ni; + } + } + + for (i = 1; i <= pkp -> pk_maxlcn; ++i) + if ((lcp = pkp -> pk_chan[i]) != NULL) { + /* if (lcp -> lcd_so) + soisconnecting (lcp -> lcd_so); */ + lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_CALL); + pk_callrequest (lcp, lcp -> lcd_ceaddr, pkp -> pk_xcp); + (*pkp -> pk_ia -> ia_start) (lcp); + } +} + +struct bcdinfo { + octet *cp; + unsigned posn; +}; +/* + * Build the rest of the CALL REQUEST packet. Fill in calling + * address, facilities fields and the user data field. 
+ */ + +pk_callrequest (lcp, sa, xcp) +struct pklcd *lcp; +register struct sockaddr_x25 *sa; +register struct x25config *xcp; +{ + register struct x25_calladdr *a; + register struct mbuf *m = lcp -> lcd_template; + register struct x25_packet *xp = mtod (m, struct x25_packet *); + struct bcdinfo b; + + if (lcp -> lcd_flags & X25_DBIT) + X25SBITS(xp -> bits, d_bit, 1); + a = (struct x25_calladdr *) &xp -> packet_data; + b.cp = (octet *) a -> address_field; + b.posn = 0; + X25SBITS(a -> addrlens, called_addrlen, to_bcd (&b, sa, xcp)); + X25SBITS(a -> addrlens, calling_addrlen, to_bcd (&b, &xcp -> xc_addr, xcp)); + if (b.posn & 0x01) + *b.cp++ &= 0xf0; + m -> m_pkthdr.len = m -> m_len += b.cp - (octet *) a; + + if (lcp -> lcd_facilities) { + m -> m_pkthdr.len += + (m -> m_next = lcp -> lcd_facilities) -> m_pkthdr.len; + lcp -> lcd_facilities = 0; + } else + pk_build_facilities (m, sa, (int)xcp -> xc_type); + + m_copyback (m, m -> m_pkthdr.len, sa -> x25_udlen, sa -> x25_udata); +} + +pk_build_facilities (m, sa, type) +register struct mbuf *m; +struct sockaddr_x25 *sa; +{ + register octet *cp; + register octet *fcp; + register int revcharge; + + cp = mtod (m, octet *) + m -> m_len; + fcp = cp + 1; + revcharge = sa -> x25_opts.op_flags & X25_REVERSE_CHARGE ? 1 : 0; + /* + * This is specific to Datapac X.25(1976) DTEs. International + * calls must have the "hi priority" bit on. 
+ */ + if (type == X25_1976 && sa -> x25_opts.op_psize == X25_PS128) + revcharge |= 02; + if (revcharge) { + *fcp++ = FACILITIES_REVERSE_CHARGE; + *fcp++ = revcharge; + } + switch (type) { + case X25_1980: + case X25_1984: + *fcp++ = FACILITIES_PACKETSIZE; + *fcp++ = sa -> x25_opts.op_psize; + *fcp++ = sa -> x25_opts.op_psize; + + *fcp++ = FACILITIES_WINDOWSIZE; + *fcp++ = sa -> x25_opts.op_wsize; + *fcp++ = sa -> x25_opts.op_wsize; + } + *cp = fcp - cp - 1; + m -> m_pkthdr.len = (m -> m_len += *cp + 1); +} + +to_bcd (b, sa, xcp) +register struct bcdinfo *b; +struct sockaddr_x25 *sa; +register struct x25config *xcp; +{ + register char *x = sa -> x25_addr; + unsigned start = b -> posn; + /* + * The nodnic and prepnd0 stuff looks tedious, + * but it does allow full X.121 addresses to be used, + * which is handy for routing info (& OSI type 37 addresses). + */ + if (xcp -> xc_addr.x25_net && (xcp -> xc_nodnic || xcp -> xc_prepnd0)) { + char dnicname[sizeof (long) * NBBY/3 + 2]; + register char *p = dnicname; + + sprintf (p, "%d", xcp -> xc_addr.x25_net & 0x7fff); + for (; *p; p++) /* *p == 0 means dnic matched */ + if ((*p ^ *x++) & 0x0f) + break; + if (*p || xcp -> xc_nodnic == 0) + x = sa -> x25_addr; + if (*p && xcp -> xc_prepnd0) { + if ((b -> posn)++ & 0x01) + *(b -> cp)++; + else + *(b -> cp) = 0; + } + } + while (*x) + if ((b -> posn)++ & 0x01) + *(b -> cp)++ |= *x++ & 0x0F; + else + *(b -> cp) = *x++ << 4; + return ((b -> posn) - start); +} + +/* + * This routine gets the first available logical channel number. The + * search is + * - from the highest number to lowest number if playing DTE, and + * - from lowest to highest number if playing DCE. 
+ */ + +pk_getlcn (pkp) +register struct pkcb *pkp; +{ + register int i; + + if (pkp -> pk_chan == 0) + return (0); + if ( pkp -> pk_dxerole & DTE_PLAYDCE ) { + for (i = 1; i <= pkp -> pk_maxlcn; ++i) + if (pkp -> pk_chan[i] == NULL) + break; + } else { + for (i = pkp -> pk_maxlcn; i > 0; --i) + if (pkp -> pk_chan[i] == NULL) + break; + } + i = ( i > pkp -> pk_maxlcn ? 0 : i ); + return (i); +} + +/* + * This procedure sends a CLEAR request packet. The lc state is + * set to "SENT_CLEAR". + */ + +pk_clear (lcp, diagnostic, abortive) +register struct pklcd *lcp; +{ + register struct mbuf *m = pk_template (lcp -> lcd_lcn, X25_CLEAR); + + m -> m_len += 2; + m -> m_pkthdr.len += 2; + mtod (m, struct x25_packet *) -> packet_data = 0; + mtod (m, octet *)[4] = diagnostic; + if (lcp -> lcd_facilities) { + m -> m_next = lcp -> lcd_facilities; + m -> m_pkthdr.len += m -> m_next -> m_len; + lcp -> lcd_facilities = 0; + } + if (abortive) + lcp -> lcd_template = m; + else { + struct socket *so = lcp -> lcd_so; + struct sockbuf *sb = so ? & so -> so_snd : & lcp -> lcd_sb; + sbappendrecord (sb, m); + } + pk_output (lcp); + +} + +/* + * This procedure generates RNR's or RR's to inhibit or enable + * inward data flow, if the current state changes (blocked ==> open or + * vice versa), or if forced to generate one. One forces RNR's to ack data. + */ +pk_flowcontrol (lcp, inhibit, forced) +register struct pklcd *lcp; +{ + inhibit = (inhibit != 0); + if (lcp == 0 || lcp -> lcd_state != DATA_TRANSFER || + (forced == 0 && lcp -> lcd_rxrnr_condition == inhibit)) + return; + lcp -> lcd_rxrnr_condition = inhibit; + lcp -> lcd_template = + pk_template (lcp -> lcd_lcn, inhibit ? X25_RNR : X25_RR); + pk_output (lcp); +} + +/* + * This procedure sends a RESET request packet. It re-intializes + * virtual circuit. 
+ */ + +static +pk_reset (lcp, diagnostic) +register struct pklcd *lcp; +{ + register struct mbuf *m; + register struct socket *so = lcp -> lcd_so; + + if (lcp -> lcd_state != DATA_TRANSFER) + return; + + if (so) + so -> so_error = ECONNRESET; + lcp -> lcd_reset_condition = TRUE; + + /* Reset all the control variables for the channel. */ + pk_flush (lcp); + lcp -> lcd_window_condition = lcp -> lcd_rnr_condition = + lcp -> lcd_intrconf_pending = FALSE; + lcp -> lcd_rsn = MODULUS - 1; + lcp -> lcd_ssn = 0; + lcp -> lcd_output_window = lcp -> lcd_input_window = + lcp -> lcd_last_transmitted_pr = 0; + m = lcp -> lcd_template = pk_template (lcp -> lcd_lcn, X25_RESET); + m -> m_pkthdr.len = m -> m_len += 2; + mtod (m, struct x25_packet *) -> packet_data = 0; + mtod (m, octet *)[4] = diagnostic; + pk_output (lcp); + +} + +/* + * This procedure frees all data queued for output or delivery on a + * virtual circuit. + */ + +pk_flush (lcp) +register struct pklcd *lcp; +{ + register struct socket *so; + + if (lcp -> lcd_template) + m_freem (lcp -> lcd_template); + + if (lcp -> lcd_cps) { + m_freem (lcp -> lcd_cps); + lcp -> lcd_cps = 0; + } + if (lcp -> lcd_facilities) { + m_freem (lcp -> lcd_facilities); + lcp -> lcd_facilities = 0; + } + if (so = lcp -> lcd_so) + sbflush (&so -> so_snd); + else + sbflush (&lcp -> lcd_sb); +} + +/* + * This procedure handles all local protocol procedure errors. + */ + +pk_procerror (error, lcp, errstr, diagnostic) +register struct pklcd *lcp; +char *errstr; +{ + + pk_message (lcp -> lcd_lcn, lcp -> lcd_pkp -> pk_xcp, errstr); + + switch (error) { + case CLEAR: + if (lcp -> lcd_so) { + lcp -> lcd_so -> so_error = ECONNABORTED; + soisdisconnecting (lcp -> lcd_so); + } + pk_clear (lcp, diagnostic, 1); + break; + + case RESET: + pk_reset (lcp, diagnostic); + } +} + +/* + * This procedure is called during the DATA TRANSFER state to check + * and process the P(R) values received in the DATA, RR OR RNR + * packets. 
+ */ + +pk_ack (lcp, pr) +struct pklcd *lcp; +unsigned pr; +{ + register struct socket *so = lcp -> lcd_so; + + if (lcp -> lcd_output_window == pr) + return (PACKET_OK); + if (lcp -> lcd_output_window < lcp -> lcd_ssn) { + if (pr < lcp -> lcd_output_window || pr > lcp -> lcd_ssn) { + pk_procerror (RESET, lcp, + "p(r) flow control error", 2); + return (ERROR_PACKET); + } + } + else { + if (pr < lcp -> lcd_output_window && pr > lcp -> lcd_ssn) { + pk_procerror (RESET, lcp, + "p(r) flow control error #2", 2); + return (ERROR_PACKET); + } + } + + lcp -> lcd_output_window = pr; /* Rotate window. */ + if (lcp -> lcd_window_condition == TRUE) + lcp -> lcd_window_condition = FALSE; + + if (so && ((so -> so_snd.sb_flags & SB_WAIT) || + (so -> so_snd.sb_flags & SB_NOTIFY))) + sowwakeup (so); + + return (PACKET_OK); +} + +/* + * This procedure decodes the X.25 level 3 packet returning a + * code to be used in switchs or arrays. + */ + +pk_decode (xp) +register struct x25_packet *xp; +{ + register int type; + + if (X25GBITS(xp -> bits, fmt_identifier) != 1) + return (INVALID_PACKET); +#ifdef ancient_history + /* + * Make sure that the logical channel group number is 0. + * This restriction may be removed at some later date. + */ + if (xp -> lc_group_number != 0) + return (INVALID_PACKET); +#endif + /* + * Test for data packet first. + */ + if (!(xp -> packet_type & DATA_PACKET_DESIGNATOR)) + return (DATA); + + /* + * Test if flow control packet (RR or RNR). + */ + if (!(xp -> packet_type & RR_OR_RNR_PACKET_DESIGNATOR)) + switch (xp -> packet_type & 0x1f) { + case X25_RR: + return (RR); + case X25_RNR: + return (RNR); + case X25_REJECT: + return (REJECT); + } + + /* + * Determine the rest of the packet types. 
+ */ + switch (xp -> packet_type) { + case X25_CALL: + type = CALL; + break; + + case X25_CALL_ACCEPTED: + type = CALL_ACCEPTED; + break; + + case X25_CLEAR: + type = CLEAR; + break; + + case X25_CLEAR_CONFIRM: + type = CLEAR_CONF; + break; + + case X25_INTERRUPT: + type = INTERRUPT; + break; + + case X25_INTERRUPT_CONFIRM: + type = INTERRUPT_CONF; + break; + + case X25_RESET: + type = RESET; + break; + + case X25_RESET_CONFIRM: + type = RESET_CONF; + break; + + case X25_RESTART: + type = RESTART; + break; + + case X25_RESTART_CONFIRM: + type = RESTART_CONF; + break; + + case X25_DIAGNOSTIC: + type = DIAG_TYPE; + break; + + default: + type = INVALID_PACKET; + } + return (type); +} + +/* + * A restart packet has been received. Print out the reason + * for the restart. + */ + +pk_restartcause (pkp, xp) +struct pkcb *pkp; +register struct x25_packet *xp; +{ + register struct x25config *xcp = pkp -> pk_xcp; + register int lcn = LCN(xp); + + switch (xp -> packet_data) { + case X25_RESTART_LOCAL_PROCEDURE_ERROR: + pk_message (lcn, xcp, "restart: local procedure error"); + break; + + case X25_RESTART_NETWORK_CONGESTION: + pk_message (lcn, xcp, "restart: network congestion"); + break; + + case X25_RESTART_NETWORK_OPERATIONAL: + pk_message (lcn, xcp, "restart: network operational"); + break; + + default: + pk_message (lcn, xcp, "restart: unknown cause"); + } +} + +#define MAXRESETCAUSE 7 + +int Reset_cause[] = { + EXRESET, EXROUT, 0, EXRRPE, 0, EXRLPE, 0, EXRNCG +}; + +/* + * A reset packet has arrived. Return the cause to the user. 
+ */ + +pk_resetcause (pkp, xp) +struct pkcb *pkp; +register struct x25_packet *xp; +{ + register struct pklcd *lcp = + pkp -> pk_chan[LCN(xp)]; + register int code = xp -> packet_data; + + if (code > MAXRESETCAUSE) + code = 7; /* EXRNCG */ + + pk_message (LCN(xp), lcp -> lcd_pkp, "reset code 0x%x, diagnostic 0x%x", + xp -> packet_data, 4[(u_char *)xp]); + + if (lcp -> lcd_so) + lcp -> lcd_so -> so_error = Reset_cause[code]; +} + +#define MAXCLEARCAUSE 25 + +int Clear_cause[] = { + EXCLEAR, EXCBUSY, 0, EXCINV, 0, EXCNCG, 0, + 0, 0, EXCOUT, 0, EXCAB, 0, EXCNOB, 0, 0, 0, EXCRPE, + 0, EXCLPE, 0, 0, 0, 0, 0, EXCRRC +}; + +/* + * A clear packet has arrived. Return the cause to the user. + */ + +pk_clearcause (pkp, xp) +struct pkcb *pkp; +register struct x25_packet *xp; +{ + register struct pklcd *lcp = + pkp -> pk_chan[LCN(xp)]; + register int code = xp -> packet_data; + + if (code > MAXCLEARCAUSE) + code = 5; /* EXRNCG */ + if (lcp -> lcd_so) + lcp -> lcd_so -> so_error = Clear_cause[code]; +} + +char * +format_ntn (xcp) +register struct x25config *xcp; +{ + + return (xcp -> xc_addr.x25_addr); +} + +/* VARARGS1 */ +pk_message (lcn, xcp, fmt, a1, a2, a3, a4, a5, a6) +struct x25config *xcp; +char *fmt; +{ + + if (lcn) + if (!PQEMPTY) + printf ("X.25(%s): lcn %d: ", format_ntn (xcp), lcn); + else + printf ("X.25: lcn %d: ", lcn); + else + if (!PQEMPTY) + printf ("X.25(%s): ", format_ntn (xcp)); + else + printf ("X.25: "); + + printf (fmt, a1, a2, a3, a4, a5, a6); + printf ("\n"); +} + +pk_fragment (lcp, m0, qbit, mbit, wait) +struct mbuf *m0; +register struct pklcd *lcp; +{ + register struct mbuf *m = m0; + register struct x25_packet *xp; + register struct sockbuf *sb; + struct mbuf *head = 0, *next, **mp = &head, *m_split (); + int totlen, psize = 1 << (lcp -> lcd_packetsize); + + if (m == 0) + return 0; + if (m -> m_flags & M_PKTHDR == 0) + panic ("pk_fragment"); + totlen = m -> m_pkthdr.len; + m -> m_act = 0; + sb = lcp -> lcd_so ? 
&lcp -> lcd_so -> so_snd : & lcp -> lcd_sb; + do { + if (totlen > psize) { + if ((next = m_split (m, psize, wait)) == 0) + goto abort; + totlen -= psize; + } else + next = 0; + M_PREPEND(m, PKHEADERLN, wait); + if (m == 0) + goto abort; + *mp = m; + mp = & m -> m_act; + *mp = 0; + xp = mtod (m, struct x25_packet *); + 0[(char *)xp] = 0; + if (qbit) + X25SBITS(xp -> bits, q_bit, 1); + if (lcp -> lcd_flags & X25_DBIT) + X25SBITS(xp -> bits, d_bit, 1); + X25SBITS(xp -> bits, fmt_identifier, 1); + xp -> packet_type = X25_DATA; + SET_LCN(xp, lcp -> lcd_lcn); + if (next || (mbit && (totlen == psize || + (lcp -> lcd_flags & X25_DBIT)))) + SMBIT(xp, 1); + } while (m = next); + for (m = head; m; m = next) { + next = m -> m_act; + m -> m_act = 0; + sbappendrecord (sb, m); + } + return 0; +abort: + if (wait) + panic ("pk_fragment null mbuf after wait"); + if (next) + m_freem (next); + for (m = head; m; m = next) { + next = m -> m_act; + m_freem (m); + } + return ENOBUFS; +} diff --git a/bsd/netccitt/pk_timer.c b/bsd/netccitt/pk_timer.c new file mode 100644 index 000000000..d664e0913 --- /dev/null +++ b/bsd/netccitt/pk_timer.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) Computing Centre, University of British Columbia, 1984 + * Copyright (C) Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1992 + * Copyright (c) 1990, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_timer.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* + * Various timer values. They can be adjusted + * by patching the binary with adb if necessary. + */ +int pk_t20 = 18 * PR_SLOWHZ; /* restart timer */ +int pk_t21 = 20 * PR_SLOWHZ; /* call timer */ +/* XXX pk_t22 is never used */ +int pk_t22 = 18 * PR_SLOWHZ; /* reset timer */ +int pk_t23 = 18 * PR_SLOWHZ; /* clear timer */ + +pk_timer () +{ + register struct pkcb *pkp; + register struct pklcd *lcp, **pp; + register int lcns_jammed, cant_restart; + + FOR_ALL_PKCBS(pkp) { + switch (pkp -> pk_state) { + case DTE_SENT_RESTART: + lcp = pkp -> pk_chan[0]; + /* + * If restart failures are common, a link level + * reset should be initiated here. 
+ */ + if (lcp -> lcd_timer && --lcp -> lcd_timer == 0) { + pk_message (0, pkp -> pk_xcp, + "packet level restart failed"); + pkp -> pk_state = DTE_WAITING; + } + break; + + case DTE_READY: + lcns_jammed = cant_restart = 0; + for (pp = &pkp -> pk_chan[1]; pp <= &pkp -> pk_chan[pkp -> pk_maxlcn]; pp++) { + if ((lcp = *pp) == 0) + continue; + switch (lcp -> lcd_state) { + case SENT_CALL: + if (--lcp -> lcd_timer == 0) { + if (lcp -> lcd_so) + lcp -> lcd_so -> so_error = ETIMEDOUT; + pk_clear (lcp, 49, 1); + } + break; + + case SENT_CLEAR: + if (lcp -> lcd_retry >= 3) + lcns_jammed++; + else + if (--lcp -> lcd_timer == 0) + pk_clear (lcp, 50, 1); + break; + + case DATA_TRANSFER: /* lcn active */ + cant_restart++; + break; + + case LCN_ZOMBIE: /* zombie state */ + pk_freelcd (lcp); + break; + } + } + if (lcns_jammed > pkp -> pk_maxlcn / 2 && cant_restart == 0) { + pk_message (0, pkp -> pk_xcp, "%d lcns jammed: attempting restart", lcns_jammed); + pk_restart (pkp, 0); + } + } + } +} diff --git a/bsd/netccitt/pk_usrreq.c b/bsd/netccitt/pk_usrreq.c new file mode 100644 index 000000000..ba1cb9500 --- /dev/null +++ b/bsd/netccitt/pk_usrreq.c @@ -0,0 +1,626 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (C) Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1992 + * Copyright (c) 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +static old_to_new(); +static new_to_old(); +/* + * + * X.25 Packet level protocol interface to socket abstraction. + * + * Process an X.25 user request on a logical channel. If this is a send + * request then m is the mbuf chain of the send data. If this is a timer + * expiration (called from the software clock routine) them timertype is + * the particular timer. + * + */ + +pk_usrreq (so, req, m, nam, control) +struct socket *so; +int req; +register struct mbuf *m, *nam; +struct mbuf *control; +{ + register struct pklcd *lcp = (struct pklcd *) so -> so_pcb; + register int error = 0; + + if (req == PRU_CONTROL) + return (pk_control (so, (int)m, (caddr_t)nam, + (struct ifnet *)control)); + if (control && control -> m_len) { + error = EINVAL; + goto release; + } + if (lcp == NULL && req != PRU_ATTACH) { + error = EINVAL; + goto release; + } + +/* + pk_trace (pkcbhead, TR_USER, (struct pklcd *)0, + req, (struct x25_packet *)0); +*/ + + switch (req) { + /* + * X.25 attaches to socket via PRU_ATTACH and allocates a logical + * channel descriptor. If the socket is to receive connections, + * then the LISTEN state is entered. + */ + case PRU_ATTACH: + if (lcp) { + error = EISCONN; + /* Socket already connected. 
*/ + break; + } + lcp = pk_attach (so); + if (lcp == 0) + error = ENOBUFS; + break; + + /* + * Detach a logical channel from the socket. If the state of the + * channel is embryonic, simply discard it. Otherwise we have to + * initiate a PRU_DISCONNECT which will finish later. + */ + case PRU_DETACH: + pk_disconnect (lcp); + break; + + /* + * Give the socket an address. + */ + case PRU_BIND: + if (nam -> m_len == sizeof (struct x25_sockaddr)) + old_to_new (nam); + error = pk_bind (lcp, nam); + break; + + /* + * Prepare to accept connections. + */ + case PRU_LISTEN: + error = pk_listen (lcp); + break; + + /* + * Initiate a CALL REQUEST to peer entity. Enter state SENT_CALL + * and mark the socket as connecting. Set timer waiting for + * CALL ACCEPT or CLEAR. + */ + case PRU_CONNECT: + if (nam -> m_len == sizeof (struct x25_sockaddr)) + old_to_new (nam); + if (pk_checksockaddr (nam)) + return (EINVAL); + error = pk_connect (lcp, mtod (nam, struct sockaddr_x25 *)); + break; + + /* + * Initiate a disconnect to peer entity via a CLEAR REQUEST packet. + * The socket will be disconnected when we receive a confirmation + * or a clear collision. + */ + case PRU_DISCONNECT: + pk_disconnect (lcp); + break; + + /* + * Accept an INCOMING CALL. Most of the work has already been done + * by pk_input. Just return the callers address to the user. + */ + case PRU_ACCEPT: + if (lcp -> lcd_craddr == NULL) + break; + bcopy ((caddr_t)lcp -> lcd_craddr, mtod (nam, caddr_t), + sizeof (struct sockaddr_x25)); + nam -> m_len = sizeof (struct sockaddr_x25); + if (lcp -> lcd_flags & X25_OLDSOCKADDR) + new_to_old (nam); + break; + + /* + * After a receive, we should send a RR. + */ + case PRU_RCVD: + pk_flowcontrol (lcp, /*sbspace (&so -> so_rcv) <= */ 0, 1); + break; + + /* + * Send INTERRUPT packet. 
+ */ + case PRU_SENDOOB: + if (m == 0) { + MGETHDR(m, M_WAITOK, MT_OOBDATA); + m -> m_pkthdr.len = m -> m_len = 1; + *mtod (m, octet *) = 0; + } + if (m -> m_pkthdr.len > 32) { + m_freem (m); + error = EMSGSIZE; + break; + } + MCHTYPE(m, MT_OOBDATA); + /* FALLTHROUGH */ + + /* + * Do send by placing data on the socket output queue. + */ + case PRU_SEND: + if (control) { + register struct cmsghdr *ch = mtod (m, struct cmsghdr *); + control -> m_len -= sizeof (*ch); + control -> m_data += sizeof (*ch); + error = pk_ctloutput (PRCO_SETOPT, so, ch -> cmsg_level, + ch -> cmsg_type, &control); + } + if (error == 0 && m) + error = pk_send (lcp, m); + break; + + /* + * Abort a virtual circuit. For example all completed calls + * waiting acceptance. + */ + case PRU_ABORT: + pk_disconnect (lcp); + break; + + /* Begin unimplemented hooks. */ + + case PRU_SHUTDOWN: + error = EOPNOTSUPP; + break; + + case PRU_CONTROL: + error = EOPNOTSUPP; + break; + + case PRU_SENSE: +#ifdef BSD4_3 + ((struct stat *)m) -> st_blksize = so -> so_snd.sb_hiwat; +#else + error = EOPNOTSUPP; +#endif + break; + + /* End unimplemented hooks. */ + + case PRU_SOCKADDR: + if (lcp -> lcd_ceaddr == 0) + return (EADDRNOTAVAIL); + nam -> m_len = sizeof (struct sockaddr_x25); + bcopy ((caddr_t)lcp -> lcd_ceaddr, mtod (nam, caddr_t), + sizeof (struct sockaddr_x25)); + if (lcp -> lcd_flags & X25_OLDSOCKADDR) + new_to_old (nam); + break; + + case PRU_PEERADDR: + if (lcp -> lcd_state != DATA_TRANSFER) + return (ENOTCONN); + nam -> m_len = sizeof (struct sockaddr_x25); + bcopy (lcp -> lcd_craddr ? (caddr_t)lcp -> lcd_craddr : + (caddr_t)lcp -> lcd_ceaddr, + mtod (nam, caddr_t), sizeof (struct sockaddr_x25)); + if (lcp -> lcd_flags & X25_OLDSOCKADDR) + new_to_old (nam); + break; + + /* + * Receive INTERRUPT packet. 
+ */ + case PRU_RCVOOB: + if (so -> so_options & SO_OOBINLINE) { + register struct mbuf *n = so -> so_rcv.sb_mb; + if (n && n -> m_type == MT_OOBDATA) { + unsigned len = n -> m_pkthdr.len; + so -> so_rcv.sb_mb = n -> m_nextpkt; + if (len != n -> m_len && + (n = m_pullup (n, len)) == 0) + break; + m -> m_len = len; + bcopy (mtod (m, caddr_t), mtod (n, caddr_t), len); + m_freem (n); + } + break; + } + m -> m_len = 1; + *mtod (m, char *) = lcp -> lcd_intrdata; + break; + + default: + panic ("pk_usrreq"); + } +release: + if (control != NULL) + m_freem (control); + return (error); +} + +/* + * If you want to use UBC X.25 level 3 in conjunction with some + * other X.25 level 2 driver, have the ifp -> if_ioctl routine + * assign pk_start to ia -> ia_start when called with SIOCSIFCONF_X25. + */ +/* ARGSUSED */ +pk_start (lcp) +register struct pklcd *lcp; +{ + pk_output (lcp); + return (0); /* XXX pk_output should return a value */ +} + +#ifndef _offsetof +#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m)) +#endif +struct sockaddr_x25 pk_sockmask = { + _offsetof(struct sockaddr_x25, x25_addr[0]), /* x25_len */ + 0, /* x25_family */ + -1, /* x25_net id */ +}; + +/*ARGSUSED*/ +pk_control (so, cmd, data, ifp) +struct socket *so; +int cmd; +caddr_t data; +register struct ifnet *ifp; +{ + register struct ifreq_x25 *ifr = (struct ifreq_x25 *)data; + register struct ifaddr *ifa = 0; + register struct x25_ifaddr *ia = 0; + struct pklcd *dev_lcp = 0; + int error, s, old_maxlcn; + unsigned n; + + /* + * Find address for this interface, if it exists. 
+ */ + if (ifp) + for (ifa = ifp -> if_addrlist; ifa; ifa = ifa -> ifa_next) + if (ifa -> ifa_addr -> sa_family == AF_CCITT) + break; + + ia = (struct x25_ifaddr *)ifa; + switch (cmd) { + case SIOCGIFCONF_X25: + if (ifa == 0) + return (EADDRNOTAVAIL); + ifr -> ifr_xc = ia -> ia_xc; + return (0); + + case SIOCSIFCONF_X25: + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); + if (ifp == 0) + panic ("pk_control"); + if (ifa == (struct ifaddr *)0) { + register struct mbuf *m; + + MALLOC(ia, struct x25_ifaddr *, sizeof (*ia), + M_IFADDR, M_WAITOK); + if (ia == 0) + return (ENOBUFS); + bzero ((caddr_t)ia, sizeof (*ia)); + if (ifa = ifp -> if_addrlist) { + for ( ; ifa -> ifa_next; ifa = ifa -> ifa_next) + ; + ifa -> ifa_next = &ia -> ia_ifa; + } else + ifp -> if_addrlist = &ia -> ia_ifa; + ifa = &ia -> ia_ifa; + ifa -> ifa_netmask = (struct sockaddr *)&pk_sockmask; + ifa -> ifa_addr = (struct sockaddr *)&ia -> ia_xc.xc_addr; + ifa -> ifa_dstaddr = (struct sockaddr *)&ia -> ia_dstaddr; /* XXX */ + ia -> ia_ifp = ifp; + ia -> ia_dstaddr.x25_family = AF_CCITT; + ia -> ia_dstaddr.x25_len = pk_sockmask.x25_len; + } else if (ISISO8802(ifp) == 0) { + rtinit (ifa, (int)RTM_DELETE, 0); + } + old_maxlcn = ia -> ia_maxlcn; + ia -> ia_xc = ifr -> ifr_xc; + ia -> ia_dstaddr.x25_net = ia -> ia_xc.xc_addr.x25_net; + if (ia -> ia_maxlcn != old_maxlcn && old_maxlcn != 0) { + /* VERY messy XXX */ + register struct pkcb *pkp; + FOR_ALL_PKCBS(pkp) + if (pkp -> pk_ia == ia) + pk_resize (pkp); + } + /* + * Give the interface a chance to initialize if this +p * is its first address, and to validate the address. 
+ */ + ia -> ia_start = pk_start; + s = splimp(); + if (ifp -> if_ioctl) + error = (*ifp -> if_ioctl)(ifp, SIOCSIFCONF_X25, + (caddr_t) ifa); + if (error) + ifp -> if_flags &= ~IFF_UP; + else if (ISISO8802(ifp) == 0) + error = rtinit (ifa, (int)RTM_ADD, RTF_UP); + splx (s); + return (error); + + default: + if (ifp == 0 || ifp -> if_ioctl == 0) + return (EOPNOTSUPP); + return ((*ifp -> if_ioctl)(ifp, cmd, data)); + } +} + +pk_ctloutput (cmd, so, level, optname, mp) +struct socket *so; +struct mbuf **mp; +int cmd, level, optname; +{ + register struct mbuf *m = *mp; + register struct pklcd *lcp = (struct pklcd *) so -> so_pcb; + int error = EOPNOTSUPP; + + if (m == 0) + return (EINVAL); + if (cmd == PRCO_SETOPT) switch (optname) { + case PK_FACILITIES: + if (m == 0) + return (EINVAL); + lcp -> lcd_facilities = m; + *mp = 0; + return (0); + + case PK_ACCTFILE: + if ((so->so_state & SS_PRIV) == 0) + error = EPERM; + else if (m -> m_len) + error = pk_accton (mtod (m, char *)); + else + error = pk_accton ((char *)0); + break; + + case PK_RTATTACH: + error = pk_rtattach (so, m); + break; + + case PK_PRLISTEN: + error = pk_user_protolisten (mtod (m, u_char *)); + } + if (*mp) { + (void) m_freem (*mp); + *mp = 0; + } + return (error); + +} + + +/* + * Do an in-place conversion of an "old style" + * socket address to the new style + */ + +static +old_to_new (m) +register struct mbuf *m; +{ + register struct x25_sockaddr *oldp; + register struct sockaddr_x25 *newp; + register char *ocp, *ncp; + struct sockaddr_x25 new; + + oldp = mtod (m, struct x25_sockaddr *); + newp = &new; + bzero ((caddr_t)newp, sizeof (*newp)); + + newp -> x25_family = AF_CCITT; + newp -> x25_len = sizeof(*newp); + newp -> x25_opts.op_flags = (oldp -> xaddr_facilities & X25_REVERSE_CHARGE) + | X25_MQBIT | X25_OLDSOCKADDR; + if (oldp -> xaddr_facilities & XS_HIPRIO) /* Datapac specific */ + newp -> x25_opts.op_psize = X25_PS128; + bcopy ((caddr_t)oldp -> xaddr_addr, newp -> x25_addr, + (unsigned)min (oldp 
-> xaddr_len, sizeof (newp -> x25_addr) - 1)); + if (bcmp ((caddr_t)oldp -> xaddr_proto, newp -> x25_udata, 4) != 0) { + bcopy ((caddr_t)oldp -> xaddr_proto, newp -> x25_udata, 4); + newp -> x25_udlen = 4; + } + ocp = (caddr_t)oldp -> xaddr_userdata; + ncp = newp -> x25_udata + 4; + while (*ocp && ocp < (caddr_t)oldp -> xaddr_userdata + 12) { + if (newp -> x25_udlen == 0) + newp -> x25_udlen = 4; + *ncp++ = *ocp++; + newp -> x25_udlen++; + } + bcopy ((caddr_t)newp, mtod (m, char *), sizeof (*newp)); + m -> m_len = sizeof (*newp); +} + +/* + * Do an in-place conversion of a new style + * socket address to the old style + */ + +static +new_to_old (m) +register struct mbuf *m; +{ + register struct x25_sockaddr *oldp; + register struct sockaddr_x25 *newp; + register char *ocp, *ncp; + struct x25_sockaddr old; + + oldp = &old; + newp = mtod (m, struct sockaddr_x25 *); + bzero ((caddr_t)oldp, sizeof (*oldp)); + + oldp -> xaddr_facilities = newp -> x25_opts.op_flags & X25_REVERSE_CHARGE; + if (newp -> x25_opts.op_psize == X25_PS128) + oldp -> xaddr_facilities |= XS_HIPRIO; /* Datapac specific */ + ocp = (char *)oldp -> xaddr_addr; + ncp = newp -> x25_addr; + while (*ncp) { + *ocp++ = *ncp++; + oldp -> xaddr_len++; + } + + bcopy (newp -> x25_udata, (caddr_t)oldp -> xaddr_proto, 4); + if (newp -> x25_udlen > 4) + bcopy (newp -> x25_udata + 4, (caddr_t)oldp -> xaddr_userdata, + (unsigned)(newp -> x25_udlen - 4)); + + bcopy ((caddr_t)oldp, mtod (m, char *), sizeof (*oldp)); + m -> m_len = sizeof (*oldp); +} + + +pk_checksockaddr (m) +struct mbuf *m; +{ + register struct sockaddr_x25 *sa = mtod (m, struct sockaddr_x25 *); + register char *cp; + + if (m -> m_len != sizeof (struct sockaddr_x25)) + return (1); + if (sa -> x25_family != AF_CCITT || + sa -> x25_udlen > sizeof (sa -> x25_udata)) + return (1); + for (cp = sa -> x25_addr; *cp; cp++) { + if (*cp < '0' || *cp > '9' || + cp >= &sa -> x25_addr[sizeof (sa -> x25_addr) - 1]) + return (1); + } + return (0); +} + +pk_send 
(lcp, m) +struct pklcd *lcp; +register struct mbuf *m; +{ + int mqbit = 0, error = 0; + register struct x25_packet *xp; + register struct socket *so; + + if (m -> m_type == MT_OOBDATA) { + if (lcp -> lcd_intrconf_pending) + error = ETOOMANYREFS; + if (m -> m_pkthdr.len > 32) + error = EMSGSIZE; + M_PREPEND(m, PKHEADERLN, M_WAITOK); + if (m == 0 || error) + goto bad; + *(mtod (m, octet *)) = 0; + xp = mtod (m, struct x25_packet *); + X25SBITS(xp -> bits, fmt_identifier, 1); + xp -> packet_type = X25_INTERRUPT; + SET_LCN(xp, lcp -> lcd_lcn); + sbinsertoob ( (so = lcp -> lcd_so) ? + &so -> so_snd : &lcp -> lcd_sb, m); + goto send; + } + /* + * Application has elected (at call setup time) to prepend + * a control byte to each packet written indicating m-bit + * and q-bit status. Examine and then discard this byte. + */ + if (lcp -> lcd_flags & X25_MQBIT) { + if (m -> m_len < 1) { + m_freem (m); + return (EMSGSIZE); + } + mqbit = *(mtod (m, u_char *)); + m -> m_len--; + m -> m_data++; + m -> m_pkthdr.len--; + } + error = pk_fragment (lcp, m, mqbit & 0x80, mqbit & 0x40, 1); +send: + if (error == 0 && lcp -> lcd_state == DATA_TRANSFER) + lcp -> lcd_send (lcp); /* XXXXXXXXX fix pk_output!!! */ + return (error); +bad: + if (m) + m_freem (m); + return (error); +} diff --git a/bsd/netccitt/pk_var.h b/bsd/netccitt/pk_var.h new file mode 100644 index 000000000..94b3b76ae --- /dev/null +++ b/bsd/netccitt/pk_var.h @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) Computing Centre, University of British Columbia, 1985 + * Copyright (C) Computer Science Department IV, + * University of Erlangen-Nuremberg, Germany, 1990, 1991, 1992 + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)pk_var.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * + * X.25 Logical Channel Descriptor + * + */ + +struct pklcd { + struct pklcd_q { + struct pklcd_q *q_forw; /* debugging chain */ + struct pklcd_q *q_back; /* debugging chain */ + } lcd_q; + int (*lcd_upper)(); /* switch to socket vs datagram vs ...*/ + caddr_t lcd_upnext; /* reference for lcd_upper() */ + int (*lcd_send)(); /* if X.25 front end, direct connect */ + caddr_t lcd_downnext; /* reference for lcd_send() */ + short lcd_lcn; /* Logical channel number */ + short lcd_state; /* Logical Channel state */ + short lcd_timer; /* Various timer values */ + short lcd_dg_timer; /* to reclaim idle datagram circuits */ + bool lcd_intrconf_pending; /* Interrupt confirmation pending */ + octet lcd_intrdata; /* Octet of incoming intr data */ + char lcd_retry; /* Timer retry count */ + char lcd_rsn; /* Seq no of last received packet */ + char lcd_ssn; /* Seq no of next packet to send */ + char lcd_output_window; /* Output flow control window */ + char 
lcd_input_window; /* Input flow control window */ + char lcd_last_transmitted_pr;/* Last Pr value transmitted */ + bool lcd_rnr_condition; /* Remote in busy condition */ + bool lcd_window_condition; /* Output window size exceeded */ + bool lcd_reset_condition; /* True, if waiting reset confirm */ + bool lcd_rxrnr_condition; /* True, if we have sent rnr */ + char lcd_packetsize; /* Maximum packet size */ + char lcd_windowsize; /* Window size - both directions */ + octet lcd_closed_user_group; /* Closed user group specification */ + char lcd_flags; /* copy of sockaddr_x25 op_flags */ + struct mbuf *lcd_facilities; /* user supplied facilities for cr */ + struct mbuf *lcd_template; /* Address of response packet */ + struct socket *lcd_so; /* Socket addr for connection */ + struct sockaddr_x25 *lcd_craddr;/* Calling address pointer */ + struct sockaddr_x25 *lcd_ceaddr;/* Called address pointer */ + time_t lcd_stime; /* time circuit established */ + long lcd_txcnt; /* Data packet transmit count */ + long lcd_rxcnt; /* Data packet receive count */ + short lcd_intrcnt; /* Interrupt packet transmit count */ + struct pklcd *lcd_listen; /* Next lcd on listen queue */ + struct pkcb *lcd_pkp; /* Network this lcd is attached to */ + struct mbuf *lcd_cps; /* Complete Packet Sequence reassembly*/ + long lcd_cpsmax; /* Max length for CPS */ + struct sockaddr_x25 lcd_faddr; /* Remote Address (Calling) */ + struct sockaddr_x25 lcd_laddr; /* Local Address (Called) */ + struct sockbuf lcd_sb; /* alternate for datagram service */ +}; + +/* + * Per network information, allocated dynamically + * when a new network is configured. 
+ */ + +struct pkcb { + struct pkcb_q { + struct pkcb_q *q_forw; + struct pkcb_q *q_backw; + } pk_q; + short pk_state; /* packet level status */ + short pk_maxlcn; /* local copy of xc_maxlcn */ + int (*pk_lloutput) (); /* link level output procedure */ + caddr_t (*pk_llctlinput) (); /* link level ctloutput procedure */ + caddr_t pk_llnext; /* handle for next level down */ + struct x25config *pk_xcp; /* network specific configuration */ + struct x25_ifaddr *pk_ia; /* backpointer to ifaddr */ + struct pklcd **pk_chan; /* actual size == xc_maxlcn+1 */ + short pk_dxerole; /* DXE role of PLE over LLC2 */ + short pk_restartcolls; /* counting RESTART collisions til resolved */ + struct rtentry *pk_rt; /* back pointer to route */ + struct rtentry *pk_llrt; /* pointer to reverse mapping */ + u_short pk_refcount; /* ref count */ +}; + +#define FOR_ALL_PKCBS(p) for((p) = (struct pkcb *)(pkcb_q.q_forw); \ + (pkcb_q.q_forw != &pkcb_q) && ((struct pkcb_q *)(p) != &pkcb_q); \ + (p) = (struct pkcb *)((p) -> pk_q.q_forw)) + +#define PQEMPTY (pkcb_q.q_forw == &pkcb_q) + +/* + * Interface address, x25 version. Exactly one of these structures is + * allocated for each interface with an x25 address. + * + * The ifaddr structure conatins the protocol-independent part + * of the structure, and is assumed to be first. + */ +struct x25_ifaddr { + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + struct x25config ia_xc; /* network specific configuration */ + struct pkcb *ia_pkcb; +#define ia_maxlcn ia_xc.xc_maxlcn + int (*ia_start) (); /* connect, confirm method */ + struct sockaddr_x25 ia_dstaddr; /* reserve space for route dst */ +}; + +/* + * ``Link-Level'' extension to Routing Entry for upper level + * packet switching via X.25 virtual circuits. 
+ */ +struct llinfo_x25 { + struct llinfo_x25 *lx_next; /* chain together in linked list */ + struct llinfo_x25 *lx_prev; /* chain together in linked list */ + struct rtentry *lx_rt; /* back pointer to route */ + struct pklcd *lx_lcd; /* local connection block */ + struct x25_ifaddr *lx_ia; /* may not be same as rt_ifa */ + int lx_state; /* can't trust lcd->lcd_state */ + int lx_flags; + int lx_timer; /* for idle timeout */ + int lx_family; /* for dispatch */ +}; + +/* States for lx_state */ +#define LXS_NEWBORN 0 +#define LXS_RESOLVING 1 +#define LXS_FREE 2 +#define LXS_CONNECTING 3 +#define LXS_CONNECTED 4 +#define LXS_DISCONNECTING 5 +#define LXS_LISTENING 6 + +/* flags */ +#define LXF_VALID 0x1 /* Circuit is live, etc. */ +#define LXF_RTHELD 0x2 /* this lcb references rtentry */ +#define LXF_LISTEN 0x4 /* accepting incoming calls */ + +/* + * Definitions for accessing bitfields/bitslices inside X.25 structs + */ + + +struct x25bitslice { + unsigned int bs_mask; + unsigned int bs_shift; +}; + +#define calling_addrlen 0 +#define called_addrlen 1 +#define q_bit 2 +#define d_bit 3 +#define fmt_identifier 4 +#define lc_group_number 1 +#define p_r 5 +#define m_bit 6 +#define p_s 7 +#define zilch 8 + +#define X25GBITS(Arg, Index) (((Arg) & x25_bitslice[(Index)].bs_mask) >> x25_bitslice[(Index)].bs_shift) +#define X25SBITS(Arg, Index, Val) (Arg) |= (((Val) << x25_bitslice[(Index)].bs_shift) & x25_bitslice[(Index)].bs_mask) +#define X25CSBITS(Arg, Index, Val) (Arg) = (((Val) << x25_bitslice[(Index)].bs_shift) & x25_bitslice[(Index)].bs_mask) + +extern struct x25bitslice x25_bitslice[]; + + +#define ISOFIFTTYPE(i,t) ((i)->if_type == (t)) +#define ISISO8802(i) ((ISOFIFTTYPE(i, IFT_ETHER) || \ + ISOFIFTTYPE(i, IFT_ISO88023) || \ + ISOFIFTTYPE(i, IFT_ISO88024) || \ + ISOFIFTTYPE(i, IFT_ISO88025) || \ + ISOFIFTTYPE(i, IFT_ISO88026) || \ + ISOFIFTTYPE(i, IFT_P10) || \ + ISOFIFTTYPE(i, IFT_P80) || \ + ISOFIFTTYPE(i, IFT_FDDI))) + +/* + * miscellenous debugging info + */ 
+struct mbuf_cache { + int mbc_size; + int mbc_num; + int mbc_oldsize; + struct mbuf **mbc_cache; +}; + +#if defined(KERNEL) && defined(CCITT) +extern struct pkcb_q pkcb_q; +struct pklcd *pk_listenhead; +struct pklcd *pk_attach(); + +extern char *pk_name[], *pk_state[]; +int pk_t20, pk_t21, pk_t22, pk_t23; +#endif diff --git a/bsd/netccitt/x25.h b/bsd/netccitt/x25.h new file mode 100644 index 000000000..06a7b75de --- /dev/null +++ b/bsd/netccitt/x25.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * University of Erlangen-Nuremberg, Germany, 1992 + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)x25.h 8.1 (Berkeley) 6/10/93 + */ + +#ifdef KERNEL +#define PRC_IFUP 3 +#define PRC_LINKUP 4 +#define PRC_LINKDOWN 5 +#define PRC_LINKRESET 6 +#define PRC_LINKDONTCOPY 7 +#ifndef PRC_DISCONNECT_REQUEST +#define PRC_DISCONNECT_REQUEST 10 +#endif +#endif + +#define CCITTPROTO_HDLC 1 +#define CCITTPROTO_X25 2 /* packet level protocol */ +#define IEEEPROTO_802LLC 3 /* doesn't belong here */ + +#define HDLCPROTO_LAP 1 +#define HDLCPROTO_LAPB 2 +#define HDLCPROTO_UNSET 3 +#define HDLCPROTO_LAPD 4 + +/* socket options */ +#define PK_ACCTFILE 1 /* use level = CCITTPROTO_X25 */ +#define PK_FACILITIES 2 /* use level = CCITTPROTO_X25 */ +#define PK_RTATTACH 3 /* use level = CCITTPROTO_X25 */ +#define PK_PRLISTEN 4 /* use level = CCITTPROTO_X25 */ + +#define MAX_FACILITIES 109 /* maximum size for facilities */ + +/* + * X.25 Socket address structure. It contains the X.121 or variation of + * X.121, facilities information, higher level protocol value (first four + * bytes of the User Data field), and the last 12 characters of the User + * Data field. + */ + +struct x25_sockaddr { /* obsolete - use sockaddr_x25 */ + short xaddr_len; /* Length of xaddr_addr. */ + u_char xaddr_addr[15]; /* Network dependent or X.121 address. */ + u_char xaddr_facilities; /* Facilities information. */ +#define XS_REVERSE_CHARGE 0x01 +#define XS_HIPRIO 0x02 + u_char xaddr_proto[4]; /* Protocol ID (4 bytes of user data). */ + u_char xaddr_userdata[12]; /* Remaining User data field. */ +}; + +/* + * X.25 Socket address structure. It contains the network id, X.121 + * address, facilities information, higher level protocol value (first four + * bytes of the User Data field), and up to 12 characters of User Data. 
+ */ + +struct sockaddr_x25 { + u_char x25_len; + u_char x25_family; /* must be AF_CCITT */ + short x25_net; /* network id code (usually a dnic) */ + char x25_addr[16]; /* X.121 address (null terminated) */ + struct x25opts { + char op_flags; /* miscellaneous options */ + /* pk_var.h defines other lcd_flags */ +#define X25_REVERSE_CHARGE 0x01 /* remote DTE pays for call */ +#define X25_DBIT 0x02 /* not yet supported */ +#define X25_MQBIT 0x04 /* prepend M&Q bit status byte to packet data */ +#define X25_OLDSOCKADDR 0x08 /* uses old sockaddr structure */ +#define X25_DG_CIRCUIT 0x10 /* lcd_flag: used for datagrams */ +#define X25_DG_ROUTING 0x20 /* lcd_flag: peer addr not yet known */ +#define X25_MBS_HOLD 0x40 /* lcd_flag: collect m-bit sequences */ + char op_psize; /* requested packet size */ +#define X25_PS128 7 +#define X25_PS256 8 +#define X25_PS512 9 + char op_wsize; /* window size (1 .. 7) */ + char op_speed; /* throughput class */ + } x25_opts; + short x25_udlen; /* user data field length */ + char x25_udata[16]; /* user data field */ +}; + +/* + * network configuration info + * this structure must be 16 bytes long + */ + +struct x25config { + struct sockaddr_x25 xc_addr; + /* link level parameters */ + u_short xc_lproto:4, /* link level protocol eg. CCITTPROTO_HDLC */ + xc_lptype:4, /* protocol type eg. HDLCPROTO_LAPB */ + xc_ltrace:1, /* link level tracing flag */ + xc_lwsize:7; /* link level window size */ + u_short xc_lxidxchg:1, /* link level XID exchange flag - NOT YET */ + /* packet level parameters */ + xc_rsvd1:2, + xc_pwsize:3, /* default window size */ + xc_psize:4, /* default packet size 7=128, 8=256, ... 
*/ + xc_type:3, /* network type */ +#define X25_1976 0 +#define X25_1980 1 +#define X25_1984 2 +#define X25_DDN 3 +#define X25_BASIC 4 + xc_ptrace:1, /* packet level tracing flag */ + xc_nodnic:1, /* remove our dnic when calling on net */ + xc_prepnd0:1; /* prepend 0 when making offnet calls */ + u_short xc_maxlcn; /* max logical channels */ + u_short xc_dg_idletimo; /* timeout for idle datagram circuits. */ +}; + +#ifdef IFNAMSIZ +struct ifreq_x25 { + char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct x25config ifr_xc; +}; +#define SIOCSIFCONF_X25 _IOW('i', 12, struct ifreq_x25) /* set ifnet config */ +#define SIOCGIFCONF_X25 _IOWR('i',13, struct ifreq_x25) /* get ifnet config */ +#endif diff --git a/bsd/netccitt/x25_sockaddr.h b/bsd/netccitt/x25_sockaddr.h new file mode 100644 index 000000000..06a7b75de --- /dev/null +++ b/bsd/netccitt/x25_sockaddr.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1992, 1993 + * The Regents of the University of California. 
All rights reserved. + * University of Erlangen-Nuremberg, Germany, 1992 + * + * This code is derived from software contributed to Berkeley by the + * Laboratory for Computation Vision and the Computer Science Department + * of the the University of British Columbia and the Computer Science + * Department (IV) of the University of Erlangen-Nuremberg, Germany. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)x25.h 8.1 (Berkeley) 6/10/93 + */ + +#ifdef KERNEL +#define PRC_IFUP 3 +#define PRC_LINKUP 4 +#define PRC_LINKDOWN 5 +#define PRC_LINKRESET 6 +#define PRC_LINKDONTCOPY 7 +#ifndef PRC_DISCONNECT_REQUEST +#define PRC_DISCONNECT_REQUEST 10 +#endif +#endif + +#define CCITTPROTO_HDLC 1 +#define CCITTPROTO_X25 2 /* packet level protocol */ +#define IEEEPROTO_802LLC 3 /* doesn't belong here */ + +#define HDLCPROTO_LAP 1 +#define HDLCPROTO_LAPB 2 +#define HDLCPROTO_UNSET 3 +#define HDLCPROTO_LAPD 4 + +/* socket options */ +#define PK_ACCTFILE 1 /* use level = CCITTPROTO_X25 */ +#define PK_FACILITIES 2 /* use level = CCITTPROTO_X25 */ +#define PK_RTATTACH 3 /* use level = CCITTPROTO_X25 */ +#define PK_PRLISTEN 4 /* use level = CCITTPROTO_X25 */ + +#define MAX_FACILITIES 109 /* maximum size for facilities */ + +/* + * X.25 Socket address structure. It contains the X.121 or variation of + * X.121, facilities information, higher level protocol value (first four + * bytes of the User Data field), and the last 12 characters of the User + * Data field. + */ + +struct x25_sockaddr { /* obsolete - use sockaddr_x25 */ + short xaddr_len; /* Length of xaddr_addr. */ + u_char xaddr_addr[15]; /* Network dependent or X.121 address. */ + u_char xaddr_facilities; /* Facilities information. */ +#define XS_REVERSE_CHARGE 0x01 +#define XS_HIPRIO 0x02 + u_char xaddr_proto[4]; /* Protocol ID (4 bytes of user data). 
*/ + u_char xaddr_userdata[12]; /* Remaining User data field. */ +}; + +/* + * X.25 Socket address structure. It contains the network id, X.121 + * address, facilities information, higher level protocol value (first four + * bytes of the User Data field), and up to 12 characters of User Data. + */ + +struct sockaddr_x25 { + u_char x25_len; + u_char x25_family; /* must be AF_CCITT */ + short x25_net; /* network id code (usually a dnic) */ + char x25_addr[16]; /* X.121 address (null terminated) */ + struct x25opts { + char op_flags; /* miscellaneous options */ + /* pk_var.h defines other lcd_flags */ +#define X25_REVERSE_CHARGE 0x01 /* remote DTE pays for call */ +#define X25_DBIT 0x02 /* not yet supported */ +#define X25_MQBIT 0x04 /* prepend M&Q bit status byte to packet data */ +#define X25_OLDSOCKADDR 0x08 /* uses old sockaddr structure */ +#define X25_DG_CIRCUIT 0x10 /* lcd_flag: used for datagrams */ +#define X25_DG_ROUTING 0x20 /* lcd_flag: peer addr not yet known */ +#define X25_MBS_HOLD 0x40 /* lcd_flag: collect m-bit sequences */ + char op_psize; /* requested packet size */ +#define X25_PS128 7 +#define X25_PS256 8 +#define X25_PS512 9 + char op_wsize; /* window size (1 .. 7) */ + char op_speed; /* throughput class */ + } x25_opts; + short x25_udlen; /* user data field length */ + char x25_udata[16]; /* user data field */ +}; + +/* + * network configuration info + * this structure must be 16 bytes long + */ + +struct x25config { + struct sockaddr_x25 xc_addr; + /* link level parameters */ + u_short xc_lproto:4, /* link level protocol eg. CCITTPROTO_HDLC */ + xc_lptype:4, /* protocol type eg. HDLCPROTO_LAPB */ + xc_ltrace:1, /* link level tracing flag */ + xc_lwsize:7; /* link level window size */ + u_short xc_lxidxchg:1, /* link level XID exchange flag - NOT YET */ + /* packet level parameters */ + xc_rsvd1:2, + xc_pwsize:3, /* default window size */ + xc_psize:4, /* default packet size 7=128, 8=256, ... 
*/ + xc_type:3, /* network type */ +#define X25_1976 0 +#define X25_1980 1 +#define X25_1984 2 +#define X25_DDN 3 +#define X25_BASIC 4 + xc_ptrace:1, /* packet level tracing flag */ + xc_nodnic:1, /* remove our dnic when calling on net */ + xc_prepnd0:1; /* prepend 0 when making offnet calls */ + u_short xc_maxlcn; /* max logical channels */ + u_short xc_dg_idletimo; /* timeout for idle datagram circuits. */ +}; + +#ifdef IFNAMSIZ +struct ifreq_x25 { + char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct x25config ifr_xc; +}; +#define SIOCSIFCONF_X25 _IOW('i', 12, struct ifreq_x25) /* set ifnet config */ +#define SIOCGIFCONF_X25 _IOWR('i',13, struct ifreq_x25) /* get ifnet config */ +#endif diff --git a/bsd/netccitt/x25acct.h b/bsd/netccitt/x25acct.h new file mode 100644 index 000000000..14a52242d --- /dev/null +++ b/bsd/netccitt/x25acct.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)x25acct.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Format of X.25 accounting record written + * to X25ACCTF whenever a circuit is closed. + */ + +#ifdef waterloo +#define X25ACCTF "/usr/adm/logs/x25acct" +#else +#define X25ACCTF "/usr/adm/x25acct" +#endif + +struct x25acct { + time_t x25acct_stime; /* start time */ +#ifdef waterloo + u_long x25acct_etime; /* elapsed time (seconds) */ +#else + u_short x25acct_etime; /* elapsed time (seconds) */ +#endif + short x25acct_uid; /* user id */ + short x25acct_net; /* network id */ + u_short x25acct_psize:4, /* packet size */ + x25acct_addrlen:4, /* x25acct_addr length */ + x25acct_revcharge:1, /* reverse charging */ + x25acct_callin:1, /* incoming call */ + x25acct_unused:6; + char x25acct_addr[8]; /* remote DTE address (in bcd) */ + char x25acct_udata[4]; /* protocol id */ + long x25acct_txcnt; /* packets transmitted */ + long x25acct_rxcnt; /* packets received */ +}; diff --git a/bsd/netccitt/x25err.h b/bsd/netccitt/x25err.h new file mode 100644 index 000000000..97b47a8a1 --- /dev/null +++ b/bsd/netccitt/x25err.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) University of British Columbia, 1984 + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Laboratory for Computation Vision and the Computer Science Department + * of the University of British Columbia. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)x25err.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * + * X.25 Reset and Clear errors and diagnostics. These values are + * returned in the u_error field of the u structure. + * + */ + +#define EXRESET 100 /* Reset: call reset */ +#define EXROUT 101 /* Reset: out of order */ +#define EXRRPE 102 /* Reset: remote procedure error */ +#define EXRLPE 103 /* Reset: local procedure error */ +#define EXRNCG 104 /* Reset: network congestion */ + +#define EXCLEAR 110 /* Clear: call cleared */ +#define EXCBUSY 111 /* Clear: number busy */ +#define EXCOUT 112 /* Clear: out of order */ +#define EXCRPE 113 /* Clear: remote procedure error */ +#define EXCRRC 114 /* Clear: collect call refused */ +#define EXCINV 115 /* Clear: invalid call */ +#define EXCAB 116 /* Clear: access barred */ +#define EXCLPE 117 /* Clear: local procedure error */ +#define EXCNCG 118 /* Clear: network congestion */ +#define EXCNOB 119 /* Clear: not obtainable */ + diff --git a/bsd/netinet/Makefile b/bsd/netinet/Makefile new file mode 100644 index 000000000..15d797279 --- /dev/null +++ b/bsd/netinet/Makefile @@ -0,0 +1,44 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS 
= \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + bootp.h icmp6.h icmp_var.h if_atm.h if_ether.h if_fddi.h \ + igmp.h igmp_var.h in.h in_gif.h in_hostcache.h in_pcb.h \ + in_systm.h in_var.h ip.h ip6.h ip_auth.h ip_compat.h \ + ip_dummynet.h ip_ecn.h ip_encap.h ip_fil.h ip_flow.h \ + ip_frag.h ip_fw.h ip_icmp.h ip_mroute.h ip_nat.h \ + ip_proxy.h ip_state.h ip_var.h ipl.h tcp.h \ + tcp_debug.h tcp_fsm.h tcp_seq.h tcp_timer.h tcp_var.h \ + tcpip.h udp.h udp_var.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = netinet + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = netinet + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/netinet/bootp.h b/bsd/netinet/bootp.h new file mode 100644 index 000000000..4de5d87cd --- /dev/null +++ b/bsd/netinet/bootp.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Bootstrap Protocol (BOOTP). RFC 951. + */ +/* + * HISTORY + * + * 14 May 1992 ? at NeXT + * Added correct padding to struct nextvend. 
This is + * needed for the i386 due to alignment differences wrt + * the m68k. Also adjusted the size of the array fields + * because the NeXT vendor area was overflowing the bootp + * packet. + */ + +#define iaddr_t struct in_addr + +struct bootp { + u_char bp_op; /* packet opcode type */ +#define BOOTREQUEST 1 +#define BOOTREPLY 2 + u_char bp_htype; /* hardware addr type */ + u_char bp_hlen; /* hardware addr length */ + u_char bp_hops; /* gateway hops */ + u_long bp_xid; /* transaction ID */ + u_short bp_secs; /* seconds since boot began */ + u_short bp_unused; + iaddr_t bp_ciaddr; /* client IP address */ + iaddr_t bp_yiaddr; /* 'your' IP address */ + iaddr_t bp_siaddr; /* server IP address */ + iaddr_t bp_giaddr; /* gateway IP address */ + u_char bp_chaddr[16]; /* client hardware address */ + u_char bp_sname[64]; /* server host name */ + u_char bp_file[128]; /* boot file name */ + u_char bp_vend[64]; /* vendor-specific area */ +}; + +/* + * UDP port numbers, server and client. + */ +#define IPPORT_BOOTPS 67 +#define IPPORT_BOOTPC 68 + +/* + * "vendor" data permitted for Stanford boot clients. + */ +struct vend { + u_char v_magic[4]; /* magic number */ + u_long v_flags; /* flags/opcodes, etc. */ + u_char v_unused[56]; /* currently unused */ +}; +#define VM_STANFORD "STAN" /* v_magic for Stanford */ + +/* v_flags values */ +#define VF_PCBOOT 1 /* an IBMPC or Mac wants environment info */ +#define VF_HELP 2 /* help me, I'm not registered */ + +#define NVMAXTEXT 55 /* don't change this, it just fits RFC951 */ +struct nextvend { + u_char nv_magic[4]; /* Magic number for vendor specificity */ + u_char nv_version; /* NeXT protocol version */ + /* + * Round the beginning + * of the union to a 16 + * bit boundary due to + * struct/union alignment + * on the m68k. 
+ */ + unsigned short :0; + union { + u_char NV0[58]; + struct { + u_char NV1_opcode; /* opcode - Version 1 */ + u_char NV1_xid; /* transcation id */ + u_char NV1_text[NVMAXTEXT]; /* text */ + u_char NV1_null; /* null terminator */ + } NV1; + } nv_U; +}; +#define nv_unused nv_U.NV0 +#define nv_opcode nv_U.NV1.NV1_opcode +#define nv_xid nv_U.NV1.NV1_xid +#define nv_text nv_U.NV1.NV1_text +#define nv_null nv_U.NV1.NV1_null + +/* Magic number */ +#define VM_NEXT "NeXT" /* v_magic for NeXT, Inc. */ + +/* Opcodes */ +#define BPOP_OK 0 +#define BPOP_QUERY 1 +#define BPOP_QUERY_NE 2 +#define BPOP_ERROR 3 + +struct bootp_packet { + struct ip bp_ip; + struct udphdr bp_udp; + struct bootp bp_bootp; +}; + +#define BOOTP_PKTSIZE (sizeof (struct bootp_packet)) + +/* backoffs must be masks */ +#define BOOTP_MIN_BACKOFF 0x7ff /* 2.048 sec */ +#define BOOTP_MAX_BACKOFF 0xffff /* 65.535 sec */ +#define BOOTP_RETRY 6 /* # retries */ + diff --git a/bsd/netinet/fil.c b/bsd/netinet/fil.c new file mode 100644 index 000000000..4cba950af --- /dev/null +++ b/bsd/netinet/fil.c @@ -0,0 +1,1344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1993-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + */ +#if !defined(lint) +/* static const char sccsid[] = "@(#)fil.c 1.36 6/5/96 (C) 1993-1996 Darren Reed"; */ +#endif + +#include "opt_ipfilter.h" + +#include +#include +#include +#include +#include +#if !defined(__FreeBSD__) +# include +#endif + +# include + +#include +#if !defined(__SVR4) && !defined(__svr4__) +# ifndef linux +# include +# endif +#else +# include +# include +# include +#endif +#if defined(__FreeBSD__) +# include +#endif +#ifndef linux +# include +# include +#endif +#include +#ifdef sun +# include +#endif +#include +#include +#include +#include +#ifndef linux +# include +#endif +#include +#include +#include +#include "netinet/ip_compat.h" +#include +#include "netinet/ip_fil.h" +#include "netinet/ip_proxy.h" +#include "netinet/ip_nat.h" +#include "netinet/ip_frag.h" +#include "netinet/ip_state.h" +#include "netinet/ip_auth.h" +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#ifndef KERNEL +# include "ipf.h" +# include "ipt.h" +extern int opts; + +# define FR_IFVERBOSE(ex,second,verb_pr) if (ex) { verbose verb_pr; \ + second; } +# define FR_IFDEBUG(ex,second,verb_pr) if (ex) { debug verb_pr; \ + second; } +# define FR_VERBOSE(verb_pr) verbose verb_pr +# define FR_DEBUG(verb_pr) debug verb_pr +# define SEND_RESET(ip, qif, if, m) send_reset(ip, if) +# define IPLLOG(a, c, d, e) ipllog() +# define FR_NEWAUTH(m, fi, ip, qif) fr_newauth((mb_t *)m, fi, ip) +# if SOLARIS +# define ICMP_ERROR(b, ip, t, c, if, src) icmp_error(ip) +# else +# define ICMP_ERROR(b, ip, t, c, if, src) icmp_error(b, ip, if) +# endif +#else /* #ifndef KERNEL */ +# define FR_IFVERBOSE(ex,second,verb_pr) ; +# define FR_IFDEBUG(ex,second,verb_pr) ; +# define FR_VERBOSE(verb_pr) +# define 
FR_DEBUG(verb_pr) +# define IPLLOG(a, c, d, e) ipflog(a, c, d, e) +# if SOLARIS || defined(__sgi) +extern kmutex_t ipf_mutex, ipf_auth; +# endif +# if SOLARIS +# define FR_NEWAUTH(m, fi, ip, qif) fr_newauth((mb_t *)m, fi, \ + ip, qif) +# define SEND_RESET(ip, qif, if) send_reset(ip, qif) +# define ICMP_ERROR(b, ip, t, c, if, src) \ + icmp_error(ip, t, c, if, src) +# else /* SOLARIS */ +# define FR_NEWAUTH(m, fi, ip, qif) fr_newauth((mb_t *)m, fi, ip) +# ifdef linux +# define SEND_RESET(ip, qif, if) send_reset((tcpiphdr_t *)ip,\ + ifp) +# else +# define SEND_RESET(ip, qif, if) send_reset((tcpiphdr_t *)ip) +# endif +# ifdef __sgi +# define ICMP_ERROR(b, ip, t, c, if, src) \ + icmp_error(b, t, c, if, src, if) +# else +# if BSD < 199103 +# ifdef linux +# define ICMP_ERROR(b, ip, t, c, if, src) icmp_send(b,t,c,0,if) +# else +# define ICMP_ERROR(b, ip, t, c, if, src) \ + icmp_error(mtod(b, ip_t *), t, c, if, src) +# endif /* linux */ +# else +# define ICMP_ERROR(b, ip, t, c, if, src) \ + icmp_error(b, t, c, (src).s_addr, if) +# endif /* BSD < 199103 */ +# endif /* __sgi */ +# endif /* SOLARIS || __sgi */ +#endif /* KERNEL */ + + +struct filterstats frstats[2] = {{0,0,0,0,0},{0,0,0,0,0}}; +struct frentry *ipfilter[2][2] = { { NULL, NULL }, { NULL, NULL } }, + *ipacct[2][2] = { { NULL, NULL }, { NULL, NULL } }; +struct frgroup *ipfgroups[3][2]; +int fr_flags = IPF_LOGGING, fr_active = 0; +#if defined(IPFILTER_DEFAULT_BLOCK) +int fr_pass = FR_NOMATCH|FR_BLOCK; +#else +int fr_pass = (IPF_DEFAULT_PASS|FR_NOMATCH); +#endif + +fr_info_t frcache[2]; + +static void fr_makefrip __P((int, ip_t *, fr_info_t *)); +static int fr_tcpudpchk __P((frentry_t *, fr_info_t *)); +static int frflushlist __P((int, int, int *, frentry_t *, frentry_t **)); + + +/* + * bit values for identifying presence of individual IP options + */ +static struct optlist ipopts[20] = { + { IPOPT_NOP, 0x000001 }, + { IPOPT_RR, 0x000002 }, + { IPOPT_ZSU, 0x000004 }, + { IPOPT_MTUP, 0x000008 }, + { IPOPT_MTUR, 
0x000010 }, + { IPOPT_ENCODE, 0x000020 }, + { IPOPT_TS, 0x000040 }, + { IPOPT_TR, 0x000080 }, + { IPOPT_SECURITY, 0x000100 }, + { IPOPT_LSRR, 0x000200 }, + { IPOPT_E_SEC, 0x000400 }, + { IPOPT_CIPSO, 0x000800 }, + { IPOPT_SATID, 0x001000 }, + { IPOPT_SSRR, 0x002000 }, + { IPOPT_ADDEXT, 0x004000 }, + { IPOPT_VISA, 0x008000 }, + { IPOPT_IMITD, 0x010000 }, + { IPOPT_EIP, 0x020000 }, + { IPOPT_FINN, 0x040000 }, + { 0, 0x000000 } +}; + +/* + * bit values for identifying presence of individual IP security options + */ +static struct optlist secopt[8] = { + { IPSO_CLASS_RES4, 0x01 }, + { IPSO_CLASS_TOPS, 0x02 }, + { IPSO_CLASS_SECR, 0x04 }, + { IPSO_CLASS_RES3, 0x08 }, + { IPSO_CLASS_CONF, 0x10 }, + { IPSO_CLASS_UNCL, 0x20 }, + { IPSO_CLASS_RES2, 0x40 }, + { IPSO_CLASS_RES1, 0x80 } +}; + + +/* + * compact the IP header into a structure which contains just the info. + * which is useful for comparing IP headers with. + */ +static void fr_makefrip(hlen, ip, fin) +int hlen; +ip_t *ip; +fr_info_t *fin; +{ + struct optlist *op; + tcphdr_t *tcp; + icmphdr_t *icmp; + fr_ip_t *fi = &fin->fin_fi; + u_short optmsk = 0, secmsk = 0, auth = 0; + int i, mv, ol, off; + u_char *s, opt; + + fin->fin_fr = NULL; + fin->fin_tcpf = 0; + fin->fin_data[0] = 0; + fin->fin_data[1] = 0; + fin->fin_rule = -1; + fin->fin_group = -1; + fin->fin_id = ip->ip_id; +#ifdef KERNEL + fin->fin_icode = ipl_unreach; +#endif + fi->fi_v = ip->ip_v; + fi->fi_tos = ip->ip_tos; + fin->fin_hlen = hlen; + fin->fin_dlen = ip->ip_len - hlen; + tcp = (tcphdr_t *)((char *)ip + hlen); + icmp = (icmphdr_t *)tcp; + fin->fin_dp = (void *)tcp; + (*(((u_short *)fi) + 1)) = (*(((u_short *)ip) + 4)); + (*(((u_32_t *)fi) + 1)) = (*(((u_32_t *)ip) + 3)); + (*(((u_32_t *)fi) + 2)) = (*(((u_32_t *)ip) + 4)); + + fi->fi_fl = (hlen > sizeof(ip_t)) ? 
FI_OPTIONS : 0; + off = (ip->ip_off & 0x1fff) << 3; + if (ip->ip_off & 0x3fff) + fi->fi_fl |= FI_FRAG; + switch (ip->ip_p) + { + case IPPROTO_ICMP : + { + int minicmpsz = sizeof(struct icmp); + + if (!off && ip->ip_len > ICMP_MINLEN + hlen && + (icmp->icmp_type == ICMP_ECHOREPLY || + icmp->icmp_type == ICMP_UNREACH)) + minicmpsz = ICMP_MINLEN; + if ((!(ip->ip_len >= hlen + minicmpsz) && !off) || + (off && off < sizeof(struct icmp))) + fi->fi_fl |= FI_SHORT; + if (fin->fin_dlen > 1) + fin->fin_data[0] = *(u_short *)tcp; + break; + } + case IPPROTO_TCP : + fi->fi_fl |= FI_TCPUDP; + if ((!IPMINLEN(ip, tcphdr) && !off) || + (off && off < sizeof(struct tcphdr))) + fi->fi_fl |= FI_SHORT; + if (!(fi->fi_fl & FI_SHORT) && !off) + fin->fin_tcpf = tcp->th_flags; + goto getports; + case IPPROTO_UDP : + fi->fi_fl |= FI_TCPUDP; + if ((!IPMINLEN(ip, udphdr) && !off) || + (off && off < sizeof(struct udphdr))) + fi->fi_fl |= FI_SHORT; +getports: + if (!off && (fin->fin_dlen > 3)) { + fin->fin_data[0] = ntohs(tcp->th_sport); + fin->fin_data[1] = ntohs(tcp->th_dport); + } + break; + default : + break; + } + + + for (s = (u_char *)(ip + 1), hlen -= sizeof(*ip); hlen; ) { + if (!(opt = *s)) + break; + ol = (opt == IPOPT_NOP) ? 
1 : (int)*(s+1); + if (opt > 1 && (ol < 2 || ol > hlen)) + break; + for (i = 9, mv = 4; mv >= 0; ) { + op = ipopts + i; + if (opt == (u_char)op->ol_val) { + optmsk |= op->ol_bit; + if (opt == IPOPT_SECURITY) { + struct optlist *sp; + u_char sec; + int j, m; + + sec = *(s + 2); /* classification */ + for (j = 3, m = 2; m >= 0; ) { + sp = secopt + j; + if (sec == sp->ol_val) { + secmsk |= sp->ol_bit; + auth = *(s + 3); + auth *= 256; + auth += *(s + 4); + break; + } + if (sec < sp->ol_val) + j -= m--; + else + j += m--; + } + } + break; + } + if (opt < op->ol_val) + i -= mv--; + else + i += mv--; + } + hlen -= ol; + s += ol; + } + if (auth && !(auth & 0x0100)) + auth &= 0xff00; + fi->fi_optmsk = optmsk; + fi->fi_secmsk = secmsk; + fi->fi_auth = auth; +} + + +/* + * check an IP packet for TCP/UDP characteristics such as ports and flags. + */ +static int fr_tcpudpchk(fr, fin) +frentry_t *fr; +fr_info_t *fin; +{ + register u_short po, tup; + register char i; + register int err = 1; + + /* + * Both ports should *always* be in the first fragment. + * So far, I cannot find any cases where they can not be. + * + * compare destination ports + */ + if ((i = (int)fr->fr_dcmp)) { + po = fr->fr_dport; + tup = fin->fin_data[1]; + /* + * Do opposite test to that required and + * continue if that succeeds. 
+ */ + if (!--i && tup != po) /* EQUAL */ + err = 0; + else if (!--i && tup == po) /* NOTEQUAL */ + err = 0; + else if (!--i && tup >= po) /* LESSTHAN */ + err = 0; + else if (!--i && tup <= po) /* GREATERTHAN */ + err = 0; + else if (!--i && tup > po) /* LT or EQ */ + err = 0; + else if (!--i && tup < po) /* GT or EQ */ + err = 0; + else if (!--i && /* Out of range */ + (tup >= po && tup <= fr->fr_dtop)) + err = 0; + else if (!--i && /* In range */ + (tup <= po || tup >= fr->fr_dtop)) + err = 0; + } + /* + * compare source ports + */ + if (err && (i = (int)fr->fr_scmp)) { + po = fr->fr_sport; + tup = fin->fin_data[0]; + if (!--i && tup != po) + err = 0; + else if (!--i && tup == po) + err = 0; + else if (!--i && tup >= po) + err = 0; + else if (!--i && tup <= po) + err = 0; + else if (!--i && tup > po) + err = 0; + else if (!--i && tup < po) + err = 0; + else if (!--i && /* Out of range */ + (tup >= po && tup <= fr->fr_stop)) + err = 0; + else if (!--i && /* In range */ + (tup <= po || tup >= fr->fr_stop)) + err = 0; + } + + /* + * If we don't have all the TCP/UDP header, then how can we + * expect to do any sort of match on it ? If we were looking for + * TCP flags, then NO match. If not, then match (which should + * satisfy the "short" class too). + */ + if (err && (fin->fin_fi.fi_p == IPPROTO_TCP)) { + if (fin->fin_fi.fi_fl & FI_SHORT) + return !(fr->fr_tcpf | fr->fr_tcpfm); + /* + * Match the flags ? If not, abort this match. + */ + if (fr->fr_tcpf && + fr->fr_tcpf != (fin->fin_tcpf & fr->fr_tcpfm)) { + FR_DEBUG(("f. %#x & %#x != %#x\n", fin->fin_tcpf, + fr->fr_tcpfm, fr->fr_tcpf)); + err = 0; + } + } + return err; +} + +/* + * Check the input/output list of rules for a match and result. + * Could be per interface, but this gets real nasty when you don't have + * kernel sauce. 
+ */ +int fr_scanlist(pass, ip, fin, m) +int pass; +ip_t *ip; +register fr_info_t *fin; +void *m; +{ + register struct frentry *fr; + register fr_ip_t *fi = &fin->fin_fi; + int rulen, portcmp = 0, off, skip = 0; + + fr = fin->fin_fr; + fin->fin_fr = NULL; + fin->fin_rule = 0; + fin->fin_group = 0; + off = ip->ip_off & 0x1fff; + pass |= (fi->fi_fl << 24); + + if ((fi->fi_fl & FI_TCPUDP) && (fin->fin_dlen > 3) && !off) + portcmp = 1; + + for (rulen = 0; fr; fr = fr->fr_next, rulen++) { + if (skip) { + skip--; + continue; + } + /* + * In all checks below, a null (zero) value in the + * filter struture is taken to mean a wildcard. + * + * check that we are working for the right interface + */ +#ifdef KERNEL + if (fr->fr_ifa && fr->fr_ifa != fin->fin_ifp) + continue; +#else + if (opts & (OPT_VERBOSE|OPT_DEBUG)) + printf("\n"); + FR_VERBOSE(("%c", (pass & FR_PASS) ? 'p' : + (pass & FR_AUTH) ? 'a' : 'b')); + if (fr->fr_ifa && fr->fr_ifa != fin->fin_ifp) + continue; + FR_VERBOSE((":i")); +#endif + { + register u_32_t *ld, *lm, *lip; + register int i; + + lip = (u_32_t *)fi; + lm = (u_32_t *)&fr->fr_mip; + ld = (u_32_t *)&fr->fr_ip; + i = ((lip[0] & lm[0]) != ld[0]); + FR_IFDEBUG(i,continue,("0. %#08x & %#08x != %#08x\n", + lip[0], lm[0], ld[0])); + i |= ((lip[1] & lm[1]) != ld[1]) << 21; + FR_IFDEBUG(i,continue,("1. %#08x & %#08x != %#08x\n", + lip[1], lm[1], ld[1])); + i |= ((lip[2] & lm[2]) != ld[2]) << 22; + FR_IFDEBUG(i,continue,("2. %#08x & %#08x != %#08x\n", + lip[2], lm[2], ld[2])); + i |= ((lip[3] & lm[3]) != ld[3]); + FR_IFDEBUG(i,continue,("3. %#08x & %#08x != %#08x\n", + lip[3], lm[3], ld[3])); + i |= ((lip[4] & lm[4]) != ld[4]); + FR_IFDEBUG(i,continue,("4. %#08x & %#08x != %#08x\n", + lip[4], lm[4], ld[4])); + i ^= (fi->fi_fl & (FR_NOTSRCIP|FR_NOTDSTIP)); + if (i) + continue; + } + + /* + * If a fragment, then only the first has what we're looking + * for here... 
+ */ + if (!portcmp && (fr->fr_dcmp || fr->fr_scmp || fr->fr_tcpf || + fr->fr_tcpfm)) + continue; + if (fi->fi_fl & FI_TCPUDP) { + if (!fr_tcpudpchk(fr, fin)) + continue; + } else if (fr->fr_icmpm || fr->fr_icmp) { + if ((fi->fi_p != IPPROTO_ICMP) || off || + (fin->fin_dlen < 2)) + continue; + if ((fin->fin_data[0] & fr->fr_icmpm) != fr->fr_icmp) { + FR_DEBUG(("i. %#x & %#x != %#x\n", + fin->fin_data[0], fr->fr_icmpm, + fr->fr_icmp)); + continue; + } + } + FR_VERBOSE(("*")); + /* + * Just log this packet... + */ + if (!(skip = fr->fr_skip)) + pass = fr->fr_flags; + if ((pass & FR_CALLNOW) && fr->fr_func) + pass = (*fr->fr_func)(pass, ip, fin); +#if IPFILTER_LOG + if ((pass & FR_LOGMASK) == FR_LOG) { + if (!IPLLOG(fr->fr_flags, ip, fin, m)) + frstats[fin->fin_out].fr_skip++; + frstats[fin->fin_out].fr_pkl++; + } +#endif /* IPFILTER_LOG */ + FR_DEBUG(("pass %#x\n", pass)); + fr->fr_hits++; + if (pass & FR_ACCOUNT) + fr->fr_bytes += (U_QUAD_T)ip->ip_len; + else + fin->fin_icode = fr->fr_icode; + fin->fin_rule = rulen; + fin->fin_group = fr->fr_group; + fin->fin_fr = fr; + if (fr->fr_grp) { + fin->fin_fr = fr->fr_grp; + pass = fr_scanlist(pass, ip, fin, m); + if (fin->fin_fr == NULL) { + fin->fin_rule = rulen; + fin->fin_group = fr->fr_group; + fin->fin_fr = fr; + } + } + if (pass & FR_QUICK) + break; + } + return pass; +} + + +/* + * frcheck - filter check + * check using source and destination addresses/pors in a packet whether + * or not to pass it on or not. 
+ */ +int fr_check(ip, hlen, ifp, out +#if defined(KERNEL) && SOLARIS +, qif, mp) +qif_t *qif; +#else +, mp) +#endif +mb_t **mp; +ip_t *ip; +int hlen; +void *ifp; +int out; +{ + /* + * The above is really bad, but short of writing a diff + */ + fr_info_t frinfo, *fc; + register fr_info_t *fin = &frinfo; + frentry_t *fr = NULL; + int pass, changed, apass, error = EHOSTUNREACH; +#if !SOLARIS || !defined(KERNEL) + register mb_t *m = *mp; +#endif + +#if KERNEL + mb_t *mc = NULL; +# if !defined(__SVR4) && !defined(__svr4__) +# ifdef __sgi + char hbuf[(0xf << 2) + sizeof(struct icmp) + sizeof(ip_t) + 8]; +# endif + int up; + +#if M_CANFASTFWD + /* + * XXX For now, IP Filter and fast-forwarding of cached flows + * XXX are mutually exclusive. Eventually, IP Filter should + * XXX get a "can-fast-forward" filter rule. + */ + m->m_flags &= ~M_CANFASTFWD; +#endif /* M_CANFASTFWD */ + + if ((ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_UDP || + ip->ip_p == IPPROTO_ICMP)) { + int plen = 0; + + switch(ip->ip_p) + { + case IPPROTO_TCP: + plen = sizeof(tcphdr_t); + break; + case IPPROTO_UDP: + plen = sizeof(udphdr_t); + break; + case IPPROTO_ICMP: + /* 96 - enough for complete ICMP error IP header */ + plen = sizeof(struct icmp) + sizeof(ip_t) + 8; + break; + } + up = MIN(hlen + plen, ip->ip_len); + + if (up > m->m_len) { +#ifdef __sgi /* Under IRIX, avoid m_pullup as it makes ping panic */ + if ((up > sizeof(hbuf)) || (m_length(m) < up)) { + frstats[out].fr_pull[1]++; + return -1; + } + m_copydata(m, 0, up, hbuf); + frstats[out].fr_pull[0]++; + ip = (ip_t *)hbuf; +#else +# ifndef linux + if ((*mp = m_pullup(m, up)) == 0) { + frstats[out].fr_pull[1]++; + return -1; + } else { + frstats[out].fr_pull[0]++; + m = *mp; + ip = mtod(m, ip_t *); + } +# endif +#endif + } else + up = 0; + } else + up = 0; +# endif +# if SOLARIS + mb_t *m = qif->qf_m; +# endif +#endif + fr_makefrip(hlen, ip, fin); + fin->fin_ifp = ifp; + fin->fin_out = out; + fin->fin_mp = mp; + + MUTEX_ENTER(&ipf_mutex); 
+ + /* + * Check auth now. This, combined with the check below to see if apass + * is 0 is to ensure that we don't count the packet twice, which can + * otherwise occur when we reprocess it. As it is, we only count it + * after it has no auth. table matchup. This also stops NAT from + * occuring until after the packet has been auth'd. + */ + apass = fr_checkauth(ip, fin); + + if (!out) { + changed = ip_natin(ip, hlen, fin); + if (!apass && (fin->fin_fr = ipacct[0][fr_active]) && + (FR_SCANLIST(FR_NOMATCH, ip, fin, m) & FR_ACCOUNT)) + frstats[0].fr_acct++; + } + + if (apass || (!(pass = ipfr_knownfrag(ip, fin)) && + !(pass = fr_checkstate(ip, fin)))) { + /* + * If a packet is found in the auth table, then skip checking + * the access lists for permission but we do need to consider + * the result as if it were from the ACL's. + */ + if (!apass) { + fc = frcache + out; + if (!bcmp((char *)fin, (char *)fc, FI_CSIZE)) { + /* + * copy cached data so we can unlock the mutex + * earlier. + */ + bcopy((char *)fc, (char *)fin, FI_COPYSIZE); + frstats[out].fr_chit++; + if ((fr = fin->fin_fr)) { + fr->fr_hits++; + pass = fr->fr_flags; + } else + pass = fr_pass; + } else { + pass = fr_pass; + if ((fin->fin_fr = ipfilter[out][fr_active])) + pass = FR_SCANLIST(fr_pass, ip, fin, m); + bcopy((char *)fin, (char *)fc, FI_COPYSIZE); + if (pass & FR_NOMATCH) + frstats[out].fr_nom++; + } + fr = fin->fin_fr; + } else + pass = apass; + + /* + * If we fail to add a packet to the authorization queue, + * then we drop the packet later. However, if it was added + * then pretend we've dropped it already. 
+ */ + if ((pass & FR_AUTH)) + if (FR_NEWAUTH(m, fin, ip, qif) != 0) +#ifdef KERNEL + m = *mp = NULL; +#else + ; +#endif + + if (pass & FR_PREAUTH) { + MUTEX_ENTER(&ipf_auth); + if ((fin->fin_fr = ipauth) && + (pass = FR_SCANLIST(0, ip, fin, m))) + fr_authstats.fas_hits++; + else + fr_authstats.fas_miss++; + MUTEX_EXIT(&ipf_auth); + } + + if (pass & FR_KEEPFRAG) { + if (fin->fin_fi.fi_fl & FI_FRAG) { + if (ipfr_newfrag(ip, fin, pass) == -1) + frstats[out].fr_bnfr++; + else + frstats[out].fr_nfr++; + } else + frstats[out].fr_cfr++; + } + if (pass & FR_KEEPSTATE) { + if (fr_addstate(ip, fin, pass) == -1) + frstats[out].fr_bads++; + else + frstats[out].fr_ads++; + } + } + + if (fr && fr->fr_func && !(pass & FR_CALLNOW)) + pass = (*fr->fr_func)(pass, ip, fin); + + /* + * Only count/translate packets which will be passed on, out the + * interface. + */ + if (out && (pass & FR_PASS)) { + if ((fin->fin_fr = ipacct[1][fr_active]) && + (FR_SCANLIST(FR_NOMATCH, ip, fin, m) & FR_ACCOUNT)) + frstats[1].fr_acct++; + fin->fin_fr = NULL; + changed = ip_natout(ip, hlen, fin); + } + fin->fin_fr = fr; + MUTEX_EXIT(&ipf_mutex); + +#if IPFILTER_LOG + if ((fr_flags & FF_LOGGING) || (pass & FR_LOGMASK)) { + if ((fr_flags & FF_LOGNOMATCH) && (pass & FR_NOMATCH)) { + pass |= FF_LOGNOMATCH; + frstats[out].fr_npkl++; + goto logit; + } else if (((pass & FR_LOGMASK) == FR_LOGP) || + ((pass & FR_PASS) && (fr_flags & FF_LOGPASS))) { + if ((pass & FR_LOGMASK) != FR_LOGP) + pass |= FF_LOGPASS; + frstats[out].fr_ppkl++; + goto logit; + } else if (((pass & FR_LOGMASK) == FR_LOGB) || + ((pass & FR_BLOCK) && (fr_flags & FF_LOGBLOCK))) { + if ((pass & FR_LOGMASK) != FR_LOGB) + pass |= FF_LOGBLOCK; + frstats[out].fr_bpkl++; +logit: + if (!IPLLOG(pass, ip, fin, m)) { + frstats[out].fr_skip++; + if ((pass & (FR_PASS|FR_LOGORBLOCK)) == + (FR_PASS|FR_LOGORBLOCK)) + pass ^= FR_PASS|FR_BLOCK; + } + } + } +#endif /* IPFILTER_LOG */ +#ifdef KERNEL + /* + * Only allow FR_DUP to work if a rule matched - it makes 
no sense to + * set FR_DUP as a "default" as there are no instructions about where + * to send the packet. + */ + if (fr && (pass & FR_DUP)) +# if SOLARIS + mc = dupmsg(m); +# else +# ifndef linux + mc = m_copy(m, 0, M_COPYALL); +# else + ; +# endif +# endif +#endif + if (pass & FR_PASS) + frstats[out].fr_pass++; + else if (pass & FR_BLOCK) { + frstats[out].fr_block++; + /* + * Should we return an ICMP packet to indicate error + * status passing through the packet filter ? + * WARNING: ICMP error packets AND TCP RST packets should + * ONLY be sent in repsonse to incoming packets. Sending them + * in response to outbound packets can result in a panic on + * some operating systems. + */ + if (!out) { +#ifdef KERNEL + if (pass & FR_RETICMP) { +# if SOLARIS + ICMP_ERROR(q, ip, ICMP_UNREACH, fin->fin_icode, + qif, ip->ip_src); +# else + ICMP_ERROR(m, ip, ICMP_UNREACH, fin->fin_icode, + ifp, ip->ip_src); + m = *mp = NULL; /* freed by icmp_error() */ +# endif + + frstats[0].fr_ret++; + } else if ((pass & FR_RETRST) && + !(fin->fin_fi.fi_fl & FI_SHORT)) { + if (SEND_RESET(ip, qif, ifp) == 0) + frstats[1].fr_ret++; + } +#else + if (pass & FR_RETICMP) { + verbose("- ICMP unreachable sent\n"); + frstats[0].fr_ret++; + } else if ((pass & FR_RETRST) && + !(fin->fin_fi.fi_fl & FI_SHORT)) { + verbose("- TCP RST sent\n"); + frstats[1].fr_ret++; + } +#endif + } else { + if (pass & FR_RETRST) + error = ECONNRESET; + } + } + + /* + * If we didn't drop off the bottom of the list of rules (and thus + * the 'current' rule fr is not NULL), then we may have some extra + * instructions about what to do with a packet. + * Once we're finished return to our caller, freeing the packet if + * we are dropping it (* BSD ONLY *). 
+ */ +#if defined(KERNEL) +# if !SOLARIS +# if !defined(linux) + if (fr) { + frdest_t *fdp = &fr->fr_tif; + + if ((pass & FR_FASTROUTE) || + (fdp->fd_ifp && fdp->fd_ifp != (struct ifnet *)-1)) { + ipfr_fastroute(m, fin, fdp); + m = *mp = NULL; + } + if (mc) + ipfr_fastroute(mc, fin, &fr->fr_dif); + } + if (!(pass & FR_PASS) && m) + m_freem(m); +# ifdef __sgi + else if (changed && up && m) + m_copyback(m, 0, up, hbuf); +# endif +# endif /* !linux */ + return (pass & FR_PASS) ? 0 : error; +# else /* !SOLARIS */ + if (fr) { + frdest_t *fdp = &fr->fr_tif; + + if ((pass & FR_FASTROUTE) || + (fdp->fd_ifp && fdp->fd_ifp != (struct ifnet *)-1)) { + ipfr_fastroute(qif, ip, m, mp, fin, fdp); + m = *mp = NULL; + } + if (mc) + ipfr_fastroute(qif, ip, mc, mp, fin, &fr->fr_dif); + } + return (pass & FR_PASS) ? changed : error; +# endif /* !SOLARIS */ +#else /* KERNEL */ + if (pass & FR_NOMATCH) + return 1; + if (pass & FR_PASS) + return 0; + if (pass & FR_AUTH) + return -2; + return -1; +#endif /* KERNEL */ +} + + +/* + * ipf_cksum + * addr should be 16bit aligned and len is in bytes. + * length is in bytes + */ +u_short ipf_cksum(addr, len) +register u_short *addr; +register int len; +{ + register u_32_t sum = 0; + + for (sum = 0; len > 1; len -= 2) + sum += *addr++; + + /* mop up an odd byte, if necessary */ + if (len == 1) + sum += *(u_char *)addr; + + /* + * add back carry outs from top 16 bits to low 16 bits + */ + sum = (sum >> 16) + (sum & 0xffff); /* add hi 16 to low 16 */ + sum += (sum >> 16); /* add carry */ + return (u_short)(~sum); +} + + +/* + * NB: This function assumes we've pullup'd enough for all of the IP header + * and the TCP header. We also assume that data blocks aren't allocated in + * odd sizes. 
+ */ +u_short fr_tcpsum(m, ip, tcp, len) +mb_t *m; +ip_t *ip; +tcphdr_t *tcp; +int len; +{ + union { + u_char c[2]; + u_short s; + } bytes; + u_32_t sum; + u_short *sp; +# if SOLARIS || defined(__sgi) + int add, hlen; +# endif + +# if SOLARIS + /* skip any leading M_PROTOs */ + while(m && (MTYPE(m) != M_DATA)) + m = m->b_cont; + PANIC((!m),("fr_tcpsum: no M_DATA")); +# endif + + /* + * Add up IP Header portion + */ + bytes.c[0] = 0; + bytes.c[1] = IPPROTO_TCP; + len -= (ip->ip_hl << 2); + sum = bytes.s; + sum += htons((u_short)len); + sp = (u_short *)&ip->ip_src; + sum += *sp++; + sum += *sp++; + sum += *sp++; + sum += *sp++; + if (sp != (u_short *)tcp) + sp = (u_short *)tcp; + sum += *sp++; + sum += *sp++; + sum += *sp++; + sum += *sp++; + sum += *sp++; + sum += *sp++; + sum += *sp++; + sum += *sp; + sp += 2; /* Skip over checksum */ + sum += *sp++; + +#if SOLARIS + /* + * In case we had to copy the IP & TCP header out of mblks, + * skip over the mblk bits which are the header + */ + if ((caddr_t)ip != (caddr_t)m->b_rptr) { + hlen = (caddr_t)sp - (caddr_t)ip; + while (hlen) { + add = MIN(hlen, m->b_wptr - m->b_rptr); + sp = (u_short *)((caddr_t)m->b_rptr + add); + hlen -= add; + if ((caddr_t)sp >= (caddr_t)m->b_wptr) { + m = m->b_cont; + PANIC((!m),("fr_tcpsum: not enough data")); + if (!hlen) + sp = (u_short *)m->b_rptr; + } + } + } +#endif +#ifdef __sgi + /* + * In case we had to copy the IP & TCP header out of mbufs, + * skip over the mbuf bits which are the header + */ + if ((caddr_t)ip != mtod(m, caddr_t)) { + hlen = (caddr_t)sp - (caddr_t)ip; + while (hlen) { + add = MIN(hlen, m->m_len); + sp = (u_short *)(mtod(m, caddr_t) + add); + hlen -= add; + if (add >= m->m_len) { + m = m->m_next; + PANIC((!m),("fr_tcpsum: not enough data")); + if (!hlen) + sp = mtod(m, u_short *); + } + } + } +#endif + + if (!(len -= sizeof(*tcp))) + goto nodata; + while (len > 0) { +#if SOLARIS + while ((caddr_t)sp >= (caddr_t)m->b_wptr) { + m = m->b_cont; + PANIC((!m),("fr_tcpsum: 
not enough data")); + sp = (u_short *)m->b_rptr; + } +#else + while (((caddr_t)sp - mtod(m, caddr_t)) >= m->m_len) + { + m = m->m_next; + PANIC((!m),("fr_tcpsum: not enough data")); + sp = mtod(m, u_short *); + } +#endif /* SOLARIS */ + if (len < 2) + break; + if((u_32_t)sp & 1) { + bcopy((char *)sp++, (char *)&bytes.s, sizeof(bytes.s)); + sum += bytes.s; + } else + sum += *sp++; + len -= 2; + } + if (len) { + bytes.c[1] = 0; + bytes.c[0] = *(u_char *)sp; + sum += bytes.s; + } +nodata: + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); + sum = (u_short)((~sum) & 0xffff); + return sum; +} + + +#if defined(KERNEL) && ( ((BSD < 199306) && !SOLARIS) || defined(__sgi) ) +/* + * Copyright (c) 1982, 1986, 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94 + */ +/* + * Copy data from an mbuf chain starting "off" bytes from the beginning, + * continuing for "len" bytes, into the indicated buffer. + */ +void +m_copydata(m, off, len, cp) + register mb_t *m; + register int off; + register int len; + caddr_t cp; +{ + register unsigned count; + + if (off < 0 || len < 0) + panic("m_copydata"); + while (off > 0) { + if (m == 0) + panic("m_copydata"); + if (off < m->m_len) + break; + off -= m->m_len; + m = m->m_next; + } + while (len > 0) { + if (m == 0) + panic("m_copydata"); + count = MIN(m->m_len - off, len); + bcopy(mtod(m, caddr_t) + off, cp, count); + len -= count; + cp += count; + off = 0; + m = m->m_next; + } +} + + +# ifndef linux +/* + * Copy data from a buffer back into the indicated mbuf chain, + * starting "off" bytes from the beginning, extending the mbuf + * chain if necessary. 
+ */ +void +m_copyback(m0, off, len, cp) + struct mbuf *m0; + register int off; + register int len; + caddr_t cp; +{ + register int mlen; + register struct mbuf *m = m0, *n; + int totlen = 0; + + if (m0 == 0) + return; + while (off > (mlen = m->m_len)) { + off -= mlen; + totlen += mlen; + if (m->m_next == 0) { + n = m_getclr(M_DONTWAIT, m->m_type); + if (n == 0) + goto out; + n->m_len = min(MLEN, len + off); + m->m_next = n; + } + m = m->m_next; + } + while (len > 0) { + mlen = min (m->m_len - off, len); + bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen); + cp += mlen; + len -= mlen; + mlen += off; + off = 0; + totlen += mlen; + if (len == 0) + break; + if (m->m_next == 0) { + n = m_get(M_DONTWAIT, m->m_type); + if (n == 0) + break; + n->m_len = min(MLEN, len); + m->m_next = n; + } + m = m->m_next; + } +out: +#if 0 + if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen)) + m->m_pkthdr.len = totlen; +#endif + return; +} +# endif /* linux */ +#endif /* (KERNEL) && ( ((BSD < 199306) && !SOLARIS) || __sgi) */ + + +frgroup_t *fr_findgroup(num, flags, which, set, fgpp) +u_short num; +u_32_t flags; +int which, set; +frgroup_t ***fgpp; +{ + frgroup_t *fg, **fgp; + + if (which == IPL_LOGAUTH) + fgp = &ipfgroups[2][set]; + else if (flags & FR_ACCOUNT) + fgp = &ipfgroups[1][set]; + else if (flags & (FR_OUTQUE|FR_INQUE)) + fgp = &ipfgroups[0][set]; + else + return NULL; + + while ((fg = *fgp)) + if (fg->fg_num == num) + break; + else + fgp = &fg->fg_next; + if (fgpp) + *fgpp = fgp; + return fg; +} + + +frgroup_t *fr_addgroup(num, fp, which, set) +u_short num; +frentry_t *fp; +int which, set; +{ + frgroup_t *fg, **fgp; + + if ((fg = fr_findgroup(num, fp->fr_flags, which, set, &fgp))) + return fg; + + KMALLOC(fg, frgroup_t *, sizeof(*fg)); + if (fg) { + fg->fg_num = num; + fg->fg_next = *fgp; + fg->fg_head = fp; + fg->fg_start = &fp->fr_grp; + *fgp = fg; + } + return fg; +} + + +void fr_delgroup(num, flags, which, set) +u_short num; +u_32_t flags; +int which, set; +{ 
+ frgroup_t *fg, **fgp; + + if (!(fg = fr_findgroup(num, flags, which, set, &fgp))) + return; + + *fgp = fg->fg_next; + KFREE(fg); +} + + + +/* + * recursively flush rules from the list, descending groups as they are + * encountered. if a rule is the head of a group and it has lost all its + * group members, then also delete the group reference. + */ +static int frflushlist(set, unit, nfreedp, list, listp) +int set, unit, *nfreedp; +frentry_t *list, **listp; +{ + register frentry_t *fp = list, *fpn; + register int freed = 0; + + while (fp) { + fpn = fp->fr_next; + if (fp->fr_grp) { + fp->fr_ref -= frflushlist(set, unit, nfreedp, + fp->fr_grp, &fp->fr_grp); + } + + if (fp->fr_ref == 1) { + if (fp->fr_grhead) + fr_delgroup(fp->fr_grhead, fp->fr_flags, unit, + set); + KFREE(fp); + *listp = fpn; + freed++; + } + fp = fpn; + } + *nfreedp += freed; + return freed; +} + + +void frflush(unit, result) +int unit; +int *result; +{ + int flags = *result, flushed = 0, set = fr_active; + + bzero((char *)frcache, sizeof(frcache[0]) * 2); + + if (flags & FR_INACTIVE) + set = 1 - set; + + if (unit == IPL_LOGIPF) { + if (flags & FR_OUTQUE) { + (void) frflushlist(set, unit, &flushed, + ipfilter[1][set], + &ipfilter[1][set]); + (void) frflushlist(set, unit, &flushed, + ipacct[1][set], &ipacct[1][set]); + } + if (flags & FR_INQUE) { + (void) frflushlist(set, unit, &flushed, + ipfilter[0][set], + &ipfilter[0][set]); + (void) frflushlist(set, unit, &flushed, + ipacct[0][set], &ipacct[0][set]); + } + } + + *result = flushed; +} diff --git a/bsd/netinet/icmp6.h b/bsd/netinet/icmp6.h new file mode 100644 index 000000000..14281112c --- /dev/null +++ b/bsd/netinet/icmp6.h @@ -0,0 +1,725 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: icmp6.h,v 1.9 2000/03/09 21:09:16 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_icmp.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_ICMP6_H_ +#define _NETINET_ICMP6_H_ + +#define ICMPV6_PLD_MAXLEN 1232 /* IPV6_MMTU - sizeof(struct ip6_hdr) + - sizeof(struct icmp6_hdr) */ + +struct icmp6_hdr { + u_int8_t icmp6_type; /* type field */ + u_int8_t icmp6_code; /* code field */ + u_int16_t icmp6_cksum; /* checksum field */ + union { + u_int32_t icmp6_un_data32[1]; /* type-specific field */ + u_int16_t icmp6_un_data16[2]; /* type-specific field */ + u_int8_t icmp6_un_data8[4]; /* type-specific field */ + } icmp6_dataun; +}; + +#define icmp6_data32 icmp6_dataun.icmp6_un_data32 +#define icmp6_data16 icmp6_dataun.icmp6_un_data16 +#define icmp6_data8 icmp6_dataun.icmp6_un_data8 +#define icmp6_pptr icmp6_data32[0] /* parameter prob */ +#define icmp6_mtu icmp6_data32[0] /* packet too big */ +#define icmp6_id icmp6_data16[0] /* echo request/reply */ +#define icmp6_seq icmp6_data16[1] /* echo request/reply */ +#define icmp6_maxdelay icmp6_data16[0] /* mcast group membership */ + +#define ICMP6_DST_UNREACH 1 /* dest unreachable, codes: */ +#define ICMP6_PACKET_TOO_BIG 2 /* packet too big */ +#define ICMP6_TIME_EXCEEDED 3 /* time exceeded, code: */ +#define ICMP6_PARAM_PROB 4 /* ip6 header bad */ + +#define ICMP6_ECHO_REQUEST 128 /* echo service */ +#define ICMP6_ECHO_REPLY 129 /* echo reply */ +#define ICMP6_MEMBERSHIP_QUERY 130 /* group membership query */ +#define MLD6_LISTENER_QUERY 130 /* multicast listener query 
*/ +#define ICMP6_MEMBERSHIP_REPORT 131 /* group membership report */ +#define MLD6_LISTENER_REPORT 131 /* multicast listener report */ +#define ICMP6_MEMBERSHIP_REDUCTION 132 /* group membership termination */ +#define MLD6_LISTENER_DONE 132 /* multicast listener done */ + +#define ND_ROUTER_SOLICIT 133 /* router solicitation */ +#define ND_ROUTER_ADVERT 134 /* router advertisment */ +#define ND_NEIGHBOR_SOLICIT 135 /* neighbor solicitation */ +#define ND_NEIGHBOR_ADVERT 136 /* neighbor advertisment */ +#define ND_REDIRECT 137 /* redirect */ + +#define ICMP6_ROUTER_RENUMBERING 138 /* router renumbering */ + +#define ICMP6_WRUREQUEST 139 /* who are you request */ +#define ICMP6_WRUREPLY 140 /* who are you reply */ +#define ICMP6_FQDN_QUERY 139 /* FQDN query */ +#define ICMP6_FQDN_REPLY 140 /* FQDN reply */ +#define ICMP6_NI_QUERY 139 /* node information request */ +#define ICMP6_NI_REPLY 140 /* node information reply */ + +/* The definitions below are experimental. TBA */ +#define MLD6_MTRACE_RESP 141 /* mtrace response(to sender) */ +#define MLD6_MTRACE 142 /* mtrace messages */ + +#define ICMP6_MAXTYPE 142 + +#define ICMP6_DST_UNREACH_NOROUTE 0 /* no route to destination */ +#define ICMP6_DST_UNREACH_ADMIN 1 /* administratively prohibited */ +#define ICMP6_DST_UNREACH_NOTNEIGHBOR 2 /* not a neighbor(obsolete) */ +#define ICMP6_DST_UNREACH_BEYONDSCOPE 2 /* beyond scope of source address */ +#define ICMP6_DST_UNREACH_ADDR 3 /* address unreachable */ +#define ICMP6_DST_UNREACH_NOPORT 4 /* port unreachable */ + +#define ICMP6_TIME_EXCEED_TRANSIT 0 /* ttl==0 in transit */ +#define ICMP6_TIME_EXCEED_REASSEMBLY 1 /* ttl==0 in reass */ + +#define ICMP6_PARAMPROB_HEADER 0 /* erroneous header field */ +#define ICMP6_PARAMPROB_NEXTHEADER 1 /* unrecognized next header */ +#define ICMP6_PARAMPROB_OPTION 2 /* unrecognized option */ + +#define ICMP6_INFOMSG_MASK 0x80 /* all informational messages */ + +#define ICMP6_NI_SUCESS 0 /* node information successful reply */ +#define 
ICMP6_NI_REFUSED 1 /* node information request is refused */ +#define ICMP6_NI_UNKNOWN 2 /* unknown Qtype */ + +#define ICMP6_ROUTER_RENUMBERING_COMMAND 0 /* rr command */ +#define ICMP6_ROUTER_RENUMBERING_RESULT 1 /* rr result */ +#define ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET 255 /* rr seq num reset */ + +/* Used in kernel only */ +#define ND_REDIRECT_ONLINK 0 /* redirect to an on-link node */ +#define ND_REDIRECT_ROUTER 1 /* redirect to a better router */ + +/* + * Multicast Listener Discovery + */ +struct mld6_hdr { + struct icmp6_hdr mld6_hdr; + struct in6_addr mld6_addr; /* multicast address */ +}; + +#define mld6_type mld6_hdr.icmp6_type +#define mld6_code mld6_hdr.icmp6_code +#define mld6_cksum mld6_hdr.icmp6_cksum +#define mld6_maxdelay mld6_hdr.icmp6_data16[0] +#define mld6_reserved mld6_hdr.icmp6_data16[1] + +/* + * Neighbor Discovery + */ + +struct nd_router_solicit { /* router solicitation */ + struct icmp6_hdr nd_rs_hdr; + /* could be followed by options */ +}; + +#define nd_rs_type nd_rs_hdr.icmp6_type +#define nd_rs_code nd_rs_hdr.icmp6_code +#define nd_rs_cksum nd_rs_hdr.icmp6_cksum +#define nd_rs_reserved nd_rs_hdr.icmp6_data32[0] + +struct nd_router_advert { /* router advertisement */ + struct icmp6_hdr nd_ra_hdr; + u_int32_t nd_ra_reachable; /* reachable time */ + u_int32_t nd_ra_retransmit; /* retransmit timer */ + /* could be followed by options */ +}; + +#define nd_ra_type nd_ra_hdr.icmp6_type +#define nd_ra_code nd_ra_hdr.icmp6_code +#define nd_ra_cksum nd_ra_hdr.icmp6_cksum +#define nd_ra_curhoplimit nd_ra_hdr.icmp6_data8[0] +#define nd_ra_flags_reserved nd_ra_hdr.icmp6_data8[1] +#define ND_RA_FLAG_MANAGED 0x80 +#define ND_RA_FLAG_OTHER 0x40 +#define ND_RA_FLAG_HA 0x20 +#define nd_ra_router_lifetime nd_ra_hdr.icmp6_data16[1] + +struct nd_neighbor_solicit { /* neighbor solicitation */ + struct icmp6_hdr nd_ns_hdr; + struct in6_addr nd_ns_target; /*target address */ + /* could be followed by options */ +}; + +#define nd_ns_type 
nd_ns_hdr.icmp6_type +#define nd_ns_code nd_ns_hdr.icmp6_code +#define nd_ns_cksum nd_ns_hdr.icmp6_cksum +#define nd_ns_reserved nd_ns_hdr.icmp6_data32[0] + +struct nd_neighbor_advert { /* neighbor advertisement */ + struct icmp6_hdr nd_na_hdr; + struct in6_addr nd_na_target; /* target address */ + /* could be followed by options */ +}; + +#define nd_na_type nd_na_hdr.icmp6_type +#define nd_na_code nd_na_hdr.icmp6_code +#define nd_na_cksum nd_na_hdr.icmp6_cksum +#define nd_na_flags_reserved nd_na_hdr.icmp6_data32[0] +#if BYTE_ORDER == BIG_ENDIAN +#define ND_NA_FLAG_ROUTER 0x80000000 +#define ND_NA_FLAG_SOLICITED 0x40000000 +#define ND_NA_FLAG_OVERRIDE 0x20000000 +#else +#if BYTE_ORDER == LITTLE_ENDIAN +#define ND_NA_FLAG_ROUTER 0x80 +#define ND_NA_FLAG_SOLICITED 0x40 +#define ND_NA_FLAG_OVERRIDE 0x20 +#endif +#endif + +struct nd_redirect { /* redirect */ + struct icmp6_hdr nd_rd_hdr; + struct in6_addr nd_rd_target; /* target address */ + struct in6_addr nd_rd_dst; /* destination address */ + /* could be followed by options */ +}; + +#define nd_rd_type nd_rd_hdr.icmp6_type +#define nd_rd_code nd_rd_hdr.icmp6_code +#define nd_rd_cksum nd_rd_hdr.icmp6_cksum +#define nd_rd_reserved nd_rd_hdr.icmp6_data32[0] + +struct nd_opt_hdr { /* Neighbor discovery option header */ + u_int8_t nd_opt_type; + u_int8_t nd_opt_len; + /* followed by option specific data*/ +}; + +#define ND_OPT_SOURCE_LINKADDR 1 +#define ND_OPT_TARGET_LINKADDR 2 +#define ND_OPT_PREFIX_INFORMATION 3 +#define ND_OPT_REDIRECTED_HEADER 4 +#define ND_OPT_MTU 5 +#define ND_OPT_ADV_INTERVAL 7 /* MIPv6 */ +#define ND_OPT_HA_INFORMATION 8 /* MIPv6 */ + +struct nd_opt_prefix_info { /* prefix information */ + u_int8_t nd_opt_pi_type; + u_int8_t nd_opt_pi_len; + u_int8_t nd_opt_pi_prefix_len; + u_int8_t nd_opt_pi_flags_reserved; + u_int32_t nd_opt_pi_valid_time; + u_int32_t nd_opt_pi_preferred_time; + u_int32_t nd_opt_pi_reserved2; + struct in6_addr nd_opt_pi_prefix; +}; + +#define ND_OPT_PI_FLAG_ONLINK 0x80 +#define 
ND_OPT_PI_FLAG_AUTO 0x40 +#define ND_OPT_PI_FLAG_RTADDR 0x20 + +struct nd_opt_rd_hdr { /* redirected header */ + u_int8_t nd_opt_rh_type; + u_int8_t nd_opt_rh_len; + u_int16_t nd_opt_rh_reserved1; + u_int32_t nd_opt_rh_reserved2; + /* followed by IP header and data */ +}; + +struct nd_opt_mtu { /* MTU option */ + u_int8_t nd_opt_mtu_type; + u_int8_t nd_opt_mtu_len; + u_int16_t nd_opt_mtu_reserved; + u_int32_t nd_opt_mtu_mtu; +}; + +struct nd_opt_advint { /* Advertisement Interval option (MIPv6) */ + u_int8_t nd_opt_int_type; + u_int8_t nd_opt_int_len; + u_int16_t nd_opt_int_reserved; + u_int32_t nd_opt_int_interval; +}; + +struct nd_opt_hai { /* Home Agent Information option (MIPv6) */ + u_int8_t nd_opt_hai_type; + u_int8_t nd_opt_hai_len; + u_int16_t nd_opt_hai_reserved; + u_int16_t nd_opt_hai_pref; + u_int16_t nd_opt_hai_lifetime; +}; + +/* + * icmp6 namelookup + */ + +struct icmp6_namelookup { + struct icmp6_hdr icmp6_nl_hdr; + u_int8_t icmp6_nl_nonce[8]; + u_int32_t icmp6_nl_ttl; +#if 0 + u_int8_t icmp6_nl_len; + u_int8_t icmp6_nl_name[3]; +#endif + /* could be followed by options */ +}; + +/* + * icmp6 node information + */ +struct icmp6_nodeinfo { + struct icmp6_hdr icmp6_ni_hdr; + u_int8_t icmp6_ni_nonce[8]; + /* could be followed by reply data */ +}; + +#define ni_type icmp6_ni_hdr.icmp6_type +#define ni_code icmp6_ni_hdr.icmp6_code +#define ni_cksum icmp6_ni_hdr.icmp6_cksum +#define ni_qtype icmp6_ni_hdr.icmp6_data16[0] +#define ni_flags icmp6_ni_hdr.icmp6_data16[1] + + +#define NI_QTYPE_NOOP 0 /* NOOP */ +#define NI_QTYPE_SUPTYPES 1 /* Supported Qtypes */ +#define NI_QTYPE_FQDN 2 /* FQDN */ +#define NI_QTYPE_NODEADDR 3 /* Node Addresses. XXX: spec says 2, but it may be a typo... 
*/ + +#if BYTE_ORDER == BIG_ENDIAN +#define NI_SUPTYPE_FLAG_COMPRESS 0x1 +#define NI_FQDN_FLAG_VALIDTTL 0x1 +#elif BYTE_ORDER == LITTLE_ENDIAN +#define NI_SUPTYPE_FLAG_COMPRESS 0x0100 +#define NI_FQDN_FLAG_VALIDTTL 0x0100 +#endif + +#ifdef NAME_LOOKUPS_04 +#if BYTE_ORDER == BIG_ENDIAN +#define NI_NODEADDR_FLAG_LINKLOCAL 0x1 +#define NI_NODEADDR_FLAG_SITELOCAL 0x2 +#define NI_NODEADDR_FLAG_GLOBAL 0x4 +#define NI_NODEADDR_FLAG_ALL 0x8 +#define NI_NODEADDR_FLAG_TRUNCATE 0x10 +#define NI_NODEADDR_FLAG_ANYCAST 0x20 /* just experimental. not in spec */ +#elif BYTE_ORDER == LITTLE_ENDIAN +#define NI_NODEADDR_FLAG_LINKLOCAL 0x0100 +#define NI_NODEADDR_FLAG_SITELOCAL 0x0200 +#define NI_NODEADDR_FLAG_GLOBAL 0x0400 +#define NI_NODEADDR_FLAG_ALL 0x0800 +#define NI_NODEADDR_FLAG_TRUNCATE 0x1000 +#define NI_NODEADDR_FLAG_ANYCAST 0x2000 /* just experimental. not in spec */ +#endif +#else /* draft-ietf-ipngwg-icmp-name-lookups-05 (and later?) */ +#if BYTE_ORDER == BIG_ENDIAN +#define NI_NODEADDR_FLAG_TRUNCATE 0x1 +#define NI_NODEADDR_FLAG_ALL 0x2 +#define NI_NODEADDR_FLAG_COMPAT 0x4 +#define NI_NODEADDR_FLAG_LINKLOCAL 0x8 +#define NI_NODEADDR_FLAG_SITELOCAL 0x10 +#define NI_NODEADDR_FLAG_GLOBAL 0x20 +#define NI_NODEADDR_FLAG_ANYCAST 0x40 /* just experimental. not in spec */ +#elif BYTE_ORDER == LITTLE_ENDIAN +#define NI_NODEADDR_FLAG_TRUNCATE 0x0100 +#define NI_NODEADDR_FLAG_ALL 0x0200 +#define NI_NODEADDR_FLAG_COMPAT 0x0400 +#define NI_NODEADDR_FLAG_LINKLOCAL 0x0800 +#define NI_NODEADDR_FLAG_SITELOCAL 0x1000 +#define NI_NODEADDR_FLAG_GLOBAL 0x2000 +#define NI_NODEADDR_FLAG_ANYCAST 0x4000 /* just experimental. not in spec */ +#endif +#endif + +struct ni_reply_fqdn { + u_int32_t ni_fqdn_ttl; /* TTL */ + u_int8_t ni_fqdn_namelen; /* length in octets of the FQDN */ + u_int8_t ni_fqdn_name[3]; /* XXX: alignment */ +}; + +/* + * Router Renumbering. 
as router-renum-08.txt + */ +#if BYTE_ORDER == BIG_ENDIAN /* net byte order */ +struct icmp6_router_renum { /* router renumbering header */ + struct icmp6_hdr rr_hdr; + u_int8_t rr_segnum; + u_int8_t rr_test : 1; + u_int8_t rr_reqresult : 1; + u_int8_t rr_forceapply : 1; + u_int8_t rr_specsite : 1; + u_int8_t rr_prevdone : 1; + u_int8_t rr_flags_reserved : 3; + u_int16_t rr_maxdelay; + u_int32_t rr_reserved; +}; +#elif BYTE_ORDER == LITTLE_ENDIAN +struct icmp6_router_renum { /* router renumbering header */ + struct icmp6_hdr rr_hdr; + u_int8_t rr_segnum; + u_int8_t rr_flags_reserved : 3; + u_int8_t rr_prevdone : 1; + u_int8_t rr_specsite : 1; + u_int8_t rr_forceapply : 1; + u_int8_t rr_reqresult : 1; + u_int8_t rr_test : 1; + u_int16_t rr_maxdelay; + u_int32_t rr_reserved; +}; +#endif /* BYTE_ORDER */ + +#define rr_type rr_hdr.icmp6_type +#define rr_code rr_hdr.icmp6_code +#define rr_cksum rr_hdr.icmp6_cksum +#define rr_seqnum rr_hdr.icmp6_data32[0] + +struct rr_pco_match { /* match prefix part */ + u_int8_t rpm_code; + u_int8_t rpm_len; + u_int8_t rpm_ordinal; + u_int8_t rpm_matchlen; + u_int8_t rpm_minlen; + u_int8_t rpm_maxlen; + u_int16_t rpm_reserved; + struct in6_addr rpm_prefix; +}; + +#define RPM_PCO_ADD 1 +#define RPM_PCO_CHANGE 2 +#define RPM_PCO_SETGLOBAL 3 +#define RPM_PCO_MAX 4 + +#if BYTE_ORDER == BIG_ENDIAN /* net byte order */ +struct rr_pco_use { /* use prefix part */ + u_int8_t rpu_uselen; + u_int8_t rpu_keeplen; + u_int8_t rpu_mask_onlink : 1; + u_int8_t rpu_mask_autonomous : 1; + u_int8_t rpu_mask_reserved : 6; + u_int8_t rpu_onlink : 1; + u_int8_t rpu_autonomous : 1; + u_int8_t rpu_raflags_reserved : 6; + u_int32_t rpu_vltime; + u_int32_t rpu_pltime; + u_int32_t rpu_decr_vltime : 1; + u_int32_t rpu_decr_pltime : 1; + u_int32_t rpu_flags_reserved : 6; + u_int32_t rpu_reserved : 24; + struct in6_addr rpu_prefix; +}; +#elif BYTE_ORDER == LITTLE_ENDIAN +struct rr_pco_use { /* use prefix part */ + u_int8_t rpu_uselen; + u_int8_t rpu_keeplen; + 
u_int8_t rpu_mask_reserved : 6; + u_int8_t rpu_mask_autonomous : 1; + u_int8_t rpu_mask_onlink : 1; + u_int8_t rpu_raflags_reserved : 6; + u_int8_t rpu_autonomous : 1; + u_int8_t rpu_onlink : 1; + u_int32_t rpu_vltime; + u_int32_t rpu_pltime; + u_int32_t rpu_flags_reserved : 6; + u_int32_t rpu_decr_pltime : 1; + u_int32_t rpu_decr_vltime : 1; + u_int32_t rpu_reserved : 24; + struct in6_addr rpu_prefix; +}; +#endif /* BYTE_ORDER */ + +#if BYTE_ORDER == BIG_ENDIAN /* net byte order */ +struct rr_result { /* router renumbering result message */ + u_int8_t rrr_reserved; + u_int8_t rrr_flags_reserved : 6; + u_int8_t rrr_outofbound : 1; + u_int8_t rrr_forbidden : 1; + u_int8_t rrr_ordinal; + u_int8_t rrr_matchedlen; + u_int32_t rrr_ifid; + struct in6_addr rrr_prefix; +}; +#elif BYTE_ORDER == LITTLE_ENDIAN +struct rr_result { /* router renumbering result message */ + u_int8_t rrr_reserved; + u_int8_t rrr_forbidden : 1; + u_int8_t rrr_outofbound : 1; + u_int8_t rrr_flags_reserved : 6; + u_int8_t rrr_ordinal; + u_int8_t rrr_matchedlen; + u_int32_t rrr_ifid; + struct in6_addr rrr_prefix; +}; +#endif /* BYTE_ORDER */ + +/* + * icmp6 filter structures. 
+ */ + +struct icmp6_filter { + u_int32_t icmp6_filt[8]; +}; + +#ifdef KERNEL +#define ICMP6_FILTER_SETPASSALL(filterp) \ +do { \ + int i; u_char *p; \ + p = (u_char *)filterp; \ + for (i = 0; i < sizeof(struct icmp6_filter); i++) \ + p[i] = 0xff; \ +} while (0) +#define ICMP6_FILTER_SETBLOCKALL(filterp) \ + bzero(filterp, sizeof(struct icmp6_filter)) +#else /* KERNEL */ +#define ICMP6_FILTER_SETPASSALL(filterp) \ + memset(filterp, 0xff, sizeof(struct icmp6_filter)) +#define ICMP6_FILTER_SETBLOCKALL(filterp) \ + memset(filterp, 0x00, sizeof(struct icmp6_filter)) +#endif /* KERNEL */ + +#define ICMP6_FILTER_SETPASS(type, filterp) \ + (((filterp)->icmp6_filt[(type) >> 5]) |= (1 << ((type) & 31))) +#define ICMP6_FILTER_SETBLOCK(type, filterp) \ + (((filterp)->icmp6_filt[(type) >> 5]) &= ~(1 << ((type) & 31))) +#define ICMP6_FILTER_WILLPASS(type, filterp) \ + ((((filterp)->icmp6_filt[(type) >> 5]) & (1 << ((type) & 31))) != 0) +#define ICMP6_FILTER_WILLBLOCK(type, filterp) \ + ((((filterp)->icmp6_filt[(type) >> 5]) & (1 << ((type) & 31))) == 0) + +/* + * Variables related to this implementation + * of the internet control message protocol version 6. 
+ */ +struct icmp6stat { +/* statistics related to icmp6 packets generated */ + u_quad_t icp6s_error; /* # of calls to icmp6_error */ + u_quad_t icp6s_canterror; /* no error 'cuz old was icmp */ + u_quad_t icp6s_toofreq; /* no error 'cuz rate limitation */ + u_quad_t icp6s_outhist[256]; +/* statistics related to input message processed */ + u_quad_t icp6s_badcode; /* icmp6_code out of range */ + u_quad_t icp6s_tooshort; /* packet < sizeof(struct icmp6_hdr) */ + u_quad_t icp6s_checksum; /* bad checksum */ + u_quad_t icp6s_badlen; /* calculated bound mismatch */ + u_quad_t icp6s_reflect; /* number of responses */ + u_quad_t icp6s_inhist[256]; + u_quad_t icp6s_nd_toomanyopt; /* too many ND options */ +}; + +/* + * Names for ICMP sysctl objects + */ +#define ICMPV6CTL_STATS 1 +#define ICMPV6CTL_REDIRACCEPT 2 /* accept/process redirects */ +#define ICMPV6CTL_REDIRTIMEOUT 3 /* redirect cache time */ +#define ICMPV6CTL_ERRRATELIMIT 5 /* ICMPv6 error rate limitation */ +#define ICMPV6CTL_ND6_PRUNE 6 +#define ICMPV6CTL_ND6_DELAY 8 +#define ICMPV6CTL_ND6_UMAXTRIES 9 +#define ICMPV6CTL_ND6_MMAXTRIES 10 +#define ICMPV6CTL_ND6_USELOOPBACK 11 +/*#define ICMPV6CTL_ND6_PROXYALL 12 obsoleted, do not reuse here */ +#define ICMPV6CTL_NODEINFO 13 +#define ICMPV6CTL_MAXID 14 + +#define ICMPV6CTL_NAMES { \ + { 0, 0 }, \ + { 0, 0 }, \ + { "rediraccept", CTLTYPE_INT }, \ + { "redirtimeout", CTLTYPE_INT }, \ + { 0, 0 }, \ + { "errratelimit", CTLTYPE_INT }, \ + { "nd6_prune", CTLTYPE_INT }, \ + { 0, 0 }, \ + { "nd6_delay", CTLTYPE_INT }, \ + { "nd6_umaxtries", CTLTYPE_INT }, \ + { "nd6_mmaxtries", CTLTYPE_INT }, \ + { "nd6_useloopback", CTLTYPE_INT }, \ + { 0, 0 }, \ + { "nodeinfo", CTLTYPE_INT }, \ +} + +#ifdef __bsdi__ +#define ICMPV6CTL_VARS { \ + 0, \ + 0, \ + &icmp6_rediraccept, \ + &icmp6_redirtimeout, \ + 0, \ + 0, \ + &icmp6errratelim, \ + &nd6_prune, \ + 0, \ + &nd6_delay, \ + &nd6_umaxtries, \ + &nd6_mmaxtries, \ + &nd6_useloopback, \ + 0, \ + &icmp6_nodeinfo, \ +} +#endif + 
+#define RTF_PROBEMTU RTF_PROTO1 + +#ifdef KERNEL +# ifdef __STDC__ +struct rtentry; +struct rttimer; +struct in6_multi; +# endif +void icmp6_init __P((void)); +void icmp6_paramerror __P((struct mbuf *, int)); +void icmp6_error __P((struct mbuf *, int, int, int)); +int icmp6_input __P((struct mbuf **, int *, int)); +void icmp6_fasttimo __P((void)); +void icmp6_reflect __P((struct mbuf *, size_t)); +void icmp6_prepare __P((struct mbuf *)); +void icmp6_redirect_input __P((struct mbuf *, int)); +void icmp6_redirect_output __P((struct mbuf *, struct rtentry *)); +#ifdef __bsdi__ +int icmp6_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); +void icmp6_mtuexpire __P((struct rtentry *, struct rttimer *)); +#endif /*__bsdi__*/ +#if defined(__NetBSD__) || defined(__OpenBSD__) +int icmp6_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); +#endif + +/* XXX: is this the right place for these macros? */ +#define icmp6_ifstat_inc(ifp, tag) \ +do { \ + if ((ifp) && (ifp)->if_index <= if_index \ + && (ifp)->if_index < icmp6_ifstatmax \ + && icmp6_ifstat && icmp6_ifstat[(ifp)->if_index]) { \ + icmp6_ifstat[(ifp)->if_index]->tag++; \ + } \ +} while (0) + +#define icmp6_ifoutstat_inc(ifp, type, code) \ +do { \ + icmp6_ifstat_inc(ifp, ifs6_out_msg); \ + if (type < ICMP6_INFOMSG_MASK) \ + icmp6_ifstat_inc(ifp, ifs6_out_error); \ + switch(type) { \ + case ICMP6_DST_UNREACH: \ + icmp6_ifstat_inc(ifp, ifs6_out_dstunreach); \ + if (code == ICMP6_DST_UNREACH_ADMIN) \ + icmp6_ifstat_inc(ifp, ifs6_out_adminprohib); \ + break; \ + case ICMP6_PACKET_TOO_BIG: \ + icmp6_ifstat_inc(ifp, ifs6_out_pkttoobig); \ + break; \ + case ICMP6_TIME_EXCEEDED: \ + icmp6_ifstat_inc(ifp, ifs6_out_timeexceed); \ + break; \ + case ICMP6_PARAM_PROB: \ + icmp6_ifstat_inc(ifp, ifs6_out_paramprob); \ + break; \ + case ICMP6_ECHO_REQUEST: \ + icmp6_ifstat_inc(ifp, ifs6_out_echo); \ + break; \ + case ICMP6_ECHO_REPLY: \ + icmp6_ifstat_inc(ifp, ifs6_out_echoreply); \ + break; \ + case 
MLD6_LISTENER_QUERY: \ + icmp6_ifstat_inc(ifp, ifs6_out_mldquery); \ + break; \ + case MLD6_LISTENER_REPORT: \ + icmp6_ifstat_inc(ifp, ifs6_out_mldreport); \ + break; \ + case MLD6_LISTENER_DONE: \ + icmp6_ifstat_inc(ifp, ifs6_out_mlddone); \ + break; \ + case ND_ROUTER_SOLICIT: \ + icmp6_ifstat_inc(ifp, ifs6_out_routersolicit); \ + break; \ + case ND_ROUTER_ADVERT: \ + icmp6_ifstat_inc(ifp, ifs6_out_routeradvert); \ + break; \ + case ND_NEIGHBOR_SOLICIT: \ + icmp6_ifstat_inc(ifp, ifs6_out_neighborsolicit); \ + break; \ + case ND_NEIGHBOR_ADVERT: \ + icmp6_ifstat_inc(ifp, ifs6_out_neighboradvert); \ + break; \ + case ND_REDIRECT: \ + icmp6_ifstat_inc(ifp, ifs6_out_redirect); \ + break; \ + } \ +} while (0) + +extern int icmp6_rediraccept; /* accept/process redirects */ +extern int icmp6_redirtimeout; /* cache time for redirect routes */ +#endif /* KERNEL */ + +#endif /* !_NETINET_ICMP6_H_ */ diff --git a/bsd/netinet/icmp_var.h b/bsd/netinet/icmp_var.h new file mode 100644 index 000000000..f8e7633e5 --- /dev/null +++ b/bsd/netinet/icmp_var.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)icmp_var.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_ICMP_VAR_H_ +#define _NETINET_ICMP_VAR_H_ + +#ifdef KERNEL +#if ISFB31 +#include "opt_icmp_bandlim.h" /* for ICMP_BANDLIM */ +#endif +#endif + +/* + * Variables related to this implementation + * of the internet control message protocol. + */ +struct icmpstat { +/* statistics related to icmp packets generated */ + u_long icps_error; /* # of calls to icmp_error */ + u_long icps_oldshort; /* no error 'cuz old ip too short */ + u_long icps_oldicmp; /* no error 'cuz old was icmp */ + u_long icps_outhist[ICMP_MAXTYPE + 1]; +/* statistics related to input messages processed */ + u_long icps_badcode; /* icmp_code out of range */ + u_long icps_tooshort; /* packet < ICMP_MINLEN */ + u_long icps_checksum; /* bad checksum */ + u_long icps_badlen; /* calculated bound mismatch */ + u_long icps_reflect; /* number of responses */ + u_long icps_inhist[ICMP_MAXTYPE + 1]; + u_long icps_bmcastecho; /* b/mcast echo requests dropped */ + u_long icps_bmcasttstamp; /* b/mcast tstamp requests dropped */ +}; + +/* + * Names for ICMP sysctl objects + */ +#define ICMPCTL_MASKREPL 1 /* allow replies to netmask requests */ +#define ICMPCTL_STATS 2 /* statistics (read-only) */ +#define ICMPCTL_ICMPLIM 3 +#define ICMPCTL_MAXID 4 + +#define ICMPCTL_NAMES { \ + { 0, 0 }, \ + { "maskrepl", CTLTYPE_INT }, \ + { "stats", CTLTYPE_STRUCT }, \ + { "icmplim", CTLTYPE_INT }, \ +} + +#ifdef KERNEL +SYSCTL_DECL(_net_inet_icmp); +#ifdef ICMP_BANDLIM +extern int badport_bandlim __P((int)); +#endif +#endif + +#endif diff --git a/bsd/netinet/if_atm.c b/bsd/netinet/if_atm.c new file mode 100644 index 000000000..cfc61835f --- /dev/null +++ b/bsd/netinet/if_atm.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_atm.c,v 1.6 1996/10/13 02:03:01 christos Exp $ */ + +/* + * + * Copyright (c) 1996 Charles D. Cranor and Washington University. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Charles D. Cranor and + * Washington University. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * IP <=> ATM address resolution. + */ + +#include "opt_inet.h" +#include "opt_natm.h" + +#if defined(INET) || defined(INET6) + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + + +#if NATM +#include +#endif + + +#define SDL(s) ((struct sockaddr_dl *)s) + +/* + * atm_rtrequest: handle ATM rt request (in support of generic code) + * inputs: "req" = request code + * "rt" = route entry + * "sa" = sockaddr + */ + +void +atm_rtrequest(req, rt, sa) + int req; + register struct rtentry *rt; + struct sockaddr *sa; +{ + register struct sockaddr *gate = rt->rt_gateway; + struct atm_pseudoioctl api; +#if NATM + struct sockaddr_in *sin; + struct natmpcb *npcb = NULL; + struct atm_pseudohdr *aph; +#endif + static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK}; + + if (rt->rt_flags & RTF_GATEWAY) /* link level requests only */ + return; + + switch (req) { + + case RTM_RESOLVE: /* resolve: only happens when cloning */ + printf("atm_rtrequest: RTM_RESOLVE request detected?\n"); + break; + + case RTM_ADD: + + /* + * route added by a command (e.g. ifconfig, route, arp...). + * + * first check to see if this is not a host route, in which + * case we are being called via "ifconfig" to set the address. 
+ */ + + if ((rt->rt_flags & RTF_HOST) == 0) { + rt_setgate(rt,rt_key(rt),(struct sockaddr *)&null_sdl); + gate = rt->rt_gateway; + SDL(gate)->sdl_type = rt->rt_ifp->if_type; + SDL(gate)->sdl_index = rt->rt_ifp->if_index; + break; + } + + if ((rt->rt_flags & RTF_CLONING) != 0) { + printf("atm_rtrequest: cloning route detected?\n"); + break; + } + if (gate->sa_family != AF_LINK || + gate->sa_len < sizeof(null_sdl)) { + log(LOG_DEBUG, "atm_rtrequest: bad gateway value"); + break; + } + +#if DIAGNOSTIC + if (rt->rt_ifp->if_ioctl == NULL) panic("atm null ioctl"); +#endif + +#if NATM + /* + * let native ATM know we are using this VCI/VPI + * (i.e. reserve it) + */ + sin = (struct sockaddr_in *) rt_key(rt); + if (sin->sin_family != AF_INET) + goto failed; + aph = (struct atm_pseudohdr *) LLADDR(SDL(gate)); + npcb = npcb_add(NULL, rt->rt_ifp, ATM_PH_VCI(aph), + ATM_PH_VPI(aph)); + if (npcb == NULL) + goto failed; + npcb->npcb_flags |= NPCB_IP; + npcb->ipaddr.s_addr = sin->sin_addr.s_addr; + /* XXX: move npcb to llinfo when ATM ARP is ready */ + rt->rt_llinfo = (caddr_t) npcb; + rt->rt_flags |= RTF_LLINFO; +#endif + /* + * let the lower level know this circuit is active + */ + bcopy(LLADDR(SDL(gate)), &api.aph, sizeof(api.aph)); + api.rxhand = NULL; + if (dlil_ioctl(0, rt->rt_ifp, SIOCATMENA, + (caddr_t)&api) != 0) { + printf("atm: couldn't add VC\n"); + goto failed; + } + + SDL(gate)->sdl_type = rt->rt_ifp->if_type; + SDL(gate)->sdl_index = rt->rt_ifp->if_index; + + break; + +failed: +#if NATM + if (npcb) { + npcb_free(npcb, NPCB_DESTROY); + rt->rt_llinfo = NULL; + rt->rt_flags &= ~RTF_LLINFO; + } +#endif + rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0, + rt_mask(rt), 0, (struct rtentry **) 0); + break; + + case RTM_DELETE: + +#if NATM + /* + * tell native ATM we are done with this VC + */ + + if (rt->rt_flags & RTF_LLINFO) { + npcb_free((struct natmpcb *)rt->rt_llinfo, + NPCB_DESTROY); + rt->rt_llinfo = NULL; + rt->rt_flags &= ~RTF_LLINFO; + } +#endif + /* + * 
tell the lower layer to disable this circuit + */ + + bcopy(LLADDR(SDL(gate)), &api.aph, sizeof(api.aph)); + api.rxhand = NULL; + dlil_ioctl(0, rt->rt_ifp, SIOCATMDIS, + (caddr_t)&api); + + break; + } +} + +/* + * atmresolve: + * inputs: + * [1] "rt" = the link level route to use (or null if need to look one up) + * [2] "m" = mbuf containing the data to be sent + * [3] "dst" = sockaddr_in (IP) address of dest. + * output: + * [4] "desten" = ATM pseudo header which we will fill in VPI/VCI info + * return: + * 0 == resolve FAILED; note that "m" gets m_freem'd in this case + * 1 == resolve OK; desten contains result + * + * XXX: will need more work if we wish to support ATMARP in the kernel, + * but this is enough for PVCs entered via the "route" command. + */ + +int +atmresolve(rt, m, dst, desten) + +register struct rtentry *rt; +struct mbuf *m; +register struct sockaddr *dst; +register struct atm_pseudohdr *desten; /* OUT */ + +{ + struct sockaddr_dl *sdl; + + if (m->m_flags & (M_BCAST|M_MCAST)) { + log(LOG_INFO, "atmresolve: BCAST/MCAST packet detected/dumped"); + goto bad; + } + + if (rt == NULL) { + rt = RTALLOC1(dst, 0); + if (rt == NULL) goto bad; /* failed */ + rt->rt_refcnt--; /* don't keep LL references */ + if ((rt->rt_flags & RTF_GATEWAY) != 0 || + (rt->rt_flags & RTF_LLINFO) == 0 || + /* XXX: are we using LLINFO? */ + rt->rt_gateway->sa_family != AF_LINK) { + goto bad; + } + } + + /* + * note that rt_gateway is a sockaddr_dl which contains the + * atm_pseudohdr data structure for this route. we currently + * don't need any rt_llinfo info (but will if we want to support + * ATM ARP [c.f. if_ether.c]). + */ + + sdl = SDL(rt->rt_gateway); + + /* + * Check the address family and length is valid, the address + * is resolved; otherwise, try to resolve. + */ + + + if (sdl->sdl_family == AF_LINK && sdl->sdl_alen == sizeof(*desten)) { + bcopy(LLADDR(sdl), desten, sdl->sdl_alen); + return(1); /* ok, go for it! 
*/ + } + + /* + * we got an entry, but it doesn't have valid link address + * info in it (it is prob. the interface route, which has + * sdl_alen == 0). dump packet. (fall through to "bad"). + */ + +bad: + m_freem(m); + return(0); +} +#endif /* INET */ diff --git a/bsd/netinet/if_atm.h b/bsd/netinet/if_atm.h new file mode 100644 index 000000000..79417f4e0 --- /dev/null +++ b/bsd/netinet/if_atm.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: if_atm.h,v 1.2 1996/07/03 17:17:17 chuck Exp $ */ + +/* + * + * Copyright (c) 1996 Charles D. Cranor and Washington University. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Charles D. Cranor and + * Washington University. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * if_atm.h + */ + +struct atm_pseudohdr; +struct mbuf; +struct rtentry; +struct sockaddr; + +void atm_rtrequest __P((int, struct rtentry *, struct sockaddr *)); +int atmresolve __P((struct rtentry *, struct mbuf *, struct sockaddr *, + struct atm_pseudohdr *)); diff --git a/bsd/netinet/if_ether.c b/bsd/netinet/if_ether.c new file mode 100644 index 000000000..31932451d --- /dev/null +++ b/bsd/netinet/if_ether.c @@ -0,0 +1,747 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_ether.c 8.1 (Berkeley) 6/10/93 + */ + +/* + * Ethernet address resolution protocol. + * TODO: + * add "inuse/lock" bit (or ref. count) along with valid bit + */ + +#if NOTFB31 +#include "opt_inet.h" +#include "opt_bdg.h" +#endif + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#define SIN(s) ((struct sockaddr_in *)s) +#define SDL(s) ((struct sockaddr_dl *)s) + +SYSCTL_DECL(_net_link_ether); +SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW, 0, ""); + +/* timer values */ +static int arpt_prune = (5*60*1); /* walk list every 5 minutes */ +static int arpt_keep = (20*60); /* once resolved, good for 20 more minutes */ +static int arpt_down = 20; /* once declared down, don't send for 20 sec */ + +SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl, CTLFLAG_RW, + &arpt_prune, 0, ""); +SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age, CTLFLAG_RW, + &arpt_keep, 0, ""); +SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time, CTLFLAG_RW, + &arpt_down, 0, ""); + +#define rt_expire rt_rmx.rmx_expire + +struct llinfo_arp { + LIST_ENTRY(llinfo_arp) la_le; + struct 
rtentry *la_rt; + struct mbuf *la_hold; /* last packet until resolved/timeout */ + long la_asked; /* last time we QUERIED for this addr */ +#define la_timer la_rt->rt_rmx.rmx_expire /* deletion time in seconds */ +}; + +static LIST_HEAD(, llinfo_arp) llinfo_arp; + +struct ifqueue arpintrq = {0, 0, 0, 50}; +static int arp_inuse, arp_allocated; + +static int arp_maxtries = 5; +static int useloopback = 1; /* use loopback interface for local traffic */ +static int arp_proxyall = 0; +static int arp_init_called = 0; + +SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries, CTLFLAG_RW, + &arp_maxtries, 0, ""); +SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback, CTLFLAG_RW, + &useloopback, 0, ""); +SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall, CTLFLAG_RW, + &arp_proxyall, 0, ""); + +void arp_rtrequest __P((int, struct rtentry *, struct sockaddr *)); +static void arprequest __P((struct arpcom *, + struct in_addr *, struct in_addr *, u_char *)); +void arpintr __P((void)); +static void arptfree __P((struct llinfo_arp *)); +static void arptimer __P((void *)); +static u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static struct llinfo_arp + *arplookup __P((u_long, int, int)); +#if INET +static void in_arpinput __P((struct mbuf *)); +#endif + +/* + * Timeout routine. Age arp_tab entries periodically. + */ +/* ARGSUSED */ +static void +arptimer(ignored_arg) + void *ignored_arg; +{ + int s ; + register struct llinfo_arp *la; + struct llinfo_arp *ola; + boolean_t funnel_state; + + + funnel_state = thread_funnel_set(network_flock, TRUE); + s = splnet(); + la = llinfo_arp.lh_first; + + timeout(arptimer, (caddr_t)0, arpt_prune * hz); + while ((ola = la) != 0) { + register struct rtentry *rt = la->la_rt; + la = la->la_le.le_next; + if (rt->rt_expire && rt->rt_expire <= time_second) + arptfree(ola); /* timer has expired, clear */ + } + splx(s); + (void) thread_funnel_set(network_flock, FALSE); + +} + +/* + * Parallel to llc_rtrequest. 
 */
/*
 * Routing-table hook: called with RTM_ADD / RTM_RESOLVE / RTM_DELETE to
 * attach, reuse or tear down the llinfo_arp state hanging off a route.
 */
void
arp_rtrequest(req, rt, sa)
	int req;
	register struct rtentry *rt;
	struct sockaddr *sa;
{
	register struct sockaddr *gate = rt->rt_gateway;
	register struct llinfo_arp *la = (struct llinfo_arp *)rt->rt_llinfo;
	static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK};
	static int arpinit_done;

	/* One-time init: set up the llinfo list and kick off the ager. */
	if (!arpinit_done) {
		arpinit_done = 1;
		LIST_INIT(&llinfo_arp);
		timeout(arptimer, (caddr_t)0, hz);
	}
	/* ARP manages link-level state only for directly attached hosts. */
	if (rt->rt_flags & RTF_GATEWAY)
		return;
	switch (req) {

	case RTM_ADD:
		/*
		 * XXX: If this is a manually added route to interface
		 * such as older version of routed or gated might provide,
		 * restore cloning bit.
		 */
		if ((rt->rt_flags & RTF_HOST) == 0 &&
		    SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff)
			rt->rt_flags |= RTF_CLONING;
		if (rt->rt_flags & RTF_CLONING) {
			/*
			 * Case 1: This route should come from a route to iface.
			 */
			rt_setgate(rt, rt_key(rt),
					(struct sockaddr *)&null_sdl);
			gate = rt->rt_gateway;
			SDL(gate)->sdl_type = rt->rt_ifp->if_type;
			SDL(gate)->sdl_index = rt->rt_ifp->if_index;
			rt->rt_expire = time_second;
			break;
		}
		/* Announce a new entry if requested. */
		if (rt->rt_flags & RTF_ANNOUNCE)
			arprequest((struct arpcom *)rt->rt_ifp,
			    &SIN(rt_key(rt))->sin_addr,
			    &SIN(rt_key(rt))->sin_addr,
			    (u_char *)LLADDR(SDL(gate)));
		/*FALLTHROUGH*/
	case RTM_RESOLVE:
		if (gate->sa_family != AF_LINK ||
		    gate->sa_len < sizeof(null_sdl)) {
			log(LOG_DEBUG, "arp_rtrequest: bad gateway value\n");
			break;
		}
		SDL(gate)->sdl_type = rt->rt_ifp->if_type;
		SDL(gate)->sdl_index = rt->rt_ifp->if_index;
		if (la != 0)
			break; /* This happens on a route change */
		/*
		 * Case 2: This route may come from cloning, or a manual route
		 * add with a LL address.
		 */
		R_Malloc(la, struct llinfo_arp *, sizeof(*la));
		rt->rt_llinfo = (caddr_t)la;
		if (la == 0) {
			log(LOG_DEBUG, "arp_rtrequest: malloc failed\n");
			break;
		}
		arp_inuse++, arp_allocated++;
		Bzero(la, sizeof(*la));
		la->la_rt = rt;
		rt->rt_flags |= RTF_LLINFO;
		LIST_INSERT_HEAD(&llinfo_arp, la, la_le);

#if INET
		/*
		 * This keeps the multicast addresses from showing up
		 * in `arp -a' listings as unresolved.  It's not actually
		 * functional.  Then the same for broadcast.
		 */
		if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) {
			ETHER_MAP_IP_MULTICAST(&SIN(rt_key(rt))->sin_addr,
					       LLADDR(SDL(gate)));
			SDL(gate)->sdl_alen = 6;
			rt->rt_expire = 0;	/* never times out */
		}
		if (in_broadcast(SIN(rt_key(rt))->sin_addr, rt->rt_ifp)) {
			memcpy(LLADDR(SDL(gate)), etherbroadcastaddr, 6);
			SDL(gate)->sdl_alen = 6;
			rt->rt_expire = 0;	/* never times out */
		}
#endif

		/* Entry for one of our own addresses? */
		if (SIN(rt_key(rt))->sin_addr.s_addr ==
		    (IA_SIN(rt->rt_ifa))->sin_addr.s_addr) {
			/*
			 * This test used to be
			 *	if (loif.if_flags & IFF_UP)
			 * It allowed local traffic to be forced
			 * through the hardware by configuring the loopback down.
			 * However, it causes problems during network configuration
			 * for boards that can't receive packets they send.
			 * It is now necessary to clear "useloopback" and remove
			 * the route to force traffic out to the hardware.
			 */
			rt->rt_expire = 0;
			Bcopy(((struct arpcom *)rt->rt_ifp)->ac_enaddr,
				LLADDR(SDL(gate)), SDL(gate)->sdl_alen = 6);
			if (useloopback)
				rt->rt_ifp = loif;

		}
		break;

	case RTM_DELETE:
		if (la == 0)
			break;
		arp_inuse--;
		LIST_REMOVE(la, la_le);
		rt->rt_llinfo = 0;
		rt->rt_flags &= ~RTF_LLINFO;
		/* Drop any packet still parked awaiting resolution. */
		if (la->la_hold)
			m_freem(la->la_hold);
		Free((caddr_t)la);
	}
}




/*
 * Broadcast an ARP packet, asking who has addr on interface ac.
+ */ +void +arpwhohas(ac, addr) + register struct arpcom *ac; + register struct in_addr *addr; +{ struct ifnet *ifp = (struct ifnet *)ac; + struct ifaddr *ifa = TAILQ_FIRST(&ifp->if_addrhead); + + while (ifa) + { if (ifa->ifa_addr->sa_family == AF_INET) + { arprequest(ac, &SIN(ifa->ifa_addr)->sin_addr, addr, ac->ac_enaddr); + return; + } + ifa = TAILQ_NEXT(ifa, ifa_link); + } + return; /* XXX */ +} + + + +/* + * Broadcast an ARP request. Caller specifies: + * - arp header source ip address + * - arp header target ip address + * - arp header source ethernet address + */ +static void +arprequest(ac, sip, tip, enaddr) + register struct arpcom *ac; + register struct in_addr *sip, *tip; + register u_char *enaddr; +{ + register struct mbuf *m; + register struct ether_header *eh; + register struct ether_arp *ea; + struct sockaddr sa; + + if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL) + return; + m->m_len = sizeof(*ea); + m->m_pkthdr.len = sizeof(*ea); + m->m_pkthdr.rcvif = (struct ifnet *)0; + MH_ALIGN(m, sizeof(*ea)); + ea = mtod(m, struct ether_arp *); + eh = (struct ether_header *)sa.sa_data; + bzero((caddr_t)ea, sizeof (*ea)); + (void)memcpy(eh->ether_dhost, etherbroadcastaddr, sizeof(eh->ether_dhost)); + eh->ether_type = htons(ETHERTYPE_ARP); /* if_output will not swap */ + ea->arp_hrd = htons(ARPHRD_ETHER); + ea->arp_pro = htons(ETHERTYPE_IP); + ea->arp_hln = sizeof(ea->arp_sha); /* hardware address length */ + ea->arp_pln = sizeof(ea->arp_spa); /* protocol address length */ + ea->arp_op = htons(ARPOP_REQUEST); + (void)memcpy(ea->arp_sha, enaddr, sizeof(ea->arp_sha)); + (void)memcpy(ea->arp_spa, sip, sizeof(ea->arp_spa)); + (void)memcpy(ea->arp_tpa, tip, sizeof(ea->arp_tpa)); + sa.sa_family = AF_UNSPEC; + sa.sa_len = sizeof(sa); + dlil_output((u_long) ac, m, 0, &sa, 0); +} + +/* + * Resolve an IP address into an ethernet address. If success, + * desten is filled in. If there is no entry in arptab, + * set one up and broadcast a request for the IP address. 
 * Hold onto this mbuf and resend it once the address
 * is finally resolved.  A return value of 1 indicates
 * that desten has been filled in and the packet should be sent
 * normally; a 0 return indicates that the packet has been
 * taken over here, either now or for later transmission.
 */
int
arpresolve(ac, rt, m, dst, desten, rt0)
	register struct arpcom *ac;
	register struct rtentry *rt;
	struct mbuf *m;
	register struct sockaddr *dst;
	register u_char *desten;
	struct rtentry *rt0;	/* NOTE(review): accepted but unused here */
{
	register struct llinfo_arp *la = 0;
	struct sockaddr_dl *sdl;

	/* Broadcast and multicast destinations need no table lookup. */
	if (m->m_flags & M_BCAST) {	/* broadcast */
		(void)memcpy(desten, etherbroadcastaddr, sizeof(etherbroadcastaddr));
		return (1);
	}
	if (m->m_flags & M_MCAST) {	/* multicast */
		ETHER_MAP_IP_MULTICAST(&SIN(dst)->sin_addr, desten);
		return(1);
	}
	/* Use the route's llinfo if present, else look up / create one. */
	if (rt)
		la = (struct llinfo_arp *)rt->rt_llinfo;
	if (la == 0) {
		la = arplookup(SIN(dst)->sin_addr.s_addr, 1, 0);
		if (la)
			rt = la->la_rt;
	}
	if (la == 0 || rt == 0) {
		log(LOG_DEBUG, "arpresolve: can't allocate llinfo for %s%s%s\n",
			inet_ntoa(SIN(dst)->sin_addr), la ? "la" : "",
				rt ? "rt" : "");
		m_freem(m);
		return (0);
	}
	sdl = SDL(rt->rt_gateway);
	/*
	 * Check the address family and length is valid, the address
	 * is resolved; otherwise, try to resolve.
	 */
	if ((rt->rt_expire == 0 || rt->rt_expire > time_second) &&
	    sdl->sdl_family == AF_LINK && sdl->sdl_alen != 0) {
		bcopy(LLADDR(sdl), desten, sdl->sdl_alen);
		return 1;
	}
	/*
	 * There is an arptab entry, but no ethernet address
	 * response yet.  Replace the held mbuf with this
	 * latest one.
	 */
	if (la->la_hold)
		m_freem(la->la_hold);
	la->la_hold = m;
	/* rt_expire == 0 means a static entry: never re-query it. */
	if (rt->rt_expire) {
		rt->rt_flags &= ~RTF_REJECT;
		/* Rate-limit: at most one request per second per entry. */
		if (la->la_asked == 0 || rt->rt_expire != time_second) {
			rt->rt_expire = time_second;
			if (la->la_asked++ < arp_maxtries)
				arprequest(ac,
					&SIN(rt->rt_ifa->ifa_addr)->sin_addr,
					&SIN(dst)->sin_addr, ac->ac_enaddr);
			else {
				/* Give up: reject for arpt_down seconds. */
				rt->rt_flags |= RTF_REJECT;
				rt->rt_expire += arpt_down;
				la->la_asked = 0;
			}

		}
	}
	return (0);
}

/*
 * Common length and type checks are done here,
 * then the protocol-specific routine is called.
 */
void
arpintr()
{
	register struct mbuf *m;
	register struct arphdr *ar;
	int s;

	while (arpintrq.ifq_head) {
		s = splimp();
		IF_DEQUEUE(&arpintrq, m);
		splx(s);
		if (m == 0 || (m->m_flags & M_PKTHDR) == 0)
			panic("arpintr");
		/*
		 * Sanity-check the fixed header, then that the whole
		 * variable-length body fits in this mbuf.
		 */
		if (m->m_len >= sizeof(struct arphdr) &&
		    (ar = mtod(m, struct arphdr *)) &&
		    ntohs(ar->ar_hrd) == ARPHRD_ETHER &&
		    m->m_len >=
		      sizeof(struct arphdr) + 2 * ar->ar_hln + 2 * ar->ar_pln)

			switch (ntohs(ar->ar_pro)) {

#if INET
			case ETHERTYPE_IP:
				/* in_arpinput() takes over the mbuf;
				 * `continue' skips the m_freem below. */
				in_arpinput(m);
				continue;
#endif
			}
		/* Unrecognized or malformed packet: discard it. */
		m_freem(m);
	}
}

NETISR_SET(NETISR_ARP, arpintr);


#if INET
/*
 * ARP for Internet protocols on 10 Mb/s Ethernet.
 * Algorithm is that given in RFC 826.
 * In addition, a sanity check is performed on the sender
 * protocol address, to catch impersonators.
 * We no longer handle negotiations for use of trailer protocol:
 * Formerly, ARP replied for protocol type ETHERTYPE_TRAIL sent
 * along with IP replies if we wanted trailers sent to us,
 * and also sent them in response to IP replies.
 * This allowed either end to announce the desire to receive
 * trailer packets.
 * We no longer reply to requests for ETHERTYPE_TRAIL protocol either,
 * but formerly didn't normally send requests.
 */
/*
 * Process one validated ARP packet: update (or refuse to update) the
 * table from the sender fields, then answer requests addressed to us
 * or, when proxying, to hosts we route for.  Consumes the mbuf.
 */
static void
in_arpinput(m)
	struct mbuf *m;
{
	register struct ether_arp *ea;
	register struct arpcom *ac = (struct arpcom *)m->m_pkthdr.rcvif;
	struct ether_header *eh;
	register struct llinfo_arp *la = 0;
	register struct rtentry *rt;
	struct in_ifaddr *ia, *maybe_ia = 0;
	struct sockaddr_dl *sdl;
	struct sockaddr sa;
	struct in_addr isaddr, itaddr, myaddr;
	int op;
	unsigned char buf[18];

	ea = mtod(m, struct ether_arp *);
	op = ntohs(ea->arp_op);
	(void)memcpy(&isaddr, ea->arp_spa, sizeof (isaddr));
	(void)memcpy(&itaddr, ea->arp_tpa, sizeof (itaddr));
	/*
	 * Find an address of ours that matches the sender or target;
	 * fall back to any address on the receive interface.
	 */
	for (ia = in_ifaddrhead.tqh_first; ia; ia = ia->ia_link.tqe_next)
#if BRIDGE
	/*
	 * For a bridge, we want to check the address irrespective
	 * of the receive interface. (This will change slightly
	 * when we have clusters of interfaces).
	 */
	{
#else
		if (ia->ia_ifp == &ac->ac_if) {
#endif
			maybe_ia = ia;
			if ((itaddr.s_addr == ia->ia_addr.sin_addr.s_addr) ||
			     (isaddr.s_addr == ia->ia_addr.sin_addr.s_addr))
				break;
		}
	if (maybe_ia == 0) {
		/* Not for any interface we own an address on. */
		m_freem(m);
		return;
	}
	/* ia is NULL when the loop ran off the end without an exact match. */
	myaddr = ia ? ia->ia_addr.sin_addr : maybe_ia->ia_addr.sin_addr;

#if 0
	/*
	 * In order to support BlueBox networking, we need to allow
	 * "self-addressed" stamped envelopes
	 */
	if (!bcmp((caddr_t)ea->arp_sha, (caddr_t)ac->ac_enaddr,
	    sizeof (ea->arp_sha))) {
		m_freem(m);	/* it's from me, ignore it. */
		return;
	}
#endif
	if (!bcmp((caddr_t)ea->arp_sha, (caddr_t)etherbroadcastaddr,
	    sizeof (ea->arp_sha))) {
		log(LOG_ERR,
		    "arp: ether address is broadcast for IP address %s!\n",
		    inet_ntoa(isaddr));
		m_freem(m);
		return;
	}
	if (isaddr.s_addr == myaddr.s_addr) {
		/* Someone is using our address: complain, then defend it. */
		log(LOG_ERR,
		   "duplicate IP address %s sent from ethernet address %s\n",
		   inet_ntoa(isaddr), ether_sprintf(buf, ea->arp_sha));
		itaddr = myaddr;
		goto reply;
	}
	/* Only create a new entry when the packet was aimed at us. */
	la = arplookup(isaddr.s_addr, itaddr.s_addr == myaddr.s_addr, 0);
	if (la && (rt = la->la_rt) && (sdl = SDL(rt->rt_gateway))) {
#ifndef BRIDGE /* the following is not an error when doing bridging */
		if (rt->rt_ifp != &ac->ac_if) {
			log(LOG_ERR, "arp: %s is on %s%d but got reply from %6D on %s%d\n",
			    inet_ntoa(isaddr),
			    rt->rt_ifp->if_name, rt->rt_ifp->if_unit,
			    ea->arp_sha, ":",
			    ac->ac_if.if_name, ac->ac_if.if_unit);
			goto reply;
		}
#endif
		/* NOTE: the else below belongs to the inner if (dangling else). */
		if (sdl->sdl_alen &&
		    bcmp((caddr_t)ea->arp_sha, LLADDR(sdl), sdl->sdl_alen))
			if (rt->rt_expire)
			    log(LOG_INFO, "arp: %s moved from %6D to %6D on %s%d\n",
				inet_ntoa(isaddr), (u_char *)LLADDR(sdl), ":",
				ea->arp_sha, ":",
				ac->ac_if.if_name, ac->ac_if.if_unit);
			else {
			    /* NOTE(review): format string lacks a trailing \n */
			    log(LOG_ERR,
				"arp: %6D attempts to modify permanent entry for %s on %s%d",
				ea->arp_sha, ":", inet_ntoa(isaddr),
				ac->ac_if.if_name, ac->ac_if.if_unit);
			    goto reply;
			}
		/* Record the (possibly new) hardware address and refresh. */
		(void)memcpy(LLADDR(sdl), ea->arp_sha, sizeof(ea->arp_sha));
		sdl->sdl_alen = sizeof(ea->arp_sha);
		if (rt->rt_expire)
			rt->rt_expire = time_second + arpt_keep;
		rt->rt_flags &= ~RTF_REJECT;
		la->la_asked = 0;
		/* Now resolved: flush the packet parked by arpresolve(). */
		if (la->la_hold) {
			dlil_output((u_long) ac, la->la_hold, rt,
				    rt_key(rt), 0);
			la->la_hold = 0;
		}
	}
reply:
	if (op != ARPOP_REQUEST) {
		m_freem(m);
		return;
	}
	if (itaddr.s_addr == myaddr.s_addr) {
		/* I am the target */
		(void)memcpy(ea->arp_tha, ea->arp_sha, sizeof(ea->arp_sha));
		(void)memcpy(ea->arp_sha, ac->ac_enaddr, sizeof(ea->arp_sha));
	} else {
		/* Request for someone else: consider proxy ARP. */
		la = arplookup(itaddr.s_addr, 0, SIN_PROXY);
		if (la == NULL) {
			struct sockaddr_in sin;

			if (!arp_proxyall) {
				m_freem(m);
				return;
			}

			bzero(&sin, sizeof sin);
			sin.sin_family = AF_INET;
			sin.sin_len = sizeof sin;
			sin.sin_addr = itaddr;

			/* Proxy only for hosts we actually have a route to. */
			rt = rtalloc1((struct sockaddr *)&sin, 0, 0UL);
			if (!rt) {
				m_freem(m);
				return;
			}
			/*
			 * Don't send proxies for nodes on the same interface
			 * as this one came out of, or we'll get into a fight
			 * over who claims what Ether address.
			 */
			if (rt->rt_ifp == &ac->ac_if) {
				rtfree(rt);
				m_freem(m);
				return;
			}
			(void)memcpy(ea->arp_tha, ea->arp_sha, sizeof(ea->arp_sha));
			(void)memcpy(ea->arp_sha, ac->ac_enaddr, sizeof(ea->arp_sha));
			rtfree(rt);
#if DEBUG_PROXY
			printf("arp: proxying for %s\n",
			       inet_ntoa(itaddr));
#endif
		} else {
			/* Explicit published (proxy) entry: answer with it. */
			rt = la->la_rt;
			(void)memcpy(ea->arp_tha, ea->arp_sha, sizeof(ea->arp_sha));
			sdl = SDL(rt->rt_gateway);
			(void)memcpy(ea->arp_sha, LLADDR(sdl), sizeof(ea->arp_sha));
		}
	}

	/* Turn the request into a reply, reusing the same mbuf. */
	(void)memcpy(ea->arp_tpa, ea->arp_spa, sizeof(ea->arp_spa));
	(void)memcpy(ea->arp_spa, &itaddr, sizeof(ea->arp_spa));
	ea->arp_op = htons(ARPOP_REPLY);
	ea->arp_pro = htons(ETHERTYPE_IP); /* let's be sure! */
	eh = (struct ether_header *)sa.sa_data;
	(void)memcpy(eh->ether_dhost, ea->arp_tha, sizeof(eh->ether_dhost));
	eh->ether_type = htons(ETHERTYPE_ARP);
	sa.sa_family = AF_UNSPEC;
	sa.sa_len = sizeof(sa);
	dlil_output((u_long) ac, m, 0, &sa, 0);
	return;
}
#endif

/*
 * Free an arp entry.
 *
 * If the route is still referenced, just invalidate the hardware
 * address so it will be re-resolved; otherwise delete the route.
 */
static void
arptfree(la)
	register struct llinfo_arp *la;
{
	register struct rtentry *rt = la->la_rt;
	register struct sockaddr_dl *sdl;
	if (rt == 0)
		panic("arptfree");
	if (rt->rt_refcnt > 0 && (sdl = SDL(rt->rt_gateway)) &&
	    sdl->sdl_family == AF_LINK) {
		sdl->sdl_alen = 0;
		la->la_asked = 0;
		rt->rt_flags &= ~RTF_REJECT;
		return;
	}
	rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0, rt_mask(rt),
			0, (struct rtentry **)0);
}
/*
 * Lookup or enter a new address in arptab.
+ */ +static struct llinfo_arp * +arplookup(addr, create, proxy) + u_long addr; + int create, proxy; +{ + register struct rtentry *rt; + static struct sockaddr_inarp sin = {sizeof(sin), AF_INET }; + const char *why = 0; + + sin.sin_addr.s_addr = addr; + sin.sin_other = proxy ? SIN_PROXY : 0; + rt = rtalloc1((struct sockaddr *)&sin, create, 0UL); + if (rt == 0) + return (0); + rt->rt_refcnt--; + + if (rt->rt_flags & RTF_GATEWAY) + why = "host is not on local network"; + else if ((rt->rt_flags & RTF_LLINFO) == 0) + why = "could not allocate llinfo"; + else if (rt->rt_gateway->sa_family != AF_LINK) + why = "gateway route is not ours"; + + if (why && create) { + log(LOG_DEBUG, "arplookup %s failed: %s\n", + inet_ntoa(sin.sin_addr), why); + return 0; + } else if (why) { + return 0; + } + return ((struct llinfo_arp *)rt->rt_llinfo); +} + +void +arp_ifinit(ac, ifa) + struct arpcom *ac; + struct ifaddr *ifa; +{ + if (ntohl(IA_SIN(ifa)->sin_addr.s_addr) != INADDR_ANY) + arprequest(ac, &IA_SIN(ifa)->sin_addr, + &IA_SIN(ifa)->sin_addr, ac->ac_enaddr); + ifa->ifa_rtrequest = arp_rtrequest; + ifa->ifa_flags |= RTF_CLONING; +} diff --git a/bsd/netinet/if_ether.h b/bsd/netinet/if_ether.h new file mode 100644 index 000000000..7ac0d866e --- /dev/null +++ b/bsd/netinet/if_ether.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_ether.h 8.3 (Berkeley) 5/2/95 + */ + +#ifndef _NETINET_IF_ETHER_H_ +#define _NETINET_IF_ETHER_H_ + + +#include + +#include +#include +#include +#define ea_byte ether_addr_octet + + +/* + * Macro for looking up the ether_multi record for a given range of Ethernet + * multicast addresses connected to a given arpcom structure. If no matching + * record is found, "enm" returns NULL. + */ +#define ETHER_LOOKUP_MULTI(addrlo, addrhi, ac, enm) \ + /* u_char addrlo[6]; */ \ + /* u_char addrhi[6]; */ \ + /* struct arpcom *ac; */ \ + /* struct ether_multi *enm; */ \ +{ \ + for ((enm) = (ac)->ac_multiaddrs; \ + (enm) != NULL && \ + (bcmp((enm)->enm_addrlo, (addrlo), 6) != 0 || \ + bcmp((enm)->enm_addrhi, (addrhi), 6) != 0); \ + (enm) = (enm)->enm_next); \ +} + +/* + * Macro to step through all of the ether_multi records, one at a time. + * The current position is remembered in "step", which the caller must + * provide. ETHER_FIRST_MULTI(), below, must be called to initialize "step" + * and get the first record. Both macros return a NULL "enm" when there + * are no remaining records. 
 */
/* Fetch the record at `step', then advance `step' to its successor. */
#define ETHER_NEXT_MULTI(step, enm) \
	/* struct ether_multistep step; */  \
	/* struct ether_multi *enm; */  \
{ \
	if (((enm) = (step).e_enm) != NULL) \
		(step).e_enm = (enm)->enm_next; \
}

/* Prime `step' from the arpcom's list and fetch the first record. */
#define ETHER_FIRST_MULTI(step, ac, enm) \
	/* struct ether_multistep step;  */ \
	/* struct arpcom *ac; */ \
	/* struct ether_multi *enm; */ \
{ \
	(step).e_enm = (ac)->ac_multiaddrs; \
	ETHER_NEXT_MULTI((step), (enm)); \
}

#define ETHERTYPE_IPV6	0x86dd

/*
 * Macro to map an IP multicast address to an Ethernet multicast address.
 * The high-order 25 bits of the Ethernet address are statically assigned,
 * and the low-order 23 bits are taken from the low end of the IP address.
 */
#define ETHER_MAP_IP_MULTICAST(ipaddr, enaddr) \
	/* struct in_addr *ipaddr; */ \
	/* u_char enaddr[ETHER_ADDR_LEN]; */ \
{ \
	(enaddr)[0] = 0x01; \
	(enaddr)[1] = 0x00; \
	(enaddr)[2] = 0x5e; \
	(enaddr)[3] = ((u_char *)ipaddr)[1] & 0x7f; \
	(enaddr)[4] = ((u_char *)ipaddr)[2]; \
	(enaddr)[5] = ((u_char *)ipaddr)[3]; \
}
/*
 * Macro to map an IP6 multicast address to an Ethernet multicast address.
 * The high-order 16 bits of the Ethernet address are statically assigned,
 * and the low-order 32 bits are taken from the low end of the IP6 address.
 */
#define ETHER_MAP_IPV6_MULTICAST(ip6addr, enaddr) \
/* struct in6_addr *ip6addr; */ \
/* u_char enaddr[ETHER_ADDR_LEN]; */ \
{ \
	(enaddr)[0] = 0x33; \
	(enaddr)[1] = 0x33; \
	(enaddr)[2] = ((u_char *)ip6addr)[12]; \
	(enaddr)[3] = ((u_char *)ip6addr)[13]; \
	(enaddr)[4] = ((u_char *)ip6addr)[14]; \
	(enaddr)[5] = ((u_char *)ip6addr)[15]; \
}

/*
 * Ethernet Address Resolution Protocol.
 *
 * See RFC 826 for protocol description.  Structure below is adapted
 * to resolving internet addresses.  Field names used correspond to
 * RFC 826.
 */
/* RFC 826 Ethernet/IPv4 ARP payload: fixed arphdr plus four addresses. */
struct	ether_arp {
	struct	arphdr ea_hdr;	/* fixed-size header */
	u_char	arp_sha[ETHER_ADDR_LEN];	/* sender hardware address */
	u_char	arp_spa[4];	/* sender protocol address */
	u_char	arp_tha[ETHER_ADDR_LEN];	/* target hardware address */
	u_char	arp_tpa[4];	/* target protocol address */
};
/* Convenience aliases into the embedded fixed header. */
#define	arp_hrd	ea_hdr.ar_hrd
#define	arp_pro	ea_hdr.ar_pro
#define	arp_hln	ea_hdr.ar_hln
#define	arp_pln	ea_hdr.ar_pln
#define	arp_op	ea_hdr.ar_op

/* sockaddr_in extended with ARP-specific fields (sin_other/SIN_PROXY). */
struct	sockaddr_inarp {
	u_char	sin_len;
	u_char	sin_family;
	u_short sin_port;
	struct	in_addr sin_addr;
	struct	in_addr sin_srcaddr;
	u_short	sin_tos;
	u_short	sin_other;	/* SIN_PROXY marks published (proxy) entries */
#define SIN_PROXY 1
};
/*
 * IP and ethernet specific routing flags
 */
#define	RTF_USETRAILERS	RTF_PROTO1	/* use trailers */
#define	RTF_ANNOUNCE	RTF_PROTO2	/* announce new arp entry */

#ifdef	KERNEL
extern u_char	ether_ipmulticast_min[ETHER_ADDR_LEN];
extern u_char	ether_ipmulticast_max[ETHER_ADDR_LEN];
extern struct	ifqueue arpintrq;

int	arpresolve __P((struct arpcom *, struct rtentry *, struct mbuf *,
			struct sockaddr *, u_char *, struct rtentry *));
void	arp_ifinit __P((struct arpcom *, struct ifaddr *));
#endif

#endif
diff --git a/bsd/netinet/if_fddi.h b/bsd/netinet/if_fddi.h
new file mode 100644
index 000000000..b44792a77
--- /dev/null
+++ b/bsd/netinet/if_fddi.h
@@ -0,0 +1,107 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * Copyright (c) 1995 Matt Thomas (thomas@lkg.dec.com) + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
 IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if_fddi.h	8.1 (Berkeley) 6/10/93
 */

#ifndef _NETINET_IF_FDDI_H_
#define _NETINET_IF_FDDI_H_

/*
 * Structure of an 100Mb/s FDDI header.
 */
struct	fddi_header {
	u_char	fddi_fc;	/* frame control */
	u_char	fddi_dhost[6];	/* destination MAC address */
	u_char	fddi_shost[6];	/* source MAC address */
};

#define	FDDIIPMTU	4352
#define	FDDIMTU		4470
#define	FDDIMIN		3

/* Frame-control field masks. */
#define	FDDIFC_C	0x80	/* 0b10000000 */
#define	FDDIFC_L	0x40	/* 0b01000000 */
#define	FDDIFC_F	0x30	/* 0b00110000 */
#define	FDDIFC_Z	0x0F	/* 0b00001111 */

#define	FDDIFC_LLC_ASYNC	0x50
#define	FDDIFC_LLC_PRIO0	0
#define	FDDIFC_LLC_PRIO1	1
#define	FDDIFC_LLC_PRIO2	2
#define	FDDIFC_LLC_PRIO3	3
#define	FDDIFC_LLC_PRIO4	4
#define	FDDIFC_LLC_PRIO5	5
#define	FDDIFC_LLC_PRIO6	6
#define	FDDIFC_LLC_PRIO7	7
#define	FDDIFC_LLC_SYNC		0xd0
#define	FDDIFC_SMT		0x40

#if defined(KERNEL)
/* FDDI reuses the Ethernet broadcast/multicast machinery wholesale. */
#define	fddibroadcastaddr	etherbroadcastaddr
#define	fddi_ipmulticast_min	ether_ipmulticast_min
#define	fddi_ipmulticast_max	ether_ipmulticast_max
#define	fddi_addmulti		ether_addmulti
#define	fddi_delmulti		ether_delmulti
#define	fddi_sprintf		ether_sprintf

void	fddi_ifattach __P((struct ifnet *));
void	fddi_input __P((struct ifnet *, struct fddi_header *, struct mbuf *));
int	fddi_output __P((struct ifnet *,
	   struct mbuf *, struct sockaddr *, struct rtentry *));

#endif

#endif
diff --git a/bsd/netinet/if_tun.h b/bsd/netinet/if_tun.h
new file mode 100644
index 000000000..6e548368a
--- /dev/null
+++ b/bsd/netinet/if_tun.h
@@ 
-0,0 +1,70 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1988, Julian Onions
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have it's wicked way with. This driver has it's
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/select mode of
 * operation though.
 *
 */

#ifndef _NET_IF_TUN_H_
#define _NET_IF_TUN_H_

/* Per-device soft state for one tun(4) tunnel interface. */
struct tun_softc {
	u_short	tun_flags;		/* misc flags */
#define	TUN_OPEN	0x0001		/* control device is open */
#define	TUN_INITED	0x0002
#define	TUN_RCOLL	0x0004
#define	TUN_IASET	0x0008		/* interface address set */
#define	TUN_DSTADDR	0x0010
#define	TUN_RWAIT	0x0040
#define	TUN_ASYNC	0x0080
#define	TUN_NBIO	0x0100		/* non-blocking reads */

#define	TUN_READY	(TUN_OPEN | TUN_INITED | TUN_IASET)

	struct	ifnet tun_if;		/* the interface */
	int	tun_pgrp;		/* the process group - if any */
	struct	selinfo	tun_rsel;	/* read select */
	struct	selinfo	tun_wsel;	/* write select (not used) */
#if NBPFILTER > 0
	caddr_t		tun_bpf;
#endif
};

/* Maximum packet size */
#define	TUNMTU	1500

/* ioctl's for get/set debug */
#define	TUNSDEBUG	_IOW('t', 90, int)
#define	TUNGDEBUG	_IOR('t', 89, int)

#endif /* !_NET_IF_TUN_H_ */
diff --git a/bsd/netinet/igmp.c b/bsd/netinet/igmp.c
new file mode 100644
index 000000000..0a1d51ef1
--- /dev/null
+++ b/bsd/netinet/igmp.c
@@ -0,0 +1,512 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1988 Stephen Deering.
+ * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Stephen Deering of Stanford University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)igmp.c 8.1 (Berkeley) 7/19/93 + */ + +/* + * Internet Group Management Protocol (IGMP) routines. + * + * Written by Steve Deering, Stanford, May 1988. + * Modified by Rosen Sharma, Stanford, Aug 1994. + * Modified by Bill Fenner, Xerox PARC, Feb 1995. + * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995. + * + * MULTICAST Revision: 3.5.1.4 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state"); + +static struct router_info * + find_rti __P((struct ifnet *ifp)); + +static struct igmpstat igmpstat; + +SYSCTL_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RD, + &igmpstat, igmpstat, ""); + +static int igmp_timers_are_running; +static u_long igmp_all_hosts_group; +static u_long igmp_all_rtrs_group; +static struct mbuf *router_alert; +static struct router_info *Head; + +static void igmp_sendpkt __P((struct in_multi *, int, unsigned long)); + +void +igmp_init() +{ + struct ipoption *ra; + + /* + * To avoid byte-swapping the same value over and over again. 
+ */ + igmp_all_hosts_group = htonl(INADDR_ALLHOSTS_GROUP); + igmp_all_rtrs_group = htonl(INADDR_ALLRTRS_GROUP); + + igmp_timers_are_running = 0; + + /* + * Construct a Router Alert option to use in outgoing packets + */ + MGET(router_alert, M_DONTWAIT, MT_DATA); + ra = mtod(router_alert, struct ipoption *); + ra->ipopt_dst.s_addr = 0; + ra->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */ + ra->ipopt_list[1] = 0x04; /* 4 bytes long */ + ra->ipopt_list[2] = 0x00; + ra->ipopt_list[3] = 0x00; + router_alert->m_len = sizeof(ra->ipopt_dst) + ra->ipopt_list[1]; + + Head = (struct router_info *) 0; +} + +static struct router_info * +find_rti(ifp) + struct ifnet *ifp; +{ + register struct router_info *rti = Head; + +#if IGMP_DEBUG + printf("[igmp.c, _find_rti] --> entering \n"); +#endif + while (rti) { + if (rti->rti_ifp == ifp) { +#if IGMP_DEBUG + printf("[igmp.c, _find_rti] --> found old entry \n"); +#endif + return rti; + } + rti = rti->rti_next; + } + +#if ISFB31 + MALLOC(rti, struct router_info *, sizeof *rti, M_IGMP, M_NOWAIT); +#else + MALLOC(rti, struct router_info *, sizeof *rti, M_TEMP, M_NOWAIT); +#endif + rti->rti_ifp = ifp; + rti->rti_type = IGMP_V2_ROUTER; + rti->rti_time = 0; + rti->rti_next = Head; + Head = rti; +#if IGMP_DEBUG + printf("[igmp.c, _find_rti] --> created an entry \n"); +#endif + return rti; +} + +void +igmp_input(m, iphlen) + register struct mbuf *m; + register int iphlen; +{ + register struct igmp *igmp; + register struct ip *ip; + register int igmplen; + register struct ifnet *ifp = m->m_pkthdr.rcvif; + register int minlen; + register struct in_multi *inm; + register struct in_ifaddr *ia; + struct in_multistep step; + struct router_info *rti; + + int timer; /** timer value in the igmp query header **/ + + ++igmpstat.igps_rcv_total; + + ip = mtod(m, struct ip *); + igmplen = ip->ip_len; + + /* + * Validate lengths + */ + if (igmplen < IGMP_MINLEN) { + ++igmpstat.igps_rcv_tooshort; + m_freem(m); + return; + } + minlen = iphlen + 
IGMP_MINLEN; + if ((m->m_flags & M_EXT || m->m_len < minlen) && + (m = m_pullup(m, minlen)) == 0) { + ++igmpstat.igps_rcv_tooshort; + return; + } + + /* + * Validate checksum + */ + m->m_data += iphlen; + m->m_len -= iphlen; + igmp = mtod(m, struct igmp *); + if (in_cksum(m, igmplen)) { + ++igmpstat.igps_rcv_badsum; + m_freem(m); + return; + } + m->m_data -= iphlen; + m->m_len += iphlen; + + ip = mtod(m, struct ip *); + timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE; + if (timer == 0) + timer = 1; + rti = find_rti(ifp); + + /* + * In the IGMPv2 specification, there are 3 states and a flag. + * + * In Non-Member state, we simply don't have a membership record. + * In Delaying Member state, our timer is running (inm->inm_timer) + * In Idle Member state, our timer is not running (inm->inm_timer==0) + * + * The flag is inm->inm_state, it is set to IGMP_OTHERMEMBER if + * we have heard a report from another member, or IGMP_IREPORTEDLAST + * if I sent the last report. + */ + switch (igmp->igmp_type) { + + case IGMP_MEMBERSHIP_QUERY: + ++igmpstat.igps_rcv_queries; + + if (ifp->if_flags & IFF_LOOPBACK) + break; + + if (igmp->igmp_code == 0) { + /* + * Old router. Remember that the querier on this + * interface is old, and set the timer to the + * value in RFC 1112. + */ + + rti->rti_type = IGMP_V1_ROUTER; + rti->rti_time = 0; + + timer = IGMP_MAX_HOST_REPORT_DELAY * PR_FASTHZ; + + if (ip->ip_dst.s_addr != igmp_all_hosts_group || + igmp->igmp_group.s_addr != 0) { + ++igmpstat.igps_rcv_badqueries; + m_freem(m); + return; + } + } else { + /* + * New router. Simply do the new validity check. + */ + + if (igmp->igmp_group.s_addr != 0 && + !IN_MULTICAST(ntohl(igmp->igmp_group.s_addr))) { + ++igmpstat.igps_rcv_badqueries; + m_freem(m); + return; + } + } + + /* + * - Start the timers in all of our membership records + * that the query applies to for the interface on + * which the query arrived excl. those that belong + * to the "all-hosts" group (224.0.0.1). 
+ * - Restart any timer that is already running but has + * a value longer than the requested timeout. + * - Use the value specified in the query message as + * the maximum timeout. + */ + IN_FIRST_MULTI(step, inm); + while (inm != NULL) { + if (inm->inm_ifp == ifp && + inm->inm_addr.s_addr != igmp_all_hosts_group && + (igmp->igmp_group.s_addr == 0 || + igmp->igmp_group.s_addr == inm->inm_addr.s_addr)) { + if (inm->inm_timer == 0 || + inm->inm_timer > timer) { + inm->inm_timer = + IGMP_RANDOM_DELAY(timer); + igmp_timers_are_running = 1; + } + } + IN_NEXT_MULTI(step, inm); + } + + break; + + case IGMP_V1_MEMBERSHIP_REPORT: + case IGMP_V2_MEMBERSHIP_REPORT: + /* + * For fast leave to work, we have to know that we are the + * last person to send a report for this group. Reports + * can potentially get looped back if we are a multicast + * router, so discard reports sourced by me. + */ + IFP_TO_IA(ifp, ia); + if (ia && ip->ip_src.s_addr == IA_SIN(ia)->sin_addr.s_addr) + break; + + ++igmpstat.igps_rcv_reports; + + if (ifp->if_flags & IFF_LOOPBACK) + break; + + if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr))) { + ++igmpstat.igps_rcv_badreports; + m_freem(m); + return; + } + + /* + * KLUDGE: if the IP source address of the report has an + * unspecified (i.e., zero) subnet number, as is allowed for + * a booting host, replace it with the correct subnet number + * so that a process-level multicast routing demon can + * determine which subnet it arrived from. This is necessary + * to compensate for the lack of any way for a process to + * determine the arrival interface of an incoming packet. + */ + if ((ntohl(ip->ip_src.s_addr) & IN_CLASSA_NET) == 0) + if (ia) ip->ip_src.s_addr = htonl(ia->ia_subnet); + + /* + * If we belong to the group being reported, stop + * our timer for that group. 
+ */ + IN_LOOKUP_MULTI(igmp->igmp_group, ifp, inm); + + if (inm != NULL) { + inm->inm_timer = 0; + ++igmpstat.igps_rcv_ourreports; + + inm->inm_state = IGMP_OTHERMEMBER; + } + + break; + } + + /* + * Pass all valid IGMP packets up to any process(es) listening + * on a raw IGMP socket. + */ + rip_input(m, iphlen); +} + +void +igmp_joingroup(inm) + struct in_multi *inm; +{ + int s = splnet(); + + if (inm->inm_addr.s_addr == igmp_all_hosts_group + || inm->inm_ifp->if_flags & IFF_LOOPBACK) { + inm->inm_timer = 0; + inm->inm_state = IGMP_OTHERMEMBER; + } else { + inm->inm_rti = find_rti(inm->inm_ifp); + igmp_sendpkt(inm, inm->inm_rti->rti_type, 0); + inm->inm_timer = IGMP_RANDOM_DELAY( + IGMP_MAX_HOST_REPORT_DELAY*PR_FASTHZ); + inm->inm_state = IGMP_IREPORTEDLAST; + igmp_timers_are_running = 1; + } + splx(s); +} + +void +igmp_leavegroup(inm) + struct in_multi *inm; +{ + if (inm->inm_state == IGMP_IREPORTEDLAST && + inm->inm_addr.s_addr != igmp_all_hosts_group && + !(inm->inm_ifp->if_flags & IFF_LOOPBACK) && + inm->inm_rti->rti_type != IGMP_V1_ROUTER) + igmp_sendpkt(inm, IGMP_V2_LEAVE_GROUP, igmp_all_rtrs_group); +} + +void +igmp_fasttimo() +{ + register struct in_multi *inm; + struct in_multistep step; + int s; + + /* + * Quick check to see if any work needs to be done, in order + * to minimize the overhead of fasttimo processing. 
+ */ + + if (!igmp_timers_are_running) + return; + + s = splnet(); + igmp_timers_are_running = 0; + IN_FIRST_MULTI(step, inm); + while (inm != NULL) { + if (inm->inm_timer == 0) { + /* do nothing */ + } else if (--inm->inm_timer == 0) { + igmp_sendpkt(inm, inm->inm_rti->rti_type, 0); + inm->inm_state = IGMP_IREPORTEDLAST; + } else { + igmp_timers_are_running = 1; + } + IN_NEXT_MULTI(step, inm); + } + splx(s); +} + +void +igmp_slowtimo() +{ + int s = splnet(); + register struct router_info *rti = Head; + +#if IGMP_DEBUG + printf("[igmp.c,_slowtimo] -- > entering \n"); +#endif + while (rti) { + if (rti->rti_type == IGMP_V1_ROUTER) { + rti->rti_time++; + if (rti->rti_time >= IGMP_AGE_THRESHOLD) { + rti->rti_type = IGMP_V2_ROUTER; + } + } + rti = rti->rti_next; + } +#if IGMP_DEBUG + printf("[igmp.c,_slowtimo] -- > exiting \n"); +#endif + splx(s); +} + +static struct route igmprt; + +static void +igmp_sendpkt(inm, type, addr) + struct in_multi *inm; + int type; + unsigned long addr; +{ + struct mbuf *m; + struct igmp *igmp; + struct ip *ip; + struct ip_moptions imo; + + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (m == NULL) + return; + + m->m_pkthdr.rcvif = loif; + m->m_pkthdr.len = sizeof(struct ip) + IGMP_MINLEN; + MH_ALIGN(m, IGMP_MINLEN + sizeof(struct ip)); + m->m_data += sizeof(struct ip); + m->m_len = IGMP_MINLEN; + igmp = mtod(m, struct igmp *); + igmp->igmp_type = type; + igmp->igmp_code = 0; + igmp->igmp_group = inm->inm_addr; + igmp->igmp_cksum = 0; + igmp->igmp_cksum = in_cksum(m, IGMP_MINLEN); + + m->m_data -= sizeof(struct ip); + m->m_len += sizeof(struct ip); + ip = mtod(m, struct ip *); + ip->ip_tos = 0; + ip->ip_len = sizeof(struct ip) + IGMP_MINLEN; + ip->ip_off = 0; + ip->ip_p = IPPROTO_IGMP; + ip->ip_src.s_addr = INADDR_ANY; + ip->ip_dst.s_addr = addr ? 
addr : igmp->igmp_group.s_addr; + + imo.imo_multicast_ifp = inm->inm_ifp; + imo.imo_multicast_ttl = 1; + imo.imo_multicast_vif = -1; + /* + * Request loopback of the report if we are acting as a multicast + * router, so that the process-level routing demon can hear it. + */ + imo.imo_multicast_loop = (ip_mrouter != NULL); + + /* + * XXX + * Do we have to worry about reentrancy here? Don't think so. + */ +#if IPSEC + m->m_pkthdr.rcvif = NULL; +#endif /*IPSEC*/ + ip_output(m, router_alert, &igmprt, 0, &imo); + + ++igmpstat.igps_snd_reports; +} diff --git a/bsd/netinet/igmp.h b/bsd/netinet/igmp.h new file mode 100644 index 000000000..b854c8ad0 --- /dev/null +++ b/bsd/netinet/igmp.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988 Stephen Deering. + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Stephen Deering of Stanford University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)igmp.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_IGMP_H_ +#define _NETINET_IGMP_H_ + +/* + * Internet Group Management Protocol (IGMP) definitions. + * + * Written by Steve Deering, Stanford, May 1988. 
+ * + * MULTICAST Revision: 3.5.1.2 + */ + +/* + * IGMP packet format. + */ +struct igmp { + u_char igmp_type; /* version & type of IGMP message */ + u_char igmp_code; /* subtype for routing msgs */ + u_short igmp_cksum; /* IP-style checksum */ + struct in_addr igmp_group; /* group address being reported */ +}; /* (zero for queries) */ + +#define IGMP_MINLEN 8 + +/* + * Message types, including version number. + */ +#define IGMP_MEMBERSHIP_QUERY 0x11 /* membership query */ +#define IGMP_V1_MEMBERSHIP_REPORT 0x12 /* Ver. 1 membership report */ +#define IGMP_V2_MEMBERSHIP_REPORT 0x16 /* Ver. 2 membership report */ +#define IGMP_V2_LEAVE_GROUP 0x17 /* Leave-group message */ + +#define IGMP_DVMRP 0x13 /* DVMRP routing message */ +#define IGMP_PIM 0x14 /* PIM routing message */ + +#define IGMP_MTRACE_RESP 0x1e /* traceroute resp.(to sender)*/ +#define IGMP_MTRACE 0x1f /* mcast traceroute messages */ + +#define IGMP_MAX_HOST_REPORT_DELAY 10 /* max delay for response to */ + /* query (in seconds) according */ + /* to RFC1112 */ + + +#define IGMP_TIMER_SCALE 10 /* denotes that the igmp code field */ + /* specifies time in 10th of seconds*/ + +/* + * The following four defininitions are for backwards compatibility. + * They should be removed as soon as all applications are updated to + * use the new constant names. + */ +#define IGMP_HOST_MEMBERSHIP_QUERY IGMP_MEMBERSHIP_QUERY +#define IGMP_HOST_MEMBERSHIP_REPORT IGMP_V1_MEMBERSHIP_REPORT +#define IGMP_HOST_NEW_MEMBERSHIP_REPORT IGMP_V2_MEMBERSHIP_REPORT +#define IGMP_HOST_LEAVE_MESSAGE IGMP_V2_LEAVE_GROUP + +#endif /* _NETINET_IGMP_H_ */ diff --git a/bsd/netinet/igmp_var.h b/bsd/netinet/igmp_var.h new file mode 100644 index 000000000..54cec3475 --- /dev/null +++ b/bsd/netinet/igmp_var.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988 Stephen Deering. + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Stephen Deering of Stanford University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)igmp_var.h 8.1 (Berkeley) 7/19/93 + */ + +#ifndef _NETINET_IGMP_VAR_H_ +#define _NETINET_IGMP_VAR_H_ + +/* + * Internet Group Management Protocol (IGMP), + * implementation-specific definitions. + * + * Written by Steve Deering, Stanford, May 1988. 
+ * + * MULTICAST Revision: 3.5.1.3 + */ + +struct igmpstat { + u_int igps_rcv_total; /* total IGMP messages received */ + u_int igps_rcv_tooshort; /* received with too few bytes */ + u_int igps_rcv_badsum; /* received with bad checksum */ + u_int igps_rcv_queries; /* received membership queries */ + u_int igps_rcv_badqueries; /* received invalid queries */ + u_int igps_rcv_reports; /* received membership reports */ + u_int igps_rcv_badreports; /* received invalid reports */ + u_int igps_rcv_ourreports; /* received reports for our groups */ + u_int igps_snd_reports; /* sent membership reports */ +}; + +#ifdef KERNEL +#define IGMP_RANDOM_DELAY(X) (random() % (X) + 1) + +/* + * States for IGMPv2's leave processing + */ +#define IGMP_OTHERMEMBER 0 +#define IGMP_IREPORTEDLAST 1 + +/* + * We must remember what version the subnet's querier is. + * We conveniently use the IGMP message type for the proper + * membership report to keep this state. + */ +#define IGMP_V1_ROUTER IGMP_V1_MEMBERSHIP_REPORT +#define IGMP_V2_ROUTER IGMP_V2_MEMBERSHIP_REPORT + +/* + * Revert to new router if we haven't heard from an old router in + * this amount of time. + */ +#define IGMP_AGE_THRESHOLD 540 + +void igmp_init __P((void)); +void igmp_input __P((struct mbuf *, int)); +void igmp_joingroup __P((struct in_multi *)); +void igmp_leavegroup __P((struct in_multi *)); +void igmp_fasttimo __P((void)); +void igmp_slowtimo __P((void)); + +SYSCTL_DECL(_net_inet_igmp); + +#endif + +/* + * Names for IGMP sysctl objects + */ +#define IGMPCTL_STATS 1 /* statistics (read-only) */ +#define IGMPCTL_MAXID 2 + +#define IGMPCTL_NAMES { \ + { 0, 0 }, \ + { "stats", CTLTYPE_STRUCT }, \ +} + +#endif + + diff --git a/bsd/netinet/in.c b/bsd/netinet/in.c new file mode 100644 index 000000000..1389cf3a2 --- /dev/null +++ b/bsd/netinet/in.c @@ -0,0 +1,1170 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in.c 8.4 (Berkeley) 1/9/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#if NGIF > 0 +#include "gif.h" +#include +#include +#endif + +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + +#include + + +static int in_mask2len __P((struct in_addr *)); +static void in_len2mask __P((struct in_addr *, int)); +static int in_lifaddr_ioctl __P((struct socket *, u_long, caddr_t, + struct ifnet *, struct proc *)); + +static void in_socktrim __P((struct sockaddr_in *)); +static int in_ifinit __P((struct ifnet *, + struct in_ifaddr *, struct sockaddr_in *, int)); + +static int subnetsarelocal = 0; +SYSCTL_INT(_net_inet_ip, OID_AUTO, subnets_are_local, CTLFLAG_RW, + &subnetsarelocal, 0, ""); + +struct in_multihead in_multihead; /* XXX BSS initialization */ + +extern void arp_rtrequest(); + + +/* + * Return 1 if an internet address is for a ``local'' host + * (one to which we have a connection). If subnetsarelocal + * is true, this includes other subnets of the local net. + * Otherwise, it includes only the directly-connected (sub)nets. 
+ */ +int +in_localaddr(in) + struct in_addr in; +{ + register u_long i = ntohl(in.s_addr); + register struct in_ifaddr *ia; + + if (subnetsarelocal) { + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) + if ((i & ia->ia_netmask) == ia->ia_net) + return (1); + } else { + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) + if ((i & ia->ia_subnetmask) == ia->ia_subnet) + return (1); + } + return (0); +} + +/* + * Determine whether an IP address is in a reserved set of addresses + * that may not be forwarded, or whether datagrams to that destination + * may be forwarded. + */ +int +in_canforward(in) + struct in_addr in; +{ + register u_long i = ntohl(in.s_addr); + register u_long net; + + if (IN_EXPERIMENTAL(i) || IN_MULTICAST(i)) + return (0); + if (IN_CLASSA(i)) { + net = i & IN_CLASSA_NET; + if (net == 0 || net == (IN_LOOPBACKNET << IN_CLASSA_NSHIFT)) + return (0); + } + return (1); +} + +/* + * Trim a mask in a sockaddr + */ +static void +in_socktrim(ap) +struct sockaddr_in *ap; +{ + register char *cplim = (char *) &ap->sin_addr; + register char *cp = (char *) (&ap->sin_addr + 1); + + ap->sin_len = 0; + while (--cp >= cplim) + if (*cp) { + (ap)->sin_len = cp - (char *) (ap) + 1; + break; + } +} + +static int +in_mask2len(mask) + struct in_addr *mask; +{ + int x, y; + u_char *p; + + p = (u_char *)mask; + for (x = 0; x < sizeof(*mask); x++) { + if (p[x] != 0xff) + break; + } + y = 0; + if (x < sizeof(*mask)) { + for (y = 0; y < 8; y++) { + if ((p[x] & (0x80 >> y)) == 0) + break; + } + } + return x * 8 + y; +} + +static void +in_len2mask(mask, len) + struct in_addr *mask; + int len; +{ + int i; + u_char *p; + + p = (u_char *)mask; + bzero(mask, sizeof(*mask)); + for (i = 0; i < len / 8; i++) + p[i] = 0xff; + if (len % 8) + p[i] = (0xff00 >> (len % 8)) & 0xff; +} + +static int in_interfaces; /* number of external internet interfaces */ + +/* + * Generic internet control operations (ioctl's). 
+ * Ifp is 0 if not an interface-specific ioctl. + */ +/* ARGSUSED */ +int +in_control(so, cmd, data, ifp, p) + struct socket *so; + u_long cmd; + caddr_t data; + register struct ifnet *ifp; + struct proc *p; +{ + register struct ifreq *ifr = (struct ifreq *)data; + register struct in_ifaddr *ia = 0, *iap; + register struct ifaddr *ifa; + struct in_ifaddr *oia; + struct in_aliasreq *ifra = (struct in_aliasreq *)data; + struct sockaddr_in oldaddr; + int error, hostIsNew, maskIsNew, s; + u_long i, dl_tag; + struct kev_msg ev_msg; + struct kev_in_data in_event_data; + +#if NGIF > 0 + if (ifp && ifp->if_type == IFT_GIF) { + switch (cmd) { + case SIOCSIFPHYADDR: +#if 1 + if (p && + (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return(error); +#else + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); +#endif + case SIOCGIFPSRCADDR: + case SIOCGIFPDSTADDR: + if (strcmp(ifp->if_name, "gif") == 0) + dl_tag = gif_attach_inet(ifp); + return gif_ioctl(ifp, cmd, data); + } + } +#endif +#if NFAITH > 0 + if (ifp && ifp->if_type == IFT_FAITH) + dl_tag = faith_attach_inet(ifp); +#endif + + switch (cmd) { + case SIOCALIFADDR: + case SIOCDLIFADDR: +#if 1 + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; +#else + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); +#endif + /*fall through*/ + case SIOCGLIFADDR: + if (!ifp) + return EINVAL; + return in_lifaddr_ioctl(so, cmd, data, ifp, p); + } + + /* + * Find address for this interface, if it exists. + * + * If an alias address was specified, find that one instead of + * the first one on the interface. 
+ */ + if (ifp) + for (iap = in_ifaddrhead.tqh_first; iap; + iap = iap->ia_link.tqe_next) + if (iap->ia_ifp == ifp) { + if (((struct sockaddr_in *)&ifr->ifr_addr)->sin_addr.s_addr == + iap->ia_addr.sin_addr.s_addr) { + ia = iap; + break; + } else if (ia == NULL) { + ia = iap; + if (ifr->ifr_addr.sa_family != AF_INET) + break; + } + } + + switch (cmd) { + + case SIOCAIFADDR: + case SIOCDIFADDR: + if (ifp == 0) + return (EADDRNOTAVAIL); + if (ifra->ifra_addr.sin_family == AF_INET) { + for (oia = ia; ia; ia = ia->ia_link.tqe_next) { + if (ia->ia_ifp == ifp && + ia->ia_addr.sin_addr.s_addr == + ifra->ifra_addr.sin_addr.s_addr) + break; + } + if ((ifp->if_flags & IFF_POINTOPOINT) + && (cmd == SIOCAIFADDR) + && (ifra->ifra_dstaddr.sin_addr.s_addr + == INADDR_ANY)) { + return EDESTADDRREQ; + } + } + if (cmd == SIOCDIFADDR && ia == 0) + return (EADDRNOTAVAIL); + /* FALLTHROUGH */ + case SIOCSIFADDR: + case SIOCSIFNETMASK: + case SIOCSIFDSTADDR: + +#if ISFB31 + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; +#else + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); +#endif + + if (ifp == 0) + return (EADDRNOTAVAIL); + if (ia == (struct in_ifaddr *)0) { + ia = (struct in_ifaddr *) + _MALLOC(sizeof *ia, M_IFADDR, M_WAITOK); + if (ia == (struct in_ifaddr *)NULL) + return (ENOBUFS); + bzero((caddr_t)ia, sizeof *ia); + /* + * Protect from ipintr() traversing address list + * while we're modifying it. 
+ */ + s = splnet(); + + TAILQ_INSERT_TAIL(&in_ifaddrhead, ia, ia_link); + ifa = &ia->ia_ifa; + TAILQ_INSERT_TAIL(&ifp->if_addrhead, ifa, ifa_link); + +/* + * Temorary code for protocol attachment XXX + */ + + if (strcmp(ifp->if_name, "en") == 0) + dl_tag = ether_attach_inet(ifp); + + if (strcmp(ifp->if_name, "lo") == 0) + dl_tag = lo_attach_inet(ifp); +/* End of temp code */ + + ifa->ifa_dlt = dl_tag; + ifa->ifa_addr = (struct sockaddr *)&ia->ia_addr; + ifa->ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr; + ifa->ifa_netmask = (struct sockaddr *)&ia->ia_sockmask; + ia->ia_sockmask.sin_len = 8; + if (ifp->if_flags & IFF_BROADCAST) { + ia->ia_broadaddr.sin_len = sizeof(ia->ia_addr); + ia->ia_broadaddr.sin_family = AF_INET; + } + ia->ia_ifp = ifp; + if (!(ifp->if_flags & IFF_LOOPBACK)) + in_interfaces++; + splx(s); + } + break; + + case SIOCSIFBRDADDR: +#if ISFB31 + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; +#else + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); +#endif + /* FALLTHROUGH */ + + case SIOCGIFADDR: + case SIOCGIFNETMASK: + case SIOCGIFDSTADDR: + case SIOCGIFBRDADDR: + if (ia == (struct in_ifaddr *)0) + return (EADDRNOTAVAIL); + break; + } + switch (cmd) { + + case SIOCGIFADDR: + *((struct sockaddr_in *)&ifr->ifr_addr) = ia->ia_addr; + break; + + case SIOCGIFBRDADDR: + if ((ifp->if_flags & IFF_BROADCAST) == 0) + return (EINVAL); + *((struct sockaddr_in *)&ifr->ifr_dstaddr) = ia->ia_broadaddr; + break; + + case SIOCGIFDSTADDR: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return (EINVAL); + *((struct sockaddr_in *)&ifr->ifr_dstaddr) = ia->ia_dstaddr; + break; + + case SIOCGIFNETMASK: + *((struct sockaddr_in *)&ifr->ifr_addr) = ia->ia_sockmask; + break; + + case SIOCSIFDSTADDR: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return (EINVAL); + oldaddr = ia->ia_dstaddr; + ia->ia_dstaddr = *(struct sockaddr_in *)&ifr->ifr_dstaddr; + error = dlil_ioctl(PF_INET, ifp, SIOCSIFDSTADDR, (caddr_t)ia); + if (error == 
EOPNOTSUPP) + error = 0; + + if (error) { + ia->ia_dstaddr = oldaddr; + return error; + } + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; + + ev_msg.event_code = KEV_INET_SIFDSTADDR; + + if (ia->ia_ifa.ifa_dstaddr) + in_event_data.ia_dstaddr = + ((struct sockaddr_in *)ia->ia_ifa.ifa_dstaddr)->sin_addr; + else + in_event_data.ia_dstaddr.s_addr = 0; + + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + strncpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); + in_event_data.link_data.if_family = ifp->if_family; + in_event_data.link_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_ptr = &in_event_data; + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + + + if (ia->ia_flags & IFA_ROUTE) { + ia->ia_ifa.ifa_dstaddr = (struct sockaddr *)&oldaddr; + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + ia->ia_ifa.ifa_dstaddr = + (struct sockaddr *)&ia->ia_dstaddr; + rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_HOST|RTF_UP); + } + break; + + case SIOCSIFBRDADDR: + if ((ifp->if_flags & IFF_BROADCAST) == 0) + return (EINVAL); + ia->ia_broadaddr = *(struct sockaddr_in *)&ifr->ifr_broadaddr; + + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; + + ev_msg.event_code = KEV_INET_SIFBRDADDR; + + if (ia->ia_ifa.ifa_dstaddr) + in_event_data.ia_dstaddr = + ((struct sockaddr_in *)ia->ia_ifa.ifa_dstaddr)->sin_addr; + else + in_event_data.ia_dstaddr.s_addr = 0; + + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = 
ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + strncpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); + in_event_data.link_data.if_family = ifp->if_family; + in_event_data.link_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_ptr = &in_event_data; + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + + break; + + case SIOCSIFADDR: + return (in_ifinit(ifp, ia, + (struct sockaddr_in *) &ifr->ifr_addr, 1)); + + case SIOCSIFNETMASK: + i = ifra->ifra_addr.sin_addr.s_addr; + ia->ia_subnetmask = ntohl(ia->ia_sockmask.sin_addr.s_addr = i); + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; + + ev_msg.event_code = KEV_INET_SIFNETMASK; + + if (ia->ia_ifa.ifa_dstaddr) + in_event_data.ia_dstaddr = + ((struct sockaddr_in *)ia->ia_ifa.ifa_dstaddr)->sin_addr; + else + in_event_data.ia_dstaddr.s_addr = 0; + + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + strncpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); + in_event_data.link_data.if_family = ifp->if_family; + in_event_data.link_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_ptr = &in_event_data; + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + + break; + + case SIOCAIFADDR: + maskIsNew = 0; + hostIsNew = 1; + error = 0; + if (ia->ia_addr.sin_family == AF_INET) { + if (ifra->ifra_addr.sin_len == 0) { + ifra->ifra_addr = ia->ia_addr; + hostIsNew = 0; + } else if (ifra->ifra_addr.sin_addr.s_addr == + ia->ia_addr.sin_addr.s_addr) + hostIsNew = 0; + } + if 
(ifra->ifra_mask.sin_len) { + in_ifscrub(ifp, ia); + ia->ia_sockmask = ifra->ifra_mask; + ia->ia_subnetmask = + ntohl(ia->ia_sockmask.sin_addr.s_addr); + maskIsNew = 1; + } + if ((ifp->if_flags & IFF_POINTOPOINT) && + (ifra->ifra_dstaddr.sin_family == AF_INET)) { + in_ifscrub(ifp, ia); + ia->ia_dstaddr = ifra->ifra_dstaddr; + maskIsNew = 1; /* We lie; but the effect's the same */ + } + if (ifra->ifra_addr.sin_family == AF_INET && + (hostIsNew || maskIsNew)) { + error = in_ifinit(ifp, ia, &ifra->ifra_addr, 0); + } + if ((ifp->if_flags & IFF_BROADCAST) && + (ifra->ifra_broadaddr.sin_family == AF_INET)) + ia->ia_broadaddr = ifra->ifra_broadaddr; + + /* + * Report event. + */ + + if (error == 0) { + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; + + if (hostIsNew) + ev_msg.event_code = KEV_INET_NEW_ADDR; + else + ev_msg.event_code = KEV_INET_CHANGED_ADDR; + + if (ia->ia_ifa.ifa_dstaddr) + in_event_data.ia_dstaddr = + ((struct sockaddr_in *)ia->ia_ifa.ifa_dstaddr)->sin_addr; + else + in_event_data.ia_dstaddr.s_addr = 0; + + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + strncpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); + in_event_data.link_data.if_family = ifp->if_family; + in_event_data.link_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_ptr = &in_event_data; + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + } + + return (error); + + case SIOCDIFADDR: + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; + + ev_msg.event_code = KEV_INET_ADDR_DELETED; + + if (ia->ia_ifa.ifa_dstaddr) + 
in_event_data.ia_dstaddr = + ((struct sockaddr_in *)ia->ia_ifa.ifa_dstaddr)->sin_addr; + else + in_event_data.ia_dstaddr.s_addr = 0; + + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + strncpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); + in_event_data.link_data.if_family = ifp->if_family; + in_event_data.link_data.if_unit = (unsigned long) ifp->if_unit; + + ev_msg.dv[0].data_ptr = &in_event_data; + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); + ev_msg.dv[1].data_length = 0; + + kev_post_msg(&ev_msg); + + in_ifscrub(ifp, ia); + /* + * Protect from ipintr() traversing address list + * while we're modifying it. + */ + s = splnet(); + + ifa = &ia->ia_ifa; + TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); + oia = ia; + TAILQ_REMOVE(&in_ifaddrhead, oia, ia_link); + IFAFREE(&oia->ia_ifa); + splx(s); + break; + + case SIOCSETOT: { + /* + * Inspiration from tcp_ctloutput() and ip_ctloutput() + */ + struct inpcb *inp, *cloned_inp; + int error = 0; + int cloned_fd = *(int *)data; + + s = splnet(); /* XXX */ + inp = sotoinpcb(so); + if (inp == NULL) { + splx(s); + break; + } + + /* let's make sure it's either -1 or a valid file descriptor */ + if (cloned_fd != -1) { + struct socket *cloned_so; + struct file *cloned_fp; + error = getsock(p->p_fd, cloned_fd, &cloned_fp); + if (error){ + splx(s); + break; + } + cloned_so = (struct socket *)cloned_fp->f_data; + cloned_inp = sotoinpcb(cloned_so); + } else { + cloned_inp = NULL; + } + + if (cloned_inp == NULL) { + /* OT always uses IP_PORTRANGE_HIGH */ + inp->inp_flags &= ~(INP_LOWPORT); + inp->inp_flags |= INP_HIGHPORT; + /* For UDP, OT allows broadcast by default */ + if (so->so_type == SOCK_DGRAM) + so->so_options |= SO_BROADCAST; + /* For TCP we want to see MSG_OOB when 
receive urgent data */ + else if (so->so_type == SOCK_STREAM) + so->so_options |= SO_WANTOOBFLAG; + } else { + inp->inp_ip_tos = cloned_inp->inp_ip_tos; + inp->inp_ip_ttl = cloned_inp->inp_ip_ttl; + inp->inp_flags = cloned_inp->inp_flags; + + /* Multicast options */ + if (cloned_inp->inp_moptions != NULL) { + int i; + struct ip_moptions *cloned_imo = cloned_inp->inp_moptions; + struct ip_moptions *imo = inp->inp_moptions; + + if (imo == NULL) { + /* + * No multicast option buffer attached to the pcb; + * allocate one. + */ + splx(); + imo = (struct ip_moptions*) + _MALLOC(sizeof(*imo), M_IPMOPTS, M_WAITOK); + if (imo == NULL) { + error = ENOBUFS; + break; + } + s = splnet(); /* XXX */ + inp->inp_moptions = imo; + } + imo->imo_multicast_ifp = cloned_imo->imo_multicast_ifp; + imo->imo_multicast_vif = cloned_imo->imo_multicast_vif; + imo->imo_multicast_ttl = cloned_imo->imo_multicast_ttl; + imo->imo_multicast_loop = cloned_imo->imo_multicast_loop; + imo->imo_num_memberships = cloned_imo->imo_num_memberships; + for (i = 0; i < cloned_imo->imo_num_memberships; i++) { + imo->imo_membership[i] = + in_addmulti(&cloned_imo->imo_membership[i]->inm_addr, + cloned_imo->imo_membership[i]->inm_ifp); + } + } + } + splx(s); + break; + } + + default: + return EOPNOTSUPP; + + } + return (0); +} + +/* + * SIOC[GAD]LIFADDR. + * SIOCGLIFADDR: get first address. (???) + * SIOCGLIFADDR with IFLR_PREFIX: + * get first address that matches the specified prefix. + * SIOCALIFADDR: add the specified address. + * SIOCALIFADDR with IFLR_PREFIX: + * EINVAL since we can't deduce hostid part of the address. + * SIOCDLIFADDR: delete the specified address. + * SIOCDLIFADDR with IFLR_PREFIX: + * delete the first address that matches the specified prefix. 
+ * return values: + * EINVAL on invalid parameters + * EADDRNOTAVAIL on prefix match failed/specified address not found + * other values may be returned from in_ioctl() + */ +static int +in_lifaddr_ioctl(so, cmd, data, ifp, p) + struct socket *so; + u_long cmd; + caddr_t data; + struct ifnet *ifp; + struct proc *p; +{ + struct if_laddrreq *iflr = (struct if_laddrreq *)data; + struct ifaddr *ifa; + + /* sanity checks */ + if (!data || !ifp) { + panic("invalid argument to in_lifaddr_ioctl"); + /*NOTRECHED*/ + } + + switch (cmd) { + case SIOCGLIFADDR: + /* address must be specified on GET with IFLR_PREFIX */ + if ((iflr->flags & IFLR_PREFIX) == 0) + break; + /*FALLTHROUGH*/ + case SIOCALIFADDR: + case SIOCDLIFADDR: + /* address must be specified on ADD and DELETE */ + if (iflr->addr.ss_family != AF_INET) + return EINVAL; + if (iflr->addr.ss_len != sizeof(struct sockaddr_in)) + return EINVAL; + /* XXX need improvement */ + if (iflr->dstaddr.ss_family + && iflr->dstaddr.ss_family != AF_INET) + return EINVAL; + if (iflr->dstaddr.ss_family + && iflr->dstaddr.ss_len != sizeof(struct sockaddr_in)) + return EINVAL; + break; + default: /*shouldn't happen*/ +#if 0 + panic("invalid cmd to in_lifaddr_ioctl"); + /*NOTREACHED*/ +#else + return EOPNOTSUPP; +#endif + } + if (sizeof(struct in_addr) * 8 < iflr->prefixlen) + return EINVAL; + + switch (cmd) { + case SIOCALIFADDR: + { + struct in_aliasreq ifra; + + if (iflr->flags & IFLR_PREFIX) + return EINVAL; + + /* copy args to in_aliasreq, perform ioctl(SIOCAIFADDR_IN6). 
*/ + bzero(&ifra, sizeof(ifra)); + bcopy(iflr->iflr_name, ifra.ifra_name, + sizeof(ifra.ifra_name)); + + bcopy(&iflr->addr, &ifra.ifra_addr, iflr->addr.ss_len); + + if (iflr->dstaddr.ss_family) { /*XXX*/ + bcopy(&iflr->dstaddr, &ifra.ifra_dstaddr, + iflr->dstaddr.ss_len); + } + + ifra.ifra_mask.sin_family = AF_INET; + ifra.ifra_mask.sin_len = sizeof(struct sockaddr_in); + in_len2mask(&ifra.ifra_mask.sin_addr, iflr->prefixlen); + + return in_control(so, SIOCAIFADDR, (caddr_t)&ifra, ifp, p); + } + case SIOCGLIFADDR: + case SIOCDLIFADDR: + { + struct in_ifaddr *ia; + struct in_addr mask, candidate, match; + struct sockaddr_in *sin; + int cmp; + + bzero(&mask, sizeof(mask)); + if (iflr->flags & IFLR_PREFIX) { + /* lookup a prefix rather than address. */ + in_len2mask(&mask, iflr->prefixlen); + + sin = (struct sockaddr_in *)&iflr->addr; + match.s_addr = sin->sin_addr.s_addr; + match.s_addr &= mask.s_addr; + + /* if you set extra bits, that's wrong */ + if (match.s_addr != sin->sin_addr.s_addr) + return EINVAL; + + cmp = 1; + } else { + if (cmd == SIOCGLIFADDR) { + /* on getting an address, take the 1st match */ + cmp = 0; /*XXX*/ + } else { + /* on deleting an address, do exact match */ + in_len2mask(&mask, 32); + sin = (struct sockaddr_in *)&iflr->addr; + match.s_addr = sin->sin_addr.s_addr; + + cmp = 1; + } + } + + TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (!cmp) + break; + candidate.s_addr = ((struct sockaddr_in *)&ifa->ifa_addr)->sin_addr.s_addr; + candidate.s_addr &= mask.s_addr; + if (candidate.s_addr == match.s_addr) + break; + } + if (!ifa) + return EADDRNOTAVAIL; + ia = (struct in_ifaddr *)ifa; + + if (cmd == SIOCGLIFADDR) { + /* fill in the if_laddrreq structure */ + bcopy(&ia->ia_addr, &iflr->addr, ia->ia_addr.sin_len); + + if ((ifp->if_flags & IFF_POINTOPOINT) != 0) { + bcopy(&ia->ia_dstaddr, &iflr->dstaddr, + ia->ia_dstaddr.sin_len); + } else + bzero(&iflr->dstaddr, sizeof(iflr->dstaddr)); 
+ + iflr->prefixlen = + in_mask2len(&ia->ia_sockmask.sin_addr); + + iflr->flags = 0; /*XXX*/ + + return 0; + } else { + struct in_aliasreq ifra; + + /* fill in_aliasreq and do ioctl(SIOCDIFADDR_IN6) */ + bzero(&ifra, sizeof(ifra)); + bcopy(iflr->iflr_name, ifra.ifra_name, + sizeof(ifra.ifra_name)); + + bcopy(&ia->ia_addr, &ifra.ifra_addr, + ia->ia_addr.sin_len); + if ((ifp->if_flags & IFF_POINTOPOINT) != 0) { + bcopy(&ia->ia_dstaddr, &ifra.ifra_dstaddr, + ia->ia_dstaddr.sin_len); + } + bcopy(&ia->ia_sockmask, &ifra.ifra_dstaddr, + ia->ia_sockmask.sin_len); + + return in_control(so, SIOCDIFADDR, (caddr_t)&ifra, + ifp, p); + } + } + } + + return EOPNOTSUPP; /*just for safety*/ +} + +/* + * Delete any existing route for an interface. + */ +void +in_ifscrub(ifp, ia) + register struct ifnet *ifp; + register struct in_ifaddr *ia; +{ + + if ((ia->ia_flags & IFA_ROUTE) == 0) + return; + if (ifp->if_flags & (IFF_LOOPBACK|IFF_POINTOPOINT)) + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + else + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, 0); + ia->ia_flags &= ~IFA_ROUTE; +} + +/* + * Initialize an interface's internet address + * and routing table entry. 
+ */ +static int +in_ifinit(ifp, ia, sin, scrub) + register struct ifnet *ifp; + register struct in_ifaddr *ia; + struct sockaddr_in *sin; + int scrub; +{ + register u_long i = ntohl(sin->sin_addr.s_addr); + struct sockaddr_in oldaddr; + int s = splimp(), flags = RTF_UP, error; + u_long dl_tag; + + + + oldaddr = ia->ia_addr; + ia->ia_addr = *sin; + + + error = dlil_ioctl(PF_INET, ifp, SIOCSIFADDR, (caddr_t)ia); + if (error == EOPNOTSUPP) + error = 0; + + if (error) { + splx(s); + ia->ia_addr = oldaddr; + return (error); + } + + splx(s); + if (scrub) { + ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr; + in_ifscrub(ifp, ia); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + } + if (IN_CLASSA(i)) + ia->ia_netmask = IN_CLASSA_NET; + else if (IN_CLASSB(i)) + ia->ia_netmask = IN_CLASSB_NET; + else + ia->ia_netmask = IN_CLASSC_NET; + /* + * The subnet mask usually includes at least the standard network part, + * but may may be smaller in the case of supernetting. + * If it is set, we believe it. + */ + if (ia->ia_subnetmask == 0) { + ia->ia_subnetmask = ia->ia_netmask; + ia->ia_sockmask.sin_addr.s_addr = htonl(ia->ia_subnetmask); + } else + ia->ia_netmask &= ia->ia_subnetmask; + ia->ia_net = i & ia->ia_netmask; + ia->ia_subnet = i & ia->ia_subnetmask; + in_socktrim(&ia->ia_sockmask); + /* + * Add route for the network. 
+ */ + ia->ia_ifa.ifa_metric = ifp->if_metric; + if (ifp->if_flags & IFF_BROADCAST) { + ia->ia_broadaddr.sin_addr.s_addr = + htonl(ia->ia_subnet | ~ia->ia_subnetmask); + ia->ia_netbroadcast.s_addr = + htonl(ia->ia_net | ~ ia->ia_netmask); + } else if (ifp->if_flags & IFF_LOOPBACK) { + ia->ia_ifa.ifa_dstaddr = ia->ia_ifa.ifa_addr; + flags |= RTF_HOST; + } else if (ifp->if_flags & IFF_POINTOPOINT) { + if (ia->ia_dstaddr.sin_family != AF_INET) + return (0); + flags |= RTF_HOST; + } + if ((error = rtinit(&(ia->ia_ifa), (int)RTM_ADD, flags)) == 0) + ia->ia_flags |= IFA_ROUTE; + + /* + * If the interface supports multicast, join the "all hosts" + * multicast group on that interface. + */ + if (ifp->if_flags & IFF_MULTICAST) { + struct in_addr addr; + + addr.s_addr = htonl(INADDR_ALLHOSTS_GROUP); + in_addmulti(&addr, ifp); + } + return (error); +} + + +/* + * Return 1 if the address might be a local broadcast address. + */ +int +in_broadcast(in, ifp) + struct in_addr in; + struct ifnet *ifp; +{ + register struct ifaddr *ifa; + u_long t; + + if (in.s_addr == INADDR_BROADCAST || + in.s_addr == INADDR_ANY) + return 1; + if ((ifp->if_flags & IFF_BROADCAST) == 0) + return 0; + t = ntohl(in.s_addr); + /* + * Look through the list of addresses for a match + * with a broadcast address. + */ +#define ia ((struct in_ifaddr *)ifa) + for (ifa = ifp->if_addrhead.tqh_first; ifa; + ifa = ifa->ifa_link.tqe_next) + if (ifa->ifa_addr->sa_family == AF_INET && + (in.s_addr == ia->ia_broadaddr.sin_addr.s_addr || + in.s_addr == ia->ia_netbroadcast.s_addr || + /* + * Check for old-style (host 0) broadcast. + */ + t == ia->ia_subnet || t == ia->ia_net) && + /* + * Check for an all one subnetmask. These + * only exist when an interface gets a secondary + * address. + */ + ia->ia_subnetmask != (u_long)0xffffffff) + return 1; + return (0); +#undef ia +} +/* + * Add an address to the list of IP multicast addresses for a given interface. 
+ */ +struct in_multi * +in_addmulti(ap, ifp) + register struct in_addr *ap; + register struct ifnet *ifp; +{ + register struct in_multi *inm; + int error; + struct sockaddr_in sin; + struct ifmultiaddr *ifma; + int s = splnet(); + + /* + * Call generic routine to add membership or increment + * refcount. It wants addresses in the form of a sockaddr, + * so we build one here (being careful to zero the unused bytes). + */ + bzero(&sin, sizeof sin); + sin.sin_family = AF_INET; + sin.sin_len = sizeof sin; + sin.sin_addr = *ap; + error = if_addmulti(ifp, (struct sockaddr *)&sin, &ifma); + if (error) { + splx(s); + return 0; + } + + /* + * If ifma->ifma_protospec is null, then if_addmulti() created + * a new record. Otherwise, we are done. + */ + if (ifma->ifma_protospec != 0) + return ifma->ifma_protospec; + + /* XXX - if_addmulti uses M_WAITOK. Can this really be called + at interrupt time? If so, need to fix if_addmulti. XXX */ + inm = (struct in_multi *) _MALLOC(sizeof(*inm), M_IPMADDR, M_NOWAIT); + if (inm == NULL) { + splx(s); + return (NULL); + } + + bzero(inm, sizeof *inm); + inm->inm_addr = *ap; + inm->inm_ifp = ifp; + inm->inm_ifma = ifma; + ifma->ifma_protospec = inm; + LIST_INSERT_HEAD(&in_multihead, inm, inm_link); + + /* + * Let IGMP know that we have joined a new IP multicast group. + */ + igmp_joingroup(inm); + splx(s); + return (inm); +} + +/* + * Delete a multicast address record. + */ +void +in_delmulti(inm) + register struct in_multi *inm; +{ + struct ifmultiaddr *ifma = inm->inm_ifma; + int s = splnet(); + + if (ifma->ifma_refcount == 1) { + /* + * No remaining claims to this record; let IGMP know that + * we are leaving the multicast group. + */ + igmp_leavegroup(inm); + ifma->ifma_protospec = 0; + LIST_REMOVE(inm, inm_link); + FREE(inm, M_IPMADDR); + } + /* XXX - should be separate API for when we have an ifma? 
*/ + if_delmulti(ifma->ifma_ifp, ifma->ifma_addr); + splx(s); + + +} diff --git a/bsd/netinet/in.h b/bsd/netinet/in.h new file mode 100644 index 000000000..e62cdc73f --- /dev/null +++ b/bsd/netinet/in.h @@ -0,0 +1,516 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in.h 8.3 (Berkeley) 1/3/94 + */ + +#ifndef _NETINET_IN_H_ +#define _NETINET_IN_H_ + +/* + * Constants and structures defined by the internet system, + * Per RFC 790, September 1981, and numerous additions. 
+ */ + +/* + * Protocols (RFC 1700) + */ +#define IPPROTO_IP 0 /* dummy for IP */ +#define IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */ +#define IPPROTO_ICMP 1 /* control message protocol */ +#define IPPROTO_IGMP 2 /* group mgmt protocol */ +#define IPPROTO_GGP 3 /* gateway^2 (deprecated) */ +#define IPPROTO_IPIP 4 /* IP encapsulation in IP */ +#define IPPROTO_IPV4 4 /* IP header */ +#define IPPROTO_TCP 6 /* tcp */ +#define IPPROTO_ST 7 /* Stream protocol II */ +#define IPPROTO_EGP 8 /* exterior gateway protocol */ +#define IPPROTO_PIGP 9 /* private interior gateway */ +#define IPPROTO_RCCMON 10 /* BBN RCC Monitoring */ +#define IPPROTO_NVPII 11 /* network voice protocol*/ +#define IPPROTO_PUP 12 /* pup */ +#define IPPROTO_ARGUS 13 /* Argus */ +#define IPPROTO_EMCON 14 /* EMCON */ +#define IPPROTO_XNET 15 /* Cross Net Debugger */ +#define IPPROTO_CHAOS 16 /* Chaos*/ +#define IPPROTO_UDP 17 /* user datagram protocol */ +#define IPPROTO_MUX 18 /* Multiplexing */ +#define IPPROTO_MEAS 19 /* DCN Measurement Subsystems */ +#define IPPROTO_HMP 20 /* Host Monitoring */ +#define IPPROTO_PRM 21 /* Packet Radio Measurement */ +#define IPPROTO_IDP 22 /* xns idp */ +#define IPPROTO_TRUNK1 23 /* Trunk-1 */ +#define IPPROTO_TRUNK2 24 /* Trunk-2 */ +#define IPPROTO_LEAF1 25 /* Leaf-1 */ +#define IPPROTO_LEAF2 26 /* Leaf-2 */ +#define IPPROTO_RDP 27 /* Reliable Data */ +#define IPPROTO_IRTP 28 /* Reliable Transaction */ +#define IPPROTO_TP 29 /* tp-4 w/ class negotiation */ +#define IPPROTO_BLT 30 /* Bulk Data Transfer */ +#define IPPROTO_NSP 31 /* Network Services */ +#define IPPROTO_INP 32 /* Merit Internodal */ +#define IPPROTO_SEP 33 /* Sequential Exchange */ +#define IPPROTO_3PC 34 /* Third Party Connect */ +#define IPPROTO_IDPR 35 /* InterDomain Policy Routing */ +#define IPPROTO_XTP 36 /* XTP */ +#define IPPROTO_DDP 37 /* Datagram Delivery */ +#define IPPROTO_CMTP 38 /* Control Message Transport */ +#define IPPROTO_TPXX 39 /* TP++ Transport */ +#define IPPROTO_IL 40 /* IL 
transport protocol */ +#define IPPROTO_IPV6 41 /* IP6 header */ +#define IPPROTO_SDRP 42 /* Source Demand Routing */ +#define IPPROTO_ROUTING 43 /* IP6 routing header */ +#define IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */ +#define IPPROTO_IDRP 45 /* InterDomain Routing*/ +#define IPPROTO_RSVP 46 /* resource reservation */ +#define IPPROTO_GRE 47 /* General Routing Encap. */ +#define IPPROTO_MHRP 48 /* Mobile Host Routing */ +#define IPPROTO_BHA 49 /* BHA */ +#define IPPROTO_ESP 50 /* SIPP Encap Sec. Payload */ +#define IPPROTO_AH 51 /* SIPP Auth Header */ +#define IPPROTO_INLSP 52 /* Integ. Net Layer Security */ +#define IPPROTO_SWIPE 53 /* IP with encryption */ +#define IPPROTO_NHRP 54 /* Next Hop Resolution */ +/* 55-57: Unassigned */ +#define IPPROTO_ICMPV6 58 /* ICMP6 */ +#define IPPROTO_NONE 59 /* IP6 no next header */ +#define IPPROTO_DSTOPTS 60 /* IP6 destination option */ +#define IPPROTO_AHIP 61 /* any host internal protocol */ +#define IPPROTO_CFTP 62 /* CFTP */ +#define IPPROTO_HELLO 63 /* "hello" routing protocol */ +#define IPPROTO_SATEXPAK 64 /* SATNET/Backroom EXPAK */ +#define IPPROTO_KRYPTOLAN 65 /* Kryptolan */ +#define IPPROTO_RVD 66 /* Remote Virtual Disk */ +#define IPPROTO_IPPC 67 /* Pluribus Packet Core */ +#define IPPROTO_ADFS 68 /* Any distributed FS */ +#define IPPROTO_SATMON 69 /* Satnet Monitoring */ +#define IPPROTO_VISA 70 /* VISA Protocol */ +#define IPPROTO_IPCV 71 /* Packet Core Utility */ +#define IPPROTO_CPNX 72 /* Comp. Prot. Net. Executive */ +#define IPPROTO_CPHB 73 /* Comp. Prot. HeartBeat */ +#define IPPROTO_WSN 74 /* Wang Span Network */ +#define IPPROTO_PVP 75 /* Packet Video Protocol */ +#define IPPROTO_BRSATMON 76 /* BackRoom SATNET Monitoring */ +#define IPPROTO_ND 77 /* Sun net disk proto (temp.) 
*/ +#define IPPROTO_WBMON 78 /* WIDEBAND Monitoring */ +#define IPPROTO_WBEXPAK 79 /* WIDEBAND EXPAK */ +#define IPPROTO_EON 80 /* ISO cnlp */ +#define IPPROTO_VMTP 81 /* VMTP */ +#define IPPROTO_SVMTP 82 /* Secure VMTP */ +#define IPPROTO_VINES 83 /* Banyon VINES */ +#define IPPROTO_TTP 84 /* TTP */ +#define IPPROTO_IGP 85 /* NSFNET-IGP */ +#define IPPROTO_DGP 86 /* dissimilar gateway prot. */ +#define IPPROTO_TCF 87 /* TCF */ +#define IPPROTO_IGRP 88 /* Cisco/GXS IGRP */ +#define IPPROTO_OSPFIGP 89 /* OSPFIGP */ +#define IPPROTO_SRPC 90 /* Strite RPC protocol */ +#define IPPROTO_LARP 91 /* Locus Address Resoloution */ +#define IPPROTO_MTP 92 /* Multicast Transport */ +#define IPPROTO_AX25 93 /* AX.25 Frames */ +#define IPPROTO_IPEIP 94 /* IP encapsulated in IP */ +#define IPPROTO_MICP 95 /* Mobile Int.ing control */ +#define IPPROTO_SCCSP 96 /* Semaphore Comm. security */ +#define IPPROTO_ETHERIP 97 /* Ethernet IP encapsulation */ +#define IPPROTO_ENCAP 98 /* encapsulation header */ +#define IPPROTO_APES 99 /* any private encr. scheme */ +#define IPPROTO_GMTP 100 /* GMTP*/ +#define IPPROTO_IPCOMP 108 /* payload compression (IPComp) */ +/* 101-254: Partly Unassigned */ +#if defined(PM) +#define IPPROTO_PM 101 /* PM - Packet Management by SuMiRe */ +#endif +#define IPPROTO_PIM 103 /* Protocol Independent Mcast */ +#define IPPROTO_PGM 113 /* PGM */ +/* 255: Reserved */ +/* BSD Private, local use, namespace incursion */ +#define IPPROTO_DIVERT 254 /* divert pseudo-protocol */ +#define IPPROTO_RAW 255 /* raw IP packet */ +#define IPPROTO_MAX 256 + +/* last return value of *_input(), meaning "all job for this pkt is done". */ +#define IPPROTO_DONE 257 + +/* + * Local port number conventions: + * + * When a user does a bind(2) or connect(2) with a port number of zero, + * a non-conflicting local port address is chosen. + * The default range is IPPORT_RESERVED through + * IPPORT_USERRESERVED, although that is settable by sysctl. 
 *
 * A user may set the IPPROTO_IP option IP_PORTRANGE to change this
 * default assignment range.
 *
 * The value IP_PORTRANGE_DEFAULT causes the default behavior.
 *
 * The value IP_PORTRANGE_HIGH changes the range of candidate port numbers
 * into the "high" range.  These are reserved for client outbound connections
 * which do not want to be filtered by any firewalls.
 *
 * The value IP_PORTRANGE_LOW changes the range to the "low" area
 * that is (by convention) restricted to privileged processes.  This
 * convention is based on "vouchsafe" principles only.  It is only secure
 * if you trust the remote host to restrict these ports.
 *
 * The default range of ports and the high range can be changed by
 * sysctl(3).  (net.inet.ip.port{hi,low}{first,last}_auto)
 *
 * Changing those values has bad security implications if you are
 * using a stateless firewall that is allowing packets outside of that
 * range in order to allow transparent outgoing connections.
 *
 * Such a firewall configuration will generally depend on the use of these
 * default values.  If you change them, you may find your Security
 * Administrator looking for you with a heavy object.
 *
 * For a slightly more orthodox text view on this:
 *
 * ftp://ftp.isi.edu/in-notes/iana/assignments/port-numbers
 *
 * port numbers are divided into three ranges:
 *
 *   0 - 1023 Well Known Ports
 *   1024 - 49151 Registered Ports
 *   49152 - 65535 Dynamic and/or Private Ports
 *
 */

/*
 * Ports < IPPORT_RESERVED are reserved for
 * privileged processes (e.g. root).         (IP_PORTRANGE_LOW)
 * Ports > IPPORT_USERRESERVED are reserved
 * for servers, not necessarily privileged.
(IP_PORTRANGE_DEFAULT) + */ +#define IPPORT_RESERVED 1024 +#define IPPORT_USERRESERVED 5000 + +/* + * Default local port range to use by setting IP_PORTRANGE_HIGH + */ +#define IPPORT_HIFIRSTAUTO 49152 +#define IPPORT_HILASTAUTO 65535 + +/* + * Scanning for a free reserved port return a value below IPPORT_RESERVED, + * but higher than IPPORT_RESERVEDSTART. Traditionally the start value was + * 512, but that conflicts with some well-known-services that firewalls may + * have a fit if we use. + */ +#define IPPORT_RESERVEDSTART 600 + +/* + * Internet address (a structure for historical reasons) + */ +struct in_addr { + u_int32_t s_addr; +}; + +/* + * Definitions of bits in internet address integers. + * On subnets, the decomposition of addresses to host and net parts + * is done according to subnet mask, not the masks here. + */ +#define IN_CLASSA(i) (((u_int32_t)(i) & 0x80000000) == 0) +#define IN_CLASSA_NET 0xff000000 +#define IN_CLASSA_NSHIFT 24 +#define IN_CLASSA_HOST 0x00ffffff +#define IN_CLASSA_MAX 128 + +#define IN_CLASSB(i) (((u_int32_t)(i) & 0xc0000000) == 0x80000000) +#define IN_CLASSB_NET 0xffff0000 +#define IN_CLASSB_NSHIFT 16 +#define IN_CLASSB_HOST 0x0000ffff +#define IN_CLASSB_MAX 65536 + +#define IN_CLASSC(i) (((u_int32_t)(i) & 0xe0000000) == 0xc0000000) +#define IN_CLASSC_NET 0xffffff00 +#define IN_CLASSC_NSHIFT 8 +#define IN_CLASSC_HOST 0x000000ff + +#define IN_CLASSD(i) (((u_int32_t)(i) & 0xf0000000) == 0xe0000000) +#define IN_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IN_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IN_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ +#define IN_MULTICAST(i) IN_CLASSD(i) + +#define IN_EXPERIMENTAL(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) +#define IN_BADCLASS(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) + +#define INADDR_ANY (u_int32_t)0x00000000 +#define INADDR_LOOPBACK (u_int32_t)0x7f000001 +#define INADDR_BROADCAST (u_int32_t)0xffffffff /* must be masked */ +#ifndef KERNEL +#define INADDR_NONE 0xffffffff /* -1 return */ +#endif + +#define INADDR_UNSPEC_GROUP (u_int32_t)0xe0000000 /* 224.0.0.0 */ +#define INADDR_ALLHOSTS_GROUP (u_int32_t)0xe0000001 /* 224.0.0.1 */ +#define INADDR_ALLRTRS_GROUP (u_int32_t)0xe0000002 /* 224.0.0.2 */ +#define INADDR_MAX_LOCAL_GROUP (u_int32_t)0xe00000ff /* 224.0.0.255 */ + +#define IN_LOOPBACKNET 127 /* official! */ + +/* + * Socket address, internet style. + */ +struct sockaddr_in { + u_char sin_len; + u_char sin_family; + u_short sin_port; + struct in_addr sin_addr; + char sin_zero[8]; +}; + +#define INET_ADDRSTRLEN 16 + +/* + * Structure used to describe IP options. + * Used to store options internally, to pass them to a process, + * or to restore options retrieved earlier. + * The ip_dst is used for the first-hop gateway when using a source route + * (this gets put into the header proper). + */ +struct ip_opts { + struct in_addr ip_dst; /* first hop, 0 w/o src rt */ + char ip_opts[40]; /* actually variable in size */ +}; + +/* + * Options for use with [gs]etsockopt at the IP level. + * First word of comment is data type; bool is stored in int. + */ +#define IP_OPTIONS 1 /* buf/ip_opts; set/get IP options */ +#define IP_HDRINCL 2 /* int; header is included with data */ +#define IP_TOS 3 /* int; IP type of service and preced. 
*/ +#define IP_TTL 4 /* int; IP time to live */ +#define IP_RECVOPTS 5 /* bool; receive all IP opts w/dgram */ +#define IP_RECVRETOPTS 6 /* bool; receive IP opts for response */ +#define IP_RECVDSTADDR 7 /* bool; receive IP dst addr w/dgram */ +#define IP_RETOPTS 8 /* ip_opts; set/get IP options */ +#define IP_MULTICAST_IF 9 /* u_char; set/get IP multicast i/f */ +#define IP_MULTICAST_TTL 10 /* u_char; set/get IP multicast ttl */ +#define IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */ +#define IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */ +#define IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */ +#define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */ +#define IP_RSVP_ON 15 /* enable RSVP in kernel */ +#define IP_RSVP_OFF 16 /* disable RSVP in kernel */ +#define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */ +#define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */ +#define IP_PORTRANGE 19 /* int; range to choose for unspec port */ +#define IP_RECVIF 20 /* bool; receive reception if w/dgram */ +#define IP_IPSEC_POLICY 21 /* int; set/get security policy */ +#define IP_FAITH 22 /* bool; accept FAITH'ed connections */ +#define IP_STRIPHDR 23 /* bool: drop receive of raw IP header */ + + +#define IP_FW_ADD 50 /* add a firewall rule to chain */ +#define IP_FW_DEL 51 /* delete a firewall rule from chain */ +#define IP_FW_FLUSH 52 /* flush firewall rule chain */ +#define IP_FW_ZERO 53 /* clear single/all firewall counter(s) */ +#define IP_FW_GET 54 /* get entire firewall rule chain */ +#define IP_NAT 55 /* set/get NAT opts */ +#define IP_FW_RESETLOG 56 /* reset logging counters */ + +#define IP_DUMMYNET_CONFIGURE 60 /* add/configure a dummynet pipe */ +#define IP_DUMMYNET_DEL 61 /* delete a dummynet pipe from chain */ +#define IP_DUMMYNET_FLUSH 62 /* flush dummynet */ +#define IP_DUMMYNET_GET 64 /* get entire dummynet pipes */ + +/* + * Defaults and limits for options + */ +#define IP_DEFAULT_MULTICAST_TTL 1 /* 
normally limit m'casts to 1 hop */ +#define IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ +#define IP_MAX_MEMBERSHIPS 20 /* per socket */ + +/* + * Argument structure for IP_ADD_MEMBERSHIP and IP_DROP_MEMBERSHIP. + */ +struct ip_mreq { + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ +}; + +/* + * Argument for IP_PORTRANGE: + * - which range to search when port is unspecified at bind() or connect() + */ +#define IP_PORTRANGE_DEFAULT 0 /* default range */ +#define IP_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ +#define IP_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ + +/* + * Definitions for inet sysctl operations. + * + * Third level is protocol number. + * Fourth level is desired variable within that protocol. + */ +#define IPPROTO_MAXID (IPPROTO_ESP + 1) /* don't list to IPPROTO_MAX */ + +#define CTL_IPPROTO_NAMES { \ + { "ip", CTLTYPE_NODE }, \ + { "icmp", CTLTYPE_NODE }, \ + { "igmp", CTLTYPE_NODE }, \ + { "ggp", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "tcp", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { "egp", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "pup", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "udp", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "idp", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "ipsec", CTLTYPE_NODE }, \ +} + +/* + * Names for IP sysctl objects + */ +#define IPCTL_FORWARDING 1 /* act as router */ +#define IPCTL_SENDREDIRECTS 2 /* may send redirects when forwarding 
*/ +#define IPCTL_DEFTTL 3 /* default TTL */ +#ifdef notyet +#define IPCTL_DEFMTU 4 /* default MTU */ +#endif +#define IPCTL_RTEXPIRE 5 /* cloned route expiration time */ +#define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */ +#define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */ +#define IPCTL_SOURCEROUTE 8 /* may perform source routes */ +#define IPCTL_DIRECTEDBROADCAST 9 /* may re-broadcast received packets */ +#define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */ +#define IPCTL_INTRQDROPS 11 /* number of netisr q drops */ +#define IPCTL_STATS 12 /* ipstat structure */ +#define IPCTL_ACCEPTSOURCEROUTE 13 /* may accept source routed packets */ +#define IPCTL_FASTFORWARDING 14 /* use fast IP forwarding code */ +#define IPCTL_KEEPFAITH 15 +#define IPCTL_GIF_TTL 16 /* default TTL for gif encap packet */ +#define IPCTL_MAXID 17 + +#define IPCTL_NAMES { \ + { 0, 0 }, \ + { "forwarding", CTLTYPE_INT }, \ + { "redirect", CTLTYPE_INT }, \ + { "ttl", CTLTYPE_INT }, \ + { "mtu", CTLTYPE_INT }, \ + { "rtexpire", CTLTYPE_INT }, \ + { "rtminexpire", CTLTYPE_INT }, \ + { "rtmaxcache", CTLTYPE_INT }, \ + { "sourceroute", CTLTYPE_INT }, \ + { "directed-broadcast", CTLTYPE_INT }, \ + { "intr-queue-maxlen", CTLTYPE_INT }, \ + { "intr-queue-drops", CTLTYPE_INT }, \ + { "stats", CTLTYPE_STRUCT }, \ + { "accept_sourceroute", CTLTYPE_INT }, \ + { "fastforwarding", CTLTYPE_INT }, \ + { "keepfaith", CTLTYPE_INT }, \ + { "gifttl", CTLTYPE_INT }, \ +} + +/* INET6 stuff */ +#define __KAME_NETINET_IN_H_INCLUDED_ +#include +#undef __KAME_NETINET_IN_H_INCLUDED_ + +#ifdef KERNEL +struct ifnet; struct mbuf; /* forward declarations for Standard C */ + +int in_broadcast __P((struct in_addr, struct ifnet *)); +int in_canforward __P((struct in_addr)); +int in_cksum __P((struct mbuf *, int)); +int in_localaddr __P((struct in_addr)); +char *inet_ntoa __P((struct in_addr)); /* in libkern */ +u_long in_netof __P((struct in_addr)); +#endif /* KERNEL */ + +#endif diff --git 
a/bsd/netinet/in_bootp.c b/bsd/netinet/in_bootp.c new file mode 100644 index 000000000..025a1c379 --- /dev/null +++ b/bsd/netinet/in_bootp.c @@ -0,0 +1,640 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988-1999 Apple Computer, Inc. 
All Rights Reserved + */ + +/* + * bootp.c + * - be a BOOTP client over a particular interface to retrieve + * the IP address, netmask, and router + */ + +/* + * Modification History + * + * February 19, 1999 Dieter Siegmund (dieter@apple.com) + * - completely rewritten + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef BOOTP_DEBUG +#define dprintf(x) printf x; +#else BOOTP_DEBUG +#define dprintf(x) +#endif BOOTP_DEBUG + +/* ip address formatting macros */ +#define IP_FORMAT "%d.%d.%d.%d" +#define IP_CH(ip) ((u_char *)ip) +#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] + +/* tag values (from RFC 2132) */ +#define TAG_PAD 0 +#define TAG_END 255 +#define TAG_SUBNET_MASK 1 +#define TAG_ROUTER 3 +#define RFC_OPTIONS_MAGIC { 99, 130, 83, 99 } +static unsigned char rfc_magic[4] = RFC_OPTIONS_MAGIC; + + +static struct sockaddr_in blank_sin = { sizeof(struct sockaddr_in), + AF_INET }; + +static __inline__ void +print_reply(struct bootp *bp, int bp_len) +{ + int i, j, len; + + printf("bp_op = "); + if (bp->bp_op == BOOTREQUEST) printf("BOOTREQUEST\n"); + else if (bp->bp_op == BOOTREPLY) printf("BOOTREPLY\n"); + else + { + i = bp->bp_op; + printf("%d\n", i); + } + + i = bp->bp_htype; + printf("bp_htype = %d\n", i); + + len = bp->bp_hlen; + printf("bp_hlen = %d\n", len); + + i = bp->bp_hops; + printf("bp_hops = %d\n", i); + + printf("bp_xid = %lu\n", bp->bp_xid); + + printf("bp_secs = %u\n", bp->bp_secs); + + printf("bp_ciaddr = " IP_FORMAT "\n", IP_LIST(&bp->bp_ciaddr)); + printf("bp_yiaddr = " IP_FORMAT "\n", IP_LIST(&bp->bp_yiaddr)); + printf("bp_siaddr = " IP_FORMAT "\n", IP_LIST(&bp->bp_siaddr)); + printf("bp_giaddr = " IP_FORMAT "\n", IP_LIST(&bp->bp_giaddr)); + + printf("bp_chaddr = "); + for (j = 0; 
j < len; j++) + { + i = bp->bp_chaddr[j]; + printf("%0x", i); + if (j < (len - 1)) printf(":"); + } + printf("\n"); + + printf("bp_sname = %s\n", bp->bp_sname); + printf("bp_file = %s\n", bp->bp_file); +} + +static __inline__ void +print_reply_short(struct bootp *bp, int bp_len) +{ + printf("bp_yiaddr = " IP_FORMAT "\n", IP_LIST(&bp->bp_yiaddr)); + printf("bp_sname = %s\n", bp->bp_sname); +} + + +static __inline__ long +random_range(long bottom, long top) +{ + long number = top - bottom + 1; + long range_size = LONG_MAX / number; + return (((long)random()) / range_size + bottom); +} + +/* + * Function: make_bootp_request + * Purpose: + * Create a "blank" bootp packet. + */ +static void +make_bootp_request(struct bootp_packet * pkt, + u_char * hwaddr, u_char hwtype, u_char hwlen) +{ + bzero(pkt, sizeof (*pkt)); + pkt->bp_ip.ip_v = IPVERSION; + pkt->bp_ip.ip_hl = sizeof (struct ip) >> 2; + pkt->bp_ip.ip_id = htons(ip_id++); + pkt->bp_ip.ip_ttl = MAXTTL; + pkt->bp_ip.ip_p = IPPROTO_UDP; + pkt->bp_ip.ip_src.s_addr = 0; + pkt->bp_ip.ip_dst.s_addr = htonl(INADDR_BROADCAST); + pkt->bp_udp.uh_sport = htons(IPPORT_BOOTPC); + pkt->bp_udp.uh_dport = htons(IPPORT_BOOTPS); + pkt->bp_udp.uh_sum = 0; + pkt->bp_bootp.bp_op = BOOTREQUEST; + pkt->bp_bootp.bp_htype = hwtype; + pkt->bp_bootp.bp_hlen = hwlen; + pkt->bp_bootp.bp_ciaddr.s_addr = 0; + bcopy(hwaddr, pkt->bp_bootp.bp_chaddr, hwlen); + bcopy(rfc_magic, pkt->bp_bootp.bp_vend, sizeof(rfc_magic)); + pkt->bp_bootp.bp_vend[4] = TAG_END; + pkt->bp_udp.uh_ulen = htons(sizeof(pkt->bp_udp) + sizeof(pkt->bp_bootp)); + pkt->bp_ip.ip_len = htons(sizeof(struct ip) + ntohs(pkt->bp_udp.uh_ulen)); + pkt->bp_ip.ip_sum = 0; + return; +} + +/* + * Function: ip_pkt_to_mbuf + * Purpose: + * Put the given IP packet into an mbuf, calculate the + * IP checksum. 
+ */ +struct mbuf * +ip_pkt_to_mbuf(caddr_t pkt, int pktsize) +{ + struct ip * ip; + struct mbuf * m; + + m = (struct mbuf *)m_devget(pkt, pktsize, 0, 0, 0); + if (m == 0) { + printf("bootp: ip_pkt_to_mbuf: m_devget failed\n"); + return 0; + } + m->m_flags |= M_BCAST; + /* Compute the checksum */ + ip = mtod(m, struct ip *); + ip->ip_sum = 0; + ip->ip_sum = in_cksum(m, sizeof (struct ip)); + return (m); +} + +static __inline__ u_char * +link_address(struct sockaddr_dl * dl_p) +{ + return (dl_p->sdl_data + dl_p->sdl_nlen); +} + +static __inline__ void +link_print(struct sockaddr_dl * dl_p) +{ + int i; + +#if 0 + printf("len %d index %d family %d type 0x%x nlen %d alen %d" + " slen %d addr ", dl_p->sdl_len, + dl_p->sdl_index, dl_p->sdl_family, dl_p->sdl_type, + dl_p->sdl_nlen, dl_p->sdl_alen, dl_p->sdl_slen); +#endif 0 + for (i = 0; i < dl_p->sdl_alen; i++) + printf("%s%x", i ? ":" : "", + (link_address(dl_p))[i]); + printf("\n"); + return; +} + +static struct sockaddr_dl * +link_from_ifnet(struct ifnet * ifp) +{ + struct ifaddr * addr; + +/* for (addr = ifp->if_addrlist; addr; addr = addr->ifa_next) */ + + TAILQ_FOREACH(addr, &ifp->if_addrhead, ifa_link) { + if (addr->ifa_addr->sa_family == AF_LINK) { + struct sockaddr_dl * dl_p = (struct sockaddr_dl *)(addr->ifa_addr); + + return (dl_p); + } + } + return (NULL); +} + +/* + * Function: send_bootp_request + * Purpose: + * Send the request by calling the interface's output routine + * bypassing routing code. + */ +static int +send_bootp_request(struct ifnet * ifp, struct socket * so, + struct bootp_packet * pkt) +{ + struct mbuf * m; + struct sockaddr_in sin; + + /* Address to send to */ + sin = blank_sin; + sin.sin_port = htons(IPPORT_BOOTPS); + sin.sin_addr.s_addr = INADDR_BROADCAST; + + m = ip_pkt_to_mbuf((caddr_t)pkt, sizeof(*pkt)); + return (dlil_output((u_long) ifp, m, 0, (struct sockaddr *)&sin, 0)); +} + +/* + * Function: receive_packet + * Purpose: + * Return a received packet or an error if none available. 
+ */ +int +receive_packet(struct socket * so, caddr_t pp, int psize) +{ + struct iovec aiov; + struct uio auio; + int rcvflg; + int error; + + aiov.iov_base = pp; + aiov.iov_len = psize; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_offset = 0; + auio.uio_resid = psize; + auio.uio_rw = UIO_READ; + rcvflg = MSG_WAITALL; + + error = soreceive(so, (struct sockaddr **) 0, &auio, 0, 0, &rcvflg); + return (error); +} + +/* + * Function: bootp_timeout + * Purpose: + * Wakeup the process waiting for something on a socket. + */ +static void +bootp_timeout(struct socket * * socketflag) +{ + struct socket * so = *socketflag; + boolean_t funnel_state; + + dprintf(("bootp: timeout\n")); + + funnel_state = thread_funnel_set(network_flock,TRUE); + *socketflag = NULL; + sowakeup(so, &so->so_rcv); + (void) thread_funnel_set(network_flock, FALSE); + return; +} + +#define TAG_OFFSET 0 +#define LEN_OFFSET 1 +#define OPTION_OFFSET 2 + +void * +packet_option(struct bootp * pkt, u_char t) +{ + void * buffer = pkt->bp_vend + sizeof(rfc_magic); + int len; + unsigned char option_len; + void * ret = NULL; + unsigned char * scan; + unsigned char tag = TAG_PAD; + + len = sizeof(pkt->bp_vend) - sizeof(rfc_magic); + for (scan = buffer; len > 0; ) { + tag = scan[TAG_OFFSET]; + if (tag == TAG_END) /* we hit the end of the options */ + break; + if (tag == TAG_PAD) { /* discard pad characters */ + scan++; + len--; + } + else { + if (t == tag && ret == NULL) + ret = scan + OPTION_OFFSET; + option_len = scan[LEN_OFFSET]; + len -= (option_len + 2); + scan += (option_len + 2); + } + } + if (len < 0 || tag != TAG_END) { /* we ran off the end */ + if (len < 0) { + dprintf(("bootp: error parsing options\n")); + } + else { + dprintf(("bootp: end tag missing\n")); + } + ret = NULL; + } + return (ret); +} + +/* + * Function: rate_packet + * Purpose: + * Return an integer point rating value for the given bootp packet. 
+ * If yiaddr non-zero, the packet gets a rating of 1. + * Another point is given if the packet contains the subnet mask, + * and another if the router is present. + */ +#define GOOD_RATING 3 +static __inline__ int +rate_packet(struct bootp * pkt) +{ + int rating = 0; + + if (pkt->bp_yiaddr.s_addr) { + struct in_addr * ip; + + rating++; + ip = (struct in_addr *)packet_option(pkt, TAG_SUBNET_MASK); + if (ip) + rating++; + ip = (struct in_addr *)packet_option(pkt, TAG_ROUTER); + if (ip) + rating++; + } + return (rating); +} + +#define INITIAL_WAIT_SECS 4 +#define MAX_WAIT_SECS 64 +#define GATHER_TIME_SECS 2 +#define RAND_TICKS (hz) /* one second */ + +/* + * Function: bootp_loop + * Purpose: + * Do the actual BOOTP protocol. + * The algorithm sends out a packet, waits for a response. + * We try max_try times, waiting in an exponentially increasing + * amount of time. Once we receive a good response, we start + * a new time period called the "gather time", during which we + * either find the perfect packet (one that has ip, mask and router) + * or we continue to gather responses. At the end of the gather period, + * we use the best response gathered. 
+ */ +static int +bootp_loop(struct socket * so, struct ifnet * ifp, int max_try, + struct in_addr * iaddr_p, struct in_addr * netmask_p, + struct in_addr * router_p) +{ + struct timeval current_time; + struct sockaddr_dl * dl_p; + int error = 0; + char * hwaddr; + int hwlen; + char hwtype = 0; + struct bootp_packet * request = NULL; + struct bootp * reply = NULL; + struct bootp * saved_reply = NULL; + struct timeval start_time; + u_long xid; + int retry; + struct socket * timeflag; + int wait_ticks = INITIAL_WAIT_SECS * hz; + + /* get the hardware address from the interface */ + dl_p = link_from_ifnet(ifp); + if (dl_p == NULL) { + printf("bootp: can't get link address\n"); + return (ENXIO); + } + + printf("bootp: h/w addr "); + link_print(dl_p); + + hwaddr = link_address(dl_p); + hwlen = dl_p->sdl_alen; + switch (dl_p->sdl_type) { + case IFT_ETHER: + hwtype = ARPHRD_ETHER; + break; + default: + printf("bootp: hardware type %d not supported\n", + dl_p->sdl_type); + panic("bootp: hardware type not supported"); + break; + } + + /* set transaction id and remember the start time */ + microtime(&start_time); + current_time = start_time; + xid = random(); + + /* make a request/reply packet */ + request = (struct bootp_packet *)kalloc(sizeof(*request)); + make_bootp_request(request, hwaddr, hwtype, hwlen); + reply = (struct bootp *)kalloc(sizeof(*reply)); + saved_reply = (struct bootp *)kalloc(sizeof(*saved_reply)); + iaddr_p->s_addr = 0; + printf("bootp: sending request"); + for (retry = 0; retry < max_try; retry++) { + int gather_count = 0; + int last_rating = 0; + + /* Send the request */ + printf("."); + request->bp_bootp.bp_secs = htons((u_short)(current_time.tv_sec + - start_time.tv_sec)); + request->bp_bootp.bp_xid = htonl(xid); + error = send_bootp_request(ifp, so, request); + if (error) + goto cleanup; + + timeflag = so; + wait_ticks += random_range(-RAND_TICKS, RAND_TICKS); + dprintf(("bootp: waiting %d ticks\n", wait_ticks)); + timeout(bootp_timeout, &timeflag, 
wait_ticks); + + while (TRUE) { + error = receive_packet(so, (caddr_t)reply, sizeof(*reply)); + if (error == 0) { + dprintf(("\nbootp: received packet\n")); + if (ntohl(reply->bp_xid) == xid + && reply->bp_yiaddr.s_addr + && bcmp(reply->bp_chaddr, hwaddr, hwlen) == 0) { + int rating; +#ifdef BOOTP_DEBUG + print_reply_short(reply, sizeof(*reply)); +#endif BOOTP_DEBUG + rating = rate_packet(reply); + if (rating > last_rating) + *saved_reply = *reply; + if (rating >= GOOD_RATING) { + untimeout(bootp_timeout, &timeflag); + goto save_values; + } + if (gather_count == 0) { + untimeout(bootp_timeout, &timeflag); + timeflag = so; + timeout(bootp_timeout, &timeflag, + hz * GATHER_TIME_SECS); + } + gather_count++; + } + else { + dprintf(("bootp: packet ignored\n")); + } + } + else if ((error != EWOULDBLOCK)) { + break; + } + else if (timeflag == NULL) { /* timed out */ + if (gather_count) { + dprintf(("bootp: gathering time has expired")); + goto save_values; /* we have a packet */ + } + break; /* retry */ + } + else + sbwait(&so->so_rcv); + } + if (error && (error != EWOULDBLOCK)) { + dprintf(("bootp: failed to receive packets: %d\n", error)); + untimeout(bootp_timeout, &timeflag); + goto cleanup; + } + wait_ticks *= 2; + if (wait_ticks > (MAX_WAIT_SECS * hz)) + wait_ticks = MAX_WAIT_SECS * hz; + xid++; + microtime(¤t_time); + } + error = ETIMEDOUT; + goto cleanup; + + save_values: + error = 0; + printf("\nbootp: got response from %s (" IP_FORMAT ")\n", + saved_reply->bp_sname, IP_LIST(&saved_reply->bp_siaddr)); + /* return the ip address */ + *iaddr_p = saved_reply->bp_yiaddr; + { + struct in_addr * ip; + ip = (struct in_addr *)packet_option(saved_reply, TAG_SUBNET_MASK); + if (ip) + *netmask_p = *ip; + ip = (struct in_addr *)packet_option(saved_reply, TAG_ROUTER); + if (ip) + *router_p = *ip; + } + + cleanup: + if (request) + kfree((caddr_t)request, sizeof (*request)); + if (reply) + kfree((caddr_t)reply, sizeof(*reply)); + if (saved_reply) + kfree((caddr_t)saved_reply, 
sizeof(*saved_reply)); + return (error); +} + +/* + * Routine: bootp + * Function: + * Use the BOOTP protocol to resolve what our IP address should be + * on a particular interface. + */ +int bootp(struct ifnet * ifp, struct in_addr * iaddr_p, int max_try, + struct in_addr * netmask_p, struct in_addr * router_p, + struct proc * procp) +{ + boolean_t addr_set = FALSE; + struct ifreq ifr; + int error; + struct socket * so = NULL; + + /* get a socket */ + error = socreate(AF_INET, &so, SOCK_DGRAM, 0); + if (error) { + dprintf(("bootp: socreate failed %d\n", error)); + return (error); + } + + /* assign the all-zeroes address */ + bzero(&ifr, sizeof(ifr)); + sprintf(ifr.ifr_name, "%s%d", ifp->if_name, ifp->if_unit); + *((struct sockaddr_in *)&ifr.ifr_addr) = blank_sin; + error = ifioctl(so, SIOCSIFADDR, (caddr_t)&ifr, procp); + if (error) { + dprintf(("bootp: SIOCSIFADDR all-zeroes IP failed: %d\n", + error)); + goto cleanup; + } + dprintf(("bootp: all-zeroes IP address assigned\n")); + addr_set = TRUE; + + { /* bind the socket */ + struct sockaddr_in * sin; + + sin = _MALLOC(sizeof(struct sockaddr_in), M_IFADDR, M_NOWAIT); + sin->sin_len = sizeof(struct sockaddr_in); + sin->sin_family = AF_INET; + sin->sin_port = htons(IPPORT_BOOTPC); + sin->sin_addr.s_addr = INADDR_ANY; + error = sobind(so, (struct sockaddr *) sin); + + FREE(sin, M_IFADDR); + if (error) { + dprintf(("bootp: sobind failed, %d\n", error)); + goto cleanup; + } + so->so_state |= SS_NBIO; + } + /* do the protocol */ + error = bootp_loop(so, ifp, max_try, iaddr_p, netmask_p, router_p); + + cleanup: + if (so) { + if (addr_set) { + (void) ifioctl(so, SIOCDIFADDR, (caddr_t) &ifr, procp); + } + soclose(so); + } + return (error); +} + +/* + * Function: in_bootp + * Purpose: + * This is deprecated API. Once SIOCAUTOADDR is eliminated from + * the system (IOEthernet class as well), this routine can be removed. 
+ */ +int +in_bootp(struct ifnet * ifp, struct sockaddr_in * sin, u_char my_enaddr[6]) +{ + return (EOPNOTSUPP); +} diff --git a/bsd/netinet/in_cksum.c b/bsd/netinet/in_cksum.c new file mode 100644 index 000000000..4262277c9 --- /dev/null +++ b/bsd/netinet/in_cksum.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include + +#define DBG_FNC_IN_CKSUM NETDBG_CODE(DBG_NETIP, (3 << 8)) + +/* + * Checksum routine for Internet Protocol family headers (Portable Version). + * + * This routine is very heavily used in the network + * code and should be modified for each CPU to be as fast as possible. 
+ */ + + + +#if defined(ppc) + +int +in_cksum(m, len) + register struct mbuf *m; + register int len; +{ + register u_short *w; + register int sum = 0; + register int mlen = 0; + int starting_on_odd = 0; + + + KERNEL_DEBUG(DBG_FNC_IN_CKSUM | DBG_FUNC_START, len,0,0,0,0); + + for (;m && len; m = m->m_next) { + if (m->m_len == 0) + continue; + mlen = m->m_len; + w = mtod(m, u_short *); + + if (len < mlen) + mlen = len; + + sum = xsum_assym(w, mlen, sum, starting_on_odd); + len -= mlen; + if (mlen & 0x1) + { + if (starting_on_odd) + starting_on_odd = 0; + else + starting_on_odd = 1; + } + } + + KERNEL_DEBUG(DBG_FNC_IN_CKSUM | DBG_FUNC_END, 0,0,0,0,0); + return (~sum & 0xffff); +} + +#else + + +#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x) +#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);} + + + +int +in_cksum(m, len) + register struct mbuf *m; + register int len; +{ + register u_short *w; + register int sum = 0; + register int mlen = 0; + int byte_swapped = 0; + + union { + char c[2]; + u_short s; + } s_util; + union { + u_short s[2]; + long l; + } l_util; + + KERNEL_DEBUG(DBG_FNC_IN_CKSUM | DBG_FUNC_START, len,0,0,0,0); + + for (;m && len; m = m->m_next) { + if (m->m_len == 0) + continue; + w = mtod(m, u_short *); + if (mlen == -1) { + /* + * The first byte of this mbuf is the continuation + * of a word spanning between this mbuf and the + * last mbuf. + * + * s_util.c[0] is already saved when scanning previous + * mbuf. + */ + s_util.c[1] = *(char *)w; + sum += s_util.s; + w = (u_short *)((char *)w + 1); + mlen = m->m_len - 1; + len--; + } else + mlen = m->m_len; + if (len < mlen) + mlen = len; + len -= mlen; + /* + * Force to even boundary. + */ + if ((1 & (int) w) && (mlen > 0)) { + REDUCE; + sum <<= 8; + s_util.c[0] = *(u_char *)w; + w = (u_short *)((char *)w + 1); + mlen--; + byte_swapped = 1; + } + /* + * Unroll the loop to make overhead from + * branches &c small. 
+ */ + while ((mlen -= 32) >= 0) { + sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; + sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7]; + sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11]; + sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15]; + w += 16; + } + mlen += 32; + while ((mlen -= 8) >= 0) { + sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; + w += 4; + } + mlen += 8; + if (mlen == 0 && byte_swapped == 0) + continue; + REDUCE; + while ((mlen -= 2) >= 0) { + sum += *w++; + } + if (byte_swapped) { + REDUCE; + sum <<= 8; + byte_swapped = 0; + if (mlen == -1) { + s_util.c[1] = *(char *)w; + sum += s_util.s; + mlen = 0; + } else + mlen = -1; + } else if (mlen == -1) + s_util.c[0] = *(char *)w; + } + if (len) + printf("cksum: out of data\n"); + if (mlen == -1) { + /* The last mbuf has odd # of bytes. Follow the + standard (the odd byte may be shifted left by 8 bits + or not as determined by endian-ness of the machine) */ + s_util.c[1] = 0; + sum += s_util.s; + } + REDUCE; + KERNEL_DEBUG(DBG_FNC_IN_CKSUM | DBG_FUNC_END, 0,0,0,0,0); + return (~sum & 0xffff); +} + +#endif diff --git a/bsd/netinet/in_gif.c b/bsd/netinet/in_gif.c new file mode 100644 index 000000000..f57a3ea28 --- /dev/null +++ b/bsd/netinet/in_gif.c @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: in_gif.c,v 1.27 2000/03/30 01:29:05 jinmei Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * in_gif.c + */ +#if BSD310 +#include "opt_mrouting.h" +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include "opt_inet.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#ifdef __FreeBSD__ +#include +#include +#endif +#if !defined(__FreeBSD__) || __FreeBSD__ < 3 +#include +#endif +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#if INET6 +#include +#endif + +#if MROUTING +#include +#endif /* MROUTING */ + +#include + +#include "gif.h" + +#include + +#if NGIF > 0 +int ip_gif_ttl = GIF_TTL; +#else +int ip_gif_ttl = 0; +#endif + +extern struct protosw in_gif_protosw; + +SYSCTL_INT(_net_inet_ip, IPCTL_GIF_TTL, gifttl, + CTLFLAG_RW, &ip_gif_ttl , 0, ""); + +int +in_gif_output(ifp, family, m, rt) + struct ifnet *ifp; + int family; + struct mbuf *m; + struct rtentry *rt; +{ + register struct gif_softc *sc = (struct gif_softc*)ifp; + struct sockaddr_in *dst = (struct sockaddr_in *)&sc->gif_ro.ro_dst; + struct sockaddr_in *sin_src = (struct sockaddr_in *)sc->gif_psrc; + struct sockaddr_in *sin_dst = (struct sockaddr_in *)sc->gif_pdst; + struct ip iphdr; /* capsule IP header, host byte ordered */ + int proto, error; + u_int8_t tos; + + if (sin_src == NULL || sin_dst == NULL || + sin_src->sin_family != AF_INET || + 
sin_dst->sin_family != AF_INET) { + printf("in_gif_output: unknown family src=%x dst=%x\n", sin_src->sin_family, sin_dst->sin_family); + m_freem(m); + return EAFNOSUPPORT; + } + + switch (family) { +#if INET + case AF_INET: + { + struct ip *ip; + + proto = IPPROTO_IPV4; + if (m->m_len < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) + return ENOBUFS; + } + ip = mtod(m, struct ip *); + tos = ip->ip_tos; + break; + } +#endif /*INET*/ +#if INET6 + case AF_INET6: + { + struct ip6_hdr *ip6; + proto = IPPROTO_IPV6; + if (m->m_len < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) + return ENOBUFS; + } + ip6 = mtod(m, struct ip6_hdr *); + tos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; + break; + } +#endif /*INET6*/ + default: +#if DEBUG + printf("in_gif_output: warning: unknown family %d passed\n", + family); +#endif + m_freem(m); + return EAFNOSUPPORT; + } + + bzero(&iphdr, sizeof(iphdr)); + iphdr.ip_src = sin_src->sin_addr; + if (ifp->if_flags & IFF_LINK0) { + /* multi-destination mode */ + if (sin_dst->sin_addr.s_addr != INADDR_ANY) + iphdr.ip_dst = sin_dst->sin_addr; + else if (rt) { + if (family != AF_INET) { + m_freem(m); + return EINVAL; /*XXX*/ + } + iphdr.ip_dst = ((struct sockaddr_in *) + (rt->rt_gateway))->sin_addr; + } else { + m_freem(m); + return ENETUNREACH; + } + } else { + /* bidirectional configured tunnel mode */ + if (sin_dst->sin_addr.s_addr != INADDR_ANY) + iphdr.ip_dst = sin_dst->sin_addr; + else { + m_freem(m); + return ENETUNREACH; + } + } + iphdr.ip_p = proto; + /* version will be set in ip_output() */ + iphdr.ip_ttl = ip_gif_ttl; + iphdr.ip_len = m->m_pkthdr.len + sizeof(struct ip); + if (ifp->if_flags & IFF_LINK1) + ip_ecn_ingress(ECN_ALLOWED, &iphdr.ip_tos, &tos); + + /* prepend new IP header */ + M_PREPEND(m, sizeof(struct ip), M_DONTWAIT); + if (m && m->m_len < sizeof(struct ip)) + m = m_pullup(m, sizeof(struct ip)); + if (m == NULL) { + printf("ENOBUFS in in_gif_output %d\n", __LINE__); + return ENOBUFS; + } + + *(mtod(m, 
struct ip *)) = iphdr; + + if (dst->sin_family != sin_dst->sin_family || + dst->sin_addr.s_addr != sin_dst->sin_addr.s_addr) { + /* cache route doesn't match */ + dst->sin_family = sin_dst->sin_family; + dst->sin_len = sizeof(struct sockaddr_in); + dst->sin_addr = sin_dst->sin_addr; + if (sc->gif_ro.ro_rt) { + RTFREE(sc->gif_ro.ro_rt); + sc->gif_ro.ro_rt = NULL; + } +#if 0 + sc->gif_if.if_mtu = GIF_MTU; +#endif + } + + if (sc->gif_ro.ro_rt == NULL) { + rtalloc(&sc->gif_ro); + if (sc->gif_ro.ro_rt == NULL) { + m_freem(m); + return ENETUNREACH; + } +#if 0 + ifp->if_mtu = sc->gif_ro.ro_rt->rt_ifp->if_mtu + - sizeof(struct ip); +#endif + } + +#ifndef __OpenBSD__ + error = ip_output(m, NULL, &sc->gif_ro, 0, NULL); +#else + error = ip_output(m, NULL, &sc->gif_ro, 0, NULL, NULL); +#endif + return(error); +} + +void +in_gif_input(m, off) + struct mbuf *m; + int off; +{ + struct gif_softc *sc; + struct ifnet *gifp = NULL; + struct ip *ip; + int i, af, proto; + u_int8_t otos; + + if (gif == NULL) { + m_freem(m); + return; + } + + ip = mtod(m, struct ip *); + proto = ip->ip_p; + +#if 0 + /* this code will be soon improved. 
*/ +#define satosin(sa) ((struct sockaddr_in *)(sa)) + for (i = 0, sc = gif; i < ngif; i++, sc++) { + if (sc->gif_psrc == NULL + || sc->gif_pdst == NULL + || sc->gif_psrc->sa_family != AF_INET + || sc->gif_pdst->sa_family != AF_INET) { + continue; + } + + if ((sc->gif_if.if_flags & IFF_UP) == 0) + continue; + + if ((sc->gif_if.if_flags & IFF_LINK0) + && satosin(sc->gif_psrc)->sin_addr.s_addr == ip->ip_dst.s_addr + && satosin(sc->gif_pdst)->sin_addr.s_addr == INADDR_ANY) { + gifp = &sc->gif_if; + continue; + } + + if (satosin(sc->gif_psrc)->sin_addr.s_addr == ip->ip_dst.s_addr + && satosin(sc->gif_pdst)->sin_addr.s_addr == ip->ip_src.s_addr) + { + gifp = &sc->gif_if; + break; + } + } +#else + gifp = (struct ifnet *)encap_getarg(m); +#endif + + if (gifp == NULL) { + /* for backward compatibility */ + if (proto == IPPROTO_IPV4) { +#ifdef __OpenBSD__ +#if defined(MROUTING) || defined(IPSEC) + ip4_input(m, off, proto); + return; +#endif +#else +#if MROUTING + ipip_input(m, off); + return; +#endif /*MROUTING*/ +#endif + } + m_freem(m); + ipstat.ips_nogif++; + return; + } + + if ((gifp->if_flags & IFF_UP) == 0) { + m_freem(m); + ipstat.ips_nogif++; + return; + } + + otos = ip->ip_tos; + m_adj(m, off); + + switch (proto) { +#if INET + case IPPROTO_IPV4: + { + struct ip *ip; + af = AF_INET; + if (m->m_len < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) + return; + } + ip = mtod(m, struct ip *); + if (gifp->if_flags & IFF_LINK1) + ip_ecn_egress(ECN_ALLOWED, &otos, &ip->ip_tos); + break; + } +#endif +#if INET6 + case IPPROTO_IPV6: + { + struct ip6_hdr *ip6; + u_int8_t itos; + af = AF_INET6; + if (m->m_len < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) + return; + } + ip6 = mtod(m, struct ip6_hdr *); + itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; + if (gifp->if_flags & IFF_LINK1) + ip_ecn_egress(ECN_ALLOWED, &otos, &itos); + ip6->ip6_flow &= ~htonl(0xff << 20); + ip6->ip6_flow |= htonl((u_int32_t)itos << 20); + break; + } +#endif /* INET6 */ + default: 
+ ipstat.ips_nogif++; + m_freem(m); + return; + } + gif_input(m, af, gifp); + return; +} + +int +in_gif_ioctl(ifp, cmd, data) + struct ifnet *ifp; +#if defined(__FreeBSD__) && __FreeBSD__ < 3 + int cmd; +#else + u_long cmd; +#endif + caddr_t data; +{ + struct gif_softc *sc = (struct gif_softc*)ifp; + struct ifreq *ifr = (struct ifreq*)data; + int error = 0, size; + struct sockaddr *sa, *dst, *src; + const struct encaptab *p; + struct sockaddr_in smask4, dmask4; + + switch (cmd) { + case SIOCSIFFLAGS: + /* + * whenever we change our idea about multi-destination mode + * we need to update encap attachment. + */ + if (((ifp->if_flags ^ sc->gif_oflags) & IFF_LINK0) == 0) + break; + if (sc->gif_psrc == NULL || sc->gif_pdst == NULL || + sc->gif_psrc->sa_family != sc->gif_pdst->sa_family) + break; + bzero(&smask4, sizeof(smask4)); + smask4.sin_addr.s_addr = ~0; + dmask4 = smask4; + if ((ifp->if_flags & IFF_LINK0) != 0 && + ((struct sockaddr_in *)dst)->sin_addr.s_addr == + INADDR_ANY) { + bzero(&dmask4, sizeof(dmask4)); + } + p = encap_attach(sc->gif_psrc->sa_family, -1, sc->gif_psrc, + (struct sockaddr *)&smask4, sc->gif_pdst, + (struct sockaddr *)&dmask4, + (struct protosw *)&in_gif_protosw, &sc->gif_if); + if (p == NULL) { + error = EINVAL; + goto bad; + } + if (sc->encap_cookie != NULL) + (void)encap_detach(sc->encap_cookie); + sc->encap_cookie = p; + sc->gif_oflags = ifp->if_flags; + + break; + + case SIOCSIFPHYADDR: + switch (ifr->ifr_addr.sa_family) { + case AF_INET: + src = (struct sockaddr *) + &(((struct in_aliasreq *)data)->ifra_addr); + dst = (struct sockaddr *) + &(((struct in_aliasreq *)data)->ifra_dstaddr); + + bzero(&smask4, sizeof(smask4)); + smask4.sin_addr.s_addr = ~0; + dmask4 = smask4; + if ((ifp->if_flags & IFF_LINK0) != 0 && + ((struct sockaddr_in *)dst)->sin_addr.s_addr == + INADDR_ANY) { + bzero(&dmask4, sizeof(dmask4)); + } + size = sizeof(struct sockaddr_in); + break; + default: + error = EAFNOSUPPORT; + goto bad; + } + + if (sc->encap_cookie) + 
(void)encap_detach(sc->encap_cookie); + if (sc->gif_psrc != NULL) { + _FREE((caddr_t)sc->gif_psrc, M_IFADDR); + sc->gif_psrc = NULL; + } + if (sc->gif_pdst != NULL) { + _FREE((caddr_t)sc->gif_pdst, M_IFADDR); + sc->gif_pdst = NULL; + } + + p = encap_attach(ifr->ifr_addr.sa_family, -1, src, + (struct sockaddr *)&smask4, dst, + (struct sockaddr *)&dmask4, + (struct protosw *)&in_gif_protosw, &sc->gif_if); + if (p == NULL) { + error = EINVAL; + goto bad; + } + sc->encap_cookie = p; + sc->gif_oflags = ifp->if_flags; + + sa = (struct sockaddr *)_MALLOC(size, M_IFADDR, M_WAITOK); + bcopy((caddr_t)src, (caddr_t)sa, size); + sc->gif_psrc = sa; + + sa = (struct sockaddr *)_MALLOC(size, M_IFADDR, M_WAITOK); + bcopy((caddr_t)dst, (caddr_t)sa, size); + sc->gif_pdst = sa; + + ifp->if_flags |= IFF_UP; + if_up(ifp); /* send up RTM_IFINFO */ + + error = 0; + break; + default: + error = EINVAL; + goto bad; + } + + bad: + return error; +} diff --git a/bsd/netinet/in_gif.h b/bsd/netinet/in_gif.h new file mode 100644 index 000000000..8a1906d27 --- /dev/null +++ b/bsd/netinet/in_gif.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: in_gif.h,v 1.3 2000/02/22 14:01:59 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _NETINET_IN_GIF_H_ +#define _NETINET_IN_GIF_H_ + +#define GIF_TTL 30 + +extern int ip_gif_ttl; + +void in_gif_input __P((struct mbuf *, int)); +int in_gif_output __P((struct ifnet *, int, struct mbuf *, struct rtentry *)); +#if defined(__FreeBSD__) && __FreeBSD__ < 3 +int in_gif_ioctl __P((struct ifnet *, int, caddr_t)); +#else +int in_gif_ioctl __P((struct ifnet *, u_long, caddr_t)); +#endif + +#endif /*_NETINET_IN_GIF_H_*/ diff --git a/bsd/netinet/in_hostcache.c b/bsd/netinet/in_hostcache.c new file mode 100644 index 000000000..e791f1598 --- /dev/null +++ b/bsd/netinet/in_hostcache.c @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1997 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * Manage the IP per-host cache (really a thin veneer over the generic + * per-host cache code). + */ + +/* Look up an entry -- can be called from interrupt context. 
*/
+struct in_hcentry *
+inhc_lookup(struct sockaddr_in *sin)
+{
+	struct hcentry *hc;
+
+	hc = hc_get((struct sockaddr *)sin);
+	return ((struct in_hcentry *)hc);
+}
+
+/* Look up and possibly create an entry -- must be called from user mode. */
+struct in_hcentry *
+inhc_alloc(struct sockaddr_in *sin)
+{
+	struct in_hcentry *inhc;
+	struct rtentry *rt;
+	int error;
+	/* xxx mutual exclusion for smp */
+
+	inhc = inhc_lookup(sin);
+	if (inhc != 0)
+		return inhc;
+
+	/*
+	 * XXX bugfix: `inhc' is always NULL at this point (the lookup-hit
+	 * case returned above), so the original call
+	 * rtalloc1(inhc->inhc_hc.hc_host, ...) dereferenced a null
+	 * pointer.  Route on the address we were asked about instead.
+	 */
+	rt = rtalloc1((struct sockaddr *)sin, 1, 0);
+	if (rt == 0)
+		return 0;
+
+	MALLOC(inhc, struct in_hcentry *, sizeof *inhc, M_HOSTCACHE, M_WAITOK);
+	bzero(inhc, sizeof *inhc);
+	inhc->inhc_hc.hc_host = dup_sockaddr((struct sockaddr *)sin, 1);
+	if (in_broadcast(sin->sin_addr, rt->rt_ifp))
+		inhc->inhc_flags |= INHC_BROADCAST;
+	else if (((struct sockaddr_in *)rt->rt_ifa->ifa_addr)->sin_addr.s_addr
+	    == sin->sin_addr.s_addr)
+		inhc->inhc_flags |= INHC_LOCAL;
+	else if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
+		inhc->inhc_flags |= INHC_MULTICAST;
+	inhc->inhc_pmtu = rt->rt_rmx.rmx_mtu;
+	inhc->inhc_recvpipe = rt->rt_rmx.rmx_recvpipe;
+	inhc->inhc_sendpipe = rt->rt_rmx.rmx_sendpipe;
+	inhc->inhc_ssthresh = rt->rt_rmx.rmx_ssthresh;
+	if (rt->rt_rmx.rmx_locks & RTV_RTT)
+		inhc->inhc_rttmin = rt->rt_rmx.rmx_rtt
+			/ (RTM_RTTUNIT / TCP_RTT_SCALE);
+	inhc->inhc_hc.hc_rt = rt;
+	error = hc_insert(&inhc->inhc_hc);
+	if (error != 0) {
+		RTFREE(rt);
+		FREE(inhc, M_HOSTCACHE);
+		return 0;
+	}
+	/*
+	 * We don't return the structure directly because hc_get() needs
+	 * to be allowed to do its own processing.
+	 */
+	return (inhc_lookup(sin));
+}
+
+/*
+ * This is Van Jacobson's hash function for IPv4 addresses.
+ * It is designed to work with a power-of-two-sized hash table.
+ */ +static u_long +inhc_hash(struct sockaddr *sa, u_long nbuckets) +{ + u_long ip; + + ip = ((struct sockaddr_in *)sa)->sin_addr.s_addr; + return ((ip ^ (ip >> 23) ^ (ip >> 17)) & ~(nbuckets - 1)); +} + +/* + * We don't need to do any special work... if there are no references, + * as the caller has already ensured, then it's OK to kill. + */ +static int +inhc_delete(struct hcentry *hc) +{ + return 0; +} + +/* + * Return the next increment for the number of buckets in the hash table. + * Zero means ``do not bump''. + */ +static u_long +inhc_bump(u_long oldsize) +{ + if (oldsize < 512) + return (oldsize << 1); + return 0; +} + +static struct hccallback inhc_cb = { + inhc_hash, inhc_delete, inhc_bump +}; + +int +inhc_init(void) +{ + + return (hc_init(AF_INET, &inhc_cb, 128, 0)); +} + diff --git a/bsd/netinet/in_hostcache.h b/bsd/netinet/in_hostcache.h new file mode 100644 index 000000000..0ee6ca532 --- /dev/null +++ b/bsd/netinet/in_hostcache.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1997 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _NETINET_IN_HOSTCACHE_H +#define _NETINET_IN_HOSTCACHE_H 1 + +/* + * This file defines the particular structures contained in the host cache + * for the use of IP. + */ + +/* + * An IP host cache entry. Note that we include the srtt/var here, + * with the expectation that it might be used to keep a persistent, + * cross-connection view of this statistic. 
+ */
+struct in_hcentry {
+	struct	hcentry inhc_hc;
+	u_long	inhc_pmtu;
+	u_long	inhc_recvpipe;
+	u_long	inhc_sendpipe;
+	u_long	inhc_pksent;
+	u_long	inhc_flags;
+	u_long	inhc_ssthresh;
+	int	inhc_srtt;	/* VJ RTT estimator */
+	int	inhc_srttvar;	/* VJ */
+	u_int	inhc_rttmin;	/* VJ */
+	int	inhc_rxt;	/* TCP retransmit timeout */
+	u_long	inhc_cc;	/* deliberate type pun with tcp_cc */
+	u_long	inhc_ccsent;	/* as above */
+	u_short	inhc_mssopt;
+};
+
+#define inhc_addr(inhc) ((struct sockaddr_in *)(inhc)->inhc_hc.hc_host)
+
+/* Flags for inhc_flags... */
+#define INHC_LOCAL	0x0001	/* this address is local */
+#define INHC_BROADCAST	0x0002	/* this address is broadcast */
+#define INHC_MULTICAST	0x0004	/* this address is multicast */
+#define INHC_REDUCEDMTU 0x0008	/* we reduced the mtu via PMTU discovery */
+
+#ifdef KERNEL
+/*
+ * inhc_alloc can block while adding a new entry to the cache;
+ * inhc_lookup does not add new entries and so can be called
+ * in non-process context.
+ */
+struct in_hcentry *inhc_alloc(struct sockaddr_in *sin);
+int	inhc_init(void);
+struct in_hcentry *inhc_lookup(struct sockaddr_in *sin);
+#define inhc_ref(inhc)	(hc_ref(&(inhc)->inhc_hc))
+#define inhc_rele(inhc)	(hc_rele(&(inhc)->inhc_hc))
+#endif /* KERNEL */
+
+#endif /* _NETINET_IN_HOSTCACHE_H */
diff --git a/bsd/netinet/in_pcb.c b/bsd/netinet/in_pcb.c
new file mode 100644
index 000000000..ab4f2fc3d
--- /dev/null
+++ b/bsd/netinet/in_pcb.c
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1991, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_pcb.c 8.4 (Berkeley) 5/24/95 + */ + +#include +#include +#include +#include +#if INET6 +#include +#endif +#include +#include +#include +#include +#include +#include + +#include + +#if ISFB31 +#include +#else +#include +#endif + +#include +#include +#include + +#include +#include +#include +#include +#if INET6 +#include +#include +#endif /* INET6 */ + +#include "faith.h" + +#if IPSEC +#include +#include +#include +#endif /* IPSEC */ + +#include + + +#define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8)) +#define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1)) + +struct in_addr zeroin_addr; + +void in_pcbremlists __P((struct inpcb *)); +static void in_rtchange __P((struct inpcb *, int)); + + +/* + * These configure the range of local port addresses assigned to + * "unspecified" outgoing connections/packets/whatever. 
+ */ +int ipport_lowfirstauto = IPPORT_RESERVED - 1; /* 1023 */ +int ipport_lowlastauto = IPPORT_RESERVEDSTART; /* 600 */ +int ipport_firstauto = IPPORT_HIFIRSTAUTO; /* 49152 */ +int ipport_lastauto = IPPORT_HILASTAUTO; /* 65535 */ +int ipport_hifirstauto = IPPORT_HIFIRSTAUTO; /* 49152 */ +int ipport_hilastauto = IPPORT_HILASTAUTO; /* 65535 */ + +#define RANGECHK(var, min, max) \ + if ((var) < (min)) { (var) = (min); } \ + else if ((var) > (max)) { (var) = (max); } + + +static int +sysctl_net_ipport_check SYSCTL_HANDLER_ARGS +{ + int error = sysctl_handle_int(oidp, + oidp->oid_arg1, oidp->oid_arg2, req); + if (!error) { + RANGECHK(ipport_lowfirstauto, 1, IPPORT_RESERVED - 1); + RANGECHK(ipport_lowlastauto, 1, IPPORT_RESERVED - 1); + RANGECHK(ipport_firstauto, IPPORT_RESERVED, USHRT_MAX); + RANGECHK(ipport_lastauto, IPPORT_RESERVED, USHRT_MAX); + RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX); + RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX); + } + return error; +} + +#undef RANGECHK + +SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, CTLFLAG_RW, 0, "IP Ports"); + +SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst, CTLTYPE_INT|CTLFLAG_RW, + &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", ""); +SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast, CTLTYPE_INT|CTLFLAG_RW, + &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", ""); +SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first, CTLTYPE_INT|CTLFLAG_RW, + &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", ""); +SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last, CTLTYPE_INT|CTLFLAG_RW, + &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", ""); +SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst, CTLTYPE_INT|CTLFLAG_RW, + &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", ""); +SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast, CTLTYPE_INT|CTLFLAG_RW, + &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", ""); + +/* + * in_pcb.c: manage the Protocol Control Blocks. 
+ * + * NOTE: It is assumed that most of these functions will be called at + * splnet(). XXX - There are, unfortunately, a few exceptions to this + * rule that should be fixed. + */ + +/* + * Allocate a PCB and associate it with the socket. + */ +int +in_pcballoc(so, pcbinfo, p) + struct socket *so; + struct inpcbinfo *pcbinfo; + struct proc *p; +{ + register struct inpcb *inp; + caddr_t temp; + + if (so->cached_in_sock_layer == 0) { +#if TEMPDEBUG + printf("PCBALLOC calling zalloc for socket %x\n", so); +#endif + inp = (struct inpcb *) zalloc(pcbinfo->ipi_zone); + if (inp == NULL) + return (ENOBUFS); + bzero((caddr_t)inp, sizeof(*inp)); + } + else { +#if TEMPDEBUG + printf("PCBALLOC reusing PCB for socket %x\n", so); +#endif + inp = (struct inpcb *) so->so_saved_pcb; + temp = inp->inp_saved_ppcb; + bzero((caddr_t) inp, sizeof(*inp)); + inp->inp_saved_ppcb = temp; + } + + inp->inp_gencnt = ++pcbinfo->ipi_gencnt; + inp->inp_pcbinfo = pcbinfo; + inp->inp_socket = so; + LIST_INSERT_HEAD(pcbinfo->listhead, inp, inp_list); + pcbinfo->ipi_count++; + so->so_pcb = (caddr_t)inp; + return (0); +} + +int +in_pcbbind(inp, nam, p) + register struct inpcb *inp; + struct sockaddr *nam; + struct proc *p; +{ + register struct socket *so = inp->inp_socket; + u_short *lastport; + struct sockaddr_in *sin; + struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; + u_short lport = 0; + int wild = 0, reuseport = (so->so_options & SO_REUSEPORT); + int error; + + if (TAILQ_EMPTY(&in_ifaddrhead)) /* XXX broken! */ + return (EADDRNOTAVAIL); + if (inp->inp_lport || inp->inp_laddr.s_addr != INADDR_ANY) + return (EINVAL); + if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0) + wild = 1; + if (nam) { + sin = (struct sockaddr_in *)nam; + if (nam->sa_len != sizeof (*sin)) + return (EINVAL); +#ifdef notdef + /* + * We should check the family, but old programs + * incorrectly fail to initialize it. 
+ */ + if (sin->sin_family != AF_INET) + return (EAFNOSUPPORT); +#endif + lport = sin->sin_port; + if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { + /* + * Treat SO_REUSEADDR as SO_REUSEPORT for multicast; + * allow complete duplication of binding if + * SO_REUSEPORT is set, or if SO_REUSEADDR is set + * and a multicast address is bound on both + * new and duplicated sockets. + */ + if (so->so_options & SO_REUSEADDR) + reuseport = SO_REUSEADDR|SO_REUSEPORT; + } else if (sin->sin_addr.s_addr != INADDR_ANY) { + sin->sin_port = 0; /* yech... */ + if (ifa_ifwithaddr((struct sockaddr *)sin) == 0) + return (EADDRNOTAVAIL); + } + if (lport) { + struct inpcb *t; + + /* GROSS */ + if (ntohs(lport) < IPPORT_RESERVED && p && + suser(p->p_ucred, &p->p_acflag)) + return (EACCES); + if (so->so_uid && + !IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { + t = in_pcblookup_local(inp->inp_pcbinfo, + sin->sin_addr, lport, INPLOOKUP_WILDCARD); + if (t && + (ntohl(sin->sin_addr.s_addr) != INADDR_ANY || + ntohl(t->inp_laddr.s_addr) != INADDR_ANY || + (t->inp_socket->so_options & + SO_REUSEPORT) == 0) && + (so->so_uid != t->inp_socket->so_uid)) { +#if INET6 + if (ip6_mapped_addr_on == 0 || + ntohl(sin->sin_addr.s_addr) != + INADDR_ANY || + ntohl(t->inp_laddr.s_addr) != + INADDR_ANY || + INP_SOCKAF(so) == + INP_SOCKAF(t->inp_socket)) +#endif + return (EADDRINUSE); + } + } + t = in_pcblookup_local(pcbinfo, sin->sin_addr, + lport, wild); + if (t && + (reuseport & t->inp_socket->so_options) == 0) { +#if INET6 + if (ip6_mapped_addr_on == 0 || + ntohl(sin->sin_addr.s_addr) != + INADDR_ANY || + ntohl(t->inp_laddr.s_addr) != + INADDR_ANY || + INP_SOCKAF(so) == + INP_SOCKAF(t->inp_socket)) +#endif + return (EADDRINUSE); + } + } + inp->inp_laddr = sin->sin_addr; + } + if (lport == 0) { + u_short first, last; + int count; + + inp->inp_flags |= INP_ANONPORT; + + if (inp->inp_flags & INP_HIGHPORT) { + first = ipport_hifirstauto; /* sysctl */ + last = ipport_hilastauto; + lastport = &pcbinfo->lasthi; + } 
else if (inp->inp_flags & INP_LOWPORT) { + if (p && (error = suser(p->p_ucred, &p->p_acflag))) + return error; + first = ipport_lowfirstauto; /* 1023 */ + last = ipport_lowlastauto; /* 600 */ + lastport = &pcbinfo->lastlow; + } else { + first = ipport_firstauto; /* sysctl */ + last = ipport_lastauto; + lastport = &pcbinfo->lastport; + } + /* + * Simple check to ensure all ports are not used up causing + * a deadlock here. + * + * We split the two cases (up and down) so that the direction + * is not being tested on each round of the loop. + */ + if (first > last) { + /* + * counting down + */ + count = first - last; + + do { + if (count-- < 0) { /* completely used? */ + /* + * Undo any address bind that may have + * occurred above. + */ + inp->inp_laddr.s_addr = INADDR_ANY; + return (EAGAIN); + } + --*lastport; + if (*lastport > first || *lastport < last) + *lastport = first; + lport = htons(*lastport); + } while (in_pcblookup_local(pcbinfo, + inp->inp_laddr, lport, wild)); + } else { + /* + * counting up + */ + count = last - first; + + do { + if (count-- < 0) { /* completely used? */ + /* + * Undo any address bind that may have + * occurred above. + */ + inp->inp_laddr.s_addr = INADDR_ANY; + return (EAGAIN); + } + ++*lastport; + if (*lastport < first || *lastport > last) + *lastport = first; + lport = htons(*lastport); + } while (in_pcblookup_local(pcbinfo, + inp->inp_laddr, lport, wild)); + } + } + inp->inp_lport = lport; + if (in_pcbinshash(inp) != 0) { + inp->inp_laddr.s_addr = INADDR_ANY; + inp->inp_lport = 0; + return (EAGAIN); + } + return (0); +} + +/* + * Transform old in_pcbconnect() into an inner subroutine for new + * in_pcbconnect(): Do some validity-checking on the remote + * address (in mbuf 'nam') and then determine local host address + * (i.e., which interface) to use to access that remote host. + * + * This preserves definition of in_pcbconnect(), while supporting a + * slightly different version for T/TCP. 
(This is more than + * a bit of a kludge, but cleaning up the internal interfaces would + * have forced minor changes in every protocol). + */ + +int +in_pcbladdr(inp, nam, plocal_sin) + register struct inpcb *inp; + struct sockaddr *nam; + struct sockaddr_in **plocal_sin; +{ + struct in_ifaddr *ia; + register struct sockaddr_in *sin = (struct sockaddr_in *)nam; + + if (nam->sa_len != sizeof (*sin)) + return (EINVAL); + if (sin->sin_family != AF_INET) + return (EAFNOSUPPORT); + if (sin->sin_port == 0) + return (EADDRNOTAVAIL); + if (!TAILQ_EMPTY(&in_ifaddrhead)) { + /* + * If the destination address is INADDR_ANY, + * use the primary local address. + * If the supplied address is INADDR_BROADCAST, + * and the primary interface supports broadcast, + * choose the broadcast address for that interface. + */ +#define satosin(sa) ((struct sockaddr_in *)(sa)) +#define sintosa(sin) ((struct sockaddr *)(sin)) +#define ifatoia(ifa) ((struct in_ifaddr *)(ifa)) + if (sin->sin_addr.s_addr == INADDR_ANY) + sin->sin_addr = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr; + else if (sin->sin_addr.s_addr == (u_long)INADDR_BROADCAST && + (in_ifaddrhead.tqh_first->ia_ifp->if_flags & IFF_BROADCAST)) + sin->sin_addr = satosin(&in_ifaddrhead.tqh_first->ia_broadaddr)->sin_addr; + } + if (inp->inp_laddr.s_addr == INADDR_ANY) { + register struct route *ro; + + ia = (struct in_ifaddr *)0; + /* + * If route is known or can be allocated now, + * our src addr is taken from the i/f, else punt. 
+ */ + ro = &inp->inp_route; + if (ro->ro_rt && + (satosin(&ro->ro_dst)->sin_addr.s_addr != + sin->sin_addr.s_addr || + inp->inp_socket->so_options & SO_DONTROUTE)) { + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + } + if ((inp->inp_socket->so_options & SO_DONTROUTE) == 0 && /*XXX*/ + (ro->ro_rt == (struct rtentry *)0 || + ro->ro_rt->rt_ifp == (struct ifnet *)0)) { + /* No route yet, so try to acquire one */ + ro->ro_dst.sa_family = AF_INET; + ro->ro_dst.sa_len = sizeof(struct sockaddr_in); + ((struct sockaddr_in *) &ro->ro_dst)->sin_addr = + sin->sin_addr; + rtalloc(ro); + } + /* + * If we found a route, use the address + * corresponding to the outgoing interface + * unless it is the loopback (in case a route + * to our address on another net goes to loopback). + */ + if (ro->ro_rt && !(ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK)) + ia = ifatoia(ro->ro_rt->rt_ifa); + if (ia == 0) { + u_short fport = sin->sin_port; + + sin->sin_port = 0; + ia = ifatoia(ifa_ifwithdstaddr(sintosa(sin))); + if (ia == 0) + ia = ifatoia(ifa_ifwithnet(sintosa(sin))); + sin->sin_port = fport; + if (ia == 0) + ia = in_ifaddrhead.tqh_first; + if (ia == 0) + return (EADDRNOTAVAIL); + } + /* + * If the destination address is multicast and an outgoing + * interface has been set as a multicast option, use the + * address of that interface as our source address. + */ + if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr)) && + inp->inp_moptions != NULL) { + struct ip_moptions *imo; + struct ifnet *ifp; + + imo = inp->inp_moptions; + if (imo->imo_multicast_ifp != NULL) { + ifp = imo->imo_multicast_ifp; + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) + if (ia->ia_ifp == ifp) + break; + if (ia == 0) + return (EADDRNOTAVAIL); + } + } + /* + * Don't do pcblookup call here; return interface in plocal_sin + * and exit to caller, that will do the lookup. 
+ */ + *plocal_sin = &ia->ia_addr; + + } + return(0); +} + +/* + * Outer subroutine: + * Connect from a socket to a specified address. + * Both address and port must be specified in argument sin. + * If don't have a local address for this socket yet, + * then pick one. + */ +int +in_pcbconnect(inp, nam, p) + register struct inpcb *inp; + struct sockaddr *nam; + struct proc *p; +{ + struct sockaddr_in *ifaddr; + register struct sockaddr_in *sin = (struct sockaddr_in *)nam; + int error; + + /* + * Call inner routine, to assign local interface address. + */ + if ((error = in_pcbladdr(inp, nam, &ifaddr)) != 0) + return(error); + + if (in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port, + inp->inp_laddr.s_addr ? inp->inp_laddr : ifaddr->sin_addr, + inp->inp_lport, 0, NULL) != NULL) { + return (EADDRINUSE); + } + if (inp->inp_laddr.s_addr == INADDR_ANY) { + if (inp->inp_lport == 0) + (void)in_pcbbind(inp, (struct sockaddr *)0, p); + inp->inp_laddr = ifaddr->sin_addr; + } + inp->inp_faddr = sin->sin_addr; + inp->inp_fport = sin->sin_port; + in_pcbrehash(inp); + return (0); +} + +void +in_pcbdisconnect(inp) + struct inpcb *inp; +{ + + inp->inp_faddr.s_addr = INADDR_ANY; + inp->inp_fport = 0; + in_pcbrehash(inp); + if (inp->inp_socket->so_state & SS_NOFDREF) + in_pcbdetach(inp); +} + +void +in_pcbdetach(inp) + struct inpcb *inp; +{ + struct socket *so = inp->inp_socket; + struct inpcbinfo *ipi = inp->inp_pcbinfo; + +#if IPSEC + ipsec4_delete_pcbpolicy(inp); +#endif /*IPSEC*/ + inp->inp_gencnt = ++ipi->ipi_gencnt; + in_pcbremlists(inp); + +#if TEMPDEBUG + if (so->cached_in_sock_layer) + printf("PCB_DETACH for cached socket %x\n", so); + else + printf("PCB_DETACH for allocated socket %x\n", so); +#endif + + so->so_pcb = 0; + + if (inp->inp_options) + (void)m_free(inp->inp_options); + if (inp->inp_route.ro_rt) + rtfree(inp->inp_route.ro_rt); + ip_freemoptions(inp->inp_moptions); + if (so->cached_in_sock_layer) + so->so_saved_pcb = (caddr_t) inp; + else + 
zfree(ipi->ipi_zone, (vm_offset_t) inp); + + sofree(so); +} + +/* + * The calling convention of in_setsockaddr() and in_setpeeraddr() was + * modified to match the pru_sockaddr() and pru_peeraddr() entry points + * in struct pr_usrreqs, so that protocols can just reference then directly + * without the need for a wrapper function. The socket must have a valid + * (i.e., non-nil) PCB, but it should be impossible to get an invalid one + * except through a kernel programming error, so it is acceptable to panic + * (or in this case trap) if the PCB is invalid. (Actually, we don't trap + * because there actually /is/ a programming error somewhere... XXX) + */ +int +in_setsockaddr(so, nam) + struct socket *so; + struct sockaddr **nam; +{ + int s; + register struct inpcb *inp; + register struct sockaddr_in *sin; + + /* + * Do the malloc first in case it blocks. + */ + MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK); + bzero(sin, sizeof *sin); + sin->sin_family = AF_INET; + sin->sin_len = sizeof(*sin); + + s = splnet(); + inp = sotoinpcb(so); + if (!inp) { + splx(s); + FREE(sin, M_SONAME); + return EINVAL; + } + sin->sin_port = inp->inp_lport; + sin->sin_addr = inp->inp_laddr; + splx(s); + + *nam = (struct sockaddr *)sin; + return 0; +} + +int +in_setpeeraddr(so, nam) + struct socket *so; + struct sockaddr **nam; +{ + int s; + struct inpcb *inp; + register struct sockaddr_in *sin; + + /* + * Do the malloc first in case it blocks. + */ + MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK); + bzero((caddr_t)sin, sizeof (*sin)); + sin->sin_family = AF_INET; + sin->sin_len = sizeof(*sin); + + s = splnet(); + inp = sotoinpcb(so); + if (!inp) { + splx(s); + FREE(sin, M_SONAME); + return EINVAL; + } + sin->sin_port = inp->inp_fport; + sin->sin_addr = inp->inp_faddr; + splx(s); + + *nam = (struct sockaddr *)sin; + return 0; +} + +/* + * Pass some notification to all connections of a protocol + * associated with address dst. 
The local address and/or port numbers + * may be specified to limit the search. The "usual action" will be + * taken, depending on the ctlinput cmd. The caller must filter any + * cmds that are uninteresting (e.g., no error in the map). + * Call the protocol specific routine (if any) to report + * any errors for each matching socket. + */ +void +in_pcbnotify(head, dst, fport_arg, laddr, lport_arg, cmd, notify) + struct inpcbhead *head; + struct sockaddr *dst; + u_int fport_arg, lport_arg; + struct in_addr laddr; + int cmd; + void (*notify) __P((struct inpcb *, int)); +{ + register struct inpcb *inp, *oinp; + struct in_addr faddr; + u_short fport = fport_arg, lport = lport_arg; + int errno, s; + + if ((unsigned)cmd > PRC_NCMDS || dst->sa_family != AF_INET) + return; + faddr = ((struct sockaddr_in *)dst)->sin_addr; + if (faddr.s_addr == INADDR_ANY) + return; + + /* + * Redirects go to all references to the destination, + * and use in_rtchange to invalidate the route cache. + * Dead host indications: notify all references to the destination. + * Otherwise, if we have knowledge of the local port and address, + * deliver only to that socket. + */ + if (PRC_IS_REDIRECT(cmd) || cmd == PRC_HOSTDEAD) { + fport = 0; + lport = 0; + laddr.s_addr = 0; + if (cmd != PRC_HOSTDEAD) + notify = in_rtchange; + } + errno = inetctlerrmap[cmd]; + s = splnet(); + for (inp = head->lh_first; inp != NULL;) { + if ((inp->inp_vflag & INP_IPV4) == NULL) { + inp = LIST_NEXT(inp, inp_list); + continue; + } + if (inp->inp_faddr.s_addr != faddr.s_addr || + inp->inp_socket == 0 || + (lport && inp->inp_lport != lport) || + (laddr.s_addr && inp->inp_laddr.s_addr != laddr.s_addr) || + (fport && inp->inp_fport != fport)) { + inp = LIST_NEXT(inp, inp_list); + continue; + } + oinp = inp; + inp = LIST_NEXT(inp, inp_list); + if (notify) + (*notify)(oinp, errno); + } + splx(s); +} + +/* + * Check for alternatives when higher level complains + * about service problems. 
For now, invalidate cached + * routing information. If the route was created dynamically + * (by a redirect), time to try a default gateway again. + */ +void +in_losing(inp) + struct inpcb *inp; +{ + register struct rtentry *rt; + struct rt_addrinfo info; + + if ((rt = inp->inp_route.ro_rt)) { + inp->inp_route.ro_rt = 0; + bzero((caddr_t)&info, sizeof(info)); + info.rti_info[RTAX_DST] = + (struct sockaddr *)&inp->inp_route.ro_dst; + info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; + info.rti_info[RTAX_NETMASK] = rt_mask(rt); + rt_missmsg(RTM_LOSING, &info, rt->rt_flags, 0); + if (rt->rt_flags & RTF_DYNAMIC) + (void) rtrequest(RTM_DELETE, rt_key(rt), + rt->rt_gateway, rt_mask(rt), rt->rt_flags, + (struct rtentry **)0); + else + /* + * A new route can be allocated + * the next time output is attempted. + */ + rtfree(rt); + } +} + +/* + * After a routing change, flush old routing + * and allocate a (hopefully) better one. + */ +static void +in_rtchange(inp, errno) + register struct inpcb *inp; + int errno; +{ + if (inp->inp_route.ro_rt) { + rtfree(inp->inp_route.ro_rt); + inp->inp_route.ro_rt = 0; + /* + * A new route can be allocated the next time + * output is attempted. + */ + } +} + +/* + * Lookup a PCB based on the local address and port. + */ +struct inpcb * +in_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay) + struct inpcbinfo *pcbinfo; + struct in_addr laddr; + u_int lport_arg; + int wild_okay; +{ + register struct inpcb *inp; + int matchwild = 3, wildcard; + u_short lport = lport_arg; + + KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_START, 0,0,0,0,0); + + if (!wild_okay) { + struct inpcbhead *head; + /* + * Look for an unconnected (wildcard foreign addr) PCB that + * matches the local address and port we're looking for. 
+ */ + head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->hashmask)]; + for (inp = head->lh_first; inp != NULL; inp = inp->inp_hash.le_next) { + if ((inp->inp_vflag & INP_IPV4) == NULL) + continue; + if (inp->inp_faddr.s_addr == INADDR_ANY && + inp->inp_laddr.s_addr == laddr.s_addr && + inp->inp_lport == lport) { + /* + * Found. + */ + return (inp); + } + } + /* + * Not found. + */ + KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0,0,0,0,0); + return (NULL); + } else { + struct inpcbporthead *porthash; + struct inpcbport *phd; + struct inpcb *match = NULL; + /* + * Best fit PCB lookup. + * + * First see if this local port is in use by looking on the + * port hash list. + */ + porthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(lport, + pcbinfo->porthashmask)]; + for (phd = porthash->lh_first; phd != NULL; phd = phd->phd_hash.le_next) { + if (phd->phd_port == lport) + break; + } + if (phd != NULL) { + /* + * Port is in use by one or more PCBs. Look for best + * fit. + */ + for (inp = phd->phd_pcblist.lh_first; inp != NULL; + inp = inp->inp_portlist.le_next) { + wildcard = 0; + if ((inp->inp_vflag & INP_IPV4) == NULL) + continue; + if (inp->inp_faddr.s_addr != INADDR_ANY) + wildcard++; + if (inp->inp_laddr.s_addr != INADDR_ANY) { + if (laddr.s_addr == INADDR_ANY) + wildcard++; + else if (inp->inp_laddr.s_addr != laddr.s_addr) + continue; + } else { + if (laddr.s_addr != INADDR_ANY) + wildcard++; + } + if (wildcard < matchwild) { + match = inp; + matchwild = wildcard; + if (matchwild == 0) { + break; + } + } + } + } + KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match,0,0,0,0); + return (match); + } +} + +/* + * Lookup PCB in hash list. 
+ */ +struct inpcb * +in_pcblookup_hash(pcbinfo, faddr, fport_arg, laddr, lport_arg, wildcard, ifp) + struct inpcbinfo *pcbinfo; + struct in_addr faddr, laddr; + u_int fport_arg, lport_arg; + int wildcard; + struct ifnet *ifp; +{ + struct inpcbhead *head; + register struct inpcb *inp; + u_short fport = fport_arg, lport = lport_arg; + + /* + * We may have found the pcb in the last lookup - check this first. + */ + + if ((!IN_MULTICAST(laddr.s_addr)) && (pcbinfo->last_pcb)) { + if (faddr.s_addr == pcbinfo->last_pcb->inp_faddr.s_addr && + laddr.s_addr == pcbinfo->last_pcb->inp_laddr.s_addr && + fport_arg == pcbinfo->last_pcb->inp_fport && + lport_arg == pcbinfo->last_pcb->inp_lport) { + /* + * Found. + */ + return (pcbinfo->last_pcb); + } + + pcbinfo->last_pcb = 0; + } + + /* + * First look for an exact match. + */ + head = &pcbinfo->hashbase[INP_PCBHASH(faddr.s_addr, lport, fport, pcbinfo->hashmask)]; + for (inp = head->lh_first; inp != NULL; inp = inp->inp_hash.le_next) { + if ((inp->inp_vflag & INP_IPV4) == NULL) + continue; + if (inp->inp_faddr.s_addr == faddr.s_addr && + inp->inp_laddr.s_addr == laddr.s_addr && + inp->inp_fport == fport && + inp->inp_lport == lport) { + /* + * Found. 
+ */ + return (inp); + } + } + if (wildcard) { + struct inpcb *local_wild = NULL; +#if INET6 + struct inpcb *local_wild_mapped = NULL; +#endif + + head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->hashmask)]; + for (inp = head->lh_first; inp != NULL; inp = inp->inp_hash.le_next) { + if ((inp->inp_vflag & INP_IPV4) == NULL) + continue; + if (inp->inp_faddr.s_addr == INADDR_ANY && + inp->inp_lport == lport) { +#if defined(NFAITH) && NFAITH > 0 + if (ifp && ifp->if_type == IFT_FAITH && + (inp->inp_flags & INP_FAITH) == 0) + continue; +#endif + if (inp->inp_laddr.s_addr == laddr.s_addr) + return (inp); + else if (inp->inp_laddr.s_addr == INADDR_ANY) { +#if INET6 + if (INP_CHECK_SOCKAF(inp->inp_socket, + AF_INET6)) + local_wild_mapped = inp; + else +#endif + local_wild = inp; + } + } + } +#if INET6 + if (local_wild == NULL) + return (local_wild_mapped); +#endif + return (local_wild); + } + + /* + * Not found. + */ + return (NULL); +} + +/* + * Insert PCB onto various hash lists. + */ +int +in_pcbinshash(inp) + struct inpcb *inp; +{ + struct inpcbhead *pcbhash; + struct inpcbporthead *pcbporthash; + struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; + struct inpcbport *phd; + u_int32_t hashkey_faddr; + +#if INET6 + if (inp->inp_vflag & INP_IPV6) + hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; + else +#endif /* INET6 */ + hashkey_faddr = inp->inp_faddr.s_addr; + + pcbhash = &pcbinfo->hashbase[INP_PCBHASH(hashkey_faddr, + inp->inp_lport, inp->inp_fport, pcbinfo->hashmask)]; + + pcbporthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(inp->inp_lport, + pcbinfo->porthashmask)]; + + /* + * Go through port list and look for a head for this lport. + */ + for (phd = pcbporthash->lh_first; phd != NULL; phd = phd->phd_hash.le_next) { + if (phd->phd_port == inp->inp_lport) + break; + } + /* + * If none exists, malloc one and tack it on. 
+ */ + if (phd == NULL) { + MALLOC(phd, struct inpcbport *, sizeof(struct inpcbport), M_PCB, M_NOWAIT); + if (phd == NULL) { + return (ENOBUFS); /* XXX */ + } + phd->phd_port = inp->inp_lport; + LIST_INIT(&phd->phd_pcblist); + LIST_INSERT_HEAD(pcbporthash, phd, phd_hash); + } + inp->inp_phd = phd; + LIST_INSERT_HEAD(&phd->phd_pcblist, inp, inp_portlist); + LIST_INSERT_HEAD(pcbhash, inp, inp_hash); + inp->hash_element = INP_PCBHASH(inp->inp_faddr.s_addr, inp->inp_lport, + inp->inp_fport, pcbinfo->hashmask); + return (0); +} + +/* + * Move PCB to the proper hash bucket when { faddr, fport } have been + * changed. NOTE: This does not handle the case of the lport changing (the + * hashed port list would have to be updated as well), so the lport must + * not change after in_pcbinshash() has been called. + */ +void +in_pcbrehash(inp) + struct inpcb *inp; +{ + struct inpcbhead *head; + u_int32_t hashkey_faddr; + +#if INET6 + if (inp->inp_vflag & INP_IPV6) + hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; + else +#endif /* INET6 */ + hashkey_faddr = inp->inp_faddr.s_addr; + + head = &inp->inp_pcbinfo->hashbase[INP_PCBHASH(hashkey_faddr, + inp->inp_lport, inp->inp_fport, inp->inp_pcbinfo->hashmask)]; + + LIST_REMOVE(inp, inp_hash); + LIST_INSERT_HEAD(head, inp, inp_hash); + inp->hash_element = INP_PCBHASH(inp->inp_faddr.s_addr, inp->inp_lport, + inp->inp_fport, inp->inp_pcbinfo->hashmask); +} + +/* + * Remove PCB from various lists. 
+ */ +void +in_pcbremlists(inp) + struct inpcb *inp; +{ + inp->inp_gencnt = ++inp->inp_pcbinfo->ipi_gencnt; + if (inp == inp->inp_pcbinfo->last_pcb) + inp->inp_pcbinfo->last_pcb = 0; + + if (inp->inp_lport) { + struct inpcbport *phd = inp->inp_phd; + + LIST_REMOVE(inp, inp_hash); + LIST_REMOVE(inp, inp_portlist); + if (phd->phd_pcblist.lh_first == NULL) { + LIST_REMOVE(phd, phd_hash); + FREE(phd, M_PCB); + } + } + + LIST_REMOVE(inp, inp_list); + inp->inp_pcbinfo->ipi_count--; +} + +int +in_pcb_grab_port __P((struct inpcbinfo *pcbinfo, + u_short options, + struct in_addr laddr, + u_short *lport, + struct in_addr faddr, + u_short fport, + u_int cookie, + u_char owner_id)) +{ + struct inpcb *pcb; + struct sockaddr_in sin; + struct proc *p = current_proc(); + int stat; + + + pcbinfo->nat_dummy_socket.so_pcb = 0; + pcbinfo->nat_dummy_socket.so_options = 0; + if (*lport) { + /* The grabber wants a particular port */ + + if (faddr.s_addr || fport) { + /* + * This is either the second half of an active connect, or + * it's from the acceptance of an incoming connection. + */ + if (laddr.s_addr == 0) { + return EINVAL; + } + + if (in_pcblookup_hash(pcbinfo, faddr, fport, + laddr, *lport, 0, NULL) != NULL) { + if (!(IN_MULTICAST(ntohl(laddr.s_addr)))) { + return (EADDRINUSE); + } + } + + stat = in_pcballoc(&pcbinfo->nat_dummy_socket, pcbinfo, p); + if (stat) + return stat; + pcb = sotoinpcb(&pcbinfo->nat_dummy_socket); + pcb->inp_vflag |= INP_IPV4; + + pcb->inp_lport = *lport; + pcb->inp_laddr.s_addr = laddr.s_addr; + + pcb->inp_faddr = faddr; + pcb->inp_fport = fport; + in_pcbinshash(pcb); + } + else { + /* + * This is either a bind for a passive socket, or it's the + * first part of bind-connect sequence (not likely since an + * ephemeral port is usually used in this case). Or, it's + * the result of a connection acceptance when the foreign + * address/port cannot be provided (which requires the SO_REUSEADDR + * flag if laddr is not multicast). 
+ */ + + stat = in_pcballoc(&pcbinfo->nat_dummy_socket, pcbinfo, p); + if (stat) + return stat; + pcb = sotoinpcb(&pcbinfo->nat_dummy_socket); + pcb->inp_vflag |= INP_IPV4; + + pcbinfo->nat_dummy_socket.so_options = options; + bzero(&sin, sizeof(struct sockaddr_in)); + sin.sin_len = sizeof(struct sockaddr_in); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = laddr.s_addr; + sin.sin_port = *lport; + + stat = in_pcbbind((struct inpcb *) pcbinfo->nat_dummy_socket.so_pcb, + (struct sockaddr *) &sin, p); + if (stat) { + in_pcbdetach(pcb); + return stat; + } + } + } + else { + /* The grabber wants an ephemeral port */ + + stat = in_pcballoc(&pcbinfo->nat_dummy_socket, pcbinfo, p); + if (stat) + return stat; + pcb = sotoinpcb(&pcbinfo->nat_dummy_socket); + pcb->inp_vflag |= INP_IPV4; + + bzero(&sin, sizeof(struct sockaddr_in)); + sin.sin_len = sizeof(struct sockaddr_in); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = laddr.s_addr; + sin.sin_port = 0; + + if (faddr.s_addr || fport) { + /* + * Not sure if this case will be used - could occur when connect + * is called, skipping the bind. + */ + + if (laddr.s_addr == 0) { + in_pcbdetach(pcb); + return EINVAL; + } + + stat = in_pcbbind((struct inpcb *) pcbinfo->nat_dummy_socket.so_pcb, + (struct sockaddr *) &sin, p); + if (stat) { + in_pcbdetach(pcb); + return stat; + } + + if (in_pcblookup_hash(pcbinfo, faddr, fport, + pcb->inp_laddr, pcb->inp_lport, 0, NULL) != NULL) { + in_pcbdetach(pcb); + return (EADDRINUSE); + } + + pcb->inp_faddr = faddr; + pcb->inp_fport = fport; + in_pcbrehash(pcb); + } + else { + /* + * This is a simple bind of an ephemeral port. The local addr + * may or may not be defined. 
+ */ + + stat = in_pcbbind((struct inpcb *) pcbinfo->nat_dummy_socket.so_pcb, + (struct sockaddr *) &sin, p); + if (stat) { + in_pcbdetach(pcb); + return stat; + } + } + *lport = pcb->inp_lport; + } + + + pcb->nat_owner = owner_id; + pcb->nat_cookie = cookie; + pcb->inp_ppcb = (caddr_t) pcbinfo->dummy_cb; + return 0; +} + +int +in_pcb_letgo_port __P((struct inpcbinfo *pcbinfo, struct in_addr laddr, u_short lport, + struct in_addr faddr, u_short fport, u_char owner_id)) +{ + struct inpcbhead *head; + register struct inpcb *inp; + + + /* + * First look for an exact match. + */ + head = &pcbinfo->hashbase[INP_PCBHASH(faddr.s_addr, lport, fport, pcbinfo->hashmask)]; + for (inp = head->lh_first; inp != NULL; inp = inp->inp_hash.le_next) { + if (inp->inp_faddr.s_addr == faddr.s_addr && + inp->inp_laddr.s_addr == laddr.s_addr && + inp->inp_fport == fport && + inp->inp_lport == lport && + inp->nat_owner == owner_id) { + /* + * Found. + */ + in_pcbdetach(inp); + return 0; + } + } + + return ENOENT; +} + +u_char +in_pcb_get_owner(struct inpcbinfo *pcbinfo, + struct in_addr laddr, u_short lport, + struct in_addr faddr, u_short fport, + u_int *cookie) + +{ + struct inpcb *inp; + u_char owner_id = INPCB_NO_OWNER; + struct inpcbport *phd; + struct inpcbporthead *porthash; + + + if (IN_MULTICAST(laddr.s_addr)) { + /* + * Walk through PCB's looking for registered + * owners. 
+ */ + + porthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(lport, + pcbinfo->porthashmask)]; + for (phd = porthash->lh_first; phd != NULL; phd = phd->phd_hash.le_next) { + if (phd->phd_port == lport) + break; + } + + if (phd == 0) { + return INPCB_NO_OWNER; + } + + owner_id = INPCB_NO_OWNER; + for (inp = phd->phd_pcblist.lh_first; inp != NULL; + inp = inp->inp_portlist.le_next) { + + if (inp->inp_laddr.s_addr == laddr.s_addr) { + if (inp->nat_owner == 0) + owner_id |= INPCB_OWNED_BY_X; + else + owner_id |= inp->nat_owner; + } + } + + return owner_id; + } + else { + inp = in_pcblookup_hash(pcbinfo, faddr, fport, + laddr, lport, 1, NULL); + if (inp) { + if (inp->nat_owner) { + owner_id = inp->nat_owner; + *cookie = inp->nat_cookie; + } + else { + pcbinfo->last_pcb = inp; + owner_id = INPCB_OWNED_BY_X; + } + } + else + owner_id = INPCB_NO_OWNER; + + return owner_id; + } +} + +int +in_pcb_new_share_client(struct inpcbinfo *pcbinfo, u_char *owner_id) +{ + + int i; + + + for (i=0; i < INPCB_MAX_IDS; i++) { + if ((pcbinfo->all_owners & (1 << i)) == 0) { + pcbinfo->all_owners |= (1 << i); + *owner_id = (1 << i); + return 0; + } + } + + return ENOSPC; +} + +int +in_pcb_rem_share_client(struct inpcbinfo *pcbinfo, u_char owner_id) +{ + struct inpcb *inp; + + + if (pcbinfo->all_owners & owner_id) { + pcbinfo->all_owners &= ~owner_id; + for (inp = pcbinfo->listhead->lh_first; inp != NULL; inp = inp->inp_list.le_next) { + if (inp->nat_owner & owner_id) { + if (inp->nat_owner == owner_id) + /* + * Deallocate the pcb + */ + in_pcbdetach(inp); + else + inp->nat_owner &= ~owner_id; + } + } + } + else { + return ENOENT; + } + + return 0; +} + +void in_pcb_nat_init(struct inpcbinfo *pcbinfo, int afamily, + int pfamily, int protocol) +{ + bzero(&pcbinfo->nat_dummy_socket, sizeof(struct socket)); + pcbinfo->nat_dummy_socket.so_proto = pffindproto(afamily, pfamily, protocol); + pcbinfo->all_owners = 0; +} diff --git a/bsd/netinet/in_pcb.h b/bsd/netinet/in_pcb.h new file mode 100644 index 
000000000..13bc26209 --- /dev/null +++ b/bsd/netinet/in_pcb.h @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_pcb.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_IN_PCB_H_ +#define _NETINET_IN_PCB_H_ + +#include +#if IPSEC +#include +#endif + +#define in6pcb inpcb /* for KAME src sync over BSD*'s */ +#define in6p_sp inp_sp /* for KAME src sync over BSD*'s */ + +/* + * Common structure pcb for internet protocol implementation. + * Here are stored pointers to local and foreign host table + * entries, local and foreign socket numbers, and pointers + * up (to a socket structure) and down (to a protocol-specific) + * control block. + */ +LIST_HEAD(inpcbhead, inpcb); +LIST_HEAD(inpcbporthead, inpcbport); +typedef u_quad_t inp_gen_t; + +/* + * PCB with AF_INET6 null bind'ed laddr can receive AF_INET input packet. + * So, AF_INET6 null laddr is also used as AF_INET null laddr, + * by utilize following structure. 
(At last, same as INRIA) + */ +struct in_addr_4in6 { + u_int32_t ia46_pad32[3]; + struct in_addr ia46_addr4; +}; + +/* + * NB: the zone allocator is type-stable EXCEPT FOR THE FIRST TWO LONGS + * of the structure. Therefore, it is important that the members in + * that position not contain any information which is required to be + * stable. + */ +struct icmp6_filter; + +struct inpcb { + LIST_ENTRY(inpcb) inp_hash; /* hash list */ + struct in_addr inp_faddr; /* foreign host table entry */ + struct in_addr inp_laddr; /* local host table entry */ + u_short inp_fport; /* foreign port */ + u_short inp_lport; /* local port */ + LIST_ENTRY(inpcb) inp_list; /* list for all PCBs of this proto */ + caddr_t inp_ppcb; /* pointer to per-protocol pcb */ + struct inpcbinfo *inp_pcbinfo; /* PCB list info */ + struct socket *inp_socket; /* back pointer to socket */ + u_char nat_owner; /* Used to NAT TCP/UDP traffic */ + u_long nat_cookie; /* Cookie stored and returned to NAT */ + LIST_ENTRY(inpcb) inp_portlist; /* list for this PCB's local port */ + struct inpcbport *inp_phd; /* head of this list */ + inp_gen_t inp_gencnt; /* generation count of this instance */ + int inp_flags; /* generic IP/datagram flags */ + u_int32_t inp_flow; + + u_char inp_vflag; +#define INP_IPV4 0x1 +#define INP_IPV6 0x2 + + u_char inp_ip_ttl; /* time to live proto */ + u_char inp_ip_p; /* protocol proto */ + /* protocol dependent part */ + union { + /* foreign host table entry */ + struct in_addr_4in6 inp46_foreign; + struct in6_addr inp6_foreign; + } inp_dependfaddr; + union { + /* local host table entry */ + struct in_addr_4in6 inp46_local; + struct in6_addr inp6_local; + } inp_dependladdr; + union { + /* placeholder for routing entry */ + struct route inp4_route; + struct route_in6 inp6_route; + } inp_dependroute; + struct { + /* type of service proto */ + u_char inp4_ip_tos; + /* IP options */ + struct mbuf *inp4_options; + /* IP multicast options */ + struct ip_moptions *inp4_moptions; + } 
inp_depend4; +#define inp_faddr inp_dependfaddr.inp46_foreign.ia46_addr4 +#define inp_laddr inp_dependladdr.inp46_local.ia46_addr4 +#define inp_route inp_dependroute.inp4_route +#define inp_ip_tos inp_depend4.inp4_ip_tos +#define inp_options inp_depend4.inp4_options +#define inp_moptions inp_depend4.inp4_moptions + struct { + /* IP options */ + struct mbuf *inp6_options; + /* IP6 options for incoming packets */ + struct ip6_recvpktopts *inp6_inputopts; + /* IP6 options for outgoing packets */ + struct ip6_pktopts *inp6_outputopts; + /* IP multicast options */ + struct ip6_moptions *inp6_moptions; + /* ICMPv6 code type filter */ + struct icmp6_filter *inp6_icmp6filt; + /* IPV6_CHECKSUM setsockopt */ + int inp6_cksum; + u_short inp6_ifindex; + short inp6_hops; + } inp_depend6; +#define in6p_faddr inp_dependfaddr.inp6_foreign +#define in6p_laddr inp_dependladdr.inp6_local +#define in6p_route inp_dependroute.inp6_route +#define in6p_hops inp_depend6.inp6_hops /* default hop limit */ +#define in6p_ip6_nxt inp_ip_p +#define in6p_flowinfo inp_flow +#define in6p_vflag inp_vflag +#define in6p_options inp_depend6.inp6_options +#define in6p_inputopts inp_depend6.inp6_inputopts +#define in6p_outputopts inp_depend6.inp6_outputopts +#define in6p_moptions inp_depend6.inp6_moptions +#define in6p_icmp6filt inp_depend6.inp6_icmp6filt +#define in6p_cksum inp_depend6.inp6_cksum +#define inp6_ifindex inp_depend6.inp6_ifindex +#define in6p_flags inp_flags /* for KAME src sync over BSD*'s */ +#define in6p_socket inp_socket /* for KAME src sync over BSD*'s */ +#define in6p_lport inp_lport /* for KAME src sync over BSD*'s */ +#define in6p_fport inp_fport /* for KAME src sync over BSD*'s */ +#define in6p_ppcb inp_ppcb /* for KAME src sync over BSD*'s */ +#if IPSEC + struct inpcbpolicy *inp_sp; +#endif + int hash_element; /* Array index of pcb's hash list */ + caddr_t inp_saved_ppcb; /* place to save pointer while cached */ +}; +/* + * The range of the generation count, as used in this 
implementation, + * is 9e19. We would have to create 300 billion connections per + * second for this number to roll over in a year. This seems sufficiently + * unlikely that we simply don't concern ourselves with that possibility. + */ + +/* + * Interface exported to userland by various protocols which use + * inpcbs. Hack alert -- only define if struct xsocket is in scope. + */ +#ifdef _SYS_SOCKETVAR_H_ +struct xinpcb { + size_t xi_len; /* length of this structure */ + struct inpcb xi_inp; + struct xsocket xi_socket; + u_quad_t xi_alignment_hack; +}; + +struct xinpgen { + size_t xig_len; /* length of this structure */ + u_int xig_count; /* number of PCBs at this time */ + inp_gen_t xig_gen; /* generation count at this time */ + so_gen_t xig_sogen; /* socket generation count at this time */ +}; +#endif /* _SYS_SOCKETVAR_H_ */ + +struct inpcbport { + LIST_ENTRY(inpcbport) phd_hash; + struct inpcbhead phd_pcblist; + u_short phd_port; +}; + +struct inpcbinfo { /* XXX documentation, prefixes */ + struct inpcbhead *hashbase; + u_long hashsize; /* in elements */ + u_long hashmask; + struct inpcbporthead *porthashbase; + u_long porthashmask; + struct inpcbhead *listhead; + u_short lastport; + u_short lastlow; + u_short lasthi; + void *ipi_zone; /* zone to allocate pcbs from */ + u_int ipi_count; /* number of pcbs in this list */ + u_quad_t ipi_gencnt; /* current generation count */ + u_char all_owners; + struct socket nat_dummy_socket; + struct inpcb *last_pcb; + caddr_t dummy_cb; +}; + +#define INP_PCBHASH(faddr, lport, fport, mask) \ + (((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask)) +#define INP_PCBPORTHASH(lport, mask) \ + (ntohs((lport)) & (mask)) + +/* flags in inp_flags: */ +#define INP_RECVOPTS 0x01 /* receive incoming IP options */ +#define INP_RECVRETOPTS 0x02 /* receive IP options for reply */ +#define INP_RECVDSTADDR 0x04 /* receive IP dst address */ +#define INP_HDRINCL 0x08 /* user supplies entire IP header */ +#define INP_HIGHPORT 0x10 /* 
user wants "high" port binding */ +#define INP_LOWPORT 0x20 /* user wants "low" port binding */ +#define INP_ANONPORT 0x40 /* port chosen for user */ +#define INP_RECVIF 0x80 /* receive incoming interface */ +#define INP_MTUDISC 0x100 /* user can do MTU discovery */ +#define INP_STRIPHDR 0x200 /* drop receive of raw IP header */ +#define INP_FAITH 0x400 /* accept FAITH'ed connections */ +#define IN6P_PKTINFO 0x010000 /* receive IP6 dst and I/F */ +#define IN6P_HOPLIMIT 0x020000 /* receive hoplimit */ +#define IN6P_HOPOPTS 0x040000 /* receive hop-by-hop options */ +#define IN6P_DSTOPTS 0x080000 /* receive dst options after rthdr */ +#define IN6P_RTHDR 0x100000 /* receive routing header */ +#define IN6P_RTHDRDSTOPTS 0x200000 /* receive dstoptions before rthdr */ +#define IN6P_BINDV6ONLY 0x10000000 /* do not grab IPv4 traffic */ +#define IN6P_MINMTU 0x20000000 /* use minimum MTU */ + +#define INP_CONTROLOPTS (INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|\ + INP_RECVIF|\ + IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|\ + IN6P_DSTOPTS|IN6P_RTHDR|IN6P_RTHDRDSTOPTS) +#define INP_UNMAPPABLEOPTS (IN6P_HOPOPTS|IN6P_DSTOPTS|IN6P_RTHDR) + + /* for KAME src sync over BSD*'s */ +#define IN6P_HIGHPORT INP_HIGHPORT +#define IN6P_LOWPORT INP_LOWPORT +#define IN6P_ANONPORT INP_ANONPORT +#define IN6P_RECVIF INP_RECVIF +#define IN6P_MTUDISC INP_MTUDISC +#define IN6P_FAITH INP_FAITH +#define IN6P_CONTROLOPTS INP_CONTROLOPTS + /* + * socket AF version is {newer than,or include} + * actual datagram AF version + */ + +#define INPLOOKUP_WILDCARD 1 +#define INPCB_ALL_OWNERS 0xff +#define INPCB_NO_OWNER 0x0 +#define INPCB_OWNED_BY_X 0x80 +#define INPCB_MAX_IDS 7 + +#define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb) +#define sotoin6pcb(so) sotoinpcb(so) /* for KAME src sync over BSD*'s */ + +#define INP_SOCKAF(so) so->so_proto->pr_domain->dom_family + +#define INP_CHECK_SOCKAF(so, af) \ + (INP_SOCKAF(so) == af) + +#ifdef KERNEL +extern int ipport_lowfirstauto; +extern int ipport_lowlastauto; 
+extern int ipport_firstauto; +extern int ipport_lastauto; +extern int ipport_hifirstauto; +extern int ipport_hilastauto; + +void in_losing __P((struct inpcb *)); +int in_pcballoc __P((struct socket *, struct inpcbinfo *, struct proc *)); +int in_pcbbind __P((struct inpcb *, struct sockaddr *, struct proc *)); +int in_pcbconnect __P((struct inpcb *, struct sockaddr *, struct proc *)); +void in_pcbdetach __P((struct inpcb *)); +void in_pcbdisconnect __P((struct inpcb *)); +int in_pcbinshash __P((struct inpcb *)); +int in_pcbladdr __P((struct inpcb *, struct sockaddr *, + struct sockaddr_in **)); +struct inpcb * + in_pcblookup_local __P((struct inpcbinfo *, + struct in_addr, u_int, int)); +struct inpcb * + in_pcblookup_hash __P((struct inpcbinfo *, + struct in_addr, u_int, struct in_addr, u_int, int, struct ifnet *)); +void in_pcbnotify __P((struct inpcbhead *, struct sockaddr *, + u_int, struct in_addr, u_int, int, void (*)(struct inpcb *, int))); +void in_pcbrehash __P((struct inpcb *)); +int in_setpeeraddr __P((struct socket *so, struct sockaddr **nam)); +int in_setsockaddr __P((struct socket *so, struct sockaddr **nam)); + +int +in_pcb_grab_port __P((struct inpcbinfo *pcbinfo, + u_short options, + struct in_addr laddr, + u_short *lport, + struct in_addr faddr, + u_short fport, + u_int cookie, + u_char owner_id)); + +int +in_pcb_letgo_port __P((struct inpcbinfo *pcbinfo, + struct in_addr laddr, + u_short lport, + struct in_addr faddr, + u_short fport, u_char owner_id)); + +u_char +in_pcb_get_owner __P((struct inpcbinfo *pcbinfo, + struct in_addr laddr, + u_short lport, + struct in_addr faddr, + u_short fport, + u_int *cookie)); + +void in_pcb_nat_init(struct inpcbinfo *pcbinfo, int afamily, int pfamily, + int protocol); + +int +in_pcb_new_share_client(struct inpcbinfo *pcbinfo, u_char *owner_id); + +int +in_pcb_rem_share_client(struct inpcbinfo *pcbinfo, u_char owner_id); + +void in_pcbremlists __P((struct inpcb *inp)); +#if INET6 +int in6_selecthlim __P((struct 
inpcb *, struct ifnet *)); +#endif + +#endif /* KERNEL */ + +#endif /* !_NETINET_IN_PCB_H_ */ diff --git a/bsd/netinet/in_proto.c b/bsd/netinet/in_proto.c new file mode 100644 index 000000000..4ac5b78fc --- /dev/null +++ b/bsd/netinet/in_proto.c @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_proto.c 8.2 (Berkeley) 2/9/95 + */ + +#if ISFB31 +#include "opt_ipdivert.h" +#include "opt_ipx.h" +#endif + +#include +#include +#include +#include +#include + + +#include + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * TCP/IP protocol family: IP, ICMP, UDP, TCP. 
+ */ + +#if IPSEC +#include +#include +#if IPSEC_ESP +#include +#endif +#include +#endif /* IPSEC */ + +#include "gif.h" +#if NGIF > 0 +#include +#endif + +#if IPXIP +#include +#endif + +#if NSIP +#include +#include +#endif + +#if PM +void pm_init __P((void)); +void pm_input __P((struct mbuf *, int)); +int pm_ctloutput __P((int, struct socket *, int, int, struct mbuf **)); +struct pr_usrreqs pm_usrreqs; +#endif + +#if NATPT +void natpt_init __P((void)); +int natpt_ctloutput __P((int, struct socket *, int, int, struct mbuf **)); +struct pr_usrreqs natpt_usrreqs; +#endif + +extern struct domain inetdomain; +static struct pr_usrreqs nousrreqs; + +struct protosw inetsw[] = { +{ 0, &inetdomain, 0, 0, + 0, 0, 0, 0, + 0, + ip_init, 0, ip_slowtimo, ip_drain, + 0, &nousrreqs +}, +{ SOCK_DGRAM, &inetdomain, IPPROTO_UDP, PR_ATOMIC|PR_ADDR, + udp_input, 0, udp_ctlinput, ip_ctloutput, + 0, + udp_init, 0, 0, 0, + 0, &udp_usrreqs +}, +{ SOCK_STREAM, &inetdomain, IPPROTO_TCP, + PR_CONNREQUIRED|PR_IMPLOPCL|PR_WANTRCVD, + tcp_input, 0, tcp_ctlinput, tcp_ctloutput, + 0, + tcp_init, tcp_fasttimo, tcp_slowtimo, tcp_drain, + 0, &tcp_usrreqs +}, +{ SOCK_RAW, &inetdomain, IPPROTO_RAW, PR_ATOMIC|PR_ADDR, + rip_input, 0, rip_ctlinput, rip_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}, +{ SOCK_RAW, &inetdomain, IPPROTO_ICMP, PR_ATOMIC|PR_ADDR, + icmp_input, 0, 0, rip_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}, +{ SOCK_RAW, &inetdomain, IPPROTO_IGMP, PR_ATOMIC|PR_ADDR, + igmp_input, 0, 0, rip_ctloutput, + 0, + igmp_init, igmp_fasttimo, igmp_slowtimo, 0, + 0, &rip_usrreqs +}, +{ SOCK_RAW, &inetdomain, IPPROTO_RSVP, PR_ATOMIC|PR_ADDR, + rsvp_input, 0, 0, rip_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}, +#if IPSEC +{ SOCK_RAW, &inetdomain, IPPROTO_AH, PR_ATOMIC|PR_ADDR, + ah4_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#if IPSEC_ESP +{ SOCK_RAW, &inetdomain, IPPROTO_ESP, PR_ATOMIC|PR_ADDR, + esp4_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#endif +{ 
SOCK_RAW, &inetdomain, IPPROTO_IPCOMP, PR_ATOMIC|PR_ADDR, + ipcomp4_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#endif /* IPSEC */ +#if NGIF > 0 +{ SOCK_RAW, &inetdomain, IPPROTO_IPV4, PR_ATOMIC|PR_ADDR, + in_gif_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +# if INET6 +{ SOCK_RAW, &inetdomain, IPPROTO_IPV6, PR_ATOMIC|PR_ADDR, + in_gif_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#endif +#else /*NGIF*/ +{ SOCK_RAW, &inetdomain, IPPROTO_IPIP, PR_ATOMIC|PR_ADDR, + ipip_input, 0, 0, rip_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}, +#endif /*NGIF*/ +#if IPDIVERT +{ SOCK_RAW, &inetdomain, IPPROTO_DIVERT, PR_ATOMIC|PR_ADDR, + div_input, 0, 0, ip_ctloutput, + 0, + div_init, 0, 0, 0, + 0, &div_usrreqs, +}, +#endif +#if TPIP +{ SOCK_SEQPACKET,&inetdomain, IPPROTO_TP, PR_CONNREQUIRED|PR_WANTRCVD, + tpip_input, 0, tpip_ctlinput, tp_ctloutput, + 0, tp_usrreq, + tp_init, 0, tp_slowtimo, tp_drain, +}, +#endif +/* EON (ISO CLNL over IP) */ +#if EON +{ SOCK_RAW, &inetdomain, IPPROTO_EON, 0, + eoninput, 0, eonctlinput, 0, + 0, + eonprotoinit, 0, 0, 0, +}, +#endif +#if IPXIP +{ SOCK_RAW, &inetdomain, IPPROTO_IDP, PR_ATOMIC|PR_ADDR, + ipxip_input, 0, ipxip_ctlinput, 0, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}, +#endif +#if NSIP +{ SOCK_RAW, &inetdomain, IPPROTO_IDP, PR_ATOMIC|PR_ADDR, + idpip_input, 0, nsip_ctlinput, 0, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}, +#endif + /* raw wildcard */ +{ SOCK_RAW, &inetdomain, 0, PR_ATOMIC|PR_ADDR, + rip_input, 0, 0, rip_ctloutput, + 0, + rip_init, 0, 0, 0, + 0, &rip_usrreqs +}, +}; + +#if NGIF > 0 +struct protosw in_gif_protosw = +{ SOCK_RAW, &inetdomain, 0/*IPPROTO_IPV[46]*/, PR_ATOMIC|PR_ADDR, + in_gif_input, rip_output, 0, rip_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip_usrreqs +}; +#endif /*NGIF*/ + +extern int in_inithead __P((void **, int)); + +int in_proto_count = (sizeof (inetsw) / sizeof (struct protosw)); + +extern void in_dinit(void); +/* A routing init function, and a header size */ +struct domain 
inetdomain = + { AF_INET, "internet", in_dinit, 0, 0, + inetsw, 0, + in_inithead, 32, sizeof(struct sockaddr_in), + sizeof(struct tcpiphdr), 0 + }; + +DOMAIN_SET(inet); + + +SYSCTL_NODE(_net, PF_INET, inet, CTLFLAG_RW, 0, + "Internet Family"); + +SYSCTL_NODE(_net_inet, IPPROTO_IP, ip, CTLFLAG_RW, 0, "IP"); +SYSCTL_NODE(_net_inet, IPPROTO_ICMP, icmp, CTLFLAG_RW, 0, "ICMP"); +SYSCTL_NODE(_net_inet, IPPROTO_UDP, udp, CTLFLAG_RW, 0, "UDP"); +SYSCTL_NODE(_net_inet, IPPROTO_TCP, tcp, CTLFLAG_RW, 0, "TCP"); +SYSCTL_NODE(_net_inet, IPPROTO_IGMP, igmp, CTLFLAG_RW, 0, "IGMP"); +#if IPSEC +SYSCTL_NODE(_net_inet, IPPROTO_AH, ipsec, CTLFLAG_RW, 0, "IPSEC"); +#endif /* IPSEC */ +SYSCTL_NODE(_net_inet, IPPROTO_RAW, raw, CTLFLAG_RW, 0, "RAW"); +#if IPDIVERT +SYSCTL_NODE(_net_inet, IPPROTO_DIVERT, div, CTLFLAG_RW, 0, "DIVERT"); +#endif + diff --git a/bsd/netinet/in_rmx.c b/bsd/netinet/in_rmx.c new file mode 100644 index 000000000..f1a41c2f6 --- /dev/null +++ b/bsd/netinet/in_rmx.c @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1994, 1995 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* + * This code does two things necessary for the enhanced TCP metrics to + * function in a useful manner: + * 1) It marks all non-host routes as `cloning', thus ensuring that + * every actual reference to such a route actually gets turned + * into a reference to a host route to the specific destination + * requested. 
+ * 2) When such routes lose all their references, it arranges for them + * to be deleted in some random collection of circumstances, so that + * a large quantity of stale routing data is not kept in kernel memory + * indefinitely. See in_rtqtimo() below for the exact mechanism. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +extern int in_inithead __P((void **head, int off)); + +#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */ + +/* + * Do what we need to do when inserting a route. + */ +static struct radix_node * +in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, + struct radix_node *treenodes) +{ + struct rtentry *rt = (struct rtentry *)treenodes; + struct sockaddr_in *sin = (struct sockaddr_in *)rt_key(rt); + struct radix_node *ret; + + /* + * For IP, all unicast non-host routes are automatically cloning. + */ + if(IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) + rt->rt_flags |= RTF_MULTICAST; + + if(!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) { + rt->rt_flags |= RTF_PRCLONING; + } + + /* + * A little bit of help for both IP output and input: + * For host routes, we make sure that RTF_BROADCAST + * is set for anything that looks like a broadcast address. + * This way, we can avoid an expensive call to in_broadcast() + * in ip_output() most of the time (because the route passed + * to ip_output() is almost always a host route). + * + * We also do the same for local addresses, with the thought + * that this might one day be used to speed up ip_input(). + * + * We also mark routes to multicast addresses as such, because + * it's easy to do and might be useful (but this is much more + * dubious since it's so easy to inspect the address). (This + * is done above.) 
+ */ + if (rt->rt_flags & RTF_HOST) { + if (in_broadcast(sin->sin_addr, rt->rt_ifp)) { + rt->rt_flags |= RTF_BROADCAST; + } else { +#define satosin(sa) ((struct sockaddr_in *)sa) + if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr + == sin->sin_addr.s_addr) + rt->rt_flags |= RTF_LOCAL; +#undef satosin + } + } + + if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU) + && rt->rt_ifp) + rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; + + ret = rn_addroute(v_arg, n_arg, head, treenodes); + if (ret == NULL && rt->rt_flags & RTF_HOST) { + struct rtentry *rt2; + /* + * We are trying to add a host route, but can't. + * Find out if it is because of an + * ARP entry and delete it if so. + */ + rt2 = rtalloc1((struct sockaddr *)sin, 0, + RTF_CLONING | RTF_PRCLONING); + if (rt2) { + if (rt2->rt_flags & RTF_LLINFO && + rt2->rt_flags & RTF_HOST && + rt2->rt_gateway && + rt2->rt_gateway->sa_family == AF_LINK) { + rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt2), + rt2->rt_gateway, + rt_mask(rt2), rt2->rt_flags, 0); + ret = rn_addroute(v_arg, n_arg, head, + treenodes); + } + RTFREE(rt2); + } + } + return ret; +} + +/* + * This code is the inverse of in_clsroute: on first reference, if we + * were managing the route, stop doing so and set the expiration timer + * back off again. 
+ */ +static struct radix_node * +in_matroute(void *v_arg, struct radix_node_head *head) +{ + struct radix_node *rn = rn_match(v_arg, head); + struct rtentry *rt = (struct rtentry *)rn; + + if(rt && rt->rt_refcnt == 0) { /* this is first reference */ + if(rt->rt_flags & RTPRF_OURS) { + rt->rt_flags &= ~RTPRF_OURS; + rt->rt_rmx.rmx_expire = 0; + } + } + return rn; +} + +int rtq_reallyold = 60*60; + /* one hour is ``really old'' */ +SYSCTL_INT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, + CTLFLAG_RW, &rtq_reallyold , 0, ""); + +int rtq_minreallyold = 10; + /* never automatically crank down to less */ +SYSCTL_INT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, + CTLFLAG_RW, &rtq_minreallyold , 0, ""); + +int rtq_toomany = 128; + /* 128 cached routes is ``too many'' */ +SYSCTL_INT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, + CTLFLAG_RW, &rtq_toomany , 0, ""); + + +/* + * On last reference drop, mark the route as belong to us so that it can be + * timed out. + */ +static void +in_clsroute(struct radix_node *rn, struct radix_node_head *head) +{ + struct rtentry *rt = (struct rtentry *)rn; + + if(!(rt->rt_flags & RTF_UP)) + return; /* prophylactic measures */ + + if((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) + return; + + if((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS)) + != RTF_WASCLONED) + return; + + /* + * As requested by David Greenman: + * If rtq_reallyold is 0, just delete the route without + * waiting for a timeout cycle to kill it. + */ + if(rtq_reallyold != 0) { + rt->rt_flags |= RTPRF_OURS; + rt->rt_rmx.rmx_expire = time_second + rtq_reallyold; + } else { + rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), + rt->rt_flags, 0); + } +} + +struct rtqk_arg { + struct radix_node_head *rnh; + int draining; + int killed; + int found; + int updating; + time_t nextstop; +}; + +/* + * Get rid of old routes. When draining, this deletes everything, even when + * the timeout is not expired yet. 
When updating, this makes sure that + * nothing has a timeout longer than the current value of rtq_reallyold. + */ +static int +in_rtqkill(struct radix_node *rn, void *rock) +{ + struct rtqk_arg *ap = rock; + struct rtentry *rt = (struct rtentry *)rn; + int err; + + if(rt->rt_flags & RTPRF_OURS) { + ap->found++; + + if(ap->draining || rt->rt_rmx.rmx_expire <= time_second) { + if(rt->rt_refcnt > 0) + panic("rtqkill route really not free"); + + err = rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), + rt->rt_flags, 0); + if(err) { + log(LOG_WARNING, "in_rtqkill: error %d\n", err); + } else { + ap->killed++; + } + } else { + if(ap->updating + && (rt->rt_rmx.rmx_expire - time_second + > rtq_reallyold)) { + rt->rt_rmx.rmx_expire = time_second + + rtq_reallyold; + } + ap->nextstop = lmin(ap->nextstop, + rt->rt_rmx.rmx_expire); + } + } + + return 0; +} + +static void +in_rtqtimo_funnel(void *rock) +{ + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + in_rtqtimo(rock); + (void) thread_funnel_set(network_flock, FALSE); + +} +#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */ +static int rtq_timeout = RTQ_TIMEOUT; + +static void +in_rtqtimo(void *rock) +{ + struct radix_node_head *rnh = rock; + struct rtqk_arg arg; + struct timeval atv; + static time_t last_adjusted_timeout = 0; + int s; + + arg.found = arg.killed = 0; + arg.rnh = rnh; + arg.nextstop = time_second + rtq_timeout; + arg.draining = arg.updating = 0; + s = splnet(); + rnh->rnh_walktree(rnh, in_rtqkill, &arg); + splx(s); + + /* + * Attempt to be somewhat dynamic about this: + * If there are ``too many'' routes sitting around taking up space, + * then crank down the timeout, and see if we can't make some more + * go away. However, we make sure that we will never adjust more + * than once in rtq_timeout seconds, to keep from cranking down too + * hard. 
+ */ + if((arg.found - arg.killed > rtq_toomany) + && (time_second - last_adjusted_timeout >= rtq_timeout) + && rtq_reallyold > rtq_minreallyold) { + rtq_reallyold = 2*rtq_reallyold / 3; + if(rtq_reallyold < rtq_minreallyold) { + rtq_reallyold = rtq_minreallyold; + } + + last_adjusted_timeout = time_second; +#if DIAGNOSTIC + log(LOG_DEBUG, "in_rtqtimo: adjusted rtq_reallyold to %d\n", + rtq_reallyold); +#endif + arg.found = arg.killed = 0; + arg.updating = 1; + s = splnet(); + rnh->rnh_walktree(rnh, in_rtqkill, &arg); + splx(s); + } + + atv.tv_usec = 0; + atv.tv_sec = arg.nextstop - time_second; + timeout(in_rtqtimo_funnel, rock, tvtohz(&atv)); + +} + +void +in_rtqdrain(void) +{ + struct radix_node_head *rnh = rt_tables[AF_INET]; + struct rtqk_arg arg; + int s; + arg.found = arg.killed = 0; + arg.rnh = rnh; + arg.nextstop = 0; + arg.draining = 1; + arg.updating = 0; + s = splnet(); + rnh->rnh_walktree(rnh, in_rtqkill, &arg); + splx(s); +} + +/* + * Initialize our routing tree. + */ +int +in_inithead(void **head, int off) +{ + struct radix_node_head *rnh; + + if (*head) + return 1; + + if(!rn_inithead(head, off)) + return 0; + + if(head != (void **)&rt_tables[AF_INET]) /* BOGUS! */ + return 1; /* only do this for the real routing table */ + + rnh = *head; + rnh->rnh_addaddr = in_addroute; + rnh->rnh_matchaddr = in_matroute; + rnh->rnh_close = in_clsroute; + in_rtqtimo(rnh); /* kick off timeout first time */ + return 1; +} + + +/* + * This zaps old routes when the interface goes down. + * Currently it doesn't delete static routes; there are + * arguments one could make for both behaviors. For the moment, + * we will adopt the Principle of Least Surprise and leave them + * alone (with the knowledge that this will not be enough for some + * people). The ones we really want to get rid of are things like ARP + * entries, since the user might down the interface, walk over to a completely + * different network, and plug back in. 
+ */ +struct in_ifadown_arg { + struct radix_node_head *rnh; + struct ifaddr *ifa; +}; + +static int +in_ifadownkill(struct radix_node *rn, void *xap) +{ + struct in_ifadown_arg *ap = xap; + struct rtentry *rt = (struct rtentry *)rn; + int err; + + if (rt->rt_ifa == ap->ifa && !(rt->rt_flags & RTF_STATIC)) { + /* + * We need to disable the automatic prune that happens + * in this case in rtrequest() because it will blow + * away the pointers that rn_walktree() needs in order + * continue our descent. We will end up deleting all + * the routes that rtrequest() would have in any case, + * so that behavior is not needed there. + */ + rt->rt_flags &= ~RTF_PRCLONING; + err = rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0); + if (err) { + log(LOG_WARNING, "in_ifadownkill: error %d\n", err); + } + } + return 0; +} + +int +in_ifadown(struct ifaddr *ifa) +{ + struct in_ifadown_arg arg; + struct radix_node_head *rnh; + + if (ifa->ifa_addr->sa_family != AF_INET) + return 1; + + arg.rnh = rnh = rt_tables[AF_INET]; + arg.ifa = ifa; + rnh->rnh_walktree(rnh, in_ifadownkill, &arg); + ifa->ifa_flags &= ~IFA_ROUTE; + return 0; +} diff --git a/bsd/netinet/in_systm.h b/bsd/netinet/in_systm.h new file mode 100644 index 000000000..ad9fea338 --- /dev/null +++ b/bsd/netinet/in_systm.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_systm.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_IN_SYSTM_H_ +#define _NETINET_IN_SYSTM_H_ + +/* + * Miscellaneous internetwork + * definitions for kernel. + */ + +/* + * Network types. + * + * Internally the system keeps counters in the headers with the bytes + * swapped so that VAX instructions will work on them. It reverses + * the bytes before transmission at each protocol level. The n_ types + * represent the types with the bytes in ``high-ender'' order. + */ +typedef u_int16_t n_short; /* short as received from the net */ +typedef u_int32_t n_long; /* long as received from the net */ + +typedef u_int32_t n_time; /* ms since 00:00 GMT, byte rev */ + +#ifdef KERNEL +n_time iptime __P((void)); +#endif + +#endif diff --git a/bsd/netinet/in_var.h b/bsd/netinet/in_var.h new file mode 100644 index 000000000..ad84f6e40 --- /dev/null +++ b/bsd/netinet/in_var.h @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1985, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_var.h 8.2 (Berkeley) 1/9/95 + */ + +#ifndef _NETINET_IN_VAR_H_ +#define _NETINET_IN_VAR_H_ + +#include +#include + +/* + * Interface address, Internet version. One of these structures + * is allocated for each Internet address on an interface. + * The ifaddr structure contains the protocol-independent part + * of the structure and is assumed to be first. + */ +struct in_ifaddr { + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + /* ia_{,sub}net{,mask} in host order */ + u_long ia_net; /* network number of interface */ + u_long ia_netmask; /* mask of net part */ + u_long ia_subnet; /* subnet number, including net */ + u_long ia_subnetmask; /* mask of subnet part */ + struct in_addr ia_netbroadcast; /* to recognize net broadcasts */ + TAILQ_ENTRY(in_ifaddr) ia_link; /* tailq macro glue */ + struct sockaddr_in ia_addr; /* reserve space for interface name */ + struct sockaddr_in ia_dstaddr; /* reserve space for broadcast addr */ +#define ia_broadaddr ia_dstaddr + struct sockaddr_in ia_sockmask; /* reserve space for general netmask */ +}; + +struct in_aliasreq { + char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct sockaddr_in ifra_addr; + struct sockaddr_in ifra_broadaddr; +#define ifra_dstaddr ifra_broadaddr + struct sockaddr_in ifra_mask; + u_long dlt; +}; + + +/* + * Event data, internet style. 
+ */ + +struct kev_in_data { + struct net_event_data link_data; + struct in_addr ia_addr; + u_long ia_net; /* network number of interface */ + u_long ia_netmask; /* mask of net part */ + u_long ia_subnet; /* subnet number, including net */ + u_long ia_subnetmask; /* mask of subnet part */ + struct in_addr ia_netbroadcast; /* to recognize net broadcasts */ + struct in_addr ia_dstaddr; +}; + + + +/* + * Define inet event subclass and specific inet events. + */ + +#define KEV_INET_SUBCLASS 1 + +#define KEV_INET_NEW_ADDR 1 +#define KEV_INET_CHANGED_ADDR 2 +#define KEV_INET_ADDR_DELETED 3 +#define KEV_INET_SIFDSTADDR 4 +#define KEV_INET_SIFBRDADDR 5 +#define KEV_INET_SIFNETMASK 6 + +/* + * Given a pointer to an in_ifaddr (ifaddr), + * return a pointer to the addr as a sockaddr_in. + */ +#define IA_SIN(ia) (&(((struct in_ifaddr *)(ia))->ia_addr)) +#define IA_DSTSIN(ia) (&(((struct in_ifaddr *)(ia))->ia_dstaddr)) + +#define IN_LNAOF(in, ifa) \ + ((ntohl((in).s_addr) & ~((struct in_ifaddr *)(ifa)->ia_subnetmask)) + + +#ifdef KERNEL +extern TAILQ_HEAD(in_ifaddrhead, in_ifaddr) in_ifaddrhead; +extern struct ifqueue ipintrq; /* ip packet input queue */ +extern struct in_addr zeroin_addr; +extern u_char inetctlerrmap[]; + +/* + * Macro for finding the interface (ifnet structure) corresponding to one + * of our IP addresses. + */ +#define INADDR_TO_IFP(addr, ifp) \ + /* struct in_addr addr; */ \ + /* struct ifnet *ifp; */ \ +{ \ + register struct in_ifaddr *ia; \ +\ + for (ia = in_ifaddrhead.tqh_first; \ + ia != NULL && ((ia->ia_ifp->if_flags & IFF_POINTOPOINT)? \ + IA_DSTSIN(ia):IA_SIN(ia))->sin_addr.s_addr != (addr).s_addr; \ + ia = ia->ia_link.tqe_next) \ + continue; \ + if (ia == NULL) \ + for (ia = in_ifaddrhead.tqh_first; \ + ia != NULL; \ + ia = ia->ia_link.tqe_next) \ + if (ia->ia_ifp->if_flags & IFF_POINTOPOINT && \ + IA_SIN(ia)->sin_addr.s_addr == (addr).s_addr) \ + break; \ + (ifp) = (ia == NULL) ? 
NULL : ia->ia_ifp; \ +} + +/* + * Macro for finding the internet address structure (in_ifaddr) corresponding + * to a given interface (ifnet structure). + */ +#define IFP_TO_IA(ifp, ia) \ + /* struct ifnet *ifp; */ \ + /* struct in_ifaddr *ia; */ \ +{ \ + for ((ia) = in_ifaddrhead.tqh_first; \ + (ia) != NULL && (ia)->ia_ifp != (ifp); \ + (ia) = (ia)->ia_link.tqe_next) \ + continue; \ +} +#endif + +/* + * This information should be part of the ifnet structure but we don't wish + * to change that - as it might break a number of things + */ + +struct router_info { + struct ifnet *rti_ifp; + int rti_type; /* type of router which is querier on this interface */ + int rti_time; /* # of slow timeouts since last old query */ + struct router_info *rti_next; +}; + +/* + * Internet multicast address structure. There is one of these for each IP + * multicast group to which this host belongs on a given network interface. + * For every entry on the interface's if_multiaddrs list which represents + * an IP multicast group, there is one of these structures. They are also + * kept on a system-wide list to make it easier to keep our legacy IGMP code + * compatible with the rest of the world (see IN_FIRST_MULTI et al, below). + */ +struct in_multi { + LIST_ENTRY(in_multi) inm_link; /* queue macro glue */ + struct in_addr inm_addr; /* IP multicast address, convenience */ + struct ifnet *inm_ifp; /* back pointer to ifnet */ + struct ifmultiaddr *inm_ifma; /* back pointer to ifmultiaddr */ + u_int inm_timer; /* IGMP membership report timer */ + u_int inm_state; /* state of the membership */ + struct router_info *inm_rti; /* router info*/ +}; + +#ifdef KERNEL + +#ifdef SYSCTL_DECL +SYSCTL_DECL(_net_inet_ip); +SYSCTL_DECL(_net_inet_raw); +#endif + +extern LIST_HEAD(in_multihead, in_multi) in_multihead; + +/* + * Structure used by macros below to remember position when stepping through + * all of the in_multi records. 
+ */ +struct in_multistep { + struct in_multi *i_inm; +}; + +/* + * Macro for looking up the in_multi record for a given IP multicast address + * on a given interface. If no matching record is found, "inm" is set null. + */ +#define IN_LOOKUP_MULTI(addr, ifp, inm) \ + /* struct in_addr addr; */ \ + /* struct ifnet *ifp; */ \ + /* struct in_multi *inm; */ \ +do { \ + register struct ifmultiaddr *ifma; \ +\ + for (ifma = (ifp)->if_multiaddrs.lh_first; ifma; \ + ifma = ifma->ifma_link.le_next) { \ + if (ifma->ifma_addr->sa_family == AF_INET \ + && ((struct sockaddr_in *)ifma->ifma_addr)->sin_addr.s_addr == \ + (addr).s_addr) \ + break; \ + } \ + (inm) = ifma ? ifma->ifma_protospec : 0; \ +} while(0) + +/* + * Macro to step through all of the in_multi records, one at a time. + * The current position is remembered in "step", which the caller must + * provide. IN_FIRST_MULTI(), below, must be called to initialize "step" + * and get the first record. Both macros return a NULL "inm" when there + * are no remaining records. 
+ */ +#define IN_NEXT_MULTI(step, inm) \ + /* struct in_multistep step; */ \ + /* struct in_multi *inm; */ \ +do { \ + if (((inm) = (step).i_inm) != NULL) \ + (step).i_inm = (step).i_inm->inm_link.le_next; \ +} while(0) + +#define IN_FIRST_MULTI(step, inm) \ + /* struct in_multistep step; */ \ + /* struct in_multi *inm; */ \ +do { \ + (step).i_inm = in_multihead.lh_first; \ + IN_NEXT_MULTI((step), (inm)); \ +} while(0) + +struct route; +struct in_multi *in_addmulti __P((struct in_addr *, struct ifnet *)); +void in_delmulti __P((struct in_multi *)); +int in_control __P((struct socket *, u_long, caddr_t, struct ifnet *, + struct proc *)); +void in_rtqdrain __P((void)); +void ip_input __P((struct mbuf *)); +int in_ifadown __P((struct ifaddr *ifa)); +void in_ifscrub __P((struct ifnet *, struct in_ifaddr *)); +int ipflow_fastforward __P((struct mbuf *)); +void ipflow_create __P((const struct route *, struct mbuf *)); +void ipflow_slowtimo __P((void)); + +#endif /* KERNEL */ +#if INET6 +/* INET6 stuff */ +#include +#endif + +#endif /* _NETINET_IN_VAR_H_ */ diff --git a/bsd/netinet/ip.h b/bsd/netinet/ip.h new file mode 100644 index 000000000..bf81e1aa5 --- /dev/null +++ b/bsd/netinet/ip.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip.h 8.2 (Berkeley) 6/1/94 + */ + +#ifndef _NETINET_IP_H_ +#define _NETINET_IP_H_ + +/* + * Definitions for internet protocol version 4. + * Per RFC 791, September 1981. + */ +#define IPVERSION 4 + +/* + * Structure of an internet header, naked of options. + */ +struct ip { +#ifdef _IP_VHL + u_char ip_vhl; /* version << 4 | header length >> 2 */ +#else +#if BYTE_ORDER == LITTLE_ENDIAN + u_int ip_hl:4, /* header length */ + ip_v:4; /* version */ +#endif +#if BYTE_ORDER == BIG_ENDIAN + u_int ip_v:4, /* version */ + ip_hl:4; /* header length */ +#endif +#endif /* not _IP_VHL */ + u_char ip_tos; /* type of service */ + u_short ip_len; /* total length */ + u_short ip_id; /* identification */ + u_short ip_off; /* fragment offset field */ +#define IP_RF 0x8000 /* reserved fragment flag */ +#define IP_DF 0x4000 /* dont fragment flag */ +#define IP_MF 0x2000 /* more fragments flag */ +#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ + u_char ip_ttl; /* time to live */ + u_char ip_p; /* protocol */ + u_short ip_sum; /* checksum */ + struct in_addr ip_src,ip_dst; /* source and dest address */ +}; + +#ifdef _IP_VHL +#define IP_MAKE_VHL(v, hl) ((v) << 4 | (hl)) +#define IP_VHL_HL(vhl) ((vhl) & 0x0f) +#define IP_VHL_V(vhl) ((vhl) >> 4) +#define IP_VHL_BORING 0x45 +#endif + +#define IP_MAXPACKET 65535 /* maximum packet size */ + +/* + * Definitions for IP type of service (ip_tos) + */ +#define IPTOS_LOWDELAY 0x10 +#define 
IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_MINCOST 0x02 +#if 1 /* ALTQ_ECN */ +/* ECN bits proposed by Sally Floyd */ +#define IPTOS_CE 0x01 /* congestion experienced */ +#define IPTOS_ECT 0x02 /* ECN-capable transport */ +#endif + +/* + * Definitions for IP precedence (also in ip_tos) (hopefully unused) + */ +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 + +/* + * Definitions for options. + */ +#define IPOPT_COPIED(o) ((o)&0x80) +#define IPOPT_CLASS(o) ((o)&0x60) +#define IPOPT_NUMBER(o) ((o)&0x1f) + +#define IPOPT_CONTROL 0x00 +#define IPOPT_RESERVED1 0x20 +#define IPOPT_DEBMEAS 0x40 +#define IPOPT_RESERVED2 0x60 + +#define IPOPT_EOL 0 /* end of option list */ +#define IPOPT_NOP 1 /* no operation */ + +#define IPOPT_RR 7 /* record packet route */ +#define IPOPT_TS 68 /* timestamp */ +#define IPOPT_SECURITY 130 /* provide s,c,h,tcc */ +#define IPOPT_LSRR 131 /* loose source route */ +#define IPOPT_SATID 136 /* satnet id */ +#define IPOPT_SSRR 137 /* strict source route */ +#define IPOPT_RA 148 /* router alert */ + +/* + * Offsets to fields in options other than EOL and NOP. + */ +#define IPOPT_OPTVAL 0 /* option ID */ +#define IPOPT_OLEN 1 /* option length */ +#define IPOPT_OFFSET 2 /* offset within option */ +#define IPOPT_MINOFF 4 /* min value of above */ + +/* + * Time stamp option structure. 
+ */ +struct ip_timestamp { + u_char ipt_code; /* IPOPT_TS */ + u_char ipt_len; /* size of structure (variable) */ + u_char ipt_ptr; /* index of current entry */ +#if BYTE_ORDER == LITTLE_ENDIAN + u_int ipt_flg:4, /* flags, see below */ + ipt_oflw:4; /* overflow counter */ +#endif +#if BYTE_ORDER == BIG_ENDIAN + u_int ipt_oflw:4, /* overflow counter */ + ipt_flg:4; /* flags, see below */ +#endif + union ipt_timestamp { + n_long ipt_time[1]; + struct ipt_ta { + struct in_addr ipt_addr; + n_long ipt_time; + } ipt_ta[1]; + } ipt_timestamp; +}; + +/* flag bits for ipt_flg */ +#define IPOPT_TS_TSONLY 0 /* timestamps only */ +#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */ +#define IPOPT_TS_PRESPEC 3 /* specified modules only */ + +/* bits for security (not byte swapped) */ +#define IPOPT_SECUR_UNCLASS 0x0000 +#define IPOPT_SECUR_CONFID 0xf135 +#define IPOPT_SECUR_EFTO 0x789a +#define IPOPT_SECUR_MMMM 0xbc4d +#define IPOPT_SECUR_RESTR 0xaf13 +#define IPOPT_SECUR_SECRET 0xd788 +#define IPOPT_SECUR_TOPSECRET 0x6bc5 + +/* + * Internet implementation parameters. + */ +#define MAXTTL 255 /* maximum time to live (seconds) */ +#define IPDEFTTL 64 /* default ttl, from RFC 1340 */ +#define IPFRAGTTL 60 /* time to live for frags, slowhz */ +#define IPTTLDEC 1 /* subtracted when forwarding */ + +#define IP_MSS 576 /* default maximum segment size */ + +#endif diff --git a/bsd/netinet/ip6.h b/bsd/netinet/ip6.h new file mode 100644 index 000000000..88508d6f9 --- /dev/null +++ b/bsd/netinet/ip6.h @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: ip6.h,v 1.6 2000/02/26 12:53:07 jinmei Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_IP6_H_ +#define _NETINET_IP6_H_ + +/* + * Definition for internet protocol version 6. + * RFC 2460 + */ + +struct ip6_hdr { + union { + struct ip6_hdrctl { + u_int32_t ip6_un1_flow; /* 20 bits of flow-ID */ + u_int16_t ip6_un1_plen; /* payload length */ + u_int8_t ip6_un1_nxt; /* next header */ + u_int8_t ip6_un1_hlim; /* hop limit */ + } ip6_un1; + u_int8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */ + } ip6_ctlun; + struct in6_addr ip6_src; /* source address */ + struct in6_addr ip6_dst; /* destination address */ +}; + +#define ip6_vfc ip6_ctlun.ip6_un2_vfc +#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow +#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen +#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt +#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim +#define ip6_hops ip6_ctlun.ip6_un1.ip6_un1_hlim + +#define IPV6_VERSION 0x60 +#define IPV6_VERSION_MASK 0xf0 + +#if BYTE_ORDER == BIG_ENDIAN +#define IPV6_FLOWINFO_MASK 0x0fffffff /* flow info (28 bits) */ +#define IPV6_FLOWLABEL_MASK 0x000fffff /* flow label (20 bits) */ +#else +#if BYTE_ORDER == LITTLE_ENDIAN +#define IPV6_FLOWINFO_MASK 0xffffff0f /* flow info (28 bits) */ +#define IPV6_FLOWLABEL_MASK 0xffff0f00 /* flow label (20 bits) */ +#endif /* LITTLE_ENDIAN */ +#endif +#if 1 +/* ECN bits proposed by Sally Floyd */ +#define IP6TOS_CE 0x01 /* congestion experienced */ +#define IP6TOS_ECT 0x02 /* ECN-capable transport 
*/ +#endif + +/* + * Extension Headers + */ + +struct ip6_ext { + u_char ip6e_nxt; + u_char ip6e_len; +}; + +/* Hop-by-Hop options header */ +/* XXX should we pad it to force alignment on an 8-byte boundary? */ +struct ip6_hbh { + u_int8_t ip6h_nxt; /* next header */ + u_int8_t ip6h_len; /* length in units of 8 octets */ + /* followed by options */ +}; + +/* Destination options header */ +/* XXX should we pad it to force alignment on an 8-byte boundary? */ +struct ip6_dest { + u_int8_t ip6d_nxt; /* next header */ + u_int8_t ip6d_len; /* length in units of 8 octets */ + /* followed by options */ +}; + +/* Option types and related macros */ +#define IP6OPT_PAD1 0x00 /* 00 0 00000 */ +#define IP6OPT_PADN 0x01 /* 00 0 00001 */ +#define IP6OPT_JUMBO 0xC2 /* 11 0 00010 = 194 */ +#define IP6OPT_NSAP_ADDR 0xC3 /* 11 0 00011 */ +#define IP6OPT_TUNNEL_LIMIT 0x04 /* 00 0 00100 */ +#define IP6OPT_RTALERT 0x05 /* 00 0 00101 (KAME definition) */ +#define IP6OPT_ROUTER_ALERT 0x05 /* (2292bis def, recommended) */ + +#define IP6OPT_RTALERT_LEN 4 +#define IP6OPT_RTALERT_MLD 0 /* Datagram contains an MLD message */ +#define IP6OPT_RTALERT_RSVP 1 /* Datagram contains an RSVP message */ +#define IP6OPT_RTALERT_ACTNET 2 /* contains an Active Networks msg */ +#define IP6OPT_MINLEN 2 + +#define IP6OPT_BINDING_UPDATE 0xc6 /* 11 0 00110 */ +#define IP6OPT_BINDING_ACK 0x07 /* 00 0 00111 */ +#define IP6OPT_BINDING_REQ 0x08 /* 00 0 01000 */ +#define IP6OPT_HOME_ADDRESS 0xc9 /* 11 0 01001 */ +#define IP6OPT_EID 0x8a /* 10 0 01010 */ + +#define IP6OPT_TYPE(o) ((o) & 0xC0) +#define IP6OPT_TYPE_SKIP 0x00 +#define IP6OPT_TYPE_DISCARD 0x40 +#define IP6OPT_TYPE_FORCEICMP 0x80 +#define IP6OPT_TYPE_ICMP 0xC0 + +#define IP6OPT_MUTABLE 0x20 + +/* IPv6 options: common part */ +struct ip6_opt { + u_int8_t ip6o_type; + u_int8_t ip6o_len; +}; + +/* Jumbo Payload Option */ +struct ip6_opt_jumbo { + u_int8_t ip6oj_type; + u_int8_t ip6oj_len; + u_int8_t ip6oj_jumbo_len[4]; +}; +#define IP6OPT_JUMBO_LEN 6 + +/* 
NSAP Address Option */ +struct ip6_opt_nsap { + u_int8_t ip6on_type; + u_int8_t ip6on_len; + u_int8_t ip6on_src_nsap_len; + u_int8_t ip6on_dst_nsap_len; + /* followed by source NSAP */ + /* followed by destination NSAP */ +}; + +/* Tunnel Limit Option */ +struct ip6_opt_tunnel { + u_int8_t ip6ot_type; + u_int8_t ip6ot_len; + u_int8_t ip6ot_encap_limit; +}; + +/* Router Alert Option */ +struct ip6_opt_router { + u_int8_t ip6or_type; + u_int8_t ip6or_len; + u_int8_t ip6or_value[2]; +}; +/* Router alert values (in network byte order) */ +#if BYTE_ORDER == BIG_ENDIAN +#define IP6_ALERT_MLD 0x0000 +#define IP6_ALERT_RSVP 0x0001 +#define IP6_ALERT_AN 0x0002 +#else +#if BYTE_ORDER == LITTLE_ENDIAN +#define IP6_ALERT_MLD 0x0000 +#define IP6_ALERT_RSVP 0x0100 +#define IP6_ALERT_AN 0x0200 +#endif /* LITTLE_ENDIAN */ +#endif + +/* Binding Update Option */ +struct ip6_opt_binding_update { + u_int8_t ip6ou_type; + u_int8_t ip6ou_len; + u_int8_t ip6ou_flags; + u_int8_t ip6ou_prefixlen; + u_int8_t ip6ou_seqno[2]; + u_int8_t ip6ou_lifetime[4]; + u_int8_t ip6ou_coa[16];/* Optional based on flags */ + /* followed by sub-options */ +}; + +/* Binding Update Flags */ +#define IP6_BUF_ACK 0x80 /* Request a binding ack */ +#define IP6_BUF_HOME 0x40 /* Home Registration */ +#define IP6_BUF_COA 0x20 /* Care-of-address present in option */ +#define IP6_BUF_ROUTER 0x10 /* Sending mobile node is a router */ + +/* Binding Ack Option */ +struct ip6_opt_binding_ack { + u_int8_t ip6oa_type; + u_int8_t ip6oa_len; + u_int8_t ip6oa_status; + u_int8_t ip6oa_seqno[2]; + u_int8_t ip6oa_lifetime[4]; + u_int8_t ip6oa_refresh[4]; + /* followed by sub-options */ +}; + +/* Binding Request Option */ +struct ip6_opt_binding_request { + u_int8_t ip6or_type; + u_int8_t ip6or_len; + /* followed by sub-options */ +}; + +/* Home Address Option */ +struct ip6_opt_home_address { + u_int8_t ip6oh_type; + u_int8_t ip6oh_len; + u_int8_t ip6oh_addr[16];/* Home Address */ + /* followed by sub-options */ +}; + +/* Routing 
header */ +struct ip6_rthdr { + u_int8_t ip6r_nxt; /* next header */ + u_int8_t ip6r_len; /* length in units of 8 octets */ + u_int8_t ip6r_type; /* routing type */ + u_int8_t ip6r_segleft; /* segments left */ + /* followed by routing type specific data */ +}; + +/* Type 0 Routing header */ +struct ip6_rthdr0 { + u_int8_t ip6r0_nxt; /* next header */ + u_int8_t ip6r0_len; /* length in units of 8 octets */ + u_int8_t ip6r0_type; /* always zero */ + u_int8_t ip6r0_segleft; /* segments left */ + u_int32_t ip6r0_reserved; /* reserved field */ + /* followed by up to 127 struct in6_addr */ + +#ifdef COMPAT_RFC2292 + struct in6_addr ip6r0_addr[1]; /* up to 127 addresses */ +#endif +}; + +/* Fragment header */ +struct ip6_frag { + u_int8_t ip6f_nxt; /* next header */ + u_int8_t ip6f_reserved; /* reserved field */ + u_int16_t ip6f_offlg; /* offset, reserved, and flag */ + u_int32_t ip6f_ident; /* identification */ +}; + +#if BYTE_ORDER == BIG_ENDIAN +#define IP6F_OFF_MASK 0xfff8 /* mask out offset from _offlg */ +#define IP6F_RESERVED_MASK 0x0006 /* reserved bits in ip6f_offlg */ +#define IP6F_MORE_FRAG 0x0001 /* more-fragments flag */ +#else /* BYTE_ORDER == LITTLE_ENDIAN */ +#define IP6F_OFF_MASK 0xf8ff /* mask out offset from _offlg */ +#define IP6F_RESERVED_MASK 0x0600 /* reserved bits in ip6f_offlg */ +#define IP6F_MORE_FRAG 0x0100 /* more-fragments flag */ +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +/* + * Internet implementation parameters. + */ +#define IPV6_MAXHLIM 255 /* maximum hoplimit */ +#define IPV6_DEFHLIM 64 /* default hlim */ +#define IPV6_FRAGTTL 120 /* ttl for fragment packets, in slowtimo tick */ +#define IPV6_HLIMDEC 1 /* subtracted when forwarding */ + +#define IPV6_MMTU 1280 /* minimal MTU and reassembly. 
1024 + 256 */ +#define IPV6_MAXPACKET 65535 /* ip6 max packet size without Jumbo payload*/ + +#ifdef KERNEL +/* + * IP6_EXTHDR_CHECK ensures that region between the IP6 header and the + * target header (including IPv6 itself, extension headers and + * TCP/UDP/ICMP6 headers) are continuous. KAME requires drivers + * to store incoming data into one internal mbuf or one or more external + * mbufs(never into two or more internal mbufs). Thus, the third case is + * supposed to never be matched but is prepared just in case. + */ + +#define IP6_EXTHDR_CHECK(m, off, hlen, ret) \ +do { \ + if ((m)->m_next != NULL) { \ + if (((m)->m_flags & M_LOOP) && \ + ((m)->m_len < (off) + (hlen)) && \ + (((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \ + ip6stat.ip6s_exthdrtoolong++; \ + return ret; \ + } else if ((m)->m_flags & M_EXT) { \ + if ((m)->m_len < (off) + (hlen)) { \ + ip6stat.ip6s_exthdrtoolong++; \ + m_freem(m); \ + return ret; \ + } \ + } else { \ + if ((m)->m_len < (off) + (hlen)) { \ + ip6stat.ip6s_exthdrtoolong++; \ + m_freem(m); \ + return ret; \ + } \ + } \ + } \ + else { \ + if ((m)->m_len < (off) + (hlen)) { \ + ip6stat.ip6s_tooshort++; \ + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \ + m_freem(m); \ + return ret; \ + } \ + } \ +} while (0) + +/* + * IP6_EXTHDR_GET ensures that intermediate protocol header (from "off" to + * "len") is located in single mbuf, on contiguous memory region. + * The pointer to the region will be returned to pointer variable "val", + * with type "typ". + * IP6_EXTHDR_GET0 does the same, except that it aligns the structure at the + * very top of mbuf. GET0 is likely to make memory copy than GET. 
+ * + * XXX we're now testing this, needs m_pulldown() + */ +#define IP6_EXTHDR_GET(val, typ, m, off, len) \ +do { \ + struct mbuf *t; \ + int tmp; \ + ip6stat.ip6s_exthdrget++; \ + if ((m)->m_len >= (off) + (len)) \ + (val) = (typ)(mtod((m), caddr_t) + (off)); \ + else { \ + t = m_pulldown((m), (off), (len), &tmp); \ + if (t) { \ + if (t->m_len < tmp + (len)) \ + panic("m_pulldown malfunction"); \ + (val) = (typ)(mtod(t, caddr_t) + tmp); \ + } else { \ + (val) = (typ)NULL; \ + (m) = NULL; \ + } \ + } \ +} while (0) + +#define IP6_EXTHDR_GET0(val, typ, m, off, len) \ +do { \ + struct mbuf *t; \ + ip6stat.ip6s_exthdrget0++; \ + if ((off) == 0) \ + (val) = (typ)mtod(m, caddr_t); \ + else { \ + t = m_pulldown((m), (off), (len), NULL); \ + if (t) { \ + if (t->m_len < (len)) \ + panic("m_pulldown malfunction"); \ + (val) = (typ)mtod(t, caddr_t); \ + } else { \ + (val) = (typ)NULL; \ + (m) = NULL; \ + } \ + } \ +} while (0) +#endif /*KERNEL*/ + +#endif /* not _NETINET_IP6_H_ */ diff --git a/bsd/netinet/ip_auth.c b/bsd/netinet/ip_auth.c new file mode 100644 index 000000000..051008eea --- /dev/null +++ b/bsd/netinet/ip_auth.c @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1997 by Darren Reed & Guido van Rooij. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + */ + +#define __FreeBSD_version 300000 /* just a hack - no */ + +#if !defined(KERNEL) +# include +# include +#endif +#include +#include +#include +#include +#include +#if defined(KERNEL) && (__FreeBSD_version >= 220000) +# include +# include +#else +# include +#endif +#include +#ifndef linux +# include +#endif +#include +#if defined(KERNEL) +# include +#endif +#if !defined(__SVR4) && !defined(__svr4__) +# ifndef linux +# include +# endif +#else +# include +# include +# include +# include +# include +#endif +#if defined(KERNEL) && (__FreeBSD_version >= 300000) +# include +#endif +#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(bsdi) +# include +#endif +#include +#ifdef sun +#include +#endif +#if !defined(KERNEL) && (__FreeBSD_version >= 300000) +# include +#endif +#include +#include +#include +#include +#ifndef KERNEL +#define KERNEL +#define NOT_KERNEL +#endif +#ifndef linux +# include +#endif +#ifdef NOT_KERNEL +#undef KERNEL +#endif +#ifdef __sgi +# ifdef IFF_DRVRLOCK /* IRIX6 */ +#include +# endif +#endif +#include +#if defined(__sgi) && !defined(IFF_DRVRLOCK) /* IRIX < 6 */ +extern struct ifqueue ipintrq; /* ip packet input queue */ +#else +# ifndef linux +# include +# include +# endif +#endif +#include +#include +#include "netinet/ip_compat.h" +#include +#include "netinet/ip_fil.h" +#include "netinet/ip_auth.h" +#if !SOLARIS && !defined(linux) +# include +# ifdef __FreeBSD__ +# include +# endif +#endif + + +#if (SOLARIS || defined(__sgi)) && defined(_KERNEL) +extern kmutex_t ipf_auth; +# if SOLARIS +extern kcondvar_t ipfauthwait; +# endif +#endif +#ifdef 
linux +static struct wait_queue *ipfauthwait = NULL; +#endif + +int fr_authsize = FR_NUMAUTH; +int fr_authused = 0; +int fr_defaultauthage = 600; +fr_authstat_t fr_authstats; +static frauth_t fr_auth[FR_NUMAUTH]; +mb_t *fr_authpkts[FR_NUMAUTH]; +static int fr_authstart = 0, fr_authend = 0, fr_authnext = 0; +static frauthent_t *fae_list = NULL; +frentry_t *ipauth = NULL; + + +/* + * Check if a packet has authorization. If the packet is found to match an + * authorization result and that would result in a feedback loop (i.e. it + * will end up returning FR_AUTH) then return FR_BLOCK instead. + */ +int fr_checkauth(ip, fin) +ip_t *ip; +fr_info_t *fin; +{ + u_short id = ip->ip_id; + u_32_t pass; + int i; + + MUTEX_ENTER(&ipf_auth); + for (i = fr_authstart; i != fr_authend; ) { + /* + * index becomes -2 only after an SIOCAUTHW. Check this in + * case the same packet gets sent again and it hasn't yet been + * auth'd. + */ + if ((fr_auth[i].fra_index == -2) && + (id == fr_auth[i].fra_info.fin_id) && + !bcmp((char *)fin,(char *)&fr_auth[i].fra_info,FI_CSIZE)) { + /* + * Avoid feedback loop. + */ + if (!(pass = fr_auth[i].fra_pass) || (pass & FR_AUTH)) + pass = FR_BLOCK; + fr_authstats.fas_hits++; + fr_auth[i].fra_index = -1; + fr_authused--; + if (i == fr_authstart) { + while (fr_auth[i].fra_index == -1) { + i++; + if (i == FR_NUMAUTH) + i = 0; + fr_authstart = i; + if (i == fr_authend) + break; + } + if (fr_authstart == fr_authend) { + fr_authnext = 0; + fr_authstart = fr_authend = 0; + } + } + MUTEX_EXIT(&ipf_auth); + return pass; + } + i++; + if (i == FR_NUMAUTH) + i = 0; + } + fr_authstats.fas_miss++; + MUTEX_EXIT(&ipf_auth); + return 0; +} + + +/* + * Check if we have room in the auth array to hold details for another packet. + * If we do, store it and wake up any user programs which are waiting to + * hear about these events. 
+ */ +int fr_newauth(m, fin, ip +#if defined(_KERNEL) && SOLARIS +, qif) +qif_t *qif; +#else +) +#endif +mb_t *m; +fr_info_t *fin; +ip_t *ip; +{ + int i; + + MUTEX_ENTER(&ipf_auth); + if ((fr_authstart > fr_authend) && (fr_authstart - fr_authend == -1)) { + fr_authstats.fas_nospace++; + MUTEX_EXIT(&ipf_auth); + return 0; + } + if (fr_authend - fr_authstart == FR_NUMAUTH - 1) { + fr_authstats.fas_nospace++; + MUTEX_EXIT(&ipf_auth); + return 0; + } + + fr_authstats.fas_added++; + fr_authused++; + i = fr_authend++; + if (fr_authend == FR_NUMAUTH) + fr_authend = 0; + MUTEX_EXIT(&ipf_auth); + fr_auth[i].fra_index = i; + fr_auth[i].fra_pass = 0; + fr_auth[i].fra_age = fr_defaultauthage; + bcopy((char *)fin, (char *)&fr_auth[i].fra_info, sizeof(*fin)); +#if !defined(sparc) && !defined(m68k) + /* + * No need to copyback here as we want to undo the changes, not keep + * them. + */ +# if SOLARIS && defined(KERNEL) + if (ip == (ip_t *)m->b_rptr) +# endif + { + register u_short bo; + + bo = ip->ip_len; + ip->ip_len = htons(bo); +# if !SOLARIS /* 4.4BSD converts this ip_input.c, but I don't in solaris.c */ + bo = ip->ip_id; + ip->ip_id = htons(bo); +# endif + bo = ip->ip_off; + ip->ip_off = htons(bo); + } +#endif +#if SOLARIS && defined(KERNEL) + m->b_rptr -= qif->qf_off; + fr_authpkts[i] = *(mblk_t **)fin->fin_mp; + fr_auth[i].fra_q = qif->qf_q; + cv_signal(&ipfauthwait); +#else + fr_authpkts[i] = m; +# if defined(linux) && defined(KERNEL) + wake_up_interruptible(&ipfauthwait); +# else + WAKEUP(&fr_authnext); +# endif +#endif + return 1; +} + + +int fr_auth_ioctl(data, cmd, fr, frptr) +caddr_t data; +#if defined(__NetBSD__) || defined(__OpenBSD__) || (__FreeBSD_version >= 300003) +u_long cmd; +#else +int cmd; +#endif +frentry_t *fr, **frptr; +{ + mb_t *m; +#if defined(KERNEL) +# if !SOLARIS + struct ifqueue *ifq; + int s; +# endif +#endif + frauth_t auth, *au = &auth; + frauthent_t *fae, **faep; + int i, error = 0; + + switch (cmd) + { + case SIOCINIFR : + case SIOCRMIFR : + 
case SIOCADIFR : + error = EINVAL; + break; + case SIOCINAFR : + case SIOCRMAFR : + case SIOCADAFR : + for (faep = &fae_list; (fae = *faep); ) + if (&fae->fae_fr == fr) + break; + else + faep = &fae->fae_next; + if (cmd == SIOCRMAFR) { + if (!fae) + error = ESRCH; + else { + *faep = fae->fae_next; + *frptr = fr->fr_next; + KFREE(fae); + } + } else { + KMALLOC(fae, frauthent_t *, sizeof(*fae)); + if (fae != NULL) { + IRCOPY((char *)data, (char *)&fae->fae_fr, + sizeof(fae->fae_fr)); + if (!fae->fae_age) + fae->fae_age = fr_defaultauthage; + fae->fae_fr.fr_hits = 0; + fae->fae_fr.fr_next = *frptr; + *frptr = &fae->fae_fr; + fae->fae_next = *faep; + *faep = fae; + } else + error = ENOMEM; + } + break; + case SIOCATHST: + IWCOPY((char *)&fr_authstats, data, sizeof(fr_authstats)); + break; + case SIOCAUTHW: +fr_authioctlloop: + MUTEX_ENTER(&ipf_auth); + if ((fr_authnext != fr_authend) && fr_authpkts[fr_authnext]) { + IWCOPY((char *)&fr_auth[fr_authnext++], data, + sizeof(fr_info_t)); + if (fr_authnext == FR_NUMAUTH) + fr_authnext = 0; + MUTEX_EXIT(&ipf_auth); + return 0; + } +#ifdef KERNEL +# if SOLARIS + if (!cv_wait_sig(&ipfauthwait, &ipf_auth)) { + mutex_exit(&ipf_auth); + return EINTR; + } +# else +# ifdef linux + interruptible_sleep_on(&ipfauthwait); + if (current->signal & ~current->blocked) + error = -EINTR; +# else + error = SLEEP(&fr_authnext, "fr_authnext"); +# endif +# endif +#endif + MUTEX_EXIT(&ipf_auth); + if (!error) + goto fr_authioctlloop; + break; + case SIOCAUTHR: + IRCOPY(data, (caddr_t)&auth, sizeof(auth)); + MUTEX_ENTER(&ipf_auth); + i = au->fra_index; + if ((i < 0) || (i > FR_NUMAUTH) || + (fr_auth[i].fra_info.fin_id != au->fra_info.fin_id)) { + MUTEX_EXIT(&ipf_auth); + return EINVAL; + } + m = fr_authpkts[i]; + fr_auth[i].fra_index = -2; + fr_auth[i].fra_pass = au->fra_pass; + fr_authpkts[i] = NULL; +#ifdef KERNEL + MUTEX_EXIT(&ipf_auth); + SPL_NET(s); +# ifndef linux + if (m && au->fra_info.fin_out) { +# if SOLARIS + error = 
fr_qout(fr_auth[i].fra_q, m); +# else /* SOLARIS */ +#if IPSEC + m->m_pkthdr.rcvif = NULL; +#endif /*IPSEC*/ + + error = ip_output(m, NULL, NULL, IP_FORWARDING, NULL); +# endif /* SOLARIS */ + if (error) + fr_authstats.fas_sendfail++; + else + fr_authstats.fas_sendok++; + } else if (m) { +# if SOLARIS + error = fr_qin(fr_auth[i].fra_q, m); +# else /* SOLARIS */ + ifq = &ipintrq; + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + error = ENOBUFS; + } else { + IF_ENQUEUE(ifq, m); + schednetisr(NETISR_IP); + } +# endif /* SOLARIS */ + if (error) + fr_authstats.fas_quefail++; + else + fr_authstats.fas_queok++; + } else + error = EINVAL; +# endif +# if SOLARIS + if (error) + error = EINVAL; +# else + /* + * If we experience an error which will result in the packet + * not being processed, make sure we advance to the next one. + */ + if (error == ENOBUFS) { + fr_authused--; + fr_auth[i].fra_index = -1; + fr_auth[i].fra_pass = 0; + if (i == fr_authstart) { + while (fr_auth[i].fra_index == -1) { + i++; + if (i == FR_NUMAUTH) + i = 0; + fr_authstart = i; + if (i == fr_authend) + break; + } + if (fr_authstart == fr_authend) { + fr_authnext = 0; + fr_authstart = fr_authend = 0; + } + } + } +# endif + SPL_X(s); +#endif /* KERNEL */ + break; + default : + error = EINVAL; + break; + } + return error; +} + + +#ifdef KERNEL +/* + * Free all network buffer memory used to keep saved packets. + */ +void fr_authunload() +{ + register int i; + register frauthent_t *fae, **faep; + mb_t *m; + + MUTEX_ENTER(&ipf_auth); + for (i = 0; i < FR_NUMAUTH; i++) { + if ((m = fr_authpkts[i])) { + FREE_MB_T(m); + fr_authpkts[i] = NULL; + fr_auth[i].fra_index = -1; + } + } + + + for (faep = &fae_list; (fae = *faep); ) { + *faep = fae->fae_next; + KFREE(fae); + } + MUTEX_EXIT(&ipf_auth); +} + + +/* + * Slowly expire held auth records. Timeouts are set + * in expectation of this being called twice per second. 
+ */ +void fr_authexpire() +{ + register int i; + register frauth_t *fra; + register frauthent_t *fae, **faep; + mb_t *m; +#if !SOLARIS + int s; +#endif + + SPL_NET(s); + MUTEX_ENTER(&ipf_auth); + for (i = 0, fra = fr_auth; i < FR_NUMAUTH; i++, fra++) { + if ((!--fra->fra_age) && (m = fr_authpkts[i])) { + FREE_MB_T(m); + fr_authpkts[i] = NULL; + fr_auth[i].fra_index = -1; + fr_authstats.fas_expire++; + fr_authused--; + } + } + + for (faep = &fae_list; (fae = *faep); ) { + if (!--fra->fra_age) { + *faep = fae->fae_next; + KFREE(fae); + fr_authstats.fas_expire++; + } else + faep = &fae->fae_next; + } + MUTEX_EXIT(&ipf_auth); + SPL_X(s); +} +#endif diff --git a/bsd/netinet/ip_auth.h b/bsd/netinet/ip_auth.h new file mode 100644 index 000000000..0ea1e5c51 --- /dev/null +++ b/bsd/netinet/ip_auth.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1997 by Darren Reed & Guido Van Rooij. 
+ * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + */ +#ifndef __IP_AUTH_H__ +#define __IP_AUTH_H__ + +#define FR_NUMAUTH 32 + +typedef struct fr_authstat { + U_QUAD_T fas_hits; + U_QUAD_T fas_miss; + u_long fas_nospace; + u_long fas_added; + u_long fas_sendfail; + u_long fas_sendok; + u_long fas_queok; + u_long fas_quefail; + u_long fas_expire; +} fr_authstat_t; + +typedef struct frauth { + int fra_age; + int fra_index; + u_32_t fra_pass; + fr_info_t fra_info; +#if SOLARIS + queue_t *fra_q; +#endif +} frauth_t; + +typedef struct frauthent { + struct frentry fae_fr; + struct frauthent *fae_next; + u_long fae_age; +} frauthent_t; + + +extern frentry_t *ipauth; +extern struct fr_authstat fr_authstats; +extern int fr_defaultauthage; +extern int fr_authstart; +extern int fr_authend; +extern int fr_authsize; +extern int fr_authused; +extern int fr_checkauth __P((ip_t *, fr_info_t *)); +extern void fr_authexpire __P((void)); +extern void fr_authunload __P((void)); +extern mb_t *fr_authpkts[]; +#if defined(_KERNEL) && SOLARIS +extern int fr_newauth __P((mb_t *, fr_info_t *, ip_t *, qif_t *)); +#else +extern int fr_newauth __P((mb_t *, fr_info_t *, ip_t *)); +#endif +#if defined(__NetBSD__) || defined(__OpenBSD__) || (__FreeBSD_version >= 300003) +extern int fr_auth_ioctl __P((caddr_t, u_long, frentry_t *, frentry_t **)); +#else +extern int fr_auth_ioctl __P((caddr_t, int, frentry_t *, frentry_t **)); +#endif +#endif /* __IP_AUTH_H__ */ diff --git a/bsd/netinet/ip_compat.h b/bsd/netinet/ip_compat.h new file mode 100644 index 000000000..115773c62 --- /dev/null +++ b/bsd/netinet/ip_compat.h @@ -0,0 +1,750 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1993-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. 
+ * + * @(#)ip_compat.h 1.8 1/14/96 + */ + +#if 0 + +#ifndef __IP_COMPAT_H__ +#define __IP_COMPAT_H__ + +#ifndef __P +# ifdef __STDC__ +# define __P(x) x +# else +# define __P(x) () +# define const +# endif +#endif + +#ifndef SOLARIS +#define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) +#endif + +#if defined(_KERNEL) && !defined(KERNEL) +# define KERNEL +#endif +#if defined(KERNEL) && !defined(_KERNEL) +# define _KERNEL +#endif +#if!defined(__KERNEL__) && defined(KERNEL) +# define __KERNEL__ +#endif + +#if defined(__SVR4) || defined(__svr4__) || defined(__sgi) +#define index strchr +# if !defined(_KERNEL) +# define bzero(a,b) memset(a,0,b) +# define bcmp memcmp +# define bcopy(a,b,c) memmove(b,a,c) +# endif +#endif + +struct ether_addr { + u_char ether_addr_octet[6]; +}; + + +#if defined(__sgi) && !defined(IPFILTER_LKM) +# ifdef __STDC__ +# define IPL_EXTERN(ep) ipfilter##ep +# else +# define IPL_EXTERN(ep) ipfilter/**/ep +# endif +#else +# ifdef __STDC__ +# define IPL_EXTERN(ep) ipl##ep +# else +# define IPL_EXTERN(ep) ipl/**/ep +# endif +#endif + +#ifdef linux +# include +#endif +#if SOLARIS +# define MTYPE(m) ((m)->b_datap->db_type) +# include +# include +# include +/* + * because Solaris 2 defines these in two places :-/ + */ +# undef IPOPT_EOL +# undef IPOPT_NOP +# undef IPOPT_LSRR +# undef IPOPT_RR +# undef IPOPT_SSRR +# ifndef _KERNEL +# define _KERNEL +# undef RES_INIT +# include +# include +# include +# undef _KERNEL +# else /* _KERNEL */ +# include +# include +# include +# endif /* _KERNEL */ +#endif /* SOLARIS */ +#define IPMINLEN(i, h) ((i)->ip_len >= ((i)->ip_hl * 4 + sizeof(struct h))) + +#ifndef IP_OFFMASK +#define IP_OFFMASK 0x1fff +#endif + +#if BSD > 199306 +# define USE_QUAD_T +# define U_QUAD_T u_quad_t +# define QUAD_T quad_t +#else /* BSD > 199306 */ +# define U_QUAD_T u_long +# define QUAD_T long +#endif /* BSD > 199306 */ + +/* + * These operating systems already take care of the problem for us. 
+ */ +#if defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__) || \ + defined(__sgi) +typedef u_int32_t u_32_t; +#else +/* + * Really, any arch where sizeof(long) != sizeof(int). + */ +# if defined(__alpha__) || defined(__alpha) +typedef unsigned int u_32_t; +# else +typedef unsigned long u_32_t; +# endif +#endif /* __NetBSD__ || __OpenBSD__ || __FreeBSD__ || __sgi */ + +#ifndef MAX +#define MAX(a,b) (((a) > (b)) ? (a) : (b)) +#endif + +/* + * Security Options for Intenet Protocol (IPSO) as defined in RFC 1108. + * + * Basic Option + * + * 00000001 - (Reserved 4) + * 00111101 - Top Secret + * 01011010 - Secret + * 10010110 - Confidential + * 01100110 - (Reserved 3) + * 11001100 - (Reserved 2) + * 10101011 - Unclassified + * 11110001 - (Reserved 1) + */ +#define IPSO_CLASS_RES4 0x01 +#define IPSO_CLASS_TOPS 0x3d +#define IPSO_CLASS_SECR 0x5a +#define IPSO_CLASS_CONF 0x96 +#define IPSO_CLASS_RES3 0x66 +#define IPSO_CLASS_RES2 0xcc +#define IPSO_CLASS_UNCL 0xab +#define IPSO_CLASS_RES1 0xf1 + +#define IPSO_AUTH_GENSER 0x80 +#define IPSO_AUTH_ESI 0x40 +#define IPSO_AUTH_SCI 0x20 +#define IPSO_AUTH_NSA 0x10 +#define IPSO_AUTH_DOE 0x08 +#define IPSO_AUTH_UN 0x06 +#define IPSO_AUTH_FTE 0x01 + +/* + * IP option #defines + */ +/*#define IPOPT_RR 7 */ +#define IPOPT_ZSU 10 /* ZSU */ +#define IPOPT_MTUP 11 /* MTUP */ +#define IPOPT_MTUR 12 /* MTUR */ +#define IPOPT_ENCODE 15 /* ENCODE */ +/*#define IPOPT_TS 68 */ +#define IPOPT_TR 82 /* TR */ +/*#define IPOPT_SECURITY 130 */ +/*#define IPOPT_LSRR 131 */ +#define IPOPT_E_SEC 133 /* E-SEC */ +#define IPOPT_CIPSO 134 /* CIPSO */ +/*#define IPOPT_SATID 136 */ +#ifndef IPOPT_SID +# define IPOPT_SID IPOPT_SATID +#endif +/*#define IPOPT_SSRR 137 */ +#define IPOPT_ADDEXT 147 /* ADDEXT */ +#define IPOPT_VISA 142 /* VISA */ +#define IPOPT_IMITD 144 /* IMITD */ +#define IPOPT_EIP 145 /* EIP */ +#define IPOPT_FINN 205 /* FINN */ + + +#if defined(__FreeBSD__) && defined(KERNEL) +# if __FreeBSD__ < 3 +# include +# endif +# 
if defined(IPFILTER_LKM) && !defined(ACTUALLY_LKM_NOT_KERNEL) +# define ACTUALLY_LKM_NOT_KERNEL +# endif +#endif /* __FreeBSD__ && KERNEL */ + +/* + * Build some macros and #defines to enable the same code to compile anywhere + * Well, that's the idea, anyway :-) + */ +#if KERNEL +# if SOLARIS +# define MUTEX_ENTER(x) mutex_enter(x) +# define MUTEX_EXIT(x) mutex_exit(x) +# define MTOD(m,t) (t)((m)->b_rptr) +# define IRCOPY(a,b,c) copyin((a), (b), (c)) +# define IWCOPY(a,b,c) copyout((a), (b), (c)) +# define FREE_MB_T(m) freemsg(m) +# define SPL_NET(x) ; +# define SPL_IMP(x) ; +# undef SPL_X +# define SPL_X(x) ; +# ifdef sparc +# define ntohs(x) (x) +# define ntohl(x) (x) +# define htons(x) (x) +# define htonl(x) (x) +# endif /* sparc */ +# define KMALLOC(a,b,c) (a) = (b)kmem_alloc((c), KM_NOSLEEP) +# define GET_MINOR(x) getminor(x) +typedef struct qif { + struct qif *qf_next; + ill_t *qf_ill; + kmutex_t qf_lock; + void *qf_iptr; + void *qf_optr; + queue_t *qf_in; + queue_t *qf_out; + struct qinit *qf_wqinfo; + struct qinit *qf_rqinfo; + struct qinit qf_wqinit; + struct qinit qf_rqinit; + mblk_t *qf_m; /* These three fields are for passing data up from */ + queue_t *qf_q; /* fr_qin and fr_qout to the packet processing. */ + int qf_off; + int qf_len; /* this field is used for in ipfr_fastroute */ + char qf_name[8]; + /* + * in case the ILL has disappeared... 
+ */ + int qf_hl; /* header length */ +} qif_t; +extern ill_t *get_unit __P((char *)); +# define GETUNIT(n) get_unit((n)) +# else /* SOLARIS */ +# if defined(__sgi) +# include +# define IPF_LOCK_PL plhi +# include +#undef kmutex_t +typedef struct { + lock_t *l; + int pl; +} kmutex_t; +# define MUTEX_ENTER(x) (x)->pl = LOCK((x)->l, IPF_LOCK_PL); +# define MUTEX_EXIT(x) UNLOCK((x)->l, (x)->pl); +# else /* __sgi */ +# define MUTEX_ENTER(x) ; +# define MUTEX_EXIT(x) ; +# endif /* __sgi */ +# ifndef linux +# define FREE_MB_T(m) m_freem(m) +# define MTOD(m,t) mtod(m,t) +# define IRCOPY(a,b,c) bcopy((a), (b), (c)) +# define IWCOPY(a,b,c) bcopy((a), (b), (c)) +# endif /* !linux */ +# endif /* SOLARIS */ + +# ifdef sun +# if !SOLARIS +# include +# define GETUNIT(n) ifunit((n), IFNAMSIZ) +# endif +# else +# ifndef linux +# define GETUNIT(n) ifunit((n)) +# endif +# endif /* sun */ + +# if defined(sun) && !defined(linux) || defined(__sgi) +# define UIOMOVE(a,b,c,d) uiomove((caddr_t)a,b,c,d) +# define SLEEP(id, n) sleep((id), PZERO+1) +# define WAKEUP(id) wakeup(id) +# define KFREE(x) kmem_free((char *)(x), sizeof(*(x))) +# define KFREES(x,s) kmem_free((char *)(x), (s)) +# if !SOLARIS +extern void m_copydata __P((struct mbuf *, int, int, caddr_t)); +extern void m_copyback __P((struct mbuf *, int, int, caddr_t)); +# endif +# ifdef __sgi +# include +# include +# define KMALLOC(a,b,c) (a) = (b)kmem_alloc((c), KM_NOSLEEP) +# define GET_MINOR(x) getminor(x) +# else +# if !SOLARIS +# define KMALLOC(a,b,c) (a) = (b)new_kmem_alloc((c), KMEM_NOSLEEP) +# endif /* SOLARIS */ +# endif /* __sgi */ +# endif /* sun && !linux */ +# ifndef GET_MINOR +# define GET_MINOR(x) minor(x) +# endif +# if (BSD >= 199306) || defined(__FreeBSD__) +# include +# if !defined(__FreeBSD__) || (defined (__FreeBSD__) && __FreeBSD__>=3) +# include +# include +extern vm_map_t kmem_map; +# else /* !__FreeBSD__ || (__FreeBSD__ && __FreeBSD__>=3) */ +# include +# endif /* !__FreeBSD__ || (__FreeBSD__ && 
__FreeBSD__>=3) */ +# ifdef M_PFIL +# define KMALLOC(a, b, c) MALLOC((a), b, (c), M_PFIL, M_NOWAIT) +# define KFREE(x) FREE((x), M_PFIL) +# define KFREES(x,s) FREE((x), M_PFIL) +# else +# define KMALLOC(a, b, c) MALLOC((a), b, (c), M_TEMP, M_NOWAIT) +# define KFREE(x) FREE((x), M_TEMP) +# define KFREES(x,s) FREE((x), M_TEMP) +# endif /* M_PFIL */ +# define UIOMOVE(a,b,c,d) uiomove(a,b,d) +# define SLEEP(id, n) tsleep((id), PPAUSE|PCATCH, n, 0) +# define WAKEUP(id) wakeup(id) +# endif /* BSD */ +# if defined(NetBSD) && NetBSD <= 1991011 && NetBSD >= 199407 +# define SPL_NET(x) x = splsoftnet() +# define SPL_X(x) (void) splx(x) +# else +# if !SOLARIS && !defined(linux) +# define SPL_IMP(x) x = splimp() +# define SPL_NET(x) x = splnet() +# define SPL_X(x) (void) splx(x) +# endif +# endif /* NetBSD && NetBSD <= 1991011 && NetBSD >= 199407 */ +# define PANIC(x,y) if (x) panic y +#else /* KERNEL */ +# define SLEEP(x,y) ; +# define WAKEUP(x) ; +# define PANIC(x,y) ; +# define MUTEX_ENTER(x) ; +# define MUTEX_EXIT(x) ; +# define SPL_NET(x) ; +# define SPL_IMP(x) ; +# undef SPL_X +# define SPL_X(x) ; +/*# define KMALLOC(a,b,c) (a) = (b)malloc(c) */ +# define KFREE(x) FREE(x) +# define KFREES(x,s) FREE(x) +# define GETUNIT(x) get_unit(x) +# define IRCOPY(a,b,c) bcopy((a), (b), (c)) +# define IWCOPY(a,b,c) bcopy((a), (b), (c)) +#endif /* KERNEL */ + +#if SOLARIS +typedef mblk_t mb_t; +#else +# ifdef linux +typedef struct sk_buff mb_t; +# else +typedef struct mbuf mb_t; +# endif +#endif /* SOLARIS */ + +#if defined(linux) || defined(__sgi) +/* + * These #ifdef's are here mainly for linux, but who knows, they may + * not be in other places or maybe one day linux will grow up and some + * of these will turn up there too. 
+ */ +#ifndef ICMP_MINLEN +# define ICMP_MINLEN 8 +#endif +#ifndef ICMP_UNREACH +# define ICMP_UNREACH ICMP_DEST_UNREACH +#endif +#ifndef ICMP_SOURCEQUENCH +# define ICMP_SOURCEQUENCH ICMP_SOURCE_QUENCH +#endif +#ifndef ICMP_TIMXCEED +# define ICMP_TIMXCEED ICMP_TIME_EXCEEDED +#endif +#ifndef ICMP_PARAMPROB +# define ICMP_PARAMPROB ICMP_PARAMETERPROB +#endif +#ifndef ICMP_TSTAMP +# define ICMP_TSTAMP ICMP_TIMESTAMP +#endif +#ifndef ICMP_TSTAMPREPLY +# define ICMP_TSTAMPREPLY ICMP_TIMESTAMPREPLY +#endif +#ifndef ICMP_IREQ +# define ICMP_IREQ ICMP_INFO_REQUEST +#endif +#ifndef ICMP_IREQREPLY +# define ICMP_IREQREPLY ICMP_INFO_REPLY +#endif +#ifndef ICMP_MASKREQ +# define ICMP_MASKREQ ICMP_ADDRESS +#endif +#ifndef ICMP_MASKREPLY +# define ICMP_MASKREPLY ICMP_ADDRESSREPLY +#endif +#ifndef IPVERSION +# define IPVERSION 4 +#endif +#ifndef IPOPT_MINOFF +# define IPOPT_MINOFF 4 +#endif +#ifndef IPOPT_COPIED +# define IPOPT_COPIED(x) ((x)&0x80) +#endif +#ifndef IPOPT_EOL +# define IPOPT_EOL 0 +#endif +#ifndef IPOPT_NOP +# define IPOPT_NOP 1 +#endif +#ifndef IP_MF +# define IP_MF ((u_short)0x2000) +#endif +#ifndef ETHERTYPE_IP +# define ETHERTYPE_IP ((u_short)0x0800) +#endif +#ifndef TH_FIN +# define TH_FIN 0x01 +#endif +#ifndef TH_SYN +# define TH_SYN 0x02 +#endif +#ifndef TH_RST +# define TH_RST 0x04 +#endif +#ifndef TH_PUSH +# define TH_PUSH 0x08 +#endif +#ifndef TH_ACK +# define TH_ACK 0x10 +#endif +#ifndef TH_URG +# define TH_URG 0x20 +#endif +#ifndef IPOPT_EOL +# define IPOPT_EOL 0 +#endif +#ifndef IPOPT_NOP +# define IPOPT_NOP 1 +#endif +#ifndef IPOPT_RR +# define IPOPT_RR 7 +#endif +#ifndef IPOPT_TS +# define IPOPT_TS 68 +#endif +#ifndef IPOPT_SECURITY +# define IPOPT_SECURITY 130 +#endif +#ifndef IPOPT_LSRR +# define IPOPT_LSRR 131 +#endif +#ifndef IPOPT_SATID +# define IPOPT_SATID 136 +#endif +#ifndef IPOPT_SSRR +# define IPOPT_SSRR 137 +#endif +#ifndef IPOPT_SECUR_UNCLASS +# define IPOPT_SECUR_UNCLASS ((u_short)0x0000) +#endif +#ifndef IPOPT_SECUR_CONFID +# define 
IPOPT_SECUR_CONFID ((u_short)0xf135) +#endif +#ifndef IPOPT_SECUR_EFTO +# define IPOPT_SECUR_EFTO ((u_short)0x789a) +#endif +#ifndef IPOPT_SECUR_MMMM +# define IPOPT_SECUR_MMMM ((u_short)0xbc4d) +#endif +#ifndef IPOPT_SECUR_RESTR +# define IPOPT_SECUR_RESTR ((u_short)0xaf13) +#endif +#ifndef IPOPT_SECUR_SECRET +# define IPOPT_SECUR_SECRET ((u_short)0xd788) +#endif +#ifndef IPOPT_SECUR_TOPSECRET +# define IPOPT_SECUR_TOPSECRET ((u_short)0x6bc5) +#endif +#ifndef IPOPT_OLEN +# define IPOPT_OLEN 1 +#endif +#endif /* linux || __sgi */ + +#ifdef linux +/* + * TCP States + */ +#define TCPS_CLOSED 0 /* closed */ +#define TCPS_LISTEN 1 /* listening for connection */ +#define TCPS_SYN_SENT 2 /* active, have sent syn */ +#define TCPS_SYN_RECEIVED 3 /* have send and received syn */ +/* states < TCPS_ESTABLISHED are those where connections not established */ +#define TCPS_ESTABLISHED 4 /* established */ +#define TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */ +/* states > TCPS_CLOSE_WAIT are those where user has closed */ +#define TCPS_FIN_WAIT_1 6 /* have closed, sent fin */ +#define TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */ +#define TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */ +/* states > TCPS_CLOSE_WAIT && < TCPS_FIN_WAIT_2 await ACK of FIN */ +#define TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */ +#define TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */ + +/* + * file flags. + */ +#define FWRITE WRITE +#define FREAD READ +/* + * mbuf related problems. 
+ */ +#define mtod(m,t) (t)((m)->data) +#define m_len len +#define m_next next + +#define IP_DF 0x8000 + +typedef struct { + __u16 th_sport; + __u16 th_dport; + __u32 th_seq; + __u32 th_ack; +# if defined(__i386__) || defined(__MIPSEL__) || defined(__alpha__) ||\ + defined(vax) + __u8 th_res:4; + __u8 th_off:4; +#else + __u8 th_off:4; + __u8 th_res:4; +#endif + __u8 th_flags; + __u16 th_win; + __u16 th_sum; + __u16 th_urp; +} tcphdr_t; + +typedef struct { + __u16 uh_sport; + __u16 uh_dport; + __u16 uh_ulen; + __u16 uh_sum; +} udphdr_t; + +typedef struct { +# if defined(__i386__) || defined(__MIPSEL__) || defined(__alpha__) ||\ + defined(vax) + __u8 ip_hl:4; + __u8 ip_v:4; +# else + __u8 ip_hl:4; + __u8 ip_v:4; +# endif + __u8 ip_tos; + __u16 ip_len; + __u16 ip_id; + __u16 ip_off; + __u8 ip_ttl; + __u8 ip_p; + __u16 ip_sum; + struct in_addr ip_src; + struct in_addr ip_dst; +} ip_t; + +/* + * Structure of an icmp header. + */ +typedef struct icmp { + u_char icmp_type; /* type of message, see below */ + u_char icmp_code; /* type sub code */ + u_short icmp_cksum; /* ones complement cksum of struct */ + union { + u_char ih_pptr; /* ICMP_PARAMPROB */ + struct in_addr ih_gwaddr; /* ICMP_REDIRECT */ + struct ih_idseq { + n_short icd_id; + n_short icd_seq; + } ih_idseq; + int ih_void; + } icmp_hun; +# define icmp_pptr icmp_hun.ih_pptr +# define icmp_gwaddr icmp_hun.ih_gwaddr +# define icmp_id icmp_hun.ih_idseq.icd_id +# define icmp_seq icmp_hun.ih_idseq.icd_seq +# define icmp_void icmp_hun.ih_void + union { + struct id_ts { + n_time its_otime; + n_time its_rtime; + n_time its_ttime; + } id_ts; + struct id_ip { + ip_t idi_ip; + /* options and then 64 bits of data */ + } id_ip; + u_long id_mask; + char id_data[1]; + } icmp_dun; +# define icmp_otime icmp_dun.id_ts.its_otime +# define icmp_rtime icmp_dun.id_ts.its_rtime +# define icmp_ttime icmp_dun.id_ts.its_ttime +# define icmp_ip icmp_dun.id_ip.idi_ip +# define icmp_mask icmp_dun.id_mask +# define icmp_data icmp_dun.id_data 
+} icmphdr_t; + +# ifndef LINUX_IPOVLY +# define LINUX_IPOVLY +struct ipovly { + caddr_t ih_next, ih_prev; /* for protocol sequence q's */ + u_char ih_x1; /* (unused) */ + u_char ih_pr; /* protocol */ + short ih_len; /* protocol length */ + struct in_addr ih_src; /* source internet address */ + struct in_addr ih_dst; /* destination internet address */ +}; +# endif + +typedef struct { + __u8 ether_dhost[6]; + __u8 ether_shost[6]; + __u16 ether_type; +} ether_header_t; + +typedef struct uio { + int uio_resid; + int uio_rw; + caddr_t uio_buf; +} uio_t; + +# define UIO_READ 0 +# define UIO_WRITE 1 +# define UIOMOVE(a, b, c, d) uiomove(a,b,c,d) + +/* + * For masking struct ifnet onto struct device + */ +# define if_name name + +# ifdef KERNEL +# define GETUNIT(x) dev_get(x) +# define FREE_MB_T(m) kfree_skb(m, FREE_WRITE) +# define uniqtime do_gettimeofday +# undef INT_MAX +# undef UINT_MAX +# undef LONG_MAX +# undef ULONG_MAX +# include +# define SPL_X(x) +# define SPL_NET(x) +# define SPL_IMP(x) + +# define bcmp(a,b,c) memcmp(a,b,c) +# define bcopy(a,b,c) memcpy(b,a,c) +# define bzero(a,c) memset(a,0,c) + +# define UNITNAME(n) dev_get((n)) + +# define KMALLOC(a,b,c) (a) = (b)kmalloc((c), GFP_ATOMIC) +# define KFREE(x) kfree_s((x), sizeof(*(x))) +# define KFREES(x,s) kfree_s((x), (s)) +# define IRCOPY(a,b,c) { \ + error = verify_area(VERIFY_READ, (a) ,(c)); \ + if (!error) \ + memcpy_fromfs((b), (a), (c)); \ + } +# define IWCOPY(a,b,c) { \ + error = verify_area(VERIFY_WRITE, (b), (c)); \ + if (!error) \ + memcpy_tofs((b), (a), (c)); \ + } +# else +# define __KERNEL__ +# undef INT_MAX +# undef UINT_MAX +# undef LONG_MAX +# undef ULONG_MAX +# define s8 __s8 +# define u8 __u8 +# define s16 __s16 +# define u16 __u16 +# define s32 __s32 +# define u32 __u32 +# include +# undef __KERNEL__ +# endif +# define ifnet device +#else +typedef struct tcphdr tcphdr_t; +typedef struct udphdr udphdr_t; +typedef struct icmp icmphdr_t; +typedef struct ip ip_t; +typedef struct ether_header 
ether_header_t; +#endif /* linux */ +typedef struct tcpiphdr tcpiphdr_t; + +#if defined(hpux) || defined(linux) +struct ether_addr { + char ether_addr_octet[6]; +}; +#endif + +/* + * XXX - This is one of those *awful* hacks which nobody likes + */ +#ifdef ultrix +#define A_A +#else +#define A_A & +#endif + +#ifndef ICMP_ROUTERADVERT +# define ICMP_ROUTERADVERT 9 +#endif +#ifndef ICMP_ROUTERSOLICIT +# define ICMP_ROUTERSOLICIT 10 +#endif + +#endif /* __IP_COMPAT_H__ */ + +#endif /* #if 0 */ diff --git a/bsd/netinet/ip_divert.c b/bsd/netinet/ip_divert.c new file mode 100644 index 000000000..acfd471bf --- /dev/null +++ b/bsd/netinet/ip_divert.c @@ -0,0 +1,483 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#if ISFB31 +#include "opt_inet.h" +#include "opt_ipfw.h" +#include "opt_ipdivert.h" +#endif + +#ifndef INET +#error "IPDIVERT requires INET." 
+#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#if ISFB31 +#include +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * Divert sockets + */ + +/* + * Allocate enough space to hold a full IP packet + */ +#define DIVSNDQ (65536 + 100) +#define DIVRCVQ (65536 + 100) + +/* Global variables */ + +/* + * ip_input() and ip_output() set this secret value before calling us to + * let us know which divert port to divert a packet to; this is done so + * we can use the existing prototype for struct protosw's pr_input(). + * This is stored in host order. + */ +u_short ip_divert_port; + +/* + * A 16 bit cookie is passed to the user process. + * The user process can send it back to help the caller know something + * about where the packet came from. + * + * If IPFW is the caller then the cookie is the rule that sent + * us here. On reinjection is is the rule after which processing + * should continue. Leaving it the same will make processing start + * at the rule number after that which sent it here. Setting it to + * 0 will restart processing at the beginning. + */ +u_int16_t ip_divert_cookie; + +/* Internal variables */ + +static struct inpcbhead divcb; +static struct inpcbinfo divcbinfo; + +static u_long div_sendspace = DIVSNDQ; /* XXX sysctl ? */ +static u_long div_recvspace = DIVRCVQ; /* XXX sysctl ? */ + +/* Optimization: have this preinitialized */ +static struct sockaddr_in divsrc = { sizeof(divsrc), AF_INET }; + +/* Internal functions */ + +static int div_output(struct socket *so, + struct mbuf *m, struct sockaddr *addr, struct mbuf *control); + +/* + * Initialize divert connection block queue. + */ +void +div_init(void) +{ + LIST_INIT(&divcb); + divcbinfo.listhead = &divcb; + /* + * XXX We don't use the hash list for divert IP, but it's easier + * to allocate a one entry hash list than it is to check all + * over the place for hashbase == NULL. 
+ */ + divcbinfo.hashbase = hashinit(1, M_PCB, &divcbinfo.hashmask); + divcbinfo.porthashbase = hashinit(1, M_PCB, &divcbinfo.porthashmask); + divcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb),(maxsockets * sizeof(struct inpcb)), + 4096, "divzone"); + +/* + * ### LD 08/03: init IP forwarding at this point [ipfw is not a module yet] + */ +#if !IPFIREWALL_KEXT + ip_fw_init(); +#endif +} + +/* + * Setup generic address and protocol structures + * for div_input routine, then pass them along with + * mbuf chain. ip->ip_len is assumed to have had + * the header length (hlen) subtracted out already. + * We tell whether the packet was incoming or outgoing + * by seeing if hlen == 0, which is a hack. + */ +void +div_input(struct mbuf *m, int hlen) +{ + struct ip *ip; + struct inpcb *inp; + struct socket *sa; + + /* Sanity check */ + if (ip_divert_port == 0) + panic("div_input: port is 0"); + + /* Assure header */ + if (m->m_len < sizeof(struct ip) && + (m = m_pullup(m, sizeof(struct ip))) == 0) { + return; + } + ip = mtod(m, struct ip *); + + /* Record divert cookie */ + divsrc.sin_port = ip_divert_cookie; + ip_divert_cookie = 0; + + /* Restore packet header fields */ + ip->ip_len += hlen; + HTONS(ip->ip_len); + HTONS(ip->ip_off); + + /* + * Record receive interface address, if any + * But only for incoming packets. + */ + divsrc.sin_addr.s_addr = 0; + if (hlen) { + struct ifaddr *ifa; + +#if DIAGNOSTIC + /* Sanity check */ + if (!(m->m_flags & M_PKTHDR)) + panic("div_input: no pkt hdr"); +#endif + + /* More fields affected by ip_input() */ + HTONS(ip->ip_id); + + /* Find IP address for receive interface */ + for (ifa = m->m_pkthdr.rcvif->if_addrhead.tqh_first; + ifa != NULL; ifa = ifa->ifa_link.tqe_next) { + if (ifa->ifa_addr == NULL) + continue; + if (ifa->ifa_addr->sa_family != AF_INET) + continue; + divsrc.sin_addr = + ((struct sockaddr_in *) ifa->ifa_addr)->sin_addr; + break; + } + } + /* + * Record the incoming interface name whenever we have one. 
+ */ + bzero(&divsrc.sin_zero, sizeof(divsrc.sin_zero)); + if (m->m_pkthdr.rcvif) { + /* + * Hide the actual interface name in there in the + * sin_zero array. XXX This needs to be moved to a + * different sockaddr type for divert, e.g. + * sockaddr_div with multiple fields like + * sockaddr_dl. Presently we have only 7 bytes + * but that will do for now as most interfaces + * are 4 or less + 2 or less bytes for unit. + * There is probably a faster way of doing this, + * possibly taking it from the sockaddr_dl on the iface. + * This solves the problem of a P2P link and a LAN interface + * having the same address, which can result in the wrong + * interface being assigned to the packet when fed back + * into the divert socket. Theoretically if the daemon saves + * and re-uses the sockaddr_in as suggested in the man pages, + * this iface name will come along for the ride. + * (see div_output for the other half of this.) + */ + snprintf(divsrc.sin_zero, sizeof(divsrc.sin_zero), + "%s%d", m->m_pkthdr.rcvif->if_name, + m->m_pkthdr.rcvif->if_unit); + } + + /* Put packet on socket queue, if any */ + sa = NULL; + for (inp = divcb.lh_first; inp != NULL; inp = inp->inp_list.le_next) { + if (inp->inp_lport == htons(ip_divert_port)) + sa = inp->inp_socket; + } + ip_divert_port = 0; + if (sa) { + if (sbappendaddr(&sa->so_rcv, (struct sockaddr *)&divsrc, + m, (struct mbuf *)0) == 0) + m_freem(m); + else + sorwakeup(sa); + } else { + m_freem(m); + ipstat.ips_noproto++; + ipstat.ips_delivered--; + } +} + +/* + * Deliver packet back into the IP processing machinery. + * + * If no address specified, or address is 0.0.0.0, send to ip_output(); + * otherwise, send to ip_input() and mark as having been received on + * the interface with that address. 
+ */ +static int +div_output(so, m, addr, control) + struct socket *so; + register struct mbuf *m; + struct sockaddr *addr; + struct mbuf *control; +{ + register struct inpcb *const inp = sotoinpcb(so); + register struct ip *const ip = mtod(m, struct ip *); + struct sockaddr_in *sin = (struct sockaddr_in *)addr; + int error = 0; + + if (control) + m_freem(control); /* XXX */ + + /* Loopback avoidance and state recovery */ + if (sin) { + int len = 0; + char *c = sin->sin_zero; + + ip_divert_cookie = sin->sin_port; + + /* + * Find receive interface with the given name or IP address. + * The name is user supplied data so don't trust it's size or + * that it is zero terminated. The name has priority. + * We are presently assuming that the sockaddr_in + * has not been replaced by a sockaddr_div, so we limit it + * to 16 bytes in total. the name is stuffed (if it exists) + * in the sin_zero[] field. + */ + while (*c++ && (len++ < sizeof(sin->sin_zero))); + if ((len > 0) && (len < sizeof(sin->sin_zero))) + m->m_pkthdr.rcvif = ifunit(sin->sin_zero); + } else { + ip_divert_cookie = 0; + } + + /* Reinject packet into the system as incoming or outgoing */ + if (!sin || sin->sin_addr.s_addr == 0) { + /* + * Don't allow both user specified and setsockopt options, + * and don't allow packet length sizes that will crash + */ + if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options) || + ((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) { + error = EINVAL; + goto cantsend; + } + + /* Convert fields to host order for ip_output() */ + NTOHS(ip->ip_len); + NTOHS(ip->ip_off); + + /* Send packet to output processing */ + ipstat.ips_rawout++; /* XXX */ + error = ip_output(m, inp->inp_options, &inp->inp_route, + (so->so_options & SO_DONTROUTE) | + IP_ALLOWBROADCAST | IP_RAWOUTPUT, inp->inp_moptions); + } else { + struct ifaddr *ifa; + + /* If no luck with the name above. check by IP address. 
*/ + if (m->m_pkthdr.rcvif == NULL) { + /* + * Make sure there are no distractions + * for ifa_ifwithaddr. Clear the port and the ifname. + * Maybe zap all 8 bytes at once using a 64bit write? + */ + bzero(sin->sin_zero, sizeof(sin->sin_zero)); + /* *((u_int64_t *)sin->sin_zero) = 0; */ /* XXX ?? */ + sin->sin_port = 0; + if (!(ifa = ifa_ifwithaddr((struct sockaddr *) sin))) { + error = EADDRNOTAVAIL; + goto cantsend; + } + m->m_pkthdr.rcvif = ifa->ifa_ifp; + } + + /* Send packet to input processing */ + ip_input(m); + } + + /* paranoid: Reset for next time (and other packets) */ + /* almost definitly already done in the ipfw filter but.. */ + ip_divert_cookie = 0; + return error; + +cantsend: + ip_divert_cookie = 0; + m_freem(m); + return error; +} + +static int +div_attach(struct socket *so, int proto, struct proc *p) +{ + struct inpcb *inp; + int error, s; + + inp = sotoinpcb(so); + if (inp) + panic("div_attach"); + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; + + s = splnet(); + error = in_pcballoc(so, &divcbinfo, p); + splx(s); + if (error) + return error; + error = soreserve(so, div_sendspace, div_recvspace); + if (error) + return error; + inp = (struct inpcb *)so->so_pcb; + inp->inp_ip_p = proto; + inp->inp_flags |= INP_HDRINCL | INP_IPV4; + /* The socket is always "connected" because + we always know "where" to send the packet */ + so->so_state |= SS_ISCONNECTED; +#if IPSEC + error = ipsec_init_policy(so, &inp->inp_sp); + if (error != 0) { + in_pcbdetach(inp); + return error; + } +#endif /*IPSEC*/ + return 0; +} + +static int +div_detach(struct socket *so) +{ + struct inpcb *inp; + + inp = sotoinpcb(so); + if (inp == 0) + panic("div_detach"); + in_pcbdetach(inp); + return 0; +} + +static int +div_abort(struct socket *so) +{ + soisdisconnected(so); + return div_detach(so); +} + +static int +div_disconnect(struct socket *so) +{ + if ((so->so_state & SS_ISCONNECTED) == 0) + return ENOTCONN; + return div_abort(so); +} + +static int 
+div_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp; + int s; + int error; + + s = splnet(); + inp = sotoinpcb(so); + error = in_pcbbind(inp, nam, p); + splx(s); + return 0; +} + +static int +div_shutdown(struct socket *so) +{ + socantsendmore(so); + return 0; +} + +static int +div_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, + struct mbuf *control, struct proc *p) +{ + /* Packet must have a header (but that's about it) */ + if (m->m_len < sizeof (struct ip) || + (m = m_pullup(m, sizeof (struct ip))) == 0) { + ipstat.ips_toosmall++; + m_freem(m); + return EINVAL; + } + + /* Send packet */ + return div_output(so, m, nam, control); +} + +struct pr_usrreqs div_usrreqs = { + div_abort, pru_accept_notsupp, div_attach, div_bind, + pru_connect_notsupp, pru_connect2_notsupp, in_control, div_detach, + div_disconnect, pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, div_send, pru_sense_null, div_shutdown, + in_setsockaddr, sosend, soreceive, sopoll +}; diff --git a/bsd/netinet/ip_dummynet.c b/bsd/netinet/ip_dummynet.c new file mode 100644 index 000000000..4788dc56a --- /dev/null +++ b/bsd/netinet/ip_dummynet.c @@ -0,0 +1,658 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Luigi Rizzo + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. + * + */ + +/* + * This module implements IP dummynet, a bandwidth limiter/delay emulator + * used in conjunction with the ipfw package. + * + * Changes: + * + * 980821: changed conventions in the queueing logic + * packets passed from dummynet to ip_in/out are prepended with + * a vestigial mbuf type MT_DUMMYNET which contains a pointer + * to the matching rule. + * ip_input/output will extract the parameters, free the vestigial mbuf, + * and do the processing. + * + * 980519: fixed behaviour when deleting rules. 
+ * 980518: added splimp()/splx() to protect against races + * 980513: initial release + */ + +/* include files marked with XXX are probably not needed */ + +#include +#include +#include +#include +#include /* XXX */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if BRIDGE +#include /* for struct arpcom */ +#include +#endif + +static struct dn_pipe *all_pipes = NULL ; /* list of all pipes */ + +static int dn_debug = 0 ; /* verbose */ +static int dn_calls = 0 ; /* number of calls */ +static int dn_idle = 1; +#ifdef SYSCTL_NODE +SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW, 0, "Dummynet"); +SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug, CTLFLAG_RW, &dn_debug, 0, ""); +SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, calls, CTLFLAG_RD, &dn_calls, 0, ""); +SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, idle, CTLFLAG_RD, &dn_idle, 0, ""); +#endif + +static int ip_dn_ctl(struct sockopt *sopt); + +static void rt_unref(struct rtentry *); +static void dummynet(void *); +static void dn_restart(void); +static void dn_move(struct dn_pipe *pipe, int immediate); +static void dummynet_flush(void); + +/* + * the following is needed when deleting a pipe, because rules can + * hold references to the pipe. + */ +extern LIST_HEAD (ip_fw_head, ip_fw_chain) ip_fw_chain; + +/* + * invoked to reschedule the periodic task if necessary. 
+ * Should only be called when dn_idle = 1 ; + */ +static void +dn_restart() +{ + struct dn_pipe *pipe; + + if (!dn_idle) + return; + + for (pipe = all_pipes ; pipe ; pipe = pipe->next ) { + /* if there any pipe that needs work, restart */ + if (pipe->r.head || pipe->p.head || pipe->numbytes < 0 ) { + dn_idle = 0; + timeout(dummynet, NULL, 1); + return ; + } + } +} + +static void +rt_unref(struct rtentry *rt) +{ + if (rt == NULL) + return ; + if (rt->rt_refcnt <= 0) + printf("-- warning, refcnt now %d, decreasing\n", rt->rt_refcnt); + RTFREE(rt); +} + +/* + * move packets from R-queue to P-queue + */ +static void +dn_move(struct dn_pipe *pipe, int immediate) +{ + struct dn_pkt *pkt; + + /* + * consistency check, should catch new pipes which are + * not initialized properly. + */ + if ( pipe->p.head == NULL && + pipe->ticks_from_last_insert != pipe->delay) { + printf("Warning, empty pipe and delay %d (should be %d)\n", + pipe->ticks_from_last_insert, pipe->delay); + pipe->ticks_from_last_insert = pipe->delay; + } + /* this ought to go in dn_dequeue() */ + if (!immediate && pipe->ticks_from_last_insert < pipe->delay) + pipe->ticks_from_last_insert++; + if ( pkt = pipe->r.head ) { + /* + * Move at most numbytes bytes from src and move to dst. + * delay is set to ticks_from_last_insert, which + * is reset after the first insertion; + */ + while ( pkt ) { + struct ip *ip=mtod(pkt->dn_m, struct ip *); + + /* + * queue limitation: pass packets down if the len is + * such that the pkt would go out before the next tick. + */ + if (pipe->bandwidth) { + if (pipe->numbytes < ip->ip_len) + break; + pipe->numbytes -= ip->ip_len; + } + pipe->r_len--; /* elements in queue */ + pipe->r_len_bytes -= ip->ip_len ; + + /* + * to add delay jitter, must act here. A lower value + * (bounded to 0) means lower delay. 
+ */ + pkt->delay = pipe->ticks_from_last_insert; + pipe->ticks_from_last_insert = 0; + /* compensate the decrement done next in dn_dequeue */ + if (!immediate && pkt->delay >0 && pipe->p.head==NULL) + pkt->delay++; + if (pipe->p.head == NULL) + pipe->p.head = pkt; + else + (struct dn_pkt *)pipe->p.tail->dn_next = pkt; + pipe->p.tail = pkt; + pkt = (struct dn_pkt *)pkt->dn_next; + pipe->p.tail->dn_next = NULL; + } + pipe->r.head = pkt; + + /*** XXX just a sanity check */ + if ( ( pkt == NULL && pipe->r_len != 0) || + ( pkt != NULL && pipe->r_len == 0) ) + printf("-- Warning, pipe head %p len %d\n", + (void *)pkt, pipe->r_len); + } + + /* + * deliver packets downstream after the delay in the P-queue. + */ + + if (pipe->p.head == NULL) + return; + if (!immediate) + pipe->p.head->delay--; + while ( (pkt = pipe->p.head) && pkt->delay < 1) { + /* + * first unlink, then call procedures since ip_input() + * can result in a call to ip_output cnd viceversa, + * thus causing nested calls + */ + pipe->p.head = (struct dn_pkt *) pkt->dn_next ; + + /* + * the trick to avoid flow-id settings here is to prepend a + * vestigial mbuf to the packet, with the following values: + * m_type = MT_DUMMYNET + * m_next = the actual mbuf to be processed by ip_input/output + * m_data = the matching rule + * The vestigial element is the same memory area used by + * the dn_pkt, and IS FREED IN ip_input/ip_output. IT IS + * NOT A REAL MBUF, just a block of memory acquired with malloc(). 
+ */ + switch (pkt->dn_dir) { + case DN_TO_IP_OUT: { + struct rtentry *tmp_rt = pkt->ro.ro_rt ; + + (void)ip_output((struct mbuf *)pkt, (struct mbuf *)pkt->ifp, + &(pkt->ro), pkt->dn_hlen, NULL); + rt_unref (tmp_rt) ; + } + break ; + case DN_TO_IP_IN : + ip_input((struct mbuf *)pkt) ; + break ; +#if BRIDGE + case DN_TO_BDG_FWD : + bdg_forward((struct mbuf **)&pkt, pkt->ifp); + break ; +#endif + default: + printf("dummynet: bad switch %d!\n", pkt->dn_dir); + m_freem(pkt->dn_m); + FREE(pkt, M_IPFW); + break ; + } + } +} +/* + * this is the periodic task that moves packets between the R- + * and the P- queue + */ +/*ARGSUSED*/ +void +dummynet(void * __unused unused) +{ + struct dn_pipe *p ; + int s ; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + dn_calls++ ; + for (p = all_pipes ; p ; p = p->next ) { + /* + * Increment the amount of data that can be sent. However, + * don't do that if the channel is idle + * (r.head == NULL && numbytes >= bandwidth). + * This bug fix is from tim shepard (shep@bbn.com) + */ + s = splimp(); + if (p->r.head != NULL || p->numbytes < p->bandwidth ) + p->numbytes += p->bandwidth ; + dn_move(p, 0); /* is it really 0 (also below) ? */ + splx(s); + } + + /* + * finally, if some queue has data, restart the timer. + */ + dn_idle = 1; + dn_restart(); + (void) thread_funnel_set(network_flock, funnel_state); + +} + +/* + * dummynet hook for packets. + * input and output use the same code, so i use bit 16 in the pipe + * number to chose the direction: 1 for output packets, 0 for input. + * for input, only m is significant. For output, also the others. + */ +int +dummynet_io(int pipe_nr, int dir, + struct mbuf *m, struct ifnet *ifp, struct route *ro, int hlen, + struct ip_fw_chain *rule) +{ + struct dn_pkt *pkt; + struct dn_pipe *pipe; + struct ip *ip=mtod(m, struct ip *); + + int s=splimp(); + + pipe_nr &= 0xffff ; + /* + * locate pipe. First time is expensive, next have direct access. 
+ */ + + if ( (pipe = rule->rule->pipe_ptr) == NULL ) { + for (pipe=all_pipes; pipe && pipe->pipe_nr !=pipe_nr; pipe=pipe->next) + ; + if (pipe == NULL) { + splx(s); + if (dn_debug) + printf("warning, pkt for no pipe %d\n", pipe_nr); + m_freem(m); + return 0 ; + } else + rule->rule->pipe_ptr = pipe ; + } + + /* + * should i drop ? + * This section implements random packet drop. + */ + if ( (pipe->plr && random() < pipe->plr) || + (pipe->queue_size && pipe->r_len >= pipe->queue_size) || + (pipe->queue_size_bytes && + ip->ip_len + pipe->r_len_bytes > pipe->queue_size_bytes) || + (pkt = (struct dn_pkt *) _MALLOC(sizeof (*pkt), + M_IPFW, M_NOWAIT) ) == NULL ) { + splx(s); + if (dn_debug) + printf("-- dummynet: drop from pipe %d, have %d pks, %d bytes\n", + pipe_nr, pipe->r_len, pipe->r_len_bytes); + pipe->r_drops++ ; + m_freem(m); + return 0 ; /* XXX error */ + } + bzero(pkt, sizeof(*pkt) ); + /* build and enqueue packet */ + pkt->hdr.mh_type = MT_DUMMYNET ; + (struct ip_fw_chain *)pkt->hdr.mh_data = rule ; + pkt->dn_next = NULL; + pkt->dn_m = m; + pkt->dn_dir = dir ; + pkt->delay = 0; + + pkt->ifp = ifp; + if (dir == DN_TO_IP_OUT) { + pkt->ro = *ro; /* XXX copied! 
*/ + if (ro->ro_rt) + ro->ro_rt->rt_refcnt++ ; /* XXX */ + } + pkt->dn_hlen = hlen; + if (pipe->r.head == NULL) + pipe->r.head = pkt; + else + (struct dn_pkt *)pipe->r.tail->dn_next = pkt; + pipe->r.tail = pkt; + pipe->r_len++; + pipe->r_len_bytes += ip->ip_len ; + + /* + * here we could implement RED if we like to + */ + + if (pipe->r.head == pkt) { /* process immediately */ + dn_move(pipe, 1); + } + splx(s); + if (dn_idle) + dn_restart(); + return 0; +} + +/* + * dispose all packets queued on a pipe + */ +static void +purge_pipe(struct dn_pipe *pipe) +{ + struct dn_pkt *pkt, *n ; + struct rtentry *tmp_rt ; + + for (pkt = pipe->r.head ; pkt ; ) { + rt_unref (tmp_rt = pkt->ro.ro_rt ) ; + m_freem(pkt->dn_m); + n = pkt ; + pkt = (struct dn_pkt *)pkt->dn_next ; + FREE(n, M_IPFW) ; + } + for (pkt = pipe->p.head ; pkt ; ) { + rt_unref (tmp_rt = pkt->ro.ro_rt ) ; + m_freem(pkt->dn_m); + n = pkt ; + pkt = (struct dn_pkt *)pkt->dn_next ; + FREE(n, M_IPFW) ; + } +} + +/* + * delete all pipes returning memory + */ +static void +dummynet_flush() +{ + struct dn_pipe *q, *p = all_pipes ; + int s = splnet() ; + + all_pipes = NULL ; + splx(s) ; + /* + * purge all queued pkts and delete all pipes + */ + for ( ; p ; ) { + purge_pipe(p); + q = p ; + p = p->next ; + FREE(q, M_IPFW); + } +} + +extern struct ip_fw_chain *ip_fw_default_rule ; +/* + * when a firewall rule is deleted, scan all pipes and remove the flow-id + * from packets matching this rule. 
+ */ +void +dn_rule_delete(void *r) +{ + struct dn_pipe *p ; + int matches = 0 ; + + for ( p = all_pipes ; p ; p = p->next ) { + struct dn_pkt *x ; + for (x = p->r.head ; x ; x = (struct dn_pkt *)x->dn_next ) + if (x->hdr.mh_data == r) { + matches++ ; + x->hdr.mh_data = (void *)ip_fw_default_rule ; + } + for (x = p->p.head ; x ; x = (struct dn_pkt *)x->dn_next ) + if (x->hdr.mh_data == r) { + matches++ ; + x->hdr.mh_data = (void *)ip_fw_default_rule ; + } + } + printf("dn_rule_delete, r %p, default %p%s, %d matches\n", + (void *)r, (void *)ip_fw_default_rule, + r == ip_fw_default_rule ? " AARGH!":"", matches); +} + +/* + * handler for the various dummynet socket options + * (get, flush, config, del) + */ +static int +ip_dn_ctl(struct sockopt *sopt) +{ + int error = 0 ; + size_t size ; + char *buf, *bp ; + struct dn_pipe *p, tmp_pipe ; + + struct dn_pipe *x, *a, *b ; + + /* Disallow sets in really-really secure mode. */ + if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) + return (EPERM); + + switch (sopt->sopt_name) { + default : + panic("ip_dn_ctl -- unknown option"); + + case IP_DUMMYNET_GET : + for (p = all_pipes, size = 0 ; p ; p = p->next ) + size += sizeof( *p ) ; + buf = _MALLOC(size, M_TEMP, M_WAITOK); + if (buf == 0) { + error = ENOBUFS ; + break ; + } + for (p = all_pipes, bp = buf ; p ; p = p->next ) { + struct dn_pipe *q = (struct dn_pipe *)bp ; + + bcopy(p, bp, sizeof( *p ) ); + /* + * return bw and delay in bits/s and ms, respectively + */ + q->bandwidth *= (8*hz) ; + q->delay = (q->delay * 1000) / hz ; + bp += sizeof( *p ) ; + } + error = sooptcopyout(sopt, buf, size); + FREE(buf, M_TEMP); + break ; + case IP_DUMMYNET_FLUSH : + dummynet_flush() ; + break ; + case IP_DUMMYNET_CONFIGURE : + p = &tmp_pipe ; + error = sooptcopyin(sopt, p, sizeof *p, sizeof *p); + if (error) + break ; + /* + * The config program passes parameters as follows: + * bandwidth = bits/second (0 = no limits); + * must be translated in bytes/tick. 
+ * delay = ms + * must be translated in ticks. + * queue_size = slots (0 = no limit) + * queue_size_bytes = bytes (0 = no limit) + * only one can be set, must be bound-checked + */ + if ( p->bandwidth > 0 ) { + p->bandwidth = p->bandwidth / 8 / hz ; + if (p->bandwidth == 0) /* too little does not make sense! */ + p->bandwidth = 10 ; + } + p->delay = ( p->delay * hz ) / 1000 ; + if (p->queue_size == 0 && p->queue_size_bytes == 0) + p->queue_size = 100 ; + if (p->queue_size != 0 ) /* buffers are prevailing */ + p->queue_size_bytes = 0 ; + if (p->queue_size > 100) + p->queue_size = 100 ; + if (p->queue_size_bytes > 1024*1024) + p->queue_size_bytes = 1024*1024 ; +#if 0 + printf("ip_dn: config pipe %d %d bit/s %d ms %d bufs\n", + p->pipe_nr, + p->bandwidth * 8 * hz , + p->delay * 1000 / hz , p->queue_size); +#endif + for (a = NULL , b = all_pipes ; b && b->pipe_nr < p->pipe_nr ; + a = b , b = b->next) ; + if (b && b->pipe_nr == p->pipe_nr) { + /* XXX should spl and flush old pipe... */ + b->bandwidth = p->bandwidth ; + b->delay = p->delay ; + b->ticks_from_last_insert = p->delay ; + b->queue_size = p->queue_size ; + b->queue_size_bytes = p->queue_size_bytes ; + b->plr = p->plr ; + } else { + int s ; + x = _MALLOC(sizeof(struct dn_pipe), M_IPFW, M_DONTWAIT) ; + if (x == NULL) { + printf("ip_dummynet.c: sorry no memory\n"); + error = ENOSPC ; + break ; + } + bzero(x, sizeof(*x) ); + x->bandwidth = p->bandwidth ; + x->delay = p->delay ; + x->ticks_from_last_insert = p->delay ; + x->pipe_nr = p->pipe_nr ; + x->queue_size = p->queue_size ; + x->queue_size_bytes = p->queue_size_bytes ; + x->plr = p->plr ; + + s = splnet() ; + x->next = b ; + if (a == NULL) + all_pipes = x ; + else + a->next = x ; + splx(s); + } + break ; + + case IP_DUMMYNET_DEL : + p = &tmp_pipe ; + error = sooptcopyin(sopt, p, sizeof *p, sizeof *p); + if (error) + break ; + + for (a = NULL , b = all_pipes ; b && b->pipe_nr < p->pipe_nr ; + a = b , b = b->next) ; + if (b && b->pipe_nr == p->pipe_nr) { /* 
found pipe */ + int s = splnet() ; + struct ip_fw_chain *chain = ip_fw_chain.lh_first; + + if (a == NULL) + all_pipes = b->next ; + else + a->next = b->next ; + /* + * remove references to this pipe from the ip_fw rules. + */ + for (; chain; chain = chain->chain.le_next) { + register struct ip_fw *const f = chain->rule; + if (f->pipe_ptr == b) + f->pipe_ptr = NULL ; + } + splx(s); + purge_pipe(b); /* remove pkts from here */ + FREE(b, M_IPFW); + } + break ; + } + return error ; +} + +void +ip_dn_init(void) +{ + printf("DUMMYNET initialized (980901) -- size dn_pkt %d\n", + sizeof(struct dn_pkt)); + all_pipes = NULL ; + ip_dn_ctl_ptr = ip_dn_ctl; +} + +#if DUMMYNET_MODULE + +#include +#include +#include + +MOD_MISC(dummynet); + +static ip_dn_ctl_t *old_dn_ctl_ptr ; + +static int +dummynet_load(struct lkm_table *lkmtp, int cmd) +{ + int s=splnet(); + old_dn_ctl_ptr = ip_dn_ctl_ptr; + ip_dn_init(); + splx(s); + return 0; +} + +static int +dummynet_unload(struct lkm_table *lkmtp, int cmd) +{ + int s=splnet(); + ip_dn_ctl_ptr = old_dn_ctl_ptr; + splx(s); + dummynet_flush(); + printf("DUMMYNET unloaded\n"); + return 0; +} + +int +dummynet_mod(struct lkm_table *lkmtp, int cmd, int ver) +{ + DISPATCH(lkmtp, cmd, ver, dummynet_load, dummynet_unload, lkm_nullcmd); +} +#endif diff --git a/bsd/netinet/ip_dummynet.h b/bsd/netinet/ip_dummynet.h new file mode 100644 index 000000000..312eb5b8b --- /dev/null +++ b/bsd/netinet/ip_dummynet.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Luigi Rizzo + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. + * + */ + +#ifndef _IP_DUMMYNET_H +#define _IP_DUMMYNET_H + +/* + * Definition of dummynet data structures. + * Dummynet handles a list of pipes, each one identified by a unique + * number (hopefully the list is short so we use a linked list). + * + * Each list contains a set of parameters identifying the pipe, and + * a set of packets queued on the pipe itself. + * + * I could have used queue macros, but the management i have + * is pretty simple and this makes the code more portable. + */ + +/* + * struct dn_pkt identifies a packet in the dummynet queue. The + * first part is really an m_hdr for implementation purposes, and some + * fields are saved there. When passing the packet back to the ip_input/ + * ip_output(), the struct is prepended to the mbuf chain with type + * MT_DUMMYNET, and contains the pointer to the matching rule. 
+ */ +struct dn_pkt { + struct m_hdr hdr ; +#define dn_next hdr.mh_nextpkt /* next element in queue */ +#define dn_m hdr.mh_next /* packet to be forwarded */ +#define dn_hlen hdr.mh_len /* hlen, for ip_output */ +#define dn_dir hdr.mh_flags /* IP_FW_F_IN or IP_FW_F_OUT */ + int delay; /* stays queued until delay=0 */ + struct ifnet *ifp; /* interface, for ip_output */ + struct route ro; /* route, for ip_output. MUST COPY */ + +#if DUMMYNET_DEBUG + struct timeval beg, mid; /* testing only */ + int act_delay; /* testing only */ + int in_delay; /* testing only */ +#endif +}; + +struct dn_queue { + struct dn_pkt *head, *tail; +} ; + +/* + * descriptor of a pipe. The flags field will be used to speed up the + * forwarding code paths, in case some of the parameters are not + * used. + */ +struct dn_pipe { /* a pipe */ + struct dn_pipe *next ; + + u_short pipe_nr ; /* number */ + u_short flags ; /* to speed up things */ +#define DN_HAVE_BW 1 +#define DN_HAVE_QUEUE 2 +#define DN_HAVE_DELAY 4 + int bandwidth; /* really, bytes/tick. */ + int queue_size ; + int queue_size_bytes ; + int delay ; /* really, ticks */ + int plr ; /* pkt loss rate (2^31-1 means 100%) */ + + struct dn_queue r; + int r_len; /* elements in r_queue */ + int r_len_bytes; /* bytes in r_queue */ + int r_drops; /* drops from r_queue */ + struct dn_queue p ; + int ticks_from_last_insert; + long numbytes; /* which can send or receive */ +}; + +/* + * The following is used to define a new mbuf type that is + * prepended to the packet when it comes out of a pipe. The definition + * ought to go in /sys/sys/mbuf.h but here it is less intrusive. 
+ */ + +#define MT_DUMMYNET MT_CONTROL +/* + * what to do of a packet when it comes out of a pipe + */ +#define DN_TO_IP_OUT 1 +#define DN_TO_IP_IN 2 +#define DN_TO_BDG_FWD 3 + +#if KERNEL + +MALLOC_DECLARE(M_IPFW); + +typedef int ip_dn_ctl_t __P((struct sockopt *)) ; +extern ip_dn_ctl_t *ip_dn_ctl_ptr; + +void ip_dn_init(void); /* called in ip_input.c */ +void dn_rule_delete(void *r); /* used in ip_fw.c */ +int dummynet_io(int pipe, int dir, + struct mbuf *m, struct ifnet *ifp, struct route *ro, int hlen, + struct ip_fw_chain *rule); +#endif /* KERNEL */ + +#endif /* _IP_DUMMYNET_H */ diff --git a/bsd/netinet/ip_ecn.c b/bsd/netinet/ip_ecn.c new file mode 100644 index 000000000..33211a3d5 --- /dev/null +++ b/bsd/netinet/ip_ecn.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: ip_ecn.c,v 1.3 2000/11/22 01:12:12 zarzycki Exp $ + */ +/* + * ECN consideration on tunnel ingress/egress operation. + * http://www.aciri.org/floyd/papers/draft-ipsec-ecn-00.txt + */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include + +#if INET +#include +#include +#include +#endif + +#if INET6 +#ifndef INET +#include +#endif +#include +#endif + +#include + +/* + * modify outer ECN (TOS) field on ingress operation (tunnel encapsulation). + * call it after you've done the default initialization/copy for the outer. 
+ */ +void +ip_ecn_ingress(mode, outer, inner) + int mode; + u_int8_t *outer; + u_int8_t *inner; +{ + if (!outer || !inner) + panic("NULL pointer passed to ip_ecn_ingress"); + + switch (mode) { + case ECN_ALLOWED: /* ECN allowed */ + *outer &= ~IPTOS_CE; + break; + case ECN_FORBIDDEN: /* ECN forbidden */ + *outer &= ~(IPTOS_ECT | IPTOS_CE); + break; + case ECN_NOCARE: /* no consideration to ECN */ + break; + } +} + +/* + * modify inner ECN (TOS) field on egress operation (tunnel decapsulation). + * call it after you've done the default initialization/copy for the inner. + */ +void +ip_ecn_egress(mode, outer, inner) + int mode; + u_int8_t *outer; + u_int8_t *inner; +{ + if (!outer || !inner) + panic("NULL pointer passed to ip_ecn_egress"); + + switch (mode) { + case ECN_ALLOWED: + if (*outer & IPTOS_CE) + *inner |= IPTOS_CE; + break; + case ECN_FORBIDDEN: /* ECN forbidden */ + case ECN_NOCARE: /* no consideration to ECN */ + break; + } +} + +#if INET6 +void +ip6_ecn_ingress(mode, outer, inner) + int mode; + u_int32_t *outer; + u_int32_t *inner; +{ + u_int8_t outer8, inner8; + + if (!outer || !inner) + panic("NULL pointer passed to ip6_ecn_ingress"); + + outer8 = (ntohl(*outer) >> 20) & 0xff; + inner8 = (ntohl(*inner) >> 20) & 0xff; + ip_ecn_ingress(mode, &outer8, &inner8); + *outer &= ~htonl(0xff << 20); + *outer |= htonl((u_int32_t)outer8 << 20); +} + +void +ip6_ecn_egress(mode, outer, inner) + int mode; + u_int32_t *outer; + u_int32_t *inner; +{ + u_int8_t outer8, inner8; + + if (!outer || !inner) + panic("NULL pointer passed to ip6_ecn_egress"); + + outer8 = (ntohl(*outer) >> 20) & 0xff; + inner8 = (ntohl(*inner) >> 20) & 0xff; + ip_ecn_egress(mode, &outer8, &inner8); + *inner &= ~htonl(0xff << 20); + *inner |= htonl((u_int32_t)inner8 << 20); +} +#endif diff --git a/bsd/netinet/ip_ecn.h b/bsd/netinet/ip_ecn.h new file mode 100644 index 000000000..4b3e143a6 --- /dev/null +++ b/bsd/netinet/ip_ecn.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: ip_ecn.h,v 1.3 2000/11/22 01:12:12 zarzycki Exp $ + */ +/* + * ECN consideration on tunnel ingress/egress operation. + * http://www.aciri.org/floyd/papers/draft-ipsec-ecn-00.txt + */ + +#define ECN_ALLOWED 1 /* ECN allowed */ +#define ECN_FORBIDDEN 0 /* ECN forbidden */ +#define ECN_NOCARE (-1) /* no consideration to ECN */ + +#if defined(KERNEL) || defined(_KERNEL) +extern void ip_ecn_ingress __P((int, u_int8_t *, u_int8_t *)); +extern void ip_ecn_egress __P((int, u_int8_t *, u_int8_t *)); +#ifdef INET6 +extern void ip6_ecn_ingress __P((int, u_int32_t *, u_int32_t *)); +extern void ip6_ecn_egress __P((int, u_int32_t *, u_int32_t *)); +#endif +#endif diff --git a/bsd/netinet/ip_encap.c b/bsd/netinet/ip_encap.c new file mode 100644 index 000000000..0fae46c85 --- /dev/null +++ b/bsd/netinet/ip_encap.c @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: ip_encap.c,v 1.21 2000/03/30 14:30:06 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * My grandfather said that there's a devil inside tunnelling technology... + * + * We have surprisingly many protocols that want packets with IP protocol + * #4 or #41. Here's a list of protocols that want protocol #41: + * RFC1933 configured tunnel + * RFC1933 automatic tunnel + * RFC2401 IPsec tunnel + * RFC2473 IPv6 generic packet tunnelling + * RFC2529 6over4 tunnel + * mobile-ip6 (uses RFC2473) + * 6to4 tunnel + * Here's a list of protocol that want protocol #4: + * RFC1853 IPv4-in-IPv4 tunnel + * RFC2344 reverse tunnelling for mobile-ip4 + * RFC2401 IPsec tunnel + * Well, what can I say. They impose different en/decapsulation mechanism + * from each other, so they need separate protocol handler. The only one + * we can easily determine by protocol # is IPsec, which always has + * AH/ESP/IPComp header right after outer IP header. + * + * So, clearly good old protosw does not work for protocol #4 and #41. + * The code will let you match protocol via src/dst address pair. 
+ */ + +#ifdef __FreeBSD__ +# include "opt_mrouting.h" +# if __FreeBSD__ == 3 +# include "opt_inet.h" +# endif +# if __FreeBSD__ >= 4 +# include "opt_inet.h" +# include "opt_inet6.h" +# endif +#else +# ifdef __NetBSD__ +# include "opt_inet.h" +# endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#if MROUTING +#include +#endif /* MROUTING */ +#ifdef __OpenBSD__ +#include +#endif + +#if INET6 +#include +#include +#include +#endif + + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include +#include +MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); +#endif + +static int mask_match __P((const struct encaptab *, const struct sockaddr *, + const struct sockaddr *)); +static void encap_fillarg __P((struct mbuf *, const struct encaptab *)); + +/* rely upon BSS initialization */ +LIST_HEAD(, encaptab) encaptab; + +void +encap_init() +{ +#if 0 + /* + * we cannot use LIST_INIT() here, since drivers may want to call + * encap_attach(), on driver attach. encap_init() wlil be called + * on AF_INET{,6} initialization, which happens after driver + * initialization - using LIST_INIT() here can nuke encap_attach() + * from drivers. 
+ */ + LIST_INIT(&encaptab); +#endif +} + +void +encap4_input(m, off, proto) + struct mbuf *m; + int off; + int proto; +{ + struct ip *ip; + struct sockaddr_in s, d; + struct encaptab *ep; + + + ip = mtod(m, struct ip *); +#ifdef __OpenBSD__ + proto = ip->ip_p; +#endif + + bzero(&s, sizeof(s)); + s.sin_family = AF_INET; + s.sin_len = sizeof(struct sockaddr_in); + s.sin_addr = ip->ip_src; + bzero(&d, sizeof(d)); + d.sin_family = AF_INET; + d.sin_len = sizeof(struct sockaddr_in); + d.sin_addr = ip->ip_dst; + + for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) { + if (ep->proto >= 0 && ep->proto != proto) + continue; + + if (ep->func) { + if ((*ep->func)(m, off, proto, ep->arg) == 0) + continue; + } else { + /* + * it's inbound traffic, we need to match in reverse + * order + */ + if (mask_match(ep, (struct sockaddr *)&d, + (struct sockaddr *)&s) == 0) + continue; + } + + /* found a match */ + if (ep->psw && ep->psw->pr_input) { + encap_fillarg(m, ep); +#warning watchout pr_input! 
+ (*ep->psw->pr_input)(m, off); + } else + m_freem(m); + return; + } + + /* for backward compatibility */ + if (proto == IPPROTO_IPV4) { +#ifdef __OpenBSD__ +#if defined(MROUTING) || defined(IPSEC) + ip4_input(m, off, proto); + return; +#endif +#else +#if MROUTING + ipip_input(m, off); + return; +#endif /*MROUTING*/ +#endif + } + + /* last resort: inject to raw socket */ + rip_input(m, off); +} + +#if INET6 +int +encap6_input(mp, offp, proto) + struct mbuf **mp; + int *offp; + int proto; +{ + struct mbuf *m = *mp; + struct ip6_hdr *ip6; + struct sockaddr_in6 s, d; + struct ip6protosw *psw; + struct encaptab *ep; + + ip6 = mtod(m, struct ip6_hdr *); + + bzero(&s, sizeof(s)); + s.sin6_family = AF_INET6; + s.sin6_len = sizeof(struct sockaddr_in6); + s.sin6_addr = ip6->ip6_src; + bzero(&d, sizeof(d)); + d.sin6_family = AF_INET6; + d.sin6_len = sizeof(struct sockaddr_in6); + d.sin6_addr = ip6->ip6_dst; + + for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) { + if (ep->proto >= 0 && ep->proto != proto) + continue; + if (ep->func) { + if ((*ep->func)(m, *offp, proto, ep->arg) == 0) + continue; + } else { + /* + * it's inbound traffic, we need to match in reverse + * order + */ + if (mask_match(ep, (struct sockaddr *)&d, + (struct sockaddr *)&s) == 0) + continue; + } + + /* found a match */ + psw = (struct ip6protosw *)ep->psw; +#warning watchout pr_input! + if (psw && psw->pr_input) { + encap_fillarg(m, ep); + return (*psw->pr_input)(mp, offp, proto); + } else { + m_freem(m); + return IPPROTO_DONE; + } + } + + /* last resort: inject to raw socket */ + return rip6_input(mp, offp, proto); +} +#endif + +/* + * sp (src ptr) is always my side, and dp (dst ptr) is always remote side. + * length of mask (sm and dm) is assumed to be same as sp/dp. + * Return value will be necessary as input (cookie) for encap_detach(). 
+ */ +const struct encaptab * +encap_attach(af, proto, sp, sm, dp, dm, psw, arg) + int af; + int proto; + const struct sockaddr *sp, *sm; + const struct sockaddr *dp, *dm; + const struct protosw *psw; + void *arg; +{ + struct encaptab *ep; + int error; + int s; + +#if defined(__NetBSD__) || defined(__OpenBSD__) + s = splsoftnet(); +#else + s = splnet(); +#endif + /* sanity check on args */ + if (sp->sa_len > sizeof(ep->src) || dp->sa_len > sizeof(ep->dst)) { + error = EINVAL; + goto fail; + } + if (sp->sa_len != dp->sa_len) { + error = EINVAL; + goto fail; + } + if (af != sp->sa_family || af != dp->sa_family) { + error = EINVAL; + goto fail; + } + + /* check if anyone have already attached with exactly same config */ + for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) { + if (ep->af != af) + continue; + if (ep->proto != proto) + continue; + if (ep->src.ss_len != sp->sa_len || + bcmp(&ep->src, sp, sp->sa_len) != 0 || + bcmp(&ep->srcmask, sm, sp->sa_len) != 0) + continue; + if (ep->dst.ss_len != dp->sa_len || + bcmp(&ep->dst, dp, dp->sa_len) != 0 || + bcmp(&ep->dstmask, dm, dp->sa_len) != 0) + continue; + + error = EEXIST; + goto fail; + } + + ep = _MALLOC(sizeof(*ep), M_NETADDR, M_NOWAIT); /*XXX*/ + if (ep == NULL) { + error = ENOBUFS; + goto fail; + } + bzero(ep, sizeof(*ep)); + + ep->af = af; + ep->proto = proto; + bcopy(sp, &ep->src, sp->sa_len); + bcopy(sm, &ep->srcmask, sp->sa_len); + bcopy(dp, &ep->dst, dp->sa_len); + bcopy(dm, &ep->dstmask, dp->sa_len); + ep->psw = psw; + ep->arg = arg; + + /* + * Order of insertion will determine the priority in lookup. + * We should be careful putting them in specific-one-first order. + * The question is, since we have two "mask" portion, we cannot really + * define total order between entries. + * For example, which of these should be preferred? + * src=3ffe::/16, dst=3ffe:501::/32 + * src=3ffe:501::/32, dst=3ffe::/16 + * + * At this moment we don't care about the ordering. 
+ */ + LIST_INSERT_HEAD(&encaptab, ep, chain); + error = 0; + splx(s); + return ep; + +fail: + splx(s); + return NULL; +} + +const struct encaptab * +encap_attach_func(af, proto, func, psw, arg) + int af; + int proto; + int (*func) __P((const struct mbuf *, int, int, void *)); + const struct protosw *psw; + void *arg; +{ + struct encaptab *ep; + int error; + int s; + +#if defined(__NetBSD__) || defined(__OpenBSD__) + s = splsoftnet(); +#else + s = splnet(); +#endif + /* sanity check on args */ + if (!func) { + error = EINVAL; + goto fail; + } + + ep = _MALLOC(sizeof(*ep), M_NETADDR, M_NOWAIT); /*XXX*/ + if (ep == NULL) { + error = ENOBUFS; + goto fail; + } + bzero(ep, sizeof(*ep)); + + ep->af = af; + ep->proto = proto; + ep->func = func; + ep->psw = psw; + ep->arg = arg; + + /* + * Order of insertion will determine the priority in lookup. + * We should be careful putting them in specific-one-first order. + * The question is, since we have two "mask" portion, we cannot really + * define total order between entries. + * For example, which of these should be checked first? + * src=3ffe::/16, dst=3ffe:501::/32 + * src=3ffe:501::/32, dst=3ffe::/16 + * + * At this moment we don't care about the ordering. 
+ */ + LIST_INSERT_HEAD(&encaptab, ep, chain); + error = 0; + splx(s); + return ep; + +fail: + splx(s); + return NULL; +} + +int +encap_detach(cookie) + const struct encaptab *cookie; +{ + const struct encaptab *ep = cookie; + struct encaptab *p; + + for (p = LIST_FIRST(&encaptab); p; p = LIST_NEXT(p, chain)) { + if (p == ep) { + LIST_REMOVE(p, chain); + _FREE(p, M_NETADDR); /*XXX*/ + return 0; + } + } + + return EINVAL; +} + +static int +mask_match(ep, sp, dp) + const struct encaptab *ep; + const struct sockaddr *sp; + const struct sockaddr *dp; +{ + struct sockaddr_storage s; + struct sockaddr_storage d; + int i; + u_int8_t *p, *q, *r; + + if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d)) + return 0; + if (sp->sa_family != ep->af || dp->sa_family != ep->af) + return 0; + if (sp->sa_len != ep->src.ss_len || dp->sa_len != ep->dst.ss_len) + return 0; + + p = (u_int8_t *)sp; + q = (u_int8_t *)&ep->srcmask; + r = (u_int8_t *)&s; + for (i = 0 ; i < sp->sa_len; i++) + r[i] = p[i] & q[i]; + + p = (u_int8_t *)dp; + q = (u_int8_t *)&ep->dstmask; + r = (u_int8_t *)&d; + for (i = 0 ; i < dp->sa_len; i++) + r[i] = p[i] & q[i]; + + /* need to overwrite len/family portion as we don't compare them */ + s.ss_len = sp->sa_len; + s.ss_family = sp->sa_family; + d.ss_len = dp->sa_len; + d.ss_family = dp->sa_family; + + if (bcmp(&s, &ep->src, ep->src.ss_len) == 0 && + bcmp(&d, &ep->dst, ep->dst.ss_len) == 0) { + return 1; + } else + return 0; +} + +static void +encap_fillarg(m, ep) + struct mbuf *m; + const struct encaptab *ep; +{ +#if 0 + m->m_pkthdr.aux = ep->arg; +#else + struct mbuf *n; + + n = m_aux_add(m, AF_INET, IPPROTO_IPV4); + if (n) { + *mtod(n, void **) = ep->arg; + n->m_len = sizeof(void *); + } +#endif +} + +void * +encap_getarg(m) + struct mbuf *m; +{ + void *p; +#if 0 + p = m->m_pkthdr.aux; + m->m_pkthdr.aux = NULL; + return p; +#else + struct mbuf *n; + + p = NULL; + n = m_aux_find(m, AF_INET, IPPROTO_IPV4); + if (n) { + if (n->m_len == sizeof(void *)) + p = 
*mtod(n, void **); + m_aux_delete(m, n); + } + return p; +#endif +} diff --git a/bsd/netinet/ip_encap.h b/bsd/netinet/ip_encap.h new file mode 100644 index 000000000..c795d55c0 --- /dev/null +++ b/bsd/netinet/ip_encap.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $KAME: ip_encap.h,v 1.6 2000/03/06 04:34:21 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETINET_IP_ENCAP_H_ +#define _NETINET_IP_ENCAP_H_ + +#if KERNEL + +struct encaptab { + LIST_ENTRY(encaptab) chain; + int af; + int proto; /* -1: don't care, I'll check myself */ + struct sockaddr_storage src; /* my addr */ + struct sockaddr_storage srcmask; + struct sockaddr_storage dst; /* remote addr */ + struct sockaddr_storage dstmask; + int (*func) __P((const struct mbuf *, int, int, void *)); + const struct protosw *psw; /* only pr_input will be used */ + void *arg; /* passed via m->m_pkthdr.aux */ +}; + +void encap_init __P((void)); +void encap4_input __P((struct mbuf *, int, int)); +int encap6_input __P((struct mbuf **, int *, int)); +const struct encaptab *encap_attach __P((int, int, const struct sockaddr *, + const struct sockaddr *, const struct sockaddr *, + const struct sockaddr *, const struct protosw *, void *)); +const struct encaptab *encap_attach_func __P((int, int, + int (*) __P((const struct mbuf *, int, int, void *)), + const struct protosw *, void *)); +int encap_detach 
__P((const struct encaptab *)); +void *encap_getarg __P((struct mbuf *)); +#endif + +#endif /*_NETINET_IP_ENCAP_H_*/ diff --git a/bsd/netinet/ip_fil.h b/bsd/netinet/ip_fil.h new file mode 100644 index 000000000..e847d070e --- /dev/null +++ b/bsd/netinet/ip_fil.h @@ -0,0 +1,546 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1993-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + * @(#)ip_fil.h 1.35 6/5/96 + * $Id: ip_fil.h,v 1.3 2000/11/22 01:12:12 zarzycki Exp $ + */ + +#ifndef __IP_FIL_H__ +#define __IP_FIL_H__ + +/* + * Pathnames for various IP Filter control devices. Used by LKM + * and userland, so defined here. 
+ */ +#define IPNAT_NAME "/dev/ipnat" +#define IPSTATE_NAME "/dev/ipstate" +#define IPAUTH_NAME "/dev/ipauth" + +#ifndef SOLARIS +#define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) +#endif + +#if defined(KERNEL) && !defined(_KERNEL) +#define _KERNEL +#endif + +#ifndef __P +# if __STDC__ +# define __P(x) x +# else +# define __P(x) () +# endif +#endif + +#if defined(__STDC__) || defined(__GNUC__) +#define SIOCADAFR _IOW('r', 60, struct frentry) +#define SIOCRMAFR _IOW('r', 61, struct frentry) +#define SIOCSETFF _IOW('r', 62, u_int) +#define SIOCGETFF _IOR('r', 63, u_int) +#define SIOCGETFS _IOR('r', 64, struct friostat) +#define SIOCIPFFL _IOWR('r', 65, int) +#define SIOCIPFFB _IOR('r', 66, int) +#define SIOCADIFR _IOW('r', 67, struct frentry) +#define SIOCRMIFR _IOW('r', 68, struct frentry) +#define SIOCSWAPA _IOR('r', 69, u_int) +#define SIOCINAFR _IOW('r', 70, struct frentry) +#define SIOCINIFR _IOW('r', 71, struct frentry) +#define SIOCFRENB _IOW('r', 72, u_int) +#define SIOCFRSYN _IOW('r', 73, u_int) +#define SIOCFRZST _IOWR('r', 74, struct friostat) +#define SIOCZRLST _IOWR('r', 75, struct frentry) +#define SIOCAUTHW _IOWR('r', 76, struct fr_info) +#define SIOCAUTHR _IOWR('r', 77, struct fr_info) +#define SIOCATHST _IOWR('r', 78, struct fr_authstat) +#else +#define SIOCADAFR _IOW(r, 60, struct frentry) +#define SIOCRMAFR _IOW(r, 61, struct frentry) +#define SIOCSETFF _IOW(r, 62, u_int) +#define SIOCGETFF _IOR(r, 63, u_int) +#define SIOCGETFS _IOR(r, 64, struct friostat) +#define SIOCIPFFL _IOWR(r, 65, int) +#define SIOCIPFFB _IOR(r, 66, int) +#define SIOCADIFR _IOW(r, 67, struct frentry) +#define SIOCRMIFR _IOW(r, 68, struct frentry) +#define SIOCSWAPA _IOR(r, 69, u_int) +#define SIOCINAFR _IOW(r, 70, struct frentry) +#define SIOCINIFR _IOW(r, 71, struct frentry) +#define SIOCFRENB _IOW(r, 72, u_int) +#define SIOCFRSYN _IOW(r, 73, u_int) +#define SIOCFRZST _IOWR(r, 74, struct friostat) +#define SIOCZRLST _IOWR(r, 75, struct frentry) 
+#define SIOCAUTHW _IOWR(r, 76, struct fr_info) +#define SIOCAUTHR _IOWR(r, 77, struct fr_info) +#define SIOCATHST _IOWR(r, 78, struct fr_authstat) +#endif +#define SIOCADDFR SIOCADAFR +#define SIOCDELFR SIOCRMAFR +#define SIOCINSFR SIOCINAFR + +typedef struct fr_ip { + u_int fi_v:4; /* IP version */ + u_int fi_fl:4; /* packet flags */ + u_char fi_tos; + u_char fi_ttl; + u_char fi_p; + struct in_addr fi_src; + struct in_addr fi_dst; + u_32_t fi_optmsk; /* bitmask composed from IP options */ + u_short fi_secmsk; /* bitmask composed from IP security options */ + u_short fi_auth; +} fr_ip_t; + +#define FI_OPTIONS (FF_OPTIONS >> 24) +#define FI_TCPUDP (FF_TCPUDP >> 24) /* TCP/UCP implied comparison*/ +#define FI_FRAG (FF_FRAG >> 24) +#define FI_SHORT (FF_SHORT >> 24) + +typedef struct fr_info { + struct fr_ip fin_fi; + u_short fin_data[2]; + u_short fin_out; + u_short fin_hlen; + u_char fin_tcpf; + u_char fin_icode; /* From here on is packet specific */ + u_short fin_rule; + u_short fin_group; + u_short fin_dlen; + u_short fin_id; + void *fin_ifp; + struct frentry *fin_fr; + char *fin_dp; /* start of data past IP header */ + void *fin_mp; +} fr_info_t; + +/* + * Size for compares on fr_info structures + */ +#define FI_CSIZE (sizeof(struct fr_ip) + sizeof(u_short) * 4 + \ + sizeof(u_char)) +/* + * Size for copying cache fr_info structure + */ +#define FI_COPYSIZE (sizeof(fr_info_t) - sizeof(void *) * 2) + +typedef struct frdest { + void *fd_ifp; + struct in_addr fd_ip; + char fd_ifname[IFNAMSIZ]; +} frdest_t; + +typedef struct frentry { + struct frentry *fr_next; + u_short fr_group; /* group to which this rule belongs */ + u_short fr_grhead; /* group # which this rule starts */ + struct frentry *fr_grp; + int fr_ref; /* reference count - for grouping */ + void *fr_ifa; + /* + * These are only incremented when a packet matches this rule and + * it is the last match + */ + U_QUAD_T fr_hits; + U_QUAD_T fr_bytes; + /* + * Fields after this may not change whilst in the 
kernel. + */ + struct fr_ip fr_ip; + struct fr_ip fr_mip; /* mask structure */ + + u_char fr_tcpfm; /* tcp flags mask */ + u_char fr_tcpf; /* tcp flags */ + + u_short fr_icmpm; /* data for ICMP packets (mask) */ + u_short fr_icmp; + + u_char fr_scmp; /* data for port comparisons */ + u_char fr_dcmp; + u_short fr_dport; + u_short fr_sport; + u_short fr_stop; /* top port for <> and >< */ + u_short fr_dtop; /* top port for <> and >< */ + u_32_t fr_flags; /* per-rule flags && options (see below) */ + int fr_skip; /* # of rules to skip */ + int (*fr_func) __P((int, ip_t *, fr_info_t *)); /* call this function */ + char fr_icode; /* return ICMP code */ + char fr_ifname[IFNAMSIZ]; + struct frdest fr_tif; /* "to" interface */ + struct frdest fr_dif; /* duplicate packet interfaces */ +} frentry_t; + +#define fr_proto fr_ip.fi_p +#define fr_ttl fr_ip.fi_ttl +#define fr_tos fr_ip.fi_tos +#define fr_dst fr_ip.fi_dst +#define fr_src fr_ip.fi_src +#define fr_dmsk fr_mip.fi_dst +#define fr_smsk fr_mip.fi_src + +#ifndef offsetof +#define offsetof(t,m) (int)((&((t *)0L)->m)) +#endif +#define FR_CMPSIZ (sizeof(struct frentry) - offsetof(frentry_t, fr_ip)) + +/* + * fr_flags + */ +#define FR_BLOCK 0x00001 /* do not allow packet to pass */ +#define FR_PASS 0x00002 /* allow packet to pass */ +#define FR_OUTQUE 0x00004 /* outgoing packets */ +#define FR_INQUE 0x00008 /* ingoing packets */ +#define FR_LOG 0x00010 /* Log */ +#define FR_LOGB 0x00011 /* Log-fail */ +#define FR_LOGP 0x00012 /* Log-pass */ +#define FR_LOGBODY 0x00020 /* Log the body */ +#define FR_LOGFIRST 0x00040 /* Log the first byte if state held */ +#define FR_RETRST 0x00080 /* Return TCP RST packet - reset connection */ +#define FR_RETICMP 0x00100 /* Return ICMP unreachable packet */ +#define FR_NOMATCH 0x00200 /* no match occured */ +#define FR_ACCOUNT 0x00400 /* count packet bytes */ +#define FR_KEEPFRAG 0x00800 /* keep fragment information */ +#define FR_KEEPSTATE 0x01000 /* keep `connection' state information */ 
+#define FR_INACTIVE 0x02000 +#define FR_QUICK 0x04000 /* match & stop processing list */ +#define FR_FASTROUTE 0x08000 /* bypass normal routing */ +#define FR_CALLNOW 0x10000 /* call another function (fr_func) if matches */ +#define FR_DUP 0x20000 /* duplicate packet */ +#define FR_LOGORBLOCK 0x40000 /* block the packet if it can't be logged */ +#define FR_NOTSRCIP 0x80000 /* not the src IP# */ +#define FR_NOTDSTIP 0x100000 /* not the dst IP# */ +#define FR_AUTH 0x200000 /* use authentication */ +#define FR_PREAUTH 0x400000 /* require preauthentication */ + +#define FR_LOGMASK (FR_LOG|FR_LOGP|FR_LOGB) + +/* + * These correspond to #define's for FI_* and are stored in fr_flags + */ +#define FF_OPTIONS 0x01000000 +#define FF_TCPUDP 0x02000000 +#define FF_FRAG 0x04000000 +#define FF_SHORT 0x08000000 +/* + * recognized flags for SIOCGETFF and SIOCSETFF, and get put in fr_flags + */ +#define FF_LOGPASS 0x10000000 +#define FF_LOGBLOCK 0x20000000 +#define FF_LOGNOMATCH 0x40000000 +#define FF_LOGGING (FF_LOGPASS|FF_LOGBLOCK|FF_LOGNOMATCH) +#define FF_BLOCKNONIP 0x80000000 /* Solaris2 Only */ + +#define FR_NONE 0 +#define FR_EQUAL 1 +#define FR_NEQUAL 2 +#define FR_LESST 3 +#define FR_GREATERT 4 +#define FR_LESSTE 5 +#define FR_GREATERTE 6 +#define FR_OUTRANGE 7 +#define FR_INRANGE 8 + +typedef struct filterstats { + u_long fr_pass; /* packets allowed */ + u_long fr_block; /* packets denied */ + u_long fr_nom; /* packets which don't match any rule */ + u_long fr_ppkl; /* packets allowed and logged */ + u_long fr_bpkl; /* packets denied and logged */ + u_long fr_npkl; /* packets unmatched and logged */ + u_long fr_pkl; /* packets logged */ + u_long fr_skip; /* packets to be logged but buffer full */ + u_long fr_ret; /* packets for which a return is sent */ + u_long fr_acct; /* packets for which counting was performed */ + u_long fr_bnfr; /* bad attempts to allocate fragment state */ + u_long fr_nfr; /* new fragment state kept */ + u_long fr_cfr; /* add new fragment state 
but complete pkt */ + u_long fr_bads; /* bad attempts to allocate packet state */ + u_long fr_ads; /* new packet state kept */ + u_long fr_chit; /* cached hit */ + u_long fr_tcpbad; /* TCP checksum check failures */ + u_long fr_pull[2]; /* good and bad pullup attempts */ +#if SOLARIS + u_long fr_bad; /* bad IP packets to the filter */ + u_long fr_notip; /* packets passed through no on ip queue */ + u_long fr_drop; /* packets dropped - no info for them! */ +#endif +} filterstats_t; + +/* + * For SIOCGETFS + */ +typedef struct friostat { + struct filterstats f_st[2]; + struct frentry *f_fin[2]; + struct frentry *f_fout[2]; + struct frentry *f_acctin[2]; + struct frentry *f_acctout[2]; + struct frentry *f_auth; + u_long f_froute[2]; + int f_active; +} friostat_t; + +typedef struct optlist { + u_short ol_val; + int ol_bit; +} optlist_t; + + +/* + * Group list structure. + */ +typedef struct frgroup { + u_short fg_num; + struct frgroup *fg_next; + struct frentry *fg_head; + struct frentry **fg_start; +} frgroup_t; + + +/* + * Log structure. Each packet header logged is prepended by one of these. + * Following this in the log records read from the device will be an ipflog + * structure which is then followed by any packet data. 
+ */ +typedef struct iplog { + u_long ipl_magic; + u_long ipl_sec; + u_long ipl_usec; + u_int ipl_len; + u_int ipl_count; + size_t ipl_dsize; + struct iplog *ipl_next; +} iplog_t; + +#define IPL_MAGIC 0x49504c4d /* 'IPLM' */ + +typedef struct ipflog { +#if (defined(NetBSD) && (NetBSD <= 1991011) && (NetBSD >= 199603)) || \ + (defined(OpenBSD) && (OpenBSD >= 199603)) + u_char fl_ifname[IFNAMSIZ]; +#else + u_int fl_unit; + u_char fl_ifname[4]; +#endif + u_char fl_plen; /* extra data after hlen */ + u_char fl_hlen; /* length of IP headers saved */ + u_short fl_rule; /* assume never more than 64k rules, total */ + u_short fl_group; + u_32_t fl_flags; +} ipflog_t; + + +#ifndef ICMP_UNREACH_FILTER +#define ICMP_UNREACH_FILTER 13 +#endif + +#ifndef IPF_LOGGING +#define IPF_LOGGING 0 +#endif +#ifndef IPF_DEFAULT_PASS +#define IPF_DEFAULT_PASS FR_PASS +#endif + +#define IPMINLEN(i, h) ((i)->ip_len >= ((i)->ip_hl * 4 + sizeof(struct h))) +#define IPLLOGSIZE 8192 + +/* + * Device filenames for reading log information. Use ipf on Solaris2 because + * ipl is already a name used by something else. 
+ */ +#ifndef IPL_NAME +# if SOLARIS +# define IPL_NAME "/dev/ipf" +# else +# define IPL_NAME "/dev/ipl" +# endif +#endif +#define IPL_NAT IPNAT_NAME +#define IPL_STATE IPSTATE_NAME +#define IPL_AUTH IPAUTH_NAME + +#define IPL_LOGIPF 0 /* Minor device #'s for accessing logs */ +#define IPL_LOGNAT 1 +#define IPL_LOGSTATE 2 +#define IPL_LOGAUTH 3 +#define IPL_LOGMAX 3 + +#if !defined(CDEV_MAJOR) && defined (__FreeBSD_version) && \ + (__FreeBSD_version >= 220000) +# define CDEV_MAJOR 79 +#endif + +#ifndef _KERNEL +struct ifnet; +extern int fr_check __P((ip_t *, int, void *, int, mb_t **)); +extern int (*fr_checkp) __P((ip_t *, int, void *, int, mb_t **)); +extern int send_reset __P((ip_t *, struct ifnet *)); +extern int icmp_error __P((ip_t *, struct ifnet *)); +extern int ipf_log __P((void)); +extern void ipfr_fastroute __P((ip_t *, fr_info_t *, frdest_t *)); +extern struct ifnet *get_unit __P((char *)); +# define FR_SCANLIST(p, ip, fi, m) fr_scanlist(p, ip, fi, m) +# if defined(__NetBSD__) || defined(__OpenBSD__) || \ + (_BSDI_VERSION >= 199701) || (__FreeBSD_version >= 300003) +extern int iplioctl __P((dev_t, u_long, caddr_t, int)); +# else +extern int iplioctl __P((dev_t, int, caddr_t, int)); +# endif +extern int iplopen __P((dev_t, int)); +extern int iplclose __P((dev_t, int)); +#else /* #ifndef _KERNEL */ +# if defined(__NetBSD__) && defined(PFIL_HOOKS) +extern int ipfilterattach __P((int)); +# endif +extern int iplattach __P((void)); +extern int ipl_enable __P((void)); +extern int ipl_disable __P((void)); +extern void ipflog_init __P((void)); +extern int ipflog_clear __P((int)); +extern int ipflog_read __P((int, struct uio *)); +extern int ipflog __P((u_int, ip_t *, fr_info_t *, mb_t *)); +extern int ipllog __P((int, u_long, void **, size_t *, int *, int)); +# if SOLARIS +extern int fr_check __P((ip_t *, int, void *, int, qif_t *, mb_t **)); +extern int (*fr_checkp) __P((ip_t *, int, void *, + int, qif_t *, mb_t **)); +extern int icmp_error __P((ip_t *, int, 
int, qif_t *, + struct in_addr)); +extern int iplioctl __P((dev_t, int, int, int, cred_t *, int *)); +extern int iplopen __P((dev_t *, int, int, cred_t *)); +extern int iplclose __P((dev_t, int, int, cred_t *)); +extern int ipfsync __P((void)); +extern int send_reset __P((ip_t *, qif_t *)); +extern int ipfr_fastroute __P((qif_t *, ip_t *, mblk_t *, mblk_t **, + fr_info_t *, frdest_t *)); +extern void copyin_mblk __P((mblk_t *, int, int, char *)); +extern void copyout_mblk __P((mblk_t *, int, int, char *)); +extern int fr_qin __P((queue_t *, mblk_t *)); +extern int fr_qout __P((queue_t *, mblk_t *)); +# if IPFILTER_LOG +extern int iplread __P((dev_t, struct uio *, cred_t *)); +# endif +# else /* SOLARIS */ +extern int fr_check __P((ip_t *, int, void *, int, mb_t **)); +extern int (*fr_checkp) __P((ip_t *, int, void *, int, mb_t **)); +# if linux +extern int send_reset __P((tcpiphdr_t *, struct ifnet *)); +# else +extern int send_reset __P((tcpiphdr_t *)); +# endif +extern void ipfr_fastroute __P((mb_t *, fr_info_t *, frdest_t *)); +extern size_t mbufchainlen __P((mb_t *)); +# if __sgi +# include +extern int iplioctl __P((dev_t, int, caddr_t, int, cred_t *, int *)); +extern int iplopen __P((dev_t *, int, int, cred_t *)); +extern int iplclose __P((dev_t, int, int, cred_t *)); +extern int iplread __P((dev_t, struct uio *, cred_t *)); +extern int ipfsync __P((void)); +extern int ipfilter_sgi_attach __P((void)); +extern void ipfilter_sgi_detach __P((void)); +extern void ipfilter_sgi_intfsync __P((void)); +# else +# if IPFILTER_LKM +extern int iplidentify __P((char *)); +# endif +# if (_BSDI_VERSION >= 199510) || (__FreeBSD_version >= 220000) || \ + (NetBSD >= 199511) +# if defined(__NetBSD__) || (_BSDI_VERSION >= 199701) || \ + (__FreeBSD_version >= 300003) +extern int iplioctl __P((dev_t, u_long, caddr_t, int, struct proc *)); +# else +extern int iplioctl __P((dev_t, int, caddr_t, int, struct proc *)); +# endif +extern int iplopen __P((dev_t, int, int, struct proc *)); 
+extern int iplclose __P((dev_t, int, int, struct proc *)); +# else +# if defined(__OpenBSD__) +extern int iplioctl __P((dev_t, u_long, caddr_t, int)); +# else /* __OpenBSD__ */ +# ifndef linux +extern int iplioctl __P((dev_t, int, caddr_t, int)); +# else +extern int iplioctl(struct inode *, struct file *, u_int, u_long); +# endif +# endif /* __OpenBSD__ */ +# ifndef linux +extern int iplopen __P((dev_t, int)); +extern int iplclose __P((dev_t, int)); +# else +extern int iplopen __P((struct inode *, struct file *)); +extern void iplclose __P((struct inode *, struct file *)); +# endif /* !linux */ +# endif /* (_BSDI_VERSION >= 199510) */ +# if BSD >= 199306 +extern int iplread __P((dev_t, struct uio *, int)); +# else +# ifndef linux +extern int iplread __P((dev_t, struct uio *)); +# else +extern int iplread(struct inode *, struct file *, char *, int); +# endif /* !linux */ +# endif /* BSD >= 199306 */ +# endif /* __ sgi */ +# endif /* SOLARIS */ +#endif /* #ifndef _KERNEL */ + +/* + * Post NetBSD 1.2 has the PFIL interface for packet filters. This turns + * on those hooks. We don't need any special mods in non-IP Filter code + * with this! 
+ */ +#if (defined(NetBSD) && (NetBSD > 199609) && (NetBSD <= 1991011)) || \ + (defined(NetBSD1_2) && NetBSD1_2 > 1) +# define NETBSD_PF +#endif + +extern int ipldetach __P((void)); +extern u_short fr_tcpsum __P((mb_t *, ip_t *, tcphdr_t *, int)); +#define FR_SCANLIST(p, ip, fi, m) fr_scanlist(p, ip, fi, m) +extern int fr_scanlist __P((int, ip_t *, fr_info_t *, void *)); +extern u_short ipf_cksum __P((u_short *, int)); +extern int fr_copytolog __P((int, char *, int)); +extern void frflush __P((int, int *)); +extern frgroup_t *fr_addgroup __P((u_short, frentry_t *, int, int)); +extern frgroup_t *fr_findgroup __P((u_short, u_32_t, int, int, frgroup_t ***)); +extern void fr_delgroup __P((u_short, u_32_t, int, int)); +extern int ipl_unreach; +extern int ipl_inited; +extern u_long ipl_frouteok[2]; +extern int fr_pass; +extern int fr_flags; +extern int fr_active; +extern fr_info_t frcache[2]; +#if IPFILTER_LOG +extern iplog_t **iplh[IPL_LOGMAX+1], *iplt[IPL_LOGMAX+1]; +extern int iplused[IPL_LOGMAX + 1]; +#endif +extern struct frentry *ipfilter[2][2], *ipacct[2][2]; +extern struct frgroup *ipfgroups[3][2]; +extern struct filterstats frstats[]; + +#endif /* __IP_FIL_H__ */ diff --git a/bsd/netinet/ip_flow.c b/bsd/netinet/ip_flow.c new file mode 100644 index 000000000..f95477801 --- /dev/null +++ b/bsd/netinet/ip_flow.c @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by the 3am Software Foundry ("3am"). It was developed by Matt Thomas. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define IPFLOW_TIMER (5 * PR_SLOWHZ) +#define IPFLOW_HASHBITS 6 /* should not be a multiple of 8 */ +#define IPFLOW_HASHSIZE (1 << IPFLOW_HASHBITS) +static LIST_HEAD(ipflowhead, ipflow) ipflows[IPFLOW_HASHSIZE]; +static int ipflow_inuse; +#define IPFLOW_MAX 256 + +#if ISFB31 +#else +#define M_IPFLOW M_TEMP +#endif + +static int ipflow_active = 0; +SYSCTL_INT(_net_inet_ip, IPCTL_FASTFORWARDING, fastforwarding, CTLFLAG_RW, + &ipflow_active, 0, ""); + +MALLOC_DEFINE(M_IPFLOW, "ip_flow", "IP flow"); + +static unsigned +ipflow_hash( + struct in_addr dst, + struct in_addr src, + unsigned tos) +{ + unsigned hash = tos; + int idx; + for (idx = 0; idx < 32; idx += IPFLOW_HASHBITS) + hash += (dst.s_addr >> (32 - idx)) + (src.s_addr >> idx); + return hash & (IPFLOW_HASHSIZE-1); +} + +static struct ipflow * +ipflow_lookup( + const struct ip *ip) +{ + unsigned hash; + struct ipflow *ipf; + + hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos); + + ipf = LIST_FIRST(&ipflows[hash]); + while (ipf != NULL) { + if (ip->ip_dst.s_addr == ipf->ipf_dst.s_addr + && ip->ip_src.s_addr == ipf->ipf_src.s_addr + && ip->ip_tos == ipf->ipf_tos) + break; + ipf = LIST_NEXT(ipf, ipf_next); + } + return ipf; +} + +int +ipflow_fastforward( + struct mbuf *m) +{ + struct ip *ip; + 
struct ipflow *ipf; + struct rtentry *rt; + int error; + + /* + * Are we forwarding packets? Big enough for an IP packet? + */ + if (!ipforwarding || !ipflow_active || m->m_len < sizeof(struct ip)) + return 0; + /* + * IP header with no option and valid version and length + */ + ip = mtod(m, struct ip *); + if (ip->ip_v != IPVERSION || ip->ip_hl != (sizeof(struct ip) >> 2) + || ntohs(ip->ip_len) > m->m_pkthdr.len) + return 0; + /* + * Find a flow. + */ + if ((ipf = ipflow_lookup(ip)) == NULL) + return 0; + + /* + * Route and interface still up? + */ + rt = ipf->ipf_ro.ro_rt; + if ((rt->rt_flags & RTF_UP) == 0 || (rt->rt_ifp->if_flags & IFF_UP) == 0) + return 0; + + /* + * Packet size OK? TTL? + */ + if (m->m_pkthdr.len > rt->rt_ifp->if_mtu || ip->ip_ttl <= IPTTLDEC) + return 0; + + /* + * Everything checks out and so we can forward this packet. + * Modify the TTL and incrementally change the checksum. + */ + ip->ip_ttl -= IPTTLDEC; + if (ip->ip_sum >= htons(0xffff - (IPTTLDEC << 8))) { + ip->ip_sum += htons(IPTTLDEC << 8) + 1; + } else { + ip->ip_sum += htons(IPTTLDEC << 8); + } + + /* + * Send the packet on its way. All we can get back is ENOBUFS + */ + ipf->ipf_uses++; + ipf->ipf_timer = IPFLOW_TIMER; + + /* Not sure the rt_dlt is valid here !! XXX */ + if ((error = dlil_output((u_long)rt->rt_dlt, m, (caddr_t) rt, &ipf->ipf_ro.ro_dst, 0)) != 0) { + if (error == ENOBUFS) + ipf->ipf_dropped++; + else + ipf->ipf_errors++; + } + return 1; +} + +static void +ipflow_addstats( + struct ipflow *ipf) +{ + ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses; + ipstat.ips_cantforward += ipf->ipf_errors + ipf->ipf_dropped; + ipstat.ips_forward += ipf->ipf_uses; + ipstat.ips_fastforward += ipf->ipf_uses; +} + +static void +ipflow_free( + struct ipflow *ipf) +{ + int s; + /* + * Remove the flow from the hash table (at elevated IPL). + * Once it's off the list, we can deal with it at normal + * network IPL. 
+ */ + s = splimp(); + LIST_REMOVE(ipf, ipf_next); + splx(s); + ipflow_addstats(ipf); + RTFREE(ipf->ipf_ro.ro_rt); + ipflow_inuse--; + FREE(ipf, M_IPFLOW); +} + +static struct ipflow * +ipflow_reap( + void) +{ + struct ipflow *ipf, *maybe_ipf = NULL; + int idx; + int s; + + for (idx = 0; idx < IPFLOW_HASHSIZE; idx++) { + ipf = LIST_FIRST(&ipflows[idx]); + while (ipf != NULL) { + /* + * If this no longer points to a valid route + * reclaim it. + */ + if ((ipf->ipf_ro.ro_rt->rt_flags & RTF_UP) == 0) + goto done; + /* + * choose the one that's been least recently used + * or has had the least uses in the last 1.5 + * intervals. + */ + if (maybe_ipf == NULL + || ipf->ipf_timer < maybe_ipf->ipf_timer + || (ipf->ipf_timer == maybe_ipf->ipf_timer + && ipf->ipf_last_uses + ipf->ipf_uses < + maybe_ipf->ipf_last_uses + + maybe_ipf->ipf_uses)) + maybe_ipf = ipf; + ipf = LIST_NEXT(ipf, ipf_next); + } + } + ipf = maybe_ipf; + done: + /* + * Remove the entry from the flow table. + */ + s = splimp(); + LIST_REMOVE(ipf, ipf_next); + splx(s); + ipflow_addstats(ipf); + RTFREE(ipf->ipf_ro.ro_rt); + return ipf; +} + +void +ipflow_slowtimo( + void) +{ + struct ipflow *ipf; + int idx; + + for (idx = 0; idx < IPFLOW_HASHSIZE; idx++) { + ipf = LIST_FIRST(&ipflows[idx]); + while (ipf != NULL) { + struct ipflow *next_ipf = LIST_NEXT(ipf, ipf_next); + if (--ipf->ipf_timer == 0) { + ipflow_free(ipf); + } else { + ipf->ipf_last_uses = ipf->ipf_uses; + ipf->ipf_ro.ro_rt->rt_use += ipf->ipf_uses; + ipstat.ips_forward += ipf->ipf_uses; + ipstat.ips_fastforward += ipf->ipf_uses; + ipf->ipf_uses = 0; + } + ipf = next_ipf; + } + } +} + +void +ipflow_create( + const struct route *ro, + struct mbuf *m) +{ + const struct ip *const ip = mtod(m, struct ip *); + struct ipflow *ipf; + unsigned hash; + int s; + + /* + * Don't create cache entries for ICMP messages. + */ + if (!ipflow_active || ip->ip_p == IPPROTO_ICMP) + return; + /* + * See if an existing flow struct exists. 
If so remove it from it's + * list and free the old route. If not, try to malloc a new one + * (if we aren't at our limit). + */ + ipf = ipflow_lookup(ip); + if (ipf == NULL) { + if (ipflow_inuse == IPFLOW_MAX) { + ipf = ipflow_reap(); + } else { + ipf = (struct ipflow *) _MALLOC(sizeof(*ipf), M_IPFLOW, + M_NOWAIT); + if (ipf == NULL) + return; + ipflow_inuse++; + } + bzero((caddr_t) ipf, sizeof(*ipf)); + } else { + s = splimp(); + LIST_REMOVE(ipf, ipf_next); + splx(s); + ipflow_addstats(ipf); + RTFREE(ipf->ipf_ro.ro_rt); + ipf->ipf_uses = ipf->ipf_last_uses = 0; + ipf->ipf_errors = ipf->ipf_dropped = 0; + } + + /* + * Fill in the updated information. + */ + ipf->ipf_ro = *ro; + ro->ro_rt->rt_refcnt++; + ipf->ipf_dst = ip->ip_dst; + ipf->ipf_src = ip->ip_src; + ipf->ipf_tos = ip->ip_tos; + ipf->ipf_timer = IPFLOW_TIMER; + /* + * Insert into the approriate bucket of the flow table. + */ + hash = ipflow_hash(ip->ip_dst, ip->ip_src, ip->ip_tos); + s = splimp(); + LIST_INSERT_HEAD(&ipflows[hash], ipf, ipf_next); + splx(s); +} diff --git a/bsd/netinet/ip_flow.h b/bsd/netinet/ip_flow.h new file mode 100644 index 000000000..151b6ba32 --- /dev/null +++ b/bsd/netinet/ip_flow.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1998 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by the 3am Software Foundry ("3am"). It was developed by Matt Thomas. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _NETINET_IP_FLOW_H +#define _NETINET_IP_FLOW_H + +struct ipflow { + LIST_ENTRY(ipflow) ipf_next; /* next ipflow in bucket */ + struct in_addr ipf_dst; /* destination address */ + struct in_addr ipf_src; /* source address */ + + u_int8_t ipf_tos; /* type-of-service */ + struct route ipf_ro; /* associated route entry */ + u_long ipf_uses; /* number of uses in this period */ + + int ipf_timer; /* remaining lifetime of this entry */ + u_long ipf_dropped; /* ENOBUFS returned by if_output */ + u_long ipf_errors; /* other errors returned by if_output */ + u_long ipf_last_uses; /* number of uses in last period */ +}; + +#endif diff --git a/bsd/netinet/ip_frag.c b/bsd/netinet/ip_frag.c new file mode 100644 index 000000000..ddb4f1ab4 --- /dev/null +++ b/bsd/netinet/ip_frag.c @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1993-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + */ +#if !defined(lint) +/* static const char sccsid[] = "@(#)ip_frag.c 1.11 3/24/96 (C) 1993-1995 Darren Reed"; */ +#endif + + +#if !defined(KERNEL) +# include +# include +#endif +#include +#include +#include +#include +#include +#if defined(KERNEL) +#include +#include +#include +#else +#include +#endif +#include +#ifndef linux +#include +#endif +#include +#if defined(KERNEL) +# include +#endif +#if !defined(__SVR4) && !defined(__svr4__) +# ifndef linux +# include +# endif +#else +# include +# include +# include +# include +#endif +#if defined(KERNEL) +#include +#endif + +#include +#ifdef sun +#include +#endif +#include +#include +#include +#include +#ifndef linux +#include +#endif +#include +#include +#include +#include "netinet/ip_compat.h" +#include +#include "netinet/ip_fil.h" +#include "netinet/ip_proxy.h" +#include "netinet/ip_nat.h" +#include "netinet/ip_frag.h" +#include "netinet/ip_state.h" +#include "netinet/ip_auth.h" + +static ipfr_t *ipfr_heads[IPFT_SIZE]; +static ipfr_t *ipfr_nattab[IPFT_SIZE]; +static ipfrstat_t ipfr_stats; +static int ipfr_inuse = 0; + int fr_ipfrttl = 120; /* 60 seconds */ +#ifdef KERNEL +extern int ipfr_timer_id; +#endif +#if (SOLARIS || defined(__sgi)) && defined(KERNEL) +extern kmutex_t ipf_frag; +extern 
kmutex_t ipf_natfrag; +extern kmutex_t ipf_nat; +#endif + + +static ipfr_t *ipfr_new __P((ip_t *, fr_info_t *, int, ipfr_t **)); +static ipfr_t *ipfr_lookup __P((ip_t *, fr_info_t *, ipfr_t **)); + + +ipfrstat_t *ipfr_fragstats() +{ + ipfr_stats.ifs_table = ipfr_heads; + ipfr_stats.ifs_nattab = ipfr_nattab; + ipfr_stats.ifs_inuse = ipfr_inuse; + return &ipfr_stats; +} + + +/* + * add a new entry to the fragment cache, registering it as having come + * through this box, with the result of the filter operation. + */ +static ipfr_t *ipfr_new(ip, fin, pass, table) +ip_t *ip; +fr_info_t *fin; +int pass; +ipfr_t *table[]; +{ + ipfr_t **fp, *fr, frag; + u_int idx; + + frag.ipfr_p = ip->ip_p; + idx = ip->ip_p; + frag.ipfr_id = ip->ip_id; + idx += ip->ip_id; + frag.ipfr_tos = ip->ip_tos; + frag.ipfr_src.s_addr = ip->ip_src.s_addr; + idx += ip->ip_src.s_addr; + frag.ipfr_dst.s_addr = ip->ip_dst.s_addr; + idx += ip->ip_dst.s_addr; + idx *= 127; + idx %= IPFT_SIZE; + + /* + * first, make sure it isn't already there... + */ + for (fp = &table[idx]; (fr = *fp); fp = &fr->ipfr_next) + if (!bcmp((char *)&frag.ipfr_src, (char *)&fr->ipfr_src, + IPFR_CMPSZ)) { + ipfr_stats.ifs_exists++; + return NULL; + } + + /* + * allocate some memory, if possible, if not, just record that we + * failed to do so. + */ + KMALLOC(fr, ipfr_t *, sizeof(*fr)); + if (fr == NULL) { + ipfr_stats.ifs_nomem++; + return NULL; + } + + /* + * Instert the fragment into the fragment table, copy the struct used + * in the search using bcopy rather than reassign each field. + * Set the ttl to the default and mask out logging from "pass" + */ + if ((fr->ipfr_next = table[idx])) + table[idx]->ipfr_prev = fr; + fr->ipfr_prev = NULL; + fr->ipfr_data = NULL; + table[idx] = fr; + bcopy((char *)&frag.ipfr_src, (char *)&fr->ipfr_src, IPFR_CMPSZ); + fr->ipfr_ttl = fr_ipfrttl; + fr->ipfr_pass = pass & ~(FR_LOGFIRST|FR_LOG); + /* + * Compute the offset of the expected start of the next packet. 
+ */ + fr->ipfr_off = (ip->ip_off & 0x1fff) + (fin->fin_dlen >> 3); + ipfr_stats.ifs_new++; + ipfr_inuse++; + return fr; +} + + +int ipfr_newfrag(ip, fin, pass) +ip_t *ip; +fr_info_t *fin; +int pass; +{ + ipfr_t *ipf; + + MUTEX_ENTER(&ipf_frag); + ipf = ipfr_new(ip, fin, pass, ipfr_heads); + MUTEX_EXIT(&ipf_frag); + return ipf ? 0 : -1; +} + + +int ipfr_nat_newfrag(ip, fin, pass, nat) +ip_t *ip; +fr_info_t *fin; +int pass; +nat_t *nat; +{ + ipfr_t *ipf; + + MUTEX_ENTER(&ipf_natfrag); + if ((ipf = ipfr_new(ip, fin, pass, ipfr_nattab))) { + ipf->ipfr_data = nat; + nat->nat_data = ipf; + } + MUTEX_EXIT(&ipf_natfrag); + return ipf ? 0 : -1; +} + + +/* + * check the fragment cache to see if there is already a record of this packet + * with its filter result known. + */ +static ipfr_t *ipfr_lookup(ip, fin, table) +ip_t *ip; +fr_info_t *fin; +ipfr_t *table[]; +{ + ipfr_t *f, frag; + u_int idx; + + /* + * For fragments, we record protocol, packet id, TOS and both IP#'s + * (these should all be the same for all fragments of a packet). + * + * build up a hash value to index the table with. + */ + frag.ipfr_p = ip->ip_p; + idx = ip->ip_p; + frag.ipfr_id = ip->ip_id; + idx += ip->ip_id; + frag.ipfr_tos = ip->ip_tos; + frag.ipfr_src.s_addr = ip->ip_src.s_addr; + idx += ip->ip_src.s_addr; + frag.ipfr_dst.s_addr = ip->ip_dst.s_addr; + idx += ip->ip_dst.s_addr; + idx *= 127; + idx %= IPFT_SIZE; + + /* + * check the table, careful to only compare the right amount of data + */ + for (f = table[idx]; f; f = f->ipfr_next) + if (!bcmp((char *)&frag.ipfr_src, (char *)&f->ipfr_src, + IPFR_CMPSZ)) { + u_short atoff, off; + + if (f != table[idx]) { + /* + * move fragment info. to the top of the list + * to speed up searches. 
+ */ + if ((f->ipfr_prev->ipfr_next = f->ipfr_next)) + f->ipfr_next->ipfr_prev = f->ipfr_prev; + f->ipfr_next = table[idx]; + table[idx]->ipfr_prev = f; + f->ipfr_prev = NULL; + table[idx] = f; + } + off = ip->ip_off; + atoff = off + (fin->fin_dlen >> 3); + /* + * If we've follwed the fragments, and this is the + * last (in order), shrink expiration time. + */ + if ((off & 0x1fff) == f->ipfr_off) { + if (!(off & IP_MF)) + f->ipfr_ttl = 1; + else + f->ipfr_off = atoff; + } + ipfr_stats.ifs_hits++; + return f; + } + return NULL; +} + + +/* + * functional interface for NAT lookups of the NAT fragment cache + */ +nat_t *ipfr_nat_knownfrag(ip, fin) +ip_t *ip; +fr_info_t *fin; +{ + nat_t *nat; + ipfr_t *ipf; + + MUTEX_ENTER(&ipf_natfrag); + ipf = ipfr_lookup(ip, fin, ipfr_nattab); + if (ipf) { + nat = ipf->ipfr_data; + /* + * This is the last fragment for this packet. + */ + if (ipf->ipfr_ttl == 1) { + nat->nat_data = NULL; + ipf->ipfr_data = NULL; + } + } else + nat = NULL; + MUTEX_EXIT(&ipf_natfrag); + return nat; +} + + +/* + * functional interface for normal lookups of the fragment cache + */ +int ipfr_knownfrag(ip, fin) +ip_t *ip; +fr_info_t *fin; +{ + int ret; + ipfr_t *ipf; + + MUTEX_ENTER(&ipf_frag); + ipf = ipfr_lookup(ip, fin, ipfr_heads); + ret = ipf ? ipf->ipfr_pass : 0; + MUTEX_EXIT(&ipf_frag); + return ret; +} + + +/* + * forget any references to this external object. + */ +void ipfr_forget(nat) +void *nat; +{ + ipfr_t *fr; + int idx; + + MUTEX_ENTER(&ipf_natfrag); + for (idx = IPFT_SIZE - 1; idx >= 0; idx--) + for (fr = ipfr_heads[idx]; fr; fr = fr->ipfr_next) + if (fr->ipfr_data == nat) + fr->ipfr_data = NULL; + + MUTEX_EXIT(&ipf_natfrag); +} + + +/* + * Free memory in use by fragment state info. kept. 
+ */ +void ipfr_unload() +{ + ipfr_t **fp, *fr; + nat_t *nat; + int idx; + + MUTEX_ENTER(&ipf_frag); + for (idx = IPFT_SIZE - 1; idx >= 0; idx--) + for (fp = &ipfr_heads[idx]; (fr = *fp); ) { + *fp = fr->ipfr_next; + KFREE(fr); + } + MUTEX_EXIT(&ipf_frag); + + MUTEX_ENTER(&ipf_nat); + MUTEX_ENTER(&ipf_natfrag); + for (idx = IPFT_SIZE - 1; idx >= 0; idx--) + for (fp = &ipfr_nattab[idx]; (fr = *fp); ) { + *fp = fr->ipfr_next; + if ((nat = (nat_t *)fr->ipfr_data)) { + if (nat->nat_data == fr) + nat->nat_data = NULL; + } + KFREE(fr); + } + MUTEX_EXIT(&ipf_natfrag); + MUTEX_EXIT(&ipf_nat); +} + + +#ifdef KERNEL +/* + * Slowly expire held state for fragments. Timeouts are set * in expectation + * of this being called twice per second. + */ +# if (BSD >= 199306) || SOLARIS || defined(__sgi) +void ipfr_slowtimer() +# else +int ipfr_slowtimer() +# endif +{ + ipfr_t **fp, *fr; + nat_t *nat; + int s, idx; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); +#ifdef __sgi + ipfilter_sgi_intfsync(); +#endif + + SPL_NET(s); + MUTEX_ENTER(&ipf_frag); + + /* + * Go through the entire table, looking for entries to expire, + * decreasing the ttl by one for each entry. If it reaches 0, + * remove it from the chain and free it. + */ + for (idx = IPFT_SIZE - 1; idx >= 0; idx--) + for (fp = &ipfr_heads[idx]; (fr = *fp); ) { + --fr->ipfr_ttl; + if (fr->ipfr_ttl == 0) { + if (fr->ipfr_prev) + fr->ipfr_prev->ipfr_next = + fr->ipfr_next; + if (fr->ipfr_next) + fr->ipfr_next->ipfr_prev = + fr->ipfr_prev; + *fp = fr->ipfr_next; + ipfr_stats.ifs_expire++; + ipfr_inuse--; + KFREE(fr); + } else + fp = &fr->ipfr_next; + } + MUTEX_EXIT(&ipf_frag); + + /* + * Same again for the NAT table, except that if the structure also + * still points to a NAT structure, and the NAT structure points back + * at the one to be free'd, NULL the reference from the NAT struct. 
+ * NOTE: We need to grab both mutex's early, and in this order so as + * to prevent a deadlock if both try to expire at the same time. + */ + MUTEX_ENTER(&ipf_nat); + MUTEX_ENTER(&ipf_natfrag); + for (idx = IPFT_SIZE - 1; idx >= 0; idx--) + for (fp = &ipfr_nattab[idx]; (fr = *fp); ) { + --fr->ipfr_ttl; + if (fr->ipfr_ttl == 0) { + if (fr->ipfr_prev) + fr->ipfr_prev->ipfr_next = + fr->ipfr_next; + if (fr->ipfr_next) + fr->ipfr_next->ipfr_prev = + fr->ipfr_prev; + *fp = fr->ipfr_next; + ipfr_stats.ifs_expire++; + ipfr_inuse--; + if ((nat = (nat_t *)fr->ipfr_data)) { + if (nat->nat_data == fr) + nat->nat_data = NULL; + } + KFREE(fr); + } else + fp = &fr->ipfr_next; + } + MUTEX_EXIT(&ipf_natfrag); + MUTEX_EXIT(&ipf_nat); + SPL_X(s); + fr_timeoutstate(); + ip_natexpire(); + fr_authexpire(); +# if SOLARIS + ipfr_timer_id = timeout(ipfr_slowtimer, NULL, drv_usectohz(500000)); +# else +# ifndef linux + ip_slowtimo(); +# endif +# if (BSD < 199306) && !defined(__sgi) + (void) thread_funnel_set(network_flock, FALSE); + return 0; +# endif +# endif + (void) thread_funnel_set(network_flock, FALSE); +} +#endif /* defined(KERNEL) */ diff --git a/bsd/netinet/ip_frag.h b/bsd/netinet/ip_frag.h new file mode 100644 index 000000000..faff6d5d1 --- /dev/null +++ b/bsd/netinet/ip_frag.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1993-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + * @(#)ip_frag.h 1.5 3/24/96 + */ + +#ifndef __IP_FRAG_H__ +#define __IP_FRAG_H__ + +#define IPFT_SIZE 257 + +typedef struct ipfr { + struct ipfr *ipfr_next, *ipfr_prev; + void *ipfr_data; + struct in_addr ipfr_src; + struct in_addr ipfr_dst; + u_short ipfr_id; + u_char ipfr_p; + u_char ipfr_tos; + u_short ipfr_off; + u_short ipfr_ttl; /* ticks left; decremented by ipfr_slowtimer, entry freed at 0 */ + u_char ipfr_pass; +} ipfr_t; + + +typedef struct ipfrstat { + u_long ifs_exists; /* add & already exists */ + u_long ifs_nomem; + u_long ifs_new; + u_long ifs_hits; + u_long ifs_expire; /* entries timed out by ipfr_slowtimer */ + u_long ifs_inuse; /* current number of entries held */ + struct ipfr **ifs_table; + struct ipfr **ifs_nattab; +} ipfrstat_t; + +#define IPFR_CMPSZ (4 + 4 + 2 + 1 + 1) /* NOTE(review): 4+4+2+1+1 ~ src,dst,id,p,tos in ipfr -- confirm */ + +extern int fr_ipfrttl; +extern ipfrstat_t *ipfr_fragstats __P((void)); +extern int ipfr_newfrag __P((ip_t *, fr_info_t *, int)); +extern int ipfr_nat_newfrag __P((ip_t *, fr_info_t *, int, struct nat *)); +extern nat_t *ipfr_nat_knownfrag __P((ip_t *, fr_info_t *)); +extern int ipfr_knownfrag __P((ip_t *, fr_info_t *)); +extern void ipfr_forget __P((void *)); +extern void ipfr_unload __P((void)); + +#if (BSD >= 199306) || SOLARIS || defined(__sgi) +extern void ipfr_slowtimer __P((void)); +#else +extern int ipfr_slowtimer __P((void)); +#endif + +#endif /* __IP_FRAG_H__ */ diff --git
a/bsd/netinet/ip_ftp_pxy.c b/bsd/netinet/ip_ftp_pxy.c new file mode 100644 index 000000000..46152358a --- /dev/null +++ b/bsd/netinet/ip_ftp_pxy.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Simple FTP transparent proxy for in-kernel use. For use with the NAT + * code. + */ + + +#define isdigit(x) ((x) >= '0' && (x) <= '9') + +#define IPF_FTP_PROXY + +#define IPF_MINPORTLEN 18 +#define IPF_MAXPORTLEN 30 + + +int ippr_ftp_init __P((fr_info_t *, ip_t *, tcphdr_t *, + ap_session_t *, nat_t *)); +int ippr_ftp_in __P((fr_info_t *, ip_t *, tcphdr_t *, + ap_session_t *, nat_t *)); +int ippr_ftp_out __P((fr_info_t *, ip_t *, tcphdr_t *, + ap_session_t *, nat_t *)); +u_short ipf_ftp_atoi __P((char **)); + + +/* + * FTP application proxy initialization. 
+ */ +int ippr_ftp_init(fin, ip, tcp, aps, nat) +fr_info_t *fin; +ip_t *ip; +tcphdr_t *tcp; +ap_session_t *aps; +nat_t *nat; +{ + aps->aps_sport = tcp->th_sport; /* remember the control-connection port pair */ + aps->aps_dport = tcp->th_dport; + return 0; +} + + +int ippr_ftp_in(fin, ip, tcp, aps, nat) +fr_info_t *fin; +ip_t *ip; +tcphdr_t *tcp; +ap_session_t *aps; +nat_t *nat; +{ + u_32_t sum1, sum2; + short sel; + + /* packet from the server side of the session recorded in ippr_ftp_init */ + if (tcp->th_sport == aps->aps_dport) { + sum2 = (u_32_t)ntohl(tcp->th_ack); /* peer's ack, host order */ + sel = aps->aps_sel; + if ((aps->aps_after[!sel] > aps->aps_after[sel]) && + (sum2 > aps->aps_after[!sel])) { + sel = aps->aps_sel = !sel; /* switch to other set */ + } + if (aps->aps_seqoff[sel] && (sum2 > aps->aps_after[sel])) { + sum1 = (u_32_t)aps->aps_seqoff[sel]; + tcp->th_ack = htonl(sum2 - sum1); /* undo the seq offset added on the way out */ + return 2; /* NOTE(review): nonzero appears to flag "packet modified" to the ap framework -- confirm against caller */ + } + } + return 0; +}
+ */ +u_short ipf_ftp_atoi(ptr) +char **ptr; +{ + register char *s = *ptr, c; + register u_char i = 0, j = 0; + + while ((c = *s++) && isdigit(c)) { + i *= 10; + i += c - '0'; + } + if (c != ',') { + *ptr = NULL; + return 0; + } + while ((c = *s++) && isdigit(c)) { + j *= 10; + j += c - '0'; + } + *ptr = s; + return (i << 8) | j; +} + + +int ippr_ftp_out(fin, ip, tcp, aps, nat) +fr_info_t *fin; +ip_t *ip; +tcphdr_t *tcp; +ap_session_t *aps; +nat_t *nat; +{ + register u_32_t sum1, sum2; + char newbuf[IPF_MAXPORTLEN+1]; + char portbuf[IPF_MAXPORTLEN+1], *s; + int ch = 0, off = (ip->ip_hl << 2) + (tcp->th_off << 2); + u_int a1, a2, a3, a4; + u_short a5, a6; + int olen, dlen, nlen = 0, inc = 0; + tcphdr_t tcph, *tcp2 = &tcph; + void *savep; + nat_t *ipn; + struct in_addr swip; + mb_t *m = *(mb_t **)fin->fin_mp; + +#if SOLARIS + mb_t *m1; + + /* skip any leading M_PROTOs */ + while(m && (MTYPE(m) != M_DATA)) + m = m->b_cont; + PANIC((!m),("ippr_ftp_out: no M_DATA")); + + dlen = msgdsize(m) - off; + bzero(portbuf, sizeof(portbuf)); + copyout_mblk(m, off, MIN(sizeof(portbuf), dlen), portbuf); +#else + dlen = mbufchainlen(m) - off; + bzero(portbuf, sizeof(portbuf)); + m_copydata(m, off, MIN(sizeof(portbuf), dlen), portbuf); +#endif + portbuf[IPF_MAXPORTLEN] = '\0'; + + if ((dlen < IPF_MINPORTLEN) || strncmp(portbuf, "PORT ", 5)) + goto adjust_seqack; + + /* + * Skip the PORT command + space + */ + s = portbuf + 5; + /* + * Pick out the address components, two at a time. + */ + (void) ipf_ftp_atoi(&s); + if (!s) + goto adjust_seqack; + (void) ipf_ftp_atoi(&s); + if (!s) + goto adjust_seqack; + a5 = ipf_ftp_atoi(&s); + if (!s) + goto adjust_seqack; + /* + * check for CR-LF at the end. 
+ */ + if (*s != '\n' || *(s - 1) != '\r') + goto adjust_seqack; + a6 = a5 & 0xff; + a5 >>= 8; + /* + * Calculate new address parts for PORT command + */ + a1 = ntohl(ip->ip_src.s_addr); + a2 = (a1 >> 16) & 0xff; + a3 = (a1 >> 8) & 0xff; + a4 = a1 & 0xff; + a1 >>= 24; + olen = s - portbuf + 1; + (void) snprintf(newbuf, sizeof(newbuf), "PORT %d,%d,%d,%d,%d,%d\r\n", + a1, a2, a3, a4, a5, a6); + nlen = strlen(newbuf); + inc = nlen - olen; +#if SOLARIS + for (m1 = m; m1->b_cont; m1 = m1->b_cont) + ; + if (inc > 0) { + mblk_t *nm; + + /* alloc enough to keep same trailer space for lower driver */ + nm = allocb(nlen + m1->b_datap->db_lim - m1->b_wptr, BPRI_MED); + PANIC((!nm),("ippr_ftp_out: allocb failed")); + + nm->b_band = m1->b_band; + nm->b_wptr += nlen; + + m1->b_wptr -= olen; + PANIC((m1->b_wptr < m1->b_rptr),("ippr_ftp_out: cannot handle fragmented data block")); + + linkb(m1, nm); + } else { + m1->b_wptr += inc; + } + copyin_mblk(m, off, nlen, newbuf); +#else + if (inc < 0) + m_adj(m, inc); + /* the mbuf chain will be extended if necessary by m_copyback() */ + m_copyback(m, off, nlen, newbuf); +#endif + if (inc) { +#if SOLARIS || defined(__sgi) + sum1 = ip->ip_len; + sum2 = ip->ip_len + inc; + + /* Because ~1 == -2, We really need ~1 == -1 */ + if (sum1 > sum2) + sum2--; + sum2 -= sum1; + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + + fix_outcksum(&ip->ip_sum, sum2); +#endif + ip->ip_len += inc; + } + ch = 1; + + /* + * Add skeleton NAT entry for connection which will come back the + * other way. 
+ */ + savep = fin->fin_dp; + fin->fin_dp = (char *)tcp2; + bzero((char *)tcp2, sizeof(*tcp2)); + tcp2->th_sport = htons(a5 << 8 | a6); + tcp2->th_dport = htons(20); + swip = ip->ip_src; + ip->ip_src = nat->nat_inip; + if ((ipn = nat_new(nat->nat_ptr, ip, fin, IPN_TCP, NAT_OUTBOUND))) + ipn->nat_age = fr_defnatage; + (void) fr_addstate(ip, fin, FR_INQUE|FR_PASS|FR_QUICK|FR_KEEPSTATE); + ip->ip_src = swip; + fin->fin_dp = (char *)savep; + +adjust_seqack: + if (tcp->th_dport == aps->aps_dport) { + sum2 = (u_32_t)ntohl(tcp->th_seq); + off = aps->aps_sel; + if ((aps->aps_after[!off] > aps->aps_after[off]) && + (sum2 > aps->aps_after[!off])) { + off = aps->aps_sel = !off; /* switch to other set */ + } + if (aps->aps_seqoff[off]) { + sum1 = (u_32_t)aps->aps_after[off] - + aps->aps_seqoff[off]; + if (sum2 > sum1) { + sum1 = (u_32_t)aps->aps_seqoff[off]; + sum2 += sum1; + tcp->th_seq = htonl(sum2); + ch = 1; + } + } + + if (inc && (sum2 > aps->aps_after[!off])) { + aps->aps_after[!off] = sum2 + nlen - 1; + aps->aps_seqoff[!off] = aps->aps_seqoff[off] + inc; + } + } + return ch ? 2 : 0; +} diff --git a/bsd/netinet/ip_fw.c b/bsd/netinet/ip_fw.c new file mode 100644 index 000000000..8705c5bab --- /dev/null +++ b/bsd/netinet/ip_fw.c @@ -0,0 +1,1408 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993 Daniel Boulet + * Copyright (c) 1994 Ugen J.S.Antsilevich + * Copyright (c) 1996 Alex Nash + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. + * + */ + +/* + * Implement IP packet firewall + */ +#if !IPFIREWALL_KEXT +#if ISFB31 +#if !defined(KLD_MODULE) && !defined(IPFIREWALL_MODULE) +#include "opt_ipfw.h" +#include "opt_ipdn.h" +#include "opt_ipdivert.h" +#include "opt_inet.h" +#endif +#endif +#ifndef INET +#error IPFIREWALL requires INET. 
+#endif /* INET */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if DUMMYNET +#include +#include +#endif +#include +#include +#include +#ifdef INET6 +#include +#endif +#include +#include +#include + +#include /* XXX ethertype_ip */ + +static int fw_debug = 1; +#if IPFIREWALL_VERBOSE +static int fw_verbose = 1; +#else +static int fw_verbose = 0; +#endif +static int fw_one_pass = 0; /* XXX */ +#if IPFIREWALL_VERBOSE_LIMIT +static int fw_verbose_limit = IPFIREWALL_VERBOSE_LIMIT; +#else +static int fw_verbose_limit = 0; +#endif + +#define IPFW_DEFAULT_RULE ((u_int)(u_short)~0) + +LIST_HEAD (ip_fw_head, ip_fw_chain) ip_fw_chain; + +MALLOC_DEFINE(M_IPFW, "IpFw/IpAcct", "IpFw/IpAcct chain's"); + +SYSCTL_DECL(_net_inet_ip); +SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall"); +SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, debug, CTLFLAG_RW, &fw_debug, 0, ""); +SYSCTL_INT(_net_inet_ip_fw, OID_AUTO,one_pass,CTLFLAG_RW, &fw_one_pass, 0, ""); +SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose, CTLFLAG_RW, &fw_verbose, 0, ""); +SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW, &fw_verbose_limit, 0, ""); + +#define dprintf(a) if (!fw_debug); else printf a + +#define print_ip(a) printf("%d.%d.%d.%d", \ + (int)(ntohl(a.s_addr) >> 24) & 0xFF, \ + (int)(ntohl(a.s_addr) >> 16) & 0xFF, \ + (int)(ntohl(a.s_addr) >> 8) & 0xFF, \ + (int)(ntohl(a.s_addr)) & 0xFF); + +#define dprint_ip(a) if (!fw_debug); else print_ip(a) + +static int add_entry __P((struct ip_fw_head *chainptr, struct ip_fw *frwl)); +static int del_entry __P((struct ip_fw_head *chainptr, u_short number)); +static int zero_entry __P((struct ip_fw *)); +static int check_ipfw_struct __P((struct ip_fw *m)); +static __inline int + iface_match __P((struct ifnet *ifp, union ip_fw_if *ifu, + int byname)); +static int ipopts_match __P((struct ip *ip, struct ip_fw *f)); +static __inline int + port_match 
__P((u_short *portptr, int nports, u_short port, + int range_flag)); +static int tcpflg_match __P((struct tcphdr *tcp, struct ip_fw *f)); +static int icmptype_match __P((struct icmp * icmp, struct ip_fw * f)); +static void ipfw_report __P((struct ip_fw *f, struct ip *ip, + struct ifnet *rif, struct ifnet *oif)); + +static void flush_rule_ptrs(void); + +static int ip_fw_chk __P((struct ip **pip, int hlen, + struct ifnet *oif, u_int16_t *cookie, struct mbuf **m, + struct ip_fw_chain **flow_id, + struct sockaddr_in **next_hop)); +static int ip_fw_ctl __P((struct sockopt *sopt)); + +static char err_prefix[] = "ip_fw_ctl:"; + +/* + * Returns 1 if the port is matched by the vector, 0 otherwise + */ +static __inline int +port_match(u_short *portptr, int nports, u_short port, int range_flag) +{ + if (!nports) + return 1; + if (range_flag) { + if (portptr[0] <= port && port <= portptr[1]) { + return 1; + } + nports -= 2; + portptr += 2; + } + while (nports-- > 0) { + if (*portptr++ == port) { + return 1; + } + } + return 0; +} + +static int +tcpflg_match(struct tcphdr *tcp, struct ip_fw *f) +{ + u_char flg_set, flg_clr; + + if ((f->fw_tcpf & IP_FW_TCPF_ESTAB) && + (tcp->th_flags & (IP_FW_TCPF_RST | IP_FW_TCPF_ACK))) + return 1; + + flg_set = tcp->th_flags & f->fw_tcpf; + flg_clr = tcp->th_flags & f->fw_tcpnf; + + if (flg_set != f->fw_tcpf) + return 0; + if (flg_clr) + return 0; + + return 1; +} + +static int +icmptype_match(struct icmp *icmp, struct ip_fw *f) +{ + int type; + + if (!(f->fw_flg & IP_FW_F_ICMPBIT)) + return(1); + + type = icmp->icmp_type; + + /* check for matching type in the bitmap */ + if (type < IP_FW_ICMPTYPES_MAX && + (f->fw_uar.fw_icmptypes[type / (sizeof(unsigned) * 8)] & + (1U << (type % (8 * sizeof(unsigned)))))) + return(1); + + return(0); /* no match */ +} + +static int +is_icmp_query(struct ip *ip) +{ + const struct icmp *icmp; + int icmp_type; + + icmp = (struct icmp *)((u_int32_t *)ip + ip->ip_hl); + icmp_type = icmp->icmp_type; + + if 
(icmp_type == ICMP_ECHO || icmp_type == ICMP_ROUTERSOLICIT || + icmp_type == ICMP_TSTAMP || icmp_type == ICMP_IREQ || + icmp_type == ICMP_MASKREQ) + return(1); + + return(0); +} + +static int +ipopts_match(struct ip *ip, struct ip_fw *f) +{ + register u_char *cp; + int opt, optlen, cnt; + u_char opts, nopts, nopts_sve; + + cp = (u_char *)(ip + 1); + cnt = (ip->ip_hl << 2) - sizeof (struct ip); + opts = f->fw_ipopt; + nopts = nopts_sve = f->fw_ipnopt; + + for (; cnt > 0; cnt -= optlen, cp += optlen) { + opt = cp[IPOPT_OPTVAL]; + if (opt == IPOPT_EOL) + break; + if (opt == IPOPT_NOP) + optlen = 1; + else { + optlen = cp[IPOPT_OLEN]; + if (optlen <= 0 || optlen > cnt) { + return 0; /*XXX*/ + } + } + switch (opt) { + + default: + break; + + case IPOPT_LSRR: + opts &= ~IP_FW_IPOPT_LSRR; + nopts &= ~IP_FW_IPOPT_LSRR; + break; + + case IPOPT_SSRR: + opts &= ~IP_FW_IPOPT_SSRR; + nopts &= ~IP_FW_IPOPT_SSRR; + break; + + case IPOPT_RR: + opts &= ~IP_FW_IPOPT_RR; + nopts &= ~IP_FW_IPOPT_RR; + break; + case IPOPT_TS: + opts &= ~IP_FW_IPOPT_TS; + nopts &= ~IP_FW_IPOPT_TS; + break; + } + if (opts == nopts) + break; + } + if (opts == 0 && nopts == nopts_sve) + return 1; + else + return 0; +} + +static __inline int +iface_match(struct ifnet *ifp, union ip_fw_if *ifu, int byname) +{ + /* Check by name or by IP address */ + if (byname) { + /* Check unit number (-1 is wildcard) */ + if (ifu->fu_via_if.unit != -1 + && ifp->if_unit != ifu->fu_via_if.unit) + return(0); + /* Check name */ + if (strncmp(ifp->if_name, ifu->fu_via_if.name, FW_IFNLEN)) + return(0); + return(1); + } else if (ifu->fu_via_ip.s_addr != 0) { /* Zero == wildcard */ + struct ifaddr *ia; + + for (ia = ifp->if_addrhead.tqh_first; + ia != NULL; ia = ia->ifa_link.tqe_next) { + if (ia->ifa_addr == NULL) + continue; + if (ia->ifa_addr->sa_family != AF_INET) + continue; + if (ifu->fu_via_ip.s_addr != ((struct sockaddr_in *) + (ia->ifa_addr))->sin_addr.s_addr) + continue; + return(1); + } + return(0); + } + return(1); +} + 
+static void +ipfw_report(struct ip_fw *f, struct ip *ip, + struct ifnet *rif, struct ifnet *oif) +{ + if (ip) { + static u_int64_t counter; + struct tcphdr *const tcp = (struct tcphdr *) ((u_int32_t *) ip+ ip->ip_hl); + struct udphdr *const udp = (struct udphdr *) ((u_int32_t *) ip+ ip->ip_hl); + struct icmp *const icmp = (struct icmp *) ((u_int32_t *) ip + ip->ip_hl); + int count; + + count = f ? f->fw_pcnt : ++counter; + if (fw_verbose_limit != 0 && count > fw_verbose_limit) + return; + + /* Print command name */ + printf("ipfw: %d ", f ? f->fw_number : -1); + if (!f) + printf("Refuse"); + else + switch (f->fw_flg & IP_FW_F_COMMAND) { + case IP_FW_F_DENY: + printf("Deny"); + break; + case IP_FW_F_REJECT: + if (f->fw_reject_code == IP_FW_REJECT_RST) + printf("Reset"); + else + printf("Unreach"); + break; + case IP_FW_F_ACCEPT: + printf("Accept"); + break; + case IP_FW_F_COUNT: + printf("Count"); + break; + case IP_FW_F_DIVERT: + printf("Divert %d", f->fw_divert_port); + break; + case IP_FW_F_TEE: + printf("Tee %d", f->fw_divert_port); + break; + case IP_FW_F_SKIPTO: + printf("SkipTo %d", f->fw_skipto_rule); + break; +#if DUMMYNET + case IP_FW_F_PIPE: + printf("Pipe %d", f->fw_skipto_rule); + break; +#endif +#if IPFIREWALL_FORWARD + case IP_FW_F_FWD: + printf("Forward to "); + print_ip(f->fw_fwd_ip.sin_addr); + if (f->fw_fwd_ip.sin_port) + printf(":%d", f->fw_fwd_ip.sin_port); + break; +#endif + default: + printf("UNKNOWN"); + break; + } + printf(" "); + + switch (ip->ip_p) { + case IPPROTO_TCP: + printf("TCP "); + print_ip(ip->ip_src); + if ((ip->ip_off & IP_OFFMASK) == 0) + printf(":%d ", ntohs(tcp->th_sport)); + else + printf(" "); + print_ip(ip->ip_dst); + if ((ip->ip_off & IP_OFFMASK) == 0) + printf(":%d", ntohs(tcp->th_dport)); + break; + case IPPROTO_UDP: + printf("UDP "); + print_ip(ip->ip_src); + if ((ip->ip_off & IP_OFFMASK) == 0) + printf(":%d ", ntohs(udp->uh_sport)); + else + printf(" "); + print_ip(ip->ip_dst); + if ((ip->ip_off & IP_OFFMASK) == 0) + 
printf(":%d", ntohs(udp->uh_dport)); + break; + case IPPROTO_ICMP: + if ((ip->ip_off & IP_OFFMASK) == 0) + printf("ICMP:%u.%u ", icmp->icmp_type, icmp->icmp_code); + else + printf("ICMP "); + print_ip(ip->ip_src); + printf(" "); + print_ip(ip->ip_dst); + break; + default: + printf("P:%d ", ip->ip_p); + print_ip(ip->ip_src); + printf(" "); + print_ip(ip->ip_dst); + break; + } + if (oif) + printf(" out via %s%d", oif->if_name, oif->if_unit); + else if (rif) + printf(" in via %s%d", rif->if_name, rif->if_unit); + if ((ip->ip_off & IP_OFFMASK)) + printf(" Fragment = %d",ip->ip_off & IP_OFFMASK); + printf("\n"); + if (fw_verbose_limit != 0 && count == fw_verbose_limit) + printf("ipfw: limit reached on rule #%d\n", + f ? f->fw_number : -1); + } +} + +/* + * given an ip_fw_chain *, lookup_next_rule will return a pointer + * of the same type to the next one. This can be either the jump + * target (for skipto instructions) or the next one in the chain (in + * all other cases including a missing jump target). + * Backward jumps are not allowed, so start looking from the next + * rule... + */ +static struct ip_fw_chain * lookup_next_rule(struct ip_fw_chain *me); + +static struct ip_fw_chain * +lookup_next_rule(struct ip_fw_chain *me) +{ + struct ip_fw_chain *chain ; + int rule = me->rule->fw_skipto_rule ; /* guess... */ + + if ( (me->rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_SKIPTO ) + for (chain = me->chain.le_next; chain ; chain = chain->chain.le_next ) + if (chain->rule->fw_number >= rule) + return chain ; + return me->chain.le_next ; /* failure or not a skipto */ +} + +/* + * Parameters: + * + * pip Pointer to packet header (struct ip **) + * bridge_ipfw extension: pip = NULL means a complete ethernet packet + * including ethernet header in the mbuf. Other fields + * are ignored/invalid. 
+ * + * hlen Packet header length + * oif Outgoing interface, or NULL if packet is incoming + * *cookie Skip up to the first rule past this rule number; + * *m The packet; we set to NULL when/if we nuke it. + * *flow_id pointer to the last matching rule (in/out) + * *next_hop socket we are forwarding to (in/out). + * + * Return value: + * + * 0 The packet is to be accepted and routed normally OR + * the packet was denied/rejected and has been dropped; + * in the latter case, *m is equal to NULL upon return. + * port Divert the packet to port. + */ + +static int +ip_fw_chk(struct ip **pip, int hlen, + struct ifnet *oif, u_int16_t *cookie, struct mbuf **m, + struct ip_fw_chain **flow_id, + struct sockaddr_in **next_hop) +{ + struct ip_fw_chain *chain; + struct ip_fw *rule = NULL; + struct ip *ip = NULL ; + struct ifnet *const rif = (*m)->m_pkthdr.rcvif; + u_short offset = 0 ; + u_short src_port, dst_port; + u_int16_t skipto = *cookie; + + if (pip) { /* normal ip packet */ + ip = *pip; + offset = (ip->ip_off & IP_OFFMASK); + } else { /* bridged or non-ip packet */ + struct ether_header *eh = mtod(*m, struct ether_header *); + switch (ntohs(eh->ether_type)) { + case ETHERTYPE_IP : + if ((*m)->m_lenip_v != IPVERSION) + goto non_ip ; + hlen = ip->ip_hl << 2; + if (hlen < sizeof(struct ip)) /* minimum header length */ + goto non_ip ; + if ((*m)->m_len < 14 + hlen + 14) { + printf("-- m_len %d, need more...\n", (*m)->m_len); + goto non_ip ; + } + offset = (ip->ip_off & IP_OFFMASK); + break ; + default : +non_ip: ip = NULL ; + break ; + } + } + + if (*flow_id) { + if (fw_one_pass) + return 0 ; /* accept if passed first test */ + /* + * pkt has already been tagged. Look for the next rule + * to restart processing + */ + chain = LIST_NEXT( *flow_id, chain); + + if ( (chain = (*flow_id)->rule->next_rule_ptr) == NULL ) + chain = (*flow_id)->rule->next_rule_ptr = + lookup_next_rule(*flow_id) ; + if (! 
chain) goto dropit; + } else { + /* + * Go down the chain, looking for enlightment + * If we've been asked to start at a given rule immediatly, do so. + */ + chain = LIST_FIRST(&ip_fw_chain); + if ( skipto ) { + if (skipto >= IPFW_DEFAULT_RULE) + goto dropit; + while (chain && (chain->rule->fw_number <= skipto)) { + chain = LIST_NEXT(chain, chain); + } + if (! chain) goto dropit; + } + } + *cookie = 0; + for (; chain; chain = LIST_NEXT(chain, chain)) { + register struct ip_fw * f ; +again: + f = chain->rule; + + if (oif) { + /* Check direction outbound */ + if (!(f->fw_flg & IP_FW_F_OUT)) + continue; + } else { + /* Check direction inbound */ + if (!(f->fw_flg & IP_FW_F_IN)) + continue; + } + if (ip == NULL ) { + /* + * do relevant checks for non-ip packets: + * after this, only goto got_match or continue + */ + struct ether_header *eh = mtod(*m, struct ether_header *); + + /* + * make default rule always match or we have a panic + */ + if (f->fw_number == IPFW_DEFAULT_RULE) + goto got_match ; + /* + * temporary hack: + * udp from 0.0.0.0 means this rule applies. + * 1 src port is match ether type + * 2 src ports (interval) is match ether type + * 3 src ports is match ether address + */ + if ( f->fw_src.s_addr != 0 || f->fw_prot != IPPROTO_UDP + || f->fw_smsk.s_addr != 0xffffffff ) + continue; + switch (IP_FW_GETNSRCP(f)) { + case 1: /* match one type */ + if ( /* ( (f->fw_flg & IP_FW_F_INVSRC) != 0) ^ */ + ( f->fw_uar.fw_pts[0] == ntohs(eh->ether_type) ) ) { + goto got_match ; + } + break ; + default: + break ; + } + continue ; + } + + /* Fragments */ + if ((f->fw_flg & IP_FW_F_FRAG) && offset == 0 ) + continue; + + /* If src-addr doesn't match, not this rule. */ + if (((f->fw_flg & IP_FW_F_INVSRC) != 0) ^ ((ip->ip_src.s_addr + & f->fw_smsk.s_addr) != f->fw_src.s_addr)) + continue; + + /* If dest-addr doesn't match, not this rule. 
*/ + if (((f->fw_flg & IP_FW_F_INVDST) != 0) ^ ((ip->ip_dst.s_addr + & f->fw_dmsk.s_addr) != f->fw_dst.s_addr)) + continue; + + /* Interface check */ + if ((f->fw_flg & IF_FW_F_VIAHACK) == IF_FW_F_VIAHACK) { + struct ifnet *const iface = oif ? oif : rif; + + /* Backwards compatibility hack for "via" */ + if (!iface || !iface_match(iface, + &f->fw_in_if, f->fw_flg & IP_FW_F_OIFNAME)) + continue; + } else { + /* Check receive interface */ + if ((f->fw_flg & IP_FW_F_IIFACE) + && (!rif || !iface_match(rif, + &f->fw_in_if, f->fw_flg & IP_FW_F_IIFNAME))) + continue; + /* Check outgoing interface */ + if ((f->fw_flg & IP_FW_F_OIFACE) + && (!oif || !iface_match(oif, + &f->fw_out_if, f->fw_flg & IP_FW_F_OIFNAME))) + continue; + } + + /* Check IP options */ + if (f->fw_ipopt != f->fw_ipnopt && !ipopts_match(ip, f)) + continue; + + /* Check protocol; if wildcard, match */ + if (f->fw_prot == IPPROTO_IP) + goto got_match; + + /* If different, don't match */ + if (ip->ip_p != f->fw_prot) + continue; + +/* + * here, pip==NULL for bridged pkts -- they include the ethernet + * header so i have to adjust lengths accordingly + */ +#define PULLUP_TO(l) do { \ + int len = (pip ? l : l + 14 ) ; \ + if ((*m)->m_len < (len) ) { \ + if ( (*m = m_pullup(*m, (len))) == 0) \ + goto bogusfrag; \ + ip = mtod(*m, struct ip *); \ + if (pip) \ + *pip = ip ; \ + else \ + ip = (struct ip *)((int)ip + 14); \ + offset = (ip->ip_off & IP_OFFMASK); \ + } \ + } while (0) + + /* Protocol specific checks */ + switch (ip->ip_p) { + case IPPROTO_TCP: + { + struct tcphdr *tcp; + + if (offset == 1) /* cf. RFC 1858 */ + goto bogusfrag; + if (offset != 0) { + /* + * TCP flags and ports aren't available in this + * packet -- if this rule specified either one, + * we consider the rule a non-match. 
+ */ + if (f->fw_nports != 0 || + f->fw_tcpf != f->fw_tcpnf) + continue; + + break; + } + PULLUP_TO(hlen + 14); + tcp = (struct tcphdr *) ((u_int32_t *)ip + ip->ip_hl); + if (f->fw_tcpf != f->fw_tcpnf && !tcpflg_match(tcp, f)) + continue; + src_port = ntohs(tcp->th_sport); + dst_port = ntohs(tcp->th_dport); + goto check_ports; + } + + case IPPROTO_UDP: + { + struct udphdr *udp; + + if (offset != 0) { + /* + * Port specification is unavailable -- if this + * rule specifies a port, we consider the rule + * a non-match. + */ + if (f->fw_nports != 0) + continue; + + break; + } + PULLUP_TO(hlen + 4); + udp = (struct udphdr *) ((u_int32_t *)ip + ip->ip_hl); + src_port = ntohs(udp->uh_sport); + dst_port = ntohs(udp->uh_dport); +check_ports: + if (!port_match(&f->fw_uar.fw_pts[0], + IP_FW_GETNSRCP(f), src_port, + f->fw_flg & IP_FW_F_SRNG)) + continue; + if (!port_match(&f->fw_uar.fw_pts[IP_FW_GETNSRCP(f)], + IP_FW_GETNDSTP(f), dst_port, + f->fw_flg & IP_FW_F_DRNG)) + continue; + break; + } + + case IPPROTO_ICMP: + { + struct icmp *icmp; + + if (offset != 0) /* Type isn't valid */ + break; + PULLUP_TO(hlen + 2); + icmp = (struct icmp *) ((u_int32_t *)ip + ip->ip_hl); + if (!icmptype_match(icmp, f)) + continue; + break; + } +#undef PULLUP_TO + +bogusfrag: + if (fw_verbose) + ipfw_report(NULL, ip, rif, oif); + goto dropit; + } + +got_match: + *flow_id = chain ; /* XXX set flow id */ + /* Update statistics */ + f->fw_pcnt += 1; + if (ip) { + f->fw_bcnt += ip->ip_len; + } + f->timestamp = time_second; + + /* Log to console if desired */ + if ((f->fw_flg & IP_FW_F_PRN) && fw_verbose) + ipfw_report(f, ip, rif, oif); + + /* Take appropriate action */ + switch (f->fw_flg & IP_FW_F_COMMAND) { + case IP_FW_F_ACCEPT: + return(0); + case IP_FW_F_COUNT: + continue; +#if IPDIVERT + case IP_FW_F_DIVERT: + *cookie = f->fw_number; + return(f->fw_divert_port); +#endif + case IP_FW_F_TEE: + /* + * XXX someday tee packet here, but beware that you + * can't use m_copym() or m_copypacket() 
because + * the divert input routine modifies the mbuf + * (and these routines only increment reference + * counts in the case of mbuf clusters), so need + * to write custom routine. + */ + continue; + case IP_FW_F_SKIPTO: /* XXX check */ + if ( f->next_rule_ptr ) + chain = f->next_rule_ptr ; + else + chain = lookup_next_rule(chain) ; + if (! chain) goto dropit; + goto again ; +#if DUMMYNET + case IP_FW_F_PIPE: + return(f->fw_pipe_nr | 0x10000 ); +#endif +#if IPFIREWALL_FORWARD + case IP_FW_F_FWD: + /* Change the next-hop address for this packet. + * Initially we'll only worry about directly + * reachable next-hop's, but ultimately + * we will work out for next-hops that aren't + * direct the route we would take for it. We + * [cs]ould leave this latter problem to + * ip_output.c. We hope to high [name the abode of + * your favourite deity] that ip_output doesn't modify + * the new value of next_hop (which is dst there) + */ + if (next_hop != NULL) /* Make sure, first... */ + *next_hop = &(f->fw_fwd_ip); + return(0); /* Allow the packet */ +#endif + } + + /* Deny/reject this packet using this rule */ + rule = f; + break; + + } + +#if DIAGNOSTIC + /* Rule IPFW_DEFAULT_RULE should always be there and should always match */ + if (!chain) + panic("ip_fw: chain"); +#endif + + /* + * At this point, we're going to drop the packet. 
+ * Send a reject notice if all of the following are true: + * + * - The packet matched a reject rule + * - The packet is not an ICMP packet, or is an ICMP query packet + * - The packet is not a multicast or broadcast packet + */ + if ((rule->fw_flg & IP_FW_F_COMMAND) == IP_FW_F_REJECT + && ip + && (ip->ip_p != IPPROTO_ICMP || is_icmp_query(ip)) + && !((*m)->m_flags & (M_BCAST|M_MCAST)) + && !IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { + switch (rule->fw_reject_code) { + case IP_FW_REJECT_RST: + { + struct tcphdr *const tcp = + (struct tcphdr *) ((u_int32_t *)ip + ip->ip_hl); + struct tcpiphdr ti, *const tip = (struct tcpiphdr *) ip; + + if (offset != 0 || (tcp->th_flags & TH_RST)) + break; + ti.ti_i = *((struct ipovly *) ip); + ti.ti_t = *tcp; + bcopy(&ti, ip, sizeof(ti)); + NTOHL(tip->ti_seq); + NTOHL(tip->ti_ack); + tip->ti_len = ip->ip_len - hlen - (tip->ti_off << 2); + if (tcp->th_flags & TH_ACK) { + tcp_respond(NULL, (void *)tip, &tip->ti_t, *m, + (tcp_seq)0, ntohl(tcp->th_ack), TH_RST, 0); + } else { + if (tcp->th_flags & TH_SYN) + tip->ti_len++; + tcp_respond(NULL, (void *)tip, &tip->ti_t, *m, + tip->ti_seq + tip->ti_len, + (tcp_seq)0, TH_RST|TH_ACK, 0); + } + *m = NULL; + break; + } + default: /* Send an ICMP unreachable using code */ + icmp_error(*m, ICMP_UNREACH, + rule->fw_reject_code, 0L, 0); + *m = NULL; + break; + } + } + +dropit: + /* + * Finally, drop the packet. + */ + /* *cookie = 0; */ /* XXX is this necessary ? */ + if (*m) { + m_freem(*m); + *m = NULL; + } + return(0); +} + +/* + * when a rule is added/deleted, zero the direct pointers within + * all firewall rules. These will be reconstructed on the fly + * as packets are matched. + * Must be called at splnet(). 
+ */ +static void +flush_rule_ptrs() +{ + struct ip_fw_chain *fcp ; + + for (fcp = ip_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) { + fcp->rule->next_rule_ptr = NULL ; + } +} + +static int +add_entry(struct ip_fw_head *chainptr, struct ip_fw *frwl) +{ + struct ip_fw *ftmp = 0; + struct ip_fw_chain *fwc = 0, *fcp, *fcpl = 0; + u_short nbr = 0; + int s; + + fwc = _MALLOC(sizeof *fwc, M_IPFW, M_DONTWAIT); + ftmp = _MALLOC(sizeof *ftmp, M_IPFW, M_DONTWAIT); + if (!fwc || !ftmp) { + dprintf(("%s MALLOC said no\n", err_prefix)); + if (fwc) FREE(fwc, M_IPFW); + if (ftmp) FREE(ftmp, M_IPFW); + return (ENOSPC); + } + + bcopy(frwl, ftmp, sizeof(struct ip_fw)); + ftmp->fw_in_if.fu_via_if.name[FW_IFNLEN - 1] = '\0'; + ftmp->fw_pcnt = 0L; + ftmp->fw_bcnt = 0L; + ftmp->next_rule_ptr = NULL ; + ftmp->pipe_ptr = NULL ; + fwc->rule = ftmp; + + s = splnet(); + + if (chainptr->lh_first == 0) { + LIST_INSERT_HEAD(chainptr, fwc, chain); + splx(s); + return(0); + } + + /* If entry number is 0, find highest numbered rule and add 100 */ + if (ftmp->fw_number == 0) { + for (fcp = LIST_FIRST(chainptr); fcp; fcp = LIST_NEXT(fcp, chain)) { + if (fcp->rule->fw_number != (u_short)-1) + nbr = fcp->rule->fw_number; + else + break; + } + if (nbr < IPFW_DEFAULT_RULE - 100) + nbr += 100; + ftmp->fw_number = nbr; + } + + /* Got a valid number; now insert it, keeping the list ordered */ + for (fcp = LIST_FIRST(chainptr); fcp; fcp = LIST_NEXT(fcp, chain)) { + if (fcp->rule->fw_number > ftmp->fw_number) { + if (fcpl) { + LIST_INSERT_AFTER(fcpl, fwc, chain); + } else { + LIST_INSERT_HEAD(chainptr, fwc, chain); + } + break; + } else { + fcpl = fcp; + } + } + flush_rule_ptrs(); + + splx(s); + return (0); +} + +static int +del_entry(struct ip_fw_head *chainptr, u_short number) +{ + struct ip_fw_chain *fcp; + + fcp = LIST_FIRST(chainptr); + if (number != (u_short)-1) { + for (; fcp; fcp = LIST_NEXT(fcp, chain)) { + if (fcp->rule->fw_number == number) { + int s; + + /* prevent access to rules while 
removing them */ + s = splnet(); + while (fcp && fcp->rule->fw_number == number) { + struct ip_fw_chain *next; + + next = LIST_NEXT(fcp, chain); + LIST_REMOVE(fcp, chain); +#if DUMMYNET + dn_rule_delete(fcp) ; +#endif + flush_rule_ptrs(); + FREE(fcp->rule, M_IPFW); + FREE(fcp, M_IPFW); + fcp = next; + } + splx(s); + return 0; + } + } + } + + return (EINVAL); +} + +static int +zero_entry(struct ip_fw *frwl) +{ + struct ip_fw_chain *fcp; + int s, cleared; + + if (frwl == 0) { + s = splnet(); + for (fcp = LIST_FIRST(&ip_fw_chain); fcp; fcp = LIST_NEXT(fcp, chain)) { + fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; + fcp->rule->timestamp = 0; + } + splx(s); + } + else { + cleared = 0; + + /* + * It's possible to insert multiple chain entries with the + * same number, so we don't stop after finding the first + * match if zeroing a specific entry. + */ + for (fcp = LIST_FIRST(&ip_fw_chain); fcp; fcp = LIST_NEXT(fcp, chain)) + if (frwl->fw_number == fcp->rule->fw_number) { + s = splnet(); + while (fcp && frwl->fw_number == fcp->rule->fw_number) { + fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; + fcp->rule->timestamp = 0; + fcp = LIST_NEXT(fcp, chain); + } + splx(s); + cleared = 1; + break; + } + if (!cleared) /* we didn't find any matching rules */ + return (EINVAL); + } + + if (fw_verbose) { + if (frwl) + printf("ipfw: Entry %d cleared.\n", frwl->fw_number); + else + printf("ipfw: Accounting cleared.\n"); + } + + return (0); +} + +static int +check_ipfw_struct(struct ip_fw *frwl) +{ + /* Check for invalid flag bits */ + if ((frwl->fw_flg & ~IP_FW_F_MASK) != 0) { + dprintf(("%s undefined flag bits set (flags=%x)\n", + err_prefix, frwl->fw_flg)); + return (EINVAL); + } + /* Must apply to incoming or outgoing (or both) */ + if (!(frwl->fw_flg & (IP_FW_F_IN | IP_FW_F_OUT))) { + dprintf(("%s neither in nor out\n", err_prefix)); + return (EINVAL); + } + /* Empty interface name is no good */ + if (((frwl->fw_flg & IP_FW_F_IIFNAME) + && !*frwl->fw_in_if.fu_via_if.name) + || 
((frwl->fw_flg & IP_FW_F_OIFNAME) + && !*frwl->fw_out_if.fu_via_if.name)) { + dprintf(("%s empty interface name\n", err_prefix)); + return (EINVAL); + } + /* Sanity check interface matching */ + if ((frwl->fw_flg & IF_FW_F_VIAHACK) == IF_FW_F_VIAHACK) { + ; /* allow "via" backwards compatibility */ + } else if ((frwl->fw_flg & IP_FW_F_IN) + && (frwl->fw_flg & IP_FW_F_OIFACE)) { + dprintf(("%s outgoing interface check on incoming\n", + err_prefix)); + return (EINVAL); + } + /* Sanity check port ranges */ + if ((frwl->fw_flg & IP_FW_F_SRNG) && IP_FW_GETNSRCP(frwl) < 2) { + dprintf(("%s src range set but n_src_p=%d\n", + err_prefix, IP_FW_GETNSRCP(frwl))); + return (EINVAL); + } + if ((frwl->fw_flg & IP_FW_F_DRNG) && IP_FW_GETNDSTP(frwl) < 2) { + dprintf(("%s dst range set but n_dst_p=%d\n", + err_prefix, IP_FW_GETNDSTP(frwl))); + return (EINVAL); + } + if (IP_FW_GETNSRCP(frwl) + IP_FW_GETNDSTP(frwl) > IP_FW_MAX_PORTS) { + dprintf(("%s too many ports (%d+%d)\n", + err_prefix, IP_FW_GETNSRCP(frwl), IP_FW_GETNDSTP(frwl))); + return (EINVAL); + } + /* + * Protocols other than TCP/UDP don't use port range + */ + if ((frwl->fw_prot != IPPROTO_TCP) && + (frwl->fw_prot != IPPROTO_UDP) && + (IP_FW_GETNSRCP(frwl) || IP_FW_GETNDSTP(frwl))) { + dprintf(("%s port(s) specified for non TCP/UDP rule\n", + err_prefix)); + return (EINVAL); + } + + /* + * Rather than modify the entry to make such entries work, + * we reject this rule and require user level utilities + * to enforce whatever policy they deem appropriate. 
+ */ + if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || + (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { + dprintf(("%s rule never matches\n", err_prefix)); + return (EINVAL); + } + + if ((frwl->fw_flg & IP_FW_F_FRAG) && + (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { + if (frwl->fw_nports) { + dprintf(("%s cannot mix 'frag' and ports\n", err_prefix)); + return (EINVAL); + } + if (frwl->fw_prot == IPPROTO_TCP && + frwl->fw_tcpf != frwl->fw_tcpnf) { + dprintf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); + return (EINVAL); + } + } + + /* Check command specific stuff */ + switch (frwl->fw_flg & IP_FW_F_COMMAND) + { + case IP_FW_F_REJECT: + if (frwl->fw_reject_code >= 0x100 + && !(frwl->fw_prot == IPPROTO_TCP + && frwl->fw_reject_code == IP_FW_REJECT_RST)) { + dprintf(("%s unknown reject code\n", err_prefix)); + return (EINVAL); + } + break; + case IP_FW_F_DIVERT: /* Diverting to port zero is invalid */ + case IP_FW_F_PIPE: /* piping through 0 is invalid */ + case IP_FW_F_TEE: + if (frwl->fw_divert_port == 0) { + dprintf(("%s can't divert to port 0\n", err_prefix)); + return (EINVAL); + } + break; + case IP_FW_F_DENY: + case IP_FW_F_ACCEPT: + case IP_FW_F_COUNT: + case IP_FW_F_SKIPTO: +#if IPFIREWALL_FORWARD + case IP_FW_F_FWD: +#endif + break; + default: + dprintf(("%s invalid command\n", err_prefix)); + return (EINVAL); + } + + return 0; +} + +static int +ip_fw_ctl(struct sockopt *sopt) +{ + int error, s; + size_t size; + char *buf, *bp; + struct ip_fw_chain *fcp; + struct ip_fw frwl; + + /* Disallow sets in really-really secure mode. 
*/ + if (sopt->sopt_dir == SOPT_SET && securelevel >= 3) + return (EPERM); + error = 0; + + switch (sopt->sopt_name) { + case IP_FW_GET: + for (fcp = LIST_FIRST(&ip_fw_chain), size = 0; fcp; + fcp = LIST_NEXT(fcp, chain)) + size += sizeof *fcp->rule; + buf = _MALLOC(size, M_TEMP, M_WAITOK); + if (buf == 0) { + error = ENOBUFS; + break; + } + + for (fcp = LIST_FIRST(&ip_fw_chain), bp = buf; fcp; + fcp = LIST_NEXT(fcp, chain)) { + bcopy(fcp->rule, bp, sizeof *fcp->rule); + bp += sizeof *fcp->rule; + } + error = sooptcopyout(sopt, buf, size); + FREE(buf, M_TEMP); + break; + + case IP_FW_FLUSH: + for (fcp = ip_fw_chain.lh_first; + fcp != 0 && fcp->rule->fw_number != IPFW_DEFAULT_RULE; + fcp = ip_fw_chain.lh_first) { + s = splnet(); + LIST_REMOVE(fcp, chain); + FREE(fcp->rule, M_IPFW); + FREE(fcp, M_IPFW); + splx(s); + } + break; + + case IP_FW_ZERO: + if (sopt->sopt_val != 0) { + error = sooptcopyin(sopt, &frwl, sizeof frwl, + sizeof frwl); + if (error || (error = zero_entry(&frwl))) + break; + } else { + error = zero_entry(0); + } + break; + + case IP_FW_ADD: + error = sooptcopyin(sopt, &frwl, sizeof frwl, sizeof frwl); + if (error || (error = check_ipfw_struct(&frwl))) + break; + + if (frwl.fw_number == IPFW_DEFAULT_RULE) { + dprintf(("%s can't add rule %u\n", err_prefix, + (unsigned)IPFW_DEFAULT_RULE)); + error = EINVAL; + } else { + error = add_entry(&ip_fw_chain, &frwl); + } + break; + + case IP_FW_DEL: + error = sooptcopyin(sopt, &frwl, sizeof frwl, sizeof frwl); + if (error) + break; + + if (frwl.fw_number == IPFW_DEFAULT_RULE) { + dprintf(("%s can't delete rule %u\n", err_prefix, + (unsigned)IPFW_DEFAULT_RULE)); + error = EINVAL; + } else { + error = del_entry(&ip_fw_chain, frwl.fw_number); + } + break; + + default: + printf("ip_fw_ctl invalid option %d\n", sopt->sopt_name); + error = EINVAL ; + } + + return (error); +} + +struct ip_fw_chain *ip_fw_default_rule ; + +void +ip_fw_init(void) +{ + struct ip_fw default_rule; + + ip_fw_chk_ptr = ip_fw_chk; + 
ip_fw_ctl_ptr = ip_fw_ctl; + LIST_INIT(&ip_fw_chain); + + bzero(&default_rule, sizeof default_rule); + default_rule.fw_prot = IPPROTO_IP; + default_rule.fw_number = IPFW_DEFAULT_RULE; +#if IPFIREWALL_DEFAULT_TO_ACCEPT + default_rule.fw_flg |= IP_FW_F_ACCEPT; +#else + default_rule.fw_flg |= IP_FW_F_DENY; +#endif + default_rule.fw_flg |= IP_FW_F_IN | IP_FW_F_OUT; + if (check_ipfw_struct(&default_rule) != 0 || + add_entry(&ip_fw_chain, &default_rule)) + panic("ip_fw_init"); + + ip_fw_default_rule = ip_fw_chain.lh_first ; + printf("IP packet filtering initialized, " +#if IPDIVERT + "divert enabled, "); +#else + "divert disabled, "); +#endif +#if IPFIREWALL_FORWARD + printf("rule-based forwarding enabled, "); +#else + printf("rule-based forwarding disabled, "); +#endif +#if IPFIREWALL_DEFAULT_TO_ACCEPT + printf("default to accept, "); +#endif +#ifndef IPFIREWALL_VERBOSE + printf("logging disabled\n"); +#else + if (fw_verbose_limit == 0) + printf("unlimited logging\n"); + else + printf("logging limited to %d packets/entry\n", + fw_verbose_limit); +#endif +} + +#if ISFB31 + +/* + * ### LD 08/04/99: This is used if IPFIREWALL is a FreeBSD "KLD" module + * Right now, we're linked to the kernel all the time + * will be fixed with the use of an NKE? 
+ * + * Note: ip_fw_init is called from div_init() in xnu + */ + +static ip_fw_chk_t *old_chk_ptr; +static ip_fw_ctl_t *old_ctl_ptr; + +#if defined(IPFIREWALL_MODULE) && !defined(KLD_MODULE) + +#include +#include +#include + +MOD_MISC(ipfw); + +static int +ipfw_load(struct lkm_table *lkmtp, int cmd) +{ + int s=splnet(); + + old_chk_ptr = ip_fw_chk_ptr; + old_ctl_ptr = ip_fw_ctl_ptr; + + ip_fw_init(); + splx(s); + return 0; +} + +static int +ipfw_unload(struct lkm_table *lkmtp, int cmd) +{ + int s=splnet(); + + ip_fw_chk_ptr = old_chk_ptr; + ip_fw_ctl_ptr = old_ctl_ptr; + + while (LIST_FIRST(&ip_fw_chain) != NULL) { + struct ip_fw_chain *fcp = LIST_FIRST(&ip_fw_chain); + LIST_REMOVE(LIST_FIRST(&ip_fw_chain), chain); + FREE(fcp->rule, M_IPFW); + FREE(fcp, M_IPFW); + } + + splx(s); + printf("IP firewall unloaded\n"); + return 0; +} + +int +ipfw_mod(struct lkm_table *lkmtp, int cmd, int ver) +{ + MOD_DISPATCH(ipfw, lkmtp, cmd, ver, + ipfw_load, ipfw_unload, lkm_nullcmd); +} +#else +static int +ipfw_modevent(module_t mod, int type, void *unused) +{ + int s; + + switch (type) { + case MOD_LOAD: + s = splnet(); + + old_chk_ptr = ip_fw_chk_ptr; + old_ctl_ptr = ip_fw_ctl_ptr; + + ip_fw_init(); + splx(s); + return 0; + case MOD_UNLOAD: + s = splnet(); + + ip_fw_chk_ptr = old_chk_ptr; + ip_fw_ctl_ptr = old_ctl_ptr; + + while (LIST_FIRST(&ip_fw_chain) != NULL) { + struct ip_fw_chain *fcp = LIST_FIRST(&ip_fw_chain); + LIST_REMOVE(LIST_FIRST(&ip_fw_chain), chain); + FREE(fcp->rule, M_IPFW); + FREE(fcp, M_IPFW); + } + + splx(s); + printf("IP firewall unloaded\n"); + return 0; + default: + break; + } + return 0; +} + +static moduledata_t ipfwmod = { + "ipfw", + ipfw_modevent, + 0 +}; +DECLARE_MODULE(ipfw, ipfwmod, SI_SUB_PSEUDO, SI_ORDER_ANY); +#endif +#endif /* ISFB31 */ +#endif /* IPFIREWALL_KEXT */ + diff --git a/bsd/netinet/ip_fw.h b/bsd/netinet/ip_fw.h new file mode 100644 index 000000000..c4fbf860f --- /dev/null +++ b/bsd/netinet/ip_fw.h @@ -0,0 +1,231 @@ +/* + * Copyright 
(c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993 Daniel Boulet + * Copyright (c) 1994 Ugen J.S.Antsilevich + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. + * + */ + +#ifndef _IP_FW_H +#define _IP_FW_H + +#include + +/* + * This union structure identifies an interface, either explicitly + * by name or implicitly by IP address. The flags IP_FW_F_IIFNAME + * and IP_FW_F_OIFNAME say how to interpret this structure. An + * interface unit number of -1 matches any unit number, while an + * IP address of 0.0.0.0 indicates matches any interface. + * + * The receive and transmit interfaces are only compared against the + * the packet if the corresponding bit (IP_FW_F_IIFACE or IP_FW_F_OIFACE) + * is set. 
Note some packets lack a receive or transmit interface + * (in which case the missing "interface" never matches). + */ + +union ip_fw_if { + struct in_addr fu_via_ip; /* Specified by IP address */ + struct { /* Specified by interface name */ +#define FW_IFNLEN 10 /* need room ! was IFNAMSIZ */ + char name[FW_IFNLEN]; + short unit; /* -1 means match any unit */ + } fu_via_if; +}; + +/* + * Format of an IP firewall descriptor + * + * fw_src, fw_dst, fw_smsk, fw_dmsk are always stored in network byte order. + * fw_flg and fw_n*p are stored in host byte order (of course). + * Port numbers are stored in HOST byte order. + * Warning: setsockopt() will fail if sizeof(struct ip_fw) > MLEN (108) + */ + +struct ip_fw { + u_int64_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ + struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ + struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ + u_short fw_number; /* Rule number */ + u_int fw_flg; /* Flags word */ +#define IP_FW_MAX_PORTS 10 /* A reasonable maximum */ + union { + u_short fw_pts[IP_FW_MAX_PORTS]; /* Array of port numbers to match */ +#define IP_FW_ICMPTYPES_MAX 128 +#define IP_FW_ICMPTYPES_DIM (IP_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) + unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + } fw_uar; + u_char fw_ipopt,fw_ipnopt; /* IP options set/unset */ + u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ + long timestamp; /* timestamp (tv_sec) of last match */ + union ip_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ + u_short fu_pipe_nr; /* pipe number (option DUMMYNET) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + struct sockaddr_in fu_fwd_ip; + } fw_un; + u_char fw_prot; /* IP protocol */ + u_char fw_nports; /* N'of src ports and # of dst ports */ + /* in ports array (dst ports follow */ + /* src 
ports; max of 10 ports in all; */ + /* count of 0 means match all ports) */ + void *pipe_ptr; /* Pipe ptr in case of dummynet pipe */ + void *next_rule_ptr ; /* next rule in case of match */ +}; + +#define IP_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) +#define IP_FW_SETNSRCP(rule, n) do { \ + (rule)->fw_nports &= ~0x0f; \ + (rule)->fw_nports |= (n); \ + } while (0) +#define IP_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) +#define IP_FW_SETNDSTP(rule, n) do { \ + (rule)->fw_nports &= ~0xf0; \ + (rule)->fw_nports |= (n) << 4;\ + } while (0) + +#define fw_divert_port fw_un.fu_divert_port +#define fw_skipto_rule fw_un.fu_skipto_rule +#define fw_reject_code fw_un.fu_reject_code +#define fw_pipe_nr fw_un.fu_pipe_nr +#define fw_fwd_ip fw_un.fu_fwd_ip + +struct ip_fw_chain { + LIST_ENTRY(ip_fw_chain) chain; + struct ip_fw *rule; +}; + +/* + * Values for "flags" field . + */ +#define IP_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ +#define IP_FW_F_DENY 0x00000000 /* This is a deny rule */ +#define IP_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ +#define IP_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ +#define IP_FW_F_COUNT 0x00000003 /* This is a count rule */ +#define IP_FW_F_DIVERT 0x00000004 /* This is a divert rule */ +#define IP_FW_F_TEE 0x00000005 /* This is a tee rule */ +#define IP_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ +#define IP_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ +#define IP_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ + +#define IP_FW_F_IN 0x00000100 /* Check inbound packets */ +#define IP_FW_F_OUT 0x00000200 /* Check outbound packets */ +#define IP_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ +#define IP_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ + +#define IP_FW_F_PRN 0x00001000 /* Print if this rule matches */ + +#define IP_FW_F_SRNG 0x00002000 /* The first two src ports are a min * + * and max range (stored in host byte * + * order). 
*/ + +#define IP_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * + * and max range (stored in host byte * + * order). */ + +#define IP_FW_F_FRAG 0x00008000 /* Fragment */ + +#define IP_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ +#define IP_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ + +#define IP_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ +#define IP_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ + +#define IP_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ + +#define IP_FW_F_MASK 0x001FFFFF /* All possible flag bits mask */ + +/* + * For backwards compatibility with rules specifying "via iface" but + * not restricted to only "in" or "out" packets, we define this combination + * of bits to represent this configuration. + */ + +#define IF_FW_F_VIAHACK (IP_FW_F_IN|IP_FW_F_OUT|IP_FW_F_IIFACE|IP_FW_F_OIFACE) + +/* + * Definitions for REJECT response codes. + * Values less than 256 correspond to ICMP unreachable codes. + */ +#define IP_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ + +/* + * Definitions for IP option names. + */ +#define IP_FW_IPOPT_LSRR 0x01 +#define IP_FW_IPOPT_SSRR 0x02 +#define IP_FW_IPOPT_RR 0x04 +#define IP_FW_IPOPT_TS 0x08 + +/* + * Definitions for TCP flags. + */ +#define IP_FW_TCPF_FIN TH_FIN +#define IP_FW_TCPF_SYN TH_SYN +#define IP_FW_TCPF_RST TH_RST +#define IP_FW_TCPF_PSH TH_PUSH +#define IP_FW_TCPF_ACK TH_ACK +#define IP_FW_TCPF_URG TH_URG +#define IP_FW_TCPF_ESTAB 0x40 + +/* + * Main firewall chains definitions and global var's definitions. + */ +#ifdef KERNEL + +/* + * Function definitions. 
+ */ +void ip_fw_init __P((void)); + +/* Firewall hooks */ +struct ip; +struct sockopt; +typedef int ip_fw_chk_t __P((struct ip **, int, struct ifnet *, u_int16_t *, + struct mbuf **, struct ip_fw_chain **, struct sockaddr_in **)); +typedef int ip_fw_ctl_t __P((struct sockopt *)); +extern ip_fw_chk_t *ip_fw_chk_ptr; +extern ip_fw_ctl_t *ip_fw_ctl_ptr; + +/* IP NAT hooks */ +typedef int ip_nat_t __P((struct ip **, struct mbuf **, struct ifnet *, int)); +typedef int ip_nat_ctl_t __P((struct sockopt *)); +extern ip_nat_t *ip_nat_ptr; +extern ip_nat_ctl_t *ip_nat_ctl_ptr; +#define IP_NAT_IN 0x00000001 +#define IP_NAT_OUT 0x00000002 + +#endif /* KERNEL */ + +#endif /* _IP_FW_H */ diff --git a/bsd/netinet/ip_icmp.c b/bsd/netinet/ip_icmp.c new file mode 100644 index 000000000..e29e6a08a --- /dev/null +++ b/bsd/netinet/ip_icmp.c @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ip_icmp.c 8.2 (Berkeley) 1/4/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define _IP_VHL +#include +#include +#include +#include +#include +#include +#include + +#if IPSEC +#include +#include +#endif + +#if defined(NFAITH) && NFAITH > 0 +#include "faith.h" +#include +#endif + +/* + * ICMP routines: error generation, receive packet processing, and + * routines to turnaround packets back to the originator, and + * host table maintenance routines. + */ + +static struct icmpstat icmpstat; +SYSCTL_STRUCT(_net_inet_icmp, ICMPCTL_STATS, stats, CTLFLAG_RD, + &icmpstat, icmpstat, ""); + +static int icmpmaskrepl = 0; +SYSCTL_INT(_net_inet_icmp, ICMPCTL_MASKREPL, maskrepl, CTLFLAG_RW, + &icmpmaskrepl, 0, ""); + +#if ICMP_BANDLIM + +/* + * ICMP error-response bandwidth limiting sysctl. If not enabled, sysctl + * variable content is -1 and read-only. + */ + +static int icmplim = 100; +SYSCTL_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RW, + &icmplim, 0, ""); +#else + +static int icmplim = -1; +SYSCTL_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RD, + &icmplim, 0, ""); + +#endif + +/* + * ICMP broadcast echo sysctl + */ + +static int icmpbmcastecho = 0; +SYSCTL_INT(_net_inet_icmp, OID_AUTO, bmcastecho, CTLFLAG_RW, &icmpbmcastecho, + 0, ""); + + +#if ICMPPRINTFS +int icmpprintfs = 0; +#endif + +static void icmp_reflect __P((struct mbuf *)); +static void icmp_send __P((struct mbuf *, struct mbuf *)); +int ip_next_mtu __P((int, int)); + +extern struct protosw inetsw[]; + +/* + * Generate an error packet of type error + * in response to bad packet ip. 
+ */ +void +icmp_error(n, type, code, dest, destifp) + struct mbuf *n; + int type, code; + n_long dest; + struct ifnet *destifp; +{ + register struct ip *oip = mtod(n, struct ip *), *nip; + register unsigned oiplen = IP_VHL_HL(oip->ip_vhl) << 2; + register struct icmp *icp; + register struct mbuf *m; + unsigned icmplen; + +#if ICMPPRINTFS + if (icmpprintfs) + printf("icmp_error(%p, %x, %d)\n", oip, type, code); +#endif + if (type != ICMP_REDIRECT) + icmpstat.icps_error++; + /* + * Don't send error if the original packet was encrypted. + * Don't send error if not the first fragment of message. + * Don't error if the old packet protocol was ICMP + * error message, only known informational types. + */ + if (n->m_flags & M_DECRYPTED) + goto freeit; + if (oip->ip_off &~ (IP_MF|IP_DF)) + goto freeit; + if (oip->ip_p == IPPROTO_ICMP && type != ICMP_REDIRECT && + n->m_len >= oiplen + ICMP_MINLEN && + !ICMP_INFOTYPE(((struct icmp *)((caddr_t)oip + oiplen))->icmp_type)) { + icmpstat.icps_oldicmp++; + goto freeit; + } + /* Don't send error in response to a multicast or broadcast packet */ + if (n->m_flags & (M_BCAST|M_MCAST)) + goto freeit; + /* + * First, formulate icmp message + */ + m = m_gethdr(M_DONTWAIT, MT_HEADER); + if (m == NULL) + goto freeit; + icmplen = oiplen + min(8, oip->ip_len); + m->m_len = icmplen + ICMP_MINLEN; + MH_ALIGN(m, m->m_len); + icp = mtod(m, struct icmp *); + if ((u_int)type > ICMP_MAXTYPE) + panic("icmp_error"); + icmpstat.icps_outhist[type]++; + icp->icmp_type = type; + if (type == ICMP_REDIRECT) + icp->icmp_gwaddr.s_addr = dest; + else { + icp->icmp_void = 0; + /* + * The following assignments assume an overlay with the + * zeroed icmp_void field. 
+ */ + if (type == ICMP_PARAMPROB) { + icp->icmp_pptr = code; + code = 0; + } else if (type == ICMP_UNREACH && + code == ICMP_UNREACH_NEEDFRAG && destifp) { + icp->icmp_nextmtu = htons(destifp->if_mtu); + } + } + + icp->icmp_code = code; + bcopy((caddr_t)oip, (caddr_t)&icp->icmp_ip, icmplen); + nip = &icp->icmp_ip; + nip->ip_len = htons((u_short)(nip->ip_len + oiplen)); + + /* + * Now, copy old ip header (without options) + * in front of icmp message. + */ + if (m->m_data - sizeof(struct ip) < m->m_pktdat) + panic("icmp len"); + m->m_data -= sizeof(struct ip); + m->m_len += sizeof(struct ip); + m->m_pkthdr.len = m->m_len; + m->m_pkthdr.rcvif = n->m_pkthdr.rcvif; + m->m_pkthdr.aux = NULL; /* for IPsec */ + nip = mtod(m, struct ip *); + bcopy((caddr_t)oip, (caddr_t)nip, sizeof(struct ip)); + nip->ip_len = m->m_len; + nip->ip_vhl = IP_VHL_BORING; + nip->ip_p = IPPROTO_ICMP; + nip->ip_tos = 0; + icmp_reflect(m); + +freeit: + m_freem(n); +} + +static struct sockaddr_in icmpsrc = { sizeof (struct sockaddr_in), AF_INET }; +static struct sockaddr_in icmpdst = { sizeof (struct sockaddr_in), AF_INET }; +static struct sockaddr_in icmpgw = { sizeof (struct sockaddr_in), AF_INET }; + +/* + * Process a received ICMP message. + */ +void +icmp_input(m, hlen) + register struct mbuf *m; + int hlen; +{ + register struct icmp *icp; + register struct ip *ip = mtod(m, struct ip *); + int icmplen = ip->ip_len; + register int i; + struct in_ifaddr *ia; + void (*ctlfunc) __P((int, struct sockaddr *, void *)); + int code; + + /* + * Locate icmp structure in mbuf, and check + * that not corrupted and of at least minimum length. 
+ */ +#if ICMPPRINTFS + if (icmpprintfs) { + char buf[4 * sizeof "123"]; + strcpy(buf, inet_ntoa(ip->ip_src)); + printf("icmp_input from %s to %s, len %d\n", + buf, inet_ntoa(ip->ip_dst), icmplen); + } +#endif + if (icmplen < ICMP_MINLEN) { + icmpstat.icps_tooshort++; + goto freeit; + } + i = hlen + min(icmplen, ICMP_ADVLENMIN); + if (m->m_len < i && (m = m_pullup(m, i)) == 0) { + icmpstat.icps_tooshort++; + return; + } + ip = mtod(m, struct ip *); + m->m_len -= hlen; + m->m_data += hlen; + icp = mtod(m, struct icmp *); + if (in_cksum(m, icmplen)) { + icmpstat.icps_checksum++; + goto freeit; + } + m->m_len += hlen; + m->m_data -= hlen; + +#if defined(NFAITH) && 0 < NFAITH + if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) { + /* + * Deliver very specific ICMP type only. + */ + switch (icp->icmp_type) { + case ICMP_UNREACH: + case ICMP_TIMXCEED: + break; + default: + goto freeit; + } + } +#endif + +#if ICMPPRINTFS + if (icmpprintfs) + printf("icmp_input, type %d code %d\n", icp->icmp_type, + icp->icmp_code); +#endif + +#if IPSEC + /* drop it if it does not match the policy */ + /* XXX Is there meaning of check in here ? */ + if (ipsec4_in_reject(m, NULL)) { + ipsecstat.in_polvio++; + goto freeit; + } +#endif + + /* + * Message type specific processing. 
+ */ + if (icp->icmp_type > ICMP_MAXTYPE) + goto raw; + icmpstat.icps_inhist[icp->icmp_type]++; + code = icp->icmp_code; + switch (icp->icmp_type) { + + case ICMP_UNREACH: + switch (code) { + case ICMP_UNREACH_NET: + case ICMP_UNREACH_HOST: + case ICMP_UNREACH_PROTOCOL: + case ICMP_UNREACH_PORT: + case ICMP_UNREACH_SRCFAIL: + code += PRC_UNREACH_NET; + break; + + case ICMP_UNREACH_NEEDFRAG: + code = PRC_MSGSIZE; + break; + + case ICMP_UNREACH_NET_UNKNOWN: + case ICMP_UNREACH_NET_PROHIB: + case ICMP_UNREACH_TOSNET: + code = PRC_UNREACH_NET; + break; + + case ICMP_UNREACH_HOST_UNKNOWN: + case ICMP_UNREACH_ISOLATED: + case ICMP_UNREACH_HOST_PROHIB: + case ICMP_UNREACH_TOSHOST: + code = PRC_UNREACH_HOST; + break; + + case ICMP_UNREACH_FILTER_PROHIB: + case ICMP_UNREACH_HOST_PRECEDENCE: + case ICMP_UNREACH_PRECEDENCE_CUTOFF: + code = PRC_UNREACH_PORT; + break; + + default: + goto badcode; + } + goto deliver; + + case ICMP_TIMXCEED: + if (code > 1) + goto badcode; + code += PRC_TIMXCEED_INTRANS; + goto deliver; + + case ICMP_PARAMPROB: + if (code > 1) + goto badcode; + code = PRC_PARAMPROB; + goto deliver; + + case ICMP_SOURCEQUENCH: + if (code) + goto badcode; + code = PRC_QUENCH; + deliver: + /* + * Problem with datagram; advise higher level routines. + */ + if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) || + IP_VHL_HL(icp->icmp_ip.ip_vhl) < (sizeof(struct ip) >> 2)) { + icmpstat.icps_badlen++; + goto freeit; + } + NTOHS(icp->icmp_ip.ip_len); + /* Discard ICMP's in response to multicast packets */ + if (IN_MULTICAST(ntohl(icp->icmp_ip.ip_dst.s_addr))) + goto badcode; +#if ICMPPRINTFS + if (icmpprintfs) + printf("deliver to protocol %d\n", icp->icmp_ip.ip_p); +#endif + icmpsrc.sin_addr = icp->icmp_ip.ip_dst; +#if 1 + /* + * MTU discovery: + * If we got a needfrag and there is a host route to the + * original destination, and the MTU is not locked, then + * set the MTU in the route to the suggested new value + * (if given) and then notify as usual. 
The ULPs will + * notice that the MTU has changed and adapt accordingly. + * If no new MTU was suggested, then we guess a new one + * less than the current value. If the new MTU is + * unreasonably small (arbitrarily set at 296), then + * we reset the MTU to the interface value and enable the + * lock bit, indicating that we are no longer doing MTU + * discovery. + */ + if (code == PRC_MSGSIZE) { + struct rtentry *rt; + int mtu; + + rt = rtalloc1((struct sockaddr *)&icmpsrc, 0, + RTF_CLONING | RTF_PRCLONING); + if (rt && (rt->rt_flags & RTF_HOST) + && !(rt->rt_rmx.rmx_locks & RTV_MTU)) { + mtu = ntohs(icp->icmp_nextmtu); + if (!mtu) + mtu = ip_next_mtu(rt->rt_rmx.rmx_mtu, + 1); +#if DEBUG_MTUDISC + printf("MTU for %s reduced to %d\n", + inet_ntoa(icmpsrc.sin_addr), mtu); +#endif + if (mtu < 296) { + /* rt->rt_rmx.rmx_mtu = + rt->rt_ifp->if_mtu; */ + rt->rt_rmx.rmx_locks |= RTV_MTU; + } else if (rt->rt_rmx.rmx_mtu > mtu) { + rt->rt_rmx.rmx_mtu = mtu; + } + } + if (rt) + RTFREE(rt); + } + +#endif + /* + * XXX if the packet contains [IPv4 AH TCP], we can't make a + * notification to TCP layer. + */ + ctlfunc = ip_protox[icp->icmp_ip.ip_p]->pr_ctlinput; + if (ctlfunc) + (*ctlfunc)(code, (struct sockaddr *)&icmpsrc, + (void *)&icp->icmp_ip); + break; + + badcode: + icmpstat.icps_badcode++; + break; + + case ICMP_ECHO: + if (!icmpbmcastecho + && (m->m_flags & (M_MCAST | M_BCAST)) != 0) { + icmpstat.icps_bmcastecho++; + break; + } + icp->icmp_type = ICMP_ECHOREPLY; + goto reflect; + + case ICMP_TSTAMP: + if (!icmpbmcastecho + && (m->m_flags & (M_MCAST | M_BCAST)) != 0) { + icmpstat.icps_bmcasttstamp++; + break; + } + if (icmplen < ICMP_TSLEN) { + icmpstat.icps_badlen++; + break; + } + icp->icmp_type = ICMP_TSTAMPREPLY; + icp->icmp_rtime = iptime(); + icp->icmp_ttime = icp->icmp_rtime; /* bogus, do later! 
*/ + goto reflect; + + case ICMP_MASKREQ: +#define satosin(sa) ((struct sockaddr_in *)(sa)) + if (icmpmaskrepl == 0) + break; + /* + * We are not able to respond with all ones broadcast + * unless we receive it over a point-to-point interface. + */ + if (icmplen < ICMP_MASKLEN) + break; + switch (ip->ip_dst.s_addr) { + + case INADDR_BROADCAST: + case INADDR_ANY: + icmpdst.sin_addr = ip->ip_src; + break; + + default: + icmpdst.sin_addr = ip->ip_dst; + } + ia = (struct in_ifaddr *)ifaof_ifpforaddr( + (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif); + if (ia == 0) + break; + if (ia->ia_ifp == 0) + break; + icp->icmp_type = ICMP_MASKREPLY; + icp->icmp_mask = ia->ia_sockmask.sin_addr.s_addr; + if (ip->ip_src.s_addr == 0) { + if (ia->ia_ifp->if_flags & IFF_BROADCAST) + ip->ip_src = satosin(&ia->ia_broadaddr)->sin_addr; + else if (ia->ia_ifp->if_flags & IFF_POINTOPOINT) + ip->ip_src = satosin(&ia->ia_dstaddr)->sin_addr; + } +reflect: + ip->ip_len += hlen; /* since ip_input deducts this */ + icmpstat.icps_reflect++; + icmpstat.icps_outhist[icp->icmp_type]++; + icmp_reflect(m); + return; + + case ICMP_REDIRECT: + if (code > 3) + goto badcode; + if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) || + IP_VHL_HL(icp->icmp_ip.ip_vhl) < (sizeof(struct ip) >> 2)) { + icmpstat.icps_badlen++; + break; + } + /* + * Short circuit routing redirects to force + * immediate change in the kernel's routing + * tables. The message is also handed to anyone + * listening on a raw socket (e.g. the routing + * daemon for use in updating its tables). 
+ */ + icmpgw.sin_addr = ip->ip_src; + icmpdst.sin_addr = icp->icmp_gwaddr; +#if ICMPPRINTFS + if (icmpprintfs) { + char buf[4 * sizeof "123"]; + strcpy(buf, inet_ntoa(icp->icmp_ip.ip_dst)); + + printf("redirect dst %s to %s\n", + buf, inet_ntoa(icp->icmp_gwaddr)); + } +#endif + icmpsrc.sin_addr = icp->icmp_ip.ip_dst; + rtredirect((struct sockaddr *)&icmpsrc, + (struct sockaddr *)&icmpdst, + (struct sockaddr *)0, RTF_GATEWAY | RTF_HOST, + (struct sockaddr *)&icmpgw, (struct rtentry **)0); + pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&icmpsrc); +#if IPSEC + key_sa_routechange((struct sockaddr *)&icmpsrc); +#endif + break; + + /* + * No kernel processing for the following; + * just fall through to send to raw listener. + */ + case ICMP_ECHOREPLY: + case ICMP_ROUTERADVERT: + case ICMP_ROUTERSOLICIT: + case ICMP_TSTAMPREPLY: + case ICMP_IREQREPLY: + case ICMP_MASKREPLY: + default: + break; + } + +raw: + rip_input(m, hlen); + return; + +freeit: + m_freem(m); +} + +/* + * Reflect the ip packet back to the source + */ +static void +icmp_reflect(m) + struct mbuf *m; +{ + register struct ip *ip = mtod(m, struct ip *); + register struct in_ifaddr *ia; + struct in_addr t; + struct mbuf *opts = 0; + int optlen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); + + if (!in_canforward(ip->ip_src) && + ((ntohl(ip->ip_src.s_addr) & IN_CLASSA_NET) != + (IN_LOOPBACKNET << IN_CLASSA_NSHIFT))) { + m_freem(m); /* Bad return address */ + goto done; /* Ip_output() will check for broadcast */ + } + t = ip->ip_dst; + ip->ip_dst = ip->ip_src; + /* + * If the incoming packet was addressed directly to us, + * use dst as the src for the reply. Otherwise (broadcast + * or anonymous), use the address which corresponds + * to the incoming interface. 
+ */ + for (ia = in_ifaddrhead.tqh_first; ia; ia = ia->ia_link.tqe_next) { + if (t.s_addr == IA_SIN(ia)->sin_addr.s_addr) + break; + if (ia->ia_ifp && (ia->ia_ifp->if_flags & IFF_BROADCAST) && + t.s_addr == satosin(&ia->ia_broadaddr)->sin_addr.s_addr) + break; + } + icmpdst.sin_addr = t; + if ((ia == (struct in_ifaddr *)0) && m->m_pkthdr.rcvif) + ia = (struct in_ifaddr *)ifaof_ifpforaddr( + (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif); + /* + * The following happens if the packet was not addressed to us, + * and was received on an interface with no IP address. + */ + if (ia == (struct in_ifaddr *)0) + ia = in_ifaddrhead.tqh_first; + t = IA_SIN(ia)->sin_addr; + ip->ip_src = t; + ip->ip_ttl = MAXTTL; + + if (optlen > 0) { + register u_char *cp; + int opt, cnt; + u_int len; + + /* + * Retrieve any source routing from the incoming packet; + * add on any record-route or timestamp options. + */ + cp = (u_char *) (ip + 1); + if ((opts = ip_srcroute()) == 0 && + (opts = m_gethdr(M_DONTWAIT, MT_HEADER))) { + opts->m_len = sizeof(struct in_addr); + mtod(opts, struct in_addr *)->s_addr = 0; + } + if (opts) { +#if ICMPPRINTFS + if (icmpprintfs) + printf("icmp_reflect optlen %d rt %d => ", + optlen, opts->m_len); +#endif + for (cnt = optlen; cnt > 0; cnt -= len, cp += len) { + opt = cp[IPOPT_OPTVAL]; + if (opt == IPOPT_EOL) + break; + if (opt == IPOPT_NOP) + len = 1; + else { + if (cnt < IPOPT_OLEN + sizeof(*cp)) + break; + len = cp[IPOPT_OLEN]; + if (len < IPOPT_OLEN + sizeof(*cp) || + len > cnt) + break; + } + /* + * Should check for overflow, but it "can't happen" + */ + if (opt == IPOPT_RR || opt == IPOPT_TS || + opt == IPOPT_SECURITY) { + bcopy((caddr_t)cp, + mtod(opts, caddr_t) + opts->m_len, len); + opts->m_len += len; + } + } + /* Terminate & pad, if necessary */ + cnt = opts->m_len % 4; + if (cnt) { + for (; cnt < 4; cnt++) { + *(mtod(opts, caddr_t) + opts->m_len) = + IPOPT_EOL; + opts->m_len++; + } + } +#if ICMPPRINTFS + if (icmpprintfs) + printf("%d\n", 
opts->m_len); +#endif + } + /* + * Now strip out original options by copying rest of first + * mbuf's data back, and adjust the IP length. + */ + ip->ip_len -= optlen; + ip->ip_vhl = IP_VHL_BORING; + m->m_len -= optlen; + if (m->m_flags & M_PKTHDR) + m->m_pkthdr.len -= optlen; + optlen += sizeof(struct ip); + bcopy((caddr_t)ip + optlen, (caddr_t)(ip + 1), + (unsigned)(m->m_len - sizeof(struct ip))); + } + m->m_flags &= ~(M_BCAST|M_MCAST); + icmp_send(m, opts); +done: + if (opts) + (void)m_free(opts); +} + +/* + * Send an icmp packet back to the ip level, + * after supplying a checksum. + */ +static void +icmp_send(m, opts) + register struct mbuf *m; + struct mbuf *opts; +{ + register struct ip *ip = mtod(m, struct ip *); + register int hlen; + register struct icmp *icp; + struct route ro; + + hlen = IP_VHL_HL(ip->ip_vhl) << 2; + m->m_data += hlen; + m->m_len -= hlen; + icp = mtod(m, struct icmp *); + icp->icmp_cksum = 0; + icp->icmp_cksum = in_cksum(m, ip->ip_len - hlen); + m->m_data -= hlen; + m->m_len += hlen; + m->m_pkthdr.rcvif = (struct ifnet *)0; + m->m_pkthdr.aux = NULL; +#if ICMPPRINTFS + if (icmpprintfs) { + char buf[4 * sizeof "123"]; + strcpy(buf, inet_ntoa(ip->ip_dst)); + printf("icmp_send dst %s src %s\n", + buf, inet_ntoa(ip->ip_src)); + } +#endif + bzero(&ro, sizeof ro); + +#ifdef IPSEC + ipsec_setsocket(m, NULL); +#endif /*IPSEC*/ + (void) ip_output(m, opts, &ro, 0, NULL); + if (ro.ro_rt) + RTFREE(ro.ro_rt); +} + +n_time +iptime() +{ + struct timeval atv; + u_long t; + + microtime(&atv); + t = (atv.tv_sec % (24*60*60)) * 1000 + atv.tv_usec / 1000; + return (htonl(t)); +} + +#if 1 +/* + * Return the next larger or smaller MTU plateau (table from RFC 1191) + * given current value MTU. If DIR is less than zero, a larger plateau + * is returned; otherwise, a smaller value is returned. 
 + */ +/* static */ int +ip_next_mtu(mtu, dir) + int mtu; + int dir; +{ + static int mtutab[] = { + 65535, 32000, 17914, 8166, 4352, 2002, 1492, 1006, 508, 296, + 68, 0 + }; + int i; + + for (i = 0; i < (sizeof mtutab) / (sizeof mtutab[0]); i++) { + if (mtu >= mtutab[i]) + break; + } + + if (dir < 0) { + if (i == 0) { + return 0; + } else { + return mtutab[i - 1]; + } + } else { + if (mtutab[i] == 0) { + return 0; + } else if(mtu > mtutab[i]) { + return mtutab[i]; + } else { + return mtutab[i + 1]; + } + } +} +#endif + +#if ICMP_BANDLIM + +/* + * badport_bandlim() - check for ICMP bandwidth limit + * + * Return 0 if it is ok to send an ICMP error response, -1 if we have + * hit our bandwidth limit and it is not ok. + * + * If icmplim is <= 0, the feature is disabled and 0 is returned. + * + * For now we separate the TCP and UDP subsystems w/ different 'which' + * values. We may eventually remove this separation (and simplify the + * code further). + * + * Note that the printing of the error message is delayed so we can + * properly print the icmp error rate that the system was trying to do + * (i.e. 22000/100 pps, etc...). This can cause long delays in printing + * the 'final' error, but it doesn't make sense to solve the printing + * delay with more complex code. + */ + +int +badport_bandlim(int which) +{ + static int lticks[2]; + static int lpackets[2]; + int dticks; + + /* + * Return ok status if feature disabled or argument out of + * range. + */ + + if (icmplim <= 0 || which >= 2 || which < 0) + return(0); + dticks = ticks - lticks[which]; + + /* + * reset stats when cumulative dt exceeds one second. 
+ */ + + if ((unsigned int)dticks > hz) { + if (lpackets[which] > icmplim) { + printf("icmp-response bandwidth limit %d/%d pps\n", + lpackets[which], + icmplim + ); + } + lticks[which] = ticks; + lpackets[which] = 0; + } + + /* + * bump packet count + */ + + if (++lpackets[which] > icmplim) { + return(-1); + } + return(0); +} + +#endif + + diff --git a/bsd/netinet/ip_icmp.h b/bsd/netinet/ip_icmp.h new file mode 100644 index 000000000..6a9fb7bfb --- /dev/null +++ b/bsd/netinet/ip_icmp.h @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_icmp.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_IP_ICMP_H_ +#define _NETINET_IP_ICMP_H_ + +/* + * Internet Control Message Protocol Definitions. + * Per RFC 792, September 1981. + */ + +/* + * Internal structure of an ICMP Router Advertisement + */ +struct icmp_ra_addr { + u_int32_t ira_addr; + u_int32_t ira_preference; +}; + +/* + * Structure of an icmp header. 
+ */ +struct icmp { + u_char icmp_type; /* type of message, see below */ + u_char icmp_code; /* type sub code */ + u_short icmp_cksum; /* ones complement cksum of struct */ + union { + u_char ih_pptr; /* ICMP_PARAMPROB */ + struct in_addr ih_gwaddr; /* ICMP_REDIRECT */ + struct ih_idseq { + n_short icd_id; + n_short icd_seq; + } ih_idseq; + int ih_void; + + /* ICMP_UNREACH_NEEDFRAG -- Path MTU Discovery (RFC1191) */ + struct ih_pmtu { + n_short ipm_void; + n_short ipm_nextmtu; + } ih_pmtu; + + struct ih_rtradv { + u_char irt_num_addrs; + u_char irt_wpa; + u_int16_t irt_lifetime; + } ih_rtradv; + } icmp_hun; +#define icmp_pptr icmp_hun.ih_pptr +#define icmp_gwaddr icmp_hun.ih_gwaddr +#define icmp_id icmp_hun.ih_idseq.icd_id +#define icmp_seq icmp_hun.ih_idseq.icd_seq +#define icmp_void icmp_hun.ih_void +#define icmp_pmvoid icmp_hun.ih_pmtu.ipm_void +#define icmp_nextmtu icmp_hun.ih_pmtu.ipm_nextmtu +#define icmp_num_addrs icmp_hun.ih_rtradv.irt_num_addrs +#define icmp_wpa icmp_hun.ih_rtradv.irt_wpa +#define icmp_lifetime icmp_hun.ih_rtradv.irt_lifetime + union { + struct id_ts { + n_time its_otime; + n_time its_rtime; + n_time its_ttime; + } id_ts; + struct id_ip { + struct ip idi_ip; + /* options and then 64 bits of data */ + } id_ip; + struct icmp_ra_addr id_radv; + u_int32_t id_mask; + char id_data[1]; + } icmp_dun; +#define icmp_otime icmp_dun.id_ts.its_otime +#define icmp_rtime icmp_dun.id_ts.its_rtime +#define icmp_ttime icmp_dun.id_ts.its_ttime +#define icmp_ip icmp_dun.id_ip.idi_ip +#define icmp_radv icmp_dun.id_radv +#define icmp_mask icmp_dun.id_mask +#define icmp_data icmp_dun.id_data +}; + +/* + * Lower bounds on packet lengths for various types. + * For the error advice packets must first insure that the + * packet is large enough to contain the returned ip header. + * Only then can we do the check to see if 64 bits of packet + * data have been returned, since we need to check the returned + * ip header length. 
+ */ +#define ICMP_MINLEN 8 /* abs minimum */ +#define ICMP_TSLEN (8 + 3 * sizeof (n_time)) /* timestamp */ +#define ICMP_MASKLEN 12 /* address mask */ +#define ICMP_ADVLENMIN (8 + sizeof (struct ip) + 8) /* min */ +#ifndef _IP_VHL +#define ICMP_ADVLEN(p) (8 + ((p)->icmp_ip.ip_hl << 2) + 8) + /* N.B.: must separately check that ip_hl >= 5 */ +#else +#define ICMP_ADVLEN(p) (8 + (IP_VHL_HL((p)->icmp_ip.ip_vhl) << 2) + 8) + /* N.B.: must separately check that header length >= 5 */ +#endif + +/* + * Definition of type and code field values. + */ +#define ICMP_ECHOREPLY 0 /* echo reply */ +#define ICMP_UNREACH 3 /* dest unreachable, codes: */ +#define ICMP_UNREACH_NET 0 /* bad net */ +#define ICMP_UNREACH_HOST 1 /* bad host */ +#define ICMP_UNREACH_PROTOCOL 2 /* bad protocol */ +#define ICMP_UNREACH_PORT 3 /* bad port */ +#define ICMP_UNREACH_NEEDFRAG 4 /* IP_DF caused drop */ +#define ICMP_UNREACH_SRCFAIL 5 /* src route failed */ +#define ICMP_UNREACH_NET_UNKNOWN 6 /* unknown net */ +#define ICMP_UNREACH_HOST_UNKNOWN 7 /* unknown host */ +#define ICMP_UNREACH_ISOLATED 8 /* src host isolated */ +#define ICMP_UNREACH_NET_PROHIB 9 /* prohibited access */ +#define ICMP_UNREACH_HOST_PROHIB 10 /* ditto */ +#define ICMP_UNREACH_TOSNET 11 /* bad tos for net */ +#define ICMP_UNREACH_TOSHOST 12 /* bad tos for host */ +#define ICMP_UNREACH_FILTER_PROHIB 13 /* admin prohib */ +#define ICMP_UNREACH_HOST_PRECEDENCE 14 /* host prec vio. 
*/ +#define ICMP_UNREACH_PRECEDENCE_CUTOFF 15 /* prec cutoff */ +#define ICMP_SOURCEQUENCH 4 /* packet lost, slow down */ +#define ICMP_REDIRECT 5 /* shorter route, codes: */ +#define ICMP_REDIRECT_NET 0 /* for network */ +#define ICMP_REDIRECT_HOST 1 /* for host */ +#define ICMP_REDIRECT_TOSNET 2 /* for tos and net */ +#define ICMP_REDIRECT_TOSHOST 3 /* for tos and host */ +#define ICMP_ECHO 8 /* echo service */ +#define ICMP_ROUTERADVERT 9 /* router advertisement */ +#define ICMP_ROUTERSOLICIT 10 /* router solicitation */ +#define ICMP_TIMXCEED 11 /* time exceeded, code: */ +#define ICMP_TIMXCEED_INTRANS 0 /* ttl==0 in transit */ +#define ICMP_TIMXCEED_REASS 1 /* ttl==0 in reass */ +#define ICMP_PARAMPROB 12 /* ip header bad */ +#define ICMP_PARAMPROB_ERRATPTR 0 /* error at param ptr */ +#define ICMP_PARAMPROB_OPTABSENT 1 /* req. opt. absent */ +#define ICMP_PARAMPROB_LENGTH 2 /* bad length */ +#define ICMP_TSTAMP 13 /* timestamp request */ +#define ICMP_TSTAMPREPLY 14 /* timestamp reply */ +#define ICMP_IREQ 15 /* information request */ +#define ICMP_IREQREPLY 16 /* information reply */ +#define ICMP_MASKREQ 17 /* address mask request */ +#define ICMP_MASKREPLY 18 /* address mask reply */ + +#define ICMP_MAXTYPE 18 + +#define ICMP_INFOTYPE(type) \ + ((type) == ICMP_ECHOREPLY || (type) == ICMP_ECHO || \ + (type) == ICMP_ROUTERADVERT || (type) == ICMP_ROUTERSOLICIT || \ + (type) == ICMP_TSTAMP || (type) == ICMP_TSTAMPREPLY || \ + (type) == ICMP_IREQ || (type) == ICMP_IREQREPLY || \ + (type) == ICMP_MASKREQ || (type) == ICMP_MASKREPLY) + +#ifdef KERNEL +void icmp_error __P((struct mbuf *, int, int, n_long, struct ifnet *)); +void icmp_input __P((struct mbuf *, int)); +#endif + +#endif diff --git a/bsd/netinet/ip_input.c b/bsd/netinet/ip_input.c new file mode 100644 index 000000000..fd3530236 --- /dev/null +++ b/bsd/netinet/ip_input.c @@ -0,0 +1,1896 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 + * $ANA: ip_input.c,v 1.5 1996/09/18 14:34:59 wollman Exp $ + */ + +#define _IP_VHL + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#ifdef INET6 +#include +#include +#endif +#include +#include +#include +#include + +#include + + +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 0) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 2) +#define DBG_FNC_IP_INPUT NETDBG_CODE(DBG_NETIP, (2 << 8)) + + +#if IPFIREWALL +#include +#endif + +#if IPSEC +#include +#include +#include +#endif + +#include "faith.h" +#if defined(NFAITH) && NFAITH > 0 +#include +#endif + +#if DUMMYNET +#include +#endif + +int rsvp_on = 0; +static int ip_rsvp_on; +struct socket *ip_rsvpd; + +int ipforwarding = 0; +SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW, + &ipforwarding, 0, ""); + +static int ipsendredirects = 1; /* XXX */ +SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW, + &ipsendredirects, 0, ""); + +int ip_defttl = IPDEFTTL; +SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, 
ttl, CTLFLAG_RW, + &ip_defttl, 0, ""); + +static int ip_dosourceroute = 0; +SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW, + &ip_dosourceroute, 0, ""); + +static int ip_acceptsourceroute = 0; +SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute, + CTLFLAG_RW, &ip_acceptsourceroute, 0, ""); + +static int ip_keepfaith = 0; +SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW, + &ip_keepfaith, 0, ""); + +#if DIAGNOSTIC +static int ipprintfs = 0; +#endif + +extern struct domain inetdomain; +extern struct protosw inetsw[]; +struct protosw *ip_protox[IPPROTO_MAX]; +static int ipqmaxlen = IFQ_MAXLEN; +struct in_ifaddrhead in_ifaddrhead; /* first inet address */ +struct ifqueue ipintrq; +SYSCTL_INT(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, CTLFLAG_RD, + &ipintrq.ifq_maxlen, 0, ""); +SYSCTL_INT(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, CTLFLAG_RD, + &ipintrq.ifq_drops, 0, ""); + +struct ipstat ipstat; +SYSCTL_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RD, + &ipstat, ipstat, ""); + +/* Packet reassembly stuff */ +#define IPREASS_NHASH_LOG2 6 +#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) +#define IPREASS_HMASK (IPREASS_NHASH - 1) +#define IPREASS_HASH(x,y) \ + ((((x) & 0xF | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK) + +static struct ipq ipq[IPREASS_NHASH]; +static int nipq = 0; /* total # of reass queues */ +static int maxnipq; + +#if IPCTL_DEFMTU +SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW, + &ip_mtu, 0, ""); +#endif + +#if !defined(COMPAT_IPFW) || COMPAT_IPFW == 1 +#undef COMPAT_IPFW +#define COMPAT_IPFW 1 +#else +#undef COMPAT_IPFW +#endif + +#if COMPAT_IPFW + +#include + +/* Firewall hooks */ +ip_fw_chk_t *ip_fw_chk_ptr; +ip_fw_ctl_t *ip_fw_ctl_ptr; + +#if DUMMYNET +ip_dn_ctl_t *ip_dn_ctl_ptr; +#endif + +/* IP Network Address Translation (NAT) hooks */ +ip_nat_t *ip_nat_ptr; +ip_nat_ctl_t *ip_nat_ctl_ptr; +#endif + +#if defined(IPFILTER_LKM) || defined(IPFILTER) +int 
iplattach __P((void)); +int (*fr_checkp) __P((struct ip *, int, struct ifnet *, int, struct mbuf **)) = NULL; +#endif + + +/* + * We need to save the IP options in case a protocol wants to respond + * to an incoming packet over the same route if the packet got here + * using IP source routing. This allows connection establishment and + * maintenance when the remote end is on a network that is not known + * to us. + */ +static int ip_nhops = 0; +static struct ip_srcrt { + struct in_addr dst; /* final destination */ + char nop; /* one NOP to align */ + char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */ + struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)]; +} ip_srcrt; + +#if IPDIVERT +/* + * Shared variable between ip_input() and ip_reass() to communicate + * about which packets, once assembled from fragments, get diverted, + * and to which port. + */ +static u_short frag_divert_port; +#endif + +struct sockaddr_in *ip_fw_fwd_addr; + +static void save_rte __P((u_char *, struct in_addr)); +static int ip_dooptions __P((struct mbuf *)); +#ifndef NATPT +static +#endif +void ip_forward __P((struct mbuf *, int)); +static void ip_freef __P((struct ipq *)); +static struct ip * + ip_reass __P((struct mbuf *, struct ipq *, struct ipq *)); +static struct in_ifaddr * + ip_rtaddr __P((struct in_addr)); +void ipintr __P((void)); + +#if PM +extern int doNatFil; +extern int doRoute; + +extern int pm_in __P((struct ifnet *, struct ip *, struct mbuf *)); +extern struct route *pm_route __P((struct mbuf *)); +#endif + +#if defined(NATPT) +extern int ip6_protocol_tr; + +int natpt_in4 __P((struct mbuf *, struct mbuf **)); + +#endif /* NATPT */ + +/* + * IP initialization: fill in IP protocol switch table. + * All protocols not implemented in kernel go to raw IP protocol handler. 
+ */ +void +ip_init() +{ + register struct protosw *pr; + register int i; + static ip_initialized = 0; + + if (!ip_initialized) + { + TAILQ_INIT(&in_ifaddrhead); + pr = pffindproto(PF_INET, IPPROTO_RAW, SOCK_RAW); + if (pr == 0) + panic("ip_init"); + for (i = 0; i < IPPROTO_MAX; i++) + ip_protox[i] = pr; + for (pr = inetdomain.dom_protosw; pr; pr = pr->pr_next) + { if(!((unsigned int)pr->pr_domain)) continue; /* If uninitialized, skip */ + if (pr->pr_domain->dom_family == PF_INET && + pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) + ip_protox[pr->pr_protocol] = pr; + } + for (i = 0; i < IPREASS_NHASH; i++) + ipq[i].next = ipq[i].prev = &ipq[i]; + + maxnipq = nmbclusters/4; + + ip_id = time_second & 0xffff; + ipintrq.ifq_maxlen = ipqmaxlen; +#if DUMMYNET + ip_dn_init(); +#endif +#if IPNAT + ip_nat_init(); +#endif +#if IPFILTER + iplattach(); +#endif + ip_initialized = 1; + } +} + +/* Initialize the PF_INET domain, and add in the pre-defined protos */ +void +in_dinit() +{ register int i; + register struct protosw *pr; + register struct domain *dp; + static inetdomain_initted = 0; + extern int in_proto_count; + + if (!inetdomain_initted) + { kprintf("Initing %d protosw entries\n", in_proto_count); + dp = &inetdomain; + + for (i=0, pr = &inetsw[0]; im_type == MT_DUMMYNET) { + struct mbuf *m0 = m ; + rule = (struct ip_fw_chain *)(m->m_data) ; + m = m->m_next ; + FREE(m0, M_IPFW); + ip = mtod(m, struct ip *); + hlen = IP_VHL_HL(ip->ip_vhl) << 2; + goto iphack ; + } else + rule = NULL ; +#endif + +#if DIAGNOSTIC + if (m == NULL || (m->m_flags & M_PKTHDR) == 0) + panic("ip_input no HDR"); +#endif + /* + * If no IP addresses have been set yet but the interfaces + * are receiving, can't do anything with incoming packets yet. + * XXX This is broken! We should be able to receive broadcasts + * and multicasts even without any local addresses configured. 
+ */ + if (TAILQ_EMPTY(&in_ifaddrhead)) + goto bad; + ipstat.ips_total++; + + if (m->m_pkthdr.len < sizeof(struct ip)) + goto tooshort; + + if (m->m_len < sizeof (struct ip) && + (m = m_pullup(m, sizeof (struct ip))) == 0) { + ipstat.ips_toosmall++; + return; + } + ip = mtod(m, struct ip *); + + KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, + ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); + + if (IP_VHL_V(ip->ip_vhl) != IPVERSION) { + ipstat.ips_badvers++; + goto bad; + } + + hlen = IP_VHL_HL(ip->ip_vhl) << 2; + if (hlen < sizeof(struct ip)) { /* minimum header length */ + ipstat.ips_badhlen++; + goto bad; + } + if (hlen > m->m_len) { + if ((m = m_pullup(m, hlen)) == 0) { + ipstat.ips_badhlen++; + return; + } + ip = mtod(m, struct ip *); + } + + sum = in_cksum(m, hlen); + + if (sum) { + ipstat.ips_badsum++; + goto bad; + } + + /* + * Convert fields to host representation. + */ + NTOHS(ip->ip_len); + if (ip->ip_len < hlen) { + ipstat.ips_badlen++; + goto bad; + } + NTOHS(ip->ip_id); + NTOHS(ip->ip_off); + + /* + * Check that the amount of data in the buffers + * is as at least much as the IP header would have us expect. + * Trim mbufs if longer than we expect. + * Drop packet if shorter than we expect. + */ + if (m->m_pkthdr.len < ip->ip_len) { +tooshort: + ipstat.ips_tooshort++; + goto bad; + } + if (m->m_pkthdr.len > ip->ip_len) { + if (m->m_len == m->m_pkthdr.len) { + m->m_len = ip->ip_len; + m->m_pkthdr.len = ip->ip_len; + } else + m_adj(m, ip->ip_len - m->m_pkthdr.len); + } + /* + * IpHack's section. + * Right now when no processing on packet has done + * and it is still fresh out of network we do our black + * deals with it. + * - Firewall: deny/allow/divert + * - Xlate: translate packet's addr/port (NAT). + * - Pipe: pass pkt through dummynet. + * - Wrap: fake packet's addr/port + * - Encapsulate: put it in another IP and send out. 
 + */ + +#if defined(IPFIREWALL) && defined(DUMMYNET) +iphack: +#endif +#if defined(IPFILTER) || defined(IPFILTER_LKM) + /* + * Check if we want to allow this packet to be processed. + * Consider it to be bad if not. + */ + if (fr_checkp) { + struct mbuf *m1 = m; + + if ((*fr_checkp)(ip, hlen, m->m_pkthdr.rcvif, 0, &m1) || !m1) + return; + ip = mtod(m = m1, struct ip *); + } +#endif +#if COMPAT_IPFW + if (ip_fw_chk_ptr) { +#if IPFIREWALL_FORWARD + /* + * If we've been forwarded from the output side, then + * skip the firewall a second time + */ + if (ip_fw_fwd_addr) + goto ours; +#endif /* IPFIREWALL_FORWARD */ + i = (*ip_fw_chk_ptr)(&ip, hlen, NULL, &ip_divert_cookie, + &m, &rule, &ip_fw_fwd_addr); + /* + * see the comment in ip_output for the return values + * produced by the firewall. + */ + if (!m) /* packet discarded by firewall */ + return ; + if (i == 0 && ip_fw_fwd_addr == NULL) /* common case */ + goto pass ; +#if DUMMYNET + if (i & 0x10000) { + /* send packet to the appropriate pipe */ + dummynet_io(i&0xffff,DN_TO_IP_IN,m,NULL,NULL,0, rule); + return ; + } +#endif +#if IPDIVERT + if (i > 0 && i < 0x10000) { + /* Divert packet */ + frag_divert_port = i & 0xffff ; + goto ours; + } +#endif +#if IPFIREWALL_FORWARD + if (i == 0 && ip_fw_fwd_addr != NULL) + goto pass ; +#endif + /* + * if we get here, the packet must be dropped + */ + m_freem(m); + return; + } +pass: + + if (ip_nat_ptr && !(*ip_nat_ptr)(&ip, &m, m->m_pkthdr.rcvif, IP_NAT_IN)) { +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; +#endif + return; + } +#endif /* !COMPAT_IPFW */ + +#if defined(PM) + /* + * Process ip-filter/NAT. + * Return TRUE if this packet is discarded. + * Return FALSE if this packet is accepted. 
+ */ + + if (doNatFil && pm_in(m->m_pkthdr.rcvif, ip, m)) + return; +#endif + +#if defined(NATPT) + /* + * + */ + if (ip6_protocol_tr) + { + struct mbuf *m1 = NULL; + + switch (natpt_in4(m, &m1)) + { + case IPPROTO_IP: goto dooptions; + case IPPROTO_IPV4: ip_forward(m1, 0); break; + case IPPROTO_IPV6: ip6_forward(m1, 1); break; + case IPPROTO_MAX: /* discard this packet */ + default: + } + + if (m != m1) + m_freem(m); + + return; + } + dooptions: +#endif + + /* + * Process options and, if not destined for us, + * ship it on. ip_dooptions returns 1 when an + * error was detected (causing an icmp message + * to be sent and the original packet to be freed). + */ + ip_nhops = 0; /* for source routed packets */ + if (hlen > sizeof (struct ip) && ip_dooptions(m)) { +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; +#endif + return; + } + + /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no + * matter if it is destined to another node, or whether it is + * a multicast one, RSVP wants it! and prevents it from being forwarded + * anywhere else. Also checks if the rsvp daemon is running before + * grabbing the packet. + */ + if (rsvp_on && ip->ip_p==IPPROTO_RSVP) + goto ours; + + /* + * Check our list of addresses, to see if the packet is for us. + */ + for (ia = TAILQ_FIRST(&in_ifaddrhead); ia; + ia = TAILQ_NEXT(ia, ia_link)) { +#define satosin(sa) ((struct sockaddr_in *)(sa)) + + if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) + goto ours; + + if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) + goto ours; + +#if IPFIREWALL_FORWARD + /* + * If the addr to forward to is one of ours, we pretend to + * be the destination for this packet. 
+ */ + if (ip_fw_fwd_addr == NULL) { + if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) + goto ours; + } else if (IA_SIN(ia)->sin_addr.s_addr == + ip_fw_fwd_addr->sin_addr.s_addr) + goto ours; +#else + if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) + goto ours; +#endif + if (ia->ia_ifp && ia->ia_ifp->if_flags & IFF_BROADCAST) { + if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == + ip->ip_dst.s_addr) + goto ours; + if (ip->ip_dst.s_addr == ia->ia_netbroadcast.s_addr) + goto ours; + } + } + if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { + struct in_multi *inm; + if (ip_mrouter) { + /* + * If we are acting as a multicast router, all + * incoming multicast packets are passed to the + * kernel-level multicast forwarding function. + * The packet is returned (relatively) intact; if + * ip_mforward() returns a non-zero value, the packet + * must be discarded, else it may be accepted below. + * + * (The IP ident field is put in the same byte order + * as expected when ip_mforward() is called from + * ip_output().) + */ + ip->ip_id = htons(ip->ip_id); + if (ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) { + ipstat.ips_cantforward++; + m_freem(m); + return; + } + ip->ip_id = ntohs(ip->ip_id); + + /* + * The process-level routing demon needs to receive + * all multicast IGMP packets, whether or not this + * host belongs to their destination groups. + */ + if (ip->ip_p == IPPROTO_IGMP) + goto ours; + ipstat.ips_forward++; + } + /* + * See if we belong to the destination multicast group on the + * arrival interface. 
+ */ + IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm); + if (inm == NULL) { + ipstat.ips_notmember++; + m_freem(m); + return; + } + goto ours; + } + if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST) + goto ours; + if (ip->ip_dst.s_addr == INADDR_ANY) + goto ours; + +#if defined(NFAITH) && NFAITH > 0 + /* + * FAITH(Firewall Aided Internet Translator) + */ + if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) { + if (ip_keepfaith) { + if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP) + goto ours; + } + m_freem(m); + return; + } +#endif + /* + * Not for us; forward if possible and desirable. + */ + if (ipforwarding == 0) { + ipstat.ips_cantforward++; + m_freem(m); + } else + ip_forward(m, 0); +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; +#endif + return; + +ours: + + /* + * If offset or IP_MF are set, must reassemble. + * Otherwise, nothing need be done. + * (We could look in the reassembly queue to see + * if the packet was previously fragmented, + * but it's not worth the time; just let them time out.) + */ + if (ip->ip_off & (IP_MF | IP_OFFMASK | IP_RF)) { + if (m->m_flags & M_EXT) { /* XXX */ + if ((m = m_pullup(m, hlen)) == 0) { + ipstat.ips_toosmall++; +#if IPDIVERT + frag_divert_port = 0; + ip_divert_cookie = 0; +#endif +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; +#endif + return; + } + ip = mtod(m, struct ip *); + } + sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id); + /* + * Look for queue of fragments + * of this datagram. 
+ */ + for (fp = ipq[sum].next; fp != &ipq[sum]; fp = fp->next) + if (ip->ip_id == fp->ipq_id && + ip->ip_src.s_addr == fp->ipq_src.s_addr && + ip->ip_dst.s_addr == fp->ipq_dst.s_addr && + ip->ip_p == fp->ipq_p) + goto found; + + fp = 0; + + /* check if there's a place for the new queue */ + if (nipq > maxnipq) { + /* + * drop something from the tail of the current queue + * before proceeding further + */ + if (ipq[sum].prev == &ipq[sum]) { /* gak */ + for (i = 0; i < IPREASS_NHASH; i++) { + if (ipq[i].prev != &ipq[i]) { + ip_freef(ipq[i].prev); + break; + } + } + } else + ip_freef(ipq[sum].prev); + } +found: + /* + * Adjust ip_len to not reflect header, + * set ip_mff if more fragments are expected, + * convert offset of this to bytes. + */ + ip->ip_len -= hlen; + mff = (ip->ip_off & IP_MF) != 0; + if (mff) { + /* + * Make sure that fragments have a data length + * that's a non-zero multiple of 8 bytes. + */ + if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) { + ipstat.ips_toosmall++; /* XXX */ + goto bad; + } + m->m_flags |= M_FRAG; + } + ip->ip_off <<= 3; + + /* + * If datagram marked as having more fragments + * or if this is not the first fragment, + * attempt reassembly; if it succeeds, proceed. 
+ */ + if (mff || ip->ip_off) { + ipstat.ips_fragments++; + m->m_pkthdr.header = ip; + ip = ip_reass(m, fp, &ipq[sum]); + if (ip == 0) { +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; +#endif + return; + } + /* Get the length of the reassembled packets header */ + hlen = IP_VHL_HL(ip->ip_vhl) << 2; + ipstat.ips_reassembled++; + m = dtom(ip); +#if IPDIVERT + if (frag_divert_port) { + struct mbuf m; + m.m_next = 0; + m.m_len = hlen; + m.m_data = (char *) ip; + ip->ip_len += hlen; + HTONS(ip->ip_len); + HTONS(ip->ip_off); + HTONS(ip->ip_id); + ip->ip_sum = 0; + ip->ip_sum = in_cksum(&m, hlen); + NTOHS(ip->ip_id); + NTOHS(ip->ip_off); + NTOHS(ip->ip_len); + ip->ip_len -= hlen; + } +#endif + } else + if (fp) + ip_freef(fp); + } else + ip->ip_len -= hlen; + +#if IPDIVERT + /* + * Divert reassembled packets to the divert protocol if required + * If divert port is null then cookie should be too, + * so we shouldn't need to clear them here. Assume ip_divert does so. + */ + if (frag_divert_port) { + ipstat.ips_delivered++; + ip_divert_port = frag_divert_port; + frag_divert_port = 0; + (*ip_protox[IPPROTO_DIVERT]->pr_input)(m, hlen); + return; + } + + /* Don't let packets divert themselves */ + if (ip->ip_p == IPPROTO_DIVERT) { + ipstat.ips_noproto++; + goto bad; + } + +#endif + + /* + * Switch out to protocol's input routine. 
+ */ + ipstat.ips_delivered++; + + KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr, + ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); + + (*ip_protox[ip->ip_p]->pr_input)(m, hlen); +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; /* tcp needed it */ +#endif + return; +bad: +#if IPFIREWALL_FORWARD + ip_fw_fwd_addr = NULL; +#endif + KERNEL_DEBUG(DBG_LAYER_END, 0,0,0,0,0); + m_freem(m); +} + +/* + * IP software interrupt routine - to go away sometime soon + */ +void +ipintr(void) +{ + int s; + struct mbuf *m; + + KERNEL_DEBUG(DBG_FNC_IP_INPUT | DBG_FUNC_START, 0,0,0,0,0); + + while(1) { + s = splimp(); + IF_DEQUEUE(&ipintrq, m); + splx(s); + if (m == 0) { + KERNEL_DEBUG(DBG_FNC_IP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; + } + + ip_input(m); + } +} + +NETISR_SET(NETISR_IP, ipintr); + +/* + * Take incoming datagram fragment and try to + * reassemble it into whole datagram. If a chain for + * reassembly of this datagram already exists, then it + * is given as fp; otherwise have to make a chain. + */ +static struct ip * +ip_reass(m, fp, where) + register struct mbuf *m; + register struct ipq *fp; + struct ipq *where; +{ + struct ip *ip = mtod(m, struct ip *); + register struct mbuf *p = 0, *q, *nq; + struct mbuf *t; + int hlen = IP_VHL_HL(ip->ip_vhl) << 2; + int i, next; + + /* + * Presence of header sizes in mbufs + * would confuse code below. + */ + m->m_data += hlen; + m->m_len -= hlen; + + /* + * If first fragment to arrive, create a reassembly queue. 
+ */ + if (fp == 0) { + if ((t = m_get(M_DONTWAIT, MT_FTABLE)) == NULL) + goto dropfrag; + fp = mtod(t, struct ipq *); + insque((void *) fp, (void *) where); + nipq++; + fp->ipq_ttl = IPFRAGTTL; + fp->ipq_p = ip->ip_p; + fp->ipq_id = ip->ip_id; + fp->ipq_src = ip->ip_src; + fp->ipq_dst = ip->ip_dst; + fp->ipq_frags = m; + m->m_nextpkt = NULL; +#if IPDIVERT + fp->ipq_divert = 0; + fp->ipq_div_cookie = 0; +#endif + goto inserted; + } + +#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header)) + + /* + * Find a segment which begins after this one does. + */ + for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) + if (GETIP(q)->ip_off > ip->ip_off) + break; + + /* + * If there is a preceding segment, it may provide some of + * our data already. If so, drop the data from the incoming + * segment. If it provides all of our data, drop us, otherwise + * stick new segment in the proper place. + */ + if (p) { + i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off; + if (i > 0) { + if (i >= ip->ip_len) + goto dropfrag; + m_adj(dtom(ip), i); + ip->ip_off += i; + ip->ip_len -= i; + } + m->m_nextpkt = p->m_nextpkt; + p->m_nextpkt = m; + } else { + m->m_nextpkt = fp->ipq_frags; + fp->ipq_frags = m; + } + + /* + * While we overlap succeeding segments trim them or, + * if they are completely covered, dequeue them. + */ + for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off; + q = nq) { + i = (ip->ip_off + ip->ip_len) - + GETIP(q)->ip_off; + if (i < GETIP(q)->ip_len) { + GETIP(q)->ip_len -= i; + GETIP(q)->ip_off += i; + m_adj(q, i); + break; + } + nq = q->m_nextpkt; + m->m_nextpkt = nq; + m_freem(q); + } + +inserted: + +#if IPDIVERT + /* + * Any fragment diverting causes the whole packet to divert + */ + if (frag_divert_port) { + fp->ipq_divert = frag_divert_port; + fp->ipq_div_cookie = ip_divert_cookie; + } + frag_divert_port = 0; + ip_divert_cookie = 0; +#endif + + /* + * Check for complete reassembly. 
+ */ + next = 0; + for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { + if (GETIP(q)->ip_off != next) + return (0); + next += GETIP(q)->ip_len; + } + /* Make sure the last packet didn't have the IP_MF flag */ + if (p->m_flags & M_FRAG) + return (0); + + /* + * Reassembly is complete. Make sure the packet is a sane size. + */ + q = fp->ipq_frags; + ip = GETIP(q); + if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) { + ipstat.ips_toolong++; + ip_freef(fp); + return (0); + } + + /* + * Concatenate fragments. + */ + m = q; + t = m->m_next; + m->m_next = 0; + m_cat(m, t); + nq = q->m_nextpkt; + q->m_nextpkt = 0; + for (q = nq; q != NULL; q = nq) { + nq = q->m_nextpkt; + q->m_nextpkt = NULL; + m_cat(m, q); + } + +#if IPDIVERT + /* + * extract divert port for packet, if any + */ + frag_divert_port = fp->ipq_divert; + ip_divert_cookie = fp->ipq_div_cookie; +#endif + + /* + * Create header for new ip packet by + * modifying header of first packet; + * dequeue and discard fragment reassembly header. + * Make header visible. + */ + ip->ip_len = next; + ip->ip_src = fp->ipq_src; + ip->ip_dst = fp->ipq_dst; + remque((void *) fp); + nipq--; + (void) m_free(dtom(fp)); + m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2); + m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2); + /* some debugging cruft by sklower, below, will go away soon */ + if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */ + register int plen = 0; + for (t = m; m; m = m->m_next) + plen += m->m_len; + t->m_pkthdr.len = plen; + } + return (ip); + +dropfrag: +#if IPDIVERT + frag_divert_port = 0; + ip_divert_cookie = 0; +#endif + ipstat.ips_fragdropped++; + m_freem(m); + return (0); + +#undef GETIP +} + +/* + * Free a fragment reassembly header and all + * associated datagrams. 
+ */
+static void
+ip_freef(fp)
+	struct ipq *fp;
+{
+	register struct mbuf *q;
+
+	/* Release every fragment mbuf chain still queued on this datagram. */
+	while (fp->ipq_frags) {
+		q = fp->ipq_frags;
+		fp->ipq_frags = q->m_nextpkt;
+		m_freem(q);
+	}
+	/*
+	 * Unlink the reassembly header from its hash chain and free the
+	 * mbuf backing it (allocated with m_get(MT_FTABLE) in ip_reass).
+	 */
+	remque((void *) fp);
+	(void) m_free(dtom(fp));
+	nipq--;
+}
+
+/*
+ * IP timer processing;
+ * if a timer expires on a reassembly
+ * queue, discard it.
+ */
+void
+ip_slowtimo()
+{
+	register struct ipq *fp;
+	int s = splnet();
+	int i;
+
+	/* Walk every reassembly hash bucket, aging each queued datagram. */
+	for (i = 0; i < IPREASS_NHASH; i++) {
+		fp = ipq[i].next;
+		if (fp == 0)
+			continue;
+		while (fp != &ipq[i]) {
+			--fp->ipq_ttl;
+			/*
+			 * Advance the iterator first so that ip_freef() on
+			 * the expired entry (fp->prev) cannot invalidate the
+			 * element we are standing on.
+			 */
+			fp = fp->next;
+			if (fp->prev->ipq_ttl == 0) {
+				ipstat.ips_fragtimeout++;
+				ip_freef(fp->prev);
+			}
+		}
+	}
+	ipflow_slowtimo();
+	splx(s);
+}
+
+/*
+ * Drain off all datagram fragments.
+ */
+void
+ip_drain()
+{
+	int i;
+
+	/*
+	 * Emergency reclaim path: drop every partially reassembled
+	 * datagram in every hash bucket, then ask the routing code to
+	 * shed cached entries as well.
+	 */
+	for (i = 0; i < IPREASS_NHASH; i++) {
+		while (ipq[i].next != &ipq[i]) {
+			ipstat.ips_fragdropped++;
+			ip_freef(ipq[i].next);
+		}
+	}
+	in_rtqdrain();
+}
+
+/*
+ * Do option processing on a datagram,
+ * possibly discarding it if bad options are encountered,
+ * or forwarding it if source-routed.
+ * Returns 1 if packet has been forwarded/freed,
+ * 0 if the packet should be processed further.
+ */
+static int
+ip_dooptions(m)
+	struct mbuf *m;
+{
+	register struct ip *ip = mtod(m, struct ip *);
+	register u_char *cp;
+	register struct ip_timestamp *ipt;
+	register struct in_ifaddr *ia;
+	/* "code" doubles as the ICMP code/pointer value reported via
+	 * icmp_error() at the "bad:" label; "type" defaults to parameter
+	 * problem and is overridden for unreachable cases below. */
+	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
+	struct in_addr *sin, dst;
+	n_time ntime;
+
+	dst = ip->ip_dst;
+	cp = (u_char *)(ip + 1);
+	/* Total option bytes = header length minus the fixed IP header. */
+	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
+	for (; cnt > 0; cnt -= optlen, cp += optlen) {
+		opt = cp[IPOPT_OPTVAL];
+		if (opt == IPOPT_EOL)
+			break;
+		if (opt == IPOPT_NOP)
+			optlen = 1;
+		else {
+			/* Validate the option length byte before trusting it. */
+			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
+				code = &cp[IPOPT_OLEN] - (u_char *)ip;
+				goto bad;
+			}
+			optlen = cp[IPOPT_OLEN];
+			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
+				code = &cp[IPOPT_OLEN] - (u_char *)ip;
+				goto bad;
+			}
+		}
+		switch (opt) {
+
+		default:
+			break;
+
+		/*
+		 * Source routing with record.
+		 * Find interface with current destination address.
+		 * If none on this machine then drop if strictly routed,
+		 * or do nothing if loosely routed.
+		 * Record interface address and bring up next address
+		 * component. If strictly routed make sure next
+		 * address is on directly accessible net.
+		 */
+		case IPOPT_LSRR:
+		case IPOPT_SSRR:
+			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
+				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+				goto bad;
+			}
+			ipaddr.sin_addr = ip->ip_dst;
+			ia = (struct in_ifaddr *)
+				ifa_ifwithaddr((struct sockaddr *)&ipaddr);
+			if (ia == 0) {
+				if (opt == IPOPT_SSRR) {
+					type = ICMP_UNREACH;
+					code = ICMP_UNREACH_SRCFAIL;
+					goto bad;
+				}
+				if (!ip_dosourceroute)
+					goto nosourcerouting;
+				/*
+				 * Loose routing, and not at next destination
+				 * yet; nothing to do except forward.
+				 */
+				break;
+			}
+			off--;			/* 0 origin */
+			if (off > optlen - sizeof(struct in_addr)) {
+				/*
+				 * End of source route. Should be for us.
+				 */
+				if (!ip_acceptsourceroute)
+					goto nosourcerouting;
+				save_rte(cp, ip->ip_src);
+				break;
+			}
+
+			if (!ip_dosourceroute) {
+				if (ipforwarding) {
+					char buf[16]; /* aaa.bbb.ccc.ddd\0 */
+					/*
+					 * Acting as a router, so generate ICMP
+					 */
+nosourcerouting:
+					strcpy(buf, inet_ntoa(ip->ip_dst));
+					log(LOG_WARNING,
+					    "attempted source route from %s to %s\n",
+					    inet_ntoa(ip->ip_src), buf);
+					type = ICMP_UNREACH;
+					code = ICMP_UNREACH_SRCFAIL;
+					goto bad;
+				} else {
+					/*
+					 * Not acting as a router, so silently drop.
+					 */
+					ipstat.ips_cantforward++;
+					m_freem(m);
+					return (1);
+				}
+			}
+
+			/*
+			 * locate outgoing interface
+			 */
+			(void)memcpy(&ipaddr.sin_addr, cp + off,
+			    sizeof(ipaddr.sin_addr));
+
+			if (opt == IPOPT_SSRR) {
+#define INA	struct in_ifaddr *
+#define SA	struct sockaddr *
+			    if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0)
+					ia = (INA)ifa_ifwithnet((SA)&ipaddr);
+			} else
+				ia = ip_rtaddr(ipaddr.sin_addr);
+			if (ia == 0) {
+				type = ICMP_UNREACH;
+				code = ICMP_UNREACH_SRCFAIL;
+				goto bad;
+			}
+			/* Rewrite the destination to the next hop and record
+			 * our outgoing address in the vacated route slot. */
+			ip->ip_dst = ipaddr.sin_addr;
+			(void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
+			    sizeof(struct in_addr));
+			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
+			/*
+			 * Let ip_intr's mcast routing check handle mcast pkts
+			 */
+			forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
+			break;
+
+		case IPOPT_RR:
+			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
+				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+				goto bad;
+			}
+			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
+				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
+				goto bad;
+			}
+			/*
+			 * If no space remains, ignore.
+			 */
+			off--;			/* 0 origin */
+			if (off > optlen - sizeof(struct in_addr))
+				break;
+			(void)memcpy(&ipaddr.sin_addr, &ip->ip_dst,
+			    sizeof(ipaddr.sin_addr));
+			/*
+			 * locate outgoing interface; if we're the destination,
+			 * use the incoming interface (should be same).
+			 */
+			if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0 &&
+			    (ia = ip_rtaddr(ipaddr.sin_addr)) == 0) {
+				type = ICMP_UNREACH;
+				code = ICMP_UNREACH_HOST;
+				goto bad;
+			}
+			(void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
+			    sizeof(struct in_addr));
+			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
+			break;
+
+		case IPOPT_TS:
+			code = cp - (u_char *)ip;
+			ipt = (struct ip_timestamp *)cp;
+			if (ipt->ipt_len < 5)
+				goto bad;
+			if (ipt->ipt_ptr > ipt->ipt_len - sizeof(int32_t)) {
+				/* Timestamp area full: bump the overflow
+				 * counter; if it wraps, the option is bad. */
+				if (++ipt->ipt_oflw == 0)
+					goto bad;
+				break;
+			}
+			sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
+			switch (ipt->ipt_flg) {
+
+			case IPOPT_TS_TSONLY:
+				break;
+
+			case IPOPT_TS_TSANDADDR:
+				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
+				    sizeof(struct in_addr) > ipt->ipt_len)
+					goto bad;
+				ipaddr.sin_addr = dst;
+				ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
+							    m->m_pkthdr.rcvif);
+				if (ia == 0)
+					continue;
+				(void)memcpy(sin, &IA_SIN(ia)->sin_addr,
+				    sizeof(struct in_addr));
+				ipt->ipt_ptr += sizeof(struct in_addr);
+				break;
+
+			case IPOPT_TS_PRESPEC:
+				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
+				    sizeof(struct in_addr) > ipt->ipt_len)
+					goto bad;
+				(void)memcpy(&ipaddr.sin_addr, sin,
+				    sizeof(struct in_addr));
+				/* Only stamp if the prespecified address
+				 * belongs to this host. */
+				if (ifa_ifwithaddr((SA)&ipaddr) == 0)
+					continue;
+				ipt->ipt_ptr += sizeof(struct in_addr);
+				break;
+
+			default:
+				goto bad;
+			}
+			ntime = iptime();
+			(void)memcpy(cp + ipt->ipt_ptr - 1, &ntime,
+			    sizeof(n_time));
+			ipt->ipt_ptr += sizeof(n_time);
+		}
+	}
+	if (forward && ipforwarding) {
+		ip_forward(m, 1);
+		return (1);
+	}
+	return (0);
+bad:
+	ip->ip_len -= IP_VHL_HL(ip->ip_vhl) << 2;   /* XXX icmp_error adds in hdr length */
+	icmp_error(m, type, code, 0, 0);
+	ipstat.ips_badoptions++;
+	return (1);
+}
+
+/*
+ * Given address of next destination (final or next hop),
+ * return internet address info of interface to be used to get there.
+ */
+static struct in_ifaddr *
+ip_rtaddr(dst)
+	 struct in_addr dst;
+{
+	register struct sockaddr_in *sin;
+
+	/*
+	 * ipforward_rt is a one-entry route cache shared with the
+	 * forwarding path; only re-resolve when it is empty or holds a
+	 * route for a different destination.
+	 */
+	sin = (struct sockaddr_in *) &ipforward_rt.ro_dst;
+
+	if (ipforward_rt.ro_rt == 0 || dst.s_addr != sin->sin_addr.s_addr) {
+		if (ipforward_rt.ro_rt) {
+			RTFREE(ipforward_rt.ro_rt);
+			ipforward_rt.ro_rt = 0;
+		}
+		sin->sin_family = AF_INET;
+		sin->sin_len = sizeof(*sin);
+		sin->sin_addr = dst;
+
+		rtalloc_ign(&ipforward_rt, RTF_PRCLONING);
+	}
+	/* NULL means no route to the destination exists. */
+	if (ipforward_rt.ro_rt == 0)
+		return ((struct in_ifaddr *)0);
+	return ((struct in_ifaddr *) ipforward_rt.ro_rt->rt_ifa);
+}
+
+/*
+ * Save incoming source route for use in replies,
+ * to be picked up later by ip_srcroute if the receiver is interested.
+ */
+void
+save_rte(option, dst)
+	u_char *option;
+	struct in_addr dst;
+{
+	unsigned olen;
+
+	olen = option[IPOPT_OLEN];
+#if DIAGNOSTIC
+	if (ipprintfs)
+		printf("save_rte: olen %d\n", olen);
+#endif
+	/* Bounds check: silently ignore routes too long for ip_srcrt. */
+	if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst)))
+		return;
+	bcopy(option, ip_srcrt.srcopt, olen);
+	/* Number of recorded hop addresses in the option payload. */
+	ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
+	ip_srcrt.dst = dst;
+}
+
+/*
+ * Retrieve incoming source route for use in replies,
+ * in the same form used by setsockopt.
+ * The first hop is placed before the options, will be removed later.
+ */
+struct mbuf *
+ip_srcroute()
+{
+	register struct in_addr *p, *q;
+	register struct mbuf *m;
+
+	/* No route was recorded by save_rte(); nothing to return. */
+	if (ip_nhops == 0)
+		return ((struct mbuf *)0);
+	m = m_get(M_DONTWAIT, MT_HEADER);
+	if (m == 0)
+		return ((struct mbuf *)0);
+
+#define OPTSIZ	(sizeof(ip_srcrt.nop) + sizeof(ip_srcrt.srcopt))
+
+	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
+	m->m_len = ip_nhops * sizeof(struct in_addr) + sizeof(struct in_addr) +
+	    OPTSIZ;
+#if DIAGNOSTIC
+	if (ipprintfs)
+		printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
+#endif
+
+	/*
+	 * First save first hop for return route
+	 */
+	p = &ip_srcrt.route[ip_nhops - 1];
+	*(mtod(m, struct in_addr *)) = *p--;
+#if DIAGNOSTIC
+	if (ipprintfs)
+		printf(" hops %lx", (u_long)ntohl(mtod(m, struct in_addr *)->s_addr));
+#endif
+
+	/*
+	 * Copy option fields and padding (nop) to mbuf.
+	 */
+	ip_srcrt.nop = IPOPT_NOP;
+	ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
+	(void)memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
+	    &ip_srcrt.nop, OPTSIZ);
+	q = (struct in_addr *)(mtod(m, caddr_t) +
+	    sizeof(struct in_addr) + OPTSIZ);
+#undef OPTSIZ
+	/*
+	 * Record return path as an IP source route,
+	 * reversing the path (pointers are now aligned).
+	 * p walks backwards over the recorded hops while q fills the
+	 * reply option forwards, producing the reversed route.
+	 */
+	while (p >= ip_srcrt.route) {
+#if DIAGNOSTIC
+		if (ipprintfs)
+			printf(" %lx", (u_long)ntohl(q->s_addr));
+#endif
+		*q++ = *p--;
+	}
+	/*
+	 * Last hop goes to final destination.
+	 */
+	*q = ip_srcrt.dst;
+#if DIAGNOSTIC
+	if (ipprintfs)
+		printf(" %lx\n", (u_long)ntohl(q->s_addr));
+#endif
+	return (m);
+}
+
+/*
+ * Strip out IP options, at higher
+ * level protocol in the kernel.
+ * Second argument is buffer to which options
+ * will be moved, and return value is their length.
+ * XXX should be deleted; last arg currently ignored.
+ */
+void
+ip_stripoptions(m, mopt)
+	register struct mbuf *m;
+	struct mbuf *mopt;
+{
+	register int i;
+	struct ip *ip = mtod(m, struct ip *);
+	register caddr_t opts;
+	int olen;
+
+	/* Option bytes = header length minus the fixed 20-byte header. */
+	olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
+	opts = (caddr_t)(ip + 1);
+	/* Slide the payload down over the options within this mbuf
+	 * (assumes header + options + some data are contiguous here). */
+	i = m->m_len - (sizeof (struct ip) + olen);
+	bcopy(opts + olen, opts, (unsigned)i);
+	m->m_len -= olen;
+	if (m->m_flags & M_PKTHDR)
+		m->m_pkthdr.len -= olen;
+	/* Rewrite version/header-length to reflect an option-less header. */
+	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
+}
+
+/*
+ * Map ICMP/PRC control codes to errno values handed to protocols;
+ * zero entries mean "no error reported" for that control code.
+ */
+u_char inetctlerrmap[PRC_NCMDS] = {
+	0,		0,		0,		0,
+	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
+	EHOSTUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
+	EMSGSIZE,	EHOSTUNREACH,	0,		0,
+	0,		0,		0,		0,
+	ENOPROTOOPT
+};
+
+/*
+ * Forward a packet. If some error occurs return the sender
+ * an icmp packet. Note we can't always generate a meaningful
+ * icmp message because icmp doesn't have a large enough repertoire
+ * of codes and types.
+ *
+ * If not forwarding, just drop the packet. This could be confusing
+ * if ipforwarding was zero but some routing protocol was advancing
+ * us as a gateway to somewhere. However, we must let the routing
+ * protocol deal with that.
+ *
+ * The srcrt parameter indicates whether the packet is being forwarded
+ * via a source route.
+ */ +#ifndef NATPT +static +#endif +void +ip_forward(m, srcrt) + struct mbuf *m; + int srcrt; +{ + register struct ip *ip = mtod(m, struct ip *); + register struct sockaddr_in *sin; + register struct rtentry *rt; + int error, type = 0, code = 0; + struct mbuf *mcopy; + n_long dest; + struct ifnet *destifp; +#if IPSEC + struct ifnet dummyifp; +#endif + + dest = 0; +#if DIAGNOSTIC + if (ipprintfs) + printf("forward: src %lx dst %lx ttl %x\n", + (u_long)ip->ip_src.s_addr, (u_long)ip->ip_dst.s_addr, + ip->ip_ttl); +#endif + + + if (m->m_flags & M_BCAST || in_canforward(ip->ip_dst) == 0) { + ipstat.ips_cantforward++; + m_freem(m); + return; + } + HTONS(ip->ip_id); + if (ip->ip_ttl <= IPTTLDEC) { + icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); + return; + } + ip->ip_ttl -= IPTTLDEC; + +#if defined(PM) + if (doRoute) + { + struct route *ipfw_rt; + + if ((ipfw_rt = pm_route(m)) != NULL) + { + mcopy = m_copy(m, 0, imin((int)ip->ip_len, 64)); +#if IPSEC + ipsec_setsocket(m, NULL); +#endif /*IPSEC*/ + error = ip_output(m, (struct mbuf *)0, ipfw_rt, + IP_FORWARDING | IP_PROTOCOLROUTE , 0); + goto clearAway; + } + + } +#endif + + sin = (struct sockaddr_in *)&ipforward_rt.ro_dst; + if ((rt = ipforward_rt.ro_rt) == 0 || + ip->ip_dst.s_addr != sin->sin_addr.s_addr) { + if (ipforward_rt.ro_rt) { + RTFREE(ipforward_rt.ro_rt); + ipforward_rt.ro_rt = 0; + } + sin->sin_family = AF_INET; + sin->sin_len = sizeof(*sin); + sin->sin_addr = ip->ip_dst; + + rtalloc_ign(&ipforward_rt, RTF_PRCLONING); + if (ipforward_rt.ro_rt == 0) { + icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0); + return; + } + rt = ipforward_rt.ro_rt; + } + + /* + * Save at most 64 bytes of the packet in case + * we need to generate an ICMP message to the src. + */ + mcopy = m_copy(m, 0, imin((int)ip->ip_len, 64)); + + /* + * If forwarding packet using same interface that it came in on, + * perhaps should send a redirect to sender to shortcut a hop. 
+ * Only send redirect if source is sending directly to us, + * and if packet was not source routed (or has any options). + * Also, don't send redirect if forwarding using a default route + * or a route modified by a redirect. + */ +#define satosin(sa) ((struct sockaddr_in *)(sa)) + if (rt->rt_ifp == m->m_pkthdr.rcvif && + (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 && + satosin(rt_key(rt))->sin_addr.s_addr != 0 && + ipsendredirects && !srcrt) { +#define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa)) + u_long src = ntohl(ip->ip_src.s_addr); + + if (RTA(rt) && + (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) { + if (rt->rt_flags & RTF_GATEWAY) + dest = satosin(rt->rt_gateway)->sin_addr.s_addr; + else + dest = ip->ip_dst.s_addr; + /* Router requirements says to only send host redirects */ + type = ICMP_REDIRECT; + code = ICMP_REDIRECT_HOST; +#if DIAGNOSTIC + if (ipprintfs) + printf("redirect (%d) to %lx\n", code, (u_long)dest); +#endif + } + } + +#if IPSEC + ipsec_setsocket(m, NULL); +#endif /*IPSEC*/ + error = ip_output(m, (struct mbuf *)0, &ipforward_rt, + IP_FORWARDING, 0); +#if defined(PM) + clearAway:; +#endif + if (error) + ipstat.ips_cantforward++; + else { + ipstat.ips_forward++; + if (type) + ipstat.ips_redirectsent++; + else { + if (mcopy) { + ipflow_create(&ipforward_rt, mcopy); + m_freem(mcopy); + } + return; + } + } + if (mcopy == NULL) + return; + destifp = NULL; + + switch (error) { + + case 0: /* forwarded, but need redirect */ + /* type, code set above */ + break; + + case ENETUNREACH: /* shouldn't happen, checked above */ + case EHOSTUNREACH: + case ENETDOWN: + case EHOSTDOWN: + default: + type = ICMP_UNREACH; + code = ICMP_UNREACH_HOST; + break; + + case EMSGSIZE: + type = ICMP_UNREACH; + code = ICMP_UNREACH_NEEDFRAG; +#ifndef IPSEC + if (ipforward_rt.ro_rt) + destifp = ipforward_rt.ro_rt->rt_ifp; +#else + /* + * If the packet is routed over IPsec tunnel, tell the + * originator the tunnel MTU. 
+ * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz + * XXX quickhack!!! + */ + if (ipforward_rt.ro_rt) { + struct secpolicy *sp = NULL; + int ipsecerror; + int ipsechdr; + struct route *ro; + + sp = ipsec4_getpolicybyaddr(mcopy, + IPSEC_DIR_OUTBOUND, + IP_FORWARDING, + &ipsecerror); + + if (sp == NULL) + destifp = ipforward_rt.ro_rt->rt_ifp; + else { + /* count IPsec header size */ + ipsechdr = ipsec4_hdrsiz(mcopy, + IPSEC_DIR_OUTBOUND, + NULL); + + /* + * find the correct route for outer IPv4 + * header, compute tunnel MTU. + * + * XXX BUG ALERT + * The "dummyifp" code relies upon the fact + * that icmp_error() touches only ifp->if_mtu. + */ + /*XXX*/ + destifp = NULL; + if (sp->req != NULL + && sp->req->sav != NULL + && sp->req->sav->sah != NULL) { + ro = &sp->req->sav->sah->sa_route; + if (ro->ro_rt && ro->ro_rt->rt_ifp) { + dummyifp.if_mtu = + ro->ro_rt->rt_ifp->if_mtu; + dummyifp.if_mtu -= ipsechdr; + destifp = &dummyifp; + } + } + + key_freesp(sp); + } + } +#endif /*IPSEC*/ + ipstat.ips_cantfrag++; + break; + + case ENOBUFS: + type = ICMP_SOURCEQUENCH; + code = 0; + break; + } + icmp_error(mcopy, type, code, dest, destifp); +} + +void +ip_savecontrol(inp, mp, ip, m) + register struct inpcb *inp; + register struct mbuf **mp; + register struct ip *ip; + register struct mbuf *m; +{ + if (inp->inp_socket->so_options & SO_TIMESTAMP) { + struct timeval tv; + + microtime(&tv); + *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv), + SCM_TIMESTAMP, SOL_SOCKET); + if (*mp) + mp = &(*mp)->m_next; + } + if (inp->inp_flags & INP_RECVDSTADDR) { + *mp = sbcreatecontrol((caddr_t) &ip->ip_dst, + sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } +#ifdef notyet + /* XXX + * Moving these out of udp_input() made them even more broken + * than they already were. 
+ */ + /* options were tossed already */ + if (inp->inp_flags & INP_RECVOPTS) { + *mp = sbcreatecontrol((caddr_t) opts_deleted_above, + sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } + /* ip_srcroute doesn't do what we want here, need to fix */ + if (inp->inp_flags & INP_RECVRETOPTS) { + *mp = sbcreatecontrol((caddr_t) ip_srcroute(), + sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } +#endif + if (inp->inp_flags & INP_RECVIF) { + struct ifnet *ifp; + struct sdlbuf { + struct sockaddr_dl sdl; + u_char pad[32]; + } sdlbuf; + struct sockaddr_dl *sdp; + struct sockaddr_dl *sdl2 = &sdlbuf.sdl; + + if (((ifp = m->m_pkthdr.rcvif)) + && ( ifp->if_index && (ifp->if_index <= if_index))) { + sdp = (struct sockaddr_dl *)(ifnet_addrs + [ifp->if_index - 1]->ifa_addr); + /* + * Change our mind and don't try copy. + */ + if ((sdp->sdl_family != AF_LINK) + || (sdp->sdl_len > sizeof(sdlbuf))) { + goto makedummy; + } + bcopy(sdp, sdl2, sdp->sdl_len); + } else { +makedummy: + sdl2->sdl_len + = offsetof(struct sockaddr_dl, sdl_data[0]); + sdl2->sdl_family = AF_LINK; + sdl2->sdl_index = 0; + sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0; + } + *mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len, + IP_RECVIF, IPPROTO_IP); + if (*mp) + mp = &(*mp)->m_next; + } +} + +int +ip_rsvp_init(struct socket *so) +{ + if (so->so_type != SOCK_RAW || + so->so_proto->pr_protocol != IPPROTO_RSVP) + return EOPNOTSUPP; + + if (ip_rsvpd != NULL) + return EADDRINUSE; + + ip_rsvpd = so; + /* + * This may seem silly, but we need to be sure we don't over-increment + * the RSVP counter, in case something slips up. + */ + if (!ip_rsvp_on) { + ip_rsvp_on = 1; + rsvp_on++; + } + + return 0; +} + +int +ip_rsvp_done(void) +{ + ip_rsvpd = NULL; + /* + * This may seem silly, but we need to be sure we don't over-decrement + * the RSVP counter, in case something slips up. 
+ */ + if (ip_rsvp_on) { + ip_rsvp_on = 0; + rsvp_on--; + } + return 0; +} diff --git a/bsd/netinet/ip_log.c b/bsd/netinet/ip_log.c new file mode 100644 index 000000000..8a588a370 --- /dev/null +++ b/bsd/netinet/ip_log.c @@ -0,0 +1,503 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. 
+ * + */ +#include "opt_ipfilter.h" + +#if IPFILTER_LOG +# ifndef SOLARIS +# define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) +# endif + +# if defined(KERNEL) && !defined(_KERNEL) +# define _KERNEL +# endif +# ifdef __FreeBSD__ +# if defined(_KERNEL) && !defined(IPFILTER_LKM) +# define __FreeBSD_version 300000 /* this will do as a hack */ +# else +# include +# endif +# endif +# ifndef _KERNEL +# include +# include +# include +# include +# endif +# include +# include +# include +# include +# if __FreeBSD_version >= 220000 && defined(_KERNEL) +# include +# include +# else +# include +# endif +# include +# if defined(_KERNEL) && !defined(linux) +# include +# endif +# include +# if !SOLARIS +# if (NetBSD > 199609) || (OpenBSD > 199603) || defined(__FreeBSD__) +# include +# else +# include +# endif +# ifndef linux +# include +# endif +# else +# include +# include +# include +# include +# include +# include +# include +# include +# include +# endif +# ifndef linux +# include +# endif +# include + +# include +# ifdef sun +# include +# endif +# if __FreeBSD_version >= 300000 +# include +# include +# endif +# include +# include +# ifdef __sgi +# include +# ifdef IFF_DRVRLOCK /* IRIX6 */ +# include +# endif +# endif +# if !defined(linux) && !(defined(__sgi) && !defined(IFF_DRVRLOCK)) /*IRIX<6*/ +# include +# endif +# include +# include +# include +# include +# include +# ifndef linux +# include +# endif +# ifndef _KERNEL +# include +# endif +# include "netinet/ip_compat.h" +# include +# include "netinet/ip_fil.h" +# include "netinet/ip_proxy.h" +# include "netinet/ip_nat.h" +# include "netinet/ip_frag.h" +# include "netinet/ip_state.h" +# include "netinet/ip_auth.h" +# ifndef MIN +# define MIN(a,b) (((a)<(b))?(a):(b)) +# endif + + +# if SOLARIS || defined(__sgi) +extern kmutex_t ipl_mutex; +# if SOLARIS +extern kcondvar_t iplwait; +# endif +# endif + +iplog_t **iplh[IPL_LOGMAX+1], *iplt[IPL_LOGMAX+1]; +int iplused[IPL_LOGMAX+1]; +static u_long 
iplcrc[IPL_LOGMAX+1]; +static u_long iplcrcinit; +#ifdef linux +static struct wait_queue *iplwait[IPL_LOGMAX+1]; +#endif + + +/* + * Initialise log buffers & pointers. Also iniialised the CRC to a local + * secret for use in calculating the "last log checksum". + */ +void ipflog_init() +{ + int i; + + for (i = IPL_LOGMAX; i >= 0; i--) { + iplt[i] = NULL; + iplh[i] = &iplt[i]; + iplused[i] = 0; + } +# if defined(__FreeBSD__) && __FreeBSD_version >= 300000 + read_random(&iplcrcinit, sizeof iplcrcinit); +# else + { + struct timeval tv; + +#if BSD >= 199306 || defined(__FreeBSD__) || defined(__sgi) + microtime(&tv); +# else + uniqtime(&tv); +# endif + iplcrcinit = tv.tv_sec ^ (tv.tv_usec << 8) ^ tv.tv_usec; + } +# endif +} + + +/* + * ipflog + * Create a log record for a packet given that it has been triggered by a + * rule (or the default setting). Calculate the transport protocol header + * size using predetermined size of a couple of popular protocols and thus + * how much data to copy into the log, including part of the data body if + * requested. + */ +int ipflog(flags, ip, fin, m) +u_int flags; +ip_t *ip; +fr_info_t *fin; +mb_t *m; +{ + ipflog_t ipfl; + register int mlen, hlen; + u_long crc; + size_t sizes[2]; + void *ptrs[2]; + int types[2]; +# if SOLARIS + ill_t *ifp = fin->fin_ifp; +# else + struct ifnet *ifp = fin->fin_ifp; +# endif + + /* + * calculate header size. + */ + hlen = fin->fin_hlen; + if (ip->ip_p == IPPROTO_TCP) + hlen += MIN(sizeof(tcphdr_t), fin->fin_dlen); + else if (ip->ip_p == IPPROTO_UDP) + hlen += MIN(sizeof(udphdr_t), fin->fin_dlen); + else if (ip->ip_p == IPPROTO_ICMP) { + struct icmp *icmp = (struct icmp *)((char *)ip + hlen); + + /* + * For ICMP, if the packet is an error packet, also include + * the information about the packet which caused the error. 
+ */ + switch (icmp->icmp_type) + { + case ICMP_UNREACH : + case ICMP_SOURCEQUENCH : + case ICMP_REDIRECT : + case ICMP_TIMXCEED : + case ICMP_PARAMPROB : + hlen += MIN(sizeof(struct icmp) + 8, fin->fin_dlen); + break; + default : + hlen += MIN(sizeof(struct icmp), fin->fin_dlen); + break; + } + } + /* + * Get the interface number and name to which this packet is + * currently associated. + */ +# if SOLARIS + ipfl.fl_unit = (u_char)ifp->ill_ppa; + bcopy(ifp->ill_name, ipfl.fl_ifname, MIN(ifp->ill_name_length, 4)); + mlen = (flags & FR_LOGBODY) ? MIN(msgdsize(m) - hlen, 128) : 0; +# else +# if (defined(NetBSD) && (NetBSD <= 1991011) && (NetBSD >= 199603)) || \ + (defined(OpenBSD) && (OpenBSD >= 199603)) + strncpy(ipfl.fl_ifname, ifp->if_xname, IFNAMSIZ); +# else +# ifndef linux + ipfl.fl_unit = (u_char)ifp->if_unit; +# endif + if ((ipfl.fl_ifname[0] = ifp->if_name[0])) + if ((ipfl.fl_ifname[1] = ifp->if_name[1])) + if ((ipfl.fl_ifname[2] = ifp->if_name[2])) + ipfl.fl_ifname[3] = ifp->if_name[3]; +# endif + mlen = (flags & FR_LOGBODY) ? MIN(ip->ip_len - hlen, 128) : 0; +# endif + ipfl.fl_plen = (u_char)mlen; + ipfl.fl_hlen = (u_char)hlen; + ipfl.fl_rule = fin->fin_rule; + ipfl.fl_group = fin->fin_group; + ipfl.fl_flags = flags; + ptrs[0] = (void *)&ipfl; + sizes[0] = sizeof(ipfl); + types[0] = 0; +#if SOLARIS + /* + * Are we copied from the mblk or an aligned array ? 
+ */ + if (ip == (ip_t *)m->b_rptr) { + ptrs[1] = m; + sizes[1] = hlen + mlen; + types[1] = 1; + } else { + ptrs[1] = ip; + sizes[1] = hlen + mlen; + types[1] = 0; + } +#else + ptrs[1] = m; + sizes[1] = hlen + mlen; + types[1] = 1; +#endif + crc = (ipf_cksum((u_short *)fin, FI_CSIZE) << 8) + iplcrcinit; + return ipllog(IPL_LOGIPF, crc, ptrs, sizes, types, 2); +} + + +/* + * ipllog + */ +int ipllog(dev, crc, items, itemsz, types, cnt) +int dev; +u_long crc; +void **items; +size_t *itemsz; +int *types, cnt; +{ + iplog_t *ipl; + caddr_t buf, s; + int len, i; + + /* + * Check to see if this log record has a CRC which matches the last + * record logged. If it does, just up the count on the previous one + * rather than create a new one. + */ + if (crc) { + MUTEX_ENTER(&ipl_mutex); + if ((iplcrc[dev] == crc) && *iplh[dev]) { + (*iplh[dev])->ipl_count++; + MUTEX_EXIT(&ipl_mutex); + return 1; + } + iplcrc[dev] = crc; + MUTEX_EXIT(&ipl_mutex); + } + + /* + * Get the total amount of data to be logged. + */ + for (i = 0, len = sizeof(iplog_t); i < cnt; i++) + len += itemsz[i]; + + /* + * check that we have space to record this information and can + * allocate that much. + */ + KMALLOC(buf, caddr_t, len); + if (!buf) + return 0; + MUTEX_ENTER(&ipl_mutex); + if ((iplused[dev] + len) > IPLLOGSIZE) { + MUTEX_EXIT(&ipl_mutex); + KFREES(buf, len); + return 0; + } + iplused[dev] += len; + MUTEX_EXIT(&ipl_mutex); + + /* + * advance the log pointer to the next empty record and deduct the + * amount of space we're going to use. + */ + ipl = (iplog_t *)buf; + ipl->ipl_magic = IPL_MAGIC; + ipl->ipl_count = 1; + ipl->ipl_next = NULL; + ipl->ipl_dsize = len; +# if SOLARIS || defined(sun) || defined(linux) + uniqtime((struct timeval *)&ipl->ipl_sec); +# else +# if BSD >= 199306 || defined(__FreeBSD__) || defined(__sgi) + microtime((struct timeval *)&ipl->ipl_sec); +# endif +# endif + + /* + * Loop through all the items to be logged, copying each one to the + * buffer. 
Use bcopy for normal data or the mb_t copyout routine. + */ + for (i = 0, s = buf + sizeof(*ipl); i < cnt; i++) { + if (types[i] == 0) + bcopy(items[i], s, itemsz[i]); + else if (types[i] == 1) { +# if SOLARIS + copyout_mblk(items[i], 0, itemsz[i], s); +# else + m_copydata(items[i], 0, itemsz[i], s); +# endif + } + s += itemsz[i]; + } + MUTEX_ENTER(&ipl_mutex); + *iplh[dev] = ipl; + iplh[dev] = &ipl->ipl_next; +# if SOLARIS + cv_signal(&iplwait); + mutex_exit(&ipl_mutex); +# else + MUTEX_EXIT(&ipl_mutex); +# ifdef linux + wake_up_interruptible(&iplwait[dev]); +# else + wakeup(&iplh[dev]); +# endif +# endif + return 1; +} + + +int ipflog_read(unit, uio) +int unit; +struct uio *uio; +{ + iplog_t *ipl; + int error = 0, dlen, copied; +# if defined(_KERNEL) && !SOLARIS + int s; +# endif + + /* + * Sanity checks. Make sure the minor # is valid and we're copying + * a valid chunk of data. + */ + if ((IPL_LOGMAX < unit) || (unit < 0)) + return ENXIO; + if (!uio->uio_resid) + return 0; + if ((uio->uio_resid < sizeof(iplog_t)) || + (uio->uio_resid > IPLLOGSIZE)) + return EINVAL; + + /* + * Lock the log so we can snapshot the variables. Wait for a signal + * if the log is empty. 
+ */ + SPL_NET(s); + MUTEX_ENTER(&ipl_mutex); + + while (!iplused[unit] || !iplt[unit]) { +# if SOLARIS && defined(_KERNEL) + if (!cv_wait_sig(&iplwait, &ipl_mutex)) { + MUTEX_EXIT(&ipl_mutex); + return EINTR; + } +# else +# ifdef linux + interruptible_sleep_on(&iplwait[unit]); + if (current->signal & ~current->blocked) + return -EINTR; +# else + MUTEX_EXIT(&ipl_mutex); + SPL_X(s); + error = SLEEP(&iplh[unit], "ipl sleep"); + if (error) + return error; + SPL_NET(s); + MUTEX_ENTER(&ipl_mutex); +# endif /* linux */ +# endif /* SOLARIS */ + } + +# if BSD >= 199306 || defined(__FreeBSD__) + uio->uio_rw = UIO_READ; +# endif + + for (copied = 0; (ipl = iplt[unit]); copied += dlen) { + dlen = ipl->ipl_dsize; + if (dlen + sizeof(iplog_t) > uio->uio_resid) + break; + /* + * Don't hold the mutex over the uiomove call. + */ + iplt[unit] = ipl->ipl_next; + MUTEX_EXIT(&ipl_mutex); + SPL_X(s); + error = UIOMOVE((caddr_t)ipl, ipl->ipl_dsize, UIO_READ, uio); + KFREES((caddr_t)ipl, ipl->ipl_dsize); + if (error) + break; + SPL_NET(s); + MUTEX_ENTER(&ipl_mutex); + iplused[unit] -= dlen; + } + if (!ipl) { + iplused[unit] = 0; + iplh[unit] = &iplt[unit]; + } + + if (!error) { + MUTEX_EXIT(&ipl_mutex); + SPL_X(s); + } +#ifdef linux + if (!error) + return copied; + return -error; +#else + return error; +#endif +} + + +int ipflog_clear(unit) +int unit; +{ + iplog_t *ipl; + int used; + + while ((ipl = iplt[unit])) { + iplt[unit] = ipl->ipl_next; + KFREES((caddr_t)ipl, ipl->ipl_dsize); + } + iplh[unit] = &iplt[unit]; + used = iplused[unit]; + iplused[unit] = 0; + iplcrc[unit] = 0; + return used; +} +#endif /* IPFILTER_LOG */ diff --git a/bsd/netinet/ip_mroute.c b/bsd/netinet/ip_mroute.c new file mode 100644 index 000000000..3ceeae10b --- /dev/null +++ b/bsd/netinet/ip_mroute.c @@ -0,0 +1,2320 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IP multicast forwarding procedures + * + * Written by David Waitzman, BBN Labs, August 1988. + * Modified by Steve Deering, Stanford, February 1989. + * Modified by Mark J. 
Steiglitz, Stanford, May, 1991 + * Modified by Van Jacobson, LBL, January 1993 + * Modified by Ajit Thyagarajan, PARC, August 1993 + * Modified by Bill Fenner, PARC, April 1995 + * + * MROUTING Revision: 3.5 + */ + +#if ISFB31 +#include "opt_mrouting.h" +#else +#define MROUTE_LKM 0 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef INET6 +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#ifndef NTOHL +#if BYTE_ORDER != BIG_ENDIAN +#define NTOHL(d) ((d) = ntohl((d))) +#define NTOHS(d) ((d) = ntohs((u_short)(d))) +#define HTONL(d) ((d) = htonl((d))) +#define HTONS(d) ((d) = htons((u_short)(d))) +#else +#define NTOHL(d) +#define NTOHS(d) +#define HTONL(d) +#define HTONS(d) +#endif +#endif + +#if !MROUTING +extern u_long _ip_mcast_src __P((int vifi)); +extern int _ip_mforward __P((struct ip *ip, struct ifnet *ifp, + struct mbuf *m, struct ip_moptions *imo)); +extern int _ip_mrouter_done __P((void)); +extern int _ip_mrouter_get __P((struct socket *so, struct sockopt *sopt)); +extern int _ip_mrouter_set __P((struct socket *so, struct sockopt *sopt)); +extern int _mrt_ioctl __P((int req, caddr_t data, struct proc *p)); + +/* + * Dummy routines and globals used when multicast routing is not compiled in. 
+ */ + +struct socket *ip_mrouter = NULL; +u_int rsvpdebug = 0; + +int +_ip_mrouter_set(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + return(EOPNOTSUPP); +} + +int (*ip_mrouter_set)(struct socket *, struct sockopt *) = _ip_mrouter_set; + + +int +_ip_mrouter_get(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + return(EOPNOTSUPP); +} + +int (*ip_mrouter_get)(struct socket *, struct sockopt *) = _ip_mrouter_get; + +int +_ip_mrouter_done() +{ + return(0); +} + +int (*ip_mrouter_done)(void) = _ip_mrouter_done; + +int +_ip_mforward(ip, ifp, m, imo) + struct ip *ip; + struct ifnet *ifp; + struct mbuf *m; + struct ip_moptions *imo; +{ + return(0); +} + +int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *, + struct ip_moptions *) = _ip_mforward; + +int +_mrt_ioctl(int req, caddr_t data, struct proc *p) +{ + return EOPNOTSUPP; +} + +int (*mrt_ioctl)(int, caddr_t, struct proc *) = _mrt_ioctl; + +void +rsvp_input(m, iphlen) /* XXX must fixup manually */ + struct mbuf *m; + int iphlen; +{ + /* Can still get packets with rsvp_on = 0 if there is a local member + * of the group to which the RSVP packet is addressed. But in this + * case we want to throw the packet away. + */ + if (!rsvp_on) { + m_freem(m); + return; + } + + if (ip_rsvpd != NULL) { + if (rsvpdebug) + printf("rsvp_input: Sending packet up old-style socket\n"); + rip_input(m, iphlen); + return; + } + /* Drop the packet */ + m_freem(m); +} + +void ipip_input(struct mbuf *m, int iphlen) { /* XXX must fixup manually */ + rip_input(m, iphlen); +} + +int (*legal_vif_num)(int) = 0; + +/* + * This should never be called, since IP_MULTICAST_VIF should fail, but + * just in case it does get called, the code a little lower in ip_output + * will assign the packet a local address. 
+ */ +u_long +_ip_mcast_src(int vifi) { return INADDR_ANY; } +u_long (*ip_mcast_src)(int) = _ip_mcast_src; + +int +ip_rsvp_vif_init(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + return(EINVAL); +} + +int +ip_rsvp_vif_done(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + return(EINVAL); +} + +void +ip_rsvp_force_done(so) + struct socket *so; +{ + return; +} + +#else /* MROUTING */ + +#define M_HASCL(m) ((m)->m_flags & M_EXT) + +#define INSIZ sizeof(struct in_addr) +#define same(a1, a2) \ + (bcmp((caddr_t)(a1), (caddr_t)(a2), INSIZ) == 0) + + +/* + * Globals. All but ip_mrouter and ip_mrtproto could be static, + * except for netstat or debugging purposes. + */ +#if !MROUTE_LKM +struct socket *ip_mrouter = NULL; +static struct mrtstat mrtstat; +#else /* MROUTE_LKM */ +extern void X_ipip_input __P((struct mbuf *m, int iphlen)); +extern struct mrtstat mrtstat; +static int ip_mrtproto; +#endif + +#define NO_RTE_FOUND 0x1 +#define RTE_FOUND 0x2 + +static struct mfc *mfctable[MFCTBLSIZ]; +static u_char nexpire[MFCTBLSIZ]; +static struct vif viftable[MAXVIFS]; +static u_int mrtdebug = 0; /* debug level */ +#define DEBUG_MFC 0x02 +#define DEBUG_FORWARD 0x04 +#define DEBUG_EXPIRE 0x08 +#define DEBUG_XMIT 0x10 +static u_int tbfdebug = 0; /* tbf debug level */ +static u_int rsvpdebug = 0; /* rsvp debug level */ + + + +#define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */ +#define UPCALL_EXPIRE 6 /* number of timeouts */ + +/* + * Define the token bucket filter structures + * tbftable -> each vif has one of these for storing info + */ + +static struct tbf tbftable[MAXVIFS]; +#define TBF_REPROCESS (hz / 100) /* 100x / second */ + +/* + * 'Interfaces' associated with decapsulator (so we can tell + * packets that went through it from ones that get reflected + * by a broken gateway). These interfaces are never linked into + * the system ifnet list & no routes point to them. I.e., packets + * can't be sent this way. 
They only exist as a placeholder for + * multicast source verification. + */ +static struct ifnet multicast_decap_if[MAXVIFS]; + +#define ENCAP_TTL 64 +#define ENCAP_PROTO IPPROTO_IPIP /* 4 */ + +/* prototype IP hdr for encapsulated packets */ +static struct ip multicast_encap_iphdr = { +#if BYTE_ORDER == LITTLE_ENDIAN + sizeof(struct ip) >> 2, IPVERSION, +#else + IPVERSION, sizeof(struct ip) >> 2, +#endif + 0, /* tos */ + sizeof(struct ip), /* total length */ + 0, /* id */ + 0, /* frag offset */ + ENCAP_TTL, ENCAP_PROTO, + 0, /* checksum */ +}; + +/* + * Private variables. + */ +static vifi_t numvifs = 0; +static int have_encap_tunnel = 0; + +/* + * one-back cache used by ipip_input to locate a tunnel's vif + * given a datagram's src ip address. + */ +static u_long last_encap_src; +static struct vif *last_encap_vif; + +static u_long X_ip_mcast_src __P((int vifi)); +static int X_ip_mforward __P((struct ip *ip, struct ifnet *ifp, struct mbuf *m, struct ip_moptions *imo)); +static int X_ip_mrouter_done __P((void)); +static int X_ip_mrouter_get __P((struct socket *so, struct sockopt *m)); +static int X_ip_mrouter_set __P((struct socket *so, struct sockopt *m)); +static int X_legal_vif_num __P((int vif)); +static int X_mrt_ioctl __P((int cmd, caddr_t data)); + +static int get_sg_cnt(struct sioc_sg_req *); +static int get_vif_cnt(struct sioc_vif_req *); +static int ip_mrouter_init(struct socket *, int); +static int add_vif(struct vifctl *); +static int del_vif(vifi_t); +static int add_mfc(struct mfcctl *); +static int del_mfc(struct mfcctl *); +static int socket_send(struct socket *, struct mbuf *, struct sockaddr_in *); +static int set_assert(int); +static void expire_upcalls(void *); +static int ip_mdq(struct mbuf *, struct ifnet *, struct mfc *, + vifi_t); +static void phyint_send(struct ip *, struct vif *, struct mbuf *); +static void encap_send(struct ip *, struct vif *, struct mbuf *); +static void tbf_control(struct vif *, struct mbuf *, struct ip *, u_long); 
+static void tbf_queue(struct vif *, struct mbuf *); +static void tbf_process_q(struct vif *); +static void tbf_reprocess_q(void *); +static int tbf_dq_sel(struct vif *, struct ip *); +static void tbf_send_packet(struct vif *, struct mbuf *); +static void tbf_update_tokens(struct vif *); +static int priority(struct vif *, struct ip *); +void multiencap_decap(struct mbuf *); + +/* + * whether or not special PIM assert processing is enabled. + */ +static int pim_assert; +/* + * Rate limit for assert notification messages, in usec + */ +#define ASSERT_MSG_TIME 3000000 + +/* + * Hash function for a source, group entry + */ +#define MFCHASH(a, g) MFCHASHMOD(((a) >> 20) ^ ((a) >> 10) ^ (a) ^ \ + ((g) >> 20) ^ ((g) >> 10) ^ (g)) + +/* + * Find a route for a given origin IP address and Multicast group address + * Type of service parameter to be added in the future!!! + */ + +#define MFCFIND(o, g, rt) { \ + register struct mfc *_rt = mfctable[MFCHASH(o,g)]; \ + rt = NULL; \ + ++mrtstat.mrts_mfc_lookups; \ + while (_rt) { \ + if ((_rt->mfc_origin.s_addr == o) && \ + (_rt->mfc_mcastgrp.s_addr == g) && \ + (_rt->mfc_stall == NULL)) { \ + rt = _rt; \ + break; \ + } \ + _rt = _rt->mfc_next; \ + } \ + if (rt == NULL) { \ + ++mrtstat.mrts_mfc_misses; \ + } \ +} + + +/* + * Macros to compute elapsed time efficiently + * Borrowed from Van Jacobson's scheduling code + */ +#define TV_DELTA(a, b, delta) { \ + register int xxs; \ + \ + delta = (a).tv_usec - (b).tv_usec; \ + if ((xxs = (a).tv_sec - (b).tv_sec)) { \ + switch (xxs) { \ + case 2: \ + delta += 1000000; \ + /* fall through */ \ + case 1: \ + delta += 1000000; \ + break; \ + default: \ + delta += (1000000 * xxs); \ + } \ + } \ +} + +#define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \ + (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec) + +#if UPCALL_TIMING +u_long upcall_data[51]; +static void collate(struct timeval *); +#endif /* UPCALL_TIMING */ + + +/* + * Handle MRT setsockopt commands to modify the multicast routing 
tables. + */ +static int +X_ip_mrouter_set(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error, optval; + vifi_t vifi; + struct vifctl vifc; + struct mfcctl mfc; + + if (so != ip_mrouter && sopt->sopt_name != MRT_INIT) + return (EPERM); + + error = 0; + switch (sopt->sopt_name) { + case MRT_INIT: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + error = ip_mrouter_init(so, optval); + break; + + case MRT_DONE: + error = ip_mrouter_done(); + break; + + case MRT_ADD_VIF: + error = sooptcopyin(sopt, &vifc, sizeof vifc, sizeof vifc); + if (error) + break; + error = add_vif(&vifc); + break; + + case MRT_DEL_VIF: + error = sooptcopyin(sopt, &vifi, sizeof vifi, sizeof vifi); + if (error) + break; + error = del_vif(vifi); + break; + + case MRT_ADD_MFC: + case MRT_DEL_MFC: + error = sooptcopyin(sopt, &mfc, sizeof mfc, sizeof mfc); + if (error) + break; + if (sopt->sopt_name == MRT_ADD_MFC) + error = add_mfc(&mfc); + else + error = del_mfc(&mfc); + break; + + case MRT_ASSERT: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + set_assert(optval); + break; + + default: + error = EOPNOTSUPP; + break; + } + return (error); +} + +#if !MROUTE_LKM +int (*ip_mrouter_set)(struct socket *, struct sockopt *) = X_ip_mrouter_set; +#endif + +/* + * Handle MRT getsockopt commands + */ +static int +X_ip_mrouter_get(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error; + static int version = 0x0305; /* !!! why is this here? 
XXX */ + + switch (sopt->sopt_name) { + case MRT_VERSION: + error = sooptcopyout(sopt, &version, sizeof version); + break; + + case MRT_ASSERT: + error = sooptcopyout(sopt, &pim_assert, sizeof pim_assert); + break; + default: + error = EOPNOTSUPP; + break; + } + return (error); +} + +#if !MROUTE_LKM +int (*ip_mrouter_get)(struct socket *, struct sockopt *) = X_ip_mrouter_get; +#endif + +/* + * Handle ioctl commands to obtain information from the cache + */ +static int +X_mrt_ioctl(cmd, data) + int cmd; + caddr_t data; +{ + int error = 0; + + switch (cmd) { + case (SIOCGETVIFCNT): + return (get_vif_cnt((struct sioc_vif_req *)data)); + break; + case (SIOCGETSGCNT): + return (get_sg_cnt((struct sioc_sg_req *)data)); + break; + default: + return (EINVAL); + break; + } + return error; +} + +#if !MROUTE_LKM +int (*mrt_ioctl)(int, caddr_t) = X_mrt_ioctl; +#endif + +/* + * returns the packet, byte, rpf-failure count for the source group provided + */ +static int +get_sg_cnt(req) + register struct sioc_sg_req *req; +{ + register struct mfc *rt; + int s; + + s = splnet(); + MFCFIND(req->src.s_addr, req->grp.s_addr, rt); + splx(s); + if (rt != NULL) { + req->pktcnt = rt->mfc_pkt_cnt; + req->bytecnt = rt->mfc_byte_cnt; + req->wrong_if = rt->mfc_wrong_if; + } else + req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff; + + return 0; +} + +/* + * returns the input and output packet and byte counts on the vif provided + */ +static int +get_vif_cnt(req) + register struct sioc_vif_req *req; +{ + register vifi_t vifi = req->vifi; + + if (vifi >= numvifs) return EINVAL; + + req->icount = viftable[vifi].v_pkt_in; + req->ocount = viftable[vifi].v_pkt_out; + req->ibytes = viftable[vifi].v_bytes_in; + req->obytes = viftable[vifi].v_bytes_out; + + return 0; +} + +/* + * Enable multicast routing + */ +static int +ip_mrouter_init(so, version) + struct socket *so; + int version; +{ + if (mrtdebug) + log(LOG_DEBUG,"ip_mrouter_init: so_type = %d, pr_protocol = %d\n", + so->so_type, 
so->so_proto->pr_protocol); + + if (so->so_type != SOCK_RAW || + so->so_proto->pr_protocol != IPPROTO_IGMP) return EOPNOTSUPP; + + if (version != 1) + return ENOPROTOOPT; + + if (ip_mrouter != NULL) return EADDRINUSE; + + ip_mrouter = so; + + bzero((caddr_t)mfctable, sizeof(mfctable)); + bzero((caddr_t)nexpire, sizeof(nexpire)); + + pim_assert = 0; + + timeout(expire_upcalls, (caddr_t)NULL, EXPIRE_TIMEOUT); + + if (mrtdebug) + log(LOG_DEBUG, "ip_mrouter_init\n"); + + return 0; +} + +/* + * Disable multicast routing + */ +static int +X_ip_mrouter_done() +{ + vifi_t vifi; + int i; + struct ifnet *ifp; + struct ifreq ifr; + struct mfc *rt; + struct rtdetq *rte; + int s; + + s = splnet(); + + /* + * For each phyint in use, disable promiscuous reception of all IP + * multicasts. + */ + for (vifi = 0; vifi < numvifs; vifi++) { + if (viftable[vifi].v_lcl_addr.s_addr != 0 && + !(viftable[vifi].v_flags & VIFF_TUNNEL)) { + ((struct sockaddr_in *)&(ifr.ifr_addr))->sin_family = AF_INET; + ((struct sockaddr_in *)&(ifr.ifr_addr))->sin_addr.s_addr + = INADDR_ANY; + ifp = viftable[vifi].v_ifp; + if_allmulti(ifp, 0); + } + } + bzero((caddr_t)tbftable, sizeof(tbftable)); + bzero((caddr_t)viftable, sizeof(viftable)); + numvifs = 0; + pim_assert = 0; + + untimeout(expire_upcalls, (caddr_t)NULL); + + /* + * Free all multicast forwarding cache entries. 
+ */ + for (i = 0; i < MFCTBLSIZ; i++) { + for (rt = mfctable[i]; rt != NULL; ) { + struct mfc *nr = rt->mfc_next; + + for (rte = rt->mfc_stall; rte != NULL; ) { + struct rtdetq *n = rte->next; + + m_freem(rte->m); + FREE(rte, M_MRTABLE); + rte = n; + } + FREE(rt, M_MRTABLE); + rt = nr; + } + } + + bzero((caddr_t)mfctable, sizeof(mfctable)); + + /* + * Reset de-encapsulation cache + */ + last_encap_src = 0; + last_encap_vif = NULL; + have_encap_tunnel = 0; + + ip_mrouter = NULL; + + splx(s); + + if (mrtdebug) + log(LOG_DEBUG, "ip_mrouter_done\n"); + + return 0; +} + +#if !MROUTE_LKM +int (*ip_mrouter_done)(void) = X_ip_mrouter_done; +#endif + +/* + * Set PIM assert processing global + */ +static int +set_assert(i) + int i; +{ + if ((i != 1) && (i != 0)) + return EINVAL; + + pim_assert = i; + + return 0; +} + +/* + * Add a vif to the vif table + */ +static int +add_vif(vifcp) + register struct vifctl *vifcp; +{ + register struct vif *vifp = viftable + vifcp->vifc_vifi; + static struct sockaddr_in sin = {sizeof sin, AF_INET}; + struct ifaddr *ifa; + struct ifnet *ifp; + int error, s; + struct tbf *v_tbf = tbftable + vifcp->vifc_vifi; + + if (vifcp->vifc_vifi >= MAXVIFS) return EINVAL; + if (vifp->v_lcl_addr.s_addr != 0) return EADDRINUSE; + + /* Find the interface with an address in AF_INET family */ + sin.sin_addr = vifcp->vifc_lcl_addr; + ifa = ifa_ifwithaddr((struct sockaddr *)&sin); + if (ifa == 0) return EADDRNOTAVAIL; + ifp = ifa->ifa_ifp; + + if (vifcp->vifc_flags & VIFF_TUNNEL) { + if ((vifcp->vifc_flags & VIFF_SRCRT) == 0) { + /* + * An encapsulating tunnel is wanted. Tell ipip_input() to + * start paying attention to encapsulated packets. 
+ */ + if (have_encap_tunnel == 0) { + have_encap_tunnel = 1; + for (s = 0; s < MAXVIFS; ++s) { + multicast_decap_if[s].if_name = "mdecap"; + multicast_decap_if[s].if_unit = s; + multicast_decap_if[s].if_family = APPLE_IF_FAM_MDECAP; + } + } + /* + * Set interface to fake encapsulator interface + */ + ifp = &multicast_decap_if[vifcp->vifc_vifi]; + /* + * Prepare cached route entry + */ + bzero(&vifp->v_route, sizeof(vifp->v_route)); + } else { + log(LOG_ERR, "source routed tunnels not supported\n"); + return EOPNOTSUPP; + } + } else { + /* Make sure the interface supports multicast */ + if ((ifp->if_flags & IFF_MULTICAST) == 0) + return EOPNOTSUPP; + + /* Enable promiscuous reception of all IP multicasts from the if */ + s = splnet(); + error = if_allmulti(ifp, 1); + splx(s); + if (error) + return error; + } + + s = splnet(); + /* define parameters for the tbf structure */ + vifp->v_tbf = v_tbf; + GET_TIME(vifp->v_tbf->tbf_last_pkt_t); + vifp->v_tbf->tbf_n_tok = 0; + vifp->v_tbf->tbf_q_len = 0; + vifp->v_tbf->tbf_max_q_len = MAXQSIZE; + vifp->v_tbf->tbf_q = vifp->v_tbf->tbf_t = NULL; + + vifp->v_flags = vifcp->vifc_flags; + vifp->v_threshold = vifcp->vifc_threshold; + vifp->v_lcl_addr = vifcp->vifc_lcl_addr; + vifp->v_rmt_addr = vifcp->vifc_rmt_addr; + vifp->v_ifp = ifp; + /* scaling up here allows division by 1024 in critical code */ + vifp->v_rate_limit= vifcp->vifc_rate_limit * 1024 / 1000; + vifp->v_rsvp_on = 0; + vifp->v_rsvpd = NULL; + /* initialize per vif pkt counters */ + vifp->v_pkt_in = 0; + vifp->v_pkt_out = 0; + vifp->v_bytes_in = 0; + vifp->v_bytes_out = 0; + splx(s); + + /* Adjust numvifs up if the vifi is higher than numvifs */ + if (numvifs <= vifcp->vifc_vifi) numvifs = vifcp->vifc_vifi + 1; + + if (mrtdebug) + log(LOG_DEBUG, "add_vif #%d, lcladdr %lx, %s %lx, thresh %x, rate %d\n", + vifcp->vifc_vifi, + (u_long)ntohl(vifcp->vifc_lcl_addr.s_addr), + (vifcp->vifc_flags & VIFF_TUNNEL) ? 
"rmtaddr" : "mask", + (u_long)ntohl(vifcp->vifc_rmt_addr.s_addr), + vifcp->vifc_threshold, + vifcp->vifc_rate_limit); + + return 0; +} + +/* + * Delete a vif from the vif table + */ +static int +del_vif(vifi) + vifi_t vifi; +{ + register struct vif *vifp = &viftable[vifi]; + register struct mbuf *m; + struct ifnet *ifp; + struct ifreq ifr; + int s; + + if (vifi >= numvifs) return EINVAL; + if (vifp->v_lcl_addr.s_addr == 0) return EADDRNOTAVAIL; + + s = splnet(); + + if (!(vifp->v_flags & VIFF_TUNNEL)) { + ((struct sockaddr_in *)&(ifr.ifr_addr))->sin_family = AF_INET; + ((struct sockaddr_in *)&(ifr.ifr_addr))->sin_addr.s_addr = INADDR_ANY; + ifp = vifp->v_ifp; + if_allmulti(ifp, 0); + } + + if (vifp == last_encap_vif) { + last_encap_vif = 0; + last_encap_src = 0; + } + + /* + * Free packets queued at the interface + */ + while (vifp->v_tbf->tbf_q) { + m = vifp->v_tbf->tbf_q; + vifp->v_tbf->tbf_q = m->m_act; + m_freem(m); + } + + bzero((caddr_t)vifp->v_tbf, sizeof(*(vifp->v_tbf))); + bzero((caddr_t)vifp, sizeof (*vifp)); + + if (mrtdebug) + log(LOG_DEBUG, "del_vif %d, numvifs %d\n", vifi, numvifs); + + /* Adjust numvifs down */ + for (vifi = numvifs; vifi > 0; vifi--) + if (viftable[vifi-1].v_lcl_addr.s_addr != 0) break; + numvifs = vifi; + + splx(s); + + return 0; +} + +/* + * Add an mfc entry + */ +static int +add_mfc(mfccp) + struct mfcctl *mfccp; +{ + struct mfc *rt; + u_long hash; + struct rtdetq *rte; + register u_short nstl; + int s; + int i; + + MFCFIND(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr, rt); + + /* If an entry already exists, just update the fields */ + if (rt) { + if (mrtdebug & DEBUG_MFC) + log(LOG_DEBUG,"add_mfc update o %lx g %lx p %x\n", + (u_long)ntohl(mfccp->mfcc_origin.s_addr), + (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr), + mfccp->mfcc_parent); + + s = splnet(); + rt->mfc_parent = mfccp->mfcc_parent; + for (i = 0; i < numvifs; i++) + rt->mfc_ttls[i] = mfccp->mfcc_ttls[i]; + splx(s); + return 0; + } + + /* + * Find the entry for 
which the upcall was made and update + */ + s = splnet(); + hash = MFCHASH(mfccp->mfcc_origin.s_addr, mfccp->mfcc_mcastgrp.s_addr); + for (rt = mfctable[hash], nstl = 0; rt; rt = rt->mfc_next) { + + if ((rt->mfc_origin.s_addr == mfccp->mfcc_origin.s_addr) && + (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr) && + (rt->mfc_stall != NULL)) { + + if (nstl++) + log(LOG_ERR, "add_mfc %s o %lx g %lx p %x dbx %p\n", + "multiple kernel entries", + (u_long)ntohl(mfccp->mfcc_origin.s_addr), + (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr), + mfccp->mfcc_parent, (void *)rt->mfc_stall); + + if (mrtdebug & DEBUG_MFC) + log(LOG_DEBUG,"add_mfc o %lx g %lx p %x dbg %p\n", + (u_long)ntohl(mfccp->mfcc_origin.s_addr), + (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr), + mfccp->mfcc_parent, (void *)rt->mfc_stall); + + rt->mfc_origin = mfccp->mfcc_origin; + rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp; + rt->mfc_parent = mfccp->mfcc_parent; + for (i = 0; i < numvifs; i++) + rt->mfc_ttls[i] = mfccp->mfcc_ttls[i]; + /* initialize pkt counters per src-grp */ + rt->mfc_pkt_cnt = 0; + rt->mfc_byte_cnt = 0; + rt->mfc_wrong_if = 0; + rt->mfc_last_assert.tv_sec = rt->mfc_last_assert.tv_usec = 0; + + rt->mfc_expire = 0; /* Don't clean this guy up */ + nexpire[hash]--; + + /* free packets Qed at the end of this entry */ + for (rte = rt->mfc_stall; rte != NULL; ) { + struct rtdetq *n = rte->next; + + ip_mdq(rte->m, rte->ifp, rt, -1); + m_freem(rte->m); +#if UPCALL_TIMING + collate(&(rte->t)); +#endif /* UPCALL_TIMING */ + FREE(rte, M_MRTABLE); + rte = n; + } + rt->mfc_stall = NULL; + } + } + + /* + * It is possible that an entry is being inserted without an upcall + */ + if (nstl == 0) { + if (mrtdebug & DEBUG_MFC) + log(LOG_DEBUG,"add_mfc no upcall h %lu o %lx g %lx p %x\n", + hash, (u_long)ntohl(mfccp->mfcc_origin.s_addr), + (u_long)ntohl(mfccp->mfcc_mcastgrp.s_addr), + mfccp->mfcc_parent); + + for (rt = mfctable[hash]; rt != NULL; rt = rt->mfc_next) { + + if ((rt->mfc_origin.s_addr == 
mfccp->mfcc_origin.s_addr) && + (rt->mfc_mcastgrp.s_addr == mfccp->mfcc_mcastgrp.s_addr)) { + + rt->mfc_origin = mfccp->mfcc_origin; + rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp; + rt->mfc_parent = mfccp->mfcc_parent; + for (i = 0; i < numvifs; i++) + rt->mfc_ttls[i] = mfccp->mfcc_ttls[i]; + /* initialize pkt counters per src-grp */ + rt->mfc_pkt_cnt = 0; + rt->mfc_byte_cnt = 0; + rt->mfc_wrong_if = 0; + rt->mfc_last_assert.tv_sec = rt->mfc_last_assert.tv_usec = 0; + if (rt->mfc_expire) + nexpire[hash]--; + rt->mfc_expire = 0; + } + } + if (rt == NULL) { + /* no upcall, so make a new entry */ + rt = (struct mfc *) _MALLOC(sizeof(*rt), M_MRTABLE, M_NOWAIT); + if (rt == NULL) { + splx(s); + return ENOBUFS; + } + + /* insert new entry at head of hash chain */ + rt->mfc_origin = mfccp->mfcc_origin; + rt->mfc_mcastgrp = mfccp->mfcc_mcastgrp; + rt->mfc_parent = mfccp->mfcc_parent; + for (i = 0; i < numvifs; i++) + rt->mfc_ttls[i] = mfccp->mfcc_ttls[i]; + /* initialize pkt counters per src-grp */ + rt->mfc_pkt_cnt = 0; + rt->mfc_byte_cnt = 0; + rt->mfc_wrong_if = 0; + rt->mfc_last_assert.tv_sec = rt->mfc_last_assert.tv_usec = 0; + rt->mfc_expire = 0; + rt->mfc_stall = NULL; + + /* link into table */ + rt->mfc_next = mfctable[hash]; + mfctable[hash] = rt; + } + } + splx(s); + return 0; +} + +#if UPCALL_TIMING +/* + * collect delay statistics on the upcalls + */ +static void collate(t) +register struct timeval *t; +{ + register u_long d; + register struct timeval tp; + register u_long delta; + + GET_TIME(tp); + + if (TV_LT(*t, tp)) + { + TV_DELTA(tp, *t, delta); + + d = delta >> 10; + if (d > 50) + d = 50; + + ++upcall_data[d]; + } +} +#endif /* UPCALL_TIMING */ + +/* + * Delete an mfc entry + */ +static int +del_mfc(mfccp) + struct mfcctl *mfccp; +{ + struct in_addr origin; + struct in_addr mcastgrp; + struct mfc *rt; + struct mfc **nptr; + u_long hash; + int s; + + origin = mfccp->mfcc_origin; + mcastgrp = mfccp->mfcc_mcastgrp; + hash = MFCHASH(origin.s_addr, 
mcastgrp.s_addr); + + if (mrtdebug & DEBUG_MFC) + log(LOG_DEBUG,"del_mfc orig %lx mcastgrp %lx\n", + (u_long)ntohl(origin.s_addr), (u_long)ntohl(mcastgrp.s_addr)); + + s = splnet(); + + nptr = &mfctable[hash]; + while ((rt = *nptr) != NULL) { + if (origin.s_addr == rt->mfc_origin.s_addr && + mcastgrp.s_addr == rt->mfc_mcastgrp.s_addr && + rt->mfc_stall == NULL) + break; + + nptr = &rt->mfc_next; + } + if (rt == NULL) { + splx(s); + return EADDRNOTAVAIL; + } + + *nptr = rt->mfc_next; + FREE(rt, M_MRTABLE); + + splx(s); + + return 0; +} + +/* + * Send a message to mrouted on the multicast routing socket + */ +static int +socket_send(s, mm, src) + struct socket *s; + struct mbuf *mm; + struct sockaddr_in *src; +{ + if (s) { + if (sbappendaddr(&s->so_rcv, + (struct sockaddr *)src, + mm, (struct mbuf *)0) != 0) { + sorwakeup(s); + return 0; + } + } + m_freem(mm); + return -1; +} + +/* + * IP multicast forwarding function. This function assumes that the packet + * pointed to by "ip" has arrived on (or is about to be sent to) the interface + * pointed to by "ifp", and the packet is to be relayed to other networks + * that have members of the packet's destination IP multicast group. + * + * The packet is returned unscathed to the caller, unless it is + * erroneous, in which case a non-zero return value tells the caller to + * discard it. 
+ */ + +#define IP_HDR_LEN 20 /* # bytes of fixed IP header (excluding options) */ +#define TUNNEL_LEN 12 /* # bytes of IP option for tunnel encapsulation */ + +static int +X_ip_mforward(ip, ifp, m, imo) + register struct ip *ip; + struct ifnet *ifp; + struct mbuf *m; + struct ip_moptions *imo; +{ + register struct mfc *rt; + register u_char *ipoptions; + static struct sockaddr_in k_igmpsrc = { sizeof k_igmpsrc, AF_INET }; + static int srctun = 0; + register struct mbuf *mm; + int s; + vifi_t vifi; + struct vif *vifp; + + if (mrtdebug & DEBUG_FORWARD) + log(LOG_DEBUG, "ip_mforward: src %lx, dst %lx, ifp %p\n", + (u_long)ntohl(ip->ip_src.s_addr), (u_long)ntohl(ip->ip_dst.s_addr), + (void *)ifp); + + if (ip->ip_hl < (IP_HDR_LEN + TUNNEL_LEN) >> 2 || + (ipoptions = (u_char *)(ip + 1))[1] != IPOPT_LSRR ) { + /* + * Packet arrived via a physical interface or + * an encapsulated tunnel. + */ + } else { + /* + * Packet arrived through a source-route tunnel. + * Source-route tunnels are no longer supported. + */ + if ((srctun++ % 1000) == 0) + log(LOG_ERR, + "ip_mforward: received source-routed packet from %lx\n", + (u_long)ntohl(ip->ip_src.s_addr)); + + return 1; + } + + if ((imo) && ((vifi = imo->imo_multicast_vif) < numvifs)) { + if (ip->ip_ttl < 255) + ip->ip_ttl++; /* compensate for -1 in *_send routines */ + if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) { + vifp = viftable + vifi; + printf("Sending IPPROTO_RSVP from %lx to %lx on vif %d (%s%s%d)\n", + ntohl(ip->ip_src.s_addr), ntohl(ip->ip_dst.s_addr), vifi, + (vifp->v_flags & VIFF_TUNNEL) ? 
"tunnel on " : "", + vifp->v_ifp->if_name, vifp->v_ifp->if_unit); + } + return (ip_mdq(m, ifp, NULL, vifi)); + } + if (rsvpdebug && ip->ip_p == IPPROTO_RSVP) { + printf("Warning: IPPROTO_RSVP from %lx to %lx without vif option\n", + ntohl(ip->ip_src.s_addr), ntohl(ip->ip_dst.s_addr)); + if(!imo) + printf("In fact, no options were specified at all\n"); + } + + /* + * Don't forward a packet with time-to-live of zero or one, + * or a packet destined to a local-only group. + */ + if (ip->ip_ttl <= 1 || + ntohl(ip->ip_dst.s_addr) <= INADDR_MAX_LOCAL_GROUP) + return 0; + + /* + * Determine forwarding vifs from the forwarding cache table + */ + s = splnet(); + MFCFIND(ip->ip_src.s_addr, ip->ip_dst.s_addr, rt); + + /* Entry exists, so forward if necessary */ + if (rt != NULL) { + splx(s); + return (ip_mdq(m, ifp, rt, -1)); + } else { + /* + * If we don't have a route for packet's origin, + * Make a copy of the packet & + * send message to routing daemon + */ + + register struct mbuf *mb0; + register struct rtdetq *rte; + register u_long hash; + int hlen = ip->ip_hl << 2; +#if UPCALL_TIMING + struct timeval tp; + + GET_TIME(tp); +#endif + + mrtstat.mrts_no_route++; + if (mrtdebug & (DEBUG_FORWARD | DEBUG_MFC)) + log(LOG_DEBUG, "ip_mforward: no rte s %lx g %lx\n", + (u_long)ntohl(ip->ip_src.s_addr), + (u_long)ntohl(ip->ip_dst.s_addr)); + + /* + * Allocate mbufs early so that we don't do extra work if we are + * just going to fail anyway. Make sure to pullup the header so + * that other people can't step on it. + */ + rte = (struct rtdetq *) _MALLOC((sizeof *rte), M_MRTABLE, M_NOWAIT); + if (rte == NULL) { + splx(s); + return ENOBUFS; + } + mb0 = m_copy(m, 0, M_COPYALL); + if (mb0 && (M_HASCL(mb0) || mb0->m_len < hlen)) + mb0 = m_pullup(mb0, hlen); + if (mb0 == NULL) { + FREE(rte, M_MRTABLE); + splx(s); + return ENOBUFS; + } + + /* is there an upcall waiting for this packet? 
*/ + hash = MFCHASH(ip->ip_src.s_addr, ip->ip_dst.s_addr); + for (rt = mfctable[hash]; rt; rt = rt->mfc_next) { + if ((ip->ip_src.s_addr == rt->mfc_origin.s_addr) && + (ip->ip_dst.s_addr == rt->mfc_mcastgrp.s_addr) && + (rt->mfc_stall != NULL)) + break; + } + + if (rt == NULL) { + int i; + struct igmpmsg *im; + + /* no upcall, so make a new entry */ + rt = (struct mfc *) _MALLOC(sizeof(*rt), M_MRTABLE, M_NOWAIT); + if (rt == NULL) { + FREE(rte, M_MRTABLE); + m_freem(mb0); + splx(s); + return ENOBUFS; + } + /* Make a copy of the header to send to the user level process */ + mm = m_copy(mb0, 0, hlen); + if (mm == NULL) { + FREE(rte, M_MRTABLE); + m_freem(mb0); + FREE(rt, M_MRTABLE); + splx(s); + return ENOBUFS; + } + + /* + * Send message to routing daemon to install + * a route into the kernel table + */ + k_igmpsrc.sin_addr = ip->ip_src; + + im = mtod(mm, struct igmpmsg *); + im->im_msgtype = IGMPMSG_NOCACHE; + im->im_mbz = 0; + + mrtstat.mrts_upcalls++; + + if (socket_send(ip_mrouter, mm, &k_igmpsrc) < 0) { + log(LOG_WARNING, "ip_mforward: ip_mrouter socket queue full\n"); + ++mrtstat.mrts_upq_sockfull; + FREE(rte, M_MRTABLE); + m_freem(mb0); + FREE(rt, M_MRTABLE); + splx(s); + return ENOBUFS; + } + + /* insert new entry at head of hash chain */ + rt->mfc_origin.s_addr = ip->ip_src.s_addr; + rt->mfc_mcastgrp.s_addr = ip->ip_dst.s_addr; + rt->mfc_expire = UPCALL_EXPIRE; + nexpire[hash]++; + for (i = 0; i < numvifs; i++) + rt->mfc_ttls[i] = 0; + rt->mfc_parent = -1; + + /* link into table */ + rt->mfc_next = mfctable[hash]; + mfctable[hash] = rt; + rt->mfc_stall = rte; + + } else { + /* determine if q has overflowed */ + int npkts = 0; + struct rtdetq **p; + + for (p = &rt->mfc_stall; *p != NULL; p = &(*p)->next) + npkts++; + + if (npkts > MAX_UPQ) { + mrtstat.mrts_upq_ovflw++; + FREE(rte, M_MRTABLE); + m_freem(mb0); + splx(s); + return 0; + } + + /* Add this entry to the end of the queue */ + *p = rte; + } + + rte->m = mb0; + rte->ifp = ifp; +#if UPCALL_TIMING + 
rte->t = tp; +#endif + rte->next = NULL; + + splx(s); + + return 0; + } +} + +#if !MROUTE_LKM +int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *, + struct ip_moptions *) = X_ip_mforward; +#endif + +/* + * Clean up the cache entry if upcall is not serviced + */ +static void +expire_upcalls(void *unused) +{ + struct rtdetq *rte; + struct mfc *mfc, **nptr; + int i; + int s; + boolean_t funnel_state; + + + funnel_state = thread_funnel_set(network_flock, TRUE); + + s = splnet(); + for (i = 0; i < MFCTBLSIZ; i++) { + if (nexpire[i] == 0) + continue; + nptr = &mfctable[i]; + for (mfc = *nptr; mfc != NULL; mfc = *nptr) { + /* + * Skip real cache entries + * Make sure it wasn't marked to not expire (shouldn't happen) + * If it expires now + */ + if (mfc->mfc_stall != NULL && + mfc->mfc_expire != 0 && + --mfc->mfc_expire == 0) { + if (mrtdebug & DEBUG_EXPIRE) + log(LOG_DEBUG, "expire_upcalls: expiring (%lx %lx)\n", + (u_long)ntohl(mfc->mfc_origin.s_addr), + (u_long)ntohl(mfc->mfc_mcastgrp.s_addr)); + /* + * drop all the packets + * free the mbuf with the pkt, if, timing info + */ + for (rte = mfc->mfc_stall; rte; ) { + struct rtdetq *n = rte->next; + + m_freem(rte->m); + FREE(rte, M_MRTABLE); + rte = n; + } + ++mrtstat.mrts_cache_cleanups; + nexpire[i]--; + + *nptr = mfc->mfc_next; + FREE(mfc, M_MRTABLE); + } else { + nptr = &mfc->mfc_next; + } + } + } + splx(s); + timeout(expire_upcalls, (caddr_t)NULL, EXPIRE_TIMEOUT); + (void) thread_funnel_set(network_flock, FALSE); +} + +/* + * Packet forwarding routine once entry in the cache is made + */ +static int +ip_mdq(m, ifp, rt, xmt_vif) + register struct mbuf *m; + register struct ifnet *ifp; + register struct mfc *rt; + register vifi_t xmt_vif; +{ + register struct ip *ip = mtod(m, struct ip *); + register vifi_t vifi; + register struct vif *vifp; + register int plen = ip->ip_len; + +/* + * Macro to send packet on vif. 
Since RSVP packets don't get counted on + * input, they shouldn't get counted on output, so statistics keeping is + * seperate. + */ +#define MC_SEND(ip,vifp,m) { \ + if ((vifp)->v_flags & VIFF_TUNNEL) \ + encap_send((ip), (vifp), (m)); \ + else \ + phyint_send((ip), (vifp), (m)); \ +} + + /* + * If xmt_vif is not -1, send on only the requested vif. + * + * (since vifi_t is u_short, -1 becomes MAXUSHORT, which > numvifs.) + */ + if (xmt_vif < numvifs) { + MC_SEND(ip, viftable + xmt_vif, m); + return 1; + } + + /* + * Don't forward if it didn't arrive from the parent vif for its origin. + */ + vifi = rt->mfc_parent; + if ((vifi >= numvifs) || (viftable[vifi].v_ifp != ifp)) { + /* came in the wrong interface */ + if (mrtdebug & DEBUG_FORWARD) + log(LOG_DEBUG, "wrong if: ifp %p vifi %d vififp %p\n", + (void *)ifp, vifi, (void *)viftable[vifi].v_ifp); + ++mrtstat.mrts_wrong_if; + ++rt->mfc_wrong_if; + /* + * If we are doing PIM assert processing, and we are forwarding + * packets on this interface, and it is a broadcast medium + * interface (and not a tunnel), send a message to the routing daemon. + */ + if (pim_assert && rt->mfc_ttls[vifi] && + (ifp->if_flags & IFF_BROADCAST) && + !(viftable[vifi].v_flags & VIFF_TUNNEL)) { + struct sockaddr_in k_igmpsrc; + struct mbuf *mm; + struct igmpmsg *im; + int hlen = ip->ip_hl << 2; + struct timeval now; + register u_long delta; + + GET_TIME(now); + + TV_DELTA(rt->mfc_last_assert, now, delta); + + if (delta > ASSERT_MSG_TIME) { + mm = m_copy(m, 0, hlen); + if (mm && (M_HASCL(mm) || mm->m_len < hlen)) + mm = m_pullup(mm, hlen); + if (mm == NULL) { + return ENOBUFS; + } + + rt->mfc_last_assert = now; + + im = mtod(mm, struct igmpmsg *); + im->im_msgtype = IGMPMSG_WRONGVIF; + im->im_mbz = 0; + im->im_vif = vifi; + + k_igmpsrc.sin_addr = im->im_src; + + socket_send(ip_mrouter, mm, &k_igmpsrc); + } + } + return 0; + } + + /* If I sourced this packet, it counts as output, else it was input. 
*/ + if (ip->ip_src.s_addr == viftable[vifi].v_lcl_addr.s_addr) { + viftable[vifi].v_pkt_out++; + viftable[vifi].v_bytes_out += plen; + } else { + viftable[vifi].v_pkt_in++; + viftable[vifi].v_bytes_in += plen; + } + rt->mfc_pkt_cnt++; + rt->mfc_byte_cnt += plen; + + /* + * For each vif, decide if a copy of the packet should be forwarded. + * Forward if: + * - the ttl exceeds the vif's threshold + * - there are group members downstream on interface + */ + for (vifp = viftable, vifi = 0; vifi < numvifs; vifp++, vifi++) + if ((rt->mfc_ttls[vifi] > 0) && + (ip->ip_ttl > rt->mfc_ttls[vifi])) { + vifp->v_pkt_out++; + vifp->v_bytes_out += plen; + MC_SEND(ip, vifp, m); + } + + return 0; +} + +/* + * check if a vif number is legal/ok. This is used by ip_output, to export + * numvifs there, + */ +static int +X_legal_vif_num(vif) + int vif; +{ + if (vif >= 0 && vif < numvifs) + return(1); + else + return(0); +} + +#if !MROUTE_LKM +int (*legal_vif_num)(int) = X_legal_vif_num; +#endif + +/* + * Return the local address used by this vif + */ +static u_long +X_ip_mcast_src(vifi) + int vifi; +{ + if (vifi >= 0 && vifi < numvifs) + return viftable[vifi].v_lcl_addr.s_addr; + else + return INADDR_ANY; +} + +#if !MROUTE_LKM +u_long (*ip_mcast_src)(int) = X_ip_mcast_src; +#endif + +static void +phyint_send(ip, vifp, m) + struct ip *ip; + struct vif *vifp; + struct mbuf *m; +{ + register struct mbuf *mb_copy; + register int hlen = ip->ip_hl << 2; + + /* + * Make a new reference to the packet; make sure that + * the IP header is actually copied, not just referenced, + * so that ip_output() only scribbles on the copy. 
+ */ + mb_copy = m_copy(m, 0, M_COPYALL); + if (mb_copy && (M_HASCL(mb_copy) || mb_copy->m_len < hlen)) + mb_copy = m_pullup(mb_copy, hlen); + if (mb_copy == NULL) + return; + + if (vifp->v_rate_limit == 0) + tbf_send_packet(vifp, mb_copy); + else + tbf_control(vifp, mb_copy, mtod(mb_copy, struct ip *), ip->ip_len); +} + +static void +encap_send(ip, vifp, m) + register struct ip *ip; + register struct vif *vifp; + register struct mbuf *m; +{ + register struct mbuf *mb_copy; + register struct ip *ip_copy; + register int i, len = ip->ip_len; + + /* + * copy the old packet & pullup its IP header into the + * new mbuf so we can modify it. Try to fill the new + * mbuf since if we don't the ethernet driver will. + */ + MGETHDR(mb_copy, M_DONTWAIT, MT_HEADER); + if (mb_copy == NULL) + return; + mb_copy->m_data += max_linkhdr; + mb_copy->m_len = sizeof(multicast_encap_iphdr); + + if ((mb_copy->m_next = m_copy(m, 0, M_COPYALL)) == NULL) { + m_freem(mb_copy); + return; + } + i = MHLEN - M_LEADINGSPACE(mb_copy); + if (i > len) + i = len; + mb_copy = m_pullup(mb_copy, i); + if (mb_copy == NULL) + return; + mb_copy->m_pkthdr.len = len + sizeof(multicast_encap_iphdr); + + /* + * fill in the encapsulating IP header. + */ + ip_copy = mtod(mb_copy, struct ip *); + *ip_copy = multicast_encap_iphdr; + ip_copy->ip_id = htons(ip_id++); + ip_copy->ip_len += len; + ip_copy->ip_src = vifp->v_lcl_addr; + ip_copy->ip_dst = vifp->v_rmt_addr; + + /* + * turn the encapsulated IP header back into a valid one. 
+ */ + ip = (struct ip *)((caddr_t)ip_copy + sizeof(multicast_encap_iphdr)); + --ip->ip_ttl; + HTONS(ip->ip_len); + HTONS(ip->ip_off); + ip->ip_sum = 0; + mb_copy->m_data += sizeof(multicast_encap_iphdr); + ip->ip_sum = in_cksum(mb_copy, ip->ip_hl << 2); + mb_copy->m_data -= sizeof(multicast_encap_iphdr); + + if (vifp->v_rate_limit == 0) + tbf_send_packet(vifp, mb_copy); + else + tbf_control(vifp, mb_copy, ip, ip_copy->ip_len); +} + +/* + * De-encapsulate a packet and feed it back through ip input (this + * routine is called whenever IP gets a packet with proto type + * ENCAP_PROTO and a local destination address). + */ +void +#if MROUTE_LKM +X_ipip_input(m, iphlen) +#else +ipip_input(m, iphlen) +#endif + register struct mbuf *m; + int iphlen; +{ + struct ifnet *ifp = m->m_pkthdr.rcvif; + register struct ip *ip = mtod(m, struct ip *); + register int hlen = ip->ip_hl << 2; + register int s; + register struct ifqueue *ifq; + register struct vif *vifp; + + if (!have_encap_tunnel) { + rip_input(m, iphlen); + return; + } + /* + * dump the packet if it's not to a multicast destination or if + * we don't have an encapsulating tunnel with the source. + * Note: This code assumes that the remote site IP address + * uniquely identifies the tunnel (i.e., that this site has + * at most one tunnel with the remote site). + */ + if (! 
IN_MULTICAST(ntohl(((struct ip *)((char *)ip + hlen))->ip_dst.s_addr))) { + ++mrtstat.mrts_bad_tunnel; + m_freem(m); + return; + } + if (ip->ip_src.s_addr != last_encap_src) { + register struct vif *vife; + + vifp = viftable; + vife = vifp + numvifs; + last_encap_src = ip->ip_src.s_addr; + last_encap_vif = 0; + for ( ; vifp < vife; ++vifp) + if (vifp->v_rmt_addr.s_addr == ip->ip_src.s_addr) { + if ((vifp->v_flags & (VIFF_TUNNEL|VIFF_SRCRT)) + == VIFF_TUNNEL) + last_encap_vif = vifp; + break; + } + } + if ((vifp = last_encap_vif) == 0) { + last_encap_src = 0; + mrtstat.mrts_cant_tunnel++; /*XXX*/ + m_freem(m); + if (mrtdebug) + log(LOG_DEBUG, "ip_mforward: no tunnel with %lx\n", + (u_long)ntohl(ip->ip_src.s_addr)); + return; + } + ifp = vifp->v_ifp; + + if (hlen > IP_HDR_LEN) + ip_stripoptions(m, (struct mbuf *) 0); + m->m_data += IP_HDR_LEN; + m->m_len -= IP_HDR_LEN; + m->m_pkthdr.len -= IP_HDR_LEN; + m->m_pkthdr.rcvif = ifp; + + ifq = &ipintrq; + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + } else { + IF_ENQUEUE(ifq, m); + /* + * normally we would need a "schednetisr(NETISR_IP)" + * here but we were called by ip_input and it is going + * to loop back & try to dequeue the packet we just + * queued as soon as we return so we avoid the + * unnecessary software interrrupt. 
+ */ + } + splx(s); +} + +/* + * Token bucket filter module + */ + +static void +tbf_control(vifp, m, ip, p_len) + register struct vif *vifp; + register struct mbuf *m; + register struct ip *ip; + register u_long p_len; +{ + register struct tbf *t = vifp->v_tbf; + + if (p_len > MAX_BKT_SIZE) { + /* drop if packet is too large */ + mrtstat.mrts_pkt2large++; + m_freem(m); + return; + } + + tbf_update_tokens(vifp); + + /* if there are enough tokens, + * and the queue is empty, + * send this packet out + */ + + if (t->tbf_q_len == 0) { + /* queue empty, send packet if enough tokens */ + if (p_len <= t->tbf_n_tok) { + t->tbf_n_tok -= p_len; + tbf_send_packet(vifp, m); + } else { + /* queue packet and timeout till later */ + tbf_queue(vifp, m); + timeout(tbf_reprocess_q, (caddr_t)vifp, TBF_REPROCESS); + } + } else if (t->tbf_q_len < t->tbf_max_q_len) { + /* finite queue length, so queue pkts and process queue */ + tbf_queue(vifp, m); + tbf_process_q(vifp); + } else { + /* queue length too much, try to dq and queue and process */ + if (!tbf_dq_sel(vifp, ip)) { + mrtstat.mrts_q_overflow++; + m_freem(m); + return; + } else { + tbf_queue(vifp, m); + tbf_process_q(vifp); + } + } + return; +} + +/* + * adds a packet to the queue at the interface + */ +static void +tbf_queue(vifp, m) + register struct vif *vifp; + register struct mbuf *m; +{ + register int s = splnet(); + register struct tbf *t = vifp->v_tbf; + + if (t->tbf_t == NULL) { + /* Queue was empty */ + t->tbf_q = m; + } else { + /* Insert at tail */ + t->tbf_t->m_act = m; + } + + /* Set new tail pointer */ + t->tbf_t = m; + +#if DIAGNOSTIC + /* Make sure we didn't get fed a bogus mbuf */ + if (m->m_act) + panic("tbf_queue: m_act"); +#endif + m->m_act = NULL; + + t->tbf_q_len++; + + splx(s); +} + + +/* + * processes the queue at the interface + */ +static void +tbf_process_q(vifp) + register struct vif *vifp; +{ + register struct mbuf *m; + register int len; + register int s = splnet(); + register struct tbf *t = 
vifp->v_tbf; + + /* loop through the queue at the interface and send as many packets + * as possible + */ + while (t->tbf_q_len > 0) { + m = t->tbf_q; + + len = mtod(m, struct ip *)->ip_len; + + /* determine if the packet can be sent */ + if (len <= t->tbf_n_tok) { + /* if so, + * reduce no of tokens, dequeue the packet, + * send the packet. + */ + t->tbf_n_tok -= len; + + t->tbf_q = m->m_act; + if (--t->tbf_q_len == 0) + t->tbf_t = NULL; + + m->m_act = NULL; + tbf_send_packet(vifp, m); + + } else break; + } + splx(s); +} + +static void +tbf_reprocess_q(xvifp) + void *xvifp; +{ + register struct vif *vifp = xvifp; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(network_flock, TRUE); + if (ip_mrouter == NULL) { + (void) thread_funnel_set(network_flock, FALSE); + return; + } + + tbf_update_tokens(vifp); + + tbf_process_q(vifp); + + if (vifp->v_tbf->tbf_q_len) + timeout(tbf_reprocess_q, (caddr_t)vifp, TBF_REPROCESS); + (void) thread_funnel_set(network_flock, FALSE); +} + +/* function that will selectively discard a member of the queue + * based on the precedence value and the priority + */ +static int +tbf_dq_sel(vifp, ip) + register struct vif *vifp; + register struct ip *ip; +{ + register int s = splnet(); + register u_int p; + register struct mbuf *m, *last; + register struct mbuf **np; + register struct tbf *t = vifp->v_tbf; + + p = priority(vifp, ip); + + np = &t->tbf_q; + last = NULL; + while ((m = *np) != NULL) { + if (p > priority(vifp, mtod(m, struct ip *))) { + *np = m->m_act; + /* If we're removing the last packet, fix the tail pointer */ + if (m == t->tbf_t) + t->tbf_t = last; + m_freem(m); + /* it's impossible for the queue to be empty, but + * we check anyway. 
*/ + if (--t->tbf_q_len == 0) + t->tbf_t = NULL; + splx(s); + mrtstat.mrts_drop_sel++; + return(1); + } + np = &m->m_act; + last = m; + } + splx(s); + return(0); +} + +static void +tbf_send_packet(vifp, m) + register struct vif *vifp; + register struct mbuf *m; +{ + struct ip_moptions imo; + int error; + static struct route ro; + int s = splnet(); + + if (vifp->v_flags & VIFF_TUNNEL) { + /* If tunnel options */ + ip_output(m, (struct mbuf *)0, &vifp->v_route, + IP_FORWARDING, (struct ip_moptions *)0); + } else { + imo.imo_multicast_ifp = vifp->v_ifp; + imo.imo_multicast_ttl = mtod(m, struct ip *)->ip_ttl - 1; + imo.imo_multicast_loop = 1; + imo.imo_multicast_vif = -1; + + /* + * Re-entrancy should not be a problem here, because + * the packets that we send out and are looped back at us + * should get rejected because they appear to come from + * the loopback interface, thus preventing looping. + */ + error = ip_output(m, (struct mbuf *)0, &ro, + IP_FORWARDING, &imo); + + if (mrtdebug & DEBUG_XMIT) + log(LOG_DEBUG, "phyint_send on vif %d err %d\n", + vifp - viftable, error); + } + splx(s); +} + +/* determine the current time and then + * the elapsed time (between the last time and time now) + * in milliseconds & update the no. of tokens in the bucket + */ +static void +tbf_update_tokens(vifp) + register struct vif *vifp; +{ + struct timeval tp; + register u_long tm; + register int s = splnet(); + register struct tbf *t = vifp->v_tbf; + + GET_TIME(tp); + + TV_DELTA(tp, t->tbf_last_pkt_t, tm); + + /* + * This formula is actually + * "time in seconds" * "bytes/second". + * + * (tm / 1000000) * (v_rate_limit * 1000 * (1000/1024) / 8) + * + * The (1000/1024) was introduced in add_vif to optimize + * this divide into a shift. 
+ */ + t->tbf_n_tok += tm * vifp->v_rate_limit / 1024 / 8; + t->tbf_last_pkt_t = tp; + + if (t->tbf_n_tok > MAX_BKT_SIZE) + t->tbf_n_tok = MAX_BKT_SIZE; + + splx(s); +} + +static int +priority(vifp, ip) + register struct vif *vifp; + register struct ip *ip; +{ + register int prio; + + /* temporary hack; may add general packet classifier some day */ + + /* + * The UDP port space is divided up into four priority ranges: + * [0, 16384) : unclassified - lowest priority + * [16384, 32768) : audio - highest priority + * [32768, 49152) : whiteboard - medium priority + * [49152, 65536) : video - low priority + */ + if (ip->ip_p == IPPROTO_UDP) { + struct udphdr *udp = (struct udphdr *)(((char *)ip) + (ip->ip_hl << 2)); + switch (ntohs(udp->uh_dport) & 0xc000) { + case 0x4000: + prio = 70; + break; + case 0x8000: + prio = 60; + break; + case 0xc000: + prio = 55; + break; + default: + prio = 50; + break; + } + if (tbfdebug > 1) + log(LOG_DEBUG, "port %x prio%d\n", ntohs(udp->uh_dport), prio); + } else { + prio = 50; + } + return prio; +} + +/* + * End of token bucket filter modifications + */ + +int +ip_rsvp_vif_init(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error, i, s; + + if (rsvpdebug) + printf("ip_rsvp_vif_init: so_type = %d, pr_protocol = %d\n", + so->so_type, so->so_proto->pr_protocol); + + if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP) + return EOPNOTSUPP; + + /* Check mbuf. */ + error = sooptcopyin(sopt, &i, sizeof i, sizeof i); + if (error) + return (error); + + if (rsvpdebug) + printf("ip_rsvp_vif_init: vif = %d rsvp_on = %d\n", i, rsvp_on); + + s = splnet(); + + /* Check vif. */ + if (!legal_vif_num(i)) { + splx(s); + return EADDRNOTAVAIL; + } + + /* Check if socket is available. */ + if (viftable[i].v_rsvpd != NULL) { + splx(s); + return EADDRINUSE; + } + + viftable[i].v_rsvpd = so; + /* This may seem silly, but we need to be sure we don't over-increment + * the RSVP counter, in case something slips up. 
+ */ + if (!viftable[i].v_rsvp_on) { + viftable[i].v_rsvp_on = 1; + rsvp_on++; + } + + splx(s); + return 0; +} + +int +ip_rsvp_vif_done(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error, i, s; + + if (rsvpdebug) + printf("ip_rsvp_vif_done: so_type = %d, pr_protocol = %d\n", + so->so_type, so->so_proto->pr_protocol); + + if (so->so_type != SOCK_RAW || + so->so_proto->pr_protocol != IPPROTO_RSVP) + return EOPNOTSUPP; + + error = sooptcopyin(sopt, &i, sizeof i, sizeof i); + if (error) + return (error); + + s = splnet(); + + /* Check vif. */ + if (!legal_vif_num(i)) { + splx(s); + return EADDRNOTAVAIL; + } + + if (rsvpdebug) + printf("ip_rsvp_vif_done: v_rsvpd = %p so = %p\n", + viftable[i].v_rsvpd, so); + + viftable[i].v_rsvpd = NULL; + /* + * This may seem silly, but we need to be sure we don't over-decrement + * the RSVP counter, in case something slips up. + */ + if (viftable[i].v_rsvp_on) { + viftable[i].v_rsvp_on = 0; + rsvp_on--; + } + + splx(s); + return 0; +} + +void +ip_rsvp_force_done(so) + struct socket *so; +{ + int vifi; + register int s; + + /* Don't bother if it is not the right type of socket. */ + if (so->so_type != SOCK_RAW || so->so_proto->pr_protocol != IPPROTO_RSVP) + return; + + s = splnet(); + + /* The socket may be attached to more than one vif...this + * is perfectly legal. + */ + for (vifi = 0; vifi < numvifs; vifi++) { + if (viftable[vifi].v_rsvpd == so) { + viftable[vifi].v_rsvpd = NULL; + /* This may seem silly, but we need to be sure we don't + * over-decrement the RSVP counter, in case something slips up. 
+ */ + if (viftable[vifi].v_rsvp_on) { + viftable[vifi].v_rsvp_on = 0; + rsvp_on--; + } + } + } + + splx(s); + return; +} + +void +rsvp_input(m, iphlen) + struct mbuf *m; + int iphlen; +{ + int vifi; + register struct ip *ip = mtod(m, struct ip *); + static struct sockaddr_in rsvp_src = { sizeof rsvp_src, AF_INET }; + register int s; + struct ifnet *ifp; + + if (rsvpdebug) + printf("rsvp_input: rsvp_on %d\n",rsvp_on); + + /* Can still get packets with rsvp_on = 0 if there is a local member + * of the group to which the RSVP packet is addressed. But in this + * case we want to throw the packet away. + */ + if (!rsvp_on) { + m_freem(m); + return; + } + + /* If the old-style non-vif-associated socket is set, then use + * it and ignore the new ones. + */ + if (ip_rsvpd != NULL) { + if (rsvpdebug) + printf("rsvp_input: Sending packet up old-style socket\n"); + rip_input(m, iphlen); + return; + } + + s = splnet(); + + if (rsvpdebug) + printf("rsvp_input: check vifs\n"); + +#if DIAGNOSTIC + if (!(m->m_flags & M_PKTHDR)) + panic("rsvp_input no hdr"); +#endif + + ifp = m->m_pkthdr.rcvif; + /* Find which vif the packet arrived on. */ + for (vifi = 0; vifi < numvifs; vifi++) { + if (viftable[vifi].v_ifp == ifp) + break; + } + + if (vifi == numvifs) { + /* Can't find vif packet arrived on. Drop packet. 
*/ + if (rsvpdebug) + printf("rsvp_input: Can't find vif for packet...dropping it.\n"); + m_freem(m); + splx(s); + return; + } + + if (rsvpdebug) + printf("rsvp_input: check socket\n"); + + if (viftable[vifi].v_rsvpd == NULL) { + /* drop packet, since there is no specific socket for this + * interface */ + if (rsvpdebug) + printf("rsvp_input: No socket defined for vif %d\n",vifi); + m_freem(m); + splx(s); + return; + } + rsvp_src.sin_addr = ip->ip_src; + + if (rsvpdebug && m) + printf("rsvp_input: m->m_len = %d, sbspace() = %ld\n", + m->m_len,sbspace(&(viftable[vifi].v_rsvpd->so_rcv))); + + if (socket_send(viftable[vifi].v_rsvpd, m, &rsvp_src) < 0) + if (rsvpdebug) + printf("rsvp_input: Failed to append to socket\n"); + else + if (rsvpdebug) + printf("rsvp_input: send packet up\n"); + + splx(s); +} + +#if MROUTE_LKM +#include +#include +#include +#include + +MOD_MISC("ip_mroute_mod") + +static int +ip_mroute_mod_handle(struct lkm_table *lkmtp, int cmd) +{ + int i; + struct lkm_misc *args = lkmtp->private.lkm_misc; + int err = 0; + + switch(cmd) { + static int (*old_ip_mrouter_cmd)(); + static int (*old_ip_mrouter_done)(); + static int (*old_ip_mforward)(); + static int (*old_mrt_ioctl)(); + static void (*old_proto4_input)(); + static int (*old_legal_vif_num)(); + extern struct protosw inetsw[]; + + case LKM_E_LOAD: + if(lkmexists(lkmtp) || ip_mrtproto) + return(EEXIST); + old_ip_mrouter_cmd = ip_mrouter_cmd; + ip_mrouter_cmd = X_ip_mrouter_cmd; + old_ip_mrouter_done = ip_mrouter_done; + ip_mrouter_done = X_ip_mrouter_done; + old_ip_mforward = ip_mforward; + ip_mforward = X_ip_mforward; + old_mrt_ioctl = mrt_ioctl; + mrt_ioctl = X_mrt_ioctl; + old_proto4_input = ip_protox[ENCAP_PROTO]->pr_input; + ip_protox[ENCAP_PROTO]->pr_input = X_ipip_input; + old_legal_vif_num = legal_vif_num; + legal_vif_num = X_legal_vif_num; + ip_mrtproto = IGMP_DVMRP; + + printf("\nIP multicast routing loaded\n"); + break; + + case LKM_E_UNLOAD: + if (ip_mrouter) + return EINVAL; + + 
ip_mrouter_cmd = old_ip_mrouter_cmd; + ip_mrouter_done = old_ip_mrouter_done; + ip_mforward = old_ip_mforward; + mrt_ioctl = old_mrt_ioctl; + ip_protox[ENCAP_PROTO]->pr_input = old_proto4_input; + legal_vif_num = old_legal_vif_num; + ip_mrtproto = 0; + break; + + default: + err = EINVAL; + break; + } + + return(err); +} + +int +ip_mroute_mod(struct lkm_table *lkmtp, int cmd, int ver) { + DISPATCH(lkmtp, cmd, ver, ip_mroute_mod_handle, ip_mroute_mod_handle, + nosys); +} + +#endif /* MROUTE_LKM */ +#endif /* MROUTING */ diff --git a/bsd/netinet/ip_mroute.h b/bsd/netinet/ip_mroute.h new file mode 100644 index 000000000..e87167b6f --- /dev/null +++ b/bsd/netinet/ip_mroute.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989 Stephen Deering. + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Stephen Deering of Stanford University. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_mroute.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_IP_MROUTE_H_ +#define _NETINET_IP_MROUTE_H_ + +/* + * Definitions for IP multicast forwarding. + * + * Written by David Waitzman, BBN Labs, August 1988. 
+ * Modified by Steve Deering, Stanford, February 1989. + * Modified by Ajit Thyagarajan, PARC, August 1993. + * Modified by Ajit Thyagarajan, PARC, August 1994. + * + * MROUTING Revision: 3.3.1.3 + */ + + +/* + * Multicast Routing set/getsockopt commands. + */ +#define MRT_INIT 100 /* initialize forwarder */ +#define MRT_DONE 101 /* shut down forwarder */ +#define MRT_ADD_VIF 102 /* create virtual interface */ +#define MRT_DEL_VIF 103 /* delete virtual interface */ +#define MRT_ADD_MFC 104 /* insert forwarding cache entry */ +#define MRT_DEL_MFC 105 /* delete forwarding cache entry */ +#define MRT_VERSION 106 /* get kernel version number */ +#define MRT_ASSERT 107 /* enable PIM assert processing */ + + +#define GET_TIME(t) microtime(&t) + +/* + * Types and macros for handling bitmaps with one bit per virtual interface. + */ +#define MAXVIFS 32 +typedef u_long vifbitmap_t; +typedef u_short vifi_t; /* type of a vif index */ +#define ALL_VIFS (vifi_t)-1 + +#define VIFM_SET(n, m) ((m) |= (1 << (n))) +#define VIFM_CLR(n, m) ((m) &= ~(1 << (n))) +#define VIFM_ISSET(n, m) ((m) & (1 << (n))) +#define VIFM_CLRALL(m) ((m) = 0x00000000) +#define VIFM_COPY(mfrom, mto) ((mto) = (mfrom)) +#define VIFM_SAME(m1, m2) ((m1) == (m2)) + + +/* + * Argument structure for MRT_ADD_VIF. + * (MRT_DEL_VIF takes a single vifi_t argument.) 
+ */ +struct vifctl { + vifi_t vifc_vifi; /* the index of the vif to be added */ + u_char vifc_flags; /* VIFF_ flags defined below */ + u_char vifc_threshold; /* min ttl required to forward on vif */ + u_int vifc_rate_limit; /* max rate */ + struct in_addr vifc_lcl_addr; /* local interface address */ + struct in_addr vifc_rmt_addr; /* remote address (tunnels only) */ +}; + +#define VIFF_TUNNEL 0x1 /* vif represents a tunnel end-point */ +#define VIFF_SRCRT 0x2 /* tunnel uses IP source routing */ + +/* + * Argument structure for MRT_ADD_MFC and MRT_DEL_MFC + * (mfcc_tos to be added at a future point) + */ +struct mfcctl { + struct in_addr mfcc_origin; /* ip origin of mcasts */ + struct in_addr mfcc_mcastgrp; /* multicast group associated*/ + vifi_t mfcc_parent; /* incoming vif */ + u_char mfcc_ttls[MAXVIFS]; /* forwarding ttls on vifs */ +}; + +/* + * The kernel's multicast routing statistics. + */ +struct mrtstat { + u_long mrts_mfc_lookups; /* # forw. cache hash table hits */ + u_long mrts_mfc_misses; /* # forw. 
cache hash table misses */ + u_long mrts_upcalls; /* # calls to mrouted */ + u_long mrts_no_route; /* no route for packet's origin */ + u_long mrts_bad_tunnel; /* malformed tunnel options */ + u_long mrts_cant_tunnel; /* no room for tunnel options */ + u_long mrts_wrong_if; /* arrived on wrong interface */ + u_long mrts_upq_ovflw; /* upcall Q overflow */ + u_long mrts_cache_cleanups; /* # entries with no upcalls */ + u_long mrts_drop_sel; /* pkts dropped selectively */ + u_long mrts_q_overflow; /* pkts dropped - Q overflow */ + u_long mrts_pkt2large; /* pkts dropped - size > BKT SIZE */ + u_long mrts_upq_sockfull; /* upcalls dropped - socket full */ +}; + +/* + * Argument structure used by mrouted to get src-grp pkt counts + */ +struct sioc_sg_req { + struct in_addr src; + struct in_addr grp; + u_long pktcnt; + u_long bytecnt; + u_long wrong_if; +}; + +/* + * Argument structure used by mrouted to get vif pkt counts + */ +struct sioc_vif_req { + vifi_t vifi; /* vif number */ + u_long icount; /* Input packet count on vif */ + u_long ocount; /* Output packet count on vif */ + u_long ibytes; /* Input byte count on vif */ + u_long obytes; /* Output byte count on vif */ +}; + + +/* + * The kernel's virtual-interface structure. + */ +struct vif { + u_char v_flags; /* VIFF_ flags defined above */ + u_char v_threshold; /* min ttl required to forward on vif*/ + u_int v_rate_limit; /* max rate */ + struct tbf *v_tbf; /* token bucket structure at intf. 
*/ + struct in_addr v_lcl_addr; /* local interface address */ + struct in_addr v_rmt_addr; /* remote address (tunnels only) */ + struct ifnet *v_ifp; /* pointer to interface */ + u_long v_pkt_in; /* # pkts in on interface */ + u_long v_pkt_out; /* # pkts out on interface */ + u_long v_bytes_in; /* # bytes in on interface */ + u_long v_bytes_out; /* # bytes out on interface */ + struct route v_route; /* cached route if this is a tunnel */ + u_int v_rsvp_on; /* RSVP listening on this vif */ + struct socket *v_rsvpd; /* RSVP daemon socket */ +}; + +/* + * The kernel's multicast forwarding cache entry structure + * (A field for the type of service (mfc_tos) is to be added + * at a future point) + */ +struct mfc { + struct in_addr mfc_origin; /* IP origin of mcasts */ + struct in_addr mfc_mcastgrp; /* multicast group associated*/ + vifi_t mfc_parent; /* incoming vif */ + u_char mfc_ttls[MAXVIFS]; /* forwarding ttls on vifs */ + u_long mfc_pkt_cnt; /* pkt count for src-grp */ + u_long mfc_byte_cnt; /* byte count for src-grp */ + u_long mfc_wrong_if; /* wrong if for src-grp */ + int mfc_expire; /* time to clean entry up */ + struct timeval mfc_last_assert; /* last time I sent an assert*/ + struct rtdetq *mfc_stall; /* q of packets awaiting mfc */ + struct mfc *mfc_next; /* next mfc entry */ +}; + +/* + * Struct used to communicate from kernel to multicast router + * note the convenient similarity to an IP packet + */ +struct igmpmsg { + u_long unused1; + u_long unused2; + u_char im_msgtype; /* what type of message */ +#define IGMPMSG_NOCACHE 1 +#define IGMPMSG_WRONGVIF 2 + u_char im_mbz; /* must be zero */ + u_char im_vif; /* vif rec'd on */ + u_char unused3; + struct in_addr im_src, im_dst; +}; + +/* + * Argument structure used for pkt info. 
while upcall is made + */ +struct rtdetq { + struct mbuf *m; /* A copy of the packet */ + struct ifnet *ifp; /* Interface pkt came in on */ + vifi_t xmt_vif; /* Saved copy of imo_multicast_vif */ +#if UPCALL_TIMING + struct timeval t; /* Timestamp */ +#endif /* UPCALL_TIMING */ + struct rtdetq *next; /* Next in list of packets */ +}; + +#define MFCTBLSIZ 256 +#if (MFCTBLSIZ & (MFCTBLSIZ - 1)) == 0 /* from sys:route.h */ +#define MFCHASHMOD(h) ((h) & (MFCTBLSIZ - 1)) +#else +#define MFCHASHMOD(h) ((h) % MFCTBLSIZ) +#endif + +#define MAX_UPQ 4 /* max. no of pkts in upcall Q */ + +/* + * Token Bucket filter code + */ +#define MAX_BKT_SIZE 10000 /* 10K bytes size */ +#define MAXQSIZE 10 /* max # of pkts in queue */ + +/* + * the token bucket filter at each vif + */ +struct tbf +{ + struct timeval tbf_last_pkt_t; /* arr. time of last pkt */ + u_long tbf_n_tok; /* no of tokens in bucket */ + u_long tbf_q_len; /* length of queue at this vif */ + u_long tbf_max_q_len; /* max. queue length */ + struct mbuf *tbf_q; /* Packet queue */ + struct mbuf *tbf_t; /* tail-insertion pointer */ +}; + +#ifdef KERNEL + +struct sockopt; + +extern int (*ip_mrouter_set) __P((struct socket *, struct sockopt *)); +extern int (*ip_mrouter_get) __P((struct socket *, struct sockopt *)); +extern int (*ip_mrouter_done) __P((void)); + +#if MROUTING +extern int (*mrt_ioctl) __P((int, caddr_t)); +#else +extern int (*mrt_ioctl) __P((int, caddr_t, struct proc *)); +#endif + +#endif /* KERNEL */ + +#endif /* _NETINET_IP_MROUTE_H_ */ diff --git a/bsd/netinet/ip_nat.c b/bsd/netinet/ip_nat.c new file mode 100644 index 000000000..d8a0f6219 --- /dev/null +++ b/bsd/netinet/ip_nat.c @@ -0,0 +1,1393 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1995-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + * Added redirect stuff and a LOT of bug fixes. (mcn@EnGarde.com) + */ +#if !defined(lint) +/* static const char sccsid[] = "@(#)ip_nat.c 1.11 6/5/96 (C) 1995 Darren Reed"; */ +#endif + +#include "opt_ipfilter.h" +#define __FreeBSD_version 300000 /* it's a hack, but close enough */ + +#if defined(__FreeBSD__) && defined(KERNEL) && !defined(_KERNEL) +#define _KERNEL +#endif + +#if !defined(_KERNEL) && !defined(KERNEL) +# include +# include +# include +#endif +#include +#include +#include +#include +#include +#if defined(KERNEL) && (__FreeBSD_version >= 220000) +# include +# include +#else +# include +#endif +#include +#include +#ifndef linux +# include +#endif +#include +#if defined(_KERNEL) && !defined(linux) +# include +#endif +#if !defined(__SVR4) && !defined(__svr4__) +# ifndef linux +# include +# endif +#else +# include +# include +# include +# include +# include +#endif +#if __FreeBSD_version >= 300000 +# include +# include +#endif +#include +#if __FreeBSD_version >= 300000 +# include +#endif +#ifdef sun +#include +#endif +#include +#include +#include 
+#include + +#ifdef __sgi +# ifdef IFF_DRVRLOCK /* IRIX6 */ +#include +#include +# endif +#endif + +#if RFC1825 +#include +#include +extern struct ifnet vpnif; +#endif + +#ifndef linux +# include +#endif +#include +#include +#include +#include "netinet/ip_compat.h" +#include +#include "netinet/ip_fil.h" +#include "netinet/ip_proxy.h" +#include "netinet/ip_nat.h" +#include "netinet/ip_frag.h" +#include "netinet/ip_state.h" +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif +#undef SOCKADDR_IN +#define SOCKADDR_IN struct sockaddr_in + + nat_t *nat_table[2][NAT_SIZE], *nat_instances = NULL; +static ipnat_t *nat_list = NULL; +u_long fr_defnatage = 1200, /* 10 minutes (600 seconds) */ + fr_defnaticmpage = 6; /* 3 seconds */ +static natstat_t nat_stats; +#if (SOLARIS || defined(__sgi)) && defined(_KERNEL) +extern kmutex_t ipf_nat; +#endif + +static int nat_flushtable __P((void)); +static int nat_clearlist __P((void)); +static void nat_delete __P((struct nat *)); +static int nat_ifpaddr __P((nat_t *, void *, struct in_addr *)); + + +#define LONG_SUM(in) (((in) & 0xffff) + ((in) >> 16)) + +#define CALC_SUMD(s1, s2, sd) { \ + /* Do it twice */ \ + (s1) = ((s1) & 0xffff) + ((s1) >> 16); \ + (s1) = ((s1) & 0xffff) + ((s1) >> 16); \ + /* Do it twice */ \ + (s2) = ((s2) & 0xffff) + ((s2) >> 16); \ + (s2) = ((s2) & 0xffff) + ((s2) >> 16); \ + /* Because ~1 == -2, We really need ~1 == -1 */ \ + if ((s1) > (s2)) (s2)--; \ + (sd) = (s2) - (s1); \ + (sd) = ((sd) & 0xffff) + ((sd) >> 16); } + +void fix_outcksum(sp, n) +u_short *sp; +u_32_t n; +{ + register u_short sumshort; + register u_32_t sum1; + + if (!n) + return; + sum1 = (~ntohs(*sp)) & 0xffff; + sum1 += (n); + sum1 = (sum1 >> 16) + (sum1 & 0xffff); + /* Again */ + sum1 = (sum1 >> 16) + (sum1 & 0xffff); + sumshort = ~(u_short)sum1; + *(sp) = htons(sumshort); +} + + +void fix_incksum(sp, n) +u_short *sp; +u_32_t n; +{ + register u_short sumshort; + register u_32_t sum1; + + if (!n) + return; +#if sparc + sum1 = (~(*sp)) 
& 0xffff; +#else + sum1 = (~ntohs(*sp)) & 0xffff; +#endif + sum1 += ~(n) & 0xffff; + sum1 = (sum1 >> 16) + (sum1 & 0xffff); + /* Again */ + sum1 = (sum1 >> 16) + (sum1 & 0xffff); + sumshort = ~(u_short)sum1; + *(sp) = htons(sumshort); +} + + +/* + * How the NAT is organised and works. + * + * Inside (interface y) NAT Outside (interface x) + * -------------------- -+- ------------------------------------- + * Packet going | out, processsed by ip_natout() for x + * ------------> | ------------> + * src=10.1.1.1 | src=192.1.1.1 + * | + * | in, processed by ip_natin() for x + * <------------ | <------------ + * dst=10.1.1.1 | dst=192.1.1.1 + * -------------------- -+- ------------------------------------- + * ip_natout() - changes ip_src and if required, sport + * - creates a new mapping, if required. + * ip_natin() - changes ip_dst and if required, dport + * + * In the NAT table, internal source is recorded as "in" and externally + * seen as "out". + */ + +/* + * Handle ioctls which manipulate the NAT. 
+ */ +int nat_ioctl(data, cmd, mode) +#if defined(__NetBSD__) || defined(__OpenBSD__) || (__FreeBSD_version >= 300003) +u_long cmd; +#else +int cmd; +#endif +caddr_t data; +int mode; +{ + register ipnat_t *nat, *n = NULL, **np = NULL; + ipnat_t natd; + int error = 0, ret; +#if defined(_KERNEL) && !SOLARIS + int s; +#endif + + nat = NULL; /* XXX gcc -Wuninitialized */ + + /* + * For add/delete, look to see if the NAT entry is already present + */ + SPL_NET(s); + MUTEX_ENTER(&ipf_nat); + if ((cmd == SIOCADNAT) || (cmd == SIOCRMNAT)) { + IRCOPY(data, (char *)&natd, sizeof(natd)); + nat = &natd; + nat->in_inip &= nat->in_inmsk; + nat->in_outip &= nat->in_outmsk; + for (np = &nat_list; (n = *np); np = &n->in_next) + if (!bcmp((char *)&nat->in_flags, (char *)&n->in_flags, + IPN_CMPSIZ)) + break; + } + + switch (cmd) + { + case SIOCADNAT : + if (!(mode & FWRITE)) { + error = EPERM; + break; + } + if (n) { + error = EEXIST; + break; + } + KMALLOC(n, ipnat_t *, sizeof(*n)); + if (n == NULL) { + error = ENOMEM; + break; + } + bcopy((char *)nat, (char *)n, sizeof(*n)); + n->in_ifp = (void *)GETUNIT(n->in_ifname); + if (!n->in_ifp) + n->in_ifp = (void *)-1; + n->in_apr = ap_match(n->in_p, n->in_plabel); + n->in_next = *np; + n->in_use = 0; + n->in_space = ~(0xffffffff & ntohl(n->in_outmsk)); + if (n->in_space) /* lose 2: broadcast + network address */ + n->in_space -= 2; + else + n->in_space = 1; /* single IP# mapping */ + if ((n->in_outmsk != 0xffffffff) && n->in_outmsk) + n->in_nip = ntohl(n->in_outip) + 1; + else + n->in_nip = ntohl(n->in_outip); + if (n->in_redir & NAT_MAP) { + n->in_pnext = ntohs(n->in_pmin); + /* + * Multiply by the number of ports made available. 
+ */ + if (ntohs(n->in_pmax) > ntohs(n->in_pmin)) + n->in_space *= (ntohs(n->in_pmax) - + ntohs(n->in_pmin)); + } + /* Otherwise, these fields are preset */ + *np = n; + nat_stats.ns_rules++; + break; + case SIOCRMNAT : + if (!(mode & FWRITE)) { + error = EPERM; + break; + } + if (!n) { + error = ESRCH; + break; + } + *np = n->in_next; + if (!n->in_use) { + if (n->in_apr) + ap_free(n->in_apr); + KFREE(n); + nat_stats.ns_rules--; + } else { + n->in_flags |= IPN_DELETE; + n->in_next = NULL; + } + break; + case SIOCGNATS : + nat_stats.ns_table[0] = nat_table[0]; + nat_stats.ns_table[1] = nat_table[1]; + nat_stats.ns_list = nat_list; + IWCOPY((char *)&nat_stats, (char *)data, sizeof(nat_stats)); + break; + case SIOCGNATL : + { + natlookup_t nl; + + IRCOPY((char *)data, (char *)&nl, sizeof(nl)); + + if (nat_lookupredir(&nl)) { + IWCOPY((char *)&nl, (char *)data, sizeof(nl)); + } else + error = ESRCH; + break; + } + case SIOCFLNAT : + if (!(mode & FWRITE)) { + error = EPERM; + break; + } + ret = nat_flushtable(); + (void) ap_unload(); + IWCOPY((caddr_t)&ret, data, sizeof(ret)); + break; + case SIOCCNATL : + if (!(mode & FWRITE)) { + error = EPERM; + break; + } + ret = nat_clearlist(); + IWCOPY((caddr_t)&ret, data, sizeof(ret)); + break; + case FIONREAD : +#if IPFILTER_LOG + IWCOPY((caddr_t)&iplused[IPL_LOGNAT], (caddr_t)data, + sizeof(iplused[IPL_LOGNAT])); +#endif + break; + } + MUTEX_EXIT(&ipf_nat); + SPL_X(s); + return error; +} + + +/* + * Delete a nat entry from the various lists and table. 
+ */ +static void nat_delete(natd) +struct nat *natd; +{ + register struct nat **natp, *nat; + struct ipnat *ipn; + + for (natp = natd->nat_hstart[0]; (nat = *natp); + natp = &nat->nat_hnext[0]) + if (nat == natd) { + *natp = nat->nat_hnext[0]; + break; + } + + for (natp = natd->nat_hstart[1]; (nat = *natp); + natp = &nat->nat_hnext[1]) + if (nat == natd) { + *natp = nat->nat_hnext[1]; + break; + } + + /* + * If there is an active reference from the nat entry to its parent + * rule, decrement the rule's reference count and free it too if no + * longer being used. + */ + if ((ipn = natd->nat_ptr)) { + ipn->in_space++; + ipn->in_use--; + if (!ipn->in_use && (ipn->in_flags & IPN_DELETE)) { + if (ipn->in_apr) + ap_free(ipn->in_apr); + KFREE(ipn); + nat_stats.ns_rules--; + } + } + + /* + * If there's a fragment table entry too for this nat entry, then + * dereference that as well. + */ + ipfr_forget((void *)natd); + KFREE(natd); +} + + +/* + * nat_flushtable - clear the NAT table of all mapping entries. + */ +static int nat_flushtable() +{ + register nat_t *nat, **natp; + register int j = 0; + + /* + * Everything will be deleted, so lets just make it the deletions + * quicker. + */ + bzero((char *)nat_table[0], sizeof(nat_table[0])); + bzero((char *)nat_table[1], sizeof(nat_table[1])); + + for (natp = &nat_instances; (nat = *natp); ) { + *natp = nat->nat_next; + nat_delete(nat); + j++; + } + + return j; +} + + +/* + * nat_clearlist - delete all entries in the active NAT mapping list. 
+ */ +static int nat_clearlist() +{ + register ipnat_t *n, **np = &nat_list; + int i = 0; + + while ((n = *np)) { + *np = n->in_next; + if (!n->in_use) { + if (n->in_apr) + ap_free(n->in_apr); + KFREE(n); + nat_stats.ns_rules--; + i++; + } else { + n->in_flags |= IPN_DELETE; + n->in_next = NULL; + } + } + nat_stats.ns_inuse = 0; + return i; +} + + +/* + * return the first IP Address associated with an interface + */ +static int nat_ifpaddr(nat, ifptr, inp) +nat_t *nat; +void *ifptr; +struct in_addr *inp; +{ +#if SOLARIS + ill_t *ill = ifptr; +#else + struct ifnet *ifp = ifptr; +#endif + struct in_addr in; + +#if SOLARIS + in.s_addr = ntohl(ill->ill_ipif->ipif_local_addr); +#else /* SOLARIS */ +# if linux + ; +# else /* linux */ + struct ifaddr *ifa; + struct sockaddr_in *sin; + +# if (__FreeBSD_version >= 300000) + ifa = TAILQ_FIRST(&ifp->if_addrhead); +# else +# if defined(__NetBSD__) || defined(__OpenBSD__) + ifa = ifp->if_addrlist.tqh_first; +# else +# if defined(__sgi) && defined(IFF_DRVRLOCK) /* IRIX 6 */ + ifa = &((struct in_ifaddr *)ifp->in_ifaddr)->ia_ifa; +# else + ifa = ifp->if_addrlist; +# endif +# endif /* __NetBSD__ || __OpenBSD__ */ +# endif /* __FreeBSD_version >= 300000 */ +# if (BSD < 199306) && !(/*IRIX6*/defined(__sgi) && defined(IFF_DRVRLOCK)) + sin = (SOCKADDR_IN *)&ifa->ifa_addr; +# else + sin = (SOCKADDR_IN *)ifa->ifa_addr; + while (sin && ifa && + sin->sin_family != AF_INET) { +# if (__FreeBSD_version >= 300000) + ifa = TAILQ_NEXT(ifa, ifa_link); +# else +# if defined(__NetBSD__) || defined(__OpenBSD__) + ifa = ifa->ifa_list.tqe_next; +# else + ifa = ifa->ifa_next; +# endif +# endif /* __FreeBSD_version >= 300000 */ + if (ifa) + sin = (SOCKADDR_IN *)ifa->ifa_addr; + } + if (!ifa) + sin = NULL; + if (!sin) { + KFREE(nat); + return -1; + } +# endif /* (BSD < 199306) && (!__sgi && IFF_DRVLOCK) */ + in = sin->sin_addr; + in.s_addr = ntohl(in.s_addr); +# endif /* linux */ +#endif /* SOLARIS */ + *inp = in; + return 0; +} + + +/* + * Create a new 
NAT table entry. + */ +nat_t *nat_new(np, ip, fin, flags, direction) +ipnat_t *np; +ip_t *ip; +fr_info_t *fin; +u_short flags; +int direction; +{ + register u_32_t sum1, sum2, sumd, l; + u_short port = 0, sport = 0, dport = 0, nport = 0; + struct in_addr in; + tcphdr_t *tcp = NULL; + nat_t *nat, **natp; + u_short nflags; + + nflags = flags & np->in_flags; + if (flags & IPN_TCPUDP) { + tcp = (tcphdr_t *)fin->fin_dp; + sport = tcp->th_sport; + dport = tcp->th_dport; + } + + /* Give me a new nat */ + KMALLOC(nat, nat_t *, sizeof(*nat)); + if (nat == NULL) + return NULL; + + bzero((char *)nat, sizeof(*nat)); + nat->nat_flags = flags; + + /* + * Search the current table for a match. + */ + if (direction == NAT_OUTBOUND) { + /* + * If it's an outbound packet which doesn't match any existing + * record, then create a new port + */ + l = 0; + do { + l++; + port = 0; + in.s_addr = np->in_nip; + if (!in.s_addr && (np->in_outmsk == 0xffffffff)) { + if ((l > 1) || + nat_ifpaddr(nat, fin->fin_ifp, &in) == -1) { + KFREE(nat); + return NULL; + } + } else if (!in.s_addr && !np->in_outmsk) { + if (l > 1) { + KFREE(nat); + return NULL; + } + in.s_addr = ntohl(ip->ip_src.s_addr); + if (nflags & IPN_TCPUDP) + port = sport; + } else if (nflags & IPN_TCPUDP) { + port = htons(np->in_pnext++); + if (np->in_pnext >= ntohs(np->in_pmax)) { + np->in_pnext = ntohs(np->in_pmin); + np->in_space--; + if (np->in_outmsk != 0xffffffff) + np->in_nip++; + } + } else if (np->in_outmsk != 0xffffffff) { + np->in_space--; + np->in_nip++; + } + + if (!port && (flags & IPN_TCPUDP)) + port = sport; + if ((np->in_nip & ntohl(np->in_outmsk)) > + ntohl(np->in_outip)) + np->in_nip = ntohl(np->in_outip) + 1; + } while (nat_inlookup(fin->fin_ifp, flags, ip->ip_dst, + dport, in, port)); + + /* Setup the NAT table */ + nat->nat_inip = ip->ip_src; + nat->nat_outip.s_addr = htonl(in.s_addr); + nat->nat_oip = ip->ip_dst; + + sum1 = (ntohl(ip->ip_src.s_addr) & 0xffff) + + (ntohl(ip->ip_src.s_addr) >> 16) + ntohs(sport); 
+ + sum2 = (in.s_addr & 0xffff) + (in.s_addr >> 16) + ntohs(port); + + if (flags & IPN_TCPUDP) { + nat->nat_inport = sport; + nat->nat_outport = port; + nat->nat_oport = dport; + } + } else { + + /* + * Otherwise, it's an inbound packet. Most likely, we don't + * want to rewrite source ports and source addresses. Instead, + * we want to rewrite to a fixed internal address and fixed + * internal port. + */ + in.s_addr = ntohl(np->in_inip); + if (!(nport = np->in_pnext)) + nport = dport; + + nat->nat_inip.s_addr = htonl(in.s_addr); + nat->nat_outip = ip->ip_dst; + nat->nat_oip = ip->ip_src; + + sum1 = (ntohl(ip->ip_dst.s_addr) & 0xffff) + + (ntohl(ip->ip_dst.s_addr) >> 16) + ntohs(dport); + + sum2 = (in.s_addr & 0xffff) + (in.s_addr >> 16) + ntohs(nport); + + if (flags & IPN_TCPUDP) { + nat->nat_inport = nport; + nat->nat_outport = dport; + nat->nat_oport = sport; + } + } + + /* Do it twice */ + sum1 = (sum1 & 0xffff) + (sum1 >> 16); + sum1 = (sum1 & 0xffff) + (sum1 >> 16); + + /* Do it twice */ + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + + if (sum1 > sum2) + sum2--; /* Because ~1 == -2, We really need ~1 == -1 */ + sumd = sum2 - sum1; + sumd = (sumd & 0xffff) + (sumd >> 16); + nat->nat_sumd = (sumd & 0xffff) + (sumd >> 16); + + if ((flags & IPN_TCPUDP) && ((sport != port) || (dport != nport))) { + if (direction == NAT_OUTBOUND) + sum1 = (ntohl(ip->ip_src.s_addr) & 0xffff) + + (ntohl(ip->ip_src.s_addr) >> 16); + else + sum1 = (ntohl(ip->ip_dst.s_addr) & 0xffff) + + (ntohl(ip->ip_dst.s_addr) >> 16); + + sum2 = (in.s_addr & 0xffff) + (in.s_addr >> 16); + + /* Do it twice */ + sum1 = (sum1 & 0xffff) + (sum1 >> 16); + sum1 = (sum1 & 0xffff) + (sum1 >> 16); + + /* Do it twice */ + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + + if (sum1 > sum2) + sum2--; /* Because ~1 == -2, We really need ~1 == -1 */ + sumd = sum2 - sum1; + sumd = (sumd & 0xffff) + (sumd >> 16); + nat->nat_ipsumd = (sumd & 
0xffff) + (sumd >> 16); + } else + nat->nat_ipsumd = nat->nat_sumd; + + in.s_addr = htonl(in.s_addr); + nat->nat_next = nat_instances; + nat_instances = nat; + natp = &nat_table[0][nat->nat_inip.s_addr % NAT_SIZE]; + nat->nat_hstart[0] = natp; + nat->nat_hnext[0] = *natp; + *natp = nat; + natp = &nat_table[1][nat->nat_outip.s_addr % NAT_SIZE]; + nat->nat_hstart[1] = natp; + nat->nat_hnext[1] = *natp; + *natp = nat; + nat->nat_ptr = np; + nat->nat_bytes = 0; + nat->nat_pkts = 0; + nat->nat_ifp = fin->fin_ifp; + nat->nat_dir = direction; + if (direction == NAT_OUTBOUND) { + if (flags & IPN_TCPUDP) + tcp->th_sport = port; + } else { + if (flags & IPN_TCPUDP) + tcp->th_dport = nport; + } + nat_stats.ns_added++; + nat_stats.ns_inuse++; + np->in_use++; + return nat; +} + + +nat_t *nat_icmpinlookup(ip, fin) +ip_t *ip; +fr_info_t *fin; +{ + icmphdr_t *icmp; + tcphdr_t *tcp = NULL; + ip_t *oip; + int flags = 0, type; + + icmp = (icmphdr_t *)fin->fin_dp; + /* + * Does it at least have the return (basic) IP header ? + * Only a basic IP header (no options) should be with an ICMP error + * header. + */ + if ((ip->ip_hl != 5) || (ip->ip_len < sizeof(*icmp) + sizeof(ip_t))) + return NULL; + type = icmp->icmp_type; + /* + * If it's not an error type, then return. + */ + if ((type != ICMP_UNREACH) && (type != ICMP_SOURCEQUENCH) && + (type != ICMP_REDIRECT) && (type != ICMP_TIMXCEED) && + (type != ICMP_PARAMPROB)) + return NULL; + + oip = (ip_t *)((char *)fin->fin_dp + 8); + if (oip->ip_p == IPPROTO_TCP) + flags = IPN_TCP; + else if (oip->ip_p == IPPROTO_UDP) + flags = IPN_UDP; + if (flags & IPN_TCPUDP) { + tcp = (tcphdr_t *)((char *)oip + (oip->ip_hl << 2)); + return nat_inlookup(fin->fin_ifp, flags, oip->ip_dst, + tcp->th_dport, oip->ip_src, tcp->th_sport); + } + return nat_inlookup(fin->fin_ifp, 0, oip->ip_src, 0, oip->ip_dst, 0); +} + + +/* + * This should *ONLY* be used for incoming packets to make sure a NAT'd ICMP + * packet gets correctly recognised. 
+ */ +nat_t *nat_icmpin(ip, fin, nflags) +ip_t *ip; +fr_info_t *fin; +int *nflags; +{ + icmphdr_t *icmp; + nat_t *nat; + ip_t *oip; + int flags = 0; + + if (!(nat = nat_icmpinlookup(ip, fin))) + return NULL; + + *nflags = IPN_ICMPERR; + icmp = (icmphdr_t *)fin->fin_dp; + oip = (ip_t *)((char *)icmp + 8); + if (oip->ip_p == IPPROTO_TCP) + flags = IPN_TCP; + else if (oip->ip_p == IPPROTO_UDP) + flags = IPN_UDP; + /* + * Need to adjust ICMP header to include the real IP#'s and + * port #'s. Only apply a checksum change relative to the + * IP address change is it will be modified again in ip_natout + * for both address and port. Two checksum changes are + * necessary for the two header address changes. Be careful + * to only modify the checksum once for the port # and twice + * for the IP#. + */ + if (flags & IPN_TCPUDP) { + tcphdr_t *tcp = (tcphdr_t *)(oip + 1); + u_32_t sum1, sum2, sumd; + struct in_addr in; + + if (nat->nat_dir == NAT_OUTBOUND) { + sum1 = LONG_SUM(ntohl(oip->ip_src.s_addr)); + in = nat->nat_outip; + oip->ip_src = in; + tcp->th_sport = nat->nat_outport; + } else { + sum1 = LONG_SUM(ntohl(oip->ip_dst.s_addr)); + in = nat->nat_inip; + oip->ip_dst = in; + tcp->th_dport = nat->nat_inport; + } + + sum2 = LONG_SUM(in.s_addr); + + CALC_SUMD(sum1, sum2, sumd); + sumd = (sumd & 0xffff) + (sumd >> 16); + + if (nat->nat_dir == NAT_OUTBOUND) { + fix_incksum(&oip->ip_sum, sumd); + fix_incksum(&icmp->icmp_cksum, sumd); + } else { + fix_outcksum(&oip->ip_sum, sumd); + fix_outcksum(&icmp->icmp_cksum, sumd); + } + + /* + * TCP checksum doesn't make it into the 1st eight + * bytes but UDP does. 
+ */ + if (ip->ip_p == IPPROTO_UDP) { + udphdr_t *udp = (udphdr_t *)tcp; + + if (udp->uh_sum) { + if (nat->nat_dir == NAT_OUTBOUND) + fix_incksum(&udp->uh_sum, + nat->nat_sumd); + else + fix_outcksum(&udp->uh_sum, + nat->nat_sumd); + } + } + } else + ip->ip_dst = nat->nat_outip; + nat->nat_age = fr_defnaticmpage; + return nat; +} + + +/* + * NB: these lookups don't lock access to the list, it assume it has already + * been done! + */ +/* + * Lookup a nat entry based on the mapped destination ip address/port and + * real source address/port. We use this lookup when receiving a packet, + * we're looking for a table entry, based on the destination address. + * NOTE: THE PACKET BEING CHECKED (IF FOUND) HAS A MAPPING ALREADY. + */ +#ifdef __STDC__ +nat_t *nat_inlookup(void *ifp, int flags, struct in_addr src, u_short sport, struct in_addr mapdst, u_short mapdport) +#else +nat_t *nat_inlookup(ifp, flags, src, sport, mapdst, mapdport) +void *ifp; +register int flags; +struct in_addr src , mapdst; +u_short sport, mapdport; +#endif +{ + register nat_t *nat; + + flags &= IPN_TCPUDP; + + nat = nat_table[1][mapdst.s_addr % NAT_SIZE]; + for (; nat; nat = nat->nat_hnext[1]) + if ((!ifp || ifp == nat->nat_ifp) && + nat->nat_oip.s_addr == src.s_addr && + nat->nat_outip.s_addr == mapdst.s_addr && + flags == nat->nat_flags && (!flags || + (nat->nat_oport == sport && + nat->nat_outport == mapdport))) + return nat; + return NULL; +} + + +/* + * Lookup a nat entry based on the source 'real' ip address/port and + * destination address/port. We use this lookup when sending a packet out, + * we're looking for a table entry, based on the source address. + * NOTE: THE PACKET BEING CHECKED (IF FOUND) HAS A MAPPING ALREADY. 
+ */ +#ifdef __STDC__ +nat_t *nat_outlookup(void *ifp, int flags, struct in_addr src, u_short sport, struct in_addr dst, u_short dport) +#else +nat_t *nat_outlookup(ifp, flags, src, sport, dst, dport) +void *ifp; +register int flags; +struct in_addr src , dst; +u_short sport, dport; +#endif +{ + register nat_t *nat; + + flags &= IPN_TCPUDP; + + nat = nat_table[0][src.s_addr % NAT_SIZE]; + for (; nat; nat = nat->nat_hnext[0]) { + if ((!ifp || ifp == nat->nat_ifp) && + nat->nat_inip.s_addr == src.s_addr && + nat->nat_oip.s_addr == dst.s_addr && + flags == nat->nat_flags && (!flags || + (nat->nat_inport == sport && nat->nat_oport == dport))) + return nat; + } + return NULL; +} + + +/* + * Lookup a nat entry based on the mapped source ip address/port and + * real destination address/port. We use this lookup when sending a packet + * out, we're looking for a table entry, based on the source address. + */ +#ifdef __STDC__ +nat_t *nat_lookupmapip(void *ifp, int flags, struct in_addr mapsrc, u_short mapsport, struct in_addr dst, u_short dport) +#else +nat_t *nat_lookupmapip(ifp, flags, mapsrc, mapsport, dst, dport) +void *ifp; +register int flags; +struct in_addr mapsrc , dst; +u_short mapsport, dport; +#endif +{ + register nat_t *nat; + + flags &= IPN_TCPUDP; + + nat = nat_table[1][mapsrc.s_addr % NAT_SIZE]; + for (; nat; nat = nat->nat_hnext[0]) + if ((!ifp || ifp == nat->nat_ifp) && + nat->nat_oip.s_addr == dst.s_addr && + nat->nat_outip.s_addr == mapsrc.s_addr && + flags == nat->nat_flags && (!flags || + (nat->nat_outport == mapsport && + nat->nat_oport == dport))) + return nat; + return NULL; +} + + +/* + * Lookup the NAT tables to search for a matching redirect + */ +nat_t *nat_lookupredir(np) +register natlookup_t *np; +{ + nat_t *nat; + + /* + * If nl_inip is non null, this is a lookup based on the real + * ip address. Else, we use the fake. 
+ */ + if ((nat = nat_outlookup(NULL, np->nl_flags, np->nl_inip, + np->nl_inport, np->nl_outip, + np->nl_outport))) { + np->nl_realip = nat->nat_outip; + np->nl_realport = nat->nat_outport; + } + return nat; +} + + +/* + * Packets going out on the external interface go through this. + * Here, the source address requires alteration, if anything. + */ +int ip_natout(ip, hlen, fin) +ip_t *ip; +int hlen; +fr_info_t *fin; +{ + register ipnat_t *np; + register u_32_t ipa; + tcphdr_t *tcp = NULL; + u_short nflags = 0, sport = 0, dport = 0, *csump = NULL; + struct ifnet *ifp; + frentry_t *fr; + nat_t *nat; + int natadd = 1; + + if ((fr = fin->fin_fr) && !(fr->fr_flags & FR_DUP) && + fr->fr_tif.fd_ifp && fr->fr_tif.fd_ifp != (void *)-1) + ifp = fr->fr_tif.fd_ifp; + else + ifp = fin->fin_ifp; + + if (!(ip->ip_off & 0x1fff) && !(fin->fin_fi.fi_fl & FI_SHORT)) { + if (ip->ip_p == IPPROTO_TCP) + nflags = IPN_TCP; + else if (ip->ip_p == IPPROTO_UDP) + nflags = IPN_UDP; + if (nflags) { + tcp = (tcphdr_t *)fin->fin_dp; + sport = tcp->th_sport; + dport = tcp->th_dport; + } + } + + ipa = ip->ip_src.s_addr; + + MUTEX_ENTER(&ipf_nat); + if ((ip->ip_off & (IP_OFFMASK|IP_MF)) && + (nat = ipfr_nat_knownfrag(ip, fin))) + natadd = 0; + else if ((nat = nat_outlookup(ifp, nflags, ip->ip_src, sport, + ip->ip_dst, dport))) + ; + else + /* + * If there is no current entry in the nat table for this IP#, + * create one for it (if there is a matching rule). + */ + for (np = nat_list; np; np = np->in_next) + if ((np->in_ifp == ifp) && np->in_space && + (!np->in_flags || (np->in_flags & nflags)) && + ((ipa & np->in_inmsk) == np->in_inip) && + ((np->in_redir & NAT_MAP) || + (np->in_pnext == sport))) { + if (*np->in_plabel && !ap_ok(ip, tcp, np)) + continue; + /* + * If it's a redirection, then we don't want to + * create new outgoing port stuff. + * Redirections are only for incoming + * connections. 
+ */ + if (!(np->in_redir & NAT_MAP)) + continue; + if ((nat = nat_new(np, ip, fin, nflags, + NAT_OUTBOUND))) +#if IPFILTER_LOG + nat_log(nat, (u_short)np->in_redir); +#else + ; +#endif + break; + } + + if (nat) { + if (natadd && fin->fin_fi.fi_fl & FI_FRAG) + ipfr_nat_newfrag(ip, fin, 0, nat); + nat->nat_age = fr_defnatage; + ip->ip_src = nat->nat_outip; + nat->nat_bytes += ip->ip_len; + nat->nat_pkts++; + + /* + * Fix up checksums, not by recalculating them, but + * simply computing adjustments. + */ +#if SOLARIS || defined(__sgi) + if (nat->nat_dir == NAT_OUTBOUND) + fix_outcksum(&ip->ip_sum, nat->nat_ipsumd); + else + fix_incksum(&ip->ip_sum, nat->nat_ipsumd); +#endif + + if (nflags && !(ip->ip_off & 0x1fff) && + !(fin->fin_fi.fi_fl & FI_SHORT)) { + + if (nat->nat_outport) + tcp->th_sport = nat->nat_outport; + + if (ip->ip_p == IPPROTO_TCP) { + csump = &tcp->th_sum; + fr_tcp_age(&nat->nat_age, + nat->nat_state, ip, fin,1); + /* + * Increase this because we may have + * "keep state" following this too and + * packet storms can occur if this is + * removed too quickly. + */ + if (nat->nat_age == fr_tcpclosed) + nat->nat_age = fr_tcplastack; + } else if (ip->ip_p == IPPROTO_UDP) { + udphdr_t *udp = (udphdr_t *)tcp; + + if (udp->uh_sum) + csump = &udp->uh_sum; + } else if (ip->ip_p == IPPROTO_ICMP) { + icmphdr_t *ic = (icmphdr_t *)tcp; + + csump = &ic->icmp_cksum; + } + if (csump) { + if (nat->nat_dir == NAT_OUTBOUND) + fix_outcksum(csump, + nat->nat_sumd); + else + fix_incksum(csump, + nat->nat_sumd); + } + } + (void) ap_check(ip, tcp, fin, nat); + nat_stats.ns_mapped[1]++; + MUTEX_EXIT(&ipf_nat); + return -2; + } + MUTEX_EXIT(&ipf_nat); + return 0; +} + + +/* + * Packets coming in from the external interface go through this. + * Here, the destination address requires alteration, if anything. 
+ */ +int ip_natin(ip, hlen, fin) +ip_t *ip; +int hlen; +fr_info_t *fin; +{ + register ipnat_t *np; + register struct in_addr in; + struct ifnet *ifp = fin->fin_ifp; + tcphdr_t *tcp = NULL; + u_short sport = 0, dport = 0, *csump = NULL; + nat_t *nat; + int nflags = 0, natadd = 1; + + if (!(ip->ip_off & 0x1fff) && !(fin->fin_fi.fi_fl & FI_SHORT)) { + if (ip->ip_p == IPPROTO_TCP) + nflags = IPN_TCP; + else if (ip->ip_p == IPPROTO_UDP) + nflags = IPN_UDP; + if (nflags) { + tcp = (tcphdr_t *)((char *)ip + hlen); + dport = tcp->th_dport; + sport = tcp->th_sport; + } + } + + in = ip->ip_dst; + + MUTEX_ENTER(&ipf_nat); + + if ((ip->ip_p == IPPROTO_ICMP) && (nat = nat_icmpin(ip, fin, &nflags))) + ; + else if ((ip->ip_off & IP_OFFMASK) && + (nat = ipfr_nat_knownfrag(ip, fin))) + natadd = 0; + else if ((nat = nat_inlookup(fin->fin_ifp, nflags, ip->ip_src, sport, + ip->ip_dst, dport))) + ; + else + /* + * If there is no current entry in the nat table for this IP#, + * create one for it (if there is a matching rule). + */ + for (np = nat_list; np; np = np->in_next) + if ((np->in_ifp == ifp) && + (!np->in_flags || (nflags & np->in_flags)) && + ((in.s_addr & np->in_outmsk) == np->in_outip) && + (np->in_redir & NAT_REDIRECT) && + (!np->in_pmin || np->in_pmin == dport)) { + if ((nat = nat_new(np, ip, fin, nflags, + NAT_INBOUND))) +#if IPFILTER_LOG + nat_log(nat, (u_short)np->in_redir); +#else + ; +#endif + break; + } + if (nat) { + if (natadd && fin->fin_fi.fi_fl & FI_FRAG) + ipfr_nat_newfrag(ip, fin, 0, nat); + (void) ap_check(ip, tcp, fin, nat); + + if (nflags != IPN_ICMPERR) + nat->nat_age = fr_defnatage; + + ip->ip_dst = nat->nat_inip; + nat->nat_bytes += ip->ip_len; + nat->nat_pkts++; + + /* + * Fix up checksums, not by recalculating them, but + * simply computing adjustments. 
+ */ +#if SOLARIS || defined(__sgi) + if (nat->nat_dir == NAT_OUTBOUND) + fix_incksum(&ip->ip_sum, nat->nat_ipsumd); + else + fix_outcksum(&ip->ip_sum, nat->nat_ipsumd); +#endif + if ((nflags & IPN_TCPUDP) && !(ip->ip_off & 0x1fff) && + !(fin->fin_fi.fi_fl & FI_SHORT)) { + + if (nat->nat_inport) + tcp->th_dport = nat->nat_inport; + + if (ip->ip_p == IPPROTO_TCP) { + csump = &tcp->th_sum; + fr_tcp_age(&nat->nat_age, + nat->nat_state, ip, fin,0); + /* + * Increase this because we may have + * "keep state" following this too and + * packet storms can occur if this is + * removed too quickly. + */ + if (nat->nat_age == fr_tcpclosed) + nat->nat_age = fr_tcplastack; + } else if (ip->ip_p == IPPROTO_UDP) { + udphdr_t *udp = (udphdr_t *)tcp; + + if (udp->uh_sum) + csump = &udp->uh_sum; + } else if (ip->ip_p == IPPROTO_ICMP) { + icmphdr_t *ic = (icmphdr_t *)tcp; + + csump = &ic->icmp_cksum; + } + if (csump) { + if (nat->nat_dir == NAT_OUTBOUND) + fix_incksum(csump, + nat->nat_sumd); + else + fix_outcksum(csump, + nat->nat_sumd); + } + } + nat_stats.ns_mapped[0]++; + MUTEX_EXIT(&ipf_nat); + return -2; + } + MUTEX_EXIT(&ipf_nat); + return 0; +} + + +/* + * Free all memory used by NAT structures allocated at runtime. + */ +void ip_natunload() +{ + MUTEX_ENTER(&ipf_nat); + (void) nat_clearlist(); + (void) nat_flushtable(); + (void) ap_unload(); + MUTEX_EXIT(&ipf_nat); +} + + +/* + * Slowly expire held state for NAT entries. Timeouts are set in + * expectation of this being called twice per second. 
+ */ +void ip_natexpire() +{ + register struct nat *nat, **natp; +#if defined(_KERNEL) && !SOLARIS + int s; +#endif + + SPL_NET(s); + MUTEX_ENTER(&ipf_nat); + for (natp = &nat_instances; (nat = *natp); ) { + if (--nat->nat_age) { + natp = &nat->nat_next; + continue; + } + *natp = nat->nat_next; +#if IPFILTER_LOG + nat_log(nat, NL_EXPIRE); +#endif + nat_delete(nat); + nat_stats.ns_expire++; + } + + ap_expire(); + + MUTEX_EXIT(&ipf_nat); + SPL_X(s); +} + + +/* + */ +#ifdef __STDC__ +void ip_natsync(void *ifp) +#else +void ip_natsync(ifp) +void *ifp; +#endif +{ + register nat_t *nat; + register u_32_t sum1, sum2, sumd; + struct in_addr in; + ipnat_t *np; +#if defined(_KERNEL) && !SOLARIS + int s; +#endif + + SPL_NET(s); + MUTEX_ENTER(&ipf_nat); + for (nat = nat_instances; nat; nat = nat->nat_next) + if ((ifp == nat->nat_ifp) && (np = nat->nat_ptr)) + if ((np->in_outmsk == 0xffffffff) && !np->in_nip) { + /* + * Change the map-to address to be the same + * as the new one. + */ + sum1 = nat->nat_outip.s_addr; + if (nat_ifpaddr(nat, ifp, &in) == -1) + nat->nat_outip.s_addr = htonl(in.s_addr); + sum2 = nat->nat_outip.s_addr; + + /* + * Readjust the checksum adjustment to take + * into account the new IP#. 
+ * + * Do it twice + */ + sum1 = (sum1 & 0xffff) + (sum1 >> 16); + sum1 = (sum1 & 0xffff) + (sum1 >> 16); + + /* Do it twice */ + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + sum2 = (sum2 & 0xffff) + (sum2 >> 16); + + /* Because ~1 == -2, We really need ~1 == -1 */ + if (sum1 > sum2) + sum2--; + sumd = sum2 - sum1; + sumd = (sumd & 0xffff) + (sumd >> 16); + sumd += nat->nat_sumd; + nat->nat_sumd = (sumd & 0xffff) + (sumd >> 16); + } + MUTEX_EXIT(&ipf_nat); + SPL_X(s); +} + + +#if IPFILTER_LOG +# ifdef __STDC__ +void nat_log(struct nat *nat, u_short type) +# else +void nat_log(nat, type) +struct nat *nat; +u_short type; +# endif +{ + struct ipnat *np; + struct natlog natl; + void *items[1]; + size_t sizes[1]; + int rulen, types[1]; + + natl.nl_inip = nat->nat_inip; + natl.nl_outip = nat->nat_outip; + natl.nl_origip = nat->nat_oip; + natl.nl_bytes = nat->nat_bytes; + natl.nl_pkts = nat->nat_pkts; + natl.nl_origport = nat->nat_oport; + natl.nl_inport = nat->nat_inport; + natl.nl_outport = nat->nat_outport; + natl.nl_type = type; + natl.nl_rule = -1; + if (nat->nat_ptr) { + for (rulen = 0, np = nat_list; np; np = np->in_next, rulen++) + if (np == nat->nat_ptr) { + natl.nl_rule = rulen; + break; + } + } + items[0] = &natl; + sizes[0] = sizeof(natl); + types[0] = 0; + + (void) ipllog(IPL_LOGNAT, 0, items, sizes, types, 1); +} +#endif diff --git a/bsd/netinet/ip_nat.h b/bsd/netinet/ip_nat.h new file mode 100644 index 000000000..5df61b662 --- /dev/null +++ b/bsd/netinet/ip_nat.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1995-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + * @(#)ip_nat.h 1.5 2/4/96 + */ + +#ifndef __IP_NAT_H__ +#define __IP_NAT_H__ + +#ifndef SOLARIS +#define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) +#endif + +#if defined(__STDC__) || defined(__GNUC__) +#define SIOCADNAT _IOW('r', 80, struct ipnat) +#define SIOCRMNAT _IOW('r', 81, struct ipnat) +#define SIOCGNATS _IOR('r', 82, struct natstat) +#define SIOCGNATL _IOWR('r', 83, struct natlookup) +#define SIOCGFRST _IOR('r', 84, struct ipfrstat) +#define SIOCGIPST _IOR('r', 85, struct ips_stat) +#define SIOCFLNAT _IOWR('r', 86, int) +#define SIOCCNATL _IOWR('r', 87, int) +#else +#define SIOCADNAT _IOW(r, 80, struct ipnat) +#define SIOCRMNAT _IOW(r, 81, struct ipnat) +#define SIOCGNATS _IOR(r, 82, struct natstat) +#define SIOCGNATL _IOWR(r, 83, struct natlookup) +#define SIOCGFRST _IOR(r, 84, struct ipfrstat) +#define SIOCGIPST _IOR(r, 85, struct ips_stat) +#define SIOCFLNAT _IOWR(r, 86, int) +#define SIOCCNATL _IOWR(r, 87, int) +#endif + +#define NAT_SIZE 367 +#ifndef APR_LABELLEN +#define APR_LABELLEN 16 +#endif + +typedef struct nat { + u_long nat_age; + int nat_flags; + u_32_t nat_sumd; + u_32_t nat_ipsumd; + void *nat_data; + struct in_addr nat_inip; + struct in_addr nat_outip; + struct in_addr nat_oip; /* other ip 
*/ + U_QUAD_T nat_pkts; + U_QUAD_T nat_bytes; + u_short nat_oport; /* other port */ + u_short nat_inport; + u_short nat_outport; + u_short nat_use; + u_char nat_state[2]; + struct ipnat *nat_ptr; + struct nat *nat_next; + struct nat *nat_hnext[2]; + struct nat **nat_hstart[2]; + void *nat_ifp; + int nat_dir; +} nat_t; + +typedef struct ipnat { + struct ipnat *in_next; + void *in_ifp; + void *in_apr; + u_int in_space; + u_int in_use; + struct in_addr in_nextip; + u_short in_pnext; + u_short in_flags; + u_short in_port[2]; + struct in_addr in_in[2]; + struct in_addr in_out[2]; + int in_redir; /* 0 if it's a mapping, 1 if it's a hard redir */ + char in_ifname[IFNAMSIZ]; + char in_plabel[APR_LABELLEN]; /* proxy label */ + char in_p; /* protocol */ + u_short in_dport; +} ipnat_t; + +#define in_pmin in_port[0] /* Also holds static redir port */ +#define in_pmax in_port[1] +#define in_nip in_nextip.s_addr +#define in_inip in_in[0].s_addr +#define in_inmsk in_in[1].s_addr +#define in_outip in_out[0].s_addr +#define in_outmsk in_out[1].s_addr + +#define NAT_OUTBOUND 0 +#define NAT_INBOUND 1 + +#define NAT_MAP 0x01 +#define NAT_REDIRECT 0x02 +#define NAT_BIMAP (NAT_MAP|NAT_REDIRECT) + +#define IPN_CMPSIZ (sizeof(struct in_addr) * 4 + sizeof(u_short) * 3 + \ + sizeof(int) + IFNAMSIZ + APR_LABELLEN + sizeof(char)) + +typedef struct natlookup { + struct in_addr nl_inip; + struct in_addr nl_outip; + struct in_addr nl_realip; + int nl_flags; + u_short nl_inport; + u_short nl_outport; + u_short nl_realport; +} natlookup_t; + +typedef struct natstat { + u_long ns_mapped[2]; + u_long ns_rules; + u_long ns_added; + u_long ns_expire; + u_long ns_inuse; + u_long ns_logged; + u_long ns_logfail; + nat_t **ns_table[2]; + ipnat_t *ns_list; +} natstat_t; + +#define IPN_ANY 0x00 +#define IPN_TCP 0x01 +#define IPN_UDP 0x02 +#define IPN_TCPUDP 0x03 +#define IPN_DELETE 0x04 +#define IPN_ICMPERR 0x08 + + +typedef struct natlog { + struct in_addr nl_origip; + struct in_addr nl_outip; + struct 
in_addr nl_inip; + u_short nl_origport; + u_short nl_outport; + u_short nl_inport; + u_short nl_type; + int nl_rule; + U_QUAD_T nl_pkts; + U_QUAD_T nl_bytes; +} natlog_t; + + +#define NL_NEWMAP NAT_MAP +#define NL_NEWRDR NAT_REDIRECT +#define NL_EXPIRE 0xffff + + +extern void ip_natsync __P((void *)); +extern u_long fr_defnatage; +extern u_long fr_defnaticmpage; +extern nat_t *nat_table[2][NAT_SIZE]; +#if defined(__NetBSD__) || defined(__OpenBSD__) || (__FreeBSD_version >= 300003) +extern int nat_ioctl __P((caddr_t, u_long, int)); +#else +extern int nat_ioctl __P((caddr_t, int, int)); +#endif +extern nat_t *nat_new __P((ipnat_t *, ip_t *, fr_info_t *, u_short, int)); +extern nat_t *nat_outlookup __P((void *, int, struct in_addr, u_short, + struct in_addr, u_short)); +extern nat_t *nat_inlookup __P((void *, int, struct in_addr, u_short, + struct in_addr, u_short)); +extern nat_t *nat_lookupredir __P((natlookup_t *)); +extern nat_t *nat_lookupmapip __P((void *, int, struct in_addr, u_short, + struct in_addr, u_short)); +extern nat_t *nat_icmpinlookup __P((ip_t *, fr_info_t *)); +extern nat_t *nat_icmpin __P((ip_t *, fr_info_t *, int *)); + +extern int ip_natout __P((ip_t *, int, fr_info_t *)); +extern int ip_natin __P((ip_t *, int, fr_info_t *)); +extern void ip_natunload __P((void)), ip_natexpire __P((void)); +extern void nat_log __P((struct nat *, u_short)); +extern void fix_incksum __P((u_short *, u_32_t)); +extern void fix_outcksum __P((u_short *, u_32_t)); + +#endif /* __IP_NAT_H__ */ diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c new file mode 100644 index 000000000..7f74f5998 --- /dev/null +++ b/bsd/netinet/ip_output.c @@ -0,0 +1,1872 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_output.c 8.3 (Berkeley) 1/21/94 + */ + +#define _IP_VHL + +#if ISFB31 +#include "opt_ipfw.h" +#include "opt_ipdn.h" +#include "opt_ipdivert.h" +#include "opt_ipfilter.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#if INET6 +#include +#include +#endif +#include +#include +#include +#include + +#include + +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3) +#define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1) + + +#ifdef vax +#include +#endif + +#if ISFB31 +#include + +static MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options"); +#endif + +//static MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "internet multicast options"); + +#if IPSEC +#include +#include +#include + +#endif /*IPSEC*/ + +#if !defined(COMPAT_IPFW) || COMPAT_IPFW == 1 +#undef COMPAT_IPFW +#define COMPAT_IPFW 1 +#else +#undef COMPAT_IPFW +#endif + +#if COMPAT_IPFW +#include +#endif + +#if DUMMYNET +#include +#endif + +#if IPFIREWALL_FORWARD_DEBUG +#define print_ip(a) printf("%ld.%ld.%ld.%ld",(ntohl(a.s_addr)>>24)&0xFF,\ + (ntohl(a.s_addr)>>16)&0xFF,\ 
+ (ntohl(a.s_addr)>>8)&0xFF,\ + (ntohl(a.s_addr))&0xFF); +#endif + +u_short ip_id; + +static struct mbuf *ip_insertoptions __P((struct mbuf *, struct mbuf *, int *)); +static void ip_mloopback + __P((struct ifnet *, struct mbuf *, struct sockaddr_in *, int)); +static int ip_getmoptions + __P((struct sockopt *, struct ip_moptions *)); +static int ip_pcbopts __P((int, struct mbuf **, struct mbuf *)); +static int ip_setmoptions + __P((struct sockopt *, struct ip_moptions **)); +static u_long lo_dl_tag = 0; + +#if IPFILTER_LKM || IPFILTER +int ip_optcopy __P((struct ip *, struct ip *)); +extern int (*fr_checkp) __P((struct ip *, int, struct ifnet *, int, struct mbuf **)); +#else +static int ip_optcopy __P((struct ip *, struct ip *)); +#endif + + +extern struct protosw inetsw[]; + +/* + * IP output. The packet in mbuf chain m contains a skeletal IP + * header (with len, off, ttl, proto, tos, src, dst). + * The mbuf chain containing the packet will be freed. + * The mbuf opt, if present, will not be freed. + */ +int +ip_output(m0, opt, ro, flags, imo) + struct mbuf *m0; + struct mbuf *opt; + struct route *ro; + int flags; + struct ip_moptions *imo; +{ + struct ip *ip, *mhip; + struct ifnet *ifp; + u_long dl_tag; + struct mbuf *m = m0; + int hlen = sizeof (struct ip); + int len, off, error = 0; + struct sockaddr_in *dst; + struct in_ifaddr *ia; + int isbroadcast; +#if IPSEC + struct route iproute; + struct socket *so; + struct secpolicy *sp = NULL; +#endif +#if IPFIREWALL_FORWARD + int fwd_rewrite_src = 0; +#endif + + +#if !IPDIVERT /* dummy variable for the firewall code to play with */ + u_short ip_divert_cookie = 0 ; +#endif +#if COMPAT_IPFW + struct ip_fw_chain *rule = NULL ; +#endif + + KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + +#if IPSEC + /* + * NOTE: m->m_pkthdr is NULL cleared below just to prevent ipfw code + * from SEGV. + * ipfw code uses rcvif to determine incoming interface, and + * KAME uses rcvif for ipsec processing. 
+ * ipfw may not be working right with KAME at this moment. + * We need more tests. + */ +#if DUMMYNET + if (m->m_type == MT_DUMMYNET) { + if (m->m_next != NULL) { + so = (struct socket *)m->m_next->m_pkthdr.rcvif; + m->m_next->m_pkthdr.rcvif = NULL; + } else + so = NULL; + } else +#endif + { + so = ipsec_getsocket(m); + ipsec_setsocket(m, NULL); + } +#endif /*IPSEC*/ + + +#if IPFIREWALL && DUMMYNET + /* + * dummynet packet are prepended a vestigial mbuf with + * m_type = MT_DUMMYNET and m_data pointing to the matching + * rule. + */ + if (m->m_type == MT_DUMMYNET) { + struct mbuf *tmp_m = m ; + /* + * the packet was already tagged, so part of the + * processing was already done, and we need to go down. + * opt, flags and imo have already been used, and now + * they are used to hold ifp and hlen and NULL, respectively. + */ + rule = (struct ip_fw_chain *)(m->m_data) ; + m = m->m_next ; + FREE(tmp_m, M_IPFW); + ip = mtod(m, struct ip *); + dst = (struct sockaddr_in *)&ro->ro_dst; + ifp = (struct ifnet *)opt; + hlen = IP_VHL_HL(ip->ip_vhl) << 2 ; + opt = NULL ; + flags = 0 ; /* XXX is this correct ? */ + goto sendit; + } else + rule = NULL ; +#endif + +#if DIAGNOSTIC + if ((m->m_flags & M_PKTHDR) == 0) + panic("ip_output no HDR"); + if (!ro) + panic("ip_output no route, proto = %d", + mtod(m, struct ip *)->ip_p); +#endif + if (opt) { + m = ip_insertoptions(m, opt, &len); + hlen = len; + } + ip = mtod(m, struct ip *); + /* + * Fill in IP header. + */ + if ((flags & (IP_FORWARDING|IP_RAWOUTPUT)) == 0) { + ip->ip_vhl = IP_MAKE_VHL(IPVERSION, hlen >> 2); + ip->ip_off &= IP_DF; + ip->ip_id = htons(ip_id++); + ipstat.ips_localout++; + } else { + hlen = IP_VHL_HL(ip->ip_vhl) << 2; + } + + KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, + ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); + + dst = (struct sockaddr_in *)&ro->ro_dst; + /* + * If there is a cached route, + * check that it is to the same destination + * and is still up. If not, free it and try again. 
+ */ + if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 || + dst->sin_addr.s_addr != ip->ip_dst.s_addr)) { + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + } + if (ro->ro_rt == 0) { + dst->sin_family = AF_INET; + dst->sin_len = sizeof(*dst); + dst->sin_addr = ip->ip_dst; + } + /* + * If routing to interface only, + * short circuit routing lookup. + */ +#define ifatoia(ifa) ((struct in_ifaddr *)(ifa)) +#define sintosa(sin) ((struct sockaddr *)(sin)) + if (flags & IP_ROUTETOIF) { + if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == 0 && + (ia = ifatoia(ifa_ifwithnet(sintosa(dst)))) == 0) { + ipstat.ips_noroute++; + error = ENETUNREACH; + goto bad; + } + ifp = ia->ia_ifp; + dl_tag = ia->ia_ifa.ifa_dlt; + ip->ip_ttl = 1; + isbroadcast = in_broadcast(dst->sin_addr, ifp); + } else { + /* + * If this is the case, we probably don't want to allocate + * a protocol-cloned route since we didn't get one from the + * ULP. This lets TCP do its thing, while not burdening + * forwarding or ICMP with the overhead of cloning a route. + * Of course, we still want to do any cloning requested by + * the link layer, as this is probably required in all cases + * for correct operation (as it is for ARP). + */ + if (ro->ro_rt == 0) + rtalloc_ign(ro, RTF_PRCLONING); + if (ro->ro_rt == 0) { + ipstat.ips_noroute++; + error = EHOSTUNREACH; + goto bad; + } + ia = ifatoia(ro->ro_rt->rt_ifa); + ifp = ro->ro_rt->rt_ifp; + dl_tag = ro->ro_rt->rt_dlt; + ro->ro_rt->rt_use++; + if (ro->ro_rt->rt_flags & RTF_GATEWAY) + dst = (struct sockaddr_in *)ro->ro_rt->rt_gateway; + if (ro->ro_rt->rt_flags & RTF_HOST) + isbroadcast = (ro->ro_rt->rt_flags & RTF_BROADCAST); + else + isbroadcast = in_broadcast(dst->sin_addr, ifp); + } + if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { + struct in_multi *inm; + + m->m_flags |= M_MCAST; + /* + * IP destination address is multicast. Make sure "dst" + * still points to the address in "ro". 
(It may have been + * changed to point to a gateway address, above.) + */ + dst = (struct sockaddr_in *)&ro->ro_dst; + /* + * See if the caller provided any multicast options + */ + if (imo != NULL) { + ip->ip_ttl = imo->imo_multicast_ttl; + if (imo->imo_multicast_ifp != NULL) + ifp = imo->imo_multicast_ifp; + if (imo->imo_multicast_vif != -1) + ip->ip_src.s_addr = + ip_mcast_src(imo->imo_multicast_vif); + } else + ip->ip_ttl = IP_DEFAULT_MULTICAST_TTL; + /* + * Confirm that the outgoing interface supports multicast. + */ + if ((imo == NULL) || (imo->imo_multicast_vif == -1)) { + if ((ifp->if_flags & IFF_MULTICAST) == 0) { + ipstat.ips_noroute++; + error = ENETUNREACH; + goto bad; + } + } + /* + * If source address not specified yet, use address + * of outgoing interface. + */ + if (ip->ip_src.s_addr == INADDR_ANY) { + register struct in_ifaddr *ia1; + + for (ia1 = in_ifaddrhead.tqh_first; ia1; + ia1 = ia1->ia_link.tqe_next) + if (ia1->ia_ifp == ifp) { + ip->ip_src = IA_SIN(ia1)->sin_addr; + break; + } + } + + IN_LOOKUP_MULTI(ip->ip_dst, ifp, inm); + if (inm != NULL && + (imo == NULL || imo->imo_multicast_loop)) { + /* + * If we belong to the destination multicast group + * on the outgoing interface, and the caller did not + * forbid loopback, loop back a copy. + */ + ip_mloopback(ifp, m, dst, hlen); + } + else { + /* + * If we are acting as a multicast router, perform + * multicast forwarding as if the packet had just + * arrived on the interface to which we are about + * to send. The multicast forwarding function + * recursively calls this function, using the + * IP_FORWARDING flag to prevent infinite recursion. + * + * Multicasts that are looped back by ip_mloopback(), + * above, will be forwarded by the ip_input() routine, + * if necessary. + */ + if (ip_mrouter && (flags & IP_FORWARDING) == 0) { + /* + * Check if rsvp daemon is running. If not, don't + * set ip_moptions. 
This ensures that the packet + * is multicast and not just sent down one link + * as prescribed by rsvpd. + */ + if (!rsvp_on) + imo = NULL; + if (ip_mforward(ip, ifp, m, imo) != 0) { + m_freem(m); + goto done; + } + } + } + + /* + * Multicasts with a time-to-live of zero may be looped- + * back, above, but must not be transmitted on a network. + * Also, multicasts addressed to the loopback interface + * are not sent -- the above call to ip_mloopback() will + * loop back a copy if this host actually belongs to the + * destination group on the loopback interface. + */ + if (ip->ip_ttl == 0 || ifp->if_flags & IFF_LOOPBACK) { + m_freem(m); + goto done; + } + + goto sendit; + } +#ifndef notdef + /* + * If source address not specified yet, use address + * of outgoing interface. + */ + if (ip->ip_src.s_addr == INADDR_ANY) { + ip->ip_src = IA_SIN(ia)->sin_addr; +#if IPFIREWALL_FORWARD + /* Keep note that we did this - if the firewall changes + * the next-hop, our interface may change, changing the + * default source IP. It's a shame so much effort happens + * twice. Oh well. + */ + fwd_rewrite_src++; +#endif /* IPFIREWALL_FORWARD */ + } +#endif /* notdef */ + /* + * Verify that we have any chance at all of being able to queue + * the packet or packet fragments + */ + if ((ifp->if_snd.ifq_len + ip->ip_len / ifp->if_mtu + 1) >= + ifp->if_snd.ifq_maxlen) { + error = ENOBUFS; + goto bad; + } + + /* + * Look for broadcast address and + * and verify user is allowed to send + * such a packet. + */ + if (isbroadcast) { + if ((ifp->if_flags & IFF_BROADCAST) == 0) { + error = EADDRNOTAVAIL; + goto bad; + } + if ((flags & IP_ALLOWBROADCAST) == 0) { + error = EACCES; + goto bad; + } + /* don't allow broadcast messages to be fragmented */ + if ((u_short)ip->ip_len > ifp->if_mtu) { + error = EMSGSIZE; + goto bad; + } + m->m_flags |= M_BCAST; + } else { + m->m_flags &= ~M_BCAST; + } + +sendit: + /* + * IpHack's section. + * - Xlate: translate packet's addr/port (NAT). 
+ * - Firewall: deny/allow/etc. + * - Wrap: fake packet's addr/port + * - Encapsulate: put it in another IP and send out. + */ +#if IPFILTER || IPFILTER_LKM + if (fr_checkp) { + struct mbuf *m1 = m; + + if ((error = (*fr_checkp)(ip, hlen, ifp, 1, &m1)) || !m1) + goto done; + ip = mtod(m = m1, struct ip *); + } +#endif + +#if COMPAT_IPFW + if (ip_nat_ptr && !(*ip_nat_ptr)(&ip, &m, ifp, IP_NAT_OUT)) { + error = EACCES; + goto done; + } + + /* + * Check with the firewall... + */ + if (ip_fw_chk_ptr) { + struct sockaddr_in *old = dst; + + off = (*ip_fw_chk_ptr)(&ip, + hlen, ifp, &ip_divert_cookie, &m, &rule, &dst); + /* + * On return we must do the following: + * m == NULL -> drop the pkt + * 1<=off<= 0xffff -> DIVERT + * (off & 0x10000) -> send to a DUMMYNET pipe + * dst != old -> IPFIREWALL_FORWARD + * off==0, dst==old -> accept + * If some of the above modules is not compiled in, then + * we should't have to check the corresponding condition + * (because the ipfw control socket should not accept + * unsupported rules), but better play safe and drop + * packets in case of doubt. + */ + if (!m) { /* firewall said to reject */ + error = EACCES; + goto done; + } + if (off == 0 && dst == old) /* common case */ + goto pass ; +#if DUMMYNET + if (off & 0x10000) { + /* + * pass the pkt to dummynet. Need to include + * pipe number, m, ifp, ro, hlen because these are + * not recomputed in the next pass. + * All other parameters have been already used and + * so they are not needed anymore. + * XXX note: if the ifp or ro entry are deleted + * while a pkt is in dummynet, we are in trouble! 
+ */ + dummynet_io(off & 0xffff, DN_TO_IP_OUT, m,ifp,ro,hlen,rule); + goto done; + } +#endif +#if IPDIVERT + if (off > 0 && off < 0x10000) { /* Divert packet */ + ip_divert_port = off & 0xffff ; + (*ip_protox[IPPROTO_DIVERT]->pr_input)(m, 0); + goto done; + } +#endif + +#if IPFIREWALL_FORWARD + /* Here we check dst to make sure it's directly reachable on the + * interface we previously thought it was. + * If it isn't (which may be likely in some situations) we have + * to re-route it (ie, find a route for the next-hop and the + * associated interface) and set them here. This is nested + * forwarding which in most cases is undesirable, except where + * such control is nigh impossible. So we do it here. + * And I'm babbling. + */ + if (off == 0 && old != dst) { + struct in_ifaddr *ia; + + /* It's changed... */ + /* There must be a better way to do this next line... */ + static struct route sro_fwd, *ro_fwd = &sro_fwd; +#if IPFIREWALL_FORWARD_DEBUG + printf("IPFIREWALL_FORWARD: New dst ip: "); + print_ip(dst->sin_addr); + printf("\n"); +#endif + /* + * We need to figure out if we have been forwarded + * to a local socket. If so then we should somehow + * "loop back" to ip_input, and get directed to the + * PCB as if we had received this packet. This is + * because it may be dificult to identify the packets + * you want to forward until they are being output + * and have selected an interface. (e.g. locally + * initiated packets) If we used the loopback inteface, + * we would not be able to control what happens + * as the packet runs through ip_input() as + * it is done through a ISR. + */ + for (ia = TAILQ_FIRST(&in_ifaddrhead); ia; + ia = TAILQ_NEXT(ia, ia_link)) { + /* + * If the addr to forward to is one + * of ours, we pretend to + * be the destination for this packet. 
+ */ + if (IA_SIN(ia)->sin_addr.s_addr == + dst->sin_addr.s_addr) + break; + } + if (ia) { + /* tell ip_input "dont filter" */ + ip_fw_fwd_addr = dst; + if (m->m_pkthdr.rcvif == NULL) + m->m_pkthdr.rcvif = ifunit("lo0"); + ip->ip_len = htons((u_short)ip->ip_len); + ip->ip_off = htons((u_short)ip->ip_off); + ip->ip_sum = 0; + + ip->ip_sum = in_cksum(m, hlen); + + ip_input(m); + goto done; + } + /* Some of the logic for this was + * nicked from above. + * + * This rewrites the cached route in a local PCB. + * Is this what we want to do? + */ + bcopy(dst, &ro_fwd->ro_dst, sizeof(*dst)); + + ro_fwd->ro_rt = 0; + rtalloc_ign(ro_fwd, RTF_PRCLONING); + + if (ro_fwd->ro_rt == 0) { + ipstat.ips_noroute++; + error = EHOSTUNREACH; + goto bad; + } + + ia = ifatoia(ro_fwd->ro_rt->rt_ifa); + ifp = ro_fwd->ro_rt->rt_ifp; + dl_tag = ro->ro_rt->rt_dlt; + ro_fwd->ro_rt->rt_use++; + if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY) + dst = (struct sockaddr_in *)ro_fwd->ro_rt->rt_gateway; + if (ro_fwd->ro_rt->rt_flags & RTF_HOST) + isbroadcast = + (ro_fwd->ro_rt->rt_flags & RTF_BROADCAST); + else + isbroadcast = in_broadcast(dst->sin_addr, ifp); + RTFREE(ro->ro_rt); + ro->ro_rt = ro_fwd->ro_rt; + dst = (struct sockaddr_in *)&ro_fwd->ro_dst; + + /* + * If we added a default src ip earlier, + * which would have been gotten from the-then + * interface, do it again, from the new one. + */ + if (fwd_rewrite_src) + ip->ip_src = IA_SIN(ia)->sin_addr; + goto pass ; + } +#endif /* IPFIREWALL_FORWARD */ + /* + * if we get here, none of the above matches, and + * we have to drop the pkt + */ + m_freem(m); + error = EACCES; /* not sure this is the right error msg */ + goto done; + } +#endif /* COMPAT_IPFW */ + +pass: + +#if defined(PM) + /* + * Processing IP filter/NAT. + * Return TRUE iff this packet is discarded. + * Return FALSE iff this packet is accepted. 
+ */ + + if (doNatFil && pm_out(ro->ro_rt->rt_ifp, ip, m)) + goto done; +#endif + +#if IPSEC + /* get SP for this packet */ + if (so == NULL) + sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, flags, &error); + else + sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error); + + if (sp == NULL) { + ipsecstat.out_inval++; + goto bad; + } + + error = 0; + + /* check policy */ + switch (sp->policy) { + case IPSEC_POLICY_DISCARD: + /* + * This packet is just discarded. + */ + ipsecstat.out_polvio++; + goto bad; + + case IPSEC_POLICY_BYPASS: + case IPSEC_POLICY_NONE: + /* no need to do IPsec. */ + goto skip_ipsec; + + case IPSEC_POLICY_IPSEC: + if (sp->req == NULL) { + /* XXX should be panic ? */ + printf("ip_output: No IPsec request specified.\n"); + error = EINVAL; + goto bad; + } + break; + + case IPSEC_POLICY_ENTRUST: + default: + printf("ip_output: Invalid policy found. %d\n", sp->policy); + } + + ip->ip_len = htons((u_short)ip->ip_len); + ip->ip_off = htons((u_short)ip->ip_off); + ip->ip_sum = 0; + + { + struct ipsec_output_state state; + bzero(&state, sizeof(state)); + state.m = m; + if (flags & IP_ROUTETOIF) { + state.ro = &iproute; + bzero(&iproute, sizeof(iproute)); + } else + state.ro = ro; + state.dst = (struct sockaddr *)dst; + + error = ipsec4_output(&state, sp, flags); + + m = state.m; + if (flags & IP_ROUTETOIF) { + /* + * if we have tunnel mode SA, we may need to ignore + * IP_ROUTETOIF. + */ + if (state.ro != &iproute || state.ro->ro_rt != NULL) { + flags &= ~IP_ROUTETOIF; + ro = state.ro; + } + } else + ro = state.ro; + dst = (struct sockaddr_in *)state.dst; + if (error) { + /* mbuf is already reclaimed in ipsec4_output. 
*/ + m0 = NULL; + switch (error) { + case EHOSTUNREACH: + case ENETUNREACH: + case EMSGSIZE: + case ENOBUFS: + case ENOMEM: + break; + default: + printf("ip4_output (ipsec): error code %d\n", error); + /*fall through*/ + case ENOENT: + /* don't show these error codes to the user */ + error = 0; + break; + } + goto bad; + } + } + + /* be sure to update variables that are affected by ipsec4_output() */ + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + if (ro->ro_rt == NULL) { + if ((flags & IP_ROUTETOIF) == 0) { + printf("ip_output: " + "can't update route after IPsec processing\n"); + error = EHOSTUNREACH; /*XXX*/ + goto bad; + } + } else { + /* nobody uses ia beyond here */ + ifp = ro->ro_rt->rt_ifp; + } + + /* make it flipped, again. */ + ip->ip_len = ntohs((u_short)ip->ip_len); + ip->ip_off = ntohs((u_short)ip->ip_off); +skip_ipsec: +#endif /*IPSEC*/ + + /* + * If small enough for interface, can just send directly. + */ + if ((u_short)ip->ip_len <= ifp->if_mtu) { + ip->ip_len = htons((u_short)ip->ip_len); + ip->ip_off = htons((u_short)ip->ip_off); + ip->ip_sum = 0; + ip->ip_sum = in_cksum(m, hlen); + error = dlil_output(dl_tag, m, (void *) ro->ro_rt, + (struct sockaddr *)dst, 0); + goto done; + } + /* + * Too large for interface; fragment if possible. + * Must be able to put at least 8 bytes per fragment. + */ + if (ip->ip_off & IP_DF) { + error = EMSGSIZE; + /* + * This case can happen if the user changed the MTU + * of an interface after enabling IP on it. Because + * most netifs don't keep track of routes pointing to + * them, there is no way for one to update all its + * routes when the MTU is changed. 
+ */ + if ((ro->ro_rt->rt_flags & (RTF_UP | RTF_HOST)) + && !(ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) + && (ro->ro_rt->rt_rmx.rmx_mtu > ifp->if_mtu)) { + ro->ro_rt->rt_rmx.rmx_mtu = ifp->if_mtu; + } + ipstat.ips_cantfrag++; + goto bad; + } + len = (ifp->if_mtu - hlen) &~ 7; + if (len < 8) { + error = EMSGSIZE; + goto bad; + } + + { + int mhlen, firstlen = len; + struct mbuf **mnext = &m->m_nextpkt; + + /* + * Loop through length of segment after first fragment, + * make new header and copy data of each part and link onto chain. + */ + m0 = m; + mhlen = sizeof (struct ip); + for (off = hlen + len; off < (u_short)ip->ip_len; off += len) { + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (m == 0) { + error = ENOBUFS; + ipstat.ips_odropped++; + goto sendorfree; + } + m->m_flags |= (m0->m_flags & M_MCAST); + m->m_data += max_linkhdr; + mhip = mtod(m, struct ip *); + *mhip = *ip; + if (hlen > sizeof (struct ip)) { + mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); + mhip->ip_vhl = IP_MAKE_VHL(IPVERSION, mhlen >> 2); + } + m->m_len = mhlen; + mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF); + if (ip->ip_off & IP_MF) + mhip->ip_off |= IP_MF; + if (off + len >= (u_short)ip->ip_len) + len = (u_short)ip->ip_len - off; + else + mhip->ip_off |= IP_MF; + mhip->ip_len = htons((u_short)(len + mhlen)); + m->m_next = m_copy(m0, off, len); + if (m->m_next == 0) { + (void) m_free(m); + error = ENOBUFS; /* ??? */ + ipstat.ips_odropped++; + goto sendorfree; + } + m->m_pkthdr.len = mhlen + len; + m->m_pkthdr.rcvif = (struct ifnet *)0; + mhip->ip_off = htons((u_short)mhip->ip_off); + mhip->ip_sum = 0; + mhip->ip_sum = in_cksum(m, mhlen); + *mnext = m; + mnext = &m->m_nextpkt; + ipstat.ips_ofragments++; + } + /* + * Update first fragment by trimming what's been copied out + * and updating header, then send each fragment (in order). 
+ */ + m = m0; + m_adj(m, hlen + firstlen - (u_short)ip->ip_len); + m->m_pkthdr.len = hlen + firstlen; + ip->ip_len = htons((u_short)m->m_pkthdr.len); + ip->ip_off = htons((u_short)(ip->ip_off | IP_MF)); + ip->ip_sum = 0; + ip->ip_sum = in_cksum(m, hlen); + +sendorfree: + + KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr, + ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); + + for (m = m0; m; m = m0) { + m0 = m->m_nextpkt; + m->m_nextpkt = 0; + if (error == 0) + error = dlil_output(dl_tag, m, (void *) ro->ro_rt, + (struct sockaddr *)dst, 0); + else + m_freem(m); + } + + if (error == 0) + ipstat.ips_fragmented++; + } +done: +#if IPSEC + if (ro == &iproute && ro->ro_rt) { + RTFREE(ro->ro_rt); + ro->ro_rt = NULL; + } + if (sp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ip_output call free SP:%x\n", sp)); + key_freesp(sp); + } +#endif /* IPSEC */ + + KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_END, error,0,0,0,0); + return (error); +bad: + m_freem(m0); + goto done; +} + +/* + * Insert IP options into preformed packet. + * Adjust IP destination as required for IP source routing, + * as indicated by a non-zero in_addr at the start of the options. + * + * XXX This routine assumes that the packet has no options in place. 
+ */
+static struct mbuf *
+ip_insertoptions(m, opt, phlen)
+	register struct mbuf *m;
+	struct mbuf *opt;
+	int *phlen;
+{
+	register struct ipoption *p = mtod(opt, struct ipoption *);
+	struct mbuf *n;
+	register struct ip *ip = mtod(m, struct ip *);
+	unsigned optlen;
+
+	/*
+	 * Option bytes proper: the leading ipopt_dst (first-hop address
+	 * for source routing) is carried before the option list and is
+	 * never copied into the packet itself.
+	 */
+	optlen = opt->m_len - sizeof(p->ipopt_dst);
+	if (optlen + (u_short)ip->ip_len > IP_MAXPACKET)
+		return (m);		/* XXX should fail */
+	/* Source route present: redirect the packet to the first hop. */
+	if (p->ipopt_dst.s_addr)
+		ip->ip_dst = p->ipopt_dst;
+	if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) {
+		/*
+		 * Cannot prepend in place (cluster-backed, or no headroom
+		 * in the internal data area): get a fresh header mbuf,
+		 * chain the old one behind it, and copy the IP header
+		 * forward into the new mbuf ahead of the options.
+		 * On MGETHDR failure the packet is sent without options.
+		 */
+		MGETHDR(n, M_DONTWAIT, MT_HEADER);
+		if (n == 0)
+			return (m);
+		n->m_pkthdr.len = m->m_pkthdr.len + optlen;
+		m->m_len -= sizeof(struct ip);
+		m->m_data += sizeof(struct ip);
+		n->m_next = m;
+		m = n;
+		m->m_len = optlen + sizeof(struct ip);
+		m->m_data += max_linkhdr;
+		(void)memcpy(mtod(m, void *), ip, sizeof(struct ip));
+	} else {
+		/*
+		 * Headroom available: slide the IP header back by optlen
+		 * to open a gap for the options.  Source and destination
+		 * overlap, hence ovbcopy rather than bcopy.
+		 */
+		m->m_data -= optlen;
+		m->m_len += optlen;
+		m->m_pkthdr.len += optlen;
+		ovbcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
+	}
+	/* ip may have moved; refetch before writing the option list. */
+	ip = mtod(m, struct ip *);
+	bcopy(p->ipopt_list, ip + 1, optlen);
+	*phlen = sizeof(struct ip) + optlen;
+	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, *phlen >> 2);
+	/* ip_len is still in host order at this point in ip_output(). */
+	ip->ip_len += optlen;
+	return (m);
+}
+
+/*
+ * Copy options from ip to jp,
+ * omitting those not copied during fragmentation.
+ */
+#if !IPFILTER && !IPFILTER_LKM
+static
+#endif
+int
+ip_optcopy(ip, jp)
+	struct ip *ip, *jp;
+{
+	register u_char *cp, *dp;
+	int opt, optlen, cnt;
+
+	cp = (u_char *)(ip + 1);
+	dp = (u_char *)(jp + 1);
+	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
+	for (; cnt > 0; cnt -= optlen, cp += optlen) {
+		opt = cp[0];
+		if (opt == IPOPT_EOL)
+			break;
+		if (opt == IPOPT_NOP) {
+			/* Preserve for IP mcast tunnel's LSRR alignment.
*/ + *dp++ = IPOPT_NOP; + optlen = 1; + continue; + } else + optlen = cp[IPOPT_OLEN]; + /* bogus lengths should have been caught by ip_dooptions */ + if (optlen > cnt) + optlen = cnt; + if (IPOPT_COPIED(opt)) { + bcopy(cp, dp, optlen); + dp += optlen; + } + } + for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++) + *dp++ = IPOPT_EOL; + return (optlen); +} + +/* + * IP socket option processing. + */ +int +ip_ctloutput(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + struct inpcb *inp = sotoinpcb(so); + int error, optval; + + error = optval = 0; + if (sopt->sopt_level != IPPROTO_IP) { + return (EINVAL); + } + + switch (sopt->sopt_dir) { + case SOPT_SET: + switch (sopt->sopt_name) { + case IP_OPTIONS: +#ifdef notyet + case IP_RETOPTS: +#endif + { + struct mbuf *m; + if (sopt->sopt_valsize > MLEN) { + error = EMSGSIZE; + break; + } + MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_HEADER); + if (m == 0) { + error = ENOBUFS; + break; + } + m->m_len = sopt->sopt_valsize; + error = sooptcopyin(sopt, mtod(m, char *), m->m_len, + m->m_len); + if (error) + break; + + return (ip_pcbopts(sopt->sopt_name, &inp->inp_options, + m)); + } + + case IP_TOS: + case IP_TTL: + case IP_RECVOPTS: + case IP_RECVRETOPTS: + case IP_RECVDSTADDR: + case IP_RECVIF: + case IP_FAITH: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + + switch (sopt->sopt_name) { + case IP_TOS: + inp->inp_ip_tos = optval; + break; + + case IP_TTL: + inp->inp_ip_ttl = optval; + break; +#define OPTSET(bit) \ + if (optval) \ + inp->inp_flags |= bit; \ + else \ + inp->inp_flags &= ~bit; + + case IP_RECVOPTS: + OPTSET(INP_RECVOPTS); + break; + + case IP_RECVRETOPTS: + OPTSET(INP_RECVRETOPTS); + break; + + case IP_RECVDSTADDR: + OPTSET(INP_RECVDSTADDR); + break; + + case IP_RECVIF: + OPTSET(INP_RECVIF); + break; + + case IP_FAITH: + OPTSET(INP_FAITH); + break; + } + break; +#undef OPTSET + + case IP_MULTICAST_IF: + case IP_MULTICAST_VIF: + case 
IP_MULTICAST_TTL: + case IP_MULTICAST_LOOP: + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: + error = ip_setmoptions(sopt, &inp->inp_moptions); + break; + + case IP_PORTRANGE: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + + switch (optval) { + case IP_PORTRANGE_DEFAULT: + inp->inp_flags &= ~(INP_LOWPORT); + inp->inp_flags &= ~(INP_HIGHPORT); + break; + + case IP_PORTRANGE_HIGH: + inp->inp_flags &= ~(INP_LOWPORT); + inp->inp_flags |= INP_HIGHPORT; + break; + + case IP_PORTRANGE_LOW: + inp->inp_flags &= ~(INP_HIGHPORT); + inp->inp_flags |= INP_LOWPORT; + break; + + default: + error = EINVAL; + break; + } + break; + +#if IPSEC + case IP_IPSEC_POLICY: + { + caddr_t req = NULL; + size_t len = 0; + int priv; + struct mbuf *m; + int optname; + + if (error = sooptgetm(sopt, &m)) /* XXX */ + break; + if (error = sooptmcopyin(sopt, m)) /* XXX */ + break; + priv = (sopt->sopt_p != NULL && + suser(sopt->sopt_p->p_ucred, + &sopt->sopt_p->p_acflag) != 0) ? 0 : 1; + if (m) { + req = mtod(m, caddr_t); + len = m->m_len; + } + optname = sopt->sopt_name; + error = ipsec4_set_policy(inp, optname, req, len, priv); + m_freem(m); + break; + } +#endif /*IPSEC*/ + + default: + error = ENOPROTOOPT; + break; + } + break; + + case SOPT_GET: + switch (sopt->sopt_name) { + case IP_OPTIONS: + case IP_RETOPTS: + if (inp->inp_options) + error = sooptcopyout(sopt, + mtod(inp->inp_options, + char *), + inp->inp_options->m_len); + else + sopt->sopt_valsize = 0; + break; + + case IP_TOS: + case IP_TTL: + case IP_RECVOPTS: + case IP_RECVRETOPTS: + case IP_RECVDSTADDR: + case IP_RECVIF: + case IP_PORTRANGE: + case IP_FAITH: + switch (sopt->sopt_name) { + + case IP_TOS: + optval = inp->inp_ip_tos; + break; + + case IP_TTL: + optval = inp->inp_ip_ttl; + break; + +#define OPTBIT(bit) (inp->inp_flags & bit ? 
1 : 0) + + case IP_RECVOPTS: + optval = OPTBIT(INP_RECVOPTS); + break; + + case IP_RECVRETOPTS: + optval = OPTBIT(INP_RECVRETOPTS); + break; + + case IP_RECVDSTADDR: + optval = OPTBIT(INP_RECVDSTADDR); + break; + + case IP_RECVIF: + optval = OPTBIT(INP_RECVIF); + break; + + case IP_PORTRANGE: + if (inp->inp_flags & INP_HIGHPORT) + optval = IP_PORTRANGE_HIGH; + else if (inp->inp_flags & INP_LOWPORT) + optval = IP_PORTRANGE_LOW; + else + optval = 0; + break; + + case IP_FAITH: + optval = OPTBIT(INP_FAITH); + break; + } + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + + case IP_MULTICAST_IF: + case IP_MULTICAST_VIF: + case IP_MULTICAST_TTL: + case IP_MULTICAST_LOOP: + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: + error = ip_getmoptions(sopt, inp->inp_moptions); + break; + +#if IPSEC + case IP_IPSEC_POLICY: + { + struct mbuf *m = NULL; + size_t len = 0; + caddr_t req = NULL; + + if (error = sooptgetm(sopt, &m)) /* XXX */ + break; + if (error = sooptmcopyin(sopt, m)) /* XXX */ + break; + if (m) { + req = mtod(m, caddr_t); + len = m->m_len; + } + + error = ipsec4_get_policy(sotoinpcb(so), req, len, &m); + if (error == 0) + error = sooptmcopyout(sopt, m); /* XXX */ + + /* if error, m_freem called at soopt_mcopyout(). */ + if (error == 0) + m_freem(m); + break; + } +#endif /*IPSEC*/ + + default: + error = ENOPROTOOPT; + break; + } + break; + } + return (error); +} + +/* + * Set up IP options in pcb for insertion in output packets. + * Store in mbuf with pointer in pcbopt, adding pseudo-option + * with destination address if source routed. + */ +static int +ip_pcbopts(optname, pcbopt, m) + int optname; + struct mbuf **pcbopt; + register struct mbuf *m; +{ + register int cnt, optlen; + register u_char *cp; + u_char opt; + + /* turn off any old options */ + if (*pcbopt) + (void)m_free(*pcbopt); + *pcbopt = 0; + if (m == (struct mbuf *)0 || m->m_len == 0) { + /* + * Only turning off any previous options. 
+ */ + if (m) + (void)m_free(m); + return (0); + } + +#ifndef vax + if (m->m_len % sizeof(int32_t)) + goto bad; +#endif + /* + * IP first-hop destination address will be stored before + * actual options; move other options back + * and clear it when none present. + */ + if (m->m_data + m->m_len + sizeof(struct in_addr) >= &m->m_dat[MLEN]) + goto bad; + cnt = m->m_len; + m->m_len += sizeof(struct in_addr); + cp = mtod(m, u_char *) + sizeof(struct in_addr); + ovbcopy(mtod(m, caddr_t), (caddr_t)cp, (unsigned)cnt); + bzero(mtod(m, caddr_t), sizeof(struct in_addr)); + + for (; cnt > 0; cnt -= optlen, cp += optlen) { + opt = cp[IPOPT_OPTVAL]; + if (opt == IPOPT_EOL) + break; + if (opt == IPOPT_NOP) + optlen = 1; + else { + if (cnt < IPOPT_OLEN + sizeof(*cp)) + goto bad; + optlen = cp[IPOPT_OLEN]; + if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) + goto bad; + } + switch (opt) { + + default: + break; + + case IPOPT_LSRR: + case IPOPT_SSRR: + /* + * user process specifies route as: + * ->A->B->C->D + * D must be our final destination (but we can't + * check that since we may not have connected yet). + * A is first hop destination, which doesn't appear in + * actual IP option, but is stored before the options. + */ + if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr)) + goto bad; + m->m_len -= sizeof(struct in_addr); + cnt -= sizeof(struct in_addr); + optlen -= sizeof(struct in_addr); + cp[IPOPT_OLEN] = optlen; + /* + * Move first hop before start of options. + */ + bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t), + sizeof(struct in_addr)); + /* + * Then copy rest of options back + * to close up the deleted entry. 
+ */ + ovbcopy((caddr_t)(&cp[IPOPT_OFFSET+1] + + sizeof(struct in_addr)), + (caddr_t)&cp[IPOPT_OFFSET+1], + (unsigned)cnt + sizeof(struct in_addr)); + break; + } + } + if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr)) + goto bad; + *pcbopt = m; + return (0); + +bad: + (void)m_free(m); + return (EINVAL); +} + +/* + * XXX + * The whole multicast option thing needs to be re-thought. + * Several of these options are equally applicable to non-multicast + * transmission, and one (IP_MULTICAST_TTL) totally duplicates a + * standard option (IP_TTL). + */ +/* + * Set the IP multicast options in response to user setsockopt(). + */ +static int +ip_setmoptions(sopt, imop) + struct sockopt *sopt; + struct ip_moptions **imop; +{ + int error = 0; + int i; + struct in_addr addr; + struct ip_mreq mreq; + struct ifnet *ifp; + struct ip_moptions *imo = *imop; + struct route ro; + struct sockaddr_in *dst; + int s; + + if (imo == NULL) { + /* + * No multicast option buffer attached to the pcb; + * allocate one and initialize to default values. + */ + imo = (struct ip_moptions*) _MALLOC(sizeof(*imo), M_IPMOPTS, + M_WAITOK); + + if (imo == NULL) + return (ENOBUFS); + *imop = imo; + imo->imo_multicast_ifp = NULL; + imo->imo_multicast_vif = -1; + imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; + imo->imo_multicast_loop = IP_DEFAULT_MULTICAST_LOOP; + imo->imo_num_memberships = 0; + } + + switch (sopt->sopt_name) { + /* store an index number for the vif you wanna use in the send */ + case IP_MULTICAST_VIF: + if (legal_vif_num == 0) { + error = EOPNOTSUPP; + break; + } + error = sooptcopyin(sopt, &i, sizeof i, sizeof i); + if (error) + break; + if (!legal_vif_num(i) && (i != -1)) { + error = EINVAL; + break; + } + imo->imo_multicast_vif = i; + break; + + case IP_MULTICAST_IF: + /* + * Select the interface for outgoing multicast packets. + */ + error = sooptcopyin(sopt, &addr, sizeof addr, sizeof addr); + if (error) + break; + /* + * INADDR_ANY is used to remove a previous selection. 
+ * When no interface is selected, a default one is + * chosen every time a multicast packet is sent. + */ + if (addr.s_addr == INADDR_ANY) { + imo->imo_multicast_ifp = NULL; + break; + } + /* + * The selected interface is identified by its local + * IP address. Find the interface and confirm that + * it supports multicasting. + */ + s = splimp(); + INADDR_TO_IFP(addr, ifp); + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + splx(s); + error = EADDRNOTAVAIL; + break; + } + imo->imo_multicast_ifp = ifp; + splx(s); + break; + + case IP_MULTICAST_TTL: + /* + * Set the IP time-to-live for outgoing multicast packets. + * The original multicast API required a char argument, + * which is inconsistent with the rest of the socket API. + * We allow either a char or an int. + */ + if (sopt->sopt_valsize == 1) { + u_char ttl; + error = sooptcopyin(sopt, &ttl, 1, 1); + if (error) + break; + imo->imo_multicast_ttl = ttl; + } else { + u_int ttl; + error = sooptcopyin(sopt, &ttl, sizeof ttl, + sizeof ttl); + if (error) + break; + if (ttl > 255) + error = EINVAL; + else + imo->imo_multicast_ttl = ttl; + } + break; + + case IP_MULTICAST_LOOP: + /* + * Set the loopback flag for outgoing multicast packets. + * Must be zero or one. The original multicast API required a + * char argument, which is inconsistent with the rest + * of the socket API. We allow either a char or an int. + */ + if (sopt->sopt_valsize == 1) { + u_char loop; + error = sooptcopyin(sopt, &loop, 1, 1); + if (error) + break; + imo->imo_multicast_loop = !!loop; + } else { + u_int loop; + error = sooptcopyin(sopt, &loop, sizeof loop, + sizeof loop); + if (error) + break; + imo->imo_multicast_loop = !!loop; + } + break; + + case IP_ADD_MEMBERSHIP: + /* + * Add a multicast group membership. + * Group must be a valid IP multicast address. 
+ */ + error = sooptcopyin(sopt, &mreq, sizeof mreq, sizeof mreq); + if (error) + break; + + if (!IN_MULTICAST(ntohl(mreq.imr_multiaddr.s_addr))) { + error = EINVAL; + break; + } + s = splimp(); + /* + * If no interface address was provided, use the interface of + * the route to the given multicast address. + */ + if (mreq.imr_interface.s_addr == INADDR_ANY) { + bzero((caddr_t)&ro, sizeof(ro)); + dst = (struct sockaddr_in *)&ro.ro_dst; + dst->sin_len = sizeof(*dst); + dst->sin_family = AF_INET; + dst->sin_addr = mreq.imr_multiaddr; + rtalloc(&ro); + if (ro.ro_rt == NULL) { + error = EADDRNOTAVAIL; + splx(s); + break; + } + ifp = ro.ro_rt->rt_ifp; + rtfree(ro.ro_rt); + } + else { + INADDR_TO_IFP(mreq.imr_interface, ifp); + } + + /* + * See if we found an interface, and confirm that it + * supports multicast. + */ + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + error = EADDRNOTAVAIL; + splx(s); + break; + } + /* + * See if the membership already exists or if all the + * membership slots are full. + */ + for (i = 0; i < imo->imo_num_memberships; ++i) { + if (imo->imo_membership[i]->inm_ifp == ifp && + imo->imo_membership[i]->inm_addr.s_addr + == mreq.imr_multiaddr.s_addr) + break; + } + if (i < imo->imo_num_memberships) { + error = EADDRINUSE; + splx(s); + break; + } + if (i == IP_MAX_MEMBERSHIPS) { + error = ETOOMANYREFS; + splx(s); + break; + } + /* + * Everything looks good; add a new record to the multicast + * address list for the given interface. + */ + if ((imo->imo_membership[i] = + in_addmulti(&mreq.imr_multiaddr, ifp)) == NULL) { + error = ENOBUFS; + splx(s); + break; + } + ++imo->imo_num_memberships; + splx(s); + break; + + case IP_DROP_MEMBERSHIP: + /* + * Drop a multicast group membership. + * Group must be a valid IP multicast address. 
+ */ + error = sooptcopyin(sopt, &mreq, sizeof mreq, sizeof mreq); + if (error) + break; + + if (!IN_MULTICAST(ntohl(mreq.imr_multiaddr.s_addr))) { + error = EINVAL; + break; + } + + s = splimp(); + /* + * If an interface address was specified, get a pointer + * to its ifnet structure. + */ + if (mreq.imr_interface.s_addr == INADDR_ANY) + ifp = NULL; + else { + INADDR_TO_IFP(mreq.imr_interface, ifp); + if (ifp == NULL) { + error = EADDRNOTAVAIL; + splx(s); + break; + } + } + /* + * Find the membership in the membership array. + */ + for (i = 0; i < imo->imo_num_memberships; ++i) { + if ((ifp == NULL || + imo->imo_membership[i]->inm_ifp == ifp) && + imo->imo_membership[i]->inm_addr.s_addr == + mreq.imr_multiaddr.s_addr) + break; + } + if (i == imo->imo_num_memberships) { + error = EADDRNOTAVAIL; + splx(s); + break; + } + /* + * Give up the multicast address record to which the + * membership points. + */ + in_delmulti(imo->imo_membership[i]); + /* + * Remove the gap in the membership array. + */ + for (++i; i < imo->imo_num_memberships; ++i) + imo->imo_membership[i-1] = imo->imo_membership[i]; + --imo->imo_num_memberships; + splx(s); + break; + + default: + error = EOPNOTSUPP; + break; + } + + /* + * If all options have default values, no need to keep the mbuf. + */ + if (imo->imo_multicast_ifp == NULL && + imo->imo_multicast_vif == -1 && + imo->imo_multicast_ttl == IP_DEFAULT_MULTICAST_TTL && + imo->imo_multicast_loop == IP_DEFAULT_MULTICAST_LOOP && + imo->imo_num_memberships == 0) { + FREE(*imop, M_IPMOPTS); + *imop = NULL; + } + + return (error); +} + +/* + * Return the IP multicast options in response to user getsockopt(). 
+ */
+static int
+ip_getmoptions(sopt, imo)
+	struct sockopt *sopt;
+	register struct ip_moptions *imo;
+{
+	struct in_addr addr;
+	struct in_ifaddr *ia;
+	int error, optval;
+	u_char coptval;
+
+	error = 0;
+	switch (sopt->sopt_name) {
+	case IP_MULTICAST_VIF:
+		/* -1 means "no vif selected" (also the default in imo). */
+		if (imo != NULL)
+			optval = imo->imo_multicast_vif;
+		else
+			optval = -1;
+		error = sooptcopyout(sopt, &optval, sizeof optval);
+		break;
+
+	case IP_MULTICAST_IF:
+		/* Report the selected interface by its local IP address,
+		 * INADDR_ANY when none (or none resolvable) is selected. */
+		if (imo == NULL || imo->imo_multicast_ifp == NULL)
+			addr.s_addr = INADDR_ANY;
+		else {
+			IFP_TO_IA(imo->imo_multicast_ifp, ia);
+			addr.s_addr = (ia == NULL) ? INADDR_ANY
+				: IA_SIN(ia)->sin_addr.s_addr;
+		}
+		error = sooptcopyout(sopt, &addr, sizeof addr);
+		break;
+
+	case IP_MULTICAST_TTL:
+		/* The old multicast API took a u_char, the socket API an
+		 * int; honour whichever width the caller's buffer has. */
+		if (imo == 0)
+			optval = coptval = IP_DEFAULT_MULTICAST_TTL;
+		else
+			optval = coptval = imo->imo_multicast_ttl;
+		if (sopt->sopt_valsize == 1)
+			error = sooptcopyout(sopt, &coptval, 1);
+		else
+			error = sooptcopyout(sopt, &optval, sizeof optval);
+		break;
+
+	case IP_MULTICAST_LOOP:
+		/* Same char-vs-int duality as IP_MULTICAST_TTL above. */
+		if (imo == 0)
+			optval = coptval = IP_DEFAULT_MULTICAST_LOOP;
+		else
+			optval = coptval = imo->imo_multicast_loop;
+		if (sopt->sopt_valsize == 1)
+			error = sooptcopyout(sopt, &coptval, 1);
+		else
+			error = sooptcopyout(sopt, &optval, sizeof optval);
+		break;
+
+	default:
+		error = ENOPROTOOPT;
+		break;
+	}
+	return (error);
+}
+
+/*
+ * Discard the IP multicast options.
+ */
+void
+ip_freemoptions(imo)
+	register struct ip_moptions *imo;
+{
+	register int i;
+
+	/* Drop every group membership before releasing the option block. */
+	if (imo != NULL) {
+		for (i = 0; i < imo->imo_num_memberships; ++i)
+			in_delmulti(imo->imo_membership[i]);
+		FREE(imo, M_IPMOPTS);
+	}
+}
+
+/*
+ * Routine called from ip_output() to loop back a copy of an IP multicast
+ * packet to the input queue of a specified interface.  Note that this
+ * calls the output routine of the loopback "driver", but with an interface
+ * pointer that might NOT be a loopback interface -- evil, but easier than
+ * replicating that code here.
+ */ +static void +ip_mloopback(ifp, m, dst, hlen) + struct ifnet *ifp; + register struct mbuf *m; + register struct sockaddr_in *dst; + int hlen; +{ + register struct ip *ip; + struct mbuf *copym; + + copym = m_copy(m, 0, M_COPYALL); + if (copym != NULL && (copym->m_flags & M_EXT || copym->m_len < hlen)) + copym = m_pullup(copym, hlen); + if (copym != NULL) { + /* + * We don't bother to fragment if the IP length is greater + * than the interface's MTU. Can this possibly matter? + */ + ip = mtod(copym, struct ip *); + ip->ip_len = htons((u_short)ip->ip_len); + ip->ip_off = htons((u_short)ip->ip_off); + ip->ip_sum = 0; + ip->ip_sum = in_cksum(copym, hlen); + + /* + * NB: + * It's not clear whether there are any lingering + * reentrancy problems in other areas which might + * be exposed by using ip_input directly (in + * particular, everything which modifies the packet + * in-place). Yet another option is using the + * protosw directly to deliver the looped back + * packet. For the moment, we'll err on the side + * of safety by using if_simloop(). + */ +#if 1 /* XXX */ + if (dst->sin_family != AF_INET) { + printf("ip_mloopback: bad address family %d\n", + dst->sin_family); + dst->sin_family = AF_INET; + } +#endif + + /* + * TedW: + * We need to send all loopback traffic down to dlil in case + * a filter has tapped-in. + */ + + if (lo_dl_tag == 0) + dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET, &lo_dl_tag); + + /* + * Stuff the 'real' ifp into the pkthdr, to be used in matching + * in ip_input(); we need the loopback ifp/dl_tag passed as args + * to make the loopback driver compliant with the data link + * requirements. 
+ */ + if (lo_dl_tag) + { copym->m_pkthdr.rcvif = ifp; + dlil_output(lo_dl_tag, copym, 0, (struct sockaddr *) dst, 0); + } else { + printf("Warning: ip_output call to dlil_find_dltag failed!\n"); + m_freem(copym); + } + +/* if_simloop(ifp, copym, (struct sockaddr *)dst, 0);*/ + } +} diff --git a/bsd/netinet/ip_proxy.c b/bsd/netinet/ip_proxy.c new file mode 100644 index 000000000..8d645eb40 --- /dev/null +++ b/bsd/netinet/ip_proxy.c @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. 
+ */ +#if !defined(lint) +#endif + +#if defined(__FreeBSD__) && defined(KERNEL) && !defined(_KERNEL) +# define _KERNEL +#endif + +#if !defined(_KERNEL) && !defined(KERNEL) +# include +# include +# include +#endif +#include +#include +#include +#include +#include +#if !defined(__FreeBSD__) +# include +#endif +#include +#include +#ifndef linux +# include +#endif +#include +#if defined(_KERNEL) +# if !defined(linux) +# include +# else +# include +# endif +#endif +#if !defined(__SVR4) && !defined(__svr4__) +# ifndef linux +# include +# endif +#else +# include +# include +# include +# include +#endif +#if __FreeBSD__ > 2 +# include +# include +#endif +#include +#ifdef sun +# include +#endif +#include +#include +#include +#include +#ifndef linux +# include +#endif +#include +#include +#include +#include "netinet/ip_compat.h" +#include +#include "netinet/ip_fil.h" +#include "netinet/ip_proxy.h" +#include "netinet/ip_nat.h" +#include "netinet/ip_state.h" + +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +static ap_session_t *ap_find __P((ip_t *, tcphdr_t *)); +static ap_session_t *ap_new_session __P((aproxy_t *, ip_t *, tcphdr_t *, + fr_info_t *, nat_t *)); + +static int ap_matchsrcdst __P((ap_session_t *aps, struct in_addr src, + struct in_addr dst, void *tcp, u_short sport, + u_short dport)); + +#define AP_SESS_SIZE 53 + +#if defined(_KERNEL) && !defined(linux) +#include "netinet/ip_ftp_pxy.c" +#endif + +ap_session_t *ap_sess_tab[AP_SESS_SIZE]; +aproxy_t ap_proxies[] = { +#if IPF_FTP_PROXY + { "ftp", (char)IPPROTO_TCP, 0, 0, ippr_ftp_init, ippr_ftp_in, ippr_ftp_out }, +#endif + { "", '\0', 0, 0, NULL, NULL } +}; + + +int ap_ok(ip, tcp, nat) +ip_t *ip; +tcphdr_t *tcp; +ipnat_t *nat; +{ + aproxy_t *apr = nat->in_apr; + u_short dport = nat->in_dport; + + if (!apr || (apr && (apr->apr_flags & APR_DELETE)) || + (ip->ip_p != apr->apr_p)) + return 0; + if ((tcp && (tcp->th_dport != dport)) || (!tcp && dport)) + return 0; + return 1; +} + + +static int 
+ap_matchsrcdst(aps, src, dst, tcp, sport, dport)
+ap_session_t *aps;
+struct in_addr src, dst;
+void *tcp;
+u_short sport, dport;
+{
+	/*
+	 * Match a packet against a session in either direction:
+	 * first branch is the forward direction (packet dst == session
+	 * dst), second is the reply direction with addresses and ports
+	 * swapped.  When tcp is NULL the ports are not compared.
+	 */
+	if (aps->aps_dst.s_addr == dst.s_addr) {
+		if ((aps->aps_src.s_addr == src.s_addr) &&
+		    (!tcp || (sport == aps->aps_sport) &&
+		     (dport == aps->aps_dport)))
+			return 1;
+	} else if (aps->aps_dst.s_addr == src.s_addr) {
+		if ((aps->aps_src.s_addr == dst.s_addr) &&
+		    (!tcp || (sport == aps->aps_dport) &&
+		     (dport == aps->aps_sport)))
+			return 1;
+	}
+	return 0;
+}
+
+
+/*
+ * Look up an existing proxy session for this packet in the session
+ * hash table; returns NULL when none matches.
+ */
+static ap_session_t *ap_find(ip, tcp)
+ip_t *ip;
+tcphdr_t *tcp;
+{
+	register u_char p = ip->ip_p;
+	register ap_session_t *aps;
+	register u_short sp, dp;
+	register u_long hv;
+	struct in_addr src, dst;
+
+	src = ip->ip_src, dst = ip->ip_dst;
+	sp = dp = 0;	/* XXX gcc -Wunitialized */
+
+	/* Hash on the address pair (and ports for TCP/UDP); the hash is
+	 * symmetric enough that ap_matchsrcdst() checks both directions.
+	 * NOTE(review): must stay identical to the hash in
+	 * ap_new_session() or lookups will miss their own entries. */
+	hv = ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
+	hv *= 651733;
+	if (tcp) {
+		sp = tcp->th_sport;
+		dp = tcp->th_dport;
+		hv ^= (sp + dp);
+		hv *= 5;
+	}
+	hv %= AP_SESS_SIZE;
+
+	for (aps = ap_sess_tab[hv]; aps; aps = aps->aps_next)
+		if ((aps->aps_p == p) &&
+		    ap_matchsrcdst(aps, src, dst, tcp, sp, dp))
+			break;
+	return aps;
+}
+
+
+/*
+ * Allocate a new application proxy structure and fill it in with the
+ * relevant details. call the init function once complete, prior to
+ * returning.
+ */
+static ap_session_t *ap_new_session(apr, ip, tcp, fin, nat)
+aproxy_t *apr;
+ip_t *ip;
+tcphdr_t *tcp;
+fr_info_t *fin;
+nat_t *nat;
+{
+	register ap_session_t *aps;
+	u_short dport;
+	u_long hv;
+
+	/* Refuse proxies marked for deletion or of the wrong protocol. */
+	if (!apr || (apr && (apr->apr_flags & APR_DELETE)) ||
+	    (ip->ip_p != apr->apr_p))
+		return NULL;
+	/* A configured dport of 0 matches only non-TCP traffic. */
+	dport = nat->nat_ptr->in_dport;
+	if ((tcp && (tcp->th_dport != dport)) || (!tcp && dport))
+		return NULL;
+
+	/* Same hash as ap_find() so the new entry can be found again. */
+	hv = ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
+	hv *= 651733;
+	if (tcp) {
+		hv ^= (tcp->th_sport + tcp->th_dport);
+		hv *= 5;
+	}
+	hv %= AP_SESS_SIZE;
+
+	KMALLOC(aps, ap_session_t *, sizeof(*aps));
+	if (!aps)
+		return NULL;
+	bzero((char *)aps, sizeof(*aps));
+	aps->aps_apr = apr;
+	aps->aps_src = ip->ip_src;
+	aps->aps_dst = ip->ip_dst;
+	aps->aps_p = ip->ip_p;
+	/* Initial idle timeout, decremented by ap_expire().
+	 * NOTE(review): presumably expressed in ap_expire() ticks --
+	 * confirm against the timer that drives ap_expire(). */
+	aps->aps_tout = 1200;	/* XXX */
+	if (tcp) {
+		aps->aps_sport = tcp->th_sport;
+		aps->aps_dport = tcp->th_dport;
+	}
+	aps->aps_data = NULL;
+	aps->aps_psiz = 0;
+	/* Insert at the head of the hash chain, then let the proxy's
+	 * init routine set up its private state. */
+	aps->aps_next = ap_sess_tab[hv];
+	ap_sess_tab[hv] = aps;
+	(void) (*apr->apr_init)(fin, ip, tcp, aps, nat);
+	return aps;
+}
+
+
+/*
+ * check to see if a packet should be passed through an active proxy routine
+ * if one has been setup for it.
+ */
+int ap_check(ip, tcp, fin, nat)
+ip_t *ip;
+tcphdr_t *tcp;
+fr_info_t *fin;
+nat_t *nat;
+{
+	ap_session_t *aps;
+	aproxy_t *apr;
+	int err;
+
+	if (!(fin->fin_fi.fi_fl & FI_TCPUDP))
+		tcp = NULL;
+
+	if ((aps = ap_find(ip, tcp)) ||
+	    (aps = ap_new_session(nat->nat_ptr->in_apr, ip, tcp, fin, nat))) {
+		if (ip->ip_p == IPPROTO_TCP) {
+			/*
+			 * verify that the checksum is correct. If not, then
+			 * don't do anything with this packet.
+ */ + if (tcp->th_sum != fr_tcpsum(*(mb_t **)fin->fin_mp, + ip, tcp, ip->ip_len)) { + frstats[fin->fin_out].fr_tcpbad++; + return -1; + } + fr_tcp_age(&aps->aps_tout, aps->aps_state, ip, fin, + tcp->th_sport == aps->aps_sport); + } + + apr = aps->aps_apr; + err = 0; + if (fin->fin_out) { + if (apr->apr_outpkt) + err = (*apr->apr_outpkt)(fin, ip, tcp, + aps, nat); + } else { + if (apr->apr_inpkt) + err = (*apr->apr_inpkt)(fin, ip, tcp, + aps, nat); + } + if (err == 2) { + tcp->th_sum = fr_tcpsum(*(mb_t **)fin->fin_mp, ip, + tcp, ip->ip_len); + err = 0; + } + return err; + } + return -1; +} + + +aproxy_t *ap_match(pr, name) +u_char pr; +char *name; +{ + aproxy_t *ap; + + for (ap = ap_proxies; ap->apr_p; ap++) + if ((ap->apr_p == pr) && + !strncmp(name, ap->apr_label, sizeof(ap->apr_label))) { + ap->apr_ref++; + return ap; + } + return NULL; +} + + +void ap_free(ap) +aproxy_t *ap; +{ + ap->apr_ref--; +} + + +void aps_free(aps) +ap_session_t *aps; +{ + if (aps->aps_data && aps->aps_psiz) + KFREES(aps->aps_data, aps->aps_psiz); + KFREE(aps); +} + + +void ap_unload() +{ + ap_session_t *aps; + int i; + + for (i = 0; i < AP_SESS_SIZE; i++) + while ((aps = ap_sess_tab[i])) { + ap_sess_tab[i] = aps->aps_next; + aps_free(aps); + } +} + + +void ap_expire() +{ + ap_session_t *aps, **apsp; + int i; + + for (i = 0; i < AP_SESS_SIZE; i++) + for (apsp = &ap_sess_tab[i]; (aps = *apsp); ) { + aps->aps_tout--; + if (!aps->aps_tout) { + ap_sess_tab[i] = aps->aps_next; + aps_free(aps); + *apsp = aps->aps_next; + } else + apsp = &aps->aps_next; + } +} diff --git a/bsd/netinet/ip_proxy.h b/bsd/netinet/ip_proxy.h new file mode 100644 index 000000000..1447027e0 --- /dev/null +++ b/bsd/netinet/ip_proxy.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + */ + +#ifndef __IP_PROXY_H__ +#define __IP_PROXY_H__ + +#ifndef SOLARIS +#define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) +#endif + +#ifndef APR_LABELLEN +#define APR_LABELLEN 16 +#endif +#define AP_SESS_SIZE 53 + +struct nat; +struct ipnat; + +typedef struct ap_tcp { + u_short apt_sport; /* source port */ + u_short apt_dport; /* destination port */ + short apt_sel; /* seqoff/after set selector */ + short apt_seqoff[2]; /* sequence # difference */ + tcp_seq apt_after[2]; /* don't change seq-off until after this */ + u_char apt_state[2]; /* connection state */ +} ap_tcp_t; + +typedef struct ap_udp { + u_short apu_sport; /* source port */ + u_short apu_dport; /* destination port */ +} ap_udp_t; + +typedef struct ap_session { + struct aproxy *aps_apr; + struct in_addr aps_src; /* source IP# */ + struct in_addr aps_dst; /* destination IP# */ + u_char aps_p; /* protocol */ + union { + struct ap_tcp apu_tcp; + struct ap_udp apu_udp; + } aps_un; + u_int aps_flags; + QUAD_T aps_bytes; /* bytes sent */ + QUAD_T aps_pkts; /* packets sent */ + u_long 
aps_tout; /* time left before expiring */ + void *aps_data; /* private data */ + int aps_psiz; /* size of private data */ + struct ap_session *aps_next; +} ap_session_t ; + +#define aps_sport aps_un.apu_tcp.apt_sport +#define aps_dport aps_un.apu_tcp.apt_dport +#define aps_sel aps_un.apu_tcp.apt_sel +#define aps_seqoff aps_un.apu_tcp.apt_seqoff +#define aps_after aps_un.apu_tcp.apt_after +#define aps_state aps_un.apu_tcp.apt_state + + +typedef struct aproxy { + char apr_label[APR_LABELLEN]; /* Proxy label # */ + u_char apr_p; /* protocol */ + int apr_ref; /* +1 per rule referencing it */ + int apr_flags; + int (* apr_init) __P((fr_info_t *, ip_t *, tcphdr_t *, + ap_session_t *, struct nat *)); + int (* apr_inpkt) __P((fr_info_t *, ip_t *, tcphdr_t *, + ap_session_t *, struct nat *)); + int (* apr_outpkt) __P((fr_info_t *, ip_t *, tcphdr_t *, + ap_session_t *, struct nat *)); +} aproxy_t; + +#define APR_DELETE 1 + + +extern ap_session_t *ap_sess_tab[AP_SESS_SIZE]; +extern aproxy_t ap_proxies[]; + +extern int ap_ok __P((ip_t *, tcphdr_t *, struct ipnat *)); +extern void ap_unload __P((void)); +extern void ap_free __P((aproxy_t *)); +extern void aps_free __P((ap_session_t *)); +extern int ap_check __P((ip_t *, tcphdr_t *, fr_info_t *, struct nat *)); +extern aproxy_t *ap_match __P((u_char, char *)); +extern void ap_expire __P((void)); + +#endif /* __IP_PROXY_H__ */ diff --git a/bsd/netinet/ip_state.c b/bsd/netinet/ip_state.c new file mode 100644 index 000000000..b920df6c9 --- /dev/null +++ b/bsd/netinet/ip_state.c @@ -0,0 +1,821 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1995-1997 by Darren Reed. + * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + */ +#if !defined(lint) +/* static const char sccsid[] = "@(#)ip_state.c 1.8 6/5/96 (C) 1993-1995 Darren Reed"; */ +#endif + +#include "opt_ipfilter.h" +#if defined(KERNEL) && !defined(_KERNEL) +#define _KERNEL +#endif +#define __FreeBSD_version 300000 /* it's a hack, but close enough */ + +#if !defined(_KERNEL) && !defined(KERNEL) && !defined(__KERNEL__) +# include +# include +#else +# ifdef linux +# include +# include +# endif +#endif +#include +#include +#include +#include +#if defined(KERNEL) && (__FreeBSD_version >= 220000) +# include +# include +# include +#else +# include +#endif +#include +#include +#ifndef linux +#include +#endif +#include +#if defined(_KERNEL) && !defined(linux) +# include +#endif +#if !defined(__SVR4) && !defined(__svr4__) +# ifndef linux +# include +# endif +#else +# include +# include +# include +# include +# include +#endif + +#include +#if sun +#include +#endif +#include +#include +#include +#include +#include +#ifndef linux +# include +# include +#endif +#include +#include +#include "netinet/ip_compat.h" +#include +#include "netinet/ip_fil.h" +#include "netinet/ip_nat.h" +#include 
"netinet/ip_frag.h" +#include "netinet/ip_proxy.h" +#include "netinet/ip_state.h" +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif + +#define TCP_CLOSE (TH_FIN|TH_RST) + +static ipstate_t *ips_table[IPSTATE_SIZE]; +static int ips_num = 0; +static ips_stat_t ips_stats; +#if (SOLARIS || defined(__sgi)) && defined(_KERNEL) +extern kmutex_t ipf_state; +#endif + +static int fr_matchsrcdst __P((ipstate_t *, struct in_addr, struct in_addr, + fr_info_t *, void *, u_short, u_short)); +static int fr_state_flush __P((int)); +static ips_stat_t *fr_statetstats __P((void)); + + +#define FIVE_DAYS (2 * 5 * 86400) /* 5 days: half closed session */ + +u_long fr_tcpidletimeout = FIVE_DAYS, + fr_tcpclosewait = 60, + fr_tcplastack = 20, + fr_tcptimeout = 120, + fr_tcpclosed = 1, + fr_udptimeout = 120, + fr_icmptimeout = 120; + + +static ips_stat_t *fr_statetstats() +{ + ips_stats.iss_active = ips_num; + ips_stats.iss_table = ips_table; + return &ips_stats; +} + + +/* + * flush state tables. two actions currently defined: + * which == 0 : flush all state table entries + * which == 1 : flush TCP connections which have started to close but are + * stuck for some reason. 
+ */ +static int fr_state_flush(which) +int which; +{ + register int i; + register ipstate_t *is, **isp; +#if defined(_KERNEL) && !SOLARIS + int s; +#endif + int delete, removed = 0; + + SPL_NET(s); + MUTEX_ENTER(&ipf_state); + for (i = 0; i < IPSTATE_SIZE; i++) + for (isp = &ips_table[i]; (is = *isp); ) { + delete = 0; + + switch (which) + { + case 0 : + delete = 1; + break; + case 1 : + if ((is->is_p == IPPROTO_TCP) && + (((is->is_state[0] <= TCPS_ESTABLISHED) && + (is->is_state[1] > TCPS_ESTABLISHED)) || + ((is->is_state[1] <= TCPS_ESTABLISHED) && + (is->is_state[0] > TCPS_ESTABLISHED)))) + delete = 1; + break; + } + + if (delete) { + *isp = is->is_next; + if (is->is_p == IPPROTO_TCP) + ips_stats.iss_fin++; + else + ips_stats.iss_expire++; +#if IPFILTER_LOG + ipstate_log(is, ISL_FLUSH); +#endif + KFREE(is); + ips_num--; + removed++; + } else + isp = &is->is_next; + } + MUTEX_EXIT(&ipf_state); + SPL_X(s); + return removed; +} + + +int fr_state_ioctl(data, cmd, mode) +caddr_t data; +#if defined(__NetBSD__) || defined(__OpenBSD__) +u_long cmd; +#else +int cmd; +#endif +int mode; +{ + int arg, ret, error = 0; + + switch (cmd) + { + case SIOCIPFFL : + IRCOPY(data, (caddr_t)&arg, sizeof(arg)); + if (arg == 0 || arg == 1) { + ret = fr_state_flush(arg); + IWCOPY((caddr_t)&ret, data, sizeof(ret)); + } else + error = EINVAL; + break; + case SIOCGIPST : + IWCOPY((caddr_t)fr_statetstats(), data, sizeof(ips_stat_t)); + break; + case FIONREAD : +#if IPFILTER_LOG + IWCOPY((caddr_t)&iplused[IPL_LOGSTATE], (caddr_t)data, + sizeof(iplused[IPL_LOGSTATE])); +#endif + break; + default : + return EINVAL; + } + return error; +} + + +/* + * Create a new ipstate structure and hang it off the hash table. 
+ */
+/*
+ * Build a state entry for this packet (ICMP query, TCP or UDP only) and
+ * insert it into ips_table.  The entry is first assembled in a stack
+ * template ('ips') so nothing is allocated on the early-reject paths.
+ * Returns 0 on success, -1 on failure (fragment offset set, short packet,
+ * table full, unsupported protocol, or out of memory).
+ */
+int fr_addstate(ip, fin, pass)
+ip_t *ip;
+fr_info_t *fin;
+u_int pass;
+{
+        ipstate_t ips;
+        register ipstate_t *is = &ips;
+        register u_int hv;
+
+        /* No state for non-first fragments or too-short packets. */
+        if ((ip->ip_off & 0x1fff) || (fin->fin_fi.fi_fl & FI_SHORT))
+                return -1;
+        if (ips_num == IPSTATE_MAX) {
+                ips_stats.iss_max++;
+                return -1;
+        }
+        ips.is_age = 1;
+        ips.is_state[0] = 0;
+        ips.is_state[1] = 0;
+        /*
+         * Copy and calculate...  (hash accumulates protocol + addresses,
+         * then protocol-specific fields below)
+         */
+        hv = (is->is_p = ip->ip_p);
+        hv += (is->is_src.s_addr = ip->ip_src.s_addr);
+        hv += (is->is_dst.s_addr = ip->ip_dst.s_addr);
+
+        switch (ip->ip_p)
+        {
+        case IPPROTO_ICMP :
+            {
+                struct icmp *ic = (struct icmp *)fin->fin_dp;
+
+                switch (ic->icmp_type)
+                {
+                case ICMP_ECHO :
+                        /* Stored type is the expected REPLY type. */
+                        is->is_icmp.ics_type = ICMP_ECHOREPLY;  /* XXX */
+                        hv += (is->is_icmp.ics_id = ic->icmp_id);
+                        hv += (is->is_icmp.ics_seq = ic->icmp_seq);
+                        break;
+                case ICMP_TSTAMP :
+                case ICMP_IREQ :
+                case ICMP_MASKREQ :
+                        /*
+                         * NOTE(review): id/seq are neither stored nor
+                         * hashed here, but fr_checkstate's ICMP lookup
+                         * always adds icmp_id/icmp_seq to its hash — for
+                         * these types replies appear to match only when
+                         * those fields are zero.  Confirm upstream intent.
+                         */
+                        is->is_icmp.ics_type = ic->icmp_type + 1;
+                        break;
+                default :
+                        return -1;
+                }
+                ips_stats.iss_icmp++;
+                is->is_age = fr_icmptimeout;
+                break;
+            }
+        case IPPROTO_TCP :
+            {
+                register tcphdr_t *tcp = (tcphdr_t *)fin->fin_dp;
+
+                /*
+                 * The endian of the ports doesn't matter, but the ack and
+                 * sequence numbers do as we do mathematics on them later.
+                 */
+                hv += (is->is_dport = tcp->th_dport);
+                hv += (is->is_sport = tcp->th_sport);
+                is->is_seq = ntohl(tcp->th_seq);
+                is->is_ack = ntohl(tcp->th_ack);
+                is->is_swin = ntohs(tcp->th_win);
+                is->is_dwin = is->is_swin;      /* start them the same */
+                ips_stats.iss_tcp++;
+                /*
+                 * If we're creating state for a starting connection, start
+                 * the timer on it as we'll never see an error if it fails
+                 * to connect.
+                 */
+                if ((tcp->th_flags & (TH_SYN|TH_ACK)) == TH_SYN)
+                        is->is_ack = 0; /* Trumpet WinSock 'ism */
+                fr_tcp_age(&is->is_age, is->is_state, ip, fin,
+                           tcp->th_sport == is->is_sport);
+                break;
+            }
+        case IPPROTO_UDP :
+            {
+                /* UDP header overlaid with tcphdr_t: only ports are read. */
+                register tcphdr_t *tcp = (tcphdr_t *)fin->fin_dp;
+
+                hv += (is->is_dport = tcp->th_dport);
+                hv += (is->is_sport = tcp->th_sport);
+                ips_stats.iss_udp++;
+                is->is_age = fr_udptimeout;
+                break;
+            }
+        default :
+                return -1;
+        }
+
+        /* Template is complete; allocate the real entry and copy it in. */
+        KMALLOC(is, ipstate_t *, sizeof(*is));
+        if (is == NULL) {
+                ips_stats.iss_nomem++;
+                return -1;
+        }
+        bcopy((char *)&ips, (char *)is, sizeof(*is));
+        hv %= IPSTATE_SIZE;
+        MUTEX_ENTER(&ipf_state);
+
+        is->is_pass = pass;
+        is->is_pkts = 1;
+        is->is_bytes = ip->ip_len;
+        /*
+         * Copy these from the rule itself.
+         */
+        is->is_opt = fin->fin_fr->fr_ip.fi_optmsk;
+        is->is_optmsk = fin->fin_fr->fr_mip.fi_optmsk;
+        is->is_sec = fin->fin_fr->fr_ip.fi_secmsk;
+        is->is_secmsk = fin->fin_fr->fr_mip.fi_secmsk;
+        is->is_auth = fin->fin_fr->fr_ip.fi_auth;
+        is->is_authmsk = fin->fin_fr->fr_mip.fi_auth;
+        is->is_flags = fin->fin_fr->fr_ip.fi_fl;
+        is->is_flags |= fin->fin_fr->fr_mip.fi_fl << 4;
+        /*
+         * add into table.
+         */
+        is->is_next = ips_table[hv];
+        ips_table[hv] = is;
+        /* Remember which interface saw the packet, by direction. */
+        if (fin->fin_out) {
+                is->is_ifpin = NULL;
+                is->is_ifpout = fin->fin_ifp;
+        } else {
+                is->is_ifpin = fin->fin_ifp;
+                is->is_ifpout = NULL;
+        }
+        /* "log first" means only the rule's first hit is logged. */
+        if (pass & FR_LOGFIRST)
+                is->is_pass &= ~(FR_LOGFIRST|FR_LOG);
+        ips_num++;
+#if IPFILTER_LOG
+        ipstate_log(is, ISL_NEW);
+#endif
+        MUTEX_EXIT(&ipf_state);
+        if (fin->fin_fi.fi_fl & FI_FRAG)
+                ipfr_newfrag(ip, fin, pass ^ FR_KEEPSTATE);
+        return 0;
+}
+
+
+/*
+ * check to see if a packet with TCP headers fits within the TCP window.
+ * change timeout depending on whether new packet is a SYN-ACK returning for a
+ * SYN or a RST or FIN which indicate time to close up shop.
+ */
+/*
+ * Validate a TCP packet against the recorded seq/ack/window of state 'is'
+ * and, on a match, refresh the stored values, statistics and timeout.
+ * Returns 1 on match, 0 otherwise.
+ */
+int fr_tcpstate(is, fin, ip, tcp)
+register ipstate_t *is;
+fr_info_t *fin;
+ip_t *ip;
+tcphdr_t *tcp;
+{
+        register int seqskew, ackskew;
+        register u_short swin, dwin;
+        register tcp_seq seq, ack;
+        int source;
+
+        /*
+         * Find difference between last checked packet and this packet.
+         */
+        seq = ntohl(tcp->th_seq);
+        ack = ntohl(tcp->th_ack);
+        /* source: packet travels in the same direction state was created. */
+        source = (ip->ip_src.s_addr == is->is_src.s_addr);
+
+        if (!(tcp->th_flags & TH_ACK))  /* Pretend an ack was sent */
+                ack = source ? is->is_ack : is->is_seq;
+
+        if (source) {
+                if (!is->is_seq)
+                        /*
+                         * Must be an outgoing SYN-ACK in reply to a SYN.
+                         */
+                        is->is_seq = seq;
+                seqskew = seq - is->is_seq;
+                ackskew = ack - is->is_ack;
+        } else {
+                if (!is->is_ack)
+                        /*
+                         * Must be a SYN-ACK in reply to a SYN.
+                         */
+                        is->is_ack = seq;
+                ackskew = seq - is->is_ack;
+                seqskew = ack - is->is_seq;
+        }
+
+        /*
+         * Make skew values absolute
+         */
+        if (seqskew < 0)
+                seqskew = -seqskew;
+        if (ackskew < 0)
+                ackskew = -ackskew;
+
+        /*
+         * If the difference in sequence and ack numbers is within the
+         * window size of the connection, store these values and match
+         * the packet.
+         */
+        if (source) {
+                swin = is->is_swin;
+                dwin = is->is_dwin;
+        } else {
+                dwin = is->is_swin;
+                swin = is->is_dwin;
+        }
+
+        if ((seqskew <= dwin) && (ackskew <= swin)) {
+                if (source) {
+                        is->is_seq = seq;
+                        is->is_ack = ack;
+                        is->is_swin = ntohs(tcp->th_win);
+                } else {
+                        /* Reverse direction: seq/ack roles swap. */
+                        is->is_seq = ack;
+                        is->is_ack = seq;
+                        is->is_dwin = ntohs(tcp->th_win);
+                }
+                ips_stats.iss_hits++;
+                is->is_pkts++;
+                is->is_bytes += ip->ip_len;
+                /*
+                 * Nearing end of connection, start timeout.
+                 */
+                fr_tcp_age(&is->is_age, is->is_state, ip, fin, source);
+                return 1;
+        }
+        return 0;
+}
+
+
+/*
+ * Match a packet's addresses/ports/interface against state entry 'is' in
+ * either direction ('rev' = packet flows opposite to the recorded one), and
+ * lazily record the interface for whichever direction is seen first.
+ * 'tcp' is only tested for NULL-ness; sp/dp carry the ports for TCP and UDP.
+ * Returns 1 on match, 0 otherwise.
+ */
+static int fr_matchsrcdst(is, src, dst, fin, tcp, sp, dp)
+ipstate_t *is;
+struct in_addr src, dst;
+fr_info_t *fin;
+void *tcp;
+u_short sp, dp;
+{
+        int ret = 0, rev, out;
+        void *ifp;
+
+        rev = (is->is_dst.s_addr != dst.s_addr);
+        ifp = fin->fin_ifp;
+        out = fin->fin_out;
+
+        /* First sighting in a direction pins that direction's interface. */
+        if (!rev) {
+                if (out) {
+                        if (!is->is_ifpout)
+                                is->is_ifpout = ifp;
+                } else {
+                        if (!is->is_ifpin)
+                                is->is_ifpin = ifp;
+                }
+        } else {
+                if (out) {
+                        if (!is->is_ifpin)
+                                is->is_ifpin = ifp;
+                } else {
+                        if (!is->is_ifpout)
+                                is->is_ifpout = ifp;
+                }
+        }
+
+        /* NB: "(!tcp || A && B)" relies on && binding tighter than ||. */
+        if (!rev) {
+                if (((out && is->is_ifpout == ifp) ||
+                     (!out && is->is_ifpin == ifp)) &&
+                    (is->is_dst.s_addr == dst.s_addr) &&
+                    (is->is_src.s_addr == src.s_addr) &&
+                    (!tcp || (sp == is->is_sport) &&
+                     (dp == is->is_dport))) {
+                        ret = 1;
+                }
+        } else {
+                if (((out && is->is_ifpin == ifp) ||
+                     (!out && is->is_ifpout == ifp)) &&
+                    (is->is_dst.s_addr == src.s_addr) &&
+                    (is->is_src.s_addr == dst.s_addr) &&
+                    (!tcp || (sp == is->is_dport) &&
+                     (dp == is->is_sport))) {
+                        ret = 1;
+                }
+        }
+
+        /*
+         * Whether or not this should be here, is questionable, but the aim
+         * is to get this out of the main line.  (IP options / security /
+         * auth masks recorded from the rule must also match.)
+         */
+        if (ret) {
+                if (((fin->fin_fi.fi_optmsk & is->is_optmsk) != is->is_opt) ||
+                    ((fin->fin_fi.fi_secmsk & is->is_secmsk) != is->is_sec) ||
+                    ((fin->fin_fi.fi_auth & is->is_authmsk) != is->is_auth) ||
+                    ((fin->fin_fi.fi_fl & (is->is_flags >> 4)) !=
+                     (is->is_flags & 0xf)))
+                        ret = 0;
+        }
+        return ret;
+}
+
+
+/*
+ * Check if a packet has a registered state.
+ */
+/*
+ * Look the packet up in the state table.  On a hit, update the entry and
+ * return the 'pass' flags saved when the state was created; on a miss
+ * return 0 (packet falls through to normal rule matching).
+ */
+int fr_checkstate(ip, fin)
+ip_t *ip;
+fr_info_t *fin;
+{
+        register struct in_addr dst, src;
+        register ipstate_t *is, **isp;
+        register u_char pr;
+        struct icmp *ic;
+        tcphdr_t *tcp;
+        u_int hv, hlen, pass;
+
+        /* Non-first fragments and short packets never match state. */
+        if ((ip->ip_off & 0x1fff) || (fin->fin_fi.fi_fl & FI_SHORT))
+                return 0;
+
+        hlen = fin->fin_hlen;
+        tcp = (tcphdr_t *)((char *)ip + hlen);
+        ic = (struct icmp *)tcp;
+        /* Hash mirrors the one built in fr_addstate. */
+        hv = (pr = ip->ip_p);
+        hv += (src.s_addr = ip->ip_src.s_addr);
+        hv += (dst.s_addr = ip->ip_dst.s_addr);
+
+        /*
+         * Search the hash table for matching packet header info.
+         */
+        switch (ip->ip_p)
+        {
+        case IPPROTO_ICMP :
+                hv += ic->icmp_id;
+                hv += ic->icmp_seq;
+                hv %= IPSTATE_SIZE;
+                MUTEX_ENTER(&ipf_state);
+                for (isp = &ips_table[hv]; (is = *isp); isp = &is->is_next)
+                        if ((is->is_p == pr) &&
+                            (ic->icmp_id == is->is_icmp.ics_id) &&
+                            (ic->icmp_seq == is->is_icmp.ics_seq) &&
+                            fr_matchsrcdst(is, src, dst, fin, NULL, 0, 0)) {
+                                /* Only the expected reply type matches. */
+                                if (is->is_icmp.ics_type != ic->icmp_type)
+                                        continue;
+                                is->is_age = fr_icmptimeout;
+                                is->is_pkts++;
+                                is->is_bytes += ip->ip_len;
+                                ips_stats.iss_hits++;
+                                pass = is->is_pass;
+                                MUTEX_EXIT(&ipf_state);
+                                return pass;
+                        }
+                MUTEX_EXIT(&ipf_state);
+                break;
+        case IPPROTO_TCP :
+            {
+                register u_short dport = tcp->th_dport, sport = tcp->th_sport;
+
+                hv += dport;
+                hv += sport;
+                hv %= IPSTATE_SIZE;
+                MUTEX_ENTER(&ipf_state);
+                for (isp = &ips_table[hv]; (is = *isp); isp = &is->is_next)
+                        if ((is->is_p == pr) &&
+                            fr_matchsrcdst(is, src, dst, fin, tcp,
+                                           sport, dport)) {
+                                if (fr_tcpstate(is, fin, ip, tcp)) {
+                                        pass = is->is_pass;
+#ifdef _KERNEL
+                                        MUTEX_EXIT(&ipf_state);
+#else
+                                        /*
+                                         * Userland build: reap closing
+                                         * connections immediately.
+                                         * NOTE(review): this path returns
+                                         * without MUTEX_EXIT — presumably
+                                         * the mutex macros are no-ops
+                                         * outside the kernel; confirm.
+                                         */
+                                        if (tcp->th_flags & TCP_CLOSE) {
+                                                *isp = is->is_next;
+                                                isp = &ips_table[hv];
+                                                KFREE(is);
+                                        }
+#endif
+                                        return pass;
+                                }
+                        }
+                MUTEX_EXIT(&ipf_state);
+                break;
+            }
+        case IPPROTO_UDP :
+            {
+                register u_short dport = tcp->th_dport, sport = tcp->th_sport;
+
+                hv += dport;
+                hv += sport;
+                hv %= IPSTATE_SIZE;
+                /*
+                 * Nothing else to match on but ports and IP#'s
+                 */
+                MUTEX_ENTER(&ipf_state);
+                for (is = ips_table[hv]; is; is = is->is_next)
+                        if ((is->is_p == pr) &&
+                            fr_matchsrcdst(is, src, dst, fin,
+                                           tcp, sport, dport)) {
+                                ips_stats.iss_hits++;
+                                is->is_pkts++;
+                                is->is_bytes += ip->ip_len;
+                                is->is_age = fr_udptimeout;
+                                pass = is->is_pass;
+                                MUTEX_EXIT(&ipf_state);
+                                return pass;
+                        }
+                MUTEX_EXIT(&ipf_state);
+                break;
+            }
+        default :
+                break;
+        }
+        ips_stats.iss_miss++;
+        return 0;
+}
+
+
+/*
+ * Free memory in use by all state info kept.  (module unload)
+ */
+void fr_stateunload()
+{
+        register int i;
+        register ipstate_t *is, **isp;
+
+        MUTEX_ENTER(&ipf_state);
+        for (i = 0; i < IPSTATE_SIZE; i++)
+                for (isp = &ips_table[i]; (is = *isp); ) {
+                        *isp = is->is_next;
+                        KFREE(is);
+                }
+        MUTEX_EXIT(&ipf_state);
+}
+
+
+/*
+ * Slowly expire held state for things like UDP and ICMP.  Timeouts are set
+ * in expectation of this being called twice per second.
+ */
+void fr_timeoutstate()
+{
+        register int i;
+        register ipstate_t *is, **isp;
+#if defined(_KERNEL) && !SOLARIS
+        int s;
+#endif
+
+        SPL_NET(s);
+        MUTEX_ENTER(&ipf_state);
+        for (i = 0; i < IPSTATE_SIZE; i++)
+                for (isp = &ips_table[i]; (is = *isp); )
+                        /* is_age == 0 means "never expires". */
+                        if (is->is_age && !--is->is_age) {
+                                *isp = is->is_next;
+                                if (is->is_p == IPPROTO_TCP)
+                                        ips_stats.iss_fin++;
+                                else
+                                        ips_stats.iss_expire++;
+#if IPFILTER_LOG
+                                ipstate_log(is, ISL_EXPIRE);
+#endif
+                                KFREE(is);
+                                ips_num--;
+                        } else
+                                isp = &is->is_next;
+        MUTEX_EXIT(&ipf_state);
+        SPL_X(s);
+}
+
+
+/*
+ * Original idea from Pradeep Krishnan for use primarily with NAT code.
+ * (pkrishna@netcom.com)
+ */
+/*
+ * Advance one direction of a tracked TCP connection's state machine and
+ * set *age (the entry's timeout, in half-second ticks) accordingly.
+ * 'state' is the two-element per-direction state array; 'dir' selects
+ * which element this packet updates (the other is consulted as 'ostate').
+ */
+void fr_tcp_age(age, state, ip, fin, dir)
+u_long *age;
+u_char *state;
+ip_t *ip;
+fr_info_t *fin;
+int dir;
+{
+        tcphdr_t *tcp = (tcphdr_t *)fin->fin_dp;
+        u_char flags = tcp->th_flags;
+        int dlen, ostate;
+
+        ostate = state[1 - dir];
+
+        /* TCP payload length = IP length - IP header - TCP header. */
+        dlen = ip->ip_len - fin->fin_hlen - (tcp->th_off << 2);
+
+        if (flags & TH_RST) {
+                /* Bare RST closes immediately; RST with data lingers. */
+                if (!(tcp->th_flags & TH_PUSH) && !dlen) {
+                        *age = fr_tcpclosed;
+                        state[dir] = TCPS_CLOSED;
+                } else {
+                        *age = fr_tcpclosewait;
+                        state[dir] = TCPS_CLOSE_WAIT;
+                }
+                return;
+        }
+
+        *age = fr_tcptimeout;   /* 1 min */
+
+        switch(state[dir])
+        {
+        case TCPS_FIN_WAIT_2:
+        case TCPS_CLOSED:
+                /* TH_OPENING is (TH_SYN|TH_ACK), see ip_state.h. */
+                if ((flags & TH_OPENING) == TH_OPENING)
+                        state[dir] = TCPS_SYN_RECEIVED;
+                else if (flags & TH_SYN)
+                        state[dir] = TCPS_SYN_SENT;
+                break;
+        case TCPS_SYN_RECEIVED:
+                if ((flags & (TH_FIN|TH_ACK)) == TH_ACK) {
+                        state[dir] = TCPS_ESTABLISHED;
+                        current_active_connections++;
+                        *age = fr_tcpidletimeout;
+                }
+                break;
+        case TCPS_SYN_SENT:
+                if ((flags & (TH_FIN|TH_ACK)) == TH_ACK) {
+                        state[dir] = TCPS_ESTABLISHED;
+                        current_active_connections++;
+                        *age = fr_tcpidletimeout;
+                }
+                break;
+        case TCPS_ESTABLISHED:
+                if (flags & TH_FIN) {
+                        state[dir] = TCPS_CLOSE_WAIT;
+                        /* Pure FIN with peer already closing: short timer. */
+                        if (!(flags & TH_PUSH) && !dlen &&
+                            ostate > TCPS_ESTABLISHED)
+                                *age = fr_tcplastack;
+                        else
+                                *age = fr_tcpclosewait;
+                } else
+                        *age = fr_tcpidletimeout;
+                break;
+        case TCPS_CLOSE_WAIT:
+                if ((flags & TH_FIN) && !(flags & TH_PUSH) && !dlen &&
+                    ostate > TCPS_ESTABLISHED) {
+                        *age = fr_tcplastack;
+                        state[dir] = TCPS_LAST_ACK;
+                } else
+                        *age = fr_tcpclosewait;
+                break;
+        case TCPS_LAST_ACK:
+                if (flags & TH_ACK) {
+                        state[dir] = TCPS_FIN_WAIT_2;
+                        if (!(flags & TH_PUSH) && !dlen &&
+                            ostate > TCPS_ESTABLISHED)
+                                *age = fr_tcplastack;
+                        else {
+                                *age = fr_tcpclosewait;
+                                state[dir] = TCPS_CLOSE_WAIT;
+                        }
+                }
+                break;
+        }
+}
+
+
+#if IPFILTER_LOG
+/*
+ * Emit a state-table event (ISL_NEW / ISL_EXPIRE / ISL_FLUSH) to the
+ * IPL_LOGSTATE log via ipllog().
+ */
+void ipstate_log(is, type)
+struct ipstate *is;
+u_short type;
+{
+        struct ipslog ipsl;
+        void *items[1];
+        size_t sizes[1];
+        int types[1];
+
+        ipsl.isl_pkts = is->is_pkts;
+        ipsl.isl_bytes = is->is_bytes;
+        ipsl.isl_src = is->is_src;
+        ipsl.isl_dst = is->is_dst;
+        ipsl.isl_p = is->is_p;
+        ipsl.isl_flags = is->is_flags;
+        ipsl.isl_type = type;
+        if (ipsl.isl_p == IPPROTO_TCP || ipsl.isl_p == IPPROTO_UDP) {
+                ipsl.isl_sport = is->is_sport;
+                ipsl.isl_dport = is->is_dport;
+        } else if (ipsl.isl_p == IPPROTO_ICMP)
+                ipsl.isl_itype = is->is_icmp.ics_type;
+        else {
+                ipsl.isl_ps.isl_filler[0] = 0;
+                ipsl.isl_ps.isl_filler[1] = 0;
+        }
+        items[0] = &ipsl;
+        sizes[0] = sizeof(ipsl);
+        types[0] = 0;
+
+        (void) ipllog(IPL_LOGSTATE, 0, items, sizes, types, 1);
+}
+#endif
diff --git a/bsd/netinet/ip_state.h b/bsd/netinet/ip_state.h
new file mode 100644
index 000000000..f27f94cb1
--- /dev/null
+++ b/bsd/netinet/ip_state.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (C) 1995-1997 by Darren Reed.
+ * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + * @(#)ip_state.h 1.3 1/12/96 (C) 1995 Darren Reed + */ +#ifndef __IP_STATE_H__ +#define __IP_STATE_H__ + +#define IPSTATE_SIZE 257 +#define IPSTATE_MAX 2048 /* Maximum number of states held */ + +#define PAIRS(s1,d1,s2,d2) ((((s1) == (s2)) && ((d1) == (d2))) ||\ + (((s1) == (d2)) && ((d1) == (s2)))) +#define IPPAIR(s1,d1,s2,d2) PAIRS((s1).s_addr, (d1).s_addr, \ + (s2).s_addr, (d2).s_addr) + + +typedef struct udpstate { + u_short us_sport; + u_short us_dport; +} udpstate_t; + +typedef struct icmpstate { + u_short ics_id; + u_short ics_seq; + u_char ics_type; +} icmpstate_t; + +typedef struct tcpstate { + u_short ts_sport; + u_short ts_dport; + u_long ts_seq; + u_long ts_ack; + u_short ts_swin; + u_short ts_dwin; + u_char ts_state[2]; +} tcpstate_t; + +typedef struct ipstate { + struct ipstate *is_next; + u_long is_age; + u_int is_pass; + U_QUAD_T is_pkts; + U_QUAD_T is_bytes; + void *is_ifpin; + void *is_ifpout; + struct in_addr is_src; + struct in_addr is_dst; + u_char is_p; + u_char is_flags; + u_32_t is_opt; + u_32_t is_optmsk; + u_short is_sec; + u_short is_secmsk; + u_short is_auth; + u_short is_authmsk; + union { + icmpstate_t is_ics; + tcpstate_t is_ts; + udpstate_t is_us; + } is_ps; +} ipstate_t; + +#define is_icmp is_ps.is_ics +#define is_tcp is_ps.is_ts +#define is_udp is_ps.is_us +#define is_seq is_tcp.ts_seq +#define is_ack is_tcp.ts_ack +#define is_dwin is_tcp.ts_dwin +#define is_swin is_tcp.ts_swin +#define is_sport is_tcp.ts_sport +#define is_dport is_tcp.ts_dport +#define is_state is_tcp.ts_state + +#define TH_OPENING (TH_SYN|TH_ACK) + + +typedef struct ipslog { + U_QUAD_T isl_pkts; + U_QUAD_T isl_bytes; + struct in_addr isl_src; + struct in_addr isl_dst; + u_char isl_p; + u_char isl_flags; + u_short isl_type; + union { + u_short isl_filler[2]; + u_short 
isl_ports[2]; + u_short isl_icmp; + } isl_ps; +} ipslog_t; + +#define isl_sport isl_ps.isl_ports[0] +#define isl_dport isl_ps.isl_ports[1] +#define isl_itype isl_ps.isl_icmp + +#define ISL_NEW 0 +#define ISL_EXPIRE 0xffff +#define ISL_FLUSH 0xfffe + + +typedef struct ips_stat { + u_long iss_hits; + u_long iss_miss; + u_long iss_max; + u_long iss_tcp; + u_long iss_udp; + u_long iss_icmp; + u_long iss_nomem; + u_long iss_expire; + u_long iss_fin; + u_long iss_active; + u_long iss_logged; + u_long iss_logfail; + ipstate_t **iss_table; +} ips_stat_t; + + +extern u_long fr_tcpidletimeout; +extern u_long fr_tcpclosewait; +extern u_long fr_tcplastack; +extern u_long fr_tcptimeout; +extern u_long fr_tcpclosed; +extern u_long fr_udptimeout; +extern u_long fr_icmptimeout; +extern int fr_tcpstate __P((ipstate_t *, fr_info_t *, ip_t *, tcphdr_t *)); +extern int fr_addstate __P((ip_t *, fr_info_t *, u_int)); +extern int fr_checkstate __P((ip_t *, fr_info_t *)); +extern void fr_timeoutstate __P((void)); +extern void fr_tcp_age __P((u_long *, u_char *, ip_t *, fr_info_t *, int)); +extern void fr_stateunload __P((void)); +extern void ipstate_log __P((struct ipstate *, u_short)); +#if defined(__NetBSD__) || defined(__OpenBSD__) +extern int fr_state_ioctl __P((caddr_t, u_long, int)); +#else +extern int fr_state_ioctl __P((caddr_t, int, int)); +#endif + +#endif /* __IP_STATE_H__ */ diff --git a/bsd/netinet/ip_var.h b/bsd/netinet/ip_var.h new file mode 100644 index 000000000..b994281ca --- /dev/null +++ b/bsd/netinet/ip_var.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_var.h 8.2 (Berkeley) 1/9/95 + */ + +#ifndef _NETINET_IP_VAR_H_ +#define _NETINET_IP_VAR_H_ + +/* + * Overlay for ip header used by other protocols (tcp, udp). + */ +struct ipovly { + u_char ih_x1[9]; /* (unused) */ + u_char ih_pr; /* protocol */ + u_short ih_len; /* protocol length */ + struct in_addr ih_src; /* source internet address */ + struct in_addr ih_dst; /* destination internet address */ +}; + +/* + * Ip reassembly queue structure. Each fragment + * being reassembled is attached to one of these structures. + * They are timed out after ipq_ttl drops to 0, and may also + * be reclaimed if memory becomes tight. + */ +struct ipq { + struct ipq *next,*prev; /* to other reass headers */ + u_char ipq_ttl; /* time for reass q to live */ + u_char ipq_p; /* protocol of this fragment */ + u_short ipq_id; /* sequence id for reassembly */ + struct mbuf *ipq_frags; /* to ip headers of fragments */ + struct in_addr ipq_src,ipq_dst; +#if IPDIVERT + u_short ipq_divert; /* divert protocol port */ + u_short ipq_div_cookie; /* divert protocol cookie */ +#endif +}; + +/* + * Structure stored in mbuf in inpcb.ip_options + * and passed to ip_output when ip options are in use. + * The actual length of the options (including ipopt_dst) + * is in m_len. 
+ */ +#define MAX_IPOPTLEN 40 + +struct ipoption { + struct in_addr ipopt_dst; /* first-hop dst if source routed */ + char ipopt_list[MAX_IPOPTLEN]; /* options proper */ +}; + +/* + * Structure attached to inpcb.ip_moptions and + * passed to ip_output when IP multicast options are in use. + */ +struct ip_moptions { + struct ifnet *imo_multicast_ifp; /* ifp for outgoing multicasts */ + u_char imo_multicast_ttl; /* TTL for outgoing multicasts */ + u_char imo_multicast_loop; /* 1 => hear sends if a member */ + u_short imo_num_memberships; /* no. memberships this socket */ + struct in_multi *imo_membership[IP_MAX_MEMBERSHIPS]; + u_long imo_multicast_vif; /* vif num outgoing multicasts */ +}; + +struct ipstat { + u_long ips_total; /* total packets received */ + u_long ips_badsum; /* checksum bad */ + u_long ips_tooshort; /* packet too short */ + u_long ips_toosmall; /* not enough data */ + u_long ips_badhlen; /* ip header length < data size */ + u_long ips_badlen; /* ip length < ip header length */ + u_long ips_fragments; /* fragments received */ + u_long ips_fragdropped; /* frags dropped (dups, out of space) */ + u_long ips_fragtimeout; /* fragments timed out */ + u_long ips_forward; /* packets forwarded */ + u_long ips_fastforward; /* packets fast forwarded */ + u_long ips_cantforward; /* packets rcvd for unreachable dest */ + u_long ips_redirectsent; /* packets forwarded on same net */ + u_long ips_noproto; /* unknown or unsupported protocol */ + u_long ips_delivered; /* datagrams delivered to upper level*/ + u_long ips_localout; /* total ip packets generated here */ + u_long ips_odropped; /* lost packets due to nobufs, etc. */ + u_long ips_reassembled; /* total packets reassembled ok */ + u_long ips_fragmented; /* datagrams successfully fragmented */ + u_long ips_ofragments; /* output fragments created */ + u_long ips_cantfrag; /* don't fragment flag was set, etc. 
*/ + u_long ips_badoptions; /* error in option processing */ + u_long ips_noroute; /* packets discarded due to no route */ + u_long ips_badvers; /* ip version != 4 */ + u_long ips_rawout; /* total raw ip packets generated */ + u_long ips_toolong; /* ip length > max ip packet size */ + u_long ips_notmember; /* multicasts for unregistered grps */ + u_long ips_nogif; /* no match gif found */ +}; + +#if KERNEL + +/* flags passed to ip_output as last parameter */ +#define IP_FORWARDING 0x1 /* most of ip header exists */ +#define IP_RAWOUTPUT 0x2 /* raw ip header exists */ +#define IP_ROUTETOIF SO_DONTROUTE /* bypass routing tables */ +#define IP_ALLOWBROADCAST SO_BROADCAST /* can send broadcast packets */ + +struct ip; +struct inpcb; +struct route; +struct sockopt; + +extern struct ipstat ipstat; +extern u_short ip_id; /* ip packet ctr, for ids */ +extern int ip_defttl; /* default IP ttl */ +extern int ipforwarding; /* ip forwarding */ +extern struct protosw *ip_protox[]; +extern struct socket *ip_rsvpd; /* reservation protocol daemon */ +extern struct socket *ip_mrouter; /* multicast routing daemon */ +extern int (*legal_vif_num) __P((int)); +extern u_long (*ip_mcast_src) __P((int)); +extern int rsvp_on; +extern struct pr_usrreqs rip_usrreqs; + +int ip_ctloutput __P((struct socket *, struct sockopt *sopt)); +void ip_drain __P((void)); +void ip_freemoptions __P((struct ip_moptions *)); +void ip_init __P((void)); +extern int (*ip_mforward) __P((struct ip *, struct ifnet *, struct mbuf *, + struct ip_moptions *)); +int ip_output __P((struct mbuf *, + struct mbuf *, struct route *, int, struct ip_moptions *)); +void ip_savecontrol __P((struct inpcb *, struct mbuf **, struct ip *, + struct mbuf *)); +void ip_slowtimo __P((void)); +struct mbuf * + ip_srcroute __P((void)); +void ip_stripoptions __P((struct mbuf *, struct mbuf *)); +int rip_ctloutput __P((struct socket *, struct sockopt *)); +void rip_ctlinput __P((int, struct sockaddr *, void *)); +void rip_init __P((void)); 
+void rip_input __P((struct mbuf *, int)); +int rip_output __P((struct mbuf *, struct socket *, u_long)); +void ipip_input __P((struct mbuf *, int)); +void rsvp_input __P((struct mbuf *, int)); +int ip_rsvp_init __P((struct socket *)); +int ip_rsvp_done __P((void)); +int ip_rsvp_vif_init __P((struct socket *, struct sockopt *)); +int ip_rsvp_vif_done __P((struct socket *, struct sockopt *)); +void ip_rsvp_force_done __P((struct socket *)); + +#if IPDIVERT +void div_init __P((void)); +void div_input __P((struct mbuf *, int)); +extern struct pr_usrreqs div_usrreqs; +extern u_short ip_divert_port; +extern u_short ip_divert_cookie; +#endif + +extern struct sockaddr_in *ip_fw_fwd_addr; + +#endif /* KERNEL */ + +#endif /* !_NETINET_IP_VAR_H_ */ diff --git a/bsd/netinet/ipl.h b/bsd/netinet/ipl.h new file mode 100644 index 000000000..dba3d458b --- /dev/null +++ b/bsd/netinet/ipl.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1993-1997 by Darren Reed. 
+ * + * Redistribution and use in source and binary forms are permitted + * provided that this notice is preserved and due credit is given + * to the original author and the contributors. + * + * @(#)ipl.h 1.21 6/5/96 + */ + +#ifndef __IPL_H__ +#define __IPL_H__ + +#define IPL_VERSION "IP Filter v3.2.7" + +#endif diff --git a/bsd/netinet/raw_ip.c b/bsd/netinet/raw_ip.c new file mode 100644 index 000000000..c305d431f --- /dev/null +++ b/bsd/netinet/raw_ip.c @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)raw_ip.c 8.7 (Berkeley) 5/15/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if ISFB31 +#include +#endif + +#include +#include + +#define _IP_VHL +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if IPSEC +#include +#endif /*IPSEC*/ + +#if ISFB31 +#include "opt_ipdn.h" +#endif + +#if DUMMYNET +#include +#endif +#if !defined(COMPAT_IPFW) || COMPAT_IPFW == 1 +#undef COMPAT_IPFW +#define COMPAT_IPFW 1 +#else +#undef COMPAT_IPFW +#endif + +struct inpcbhead ripcb; +struct inpcbinfo ripcbinfo; + +/* + * Nominal space allocated to a raw ip socket. + */ +#define RIPSNDQ 8192 +#define RIPRCVQ 8192 + +/* + * Raw interface to IP protocol. + */ + +/* + * Initialize raw connection block q. + */ +void +rip_init() +{ + LIST_INIT(&ripcb); + ripcbinfo.listhead = &ripcb; + /* + * XXX We don't use the hash list for raw IP, but it's easier + * to allocate a one entry hash list than it is to check all + * over the place for hashbase == NULL. + */ + ripcbinfo.hashbase = hashinit(1, M_PCB, &ripcbinfo.hashmask); + ripcbinfo.porthashbase = hashinit(1, M_PCB, &ripcbinfo.porthashmask); + + ripcbinfo.ipi_zone = (void *) zinit(sizeof(struct inpcb), + (4096 * sizeof(struct inpcb)), + 4096, "ripzone"); + +} + +static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET }; +/* + * Setup generic address and protocol structures + * for raw_input routine, then pass them along with + * mbuf chain. 
+ */ +void +rip_input(m, iphlen) + struct mbuf *m; + int iphlen; +{ + register struct ip *ip = mtod(m, struct ip *); + register struct inpcb *inp; + struct inpcb *last = 0; + struct mbuf *opts = 0; + short need_wakeup = 0; + + ripsrc.sin_addr = ip->ip_src; + LIST_FOREACH(inp, &ripcb, inp_list) { + if ((inp->inp_vflag & INP_IPV4) == NULL) + continue; +#warning do something about this + if (inp->inp_ip_p && inp->inp_ip_p != ip->ip_p) + continue; + if (inp->inp_laddr.s_addr && + inp->inp_laddr.s_addr != ip->ip_dst.s_addr) + continue; + if (inp->inp_faddr.s_addr && + inp->inp_faddr.s_addr != ip->ip_src.s_addr) + continue; + if (last) { + struct mbuf *n = m_copy(m, 0, (int)M_COPYALL); + if (n) { + if (last->inp_flags & INP_CONTROLOPTS || + last->inp_socket->so_options & SO_TIMESTAMP) + ip_savecontrol(last, &opts, ip, n); + if (last->inp_flags & INP_STRIPHDR) { + n->m_len -= iphlen; + n->m_pkthdr.len -= iphlen; + n->m_data += iphlen; + } + if (sbappendaddr(&last->inp_socket->so_rcv, + (struct sockaddr *)&ripsrc, n, + opts) == 0) { + /* should notify about lost packet */ + kprintf("rip_input can't append to socket\n"); + m_freem(n); + if (opts) + m_freem(opts); + } else { + /* kprintf("rip_input calling sorwakeup\n"); */ + need_wakeup++; + } + opts = 0; + } + } + last = inp; + } + if (last) { + if (last->inp_flags & INP_CONTROLOPTS || + last->inp_socket->so_options & SO_TIMESTAMP) + ip_savecontrol(last, &opts, ip, m); + if (last->inp_flags & INP_STRIPHDR) { + m->m_len -= iphlen; + m->m_pkthdr.len -= iphlen; + m->m_data += iphlen; + } + if (sbappendaddr(&last->inp_socket->so_rcv, + (struct sockaddr *)&ripsrc, m, opts) == 0) { + kprintf("rip_input(2) can't append to socket\n"); + m_freem(m); + if (opts) + m_freem(opts); + } else { + /* kprintf("rip_input calling sorwakeup\n"); */ + need_wakeup++; + } + } else { + m_freem(m); + ipstat.ips_noproto++; + ipstat.ips_delivered--; + } + if (need_wakeup) + sorwakeup(last->inp_socket); +} + +/* + * Generate IP header and pass packet 
to ip_output. + * Tack on options user may have setup with control call. + */ +int +rip_output(m, so, dst) + register struct mbuf *m; + struct socket *so; + u_long dst; +{ + register struct ip *ip; + register struct inpcb *inp = sotoinpcb(so); + int flags = (so->so_options & SO_DONTROUTE) | IP_ALLOWBROADCAST; + + /* + * If the user handed us a complete IP packet, use it. + * Otherwise, allocate an mbuf for a header and fill it in. + */ + if ((inp->inp_flags & INP_HDRINCL) == 0) { + if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) { + m_freem(m); + return(EMSGSIZE); + } + M_PREPEND(m, sizeof(struct ip), M_WAIT); + ip = mtod(m, struct ip *); + ip->ip_tos = 0; + ip->ip_off = 0; + ip->ip_p = inp->inp_ip_p; + ip->ip_len = m->m_pkthdr.len; + ip->ip_src = inp->inp_laddr; + ip->ip_dst.s_addr = dst; + ip->ip_ttl = MAXTTL; + } else { + if (m->m_pkthdr.len > IP_MAXPACKET) { + m_freem(m); + return(EMSGSIZE); + } + ip = mtod(m, struct ip *); + /* don't allow both user specified and setsockopt options, + and don't allow packet length sizes that will crash */ + if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2)) + && inp->inp_options) + || (ip->ip_len > m->m_pkthdr.len) + || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) { + m_freem(m); + return EINVAL; + } + if (ip->ip_id == 0) + ip->ip_id = htons(ip_id++); + /* XXX prevent ip_output from overwriting header fields */ + flags |= IP_RAWOUTPUT; + ipstat.ips_rawout++; + } + +#if IPSEC + m->m_pkthdr.rcvif = (struct ifnet *)so; /*XXX*/ +#endif /*IPSEC*/ + + return (ip_output(m, inp->inp_options, &inp->inp_route, flags, + inp->inp_moptions)); +} + +/* + * Raw IP socket option processing. 
+ */ +int +rip_ctloutput(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + struct inpcb *inp = sotoinpcb(so); + int error, optval; + + if (sopt->sopt_level != IPPROTO_IP) + return (EINVAL); + + error = 0; + + switch (sopt->sopt_dir) { + case SOPT_GET: + switch (sopt->sopt_name) { + case IP_HDRINCL: + optval = inp->inp_flags & INP_HDRINCL; + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + + case IP_STRIPHDR: + optval = inp->inp_flags & INP_STRIPHDR; + error = sooptcopyout(sopt, &optval, sizeof optval); + break; + +#if COMPAT_IPFW + case IP_FW_GET: + if (ip_fw_ctl_ptr == 0) + error = ENOPROTOOPT; + else + error = ip_fw_ctl_ptr(sopt); + break; + + case IP_NAT: + if (ip_nat_ctl_ptr == 0) + error = ENOPROTOOPT; + else + error = ip_nat_ctl_ptr(sopt); + break; +#if DUMMYNET + case IP_DUMMYNET_GET: + if (ip_dn_ctl_ptr == NULL) + error = ENOPROTOOPT ; + else + error = ip_dn_ctl_ptr(sopt); + break ; +#endif /* DUMMYNET */ +#endif /* COMPAT_IPFW */ + + case MRT_INIT: + case MRT_DONE: + case MRT_ADD_VIF: + case MRT_DEL_VIF: + case MRT_ADD_MFC: + case MRT_DEL_MFC: + case MRT_VERSION: + case MRT_ASSERT: + error = ip_mrouter_get(so, sopt); + break; + + default: + error = ip_ctloutput(so, sopt); + break; + } + break; + + case SOPT_SET: + switch (sopt->sopt_name) { + case IP_HDRINCL: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval) + inp->inp_flags |= INP_HDRINCL; + else + inp->inp_flags &= ~INP_HDRINCL; + break; + + case IP_STRIPHDR: + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; + if (optval) + inp->inp_flags |= INP_STRIPHDR; + else + inp->inp_flags &= ~INP_STRIPHDR; + break; + + +#if COMPAT_IPFW + case IP_FW_ADD: + case IP_FW_DEL: + case IP_FW_FLUSH: + case IP_FW_ZERO: + if (ip_fw_ctl_ptr == 0) + error = ENOPROTOOPT; + else + error = ip_fw_ctl_ptr(sopt); + break; + + case IP_NAT: + if (ip_nat_ctl_ptr == 0) + error = ENOPROTOOPT; + else + error = 
ip_nat_ctl_ptr(sopt); + break; +#if DUMMYNET + case IP_DUMMYNET_CONFIGURE: + case IP_DUMMYNET_DEL: + case IP_DUMMYNET_FLUSH: + if (ip_dn_ctl_ptr == NULL) + error = ENOPROTOOPT ; + else + error = ip_dn_ctl_ptr(sopt); + break ; +#endif +#endif /* COMPAT_IPFW */ + + case IP_RSVP_ON: + error = ip_rsvp_init(so); + break; + + case IP_RSVP_OFF: + error = ip_rsvp_done(); + break; + + /* XXX - should be combined */ + case IP_RSVP_VIF_ON: + error = ip_rsvp_vif_init(so, sopt); + break; + + case IP_RSVP_VIF_OFF: + error = ip_rsvp_vif_done(so, sopt); + break; + + case MRT_INIT: + case MRT_DONE: + case MRT_ADD_VIF: + case MRT_DEL_VIF: + case MRT_ADD_MFC: + case MRT_DEL_MFC: + case MRT_VERSION: + case MRT_ASSERT: + error = ip_mrouter_set(so, sopt); + break; + + default: + error = ip_ctloutput(so, sopt); + break; + } + break; + } + + return (error); +} + +/* + * This function exists solely to receive the PRC_IFDOWN messages which + * are sent by if_down(). It looks for an ifaddr whose ifa_addr is sa, + * and calls in_ifadown() to remove all routes corresponding to that address. + * It also receives the PRC_IFUP messages from if_up() and reinstalls the + * interface routes. + */ +void +rip_ctlinput(cmd, sa, vip) + int cmd; + struct sockaddr *sa; + void *vip; +{ + struct in_ifaddr *ia; + struct ifnet *ifp; + int err; + int flags; + + switch (cmd) { + case PRC_IFDOWN: + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) { + if (ia->ia_ifa.ifa_addr == sa + && (ia->ia_flags & IFA_ROUTE)) { + /* + * in_ifscrub kills the interface route. + */ + in_ifscrub(ia->ia_ifp, ia); + /* + * in_ifadown gets rid of all the rest of + * the routes. This is not quite the right + * thing to do, but at least if we are running + * a routing process they will come back. 
+ */ + in_ifadown(&ia->ia_ifa); + break; + } + } + break; + + case PRC_IFUP: + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) { + if (ia->ia_ifa.ifa_addr == sa) + break; + } + if (ia == 0 || (ia->ia_flags & IFA_ROUTE)) + return; + flags = RTF_UP; + ifp = ia->ia_ifa.ifa_ifp; + + if ((ifp->if_flags & IFF_LOOPBACK) + || (ifp->if_flags & IFF_POINTOPOINT)) + flags |= RTF_HOST; + + err = rtinit(&ia->ia_ifa, RTM_ADD, flags); + if (err == 0) + ia->ia_flags |= IFA_ROUTE; + break; + } +} + +u_long rip_sendspace = RIPSNDQ; +u_long rip_recvspace = RIPRCVQ; + +SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW, &rip_sendspace, + 0, ""); +SYSCTL_INT(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW, &rip_recvspace, + 0, ""); + +static int +rip_attach(struct socket *so, int proto, struct proc *p) +{ + struct inpcb *inp; + int error, s; + + inp = sotoinpcb(so); + if (inp) + panic("rip_attach"); + + +#if ISFB31 + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; +#else + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); +#endif + + s = splnet(); + error = in_pcballoc(so, &ripcbinfo, p); + splx(s); + if (error) + return error; + error = soreserve(so, rip_sendspace, rip_recvspace); + if (error) + return error; + inp = (struct inpcb *)so->so_pcb; + inp->inp_vflag |= INP_IPV4; + inp->inp_ip_p = proto; +#if IPSEC + error = ipsec_init_policy(so, &inp->inp_sp); + if (error != 0) { + in_pcbdetach(inp); + return error; + } +#endif /*IPSEC*/ + + return 0; +} + +static int +rip_detach(struct socket *so) +{ + struct inpcb *inp; + + inp = sotoinpcb(so); + if (inp == 0) + panic("rip_detach"); + if (so == ip_mrouter) + ip_mrouter_done(); + ip_rsvp_force_done(so); + if (so == ip_rsvpd) + ip_rsvp_done(); + in_pcbdetach(inp); + return 0; +} + +static int +rip_abort(struct socket *so) +{ + soisdisconnected(so); + return rip_detach(so); +} + +static int +rip_disconnect(struct socket *so) +{ + if ((so->so_state & SS_ISCONNECTED) == 0) + return ENOTCONN; 
+ return rip_abort(so); +} + +static int +rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp = sotoinpcb(so); + struct sockaddr_in *addr = (struct sockaddr_in *)nam; + + if (nam->sa_len != sizeof(*addr)) + return EINVAL; + + if (TAILQ_EMPTY(&ifnet) || ((addr->sin_family != AF_INET) && + (addr->sin_family != AF_IMPLINK)) || + (addr->sin_addr.s_addr && + ifa_ifwithaddr((struct sockaddr *)addr) == 0)) + return EADDRNOTAVAIL; + inp->inp_laddr = addr->sin_addr; + return 0; +} + +static int +rip_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp = sotoinpcb(so); + struct sockaddr_in *addr = (struct sockaddr_in *)nam; + + if (nam->sa_len != sizeof(*addr)) + return EINVAL; + if (TAILQ_EMPTY(&ifnet)) + return EADDRNOTAVAIL; + if ((addr->sin_family != AF_INET) && + (addr->sin_family != AF_IMPLINK)) + return EAFNOSUPPORT; + inp->inp_faddr = addr->sin_addr; + soisconnected(so); + return 0; +} + +static int +rip_shutdown(struct socket *so) +{ + socantsendmore(so); + return 0; +} + +static int +rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, + struct mbuf *control, struct proc *p) +{ + struct inpcb *inp = sotoinpcb(so); + register u_long dst; + + if (so->so_state & SS_ISCONNECTED) { + if (nam) { + m_freem(m); + return EISCONN; + } + dst = inp->inp_faddr.s_addr; + } else { + if (nam == NULL) { + m_freem(m); + return ENOTCONN; + } + dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr; + } + return rip_output(m, so, dst); +} + + +static int +rip_pcblist SYSCTL_HANDLER_ARGS +{ + int error, i, n, s; + struct inpcb *inp, **inp_list; + inp_gen_t gencnt; + struct xinpgen xig; + + /* + * The process of preparing the TCB list is too time-consuming and + * resource-intensive to repeat twice on every request. 
+ */ + if (req->oldptr == 0) { + n = ripcbinfo.ipi_count; + req->oldidx = 2 * (sizeof xig) + + (n + n/8) * sizeof(struct xinpcb); + return 0; + } + + if (req->newptr != 0) + return EPERM; + + /* + * OK, now we're committed to doing something. + */ + s = splnet(); + gencnt = ripcbinfo.ipi_gencnt; + n = ripcbinfo.ipi_count; + splx(s); + + xig.xig_len = sizeof xig; + xig.xig_count = n; + xig.xig_gen = gencnt; + xig.xig_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xig, sizeof xig); + if (error) + return error; + + inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK); + if (inp_list == 0) + return ENOMEM; + + s = splnet(); + for (inp = ripcbinfo.listhead->lh_first, i = 0; inp && i < n; + inp = inp->inp_list.le_next) { + if (inp->inp_gencnt <= gencnt) + inp_list[i++] = inp; + } + splx(s); + n = i; + + error = 0; + for (i = 0; i < n; i++) { + inp = inp_list[i]; + if (inp->inp_gencnt <= gencnt) { + struct xinpcb xi; + xi.xi_len = sizeof xi; + /* XXX should avoid extra copy */ + bcopy(inp, &xi.xi_inp, sizeof *inp); + if (inp->inp_socket) + sotoxsocket(inp->inp_socket, &xi.xi_socket); + error = SYSCTL_OUT(req, &xi, sizeof xi); + } + } + if (!error) { + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. 
+ */ + s = splnet(); + xig.xig_gen = ripcbinfo.ipi_gencnt; + xig.xig_sogen = so_gencnt; + xig.xig_count = ripcbinfo.ipi_count; + splx(s); + error = SYSCTL_OUT(req, &xig, sizeof xig); + } + FREE(inp_list, M_TEMP); + return error; +} + + +SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0, + rip_pcblist, "S,xinpcb", "List of active raw IP sockets"); + +struct pr_usrreqs rip_usrreqs = { + rip_abort, pru_accept_notsupp, rip_attach, rip_bind, rip_connect, + pru_connect2_notsupp, in_control, rip_detach, rip_disconnect, + pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, rip_send, pru_sense_null, rip_shutdown, + in_setsockaddr, sosend, soreceive, sopoll +}; diff --git a/bsd/netinet/tcp.h b/bsd/netinet/tcp.h new file mode 100644 index 000000000..5cd425b9c --- /dev/null +++ b/bsd/netinet/tcp.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tcp.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_TCP_H_ +#define _NETINET_TCP_H_ + +typedef u_int32_t tcp_seq; +typedef u_int32_t tcp_cc; /* connection count per rfc1644 */ + +#define tcp6_seq tcp_seq /* for KAME src sync over BSD*'s */ +#define tcp6hdr tcphdr /* for KAME src sync over BSD*'s */ + +/* + * TCP header. + * Per RFC 793, September, 1981. + */ +struct tcphdr { + u_short th_sport; /* source port */ + u_short th_dport; /* destination port */ + tcp_seq th_seq; /* sequence number */ + tcp_seq th_ack; /* acknowledgement number */ +#if BYTE_ORDER == LITTLE_ENDIAN + u_int th_x2:4, /* (unused) */ + th_off:4; /* data offset */ +#endif +#if BYTE_ORDER == BIG_ENDIAN + u_int th_off:4, /* data offset */ + th_x2:4; /* (unused) */ +#endif + u_char th_flags; +#define TH_FIN 0x01 +#define TH_SYN 0x02 +#define TH_RST 0x04 +#define TH_PUSH 0x08 +#define TH_ACK 0x10 +#define TH_URG 0x20 +#define TH_FLAGS (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + + u_short th_win; /* window */ + u_short th_sum; /* checksum */ + u_short th_urp; /* urgent pointer */ +}; + +#define TCPOPT_EOL 0 +#define TCPOPT_NOP 1 +#define TCPOPT_MAXSEG 2 +#define TCPOLEN_MAXSEG 4 +#define TCPOPT_WINDOW 3 +#define TCPOLEN_WINDOW 3 +#define TCPOPT_SACK_PERMITTED 4 /* Experimental */ +#define TCPOLEN_SACK_PERMITTED 2 +#define TCPOPT_SACK 5 /* Experimental */ +#define TCPOPT_TIMESTAMP 8 +#define TCPOLEN_TIMESTAMP 10 +#define TCPOLEN_TSTAMP_APPA (TCPOLEN_TIMESTAMP+2) /* appendix A */ +#define TCPOPT_TSTAMP_HDR \ + (TCPOPT_NOP<<24|TCPOPT_NOP<<16|TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP) + +#define TCPOPT_CC 11 /* CC options: RFC-1644 */ +#define TCPOPT_CCNEW 12 +#define TCPOPT_CCECHO 13 +#define TCPOLEN_CC 6 +#define TCPOLEN_CC_APPA (TCPOLEN_CC+2) +#define TCPOPT_CC_HDR(ccopt) \ + (TCPOPT_NOP<<24|TCPOPT_NOP<<16|(ccopt)<<8|TCPOLEN_CC) + +/* + * Default maximum segment size for TCP. + * With an IP MSS of 576, this is 536, + * but 512 is probably more convenient. 
+ * This should be defined as MIN(512, IP_MSS - sizeof (struct tcpiphdr)). + */ +#define TCP_MSS 512 + +/* + * Default maximum segment size for TCP6. + * With an IP6 MSS of 1280, this is 1220, + * but 1024 is probably more convenient. (xxx kazu in doubt) + * This should be defined as MIN(1024, IP6_MSS - sizeof (struct tcpip6hdr)) + */ +#define TCP6_MSS 1024 + +#define TCP_MAXWIN 65535 /* largest value for (unscaled) window */ +#define TTCP_CLIENT_SND_WND 4096 /* dflt send window for T/TCP client */ + +#define TCP_MAX_WINSHIFT 14 /* maximum window shift */ + +#define TCP_MAXHLEN (0xf<<2) /* max length of header in bytes */ +#define TCP_MAXOLEN (TCP_MAXHLEN - sizeof(struct tcphdr)) + /* max space left for options */ + +/* + * User-settable options (used with setsockopt). + */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_MAXSEG 0x02 /* set maximum segment size */ +#define TCP_NOPUSH 0x04 /* don't push last block of write */ +#define TCP_NOOPT 0x08 /* don't use TCP options */ + +#endif diff --git a/bsd/netinet/tcp_debug.c b/bsd/netinet/tcp_debug.c new file mode 100644 index 000000000..8eef1d998 --- /dev/null +++ b/bsd/netinet/tcp_debug.c @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_debug.c	8.1 (Berkeley) 6/10/93
 */

#if ISFB31
#include "opt_inet.h"
#include "opt_tcpdebug.h"
#endif

#ifndef INET
#error The option TCPDEBUG requires option INET.
#endif

#if TCPDEBUG
/* load symbolic names for the tables printed below */
#define PRUREQUESTS
#define TCPSTATES
#define TCPTIMERS
#define TANAMES
#endif

/* NOTE(review): the header paths after these '#include' directives were
 * lost when this chunk was extracted (angle-bracketed names stripped);
 * restore them from the original bsd/netinet/tcp_debug.c before building. */
#include
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if TCPDEBUG
static int	tcpconsdebug = 0;	/* nonzero => also printf each trace record */
#endif

/* Circular in-kernel trace buffer of the last TCP_NDEBUG events. */
static struct tcp_debug tcp_debug[TCP_NDEBUG];
static int	tcp_debx;		/* next free slot in tcp_debug[] (wraps) */

/*
 * Tcp debug routines
 *
 * Record one TCP event in the circular tcp_debug[] buffer, snapshotting
 * the tcpcb, the IP (or, under INET6, IPv6) header and the TCP header
 * involved.  When compiled with TCPDEBUG and tcpconsdebug is nonzero,
 * the event is also pretty-printed to the console.
 *
 * act:    TA_* event code (input/output/user/respond/drop)
 * ostate: connection state before the event
 * tp:     tcpcb involved, may be NULL
 * ip:     raw IP/IPv6 header, may be NULL
 * th:     TCP header, may be NULL
 * req:    PRU_* request code (meaningful for TA_USER events)
 */
void
tcp_trace(act, ostate, tp, ip, th, req)
	short act, ostate;
	struct tcpcb *tp;
#if INET6
	void *ip;
#else
	struct ip *ip;
#endif
	struct tcphdr *th;
	int req;
{
#if INET6
	/* distinguish v4/v6 by the version nibble of the raw header */
	int isipv6 = (ip != NULL && ((struct ip *)ip)->ip_v == 6) ? 1 : 0;
#endif /* INET6 */
	tcp_seq seq, ack;
	int len, flags;
	struct tcp_debug *td = &tcp_debug[tcp_debx++];

	/* wrap the circular buffer index */
	if (tcp_debx == TCP_NDEBUG)
		tcp_debx = 0;
	td->td_time = iptime();
	td->td_act = act;
	td->td_ostate = ostate;
	td->td_tcb = (caddr_t)tp;
	/* snapshot (or zero) the whole control block */
	if (tp)
		td->td_cb = *tp;
	else
		bzero((caddr_t)&td->td_cb, sizeof (*tp));
	if (ip) {
#if INET6
		if (isipv6)
			td->td_ip6 = *(struct ip6_hdr *)ip;
		else
			td->td_ip = *(struct ip *)ip;
#else /* INET6 */
		td->td_ip = *ip;
#endif /* INET6 */
	} else
#if INET6
		bzero((caddr_t)&td->_td_ipx, sizeof (td->_td_ipx));
#else /* INET6 */
		bzero((caddr_t)&td->td_ip, sizeof (*ip));
#endif /* INET6 */
	if (th)
		td->td_th = *th;

	td->td_req = req;
#if TCPDEBUG
	if (tcpconsdebug == 0)
		return;
	if (tp)
		printf("%p %s:", tp, tcpstates[ostate]);
	else
		printf("???????? ");
	printf("%s ", tanames[act]);
	switch (act) {

	case TA_INPUT:
	case TA_OUTPUT:
	case TA_DROP:
		if (ip == 0)
			break;
#if INET6
		if (isipv6) {
			len = ((struct ip6_hdr *)ip)->ip6_plen;
		} else {
			len = ((struct ip *)ip)->ip_len;
		}
#else /* INET6 */
		len = ip->ip_len;
#endif /* INET6 */
		seq = th->th_seq;
		ack = th->th_ack;
		/* on the output path the header fields are still in
		 * network byte order; convert before printing */
		if (act == TA_OUTPUT) {
			seq = ntohl(seq);
			ack = ntohl(ack);
			len = ntohs((u_short)len);
		}
		if (act == TA_OUTPUT)
			len -= sizeof (struct tcphdr);
		if (len)
			printf("[%x..%x)", seq, seq+len);
		else
			printf("%x", seq);
		printf("@%x, urp=%x", ack, th->th_urp);
		flags = th->th_flags;
		if (flags) {
			char *cp = "<";
/* print each flag that is set, comma separated, e.g. <SYN,ACK> */
#define pf(f) {					\
	if (th->th_flags & TH_##f) {		\
		printf("%s%s", cp, #f);		\
		cp = ",";			\
	}					\
}
			pf(SYN); pf(ACK); pf(FIN); pf(RST); pf(PUSH); pf(URG);
			printf(">");
		}
		break;

	case TA_USER:
		/* low byte is the PRU_* request; for slow timeouts the
		 * high byte encodes which timer fired */
		printf("%s", prurequests[req&0xff]);
		if ((req & 0xff) == PRU_SLOWTIMO)
			printf("<%s>", tcptimers[req>>8]);
		break;
	}
	if (tp)
		printf(" -> %s", tcpstates[tp->t_state]);
	/* print out internal state of tp !?! */
	printf("\n");
	if (tp == 0)
		return;
	printf(
"\trcv_(nxt,wnd,up) (%lx,%lx,%lx) snd_(una,nxt,max) (%lx,%lx,%lx)\n",
	    (u_long)tp->rcv_nxt, tp->rcv_wnd, (u_long)tp->rcv_up,
	    (u_long)tp->snd_una, (u_long)tp->snd_nxt, (u_long)tp->snd_max);
	printf("\tsnd_(wl1,wl2,wnd) (%lx,%lx,%lx)\n",
	    (u_long)tp->snd_wl1, (u_long)tp->snd_wl2, tp->snd_wnd);
#endif /* TCPDEBUG */
}
/*
 * tcp_debug.h -- record layout for the in-kernel TCP event trace.
 *
 * (Apple Public Source License and 4.4BSD license headers as in the
 *  original file.)
 *
 *	@(#)tcp_debug.h	8.1 (Berkeley) 6/10/93
 */

#ifndef _NETINET_TCP_DEBUG_H_
#define _NETINET_TCP_DEBUG_H_

/*
 * One record of the circular trace kept by tcp_trace().  Each record
 * snapshots the packet headers and the entire tcpcb at the time of
 * the event, so records are large but self-contained.
 */
struct	tcp_debug {
	n_time	td_time;		/* timestamp (iptime()) */
	short	td_act;			/* TA_* event code, see below */
	short	td_ostate;		/* connection state before the event */
	caddr_t	td_tcb;			/* address of the tcpcb (may be stale) */
	union {
		struct ip _td_ip4;		/* IPv4 header copy */
#define td_ip	_td_ipx._td_ip4
#if INET6
		struct ip6_hdr _td_ip6;		/* IPv6 header copy */
#define td_ip6	_td_ipx._td_ip6
#endif
	} _td_ipx;
	struct	tcphdr td_th;		/* TCP header copy */
	short	td_req;			/* PRU_* request (TA_USER events) */
	struct	tcpcb td_cb;		/* full control-block snapshot */
};

/* trace event types (td_act) */
#define	TA_INPUT	0		/* segment received */
#define	TA_OUTPUT	1		/* segment sent */
#define	TA_USER		2		/* user (PRU_*) request */
#define	TA_RESPOND	3		/* tcp_respond() reply */
#define	TA_DROP		4		/* segment dropped */

#ifdef TANAMES
/* printable names for the TA_* codes, indexed by td_act */
static char *tanames[] =
    { "input", "output", "user", "respond", "drop" };
#endif

/* number of records in the circular trace buffer */
#define	TCP_NDEBUG 100

#ifndef KERNEL
/* XXX common variables for broken applications. */
struct	tcp_debug tcp_debug[TCP_NDEBUG];
int	tcp_debx;
#endif

#endif /* !_NETINET_TCP_DEBUG_H_ */
/*
 * tcp_fsm.h -- TCP finite-state-machine definitions.
 *
 * (Apple Public Source License and 4.4BSD license headers as in the
 *  original file.)
 *
 *	@(#)tcp_fsm.h	8.1 (Berkeley) 6/10/93
 */

#ifndef _NETINET_TCP_FSM_H_
#define _NETINET_TCP_FSM_H_

/*
 * TCP FSM state definitions.
 * Per RFC793, September, 1981.
 */

#define	TCP_NSTATES	11

#define	TCPS_CLOSED		0	/* closed */
#define	TCPS_LISTEN		1	/* listening for connection */
#define	TCPS_SYN_SENT		2	/* active, have sent syn */
#define	TCPS_SYN_RECEIVED	3	/* have send and received syn */
/* states < TCPS_ESTABLISHED are those where connections not established */
#define	TCPS_ESTABLISHED	4	/* established */
#define	TCPS_CLOSE_WAIT		5	/* rcvd fin, waiting for close */
/* states > TCPS_CLOSE_WAIT are those where user has closed */
#define	TCPS_FIN_WAIT_1		6	/* have closed, sent fin */
#define	TCPS_CLOSING		7	/* closed xchd FIN; await FIN ACK */
#define	TCPS_LAST_ACK		8	/* had fin and close; await FIN ACK */
/* states > TCPS_CLOSE_WAIT && < TCPS_FIN_WAIT_2 await ACK of FIN */
#define	TCPS_FIN_WAIT_2		9	/* have closed, fin is acked */
#define	TCPS_TIME_WAIT		10	/* in 2*msl quiet wait after close */

/* for KAME src sync over BSD*'s */
#define	TCP6_NSTATES		TCP_NSTATES
#define	TCP6S_CLOSED		TCPS_CLOSED
#define	TCP6S_LISTEN		TCPS_LISTEN
#define	TCP6S_SYN_SENT		TCPS_SYN_SENT
#define	TCP6S_SYN_RECEIVED	TCPS_SYN_RECEIVED
#define	TCP6S_ESTABLISHED	TCPS_ESTABLISHED
#define	TCP6S_CLOSE_WAIT	TCPS_CLOSE_WAIT
#define	TCP6S_FIN_WAIT_1	TCPS_FIN_WAIT_1
#define	TCP6S_CLOSING		TCPS_CLOSING
#define	TCP6S_LAST_ACK		TCPS_LAST_ACK
#define	TCP6S_FIN_WAIT_2	TCPS_FIN_WAIT_2
#define	TCP6S_TIME_WAIT		TCPS_TIME_WAIT

/* state-classification predicates; correct because the numeric values
 * above are ordered by connection progress */
#define	TCPS_HAVERCVDSYN(s)	((s) >= TCPS_SYN_RECEIVED)
#define	TCPS_HAVEESTABLISHED(s)	((s) >= TCPS_ESTABLISHED)
#define	TCPS_HAVERCVDFIN(s)	((s) >= TCPS_TIME_WAIT)

#ifdef	TCPOUTFLAGS
/*
 * Flags used when sending segments in tcp_output.
 * Basic flags (TH_RST,TH_ACK,TH_SYN,TH_FIN) are totally
 * determined by state, with the proviso that TH_FIN is sent only
 * if all data queued for output is included in the segment.
 */
static u_char	tcp_outflags[TCP_NSTATES] = {
    TH_RST|TH_ACK, 0, TH_SYN, TH_SYN|TH_ACK,
    TH_ACK, TH_ACK,
    TH_FIN|TH_ACK, TH_ACK, TH_FIN|TH_ACK, TH_ACK, TH_ACK,
};
#endif

#if	KPROF
/* per-state, per-request call counters (kernel profiling only) */
int	tcp_acounts[TCP_NSTATES][PRU_NREQ];
#endif

#ifdef	TCPSTATES
/* printable state names, indexed by TCPS_* value */
char *tcpstates[] = {
	"CLOSED",	"LISTEN",	"SYN_SENT",	"SYN_RCVD",
	"ESTABLISHED",	"CLOSE_WAIT",	"FIN_WAIT_1",	"CLOSING",
	"LAST_ACK",	"FIN_WAIT_2",	"TIME_WAIT",
};
#endif

#endif /* !_NETINET_TCP_FSM_H_ */
/*
 * tcp_input.c -- segment input processing (preamble: options, globals,
 * reassembly macro).
 *
 * (Apple Public Source License and 4.4BSD license headers as in the
 *  original file.)
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#if ISFB31
#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_tcpdebug.h"
#endif

/* NOTE(review): the header paths after these '#include' directives were
 * lost when this chunk was extracted (angle-bracketed names stripped);
 * restore them from the original bsd/netinet/tcp_input.c before building. */
#include
#include
#include
#include
#include
#include
#include 	/* for proc0 declaration */
#include
#include
#include
#include

#include 	/* before tcp_seq.h, for tcp_random18() */

#include
#include

#include
#include
#include
#include 	/* for ICMP_BANDLIM */
#include
#include
#include 	/* for ICMP_BANDLIM */
#include
#if INET6
#include
#include
#include
#include
#include
#endif
#include
#include
#include
#include
#include
#include
#if TCPDEBUG
#include
/* Last headers seen by the SO_DEBUG trace path; one slot shared v4/v6. */
#if INET6
union {
	struct ip _tcp_si4;
	struct ip6_hdr _tcp_si6;
} tcp_saveip;
#else
struct ip tcp_saveip;
#endif /* INET6 */
struct tcphdr tcp_savetcp;
#endif /* TCPDEBUG */

#if IPSEC
#include
#include
#endif /*IPSEC*/

#include

/* kdebug trace codes for this subsystem */
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETTCP, 0)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETTCP, 2)
#define DBG_FNC_TCP_INPUT	NETDBG_CODE(DBG_NETTCP, (3 << 8))
#define DBG_FNC_TCP_NEWCONN	NETDBG_CODE(DBG_NETTCP, (7 << 8))

static int	tcprexmtthresh = 3;	/* dup ACKs before fast retransmit */
tcp_seq	tcp_iss;			/* initial send sequence number */
tcp_cc	tcp_ccgen;			/* T/TCP connection-count generator */

struct	tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats,
	CTLFLAG_RD, &tcpstat , tcpstat, "");

/* nonzero => log SYNs arriving for ports with no listener */
int log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 
	&log_in_vain, 0, "");

/* nonzero => delay ACKs for in-order data (RFC 1122 delayed ACK) */
int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 
	&tcp_delack_enabled, 0, "");

u_long	tcp_now;		/* coarse TCP clock used for RTT timestamps */
struct inpcbhead tcb;
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void	 tcp_dooptions __P((struct tcpcb *,
	    u_char *, int, struct tcphdr *, struct tcpopt *));
static void	 tcp_pulloutofband __P((struct socket *,
	    struct tcphdr *, struct mbuf *));
static void	 tcp_xmit_timer __P((struct tcpcb *, int));

/*
 * Neighbor Discovery, Neighbor Unreachability Detection
 * Upper layer hint.
 */
#define ND6_HINT(tp) { \
	if ((tp) && (tp)->t_inpcb && (tp)->t_inpcb->in6p_route.ro_rt) \
		nd6_nud_hint((tp)->t_inpcb->in6p_route.ro_rt, NULL); \
}


extern u_long  current_active_connections;
extern u_long  last_active_conn_count;

extern u_long *delack_bitmask;


/*
 * Insert segment ti into reassembly queue of tcp with
 * control block tp.  Return TH_FIN if reassembly now includes
 * a segment with FIN.  The macro form does the common case inline
 * (segment is the next to be received on an established connection,
 * and the queue is empty), avoiding linkage into and removal
 * from the queue and repetition of various conversions.
 * Set DELACK for segments received in order, but ack immediately
 * when segments are out of order (so fast retransmit can work).
 */
#if INET6
#define _ONLY_IF_INET6_(x) x
#else
#define _ONLY_IF_INET6_(x)
#endif
#define	TCP_REASS(tp, th, tilen, m, so, flags, isipv6, needwakeup) { \
	if ((th)->th_seq == (tp)->rcv_nxt && \
	    (tp)->segq.lh_first == NULL && \
	    (tp)->t_state == TCPS_ESTABLISHED) { \
		if (tcp_delack_enabled) {\
		    if (last_active_conn_count > DELACK_BITMASK_THRESH) \
			TCP_DELACK_BITSET(tp->t_inpcb->hash_element); \
		    tp->t_flags |= TF_DELACK; \
		} \
		else \
		    tp->t_flags |= TF_ACKNOW; \
		(tp)->rcv_nxt += (tilen); \
		flags = (th)->th_flags & TH_FIN; \
		tcpstat.tcps_rcvpack++;\
		tcpstat.tcps_rcvbyte += (tilen);\
		_ONLY_IF_INET6_(ND6_HINT(tp);) \
		sbappend(&(so)->so_rcv, (m)); \
		needwakeup++; \
	} else { \
		(flags) = tcp_reass((tp), (th), (tilen), (m), (isipv6)); \
		tp->t_flags |= TF_ACKNOW; \
	} \
}
/*
 * Note:
 * in the ip header part of the ipqe_tcp structure only the length is used.
 *
 * tcp_reass -- out-of-order segment reassembly (slow path of TCP_REASS).
 * Insert the segment described by (th, tilen, m) into tp's reassembly
 * queue, trimming any overlap with queued segments, then deliver any
 * now-contiguous data at rcv_nxt to the socket.  Returns TH_FIN if the
 * delivered run ends in a FIN, else 0.  Called with th == 0 once the
 * connection becomes established, to flush pre-ESTABLISHED data up.
 */
int
tcp_reass(tp, th, tilen, m, isipv6)
	register struct tcpcb *tp;
	register struct tcphdr *th;
	u_int16_t tilen;
	struct mbuf *m;
#if INET6
	int isipv6;
#endif
{
	register struct ipqent *p, *q, *nq, *tiqe;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	/*
	 * Call with th==0 after become established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (th == 0)
		goto present;

#if 0 /* Not using GETTCP(m) macro */
	m->m_pkthdr.header = ti;
#endif

	/*
	 * Allocate a new queue entry, before we throw away any data.
	 * If we can't, just drop the packet.  XXX
	 */
	MALLOC(tiqe, struct ipqent *, sizeof (struct ipqent), M_SONAME, M_NOWAIT);
	if (tiqe == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		return (0);
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = tp->segq.lh_first; q != NULL;
	    p = q, q = q->ipqe_q.le_next)
		if (SEQ_GT(q->ipqe_tcp->ti_seq, th->th_seq))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		register struct tcpiphdr *phdr = p->ipqe_tcp;
		register int i;

		/* conversion to int (in i) handles seq wraparound */
		i = phdr->ti_seq + phdr->ti_len - th->th_seq;
		if (i > 0) {
			if (i >= tilen) {
				/* fully duplicated; free and bail */
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += tilen;
				m_freem(m);
				FREE(tiqe, M_SONAME);

#if 1 /* XXX: NetBSD just return 0 here */
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
#endif
			}
			/* partial overlap: trim the duplicated prefix */
			m_adj(m, i);
			tilen -= i;
			th->th_seq += i;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += tilen;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		register struct tcpiphdr *qhdr = q->ipqe_tcp;
		register int i = (th->th_seq + tilen) - qhdr->ti_seq;

		if (i <= 0)
			break;
		if (i < qhdr->ti_len) {
			/* partial overlap: trim queued segment's front */
			qhdr->ti_seq += i;
			qhdr->ti_len -= i;
			m_adj(q->ipqe_m, i);
			break;
		}
		/* queued segment entirely covered by new data: drop it */
		nq = q->ipqe_q.le_next;
		m_freem(q->ipqe_m);
		LIST_REMOVE(q, ipqe_q);
		FREE(q, M_SONAME);
		q = nq;
	}

	/* Insert the new fragment queue entry into place. */
	tiqe->ipqe_m = m;
	/*
	 * There is a IP or IPv6 header in the mbuf before th
	 * so there is space for an ip header (for the length field)
	 */
#define thtoti(x) \
	((struct tcpiphdr *)(((char *)(x)) - (sizeof (struct ip))))

	tiqe->ipqe_tcp = thtoti(th);
	tiqe->ipqe_tcp->ti_len = tilen;
	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->segq, tiqe, ipqe_q);
	} else {
		LIST_INSERT_AFTER(p, tiqe, ipqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = tp->segq.lh_first;
	if (!q || q->ipqe_tcp->ti_seq != tp->rcv_nxt)
		return (0);
#if 0
	/*
	 * XXX from INRIA for NetBSD, but should not happen because
	 * TCPS_HAVEESTABLISHED(tp->t_state) should be true here.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && q->ipqe_tcp->ti_len)
		return (0);
#endif
	/* deliver every contiguous segment at the left window edge */
	do {
		tp->rcv_nxt += q->ipqe_tcp->ti_len;
		flags = q->ipqe_tcp->ti_flags & TH_FIN;
		nq = q->ipqe_q.le_next;
		LIST_REMOVE(q, ipqe_q);
		if (so->so_state & SS_CANTRCVMORE)
			m_freem(q->ipqe_m);
		else
			sbappend(&so->so_rcv, q->ipqe_m);
		FREE(q, M_SONAME);
		q = nq;
	} while (q && q->ipqe_tcp->ti_seq == tp->rcv_nxt);
#if INET6
	if (isipv6)
		ND6_HINT(tp);
#endif

	/* NOTE(review): 'ti' is not declared anywhere in this function;
	 * these arguments can only compile when KERNEL_DEBUG expands to
	 * nothing (kdebug disabled) -- confirm before enabling kdebug. */
	KERNEL_DEBUG(DBG_LAYER_END, ((ti->ti_dport << 16) | ti->ti_sport),
	     (((ti->ti_src.s_addr & 0xffff) << 16) | (ti->ti_dst.s_addr & 0xffff)),
	     ti->ti_seq, ti->ti_ack, ti->ti_win);

	sorwakeup(so);
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#if INET6
/* IPv6 protosw entry point: thin wrapper delegating to tcp_input(). */
int
tcp6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	tcp_input(*mp, *offp);
	return IPPROTO_DONE;
}
#endif
1 : 0; + + struct proc *proc0=current_proc(); + + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START,0,0,0,0,0); + + bzero((char *)&to, sizeof(to)); + + tcpstat.tcps_rcvtotal++; + /* + * Get IP and TCP header together in first mbuf. + * Note: IP leaves IP header in first mbuf. + */ + th = mtod(m, struct tcpiphdr *); + + KERNEL_DEBUG(DBG_LAYER_BEG, ((ti->ti_dport << 16) | ti->ti_sport), + (((ti->ti_src.s_addr & 0xffff) << 16) | (ti->ti_dst.s_addr & 0xffff)), + ti->ti_seq, ti->ti_ack, ti->ti_win); + +#if INET6 + if (isipv6) { + ip6 = mtod(m, struct ip6_hdr *); + lgminh = sizeof(struct tcpip6hdr); + } else { + lgminh = sizeof(struct tcpiphdr); +#endif /* INET6 */ + ip = mtod(m, struct ip *); + ipov = (struct ipovly *)ip; +#if INET6 + } +#endif /* INET6 */ + +#if INET6 + /* XXX not a good place to put this into... */ + if (isipv6 && + m && (m->m_flags & M_ANYCAST6)) { + icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, + (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); + return; + } +#endif /* INET6 */ + +#if INET6 + if (isipv6) { + IP6_EXTHDR_CHECK(m, off, sizeof(struct tcphdr), ); + ip6 = mtod(m, struct ip6_hdr *); + tilen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6); + + if (in6_cksum(m, IPPROTO_TCP, off, tilen)) { + tcpstat.tcps_rcvbadsum++; + goto drop; + } + th = (struct tcphdr *)((caddr_t)ip6 + off); + } else +#endif /* INET6 */ + { + /* + * Get IP and TCP header together in first mbuf. + * Note: IP leaves IP header in first mbuf. + */ + /* XXX: should we still require this for IPv4? */ + if (off > sizeof (struct ip)) { + ip_stripoptions(m, (struct mbuf *)0); + off = sizeof(struct ip); + } + if (m->m_len < lgminh) { + if ((m = m_pullup(m, lgminh)) == 0) { + tcpstat.tcps_rcvshort++; + return; + } + } + ip = mtod(m, struct ip *); + ipov = (struct ipovly *)ip; + + /* + * Checksum extended TCP header and data. 
+ */ + tilen = ip->ip_len; + len = sizeof (struct ip) + tilen; + bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); + ipov->ih_len = (u_short)tilen; + HTONS(ipov->ih_len); + th = (struct tcphdr *)((caddr_t)ip + off); + th->th_sum = in_cksum(m, len); + if (th->th_sum) { + tcpstat.tcps_rcvbadsum++; + goto drop; + } + } + + /* + * Check that TCP offset makes sense, + * pull out TCP options and adjust length. XXX + */ + toff = th->th_off << 2; + if (toff < sizeof (struct tcphdr) || toff > tilen) { + tcpstat.tcps_rcvbadoff++; + goto drop; + } + tilen -= toff; + if (toff > sizeof (struct tcphdr)) { +#if INET6 + if (isipv6) { + IP6_EXTHDR_CHECK(m, off, toff, ); + ip6 = mtod(m, struct ip6_hdr *); + th = (struct tcphdr *)((caddr_t)ip6 + off); + } else +#endif /* INET6 */ + { + if (m->m_len < sizeof(struct ip) + toff) { + if ((m = m_pullup(m, sizeof (struct ip) + toff)) == 0) { + tcpstat.tcps_rcvshort++; + return; + } + ip = mtod(m, struct ip *); + ipov = (struct ipovly *)ip; + th = (struct tcphdr *)((caddr_t)ip + off); + } + } + optlen = toff - sizeof (struct tcphdr); + optp = (u_char *)(th + 1); + /* + * Do quick retrieval of timestamp options ("options + * prediction?"). If timestamp is the only option and it's + * formatted as recommended in RFC 1323 appendix A, we + * quickly get the values now and not bother calling + * tcp_dooptions(), etc. + */ + if ((optlen == TCPOLEN_TSTAMP_APPA || + (optlen > TCPOLEN_TSTAMP_APPA && + optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && + *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && + (th->th_flags & TH_SYN) == 0) { + to.to_flag |= TOF_TS; + to.to_tsval = ntohl(*(u_int32_t *)(optp + 4)); + to.to_tsecr = ntohl(*(u_int32_t *)(optp + 8)); + optp = NULL; /* we've parsed the options */ + } + } + thflags = th->th_flags; + + /* + * Convert TCP protocol specific fields to host format. + */ + NTOHL(th->th_seq); + NTOHL(th->th_ack); + NTOHS(th->th_win); + NTOHS(th->th_urp); + + /* + * Drop TCP, IP headers and TCP options. 
+ */ + hdroptlen = off+toff; + m->m_data += hdroptlen; + m->m_len -= hdroptlen; + + /* + * Locate pcb for segment. + */ +findpcb: +#if IPFIREWALL_FORWARD + if (ip_fw_fwd_addr != NULL +#if INET6 + && isipv6 == NULL +#endif /* INET6 */ + ) { + /* + * Diverted. Pretend to be the destination. + * already got one like this? + */ + inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, + ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif); + if (!inp) { + /* + * No, then it's new. Try find the ambushing socket + */ + if (!ip_fw_fwd_addr->sin_port) { + inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, + th->th_sport, ip_fw_fwd_addr->sin_addr, + th->th_dport, 1, m->m_pkthdr.rcvif); + } else { + inp = in_pcblookup_hash(&tcbinfo, + ip->ip_src, th->th_sport, + ip_fw_fwd_addr->sin_addr, + ntohs(ip_fw_fwd_addr->sin_port), 1, + m->m_pkthdr.rcvif); + } + } + ip_fw_fwd_addr = NULL; + } else +#endif /* IPFIREWALL_FORWARD */ + +#if INET6 + if (isipv6) + inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, + &ip6->ip6_dst, th->th_dport, 1, + m->m_pkthdr.rcvif); + else +#endif /* INET6 */ + inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, + ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif); + +#if IPSEC + /* due to difference from other BSD stacks */ + m->m_data -= hdroptlen; + m->m_len += hdroptlen; +#if INET6 + if (isipv6) { + if (inp != NULL && ipsec6_in_reject_so(m, inp->inp_socket)) { + ipsec6stat.in_polvio++; + goto drop; + } + } else +#endif /* INET6 */ + if (inp != NULL && ipsec4_in_reject_so(m, inp->inp_socket)) { + ipsecstat.in_polvio++; + goto drop; + } + m->m_data += hdroptlen; + m->m_len -= hdroptlen; +#endif /*IPSEC*/ + + /* + * If the state is CLOSED (i.e., TCB does not exist) then + * all data in the incoming segment is discarded. + * If the TCB exists but is in CLOSED state, it is embryonic, + * but should either do a listen or a connect soon. 
+ */ + if (inp == NULL) { + if (log_in_vain && thflags & TH_SYN) { +#if INET6 + char buf[INET6_ADDRSTRLEN]; +#else /* INET6 */ + char buf[4*sizeof "123"]; +#endif /* INET6 */ + +#if INET6 + if (isipv6) { + strcpy(buf, ip6_sprintf(&ip6->ip6_dst)); + log(LOG_INFO, + "Connection attempt to TCP %s:%d from %s:%d\n", + buf, ntohs(th->th_dport), + ip6_sprintf(&ip6->ip6_src), + ntohs(th->th_sport)); + } else { +#endif + strcpy(buf, inet_ntoa(ip->ip_dst)); + log(LOG_INFO, + "Connection attempt to TCP %s:%d from %s:%d\n", + buf, ntohs(th->th_dport), inet_ntoa(ip->ip_src), + ntohs(th->th_sport)); +#if INET6 + } +#endif /* INET6 */ + } +#if ICMP_BANDLIM + if (badport_bandlim(1) < 0) + goto drop; +#endif + goto dropwithreset; + } + tp = intotcpcb(inp); + if (tp == 0) + goto dropwithreset; + if (tp->t_state == TCPS_CLOSED) + goto drop; + + /* Unscale the window into a 32-bit value. */ + if ((thflags & TH_SYN) == 0) + tiwin = th->th_win << tp->snd_scale; + else + tiwin = th->th_win; + + so = inp->inp_socket; + if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) { +#if TCPDEBUG + if (so->so_options & SO_DEBUG) { + ostate = tp->t_state; +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6 = *ip6; + else + tcp_saveip._tcp_si4 = *ip; +#else /* INET6 */ + tcp_saveip = *ip; +#endif /* INET6 */ + + tcp_savetcp = *th; + } +#endif + if (so->so_options & SO_ACCEPTCONN) { + register struct tcpcb *tp0 = tp; + struct socket *so2; +#if IPSEC + struct socket *oso; +#endif +#if INET6 + struct inpcb *oinp = sotoinpcb(so); +#endif /* INET6 */ + +#if !IPSEC + if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) { + /* + * Note: dropwithreset makes sure we don't + * send a RST in response to a RST. 
+ */ + if (thflags & TH_ACK) { + tcpstat.tcps_badsyn++; + goto dropwithreset; + } + goto drop; + } +#endif + KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START,0,0,0,0,0); + so2 = sonewconn(so, 0); + + + if (so2 == 0) { + tcpstat.tcps_listendrop++; + so2 = sodropablereq(so); + if (so2) { + tcp_drop(sototcpcb(so2), ETIMEDOUT); + so2 = sonewconn(so, 0); + } + if (!so2) + goto drop; + } +#if IPSEC + oso = so; +#endif + so = so2; + /* + * This is ugly, but .... + * + * Mark socket as temporary until we're + * committed to keeping it. The code at + * ``drop'' and ``dropwithreset'' check the + * flag dropsocket to see if the temporary + * socket created here should be discarded. + * We mark the socket as discardable until + * we're committed to it below in TCPS_LISTEN. + */ + dropsocket++; + inp = (struct inpcb *)so->so_pcb; +#if INET6 + if (isipv6) + inp->in6p_laddr = ip6->ip6_dst; + else { + if (ip6_mapped_addr_on) { + inp->inp_vflag &= ~INP_IPV6; + inp->inp_vflag |= INP_IPV4; + } +#endif /* INET6 */ + inp->inp_laddr = ip->ip_dst; +#if INET6 + } +#endif /* INET6 */ + + inp->inp_lport = th->th_dport; + if (in_pcbinshash(inp) != 0) { + /* + * Undo the assignments above if we failed to put + * the PCB on the hash lists. + */ +#if INET6 + if (isipv6) + inp->in6p_laddr = in6addr_any; + else +#endif /* INET6 */ + inp->inp_laddr.s_addr = INADDR_ANY; + inp->inp_lport = 0; + goto drop; + } +#if IPSEC + /* + * from IPsec perspective, it is important to do it + * after making actual listening socket. + * otherwise, cached security association will bark. + * + * Subject: (KAME-snap 748) + * From: Wayne Knowles + */ + if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) { + /* + * Note: dropwithreset makes sure we don't + * send a RST in response to a RST. + */ + if (thflags & TH_ACK) { + tcpstat.tcps_badsyn++; + goto dropwithreset; + } + goto drop; + } +#endif +#if INET6 + if (isipv6) { + struct ip6_recvpktopts newopts; + + /* + * Inherit socket options from the listening + * socket. 
+ * Note that in6p_inputopts are not (even + * should not be) copied, since it stores + * previously received options and is used to + * detect if each new option is different than + * the previous one and hence should be passed + * to a user. + * If we copied in6p_inputopts, a user would + * not be able to receive options just after + * calling the accept system call. + */ + inp->inp_flags |= + oinp->inp_flags & INP_CONTROLOPTS; + if (oinp->in6p_outputopts) + inp->in6p_outputopts = + ip6_copypktopts(oinp->in6p_outputopts, + M_NOWAIT); + } else +#endif /* INET6 */ + inp->inp_options = ip_srcroute(); +#if IPSEC + /* copy old policy into new socket's */ + if (ipsec_copy_policy(sotoinpcb(oso)->inp_sp, + inp->inp_sp)) + printf("tcp_input: could not copy policy\n"); +#endif + + tp = intotcpcb(inp); + tp->t_state = TCPS_LISTEN; + tp->t_flags |= tp0->t_flags & (TF_NOPUSH|TF_NOOPT); + + /* Compute proper scaling value from buffer space */ + while (tp->request_r_scale < TCP_MAX_WINSHIFT && + TCP_MAXWIN << tp->request_r_scale < so->so_rcv.sb_hiwat) + tp->request_r_scale++; + + KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END,0,0,0,0,0); + } + } + +#if INET6 + /* save packet options if user wanted */ + if (isipv6 && (inp->in6p_flags & INP_CONTROLOPTS) != 0) { + struct ip6_recvpktopts opts6; + + /* + * Temporarily re-adjusting the mbuf before ip6_savecontrol(), + * which is necessary for FreeBSD only due to difference from + * other BSD stacks. + * XXX: we'll soon make a more natural fix after getting a + * consensus. 
+ */ +#ifndef DEFER_MADJ + m->m_data -= hdroptlen; + m->m_len += hdroptlen; +#endif + ip6_savecontrol(inp, ip6, m, &opts6, &inp->in6p_inputopts); + if (inp->in6p_inputopts) + ip6_update_recvpcbopt(inp->in6p_inputopts, &opts6); + if (opts6.head) { + if (sbappendcontrol(&inp->in6p_socket->so_rcv, + NULL, opts6.head) + == 0) + m_freem(opts6.head); + } +#ifndef DEFER_MADJ + m->m_data += hdroptlen; /* XXX */ + m->m_len -= hdroptlen; /* XXX */ +#endif + } +#endif /* INET6 */ + + /* + * Segment received on connection. + * Reset idle time and keep-alive timer. + */ + tp->t_idle = 0; + if (TCPS_HAVEESTABLISHED(tp->t_state)) + tp->t_timer[TCPT_KEEP] = tcp_keepidle; + + /* + * Process options if not in LISTEN state, + * else do it below (after getting remote address). + */ + if (tp->t_state != TCPS_LISTEN && optp) + tcp_dooptions(tp, optp, optlen, th, &to); + if (th->th_flags & TH_SYN) + tcp_mss(tp, to.to_maxseg, isipv6); /* sets t_maxseg */ + + /* + * Header prediction: check for the two common cases + * of a uni-directional data xfer. If the packet has + * no control flags, is in-sequence, the window didn't + * change and we're not retransmitting, it's a + * candidate. If the length is zero and the ack moved + * forward, we're the sender side of the xfer. Just + * free the data acked & wake any higher level process + * that was blocked waiting for space. If the length + * is non-zero and the ack didn't move, we're the + * receiver side. If we're getting packets in-order + * (the reassembly queue is empty), add the data to + * the socket buffer and note that we need a delayed ack. + * Make sure that the hidden state-flags are also off. + * Since we check for TCPS_ESTABLISHED above, it can only + * be TH_NEEDSYN. 
+ */ + if (tp->t_state == TCPS_ESTABLISHED && + (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && + ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && + ((to.to_flag & TOF_TS) == 0 || + TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && + /* + * Using the CC option is compulsory if once started: + * the segment is OK if no T/TCP was negotiated or + * if the segment has a CC option equal to CCrecv + */ + ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) != (TF_REQ_CC|TF_RCVD_CC) || + ((to.to_flag & TOF_CC) != 0 && to.to_cc == tp->cc_recv)) && + th->th_seq == tp->rcv_nxt && + tiwin && tiwin == tp->snd_wnd && + tp->snd_nxt == tp->snd_max) { + + /* + * If last ACK falls within this segment's sequence numbers, + * record the timestamp. + * NOTE that the test is modified according to the latest + * proposal of the tcplw@cray.com list (Braden 1993/04/26). + */ + if ((to.to_flag & TOF_TS) != 0 && + SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { + tp->ts_recent_age = tcp_now; + tp->ts_recent = to.to_tsval; + } + + if (tilen == 0) { + if (SEQ_GT(th->th_ack, tp->snd_una) && + SEQ_LEQ(th->th_ack, tp->snd_max) && + tp->snd_cwnd >= tp->snd_wnd && + tp->t_dupacks < tcprexmtthresh) { + /* + * this is a pure ack for outstanding data. + */ + ++tcpstat.tcps_predack; + if ((to.to_flag & TOF_TS) != 0) + tcp_xmit_timer(tp, + tcp_now - to.to_tsecr + 1); + else if (tp->t_rtt && + SEQ_GT(th->th_ack, tp->t_rtseq)) + tcp_xmit_timer(tp, tp->t_rtt); + acked = th->th_ack - tp->snd_una; + tcpstat.tcps_rcvackpack++; + tcpstat.tcps_rcvackbyte += acked; + sbdrop(&so->so_snd, acked); + tp->snd_una = th->th_ack; + m_freem(m); +#if INET6 + /* some progress has been done */ + if (isipv6) + ND6_HINT(tp); +#endif + + /* + * If all outstanding data are acked, stop + * retransmit timer, otherwise restart timer + * using current (possibly backed-off) value. + * If process is waiting for space, + * wakeup/selwakeup/signal. If data + * are ready to send, let tcp_output + * decide between more output or persist. 
+ */ + if (tp->snd_una == tp->snd_max) + tp->t_timer[TCPT_REXMT] = 0; + else if (tp->t_timer[TCPT_PERSIST] == 0) + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + + if (so->so_snd.sb_cc) + (void) tcp_output(tp); + sowwakeup(so); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + return; + } + } else if (th->th_ack == tp->snd_una && + tp->segq.lh_first == NULL && + tilen <= sbspace(&so->so_rcv)) { + /* + * this is a pure, in-sequence data packet + * with nothing on the reassembly queue and + * we have enough buffer space to take it. + */ + ++tcpstat.tcps_preddat; + tp->rcv_nxt += tilen; + tcpstat.tcps_rcvpack++; + tcpstat.tcps_rcvbyte += tilen; +#if INET6 + /* some progress has been done */ + if (isipv6) + ND6_HINT(tp); +#endif + sbappend(&so->so_rcv, m); + KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), + (((th->th_src.s_addr & 0xffff) << 16) | (th->th_dst.s_addr & 0xffff)), + th->th_seq, th->th_ack, th->th_win); + if (tcp_delack_enabled) { + if (last_active_conn_count > DELACK_BITMASK_THRESH) + TCP_DELACK_BITSET(tp->t_inpcb->hash_element); + tp->t_flags |= TF_DELACK; + } else { + tp->t_flags |= TF_ACKNOW; + tcp_output(tp); + } + sorwakeup(so); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + return; + } + } + + /* + * Calculate amount of space in receive window, + * and then do TCP input processing. + * Receive window is amount of space in rcv queue, + * but not less than advertised window. + */ + { int win; + + win = sbspace(&so->so_rcv); + if (win < 0) + win = 0; + tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); + } + + switch (tp->t_state) { + + /* + * If the state is LISTEN then ignore segment if it contains an RST. + * If the segment contains an ACK then it is bad and send a RST. + * If it does not contain a SYN then it is not interesting; drop it. + * If it is from this socket, drop it, it must be forged. + * Don't bother responding if the destination was a broadcast. 
+ * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial + * tp->iss, and send a segment: + * + * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss. + * Fill in remote peer address fields if not previously specified. + * Enter SYN_RECEIVED state, and process any other fields of this + * segment in this state. + */ + case TCPS_LISTEN: { + register struct sockaddr_in *sin; +#if 0 + register struct sockaddr_in6 *sin6; +#endif + + if (thflags & TH_RST) + goto drop; + if (thflags & TH_ACK) + goto dropwithreset; + if ((thflags & TH_SYN) == 0) + goto drop; + if (th->th_dport == th->th_sport) { +#if INET6 + if (isipv6) { + if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, + &ip6->ip6_src)) + goto drop; + } else +#endif /* INET6 */ + if (ip->ip_dst.s_addr == ip->ip_src.s_addr) + goto drop; + } + +#if INET6 + if (isipv6) { + if (m->m_flags & (M_BCAST|M_MCAST) || + IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) + goto drop; +#if 1 + /* + * Perhaps this should be a call/macro + * to a function like in6_pcbconnect(), but almost + * all of the checks have been done: we know + * that the association is unique, and the + * local address is always set here. 
+ */ + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) + inp->in6p_laddr = ip6->ip6_dst; + inp->in6p_faddr = ip6->ip6_src; + inp->inp_fport = th->th_sport; + + /* TODO: flowinfo initialization */ + + in_pcbrehash(inp); +#else + MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, + M_SONAME, M_NOWAIT); + if (sin6 == NULL) + goto drop; + bzero(sin6, sizeof(*sin6)); + sin6->sin6_family = AF_INET6; + sin6->sin6_len = sizeof(*sin6); + sin6->sin6_addr = ip6->ip6_src; + sin6->sin6_port = th->th_sport; + laddr6 = inp->in6p_laddr; + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) + inp->in6p_laddr = ip6->ip6_dst; + if (in6_pcbconnect(inp, (struct sockaddr *)sin6, + &proc0)) { + inp->in6p_laddr = laddr6; + FREE(sin6, M_SONAME); + goto drop; + } + FREE(sin6, M_SONAME); +#endif + } + else { +#endif /* INET6 */ + /* + * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN + * in_broadcast() should never return true on a received + * packet with M_BCAST not set. + */ + if (m->m_flags & (M_BCAST|M_MCAST) || + IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) + goto drop; + MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, + M_NOWAIT); + if (sin == NULL) + goto drop; + sin->sin_family = AF_INET; + sin->sin_len = sizeof(*sin); + sin->sin_addr = ip->ip_src; + sin->sin_port = th->th_sport; + bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero)); + laddr = inp->inp_laddr; + if (inp->inp_laddr.s_addr == INADDR_ANY) + inp->inp_laddr = ip->ip_dst; + if (in_pcbconnect(inp, (struct sockaddr *)sin, &proc0)) { + inp->inp_laddr = laddr; + FREE(sin, M_SONAME); + goto drop; + } + FREE(sin, M_SONAME); +#if INET6 + } +#endif /* INET6 */ + + tp->t_template = tcp_template(tp); + if (tp->t_template == 0) { + tp = tcp_drop(tp, ENOBUFS); + dropsocket = 0; /* socket is already gone */ + goto drop; + } + if ((taop = tcp_gettaocache(inp)) == NULL) { + taop = &tao_noncached; + bzero(taop, sizeof(*taop)); + } + tcp_dooptions(tp, optp, optlen, th, &to); + if (th->th_flags & TH_SYN) + tcp_mss(tp, to.to_maxseg, isipv6); 
 /* sets t_maxseg */ + if (iss) + tp->iss = iss; + else + tp->iss = tcp_iss; + tcp_iss += TCP_ISSINCR/4; + tp->irs = th->th_seq; + tcp_sendseqinit(tp); + tcp_rcvseqinit(tp); + /* + * Initialization of the tcpcb for transaction; + * set SND.WND = SEG.WND, + * initialize CCsend and CCrecv. + */ + tp->snd_wnd = tiwin; /* initial send-window */ + tp->cc_send = CC_INC(tcp_ccgen); + tp->cc_recv = to.to_cc; + /* + * Perform TAO test on incoming CC (SEG.CC) option, if any. + * - compare SEG.CC against cached CC from the same host, + * if any. + * - if SEG.CC > cached value, SYN must be new and is accepted + * immediately: save new CC in the cache, mark the socket + * connected, enter ESTABLISHED state, turn on flag to + * send a SYN in the next segment. + * A virtual advertised window is set in rcv_adv to + * initialize SWS prevention. Then enter normal segment + * processing: drop SYN, process data and FIN. + * - otherwise do a normal 3-way handshake. + */ + if ((to.to_flag & TOF_CC) != 0) { + if (((tp->t_flags & TF_NOPUSH) != 0) && + taop->tao_cc != 0 && CC_GT(to.to_cc, taop->tao_cc)) { + + taop->tao_cc = to.to_cc; + if (tp->t_state != TCPS_ESTABLISHED) + current_active_connections++; + + tp->t_state = TCPS_ESTABLISHED; + + /* + * If there is a FIN, or if there is data and the + * connection is local, then delay SYN,ACK(SYN) in + * the hope of piggy-backing it on a response + * segment. Otherwise must send ACK now in case + * the other side is slow starting. + */ + if (tcp_delack_enabled && + ((thflags & TH_FIN) || + (tilen != 0 && +#if INET6 + (isipv6 && in6_localaddr(&inp->in6p_faddr)) + || + (!isipv6 && +#endif /* INET6 */ + in_localaddr(inp->inp_faddr) +#if INET6 + ) +#endif /* INET6 */ + ))) { + if (last_active_conn_count > DELACK_BITMASK_THRESH) + TCP_DELACK_BITSET(tp->t_inpcb->hash_element); + + tp->t_flags |= (TF_DELACK | TF_NEEDSYN); + } + else + tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); + + /* + * Limit the `virtual advertised window' to TCP_MAXWIN + * here. 
Even if we requested window scaling, it will + * become effective only later when our SYN is acked. + */ + tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN); + tcpstat.tcps_connects++; + soisconnected(so); + tp->t_timer[TCPT_KEEP] = tcp_keepinit; + dropsocket = 0; /* committed to socket */ + tcpstat.tcps_accepts++; + goto trimthenstep6; + } + /* else do standard 3-way handshake */ + } else { + /* + * No CC option, but maybe CC.NEW: + * invalidate cached value. + */ + taop->tao_cc = 0; + } + /* + * TAO test failed or there was no CC option, + * do a standard 3-way handshake. + */ + tp->t_flags |= TF_ACKNOW; + tp->t_state = TCPS_SYN_RECEIVED; + tp->t_timer[TCPT_KEEP] = tcp_keepinit; + dropsocket = 0; /* committed to socket */ + tcpstat.tcps_accepts++; + goto trimthenstep6; + } + + /* + * If the state is SYN_RECEIVED: + * if seg contains an ACK, but not for our SYN/ACK, send a RST. + */ + case TCPS_SYN_RECEIVED: + if ((thflags & TH_ACK) && + (SEQ_LEQ(th->th_ack, tp->snd_una) || + SEQ_GT(th->th_ack, tp->snd_max))) + goto dropwithreset; + break; + + /* + * If the state is SYN_SENT: + * if seg contains an ACK, but not for our SYN, drop the input. + * if seg contains a RST, then drop the connection. + * if seg does not contain SYN, then drop it. + * Otherwise this is an acceptable SYN segment + * initialize tp->rcv_nxt and tp->irs + * if seg contains ack then advance tp->snd_una + * if SYN has been acked change to ESTABLISHED else SYN_RCVD state + * arrange for segment to be acked (eventually) + * continue processing rest of data/controls, beginning with URG + */ + case TCPS_SYN_SENT: + if ((taop = tcp_gettaocache(inp)) == NULL) { + taop = &tao_noncached; + bzero(taop, sizeof(*taop)); + } + + if ((thflags & TH_ACK) && + (SEQ_LEQ(th->th_ack, tp->iss) || + SEQ_GT(th->th_ack, tp->snd_max))) { + /* + * If we have a cached CCsent for the remote host, + * hence we haven't just crashed and restarted, + * do not send a RST. 
This may be a retransmission + * from the other side after our earlier ACK was lost. + * Our new SYN, when it arrives, will serve as the + * needed ACK. + */ + if (taop->tao_ccsent != 0) + goto drop; + else + goto dropwithreset; + } + if (thflags & TH_RST) { + if (thflags & TH_ACK) { + tp = tcp_drop(tp, ECONNREFUSED); + postevent(so, 0, EV_RESET); + } + goto drop; + } + if ((thflags & TH_SYN) == 0) + goto drop; + tp->snd_wnd = th->th_win; /* initial send window */ + tp->cc_recv = to.to_cc; /* foreign CC */ + + tp->irs = th->th_seq; + tcp_rcvseqinit(tp); + if (thflags & TH_ACK) { + /* + * Our SYN was acked. If segment contains CC.ECHO + * option, check it to make sure this segment really + * matches our SYN. If not, just drop it as old + * duplicate, but send an RST if we're still playing + * by the old rules. If no CC.ECHO option, make sure + * we don't get fooled into using T/TCP. + */ + if (to.to_flag & TOF_CCECHO) { + if (tp->cc_send != to.to_ccecho) + if (taop->tao_ccsent != 0) + goto drop; + else + goto dropwithreset; + } else + tp->t_flags &= ~TF_RCVD_CC; + tcpstat.tcps_connects++; + soisconnected(so); + /* Do window scaling on this connection? */ + if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == + (TF_RCVD_SCALE|TF_REQ_SCALE)) { + tp->snd_scale = tp->requested_s_scale; + tp->rcv_scale = tp->request_r_scale; + } + /* Segment is acceptable, update cache if undefined. */ + if (taop->tao_ccsent == 0) + taop->tao_ccsent = to.to_ccecho; + + tp->rcv_adv += tp->rcv_wnd; + tp->snd_una++; /* SYN is acked */ + /* + * If there's data, delay ACK; if there's also a FIN + * ACKNOW will be turned on later. + */ + if (tcp_delack_enabled && tilen != 0) { + if (last_active_conn_count > DELACK_BITMASK_THRESH) + TCP_DELACK_BITSET(tp->t_inpcb->hash_element); + tp->t_flags |= TF_DELACK; + } + else + tp->t_flags |= TF_ACKNOW; + /* + * Received in SYN_SENT[*] state. 
+ * Transitions: + * SYN_SENT --> ESTABLISHED + * SYN_SENT* --> FIN_WAIT_1 + */ + if (tp->t_flags & TF_NEEDFIN) { + tp->t_state = TCPS_FIN_WAIT_1; + tp->t_flags &= ~TF_NEEDFIN; + thflags &= ~TH_SYN; + } else { + if (tp->t_state != TCPS_ESTABLISHED) + current_active_connections++; + tp->t_state = TCPS_ESTABLISHED; + tp->t_timer[TCPT_KEEP] = tcp_keepidle; + } + } else { + /* + * Received initial SYN in SYN-SENT[*] state => simul- + * taneous open. If segment contains CC option and there is + * a cached CC, apply TAO test; if it succeeds, connection is + * half-synchronized. Otherwise, do 3-way handshake: + * SYN-SENT -> SYN-RECEIVED + * SYN-SENT* -> SYN-RECEIVED* + * If there was no CC option, clear cached CC value. + */ + tp->t_flags |= TF_ACKNOW; + tp->t_timer[TCPT_REXMT] = 0; + if (to.to_flag & TOF_CC) { + if (taop->tao_cc != 0 && + CC_GT(to.to_cc, taop->tao_cc)) { + /* + * update cache and make transition: + * SYN-SENT -> ESTABLISHED* + * SYN-SENT* -> FIN-WAIT-1* + */ + taop->tao_cc = to.to_cc; + if (tp->t_flags & TF_NEEDFIN) { + tp->t_state = TCPS_FIN_WAIT_1; + tp->t_flags &= ~TF_NEEDFIN; + } else { + if (tp->t_state != TCPS_ESTABLISHED) + current_active_connections++; + tp->t_state = TCPS_ESTABLISHED; + tp->t_timer[TCPT_KEEP] = tcp_keepidle; + } + tp->t_flags |= TF_NEEDSYN; + } else + tp->t_state = TCPS_SYN_RECEIVED; + } else { + /* CC.NEW or no option => invalidate cache */ + taop->tao_cc = 0; + tp->t_state = TCPS_SYN_RECEIVED; + } + } + +trimthenstep6: + /* + * Advance th->th_seq to correspond to first data byte. + * If data, trim to stay within window, + * dropping FIN if necessary. + */ + th->th_seq++; + if (tilen > tp->rcv_wnd) { + todrop = tilen - tp->rcv_wnd; + m_adj(m, -todrop); + tilen = tp->rcv_wnd; + thflags &= ~TH_FIN; + tcpstat.tcps_rcvpackafterwin++; + tcpstat.tcps_rcvbyteafterwin += todrop; + } + tp->snd_wl1 = th->th_seq - 1; + tp->rcv_up = th->th_seq; + /* + * Client side of transaction: already sent SYN and data. 
+ * If the remote host used T/TCP to validate the SYN, + * our data will be ACK'd; if so, enter normal data segment + * processing in the middle of step 5, ack processing. + * Otherwise, goto step 6. + */ + if (thflags & TH_ACK) + goto process_ACK; + goto step6; + /* + * If the state is LAST_ACK or CLOSING or TIME_WAIT: + * if segment contains a SYN and CC [not CC.NEW] option: + * if state == TIME_WAIT and connection duration > MSL, + * drop packet and send RST; + * + * if SEG.CC > CCrecv then is new SYN, and can implicitly + * ack the FIN (and data) in retransmission queue. + * Complete close and delete TCPCB. Then reprocess + * segment, hoping to find new TCPCB in LISTEN state; + * + * else must be old SYN; drop it. + * else do normal processing. + */ + case TCPS_LAST_ACK: + case TCPS_CLOSING: + case TCPS_TIME_WAIT: + if ((thflags & TH_SYN) && + (to.to_flag & TOF_CC) && tp->cc_recv != 0) { + if (tp->t_state == TCPS_TIME_WAIT && + tp->t_duration > TCPTV_MSL) + goto dropwithreset; + if (CC_GT(to.to_cc, tp->cc_recv)) { + tp = tcp_close(tp); + goto findpcb; + } + else + goto drop; + } + break; /* continue normal processing */ + } + + /* + * States other than LISTEN or SYN_SENT. + * First check the RST flag and sequence number since reset segments + * are exempt from the timestamp and connection count tests. This + * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix + * below which allowed reset segments in half the sequence space + * to fall though and be processed (which gives forged reset + * segments with a random sequence number a 50 percent chance of + * killing a connection). + * Then check timestamp, if present. + * Then check the connection count, if present. + * Then check that at least some bytes of segment are within + * receive window. If segment begins before rcv_nxt, + * drop leading data (and SYN); if nothing left, just ack. + * + * + * If the RST bit is set, check the sequence number to see + * if this is a valid reset segment. 
+ * RFC 793 page 37: + * In all states except SYN-SENT, all reset (RST) segments + * are validated by checking their SEQ-fields. A reset is + * valid if its sequence number is in the window. + * Note: this does not take into account delayed ACKs, so + * we should test against last_ack_sent instead of rcv_nxt. + * Also, it does not make sense to allow reset segments with + * sequence numbers greater than last_ack_sent to be processed + * since these sequence numbers are just the acknowledgement + * numbers in our outgoing packets being echoed back at us, + * and these acknowledgement numbers are monotonically + * increasing. + * If we have multiple segments in flight, the initial reset + * segment sequence numbers will be to the left of last_ack_sent, + * but they will eventually catch up. + * In any case, it never made sense to trim reset segments to + * fit the receive window since RFC 1122 says: + * 4.2.2.12 RST Segment: RFC-793 Section 3.4 + * + * A TCP SHOULD allow a received RST segment to include data. + * + * DISCUSSION + * It has been suggested that a RST segment could contain + * ASCII text that encoded and explained the cause of the + * RST. No standard has yet been established for such + * data. + * + * If the reset segment passes the sequence number test examine + * the state: + * SYN_RECEIVED STATE: + * If passive open, return to LISTEN state. + * If active open, inform user that connection was refused. + * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES: + * Inform user that connection was reset, and close tcb. + * CLOSING, LAST_ACK, TIME_WAIT STATES + * Close the tcb. + * TIME_WAIT state: + * Drop the segment - see Stevens, vol. 2, p. 964 and + * RFC 1337. 
+ */ + if (thflags & TH_RST) { + if (tp->last_ack_sent == th->th_seq) { + switch (tp->t_state) { + + case TCPS_SYN_RECEIVED: + so->so_error = ECONNREFUSED; + goto close; + + case TCPS_ESTABLISHED: + case TCPS_FIN_WAIT_1: + case TCPS_CLOSE_WAIT: + current_active_connections--; + /* + Drop through ... + */ + case TCPS_FIN_WAIT_2: + so->so_error = ECONNRESET; + close: + postevent(so, 0, EV_RESET); + tp->t_state = TCPS_CLOSED; + tcpstat.tcps_drops++; + tp = tcp_close(tp); + break; + + case TCPS_CLOSING: + case TCPS_LAST_ACK: + current_active_connections--; + tp = tcp_close(tp); + break; + + case TCPS_TIME_WAIT: + break; + } + } + goto drop; + } + + /* + * RFC 1323 PAWS: If we have a timestamp reply on this segment + * and it's less than ts_recent, drop it. + */ + if ((to.to_flag & TOF_TS) != 0 && tp->ts_recent && + TSTMP_LT(to.to_tsval, tp->ts_recent)) { + + /* Check to see if ts_recent is over 24 days old. */ + if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) { + /* + * Invalidate ts_recent. If this segment updates + * ts_recent, the age will be reset later and ts_recent + * will get a valid value. If it does not, setting + * ts_recent to zero will at least satisfy the + * requirement that zero be placed in the timestamp + * echo reply when ts_recent isn't valid. The + * age isn't reset until we get a valid ts_recent + * because we don't want out-of-order segments to be + * dropped when ts_recent is old. + */ + tp->ts_recent = 0; + } else { + tcpstat.tcps_rcvduppack++; + tcpstat.tcps_rcvdupbyte += tilen; + tcpstat.tcps_pawsdrop++; + goto dropafterack; + } + } + + /* + * T/TCP mechanism + * If T/TCP was negotiated and the segment doesn't have CC, + * or if its CC is wrong then drop the segment. + * RST segments do not have to comply with this. 
+ */ + if ((tp->t_flags & (TF_REQ_CC|TF_RCVD_CC)) == (TF_REQ_CC|TF_RCVD_CC) && + ((to.to_flag & TOF_CC) == 0 || tp->cc_recv != to.to_cc)) + goto dropafterack; + + /* + * In the SYN-RECEIVED state, validate that the packet belongs to + * this connection before trimming the data to fit the receive + * window. Check the sequence number versus IRS since we know + * the sequence numbers haven't wrapped. This is a partial fix + * for the "LAND" DoS attack. + */ + if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) + goto dropwithreset; + + todrop = tp->rcv_nxt - th->th_seq; + if (todrop > 0) { + if (thflags & TH_SYN) { + thflags &= ~TH_SYN; + th->th_seq++; + if (th->th_urp > 1) + th->th_urp--; + else + thflags &= ~TH_URG; + todrop--; + } + /* + * Following if statement from Stevens, vol. 2, p. 960. + */ + if (todrop > tilen + || (todrop == tilen && (thflags & TH_FIN) == 0)) { + /* + * Any valid FIN must be to the left of the window. + * At this point the FIN must be a duplicate or out + * of sequence; drop it. + */ + thflags &= ~TH_FIN; + + /* + * Send an ACK to resynchronize and drop any data. + * But keep on processing for RST or ACK. + */ + tp->t_flags |= TF_ACKNOW; + todrop = tilen; + tcpstat.tcps_rcvduppack++; + tcpstat.tcps_rcvdupbyte += todrop; + } else { + tcpstat.tcps_rcvpartduppack++; + tcpstat.tcps_rcvpartdupbyte += todrop; + } + m_adj(m, todrop); + th->th_seq += todrop; + tilen -= todrop; + if (th->th_urp > todrop) + th->th_urp -= todrop; + else { + thflags &= ~TH_URG; + th->th_urp = 0; + } + } + + /* + * If new data are received on a connection after the + * user processes are gone, then RST the other end. + */ + if ((so->so_state & SS_NOFDREF) && + tp->t_state > TCPS_CLOSE_WAIT && tilen) { + tp = tcp_close(tp); + tcpstat.tcps_rcvafterclose++; + goto dropwithreset; + } + + /* + * If segment ends after window, drop trailing data + * (and PUSH and FIN); if nothing left, just ACK. 
+ */ + todrop = (th->th_seq+tilen) - (tp->rcv_nxt+tp->rcv_wnd); + if (todrop > 0) { + tcpstat.tcps_rcvpackafterwin++; + if (todrop >= tilen) { + tcpstat.tcps_rcvbyteafterwin += tilen; + /* + * If a new connection request is received + * while in TIME_WAIT, drop the old connection + * and start over if the sequence numbers + * are above the previous ones. + */ + if (thflags & TH_SYN && + tp->t_state == TCPS_TIME_WAIT && + SEQ_GT(th->th_seq, tp->rcv_nxt)) { + iss = tp->rcv_nxt + TCP_ISSINCR; + tp = tcp_close(tp); + goto findpcb; + } + /* + * If window is closed can only take segments at + * window edge, and have to drop data and PUSH from + * incoming segments. Continue processing, but + * remember to ack. Otherwise, drop segment + * and ack. + */ + if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { + tp->t_flags |= TF_ACKNOW; + tcpstat.tcps_rcvwinprobe++; + } else + goto dropafterack; + } else + tcpstat.tcps_rcvbyteafterwin += todrop; + m_adj(m, -todrop); + tilen -= todrop; + thflags &= ~(TH_PUSH|TH_FIN); + } + + /* + * If last ACK falls within this segment's sequence numbers, + * record its timestamp. + * NOTE that the test is modified according to the latest + * proposal of the tcplw@cray.com list (Braden 1993/04/26). + */ + if ((to.to_flag & TOF_TS) != 0 && + SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { + tp->ts_recent_age = tcp_now; + tp->ts_recent = to.to_tsval; + } + + /* + * If a SYN is in the window, then this is an + * error and we send an RST and drop the connection. + */ + if (thflags & TH_SYN) { + tp = tcp_drop(tp, ECONNRESET); + postevent(so, 0, EV_RESET); + goto dropwithreset; + } + + /* + * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN + * flag is on (half-synchronized state), then queue data for + * later processing; else drop segment and return. + */ + if ((thflags & TH_ACK) == 0) { + if (tp->t_state == TCPS_SYN_RECEIVED || + (tp->t_flags & TF_NEEDSYN)) + goto step6; + else + goto drop; + } + + /* + * Ack processing. 
+ */ + switch (tp->t_state) { + + /* + * In SYN_RECEIVED state, the ack ACKs our SYN, so enter + * ESTABLISHED state and continue processing. + * The ACK was checked above. + */ + case TCPS_SYN_RECEIVED: + + tcpstat.tcps_connects++; + soisconnected(so); + current_active_connections++; + + /* Do window scaling? */ + if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == + (TF_RCVD_SCALE|TF_REQ_SCALE)) { + tp->snd_scale = tp->requested_s_scale; + tp->rcv_scale = tp->request_r_scale; + } + /* + * Upon successful completion of 3-way handshake, + * update cache.CC if it was undefined, pass any queued + * data to the user, and advance state appropriately. + */ + if ((taop = tcp_gettaocache(inp)) != NULL && + taop->tao_cc == 0) + taop->tao_cc = tp->cc_recv; + + /* + * Make transitions: + * SYN-RECEIVED -> ESTABLISHED + * SYN-RECEIVED* -> FIN-WAIT-1 + */ + if (tp->t_flags & TF_NEEDFIN) { + tp->t_state = TCPS_FIN_WAIT_1; + tp->t_flags &= ~TF_NEEDFIN; + } else { + tp->t_state = TCPS_ESTABLISHED; + tp->t_timer[TCPT_KEEP] = tcp_keepidle; + } + /* + * If segment contains data or ACK, will call tcp_reass() + * later; if not, do so now to pass queued data to user. + */ + if (tilen == 0 && (thflags & TH_FIN) == 0) + (void) tcp_reass(tp, (struct tcphdr *)0, 0, + (struct mbuf *)0, isipv6); + tp->snd_wl1 = th->th_seq - 1; + /* fall into ... */ + + /* + * In ESTABLISHED state: drop duplicate ACKs; ACK out of range + * ACKs. If the ack is in the range + * tp->snd_una < th->th_ack <= tp->snd_max + * then advance tp->snd_una to th->th_ack and drop + * data from the retransmission queue. If this ACK reflects + * more up to date window information we update our window information. 
+ */ + case TCPS_ESTABLISHED: + case TCPS_FIN_WAIT_1: + case TCPS_FIN_WAIT_2: + case TCPS_CLOSE_WAIT: + case TCPS_CLOSING: + case TCPS_LAST_ACK: + case TCPS_TIME_WAIT: + + if (SEQ_LEQ(th->th_ack, tp->snd_una)) { + if (tilen == 0 && tiwin == tp->snd_wnd) { + tcpstat.tcps_rcvdupack++; + /* + * If we have outstanding data (other than + * a window probe), this is a completely + * duplicate ack (ie, window info didn't + * change), the ack is the biggest we've + * seen and we've seen exactly our rexmt + * threshold of them, assume a packet + * has been dropped and retransmit it. + * Kludge snd_nxt & the congestion + * window so we send only this one + * packet. + * + * We know we're losing at the current + * window size so do congestion avoidance + * (set ssthresh to half the current window + * and pull our congestion window back to + * the new ssthresh). + * + * Dup acks mean that packets have left the + * network (they're now cached at the receiver) + * so bump cwnd by the amount in the receiver + * to keep a constant cwnd packets in the + * network. + */ + if (tp->t_timer[TCPT_REXMT] == 0 || + th->th_ack != tp->snd_una) + tp->t_dupacks = 0; + else if (++tp->t_dupacks == tcprexmtthresh) { + tcp_seq onxt = tp->snd_nxt; + u_int win = + min(tp->snd_wnd, tp->snd_cwnd) / 2 / + tp->t_maxseg; + + if (win < 2) + win = 2; + tp->snd_ssthresh = win * tp->t_maxseg; + tp->t_timer[TCPT_REXMT] = 0; + tp->t_rtt = 0; + tp->snd_nxt = th->th_ack; + tp->snd_cwnd = tp->t_maxseg; + (void) tcp_output(tp); + tp->snd_cwnd = tp->snd_ssthresh + + tp->t_maxseg * tp->t_dupacks; + if (SEQ_GT(onxt, tp->snd_nxt)) + tp->snd_nxt = onxt; + goto drop; + } else if (tp->t_dupacks > tcprexmtthresh) { + tp->snd_cwnd += tp->t_maxseg; + (void) tcp_output(tp); + goto drop; + } + } else + tp->t_dupacks = 0; + break; + } + /* + * If the congestion window was inflated to account + * for the other side's cached packets, retract it. 
+ */ + if (tp->t_dupacks >= tcprexmtthresh && + tp->snd_cwnd > tp->snd_ssthresh) + tp->snd_cwnd = tp->snd_ssthresh; + tp->t_dupacks = 0; + if (SEQ_GT(th->th_ack, tp->snd_max)) { + tcpstat.tcps_rcvacktoomuch++; + goto dropafterack; + } + /* + * If we reach this point, ACK is not a duplicate, + * i.e., it ACKs something we sent. + */ + if (tp->t_flags & TF_NEEDSYN) { + /* + * T/TCP: Connection was half-synchronized, and our + * SYN has been ACK'd (so connection is now fully + * synchronized). Go to non-starred state, + * increment snd_una for ACK of SYN, and check if + * we can do window scaling. + */ + tp->t_flags &= ~TF_NEEDSYN; + tp->snd_una++; + /* Do window scaling? */ + if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == + (TF_RCVD_SCALE|TF_REQ_SCALE)) { + tp->snd_scale = tp->requested_s_scale; + tp->rcv_scale = tp->request_r_scale; + } + } + +process_ACK: + acked = th->th_ack - tp->snd_una; + tcpstat.tcps_rcvackpack++; + tcpstat.tcps_rcvackbyte += acked; + + /* + * If we have a timestamp reply, update smoothed + * round trip time. If no timestamp is present but + * transmit timer is running and timed sequence + * number was acked, update smoothed round trip time. + * Since we now have an rtt measurement, cancel the + * timer backoff (cf., Phil Karn's retransmit alg.). + * Recompute the initial retransmit timer. + */ + if (to.to_flag & TOF_TS) + tcp_xmit_timer(tp, tcp_now - to.to_tsecr + 1); + else if (tp->t_rtt && SEQ_GT(th->th_ack, tp->t_rtseq)) + tcp_xmit_timer(tp,tp->t_rtt); + + /* + * If all outstanding data is acked, stop retransmit + * timer and remember to restart (more output or persist). + * If there is more data to be acked, restart retransmit + * timer, using current (possibly backed-off) value. + */ + if (th->th_ack == tp->snd_max) { + tp->t_timer[TCPT_REXMT] = 0; + needoutput = 1; + } else if (tp->t_timer[TCPT_PERSIST] == 0) + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + + /* + * If no data (only SYN) was ACK'd, + * skip rest of ACK processing. 
+ */ + if (acked == 0) + goto step6; + + /* + * When new data is acked, open the congestion window. + * If the window gives us less than ssthresh packets + * in flight, open exponentially (maxseg per packet). + * Otherwise open linearly: maxseg per window + * (maxseg^2 / cwnd per packet). + */ + { + register u_int cw = tp->snd_cwnd; + register u_int incr = tp->t_maxseg; + + if (cw > tp->snd_ssthresh) + incr = incr * incr / cw; + tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale); + } + if (acked > so->so_snd.sb_cc) { + tp->snd_wnd -= so->so_snd.sb_cc; + sbdrop(&so->so_snd, (int)so->so_snd.sb_cc); + ourfinisacked = 1; + } else { + sbdrop(&so->so_snd, acked); + tp->snd_wnd -= acked; + ourfinisacked = 0; + } + need_sowwakeup++; + tp->snd_una = th->th_ack; + if (SEQ_LT(tp->snd_nxt, tp->snd_una)) + tp->snd_nxt = tp->snd_una; + + switch (tp->t_state) { + + /* + * In FIN_WAIT_1 STATE in addition to the processing + * for the ESTABLISHED state if our FIN is now acknowledged + * then enter FIN_WAIT_2. + */ + case TCPS_FIN_WAIT_1: + if (ourfinisacked) { + /* + * If we can't receive any more + * data, then closing user can proceed. + * Starting the timer is contrary to the + * specification, but if we don't get a FIN + * we'll hang forever. + */ + if (so->so_state & SS_CANTRCVMORE) { + soisdisconnected(so); + tp->t_timer[TCPT_2MSL] = tcp_maxidle; + } + add_to_time_wait(tp); + current_active_connections--; + tp->t_state = TCPS_FIN_WAIT_2; + } + break; + + /* + * In CLOSING STATE in addition to the processing for + * the ESTABLISHED state if the ACK acknowledges our FIN + * then enter the TIME-WAIT state, otherwise ignore + * the segment. 
+ */ + case TCPS_CLOSING: + if (ourfinisacked) { + tp->t_state = TCPS_TIME_WAIT; + tcp_canceltimers(tp); + /* Shorten TIME_WAIT [RFC-1644, p.28] */ + if (tp->cc_recv != 0 && + tp->t_duration < TCPTV_MSL) + tp->t_timer[TCPT_2MSL] = + tp->t_rxtcur * TCPTV_TWTRUNC; + else + tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; + add_to_time_wait(tp); + current_active_connections--; + soisdisconnected(so); + } + break; + + /* + * In LAST_ACK, we may still be waiting for data to drain + * and/or to be acked, as well as for the ack of our FIN. + * If our FIN is now acknowledged, delete the TCB, + * enter the closed state and return. + */ + case TCPS_LAST_ACK: + if (ourfinisacked) { + tp = tcp_close(tp); + goto drop; + } + break; + + /* + * In TIME_WAIT state the only thing that should arrive + * is a retransmission of the remote FIN. Acknowledge + * it and restart the finack timer. + */ + case TCPS_TIME_WAIT: + tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; + add_to_time_wait(tp); + goto dropafterack; + } + } + +step6: + /* + * Update window information. + * Don't look at window if no ACK: TAC's send garbage on first SYN. + */ + if ((thflags & TH_ACK) && + (SEQ_LT(tp->snd_wl1, th->th_seq) || + (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || + (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { + /* keep track of pure window updates */ + if (tilen == 0 && + tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) + tcpstat.tcps_rcvwinupd++; + tp->snd_wnd = tiwin; + tp->snd_wl1 = th->th_seq; + tp->snd_wl2 = th->th_ack; + if (tp->snd_wnd > tp->max_sndwnd) + tp->max_sndwnd = tp->snd_wnd; + needoutput = 1; + } + + /* + * Process segments with URG. + */ + if ((thflags & TH_URG) && th->th_urp && + TCPS_HAVERCVDFIN(tp->t_state) == 0) { + /* + * This is a kludge, but if we receive and accept + * random urgent pointers, we'll crash in + * soreceive. It's hard to imagine someone + * actually wanting to send this much urgent data. 
+ */ + if (th->th_urp + so->so_rcv.sb_cc > sb_max) { + th->th_urp = 0; /* XXX */ + thflags &= ~TH_URG; /* XXX */ + goto dodata; /* XXX */ + } + /* + * If this segment advances the known urgent pointer, + * then mark the data stream. This should not happen + * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since + * a FIN has been received from the remote side. + * In these states we ignore the URG. + * + * According to RFC961 (Assigned Protocols), + * the urgent pointer points to the last octet + * of urgent data. We continue, however, + * to consider it to indicate the first octet + * of data past the urgent section as the original + * spec states (in one of two places). + */ + if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { + tp->rcv_up = th->th_seq + th->th_urp; + so->so_oobmark = so->so_rcv.sb_cc + + (tp->rcv_up - tp->rcv_nxt) - 1; + if (so->so_oobmark == 0) { + so->so_state |= SS_RCVATMARK; + postevent(so, 0, EV_OOB); + } + sohasoutofband(so); + tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); + } + /* + * Remove out of band data so doesn't get presented to user. + * This can happen independent of advancing the URG pointer, + * but if two URG's are pending at once, some out-of-band + * data may creep in... ick. + */ + if (th->th_urp <= (u_long)tilen +#if SO_OOBINLINE + && (so->so_options & SO_OOBINLINE) == 0 +#endif + ) + tcp_pulloutofband(so, th, m); + } else + /* + * If no out of band data is expected, + * pull receive urgent pointer along + * with the receive window. + */ + if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) + tp->rcv_up = tp->rcv_nxt; +dodata: /* XXX */ + + /* + * Process the segment text, merging it into the TCP sequencing queue, + * and arranging for acknowledgment of receipt if necessary. + * This process logically involves adjusting tp->rcv_wnd as data + * is presented to the user (this happens in tcp_usrreq.c, + * case PRU_RCVD). If a FIN has already been received on this + * connection then we just ignore the text. 
+ */ + if ((tilen || (thflags&TH_FIN)) && + TCPS_HAVERCVDFIN(tp->t_state) == 0) { + TCP_REASS(tp, th, tilen, m, so, thflags, isipv6, need_sorwakeup); + + if (tp->t_flags & TF_DELACK) + { + KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), + (((th->th_src.s_addr & 0xffff) << 16) | (th->th_dst.s_addr & 0xffff)), + th->th_seq, th->th_ack, th->th_win); + } + /* + * Note the amount of data that peer has sent into + * our window, in order to estimate the sender's + * buffer size. + */ + len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); + } else { + m_freem(m); + thflags &= ~TH_FIN; + } + + /* + * If FIN is received ACK the FIN and let the user know + * that the connection is closing. + */ + if (thflags & TH_FIN) { + if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { + socantrcvmore(so); + postevent(so, 0, EV_FIN); + /* + * If connection is half-synchronized + * (ie NEEDSYN flag on) then delay ACK, + * so it may be piggybacked when SYN is sent. + * Otherwise, since we received a FIN then no + * more input can be expected, send ACK now. + */ + if (tcp_delack_enabled && (tp->t_flags & TF_NEEDSYN)) { + if (last_active_conn_count > DELACK_BITMASK_THRESH) + TCP_DELACK_BITSET(tp->t_inpcb->hash_element); + + tp->t_flags |= TF_DELACK; + } + else + tp->t_flags |= TF_ACKNOW; + tp->rcv_nxt++; + } + switch (tp->t_state) { + + /* + * In SYN_RECEIVED and ESTABLISHED STATES + * enter the CLOSE_WAIT state. + */ + case TCPS_SYN_RECEIVED: + case TCPS_ESTABLISHED: + tp->t_state = TCPS_CLOSE_WAIT; + break; + + /* + * If still in FIN_WAIT_1 STATE FIN has not been acked so + * enter the CLOSING state. + */ + case TCPS_FIN_WAIT_1: + tp->t_state = TCPS_CLOSING; + break; + + /* + * In FIN_WAIT_2 state enter the TIME_WAIT state, + * starting the time-wait timer, turning off the other + * standard timers. 
+ */ + case TCPS_FIN_WAIT_2: + tp->t_state = TCPS_TIME_WAIT; + tcp_canceltimers(tp); + /* Shorten TIME_WAIT [RFC-1644, p.28] */ + if (tp->cc_recv != 0 && + tp->t_duration < TCPTV_MSL) { + tp->t_timer[TCPT_2MSL] = + tp->t_rxtcur * TCPTV_TWTRUNC; + /* For transaction client, force ACK now. */ + tp->t_flags |= TF_ACKNOW; + } + else + tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; + + add_to_time_wait(tp); + soisdisconnected(so); + break; + + /* + * In TIME_WAIT state restart the 2 MSL time_wait timer. + */ + case TCPS_TIME_WAIT: + tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL; + add_to_time_wait(tp); + break; + } + } +#if TCPDEBUG + if (so->so_options & SO_DEBUG) { +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6.ip6_plen = tilen; + else + tcp_saveip._tcp_si4.ip_len = tilen; +#else /* INET6 */ + tcp_saveip.ip_len = tilen; +#endif /* INET6 */ + + tcp_trace(TA_INPUT, ostate, tp, (void *)&tcp_saveip, + &tcp_savetcp, 0); + } +#endif + + /* + * Return any desired output. + */ + if (needoutput || (tp->t_flags & TF_ACKNOW)) + (void) tcp_output(tp); + if (need_sorwakeup) + sorwakeup(so); + if (need_sowwakeup) + sowwakeup(so); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + return; + +dropafterack: + /* + * Generate an ACK dropping incoming segment if it occupies + * sequence space, where the ACK reflects our state. + * + * We can now skip the test for the RST flag since all + * paths to this code happen after packets containing + * RST have been dropped. + * + * In the SYN-RECEIVED state, don't send an ACK unless the + * segment we received passes the SYN-RECEIVED ACK test. + * If it fails send a RST. This breaks the loop in the + * "LAND" DoS attack, and also prevents an ACK storm + * between two listening ports that have been sent forged + * SYN segments, each with the source address of the other. 
+ */ + if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && + (SEQ_GT(tp->snd_una, th->th_ack) || + SEQ_GT(th->th_ack, tp->snd_max)) ) + goto dropwithreset; +#if TCPDEBUG + if (so->so_options & SO_DEBUG) { +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6.ip6_plen = tilen; + else + tcp_saveip._tcp_si4.ip_len = tilen; +#else /* INET6 */ + tcp_saveip.ip_len = tilen; +#endif /* INET6 */ + tcp_trace(TA_DROP, ostate, tp, (void *)&tcp_saveip, + &tcp_savetcp, 0); + } +#endif + m_freem(m); + tp->t_flags |= TF_ACKNOW; + (void) tcp_output(tp); + if (need_sorwakeup) + sorwakeup(so); + if (need_sowwakeup) + sowwakeup(so); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + return; + +dropwithreset: + /* + * Generate a RST, dropping incoming segment. + * Make ACK acceptable to originator of segment. + * Don't bother to respond if destination was broadcast/multicast. + */ + if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) + goto drop; +#if INET6 + if (isipv6) { + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) + goto drop; /* anycast check is done at the top */ + } else +#endif /* INET6 */ + if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) + goto drop; +#if TCPDEBUG + if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) { + if (tp == 0) { +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6 = *ip6; + else + tcp_saveip._tcp_si4 = *ip; +#else /* INET6 */ + tcp_saveip = *ip; +#endif /* INET6 */ + } +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6.ip6_plen = tilen; + else + tcp_saveip._tcp_si4.ip_len = tilen; +#else /* INET6 */ + tcp_saveip.ip_len = tilen; +#endif /* INET6 */ + tcp_trace(TA_DROP, ostate, tp, (void *)&tcp_saveip, + &tcp_savetcp, 0); + } +#endif + if (thflags & TH_ACK) +#if INET6 + tcp_respond(tp, isipv6 ? 
(void *)ip6 : (void *)ip, th, m, + (tcp_seq)0, th->th_ack, TH_RST, isipv6); +#else /* INET6 */ + tcp_respond(tp, (void *)ip, th, m, + (tcp_seq)0, th->th_ack, TH_RST, isipv6); +#endif /* INET6 */ + else { + if (thflags & TH_SYN) + tilen++; +#if INET6 + tcp_respond(tp, isipv6 ? (void *)ip6 : (void *)ip, th, m, + th->th_seq+tilen, (tcp_seq)0, TH_RST|TH_ACK, + isipv6); +#else /* INET6 */ + tcp_respond(tp, (void *)ip, th, m, + th->th_seq+tilen, (tcp_seq)0, TH_RST|TH_ACK, + isipv6); +#endif /* INET6 */ + } + /* destroy temporarily created socket */ + if (need_sorwakeup) + sorwakeup(so); + if (need_sowwakeup) + sowwakeup(so); + if (dropsocket) + (void) soabort(so); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + return; + +drop: + /* + * Drop space held by incoming segment and return. + */ +#if TCPDEBUG + if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) { + if (tp == 0) { +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6 = *ip6; + else + tcp_saveip._tcp_si4 = *ip; +#else /* INET6 */ + tcp_saveip = *ip; +#endif /* INET6 */ + } +#if INET6 + if (isipv6) + tcp_saveip._tcp_si6.ip6_plen = tilen; + else + tcp_saveip._tcp_si4.ip_len = tilen; +#else /* INET6 */ + tcp_saveip.ip_len = tilen; +#endif /* INET6 */ + tcp_trace(TA_DROP, ostate, tp, (void *)&tcp_saveip, + &tcp_savetcp, 0); + } +#endif + m_freem(m); + if (need_sorwakeup) + sorwakeup(so); + if (need_sowwakeup) + sowwakeup(so); + /* destroy temporarily created socket */ + if (dropsocket) + (void) soabort(so); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + return; +} + +static void +tcp_dooptions(tp, cp, cnt, th, to) + struct tcpcb *tp; + u_char *cp; + int cnt; + struct tcphdr *th; + struct tcpopt *to; +{ + u_short mss = 0; + int opt, optlen; + + for (; cnt > 0; cnt -= optlen, cp += optlen) { + opt = cp[0]; + if (opt == TCPOPT_EOL) + break; + if (opt == TCPOPT_NOP) + optlen = 1; + else { + optlen = cp[1]; + if (optlen <= 0) + break; + } + switch (opt) { + + default: + continue; + + 
case TCPOPT_MAXSEG: + if (optlen != TCPOLEN_MAXSEG) + continue; + if (!(th->th_flags & TH_SYN)) + continue; + bcopy((char *) cp + 2, (char *) &mss, sizeof(mss)); + to->to_maxseg = ntohs(mss); + break; + + case TCPOPT_WINDOW: + if (optlen != TCPOLEN_WINDOW) + continue; + if (!(th->th_flags & TH_SYN)) + continue; + tp->t_flags |= TF_RCVD_SCALE; + tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); + break; + + case TCPOPT_TIMESTAMP: + if (optlen != TCPOLEN_TIMESTAMP) + continue; + to->to_flag |= TOF_TS; + bcopy((char *)cp + 2, + (char *)&to->to_tsval, sizeof(to->to_tsval)); + NTOHL(to->to_tsval); + bcopy((char *)cp + 6, + (char *)&to->to_tsecr, sizeof(to->to_tsecr)); + NTOHL(to->to_tsecr); + + /* + * A timestamp received in a SYN makes + * it ok to send timestamp requests and replies. + */ + if (th->th_flags & TH_SYN) { + tp->t_flags |= TF_RCVD_TSTMP; + tp->ts_recent = to->to_tsval; + tp->ts_recent_age = tcp_now; + } + break; + case TCPOPT_CC: + if (optlen != TCPOLEN_CC) + continue; + to->to_flag |= TOF_CC; + bcopy((char *)cp + 2, + (char *)&to->to_cc, sizeof(to->to_cc)); + NTOHL(to->to_cc); + /* + * A CC or CC.new option received in a SYN makes + * it ok to send CC in subsequent segments. + */ + if (th->th_flags & TH_SYN) + tp->t_flags |= TF_RCVD_CC; + break; + case TCPOPT_CCNEW: + if (optlen != TCPOLEN_CC) + continue; + if (!(th->th_flags & TH_SYN)) + continue; + to->to_flag |= TOF_CCNEW; + bcopy((char *)cp + 2, + (char *)&to->to_cc, sizeof(to->to_cc)); + NTOHL(to->to_cc); + /* + * A CC or CC.new option received in a SYN makes + * it ok to send CC in subsequent segments. + */ + tp->t_flags |= TF_RCVD_CC; + break; + case TCPOPT_CCECHO: + if (optlen != TCPOLEN_CC) + continue; + if (!(th->th_flags & TH_SYN)) + continue; + to->to_flag |= TOF_CCECHO; + bcopy((char *)cp + 2, + (char *)&to->to_ccecho, sizeof(to->to_ccecho)); + NTOHL(to->to_ccecho); + break; + } + } +} + +/* + * Pull out of band byte out of a segment so + * it doesn't appear in the user's data queue. 
+ * It is still reflected in the segment length for + * sequencing purposes. + */ +static void +tcp_pulloutofband(so, th, m) + struct socket *so; + struct tcphdr *th; + register struct mbuf *m; +{ + int cnt = th->th_urp - 1; + + while (cnt >= 0) { + if (m->m_len > cnt) { + char *cp = mtod(m, caddr_t) + cnt; + struct tcpcb *tp = sototcpcb(so); + + tp->t_iobc = *cp; + tp->t_oobflags |= TCPOOB_HAVEDATA; + bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); + m->m_len--; + return; + } + cnt -= m->m_len; + m = m->m_next; + if (m == 0) + break; + } + panic("tcp_pulloutofband"); +} + +/* + * Collect new round-trip time estimate + * and update averages and current timeout. + */ +static void +tcp_xmit_timer(tp, rtt) + register struct tcpcb *tp; + short rtt; +{ + register int delta; + + tcpstat.tcps_rttupdated++; + tp->t_rttupdated++; + if (tp->t_srtt != 0) { + /* + * srtt is stored as fixed point with 5 bits after the + * binary point (i.e., scaled by 8). The following magic + * is equivalent to the smoothing algorithm in rfc793 with + * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed + * point). Adjust rtt to origin 0. + */ + delta = ((rtt - 1) << TCP_DELTA_SHIFT) + - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); + + if ((tp->t_srtt += delta) <= 0) + tp->t_srtt = 1; + + /* + * We accumulate a smoothed rtt variance (actually, a + * smoothed mean difference), then set the retransmit + * timer to smoothed rtt + 4 times the smoothed variance. + * rttvar is stored as fixed point with 4 bits after the + * binary point (scaled by 16). The following is + * equivalent to rfc793 smoothing with an alpha of .75 + * (rttvar = rttvar*3/4 + |delta| / 4). This replaces + * rfc793's wired-in beta. + */ + if (delta < 0) + delta = -delta; + delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); + if ((tp->t_rttvar += delta) <= 0) + tp->t_rttvar = 1; + } else { + /* + * No rtt measurement yet - use the unsmoothed rtt. 
+ * Set the variance to half the rtt (so our first + * retransmit happens at 3*rtt). + */ + tp->t_srtt = rtt << TCP_RTT_SHIFT; + tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); + } + tp->t_rtt = 0; + tp->t_rxtshift = 0; + + /* + * the retransmit should happen at rtt + 4 * rttvar. + * Because of the way we do the smoothing, srtt and rttvar + * will each average +1/2 tick of bias. When we compute + * the retransmit timer, we want 1/2 tick of rounding and + * 1 extra tick because of +-1/2 tick uncertainty in the + * firing of the timer. The bias will give us exactly the + * 1.5 tick we need. But, because the bias is + * statistical, we have to test that we don't drop below + * the minimum feasible timer (which is 2 ticks). + */ + TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), + max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); + + /* + * We received an ack for a packet that wasn't retransmitted; + * it is probably safe to discard any error indications we've + * received recently. This isn't quite right, but close enough + * for now (a route might have failed after we sent a segment, + * and the return path might not be symmetrical). + */ + tp->t_softerror = 0; +} + +/* + * Determine a reasonable value for maxseg size. + * If the route is known, check route for mtu. + * If none, use an mss that can be handled on the outgoing + * interface without forcing IP to fragment; if bigger than + * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES + * to utilize large mbufs. If no route is found, route has no mtu, + * or the destination isn't local, use a default, hopefully conservative + * size (usually 512 or the default IP max size, but no more than the mtu + * of the interface), as we can't discover anything about intervening + * gateways or networks. We also initialize the congestion/slow start + * window to be a single segment if the destination isn't local. 
+ * While looking at the routing entry, we also initialize other path-dependent + * parameters from pre-set or cached values in the routing entry. + * + * Also take into account the space needed for options that we + * send regularly. Make maxseg shorter by that amount to assure + * that we can send maxseg amount of data even when the options + * are present. Store the upper limit of the length of options plus + * data in maxopd. + * + * NOTE that this routine is only called when we process an incoming + * segment, for outgoing segments only tcp_mssopt is called. + * + * In case of T/TCP, we call this routine during implicit connection + * setup as well (offer = -1), to initialize maxseg from the cached + * MSS of our peer. + */ +void +tcp_mss(tp, offer, isipv6) + struct tcpcb *tp; + int offer; +#if INET6 + int isipv6; +#endif +{ + register struct rtentry *rt; + struct ifnet *ifp; + register int rtt, mss; + u_long bufsize; + struct inpcb *inp; + struct socket *so; + struct rmxp_tao *taop; + int origoffer = offer; +#if INET6 + int lgminh = isipv6 ? sizeof (struct tcpip6hdr) : + sizeof (struct tcpiphdr); +#else /* INET6 */ +#define lgminh (sizeof (struct tcpiphdr)) +#endif /* INET6 */ + + inp = tp->t_inpcb; +#if INET6 + if (isipv6) + rt = tcp_rtlookup6(inp); + else +#endif /* INET6 */ + rt = tcp_rtlookup(inp); + if (rt == NULL) { + tp->t_maxopd = tp->t_maxseg = +#if INET6 + isipv6 ? tcp_v6mssdflt : +#endif /* INET6 */ + tcp_mssdflt; + return; + } + ifp = rt->rt_ifp; + so = inp->inp_socket; + + taop = rmx_taop(rt->rt_rmx); + /* + * Offer == -1 means that we didn't receive SYN yet, + * use cached value in that case; + */ + if (offer == -1) + offer = taop->tao_mssopt; + /* + * Offer == 0 means that there was no MSS on the SYN segment, + * in this case we use tcp_mssdflt. + */ + if (offer == 0) + offer = +#if INET6 + isipv6 ? 
tcp_v6mssdflt : +#endif /* INET6 */ + tcp_mssdflt; + else + /* + * Sanity check: make sure that maxopd will be large + * enough to allow some data on segments even is the + * all the option space is used (40bytes). Otherwise + * funny things may happen in tcp_output. + */ + offer = max(offer, 64); + taop->tao_mssopt = offer; + + /* + * While we're here, check if there's an initial rtt + * or rttvar. Convert from the route-table units + * to scaled multiples of the slow timeout timer. + */ + if (tp->t_srtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { + /* + * XXX the lock bit for RTT indicates that the value + * is also a minimum value; this is subject to time. + */ + if (rt->rt_rmx.rmx_locks & RTV_RTT) + tp->t_rttmin = rtt / (RTM_RTTUNIT / PR_SLOWHZ); + tp->t_srtt = rtt / (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE)); + tcpstat.tcps_usedrtt++; + if (rt->rt_rmx.rmx_rttvar) { + tp->t_rttvar = rt->rt_rmx.rmx_rttvar / + (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE)); + tcpstat.tcps_usedrttvar++; + } else { + /* default variation is +- 1 rtt */ + tp->t_rttvar = + tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; + } + TCPT_RANGESET(tp->t_rxtcur, + ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, + tp->t_rttmin, TCPTV_REXMTMAX); + } + /* + * if there's an mtu associated with the route, use it + * else, use the link mtu. + */ + if (rt->rt_rmx.rmx_mtu) + mss = rt->rt_rmx.rmx_mtu - lgminh; + else + mss = +#if INET6 + isipv6 ? nd_ifinfo[rt->rt_ifp->if_index].linkmtu : +#endif + ifp->if_mtu - lgminh; + + if (rt->rt_rmx.rmx_mtu == 0) { +#if INET6 + if (isipv6) { + if (!in6_localaddr(&inp->in6p_faddr)) + mss = min(mss, tcp_v6mssdflt); + } else +#endif /* INET6 */ + if (!in_localaddr(inp->inp_faddr)) + mss = min(mss, tcp_mssdflt); + } + mss = min(mss, offer); + /* + * maxopd stores the maximum length of data AND options + * in a segment; maxseg is the amount of data in a normal + * segment. 
We need to store this value (maxopd) apart + * from maxseg, because now every segment carries options + * and thus we normally have somewhat less data in segments. + */ + tp->t_maxopd = mss; + + /* + * In case of T/TCP, origoffer==-1 indicates, that no segments + * were received yet. In this case we just guess, otherwise + * we do the same as before T/TCP. + */ + if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && + (origoffer == -1 || + (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) + mss -= TCPOLEN_TSTAMP_APPA; + if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC && + (origoffer == -1 || + (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)) + mss -= TCPOLEN_CC_APPA; + +#if (MCLBYTES & (MCLBYTES - 1)) == 0 + if (mss > MCLBYTES) + mss &= ~(MCLBYTES-1); +#else + if (mss > MCLBYTES) + mss = mss / MCLBYTES * MCLBYTES; +#endif + /* + * If there's a pipesize, change the socket buffer + * to that size. Make the socket buffers an integral + * number of mss units; if the mss is larger than + * the socket buffer, decrease the mss. + */ +#if RTV_SPIPE + if ((bufsize = rt->rt_rmx.rmx_sendpipe) == 0) +#endif + bufsize = so->so_snd.sb_hiwat; + if (bufsize < mss) + mss = bufsize; + else { + bufsize = roundup(bufsize, mss); + if (bufsize > sb_max) + bufsize = sb_max; + (void)sbreserve(&so->so_snd, bufsize); + } + tp->t_maxseg = mss; + +#if RTV_RPIPE + if ((bufsize = rt->rt_rmx.rmx_recvpipe) == 0) +#endif + bufsize = so->so_rcv.sb_hiwat; + if (bufsize > mss) { + bufsize = roundup(bufsize, mss); + if (bufsize > sb_max) + bufsize = sb_max; + (void)sbreserve(&so->so_rcv, bufsize); + } + /* + * Don't force slow-start on local network. + */ +#if INET6 + if (isipv6) { + if (!in6_localaddr(&inp->in6p_faddr)) + tp->snd_cwnd = mss; + } else +#endif /* INET6 */ + if (!in_localaddr(inp->inp_faddr)) + tp->snd_cwnd = mss; + + if (rt->rt_rmx.rmx_ssthresh) { + /* + * There's some sort of gateway or interface + * buffer limit on the path. 
Use this to set + * the slow start threshhold, but set the + * threshold to no less than 2*mss. + */ + tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh); + tcpstat.tcps_usedssthresh++; + } +} + +/* + * Determine the MSS option to send on an outgoing SYN. + */ +int +tcp_mssopt(tp, isipv6) + struct tcpcb *tp; +#if INET6 + int isipv6; +#endif +{ + struct rtentry *rt; + int mss; +#if INET6 + int lgminh = isipv6 ? sizeof (struct tcpip6hdr) : + sizeof (struct tcpiphdr); +#else /* INET6 */ +#define lgminh (sizeof (struct tcpiphdr)) +#endif /* INET6 */ + +#if INET6 + if (isipv6) + rt = tcp_rtlookup6(tp->t_inpcb); + else +#endif /* INET6 */ + rt = tcp_rtlookup(tp->t_inpcb); + if (rt == NULL) + return +#if INET6 + isipv6 ? tcp_v6mssdflt : +#endif /* INET6 */ + tcp_mssdflt; + + mss = rt->rt_ifp->if_mtu - lgminh; + + return mss; +} diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c new file mode 100644 index 000000000..14535b6bb --- /dev/null +++ b/bsd/netinet/tcp_output.c @@ -0,0 +1,934 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tcp_output.c 8.4 (Berkeley) 5/24/95 + */ + +#if ISFB31 +#include "opt_tcpdebug.h" +#endif +#define _IP_VHL + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#if INET6 +#include +#include +#include +#endif +#include +#include +#define TCPOUTFLAGS +#include +#include +#include +#include +#include +#if TCPDEBUG +#include +#endif +#include + +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 3) +#define DBG_FNC_TCP_OUTPUT NETDBG_CODE(DBG_NETTCP, (4 << 8) | 1) + + +#ifdef notyet +extern struct mbuf *m_copypack(); +#endif + + +/* + * Tcp output routine: figure out what should be sent and send it. + */ +int +tcp_output(tp) + register struct tcpcb *tp; +{ + register struct socket *so = tp->t_inpcb->inp_socket; + register long len, win; + int off, flags, error; + register struct mbuf *m; + struct ip *ip = NULL; + struct ipovly *ipov = NULL; +#if INET6 + struct ip6_hdr *ip6 = NULL; +#endif /* INET6 */ + struct tcphdr *th; + u_char opt[TCP_MAXOLEN]; + unsigned ipoptlen, optlen, hdrlen; + int idle, sendalot; + struct rmxp_tao *taop; + struct rmxp_tao tao_noncached; +#if INET6 + int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; +#endif + + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_LAYER_BEG, + ((tp->t_template->th_dport << 16) | tp->t_template->th_sport), + (((tp->t_template->th_src.s_addr & 0xffff) << 16) | + (tp->t_template->th_dst.s_addr & 0xffff)), + 0,0,0); + + /* + * Determine length of data that should be transmitted, + * and flags that will be used. + * If there is some data or critical controls (SYN, RST) + * to send, then transmit; otherwise, investigate further. 
+ */ + idle = (tp->snd_max == tp->snd_una); + if (idle && tp->t_idle >= tp->t_rxtcur) + /* + * We have been idle for "a while" and no acks are + * expected to clock out any data we send -- + * slow start to get ack "clock" running again. + */ + tp->snd_cwnd = tp->t_maxseg; +again: + sendalot = 0; + off = tp->snd_nxt - tp->snd_una; + win = min(tp->snd_wnd, tp->snd_cwnd); + + flags = tcp_outflags[tp->t_state]; + /* + * Get standard flags, and add SYN or FIN if requested by 'hidden' + * state flags. + */ + if (tp->t_flags & TF_NEEDFIN) + flags |= TH_FIN; + if (tp->t_flags & TF_NEEDSYN) + flags |= TH_SYN; + + /* + * If in persist timeout with window of 0, send 1 byte. + * Otherwise, if window is small but nonzero + * and timer expired, we will send what we can + * and go to transmit state. + */ + if (tp->t_force) { + if (win == 0) { + /* + * If we still have some data to send, then + * clear the FIN bit. Usually this would + * happen below when it realizes that we + * aren't sending all the data. However, + * if we have exactly 1 byte of unsent data, + * then it won't clear the FIN bit below, + * and if we are in persist state, we wind + * up sending the packet without recording + * that we sent the FIN bit. + * + * We can't just blindly clear the FIN bit, + * because if we don't have any more data + * to send then the probe will be the FIN + * itself. + */ + if (off < so->so_snd.sb_cc) + flags &= ~TH_FIN; + win = 1; + } else { + tp->t_timer[TCPT_PERSIST] = 0; + tp->t_rxtshift = 0; + } + } + + len = (long)ulmin(so->so_snd.sb_cc, win) - off; + + if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) { + taop = &tao_noncached; + bzero(taop, sizeof(*taop)); + } + + /* + * Lop off SYN bit if it has already been sent. However, if this + * is SYN-SENT state and if segment contains data and if we don't + * know that foreign host supports TAO, suppress sending segment. 
+ */ + if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) { + flags &= ~TH_SYN; + off--, len++; + if (len > 0 && tp->t_state == TCPS_SYN_SENT && + taop->tao_ccsent == 0) { + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return 0; + } + } + + /* + * Be careful not to send data and/or FIN on SYN segments + * in cases when no CC option will be sent. + * This measure is needed to prevent interoperability problems + * with not fully conformant TCP implementations. + */ + if ((flags & TH_SYN) && + ((tp->t_flags & TF_NOOPT) || !(tp->t_flags & TF_REQ_CC) || + ((flags & TH_ACK) && !(tp->t_flags & TF_RCVD_CC)))) { + len = 0; + flags &= ~TH_FIN; + } + + if (len < 0) { + /* + * If FIN has been sent but not acked, + * but we haven't been called to retransmit, + * len will be -1. Otherwise, window shrank + * after we sent into it. If window shrank to 0, + * cancel pending retransmit, pull snd_nxt back + * to (closed) window, and set the persist timer + * if it isn't already going. If the window didn't + * close completely, just wait for an ACK. + */ + len = 0; + if (win == 0) { + tp->t_timer[TCPT_REXMT] = 0; + tp->t_rxtshift = 0; + tp->snd_nxt = tp->snd_una; + if (tp->t_timer[TCPT_PERSIST] == 0) + tcp_setpersist(tp); + } + } + if (len > tp->t_maxseg) { + len = tp->t_maxseg; + sendalot = 1; + } + if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc)) + flags &= ~TH_FIN; + + win = sbspace(&so->so_rcv); + + /* + * Sender silly window avoidance. If connection is idle + * and can send all data, a maximum segment, + * at least a maximum default-size segment do it, + * or are forced, do it; otherwise don't bother. + * If peer's buffer is tiny, then send + * when window is at least half open. + * If retransmitting (possibly after persist timer forced us + * to send into a small window), then must resend. 
+ */ + if (len) { + if (len == tp->t_maxseg) + goto send; + if (!(tp->t_flags & TF_MORETOCOME) && + (idle || tp->t_flags & TF_NODELAY) && + (tp->t_flags & TF_NOPUSH) == 0 && + len + off >= so->so_snd.sb_cc) + goto send; + if (tp->t_force) + goto send; + if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0) + goto send; + if (SEQ_LT(tp->snd_nxt, tp->snd_max)) + goto send; + } + + /* + * Compare available window to amount of window + * known to peer (as advertised window less + * next expected input). If the difference is at least two + * max size segments, or at least 50% of the maximum possible + * window, then want to send a window update to peer. + */ + if (win > 0) { + /* + * "adv" is the amount we can increase the window, + * taking into account that we are limited by + * TCP_MAXWIN << tp->rcv_scale. + */ + long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale) - + (tp->rcv_adv - tp->rcv_nxt); + + if (adv >= (long) (2 * tp->t_maxseg)) + goto send; + if (2 * adv >= (long) so->so_rcv.sb_hiwat) + goto send; + } + + /* + * Send if we owe peer an ACK. + */ + if (tp->t_flags & TF_ACKNOW) + goto send; + if ((flags & TH_RST) || + ((flags & TH_SYN) && (tp->t_flags & TF_NEEDSYN) == 0)) + goto send; + if (SEQ_GT(tp->snd_up, tp->snd_una)) + goto send; + /* + * If our state indicates that FIN should be sent + * and we have not yet done so, or we're retransmitting the FIN, + * then we need to send. + */ + if (flags & TH_FIN && + ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una)) + goto send; + + /* + * TCP window updates are not reliable, rather a polling protocol + * using ``persist'' packets is used to insure receipt of window + * updates. The three ``states'' for the output side are: + * idle not doing retransmits or persists + * persisting to move a small or zero window + * (re)transmitting and thereby not persisting + * + * tp->t_timer[TCPT_PERSIST] + * is set when we are in persist state. 
+ * tp->t_force + * is set when we are called to send a persist packet. + * tp->t_timer[TCPT_REXMT] + * is set when we are retransmitting + * The output side is idle when both timers are zero. + * + * If send window is too small, there is data to transmit, and no + * retransmit or persist is pending, then go to persist state. + * If nothing happens soon, send when timer expires: + * if window is nonzero, transmit what we can, + * otherwise force out a byte. + */ + if (so->so_snd.sb_cc && tp->t_timer[TCPT_REXMT] == 0 && + tp->t_timer[TCPT_PERSIST] == 0) { + tp->t_rxtshift = 0; + tcp_setpersist(tp); + } + + /* + * No reason to send a segment, just return. + */ + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return (0); + +send: + /* + * Before ESTABLISHED, force sending of initial options + * unless TCP set not to do any options. + * NOTE: we assume that the IP/TCP header plus TCP options + * always fit in a single mbuf, leaving room for a maximum + * link header, i.e. + * max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MHLEN + */ + optlen = 0; +#if INET6 + if (isipv6) + hdrlen = sizeof (struct tcpip6hdr); + else +#endif + hdrlen = sizeof (struct tcpiphdr); + if (flags & TH_SYN) { + tp->snd_nxt = tp->iss; + if ((tp->t_flags & TF_NOOPT) == 0) { + u_short mss; + + opt[0] = TCPOPT_MAXSEG; + opt[1] = TCPOLEN_MAXSEG; + mss = htons((u_short) tcp_mssopt(tp, isipv6)); + (void)memcpy(opt + 2, &mss, sizeof(mss)); + optlen = TCPOLEN_MAXSEG; + + if ((tp->t_flags & TF_REQ_SCALE) && + ((flags & TH_ACK) == 0 || + (tp->t_flags & TF_RCVD_SCALE))) { + *((u_int32_t *)(opt + optlen)) = htonl( + TCPOPT_NOP << 24 | + TCPOPT_WINDOW << 16 | + TCPOLEN_WINDOW << 8 | + tp->request_r_scale); + optlen += 4; + } + } + } + + /* + * Send a timestamp and echo-reply if this is a SYN and our side + * wants to use timestamps (TF_REQ_TSTMP is set) or both our side + * and our peer have sent timestamps in our SYN's. 
+ */ + if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && + (flags & TH_RST) == 0 && + ((flags & TH_ACK) == 0 || + (tp->t_flags & TF_RCVD_TSTMP))) { + u_int32_t *lp = (u_int32_t *)(opt + optlen); + + /* Form timestamp option as shown in appendix A of RFC 1323. */ + *lp++ = htonl(TCPOPT_TSTAMP_HDR); + *lp++ = htonl(tcp_now); + *lp = htonl(tp->ts_recent); + optlen += TCPOLEN_TSTAMP_APPA; + } + + /* + * Send `CC-family' options if our side wants to use them (TF_REQ_CC), + * options are allowed (!TF_NOOPT) and it's not a RST. + */ + if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC && + (flags & TH_RST) == 0) { + switch (flags & (TH_SYN|TH_ACK)) { + /* + * This is a normal ACK, send CC if we received CC before + * from our peer. + */ + case TH_ACK: + if (!(tp->t_flags & TF_RCVD_CC)) + break; + /*FALLTHROUGH*/ + + /* + * We can only get here in T/TCP's SYN_SENT* state, when + * we're a sending a non-SYN segment without waiting for + * the ACK of our SYN. A check above assures that we only + * do this if our peer understands T/TCP. + */ + case 0: + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_CC; + opt[optlen++] = TCPOLEN_CC; + *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send); + + optlen += 4; + break; + + /* + * This is our initial SYN, check whether we have to use + * CC or CC.new. + */ + case TH_SYN: + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = tp->t_flags & TF_SENDCCNEW ? + TCPOPT_CCNEW : TCPOPT_CC; + opt[optlen++] = TCPOLEN_CC; + *(u_int32_t *)&opt[optlen] = htonl(tp->cc_send); + optlen += 4; + break; + + /* + * This is a SYN,ACK; send CC and CC.echo if we received + * CC from our peer. 
+ */ + case (TH_SYN|TH_ACK): + if (tp->t_flags & TF_RCVD_CC) { + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_CC; + opt[optlen++] = TCPOLEN_CC; + *(u_int32_t *)&opt[optlen] = + htonl(tp->cc_send); + optlen += 4; + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_NOP; + opt[optlen++] = TCPOPT_CCECHO; + opt[optlen++] = TCPOLEN_CC; + *(u_int32_t *)&opt[optlen] = + htonl(tp->cc_recv); + optlen += 4; + } + break; + } + } + + hdrlen += optlen; +#if INET6 + if (isipv6) + ipoptlen = ip6_optlen(tp->t_inpcb); + else +#endif + if (tp->t_inpcb->inp_options) { + ipoptlen = tp->t_inpcb->inp_options->m_len - + offsetof(struct ipoption, ipopt_list); + } else { + ipoptlen = 0; + } +#if IPSEC +#if INET6 + ipoptlen += ipsec_hdrsiz_tcp(tp, isipv6); +#else + ipoptlen += ipsec_hdrsiz_tcp(tp, 0); +#endif +#endif + + /* + * Adjust data length if insertion of options will + * bump the packet length beyond the t_maxopd length. + * Clear the FIN bit because we cut off the tail of + * the segment. + */ + if (len + optlen + ipoptlen > tp->t_maxopd) { + /* + * If there is still more to send, don't close the connection. + */ + flags &= ~TH_FIN; + len = tp->t_maxopd - optlen - ipoptlen; + sendalot = 1; + } + +/*#ifdef DIAGNOSTIC*/ + if (max_linkhdr + hdrlen > MHLEN) + panic("tcphdr too big"); +/*#endif*/ + + /* + * Grab a header mbuf, attaching a copy of data to + * be transmitted, and initialize the header from + * the template for sends on this connection. + */ + if (len) { + if (tp->t_force && len == 1) + tcpstat.tcps_sndprobe++; + else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) { + tcpstat.tcps_sndrexmitpack++; + tcpstat.tcps_sndrexmitbyte += len; + } else { + tcpstat.tcps_sndpack++; + tcpstat.tcps_sndbyte += len; + } +#ifdef notyet + if ((m = m_copypack(so->so_snd.sb_mb, off, + (int)len, max_linkhdr + hdrlen)) == 0) { + error = ENOBUFS; + goto out; + } + /* + * m_copypack left space for our hdr; use it. 
+ */ + m->m_len += hdrlen; + m->m_data -= hdrlen; +#else + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (m == NULL) { + error = ENOBUFS; + goto out; + } +#if INET6 + if (MHLEN < hdrlen + max_linkhdr) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_freem(m); + error = ENOBUFS; + goto out; + } + } +#endif + m->m_data += max_linkhdr; + m->m_len = hdrlen; + if (len <= MHLEN - hdrlen - max_linkhdr) { + m_copydata(so->so_snd.sb_mb, off, (int) len, + mtod(m, caddr_t) + hdrlen); + m->m_len += len; + } else { + m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len); + if (m->m_next == 0) { + (void) m_free(m); + error = ENOBUFS; + goto out; + } + } +#endif + /* + * If we're sending everything we've got, set PUSH. + * (This will keep happy those implementations which only + * give data to the user when a buffer fills or + * a PUSH comes in.) + */ + if (off + len == so->so_snd.sb_cc) + flags |= TH_PUSH; + } else { + if (tp->t_flags & TF_ACKNOW) + tcpstat.tcps_sndacks++; + else if (flags & (TH_SYN|TH_FIN|TH_RST)) + tcpstat.tcps_sndctrl++; + else if (SEQ_GT(tp->snd_up, tp->snd_una)) + tcpstat.tcps_sndurg++; + else + tcpstat.tcps_sndwinup++; + + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (m == NULL) { + error = ENOBUFS; + goto out; + } +#if INET6 + if (isipv6) { + MH_ALIGN(m, hdrlen); + } else +#endif + m->m_data += max_linkhdr; + m->m_len = hdrlen; + } + m->m_pkthdr.rcvif = (struct ifnet *)0; + if (tp->t_template == 0) + panic("tcp_output"); +#if INET6 + if (isipv6) { + ip6 = mtod(m, struct ip6_hdr *); + th = (struct tcphdr *)(ip6 + 1); + bcopy((caddr_t)&tp->t_template->tt_i6, (caddr_t)ip6, + sizeof(struct ip6_hdr)); + bcopy((caddr_t)&tp->t_template->tt_t, (caddr_t)th, + sizeof(struct tcphdr)); + } else { +#endif /* INET6 */ + ip = mtod(m, struct ip *); + ipov = (struct ipovly *)ip; + th = (struct tcphdr *)(ip + 1); + bcopy((caddr_t)&tp->t_template->tt_i, (caddr_t)ip, sizeof(struct ip)); + bcopy((caddr_t)&tp->t_template->tt_t, (caddr_t)th, + sizeof(struct tcphdr)); +#if 
INET6 + } +#endif /* INET6 */ + + /* + * Fill in fields, remembering maximum advertised + * window for use in delaying messages about window sizes. + * If resending a FIN, be sure not to use a new sequence number. + */ + if (flags & TH_FIN && tp->t_flags & TF_SENTFIN && + tp->snd_nxt == tp->snd_max) + tp->snd_nxt--; + /* + * If we are doing retransmissions, then snd_nxt will + * not reflect the first unsent octet. For ACK only + * packets, we do not want the sequence number of the + * retransmitted packet, we want the sequence number + * of the next unsent octet. So, if there is no data + * (and no SYN or FIN), use snd_max instead of snd_nxt + * when filling in ti_seq. But if we are in persist + * state, snd_max might reflect one byte beyond the + * right edge of the window, so use snd_nxt in that + * case, since we know we aren't doing a retransmission. + * (retransmit and persist are mutually exclusive...) + */ + if (len || (flags & (TH_SYN|TH_FIN)) || tp->t_timer[TCPT_PERSIST]) + th->th_seq = htonl(tp->snd_nxt); + else + th->th_seq = htonl(tp->snd_max); + th->th_ack = htonl(tp->rcv_nxt); + if (optlen) { + bcopy(opt, th + 1, optlen); + th->th_off = (sizeof (struct tcphdr) + optlen) >> 2; + } + th->th_flags = flags; + /* + * Calculate receive window. Don't shrink window, + * but avoid silly window syndrome. + */ + if (win < (long)(so->so_rcv.sb_hiwat / 4) && win < (long)tp->t_maxseg) + win = 0; + if (win < (long)(tp->rcv_adv - tp->rcv_nxt)) + win = (long)(tp->rcv_adv - tp->rcv_nxt); + if (win > (long)TCP_MAXWIN << tp->rcv_scale) + win = (long)TCP_MAXWIN << tp->rcv_scale; + th->th_win = htons((u_short) (win>>tp->rcv_scale)); + if (SEQ_GT(tp->snd_up, tp->snd_nxt)) { + th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt)); + th->th_flags |= TH_URG; + } else + /* + * If no urgent pointer to send, then we pull + * the urgent pointer to the left edge of the send window + * so that it doesn't drift into the send window on sequence + * number wraparound. 
+ */ + tp->snd_up = tp->snd_una; /* drag it along */ + + /* + * Put TCP length in extended header, and then + * checksum extended header and data. + */ + m->m_pkthdr.len = hdrlen + len; +#if INET6 + if (isipv6) { +#if 0 /* ip6_plen will be filled in ip6_output. */ + ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + + optlen + len)); +#endif + + th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr), + sizeof(struct tcphdr) + optlen + len); + } else { +#endif /* INET6 */ + if (len + optlen) + ipov->ih_len = htons((u_short)(sizeof (struct tcphdr) + + optlen + len)); + th->th_sum = in_cksum(m, (int)(hdrlen + len)); +#if INET6 + } +#endif /* INET6 */ + + /* + * In transmit state, time the transmission and arrange for + * the retransmit. In persist state, just set snd_max. + */ + if (tp->t_force == 0 || tp->t_timer[TCPT_PERSIST] == 0) { + tcp_seq startseq = tp->snd_nxt; + + /* + * Advance snd_nxt over sequence space of this segment. + */ + if (flags & (TH_SYN|TH_FIN)) { + if (flags & TH_SYN) + tp->snd_nxt++; + if (flags & TH_FIN) { + tp->snd_nxt++; + tp->t_flags |= TF_SENTFIN; + } + } + tp->snd_nxt += len; + if (SEQ_GT(tp->snd_nxt, tp->snd_max)) { + tp->snd_max = tp->snd_nxt; + /* + * Time this transmission if not a retransmission and + * not currently timing anything. + */ + if (tp->t_rtt == 0) { + tp->t_rtt = 1; + tp->t_rtseq = startseq; + tcpstat.tcps_segstimed++; + } + } + + /* + * Set retransmit timer if not currently set, + * and not doing an ack or a keep-alive probe. + * Initial value for retransmit timer is smoothed + * round-trip time + 2 * round-trip time variance. + * Initialize shift counter which is used for backoff + * of retransmit time. 
+ */ + if (tp->t_timer[TCPT_REXMT] == 0 && + tp->snd_nxt != tp->snd_una) { + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + if (tp->t_timer[TCPT_PERSIST]) { + tp->t_timer[TCPT_PERSIST] = 0; + tp->t_rxtshift = 0; + } + } + } else + if (SEQ_GT(tp->snd_nxt + len, tp->snd_max)) + tp->snd_max = tp->snd_nxt + len; + +#if TCPDEBUG + /* + * Trace. + */ + if (so->so_options & SO_DEBUG) { +#if INET6 + if (isipv6) + ip6->ip6_vfc = IPV6_VERSION; + else + ip->ip_vhl = IP_MAKE_VHL(IPVERSION, + IP_VHL_HL(ip->ip_vhl)); +#endif /* INET6 */ + tcp_trace(TA_OUTPUT, tp->t_state, tp, +#if INET6 + isipv6 ? (void *)ip6 : +#endif /* INET6 */ + ip, + th, 0); + + } +#endif /* TCPDEBUG */ + + /* + * Fill in IP length and desired time to live and + * send to IP level. There should be a better way + * to handle ttl and tos; we could keep them in + * the template, but need a way to checksum without them. + */ +#if INET6 + if (isipv6) { + /* + * we separately set hoplimit for every segment, since the + * user might want to change the value via setsockopt. + * Also, desired default hop limit might be changed via + * Neighbor Discovery. + */ + ip6->ip6_hlim = in6_selecthlim(tp->t_inpcb, + tp->t_inpcb->in6p_route.ro_rt ? + tp->t_inpcb->in6p_route.ro_rt->rt_ifp + : NULL); + + /* TODO: IPv6 IP6TOS_ECT bit on */ +#if IPSEC + ipsec_setsocket(m, so); +#endif /*IPSEC*/ + error = ip6_output(m, + tp->t_inpcb->in6p_outputopts, + &tp->t_inpcb->in6p_route, + (so->so_options & SO_DONTROUTE) /* | IP6_DONTFRAG */, + NULL, NULL); + } else +#endif /* INET6 */ + { +#if 1 + struct rtentry *rt; +#endif + ip->ip_len = m->m_pkthdr.len; +#if INET6 + if (INP_CHECK_SOCKAF(so, AF_INET6)) + ip->ip_ttl = in6_selecthlim(tp->t_inpcb, + tp->t_inpcb->in6p_route.ro_rt ? 
+ tp->t_inpcb->in6p_route.ro_rt->rt_ifp + : NULL); + else +#endif /* INET6 */ + ip->ip_ttl = tp->t_inpcb->inp_ip_ttl; /* XXX */ + ip->ip_tos = tp->t_inpcb->inp_ip_tos; /* XXX */ + + KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), + (((th->th_src.s_addr & 0xffff) << 16) | (th->th_dst.s_addr & 0xffff)), + th->th_seq, th->th_ack, th->th_win); + + +#if 1 + /* + * See if we should do MTU discovery. We do it only if the following + * are true: + * 1) we have a valid route to the destination + * 2) the MTU is not locked (if it is, then discovery has been + * disabled) + */ + if ((rt = tp->t_inpcb->inp_route.ro_rt) + && rt->rt_flags & RTF_UP + && !(rt->rt_rmx.rmx_locks & RTV_MTU)) { + ip->ip_off |= IP_DF; + } +#endif + +#if IPSEC + ipsec_setsocket(m, so); +#endif /*IPSEC*/ + + error = ip_output(m, tp->t_inpcb->inp_options, &tp->t_inpcb->inp_route, + so->so_options & SO_DONTROUTE, 0); + } + if (error) { +out: + if (error == ENOBUFS) { + tcp_quench(tp->t_inpcb, 0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return (0); + } +#if 1 + if (error == EMSGSIZE) { + /* + * ip_output() will have already fixed the route + * for us. tcp_mtudisc() will, as its last action, + * initiate retransmission, so it is important to + * not do so here. + */ + tcp_mtudisc(tp->t_inpcb, 0); + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return 0; + } +#endif + if ((error == EHOSTUNREACH || error == ENETDOWN) + && TCPS_HAVERCVDSYN(tp->t_state)) { + tp->t_softerror = error; + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return (0); + } + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return (error); + } + tcpstat.tcps_sndtotal++; + + /* + * Data sent (as far as we can tell). + * If this advertises a larger window than any other segment, + * then remember the size of the advertised window. + * Any pending ACK has now been sent. 
+ */ + if (win > 0 && SEQ_GT(tp->rcv_nxt+win, tp->rcv_adv)) + tp->rcv_adv = tp->rcv_nxt + win; + tp->last_ack_sent = tp->rcv_nxt; + tp->t_flags &= ~(TF_ACKNOW|TF_DELACK); + if (sendalot) + goto again; + KERNEL_DEBUG(DBG_FNC_TCP_OUTPUT | DBG_FUNC_END, 0,0,0,0,0); + return (0); +} + +void +tcp_setpersist(tp) + register struct tcpcb *tp; +{ + register int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1; + + if (tp->t_timer[TCPT_REXMT]) + panic("tcp_output REXMT"); + /* + * Start/restart persistance timer. + */ + TCPT_RANGESET(tp->t_timer[TCPT_PERSIST], + t * tcp_backoff[tp->t_rxtshift], + TCPTV_PERSMIN, TCPTV_PERSMAX); + if (tp->t_rxtshift < TCP_MAXRXTSHIFT) + tp->t_rxtshift++; +} diff --git a/bsd/netinet/tcp_seq.h b/bsd/netinet/tcp_seq.h new file mode 100644 index 000000000..032cff920 --- /dev/null +++ b/bsd/netinet/tcp_seq.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993, 1995 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tcp_seq.h 8.3 (Berkeley) 6/21/95 + */ + +#ifndef _NETINET_TCP_SEQ_H_ +#define _NETINET_TCP_SEQ_H_ +/* + * TCP sequence numbers are 32 bit integers operated + * on with modular arithmetic. These macros can be + * used to compare such integers. 
+ */ +#define SEQ_LT(a,b) ((int)((a)-(b)) < 0) +#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0) +#define SEQ_GT(a,b) ((int)((a)-(b)) > 0) +#define SEQ_GEQ(a,b) ((int)((a)-(b)) >= 0) + +/* for modulo comparisons of timestamps */ +#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0) +#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0) + +/* + * TCP connection counts are 32 bit integers operated + * on with modular arithmetic. These macros can be + * used to compare such integers. + */ +#define CC_LT(a,b) ((int)((a)-(b)) < 0) +#define CC_LEQ(a,b) ((int)((a)-(b)) <= 0) +#define CC_GT(a,b) ((int)((a)-(b)) > 0) +#define CC_GEQ(a,b) ((int)((a)-(b)) >= 0) + +/* Macro to increment a CC: skip 0 which has a special meaning */ +#define CC_INC(c) (++(c) == 0 ? ++(c) : (c)) + +/* + * Macros to initialize tcp sequence numbers for + * send and receive from initial send and receive + * sequence numbers. + */ +#define tcp_rcvseqinit(tp) \ + (tp)->rcv_adv = (tp)->rcv_nxt = (tp)->irs + 1 + +#define tcp_sendseqinit(tp) \ + (tp)->snd_una = (tp)->snd_nxt = (tp)->snd_max = (tp)->snd_up = \ + (tp)->iss + +#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ) + /* timestamp wrap-around time */ + +#ifdef KERNEL +extern tcp_cc tcp_ccgen; /* global connection count */ + +/* + * Increment for tcp_iss each second. + * This is designed to increment at the standard 250 KB/s, + * but with a random component averaging 128 KB. + * We also increment tcp_iss by a quarter of this amount + * each time we use the value for a new connection. + * If defined, the tcp_random18() macro should produce a + * number in the range [0-0x3ffff] that is hard to predict. 
+ */ +#ifndef tcp_random18 +#define tcp_random18() ((random() >> 14) & 0x3ffff) +#endif +#define TCP_ISSINCR (122*1024 + tcp_random18()) + +extern tcp_seq tcp_iss; /* tcp initial send seq # */ +#else +#define TCP_ISSINCR (250*1024) /* increment for tcp_iss each second */ +#endif /* KERNEL */ +#endif /* _NETINET_TCP_SEQ_H_ */ diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c new file mode 100644 index 000000000..45ae7ffed --- /dev/null +++ b/bsd/netinet/tcp_subr.c @@ -0,0 +1,1261 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 + */ + +#if ISFB31 +#include "opt_compat.h" +#include "opt_tcpdebug.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#if ISFB31 +#include +#endif + +#include +#include + +#define _IP_VHL +#include +#include +#include +#include +#include +#include +#if INET6 +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#if TCPDEBUG +#include +#endif +#include + +#if IPSEC +#include +#endif /*IPSEC*/ + +#include + +#define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2)) + + +int tcp_mssdflt = TCP_MSS; +SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, + CTLFLAG_RW, &tcp_mssdflt , 0, ""); + +int tcp_v6mssdflt = TCP6_MSS; +SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt, + CTLFLAG_RW, &tcp_v6mssdflt , 0, ""); + +static int tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ; +SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, + CTLFLAG_RW, &tcp_rttdflt , 0, ""); + +static int tcp_do_rfc1323 = 1; +SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, + CTLFLAG_RW, &tcp_do_rfc1323 , 0, ""); + +static int tcp_do_rfc1644 = 0; +SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, + CTLFLAG_RW, &tcp_do_rfc1644 , 0, ""); + +SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD, &tcbinfo.ipi_count, + 0, "Number of active PCBs"); + +static void tcp_cleartaocache __P((void)); +static void tcp_notify __P((struct inpcb *, int)); +extern u_long current_active_connections; + + + + +/* + * Target size of TCP PCB hash tables. Must be a power of two. + * + * Note that this can be overridden by the kernel environment + * variable net.inet.tcp.tcbhashsize + */ +#ifndef TCBHASHSIZE +#define TCBHASHSIZE 4096 +#endif + +/* + * This is the actual shape of what we allocate using the zone + * allocator. 
Doing it this way allows us to protect both structures + * using the same generation count, and also eliminates the overhead + * of allocating tcpcbs separately. By hiding the structure here, + * we avoid changing most of the rest of the code (although it needs + * to be changed, eventually, for greater efficiency). + */ +#define ALIGNMENT 32 +#define ALIGNM1 (ALIGNMENT - 1) +struct inp_tp { + union { + struct inpcb inp; + char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1]; + } inp_tp_u; + struct tcpcb tcb; +}; +#undef ALIGNMENT +#undef ALIGNM1 + +static struct tcpcb dummy_tcb; + + +extern struct inpcbhead time_wait_slots[]; +extern int cur_tw_slot; +extern u_long *delack_bitmask; + + +int get_inpcb_str_size() +{ + return sizeof(struct inpcb); +} + + +int get_tcp_str_size() +{ + return sizeof(struct tcpcb); +} + +int tcp_freeq __P((struct tcpcb *tp)); + + +/* + * Tcp initialization + */ +void +tcp_init() +{ + int hashsize; + vm_size_t str_size; + int i; + + tcp_iss = random(); /* wrong, but better than a constant */ + tcp_ccgen = 1; + tcp_cleartaocache(); + LIST_INIT(&tcb); + tcbinfo.listhead = &tcb; + if (!(getenv_int("net.inet.tcp.tcbhashsize", &hashsize))) + hashsize = TCBHASHSIZE; + if (!powerof2(hashsize)) { + printf("WARNING: TCB hash size not a power of 2\n"); + hashsize = 512; /* safe default */ + } + tcbinfo.hashsize = hashsize; + tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask); + tcbinfo.porthashbase = hashinit(hashsize, M_PCB, + &tcbinfo.porthashmask); +#if ISFB31 + tcbinfo.ipi_zone = (void *) zinit("tcpcb", sizeof(struct inp_tp), maxsockets, + ZONE_INTERRUPT, 0); +#else + str_size = (vm_size_t) sizeof(struct inp_tp); + tcbinfo.ipi_zone = (void *) zinit(str_size, 120000*str_size, 8192, "inpcb_zone"); +#endif +#if INET6 +#define TCP_LGHDR (sizeof(struct tcpip6hdr)) +#else /* INET6 */ +#define TCP_LGHDR (sizeof(struct tcpiphdr)) +#endif /* INET6 */ + if (max_protohdr < TCP_LGHDR) + max_protohdr = TCP_LGHDR; + if ((max_linkhdr + 
TCP_LGHDR) > MHLEN) + panic("tcp_init"); + + tcbinfo.last_pcb = 0; + dummy_tcb.t_state = TCP_NSTATES; + dummy_tcb.t_flags = 0; + tcbinfo.dummy_cb = (caddr_t) &dummy_tcb; + in_pcb_nat_init(&tcbinfo, AF_INET, IPPROTO_TCP, SOCK_STREAM); + + delack_bitmask = _MALLOC((4 * hashsize)/32, M_PCB, M_NOWAIT); + if (delack_bitmask == 0) + panic("Delack Memory"); + + for (i=0; i < (tcbinfo.hashsize / 32); i++) + delack_bitmask[i] = 0; + + for (i=0; i < N_TIME_WAIT_SLOTS; i++) { + LIST_INIT(&time_wait_slots[i]); + } +#undef TCP_LGHDR +} + +/* + * Create template to be used to send tcp packets on a connection. + * Call after host entry created, allocates an mbuf and fills + * in a skeletal tcp/ip header, minimizing the amount of work + * necessary when the connection is used. + */ +struct tcptemp * +tcp_template(tp) + struct tcpcb *tp; +{ + register struct inpcb *inp = tp->t_inpcb; + register struct mbuf *m; + register struct tcptemp *n; + + if ((n = tp->t_template) == 0) { + m = m_get(M_DONTWAIT, MT_HEADER); + if (m == NULL) + return (0); + m->m_len = sizeof (struct tcptemp); + n = mtod(m, struct tcptemp *); + } + bzero(n->tt_x1, sizeof(n->tt_x1)); + n->tt_pr = IPPROTO_TCP; + n->tt_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip)); + n->tt_src = inp->inp_laddr; + n->tt_dst = inp->inp_faddr; + n->tt_sport = inp->inp_lport; + n->tt_dport = inp->inp_fport; + n->tt_seq = 0; + n->tt_ack = 0; + n->tt_x2 = 0; + n->tt_off = 5; + n->tt_flags = 0; + n->tt_win = 0; + n->tt_sum = 0; + n->tt_urp = 0; +#if INET6 + n->tt_flow = inp->inp_flow & IPV6_FLOWINFO_MASK; + if (ip6_auto_flowlabel) { + n->tt_flow &= ~IPV6_FLOWLABEL_MASK; + n->tt_flow |= (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK); + } + n->tt_vfc |= IPV6_VERSION; + n->tt_pr6 = IPPROTO_TCP; + n->tt_len6 = n->tt_len; + n->tt_src6 = inp->in6p_laddr; + n->tt_dst6 = inp->in6p_faddr; +#endif /* INET6 */ + return (n); +} + +/* + * Send a single message to the TCP at address specified by + * the given TCP/IP header. 
/*
 * Send a single TCP segment described by the given header.  If m == 0 a
 * fresh mbuf is built from the header at `iph`/`th` (used for keepalives
 * from the connection template); otherwise the received mbuf chain is
 * reused in place with addresses/ports swapped to answer the originator
 * (used e.g. for RSTs).  `ack`/`seq`/`flags` are placed into the outgoing
 * header verbatim.  NOTE: if m != NULL, th must point *inside* the mbuf.
 */
void
tcp_respond(tp, iph, th, m, ack, seq, flags, isipv6)
	struct tcpcb *tp;
	void *iph;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
#if INET6
	int isipv6;
#endif
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;
	/* `iph` is viewed both as a bare IP header and as a combined
	 * tcp/ip header; which alias is valid depends on the caller. */
	struct ip *ip = iph;
	struct tcpiphdr *ti = iph;
	struct tcphdr *nth;
#if INET6
	struct route_in6 *ro6 = 0;
	struct route_in6 sro6;
	struct ip6_hdr *ip6 = iph;
	struct tcpip6hdr *ti6 = iph;
#endif /* INET6 */

	if (tp) {
		/* Advertise current receive space, except on RSTs. */
		if (!(flags & TH_RST))
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
#if INET6
		if (isipv6)
			ro6 = &tp->t_inpcb->in6p_route;
		else
#endif /* INET6 */
		ro = &tp->t_inpcb->inp_route;
	} else {
		/* No pcb: use a throwaway on-stack route. */
#if INET6
		if (isipv6) {
			ro6 = &sro6;
			bzero(ro6, sizeof *ro6);
		} else {
#endif /* INET6 */
		ro = &sro;
		bzero(ro, sizeof *ro);
#if INET6
		}
#endif /* INET6 */
	}
	if (m == 0) {
		/* Build a new packet by copying the supplied header. */
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
#if TCP_COMPAT_42
		/* 4.2BSD keepalives need one byte of garbage data. */
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
#if INET6
		if (isipv6) {
			ti6 = mtod(m, struct tcpip6hdr *);
			bcopy((caddr_t)ip6, (caddr_t)&ti6->ti6_i,
			      sizeof(struct ip6_hdr));
			ip6 = &ti6->ti6_i;
			nth = &ti6->ti6_t;
		} else {
#endif /* INET6 */
		ti = mtod(m, struct tcpiphdr *);
		bcopy((caddr_t)ip, (caddr_t)&ti->ti_i, sizeof(struct ip));
		ip = (struct ip *)&ti->ti_i;
		nth = &ti->ti_t;
#if INET6
		}
#endif /* INET6 */
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		/* Reuse the received mbuf: strip continuation mbufs and
		 * swap src/dst so the reply goes back to the sender. */
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#if INET6
		if (isipv6) {
			struct in6_addr t;

			t = ip6->ip6_dst;
			ip6->ip6_dst = ip6->ip6_src;
			ip6->ip6_src = t;
			nth = (struct tcphdr *)(ip6 + 1);
			if (th != nth) {
				/*
				 * this is the case if an extension header
				 * exists between the IPv6 header and the
				 * TCP header.
				 */
				nth->th_sport = th->th_sport;
				nth->th_dport = th->th_dport;
			}
		} else {
#endif /* INET6 */
		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, n_long);
		nth = th;
#if INET6
		}
#endif /* INET6 */
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
	/* Fill in the caller-specified sequencing and flags. */
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
	nth->th_sum = 0;
	tlen += sizeof (struct tcphdr);
#if INET6
	if (isipv6) {
		m->m_len = tlen + sizeof(struct ip6_hdr);
		m->m_pkthdr.len = tlen + sizeof(struct ip6_hdr);
		m->m_pkthdr.rcvif = (struct ifnet *) 0;
		ip6->ip6_plen = htons((u_short)tlen);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL,
					       ro6 && ro6->ro_rt ?
					       ro6->ro_rt->rt_ifp :
					       NULL);
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
					sizeof(struct ip6_hdr), tlen);
		ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
		if (ip6_auto_flowlabel) {
			ip6->ip6_flow |=
				(htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);
		}
	} else {
#endif /* INET6 */
	ti->ti_len = htons((u_short)(tlen));
	m->m_len = tlen + sizeof(struct ip);
	m->m_pkthdr.len = tlen + sizeof(struct ip);
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	/* Zero the pseudo-header pad so in_cksum covers a clean header. */
	bzero(ti->ti_x1, sizeof(ti->ti_x1));
	nth->th_sum = in_cksum(m, tlen + sizeof(struct ip));
	ip->ip_len = tlen + sizeof (struct ip);
	ip->ip_ttl = ip_defttl;
#if INET6
	}
#endif /* INET6 */
#if TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp,
#if INET6
			  isipv6 ? (void *)ip6 :
#endif /* INET6 */
			  ip,
			  nth, 0);
#endif
#if IPSEC
	ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL);
#endif /*IPSEC*/
#if INET6
	if (isipv6) {
		(void)ip6_output(m, NULL, ro6, 0, NULL, NULL);
		/* Release the temporary route taken on the pcb-less path. */
		if (ro6 == &sro6 && ro6->ro_rt)
			RTFREE(ro6->ro_rt);
	} else {
#endif /* INET6 */
	(void)ip_output(m, NULL, ro, 0, NULL);
	if (ro == &sro && ro->ro_rt) {
		RTFREE(ro->ro_rt);
	}
#if INET6
	}
#endif /* INET6 */
}
/*
 * Create a new TCP control block, making an empty reassembly queue and
 * hooking it to the argument protocol control block.  The `inp' parameter
 * must have come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	register struct tcpcb *tp;
	register struct socket *so = inp->inp_socket;
#if INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	/*
	 * When the socket layer did not cache the pcb, the tcpcb lives
	 * inside the combined inp_tp allocation; otherwise reuse the
	 * tcpcb saved on the pcb by a previous incarnation.
	 */
	if (so->cached_in_sock_layer == 0) {
	      it = (struct inp_tp *)inp;
	      tp = &it->tcb;
	}
	else
	      tp = (struct tcpcb *) inp->inp_saved_ppcb;

	bzero((char *) tp, sizeof(struct tcpcb));
	tp->segq.lh_first = NULL;
	/* Default MSS until the peer's is learned (per address family). */
	tp->t_maxseg = tp->t_maxopd =
#if INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
	/* Start with wide-open congestion window and threshold. */
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 * XXX: is there a better approach?
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}
+ */ +struct tcpcb * +tcp_drop(tp, errno) + register struct tcpcb *tp; + int errno; +{ + struct socket *so = tp->t_inpcb->inp_socket; + + switch (tp->t_state) + { + case TCPS_ESTABLISHED: + case TCPS_FIN_WAIT_1: + case TCPS_CLOSING: + case TCPS_CLOSE_WAIT: + case TCPS_LAST_ACK: + current_active_connections--; + break; + } + + if (TCPS_HAVERCVDSYN(tp->t_state)) { + tp->t_state = TCPS_CLOSED; + (void) tcp_output(tp); + tcpstat.tcps_drops++; + } else + tcpstat.tcps_conndrops++; + if (errno == ETIMEDOUT && tp->t_softerror) + errno = tp->t_softerror; + so->so_error = errno; + return (tcp_close(tp)); +} + +/* + * Close a TCP control block: + * discard all space held by the tcp + * discard internet protocol block + * wake up any sleepers + */ +struct tcpcb * +tcp_close(tp) + register struct tcpcb *tp; +{ + register struct mbuf *q; + register struct mbuf *nq; + struct inpcb *inp = tp->t_inpcb; + struct socket *so = inp->inp_socket; +#if INET6 + int isipv6 = INP_CHECK_SOCKAF(so, AF_INET6); +#endif /* INET6 */ + register struct rtentry *rt; + int dosavessthresh; + + + KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp,0,0,0,0); + switch (tp->t_state) + { + case TCPS_ESTABLISHED: + case TCPS_FIN_WAIT_1: + case TCPS_CLOSING: + case TCPS_CLOSE_WAIT: + case TCPS_LAST_ACK: + current_active_connections--; + break; + } + + + /* + * If we got enough samples through the srtt filter, + * save the rtt and rttvar in the routing entry. + * 'Enough' is arbitrarily defined as the 16 samples. + * 16 samples is enough for the srtt filter to converge + * to within 5% of the correct value; fewer samples and + * we could save a very bogus rtt. + * + * Don't update the default route's characteristics and don't + * update anything that the user "locked". 
+ */ + if (tp->t_rttupdated >= 16) { + register u_long i = 0; +#if INET6 + if (isipv6) { + struct sockaddr_in6 *sin6; + + if ((rt = inp->in6p_route.ro_rt) == NULL) + goto no_valid_rt; + sin6 = (struct sockaddr_in6 *)rt_key(rt); + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) + goto no_valid_rt; + } + else +#endif /* INET6 */ + if ((rt = inp->inp_route.ro_rt) == NULL || + ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr + == INADDR_ANY) + goto no_valid_rt; + + if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) { + i = tp->t_srtt * + (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTT_SCALE)); + if (rt->rt_rmx.rmx_rtt && i) + /* + * filter this update to half the old & half + * the new values, converting scale. + * See route.h and tcp_var.h for a + * description of the scaling constants. + */ + rt->rt_rmx.rmx_rtt = + (rt->rt_rmx.rmx_rtt + i) / 2; + else + rt->rt_rmx.rmx_rtt = i; + tcpstat.tcps_cachedrtt++; + } + if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) { + i = tp->t_rttvar * + (RTM_RTTUNIT / (PR_SLOWHZ * TCP_RTTVAR_SCALE)); + if (rt->rt_rmx.rmx_rttvar && i) + rt->rt_rmx.rmx_rttvar = + (rt->rt_rmx.rmx_rttvar + i) / 2; + else + rt->rt_rmx.rmx_rttvar = i; + tcpstat.tcps_cachedrttvar++; + } + /* + * The old comment here said: + * update the pipelimit (ssthresh) if it has been updated + * already or if a pipesize was specified & the threshhold + * got below half the pipesize. I.e., wait for bad news + * before we start updating, then update on both good + * and bad news. + * + * But we want to save the ssthresh even if no pipesize is + * specified explicitly in the route, because such + * connections still have an implicit pipesize specified + * by the global tcp_sendspace. In the absence of a reliable + * way to calculate the pipesize, it will have to do. 
+ */ + i = tp->snd_ssthresh; + if (rt->rt_rmx.rmx_sendpipe != 0) + dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2); + else + dosavessthresh = (i < so->so_snd.sb_hiwat / 2); + if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 && + i != 0 && rt->rt_rmx.rmx_ssthresh != 0) + || dosavessthresh) { + /* + * convert the limit from user data bytes to + * packets then to packet data bytes. + */ + i = (i + tp->t_maxseg / 2) / tp->t_maxseg; + if (i < 2) + i = 2; + i *= (u_long)(tp->t_maxseg + +#if INET6 + isipv6 ? sizeof (struct tcpip6hdr) : +#endif /* INET6 */ + sizeof (struct tcpiphdr)); + if (rt->rt_rmx.rmx_ssthresh) + rt->rt_rmx.rmx_ssthresh = + (rt->rt_rmx.rmx_ssthresh + i) / 2; + else + rt->rt_rmx.rmx_ssthresh = i; + tcpstat.tcps_cachedssthresh++; + } + } + no_valid_rt: + /* free the reassembly queue, if any */ + (void) tcp_freeq(tp); + + if (tp->t_template) + (void) m_free(dtom(tp->t_template)); + + if (so->cached_in_sock_layer) + inp->inp_saved_ppcb = (caddr_t) tp; + + inp->inp_ppcb = NULL; + soisdisconnected(so); +#if INET6 + if (isipv6) + in6_pcbdetach(inp); + else +#endif /* INET6 */ + in_pcbdetach(inp); + tcpstat.tcps_closed++; + KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed,0,0,0,0); + return ((struct tcpcb *)0); +} + +int +tcp_freeq(tp) + struct tcpcb *tp; +{ + register struct ipqent *qe; + int rv = 0; + + while ((qe = tp->segq.lh_first) != NULL) { + LIST_REMOVE(qe, ipqe_q); + m_freem(qe->ipqe_m); + FREE(qe, M_SONAME); + rv = 1; + } + return (rv); +} + +void +tcp_drain() +{ + +} + +/* + * Notify a tcp user of an asynchronous error; + * store error as soft error, but wake up user + * (for now, won't do anything until can select for soft error). + */ +static void +tcp_notify(inp, error) + struct inpcb *inp; + int error; +{ + register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb; + register struct socket *so = inp->inp_socket; + + /* + * Ignore some errors if we are hooked up. 
/*
 * sysctl handler that exports the list of active TCP pcbs as a stream of
 * struct xtcpcb records, bracketed by two struct xinpgen markers so the
 * reader can detect concurrent changes (generation counts).
 */
static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 * Size estimate only: leave slack (n + n/8) for growth between
	 * the size probe and the actual copy-out.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	/* Read-only node: reject attempts to write. */
	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 * Snapshot the generation count and pcb count at splnet.
	 */
	s = splnet();
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	splx(s);

	/* Leading marker: tells the reader how many records to expect. */
	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	/* Collect pcbs no newer than the snapshot generation. */
	s = splnet();
	for (inp = tcbinfo.listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt)
			inp_list[i++] = inp;
	}
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			/* NOTE(review): inp_ppcb is not checked for NULL
			 * before this bcopy — confirm a pcb on this list
			 * always has a tcpcb attached. */
			bcopy(inp->inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	FREE(inp_list, M_TEMP);
	return error;
}
+ */ + s = splnet(); + xig.xig_gen = tcbinfo.ipi_gencnt; + xig.xig_sogen = so_gencnt; + xig.xig_count = tcbinfo.ipi_count; + splx(s); + error = SYSCTL_OUT(req, &xig, sizeof xig); + } + FREE(inp_list, M_TEMP); + return error; +} + + +SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0, + tcp_pcblist, "S,xtcpcb", "List of active TCP connections"); + +void +tcp_ctlinput(cmd, sa, vip) + int cmd; + struct sockaddr *sa; + void *vip; +{ + register struct ip *ip = vip; + register struct tcphdr *th; + void (*notify) __P((struct inpcb *, int)) = tcp_notify; + + if (cmd == PRC_QUENCH) + notify = tcp_quench; + else if (cmd == PRC_MSGSIZE) + notify = tcp_mtudisc; + else if (!PRC_IS_REDIRECT(cmd) && + ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0)) + return; + if (ip) { + th = (struct tcphdr *)((caddr_t)ip + + (IP_VHL_HL(ip->ip_vhl) << 2)); + in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport, + cmd, notify); + } else + in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify); +} + +#if INET6 +void +tcp6_ctlinput(cmd, sa, d) + int cmd; + struct sockaddr *sa; + void *d; +{ + register struct tcphdr *thp; + struct tcphdr th; + void (*notify) __P((struct inpcb *, int)) = tcp_notify; + struct sockaddr_in6 sa6; + struct ip6_hdr *ip6; + struct mbuf *m; + int off = 0 ; + + if (sa->sa_family != AF_INET6 || + sa->sa_len != sizeof(struct sockaddr_in6)) + return; + + if (cmd == PRC_QUENCH) + notify = tcp_quench; + else if (cmd == PRC_MSGSIZE) + notify = tcp_mtudisc; + else if (!PRC_IS_REDIRECT(cmd) && + ((unsigned)cmd > PRC_NCMDS || inet6ctlerrmap[cmd] == 0)) + return; + + /* if the parameter is from icmp6, decode it. 
*/ + if (d != NULL) { + struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d; + m = ip6cp->ip6c_m; + ip6 = ip6cp->ip6c_ip6; + off = ip6cp->ip6c_off; + } else { + m = NULL; + ip6 = NULL; + } + + /* translate addresses into internal form */ + sa6 = *(struct sockaddr_in6 *)sa; + if (IN6_IS_ADDR_LINKLOCAL(&sa6.sin6_addr) && m && m->m_pkthdr.rcvif) + sa6.sin6_addr.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + + if (ip6) { + /* + * XXX: We assume that when IPV6 is non NULL, + * M and OFF are valid. + */ + struct in6_addr s; + + /* translate addresses into internal form */ + memcpy(&s, &ip6->ip6_src, sizeof(s)); + if (IN6_IS_ADDR_LINKLOCAL(&s)) + s.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + + + if (m->m_len < off + sizeof(*thp)) { + /* + * this should be rare case, + * so we compromise on this copy... + */ + m_copydata(m, off, sizeof(th), (caddr_t)&th); + thp = &th; + } else + thp = (struct tcphdr *)(mtod(m, caddr_t) + off); + in6_pcbnotify(&tcb, (struct sockaddr *)&sa6, thp->th_dport, + &s, thp->th_sport, cmd, notify); + } else + in6_pcbnotify(&tcb, (struct sockaddr *)&sa6, 0, &zeroin6_addr, + 0, cmd, notify); +} +#endif /* INET6 */ + +/* + * When a source quench is received, close congestion window + * to one segment. We will gradually open it again as we proceed. + */ +void +tcp_quench(inp, errno) + struct inpcb *inp; + int errno; +{ + struct tcpcb *tp = intotcpcb(inp); + + if (tp) + tp->snd_cwnd = tp->t_maxseg; +} + +/* + * When `need fragmentation' ICMP is received, update our idea of the MSS + * based on the new value in the route. Also nudge TCP to send something, + * since we know the packet we just sent was dropped. + * This duplicates some code in the tcp_mss() function in tcp_input.c. 
+ */ +void +tcp_mtudisc(inp, errno) + struct inpcb *inp; + int errno; +{ + struct tcpcb *tp = intotcpcb(inp); + struct rtentry *rt; + struct rmxp_tao *taop; + struct socket *so = inp->inp_socket; + int offered; + int mss; +#if INET6 + int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; +#endif /* INET6 */ + + if (tp) { +#if INET6 + if (isipv6) + rt = tcp_rtlookup6(inp); + else +#endif /* INET6 */ + rt = tcp_rtlookup(inp); + if (!rt || !rt->rt_rmx.rmx_mtu) { + tp->t_maxopd = tp->t_maxseg = +#if INET6 + isipv6 ? tcp_v6mssdflt : +#endif /* INET6 */ + tcp_mssdflt; + return; + } + taop = rmx_taop(rt->rt_rmx); + offered = taop->tao_mssopt; + mss = rt->rt_rmx.rmx_mtu - +#if INET6 + (isipv6 ? + sizeof(struct tcpip6hdr) : +#endif /* INET6 */ + sizeof(struct tcpiphdr) +#if INET6 + ) +#endif /* INET6 */ + ; + + if (offered) + mss = min(mss, offered); + /* + * XXX - The above conditional probably violates the TCP + * spec. The problem is that, since we don't know the + * other end's MSS, we are supposed to use a conservative + * default. But, if we do that, then MTU discovery will + * never actually take place, because the conservative + * default is much less than the MTUs typically seen + * on the Internet today. For the moment, we'll sweep + * this under the carpet. + * + * The conservative default might not actually be a problem + * if the only case this occurs is when sending an initial + * SYN with options and data to a host we've never talked + * to before. Then, they will reply with an MSS value which + * will get recorded and the new parameters should get + * recomputed. For Further Study. 
+ */ + if (tp->t_maxopd <= mss) + return; + tp->t_maxopd = mss; + + if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && + (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) + mss -= TCPOLEN_TSTAMP_APPA; + if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC && + (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC) + mss -= TCPOLEN_CC_APPA; +#if (MCLBYTES & (MCLBYTES - 1)) == 0 + if (mss > MCLBYTES) + mss &= ~(MCLBYTES-1); +#else + if (mss > MCLBYTES) + mss = mss / MCLBYTES * MCLBYTES; +#endif + if (so->so_snd.sb_hiwat < mss) + mss = so->so_snd.sb_hiwat; + + tp->t_maxseg = mss; + + tcpstat.tcps_mturesent++; + tp->t_rtt = 0; + tp->snd_nxt = tp->snd_una; + tcp_output(tp); + } +} + +/* + * Look-up the routing entry to the peer of this inpcb. If no route + * is found and it cannot be allocated the return NULL. This routine + * is called by TCP routines that access the rmx structure and by tcp_mss + * to get the interface MTU. + */ +struct rtentry * +tcp_rtlookup(inp) + struct inpcb *inp; +{ + struct route *ro; + struct rtentry *rt; + + ro = &inp->inp_route; + rt = ro->ro_rt; + if (rt == NULL || !(rt->rt_flags & RTF_UP)) { + /* No route yet, so try to acquire one */ + if (inp->inp_faddr.s_addr != INADDR_ANY) { + ro->ro_dst.sa_family = AF_INET; + ro->ro_dst.sa_len = sizeof(ro->ro_dst); + ((struct sockaddr_in *) &ro->ro_dst)->sin_addr = + inp->inp_faddr; + rtalloc(ro); + rt = ro->ro_rt; + } + } + return rt; +} + +#if INET6 +struct rtentry * +tcp_rtlookup6(inp) + struct inpcb *inp; +{ + struct route_in6 *ro6; + struct rtentry *rt; + + ro6 = &inp->in6p_route; + rt = ro6->ro_rt; + if (rt == NULL || !(rt->rt_flags & RTF_UP)) { + /* No route yet, so try to acquire one */ + if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { + ro6->ro_dst.sin6_family = AF_INET6; + ro6->ro_dst.sin6_len = sizeof(ro6->ro_dst); + ro6->ro_dst.sin6_addr = inp->in6p_faddr; + rtalloc((struct route *)ro6); + rt = ro6->ro_rt; + } + } + return rt; +} +#endif /* INET6 */ + +#if IPSEC +/* compute ESP/AH header 
size for TCP, including outer IP header. */ +size_t +ipsec_hdrsiz_tcp(tp, isipv6) + struct tcpcb *tp; +#if INET6 + int isipv6; +#endif /* INET6 */ +{ + struct inpcb *inp; + struct mbuf *m; + size_t hdrsiz; + struct ip *ip; +#if INET6 + struct ip6_hdr *ip6 = NULL; +#endif /* INET6 */ + struct tcphdr *th; + + if (!tp || !tp->t_template || !(inp = tp->t_inpcb)) + return 0; + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (!m) + return 0; + +#if INET6 + if (isipv6) { + ip6 = mtod(m, struct ip6_hdr *); + th = (struct tcphdr *)(ip6 + 1); + m->m_pkthdr.len = m->m_len = sizeof(struct tcpip6hdr); + bcopy((caddr_t)&tp->t_template->tt_i6, (caddr_t)ip6, + sizeof(struct ip6_hdr)); + bcopy((caddr_t)&tp->t_template->tt_t, (caddr_t)th, + sizeof(struct tcphdr)); + } else { +#endif /* INET6 */ + ip = mtod(m, struct ip *); + th = (struct tcphdr *)(ip + 1); + m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr); + bcopy((caddr_t)&tp->t_template->tt_i, (caddr_t)ip, sizeof(struct ip)); + bcopy((caddr_t)&tp->t_template->tt_t, (caddr_t)th, + sizeof(struct tcphdr)); +#if INET6 + } +#endif /* INET6 */ + +#if INET6 + if (isipv6) + hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); + else +#endif /* INET6 */ + hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); + + m_free(m); + return hdrsiz; +} +#endif /*IPSEC*/ + +/* + * Return a pointer to the cached information about the remote host. + * The cached information is stored in the protocol specific part of + * the route metrics. + */ +struct rmxp_tao * +tcp_gettaocache(inp) + struct inpcb *inp; +{ +#if INET6 + int isipv6 = (inp->inp_vflag & INP_IPV4) == 0; +#endif /* INET6 */ + struct rtentry *rt; + +#if INET6 + if (isipv6) + rt = tcp_rtlookup6(inp); + else +#endif /* INET6 */ + rt = tcp_rtlookup(inp); + + /* Make sure this is a host route and is up. */ + if (rt == NULL || + (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) + return NULL; + + return rmx_taop(rt->rt_rmx); +} + +/* + * Clear all the TAO cache entries, called from tcp_init. 
+ * + * XXX + * This routine is just an empty one, because we assume that the routing + * routing tables are initialized at the same time when TCP, so there is + * nothing in the cache left over. + */ +static void +tcp_cleartaocache() +{ +} diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c new file mode 100644 index 000000000..3819a60cf --- /dev/null +++ b/bsd/netinet/tcp_timer.c @@ -0,0 +1,549 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
/*
 * Move a connection entering TIME_WAIT onto the timer-wheel of
 * time_wait_slots, removing it from the main tcb list.  The slot is
 * chosen from the remaining 2MSL ticks relative to cur_tw_slot.
 */
void	add_to_time_wait(tp)
	struct tcpcb	*tp;
{
	int		tw_slot;

	/* Take the pcb off the global list; it now lives on a wheel slot. */
	LIST_REMOVE(tp->t_inpcb, inp_list);

	/* A zero timer would never fire from the wheel; force at least 1. */
	if (tp->t_timer[TCPT_2MSL] == 0)
		tp->t_timer[TCPT_2MSL] = 1;

	/*
	 * NOTE(review): the 2MSL ticks are masked to the wheel size here,
	 * both for the idle-time credit and the slot choice; ticks beyond
	 * one wheel revolution are handled later by tcp_slowtimo's
	 * ">= N_TIME_WAIT_SLOTS" decrement — confirm against that code.
	 */
	tp->t_idle += tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1);
	tw_slot = (tp->t_timer[TCPT_2MSL] & (N_TIME_WAIT_SLOTS - 1)) + cur_tw_slot;
	if (tw_slot >= N_TIME_WAIT_SLOTS)
		tw_slot -= N_TIME_WAIT_SLOTS;

	LIST_INSERT_HEAD(&time_wait_slots[tw_slot], tp->t_inpcb, inp_list);
}
+tcp_fasttimo() +{ + register struct inpcb *inp; + register struct tcpcb *tp; + + + register u_long i,j; + register u_long temp_mask; + register u_long elem_base = 0; + struct inpcbhead *head; + int s = splnet(); + + static + int delack_checked = 0; + + KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_START, 0,0,0,0,0); + + if (!tcp_delack_enabled) + return; + + if ((current_active_connections > DELACK_BITMASK_THRESH) && + (last_active_conn_count > DELACK_BITMASK_THRESH)) { + for (i=0; i < (tcbinfo.hashsize / 32); i++) { + if (delack_bitmask[i]) { + temp_mask = 1; + for (j=0; j < 32; j++) { + if (temp_mask & delack_bitmask[i]) { + head = &tcbinfo.hashbase[elem_base + j]; + for (inp=head->lh_first; inp != 0; inp = inp->inp_hash.le_next) { + delack_checked++; + if ((tp = (struct tcpcb *)inp->inp_ppcb) && (tp->t_flags & TF_DELACK)) { + tp->t_flags &= ~TF_DELACK; + tp->t_flags |= TF_ACKNOW; + tcpstat.tcps_delack++; + (void) tcp_output(tp); + } + } + } + temp_mask <<= 1; + } + delack_bitmask[i] = 0; + } + elem_base += 32; + } + } + else + { + for (inp = tcb.lh_first; inp != NULL; inp = inp->inp_list.le_next) { + if ((tp = (struct tcpcb *)inp->inp_ppcb) && + (tp->t_flags & TF_DELACK)) { + tp->t_flags &= ~TF_DELACK; + tp->t_flags |= TF_ACKNOW; + tcpstat.tcps_delack++; + (void) tcp_output(tp); + } + } + } + + last_active_conn_count = current_active_connections; + KERNEL_DEBUG(DBG_FNC_TCP_FAST | DBG_FUNC_END, delack_checked,tcpstat.tcps_delack,0,0,0); + splx(s); + +} + +/* + * Tcp protocol timeout routine called every 500 ms. + * Updates the timers in all active tcb's and + * causes finite state machine actions if timers expire. 
/*
 * Tcp protocol timeout routine called every 500 ms.
 * Updates the timers in all active tcb's and
 * causes finite state machine actions if timers expire.
 * Also advances the TIME_WAIT wheel by one slot per call.
 */
void
tcp_slowtimo()
{
	register struct inpcb *ip, *ipnxt;
	register struct tcpcb *tp;
	register int i;
	int s;
#if TCPDEBUG
	int ostate;
#endif
#if KDEBUG
	static int tws_checked;
#endif

	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_START, 0,0,0,0,0);
	s = splnet();

	tcp_maxidle = tcp_keepcnt * tcp_keepintvl;

	ip = tcb.lh_first;
	if (ip == NULL) {
		splx(s);
		return;
	}
	/*
	 * Search through tcb's and update active timers.
	 * ipnxt is captured first because tcp_timers() may free the pcb.
	 */
	for (; ip != NULL; ip = ipnxt) {
		ipnxt = ip->inp_list.le_next;
		tp = intotcpcb(ip);
		if (tp == 0 || tp->t_state == TCPS_LISTEN)
			continue;
		for (i = 0; i < TCPT_NTIMERS; i++) {
			if (tp->t_timer[i] && --tp->t_timer[i] == 0) {
#if TCPDEBUG
				ostate = tp->t_state;
#endif
				tp = tcp_timers(tp, i);
				if (tp == NULL)
					goto tpgone;
#if TCPDEBUG
				if (tp->t_inpcb->inp_socket->so_options
				    & SO_DEBUG)
					tcp_trace(TA_USER, ostate, tp,
						  (void *)0,
						  (struct tcphdr *)0,
						  PRU_SLOWTIMO);
#endif
			}
		}
		/* Per-tick bookkeeping for connections that survived. */
		tp->t_idle++;
		tp->t_duration++;
		if (tp->t_rtt)
			tp->t_rtt++;
tpgone:
		;
	}

#if KDEBUG
	tws_checked = 0;
#endif
	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_NONE, tws_checked,0,0,0,0);

	/*
	 * Process the items in the current time-wait slot
	 */
	for (ip = time_wait_slots[cur_tw_slot].lh_first; ip; ip = ipnxt)
	{
#if KDEBUG
		tws_checked++;
#endif
		ipnxt = ip->inp_list.le_next;
		tp = intotcpcb(ip);
		/* Timers longer than one wheel turn stay for another lap. */
		if (tp->t_timer[TCPT_2MSL] >= N_TIME_WAIT_SLOTS) {
			tp->t_timer[TCPT_2MSL] -= N_TIME_WAIT_SLOTS;
			tp->t_idle += N_TIME_WAIT_SLOTS;
		}
		else
			tp->t_timer[TCPT_2MSL] = 0;

		if (tp->t_timer[TCPT_2MSL] == 0)
			tp = tcp_timers(tp, TCPT_2MSL);
	}

	if (++cur_tw_slot >= N_TIME_WAIT_SLOTS)
		cur_tw_slot = 0;

	tcp_iss += TCP_ISSINCR/PR_SLOWHZ;		/* increment iss */
#if TCP_COMPAT_42
	if ((int)tcp_iss < 0)
		tcp_iss = TCP_ISSINCR;			/* XXX */
#endif
	tcp_now++;					/* for timestamps */
	splx(s);
	KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot,0,0,0);
}
all timers for TCP tp. + */ +void +tcp_canceltimers(tp) + struct tcpcb *tp; +{ + register int i; + + for (i = 0; i < TCPT_NTIMERS; i++) + tp->t_timer[i] = 0; +} + +int tcp_backoff[TCP_MAXRXTSHIFT + 1] = + { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; + +static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */ + +/* + * TCP timer processing. + */ +struct tcpcb * +tcp_timers(tp, timer) + register struct tcpcb *tp; + int timer; +{ + register int rexmt; + struct socket *so_tmp; +#if INET6 + int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV4) == 0; +#endif /* INET6 */ + + switch (timer) { + + /* + * 2 MSL timeout in shutdown went off. If we're closed but + * still waiting for peer to close and connection has been idle + * too long, or if 2MSL time is up from TIME_WAIT, delete connection + * control block. Otherwise, check again in a bit. + */ + case TCPT_2MSL: + if (tp->t_state != TCPS_TIME_WAIT && + tp->t_idle <= tcp_maxidle) { + tp->t_timer[TCPT_2MSL] = tcp_keepintvl; + add_to_time_wait(tp); + } + else + tp = tcp_close(tp); + break; + + /* + * Retransmission timer went off. Message has not + * been acked within retransmit interval. Back off + * to a longer retransmit interval and retransmit one segment. + */ + case TCPT_REXMT: + if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) { + tp->t_rxtshift = TCP_MAXRXTSHIFT; + tcpstat.tcps_timeoutdrop++; + so_tmp = tp->t_inpcb->inp_socket; + tp = tcp_drop(tp, tp->t_softerror ? + tp->t_softerror : ETIMEDOUT); + postevent(so_tmp, 0, EV_TIMEOUT); + break; + } + tcpstat.tcps_rexmttimeo++; + rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift]; + TCPT_RANGESET(tp->t_rxtcur, rexmt, + tp->t_rttmin, TCPTV_REXMTMAX); + tp->t_timer[TCPT_REXMT] = tp->t_rxtcur; + /* + * If losing, let the lower level know and try for + * a better route. Also, if we backed off this far, + * our srtt estimate is probably bogus. 
Clobber it + * so we'll take the next rtt measurement as our srtt; + * move the current srtt into rttvar to keep the current + * retransmit times until then. + */ + if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { +#if INET6 + if (isipv6) + in6_losing(tp->t_inpcb); + else +#endif /* INET6 */ + in_losing(tp->t_inpcb); + tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); + tp->t_srtt = 0; + } + tp->snd_nxt = tp->snd_una; + /* + * Force a segment to be sent. + */ + tp->t_flags |= TF_ACKNOW; + /* + * If timing a segment in this window, stop the timer. + */ + tp->t_rtt = 0; + /* + * Close the congestion window down to one segment + * (we'll open it by one segment for each ack we get). + * Since we probably have a window's worth of unacked + * data accumulated, this "slow start" keeps us from + * dumping all that data as back-to-back packets (which + * might overwhelm an intermediate gateway). + * + * There are two phases to the opening: Initially we + * open by one mss on each ack. This makes the window + * size increase exponentially with time. If the + * window is larger than the path can handle, this + * exponential growth results in dropped packet(s) + * almost immediately. To get more time between + * drops but still "push" the network to take advantage + * of improving conditions, we switch from exponential + * to linear window opening at some threshold size. + * For a threshold, we use half the current window + * size, truncated to a multiple of the mss. + * + * (the minimum cwnd that will give us exponential + * growth is 2 mss. We don't allow the threshold + * to go below this.) + */ + { + u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; + if (win < 2) + win = 2; + tp->snd_cwnd = tp->t_maxseg; + tp->snd_ssthresh = win * tp->t_maxseg; + tp->t_dupacks = 0; + } + (void) tcp_output(tp); + break; + + /* + * Persistence timer into zero window. + * Force a byte to be output, if possible. 
+ */ + case TCPT_PERSIST: + tcpstat.tcps_persisttimeo++; + /* + * Hack: if the peer is dead/unreachable, we do not + * time out if the window is closed. After a full + * backoff, drop the connection if the idle time + * (no responses to probes) reaches the maximum + * backoff that we would use if retransmitting. + */ + if (tp->t_rxtshift == TCP_MAXRXTSHIFT && + (tp->t_idle >= tcp_maxpersistidle || + tp->t_idle >= TCP_REXMTVAL(tp) * tcp_totbackoff)) { + tcpstat.tcps_persistdrop++; + so_tmp = tp->t_inpcb->inp_socket; + tp = tcp_drop(tp, ETIMEDOUT); + postevent(so_tmp, 0, EV_TIMEOUT); + break; + } + tcp_setpersist(tp); + tp->t_force = 1; + (void) tcp_output(tp); + tp->t_force = 0; + break; + + /* + * Keep-alive timer went off; send something + * or drop connection if idle for too long. + */ + case TCPT_KEEP: + tcpstat.tcps_keeptimeo++; + if (tp->t_state < TCPS_ESTABLISHED) + goto dropit; + if ((always_keepalive || + tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) && + tp->t_state <= TCPS_CLOSING) { + if (tp->t_idle >= tcp_keepidle + tcp_maxidle) + goto dropit; + /* + * Send a packet designed to force a response + * if the peer is up and reachable: + * either an ACK if the connection is still alive, + * or an RST if the peer has closed the connection + * due to timeout or reboot. + * Using sequence number tp->snd_una-1 + * causes the transmitted zero-length segment + * to lie outside the receive window; + * by the protocol spec, this requires the + * correspondent TCP to respond. + */ + tcpstat.tcps_keepprobe++; +#if TCP_COMPAT_42 + /* + * The keepalive packet must have nonzero length + * to get a 4.2 host to respond. 
+ */ +#if INET6 + if (isipv6) + tcp_respond(tp, (void *)&tp->t_template->tt_i6, + &tp->t_template->tt_t, + (struct mbuf *)NULL, + tp->rcv_nxt - 1, tp->snd_una - 1, + 0, isipv6); + else +#endif /* INET6 */ + tcp_respond(tp, (void *)&tp->t_template->tt_i, + &tp->t_template->tt_t, (struct mbuf *)NULL, + tp->rcv_nxt - 1, tp->snd_una - 1, 0, + isipv6); +#else +#if INET6 + if (isipv6) + tcp_respond(tp, (void *)&tp->t_template->tt_i6, + &tp->t_template->tt_t, + (struct mbuf *)NULL, tp->rcv_nxt, + tp->snd_una - 1, 0, isipv6); + else +#endif /* INET6 */ + tcp_respond(tp, (void *)&tp->t_template->tt_i, + &tp->t_template->tt_t, (struct mbuf *)NULL, + tp->rcv_nxt, tp->snd_una - 1, 0, isipv6); +#endif + tp->t_timer[TCPT_KEEP] = tcp_keepintvl; + } else + tp->t_timer[TCPT_KEEP] = tcp_keepidle; + break; + dropit: + tcpstat.tcps_keepdrops++; + so_tmp = tp->t_inpcb->inp_socket; + tp = tcp_drop(tp, ETIMEDOUT); + postevent(so_tmp, 0, EV_TIMEOUT); + break; + } + return (tp); +} diff --git a/bsd/netinet/tcp_timer.h b/bsd/netinet/tcp_timer.h new file mode 100644 index 000000000..03d29b83c --- /dev/null +++ b/bsd/netinet/tcp_timer.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tcp_timer.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_TCP_TIMER_H_ +#define _NETINET_TCP_TIMER_H_ + +/* + * Definitions of the TCP timers. These timers are counted + * down PR_SLOWHZ times a second. + */ +#define TCPT_NTIMERS 4 + +#define TCPT_REXMT 0 /* retransmit */ +#define TCPT_PERSIST 1 /* retransmit persistence */ +#define TCPT_KEEP 2 /* keep alive */ +#define TCPT_2MSL 3 /* 2*msl quiet time timer */ + +/* + * The TCPT_REXMT timer is used to force retransmissions. + * The TCP has the TCPT_REXMT timer set whenever segments + * have been sent for which ACKs are expected but not yet + * received. If an ACK is received which advances tp->snd_una, + * then the retransmit timer is cleared (if there are no more + * outstanding segments) or reset to the base value (if there + * are more ACKs expected). Whenever the retransmit timer goes off, + * we retransmit one unacknowledged segment, and do a backoff + * on the retransmit timer. + * + * The TCPT_PERSIST timer is used to keep window size information + * flowing even if the window goes shut. If all previous transmissions + * have been acknowledged (so that there are no retransmissions in progress), + * and the window is too small to bother sending anything, then we start + * the TCPT_PERSIST timer. When it expires, if the window is nonzero, + * we go to transmit state. Otherwise, at intervals send a single byte + * into the peer's window to force him to update our window information. + * We do this at most as often as TCPT_PERSMIN time intervals, + * but no more frequently than the current estimate of round-trip + * packet time. The TCPT_PERSIST timer is cleared whenever we receive + * a window update from the peer. + * + * The TCPT_KEEP timer is used to keep connections alive. If an + * connection is idle (no segments received) for TCPTV_KEEP_INIT amount of time, + * but not yet established, then we drop the connection. 
Once the connection + * is established, if the connection is idle for TCPTV_KEEP_IDLE time + * (and keepalives have been enabled on the socket), we begin to probe + * the connection. We force the peer to send us a segment by sending: + * + * This segment is (deliberately) outside the window, and should elicit + * an ack segment in response from the peer. If, despite the TCPT_KEEP + * initiated segments we cannot elicit a response from a peer in TCPT_MAXIDLE + * amount of time probing, then we drop the connection. + */ + +/* + * Time constants. + */ +#define TCPTV_MSL ( 30*PR_SLOWHZ) /* max seg lifetime (hah!) */ +#define TCPTV_SRTTBASE 0 /* base roundtrip time; + if 0, no idea yet */ +#define TCPTV_RTOBASE ( 3*PR_SLOWHZ) /* assumed RTO if no info */ +#define TCPTV_SRTTDFLT ( 3*PR_SLOWHZ) /* assumed RTT if no info */ + +#define TCPTV_PERSMIN ( 5*PR_SLOWHZ) /* retransmit persistence */ +#define TCPTV_PERSMAX ( 60*PR_SLOWHZ) /* maximum persist interval */ + +#define TCPTV_KEEP_INIT ( 75*PR_SLOWHZ) /* initial connect keep alive */ +#define TCPTV_KEEP_IDLE (120*60*PR_SLOWHZ) /* dflt time before probing */ +#define TCPTV_KEEPINTVL ( 75*PR_SLOWHZ) /* default probe interval */ +#define TCPTV_KEEPCNT 8 /* max probes before drop */ + +#define TCPTV_MIN ( 1*PR_SLOWHZ) /* minimum allowable value */ +#define TCPTV_REXMTMAX ( 64*PR_SLOWHZ) /* max allowable REXMT value */ + +#define TCPTV_TWTRUNC 8 /* RTO factor to truncate TW */ + +#define TCP_LINGERTIME 120 /* linger at most 2 minutes */ + +#define TCP_MAXRXTSHIFT 12 /* maximum retransmits */ + +#ifdef TCPTIMERS +static char *tcptimers[] = + { "REXMT", "PERSIST", "KEEP", "2MSL" }; +#endif + +/* + * Force a time value to be in a certain range. 
+ */ +#define TCPT_RANGESET(tv, value, tvmin, tvmax) { \ + (tv) = (value); \ + if ((u_long)(tv) < (u_long)(tvmin)) \ + (tv) = (tvmin); \ + else if ((u_long)(tv) > (u_long)(tvmax)) \ + (tv) = (tvmax); \ +} + +#ifdef KERNEL +extern int tcp_keepinit; /* time to establish connection */ +extern int tcp_keepidle; /* time before keepalive probes begin */ +extern int tcp_maxidle; /* time to drop after starting probes */ +extern int tcp_ttl; /* time to live for TCP segs */ +extern int tcp_backoff[]; +#endif + +#endif diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c new file mode 100644 index 000000000..75458f08a --- /dev/null +++ b/bsd/netinet/tcp_usrreq.c @@ -0,0 +1,1136 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * From: @(#)tcp_usrreq.c 8.2 (Berkeley) 1/3/94 + */ + +#if ISFB31 +#include "opt_tcpdebug.h" +#endif + +#include +#include +#include +#include +#include +#if INET6 +#include +#endif /* INET6 */ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#if INET6 +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include +#if TCPDEBUG +#include +#endif + +#if IPSEC +#include +#endif /*IPSEC*/ + +/* + * TCP protocol interface to socket abstraction. + */ +extern char *tcpstates[]; /* XXX ??? */ + +static int tcp_attach __P((struct socket *, struct proc *)); +static int tcp_connect __P((struct tcpcb *, struct sockaddr *, + struct proc *)); +#if INET6 +static int tcp6_connect __P((struct tcpcb *, struct sockaddr *, + struct proc *)); +#endif /* INET6 */ +static struct tcpcb * tcp_disconnect __P((struct tcpcb *)); +static struct tcpcb * + tcp_usrclosed __P((struct tcpcb *)); + +#if TCPDEBUG +#define TCPDEBUG0 int ostate +#define TCPDEBUG1() ostate = tp ? tp->t_state : 0 +#define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \ + tcp_trace(TA_USER, ostate, tp, 0, req) +#else +#define TCPDEBUG0 +#define TCPDEBUG1() +#define TCPDEBUG2(req) +#endif + +/* + * TCP attaches to socket via pru_attach(), reserving space, + * and an internet control block. + */ +static int +tcp_usr_attach(struct socket *so, int proto, struct proc *p) +{ + int s = splnet(); + int error; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp = 0; + TCPDEBUG0; + + TCPDEBUG1(); + if (inp) { + error = EISCONN; + goto out; + } + + error = tcp_attach(so, p); + if (error) + goto out; + + if ((so->so_options & SO_LINGER) && so->so_linger == 0) + so->so_linger = TCP_LINGERTIME * hz; + tp = sototcpcb(so); +out: + TCPDEBUG2(PRU_ATTACH); + splx(s); + return error; +} + +/* + * pru_detach() detaches the TCP protocol from the socket. 
+ * If the protocol state is non-embryonic, then can't + * do this directly: have to initiate a pru_disconnect(), + * which may finish later; embryonic TCB's can just + * be discarded here. + */ +static int +tcp_usr_detach(struct socket *so) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + TCPDEBUG0; + + if (inp == 0) { + splx(s); + return EINVAL; /* XXX */ + } + tp = intotcpcb(inp); + /* In case we got disconnected from the peer */ + if (tp == 0) + goto out; + TCPDEBUG1(); + tp = tcp_disconnect(tp); +out: + TCPDEBUG2(PRU_DETACH); + splx(s); + return error; +} + +#define COMMON_START() TCPDEBUG0; \ + do { \ + if (inp == 0) { \ + splx(s); \ + return EINVAL; \ + } \ + tp = intotcpcb(inp); \ + TCPDEBUG1(); \ + } while(0) + +#define COMMON_END(req) out: TCPDEBUG2(req); splx(s); return error; goto out + + +/* + * Give the socket an address. + */ +static int +tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + struct sockaddr_in *sinp; + + COMMON_START(); + + /* + * Must check for multicast addresses and disallow binding + * to them. + */ + sinp = (struct sockaddr_in *)nam; + if (sinp->sin_family == AF_INET && + IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) { + error = EAFNOSUPPORT; + goto out; + } + error = in_pcbbind(inp, nam, p); + if (error) + goto out; + COMMON_END(PRU_BIND); + +} + +#if INET6 +static int +tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + struct sockaddr_in6 *sin6p; + + COMMON_START(); + + /* + * Must check for multicast addresses and disallow binding + * to them. 
+ */ + sin6p = (struct sockaddr_in6 *)nam; + if (sin6p->sin6_family == AF_INET6 && + IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) { + error = EAFNOSUPPORT; + goto out; + } + inp->inp_vflag &= ~INP_IPV4; + inp->inp_vflag |= INP_IPV6; + if (ip6_mapped_addr_on && (inp->inp_flags & IN6P_BINDV6ONLY) == NULL) { + + if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) + inp->inp_vflag |= INP_IPV4; + else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { + struct sockaddr_in sin; + + in6_sin6_2_sin(&sin, sin6p); + inp->inp_vflag |= INP_IPV4; + inp->inp_vflag &= ~INP_IPV6; + error = in_pcbbind(inp, (struct sockaddr *)&sin, p); + goto out; + } + } + error = in6_pcbbind(inp, nam, p); + COMMON_END(PRU_BIND); +} +#endif /* INET6 */ + +/* + * Prepare to accept connections. + */ +static int +tcp_usr_listen(struct socket *so, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + if (inp->inp_lport == 0) + error = in_pcbbind(inp, (struct sockaddr *)0, p); + if (error == 0) + tp->t_state = TCPS_LISTEN; + COMMON_END(PRU_LISTEN); +} + +#if INET6 +static int +tcp6_usr_listen(struct socket *so, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + if (inp->inp_lport == 0) { + inp->inp_vflag &= ~INP_IPV4; + if (ip6_mapped_addr_on && + (inp->inp_flags & IN6P_BINDV6ONLY) == NULL) + inp->inp_vflag |= INP_IPV4; + error = in6_pcbbind(inp, (struct sockaddr *)0, p); + } + if (error == 0) + tp->t_state = TCPS_LISTEN; + COMMON_END(PRU_LISTEN); +} +#endif /* INET6 */ + +/* + * Initiate connection to peer. + * Create a template for use in transmissions on this connection. + * Enter SYN_SENT state, and mark socket as connecting. + * Start keep-alive timer, and seed output sequence space. + * Send initial segment on connection. 
+ */ +static int +tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + struct sockaddr_in *sinp; + + COMMON_START(); + + /* + * Must disallow TCP ``connections'' to multicast addresses. + */ + sinp = (struct sockaddr_in *)nam; + if (sinp->sin_family == AF_INET + && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) { + error = EAFNOSUPPORT; + goto out; + } + + if ((error = tcp_connect(tp, nam, p)) != 0) + goto out; + error = tcp_output(tp); + COMMON_END(PRU_CONNECT); +} + +#if INET6 +static int +tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + struct sockaddr_in6 *sin6p; + + COMMON_START(); + + /* + * Must disallow TCP ``connections'' to multicast addresses. + */ + sin6p = (struct sockaddr_in6 *)nam; + if (sin6p->sin6_family == AF_INET6 + && IN6_IS_ADDR_MULTICAST(&sin6p->sin6_addr)) { + error = EAFNOSUPPORT; + goto out; + } + inp->inp_vflag &= ~INP_IPV4; + inp->inp_vflag |= INP_IPV6; + if (ip6_mapped_addr_on && + IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { + struct sockaddr_in sin; + + in6_sin6_2_sin(&sin, sin6p); + inp->inp_vflag |= INP_IPV4; + inp->inp_vflag &= ~INP_IPV6; + if ((error = tcp_connect(tp, (struct sockaddr *)&sin, p)) != 0) + goto out; + error = tcp_output(tp); + goto out; + } + if ((error = tcp6_connect(tp, nam, p)) != 0) + goto out; + error = tcp_output(tp); + if (error) + goto out; + if (ip6_mapped_addr_on) + inp->inp_vflag |= INP_IPV6; + COMMON_END(PRU_CONNECT); +} +#endif /* INET6 */ + +/* + * Initiate disconnect from peer. + * If connection never passed embryonic stage, just drop; + * else if don't need to let data drain, then can just drop anyways, + * else have to begin TCP shutdown process: mark socket disconnecting, + * drain unread data, state switch to reflect user close, and + * send segment (e.g. 
FIN) to peer. Socket will be really disconnected + * when peer sends FIN and acks ours. + * + * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB. + */ +static int +tcp_usr_disconnect(struct socket *so) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + /* In case we got disconnected from the peer */ + if (tp == 0) + goto out; + tp = tcp_disconnect(tp); + COMMON_END(PRU_DISCONNECT); +} + +/* + * Accept a connection. Essentially all the work is + * done at higher levels; just return the address + * of the peer, storing through addr. + */ +static int +tcp_usr_accept(struct socket *so, struct sockaddr **nam) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + in_setpeeraddr(so, nam); + COMMON_END(PRU_ACCEPT); +} + +#if INET6 +static int +tcp6_usr_accept(struct socket *so, struct sockaddr **nam) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + in6_mapped_peeraddr(so, nam); + COMMON_END(PRU_ACCEPT); +} +#endif /* INET6 */ + +/* + * Mark the connection as being incapable of further output. + */ +static int +tcp_usr_shutdown(struct socket *so) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + socantsendmore(so); + /* In case we got disconnected from the peer */ + if (tp == 0) + goto out; + tp = tcp_usrclosed(tp); + if (tp) + error = tcp_output(tp); + COMMON_END(PRU_SHUTDOWN); +} + +/* + * After a receive, possibly send window update to peer. 
+ */ +static int +tcp_usr_rcvd(struct socket *so, int flags) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + /* In case we got disconnected from the peer */ + if (tp == 0) + goto out; + tcp_output(tp); + COMMON_END(PRU_RCVD); +} + +/* + * Do a send by putting data in output queue and updating urgent + * marker if URG set. Possibly send more data. + */ +static int +tcp_usr_send(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *nam, struct mbuf *control, struct proc *p) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; +#if INET6 + int isipv6; +#endif /* INET6 */ + + COMMON_START(); + if (control && control->m_len) { + m_freem(control); /* XXX shouldn't caller do this??? */ + if (m) + m_freem(m); + error = EINVAL; + goto out; + } + +#if INET6 + isipv6 = nam && nam->sa_family == AF_INET6; +#endif /* INET6 */ + + if(!(flags & PRUS_OOB)) { + sbappend(&so->so_snd, m); + if (nam && tp->t_state < TCPS_SYN_SENT) { + /* + * Do implied connect if not yet connected, + * initialize window to default value, and + * initialize maxseg/maxopd using peer's cached + * MSS. + */ +#if INET6 + if (isipv6) + error = tcp6_connect(tp, nam, p); + else +#endif /* INET6 */ + error = tcp_connect(tp, nam, p); + if (error) + goto out; + tp->snd_wnd = TTCP_CLIENT_SND_WND; + tcp_mss(tp, -1, isipv6); + } + + if (flags & PRUS_EOF) { + /* + * Close the send side of the connection after + * the data is sent. + */ + socantsendmore(so); + tp = tcp_usrclosed(tp); + } + if (tp != NULL) { + if (flags & PRUS_MORETOCOME) + tp->t_flags |= TF_MORETOCOME; + error = tcp_output(tp); + if (flags & PRUS_MORETOCOME) + tp->t_flags &= ~TF_MORETOCOME; + } + } else { + if (sbspace(&so->so_snd) < -512) { + m_freem(m); + error = ENOBUFS; + goto out; + } + /* + * According to RFC961 (Assigned Protocols), + * the urgent pointer points to the last octet + * of urgent data. 
We continue, however, + * to consider it to indicate the first octet + * of data past the urgent section. + * Otherwise, snd_up should be one lower. + */ + sbappend(&so->so_snd, m); + if (nam && tp->t_state < TCPS_SYN_SENT) { + /* + * Do implied connect if not yet connected, + * initialize window to default value, and + * initialize maxseg/maxopd using peer's cached + * MSS. + */ +#if INET6 + if (isipv6) + error = tcp6_connect(tp, nam, p); + else +#endif /* INET6 */ + error = tcp_connect(tp, nam, p); + if (error) + goto out; + tp->snd_wnd = TTCP_CLIENT_SND_WND; + tcp_mss(tp, -1, isipv6); + } + tp->snd_up = tp->snd_una + so->so_snd.sb_cc; + tp->t_force = 1; + error = tcp_output(tp); + tp->t_force = 0; + } + COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB : + ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND)); +} + +/* + * Abort the TCP. + */ +static int +tcp_usr_abort(struct socket *so) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + /* In case we got disconnected from the peer */ + if (tp == 0) + goto out; + tp = tcp_drop(tp, ECONNABORTED); + COMMON_END(PRU_ABORT); +} + +/* + * Receive out-of-band data. 
+ */ +static int +tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags) +{ + int s = splnet(); + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct tcpcb *tp; + + COMMON_START(); + if ((so->so_oobmark == 0 && + (so->so_state & SS_RCVATMARK) == 0) || + so->so_options & SO_OOBINLINE || + tp->t_oobflags & TCPOOB_HADDATA) { + error = EINVAL; + goto out; + } + if ((tp->t_oobflags & TCPOOB_HAVEDATA) == 0) { + error = EWOULDBLOCK; + goto out; + } + m->m_len = 1; + *mtod(m, caddr_t) = tp->t_iobc; + if ((flags & MSG_PEEK) == 0) + tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA); + COMMON_END(PRU_RCVOOB); +} + +/* xxx - should be const */ +struct pr_usrreqs tcp_usrreqs = { + tcp_usr_abort, tcp_usr_accept, tcp_usr_attach, tcp_usr_bind, + tcp_usr_connect, pru_connect2_notsupp, in_control, tcp_usr_detach, + tcp_usr_disconnect, tcp_usr_listen, in_setpeeraddr, tcp_usr_rcvd, + tcp_usr_rcvoob, tcp_usr_send, pru_sense_null, tcp_usr_shutdown, + in_setsockaddr, sosend, soreceive, sopoll +}; + +#if INET6 +struct pr_usrreqs tcp6_usrreqs = { + tcp_usr_abort, tcp6_usr_accept, tcp_usr_attach, tcp6_usr_bind, + tcp6_usr_connect, pru_connect2_notsupp, in6_control, tcp_usr_detach, + tcp_usr_disconnect, tcp6_usr_listen, in6_mapped_peeraddr, tcp_usr_rcvd, + tcp_usr_rcvoob, tcp_usr_send, pru_sense_null, tcp_usr_shutdown, + in6_mapped_sockaddr, sosend, soreceive, sopoll +}; +#endif /* INET6 */ + +/* + * Common subroutine to open a TCP connection to remote host specified + * by struct sockaddr_in in mbuf *nam. Call in_pcbbind to assign a local + * port number if needed. Call in_pcbladdr to do the routing and to choose + * a local host address (interface). If there is an existing incarnation + * of the same connection in TIME-WAIT state and if the remote host was + * sending CC options and if the connection duration was < MSL, then + * truncate the previous TIME-WAIT state and proceed. + * Initialize connection parameters and enter SYN-SENT state. 
+ */
+/*
+ * Initiate an active IPv4 TCP connection for `tp' toward the address in
+ * `nam'.  Binds an ephemeral local port if the socket is unbound, guards
+ * against a lingering TIME_WAIT incarnation of the same 4-tuple, builds
+ * the packet template, and leaves the tcpcb in SYN_SENT with sequence
+ * state initialized; the SYN itself is sent later by tcp_output().
+ * Returns 0 or an errno (EADDRINUSE, ENOBUFS, ...).
+ */
+static int
+tcp_connect(tp, nam, p)
+	register struct tcpcb *tp;
+	struct sockaddr *nam;
+	struct proc *p;
+{
+	struct inpcb *inp = tp->t_inpcb, *oinp;
+	struct socket *so = inp->inp_socket;
+	struct tcpcb *otp;
+	struct sockaddr_in *sin = (struct sockaddr_in *)nam;
+	struct sockaddr_in *ifaddr;
+	struct rmxp_tao *taop;
+	struct rmxp_tao tao_noncached;
+	int error;
+
+	/* Pick an ephemeral local port if the socket is not yet bound. */
+	if (inp->inp_lport == 0) {
+		error = in_pcbbind(inp, (struct sockaddr *)0, p);
+		if (error)
+			return error;
+	}
+
+	/*
+	 * Cannot simply call in_pcbconnect, because there might be an
+	 * earlier incarnation of this same connection still in
+	 * TIME_WAIT state, creating an ADDRINUSE error.
+	 */
+	error = in_pcbladdr(inp, nam, &ifaddr);
+	if (error)
+		return error;
+	oinp = in_pcblookup_hash(inp->inp_pcbinfo,
+	    sin->sin_addr, sin->sin_port,
+	    inp->inp_laddr.s_addr != INADDR_ANY ? inp->inp_laddr
+	    : ifaddr->sin_addr,
+	    inp->inp_lport, 0, NULL);
+	if (oinp) {
+		/*
+		 * An old TIME_WAIT pcb that negotiated CC (RFC 1644) and
+		 * is younger than one MSL may be recycled; anything else
+		 * is a genuine address collision.
+		 */
+		if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
+		    otp->t_state == TCPS_TIME_WAIT &&
+		    otp->t_duration < TCPTV_MSL &&
+		    (otp->t_flags & TF_RCVD_CC))
+			otp = tcp_close(otp);
+		else
+			return EADDRINUSE;
+	}
+	if (inp->inp_laddr.s_addr == INADDR_ANY)
+		inp->inp_laddr = ifaddr->sin_addr;
+	inp->inp_faddr = sin->sin_addr;
+	inp->inp_fport = sin->sin_port;
+	in_pcbrehash(inp);
+
+	tp->t_template = tcp_template(tp);
+	if (tp->t_template == 0) {
+		in_pcbdisconnect(inp);
+		return ENOBUFS;
+	}
+
+	/* Compute window scaling to request. */
+	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+	    (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.sb_hiwat)
+		tp->request_r_scale++;
+
+	soisconnecting(so);
+	tcpstat.tcps_connattempt++;
+	tp->t_state = TCPS_SYN_SENT;
+	tp->t_timer[TCPT_KEEP] = tcp_keepinit;
+	tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2;
+	tcp_sendseqinit(tp);
+
+	/*
+	 * Generate a CC value for this connection and
+	 * check whether CC or CCnew should be used.
+	 */
+	if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) {
+		taop = &tao_noncached;
+		bzero(taop, sizeof(*taop));
+	}
+
+	tp->cc_send = CC_INC(tcp_ccgen);
+	if (taop->tao_ccsent != 0 &&
+	    CC_GEQ(tp->cc_send, taop->tao_ccsent)) {
+		taop->tao_ccsent = tp->cc_send;
+	} else {
+		taop->tao_ccsent = 0;
+		tp->t_flags |= TF_SENDCCNEW;
+	}
+
+	return 0;
+}
+
+#if INET6
+/*
+ * IPv6 flavor of tcp_connect(): identical TIME_WAIT-recycling and
+ * CC/CCnew logic, operating on the in6p fields and a sockaddr_in6.
+ */
+static int
+tcp6_connect(tp, nam, p)
+	register struct tcpcb *tp;
+	struct sockaddr *nam;
+	struct proc *p;
+{
+	struct inpcb *inp = tp->t_inpcb, *oinp;
+	struct socket *so = inp->inp_socket;
+	struct tcpcb *otp;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam;
+	struct in6_addr *addr6;
+	struct rmxp_tao *taop;
+	struct rmxp_tao tao_noncached;
+	int error;
+
+	/* Pick an ephemeral local port if the socket is not yet bound. */
+	if (inp->inp_lport == 0) {
+		error = in6_pcbbind(inp, (struct sockaddr *)0, p);
+		if (error)
+			return error;
+	}
+
+	/*
+	 * Cannot simply call in_pcbconnect, because there might be an
+	 * earlier incarnation of this same connection still in
+	 * TIME_WAIT state, creating an ADDRINUSE error.
+	 */
+	error = in6_pcbladdr(inp, nam, &addr6);
+	if (error)
+		return error;
+	oinp = in6_pcblookup_hash(inp->inp_pcbinfo,
+	    &sin6->sin6_addr, sin6->sin6_port,
+	    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)
+	    ? addr6
+	    : &inp->in6p_laddr,
+	    inp->inp_lport, 0, NULL);
+	if (oinp) {
+		/* Same TIME_WAIT recycling rule as the IPv4 path above. */
+		if (oinp != inp && (otp = intotcpcb(oinp)) != NULL &&
+		    otp->t_state == TCPS_TIME_WAIT &&
+		    otp->t_duration < TCPTV_MSL &&
+		    (otp->t_flags & TF_RCVD_CC))
+			otp = tcp_close(otp);
+		else
+			return EADDRINUSE;
+	}
+	if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
+		inp->in6p_laddr = *addr6;
+	inp->in6p_faddr = sin6->sin6_addr;
+	inp->inp_fport = sin6->sin6_port;
+	/*
+	 * xxx kazu flowlabel is necessary for connect?
+	 * but if this line is missing, the garbage value remains.
+	 */
+	inp->in6p_flowinfo = sin6->sin6_flowinfo;
+
+	in_pcbrehash(inp);
+
+	tp->t_template = tcp_template(tp);
+	if (tp->t_template == 0) {
+		in6_pcbdisconnect(inp);
+		return ENOBUFS;
+	}
+
+	/* Compute window scaling to request. */
+	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
+	    (TCP_MAXWIN << tp->request_r_scale) < so->so_rcv.sb_hiwat)
+		tp->request_r_scale++;
+
+	soisconnecting(so);
+	tcpstat.tcps_connattempt++;
+	tp->t_state = TCPS_SYN_SENT;
+	tp->t_timer[TCPT_KEEP] = tcp_keepinit;
+	tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2;
+	tcp_sendseqinit(tp);
+
+	/*
+	 * Generate a CC value for this connection and
+	 * check whether CC or CCnew should be used.
+	 */
+	if ((taop = tcp_gettaocache(tp->t_inpcb)) == NULL) {
+		taop = &tao_noncached;
+		bzero(taop, sizeof(*taop));
+	}
+
+	tp->cc_send = CC_INC(tcp_ccgen);
+	if (taop->tao_ccsent != 0 &&
+	    CC_GEQ(tp->cc_send, taop->tao_ccsent)) {
+		taop->tao_ccsent = tp->cc_send;
+	} else {
+		taop->tao_ccsent = 0;
+		tp->t_flags |= TF_SENDCCNEW;
+	}
+
+	return 0;
+}
+#endif /* INET6 */
+
+/*
+ * The new sockopt interface makes it possible for us to block in the
+ * copyin/out step (if we take a page fault).  Taking a page fault at
+ * splnet() is probably a Bad Thing.  (Since sockets and pcbs both now
+ * use TSM, there probably isn't any need for this function to run at
+ * splnet() any more.  This needs more examination.)
+ */
+/*
+ * Handle get/set of TCP-level socket options (TCP_NODELAY, TCP_MAXSEG,
+ * TCP_NOOPT, TCP_NOPUSH).  Options at any other level are punted to
+ * ip_ctloutput() or ip6_ctloutput().  Runs at splnet() -- see the
+ * caveat in the comment above.  Returns 0 or an errno.
+ */
+int
+tcp_ctloutput(so, sopt)
+	struct socket *so;
+	struct sockopt *sopt;
+{
+	int error, opt, optval, s;
+	struct inpcb *inp;
+	struct tcpcb *tp;
+
+	error = 0;
+	s = splnet();		/* XXX */
+	inp = sotoinpcb(so);
+	if (inp == NULL) {
+		splx(s);
+		return (ECONNRESET);
+	}
+	if (sopt->sopt_level != IPPROTO_TCP) {
+#if INET6
+		if (INP_CHECK_SOCKAF(so, AF_INET6))
+			error = ip6_ctloutput(so, sopt);
+		else
+#endif /* INET6 */
+		error = ip_ctloutput(so, sopt);
+		splx(s);
+		return (error);
+	}
+	tp = intotcpcb(inp);
+	if (tp == NULL) {
+		splx(s);
+		return (ECONNRESET);
+	}
+
+	switch (sopt->sopt_dir) {
+	case SOPT_SET:
+		switch (sopt->sopt_name) {
+		case TCP_NODELAY:
+		case TCP_NOOPT:
+		case TCP_NOPUSH:
+			error = sooptcopyin(sopt, &optval, sizeof optval,
+			    sizeof optval);
+			if (error)
+				break;
+
+			/* Map the option name onto its t_flags bit. */
+			switch (sopt->sopt_name) {
+			case TCP_NODELAY:
+				opt = TF_NODELAY;
+				break;
+			case TCP_NOOPT:
+				opt = TF_NOOPT;
+				break;
+			case TCP_NOPUSH:
+				opt = TF_NOPUSH;
+				break;
+			default:
+				opt = 0; /* dead code to fool gcc */
+				break;
+			}
+
+			if (optval)
+				tp->t_flags |= opt;
+			else
+				tp->t_flags &= ~opt;
+			break;
+
+		case TCP_MAXSEG:
+			error = sooptcopyin(sopt, &optval, sizeof optval,
+			    sizeof optval);
+			if (error)
+				break;
+
+			/* The MSS may only be lowered, never raised. */
+			if (optval > 0 && optval <= tp->t_maxseg)
+				tp->t_maxseg = optval;
+			else
+				error = EINVAL;
+			break;
+
+		default:
+			error = ENOPROTOOPT;
+			break;
+		}
+		break;
+
+	case SOPT_GET:
+		switch (sopt->sopt_name) {
+		case TCP_NODELAY:
+			optval = tp->t_flags & TF_NODELAY;
+			break;
+		case TCP_MAXSEG:
+			optval = tp->t_maxseg;
+			break;
+		case TCP_NOOPT:
+			optval = tp->t_flags & TF_NOOPT;
+			break;
+		case TCP_NOPUSH:
+			optval = tp->t_flags & TF_NOPUSH;
+			break;
+		default:
+			error = ENOPROTOOPT;
+			break;
+		}
+		if (error == 0)
+			error = sooptcopyout(sopt, &optval, sizeof optval);
+		break;
+	}
+	splx(s);
+	return (error);
+}
+
+/*
+ * tcp_sendspace and tcp_recvspace are the default send and receive window
+ * sizes, respectively.
These are obsolescent (this information should
+ * be set by the route).
+ */
+/*
+ * NOTE(review): both variables are declared u_long but registered with
+ * SYSCTL_INT -- harmless while int and long are the same width, but
+ * verify if either type is ever widened.
+ */
+u_long	tcp_sendspace = 1024*16;
+SYSCTL_INT(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace,
+    CTLFLAG_RW, &tcp_sendspace , 0, "");
+u_long	tcp_recvspace = 1024*16;
+SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace,
+    CTLFLAG_RW, &tcp_recvspace , 0, "");
+
+/*
+ * Attach TCP protocol to socket, allocating
+ * internet protocol control block, tcp control block,
+ * buffer space, and entering LISTEN state if to accept connections.
+ * On any failure every structure allocated so far is torn down again.
+ */
+static int
+tcp_attach(so, p)
+	struct socket *so;
+	struct proc *p;
+{
+	register struct tcpcb *tp;
+	struct inpcb *inp;
+	int error;
+#if INET6
+	int isipv6 = INP_CHECK_SOCKAF(so, AF_INET) == NULL;
+#endif /* INET6 */
+
+	/* Reserve default buffer space unless the caller already did. */
+	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+		error = soreserve(so, tcp_sendspace, tcp_recvspace);
+		if (error)
+			return (error);
+	}
+	error = in_pcballoc(so, &tcbinfo, p);
+	if (error)
+		return (error);
+	inp = sotoinpcb(so);
+#if IPSEC
+	error = ipsec_init_policy(so, &inp->inp_sp);
+	if (error) {
+#if INET6
+		if (isipv6)
+			in6_pcbdetach(inp);
+		else
+#endif /* INET6 */
+		in_pcbdetach(inp);
+		return (error);
+	}
+#endif /*IPSEC*/
+#if INET6
+	if (isipv6) {
+		inp->inp_vflag |= INP_IPV6;
+		inp->in6p_hops = -1;	/* use kernel default */
+	}
+	else
+#endif /* INET6 */
+	inp->inp_vflag |= INP_IPV4;
+	tp = tcp_newtcpcb(inp);
+	if (tp == 0) {
+		int nofd = so->so_state & SS_NOFDREF;	/* XXX */
+
+		so->so_state &= ~SS_NOFDREF;	/* don't free the socket yet */
+#if INET6
+		if (isipv6)
+			in6_pcbdetach(inp);
+		else
+#endif /* INET6 */
+		in_pcbdetach(inp);
+		so->so_state |= nofd;
+		return (ENOBUFS);
+	}
+	tp->t_state = TCPS_CLOSED;
+	return (0);
+}
+
+/*
+ * Initiate (or continue) disconnect.
+ * If embryonic state, just send reset (once).
+ * If in ``let data drain'' option and linger null, just drop.
+ * Otherwise (hard), mark socket disconnecting and drop
+ * current input data; switch states based on user close, and
+ * send segment to peer (with FIN).
+ * Returns the (possibly freed) tcpcb, or NULL if it was closed.
+ */
+static struct tcpcb *
+tcp_disconnect(tp)
+	register struct tcpcb *tp;
+{
+	struct socket *so = tp->t_inpcb->inp_socket;
+
+	if (tp->t_state < TCPS_ESTABLISHED)
+		tp = tcp_close(tp);
+	else if ((so->so_options & SO_LINGER) && so->so_linger == 0)
+		tp = tcp_drop(tp, 0);
+	else {
+		soisdisconnecting(so);
+		sbflush(&so->so_rcv);
+		tp = tcp_usrclosed(tp);
+		if (tp)
+			(void) tcp_output(tp);
+	}
+	return (tp);
+}
+
+/*
+ * User issued close, and wish to trail through shutdown states:
+ * if never received SYN, just forget it.  If got a SYN from peer,
+ * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
+ * If already got a FIN from peer, then almost done; go to LAST_ACK
+ * state.  In all other cases, have already sent FIN to peer (e.g.
+ * after PRU_SHUTDOWN), and just have to play tedious game waiting
+ * for peer to send FIN or not respond to keep-alives, etc.
+ * We can let the user exit from the close as soon as the FIN is acked.
+ */
+static struct tcpcb *
+tcp_usrclosed(tp)
+	register struct tcpcb *tp;
+{
+
+	switch (tp->t_state) {
+
+	case TCPS_CLOSED:
+	case TCPS_LISTEN:
+		tp->t_state = TCPS_CLOSED;
+		tp = tcp_close(tp);
+		break;
+
+	case TCPS_SYN_SENT:
+	case TCPS_SYN_RECEIVED:
+		/* Connection not fully up yet: send FIN implicitly later. */
+		tp->t_flags |= TF_NEEDFIN;
+		break;
+
+	case TCPS_ESTABLISHED:
+		tp->t_state = TCPS_FIN_WAIT_1;
+		break;
+
+	case TCPS_CLOSE_WAIT:
+		tp->t_state = TCPS_LAST_ACK;
+		break;
+	}
+	if (tp && tp->t_state >= TCPS_FIN_WAIT_2) {
+		soisdisconnected(tp->t_inpcb->inp_socket);
+		/* To prevent the connection hanging in FIN_WAIT_2 forever.
*/ + if (tp->t_state == TCPS_FIN_WAIT_2) + tp->t_timer[TCPT_2MSL] = tcp_maxidle; + } + return (tp); +} + diff --git a/bsd/netinet/tcp_var.h b/bsd/netinet/tcp_var.h new file mode 100644 index 000000000..480ff21d0 --- /dev/null +++ b/bsd/netinet/tcp_var.h @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993, 1994, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tcp_var.h 8.4 (Berkeley) 5/24/95 + */ + +#ifndef _NETINET_TCP_VAR_H_ +#define _NETINET_TCP_VAR_H_ +#define N_TIME_WAIT_SLOTS 128 /* must be power of 2 */ + +/* + * Ip (reassembly or sequence) queue structures. + * + * XXX -- The following explains why the ipqe_m field is here, for TCP's use: + * We want to avoid doing m_pullup on incoming packets but that + * means avoiding dtom on the tcp reassembly code. That in turn means + * keeping an mbuf pointer in the reassembly queue (since we might + * have a cluster). As a quick hack, the source & destination + * port numbers (which are no longer needed once we've located the + * tcpcb) are overlayed with an mbuf pointer. 
+ */ +LIST_HEAD(ipqehead, ipqent); +struct ipqent { + LIST_ENTRY(ipqent) ipqe_q; + union { + struct ip *_ip; +#if INET6 + struct ipv6 *_ip6; +#endif + struct tcpiphdr *_tcp; + } _ipqe_u1; + struct mbuf *ipqe_m; /* mbuf contains packet */ + u_int8_t ipqe_mff; /* for IP fragmentation */ +}; +#define ipqe_ip _ipqe_u1._ip +#if INET6 +#define ipqe_ip6 _ipqe_u1._ip6 +#endif +#define ipqe_tcp _ipqe_u1._tcp +#define tcp6cb tcpcb /* for KAME src sync over BSD*'s */ + +#define TCP_DELACK_BITSET(hash_elem)\ +delack_bitmask[((hash_elem) >> 5)] |= 1 << ((hash_elem) & 0x1F) + +#define DELACK_BITMASK_ON 1 +#define DELACK_BITMASK_THRESH 300 + +/* + * Kernel variables for tcp. + */ + +/* + * Tcp control block, one per tcp; fields: + * Organized for 16 byte cacheline efficiency. + */ +struct tcpcb { + struct ipqehead segq; /* sequencing queue */ + int t_dupacks; /* consecutive dup acks recd */ + struct tcptemp *t_template; /* skeletal packet for transmit */ + + int t_timer[TCPT_NTIMERS]; /* tcp timers */ + + struct inpcb *t_inpcb; /* back pointer to internet pcb */ + int t_state; /* state of this connection */ + u_int t_flags; +#define TF_ACKNOW 0x00001 /* ack peer immediately */ +#define TF_DELACK 0x00002 /* ack, but try to delay it */ +#define TF_NODELAY 0x00004 /* don't delay packets to coalesce */ +#define TF_NOOPT 0x00008 /* don't use tcp options */ +#define TF_SENTFIN 0x00010 /* have sent FIN */ +#define TF_REQ_SCALE 0x00020 /* have/will request window scaling */ +#define TF_RCVD_SCALE 0x00040 /* other side has requested scaling */ +#define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ +#define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ +#define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ +#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ +#define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ +#define TF_NOPUSH 0x01000 /* don't push */ +#define TF_REQ_CC 0x02000 /* have/will request CC */ +#define TF_RCVD_CC 0x04000 /* a 
CC was received in SYN */ +#define TF_SENDCCNEW 0x08000 /* send CCnew instead of CC in SYN */ +#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */ + int t_force; /* 1 if forcing out a byte */ + + tcp_seq snd_una; /* send unacknowledged */ + tcp_seq snd_max; /* highest sequence number sent; + * used to recognize retransmits + */ + tcp_seq snd_nxt; /* send next */ + tcp_seq snd_up; /* send urgent pointer */ + + tcp_seq snd_wl1; /* window update seg seq number */ + tcp_seq snd_wl2; /* window update seg ack number */ + tcp_seq iss; /* initial send sequence number */ + tcp_seq irs; /* initial receive sequence number */ + + tcp_seq rcv_nxt; /* receive next */ + tcp_seq rcv_adv; /* advertised window */ + u_long rcv_wnd; /* receive window */ + tcp_seq rcv_up; /* receive urgent pointer */ + + u_long snd_wnd; /* send window */ + u_long snd_cwnd; /* congestion-controlled window */ + u_long snd_ssthresh; /* snd_cwnd size threshold for + * for slow start exponential to + * linear switch + */ + u_int t_maxopd; /* mss plus options */ + + u_int t_idle; /* inactivity time */ + u_long t_duration; /* connection duration */ + int t_rtt; /* round trip time */ + tcp_seq t_rtseq; /* sequence number being timed */ + + int t_rxtcur; /* current retransmit value */ + u_int t_maxseg; /* maximum segment size */ + int t_srtt; /* smoothed round-trip time */ + int t_rttvar; /* variance in round-trip time */ + + int t_rxtshift; /* log(2) of rexmt exp. 
backoff */ + u_int t_rttmin; /* minimum rtt allowed */ + u_long t_rttupdated; /* number of times rtt sampled */ + u_long max_sndwnd; /* largest window peer has offered */ + + int t_softerror; /* possible error not yet reported */ +/* out-of-band data */ + char t_oobflags; /* have some */ + char t_iobc; /* input character */ +#define TCPOOB_HAVEDATA 0x01 +#define TCPOOB_HADDATA 0x02 +/* RFC 1323 variables */ + u_char snd_scale; /* window scaling for send window */ + u_char rcv_scale; /* window scaling for recv window */ + u_char request_r_scale; /* pending window scaling */ + u_char requested_s_scale; + u_long ts_recent; /* timestamp echo data */ + + u_long ts_recent_age; /* when last updated */ + tcp_seq last_ack_sent; +/* RFC 1644 variables */ + tcp_cc cc_send; /* send connection count */ + tcp_cc cc_recv; /* receive connection count */ +}; + +/* + * Structure to hold TCP options that are only used during segment + * processing (in tcp_input), but not held in the tcpcb. + * It's basically used to reduce the number of parameters + * to tcp_dooptions. + */ +struct tcpopt { + u_long to_flag; /* which options are present */ +#define TOF_TS 0x0001 /* timestamp */ +#define TOF_CC 0x0002 /* CC and CCnew are exclusive */ +#define TOF_CCNEW 0x0004 +#define TOF_CCECHO 0x0008 + u_long to_tsval; + u_long to_tsecr; + tcp_cc to_cc; /* holds CC or CCnew */ + tcp_cc to_ccecho; + u_short to_maxseg; +}; + +/* + * The TAO cache entry which is stored in the protocol family specific + * portion of the route metrics. 
+ */ +struct rmxp_tao { + tcp_cc tao_cc; /* latest CC in valid SYN */ + tcp_cc tao_ccsent; /* latest CC sent to peer */ + u_short tao_mssopt; /* peer's cached MSS */ +#ifdef notyet + u_short tao_flags; /* cache status flags */ +#define TAOF_DONT 0x0001 /* peer doesn't understand rfc1644 */ +#define TAOF_OK 0x0002 /* peer does understand rfc1644 */ +#define TAOF_UNDEF 0 /* we don't know yet */ +#endif /* notyet */ +}; +#define rmx_taop(r) ((struct rmxp_tao *)(r).rmx_filler) + +#define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb) +#define sototcpcb(so) (intotcpcb(sotoinpcb(so))) + +/* + * The smoothed round-trip time and estimated variance + * are stored as fixed point numbers scaled by the values below. + * For convenience, these scales are also used in smoothing the average + * (smoothed = (1/scale)sample + ((scale-1)/scale)smoothed). + * With these scales, srtt has 3 bits to the right of the binary point, + * and thus an "ALPHA" of 0.875. rttvar has 2 bits to the right of the + * binary point, and is smoothed with an ALPHA of 0.75. + */ +#define TCP_RTT_SCALE 32 /* multiplier for srtt; 3 bits frac. */ +#define TCP_RTT_SHIFT 5 /* shift for srtt; 3 bits frac. */ +#define TCP_RTTVAR_SCALE 16 /* multiplier for rttvar; 2 bits */ +#define TCP_RTTVAR_SHIFT 4 /* shift for rttvar; 2 bits */ +#define TCP_DELTA_SHIFT 2 /* see tcp_input.c */ + +/* + * The initial retransmission should happen at rtt + 4 * rttvar. + * Because of the way we do the smoothing, srtt and rttvar + * will each average +1/2 tick of bias. When we compute + * the retransmit timer, we want 1/2 tick of rounding and + * 1 extra tick because of +-1/2 tick uncertainty in the + * firing of the timer. The bias will give us exactly the + * 1.5 tick we need. But, because the bias is + * statistical, we have to test that we don't drop below + * the minimum feasible timer (which is 2 ticks). 
+ * This version of the macro adapted from a paper by Lawrence + * Brakmo and Larry Peterson which outlines a problem caused + * by insufficient precision in the original implementation, + * which results in inappropriately large RTO values for very + * fast networks. + */ +#define TCP_REXMTVAL(tp) \ + max((tp)->t_rttmin, (((tp)->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)) \ + + (tp)->t_rttvar) >> TCP_DELTA_SHIFT) + +/* + * TCP statistics. + * Many of these should be kept per connection, + * but that's inconvenient at the moment. + */ +struct tcpstat { + u_long tcps_connattempt; /* connections initiated */ + u_long tcps_accepts; /* connections accepted */ + u_long tcps_connects; /* connections established */ + u_long tcps_drops; /* connections dropped */ + u_long tcps_conndrops; /* embryonic connections dropped */ + u_long tcps_closed; /* conn. closed (includes drops) */ + u_long tcps_segstimed; /* segs where we tried to get rtt */ + u_long tcps_rttupdated; /* times we succeeded */ + u_long tcps_delack; /* delayed acks sent */ + u_long tcps_timeoutdrop; /* conn. 
dropped in rxmt timeout */ + u_long tcps_rexmttimeo; /* retransmit timeouts */ + u_long tcps_persisttimeo; /* persist timeouts */ + u_long tcps_keeptimeo; /* keepalive timeouts */ + u_long tcps_keepprobe; /* keepalive probes sent */ + u_long tcps_keepdrops; /* connections dropped in keepalive */ + + u_long tcps_sndtotal; /* total packets sent */ + u_long tcps_sndpack; /* data packets sent */ + u_long tcps_sndbyte; /* data bytes sent */ + u_long tcps_sndrexmitpack; /* data packets retransmitted */ + u_long tcps_sndrexmitbyte; /* data bytes retransmitted */ + u_long tcps_sndacks; /* ack-only packets sent */ + u_long tcps_sndprobe; /* window probes sent */ + u_long tcps_sndurg; /* packets sent with URG only */ + u_long tcps_sndwinup; /* window update-only packets sent */ + u_long tcps_sndctrl; /* control (SYN|FIN|RST) packets sent */ + + u_long tcps_rcvtotal; /* total packets received */ + u_long tcps_rcvpack; /* packets received in sequence */ + u_long tcps_rcvbyte; /* bytes received in sequence */ + u_long tcps_rcvbadsum; /* packets received with ccksum errs */ + u_long tcps_rcvbadoff; /* packets received with bad offset */ + u_long tcps_rcvmemdrop; /* packets dropped for lack of memory */ + u_long tcps_rcvshort; /* packets received too short */ + u_long tcps_rcvduppack; /* duplicate-only packets received */ + u_long tcps_rcvdupbyte; /* duplicate-only bytes received */ + u_long tcps_rcvpartduppack; /* packets with some duplicate data */ + u_long tcps_rcvpartdupbyte; /* dup. bytes in part-dup. 
packets */ + u_long tcps_rcvoopack; /* out-of-order packets received */ + u_long tcps_rcvoobyte; /* out-of-order bytes received */ + u_long tcps_rcvpackafterwin; /* packets with data after window */ + u_long tcps_rcvbyteafterwin; /* bytes rcvd after window */ + u_long tcps_rcvafterclose; /* packets rcvd after "close" */ + u_long tcps_rcvwinprobe; /* rcvd window probe packets */ + u_long tcps_rcvdupack; /* rcvd duplicate acks */ + u_long tcps_rcvacktoomuch; /* rcvd acks for unsent data */ + u_long tcps_rcvackpack; /* rcvd ack packets */ + u_long tcps_rcvackbyte; /* bytes acked by rcvd acks */ + u_long tcps_rcvwinupd; /* rcvd window update packets */ + u_long tcps_pawsdrop; /* segments dropped due to PAWS */ + u_long tcps_predack; /* times hdr predict ok for acks */ + u_long tcps_preddat; /* times hdr predict ok for data pkts */ + u_long tcps_pcbcachemiss; + u_long tcps_cachedrtt; /* times cached RTT in route updated */ + u_long tcps_cachedrttvar; /* times cached rttvar updated */ + u_long tcps_cachedssthresh; /* times cached ssthresh updated */ + u_long tcps_usedrtt; /* times RTT initialized from route */ + u_long tcps_usedrttvar; /* times RTTVAR initialized from rt */ + u_long tcps_usedssthresh; /* times ssthresh initialized from rt*/ + u_long tcps_persistdrop; /* timeout in persist state */ + u_long tcps_badsyn; /* bogus SYN, e.g. premature ACK */ + u_long tcps_mturesent; /* resends due to MTU discovery */ + u_long tcps_listendrop; /* listen queue overflows */ +}; + +/* + * TCB structure exported to user-land via sysctl(3). + * Evil hack: declare only if in_pcb.h and sys/socketvar.h have been + * included. Not all of our clients do. 
+ */
+#if defined(_NETINET_IN_PCB_H_) && defined(_SYS_SOCKETVAR_H_)
+struct	xtcpcb {
+	size_t	xt_len;
+	struct	inpcb	xt_inp;
+	struct	tcpcb	xt_tp;
+	struct	xsocket	xt_socket;
+	u_quad_t	xt_alignment_hack;
+};
+#endif
+
+/*
+ * Names for TCP sysctl objects
+ */
+#define	TCPCTL_DO_RFC1323	1	/* use RFC-1323 extensions */
+#define	TCPCTL_DO_RFC1644	2	/* use RFC-1644 extensions */
+#define	TCPCTL_MSSDFLT	3	/* MSS default */
+#define TCPCTL_STATS		4	/* statistics (read-only) */
+#define	TCPCTL_RTTDFLT	5	/* default RTT estimate */
+#define	TCPCTL_KEEPIDLE	6	/* keepalive idle timer */
+#define	TCPCTL_KEEPINTVL	7	/* interval to send keepalives */
+#define	TCPCTL_SENDSPACE	8	/* send buffer space */
+#define	TCPCTL_RECVSPACE	9	/* receive buffer space */
+#define	TCPCTL_KEEPINIT	10	/* timeout for establishing a connection */
+#define	TCPCTL_PCBLIST		11	/* list of all outstanding PCBs */
+#define	TCPCTL_V6MSSDFLT	12	/* MSS default for IPv6 */
+#define	TCPCTL_MAXID		13
+
+#define TCPCTL_NAMES { \
+	{ 0, 0 }, \
+	{ "rfc1323", CTLTYPE_INT }, \
+	{ "rfc1644", CTLTYPE_INT }, \
+	{ "mssdflt", CTLTYPE_INT }, \
+	{ "stats", CTLTYPE_STRUCT }, \
+	{ "rttdflt", CTLTYPE_INT }, \
+	{ "keepidle", CTLTYPE_INT }, \
+	{ "keepintvl", CTLTYPE_INT }, \
+	{ "sendspace", CTLTYPE_INT }, \
+	{ "recvspace", CTLTYPE_INT }, \
+	{ "keepinit", CTLTYPE_INT }, \
+	{ "pcblist", CTLTYPE_STRUCT }, \
+	{ "v6mssdflt", CTLTYPE_INT }, \
+}
+
+#ifdef KERNEL
+#ifdef SYSCTL_DECL
+SYSCTL_DECL(_net_inet_tcp);
+#endif
+
+extern	struct inpcbhead tcb;	/* head of queue of active tcpcb's */
+extern	struct inpcbinfo tcbinfo;
+extern	struct tcpstat tcpstat;	/* tcp statistics */
+extern	int tcp_mssdflt;	/* XXX */
+extern	int tcp_v6mssdflt;	/* XXX */
+extern	u_long tcp_now;	/* for RFC 1323 timestamps */
+extern	int tcp_delack_enabled;
+
+void	tcp_canceltimers __P((struct tcpcb *));
+struct tcpcb *
+	tcp_close __P((struct tcpcb *));
+void	tcp_ctlinput __P((int, struct sockaddr *, void *));
+#if INET6
+struct ip6_hdr;
+void	tcp6_ctlinput __P((int,
struct sockaddr *,void *)); +#endif +int tcp_ctloutput __P((struct socket *, struct sockopt *)); +struct tcpcb * + tcp_drop __P((struct tcpcb *, int)); +void tcp_drain __P((void)); +void tcp_fasttimo __P((void)); +struct rmxp_tao * + tcp_gettaocache __P((struct inpcb *)); +void tcp_init __P((void)); +#if INET6 +void tcp6_init __P((void)); +int tcp6_input __P((struct mbuf **, int *, int)); +#endif /* INET6 */ +void tcp_input __P((struct mbuf *, int)); +#if INET6 +void tcp_mss __P((struct tcpcb *, int, int)); +int tcp_mssopt __P((struct tcpcb *, int)); +#else /* INET6 */ +void tcp_mss __P((struct tcpcb *, int)); +int tcp_mssopt __P((struct tcpcb *)); +#endif /* INET6 */ + +void tcp_mtudisc __P((struct inpcb *, int)); +struct tcpcb * + tcp_newtcpcb __P((struct inpcb *)); +int tcp_output __P((struct tcpcb *)); +void tcp_quench __P((struct inpcb *, int)); +#if INET6 +void tcp_respond __P((struct tcpcb *, void *, struct tcphdr *, + struct mbuf *, tcp_seq, tcp_seq, int, int)); +#else /* INET6 */ +void tcp_respond __P((struct tcpcb *, void *, struct tcphdr *, + struct mbuf *, tcp_seq, tcp_seq, int)); +#endif /* INET6 */ + +struct rtentry * + tcp_rtlookup __P((struct inpcb *)); +#if INET6 +struct rtentry * + tcp_rtlookup6 __P((struct inpcb *)); +#endif /* INET6 */ +void tcp_setpersist __P((struct tcpcb *)); +void tcp_slowtimo __P((void)); +struct tcptemp * + tcp_template __P((struct tcpcb *)); +struct tcpcb * + tcp_timers __P((struct tcpcb *, int)); +#if INET6 +void tcp_trace __P((int, int, struct tcpcb *, void *, struct tcphdr *, + int)); +#else +void tcp_trace __P((int, int, struct tcpcb *, struct ip *, + struct tcphdr *, int)); +#endif + +#if INET6 +int tcp_reass __P((struct tcpcb *, struct tcphdr *, int, + struct mbuf *, int)); +#else /* INET6 */ +int tcp_reass __P((struct tcpcb *, struct tcphdr *, int, struct mbuf *)); +/* suppress INET6 only args */ +#define tcp_reass(x, y, z, t, i) tcp_reass(x, y, z, t) +#define tcp_mss(x, y, i) tcp_mss(x, y) +#define tcp_mssopt(x, 
i) tcp_mssopt(x) +#define tcp_respond(x, y, z, m, s1, s2, f, i) tcp_respond(x, y, z, m, s1, \ + s2, f) + +#endif /* INET6 */ + +extern struct pr_usrreqs tcp_usrreqs; +#if INET6 +extern struct pr_usrreqs tcp6_usrreqs; +#endif /* INET6 */ +extern u_long tcp_sendspace; +extern u_long tcp_recvspace; + +#endif /* KERNEL */ + +#endif /* _NETINET_TCP_VAR_H_ */ diff --git a/bsd/netinet/tcpip.h b/bsd/netinet/tcpip.h new file mode 100644 index 000000000..166501a5a --- /dev/null +++ b/bsd/netinet/tcpip.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tcpip.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_TCPIP_H_ +#define _NETINET_TCPIP_H_ + +/* + * Tcp+ip header, after ip options removed. + */ +struct tcpiphdr { + struct ipovly ti_i; /* overlaid ip structure */ + struct tcphdr ti_t; /* tcp header */ +}; +#ifdef notyet +/* + * Tcp+ip header, after ip options removed but including TCP options. 
+ */ +struct full_tcpiphdr { + struct ipovly ti_i; /* overlaid ip structure */ + struct tcphdr ti_t; /* tcp header */ + char ti_o[TCP_MAXOLEN]; /* space for tcp options */ +}; +#endif /* notyet */ +#define ti_x1 ti_i.ih_x1 +#define ti_pr ti_i.ih_pr +#define ti_len ti_i.ih_len +#define ti_src ti_i.ih_src +#define ti_dst ti_i.ih_dst +#define ti_sport ti_t.th_sport +#define ti_dport ti_t.th_dport +#define ti_seq ti_t.th_seq +#define ti_ack ti_t.th_ack +#define ti_x2 ti_t.th_x2 +#define ti_off ti_t.th_off +#define ti_flags ti_t.th_flags +#define ti_win ti_t.th_win +#define ti_sum ti_t.th_sum +#define ti_urp ti_t.th_urp + +#ifndef INET6 +/* + * Same for templates. + */ +struct tcptemp { + struct ipovly tt_i; /* overlaid ip structure */ + struct tcphdr tt_t; /* tcp header */ +}; +#define tt_x1 tt_i.ih_x1 +#define tt_pr tt_i.ih_pr +#define tt_len tt_i.ih_len +#define tt_src tt_i.ih_src +#define tt_dst tt_i.ih_dst +#define tt_sport tt_t.th_sport +#define tt_dport tt_t.th_dport +#define tt_off tt_t.th_off +#define tt_seq tt_t.th_seq +#define tt_ack tt_t.th_ack +#define tt_x2 tt_t.th_x2 +#define tt_flags tt_t.th_flags +#define tt_win tt_t.th_win +#define tt_sum tt_t.th_sum +#define tt_urp tt_t.th_urp +#else + +#define ip6tcp tcpip6hdr /* for KAME src sync over BSD*'s */ + +/* + * IPv6+TCP headers. + */ +struct tcpip6hdr { + struct ip6_hdr ti6_i; /* IPv6 header */ + struct tcphdr ti6_t; /* TCP header */ +}; +#define ti6_vfc ti6_i.ip6_vfc +#define ti6_flow ti6_i.ip6_vlow +#define ti6_plen ti6_i.ip6_plen +#define ti6_nxt ti6_i.ip6_nxt +#define ti6_hlim ti6_i.ip6_hlim +#define ti6_src ti6_i.ip6_src +#define ti6_dst ti6_i.ip6_dst +#define ti6_sport ti6_t.th_sport +#define ti6_dport ti6_t.th_dport +#define ti6_seq ti6_t.th_seq +#define ti6_ack ti6_t.th_ack +#define ti6_x2 ti6_t.th_x2 +#define ti6_off ti6_t.th_off +#define ti6_flags ti6_t.th_flags +#define ti6_win ti6_t.th_win +#define ti6_sum ti6_t.th_sum +#define ti6_urp ti6_t.th_urp + +/* + * Dual template for IPv4/IPv6 TCP. 
+ * + * Optimized for IPv4 + */ +struct tcptemp { + struct ipovly tt_i; /* overlaid ip structure */ + struct tcphdr tt_t; /* tcp header */ + struct ip6_hdr tt_i6; /* IPv6 header */ +}; +#define tt_x1 tt_i.ih_x1 +#define tt_pr tt_i.ih_pr +#define tt_len tt_i.ih_len +#define tt_src tt_i.ih_src +#define tt_dst tt_i.ih_dst +#define tt_sport tt_t.th_sport +#define tt_dport tt_t.th_dport +#define tt_off tt_t.th_off +#define tt_seq tt_t.th_seq +#define tt_ack tt_t.th_ack +#define tt_x2 tt_t.th_x2 +#define tt_flags tt_t.th_flags +#define tt_win tt_t.th_win +#define tt_sum tt_t.th_sum +#define tt_urp tt_t.th_urp +#define tt_vfc tt_i6.ip6_vfc +#define tt_flow tt_i6.ip6_flow +#define tt_pr6 tt_i6.ip6_nxt +#define tt_len6 tt_i6.ip6_plen +#define tt_src6 tt_i6.ip6_src +#define tt_dst6 tt_i6.ip6_dst +#endif + +#endif diff --git a/bsd/netinet/udp.h b/bsd/netinet/udp.h new file mode 100644 index 000000000..99a80dd29 --- /dev/null +++ b/bsd/netinet/udp.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)udp.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_UDP_H_ +#define _NETINET_UDP_H_ + +/* + * Udp protocol header. + * Per RFC 768, September, 1981. + */ +struct udphdr { + u_short uh_sport; /* source port */ + u_short uh_dport; /* destination port */ + u_short uh_ulen; /* udp length */ + u_short uh_sum; /* udp checksum */ +}; + +#endif diff --git a/bsd/netinet/udp_usrreq.c b/bsd/netinet/udp_usrreq.c new file mode 100644 index 000000000..3d123c164 --- /dev/null +++ b/bsd/netinet/udp_usrreq.c @@ -0,0 +1,966 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)udp_usrreq.c 8.6 (Berkeley) 5/23/95 + */ + +#include +#include +#include +#include +#include +#if INET6 +#include +#endif +#include +#include +#include +#include +#include + +#if ISFB31 +#include +#endif + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#if INET6 +#include +#include +#endif +#include +#include +#include + +#if IPSEC +#include +#endif /*IPSEC*/ + + +#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0) +#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2) +#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1) +#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3) +#define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8)) +#define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1) + +#define __STDC__ 1 +/* + * UDP protocol implementation. + * Per RFC 768, August, 1980. + */ +#ifndef COMPAT_42 +static int udpcksum = 1; +#else +static int udpcksum = 0; /* XXX */ +#endif +SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW, + &udpcksum, 0, ""); + +int log_in_vain; +SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW, + &log_in_vain, 0, ""); + +struct inpcbhead udb; /* from udp_var.h */ +#define udb6 udb /* for KAME src sync over BSD*'s */ +struct inpcbinfo udbinfo; + +#ifndef UDBHASHSIZE +#define UDBHASHSIZE 16 +#endif + +struct udpstat udpstat; /* from udp_var.h */ +SYSCTL_STRUCT(_net_inet_udp, UDPCTL_STATS, stats, CTLFLAG_RD, + &udpstat, udpstat, ""); + +static struct sockaddr_in udp_in = { sizeof(udp_in), AF_INET }; +#if INET6 +struct udp_in6 { + struct sockaddr_in6 uin6_sin; + u_char uin6_init_done : 1; +} udp_in6 = { + { sizeof(udp_in6.uin6_sin), AF_INET6 }, + 0 +}; +struct udp_ip6 { + struct ip6_hdr uip6_ip6; + u_char uip6_init_done : 1; +} udp_ip6; +#endif /* INET6 */ + +static void udp_append __P((struct inpcb *last, struct ip *ip, + struct mbuf *n, int off)); +#if INET6 +static void ip_2_ip6_hdr __P((struct ip6_hdr *ip6, struct ip *ip)); +#endif + 
+static int udp_detach __P((struct socket *so)); +static int udp_output __P((struct inpcb *, struct mbuf *, struct sockaddr *, + struct mbuf *, struct proc *)); + +void +udp_init() +{ + vm_size_t str_size; + int stat; + u_char fake_owner; + struct in_addr laddr; + struct in_addr faddr; + u_short lport; + + LIST_INIT(&udb); + udbinfo.listhead = &udb; + udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask); + udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB, + &udbinfo.porthashmask); +#if ISFB31 + udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets, + ZONE_INTERRUPT, 0); +#else + str_size = (vm_size_t) sizeof(struct inpcb); + udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "inpcb_zone"); +#endif + + udbinfo.last_pcb = 0; + in_pcb_nat_init(&udbinfo, AF_INET, IPPROTO_UDP, SOCK_DGRAM); + +#if 0 + stat = in_pcb_new_share_client(&udbinfo, &fake_owner); + kprintf("udp_init in_pcb_new_share_client - stat = %d\n", stat); + + laddr.s_addr = 0x11646464; + faddr.s_addr = 0x11646465; + + lport = 1500; + in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner); + kprintf("udp_init in_pcb_grab_port - stat = %d\n", stat); + + stat = in_pcb_rem_share_client(&udbinfo, fake_owner); + kprintf("udp_init in_pcb_rem_share_client - stat = %d\n", stat); + + stat = in_pcb_new_share_client(&udbinfo, &fake_owner); + kprintf("udp_init in_pcb_new_share_client(2) - stat = %d\n", stat); + + laddr.s_addr = 0x11646464; + faddr.s_addr = 0x11646465; + + lport = 1500; + stat = in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner); + kprintf("udp_init in_pcb_grab_port(2) - stat = %d\n", stat); +#endif +} + +void +udp_input(m, iphlen) + register struct mbuf *m; + int iphlen; +{ + register struct ip *ip; + register struct udphdr *uh; + register struct inpcb *inp; + struct mbuf *opts = 0; +#if INET6 + struct ip6_recvpktopts opts6; +#endif + int len; + struct ip save_ip; + struct sockaddr *append_sa; + + 
udpstat.udps_ipackets++; +#if INET6 + bzero(&opts6, sizeof(opts6)); +#endif + + + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_START, 0,0,0,0,0); + + /* + * Strip IP options, if any; should skip this, + * make available to user, and use on returned packets, + * but we don't yet have a way to check the checksum + * with options still present. + */ + if (iphlen > sizeof (struct ip)) { + ip_stripoptions(m, (struct mbuf *)0); + iphlen = sizeof(struct ip); + } + + /* + * Get IP and UDP header together in first mbuf. + */ + ip = mtod(m, struct ip *); + if (m->m_len < iphlen + sizeof(struct udphdr)) { + if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) { + udpstat.udps_hdrops++; + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; + } + ip = mtod(m, struct ip *); + } + uh = (struct udphdr *)((caddr_t)ip + iphlen); + + KERNEL_DEBUG(DBG_LAYER_IN_BEG, uh->uh_dport, uh->uh_sport, + ip->ip_src.s_addr, ip->ip_dst.s_addr, uh->uh_ulen); + + /* + * Make mbuf data length reflect UDP length. + * If not enough data to reflect UDP length, drop. + */ + len = ntohs((u_short)uh->uh_ulen); + if (ip->ip_len != len) { + if (len > ip->ip_len || len < sizeof(struct udphdr)) { + udpstat.udps_badlen++; + goto bad; + } + m_adj(m, len - ip->ip_len); + /* ip->ip_len = len; */ + } + /* + * Save a copy of the IP header in case we want restore it + * for sending an ICMP error message in response. + */ + save_ip = *ip; + + /* + * Checksum extended UDP header and data. 
+ */ + if (uh->uh_sum) { + bzero(((struct ipovly *)ip)->ih_x1, 9); + ((struct ipovly *)ip)->ih_len = uh->uh_ulen; + uh->uh_sum = in_cksum(m, len + sizeof (struct ip)); + if (uh->uh_sum) { + udpstat.udps_badsum++; + m_freem(m); + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; + } + } + + if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || + in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { + struct inpcb *last; + /* + * Deliver a multicast or broadcast datagram to *all* sockets + * for which the local and remote addresses and ports match + * those of the incoming datagram. This allows more than + * one process to receive multi/broadcasts on the same port. + * (This really ought to be done for unicast datagrams as + * well, but that would cause problems with existing + * applications that open both address-specific sockets and + * a wildcard socket listening to the same port -- they would + * end up receiving duplicates of every unicast datagram. + * Those applications open the multiple sockets to overcome an + * inadequacy of the UDP socket interface, but for backwards + * compatibility we avoid the problem here rather than + * fixing the interface. Maybe 4.5BSD will remedy this?) + */ + + /* + * Construct sockaddr format source address. + */ + udp_in.sin_port = uh->uh_sport; + udp_in.sin_addr = ip->ip_src; + /* + * Locate pcb(s) for datagram. + * (Algorithm copied from raw_intr().) 
+ */ + last = NULL; +#if INET6 + udp_in6.uin6_init_done = udp_ip6.uip6_init_done = 0; +#endif + LIST_FOREACH(inp, &udb, inp_list) { +#if INET6 + if ((inp->inp_vflag & INP_IPV4) == 0) + continue; +#endif + if (inp->inp_lport != uh->uh_dport) + continue; + if (inp->inp_laddr.s_addr != INADDR_ANY) { + if (inp->inp_laddr.s_addr != + ip->ip_dst.s_addr) + continue; + } + if (inp->inp_faddr.s_addr != INADDR_ANY) { + if (inp->inp_faddr.s_addr != + ip->ip_src.s_addr || + inp->inp_fport != uh->uh_sport) + continue; + } + + if (last != NULL) { + struct mbuf *n; + +#if IPSEC + /* check AH/ESP integrity. */ + if (ipsec4_in_reject_so(m, last->inp_socket)) { + ipsecstat.in_polvio++; + /* do not inject data to pcb */ + } else +#endif /*IPSEC*/ + if ((n = m_copy(m, 0, M_COPYALL)) != NULL) { + udp_append(last, ip, n, iphlen + + sizeof (struct udphdr)); + } + } + last = inp; + /* + * Don't look for additional matches if this one does + * not have either the SO_REUSEPORT or SO_REUSEADDR + * socket options set. This heuristic avoids searching + * through all pcbs in the common case of a non-shared + * port. It * assumes that an application will never + * clear these options after setting them. + */ + if ((last->inp_socket->so_options&(SO_REUSEPORT|SO_REUSEADDR)) == 0) + break; + } + + if (last == NULL) { + /* + * No matching pcb found; discard datagram. + * (No need to send an ICMP Port Unreachable + * for a broadcast or multicast datgram.) + */ + udpstat.udps_noportbcast++; + goto bad; + } +#if IPSEC + else + /* check AH/ESP integrity. */ + if (m && ipsec4_in_reject_so(m, last->inp_socket)) { + ipsecstat.in_polvio++; + goto bad; + } +#endif /*IPSEC*/ + udp_append(last, ip, m, iphlen + sizeof (struct udphdr)); + return; + } + /* + * Locate pcb for datagram. 
+ */ + inp = in_pcblookup_hash(&udbinfo, ip->ip_src, uh->uh_sport, + ip->ip_dst, uh->uh_dport, 1, m->m_pkthdr.rcvif); + if (inp == NULL) { + if (log_in_vain) { + char buf[4*sizeof "123"]; + + strcpy(buf, inet_ntoa(ip->ip_dst)); + log(LOG_INFO, + "Connection attempt to UDP %s:%d from %s:%d\n", + buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src), + ntohs(uh->uh_sport)); + } + udpstat.udps_noport++; + if (m->m_flags & (M_BCAST | M_MCAST)) { + udpstat.udps_noportbcast++; + goto bad; + } + *ip = save_ip; +#if ICMP_BANDLIM + if (badport_bandlim(0) < 0) + goto bad; +#endif + icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0); + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; + } +#if IPSEC + if (inp != NULL && ipsec4_in_reject_so(m, inp->inp_socket)) { + ipsecstat.in_polvio++; + goto bad; + } +#endif /*IPSEC*/ + + /* + * Construct sockaddr format source address. + * Stuff source address and datagram in user buffer. + */ + udp_in.sin_port = uh->uh_sport; + udp_in.sin_addr = ip->ip_src; + if (inp->inp_flags & INP_CONTROLOPTS + || inp->inp_socket->so_options & SO_TIMESTAMP) { +#if INET6 + if (inp->inp_vflag & INP_IPV6) { + int savedflags; + + ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip); + savedflags = inp->inp_flags; + inp->inp_flags &= ~INP_UNMAPPABLEOPTS; + ip6_savecontrol(inp, &udp_ip6.uip6_ip6, m, + &opts6, NULL); + + inp->inp_flags = savedflags; + } else +#endif + ip_savecontrol(inp, &opts, ip, m); + } + m_adj(m, iphlen + sizeof(struct udphdr)); + + KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport, + save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen); + +#if INET6 + if (inp->inp_vflag & INP_IPV6) { + in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin); + append_sa = (struct sockaddr *)&udp_in6; + opts = opts6.head; + } else +#endif + append_sa = (struct sockaddr *)&udp_in; + if (sbappendaddr(&inp->inp_socket->so_rcv, append_sa, m, opts) == 0) { + udpstat.udps_fullsock++; + goto bad; + } + sorwakeup(inp->inp_socket); + 
KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); + return; +bad: + m_freem(m); + if (opts) + m_freem(opts); + KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0,0,0,0,0); +} + +#if INET6 +static void +ip_2_ip6_hdr(ip6, ip) + struct ip6_hdr *ip6; + struct ip *ip; +{ + bzero(ip6, sizeof(*ip6)); + + ip6->ip6_vfc = IPV6_VERSION; + ip6->ip6_plen = ip->ip_len; + ip6->ip6_nxt = ip->ip_p; + ip6->ip6_hlim = ip->ip_ttl; + ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] = + IPV6_ADDR_INT32_SMP; + ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr; + ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr; +} +#endif + +/* + * subroutine of udp_input(), mainly for source code readability. + * caller must properly init udp_ip6 and udp_in6 beforehand. + */ +static void +udp_append(last, ip, n, off) + struct inpcb *last; + struct ip *ip; + struct mbuf *n; +{ + struct sockaddr *append_sa; + struct mbuf *opts = 0; +#if INET6 + struct ip6_recvpktopts opts6; + bzero(&opts6, sizeof(opts6)); +#endif + + + if (last->inp_flags & INP_CONTROLOPTS || + last->inp_socket->so_options & SO_TIMESTAMP) { +#if INET6 + if (last->inp_vflag & INP_IPV6) { + int savedflags; + + if (udp_ip6.uip6_init_done == 0) { + ip_2_ip6_hdr(&udp_ip6.uip6_ip6, ip); + udp_ip6.uip6_init_done = 1; + } + savedflags = last->inp_flags; + last->inp_flags &= ~INP_UNMAPPABLEOPTS; + ip6_savecontrol(last, &udp_ip6.uip6_ip6, n, + &opts6, NULL); + last->inp_flags = savedflags; + } else +#endif + ip_savecontrol(last, &opts, ip, n); + } +#if INET6 + if (last->inp_vflag & INP_IPV6) { + if (udp_in6.uin6_init_done == 0) { + in6_sin_2_v4mapsin6(&udp_in, &udp_in6.uin6_sin); + udp_in6.uin6_init_done = 1; + } + append_sa = (struct sockaddr *)&udp_in6.uin6_sin; + opts = opts6.head; + } else +#endif + append_sa = (struct sockaddr *)&udp_in; + m_adj(n, off); + + if (sbappendaddr(&last->inp_socket->so_rcv, append_sa, n, opts) == 0) { + m_freem(n); + if (opts) + m_freem(opts); + udpstat.udps_fullsock++; + } else + sorwakeup(last->inp_socket); 
+} + + + +/* + * Notify a udp user of an asynchronous error; + * just wake up so that he can collect error status. + */ +void +udp_notify(inp, errno) + register struct inpcb *inp; + int errno; +{ + inp->inp_socket->so_error = errno; + sorwakeup(inp->inp_socket); + sowwakeup(inp->inp_socket); +} + +void +udp_ctlinput(cmd, sa, vip) + int cmd; + struct sockaddr *sa; + void *vip; +{ + register struct ip *ip = vip; + register struct udphdr *uh; + + if (!PRC_IS_REDIRECT(cmd) && + ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)) + return; + if (ip) { + uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2)); + in_pcbnotify(&udb, sa, uh->uh_dport, ip->ip_src, uh->uh_sport, + cmd, udp_notify); + } else + in_pcbnotify(&udb, sa, 0, zeroin_addr, 0, cmd, udp_notify); +} + + +static int +udp_pcblist SYSCTL_HANDLER_ARGS +{ + int error, i, n, s; + struct inpcb *inp, **inp_list; + inp_gen_t gencnt; + struct xinpgen xig; + + /* + * The process of preparing the TCB list is too time-consuming and + * resource-intensive to repeat twice on every request. + */ + if (req->oldptr == 0) { + n = udbinfo.ipi_count; + req->oldidx = 2 * (sizeof xig) + + (n + n/8) * sizeof(struct xinpcb); + return 0; + } + + if (req->newptr != 0) + return EPERM; + + /* + * OK, now we're committed to doing something. 
+ */ + s = splnet(); + gencnt = udbinfo.ipi_gencnt; + n = udbinfo.ipi_count; + splx(s); + + xig.xig_len = sizeof xig; + xig.xig_count = n; + xig.xig_gen = gencnt; + xig.xig_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xig, sizeof xig); + if (error) + return error; + + inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK); + if (inp_list == 0) { + return ENOMEM; + } + s = splnet(); + for (inp = udbinfo.listhead->lh_first, i = 0; inp && i < n; + inp = inp->inp_list.le_next) { + if (inp->inp_gencnt <= gencnt) + inp_list[i++] = inp; + } + splx(s); + n = i; + + error = 0; + for (i = 0; i < n; i++) { + inp = inp_list[i]; + if (inp->inp_gencnt <= gencnt) { + struct xinpcb xi; + xi.xi_len = sizeof xi; + /* XXX should avoid extra copy */ + bcopy(inp, &xi.xi_inp, sizeof *inp); + if (inp->inp_socket) + sotoxsocket(inp->inp_socket, &xi.xi_socket); + error = SYSCTL_OUT(req, &xi, sizeof xi); + } + } + if (!error) { + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. 
+ */ + s = splnet(); + xig.xig_gen = udbinfo.ipi_gencnt; + xig.xig_sogen = so_gencnt; + xig.xig_count = udbinfo.ipi_count; + splx(s); + error = SYSCTL_OUT(req, &xig, sizeof xig); + } + FREE(inp_list, M_TEMP); + return error; +} + +SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0, + udp_pcblist, "S,xinpcb", "List of active UDP sockets"); + + + +static int +udp_output(inp, m, addr, control, p) + register struct inpcb *inp; + register struct mbuf *m; + struct sockaddr *addr; + struct mbuf *control; + struct proc *p; +{ + register struct udpiphdr *ui; + register int len = m->m_pkthdr.len; + struct in_addr laddr; + int s = 0, error = 0; + + KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_START, 0,0,0,0,0); + + if (control) + m_freem(control); /* XXX */ + + KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport, + inp->inp_laddr.s_addr, inp->inp_faddr.s_addr, + (htons((u_short)len + sizeof (struct udphdr)))); + + if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) { + error = EMSGSIZE; + goto release; + } + + if (addr) { + laddr = inp->inp_laddr; + if (inp->inp_faddr.s_addr != INADDR_ANY) { + error = EISCONN; + goto release; + } + /* + * Must block input while temporarily connected. + */ + s = splnet(); + error = in_pcbconnect(inp, addr, p); + if (error) { + splx(s); + goto release; + } + } else { + if (inp->inp_faddr.s_addr == INADDR_ANY) { + error = ENOTCONN; + goto release; + } + } + /* + * Calculate data length and get a mbuf + * for UDP and IP headers. + */ + M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT); + if (m == 0) { + error = ENOBUFS; + if (addr) + splx(s); + goto release; + } + + /* + * Fill in mbuf with extended UDP header + * and addresses and length put into network format. 
+ */ + ui = mtod(m, struct udpiphdr *); + bzero(ui->ui_x1, sizeof(ui->ui_x1)); + ui->ui_pr = IPPROTO_UDP; + ui->ui_len = htons((u_short)len + sizeof (struct udphdr)); + ui->ui_src = inp->inp_laddr; + ui->ui_dst = inp->inp_faddr; + ui->ui_sport = inp->inp_lport; + ui->ui_dport = inp->inp_fport; + ui->ui_ulen = ui->ui_len; + + /* + * Stuff checksum and output datagram. + */ + ui->ui_sum = 0; + if (udpcksum) { + if ((ui->ui_sum = in_cksum(m, sizeof (struct udpiphdr) + len)) == 0) + ui->ui_sum = 0xffff; + } + ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len; + ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */ + ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */ + udpstat.udps_opackets++; + + KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport, + ui->ui_src.s_addr, ui->ui_dst.s_addr, ui->ui_ulen); + + +#if IPSEC + ipsec_setsocket(m, inp->inp_socket); +#endif /*IPSEC*/ + + error = ip_output(m, inp->inp_options, &inp->inp_route, + inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST), + inp->inp_moptions); + + if (addr) { + in_pcbdisconnect(inp); + inp->inp_laddr = laddr; /* XXX rehash? */ + splx(s); + } + KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0); + return (error); + +release: + m_freem(m); + KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0,0,0,0); + return (error); +} + +u_long udp_sendspace = 9216; /* really max datagram size */ + /* 40 1K datagrams */ +SYSCTL_INT(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW, + &udp_sendspace, 0, ""); + + +u_long udp_recvspace = 40 * (1024 + /* 40 1K datagrams */ +#if INET6 + sizeof(struct sockaddr_in6) +#else /* INET6 */ + sizeof(struct sockaddr_in) +#endif /* INET6 */ + ); +SYSCTL_INT(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW, + &udp_recvspace, 0, ""); + +static int +udp_abort(struct socket *so) +{ + struct inpcb *inp; + int s; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; /* ??? possible? panic instead? 
*/ + soisdisconnected(so); + s = splnet(); + in_pcbdetach(inp); + splx(s); + return 0; +} + +static int +udp_attach(struct socket *so, int proto, struct proc *p) +{ + struct inpcb *inp; + int error; long s; + + if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { + error = soreserve(so, udp_sendspace, udp_recvspace); + if (error) + return error; + } + s = splnet(); + error = in_pcballoc(so, &udbinfo, p); + splx(s); + if (error) + return error; + error = soreserve(so, udp_sendspace, udp_recvspace); + if (error) + return error; + inp = (struct inpcb *)so->so_pcb; + inp->inp_vflag |= INP_IPV4; + inp->inp_ip_ttl = ip_defttl; +#if IPSEC + error = ipsec_init_policy(so, &inp->inp_sp); + if (error != 0) { + in_pcbdetach(inp); + return error; + } +#endif /*IPSEC*/ + return 0; +} + +static int +udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp; + int s, error; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + s = splnet(); + error = in_pcbbind(inp, nam, p); + splx(s); + return error; +} + +static int +udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp; + int s, error; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + if (inp->inp_faddr.s_addr != INADDR_ANY) + return EISCONN; + s = splnet(); + error = in_pcbconnect(inp, nam, p); + splx(s); + if (error == 0) + soisconnected(so); + return error; +} + +static int +udp_detach(struct socket *so) +{ + struct inpcb *inp; + int s; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + s = splnet(); + in_pcbdetach(inp); + splx(s); + return 0; +} + +static int +udp_disconnect(struct socket *so) +{ + struct inpcb *inp; + int s; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + if (inp->inp_faddr.s_addr == INADDR_ANY) + return ENOTCONN; + + s = splnet(); + in_pcbdisconnect(inp); + inp->inp_laddr.s_addr = INADDR_ANY; + splx(s); + so->so_state &= ~SS_ISCONNECTED; /* XXX */ + return 0; +} + +static int +udp_send(struct 
socket *so, int flags, struct mbuf *m, struct sockaddr *addr, + struct mbuf *control, struct proc *p) +{ + struct inpcb *inp; + + inp = sotoinpcb(so); + if (inp == 0) { + m_freem(m); + return EINVAL; + } + return udp_output(inp, m, addr, control, p); +} + +int +udp_shutdown(struct socket *so) +{ + struct inpcb *inp; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + socantsendmore(so); + return 0; +} + +struct pr_usrreqs udp_usrreqs = { + udp_abort, pru_accept_notsupp, udp_attach, udp_bind, udp_connect, + pru_connect2_notsupp, in_control, udp_detach, udp_disconnect, + pru_listen_notsupp, in_setpeeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, udp_send, pru_sense_null, udp_shutdown, + in_setsockaddr, sosend, soreceive, sopoll +}; + diff --git a/bsd/netinet/udp_var.h b/bsd/netinet/udp_var.h new file mode 100644 index 000000000..97adb4508 --- /dev/null +++ b/bsd/netinet/udp_var.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)udp_var.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET_UDP_VAR_H_ +#define _NETINET_UDP_VAR_H_ + +#include + +/* + * UDP kernel structures and variables. 
 + */ +struct udpiphdr { + struct ipovly ui_i; /* overlaid ip structure */ + struct udphdr ui_u; /* udp header */ +}; +#define ui_x1 ui_i.ih_x1 +#define ui_pr ui_i.ih_pr +#define ui_len ui_i.ih_len +#define ui_src ui_i.ih_src +#define ui_dst ui_i.ih_dst +#define ui_sport ui_u.uh_sport +#define ui_dport ui_u.uh_dport +#define ui_ulen ui_u.uh_ulen +#define ui_sum ui_u.uh_sum +#define ui_next ui_i.ih_next +#define ui_prev ui_i.ih_prev + +struct udpcb { + /* XXX - these should be by reference so we can do options quickly */ + struct ip udb_ip; + struct udphdr udb_uh; + struct sockaddr_in udb_conn; + struct in_hostcache *udb_hc; + struct mbuf *udb_queue; +}; +#define inptoudpcb(inp) ((struct udpcb *)(inp)->inp_ppcb) + +struct udpstat { + /* input statistics: */ + u_long udps_ipackets; /* total input packets */ + u_long udps_hdrops; /* packet shorter than header */ + u_long udps_badsum; /* checksum error */ + u_long udps_badlen; /* data length larger than packet */ + u_long udps_noport; /* no socket on port */ + u_long udps_noportbcast; /* of above, arrived as broadcast */ + u_long udps_fullsock; /* not delivered, input socket full */ + u_long udpps_pcbcachemiss; /* input packets missing pcb cache */ + u_long udpps_pcbhashmiss; /* input packets not for hashed pcb */ + /* output statistics: */ + u_long udps_opackets; /* total output packets */ + u_long udps_fastout; /* output packets on fast path */ +}; + +/* + * Names for UDP sysctl objects + */ +#define UDPCTL_CHECKSUM 1 /* checksum UDP packets */ +#define UDPCTL_STATS 2 /* statistics (read-only) */ +#define UDPCTL_MAXDGRAM 3 /* max datagram size */ +#define UDPCTL_RECVSPACE 4 /* default receive buffer space */ +#define UDPCTL_PCBLIST 5 /* list of PCBs for UDP sockets */ +#define UDPCTL_MAXID 6 + +#define UDPCTL_NAMES { \ + { 0, 0 }, \ + { "checksum", CTLTYPE_INT }, \ + { "stats", CTLTYPE_STRUCT }, \ + { "maxdgram", CTLTYPE_INT }, \ + { "recvspace", CTLTYPE_INT }, \ + { "pcblist", CTLTYPE_STRUCT }, \ +} + +#ifdef KERNEL 
+SYSCTL_DECL(_net_inet_udp); + +extern struct pr_usrreqs udp_usrreqs; +extern struct inpcbhead udb; +extern struct inpcbinfo udbinfo; +extern u_long udp_sendspace; +extern u_long udp_recvspace; +extern struct udpstat udpstat; +extern int log_in_vain; + +void udp_ctlinput __P((int, struct sockaddr *, void *)); +void udp_init __P((void)); +void udp_input __P((struct mbuf *, int)); + +void udp_notify __P((struct inpcb *inp, int errno)); +int udp_shutdown __P((struct socket *so)); +#endif + +#endif diff --git a/bsd/netinet6/Makefile b/bsd/netinet6/Makefile new file mode 100644 index 000000000..e70225fdf --- /dev/null +++ b/bsd/netinet6/Makefile @@ -0,0 +1,41 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + ah.h esp.h icmp6.h in6.h in6_gif.h in6_ifattach.h in6_pcb.h \ + in6_prefix.h in6_var.h ip6.h ip6_fw.h ip6_mroute.h ip6_var.h \ + ip6protosw.h ipcomp.h ipsec.h mip6.h mip6_common.h mld6_var.h \ + natpt_defs.h natpt_list.h natpt_log.h natpt_soctl.h natpt_var.h \ + nd6.h pim6.h pim6_var.h udp6.h udp6_var.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = netinet6 + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = netinet6 + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/netinet6/ah.h b/bsd/netinet6/ah.h new file mode 100644 index 000000000..a1e9667a0 --- /dev/null +++ b/bsd/netinet6/ah.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC1826/2402 authentication header. 
+ */ + +#ifndef _NETINET6_AH_H_ +#define _NETINET6_AH_H_ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#if defined(_KERNEL) && !defined(_LKM) +#include "opt_inet.h" +#endif +#endif + +#include /* for struct secasvar */ + +struct ah { + u_int8_t ah_nxt; /* Next Header */ + u_int8_t ah_len; /* Length of data, in 32bit */ + u_int16_t ah_reserve; /* Reserved for future use */ + u_int32_t ah_spi; /* Security parameter index */ + /* variable size, 32bit bound*/ /* Authentication data */ +}; + +struct newah { + u_int8_t ah_nxt; /* Next Header */ + u_int8_t ah_len; /* Length of data + 1, in 32bit */ + u_int16_t ah_reserve; /* Reserved for future use */ + u_int32_t ah_spi; /* Security parameter index */ + u_int32_t ah_seq; /* Sequence number field */ + /* variable size, 32bit bound*/ /* Authentication data */ +}; + +struct ah_algorithm_state { + struct secasvar *sav; + void* foo; /*per algorithm data - maybe*/ +}; + +struct ah_algorithm { + int (*sumsiz) __P((struct secasvar *)); + int (*mature) __P((struct secasvar *)); + int keymin; /* in bits */ + int keymax; /* in bits */ + void (*init) __P((struct ah_algorithm_state *, struct secasvar *)); + void (*update) __P((struct ah_algorithm_state *, caddr_t, size_t)); + void (*result) __P((struct ah_algorithm_state *, caddr_t)); +}; + +#define AH_MAXSUMSIZE 16 + +#ifdef KERNEL +extern struct ah_algorithm ah_algorithms[]; + +struct inpcb; +#if INET6 +struct in6pcb; +#endif + +/* cksum routines */ +extern int ah_hdrlen __P((struct secasvar *)); + +extern size_t ah_hdrsiz __P((struct ipsecrequest *)); +extern void ah4_input __P((struct mbuf *, int)); +extern int ah4_output __P((struct mbuf *, struct ipsecrequest *)); +extern int ah4_calccksum __P((struct mbuf *, caddr_t, + struct ah_algorithm *, struct secasvar *)); + +#if INET6 +extern int ah6_input __P((struct mbuf **, int *, int)); +extern int ah6_output __P((struct mbuf *, u_char *, struct mbuf *, + struct ipsecrequest *)); +extern int ah6_calccksum 
__P((struct mbuf *, caddr_t, + struct ah_algorithm *, struct secasvar *)); +#endif /* INET6 */ + +#endif /*KERNEL*/ + +#endif /*_NETINET6_AH_H_*/ diff --git a/bsd/netinet6/ah_core.c b/bsd/netinet6/ah_core.c new file mode 100644 index 000000000..af0762142 --- /dev/null +++ b/bsd/netinet6/ah_core.c @@ -0,0 +1,1132 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC1826/2402 authentication header. 
+ */ +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#if __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +/* Some of operating systems have standard crypto checksum library */ +#if __NetBSD__ +#define HAVE_MD5 +#define HAVE_SHA1 +#endif +#if defined(__FreeBSD__) || defined(__APPLE__) +#define HAVE_MD5 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#if INET6 +#include +#include +#include +#endif + +#include +#include +#if IPSEC_ESP +#include +#endif +#include +#include +#if HAVE_MD5 +#include +#else +#include +#endif +#if HAVE_SHA1 +#include +#define SHA1_RESULTLEN 20 +#else +#include +#endif + +#include + +#define HMACSIZE 16 + +static int ah_sumsiz_1216 __P((struct secasvar *)); +static int ah_sumsiz_zero __P((struct secasvar *)); +static int ah_none_mature __P((struct secasvar *)); +static void ah_none_init __P((struct ah_algorithm_state *, + struct secasvar *)); +static void ah_none_loop __P((struct ah_algorithm_state *, caddr_t, size_t)); +static void ah_none_result __P((struct ah_algorithm_state *, caddr_t)); +static int ah_keyed_md5_mature __P((struct secasvar *)); +static void ah_keyed_md5_init __P((struct ah_algorithm_state *, + struct secasvar *)); +static void ah_keyed_md5_loop __P((struct ah_algorithm_state *, caddr_t, + size_t)); +static void ah_keyed_md5_result __P((struct ah_algorithm_state *, caddr_t)); +static int ah_keyed_sha1_mature __P((struct secasvar *)); +static void ah_keyed_sha1_init __P((struct ah_algorithm_state *, + struct secasvar *)); +static void ah_keyed_sha1_loop __P((struct ah_algorithm_state *, caddr_t, + size_t)); +static void ah_keyed_sha1_result __P((struct ah_algorithm_state *, caddr_t)); +static int ah_hmac_md5_mature __P((struct secasvar *)); +static void ah_hmac_md5_init __P((struct 
ah_algorithm_state *, + struct secasvar *)); +static void ah_hmac_md5_loop __P((struct ah_algorithm_state *, caddr_t, + size_t)); +static void ah_hmac_md5_result __P((struct ah_algorithm_state *, caddr_t)); +static int ah_hmac_sha1_mature __P((struct secasvar *)); +static void ah_hmac_sha1_init __P((struct ah_algorithm_state *, + struct secasvar *)); +static void ah_hmac_sha1_loop __P((struct ah_algorithm_state *, caddr_t, + size_t)); +static void ah_hmac_sha1_result __P((struct ah_algorithm_state *, caddr_t)); + +static void ah_update_mbuf __P((struct mbuf *, int, int, struct ah_algorithm *, + struct ah_algorithm_state *)); + +/* checksum algorithms */ +/* NOTE: The order depends on SADB_AALG_x in net/pfkeyv2.h */ +struct ah_algorithm ah_algorithms[] = { + { 0, 0, 0, 0, 0, 0, }, + { ah_sumsiz_1216, ah_hmac_md5_mature, 128, 128, + ah_hmac_md5_init, ah_hmac_md5_loop, ah_hmac_md5_result, }, + { ah_sumsiz_1216, ah_hmac_sha1_mature, 160, 160, + ah_hmac_sha1_init, ah_hmac_sha1_loop, ah_hmac_sha1_result, }, + { ah_sumsiz_1216, ah_keyed_md5_mature, 128, 128, + ah_keyed_md5_init, ah_keyed_md5_loop, ah_keyed_md5_result, }, + { ah_sumsiz_1216, ah_keyed_sha1_mature, 160, 160, + ah_keyed_sha1_init, ah_keyed_sha1_loop, ah_keyed_sha1_result, }, + { ah_sumsiz_zero, ah_none_mature, 0, 2048, + ah_none_init, ah_none_loop, ah_none_result, }, +}; + +static int +ah_sumsiz_1216(sav) + struct secasvar *sav; +{ + if (!sav) + return -1; + if (sav->flags & SADB_X_EXT_OLD) + return 16; + else + return 12; +} + +static int +ah_sumsiz_zero(sav) + struct secasvar *sav; +{ + if (!sav) + return -1; + return 0; +} + +static int +ah_none_mature(sav) + struct secasvar *sav; +{ + if (sav->sah->saidx.proto == IPPROTO_AH) { + ipseclog((LOG_ERR, + "ah_none_mature: protocol and algorithm mismatch.\n")); + return 1; + } + return 0; +} + +static void +ah_none_init(state, sav) + struct ah_algorithm_state *state; + struct secasvar *sav; +{ + state->foo = NULL; +} + +static void +ah_none_loop(state, addr, 
len) + struct ah_algorithm_state *state; + caddr_t addr; + size_t len; +{ +} + +static void +ah_none_result(state, addr) + struct ah_algorithm_state *state; + caddr_t addr; +{ +} + +static int +ah_keyed_md5_mature(sav) + struct secasvar *sav; +{ + /* anything is okay */ + return 0; +} + +static void +ah_keyed_md5_init(state, sav) + struct ah_algorithm_state *state; + struct secasvar *sav; +{ + if (!state) + panic("ah_keyed_md5_init: what?"); + + state->sav = sav; + state->foo = (void *)_MALLOC(sizeof(MD5_CTX), M_TEMP, M_NOWAIT); + if (state->foo == NULL) + panic("ah_keyed_md5_init: what?"); + MD5Init((MD5_CTX *)state->foo); + if (state->sav) { + MD5Update((MD5_CTX *)state->foo, + (u_int8_t *)_KEYBUF(state->sav->key_auth), + (u_int)_KEYLEN(state->sav->key_auth)); + + { + /* + * Pad after the key. + * We cannot simply use md5_pad() since the function + * won't update the total length. + */ + size_t padlen; + size_t keybitlen; + u_int8_t buf[32]; + + if (_KEYLEN(state->sav->key_auth) < 56) + padlen = 64 - 8 - _KEYLEN(state->sav->key_auth); + else + padlen = 64 + 64 - 8 - _KEYLEN(state->sav->key_auth); + keybitlen = _KEYLEN(state->sav->key_auth); + keybitlen *= 8; + + buf[0] = 0x80; + MD5Update((MD5_CTX *)state->foo, &buf[0], 1); + padlen--; + + bzero(buf, sizeof(buf)); + while (sizeof(buf) < padlen) { + MD5Update((MD5_CTX *)state->foo, &buf[0], sizeof(buf)); + padlen -= sizeof(buf); + } + if (padlen) { + MD5Update((MD5_CTX *)state->foo, &buf[0], padlen); + } + + buf[0] = (keybitlen >> 0) & 0xff; + buf[1] = (keybitlen >> 8) & 0xff; + buf[2] = (keybitlen >> 16) & 0xff; + buf[3] = (keybitlen >> 24) & 0xff; + MD5Update((MD5_CTX *)state->foo, buf, 8); + } + } +} + +static void +ah_keyed_md5_loop(state, addr, len) + struct ah_algorithm_state *state; + caddr_t addr; + size_t len; +{ + if (!state) + panic("ah_keyed_md5_loop: what?"); + + MD5Update((MD5_CTX *)state->foo, addr, len); +} + +static void +ah_keyed_md5_result(state, addr) + struct ah_algorithm_state *state; + 
caddr_t addr; +{ + u_char digest[16]; + + if (!state) + panic("ah_keyed_md5_result: what?"); + + if (state->sav) { + MD5Update((MD5_CTX *)state->foo, + (u_int8_t *)_KEYBUF(state->sav->key_auth), + (u_int)_KEYLEN(state->sav->key_auth)); + } + MD5Final(&digest[0], (MD5_CTX *)state->foo); + _FREE(state->foo, M_TEMP); + bcopy(&digest[0], (void *)addr, sizeof(digest)); +} + +static int +ah_keyed_sha1_mature(sav) + struct secasvar *sav; +{ + struct ah_algorithm *algo; + + if (!sav->key_auth) { + ipseclog((LOG_ERR, "ah_keyed_sha1_mature: no key is given.\n")); + return 1; + } + algo = &ah_algorithms[sav->alg_auth]; + if (sav->key_auth->sadb_key_bits < algo->keymin + || algo->keymax < sav->key_auth->sadb_key_bits) { + ipseclog((LOG_ERR, + "ah_keyed_sha1_mature: invalid key length %d.\n", + sav->key_auth->sadb_key_bits)); + return 1; + } + + return 0; +} + +static void +ah_keyed_sha1_init(state, sav) + struct ah_algorithm_state *state; + struct secasvar *sav; +{ + SHA1_CTX *ctxt; + + if (!state) + panic("ah_keyed_sha1_init: what?"); + + state->sav = sav; + state->foo = (void *)_MALLOC(sizeof(SHA1_CTX), M_TEMP, M_NOWAIT); + if (!state->foo) + panic("ah_keyed_sha1_init: what?"); + + ctxt = (SHA1_CTX *)state->foo; + SHA1Init(ctxt); + + if (state->sav) { + SHA1Update(ctxt, (u_int8_t *)_KEYBUF(state->sav->key_auth), + (u_int)_KEYLEN(state->sav->key_auth)); + + { + /* + * Pad after the key. 
+ */ + size_t padlen; + size_t keybitlen; + u_int8_t buf[32]; + + if (_KEYLEN(state->sav->key_auth) < 56) + padlen = 64 - 8 - _KEYLEN(state->sav->key_auth); + else + padlen = 64 + 64 - 8 - _KEYLEN(state->sav->key_auth); + keybitlen = _KEYLEN(state->sav->key_auth); + keybitlen *= 8; + + buf[0] = 0x80; + SHA1Update(ctxt, &buf[0], 1); + padlen--; + + bzero(buf, sizeof(buf)); + while (sizeof(buf) < padlen) { + SHA1Update(ctxt, &buf[0], sizeof(buf)); + padlen -= sizeof(buf); + } + if (padlen) { + SHA1Update(ctxt, &buf[0], padlen); + } + + buf[0] = (keybitlen >> 0) & 0xff; + buf[1] = (keybitlen >> 8) & 0xff; + buf[2] = (keybitlen >> 16) & 0xff; + buf[3] = (keybitlen >> 24) & 0xff; + SHA1Update(ctxt, buf, 8); + } + } +} + +static void +ah_keyed_sha1_loop(state, addr, len) + struct ah_algorithm_state *state; + caddr_t addr; + size_t len; +{ + SHA1_CTX *ctxt; + + if (!state || !state->foo) + panic("ah_keyed_sha1_loop: what?"); + ctxt = (SHA1_CTX *)state->foo; + + SHA1Update(ctxt, (caddr_t)addr, (size_t)len); +} + +static void +ah_keyed_sha1_result(state, addr) + struct ah_algorithm_state *state; + caddr_t addr; +{ + u_char digest[SHA1_RESULTLEN]; /* SHA-1 generates 160 bits */ + SHA1_CTX *ctxt; + + if (!state || !state->foo) + panic("ah_keyed_sha1_result: what?"); + ctxt = (SHA1_CTX *)state->foo; + + if (state->sav) { + SHA1Update(ctxt, (u_int8_t *)_KEYBUF(state->sav->key_auth), + (u_int)_KEYLEN(state->sav->key_auth)); + } + SHA1Final((caddr_t)&digest[0], ctxt); + bcopy(&digest[0], (void *)addr, HMACSIZE); + + _FREE(state->foo, M_TEMP); +} + +static int +ah_hmac_md5_mature(sav) + struct secasvar *sav; +{ + struct ah_algorithm *algo; + + if (!sav->key_auth) { + ipseclog((LOG_ERR, "ah_hmac_md5_mature: no key is given.\n")); + return 1; + } + algo = &ah_algorithms[sav->alg_auth]; + if (sav->key_auth->sadb_key_bits < algo->keymin + || algo->keymax < sav->key_auth->sadb_key_bits) { + ipseclog((LOG_ERR, + "ah_hmac_md5_mature: invalid key length %d.\n", + 
sav->key_auth->sadb_key_bits)); + return 1; + } + + return 0; +} + +static void +ah_hmac_md5_init(state, sav) + struct ah_algorithm_state *state; + struct secasvar *sav; +{ + u_char *ipad; + u_char *opad; + u_char tk[16]; + u_char *key; + size_t keylen; + size_t i; + MD5_CTX *ctxt; + + if (!state) + panic("ah_hmac_md5_init: what?"); + + state->sav = sav; + state->foo = (void *)_MALLOC(64 + 64 + sizeof(MD5_CTX), M_TEMP, M_NOWAIT); + if (!state->foo) + panic("ah_hmac_md5_init: what?"); + + ipad = (u_char *)state->foo; + opad = (u_char *)(ipad + 64); + ctxt = (MD5_CTX *)(opad + 64); + + /* compress the key if necessery */ + if (64 < _KEYLEN(state->sav->key_auth)) { + MD5Init(ctxt); + MD5Update(ctxt, _KEYBUF(state->sav->key_auth), + _KEYLEN(state->sav->key_auth)); + MD5Final(&tk[0], ctxt); + key = &tk[0]; + keylen = 16; + } else { + key = _KEYBUF(state->sav->key_auth); + keylen = _KEYLEN(state->sav->key_auth); + } + + bzero(ipad, 64); + bzero(opad, 64); + bcopy(key, ipad, keylen); + bcopy(key, opad, keylen); + for (i = 0; i < 64; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + MD5Init(ctxt); + MD5Update(ctxt, ipad, 64); +} + +static void +ah_hmac_md5_loop(state, addr, len) + struct ah_algorithm_state *state; + caddr_t addr; + size_t len; +{ + MD5_CTX *ctxt; + + if (!state || !state->foo) + panic("ah_hmac_md5_loop: what?"); + ctxt = (MD5_CTX *)(((caddr_t)state->foo) + 128); + MD5Update(ctxt, addr, len); +} + +static void +ah_hmac_md5_result(state, addr) + struct ah_algorithm_state *state; + caddr_t addr; +{ + u_char digest[16]; + u_char *ipad; + u_char *opad; + MD5_CTX *ctxt; + + if (!state || !state->foo) + panic("ah_hmac_md5_result: what?"); + + ipad = (u_char *)state->foo; + opad = (u_char *)(ipad + 64); + ctxt = (MD5_CTX *)(opad + 64); + + MD5Final(&digest[0], ctxt); + + MD5Init(ctxt); + MD5Update(ctxt, opad, 64); + MD5Update(ctxt, &digest[0], sizeof(digest)); + MD5Final(&digest[0], ctxt); + + bcopy(&digest[0], (void *)addr, HMACSIZE); + + _FREE(state->foo, 
M_TEMP); +} + +static int +ah_hmac_sha1_mature(sav) + struct secasvar *sav; +{ + struct ah_algorithm *algo; + + if (!sav->key_auth) { + ipseclog((LOG_ERR, "ah_hmac_sha1_mature: no key is given.\n")); + return 1; + } + algo = &ah_algorithms[sav->alg_auth]; + if (sav->key_auth->sadb_key_bits < algo->keymin + || algo->keymax < sav->key_auth->sadb_key_bits) { + ipseclog((LOG_ERR, + "ah_hmac_sha1_mature: invalid key length %d.\n", + sav->key_auth->sadb_key_bits)); + return 1; + } + + return 0; +} + +static void +ah_hmac_sha1_init(state, sav) + struct ah_algorithm_state *state; + struct secasvar *sav; +{ + u_char *ipad; + u_char *opad; + SHA1_CTX *ctxt; + u_char tk[SHA1_RESULTLEN]; /* SHA-1 generates 160 bits */ + u_char *key; + size_t keylen; + size_t i; + + if (!state) + panic("ah_hmac_sha1_init: what?"); + + state->sav = sav; + state->foo = (void *)_MALLOC(64 + 64 + sizeof(SHA1_CTX), + M_TEMP, M_NOWAIT); + if (!state->foo) + panic("ah_hmac_sha1_init: what?"); + + ipad = (u_char *)state->foo; + opad = (u_char *)(ipad + 64); + ctxt = (SHA1_CTX *)(opad + 64); + + /* compress the key if necessery */ + if (64 < _KEYLEN(state->sav->key_auth)) { + SHA1Init(ctxt); + SHA1Update(ctxt, _KEYBUF(state->sav->key_auth), + _KEYLEN(state->sav->key_auth)); + SHA1Final(&tk[0], ctxt); + key = &tk[0]; + keylen = SHA1_RESULTLEN; + } else { + key = _KEYBUF(state->sav->key_auth); + keylen = _KEYLEN(state->sav->key_auth); + } + + bzero(ipad, 64); + bzero(opad, 64); + bcopy(key, ipad, keylen); + bcopy(key, opad, keylen); + for (i = 0; i < 64; i++) { + ipad[i] ^= 0x36; + opad[i] ^= 0x5c; + } + + SHA1Init(ctxt); + SHA1Update(ctxt, ipad, 64); +} + +static void +ah_hmac_sha1_loop(state, addr, len) + struct ah_algorithm_state *state; + caddr_t addr; + size_t len; +{ + SHA1_CTX *ctxt; + + if (!state || !state->foo) + panic("ah_hmac_sha1_loop: what?"); + + ctxt = (SHA1_CTX *)(((u_char *)state->foo) + 128); + SHA1Update(ctxt, (caddr_t)addr, (size_t)len); +} + +static void +ah_hmac_sha1_result(state, 
addr) + struct ah_algorithm_state *state; + caddr_t addr; +{ + u_char digest[SHA1_RESULTLEN]; /* SHA-1 generates 160 bits */ + u_char *ipad; + u_char *opad; + SHA1_CTX *ctxt; + + if (!state || !state->foo) + panic("ah_hmac_sha1_result: what?"); + + ipad = (u_char *)state->foo; + opad = (u_char *)(ipad + 64); + ctxt = (SHA1_CTX *)(opad + 64); + + SHA1Final((caddr_t)&digest[0], ctxt); + + SHA1Init(ctxt); + SHA1Update(ctxt, opad, 64); + SHA1Update(ctxt, (caddr_t)&digest[0], sizeof(digest)); + SHA1Final((caddr_t)&digest[0], ctxt); + + bcopy(&digest[0], (void *)addr, HMACSIZE); + + _FREE(state->foo, M_TEMP); +} + +/*------------------------------------------------------------*/ + +/* + * go generate the checksum. + */ +static void +ah_update_mbuf(m, off, len, algo, algos) + struct mbuf *m; + int off; + int len; + struct ah_algorithm *algo; + struct ah_algorithm_state *algos; +{ + struct mbuf *n; + int tlen; + + /* easy case first */ + if (off + len <= m->m_len) { + (algo->update)(algos, mtod(m, caddr_t) + off, len); + return; + } + + for (n = m; n; n = n->m_next) { + if (off < n->m_len) + break; + + off -= n->m_len; + } + + if (!n) + panic("ah_update_mbuf: wrong offset specified"); + + for (/*nothing*/; n && len > 0; n = n->m_next) { + if (n->m_len == 0) + continue; + if (n->m_len - off < len) + tlen = n->m_len - off; + else + tlen = len; + + (algo->update)(algos, mtod(n, caddr_t) + off, tlen); + + len -= tlen; + off = 0; + } +} + +/* + * Go generate the checksum. This function won't modify the mbuf chain + * except AH itself. + * + * NOTE: the function does not free mbuf on failure. + * Don't use m_copy(), it will try to share cluster mbuf by using refcnt. 
+ */ +int +ah4_calccksum(m, ahdat, algo, sav) + struct mbuf *m; + caddr_t ahdat; + struct ah_algorithm *algo; + struct secasvar *sav; +{ + int off; + int hdrtype; + size_t advancewidth; + struct ah_algorithm_state algos; + u_char sumbuf[AH_MAXSUMSIZE]; + int error = 0; + int ahseen; + struct mbuf *n = NULL; + + if ((m->m_flags & M_PKTHDR) == 0) + return EINVAL; + + ahseen = 0; + hdrtype = -1; /*dummy, it is called IPPROTO_IP*/ + + off = 0; + + (algo->init)(&algos, sav); + + advancewidth = 0; /*safety*/ + +again: + /* gory. */ + switch (hdrtype) { + case -1: /*first one only*/ + { + /* + * copy ip hdr, modify to fit the AH checksum rule, + * then take a checksum. + */ + struct ip iphdr; + size_t hlen; + + m_copydata(m, off, sizeof(iphdr), (caddr_t)&iphdr); +#ifdef _IP_VHL + hlen = IP_VHL_HL(iphdr.ip_vhl) << 2; +#else + hlen = iphdr.ip_hl << 2; +#endif + iphdr.ip_ttl = 0; + iphdr.ip_sum = htons(0); + if (ip4_ah_cleartos) + iphdr.ip_tos = 0; + iphdr.ip_off = htons(ntohs(iphdr.ip_off) & ip4_ah_offsetmask); + (algo->update)(&algos, (caddr_t)&iphdr, sizeof(struct ip)); + + if (hlen != sizeof(struct ip)) { + u_char *p; + int i, l, skip; + + if (hlen > MCLBYTES) { + error = EMSGSIZE; + goto fail; + } + MGET(n, M_DONTWAIT, MT_DATA); + if (n && hlen > MLEN) { + MCLGET(n, M_DONTWAIT); + if ((n->m_flags & M_EXT) == 0) { + m_free(n); + n = NULL; + } + } + if (n == NULL) { + error = ENOBUFS; + goto fail; + } + m_copydata(m, off, hlen, mtod(n, caddr_t)); + + /* + * IP options processing. + * See RFC2402 appendix A. 
+ */ + p = mtod(n, u_char *); + i = sizeof(struct ip); + while (i < hlen) { + skip = 1; + switch (p[i + IPOPT_OPTVAL]) { + case IPOPT_EOL: + case IPOPT_NOP: + l = 1; + skip = 0; + break; + case IPOPT_SECURITY: /* 0x82 */ + case 0x85: /* Extended security */ + case 0x86: /* Commercial security */ + case 0x94: /* Router alert */ + case 0x95: /* RFC1770 */ + l = p[i + IPOPT_OLEN]; + skip = 0; + break; + default: + l = p[i + IPOPT_OLEN]; + skip = 1; + break; + } + if (l <= 0 || hlen - i < l) { + ipseclog((LOG_ERR, + "ah4_calccksum: invalid IP option " + "(type=%02x len=%02x)\n", + p[i + IPOPT_OPTVAL], + p[i + IPOPT_OLEN])); + m_free(n); + n = NULL; + error = EINVAL; + goto fail; + } + if (skip) + bzero(p + i, l); + if (p[i + IPOPT_OPTVAL] == IPOPT_EOL) + break; + i += l; + } + p = mtod(n, u_char *) + sizeof(struct ip); + (algo->update)(&algos, p, hlen - sizeof(struct ip)); + + m_free(n); + n = NULL; + } + + hdrtype = (iphdr.ip_p) & 0xff; + advancewidth = hlen; + break; + } + + case IPPROTO_AH: + { + struct ah ah; + int siz; + int hdrsiz; + int totlen; + + m_copydata(m, off, sizeof(ah), (caddr_t)&ah); + hdrsiz = (sav->flags & SADB_X_EXT_OLD) + ? 
sizeof(struct ah) + : sizeof(struct newah); + siz = (*algo->sumsiz)(sav); + totlen = (ah.ah_len + 2) << 2; + + /* + * special treatment is necessary for the first one, not others + */ + if (!ahseen) { + if (totlen > m->m_pkthdr.len - off || + totlen > MCLBYTES) { + error = EMSGSIZE; + goto fail; + } + MGET(n, M_DONTWAIT, MT_DATA); + if (n && totlen > MLEN) { + MCLGET(n, M_DONTWAIT); + if ((n->m_flags & M_EXT) == 0) { + m_free(n); + n = NULL; + } + } + if (n == NULL) { + error = ENOBUFS; + goto fail; + } + m_copydata(m, off, totlen, mtod(n, caddr_t)); + n->m_len = totlen; + bzero(mtod(n, caddr_t) + hdrsiz, siz); + (algo->update)(&algos, mtod(n, caddr_t), n->m_len); + m_free(n); + n = NULL; + } else + ah_update_mbuf(m, off, totlen, algo, &algos); + ahseen++; + + hdrtype = ah.ah_nxt; + advancewidth = totlen; + break; + } + + default: + ah_update_mbuf(m, off, m->m_pkthdr.len - off, algo, &algos); + advancewidth = m->m_pkthdr.len - off; + break; + } + + off += advancewidth; + if (off < m->m_pkthdr.len) + goto again; + + (algo->result)(&algos, &sumbuf[0]); + bcopy(&sumbuf[0], ahdat, (*algo->sumsiz)(sav)); + + if (n) + m_free(n); + return error; + +fail: + if (n) + m_free(n); + return error; +} + +#if INET6 +/* + * Go generate the checksum. This function won't modify the mbuf chain + * except AH itself. + * + * NOTE: the function does not free mbuf on failure. + * Don't use m_copy(), it will try to share cluster mbuf by using refcnt. 
+ */ +int +ah6_calccksum(m, ahdat, algo, sav) + struct mbuf *m; + caddr_t ahdat; + struct ah_algorithm *algo; + struct secasvar *sav; +{ + int newoff, off; + int proto, nxt; + struct mbuf *n = NULL; + int error; + int ahseen; + struct ah_algorithm_state algos; + u_char sumbuf[AH_MAXSUMSIZE]; + + if ((m->m_flags & M_PKTHDR) == 0) + return EINVAL; + + (algo->init)(&algos, sav); + + off = 0; + proto = IPPROTO_IPV6; + nxt = -1; + ahseen = 0; + + again: + newoff = ip6_nexthdr(m, off, proto, &nxt); + if (newoff < 0) + newoff = m->m_pkthdr.len; + else if (newoff <= off) { + error = EINVAL; + goto fail; + } + + switch (proto) { + case IPPROTO_IPV6: + /* + * special treatment is necessary for the first one, not others + */ + if (off == 0) { + struct ip6_hdr ip6copy; + + if (newoff - off != sizeof(struct ip6_hdr)) { + error = EINVAL; + goto fail; + } + + m_copydata(m, off, newoff - off, (caddr_t)&ip6copy); + /* RFC2402 */ + ip6copy.ip6_flow = 0; + ip6copy.ip6_vfc &= ~IPV6_VERSION_MASK; + ip6copy.ip6_vfc |= IPV6_VERSION; + ip6copy.ip6_hlim = 0; + if (IN6_IS_ADDR_LINKLOCAL(&ip6copy.ip6_src)) + ip6copy.ip6_src.s6_addr16[1] = 0x0000; + if (IN6_IS_ADDR_LINKLOCAL(&ip6copy.ip6_dst)) + ip6copy.ip6_dst.s6_addr16[1] = 0x0000; + (algo->update)(&algos, (caddr_t)&ip6copy, + sizeof(struct ip6_hdr)); + } else { + newoff = m->m_pkthdr.len; + ah_update_mbuf(m, off, m->m_pkthdr.len - off, algo, + &algos); + } + break; + + case IPPROTO_AH: + { + int siz; + int hdrsiz; + + hdrsiz = (sav->flags & SADB_X_EXT_OLD) + ? 
sizeof(struct ah) + : sizeof(struct newah); + siz = (*algo->sumsiz)(sav); + + /* + * special treatment is necessary for the first one, not others + */ + if (!ahseen) { + if (newoff - off > MCLBYTES) { + error = EMSGSIZE; + goto fail; + } + MGET(n, M_DONTWAIT, MT_DATA); + if (n && newoff - off > MLEN) { + MCLGET(n, M_DONTWAIT); + if ((n->m_flags & M_EXT) == 0) { + m_free(n); + n = NULL; + } + } + if (n == NULL) { + error = ENOBUFS; + goto fail; + } + m_copydata(m, off, newoff - off, mtod(n, caddr_t)); + n->m_len = newoff - off; + bzero(mtod(n, caddr_t) + hdrsiz, siz); + (algo->update)(&algos, mtod(n, caddr_t), n->m_len); + m_free(n); + n = NULL; + } else + ah_update_mbuf(m, off, newoff - off, algo, &algos); + ahseen++; + break; + } + + case IPPROTO_HOPOPTS: + case IPPROTO_DSTOPTS: + { + struct ip6_ext *ip6e; + int hdrlen, optlen; + u_int8_t *p, *optend, *optp; + + if (newoff - off > MCLBYTES) { + error = EMSGSIZE; + goto fail; + } + MGET(n, M_DONTWAIT, MT_DATA); + if (n && newoff - off > MLEN) { + MCLGET(n, M_DONTWAIT); + if ((n->m_flags & M_EXT) == 0) { + m_free(n); + n = NULL; + } + } + if (n == NULL) { + error = ENOBUFS; + goto fail; + } + m_copydata(m, off, newoff - off, mtod(n, caddr_t)); + n->m_len = newoff - off; + + ip6e = mtod(n, struct ip6_ext *); + hdrlen = (ip6e->ip6e_len + 1) << 3; + if (newoff - off < hdrlen) { + error = EINVAL; + m_free(n); + n = NULL; + goto fail; + } + p = mtod(n, u_int8_t *); + optend = p + hdrlen; + + /* + * ICV calculation for the options header including all + * options. This part is a little tricky since there are + * two type of options; mutable and immutable. We try to + * null-out mutable ones here. 
+ */ + optp = p + 2; + while (optp < optend) { + if (optp[0] == IP6OPT_PAD1) + optlen = 1; + else { + if (optp + 2 > optend) { + error = EINVAL; + m_free(n); + n = NULL; + goto fail; + } + optlen = optp[1] + 2; + + if (optp[0] & IP6OPT_MUTABLE) + bzero(optp + 2, optlen - 2); + } + + optp += optlen; + } + + (algo->update)(&algos, mtod(n, caddr_t), n->m_len); + m_free(n); + n = NULL; + break; + } + + case IPPROTO_ROUTING: + /* + * For an input packet, we can just calculate `as is'. + * For an output packet, we assume ip6_output have already + * made packet how it will be received at the final + * destination. + */ + /* FALLTHROUGH */ + + default: + ah_update_mbuf(m, off, newoff - off, algo, &algos); + break; + } + + if (newoff < m->m_pkthdr.len) { + proto = nxt; + off = newoff; + goto again; + } + + (algo->result)(&algos, &sumbuf[0]); + bcopy(&sumbuf[0], ahdat, (*algo->sumsiz)(sav)); + + /* just in case */ + if (n) + m_free(n); + return 0; +fail: + /* just in case */ + if (n) + m_free(n); + return error; +} +#endif diff --git a/bsd/netinet6/ah_input.c b/bsd/netinet6/ah_input.c new file mode 100644 index 000000000..0b76ad007 --- /dev/null +++ b/bsd/netinet6/ah_input.c @@ -0,0 +1,916 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC1826/2402 authentication header. + */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if INET6 +#include +#include +#include +#endif + +#include +#include +#include +#include +#include + + +#include + +#define IPLEN_FLIPPED + +#if INET +extern struct protosw inetsw[]; +#if defined(__bsdi__) || defined(__NetBSD__) +extern u_char ip_protox[]; +#endif + +void +ah4_input(struct mbuf *m, int off) +{ + struct ip *ip; + struct ah *ah; + u_int32_t spi; + struct ah_algorithm *algo; + size_t siz; + size_t siz1; + u_char *cksum; + struct secasvar *sav = NULL; + u_int16_t nxt; + size_t hlen; + int s; + +#ifndef PULLDOWN_TEST + if (m->m_len < off + sizeof(struct newah)) { + m = m_pullup(m, off + sizeof(struct newah)); + if (!m) { + ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup;" + "dropping the packet for simplicity\n")); + ipsecstat.in_inval++; + goto fail; + } + } + + ip = mtod(m, struct ip *); 
+ ah = (struct ah *)(((caddr_t)ip) + off); +#else + ip = mtod(m, struct ip *); + IP6_EXTHDR_GET(ah, struct ah *, m, off, sizeof(struct newah)); + if (ah == NULL) { + ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup;" + "dropping the packet for simplicity\n")); + ipsecstat.in_inval++; + goto fail; + } +#endif + nxt = ah->ah_nxt; +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + + /* find the sassoc. */ + spi = ah->ah_spi; + + if ((sav = key_allocsa(AF_INET, + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, + IPPROTO_AH, spi)) == 0) { + ipseclog((LOG_WARNING, + "IPv4 AH input: no key association found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsecstat.in_nosa++; + goto fail; + } + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ah4_input called to allocate SA:%p\n", sav)); + if (sav->state != SADB_SASTATE_MATURE + && sav->state != SADB_SASTATE_DYING) { + ipseclog((LOG_DEBUG, + "IPv4 AH input: non-mature/dying SA found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsecstat.in_badspi++; + goto fail; + } + if (sav->alg_auth == SADB_AALG_NONE) { + ipseclog((LOG_DEBUG, "IPv4 AH input: " + "unspecified authentication algorithm for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsecstat.in_badspi++; + goto fail; + } + + algo = &ah_algorithms[sav->alg_auth]; + + siz = (*algo->sumsiz)(sav); + siz1 = ((siz + 3) & ~(4 - 1)); + + /* + * sanity checks for header, 1. + */ + { + int sizoff; + + sizoff = (sav->flags & SADB_X_EXT_OLD) ? 
0 : 4; + + if ((ah->ah_len << 2) - sizoff != siz1) { + ipseclog((LOG_NOTICE, "sum length mismatch in IPv4 AH input " + "(%d should be %u): %s\n", + (ah->ah_len << 2) - sizoff, (unsigned int)siz1, + ipsec4_logpacketstr(ip, spi))); + ipsecstat.in_inval++; + goto fail; + } + +#ifndef PULLDOWN_TEST + if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) { + m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1); + if (!m) { + ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n")); + ipsecstat.in_inval++; + goto fail; + } + + ip = mtod(m, struct ip *); + ah = (struct ah *)(((caddr_t)ip) + off); + } +#else + IP6_EXTHDR_GET(ah, struct ah *, m, off, + sizeof(struct ah) + sizoff + siz1); + if (ah == NULL) { + ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n")); + ipsecstat.in_inval++; + goto fail; + } +#endif + } + + /* + * check for sequence number. + */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { + if (ipsec_chkreplay(ntohl(((struct newah *)ah)->ah_seq), sav)) + ; /*okey*/ + else { + ipsecstat.in_ahreplay++; + ipseclog((LOG_WARNING, + "replay packet in IPv4 AH input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + goto fail; + } + } + + /* + * alright, it seems sane. now we are going to check the + * cryptographic checksum. + */ + cksum = _MALLOC(siz1, M_TEMP, M_NOWAIT); + if (!cksum) { + ipseclog((LOG_DEBUG, "IPv4 AH input: " + "couldn't alloc temporary region for cksum\n")); + ipsecstat.in_inval++; + goto fail; + } + + { +#if 1 + /* + * some of IP header fields are flipped to the host endian. + * convert them back to network endian. VERY stupid. + */ +#ifndef __NetBSD__ + ip->ip_len = htons(ip->ip_len + hlen); + ip->ip_id = htons(ip->ip_id); +#else + ip->ip_len = htons(ip->ip_len); +#endif + ip->ip_off = htons(ip->ip_off); +#endif + if (ah4_calccksum(m, (caddr_t)cksum, algo, sav)) { + _FREE(cksum, M_TEMP); + ipsecstat.in_inval++; + goto fail; + } + ipsecstat.in_ahhist[sav->alg_auth]++; +#if 1 + /* + * flip them back. 
+ */ +#ifndef __NetBSD__ + ip->ip_len = ntohs(ip->ip_len) - hlen; + ip->ip_id = ntohs(ip->ip_id); +#else + ip->ip_len = ntohs(ip->ip_len); +#endif + ip->ip_off = ntohs(ip->ip_off); +#endif + } + + { + caddr_t sumpos = NULL; + + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + sumpos = (caddr_t)(ah + 1); + } else { + /* RFC 2402 */ + sumpos = (caddr_t)(((struct newah *)ah) + 1); + } + + if (bcmp(sumpos, cksum, siz) != 0) { + ipseclog((LOG_WARNING, + "checksum mismatch in IPv4 AH input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + _FREE(cksum, M_TEMP); + ipsecstat.in_ahauthfail++; + goto fail; + } + } + + _FREE(cksum, M_TEMP); + + m->m_flags |= M_AUTHIPHDR; + m->m_flags |= M_AUTHIPDGM; + +#if 0 + /* + * looks okey, but we need more sanity check. + * XXX should elaborate. + */ + if (ah->ah_nxt == IPPROTO_IPIP || ah->ah_nxt == IPPROTO_IP) { + struct ip *nip; + size_t sizoff; + + sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; + + if (m->m_len < off + sizeof(struct ah) + sizoff + siz1 + hlen) { + m = m_pullup(m, off + sizeof(struct ah) + + sizoff + siz1 + hlen); + if (!m) { + ipseclog((LOG_DEBUG, + "IPv4 AH input: can't pullup\n")); + ipsecstat.in_inval++; + goto fail; + } + } + + nip = (struct ip *)((u_char *)(ah + 1) + sizoff + siz1); + if (nip->ip_src.s_addr != ip->ip_src.s_addr + || nip->ip_dst.s_addr != ip->ip_dst.s_addr) { + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; + } + } +#if INET6 + else if (ah->ah_nxt == IPPROTO_IPV6) { + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; + } +#endif /*INET6*/ +#endif /*0*/ + + if (m->m_flags & M_AUTHIPHDR + && m->m_flags & M_AUTHIPDGM) { +#if 0 + ipseclog((LOG_DEBUG, + "IPv4 AH input: authentication succeess\n")); +#endif + ipsecstat.in_ahauthsucc++; + } else { + ipseclog((LOG_WARNING, + "authentication failed in IPv4 AH input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_ahauthfail++; + goto fail; + } + + /* + * update sequence number. 
+ */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { + if (ipsec_updatereplay(ntohl(((struct newah *)ah)->ah_seq), sav)) { + ipsecstat.in_ahreplay++; + goto fail; + } + } + + /* was it transmitted over the IPsec tunnel SA? */ + if (ipsec4_tunnel_validate(ip, nxt, sav) && nxt == IPPROTO_IPV4) { + /* + * strip off all the headers that precedes AH. + * IP xx AH IP' payload -> IP' payload + * + * XXX more sanity checks + * XXX relationship with gif? + */ + size_t stripsiz = 0; + u_int8_t tos; + + tos = ip->ip_tos; + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + stripsiz = sizeof(struct ah) + siz1; + } else { + /* RFC 2402 */ + stripsiz = sizeof(struct newah) + siz1; + } + m_adj(m, off + stripsiz); + if (m->m_len < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) { + ipsecstat.in_inval++; + goto fail; + } + } + ip = mtod(m, struct ip *); + /* ECN consideration. */ + ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos); + if (!key_checktunnelsanity(sav, AF_INET, + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { + ipseclog((LOG_NOTICE, "ipsec tunnel address mismatch " + "in IPv4 AH input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_inval++; + goto fail; + } + +#if 0 /* XXX should we call ipfw rather than ipsec_in_reject? */ + /* drop it if it does not match the default policy */ + if (ipsec4_in_reject(m, NULL)) { + ipsecstat.in_polvio++; + goto fail; + } +#endif + +#if 1 + /* + * Should the inner packet be considered authentic? + * My current answer is: NO. + * + * host1 -- gw1 === gw2 -- host2 + * In this case, gw2 can trust the authenticity of the + * outer packet, but NOT inner. Packet may be altered + * between host1 and gw1. + * + * host1 -- gw1 === host2 + * This case falls into the same scenario as above. + * + * host1 === host2 + * This case is the only case when we may be able to leave + * M_AUTHIPHDR and M_AUTHIPDGM set. 
+ * However, if host1 is wrongly configured, and allows + * attacker to inject some packet with src=host1 and + * dst=host2, you are in risk. + */ + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; +#endif + + key_sa_recordxfer(sav, m); + + s = splimp(); + if (IF_QFULL(&ipintrq)) { + ipsecstat.in_inval++; + goto fail; + } + IF_ENQUEUE(&ipintrq, m); + m = NULL; + schednetisr(NETISR_IP); /*can be skipped but to make sure*/ + splx(s); + nxt = IPPROTO_DONE; + } else { + /* + * strip off AH. + * We do deep-copy since KAME requires that + * the packet is placed in a single external mbuf. + */ + size_t stripsiz = 0; + + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + stripsiz = sizeof(struct ah) + siz1; + } else { + /* RFC 2402 */ + stripsiz = sizeof(struct newah) + siz1; + } + + ip = mtod(m, struct ip *); +#ifndef PULLDOWN_TEST + ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; +#else + /* + * even in m_pulldown case, we need to strip off AH so that + * we can compute checksum for multiple AH correctly. 
+ */ + if (m->m_len >= stripsiz + off) { + ovbcopy((caddr_t)ip, ((caddr_t)ip) + stripsiz, off); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; + } else { + /* + * this comes with no copy if the boundary is on + * cluster + */ + struct mbuf *n; + + n = m_split(m, off, M_DONTWAIT); + if (n == NULL) { + /* m is retained by m_split */ + goto fail; + } + m_adj(n, stripsiz); + m_cat(m, n); + /* m_cat does not update m_pkthdr.len */ + m->m_pkthdr.len += n->m_pkthdr.len; + } +#endif + + if (m->m_len < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (m == NULL) { + ipsecstat.in_inval++; + goto fail; + } + } + ip = mtod(m, struct ip *); +#ifdef IPLEN_FLIPPED + ip->ip_len = ip->ip_len - stripsiz; +#else + ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz); +#endif + ip->ip_p = nxt; + /* forget about IP hdr checksum, the check has already been passed */ + + key_sa_recordxfer(sav, m); + + if (nxt != IPPROTO_DONE) + (*ip_protox[nxt]->pr_input)(m, off); + else + m_freem(m); + m = NULL; + } + + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ah4_input call free SA:%p\n", sav)); + key_freesav(sav); + } + ipsecstat.in_success++; + return; + +fail: + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ah4_input call free SA:%p\n", sav)); + key_freesav(sav); + } + if (m) + m_freem(m); + return; +} +#endif /* INET */ + +#if INET6 +int +ah6_input(mp, offp, proto) + struct mbuf **mp; + int *offp, proto; +{ + struct mbuf *m = *mp; + int off = *offp; + struct ip6_hdr *ip6; + struct ah *ah; + u_int32_t spi; + struct ah_algorithm *algo; + size_t siz; + size_t siz1; + u_char *cksum; + struct secasvar *sav = NULL; + u_int16_t nxt; + int s; + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, sizeof(struct ah), IPPROTO_DONE); + ah = (struct ah *)(mtod(m, caddr_t) + off); +#else + IP6_EXTHDR_GET(ah, struct ah *, m, off, sizeof(struct newah)); + if (ah == NULL) { + ipseclog((LOG_DEBUG, "IPv6 AH input: can't pullup\n")); + ipsecstat.in_inval++; + 
return IPPROTO_DONE; + } +#endif + ip6 = mtod(m, struct ip6_hdr *); + nxt = ah->ah_nxt; + + /* find the sassoc. */ + spi = ah->ah_spi; + + if (ntohs(ip6->ip6_plen) == 0) { + ipseclog((LOG_ERR, "IPv6 AH input: " + "AH with IPv6 jumbogram is not supported.\n")); + ipsec6stat.in_inval++; + goto fail; + } + + if ((sav = key_allocsa(AF_INET6, + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, + IPPROTO_AH, spi)) == 0) { + ipseclog((LOG_WARNING, + "IPv6 AH input: no key association found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsec6stat.in_nosa++; + goto fail; + } + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ah6_input called to allocate SA:%p\n", sav)); + if (sav->state != SADB_SASTATE_MATURE + && sav->state != SADB_SASTATE_DYING) { + ipseclog((LOG_DEBUG, + "IPv6 AH input: non-mature/dying SA found for spi %u; ", + (u_int32_t)ntohl(spi))); + ipsec6stat.in_badspi++; + goto fail; + } + if (sav->alg_auth == SADB_AALG_NONE) { + ipseclog((LOG_DEBUG, "IPv6 AH input: " + "unspecified authentication algorithm for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsec6stat.in_badspi++; + goto fail; + } + + algo = &ah_algorithms[sav->alg_auth]; + + siz = (*algo->sumsiz)(sav); + siz1 = ((siz + 3) & ~(4 - 1)); + + /* + * sanity checks for header, 1. + */ + { + int sizoff; + + sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; + + if ((ah->ah_len << 2) - sizoff != siz1) { + ipseclog((LOG_NOTICE, "sum length mismatch in IPv6 AH input " + "(%d should be %u): %s\n", + (ah->ah_len << 2) - sizoff, (unsigned int)siz1, + ipsec6_logpacketstr(ip6, spi))); + ipsec6stat.in_inval++; + goto fail; + } +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1, IPPROTO_DONE); +#else + IP6_EXTHDR_GET(ah, struct ah *, m, off, + sizeof(struct ah) + sizoff + siz1); + if (ah == NULL) { + ipseclog((LOG_NOTICE, "couldn't pullup gather IPv6 AH checksum part")); + ipsecstat.in_inval++; + m = NULL; + goto fail; + } +#endif + } + + /* + * check for sequence number. 
+ */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { + if (ipsec_chkreplay(ntohl(((struct newah *)ah)->ah_seq), sav)) + ; /*okey*/ + else { + ipsec6stat.in_ahreplay++; + ipseclog((LOG_WARNING, + "replay packet in IPv6 AH input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), + ipsec_logsastr(sav))); + goto fail; + } + } + + /* + * alright, it seems sane. now we are going to check the + * cryptographic checksum. + */ + cksum = _MALLOC(siz1, M_TEMP, M_NOWAIT); + if (!cksum) { + ipseclog((LOG_DEBUG, "IPv6 AH input: " + "couldn't alloc temporary region for cksum\n")); + ipsec6stat.in_inval++; + goto fail; + } + + if (ah6_calccksum(m, (caddr_t)cksum, algo, sav)) { + _FREE(cksum, M_TEMP); + ipsec6stat.in_inval++; + goto fail; + } + ipsec6stat.in_ahhist[sav->alg_auth]++; + + { + caddr_t sumpos = NULL; + + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + sumpos = (caddr_t)(ah + 1); + } else { + /* RFC 2402 */ + sumpos = (caddr_t)(((struct newah *)ah) + 1); + } + + if (bcmp(sumpos, cksum, siz) != 0) { + ipseclog((LOG_WARNING, + "checksum mismatch in IPv6 AH input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + _FREE(cksum, M_TEMP); + ipsec6stat.in_ahauthfail++; + goto fail; + } + } + + _FREE(cksum, M_TEMP); + + m->m_flags |= M_AUTHIPHDR; + m->m_flags |= M_AUTHIPDGM; + +#if 0 + /* + * looks okey, but we need more sanity check. + * XXX should elaborate. + */ + if (ah->ah_nxt == IPPROTO_IPV6) { + struct ip6_hdr *nip6; + size_t sizoff; + + sizoff = (sav->flags & SADB_X_EXT_OLD) ? 
0 : 4; + + IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1 + + sizeof(struct ip6_hdr), IPPROTO_DONE); + + nip6 = (struct ip6_hdr *)((u_char *)(ah + 1) + sizoff + siz1); + if (!IN6_ARE_ADDR_EQUAL(&nip6->ip6_src, &ip6->ip6_src) + || !IN6_ARE_ADDR_EQUAL(&nip6->ip6_dst, &ip6->ip6_dst)) { + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; + } + } else if (ah->ah_nxt == IPPROTO_IPIP) { + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; + } else if (ah->ah_nxt == IPPROTO_IP) { + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; + } +#endif + + if (m->m_flags & M_AUTHIPHDR + && m->m_flags & M_AUTHIPDGM) { +#if 0 + ipseclog((LOG_DEBUG, + "IPv6 AH input: authentication succeess\n")); +#endif + ipsec6stat.in_ahauthsucc++; + } else { + ipseclog((LOG_WARNING, + "authentication failed in IPv6 AH input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + ipsec6stat.in_ahauthfail++; + goto fail; + } + + /* + * update sequence number. + */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { + if (ipsec_updatereplay(ntohl(((struct newah *)ah)->ah_seq), sav)) { + ipsec6stat.in_ahreplay++; + goto fail; + } + } + + /* was it transmitted over the IPsec tunnel SA? */ + if (ipsec6_tunnel_validate(ip6, nxt, sav) && nxt == IPPROTO_IPV6) { + /* + * strip off all the headers that precedes AH. + * IP6 xx AH IP6' payload -> IP6' payload + * + * XXX more sanity checks + * XXX relationship with gif? + */ + size_t stripsiz = 0; + u_int32_t flowinfo; /*net endian*/ + + flowinfo = ip6->ip6_flow; + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + stripsiz = sizeof(struct ah) + siz1; + } else { + /* RFC 2402 */ + stripsiz = sizeof(struct newah) + siz1; + } + m_adj(m, off + stripsiz); + if (m->m_len < sizeof(*ip6)) { + /* + * m_pullup is prohibited in KAME IPv6 input processing + * but there's no other way! 
+ */ + m = m_pullup(m, sizeof(*ip6)); + if (!m) { + ipsec6stat.in_inval++; + goto fail; + } + } + ip6 = mtod(m, struct ip6_hdr *); + /* ECN consideration. */ + ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow); + if (!key_checktunnelsanity(sav, AF_INET6, + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { + ipseclog((LOG_NOTICE, "ipsec tunnel address mismatch " + "in IPv6 AH input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), + ipsec_logsastr(sav))); + ipsec6stat.in_inval++; + goto fail; + } + +#if 0 /* XXX should we call ipfw rather than ipsec_in_reject? */ + /* drop it if it does not match the default policy */ + if (ipsec6_in_reject(m, NULL)) { + ipsec6stat.in_polvio++; + goto fail; + } +#endif + +#if 1 + /* + * should the inner packet be considered authentic? + * see comment in ah4_input(). + */ + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; +#endif + + key_sa_recordxfer(sav, m); + + s = splimp(); + if (IF_QFULL(&ip6intrq)) { + ipsec6stat.in_inval++; + goto fail; + } + IF_ENQUEUE(&ip6intrq, m); + m = NULL; + schednetisr(NETISR_IPV6); /*can be skipped but to make sure*/ + splx(s); + nxt = IPPROTO_DONE; + } else { + /* + * strip off AH. + * We do deep-copy since KAME requires that + * the packet is placed in a single mbuf. + */ + size_t stripsiz = 0; + char *prvnxtp; + + /* + * Copy the value of the next header field of AH to the + * next header field of the previous header. + * This is necessary because AH will be stripped off below. 
+ */ + prvnxtp = ip6_get_prevhdr(m, off); /* XXX */ + *prvnxtp = nxt; + + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + stripsiz = sizeof(struct ah) + siz1; + } else { + /* RFC 2402 */ + stripsiz = sizeof(struct newah) + siz1; + } + + ip6 = mtod(m, struct ip6_hdr *); +#ifndef PULLDOWN_TEST + ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; +#else + /* + * even in m_pulldown case, we need to strip off AH so that + * we can compute checksum for multiple AH correctly. + */ + if (m->m_len >= stripsiz + off) { + ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; + } else { + /* + * this comes with no copy if the boundary is on + * cluster + */ + struct mbuf *n; + + n = m_split(m, off, M_DONTWAIT); + if (n == NULL) { + /* m is retained by m_split */ + goto fail; + } + m_adj(n, stripsiz); + m_cat(m, n); + /* m_cat does not update m_pkthdr.len */ + m->m_pkthdr.len += n->m_pkthdr.len; + } +#endif + ip6 = mtod(m, struct ip6_hdr *); + /* XXX jumbogram */ + ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz); + + key_sa_recordxfer(sav, m); + } + + *offp = off; + *mp = m; + + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ah6_input call free SA:%p\n", sav)); + key_freesav(sav); + } + ipsec6stat.in_success++; + return nxt; + +fail: + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ah6_input call free SA:%p\n", sav)); + key_freesav(sav); + } + if (m) + m_freem(m); + return IPPROTO_DONE; +} +#endif /* INET6 */ diff --git a/bsd/netinet6/ah_output.c b/bsd/netinet6/ah_output.c new file mode 100644 index 000000000..e8c81b01f --- /dev/null +++ b/bsd/netinet6/ah_output.c @@ -0,0 +1,558 @@ +/* $KAME: ah_output.c,v 1.17 2000/03/09 08:54:48 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC1826/2402 authentication header. 
+ */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + +#if INET6 +#include +#include +#include +#endif + +#include +#include +#include +#include +#include + +#include + +static struct in_addr *ah4_finaldst __P((struct mbuf *)); + +/* + * compute AH header size. + * transport mode only. for tunnel mode, we should implement + * virtual interface, and control MTU/MSS by the interface MTU. + */ +size_t +ah_hdrsiz(isr) + struct ipsecrequest *isr; +{ + struct ah_algorithm *algo; + size_t hdrsiz; + + /* sanity check */ + if (isr == NULL) + panic("ah_hdrsiz: NULL was passed.\n"); + + if (isr->saidx.proto != IPPROTO_AH) + panic("unsupported mode passed to ah_hdrsiz"); + + if (isr->sav == NULL) + goto estimate; + if (isr->sav->state != SADB_SASTATE_MATURE + && isr->sav->state != SADB_SASTATE_DYING) + goto estimate; + + /* we need transport mode AH. */ + algo = &ah_algorithms[isr->sav->alg_auth]; + if (!algo) + goto estimate; + + /* + * XXX + * right now we don't calcurate the padding size. simply + * treat the padding size as constant, for simplicity. + * + * XXX variable size padding support + */ + hdrsiz = (((*algo->sumsiz)(isr->sav) + 3) & ~(4 - 1)); + if (isr->sav->flags & SADB_X_EXT_OLD) + hdrsiz += sizeof(struct ah); + else + hdrsiz += sizeof(struct newah); + + return hdrsiz; + + estimate: + /* ASSUMING: + * sizeof(struct newah) > sizeof(struct ah). + * 16 = (16 + 3) & ~(4 - 1). + */ + return sizeof(struct newah) + 16; +} + +/* + * Modify the packet so that it includes the authentication data. + * The mbuf passed must start with IPv4 header. + * + * assumes that the first mbuf contains IPv4 header + option only. + * the function does not modify m. 
+ */ +int +ah4_output(m, isr) + struct mbuf *m; + struct ipsecrequest *isr; +{ + struct secasvar *sav = isr->sav; + struct ah_algorithm *algo; + u_int32_t spi; + u_char *ahdrpos; + u_char *ahsumpos = NULL; + size_t hlen = 0; /*IP header+option in bytes*/ + size_t plen = 0; /*AH payload size in bytes*/ + size_t ahlen = 0; /*plen + sizeof(ah)*/ + struct ip *ip; + struct in_addr dst; + struct in_addr *finaldst; + int error; + + /* sanity checks */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { + struct ip *ip; + + ip = mtod(m, struct ip *); + ipseclog((LOG_DEBUG, "ah4_output: internal error: " + "sav->replay is null: %x->%x, SPI=%u\n", + (u_int32_t)ntohl(ip->ip_src.s_addr), + (u_int32_t)ntohl(ip->ip_dst.s_addr), + (u_int32_t)ntohl(sav->spi))); + ipsecstat.out_inval++; + m_freem(m); + return EINVAL; + } + + algo = &ah_algorithms[sav->alg_auth]; + spi = sav->spi; + + /* + * determine the size to grow. + */ + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ + ahlen = plen + sizeof(struct ah); + } else { + /* RFC 2402 */ + plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ + ahlen = plen + sizeof(struct newah); + } + + /* + * grow the mbuf to accomodate AH. 
+ */ + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + + if (m->m_len != hlen) + panic("ah4_output: assumption failed (first mbuf length)"); + if (M_LEADINGSPACE(m->m_next) < ahlen) { + struct mbuf *n; + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + ipseclog((LOG_DEBUG, "ENOBUFS in ah4_output %d\n", + __LINE__)); + m_freem(m); + return ENOBUFS; + } + n->m_len = ahlen; + n->m_next = m->m_next; + m->m_next = n; + m->m_pkthdr.len += ahlen; + ahdrpos = mtod(n, u_char *); + } else { + m->m_next->m_len += ahlen; + m->m_next->m_data -= ahlen; + m->m_pkthdr.len += ahlen; + ahdrpos = mtod(m->m_next, u_char *); + } + + ip = mtod(m, struct ip *); /*just to be sure*/ + + /* + * initialize AH. + */ + if (sav->flags & SADB_X_EXT_OLD) { + struct ah *ahdr; + + ahdr = (struct ah *)ahdrpos; + ahsumpos = (u_char *)(ahdr + 1); + ahdr->ah_len = plen >> 2; + ahdr->ah_nxt = ip->ip_p; + ahdr->ah_reserve = htons(0); + ahdr->ah_spi = spi; + bzero(ahdr + 1, plen); + } else { + struct newah *ahdr; + + ahdr = (struct newah *)ahdrpos; + ahsumpos = (u_char *)(ahdr + 1); + ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ + ahdr->ah_nxt = ip->ip_p; + ahdr->ah_reserve = htons(0); + ahdr->ah_spi = spi; + if (sav->replay->count == ~0) { + if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { + /* XXX Is it noisy ? */ + ipseclog((LOG_WARNING, + "replay counter overflowed. %s\n", + ipsec_logsastr(sav))); + ipsecstat.out_inval++; + m_freem(m); + return EINVAL; + } + } + sav->replay->count++; + /* + * XXX sequence number must not be cycled, if the SA is + * installed by IKE daemon. + */ + ahdr->ah_seq = htonl(sav->replay->count); + bzero(ahdr + 1, plen); + } + + /* + * modify IPv4 header. 
+ */ + ip->ip_p = IPPROTO_AH; + if (ahlen < (IP_MAXPACKET - ntohs(ip->ip_len))) + ip->ip_len = htons(ntohs(ip->ip_len) + ahlen); + else { + ipseclog((LOG_ERR, "IPv4 AH output: size exceeds limit\n")); + ipsecstat.out_inval++; + m_freem(m); + return EMSGSIZE; + } + + /* + * If there is source routing option, update destination field in + * the IPv4 header to the final destination. + * Note that we do not need to update source routing option itself + * (as done in IPv4 AH processing -- see ip6_output()), since + * source routing option is not part of the ICV computation. + */ + finaldst = ah4_finaldst(m); + if (finaldst) { + dst.s_addr = ip->ip_dst.s_addr; + ip->ip_dst.s_addr = finaldst->s_addr; + } + + /* + * calcurate the checksum, based on security association + * and the algorithm specified. + */ + error = ah4_calccksum(m, (caddr_t)ahsumpos, algo, sav); + if (error) { + ipseclog((LOG_ERR, + "error after ah4_calccksum, called from ah4_output")); + m = NULL; + ipsecstat.out_inval++; + return error; + } + + if (finaldst) { + ip = mtod(m, struct ip *); /*just to make sure*/ + ip->ip_dst.s_addr = dst.s_addr; + } + ipsecstat.out_success++; + ipsecstat.out_ahhist[sav->alg_auth]++; + key_sa_recordxfer(sav, m); + + return 0; +} + +/* Calculate AH length */ +int +ah_hdrlen(sav) + struct secasvar *sav; +{ + struct ah_algorithm *algo; + int plen, ahlen; + + algo = &ah_algorithms[sav->alg_auth]; + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ + ahlen = plen + sizeof(struct ah); + } else { + /* RFC 2402 */ + plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ + ahlen = plen + sizeof(struct newah); + } + + return(ahlen); +} + +#if INET6 +/* + * Fill in the Authentication Header and calculate checksum. 
+ */ +int +ah6_output(m, nexthdrp, md, isr) + struct mbuf *m; + u_char *nexthdrp; + struct mbuf *md; + struct ipsecrequest *isr; +{ + struct mbuf *mprev; + struct mbuf *mah; + struct secasvar *sav = isr->sav; + struct ah_algorithm *algo; + u_int32_t spi; + u_char *ahsumpos = NULL; + size_t plen; /*AH payload size in bytes*/ + int error = 0; + int ahlen; + struct ip6_hdr *ip6; + + if (m->m_len < sizeof(struct ip6_hdr)) { + ipseclog((LOG_DEBUG, "ah6_output: first mbuf too short\n")); + m_freem(m); + return EINVAL; + } + + ahlen = ah_hdrlen(sav); + if (ahlen == 0) + return 0; + + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) + ; + if (!mprev || mprev->m_next != md) { + ipseclog((LOG_DEBUG, "ah6_output: md is not in chain\n")); + m_freem(m); + return EINVAL; + } + + MGET(mah, M_DONTWAIT, MT_DATA); + if (!mah) { + m_freem(m); + return ENOBUFS; + } + if (ahlen > MLEN) { + MCLGET(mah, M_DONTWAIT); + if ((mah->m_flags & M_EXT) == 0) { + m_free(mah); + m_freem(m); + return ENOBUFS; + } + } + mah->m_len = ahlen; + mah->m_next = md; + mprev->m_next = mah; + m->m_pkthdr.len += ahlen; + + /* fix plen */ + if (m->m_pkthdr.len - sizeof(struct ip6_hdr) > IPV6_MAXPACKET) { + ipseclog((LOG_ERR, + "ip6_output: AH with IPv6 jumbogram is not supported\n")); + m_freem(m); + return EINVAL; + } + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); + + if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { + ipseclog((LOG_DEBUG, "ah6_output: internal error: " + "sav->replay is null: SPI=%u\n", + (u_int32_t)ntohl(sav->spi))); + ipsec6stat.out_inval++; + m_freem(m); + return EINVAL; + } + + algo = &ah_algorithms[sav->alg_auth]; + spi = sav->spi; + + /* + * initialize AH. 
+ */ + if (sav->flags & SADB_X_EXT_OLD) { + struct ah *ahdr = mtod(mah, struct ah *); + + plen = mah->m_len - sizeof(struct ah); + ahsumpos = (u_char *)(ahdr + 1); + ahdr->ah_nxt = *nexthdrp; + *nexthdrp = IPPROTO_AH; + ahdr->ah_len = plen >> 2; + ahdr->ah_reserve = htons(0); + ahdr->ah_spi = spi; + bzero(ahdr + 1, plen); + } else { + struct newah *ahdr = mtod(mah, struct newah *); + + plen = mah->m_len - sizeof(struct newah); + ahsumpos = (u_char *)(ahdr + 1); + ahdr->ah_nxt = *nexthdrp; + *nexthdrp = IPPROTO_AH; + ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ + ahdr->ah_reserve = htons(0); + ahdr->ah_spi = spi; + if (sav->replay->count == ~0) { + if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { + /* XXX Is it noisy ? */ + ipseclog((LOG_WARNING, + "replay counter overflowed. %s\n", + ipsec_logsastr(sav))); + ipsecstat.out_inval++; + m_freem(m); + return EINVAL; + } + } + sav->replay->count++; + /* + * XXX sequence number must not be cycled, if the SA is + * installed by IKE daemon. + */ + ahdr->ah_seq = htonl(sav->replay->count); + bzero(ahdr + 1, plen); + } + + /* + * calcurate the checksum, based on security association + * and the algorithm specified. + */ + error = ah6_calccksum(m, (caddr_t)ahsumpos, algo, sav); + if (error) { + ipsec6stat.out_inval++; + m_freem(m); + } else { + ipsec6stat.out_success++; + key_sa_recordxfer(sav, m); + } + ipsec6stat.out_ahhist[sav->alg_auth]++; + + return(error); +} +#endif + +/* + * Find the final destination if there is loose/strict source routing option. + * Returns NULL if there's no source routing options. + * Returns NULL on errors too. + * Note that this function will return a pointer INTO the given parameter, + * struct mbuf *m. + * The mbuf must be pulled up toward, at least, ip option part. 
+ */ +static struct in_addr * +ah4_finaldst(m) + struct mbuf *m; +{ + struct ip *ip; + int optlen; + u_char *q; + int i; + int hlen; + + if (!m) + panic("ah4_finaldst: m == NULL"); + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + + if (m->m_len < hlen) { + ipseclog((LOG_DEBUG, + "ah4_finaldst: parameter mbuf wrong (not pulled up)\n")); + return NULL; + } + + if (hlen == sizeof(struct ip)) + return NULL; + + optlen = hlen - sizeof(struct ip); + if (optlen < 0) { + ipseclog((LOG_DEBUG, "ah4_finaldst: wrong optlen %d\n", + optlen)); + return NULL; + } + + q = (u_char *)(ip + 1); + i = 0; + while (i < optlen) { + switch (q[i + IPOPT_OPTVAL]) { + case IPOPT_EOL: + i = optlen; /* bye */ + break; + case IPOPT_NOP: + i++; + break; + case IPOPT_LSRR: + case IPOPT_SSRR: + if (q[i + IPOPT_OLEN] <= 0 + || optlen - i < q[i + IPOPT_OLEN]) { + ipseclog((LOG_ERR, + "ip_finaldst: invalid IP option " + "(code=%02x len=%02x)\n", + q[i + IPOPT_OPTVAL], q[i + IPOPT_OLEN])); + return NULL; + } + i += q[i + IPOPT_OLEN] - sizeof(struct in_addr); + return (struct in_addr *)(q + i); + default: + if (q[i + IPOPT_OLEN] <= 0 + || optlen - i < q[i + IPOPT_OLEN]) { + ipseclog((LOG_ERR, + "ip_finaldst: invalid IP option " + "(code=%02x len=%02x)\n", + q[i + IPOPT_OPTVAL], q[i + IPOPT_OLEN])); + return NULL; + } + i += q[i + IPOPT_OLEN]; + break; + } + } + return NULL; +} diff --git a/bsd/netinet6/dest6.c b/bsd/netinet6/dest6.c new file mode 100644 index 000000000..eca5f3ca9 --- /dev/null +++ b/bsd/netinet6/dest6.c @@ -0,0 +1,154 @@ +/* $KAME: dest6.c,v 1.10 2000/02/28 16:18:11 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#if !(defined(__APPLE__)) +#include +#endif +#include + +#if MIP6 +int (*mip6_store_dstopt_pre_hook)(struct mbuf *m, u_int8_t *opt, + u_int8_t off, u_int8_t dstlen) = NULL; +int (*mip6_rec_ctrl_sig_hook)(struct mbuf *m, int off) = NULL; +#endif /* MIP6 */ + +/* + * Destination options header processing. 
 */
/*
 * dest6_input: IPv6 Destination Options header input processing.
 *
 * *mp is the packet, *offp the offset of the destination options
 * header within it.  On success, *offp is advanced past the header and
 * the next-header value is returned so ip6_input can continue the
 * extension-header chain.  On any error the mbuf is freed (or already
 * consumed by the pulldown macros) and IPPROTO_DONE is returned.
 */
int
dest6_input(mp, offp, proto)
    struct mbuf **mp;
    int *offp, proto;
{
    register struct mbuf *m = *mp;
    int off = *offp, dstoptlen, optlen;
    struct ip6_dest *dstopts;
    u_int8_t *opt;

    /* validation of the length of the header */
#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, sizeof(*dstopts), IPPROTO_DONE);
    dstopts = (struct ip6_dest *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(dstopts, struct ip6_dest *, m, off, sizeof(*dstopts));
    if (dstopts == NULL)
        return IPPROTO_DONE;
#endif
    /* header length is expressed in 8-octet units, excluding the first */
    dstoptlen = (dstopts->ip6d_len + 1) << 3;

    /* re-check now that the real header length is known */
#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, dstoptlen, IPPROTO_DONE);
    dstopts = (struct ip6_dest *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(dstopts, struct ip6_dest *, m, off, dstoptlen);
    if (dstopts == NULL)
        return IPPROTO_DONE;
#endif
    off += dstoptlen;
    /* from here on, dstoptlen counts only the TLV option area */
    dstoptlen -= sizeof(struct ip6_dest);
    opt = (u_int8_t *)dstopts + sizeof(struct ip6_dest);

    /* search header for all options. */
    for (optlen = 0; dstoptlen > 0; dstoptlen -= optlen, opt += optlen) {
        switch (*opt) {
        case IP6OPT_PAD1:
            /* Pad1 is a lone type octet with no length byte */
            optlen = 1;
            break;
        case IP6OPT_PADN:
            if (dstoptlen < IP6OPT_MINLEN) {
                ip6stat.ip6s_toosmall++;
                goto bad;
            }
            optlen = *(opt + 1) + 2;
            break;

#if MIP6
        case IP6OPT_BINDING_UPDATE:
        case IP6OPT_BINDING_ACK:
        case IP6OPT_BINDING_REQ:
        case IP6OPT_HOME_ADDRESS:
            /*
             * Mobile IPv6 options are only recorded here; the hook
             * queues them and mip6_rec_ctrl_sig_hook processes them
             * once the whole header has been walked.
             * NOTE(review): the length byte *(opt + 1) is read
             * without a dstoptlen >= IP6OPT_MINLEN guard here,
             * unlike the PADN/default cases — confirm the hook
             * validates it.
             */
            if (mip6_store_dstopt_pre_hook) {
                if ((*mip6_store_dstopt_pre_hook)(m, opt, off, dstoptlen) != 0)
                    goto bad;
            }
            optlen = *(opt + 1) + 2;
            break;
#endif /* MIP6 */

        default:        /* unknown option */
            if (dstoptlen < IP6OPT_MINLEN) {
                ip6stat.ip6s_toosmall++;
                goto bad;
            }
            /*
             * ip6_unknown_opt applies the IP6OPT_TYPE action bits
             * (skip / discard / ICMP error); it may consume m.
             */
            if ((optlen = ip6_unknown_opt(opt, m,
                opt-mtod(m, u_int8_t *))) == -1)
                return(IPPROTO_DONE);
            optlen += 2;
            break;
        }
    }

#if MIP6
    if (mip6_rec_ctrl_sig_hook) {
        /*
         * All Destinations options have been processed. Call MIPv6 to
         * process stored options.
         */
        if ((*mip6_rec_ctrl_sig_hook)(m, *offp) != 0)
            return(IPPROTO_DONE);
    }
#endif /* MIP6 */

    *offp = off;
    return(dstopts->ip6d_nxt);

  bad:
    m_freem(m);
    return(IPPROTO_DONE);
}
diff --git a/bsd/netinet6/esp.h b/bsd/netinet6/esp.h
new file mode 100644
index 000000000..8add062e1
--- /dev/null
+++ b/bsd/netinet6/esp.h
@@ -0,0 +1,106 @@
/* $KAME: esp.h,v 1.5 2000/02/22 14:04:15 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */

#ifndef _NETINET6_ESP_H_
#define _NETINET6_ESP_H_

/* NOTE(review): header name lost in extraction; presumably <netkey/keydb.h> — confirm */
#include /* for struct secas */

/* RFC 1827 (old) ESP header; only the SPI is a real field, the rest of
 * the wire layout is variable-length and described by the comments. */
struct esp {
	u_int32_t	esp_spi;	/* ESP */
	/*variable size, 32bit bound*/	/* Initialization Vector */
	/*variable size*/		/* Payload data */
	/*variable size*/		/* padding */
	/*8bit*/			/* pad size */
	/*8bit*/			/* next header */
	/*8bit*/			/* next header */
	/*variable size, 32bit bound*/	/* Authentication data (new IPsec) */
};

/* RFC 2406 (new) ESP header: SPI plus anti-replay sequence number. */
struct newesp {
	u_int32_t	esp_spi;	/* ESP */
	u_int32_t	esp_seq;	/* Sequence number */
	/*variable size*/		/* (IV and) Payload data */
	/*variable size*/		/* padding */
	/*8bit*/			/* pad size */
	/*8bit*/			/* next header */
	/*8bit*/			/* next header */
	/*variable size, 32bit bound*/	/* Authentication data */
};

/* Trailer shared by both ESP formats. */
struct esptail {
	u_int8_t	esp_padlen;	/* pad length */
	u_int8_t	esp_nxt;	/* Next header */
	/*variable size, 32bit bound*/	/* Authentication data (new IPsec)*/
};

struct esp_algorithm_state {
	struct secasvar *sav;
	void* foo;	/*per algorithm data - maybe*/
};

/*
 * Per-cipher dispatch record; esp_core.c defines one table entry per
 * SADB_EALG_* value.  keymin/keymax are in bits, padbound in bytes.
 */
/* XXX yet to be defined */
struct esp_algorithm {
	size_t padbound;	/* pad boundary, in byte */
	int (*mature) __P((struct secasvar *));
	int keymin;	/* in bits */
	int keymax;	/* in bits */
	int (*ivlen) __P((struct secasvar *));
	int (*decrypt) __P((struct mbuf *, size_t,
		struct secasvar *, struct esp_algorithm *, int));
	int (*encrypt) __P((struct mbuf *, size_t, size_t,
		struct secasvar *, struct esp_algorithm *, int));
};

#if KERNEL
extern struct esp_algorithm esp_algorithms[];

/* crypt routines */
extern int esp4_output __P((struct mbuf *, struct ipsecrequest *));
extern void esp4_input __P((struct mbuf *, int off));
extern size_t esp_hdrsiz __P((struct ipsecrequest *));

#if INET6
extern int esp6_output __P((struct mbuf *, u_char *, struct mbuf *,
	struct ipsecrequest *));
extern int esp6_input __P((struct mbuf **, int *, int));
#endif /* INET6 */
#endif
/*KERNEL*/ + +struct secasvar; +extern int esp_auth __P((struct mbuf *, size_t, size_t, + struct secasvar *, u_char *)); + +#endif /*_NETINET6_ESP_H_*/ diff --git a/bsd/netinet6/esp_core.c b/bsd/netinet6/esp_core.c new file mode 100644 index 000000000..22ff54d80 --- /dev/null +++ b/bsd/netinet6/esp_core.c @@ -0,0 +1,1271 @@ +/* $KAME: esp_core.c,v 1.11 2000/02/22 14:04:15 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#if INET6 +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static int esp_null_mature __P((struct secasvar *)); +static int esp_null_ivlen __P((struct secasvar *)); +static int esp_null_decrypt __P((struct mbuf *, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_null_encrypt __P((struct mbuf *, size_t, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_descbc_mature __P((struct secasvar *)); +static int esp_descbc_ivlen __P((struct secasvar *)); +static int esp_descbc_decrypt __P((struct mbuf *, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_descbc_encrypt __P((struct mbuf *, size_t, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_cbc_mature __P((struct secasvar *)); +static int esp_blowfish_cbc_decrypt __P((struct mbuf *, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_blowfish_cbc_encrypt __P((struct mbuf *, size_t, + size_t, struct secasvar *, struct esp_algorithm *, int)); +static int esp_blowfish_cbc_ivlen __P((struct secasvar *)); +static int esp_cast128cbc_ivlen __P((struct secasvar *)); +static int esp_cast128cbc_decrypt __P((struct mbuf *, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_cast128cbc_encrypt __P((struct mbuf *, size_t, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_3descbc_ivlen __P((struct secasvar *)); +static int esp_3descbc_decrypt __P((struct mbuf *, size_t, + struct secasvar *, struct esp_algorithm *, int)); +static int esp_3descbc_encrypt __P((struct mbuf *, size_t, size_t, 
	struct secasvar *, struct esp_algorithm *, int));
static int esp_rc5cbc_ivlen __P((struct secasvar *));
static int esp_rc5cbc_decrypt __P((struct mbuf *, size_t,
	struct secasvar *, struct esp_algorithm *, int));
static int esp_rc5cbc_encrypt __P((struct mbuf *, size_t, size_t,
	struct secasvar *, struct esp_algorithm *, int));
static void esp_increment_iv __P((struct secasvar *));
static caddr_t mbuf_find_offset __P((struct mbuf *, size_t, size_t));

/*
 * Cipher dispatch table, indexed by the SA's alg_enc value.
 * Fields per entry (see struct esp_algorithm in esp.h):
 * padbound, mature, keymin, keymax, ivlen, decrypt, encrypt.
 */
/* NOTE: The order depends on SADB_EALG_x in netkey/keyv2.h */
struct esp_algorithm esp_algorithms[] = {
	/* none */
	{ 0, 0, 0, 0, 0, 0, 0, },
	/* DES-CBC */
	{ 8, esp_descbc_mature, 64, 64,
		esp_descbc_ivlen, esp_descbc_decrypt, esp_descbc_encrypt, },
	/* 3DES-CBC */
	{ 8, esp_cbc_mature, 192, 192,
		esp_3descbc_ivlen, esp_3descbc_decrypt, esp_3descbc_encrypt, },
	/* NULL encryption */
	{ 1, esp_null_mature, 0, 2048,
		esp_null_ivlen, esp_null_decrypt, esp_null_encrypt, },
	/* Blowfish-CBC */
	{ 8, esp_cbc_mature, 40, 448,
		esp_blowfish_cbc_ivlen, esp_blowfish_cbc_decrypt,
		esp_blowfish_cbc_encrypt, },
	/* CAST128-CBC */
	{ 8, esp_cbc_mature, 40, 128,
		esp_cast128cbc_ivlen, esp_cast128cbc_decrypt,
		esp_cast128cbc_encrypt, },
	/* RC5-CBC */
	{ 8, esp_cbc_mature, 40, 2040,
		esp_rc5cbc_ivlen, esp_rc5cbc_decrypt, esp_rc5cbc_encrypt, },
};

/*
 * mbuf assumption: foo_encrypt() assumes that IV part is placed in a single
 * mbuf, not across multiple mbufs.
 */

/* NULL encryption: any SA parameters are acceptable. */
static int
esp_null_mature(sav)
	struct secasvar *sav;
{
	/* anything is okay */
	return 0;
}

/* NULL encryption carries no IV. */
static int
esp_null_ivlen(sav)
	struct secasvar *sav;
{
	return 0;
}

static int
esp_null_decrypt(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;		/* offset to ESP header */
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	return 0; /* do nothing */
}

static int
esp_null_encrypt(m, off, plen, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;	/* offset to ESP header */
	size_t plen;	/* payload length (to be encrypted) */
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	return 0; /* do nothing */
}

/*
 * Validate a DES-CBC SA: key present, correct length, not a weak key,
 * and flag combination supported.  Returns 0 if usable, 1 otherwise.
 */
static int
esp_descbc_mature(sav)
	struct secasvar *sav;
{
	struct esp_algorithm *algo;

	if (!(sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_IV4B)) {
		ipseclog((LOG_ERR, "esp_cbc_mature: "
		    "algorithm incompatible with 4 octets IV length\n"));
		return 1;
	}

	if (!sav->key_enc) {
		ipseclog((LOG_ERR, "esp_descbc_mature: no key is given.\n"));
		return 1;
	}
	algo = &esp_algorithms[sav->alg_enc];
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR,
		    "esp_descbc_mature: invalid key length %d.\n",
		    _KEYBITS(sav->key_enc)));
		return 1;
	}

	/* weak key check */
	if (des_is_weak_key((C_Block *)_KEYBUF(sav->key_enc))) {
		ipseclog((LOG_ERR,
		    "esp_descbc_mature: weak key was passed.\n"));
		return 1;
	}

	return 0;
}

/*
 * IV length for DES-CBC: 4 octets for old ESP with IV4B, or for new ESP
 * with derived IV (sequence number reused as IV); otherwise 8.
 */
static int
esp_descbc_ivlen(sav)
	struct secasvar *sav;
{
	if (sav && (sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_IV4B))
		return 4;

	if (sav && !(sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_DERIV))
		return 4;
	else
		return 8;
}

/*
 * DES-CBC decrypt in place.  A 4-octet IV is expanded to 8 octets by
 * repeating it with the second half complemented, per the derived-IV
 * draft referenced below.
 */
static int
esp_descbc_decrypt(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;		/* offset to ESP header */
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff = 0;
	size_t bodyoff = 0;
	u_int8_t *iv;
	size_t plen;
	u_int8_t tiv[8];
	int derived;

	derived = 0;
	/* sanity check */
	if (ivlen != sav->ivlen) {
		ipseclog((LOG_ERR, "esp_descbc_decrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR, "esp_descbc_decrypt: bad keylen %d\n",
		    _KEYBITS(sav->key_enc)));
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
		derived = 0;
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			/*
			 * draft-ietf-ipsec-ciph-des-derived-00.txt
			 * uses sequence number field as IV field.
			 * This draft has been deleted, but you can get from
			 * ftp://ftp.kame.net/pub/internet-drafts/.
			 */
			ivoff = off + sizeof(struct esp);
			bodyoff = off + sizeof(struct esp) + sizeof(u_int32_t);
			ivlen = sizeof(u_int32_t);
			derived = 1;
		} else {
			ivoff = off + sizeof(struct newesp);
			bodyoff = off + sizeof(struct newesp) + ivlen;
			derived = 0;
		}
	}
	if (ivlen == 4) {
		/* expand 4-octet IV to 8: second half is one's complement */
		iv = &tiv[0];
		m_copydata(m, ivoff, 4, &tiv[0]);
		m_copydata(m, ivoff, 4, &tiv[4]);
		tiv[4] ^= 0xff;
		tiv[5] ^= 0xff;
		tiv[6] ^= 0xff;
		tiv[7] ^= 0xff;
	} else if (ivlen == 8) {
		iv = &tiv[0];
		m_copydata(m, ivoff, 8, &tiv[0]);
	} else {
		ipseclog((LOG_ERR, "esp_descbc_decrypt: unsupported ivlen %d\n",
		    ivlen));
		return EINVAL;
	}

	plen = m->m_pkthdr.len;
	if (plen < bodyoff)
		panic("esp_descbc_decrypt: too short packet: len=%lu",
		    (u_long)plen);
	plen -= bodyoff;

	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_descbc_decrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}

    {
	int deserr;
	des_key_schedule ks;

	deserr = des_key_sched((C_Block *)_KEYBUF(sav->key_enc), ks);
	if (deserr != 0) {
		ipseclog((LOG_ERR,
		    "esp_descbc_decrypt: key error %d\n", deserr));
		return EINVAL;
	}

	des_cbc_encrypt(m, bodyoff, plen, ks, (C_Block *)iv, DES_DECRYPT);

	/* for safety */
	bzero(&ks, sizeof(des_key_schedule));
    }

	/* for safety */
	bzero(&tiv[0], sizeof(tiv));

	return 0;
}

/*
 * DES-CBC encrypt in place; the IV comes from the SA (sav->iv) or, in
 * derived-IV mode, from the sequence-number field already in the packet.
 * Bumps the SA's IV afterwards via esp_increment_iv().
 */
static int
esp_descbc_encrypt(m, off, plen, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;	/* offset to ESP header */
	size_t plen;	/* payload length (to be encrypted) */
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff = 0;
	size_t bodyoff = 0;
	u_int8_t *iv;
	u_int8_t tiv[8];
	int derived;

	derived = 0;

	/* sanity check */
	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_descbc_encrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR, "esp_descbc_encrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR, "esp_descbc_encrypt: bad keylen %d\n",
		    _KEYBITS(sav->key_enc)));
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		/*
		 * draft-ietf-ipsec-ciph-des-derived-00.txt
		 * uses sequence number field as IV field.
		 * This draft has been deleted, see above.
		 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
		derived = 0;
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			/*
			 * draft-ietf-ipsec-ciph-des-derived-00.txt
			 * uses sequence number field as IV field.
			 * This draft has been deleted, see above.
			 */
			ivoff = off + sizeof(struct esp);
			bodyoff = off + sizeof(struct esp) + sizeof(u_int32_t);
			ivlen = sizeof(u_int32_t);
			derived = 1;
		} else {
			ivoff = off + sizeof(struct newesp);
			bodyoff = off + sizeof(struct newesp) + ivlen;
			derived = 0;
		}
	}

	if (m->m_pkthdr.len < bodyoff)
		panic("assumption failed: mbuf too short");
	iv = mbuf_find_offset(m, ivoff, ivlen);
	if (!iv)
		panic("assumption failed: bad mbuf chain");
	if (ivlen == 4) {
		if (!derived) {
			/* write SA IV into the packet, expand locally to 8 */
			bcopy(sav->iv, &tiv[0], 4);
			bcopy(sav->iv, &tiv[4], 4);
			tiv[4] ^= 0xff;
			tiv[5] ^= 0xff;
			tiv[6] ^= 0xff;
			tiv[7] ^= 0xff;
			bcopy(&tiv[0], iv, 4);
			iv = &tiv[0];
		} else {
			/* derived mode: expand the sequence number in place */
			bcopy(iv, &tiv[0], 4);
			bcopy(iv, &tiv[4], 4);
			tiv[4] ^= 0xff;
			tiv[5] ^= 0xff;
			tiv[6] ^= 0xff;
			tiv[7] ^= 0xff;
			iv = &tiv[0];
		}
	} else if (ivlen == 8)
		bcopy((caddr_t)sav->iv, (caddr_t)iv, ivlen);
	else {
		ipseclog((LOG_ERR,
		    "esp_descbc_encrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

    {
	int deserr;
	des_key_schedule ks;

	deserr = des_key_sched((C_Block *)_KEYBUF(sav->key_enc), ks);
	if (deserr != 0) {
		ipseclog((LOG_ERR,
		    "esp_descbc_encrypt: key error %d\n", deserr));
		return EINVAL;
	}

	des_cbc_encrypt(m, bodyoff, plen, ks, (C_Block *)iv, DES_ENCRYPT);

	/* for safety */
	bzero(&ks, sizeof(des_key_schedule));
    }

	esp_increment_iv(sav);

	/* for safety */
	bzero(&tiv[0], sizeof(tiv));

	return 0;
}

/*
 * Shared maturity check for the new-ESP CBC ciphers (3DES, Blowfish,
 * CAST128, RC5): rejects old-ESP/derived-IV flags, enforces the table's
 * key-length range, and weak-key-checks each third of a 3DES key.
 */
static int
esp_cbc_mature(sav)
	struct secasvar *sav;
{
	int keylen;
	struct esp_algorithm *algo;

	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature: algorithm incompatible with esp-old\n"));
		return 1;
	}
	if (sav->flags & SADB_X_EXT_DERIV) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature: algorithm incompatible with derived\n"));
		return 1;
	}

	if (!sav->key_enc) {
		ipseclog((LOG_ERR,
		    "esp_cbc_mature: no key is given.\n"));
		return 1;
	}
	algo = &esp_algorithms[sav->alg_enc];
	keylen = sav->key_enc->sadb_key_bits;
	if (keylen < algo->keymin || algo->keymax < keylen) {
		ipseclog((LOG_ERR, "esp_cbc_mature: invalid key length %d.\n",
		    sav->key_enc->sadb_key_bits));
		return 1;
	}
	switch (sav->alg_enc) {
	case SADB_EALG_3DESCBC:
		/* weak key check */
		if (des_is_weak_key((C_Block *)_KEYBUF(sav->key_enc))
		 || des_is_weak_key((C_Block *)(_KEYBUF(sav->key_enc) + 8))
		 || des_is_weak_key((C_Block *)(_KEYBUF(sav->key_enc) + 16))) {
			ipseclog((LOG_ERR,
			    "esp_cbc_mature: weak key was passed.\n"));
			return 1;
		}
		break;
	case SADB_EALG_BLOWFISHCBC:
	case SADB_EALG_CAST128CBC:
	case SADB_EALG_RC5CBC:
		break;
	}

	return 0;
}

/* Blowfish-CBC decrypt in place (new ESP only, 8-octet IV). */
static int
esp_blowfish_cbc_decrypt(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;		/* offset to ESP header */
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t *iv;
	u_int8_t tiv[8];
	size_t plen;
	static BF_KEY key;	/* made static to avoid kernel stack overflow */
	int s;

	/* sanity check */
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_decrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_decrypt: unsupported key length %d: "
		    "need %d to %d bits\n", _KEYBITS(sav->key_enc),
		    algo->keymin, algo->keymax));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_decrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_decrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;
	iv = &tiv[0];
	m_copydata(m, ivoff, 8, &tiv[0]);

	plen = m->m_pkthdr.len;
	if (plen < bodyoff)
		panic("esp_blowfish_cbc_decrypt: too short packet: len=%lu",
		    (u_long)plen);
	plen -= bodyoff;

	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_blowfish_cbc_decrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}

	/* spl protects the static key schedule from concurrent use */
#if __NetBSD__
	s = splsoftnet();	/* XXX correct? */
#else
	s = splnet();	/* XXX correct? */
#endif

	BF_set_key(&key, _KEYBITS(sav->key_enc) / 8, _KEYBUF(sav->key_enc));
	BF_cbc_encrypt_m(m, bodyoff, plen, &key, iv, BF_DECRYPT);

	/* for safety */
	bzero(&key, sizeof(BF_KEY));

	splx(s);

	/* for safety */
	bzero(&tiv[0], sizeof(tiv));

	return 0;
}

/* Blowfish-CBC encrypt in place; IV from the SA, incremented after use. */
static int
esp_blowfish_cbc_encrypt(m, off, plen, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;	/* offset to ESP header */
	size_t plen;	/* payload length (to be encrypted) */
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t *iv;
	static BF_KEY key;	/* made static to avoid kernel stack overflow */
	int s;

	/* sanity check */
	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_blowfish_cbc_encrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_encrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_encrypt: unsupported key length %d: "
		    "need %d to %d bits\n", _KEYBITS(sav->key_enc),
		    algo->keymin, algo->keymax));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_encrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR,
		    "esp_blowfish_cbc_encrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;

	if (m->m_pkthdr.len < bodyoff)
		panic("assumption failed: mbuf too short");
	iv = mbuf_find_offset(m, ivoff, ivlen);
	if (!iv)
		panic("assumption failed: bad mbuf chain");

	bcopy((caddr_t)sav->iv, (caddr_t)iv, ivlen);

	/* spl protects the static key schedule from concurrent use */
#if __NetBSD__
	s = splsoftnet();	/* XXX correct? */
#else
	s = splnet();	/* XXX correct? */
#endif

	BF_set_key(&key, _KEYBITS(sav->key_enc) / 8, _KEYBUF(sav->key_enc));
	BF_cbc_encrypt_m(m, bodyoff, plen, &key, iv, BF_ENCRYPT);

	/* for safety */
	bzero(&key, sizeof(BF_KEY));

	splx(s);

	esp_increment_iv(sav);

	return 0;
}

static int
esp_blowfish_cbc_ivlen(sav)
	struct secasvar *sav;
{
	return 8;
}

static int
esp_cast128cbc_ivlen(sav)
	struct secasvar *sav;
{
	return 8;
}

/* CAST128-CBC decrypt in place (new ESP only, 8-octet IV). */
static int
esp_cast128cbc_decrypt(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t iv[8];
	size_t plen;

	/* sanity check */
	if (ivlen != sav->ivlen) {
		ipseclog((LOG_ERR, "esp_cast128cbc_decrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || _KEYBITS(sav->key_enc) > algo->keymax) {
		ipseclog((LOG_ERR,
		    "esp_cast128cbc_decrypt: unsupported key length %d: "
		    "need %d to %d bits\n", _KEYBITS(sav->key_enc),
		    algo->keymin, algo->keymax));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_cast128cbc_decrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR,
		    "esp_cast128cbc_decrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;

	/* copy mbuf's IV into iv */
	m_copydata(m, ivoff, 8, iv);

	plen = m->m_pkthdr.len;
	if (plen < bodyoff) {
		panic("esp_cast128cbc_decrypt: too short packet: len=%lu\n",
		    (u_long)plen);
	}
	plen -= bodyoff;

	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_cast128cbc_decrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}

	/* decrypt */
    {
	u_int8_t key[16];
	u_int32_t subkey[32];

	/* key is zero-padded to CAST128's fixed 16-byte input */
	bzero(key, sizeof(key));
	bcopy(_KEYBUF(sav->key_enc), key, _KEYLEN(sav->key_enc));

	set_cast128_subkey(subkey, key);
	cast128_cbc_process(m, bodyoff, plen, subkey, iv,
	    _KEYBITS(sav->key_enc) / 8, CAST128_DECRYPT);

	/* for safety */
	bzero(subkey, sizeof(subkey));
	bzero(key, sizeof(key));
    }

	return 0;
}

/* CAST128-CBC encrypt in place; IV from the SA, incremented after use. */
static int
esp_cast128cbc_encrypt(m, off, plen, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	size_t plen;
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t *iv;

	/* sanity check */
	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_cast128cbc_encrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR, "esp_cast128cbc_encrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || _KEYBITS(sav->key_enc) > algo->keymax) {
		ipseclog((LOG_ERR,
		    "esp_cast128cbc_encrypt: unsupported key length %d: "
		    "needs %d to %d bits\n", _KEYBITS(sav->key_enc),
		    algo->keymin, algo->keymax));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_cast128cbc_encrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR,
		    "esp_cast128cbc_encrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;

	if (m->m_pkthdr.len < bodyoff)
		panic("assumption failed: mbuf too short");
	iv = mbuf_find_offset(m, ivoff, ivlen);
	if (!iv)
		panic("assumption failed: bad mbuf chain");

	bcopy(sav->iv, iv, ivlen);

	/* encrypt */
    {
	u_int8_t key[16];
	u_int32_t subkey[32];

	/* key is zero-padded to CAST128's fixed 16-byte input */
	bzero(key, sizeof(key));
	bcopy(_KEYBUF(sav->key_enc), key, _KEYLEN(sav->key_enc));

	set_cast128_subkey(subkey, key);
	cast128_cbc_process(m, bodyoff, plen, subkey, iv,
	    _KEYBITS(sav->key_enc) / 8, CAST128_ENCRYPT);

	/* for safety */
	bzero(subkey, sizeof(subkey));
	bzero(key, sizeof(key));
    }

	esp_increment_iv(sav);

	return 0;
}

static int
esp_3descbc_ivlen(sav)
	struct secasvar *sav;
{
	return 8;
}

/* 3DES-CBC decrypt in place (new ESP only, 8-octet IV). */
static int
esp_3descbc_decrypt(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t *iv;
	size_t plen;
	u_int8_t tiv[8];

	/* sanity check */
	if (ivlen != sav->ivlen) {
		ipseclog((LOG_ERR, "esp_3descbc_decrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR, "esp_3descbc_decrypt: bad keylen %d\n",
		    _KEYBITS(sav->key_enc)));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_3descbc_decrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR,
		    "esp_3descbc_decrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;
	iv = &tiv[0];
	m_copydata(m, ivoff, 8, &tiv[0]);

	plen = m->m_pkthdr.len;
	if (plen < bodyoff)
		panic("esp_3descbc_decrypt: too short packet: len=%lu",
		    (u_long)plen);

	plen -= bodyoff;

	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_3descbc_decrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}

	/* decrypt packet */
    {
	int deserr[3];
	des_key_schedule ks[3];

	/* three independent DES schedules, one per 8-byte key third */
	deserr[0] = des_key_sched((C_Block *)_KEYBUF(sav->key_enc),ks[0]);
	deserr[1] = des_key_sched((C_Block *)(_KEYBUF(sav->key_enc) + 8), ks[1]);
	deserr[2] = des_key_sched((C_Block *)(_KEYBUF(sav->key_enc) + 16), ks[2]);
	if ((deserr[0] != 0) || (deserr[1] != 0) || (deserr[2] != 0)) {
		ipseclog((LOG_ERR, "esp_3descbc_decrypt: key error %d/%d/%d\n",
		    deserr[0], deserr[1], deserr[2]));
		return EINVAL;
	}

	des_3cbc_process(m, bodyoff, plen, ks, (C_Block *)iv, DES_DECRYPT);

	/* for safety */
	bzero(ks[0], sizeof(des_key_schedule)*3);
    }

	/* for safety */
	bzero(&tiv[0], sizeof(tiv));

	return 0;
}

/* 3DES-CBC encrypt in place; IV from the SA, incremented after use. */
static int
esp_3descbc_encrypt(m, off, plen, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	size_t plen;
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t *iv;

	/* sanity check */
	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_3descbc_encrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR, "esp_3descbc_encrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || algo->keymax < _KEYBITS(sav->key_enc)) {
		ipseclog((LOG_ERR, "esp_3descbc_encrypt: bad keylen %d\n",
		    _KEYBITS(sav->key_enc)));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_3descbc_encrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR,
		    "esp_3descbc_encrypt: unsupported ivlen %d\n", ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;

	if (m->m_pkthdr.len < bodyoff)
		panic("assumption failed: mbuf too short");
	iv = mbuf_find_offset(m, ivoff, ivlen);
	if (!iv)
		panic("assumption failed: bad mbuf chain");

	bcopy((caddr_t)sav->iv, (caddr_t)iv, ivlen);

	/* encrypt packet */
    {
	int deserr[3];
	des_key_schedule ks[3];

	/* three independent DES schedules, one per 8-byte key third */
	deserr[0] = des_key_sched((C_Block *)_KEYBUF(sav->key_enc), ks[0]);
	deserr[1] = des_key_sched((C_Block *)(_KEYBUF(sav->key_enc) + 8), ks[1]);
	deserr[2] = des_key_sched((C_Block *)(_KEYBUF(sav->key_enc) + 16), ks[2]);
	if ((deserr[0] != 0) || (deserr[1] != 0) || (deserr[2] != 0)) {
		ipseclog((LOG_ERR, "esp_3descbc_encrypt: key error %d/%d/%d\n",
		    deserr[0], deserr[1], deserr[2]));
		return EINVAL;
	}

	des_3cbc_process(m, bodyoff, plen, ks, (C_Block *)iv, DES_ENCRYPT);

	/* for safety */
	bzero(ks[0], sizeof(des_key_schedule)*3);
    }

	esp_increment_iv(sav);

	return 0;
}

static int
esp_rc5cbc_ivlen(sav)
	struct secasvar *sav;
{
	return 8;
}

/* RC5-CBC decrypt in place (new ESP only, 8-octet IV, 16 rounds). */
static int
esp_rc5cbc_decrypt(m, off, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t iv[8];
	size_t plen;

	/* sanity check */
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR, "esp_rc5cbc_decrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	/* note: literal 40/2040 bounds here rather than algo->keymin/keymax */
	if ((_KEYBITS(sav->key_enc) < 40) || (_KEYBITS(sav->key_enc) > 2040)) {
		ipseclog((LOG_ERR,
		    "esp_rc5cbc_decrypt: unsupported key length %d: "
		    "need 40 to 2040 bit\n", _KEYBITS(sav->key_enc)));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_rc5cbc_decrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR, "esp_rc5cbc_decrypt: unsupported ivlen %d\n",
		    ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;

	/* copy mbuf's IV into iv */
	m_copydata(m, ivoff, 8, iv);

	plen = m->m_pkthdr.len;
	if (plen < bodyoff) {
		panic("esp_rc5cbc_decrypt: too short packet: len=%lu",
		    (u_long)plen);
	}
	plen -= bodyoff;

	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_rc5cbc_decrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}

	/* decrypt */
    {
	RC5_WORD e_key[34];

	set_rc5_expandkey(e_key, _KEYBUF(sav->key_enc),
	    _KEYBITS(sav->key_enc) / 8, 16);
	rc5_cbc_process(m, bodyoff, plen, e_key, iv, RC5_DECRYPT);

	/* for safety */
	bzero(e_key, sizeof(e_key));
    }

	return 0;
}

/* RC5-CBC encrypt in place; IV from the SA, incremented after use. */
static int
esp_rc5cbc_encrypt(m, off, plen, sav, algo, ivlen)
	struct mbuf *m;
	size_t off;
	size_t plen;
	struct secasvar *sav;
	struct esp_algorithm *algo;
	int ivlen;
{
	size_t ivoff;
	size_t bodyoff;
	u_int8_t *iv;

	/* sanity check */
	if (plen % 8) {
		ipseclog((LOG_ERR, "esp_rc5cbc_encrypt: "
		    "payload length must be multiple of 8\n"));
		return EINVAL;
	}
	if (sav->ivlen != ivlen) {
		ipseclog((LOG_ERR, "esp_rc5cbc_encrypt: bad ivlen %d/%d\n",
		    ivlen, sav->ivlen));
		return EINVAL;
	}
	if (_KEYBITS(sav->key_enc) < algo->keymin
	 || _KEYBITS(sav->key_enc) > algo->keymax) {
		ipseclog((LOG_ERR,
		    "esp_rc5cbc_encrypt: unsupported key length %d: "
		    "need %d to %d bits\n", _KEYBITS(sav->key_enc),
		    algo->keymin, algo->keymax));
		return EINVAL;
	}
	if (sav->flags & SADB_X_EXT_OLD) {
		ipseclog((LOG_ERR,
		    "esp_rc5cbc_encrypt: unsupported ESP version\n"));
		return EINVAL;
	}
	if (ivlen != 8) {
		ipseclog((LOG_ERR, "esp_rc5cbc_encrypt: unsupported ivlen %d\n",
		    ivlen));
		return EINVAL;
	}

	ivoff = off + sizeof(struct newesp);
	bodyoff = off + sizeof(struct newesp) + ivlen;

	if (m->m_pkthdr.len < bodyoff)
		panic("assumption failed: mbuf too short");
	iv = mbuf_find_offset(m, ivoff, ivlen);
	if (!iv)
		panic("assumption failed: bad mbuf chain");

	bcopy(sav->iv, iv, ivlen);

	/* encrypt */
    {
	RC5_WORD e_key[34];

	set_rc5_expandkey(e_key, _KEYBUF(sav->key_enc),
	    _KEYBITS(sav->key_enc) / 8, 16);
	rc5_cbc_process(m, bodyoff, plen, e_key, iv, RC5_ENCRYPT);

	/* for safety */
	bzero(e_key, sizeof(e_key));
    }

	esp_increment_iv(sav);

	return 0;
}

/*
 * increment iv.
+ */ +static void +esp_increment_iv(sav) + struct secasvar *sav; +{ + u_int8_t *x; + u_int8_t y; + int i; + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) + y = time.tv_sec & 0xff; +#else + y = time_second & 0xff; +#endif + if (!y) y++; + x = (u_int8_t *)sav->iv; + for (i = 0; i < sav->ivlen; i++) { + *x = (*x + y) & 0xff; + x++; + } +} + +static caddr_t +mbuf_find_offset(m, off, len) + struct mbuf *m; + size_t off; + size_t len; +{ + struct mbuf *n; + size_t cnt; + + if (m->m_pkthdr.len < off || m->m_pkthdr.len < off + len) + return (caddr_t)NULL; + cnt = 0; + for (n = m; n; n = n->m_next) { + if (cnt + n->m_len <= off) { + cnt += n->m_len; + continue; + } + if (cnt <= off && off < cnt + n->m_len + && cnt <= off + len && off + len <= cnt + n->m_len) { + return mtod(n, caddr_t) + off - cnt; + } else + return (caddr_t)NULL; + } + return (caddr_t)NULL; +} + +/*------------------------------------------------------------*/ + +int +esp_auth(m0, skip, length, sav, sum) + struct mbuf *m0; + size_t skip; /* offset to ESP header */ + size_t length; /* payload length */ + struct secasvar *sav; + u_char *sum; +{ + struct mbuf *m; + size_t off; + struct ah_algorithm_state s; + u_char sumbuf[AH_MAXSUMSIZE]; + struct ah_algorithm *algo; + size_t siz; + + /* sanity checks */ + if (m0->m_pkthdr.len < skip) { + ipseclog((LOG_DEBUG, "esp_auth: mbuf length < skip\n")); + return EINVAL; + } + if (m0->m_pkthdr.len < skip + length) { + ipseclog((LOG_DEBUG, + "esp_auth: mbuf length < skip + length\n")); + return EINVAL; + } + /* + * length of esp part (excluding authentication data) must be 4n, + * since nexthdr must be at offset 4n+3. 
+ */ + if (length % 4) { + ipseclog((LOG_ERR, "esp_auth: length is not multiple of 4\n")); + return EINVAL; + } + if (!sav) { + ipseclog((LOG_DEBUG, "esp_auth: NULL SA passed\n")); + return EINVAL; + } + if (!sav->alg_auth) { + ipseclog((LOG_ERR, + "esp_auth: bad ESP auth algorithm passed: %d\n", + sav->alg_auth)); + return EINVAL; + } + + m = m0; + off = 0; + + algo = &ah_algorithms[sav->alg_auth]; + siz = (((*algo->sumsiz)(sav) + 3) & ~(4 - 1)); + if (sizeof(sumbuf) < siz) { + ipseclog((LOG_DEBUG, + "esp_auth: AH_MAXSUMSIZE is too small: siz=%lu\n", + (u_long)siz)); + return EINVAL; + } + + /* skip the header */ + while (skip) { + if (!m) + panic("mbuf chain?"); + if (m->m_len <= skip) { + skip -= m->m_len; + m = m->m_next; + off = 0; + } else { + off = skip; + skip = 0; + } + } + + (*algo->init)(&s, sav); + while (0 < length) { + if (!m) + panic("mbuf chain?"); + + if (m->m_len - off < length) { + (*algo->update)(&s, mtod(m, u_char *) + off, + m->m_len - off); + length -= m->m_len - off; + m = m->m_next; + off = 0; + } else { + (*algo->update)(&s, mtod(m, u_char *) + off, length); + break; + } + } + (*algo->result)(&s, sumbuf); + bcopy(sumbuf, sum, siz); /*XXX*/ + + return 0; +} diff --git a/bsd/netinet6/esp_input.c b/bsd/netinet6/esp_input.c new file mode 100644 index 000000000..7bd490e8f --- /dev/null +++ b/bsd/netinet6/esp_input.c @@ -0,0 +1,790 @@ +/* $KAME: esp_input.c,v 1.22 2000/03/21 05:14:49 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC1827/2406 Encapsulated Security Payload. + */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if INET6 +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include + + +#include + +#define IPLEN_FLIPPED + +#if INET +extern struct protosw inetsw[]; +#if defined(__bsdi__) || defined(__NetBSD__) +extern u_char ip_protox[]; +#endif + +#define ESPMAXLEN \ + (sizeof(struct esp) < sizeof(struct newesp) \ + ? 
sizeof(struct newesp) : sizeof(struct esp)) + +void +esp4_input(m, off) + struct mbuf *m; + int off; +{ + struct ip *ip; + struct esp *esp; + struct esptail esptail; + u_int32_t spi; + struct secasvar *sav = NULL; + size_t taillen; + u_int16_t nxt; + struct esp_algorithm *algo; + int ivlen; + size_t hlen; + size_t esplen; + int s; + + /* sanity check for alignment. */ + if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) { + ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem " + "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); + ipsecstat.in_inval++; + goto bad; + } + + if (m->m_len < off + ESPMAXLEN) { + m = m_pullup(m, off + ESPMAXLEN); + if (!m) { + ipseclog((LOG_DEBUG, + "IPv4 ESP input: can't pullup in esp4_input\n")); + ipsecstat.in_inval++; + goto bad; + } + } + + ip = mtod(m, struct ip *); + esp = (struct esp *)(((u_int8_t *)ip) + off); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + + /* find the sassoc. */ + spi = esp->esp_spi; + + if ((sav = key_allocsa(AF_INET, + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, + IPPROTO_ESP, spi)) == 0) { + ipseclog((LOG_WARNING, + "IPv4 ESP input: no key association found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsecstat.in_nosa++; + goto bad; + } + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP esp4_input called to allocate SA:%p\n", sav)); + if (sav->state != SADB_SASTATE_MATURE + && sav->state != SADB_SASTATE_DYING) { + ipseclog((LOG_DEBUG, + "IPv4 ESP input: non-mature/dying SA found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsecstat.in_badspi++; + goto bad; + } + if (sav->alg_enc == SADB_EALG_NONE) { + ipseclog((LOG_DEBUG, "IPv4 ESP input: " + "unspecified encryption algorithm for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsecstat.in_badspi++; + goto bad; + } + + algo = &esp_algorithms[sav->alg_enc]; /*XXX*/ + + /* check if we have proper ivlen information */ + ivlen = sav->ivlen; + if (ivlen < 0) { + ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n", 
+ ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_inval++; + goto bad; + } + + if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay + && (sav->alg_auth && sav->key_auth))) + goto noreplaycheck; + + if (sav->alg_auth == SADB_AALG_NULL) + goto noreplaycheck; + + /* + * check for sequence number. + */ + if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) + ; /*okey*/ + else { + ipsecstat.in_espreplay++; + ipseclog((LOG_WARNING, + "replay packet in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + goto bad; + } + + /* check ICV */ + { + u_char sum0[AH_MAXSUMSIZE]; + u_char sum[AH_MAXSUMSIZE]; + struct ah_algorithm *sumalgo; + size_t siz; + + sumalgo = &ah_algorithms[sav->alg_auth]; + siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); + if (AH_MAXSUMSIZE < siz) { + ipseclog((LOG_DEBUG, + "internal error: AH_MAXSUMSIZE must be larger than %lu\n", + (u_long)siz)); + ipsecstat.in_inval++; + goto bad; + } + + m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]); + + if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { + ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_espauthfail++; + goto bad; + } + + if (bcmp(sum0, sum, siz) != 0) { + ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_espauthfail++; + goto bad; + } + + /* strip off the authentication data */ + m_adj(m, -siz); + ip = mtod(m, struct ip *); +#ifdef IPLEN_FLIPPED + ip->ip_len = ip->ip_len - siz; +#else + ip->ip_len = htons(ntohs(ip->ip_len) - siz); +#endif + m->m_flags |= M_AUTHIPDGM; + ipsecstat.in_espauthsucc++; + } + + /* + * update sequence number. 
+ */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { + if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) { + ipsecstat.in_espreplay++; + goto bad; + } + } + +noreplaycheck: + + /* process main esp header. */ + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1827 */ + esplen = sizeof(struct esp); + } else { + /* RFC 2406 */ + if (sav->flags & SADB_X_EXT_DERIV) + esplen = sizeof(struct esp); + else + esplen = sizeof(struct newesp); + } + + if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) { + ipseclog((LOG_WARNING, + "IPv4 ESP input: packet too short\n")); + ipsecstat.in_inval++; + goto bad; + } + + if (m->m_len < off + esplen + ivlen) { + m = m_pullup(m, off + esplen + ivlen); + if (!m) { + ipseclog((LOG_DEBUG, + "IPv4 ESP input: can't pullup in esp4_input\n")); + ipsecstat.in_inval++; + goto bad; + } + } + + { + /* + * decrypt the packet. + */ + if (!algo->decrypt) + panic("internal error: no decrypt function"); + if ((*algo->decrypt)(m, off, sav, algo, ivlen)) { + ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_inval++; + goto bad; + } + ipsecstat.in_esphist[sav->alg_enc]++; + + m->m_flags |= M_DECRYPTED; + } + + /* + * find the trailer of the ESP. + */ + m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail), + (caddr_t)&esptail); + nxt = esptail.esp_nxt; + taillen = esptail.esp_padlen + sizeof(esptail); + + if (m->m_pkthdr.len < taillen + || m->m_pkthdr.len - taillen < hlen) { /*?*/ + ipseclog((LOG_WARNING, + "bad pad length in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_inval++; + goto bad; + } + + /* strip off the trailing pad area. */ + m_adj(m, -taillen); + +#ifdef IPLEN_FLIPPED + ip->ip_len = ip->ip_len - taillen; +#else + ip->ip_len = htons(ntohs(ip->ip_len) - taillen); +#endif + + /* was it transmitted over the IPsec tunnel SA? 
*/ + if (ipsec4_tunnel_validate(ip, nxt, sav)) { + /* + * strip off all the headers that precedes ESP header. + * IP4 xx ESP IP4' payload -> IP4' payload + * + * XXX more sanity checks + * XXX relationship with gif? + */ + u_int8_t tos; + + tos = ip->ip_tos; + m_adj(m, off + esplen + ivlen); + if (m->m_len < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) { + ipsecstat.in_inval++; + goto bad; + } + } + ip = mtod(m, struct ip *); + /* ECN consideration. */ + ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos); + if (!key_checktunnelsanity(sav, AF_INET, + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { + ipseclog((LOG_ERR, "ipsec tunnel address mismatch " + "in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + ipsecstat.in_inval++; + goto bad; + } + +#if 0 /* XXX should call ipfw rather than ipsec_in_reject, shouldn't it ? */ + /* drop it if it does not match the default policy */ + if (ipsec4_in_reject(m, NULL)) { + ipsecstat.in_polvio++; + goto bad; + } +#endif + + key_sa_recordxfer(sav, m); + + s = splimp(); + if (IF_QFULL(&ipintrq)) { + ipsecstat.in_inval++; + goto bad; + } + IF_ENQUEUE(&ipintrq, m); + m = NULL; + schednetisr(NETISR_IP); /*can be skipped but to make sure*/ + splx(s); + nxt = IPPROTO_DONE; + } else { + /* + * strip off ESP header and IV. + * even in m_pulldown case, we need to strip off ESP so that + * we can always compute checksum for AH correctly. 
+ */ + size_t stripsiz; + + stripsiz = esplen + ivlen; + + ip = mtod(m, struct ip *); + ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; + + ip = mtod(m, struct ip *); +#ifdef IPLEN_FLIPPED + ip->ip_len = ip->ip_len - stripsiz; +#else + ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz); +#endif + ip->ip_p = nxt; + + key_sa_recordxfer(sav, m); + + if (nxt != IPPROTO_DONE) + (*ip_protox[nxt]->pr_input)(m, off); + else + m_freem(m); + m = NULL; + } + + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP esp4_input call free SA:%p\n", sav)); + key_freesav(sav); + } + ipsecstat.in_success++; + return; + +bad: + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP esp4_input call free SA:%p\n", sav)); + key_freesav(sav); + } + if (m) + m_freem(m); + return; +} +#endif /* INET */ + +#if INET6 +int +esp6_input(mp, offp, proto) + struct mbuf **mp; + int *offp, proto; +{ + struct mbuf *m = *mp; + int off = *offp; + struct ip6_hdr *ip6; + struct esp *esp; + struct esptail esptail; + u_int32_t spi; + struct secasvar *sav = NULL; + size_t taillen; + u_int16_t nxt; + struct esp_algorithm *algo; + int ivlen; + size_t esplen; + int s; + + /* sanity check for alignment. */ + if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) { + ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem " + "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); + ipsec6stat.in_inval++; + goto bad; + } + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, IPPROTO_DONE); + esp = (struct esp *)(mtod(m, caddr_t) + off); +#else + IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN); + if (esp == NULL) { + ipsec6stat.in_inval++; + return IPPROTO_DONE; + } +#endif + ip6 = mtod(m, struct ip6_hdr *); + + if (ntohs(ip6->ip6_plen) == 0) { + ipseclog((LOG_ERR, "IPv6 ESP input: " + "ESP with IPv6 jumbogram is not supported.\n")); + ipsec6stat.in_inval++; + goto bad; + } + + /* find the sassoc. 
*/ + spi = esp->esp_spi; + + if ((sav = key_allocsa(AF_INET6, + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, + IPPROTO_ESP, spi)) == 0) { + ipseclog((LOG_WARNING, + "IPv6 ESP input: no key association found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsec6stat.in_nosa++; + goto bad; + } + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP esp6_input called to allocate SA:%p\n", sav)); + if (sav->state != SADB_SASTATE_MATURE + && sav->state != SADB_SASTATE_DYING) { + ipseclog((LOG_DEBUG, + "IPv6 ESP input: non-mature/dying SA found for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsec6stat.in_badspi++; + goto bad; + } + if (sav->alg_enc == SADB_EALG_NONE) { + ipseclog((LOG_DEBUG, "IPv6 ESP input: " + "unspecified encryption algorithm for spi %u\n", + (u_int32_t)ntohl(spi))); + ipsec6stat.in_badspi++; + goto bad; + } + + algo = &esp_algorithms[sav->alg_enc]; /*XXX*/ + + /* check if we have proper ivlen information */ + ivlen = sav->ivlen; + if (ivlen < 0) { + ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + ipsec6stat.in_badspi++; + goto bad; + } + + if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay + && (sav->alg_auth && sav->key_auth))) + goto noreplaycheck; + + if (sav->alg_auth == SADB_AALG_NULL) + goto noreplaycheck; + + /* + * check for sequence number. 
+ */ + if (ipsec_chkreplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) + ; /*okey*/ + else { + ipsec6stat.in_espreplay++; + ipseclog((LOG_WARNING, + "replay packet in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + goto bad; + } + + /* check ICV */ + { + u_char sum0[AH_MAXSUMSIZE]; + u_char sum[AH_MAXSUMSIZE]; + struct ah_algorithm *sumalgo; + size_t siz; + + sumalgo = &ah_algorithms[sav->alg_auth]; + siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); + if (AH_MAXSUMSIZE < siz) { + ipseclog((LOG_DEBUG, + "internal error: AH_MAXSUMSIZE must be larger than %lu\n", + (u_long)siz)); + ipsec6stat.in_inval++; + goto bad; + } + + m_copydata(m, m->m_pkthdr.len - siz, siz, &sum0[0]); + + if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { + ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + ipsec6stat.in_espauthfail++; + goto bad; + } + + if (bcmp(sum0, sum, siz) != 0) { + ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + ipsec6stat.in_espauthfail++; + goto bad; + } + + /* strip off the authentication data */ + m_adj(m, -siz); + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz); + + m->m_flags |= M_AUTHIPDGM; + ipsec6stat.in_espauthsucc++; + } + + /* + * update sequence number. + */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { + if (ipsec_updatereplay(ntohl(((struct newesp *)esp)->esp_seq), sav)) { + ipsec6stat.in_espreplay++; + goto bad; + } + } + +noreplaycheck: + + /* process main esp header. 
*/ + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1827 */ + esplen = sizeof(struct esp); + } else { + /* RFC 2406 */ + if (sav->flags & SADB_X_EXT_DERIV) + esplen = sizeof(struct esp); + else + esplen = sizeof(struct newesp); + } + + if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) { + ipseclog((LOG_WARNING, + "IPv6 ESP input: packet too short\n")); + ipsec6stat.in_inval++; + goto bad; + } + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, esplen + ivlen, IPPROTO_DONE); /*XXX*/ +#else + IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen); + if (esp == NULL) { + ipsec6stat.in_inval++; + m = NULL; + goto bad; + } +#endif + ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/ + + /* + * decrypt the packet. + */ + if (!algo->decrypt) + panic("internal error: no decrypt function"); + if ((*algo->decrypt)(m, off, sav, algo, ivlen)) { + ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + ipsec6stat.in_inval++; + goto bad; + } + ipsec6stat.in_esphist[sav->alg_enc]++; + + m->m_flags |= M_DECRYPTED; + + /* + * find the trailer of the ESP. + */ + m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail), + (caddr_t)&esptail); + nxt = esptail.esp_nxt; + taillen = esptail.esp_padlen + sizeof(esptail); + + if (m->m_pkthdr.len < taillen + || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/ + ipseclog((LOG_WARNING, + "bad pad length in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + ipsec6stat.in_inval++; + goto bad; + } + + /* strip off the trailing pad area. */ + m_adj(m, -taillen); + + ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen); + + /* was it transmitted over the IPsec tunnel SA? */ + if (ipsec6_tunnel_validate(ip6, nxt, sav)) { + /* + * strip off all the headers that precedes ESP header. + * IP6 xx ESP IP6' payload -> IP6' payload + * + * XXX more sanity checks + * XXX relationship with gif? 
+ */ + u_int32_t flowinfo; /*net endian*/ + flowinfo = ip6->ip6_flow; + m_adj(m, off + esplen + ivlen); + if (m->m_len < sizeof(*ip6)) { +#ifndef PULLDOWN_TEST + /* + * m_pullup is prohibited in KAME IPv6 input processing + * but there's no other way! + */ +#else + /* okay to pullup in m_pulldown style */ +#endif + m = m_pullup(m, sizeof(*ip6)); + if (!m) { + ipsec6stat.in_inval++; + goto bad; + } + } + ip6 = mtod(m, struct ip6_hdr *); + /* ECN consideration. */ + ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow); + if (!key_checktunnelsanity(sav, AF_INET6, + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { + ipseclog((LOG_ERR, "ipsec tunnel address mismatch " + "in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), + ipsec_logsastr(sav))); + ipsec6stat.in_inval++; + goto bad; + } + +#if 0 /* XXX should call ipfw rather than ipsec_in_reject, shouldn't it ? */ + /* drop it if it does not match the default policy */ + if (ipsec6_in_reject(m, NULL)) { + ipsec6stat.in_polvio++; + goto bad; + } +#endif + + key_sa_recordxfer(sav, m); + + s = splimp(); + if (IF_QFULL(&ip6intrq)) { + ipsec6stat.in_inval++; + goto bad; + } + IF_ENQUEUE(&ip6intrq, m); + m = NULL; + schednetisr(NETISR_IPV6); /*can be skipped but to make sure*/ + splx(s); + nxt = IPPROTO_DONE; + } else { + /* + * strip off ESP header and IV. + * even in m_pulldown case, we need to strip off ESP so that + * we can always compute checksum for AH correctly. + */ + size_t stripsiz; + char *prvnxtp; + + /* + * Set the next header field of the previous header correctly. 
+ */ + prvnxtp = ip6_get_prevhdr(m, off); /* XXX */ + *prvnxtp = nxt; + + stripsiz = esplen + ivlen; + + ip6 = mtod(m, struct ip6_hdr *); + if (m->m_len >= stripsiz + off) { + ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off); + m->m_data += stripsiz; + m->m_len -= stripsiz; + m->m_pkthdr.len -= stripsiz; + } else { + /* + * this comes with no copy if the boundary is on + * cluster + */ + struct mbuf *n; + + n = m_split(m, off, M_DONTWAIT); + if (n == NULL) { + /* m is retained by m_split */ + goto bad; + } + m_adj(n, stripsiz); + m_cat(m, n); + /* m_cat does not update m_pkthdr.len */ + m->m_pkthdr.len += n->m_pkthdr.len; + } + + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz); + + key_sa_recordxfer(sav, m); + } + + *offp = off; + *mp = m; + + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP esp6_input call free SA:%p\n", sav)); + key_freesav(sav); + } + ipsec6stat.in_success++; + return nxt; + +bad: + if (sav) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP esp6_input call free SA:%p\n", sav)); + key_freesav(sav); + } + if (m) + m_freem(m); + return IPPROTO_DONE; +} +#endif /* INET6 */ diff --git a/bsd/netinet6/esp_output.c b/bsd/netinet6/esp_output.c new file mode 100644 index 000000000..26a1b0ca6 --- /dev/null +++ b/bsd/netinet6/esp_output.c @@ -0,0 +1,671 @@ +/* $KAME: esp_output.c,v 1.17 2000/02/22 14:04:15 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +/* + * RFC1827/2406 Encapsulated Security Payload. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#if INET6 +#include +#include +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include + +static int esp_output __P((struct mbuf *, u_char *, struct mbuf *, + struct ipsecrequest *, int)); + +/* + * compute ESP header size. 
+ */ +size_t +esp_hdrsiz(isr) + struct ipsecrequest *isr; +{ + struct secasvar *sav; + struct esp_algorithm *algo; + size_t ivlen; + size_t authlen; + size_t hdrsiz; + + /* sanity check */ + if (isr == NULL) + panic("esp_hdrsiz: NULL was passed.\n"); + + sav = isr->sav; + + if (isr->saidx.proto != IPPROTO_ESP) + panic("unsupported mode passed to esp_hdrsiz"); + + if (sav == NULL) + goto estimate; + if (sav->state != SADB_SASTATE_MATURE + && sav->state != SADB_SASTATE_DYING) + goto estimate; + + /* we need transport mode ESP. */ + algo = &esp_algorithms[sav->alg_enc]; + if (!algo) + goto estimate; + ivlen = sav->ivlen; + if (ivlen < 0) + goto estimate; + + /* + * XXX + * right now we don't calcurate the padding size. simply + * treat the padding size as constant, for simplicity. + * + * XXX variable size padding support + */ + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1827 */ + hdrsiz = sizeof(struct esp) + ivlen + 9; + } else { + /* RFC 2406 */ + if (sav->replay && sav->alg_auth && sav->key_auth) + authlen = (*ah_algorithms[sav->alg_auth].sumsiz)(sav); + else + authlen = 0; + hdrsiz = sizeof(struct newesp) + ivlen + 9 + authlen; + } + + return hdrsiz; + + estimate: + /* + * ASSUMING: + * sizeof(struct newesp) > sizeof(struct esp). + * 8 = ivlen for CBC mode (RFC2451). + * 9 = (maximum padding length without random padding length) + * + (Pad Length field) + (Next Header field). + * 16 = maximum ICV we support. + */ + return sizeof(struct newesp) + 8 + 9 + 16; +} + +/* + * Modify the packet so that the payload is encrypted. + * The mbuf (m) must start with IPv4 or IPv6 header. + * On failure, free the given mbuf and return NULL. + * + * on invocation: + * m nexthdrp md + * v v v + * IP ......... payload + * during the encryption: + * m nexthdrp mprev md + * v v v v + * IP ............... 
esp iv payload pad padlen nxthdr + * <--><-><------><---------------> + * esplen plen extendsiz + * ivlen + * <-----> esphlen + * <-> hlen + * <-----------------> espoff + */ +static int +esp_output(m, nexthdrp, md, isr, af) + struct mbuf *m; + u_char *nexthdrp; + struct mbuf *md; + struct ipsecrequest *isr; + int af; +{ + struct mbuf *n; + struct mbuf *mprev; + struct esp *esp; + struct esptail *esptail; + struct secasvar *sav = isr->sav; + struct esp_algorithm *algo; + u_int32_t spi; + u_int8_t nxt = 0; + size_t plen; /*payload length to be encrypted*/ + size_t espoff; + int ivlen; + int afnumber; + size_t extendsiz; + int error = 0; + + switch (af) { +#if INET + case AF_INET: + afnumber = 4; + break; +#endif +#if INET6 + case AF_INET6: + afnumber = 6; + break; +#endif + default: + ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af)); + return 0; /* no change at all */ + } + + /* some sanity check */ + if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { + switch (af) { +#if INET + case AF_INET: + { + struct ip *ip; + + ip = mtod(m, struct ip *); + ipseclog((LOG_DEBUG, "esp4_output: internal error: " + "sav->replay is null: %x->%x, SPI=%u\n", + (u_int32_t)ntohl(ip->ip_src.s_addr), + (u_int32_t)ntohl(ip->ip_dst.s_addr), + (u_int32_t)ntohl(sav->spi))); + ipsecstat.out_inval++; + m_freem(m); + return EINVAL; + } +#endif /*INET*/ +#if INET6 + case AF_INET6: + { + struct ip6_hdr *ip6; + + ip6 = mtod(m, struct ip6_hdr *); + ipseclog((LOG_DEBUG, "esp6_output: internal error: " + "sav->replay is null: SPI=%u\n", + (u_int32_t)ntohl(sav->spi))); + ipsec6stat.out_inval++; + m_freem(m); + return EINVAL; + } +#endif /*INET6*/ + } + } + + algo = &esp_algorithms[sav->alg_enc]; /*XXX*/ + spi = sav->spi; + ivlen = sav->ivlen; + /* should be okey */ + if (ivlen < 0) { + panic("invalid ivlen"); + } + + { + /* + * insert ESP header. + * XXX inserts ESP header right after IPv4 header. should + * chase the header chain. 
+ * XXX sequential number + */ +#if INET + struct ip *ip = NULL; +#endif +#if INET6 + struct ip6_hdr *ip6 = NULL; +#endif + size_t esplen; /*sizeof(struct esp/newesp)*/ + size_t esphlen; /*sizeof(struct esp/newesp) + ivlen*/ + size_t hlen = 0; /*ip header len*/ + + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1827 */ + esplen = sizeof(struct esp); + } else { + /* RFC 2406 */ + if (sav->flags & SADB_X_EXT_DERIV) + esplen = sizeof(struct esp); + else + esplen = sizeof(struct newesp); + } + esphlen = esplen + ivlen; + + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) + ; + if (mprev == NULL || mprev->m_next != md) { + ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n", + afnumber)); + m_freem(m); + return EINVAL; + } + + plen = 0; + for (n = md; n; n = n->m_next) + plen += n->m_len; + + switch (af) { +#if INET + case AF_INET: + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + break; +#endif +#if INET6 + case AF_INET6: + ip6 = mtod(m, struct ip6_hdr *); + hlen = sizeof(*ip6); + break; +#endif + } + + /* make the packet over-writable */ + mprev->m_next = NULL; + if ((md = ipsec_copypkt(md)) == NULL) { + m_freem(m); + error = ENOBUFS; + goto fail; + } + mprev->m_next = md; + + espoff = m->m_pkthdr.len - plen; + + /* + * grow the mbuf to accomodate ESP header. + * before: IP ... payload + * after: IP ... 
ESP IV payload + */ + if (M_LEADINGSPACE(md) < esphlen) { + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + m_freem(m); + error = ENOBUFS; + goto fail; + } + n->m_len = esphlen; + mprev->m_next = n; + n->m_next = md; + m->m_pkthdr.len += esphlen; + esp = mtod(n, struct esp *); + } else { + md->m_len += esphlen; + md->m_data -= esphlen; + m->m_pkthdr.len += esphlen; + esp = mtod(md, struct esp *); + } + + nxt = *nexthdrp; + *nexthdrp = IPPROTO_ESP; + switch (af) { +#if INET + case AF_INET: + if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) + ip->ip_len = htons(ntohs(ip->ip_len) + esphlen); + else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + ipsecstat.out_inval++; + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; +#endif +#if INET6 + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; +#endif + } + } + + /* initialize esp header. */ + esp->esp_spi = spi; + if ((sav->flags & SADB_X_EXT_OLD) == 0) { + struct newesp *nesp; + nesp = (struct newesp *)esp; + if (sav->replay->count == ~0) { + if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { + /* XXX Is it noisy ? */ + ipseclog((LOG_WARNING, + "replay counter overflowed. %s\n", + ipsec_logsastr(sav))); + ipsecstat.out_inval++; + m_freem(m); + return EINVAL; + } + } + sav->replay->count++; + /* + * XXX sequence number must not be cycled, if the SA is + * installed by IKE daemon. + */ + nesp->esp_seq = htonl(sav->replay->count); + } + + { + /* + * find the last mbuf. make some room for ESP trailer. 
+ * XXX new-esp authentication data + */ +#if INET + struct ip *ip = NULL; +#endif + size_t padbound; + u_char *extend; + int i; + + if (algo->padbound) + padbound = algo->padbound; + else + padbound = 4; + /* ESP packet, including nxthdr field, must be length of 4n */ + if (padbound < 4) + padbound = 4; + + extendsiz = padbound - (plen % padbound); + if (extendsiz == 1) + extendsiz = padbound + 1; + + n = m; + while (n->m_next) + n = n->m_next; + + /* + * if M_EXT, the external part may be shared among + * two consequtive TCP packets. + */ + if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) { + extend = mtod(n, u_char *) + n->m_len; + n->m_len += extendsiz; + m->m_pkthdr.len += extendsiz; + } else { + struct mbuf *nn; + + MGET(nn, M_DONTWAIT, MT_DATA); + if (!nn) { + ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf", + afnumber)); + m_freem(m); + error = ENOBUFS; + goto fail; + } + extend = mtod(nn, u_char *); + nn->m_len = extendsiz; + nn->m_next = NULL; + n->m_next = nn; + n = nn; + m->m_pkthdr.len += extendsiz; + } + switch (sav->flags & SADB_X_EXT_PMASK) { + case SADB_X_EXT_PRAND: + for (i = 0; i < extendsiz; i++) + extend[i] = random() & 0xff; + break; + case SADB_X_EXT_PZERO: + bzero(extend, extendsiz); + break; + case SADB_X_EXT_PSEQ: + for (i = 0; i < extendsiz; i++) + extend[i] = (i + 1) & 0xff; + break; + } + + /* initialize esp trailer. 
*/ + esptail = (struct esptail *) + (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail)); + esptail->esp_nxt = nxt; + esptail->esp_padlen = extendsiz - 2; + + /* modify IP header (for ESP header part only) */ + switch (af) { +#if INET + case AF_INET: + ip = mtod(m, struct ip *); + if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) + ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz); + else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + ipsecstat.out_inval++; + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; +#endif +#if INET6 + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; +#endif + } + } + + /* + * encrypt the packet, based on security association + * and the algorithm specified. + */ + if (!algo->encrypt) + panic("internal error: no encrypt function"); + if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) { + ipseclog((LOG_ERR, "packet encryption failure\n")); + m_freem(m); + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_inval++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_inval++; + break; +#endif + } + error = EINVAL; + goto fail; + } + + /* + * calculate ICV if required. 
+ */ + if (!sav->replay) + goto noantireplay; + if (!sav->key_auth) + goto noantireplay; + if (!sav->alg_auth) + goto noantireplay; + { + u_char authbuf[AH_MAXSUMSIZE]; + struct mbuf *n; + u_char *p; + size_t siz; + struct ip *ip; + + siz = (((*ah_algorithms[sav->alg_auth].sumsiz)(sav) + 3) & ~(4 - 1)); + if (AH_MAXSUMSIZE < siz) + panic("assertion failed for AH_MAXSUMSIZE"); + + if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) + goto noantireplay; + + n = m; + while (n->m_next) + n = n->m_next; + + if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /*XXX*/ + n->m_len += siz; + m->m_pkthdr.len += siz; + p = mtod(n, u_char *) + n->m_len - siz; + } else { + struct mbuf *nn; + + MGET(nn, M_DONTWAIT, MT_DATA); + if (!nn) { + ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output", + afnumber)); + m_freem(m); + error = ENOBUFS; + goto fail; + } + nn->m_len = siz; + nn->m_next = NULL; + n->m_next = nn; + n = nn; + m->m_pkthdr.len += siz; + p = mtod(nn, u_char *); + } + bcopy(authbuf, p, siz); + + /* modify IP header (for ESP header part only) */ + switch (af) { +#if INET + case AF_INET: + ip = mtod(m, struct ip *); + if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) + ip->ip_len = htons(ntohs(ip->ip_len) + siz); + else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + ipsecstat.out_inval++; + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; +#endif +#if INET6 + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; +#endif + } + } + +noantireplay: + if (!m) { + ipseclog((LOG_ERR, + "NULL mbuf after encryption in esp%d_output", afnumber)); + } else { + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_success++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_success++; + break; +#endif + } + } + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_esphist[sav->alg_enc]++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_esphist[sav->alg_enc]++; + break; 
#if INET
/*
 * ESP output entry point for IPv4.
 *
 * Validates that the first mbuf holds at least a full IPv4 header, then
 * delegates to the common esp_output() with &ip->ip_p as the next-header
 * field to be rewritten to IPPROTO_ESP.  Consumes (frees) the mbuf on
 * failure.  Returns 0 on success or an errno value on failure.
 */
int
esp4_output(m, isr)
	struct mbuf *m;
	struct ipsecrequest *isr;
{
	struct ip *ip;

	if (m->m_len < sizeof(struct ip)) {
		ipseclog((LOG_DEBUG, "esp4_output: first mbuf too short\n"));
		m_freem(m);
		/*
		 * BUG FIX: this returned NULL, a pointer constant that
		 * evaluates to 0, i.e. "success" -- even though the packet
		 * was just freed.  A caller treating 0 as success could go
		 * on to use the freed mbuf.  Report the malformed packet
		 * explicitly instead.
		 */
		return EINVAL;
	}
	ip = mtod(m, struct ip *);
	/* XXX assumes that m->m_next points to payload */
	return esp_output(m, &ip->ip_p, m->m_next, isr, AF_INET);
}
#endif /*INET*/

#if INET6
/*
 * ESP output entry point for IPv6.
 *
 * Validates that the first mbuf holds at least a full IPv6 header, then
 * delegates to the common esp_output().  nexthdrp points at the
 * next-header byte to rewrite; md is the start of the payload chain.
 * Consumes the mbuf on failure.  Returns 0 on success or an errno value.
 */
int
esp6_output(m, nexthdrp, md, isr)
	struct mbuf *m;
	u_char *nexthdrp;
	struct mbuf *md;
	struct ipsecrequest *isr;
{
	if (m->m_len < sizeof(struct ip6_hdr)) {
		ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));
		m_freem(m);
		/* BUG FIX: was "return NULL" (== 0), falsely signaling success. */
		return EINVAL;
	}
	return esp_output(m, nexthdrp, md, isr, AF_INET6);
}
#endif /*INET6*/
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__OpenBSD__) && !(defined(__bsdi__) && _BSDI_VERSION >= 199802) && !defined(__APPLE__) +#include +#endif +#include + +#include + +/* + * Define it to get a correct behavior on per-interface statistics. + * You will need to perform an extra routing table lookup, per fragment, + * to do it. This may, or may not be, a performance hit. 
+ */ +#define IN6_IFSTAT_STRICT + +static void frag6_enq __P((struct ip6asfrag *, struct ip6asfrag *)); +static void frag6_deq __P((struct ip6asfrag *)); +static void frag6_insque __P((struct ip6q *, struct ip6q *)); +static void frag6_remque __P((struct ip6q *)); +static void frag6_freef __P((struct ip6q *)); + +int frag6_doing_reass; +u_int frag6_nfragpackets; +struct ip6q ip6q; /* ip6 reassemble queue */ + +/* FreeBSD tweak */ +#if !defined(M_FTABLE) && (defined(__FreeBSD__) && __FreeBSD__ >= 3) +MALLOC_DEFINE(M_FTABLE, "fragment", "fragment reassembly header"); +#endif + +#ifndef offsetof /* XXX */ +#define offsetof(type, member) ((size_t)(&((type *)0)->member)) +#endif + +/* + * Initialise reassembly queue and fragment identifier. + */ +void +frag6_init() +{ + struct timeval tv; + + /* + * in many cases, random() here does NOT return random number + * as initialization during bootstrap time occur in fixed order. + */ + microtime(&tv); + ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q; + ip6_id = random() ^ tv.tv_usec; +} + +/* + * In RFC2460, fragment and reassembly rule do not agree with each other, + * in terms of next header field handling in fragment header. + * While the sender will use the same value for all of the fragmented packets, + * receiver is suggested not to check the consistency. + * + * fragment rule (p20): + * (2) A Fragment header containing: + * The Next Header value that identifies the first header of + * the Fragmentable Part of the original packet. + * -> next header field is same for all fragments + * + * reassembly rule (p21): + * The Next Header field of the last header of the Unfragmentable + * Part is obtained from the Next Header field of the first + * fragment's Fragment header. + * -> should grab it from the first fragment only + * + * The following note also contradicts with fragment rule - noone is going to + * send different fragment with different next header field. 
/*
 * Fragment input.
 *
 * Called from the extension-header dispatch loop with *offp at the
 * fragment header.  Either queues the fragment for later, or -- when it
 * completes a packet -- splices the fragments into one mbuf chain,
 * strips the fragment header, and returns the next header value so the
 * caller can continue parsing.  Returns IPPROTO_DONE when the caller
 * should stop (fragment queued, dropped, or an ICMP error was sent).
 *
 * NOTE(review): per RFC 2460 p.22, only the next-header value from the
 * offset-zero fragment is used for reassembly, even though the sender
 * is expected to emit the same value in every fragment.
 */
int
frag6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
#ifdef IN6_IFSTAT_STRICT
	/* cached route used only to attribute per-interface statistics */
	static struct route_in6 ro;
	struct sockaddr_in6 *dst;
#endif

	ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, offset, sizeof(struct ip6_frag), IPPROTO_DONE);
	ip6f = (struct ip6_frag *)((caddr_t)ip6 + offset);
#else
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;
#endif

	dstifp = NULL;
#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	dst = (struct sockaddr_in6 *)&ro.ro_dst;
	/* invalidate the cached route if it is down or for another dst */
	if (ro.ro_rt
	 && ((ro.ro_rt->rt_flags & RTF_UP) == 0
	  || !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst))) {
		RTFREE(ro.ro_rt);
		ro.ro_rt = (struct rtentry *)0;
	}
	if (ro.ro_rt == NULL) {
		bzero(dst, sizeof(*dst));
		dst->sin6_family = AF_INET6;
		dst->sin6_len = sizeof(struct sockaddr_in6);
		dst->sin6_addr = ip6->ip6_dst;
	}
#ifndef __bsdi__
	rtalloc((struct route *)&ro);
#else
	rtcalloc((struct route *)&ro);
#endif
	if (ro.ro_rt != NULL && ro.ro_rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)ro.ro_rt->rt_ifa)->ia_ifp;
#else
	/* we are violating the spec, this is not the destination interface */
	if ((m->m_flags & M_PKTHDR) != 0)
		dstifp = m->m_pkthdr.rcvif;
#endif

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	/*
	 * check whether fragment packet's fragment length is
	 * multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) = 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB,
			    ICMP6_PARAMPROB_HEADER,
			    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	/* look up an existing reassembly queue for (ident, src, dst) */
	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;
		frag6_nfragpackets++;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly;
		 * If maxfrag is 0, never accept fragments.
		 * If maxfrag is -1, accept all fragments without limitation.
		 * Over the limit: evict the oldest queue (tail of ip6q).
		 */
		if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets) {
			ip6stat.ip6s_fragoverflow++;
			in6_ifstat_inc(dstifp, ifs6_reass_fail);
			frag6_freef(ip6q.ip6q_prev);
		}
		q6 = (struct ip6q *)_MALLOC(sizeof(struct ip6q), M_FTABLE,
			M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down	= q6->ip6q_up = (struct ip6asfrag *)q6;
#if notyet
		q6->ip6q_nxtp	= (u_char *)nxtp;
#endif
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr)
			- sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
				    offset - sizeof(struct ip6_frag) +
					offsetof(struct ip6_frag, ip6f_offlg));
			return(IPPROTO_DONE);
		}
	}
	else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
				offsetof(struct ip6_frag, ip6f_offlg));
		return(IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				_FREE(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
					    ICMP6_PARAMPROB_HEADER,
					    erroff - sizeof(struct ip6_frag) +
						offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	/* allocate the per-fragment bookkeeping record */
	ip6af = (struct ip6asfrag *)_MALLOC(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to override
	 * existing fragments from a security point of view.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
			/* NOTE(review): this log string lacks a trailing \n */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment",
			    i, ip6_sprintf(&q6->ip6q_src));
			goto dropfrag;
		}
	}
#endif

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	/* walk the chain: any hole means reassembly is not yet complete */
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			frag6_doing_reass = 0;
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	/* last fragment still has More-Fragments set: keep waiting */
	if (af6->ip6af_up->ip6af_mff) {
		frag6_doing_reass = 0;
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		/* trim each follower's headers; only its payload is kept */
		m_adj(t->m_next, af6->ip6af_offset);
		_FREE(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	_FREE(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#if notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete frag6 header with as few a cost as possible.
	 */
	if (offset < m->m_len) {
		/* slide the preceding headers forward over the frag header */
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
			offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			_FREE(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		char *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	_FREE(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	frag6_doing_reass = 0;
	return nxt;

 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 * The first (offset-zero) fragment triggers an ICMP time-exceeded
 * error back to the sender; all others are simply freed.
 */
void
frag6_freef(q6)
	struct ip6q *q6;
{
	struct ip6asfrag *af6, *down6;

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
				    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		_FREE(af6, M_FTABLE);

	}
	frag6_remque(q6);
	_FREE(q6, M_FTABLE);
	frag6_nfragpackets--;
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(af6, up6)
	struct ip6asfrag *af6, *up6;
{
	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}
/*
 * To frag6_enq as remque is to insque: unlink a fragment from its
 * doubly-linked reassembly chain.
 */
void
frag6_deq(af6)
	struct ip6asfrag *af6;
{
	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

/* Insert reassembly queue 'new' right after 'old' (typically the head). */
void
frag6_insque(new, old)
	struct ip6q *new, *old;
{
	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev= new;
	old->ip6q_next = new;
}

/* Unlink a reassembly queue from the global list. */
void
frag6_remque(p6)
	struct ip6q *p6;
{
	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

/*
 * IP timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 * Also drains queues down to ip6_maxfragpackets if the limit was
 * lowered.  On Darwin this runs outside the funnel, so it takes it
 * explicitly for the duration.
 */
void
frag6_slowtimo()
{
	struct ip6q *q6;
	int s;
#ifdef __APPLE__
	boolean_t   funnel_state;
	funnel_state = thread_set_funneled(TRUE);
#endif
#ifdef __NetBSD__
	s = splsoftnet();
#else
	s = splnet();
#endif
#if 0
	extern struct route_in6 ip6_forward_rt;
#endif

	frag6_doing_reass = 1;
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			/*
			 * Advance q6 before testing/freeing the entry we
			 * just aged, so freeing it cannot invalidate the
			 * iterator.
			 */
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	frag6_doing_reass = 0;

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	if (ip6_forward_rt.ro_rt) {
		RTFREE(ip6_forward_rt.ro_rt);
		ip6_forward_rt.ro_rt = 0;
	}
	if (ipsrcchk_rt.ro_rt) {
		RTFREE(ipsrcchk_rt.ro_rt);
		ipsrcchk_rt.ro_rt = 0;
	}
#endif

	splx(s);
#ifdef __APPLE__
	(void) thread_set_funneled(funnel_state);
#endif
}
+ */ +void +frag6_drain() +{ + if (frag6_doing_reass) + return; + while (ip6q.ip6q_next != &ip6q) { + ip6stat.ip6s_fragdropped++; + /* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */ + frag6_freef(ip6q.ip6q_next); + } +} diff --git a/bsd/netinet6/icmp6.c b/bsd/netinet6/icmp6.c new file mode 100644 index 000000000..1291a6ea6 --- /dev/null +++ b/bsd/netinet6/icmp6.c @@ -0,0 +1,2524 @@ +/* $KAME: icmp6.c,v 1.77 2000/04/13 11:31:39 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_icmp.c 8.2 (Berkeley) 1/4/94 + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#ifdef __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#if defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) +#include +#include +#endif +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__OpenBSD__) && !(defined(__bsdi__) && _BSDI_VERSION >= 199802) && !defined(__APPLE__) +#include +#else +#include +#endif +#include +#include +#include + +#ifdef __OpenBSD__ /*KAME IPSEC*/ +#undef IPSEC +#endif + +#if IPSEC +#include +#include +#include +#endif + +#include "faith.h" + +#include + +extern struct domain inet6domain; +extern struct ip6protosw inet6sw[]; +extern struct ip6protosw *ip6_protox[]; + +struct icmp6stat icmp6stat; + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) +extern struct in6pcb rawin6pcb; +#else +extern struct inpcbhead ripcb; +#endif +extern u_int icmp6errratelim; +extern int icmp6_nodeinfo; +#if defined(__NetBSD__) || defined(__OpenBSD__) +static struct rttimer_queue *icmp6_mtudisc_timeout_q = NULL; +extern int pmtu_expire; +#endif + +#ifndef HAVE_NRL_INPCB +static int icmp6_rip6_input __P((struct mbuf 
**, int)); +#endif +static void icmp6_mtudisc_update __P((struct in6_addr *, struct icmp6_hdr *, + struct mbuf *)); +static int icmp6_ratelimit __P((const struct in6_addr *, const int, const int)); +static const char *icmp6_redirect_diag __P((struct in6_addr *, + struct in6_addr *, struct in6_addr *)); +static struct mbuf * ni6_input __P((struct mbuf *, int)); +static int ni6_addrs __P((struct icmp6_nodeinfo *, struct mbuf *, + struct ifnet **)); +static int ni6_store_addrs __P((struct icmp6_nodeinfo *, struct icmp6_nodeinfo *, + struct ifnet *, int)); +#if defined(__NetBSD__) || defined(__OpenBSD__) +static struct rtentry *icmp6_mtudisc_clone __P((struct sockaddr *)); +static void icmp6_mtudisc_timeout __P((struct rtentry *, struct rttimer *)); +#endif + +#ifdef COMPAT_RFC1885 +static struct route_in6 icmp6_reflect_rt; +#endif +static struct timeval icmp6_nextsend = {0, 0}; + +#if MIP6 +int (*mip6_icmp6_input_hook)(struct mbuf *m, int off) = NULL; +#endif /* MIP6 */ + +void +icmp6_init() +{ + mld6_init(); +#if defined(__NetBSD__) || defined(__OpenBSD__) + icmp6_mtudisc_timeout_q = rt_timer_queue_create(pmtu_expire); +#endif +} + +/* + * Generate an error packet of type error in response to bad IP6 packet. + */ +void +icmp6_error(m, type, code, param) + struct mbuf *m; + int type, code, param; +{ + struct ip6_hdr *oip6, *nip6; + struct icmp6_hdr *icmp6; + u_int preplen; + int off; + int nxt; + + icmp6stat.icp6s_error++; + +#ifdef M_DECRYPTED /*not openbsd*/ + if (m->m_flags & M_DECRYPTED) { + icmp6stat.icp6s_canterror++; + goto freeit; + } +#endif + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), ); +#else + if (m->m_len < sizeof(struct ip6_hdr)) { + m = m_pullup(m, sizeof(struct ip6_hdr)); + if (m == NULL) + return; + } +#endif + oip6 = mtod(m, struct ip6_hdr *); + + /* + * Multicast destination check. For unrecognized option errors, + * this check has already done in ip6_unknown_opt(), so we can + * check only for other errors. 
+ */ + if ((m->m_flags & (M_BCAST|M_MCAST) || + IN6_IS_ADDR_MULTICAST(&oip6->ip6_dst)) && + (type != ICMP6_PACKET_TOO_BIG && + (type != ICMP6_PARAM_PROB || + code != ICMP6_PARAMPROB_OPTION))) + goto freeit; + + /* Source address check. XXX: the case of anycast source? */ + if (IN6_IS_ADDR_UNSPECIFIED(&oip6->ip6_src) || + IN6_IS_ADDR_MULTICAST(&oip6->ip6_src)) + goto freeit; + + /* + * If we are about to send ICMPv6 against ICMPv6 error/redirect, + * don't do it. + */ + nxt = -1; + off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); + if (off >= 0 && nxt == IPPROTO_ICMPV6) { + struct icmp6_hdr *icp; + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, 0, off + sizeof(struct icmp6_hdr), ); + icp = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); +#else + IP6_EXTHDR_GET(icp, struct icmp6_hdr *, m, off, + sizeof(*icp)); + if (icp == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } +#endif + if (icp->icmp6_type < ICMP6_ECHO_REQUEST || + icp->icmp6_type == ND_REDIRECT) { + /* + * ICMPv6 error + * Special case: for redirect (which is + * informational) we must not send icmp6 error. + */ + icmp6stat.icp6s_canterror++; + goto freeit; + } else { + /* ICMPv6 informational - send the error */ + } + } else { + /* non-ICMPv6 - send the error */ + } + + oip6 = mtod(m, struct ip6_hdr *); /* adjust pointer */ + + /* Finally, do rate limitation check. */ + if (icmp6_ratelimit(&oip6->ip6_src, type, code)) { + icmp6stat.icp6s_toofreq++; + goto freeit; + } + + /* + * OK, ICMP6 can be generated. 
	 */

	/* truncate the quoted packet so the error fits the minimum MTU */
	if (m->m_pkthdr.len >= ICMPV6_PLD_MAXLEN)
		m_adj(m, ICMPV6_PLD_MAXLEN - m->m_pkthdr.len);

	preplen = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr);
	M_PREPEND(m, preplen, M_DONTWAIT);
	if (m && m->m_len < preplen)
		m = m_pullup(m, preplen);
	if (m == NULL) {
		printf("ENOBUFS in icmp6_error %d\n", __LINE__);
		return;
	}

	nip6 = mtod(m, struct ip6_hdr *);
	nip6->ip6_src = oip6->ip6_src;
	nip6->ip6_dst = oip6->ip6_dst;

	/* clear the embedded scope index (KAME link-local hack) */
	if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src))
		oip6->ip6_src.s6_addr16[1] = 0;
	if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst))
		oip6->ip6_dst.s6_addr16[1] = 0;

	icmp6 = (struct icmp6_hdr *)(nip6 + 1);
	icmp6->icmp6_type = type;
	icmp6->icmp6_code = code;
	icmp6->icmp6_pptr = htonl((u_int32_t)param);

	icmp6stat.icp6s_outhist[type]++;
	icmp6_reflect(m, sizeof(struct ip6_hdr)); /*header order: IPv6 - ICMPv6*/

	return;

  freeit:
	/*
	 * If we can't tell whether or not we can generate ICMP6, free it.
	 */
	m_freem(m);
}

/*
 * Process a received ICMP6 message.
 * *mp is consumed: delivered to matching raw sockets via icmp6_rip6_input()
 * (or rip6_input() with HAVE_NRL_INPCB) or freed.  Always returns
 * IPPROTO_DONE.
 */
int
icmp6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp, *n;
	struct ip6_hdr *ip6, *nip6;
	struct icmp6_hdr *icmp6, *nicmp6;
	int off = *offp;
	int icmp6len = m->m_pkthdr.len - *offp;
	int code, sum, noff;
	struct sockaddr_in6 icmp6src;

#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, off, sizeof(struct icmp6_hdr), IPPROTO_DONE);
	/* m might change if M_LOOP. So, call mtod after this */
#endif

	/*
	 * Locate icmp6 structure in mbuf, and check
	 * that not corrupted and of at least minimum length
	 */

	ip6 = mtod(m, struct ip6_hdr *);
	if (icmp6len < sizeof(struct icmp6_hdr)) {
		icmp6stat.icp6s_tooshort++;
		goto freeit;
	}

	/*
	 * calculate the checksum
	 */
#ifndef PULLDOWN_TEST
	icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off);
#else
	IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, sizeof(*icmp6));
	if (icmp6 == NULL) {
		icmp6stat.icp6s_tooshort++;
		return IPPROTO_DONE;
	}
#endif
	code = icmp6->icmp6_code;

	if ((sum = in6_cksum(m, IPPROTO_ICMPV6, off, icmp6len)) != 0) {
		log(LOG_ERR,
		    "ICMP6 checksum error(%d|%x) %s\n",
		    icmp6->icmp6_type,
		    sum,
		    ip6_sprintf(&ip6->ip6_src));
		icmp6stat.icp6s_checksum++;
		goto freeit;
	}

#if defined(NFAITH) && 0 < NFAITH
	if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
		/*
		 * Deliver very specific ICMP6 type only.
		 * This is important to deliver TOOBIG. Otherwise PMTUD
		 * will not work.
		 */
		switch (icmp6->icmp6_type) {
		case ICMP6_DST_UNREACH:
		case ICMP6_PACKET_TOO_BIG:
		case ICMP6_TIME_EXCEEDED:
			break;
		default:
			goto freeit;
		}
	}
#endif

#if IPSEC
	/* drop it if it does not match the default policy */
	if (ipsec6_in_reject(m, NULL)) {
		ipsecstat.in_polvio++;
		goto freeit;
	}
#endif

	icmp6stat.icp6s_inhist[icmp6->icmp6_type]++;
	icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_msg);
	if (icmp6->icmp6_type < ICMP6_INFOMSG_MASK)
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_error);

#if MIP6
	/*
	 * Mobile IPv6
	 *
	 * Check for ICMP errors and modifications and extensions to Router
	 * Advertisement.
	 */
	if (mip6_icmp6_input_hook) {
		if ((*mip6_icmp6_input_hook)(m, off) != 0)
			goto freeit;
	}
#endif /* MIP6 */

	switch (icmp6->icmp6_type) {

	case ICMP6_DST_UNREACH:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_dstunreach);
		/* map the ICMPv6 code to a PRC_* protocol-control code */
		switch (code) {
		case ICMP6_DST_UNREACH_NOROUTE:
			code = PRC_UNREACH_NET;
			break;
		case ICMP6_DST_UNREACH_ADMIN:
			icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_adminprohib);
			code = PRC_UNREACH_PROTOCOL; /* is this a good code? */
			break;
		case ICMP6_DST_UNREACH_ADDR:
			code = PRC_HOSTDEAD;
			break;
#ifdef COMPAT_RFC1885
		case ICMP6_DST_UNREACH_NOTNEIGHBOR:
			code = PRC_UNREACH_SRCFAIL;
			break;
#else
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
			/* I mean "source address was incorrect." */
			code = PRC_PARAMPROB;
			break;
#endif
		case ICMP6_DST_UNREACH_NOPORT:
			code = PRC_UNREACH_PORT;
			break;
		default:
			goto badcode;
		}
		goto deliver;
		break;

	case ICMP6_PACKET_TOO_BIG:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_pkttoobig);
		if (code != 0)
			goto badcode;

		code = PRC_MSGSIZE;

		/*
		 * Updating the path MTU will be done after examining
		 * intermediate extension headers.
		 */
		goto deliver;
		break;

	case ICMP6_TIME_EXCEEDED:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_timeexceed);
		switch (code) {
		case ICMP6_TIME_EXCEED_TRANSIT:
		case ICMP6_TIME_EXCEED_REASSEMBLY:
			code += PRC_TIMXCEED_INTRANS;
			break;
		default:
			goto badcode;
		}
		goto deliver;
		break;

	case ICMP6_PARAM_PROB:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_paramprob);
		switch (code) {
		case ICMP6_PARAMPROB_NEXTHEADER:
			code = PRC_UNREACH_PROTOCOL;
			break;
		case ICMP6_PARAMPROB_HEADER:
		case ICMP6_PARAMPROB_OPTION:
			code = PRC_PARAMPROB;
			break;
		default:
			goto badcode;
		}
		goto deliver;
		break;

	case ICMP6_ECHO_REQUEST:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_echo);
		if (code != 0)
			goto badcode;
		if ((n = m_copy(m, 0, M_COPYALL)) == NULL) {
			/* Give up remote */
			break;
		}
		/*
		 * If the copy shares external storage or is too short to
		 * patch in place, build a fresh writable header mbuf.
		 */
		if ((n->m_flags & M_EXT) != 0
		 || n->m_len < off + sizeof(struct icmp6_hdr)) {
			struct mbuf *n0 = n;
			const int maxlen = sizeof(*nip6) + sizeof(*nicmp6);

			/*
			 * Prepare an internal mbuf. m_pullup() doesn't
			 * always copy the length we specified.
			 */
			if (maxlen >= MCLBYTES) {
#if DIAGNOSTIC
				printf("MCLBYTES too small\n");
#endif
				/* Give up remote */
				m_freem(n0);
				break;
			}
			MGETHDR(n, M_DONTWAIT, n0->m_type);
			if (n && maxlen >= MHLEN) {
				MCLGET(n, M_DONTWAIT);
				if ((n->m_flags & M_EXT) == 0) {
					m_free(n);
					n = NULL;
				}
			}
			if (n == NULL) {
				/* Give up remote */
				m_freem(n0);
				break;
			}
			M_COPY_PKTHDR(n, n0);
			/*
			 * Copy IPv6 and ICMPv6 only.
			 */
			nip6 = mtod(n, struct ip6_hdr *);
			bcopy(ip6, nip6, sizeof(struct ip6_hdr));
			nicmp6 = (struct icmp6_hdr *)(nip6 + 1);
			bcopy(icmp6, nicmp6, sizeof(struct icmp6_hdr));
			noff = sizeof(struct ip6_hdr);
			n->m_pkthdr.len = n->m_len =
				noff + sizeof(struct icmp6_hdr);
			/*
			 * Adjust mbuf. ip6_plen will be adjusted in
			 * ip6_output().
			 */
			m_adj(n0, off + sizeof(struct icmp6_hdr));
			n->m_pkthdr.len += n0->m_pkthdr.len;
			n->m_next = n0;
			n0->m_flags &= ~M_PKTHDR;
		} else {
			nip6 = mtod(n, struct ip6_hdr *);
			nicmp6 = (struct icmp6_hdr *)((caddr_t)nip6 + off);
			noff = off;
		}
		nicmp6->icmp6_type = ICMP6_ECHO_REPLY;
		nicmp6->icmp6_code = 0;
		if (n) {
			icmp6stat.icp6s_reflect++;
			icmp6stat.icp6s_outhist[ICMP6_ECHO_REPLY]++;
			icmp6_reflect(n, noff);
		}
		break;

	case ICMP6_ECHO_REPLY:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_echoreply);
		if (code != 0)
			goto badcode;
		break;

	case MLD6_LISTENER_QUERY:
	case MLD6_LISTENER_REPORT:
		if (icmp6len < sizeof(struct mld6_hdr))
			goto badlen;
		if (icmp6->icmp6_type == MLD6_LISTENER_QUERY) /* XXX: ugly... */
			icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mldquery);
		else
			icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mldreport);
		if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
			/* give up local */
			mld6_input(m, off);
			m = NULL;
			goto freeit;
		}
		mld6_input(n, off);
		/* m stays. */
		break;

	case MLD6_LISTENER_DONE:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mlddone);
		if (icmp6len < sizeof(struct mld6_hdr))	/* necessary? */
			goto badlen;
		break;		/* nothing to be done in kernel */

	case MLD6_MTRACE_RESP:
	case MLD6_MTRACE:
		/* XXX: these two are experimental. not officially defined. */
		/* XXX: per-interface statistics? */
		break;		/* just pass it to applications */

	case ICMP6_WRUREQUEST:	/* ICMP6_FQDN_QUERY */
	    {
		enum { WRU, FQDN } mode;

		if (code != 0)
			goto badcode;
		if (!icmp6_nodeinfo)
			break;

		/* distinguish old-style WRU (fixed 4-byte) from FQDN query */
		if (icmp6len == sizeof(struct icmp6_hdr) + 4)
			mode = WRU;
		else if (icmp6len >= sizeof(struct icmp6_hdr) + 8) /* XXX */
			mode = FQDN;
		else
			goto badlen;

#if defined( __FreeBSD__) || defined (__APPLE__)
#define hostnamelen	strlen(hostname)
#endif
		if (mode == FQDN) {
#ifndef PULLDOWN_TEST
			IP6_EXTHDR_CHECK(m, off, sizeof(struct icmp6_nodeinfo),
					 IPPROTO_DONE);
#endif
			n = m_copy(m, 0, M_COPYALL);
			if (n)
				n = ni6_input(n, off);
			if (n)
				noff = sizeof(struct ip6_hdr);
		} else {
			u_char *p;
			int maxlen, maxhlen;

			maxlen = sizeof(*nip6) + sizeof(*nicmp6) + 4;
			if (maxlen >= MCLBYTES) {
#if DIAGNOSTIC
				printf("MCLBYTES too small\n");
#endif
				/* Give up remote */
				break;
			}
			MGETHDR(n, M_DONTWAIT, m->m_type);
			if (n && maxlen > MHLEN) {
				MCLGET(n, M_DONTWAIT);
				if ((n->m_flags & M_EXT) == 0) {
					m_free(n);
					n = NULL;
				}
			}
			if (n == NULL) {
				/* Give up remote */
				break;
			}
			n->m_len = 0;
			maxhlen = M_TRAILINGSPACE(n) - maxlen;
			if (maxhlen > hostnamelen)
				maxhlen = hostnamelen;
			/*
			 * Copy IPv6 and ICMPv6 only.
			 */
			nip6 = mtod(n, struct ip6_hdr *);
			bcopy(ip6, nip6, sizeof(struct ip6_hdr));
			nicmp6 = (struct icmp6_hdr *)(nip6 + 1);
			bcopy(icmp6, nicmp6, sizeof(struct icmp6_hdr));
			p = (u_char *)(nicmp6 + 1);
			bzero(p, 4);
			bcopy(hostname, p + 4, maxhlen);
			noff = sizeof(struct ip6_hdr);
			M_COPY_PKTHDR(n, m); /* just for recvif */
			n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) +
				sizeof(struct icmp6_hdr) + 4 + maxhlen;
			nicmp6->icmp6_type = ICMP6_WRUREPLY;
			nicmp6->icmp6_code = 0;
		}
#undef hostnamelen
		if (n) {
			icmp6stat.icp6s_reflect++;
			icmp6stat.icp6s_outhist[ICMP6_WRUREPLY]++;
			icmp6_reflect(n, noff);
		}
		break;
	    }

	case ICMP6_WRUREPLY:
		if (code != 0)
			goto badcode;
		break;

	case ND_ROUTER_SOLICIT:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_routersolicit);
		if (code != 0)
			goto badcode;
		if (icmp6len < sizeof(struct nd_router_solicit))
			goto badlen;
		if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
			/* give up local */
			nd6_rs_input(m, off, icmp6len);
			m = NULL;
			goto freeit;
		}
		nd6_rs_input(n, off, icmp6len);
		/* m stays. */
		break;

	case ND_ROUTER_ADVERT:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_routeradvert);
		if (code != 0)
			goto badcode;
		if (icmp6len < sizeof(struct nd_router_advert))
			goto badlen;
		if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
			/* give up local */
			nd6_ra_input(m, off, icmp6len);
			m = NULL;
			goto freeit;
		}
		nd6_ra_input(n, off, icmp6len);
		/* m stays. */
		break;

	case ND_NEIGHBOR_SOLICIT:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_neighborsolicit);
		if (code != 0)
			goto badcode;
		if (icmp6len < sizeof(struct nd_neighbor_solicit))
			goto badlen;
		if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
			/* give up local */
			nd6_ns_input(m, off, icmp6len);
			m = NULL;
			goto freeit;
		}
		nd6_ns_input(n, off, icmp6len);
		/* m stays. */
		break;

	case ND_NEIGHBOR_ADVERT:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_neighboradvert);
		if (code != 0)
			goto badcode;
		if (icmp6len < sizeof(struct nd_neighbor_advert))
			goto badlen;
		if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
			/* give up local */
			nd6_na_input(m, off, icmp6len);
			m = NULL;
			goto freeit;
		}
		nd6_na_input(n, off, icmp6len);
		/* m stays. */
		break;

	case ND_REDIRECT:
		icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_redirect);
		if (code != 0)
			goto badcode;
		if (icmp6len < sizeof(struct nd_redirect))
			goto badlen;
		if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) {
			/* give up local */
			icmp6_redirect_input(m, off);
			m = NULL;
			goto freeit;
		}
		icmp6_redirect_input(n, off);
		/* m stays. */
		break;

	case ICMP6_ROUTER_RENUMBERING:
		if (code != ICMP6_ROUTER_RENUMBERING_COMMAND &&
		    code != ICMP6_ROUTER_RENUMBERING_RESULT)
			goto badcode;
		if (icmp6len < sizeof(struct icmp6_router_renum))
			goto badlen;
		break;

	default:
		printf("icmp6_input: unknown type %d(src=%s, dst=%s, ifid=%d)\n",
		       icmp6->icmp6_type, ip6_sprintf(&ip6->ip6_src),
		       ip6_sprintf(&ip6->ip6_dst),
		       m->m_pkthdr.rcvif ? m->m_pkthdr.rcvif->if_index : 0);
		if (icmp6->icmp6_type < ICMP6_ECHO_REQUEST) {
			/* ICMPv6 error: MUST deliver it by spec... */
			code = PRC_NCMDS;
			/* deliver */
		} else {
			/* ICMPv6 informational: MUST not deliver */
			break;
		}
	deliver:
		/* an ICMPv6 error must quote at least a full IPv6 header */
		if (icmp6len < sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr)) {
			icmp6stat.icp6s_tooshort++;
			goto freeit;
		}
#ifndef PULLDOWN_TEST
		IP6_EXTHDR_CHECK(m, off,
			sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr),
			IPPROTO_DONE);
		icmp6 = (struct icmp6_hdr *)(mtod(m, caddr_t) + off);
#else
		IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off,
			sizeof(*icmp6) + sizeof(struct ip6_hdr));
		if (icmp6 == NULL) {
			icmp6stat.icp6s_tooshort++;
			return IPPROTO_DONE;
		}
#endif
		bzero(&icmp6src, sizeof(icmp6src));
		icmp6src.sin6_len = sizeof(struct sockaddr_in6);
		icmp6src.sin6_family = AF_INET6;
		/* address the error is about = dst of the quoted packet */
		icmp6src.sin6_addr = ((struct ip6_hdr *)(icmp6 + 1))->ip6_dst;

		/* Detect the upper level protocol */
	    {
		void (*ctlfunc) __P((int, struct sockaddr *, void *));
		struct ip6_hdr *eip6 = (struct ip6_hdr *)(icmp6 + 1);
		u_int8_t nxt = eip6->ip6_nxt;
		int eoff = off + sizeof(struct icmp6_hdr) +
			sizeof(struct ip6_hdr);
		struct ip6ctlparam ip6cp;
		struct in6_addr *finaldst = NULL;
		int icmp6type = icmp6->icmp6_type;
		struct ip6_frag *fh;
		struct ip6_rthdr *rth;
		struct ip6_rthdr0 *rth0;
		int rthlen;

		/* walk the quoted packet's extension-header chain */
		while (1) { /* XXX: should avoid inf. loop explicitly? */
			struct ip6_ext *eh;

			switch(nxt) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_AH:
#ifndef PULLDOWN_TEST
				IP6_EXTHDR_CHECK(m, 0, eoff +
						 sizeof(struct ip6_ext),
						 IPPROTO_DONE);
				eh = (struct ip6_ext *)(mtod(m, caddr_t)
							+ eoff);
#else
				IP6_EXTHDR_GET(eh, struct ip6_ext *, m,
					       eoff, sizeof(*eh));
				if (eh == NULL) {
					icmp6stat.icp6s_tooshort++;
					return IPPROTO_DONE;
				}
#endif

				/* AH length is in 4-byte units, others 8 */
				if (nxt == IPPROTO_AH)
					eoff += (eh->ip6e_len + 2) << 2;
				else
					eoff += (eh->ip6e_len + 1) << 3;
				nxt = eh->ip6e_nxt;
				break;
			case IPPROTO_ROUTING:
				/*
				 * When the erroneous packet contains a
				 * routing header, we should examine the
				 * header to determine the final destination.
				 * Otherwise, we can't properly update
				 * information that depends on the final
				 * destination (e.g. path MTU).
				 */
#ifndef PULLDOWN_TEST
				IP6_EXTHDR_CHECK(m, 0, eoff + sizeof(*rth),
						 IPPROTO_DONE);
				rth = (struct ip6_rthdr *)(mtod(m, caddr_t)
							   + eoff);
#else
				IP6_EXTHDR_GET(rth, struct ip6_rthdr *, m,
					       eoff, sizeof(*rth));
				if (rth == NULL) {
					icmp6stat.icp6s_tooshort++;
					return IPPROTO_DONE;
				}
#endif
				rthlen = (rth->ip6r_len + 1) << 3;
				/*
				 * XXX: currently there is no
				 * officially defined type other
				 * than type-0.
				 * Note that if the segment left field
				 * is 0, all intermediate hops must
				 * have been passed.
+ */ + goto notify; + } + } + notify: +#ifndef PULLDOWN_TEST + icmp6 = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); +#else + IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, + sizeof(*icmp6) + sizeof(struct ip6_hdr)); + if (icmp6 == NULL) { + icmp6stat.icp6s_tooshort++; + return IPPROTO_DONE; + } +#endif + if (icmp6type == ICMP6_PACKET_TOO_BIG) { + if (finaldst == NULL) + finaldst = &((struct ip6_hdr *)(icmp6 + 1))->ip6_dst; + icmp6_mtudisc_update(finaldst, icmp6, m); + } + + ctlfunc = (void (*) __P((int, struct sockaddr *, void *))) + (ip6_protox[nxt]->pr_ctlinput); + if (ctlfunc) { + ip6cp.ip6c_m = m; + ip6cp.ip6c_ip6 = (struct ip6_hdr *)(icmp6 + 1); + ip6cp.ip6c_off = eoff; + (*ctlfunc)(code, (struct sockaddr *)&icmp6src, &ip6cp); + } + } + break; + + badcode: + icmp6stat.icp6s_badcode++; + break; + + badlen: + icmp6stat.icp6s_badlen++; + break; + } + +#if HAVE_NRL_INPCB + rip6_input(&m, offp, IPPROTO_ICMPV6); +#else + icmp6_rip6_input(&m, *offp); +#endif + return IPPROTO_DONE; + + freeit: + m_freem(m); + return IPPROTO_DONE; +} + +static void +icmp6_mtudisc_update(dst, icmp6, m) + struct in6_addr *dst; + struct icmp6_hdr *icmp6;/* we can assume the validity of the pointer */ + struct mbuf *m; /* currently unused but added for scoped addrs */ +{ + u_int mtu = ntohl(icmp6->icmp6_mtu); + struct rtentry *rt = NULL; + struct sockaddr_in6 sin6; +#ifdef __bsdi__ + struct route_in6 ro6; +#endif + + bzero(&sin6, sizeof(sin6)); + sin6.sin6_family = PF_INET6; + sin6.sin6_len = sizeof(struct sockaddr_in6); + sin6.sin6_addr = *dst; + /* sin6.sin6_scope_id = XXX: should be set if DST is a scoped addr */ +#if defined(__NetBSD__) || defined(__OpenBSD__) + rt = rtalloc1((struct sockaddr *)&sin6, 1); /*clone*/ + if (!rt || (rt->rt_flags & RTF_HOST) == 0) { + if (rt) + RTFREE(rt); + rt = icmp6_mtudisc_clone((struct sockaddr *)&sin6); + } +#else +#if defined(__FreeBSD__) || defined (__APPLE__) + rt = rtalloc1((struct sockaddr *)&sin6, 0, + RTF_CLONING | RTF_PRCLONING); +#else 
#ifdef __bsdi__
	bcopy(&sin6, &ro6.ro_dst, sizeof(struct sockaddr_in6));
	ro6.ro_rt = 0;
	rtcalloc((struct route *)&ro6);
	rt = ro6.ro_rt;
#else
#error no case for this particular operating system
#endif
#endif
#endif

	/* only shrink the MTU on an unlocked host route */
	if (rt && (rt->rt_flags & RTF_HOST)
	    && !(rt->rt_rmx.rmx_locks & RTV_MTU)) {
		if (mtu < IPV6_MMTU) {
			/* xxx */
			rt->rt_rmx.rmx_locks |= RTV_MTU;
		} else if (mtu < rt->rt_ifp->if_mtu &&
			   rt->rt_rmx.rmx_mtu > mtu) {
			rt->rt_rmx.rmx_mtu = mtu;
		}
	}
	if (rt)
		RTFREE(rt);
}

/*
 * Process a Node Information Query
 *
 * m: the query, consumed.  Returns a reply mbuf ready for icmp6_reflect(),
 * or NULL when no reply can be built.
 */
#if defined(__FreeBSD__) || defined (__APPLE__)
#define hostnamelen	strlen(hostname)
#endif
#ifndef offsetof		/* XXX */
#define	offsetof(type, member)	((size_t)(&((type *)0)->member))
#endif

static struct mbuf *
ni6_input(m, off)
	struct mbuf *m;
	int off;
{
	struct icmp6_nodeinfo *ni6, *nni6;
	struct mbuf *n = NULL;
	u_int16_t qtype;
	int replylen = sizeof(struct ip6_hdr) + sizeof(struct icmp6_nodeinfo);
	struct ni_reply_fqdn *fqdn;
	int addrs;		/* for NI_QTYPE_NODEADDR */
	struct ifnet *ifp = NULL; /* for NI_QTYPE_NODEADDR */

#ifndef PULLDOWN_TEST
	ni6 = (struct icmp6_nodeinfo *)(mtod(m, caddr_t) + off);
#else
	IP6_EXTHDR_GET(ni6, struct icmp6_nodeinfo *, m, off, sizeof(*ni6));
	if (ni6 == NULL) {
		/* m is already reclaimed */
		return NULL;
	}
#endif
	qtype = ntohs(ni6->ni_qtype);

	/* first pass: compute the reply length for each qtype */
	switch(qtype) {
	 case NI_QTYPE_NOOP:
		 break;		/* no reply data */
	 case NI_QTYPE_SUPTYPES:
		 goto bad;	/* xxx: to be implemented */
		 break;
	 case NI_QTYPE_FQDN:
		 replylen += offsetof(struct ni_reply_fqdn, ni_fqdn_name) +
			 hostnamelen;
		 break;
	 case NI_QTYPE_NODEADDR:
		 addrs = ni6_addrs(ni6, m, &ifp);
		 if ((replylen += addrs * sizeof(struct in6_addr)) > MCLBYTES)
			 replylen = MCLBYTES; /* XXX: we'll truncate later */

		 break;
	 default:
		 /*
		  * XXX: We must return a reply with the ICMP6 code
		  * `unknown Qtype' in this case. However we regard the case
		  * as an FQDN query for backward compatibility.
		  * Older versions set a random value to this field,
		  * so it rarely varies in the defined qtypes.
		  * But the mechanism is not reliable...
		  * maybe we should obsolete older versions.
		  */
		 qtype = NI_QTYPE_FQDN;
		 replylen += offsetof(struct ni_reply_fqdn, ni_fqdn_name) +
			 hostnamelen;
		 break;
	}

	/* allocate a mbuf to reply. */
	MGETHDR(n, M_DONTWAIT, m->m_type);
	if (n == NULL) {
		m_freem(m);
		return(NULL);
	}
	M_COPY_PKTHDR(n, m); /* just for recvif */
	if (replylen > MHLEN) {
		if (replylen > MCLBYTES)
			/*
			 * XXX: should we try to allocate more? But MCLBYTES is
			 * probably much larger than IPV6_MMTU...
			 */
			goto bad;
		MCLGET(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			goto bad;
		}
	}
	n->m_pkthdr.len = n->m_len = replylen;

	/* copy mbuf header and IPv6 + Node Information base headers */
	bcopy(mtod(m, caddr_t), mtod(n, caddr_t), sizeof(struct ip6_hdr));
	nni6 = (struct icmp6_nodeinfo *)(mtod(n, struct ip6_hdr *) + 1);
	bcopy((caddr_t)ni6, (caddr_t)nni6, sizeof(struct icmp6_nodeinfo));

	/* qtype dependent procedure */
	switch (qtype) {
	 case NI_QTYPE_NOOP:
		 nni6->ni_flags = 0;
		 break;
	 case NI_QTYPE_SUPTYPES:
		 goto bad;	/* xxx: to be implemented */
		 break;
	 case NI_QTYPE_FQDN:
		 if (hostnamelen > 255) { /* XXX: rare case, but may happen */
			 printf("ni6_input: "
				"hostname length(%d) is too large for reply\n",
				hostnamelen);
			 goto bad;
		 }
		 fqdn = (struct ni_reply_fqdn *)(mtod(n, caddr_t) +
						 sizeof(struct ip6_hdr) +
						 sizeof(struct icmp6_nodeinfo));
		 nni6->ni_flags = 0; /* XXX: meaningless TTL */
		 fqdn->ni_fqdn_ttl = 0;	/* ditto.
*/ + fqdn->ni_fqdn_namelen = hostnamelen; + bcopy(hostname, &fqdn->ni_fqdn_name[0], hostnamelen); + break; + case NI_QTYPE_NODEADDR: + { + int lenlim, copied; + + if (n->m_flags & M_EXT) + lenlim = MCLBYTES - sizeof(struct ip6_hdr) - + sizeof(struct icmp6_nodeinfo); + else + lenlim = MHLEN - sizeof(struct ip6_hdr) - + sizeof(struct icmp6_nodeinfo); + copied = ni6_store_addrs(ni6, nni6, ifp, lenlim); + /* XXX: reset mbuf length */ + n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) + + sizeof(struct icmp6_nodeinfo) + copied; + break; + } + default: + break; /* XXX impossible! */ + } + + nni6->ni_type = ICMP6_NI_REPLY; + nni6->ni_code = ICMP6_NI_SUCESS; + m_freem(m); + return(n); + + bad: + m_freem(m); + if (n) + m_freem(n); + return(NULL); +} +#undef hostnamelen + +/* + * calculate the number of addresses to be returned in the node info reply. + */ +static int +ni6_addrs(ni6, m, ifpp) + struct icmp6_nodeinfo *ni6; + struct mbuf *m; + struct ifnet **ifpp; +{ + register struct ifnet *ifp; + register struct in6_ifaddr *ifa6; + register struct ifaddr *ifa; + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + int addrs = 0, addrsofif, iffound = 0; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifp = ifnet; ifp; ifp = ifp->if_next) +#else + for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) +#endif + { + addrsofif = 0; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + ifa6 = (struct in6_ifaddr *)ifa; + + if (!(ni6->ni_flags & NI_NODEADDR_FLAG_ALL) && + IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, + &ifa6->ia_addr.sin6_addr)) + iffound = 1; + + /* + * IPv4-mapped addresses can only be returned by a + * Node Information proxy, since they represent + * addresses of IPv4-only nodes, which perforce do + * not 
implement this protocol.
			 * [icmp-name-lookups-05]
			 * So we don't support NI_NODEADDR_FLAG_COMPAT in
			 * this function at this moment.
			 */

			if (ifa6->ia6_flags & IN6_IFF_ANYCAST)
				continue; /* we need only unicast addresses */

			if ((ni6->ni_flags & (NI_NODEADDR_FLAG_LINKLOCAL |
					      NI_NODEADDR_FLAG_SITELOCAL |
					      NI_NODEADDR_FLAG_GLOBAL)) == 0)
				continue;

			/* What do we have to do about ::1? */
			switch(in6_addrscope(&ifa6->ia_addr.sin6_addr)) {
			 case IPV6_ADDR_SCOPE_LINKLOCAL:
				if (ni6->ni_flags & NI_NODEADDR_FLAG_LINKLOCAL)
					addrsofif++;
				break;
			 case IPV6_ADDR_SCOPE_SITELOCAL:
				if (ni6->ni_flags & NI_NODEADDR_FLAG_SITELOCAL)
					addrsofif++;
				break;
			 case IPV6_ADDR_SCOPE_GLOBAL:
				if (ni6->ni_flags & NI_NODEADDR_FLAG_GLOBAL)
					addrsofif++;
				break;
			 default:
				continue;
			}
		}
		if (iffound) {
			*ifpp = ifp;
			return(addrsofif);
		}

		addrs += addrsofif;
	}

	return(addrs);
}

/*
 * Copy the selected interface addresses into the node-info reply after
 * nni6; at most resid bytes are written.  Returns the number of bytes
 * copied; sets NI_NODEADDR_FLAG_TRUNCATE in the reply if it ran out of
 * room.
 */
static int
ni6_store_addrs(ni6, nni6, ifp0, resid)
	struct icmp6_nodeinfo *ni6, *nni6;
	struct ifnet *ifp0;
	int resid;
{
#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3)
	register struct ifnet *ifp = ifp0 ? ifp0 : ifnet;
#else
	register struct ifnet *ifp = ifp0 ? ifp0 : TAILQ_FIRST(&ifnet);
#endif
	register struct in6_ifaddr *ifa6;
	register struct ifaddr *ifa;
	int docopy, copied = 0;
	u_char *cp = (u_char *)(nni6 + 1);

	if (ifp0 == NULL && !(ni6->ni_flags & NI_NODEADDR_FLAG_ALL))
		return(0);	/* needless to copy */

#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3)
	for (; ifp; ifp = ifp->if_next)
#else
	for (; ifp; ifp = TAILQ_NEXT(ifp, if_list))
#endif
	{
#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3)
		for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
#else
		for (ifa = ifp->if_addrlist.tqh_first; ifa;
		     ifa = ifa->ifa_list.tqe_next)
#endif
		{
			docopy = 0;

			if (ifa->ifa_addr->sa_family != AF_INET6)
				continue;
			ifa6 = (struct in6_ifaddr *)ifa;

			if (ifa6->ia6_flags & IN6_IFF_ANYCAST) {
				/* just experimental. not in the spec. */
				if (ni6->ni_flags & NI_NODEADDR_FLAG_ANYCAST)
					docopy = 1;
				else
					continue;
			}
			else {	/* unicast address */
				if (ni6->ni_flags & NI_NODEADDR_FLAG_ANYCAST)
					continue;
				else
					docopy = 1;
			}

			/* What do we have to do about ::1? */
			switch(in6_addrscope(&ifa6->ia_addr.sin6_addr)) {
			 case IPV6_ADDR_SCOPE_LINKLOCAL:
				if (ni6->ni_flags & NI_NODEADDR_FLAG_LINKLOCAL)
					docopy = 1;
				break;
			 case IPV6_ADDR_SCOPE_SITELOCAL:
				if (ni6->ni_flags & NI_NODEADDR_FLAG_SITELOCAL)
					docopy = 1;
				break;
			 case IPV6_ADDR_SCOPE_GLOBAL:
				if (ni6->ni_flags & NI_NODEADDR_FLAG_GLOBAL)
					docopy = 1;
				break;
			 default:
				continue;
			}

			if (docopy) {
				if (resid < sizeof(struct in6_addr)) {
					/*
					 * We give up much more copy.
					 * Set the truncate flag and return.
+ */ + nni6->ni_flags |= + NI_NODEADDR_FLAG_TRUNCATE; + return(copied); + } + bcopy(&ifa6->ia_addr.sin6_addr, cp, + sizeof(struct in6_addr)); + /* XXX: KAME link-local hack; remove ifindex */ + if (IN6_IS_ADDR_LINKLOCAL(&ifa6->ia_addr.sin6_addr)) + ((struct in6_addr *)cp)->s6_addr16[1] = 0; + cp += sizeof(struct in6_addr); + resid -= sizeof(struct in6_addr); + copied += sizeof(struct in6_addr); + } + } + if (ifp0) /* we need search only on the specified IF */ + break; + } + + return(copied); +} + +#ifndef HAVE_NRL_INPCB +/* + * XXX almost dup'ed code with rip6_input. + */ +static int +icmp6_rip6_input(mp, off) + struct mbuf **mp; + int off; +{ + struct mbuf *m = *mp; + register struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + register struct in6pcb *in6p; + struct in6pcb *last = NULL; + struct sockaddr_in6 rip6src; + struct icmp6_hdr *icmp6; + struct ip6_recvpktopts opts; + +#ifndef PULLDOWN_TEST + /* this is assumed to be safe. */ + icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, sizeof(*icmp6)); + if (icmp6 == NULL) { + /* m is already reclaimed */ + return IPPROTO_DONE; + } +#endif + + bzero(&opts, sizeof(opts)); + bzero(&rip6src, sizeof(rip6src)); + rip6src.sin6_len = sizeof(struct sockaddr_in6); + rip6src.sin6_family = AF_INET6; + rip6src.sin6_addr = ip6->ip6_src; + if (IN6_IS_SCOPE_LINKLOCAL(&rip6src.sin6_addr)) + rip6src.sin6_addr.s6_addr16[1] = 0; + if (m->m_pkthdr.rcvif) { + if (IN6_IS_SCOPE_LINKLOCAL(&rip6src.sin6_addr)) + rip6src.sin6_scope_id = m->m_pkthdr.rcvif->if_index; + else + rip6src.sin6_scope_id = 0; + } else + rip6src.sin6_scope_id = 0; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + LIST_FOREACH(in6p, &ripcb, inp_list) +#else + for (in6p = rawin6pcb.in6p_next; + in6p != &rawin6pcb; in6p = in6p->in6p_next) +#endif + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if ((in6p->inp_vflag & INP_IPV6) == NULL) + continue; +#endif + if 
(in6p->in6p_ip6_nxt != IPPROTO_ICMPV6) + continue; + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) && + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) + continue; + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) + continue; + if (in6p->in6p_icmp6filt + && ICMP6_FILTER_WILLBLOCK(icmp6->icmp6_type, + in6p->in6p_icmp6filt)) + continue; + if (last) { + struct mbuf *n; + if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) { + if (last->in6p_flags & IN6P_CONTROLOPTS) + ip6_savecontrol(last, ip6, n, &opts, + NULL); + /* strip intermediate headers */ + m_adj(n, off); + if (sbappendaddr(&last->in6p_socket->so_rcv, + (struct sockaddr *)&rip6src, + n, opts.head) == 0) { + /* should notify about lost packet */ + m_freem(n); + if (opts.head) { + m_freem(opts.head); + } + } else + sorwakeup(last->in6p_socket); + bzero(&opts, sizeof(opts)); + } + } + last = in6p; + } + if (last) { + if (last->in6p_flags & IN6P_CONTROLOPTS) + ip6_savecontrol(last, ip6, m, &opts, NULL); + /* strip intermediate headers */ + m_adj(m, off); + if (sbappendaddr(&last->in6p_socket->so_rcv, + (struct sockaddr *)&rip6src, + m, opts.head) == 0) { + m_freem(m); + if (opts.head) + m_freem(opts.head); + } else + sorwakeup(last->in6p_socket); + } else { + m_freem(m); + ip6stat.ip6s_delivered--; + } + return IPPROTO_DONE; +} +#endif /*OpenBSD*/ + +/* + * Reflect the ip6 packet back to the source. + * OFF points to the icmp6 header, counted from the top of the mbuf. 
+ */ +void +icmp6_reflect(m, off) + struct mbuf *m; + size_t off; +{ + struct ip6_hdr *ip6; + struct icmp6_hdr *icmp6; + struct in6_ifaddr *ia; + struct in6_addr t, *src = 0; + int plen; + int type, code; + struct ifnet *outif = NULL; +#ifdef COMPAT_RFC1885 + int mtu = IPV6_MMTU; + struct sockaddr_in6 *sin6 = &icmp6_reflect_rt.ro_dst; +#endif + + /* too short to reflect */ + if (off < sizeof(struct ip6_hdr)) { + printf("sanity fail: off=%lx, sizeof(ip6)=%lx in %s:%d\n", + (u_long)off, (u_long)sizeof(struct ip6_hdr), + __FILE__, __LINE__); + goto bad; + } + + /* + * If there are extra headers between IPv6 and ICMPv6, strip + * off that header first. + */ + if (off > sizeof(struct ip6_hdr)) { + size_t l; + struct ip6_hdr nip6; + + l = off - sizeof(struct ip6_hdr); + m_copydata(m, 0, sizeof(nip6), (caddr_t)&nip6); + m_adj(m, l); + l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); + if (m->m_len < l) { + if ((m = m_pullup(m, l)) == NULL) + return; + } + bcopy((caddr_t)&nip6, mtod(m, caddr_t), sizeof(nip6)); + } else /* off == sizeof(struct ip6_hdr) */ { + size_t l; + l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); + if (m->m_len < l) { + if ((m = m_pullup(m, l)) == NULL) + return; + } + } + plen = m->m_pkthdr.len - sizeof(struct ip6_hdr); + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_nxt = IPPROTO_ICMPV6; + icmp6 = (struct icmp6_hdr *)(ip6 + 1); + type = icmp6->icmp6_type; /* keep type for statistics */ + code = icmp6->icmp6_code; /* ditto. */ + + t = ip6->ip6_dst; + /* + * ip6_input() drops a packet if its src is multicast. + * So, the src is never multicast. 
+ */ + ip6->ip6_dst = ip6->ip6_src; + + /* XXX hack for link-local addresses */ + if (IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst)) + ip6->ip6_dst.s6_addr16[1] = + htons(m->m_pkthdr.rcvif->if_index); + if (IN6_IS_ADDR_LINKLOCAL(&t)) + t.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + +#ifdef COMPAT_RFC1885 + /* + * xxx guess MTU + * RFC 1885 requires that echo reply should be truncated if it + * does not fit in with (return) path MTU, but the description was + * removed in the new spec. + */ + if (icmp6_reflect_rt.ro_rt == 0 || + ! (IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr, &ip6->ip6_dst))) { + if (icmp6_reflect_rt.ro_rt) { +#if defined(__FreeBSD__) || defined(__APPLE__) + RTFREE(icmp6_reflect_rt.ro_rt); +#endif +#ifdef __bsdi__ + rtfree(icmp6_reflect_rt.ro_rt); +#endif + icmp6_reflect_rt.ro_rt = 0; + } + bzero(sin6, sizeof(*sin6)); + sin6->sin6_family = PF_INET6; + sin6->sin6_len = sizeof(struct sockaddr_in6); + sin6->sin6_addr = ip6->ip6_dst; + +#if defined(__FreeBSD__) || defined(__APPLE__) + rtalloc_ign((struct route *)&icmp6_reflect_rt.ro_rt, + RTF_PRCLONING); +#else + rtalloc((struct route *)&icmp6_reflect_rt.ro_rt); +#endif + } + + if (icmp6_reflect_rt.ro_rt == 0) + goto bad; + + if ((icmp6_reflect_rt.ro_rt->rt_flags & RTF_HOST) + && mtu < icmp6_reflect_rt.ro_rt->rt_ifp->if_mtu) + mtu = icmp6_reflect_rt.ro_rt->rt_rmx.rmx_mtu; + + if (mtu < m->m_pkthdr.len) { + plen -= (m->m_pkthdr.len - mtu); + m_adj(m, mtu - m->m_pkthdr.len); + } +#endif + /* + * If the incoming packet was addressed directly to us(i.e. unicast), + * use dst as the src for the reply. + * The IN6_IFF_NOTREADY case would be VERY rare, but is possible when + * (for example) when we encounter an error while forwarding procedure + * destined to a duplicated address of ours. 
+ */ + for (ia = in6_ifaddr; ia; ia = ia->ia_next) + if (IN6_ARE_ADDR_EQUAL(&t, &ia->ia_addr.sin6_addr) && + (ia->ia6_flags & (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY)) == 0) { + src = &t; + break; + } + if (ia == NULL && IN6_IS_ADDR_LINKLOCAL(&t) && (m->m_flags & M_LOOP)) { + /* + * This is the case if the dst is our link-local address + * and the sender is also ourseleves. + */ + src = &t; + } + + if (src == 0) + /* + * This case matches to multicasts, our anycast, or unicasts + * that we do not own. Select a source address which has the + * same scope. + * XXX: for (non link-local) multicast addresses, this might + * not be a good choice. + */ + if ((ia = in6_ifawithscope(m->m_pkthdr.rcvif, &t)) != 0) + src = &IA6_SIN6(ia)->sin6_addr; + + if (src == 0) + goto bad; + + ip6->ip6_src = *src; + + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + ip6->ip6_nxt = IPPROTO_ICMPV6; + if (m->m_pkthdr.rcvif) { + /* XXX: This may not be the outgoing interface */ + ip6->ip6_hlim = nd_ifinfo[m->m_pkthdr.rcvif->if_index].chlim; + } + + icmp6->icmp6_cksum = 0; + icmp6->icmp6_cksum = in6_cksum(m, IPPROTO_ICMPV6, + sizeof(struct ip6_hdr), plen); + + /* + * xxx option handling + */ + + m->m_flags &= ~(M_BCAST|M_MCAST); +#if IPSEC + /* Don't lookup socket */ + ipsec_setsocket(m, NULL); +#endif /*IPSEC*/ + +#ifdef COMPAT_RFC1885 + ip6_output(m, NULL, &icmp6_reflect_rt, 0, NULL, &outif); +#else + ip6_output(m, NULL, NULL, 0, NULL, &outif); +#endif + if (outif) + icmp6_ifoutstat_inc(outif, type, code); + + return; + + bad: + m_freem(m); + return; +} + +void +icmp6_fasttimo() +{ + mld6_fasttimeo(); +} + +static const char * +icmp6_redirect_diag(src6, dst6, tgt6) + struct in6_addr *src6; + struct in6_addr *dst6; + struct in6_addr *tgt6; +{ + static char buf[1024]; +#if !defined(__OpenBSD__) && !defined(__bsdi__) + snprintf(buf, sizeof(buf), "(src=%s dst=%s tgt=%s)", + ip6_sprintf(src6), ip6_sprintf(dst6), ip6_sprintf(tgt6)); +#else + sprintf(buf, 
"(src=%s dst=%s tgt=%s)", + ip6_sprintf(src6), ip6_sprintf(dst6), ip6_sprintf(tgt6)); +#endif + return buf; +} + +void +icmp6_redirect_input(m, off) + register struct mbuf *m; + int off; +{ + struct ifnet *ifp = m->m_pkthdr.rcvif; + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + struct nd_redirect *nd_rd; + int icmp6len = ntohs(ip6->ip6_plen); + char *lladdr = NULL; + int lladdrlen = 0; + u_char *redirhdr = NULL; + int redirhdrlen = 0; + struct rtentry *rt = NULL; + int is_router; + int is_onlink; + struct in6_addr src6 = ip6->ip6_src; + struct in6_addr redtgt6; + struct in6_addr reddst6; + union nd_opts ndopts; + + if (!m || !ifp) + return; + + /* XXX if we are router, we don't update route by icmp6 redirect */ + if (ip6_forwarding) + goto freeit; + if (!icmp6_rediraccept) + goto freeit; + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, icmp6len,); + nd_rd = (struct nd_redirect *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(nd_rd, struct nd_redirect *, m, off, icmp6len); + if (nd_rd == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } +#endif + redtgt6 = nd_rd->nd_rd_target; + reddst6 = nd_rd->nd_rd_dst; + + if (IN6_IS_ADDR_LINKLOCAL(&redtgt6)) + redtgt6.s6_addr16[1] = htons(ifp->if_index); + if (IN6_IS_ADDR_LINKLOCAL(&reddst6)) + reddst6.s6_addr16[1] = htons(ifp->if_index); + + /* validation */ + if (!IN6_IS_ADDR_LINKLOCAL(&src6)) { + log(LOG_ERR, + "ICMP6 redirect sent from %s rejected; " + "must be from linklocal\n", ip6_sprintf(&src6)); + goto freeit; + } + if (ip6->ip6_hlim != 255) { + log(LOG_ERR, + "ICMP6 redirect sent from %s rejected; " + "hlim=%d (must be 255)\n", + ip6_sprintf(&src6), ip6->ip6_hlim); + goto freeit; + } + { + /* ip6->ip6_src must be equal to gw for icmp6->icmp6_reddst */ + struct sockaddr_in6 sin6; + struct in6_addr *gw6; + + bzero(&sin6, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_len = sizeof(struct sockaddr_in6); + bcopy(&reddst6, &sin6.sin6_addr, sizeof(reddst6)); + rt = rtalloc1((struct sockaddr *)&sin6, 0 
+#if defined(__FreeBSD__) || defined (__APPLE__) + , 0UL +#endif + ); + if (rt) { + gw6 = &(((struct sockaddr_in6 *)rt->rt_gateway)->sin6_addr); + if (bcmp(&src6, gw6, sizeof(struct in6_addr)) != 0) { + log(LOG_ERR, + "ICMP6 redirect rejected; " + "not equal to gw-for-src=%s (must be same): " + "%s\n", + ip6_sprintf(gw6), + icmp6_redirect_diag(&src6, &reddst6, &redtgt6)); + RTFREE(rt); + goto freeit; + } + } else { + log(LOG_ERR, + "ICMP6 redirect rejected; " + "no route found for redirect dst: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6)); + goto freeit; + } + RTFREE(rt); + rt = NULL; + } + if (IN6_IS_ADDR_MULTICAST(&reddst6)) { + log(LOG_ERR, + "ICMP6 redirect rejected; " + "redirect dst must be unicast: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6)); + goto freeit; + } + + is_router = is_onlink = 0; + if (IN6_IS_ADDR_LINKLOCAL(&redtgt6)) + is_router = 1; /* router case */ + if (bcmp(&redtgt6, &reddst6, sizeof(redtgt6)) == 0) + is_onlink = 1; /* on-link destination case */ + if (!is_router && !is_onlink) { + log(LOG_ERR, + "ICMP6 redirect rejected; " + "neither router case nor onlink case: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6)); + goto freeit; + } + /* validation passed */ + + icmp6len -= sizeof(*nd_rd); + nd6_option_init(nd_rd + 1, icmp6len, &ndopts); + if (nd6_options(&ndopts) < 0) { + log(LOG_INFO, "icmp6_redirect_input: " + "invalid ND option, rejected: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6)); + goto freeit; + } + + if (ndopts.nd_opts_tgt_lladdr) { + lladdr = (char *)(ndopts.nd_opts_tgt_lladdr + 1); + lladdrlen = ndopts.nd_opts_tgt_lladdr->nd_opt_len << 3; + } + + if (ndopts.nd_opts_rh) { + redirhdrlen = ndopts.nd_opts_rh->nd_opt_rh_len; + redirhdr = (u_char *)(ndopts.nd_opts_rh + 1); /* xxx */ + } + + if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { + log(LOG_INFO, + "icmp6_redirect_input: lladdrlen mismatch for %s " + "(if %d, icmp6 packet %d): %s\n", + ip6_sprintf(&redtgt6), 
ifp->if_addrlen, lladdrlen - 2, + icmp6_redirect_diag(&src6, &reddst6, &redtgt6)); + } + + /* RFC 2461 8.3 */ + nd6_cache_lladdr(ifp, &redtgt6, lladdr, lladdrlen, ND_REDIRECT, + is_onlink ? ND_REDIRECT_ONLINK : ND_REDIRECT_ROUTER); + + if (!is_onlink) { /* better router case. perform rtredirect. */ + /* perform rtredirect */ + struct sockaddr_in6 sdst; + struct sockaddr_in6 sgw; + struct sockaddr_in6 ssrc; +#ifdef __bsdi__ + extern int icmp_redirtimeout; /*XXX*/ +#endif + + bzero(&sdst, sizeof(sdst)); + bzero(&sgw, sizeof(sgw)); + bzero(&ssrc, sizeof(ssrc)); + sdst.sin6_family = sgw.sin6_family = ssrc.sin6_family = AF_INET6; + sdst.sin6_len = sgw.sin6_len = ssrc.sin6_len = + sizeof(struct sockaddr_in6); + bcopy(&redtgt6, &sgw.sin6_addr, sizeof(struct in6_addr)); + bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr)); + bcopy(&src6, &ssrc.sin6_addr, sizeof(struct in6_addr)); + rtredirect((struct sockaddr *)&sdst, (struct sockaddr *)&sgw, + (struct sockaddr *)NULL, RTF_GATEWAY | RTF_HOST, + (struct sockaddr *)&ssrc, +#ifdef __bsdi__ + icmp_redirtimeout +#else + (struct rtentry **)NULL +#endif /*__FreeBSD__, __NetBSD__, __bsdi__*/ + ); + } + /* finally update cached route in each socket via pfctlinput */ + { + struct sockaddr_in6 sdst; +#if 1 +#else + struct ip6protosw *pr; +#endif + + bzero(&sdst, sizeof(sdst)); + sdst.sin6_family = AF_INET6; + sdst.sin6_len = sizeof(struct sockaddr_in6); + bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr)); +#if 1 + pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&sdst); +#else + /* + * do not use pfctlinput() here, we have different prototype for + * xx_ctlinput() in ip6proto. 
+ */ + for (pr = (struct ip6protosw *)inet6domain.dom_protosw; + pr < (struct ip6protosw *)inet6domain.dom_protoswNPROTOSW; + pr++) { + if (pr->pr_ctlinput) { + (*pr->pr_ctlinput)(PRC_REDIRECT_HOST, + (struct sockaddr *)&sdst, NULL, NULL, 0); + } + } +#endif +#if IPSEC + key_sa_routechange((struct sockaddr *)&sdst); +#endif + } + + freeit: + m_freem(m); +} + +void +icmp6_redirect_output(m0, rt) + struct mbuf *m0; + struct rtentry *rt; +{ + struct ifnet *ifp; /* my outgoing interface */ + struct in6_addr *ifp_ll6; + struct in6_addr *router_ll6; + struct ip6_hdr *sip6; /* m0 as struct ip6_hdr */ + struct mbuf *m = NULL; /* newly allocated one */ + struct ip6_hdr *ip6; /* m as struct ip6_hdr */ + struct nd_redirect *nd_rd; + size_t maxlen; + u_char *p; + struct ifnet *outif = NULL; + + /* if we are not router, we don't send icmp6 redirect */ + if (!ip6_forwarding || ip6_accept_rtadv) + goto fail; + + /* sanity check */ + if (!m0 || !rt || !(rt->rt_flags & RTF_UP) || !(ifp = rt->rt_ifp)) + goto fail; + + /* + * Address check: + * the source address must identify a neighbor, and + * the destination address must not be a multicast address + * [RFC 2461, sec 8.2] + */ + sip6 = mtod(m0, struct ip6_hdr *); + if (nd6_is_addr_neighbor(&sip6->ip6_src, ifp) == 0) + goto fail; + if (IN6_IS_ADDR_MULTICAST(&sip6->ip6_dst)) + goto fail; /* what should we do here? */ + + /* rate limit */ + if (icmp6_ratelimit(&sip6->ip6_src, ND_REDIRECT, 0)) + goto fail; + + /* + * Since we are going to append up to 1280 bytes (= IPV6_MMTU), + * we almost always ask for an mbuf cluster for simplicity. + * (MHLEN < IPV6_MMTU is almost always true) + */ +#if IPV6_MMTU >= MCLBYTES +# error assumption failed about IPV6_MMTU and MCLBYTES +#endif + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (m && IPV6_MMTU >= MHLEN) + MCLGET(m, M_DONTWAIT); + if (!m) + goto fail; + maxlen = (m->m_flags & M_EXT) ? 
MCLBYTES : MHLEN; + maxlen = min(IPV6_MMTU, maxlen); + /* just for safety */ + if (maxlen < sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr) + + ((sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7)) { + goto fail; + } + + { + /* get ip6 linklocal address for ifp(my outgoing interface). */ + struct in6_ifaddr *ia; + if ((ia = in6ifa_ifpforlinklocal(ifp, + IN6_IFF_NOTREADY| + IN6_IFF_ANYCAST)) == NULL) + goto fail; + ifp_ll6 = &ia->ia_addr.sin6_addr; + } + + /* get ip6 linklocal address for the router. */ + if (rt->rt_gateway && (rt->rt_flags & RTF_GATEWAY)) { + struct sockaddr_in6 *sin6; + sin6 = (struct sockaddr_in6 *)rt->rt_gateway; + router_ll6 = &sin6->sin6_addr; + if (!IN6_IS_ADDR_LINKLOCAL(router_ll6)) + router_ll6 = (struct in6_addr *)NULL; + } else + router_ll6 = (struct in6_addr *)NULL; + + /* ip6 */ + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + /* ip6->ip6_plen will be set later */ + ip6->ip6_nxt = IPPROTO_ICMPV6; + ip6->ip6_hlim = 255; + /* ip6->ip6_src must be linklocal addr for my outgoing if. */ + bcopy(ifp_ll6, &ip6->ip6_src, sizeof(struct in6_addr)); + bcopy(&sip6->ip6_src, &ip6->ip6_dst, sizeof(struct in6_addr)); + + /* ND Redirect */ + nd_rd = (struct nd_redirect *)(ip6 + 1); + nd_rd->nd_rd_type = ND_REDIRECT; + nd_rd->nd_rd_code = 0; + nd_rd->nd_rd_reserved = 0; + if (rt->rt_flags & RTF_GATEWAY) { + /* + * nd_rd->nd_rd_target must be a link-local address in + * better router cases. 
+ */ + if (!router_ll6) + goto fail; + bcopy(router_ll6, &nd_rd->nd_rd_target, + sizeof(nd_rd->nd_rd_target)); + bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_dst, + sizeof(nd_rd->nd_rd_dst)); + } else { + /* make sure redtgt == reddst */ + bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_target, + sizeof(nd_rd->nd_rd_target)); + bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_dst, + sizeof(nd_rd->nd_rd_dst)); + } + + p = (u_char *)(nd_rd + 1); + + if (!router_ll6) + goto nolladdropt; + + { + /* target lladdr option */ + struct rtentry *rt_router = NULL; + int len; + struct sockaddr_dl *sdl; + struct nd_opt_hdr *nd_opt; + char *lladdr; + + rt_router = nd6_lookup(router_ll6, 0, ifp); + if (!rt_router) + goto nolladdropt; + len = sizeof(*nd_opt) + ifp->if_addrlen; + len = (len + 7) & ~7; /*round by 8*/ + /* safety check */ + if (len + (p - (u_char *)ip6) > maxlen) + goto nolladdropt; + if (!(rt_router->rt_flags & RTF_GATEWAY) && + (rt_router->rt_flags & RTF_LLINFO) && + (rt_router->rt_gateway->sa_family == AF_LINK) && + (sdl = (struct sockaddr_dl *)rt_router->rt_gateway) && + sdl->sdl_alen) { + nd_opt = (struct nd_opt_hdr *)p; + nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; + nd_opt->nd_opt_len = len >> 3; + lladdr = (char *)(nd_opt + 1); + bcopy(LLADDR(sdl), lladdr, ifp->if_addrlen); + p += len; + } + } +nolladdropt:; + + m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; + + /* just to be safe */ +#if M_DECRYPTED /*not openbsd*/ + if (m0->m_flags & M_DECRYPTED) + goto noredhdropt; +#endif + if (p - (u_char *)ip6 > maxlen) + goto noredhdropt; + + { + /* redirected header option */ + int len; + struct nd_opt_rd_hdr *nd_opt_rh; + + /* + * compute the maximum size for icmp6 redirect header option. + * XXX room for auth header? + */ + len = maxlen - (p - (u_char *)ip6); + len &= ~7; + + /* This is just for simplicity. 
*/ + if (m0->m_pkthdr.len != m0->m_len) { + if (m0->m_next) { + m_freem(m0->m_next); + m0->m_next = NULL; + } + m0->m_pkthdr.len = m0->m_len; + } + + /* + * Redirected header option spec (RFC2461 4.6.3) talks nothing + * about padding/truncate rule for the original IP packet. + * From the discussion on IPv6imp in Feb 1999, the consensus was: + * - "attach as much as possible" is the goal + * - pad if not aligned (original size can be guessed by original + * ip6 header) + * Following code adds the padding if it is simple enough, + * and truncates if not. + */ + if (m0->m_next || m0->m_pkthdr.len != m0->m_len) + panic("assumption failed in %s:%d\n", __FILE__, __LINE__); + + if (len - sizeof(*nd_opt_rh) < m0->m_pkthdr.len) { + /* not enough room, truncate */ + m0->m_pkthdr.len = m0->m_len = len - sizeof(*nd_opt_rh); + } else { + /* enough room, pad or truncate */ + size_t extra; + + extra = m0->m_pkthdr.len % 8; + if (extra) { + /* pad if easy enough, truncate if not */ + if (8 - extra <= M_TRAILINGSPACE(m0)) { + /* pad */ + m0->m_len += (8 - extra); + m0->m_pkthdr.len += (8 - extra); + } else { + /* truncate */ + m0->m_pkthdr.len -= extra; + m0->m_len -= extra; + } + } + len = m0->m_pkthdr.len + sizeof(*nd_opt_rh); + m0->m_pkthdr.len = m0->m_len = len - sizeof(*nd_opt_rh); + } + + nd_opt_rh = (struct nd_opt_rd_hdr *)p; + bzero(nd_opt_rh, sizeof(*nd_opt_rh)); + nd_opt_rh->nd_opt_rh_type = ND_OPT_REDIRECTED_HEADER; + nd_opt_rh->nd_opt_rh_len = len >> 3; + p += sizeof(*nd_opt_rh); + m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; + + /* connect m0 to m */ + m->m_next = m0; + m->m_pkthdr.len = m->m_len + m0->m_len; + } +noredhdropt:; + + if (IN6_IS_ADDR_LINKLOCAL(&sip6->ip6_src)) + sip6->ip6_src.s6_addr16[1] = 0; + if (IN6_IS_ADDR_LINKLOCAL(&sip6->ip6_dst)) + sip6->ip6_dst.s6_addr16[1] = 0; +#if 0 + if (IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src)) + ip6->ip6_src.s6_addr16[1] = 0; + if (IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst)) + ip6->ip6_dst.s6_addr16[1] = 0; +#endif + if 
(IN6_IS_ADDR_LINKLOCAL(&nd_rd->nd_rd_target)) + nd_rd->nd_rd_target.s6_addr16[1] = 0; + if (IN6_IS_ADDR_LINKLOCAL(&nd_rd->nd_rd_dst)) + nd_rd->nd_rd_dst.s6_addr16[1] = 0; + + ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); + + nd_rd->nd_rd_cksum = 0; + nd_rd->nd_rd_cksum + = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), ntohs(ip6->ip6_plen)); + + /* send the packet to outside... */ +#if IPSEC + /* Don't lookup socket */ + ipsec_setsocket(m, NULL); +#endif /*IPSEC*/ + ip6_output(m, NULL, NULL, 0, NULL, &outif); + if (outif) { + icmp6_ifstat_inc(outif, ifs6_out_msg); + icmp6_ifstat_inc(outif, ifs6_out_redirect); + } + icmp6stat.icp6s_outhist[ND_REDIRECT]++; + + return; + +fail: + if (m) + m_freem(m); + if (m0) + m_freem(m0); +} + +#ifndef HAVE_NRL_INPCB +/* + * ICMPv6 socket option processing. + * + * NOTE: for OSes that use NRL inpcb (bsdi4/openbsd), do not forget to modify + * sys/netinet6/raw_ipv6.c:rip6_ctloutput(). + */ +int +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +icmp6_ctloutput(so, sopt) + struct socket *so; + struct sockopt *sopt; +#else +icmp6_ctloutput(op, so, level, optname, mp) + int op; + struct socket *so; + int level, optname; + struct mbuf **mp; +#endif +{ + int error = 0; + int optlen; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + register struct inpcb *inp = sotoinpcb(so); + int level, op, optname; + + if (sopt) { + level = sopt->sopt_level; + op = sopt->sopt_dir; + optname = sopt->sopt_name; + optlen = sopt->sopt_valsize; + } else + level = op = optname = optlen = 0; +#else + register struct in6pcb *in6p = sotoin6pcb(so); + register struct mbuf *m = *mp; + + optlen = m ? 
m->m_len : 0; +#endif + + if (level != IPPROTO_ICMPV6) { +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + if (op == PRCO_SETOPT && m) + (void)m_free(m); +#endif + return EINVAL; + } + + switch(op) { + case PRCO_SETOPT: + switch (optname) { + case ICMP6_FILTER: + { + struct icmp6_filter *p; + + if (optlen != sizeof(*p)) { + error = EMSGSIZE; + break; + } +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (inp->in6p_icmp6filt == NULL) { + error = EINVAL; + break; + } + error = sooptcopyin(sopt, inp->in6p_icmp6filt, optlen, + optlen); +#else + p = mtod(m, struct icmp6_filter *); + if (!p || !in6p->in6p_icmp6filt) { + error = EINVAL; + break; + } + bcopy(p, in6p->in6p_icmp6filt, + sizeof(struct icmp6_filter)); + error = 0; +#endif + break; + } + + default: + error = ENOPROTOOPT; + break; + } +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + if (m) + (void)m_freem(m); +#endif + break; + + case PRCO_GETOPT: + switch (optname) { + case ICMP6_FILTER: + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (inp->in6p_icmp6filt == NULL) { + error = EINVAL; + break; + } + error = sooptcopyout(sopt, inp->in6p_icmp6filt, + sizeof(struct icmp6_filter)); +#else + struct icmp6_filter *p; + + if (!in6p->in6p_icmp6filt) { + error = EINVAL; + break; + } + *mp = m = m_get(M_WAIT, MT_SOOPTS); + m->m_len = sizeof(struct icmp6_filter); + p = mtod(m, struct icmp6_filter *); + bcopy(in6p->in6p_icmp6filt, p, + sizeof(struct icmp6_filter)); + error = 0; +#endif + break; + } + + default: + error = ENOPROTOOPT; + break; + } + break; + } + + return(error); +} +#endif /*NRL inpcb*/ + +/* + * Perform rate limit check. + * Returns 0 if it is okay to send the icmp6 packet. + * Returns 1 if the router SHOULD NOT send this icmp6 packet due to rate + * limitation. + * + * XXX per-destination/type check necessary? 
+ */ +static int +icmp6_ratelimit(dst, type, code) + const struct in6_addr *dst; /* not used at this moment */ + const int type; /* not used at this moment */ + const int code; /* not used at this moment */ +{ + struct timeval tp; + long sec_diff, usec_diff; + + /* If we are not doing rate limitation, it is always okay to send */ + if (!icmp6errratelim) + return 0; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + microtime(&tp); + tp.tv_sec = time_second; +#else + tp = time; +#endif + if (tp.tv_sec < icmp6_nextsend.tv_sec + || (tp.tv_sec == icmp6_nextsend.tv_sec + && tp.tv_usec < icmp6_nextsend.tv_usec)) { + /* The packet is subject to rate limit */ + return 1; + } + sec_diff = icmp6errratelim / 1000000; + usec_diff = icmp6errratelim % 1000000; + icmp6_nextsend.tv_sec = tp.tv_sec + sec_diff; + if ((tp.tv_usec = tp.tv_usec + usec_diff) >= 1000000) { + icmp6_nextsend.tv_sec++; + icmp6_nextsend.tv_usec -= 1000000; + } + + /* it is okay to send this */ + return 0; +} + +#if defined(__NetBSD__) || defined(__OpenBSD__) +static struct rtentry * +icmp6_mtudisc_clone(dst) + struct sockaddr *dst; +{ + struct rtentry *rt; + int error; + + rt = rtalloc1(dst, 1); + if (rt == 0) + return NULL; + + /* If we didn't get a host route, allocate one */ + if ((rt->rt_flags & RTF_HOST) == 0) { + struct rtentry *nrt; + + error = rtrequest((int) RTM_ADD, dst, + (struct sockaddr *) rt->rt_gateway, + (struct sockaddr *) 0, + RTF_GATEWAY | RTF_HOST | RTF_DYNAMIC, &nrt); + if (error) { + rtfree(rt); + rtfree(nrt); + return NULL; + } + nrt->rt_rmx = rt->rt_rmx; + rtfree(rt); + rt = nrt; + } + error = rt_timer_add(rt, icmp6_mtudisc_timeout, + icmp6_mtudisc_timeout_q); + if (error) { + rtfree(rt); + return NULL; + } + + return rt; /* caller need to call rtfree() */ +} + +static void +icmp6_mtudisc_timeout(rt, r) + struct rtentry *rt; + struct rttimer *r; +{ + if (rt == NULL) + panic("icmp6_mtudisc_timeout: bad route to timeout"); + if ((rt->rt_flags & (RTF_DYNAMIC | 
RTF_HOST)) == + (RTF_DYNAMIC | RTF_HOST)) { + rtrequest((int) RTM_DELETE, (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0); + } else { + if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0) { + rt->rt_rmx.rmx_mtu = 0; + } + } +} +#endif /*__NetBSD__ || __OpenBSD__*/ + +#ifdef __bsdi__ +void +icmp6_mtuexpire(rt, rtt) + struct rtentry *rt; + struct rttimer *rtt; +{ + rt->rt_flags |= RTF_PROBEMTU; + Free(rtt); +} + +int *icmp6_sysvars[] = ICMPV6CTL_VARS; + +int +icmp6_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + if (name[0] >= ICMPV6CTL_MAXID) + return (EOPNOTSUPP); + switch (name[0]) { +#if 0 + ICMPV6CTL_ND6_PRUNE: + ICMPV6CTL_ND6_DELAY: + ICMPV6CTL_ND6_UMAXTRIES: + ICMPV6CTL_ND6_MMAXTRIES: + ICMPV6CTL_ND6_USELOOPBACK: + /* need to check the value. */ +#endif + case ICMPV6CTL_STATS: + return sysctl_rdtrunc(oldp, oldlenp, newp, &icmp6stat, + sizeof(icmp6stat)); + + default: + return (sysctl_int_arr(icmp6_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + } +} +#endif /*__bsdi__*/ + +#if defined(__NetBSD__) || defined(__OpenBSD__) +#include +#include +int +icmp6_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + + /* All sysctl names at this level are terminal. 
*/ + if (namelen != 1) + return ENOTDIR; + + switch (name[0]) { + + case ICMPV6CTL_REDIRACCEPT: + return sysctl_int(oldp, oldlenp, newp, newlen, + &icmp6_rediraccept); + case ICMPV6CTL_REDIRTIMEOUT: + return sysctl_int(oldp, oldlenp, newp, newlen, + &icmp6_redirtimeout); + case ICMPV6CTL_STATS: + return sysctl_rdstruct(oldp, oldlenp, newp, + &icmp6stat, sizeof(icmp6stat)); + case ICMPV6CTL_ERRRATELIMIT: + return sysctl_int(oldp, oldlenp, newp, newlen, + &icmp6errratelim); + case ICMPV6CTL_ND6_PRUNE: + return sysctl_int(oldp, oldlenp, newp, newlen, &nd6_prune); + case ICMPV6CTL_ND6_DELAY: + return sysctl_int(oldp, oldlenp, newp, newlen, &nd6_delay); + case ICMPV6CTL_ND6_UMAXTRIES: + return sysctl_int(oldp, oldlenp, newp, newlen, &nd6_umaxtries); + case ICMPV6CTL_ND6_MMAXTRIES: + return sysctl_int(oldp, oldlenp, newp, newlen, &nd6_mmaxtries); + case ICMPV6CTL_ND6_USELOOPBACK: + return sysctl_int(oldp, oldlenp, newp, newlen, + &nd6_useloopback); + case ICMPV6CTL_NODEINFO: + return sysctl_int(oldp, oldlenp, newp, newlen, &icmp6_nodeinfo); + default: + return ENOPROTOOPT; + } + /* NOTREACHED */ +} +#endif /* __NetBSD__ */ diff --git a/bsd/netinet6/icmp6.h b/bsd/netinet6/icmp6.h new file mode 100644 index 000000000..d99439e0b --- /dev/null +++ b/bsd/netinet6/icmp6.h @@ -0,0 +1,4 @@ +/* $KAME$ */ + +/* just for backward compatibility, will be nuked shortly */ +#error "wrong include file - include netinet/icmp6.h instead" diff --git a/bsd/netinet6/in6.c b/bsd/netinet6/in6.c new file mode 100644 index 000000000..ab8b787e4 --- /dev/null +++ b/bsd/netinet6/in6.c @@ -0,0 +1,2636 @@ +/* $KAME: in6.c,v 1.72 2000/03/30 03:45:26 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)in.c 8.2 (Berkeley) 11/15/93 + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "gif.h" +#if NGIF > 0 +#include +#endif +#include + +#include +#include +#if __NetBSD__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include + +#if MIP6 +#include +#include + +struct nd_prefix *(*mip6_get_home_prefix_hook) __P((void)); +#endif /* MIP6 */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +MALLOC_DEFINE(M_IPMADDR, "in6_multi", "internet multicast address"); +#endif + +/* + * Definitions of some costant IP6 addresses. + */ +const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; +const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; +const struct in6_addr in6addr_nodelocal_allnodes = + IN6ADDR_NODELOCAL_ALLNODES_INIT; +const struct in6_addr in6addr_linklocal_allnodes = + IN6ADDR_LINKLOCAL_ALLNODES_INIT; +const struct in6_addr in6addr_linklocal_allrouters = + IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; + +const struct in6_addr in6mask0 = IN6MASK0; +const struct in6_addr in6mask32 = IN6MASK32; +const struct in6_addr in6mask64 = IN6MASK64; +const struct in6_addr in6mask96 = IN6MASK96; +const struct in6_addr in6mask128 = IN6MASK128; + +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) +static int in6_lifaddr_ioctl __P((struct socket *, u_long, caddr_t, + struct ifnet *, struct proc *)); +#else +static int in6_lifaddr_ioctl __P((struct socket *, u_long, caddr_t, + struct ifnet *)); +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +struct in6_multihead in6_multihead; /* XXX BSS initialization */ +#else +/* + * This structure is used to keep track of in6_multi chains which belong 
to + * deleted interface addresses. + */ +static LIST_HEAD(, multi6_kludge) in6_mk; /* XXX BSS initialization */ + +struct multi6_kludge { + LIST_ENTRY(multi6_kludge) mk_entry; + struct ifnet *mk_ifp; + struct in6_multihead mk_head; +}; +#endif + +/* + * Check if the loopback entry will be automatically generated. + * if 0 returned, will not be automatically generated. + * if 1 returned, will be automatically generated. + */ +static int +in6_is_ifloop_auto(struct ifaddr *ifa) +{ +#define SIN6(s) ((struct sockaddr_in6 *)s) + /* + * If RTF_CLONING is unset, or (IFF_LOOPBACK | IFF_POINTOPOINT), + * or netmask is all0 or all1, then cloning will not happen, + * then we can't rely on its loopback entry generation. + */ + if ((ifa->ifa_flags & RTF_CLONING) == 0 || + (ifa->ifa_ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) || + (SIN6(ifa->ifa_netmask)->sin6_len == sizeof(struct sockaddr_in6) + && + IN6_ARE_ADDR_EQUAL(&SIN6(ifa->ifa_netmask)->sin6_addr, + &in6mask128)) || + ((struct sockaddr_in6 *)ifa->ifa_netmask)->sin6_len == 0) + return 0; + else + return 1; +#undef SIN6 +} + +/* + * Subroutine for in6_ifaddloop() and in6_ifremloop(). + * This routine does actual work. + */ +static void +in6_ifloop_request(int cmd, struct ifaddr *ifa) +{ + struct sockaddr_in6 lo_sa; + struct sockaddr_in6 all1_sa; + struct rtentry *nrt = NULL; + + bzero(&lo_sa, sizeof(lo_sa)); + bzero(&all1_sa, sizeof(all1_sa)); + lo_sa.sin6_family = AF_INET6; + lo_sa.sin6_len = sizeof(struct sockaddr_in6); + all1_sa = lo_sa; + lo_sa.sin6_addr = in6addr_loopback; + all1_sa.sin6_addr = in6mask128; + + /* So we add or remove static loopback entry, here. */ + rtrequest(cmd, ifa->ifa_addr, + (struct sockaddr *)&lo_sa, + (struct sockaddr *)&all1_sa, + RTF_UP|RTF_HOST, &nrt); + + /* + * Make sure rt_ifa be equal to IFA, the second argument of the + * function. 
+ * We need this because when we refer rt_ifa->ia6_flags in ip6_input, + * we assume that the rt_ifa points to the address instead of the + * loopback address. + */ + if (cmd == RTM_ADD && nrt && ifa != nrt->rt_ifa) { + IFAFREE(nrt->rt_ifa); + ifa->ifa_refcnt++; + nrt->rt_ifa = ifa; + nrt->rt_dlt = ifa->ifa_dlt; + } + if (nrt) + nrt->rt_refcnt--; +} + +/* + * Add ownaddr as loopback rtentry, if necessary(ex. on p2p link). + * Because, KAME needs loopback rtentry for ownaddr check in + * ip6_input(). + */ +static void +in6_ifaddloop(struct ifaddr *ifa) +{ + if (!in6_is_ifloop_auto(ifa)) { + struct rtentry *rt; + + /* If there is no loopback entry, allocate one. */ + rt = rtalloc1(ifa->ifa_addr, 0 +#if defined(__FreeBSD__) || defined (__APPLE__) + , 0 +#endif /* __FreeBSD__ */ + ); + if (rt == 0 || (rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) + in6_ifloop_request(RTM_ADD, ifa); + if (rt) + rt->rt_refcnt--; + } +} + +/* + * Remove loopback rtentry of ownaddr generated by in6_ifaddloop(), + * if it exists. + */ +static void +in6_ifremloop(struct ifaddr *ifa) +{ + if (!in6_is_ifloop_auto(ifa)) { + struct in6_ifaddr *ia; + int ia_count = 0; + + /* If only one ifa for the loopback entry, delete it. 
*/ + for (ia = in6_ifaddr; ia; ia = ia->ia_next) { + if (IN6_ARE_ADDR_EQUAL(IFA_IN6(ifa), + &ia->ia_addr.sin6_addr)) { + ia_count++; + if (ia_count > 1) + break; + } + } + if (ia_count == 1) + in6_ifloop_request(RTM_DELETE, ifa); + } +} + +int +in6_ifindex2scopeid(idx) + int idx; +{ + struct ifnet *ifp; + struct ifaddr *ifa; + struct sockaddr_in6 *sin6; + + if (idx < 0 || if_index < idx) + return -1; + ifp = ifindex2ifnet[idx]; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + sin6 = (struct sockaddr_in6 *)ifa->ifa_addr; + if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) + return sin6->sin6_scope_id & 0xffff; + } + + return -1; +} + +int +in6_mask2len(mask) + struct in6_addr *mask; +{ + int x, y; + + for (x = 0; x < sizeof(*mask); x++) { + if (mask->s6_addr8[x] != 0xff) + break; + } + y = 0; + if (x < sizeof(*mask)) { + for (y = 0; y < 8; y++) { + if ((mask->s6_addr8[x] & (0x80 >> y)) == 0) + break; + } + } + return x * 8 + y; +} + +void +in6_len2mask(mask, len) + struct in6_addr *mask; + int len; +{ + int i; + + bzero(mask, sizeof(*mask)); + for (i = 0; i < len / 8; i++) + mask->s6_addr8[i] = 0xff; + if (len % 8) + mask->s6_addr8[i] = (0xff00 >> (len % 8)) & 0xff; +} + +#define ifa2ia6(ifa) ((struct in6_ifaddr *)(ifa)) +#define ia62ifa(ia6) (&((ia6)->ia_ifa)) + +int +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) +in6_control(so, cmd, data, ifp, p) + struct socket *so; + u_long cmd; + caddr_t data; + struct ifnet *ifp; + struct proc *p; +#else +in6_control(so, cmd, data, ifp) + struct socket *so; + u_long cmd; + caddr_t data; + struct ifnet *ifp; +#endif +{ + struct in6_ifreq *ifr = (struct in6_ifreq *)data; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + struct ifaddr *ifa; 
+#endif + struct in6_ifaddr *ia = NULL, *oia; + struct in6_aliasreq *ifra = (struct in6_aliasreq *)data; + struct sockaddr_in6 oldaddr; +#ifdef COMPAT_IN6IFIOCTL + struct sockaddr_in6 net; +#endif + int error = 0, hostIsNew, prefixIsNew; + int newifaddr; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + time_t time_second = (time_t)time.tv_sec; +#endif + int privileged; + u_long dl_tag; + + privileged = 0; +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) + if (p && !suser(p->p_ucred, &p->p_acflag)) + privileged++; +#else + if ((so->so_state & SS_PRIV) != 0) + privileged++; +#endif + + /* + * xxx should prevent processes for link-local addresses? + */ +#if NGIF > 0 + if (ifp && ifp->if_type == IFT_GIF) { + switch (cmd) { + case SIOCSIFPHYADDR_IN6: + if (!privileged) + return(EPERM); + /*fall through*/ + case SIOCGIFPSRCADDR_IN6: + case SIOCGIFPDSTADDR_IN6: + return gif_ioctl(ifp, cmd, data); + } + } +#endif + switch (cmd) { + case SIOCGETSGCNT_IN6: + case SIOCGETMIFCNT_IN6: + return (mrt6_ioctl(cmd, data)); + } +#if MIP6 + /* These require root privileges */ + switch (cmd) { + case SIOCSDEBUG_MIP6: + case SIOCSBCFLUSH_MIP6: + case SIOCSDEFCONFIG_MIP6: + case SIOCSBRUPDATE_MIP6: + case SIOCSENABLEBR_MIP6: + case SIOCSATTACH_MIP6: + case SIOCSRELEASE_MIP6: + + case SIOCSHALISTFLUSH_MIP6: + case SIOCSHAPREF_MIP6: + case SIOCSFWDSLUNICAST_MIP6: + case SIOCSFWDSLMULTICAST_MIP6: + + case SIOCSFORADDRFLUSH_MIP6: + case SIOCSHADDRFLUSH_MIP6: + case SIOCSBULISTFLUSH_MIP6: + case SIOCACOADDR_MIP6: + case SIOCAHOMEADDR_MIP6: + case SIOCSBULIFETIME_MIP6: + case SIOCSHRLIFETIME_MIP6: + case SIOCDCOADDR_MIP6: + case SIOCSPROMMODE_MIP6: + case SIOCSBU2CN_MIP6: + case SIOCSREVTUNNEL_MIP6: + case SIOCSAUTOCONFIG_MIP6: + case SIOCSEAGERMD_MIP6: + if (!privileged) + return(EPERM); + /* Anyone can use these or the user is root */ + /* case SIOCXVERYSAFECOMMAND_MIP6: */ +#if !defined(__bsdi__) && !(defined(__FreeBSD__) 
&& __FreeBSD__ < 3) && !defined(__APPLE__) + return mip6_ioctl(so, cmd, data, ifp, p); +#else + return mip6_ioctl(so, cmd, data, ifp); +#endif + } +#endif /* MIP6 */ + + if (ifp == NULL) + return(EOPNOTSUPP); + + switch (cmd) { + case SIOCSNDFLUSH_IN6: + case SIOCSPFXFLUSH_IN6: + case SIOCSRTRFLUSH_IN6: + case SIOCSDEFIFACE_IN6: + case SIOCSIFINFO_FLAGS: + if (!privileged) + return(EPERM); + /*fall through*/ + case SIOCGIFINFO_IN6: + case SIOCGDRLST_IN6: + case SIOCGPRLST_IN6: + case SIOCGNBRINFO_IN6: + case SIOCGDEFIFACE_IN6: + return(nd6_ioctl(cmd, data, ifp)); + } + + switch (cmd) { + case SIOCSIFPREFIX_IN6: + case SIOCDIFPREFIX_IN6: + case SIOCAIFPREFIX_IN6: + case SIOCCIFPREFIX_IN6: + case SIOCSGIFPREFIX_IN6: + if (!privileged) + return(EPERM); + /*fall through*/ + case SIOCGIFPREFIX_IN6: + if (ip6_forwarding == 0) + return(EPERM); + return(in6_prefix_ioctl(so, cmd, data, ifp)); + } + + switch (cmd) { + case SIOCALIFADDR: + case SIOCDLIFADDR: + if (!privileged) + return(EPERM); + /*fall through*/ + case SIOCGLIFADDR: +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) + return in6_lifaddr_ioctl(so, cmd, data, ifp, p); +#else + return in6_lifaddr_ioctl(so, cmd, data, ifp); +#endif + } + + /* + * Find address for this interface, if it exists. + */ + if (ifra->ifra_addr.sin6_family == AF_INET6) { /* XXX */ + struct sockaddr_in6 *sa6 = + (struct sockaddr_in6 *)&ifra->ifra_addr; + + if (IN6_IS_ADDR_LINKLOCAL(&sa6->sin6_addr)) { + if (sa6->sin6_addr.s6_addr16[1] == 0) { + /* interface ID is not embedded by the user */ + sa6->sin6_addr.s6_addr16[1] = + htons(ifp->if_index); + } else if (sa6->sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + return(EINVAL); /* ifid is contradict */ + } + if (sa6->sin6_scope_id) { + if (sa6->sin6_scope_id != + (u_int32_t)ifp->if_index) + return(EINVAL); + sa6->sin6_scope_id = 0; /* XXX: good way? 
*/ + } + } + ia = in6ifa_ifpwithaddr(ifp, &ifra->ifra_addr.sin6_addr); + } + + switch (cmd) { + + case SIOCDIFADDR_IN6: + /* + * for IPv4, we look for existing in6_ifaddr here to allow + * "ifconfig if0 delete" to remove first IPv4 address on the + * interface. For IPv6, as the spec allow multiple interface + * address from the day one, we consider "remove the first one" + * semantics to be not preferrable. + */ + if (ia == NULL) + return(EADDRNOTAVAIL); + /* FALLTHROUGH */ + case SIOCAIFADDR_IN6: + case SIOCSIFADDR_IN6: +#if COMPAT_IN6IFIOCTL + case SIOCSIFDSTADDR_IN6: + case SIOCSIFNETMASK_IN6: + /* + * Since IPv6 allows a node to assign multiple addresses + * on a single interface, SIOCSIFxxx ioctls are not suitable + * and should be unused. + */ +#endif + if (ifra->ifra_addr.sin6_family != AF_INET6) + return(EAFNOSUPPORT); + if (!privileged) + return(EPERM); + if (ia == NULL) { + ia = (struct in6_ifaddr *) + _MALLOC(sizeof(*ia), M_IFADDR, M_WAITOK); + if (ia == NULL) + return (ENOBUFS); + bzero((caddr_t)ia, sizeof(*ia)); + /* Initialize the address and masks */ + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + ia->ia_addr.sin6_family = AF_INET6; + ia->ia_addr.sin6_len = sizeof(ia->ia_addr); + if (ifp->if_flags & IFF_POINTOPOINT) { + ia->ia_ifa.ifa_dstaddr + = (struct sockaddr *)&ia->ia_dstaddr; + ia->ia_dstaddr.sin6_family = AF_INET6; + ia->ia_dstaddr.sin6_len = sizeof(ia->ia_dstaddr); + } else { + ia->ia_ifa.ifa_dstaddr = NULL; + bzero(&ia->ia_dstaddr, sizeof(ia->ia_dstaddr)); + } + ia->ia_ifa.ifa_netmask + = (struct sockaddr *)&ia->ia_prefixmask; + + ia->ia_ifp = ifp; + + printf("in6_control: Attach dl_tag for if=%s%n\n", ifp->if_name, ifp->if_unit); + + if (strcmp(ifp->if_name, "en") == 0) + dl_tag = ether_attach_inet6(ifp); + + if (strcmp(ifp->if_name, "lo") == 0) + dl_tag = lo_attach_inet(ifp); + + if (strcmp(ifp->if_name, "gif") == 0) + dl_tag = gif_attach_inet(ifp); +/* End of temp code */ + ia->ia_ifa.ifa_dlt = dl_tag; + + + if ((oia = 
in6_ifaddr) != NULL) { + for ( ; oia->ia_next; oia = oia->ia_next) + continue; + oia->ia_next = ia; + } else + in6_ifaddr = ia; + ia->ia_ifa.ifa_refcnt++; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if ((ifa = ifp->if_addrlist) != NULL) { + for ( ; ifa->ifa_next; ifa = ifa->ifa_next) + continue; + ifa->ifa_next = ia62ifa(ia); + } else + ifp->if_addrlist = ia62ifa(ia); +#else + TAILQ_INSERT_TAIL(&ifp->if_addrlist, &ia->ia_ifa, + ifa_list); +#endif + ia->ia_ifa.ifa_refcnt++; + + newifaddr = 1; + } else + newifaddr = 0; + + if (cmd == SIOCAIFADDR_IN6) { + /* sanity for overflow - beware unsigned */ + struct in6_addrlifetime *lt; + lt = &ifra->ifra_lifetime; + if (lt->ia6t_vltime != ND6_INFINITE_LIFETIME + && lt->ia6t_vltime + time_second < time_second) { + return EINVAL; + } + if (lt->ia6t_pltime != ND6_INFINITE_LIFETIME + && lt->ia6t_pltime + time_second < time_second) { + return EINVAL; + } + } + break; + + case SIOCGIFADDR_IN6: + /* This interface is basically deprecated. use SIOCGIFCONF. 
*/ + /* fall through */ + case SIOCGIFAFLAG_IN6: + case SIOCGIFNETMASK_IN6: + case SIOCGIFDSTADDR_IN6: + case SIOCGIFALIFETIME_IN6: + /* must think again about its semantics */ + if (ia == NULL) + return(EADDRNOTAVAIL); + break; + case SIOCSIFALIFETIME_IN6: + { + struct in6_addrlifetime *lt; + + if (!privileged) + return(EPERM); + if (ia == NULL) + return(EADDRNOTAVAIL); + /* sanity for overflow - beware unsigned */ + lt = &ifr->ifr_ifru.ifru_lifetime; + if (lt->ia6t_vltime != ND6_INFINITE_LIFETIME + && lt->ia6t_vltime + time_second < time_second) { + return EINVAL; + } + if (lt->ia6t_pltime != ND6_INFINITE_LIFETIME + && lt->ia6t_pltime + time_second < time_second) { + return EINVAL; + } + break; + } + } + + switch (cmd) { + + case SIOCGIFADDR_IN6: + ifr->ifr_addr = ia->ia_addr; + break; + + case SIOCGIFDSTADDR_IN6: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return(EINVAL); + ifr->ifr_dstaddr = ia->ia_dstaddr; + break; + + case SIOCGIFNETMASK_IN6: + ifr->ifr_addr = ia->ia_prefixmask; + break; + + case SIOCGIFAFLAG_IN6: + ifr->ifr_ifru.ifru_flags6 = ia->ia6_flags; + break; + + case SIOCGIFSTAT_IN6: + if (ifp == NULL) + return EINVAL; + if (in6_ifstat == NULL || ifp->if_index >= in6_ifstatmax + || in6_ifstat[ifp->if_index] == NULL) { + /* return EAFNOSUPPORT? */ + bzero(&ifr->ifr_ifru.ifru_stat, + sizeof(ifr->ifr_ifru.ifru_stat)); + } else + ifr->ifr_ifru.ifru_stat = *in6_ifstat[ifp->if_index]; + break; + + case SIOCGIFSTAT_ICMP6: + if (ifp == NULL) + return EINVAL; + if (icmp6_ifstat == NULL || ifp->if_index >= icmp6_ifstatmax || + icmp6_ifstat[ifp->if_index] == NULL) { + /* return EAFNOSUPPORT? 
*/ + bzero(&ifr->ifr_ifru.ifru_stat, + sizeof(ifr->ifr_ifru.ifru_icmp6stat)); + } else + ifr->ifr_ifru.ifru_icmp6stat = + *icmp6_ifstat[ifp->if_index]; + break; +#if COMPAT_IN6IFIOCTL /* should be unused */ + case SIOCSIFDSTADDR_IN6: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return(EINVAL); + oldaddr = ia->ia_dstaddr; + ia->ia_dstaddr = ifr->ifr_dstaddr; + + /* link-local index check */ + if (IN6_IS_ADDR_LINKLOCAL(&ia->ia_dstaddr.sin6_addr)) { + if (ia->ia_dstaddr.sin6_addr.s6_addr16[1] == 0) { + /* interface ID is not embedded by the user */ + ia->ia_dstaddr.sin6_addr.s6_addr16[1] + = htons(ifp->if_index); + } else if (ia->ia_dstaddr.sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + ia->ia_dstaddr = oldaddr; + return(EINVAL); /* ifid is contradict */ + } + } +#ifdef __APPLE__ + error = dlil_ioctl(0, ifp, SIOCSIFDSTADDR, (caddr_t)ia); + if (error == EOPNOTSUPP) + error = 0; + if (error) { + ia->ia_dstaddr = oldaddr; + return(error); + } +#else + if (ifp->if_ioctl && (error = (ifp->if_ioctl) + (ifp, SIOCSIFDSTADDR, (caddr_t)ia))) { +#endif + if (ia->ia_flags & IFA_ROUTE) { + ia->ia_ifa.ifa_dstaddr = (struct sockaddr *)&oldaddr; + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + ia->ia_ifa.ifa_dstaddr = + (struct sockaddr *)&ia->ia_dstaddr; + rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_HOST|RTF_UP); + } + break; + +#endif + case SIOCGIFALIFETIME_IN6: + ifr->ifr_ifru.ifru_lifetime = ia->ia6_lifetime; + break; + + case SIOCSIFALIFETIME_IN6: + ia->ia6_lifetime = ifr->ifr_ifru.ifru_lifetime; + /* for sanity */ + if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) { + ia->ia6_lifetime.ia6t_expire = + time_second + ia->ia6_lifetime.ia6t_vltime; + } else + ia->ia6_lifetime.ia6t_expire = 0; + if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) { + ia->ia6_lifetime.ia6t_preferred = + time_second + ia->ia6_lifetime.ia6t_pltime; + } else + ia->ia6_lifetime.ia6t_preferred = 0; + break; + + case SIOCSIFADDR_IN6: + error = in6_ifinit(ifp, ia, &ifr->ifr_addr, 
1); +#if 0 + /* + * the code chokes if we are to assign multiple addresses with + * the same address prefix (rtinit() will return EEXIST, which + * is not fatal actually). we will get memory leak if we + * don't do it. + * -> we may want to hide EEXIST from rtinit(). + */ + undo: + if (error && newifaddr) { +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if ((ifa = ifp->if_addrlist) == ia62ifa(ia)) + ifp->if_addrlist = ifa->ifa_next; + else { + while (ifa->ifa_next && + (ifa->ifa_next != ia62ifa(ia))) + ifa = ifa->ifa_next; + if (ifa->ifa_next) + ifa->ifa_next = ia62ifa(ia)->ifa_next; + else { + printf("Couldn't unlink in6_ifaddr " + "from ifp\n"); + } + } +#else + TAILQ_REMOVE(&ifp->if_addrlist, &ia->ia_ifa, ifa_list); +#endif + IFAFREE(&ia->ia_ifa); + + oia = ia; + if (oia == (ia = in6_ifaddr)) + in6_ifaddr = ia->ia_next; + else { + while (ia->ia_next && (ia->ia_next != oia)) + ia = ia->ia_next; + if (ia->ia_next) + ia->ia_next = oia->ia_next; + else { + printf("Didn't unlink in6_ifaddr " + "from list\n"); + } + } + IFAFREE(&ia->ia_ifa); + } +#endif + return error; + +#if COMPAT_IN6IFIOCTL /* XXX should be unused */ + case SIOCSIFNETMASK_IN6: + ia->ia_prefixmask = ifr->ifr_addr; + bzero(&net, sizeof(net)); + net.sin6_len = sizeof(struct sockaddr_in6); + net.sin6_family = AF_INET6; + net.sin6_port = htons(0); + net.sin6_flowinfo = htonl(0); + net.sin6_addr.s6_addr32[0] + = ia->ia_addr.sin6_addr.s6_addr32[0] & + ia->ia_prefixmask.sin6_addr.s6_addr32[0]; + net.sin6_addr.s6_addr32[1] + = ia->ia_addr.sin6_addr.s6_addr32[1] & + ia->ia_prefixmask.sin6_addr.s6_addr32[1]; + net.sin6_addr.s6_addr32[2] + = ia->ia_addr.sin6_addr.s6_addr32[2] & + ia->ia_prefixmask.sin6_addr.s6_addr32[2]; + net.sin6_addr.s6_addr32[3] + = ia->ia_addr.sin6_addr.s6_addr32[3] & + ia->ia_prefixmask.sin6_addr.s6_addr32[3]; + ia->ia_net = net; + break; +#endif + + case SIOCAIFADDR_IN6: + prefixIsNew = 0; + hostIsNew = 1; + + if (ifra->ifra_addr.sin6_len == 0) { + ifra->ifra_addr 
= ia->ia_addr; + hostIsNew = 0; + } else if (IN6_ARE_ADDR_EQUAL(&ifra->ifra_addr.sin6_addr, + &ia->ia_addr.sin6_addr)) + hostIsNew = 0; + + /* Validate address families: */ + /* + * The destination address for a p2p link must have a family + * of AF_UNSPEC or AF_INET6. + */ + if ((ifp->if_flags & IFF_POINTOPOINT) != 0 && + ifra->ifra_dstaddr.sin6_family != AF_INET6 && + ifra->ifra_dstaddr.sin6_family != AF_UNSPEC) + return(EAFNOSUPPORT); + /* + * The prefixmask must have a family of AF_UNSPEC or AF_INET6. + */ + if (ifra->ifra_prefixmask.sin6_family != AF_INET6 && + ifra->ifra_prefixmask.sin6_family != AF_UNSPEC) + return(EAFNOSUPPORT); + + if (ifra->ifra_prefixmask.sin6_len) { + in6_ifscrub(ifp, ia); + ia->ia_prefixmask = ifra->ifra_prefixmask; + prefixIsNew = 1; + } + if ((ifp->if_flags & IFF_POINTOPOINT) && + (ifra->ifra_dstaddr.sin6_family == AF_INET6)) { + in6_ifscrub(ifp, ia); + oldaddr = ia->ia_dstaddr; + ia->ia_dstaddr = ifra->ifra_dstaddr; + /* link-local index check: should be a separate function? 
*/ + if (IN6_IS_ADDR_LINKLOCAL(&ia->ia_dstaddr.sin6_addr)) { + if (ia->ia_dstaddr.sin6_addr.s6_addr16[1] == 0) { + /* + * interface ID is not embedded by + * the user + */ + ia->ia_dstaddr.sin6_addr.s6_addr16[1] + = htons(ifp->if_index); + } else if (ia->ia_dstaddr.sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + ia->ia_dstaddr = oldaddr; + return(EINVAL); /* ifid is contradict */ + } + } + prefixIsNew = 1; /* We lie; but effect's the same */ + } + if (hostIsNew || prefixIsNew) { + error = in6_ifinit(ifp, ia, &ifra->ifra_addr, 0); +#if 0 + if (error) + goto undo; +#endif + } + if (hostIsNew && (ifp->if_flags & IFF_MULTICAST)) { + int error_local = 0; + + /* + * join solicited multicast addr for new host id + */ + struct in6_addr llsol; + bzero(&llsol, sizeof(struct in6_addr)); + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr32[3] = + ifra->ifra_addr.sin6_addr.s6_addr32[3]; + llsol.s6_addr8[12] = 0xff; + (void)in6_addmulti(&llsol, ifp, &error_local); + if (error == 0) + error = error_local; + } + + ia->ia6_flags = ifra->ifra_flags; + ia->ia6_flags &= ~IN6_IFF_DUPLICATED; /*safety*/ + ia->ia6_flags &= ~IN6_IFF_NODAD; /* Mobile IPv6 */ + + ia->ia6_lifetime = ifra->ifra_lifetime; + /* for sanity */ + if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) { + ia->ia6_lifetime.ia6t_expire = + time_second + ia->ia6_lifetime.ia6t_vltime; + } else + ia->ia6_lifetime.ia6t_expire = 0; + if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) { + ia->ia6_lifetime.ia6t_preferred = + time_second + ia->ia6_lifetime.ia6t_pltime; + } else + ia->ia6_lifetime.ia6t_preferred = 0; + + /* + * Perform DAD, if needed. + * XXX It may be of use, if we can administratively + * disable DAD. 
+ */ + switch (ifp->if_type) { + case IFT_ARCNET: + case IFT_ETHER: + case IFT_FDDI: +#if 0 + case IFT_ATM: + case IFT_SLIP: + case IFT_PPP: +#endif + /* Mobile IPv6 modification */ + if ((ifra->ifra_flags & IN6_IFF_NODAD) == 0) { + ia->ia6_flags |= IN6_IFF_TENTATIVE; + nd6_dad_start((struct ifaddr *)ia, NULL); + } + break; + case IFT_DUMMY: + case IFT_FAITH: + case IFT_GIF: + case IFT_LOOP: + default: + break; + } + + if (hostIsNew) { + int iilen; + int error_local = 0; + + iilen = (sizeof(ia->ia_prefixmask.sin6_addr) << 3) - + in6_mask2len(&ia->ia_prefixmask.sin6_addr); + error_local = in6_prefix_add_ifid(iilen, ia); + if (error == 0) + error = error_local; + } + + return(error); + + case SIOCDIFADDR_IN6: + in6_purgeaddr(&ia->ia_ifa, ifp); + break; + + default: +#ifdef __APPLE__ + error = dlil_ioctl(0, ifp, cmd, (caddr_t)data); + if (error == EOPNOTSUPP) + error = 0; + return error; + +#else + if (ifp == NULL || ifp->if_ioctl == 0) + return(EOPNOTSUPP); + return((*ifp->if_ioctl)(ifp, cmd, data)); +#endif + } + return(0); +} + +void +in6_purgeaddr(ifa, ifp) + struct ifaddr *ifa; + struct ifnet *ifp; +{ + struct in6_ifaddr *oia, *ia = (void *) ifa; + + in6_ifscrub(ifp, ia); + + if (ifp->if_flags & IFF_MULTICAST) { + /* + * delete solicited multicast addr for deleting host id + */ + struct in6_multi *in6m; + struct in6_addr llsol; + bzero(&llsol, sizeof(struct in6_addr)); + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr32[3] = + ia->ia_addr.sin6_addr.s6_addr32[3]; + llsol.s6_addr8[12] = 0xff; + + IN6_LOOKUP_MULTI(llsol, ifp, in6m); + if (in6m) + in6_delmulti(in6m); + } + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if ((ifa = ifp->if_addrlist) == ia62ifa(ia)) + ifp->if_addrlist = ifa->ifa_next; + else { + while (ifa->ifa_next && + (ifa->ifa_next != ia62ifa(ia))) + ifa = ifa->ifa_next; + if (ifa->ifa_next) + ifa->ifa_next = 
ia62ifa(ia)->ifa_next; + else + printf("Couldn't unlink in6_ifaddr from ifp\n"); + } +#else + TAILQ_REMOVE(&ifp->if_addrlist, &ia->ia_ifa, ifa_list); +#endif + IFAFREE(&ia->ia_ifa); + + oia = ia; + if (oia == (ia = in6_ifaddr)) + in6_ifaddr = ia->ia_next; + else { + while (ia->ia_next && (ia->ia_next != oia)) + ia = ia->ia_next; + if (ia->ia_next) + ia->ia_next = oia->ia_next; + else + printf("Didn't unlink in6_ifaddr from list\n"); + } + { + int iilen; + + iilen = (sizeof(oia->ia_prefixmask.sin6_addr) << 3) - + in6_mask2len(&oia->ia_prefixmask.sin6_addr); + in6_prefix_remove_ifid(iilen, oia); + } +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + if (oia->ia6_multiaddrs.lh_first != NULL) + in6_savemkludge(oia); +#endif + + IFAFREE(&oia->ia_ifa); +} + +/* + * SIOC[GAD]LIFADDR. + * SIOCGLIFADDR: get first address. (???) + * SIOCGLIFADDR with IFLR_PREFIX: + * get first address that matches the specified prefix. + * SIOCALIFADDR: add the specified address. + * SIOCALIFADDR with IFLR_PREFIX: + * add the specified prefix, filling hostid part from + * the first link-local address. prefixlen must be <= 64. + * SIOCDLIFADDR: delete the specified address. + * SIOCDLIFADDR with IFLR_PREFIX: + * delete the first address that matches the specified prefix. + * return values: + * EINVAL on invalid parameters + * EADDRNOTAVAIL on prefix match failed/specified address not found + * other values may be returned from in6_ioctl() + * + * NOTE: SIOCALIFADDR(with IFLR_PREFIX set) allows prefixlen less than 64. + * this is to accomodate address naming scheme other than RFC2374, + * in the future. + * RFC2373 defines interface id to be 64bit, but it allows non-RFC2374 + * address encoding scheme. 
(see figure on page 8) + */ +static int +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) +in6_lifaddr_ioctl(so, cmd, data, ifp, p) + struct socket *so; + u_long cmd; + caddr_t data; + struct ifnet *ifp; + struct proc *p; +#else +in6_lifaddr_ioctl(so, cmd, data, ifp) + struct socket *so; + u_long cmd; + caddr_t data; + struct ifnet *ifp; +#endif +{ + struct if_laddrreq *iflr = (struct if_laddrreq *)data; + struct ifaddr *ifa; + struct sockaddr *sa; + + /* sanity checks */ + if (!data || !ifp) { + panic("invalid argument to in6_lifaddr_ioctl"); + /*NOTRECHED*/ + } + + switch (cmd) { + case SIOCGLIFADDR: + /* address must be specified on GET with IFLR_PREFIX */ + if ((iflr->flags & IFLR_PREFIX) == 0) + break; + /*FALLTHROUGH*/ + case SIOCALIFADDR: + case SIOCDLIFADDR: + /* address must be specified on ADD and DELETE */ + sa = (struct sockaddr *)&iflr->addr; + if (sa->sa_family != AF_INET6) + return EINVAL; + if (sa->sa_len != sizeof(struct sockaddr_in6)) + return EINVAL; + /* XXX need improvement */ + sa = (struct sockaddr *)&iflr->dstaddr; + if (sa->sa_family && sa->sa_family != AF_INET6) + return EINVAL; + if (sa->sa_len && sa->sa_len != sizeof(struct sockaddr_in6)) + return EINVAL; + break; + default: /*shouldn't happen*/ +#if 0 + panic("invalid cmd to in6_lifaddr_ioctl"); + /*NOTREACHED*/ +#else + return EOPNOTSUPP; +#endif + } + if (sizeof(struct in6_addr) * 8 < iflr->prefixlen) + return EINVAL; + + switch (cmd) { + case SIOCALIFADDR: + { + struct in6_aliasreq ifra; + struct in6_addr *hostid = NULL; + int prefixlen; + + if ((iflr->flags & IFLR_PREFIX) != 0) { + struct sockaddr_in6 *sin6; + + /* + * hostid is to fill in the hostid part of the + * address. hostid points to the first link-local + * address attached to the interface. + */ + ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0); + if (!ifa) + return EADDRNOTAVAIL; + hostid = IFA_IN6(ifa); + + /* prefixlen must be <= 64. 
*/ + if (64 < iflr->prefixlen) + return EINVAL; + prefixlen = iflr->prefixlen; + + /* hostid part must be zero. */ + sin6 = (struct sockaddr_in6 *)&iflr->addr; + if (sin6->sin6_addr.s6_addr32[2] != 0 + || sin6->sin6_addr.s6_addr32[3] != 0) { + return EINVAL; + } + } else + prefixlen = iflr->prefixlen; + + /* copy args to in6_aliasreq, perform ioctl(SIOCAIFADDR_IN6). */ + bzero(&ifra, sizeof(ifra)); + bcopy(iflr->iflr_name, ifra.ifra_name, + sizeof(ifra.ifra_name)); + + bcopy(&iflr->addr, &ifra.ifra_addr, + ((struct sockaddr *)&iflr->addr)->sa_len); + if (hostid) { + /* fill in hostid part */ + ifra.ifra_addr.sin6_addr.s6_addr32[2] = + hostid->s6_addr32[2]; + ifra.ifra_addr.sin6_addr.s6_addr32[3] = + hostid->s6_addr32[3]; + } + + if (((struct sockaddr *)&iflr->dstaddr)->sa_family) { /*XXX*/ + bcopy(&iflr->dstaddr, &ifra.ifra_dstaddr, + ((struct sockaddr *)&iflr->dstaddr)->sa_len); + if (hostid) { + ifra.ifra_dstaddr.sin6_addr.s6_addr32[2] = + hostid->s6_addr32[2]; + ifra.ifra_dstaddr.sin6_addr.s6_addr32[3] = + hostid->s6_addr32[3]; + } + } + + ifra.ifra_prefixmask.sin6_family = AF_INET6; + ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); + in6_len2mask(&ifra.ifra_prefixmask.sin6_addr, prefixlen); + + ifra.ifra_flags = iflr->flags & ~IFLR_PREFIX; +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) + return in6_control(so, SIOCAIFADDR_IN6, (caddr_t)&ifra, ifp, p); +#else + return in6_control(so, SIOCAIFADDR_IN6, (caddr_t)&ifra, ifp); +#endif + } + case SIOCGLIFADDR: + case SIOCDLIFADDR: + { + struct in6_ifaddr *ia; + struct in6_addr mask, candidate, match; + struct sockaddr_in6 *sin6; + int cmp; + + bzero(&mask, sizeof(mask)); + if (iflr->flags & IFLR_PREFIX) { + /* lookup a prefix rather than address. 
*/ + in6_len2mask(&mask, iflr->prefixlen); + + sin6 = (struct sockaddr_in6 *)&iflr->addr; + bcopy(&sin6->sin6_addr, &match, sizeof(match)); + match.s6_addr32[0] &= mask.s6_addr32[0]; + match.s6_addr32[1] &= mask.s6_addr32[1]; + match.s6_addr32[2] &= mask.s6_addr32[2]; + match.s6_addr32[3] &= mask.s6_addr32[3]; + + /* if you set extra bits, that's wrong */ + if (bcmp(&match, &sin6->sin6_addr, sizeof(match))) + return EINVAL; + + cmp = 1; + } else { + if (cmd == SIOCGLIFADDR) { + /* on getting an address, take the 1st match */ + cmp = 0; /*XXX*/ + } else { + /* on deleting an address, do exact match */ + in6_len2mask(&mask, 128); + sin6 = (struct sockaddr_in6 *)&iflr->addr; + bcopy(&sin6->sin6_addr, &match, sizeof(match)); + + cmp = 1; + } + } + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (!cmp) + break; + bcopy(IFA_IN6(ifa), &candidate, sizeof(candidate)); + candidate.s6_addr32[0] &= mask.s6_addr32[0]; + candidate.s6_addr32[1] &= mask.s6_addr32[1]; + candidate.s6_addr32[2] &= mask.s6_addr32[2]; + candidate.s6_addr32[3] &= mask.s6_addr32[3]; + if (IN6_ARE_ADDR_EQUAL(&candidate, &match)) + break; + } + if (!ifa) + return EADDRNOTAVAIL; + ia = ifa2ia6(ifa); + + if (cmd == SIOCGLIFADDR) { + /* fill in the if_laddrreq structure */ + bcopy(&ia->ia_addr, &iflr->addr, ia->ia_addr.sin6_len); + + if ((ifp->if_flags & IFF_POINTOPOINT) != 0) { + bcopy(&ia->ia_dstaddr, &iflr->dstaddr, + ia->ia_dstaddr.sin6_len); + } else + bzero(&iflr->dstaddr, sizeof(iflr->dstaddr)); + + iflr->prefixlen = + in6_mask2len(&ia->ia_prefixmask.sin6_addr); + + iflr->flags = ia->ia6_flags; /*XXX*/ + + return 0; + } else { + struct in6_aliasreq ifra; + + /* fill in6_aliasreq and do ioctl(SIOCDIFADDR_IN6) */ + bzero(&ifra, sizeof(ifra)); + bcopy(iflr->iflr_name, 
ifra.ifra_name, + sizeof(ifra.ifra_name)); + + bcopy(&ia->ia_addr, &ifra.ifra_addr, + ia->ia_addr.sin6_len); + if ((ifp->if_flags & IFF_POINTOPOINT) != 0) { + bcopy(&ia->ia_dstaddr, &ifra.ifra_dstaddr, + ia->ia_dstaddr.sin6_len); + } else { + bzero(&ifra.ifra_dstaddr, + sizeof(ifra.ifra_dstaddr)); + } + bcopy(&ia->ia_prefixmask, &ifra.ifra_dstaddr, + ia->ia_prefixmask.sin6_len); + + ifra.ifra_flags = ia->ia6_flags; +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) + return in6_control(so, SIOCDIFADDR_IN6, (caddr_t)&ifra, + ifp, p); +#else + return in6_control(so, SIOCDIFADDR_IN6, (caddr_t)&ifra, + ifp); +#endif + } + } + } + + return EOPNOTSUPP; /*just for safety*/ +} + +/* + * Delete any existing route for an interface. + */ +void +in6_ifscrub(ifp, ia) + register struct ifnet *ifp; + register struct in6_ifaddr *ia; +{ + if ((ia->ia_flags & IFA_ROUTE) == 0) + return; + if (ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + else + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, 0); + ia->ia_flags &= ~IFA_ROUTE; + + /* Remove ownaddr's loopback rtentry, if it exists. */ + in6_ifremloop(&(ia->ia_ifa)); +} + +/* + * Initialize an interface's intetnet6 address + * and routing table entry. + */ +int +in6_ifinit(ifp, ia, sin6, scrub) + struct ifnet *ifp; + struct in6_ifaddr *ia; + struct sockaddr_in6 *sin6; + int scrub; +{ + struct sockaddr_in6 oldaddr; + int error, flags = RTF_UP; + u_long dl_tag; + int s = splimp(); + + oldaddr = ia->ia_addr; + ia->ia_addr = *sin6; + /* + * Give the interface a chance to initialize + * if this is its first address, + * and to validate the address if necessary. 
+ */ +#ifdef __APPLE__ + error = dlil_ioctl(0, ifp, SIOCSIFADDR, (caddr_t)ia) ; + if (error == EOPNOTSUPP) + error = 0; + if (error) { + +#else + if (ifp->if_ioctl && + (error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, (caddr_t)ia))) { +#endif + printf("in6_ifinit SIOCSIFADDR for if=%s returns error=%x\n", if_name(ifp), error); + splx(s); + ia->ia_addr = oldaddr; + return(error); + } + + switch (ifp->if_type) { + case IFT_ARCNET: + case IFT_ETHER: + case IFT_FDDI: + ia->ia_ifa.ifa_rtrequest = nd6_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + break; + case IFT_PPP: + ia->ia_ifa.ifa_rtrequest = nd6_p2p_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + break; + } + + splx(s); + if (scrub) { + ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr; + in6_ifscrub(ifp, ia); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + } + /* xxx + * in_socktrim + */ + /* + * Add route for the network. + */ + ia->ia_ifa.ifa_metric = ifp->if_metric; + if (ifp->if_flags & IFF_LOOPBACK) { + ia->ia_ifa.ifa_dstaddr = ia->ia_ifa.ifa_addr; + flags |= RTF_HOST; + } else if (ifp->if_flags & IFF_POINTOPOINT) { + if (ia->ia_dstaddr.sin6_family != AF_INET6) + return(0); + flags |= RTF_HOST; + } + if ((error = rtinit(&(ia->ia_ifa), (int)RTM_ADD, flags)) == 0) + ia->ia_flags |= IFA_ROUTE; + + /* Add ownaddr as loopback rtentry, if necessary(ex. on p2p link). 
*/ + in6_ifaddloop(&(ia->ia_ifa)); + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + if (ifp->if_flags & IFF_MULTICAST) + in6_restoremkludge(ia, ifp); +#endif + +#ifdef __APPLE__ + printf("in6_ifinit: Attach dl_tag for if=%s%n\n", ifp->if_name, ifp->if_unit); + + if (strcmp(ifp->if_name, "en") == 0) + dl_tag = ether_attach_inet6(ifp); + + if (strcmp(ifp->if_name, "lo") == 0) + dl_tag = lo_attach_inet(ifp); + + if (strcmp(ifp->if_name, "gif") == 0) + dl_tag = gif_attach_inet(ifp); +/* End of temp code */ + ia->ia_ifa.ifa_dlt = dl_tag; +#endif + + return(error); +} + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) +/* + * Multicast address kludge: + * If there were any multicast addresses attached to this interface address, + * either move them to another address on this interface, or save them until + * such time as this interface is reconfigured for IPv6. + */ +void +in6_savemkludge(oia) + struct in6_ifaddr *oia; +{ + struct in6_ifaddr *ia; + struct in6_multi *in6m, *next; + + IFP_TO_IA6(oia->ia_ifp, ia); + if (ia) { /* there is another address */ + for (in6m = oia->ia6_multiaddrs.lh_first; in6m; in6m = next){ + next = in6m->in6m_entry.le_next; + IFAFREE(&in6m->in6m_ia->ia_ifa); + ia->ia_ifa.ifa_refcnt++; + in6m->in6m_ia = ia; + LIST_INSERT_HEAD(&ia->ia6_multiaddrs, in6m, in6m_entry); + } + } else { /* last address on this if deleted, save */ + struct multi6_kludge *mk; + + mk = _MALLOC(sizeof(*mk), M_IPMADDR, M_WAITOK); + + LIST_INIT(&mk->mk_head); + mk->mk_ifp = oia->ia_ifp; + + for (in6m = oia->ia6_multiaddrs.lh_first; in6m; in6m = next){ + next = in6m->in6m_entry.le_next; + IFAFREE(&in6m->in6m_ia->ia_ifa); /* release reference */ + in6m->in6m_ia = NULL; + LIST_INSERT_HEAD(&mk->mk_head, in6m, in6m_entry); + } + + if (mk->mk_head.lh_first != NULL) { + LIST_INSERT_HEAD(&in6_mk, mk, mk_entry); + } else { + FREE(mk, M_IPMADDR); + } + } +} + +/* + * Continuation of multicast address hack: + * If there was a 
multicast group list previously saved for this interface, + * then we re-attach it to the first address configured on the i/f. + */ +void +in6_restoremkludge(ia, ifp) + struct in6_ifaddr *ia; + struct ifnet *ifp; +{ + struct multi6_kludge *mk; + + for (mk = in6_mk.lh_first; mk; mk = mk->mk_entry.le_next) { + if (mk->mk_ifp == ifp) { + struct in6_multi *in6m, *next; + + for (in6m = mk->mk_head.lh_first; in6m; in6m = next){ + next = in6m->in6m_entry.le_next; + in6m->in6m_ia = ia; + ia->ia_ifa.ifa_refcnt++; + LIST_INSERT_HEAD(&ia->ia6_multiaddrs, + in6m, in6m_entry); + } + LIST_REMOVE(mk, mk_entry); + _FREE(mk, M_IPMADDR); + break; + } + } +} + +void +in6_purgemkludge(ifp) + struct ifnet *ifp; +{ + struct multi6_kludge *mk; + struct in6_multi *in6m; + + for (mk = in6_mk.lh_first; mk; mk = mk->mk_entry.le_next) { + if (mk->mk_ifp != ifp) + continue; + + /* leave from all multicast groups joined */ + while ((in6m = LIST_FIRST(&mk->mk_head)) != NULL) + in6_delmulti(in6m); + LIST_REMOVE(mk, mk_entry); + _FREE(mk, M_IPMADDR); + break; + } +} + +/* + * Add an address to the list of IP6 multicast addresses for a + * given interface. + */ +struct in6_multi * +in6_addmulti(maddr6, ifp, errorp) + register struct in6_addr *maddr6; + register struct ifnet *ifp; + int *errorp; +{ + struct in6_ifaddr *ia; + struct in6_ifreq ifr; + struct in6_multi *in6m; +#if __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + + *errorp = 0; + /* + * See if address already in list. + */ + IN6_LOOKUP_MULTI(*maddr6, ifp, in6m); + if (in6m != NULL) { + /* + * Found it; just increment the refrence count. + */ + in6m->in6m_refcount++; + } else { + /* + * New address; allocate a new multicast record + * and link it into the interface's multicast list. 
+ */ + in6m = (struct in6_multi *) + _MALLOC(sizeof(*in6m), M_IPMADDR, M_NOWAIT); + if (in6m == NULL) { + splx(s); + *errorp = ENOBUFS; + return(NULL); + } + in6m->in6m_addr = *maddr6; + in6m->in6m_ifp = ifp; + in6m->in6m_refcount = 1; + IFP_TO_IA6(ifp, ia); + if (ia == NULL) { + _FREE(in6m, M_IPMADDR); + splx(s); + *errorp = EADDRNOTAVAIL; /* appropriate? */ + return(NULL); + } + in6m->in6m_ia = ia; + ia->ia_ifa.ifa_refcnt++; /* gain a reference */ + LIST_INSERT_HEAD(&ia->ia6_multiaddrs, in6m, in6m_entry); + + /* + * Ask the network driver to update its multicast reception + * filter appropriately for the new address. + */ + bzero(&ifr.ifr_addr, sizeof(struct sockaddr_in6)); + ifr.ifr_addr.sin6_len = sizeof(struct sockaddr_in6); + ifr.ifr_addr.sin6_family = AF_INET6; + ifr.ifr_addr.sin6_addr = *maddr6; +#ifdef __APPLE__ + *errorp = dlil_ioctl(0, ifp, SIOCADDMULTI, (caddr_t)&ifr); + printf("in6_addmulti: if=%s%n dlil_ioctl returns=%d\n", ifp->if_name, ifp->if_unit, *errorp); + if (*errorp == EOPNOTSUPP) + *errorp = 0; + +#else + if (ifp->if_ioctl == NULL) + *errorp = ENXIO; /* XXX: appropriate? */ + else + *errorp = (*ifp->if_ioctl)(ifp, SIOCADDMULTI, + (caddr_t)&ifr); +#endif + if (*errorp) { + LIST_REMOVE(in6m, in6m_entry); + _FREE(in6m, M_IPMADDR); + splx(s); + return(NULL); + } + /* + * Let MLD6 know that we have joined a new IP6 multicast + * group. + */ + mld6_start_listening(in6m); + } + splx(s); + return(in6m); +} + +/* + * Delete a multicast address record. + */ +void +in6_delmulti(in6m) + struct in6_multi *in6m; +{ + struct in6_ifreq ifr; +#if __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + + if (--in6m->in6m_refcount == 0) { + /* + * No remaining claims to this record; let MLD6 know + * that we are leaving the multicast group. + */ + mld6_stop_listening(in6m); + + /* + * Unlink from list. 
+ */ + LIST_REMOVE(in6m, in6m_entry); + if (in6m->in6m_ia) + IFAFREE(&in6m->in6m_ia->ia_ifa); /* release reference */ + + /* + * Notify the network driver to update its multicast + * reception filter. + */ + bzero(&ifr.ifr_addr, sizeof(struct sockaddr_in6)); + ifr.ifr_addr.sin6_len = sizeof(struct sockaddr_in6); + ifr.ifr_addr.sin6_family = AF_INET6; + ifr.ifr_addr.sin6_addr = in6m->in6m_addr; +#ifdef __APPLE__ + dlil_ioctl(0, in6m->in6m_ifp, SIOCDELMULTI, (caddr_t)&ifr); +#else + (*in6m->in6m_ifp->if_ioctl)(in6m->in6m_ifp, + SIOCDELMULTI, (caddr_t)&ifr); +#endif + _FREE(in6m, M_IPMADDR); + } + splx(s); +} +#else /* not FreeBSD3 */ +/* + * Add an address to the list of IP6 multicast addresses for a + * given interface. + */ +struct in6_multi * +in6_addmulti(maddr6, ifp, errorp) + register struct in6_addr *maddr6; + register struct ifnet *ifp; + int *errorp; +{ + struct in6_multi *in6m; + struct sockaddr_in6 sin6; + struct ifmultiaddr *ifma; + int s = splnet(); + + *errorp = 0; + + /* + * Call generic routine to add membership or increment + * refcount. It wants addresses in the form of a sockaddr, + * so we build one here (being careful to zero the unused bytes). + */ + bzero(&sin6, sizeof sin6); + sin6.sin6_family = AF_INET6; + sin6.sin6_len = sizeof sin6; + sin6.sin6_addr = *maddr6; + *errorp = if_addmulti(ifp, (struct sockaddr *)&sin6, &ifma); + if (*errorp) { + splx(s); + return 0; + } + + /* + * If ifma->ifma_protospec is null, then if_addmulti() created + * a new record. Otherwise, we are done. + */ + if (ifma->ifma_protospec != 0) + return ifma->ifma_protospec; + + /* XXX - if_addmulti uses M_WAITOK. Can this really be called + at interrupt time? If so, need to fix if_addmulti. 
XXX */ + in6m = (struct in6_multi *)_MALLOC(sizeof(*in6m), M_IPMADDR, M_NOWAIT); + if (in6m == NULL) { + splx(s); + return (NULL); + } + + bzero(in6m, sizeof *in6m); + in6m->in6m_addr = *maddr6; + in6m->in6m_ifp = ifp; + in6m->in6m_ifma = ifma; + ifma->ifma_protospec = in6m; + LIST_INSERT_HEAD(&in6_multihead, in6m, in6m_entry); + + /* + * Let MLD6 know that we have joined a new IP6 multicast + * group. + */ + mld6_start_listening(in6m); + splx(s); + return(in6m); +} + +/* + * Delete a multicast address record. + */ +void +in6_delmulti(in6m) + struct in6_multi *in6m; +{ + struct ifmultiaddr *ifma = in6m->in6m_ifma; + int s = splnet(); + + if (ifma->ifma_refcount == 1) { + /* + * No remaining claims to this record; let MLD6 know + * that we are leaving the multicast group. + */ + mld6_stop_listening(in6m); + ifma->ifma_protospec = 0; + LIST_REMOVE(in6m, in6m_entry); + _FREE(in6m, M_IPMADDR); + } + /* XXX - should be separate API for when we have an ifma? */ + if_delmulti(ifma->ifma_ifp, ifma->ifma_addr); + splx(s); +} +#endif /* not FreeBSD3 */ + +/* + * Find an IPv6 interface link-local address specific to an interface. + */ +struct in6_ifaddr * +in6ifa_ifpforlinklocal(ifp, ignoreflags) + struct ifnet *ifp; + int ignoreflags; +{ + register struct ifaddr *ifa; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr == NULL) + continue; /* just for safety */ + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_IS_ADDR_LINKLOCAL(IFA_IN6(ifa))) { + if ((((struct in6_ifaddr *)ifa)->ia6_flags & + ignoreflags) != 0) + continue; + break; + } + } + + return((struct in6_ifaddr *)ifa); +} + + +/* + * find the internet address corresponding to a given interface and address. 
+ */ +struct in6_ifaddr * +in6ifa_ifpwithaddr(ifp, addr) + struct ifnet *ifp; + struct in6_addr *addr; +{ + register struct ifaddr *ifa; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr == NULL) + continue; /* just for safety */ + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_ARE_ADDR_EQUAL(addr, IFA_IN6(ifa))) + break; + } + + return((struct in6_ifaddr *)ifa); +} + +/* + * Convert IP6 address to printable (loggable) representation. + */ +static char digits[] = "0123456789abcdef"; +static int ip6round = 0; +char * +ip6_sprintf(addr) +register struct in6_addr *addr; +{ + static char ip6buf[8][48]; + register int i; + register char *cp; + register u_short *a = (u_short *)addr; + register u_char *d; + int dcolon = 0; + + ip6round = (ip6round + 1) & 7; + cp = ip6buf[ip6round]; + + for (i = 0; i < 8; i++) { + if (dcolon == 1) { + if (*a == 0) { + if (i == 7) + *cp++ = ':'; + a++; + continue; + } else + dcolon = 2; + } + if (*a == 0) { + if (dcolon == 0 && *(a + 1) == 0) { + if (i == 0) + *cp++ = ':'; + *cp++ = ':'; + dcolon = 1; + } else { + *cp++ = '0'; + *cp++ = ':'; + } + a++; + continue; + } + d = (u_char *)a; + *cp++ = digits[*d >> 4]; + *cp++ = digits[*d++ & 0xf]; + *cp++ = digits[*d >> 4]; + *cp++ = digits[*d & 0xf]; + *cp++ = ':'; + a++; + } + *--cp = 0; + return(ip6buf[ip6round]); +} + +int +in6_localaddr(in6) + struct in6_addr *in6; +{ + struct in6_ifaddr *ia; + + if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6)) + return 1; + + for (ia = in6_ifaddr; ia; ia = ia->ia_next) + if (IN6_ARE_MASKED_ADDR_EQUAL(in6, &ia->ia_addr.sin6_addr, + &ia->ia_prefixmask.sin6_addr)) + return 1; + + return (0); +} + +/* + * Get a scope of the address. Node-local, link-local, site-local or global. 
+ */ +int +in6_addrscope (addr) +struct in6_addr *addr; +{ + int scope; + + if (addr->s6_addr8[0] == 0xfe) { + scope = addr->s6_addr8[1] & 0xc0; + + switch (scope) { + case 0x80: + return IPV6_ADDR_SCOPE_LINKLOCAL; + break; + case 0xc0: + return IPV6_ADDR_SCOPE_SITELOCAL; + break; + default: + return IPV6_ADDR_SCOPE_GLOBAL; /* just in case */ + break; + } + } + + + if (addr->s6_addr8[0] == 0xff) { + scope = addr->s6_addr8[1] & 0x0f; + + /* + * due to other scope such as reserved, + * return scope doesn't work. + */ + switch (scope) { + case IPV6_ADDR_SCOPE_NODELOCAL: + return IPV6_ADDR_SCOPE_NODELOCAL; + break; + case IPV6_ADDR_SCOPE_LINKLOCAL: + return IPV6_ADDR_SCOPE_LINKLOCAL; + break; + case IPV6_ADDR_SCOPE_SITELOCAL: + return IPV6_ADDR_SCOPE_SITELOCAL; + break; + default: + return IPV6_ADDR_SCOPE_GLOBAL; + break; + } + } + + if (bcmp(&in6addr_loopback, addr, sizeof(*addr) - 1) == 0) { /* compare all but the last byte */ + if (addr->s6_addr8[15] == 1) /* loopback */ + return IPV6_ADDR_SCOPE_NODELOCAL; + if (addr->s6_addr8[15] == 0) /* unspecified */ + return IPV6_ADDR_SCOPE_LINKLOCAL; + } + + return IPV6_ADDR_SCOPE_GLOBAL; +} + +int +in6_addr2scopeid(ifp, addr) + struct ifnet *ifp; /* must not be NULL */ + struct in6_addr *addr; /* must not be NULL */ +{ + int scope = in6_addrscope(addr); + + switch(scope) { + case IPV6_ADDR_SCOPE_NODELOCAL: + return(-1); /* XXX: is this an appropriate value? */ + + case IPV6_ADDR_SCOPE_LINKLOCAL: + /* XXX: we do not distinguish between a link and an I/F. */ + return(ifp->if_index); + + case IPV6_ADDR_SCOPE_SITELOCAL: + return(0); /* XXX: invalid. */ + + default: + return(0); /* XXX: treat as global. */ + } +} + +/* + * return length of part which dst and src are equal + * hard coding... 
+ */ + +int +in6_matchlen(src, dst) +struct in6_addr *src, *dst; +{ + int match = 0; + u_char *s = (u_char *)src, *d = (u_char *)dst; + u_char *lim = s + 16, r; + + while (s < lim) + if ((r = (*d++ ^ *s++)) != 0) { + while (r < 128) { + match++; + r <<= 1; + } + break; + } else + match += 8; + return match; +} + +int +in6_are_prefix_equal(p1, p2, len) + struct in6_addr *p1, *p2; + int len; +{ + int bytelen, bitlen; + + /* sanity check */ + if (0 > len || len > 128) { + log(LOG_ERR, "in6_are_prefix_equal: invalid prefix length(%d)\n", + len); + return(0); + } + + bytelen = len / 8; + bitlen = len % 8; + + if (bcmp(&p1->s6_addr, &p2->s6_addr, bytelen)) + return(0); + if (p1->s6_addr[bytelen] >> (8 - bitlen) != + p2->s6_addr[bytelen] >> (8 - bitlen)) + return(0); + + return(1); +} + +void +in6_prefixlen2mask(maskp, len) + struct in6_addr *maskp; + int len; +{ + u_char maskarray[8] = {0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff}; + int bytelen, bitlen, i; + + /* sanity check */ + if (0 > len || len > 128) { + log(LOG_ERR, "in6_prefixlen2mask: invalid prefix length(%d)\n", + len); + return; + } + + bzero(maskp, sizeof(*maskp)); + bytelen = len / 8; + bitlen = len % 8; + for (i = 0; i < bytelen; i++) + maskp->s6_addr[i] = 0xff; + if (bitlen) + maskp->s6_addr[bytelen] = maskarray[bitlen - 1]; +} + +/* + * return the best address out of the same scope + */ +struct in6_ifaddr * +in6_ifawithscope(oifp, dst) + register struct ifnet *oifp; + register struct in6_addr *dst; +{ + int dst_scope = in6_addrscope(dst), src_scope, best_scope = 0; + int blen = -1; + struct ifaddr *ifa; + struct ifnet *ifp; + struct in6_ifaddr *ifa_best = NULL; + + if (oifp == NULL) { + printf("in6_ifawithscope: output interface is not specified\n"); + return(NULL); + } + + /* + * We search for all addresses on all interfaces from the beginning. + * Comparing an interface with the outgoing interface will be done + * only at the final stage of tiebreaking. 
+ */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifp = ifnet; ifp; ifp = ifp->if_next) +#else + for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) +#endif + { + /* + * We can never take an address that breaks the scope zone + * of the destination. + */ + if (in6_addr2scopeid(ifp, dst) != in6_addr2scopeid(oifp, dst)) + continue; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#elif defined(__FreeBSD__) && __FreeBSD__ >= 4 + TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + int tlen = -1, dscopecmp, bscopecmp, matchcmp; + + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + + src_scope = in6_addrscope(IFA_IN6(ifa)); + +#if ADDRSELECT_DEBUG /* should be removed after stabilization */ + dscopecmp = IN6_ARE_SCOPE_CMP(src_scope, dst_scope); + printf("in6_ifawithscope: dst=%s bestaddr=%s, " + "newaddr=%s, scope=%x, dcmp=%d, bcmp=%d, " + "matchlen=%d, flgs=%x\n", + ip6_sprintf(dst), + ifa_best ? ip6_sprintf(&ifa_best->ia_addr.sin6_addr) : "none", + ip6_sprintf(IFA_IN6(ifa)), src_scope, + dscopecmp, + ifa_best ? IN6_ARE_SCOPE_CMP(src_scope, best_scope) : -1, + in6_matchlen(IFA_IN6(ifa), dst), + ((struct in6_ifaddr *)ifa)->ia6_flags); +#endif + + /* + * Don't use an address before completing DAD + * nor a duplicated address. + */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & + IN6_IFF_NOTREADY) + continue; + + /* XXX: is there any case to allow anycasts? */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & + IN6_IFF_ANYCAST) + continue; + + if (((struct in6_ifaddr *)ifa)->ia6_flags & + IN6_IFF_DETACHED) + continue; + + /* + * If this is the first address we find, + * keep it anyway. + */ + if (ifa_best == NULL) + goto replace; + + /* + * ifa_best is never NULL beyond this line except + * within the block labeled "replace". 
+ */ + + /* + * If ifa_best has a smaller scope than dst and + * the current address has a larger one than + * (or equal to) dst, always replace ifa_best. + * Also, if the current address has a smaller scope + * than dst, ignore it unless ifa_best also has a + * smaller scope. + */ + if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0 && + IN6_ARE_SCOPE_CMP(src_scope, dst_scope) >= 0) + goto replace; + if (IN6_ARE_SCOPE_CMP(src_scope, dst_scope) < 0 && + IN6_ARE_SCOPE_CMP(best_scope, dst_scope) >= 0) + continue; + + /* + * A deprecated address SHOULD NOT be used in new + * communications if an alternate (non-deprecated) + * address is available and has sufficient scope. + * RFC 2462, Section 5.5.4. + */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & + IN6_IFF_DEPRECATED) { + /* + * Ignore any deprecated addresses if + * specified by configuration. + */ + if (!ip6_use_deprecated) + continue; + + /* + * If we have already found a non-deprecated + * candidate, just ignore deprecated addresses. + */ + if ((ifa_best->ia6_flags & IN6_IFF_DEPRECATED) + == 0) + continue; + } + + /* + * A non-deprecated address is always preferred + * to a deprecated one regardless of scopes and + * address matching. + */ + if ((ifa_best->ia6_flags & IN6_IFF_DEPRECATED) && + (((struct in6_ifaddr *)ifa)->ia6_flags & + IN6_IFF_DEPRECATED) == 0) + goto replace; + + /* + * At this point, we have two cases: + * 1. we are looking at a non-deprecated address, + * and ifa_best is also non-deprecated. + * 2. we are looking at a deprecated address, + * and ifa_best is also deprecated. + * Also, we do not have to consider a case where + * the scope of if_best is larger(smaller) than dst and + * the scope of the current address is smaller(larger) + * than dst. Such a case has already been covered. 
+ * Tiebreaking is done according to the following + * items: + * - the scope comparison between the address and + * dst (dscopecmp) + * - the scope comparison between the address and + * ifa_best (bscopecmp) + * - if the address matches dst longer than ifa_best + * (matchcmp) + * - if the address is on the outgoing I/F (outI/F) + * + * Roughly speaking, the selection policy is + * - the most important item is scope. The same scope + * is best. Then search for a larger scope. + * Smaller scopes are the last resort. + * - A deprecated address is chosen only when we have + * no address that has sufficient scope, but is + * preferred to any addresses of smaller scopes. + * - Longest address match against dst is considered + * only for addresses that have the same scope as dst. + * - If there are no other reasons to choose one, + * addresses on the outgoing I/F are preferred. + * + * The precise decision table is as follows: + * dscopecmp bscopecmp matchcmp outI/F | replace? + * !equal equal N/A Yes | Yes (1) + * !equal equal N/A No | No (2) + * larger larger N/A N/A | No (3) + * larger smaller N/A N/A | Yes (4) + * smaller larger N/A N/A | Yes (5) + * smaller smaller N/A N/A | No (6) + * equal smaller N/A N/A | Yes (7) + * equal larger (already done) + * equal equal larger N/A | Yes (8) + * equal equal smaller N/A | No (9) + * equal equal equal Yes | Yes (a) + * equal equal equal No | No (b) + */ + dscopecmp = IN6_ARE_SCOPE_CMP(src_scope, dst_scope); + bscopecmp = IN6_ARE_SCOPE_CMP(src_scope, best_scope); + + if (dscopecmp && bscopecmp == 0) { + if (oifp == ifp) /* (1) */ + goto replace; + continue; /* (2) */ + } + if (dscopecmp > 0) { + if (bscopecmp > 0) /* (3) */ + continue; + goto replace; /* (4) */ + } + if (dscopecmp < 0) { + if (bscopecmp > 0) /* (5) */ + goto replace; + continue; /* (6) */ + } + + /* now dscopecmp must be 0 */ + if (bscopecmp < 0) + goto replace; /* (7) */ + + /* + * At last both dscopecmp and bscopecmp must be 0. 
+ * We need address matching against dst for + * tiebreaking. + */ + tlen = in6_matchlen(IFA_IN6(ifa), dst); + matchcmp = tlen - blen; + if (matchcmp > 0) /* (8) */ + goto replace; + if (matchcmp < 0) /* (9) */ + continue; + if (oifp == ifp) /* (a) */ + goto replace; + continue; /* (b) */ + + replace: + ifa_best = (struct in6_ifaddr *)ifa; + blen = tlen >= 0 ? tlen : + in6_matchlen(IFA_IN6(ifa), dst); + best_scope = in6_addrscope(&ifa_best->ia_addr.sin6_addr); + } + } + + /* count statistics for future improvements */ + if (ifa_best == NULL) + ip6stat.ip6s_sources_none++; + else { + if (oifp == ifa_best->ia_ifp) + ip6stat.ip6s_sources_sameif[best_scope]++; + else + ip6stat.ip6s_sources_otherif[best_scope]++; + + if (best_scope == dst_scope) + ip6stat.ip6s_sources_samescope[best_scope]++; + else + ip6stat.ip6s_sources_otherscope[best_scope]++; + + if ((ifa_best->ia6_flags & IN6_IFF_DEPRECATED) != 0) + ip6stat.ip6s_sources_deprecated[best_scope]++; + } + + return(ifa_best); +} + +/* + * return the best address out of the same scope. if no address was + * found, return the first valid address from designated IF. + */ + +struct in6_ifaddr * +in6_ifawithifp(ifp, dst) + register struct ifnet *ifp; + register struct in6_addr *dst; +{ + int dst_scope = in6_addrscope(dst), blen = -1, tlen; + struct ifaddr *ifa; + struct in6_ifaddr *besta = 0; + struct in6_ifaddr *dep[2]; /*last-resort: deprecated*/ + + dep[0] = dep[1] = NULL; + +#if 0 +#if MIP6 + /* + * This is needed to assure that the Home Address is used for + * outgoing packets when not at home. We can't choose any other + * address if we want to keep connections up during movement. 
+ */ + if (mip6_get_home_prefix_hook) { /* Only Mobile Node */ + struct nd_prefix *pr; + if ((pr = (*mip6_get_home_prefix_hook)()) && + !IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + { + if (dst_scope == in6_addrscope(&pr->ndpr_addr)) { +#if MIP6_DEBUG + /* Noisy but useful */ + mip6_debug("%s: Local address %s is chosen " + "for pcb to dest %s.\n", + __FUNCTION__, + ip6_sprintf(&pr->ndpr_addr), + ip6_sprintf(dst)); +#endif + return(in6ifa_ifpwithaddr(ifp, &pr->ndpr_addr)); + } + } + } +#endif /* MIP6 */ +#endif /* 0 */ + + /* + * We first look for addresses in the same scope. + * If there is one, return it. + * If two or more, return one which matches the dst longest. + * If none, return one of global addresses assigned other ifs. + */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST) + continue; /* XXX: is there any case to allow anycast? 
*/ + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY) + continue; /* don't use this interface */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DETACHED) + continue; + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DEPRECATED) { + if (ip6_use_deprecated) + dep[0] = (struct in6_ifaddr *)ifa; + continue; + } + + if (dst_scope == in6_addrscope(IFA_IN6(ifa))) { + /* + * call in6_matchlen() as few as possible + */ + if (besta) { + if (blen == -1) + blen = in6_matchlen(&besta->ia_addr.sin6_addr, dst); + tlen = in6_matchlen(IFA_IN6(ifa), dst); + if (tlen > blen) { + blen = tlen; + besta = (struct in6_ifaddr *)ifa; + } + } else + besta = (struct in6_ifaddr *)ifa; + } + } + if (besta) + return(besta); + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST) + continue; /* XXX: is there any case to allow anycast? */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY) + continue; /* don't use this interface */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DETACHED) + continue; + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DEPRECATED) { + if (ip6_use_deprecated) + dep[1] = (struct in6_ifaddr *)ifa; + continue; + } + + return (struct in6_ifaddr *)ifa; + } + + /* use the last-resort values, that are, deprecated addresses */ + if (dep[0]) + return dep[0]; + if (dep[1]) + return dep[1]; + + return NULL; +} + +/* + * perform DAD when interface becomes IFF_UP. 
+ */ +void +in6_if_up(ifp) + struct ifnet *ifp; +{ + struct ifaddr *ifa; + struct in6_ifaddr *ia; + struct sockaddr_dl *sdl; + int type; +#if __bsdi__ + u_char ea[ETHER_ADDR_LEN]; +#else + struct ether_addr ea; +#endif + int off; + int dad_delay; /* delay ticks before DAD output */ + + bzero(&ea, sizeof(ea)); + sdl = NULL; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family == AF_INET6 + && IN6_IS_ADDR_LINKLOCAL(&((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr)) { + goto dad; + } + if (ifa->ifa_addr->sa_family != AF_LINK) + continue; + sdl = (struct sockaddr_dl *)ifa->ifa_addr; + break; + } + + switch (ifp->if_type) { + case IFT_LOOP: + in6_ifattach(ifp, IN6_IFT_LOOP, NULL, 1); + break; + case IFT_SLIP: + case IFT_PPP: + case IFT_DUMMY: + case IFT_GIF: + case IFT_FAITH: + type = IN6_IFT_P2P; + in6_ifattach(ifp, type, 0, 1); + break; +#if IFT_STF + case IFT_STF: + /* + * This is VERY awkward to call nd6_ifattach while we will + * not do ND at all on the interface. It is necessary for + * initializing default hoplimit, and ND mtu. 
+ */ + nd6_ifattach(ifp); + break; +#endif + case IFT_ETHER: + case IFT_FDDI: + case IFT_ATM: + type = IN6_IFT_802; + if (sdl == NULL) + break; + off = sdl->sdl_nlen; + if (bcmp(&sdl->sdl_data[off], &ea, sizeof(ea)) != 0) + in6_ifattach(ifp, type, LLADDR(sdl), 0); + break; + case IFT_ARCNET: + type = IN6_IFT_ARCNET; + if (sdl == NULL) + break; + off = sdl->sdl_nlen; + if (sdl->sdl_data[off] != 0) /* XXX ?: */ + in6_ifattach(ifp, type, LLADDR(sdl), 0); + break; + default: + break; + } + +dad: + dad_delay = 0; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + ia = (struct in6_ifaddr *)ifa; + if (ia->ia6_flags & IN6_IFF_TENTATIVE) + nd6_dad_start(ifa, &dad_delay); + } +} + +/* + * Calculate max IPv6 MTU through all the interfaces and store it + * to in6_maxmtu. + */ +void +in6_setmaxmtu() +{ + unsigned long maxmtu = 0; + struct ifnet *ifp; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifp = ifnet; ifp; ifp = ifp->if_next) +#else + for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) +#endif + { + if ((ifp->if_flags & IFF_LOOPBACK) == 0 && + nd_ifinfo[ifp->if_index].linkmtu > maxmtu) + maxmtu = nd_ifinfo[ifp->if_index].linkmtu; + } + if (maxmtu) /* update only when maxmtu is positive */ + in6_maxmtu = maxmtu; +} + +#if MAPPED_ADDR_ENABLED +/* + * Convert sockaddr_in6 to sockaddr_in. Original sockaddr_in6 must be + * v4 mapped addr or v4 compat addr + */ +void +in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6) +{ + bzero(sin, sizeof(*sin)); + sin->sin_len = sizeof(struct sockaddr_in); + sin->sin_family = AF_INET; + sin->sin_port = sin6->sin6_port; + sin->sin_addr.s_addr = sin6->sin6_addr.s6_addr32[3]; +} + +/* Convert sockaddr_in to sockaddr_in6 in v4 mapped addr format. 
*/ +void +in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6) +{ + bzero(sin6, sizeof(*sin6)); + sin6->sin6_len = sizeof(struct sockaddr_in6); + sin6->sin6_family = AF_INET6; + sin6->sin6_port = sin->sin_port; + sin6->sin6_addr.s6_addr32[0] = 0; + sin6->sin6_addr.s6_addr32[1] = 0; + sin6->sin6_addr.s6_addr32[2] = IPV6_ADDR_INT32_SMP; + sin6->sin6_addr.s6_addr32[3] = sin->sin_addr.s_addr; +} + +/* Convert sockaddr_in6 into sockaddr_in. */ +void +in6_sin6_2_sin_in_sock(struct sockaddr *nam) +{ + struct sockaddr_in *sin_p; + struct sockaddr_in6 sin6; + + /* + * Save original sockaddr_in6 addr and convert it + * to sockaddr_in. + */ + sin6 = *(struct sockaddr_in6 *)nam; + sin_p = (struct sockaddr_in *)nam; + in6_sin6_2_sin(sin_p, &sin6); +} + +/* Convert sockaddr_in into sockaddr_in6 in v4 mapped addr format. */ +void +in6_sin_2_v4mapsin6_in_sock(struct sockaddr **nam) +{ + struct sockaddr_in *sin_p; + struct sockaddr_in6 *sin6_p; + + MALLOC(sin6_p, struct sockaddr_in6 *, sizeof *sin6_p, M_SONAME, + M_WAITOK); + sin_p = (struct sockaddr_in *)*nam; + in6_sin_2_v4mapsin6(sin_p, sin6_p); + FREE(*nam, M_SONAME); + *nam = (struct sockaddr *)sin6_p; +} +#endif /* MAPPED_ADDR_ENABLED */ + diff --git a/bsd/netinet6/in6.h b/bsd/netinet6/in6.h new file mode 100644 index 000000000..7c7a745fc --- /dev/null +++ b/bsd/netinet6/in6.h @@ -0,0 +1,710 @@ +/* $KAME: in6.h,v 1.40 2000/03/25 07:23:42 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in.h 8.3 (Berkeley) 1/3/94 + */ + +#ifndef __KAME_NETINET_IN_H_INCLUDED_ +#error "do not include netinet6/in6.h directly, include netinet/in.h" +#endif + +#ifndef _NETINET6_IN6_H_ +#define _NETINET6_IN6_H_ + +#if !defined(_XOPEN_SOURCE) +#include +#endif + +/* + * Identification of the network protocol stack + */ +#define __KAME__ +#define __KAME_VERSION "STABLE 20000425" + +/* + * Local port number conventions: + * + * Ports < IPPORT_RESERVED are reserved for privileged processes (e.g. root), + * unless a kernel is compiled with IPNOPRIVPORTS defined. + * + * When a user does a bind(2) or connect(2) with a port number of zero, + * a non-conflicting local port address is chosen. 
+ * + * The default range is IPPORT_ANONMIX to IPPORT_ANONMAX, although + * that is settable by sysctl(3); net.inet.ip.anonportmin and + * net.inet.ip.anonportmax respectively. + * + * A user may set the IPPROTO_IP option IP_PORTRANGE to change this + * default assignment range. + * + * The value IP_PORTRANGE_DEFAULT causes the default behavior. + * + * The value IP_PORTRANGE_HIGH is the same as IP_PORTRANGE_DEFAULT, + * and exists only for FreeBSD compatibility purposes. + * + * The value IP_PORTRANGE_LOW changes the range to the "low" are + * that is (by convention) restricted to privileged processes. + * This convention is based on "vouchsafe" principles only. + * It is only secure if you trust the remote host to restrict these ports. + * The range is IPPORT_RESERVEDMIN to IPPORT_RESERVEDMAX. + */ + +#define IPV6PORT_RESERVED 1024 +#define IPV6PORT_ANONMIN 49152 +#define IPV6PORT_ANONMAX 65535 +#define IPV6PORT_RESERVEDMIN 600 +#define IPV6PORT_RESERVEDMAX (IPV6PORT_RESERVED-1) + +/* + * IPv6 address + */ +struct in6_addr { + union { + u_int8_t __u6_addr8[16]; + u_int16_t __u6_addr16[8]; + u_int32_t __u6_addr32[4]; + } __u6_addr; /* 128-bit IP6 address */ +}; + +#define s6_addr __u6_addr.__u6_addr8 +#ifdef KERNEL /*XXX nonstandard*/ +#define s6_addr8 __u6_addr.__u6_addr8 +#define s6_addr16 __u6_addr.__u6_addr16 +#define s6_addr32 __u6_addr.__u6_addr32 +#endif + +#define INET6_ADDRSTRLEN 46 + +/* + * Socket address for IPv6 + */ +#if !defined(_XOPEN_SOURCE) +#define SIN6_LEN +#endif +struct sockaddr_in6 { + u_int8_t sin6_len; /* length of this struct(sa_family_t)*/ + u_int8_t sin6_family; /* AF_INET6 (sa_family_t) */ + u_int16_t sin6_port; /* Transport layer port # (in_port_t)*/ + u_int32_t sin6_flowinfo; /* IP6 flow information */ + struct in6_addr sin6_addr; /* IP6 address */ + u_int32_t sin6_scope_id; /* intface scope id */ +}; + +/* + * Local definition for masks + */ +#ifdef KERNEL /*XXX nonstandard*/ +#define IN6MASK0 {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0 }}} +#define IN6MASK32 {{{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK64 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK96 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK128 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} +#endif + +#ifdef KERNEL +extern const struct in6_addr in6mask0; +extern const struct in6_addr in6mask32; +extern const struct in6_addr in6mask64; +extern const struct in6_addr in6mask96; +extern const struct in6_addr in6mask128; +#endif /* KERNEL */ + +/* + * Macros started with IPV6_ADDR is KAME local + */ +#ifdef KERNEL /*XXX nonstandard*/ +#if BYTE_ORDER == BIG_ENDIAN +#define IPV6_ADDR_INT32_ONE 1 +#define IPV6_ADDR_INT32_TWO 2 +#define IPV6_ADDR_INT32_MNL 0xff010000 +#define IPV6_ADDR_INT32_MLL 0xff020000 +#define IPV6_ADDR_INT32_SMP 0x0000ffff +#define IPV6_ADDR_INT16_ULL 0xfe80 +#define IPV6_ADDR_INT16_USL 0xfec0 +#define IPV6_ADDR_INT16_MLL 0xff02 +#elif BYTE_ORDER == LITTLE_ENDIAN +#define IPV6_ADDR_INT32_ONE 0x01000000 +#define IPV6_ADDR_INT32_TWO 0x02000000 +#define IPV6_ADDR_INT32_MNL 0x000001ff +#define IPV6_ADDR_INT32_MLL 0x000002ff +#define IPV6_ADDR_INT32_SMP 0xffff0000 +#define IPV6_ADDR_INT16_ULL 0x80fe +#define IPV6_ADDR_INT16_USL 0xc0fe +#define IPV6_ADDR_INT16_MLL 0x02ff +#endif +#endif + +/* + * Definition of some useful macros to handle IP6 addresses + */ +#define IN6ADDR_ANY_INIT \ + {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6ADDR_LOOPBACK_INIT \ + {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} +#define IN6ADDR_NODELOCAL_ALLNODES_INIT \ + {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} +#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ + {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} +#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ + {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }}} + +#ifdef KERNEL +extern const struct in6_addr in6addr_any; +extern const struct in6_addr in6addr_loopback; +extern const struct in6_addr in6addr_nodelocal_allnodes; +extern const struct in6_addr in6addr_linklocal_allnodes; +extern const struct in6_addr in6addr_linklocal_allrouters; +#endif + +/* + * Equality + * NOTE: Some of kernel programming environment (for example, openbsd/sparc) + * does not supply memcmp(). For userland memcmp() is preferred as it is + * in ANSI standard. + */ +#ifdef KERNEL +#define IN6_ARE_ADDR_EQUAL(a, b) \ + (bcmp((a), (b), sizeof(struct in6_addr)) == 0) +#else +#define IN6_ARE_ADDR_EQUAL(a, b) \ + (memcmp((a), (b), sizeof(struct in6_addr)) == 0) +#endif + +/* + * Unspecified + */ +#define IN6_IS_ADDR_UNSPECIFIED(a) \ + ((*(u_int32_t *)(&(a)->s6_addr[0]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[4]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[8]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[12]) == 0)) + +/* + * Loopback + */ +#define IN6_IS_ADDR_LOOPBACK(a) \ + ((*(u_int32_t *)(&(a)->s6_addr[0]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[4]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[8]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[12]) == ntohl(1))) + +/* + * IPv4 compatible + */ +#define IN6_IS_ADDR_V4COMPAT(a) \ + ((*(u_int32_t *)(&(a)->s6_addr[0]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[4]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[8]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[12]) != 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[12]) != ntohl(1))) + +/* + * Mapped + */ +#define IN6_IS_ADDR_V4MAPPED(a) \ + ((*(u_int32_t *)(&(a)->s6_addr[0]) == 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[4]) 
== 0) && \ + (*(u_int32_t *)(&(a)->s6_addr[8]) == ntohl(0x0000ffff))) + +/* + * KAME Scope Values + */ + +#ifdef KERNEL /*XXX nonstandard*/ +#define IPV6_ADDR_SCOPE_NODELOCAL 0x01 +#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 +#define IPV6_ADDR_SCOPE_SITELOCAL 0x05 +#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ +#define IPV6_ADDR_SCOPE_GLOBAL 0x0e +#else +#define __IPV6_ADDR_SCOPE_NODELOCAL 0x01 +#define __IPV6_ADDR_SCOPE_LINKLOCAL 0x02 +#define __IPV6_ADDR_SCOPE_SITELOCAL 0x05 +#define __IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ +#define __IPV6_ADDR_SCOPE_GLOBAL 0x0e +#endif + +/* + * Unicast Scope + * Note that we must check topmost 10 bits only, not 16 bits (see RFC2373). + */ +#define IN6_IS_ADDR_LINKLOCAL(a) \ + (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0x80)) +#define IN6_IS_ADDR_SITELOCAL(a) \ + (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0xc0)) + +/* + * Multicast + */ +#define IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff) + +#ifdef KERNEL /*XXX nonstandard*/ +#define IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) +#else +#define __IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) +#endif + +/* + * Multicast Scope + */ +#ifdef KERNEL /*refers nonstandard items */ +#define IN6_IS_ADDR_MC_NODELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_NODELOCAL)) +#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_LINKLOCAL)) +#define IN6_IS_ADDR_MC_SITELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_SITELOCAL)) +#define IN6_IS_ADDR_MC_ORGLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_ORGLOCAL)) +#define IN6_IS_ADDR_MC_GLOBAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_GLOBAL)) +#else +#define IN6_IS_ADDR_MC_NODELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (__IPV6_ADDR_MC_SCOPE(a) == 
__IPV6_ADDR_SCOPE_NODELOCAL)) +#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_LINKLOCAL)) +#define IN6_IS_ADDR_MC_SITELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_SITELOCAL)) +#define IN6_IS_ADDR_MC_ORGLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_ORGLOCAL)) +#define IN6_IS_ADDR_MC_GLOBAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ + (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_GLOBAL)) +#endif + +/* + * Wildcard Socket + */ +#if 0 /*pre-RFC2553*/ +#define IN6_IS_ADDR_ANY(a) IN6_IS_ADDR_UNSPECIFIED(a) +#endif + +/* + * KAME Scope + */ +#ifdef KERNEL /*nonstandard*/ +#define IN6_IS_SCOPE_LINKLOCAL(a) \ + ((IN6_IS_ADDR_LINKLOCAL(a)) || \ + (IN6_IS_ADDR_MC_LINKLOCAL(a))) +#endif + +/* + * IP6 route structure + */ +#if !defined(_XOPEN_SOURCE) +struct route_in6 { + struct rtentry *ro_rt; + struct sockaddr_in6 ro_dst; +}; +#endif + +/* + * Options for use with [gs]etsockopt at the IPV6 level. + * First word of comment is data type; bool is stored in int. 
+ */ +/* no hdrincl */ +#if 0 /* the followings are relic in IPv4 and hence are disabled */ +#define IPV6_OPTIONS 1 /* buf/ip6_opts; set/get IP6 options */ +#define IPV6_RECVOPTS 5 /* bool; receive all IP6 opts w/dgram */ +#define IPV6_RECVRETOPTS 6 /* bool; receive IP6 opts for response */ +#define IPV6_RECVDSTADDR 7 /* bool; receive IP6 dst addr w/dgram */ +#define IPV6_RETOPTS 8 /* ip6_opts; set/get IP6 options */ +#endif +#define IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */ +#define IPV6_UNICAST_HOPS 4 /* int; IP6 hops */ +#define IPV6_MULTICAST_IF 9 /* u_char; set/get IP6 multicast i/f */ +#define IPV6_MULTICAST_HOPS 10 /* u_char; set/get IP6 multicast hops */ +#define IPV6_MULTICAST_LOOP 11 /* u_char; set/get IP6 multicast loopback */ +#define IPV6_JOIN_GROUP 12 /* ip6_mreq; join a group membership */ +#define IPV6_LEAVE_GROUP 13 /* ip6_mreq; leave a group membership */ +#define IPV6_PORTRANGE 14 /* int; range to choose for unspec port */ +#define ICMP6_FILTER 18 /* icmp6_filter; icmp6 filter */ +#define IPV6_PKTINFO 19 /* in6_pktinfo; send if, src addr */ +#define IPV6_HOPLIMIT 20 /* int; send hop limit */ +#define IPV6_NEXTHOP 21 /* sockaddr; next hop addr */ +#define IPV6_HOPOPTS 22 /* ip6_hbh; send hop-by-hop option */ +#define IPV6_DSTOPTS 23 /* ip6_dest; send dst option befor rthdr */ +#define IPV6_RTHDR 24 /* ip6_rthdr; send routing header */ +#define IPV6_PKTOPTIONS 25 /* buf/cmsghdr; set/get IPv6 options */ + /* obsoleted by 2292bis */ +#define IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */ +#define IPV6_BINDV6ONLY 27 /* bool; only bind INET6 at null bind */ + +#if 1 /*IPSEC*/ +#define IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */ +#endif +#define IPV6_FAITH 29 /* bool; accept FAITH'ed connections */ + +#if 1 /*IPV6FIREWALL*/ +#define IPV6_FW_ADD 30 /* add a firewall rule to chain */ +#define IPV6_FW_DEL 31 /* delete a firewall rule from chain */ +#define IPV6_FW_FLUSH 32 /* flush firewall rule chain */ +#define 
IPV6_FW_ZERO 33 /* clear single/all firewall counter(s) */ +#define IPV6_FW_GET 34 /* get entire firewall rule chain */ +#endif + +/* new socket options introduced in RFC2292bis */ +#define IPV6_RTHDRDSTOPTS 35 /* ip6_dest; send dst option before rthdr */ + +#define IPV6_RECVPKTINFO 36 /* bool; recv if, dst addr */ +#define IPV6_RECVHOPLIMIT 37 /* bool; recv hop limit */ +#define IPV6_RECVRTHDR 38 /* bool; recv routing header */ +#define IPV6_RECVHOPOPTS 39 /* bool; recv hop-by-hop option */ +#define IPV6_RECVDSTOPTS 40 /* bool; recv dst option after rthdr */ +#define IPV6_RECVRTHDRDSTOPTS 41 /* bool; recv dst option before rthdr */ + +#define IPV6_USE_MIN_MTU 42 /* bool; send packets at the minimum MTU */ +#define IPV6_RECVPATHMTU 43 /* bool; notify an according MTU */ + +/* the followings are used as cmsg type only */ +#define IPV6_PATHMTU 44 /* 4 bytes int; MTU notification */ +#define IPV6_REACHCONF 45 /* no data; ND reachability confirm */ + +#define IPV6_RTHDR_LOOSE 0 /* this hop need not be a neighbor. XXX old spec */ +#define IPV6_RTHDR_STRICT 1 /* this hop must be a neighbor. XXX old spec */ +#define IPV6_RTHDR_TYPE_0 0 /* IPv6 routing header type 0 */ + +/* + * Defaults and limits for options + */ +#define IPV6_DEFAULT_MULTICAST_HOPS 1 /* normally limit m'casts to 1 hop */ +#define IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ + +/* + * Argument structure for IPV6_JOIN_GROUP and IPV6_LEAVE_GROUP. 
+ */ +struct ipv6_mreq { + struct in6_addr ipv6mr_multiaddr; + u_int ipv6mr_interface; +}; + +/* + * IPV6_PKTINFO: Packet information(RFC2292 sec 5) + */ +struct in6_pktinfo { + struct in6_addr ipi6_addr; /* src/dst IPv6 address */ + u_int ipi6_ifindex; /* send/recv interface index */ +}; + +/* + * Argument for IPV6_PORTRANGE: + * - which range to search when port is unspecified at bind() or connect() + */ +#define IPV6_PORTRANGE_DEFAULT 0 /* default range */ +#define IPV6_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ +#define IPV6_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ + +#if !defined(_XOPEN_SOURCE) +/* + * Definitions for inet6 sysctl operations. + * + * Third level is protocol number. + * Fourth level is desired variable within that protocol. + */ +#define IPV6PROTO_MAXID (IPPROTO_PIM + 1) /* don't list to IPV6PROTO_MAX */ + +#define CTL_IPV6PROTO_NAMES { \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, \ + { "tcp6", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "udp6", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, \ + { "ip6", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, \ + { "ipsec6", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "icmp6", CTLTYPE_NODE }, \ + { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 
0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "pim6", CTLTYPE_NODE }, \ +} + +/* + * Names for IP sysctl objects + */ +#define IPV6CTL_FORWARDING 1 /* act as router */ +#define IPV6CTL_SENDREDIRECTS 2 /* may send redirects when forwarding*/ +#define IPV6CTL_DEFHLIM 3 /* default Hop-Limit */ +#ifdef notyet +#define IPV6CTL_DEFMTU 4 /* default MTU */ +#endif +#define IPV6CTL_FORWSRCRT 5 /* forward source-routed dgrams */ +#define IPV6CTL_STATS 6 /* stats */ +#define IPV6CTL_MRTSTATS 7 /* multicast forwarding stats */ +#define IPV6CTL_MRTPROTO 8 /* multicast routing protocol */ +#define IPV6CTL_MAXFRAGPACKETS 9 /* max packets reassembly queue */ +#define IPV6CTL_SOURCECHECK 10 /* verify source route and intf */ +#define IPV6CTL_SOURCECHECK_LOGINT 11 /* minimume logging interval */ +#define IPV6CTL_ACCEPT_RTADV 12 +#define IPV6CTL_KEEPFAITH 13 +#define IPV6CTL_LOG_INTERVAL 14 +#define IPV6CTL_HDRNESTLIMIT 15 +#define IPV6CTL_DAD_COUNT 16 +#define IPV6CTL_AUTO_FLOWLABEL 17 +#define IPV6CTL_DEFMCASTHLIM 18 +#define IPV6CTL_GIF_HLIM 19 /* default HLIM for gif encap packet */ +#define IPV6CTL_KAME_VERSION 20 +#define IPV6CTL_USE_DEPRECATED 21 /* use deprecated addr (RFC2462 5.5.4) */ +#define IPV6CTL_RR_PRUNE 22 /* walk timer for router renumbering */ +#if MAPPED_ADDR_ENABLED +#define IPV6CTL_MAPPED_ADDR 23 +#endif /* MAPPED_ADDR_ENABLED */ +/* New entries should be added here from current IPV6CTL_MAXID value. 
*/ +#define IPV6CTL_MAXID 24 + +#if MAPPED_ADDR_ENABLED +#define IPV6CTL_NAMES_MAPPED_ADDR "mapped_addr" +#define IPV6CTL_TYPE_MAPPED_ADDR CTLTYPE_INT +#define IPV6CTL_VARS_MAPPED_ADDR &ip6_mapped_addr_on +#else /* MAPPED_ADDR_ENABLED */ +#define IPV6CTL_NAMES_MAPPED_ADDR 0 +#define IPV6CTL_TYPE_MAPPED_ADDR 0 +#define IPV6CTL_VARS_MAPPED_ADDR 0 +#endif /* MAPPED_ADDR_ENABLED */ + +#if IPV6CTL_BINDV6ONLY +#define IPV6CTL_NAMES_BINDV6ONLY "bindv6only" +#define IPV6CTL_TYPE_BINDV6ONLY CTLTYPE_INT +#define IPV6CTL_VARS_BINDV6ONLY &ip6_bindv6only +#else +#define IPV6CTL_NAMES_BINDV6ONLY 0 +#define IPV6CTL_TYPE_BINDV6ONLY 0 +#define IPV6CTL_VARS_BINDV6ONLY 0 +#endif + +#define IPV6CTL_NAMES { \ + { 0, 0 }, \ + { "forwarding", CTLTYPE_INT }, \ + { "redirect", CTLTYPE_INT }, \ + { "hlim", CTLTYPE_INT }, \ + { "mtu", CTLTYPE_INT }, \ + { "forwsrcrt", CTLTYPE_INT }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "mrtproto", CTLTYPE_INT }, \ + { "maxfragpackets", CTLTYPE_INT }, \ + { "sourcecheck", CTLTYPE_INT }, \ + { "sourcecheck_logint", CTLTYPE_INT }, \ + { "accept_rtadv", CTLTYPE_INT }, \ + { "keepfaith", CTLTYPE_INT }, \ + { "log_interval", CTLTYPE_INT }, \ + { "hdrnestlimit", CTLTYPE_INT }, \ + { "dad_count", CTLTYPE_INT }, \ + { "auto_flowlabel", CTLTYPE_INT }, \ + { "defmcasthlim", CTLTYPE_INT }, \ + { "gifhlim", CTLTYPE_INT }, \ + { "kame_version", CTLTYPE_STRING }, \ + { "use_deprecated", CTLTYPE_INT }, \ + { "rr_prune", CTLTYPE_INT }, \ + { IPV6CTL_NAMES_MAPPED_ADDR, IPV6CTL_TYPE_MAPPED_ADDR }, \ + { IPV6CTL_NAMES_BINDV6ONLY, IPV6CTL_TYPE_BINDV6ONLY }, \ +} + +#ifdef __bsdi__ +#define IPV6CTL_VARS { \ + 0, \ + &ip6_forwarding, \ + &ip6_sendredirects, \ + &ip6_defhlim, \ + 0, \ + &ip6_forward_srcrt, \ + 0, \ + 0, \ + 0, \ + &ip6_maxfragpackets, \ + &ip6_sourcecheck, \ + &ip6_sourcecheck_interval, \ + &ip6_accept_rtadv, \ + &ip6_keepfaith, \ + &ip6_log_interval, \ + &ip6_hdrnestlimit, \ + &ip6_dad_count, \ + &ip6_auto_flowlabel, \ + &ip6_defmcasthlim, \ + &ip6_gif_hlim, \ + 0, \ 
+ &ip6_use_deprecated, \ + &ip6_rr_prune, \ + IPV6CTL_VARS_MAPPED_ADDR, \ + IPV6CTL_VARS_BINDV6ONLY, \ +} +#endif +#endif /* !_XOPEN_SOURCE */ + +#ifdef KERNEL +struct cmsghdr; + +int in6_cksum __P((struct mbuf *, u_int8_t, u_int32_t, u_int32_t)); +int in6_localaddr __P((struct in6_addr *)); +int in6_addrscope __P((struct in6_addr *)); +struct in6_ifaddr *in6_ifawithscope __P((struct ifnet *, struct in6_addr *)); +struct in6_ifaddr *in6_ifawithifp __P((struct ifnet *, struct in6_addr *)); +extern void in6_if_up __P((struct ifnet *)); +#if MAPPED_ADDR_ENABLED +struct sockaddr; + +void in6_sin6_2_sin __P((struct sockaddr_in *sin, + struct sockaddr_in6 *sin6)); +void in6_sin_2_v4mapsin6 __P((struct sockaddr_in *sin, + struct sockaddr_in6 *sin6)); +void in6_sin6_2_sin_in_sock __P((struct sockaddr *nam)); +void in6_sin_2_v4mapsin6_in_sock __P((struct sockaddr **nam)); +#endif /* MAPPED_ADDR_ENABLED */ + +#define satosin6(sa) ((struct sockaddr_in6 *)(sa)) +#define sin6tosa(sin6) ((struct sockaddr *)(sin6)) +#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) +#endif /* KERNEL */ + +__BEGIN_DECLS +struct cmsghdr; + +extern int inet6_option_space __P((int)); +extern int inet6_option_init __P((void *, struct cmsghdr **, int)); +extern int inet6_option_append __P((struct cmsghdr *, const u_int8_t *, + int, int)); +extern u_int8_t *inet6_option_alloc __P((struct cmsghdr *, int, int, int)); +extern int inet6_option_next __P((const struct cmsghdr *, u_int8_t **)); +extern int inet6_option_find __P((const struct cmsghdr *, u_int8_t **, int)); + +extern size_t inet6_rthdr_space __P((int, int)); +extern struct cmsghdr *inet6_rthdr_init __P((void *, int)); +extern int inet6_rthdr_add __P((struct cmsghdr *, const struct in6_addr *, + unsigned int)); +extern int inet6_rthdr_lasthop __P((struct cmsghdr *, unsigned int)); +#if 0 /* not implemented yet */ +extern int inet6_rthdr_reverse __P((const struct cmsghdr *, struct cmsghdr *)); +#endif +extern int inet6_rthdr_segments __P((const 
struct cmsghdr *)); +extern struct in6_addr *inet6_rthdr_getaddr __P((struct cmsghdr *, int)); +extern int inet6_rthdr_getflags __P((const struct cmsghdr *, int)); + +extern int inet6_opt_init __P((void *, size_t)); +extern int inet6_opt_append __P((void *, size_t, int, u_int8_t, + size_t, u_int8_t, void **)); +extern int inet6_opt_finish __P((void *, size_t, int)); +extern int inet6_opt_set_val __P((void *, size_t, void *, int)); + +extern int inet6_opt_next __P((void *, size_t, int, u_int8_t *, + size_t *, void **)); +extern int inet6_opt_find __P((void *, size_t, int, u_int8_t, + size_t *, void **)); +extern int inet6_opt_get_val __P((void *, size_t, void *, int)); +extern size_t inet6_rth_space __P((int, int)); +extern void *inet6_rth_init __P((void *, int, int, int)); +extern int inet6_rth_add __P((void *, const struct in6_addr *)); +extern int inet6_rth_reverse __P((const void *, void *)); +extern int inet6_rth_segments __P((const void *)); +extern struct in6_addr *inet6_rth_getaddr __P((const void *, int)); +__END_DECLS + +#endif /* !_NETINET6_IN6_H_ */ diff --git a/bsd/netinet6/in6_cksum.c b/bsd/netinet6/in6_cksum.c new file mode 100644 index 000000000..21aea718b --- /dev/null +++ b/bsd/netinet6/in6_cksum.c @@ -0,0 +1,320 @@ +/* $KAME: in6_cksum.c,v 1.5 2000/02/22 14:04:17 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1988, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include + +#include + +/* + * Checksum routine for Internet Protocol family headers (Portable Version). + * + * This routine is very heavily used in the network + * code and should be modified for each CPU to be as fast as possible. + */ + +#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x) +#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);} + +static union { + u_int16_t phs[4]; + struct { + u_int32_t ph_len; + u_int8_t ph_zero[3]; + u_int8_t ph_nxt; + } ph; +} uph; + +/* + * m MUST contain a continuous IP6 header. + * off is a offset where TCP/UDP/ICMP6 header starts. + * len is a total length of a transport segment. + * (e.g. 
 *	TCP header + TCP payload)
 */

/*
 * in6_cksum computes the 16-bit one's-complement checksum used by the
 * IPv6 upper layers: the sum covers the IPv6 pseudo-header (source and
 * destination address, payload length `len', next-header value `nxt')
 * followed by `len' bytes of transport data starting `off' bytes into
 * the mbuf chain `m'.  Returns the complemented, folded 16-bit sum.
 * Panics if the chain holds fewer than off+len bytes.
 */
int
in6_cksum(m, nxt, off, len)
	register struct mbuf *m;
	u_int8_t nxt;
	u_int32_t off, len;
{
	register u_int16_t *w;		/* cursor over 16-bit words */
	register int sum = 0;		/* running partial sum (carries folded by REDUCE) */
	register int mlen = 0;		/* bytes remaining in current mbuf */
	int byte_swapped = 0;		/* nonzero while summing at odd alignment */
#if 0
	int srcifid = 0, dstifid = 0;
#endif
	struct ip6_hdr *ip6;

	/* staging area for a 16-bit word split across two mbufs */
	union {
		u_int8_t c[2];
		u_int16_t s;
	} s_util;
	/* scratch used by the REDUCE macro to fold 32-bit carries */
	union {
		u_int16_t s[2];
		u_int32_t l;
	} l_util;

	/* sanity check */
	if (m->m_pkthdr.len < off + len) {
		panic("in6_cksum: mbuf len (%d) < off+len (%d+%d)\n",
			m->m_pkthdr.len, off, len);
	}

	/*
	 * First create IP6 pseudo header and calculate a summary.
	 */
	ip6 = mtod(m, struct ip6_hdr *);
#if 0
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		srcifid = ip6->ip6_src.s6_addr16[1];
		ip6->ip6_src.s6_addr16[1] = 0;
	}
	if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) {
		dstifid = ip6->ip6_dst.s6_addr16[1];
		ip6->ip6_dst.s6_addr16[1] = 0;
	}
#endif
	w = (u_int16_t *)&ip6->ip6_src;
	/* uph is a file-scope scratch pseudo-header (len/zero/next-header) */
	uph.ph.ph_len = htonl(len);
	uph.ph.ph_nxt = nxt;

	/*
	 * IPv6 source address.  Word w[1] is skipped for link-local
	 * addresses; presumably that word carries a KAME embedded
	 * scope/interface id that must not enter the checksum (compare
	 * the disabled s6_addr16[1] save/restore code above).
	 * NOTE(review): confirm against the KAME scoped-address scheme.
	 */
	sum += w[0];
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src))
		sum += w[1];
	sum += w[2]; sum += w[3]; sum += w[4]; sum += w[5];
	sum += w[6]; sum += w[7];
	/* IPv6 destination address (same treatment of word w[9]) */
	sum += w[8];
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst))
		sum += w[9];
	sum += w[10]; sum += w[11]; sum += w[12]; sum += w[13];
	sum += w[14]; sum += w[15];
	/* Payload length and upper layer identifier */
	sum += uph.phs[0];  sum += uph.phs[1];
	sum += uph.phs[2];  sum += uph.phs[3];

#if 0
	if (srcifid)
		ip6->ip6_src.s6_addr16[1] = srcifid;
	if (dstifid)
		ip6->ip6_dst.s6_addr16[1] = dstifid;
#endif
	/*
	 * Secondly calculate a summary of the first mbuf excluding offset.
	 * Walk forward to the mbuf that contains byte `off'.
	 */
	while (m != NULL && off > 0) {
		if (m->m_len <= off)
			off -= m->m_len;
		else
			break;
		m = m->m_next;
	}
	w = (u_int16_t *)(mtod(m, u_char *) + off);
	mlen = m->m_len - off;
	if (len < mlen)
		mlen = len;
	len -= mlen;
	/*
	 * Force to even boundary: if w is odd-aligned, fold the sum,
	 * absorb one byte into s_util, and remember via byte_swapped
	 * that the running sum is now shifted by 8 bits.
	 */
	if ((1 & (long) w) && (mlen > 0)) {
		REDUCE;
		sum <<= 8;
		s_util.c[0] = *(u_char *)w;
		w = (u_int16_t *)((char *)w + 1);
		mlen--;
		byte_swapped = 1;
	}
	/*
	 * Unroll the loop to make overhead from
	 * branches &c small.
	 */
	while ((mlen -= 32) >= 0) {
		sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
		sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
		sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11];
		sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15];
		w += 16;
	}
	mlen += 32;
	while ((mlen -= 8) >= 0) {
		sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
		w += 4;
	}
	mlen += 8;
	if (mlen == 0 && byte_swapped == 0)
		goto next;
	REDUCE;
	while ((mlen -= 2) >= 0) {
		sum += *w++;
	}
	if (byte_swapped) {
		/* undo the 8-bit shift applied at the odd-alignment fixup */
		REDUCE;
		sum <<= 8;
		byte_swapped = 0;
		if (mlen == -1) {
			s_util.c[1] = *(char *)w;
			sum += s_util.s;
			mlen = 0;
		} else
			mlen = -1;	/* pending odd byte is kept in s_util */
	} else if (mlen == -1)
		s_util.c[0] = *(char *)w;
 next:
	m = m->m_next;

	/*
	 * Lastly calculate a summary of the rest of mbufs.
	 */

	for (;m && len; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		w = mtod(m, u_int16_t *);
		if (mlen == -1) {
			/*
			 * The first byte of this mbuf is the continuation
			 * of a word spanning between this mbuf and the
			 * last mbuf.
			 *
			 * s_util.c[0] is already saved when scanning previous
			 * mbuf.
			 */
			s_util.c[1] = *(char *)w;
			sum += s_util.s;
			w = (u_int16_t *)((char *)w + 1);
			mlen = m->m_len - 1;
			len--;
		} else
			mlen = m->m_len;
		if (len < mlen)
			mlen = len;
		len -= mlen;
		/*
		 * Force to even boundary (same trick as above).
		 */
		if ((1 & (long) w) && (mlen > 0)) {
			REDUCE;
			sum <<= 8;
			s_util.c[0] = *(u_char *)w;
			w = (u_int16_t *)((char *)w + 1);
			mlen--;
			byte_swapped = 1;
		}
		/*
		 * Unroll the loop to make overhead from
		 * branches &c small.
		 */
		while ((mlen -= 32) >= 0) {
			sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
			sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7];
			sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11];
			sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15];
			w += 16;
		}
		mlen += 32;
		while ((mlen -= 8) >= 0) {
			sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3];
			w += 4;
		}
		mlen += 8;
		if (mlen == 0 && byte_swapped == 0)
			continue;
		REDUCE;
		while ((mlen -= 2) >= 0) {
			sum += *w++;
		}
		if (byte_swapped) {
			REDUCE;
			sum <<= 8;
			byte_swapped = 0;
			if (mlen == -1) {
				s_util.c[1] = *(char *)w;
				sum += s_util.s;
				mlen = 0;
			} else
				mlen = -1;
		} else if (mlen == -1)
			s_util.c[0] = *(char *)w;
	}
	if (len)
		panic("in6_cksum: out of data\n");
	if (mlen == -1) {
		/* The last mbuf has odd # of bytes. Follow the
		   standard (the odd byte may be shifted left by 8 bits
		   or not as determined by endian-ness of the machine) */
		s_util.c[1] = 0;
		sum += s_util.s;
	}
	REDUCE;
	return (~sum & 0xffff);		/* one's complement of the folded sum */
}
diff --git a/bsd/netinet6/in6_gif.c b/bsd/netinet6/in6_gif.c
new file mode 100644
index 000000000..c756948dc
--- /dev/null
+++ b/bsd/netinet6/in6_gif.c
@@ -0,0 +1,459 @@
/*	$KAME: in6_gif.c,v 1.27 2000/03/25 07:23:43 sumikawa Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
 *    Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * in6_gif.c
 */

#if defined(__FreeBSD__) && __FreeBSD__ >= 3
#include "opt_inet.h"
#endif

/*
 * NOTE(review): the operands of the #include directives in this hunk
 * were lost during extraction; the bare directives below mirror the
 * original count and conditional layout.  TODO: recover the exact
 * header names from the pristine in6_gif.c.
 */
#include
#include
#include
#include
#include
#include
#if !defined(__FreeBSD__) || __FreeBSD__ < 3
#include
#endif
#include
#include

#include
#include

#include
#include
#if INET
#include
#endif
#include
#if INET6
#include
#include
#include
#include
#include
#endif
#include

#include

#include

#if INET6
extern struct ip6protosw in6_gif_protosw;
#endif

/*
 * in6_gif_output: encapsulate an outbound packet of address family
 * `family' (AF_INET or AF_INET6) in a new outer IPv6 header and send
 * it through the gif tunnel interface `ifp'.  The outer source/
 * destination come from the tunnel endpoints (sc->gif_psrc/gif_pdst);
 * with IFF_LINK0 (multi-destination mode) the destination may instead
 * come from the route `rt'.  With IFF_LINK1 the inner TOS/traffic
 * class is carried into the outer header via ip_ecn_ingress().
 * A cached route in sc->gif_ro6 is reused when it still matches.
 * Returns 0 (via ip6_output) or an errno.  On the explicit error
 * paths the mbuf is freed here; on m_pullup() failure the code relies
 * on m_pullup freeing the chain -- TODO confirm.
 */
int
in6_gif_output(ifp, family, m, rt)
	struct ifnet *ifp;
	int family; /* family of the packet to be encapsulate. */
	struct mbuf *m;
	struct rtentry *rt;
{
	struct gif_softc *sc = (struct gif_softc*)ifp;
	struct sockaddr_in6 *dst = (struct sockaddr_in6 *)&sc->gif_ro6.ro_dst;
	struct sockaddr_in6 *sin6_src = (struct sockaddr_in6 *)sc->gif_psrc;
	struct sockaddr_in6 *sin6_dst = (struct sockaddr_in6 *)sc->gif_pdst;
	struct ip6_hdr *ip6;
	int proto;
	u_int8_t itos, otos;		/* inner and outer TOS/traffic class */

	/* both tunnel endpoints must be configured IPv6 addresses */
	if (sin6_src == NULL || sin6_dst == NULL ||
	    sin6_src->sin6_family != AF_INET6 ||
	    sin6_dst->sin6_family != AF_INET6) {
		m_freem(m);
		return EAFNOSUPPORT;
	}

	/* pick the inner protocol number and capture its TOS byte */
	switch (family) {
#if INET
	case AF_INET:
	    {
		struct ip *ip;

		proto = IPPROTO_IPV4;
		if (m->m_len < sizeof(*ip)) {
			m = m_pullup(m, sizeof(*ip));
			if (!m)
				return ENOBUFS;
		}
		ip = mtod(m, struct ip *);
		itos = ip->ip_tos;
		break;
	    }
#endif
#if INET6
	case AF_INET6:
	    {
		struct ip6_hdr *ip6;
		proto = IPPROTO_IPV6;
		if (m->m_len < sizeof(*ip6)) {
			m = m_pullup(m, sizeof(*ip6));
			if (!m)
				return ENOBUFS;
		}
		ip6 = mtod(m, struct ip6_hdr *);
		/* traffic class sits in bits 20-27 of the flow word */
		itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
		break;
	    }
#endif
	default:
#if DEBUG
		printf("in6_gif_output: warning: unknown family %d passed\n",
			family);
#endif
		m_freem(m);
		return EAFNOSUPPORT;
	}

	/* prepend new IP header */
	M_PREPEND(m, sizeof(struct ip6_hdr), M_DONTWAIT);
	if (m && m->m_len < sizeof(struct ip6_hdr))
		m = m_pullup(m, sizeof(struct ip6_hdr));
	if (m == NULL) {
		printf("ENOBUFS in in6_gif_output %d\n", __LINE__);
		return ENOBUFS;
	}

	/* fill in the outer IPv6 header */
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_flow	= 0;
	ip6->ip6_vfc	&= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc	|= IPV6_VERSION;
	ip6->ip6_plen	= htons((u_short)m->m_pkthdr.len);
	ip6->ip6_nxt	= proto;
	ip6->ip6_hlim	= ip6_gif_hlim;
	ip6->ip6_src	= sin6_src->sin6_addr;
	if (ifp->if_flags & IFF_LINK0) {
		/* multi-destination mode */
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin6_dst->sin6_addr))
			ip6->ip6_dst = sin6_dst->sin6_addr;
		else if (rt) {
			if (family != AF_INET6) {
				m_freem(m);
				return EINVAL;	/*XXX*/
			}
			ip6->ip6_dst = ((struct sockaddr_in6 *)(rt->rt_gateway))->sin6_addr;
		} else {
			m_freem(m);
			return ENETUNREACH;
		}
	} else {
		/* bidirectional configured tunnel mode */
		if (!IN6_IS_ADDR_UNSPECIFIED(&sin6_dst->sin6_addr))
			ip6->ip6_dst = sin6_dst->sin6_addr;
		else {
			m_freem(m);
			return ENETUNREACH;
		}
	}
	if (ifp->if_flags & IFF_LINK1) {
		/* ECN-friendly mode: propagate inner TOS to outer header */
		otos = 0;
		ip_ecn_ingress(ECN_ALLOWED, &otos, &itos);
		ip6->ip6_flow |= htonl((u_int32_t)otos << 20);
	}

	if (dst->sin6_family != sin6_dst->sin6_family ||
	    !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &sin6_dst->sin6_addr)) {
		/* cache route doesn't match */
		bzero(dst, sizeof(*dst));
		dst->sin6_family = sin6_dst->sin6_family;
		dst->sin6_len = sizeof(struct sockaddr_in6);
		dst->sin6_addr = sin6_dst->sin6_addr;
		if (sc->gif_ro6.ro_rt) {
			RTFREE(sc->gif_ro6.ro_rt);
			sc->gif_ro6.ro_rt = NULL;
		}
#if 0
		sc->gif_if.if_mtu = GIF_MTU;
#endif
	}

	if (sc->gif_ro6.ro_rt == NULL) {
		rtalloc((struct route *)&sc->gif_ro6);
		if (sc->gif_ro6.ro_rt == NULL) {
			m_freem(m);
			return ENETUNREACH;
		}
#if 0
		ifp->if_mtu = sc->gif_ro6.ro_rt->rt_ifp->if_mtu
			- sizeof(struct ip6_hdr);
#endif
	}

	return(ip6_output(m, 0, &sc->gif_ro6, 0, 0, NULL));
}

/*
 * in6_gif_input: decapsulation side.  Called from the IPv6 protocol
 * switch with `*mp' holding the outer IPv6 packet, `*offp' the offset
 * of the inner packet, and `proto' the outer next-header value
 * (IPPROTO_IPV4 or IPPROTO_IPV6).  The receiving gif interface is
 * found via encap_getarg(); packets with no matching or down
 * interface are dropped and counted in ip6stat.ip6s_nogif.  With
 * IFF_LINK1 the outer traffic class is merged into the inner header
 * via ip_ecn_egress()/ip6_ecn_egress().  Always returns IPPROTO_DONE.
 */
int in6_gif_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp;
#if 0
	struct gif_softc *sc;
#endif
	struct ifnet *gifp = NULL;
	struct ip6_hdr *ip6;
#if 0
	int i;
#endif
	int af = 0;			/* address family of the inner packet */
	u_int32_t otos;			/* outer flow word (traffic class) */

	ip6 = mtod(m, struct ip6_hdr *);

#if 0
	/* disabled linear search over the gif softc table */
#define satoin6(sa)	(((struct sockaddr_in6 *)(sa))->sin6_addr)
	for (i = 0, sc = gif; i < ngif; i++, sc++) {
		if (sc->gif_psrc == NULL ||
		    sc->gif_pdst == NULL ||
		    sc->gif_psrc->sa_family != AF_INET6 ||
		    sc->gif_pdst->sa_family != AF_INET6) {
			continue;
		}
		if ((sc->gif_if.if_flags & IFF_UP) == 0)
			continue;
		if ((sc->gif_if.if_flags & IFF_LINK0) &&
		    IN6_ARE_ADDR_EQUAL(&satoin6(sc->gif_psrc), &ip6->ip6_dst) &&
		    IN6_IS_ADDR_UNSPECIFIED(&satoin6(sc->gif_pdst))) {
			gifp = &sc->gif_if;
			continue;
		}
		if (IN6_ARE_ADDR_EQUAL(&satoin6(sc->gif_psrc), &ip6->ip6_dst) &&
		    IN6_ARE_ADDR_EQUAL(&satoin6(sc->gif_pdst), &ip6->ip6_src)) {
			gifp = &sc->gif_if;
			break;
		}
	}
#else
	gifp = (struct ifnet *)encap_getarg(m);
#endif

	if (gifp == NULL) {
		m_freem(m);
		ip6stat.ip6s_nogif++;
		return IPPROTO_DONE;
	}

	if ((gifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		ip6stat.ip6s_nogif++;
		return IPPROTO_DONE;
	}

	otos = ip6->ip6_flow;
	m_adj(m, *offp);	/* strip the outer header(s) */

	switch (proto) {
#if INET
	case IPPROTO_IPV4:
	    {
		struct ip *ip;
		u_int8_t otos8;
		af = AF_INET;
		otos8 = (ntohl(otos) >> 20) & 0xff;
		if (m->m_len < sizeof(*ip)) {
			m = m_pullup(m, sizeof(*ip));
			if (!m)
				return IPPROTO_DONE;
		}
		ip = mtod(m, struct ip *);
		if (gifp->if_flags & IFF_LINK1)
			ip_ecn_egress(ECN_ALLOWED, &otos8, &ip->ip_tos);
		break;
	    }
#endif /* INET */
#if INET6
	case IPPROTO_IPV6:
	    {
		struct ip6_hdr *ip6;
		af = AF_INET6;
		if (m->m_len < sizeof(*ip6)) {
			m = m_pullup(m, sizeof(*ip6));
			if (!m)
				return IPPROTO_DONE;
		}
		ip6 = mtod(m, struct ip6_hdr *);
		if (gifp->if_flags & IFF_LINK1)
			ip6_ecn_egress(ECN_ALLOWED, &otos, &ip6->ip6_flow);
		break;
	    }
#endif
	default:
		ip6stat.ip6s_nogif++;
		m_freem(m);
		return IPPROTO_DONE;
	}

	/* hand the decapsulated packet to the gif interface input path */
	gif_input(m, af, gifp);
	return IPPROTO_DONE;
}

/*
 * in6_gif_ioctl: interface ioctl handler for the IPv6 side of gif.
 * NOTE(review): this function continues beyond this hunk; only the
 * visible prologue is shown here.
 */
int
in6_gif_ioctl(ifp, cmd, data)
	struct ifnet *ifp;
#if defined(__FreeBSD__) && __FreeBSD__ < 3
	int cmd;
#else
	u_long cmd;
#endif
	caddr_t data;
{
	struct gif_softc *sc = (struct gif_softc*)ifp;
	struct ifreq *ifr = (struct ifreq*)data;
	int error = 0, size;
	struct sockaddr *sa, *dst, *src;
	const struct encaptab *p;
	struct sockaddr_in6 smask6, dmask6;

	switch (cmd) {
	case SIOCSIFFLAGS:
		/*
		 * whenever we change our idea about multi-destination mode
		 * we need to update encap attachment.
+ */ + if (((ifp->if_flags ^ sc->gif_oflags) & IFF_LINK0) == 0) + break; + if (sc->gif_psrc == NULL || sc->gif_pdst == NULL || + sc->gif_psrc->sa_family != sc->gif_pdst->sa_family) + break; + bzero(&smask6, sizeof(smask6)); + smask6.sin6_addr.s6_addr32[0] = ~0; + smask6.sin6_addr.s6_addr32[1] = ~0; + smask6.sin6_addr.s6_addr32[2] = ~0; + smask6.sin6_addr.s6_addr32[3] = ~0; +#if 0 /* we'll need to do this soon */ + smask6.sin6_scope_id = ~0; +#endif + dmask6 = smask6; + if ((ifp->if_flags & IFF_LINK0) == 0 && + IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)dst)->sin6_addr)) { + bzero(&dmask6, sizeof(dmask6)); +#if 0 /* we'll need to do this soon */ + dmask6.sin6_scope_id = ~0; +#endif + } + p = encap_attach(sc->gif_psrc->sa_family, -1, sc->gif_psrc, + (struct sockaddr *)&smask6, sc->gif_pdst, + (struct sockaddr *)&dmask6, + (struct protosw *)&in6_gif_protosw, &sc->gif_if); + if (p == NULL) { + error = EINVAL; + goto bad; + } + if (sc->encap_cookie != NULL) + (void)encap_detach(sc->encap_cookie); + sc->encap_cookie = p; + sc->gif_oflags = ifp->if_flags; + + break; + +#if INET6 + case SIOCSIFPHYADDR_IN6: +#endif + switch (ifr->ifr_addr.sa_family) { +#if INET6 + case AF_INET6: + src = (struct sockaddr *) + &(((struct in6_aliasreq *)data)->ifra_addr); + dst = (struct sockaddr *) + &(((struct in6_aliasreq *)data)->ifra_dstaddr); + + bzero(&smask6, sizeof(smask6)); + smask6.sin6_addr.s6_addr32[0] = ~0; + smask6.sin6_addr.s6_addr32[1] = ~0; + smask6.sin6_addr.s6_addr32[2] = ~0; + smask6.sin6_addr.s6_addr32[3] = ~0; +#if 0 /* we'll need to do this soon */ + smask6.sin6_scope_id = ~0; +#endif + dmask6 = smask6; + if ((ifp->if_flags & IFF_LINK0) == 0 && + IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *)dst)->sin6_addr)) { + bzero(&dmask6, sizeof(dmask6)); +#if 0 /* we'll need to do this soon */ + dmask6.sin6_scope_id = ~0; +#endif + } + size = sizeof(struct sockaddr_in6); + break; +#endif /* INET6 */ + default: + error = EAFNOSUPPORT; + goto bad; + } + + if 
(sc->encap_cookie != NULL) + (void)encap_detach(sc->encap_cookie); + if (sc->gif_psrc != NULL) { + _FREE((caddr_t)sc->gif_psrc, M_IFADDR); + sc->gif_psrc = NULL; + } + if (sc->gif_pdst != NULL) { + _FREE((caddr_t)sc->gif_pdst, M_IFADDR); + sc->gif_pdst = NULL; + } + + p = encap_attach(ifr->ifr_addr.sa_family, -1, src, + (struct sockaddr *)&smask6, dst, + (struct sockaddr *)&dmask6, + (struct protosw *)&in6_gif_protosw, &sc->gif_if); + if (p == NULL) { + error = EINVAL; + goto bad; + } + sc->encap_cookie = p; + sc->gif_oflags = ifp->if_flags; + + sa = (struct sockaddr *)_MALLOC(size, M_IFADDR, M_WAITOK); + bcopy((caddr_t)src, (caddr_t)sa, size); + sc->gif_psrc = sa; + + sa = (struct sockaddr *)_MALLOC(size, M_IFADDR, M_WAITOK); + bcopy((caddr_t)dst, (caddr_t)sa, size); + sc->gif_pdst = sa; + + ifp->if_flags |= IFF_UP; + if_up(ifp); /* send up RTM_IFINFO */ + + error = 0; + break; + default: + error = EINVAL; + goto bad; + } + + bad: + return error; +} diff --git a/bsd/netinet6/in6_gif.h b/bsd/netinet6/in6_gif.h new file mode 100644 index 000000000..7755c69e3 --- /dev/null +++ b/bsd/netinet6/in6_gif.h @@ -0,0 +1,45 @@ +/* $KAME: in6_gif.h,v 1.3 2000/02/22 14:04:17 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETINET6_IN6_GIF_H_ +#define _NETINET6_IN6_GIF_H_ + +#define GIF_HLIM 30 + +int in6_gif_input __P((struct mbuf **, int *, int)); +int in6_gif_output __P((struct ifnet *, int, struct mbuf *, struct rtentry *)); +#if defined(__FreeBSD__) && __FreeBSD__ < 3 || !defined(__APPLE__) +int in6_gif_ioctl __P((struct ifnet *, int, caddr_t)); +#else +int in6_gif_ioctl __P((struct ifnet *, u_long, caddr_t)); +#endif + +#endif /*_NETINET6_IN6_GIF_H_*/ diff --git a/bsd/netinet6/in6_ifattach.c b/bsd/netinet6/in6_ifattach.c new file mode 100644 index 000000000..722b8f8b6 --- /dev/null +++ b/bsd/netinet6/in6_ifattach.c @@ -0,0 +1,862 @@ +/* $KAME: in6_ifattach.c,v 1.41 2000/03/16 07:05:34 jinmei Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#ifdef __bsdi__ +#include +#elif defined(__OpenBSD__) +#include +#else +#include +#endif + +#include +#include +#include +#include + +#include +#include +#ifndef __NetBSD__ +#include +#endif + +#include +#include +#include +#include +#include + +#include + +static struct in6_addr llsol; + +struct in6_ifstat **in6_ifstat = NULL; +struct icmp6_ifstat **icmp6_ifstat = NULL; +size_t in6_ifstatmax = 0; +size_t icmp6_ifstatmax = 0; +unsigned long in6_maxmtu = 0; + +int found_first_ifid = 0; +#define IFID_LEN 8 +static u_int8_t first_ifid[IFID_LEN]; + +static int laddr_to_eui64 __P((u_int8_t *, u_int8_t *, size_t)); +static int gen_rand_eui64 __P((u_int8_t *)); + +#define DEBUG 1 + +static int +laddr_to_eui64(dst, src, len) + u_int8_t *dst; + u_int8_t *src; + size_t len; +{ + static u_int8_t zero[8]; + + bzero(zero, sizeof(zero)); + + switch (len) { + case 6: + if (bcmp(zero, src, 6) == 0) + return EINVAL; + dst[0] = src[0]; + dst[1] = src[1]; + dst[2] = src[2]; + dst[3] = 0xff; + dst[4] = 0xfe; + dst[5] = src[3]; + dst[6] = src[4]; + dst[7] = src[5]; + break; + case 8: + if (bcmp(zero, src, 8) == 0) + return EINVAL; + bcopy(src, dst, len); + break; + default: + return EINVAL; + } + + return 0; +} + +/* + * Generate a last-resort interface identifier, when the machine has no + * IEEE802/EUI64 address sources. + * The address should be random, and should not change across reboot. + */ +static int +gen_rand_eui64(dst) + u_int8_t *dst; +{ + MD5_CTX ctxt; + u_int8_t digest[16]; +#if defined(__FreeBSD__) || defined (__APPLE__) + int hostnamelen = strlen(hostname); +#endif + + /* generate 8bytes of pseudo-random value. */ + bzero(&ctxt, sizeof(ctxt)); + MD5Init(&ctxt); + MD5Update(&ctxt, hostname, hostnamelen); + MD5Final(digest, &ctxt); + + /* assumes sizeof(digest) > sizeof(first_ifid) */ + bcopy(digest, dst, 8); + + /* make sure to set "u" bit to local, and "g" bit to individual. 
*/ + dst[0] &= 0xfe; + dst[0] |= 0x02; /* EUI64 "local" */ + + return 0; +} + +/* + * Find first ifid on list of interfaces. + * This is assumed that ifp0's interface token (for example, IEEE802 MAC) + * is globally unique. We may need to have a flag parameter in the future. + */ +int +in6_ifattach_getifid(ifp0) + struct ifnet *ifp0; +{ + struct ifnet *ifp; + struct ifaddr *ifa; + u_int8_t *addr = NULL; + int addrlen = 0; + struct sockaddr_dl *sdl; + + if (found_first_ifid) + return 0; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifp = ifnet; ifp; ifp = ifp->if_next) +#else + for (ifp = ifnet.tqh_first; ifp; ifp = ifp->if_list.tqe_next) +#endif + { + if (ifp0 != NULL && ifp0 != ifp) + continue; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_LINK) + continue; + sdl = (struct sockaddr_dl *)ifa->ifa_addr; + if (sdl == NULL) + continue; + if (sdl->sdl_alen == 0) + continue; + switch (ifp->if_type) { + case IFT_ETHER: + case IFT_FDDI: + case IFT_ATM: + /* IEEE802/EUI64 cases - what others? */ + addr = LLADDR(sdl); + addrlen = sdl->sdl_alen; + /* + * to copy ifid from IEEE802/EUI64 interface, + * u bit of the source needs to be 0. + */ + if ((addr[0] & 0x02) != 0) + break; + goto found; + case IFT_ARCNET: + /* + * ARCnet interface token cannot be used as + * globally unique identifier due to its + * small bitwidth. 
+ */ + break; + default: + break; + } + } + } +#if DEBUG + printf("in6_ifattach_getifid: failed to get EUI64"); +#endif + return EADDRNOTAVAIL; + +found: + if (laddr_to_eui64(first_ifid, addr, addrlen) == 0) + found_first_ifid = 1; + + if (found_first_ifid) { + printf("%s: supplying EUI64: " + "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + if_name(ifp), + first_ifid[0] & 0xff, first_ifid[1] & 0xff, + first_ifid[2] & 0xff, first_ifid[3] & 0xff, + first_ifid[4] & 0xff, first_ifid[5] & 0xff, + first_ifid[6] & 0xff, first_ifid[7] & 0xff); + + /* invert u bit to convert EUI64 to RFC2373 interface ID. */ + first_ifid[0] ^= 0x02; + + return 0; + } else { +#if DEBUG + printf("in6_ifattach_getifid: failed to get EUI64"); +#endif + return EADDRNOTAVAIL; + } +} + +/* + * XXX multiple loopback interface needs more care. for instance, + * nodelocal address needs to be configured onto only one of them. + */ +void +in6_ifattach(ifp, type, laddr, noloop) + struct ifnet *ifp; + u_int type; + caddr_t laddr; + /* size_t laddrlen; */ + int noloop; +{ + static size_t if_indexlim = 8; + struct sockaddr_in6 mltaddr; + struct sockaddr_in6 mltmask; + struct sockaddr_in6 gate; + struct sockaddr_in6 mask; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + struct ifaddr **ifap; +#endif + + struct in6_ifaddr *ia, *ib, *oia; + struct ifaddr *ifa; + int rtflag = 0; + int s; + int error; + + if (type == IN6_IFT_P2P && found_first_ifid == 0) { + printf("%s: no ifid available for IPv6 link-local address\n", + if_name(ifp)); +#if 0 + return; +#else + /* last resort */ + if (gen_rand_eui64(first_ifid) == 0) { + printf("%s: using random value as EUI64: " + "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + if_name(ifp), + first_ifid[0] & 0xff, first_ifid[1] & 0xff, + first_ifid[2] & 0xff, first_ifid[3] & 0xff, + first_ifid[4] & 0xff, first_ifid[5] & 0xff, + first_ifid[6] & 0xff, first_ifid[7] & 0xff); + /* + * invert u bit to convert EUI64 to RFC2373 interface + * ID. 
+ */ + first_ifid[0] ^= 0x02; + + found_first_ifid = 1; + } +#endif + } + + if ((ifp->if_flags & IFF_MULTICAST) == 0) { + printf("%s: not multicast capable, IPv6 not enabled\n", + if_name(ifp)); + return; + } + + /* + * We have some arrays that should be indexed by if_index. + * since if_index will grow dynamically, they should grow too. + * struct in6_ifstat **in6_ifstat + * struct icmp6_ifstat **icmp6_ifstat + */ + if (in6_ifstat == NULL || icmp6_ifstat == NULL + || if_index >= if_indexlim) { + size_t n; + caddr_t q; + size_t olim; + + olim = if_indexlim; + while (if_index >= if_indexlim) + if_indexlim <<= 1; + + /* grow in6_ifstat */ + n = if_indexlim * sizeof(struct in6_ifstat *); + q = (caddr_t)_MALLOC(n, M_IFADDR, M_WAITOK); + bzero(q, n); + if (in6_ifstat) { + bcopy((caddr_t)in6_ifstat, q, + olim * sizeof(struct in6_ifstat *)); + _FREE((caddr_t)in6_ifstat, M_IFADDR); + } + in6_ifstat = (struct in6_ifstat **)q; + in6_ifstatmax = if_indexlim; + + /* grow icmp6_ifstat */ + n = if_indexlim * sizeof(struct icmp6_ifstat *); + q = (caddr_t)_MALLOC(n, M_IFADDR, M_WAITOK); + bzero(q, n); + if (icmp6_ifstat) { + bcopy((caddr_t)icmp6_ifstat, q, + olim * sizeof(struct icmp6_ifstat *)); + _FREE((caddr_t)icmp6_ifstat, M_IFADDR); + } + icmp6_ifstat = (struct icmp6_ifstat **)q; + icmp6_ifstatmax = if_indexlim; + } + + /* + * To prevent to assign link-local address to PnP network + * cards multiple times. + * This is lengthy for P2P and LOOP but works. 
+ */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + ifa = ifp->if_addrlist; + if (ifa != NULL) { + for ( ; ifa; ifa = ifa->ifa_next) { + ifap = &ifa->ifa_next; + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_IS_ADDR_LINKLOCAL(&satosin6(ifa->ifa_addr)->sin6_addr)) + return; + } + } else + ifap = &ifp->if_addrlist; +#else + ifa = TAILQ_FIRST(&ifp->if_addrlist); + if (ifa != NULL) { + for ( ; ifa; ifa = TAILQ_NEXT(ifa, ifa_list)) { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_IS_ADDR_LINKLOCAL(&satosin6(ifa->ifa_addr)->sin6_addr)) + return; + } + } else { + TAILQ_INIT(&ifp->if_addrlist); + } +#endif + + /* + * link-local address + */ + ia = (struct in6_ifaddr *)_MALLOC(sizeof(*ia), M_IFADDR, M_WAITOK); + bzero((caddr_t)ia, sizeof(*ia)); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + if (ifp->if_flags & IFF_POINTOPOINT) + ia->ia_ifa.ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr; + else + ia->ia_ifa.ifa_dstaddr = NULL; + ia->ia_ifa.ifa_netmask = (struct sockaddr *)&ia->ia_prefixmask; + ia->ia_ifp = ifp; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + *ifap = (struct ifaddr *)ia; +#else + TAILQ_INSERT_TAIL(&ifp->if_addrlist, (struct ifaddr *)ia, ifa_list); +#endif + ia->ia_ifa.ifa_refcnt++; + + /* + * Also link into the IPv6 address chain beginning with in6_ifaddr. + * kazu opposed it, but itojun & jinmei wanted. 
+ */ + if ((oia = in6_ifaddr) != NULL) { + for (; oia->ia_next; oia = oia->ia_next) + continue; + oia->ia_next = ia; + } else + in6_ifaddr = ia; + ia->ia_ifa.ifa_refcnt++; + + ia->ia_prefixmask.sin6_len = sizeof(struct sockaddr_in6); + ia->ia_prefixmask.sin6_family = AF_INET6; + ia->ia_prefixmask.sin6_addr = in6mask64; + + bzero(&ia->ia_addr, sizeof(struct sockaddr_in6)); + ia->ia_addr.sin6_len = sizeof(struct sockaddr_in6); + ia->ia_addr.sin6_family = AF_INET6; + ia->ia_addr.sin6_addr.s6_addr16[0] = htons(0xfe80); + ia->ia_addr.sin6_addr.s6_addr16[1] = htons(ifp->if_index); + ia->ia_addr.sin6_addr.s6_addr32[1] = 0; + + switch (type) { + case IN6_IFT_LOOP: + ia->ia_addr.sin6_addr.s6_addr32[2] = 0; + ia->ia_addr.sin6_addr.s6_addr32[3] = htonl(1); + if (strcmp(ifp->if_name, "lo") == 0) { + ia->ia_ifa.ifa_dlt = lo_attach_inet(ifp); + printf("in6_ifattach: IFT_LOOP setting initial ifp=%s%d initial ia=%x ifa_dlt=%x\n", + ifp->if_name, ifp->if_unit, ia, ia->ia_ifa.ifa_dlt); + } + break; + case IN6_IFT_802: + ia->ia_ifa.ifa_rtrequest = nd6_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + rtflag = RTF_CLONING; + if (strcmp(ifp->if_name, "en") == 0) { + ia->ia_ifa.ifa_dlt = ether_attach_inet6(ifp); + printf("in6_ifattach: IFT_802 setting initial ifp=%s%d initial ia=%x ifa_dlt=%x\n", + ifp->if_name, ifp->if_unit, ia, ia->ia_ifa.ifa_dlt); + } + + /* fall through */ + case IN6_IFT_P2P802: + ia->ia_ifa.ifa_rtrequest = nd6_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + rtflag = RTF_CLONING; + if (laddr == NULL) + break; + /* XXX use laddrlen */ + if (laddr_to_eui64(&ia->ia_addr.sin6_addr.s6_addr8[8], + laddr, 6) != 0) { + break; + } + /* invert u bit to convert EUI64 to RFC2373 interface ID. 
*/ + ia->ia_addr.sin6_addr.s6_addr8[8] ^= 0x02; + if (found_first_ifid == 0) + in6_ifattach_getifid(ifp); + bzero(&ia->ia_dstaddr, sizeof(struct sockaddr_in6)); + ia->ia_dstaddr.sin6_len = sizeof(struct sockaddr_in6); + ia->ia_dstaddr.sin6_family = AF_INET6; + + if (ia->ia_ifa.ifa_dlt == 0) { + ia->ia_ifa.ifa_dlt = ifp; +#if DEBUG + printf("in6_ifattach: IFT_P2P802 setting initial ifp=%s%d initial ia=%x ifa_dlt=%x\n", + ifp->if_name, ifp->if_unit, ia, ia->ia_ifa.ifa_dlt); +#endif + } + break; + case IN6_IFT_P2P: + ia->ia_ifa.ifa_rtrequest = nd6_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + rtflag = RTF_CLONING; + bcopy((caddr_t)first_ifid, + (caddr_t)&ia->ia_addr.sin6_addr.s6_addr8[8], + IFID_LEN); + bzero(&ia->ia_dstaddr, sizeof(struct sockaddr_in6)); + ia->ia_dstaddr.sin6_len = sizeof(struct sockaddr_in6); + ia->ia_dstaddr.sin6_family = AF_INET6; + + if (strcmp(ifp->if_name, "gif") == 0) { + ia->ia_ifa.ifa_dlt = gif_attach_inet(ifp); +#if DEBUG + printf("in6_ifattach: IFT_P2P setting initial ifp=%s%d initial ia=%x ifa_dlt=%x\n", + ifp->if_name, ifp->if_unit, ia, ia->ia_ifa.ifa_dlt); +#endif + } + break; + case IN6_IFT_ARCNET: + ia->ia_ifa.ifa_rtrequest = nd6_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + rtflag = RTF_CLONING; + if (laddr == NULL) + break; + + /* make non-global IF id out of link-level address */ + bzero(&ia->ia_addr.sin6_addr.s6_addr8[8], 7); + ia->ia_addr.sin6_addr.s6_addr8[15] = *laddr; + ia->ia_ifa.ifa_dlt = ifp; +#if DEBUG + printf("in6_ifattach: IFT_ARCNET setting initial ifp=%s%d initial ia=%x ifa_dlt=%x\n", + ifp->if_name, ifp->if_unit, ia, ia->ia_ifa.ifa_dlt); +#endif + } + + ia->ia_ifa.ifa_metric = ifp->if_metric; + + + /* + * give the interface a chance to initialize, in case this + * is the first address to be added. 
+ */ + s = splimp(); +#ifdef __APPLE__ + error = dlil_ioctl(0, ifp, SIOCSIFADDR, (caddr_t)ia); +#else + error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, (caddr_t)ia); +#endif + splx(s); +#if DEBUG + printf("in6_ifattach: Calling SIOCSIFADDR for if=%s%d ia=%x error=%x\n", ifp->if_name, ifp->if_unit, ia, error); +#endif + if (error == EOPNOTSUPP) + error = 0; + + if (error) { + switch (error) { + case EAFNOSUPPORT: + printf("%s: IPv6 not supported\n", + if_name(ifp)); + break; + default: + printf("%s: SIOCSIFADDR error %d\n", + if_name(ifp), error); + break; + } + + /* undo changes */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + *ifap = NULL; +#else + TAILQ_REMOVE(&ifp->if_addrlist, (struct ifaddr *)ia, ifa_list); +#endif + IFAFREE(&ia->ia_ifa); + if (oia) + oia->ia_next = ia->ia_next; + else + in6_ifaddr = ia->ia_next; + IFAFREE(&ia->ia_ifa); + return; + } + + /* add route to the interface. */ + rtrequest(RTM_ADD, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&ia->ia_prefixmask, + RTF_UP|rtflag, + (struct rtentry **)0); + ia->ia_flags |= IFA_ROUTE; + + if (type == IN6_IFT_P2P || type == IN6_IFT_P2P802) { + /* + * route local address to loopback + */ + bzero(&gate, sizeof(gate)); + gate.sin6_len = sizeof(struct sockaddr_in6); + gate.sin6_family = AF_INET6; + gate.sin6_addr = in6addr_loopback; + bzero(&mask, sizeof(mask)); + mask.sin6_len = sizeof(struct sockaddr_in6); + mask.sin6_family = AF_INET6; + mask.sin6_addr = in6mask64; + rtrequest(RTM_ADD, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&gate, + (struct sockaddr *)&mask, + RTF_UP|RTF_HOST, + (struct rtentry **)0); + } + + /* + * loopback address + */ + ib = (struct in6_ifaddr *)NULL; + if (type == IN6_IFT_LOOP) { + ib = (struct in6_ifaddr *) + _MALLOC(sizeof(*ib), M_IFADDR, M_WAITOK); + bzero((caddr_t)ib, sizeof(*ib)); + ib->ia_ifa.ifa_addr = (struct sockaddr *)&ib->ia_addr; + ib->ia_ifa.ifa_dstaddr = (struct sockaddr *)&ib->ia_dstaddr; 
+ ib->ia_ifa.ifa_netmask = (struct sockaddr *)&ib->ia_prefixmask; + ib->ia_ifa.ifa_dlt = lo_attach_inet(ifp); + ib->ia_ifp = ifp; + + ia->ia_next = ib; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + ia->ia_ifa.ifa_next = (struct ifaddr *)ib; +#else + TAILQ_INSERT_TAIL(&ifp->if_addrlist, (struct ifaddr *)ib, + ifa_list); +#endif + ib->ia_ifa.ifa_refcnt++; + + ib->ia_prefixmask.sin6_len = sizeof(struct sockaddr_in6); + ib->ia_prefixmask.sin6_family = AF_INET6; + ib->ia_prefixmask.sin6_addr = in6mask128; + ib->ia_addr.sin6_len = sizeof(struct sockaddr_in6); + ib->ia_addr.sin6_family = AF_INET6; + ib->ia_addr.sin6_addr = in6addr_loopback; + + /* + * Always initialize ia_dstaddr (= broadcast address) + * to loopback address, to make getifaddr happier. + * + * For BSDI, it is mandatory. The BSDI version of + * ifa_ifwithroute() rejects to add a route to the loopback + * interface. Even for other systems, loopback looks somewhat + * special. + */ + ib->ia_dstaddr.sin6_len = sizeof(struct sockaddr_in6); + ib->ia_dstaddr.sin6_family = AF_INET6; + ib->ia_dstaddr.sin6_addr = in6addr_loopback; + + ib->ia_ifa.ifa_metric = ifp->if_metric; + + rtrequest(RTM_ADD, + (struct sockaddr *)&ib->ia_addr, + (struct sockaddr *)&ib->ia_addr, + (struct sockaddr *)&ib->ia_prefixmask, + RTF_UP|RTF_HOST, + (struct rtentry **)0); + + ib->ia_flags |= IFA_ROUTE; + } + + /* + * join multicast + */ + if (ifp->if_flags & IFF_MULTICAST) { + int error; /* not used */ + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + /* Restore saved multicast addresses(if any). 
*/ + in6_restoremkludge(ia, ifp); +#endif + + bzero(&mltmask, sizeof(mltmask)); + mltmask.sin6_len = sizeof(struct sockaddr_in6); + mltmask.sin6_family = AF_INET6; + mltmask.sin6_addr = in6mask32; + + /* + * join link-local all-nodes address + */ + bzero(&mltaddr, sizeof(mltaddr)); + mltaddr.sin6_len = sizeof(struct sockaddr_in6); + mltaddr.sin6_family = AF_INET6; + mltaddr.sin6_addr = in6addr_linklocal_allnodes; + mltaddr.sin6_addr.s6_addr16[1] = htons(ifp->if_index); + rtrequest(RTM_ADD, + (struct sockaddr *)&mltaddr, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&mltmask, + RTF_UP|RTF_CLONING, /* xxx */ + (struct rtentry **)0); + (void)in6_addmulti(&mltaddr.sin6_addr, ifp, &error); + + if (type == IN6_IFT_LOOP) { + /* + * join node-local all-nodes address + */ + mltaddr.sin6_addr = in6addr_nodelocal_allnodes; + rtrequest(RTM_ADD, + (struct sockaddr *)&mltaddr, + (struct sockaddr *)&ib->ia_addr, + (struct sockaddr *)&mltmask, + RTF_UP, + (struct rtentry **)0); + (void)in6_addmulti(&mltaddr.sin6_addr, ifp, &error); + } else { + /* + * join solicited multicast address + */ + bzero(&llsol, sizeof(llsol)); + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr32[3] = ia->ia_addr.sin6_addr.s6_addr32[3]; + llsol.s6_addr8[12] = 0xff; + (void)in6_addmulti(&llsol, ifp, &error); + } + } + + /* update dynamically. 
*/ + if (in6_maxmtu < ifp->if_mtu) + in6_maxmtu = ifp->if_mtu; + + if (in6_ifstat[ifp->if_index] == NULL) { + in6_ifstat[ifp->if_index] = (struct in6_ifstat *) + _MALLOC(sizeof(struct in6_ifstat), M_IFADDR, M_WAITOK); + bzero(in6_ifstat[ifp->if_index], sizeof(struct in6_ifstat)); + } + if (icmp6_ifstat[ifp->if_index] == NULL) { + icmp6_ifstat[ifp->if_index] = (struct icmp6_ifstat *) + _MALLOC(sizeof(struct icmp6_ifstat), M_IFADDR, M_WAITOK); + bzero(icmp6_ifstat[ifp->if_index], sizeof(struct icmp6_ifstat)); + } + + /* initialize NDP variables */ + nd6_ifattach(ifp); + + /* mark the address TENTATIVE, if needed. */ + switch (ifp->if_type) { + case IFT_ARCNET: + case IFT_ETHER: + case IFT_FDDI: +#if 0 + case IFT_ATM: + case IFT_SLIP: + case IFT_PPP: +#endif + ia->ia6_flags |= IN6_IFF_TENTATIVE; + /* nd6_dad_start() will be called in in6_if_up */ + break; + case IFT_DUMMY: + case IFT_GIF: /*XXX*/ + case IFT_LOOP: + case IFT_FAITH: + default: + break; + } + + return; +} + +/* + * NOTE: in6_ifdetach() does not support loopback if at this moment. + */ +void +in6_ifdetach(ifp) + struct ifnet *ifp; +{ + struct in6_ifaddr *ia, *oia; + struct ifaddr *ifa; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + struct ifaddr *ifaprev = NULL; +#endif + struct rtentry *rt; + short rtflags; + struct sockaddr_in6 sin6; + struct in6_multi *in6m; +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + struct in6_multi *in6m_next; +#endif + + /* nuke prefix list. 
this may try to remove some of ifaddrs as well */ + in6_purgeprefix(ifp); + + /* remove neighbor management table */ + nd6_purge(ifp); + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6 + || !IN6_IS_ADDR_LINKLOCAL(&satosin6(&ifa->ifa_addr)->sin6_addr)) { +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + ifaprev = ifa; +#endif + continue; + } + + ia = (struct in6_ifaddr *)ifa; + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + /* leave from all multicast groups joined */ + while ((in6m = LIST_FIRST(&oia->ia6_multiaddrs)) != NULL) + in6_delmulti(in6m); +#endif + + /* remove from the routing table */ + if ((ia->ia_flags & IFA_ROUTE) + && (rt = rtalloc1((struct sockaddr *)&ia->ia_addr, 0 +#if defined (__FreeBSD__) || defined (__APPLE__) + , 0UL +#endif + ))) { + rtflags = rt->rt_flags; + rtfree(rt); + rtrequest(RTM_DELETE, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&ia->ia_prefixmask, + rtflags, (struct rtentry **)0); + } + + /* remove from the linked list */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if (ifaprev) + ifaprev->ifa_next = ifa->ifa_next; + else + ifp->if_addrlist = ifa->ifa_next; +#else + TAILQ_REMOVE(&ifp->if_addrlist, (struct ifaddr *)ia, ifa_list); +#endif + + /* also remove from the IPv6 address chain(itojun&jinmei) */ + oia = ia; + if (oia == (ia = in6_ifaddr)) + in6_ifaddr = ia->ia_next; + else { + while (ia->ia_next && (ia->ia_next != oia)) + ia = ia->ia_next; + if (ia->ia_next) + ia->ia_next = oia->ia_next; +#if DEBUG + else + printf("%s: didn't unlink in6ifaddr from " + "list\n", if_name(ifp)); +#endif + } + + _FREE(ia, M_IFADDR); + } + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + /* leave 
from all multicast groups joined */ + for (in6m = LIST_FIRST(&in6_multihead); in6m; in6m = in6m_next) { + in6m_next = LIST_NEXT(in6m, in6m_entry); + if (in6m->in6m_ifp != ifp) + continue; + in6_delmulti(in6m); + in6m = NULL; + } +#endif + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + /* cleanup multicast address kludge table, if there is any */ + in6_purgemkludge(ifp); +#endif + + /* remove neighbor management table */ + nd6_purge(ifp); + + /* remove route to link-local allnodes multicast (ff02::1) */ + bzero(&sin6, sizeof(sin6)); + sin6.sin6_len = sizeof(struct sockaddr_in6); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = in6addr_linklocal_allnodes; + sin6.sin6_addr.s6_addr16[1] = htons(ifp->if_index); +#if !defined(__FreeBSD__) && !defined (__APPLE__) + if ((rt = rtalloc1((struct sockaddr *)&sin6, 0)) != NULL) +#else + if ((rt = rtalloc1((struct sockaddr *)&sin6, 0, 0UL)) != NULL) +#endif + { + rtrequest(RTM_DELETE, (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0); + rtfree(rt); + } +} diff --git a/bsd/netinet6/in6_ifattach.h b/bsd/netinet6/in6_ifattach.h new file mode 100644 index 000000000..4905f543b --- /dev/null +++ b/bsd/netinet6/in6_ifattach.h @@ -0,0 +1,50 @@ +/* $KAME: in6_ifattach.h,v 1.4 2000/02/22 14:04:18 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETINET6_IN6_IFATTACH_H_ +#define _NETINET6_IN6_IFATTACH_H_ + +#if KERNEL +extern int found_first_ifid; + +int in6_ifattach_getifid __P((struct ifnet *)); +void in6_ifattach_p2p __P((void)); +void in6_ifattach __P((struct ifnet *, u_int, caddr_t, int)); +void in6_ifdetach __P((struct ifnet *)); +#endif /* KERNEL */ + +#define IN6_IFT_LOOP 1 +#define IN6_IFT_P2P 2 +#define IN6_IFT_802 3 +#define IN6_IFT_P2P802 4 +#define IN6_IFT_ARCNET 5 + +#endif /* _NETINET6_IN6_IFATTACH_H_ */ diff --git a/bsd/netinet6/in6_pcb.c b/bsd/netinet6/in6_pcb.c new file mode 100644 index 000000000..daa61ee2f --- /dev/null +++ b/bsd/netinet6/in6_pcb.c @@ -0,0 +1,1182 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
 *
 * @(#)in_pcb.c	8.2 (Berkeley) 1/4/94
 */

#ifdef __NetBSD__	/*XXX*/
#include "opt_ipsec.h"
#endif

/*
 * NOTE(review): the angle-bracketed targets of the #include directives
 * below were lost when this file was extracted (the <...> text was
 * stripped).  Restore them from the original bsd/netinet6/in6_pcb.c
 * before building; they are reproduced here exactly as found.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "faith.h"

#if IPSEC
#include
#include
#include
#endif /* IPSEC */

/* the unspecified IPv6 address (all zeroes); handy "::" constant */
struct in6_addr zeroin6_addr;

/*
 * Bind an IPv6 PCB to the local address and/or port carried in "nam".
 * When nam is NULL, or carries port 0, an ephemeral port is chosen here
 * (direction and range depend on INP_HIGHPORT/INP_LOWPORT).  Enforces
 * SO_REUSEADDR/SO_REUSEPORT conflict rules and the reserved-port
 * privilege check.  Returns 0 or an errno.
 */
int
in6_pcbbind(inp, nam, p)
	register struct inpcb *inp;
	struct sockaddr *nam;
	struct proc *p;
{
	struct socket *so = inp->inp_socket;
	unsigned short *lastport;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)NULL;
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	u_short lport = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);
	int error;

	if (!in6_ifaddr) /* XXX broken! */
		return (EADDRNOTAVAIL);
	/* already bound (either port or address)? */
	if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
		return(EINVAL);
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0)
		wild = INPLOOKUP_WILDCARD;
	if (nam) {
		if (nam->sa_len != sizeof(*sin6))
			return(EINVAL);
		/*
		 * We should check the family, but old programs
		 * incorrectly fail to initialize it.
		 */
		if (nam->sa_family != AF_INET6)
			return(EAFNOSUPPORT);
		sin6 = (struct sockaddr_in6 *)nam;

		/*
		 * If the scope of the destination is link-local, embed the
		 * interface index in the address (s6_addr16[1], the KAME
		 * embedded-scope convention).
		 */
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			/* XXX boundary check is assumed to be already done. */
			/* XXX sin6_scope_id is weaker than advanced-api. */
			struct in6_pktinfo *pi;
			if (inp->in6p_outputopts &&
			    (pi = inp->in6p_outputopts->ip6po_pktinfo) &&
			    pi->ipi6_ifindex) {
				sin6->sin6_addr.s6_addr16[1]
					= htons(pi->ipi6_ifindex);
			} else if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)
				&& inp->in6p_moptions
				&& inp->in6p_moptions->im6o_multicast_ifp) {
				sin6->sin6_addr.s6_addr16[1] =
					htons(inp->in6p_moptions->im6o_multicast_ifp->if_index);
			} else if (sin6->sin6_scope_id) {
				/* boundary check */
				if (sin6->sin6_scope_id < 0
				 || if_index < sin6->sin6_scope_id) {
					return ENXIO;  /* XXX EINVAL? */
				}
				sin6->sin6_addr.s6_addr16[1]
					= htons(sin6->sin6_scope_id & 0xffff);/*XXX*/
				/* this must be cleared for ifa_ifwithaddr() */
				sin6->sin6_scope_id = 0;
			}
		}

		lport = sin6->sin6_port;
		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			struct ifaddr *ia = NULL;

			sin6->sin6_port = 0;		/* yech... */
			if ((ia = ifa_ifwithaddr((struct sockaddr *)sin6)) == 0)
				return(EADDRNOTAVAIL);

			/*
			 * XXX: bind to an anycast address might accidentally
			 * cause sending a packet with anycast source address.
			 */
			if (ia &&
			    ((struct in6_ifaddr *)ia)->ia6_flags &
			    (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|
			     IN6_IFF_DETACHED|IN6_IFF_DEPRECATED)) {
				return(EADDRNOTAVAIL);
			}
		}
		if (lport) {
			struct inpcb *t;

			/* GROSS */
			/* reserved ports require privilege */
			if (ntohs(lport) < IPV6PORT_RESERVED && p &&
#if 0
			    suser(p->p_ucred, &p->p_acflag))
#else
			    ((so->so_state & SS_PRIV) == 0))
#endif
				return(EACCES);

			/*
			 * Non-root may not steal a port another uid holds
			 * unless both sides opted into SO_REUSEPORT.
			 */
			if (so->so_uid &&
			    !IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				t = in6_pcblookup_local(inp->inp_pcbinfo,
							&sin6->sin6_addr,
							lport,
							INPLOOKUP_WILDCARD);
				if (t &&
				    (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
				     !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) ||
				     (t->inp_socket->so_options &
				      SO_REUSEPORT) == 0) &&
				    so->so_uid != t->inp_socket->so_uid)
					return (EADDRINUSE);
			}
			t = in6_pcblookup_local(pcbinfo, &sin6->sin6_addr,
						lport, wild);
			if (t && (reuseport & t->inp_socket->so_options) == 0)
				return(EADDRINUSE);
		}
		inp->in6p_laddr = sin6->sin6_addr;
	}
	if (lport == 0) {
		/* choose an ephemeral port from the configured range */
		ushort first, last;
		int count;

		inp->inp_flags |= INP_ANONPORT;

		if (inp->inp_flags & INP_HIGHPORT) {
			first = ipport_hifirstauto;	/* sysctl */
			last  = ipport_hilastauto;
			lastport = &pcbinfo->lasthi;
		} else if (inp->inp_flags & INP_LOWPORT) {
#if 0
			if (p && (error = suser(p->p_ucred, &p->p_acflag)))
				return error;
#else
			if ((so->so_state & SS_PRIV) == 0)
				return (EPERM);
#endif
			first = ipport_lowfirstauto;	/* 1023 */
			last  = ipport_lowlastauto;	/* 600 */
			lastport = &pcbinfo->lastlow;
		} else {
			first = ipport_firstauto;	/* sysctl */
			last  = ipport_lastauto;
			lastport = &pcbinfo->lastport;
		}
		/*
		 * Simple check to ensure all ports are not used up causing
		 * a deadlock here.
		 *
		 * We split the two cases (up and down) so that the direction
		 * is not being tested on each round of the loop.
		 */
		if (first > last) {
			/*
			 * counting down
			 */
			count = first - last;

			do {
				if (count-- < 0) {	/* completely used? */
					/*
					 * Undo any address bind that may have
					 * occurred above.
					 */
					inp->in6p_laddr = in6addr_any;
					return (EAGAIN);
				}
				--*lastport;
				if (*lastport > first || *lastport < last)
					*lastport = first;
				lport = htons(*lastport);
			} while (in6_pcblookup_local(pcbinfo,
				 &inp->in6p_laddr, lport, wild));
		} else {
			/*
			 * counting up
			 */
			count = last - first;

			do {
				if (count-- < 0) {	/* completely used? */
					/*
					 * Undo any address bind that may have
					 * occurred above.
					 */
					inp->in6p_laddr = in6addr_any;
					return (EAGAIN);
				}
				++*lastport;
				if (*lastport < first || *lastport > last)
					*lastport = first;
				lport = htons(*lastport);
			} while (in6_pcblookup_local(pcbinfo,
				 &inp->in6p_laddr, lport, wild));
		}
	}
	inp->inp_lport = lport;
	if (in_pcbinshash(inp) != 0) {
		/* hash insertion failed; roll back the bind */
		inp->in6p_laddr = in6addr_any;
		inp->inp_lport = 0;
		return (EAGAIN);
	}
	inp->in6p_flowinfo = sin6 ? sin6->sin6_flowinfo : 0;	/*XXX*/
	return(0);
}

/*
 * Transform old in6_pcbconnect() into an inner subroutine for new
 * in6_pcbconnect(): Do some validity-checking on the remote
 * address (in mbuf 'nam') and then determine local host address
 * (i.e., which interface) to use to access that remote host.
 *
 * This preserves definition of in6_pcbconnect(), while supporting a
 * slightly different version for T/TCP.  (This is more than
 * a bit of a kludge, but cleaning up the internal interfaces would
 * have forced minor changes in every protocol).
+ */ + +int +in6_pcbladdr(inp, nam, plocal_addr6) + register struct inpcb *inp; + struct sockaddr *nam; + struct in6_addr **plocal_addr6; +{ + register struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam; + struct in6_pktinfo *pi; + struct ifnet *ifp = NULL; + int error = 0; + + if (nam->sa_len != sizeof (*sin6)) + return (EINVAL); + if (sin6->sin6_family != AF_INET6) + return (EAFNOSUPPORT); + if (sin6->sin6_port == 0) + return (EADDRNOTAVAIL); + + /* + * If the scope of the destination is link-local, embed the interface + * index in the address. + */ + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { + /* XXX boundary check is assumed to be already done. */ + /* XXX sin6_scope_id is weaker than advanced-api. */ + if (inp->in6p_outputopts && + (pi = inp->in6p_outputopts->ip6po_pktinfo) && + pi->ipi6_ifindex) { + sin6->sin6_addr.s6_addr16[1] = htons(pi->ipi6_ifindex); + ifp = ifindex2ifnet[pi->ipi6_ifindex]; + } + else if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr) && + inp->in6p_moptions && + inp->in6p_moptions->im6o_multicast_ifp) { + sin6->sin6_addr.s6_addr16[1] = + htons(inp->in6p_moptions->im6o_multicast_ifp->if_index); + ifp = ifindex2ifnet[inp->in6p_moptions->im6o_multicast_ifp->if_index]; + } else if (sin6->sin6_scope_id) { + /* boundary check */ + if (sin6->sin6_scope_id < 0 + || if_index < sin6->sin6_scope_id) { + return ENXIO; /* XXX EINVAL? */ + } + sin6->sin6_addr.s6_addr16[1] + = htons(sin6->sin6_scope_id & 0xffff);/*XXX*/ + ifp = ifindex2ifnet[sin6->sin6_scope_id]; + } + } + + if (in6_ifaddr) { + /* + * If the destination address is UNSPECIFIED addr, + * use the loopback addr, e.g ::1. + */ +#define satosin6(sa) ((struct sockaddr_in6 *)(sa)) +#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) + sin6->sin6_addr = in6addr_loopback; + } + { + /* + * XXX: in6_selectsrc might replace the bound local address + * with the address specified by setsockopt(IPV6_PKTINFO). + * Is it the intended behavior? 
+ */ + *plocal_addr6 = in6_selectsrc(sin6, inp->in6p_outputopts, + inp->in6p_moptions, + &inp->in6p_route, + &inp->in6p_laddr, &error); + if (*plocal_addr6 == 0) { + if (error == 0) + error = EADDRNOTAVAIL; + return(error); + } + /* + * Don't do pcblookup call here; return interface in + * plocal_addr6 + * and exit to caller, that will do the lookup. + */ + } + + if (inp->in6p_route.ro_rt) + ifp = inp->in6p_route.ro_rt->rt_ifp; + + return(0); +} + +/* + * Outer subroutine: + * Connect from a socket to a specified address. + * Both address and port must be specified in argument sin. + * If don't have a local address for this socket yet, + * then pick one. + */ +int +in6_pcbconnect(inp, nam, p) + register struct inpcb *inp; + struct sockaddr *nam; + struct proc *p; +{ + struct in6_addr *addr6; + register struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)nam; + int error; + + /* + * Call inner routine, to assign local interface address. + */ + if (error = in6_pcbladdr(inp, nam, &addr6)) + return(error); + + if (in6_pcblookup_hash(inp->inp_pcbinfo, &sin6->sin6_addr, + sin6->sin6_port, + IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) + ? addr6 : &inp->in6p_laddr, + inp->inp_lport, 0, NULL) != NULL) { + return (EADDRINUSE); + } + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { + if (inp->inp_lport == 0) { + error = in6_pcbbind(inp, (struct sockaddr *)0, p); + if (error) + return (error); + } + inp->in6p_laddr = *addr6; + } + inp->in6p_faddr = sin6->sin6_addr; + inp->inp_fport = sin6->sin6_port; + /* + * xxx kazu flowlabel is necessary for connect? + * but if this line is missing, the garbage value remains. + */ + inp->in6p_flowinfo = sin6->sin6_flowinfo; + + in_pcbrehash(inp); + return (0); +} + +#if 0 +/* + * Return an IPv6 address, which is the most appropriate for given + * destination and user specified options. + * If necessary, this function lookups the routing table and return + * an entry to the caller for later use. 
 */
/*
 * NOTE(review): this whole function sits inside an "#if 0" opened just
 * above, i.e. it is compiled out in this snapshot; a live copy
 * presumably exists elsewhere in the tree — confirm before editing.
 *
 * Source-address selection precedence implemented below:
 *   1. address given via IPV6_PKTINFO,
 *   2. the socket's bound local address,
 *   3. an address on the interface named by IPV6_PKTINFO's ifindex,
 *   4. an address on the interface named by sin6_scope_id,
 *   5. for multicast: an address on the multicast-option interface,
 *   6. an address on the interface of the route to the next hop,
 *   7. an address on the interface of the route to the destination.
 */
struct in6_addr *
in6_selectsrc(dstsock, opts, mopts, ro, laddr, errorp)
	struct sockaddr_in6 *dstsock;
	struct ip6_pktopts *opts;
	struct ip6_moptions *mopts;
	struct route_in6 *ro;
	struct in6_addr *laddr;
	int *errorp;
{
	struct in6_addr *dst;
	struct in6_ifaddr *ia6 = 0;
	struct in6_pktinfo *pi = NULL;

	dst = &dstsock->sin6_addr;
	*errorp = 0;

	/*
	 * If the source address is explicitly specified by the caller,
	 * use it.
	 */
	if (opts && (pi = opts->ip6po_pktinfo) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&pi->ipi6_addr))
		return(&pi->ipi6_addr);

	/*
	 * If the source address is not specified but the socket(if any)
	 * is already bound, use the bound address.
	 */
	if (laddr && !IN6_IS_ADDR_UNSPECIFIED(laddr))
		return(laddr);

	/*
	 * If the caller doesn't specify the source address but
	 * the outgoing interface, use an address associated with
	 * the interface.
	 */
	if (pi && pi->ipi6_ifindex) {
		/* XXX boundary check is assumed to be already done. */
		ia6 = in6_ifawithscope(ifindex2ifnet[pi->ipi6_ifindex],
				       dst);
		if (ia6 == 0) {
			*errorp = EADDRNOTAVAIL;
			return(0);
		}
		return(&satosin6(&ia6->ia_addr)->sin6_addr);
	}

	/*
	 * If the destination address is a link-local unicast address or
	 * a multicast address, and if the outgoing interface is specified
	 * by the sin6_scope_id filed, use an address associated with the
	 * interface.
	 * XXX: We're now trying to define more specific semantics of
	 *      sin6_scope_id field, so this part will be rewritten in
	 *      the near future.
	 */
	if ((IN6_IS_ADDR_LINKLOCAL(dst) || IN6_IS_ADDR_MULTICAST(dst)) &&
	    dstsock->sin6_scope_id) {
		/*
		 * I'm not sure if boundary check for scope_id is done
		 * somewhere...
		 */
		if (dstsock->sin6_scope_id < 0 ||
		    if_index < dstsock->sin6_scope_id) {
			*errorp = ENXIO; /* XXX: better error? */
			return(0);
		}
		ia6 = in6_ifawithscope(ifindex2ifnet[dstsock->sin6_scope_id],
				       dst);
		if (ia6 == 0) {
			*errorp = EADDRNOTAVAIL;
			return(0);
		}
		return(&satosin6(&ia6->ia_addr)->sin6_addr);
	}

	/*
	 * If the destination address is a multicast address and
	 * the outgoing interface for the address is specified
	 * by the caller, use an address associated with the interface.
	 * There is a sanity check here; if the destination has node-local
	 * scope, the outgoing interfacde should be a loopback address.
	 * Even if the outgoing interface is not specified, we also
	 * choose a loopback interface as the outgoing interface.
	 */
	if (IN6_IS_ADDR_MULTICAST(dst)) {
		struct ifnet *ifp = mopts ? mopts->im6o_multicast_ifp : NULL;
#ifdef __bsdi__
		extern struct ifnet loif;
#endif

		if (ifp == NULL && IN6_IS_ADDR_MC_NODELOCAL(dst)) {
#ifdef __bsdi__
			ifp = &loif;
#else
			ifp = &loif[0];
#endif
		}

		if (ifp) {
			ia6 = in6_ifawithscope(ifp, dst);
			if (ia6 == 0) {
				*errorp = EADDRNOTAVAIL;
				return(0);
			}
			return(&ia6->ia_addr.sin6_addr);
		}
	}

	/*
	 * If the next hop address for the packet is specified
	 * by caller, use an address associated with the route
	 * to the next hop.
	 */
	{
		struct sockaddr_in6 *sin6_next;
		struct rtentry *rt;

		if (opts && opts->ip6po_nexthop) {
			sin6_next = satosin6(opts->ip6po_nexthop);
			rt = nd6_lookup(&sin6_next->sin6_addr, 1, NULL);
			if (rt) {
				ia6 = in6_ifawithscope(rt->rt_ifp, dst);
				if (ia6 == 0)
					ia6 = ifatoia6(rt->rt_ifa);
			}
			if (ia6 == 0) {
				*errorp = EADDRNOTAVAIL;
				return(0);
			}
			return(&satosin6(&ia6->ia_addr)->sin6_addr);
		}
	}

	/*
	 * If route is known or can be allocated now,
	 * our src addr is taken from the i/f, else punt.
	 */
	if (ro) {
		if (ro->ro_rt &&
		    !IN6_ARE_ADDR_EQUAL(&satosin6(&ro->ro_dst)->sin6_addr, dst)) {
			/* cached route is for a different destination; drop it */
			RTFREE(ro->ro_rt);
			ro->ro_rt = (struct rtentry *)0;
		}
		if (ro->ro_rt == (struct rtentry *)0 ||
		    ro->ro_rt->rt_ifp == (struct ifnet *)0) {
			/* No route yet, so try to acquire one */
			bzero(&ro->ro_dst, sizeof(struct sockaddr_in6));
			ro->ro_dst.sin6_family = AF_INET6;
			ro->ro_dst.sin6_len = sizeof(struct sockaddr_in6);
			ro->ro_dst.sin6_addr = *dst;
			if (IN6_IS_ADDR_MULTICAST(dst)) {
#if defined(__FreeBSD__) || defined (__APPLE__)
				ro->ro_rt = rtalloc1(&((struct route *)ro)
						     ->ro_dst, 0, 0UL);
#endif /*__FreeBSD__*/
#if defined(__bsdi__) || defined(__NetBSD__)
				ro->ro_rt = rtalloc1(&((struct route *)ro)
						     ->ro_dst, 0);
#endif /*__bsdi__*/
			} else {
				rtalloc((struct route *)ro);
			}
		}

		/*
		 * in_pcbconnect() checks out IFF_LOOPBACK to skip using
		 * the address. But we don't know why it does so.
		 * It is necessary to ensure the scope even for lo0
		 * so doesn't check out IFF_LOOPBACK.
		 */

		if (ro->ro_rt) {
			ia6 = in6_ifawithscope(ro->ro_rt->rt_ifa->ifa_ifp, dst);
			if (ia6 == 0) /* xxx scope error ?*/
				ia6 = ifatoia6(ro->ro_rt->rt_ifa);
		}
#if 0
		/*
		 * xxx The followings are necessary? (kazu)
		 * I don't think so.
		 * It's for SO_DONTROUTE option in IPv4.(jinmei)
		 */
		if (ia6 == 0) {
			struct sockaddr_in6 sin6 = {sizeof(sin6), AF_INET6, 0};

			sin6->sin6_addr = *dst;

			ia6 = ifatoia6(ifa_ifwithdstaddr(sin6tosa(&sin6)));
			if (ia6 == 0)
				ia6 = ifatoia6(ifa_ifwithnet(sin6tosa(&sin6)));
			if (ia6 == 0) {
				*errorp = EHOSTUNREACH;	/* no route */
				return(0);
			}
			return(&satosin6(&ia6->ia_addr)->sin6_addr);
		}
#endif /* 0 */
		if (ia6 == 0) {
			*errorp = EHOSTUNREACH;	/* no route */
			return(0);
		}
		return(&satosin6(&ia6->ia_addr)->sin6_addr);
	}

	*errorp = EADDRNOTAVAIL;
	return(0);
}

/*
 * Default hop limit selection.  The precedence is as follows:
 * 1. Hoplimit valued specified via ioctl.
 * 2.
 *    (If the outgoing interface is detected) the current
 *    hop limit of the interface specified by router advertisement.
 * 3. The system default hoplimit.
*/
int
in6_selecthlim(in6p, ifp)
	struct in6pcb *in6p;
	struct ifnet *ifp;
{
	if (in6p && in6p->in6p_hops >= 0)	/* per-socket hop limit set */
		return(in6p->in6p_hops);
	else if (ifp)				/* hop limit learned via RA */
		return(nd_ifinfo[ifp->if_index].chlim);
	else
		return(ip6_defhlim);
}
#endif

/*
 * Clear the foreign (peer) address/port of a connected PCB and rehash
 * it; detach the PCB entirely if the socket holds no file reference.
 */
void
in6_pcbdisconnect(inp)
	struct inpcb *inp;
{
	bzero((caddr_t)&inp->in6p_faddr, sizeof(inp->in6p_faddr));
	inp->inp_fport = 0;
	in_pcbrehash(inp);
	if (inp->inp_socket->so_state & SS_NOFDREF)
		in6_pcbdetach(inp);
}

/*
 * Tear down an IPv6 PCB: drop IPsec policy, unlink from the pcbinfo
 * lists, free the socket, release stored options and the cached route,
 * and return the PCB to its zone.
 */
void
in6_pcbdetach(inp)
	struct inpcb *inp;
{
	struct socket *so = inp->inp_socket;
	struct inpcbinfo *ipi = inp->inp_pcbinfo;

#if IPSEC
	ipsec6_delete_pcbpolicy(inp);
#endif /* IPSEC */
	/* bump the generation count so stale references can be detected */
	inp->inp_gencnt = ++ipi->ipi_gencnt;
	in_pcbremlists(inp);
	sotoinpcb(so) = 0;
	sofree(so);

	if (inp->in6p_inputopts) /* Free all received options. */
		m_freem(inp->in6p_inputopts->head); /* this is safe */
	ip6_freepcbopts(inp->in6p_outputopts);
	ip6_freemoptions(inp->in6p_moptions);

	if (inp->in6p_route.ro_rt)
		rtfree(inp->in6p_route.ro_rt);
	inp->inp_vflag = 0;
	zfree(ipi->ipi_zone, inp);
}

/*
 * The calling convention of in6_setsockaddr() and in6_setpeeraddr() was
 * modified to match the pru_sockaddr() and pru_peeraddr() entry points
 * in struct pr_usrreqs, so that protocols can just reference then directly
 * without the need for a wrapper function.  The socket must have a valid
 * (i.e., non-nil) PCB, but it should be impossible to get an invalid one
 * except through a kernel programming error, so it is acceptable to panic
 * (or in this case trap) if the PCB is invalid.  (Actually, we don't trap
 * because there actually /is/ a programming error somewhere... XXX)
 */
int
in6_setsockaddr(so, nam)
	struct socket *so;
	struct sockaddr **nam;
{
	int s;
	register struct inpcb *inp;
	register struct sockaddr_in6 *sin6;

	/*
	 * Do the malloc first in case it blocks.
	 */
	MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME, M_WAITOK);
	bzero(sin6, sizeof *sin6);
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(*sin6);

	s = splnet();
	inp = sotoinpcb(so);
	if (!inp) {
		splx(s);
		_FREE(sin6, M_SONAME);
		return EINVAL;
	}
	sin6->sin6_port = inp->inp_lport;
	sin6->sin6_addr = inp->in6p_laddr;
	splx(s);
	/* recover sin6_scope_id from the KAME embedded-scope form ... */
	if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
		sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
	else
		sin6->sin6_scope_id = 0;	/*XXX*/
	/* ... and clear the embedded index from the returned address */
	if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
		sin6->sin6_addr.s6_addr16[1] = 0;

	*nam = (struct sockaddr *)sin6;
	return 0;
}

int
in6_setpeeraddr(so, nam)
	struct socket *so;
	struct sockaddr **nam;
{
	int s;
	struct inpcb *inp;
	register struct sockaddr_in6 *sin6;

	/*
	 * Do the malloc first in case it blocks.
	 */
	MALLOC(sin6, struct sockaddr_in6 *, sizeof(*sin6), M_SONAME, M_WAITOK);
	bzero((caddr_t)sin6, sizeof (*sin6));
	sin6->sin6_family = AF_INET6;
	sin6->sin6_len = sizeof(struct sockaddr_in6);

	s = splnet();
	inp = sotoinpcb(so);
	if (!inp) {
		splx(s);
		_FREE(sin6, M_SONAME);
		return EINVAL;
	}
	sin6->sin6_port = inp->inp_fport;
	sin6->sin6_addr = inp->in6p_faddr;
	splx(s);
	/* recover sin6_scope_id, then strip the embedded index (as above) */
	if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
		sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
	else
		sin6->sin6_scope_id = 0;	/*XXX*/
	if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
		sin6->sin6_addr.s6_addr16[1] = 0;

	*nam = (struct sockaddr *)sin6;
	return 0;
}

/*
 * pru_sockaddr entry for dual-stack (mapped) sockets: return the v4
 * name as a v4-mapped IPv6 sockaddr when the PCB carries an IPv4 flow.
 */
int
in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam)
{
	struct inpcb *inp = sotoinpcb(so);
	int error;

	if (inp == NULL)
		return EINVAL;
	if (inp->inp_vflag & INP_IPV4) {
		error = in_setsockaddr(so, nam);
		if (error == 0)
			in6_sin_2_v4mapsin6_in_sock(nam);
	} else
		error = in6_setsockaddr(so, nam);

	return error;
}

/*
 * pru_peeraddr counterpart of in6_mapped_sockaddr().
 */
int
in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam)
{
	struct inpcb *inp = sotoinpcb(so);
	int error;

	if (inp == NULL)
		return EINVAL;
	if (inp->inp_vflag & INP_IPV4) {
		error = in_setpeeraddr(so, nam);
		if (error == 0)
			in6_sin_2_v4mapsin6_in_sock(nam);
	} else
		error = in6_setpeeraddr(so, nam);

	return error;
}

/*
 * Pass some notification to all connections of a protocol
 * associated with address dst.  The local address and/or port numbers
 * may be specified to limit the search.  The "usual action" will be
 * taken, depending on the ctlinput cmd.  The caller must filter any
 * cmds that are uninteresting (e.g., no error in the map).
 * Call the protocol specific routine (if any) to report
 * any errors for each matching socket.
 *
 * Must be called at splnet.
 */
void
in6_pcbnotify(head, dst, fport_arg, laddr6, lport_arg, cmd, notify)
	struct inpcbhead *head;
	struct sockaddr *dst;
	u_int fport_arg, lport_arg;
	struct in6_addr *laddr6;
	int cmd;
	void (*notify) __P((struct inpcb *, int));
{
	struct inpcb *inp, *ninp;
	struct in6_addr faddr6;
	u_short fport = fport_arg, lport = lport_arg;
	int errno, s;
	void (*notify2) __P((struct inpcb *, int));

	notify2 = NULL;

	if ((unsigned)cmd > PRC_NCMDS || dst->sa_family != AF_INET6)
		return;
	faddr6 = ((struct sockaddr_in6 *)dst)->sin6_addr;
	if (IN6_IS_ADDR_UNSPECIFIED(&faddr6))
		return;

	/*
	 * Redirects go to all references to the destination,
	 * and use in6_rtchange to invalidate the route cache.
	 * Dead host indications: also use in6_rtchange to invalidate
	 * the cache, and deliver the error to all the sockets.
	 * Otherwise, if we have knowledge of the local port and address,
	 * deliver only to that socket.
	 */
	if (PRC_IS_REDIRECT(cmd) || cmd == PRC_HOSTDEAD) {
		fport = 0;
		lport = 0;
		/* NOTE(review): clobbers the caller's laddr6 — widens the
		 * match below to every local address; presumably intended */
		bzero((caddr_t)laddr6, sizeof(*laddr6));

		/*
		 * Keep the old notify function to store a soft error
		 * in each PCB.
		 */
		if (cmd == PRC_HOSTDEAD && notify != in6_rtchange)
			notify2 = notify;

		notify = in6_rtchange;
	}
	errno = inet6ctlerrmap[cmd];
	s = splnet();
	for (inp = LIST_FIRST(head); inp != NULL; inp = ninp) {
		/* save successor: notify callbacks may unlink "inp" */
		ninp = LIST_NEXT(inp, inp_list);

		if ((inp->inp_vflag & INP_IPV6) == 0)
			continue;

		if (notify == in6_rtchange) {
			/*
			 * Since a non-connected PCB might have a cached route,
			 * we always call in6_rtchange without matching
			 * the PCB to the src/dst pair.
			 *
			 * XXX: we assume in6_rtchange does not free the PCB.
			 */
			if (IN6_ARE_ADDR_EQUAL(&inp->in6p_route.ro_dst.sin6_addr,
					       &faddr6))
				in6_rtchange(inp, errno);

			if (notify2 == NULL)
				continue;

			/* fall through to deliver the saved soft-error
			 * notification to matching PCBs as well */
			notify = notify2;
		}

		if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &faddr6) ||
		    inp->inp_socket == 0 ||
		    (lport && inp->inp_lport != lport) ||
		    (!IN6_IS_ADDR_UNSPECIFIED(laddr6) &&
		     !IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr6)) ||
		    (fport && inp->inp_fport != fport))
			continue;

		if (notify)
			(*notify)(inp, errno);
	}
	splx(s);
}

/*
 * Lookup a PCB based on the local address and port.
 */
struct inpcb *
in6_pcblookup_local(pcbinfo, laddr, lport_arg, wild_okay)
	struct inpcbinfo *pcbinfo;
	struct in6_addr *laddr;
	u_int lport_arg;
	int wild_okay;
{
	register struct inpcb *inp;
	int matchwild = 3, wildcard;
	u_short lport = lport_arg;

	if (!wild_okay) {
		struct inpcbhead *head;
		/*
		 * Look for an unconnected (wildcard foreign addr) PCB that
		 * matches the local address and port we're looking for.
		 */
		head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0,
						      pcbinfo->hashmask)];
		LIST_FOREACH(inp, head, inp_hash) {
			if ((inp->inp_vflag & INP_IPV6) == 0)
				continue;
			if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) &&
			    IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) &&
			    inp->inp_lport == lport) {
				/*
				 * Found.
				 */
				return (inp);
			}
		}
		/*
		 * Not found.
		 */
		return (NULL);
	} else {
		struct inpcbporthead *porthash;
		struct inpcbport *phd;
		struct inpcb *match = NULL;
		/*
		 * Best fit PCB lookup.
		 *
		 * First see if this local port is in use by looking on the
		 * port hash list.
		 */
		porthash = &pcbinfo->porthashbase[INP_PCBPORTHASH(lport,
		    pcbinfo->porthashmask)];
		LIST_FOREACH(phd, porthash, phd_hash) {
			if (phd->phd_port == lport)
				break;
		}
		if (phd != NULL) {
			/*
			 * Port is in use by one or more PCBs. Look for best
			 * fit.  "wildcard" counts unspecified sides; fewer
			 * wildcards means a more specific match.
			 */
			LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) {
				wildcard = 0;
				if ((inp->inp_vflag & INP_IPV6) == 0)
					continue;
				if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr))
					wildcard++;
				if (!IN6_IS_ADDR_UNSPECIFIED(
					&inp->in6p_laddr)) {
					if (IN6_IS_ADDR_UNSPECIFIED(laddr))
						wildcard++;
					else if (!IN6_ARE_ADDR_EQUAL(
						&inp->in6p_laddr, laddr))
						continue;
				} else {
					if (!IN6_IS_ADDR_UNSPECIFIED(laddr))
						wildcard++;
				}
				if (wildcard < matchwild) {
					match = inp;
					matchwild = wildcard;
					if (matchwild == 0) {
						/* exact match; stop looking */
						break;
					}
				}
			}
		}
		return (match);
	}
}

/*
 * Check for alternatives when higher level complains
 * about service problems.  For now, invalidate cached
 * routing information.  If the route was created dynamically
 * (by a redirect), time to try a default gateway again.
 */
void
in6_losing(in6p)
	struct inpcb *in6p;
{
	struct rtentry *rt;
	struct rt_addrinfo info;

	if ((rt = in6p->in6p_route.ro_rt) != NULL) {
		in6p->in6p_route.ro_rt = 0;
		bzero((caddr_t)&info, sizeof(info));
		info.rti_info[RTAX_DST] =
			(struct sockaddr *)&in6p->in6p_route.ro_dst;
		info.rti_info[RTAX_GATEWAY] = rt->rt_gateway;
		info.rti_info[RTAX_NETMASK] = rt_mask(rt);
		rt_missmsg(RTM_LOSING, &info, rt->rt_flags, 0);
		if (rt->rt_flags & RTF_DYNAMIC)
			(void)rtrequest(RTM_DELETE, rt_key(rt),
					rt->rt_gateway, rt_mask(rt), rt->rt_flags,
					(struct rtentry **)0);
		else
			/*
			 * A new route can be allocated
			 * the next time output is attempted.
			 */
			rtfree(rt);
	}
}

/*
 * After a routing change, flush old routing
 * and allocate a (hopefully) better one.
 */
void
in6_rtchange(inp, errno)
	struct inpcb *inp;
	int errno;
{
	if (inp->in6p_route.ro_rt) {
		rtfree(inp->in6p_route.ro_rt);
		inp->in6p_route.ro_rt = 0;
		/*
		 * A new route can be allocated the next time
		 * output is attempted.
		 */
	}
}

/*
 * Lookup PCB in hash list.
+ */ +struct inpcb * +in6_pcblookup_hash(pcbinfo, faddr, fport_arg, laddr, lport_arg, wildcard, ifp) + struct inpcbinfo *pcbinfo; + struct in6_addr *faddr, *laddr; + u_int fport_arg, lport_arg; + int wildcard; + struct ifnet *ifp; +{ + struct inpcbhead *head; + register struct inpcb *inp; + u_short fport = fport_arg, lport = lport_arg; + + /* + * First look for an exact match. + */ + head = &pcbinfo->hashbase[INP_PCBHASH(faddr->s6_addr32[3] /* XXX */, + lport, fport, + pcbinfo->hashmask)]; + for (inp = head->lh_first; inp != NULL; inp = inp->inp_hash.le_next) { + if ((inp->inp_vflag & INP_IPV6) == 0) + continue; + if (IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, faddr) && + IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) && + inp->inp_fport == fport && + inp->inp_lport == lport) { + /* + * Found. + */ + return (inp); + } + } + if (wildcard) { + struct inpcb *local_wild = NULL; + + head = &pcbinfo->hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, + pcbinfo->hashmask)]; + for (inp = head->lh_first; inp != NULL; + inp = inp->inp_hash.le_next) { + if ((inp->inp_vflag & INP_IPV6) == 0) + continue; + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) && + inp->inp_lport == lport) { +#if defined(NFAITH) && NFAITH > 0 + if (ifp && ifp->if_type == IFT_FAITH && + (inp->inp_flags & INP_FAITH) == 0) + continue; +#endif + if (IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, + laddr)) + return (inp); + else if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) + local_wild = inp; + } + } + return (local_wild); + } + + /* + * Not found. + */ + return (NULL); +} + +void +init_sin6(struct sockaddr_in6 *sin6, struct mbuf *m) +{ + struct ip6_hdr *ip; + + ip = mtod(m, struct ip6_hdr *); + bzero(sin6, sizeof(*sin6)); + sin6->sin6_len = sizeof(*sin6); + sin6->sin6_family = AF_INET6; + sin6->sin6_addr = ip->ip6_src; + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + sin6->sin6_addr.s6_addr16[1] = 0; + sin6->sin6_scope_id = + (m->m_pkthdr.rcvif && IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + ? 
m->m_pkthdr.rcvif->if_index : 0; + + return; +} diff --git a/bsd/netinet6/in6_pcb.h b/bsd/netinet6/in6_pcb.h new file mode 100644 index 000000000..8620c248b --- /dev/null +++ b/bsd/netinet6/in6_pcb.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)in_pcb.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET6_IN6_PCB_H_ +#define _NETINET6_IN6_PCB_H_ + +#ifdef KERNEL +#define satosin6(sa) ((struct sockaddr_in6 *)(sa)) +#define sin6tosa(sin6) ((struct sockaddr *)(sin6)) +#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) + +void in6_losing __P((struct inpcb *)); +int in6_pcballoc __P((struct socket *, struct inpcbinfo *, struct proc *)); +int in6_pcbbind __P((struct inpcb *, struct sockaddr *, struct proc *)); +int in6_pcbconnect __P((struct inpcb *, struct sockaddr *, struct proc *)); +void in6_pcbdetach __P((struct inpcb *)); +void in6_pcbdisconnect __P((struct inpcb *)); +int in6_pcbladdr __P((struct inpcb *, struct sockaddr *, + struct in6_addr **)); +struct inpcb * + in6_pcblookup_local __P((struct inpcbinfo *, + struct in6_addr *, u_int, int)); +struct inpcb * + in6_pcblookup_hash __P((struct inpcbinfo *, + struct in6_addr *, u_int, struct in6_addr *, + u_int, int, struct ifnet *)); +void in6_pcbnotify __P((struct inpcbhead *, struct sockaddr *, + u_int, struct in6_addr *, u_int, int, + void (*)(struct inpcb *, int))); +void in6_rtchange __P((struct inpcb *, int)); +int in6_setpeeraddr __P((struct socket *so, struct sockaddr **nam)); +int in6_setsockaddr __P((struct socket *so, struct sockaddr **nam)); +int in6_mapped_sockaddr __P((struct socket *so, struct sockaddr **nam)); +int in6_mapped_peeraddr __P((struct socket *so, struct sockaddr **nam)); +struct in6_addr *in6_selectsrc __P((struct sockaddr_in6 *, + struct ip6_pktopts *, + struct ip6_moptions *, + struct route_in6 *, + struct in6_addr *, int *)); +int in6_selecthlim __P((struct inpcb *, struct ifnet *)); + +void init_sin6 __P((struct sockaddr_in6 *sin6, struct mbuf *m)); +#endif /* KERNEL */ + +#endif /* !_NETINET6_IN6_PCB_H_ */ diff --git a/bsd/netinet6/in6_prefix.c b/bsd/netinet6/in6_prefix.c new file mode 100644 index 000000000..0f746d0ea --- /dev/null +++ b/bsd/netinet6/in6_prefix.c @@ -0,0 +1,1226 @@ +/* $KAME: in6_prefix.c,v 1.27 
2000/03/29 23:13:13 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)in.c 8.2 (Berkeley) 11/15/93 + */ + +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) +#include +#endif + +#include + +#include +#include +#include +#include +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static MALLOC_DEFINE(M_IPFW, "ip6rr", "IPv6 Router Renumbering Prefix"); +static MALLOC_DEFINE(M_IPFW, "rp_addr", "IPv6 Router Renumbering Ifid"); +#endif + +struct rr_prhead rr_prefix; + +#include + +static void add_each_addr __P((struct socket *so, struct rr_prefix *rpp, + struct rp_addr *rap)); +static int create_ra_entry __P((struct rp_addr **rapp)); +static int add_each_prefix __P((struct socket *so, struct rr_prefix *rpp)); +static void free_rp_entries __P((struct rr_prefix *rpp)); +static int link_stray_ia6s __P((struct rr_prefix *rpp)); +static void rp_remove __P((struct rr_prefix *rpp)); + +/* + * Copy bits from src to tgt, from off bit for len bits. + * Caller must specify collect tgtsize and srcsize. 
+ */ +static void +bit_copy(char *tgt, u_int tgtsize, char *src, u_int srcsize, + u_int off, u_int len) +{ + char *sp, *tp; + + /* arg values check */ + if (srcsize < off || srcsize < (off + len) || + tgtsize < off || tgtsize < (off + len)) { + log(LOG_ERR, + "in6_prefix.c: bit_copy: invalid args: srcsize %d,\n" + "tgtsize %d, off %d, len %d\n", srcsize, tgtsize, off, + len); + return; + } + + /* search start point */ + for (sp = src, tp = tgt; off >= 8; sp++, tp++) + off-=8; + /* copy starting bits */ + if (off) { + char setbit; + int startbits; + + startbits = min((8 - off), len); + + for (setbit = (0x80 >> off); startbits; + setbit >>= 1, startbits--, len--) + *tp |= (setbit & *sp); + tp++; + sp++; + } + /* copy midium bits */ + for (; len >= 8; sp++, tp++) { + *tp = *sp; + len-=8; + } + /* copy ending bits */ + if (len) { + char setbit; + + for (setbit = 0x80; len; setbit >>= 1, len--) + *tp |= (setbit & *sp); + } +} + +static struct ifprefix * +in6_prefixwithifp(struct ifnet *ifp, int plen, struct in6_addr *dst) +{ + struct ifprefix *ifpr; + + /* search matched prefix */ + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = ifpr->ifpr_next) { + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + if (plen <= in6_matchlen(dst, IFPR_IN6(ifpr))) + break; + } + return (ifpr); +} + +/* + * Search prefix which matches arg prefix as specified in + * draft-ietf-ipngwg-router-renum-08.txt + */ +static struct rr_prefix * +search_matched_prefix(struct ifnet *ifp, struct in6_prefixreq *ipr) +{ + struct ifprefix *ifpr; + struct ifaddr *ifa; + struct rr_prefix *rpp; + + /* search matched prefix */ + ifpr = in6_prefixwithifp(ifp, ipr->ipr_plen, + &ipr->ipr_prefix.sin6_addr); + if (ifpr != NULL) + return ifpr2rp(ifpr); + + /* + * search matched addr, and then search prefix + * which matches the addr + */ + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + 
for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (ipr->ipr_plen <= + in6_matchlen(&ipr->ipr_prefix.sin6_addr, IFA_IN6(ifa))) + break; + } + if (ifa == NULL) + return NULL; + + rpp = ifpr2rp(((struct in6_ifaddr *)ifa)->ia6_ifpr); + if (rpp != 0) + return rpp; + + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = ifpr->ifpr_next) { + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + if (ifpr->ifpr_plen <= in6_matchlen(IFA_IN6(ifa), + IFPR_IN6(ifpr))) + break; + } + if (ifpr != NULL) + log(LOG_ERR, "in6_prefix.c: search_matched_prefix: addr %s" + "has no pointer to prefix %s\n", ip6_sprintf(IFA_IN6(ifa)), + ip6_sprintf(IFPR_IN6(ifpr))); + return ifpr2rp(ifpr); +} + +/* + * Search prefix which matches arg prefix as specified in + * draft-ietf-ipngwg-router-renum-08.txt, and mark it if exists. + * Return 1 if anything matched, and 0 if nothing matched. + */ +static int +mark_matched_prefixes(u_long cmd, struct ifnet *ifp, struct in6_rrenumreq *irr) +{ + struct ifprefix *ifpr; + struct ifaddr *ifa; + int matchlen, matched = 0; + + /* search matched prefixes */ + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = ifpr->ifpr_next) { + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + matchlen = in6_matchlen(&irr->irr_matchprefix.sin6_addr, + IFPR_IN6(ifpr)); + if (irr->irr_m_minlen > ifpr->ifpr_plen || + irr->irr_m_maxlen < ifpr->ifpr_plen || + irr->irr_m_len > matchlen) + continue; + matched = 1; + ifpr2rp(ifpr)->rp_statef_addmark = 1; + if (cmd == SIOCCIFPREFIX_IN6) + ifpr2rp(ifpr)->rp_statef_delmark = 1; + } + + /* + * search matched addr, and then search prefixes + * which matche the addr + */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; ifa; ifa = 
ifa->ifa_list.tqe_next) +#endif + { + struct rr_prefix *rpp; + + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + matchlen = in6_matchlen(&irr->irr_matchprefix.sin6_addr, + IFA_IN6(ifa)); + if (irr->irr_m_minlen > matchlen || + irr->irr_m_maxlen < matchlen || irr->irr_m_len > matchlen) + continue; + rpp = ifpr2rp(((struct in6_ifaddr *)ifa)->ia6_ifpr); + if (rpp != 0) { + matched = 1; + rpp->rp_statef_addmark = 1; + if (cmd == SIOCCIFPREFIX_IN6) + rpp->rp_statef_delmark = 1; + } else + log(LOG_WARNING, "in6_prefix.c: mark_matched_prefixes:" + "no back pointer to ifprefix for %s. " + "ND autoconfigured addr?\n", + ip6_sprintf(IFA_IN6(ifa))); + } + return matched; +} + +/* + * Mark global prefixes as to be deleted. + */ +static void +delmark_global_prefixes(struct ifnet *ifp, struct in6_rrenumreq *irr) +{ + struct ifprefix *ifpr; + + /* search matched prefixes */ + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = ifpr->ifpr_next) { + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + /* mark delete global prefix */ + if (in6_addrscope(RP_IN6(ifpr2rp(ifpr))) == + IPV6_ADDR_SCOPE_GLOBAL) + ifpr2rp(ifpr)->rp_statef_delmark = 1; + } +} + +/* Unmark prefixes */ +static void +unmark_prefixes(struct ifnet *ifp) +{ + struct ifprefix *ifpr; + + /* unmark all prefix */ + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = ifpr->ifpr_next) { + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + /* unmark prefix */ + ifpr2rp(ifpr)->rp_statef_addmark = 0; + ifpr2rp(ifpr)->rp_statef_delmark = 0; + } +} + +static void +init_prefix_ltimes(struct rr_prefix *rpp) +{ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + if (rpp->rp_pltime == RR_INFINITE_LIFETIME || + rpp->rp_rrf_decrprefd == 0) + rpp->rp_preferred = 0; + else + rpp->rp_preferred = time_second + rpp->rp_pltime; + if (rpp->rp_vltime == RR_INFINITE_LIFETIME || + 
rpp->rp_rrf_decrvalid == 0) + rpp->rp_expire = 0; + else + rpp->rp_expire = time_second + rpp->rp_vltime; +} + +static int +rr_are_ifid_equal(struct in6_addr *ii1, struct in6_addr *ii2, int ii_len) +{ + int ii_bytelen, ii_bitlen; + int p_bytelen, p_bitlen; + + /* sanity check */ + if (1 > ii_len || + ii_len > 124) { /* as RFC2373, prefix is at least 4 bit */ + log(LOG_ERR, "rr_are_ifid_equal: invalid ifid length(%d)\n", + ii_len); + return(0); + } + + ii_bytelen = ii_len / 8; + ii_bitlen = ii_len % 8; + + p_bytelen = sizeof(struct in6_addr) - ii_bytelen - 1; + p_bitlen = 8 - ii_bitlen; + + if (bcmp(ii1->s6_addr + p_bytelen + 1, ii2->s6_addr + p_bytelen + 1, + ii_bytelen)) + return(0); + if (((ii1->s6_addr[p_bytelen] << p_bitlen) & 0xff) != + ((ii2->s6_addr[p_bytelen] << p_bitlen) & 0xff)) + return(0); + + return(1); +} + +static struct rp_addr * +search_ifidwithprefix(struct rr_prefix *rpp, struct in6_addr *ifid) +{ + struct rp_addr *rap; + + for (rap = rpp->rp_addrhead.lh_first; rap != NULL; + rap = rap->ra_entry.le_next) + if (rr_are_ifid_equal(ifid, &rap->ra_ifid, + (sizeof(struct in6_addr) << 3) - + rpp->rp_plen)) + break; + return rap; +} + +static int +assigne_ra_entry(struct rr_prefix *rpp, int iilen, struct in6_ifaddr *ia) +{ + int error = 0; + struct rp_addr *rap; + int s; + + if ((error = create_ra_entry(&rap)) != 0) + return error; + + /* copy interface id part */ + bit_copy((caddr_t)&rap->ra_ifid, sizeof(rap->ra_ifid) << 3, + (caddr_t)IA6_IN6(ia), + sizeof(*IA6_IN6(ia)) << 3, rpp->rp_plen, iilen); + /* link to ia, and put into list */ + rap->ra_addr = ia; + rap->ra_addr->ia_ifa.ifa_refcnt++; +#if 0 /* Can't do this now, because rpp may be on th stack. should fix it? 
*/ + ia->ia6_ifpr = rp2ifpr(rpp); +#endif + s = splnet(); + LIST_INSERT_HEAD(&rpp->rp_addrhead, rap, ra_entry); + splx(s); + + return 0; +} + +static int +in6_prefix_add_llifid(int iilen, struct in6_ifaddr *ia) +{ + struct rr_prefix *rpp; + struct rp_addr *rap; + struct socket so; + int error, s; + + if ((error = create_ra_entry(&rap)) != 0) + return(error); + /* copy interface id part */ + bit_copy((caddr_t)&rap->ra_ifid, sizeof(rap->ra_ifid) << 3, + (caddr_t)IA6_IN6(ia), sizeof(*IA6_IN6(ia)) << 3, + 64, (sizeof(rap->ra_ifid) << 3) - 64); + /* XXX: init dummy so */ + bzero(&so, sizeof(so)); + /* insert into list */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + LIST_FOREACH(rpp, &rr_prefix, rp_entry) +#else + for (rpp = LIST_FIRST(&rr_prefix); rpp; rpp = LIST_NEXT(rpp, rp_entry)) +#endif + { + s = splnet(); + LIST_INSERT_HEAD(&rpp->rp_addrhead, rap, ra_entry); + splx(s); + add_each_addr(&so, rpp, rap); + } + return 0; +} + + +int +in6_prefix_add_ifid(int iilen, struct in6_ifaddr *ia) +{ + int plen = (sizeof(*IA6_IN6(ia)) << 3) - iilen; + struct ifprefix *ifpr; + struct rp_addr *rap; + int error = 0; + + if (IN6_IS_ADDR_LINKLOCAL(IA6_IN6(ia))) + return(in6_prefix_add_llifid(iilen, ia)); + ifpr = in6_prefixwithifp(ia->ia_ifp, plen, IA6_IN6(ia)); + if (ifpr == NULL) { + struct rr_prefix rp; + struct socket so; + int pplen = (plen == 128) ? 64 : plen; + + /* allocate a prefix for ia, with default properties */ + + /* init rp */ + bzero(&rp, sizeof(rp)); + rp.rp_type = IN6_PREFIX_RR; + rp.rp_ifp = ia->ia_ifp; + rp.rp_plen = pplen; + rp.rp_prefix.sin6_len = sizeof(rp.rp_prefix); + rp.rp_prefix.sin6_family = AF_INET6; + bit_copy((char *)RP_IN6(&rp), sizeof(*RP_IN6(&rp)) << 3, + (char *)&ia->ia_addr.sin6_addr, + sizeof(ia->ia_addr.sin6_addr) << 3, + 0, pplen); + rp.rp_vltime = rp.rp_pltime = RR_INFINITE_LIFETIME; + rp.rp_raf_onlink = 1; + rp.rp_raf_auto = 1; + /* Is some FlagMasks for rrf necessary? 
*/ + rp.rp_rrf_decrvalid = rp.rp_rrf_decrprefd = 0; + rp.rp_origin = PR_ORIG_RR; /* can be renumbered */ + + /* create ra_entry */ + error = link_stray_ia6s(&rp); + if (error != 0) { + free_rp_entries(&rp); + return error; + } + + /* XXX: init dummy so */ + bzero(&so, sizeof(so)); +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__NetBSD__) + so.so_state |= SS_PRIV; +#endif + + error = add_each_prefix(&so, &rp); + + /* free each rp_addr entry */ + free_rp_entries(&rp); + + if (error != 0) + return error; + + /* search again */ + ifpr = in6_prefixwithifp(ia->ia_ifp, pplen, IA6_IN6(ia)); + if (ifpr == NULL) + return 0; + } + rap = search_ifidwithprefix(ifpr2rp(ifpr), IA6_IN6(ia)); + if (rap != NULL) { + if (rap->ra_addr == NULL) { + rap->ra_addr = ia; + rap->ra_addr->ia_ifa.ifa_refcnt++; + } else if (rap->ra_addr != ia) { + /* There may be some inconsistencies between addrs. */ + log(LOG_ERR, "ip6_prefix.c: addr %s/%d matched prefix" + "has already another ia %x(%s) on its ifid list\n", + ip6_sprintf(IA6_IN6(ia)), plen, + rap->ra_addr, + ip6_sprintf(IA6_IN6(rap->ra_addr))); + return EADDRINUSE /* XXX */; + } + ia->ia6_ifpr = ifpr; + return 0; + } + error = assigne_ra_entry(ifpr2rp(ifpr), iilen, ia); + if (error == 0) + ia->ia6_ifpr = ifpr; + return (error); +} + +void +in6_prefix_remove_ifid(int iilen, struct in6_ifaddr *ia) +{ + struct rp_addr *rap; + + if (ia->ia6_ifpr == NULL) + return; + rap = search_ifidwithprefix(ifpr2rp(ia->ia6_ifpr), IA6_IN6(ia)); + if (rap != NULL) { + int s = splnet(); + LIST_REMOVE(rap, ra_entry); + splx(s); + if (rap->ra_addr) + IFAFREE(&rap->ra_addr->ia_ifa); + _FREE(rap, M_IPFW); + } + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (LIST_EMPTY(&ifpr2rp(ia->ia6_ifpr)->rp_addrhead)) +#else + if (LIST_FIRST(&ifpr2rp(ia->ia6_ifpr)->rp_addrhead) == NULL) +#endif + rp_remove(ifpr2rp(ia->ia6_ifpr)); +} + +void +in6_purgeprefix(ifp) + struct ifnet *ifp; +{ + struct ifprefix *ifpr, *nextifpr; + + /* delete 
prefixes before ifnet goes away */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 4 + for (ifpr = TAILQ_FIRST(&ifp->if_prefixhead); ifpr; + ifpr = nextifpr) +#else + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = nextifpr) +#endif + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 4 + nextifpr = TAILQ_NEXT(ifpr, ifpr_list); +#else + nextifpr = ifpr->ifpr_next; +#endif + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + (void)delete_each_prefix(ifpr2rp(ifpr), PR_ORIG_KERNEL); + } +} + +static void +add_each_addr(struct socket *so, struct rr_prefix *rpp, struct rp_addr *rap) +{ + struct in6_ifaddr *ia6; + struct in6_aliasreq ifra; + int error; + + /* init ifra */ + bzero(&ifra, sizeof(ifra)); + strncpy(ifra.ifra_name, if_name(rpp->rp_ifp), sizeof(ifra.ifra_name)); + ifra.ifra_addr.sin6_family = ifra.ifra_prefixmask.sin6_family = + AF_INET6; + ifra.ifra_addr.sin6_len = ifra.ifra_prefixmask.sin6_len = + sizeof(ifra.ifra_addr); + /* copy prefix part */ + bit_copy((char *)&ifra.ifra_addr.sin6_addr, + sizeof(ifra.ifra_addr.sin6_addr) << 3, + (char *)RP_IN6(rpp), sizeof(*RP_IN6(rpp)) << 3, + 0, rpp->rp_plen); + /* copy interface id part */ + bit_copy((char *)&ifra.ifra_addr.sin6_addr, + sizeof(ifra.ifra_addr.sin6_addr) << 3, + (char *)&rap->ra_ifid, sizeof(rap->ra_ifid) << 3, + rpp->rp_plen, (sizeof(rap->ra_ifid) << 3) - rpp->rp_plen); + in6_prefixlen2mask(&ifra.ifra_prefixmask.sin6_addr, rpp->rp_plen); + /* don't care ifra_flags for now */ + + ia6 = in6ifa_ifpwithaddr(rpp->rp_ifp, &ifra.ifra_addr.sin6_addr); + if (ia6 != NULL) { + if (ia6->ia6_ifpr == NULL) { + /* link this addr and the prefix each other */ + IFAFREE(&rap->ra_addr->ia_ifa); + rap->ra_addr = ia6; + rap->ra_addr->ia_ifa.ifa_refcnt++; + ia6->ia6_ifpr = rp2ifpr(rpp); + return; + } + if (ia6->ia6_ifpr == rp2ifpr(rpp)) { + IFAFREE(&rap->ra_addr->ia_ifa); + rap->ra_addr = ia6; + rap->ra_addr->ia_ifa.ifa_refcnt++; + return; + } + /* + * The addr is already assigned to other 
+ * prefix. + * There may be some inconsistencies between + * prefixes. + * e.g. overraped prefixes with common starting + * part and different plefixlen. + * Or, completely duplicated prefixes? + * log it and return. + */ + log(LOG_ERR, "in6_prefix.c: add_each_addr: addition of an addr" + "%s/%d failed because there is already another addr %s/%d\n", + ip6_sprintf(&ifra.ifra_addr.sin6_addr), rpp->rp_plen, + ip6_sprintf(IA6_IN6(ia6)), + in6_mask2len(&ia6->ia_prefixmask.sin6_addr)); + return; + } + /* propagate ANYCAST flag if it is set for ancestor addr */ + if (rap->ra_flags.anycast != 0) + ifra.ifra_flags |= IN6_IFF_ANYCAST; + error = in6_control(so, SIOCAIFADDR_IN6, (caddr_t)&ifra, rpp->rp_ifp +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined(__APPLE__) + , current_proc() +#endif + ); + if (error != 0) + log(LOG_ERR, "in6_prefix.c: add_each_addr: addition of an addr" + "%s/%d failed because in6_control failed for error %d\n", + ip6_sprintf(&ifra.ifra_addr.sin6_addr), rpp->rp_plen, + error); + return; + + /* + * link beween this addr and the prefix will be done + * in in6_prefix_add_ifid + */ +} + +static int +rrpr_update(struct socket *so, struct rr_prefix *new) +{ + struct rr_prefix *rpp; + struct ifprefix *ifpr; + struct rp_addr *rap; + int s; + + /* search existing prefix */ + for (ifpr = new->rp_ifp->if_prefixlist; ifpr; ifpr = ifpr->ifpr_next) { + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + if (ifpr->ifpr_plen == new->rp_plen && + in6_are_prefix_equal(IFPR_IN6(ifpr), RP_IN6(new), + ifpr->ifpr_plen)) + break; + } + rpp = ifpr2rp(ifpr); + if (rpp != NULL) { + /* + * We got a prefix which we have seen in the past. + */ + /* + * If the origin of the already-installed prefix is more + * preferable than the new one, ignore installation request. 
+ */ + if (rpp->rp_origin > new->rp_origin) + return(EPERM); + + /* update prefix information */ + rpp->rp_flags.prf_ra = new->rp_flags.prf_ra; + if (rpp->rp_origin >= PR_ORIG_RR) + rpp->rp_flags.prf_rr = new->rp_flags.prf_rr; + rpp->rp_vltime = new->rp_vltime; + rpp->rp_pltime = new->rp_pltime; + rpp->rp_expire = new->rp_expire; + rpp->rp_preferred = new->rp_preferred; + rpp->rp_statef_delmark = 0; /* cancel deletion */ + /* + * Interface id related update. + * add rp_addr entries in new into rpp, if they have not + * been already included in rpp. + */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined(__APPLE__) + while (!LIST_EMPTY(&new->rp_addrhead)) +#else + while (new->rp_addrhead.lh_first != NULL) +#endif + { + rap = LIST_FIRST(&new->rp_addrhead); + LIST_REMOVE(rap, ra_entry); + if (search_ifidwithprefix(rpp, &rap->ra_ifid) + != NULL) { + if (rap->ra_addr) + IFAFREE(&rap->ra_addr->ia_ifa); + _FREE(rap, M_IPFW); + continue; + } + s = splnet(); + LIST_INSERT_HEAD(&rpp->rp_addrhead, rap, ra_entry); + splx(s); + } + } else { + /* + * We got a fresh prefix. + */ + /* create new prefix */ + rpp = (struct rr_prefix *)_MALLOC(sizeof(*rpp), M_IPFW, + M_NOWAIT); + if (rpp == NULL) { + log(LOG_ERR, "in6_prefix.c: rrpr_update:%d" + ": ENOBUFS for rr_prefix\n", __LINE__); + return(ENOBUFS); + } + /* initilization */ + *rpp = *new; + LIST_INIT(&rpp->rp_addrhead); + /* move rp_addr entries of new to rpp */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined(__APPLE__) + while (!LIST_EMPTY(&new->rp_addrhead)) +#else + while (new->rp_addrhead.lh_first != NULL) +#endif + { + rap = LIST_FIRST(&new->rp_addrhead); + LIST_REMOVE(rap, ra_entry); + LIST_INSERT_HEAD(&rpp->rp_addrhead, rap, ra_entry); + } + + /* let rp_ifpr.ifpr_prefix point rr_prefix. 
*/ + rpp->rp_ifpr.ifpr_prefix = (struct sockaddr *)&rpp->rp_prefix; + /* link rr_prefix entry to if_prefixlist */ + { + struct ifnet *ifp = rpp->rp_ifp; + struct ifprefix *ifpr; + + if ((ifpr = ifp->if_prefixlist) != NULL) { + for ( ; ifpr->ifpr_next; + ifpr = ifpr->ifpr_next) + continue; + ifpr->ifpr_next = rp2ifpr(rpp); + } else + ifp->if_prefixlist = rp2ifpr(rpp); + rp2ifpr(rpp)->ifpr_type = IN6_PREFIX_RR; + } + /* link rr_prefix entry to rr_prefix list */ + s = splnet(); + LIST_INSERT_HEAD(&rr_prefix, rpp, rp_entry); + splx(s); + } + + if (!new->rp_raf_auto) + return 0; + + /* + * Add an address for each interface id, if it is not yet + * If it existed but not pointing to the prefix yet, + * init the prefix pointer. + */ + for (rap = rpp->rp_addrhead.lh_first; rap != NULL; + rap = rap->ra_entry.le_next) { + if (rap->ra_addr != NULL) { + if (rap->ra_addr->ia6_ifpr == NULL) + rap->ra_addr->ia6_ifpr = rp2ifpr(rpp); + continue; + } + add_each_addr(so, rpp, rap); + } + return 0; +} + +static int +add_each_prefix(struct socket *so, struct rr_prefix *rpp) +{ + init_prefix_ltimes(rpp); + return(rrpr_update(so, rpp)); +} + +static void +rp_remove(struct rr_prefix *rpp) +{ + int s; + + s = splnet(); + /* unlink rp_entry from if_prefixlist */ + { + struct ifnet *ifp = rpp->rp_ifp; + struct ifprefix *ifpr; + + if ((ifpr = ifp->if_prefixlist) == rp2ifpr(rpp)) + ifp->if_prefixlist = ifpr->ifpr_next; + else { + while (ifpr->ifpr_next && + (ifpr->ifpr_next != rp2ifpr(rpp))) + ifpr = ifpr->ifpr_next; + if (ifpr->ifpr_next) + ifpr->ifpr_next = rp2ifpr(rpp)->ifpr_next; + else + printf("Couldn't unlink rr_prefix from ifp\n"); + } + } + /* unlink rp_entry from rr_prefix list */ + LIST_REMOVE(rpp, rp_entry); + splx(s); + _FREE(rpp, M_IPFW); +} + +static int +create_ra_entry(struct rp_addr **rapp) +{ + *rapp = (struct rp_addr *)_MALLOC(sizeof(struct rp_addr), M_IPFW, + M_NOWAIT); + if (*rapp == NULL) { + log(LOG_ERR, "in6_prefix.c: init_newprefix:%d: ENOBUFS" + "for rp_addr\n", 
__LINE__); + return ENOBUFS; + } + bzero(*rapp, sizeof(*(*rapp))); + + return 0; +} + +static int +init_newprefix(struct in6_rrenumreq *irr, struct ifprefix *ifpr, + struct rr_prefix *rpp) +{ + struct rp_addr *orap; + + /* init rp */ + bzero(rpp, sizeof(*rpp)); + rpp->rp_type = IN6_PREFIX_RR; + rpp->rp_ifp = ifpr->ifpr_ifp; + rpp->rp_plen = ifpr->ifpr_plen; + rpp->rp_prefix.sin6_len = sizeof(rpp->rp_prefix); + rpp->rp_prefix.sin6_family = AF_INET6; + bit_copy((char *)RP_IN6(rpp), sizeof(*RP_IN6(rpp)) << 3, + (char *)&irr->irr_useprefix.sin6_addr, + sizeof(irr->irr_useprefix.sin6_addr) << 3, + 0, irr->irr_u_uselen); + /* copy keeplen part if necessary as necessary len */ + if (irr->irr_u_uselen < ifpr->ifpr_plen) + bit_copy((char *)RP_IN6(rpp), sizeof(*RP_IN6(rpp)) << 3, + (char *)IFPR_IN6(ifpr), sizeof(*IFPR_IN6(ifpr)) << 3, + irr->irr_u_uselen, + min(ifpr->ifpr_plen - irr->irr_u_uselen, + irr->irr_u_keeplen)); + for (orap = (ifpr2rp(ifpr)->rp_addrhead).lh_first; orap != NULL; + orap = orap->ra_entry.le_next) { + struct rp_addr *rap; + int error = 0; + + if ((error = create_ra_entry(&rap)) != 0) + return error; + rap->ra_ifid = orap->ra_ifid; + rap->ra_flags.anycast = (orap->ra_addr != NULL && + (orap->ra_addr->ia6_flags & + IN6_IFF_ANYCAST) != 0) ? 1 : 0; + LIST_INSERT_HEAD(&rpp->rp_addrhead, rap, ra_entry); + } + rpp->rp_vltime = irr->irr_vltime; + rpp->rp_pltime = irr->irr_pltime; + rpp->rp_raf_onlink = irr->irr_raf_mask_onlink ? irr->irr_raf_onlink : + ifpr2rp(ifpr)->rp_raf_onlink; + rpp->rp_raf_auto = irr->irr_raf_mask_auto ? irr->irr_raf_auto : + ifpr2rp(ifpr)->rp_raf_auto; + /* Is some FlagMasks for rrf necessary? */ + rpp->rp_rrf = irr->irr_rrf; + rpp->rp_origin = irr->irr_origin; + + return 0; +} + +static void +free_rp_entries(struct rr_prefix *rpp) +{ + /* + * This func is only called with rpp on stack(not on list). 
+ * So no splnet() here + */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined(__APPLE__) + while (!LIST_EMPTY(&rpp->rp_addrhead)) +#else + while (rpp->rp_addrhead.lh_first != NULL) +#endif + { + struct rp_addr *rap; + + rap = LIST_FIRST(&rpp->rp_addrhead); + LIST_REMOVE(rap, ra_entry); + if (rap->ra_addr) + IFAFREE(&rap->ra_addr->ia_ifa); + _FREE(rap, M_IPFW); + } +} + +static int +add_useprefixes(struct socket *so, struct ifnet *ifp, + struct in6_rrenumreq *irr) +{ + struct ifprefix *ifpr, *nextifpr; + struct rr_prefix rp; + int error = 0; + + /* add prefixes to each of marked prefix */ + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = nextifpr) { + nextifpr = ifpr->ifpr_next; + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + if (ifpr2rp(ifpr)->rp_statef_addmark) { + if ((error = init_newprefix(irr, ifpr, &rp)) != 0) + break; + error = add_each_prefix(so, &rp); + } + } + /* free each rp_addr entry */ + free_rp_entries(&rp); + + return error; +} + +static void +unprefer_prefix(struct rr_prefix *rpp) +{ + struct rp_addr *rap; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + long time_second = time.tv_sec; +#endif + + for (rap = rpp->rp_addrhead.lh_first; rap != NULL; + rap = rap->ra_entry.le_next) { + if (rap->ra_addr == NULL) + continue; + rap->ra_addr->ia6_lifetime.ia6t_preferred = time_second; + rap->ra_addr->ia6_lifetime.ia6t_pltime = 0; + } +} + +int +delete_each_prefix(struct rr_prefix *rpp, u_char origin) +{ + int error = 0; + + if (rpp->rp_origin > origin) + return(EPERM); + + while (rpp->rp_addrhead.lh_first != NULL) { + struct rp_addr *rap; + int s; + + s = splnet(); + rap = LIST_FIRST(&rpp->rp_addrhead); + if (rap == NULL) + break; + LIST_REMOVE(rap, ra_entry); + splx(s); + if (rap->ra_addr == NULL) { + _FREE(rap, M_IPFW); + continue; + } + rap->ra_addr->ia6_ifpr = NULL; + + in6_purgeaddr(&rap->ra_addr->ia_ifa, rpp->rp_ifp); + IFAFREE(&rap->ra_addr->ia_ifa); + _FREE(rap, 
M_IPFW); + } + rp_remove(rpp); + + return error; +} + +static void +delete_prefixes(struct ifnet *ifp, u_char origin) +{ + struct ifprefix *ifpr, *nextifpr; + + /* delete prefixes marked as tobe deleted */ + for (ifpr = ifp->if_prefixlist; ifpr; ifpr = nextifpr) { + nextifpr = ifpr->ifpr_next; + if (ifpr->ifpr_prefix->sa_family != AF_INET6 || + ifpr->ifpr_type != IN6_PREFIX_RR) + continue; + if (ifpr2rp(ifpr)->rp_statef_delmark) + (void)delete_each_prefix(ifpr2rp(ifpr), origin); + } +} + +static int +link_stray_ia6s(struct rr_prefix *rpp) +{ + struct ifaddr *ifa; + +#if (defined(__FreeBSD__) && __FreeBSD__ < 3) || defined(__bsdi__) + for (ifa = rpp->rp_ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = rpp->rp_ifp->if_addrlist.tqh_first; ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + struct rp_addr *rap; + struct rr_prefix *orpp; + int error = 0; + + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (rpp->rp_plen > in6_matchlen(RP_IN6(rpp), IFA_IN6(ifa))) + continue; + + orpp = ifpr2rp(((struct in6_ifaddr *)ifa)->ia6_ifpr); + if (orpp != NULL) { + if (!in6_are_prefix_equal(RP_IN6(orpp), RP_IN6(rpp), + rpp->rp_plen)) + log(LOG_ERR, "in6_prefix.c: link_stray_ia6s:" + "addr %s/%d already linked to a prefix" + "and it matches also %s/%d\n", + ip6_sprintf(IFA_IN6(ifa)), orpp->rp_plen, + ip6_sprintf(RP_IN6(rpp)), + rpp->rp_plen); + continue; + } + if ((error = assigne_ra_entry(rpp, + (sizeof(rap->ra_ifid) << 3) - + rpp->rp_plen, + (struct in6_ifaddr *)ifa)) != 0) + return error; + } + return 0; +} + +/* XXX assumes that permission is already checked by the caller */ +int +in6_prefix_ioctl(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp) +{ + struct rr_prefix *rpp, rp_tmp; + struct rp_addr *rap; + struct in6_prefixreq *ipr = (struct in6_prefixreq *)data; + struct in6_rrenumreq *irr = (struct in6_rrenumreq *)data; + struct ifaddr *ifa; + int error = 0; + + /* + * Failsafe for errneous address config program. 
+ * Let's hope rrenumd don't make a mistakes. + */ + if (ipr->ipr_origin <= PR_ORIG_RA) + ipr->ipr_origin = PR_ORIG_STATIC; + + switch (cmd) { + case SIOCSGIFPREFIX_IN6: + delmark_global_prefixes(ifp, irr); + /* FALL THROUGH */ + case SIOCAIFPREFIX_IN6: + case SIOCCIFPREFIX_IN6: + /* check if preferred lifetime > valid lifetime */ + if (irr->irr_pltime > irr->irr_vltime) { + log(LOG_NOTICE, + "in6_prefix_ioctl: preferred lifetime" + "(%ld) is greater than valid lifetime(%ld)\n", + (u_long)irr->irr_pltime, (u_long)irr->irr_vltime); + error = EINVAL; + break; + } + if (mark_matched_prefixes(cmd, ifp, irr)) { + if (irr->irr_u_uselen != 0) + if ((error = add_useprefixes(so, ifp, irr)) + != 0) + goto failed; + if (cmd != SIOCAIFPREFIX_IN6) + delete_prefixes(ifp, irr->irr_origin); + } else + return (EADDRNOTAVAIL); + failed: + unmark_prefixes(ifp); + break; + case SIOCGIFPREFIX_IN6: + rpp = search_matched_prefix(ifp, ipr); + if (rpp == NULL || ifp != rpp->rp_ifp) + return (EADDRNOTAVAIL); + + ipr->ipr_origin = rpp->rp_origin; + ipr->ipr_plen = rpp->rp_plen; + ipr->ipr_vltime = rpp->rp_vltime; + ipr->ipr_pltime = rpp->rp_pltime; + ipr->ipr_flags = rpp->rp_flags; + ipr->ipr_prefix = rpp->rp_prefix; + + break; + case SIOCSIFPREFIX_IN6: + /* check if preferred lifetime > valid lifetime */ + if (ipr->ipr_pltime > ipr->ipr_vltime) { + log(LOG_NOTICE, + "in6_prefix_ioctl: preferred lifetime" + "(%ld) is greater than valid lifetime(%ld)\n", + (u_long)ipr->ipr_pltime, (u_long)ipr->ipr_vltime); + error = EINVAL; + break; + } + + /* init rp_tmp */ + bzero((caddr_t)&rp_tmp, sizeof(rp_tmp)); + rp_tmp.rp_ifp = ifp; + rp_tmp.rp_plen = ipr->ipr_plen; + rp_tmp.rp_prefix = ipr->ipr_prefix; + rp_tmp.rp_vltime = ipr->ipr_vltime; + rp_tmp.rp_pltime = ipr->ipr_pltime; + rp_tmp.rp_flags = ipr->ipr_flags; + rp_tmp.rp_origin = ipr->ipr_origin; + + /* create rp_addr entries, usually at least for lladdr */ + if ((error = link_stray_ia6s(&rp_tmp)) != 0) { + free_rp_entries(&rp_tmp); + break; + } 
+#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr == NULL) + continue; /* just for safety */ + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_IS_ADDR_LINKLOCAL(IFA_IN6(ifa)) == 0) + continue; + + if ((error = create_ra_entry(&rap)) != 0) { + free_rp_entries(&rp_tmp); + goto bad; + } + /* copy interface id part */ + bit_copy((caddr_t)&rap->ra_ifid, + sizeof(rap->ra_ifid) << 3, + (caddr_t)IFA_IN6(ifa), + sizeof(*IFA_IN6(ifa)) << 3, + rp_tmp.rp_plen, + (sizeof(rap->ra_ifid) << 3) - rp_tmp.rp_plen); + /* insert into list */ + LIST_INSERT_HEAD(&rp_tmp.rp_addrhead, rap, ra_entry); + } + + error = add_each_prefix(so, &rp_tmp); + + /* free each rp_addr entry */ + free_rp_entries(&rp_tmp); + + break; + case SIOCDIFPREFIX_IN6: + rpp = search_matched_prefix(ifp, ipr); + if (rpp == NULL || ifp != rpp->rp_ifp) + return (EADDRNOTAVAIL); + + error = delete_each_prefix(rpp, ipr->ipr_origin); + break; + } + bad: + return error; +} + +void +in6_rr_timer(void *ignored_arg) +{ + int s; + struct rr_prefix *rpp; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + long time_second = time.tv_sec; +#endif +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + timeout(in6_rr_timer, (caddr_t)0, ip6_rr_prune * hz); + + s = splnet(); + /* expire */ + rpp = LIST_FIRST(&rr_prefix); + while (rpp) { + if (rpp->rp_expire && rpp->rp_expire < time_second) { + struct rr_prefix *next_rpp; + + next_rpp = LIST_NEXT(rpp, rp_entry); + delete_each_prefix(rpp, PR_ORIG_KERNEL); + rpp = next_rpp; + continue; + } + if (rpp->rp_preferred && rpp->rp_preferred < time_second) + unprefer_prefix(rpp); + rpp = LIST_NEXT(rpp, rp_entry); + } + splx(s); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} diff --git 
a/bsd/netinet6/in6_prefix.h b/bsd/netinet6/in6_prefix.h new file mode 100644 index 000000000..906c01989 --- /dev/null +++ b/bsd/netinet6/in6_prefix.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 1995, 1996, 1997, 1998 and 1999 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +struct rr_prefix { + struct ifprefix rp_ifpr; + LIST_ENTRY(rr_prefix) rp_entry; + LIST_HEAD(rp_addrhead, rp_addr) rp_addrhead; + struct sockaddr_in6 rp_prefix; /* prefix */ + u_int32_t rp_vltime; /* advertised valid lifetime */ + u_int32_t rp_pltime; /* advertised preferred lifetime */ + time_t rp_expire; /* expiration time of the prefix */ + time_t rp_preferred; /* preferred time of the prefix */ + struct in6_prflags rp_flags; + u_char rp_origin; /* from where this prefix info is obtained */ + struct rp_stateflags { + /* if some prefix should be added to this prefix */ + u_char addmark : 1; + u_char delmark : 1; /* if this prefix will be deleted */ + } rp_stateflags; +}; + +#define rp_type rp_ifpr.ifpr_type +#define rp_ifp rp_ifpr.ifpr_ifp +#define rp_plen rp_ifpr.ifpr_plen + +#define rp_raf rp_flags.prf_ra +#define rp_raf_onlink rp_flags.prf_ra.onlink +#define rp_raf_auto rp_flags.prf_ra.autonomous + +#define rp_statef_addmark rp_stateflags.addmark +#define rp_statef_delmark rp_stateflags.delmark + +#define rp_rrf rp_flags.prf_rr +#define rp_rrf_decrvalid rp_flags.prf_rr.decrvalid +#define rp_rrf_decrprefd rp_flags.prf_rr.decrprefd + +struct rp_addr { + LIST_ENTRY(rp_addr) ra_entry; + struct in6_addr ra_ifid; + struct in6_ifaddr *ra_addr; + struct ra_flags { + u_char anycast : 1; + } ra_flags; +}; + +#define ifpr2rp(ifpr) ((struct rr_prefix *)(ifpr)) +#define rp2ifpr(rp) ((struct ifprefix *)(rp)) + +#define RP_IN6(rp) (&(rp)->rp_prefix.sin6_addr) + +#define RR_INFINITE_LIFETIME 0xffffffff + + +LIST_HEAD(rr_prhead, rr_prefix); + +extern struct rr_prhead rr_prefix; + +void in6_rr_timer __P((void *)); +int delete_each_prefix __P((struct rr_prefix *rpp, u_char origin)); diff --git a/bsd/netinet6/in6_proto.c b/bsd/netinet6/in6_proto.c new file mode 100644 index 000000000..488791240 --- /dev/null +++ b/bsd/netinet6/in6_proto.c @@ -0,0 +1,626 @@ +/* $KAME: in6_proto.c,v 1.47 2000/03/29 07:37:22 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 
WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)in_proto.c 8.1 (Berkeley) 6/10/93 + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#if __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#if defined(__FreeBSD__) || defined (__APPLE__) +#include +#endif +#include +#include +#include +#include +#if defined (__FreeBSD__) || defined (__APPLE__) +#include +#include +#endif + +#include +#include +#include + +#include +#include +#include +#include +#if defined (__APPLE__) || (defined(__FreeBSD__) && __FreeBSD__ >= 3) || (defined(__NetBSD__) && !defined(TCP6)) || defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) +#include +#include +#endif +#if (defined(__NetBSD__) && !defined(TCP6)) || defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) +#include +#endif +#include +#include +#include + +#if defined (__APPLE__) +#include +#include +#include +#include +#include +# if (defined(__FreeBSD__) && __FreeBSD__ >= 4) +#include +# endif +#else +#if defined(__NetBSD__) && !defined(TCP6) +#include +#include +#include +#include +#include +#include +#include +#else +#include +#include +#include +#include +#include +#endif +#endif + +#include +#include + +#include + +#include +#if defined (__APPLE__) +#include +#endif + +#if IPSEC +#include +#include +#if IPSEC_ESP +#include +#endif +#include +#endif /*IPSEC*/ + +#include +#include + +#include "gif.h" +#if NGIF > 0 +#include +#endif + +#if MIP6 +#include +#endif + +#include + +#define offsetof(type, member) ((size_t)(&((type *)0)->member)) + +/* + * TCP/IP protocol family: IP6, ICMP6, UDP, TCP. 
+ */ + +extern struct domain inet6domain; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +static struct pr_usrreqs nousrreqs; +#endif + +struct ip6protosw inet6sw[] = { +{ 0, &inet6domain, IPPROTO_IPV6, 0, + 0, 0, 0, 0, + 0, + ip6_init, 0, frag6_slowtimo, frag6_drain, + 0, &nousrreqs +}, +{ SOCK_DGRAM, &inet6domain, IPPROTO_UDP, PR_ATOMIC | PR_ADDR, + udp6_input, 0, udp6_ctlinput, ip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &udp6_usrreqs +}, +{ SOCK_STREAM, &inet6domain, IPPROTO_TCP, PR_CONNREQUIRED | PR_WANTRCVD, + tcp6_input, 0, tcp6_ctlinput, tcp_ctloutput, + 0, + tcp_init, 0, 0, tcp_drain, + 0, &tcp6_usrreqs +}, +{ SOCK_RAW, &inet6domain, IPPROTO_RAW, PR_ATOMIC | PR_ADDR, + rip6_input, rip6_output, rip6_ctlinput, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip6_usrreqs +}, +{ SOCK_RAW, &inet6domain, IPPROTO_ICMPV6, PR_ATOMIC | PR_ADDR, + icmp6_input, rip6_output, 0, rip6_ctloutput, + 0, + icmp6_init, icmp6_fasttimo, 0, 0, + 0, &rip6_usrreqs +}, +{ SOCK_RAW, &inet6domain, IPPROTO_DSTOPTS,PR_ATOMIC|PR_ADDR, + dest6_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +{ SOCK_RAW, &inet6domain, IPPROTO_ROUTING,PR_ATOMIC|PR_ADDR, + route6_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +{ SOCK_RAW, &inet6domain, IPPROTO_FRAGMENT,PR_ATOMIC|PR_ADDR, + frag6_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#if IPSEC +{ SOCK_RAW, &inet6domain, IPPROTO_AH, PR_ATOMIC|PR_ADDR, + ah6_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#if IPSEC_ESP +{ SOCK_RAW, &inet6domain, IPPROTO_ESP, PR_ATOMIC|PR_ADDR, + esp6_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#endif +{ SOCK_RAW, &inet6domain, IPPROTO_IPCOMP, PR_ATOMIC|PR_ADDR, + ipcomp6_input, 0, 0, 0, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#endif /* IPSEC */ +{ SOCK_RAW, &inet6domain, IPPROTO_IPV4, PR_ATOMIC|PR_ADDR, + encap6_input, rip6_output, 0, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &nousrreqs +}, +#if INET6 +{ SOCK_RAW, &inet6domain, IPPROTO_IPV6, PR_ATOMIC|PR_ADDR, + 
encap6_input, rip6_output, 0, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip6_usrreqs +}, +#endif /*INET6*/ +{ SOCK_RAW, &inet6domain, IPPROTO_PIM, PR_ATOMIC|PR_ADDR, + pim6_input, rip6_output, 0, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip6_usrreqs +}, +/* raw wildcard */ +{ SOCK_RAW, &inet6domain, 0, PR_ATOMIC | PR_ADDR, + rip6_input, rip6_output, 0, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip6_usrreqs +}, +}; + +#if NGIF > 0 +struct ip6protosw in6_gif_protosw = +{ SOCK_RAW, &inet6domain, 0/*IPPROTO_IPV[46]*/, PR_ATOMIC|PR_ADDR, + in6_gif_input, rip6_output, 0, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip6_usrreqs +}; +#endif /*NGIF*/ + +#if MIP6 +struct ip6protosw mip6_tunnel_protosw = +{ SOCK_RAW, &inet6domain, 0/*IPPROTO_IPV[46]*/, PR_ATOMIC|PR_ADDR, + mip6_tunnel_input, rip6_output, 0, rip6_ctloutput, + 0, + 0, 0, 0, 0, + 0, &rip6_usrreqs +}; +#endif /* MIP6 */ + +extern int in6_inithead __P((void **, int)); +int in6_proto_count = (sizeof (inet6sw) / sizeof (struct ip6protosw)); +extern void in6_dinit(void); + +struct domain inet6domain = + { AF_INET6, "internet6", in6_dinit, 0, 0, + inet6sw, 0, + in6_inithead, offsetof(struct sockaddr_in6, sin6_addr) << 3, sizeof(struct sockaddr_in6) , + sizeof(struct sockaddr_in6), 0 + }; + +DOMAIN_SET(inet6); + +/* + * Internet configuration info + */ +#ifndef IPV6FORWARDING +#if GATEWAY6 +#define IPV6FORWARDING 1 /* forward IP6 packets not for us */ +#else +#define IPV6FORWARDING 0 /* don't forward IP6 packets not for us */ +#endif /* GATEWAY6 */ +#endif /* !IPV6FORWARDING */ + +#ifndef IPV6_SENDREDIRECTS +#define IPV6_SENDREDIRECTS 1 +#endif + +int ip6_forwarding = IPV6FORWARDING; /* act as router? */ +int ip6_sendredirects = IPV6_SENDREDIRECTS; +int ip6_defhlim = IPV6_DEFHLIM; +int ip6_defmcasthlim = IPV6_DEFAULT_MULTICAST_HOPS; +int ip6_accept_rtadv = 0; /* "IPV6FORWARDING ? 0 : 1" is dangerous */ +int ip6_maxfragpackets = 200; +int ip6_log_interval = 5; +int ip6_hdrnestlimit = 50; /* appropriate? 
*/ +int ip6_dad_count = 1; /* DupAddrDetectionTransmits */ +u_int32_t ip6_flow_seq; +int ip6_auto_flowlabel = 1; +#if NGIF > 0 +int ip6_gif_hlim = GIF_HLIM; +#else +int ip6_gif_hlim = 0; +#endif +int ip6_use_deprecated = 1; /* allow deprecated addr (RFC2462 5.5.4) */ +int ip6_rr_prune = 5; /* router renumbering prefix + * walk list every 5 sec. */ +#if MAPPED_ADDR_ENABLED +int ip6_mapped_addr_on = 1; +#endif /* MAPPED_ADDR_ENABLED */ + +u_int32_t ip6_id = 0UL; +int ip6_keepfaith = 0; +time_t ip6_log_time = (time_t)0L; + +/* icmp6 */ +#ifndef __bsdi__ +/* + * BSDI4 defines these variables in in_proto.c... + * XXX: what if we don't define INET? Should we define pmtu6_expire + * or so? (jinmei@kame.net 19990310) + */ +int pmtu_expire = 60*10; +int pmtu_probe = 60*2; +#endif + +/* raw IP6 parameters */ +/* + * Nominal space allocated to a raw ip socket. + */ +#define RIPV6SNDQ 8192 +#define RIPV6RCVQ 8192 + +u_long rip6_sendspace = RIPV6SNDQ; +u_long rip6_recvspace = RIPV6RCVQ; + +/* ICMPV6 parameters */ +int icmp6_rediraccept = 1; /* accept and process redirects */ +int icmp6_redirtimeout = 10 * 60; /* 10 minutes */ +u_int icmp6errratelim = 1000; /* 1000usec = 1msec */ +int icmp6_nodeinfo = 1; /* enable/disable NI response */ + +#if TCP6 +/* TCP on IP6 parameters */ +int tcp6_sendspace = 1024 * 8; +int tcp6_recvspace = 1024 * 8; +int tcp6_mssdflt = TCP6_MSS; +int tcp6_rttdflt = TCP6TV_SRTTDFLT / PR_SLOWHZ; +int tcp6_do_rfc1323 = 1; +int tcp6_conntimeo = TCP6TV_KEEP_INIT; /* initial connection timeout */ +int tcp6_43maxseg = 0; +int tcp6_pmtu = 0; + +/* + * Parameters for keepalive option. + * Connections for which SO_KEEPALIVE is set will be probed + * after being idle for a time of tcp6_keepidle (in units of PR_SLOWHZ). + * Starting at that time, the connection is probed at intervals + * of tcp6_keepintvl (same units) until a response is received + * or until tcp6_keepcnt probes have been made, at which time + * the connection is dropped. 
Note that a tcp6_keepidle value + * under 2 hours is nonconformant with RFC-1122, Internet Host Requirements. + */ +int tcp6_keepidle = TCP6TV_KEEP_IDLE; /* time before probing idle */ +int tcp6_keepintvl = TCP6TV_KEEPINTVL; /* interval betwn idle probes */ +int tcp6_keepcnt = TCP6TV_KEEPCNT; /* max idle probes */ +int tcp6_maxpersistidle = TCP6TV_KEEP_IDLE; /* max idle time in persist */ + +#ifndef INET_SERVER +#define TCP6_LISTEN_HASH_SIZE 17 +#define TCP6_CONN_HASH_SIZE 97 +#define TCP6_SYN_HASH_SIZE 293 +#define TCP6_SYN_BUCKET_SIZE 35 +#else +#define TCP6_LISTEN_HASH_SIZE 97 +#define TCP6_CONN_HASH_SIZE 9973 +#define TCP6_SYN_HASH_SIZE 997 +#define TCP6_SYN_BUCKET_SIZE 35 +#endif +int tcp6_listen_hash_size = TCP6_LISTEN_HASH_SIZE; +int tcp6_conn_hash_size = TCP6_CONN_HASH_SIZE; +struct tcp6_hash_list tcp6_listen_hash[TCP6_LISTEN_HASH_SIZE], + tcp6_conn_hash[TCP6_CONN_HASH_SIZE]; + +int tcp6_syn_cache_size = TCP6_SYN_HASH_SIZE; +int tcp6_syn_cache_limit = TCP6_SYN_HASH_SIZE*TCP6_SYN_BUCKET_SIZE; +int tcp6_syn_bucket_limit = 3*TCP6_SYN_BUCKET_SIZE; +struct syn_cache_head6 tcp6_syn_cache[TCP6_SYN_HASH_SIZE]; +struct syn_cache_head6 *tcp6_syn_cache_first; +int tcp6_syn_cache_interval = 8; /* runs timer every 4 seconds */ +int tcp6_syn_cache_timeo = TCP6TV_KEEP_INIT; + +/* + * Parameters for computing a desirable data segment size + * given an upper bound (either interface MTU, or peer's MSS option)_. + * As applications tend to use a buffer size that is a multiple + * of kilobytes, try for something that divides evenly. However, + * do not round down too much. + * + * Round segment size down to a multiple of TCP6_ROUNDSIZE if this + * does not result in lowering by more than (size/TCP6_ROUNDFRAC). + * For example, round 536 to 512. Older versions of the system + * effectively used MCLBYTES (1K or 2K) as TCP6_ROUNDSIZE, with + * a value of 1 for TCP6_ROUNDFRAC (eliminating its effect). + * We round to a multiple of 256 for SLIP. 
+ */ +#ifndef TCP6_ROUNDSIZE +#define TCP6_ROUNDSIZE 256 /* round to multiple of 256 */ +#endif +#ifndef TCP6_ROUNDFRAC +#define TCP6_ROUNDFRAC 10 /* round down at most N/10, or 10% */ +#endif + +int tcp6_roundsize = TCP6_ROUNDSIZE; +int tcp6_roundfrac = TCP6_ROUNDFRAC; +#endif /*TCP6*/ + +/* UDP on IP6 parameters */ +int udp6_sendspace = 9216; /* really max datagram size */ +int udp6_recvspace = 40 * (1024 + sizeof(struct sockaddr_in6)); + /* 40 1K datagrams */ + +#if defined(__FreeBSD__) || defined(__APPLE__) +/* + * sysctl related items. + */ +SYSCTL_NODE(_net, PF_INET6, inet6, CTLFLAG_RW, 0, + "Internet6 Family"); + +/* net.inet6 */ +SYSCTL_NODE(_net_inet6, IPPROTO_IPV6, ip6, CTLFLAG_RW, 0, "IP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_ICMPV6, icmp6, CTLFLAG_RW, 0, "ICMP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_UDP, udp6, CTLFLAG_RW, 0, "UDP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_TCP, tcp6, CTLFLAG_RW, 0, "TCP6"); +#if IPSEC +SYSCTL_NODE(_net_inet6, IPPROTO_ESP, ipsec6, CTLFLAG_RW, 0, "IPSEC6"); +#endif /* IPSEC */ + +/* net.inet6.ip6 */ +static int +sysctl_ip6_forwarding SYSCTL_HANDLER_ARGS +{ + int error = 0; + int old_ip6_forwarding; + int changed; + + error = SYSCTL_OUT(req, arg1, sizeof(int)); + if (error || !req->newptr) + return (error); + old_ip6_forwarding = ip6_forwarding; + error = SYSCTL_IN(req, arg1, sizeof(int)); + if (error != 0) + return (error); + changed = (ip6_forwarding ? 1 : 0) ^ (old_ip6_forwarding ? 1 : 0); + if (changed == 0) + return (error); + /* + * XXX while host->router removes prefix got from RA, + * router->host case nukes all the prefixes managed by in6_prefix.c + * (both RR and static). therefore, switching from host->router->host + * will remove statically configured addresses/prefixes. + * not sure if it is intended behavior or not. 
+ */ + if (ip6_forwarding != 0) { /* host becomes router */ + int s = splnet(); + struct nd_prefix *pr, *next; + + for (pr = nd_prefix.lh_first; pr; pr = next) { + next = pr->ndpr_next; + if (!IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + in6_ifdel(pr->ndpr_ifp, &pr->ndpr_addr); + prelist_remove(pr); + } + splx(s); + } else { /* router becomes host */ + while(!LIST_EMPTY(&rr_prefix)) + delete_each_prefix(LIST_FIRST(&rr_prefix), + PR_ORIG_KERNEL); + } + + return (error); +} + +SYSCTL_OID(_net_inet6_ip6, IPV6CTL_FORWARDING, forwarding, + CTLTYPE_INT|CTLFLAG_RW, &ip6_forwarding, 0, sysctl_ip6_forwarding, + "I", ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_SENDREDIRECTS, + redirect, CTLFLAG_RW, &ip6_sendredirects, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFHLIM, + hlim, CTLFLAG_RW, &ip6_defhlim, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXFRAGPACKETS, + maxfragpackets, CTLFLAG_RW, &ip6_maxfragpackets, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_ACCEPT_RTADV, + accept_rtadv, CTLFLAG_RW, &ip6_accept_rtadv, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_KEEPFAITH, + keepfaith, CTLFLAG_RW, &ip6_keepfaith, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_LOG_INTERVAL, + log_interval, CTLFLAG_RW, &ip6_log_interval, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_HDRNESTLIMIT, + hdrnestlimit, CTLFLAG_RW, &ip6_hdrnestlimit, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DAD_COUNT, + dad_count, CTLFLAG_RW, &ip6_dad_count, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_AUTO_FLOWLABEL, + auto_flowlabel, CTLFLAG_RW, &ip6_auto_flowlabel, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFMCASTHLIM, + defmcasthlim, CTLFLAG_RW, &ip6_defmcasthlim, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_GIF_HLIM, + gifhlim, CTLFLAG_RW, &ip6_gif_hlim, 0, ""); +SYSCTL_STRING(_net_inet6_ip6, IPV6CTL_KAME_VERSION, + kame_version, CTLFLAG_RD, __KAME_VERSION, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USE_DEPRECATED, + use_deprecated, CTLFLAG_RW, &ip6_use_deprecated, 0, ""); +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RR_PRUNE, + 
rr_prune, CTLFLAG_RW, &ip6_rr_prune, 0, ""); +#if MAPPED_ADDR_ENABLED +SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAPPED_ADDR, + mapped_addr, CTLFLAG_RW, &ip6_mapped_addr_on, 0, ""); +#endif /* MAPPED_ADDR_ENABLED */ + +/* net.inet6.icmp6 */ +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRACCEPT, + rediraccept, CTLFLAG_RW, &icmp6_rediraccept, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRTIMEOUT, + redirtimeout, CTLFLAG_RW, &icmp6_redirtimeout, 0, ""); +SYSCTL_STRUCT(_net_inet6_icmp6, ICMPV6CTL_STATS, stats, CTLFLAG_RD, + &icmp6stat, icmp6stat, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ERRRATELIMIT, + errratelimit, CTLFLAG_RW, &icmp6errratelim, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_PRUNE, + nd6_prune, CTLFLAG_RW, &nd6_prune, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_DELAY, + nd6_delay, CTLFLAG_RW, &nd6_delay, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_UMAXTRIES, + nd6_umaxtries, CTLFLAG_RW, &nd6_umaxtries, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_MMAXTRIES, + nd6_mmaxtries, CTLFLAG_RW, &nd6_mmaxtries, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_USELOOPBACK, + nd6_useloopback, CTLFLAG_RW, &nd6_useloopback, 0, ""); +//SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_PROXYALL, +// nd6_proxyall, CTLFLAG_RW, &nd6_proxyall, 0, ""); +SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_NODEINFO, + nodeinfo, CTLFLAG_RW, &icmp6_nodeinfo, 0, ""); + + +#if defined(__FreeBSD__) && __FreeBSD__ < 3 +/* net.inet6.udp6 */ +SYSCTL_INT(_net_inet6_udp6, UDP6CTL_SENDMAX, + sendmax, CTLFLAG_RW, &udp6_sendspace, 0, ""); +SYSCTL_INT(_net_inet6_udp6, UDP6CTL_RECVSPACE, + recvspace, CTLFLAG_RW, &udp6_recvspace, 0, ""); + +/* net.inet6.tcp6 */ +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_MSSDFLT, + mssdflt, CTLFLAG_RW, &tcp6_mssdflt, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_DO_RFC1323, + do_rfc1323, CTLFLAG_RW, &tcp6_do_rfc1323, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_KEEPIDLE, + keepidle, CTLFLAG_RW, &tcp6_keepidle, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, 
TCP6CTL_KEEPINTVL, + keepintvl, CTLFLAG_RW, &tcp6_keepintvl, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_KEEPCNT, + keepcnt, CTLFLAG_RW, &tcp6_keepcnt, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_MAXPERSISTIDLE, + maxpersistidle, CTLFLAG_RW, &tcp6_maxpersistidle, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_SENDSPACE, + sendspace, CTLFLAG_RW, &tcp6_sendspace, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_RECVSPACE, + recvspace, CTLFLAG_RW, &tcp6_recvspace, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_CONNTIMEO, + conntimeo, CTLFLAG_RW, &tcp6_conntimeo, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_PMTU, + pmtu, CTLFLAG_RW, &tcp6_pmtu, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_PMTU_EXPIRE, + pmtu_expire, CTLFLAG_RW, &pmtu_expire, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_PMTU_PROBE, + pmtu_probe, CTLFLAG_RW, &pmtu_probe, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_43MAXSEG, + pmtu_43maxseg, CTLFLAG_RW, &tcp6_43maxseg, 0, ""); +SYSCTL_STRUCT(_net_inet6_tcp6, TCP6CTL_STATS, stats, CTLFLAG_RD, + &tcp6stat, tcp6stat, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_SYN_CACHE_LIMIT, + syn_cache_limit, CTLFLAG_RW, &tcp6_syn_cache_limit, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_SYN_BUCKET_LIMIT, + syn_bucket_limit, CTLFLAG_RW, &tcp6_syn_bucket_limit, 0, ""); +SYSCTL_INT(_net_inet6_tcp6, TCP6CTL_SYN_CACHE_INTER, + syn_cache_interval, CTLFLAG_RW, &tcp6_syn_cache_interval, 0, ""); +#endif /* !(defined(__FreeBSD__) && __FreeBSD__ >= 3) */ + +#endif /* __FreeBSD__ */ diff --git a/bsd/netinet6/in6_rmx.c b/bsd/netinet6/in6_rmx.c new file mode 100644 index 000000000..f94f673dd --- /dev/null +++ b/bsd/netinet6/in6_rmx.c @@ -0,0 +1,518 @@ +/* $KAME: in6_rmx.c,v 1.6 2000/03/25 07:23:45 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright 1994, 1995 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. 
makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* + * This code does two things necessary for the enhanced TCP metrics to + * function in a useful manner: + * 1) It marks all non-host routes as `cloning', thus ensuring that + * every actual reference to such a route actually gets turned + * into a reference to a host route to the specific destination + * requested. + * 2) When such routes lose all their references, it arranges for them + * to be deleted in some random collection of circumstances, so that + * a large quantity of stale routing data is not kept in kernel memory + * indefinitely. See in6_rtqtimo() below for the exact mechanism. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#if defined(__APPLE__) +#include +#endif +#include + +#include +#include + +#include + +#if !defined(__APPLE__) +#include +#include +#include +#include +#else +#include +#include +#include +#include +#endif + +#if !defined(__APPLE__) +#define tcp_sendspace tcp6_sendspace +#define tcp_recvspace tcp6_recvspace +#define time_second time.tv_sec +#define tvtohz hzto +#endif + +extern int in6_inithead __P((void **head, int off)); + +#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */ + +/* + * Do what we need to do when inserting a route. + */ +static struct radix_node * +in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, + struct radix_node *treenodes) +{ + struct rtentry *rt = (struct rtentry *)treenodes; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)rt_key(rt); + struct radix_node *ret; + + /* + * For IPv6, all unicast non-host routes are automatically cloning. + */ + if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) + rt->rt_flags |= RTF_MULTICAST; + + if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) { + rt->rt_flags |= RTF_PRCLONING; + } + + /* + * A little bit of help for both IPv6 output and input: + * For local addresses, we make sure that RTF_LOCAL is set, + * with the thought that this might one day be used to speed up + * ip_input(). + * + * We also mark routes to multicast addresses as such, because + * it's easy to do and might be useful (but this is much more + * dubious since it's so easy to inspect the address). (This + * is done above.) + * + * XXX + * should elaborate the code. + */ + if (rt->rt_flags & RTF_HOST) { + if (IN6_ARE_ADDR_EQUAL(&satosin6(rt->rt_ifa->ifa_addr) + ->sin6_addr, + &sin6->sin6_addr)) { + rt->rt_flags |= RTF_LOCAL; + } + } + + /* + * We also specify a send and receive pipe size for every + * route added, to help TCP a bit. 
TCP doesn't actually + * want a true pipe size, which would be prohibitive in memory + * costs and is hard to compute anyway; it simply uses these + * values to size its buffers. So, we fill them in with the + * same values that TCP would have used anyway, and allow the + * installing program or the link layer to override these values + * as it sees fit. This will hopefully allow TCP more + * opportunities to save its ssthresh value. + */ + if (!rt->rt_rmx.rmx_sendpipe && !(rt->rt_rmx.rmx_locks & RTV_SPIPE)) + rt->rt_rmx.rmx_sendpipe = tcp_sendspace; + + if (!rt->rt_rmx.rmx_recvpipe && !(rt->rt_rmx.rmx_locks & RTV_RPIPE)) + rt->rt_rmx.rmx_recvpipe = tcp_recvspace; + + if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU) + && rt->rt_ifp) + rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; + + ret = rn_addroute(v_arg, n_arg, head, treenodes); + if (ret == NULL && rt->rt_flags & RTF_HOST) { + struct rtentry *rt2; + /* + * We are trying to add a host route, but can't. + * Find out if it is because of an + * ARP entry and delete it if so. + */ + rt2 = rtalloc1((struct sockaddr *)sin6, 0, + RTF_CLONING | RTF_PRCLONING); + if (rt2) { + if (rt2->rt_flags & RTF_LLINFO && + rt2->rt_flags & RTF_HOST && + rt2->rt_gateway && + rt2->rt_gateway->sa_family == AF_LINK) { + rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt2), + rt2->rt_gateway, + rt_mask(rt2), rt2->rt_flags, 0); + ret = rn_addroute(v_arg, n_arg, head, + treenodes); + } + RTFREE(rt2); + } + } else if (ret == NULL && rt->rt_flags & RTF_CLONING) { + struct rtentry *rt2; + /* + * We are trying to add a net route, but can't. + * The following case should be allowed, so we'll make a + * special check for this: + * Two IPv6 addresses with the same prefix is assigned + * to a single interrface. + * # ifconfig if0 inet6 3ffe:0501::1 prefix 64 alias (*1) + * # ifconfig if0 inet6 3ffe:0501::2 prefix 64 alias (*2) + * In this case, (*1) and (*2) want to add the same + * net route entry, 3ffe:0501:: -> if0. 
+ * This case should not raise an error. + */ + rt2 = rtalloc1((struct sockaddr *)sin6, 0, + RTF_CLONING | RTF_PRCLONING); + if (rt2) { + if ((rt2->rt_flags & (RTF_CLONING|RTF_HOST|RTF_GATEWAY)) + == RTF_CLONING + && rt2->rt_gateway + && rt2->rt_gateway->sa_family == AF_LINK + && rt2->rt_ifp == rt->rt_ifp) { + ret = rt2->rt_nodes; + } + RTFREE(rt2); + } + } + return ret; +} + +/* + * This code is the inverse of in6_clsroute: on first reference, if we + * were managing the route, stop doing so and set the expiration timer + * back off again. + */ +static struct radix_node * +in6_matroute(void *v_arg, struct radix_node_head *head) +{ + struct radix_node *rn = rn_match(v_arg, head); + struct rtentry *rt = (struct rtentry *)rn; + + if (rt && rt->rt_refcnt == 0) { /* this is first reference */ + if (rt->rt_flags & RTPRF_OURS) { + rt->rt_flags &= ~RTPRF_OURS; + rt->rt_rmx.rmx_expire = 0; + } + } + return rn; +} + +static int rtq_reallyold = 60*60; + /* one hour is ``really old'' */ + +static int rtq_minreallyold = 10; + /* never automatically crank down to less */ + +static int rtq_toomany = 128; + /* 128 cached routes is ``too many'' */ + + +/* + * On last reference drop, mark the route as belong to us so that it can be + * timed out. + */ +static void +in6_clsroute(struct radix_node *rn, struct radix_node_head *head) +{ + struct rtentry *rt = (struct rtentry *)rn; + + if (!(rt->rt_flags & RTF_UP)) + return; /* prophylactic measures */ + + if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) + return; + + if ((rt->rt_flags & (RTF_WASCLONED | RTPRF_OURS)) + != RTF_WASCLONED) + return; + + /* + * As requested by David Greenman: + * If rtq_reallyold is 0, just delete the route without + * waiting for a timeout cycle to kill it. 
+ */ + if (rtq_reallyold != 0) { + rt->rt_flags |= RTPRF_OURS; + rt->rt_rmx.rmx_expire = time_second + rtq_reallyold; + } else { + rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), + rt->rt_flags, 0); + } +} + +struct rtqk_arg { + struct radix_node_head *rnh; + int mode; + int updating; + int draining; + int killed; + int found; + time_t nextstop; +}; + +/* + * Get rid of old routes. When draining, this deletes everything, even when + * the timeout is not expired yet. When updating, this makes sure that + * nothing has a timeout longer than the current value of rtq_reallyold. + */ +static int +in6_rtqkill(struct radix_node *rn, void *rock) +{ + struct rtqk_arg *ap = rock; + struct rtentry *rt = (struct rtentry *)rn; + int err; + + if (rt->rt_flags & RTPRF_OURS) { + ap->found++; + + if (ap->draining || rt->rt_rmx.rmx_expire <= time_second) { + if (rt->rt_refcnt > 0) + panic("rtqkill route really not free"); + + err = rtrequest(RTM_DELETE, + (struct sockaddr *)rt_key(rt), + rt->rt_gateway, rt_mask(rt), + rt->rt_flags, 0); + if (err) { + log(LOG_WARNING, "in6_rtqkill: error %d", err); + } else { + ap->killed++; + } + } else { + if (ap->updating + && (rt->rt_rmx.rmx_expire - time_second + > rtq_reallyold)) { + rt->rt_rmx.rmx_expire = time_second + + rtq_reallyold; + } + ap->nextstop = lmin(ap->nextstop, + rt->rt_rmx.rmx_expire); + } + } + + return 0; +} + +#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */ +static int rtq_timeout = RTQ_TIMEOUT; + +static void +in6_rtqtimo(void *rock) +{ + struct radix_node_head *rnh = rock; + struct rtqk_arg arg; + struct timeval atv; + static time_t last_adjusted_timeout = 0; + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + arg.found = arg.killed = 0; + arg.rnh = rnh; + arg.nextstop = time_second + rtq_timeout; + arg.draining = arg.updating = 0; + s = splnet(); + rnh->rnh_walktree(rnh, in6_rtqkill, &arg); + splx(s); + + 
/* + * Attempt to be somewhat dynamic about this: + * If there are ``too many'' routes sitting around taking up space, + * then crank down the timeout, and see if we can't make some more + * go away. However, we make sure that we will never adjust more + * than once in rtq_timeout seconds, to keep from cranking down too + * hard. + */ + if ((arg.found - arg.killed > rtq_toomany) + && (time_second - last_adjusted_timeout >= rtq_timeout) + && rtq_reallyold > rtq_minreallyold) { + rtq_reallyold = 2*rtq_reallyold / 3; + if (rtq_reallyold < rtq_minreallyold) { + rtq_reallyold = rtq_minreallyold; + } + + last_adjusted_timeout = time_second; +#if DIAGNOSTIC + log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d", + rtq_reallyold); +#endif + arg.found = arg.killed = 0; + arg.updating = 1; + s = splnet(); + rnh->rnh_walktree(rnh, in6_rtqkill, &arg); + splx(s); + } + + atv.tv_usec = 0; + atv.tv_sec = arg.nextstop; + timeout(in6_rtqtimo, rock, tvtohz(&atv)); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + +/* + * Age old PMTUs. 
+ */ +struct mtuex_arg { + struct radix_node_head *rnh; + time_t nextstop; +}; + +static int +in6_mtuexpire(struct radix_node *rn, void *rock) +{ + struct rtentry *rt = (struct rtentry *)rn; + struct mtuex_arg *ap = rock; + + /* sanity */ + if (!rt) + panic("rt == NULL in in6_mtuexpire"); + + if (rt->rt_rmx.rmx_expire && !(rt->rt_flags & RTF_PROBEMTU)) { + if (rt->rt_rmx.rmx_expire <= time_second) { + rt->rt_flags |= RTF_PROBEMTU; + } else { + ap->nextstop = lmin(ap->nextstop, + rt->rt_rmx.rmx_expire); + } + } + + return 0; +} + +#define MTUTIMO_DEFAULT (60*1) + +static void +in6_mtutimo(void *rock) +{ + struct radix_node_head *rnh = rock; + struct mtuex_arg arg; + struct timeval atv; + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + arg.rnh = rnh; + arg.nextstop = time_second + MTUTIMO_DEFAULT; + s = splnet(); + rnh->rnh_walktree(rnh, in6_mtuexpire, &arg); + splx(s); + + atv.tv_usec = 0; + atv.tv_sec = arg.nextstop; + if (atv.tv_sec < time_second) { + printf("invalid mtu expiration time on routing table\n"); + arg.nextstop = time_second + 30; /*last resort*/ + } + timeout(in6_mtutimo, rock, tvtohz(&atv)); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + +#if 0 +void +in6_rtqdrain() +{ + struct radix_node_head *rnh = rt_tables[AF_INET6]; + struct rtqk_arg arg; + int s; + arg.found = arg.killed = 0; + arg.rnh = rnh; + arg.nextstop = 0; + arg.draining = 1; + arg.updating = 0; + s = splnet(); + rnh->rnh_walktree(rnh, in6_rtqkill, &arg); + splx(s); +} +#endif + +/* + * Initialize our routing tree. + */ +int +in6_inithead(void **head, int off) +{ + struct radix_node_head *rnh; + + if (!rn_inithead(head, off)) + return 0; + + if (head != (void **)&rt_tables[AF_INET6]) /* BOGUS! 
*/ + return 1; /* only do this for the real routing table */ + + rnh = *head; + rnh->rnh_addaddr = in6_addroute; + rnh->rnh_matchaddr = in6_matroute; + rnh->rnh_close = in6_clsroute; + in6_rtqtimo(rnh); /* kick off timeout first time */ + in6_mtutimo(rnh); /* kick off timeout first time */ + return 1; +} diff --git a/bsd/netinet6/in6_src.c b/bsd/netinet6/in6_src.c new file mode 100644 index 000000000..ff60c18e0 --- /dev/null +++ b/bsd/netinet6/in6_src.c @@ -0,0 +1,495 @@ +/* $KAME: in6_src.c,v 1.10 2000/03/28 09:02:23 k-sugyou Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_pcb.c 8.2 (Berkeley) 1/4/94 + */ + +/* for MIP6 */ +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#ifdef __NetBSD__ +#include "opt_ipsec.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 4) +#include +#endif +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#if !(defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802)) +#include +#endif +#include +#include + +#include + +#ifndef __bsdi__ +#include "loop.h" +#endif +#if defined(__NetBSD__) || defined(__OpenBSD__) +extern struct ifnet loif[NLOOP]; +#endif + +#if MIP6 +#include +#include + +extern struct nd_prefix *(*mip6_get_home_prefix_hook) __P((void)); +#endif + +/* + * Return an IPv6 address, which is the most appropriate for given + * destination and user specified options. + * If necessary, this function lookups the routing table and return + * an entry to the caller for later use. 
+ */ +struct in6_addr * +in6_selectsrc(dstsock, opts, mopts, ro, laddr, errorp) + struct sockaddr_in6 *dstsock; + struct ip6_pktopts *opts; + struct ip6_moptions *mopts; + struct route_in6 *ro; + struct in6_addr *laddr; + int *errorp; +{ + struct in6_addr *dst; + struct in6_ifaddr *ia6 = 0; + struct in6_pktinfo *pi = NULL; + + dst = &dstsock->sin6_addr; + *errorp = 0; + + /* + * If the source address is explicitly specified by the caller, + * use it. + */ + if (opts && (pi = opts->ip6po_pktinfo) && + !IN6_IS_ADDR_UNSPECIFIED(&pi->ipi6_addr)) + return(&pi->ipi6_addr); + + /* + * If the source address is not specified but the socket(if any) + * is already bound, use the bound address. + */ + if (laddr && !IN6_IS_ADDR_UNSPECIFIED(laddr)) + return(laddr); + + /* + * If the caller doesn't specify the source address but + * the outgoing interface, use an address associated with + * the interface. + */ + if (pi && pi->ipi6_ifindex) { + /* XXX boundary check is assumed to be already done. */ + ia6 = in6_ifawithscope(ifindex2ifnet[pi->ipi6_ifindex], + dst); + if (ia6 == 0) { + *errorp = EADDRNOTAVAIL; + return(0); + } + return(&satosin6(&ia6->ia_addr)->sin6_addr); + } + + /* + * If the destination address is a link-local unicast address or + * a multicast address, and if the outgoing interface is specified + * by the sin6_scope_id filed, use an address associated with the + * interface. + * XXX: We're now trying to define more specific semantics of + * sin6_scope_id field, so this part will be rewritten in + * the near future. + */ + if ((IN6_IS_ADDR_LINKLOCAL(dst) || IN6_IS_ADDR_MULTICAST(dst)) && + dstsock->sin6_scope_id) { + /* + * I'm not sure if boundary check for scope_id is done + * somewhere... + */ + if (dstsock->sin6_scope_id < 0 || + if_index < dstsock->sin6_scope_id) { + *errorp = ENXIO; /* XXX: better error? 
*/ + return(0); + } + ia6 = in6_ifawithscope(ifindex2ifnet[dstsock->sin6_scope_id], + dst); + if (ia6 == 0) { + *errorp = EADDRNOTAVAIL; + return(0); + } + return(&satosin6(&ia6->ia_addr)->sin6_addr); + } + + /* + * If the destination address is a multicast address and + * the outgoing interface for the address is specified + * by the caller, use an address associated with the interface. + * There is a sanity check here; if the destination has node-local + * scope, the outgoing interfacde should be a loopback address. + * Even if the outgoing interface is not specified, we also + * choose a loopback interface as the outgoing interface. + */ + if (IN6_IS_ADDR_MULTICAST(dst)) { + struct ifnet *ifp = mopts ? mopts->im6o_multicast_ifp : NULL; +#ifdef __bsdi__ +#if _BSDI_VERSION >= 199802 + extern struct ifnet *loifp; +#else + extern struct ifnet loif; + struct ifnet *loifp = &loif; +#endif +#endif + + if (ifp == NULL && IN6_IS_ADDR_MC_NODELOCAL(dst)) { +#ifdef __bsdi__ + ifp = loifp; +#else + ifp = &loif[0]; +#endif + } + + if (ifp) { + ia6 = in6_ifawithscope(ifp, dst); + if (ia6 == 0) { + *errorp = EADDRNOTAVAIL; + return(0); + } + return(&satosin6(&ia6->ia_addr)->sin6_addr); + } + } + + /* + * If the next hop address for the packet is specified + * by caller, use an address associated with the route + * to the next hop. + */ + { + struct sockaddr_in6 *sin6_next; + struct rtentry *rt; + + if (opts && opts->ip6po_nexthop) { + sin6_next = satosin6(opts->ip6po_nexthop); + rt = nd6_lookup(&sin6_next->sin6_addr, 1, NULL); + if (rt) { + ia6 = in6_ifawithscope(rt->rt_ifp, dst); + if (ia6 == 0) + ia6 = ifatoia6(rt->rt_ifa); + } + if (ia6 == 0) { + *errorp = EADDRNOTAVAIL; + return(0); + } + return(&satosin6(&ia6->ia_addr)->sin6_addr); + } + } + +#if MIP6 + /* + * This is needed to assure that the Home Address is used for + * outgoing packets when not at home. We can't choose any other + * address if we want to keep connections up during movement. 
+ */ + if (mip6_get_home_prefix_hook) { /* Only Mobile Node */ + struct nd_prefix *pr; + if ((pr = (*mip6_get_home_prefix_hook)()) && + !IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) { + if (in6_addrscope(dst) == + in6_addrscope(&pr->ndpr_addr)) { +#if MIP6_DEBUG + /* Noisy but useful */ + mip6_debug("%s: Local address %s is chosen " + "for pcb to dest %s.\n", + __FUNCTION__, + ip6_sprintf(&pr->ndpr_addr), + ip6_sprintf(dst)); +#endif + return(&pr->ndpr_addr); + } + } + } +#endif /* MIP6 */ + + /* + * If route is known or can be allocated now, + * our src addr is taken from the i/f, else punt. + */ + if (ro) { + if (ro->ro_rt && + !IN6_ARE_ADDR_EQUAL(&satosin6(&ro->ro_dst)->sin6_addr, dst)) { + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + } + if (ro->ro_rt == (struct rtentry *)0 || + ro->ro_rt->rt_ifp == (struct ifnet *)0) { + /* No route yet, so try to acquire one */ + bzero(&ro->ro_dst, sizeof(struct sockaddr_in6)); + ro->ro_dst.sin6_family = AF_INET6; + ro->ro_dst.sin6_len = sizeof(struct sockaddr_in6); + ro->ro_dst.sin6_addr = *dst; + if (IN6_IS_ADDR_MULTICAST(dst)) { +#if defined( __FreeBSD__) || defined (__APPLE__) + ro->ro_rt = rtalloc1(&((struct route *)ro) + ->ro_dst, 0, 0UL); +#else + ro->ro_rt = rtalloc1(&((struct route *)ro) + ->ro_dst, 0); +#endif /*__FreeBSD__*/ + } else { +#ifdef __bsdi__ /* bsdi needs rtcalloc to make a host route */ + rtcalloc((struct route *)ro); +#else + rtalloc((struct route *)ro); +#endif + } + } + + /* + * in_pcbconnect() checks out IFF_LOOPBACK to skip using + * the address. But we don't know why it does so. + * It is necessary to ensure the scope even for lo0 + * so doesn't check out IFF_LOOPBACK. + */ + + if (ro->ro_rt) { + ia6 = in6_ifawithscope(ro->ro_rt->rt_ifa->ifa_ifp, dst); + if (ia6 == 0) /* xxx scope error ?*/ + ia6 = ifatoia6(ro->ro_rt->rt_ifa); + } +#if 0 + /* + * xxx The followings are necessary? (kazu) + * I don't think so. 
+ * It's for SO_DONTROUTE option in IPv4.(jinmei) + */ + if (ia6 == 0) { + struct sockaddr_in6 sin6 = {sizeof(sin6), AF_INET6, 0}; + + sin6->sin6_addr = *dst; + + ia6 = ifatoia6(ifa_ifwithdstaddr(sin6tosa(&sin6))); + if (ia6 == 0) + ia6 = ifatoia6(ifa_ifwithnet(sin6tosa(&sin6))); + if (ia6 == 0) + return(0); + return(&satosin6(&ia6->ia_addr)->sin6_addr); + } +#endif /* 0 */ + if (ia6 == 0) { + *errorp = EHOSTUNREACH; /* no route */ + return(0); + } + return(&satosin6(&ia6->ia_addr)->sin6_addr); + } + + *errorp = EADDRNOTAVAIL; + return(0); +} + +/* + * Default hop limit selection. The precedence is as follows: + * 1. Hoplimit value specified via ioctl. + * 2. (If the outgoing interface is detected) the current + * hop limit of the interface specified by router advertisement. + * 3. The system default hoplimit. +*/ +#if HAVE_NRL_INPCB +#define in6pcb inpcb +#define in6p_hops inp_hops +#endif +int +in6_selecthlim(in6p, ifp) + struct in6pcb *in6p; + struct ifnet *ifp; +{ + if (in6p && in6p->in6p_hops >= 0) + return(in6p->in6p_hops); + else if (ifp) + return(nd_ifinfo[ifp->if_index].chlim); + else + return(ip6_defhlim); +} +#if HAVE_NRL_INPCB +#undef in6pcb +#undef in6p_hops +#endif + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__OpenBSD__) && !defined(__APPLE__) +/* + * Find an empty port and set it to the specified PCB. 
+ */ +#if HAVE_NRL_INPCB /* XXX: I really hate such ugly macros...(jinmei) */ +#define in6pcb inpcb +#define in6p_socket inp_socket +#define in6p_lport inp_lport +#define in6p_head inp_head +#define in6p_flags inp_flags +#define IN6PLOOKUP_WILDCARD INPLOOKUP_WILDCARD +#endif +int +in6_pcbsetport(laddr, in6p) + struct in6_addr *laddr; + struct in6pcb *in6p; +{ + struct socket *so = in6p->in6p_socket; + struct in6pcb *head = in6p->in6p_head; + u_int16_t last_port, lport = 0; + int wild = 0; + void *t; + u_int16_t min, max; +#ifdef __NetBSD__ + struct proc *p = curproc; /* XXX */ +#endif + + /* XXX: this is redundant when called from in6_pcbbind */ + if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0 && + ((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0 || + (so->so_options & SO_ACCEPTCONN) == 0)) + wild = IN6PLOOKUP_WILDCARD; + + if (in6p->in6p_flags & IN6P_LOWPORT) { +#ifdef __NetBSD__ + if (p == 0 || (suser(p->p_ucred, &p->p_acflag) != 0)) + return (EACCES); +#else + if ((so->so_state & SS_PRIV) == 0) + return (EACCES); +#endif + min = IPV6PORT_RESERVEDMIN; + max = IPV6PORT_RESERVEDMAX; + } else { + min = IPV6PORT_ANONMIN; + max = IPV6PORT_ANONMAX; + } + + /* value out of range */ + if (head->in6p_lport < min) + head->in6p_lport = min; + else if (head->in6p_lport > max) + head->in6p_lport = min; + last_port = head->in6p_lport; + goto startover; /*to randomize*/ + for (;;) { + lport = htons(head->in6p_lport); + if (IN6_IS_ADDR_V4MAPPED(laddr)) { +#if 0 + t = in_pcblookup_bind(&tcbtable, + (struct in_addr *)&in6p->in6p_laddr.s6_addr32[3], + lport); +#else + t = NULL; +#endif + } else { +#if HAVE_NRL_INPCB + /* XXX: ugly cast... 
*/ + t = in_pcblookup(head, (struct in_addr *)&zeroin6_addr, + 0, (struct in_addr *)laddr, + lport, wild | INPLOOKUP_IPV6); +#else + t = in6_pcblookup(head, &zeroin6_addr, 0, laddr, + lport, wild); +#endif + } + if (t == 0) + break; + startover: + if (head->in6p_lport >= max) + head->in6p_lport = min; + else + head->in6p_lport++; + if (head->in6p_lport == last_port) + return (EADDRINUSE); + } + + in6p->in6p_lport = lport; + return(0); /* success */ +} +#if HAVE_NRL_INPCB +#undef in6pcb +#undef in6p_socket +#undef in6p_lport +#undef in6p_head +#undef in6p_flags +#undef IN6PLOOKUP_WILDCARD +#endif +#endif /* !FreeBSD3 && !OpenBSD*/ diff --git a/bsd/netinet6/in6_var.h b/bsd/netinet6/in6_var.h new file mode 100644 index 000000000..c04e40310 --- /dev/null +++ b/bsd/netinet6/in6_var.h @@ -0,0 +1,671 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1985, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_var.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET6_IN6_VAR_H_ +#define _NETINET6_IN6_VAR_H_ + +/* + * Interface address, Internet version. One of these structures + * is allocated for each interface with an Internet address. + * The ifaddr structure contains the protocol-independent part + * of the structure and is assumed to be first. + */ + +/* + * pltime/vltime are just for future reference (required to implements 2 + * hour rule for hosts). they should never be modified by nd6_timeout or + * anywhere else. 
+ * userland -> kernel: accept pltime/vltime + * kernel -> userland: throw up everything + * in kernel: modify preferred/expire only + */ +struct in6_addrlifetime { + time_t ia6t_expire; /* valid lifetime expiration time */ + time_t ia6t_preferred; /* preferred lifetime expiration time */ + u_int32_t ia6t_vltime; /* valid lifetime */ + u_int32_t ia6t_pltime; /* prefix lifetime */ +}; + +struct in6_ifaddr { + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + struct sockaddr_in6 ia_addr; /* interface address */ + struct sockaddr_in6 ia_net; /* network number of interface */ + struct sockaddr_in6 ia_dstaddr; /* space for destination addr */ + struct sockaddr_in6 ia_prefixmask; /* prefix mask */ + u_int32_t ia_plen; /* prefix length */ + struct in6_ifaddr *ia_next; /* next in6 list of IP6 addresses */ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + LIST_HEAD(in6_multihead, in6_multi) ia6_multiaddrs; + /* list of multicast addresses */ +#endif + int ia6_flags; + + struct in6_addrlifetime ia6_lifetime; /* NULL = infty */ + struct ifprefix *ia6_ifpr; /* back pointer to ifprefix */ +}; + +/* + * IPv6 interface statistics, as defined in RFC2465 Ipv6IfStatsEntry (p12). 
+ */ +struct in6_ifstat { + u_quad_t ifs6_in_receive; /* # of total input datagram */ + u_quad_t ifs6_in_hdrerr; /* # of datagrams with invalid hdr */ + u_quad_t ifs6_in_toobig; /* # of datagrams exceeded MTU */ + u_quad_t ifs6_in_noroute; /* # of datagrams with no route */ + u_quad_t ifs6_in_addrerr; /* # of datagrams with invalid dst */ + u_quad_t ifs6_in_protounknown; /* # of datagrams with unknown proto */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_in_truncated; /* # of truncated datagrams */ + u_quad_t ifs6_in_discard; /* # of discarded datagrams */ + /* NOTE: fragment timeout is not here */ + u_quad_t ifs6_in_deliver; /* # of datagrams delivered to ULP */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_out_forward; /* # of datagrams forwarded */ + /* NOTE: increment on outgoing if */ + u_quad_t ifs6_out_request; /* # of outgoing datagrams from ULP */ + /* NOTE: does not include forwards */ + u_quad_t ifs6_out_discard; /* # of discarded datagrams */ + u_quad_t ifs6_out_fragok; /* # of datagrams fragmented */ + u_quad_t ifs6_out_fragfail; /* # of datagrams failed on fragment */ + u_quad_t ifs6_out_fragcreat; /* # of fragment datagrams */ + /* NOTE: this is # after fragment */ + u_quad_t ifs6_reass_reqd; /* # of incoming fragmented packets */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_reass_ok; /* # of reassembled packets */ + /* NOTE: this is # after reass */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_reass_fail; /* # of reass failures */ + /* NOTE: may not be packet count */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_in_mcast; /* # of inbound multicast datagrams */ + u_quad_t ifs6_out_mcast; /* # of outbound multicast datagrams */ +}; + +/* + * ICMPv6 interface statistics, as defined in RFC2466 Ipv6IfIcmpEntry. + * XXX: I'm not sure if this file is the right place for this structure... 
+ */ +struct icmp6_ifstat { + /* + * Input statistics + */ + /* ipv6IfIcmpInMsgs, total # of input messages */ + u_quad_t ifs6_in_msg; + /* ipv6IfIcmpInErrors, # of input error messages */ + u_quad_t ifs6_in_error; + /* ipv6IfIcmpInDestUnreachs, # of input dest unreach errors */ + u_quad_t ifs6_in_dstunreach; + /* ipv6IfIcmpInAdminProhibs, # of input administratively prohibited errs */ + u_quad_t ifs6_in_adminprohib; + /* ipv6IfIcmpInTimeExcds, # of input time exceeded errors */ + u_quad_t ifs6_in_timeexceed; + /* ipv6IfIcmpInParmProblems, # of input parameter problem errors */ + u_quad_t ifs6_in_paramprob; + /* ipv6IfIcmpInPktTooBigs, # of input packet too big errors */ + u_quad_t ifs6_in_pkttoobig; + /* ipv6IfIcmpInEchos, # of input echo requests */ + u_quad_t ifs6_in_echo; + /* ipv6IfIcmpInEchoReplies, # of input echo replies */ + u_quad_t ifs6_in_echoreply; + /* ipv6IfIcmpInRouterSolicits, # of input router solicitations */ + u_quad_t ifs6_in_routersolicit; + /* ipv6IfIcmpInRouterAdvertisements, # of input router advertisements */ + u_quad_t ifs6_in_routeradvert; + /* ipv6IfIcmpInNeighborSolicits, # of input neighbor solicitations */ + u_quad_t ifs6_in_neighborsolicit; + /* ipv6IfIcmpInNeighborAdvertisements, # of input neighbor advertisements */ + u_quad_t ifs6_in_neighboradvert; + /* ipv6IfIcmpInRedirects, # of input redirects */ + u_quad_t ifs6_in_redirect; + /* ipv6IfIcmpInGroupMembQueries, # of input MLD queries */ + u_quad_t ifs6_in_mldquery; + /* ipv6IfIcmpInGroupMembResponses, # of input MLD reports */ + u_quad_t ifs6_in_mldreport; + /* ipv6IfIcmpInGroupMembReductions, # of input MLD done */ + u_quad_t ifs6_in_mlddone; + + /* + * Output statistics. We should solve unresolved routing problem... 
+ */ + /* ipv6IfIcmpOutMsgs, total # of output messages */ + u_quad_t ifs6_out_msg; + /* ipv6IfIcmpOutErrors, # of output error messages */ + u_quad_t ifs6_out_error; + /* ipv6IfIcmpOutDestUnreachs, # of output dest unreach errors */ + u_quad_t ifs6_out_dstunreach; + /* ipv6IfIcmpOutAdminProhibs, # of output administratively prohibited errs */ + u_quad_t ifs6_out_adminprohib; + /* ipv6IfIcmpOutTimeExcds, # of output time exceeded errors */ + u_quad_t ifs6_out_timeexceed; + /* ipv6IfIcmpOutParmProblems, # of output parameter problem errors */ + u_quad_t ifs6_out_paramprob; + /* ipv6IfIcmpOutPktTooBigs, # of output packet too big errors */ + u_quad_t ifs6_out_pkttoobig; + /* ipv6IfIcmpOutEchos, # of output echo requests */ + u_quad_t ifs6_out_echo; + /* ipv6IfIcmpOutEchoReplies, # of output echo replies */ + u_quad_t ifs6_out_echoreply; + /* ipv6IfIcmpOutRouterSolicits, # of output router solicitations */ + u_quad_t ifs6_out_routersolicit; + /* ipv6IfIcmpOutRouterAdvertisements, # of output router advertisements */ + u_quad_t ifs6_out_routeradvert; + /* ipv6IfIcmpOutNeighborSolicits, # of output neighbor solicitations */ + u_quad_t ifs6_out_neighborsolicit; + /* ipv6IfIcmpOutNeighborAdvertisements, # of output neighbor advertisements */ + u_quad_t ifs6_out_neighboradvert; + /* ipv6IfIcmpOutRedirects, # of output redirects */ + u_quad_t ifs6_out_redirect; + /* ipv6IfIcmpOutGroupMembQueries, # of output MLD queries */ + u_quad_t ifs6_out_mldquery; + /* ipv6IfIcmpOutGroupMembResponses, # of output MLD reports */ + u_quad_t ifs6_out_mldreport; + /* ipv6IfIcmpOutGroupMembReductions, # of output MLD done */ + u_quad_t ifs6_out_mlddone; +}; + +struct in6_ifreq { + char ifr_name[IFNAMSIZ]; + union { + struct sockaddr_in6 ifru_addr; + struct sockaddr_in6 ifru_dstaddr; + short ifru_flags; + int ifru_flags6; + int ifru_metric; + caddr_t ifru_data; + struct in6_addrlifetime ifru_lifetime; + struct in6_ifstat ifru_stat; + struct icmp6_ifstat ifru_icmp6stat; + } ifr_ifru; +}; + 
+struct in6_aliasreq { + char ifra_name[IFNAMSIZ]; + struct sockaddr_in6 ifra_addr; + struct sockaddr_in6 ifra_dstaddr; + struct sockaddr_in6 ifra_prefixmask; + int ifra_flags; + struct in6_addrlifetime ifra_lifetime; +}; + +/* prefix type macro */ +#define IN6_PREFIX_ND 1 +#define IN6_PREFIX_RR 2 + +/* + * prefix related flags passed between kernel(NDP related part) and + * user land command(ifconfig) and daemon(rtadvd). + */ +struct in6_prflags { + struct prf_ra { + u_char onlink : 1; + u_char autonomous : 1; + u_char reserved : 6; + } prf_ra; + u_char prf_reserved1; + u_short prf_reserved2; + /* want to put this on 4byte offset */ + struct prf_rr { + u_char decrvalid : 1; + u_char decrprefd : 1; + u_char reserved : 6; + } prf_rr; + u_char prf_reserved3; + u_short prf_reserved4; +}; + +struct in6_prefixreq { + char ipr_name[IFNAMSIZ]; + u_char ipr_origin; + u_char ipr_plen; + u_int32_t ipr_vltime; + u_int32_t ipr_pltime; + struct in6_prflags ipr_flags; + struct sockaddr_in6 ipr_prefix; +}; + +#define PR_ORIG_RA 0 +#define PR_ORIG_RR 1 +#define PR_ORIG_STATIC 2 +#define PR_ORIG_KERNEL 3 + +#define ipr_raf_onlink ipr_flags.prf_ra.onlink +#define ipr_raf_auto ipr_flags.prf_ra.autonomous + +#define ipr_statef_onlink ipr_flags.prf_state.onlink + +#define ipr_rrf_decrvalid ipr_flags.prf_rr.decrvalid +#define ipr_rrf_decrprefd ipr_flags.prf_rr.decrprefd + +struct in6_rrenumreq { + char irr_name[IFNAMSIZ]; + u_char irr_origin; + u_char irr_m_len; /* match len for matchprefix */ + u_char irr_m_minlen; /* minlen for matching prefix */ + u_char irr_m_maxlen; /* maxlen for matching prefix */ + u_char irr_u_uselen; /* uselen for adding prefix */ + u_char irr_u_keeplen; /* keeplen from matching prefix */ + struct irr_raflagmask { + u_char onlink : 1; + u_char autonomous : 1; + u_char reserved : 6; + } irr_raflagmask; + u_int32_t irr_vltime; + u_int32_t irr_pltime; + struct in6_prflags irr_flags; + struct sockaddr_in6 irr_matchprefix; + struct sockaddr_in6 irr_useprefix; +}; + 
+#define irr_raf_mask_onlink irr_raflagmask.onlink +#define irr_raf_mask_auto irr_raflagmask.autonomous +#define irr_raf_mask_reserved irr_raflagmask.reserved + +#define irr_raf_onlink irr_flags.prf_ra.onlink +#define irr_raf_auto irr_flags.prf_ra.autonomous + +#define irr_statef_onlink irr_flags.prf_state.onlink + +#define irr_rrf irr_flags.prf_rr +#define irr_rrf_decrvalid irr_flags.prf_rr.decrvalid +#define irr_rrf_decrprefd irr_flags.prf_rr.decrprefd + +/* + * Given a pointer to an in6_ifaddr (ifaddr), + * return a pointer to the addr as a sockaddr_in6 + */ +#define IA6_IN6(ia) (&((ia)->ia_addr.sin6_addr)) +#define IA6_DSTIN6(ia) (&((ia)->ia_dstaddr.sin6_addr)) +#define IA6_MASKIN6(ia) (&((ia)->ia_prefixmask.sin6_addr)) +#define IA6_SIN6(ia) (&((ia)->ia_addr)) +#define IA6_DSTSIN6(ia) (&((ia)->ia_dstaddr)) +#define IFA_IN6(x) (&((struct sockaddr_in6 *)((x)->ifa_addr))->sin6_addr) +#define IFA_DSTIN6(x) (&((struct sockaddr_in6 *)((x)->ifa_dstaddr))->sin6_addr) + +#define IFPR_IN6(x) (&((struct sockaddr_in6 *)((x)->ifpr_prefix))->sin6_addr) + +#ifdef KERNEL +#define IN6_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \ + (((d)->s6_addr32[0] ^ (a)->s6_addr32[0]) & (m)->s6_addr32[0]) == 0 && \ + (((d)->s6_addr32[1] ^ (a)->s6_addr32[1]) & (m)->s6_addr32[1]) == 0 && \ + (((d)->s6_addr32[2] ^ (a)->s6_addr32[2]) & (m)->s6_addr32[2]) == 0 && \ + (((d)->s6_addr32[3] ^ (a)->s6_addr32[3]) & (m)->s6_addr32[3]) == 0 ) +#endif + +#define SIOCSIFADDR_IN6 _IOW('i', 12, struct in6_ifreq) +#define SIOCGIFADDR_IN6 _IOWR('i', 33, struct in6_ifreq) + +#ifdef KERNEL +/* + * SIOCSxxx ioctls should be unused (see comments in in6.c), but + * we do not shift numbers for binary compatibility. 
+ */ +#define SIOCSIFDSTADDR_IN6 _IOW('i', 14, struct in6_ifreq) +#define SIOCSIFNETMASK_IN6 _IOW('i', 22, struct in6_ifreq) +#endif + +#define SIOCGIFDSTADDR_IN6 _IOWR('i', 34, struct in6_ifreq) +#define SIOCGIFNETMASK_IN6 _IOWR('i', 37, struct in6_ifreq) + +#define SIOCDIFADDR_IN6 _IOW('i', 25, struct in6_ifreq) +#define SIOCAIFADDR_IN6 _IOW('i', 26, struct in6_aliasreq) + +#define SIOCSIFPHYADDR_IN6 _IOW('i', 70, struct in6_aliasreq) +#define SIOCGIFPSRCADDR_IN6 _IOWR('i', 71, struct in6_ifreq) +#define SIOCGIFPDSTADDR_IN6 _IOWR('i', 72, struct in6_ifreq) + +#define SIOCGIFAFLAG_IN6 _IOWR('i', 73, struct in6_ifreq) + +#define SIOCGDRLST_IN6 _IOWR('i', 74, struct in6_drlist) +#define SIOCGPRLST_IN6 _IOWR('i', 75, struct in6_prlist) +#define SIOCGIFINFO_IN6 _IOWR('i', 76, struct in6_ndireq) +#define SIOCSNDFLUSH_IN6 _IOWR('i', 77, struct in6_ifreq) +#define SIOCGNBRINFO_IN6 _IOWR('i', 78, struct in6_nbrinfo) +#define SIOCSPFXFLUSH_IN6 _IOWR('i', 79, struct in6_ifreq) +#define SIOCSRTRFLUSH_IN6 _IOWR('i', 80, struct in6_ifreq) + +#define SIOCGIFALIFETIME_IN6 _IOWR('i', 81, struct in6_ifreq) +#define SIOCSIFALIFETIME_IN6 _IOWR('i', 82, struct in6_ifreq) +#define SIOCGIFSTAT_IN6 _IOWR('i', 83, struct in6_ifreq) +#define SIOCGIFSTAT_ICMP6 _IOWR('i', 84, struct in6_ifreq) + +#define SIOCSDEFIFACE_IN6 _IOWR('i', 85, struct in6_ndifreq) +#define SIOCGDEFIFACE_IN6 _IOWR('i', 86, struct in6_ndifreq) + +#define SIOCSIFINFO_FLAGS _IOWR('i', 87, struct in6_ndireq) /* XXX */ + +#define SIOCSIFPREFIX_IN6 _IOW('i', 100, struct in6_prefixreq) /* set */ +#define SIOCGIFPREFIX_IN6 _IOWR('i', 101, struct in6_prefixreq) /* get */ +#define SIOCDIFPREFIX_IN6 _IOW('i', 102, struct in6_prefixreq) /* del */ +#define SIOCAIFPREFIX_IN6 _IOW('i', 103, struct in6_rrenumreq) /* add */ +#define SIOCCIFPREFIX_IN6 _IOW('i', 104, \ + struct in6_rrenumreq) /* change */ +#define SIOCSGIFPREFIX_IN6 _IOW('i', 105, \ + struct in6_rrenumreq) /* set global */ + +#define SIOCGETSGCNT_IN6 _IOWR('u', 106, \ 
+ struct sioc_sg_req6) /* get s,g pkt cnt */ +#define SIOCGETMIFCNT_IN6 _IOWR('u', 107, \ + struct sioc_mif_req6) /* get pkt cnt per if */ + +#define IN6_IFF_ANYCAST 0x01 /* anycast address */ +#define IN6_IFF_TENTATIVE 0x02 /* tentative address */ +#define IN6_IFF_DUPLICATED 0x04 /* DAD detected duplicate */ +#define IN6_IFF_DETACHED 0x08 /* may be detached from the link */ +#define IN6_IFF_DEPRECATED 0x10 /* deprecated address */ +#define IN6_IFF_NODAD 0x20 /* don't perform DAD on this address + * (used only at first SIOC* call) + */ + +/* do not input/output */ +#define IN6_IFF_NOTREADY (IN6_IFF_TENTATIVE|IN6_IFF_DUPLICATED) + +#ifdef KERNEL +#define IN6_ARE_SCOPE_CMP(a,b) ((a)-(b)) +#define IN6_ARE_SCOPE_EQUAL(a,b) ((a)==(b)) +#endif + +#ifdef KERNEL +extern struct in6_ifaddr *in6_ifaddr; + +extern struct in6_ifstat **in6_ifstat; +extern size_t in6_ifstatmax; +extern struct icmp6stat icmp6stat; +extern struct icmp6_ifstat **icmp6_ifstat; +extern size_t icmp6_ifstatmax; +#define in6_ifstat_inc(ifp, tag) \ +do { \ + if ((ifp) && (ifp)->if_index <= if_index \ + && (ifp)->if_index < in6_ifstatmax \ + && in6_ifstat && in6_ifstat[(ifp)->if_index]) { \ + in6_ifstat[(ifp)->if_index]->tag++; \ + } \ +} while (0) + +extern struct ifqueue ip6intrq; /* IP6 packet input queue */ +extern struct in6_addr zeroin6_addr; +extern u_char inet6ctlerrmap[]; +extern unsigned long in6_maxmtu; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_IPMADDR); +#endif /* MALLOC_DECLARE */ +#endif + +/* + * Macro for finding the internet address structure (in6_ifaddr) corresponding + * to a given interface (ifnet structure). 
+ */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + +#define IFP_TO_IA6(ifp, ia) \ +/* struct ifnet *ifp; */ \ +/* struct in6_ifaddr *ia; */ \ +do { \ + struct ifaddr *ifa; \ + for (ifa = (ifp)->if_addrlist; ifa; ifa = ifa->ifa_next) { \ + if (!ifa->ifa_addr) \ + continue; \ + if (ifa->ifa_addr->sa_family == AF_INET6) \ + break; \ + } \ + (ia) = (struct in6_ifaddr *)ifa; \ +} while (0) + +#else + +#define IFP_TO_IA6(ifp, ia) \ +/* struct ifnet *ifp; */ \ +/* struct in6_ifaddr *ia; */ \ +do { \ + struct ifaddr *ifa; \ + for (ifa = (ifp)->if_addrlist.tqh_first; ifa; ifa = ifa->ifa_list.tqe_next) { \ + if (!ifa->ifa_addr) \ + continue; \ + if (ifa->ifa_addr->sa_family == AF_INET6) \ + break; \ + } \ + (ia) = (struct in6_ifaddr *)ifa; \ +} while (0) +#endif + +#endif /* KERNEL */ + +/* + * Multi-cast membership entry. One for each group/ifp that a PCB + * belongs to. + */ +struct in6_multi_mship { + struct in6_multi *i6mm_maddr; /* Multicast address pointer */ + LIST_ENTRY(in6_multi_mship) i6mm_chain; /* multicast options chain */ +}; + +struct in6_multi { + LIST_ENTRY(in6_multi) in6m_entry; /* list glue */ + struct in6_addr in6m_addr; /* IP6 multicast address */ + struct ifnet *in6m_ifp; /* back pointer to ifnet */ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + struct in6_ifaddr *in6m_ia; /* back pointer to in6_ifaddr */ +#else + struct ifmultiaddr *in6m_ifma; /* back pointer to ifmultiaddr */ +#endif + u_int in6m_refcount; /* # membership claims by sockets */ + u_int in6m_state; /* state of the membership */ + u_int in6m_timer; /* MLD6 listener report timer */ +}; + +#ifdef KERNEL +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +extern LIST_HEAD(in6_multihead, in6_multi) in6_multihead; +#endif + +/* + * Structure used by macros below to remember position when stepping through + * all of the in6_multi records. 
+ */ +struct in6_multistep { + struct in6_ifaddr *i_ia; + struct in6_multi *i_in6m; +}; + +/* + * Macros for looking up the in6_multi record for a given IP6 multicast + * address on a given interface. If no matching record is found, "in6m" + * returns NULL. + */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + +#define IN6_LOOKUP_MULTI(addr, ifp, in6m) \ +/* struct in6_addr addr; */ \ +/* struct ifnet *ifp; */ \ +/* struct in6_multi *in6m; */ \ +do { \ + register struct ifmultiaddr *ifma; \ + for (ifma = (ifp)->if_multiaddrs.lh_first; ifma; \ + ifma = ifma->ifma_link.le_next) { \ + if (ifma->ifma_addr->sa_family == AF_INET6 \ + && IN6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)ifma->ifma_addr)->sin6_addr, \ + &(addr))) \ + break; \ + } \ + (in6m) = (struct in6_multi *)(ifma ? ifma->ifma_protospec : 0); \ +} while(0) + +/* + * Macro to step through all of the in6_multi records, one at a time. + * The current position is remembered in "step", which the caller must + * provide. IN6_FIRST_MULTI(), below, must be called to initialize "step" + * and get the first record. Both macros return a NULL "in6m" when there + * are no remaining records. 
+ */ +#define IN6_NEXT_MULTI(step, in6m) \ +/* struct in6_multistep step; */ \ +/* struct in6_multi *in6m; */ \ +do { \ + if (((in6m) = (step).i_in6m) != NULL) \ + (step).i_in6m = (step).i_in6m->in6m_entry.le_next; \ +} while(0) + +#define IN6_FIRST_MULTI(step, in6m) \ +/* struct in6_multistep step; */ \ +/* struct in6_multi *in6m */ \ +do { \ + (step).i_in6m = in6_multihead.lh_first; \ + IN6_NEXT_MULTI((step), (in6m)); \ +} while(0) + +#else /* not FreeBSD3 */ + +#define IN6_LOOKUP_MULTI(addr, ifp, in6m) \ +/* struct in6_addr addr; */ \ +/* struct ifnet *ifp; */ \ +/* struct in6_multi *in6m; */ \ +do { \ + register struct in6_ifaddr *ia; \ + \ + IFP_TO_IA6((ifp), ia); \ + if (ia == NULL) \ + (in6m) = NULL; \ + else \ + for ((in6m) = ia->ia6_multiaddrs.lh_first; \ + (in6m) != NULL && \ + !IN6_ARE_ADDR_EQUAL(&(in6m)->in6m_addr, &(addr)); \ + (in6m) = in6m->in6m_entry.le_next) \ + continue; \ +} while (0) + +/* + * Macro to step through all of the in6_multi records, one at a time. + * The current position is remembered in "step", which the caller must + * provide. IN6_FIRST_MULTI(), below, must be called to initialize "step" + * and get the first record. Both macros return a NULL "in6m" when there + * are no remaining records. 
+ */ +#define IN6_NEXT_MULTI(step, in6m) \ +/* struct in6_multistep step; */ \ +/* struct in6_multi *in6m; */ \ +do { \ + if (((in6m) = (step).i_in6m) != NULL) \ + (step).i_in6m = (in6m)->in6m_entry.le_next; \ + else \ + while ((step).i_ia != NULL) { \ + (in6m) = (step).i_ia->ia6_multiaddrs.lh_first; \ + (step).i_ia = (step).i_ia->ia_next; \ + if ((in6m) != NULL) { \ + (step).i_in6m = (in6m)->in6m_entry.le_next; \ + break; \ + } \ + } \ +} while (0) + +#define IN6_FIRST_MULTI(step, in6m) \ +/* struct in6_multistep step; */ \ +/* struct in6_multi *in6m */ \ +do { \ + (step).i_ia = in6_ifaddr; \ + (step).i_in6m = NULL; \ + IN6_NEXT_MULTI((step), (in6m)); \ +} while (0) + +#endif /* not FreeBSD3 */ + +int in6_ifinit __P((struct ifnet *, + struct in6_ifaddr *, struct sockaddr_in6 *, int)); +struct in6_multi *in6_addmulti __P((struct in6_addr *, struct ifnet *, + int *)); +void in6_delmulti __P((struct in6_multi *)); +void in6_ifscrub __P((struct ifnet *, struct in6_ifaddr *)); +extern int in6_ifindex2scopeid __P((int)); +extern int in6_mask2len __P((struct in6_addr *)); +extern void in6_len2mask __P((struct in6_addr *, int)); +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) +int in6_control __P((struct socket *, + u_long, caddr_t, struct ifnet *, struct proc *)); +#else +int in6_control __P((struct socket *, u_long, caddr_t, struct ifnet *)); +#endif +void in6_purgeaddr __P((struct ifaddr *, struct ifnet *)); +void in6_savemkludge __P((struct in6_ifaddr *)); +void in6_setmaxmtu __P((void)); +void in6_restoremkludge __P((struct in6_ifaddr *, struct ifnet *)); +void in6_purgemkludge __P((struct ifnet *)); +struct in6_ifaddr *in6ifa_ifpforlinklocal __P((struct ifnet *, int)); +struct in6_ifaddr *in6ifa_ifpwithaddr __P((struct ifnet *, + struct in6_addr *)); +char *ip6_sprintf __P((struct in6_addr *)); +int in6_addr2scopeid __P((struct ifnet *, struct in6_addr *)); +int in6_matchlen __P((struct in6_addr *, struct in6_addr *)); 
+int in6_are_prefix_equal __P((struct in6_addr *p1, struct in6_addr *p2, + int len)); +void in6_prefixlen2mask __P((struct in6_addr *maskp, int len)); +int in6_prefix_ioctl __P((struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp)); +int in6_prefix_add_ifid __P((int iilen, struct in6_ifaddr *ia)); +void in6_prefix_remove_ifid __P((int iilen, struct in6_ifaddr *ia)); +void in6_purgeprefix __P((struct ifnet *)); +#endif /* _KERNEL */ + +#endif /* _NETINET6_IN6_VAR_H_ */ diff --git a/bsd/netinet6/ip6.h b/bsd/netinet6/ip6.h new file mode 100644 index 000000000..023215cc0 --- /dev/null +++ b/bsd/netinet6/ip6.h @@ -0,0 +1,4 @@ +/* $KAME$ */ + +/* just for backward compatibility, will be nuked shortly */ +#error "wrong include file - include netinet/ip6.h instead" diff --git a/bsd/netinet6/ip6_forward.c b/bsd/netinet6/ip6_forward.c new file mode 100644 index 000000000..95838ed3c --- /dev/null +++ b/bsd/netinet6/ip6_forward.c @@ -0,0 +1,524 @@ +/* $KAME: ip6_forward.c,v 1.29 2000/02/26 18:08:38 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include "opt_ip6fw.h" +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#if IPSEC_IPV6FWD +#include +#include +#include +#endif /* IPSEC_IPV6FWD */ + +#if IPV6FIREWALL +#include +#endif + +#if MIP6 +#include +#endif + +#include + +struct route_in6 ip6_forward_rt; + +/* + * Forward a packet. If some error occurs return the sender + * an icmp packet. Note we can't always generate a meaningful + * icmp message because icmp doesn't have a large enough repertoire + * of codes and types. + * + * If not forwarding, just drop the packet. This could be confusing + * if ipforwarding was zero but some routing protocol was advancing + * us as a gateway to somewhere. However, we must let the routing + * protocol deal with that. 
+ * + */ + +void +ip6_forward(m, srcrt) + struct mbuf *m; + int srcrt; +{ + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + register struct sockaddr_in6 *dst; + register struct rtentry *rt; + int error, type = 0, code = 0; + struct mbuf *mcopy = NULL; +#if IPSEC_IPV6FWD + struct secpolicy *sp = NULL; +#endif +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + long time_second = time.tv_sec; +#endif + +#if IPSEC_IPV6FWD + /* + * Check AH/ESP integrity. + */ + /* + * Don't increment ip6s_cantforward because this is the check + * before forwarding packet actually. + */ + if (ipsec6_in_reject(m, NULL)) { + ipsec6stat.in_polvio++; + m_freem(m); + return; + } +#endif /*IPSEC_IPV6FWD*/ + + if ((m->m_flags & (M_BCAST|M_MCAST)) != 0 || + IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { + ip6stat.ip6s_cantforward++; + /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard) */ + if (ip6_log_time + ip6_log_interval < time_second) { + ip6_log_time = time_second; + log(LOG_DEBUG, + "cannot forward " + "from %s to %s nxt %d received on %s\n", + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst), + ip6->ip6_nxt, + if_name(m->m_pkthdr.rcvif)); + } + m_freem(m); + return; + } + + if (ip6->ip6_hlim <= IPV6_HLIMDEC) { + /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard) */ + icmp6_error(m, ICMP6_TIME_EXCEEDED, + ICMP6_TIME_EXCEED_TRANSIT, 0); + return; + } + ip6->ip6_hlim -= IPV6_HLIMDEC; + + /* + * Save at most ICMPV6_PLD_MAXLEN (= the min IPv6 MTU - + * size of IPv6 + ICMPv6 headers) bytes of the packet in case + * we need to generate an ICMP6 message to the src. + * Thanks to M_EXT, in most cases copy will not occur. + * + * It is important to save it before IPsec processing as IPsec + * processing may modify the mbuf. 
+ */ + mcopy = m_copy(m, 0, imin(m->m_pkthdr.len, ICMPV6_PLD_MAXLEN)); + + +#if IPSEC_IPV6FWD + /* get a security policy for this packet */ + sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, 0, &error); + if (sp == NULL) { + ipsec6stat.out_inval++; + ip6stat.ip6s_cantforward++; + if (mcopy) { +#if 0 + /* XXX: what icmp ? */ +#else + m_freem(mcopy); +#endif + } + m_freem(m); + return; + } + + error = 0; + + /* check policy */ + switch (sp->policy) { + case IPSEC_POLICY_DISCARD: + /* + * This packet is just discarded. + */ + ipsec6stat.out_polvio++; + ip6stat.ip6s_cantforward++; + key_freesp(sp); + if (mcopy) { +#if 0 + /* XXX: what icmp ? */ +#else + m_freem(mcopy); +#endif + } + m_freem(m); + return; + + case IPSEC_POLICY_BYPASS: + case IPSEC_POLICY_NONE: + /* no need to do IPsec. */ + key_freesp(sp); + goto skip_ipsec; + + case IPSEC_POLICY_IPSEC: + if (sp->req == NULL) { + /* XXX should be panic ? */ + printf("ip6_forward: No IPsec request specified.\n"); + ip6stat.ip6s_cantforward++; + key_freesp(sp); + if (mcopy) { +#if 0 + /* XXX: what icmp ? */ +#else + m_freem(mcopy); +#endif + } + m_freem(m); + return; + } + /* do IPsec */ + break; + + case IPSEC_POLICY_ENTRUST: + default: + /* should be panic ?? */ + printf("ip6_forward: Invalid policy found. %d\n", sp->policy); + key_freesp(sp); + goto skip_ipsec; + } + + { + struct ipsec_output_state state; + + /* + * All the extension headers will become inaccessible + * (since they can be encrypted). + * Don't panic, we need no more updates to extension headers + * on inner IPv6 packet (since they are now encapsulated). 
+ * + * IPv6 [ESP|AH] IPv6 [extension headers] payload + */ + bzero(&state, sizeof(state)); + state.m = m; + state.ro = NULL; /* update at ipsec6_output_tunnel() */ + state.dst = NULL; /* update at ipsec6_output_tunnel() */ + + error = ipsec6_output_tunnel(&state, sp, 0); + + m = state.m; +#if 0 /* XXX allocate a route (ro, dst) again later */ + ro = (struct route_in6 *)state.ro; + dst = (struct sockaddr_in6 *)state.dst; +#endif + key_freesp(sp); + + if (error) { + /* mbuf is already reclaimed in ipsec6_output_tunnel. */ + switch (error) { + case EHOSTUNREACH: + case ENETUNREACH: + case EMSGSIZE: + case ENOBUFS: + case ENOMEM: + break; + default: + printf("ip6_output (ipsec): error code %d\n", error); + /*fall through*/ + case ENOENT: + /* don't show these error codes to the user */ + break; + } + ip6stat.ip6s_cantforward++; + if (mcopy) { +#if 0 + /* XXX: what icmp ? */ +#else + m_freem(mcopy); +#endif + } + m_freem(m); + return; + } + } + skip_ipsec: +#endif /* IPSEC_IPV6FWD */ + +#if MIP6 + { + struct mip6_bc *bc; + + bc = mip6_bc_find(&ip6->ip6_dst); + if ((bc != NULL) && (bc->hr_flag)) { + if (mip6_tunnel_output(&m, bc) != 0) { + ip6stat.ip6s_cantforward++; + if (mcopy) + m_freem(mcopy); + m_freem(m); + return; + } + } + ip6 = mtod(m, struct ip6_hdr *); /* m has changed */ + } +#endif + + dst = &ip6_forward_rt.ro_dst; + if (!srcrt) { + /* + * ip6_forward_rt.ro_dst.sin6_addr is equal to ip6->ip6_dst + */ + if (ip6_forward_rt.ro_rt == 0 || + (ip6_forward_rt.ro_rt->rt_flags & RTF_UP) == 0) { + if (ip6_forward_rt.ro_rt) { + RTFREE(ip6_forward_rt.ro_rt); + ip6_forward_rt.ro_rt = 0; + } + /* this probably fails but give it a try again */ +#if __FreeBSD__ || defined(__APPLE__) + rtalloc_ign((struct route *)&ip6_forward_rt, + RTF_PRCLONING); +#else + rtalloc((struct route *)&ip6_forward_rt); +#endif + } + + if (ip6_forward_rt.ro_rt == 0) { + ip6stat.ip6s_noroute++; + /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_noroute) */ + if (mcopy) { + icmp6_error(mcopy, 
ICMP6_DST_UNREACH, + ICMP6_DST_UNREACH_NOROUTE, 0); + } + m_freem(m); + return; + } + } else if ((rt = ip6_forward_rt.ro_rt) == 0 || + !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &dst->sin6_addr)) { + if (ip6_forward_rt.ro_rt) { + RTFREE(ip6_forward_rt.ro_rt); + ip6_forward_rt.ro_rt = 0; + } + bzero(dst, sizeof(*dst)); + dst->sin6_len = sizeof(struct sockaddr_in6); + dst->sin6_family = AF_INET6; + dst->sin6_addr = ip6->ip6_dst; + +#if __FreeBSD__ || defined(__APPLE__) + rtalloc_ign((struct route *)&ip6_forward_rt, RTF_PRCLONING); +#else + rtalloc((struct route *)&ip6_forward_rt); +#endif + if (ip6_forward_rt.ro_rt == 0) { + ip6stat.ip6s_noroute++; + /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_noroute) */ + if (mcopy) { + icmp6_error(mcopy, ICMP6_DST_UNREACH, + ICMP6_DST_UNREACH_NOROUTE, 0); + } + m_freem(m); + return; + } + } + rt = ip6_forward_rt.ro_rt; + + /* + * Scope check: if a packet can't be delivered to its destination + * for the reason that the destination is beyond the scope of the + * source address, discard the packet and return an icmp6 destination + * unreachable error with Code 2 (beyond scope of source address). 
+ * [draft-ietf-ipngwg-icmp-v3-00.txt, Section 3.1] + */ + if (in6_addr2scopeid(m->m_pkthdr.rcvif, &ip6->ip6_src) != + in6_addr2scopeid(rt->rt_ifp, &ip6->ip6_src)) { + ip6stat.ip6s_cantforward++; + ip6stat.ip6s_badscope++; + in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard); + + if (ip6_log_time + ip6_log_interval < time_second) { + ip6_log_time = time_second; + log(LOG_DEBUG, + "cannot forward " + "src %s, dst %s, nxt %d, rcvif %s, outif %s\n", + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst), + ip6->ip6_nxt, + if_name(m->m_pkthdr.rcvif), if_name(rt->rt_ifp)); + } + if (mcopy) + icmp6_error(mcopy, ICMP6_DST_UNREACH, + ICMP6_DST_UNREACH_BEYONDSCOPE, 0); + m_freem(m); + return; + } + + if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) { + in6_ifstat_inc(rt->rt_ifp, ifs6_in_toobig); + if (mcopy) { + u_long mtu; +#if IPSEC_IPV6FWD + struct secpolicy *sp; + int ipsecerror; + size_t ipsechdrsiz; +#endif + + mtu = rt->rt_ifp->if_mtu; +#if IPSEC_IPV6FWD + /* + * When we do IPsec tunnel ingress, we need to play + * with if_mtu value (decrement IPsec header size + * from mtu value). The code is much simpler than v4 + * case, as we have the outgoing interface for + * encapsulated packet as "rt->rt_ifp". + */ + sp = ipsec6_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND, + IP_FORWARDING, &ipsecerror); + if (sp) { + ipsechdrsiz = ipsec6_hdrsiz(mcopy, + IPSEC_DIR_OUTBOUND, NULL); + if (ipsechdrsiz < mtu) + mtu -= ipsechdrsiz; + } + + /* + * if mtu becomes less than minimum MTU, + * tell minimum MTU (and I'll need to fragment it). + */ + if (mtu < IPV6_MMTU) + mtu = IPV6_MMTU; +#endif + icmp6_error(mcopy, ICMP6_PACKET_TOO_BIG, 0, mtu); + } + m_freem(m); + return; + } + + if (rt->rt_flags & RTF_GATEWAY) + dst = (struct sockaddr_in6 *)rt->rt_gateway; + + /* + * If we are to forward the packet using the same interface + * as one we got the packet from, perhaps we should send a redirect + * to sender to shortcut a hop. 
+ * Only send redirect if source is sending directly to us, + * and if packet was not source routed (or has any options). + * Also, don't send redirect if forwarding using a route + * modified by a redirect. + */ + if (rt->rt_ifp == m->m_pkthdr.rcvif && !srcrt && + (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0) + type = ND_REDIRECT; + +#if IPV6FIREWALL + /* + * Check with the firewall... + */ + if (ip6_fw_chk_ptr) { + u_short port = 0; + /* If ipfw says divert, we have to just drop packet */ + if ((*ip6_fw_chk_ptr)(&ip6, rt->rt_ifp, &port, &m)) { + m_freem(m); + goto freecopy; + } + if (!m) + goto freecopy; + } +#endif + +#if OLDIP6OUTPUT + error = (*rt->rt_ifp->if_output)(rt->rt_ifp, m, + (struct sockaddr *)dst, + ip6_forward_rt.ro_rt); +#else + error = nd6_output(rt->rt_ifp, m, dst, rt); +#endif + if (error) { + in6_ifstat_inc(rt->rt_ifp, ifs6_out_discard); + ip6stat.ip6s_cantforward++; + } else { + ip6stat.ip6s_forward++; + in6_ifstat_inc(rt->rt_ifp, ifs6_out_forward); + if (type) + ip6stat.ip6s_redirectsent++; + else { + if (mcopy) + goto freecopy; + } + } + if (mcopy == NULL) + return; + + switch (error) { + case 0: +#if 1 + if (type == ND_REDIRECT) { + icmp6_redirect_output(mcopy, rt); + return; + } +#endif + goto freecopy; + + case EMSGSIZE: + /* xxx MTU is constant in PPP? */ + goto freecopy; + + case ENOBUFS: + /* Tell source to slow down like source quench in IP? 
*/ + goto freecopy; + + case ENETUNREACH: /* shouldn't happen, checked above */ + case EHOSTUNREACH: + case ENETDOWN: + case EHOSTDOWN: + default: + type = ICMP6_DST_UNREACH; + code = ICMP6_DST_UNREACH_ADDR; + break; + } + icmp6_error(mcopy, type, code, 0); + return; + + freecopy: + m_freem(mcopy); + return; +} diff --git a/bsd/netinet6/ip6_fw.c b/bsd/netinet6/ip6_fw.c new file mode 100644 index 000000000..7777c4c6b --- /dev/null +++ b/bsd/netinet6/ip6_fw.c @@ -0,0 +1,1264 @@ +/* $KAME: ip6_fw.c,v 1.11 2000/03/10 04:22:18 k-sugyou Exp $ */ + +/* + * Copyright (c) 1993 Daniel Boulet + * Copyright (c) 1994 Ugen J.S.Antsilevich + * Copyright (c) 1996 Alex Nash + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. 
+ * + * $Id: ip6_fw.c,v 1.2 2000/09/14 20:35:14 lindak Exp $ + */ + +/* + * Implement IPv6 packet firewall + */ + +#ifdef __FreeBSD__ +#include "opt_ip6fw.h" +#if __FreeBSD__ >= 3 +#include "opt_inet.h" +#endif +#endif + +#if IP6DIVERT +#error "NOT SUPPORTED IPV6 DIVERT" +#endif +#if IP6FW_DIVERT_RESTART +#error "NOT SUPPORTED IPV6 DIVERT" +#endif + +#include +#include +#include +#include +#include +#include +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include +#if TCP6 +#include +#include +#include +#endif +#include +#include +#include +#include +#include +#include + +#if defined(__NetBSD__) || defined(__OpenBSD__) +#include +#endif +#if __FreeBSD__ || defined (__APPLE__) +#include +#endif + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +MALLOC_DEFINE(M_IP6FW, "Ip6Fw/Ip6Acct", "Ip6Fw/Ip6Acct chain's"); +#else +#ifndef M_IP6FW +#define M_IP6FW M_TEMP +#endif +#endif + +static int fw6_debug = 1; +#if IPV6FIREWALL_VERBOSE +static int fw6_verbose = 1; +#else +static int fw6_verbose = 0; +#endif +#if IPV6FIREWALL_VERBOSE_LIMIT +static int fw6_verbose_limit = IPV6FIREWALL_VERBOSE_LIMIT; +#else +static int fw6_verbose_limit = 0; +#endif + +LIST_HEAD (ip6_fw_head, ip6_fw_chain) ip6_fw_chain; + +SYSCTL_DECL(_net_inet6_ip6); +SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW, 0, "Firewall"); +SYSCTL_INT(_net_inet6_ip6_fw, IP6FWCTL_DEBUG, debug, CTLFLAG_RW, &fw6_debug, 0, ""); +SYSCTL_INT(_net_inet6_ip6_fw, IP6FWCTL_VERBOSE, verbose, CTLFLAG_RW, &fw6_verbose, 0, ""); +SYSCTL_INT(_net_inet6_ip6_fw, IP6FWCTL_VERBLIMIT, verbose_limit, CTLFLAG_RW, &fw6_verbose_limit, 0, ""); + +#define dprintf(a) if (!fw6_debug); else printf a + +#define print_ip6(a) printf("[%s]", ip6_sprintf(a)) + +#define dprint_ip6(a) if (!fw6_debug); else print_ip6(a) + +static int add_entry6 __P((struct ip6_fw_head 
*chainptr, struct ip6_fw *frwl)); +static int del_entry6 __P((struct ip6_fw_head *chainptr, u_short number)); +static int zero_entry6 __P((struct mbuf *m)); +static struct ip6_fw *check_ip6fw_struct __P((struct ip6_fw *m)); +static struct ip6_fw *check_ip6fw_mbuf __P((struct mbuf *fw)); +static int ip6opts_match __P((struct ip6_hdr **ip6, struct ip6_fw *f, + struct mbuf **m, + int *off, int *nxt, u_short *offset)); +static int port_match6 __P((u_short *portptr, int nports, u_short port, + int range_flag)); +static int tcp6flg_match __P((struct tcphdr *tcp6, struct ip6_fw *f)); +static int icmp6type_match __P((struct icmp6_hdr * icmp, struct ip6_fw * f)); +static void ip6fw_report __P((struct ip6_fw *f, struct ip6_hdr *ip6, + struct ifnet *rif, struct ifnet *oif, int off, int nxt)); + +static int ip6_fw_chk __P((struct ip6_hdr **pip6, + struct ifnet *oif, u_int16_t *cookie, struct mbuf **m)); +static int ip6_fw_ctl __P((int stage, struct mbuf **mm)); + +static char err_prefix[] = "ip6_fw_ctl:"; + +/* + * Returns 1 if the port is matched by the vector, 0 otherwise + */ +static +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +__inline +#else +inline +#endif +int +port_match6(u_short *portptr, int nports, u_short port, int range_flag) +{ + if (!nports) + return 1; + if (range_flag) { + if (portptr[0] <= port && port <= portptr[1]) { + return 1; + } + nports -= 2; + portptr += 2; + } + while (nports-- > 0) { + if (*portptr++ == port) { + return 1; + } + } + return 0; +} + +static int +tcp6flg_match(struct tcphdr *tcp6, struct ip6_fw *f) +{ + u_char flg_set, flg_clr; + + if ((f->fw_tcpf & IPV6_FW_TCPF_ESTAB) && + (tcp6->th_flags & (IPV6_FW_TCPF_RST | IPV6_FW_TCPF_ACK))) + return 1; + + flg_set = tcp6->th_flags & f->fw_tcpf; + flg_clr = tcp6->th_flags & f->fw_tcpnf; + + if (flg_set != f->fw_tcpf) + return 0; + if (flg_clr) + return 0; + + return 1; +} + +static int +icmp6type_match(struct icmp6_hdr *icmp6, struct ip6_fw *f) +{ + int type; + + if (!(f->fw_flg & 
IPV6_FW_F_ICMPBIT)) + return(1); + + type = icmp6->icmp6_type; + + /* check for matching type in the bitmap */ + if (type < IPV6_FW_ICMPTYPES_DIM * sizeof(unsigned) * 8 && + (f->fw_icmp6types[type / (sizeof(unsigned) * 8)] & + (1U << (type % (8 * sizeof(unsigned)))))) + return(1); + + return(0); /* no match */ +} + +static int +is_icmp6_query(struct ip6_hdr *ip6, int off) +{ + const struct icmp6_hdr *icmp6; + int icmp6_type; + + icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off); + icmp6_type = icmp6->icmp6_type; + + if (icmp6_type == ICMP6_ECHO_REQUEST || + icmp6_type == ICMP6_MEMBERSHIP_QUERY || + icmp6_type == ICMP6_WRUREQUEST || + icmp6_type == ICMP6_FQDN_QUERY || + icmp6_type == ICMP6_NI_QUERY) + return(1); + + return(0); +} + +static int +ip6opts_match(struct ip6_hdr **pip6, struct ip6_fw *f, struct mbuf **m, + int *off, int *nxt, u_short *offset) +{ + int len; + struct ip6_hdr *ip6 = *pip6; + struct ip6_ext *ip6e; + u_char opts, nopts, nopts_sve; + + opts = f->fw_ip6opt; + nopts = nopts_sve = f->fw_ip6nopt; + + *nxt = ip6->ip6_nxt; + *off = sizeof(struct ip6_hdr); + len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr); + while (*off < len) { + ip6e = (struct ip6_ext *)((caddr_t) ip6 + *off); + if ((*m)->m_len < *off + sizeof(*ip6e)) + goto opts_check; /* XXX */ + + switch(*nxt) { + case IPPROTO_FRAGMENT: + if ((*m)->m_len < *off + sizeof(struct ip6_frag)) { + struct ip6_frag *ip6f; + + ip6f = (struct ip6_frag *) ((caddr_t)ip6 + *off); + *offset = ip6f->ip6f_offlg | IP6F_OFF_MASK; + } + opts &= ~IPV6_FW_IP6OPT_FRAG; + nopts &= ~IPV6_FW_IP6OPT_FRAG; + *off += sizeof(struct ip6_frag); + break; + case IPPROTO_AH: + opts &= ~IPV6_FW_IP6OPT_AH; + nopts &= ~IPV6_FW_IP6OPT_AH; + *off += (ip6e->ip6e_len + 2) << 2; + break; + default: + switch (*nxt) { + case IPPROTO_HOPOPTS: + opts &= ~IPV6_FW_IP6OPT_HOPOPT; + nopts &= ~IPV6_FW_IP6OPT_HOPOPT; + break; + case IPPROTO_ROUTING: + opts &= ~IPV6_FW_IP6OPT_ROUTE; + nopts &= ~IPV6_FW_IP6OPT_ROUTE; + break; + case IPPROTO_ESP: 
+ opts &= ~IPV6_FW_IP6OPT_ESP; + nopts &= ~IPV6_FW_IP6OPT_ESP; + break; + case IPPROTO_NONE: + opts &= ~IPV6_FW_IP6OPT_NONXT; + nopts &= ~IPV6_FW_IP6OPT_NONXT; + goto opts_check; + break; + case IPPROTO_DSTOPTS: + opts &= ~IPV6_FW_IP6OPT_OPTS; + nopts &= ~IPV6_FW_IP6OPT_OPTS; + break; + default: + goto opts_check; + break; + } + *off += (ip6e->ip6e_len + 1) << 3; + break; + } + *nxt = ip6e->ip6e_nxt; + + } + opts_check: + if (f->fw_ip6opt == f->fw_ip6nopt) /* XXX */ + return 1; + + if (opts == 0 && nopts == nopts_sve) + return 1; + else + return 0; +} + +static +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +__inline +#else +inline +#endif +int +iface_match(struct ifnet *ifp, union ip6_fw_if *ifu, int byname) +{ + /* Check by name or by IP address */ + if (byname) { +#if __NetBSD__ + { + char xname[IFNAMSIZ]; + snprintf(xname, sizeof(xname), "%s%d", ifu->fu_via_if.name, + ifu->fu_via_if.unit); + if (strcmp(ifp->if_xname, xname)) + return(0); + } +#else + /* Check unit number (-1 is wildcard) */ + if (ifu->fu_via_if.unit != -1 + && ifp->if_unit != ifu->fu_via_if.unit) + return(0); + /* Check name */ + if (strncmp(ifp->if_name, ifu->fu_via_if.name, FW_IFNLEN)) + return(0); +#endif + return(1); + } else if (!IN6_IS_ADDR_UNSPECIFIED(&ifu->fu_via_ip6)) { /* Zero == wildcard */ + struct ifaddr *ia; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ia = ifp->if_addrlist; ia; ia = ia->ifa_next) +#else + for (ia = ifp->if_addrlist.tqh_first; ia; ia = ia->ifa_list.tqe_next) +#endif + { + + if (ia->ifa_addr == NULL) + continue; + if (ia->ifa_addr->sa_family != AF_INET6) + continue; + if (!IN6_ARE_ADDR_EQUAL(&ifu->fu_via_ip6, + &(((struct sockaddr_in6 *) + (ia->ifa_addr))->sin6_addr))) + continue; + return(1); + } + return(0); + } + return(1); +} + +static void +ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, + struct ifnet *rif, struct ifnet *oif, int off, int nxt) +{ + static int counter; + struct tcphdr *const tcp6 = (struct tcphdr *) 
((caddr_t) ip6+ off); + struct udphdr *const udp = (struct udphdr *) ((caddr_t) ip6+ off); + struct icmp6_hdr *const icmp6 = (struct icmp6_hdr *) ((caddr_t) ip6+ off); + int count; + + count = f ? f->fw_pcnt : ++counter; + if (fw6_verbose_limit != 0 && count > fw6_verbose_limit) + return; + + /* Print command name */ + printf("ip6fw: %d ", f ? f->fw_number : -1); + if (!f) + printf("Refuse"); + else + switch (f->fw_flg & IPV6_FW_F_COMMAND) { + case IPV6_FW_F_DENY: + printf("Deny"); + break; + case IPV6_FW_F_REJECT: + if (f->fw_reject_code == IPV6_FW_REJECT_RST) + printf("Reset"); + else + printf("Unreach"); + break; + case IPV6_FW_F_ACCEPT: + printf("Accept"); + break; + case IPV6_FW_F_COUNT: + printf("Count"); + break; + case IPV6_FW_F_DIVERT: + printf("Divert %d", f->fw_divert_port); + break; + case IPV6_FW_F_TEE: + printf("Tee %d", f->fw_divert_port); + break; + case IPV6_FW_F_SKIPTO: + printf("SkipTo %d", f->fw_skipto_rule); + break; + default: + printf("UNKNOWN"); + break; + } + printf(" "); + + switch (nxt) { + case IPPROTO_TCP: + printf("TCP "); + print_ip6(&ip6->ip6_src); + if (off > 0) + printf(":%d ", ntohs(tcp6->th_sport)); + else + printf(" "); + print_ip6(&ip6->ip6_dst); + if (off > 0) + printf(":%d", ntohs(tcp6->th_dport)); + break; + case IPPROTO_UDP: + printf("UDP "); + print_ip6(&ip6->ip6_src); + if (off > 0) + printf(":%d ", ntohs(udp->uh_sport)); + else + printf(" "); + print_ip6(&ip6->ip6_dst); + if (off > 0) + printf(":%d", ntohs(udp->uh_dport)); + break; + case IPPROTO_ICMPV6: + if (off > 0) + printf("IPV6-ICMP:%u.%u ", icmp6->icmp6_type, icmp6->icmp6_code); + else + printf("IPV6-ICMP "); + print_ip6(&ip6->ip6_src); + printf(" "); + print_ip6(&ip6->ip6_dst); + break; + default: + printf("P:%d ", nxt); + print_ip6(&ip6->ip6_src); + printf(" "); + print_ip6(&ip6->ip6_dst); + break; + } + if (oif) + printf(" out via %s", if_name(oif)); + else if (rif) + printf(" in via %s", if_name(rif)); + printf("\n"); + if (fw6_verbose_limit != 0 && count == 
fw6_verbose_limit) + printf("ip6fw: limit reached on rule #%d\n", + f ? f->fw_number : -1); +} + +/* + * Parameters: + * + * ip Pointer to packet header (struct ip6_hdr *) + * hlen Packet header length + * oif Outgoing interface, or NULL if packet is incoming + * #ifndef IP6FW_DIVERT_RESTART + * *cookie Ignore all divert/tee rules to this port (if non-zero) + * #else + * *cookie Skip up to the first rule past this rule number; + * #endif + * *m The packet; we set to NULL when/if we nuke it. + * + * Return value: + * + * 0 The packet is to be accepted and routed normally OR + * the packet was denied/rejected and has been dropped; + * in the latter case, *m is equal to NULL upon return. + * port Divert the packet to port. + */ + +static int +ip6_fw_chk(struct ip6_hdr **pip6, + struct ifnet *oif, u_int16_t *cookie, struct mbuf **m) +{ + struct ip6_fw_chain *chain; + struct ip6_fw *rule = NULL; + struct ip6_hdr *ip6 = *pip6; + struct ifnet *const rif = (*m)->m_pkthdr.rcvif; + u_short offset = 0; + int off = sizeof(struct ip6_hdr), nxt = ip6->ip6_nxt; + u_short src_port, dst_port; +#if IP6FW_DIVERT_RESTART + u_int16_t skipto = *cookie; +#else + u_int16_t ignport = ntohs(*cookie); +#endif + + *cookie = 0; + /* + * Go down the chain, looking for enlightment + * #if IP6FW_DIVERT_RESTART + * If we've been asked to start at a given rule immediatly, do so. + * #endif + */ + chain = LIST_FIRST(&ip6_fw_chain); +#if IP6FW_DIVERT_RESTART + if (skipto) { + if (skipto >= 65535) + goto dropit; + while (chain && (chain->rule->fw_number <= skipto)) { + chain = LIST_NEXT(chain, chain); + } + if (! 
chain) goto dropit; + } +#endif /* IP6FW_DIVERT_RESTART */ + for (; chain; chain = LIST_NEXT(chain, chain)) { + register struct ip6_fw *const f = chain->rule; + + if (oif) { + /* Check direction outbound */ + if (!(f->fw_flg & IPV6_FW_F_OUT)) + continue; + } else { + /* Check direction inbound */ + if (!(f->fw_flg & IPV6_FW_F_IN)) + continue; + } + +#define IN6_ARE_ADDR_MASKEQUAL(x,y,z) (\ + (((x)->s6_addr32[0] & (y)->s6_addr32[0]) == (z)->s6_addr32[0]) && \ + (((x)->s6_addr32[1] & (y)->s6_addr32[1]) == (z)->s6_addr32[1]) && \ + (((x)->s6_addr32[2] & (y)->s6_addr32[2]) == (z)->s6_addr32[2]) && \ + (((x)->s6_addr32[3] & (y)->s6_addr32[3]) == (z)->s6_addr32[3])) + + /* If src-addr doesn't match, not this rule. */ + if (((f->fw_flg & IPV6_FW_F_INVSRC) != 0) ^ + (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_src,&f->fw_smsk,&f->fw_src))) + continue; + + /* If dest-addr doesn't match, not this rule. */ + if (((f->fw_flg & IPV6_FW_F_INVDST) != 0) ^ + (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_dst,&f->fw_dmsk,&f->fw_dst))) + continue; + +#undef IN6_ARE_ADDR_MASKEQUAL + /* Interface check */ + if ((f->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { + struct ifnet *const iface = oif ? 
oif : rif; + + /* Backwards compatibility hack for "via" */ + if (!iface || !iface_match(iface, + &f->fw_in_if, f->fw_flg & IPV6_FW_F_OIFNAME)) + continue; + } else { + /* Check receive interface */ + if ((f->fw_flg & IPV6_FW_F_IIFACE) + && (!rif || !iface_match(rif, + &f->fw_in_if, f->fw_flg & IPV6_FW_F_IIFNAME))) + continue; + /* Check outgoing interface */ + if ((f->fw_flg & IPV6_FW_F_OIFACE) + && (!oif || !iface_match(oif, + &f->fw_out_if, f->fw_flg & IPV6_FW_F_OIFNAME))) + continue; + } + + /* Check IP options */ + if (!ip6opts_match(&ip6, f, m, &off, &nxt, &offset)) + continue; + + /* Fragments */ + if ((f->fw_flg & IPV6_FW_F_FRAG) && !offset) + continue; + + /* Check protocol; if wildcard, match */ + if (f->fw_prot == IPPROTO_IPV6) + goto got_match; + + /* If different, don't match */ + if (nxt != f->fw_prot) + continue; + +#define PULLUP_TO(len) do { \ + if ((*m)->m_len < (len) \ + && (*m = m_pullup(*m, (len))) == 0) { \ + goto dropit; \ + } \ + *pip6 = ip6 = mtod(*m, struct ip6_hdr *); \ + } while (0) + + /* Protocol specific checks */ + switch (nxt) { + case IPPROTO_TCP: + { + struct tcphdr *tcp6; + + if (offset == 1) { /* cf. RFC 1858 */ + PULLUP_TO(off + 4); /* XXX ? */ + goto bogusfrag; + } + if (offset != 0) { + /* + * TCP flags and ports aren't available in this + * packet -- if this rule specified either one, + * we consider the rule a non-match. + */ + if (f->fw_nports != 0 || + f->fw_tcpf != f->fw_tcpnf) + continue; + + break; + } + PULLUP_TO(off + 14); + tcp6 = (struct tcphdr *) ((caddr_t)ip6 + off); + if (f->fw_tcpf != f->fw_tcpnf && !tcp6flg_match(tcp6, f)) + continue; + src_port = ntohs(tcp6->th_sport); + dst_port = ntohs(tcp6->th_dport); + goto check_ports; + } + + case IPPROTO_UDP: + { + struct udphdr *udp; + + if (offset != 0) { + /* + * Port specification is unavailable -- if this + * rule specifies a port, we consider the rule + * a non-match. 
+ */ + if (f->fw_nports != 0) + continue; + + break; + } + PULLUP_TO(off + 4); + udp = (struct udphdr *) ((caddr_t)ip6 + off); + src_port = ntohs(udp->uh_sport); + dst_port = ntohs(udp->uh_dport); +check_ports: + if (!port_match6(&f->fw_pts[0], + IPV6_FW_GETNSRCP(f), src_port, + f->fw_flg & IPV6_FW_F_SRNG)) + continue; + if (!port_match6(&f->fw_pts[IPV6_FW_GETNSRCP(f)], + IPV6_FW_GETNDSTP(f), dst_port, + f->fw_flg & IPV6_FW_F_DRNG)) + continue; + break; + } + + case IPPROTO_ICMPV6: + { + struct icmp6_hdr *icmp; + + if (offset != 0) /* Type isn't valid */ + break; + PULLUP_TO(off + 2); + icmp = (struct icmp6_hdr *) ((caddr_t)ip6 + off); + if (!icmp6type_match(icmp, f)) + continue; + break; + } +#undef PULLUP_TO + +bogusfrag: + if (fw6_verbose) + ip6fw_report(NULL, ip6, rif, oif, off, nxt); + goto dropit; + } + +got_match: +#ifndef IP6FW_DIVERT_RESTART + /* Ignore divert/tee rule if socket port is "ignport" */ + switch (f->fw_flg & IPV6_FW_F_COMMAND) { + case IPV6_FW_F_DIVERT: + case IPV6_FW_F_TEE: + if (f->fw_divert_port == ignport) + continue; /* ignore this rule */ + break; + } + +#endif /* IP6FW_DIVERT_RESTART */ + /* Update statistics */ + f->fw_pcnt += 1; + f->fw_bcnt += ntohs(ip6->ip6_plen); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + f->timestamp = time_second; +#else + f->timestamp = time.tv_sec; +#endif + + /* Log to console if desired */ + if ((f->fw_flg & IPV6_FW_F_PRN) && fw6_verbose) + ip6fw_report(f, ip6, rif, oif, off, nxt); + + /* Take appropriate action */ + switch (f->fw_flg & IPV6_FW_F_COMMAND) { + case IPV6_FW_F_ACCEPT: + return(0); + case IPV6_FW_F_COUNT: + continue; + case IPV6_FW_F_DIVERT: +#if IP6FW_DIVERT_RESTART + *cookie = f->fw_number; +#else + *cookie = htons(f->fw_divert_port); +#endif /* IP6FW_DIVERT_RESTART */ + return(f->fw_divert_port); + case IPV6_FW_F_TEE: + /* + * XXX someday tee packet here, but beware that you + * can't use m_copym() or m_copypacket() because + * the divert input routine modifies the mbuf + * (and these 
routines only increment reference + * counts in the case of mbuf clusters), so need + * to write custom routine. + */ + continue; + case IPV6_FW_F_SKIPTO: +#if DIAGNOSTIC + while (chain->chain.le_next + && chain->chain.le_next->rule->fw_number + < f->fw_skipto_rule) +#else + while (chain->chain.le_next->rule->fw_number + < f->fw_skipto_rule) +#endif + chain = chain->chain.le_next; + continue; + } + + /* Deny/reject this packet using this rule */ + rule = f; + break; + } + +#if DIAGNOSTIC + /* Rule 65535 should always be there and should always match */ + if (!chain) + panic("ip6_fw: chain"); +#endif + + /* + * At this point, we're going to drop the packet. + * Send a reject notice if all of the following are true: + * + * - The packet matched a reject rule + * - The packet is not an ICMP packet, or is an ICMP query packet + * - The packet is not a multicast or broadcast packet + */ + if ((rule->fw_flg & IPV6_FW_F_COMMAND) == IPV6_FW_F_REJECT + && (nxt != IPPROTO_ICMPV6 || is_icmp6_query(ip6, off)) + && !((*m)->m_flags & (M_BCAST|M_MCAST)) + && !IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { + switch (rule->fw_reject_code) { + case IPV6_FW_REJECT_RST: +#if 1 /*not tested*/ + { + struct tcphdr *const tcp = + (struct tcphdr *) ((caddr_t)ip6 + off); + struct { + struct ip6_hdr ip6; + struct tcphdr th; + } ti; + tcp_seq ack, seq; + int flags; + + if (offset != 0 || (tcp->th_flags & TH_RST)) + break; + + ti.ip6 = *ip6; + ti.th = *tcp; + NTOHL(ti.th.th_seq); + NTOHL(ti.th.th_ack); + ti.ip6.ip6_nxt = IPPROTO_TCP; + if (ti.th.th_flags & TH_ACK) { + ack = 0; + seq = ti.th.th_ack; + flags = TH_RST; + } else { + ack = ti.th.th_seq; + if (((*m)->m_flags & M_PKTHDR) != 0) { + ack += (*m)->m_pkthdr.len - off + - (ti.th.th_off << 2); + } else if (ip6->ip6_plen) { + ack += ntohs(ip6->ip6_plen) + sizeof(*ip6) + - off - (ti.th.th_off << 2); + } else { + m_freem(*m); + *m = 0; + break; + } + seq = 0; + flags = TH_RST|TH_ACK; + } + bcopy(&ti, ip6, sizeof(ti)); +#if TCP6 + tcp6_respond(NULL, 
ip6, (struct tcp6hdr *)(ip6 + 1), + *m, ack, seq, flags); +#elif defined(__NetBSD__) + tcp_respond(NULL, NULL, *m, (struct tcphdr *)(ip6 + 1), + ack, seq, flags); +#elif defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + tcp_respond(NULL, ip6, (struct tcphdr *)(ip6 + 1), + *m, ack, seq, flags, 1); +#else + m_freem(*m); +#endif + *m = NULL; + break; + } +#endif + default: /* Send an ICMP unreachable using code */ + if (oif) + (*m)->m_pkthdr.rcvif = oif; + icmp6_error(*m, ICMP6_DST_UNREACH, + rule->fw_reject_code, 0); + *m = NULL; + break; + } + } + +dropit: + /* + * Finally, drop the packet. + */ + if (*m) { + m_freem(*m); + *m = NULL; + } + return(0); +} + +static int +add_entry6(struct ip6_fw_head *chainptr, struct ip6_fw *frwl) +{ + struct ip6_fw *ftmp = 0; + struct ip6_fw_chain *fwc = 0, *fcp, *fcpl = 0; + u_short nbr = 0; + int s; + + fwc = _MALLOC(sizeof *fwc, M_IP6FW, M_DONTWAIT); + ftmp = _MALLOC(sizeof *ftmp, M_IP6FW, M_DONTWAIT); + if (!fwc || !ftmp) { + dprintf(("%s malloc said no\n", err_prefix)); + if (fwc) _FREE(fwc, M_IP6FW); + if (ftmp) _FREE(ftmp, M_IP6FW); + return (ENOSPC); + } + + bcopy(frwl, ftmp, sizeof(struct ip6_fw)); + ftmp->fw_in_if.fu_via_if.name[FW_IFNLEN - 1] = '\0'; + ftmp->fw_pcnt = 0L; + ftmp->fw_bcnt = 0L; + fwc->rule = ftmp; + + s = splnet(); + + if (!chainptr->lh_first) { + LIST_INSERT_HEAD(chainptr, fwc, chain); + splx(s); + return(0); + } else if (ftmp->fw_number == (u_short)-1) { + if (fwc) _FREE(fwc, M_IP6FW); + if (ftmp) _FREE(ftmp, M_IP6FW); + splx(s); + dprintf(("%s bad rule number\n", err_prefix)); + return (EINVAL); + } + + /* If entry number is 0, find highest numbered rule and add 100 */ + if (ftmp->fw_number == 0) { + for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { + if (fcp->rule->fw_number != (u_short)-1) + nbr = fcp->rule->fw_number; + else + break; + } + if (nbr < (u_short)-1 - 100) + nbr += 100; + ftmp->fw_number = nbr; + } + + /* Got a valid number; now insert it, keeping the list 
ordered */ + for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { + if (fcp->rule->fw_number > ftmp->fw_number) { + if (fcpl) { + LIST_INSERT_AFTER(fcpl, fwc, chain); + } else { + LIST_INSERT_HEAD(chainptr, fwc, chain); + } + break; + } else { + fcpl = fcp; + } + } + + splx(s); + return (0); +} + +static int +del_entry6(struct ip6_fw_head *chainptr, u_short number) +{ + struct ip6_fw_chain *fcp; + int s; + + s = splnet(); + + fcp = chainptr->lh_first; + if (number != (u_short)-1) { + for (; fcp; fcp = fcp->chain.le_next) { + if (fcp->rule->fw_number == number) { + LIST_REMOVE(fcp, chain); + splx(s); + _FREE(fcp->rule, M_IP6FW); + _FREE(fcp, M_IP6FW); + return 0; + } + } + } + + splx(s); + return (EINVAL); +} + +static int +zero_entry6(struct mbuf *m) +{ + struct ip6_fw *frwl; + struct ip6_fw_chain *fcp; + int s; + + if (m) { + if (m->m_len != sizeof(struct ip6_fw)) + return(EINVAL); + frwl = mtod(m, struct ip6_fw *); + } + else + frwl = NULL; + + /* + * It's possible to insert multiple chain entries with the + * same number, so we don't stop after finding the first + * match if zeroing a specific entry. 
+ */ + s = splnet(); + for (fcp = ip6_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) + if (!frwl || frwl->fw_number == fcp->rule->fw_number) { + fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; + fcp->rule->timestamp = 0; + } + splx(s); + + if (fw6_verbose) { + if (frwl) + printf("ip6fw: Entry %d cleared.\n", frwl->fw_number); + else + printf("ip6fw: Accounting cleared.\n"); + } + + return(0); +} + +static struct ip6_fw * +check_ip6fw_mbuf(struct mbuf *m) +{ + /* Check length */ + if (m->m_len != sizeof(struct ip6_fw)) { + dprintf(("%s len=%d, want %d\n", err_prefix, m->m_len, + sizeof(struct ip6_fw))); + return (NULL); + } + return(check_ip6fw_struct(mtod(m, struct ip6_fw *))); +} + +static struct ip6_fw * +check_ip6fw_struct(struct ip6_fw *frwl) +{ + /* Check for invalid flag bits */ + if ((frwl->fw_flg & ~IPV6_FW_F_MASK) != 0) { + dprintf(("%s undefined flag bits set (flags=%x)\n", + err_prefix, frwl->fw_flg)); + return (NULL); + } + /* Must apply to incoming or outgoing (or both) */ + if (!(frwl->fw_flg & (IPV6_FW_F_IN | IPV6_FW_F_OUT))) { + dprintf(("%s neither in nor out\n", err_prefix)); + return (NULL); + } + /* Empty interface name is no good */ + if (((frwl->fw_flg & IPV6_FW_F_IIFNAME) + && !*frwl->fw_in_if.fu_via_if.name) + || ((frwl->fw_flg & IPV6_FW_F_OIFNAME) + && !*frwl->fw_out_if.fu_via_if.name)) { + dprintf(("%s empty interface name\n", err_prefix)); + return (NULL); + } + /* Sanity check interface matching */ + if ((frwl->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { + ; /* allow "via" backwards compatibility */ + } else if ((frwl->fw_flg & IPV6_FW_F_IN) + && (frwl->fw_flg & IPV6_FW_F_OIFACE)) { + dprintf(("%s outgoing interface check on incoming\n", + err_prefix)); + return (NULL); + } + /* Sanity check port ranges */ + if ((frwl->fw_flg & IPV6_FW_F_SRNG) && IPV6_FW_GETNSRCP(frwl) < 2) { + dprintf(("%s src range set but n_src_p=%d\n", + err_prefix, IPV6_FW_GETNSRCP(frwl))); + return (NULL); + } + if ((frwl->fw_flg & IPV6_FW_F_DRNG) && 
IPV6_FW_GETNDSTP(frwl) < 2) { + dprintf(("%s dst range set but n_dst_p=%d\n", + err_prefix, IPV6_FW_GETNDSTP(frwl))); + return (NULL); + } + if (IPV6_FW_GETNSRCP(frwl) + IPV6_FW_GETNDSTP(frwl) > IPV6_FW_MAX_PORTS) { + dprintf(("%s too many ports (%d+%d)\n", + err_prefix, IPV6_FW_GETNSRCP(frwl), IPV6_FW_GETNDSTP(frwl))); + return (NULL); + } + /* + * Protocols other than TCP/UDP don't use port range + */ + if ((frwl->fw_prot != IPPROTO_TCP) && + (frwl->fw_prot != IPPROTO_UDP) && + (IPV6_FW_GETNSRCP(frwl) || IPV6_FW_GETNDSTP(frwl))) { + dprintf(("%s port(s) specified for non TCP/UDP rule\n", + err_prefix)); + return(NULL); + } + + /* + * Rather than modify the entry to make such entries work, + * we reject this rule and require user level utilities + * to enforce whatever policy they deem appropriate. + */ + if ((frwl->fw_src.s6_addr32[0] & (~frwl->fw_smsk.s6_addr32[0])) || + (frwl->fw_src.s6_addr32[1] & (~frwl->fw_smsk.s6_addr32[1])) || + (frwl->fw_src.s6_addr32[2] & (~frwl->fw_smsk.s6_addr32[2])) || + (frwl->fw_src.s6_addr32[3] & (~frwl->fw_smsk.s6_addr32[3])) || + (frwl->fw_dst.s6_addr32[0] & (~frwl->fw_dmsk.s6_addr32[0])) || + (frwl->fw_dst.s6_addr32[1] & (~frwl->fw_dmsk.s6_addr32[1])) || + (frwl->fw_dst.s6_addr32[2] & (~frwl->fw_dmsk.s6_addr32[2])) || + (frwl->fw_dst.s6_addr32[3] & (~frwl->fw_dmsk.s6_addr32[3]))) { + dprintf(("%s rule never matches\n", err_prefix)); + return(NULL); + } + + if ((frwl->fw_flg & IPV6_FW_F_FRAG) && + (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { + if (frwl->fw_nports) { + dprintf(("%s cannot mix 'frag' and ports\n", err_prefix)); + return(NULL); + } + if (frwl->fw_prot == IPPROTO_TCP && + frwl->fw_tcpf != frwl->fw_tcpnf) { + dprintf(("%s cannot mix 'frag' with TCP flags\n", err_prefix)); + return(NULL); + } + } + + /* Check command specific stuff */ + switch (frwl->fw_flg & IPV6_FW_F_COMMAND) + { + case IPV6_FW_F_REJECT: + if (frwl->fw_reject_code >= 0x100 + && !(frwl->fw_prot == IPPROTO_TCP + && 
frwl->fw_reject_code == IPV6_FW_REJECT_RST)) { + dprintf(("%s unknown reject code\n", err_prefix)); + return(NULL); + } + break; + case IPV6_FW_F_DIVERT: /* Diverting to port zero is invalid */ + case IPV6_FW_F_TEE: + if (frwl->fw_divert_port == 0) { + dprintf(("%s can't divert to port 0\n", err_prefix)); + return (NULL); + } + break; + case IPV6_FW_F_DENY: + case IPV6_FW_F_ACCEPT: + case IPV6_FW_F_COUNT: + case IPV6_FW_F_SKIPTO: + break; + default: + dprintf(("%s invalid command\n", err_prefix)); + return(NULL); + } + + return frwl; +} + +static int +ip6_fw_ctl(int stage, struct mbuf **mm) +{ + int error; + struct mbuf *m; + + if (stage == IPV6_FW_GET) { + struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + *mm = m = m_get(M_WAIT, MT_DATA); /* XXX */ +#else + *mm = m = m_get(M_WAIT, MT_SOOPTS); +#endif + if (!m) + return(ENOBUFS); + if (sizeof *(fcp->rule) > MLEN) { + MCLGET(m, M_WAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + return(ENOBUFS); + } + } + for (; fcp; fcp = fcp->chain.le_next) { + memcpy(m->m_data, fcp->rule, sizeof *(fcp->rule)); + m->m_len = sizeof *(fcp->rule); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + m->m_next = m_get(M_WAIT, MT_DATA); /* XXX */ +#else + m->m_next = m_get(M_WAIT, MT_SOOPTS); +#endif + if (!m->m_next) { + m_freem(*mm); + return(ENOBUFS); + } + m = m->m_next; + if (sizeof *(fcp->rule) > MLEN) { + MCLGET(m, M_WAIT); + if ((m->m_flags & M_EXT) == 0) { + m_freem(*mm); + return(ENOBUFS); + } + } + m->m_len = 0; + } + return (0); + } + m = *mm; + /* only allow get calls if secure mode > 2 */ + if (securelevel > 2) { + if (m) { + (void)m_freem(m); + *mm = 0; + } + return(EPERM); + } + if (stage == IPV6_FW_FLUSH) { + while (ip6_fw_chain.lh_first != NULL && + ip6_fw_chain.lh_first->rule->fw_number != (u_short)-1) { + struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; + int s = splnet(); + LIST_REMOVE(ip6_fw_chain.lh_first, chain); + 
splx(s); + _FREE(fcp->rule, M_IP6FW); + _FREE(fcp, M_IP6FW); + } + if (m) { + (void)m_freem(m); + *mm = 0; + } + return (0); + } + if (stage == IPV6_FW_ZERO) { + error = zero_entry6(m); + if (m) { + (void)m_freem(m); + *mm = 0; + } + return (error); + } + if (m == NULL) { + printf("%s NULL mbuf ptr\n", err_prefix); + return (EINVAL); + } + + if (stage == IPV6_FW_ADD) { + struct ip6_fw *frwl = check_ip6fw_mbuf(m); + + if (!frwl) + error = EINVAL; + else + error = add_entry6(&ip6_fw_chain, frwl); + if (m) { + (void)m_freem(m); + *mm = 0; + } + return error; + } + if (stage == IPV6_FW_DEL) { + if (m->m_len != sizeof(struct ip6_fw)) { + dprintf(("%s len=%d, want %d\n", err_prefix, m->m_len, + sizeof(struct ip6_fw))); + error = EINVAL; + } else if (mtod(m, struct ip6_fw *)->fw_number == (u_short)-1) { + dprintf(("%s can't delete rule 65535\n", err_prefix)); + error = EINVAL; + } else + error = del_entry6(&ip6_fw_chain, + mtod(m, struct ip6_fw *)->fw_number); + if (m) { + (void)m_freem(m); + *mm = 0; + } + return error; + } + + dprintf(("%s unknown request %d\n", err_prefix, stage)); + if (m) { + (void)m_freem(m); + *mm = 0; + } + return (EINVAL); +} + +void +ip6_fw_init(void) +{ + struct ip6_fw default_rule; + + ip6_fw_chk_ptr = ip6_fw_chk; + ip6_fw_ctl_ptr = ip6_fw_ctl; + LIST_INIT(&ip6_fw_chain); + + bzero(&default_rule, sizeof default_rule); + default_rule.fw_prot = IPPROTO_IPV6; + default_rule.fw_number = (u_short)-1; +#if IPV6FIREWALL_DEFAULT_TO_ACCEPT + default_rule.fw_flg |= IPV6_FW_F_ACCEPT; +#else + default_rule.fw_flg |= IPV6_FW_F_DENY; +#endif + default_rule.fw_flg |= IPV6_FW_F_IN | IPV6_FW_F_OUT; + if (check_ip6fw_struct(&default_rule) == NULL || + add_entry6(&ip6_fw_chain, &default_rule)) + panic(__FUNCTION__); + +#if 1 /* NOT SUPPORTED IPV6 DIVERT */ + printf("IPv6 packet filtering initialized, "); +#else + printf("IPv6 packet filtering initialized, " +#if IP6DIVERT + "divert enabled, "); +#else + "divert disabled, "); +#endif +#endif +#if 
IPV6FIREWALL_DEFAULT_TO_ACCEPT + printf("default to accept, "); +#endif +#ifndef IPV6FIREWALL_VERBOSE + printf("logging disabled\n"); +#else + if (fw6_verbose_limit == 0) + printf("unlimited logging\n"); + else + printf("logging limited to %d packets/entry\n", + fw6_verbose_limit); +#endif +} diff --git a/bsd/netinet6/ip6_fw.h b/bsd/netinet6/ip6_fw.h new file mode 100644 index 000000000..59c1fd933 --- /dev/null +++ b/bsd/netinet6/ip6_fw.h @@ -0,0 +1,219 @@ +/* $KAME: ip6_fw.h,v 1.2 2000/02/22 14:04:21 itojun Exp $ */ + +/* + * Copyright (c) 1993 Daniel Boulet + * Copyright (c) 1994 Ugen J.S.Antsilevich + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. + * + * This software is provided ``AS IS'' without any warranties of any kind. + * + */ + +#ifndef _IP6_FW_H +#define _IP6_FW_H + +#include + +/* + * This union structure identifies an interface, either explicitly + * by name or implicitly by IP address. The flags IP_FW_F_IIFNAME + * and IP_FW_F_OIFNAME say how to interpret this structure. An + * interface unit number of -1 matches any unit number, while an + * IP address of 0.0.0.0 indicates matches any interface. + * + * The receive and transmit interfaces are only compared against the + * the packet if the corresponding bit (IP_FW_F_IIFACE or IP_FW_F_OIFACE) + * is set. Note some packets lack a receive or transmit interface + * (in which case the missing "interface" never matches). 
+ */ + +union ip6_fw_if { + struct in6_addr fu_via_ip6; /* Specified by IPv6 address */ + struct { /* Specified by interface name */ +#define FW_IFNLEN IFNAMSIZ + char name[FW_IFNLEN]; + short unit; /* -1 means match any unit */ + } fu_via_if; +}; + +/* + * Format of an IP firewall descriptor + * + * fw_src, fw_dst, fw_smsk, fw_dmsk are always stored in network byte order. + * fw_flg and fw_n*p are stored in host byte order (of course). + * Port numbers are stored in HOST byte order. + * Warning: setsockopt() will fail if sizeof(struct ip_fw) > MLEN (108) + */ + +struct ip6_fw { + u_long fw_pcnt,fw_bcnt; /* Packet and byte counters */ + struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ + struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ + u_short fw_number; /* Rule number */ + u_short fw_flg; /* Flags word */ +#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ + u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ + u_char fw_ip6opt,fw_ip6nopt; /* IPv6 options set/unset */ + u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ +#define IPV6_FW_ICMPTYPES_DIM (32 / (sizeof(unsigned) * 8)) + unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + long timestamp; /* timestamp (tv_sec) of last match */ + union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + } fw_un; + u_char fw_prot; /* IPv6 protocol */ + u_char fw_nports; /* N'of src ports and # of dst ports */ + /* in ports array (dst ports follow */ + /* src ports; max of 10 ports in all; */ + /* count of 0 means match all ports) */ +}; + +#define IPV6_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) +#define IPV6_FW_SETNSRCP(rule, n) do { \ + (rule)->fw_nports &= ~0x0f; \ + (rule)->fw_nports |= (n); \ + } while (0) +#define 
IPV6_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) +#define IPV6_FW_SETNDSTP(rule, n) do { \ + (rule)->fw_nports &= ~0xf0; \ + (rule)->fw_nports |= (n) << 4;\ + } while (0) + +#define fw_divert_port fw_un.fu_divert_port +#define fw_skipto_rule fw_un.fu_skipto_rule +#define fw_reject_code fw_un.fu_reject_code + +struct ip6_fw_chain { + LIST_ENTRY(ip6_fw_chain) chain; + struct ip6_fw *rule; +}; + +/* + * Values for "flags" field . + */ +#define IPV6_FW_F_IN 0x0001 /* Check inbound packets */ +#define IPV6_FW_F_OUT 0x0002 /* Check outbound packets */ +#define IPV6_FW_F_IIFACE 0x0004 /* Apply inbound interface test */ +#define IPV6_FW_F_OIFACE 0x0008 /* Apply outbound interface test */ + +#define IPV6_FW_F_COMMAND 0x0070 /* Mask for type of chain entry: */ +#define IPV6_FW_F_DENY 0x0000 /* This is a deny rule */ +#define IPV6_FW_F_REJECT 0x0010 /* Deny and send a response packet */ +#define IPV6_FW_F_ACCEPT 0x0020 /* This is an accept rule */ +#define IPV6_FW_F_COUNT 0x0030 /* This is a count rule */ +#define IPV6_FW_F_DIVERT 0x0040 /* This is a divert rule */ +#define IPV6_FW_F_TEE 0x0050 /* This is a tee rule */ +#define IPV6_FW_F_SKIPTO 0x0060 /* This is a skipto rule */ + +#define IPV6_FW_F_PRN 0x0080 /* Print if this rule matches */ + +#define IPV6_FW_F_SRNG 0x0100 /* The first two src ports are a min * + * and max range (stored in host byte * + * order). */ + +#define IPV6_FW_F_DRNG 0x0200 /* The first two dst ports are a min * + * and max range (stored in host byte * + * order). 
*/ + +#define IPV6_FW_F_IIFNAME 0x0400 /* In interface by name/unit (not IP) */ +#define IPV6_FW_F_OIFNAME 0x0800 /* Out interface by name/unit (not IP) */ + +#define IPV6_FW_F_INVSRC 0x1000 /* Invert sense of src check */ +#define IPV6_FW_F_INVDST 0x2000 /* Invert sense of dst check */ + +#define IPV6_FW_F_FRAG 0x4000 /* Fragment */ + +#define IPV6_FW_F_ICMPBIT 0x8000 /* ICMP type bitmap is valid */ + +#define IPV6_FW_F_MASK 0xFFFF /* All possible flag bits mask */ + +/* + * For backwards compatibility with rules specifying "via iface" but + * not restricted to only "in" or "out" packets, we define this combination + * of bits to represent this configuration. + */ + +#define IF6_FW_F_VIAHACK (IPV6_FW_F_IN|IPV6_FW_F_OUT|IPV6_FW_F_IIFACE|IPV6_FW_F_OIFACE) + +/* + * Definitions for REJECT response codes. + * Values less than 256 correspond to ICMP unreachable codes. + */ +#define IPV6_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ + +/* + * Definitions for IPv6 option names. + */ +#define IPV6_FW_IP6OPT_HOPOPT 0x01 +#define IPV6_FW_IP6OPT_ROUTE 0x02 +#define IPV6_FW_IP6OPT_FRAG 0x04 +#define IPV6_FW_IP6OPT_ESP 0x08 +#define IPV6_FW_IP6OPT_AH 0x10 +#define IPV6_FW_IP6OPT_NONXT 0x20 +#define IPV6_FW_IP6OPT_OPTS 0x40 + +/* + * Definitions for TCP flags. 
+ */ +#define IPV6_FW_TCPF_FIN TH_FIN +#define IPV6_FW_TCPF_SYN TH_SYN +#define IPV6_FW_TCPF_RST TH_RST +#define IPV6_FW_TCPF_PSH TH_PUSH +#define IPV6_FW_TCPF_ACK TH_ACK +#define IPV6_FW_TCPF_URG TH_URG +#define IPV6_FW_TCPF_ESTAB 0x40 + +/* + * Names for IPV6_FW sysctl objects + */ +#define IP6FWCTL_DEBUG 1 +#define IP6FWCTL_VERBOSE 2 +#define IP6FWCTL_VERBLIMIT 3 +#define IP6FWCTL_MAXID 4 + +#define IP6FWCTL_NAMES { \ + { 0, 0 }, \ + { 0, 0 }, \ + { "debug", CTLTYPE_INT }, \ + { "verbose", CTLTYPE_INT }, \ + { "verbose_limit", CTLTYPE_INT }, \ +} + +#define IP6FWCTL_VARS { \ + 0, \ + 0, \ + &fw6_debug, \ + &fw6_verbose, \ + &fw6_verbose_limit, \ +} + +/* + * Main firewall chains definitions and global var's definitions. + */ +#if KERNEL + +/* + * Function definitions. + */ +void ip6_fw_init(void); + +/* Firewall hooks */ +struct ip6_hdr; +typedef int ip6_fw_chk_t __P((struct ip6_hdr**, struct ifnet*, + u_short *, struct mbuf**)); +typedef int ip6_fw_ctl_t __P((int, struct mbuf**)); +extern ip6_fw_chk_t *ip6_fw_chk_ptr; +extern ip6_fw_ctl_t *ip6_fw_ctl_ptr; + +#endif /* KERNEL */ + +#endif /* _IP6_FW_H */ diff --git a/bsd/netinet6/ip6_input.c b/bsd/netinet6/ip6_input.c new file mode 100644 index 000000000..e933fa07d --- /dev/null +++ b/bsd/netinet6/ip6_input.c @@ -0,0 +1,1937 @@ +/* $KAME: ip6_input.c,v 1.75 2000/03/28 23:11:05 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_input.c 8.2 (Berkeley) 1/4/94 + */ +#define _IP_VHL +#ifdef __FreeBSD__ +#include "opt_ip6fw.h" +#endif +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#ifdef __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) +#include +#endif + +#include +#include +#include +#include +#include + +#include +#include +#if INET +#include +#include +#endif /*INET*/ +#include +#include +#include +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) || defined (__APPLE__) +#include +#endif +#if defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) +#include +#endif +#include +#include +#include +#include + +#if MIP6 +#include +#endif + +#if IPV6FIREWALL +#include +#endif + +#include + 
+/* we need it for NLOOP. */ +#ifndef __bsdi__ +#include "loop.h" +#endif +#include "faith.h" +#include "gif.h" +#include "bpfilter.h" + +#include + +extern struct domain inet6domain; +extern struct ip6protosw inet6sw[]; +#ifdef __bsdi__ +#if _BSDI_VERSION < 199802 +extern struct ifnet loif; +#else +extern struct ifnet *loifp; +#endif +#endif + +struct ip6protosw * ip6_protox[IPPROTO_MAX]; +static int ip6qmaxlen = IFQ_MAXLEN; +struct in6_ifaddr *in6_ifaddr; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 4) +struct ifqueue ip6intrq; +#endif + +#if defined(__NetBSD__) || defined(__OpenBSD__) +extern struct ifnet loif[NLOOP]; +#endif +int ip6_forward_srcrt; /* XXX */ +int ip6_sourcecheck; /* XXX */ +int ip6_sourcecheck_interval; /* XXX */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 4 +const int int6intrq_present = 1; +#endif + +#if IPV6FIREWALL +/* firewall hooks */ +ip6_fw_chk_t *ip6_fw_chk_ptr; +ip6_fw_ctl_t *ip6_fw_ctl_ptr; +#endif + +struct ip6stat ip6stat; + +static void ip6_init2 __P((void *)); + +static int ip6_hopopts_input __P((u_int32_t *, u_int32_t *, struct mbuf **, int *)); +#if PULLDOWN_TEST +static struct mbuf *ip6_pullexthdr __P((struct mbuf *, size_t, int)); +#endif + +#if NATPT +extern int ip6_protocol_tr; + +int natpt_in6 __P((struct mbuf *, struct mbuf **)); +extern void ip_forward __P((struct mbuf *, int)); +#endif + +/* Initialize the PF_INET6 domain, and add in the pre-defined protos */ +void +in6_dinit() +{ register int i; + register struct ip6protosw *pr; + register struct domain *dp; + static inet6domain_initted = 0; + extern int in6_proto_count; + + if (!inet6domain_initted) + { + dp = &inet6domain; + + for (i=0, pr = &inet6sw[0]; ipr_next) { + if(!((unsigned int)pr->pr_domain)) continue; /* If uninitialized, skip */ + if (pr->pr_domain->dom_family == PF_INET6 && + pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) { + ip6_protox[pr->pr_protocol] = pr; + } + } + + ip6intrq.ifq_maxlen = ip6qmaxlen; + nd6_init(); + frag6_init(); +#if 
IPV6FIREWALL + ip6_fw_init(); +#endif + /* + * in many cases, random() here does NOT return random number + * as initialization during bootstrap time occur in fixed order. + */ + microtime(&tv); + ip6_flow_seq = random() ^ tv.tv_usec; + timeout(ip6_init2, (caddr_t)0, 6 * hz); +} + +static void +ip6_init2(dummy) + void *dummy; +{ + int ret; +#if defined(__bsdi__) && _BSDI_VERSION < 199802 + struct ifnet *loifp = &loif; +#endif +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* get EUI64 from somewhere */ + ret = in6_ifattach_getifid(NULL); + + /* + * to route local address of p2p link to loopback, + * assign loopback address first. + */ + in6_ifattach(&loif[0], IN6_IFT_LOOP, NULL, 0); + +#if MIP6 + /* Initialize the Mobile IPv6 code */ + mip6_init(); +#endif + +#import +#if NGIF > 0 + gifattach(); +#endif +#import +#if NFAITH > 0 + faithattach(); +#endif + + /* nd6_timer_init */ + timeout(nd6_timer, (caddr_t)0, hz); + /* router renumbering prefix list maintenance */ + timeout(in6_rr_timer, (caddr_t)0, hz); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + +#if __FreeBSD__ +/* cheat */ +SYSINIT(netinet6init2, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ip6_init2, NULL); +#endif + +/* + * IP6 input interrupt handling. Just pass the packet to ip6_input. + */ +void +ip6intr(void) +{ + int s; + struct mbuf *m; + + for (;;) { + s = splimp(); + IF_DEQUEUE(&ip6intrq, m); + splx(s); + if (m == 0) + return; + ip6_input(m); + } +} + +NETISR_SET(NETISR_IPV6, ip6intr); + +extern struct route_in6 ip6_forward_rt; + +void +ip6_input(m) + struct mbuf *m; +{ + struct ip6_hdr *ip6; + int off = sizeof(struct ip6_hdr), nest; + u_int32_t plen; + u_int32_t rtalert = ~0; + int nxt = 0, ours = 0; + struct ifnet *deliverifp = NULL; +#if defined(__bsdi__) && _BSDI_VERSION < 199802 + struct ifnet *loifp = &loif; +#endif + +#if IPSEC + /* + * should the inner packet be considered authentic? + * see comment in ah4_input(). 
+ */ + if (m) { + m->m_flags &= ~M_AUTHIPHDR; + m->m_flags &= ~M_AUTHIPDGM; + } +#endif + + /* + * mbuf statistics by kazu + */ + if (m->m_flags & M_EXT) { + if (m->m_next) + ip6stat.ip6s_mext2m++; + else + ip6stat.ip6s_mext1++; + } else { + if (m->m_next) { + if (m->m_flags & M_LOOP) + ip6stat.ip6s_m2m[loif[0].if_index]++; /*XXX*/ + else if (m->m_pkthdr.rcvif->if_index <= 31) + ip6stat.ip6s_m2m[m->m_pkthdr.rcvif->if_index]++; + else + ip6stat.ip6s_m2m[0]++; + } else + ip6stat.ip6s_m1++; + } + + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_receive); + ip6stat.ip6s_total++; + +#ifndef PULLDOWN_TEST + /* XXX is the line really necessary? */ + IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), /*nothing*/); +#endif + + if (m->m_len < sizeof(struct ip6_hdr)) { + struct ifnet *inifp; + inifp = m->m_pkthdr.rcvif; + if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == 0) { + ip6stat.ip6s_toosmall++; + in6_ifstat_inc(inifp, ifs6_in_hdrerr); + return; + } + } + + ip6 = mtod(m, struct ip6_hdr *); + + if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { + ip6stat.ip6s_badvers++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); + goto bad; + } + + ip6stat.ip6s_nxthist[ip6->ip6_nxt]++; + +#if IPV6FIREWALL + /* + * Check with the firewall... + */ + if (ip6_fw_chk_ptr) { + u_short port = 0; + /* If ipfw says divert, we have to just drop packet */ + /* use port as a dummy argument */ + if ((*ip6_fw_chk_ptr)(&ip6, NULL, &port, &m)) { + m_freem(m); + m = NULL; + } + if (!m) + return; + } +#endif + + /* + * Scope check + */ + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src) || + IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst)) { + ip6stat.ip6s_badscope++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr); + goto bad; + } + + /* + * Don't check IPv4 mapped address here. SIIT assumes that + * routers would forward IPv6 native packets with IPv4 mapped + * address normally. + */ +#if 0 + /* + * Reject packets with IPv4 compatible addresses (auto tunnel). 
+ * + * The code forbids auto tunnel relay case in RFC1933 (the check is + * stronger than RFC1933). We may want to re-enable it if mech-xx + * is revised to forbid relaying case. + */ + if (IN6_IS_ADDR_V4COMPAT(&ip6->ip6_src) || + IN6_IS_ADDR_V4COMPAT(&ip6->ip6_dst)) { + ip6stat.ip6s_badscope++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr); + goto bad; + } +#endif + if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) || + IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) { + if (m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) { + ours = 1; + deliverifp = m->m_pkthdr.rcvif; + goto hbhcheck; + } else { + ip6stat.ip6s_badscope++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr); + goto bad; + } + } + + if (m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) { + if (IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst)) { + ours = 1; + deliverifp = m->m_pkthdr.rcvif; + goto hbhcheck; + } + } else { + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) + ip6->ip6_src.s6_addr16[1] + = htons(m->m_pkthdr.rcvif->if_index); + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) + ip6->ip6_dst.s6_addr16[1] + = htons(m->m_pkthdr.rcvif->if_index); + } + + /* + * Multicast check + */ + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { + struct in6_multi *in6m = 0; + + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mcast); + /* + * See if we belong to the destination multicast group on the + * arrival interface. 
+ */ + IN6_LOOKUP_MULTI(ip6->ip6_dst, m->m_pkthdr.rcvif, in6m); + if (in6m) + ours = 1; + else if (!ip6_mrouter) { + ip6stat.ip6s_notmember++; + ip6stat.ip6s_cantforward++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard); + goto bad; + } + deliverifp = m->m_pkthdr.rcvif; + goto hbhcheck; + } + + /* + * Unicast check + */ + if (ip6_forward_rt.ro_rt == 0 || + !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, + &ip6_forward_rt.ro_dst.sin6_addr)) { + if (ip6_forward_rt.ro_rt) { + RTFREE(ip6_forward_rt.ro_rt); + ip6_forward_rt.ro_rt = 0; + } + bzero(&ip6_forward_rt.ro_dst, sizeof(struct sockaddr_in6)); + ip6_forward_rt.ro_dst.sin6_len = sizeof(struct sockaddr_in6); + ip6_forward_rt.ro_dst.sin6_family = AF_INET6; + ip6_forward_rt.ro_dst.sin6_addr = ip6->ip6_dst; + +#if __FreeBSD__ || defined(__APPLE__) + rtalloc_ign((struct route *)&ip6_forward_rt, RTF_PRCLONING); +#else + rtalloc((struct route *)&ip6_forward_rt); +#endif + } + +#define rt6_key(r) ((struct sockaddr_in6 *)((r)->rt_nodes->rn_key)) + + /* + * Accept the packet if the forwarding interface to the destination + * according to the routing table is the loopback interface, + * unless the associated route has a gateway. + * Note that this approach causes to accept a packet if there is a + * route to the loopback interface for the destination of the packet. + * But we think it's even useful in some situations, e.g. when using + * a special daemon which wants to intercept the packet. + */ + if (ip6_forward_rt.ro_rt && + (ip6_forward_rt.ro_rt->rt_flags & + (RTF_HOST|RTF_GATEWAY)) == RTF_HOST && +#if 0 + /* + * The check below is redundant since the comparison of + * the destination and the key of the rtentry has + * already done through looking up the routing table. 
+ */ + IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, + &rt6_key(ip6_forward_rt.ro_rt)->sin6_addr) && +#endif + ip6_forward_rt.ro_rt->rt_ifp->if_type == IFT_LOOP) { + struct in6_ifaddr *ia6 = + (struct in6_ifaddr *)ip6_forward_rt.ro_rt->rt_ifa; + /* packet to tentative address must not be received */ + if (ia6->ia6_flags & IN6_IFF_ANYCAST) + m->m_flags |= M_ANYCAST6; + if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) { + /* this interface is ready */ + ours = 1; + deliverifp = ia6->ia_ifp; /* correct? */ + goto hbhcheck; + } else { + /* this interface is not ready, fall through */ + } + } + + /* + * FAITH(Firewall Aided Internet Translator) + */ +#if defined(NFAITH) && 0 < NFAITH + if (ip6_keepfaith) { + if (ip6_forward_rt.ro_rt && ip6_forward_rt.ro_rt->rt_ifp + && ip6_forward_rt.ro_rt->rt_ifp->if_type == IFT_FAITH) { + /* XXX do we need more sanity checks? */ + ours = 1; + deliverifp = ip6_forward_rt.ro_rt->rt_ifp; /*faith*/ + goto hbhcheck; + } + } +#endif + +#if NATPT + /* + * NAT-PT (Network Address Translation - Protocol Translation) + */ + if (ip6_protocol_tr) + { + struct mbuf *m1 = NULL; + + switch (natpt_in6(m, &m1)) + { + case IPPROTO_IP: goto processpacket; + case IPPROTO_IPV4: ip_forward(m1, 0); break; + case IPPROTO_IPV6: ip6_forward(m1, 0); break; + case IPPROTO_MAX: /* discard this packet */ + default: break; + + case IPPROTO_DONE: /* discard without free */ + return; + } + + if (m != m1) + m_freem(m); + + return; + } + + processpacket: +#endif + +#if 0 + { + /* + * Last resort: check in6_ifaddr for incoming interface. + * The code is here until I update the "goto ours hack" code above + * working right. 
+ */ + struct ifaddr *ifa; + for (ifa = m->m_pkthdr.rcvif->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) { + if (ifa->ifa_addr == NULL) + continue; /* just for safety */ + if (ifa->ifa_addr->sa_family != AF_INET6) + continue; + if (IN6_ARE_ADDR_EQUAL(IFA_IN6(ifa), &ip6->ip6_dst)) { + ours = 1; + deliverifp = ifa->ifa_ifp; + goto hbhcheck; + } + } + } +#endif + + /* + * Now there is no reason to process the packet if it's not our own + * and we're not a router. + */ + if (!ip6_forwarding) { + ip6stat.ip6s_cantforward++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard); + goto bad; + } + + hbhcheck: + /* + * Process Hop-by-Hop options header if it's contained. + * m may be modified in ip6_hopopts_input(). + * If a JumboPayload option is included, plen will also be modified. + */ + plen = (u_int32_t)ntohs(ip6->ip6_plen); + if (ip6->ip6_nxt == IPPROTO_HOPOPTS) { + struct ip6_hbh *hbh; + + if (ip6_hopopts_input(&plen, &rtalert, &m, &off)) { +#if 0 /*touches NULL pointer*/ + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard); +#endif + return; /* m have already been freed */ + } + /* adjust pointer */ + ip6 = mtod(m, struct ip6_hdr *); +#ifndef PULLDOWN_TEST + /* ip6_hopopts_input() ensures that mbuf is contiguous */ + hbh = (struct ip6_hbh *)(ip6 + 1); +#else + IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr), + sizeof(struct ip6_hbh)); + if (hbh == NULL) { + ip6stat.ip6s_tooshort++; + return; + } +#endif + nxt = hbh->ip6h_nxt; + + /* + * accept the packet if a router alert option is included + * and we act as an IPv6 router. + */ + if (rtalert != ~0 && ip6_forwarding) + ours = 1; + } else + nxt = ip6->ip6_nxt; + + /* + * Check that the amount of data in the buffers + * is as at least much as the IPv6 header would have us expect. + * Trim mbufs if longer than we expect. + * Drop packet if shorter than we expect. 
+ */ + if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) { + ip6stat.ip6s_tooshort++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); + goto bad; + } + if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) { + if (m->m_len == m->m_pkthdr.len) { + m->m_len = sizeof(struct ip6_hdr) + plen; + m->m_pkthdr.len = sizeof(struct ip6_hdr) + plen; + } else + m_adj(m, sizeof(struct ip6_hdr) + plen - m->m_pkthdr.len); + } + + /* + * Forward if desirable. + */ + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { + /* + * If we are acting as a multicast router, all + * incoming multicast packets are passed to the + * kernel-level multicast forwarding function. + * The packet is returned (relatively) intact; if + * ip6_mforward() returns a non-zero value, the packet + * must be discarded, else it may be accepted below. + */ + if (ip6_mrouter && ip6_mforward(ip6, m->m_pkthdr.rcvif, m)) { + ip6stat.ip6s_cantforward++; + m_freem(m); + return; + } + if (!ours) { + m_freem(m); + return; + } + } else if (!ours) { + ip6_forward(m, 0); + return; + } + + ip6 = mtod(m, struct ip6_hdr *); + + /* + * Malicious party may be able to use IPv4 mapped addr to confuse + * tcp/udp stack and bypass security checks (act as if it was from + * 127.0.0.1 by using IPv6 src ::ffff:127.0.0.1). Be cautious. + * + * For SIIT end node behavior, you may want to disable the check. + * However, you will become vulnerable to attacks using IPv4 mapped + * source. 
+ */ + if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) || + IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) { + ip6stat.ip6s_badscope++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr); + goto bad; + } + + /* + * Tell launch routine the next header + */ +#if defined(__NetBSD__) && defined(IFA_STATS) + if (IFA_STATS && deliverifp != NULL) { + struct in6_ifaddr *ia6; + ia6 = in6_ifawithifp(deliverifp, &ip6->ip6_dst); + if (ia6) + ia6->ia_ifa.ifa_data.ifad_inbytes += m->m_pkthdr.len; + } +#endif + ip6stat.ip6s_delivered++; + in6_ifstat_inc(deliverifp, ifs6_in_deliver); + nest = 0; + +#if MIP6 + /* + * Mobile IPv6 + * + * Assume that the received packet shall be processed by MIPv6 when + * the destination header has been taken care of. Because of this, + * some flags have to be reset for later evaluation. + */ + if (mip6_new_packet_hook) + (*mip6_new_packet_hook)(m); +#endif /* MIP6 */ + + while (nxt != IPPROTO_DONE) { + if (ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) { + ip6stat.ip6s_toomanyhdr++; + goto bad; + } + + /* + * protection against faulty packet - there should be + * more sanity checks in header chain processing. + */ + if (m->m_pkthdr.len == 0 || m->m_pkthdr.len < off) { + ip6stat.ip6s_tooshort++; + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); + goto bad; + } + +#if MIP6 + if ((nxt != IPPROTO_HOPOPTS) && (nxt != IPPROTO_DSTOPTS) && + (nxt != IPPROTO_ROUTING) && (nxt != IPPROTO_FRAGMENT) && + (nxt != IPPROTO_ESP) && (nxt != IPPROTO_AH)) { + if (mip6_route_optimize_hook) + (*mip6_route_optimize_hook)(m); + } +#endif + nxt = (*ip6_protox[nxt]->pr_input)(&m, &off, nxt); + } + return; + bad: + m_freem(m); +} + +/* + * Hop-by-Hop options header processing. If a valid jumbo payload option is + * included, the real payload length will be stored in plenp. 
+ */ +static int +ip6_hopopts_input(plenp, rtalertp, mp, offp) + u_int32_t *plenp; + u_int32_t *rtalertp; /* XXX: should be stored more smart way */ + struct mbuf **mp; + int *offp; +{ + register struct mbuf *m = *mp; + int off = *offp, hbhlen; + struct ip6_hbh *hbh; + u_int8_t *opt; + + /* validation of the length of the header */ +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), -1); + hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off); + hbhlen = (hbh->ip6h_len + 1) << 3; + + IP6_EXTHDR_CHECK(m, off, hbhlen, -1); + hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off); +#else + IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, + sizeof(struct ip6_hdr), sizeof(struct ip6_hbh)); + if (hbh == NULL) { + ip6stat.ip6s_tooshort++; + return -1; + } + hbhlen = (hbh->ip6h_len + 1) << 3; + IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr), + hbhlen); + if (hbh == NULL) { + ip6stat.ip6s_tooshort++; + return -1; + } +#endif + off += hbhlen; + hbhlen -= sizeof(struct ip6_hbh); + opt = (u_int8_t *)hbh + sizeof(struct ip6_hbh); + + if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh), + hbhlen, rtalertp, plenp) < 0) + return(-1); + + *offp = off; + *mp = m; + return(0); +} + +/* + * Search header for all Hop-by-hop options and process each option. + * This function is separate from ip6_hopopts_input() in order to + * handle a case where the sending node itself process its hop-by-hop + * options header. In such a case, the function is called from ip6_output(). 
+ */ +int +ip6_process_hopopts(m, opthead, hbhlen, rtalertp, plenp) + struct mbuf *m; + u_int8_t *opthead; + int hbhlen; + u_int32_t *rtalertp; + u_int32_t *plenp; +{ + struct ip6_hdr *ip6; + int optlen = 0; + u_int8_t *opt = opthead; + u_int16_t rtalert_val; + + for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) { + switch(*opt) { + case IP6OPT_PAD1: + optlen = 1; + break; + case IP6OPT_PADN: + if (hbhlen < IP6OPT_MINLEN) { + ip6stat.ip6s_toosmall++; + goto bad; + } + optlen = *(opt + 1) + 2; + break; + case IP6OPT_RTALERT: + /* XXX may need check for alignment */ + if (hbhlen < IP6OPT_RTALERT_LEN) { + ip6stat.ip6s_toosmall++; + goto bad; + } + if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) + /* XXX: should we discard the packet? */ + log(LOG_ERR, "length of router alert opt is inconsitent(%d)", + *(opt + 1)); + optlen = IP6OPT_RTALERT_LEN; + bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2); + *rtalertp = ntohs(rtalert_val); + break; + case IP6OPT_JUMBO: + /* XXX may need check for alignment */ + if (hbhlen < IP6OPT_JUMBO_LEN) { + ip6stat.ip6s_toosmall++; + goto bad; + } + if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) + /* XXX: should we discard the packet? */ + log(LOG_ERR, "length of jumbopayload opt " + "is inconsistent(%d)", + *(opt + 1)); + optlen = IP6OPT_JUMBO_LEN; + + /* + * We can simply cast because of the alignment + * requirement of the jumbo payload option. + */ +#if 0 + *plenp = ntohl(*(u_int32_t *)(opt + 2)); +#else + bcopy(opt + 2, plenp, sizeof(*plenp)); + *plenp = htonl(*plenp); +#endif + if (*plenp <= IPV6_MAXPACKET) { + /* + * jumbo payload length must be larger + * than 65535 + */ + ip6stat.ip6s_badoptions++; + icmp6_error(m, ICMP6_PARAM_PROB, + ICMP6_PARAMPROB_HEADER, + sizeof(struct ip6_hdr) + + sizeof(struct ip6_hbh) + + opt + 2 - opthead); + return(-1); + } + + ip6 = mtod(m, struct ip6_hdr *); + if (ip6->ip6_plen) { + /* + * IPv6 packets that have non 0 payload length + * must not contain a jumbo paylod option. 
+ */ + ip6stat.ip6s_badoptions++; + icmp6_error(m, ICMP6_PARAM_PROB, + ICMP6_PARAMPROB_HEADER, + sizeof(struct ip6_hdr) + + sizeof(struct ip6_hbh) + + opt - opthead); + return(-1); + } + break; + default: /* unknown option */ + if (hbhlen < IP6OPT_MINLEN) { + ip6stat.ip6s_toosmall++; + goto bad; + } + if ((optlen = ip6_unknown_opt(opt, m, + sizeof(struct ip6_hdr) + + sizeof(struct ip6_hbh) + + opt - opthead)) == -1) + return(-1); + optlen += 2; + break; + } + } + + return(0); + + bad: + m_freem(m); + return(-1); +} + +/* + * Unknown option processing. + * The third argument `off' is the offset from the IPv6 header to the option, + * which is necessary if the IPv6 header the and option header and IPv6 header + * is not continuous in order to return an ICMPv6 error. + */ +int +ip6_unknown_opt(optp, m, off) + u_int8_t *optp; + struct mbuf *m; + int off; +{ + struct ip6_hdr *ip6; + + switch(IP6OPT_TYPE(*optp)) { + case IP6OPT_TYPE_SKIP: /* ignore the option */ + return((int)*(optp + 1)); + case IP6OPT_TYPE_DISCARD: /* silently discard */ + m_freem(m); + return(-1); + case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */ + ip6stat.ip6s_badoptions++; + icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off); + return(-1); + case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */ + ip6stat.ip6s_badoptions++; + ip6 = mtod(m, struct ip6_hdr *); + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || + (m->m_flags & (M_BCAST|M_MCAST))) + m_freem(m); + else + icmp6_error(m, ICMP6_PARAM_PROB, + ICMP6_PARAMPROB_OPTION, off); + return(-1); + } + + m_freem(m); /* XXX: NOTREACHED */ + return(-1); +} + +/* + * Create the "control" list for this pcb. + * The function will not modify mbuf chain at all. + * + * with KAME mbuf chain restriction: + * The routine will be called from upper layer handlers like tcp6_input(). 
+ * Thus the routine assumes that the caller (tcp6_input) have already + * called IP6_EXTHDR_CHECK() and all the extension headers are located in the + * very first mbuf on the mbuf chain. + */ +void +ip6_savecontrol(in6p, ip6, m, ctl, prevctlp) +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined(__APPLE__) + register struct inpcb *in6p; +#else + register struct in6pcb *in6p; +#endif + register struct ip6_hdr *ip6; + register struct mbuf *m; + struct ip6_recvpktopts *ctl, **prevctlp; +{ + register struct mbuf **mp; + struct cmsghdr *cm = NULL; + struct ip6_recvpktopts *prevctl = NULL; +#if HAVE_NRL_INPCB +# define in6p_flags inp_flags +#endif +#if defined(__NetBSD__) || (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + struct proc *p = current_proc(); /* XXX */ +#endif +#ifdef __bsdi__ +# define sbcreatecontrol so_cmsg +#endif + int privileged = 0; + + + if (ctl == NULL) /* validity check */ + return; + bzero(ctl, sizeof(*ctl)); /* XXX is it really OK? */ + mp = &ctl->head; + + /* + * If caller wanted to keep history, allocate space to store the + * history at the first time. 
+ */ + if (prevctlp) { + if (*prevctlp == NULL) { + MALLOC(prevctl, struct ip6_recvpktopts *, + sizeof(*prevctl), M_IP6OPT, M_NOWAIT); + if (prevctl == NULL) { + printf("ip6_savecontrol: can't allocate " + " enough space for history\n"); + return; + } + bzero(prevctl, sizeof(*prevctl)); + *prevctlp = prevctl; + } + else + prevctl = *prevctlp; + } + +#if defined(__NetBSD__) || (defined(__FreeBSD__) && __FreeBSD__ == 3) + if (p && !suser(p->p_ucred, &p->p_acflag)) + privileged++; +#elif defined(__FreeBSD__) && __FreeBSD__ >= 4 + if (p && !suser(p)) + privileged++; +#else +#if HAVE_NRL_INPCB + if ((in6p->inp_socket->so_state & SS_PRIV) != 0) + privileged++; +#else + if ((in6p->in6p_socket->so_state & SS_PRIV) != 0) + privileged++; +#endif +#endif + +#if SO_TIMESTAMP + if (in6p->in6p_socket->so_options & SO_TIMESTAMP) { + struct timeval tv; + + microtime(&tv); + *mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv), + SCM_TIMESTAMP, SOL_SOCKET); + if (*mp) { + /* always set regradless of the previous value */ + ctl->timestamp = *mp; + mp = &(*mp)->m_next; + } + } +#endif + + /* RFC 2292 sec. 5 */ + if (in6p->in6p_flags & IN6P_PKTINFO) { + struct in6_pktinfo pi6, *prevpi = NULL; + bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr)); + if (IN6_IS_SCOPE_LINKLOCAL(&pi6.ipi6_addr)) + pi6.ipi6_addr.s6_addr16[1] = 0; + pi6.ipi6_ifindex = (m && m->m_pkthdr.rcvif) + ? m->m_pkthdr.rcvif->if_index + : 0; + if (prevctl && prevctl->pktinfo) { + cm = mtod(prevctl->pktinfo, struct cmsghdr *); + prevpi = (struct in6_pktinfo *)CMSG_DATA(cm); + } + + /* + * Make a new option only if this is the first time or if the + * option value is chaned from last time. 
+ */ + if (prevpi == NULL || bcmp(prevpi, &pi6, sizeof(pi6))) { + *mp = sbcreatecontrol((caddr_t) &pi6, + sizeof(struct in6_pktinfo), + IPV6_PKTINFO, + IPPROTO_IPV6); + if (*mp) { + ctl->pktinfo = *mp; + mp = &(*mp)->m_next; + } + } + } + + if (in6p->in6p_flags & IN6P_HOPLIMIT) { + int hlim = ip6->ip6_hlim & 0xff, oldhlim = -1; + + if (prevctl && prevctl->hlim) { + cm = mtod(prevctl->hlim, struct cmsghdr *); + oldhlim = (*(int *)CMSG_DATA(cm)) & 0xff; + } + + if (oldhlim < 0 || hlim != oldhlim) { + *mp = sbcreatecontrol((caddr_t) &hlim, + sizeof(int), IPV6_HOPLIMIT, + IPPROTO_IPV6); + if (*mp) { + ctl->hlim = *mp; + mp = &(*mp)->m_next; + } + } + } + + /* + * IPV6_HOPOPTS socket option. We require super-user privilege + * for the option, but it might be too strict, since there might + * be some hop-by-hop options which can be returned to normal user. + * See RFC 2292 section 6. + */ + if ((in6p->in6p_flags & IN6P_HOPOPTS) && privileged) { + /* + * Check if a hop-by-hop options header is contatined in the + * received packet, and if so, store the options as ancillary + * data. Note that a hop-by-hop options header must be + * just after the IPv6 header, which fact is assured through + * the IPv6 input processing. 
+ */ + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + if (ip6->ip6_nxt == IPPROTO_HOPOPTS) { + struct ip6_hbh *hbh, *prevhbh = NULL; + int hbhlen = 0, prevhbhlen = 0; +#ifdef PULLDOWN_TEST + struct mbuf *ext; +#endif + +#ifndef PULLDOWN_TEST + hbh = (struct ip6_hbh *)(ip6 + 1); + hbhlen = (hbh->ip6h_len + 1) << 3; +#else + ext = ip6_pullexthdr(m, sizeof(struct ip6_hdr), + ip6->ip6_nxt); + if (ext == NULL) { + ip6stat.ip6s_tooshort++; + return; + } + hbh = mtod(ext, struct ip6_hbh *); + hbhlen = (hbh->ip6h_len + 1) << 3; + if (hbhlen != ext->m_len) { + m_freem(ext); + ip6stat.ip6s_tooshort++; + return; + } +#endif + + if (prevctl && prevctl->hbh) { + cm = mtod(prevctl->hbh, struct cmsghdr *); + prevhbh = (struct ip6_hbh *)CMSG_DATA(cm); + prevhbhlen = (prevhbh->ip6h_len + 1) << 3; + } + /* + * Check if there's difference between the current + * and previous HbH headers. + * XXX: should the next header field be ignored? + */ + if (prevhbh == NULL || hbhlen != prevhbhlen || + bcmp(prevhbh, hbh, hbhlen)) { + /* + * XXX: We copy whole the header even if a + * jumbo payload option is included, which + * option is to be removed before returning + * in the RFC 2292. + * Note: this constraint is removed in + * 2292bis. + */ + *mp = sbcreatecontrol((caddr_t)hbh, hbhlen, + IPV6_HOPOPTS, + IPPROTO_IPV6); + if (*mp) { + ctl->hbh = *mp; + mp = &(*mp)->m_next; + } + } +#ifdef PULLDOWN_TEST + m_freem(ext); +#endif + } + } + + /* IPV6_DSTOPTS and IPV6_RTHDR socket options */ + if (in6p->in6p_flags & (IN6P_DSTOPTS | IN6P_RTHDR)) { + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + int nxt = ip6->ip6_nxt, off = sizeof(struct ip6_hdr); + int rthdr = 0; /* flag if we've passed a routing header */ + + /* + * Search for destination options headers or routing + * header(s) through the header chain, and stores each + * header as ancillary data. + * Note that the order of the headers remains in + * the chain of ancillary data. 
+ */ + while (1) { /* is explicit loop prevention necessary? */ + struct ip6_ext *ip6e = NULL; + int elen; +#ifdef PULLDOWN_TEST + struct mbuf *ext = NULL; +#endif + + /* + * if it is not an extension header, don't try to + * pull it from the chain. + */ + switch (nxt) { + case IPPROTO_DSTOPTS: + case IPPROTO_ROUTING: + case IPPROTO_HOPOPTS: + case IPPROTO_AH: /* is it possible? */ + break; + default: + goto loopend; + } + +#ifndef PULLDOWN_TEST + if (off + sizeof(*ip6e) > m->m_len) + goto loopend; + ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off); + if (nxt == IPPROTO_AH) + elen = (ip6e->ip6e_len + 2) << 2; + else + elen = (ip6e->ip6e_len + 1) << 3; + if (off + elen > m->m_len) + goto loopend; +#else + ext = ip6_pullexthdr(m, off, nxt); + if (ext == NULL) { + ip6stat.ip6s_tooshort++; + return; + } + ip6e = mtod(ext, struct ip6_ext *); + if (nxt == IPPROTO_AH) + elen = (ip6e->ip6e_len + 2) << 2; + else + elen = (ip6e->ip6e_len + 1) << 3; + if (elen != ext->m_len) { + m_freem(ext); + ip6stat.ip6s_tooshort++; + return; + } +#endif + + switch (nxt) { + case IPPROTO_DSTOPTS: + { + struct ip6_dest *prevdest1 = NULL, + *prevdest2 = NULL; + int prevdestlen; + + if ((in6p->in6p_flags & + (IN6P_DSTOPTS | IN6P_RTHDRDSTOPTS)) == 0) + break; + + /* + * We also require super-user privilege for + * the option. + * See the comments on IN6_HOPOPTS. + */ + if (!privileged) + break; + + /* + * Save a dst opt header before a routing + * header if the user wanted. + */ + if (rthdr == 0 && + (in6p->in6p_flags & IN6P_RTHDRDSTOPTS)) { + if (prevctl && prevctl->dest1) { + cm = mtod(prevctl->dest1, + struct cmsghdr *); + prevdest1 = (struct ip6_dest *)CMSG_DATA(cm); + prevdestlen = (prevdest1->ip6d_len + 1) << 3; + } + + /* + * If this is the 1st dst opt header + * (that is placed before rthdr) + * we enconter and this header is + * not different from the previous one, + * simply ignore the header. 
+ */ + if (ctl->dest1 == NULL && + (prevdest1 && + prevdestlen == elen && + bcmp(ip6e, prevdest1, elen) == 0)) + break; + + *mp = sbcreatecontrol((caddr_t)ip6e, + elen, + IPV6_RTHDRDSTOPTS, + IPPROTO_IPV6); + if (ctl->dest1 == NULL) + ctl->dest1 = *mp; + if (*mp) + mp = &(*mp)->m_next; + } + /* + * Save a dst opt header after a routing + * header if the user wanted. + */ + if (rthdr && + (in6p->in6p_flags & IN6P_DSTOPTS)) { + if (prevctl && prevctl->dest2) { + cm = mtod(prevctl->dest2, + struct cmsghdr *); + prevdest2 = (struct ip6_dest *)CMSG_DATA(cm); + prevdestlen = (prevdest2->ip6d_len + 1) << 3; + } + /* see the above comment */ + if (ctl->dest2 == NULL && + (prevdest2 && + prevdestlen == elen && + bcmp(ip6e, prevdest2, elen) == 0)) + break; + + *mp = sbcreatecontrol((caddr_t)ip6e, + elen, + IPV6_DSTOPTS, + IPPROTO_IPV6); + if (ctl->dest2 == NULL) + ctl->dest2 = *mp; + + if (*mp) + mp = &(*mp)->m_next; + } + break; + } + case IPPROTO_ROUTING: + { + struct ip6_rthdr *prevrth = NULL; + int prevrhlen = 0; + + rthdr++; + if (!in6p->in6p_flags & IN6P_RTHDR) + break; + + if (prevctl && prevctl->rthdr) { + cm = mtod(prevctl->rthdr, + struct cmsghdr *); + prevrth = (struct ip6_rthdr *)CMSG_DATA(cm); + prevrhlen = + (prevrth->ip6r_len + 1) << 3; + } + + /* + * Check if the rthdr should be passed to + * a user. See the comments for dstopt hdr. + */ + if (ctl->rthdr == NULL && prevrth && + prevrhlen == elen && + bcmp(ip6e, prevrth, elen) == 0) + break; + + *mp = sbcreatecontrol((caddr_t)ip6e, elen, + IPV6_RTHDR, + IPPROTO_IPV6); + if (ctl->rthdr == NULL) + ctl->rthdr = *mp; + if (*mp) + mp = &(*mp)->m_next; + break; + } + case IPPROTO_HOPOPTS: + case IPPROTO_AH: /* is it possible? */ + break; + + default: + /* + * other cases have been filtered in the above. + * none will visit this case. here we supply + * the code just in case (nxt overwritten or + * other cases). 
+ */ +#ifdef PULLDOWN_TEST + m_freem(ext); +#endif + goto loopend; + + } + + /* proceed with the next header. */ + off += elen; + nxt = ip6e->ip6e_nxt; + ip6e = NULL; +#ifdef PULLDOWN_TEST + m_freem(ext); + ext = NULL; +#endif + } + loopend: + } + +#ifdef __bsdi__ +# undef sbcreatecontrol +#endif +#ifdef __OpenBSD__ +# undef in6p_flags +#endif +} + +#ifdef PULLDOWN_TEST +/* + * pull single extension header from mbuf chain. returns single mbuf that + * contains the result, or NULL on error. + */ +static struct mbuf * +ip6_pullexthdr(m, off, nxt) + struct mbuf *m; + size_t off; + int nxt; +{ + struct ip6_ext ip6e; + size_t elen; + struct mbuf *n; + +#ifdef DIAGNOSTIC + switch (nxt) { + case IPPROTO_DSTOPTS: + case IPPROTO_ROUTING: + case IPPROTO_HOPOPTS: + case IPPROTO_AH: /* is it possible? */ + break; + default: + printf("ip6_pullexthdr: invalid nxt=%d\n", nxt); + } +#endif + + m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e); + if (nxt == IPPROTO_AH) + elen = (ip6e.ip6e_len + 2) << 2; + else + elen = (ip6e.ip6e_len + 1) << 3; + + MGET(n, M_DONTWAIT, MT_DATA); + if (n && elen >= MLEN) { + MCLGET(n, M_DONTWAIT); + if ((n->m_flags & M_EXT) == 0) { + m_free(n); + n = NULL; + } + } + if (!n) + return NULL; + + n->m_len = 0; + if (elen >= M_TRAILINGSPACE(n)) { + m_free(n); + return NULL; + } + + m_copydata(m, off, elen, mtod(n, caddr_t)); + n->m_len = elen; + return n; +} +#endif + +/* + * Merge new IPv6 received options to previous ones. + * If a new option is not given, just re-link the option chain. + * If an old option exists but a corresponding new one doesn't, just + * keep the ole option. + * If a new option exists but a corresponding old one doesn't, just + * copy the new option. + * If both new and old options exist, free old one and overwrite the option + * with the new option. + * Otherwise, do nothing for the option. 
+ * XXX: in any case, options that don't follow the recommend order and + * number of extension headers (RFC 2460 Section 4.1) are simply ignored. + * XXX: We assume that each option is stored in a single mbuf. + */ +#define CLEAN_RECVOPT(old, type) \ +do { \ + if ((old)->type && (old)->type->m_next) { \ + (old)->type->m_next = NULL; \ + } \ +} while (0) +#define MERGE_RECVOPT(new, old, type) if ((new)->type) {\ + if ((old)->type)\ + m_free((old)->type);\ + (old)->type = m_copy((new)->type, 0, (new)->type->m_len);\ + if (((old)->type) && ((old)->type->m_next)) {\ + m_freem((old)->type);\ + old->type = NULL;\ + }\ + } +#define LINK_RECVOPTS(opt, type, p) if ((opt)->type) {\ + *(p) = (opt)->type;\ + (p) = &(opt)->type->m_next;\ + } + +static void dump_inputopts __P((char *, struct ip6_recvpktopts *)); +static void +dump_inputopts(str, p) + char *str; + struct ip6_recvpktopts *p; +{ +#if 1 + return; +#else +#define PRINT1(p, name) \ +do { \ + if (p->name) { \ + printf(" %s: %p", #name, (p)->name); \ + if (p->name->m_next) \ + printf("[%p]", (p)->name->m_next); \ + } \ +} while (0) + + printf("%s p=%p head=%p", str, p, p->head); + PRINT1(p, hlim); + PRINT1(p, pktinfo); + PRINT1(p, hbh); + PRINT1(p, dest1); + PRINT1(p, dest2); + PRINT1(p, rthdr); + printf("\n"); +#undef PRINT1 +#endif +} + +void +ip6_update_recvpcbopt(old, new) + struct ip6_recvpktopts *new, *old; +{ + struct mbuf **mp; + + if (old == NULL) { + printf("ip6_update_recvpcbopt: invalid arguments\n"); + return; + } + + dump_inputopts("old before", old); + if (new) + dump_inputopts("new before", new); + +#if 0 + /* + * cleanup m->m_next linkage. note that we do it in reverse order + * to prevent possible memory leakage. 
+ */ + old->head = NULL; + CLEAN_RECVOPT(old, rthdr); + CLEAN_RECVOPT(old, dest2); + CLEAN_RECVOPT(old, dest1); + CLEAN_RECVOPT(old, hbh); + CLEAN_RECVOPT(old, pktinfo); + CLEAN_RECVOPT(old, hlim); +#endif + + if (new) { + MERGE_RECVOPT(new, old, hlim); + MERGE_RECVOPT(new, old, pktinfo); + MERGE_RECVOPT(new, old, hbh); + MERGE_RECVOPT(new, old, dest1); + MERGE_RECVOPT(new, old, dest2); + MERGE_RECVOPT(new, old, rthdr); + } + + dump_inputopts("old middle", old); + if (new) + dump_inputopts("new middle", new); + + /* link options */ + mp = &old->head; + LINK_RECVOPTS(old, hlim, mp); + LINK_RECVOPTS(old, pktinfo, mp); + LINK_RECVOPTS(old, hbh, mp); + LINK_RECVOPTS(old, dest1, mp); + LINK_RECVOPTS(old, dest2, mp); + LINK_RECVOPTS(old, rthdr, mp); + *mp = NULL; + + dump_inputopts("old after", old); + if (new) + dump_inputopts("new after", new); +} + +#undef MERGE_RECVOPT +#undef LINK_RECVOPTS + +void +ip6_reset_rcvopt(opts, optname) + struct ip6_recvpktopts *opts; + int optname; +{ + if (opts == NULL) + return; + + switch(optname) { + case IPV6_RECVPKTINFO: + if (opts->pktinfo) m_free(opts->pktinfo); + opts->pktinfo = NULL; + break; + case IPV6_RECVHOPLIMIT: + if (opts->hlim) m_free(opts->hlim); + opts->hlim = NULL; + break; + case IPV6_RECVHOPOPTS: + if (opts->hbh) m_free(opts->hbh); + opts->hbh = NULL; + break; + case IPV6_RECVRTHDRDSTOPTS: + if (opts->dest1) m_free(opts->dest1); + opts->dest1 = NULL; + break; + case IPV6_RECVDSTOPTS: + if (opts->dest2) m_free(opts->dest2); + opts->dest2 = NULL; + break; + case IPV6_RECVRTHDR: + if (opts->rthdr) m_free(opts->rthdr); + opts->rthdr = NULL; + break; + default: + printf("ip6_reset_rcvopt: invalid option name (%d)\n", + optname); + return; + } + + ip6_update_recvpcbopt(opts, NULL); /* re-link the option chain */ +} + +/* + * Get pointer to the previous header followed by the header + * currently processed. 
+ * XXX: This function supposes that + * M includes all headers, + * the next header field and the header length field of each header + * are valid, and + * the sum of each header length equals to OFF. + * Because of these assumptions, this function must be called very + * carefully. Moreover, it will not be used in the near future when + * we develop `neater' mechanism to process extension headers. + */ +char * +ip6_get_prevhdr(m, off) + struct mbuf *m; + int off; +{ + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + + if (off == sizeof(struct ip6_hdr)) + return(&ip6->ip6_nxt); + else { + int len, nxt; + struct ip6_ext *ip6e = NULL; + + nxt = ip6->ip6_nxt; + len = sizeof(struct ip6_hdr); + while (len < off) { + ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + len); + + switch(nxt) { + case IPPROTO_FRAGMENT: + len += sizeof(struct ip6_frag); + break; + case IPPROTO_AH: + len += (ip6e->ip6e_len + 2) << 2; + break; + default: + len += (ip6e->ip6e_len + 1) << 3; + break; + } + nxt = ip6e->ip6e_nxt; + } + if (ip6e) + return(&ip6e->ip6e_nxt); + else + return NULL; + } +} + +/* + * get next header offset. m will be retained. + */ +int +ip6_nexthdr(m, off, proto, nxtp) + struct mbuf *m; + int off; + int proto; + int *nxtp; +{ + struct ip6_hdr ip6; + struct ip6_ext ip6e; + struct ip6_frag fh; + + /* just in case */ + if (m == NULL) + panic("ip6_nexthdr: m == NULL"); + if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off) + return -1; + + switch (proto) { + case IPPROTO_IPV6: + if (m->m_pkthdr.len < off + sizeof(ip6)) + return -1; + m_copydata(m, off, sizeof(ip6), (caddr_t)&ip6); + if (nxtp) + *nxtp = ip6.ip6_nxt; + off += sizeof(ip6); + return off; + + case IPPROTO_FRAGMENT: + /* + * terminate parsing if it is not the first fragment, + * it does not make sense to parse through it. 
+ */ + if (m->m_pkthdr.len < off + sizeof(fh)) + return -1; + m_copydata(m, off, sizeof(fh), (caddr_t)&fh); + if ((ntohs(fh.ip6f_offlg) & IP6F_OFF_MASK) != 0) + return -1; + if (nxtp) + *nxtp = fh.ip6f_nxt; + off += sizeof(struct ip6_frag); + return off; + + case IPPROTO_AH: + if (m->m_pkthdr.len < off + sizeof(ip6e)) + return -1; + m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e); + if (nxtp) + *nxtp = ip6e.ip6e_nxt; + off += (ip6e.ip6e_len + 2) << 2; + return off; + + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: + if (m->m_pkthdr.len < off + sizeof(ip6e)) + return -1; + m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e); + if (nxtp) + *nxtp = ip6e.ip6e_nxt; + off += (ip6e.ip6e_len + 1) << 3; + return off; + + case IPPROTO_NONE: + case IPPROTO_ESP: + case IPPROTO_IPCOMP: + /* give up */ + return -1; + + default: + return -1; + } + + return -1; +} + +/* + * get offset for the last header in the chain. m will be kept untainted. + */ +int +ip6_lasthdr(m, off, proto, nxtp) + struct mbuf *m; + int off; + int proto; + int *nxtp; +{ + int newoff; + int nxt; + + if (!nxtp) { + nxt = -1; + nxtp = &nxt; + } + while (1) { + newoff = ip6_nexthdr(m, off, proto, nxtp); + if (newoff < 0) + return off; + else if (newoff < off) + return -1; /* invalid */ + else if (newoff == off) + return newoff; + + off = newoff; + proto = *nxtp; + } +} + +/* + * System control for IP6 + */ + +u_char inet6ctlerrmap[PRC_NCMDS] = { + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, + ENOPROTOOPT +}; + +#if defined(__NetBSD__) || defined(__OpenBSD__) +#include +#include + +int +ip6_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + /* All sysctl names at this level are terminal. 
*/ + if (namelen != 1) + return ENOTDIR; + + switch (name[0]) { + + case IPV6CTL_FORWARDING: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_forwarding); + case IPV6CTL_SENDREDIRECTS: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_sendredirects); + case IPV6CTL_DEFHLIM: + return sysctl_int(oldp, oldlenp, newp, newlen, &ip6_defhlim); + case IPV6CTL_MAXFRAGPACKETS: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_maxfragpackets); + case IPV6CTL_ACCEPT_RTADV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_accept_rtadv); + case IPV6CTL_KEEPFAITH: + return sysctl_int(oldp, oldlenp, newp, newlen, &ip6_keepfaith); + case IPV6CTL_LOG_INTERVAL: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_log_interval); + case IPV6CTL_HDRNESTLIMIT: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_hdrnestlimit); + case IPV6CTL_DAD_COUNT: + return sysctl_int(oldp, oldlenp, newp, newlen, &ip6_dad_count); + case IPV6CTL_AUTO_FLOWLABEL: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_auto_flowlabel); + case IPV6CTL_DEFMCASTHLIM: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_defmcasthlim); + case IPV6CTL_GIF_HLIM: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_gif_hlim); + case IPV6CTL_KAME_VERSION: + return sysctl_rdstring(oldp, oldlenp, newp, __KAME_VERSION); + case IPV6CTL_USE_DEPRECATED: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_use_deprecated); + case IPV6CTL_RR_PRUNE: + return sysctl_int(oldp, oldlenp, newp, newlen, &ip6_rr_prune); +#if defined(__NetBSD__) && !defined(INET6_BINDV6ONLY) + case IPV6CTL_BINDV6ONLY: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_bindv6only); +#endif + default: + return EOPNOTSUPP; + } + /* NOTREACHED */ +} +#endif /* __NetBSD__ || __OpenBSD__ */ + +#ifdef __bsdi__ +int *ip6_sysvars[] = IPV6CTL_VARS; + +int +ip6_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + if 
(name[0] >= IPV6CTL_MAXID) + return (EOPNOTSUPP); + + switch (name[0]) { + case IPV6CTL_STATS: + return sysctl_rdtrunc(oldp, oldlenp, newp, &ip6stat, + sizeof(ip6stat)); + case IPV6CTL_KAME_VERSION: + return sysctl_rdstring(oldp, oldlenp, newp, __KAME_VERSION); + default: + return (sysctl_int_arr(ip6_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + } +} +#endif /* __bsdi__ */ diff --git a/bsd/netinet6/ip6_mroute.c b/bsd/netinet6/ip6_mroute.c new file mode 100644 index 000000000..ec88a48a9 --- /dev/null +++ b/bsd/netinet6/ip6_mroute.c @@ -0,0 +1,1896 @@ +/* $KAME: ip6_mroute.c,v 1.15 2000/02/22 14:04:21 itojun Exp $ */ + +/* + * Copyright (C) 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* BSDI ip_mroute.c,v 2.10 1996/11/14 00:29:52 jch Exp */ + +/* + * IP multicast forwarding procedures + * + * Written by David Waitzman, BBN Labs, August 1988. + * Modified by Steve Deering, Stanford, February 1989. + * Modified by Mark J. Steiglitz, Stanford, May, 1991 + * Modified by Van Jacobson, LBL, January 1993 + * Modified by Ajit Thyagarajan, PARC, August 1993 + * Modified by Bill Fenenr, PARC, April 1994 + * + * MROUTING Revision: 3.5.1.2 + PIM-SMv2 (pimd) Support + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#ifndef _KERNEL +# ifdef KERNEL +# define _KERNEL +# endif +#endif + +#include +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include +#endif +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static MALLOC_DEFINE(M_MRTABLE, "mf6c", "multicast forwarding cache entry"); +#endif + +#define M_HASCL(m) ((m)->m_flags & M_EXT) + +static int ip6_mdq __P((struct mbuf *, struct ifnet *, struct mf6c *)); +static void phyint_send __P((struct ip6_hdr *, struct mif6 *, struct mbuf *)); + +static int set_pim6 __P((int *)); +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && 
!defined(__APPLE__) +static int get_pim6 __P((struct mbuf *)); +#endif +static int socket_send __P((struct socket *, struct mbuf *, + struct sockaddr_in6 *)); +static int register_send __P((struct ip6_hdr *, struct mif6 *, + struct mbuf *)); + +/* + * Globals. All but ip6_mrouter, ip6_mrtproto and mrt6stat could be static, + * except for netstat or debugging purposes. + */ +struct socket *ip6_mrouter = NULL; +int ip6_mrtproto = IPPROTO_PIM; /* for netstat only */ +struct mrt6stat mrt6stat; + +#define NO_RTE_FOUND 0x1 +#define RTE_FOUND 0x2 + +struct mf6c *mf6ctable[MF6CTBLSIZ]; +u_char nexpire[MF6CTBLSIZ]; +static struct mif6 mif6table[MAXMIFS]; +#if MRT6DEBUG +u_int mrt6debug = 0; /* debug level */ +#define DEBUG_MFC 0x02 +#define DEBUG_FORWARD 0x04 +#define DEBUG_EXPIRE 0x08 +#define DEBUG_XMIT 0x10 +#define DEBUG_REG 0x20 +#define DEBUG_PIM 0x40 +#endif + +static void expire_upcalls __P((void *)); +#define EXPIRE_TIMEOUT (hz / 4) /* 4x / second */ +#define UPCALL_EXPIRE 6 /* number of timeouts */ + +#if INET +#if MROUTING +extern struct socket *ip_mrouter; +#endif +#endif + +static u_long lo_dl_tag = 0; +/* + * 'Interfaces' associated with decapsulator (so we can tell + * packets that went through it from ones that get reflected + * by a broken gateway). These interfaces are never linked into + * the system ifnet list & no routes point to them. I.e., packets + * can't be sent this way. They only exist as a placeholder for + * multicast source verification. + */ +struct ifnet multicast_register_if; + +#define ENCAP_HOPS 64 + +/* + * Private variables. + */ +static mifi_t nummifs = 0; +static mifi_t reg_mif_num = (mifi_t)-1; + +static struct pim6stat pim6stat; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static struct callout_handle expire_upcalls_ch; +#endif + +/* + * one-back cache used by ipip_input to locate a tunnel's mif + * given a datagram's src ip address. 
+ */ +static int pim6; + +/* + * Hash function for a source, group entry + */ +#define MF6CHASH(a, g) MF6CHASHMOD((a).s6_addr32[0] ^ (a).s6_addr32[1] ^ \ + (a).s6_addr32[2] ^ (a).s6_addr32[3] ^ \ + (g).s6_addr32[0] ^ (g).s6_addr32[1] ^ \ + (g).s6_addr32[2] ^ (g).s6_addr32[3]) + +/* + * Find a route for a given origin IPv6 address and Multicast group address. + * Quality of service parameter to be added in the future!!! + */ + +#define MF6CFIND(o, g, rt) { \ + register struct mf6c *_rt = mf6ctable[MF6CHASH(o,g)]; \ + rt = NULL; \ + mrt6stat.mrt6s_mfc_lookups++; \ + while (_rt) { \ + if (IN6_ARE_ADDR_EQUAL(&_rt->mf6c_origin.sin6_addr, &(o)) && \ + IN6_ARE_ADDR_EQUAL(&_rt->mf6c_mcastgrp.sin6_addr, &(g)) && \ + (_rt->mf6c_stall == NULL)) { \ + rt = _rt; \ + break; \ + } \ + _rt = _rt->mf6c_next; \ + } \ + if (rt == NULL) { \ + mrt6stat.mrt6s_mfc_misses++; \ + } \ +} + +/* + * Macros to compute elapsed time efficiently + * Borrowed from Van Jacobson's scheduling code + */ +#define TV_DELTA(a, b, delta) { \ + register int xxs; \ + \ + delta = (a).tv_usec - (b).tv_usec; \ + if ((xxs = (a).tv_sec - (b).tv_sec)) { \ + switch (xxs) { \ + case 2: \ + delta += 1000000; \ + /* fall through */ \ + case 1: \ + delta += 1000000; \ + break; \ + default: \ + delta += (1000000 * xxs); \ + } \ + } \ +} + +#define TV_LT(a, b) (((a).tv_usec < (b).tv_usec && \ + (a).tv_sec <= (b).tv_sec) || (a).tv_sec < (b).tv_sec) + +#if UPCALL_TIMING +#define UPCALL_MAX 50 +u_long upcall_data[UPCALL_MAX + 1]; +static void collate(); +#endif /* UPCALL_TIMING */ + +static int get_sg_cnt __P((struct sioc_sg_req6 *)); +static int get_mif6_cnt __P((struct sioc_mif_req6 *)); +static int ip6_mrouter_init __P((struct socket *, struct mbuf *)); +static int add_m6if __P((struct mif6ctl *)); +static int del_m6if __P((mifi_t *)); +static int add_m6fc __P((struct mf6cctl *)); +static int del_m6fc __P((struct mf6cctl *)); + +/* + * Handle MRT setsockopt commands to modify the multicast routing tables. 
+ */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int +ip6_mrouter_set(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error = 0; + struct mbuf *m; + + if (so != ip6_mrouter && sopt->sopt_name != MRT6_INIT) + return (EACCES); + + if (error = sooptgetm(sopt, &m)) /* XXX */ + return (error); + if (error = sooptmcopyin(sopt, m)) /* XXX */ + return (error); + + switch (sopt->sopt_name) { + case MRT6_INIT: + error = ip6_mrouter_init(so, m); + break; + case MRT6_DONE: + error = ip6_mrouter_done(); + break; + case MRT6_ADD_MIF: + error = add_m6if(mtod(m, struct mif6ctl *)); + break; + case MRT6_DEL_MIF: + error = del_m6if(mtod(m, mifi_t *)); + break; + case MRT6_ADD_MFC: + error = add_m6fc(mtod(m, struct mf6cctl *)); + break; + case MRT6_DEL_MFC: + error = del_m6fc(mtod(m, struct mf6cctl *)); + break; + case MRT6_PIM: + error = set_pim6(mtod(m, int *)); + break; + default: + error = EOPNOTSUPP; + break; + } + + (void)m_freem(m); + return(error); +} +#else +int +ip6_mrouter_set(cmd, so, m) + int cmd; + struct socket *so; + struct mbuf *m; +{ + if (cmd != MRT6_INIT && so != ip6_mrouter) + return EACCES; + + switch (cmd) { + case MRT6_INIT: return ip6_mrouter_init(so, m); + case MRT6_DONE: return ip6_mrouter_done(); + case MRT6_ADD_MIF: return add_m6if(mtod(m, struct mif6ctl *)); + case MRT6_DEL_MIF: return del_m6if(mtod(m, mifi_t *)); + case MRT6_ADD_MFC: return add_m6fc(mtod(m, struct mf6cctl *)); + case MRT6_DEL_MFC: return del_m6fc(mtod(m, struct mf6cctl *)); + case MRT6_PIM: return set_pim6(mtod(m, int *)); + default: return EOPNOTSUPP; + } +} +#endif + +/* + * Handle MRT getsockopt commands + */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int +ip6_mrouter_get(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error = 0; + + if (so != ip6_mrouter) return EACCES; + + switch (sopt->sopt_name) { + case MRT6_PIM: + error = sooptcopyout(sopt, &pim6, sizeof(pim6)); + break; + } + return (error); 
+}
+#else
+int
+ip6_mrouter_get(cmd, so, m)
+	int cmd;
+	struct socket *so;
+	struct mbuf **m;
+{
+	struct mbuf *mb;
+
+	if (so != ip6_mrouter) return EACCES;
+
+	*m = mb = m_get(M_WAIT, MT_SOOPTS);
+
+	switch (cmd) {
+	case MRT6_PIM: return get_pim6(mb);
+	default:
+		m_free(mb);
+		return EOPNOTSUPP;
+	}
+}
+#endif
+
+/*
+ * Handle ioctl commands to obtain information from the cache
+ */
+int
+mrt6_ioctl(cmd, data)
+	int cmd;
+	caddr_t data;
+{
+	int error = 0;
+
+	switch (cmd) {
+	case SIOCGETSGCNT_IN6:
+		return(get_sg_cnt((struct sioc_sg_req6 *)data));
+		break;			/* for safety */
+	case SIOCGETMIFCNT_IN6:
+		return(get_mif6_cnt((struct sioc_mif_req6 *)data));
+		break;			/* for safety */
+	default:
+		return (EINVAL);
+		break;
+	}
+	return error;
+}
+
+/*
+ * returns the packet, byte, rpf-failure count for the source group provided
+ */
+static int
+get_sg_cnt(req)
+	register struct sioc_sg_req6 *req;
+{
+	register struct mf6c *rt;
+	int s;
+
+#if __NetBSD__
+	s = splsoftnet();
+#else
+	s = splnet();
+#endif
+	MF6CFIND(req->src.sin6_addr, req->grp.sin6_addr, rt);
+	splx(s);
+	if (rt != NULL) {
+		req->pktcnt = rt->mf6c_pkt_cnt;
+		req->bytecnt = rt->mf6c_byte_cnt;
+		req->wrong_if = rt->mf6c_wrong_if;
+	} else
+		return(ESRCH);
+#if 0
+	req->pktcnt = req->bytecnt = req->wrong_if = 0xffffffff;
+#endif
+
+	return 0;
+}
+
+/*
+ * returns the input and output packet and byte counts on the mif provided
+ */
+static int
+get_mif6_cnt(req)
+	register struct sioc_mif_req6 *req;
+{
+	register mifi_t mifi = req->mifi;
+
+	if (mifi >= nummifs)
+		return EINVAL;
+
+	req->icount = mif6table[mifi].m6_pkt_in;
+	req->ocount = mif6table[mifi].m6_pkt_out;
+	req->ibytes = mif6table[mifi].m6_bytes_in;
+	req->obytes = mif6table[mifi].m6_bytes_out;
+
+	return 0;
+}
+
+/*
+ * Guard must match the forward declaration of get_pim6 above:
+ * "&& !defined(__APPLE__)".  The original used "||", which is almost
+ * always true and compiled this static function even on builds
+ * (APPLE / FreeBSD >= 3) where it is neither declared nor called.
+ */
+#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__)
+/*
+ * Get PIM processing global
+ */
+static int
+get_pim6(m)
+	struct mbuf *m;
+{
+	int *i;
+
+	i = mtod(m, int *);
+
+	*i = pim6;
+
+	return 0;
+}
+#endif
+
+static int +set_pim6(i) + int *i; +{ + if ((*i != 1) && (*i != 0)) + return EINVAL; + + pim6 = *i; + + return 0; +} + +/* + * Enable multicast routing + */ +static int +ip6_mrouter_init(so, m) + struct socket *so; + struct mbuf *m; +{ + int *v; + +#if MRT6DEBUG + if (mrt6debug) + log(LOG_DEBUG, + "ip6_mrouter_init: so_type = %d, pr_protocol = %d\n", + so->so_type, so->so_proto->pr_protocol); +#endif + + if (so->so_type != SOCK_RAW || + so->so_proto->pr_protocol != IPPROTO_ICMPV6) + return EOPNOTSUPP; + + if (!m || (m->m_len != sizeof(int *))) + return ENOPROTOOPT; + + v = mtod(m, int *); + if (*v != 1) + return ENOPROTOOPT; + + if (ip6_mrouter != NULL) return EADDRINUSE; + + ip6_mrouter = so; + + bzero((caddr_t)mf6ctable, sizeof(mf6ctable)); + bzero((caddr_t)nexpire, sizeof(nexpire)); + + pim6 = 0;/* used for stubbing out/in pim stuff */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + expire_upcalls_ch = +#endif + timeout(expire_upcalls, (caddr_t)NULL, EXPIRE_TIMEOUT); + +#if MRT6DEBUG + if (mrt6debug) + log(LOG_DEBUG, "ip6_mrouter_init\n"); +#endif + + return 0; +} + +/* + * Disable multicast routing + */ +int +ip6_mrouter_done() +{ + mifi_t mifi; + int i; + struct ifnet *ifp; + struct in6_ifreq ifr; + struct mf6c *rt; + struct rtdetq *rte; + int s; + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + + /* + * For each phyint in use, disable promiscuous reception of all IPv6 + * multicasts. + */ +#if INET +#if MROUTING + /* + * If there is still IPv4 multicast routing daemon, + * we remain interfaces to receive all muliticasted packets. + * XXX: there may be an interface in which the IPv4 multicast + * daemon is not interested... 
+ */ + if (!ip_mrouter) +#endif +#endif + { + for (mifi = 0; mifi < nummifs; mifi++) { + if (mif6table[mifi].m6_ifp && + !(mif6table[mifi].m6_flags & MIFF_REGISTER)) { + ifr.ifr_addr.sin6_family = AF_INET6; + ifr.ifr_addr.sin6_addr= in6addr_any; + ifp = mif6table[mifi].m6_ifp; +#ifdef __APPLE__ + dlil_ioctl(0, ifp, SIOCDELMULTI, + (caddr_t)&ifr); +#else + (*ifp->if_ioctl)(ifp, SIOCDELMULTI, + (caddr_t)&ifr); +#endif + } + } + } +#if notyet + bzero((caddr_t)qtable, sizeof(qtable)); + bzero((caddr_t)tbftable, sizeof(tbftable)); +#endif + bzero((caddr_t)mif6table, sizeof(mif6table)); + nummifs = 0; + + pim6 = 0; /* used to stub out/in pim specific code */ + + untimeout(expire_upcalls, (caddr_t)NULL +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + , expire_upcalls_ch +#endif + ); + + /* + * Free all multicast forwarding cache entries. + */ + for (i = 0; i < MF6CTBLSIZ; i++) { + rt = mf6ctable[i]; + while (rt) { + struct mf6c *frt; + + for (rte = rt->mf6c_stall; rte != NULL; ) { + struct rtdetq *n = rte->next; + + m_free(rte->m); + _FREE(rte, M_MRTABLE); + rte = n; + } + frt = rt; + rt = rt->mf6c_next; + _FREE(frt, M_MRTABLE); + } + } + + bzero((caddr_t)mf6ctable, sizeof(mf6ctable)); + + /* + * Reset de-encapsulation cache + */ + reg_mif_num = -1; + + ip6_mrouter = NULL; + + splx(s); + +#if MRT6DEBUG + if (mrt6debug) + log(LOG_DEBUG, "ip6_mrouter_done\n"); +#endif + + return 0; +} + +static struct sockaddr_in6 sin6 = { sizeof(sin6), AF_INET6 }; + +/* + * Add a mif to the mif table + */ +static int +add_m6if(mifcp) + register struct mif6ctl *mifcp; +{ + register struct mif6 *mifp; + struct ifnet *ifp; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + struct in6_ifreq ifr; +#endif + int error, s; +#if notyet + struct tbf *m_tbf = tbftable + mifcp->mif6c_mifi; +#endif + + if (mifcp->mif6c_mifi >= MAXMIFS) + return EINVAL; + mifp = mif6table + mifcp->mif6c_mifi; + if (mifp->m6_ifp) + return EADDRINUSE; /* XXX: is it appropriate? 
*/ + if (mifcp->mif6c_pifi == 0 || mifcp->mif6c_pifi > if_index) + return ENXIO; + ifp = ifindex2ifnet[mifcp->mif6c_pifi]; + + if (mifcp->mif6c_flags & MIFF_REGISTER) { + if (reg_mif_num == (mifi_t)-1) { +#if defined(__NetBSD__) || defined(__OpenBSD__) + strcpy(multicast_register_if.if_xname, + "register_mif"); /* XXX */ +#else + multicast_register_if.if_name = "register_mif"; +#endif + multicast_register_if.if_flags |= IFF_LOOPBACK; + multicast_register_if.if_index = mifcp->mif6c_mifi; + reg_mif_num = mifcp->mif6c_mifi; + } + + ifp = &multicast_register_if; + + } /* if REGISTER */ + else { + /* Make sure the interface supports multicast */ + if ((ifp->if_flags & IFF_MULTICAST) == 0) + return EOPNOTSUPP; + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + error = if_allmulti(ifp, 1); +#else + /* + * Enable promiscuous reception of all IPv6 multicasts + * from the interface. + */ + ifr.ifr_addr.sin6_family = AF_INET6; + ifr.ifr_addr.sin6_addr = in6addr_any; + error = (*ifp->if_ioctl)(ifp, SIOCADDMULTI, (caddr_t)&ifr); +#endif + splx(s); + if (error) + return error; + } + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + mifp->m6_flags = mifcp->mif6c_flags; + mifp->m6_ifp = ifp; +#if notyet + /* scaling up here allows division by 1024 in critical code */ + mifp->m6_rate_limit = mifcp->mif6c_rate_limit * 1024 / 1000; +#endif + /* initialize per mif pkt counters */ + mifp->m6_pkt_in = 0; + mifp->m6_pkt_out = 0; + mifp->m6_bytes_in = 0; + mifp->m6_bytes_out = 0; + splx(s); + + /* Adjust nummifs up if the mifi is higher than nummifs */ + if (nummifs <= mifcp->mif6c_mifi) + nummifs = mifcp->mif6c_mifi + 1; + +#if MRT6DEBUG + if (mrt6debug) + log(LOG_DEBUG, + "add_mif #%d, phyint %s%d\n", + mifcp->mif6c_mifi, + ifp->if_name, ifp->if_unit); +#endif + + return 0; +} + +/* + * Delete a mif from the mif table + */ +static int +del_m6if(mifip) + mifi_t *mifip; +{ + 
register struct mif6 *mifp = mif6table + *mifip; + register mifi_t mifi; + struct ifnet *ifp; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + struct in6_ifreq ifr; +#endif + int s; + + if (*mifip >= nummifs) + return EINVAL; + if (mifp->m6_ifp == NULL) + return EINVAL; + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + + if (!(mifp->m6_flags & MIFF_REGISTER)) { + /* + * XXX: what if there is yet IPv4 multicast daemon + * using the interface? + */ + ifp = mifp->m6_ifp; + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + if_allmulti(ifp, 0); +#else + ifr.ifr_addr.sin6_family = AF_INET6; + ifr.ifr_addr.sin6_addr = in6addr_any; + (*ifp->if_ioctl)(ifp, SIOCDELMULTI, (caddr_t)&ifr); +#endif + } + +#if notyet + bzero((caddr_t)qtable[*mifip], sizeof(qtable[*mifip])); + bzero((caddr_t)mifp->m6_tbf, sizeof(*(mifp->m6_tbf))); +#endif + bzero((caddr_t)mifp, sizeof (*mifp)); + + /* Adjust nummifs down */ + for (mifi = nummifs; mifi > 0; mifi--) + if (mif6table[mifi - 1].m6_ifp) + break; + nummifs = mifi; + + splx(s); + +#if MRT6DEBUG + if (mrt6debug) + log(LOG_DEBUG, "del_m6if %d, nummifs %d\n", *mifip, nummifs); +#endif + + return 0; +} + +/* + * Add an mfc entry + */ +static int +add_m6fc(mfccp) + struct mf6cctl *mfccp; +{ + struct mf6c *rt; + u_long hash; + struct rtdetq *rte; + register u_short nstl; + int s; + + MF6CFIND(mfccp->mf6cc_origin.sin6_addr, + mfccp->mf6cc_mcastgrp.sin6_addr, rt); + + /* If an entry already exists, just update the fields */ + if (rt) { +#if MRT6DEBUG + if (mrt6debug & DEBUG_MFC) + log(LOG_DEBUG,"add_m6fc update o %s g %s p %x\n", + ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), + ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), + mfccp->mf6cc_parent); +#endif + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + rt->mf6c_parent = mfccp->mf6cc_parent; + rt->mf6c_ifset = mfccp->mf6cc_ifset; + splx(s); + return 0; + } + + /* + * Find the entry for which the upcall was 
made and update + */ +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + hash = MF6CHASH(mfccp->mf6cc_origin.sin6_addr, + mfccp->mf6cc_mcastgrp.sin6_addr); + for (rt = mf6ctable[hash], nstl = 0; rt; rt = rt->mf6c_next) { + if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr, + &mfccp->mf6cc_origin.sin6_addr) && + IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr, + &mfccp->mf6cc_mcastgrp.sin6_addr) && + (rt->mf6c_stall != NULL)) { + + if (nstl++) + log(LOG_ERR, + "add_m6fc: %s o %s g %s p %x dbx %p\n", + "multiple kernel entries", + ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), + ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), + mfccp->mf6cc_parent, rt->mf6c_stall); + +#if MRT6DEBUG + if (mrt6debug & DEBUG_MFC) + log(LOG_DEBUG, + "add_m6fc o %s g %s p %x dbg %x\n", + ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), + ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), + mfccp->mf6cc_parent, rt->mf6c_stall); +#endif + + rt->mf6c_origin = mfccp->mf6cc_origin; + rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp; + rt->mf6c_parent = mfccp->mf6cc_parent; + rt->mf6c_ifset = mfccp->mf6cc_ifset; + /* initialize pkt counters per src-grp */ + rt->mf6c_pkt_cnt = 0; + rt->mf6c_byte_cnt = 0; + rt->mf6c_wrong_if = 0; + + rt->mf6c_expire = 0; /* Don't clean this guy up */ + nexpire[hash]--; + + /* free packets Qed at the end of this entry */ + for (rte = rt->mf6c_stall; rte != NULL; ) { + struct rtdetq *n = rte->next; + ip6_mdq(rte->m, rte->ifp, rt); + m_freem(rte->m); +#if UPCALL_TIMING + collate(&(rte->t)); +#endif /* UPCALL_TIMING */ + _FREE(rte, M_MRTABLE); + rte = n; + } + rt->mf6c_stall = NULL; + } + } + + /* + * It is possible that an entry is being inserted without an upcall + */ + if (nstl == 0) { +#if MRT6DEBUG + if (mrt6debug & DEBUG_MFC) + log(LOG_DEBUG,"add_mfc no upcall h %d o %s g %s p %x\n", + hash, + ip6_sprintf(&mfccp->mf6cc_origin.sin6_addr), + ip6_sprintf(&mfccp->mf6cc_mcastgrp.sin6_addr), + mfccp->mf6cc_parent); +#endif + + for (rt = mf6ctable[hash]; rt; rt = 
rt->mf6c_next) { + + if (IN6_ARE_ADDR_EQUAL(&rt->mf6c_origin.sin6_addr, + &mfccp->mf6cc_origin.sin6_addr)&& + IN6_ARE_ADDR_EQUAL(&rt->mf6c_mcastgrp.sin6_addr, + &mfccp->mf6cc_mcastgrp.sin6_addr)) { + + rt->mf6c_origin = mfccp->mf6cc_origin; + rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp; + rt->mf6c_parent = mfccp->mf6cc_parent; + /* initialize pkt counters per src-grp */ + rt->mf6c_pkt_cnt = 0; + rt->mf6c_byte_cnt = 0; + rt->mf6c_wrong_if = 0; + + if (rt->mf6c_expire) + nexpire[hash]--; + rt->mf6c_expire = 0; + } + } + if (rt == NULL) { + /* no upcall, so make a new entry */ + rt = (struct mf6c *)_MALLOC(sizeof(*rt), M_MRTABLE, + M_NOWAIT); + if (rt == NULL) { + splx(s); + return ENOBUFS; + } + + /* insert new entry at head of hash chain */ + rt->mf6c_origin = mfccp->mf6cc_origin; + rt->mf6c_mcastgrp = mfccp->mf6cc_mcastgrp; + rt->mf6c_parent = mfccp->mf6cc_parent; + /* initialize pkt counters per src-grp */ + rt->mf6c_pkt_cnt = 0; + rt->mf6c_byte_cnt = 0; + rt->mf6c_wrong_if = 0; + rt->mf6c_expire = 0; + rt->mf6c_stall = NULL; + + /* link into table */ + rt->mf6c_next = mf6ctable[hash]; + mf6ctable[hash] = rt; + } + } + splx(s); + return 0; +} + +#if UPCALL_TIMING +/* + * collect delay statistics on the upcalls + */ +static void +collate(t) + register struct timeval *t; +{ + register u_long d; + register struct timeval tp; + register u_long delta; + + GET_TIME(tp); + + if (TV_LT(*t, tp)) + { + TV_DELTA(tp, *t, delta); + + d = delta >> 10; + if (d > UPCALL_MAX) + d = UPCALL_MAX; + + ++upcall_data[d]; + } +} +#endif /* UPCALL_TIMING */ + +/* + * Delete an mfc entry + */ +static int +del_m6fc(mfccp) + struct mf6cctl *mfccp; +{ + struct sockaddr_in6 origin; + struct sockaddr_in6 mcastgrp; + struct mf6c *rt; + struct mf6c **nptr; + u_long hash; + int s; + + origin = mfccp->mf6cc_origin; + mcastgrp = mfccp->mf6cc_mcastgrp; + hash = MF6CHASH(origin.sin6_addr, mcastgrp.sin6_addr); + +#if MRT6DEBUG + if (mrt6debug & DEBUG_MFC) + log(LOG_DEBUG,"del_m6fc orig %s mcastgrp %s\n", 
+ ip6_sprintf(&origin.sin6_addr), + ip6_sprintf(&mcastgrp.sin6_addr)); +#endif + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + + nptr = &mf6ctable[hash]; + while ((rt = *nptr) != NULL) { + if (IN6_ARE_ADDR_EQUAL(&origin.sin6_addr, + &rt->mf6c_origin.sin6_addr) && + IN6_ARE_ADDR_EQUAL(&mcastgrp.sin6_addr, + &rt->mf6c_mcastgrp.sin6_addr) && + rt->mf6c_stall == NULL) + break; + + nptr = &rt->mf6c_next; + } + if (rt == NULL) { + splx(s); + return EADDRNOTAVAIL; + } + + *nptr = rt->mf6c_next; + _FREE(rt, M_MRTABLE); + + splx(s); + + return 0; +} + +static int +socket_send(s, mm, src) + struct socket *s; + struct mbuf *mm; + struct sockaddr_in6 *src; +{ + if (s) { + if (sbappendaddr(&s->so_rcv, + (struct sockaddr *)src, + mm, (struct mbuf *)0) != 0) { + sorwakeup(s); + return 0; + } + } + m_freem(mm); + return -1; +} + +/* + * IPv6 multicast forwarding function. This function assumes that the packet + * pointed to by "ip6" has arrived on (or is about to be sent to) the interface + * pointed to by "ifp", and the packet is to be relayed to other networks + * that have members of the packet's destination IPv6 multicast group. + * + * The packet is returned unscathed to the caller, unless it is + * erroneous, in which case a non-zero return value tells the caller to + * discard it. + */ + +int +ip6_mforward(ip6, ifp, m) + register struct ip6_hdr *ip6; + struct ifnet *ifp; + struct mbuf *m; +{ + register struct mf6c *rt; + register struct mif6 *mifp; + register struct mbuf *mm; + int s; + mifi_t mifi; + +#if MRT6DEBUG + if (mrt6debug & DEBUG_FORWARD) + log(LOG_DEBUG, "ip6_mforward: src %s, dst %s, ifindex %d\n", + ip6_sprintf(&ip6->ip6_src), ip6_sprintf(&ip6->ip6_dst), + ifp->if_index); +#endif + + /* + * Don't forward a packet with Hop limit of zero or one, + * or a packet destined to a local-only group. 
+ */ + if (ip6->ip6_hlim <= 1 || IN6_IS_ADDR_MC_NODELOCAL(&ip6->ip6_dst) || + IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst)) + return 0; + ip6->ip6_hlim--; + + /* + * Determine forwarding mifs from the forwarding cache table + */ +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + MF6CFIND(ip6->ip6_src, ip6->ip6_dst, rt); + + /* Entry exists, so forward if necessary */ + if (rt) { + splx(s); + return (ip6_mdq(m, ifp, rt)); + } else { + /* + * If we don't have a route for packet's origin, + * Make a copy of the packet & + * send message to routing daemon + */ + + register struct mbuf *mb0; + register struct rtdetq *rte; + register u_long hash; +/* register int i, npkts;*/ +#if UPCALL_TIMING + struct timeval tp; + + GET_TIME(tp); +#endif /* UPCALL_TIMING */ + + mrt6stat.mrt6s_no_route++; +#if MRT6DEBUG + if (mrt6debug & (DEBUG_FORWARD | DEBUG_MFC)) + log(LOG_DEBUG, "ip6_mforward: no rte s %s g %s\n", + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst)); +#endif + + /* + * Allocate mbufs early so that we don't do extra work if we + * are just going to fail anyway. + */ + rte = (struct rtdetq *)_MALLOC(sizeof(*rte), M_MRTABLE, + M_NOWAIT); + if (rte == NULL) { + splx(s); + return ENOBUFS; + } + mb0 = m_copy(m, 0, M_COPYALL); + /* + * Pullup packet header if needed before storing it, + * as other references may modify it in the meantime. + */ + if (mb0 && + (M_HASCL(mb0) || mb0->m_len < sizeof(struct ip6_hdr))) + mb0 = m_pullup(mb0, sizeof(struct ip6_hdr)); + if (mb0 == NULL) { + _FREE(rte, M_MRTABLE); + splx(s); + return ENOBUFS; + } + + /* is there an upcall waiting for this packet? 
*/ + hash = MF6CHASH(ip6->ip6_src, ip6->ip6_dst); + for (rt = mf6ctable[hash]; rt; rt = rt->mf6c_next) { + if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, + &rt->mf6c_origin.sin6_addr) && + IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, + &rt->mf6c_mcastgrp.sin6_addr) && + (rt->mf6c_stall != NULL)) + break; + } + + if (rt == NULL) { + struct mrt6msg *im; + + /* no upcall, so make a new entry */ + rt = (struct mf6c *)_MALLOC(sizeof(*rt), M_MRTABLE, + M_NOWAIT); + if (rt == NULL) { + _FREE(rte, M_MRTABLE); + m_freem(mb0); + splx(s); + return ENOBUFS; + } + /* + * Make a copy of the header to send to the user + * level process + */ + mm = m_copy(mb0, 0, sizeof(struct ip6_hdr)); + + if (mm == NULL) { + _FREE(rte, M_MRTABLE); + m_freem(mb0); + _FREE(rt, M_MRTABLE); + splx(s); + return ENOBUFS; + } + + /* + * Send message to routing daemon + */ + sin6.sin6_addr = ip6->ip6_src; + + im = mtod(mm, struct mrt6msg *); + im->im6_msgtype = MRT6MSG_NOCACHE; + im->im6_mbz = 0; + +#if MRT6DEBUG + if (mrt6debug & DEBUG_FORWARD) + log(LOG_DEBUG, + "getting the iif info in the kernel\n"); +#endif + + for (mifp = mif6table, mifi = 0; + mifi < nummifs && mifp->m6_ifp != ifp; + mifp++, mifi++) + ; + + im->im6_mif = mifi; + + if (socket_send(ip6_mrouter, mm, &sin6) < 0) { + log(LOG_WARNING, "ip6_mforward: ip6_mrouter " + "socket queue full\n"); + mrt6stat.mrt6s_upq_sockfull++; + _FREE(rte, M_MRTABLE); + m_freem(mb0); + _FREE(rt, M_MRTABLE); + splx(s); + return ENOBUFS; + } + + mrt6stat.mrt6s_upcalls++; + + /* insert new entry at head of hash chain */ + bzero(rt, sizeof(*rt)); + rt->mf6c_origin.sin6_family = AF_INET6; + rt->mf6c_origin.sin6_len = sizeof(struct sockaddr_in6); + rt->mf6c_origin.sin6_addr = ip6->ip6_src; + rt->mf6c_mcastgrp.sin6_family = AF_INET6; + rt->mf6c_mcastgrp.sin6_len = sizeof(struct sockaddr_in6); + rt->mf6c_mcastgrp.sin6_addr = ip6->ip6_dst; + rt->mf6c_expire = UPCALL_EXPIRE; + nexpire[hash]++; + rt->mf6c_parent = MF6C_INCOMPLETE_PARENT; + + /* link into table */ + rt->mf6c_next = 
mf6ctable[hash]; + mf6ctable[hash] = rt; + /* Add this entry to the end of the queue */ + rt->mf6c_stall = rte; + } else { + /* determine if q has overflowed */ + struct rtdetq **p; + register int npkts = 0; + + for (p = &rt->mf6c_stall; *p != NULL; p = &(*p)->next) + if (++npkts > MAX_UPQ6) { + mrt6stat.mrt6s_upq_ovflw++; + _FREE(rte, M_MRTABLE); + m_freem(mb0); + splx(s); + return 0; + } + + /* Add this entry to the end of the queue */ + *p = rte; + } + + rte->next = NULL; + rte->m = mb0; + rte->ifp = ifp; +#if UPCALL_TIMING + rte->t = tp; +#endif /* UPCALL_TIMING */ + + splx(s); + + return 0; + } +} + +/* + * Clean up cache entries if upcalls are not serviced + * Call from the Slow Timeout mechanism, every half second. + */ +static void +expire_upcalls(unused) + void *unused; +{ + struct rtdetq *rte; + struct mf6c *mfc, **nptr; + int i; + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + for (i = 0; i < MF6CTBLSIZ; i++) { + if (nexpire[i] == 0) + continue; + nptr = &mf6ctable[i]; + while ((mfc = *nptr) != NULL) { + rte = mfc->mf6c_stall; + /* + * Skip real cache entries + * Make sure it wasn't marked to not expire (shouldn't happen) + * If it expires now + */ + if (rte != NULL && + mfc->mf6c_expire != 0 && + --mfc->mf6c_expire == 0) { +#if MRT6DEBUG + if (mrt6debug & DEBUG_EXPIRE) + log(LOG_DEBUG, "expire_upcalls: expiring (%s %s)\n", + ip6_sprintf(&mfc->mf6c_origin.sin6_addr), + ip6_sprintf(&mfc->mf6c_mcastgrp.sin6_addr)); +#endif + /* + * drop all the packets + * free the mbuf with the pkt, if, timing info + */ + do { + struct rtdetq *n = rte->next; + m_freem(rte->m); + _FREE(rte, M_MRTABLE); + rte = n; + } while (rte != NULL); + mrt6stat.mrt6s_cache_cleanups++; + nexpire[i]--; + + *nptr = mfc->mf6c_next; + _FREE(mfc, M_MRTABLE); + } else { + nptr = &mfc->mf6c_next; + } + } + } + splx(s); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + 
expire_upcalls_ch = +#endif + timeout(expire_upcalls, (caddr_t)NULL, EXPIRE_TIMEOUT); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + +/* + * Packet forwarding routine once entry in the cache is made + */ +static int +ip6_mdq(m, ifp, rt) + register struct mbuf *m; + register struct ifnet *ifp; + register struct mf6c *rt; +{ + register struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + register mifi_t mifi, iif; + register struct mif6 *mifp; + register int plen = m->m_pkthdr.len; + +/* + * Macro to send packet on mif. Since RSVP packets don't get counted on + * input, they shouldn't get counted on output, so statistics keeping is + * seperate. + */ + +#define MC6_SEND(ip6,mifp,m) { \ + if ((mifp)->m6_flags & MIFF_REGISTER) \ + register_send((ip6), (mifp), (m)); \ + else \ + phyint_send((ip6), (mifp), (m)); \ +} + + /* + * Don't forward if it didn't arrive from the parent mif + * for its origin. + */ + mifi = rt->mf6c_parent; + if ((mifi >= nummifs) || (mif6table[mifi].m6_ifp != ifp)) { + /* came in the wrong interface */ +#if MRT6DEBUG + if (mrt6debug & DEBUG_FORWARD) + log(LOG_DEBUG, + "wrong if: ifid %d mifi %d mififid %x\n", + ifp->if_index, mifi, + mif6table[mifi].m6_ifp->if_index); +#endif + mrt6stat.mrt6s_wrong_if++; + rt->mf6c_wrong_if++; + /* + * If we are doing PIM processing, and we are forwarding + * packets on this interface, send a message to the + * routing daemon. + */ + if(mifi < nummifs) /* have to make sure this is a valid mif */ + if(mif6table[mifi].m6_ifp) + + if (pim6 && (m->m_flags & M_LOOP) == 0) { + /* + * Check the M_LOOP flag to avoid an + * unnecessary PIM assert. + * XXX: M_LOOP is an ad-hoc hack... 
+ */ + static struct sockaddr_in6 sin6 = + { sizeof(sin6), AF_INET6 }; + + register struct mbuf *mm; + struct mrt6msg *im; + + mm = m_copy(m, 0, + sizeof(struct ip6_hdr)); + if (mm && + (M_HASCL(mm) || + mm->m_len < sizeof(struct ip6_hdr))) + mm = m_pullup(mm, sizeof(struct ip6_hdr)); + if (mm == NULL) + return ENOBUFS; + + im = mtod(mm, struct mrt6msg *); + im->im6_msgtype = MRT6MSG_WRONGMIF; + im->im6_mbz = 0; + + for (mifp = mif6table, iif = 0; + iif < nummifs && mifp && + mifp->m6_ifp != ifp; + mifp++, iif++); + + im->im6_mif = iif; + + sin6.sin6_addr = im->im6_src; + + mrt6stat.mrt6s_upcalls++; + + if (socket_send(ip6_mrouter, mm, + &sin6) < 0) { +#if MRT6DEBUG + if (mrt6debug) + log(LOG_WARNING, "mdq, ip6_mrouter socket queue full\n"); +#endif + ++mrt6stat.mrt6s_upq_sockfull; + return ENOBUFS; + } /* if socket Q full */ + } /* if PIM */ + return 0; + } /* if wrong iif */ + + /* If I sourced this packet, it counts as output, else it was input. */ + if (m->m_pkthdr.rcvif == NULL) { + /* XXX: is rcvif really NULL when output?? */ + mif6table[mifi].m6_pkt_out++; + mif6table[mifi].m6_bytes_out += plen; + } else { + mif6table[mifi].m6_pkt_in++; + mif6table[mifi].m6_bytes_in += plen; + } + rt->mf6c_pkt_cnt++; + rt->mf6c_byte_cnt += plen; + + /* + * For each mif, forward a copy of the packet if there are group + * members downstream on the interface. 
+ */ + for (mifp = mif6table, mifi = 0; mifi < nummifs; mifp++, mifi++) + if (IF_ISSET(mifi, &rt->mf6c_ifset)) { + mifp->m6_pkt_out++; + mifp->m6_bytes_out += plen; + MC6_SEND(ip6, mifp, m); + } + return 0; +} + +static void +phyint_send(ip6, mifp, m) + struct ip6_hdr *ip6; + struct mif6 *mifp; + struct mbuf *m; +{ + register struct mbuf *mb_copy; + struct ifnet *ifp = mifp->m6_ifp; + int error = 0; +#if __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + static struct route_in6 ro6; + struct in6_multi *in6m; + + /* + * Make a new reference to the packet; make sure that + * the IPv6 header is actually copied, not just referenced, + * so that ip6_output() only scribbles on the copy. + */ + mb_copy = m_copy(m, 0, M_COPYALL); + if (mb_copy && + (M_HASCL(mb_copy) || mb_copy->m_len < sizeof(struct ip6_hdr))) + mb_copy = m_pullup(mb_copy, sizeof(struct ip6_hdr)); + if (mb_copy == NULL) + return; + /* set MCAST flag to the outgoing packet */ + mb_copy->m_flags |= M_MCAST; + + /* + * If we sourced the packet, call ip6_output since we may devide + * the packet into fragments when the packet is too big for the + * outgoing interface. + * Otherwise, we can simply send the packet to the interface + * sending queue. + */ + if (m->m_pkthdr.rcvif == NULL) { + struct ip6_moptions im6o; + + im6o.im6o_multicast_ifp = ifp; + /* XXX: ip6_output will override ip6->ip6_hlim */ + im6o.im6o_multicast_hlim = ip6->ip6_hlim; + im6o.im6o_multicast_loop = 1; + error = ip6_output(mb_copy, NULL, &ro6, + IPV6_FORWARDING, &im6o, NULL); + +#if MRT6DEBUG + if (mrt6debug & DEBUG_XMIT) + log(LOG_DEBUG, "phyint_send on mif %d err %d\n", + mifp - mif6table, error); +#endif + splx(s); + return; + } + + /* + * If we belong to the destination multicast group + * on the outgoing interface, loop back a copy. 
+ */ + IN6_LOOKUP_MULTI(ip6->ip6_dst, ifp, in6m); + if (in6m != NULL) { + ro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6); + ro6.ro_dst.sin6_family = AF_INET6; + ro6.ro_dst.sin6_addr = ip6->ip6_dst; + ip6_mloopback(ifp, m, &ro6.ro_dst); + } + /* + * Put the packet into the sending queue of the outgoing interface + * if it would fit in the MTU of the interface. + */ + if (mb_copy->m_pkthdr.len < ifp->if_mtu || ifp->if_mtu < IPV6_MMTU) { + ro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6); + ro6.ro_dst.sin6_family = AF_INET6; + ro6.ro_dst.sin6_addr = ip6->ip6_dst; + /* + * We just call if_output instead of nd6_output here, since + * we need no ND for a multicast forwarded packet...right? + */ +#ifdef __APPLE__ + error = dlil_output(ifptodlt(ifp, PF_INET6), mb_copy, + NULL, (struct sockaddr *)&ro6.ro_dst, 0); +#else + error = (*ifp->if_output)(ifp, mb_copy, + (struct sockaddr *)&ro6.ro_dst, + NULL); +#endif +#if MRT6DEBUG + if (mrt6debug & DEBUG_XMIT) + log(LOG_DEBUG, "phyint_send on mif %d err %d\n", + mifp - mif6table, error); +#endif + } + else { +#if MULTICAST_PMTUD + icmp6_error(mb_copy, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); + return; +#else +#if MRT6DEBUG +#if __NetBSD__ + if (mrt6debug & DEBUG_DEBUG_XMIT) + log(LOG_DEBUG, + "phyint_send: packet too big on %s o %s g %s" + " size %d(discarded)\n", + ifp->if_xname, + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst), + mb_copy->m_pkthdr.len); +#else + if (mrt6debug & DEBUG_XMIT) + log(LOG_DEBUG, + "phyint_send: packet too big on %s%u o %s g %s" + " size %d(discarded)\n", + ifp->if_name, ifp->if_unit, + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst), + mb_copy->m_pkthdr.len); +#endif /* __NetBSD__ */ +#endif /* MRT6DEBUG */ + m_freem(mb_copy); /* simply discard the packet */ + return; +#endif + } +} + +static int +register_send(ip6, mif, m) + register struct ip6_hdr *ip6; + struct mif6 *mif; + register struct mbuf *m; +{ + register struct mbuf *mm; + register int i, len = m->m_pkthdr.len; + 
static struct sockaddr_in6 sin6 = { sizeof(sin6), AF_INET6 }; + struct mrt6msg *im6; + +#if MRT6DEBUG + if (mrt6debug) + log(LOG_DEBUG, "** IPv6 register_send **\n src %s dst %s\n", + ip6_sprintf(&ip6->ip6_src), ip6_sprintf(&ip6->ip6_dst)); +#endif + ++pim6stat.pim6s_snd_registers; + + /* Make a copy of the packet to send to the user level process */ + MGETHDR(mm, M_DONTWAIT, MT_HEADER); + if (mm == NULL) + return ENOBUFS; + mm->m_data += max_linkhdr; + mm->m_len = sizeof(struct ip6_hdr); + + if ((mm->m_next = m_copy(m, 0, M_COPYALL)) == NULL) { + m_freem(mm); + return ENOBUFS; + } + i = MHLEN - M_LEADINGSPACE(mm); + if (i > len) + i = len; + mm = m_pullup(mm, i); + if (mm == NULL){ + m_freem(mm); + return ENOBUFS; + } +/* TODO: check it! */ + mm->m_pkthdr.len = len + sizeof(struct ip6_hdr); + + /* + * Send message to routing daemon + */ + sin6.sin6_addr = ip6->ip6_src; + + im6 = mtod(mm, struct mrt6msg *); + im6->im6_msgtype = MRT6MSG_WHOLEPKT; + im6->im6_mbz = 0; + + im6->im6_mif = mif - mif6table; + + /* iif info is not given for reg. encap.n */ + mrt6stat.mrt6s_upcalls++; + + if (socket_send(ip6_mrouter, mm, &sin6) < 0) { +#if MRT6DEBUG + if (mrt6debug) + log(LOG_WARNING, + "register_send: ip_mrouter socket queue full\n"); +#endif + ++mrt6stat.mrt6s_upq_sockfull; + return ENOBUFS; + } + return 0; +} + +/* + * PIM sparse mode hook + * Receives the pim control messages, and passes them up to the listening + * socket, using rip6_input. + * The only message processed is the REGISTER pim message; the pim header + * is stripped off, and the inner packet is passed to register_mforward. 
+ */ +int +pim6_input(mp, offp, proto) + struct mbuf **mp; + int *offp, proto; +{ + register struct pim *pim; /* pointer to a pim struct */ + register struct ip6_hdr *ip6; + register int pimlen; + struct mbuf *m = *mp; + int minlen; + int off = *offp; + + ++pim6stat.pim6s_rcv_total; + + ip6 = mtod(m, struct ip6_hdr *); + pimlen = m->m_pkthdr.len - *offp; + + /* + * Validate lengths + */ + if (pimlen < PIM_MINLEN) { + ++pim6stat.pim6s_rcv_tooshort; +#if MRT6DEBUG + if (mrt6debug & DEBUG_PIM) + log(LOG_DEBUG,"pim6_input: PIM packet too short\n"); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + + /* + * if the packet is at least as big as a REGISTER, go ahead + * and grab the PIM REGISTER header size, to avoid another + * possible m_pullup() later. + * + * PIM_MINLEN == pimhdr + u_int32 == 8 + * PIM6_REG_MINLEN == pimhdr + reghdr + eip6hdr == 4 + 4 + 40 + */ + minlen = (pimlen >= PIM6_REG_MINLEN) ? PIM6_REG_MINLEN : PIM_MINLEN; + + /* + * Make sure that the IP6 and PIM headers in contiguous memory, and + * possibly the PIM REGISTER header + */ +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, minlen, IPPROTO_DONE); + /* adjust pointer */ + ip6 = mtod(m, struct ip6_hdr *); + + /* adjust mbuf to point to the PIM header */ + pim = (struct pim *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(pim, struct pim *, m, off, minlen); + if (pim == NULL) { + pim6stat.pim6s_rcv_tooshort++; + return IPPROTO_DONE; + } +#endif + + +#define PIM6_CHECKSUM 1 +#if PIM6_CHECKSUM + { + int cksumlen; + + /* + * Validate checksum. 
+ * If PIM REGISTER, exclude the data packet + */ + if (pim->pim_type == PIM_REGISTER) + cksumlen = PIM_MINLEN; + else + cksumlen = pimlen; + + if (in6_cksum(m, IPPROTO_PIM, off, cksumlen)) { + ++pim6stat.pim6s_rcv_badsum; +#if MRT6DEBUG + if (mrt6debug & DEBUG_PIM) + log(LOG_DEBUG, + "pim6_input: invalid checksum\n"); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + } +#endif /* PIM_CHECKSUM */ + + /* PIM version check */ + if (pim->pim_ver != PIM_VERSION) { + ++pim6stat.pim6s_rcv_badversion; +#if MRT6DEBUG + log(LOG_ERR, + "pim6_input: incorrect version %d, expecting %d\n", + pim->pim_ver, PIM_VERSION); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + + if (pim->pim_type == PIM_REGISTER) { + /* + * since this is a REGISTER, we'll make a copy of the register + * headers ip6+pim+u_int32_t+encap_ip6, to be passed up to the + * routing daemon. + */ + static struct sockaddr_in6 dst = { sizeof(dst), AF_INET6 }; + + struct mbuf *mcp; + struct ip6_hdr *eip6; + u_int32_t *reghdr; + int rc; + + ++pim6stat.pim6s_rcv_registers; + + if ((reg_mif_num >= nummifs) || (reg_mif_num == (mifi_t) -1)) { +#if MRT6DEBUG + if (mrt6debug & DEBUG_PIM) + log(LOG_DEBUG, + "pim6_input: register mif not set: %d\n", + reg_mif_num); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + + reghdr = (u_int32_t *)(pim + 1); + + if ((ntohl(*reghdr) & PIM_NULL_REGISTER)) + goto pim6_input_to_daemon; + + /* + * Validate length + */ + if (pimlen < PIM6_REG_MINLEN) { + ++pim6stat.pim6s_rcv_tooshort; + ++pim6stat.pim6s_rcv_badregisters; +#if MRT6DEBUG + log(LOG_ERR, + "pim6_input: register packet size too " + "small %d from %s\n", + pimlen, ip6_sprintf(&ip6->ip6_src)); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + + eip6 = (struct ip6_hdr *) (reghdr + 1); +#if MRT6DEBUG + if (mrt6debug & DEBUG_PIM) + log(LOG_DEBUG, + "pim6_input[register], eip6: %s -> %s, " + "eip6 plen %d\n", + ip6_sprintf(&eip6->ip6_src), + ip6_sprintf(&eip6->ip6_dst), + ntohs(eip6->ip6_plen)); +#endif + + /* verify the inner 
packet is destined to a mcast group */ + if (!IN6_IS_ADDR_MULTICAST(&eip6->ip6_dst)) { + ++pim6stat.pim6s_rcv_badregisters; +#if MRT6DEBUG + if (mrt6debug & DEBUG_PIM) + log(LOG_DEBUG, + "pim6_input: inner packet of register " + "is not multicast %s\n", + ip6_sprintf(&eip6->ip6_dst)); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + + /* + * make a copy of the whole header to pass to the daemon later. + */ + mcp = m_copy(m, 0, off + PIM6_REG_MINLEN); + if (mcp == NULL) { +#if MRT6DEBUG + log(LOG_ERR, + "pim6_input: pim register: " + "could not copy register head\n"); +#endif + m_freem(m); + return(IPPROTO_DONE); + } + + /* + * forward the inner ip6 packet; point m_data at the inner ip6. + */ + m_adj(m, off + PIM_MINLEN); +#if MRT6DEBUG + if (mrt6debug & DEBUG_PIM) { + log(LOG_DEBUG, + "pim6_input: forwarding decapsulated register: " + "src %s, dst %s, mif %d\n", + ip6_sprintf(&eip6->ip6_src), + ip6_sprintf(&eip6->ip6_dst), + reg_mif_num); + } +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + rc = if_simloop(mif6table[reg_mif_num].m6_ifp, m, + (struct sockaddr *) &dst, NULL); +#else +#if defined (__APPLE__) + if (lo_dl_tag == 0) + dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET6, &lo_dl_tag); + + if (lo_dl_tag) + dlil_output(lo_dl_tag, m, 0, (struct sockaddr *) &dst, 0); + else { + printf("Warning: pim6_input call to dlil_find_dltag failed!\n"); + m_freem(m); + } +#else + rc = looutput(mif6table[reg_mif_num].m6_ifp, m, + (struct sockaddr *) &dst, + (struct rtentry *) NULL); +#endif +#endif + + /* prepare the register head to send to the mrouting daemon */ + m = mcp; + } + + /* + * Pass the PIM message up to the daemon; if it is a register message + * pass the 'head' only up to the daemon. This includes the + * encapsulator ip6 header, pim header, register header and the + * encapsulated ip6 header. 
+ */ + pim6_input_to_daemon: + rip6_input(&m, offp, proto); + return(IPPROTO_DONE); +} diff --git a/bsd/netinet6/ip6_mroute.h b/bsd/netinet6/ip6_mroute.h new file mode 100644 index 000000000..9fe2e5801 --- /dev/null +++ b/bsd/netinet6/ip6_mroute.h @@ -0,0 +1,249 @@ +/* $KAME: ip6_mroute.h,v 1.7 2000/02/22 14:04:22 itojun Exp $ */ + +/* + * Copyright (C) 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* BSDI ip_mroute.h,v 2.5 1996/10/11 16:01:48 pjd Exp */ + +/* + * Definitions for IP multicast forwarding. + * + * Written by David Waitzman, BBN Labs, August 1988. + * Modified by Steve Deering, Stanford, February 1989. + * Modified by Ajit Thyagarajan, PARC, August 1993. + * Modified by Ajit Thyagarajan, PARC, August 1994. + * Modified by Ahmed Helmy, USC, September 1996. + * + * MROUTING Revision: 1.2 + */ + +#ifndef _NETINET6_IP6_MROUTE_H_ +#define _NETINET6_IP6_MROUTE_H_ + +/* + * Multicast Routing set/getsockopt commands. + */ +#define MRT6_INIT 100 /* initialize forwarder */ +#define MRT6_DONE 101 /* shut down forwarder */ +#define MRT6_ADD_MIF 102 /* add multicast interface */ +#define MRT6_DEL_MIF 103 /* delete multicast interface */ +#define MRT6_ADD_MFC 104 /* insert forwarding cache entry */ +#define MRT6_DEL_MFC 105 /* delete forwarding cache entry */ +#define MRT6_PIM 107 /* enable pim code */ + +#if BSD >= 199103 +#define GET_TIME(t) microtime(&t) +#elif defined(sun) +#define GET_TIME(t) uniqtime(&t) +#else +#define GET_TIME(t) ((t) = time) +#endif + +/* + * Types and macros for handling bitmaps with one bit per multicast interface. + */ +typedef u_short mifi_t; /* type of a mif index */ +#define MAXMIFS 64 + +#ifndef IF_SETSIZE +#define IF_SETSIZE 256 +#endif + +typedef long if_mask; +#define NIFBITS (sizeof(if_mask) * NBBY) /* bits per mask */ + +#ifndef howmany +#define howmany(x, y) (((x) + ((y) - 1)) / (y)) +#endif + +typedef struct if_set { + fd_mask ifs_bits[howmany(IF_SETSIZE, NIFBITS)]; +} if_set; + +#define IF_SET(n, p) ((p)->ifs_bits[(n)/NIFBITS] |= (1 << ((n) % NIFBITS))) +#define IF_CLR(n, p) ((p)->ifs_bits[(n)/NIFBITS] &= ~(1 << ((n) % NIFBITS))) +#define IF_ISSET(n, p) ((p)->ifs_bits[(n)/NIFBITS] & (1 << ((n) % NIFBITS))) +#define IF_COPY(f, t) bcopy(f, t, sizeof(*(f))) +#define IF_ZERO(p) bzero(p, sizeof(*(p))) + +/* + * Argument structure for MRT6_ADD_IF. 
+ */ +struct mif6ctl { + mifi_t mif6c_mifi; /* the index of the mif to be added */ + u_char mif6c_flags; /* MIFF_ flags defined below */ + u_short mif6c_pifi; /* the index of the physical IF */ +#if notyet + u_int mif6c_rate_limit; /* max rate */ +#endif +}; + +#define MIFF_REGISTER 0x1 /* mif represents a register end-point */ + +/* + * Argument structure for MRT6_ADD_MFC and MRT6_DEL_MFC + */ +struct mf6cctl { + struct sockaddr_in6 mf6cc_origin; /* IPv6 origin of mcasts */ + struct sockaddr_in6 mf6cc_mcastgrp; /* multicast group associated */ + mifi_t mf6cc_parent; /* incoming ifindex */ + struct if_set mf6cc_ifset; /* set of forwarding ifs */ +}; + +/* + * The kernel's multicast routing statistics. + */ +struct mrt6stat { + u_quad_t mrt6s_mfc_lookups; /* # forw. cache hash table hits */ + u_quad_t mrt6s_mfc_misses; /* # forw. cache hash table misses */ + u_quad_t mrt6s_upcalls; /* # calls to mrouted */ + u_quad_t mrt6s_no_route; /* no route for packet's origin */ + u_quad_t mrt6s_bad_tunnel; /* malformed tunnel options */ + u_quad_t mrt6s_cant_tunnel; /* no room for tunnel options */ + u_quad_t mrt6s_wrong_if; /* arrived on wrong interface */ + u_quad_t mrt6s_upq_ovflw; /* upcall Q overflow */ + u_quad_t mrt6s_cache_cleanups; /* # entries with no upcalls */ + u_quad_t mrt6s_drop_sel; /* pkts dropped selectively */ + u_quad_t mrt6s_q_overflow; /* pkts dropped - Q overflow */ + u_quad_t mrt6s_pkt2large; /* pkts dropped - size > BKT SIZE */ + u_quad_t mrt6s_upq_sockfull; /* upcalls dropped - socket full */ +}; + +/* + * Struct used to communicate from kernel to multicast router + * note the convenient similarity to an IPv6 header. 
+ */ +struct mrt6msg { + u_long unused1; + u_char im6_msgtype; /* what type of message */ +#define MRT6MSG_NOCACHE 1 +#define MRT6MSG_WRONGMIF 2 +#define MRT6MSG_WHOLEPKT 3 /* used for user level encap*/ + u_char im6_mbz; /* must be zero */ + u_char im6_mif; /* mif rec'd on */ + u_char unused2; + struct in6_addr im6_src, im6_dst; +}; + +/* + * Argument structure used by multicast routing daemon to get src-grp + * packet counts + */ +struct sioc_sg_req6 { + struct sockaddr_in6 src; + struct sockaddr_in6 grp; + u_quad_t pktcnt; + u_quad_t bytecnt; + u_quad_t wrong_if; +}; + +/* + * Argument structure used by mrouted to get mif pkt counts + */ +struct sioc_mif_req6 { + mifi_t mifi; /* mif number */ + u_quad_t icount; /* Input packet count on mif */ + u_quad_t ocount; /* Output packet count on mif */ + u_quad_t ibytes; /* Input byte count on mif */ + u_quad_t obytes; /* Output byte count on mif */ +}; + +#if KERNEL +/* + * The kernel's multicast-interface structure. + */ +struct mif6 { + u_char m6_flags; /* MIFF_ flags defined above */ + u_int m6_rate_limit; /* max rate */ +#if notyet + struct tbf *m6_tbf; /* token bucket structure at intf. 
*/ +#endif + struct in6_addr m6_lcl_addr; /* local interface address */ + struct ifnet *m6_ifp; /* pointer to interface */ + u_quad_t m6_pkt_in; /* # pkts in on interface */ + u_quad_t m6_pkt_out; /* # pkts out on interface */ + u_quad_t m6_bytes_in; /* # bytes in on interface */ + u_quad_t m6_bytes_out; /* # bytes out on interface */ + struct route_in6 m6_route;/* cached route if this is a tunnel */ +#if notyet + u_int m6_rsvp_on; /* RSVP listening on this vif */ + struct socket *m6_rsvpd; /* RSVP daemon socket */ +#endif +}; + +/* + * The kernel's multicast forwarding cache entry structure + */ +struct mf6c { + struct sockaddr_in6 mf6c_origin; /* IPv6 origin of mcasts */ + struct sockaddr_in6 mf6c_mcastgrp; /* multicast group associated*/ + mifi_t mf6c_parent; /* incoming IF */ + struct if_set mf6c_ifset; /* set of outgoing IFs */ + + u_quad_t mf6c_pkt_cnt; /* pkt count for src-grp */ + u_quad_t mf6c_byte_cnt; /* byte count for src-grp */ + u_quad_t mf6c_wrong_if; /* wrong if for src-grp */ + int mf6c_expire; /* time to clean entry up */ + struct timeval mf6c_last_assert; /* last time I sent an assert*/ + struct rtdetq *mf6c_stall; /* pkts waiting for route */ + struct mf6c *mf6c_next; /* hash table linkage */ +}; + +#define MF6C_INCOMPLETE_PARENT ((mifi_t)-1) + +/* + * Argument structure used for pkt info. while upcall is made + */ +#ifndef _NETINET_IP_MROUTE_H_ +struct rtdetq { /* XXX: rtdetq is also defined in ip_mroute.h */ + struct mbuf *m; /* A copy of the packet */ + struct ifnet *ifp; /* Interface pkt came in on */ +#if UPCALL_TIMING + struct timeval t; /* Timestamp */ +#endif /* UPCALL_TIMING */ + struct rtdetq *next; +}; +#endif /* _NETINET_IP_MROUTE_H_ */ + +#define MF6CTBLSIZ 256 +#if (MF6CTBLSIZ & (MF6CTBLSIZ - 1)) == 0 /* from sys:route.h */ +#define MF6CHASHMOD(h) ((h) & (MF6CTBLSIZ - 1)) +#else +#define MF6CHASHMOD(h) ((h) % MF6CTBLSIZ) +#endif + +#define MAX_UPQ6 4 /* max. 
no of pkts in upcall Q */ + +int ip6_mrouter_set __P((struct socket *so, struct sockopt *sopt)); +int ip6_mrouter_get __P((struct socket *so, struct sockopt *sopt)); +int ip6_mrouter_done __P((void)); +int mrt6_ioctl __P((int, caddr_t)); +#endif /* KERNEL */ + +#endif /* !_NETINET6_IP6_MROUTE_H_ */ diff --git a/bsd/netinet6/ip6_output.c b/bsd/netinet6/ip6_output.c new file mode 100644 index 000000000..a6e82dac0 --- /dev/null +++ b/bsd/netinet6/ip6_output.c @@ -0,0 +1,3248 @@ +/* $KAME: ip6_output.c,v 1.94 2000/04/04 14:45:44 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1988, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_output.c 8.3 (Berkeley) 1/21/94 + */ + +#if __FreeBSD__ +#include "opt_ip6fw.h" +#endif +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#if __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) +#include +#endif +#include + +#include +#include + +#include +#include +#if defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) +#include +#include +#endif +#include +#include +#include +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802) || defined (__APPLE__) +#include +#else +#include +#endif +#include + +#if IPSEC +#include +#include +#include +#endif /* IPSEC */ + +#ifndef __bsdi__ +#include "loop.h" +#endif + +#include + +#if IPV6FIREWALL +#include +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static MALLOC_DEFINE(M_IPMOPTS, "ip6_moptions", "internet multicast options"); +#endif + +struct ip6_exthdrs { + struct mbuf *ip6e_ip6; + struct mbuf *ip6e_hbh; + struct mbuf *ip6e_dest1; + struct mbuf *ip6e_rthdr; + struct mbuf *ip6e_dest2; +}; + +static int ip6_pcbopt __P((int, u_char *, int, struct ip6_pktopts **, int)); +static int ip6_getpcbopt __P((struct ip6_pktopts *, int, void **, int *)); +#if defined(__FreeBSD__) && __FreeBSD__ >= 
3 || defined(__APPLE__) +static int ip6_pcbopts __P((struct ip6_pktopts **, struct mbuf *, + struct socket *, struct sockopt *sopt)); +#else +static int ip6_pcbopts __P((struct ip6_pktopts **, struct mbuf *, + struct socket *)); +#endif +static int ip6_setmoptions __P((int, struct ip6_moptions **, struct mbuf *)); +static int ip6_getmoptions __P((int, struct ip6_moptions *, struct mbuf **)); +static int ip6_copyexthdr __P((struct mbuf **, caddr_t, int)); +static int ip6_insertfraghdr __P((struct mbuf *, struct mbuf *, int, + struct ip6_frag **)); +static int ip6_insert_jumboopt __P((struct ip6_exthdrs *, u_int32_t)); +static int ip6_splithdr __P((struct mbuf *, struct ip6_exthdrs *)); +#if defined(__bsdi__) || defined(__OpenBSD__) +extern struct ifnet loif; +#endif + +#if __NetBSD__ +extern struct ifnet **ifindex2ifnet; +extern struct ifnet loif[NLOOP]; +#endif + +#if MIP6 +int (*mip6_output_hook)(struct mbuf *m, struct ip6_pktopts **opt); +#endif /* MIP6 */ +static u_long lo_dl_tag = 0; + +/* + * IP6 output. The packet in mbuf chain m contains a skeletal IP6 + * header (with pri, len, nxt, hlim, src, dst). + * This function may modify ver and hlim only. + * The mbuf chain containing the packet will be freed. + * The mbuf opt, if present, will not be freed. 
+ */ +int +ip6_output(m0, opt, ro, flags, im6o, ifpp) + struct mbuf *m0; + struct ip6_pktopts *opt; + struct route_in6 *ro; + int flags; + struct ip6_moptions *im6o; + struct ifnet **ifpp; /* XXX: just for statistics */ +{ + struct ip6_hdr *ip6, *mhip6; + struct ifnet *ifp; + struct mbuf *m = m0; + int hlen, tlen, len, off; + struct route_in6 ip6route; + struct sockaddr_in6 *dst; + int error = 0; + struct in6_ifaddr *ia; + u_long mtu; + u_int32_t optlen = 0, plen = 0, unfragpartlen = 0; + struct ip6_exthdrs exthdrs; + struct in6_addr finaldst; + struct route_in6 *ro_pmtu = NULL; + int hdrsplit = 0; + int needipsec = 0; + + +#if IPSEC + int needipsectun = 0; + struct socket *so; + struct secpolicy *sp = NULL; + + /* for AH processing. stupid to have "socket" variable in IP layer... */ + so = ipsec_getsocket(m); + ipsec_setsocket(m, NULL); + ip6 = mtod(m, struct ip6_hdr *); +#endif /* IPSEC */ + +#define MAKE_EXTHDR(hp,mp) \ + { \ + if (hp) { \ + struct ip6_ext *eh = (struct ip6_ext *)(hp); \ + error = ip6_copyexthdr((mp), (caddr_t)(hp), \ + ((eh)->ip6e_len + 1) << 3); \ + if (error) \ + goto freehdrs; \ + } \ + } + + bzero(&exthdrs, sizeof(exthdrs)); + +#if MIP6 + /* + * Mobile IPv6 + * + * Call Mobile IPv6 to check if there are any Destination Header + * options to add. + */ + if (mip6_output_hook) { + error = (*mip6_output_hook)(m, &opt); + if (error) + goto freehdrs; + } +#endif /* MIP6 */ + + if (opt) { + /* Hop-by-Hop options header */ + MAKE_EXTHDR(opt->ip6po_hbh, &exthdrs.ip6e_hbh); + if (opt->ip6po_rthdr) { + /* + * Destination options header(1st part) + * This only makes sence with a routing header. 
+ */ + MAKE_EXTHDR(opt->ip6po_dest1, &exthdrs.ip6e_dest1); + } + /* Routing header */ + MAKE_EXTHDR(opt->ip6po_rthdr, &exthdrs.ip6e_rthdr); + /* Destination options header(2nd part) */ + MAKE_EXTHDR(opt->ip6po_dest2, &exthdrs.ip6e_dest2); + } + +#if IPSEC + /* get a security policy for this packet */ + if (so == NULL) + sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, 0, &error); + else + sp = ipsec6_getpolicybysock(m, IPSEC_DIR_OUTBOUND, so, &error); + + if (sp == NULL) { + ipsec6stat.out_inval++; + goto bad; + } + + error = 0; + + /* check policy */ + switch (sp->policy) { + case IPSEC_POLICY_DISCARD: + /* + * This packet is just discarded. + */ + ipsec6stat.out_polvio++; + goto bad; + + case IPSEC_POLICY_BYPASS: + case IPSEC_POLICY_NONE: + /* no need to do IPsec. */ + needipsec = 0; + break; + + case IPSEC_POLICY_IPSEC: + if (sp->req == NULL) { + /* acquire a policy */ + error = key_spdacquire(sp); + goto bad; + } + needipsec = 1; + break; + + case IPSEC_POLICY_ENTRUST: + default: + printf("ip6_output: Invalid policy found. %d\n", sp->policy); + } +#endif /* IPSEC */ + + /* + * Calculate the total length of the extension header chain. + * Keep the length of the unfragmentable part for fragmentation. + */ + optlen = 0; + if (exthdrs.ip6e_hbh) optlen += exthdrs.ip6e_hbh->m_len; + if (exthdrs.ip6e_dest1) optlen += exthdrs.ip6e_dest1->m_len; + if (exthdrs.ip6e_rthdr) optlen += exthdrs.ip6e_rthdr->m_len; + unfragpartlen = optlen + sizeof(struct ip6_hdr); + /* NOTE: we don't add AH/ESP length here. do that later. */ + if (exthdrs.ip6e_dest2) optlen += exthdrs.ip6e_dest2->m_len; + + /* + * If we need IPsec, or there is at least one extension header, + * separate IP6 header from the payload. 
+ */ + if ((needipsec || optlen) && !hdrsplit) { + if ((error = ip6_splithdr(m, &exthdrs)) != 0) { + m = NULL; + goto freehdrs; + } + m = exthdrs.ip6e_ip6; + hdrsplit++; + } + + /* adjust pointer */ + ip6 = mtod(m, struct ip6_hdr *); + + /* adjust mbuf packet header length */ + m->m_pkthdr.len += optlen; + plen = m->m_pkthdr.len - sizeof(*ip6); + + /* If this is a jumbo payload, insert a jumbo payload option. */ + if (plen > IPV6_MAXPACKET) { + if (!hdrsplit) { + if ((error = ip6_splithdr(m, &exthdrs)) != 0) { + m = NULL; + goto freehdrs; + } + m = exthdrs.ip6e_ip6; + hdrsplit++; + } + /* adjust pointer */ + ip6 = mtod(m, struct ip6_hdr *); + if ((error = ip6_insert_jumboopt(&exthdrs, plen)) != 0) + goto freehdrs; + ip6->ip6_plen = 0; + } else + ip6->ip6_plen = htons(plen); + + /* + * Concatenate headers and fill in next header fields. + * Here we have, on "m" + * IPv6 payload + * and we insert headers accordingly. Finally, we should be getting: + * IPv6 hbh dest1 rthdr ah* [esp* dest2 payload] + * + * during the header composing process, "m" points to IPv6 header. + * "mprev" points to an extension header prior to esp. + */ + { + u_char *nexthdrp = &ip6->ip6_nxt; + struct mbuf *mprev = m; + + /* + * we treat dest2 specially. this makes IPsec processing + * much easier. + * + * result: IPv6 dest2 payload + * m and mprev will point to IPv6 header. + */ + if (exthdrs.ip6e_dest2) { + if (!hdrsplit) + panic("assumption failed: hdr not split"); + exthdrs.ip6e_dest2->m_next = m->m_next; + m->m_next = exthdrs.ip6e_dest2; + *mtod(exthdrs.ip6e_dest2, u_char *) = ip6->ip6_nxt; + ip6->ip6_nxt = IPPROTO_DSTOPTS; + } + +#define MAKE_CHAIN(m,mp,p,i)\ + {\ + if (m) {\ + if (!hdrsplit) \ + panic("assumption failed: hdr not split"); \ + *mtod((m), u_char *) = *(p);\ + *(p) = (i);\ + p = mtod((m), u_char *);\ + (m)->m_next = (mp)->m_next;\ + (mp)->m_next = (m);\ + (mp) = (m);\ + }\ + } + /* + * result: IPv6 hbh dest1 rthdr dest2 payload + * m will point to IPv6 header. 
mprev will point to the + * extension header prior to dest2 (rthdr in the above case). + */ + MAKE_CHAIN(exthdrs.ip6e_hbh, mprev, + nexthdrp, IPPROTO_HOPOPTS); + MAKE_CHAIN(exthdrs.ip6e_dest1, mprev, + nexthdrp, IPPROTO_DSTOPTS); + MAKE_CHAIN(exthdrs.ip6e_rthdr, mprev, + nexthdrp, IPPROTO_ROUTING); + +#if IPSEC + if (!needipsec) + goto skip_ipsec2; + + /* + * pointers after IPsec headers are not valid any more. + * other pointers need a great care too. + * (IPsec routines should not mangle mbufs prior to AH/ESP) + */ + exthdrs.ip6e_dest2 = NULL; + + { + struct ip6_rthdr *rh = NULL; + int segleft_org = 0; + struct ipsec_output_state state; + + if (exthdrs.ip6e_rthdr) { + rh = mtod(exthdrs.ip6e_rthdr, struct ip6_rthdr *); + segleft_org = rh->ip6r_segleft; + rh->ip6r_segleft = 0; + } + + bzero(&state, sizeof(state)); + state.m = m; + error = ipsec6_output_trans(&state, nexthdrp, mprev, sp, flags, + &needipsectun); + m = state.m; + if (error) { + /* mbuf is already reclaimed in ipsec6_output_trans. */ + m = NULL; + switch (error) { + case EHOSTUNREACH: + case ENETUNREACH: + case EMSGSIZE: + case ENOBUFS: + case ENOMEM: + break; + default: + printf("ip6_output (ipsec): error code %d\n", error); + /*fall through*/ + case ENOENT: + /* don't show these error codes to the user */ + error = 0; + break; + } + goto bad; + } + if (exthdrs.ip6e_rthdr) { + /* ah6_output doesn't modify mbuf chain */ + rh->ip6r_segleft = segleft_org; + } + } +skip_ipsec2:; +#endif + } + + /* + * If there is a routing header, replace destination address field + * with the first hop of the routing header. 
+ */ + if (exthdrs.ip6e_rthdr) { + struct ip6_rthdr *rh = + (struct ip6_rthdr *)(mtod(exthdrs.ip6e_rthdr, + struct ip6_rthdr *)); + struct ip6_rthdr0 *rh0; + struct in6_addr *addr; + + finaldst = ip6->ip6_dst; + switch(rh->ip6r_type) { + case IPV6_RTHDR_TYPE_0: + rh0 = (struct ip6_rthdr0 *)rh; + addr = (struct in6_addr *)(rh0 + 1); + + ip6->ip6_dst = *addr; + bcopy((caddr_t)(addr + 1), (caddr_t)addr, + sizeof(struct in6_addr)*(rh0->ip6r0_segleft - 1) + ); + *(addr + rh0->ip6r0_segleft - 1) = finaldst; + break; + default: /* is it possible? */ + error = EINVAL; + goto bad; + } + } + + /* Source address validation */ + if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src) && + (flags & IPV6_DADOUTPUT) == 0) { + error = EOPNOTSUPP; + ip6stat.ip6s_badscope++; + goto bad; + } + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { + error = EOPNOTSUPP; + ip6stat.ip6s_badscope++; + goto bad; + } + + ip6stat.ip6s_localout++; + + /* + * Route packet. + */ + if (ro == 0) { + ro = &ip6route; + bzero((caddr_t)ro, sizeof(*ro)); + } + ro_pmtu = ro; + if (opt && opt->ip6po_rthdr) + ro = &opt->ip6po_route; + dst = (struct sockaddr_in6 *)&ro->ro_dst; + /* + * If there is a cached route, + * check that it is to the same destination + * and is still up. If not, free it and try again. + */ + if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 || + !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst))) { + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + } + if (ro->ro_rt == 0) { + bzero(dst, sizeof(*dst)); + dst->sin6_family = AF_INET6; + dst->sin6_len = sizeof(struct sockaddr_in6); + dst->sin6_addr = ip6->ip6_dst; + } +#if IPSEC + if (needipsec && needipsectun) { + struct ipsec_output_state state; + + /* + * All the extension headers will become inaccessible + * (since they can be encrypted). + * Don't panic, we need no more updates to extension headers + * on inner IPv6 packet (since they are now encapsulated). 
+ * + * IPv6 [ESP|AH] IPv6 [extension headers] payload + */ + bzero(&exthdrs, sizeof(exthdrs)); + exthdrs.ip6e_ip6 = m; + + bzero(&state, sizeof(state)); + state.m = m; + state.ro = (struct route *)ro; + state.dst = (struct sockaddr *)dst; + + error = ipsec6_output_tunnel(&state, sp, flags); + + m = state.m; + ro = (struct route_in6 *)state.ro; + dst = (struct sockaddr_in6 *)state.dst; + if (error) { + /* mbuf is already reclaimed in ipsec6_output_tunnel. */ + m0 = m = NULL; + m = NULL; + switch (error) { + case EHOSTUNREACH: + case ENETUNREACH: + case EMSGSIZE: + case ENOBUFS: + case ENOMEM: + break; + default: + printf("ip6_output (ipsec): error code %d\n", error); + /*fall through*/ + case ENOENT: + /* don't show these error codes to the user */ + error = 0; + break; + } + goto bad; + } + + exthdrs.ip6e_ip6 = m; + } +#endif /*IPSEC*/ + + if (!IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { + /* Unicast */ + +#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) +#define sin6tosa(sin6) ((struct sockaddr *)(sin6)) + /* xxx + * interface selection comes here + * if an interface is specified from an upper layer, + * ifp must point it. + */ + if (ro->ro_rt == 0) { +#ifndef __bsdi__ + /* + * non-bsdi always clone routes, if parent is + * PRF_CLONING. + */ + rtalloc((struct route *)ro); +#else + if (ro == &ip6route) /* xxx kazu */ + rtalloc((struct route *)ro); + else + rtcalloc((struct route *)ro); +#endif + } + if (ro->ro_rt == 0) { + ip6stat.ip6s_noroute++; + error = EHOSTUNREACH; + /* XXX in6_ifstat_inc(ifp, ifs6_out_discard); */ + goto bad; + } + ia = ifatoia6(ro->ro_rt->rt_ifa); + ifp = ro->ro_rt->rt_ifp; + ro->ro_rt->rt_use++; + if (ro->ro_rt->rt_flags & RTF_GATEWAY) + dst = (struct sockaddr_in6 *)ro->ro_rt->rt_gateway; + m->m_flags &= ~(M_BCAST | M_MCAST); /* just in case */ + + in6_ifstat_inc(ifp, ifs6_out_request); + + /* + * Check if the outgoing interface conflicts with + * the interface specified by ifi6_ifindex (if specified). 
+ * Note that loopback interface is always okay. + * (this may happen when we are sending a packet to one of + * our own addresses.) + */ + if (opt && opt->ip6po_pktinfo + && opt->ip6po_pktinfo->ipi6_ifindex) { + if (!(ifp->if_flags & IFF_LOOPBACK) + && ifp->if_index != opt->ip6po_pktinfo->ipi6_ifindex) { + ip6stat.ip6s_noroute++; + in6_ifstat_inc(ifp, ifs6_out_discard); + error = EHOSTUNREACH; + goto bad; + } + } + + if (opt && opt->ip6po_hlim != -1) + ip6->ip6_hlim = opt->ip6po_hlim & 0xff; + } else { + /* Multicast */ + struct in6_multi *in6m; + + m->m_flags = (m->m_flags & ~M_BCAST) | M_MCAST; + + /* + * See if the caller provided any multicast options + */ + ifp = NULL; + if (im6o != NULL) { + ip6->ip6_hlim = im6o->im6o_multicast_hlim; + if (im6o->im6o_multicast_ifp != NULL) + ifp = im6o->im6o_multicast_ifp; + } else + ip6->ip6_hlim = ip6_defmcasthlim; + + /* + * See if the caller provided the outgoing interface + * as an ancillary data. + * Boundary check for ifindex is assumed to be already done. + */ + if (opt && opt->ip6po_pktinfo && opt->ip6po_pktinfo->ipi6_ifindex) + ifp = ifindex2ifnet[opt->ip6po_pktinfo->ipi6_ifindex]; + + /* + * If the destination is a node-local scope multicast, + * the packet should be loop-backed only. + */ + if (IN6_IS_ADDR_MC_NODELOCAL(&ip6->ip6_dst)) { + /* + * If the outgoing interface is already specified, + * it should be a loopback interface. + */ + if (ifp && (ifp->if_flags & IFF_LOOPBACK) == 0) { + ip6stat.ip6s_badscope++; + error = ENETUNREACH; /* XXX: better error? */ + /* XXX correct ifp? */ + in6_ifstat_inc(ifp, ifs6_out_discard); + goto bad; + } else { +#ifdef __bsdi__ + ifp = loifp; +#else + ifp = &loif[0]; +#endif + } + } + + if (opt && opt->ip6po_hlim != -1) + ip6->ip6_hlim = opt->ip6po_hlim & 0xff; + + /* + * If caller did not provide an interface lookup a + * default in the routing table. This is either a + * default for the speicfied group (i.e. 
a host + * route), or a multicast default (a route for the + * ``net'' ff00::/8). + */ + if (ifp == NULL) { + if (ro->ro_rt == 0) { + ro->ro_rt = rtalloc1((struct sockaddr *) + &ro->ro_dst, 0 +#if __FreeBSD__ || defined (__APPLE__) + , 0UL +#endif + ); + } + if (ro->ro_rt == 0) { + ip6stat.ip6s_noroute++; + error = EHOSTUNREACH; + /* XXX in6_ifstat_inc(ifp, ifs6_out_discard) */ + goto bad; + } + ia = ifatoia6(ro->ro_rt->rt_ifa); + ifp = ro->ro_rt->rt_ifp; + ro->ro_rt->rt_use++; + } + + if ((flags & IPV6_FORWARDING) == 0) + in6_ifstat_inc(ifp, ifs6_out_request); + in6_ifstat_inc(ifp, ifs6_out_mcast); + + /* + * Confirm that the outgoing interface supports multicast. + */ + if ((ifp->if_flags & IFF_MULTICAST) == 0) { + ip6stat.ip6s_noroute++; + in6_ifstat_inc(ifp, ifs6_out_discard); + error = ENETUNREACH; + goto bad; + } + IN6_LOOKUP_MULTI(ip6->ip6_dst, ifp, in6m); + if (in6m != NULL && + (im6o == NULL || im6o->im6o_multicast_loop)) { + /* + * If we belong to the destination multicast group + * on the outgoing interface, and the caller did not + * forbid loopback, loop back a copy. + */ + ip6_mloopback(ifp, m, dst); + } else { + /* + * If we are acting as a multicast router, perform + * multicast forwarding as if the packet had just + * arrived on the interface to which we are about + * to send. The multicast forwarding function + * recursively calls this function, using the + * IPV6_FORWARDING flag to prevent infinite recursion. + * + * Multicasts that are looped back by ip6_mloopback(), + * above, will be forwarded by the ip6_input() routine, + * if necessary. + */ + if (ip6_mrouter && (flags & IPV6_FORWARDING) == 0) { + if (ip6_mforward(ip6, ifp, m) != NULL) { + m_freem(m); + goto done; + } + } + } + /* + * Multicasts with a hoplimit of zero may be looped back, + * above, but must not be transmitted on a network. 
+ * Also, multicasts addressed to the loopback interface + * are not sent -- the above call to ip6_mloopback() will + * loop back a copy if this host actually belongs to the + * destination group on the loopback interface. + */ + if (ip6->ip6_hlim == 0 || (ifp->if_flags & IFF_LOOPBACK)) { + m_freem(m); + goto done; + } + } + + /* + * Fill the outgoing inteface to tell the upper layer + * to increment per-interface statistics. + */ + if (ifpp) + *ifpp = ifp; + + /* + * Upper-layer reachability confirmation + */ + if (opt && (opt->ip6po_flags & IP6PO_REACHCONF)) + nd6_nud_hint(ro->ro_rt, NULL); + + /* + * Determine path MTU. + */ + if (ro_pmtu != ro) { + /* The first hop and the final destination may differ. */ + struct sockaddr_in6 *sin6_fin = + (struct sockaddr_in6 *)&ro_pmtu->ro_dst; + if (ro_pmtu->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 || + !IN6_ARE_ADDR_EQUAL(&sin6_fin->sin6_addr, + &finaldst))) { + RTFREE(ro_pmtu->ro_rt); + ro_pmtu->ro_rt = (struct rtentry *)0; + } + if (ro_pmtu->ro_rt == 0) { + bzero(sin6_fin, sizeof(*sin6_fin)); + sin6_fin->sin6_family = AF_INET6; + sin6_fin->sin6_len = sizeof(struct sockaddr_in6); + sin6_fin->sin6_addr = finaldst; + +#ifdef __bsdi__ /* bsdi needs rtcalloc to clone a route. */ + rtcalloc((struct route *)ro_pmtu); +#else + rtalloc((struct route *)ro_pmtu); +#endif + } + } + if (ro_pmtu->ro_rt != NULL) { + u_int32_t ifmtu = nd_ifinfo[ifp->if_index].linkmtu; + + mtu = ro_pmtu->ro_rt->rt_rmx.rmx_mtu; + if (mtu > ifmtu) { + /* + * The MTU on the route is larger than the MTU on + * the interface! This shouldn't happen, unless the + * MTU of the interface has been changed after the + * interface was brought up. Change the MTU in the + * route to match the interface MTU (as long as the + * field isn't locked). 
+ */ + mtu = ifmtu; + if ((ro_pmtu->ro_rt->rt_rmx.rmx_locks & RTV_MTU) == 0) + ro_pmtu->ro_rt->rt_rmx.rmx_mtu = mtu; /* XXX */ + } + } else { + mtu = nd_ifinfo[ifp->if_index].linkmtu; + } + + /* + * advanced API (IPV6_USE_MIN_MTU) overrides mtu setting + */ + if (mtu > IPV6_MMTU) { + if ((opt && (opt->ip6po_flags & IP6PO_MINMTU)) || + (flags & IPV6_MINMTU)) { + mtu = IPV6_MMTU; + } + } + + /* + * Fake link-local scope-class addresses + */ + if ((ifp->if_flags & IFF_LOOPBACK) == 0) { + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) + ip6->ip6_src.s6_addr16[1] = 0; + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) + ip6->ip6_dst.s6_addr16[1] = 0; + } + +#if IPV6FIREWALL + /* + * Check with the firewall... + */ + if (ip6_fw_chk_ptr) { + u_short port = 0; + /* If ipfw says divert, we have to just drop packet */ + if ((*ip6_fw_chk_ptr)(&ip6, ifp, &port, &m)) { + m_freem(m); + goto done; + } + if (!m) { + error = EACCES; + goto done; + } + } +#endif + + /* + * If the outgoing packet contains a hop-by-hop options header, + * it must be examined and processed even by the source node. + * (RFC 2460, section 4.) + */ + if (exthdrs.ip6e_hbh) { + struct ip6_hbh *hbh = mtod(exthdrs.ip6e_hbh, + struct ip6_hbh *); + u_int32_t dummy1; /* XXX unused */ + u_int32_t dummy2; /* XXX unused */ + + /* + * XXX: if we have to send an ICMPv6 error to the sender, + * we need the M_LOOP flag since icmp6_error() expects + * the IPv6 and the hop-by-hop options header are + * continuous unless the flag is set. + */ + m->m_flags |= M_LOOP; + m->m_pkthdr.rcvif = ifp; + if (ip6_process_hopopts(m, + (u_int8_t *)(hbh + 1), + ((hbh->ip6h_len + 1) << 3) - + sizeof(struct ip6_hbh), + &dummy1, &dummy2) < 0) { + /* m was already freed at this point */ + error = EINVAL;/* better error? */ + goto done; + } + m->m_flags &= ~M_LOOP; /* XXX */ + m->m_pkthdr.rcvif = NULL; + } + + /* + * Send the packet to the outgoing interface. + * If necessary, do IPv6 fragmentation before sending. 
+ */ + tlen = m->m_pkthdr.len; + if (tlen <= mtu +#if notyet + /* + * On any link that cannot convey a 1280-octet packet in one piece, + * link-specific fragmentation and reassembly must be provided at + * a layer below IPv6. [RFC 2460, sec.5] + * Thus if the interface has ability of link-level fragmentation, + * we can just send the packet even if the packet size is + * larger than the link's MTU. + * XXX: IFF_FRAGMENTABLE (or such) flag has not been defined yet... + */ + + || ifp->if_flags & IFF_FRAGMENTABLE +#endif + ) + { +#if defined(__NetBSD__) && defined(IFA_STATS) + if (IFA_STATS) { + struct in6_ifaddr *ia6; + ip6 = mtod(m, struct ip6_hdr *); + ia6 = in6_ifawithifp(ifp, &ip6->ip6_src); + if (ia6) { + ia->ia_ifa.ifa_data.ifad_outbytes += + m->m_pkthdr.len; + } + } +#endif +#if OLDIP6OUTPUT + error = (*ifp->if_output)(ifp, m, (struct sockaddr *)dst, + ro->ro_rt); +#else + error = nd6_output(ifp, m, dst, ro->ro_rt); +#endif + goto done; + } else if (mtu < IPV6_MMTU) { + /* + * note that path MTU is never less than IPV6_MMTU + * (see icmp6_input). + */ + error = EMSGSIZE; + in6_ifstat_inc(ifp, ifs6_out_fragfail); + goto bad; + } else if (ip6->ip6_plen == 0) { /* jumbo payload cannot be fragmented */ + error = EMSGSIZE; + in6_ifstat_inc(ifp, ifs6_out_fragfail); + goto bad; + } else { + struct mbuf **mnext, *m_frgpart; + struct ip6_frag *ip6f; + u_int32_t id = htonl(ip6_id++); + u_char nextproto; + + /* + * Too large for the destination or interface; + * fragment if possible. + * Must be able to put at least 8 bytes per fragment. + */ + hlen = unfragpartlen; + if (mtu > IPV6_MAXPACKET) + mtu = IPV6_MAXPACKET; + len = (mtu - hlen - sizeof(struct ip6_frag)) & ~7; + if (len < 8) { + error = EMSGSIZE; + in6_ifstat_inc(ifp, ifs6_out_fragfail); + goto bad; + } + + mnext = &m->m_nextpkt; + + /* + * Change the next header field of the last header in the + * unfragmentable part. 
+ */ + if (exthdrs.ip6e_rthdr) { + nextproto = *mtod(exthdrs.ip6e_rthdr, u_char *); + *mtod(exthdrs.ip6e_rthdr, u_char *) = IPPROTO_FRAGMENT; + } else if (exthdrs.ip6e_dest1) { + nextproto = *mtod(exthdrs.ip6e_dest1, u_char *); + *mtod(exthdrs.ip6e_dest1, u_char *) = IPPROTO_FRAGMENT; + } else if (exthdrs.ip6e_hbh) { + nextproto = *mtod(exthdrs.ip6e_hbh, u_char *); + *mtod(exthdrs.ip6e_hbh, u_char *) = IPPROTO_FRAGMENT; + } else { + nextproto = ip6->ip6_nxt; + ip6->ip6_nxt = IPPROTO_FRAGMENT; + } + + /* + * Loop through length of segment after first fragment, + * make new header and copy data of each part and link onto chain. + */ + m0 = m; + for (off = hlen; off < tlen; off += len) { + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (!m) { + error = ENOBUFS; + ip6stat.ip6s_odropped++; + goto sendorfree; + } + m->m_flags = m0->m_flags & M_COPYFLAGS; + *mnext = m; + mnext = &m->m_nextpkt; + m->m_data += max_linkhdr; + mhip6 = mtod(m, struct ip6_hdr *); + *mhip6 = *ip6; + m->m_len = sizeof(*mhip6); + error = ip6_insertfraghdr(m0, m, hlen, &ip6f); + if (error) { + ip6stat.ip6s_odropped++; + goto sendorfree; + } + ip6f->ip6f_offlg = htons((u_short)((off - hlen) & ~7)); + if (off + len >= tlen) + len = tlen - off; + else + ip6f->ip6f_offlg |= IP6F_MORE_FRAG; + mhip6->ip6_plen = htons((u_short)(len + hlen + + sizeof(*ip6f) - + sizeof(struct ip6_hdr))); + if ((m_frgpart = m_copy(m0, off, len)) == 0) { + error = ENOBUFS; + ip6stat.ip6s_odropped++; + goto sendorfree; + } + m_cat(m, m_frgpart); + m->m_pkthdr.len = len + hlen + sizeof(*ip6f); + m->m_pkthdr.rcvif = (struct ifnet *)0; + ip6f->ip6f_reserved = 0; + ip6f->ip6f_ident = id; + ip6f->ip6f_nxt = nextproto; + ip6stat.ip6s_ofragments++; + in6_ifstat_inc(ifp, ifs6_out_fragcreat); + } + + in6_ifstat_inc(ifp, ifs6_out_fragok); + } + + /* + * Remove leading garbages. 
+ */ +sendorfree: + m = m0->m_nextpkt; + m0->m_nextpkt = 0; + m_freem(m0); + for (m0 = m; m; m = m0) { + m0 = m->m_nextpkt; + m->m_nextpkt = 0; + if (error == 0) { +#if defined(__NetBSD__) && defined(IFA_STATS) + if (IFA_STATS) { + struct in6_ifaddr *ia6; + ip6 = mtod(m, struct ip6_hdr *); + ia6 = in6_ifawithifp(ifp, &ip6->ip6_src); + if (ia6) { + ia->ia_ifa.ifa_data.ifad_outbytes += + m->m_pkthdr.len; + } + } +#endif +#if OLDIP6OUTPUT + error = (*ifp->if_output)(ifp, m, + (struct sockaddr *)dst, + ro->ro_rt); +#else + error = nd6_output(ifp, m, dst, ro->ro_rt); +#endif + } else + m_freem(m); + } + + if (error == 0) + ip6stat.ip6s_fragmented++; + +done: + if (ro == &ip6route && ro->ro_rt) { /* brace necessary for RTFREE */ + RTFREE(ro->ro_rt); + } else if (ro_pmtu == &ip6route && ro_pmtu->ro_rt) { + RTFREE(ro_pmtu->ro_rt); + } + +#if IPSEC + if (sp != NULL) + key_freesp(sp); +#endif /* IPSEC */ + + return(error); + +freehdrs: + m_freem(exthdrs.ip6e_hbh); /* m_freem will check if mbuf is 0 */ + m_freem(exthdrs.ip6e_dest1); + m_freem(exthdrs.ip6e_rthdr); + m_freem(exthdrs.ip6e_dest2); + /* fall through */ +bad: + m_freem(m); + goto done; +} + +static int +ip6_copyexthdr(mp, hdr, hlen) + struct mbuf **mp; + caddr_t hdr; + int hlen; +{ + struct mbuf *m; + + if (hlen > MCLBYTES) + return(ENOBUFS); /* XXX */ + + MGET(m, M_DONTWAIT, MT_DATA); + if (!m) + return(ENOBUFS); + + if (hlen > MLEN) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + return(ENOBUFS); + } + } + m->m_len = hlen; + if (hdr) + bcopy(hdr, mtod(m, caddr_t), hlen); + + *mp = m; + return(0); +} + +/* + * Insert jumbo payload option. + */ +static int +ip6_insert_jumboopt(exthdrs, plen) + struct ip6_exthdrs *exthdrs; + u_int32_t plen; +{ + struct mbuf *mopt; + u_char *optbuf; + +#define JUMBOOPTLEN 8 /* length of jumbo payload option and padding */ + + /* + * If there is no hop-by-hop options header, allocate new one. 
+ * If there is one but it doesn't have enough space to store the + * jumbo payload option, allocate a cluster to store the whole options. + * Otherwise, use it to store the options. + */ + if (exthdrs->ip6e_hbh == 0) { + MGET(mopt, M_DONTWAIT, MT_DATA); + if (mopt == 0) + return(ENOBUFS); + mopt->m_len = JUMBOOPTLEN; + optbuf = mtod(mopt, u_char *); + optbuf[1] = 0; /* = ((JUMBOOPTLEN) >> 3) - 1 */ + exthdrs->ip6e_hbh = mopt; + } else { + struct ip6_hbh *hbh; + + mopt = exthdrs->ip6e_hbh; + if (M_TRAILINGSPACE(mopt) < JUMBOOPTLEN) { + caddr_t oldoptp = mtod(mopt, caddr_t); + int oldoptlen = mopt->m_len; + + if (mopt->m_flags & M_EXT) + return(ENOBUFS); /* XXX */ + MCLGET(mopt, M_DONTWAIT); + if ((mopt->m_flags & M_EXT) == 0) + return(ENOBUFS); + + bcopy(oldoptp, mtod(mopt, caddr_t), oldoptlen); + optbuf = mtod(mopt, caddr_t) + oldoptlen; + mopt->m_len = oldoptlen + JUMBOOPTLEN; + } else { + optbuf = mtod(mopt, u_char *) + mopt->m_len; + mopt->m_len += JUMBOOPTLEN; + } + /* + * PadN covering 2 bytes: opt data len must be 0 + * (RFC 2460 sec. 4.2), so the jumbo option written + * at optbuf[2] below starts right after the pad. + */ + optbuf[0] = IP6OPT_PADN; + optbuf[1] = 0; + + /* + * Adjust the header length according to the pad and + * the jumbo payload option. + */ + hbh = mtod(mopt, struct ip6_hbh *); + hbh->ip6h_len += (JUMBOOPTLEN >> 3); + } + + /* fill in the option. */ + optbuf[2] = IP6OPT_JUMBO; + optbuf[3] = 4; + *(u_int32_t *)&optbuf[4] = htonl(plen + JUMBOOPTLEN); + + /* finally, adjust the packet header length */ + exthdrs->ip6e_ip6->m_pkthdr.len += JUMBOOPTLEN; + + return(0); +#undef JUMBOOPTLEN +} + +/* + * Insert fragment header and copy unfragmentable header portions. + */ +static int +ip6_insertfraghdr(m0, m, hlen, frghdrp) + struct mbuf *m0, *m; + int hlen; + struct ip6_frag **frghdrp; +{ + struct mbuf *n, *mlast; + + if (hlen > sizeof(struct ip6_hdr)) { + n = m_copym(m0, sizeof(struct ip6_hdr), + hlen - sizeof(struct ip6_hdr), M_DONTWAIT); + if (n == 0) + return(ENOBUFS); + m->m_next = n; + } else + n = m; + + /* Search for the last mbuf of unfragmentable part. 
*/ + for (mlast = n; mlast->m_next; mlast = mlast->m_next) + ; + + if ((mlast->m_flags & M_EXT) == 0 && + M_TRAILINGSPACE(mlast) < sizeof(struct ip6_frag)) { + /* use the trailing space of the last mbuf for the fragment hdr */ + *frghdrp = + (struct ip6_frag *)(mtod(mlast, caddr_t) + mlast->m_len); + mlast->m_len += sizeof(struct ip6_frag); + m->m_pkthdr.len += sizeof(struct ip6_frag); + } else { + /* allocate a new mbuf for the fragment header */ + struct mbuf *mfrg; + + MGET(mfrg, M_DONTWAIT, MT_DATA); + if (mfrg == 0) + return(ENOBUFS); + mfrg->m_len = sizeof(struct ip6_frag); + *frghdrp = mtod(mfrg, struct ip6_frag *); + mlast->m_next = mfrg; + } + + return(0); +} + +/* + * IP6 socket option processing. + */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int +ip6_ctloutput(so, sopt) + struct socket *so; + struct sockopt *sopt; +#else +int +ip6_ctloutput(op, so, level, optname, mp) + int op; + struct socket *so; + int level, optname; + struct mbuf **mp; +#endif +{ + int privileged, optdatalen; + void *optdata; + struct ip6_recvpktopts *rcvopts; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + register struct inpcb *in6p = sotoinpcb(so); + int error, optval; + int level, op, optname; + int optlen; + struct proc *p; + + if (sopt) { + level = sopt->sopt_level; + op = sopt->sopt_dir; + optname = sopt->sopt_name; + optlen = sopt->sopt_valsize; + p = sopt->sopt_p; + } else { + panic("ip6_ctloutput: arg soopt is NULL"); + } +#else +#if HAVE_NRL_INPCB + register struct inpcb *inp = sotoinpcb(so); +#else + register struct in6pcb *in6p = sotoin6pcb(so); +#endif + register struct mbuf *m = *mp; + int error, optval; + int optlen; +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) + struct proc *p = curproc; /* XXX */ +#endif + + optlen = m ? 
m->m_len : 0; +#endif + error = optval = 0; + +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !defined (__APPLE__) + privileged = (p == 0 || suser(p->p_ucred, &p->p_acflag)) ? 0 : 1; +#else +#if HAVE_NRL_INPCB + privileged = (inp->inp_socket->so_state & SS_PRIV); +#else + privileged = (in6p->in6p_socket->so_state & SS_PRIV); +#endif +#endif + +#if defined(HAVE_NRL_INPCB) + rcvopts = inp->inp_inputopts6; +#else + rcvopts = in6p->in6p_inputopts; +#endif + + if (level == IPPROTO_IPV6) { + switch (op) { + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + case SOPT_SET: +#else + case PRCO_SETOPT: +#endif + switch (optname) { + case IPV6_PKTOPTIONS: + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct mbuf *m; + + error = sooptgetm(sopt, &m); /* XXX */ + if (error != NULL) + break; + error = sooptmcopyin(sopt, m); /* XXX */ + if (error != NULL) + break; + error = ip6_pcbopts(&in6p->in6p_outputopts, + m, so, sopt); + m_freem(m); /* XXX */ +#else +#if HAVE_NRL_INPCB + error = ip6_pcbopts(&inp->inp_outputopts6, + m, so); +#else + error = ip6_pcbopts(&in6p->in6p_outputopts, + m, so); +#endif /* HAVE_NRL_INPCB */ +#endif /* FreeBSD >= 3 */ + break; + } + /* + * Use of some Hop-by-Hop options or some + * Destination options, might require special + * privilege. That is, normal applications + * (without special privilege) might be forbidden + * from setting certain options in outgoing packets, + * and might never see certain options in received + * packets. [RFC 2292 Section 6] + * KAME specific note: + * KAME prevents non-privileged users from sending or + * receiving ANY hbh/dst options in order to avoid + * overhead of parsing options in the kernel. 
+ */ + case IPV6_RECVHOPOPTS: + case IPV6_RECVDSTOPTS: + case IPV6_RECVRTHDRDSTOPTS: + if (!privileged) { + error = EPERM; + break; + } + /* fall through */ + case IPV6_UNICAST_HOPS: + case IPV6_HOPLIMIT: + case IPV6_CHECKSUM: + case IPV6_FAITH: + + case IPV6_RECVPKTINFO: + case IPV6_RECVHOPLIMIT: + case IPV6_RECVRTHDR: + case IPV6_USE_MIN_MTU: +#ifdef notyet /* To be implemented */ + case IPV6_RECVPATHMTU: +#endif +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + case IPV6_BINDV6ONLY: +#endif + if (optlen != sizeof(int)) + error = EINVAL; + else { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + error = sooptcopyin(sopt, &optval, + sizeof optval, sizeof optval); + if (error) + break; +#else + optval = *mtod(m, int *); +#endif + switch (optname) { + + case IPV6_UNICAST_HOPS: + if (optval < -1 || optval >= 256) + error = EINVAL; + else { + /* -1 = kernel default */ +#if HAVE_NRL_INPCB + inp->inp_hops = optval; +#else + in6p->in6p_hops = optval; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if ((in6p->in6p_vflag & + INP_IPV4) != 0) + in6p->inp_ip_ttl = optval; +#endif +#endif + } + break; +#if HAVE_NRL_INPCB +#define OPTSET(bit) \ + if (optval) \ + inp->inp_flags |= (bit); \ + else \ + inp->inp_flags &= ~(bit); +#else +#define OPTSET(bit) \ + if (optval) \ + in6p->in6p_flags |= (bit); \ + else \ + in6p->in6p_flags &= ~(bit); +#endif +#if HAVE_NRL_INPCB +#define OPTBIT(bit) (inp->inp_flags & (bit) ? 1 : 0) +#else +#define OPTBIT(bit) (in6p->in6p_flags & (bit) ? 
1 : 0) +#endif + + case IPV6_RECVPKTINFO: + OPTSET(IN6P_PKTINFO); + if (OPTBIT(IN6P_PKTINFO) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVPKTINFO); + break; + + case IPV6_HOPLIMIT: + { +#if COMPAT_RFC2292 + OPTSET(IN6P_HOPLIMIT); + if (OPTBIT(IN6P_HOPLIMIT) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVHOPLIMIT); + break; +#else /* new advanced API (2292bis) */ + struct ip6_pktopts **optp; +#if HAVE_NRL_INPCB + optp = &inp->inp_outputopts6; +#else + optp = &in6p->in6p_outputopts; +#endif + + error = ip6_pcbopt(IPV6_HOPLIMIT, + (u_char *)&optval, + sizeof(optval), + optp, + privileged); + break; +#endif + } + + case IPV6_RECVHOPLIMIT: + OPTSET(IN6P_HOPLIMIT); + if (OPTBIT(IN6P_HOPLIMIT) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVHOPLIMIT); + break; + + case IPV6_RECVHOPOPTS: + OPTSET(IN6P_HOPOPTS); + if (OPTBIT(IN6P_HOPOPTS) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVHOPOPTS); + break; + + case IPV6_RECVDSTOPTS: + OPTSET(IN6P_DSTOPTS); + if (OPTBIT(IN6P_DSTOPTS) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVDSTOPTS); + break; + + case IPV6_RECVRTHDRDSTOPTS: + OPTSET(IN6P_RTHDRDSTOPTS); + if (OPTBIT(IN6P_RTHDRDSTOPTS) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVRTHDRDSTOPTS); + break; + + case IPV6_RECVRTHDR: + OPTSET(IN6P_RTHDR); + if (OPTBIT(IN6P_RTHDR) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVRTHDR); + break; + + case IPV6_CHECKSUM: +#if HAVE_NRL_INPCB + inp->inp_csumoffset = optval; +#else + in6p->in6p_cksum = optval; +#endif + break; + + case IPV6_FAITH: + OPTSET(IN6P_FAITH); + break; + + case IPV6_USE_MIN_MTU: + OPTSET(IN6P_MINMTU); + break; + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || (defined(__NetBSD__) && !defined(INET6_BINDV6ONLY)) || defined (__APPLE__) + case IPV6_BINDV6ONLY: + OPTSET(IN6P_BINDV6ONLY); + break; +#endif + } + } + break; + case IPV6_PKTINFO: + case IPV6_HOPOPTS: + case IPV6_RTHDR: + case IPV6_DSTOPTS: + case IPV6_RTHDRDSTOPTS: + if (optlen == sizeof(int)) { + /* RFC 2292 */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined 
(__APPLE__) + error = sooptcopyin(sopt, &optval, + sizeof optval, sizeof optval); + if (error == 0) + break; +#else + optval = *mtod(m, int *); +#endif + switch(optname) { + case IPV6_PKTINFO: + OPTSET(IN6P_PKTINFO); + if (OPTBIT(IN6P_PKTINFO) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVPKTINFO); + break; + case IPV6_HOPOPTS: + /* + * Check super-user privilege. + * See comments for + * IPV6_RECVHOPOPTS. + */ + if (!privileged) + return(EPERM); + OPTSET(IN6P_HOPOPTS); + if (OPTBIT(IN6P_HOPOPTS) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVHOPOPTS); + break; + case IPV6_DSTOPTS: + if (!privileged) + return(EPERM); + OPTSET(IN6P_DSTOPTS|IN6P_RTHDRDSTOPTS); /* XXX */ + if (OPTBIT(IN6P_DSTOPTS) == 0) { + ip6_reset_rcvopt(rcvopts, IPV6_RECVDSTOPTS); + ip6_reset_rcvopt(rcvopts, IPV6_RECVRTHDRDSTOPTS); + } + break; + case IPV6_RTHDR: + OPTSET(IN6P_RTHDR); + if (OPTBIT(IN6P_RTHDR) == 0) + ip6_reset_rcvopt(rcvopts, IPV6_RECVRTHDR); + break; + } + break; + } else { + /* new advanced API (2292bis) */ + u_char *optbuf; + int optlen; + struct ip6_pktopts **optp; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + optbuf = sopt->sopt_val; + optlen = sopt->sopt_valsize; +#else /* !fbsd3 */ + if (m && m->m_next) { + error = EINVAL; /* XXX */ + break; + } + if (m) { + optbuf = mtod(m, u_char *); + optlen = m->m_len; + } else { + optbuf = NULL; + optlen = 0; + } +#endif + +#if HAVE_NRL_INPCB + optp = &inp->inp_outputopts6; +#else + optp = &in6p->in6p_outputopts; +#endif + + error = ip6_pcbopt(optname, + optbuf, optlen, + optp, privileged); + } + break; +#undef OPTSET + + case IPV6_MULTICAST_IF: + case IPV6_MULTICAST_HOPS: + case IPV6_MULTICAST_LOOP: + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + { + struct mbuf *m; + if (sopt->sopt_valsize > MLEN) { + error = EMSGSIZE; + break; + } + /* XXX */ + MGET(m, sopt->sopt_p ? 
M_WAIT : M_DONTWAIT, MT_HEADER); + if (m == 0) { + error = ENOBUFS; + break; + } + m->m_len = sopt->sopt_valsize; + error = sooptcopyin(sopt, mtod(m, char *), + m->m_len, m->m_len); + error = ip6_setmoptions(sopt->sopt_name, + &in6p->in6p_moptions, + m); + (void)m_free(m); + } +#else +#if HAVE_NRL_INPCB + error = ip6_setmoptions(optname, + &inp->inp_moptions6, m); + /* + * XXX: setting the flag would be redundant + * except at the first time. Also, we + * actually don't have to reset the flag, + * since ip6_freemoptions() would simply + * return when the inp_moptions6 is NULL. + */ + if (inp->inp_moptions6) + inp->inp_flags |= INP_IPV6_MCAST; + else + inp->inp_flags &= ~INP_IPV6_MCAST; +#else + error = ip6_setmoptions(optname, + &in6p->in6p_moptions, m); +#endif +#endif + break; + +#ifndef __bsdi__ + case IPV6_PORTRANGE: +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + error = sooptcopyin(sopt, &optval, sizeof optval, + sizeof optval); + if (error) + break; +#else + optval = *mtod(m, int *); +#endif + +#if HAVE_NRL_INPCB +# define in6p inp +# define in6p_flags inp_flags +#endif + switch (optval) { + case IPV6_PORTRANGE_DEFAULT: + in6p->in6p_flags &= ~(IN6P_LOWPORT); + in6p->in6p_flags &= ~(IN6P_HIGHPORT); + break; + + case IPV6_PORTRANGE_HIGH: + in6p->in6p_flags &= ~(IN6P_LOWPORT); + in6p->in6p_flags |= IN6P_HIGHPORT; + break; + + case IPV6_PORTRANGE_LOW: + in6p->in6p_flags &= ~(IN6P_HIGHPORT); + in6p->in6p_flags |= IN6P_LOWPORT; + break; + + default: + error = EINVAL; + break; + } +#if HAVE_NRL_INPCB +# undef in6p +# undef in6p_flags +#endif + break; +#endif + +#if IPSEC + case IPV6_IPSEC_POLICY: + { + caddr_t req = NULL; + size_t len = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct mbuf *m; +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (error = sooptgetm(sopt, &m)) /* XXX */ + break; + if (error = sooptmcopyin(sopt, m)) /* XXX */ + break; +#endif + if (m) { + req = 
mtod(m, caddr_t); + len = m->m_len; + } +#if HAVE_NRL_INPCB + error = ipsec6_set_policy(inp, optname, req, + len, privileged); +#else + error = ipsec6_set_policy(in6p, optname, req, + len, privileged); +#endif +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + m_freem(m); +#endif + } + break; +#endif /* IPSEC */ + +#if IPV6FIREWALL + case IPV6_FW_ADD: + case IPV6_FW_DEL: + case IPV6_FW_FLUSH: + case IPV6_FW_ZERO: + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct mbuf *m; + struct mbuf **mp = &m; +#endif + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (ip6_fw_ctl_ptr == NULL) + return EINVAL; + if (error = sooptgetm(sopt, &m)) /* XXX */ + break; + if (error = sooptmcopyin(sopt, m)) /* XXX */ + break; +#else + if (ip6_fw_ctl_ptr == NULL) { + if (m) (void)m_free(m); + return EINVAL; + } +#endif + error = (*ip6_fw_ctl_ptr)(optname, mp); + m = *mp; + } + break; +#endif + + default: + error = ENOPROTOOPT; + break; + } +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + if (m) + (void)m_free(m); +#endif + break; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + case SOPT_GET: +#else + case PRCO_GETOPT: +#endif + switch (optname) { + + case IPV6_PKTOPTIONS: +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (in6p->in6p_inputopts && + in6p->in6p_inputopts->head) { + error = sooptmcopyout(sopt, + in6p->in6p_inputopts->head); + } else + sopt->sopt_valsize = 0; +#elif defined(HAVE_NRL_INPCB) + if (inp->inp_options) { + *mp = m_copym(inp->inp_options, 0, + M_COPYALL, M_WAIT); + } else { + *mp = m_get(M_WAIT, MT_SOOPTS); + (*mp)->m_len = 0; + } +#else + if (in6p->in6p_inputopts && + in6p->in6p_inputopts->head) { + *mp = m_copym(in6p->in6p_inputopts->head, + 0, M_COPYALL, M_WAIT); + } else { + *mp = m_get(M_WAIT, MT_SOOPTS); + (*mp)->m_len = 0; + } +#endif + break; + + case IPV6_RECVHOPOPTS: + case IPV6_RECVDSTOPTS: + case 
IPV6_RECVRTHDRDSTOPTS: + if (!privileged) { + error = EPERM; + break; + } + /* fall through */ + case IPV6_UNICAST_HOPS: + case IPV6_CHECKSUM: + + case IPV6_RECVPKTINFO: + case IPV6_RECVHOPLIMIT: + case IPV6_RECVRTHDR: + case IPV6_USE_MIN_MTU: +#ifdef notyet /* To be implemented */ + case IPV6_RECVPATHMTU: +#endif + + case IPV6_FAITH: +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || (defined(__NetBSD__) && !defined(INET6_BINDV6ONLY)) || defined(__APPLE__) + case IPV6_BINDV6ONLY: +#endif +#ifndef __bsdi__ + case IPV6_PORTRANGE: +#endif + switch (optname) { + + case IPV6_UNICAST_HOPS: +#if HAVE_NRL_INPCB + optval = inp->inp_hops; +#else + optval = in6p->in6p_hops; +#endif + break; + + case IPV6_RECVPKTINFO: + optval = OPTBIT(IN6P_PKTINFO); + break; + + case IPV6_RECVHOPLIMIT: + optval = OPTBIT(IN6P_HOPLIMIT); + break; + + case IPV6_RECVHOPOPTS: + optval = OPTBIT(IN6P_HOPOPTS); + break; + + case IPV6_RECVDSTOPTS: + optval = OPTBIT(IN6P_DSTOPTS); + break; + + case IPV6_RECVRTHDRDSTOPTS: + optval = OPTBIT(IN6P_RTHDRDSTOPTS); + break; + + case IPV6_CHECKSUM: +#if HAVE_NRL_INPCB + optval = inp->inp_csumoffset; +#else + optval = in6p->in6p_cksum; +#endif + break; + + case IPV6_USE_MIN_MTU: + optval = OPTBIT(IN6P_MINMTU); + break; + + case IPV6_FAITH: + optval = OPTBIT(IN6P_FAITH); + break; + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || (defined(__NetBSD__) && !defined(INET6_BINDV6ONLY)) || defined (__APPLE__) + case IPV6_BINDV6ONLY: + optval = OPTBIT(IN6P_BINDV6ONLY); + break; +#endif + +#ifndef __bsdi__ + case IPV6_PORTRANGE: + { + int flags; +#if HAVE_NRL_INPCB + flags = inp->inp_flags; +#else + flags = in6p->in6p_flags; +#endif + if (flags & IN6P_HIGHPORT) + optval = IPV6_PORTRANGE_HIGH; + else if (flags & IN6P_LOWPORT) + optval = IPV6_PORTRANGE_LOW; + else + optval = 0; + break; + } +#endif + } +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + error = sooptcopyout(sopt, &optval, + sizeof optval); +#else + *mp = m = m_get(M_WAIT, MT_SOOPTS); 
+ m->m_len = sizeof(int); + *mtod(m, int *) = optval; +#endif + break; + + case IPV6_PKTINFO: + case IPV6_HOPOPTS: + case IPV6_RTHDR: + case IPV6_DSTOPTS: + case IPV6_RTHDRDSTOPTS: +#if COMPAT_RFC2292 + if (optname == IPV6_HOPOPTS || + optname == IPV6_DSTOPTS || + !privileged) + return(EPERM); + switch(optname) { + case IPV6_PKTINFO: + optbit = OPTBIT(IN6P_PKTINFO); + break; + case IPV6_HOPLIMIT: + optval = OPTBIT(IN6P_HOPLIMIT); + break; + case IPV6_HOPOPTS: + optbit = OPTBIT(IN6P_HOPOPTS); + break; + case IPV6_RTHDR: + optbit = OPTBIT(IN6P_RTHDR); + break; + case IPV6_DSTOPTS: + optbit = OPTBIT(IN6P_DSTOPTS|IN6P_RTHDRDSTOPTS); + break; + case IPV6_RTHDRDSTOPTS: /* in 2292bis only */ + return(EOPNOTSUPP); + } +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + error = sooptcopyout(sopt, &optval, + sizeof optval); +#else + *mp = m = m_get(M_WAIT, MT_SOOPTS); + m->m_len = sizeof(int); + *mtod(m, int *) = optval; +#endif /* FreeBSD3 */ +#else /* new advanced API */ +#if HAVE_NRL_INPCB +#define in6p inp +#define in6p_outputopts inp_outputopts6 +#endif + error = ip6_getpcbopt(in6p->in6p_outputopts, + optname, &optdata, + &optdatalen); + if (error == 0) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + /* note that optdatalen maybe 0 */ + error = sooptcopyout(sopt, optdata, + optdatalen); +#else /* !FreeBSD3 */ + if (optdatalen > MCLBYTES) + return(EMSGSIZE); /* XXX */ + *mp = m = m_get(M_WAIT, MT_SOOPTS); + if (optdatalen > MLEN) + MCLGET(m, M_WAIT); + m->m_len = optdatalen; + bcopy(optdata, mtod(m, void *), + optdatalen); +#endif /* FreeBSD3 */ + } +#if HAVE_NRL_INPCB +#undef in6p +#undef in6p_outputopts +#endif +#endif /* COMPAT_RFC2292 */ + break; + + case IPV6_MULTICAST_IF: + case IPV6_MULTICAST_HOPS: + case IPV6_MULTICAST_LOOP: + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + { + struct mbuf *m; + error = ip6_getmoptions(sopt->sopt_name, + 
in6p->in6p_moptions, &m); + if (error == 0) + error = sooptcopyout(sopt, + mtod(m, char *), m->m_len); + m_freem(m); + } +#elif defined(HAVE_NRL_INPCB) + error = ip6_getmoptions(optname, inp->inp_moptions6, mp); +#else + error = ip6_getmoptions(optname, in6p->in6p_moptions, mp); +#endif + break; + +#if IPSEC + case IPV6_IPSEC_POLICY: + { + caddr_t req = NULL; + size_t len = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct mbuf *m = NULL; + struct mbuf **mp = &m; + + error = sooptgetm(sopt, &m); /* XXX */ + if (error != NULL) + break; + error = sooptmcopyin(sopt, m); /* XXX */ + if (error != NULL) + break; +#endif + if (m) { + req = mtod(m, caddr_t); + len = m->m_len; + } +#if HAVE_NRL_INPCB + error = ipsec6_get_policy(inp, req, len, mp); +#else + error = ipsec6_get_policy(in6p, req, len, mp); +#endif +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (error == 0) + error = sooptmcopyout(sopt, m); /*XXX*/ + m_freem(m); +#endif + break; + } +#endif /* IPSEC */ + +#if IPV6FIREWALL + case IPV6_FW_GET: + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct mbuf *m; + struct mbuf **mp = &m; +#endif + + if (ip6_fw_ctl_ptr == NULL) + { +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + if (m) + (void)m_free(m); +#endif + return EINVAL; + } + error = (*ip6_fw_ctl_ptr)(optname, mp); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (error == 0) + error = sooptmcopyout(sopt, m); /* XXX */ + if (m) + m_freem(m); +#endif + } + break; +#endif + + default: + error = ENOPROTOOPT; + break; + } + break; + } + } else { + error = EINVAL; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + if (op == PRCO_SETOPT && *mp) + (void)m_free(*mp); +#endif + } + return(error); +} + +/* + * Set up IP6 options in pcb for insertion in output packets or + * specifying behavior of outgoing packets. 
+ */ +static int +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +ip6_pcbopts(pktopt, m, so, sopt) +#else +ip6_pcbopts(pktopt, m, so) +#endif + struct ip6_pktopts **pktopt; + register struct mbuf *m; + struct socket *so; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct sockopt *sopt; +#endif +{ + register struct ip6_pktopts *opt = *pktopt; + int error = 0; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct proc *p = sopt->sopt_p; +#else + struct proc *p = curproc; /* XXX */ +#endif + int priv = 0; + + /* turn off any old options. */ + if (opt) { +#if DIAGNOSTIC + if (opt->ip6po_pktinfo || opt->ip6po_nexthop || + opt->ip6po_hbh || opt->ip6po_dest1 || opt->ip6po_dest2 || + opt->ip6po_rhinfo.ip6po_rhi_rthdr) + printf("ip6_pcbopts: all specified options are cleared.\n"); +#endif + ip6_clearpktopts(opt, 1, -1); + } + else + opt = _MALLOC(sizeof(*opt), M_IP6OPT, M_WAITOK); + *pktopt = NULL; + + if (!m || m->m_len == 0) { + /* + * Only turning off any previous options. + */ + if (opt) + _FREE(opt, M_IP6OPT); + return(0); + } + + /* set options specified by user. */ +#if 0 + if (p && !suser(p->p_ucred, &p->p_acflag)) + priv = 1; +#endif + if ((error = ip6_setpktoptions(m, opt, priv, 1)) != 0) { + ip6_clearpktopts(opt, 1, -1); /* XXX: discard all options */ + return(error); + } + *pktopt = opt; + return(0); +} + +/* + * Set up an IP6 option in pcb for insertion in output packets or + * specifying behavior of outgoing packets. + * XXX: The logic of this function is very similar to ip6_setpktoptions(). 
+ */ +static int +ip6_pcbopt(optname, buf, len, pktopt, priv) + int optname, len, priv; + u_char *buf; + struct ip6_pktopts **pktopt; +{ + struct ip6_pktopts *opt; + struct in6_pktinfo *pktinfo; + + if (*pktopt == NULL) { + *pktopt = _MALLOC(sizeof(struct ip6_pktopts), M_IP6OPT, + M_WAITOK); + bzero(*pktopt, sizeof(struct ip6_pktopts)); + (*pktopt)->ip6po_hlim = -1; + } + opt = *pktopt; + + switch(optname) { + case IPV6_PKTINFO: + if (len == 0) { /* just remove the option */ + ip6_clearpktopts(opt, 1, IPV6_PKTINFO); + break; + } + + if (len != sizeof(struct in6_pktinfo)) + return EINVAL; + pktinfo = (struct in6_pktinfo *)buf; + + /* + * An application can clear any sticky IPV6_PKTINFO option by + * doing a "regular" setsockopt with ipi6_addr being + * in6addr_any and ipi6_ifindex being zero. + * [rfc2292bis-01, Section 6] + * XXX: Is this a good feature?? (jinmei@kame.net) + */ + if (pktinfo->ipi6_ifindex == 0 && + IN6_IS_ADDR_UNSPECIFIED(&pktinfo->ipi6_addr)) { + ip6_clearpktopts(opt, 1, IPV6_PKTINFO); + break; + } + + /* XXX: this overrides the original data space */ + if (pktinfo->ipi6_ifindex && + IN6_IS_ADDR_LINKLOCAL(&pktinfo->ipi6_addr)) + pktinfo->ipi6_addr.s6_addr16[1] = + htons(pktinfo->ipi6_ifindex); + + if (pktinfo->ipi6_ifindex > if_index || + pktinfo->ipi6_ifindex < 0) + return(ENXIO); + + /* + * Check if the requested source address is indeed a unicast + * address assigned to the node. 
+ */ + if (!IN6_IS_ADDR_UNSPECIFIED(&pktinfo->ipi6_addr)) { + struct ifaddr *ia; + struct sockaddr_in6 sin6; + + bzero(&sin6, sizeof(sin6)); + sin6.sin6_len = sizeof(sin6); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = pktinfo->ipi6_addr; + ia = ifa_ifwithaddr(sin6tosa(&sin6)); + if (ia == NULL) + return(EADDRNOTAVAIL); + } + + if (opt->ip6po_pktinfo == NULL) + opt->ip6po_pktinfo = _MALLOC(sizeof(struct in6_pktinfo), + M_IP6OPT, M_WAITOK); + bcopy(pktinfo, opt->ip6po_pktinfo, sizeof(*pktinfo)); + + break; + case IPV6_HOPLIMIT: + { + int *hlimp; + + if (len != sizeof(int)) + return(EINVAL); + hlimp = (int *)buf; + if (*hlimp < -1 || *hlimp > 255) + return(EINVAL); + + opt->ip6po_hlim = *hlimp; + break; + } + case IPV6_NEXTHOP: + if (!priv) + return(EPERM); + + if (len == 0) { /* just remove the option */ + ip6_clearpktopts(opt, 1, IPV6_NEXTHOP); + break; + } + + /* check if cmsg_len is large enough for sa_len */ + if (len < sizeof(u_char) || + len < *buf) + return(EINVAL); + + /* turn off the previous option */ + ip6_clearpktopts(opt, 1, IPV6_NEXTHOP); + + opt->ip6po_nexthop = _MALLOC(*buf, M_IP6OPT, M_WAITOK); + bcopy(buf, opt->ip6po_nexthop, *buf); + break; + case IPV6_HOPOPTS: + { + struct ip6_hbh *hbh; + int hbhlen; + + /* + * XXX: We don't allow a non-privileged user to set ANY HbH + * options, since per-option restriction has too much + * overhead. 
+ */ + if (!priv) + return(EPERM); + + if (len == 0) { + ip6_clearpktopts(opt, 1, IPV6_HOPOPTS); + break; /* just remove the option */ + } + + if (len < sizeof(struct ip6_hbh)) + return(EINVAL); + hbh = (struct ip6_hbh *)buf; + hbhlen = (hbh->ip6h_len + 1) << 3; + if (len != hbhlen) + return(EINVAL); + + /* turn off the previous option */ + ip6_clearpktopts(opt, 1, IPV6_HOPOPTS); + + opt->ip6po_hbh = _MALLOC(hbhlen, M_IP6OPT, M_WAITOK); + bcopy(buf, opt->ip6po_hbh, hbhlen); + + break; + } + case IPV6_DSTOPTS: + case IPV6_RTHDRDSTOPTS: + { + struct ip6_dest *dest, *newdest; + int destlen; + + if (!priv) /* XXX: see the comment for IPV6_HOPOPTS */ + return(EPERM); + + if (len == 0) { + ip6_clearpktopts(opt, 1, optname); + break; /* just remove the option */ + } + + if (len < sizeof(struct ip6_dest)) + return(EINVAL); + dest = (struct ip6_dest *)buf; + destlen = (dest->ip6d_len + 1) << 3; + if (len != destlen) + return(EINVAL); + + /* turn off the previous option */ + ip6_clearpktopts(opt, 1, optname); + + newdest = _MALLOC(destlen, M_IP6OPT, M_WAITOK); + bcopy(buf, newdest, destlen); + + if (optname == IPV6_DSTOPTS) + opt->ip6po_dest2 = newdest; + else + opt->ip6po_dest1 = newdest; + + break; + } + case IPV6_RTHDR: + { + struct ip6_rthdr *rth; + int rthlen; + + if (len == 0) { + ip6_clearpktopts(opt, 1, IPV6_RTHDR); + break; /* just remove the option */ + } + + if (len < sizeof(struct ip6_rthdr)) + return(EINVAL); + rth = (struct ip6_rthdr *)buf; + rthlen = (rth->ip6r_len + 1) << 3; + if (len != rthlen) + return(EINVAL); + + switch(rth->ip6r_type) { + case IPV6_RTHDR_TYPE_0: + if (rth->ip6r_len == 0) /* must contain one addr */ + return(EINVAL); + if (rth->ip6r_len % 2) /* length must be even */ + return(EINVAL); + if (rth->ip6r_len / 2 != rth->ip6r_segleft) + return(EINVAL); + break; + default: + return(EINVAL); /* not supported */ + } + + /* turn off the previous option */ + ip6_clearpktopts(opt, 1, IPV6_RTHDR); + + opt->ip6po_rthdr = _MALLOC(rthlen, M_IP6OPT, 
M_WAITOK); + bcopy(buf, opt->ip6po_rthdr, rthlen); + + break; + } + default: + return(ENOPROTOOPT); + } /* end of switch */ + + return(0); +} + +static int +ip6_getpcbopt(pktopt, optname, datap, datalenp) + struct ip6_pktopts *pktopt; + int optname, *datalenp; + void **datap; +{ + void *optdata = NULL; + struct ip6_ext *ip6e; + int optdatalen = 0; + + if (pktopt == NULL) + goto end; + + switch(optname) { + case IPV6_PKTINFO: + if (pktopt->ip6po_pktinfo) { + optdata = (void *)pktopt->ip6po_pktinfo; + optdatalen = sizeof(struct in6_pktinfo); + } + break; + case IPV6_HOPLIMIT: + optdata = (void *)&pktopt->ip6po_hlim; + optdatalen = sizeof(int); + break; + case IPV6_HOPOPTS: + if (pktopt->ip6po_hbh) { + optdata = (void *)pktopt->ip6po_hbh; + ip6e = (struct ip6_ext *)pktopt->ip6po_hbh; + optdatalen = (ip6e->ip6e_len + 1) << 3; + } + break; + case IPV6_RTHDR: + if (pktopt->ip6po_rthdr) { + optdata = (void *)pktopt->ip6po_rthdr; + ip6e = (struct ip6_ext *)pktopt->ip6po_rthdr; + optdatalen = (ip6e->ip6e_len + 1) << 3; + } + break; + case IPV6_RTHDRDSTOPTS: + if (pktopt->ip6po_dest1) { + optdata = (void *)pktopt->ip6po_dest1; + ip6e = (struct ip6_ext *)pktopt->ip6po_dest1; + optdatalen = (ip6e->ip6e_len + 1) << 3; + } + break; + case IPV6_DSTOPTS: + if (pktopt->ip6po_dest2) { + optdata = (void *)pktopt->ip6po_dest2; + ip6e = (struct ip6_ext *)pktopt->ip6po_dest2; + optdatalen = (ip6e->ip6e_len + 1) << 3; + } + break; + } + + end: + *datap = optdata; + *datalenp = optdatalen; + + return(0); +} + +void +ip6_clearpktopts(pktopt, needfree, optname) + struct ip6_pktopts *pktopt; + int needfree, optname; +{ + if (pktopt == NULL) + return; + + if (optname == -1 || optname == IPV6_PKTINFO) { + if (needfree && pktopt->ip6po_pktinfo) + _FREE(pktopt->ip6po_pktinfo, M_IP6OPT); + pktopt->ip6po_pktinfo = NULL; + } + if (optname == -1 || optname == IPV6_HOPLIMIT) + pktopt->ip6po_hlim = -1; + if (optname == -1 || optname == IPV6_NEXTHOP) { + if (needfree && pktopt->ip6po_nexthop) + 
_FREE(pktopt->ip6po_nexthop, M_IP6OPT); + pktopt->ip6po_nexthop = NULL; + } + if (optname == -1 || optname == IPV6_HOPOPTS) { + if (needfree && pktopt->ip6po_hbh) + _FREE(pktopt->ip6po_hbh, M_IP6OPT); + pktopt->ip6po_hbh = NULL; + } + if (optname == -1 || optname == IPV6_RTHDRDSTOPTS) { + if (needfree && pktopt->ip6po_dest1) + _FREE(pktopt->ip6po_dest1, M_IP6OPT); + pktopt->ip6po_dest1 = NULL; + } + if (optname == -1 || optname == IPV6_RTHDR) { + if (needfree && pktopt->ip6po_rhinfo.ip6po_rhi_rthdr) + _FREE(pktopt->ip6po_rhinfo.ip6po_rhi_rthdr, M_IP6OPT); + pktopt->ip6po_rhinfo.ip6po_rhi_rthdr = NULL; + if (pktopt->ip6po_route.ro_rt) { + RTFREE(pktopt->ip6po_route.ro_rt); + pktopt->ip6po_route.ro_rt = NULL; + } + } + if (optname == -1 || optname == IPV6_DSTOPTS) { + if (needfree && pktopt->ip6po_dest2) + _FREE(pktopt->ip6po_dest2, M_IP6OPT); + pktopt->ip6po_dest2 = NULL; + } +} + +#define PKTOPT_EXTHDRCPY(type) if (src->type) {\ + int hlen =\ + (((struct ip6_ext *)src->type)->ip6e_len + 1) << 3;\ + dst->type = _MALLOC(hlen, M_IP6OPT, canwait);\ + if (dst->type == NULL && canwait == M_NOWAIT)\ + goto bad;\ + bcopy(src->type, dst->type, hlen);\ + } + +struct ip6_pktopts * +ip6_copypktopts(src, canwait) + struct ip6_pktopts *src; + int canwait; +{ + struct ip6_pktopts *dst; + + if (src == NULL) { + printf("ip6_clearpktopts: invalid argument\n"); + return(NULL); + } + + dst = _MALLOC(sizeof(*dst), M_IP6OPT, canwait); + if (dst == NULL && canwait == M_NOWAIT) + goto bad; + bzero(dst, sizeof(*dst)); + + dst->ip6po_hlim = src->ip6po_hlim; + dst->ip6po_flags = src->ip6po_flags; + if (src->ip6po_pktinfo) { + dst->ip6po_pktinfo = _MALLOC(sizeof(*dst->ip6po_pktinfo), + M_IP6OPT, canwait); + if (dst->ip6po_pktinfo == NULL && canwait == M_NOWAIT) + goto bad; + *dst->ip6po_pktinfo = *src->ip6po_pktinfo; + } + if (src->ip6po_nexthop) { + dst->ip6po_nexthop = _MALLOC(src->ip6po_nexthop->sa_len, + M_IP6OPT, canwait); + if (dst->ip6po_nexthop == NULL && canwait == M_NOWAIT) + goto 
bad; + bcopy(src->ip6po_nexthop, dst->ip6po_nexthop, + src->ip6po_nexthop->sa_len); + } + PKTOPT_EXTHDRCPY(ip6po_hbh); + PKTOPT_EXTHDRCPY(ip6po_dest1); + PKTOPT_EXTHDRCPY(ip6po_dest2); + PKTOPT_EXTHDRCPY(ip6po_rthdr); /* not copy the cached route */ + return(dst); + + bad: + printf("ip6_copypktopts: copy failed"); + if (dst->ip6po_pktinfo) _FREE(dst->ip6po_pktinfo, M_IP6OPT); + if (dst->ip6po_nexthop) _FREE(dst->ip6po_nexthop, M_IP6OPT); + if (dst->ip6po_hbh) _FREE(dst->ip6po_hbh, M_IP6OPT); + if (dst->ip6po_dest1) _FREE(dst->ip6po_dest1, M_IP6OPT); + if (dst->ip6po_dest2) _FREE(dst->ip6po_dest2, M_IP6OPT); + if (dst->ip6po_rthdr) _FREE(dst->ip6po_rthdr, M_IP6OPT); + return(NULL); +} +#undef PKTOPT_EXTHDRCPY + +void +ip6_freepcbopts(pktopt) + struct ip6_pktopts *pktopt; +{ + if (pktopt == NULL) + return; + + ip6_clearpktopts(pktopt, 1, -1); + + _FREE(pktopt, M_IP6OPT); +} + +/* + * Set the IP6 multicast options in response to user setsockopt(). + */ +static int +ip6_setmoptions(optname, im6op, m) + int optname; + struct ip6_moptions **im6op; + struct mbuf *m; +{ + int error = 0; + u_int loop, ifindex; + struct ipv6_mreq *mreq; + struct ifnet *ifp; + struct ip6_moptions *im6o = *im6op; + struct route_in6 ro; + struct sockaddr_in6 *dst; + struct in6_multi_mship *imm; + + struct proc *p = current_proc(); /* ### */ + + if (im6o == NULL) { + /* + * No multicast option buffer attached to the pcb; + * allocate one and initialize to default values. + */ + im6o = (struct ip6_moptions *) + _MALLOC(sizeof(*im6o), M_IPMOPTS, M_WAITOK); + + if (im6o == NULL) + return(ENOBUFS); + *im6op = im6o; + im6o->im6o_multicast_ifp = NULL; + im6o->im6o_multicast_hlim = ip6_defmcasthlim; + im6o->im6o_multicast_loop = IPV6_DEFAULT_MULTICAST_LOOP; + LIST_INIT(&im6o->im6o_memberships); + } + + switch (optname) { + + case IPV6_MULTICAST_IF: + /* + * Select the interface for outgoing multicast packets. 
+ */ + if (m == NULL || m->m_len != sizeof(u_int)) { + error = EINVAL; + break; + } + ifindex = *(mtod(m, u_int *)); + if (ifindex < 0 || if_index < ifindex) { + error = ENXIO; /* XXX EINVAL? */ + break; + } + ifp = ifindex2ifnet[ifindex]; + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + error = EADDRNOTAVAIL; + break; + } + im6o->im6o_multicast_ifp = ifp; + break; + + case IPV6_MULTICAST_HOPS: + { + /* + * Set the IP6 hoplimit for outgoing multicast packets. + */ + int optval; + if (m == NULL || m->m_len != sizeof(int)) { + error = EINVAL; + break; + } + optval = *(mtod(m, u_int *)); + if (optval < -1 || optval >= 256) + error = EINVAL; + else if (optval == -1) + im6o->im6o_multicast_hlim = ip6_defmcasthlim; + else + im6o->im6o_multicast_hlim = optval; + break; + } + + case IPV6_MULTICAST_LOOP: + /* + * Set the loopback flag for outgoing multicast packets. + * Must be zero or one. + */ + if (m == NULL || m->m_len != sizeof(u_int) || + (loop = *(mtod(m, u_int *))) > 1) { + error = EINVAL; + break; + } + im6o->im6o_multicast_loop = loop; + break; + + case IPV6_JOIN_GROUP: + /* + * Add a multicast group membership. + * Group must be a valid IP6 multicast address. + */ + if (m == NULL || m->m_len != sizeof(struct ipv6_mreq)) { + error = EINVAL; + break; + } + mreq = mtod(m, struct ipv6_mreq *); + if (IN6_IS_ADDR_UNSPECIFIED(&mreq->ipv6mr_multiaddr)) { + /* + * We use the unspecified address to specify to accept + * all multicast addresses. Only super user is allowed + * to do this. + */ +#if ISFB31 + if (suser(p->p_ucred, &p->p_acflag)) { + error = EACCES; + break; + } +#endif + } else if (!IN6_IS_ADDR_MULTICAST(&mreq->ipv6mr_multiaddr)) { + error = EINVAL; + break; + } + + /* + * If the interface is specified, validate it. + */ + if (mreq->ipv6mr_interface < 0 + || if_index < mreq->ipv6mr_interface) { + error = ENXIO; /* XXX EINVAL? 
*/ + break; + } + /* + * If no interface was explicitly specified, choose an + * appropriate one according to the given multicast address. + */ + if (mreq->ipv6mr_interface == 0) { + /* + * If the multicast address is in node-local scope, + * the interface should be a loopback interface. + * Otherwise, look up the routing table for the + * address, and choose the outgoing interface. + * XXX: is it a good approach? + */ + if (IN6_IS_ADDR_MC_NODELOCAL(&mreq->ipv6mr_multiaddr)) { +#ifdef __bsdi__ + ifp = loifp; +#else + ifp = &loif[0]; +#endif + } else { + ro.ro_rt = NULL; + dst = (struct sockaddr_in6 *)&ro.ro_dst; + bzero(dst, sizeof(*dst)); + dst->sin6_len = sizeof(struct sockaddr_in6); + dst->sin6_family = AF_INET6; + dst->sin6_addr = mreq->ipv6mr_multiaddr; + rtalloc((struct route *)&ro); + if (ro.ro_rt == NULL) { + error = EADDRNOTAVAIL; + break; + } + ifp = ro.ro_rt->rt_ifp; + rtfree(ro.ro_rt); + } + } else + ifp = ifindex2ifnet[mreq->ipv6mr_interface]; + + /* + * See if we found an interface, and confirm that it + * supports multicast + */ + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + error = EADDRNOTAVAIL; + break; + } + /* + * Put interface index into the multicast address, + * if the address has link-local scope. + */ + if (IN6_IS_ADDR_MC_LINKLOCAL(&mreq->ipv6mr_multiaddr)) { + mreq->ipv6mr_multiaddr.s6_addr16[1] + = htons(mreq->ipv6mr_interface); + } + /* + * See if the membership already exists. + */ + for (imm = im6o->im6o_memberships.lh_first; + imm != NULL; imm = imm->i6mm_chain.le_next) + if (imm->i6mm_maddr->in6m_ifp == ifp && + IN6_ARE_ADDR_EQUAL(&imm->i6mm_maddr->in6m_addr, + &mreq->ipv6mr_multiaddr)) + break; + if (imm != NULL) { + error = EADDRINUSE; + break; + } + /* + * Everything looks good; add a new record to the multicast + * address list for the given interface. 
+ */ + imm = _MALLOC(sizeof(*imm), M_IPMADDR, M_WAITOK); + if (imm == NULL) { + error = ENOBUFS; + break; + } + if ((imm->i6mm_maddr = + in6_addmulti(&mreq->ipv6mr_multiaddr, ifp, &error)) == NULL) { + _FREE(imm, M_IPMADDR); + break; + } + LIST_INSERT_HEAD(&im6o->im6o_memberships, imm, i6mm_chain); + break; + + case IPV6_LEAVE_GROUP: + /* + * Drop a multicast group membership. + * Group must be a valid IP6 multicast address. + */ + if (m == NULL || m->m_len != sizeof(struct ipv6_mreq)) { + error = EINVAL; + break; + } + mreq = mtod(m, struct ipv6_mreq *); + if (IN6_IS_ADDR_UNSPECIFIED(&mreq->ipv6mr_multiaddr)) { + if (suser(p->p_ucred, &p->p_acflag)) { + error = EACCES; + break; + } + } else if (!IN6_IS_ADDR_MULTICAST(&mreq->ipv6mr_multiaddr)) { + error = EINVAL; + break; + } + /* + * If an interface address was specified, get a pointer + * to its ifnet structure. + */ + if (mreq->ipv6mr_interface < 0 + || if_index < mreq->ipv6mr_interface) { + error = ENXIO; /* XXX EINVAL? */ + break; + } + ifp = ifindex2ifnet[mreq->ipv6mr_interface]; + /* + * Put interface index into the multicast address, + * if the address has link-local scope. + */ + if (IN6_IS_ADDR_MC_LINKLOCAL(&mreq->ipv6mr_multiaddr)) { + mreq->ipv6mr_multiaddr.s6_addr16[1] + = htons(mreq->ipv6mr_interface); + } + /* + * Find the membership in the membership list. + */ + for (imm = im6o->im6o_memberships.lh_first; + imm != NULL; imm = imm->i6mm_chain.le_next) { + if ((ifp == NULL || + imm->i6mm_maddr->in6m_ifp == ifp) && + IN6_ARE_ADDR_EQUAL(&imm->i6mm_maddr->in6m_addr, + &mreq->ipv6mr_multiaddr)) + break; + } + if (imm == NULL) { + /* Unable to resolve interface */ + error = EADDRNOTAVAIL; + break; + } + /* + * Give up the multicast address record to which the + * membership points. + */ + LIST_REMOVE(imm, i6mm_chain); + in6_delmulti(imm->i6mm_maddr); + _FREE(imm, M_IPMADDR); + break; + + default: + error = EOPNOTSUPP; + break; + } + + /* + * If all options have default values, no need to keep the mbuf. 
+ */ + if (im6o->im6o_multicast_ifp == NULL && + im6o->im6o_multicast_hlim == ip6_defmcasthlim && + im6o->im6o_multicast_loop == IPV6_DEFAULT_MULTICAST_LOOP && + im6o->im6o_memberships.lh_first == NULL) { + _FREE(*im6op, M_IPMOPTS); + *im6op = NULL; + } + + return(error); +} + +/* + * Return the IP6 multicast options in response to user getsockopt(). + */ +static int +ip6_getmoptions(optname, im6o, mp) + int optname; + register struct ip6_moptions *im6o; + register struct mbuf **mp; +{ + u_int *hlim, *loop, *ifindex; + +#if __FreeBSD__ || defined (__APPLE__) + *mp = m_get(M_WAIT, MT_HEADER); /*XXX*/ +#else + *mp = m_get(M_WAIT, MT_SOOPTS); +#endif + + switch (optname) { + + case IPV6_MULTICAST_IF: + ifindex = mtod(*mp, u_int *); + (*mp)->m_len = sizeof(u_int); + if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) + *ifindex = 0; + else + *ifindex = im6o->im6o_multicast_ifp->if_index; + return(0); + + case IPV6_MULTICAST_HOPS: + hlim = mtod(*mp, u_int *); + (*mp)->m_len = sizeof(u_int); + if (im6o == NULL) + *hlim = ip6_defmcasthlim; + else + *hlim = im6o->im6o_multicast_hlim; + return(0); + + case IPV6_MULTICAST_LOOP: + loop = mtod(*mp, u_int *); + (*mp)->m_len = sizeof(u_int); + if (im6o == NULL) + *loop = ip6_defmcasthlim; + else + *loop = im6o->im6o_multicast_loop; + return(0); + + default: + return(EOPNOTSUPP); + } +} + +/* + * Discard the IP6 multicast options. + */ +void +ip6_freemoptions(im6o) + register struct ip6_moptions *im6o; +{ + struct in6_multi_mship *imm; + + if (im6o == NULL) + return; + + while ((imm = im6o->im6o_memberships.lh_first) != NULL) { + LIST_REMOVE(imm, i6mm_chain); + if (imm->i6mm_maddr) + in6_delmulti(imm->i6mm_maddr); + _FREE(imm, M_IPMADDR); + } + _FREE(im6o, M_IPMOPTS); +} + +/* + * Set IPv6 outgoing packet options based on advanced API. 
+ */ +int +ip6_setpktoptions(control, opt, priv, needcopy) + struct mbuf *control; + struct ip6_pktopts *opt; + int priv, needcopy; +{ + register struct cmsghdr *cm = 0; + + if (control == 0 || opt == 0) + return(EINVAL); + + bzero(opt, sizeof(*opt)); + opt->ip6po_hlim = -1; /* -1 means to use default hop limit */ + + /* + * XXX: Currently, we assume all the optional information is stored + * in a single mbuf. + */ + if (control->m_next) + return(EINVAL); + + for (; control->m_len; control->m_data += CMSG_ALIGN(cm->cmsg_len), + control->m_len -= CMSG_ALIGN(cm->cmsg_len)) { + cm = mtod(control, struct cmsghdr *); + if (cm->cmsg_len == 0 || cm->cmsg_len > control->m_len) + return(EINVAL); + if (cm->cmsg_level != IPPROTO_IPV6) + continue; + + switch(cm->cmsg_type) { + case IPV6_PKTINFO: + if (cm->cmsg_len != CMSG_LEN(sizeof(struct in6_pktinfo))) + return(EINVAL); + if (needcopy) { + /* XXX: Is it really WAITOK? */ + opt->ip6po_pktinfo = + _MALLOC(sizeof(struct in6_pktinfo), + M_IP6OPT, M_WAITOK); + *opt->ip6po_pktinfo = + *(struct in6_pktinfo *)CMSG_DATA(cm); + } else + opt->ip6po_pktinfo = + (struct in6_pktinfo *)CMSG_DATA(cm); + if (opt->ip6po_pktinfo->ipi6_ifindex && + IN6_IS_ADDR_LINKLOCAL(&opt->ip6po_pktinfo->ipi6_addr)) + opt->ip6po_pktinfo->ipi6_addr.s6_addr16[1] = + htons(opt->ip6po_pktinfo->ipi6_ifindex); + + if (opt->ip6po_pktinfo->ipi6_ifindex > if_index + || opt->ip6po_pktinfo->ipi6_ifindex < 0) { + return(ENXIO); + } + + /* + * Check if the requested source address is indeed a + * unicast address assigned to the node. 
+ */ + if (!IN6_IS_ADDR_UNSPECIFIED(&opt->ip6po_pktinfo->ipi6_addr)) { + struct ifaddr *ia; + struct sockaddr_in6 sin6; + + bzero(&sin6, sizeof(sin6)); + sin6.sin6_len = sizeof(sin6); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = + opt->ip6po_pktinfo->ipi6_addr; + ia = ifa_ifwithaddr(sin6tosa(&sin6)); + if (ia == NULL) + return(EADDRNOTAVAIL); + } + break; + + case IPV6_HOPLIMIT: + if (cm->cmsg_len != CMSG_LEN(sizeof(int))) + return(EINVAL); + + opt->ip6po_hlim = *(int *)CMSG_DATA(cm); + if (opt->ip6po_hlim < -1 || opt->ip6po_hlim > 255) + return(EINVAL); + break; + + case IPV6_NEXTHOP: + if (!priv) + return(EPERM); + + if (cm->cmsg_len < sizeof(u_char) || + /* check if cmsg_len is large enough for sa_len */ + cm->cmsg_len < CMSG_LEN(*CMSG_DATA(cm))) + return(EINVAL); + + if (needcopy) { + opt->ip6po_nexthop = + _MALLOC(*CMSG_DATA(cm), + M_IP6OPT, M_WAITOK); + bcopy(CMSG_DATA(cm), + opt->ip6po_nexthop, + *CMSG_DATA(cm)); + } else + opt->ip6po_nexthop = + (struct sockaddr *)CMSG_DATA(cm); + break; + + case IPV6_HOPOPTS: + { + struct ip6_hbh *hbh; + int hbhlen; + + if (cm->cmsg_len < CMSG_LEN(sizeof(struct ip6_hbh))) + return(EINVAL); + hbh = (struct ip6_hbh *)CMSG_DATA(cm); + hbhlen = (hbh->ip6h_len + 1) << 3; + if (cm->cmsg_len != CMSG_LEN(hbhlen)) + return(EINVAL); + + if (needcopy) { + opt->ip6po_hbh = + _MALLOC(hbhlen, M_IP6OPT, M_WAITOK); + bcopy(hbh, opt->ip6po_hbh, hbhlen); + } else + opt->ip6po_hbh = hbh; + break; + } + + case IPV6_DSTOPTS: + { + struct ip6_dest *dest; + int destlen; + + if (cm->cmsg_len < CMSG_LEN(sizeof(struct ip6_dest))) + return(EINVAL); + dest = (struct ip6_dest *)CMSG_DATA(cm); + destlen = (dest->ip6d_len + 1) << 3; + if (cm->cmsg_len != CMSG_LEN(destlen)) + return(EINVAL); + + /* + * If there is no routing header yet, the destination + * options header should be put on the 1st part. + * Otherwise, the header should be on the 2nd part. 
+ * (See RFC 2460, section 4.1) + */ + if (opt->ip6po_rthdr == NULL) { + if (needcopy) { + opt->ip6po_dest1 = + _MALLOC(destlen, M_IP6OPT, + M_WAITOK); + bcopy(dest, opt->ip6po_dest1, destlen); + } else + opt->ip6po_dest1 = dest; + } else { + if (needcopy) { + opt->ip6po_dest2 = + _MALLOC(destlen, M_IP6OPT, + M_WAITOK); + bcopy(dest, opt->ip6po_dest2, destlen); + } else + opt->ip6po_dest2 = dest; + } + break; + } + + case IPV6_RTHDR: + { + struct ip6_rthdr *rth; + int rthlen; + + if (cm->cmsg_len < CMSG_LEN(sizeof(struct ip6_rthdr))) + return(EINVAL); + rth = (struct ip6_rthdr *)CMSG_DATA(cm); + rthlen = (rth->ip6r_len + 1) << 3; + if (cm->cmsg_len != CMSG_LEN(rthlen)) + return(EINVAL); + + switch(rth->ip6r_type) { + case IPV6_RTHDR_TYPE_0: + /* must contain one addr */ + if (rth->ip6r_len == 0) + return(EINVAL); + /* length must be even */ + if (rth->ip6r_len % 2) + return(EINVAL); + if (rth->ip6r_len / 2 != rth->ip6r_segleft) + return(EINVAL); + break; + default: + return(EINVAL); /* not supported */ + } + + if (needcopy) { + opt->ip6po_rthdr = _MALLOC(rthlen, M_IP6OPT, + M_WAITOK); + bcopy(rth, opt->ip6po_rthdr, rthlen); + } else + opt->ip6po_rthdr = rth; + + break; + } + + case IPV6_REACHCONF: +#if 1 + /* + * it looks dangerous to allow IPV6_REACHCONF to + * normal user. it affects the ND state (system state) + * and can affect communication by others - jinmei + */ + if (!priv) + return(EPERM); +#endif + + if (cm->cmsg_len != CMSG_LEN(0)) + return(EINVAL); + opt->ip6po_flags |= IP6PO_REACHCONF; + break; + + case IPV6_USE_MIN_MTU: + if (cm->cmsg_len != CMSG_LEN(0)) + return(EINVAL); + opt->ip6po_flags |= IP6PO_MINMTU; + break; + + default: + return(ENOPROTOOPT); + } + } + + return(0); +} + +/* + * Routine called from ip6_output() to loop back a copy of an IP6 multicast + * packet to the input queue of a specified interface. 
Note that this + * calls the output routine of the loopback "driver", but with an interface + * pointer that might NOT be &loif -- easier than replicating that code here. + */ +void +ip6_mloopback(ifp, m, dst) + struct ifnet *ifp; + register struct mbuf *m; + register struct sockaddr_in6 *dst; +{ + struct mbuf *copym; + + copym = m_copy(m, 0, M_COPYALL); + if (copym != NULL) { +#ifdef __APPLE__ + /* + * TedW: + * We need to send all loopback traffic down to dlil in case + * a filter has tapped-in. + */ + + if (lo_dl_tag == 0) + dlil_find_dltag(APPLE_IF_FAM_LOOPBACK, 0, PF_INET6, &lo_dl_tag); + + if (lo_dl_tag) + dlil_output(lo_dl_tag, copym, 0, (struct sockaddr *) dst, 0); + else { + printf("Warning: ip6_mloopback call to dlil_find_dltag failed!\n"); + m_freem(copym); + } +#else + (void)if_simloop(ifp, copym, (struct sockaddr *)dst, NULL); + (void)looutput(ifp, copym, (struct sockaddr *)dst, NULL); +#endif + } +} + +/* + * Chop IPv6 header off from the payload. + */ +static int +ip6_splithdr(m, exthdrs) + struct mbuf *m; + struct ip6_exthdrs *exthdrs; +{ + struct mbuf *mh; + struct ip6_hdr *ip6; + + ip6 = mtod(m, struct ip6_hdr *); + if (m->m_len > sizeof(*ip6)) { + MGETHDR(mh, M_DONTWAIT, MT_HEADER); + if (mh == 0) { + m_freem(m); + return ENOBUFS; + } + M_COPY_PKTHDR(mh, m); + MH_ALIGN(mh, sizeof(*ip6)); + m->m_flags &= ~M_PKTHDR; + m->m_len -= sizeof(*ip6); + m->m_data += sizeof(*ip6); + mh->m_next = m; + m = mh; + m->m_len = sizeof(*ip6); + bcopy((caddr_t)ip6, mtod(m, caddr_t), sizeof(*ip6)); + } + exthdrs->ip6e_ip6 = m; + return 0; +} + +/* + * Compute IPv6 extension header length. + */ +#if HAVE_NRL_INPCB +# define in6pcb inpcb +# define in6p_outputopts inp_outputopts6 +#endif +int +ip6_optlen(in6p) + struct in6pcb *in6p; +{ + int len; + + if (!in6p->in6p_outputopts) + return 0; + + len = 0; +#define elen(x) \ + (((struct ip6_ext *)(x)) ? 
(((struct ip6_ext *)(x))->ip6e_len + 1) << 3 : 0) + + len += elen(in6p->in6p_outputopts->ip6po_hbh); + if (in6p->in6p_outputopts->ip6po_rthdr) + /* dest1 is valid with rthdr only */ + len += elen(in6p->in6p_outputopts->ip6po_dest1); + len += elen(in6p->in6p_outputopts->ip6po_rthdr); + len += elen(in6p->in6p_outputopts->ip6po_dest2); + return len; +#undef elen +} +#if HAVE_NRL_INPCB +# undef in6pcb +# undef in6p_outputopts +#endif diff --git a/bsd/netinet6/ip6_var.h b/bsd/netinet6/ip6_var.h new file mode 100644 index 000000000..d863360c0 --- /dev/null +++ b/bsd/netinet6/ip6_var.h @@ -0,0 +1,368 @@ +/* $KAME: ip6_var.h,v 1.31 2000/04/04 08:48:26 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ip_var.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET6_IP6_VAR_H_ +#define _NETINET6_IP6_VAR_H_ + +/* + * IP6 reassembly queue structure. Each fragment + * being reassembled is attached to one of these structures. + */ +struct ip6q { + u_int32_t ip6q_head; + u_int16_t ip6q_len; + u_int8_t ip6q_nxt; /* ip6f_nxt in first fragment */ + u_int8_t ip6q_hlim; + struct ip6asfrag *ip6q_down; + struct ip6asfrag *ip6q_up; + u_int32_t ip6q_ident; + u_int8_t ip6q_arrive; + u_int8_t ip6q_ttl; + struct in6_addr ip6q_src, ip6q_dst; + struct ip6q *ip6q_next; + struct ip6q *ip6q_prev; + int ip6q_unfrglen; /* len of unfragmentable part */ +#if notyet + u_char *ip6q_nxtp; +#endif +}; + +struct ip6asfrag { + u_int32_t ip6af_head; + u_int16_t ip6af_len; + u_int8_t ip6af_nxt; + u_int8_t ip6af_hlim; + /* must not override the above members during reassembling */ + struct ip6asfrag *ip6af_down; + struct ip6asfrag *ip6af_up; + struct mbuf *ip6af_m; + int ip6af_offset; /* offset in ip6af_m to next header */ + int ip6af_frglen; /* fragmentable part length */ + int ip6af_off; /* fragment offset */ + u_int16_t ip6af_mff; /* more fragment bit in frag off */ +}; + +#define IP6_REASS_MBUF(ip6af) (*(struct mbuf **)&((ip6af)->ip6af_m)) + +struct ip6_moptions { + struct ifnet *im6o_multicast_ifp; /* ifp for outgoing multicasts */ + u_char im6o_multicast_hlim; /* hoplimit for outgoing multicasts */ + u_char im6o_multicast_loop; /* 1 >= hear sends if a member */ + 
LIST_HEAD(, in6_multi_mship) im6o_memberships; +}; + +/* + * Control options for outgoing packets + */ + +/* Routing header related info */ +struct ip6po_rhinfo { + struct ip6_rthdr *ip6po_rhi_rthdr; /* Routing header */ + struct route_in6 ip6po_rhi_route; /* Route to the 1st hop */ +}; +#define ip6po_rthdr ip6po_rhinfo.ip6po_rhi_rthdr +#define ip6po_route ip6po_rhinfo.ip6po_rhi_route + +struct ip6_pktopts { + int ip6po_hlim; /* Hoplimit for outgoing packets */ + + /* Outgoing IF/address information */ + struct in6_pktinfo *ip6po_pktinfo; + + struct sockaddr *ip6po_nexthop; /* Next-hop address */ + + struct ip6_hbh *ip6po_hbh; /* Hop-by-Hop options header */ + + /* Destination options header (before a routing header) */ + struct ip6_dest *ip6po_dest1; + + /* Routing header related info. */ + struct ip6po_rhinfo ip6po_rhinfo; + + /* Destination options header (after a routing header) */ + struct ip6_dest *ip6po_dest2; + + int ip6po_flags; +#define IP6PO_REACHCONF 0x01 /* upper-layer reachability confirmation */ +#define IP6PO_MINMTU 0x02 /* use minimum MTU (IPV6_USE_MIN_MTU) */ +}; + +/* + * Control options for incoming packets + */ + +struct ip6_recvpktopts { + struct mbuf *head; /* mbuf chain of data passed to a user */ + +#ifdef SO_TIMESTAMP + struct mbuf *timestamp; /* timestamp */ +#endif + struct mbuf *hlim; /* received hop limit */ + struct mbuf *pktinfo; /* packet information of rcv packet */ + struct mbuf *hbh; /* HbH options header of rcv packet */ + struct mbuf *dest1; /* Dest opt header of rcv packet */ + struct mbuf *dest2; /* Dest opt header (after rthdr) of rcv packet */ + struct mbuf *rthdr; /* Routing header of rcv packet */ +}; + +struct ip6stat { + u_quad_t ip6s_total; /* total packets received */ + u_quad_t ip6s_tooshort; /* packet too short */ + u_quad_t ip6s_toosmall; /* not enough data */ + u_quad_t ip6s_fragments; /* fragments received */ + u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */ + u_quad_t ip6s_fragtimeout; /* 
fragments timed out */
	u_quad_t ip6s_fragoverflow;	/* fragments that exceeded limit */
	u_quad_t ip6s_forward;		/* packets forwarded */
	u_quad_t ip6s_cantforward;	/* packets rcvd for unreachable dest */
	u_quad_t ip6s_redirectsent;	/* packets forwarded on same net */
	u_quad_t ip6s_delivered;	/* datagrams delivered to upper level*/
	u_quad_t ip6s_localout;		/* total ip packets generated here */
	u_quad_t ip6s_odropped;		/* lost packets due to nobufs, etc. */
	u_quad_t ip6s_reassembled;	/* total packets reassembled ok */
	u_quad_t ip6s_fragmented;	/* datagrams successfully fragmented */
	u_quad_t ip6s_ofragments;	/* output fragments created */
	u_quad_t ip6s_cantfrag;		/* don't fragment flag was set, etc. */
	u_quad_t ip6s_badoptions;	/* error in option processing */
	u_quad_t ip6s_noroute;		/* packets discarded due to no route */
	u_quad_t ip6s_badvers;		/* ip6 version != 6 */
	u_quad_t ip6s_rawout;		/* total raw ip packets generated */
	u_quad_t ip6s_badscope;		/* scope error */
	u_quad_t ip6s_notmember;	/* don't join this multicast group */
	u_quad_t ip6s_nxthist[256];	/* next header history */
	u_quad_t ip6s_m1;		/* one mbuf */
	u_quad_t ip6s_m2m[32];		/* two or more mbuf */
	u_quad_t ip6s_mext1;		/* one ext mbuf */
	u_quad_t ip6s_mext2m;		/* two or more ext mbuf */
	u_quad_t ip6s_exthdrtoolong;	/* ext hdr are not continuous */
	u_quad_t ip6s_nogif;		/* no match gif found */
	u_quad_t ip6s_toomanyhdr;	/* discarded due to too many headers */
	/* XXX the following two items are not really AF_INET6 thing */
	u_quad_t ip6s_exthdrget;	/* # of calls to IP6_EXTHDR_GET */
	u_quad_t ip6s_exthdrget0;	/* # of calls to IP6_EXTHDR_GET0 */
	u_quad_t ip6s_pulldown;		/* # of calls to m_pulldown */
	u_quad_t ip6s_pulldown_copy;	/* # of mbuf copies in m_pulldown */
	u_quad_t ip6s_pulldown_alloc;	/* # of mbuf allocs in m_pulldown */
	u_quad_t ip6s_pullup;		/* # of calls to m_pullup */
	u_quad_t ip6s_pullup_copy;	/* # of possible m_pullup copies */
	u_quad_t
ip6s_pullup_alloc; /* # of possible m_pullup mallocs */ + u_quad_t ip6s_pullup_fail; /* # of possible m_pullup failures */ + u_quad_t ip6s_pullup2; /* # of calls to m_pullup2 */ + u_quad_t ip6s_pullup2_copy; /* # of possible m_pullup2 copies */ + u_quad_t ip6s_pullup2_alloc; /* # of possible m_pullup2 mallocs */ + u_quad_t ip6s_pullup2_fail; /* # of possible m_pullup2 failures */ + + /* + * statistics for improvement of the source address selection + * algorithm: + * XXX: hardcoded 16 = # of ip6 multicast scope types + 1 + */ + /* number of times that address selection fails */ + u_quad_t ip6s_sources_none; + /* number of times that an address on the outgoing I/F is chosen */ + u_quad_t ip6s_sources_sameif[16]; + /* number of times that an address on a non-outgoing I/F is chosen */ + u_quad_t ip6s_sources_otherif[16]; + /* + * number of times that an address that has the same scope + * from the destination is chosen. + */ + u_quad_t ip6s_sources_samescope[16]; + /* + * number of times that an address that has a different scope + * from the destination is chosen. + */ + u_quad_t ip6s_sources_otherscope[16]; + /* number of times that an deprecated address is chosen */ + u_quad_t ip6s_sources_deprecated[16]; +}; + +#if KERNEL +/* flags passed to ip6_output as last parameter */ +#define IPV6_DADOUTPUT 0x01 /* DAD */ +#define IPV6_FORWARDING 0x02 /* most of IPv6 header exists */ +#define IPV6_MINMTU 0x04 /* use minimum MTU (IPV6_USE_MIN_MTU) */ + +extern struct ip6stat ip6stat; /* statistics */ +extern u_int32_t ip6_id; /* fragment identifier */ +extern int ip6_defhlim; /* default hop limit */ +extern int ip6_defmcasthlim; /* default multicast hop limit */ +extern int ip6_forwarding; /* act as router? */ +extern int ip6_forward_srcrt; /* forward src-routed? 
*/ +extern int ip6_gif_hlim; /* Hop limit for gif encap packet */ +extern int ip6_use_deprecated; /* allow deprecated addr as source */ +extern int ip6_rr_prune; /* router renumbering prefix + * walk list every 5 sec. */ +#if INET6 +extern int ip6_mapped_addr_on; +#endif +#if defined(__NetBSD__) && !defined(INET6_BINDV6ONLY) +extern int ip6_bindv6only; +#endif + +extern struct socket *ip6_mrouter; /* multicast routing daemon */ +extern int ip6_sendredirects; /* send IP redirects when forwarding? */ +extern int ip6_maxfragpackets; /* Maximum packets in reassembly queue */ +extern int ip6_sourcecheck; /* Verify source interface */ +extern int ip6_sourcecheck_interval; /* Interval between log messages */ +extern int ip6_accept_rtadv; /* Acts as a host not a router */ +extern int ip6_keepfaith; /* Firewall Aided Internet Translator */ +extern int ip6_log_interval; +extern time_t ip6_log_time; +extern int ip6_hdrnestlimit; /* upper limit of # of extension headers */ +extern int ip6_dad_count; /* DupAddrDetectionTransmits */ + +extern u_int32_t ip6_flow_seq; +extern int ip6_auto_flowlabel; + +#if !defined(__APPLE__) +//#if !defined(__FreeBSD__) || __FreeBSD__ < 3 +struct in6pcb; +#endif +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +extern struct pr_usrreqs rip6_usrreqs; + +struct inpcb; +struct sockopt; +#endif + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) +int icmp6_ctloutput __P((struct socket *, struct sockopt *sopt)); +#else +int icmp6_ctloutput __P((int, struct socket *, int, int, struct mbuf **)); +#endif + +void ip6_init __P((void)); +void ip6intr __P((void)); +void ip6_input __P((struct mbuf *)); +void ip6_freepcbopts __P((struct ip6_pktopts *)); +void ip6_freemoptions __P((struct ip6_moptions *)); +int ip6_unknown_opt __P((u_int8_t *, struct mbuf *, int)); +char * ip6_get_prevhdr __P((struct mbuf *, int)); +int ip6_nexthdr __P((struct mbuf *, int, int, int *)); +int ip6_lasthdr __P((struct mbuf *, int, int, int 
*)); +int ip6_mforward __P((struct ip6_hdr *, struct ifnet *, struct mbuf *)); +int ip6_process_hopopts __P((struct mbuf *, u_int8_t *, int, u_int32_t *, + u_int32_t *)); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +void ip6_savecontrol __P((struct inpcb *, struct ip6_hdr *, struct mbuf *, + struct ip6_recvpktopts *, + struct ip6_recvpktopts **)); +#else +void ip6_savecontrol __P((struct in6pcb *, struct ip6_hdr *, struct mbuf *, + struct ip6_recvpktopts *, + struct ip6_recvpktopts **)); +#endif +void ip6_update_recvpcbopt __P((struct ip6_recvpktopts *, + struct ip6_recvpktopts *)); +void ip6_reset_rcvopt __P((struct ip6_recvpktopts *, int)); +int ip6_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); + +void ip6_forward __P((struct mbuf *, int)); + +void ip6_mloopback __P((struct ifnet *, struct mbuf *, struct sockaddr_in6 *)); +int ip6_output __P((struct mbuf *, struct ip6_pktopts *, + struct route_in6 *, int, + struct ip6_moptions *, struct ifnet **)); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int ip6_ctloutput __P((struct socket *, struct sockopt *sopt)); +#else +int ip6_ctloutput __P((int, struct socket *, int, int, struct mbuf **)); +#endif +int ip6_setpktoptions __P((struct mbuf *, struct ip6_pktopts *, int, int)); +void ip6_clearpktopts __P((struct ip6_pktopts *, int, int)); +struct ip6_pktopts *ip6_copypktopts __P((struct ip6_pktopts *, int)); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int ip6_optlen __P((struct inpcb *)); +#else +int ip6_optlen __P((struct in6pcb *)); +#endif + +int route6_input __P((struct mbuf **, int *, int)); + +void frag6_init __P((void)); +int frag6_input __P((struct mbuf **, int *, int)); +void frag6_slowtimo __P((void)); +void frag6_drain __P((void)); + +void rip6_init __P((void)); +int rip6_input __P((struct mbuf **mp, int *offp, int proto)); +void rip6_ctlinput __P((int, struct sockaddr *, void *)); +#if defined(__FreeBSD__) && __FreeBSD__ 
>= 3 || defined (__APPLE__) +int rip6_ctloutput __P((struct socket *so, struct sockopt *sopt)); +#else +int rip6_ctloutput __P((int, struct socket *, int, int, struct mbuf **)); +#endif +int rip6_output __P((struct mbuf *, struct socket *, struct sockaddr_in6 *, + struct mbuf *)); +int rip6_usrreq __P((struct socket *, + int, struct mbuf *, struct mbuf *, struct mbuf *, struct proc *)); + +int dest6_input __P((struct mbuf **, int *, int)); +int none_input __P((struct mbuf **, int *, int)); +#endif /* _KERNEL */ + +#endif /* !_NETINET6_IP6_VAR_H_ */ diff --git a/bsd/netinet6/ip6protosw.h b/bsd/netinet6/ip6protosw.h new file mode 100644 index 000000000..61edf5ccf --- /dev/null +++ b/bsd/netinet6/ip6protosw.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* BSDI protosw.h,v 2.3 1996/10/11 16:02:40 pjd Exp */ + +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)protosw.h 8.1 (Berkeley) 6/2/93 + * $Id: ip6protosw.h,v 1.2 2000/09/14 20:35:15 lindak Exp $ + */ + +#ifndef _NETINET6_IP6PROTOSW_H_ +#define _NETINET6_IP6PROTOSW_H_ + +/* + * Protocol switch table for IPv6. + * All other definitions should refer to sys/protosw.h + */ + +struct mbuf; +struct sockaddr; +struct socket; +struct domain; +struct proc; +struct ip6_hdr; +#ifdef __FreeBSD__ +struct pr_usrreqs; +#endif + +#include +#include +/* + * argument type for the last arg of pr_ctlinput(). + * should be consulted only with AF_INET6 family. 
+ */ +struct ip6ctlparam { + struct mbuf *ip6c_m; /* start of mbuf chain */ + struct ip6_hdr *ip6c_ip6; /* ip6 header of target packet */ + int ip6c_off; /* offset of the target proto header */ +}; + +struct ip6protosw { + short pr_type; /* socket type used for */ + struct domain *pr_domain; /* domain protocol a member of */ + short pr_protocol; /* protocol number */ + unsigned int pr_flags; /* see below */ +/* protocol-protocol hooks */ + int (*pr_input) __P((struct mbuf **, int *, int)); + /* input to protocol (from below) */ + int (*pr_output) __P((struct mbuf *m, struct socket *so, + struct sockaddr_in6 *, struct mbuf *)); + /* output to protocol (from above) */ + void (*pr_ctlinput)__P((int, struct sockaddr *, struct ip6_hdr *, + struct mbuf *, int)); + /* control input (from below) */ + int (*pr_ctloutput)__P((struct socket *, struct sockopt *)); + /* control output (from above) */ +/* user-protocol hook */ + int (*pr_usrreq) /* user request: see list below */ + __P((struct socket *, int, struct mbuf *, + struct mbuf *, struct mbuf *, struct proc *)); + +/* utility hooks */ + void (*pr_init) __P((void)); /* initialization hook */ + void (*pr_fasttimo) __P((void)); + /* fast timeout (200ms) */ + void (*pr_slowtimo) __P((void)); + /* slow timeout (500ms) */ + void (*pr_drain) __P((void)); + /* flush any excess space possible */ +/* ### Added for MacOS X */ + int (*pr_sysctl)(); /* sysctl for protocol */ + + struct pr_usrreqs *pr_usrreqs; /* supersedes pr_usrreq() */ +/* ### Added MacOS X Implant hooks */ + TAILQ_HEAD(pr6_sfilter, NFDescriptor) pr_sfilter; + struct protosw *pr_next; /* Chain for domain */ +}; + +#endif diff --git a/bsd/netinet6/ipcomp.h b/bsd/netinet6/ipcomp.h new file mode 100644 index 000000000..25ca896f0 --- /dev/null +++ b/bsd/netinet6/ipcomp.h @@ -0,0 +1,69 @@ +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC2393 IP payload compression protocol (IPComp). 
+ */ + +#ifndef _NETINET6_IPCOMP_H_ +#define _NETINET6_IPCOMP_H_ + +struct ipcomp { + u_int8_t comp_nxt; /* Next Header */ + u_int8_t comp_flags; /* reserved, must be zero */ + u_int16_t comp_cpi; /* Compression parameter index */ +}; + +/* well-known algorithm number (in CPI), from RFC2409 */ +#define IPCOMP_OUI 1 /* vendor specific */ +#define IPCOMP_DEFLATE 2 /* RFC2394 */ +#define IPCOMP_LZS 3 /* RFC2395 */ +#define IPCOMP_MAX 4 + +#define IPCOMP_CPI_NEGOTIATE_MIN 256 + +#if defined(KERNEL) || defined(_KERNEL) +struct ipcomp_algorithm { + int (*compress) __P((struct mbuf *, struct mbuf *, size_t *)); + int (*decompress) __P((struct mbuf *, struct mbuf *, size_t *)); + size_t minplen; /* minimum required length for compression */ +}; + +struct ipsecrequest; +extern struct ipcomp_algorithm ipcomp_algorithms[]; +extern void ipcomp4_input __P((struct mbuf *, int)); +extern int ipcomp4_output __P((struct mbuf *, struct ipsecrequest *)); +#if INET6 +extern int ipcomp6_input __P((struct mbuf **, int *)); +extern int ipcomp6_output __P((struct mbuf *, u_char *, struct mbuf *, + struct ipsecrequest *)); +#endif +#endif /*KERNEL*/ + +#endif /*_NETINET6_IPCOMP_H_*/ diff --git a/bsd/netinet6/ipcomp_core.c b/bsd/netinet6/ipcomp_core.c new file mode 100644 index 000000000..9ef5e3d14 --- /dev/null +++ b/bsd/netinet6/ipcomp_core.c @@ -0,0 +1,311 @@ +/* $KAME: ipcomp_core.c,v 1.10 2000/02/22 14:04:23 itojun Exp $ */ + +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC2393 IP payload compression protocol (IPComp). 
+ */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +static void *deflate_alloc __P((void *, u_int, u_int)); +static void deflate_free __P((void *, void *)); +static int deflate_common __P((struct mbuf *, struct mbuf *, size_t *, int)); +static int deflate_compress __P((struct mbuf *, struct mbuf *, size_t *)); +static int deflate_decompress __P((struct mbuf *, struct mbuf *, size_t *)); + +/* + * We need to use default window size (2^15 = 32Kbytes as of writing) for + * inbound case. Otherwise we get interop problem. + * Use negative value to avoid Adler32 checksum. This is an undocumented + * feature in zlib (see ipsec wg mailing list archive in January 2000). + */ +static int deflate_policy = Z_DEFAULT_COMPRESSION; +static int deflate_window_out = -12; +static const int deflate_window_in = -1 * MAX_WBITS; /* don't change it */ +static int deflate_memlevel = MAX_MEM_LEVEL; + +struct ipcomp_algorithm ipcomp_algorithms[] = { + { NULL, NULL, -1 }, + { NULL, NULL, -1 }, + { deflate_compress, deflate_decompress, 90 }, + { NULL, NULL, 90 }, +}; + +static void * +deflate_alloc(aux, items, siz) + void *aux; + u_int items; + u_int siz; +{ + void *ptr; + MALLOC(ptr, void *, items * siz, M_TEMP, M_NOWAIT); + return ptr; +} + +static void +deflate_free(aux, ptr) + void *aux; + void *ptr; +{ + FREE(ptr, M_TEMP); +} + +static int +deflate_common(m, md, lenp, mode) + struct mbuf *m; + struct mbuf *md; + size_t *lenp; + int mode; /* 0: compress 1: decompress */ +{ + struct mbuf *mprev; + struct mbuf *p; + struct mbuf *n, *n0 = NULL, **np; + z_stream zs; + int error = 0; + int zerror; + size_t offset; + int firsttime, final, flush; + + for (mprev = m; mprev && 
mprev->m_next != md; mprev = mprev->m_next) + ; + if (!mprev) + panic("md is not in m in deflate_common"); + + bzero(&zs, sizeof(zs)); + zs.zalloc = deflate_alloc; + zs.zfree = deflate_free; + + zerror = mode ? inflateInit2(&zs, deflate_window_in) + : deflateInit2(&zs, deflate_policy, Z_DEFLATED, + deflate_window_out, deflate_memlevel, + Z_DEFAULT_STRATEGY); + if (zerror != Z_OK) { + error = ENOBUFS; + goto fail; + } + + n0 = n = NULL; + np = &n0; + offset = 0; + firsttime = 1; + final = 0; + flush = Z_NO_FLUSH; + zerror = 0; + p = md; + while (1) { + /* + * first time, we need to setup the buffer before calling + * compression function. + */ + if (firsttime) + firsttime = 0; + else { + zerror = mode ? inflate(&zs, flush) + : deflate(&zs, flush); + } + + /* get input buffer */ + if (p && zs.avail_in == 0) { + zs.next_in = mtod(p, u_int8_t *); + zs.avail_in = p->m_len; + p = p->m_next; + if (!p) { + final = 1; + flush = Z_PARTIAL_FLUSH; + } + } + + /* get output buffer */ + if (zs.next_out == NULL || zs.avail_out == 0) { + /* keep the reply buffer into our chain */ + if (n) { + n->m_len = zs.total_out - offset; + offset = zs.total_out; + *np = n; + np = &n->m_next; + } + + /* get a fresh reply buffer */ + MGET(n, M_DONTWAIT, MT_DATA); + if (n) { + MCLGET(n, M_DONTWAIT); + } + if (!n) { + error = ENOBUFS; + goto fail; + } + n->m_len = 0; + n->m_len = M_TRAILINGSPACE(n); + n->m_next = NULL; + /* + * if this is the first reply buffer, reserve + * region for ipcomp header. + */ + if (*np == NULL) { + n->m_len -= sizeof(struct ipcomp); + n->m_data += sizeof(struct ipcomp); + } + + zs.next_out = mtod(n, u_int8_t *); + zs.avail_out = n->m_len; + } + + if (zerror == Z_OK) { + /* + * to terminate deflate/inflate process, we need to + * call {in,de}flate() with different flushing methods. + * + * deflate() needs at least one Z_PARTIAL_FLUSH, + * then use Z_FINISH until we get to the end. 
+ * (if we use Z_FLUSH without Z_PARTIAL_FLUSH, deflate() + * will assume contiguous single output buffer, and that + * is not what we want) + * inflate() does not care about flushing method, but + * needs output buffer until it gets to the end. + * + * the most outer loop will be terminated with + * Z_STREAM_END. + */ + if (final == 1) { + /* reached end of mbuf chain */ + if (mode == 0) + final = 2; + else + final = 3; + } else if (final == 2) { + /* terminate deflate case */ + flush = Z_FINISH; + } else if (final == 3) { + /* terminate inflate case */ + ; + } + } else if (zerror == Z_STREAM_END) + break; + else { + ipseclog((LOG_ERR, "ipcomp_%scompress: %sflate: %s\n", + mode ? "de" : "", mode ? "in" : "de", + zs.msg ? zs.msg : "unknown error")); + error = EINVAL; + goto fail; + } + } + zerror = mode ? inflateEnd(&zs) : deflateEnd(&zs); + if (zerror != Z_OK) { + ipseclog((LOG_ERR, "ipcomp_%scompress: %sflate: %s\n", + mode ? "de" : "", mode ? "in" : "de", + zs.msg ? zs.msg : "unknown error")); + error = EINVAL; + goto fail; + } + /* keep the final reply buffer into our chain */ + if (n) { + n->m_len = zs.total_out - offset; + offset = zs.total_out; + *np = n; + np = &n->m_next; + } + + /* switch the mbuf to the new one */ + mprev->m_next = n0; + m_freem(md); + *lenp = zs.total_out; + + return 0; + +fail: + if (m) + m_freem(m); + if (n0) + m_freem(n0); + return error; +} + +static int +deflate_compress(m, md, lenp) + struct mbuf *m; + struct mbuf *md; + size_t *lenp; +{ + if (!m) + panic("m == NULL in deflate_compress"); + if (!md) + panic("md == NULL in deflate_compress"); + if (!lenp) + panic("lenp == NULL in deflate_compress"); + + return deflate_common(m, md, lenp, 0); +} + +static int +deflate_decompress(m, md, lenp) + struct mbuf *m; + struct mbuf *md; + size_t *lenp; +{ + if (!m) + panic("m == NULL in deflate_decompress"); + if (!md) + panic("md == NULL in deflate_decompress"); + if (!lenp) + panic("lenp == NULL in deflate_decompress"); + + return 
deflate_common(m, md, lenp, 1); +} diff --git a/bsd/netinet6/ipcomp_input.c b/bsd/netinet6/ipcomp_input.c new file mode 100644 index 000000000..43039d9c7 --- /dev/null +++ b/bsd/netinet6/ipcomp_input.c @@ -0,0 +1,394 @@ +/* $KAME: ipcomp_input.c,v 1.11 2000/02/22 14:04:23 itojun Exp $ */ + +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC2393 IP payload compression protocol (IPComp). 
+ */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if INET6 +#include +#include +#endif +#include + +#include +#include +#include +#include + +#include + +#define IPLEN_FLIPPED + + +#if INET +extern struct protosw * ip_protox[]; +#if defined(__bsdi__) || defined(__NetBSD__) +extern u_char ip_protox[]; +#endif + +void +ipcomp4_input(struct mbuf *m, int off) +{ + struct ip *ip; + struct ipcomp *ipcomp; + struct ipcomp_algorithm *algo; + u_int16_t cpi; /* host order */ + u_int16_t nxt; + size_t hlen; + int error; + size_t newlen, olen; + struct secasvar *sav = NULL; + + + if (off + sizeof(struct ipcomp) > MHLEN) { + /*XXX the restriction should be relaxed*/ + ipseclog((LOG_DEBUG, "IPv4 IPComp input: assumption failed " + "(header too long)\n")); + ipsecstat.in_inval++; + goto fail; + } + if (m->m_len < off + sizeof(struct ipcomp)) { + m = m_pullup(m, off + sizeof(struct ipcomp)); + if (!m) { + ipseclog((LOG_DEBUG, "IPv4 IPComp input: can't pullup;" + "dropping the packet for simplicity\n")); + ipsecstat.in_nomem++; + goto fail; + } + } else if (m->m_len > off + sizeof(struct ipcomp)) { + /* chop header part from the packet header chain */ + struct mbuf *n; + MGETHDR(n, M_DONTWAIT, MT_HEADER); + if (!n) { + ipsecstat.in_nomem++; + goto fail; + } + M_COPY_PKTHDR(n, m); + MH_ALIGN(n, off + sizeof(struct ipcomp)); + n->m_len = off + sizeof(struct ipcomp); + bcopy(mtod(m, caddr_t), mtod(n, caddr_t), + off + sizeof(struct ipcomp)); + m_adj(m, off + sizeof(struct ipcomp)); + m->m_flags &= ~M_PKTHDR; + n->m_next = m; + m = n; + } + + ip = mtod(m, struct ip *); + ipcomp = (struct ipcomp *)(((caddr_t)ip) + off); + nxt = ipcomp->comp_nxt; +#ifdef _IP_VHL + hlen = 
IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + + cpi = ntohs(ipcomp->comp_cpi); + + if (cpi >= IPCOMP_CPI_NEGOTIATE_MIN) { + sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, + (caddr_t)&ip->ip_dst, IPPROTO_IPCOMP, htonl(cpi)); + if (sav != NULL + && (sav->state == SADB_SASTATE_MATURE + || sav->state == SADB_SASTATE_DYING)) { + cpi = sav->alg_enc; /*XXX*/ + /* other parameters to look at? */ + } + } + if (cpi < IPCOMP_MAX && ipcomp_algorithms[cpi].decompress != NULL) + algo = &ipcomp_algorithms[cpi]; + else + algo = NULL; + if (!algo) { + ipseclog((LOG_WARNING, "IPv4 IPComp input: unknown cpi %u\n", + cpi)); + ipsecstat.in_nosa++; + goto fail; + } + + /* chop ipcomp header */ + ipcomp = NULL; + m->m_len -= sizeof(struct ipcomp); + m->m_pkthdr.len -= sizeof(struct ipcomp); +#ifdef IPLEN_FLIPPED + ip->ip_len -= sizeof(struct ipcomp); +#else + ip->ip_len = htons(ntohs(ip->ip_len) - sizeof(struct ipcomp)); +#endif + + olen = m->m_pkthdr.len; + newlen = m->m_pkthdr.len - off; + error = (*algo->decompress)(m, m->m_next, &newlen); + if (error != 0) { + if (error == EINVAL) + ipsecstat.in_inval++; + else if (error == ENOBUFS) + ipsecstat.in_nomem++; + m = NULL; + goto fail; + } + ipsecstat.in_comphist[cpi]++; + + /* + * returning decompressed packet onto icmp is meaningless. + * mark it decrypted to prevent icmp from attaching original packet. + */ + m->m_flags |= M_DECRYPTED; + + m->m_pkthdr.len = off + newlen; + ip = mtod(m, struct ip *); + { + size_t len; +#ifdef IPLEN_FLIPPED + len = ip->ip_len; +#else + len = ntohs(ip->ip_len); +#endif + /* + * be careful about underflow. also, do not assign exact value + * as ip_len is manipulated differently on *BSDs. 
+ */ + len += m->m_pkthdr.len; + len -= olen; + if (len & ~0xffff) { + /* packet too big after decompress */ + ipsecstat.in_inval++; + goto fail; + } +#ifdef IPLEN_FLIPPED + ip->ip_len = len & 0xffff; +#else + ip->ip_len = htons(len & 0xffff); +#endif + ip->ip_p = nxt; + } + + if (sav) { + key_sa_recordxfer(sav, m); + key_freesav(sav); + sav = NULL; + } + + if (nxt != IPPROTO_DONE) + (*ip_protox[nxt]->pr_input)(m, off); + else + m_freem(m); + m = NULL; + + ipsecstat.in_success++; + return; + +fail: + if (sav) + key_freesav(sav); + if (m) + m_freem(m); + return; +} +#endif /* INET */ + +#if INET6 +int +ipcomp6_input(mp, offp) + struct mbuf **mp; + int *offp; +{ + struct mbuf *m, *md; + int off; + struct ip6_hdr *ip6; + struct mbuf *ipcompm; + struct ipcomp *ipcomp; + struct ipcomp_algorithm *algo; + u_int16_t cpi; /* host order */ + u_int16_t nxt; + int error; + size_t newlen; + struct secasvar *sav = NULL; + + m = *mp; + off = *offp; + + IP6_EXTHDR_CHECK(m, off, sizeof(struct ipcomp), IPPROTO_DONE); + + { + int skip; + struct mbuf *n; + struct mbuf *p, *q; + size_t l; + + skip = off; + for (n = m; n && skip > 0; n = n->m_next) { + if (n->m_len <= skip) { + skip -= n->m_len; + continue; + } + break; + } + if (!n) { + ipseclog((LOG_DEBUG, "IPv6 IPComp input: wrong mbuf chain\n")); + ipsecstat.in_inval++; + goto fail; + } + if (n->m_len < skip + sizeof(struct ipcomp)) { + ipseclog((LOG_DEBUG, "IPv6 IPComp input: wrong mbuf chain\n")); + ipsecstat.in_inval++; + goto fail; + } + ip6 = mtod(m, struct ip6_hdr *); + ipcompm = n; + ipcomp = (struct ipcomp *)(mtod(n, caddr_t) + skip); + if (n->m_len > skip + sizeof(struct ipcomp)) { + /* split mbuf to ease the following steps*/ + l = n->m_len - (skip + sizeof(struct ipcomp)); + p = m_copym(n, skip + sizeof(struct ipcomp), l , M_DONTWAIT); + if (!p) { + ipsecstat.in_nomem++; + goto fail; + } + for (q = p; q && q->m_next; q = q->m_next) + ; + q->m_next = n->m_next; + n->m_next = p; + n->m_len -= l; + md = p; + } else + md = 
n->m_next; + } + + nxt = ipcomp->comp_nxt; + cpi = ntohs(ipcomp->comp_cpi); + + if (cpi >= IPCOMP_CPI_NEGOTIATE_MIN) { + sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, + (caddr_t)&ip6->ip6_dst, IPPROTO_IPCOMP, htonl(cpi)); + if (sav != NULL + && (sav->state == SADB_SASTATE_MATURE + || sav->state == SADB_SASTATE_DYING)) { + cpi = sav->alg_enc; /*XXX*/ + /* other parameters to look at? */ + } + } + if (cpi < IPCOMP_MAX && ipcomp_algorithms[cpi].decompress != NULL) + algo = &ipcomp_algorithms[cpi]; + else + algo = NULL; + if (!algo) { + ipseclog((LOG_WARNING, "IPv6 IPComp input: unknown cpi %u; " + "dropping the packet for simplicity\n", cpi)); + ipsec6stat.in_nosa++; + goto fail; + } + + newlen = m->m_pkthdr.len - off - sizeof(struct ipcomp); + error = (*algo->decompress)(m, md, &newlen); + if (error != 0) { + if (error == EINVAL) + ipsec6stat.in_inval++; + else if (error == ENOBUFS) + ipsec6stat.in_nomem++; + m = NULL; + goto fail; + } + ipsec6stat.in_comphist[cpi]++; + m->m_pkthdr.len = off + sizeof(struct ipcomp) + newlen; + + /* + * returning decompressed packet onto icmp is meaningless. + * mark it decrypted to prevent icmp from attaching original packet. 
+ */ + m->m_flags |= M_DECRYPTED; + + { + char *prvnxtp; + + /* chop IPComp header */ + prvnxtp = ip6_get_prevhdr(m, off); + *prvnxtp = nxt; + ipcompm->m_len -= sizeof(struct ipcomp); + ipcompm->m_pkthdr.len -= sizeof(struct ipcomp); + + /* adjust payload length */ + ip6 = mtod(m, struct ip6_hdr *); + if (((m->m_pkthdr.len - sizeof(struct ip6_hdr)) & ~0xffff) != 0) + ip6->ip6_plen = 0; /*now a jumbogram*/ + else + ip6->ip6_plen = htons(m->m_pkthdr.len - sizeof(struct ip6_hdr)); + } + + if (sav) { + key_sa_recordxfer(sav, m); + key_freesav(sav); + sav = NULL; + } + *offp = off; + *mp = m; + ipsec6stat.in_success++; + return nxt; + +fail: + if (m) + m_freem(m); + if (sav) + key_freesav(sav); + return IPPROTO_DONE; +} +#endif /* INET6 */ diff --git a/bsd/netinet6/ipcomp_output.c b/bsd/netinet6/ipcomp_output.c new file mode 100644 index 000000000..484c0b481 --- /dev/null +++ b/bsd/netinet6/ipcomp_output.c @@ -0,0 +1,425 @@ +/* $KAME: ipcomp_output.c,v 1.11 2000/02/22 14:04:23 itojun Exp $ */ + +/* + * Copyright (C) 1999 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * RFC2393 IP payload compression protocol (IPComp). + */ + +#define _IP_VHL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if INET6 +#include +#include +#endif +#include + +#include +#include +#include +#include + +#include + +static int ipcomp_output __P((struct mbuf *, u_char *, struct mbuf *, + struct ipsecrequest *, int)); + +/* + * Modify the packet so that the payload is compressed. + * The mbuf (m) must start with IPv4 or IPv6 header. + * On failure, free the given mbuf and return NULL. + * + * on invocation: + * m nexthdrp md + * v v v + * IP ......... payload + * during the encryption: + * m nexthdrp mprev md + * v v v v + * IP ............... 
ipcomp payload + * <-----><-----> + * complen plen + * <-> hlen + * <-----------------> compoff + */ +static int +ipcomp_output(m, nexthdrp, md, isr, af) + struct mbuf *m; + u_char *nexthdrp; + struct mbuf *md; + struct ipsecrequest *isr; + int af; +{ + struct mbuf *n; + struct mbuf *md0; + struct mbuf *mprev; + struct ipcomp *ipcomp; + struct secasvar *sav = isr->sav; + struct ipcomp_algorithm *algo; + u_int16_t cpi; /* host order */ + size_t plen0, plen; /*payload length to be compressed*/ + size_t compoff; + int afnumber; + int error = 0; + + switch (af) { +#if INET + case AF_INET: + afnumber = 4; + break; +#endif +#if INET6 + case AF_INET6: + afnumber = 6; + break; +#endif + default: + ipseclog((LOG_ERR, "ipcomp_output: unsupported af %d\n", af)); + return 0; /* no change at all */ + } + + /* grab parameters */ + if ((ntohl(sav->spi) & ~0xffff) != 0 || sav->alg_enc >= IPCOMP_MAX + || ipcomp_algorithms[sav->alg_enc].compress == NULL) { + ipsecstat.out_inval++; + m_freem(m); + return EINVAL; + } + if ((sav->flags & SADB_X_EXT_RAWCPI) == 0) + cpi = sav->alg_enc; + else + cpi = ntohl(sav->spi) & 0xffff; + algo = &ipcomp_algorithms[sav->alg_enc]; /*XXX*/ + + /* compute original payload length */ + plen = 0; + for (n = md; n; n = n->m_next) + plen += n->m_len; + + /* if the payload is short enough, we don't need to compress */ + if (plen < algo->minplen) + return 0; + + /* + * keep the original data packet, so that we can backout + * our changes when compression is not necessary. 
+ */ + md0 = m_copym(md, 0, M_COPYALL, M_NOWAIT); + if (md0 == NULL) { + error = ENOBUFS; + return 0; + } + plen0 = plen; + + /* make the packet over-writable */ + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) + ; + if (mprev == NULL || mprev->m_next != md) { + ipseclog((LOG_DEBUG, "ipcomp%d_output: md is not in chain\n", + afnumber)); + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_inval++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_inval++; + break; +#endif + } + m_freem(m); + m_freem(md0); + return EINVAL; + } + mprev->m_next = NULL; + if ((md = ipsec_copypkt(md)) == NULL) { + m_freem(m); + m_freem(md0); + error = ENOBUFS; + goto fail; + } + mprev->m_next = md; + + /* compress data part */ + if ((*algo->compress)(m, md, &plen) || mprev->m_next == NULL) { + ipseclog((LOG_ERR, "packet compression failure\n")); + m = NULL; + m_freem(md0); + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_inval++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_inval++; + break; +#endif + } + error = EINVAL; + goto fail; + } + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_comphist[sav->alg_enc]++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_comphist[sav->alg_enc]++; + break; +#endif + } + md = mprev->m_next; + + /* + * if the packet became bigger, meaningless to use IPComp. + * we've only wasted our cpu time. + */ + if (plen0 < plen) { + m_freem(md); + mprev->m_next = md0; + return 0; + } + + /* no need to backout change beyond here */ + m_freem(md0); + md0 = NULL; + m->m_pkthdr.len -= plen0; + m->m_pkthdr.len += plen; + + { + /* + * insert IPComp header. 
+ */ +#if INET + struct ip *ip = NULL; +#endif +#if INET6 + struct ip6_hdr *ip6 = NULL; +#endif + size_t hlen = 0; /*ip header len*/ + size_t complen = sizeof(struct ipcomp); + + switch (af) { +#if INET + case AF_INET: + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + break; +#endif +#if INET6 + case AF_INET6: + ip6 = mtod(m, struct ip6_hdr *); + hlen = sizeof(*ip6); + break; +#endif + } + + compoff = m->m_pkthdr.len - plen; + + /* + * grow the mbuf to accomodate ipcomp header. + * before: IP ... payload + * after: IP ... ipcomp payload + */ + if (M_LEADINGSPACE(md) < complen) { + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + m_freem(m); + error = ENOBUFS; + goto fail; + } + n->m_len = complen; + mprev->m_next = n; + n->m_next = md; + m->m_pkthdr.len += complen; + ipcomp = mtod(n, struct ipcomp *); + } else { + md->m_len += complen; + md->m_data -= complen; + m->m_pkthdr.len += complen; + ipcomp = mtod(md, struct ipcomp *); + } + + bzero(ipcomp, sizeof(*ipcomp)); + ipcomp->comp_nxt = *nexthdrp; + *nexthdrp = IPPROTO_IPCOMP; + ipcomp->comp_cpi = htons(cpi); + switch (af) { +#if INET + case AF_INET: + if (compoff + complen + plen < IP_MAXPACKET) + ip->ip_len = htons(compoff + complen + plen); + else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + ipsecstat.out_inval++; + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; +#endif +#if INET6 + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; +#endif + } + } + + if (!m) { + ipseclog((LOG_DEBUG, + "NULL mbuf after compression in ipcomp%d_output", + afnumber)); + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_inval++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_inval++; + break; +#endif + } + } else { + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_success++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_success++; + break; +#endif + } 
+ } +#if 0 + switch (af) { +#if INET + case AF_INET: + ipsecstat.out_esphist[sav->alg_enc]++; + break; +#endif +#if INET6 + case AF_INET6: + ipsec6stat.out_esphist[sav->alg_enc]++; + break; +#endif + } +#endif + key_sa_recordxfer(sav, m); + return 0; + +fail: +#if 1 + return error; +#else + panic("something bad in ipcomp_output"); +#endif +} + +#if INET +int +ipcomp4_output(m, isr) + struct mbuf *m; + struct ipsecrequest *isr; +{ + struct ip *ip; + if (m->m_len < sizeof(struct ip)) { + ipseclog((LOG_DEBUG, "ipcomp4_output: first mbuf too short\n")); + ipsecstat.out_inval++; + m_freem(m); + return NULL; + } + ip = mtod(m, struct ip *); + /* XXX assumes that m->m_next points to payload */ + return ipcomp_output(m, &ip->ip_p, m->m_next, isr, AF_INET); +} +#endif /*INET*/ + +#if INET6 +int +ipcomp6_output(m, nexthdrp, md, isr) + struct mbuf *m; + u_char *nexthdrp; + struct mbuf *md; + struct ipsecrequest *isr; +{ + if (m->m_len < sizeof(struct ip6_hdr)) { + ipseclog((LOG_DEBUG, "ipcomp6_output: first mbuf too short\n")); + ipsec6stat.out_inval++; + m_freem(m); + return NULL; + } + return ipcomp_output(m, nexthdrp, md, isr, AF_INET6); +} +#endif /*INET6*/ diff --git a/bsd/netinet6/ipsec.c b/bsd/netinet6/ipsec.c new file mode 100644 index 000000000..06f3ea54d --- /dev/null +++ b/bsd/netinet6/ipsec.c @@ -0,0 +1,3668 @@ +/* $KAME: ipsec.c,v 1.56 2000/04/04 08:47:34 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * IPsec controller part. 
+ */ +#define _IP_VHL + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#if __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef __NetBSD__ +#include +#endif +#if defined(__NetBSD__) || defined(__FreeBSD__) || defined (__APPLE__) +#include +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#if INET6 +#include +#include +#endif +#include +#if INET6 +#if !((defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__OpenBSD__) || (defined(__bsdi__) && _BSDI_VERSION >= 199802)) || defined (__APPLE__) +#include +#endif +#include +#endif + +#include +#include +#if IPSEC_ESP +#include +#endif +#include +#include +#include +#include + +#include + +#ifdef HAVE_NRL_INPCB +#define in6pcb inpcb +#define in6p_sp inp_sp +#define in6p_fport inp_fport +#define in6p_lport inp_lport +#define in6p_socket inp_socket +#define sotoin6pcb(so) ((struct inpcb *)(so)->so_pcb) +#endif + +#ifdef __NetBSD__ +#define ovbcopy bcopy +#endif + +#ifdef IPSEC_DEBUG +int ipsec_debug = 1; +#else +int ipsec_debug = 0; +#endif + +struct ipsecstat ipsecstat; +int ip4_inbound_call_ike = 0; +int ip4_ah_cleartos = 1; +int ip4_ah_offsetmask = 0; /* maybe IP_DF? */ +int ip4_ipsec_dfbit = 0; /* DF bit on encap. 
0: clear 1: set 2: copy */ +int ip4_esp_trans_deflev = IPSEC_LEVEL_USE; +int ip4_esp_net_deflev = IPSEC_LEVEL_USE; +int ip4_ah_trans_deflev = IPSEC_LEVEL_USE; +int ip4_ah_net_deflev = IPSEC_LEVEL_USE; +struct secpolicy ip4_def_policy; +int ip4_ipsec_ecn = 0; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ + +#if defined(__FreeBSD__) || defined(__APPLE__) +SYSCTL_DECL(_net_inet_ipsec); +/* net.inet.ipsec */ +SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS, + stats, CTLFLAG_RD, &ipsecstat, ipsecstat, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_POLICY, + def_policy, CTLFLAG_RW, &ip4_def_policy.policy, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, + CTLFLAG_RW, &ip4_esp_trans_deflev, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, + CTLFLAG_RW, &ip4_esp_net_deflev, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, + CTLFLAG_RW, &ip4_ah_trans_deflev, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, + CTLFLAG_RW, &ip4_ah_net_deflev, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_INBOUND_CALL_IKE, + inbound_call_ike, CTLFLAG_RW, &ip4_inbound_call_ike, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS, + ah_cleartos, CTLFLAG_RW, &ip4_ah_cleartos, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK, + ah_offsetmask, CTLFLAG_RW, &ip4_ah_offsetmask, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT, + dfbit, CTLFLAG_RW, &ip4_ipsec_dfbit, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN, + ecn, CTLFLAG_RW, &ip4_ipsec_ecn, 0, ""); +SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG, + debug, CTLFLAG_RW, &ipsec_debug, 0, ""); +#endif /* __FreeBSD__ */ + +#if INET6 +struct ipsecstat ipsec6stat; +int ip6_inbound_call_ike = 0; +int ip6_esp_trans_deflev = IPSEC_LEVEL_USE; +int ip6_esp_net_deflev = IPSEC_LEVEL_USE; +int ip6_ah_trans_deflev = IPSEC_LEVEL_USE; +int ip6_ah_net_deflev = IPSEC_LEVEL_USE; +struct secpolicy ip6_def_policy; +int ip6_ipsec_ecn = 
0; /* ECN ignore(-1)/forbidden(0)/allowed(1) */ + +#if defined(__FreeBSD__) || defined(__APPLE__) +SYSCTL_DECL(_net_inet6_ipsec6); +/* net.inet6.ipsec6 */ +SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS, + stats, CTLFLAG_RD, &ipsec6stat, ipsecstat, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY, + def_policy, CTLFLAG_RW, &ip6_def_policy.policy, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, + CTLFLAG_RW, &ip6_esp_trans_deflev, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, + CTLFLAG_RW, &ip6_esp_net_deflev, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, + CTLFLAG_RW, &ip6_ah_trans_deflev, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, + CTLFLAG_RW, &ip6_ah_net_deflev, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_INBOUND_CALL_IKE, + inbound_call_ike, CTLFLAG_RW, &ip6_inbound_call_ike, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN, + ecn, CTLFLAG_RW, &ip6_ipsec_ecn, 0, ""); +SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG, + debug, CTLFLAG_RW, &ipsec_debug, 0, ""); +#endif /*__FreeBSD__*/ +#endif /* INET6 */ + +static int ipsec_setspidx_mbuf + __P((struct secpolicyindex *, u_int, u_int, struct mbuf *)); +static void ipsec4_setspidx_inpcb __P((struct mbuf *, struct inpcb *pcb)); +static void ipsec4_setspidx_ipaddr __P((struct mbuf *, struct secpolicyindex *)); +#if INET6 +static void ipsec6_get_ulp __P((struct mbuf *m, struct secpolicyindex *)); +static void ipsec6_setspidx_in6pcb __P((struct mbuf *, struct in6pcb *pcb)); +static void ipsec6_setspidx_ipaddr __P((struct mbuf *, struct secpolicyindex *)); +#endif +static struct inpcbpolicy *ipsec_newpcbpolicy __P((void)); +static void ipsec_delpcbpolicy __P((struct inpcbpolicy *)); +static struct secpolicy *ipsec_deepcopy_policy __P((struct secpolicy *src)); +static int ipsec_set_policy __P((struct secpolicy **pcb_sp, + int optname, caddr_t request, size_t len, int 
priv)); +static int ipsec_get_policy __P((struct secpolicy *pcb_sp, struct mbuf **mp)); +static void vshiftl __P((unsigned char *, int, int)); +static int ipsec_in_reject __P((struct secpolicy *, struct mbuf *)); +static size_t ipsec_hdrsiz __P((struct secpolicy *)); +static struct mbuf *ipsec4_splithdr __P((struct mbuf *)); +#if INET6 +static struct mbuf *ipsec6_splithdr __P((struct mbuf *)); +#endif +static int ipsec4_encapsulate __P((struct mbuf *, struct secasvar *)); +#if INET6 +static int ipsec6_encapsulate __P((struct mbuf *, struct secasvar *)); +#endif + +/* + * For OUTBOUND packet having a socket. Searching SPD for packet, + * and return a pointer to SP. + * OUT: NULL: no apropreate SP found, the following value is set to error. + * 0 : bypass + * EACCES : discard packet. + * ENOENT : ipsec_acquire() in progress, maybe. + * others : error occured. + * others: a pointer to SP + * + * NOTE: IPv6 mapped adddress concern is implemented here. + */ +struct secpolicy * +ipsec4_getpolicybysock(m, dir, so, error) + struct mbuf *m; + u_int dir; + struct socket *so; + int *error; +{ + struct inpcbpolicy *pcbsp = NULL; + struct secpolicy *currsp = NULL; /* policy on socket */ + struct secpolicy *kernsp = NULL; /* policy on kernel */ + + /* sanity check */ + if (m == NULL || so == NULL || error == NULL) + panic("ipsec4_getpolicybysock: NULL pointer was passed.\n"); + + switch (so->so_proto->pr_domain->dom_family) { + case AF_INET: + /* set spidx in pcb */ + ipsec4_setspidx_inpcb(m, sotoinpcb(so)); + pcbsp = sotoinpcb(so)->inp_sp; + break; +#if INET6 + case AF_INET6: + /* set spidx in pcb */ + ipsec6_setspidx_in6pcb(m, sotoin6pcb(so)); + pcbsp = sotoin6pcb(so)->in6p_sp; + break; +#endif + default: + panic("ipsec4_getpolicybysock: unsupported address family\n"); + } + + /* sanity check */ + if (pcbsp == NULL) + panic("ipsec4_getpolicybysock: pcbsp is NULL.\n"); + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined(__APPLE__) + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + 
printf("send: priv=%d ", pcbsp->priv); + if (so->so_cred) { + printf("p_ruid=%d ", so->so_cred->p_ruid); + printf("p_svuid=%d ", so->so_cred->p_svuid); + printf("cr_uid=%d\n", so->so_cred->pc_ucred->cr_uid); + }); +#endif + switch (dir) { + case IPSEC_DIR_INBOUND: + currsp = pcbsp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + currsp = pcbsp->sp_out; + break; + default: + panic("ipsec4_getpolicybysock: illegal direction.\n"); + } + + /* sanity check */ + if (currsp == NULL) + panic("ipsec4_getpolicybysock: currsp is NULL.\n"); + + /* when privilieged socket */ + if (pcbsp->priv) { + switch (currsp->policy) { + case IPSEC_POLICY_BYPASS: + currsp->refcnt++; + *error = 0; + return currsp; + + case IPSEC_POLICY_ENTRUST: + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_getpolicybysock called " + "to allocate SP:%p\n", kernsp)); + *error = 0; + return kernsp; + } + + /* no SP found */ + if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD + && ip4_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, + "fixed system default policy: %d->%d\n", + ip4_def_policy.policy, IPSEC_POLICY_NONE)); + ip4_def_policy.policy = IPSEC_POLICY_NONE; + } + ip4_def_policy.refcnt++; + *error = 0; + return &ip4_def_policy; + + case IPSEC_POLICY_IPSEC: + currsp->refcnt++; + *error = 0; + return currsp; + + default: + ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " + "Invalid policy for PCB %d\n", currsp->policy)); + *error = EINVAL; + return NULL; + } + /* NOTREACHED */ + } + + /* when non-privilieged socket */ + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_getpolicybysock called " + "to allocate SP:%p\n", kernsp)); + *error = 0; + return kernsp; + } + + /* no SP found */ + switch (currsp->policy) { + case IPSEC_POLICY_BYPASS: + 
ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " + "Illegal policy for non-priviliged defined %d\n", + currsp->policy)); + *error = EINVAL; + return NULL; + + case IPSEC_POLICY_ENTRUST: + if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD + && ip4_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, + "fixed system default policy: %d->%d\n", + ip4_def_policy.policy, IPSEC_POLICY_NONE)); + ip4_def_policy.policy = IPSEC_POLICY_NONE; + } + ip4_def_policy.refcnt++; + *error = 0; + return &ip4_def_policy; + + case IPSEC_POLICY_IPSEC: + currsp->refcnt++; + *error = 0; + return currsp; + + default: + ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " + "Invalid policy for PCB %d\n", currsp->policy)); + *error = EINVAL; + return NULL; + } + /* NOTREACHED */ +} + +/* + * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet, + * and return a pointer to SP. + * OUT: positive: a pointer to the entry for security policy leaf matched. + * NULL: no apropreate SP found, the following value is set to error. + * 0 : bypass + * EACCES : discard packet. + * ENOENT : ipsec_acquire() in progress, maybe. + * others : error occured. 
+ */ +struct secpolicy * +ipsec4_getpolicybyaddr(m, dir, flag, error) + struct mbuf *m; + u_int dir; + int flag; + int *error; +{ + struct secpolicy *sp = NULL; + + /* sanity check */ + if (m == NULL || error == NULL) + panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n"); + + { + struct secpolicyindex spidx; + + bzero(&spidx, sizeof(spidx)); + + /* make a index to look for a policy */ + *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m); + + if (*error != 0) + return NULL; + + sp = key_allocsp(&spidx, dir); + } + + /* SP found */ + if (sp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_getpolicybyaddr called " + "to allocate SP:%p\n", sp)); + *error = 0; + return sp; + } + + /* no SP found */ + if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD + && ip4_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n", + ip4_def_policy.policy, + IPSEC_POLICY_NONE)); + ip4_def_policy.policy = IPSEC_POLICY_NONE; + } + ip4_def_policy.refcnt++; + *error = 0; + return &ip4_def_policy; +} + +#if INET6 +/* + * For OUTBOUND packet having a socket. Searching SPD for packet, + * and return a pointer to SP. + * OUT: NULL: no apropreate SP found, the following value is set to error. + * 0 : bypass + * EACCES : discard packet. + * ENOENT : ipsec_acquire() in progress, maybe. + * others : error occured. 
+ * others: a pointer to SP + */ +struct secpolicy * +ipsec6_getpolicybysock(m, dir, so, error) + struct mbuf *m; + u_int dir; + struct socket *so; + int *error; +{ + struct inpcbpolicy *pcbsp = NULL; + struct secpolicy *currsp = NULL; /* policy on socket */ + struct secpolicy *kernsp = NULL; /* policy on kernel */ + + /* sanity check */ + if (m == NULL || so == NULL || error == NULL) + panic("ipsec6_getpolicybysock: NULL pointer was passed.\n"); + + /* set spidx in pcb */ + ipsec6_setspidx_in6pcb(m, sotoin6pcb(so)); + + pcbsp = sotoin6pcb(so)->in6p_sp; + + /* sanity check */ + if (pcbsp == NULL) + panic("ipsec6_getpolicybysock: pcbsp is NULL.\n"); + + switch (dir) { + case IPSEC_DIR_INBOUND: + currsp = pcbsp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + currsp = pcbsp->sp_out; + break; + default: + panic("ipsec6_getpolicybysock: illegal direction.\n"); + } + + /* sanity check */ + if (currsp == NULL) + panic("ipsec6_getpolicybysock: currsp is NULL.\n"); + + /* when privilieged socket */ + if (pcbsp->priv) { + switch (currsp->policy) { + case IPSEC_POLICY_BYPASS: + currsp->refcnt++; + *error = 0; + return currsp; + + case IPSEC_POLICY_ENTRUST: + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_getpolicybysock called " + "to allocate SP:%p\n", kernsp)); + *error = 0; + return kernsp; + } + + /* no SP found */ + if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD + && ip6_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, + "fixed system default policy: %d->%d\n", + ip6_def_policy.policy, IPSEC_POLICY_NONE)); + ip6_def_policy.policy = IPSEC_POLICY_NONE; + } + ip6_def_policy.refcnt++; + *error = 0; + return &ip6_def_policy; + + case IPSEC_POLICY_IPSEC: + currsp->refcnt++; + *error = 0; + return currsp; + + default: + ipseclog((LOG_ERR, "ipsec6_getpolicybysock: " + "Invalid policy for PCB %d\n", currsp->policy)); + *error = EINVAL; + 
return NULL; + } + /* NOTREACHED */ + } + + /* when non-privilieged socket */ + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_getpolicybysock called " + "to allocate SP:%p\n", kernsp)); + *error = 0; + return kernsp; + } + + /* no SP found */ + switch (currsp->policy) { + case IPSEC_POLICY_BYPASS: + ipseclog((LOG_ERR, "ipsec6_getpolicybysock: " + "Illegal policy for non-priviliged defined %d\n", + currsp->policy)); + *error = EINVAL; + return NULL; + + case IPSEC_POLICY_ENTRUST: + if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD + && ip6_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, + "fixed system default policy: %d->%d\n", + ip6_def_policy.policy, IPSEC_POLICY_NONE)); + ip6_def_policy.policy = IPSEC_POLICY_NONE; + } + ip6_def_policy.refcnt++; + *error = 0; + return &ip6_def_policy; + + case IPSEC_POLICY_IPSEC: + currsp->refcnt++; + *error = 0; + return currsp; + + default: + ipseclog((LOG_ERR, + "ipsec6_policybysock: Invalid policy for PCB %d\n", + currsp->policy)); + *error = EINVAL; + return NULL; + } + /* NOTREACHED */ +} + +/* + * For FORWADING packet or OUTBOUND without a socket. Searching SPD for packet, + * and return a pointer to SP. + * `flag' means that packet is to be forwarded whether or not. + * flag = 1: forwad + * OUT: positive: a pointer to the entry for security policy leaf matched. + * NULL: no apropreate SP found, the following value is set to error. + * 0 : bypass + * EACCES : discard packet. + * ENOENT : ipsec_acquire() in progress, maybe. + * others : error occured. 
+ */ +#ifndef IP_FORWARDING +#define IP_FORWARDING 1 +#endif + +struct secpolicy * +ipsec6_getpolicybyaddr(m, dir, flag, error) + struct mbuf *m; + u_int dir; + int flag; + int *error; +{ + struct secpolicy *sp = NULL; + + /* sanity check */ + if (m == NULL || error == NULL) + panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n"); + + { + struct secpolicyindex spidx; + + bzero(&spidx, sizeof(spidx)); + + /* make a index to look for a policy */ + *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m); + + if (*error != 0) + return NULL; + + sp = key_allocsp(&spidx, dir); + } + + /* SP found */ + if (sp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_getpolicybyaddr called " + "to allocate SP:%p\n", sp)); + *error = 0; + return sp; + } + + /* no SP found */ + if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD + && ip6_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n", + ip6_def_policy.policy, IPSEC_POLICY_NONE)); + ip6_def_policy.policy = IPSEC_POLICY_NONE; + } + ip6_def_policy.refcnt++; + *error = 0; + return &ip6_def_policy; +} +#endif /* INET6 */ + +/* + * set IP address into spidx from mbuf. + * When Forwarding packet and ICMP echo reply, this function is used. + * + * IN: get the followings from mbuf. + * protocol family, src, dst, next protocol + * OUT: + * 0: success. + * other: failure, and set errno. 
+ */ +int +ipsec_setspidx_mbuf(spidx, dir, family, m) + struct secpolicyindex *spidx; + u_int dir, family; + struct mbuf *m; +{ + struct sockaddr *sa1, *sa2; + + /* sanity check */ + if (spidx == NULL || m == NULL) + panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n"); + + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_setspidx_mbuf: begin\n"); kdebug_mbuf(m)); + + /* initialize */ + bzero(spidx, sizeof(*spidx)); + + spidx->dir = dir; + sa1 = (struct sockaddr *)&spidx->src; + sa2 = (struct sockaddr *)&spidx->dst; + sa1->sa_len = sa2->sa_len = _SALENBYAF(family); + sa1->sa_family = sa2->sa_family = family; + spidx->prefs = spidx->prefd = _INALENBYAF(family) << 3; + + { + /* sanity check for packet length. */ + struct mbuf *n; + int tlen; + + tlen = 0; + for (n = m; n; n = n->m_next) + tlen += n->m_len; + if (m->m_pkthdr.len != tlen) { + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_setspidx_mbuf: " + "total of m_len(%d) != pkthdr.len(%d), " + "ignored.\n", + tlen, m->m_pkthdr.len)); + goto bad; + } + } + + switch (family) { + case AF_INET: + { + struct ip *ip; + struct ip ipbuf; + + /* sanity check 1 for minimum ip header length */ + if (m->m_pkthdr.len < sizeof(struct ip)) { + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_setspidx_mbuf: " + "pkthdr.len(%d) < sizeof(struct ip), " + "ignored.\n", + m->m_pkthdr.len)); + goto bad; + } + + /* + * get IPv4 header packet. usually the mbuf is contiguous + * and we need no copies. + */ + if (m->m_len >= sizeof(*ip)) + ip = mtod(m, struct ip *); + else { + m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf); + ip = &ipbuf; + } + + /* some more checks on IPv4 header. 
*/ + bcopy(&ip->ip_src, _INADDRBYSA(&spidx->src), + sizeof(ip->ip_src)); + bcopy(&ip->ip_dst, _INADDRBYSA(&spidx->dst), + sizeof(ip->ip_dst)); + + spidx->ul_proto = ip->ip_p; + _INPORTBYSA(&spidx->src) = IPSEC_PORT_ANY; + _INPORTBYSA(&spidx->dst) = IPSEC_PORT_ANY; + break; + } + +#if INET6 + case AF_INET6: + { + struct ip6_hdr *ip6; + struct ip6_hdr ip6buf; + + /* sanity check 1 for minimum ip header length */ + if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) { + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_setspidx_mbuf: " + "pkthdr.len(%d) < sizeof(struct ip6_hdr), " + "ignored.\n", + m->m_pkthdr.len)); + goto bad; + } + + /* + * get IPv6 header packet. usually the mbuf is contiguous + * and we need no copies. + */ + if (m->m_len >= sizeof(*ip6)) + ip6 = mtod(m, struct ip6_hdr *); + else { + m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf); + ip6 = &ip6buf; + } + + /* some more checks on IPv4 header. */ + if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_setspidx_mbuf: " + "wrong ip version on packet " + "(expected IPv6), ignored.\n")); + goto bad; + } + + bcopy(&ip6->ip6_src, _INADDRBYSA(&spidx->src), + sizeof(ip6->ip6_src)); + bcopy(&ip6->ip6_dst, _INADDRBYSA(&spidx->dst), + sizeof(ip6->ip6_dst)); + + ipsec6_get_ulp(m, spidx); + break; + } +#endif /* INET6 */ + default: + panic("ipsec_secsecidx: no supported family passed.\n"); + } + + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_setspidx_mbuf: end\n"); + kdebug_secpolicyindex(spidx)); + + return 0; + + bad: + /* XXX initialize */ + bzero(spidx, sizeof(*spidx)); + return EINVAL; +} + +#if INET6 +/* + * Get upper layer protocol number and port number if there. + * Assumed all extension headers are in single mbuf. 
+ */ +#include +#include +static void +ipsec6_get_ulp(m, spidx) + struct mbuf *m; + struct secpolicyindex *spidx; +{ + int off, nxt; + + /* sanity check */ + if (m == NULL) + panic("ipsec6_get_ulp: NULL pointer was passed.\n"); + + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m)); + + /* set default */ + spidx->ul_proto = IPSEC_ULPROTO_ANY; + _INPORTBYSA(&spidx->src) = IPSEC_PORT_ANY; + _INPORTBYSA(&spidx->dst) = IPSEC_PORT_ANY; + + nxt = -1; + off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); + if (off < 0 || m->m_pkthdr.len < off) + return; + + switch (nxt) { + case IPPROTO_TCP: + spidx->ul_proto = nxt; + if (off + sizeof(struct tcphdr) <= m->m_pkthdr.len) { + struct tcphdr th; + m_copydata(m, off, sizeof(th), (caddr_t)&th); + _INPORTBYSA(&spidx->src) = th.th_sport; + _INPORTBYSA(&spidx->dst) = th.th_dport; + } + break; + case IPPROTO_UDP: + spidx->ul_proto = nxt; + if (off + sizeof(struct udphdr) <= m->m_pkthdr.len) { + struct udphdr uh; + m_copydata(m, off, sizeof(uh), (caddr_t)&uh); + _INPORTBYSA(&spidx->src) = uh.uh_sport; + _INPORTBYSA(&spidx->dst) = uh.uh_dport; + } + break; + case IPPROTO_ICMPV6: + spidx->ul_proto = nxt; + break; + default: + break; + } +} +#endif + +static void +ipsec4_setspidx_inpcb(m, pcb) + struct mbuf *m; + struct inpcb *pcb; +{ + struct secpolicyindex *spidx; + struct sockaddr *sa1, *sa2; + + /* sanity check */ + if (pcb == NULL) + panic("ipsec4_setspidx_inpcb: no PCB found.\n"); + if (pcb->inp_sp == NULL) + panic("ipsec4_setspidx_inpcb: no inp_sp found.\n"); + if (pcb->inp_sp->sp_out ==NULL || pcb->inp_sp->sp_in == NULL) + panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n"); + + bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx)); + bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx)); + + spidx = &pcb->inp_sp->sp_in->spidx; + spidx->dir = IPSEC_DIR_INBOUND; + sa1 = (struct sockaddr *)&spidx->src; + sa2 = (struct sockaddr *)&spidx->dst; + sa1->sa_len = sa2->sa_len = _SALENBYAF(AF_INET); + sa1->sa_family = 
sa2->sa_family = AF_INET; + spidx->prefs = _INALENBYAF(AF_INET) << 3; + spidx->prefd = _INALENBYAF(AF_INET) << 3; + spidx->ul_proto = pcb->inp_socket->so_proto->pr_protocol; + _INPORTBYSA(&spidx->src) = pcb->inp_fport; + _INPORTBYSA(&spidx->dst) = pcb->inp_lport; + ipsec4_setspidx_ipaddr(m, spidx); + + spidx = &pcb->inp_sp->sp_out->spidx; + spidx->dir = IPSEC_DIR_OUTBOUND; + sa1 = (struct sockaddr *)&spidx->src; + sa2 = (struct sockaddr *)&spidx->dst; + sa1->sa_len = sa2->sa_len = _SALENBYAF(AF_INET); + sa1->sa_family = sa2->sa_family = AF_INET; + spidx->prefs = _INALENBYAF(AF_INET) << 3; + spidx->prefd = _INALENBYAF(AF_INET) << 3; + spidx->ul_proto = pcb->inp_socket->so_proto->pr_protocol; + _INPORTBYSA(&spidx->src) = pcb->inp_lport; + _INPORTBYSA(&spidx->dst) = pcb->inp_fport; + ipsec4_setspidx_ipaddr(m, spidx); + + return; +} + +static void +ipsec4_setspidx_ipaddr(m, spidx) + struct mbuf *m; + struct secpolicyindex *spidx; +{ + struct ip *ip = NULL; + struct ip ipbuf; + + /* sanity check 1 for minimum ip header length */ + if (m == NULL) + panic("ipsec4_setspidx_ipaddr: m == 0 passed.\n"); + + if (m->m_pkthdr.len < sizeof(struct ip)) { + printf("ipsec4_setspidx_ipaddr: " + "pkthdr.len(%d) < sizeof(struct ip), " + "ignored.\n", + m->m_pkthdr.len); + return; + } + + if (m && m->m_len >= sizeof(*ip)) + ip = mtod(m, struct ip *); + else { + m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf); + ip = &ipbuf; + } + + bcopy(&ip->ip_src, _INADDRBYSA(&spidx->src), sizeof(ip->ip_src)); + bcopy(&ip->ip_dst, _INADDRBYSA(&spidx->dst), sizeof(ip->ip_dst)); + + return; +} + +#if INET6 +static void +ipsec6_setspidx_in6pcb(m, pcb) + struct mbuf *m; + struct in6pcb *pcb; +{ + struct secpolicyindex *spidx; + struct sockaddr *sa1, *sa2; + + /* sanity check */ + if (pcb == NULL) + panic("ipsec6_setspidx_in6pcb: no PCB found.\n"); + if (pcb->in6p_sp == NULL) + panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n"); + if (pcb->in6p_sp->sp_out ==NULL || pcb->in6p_sp->sp_in == NULL) + 
panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n"); + + bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx)); + bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx)); + + spidx = &pcb->in6p_sp->sp_in->spidx; + spidx->dir = IPSEC_DIR_INBOUND; + sa1 = (struct sockaddr *)&spidx->src; + sa2 = (struct sockaddr *)&spidx->dst; + sa1->sa_len = sa2->sa_len = _SALENBYAF(AF_INET6); + sa1->sa_family = sa2->sa_family = AF_INET6; + spidx->prefs = _INALENBYAF(AF_INET6) << 3; + spidx->prefd = _INALENBYAF(AF_INET6) << 3; + spidx->ul_proto = pcb->in6p_socket->so_proto->pr_protocol; + _INPORTBYSA(&spidx->src) = pcb->in6p_fport; + _INPORTBYSA(&spidx->dst) = pcb->in6p_lport; + ipsec6_setspidx_ipaddr(m, spidx); + + spidx = &pcb->in6p_sp->sp_out->spidx; + spidx->dir = IPSEC_DIR_OUTBOUND; + sa1 = (struct sockaddr *)&spidx->src; + sa2 = (struct sockaddr *)&spidx->dst; + sa1->sa_len = sa2->sa_len = _SALENBYAF(AF_INET6); + sa1->sa_family = sa2->sa_family = AF_INET6; + spidx->prefs = _INALENBYAF(AF_INET6) << 3; + spidx->prefd = _INALENBYAF(AF_INET6) << 3; + spidx->ul_proto = pcb->in6p_socket->so_proto->pr_protocol; + _INPORTBYSA(&spidx->src) = pcb->in6p_lport; + _INPORTBYSA(&spidx->dst) = pcb->in6p_fport; + ipsec6_setspidx_ipaddr(m, spidx); + + return; +} + +static void +ipsec6_setspidx_ipaddr(m, spidx) + struct mbuf *m; + struct secpolicyindex *spidx; +{ + struct ip6_hdr *ip6 = NULL; + struct ip6_hdr ip6buf; + + /* sanity check 1 for minimum ip header length */ + if (m == NULL) + panic("ipsec6_setspidx_in6pcb: m == 0 passed.\n"); + + if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) { + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec6_setspidx_ipaddr: " + "pkthdr.len(%d) < sizeof(struct ip6_hdr), " + "ignored.\n", + m->m_pkthdr.len)); + return; + } + + if (m->m_len >= sizeof(*ip6)) + ip6 = mtod(m, struct ip6_hdr *); + else { + m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf); + ip6 = &ip6buf; + } + + if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + 
printf("ipsec_setspidx_mbuf: " + "wrong ip version on packet " + "(expected IPv6), ignored.\n")); + return; + } + + bcopy(&ip6->ip6_src, _INADDRBYSA(&spidx->src), sizeof(ip6->ip6_src)); + bcopy(&ip6->ip6_dst, _INADDRBYSA(&spidx->dst), sizeof(ip6->ip6_dst)); + + return; +} +#endif + +static struct inpcbpolicy * +ipsec_newpcbpolicy() +{ + struct inpcbpolicy *p; + + p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT); + return p; +} + +static void +ipsec_delpcbpolicy(p) + struct inpcbpolicy *p; +{ + _FREE(p, M_SECA); +} + +/* initialize policy in PCB */ +int +ipsec_init_policy(so, pcb_sp) + struct socket *so; + struct inpcbpolicy **pcb_sp; +{ + struct inpcbpolicy *new; + + /* sanity check. */ + if (so == NULL || pcb_sp == NULL) + panic("ipsec_init_policy: NULL pointer was passed.\n"); + + new = ipsec_newpcbpolicy(); + if (new == NULL) { + ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n")); + return ENOBUFS; + } + bzero(new, sizeof(*new)); + +#if defined(__NetBSD__) || defined (__APPLE__) + if (so->so_uid == 0) /*XXX*/ + new->priv = 1; + else + new->priv = 0; +#elif defined(__FreeBSD__) && __FreeBSD__ >= 3 + if (so->so_cred != 0 && so->so_cred->pc_ucred->cr_uid == 0) + new->priv = 1; + else + new->priv = 0; + + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("init: priv=%d ", new->priv); + if (so->so_cred) { + printf("p_ruid=%d ", so->so_cred->p_ruid); + printf("p_svuid=%d ", so->so_cred->p_svuid); + printf("cr_uid=%d\n", so->so_cred->pc_ucred->cr_uid); + } else + printf("so_cred is NULL\n"); + ); +#else + new->priv = so->so_state & SS_PRIV; +#endif + + if ((new->sp_in = key_newsp()) == NULL) { + ipsec_delpcbpolicy(new); + return ENOBUFS; + } + new->sp_in->state = IPSEC_SPSTATE_ALIVE; + new->sp_in->policy = IPSEC_POLICY_ENTRUST; + + if ((new->sp_out = key_newsp()) == NULL) { + key_freesp(new->sp_in); + ipsec_delpcbpolicy(new); + return ENOBUFS; + } + new->sp_out->state = IPSEC_SPSTATE_ALIVE; + new->sp_out->policy = IPSEC_POLICY_ENTRUST; + + *pcb_sp = new; 
+ + return 0; +} + +/* copy old ipsec policy into new */ +int +ipsec_copy_policy(old, new) + struct inpcbpolicy *old, *new; +{ + struct secpolicy *sp; + + sp = ipsec_deepcopy_policy(old->sp_in); + if (sp) { + key_freesp(new->sp_in); + new->sp_in = sp; + } else + return ENOBUFS; + + sp = ipsec_deepcopy_policy(old->sp_out); + if (sp) { + key_freesp(new->sp_out); + new->sp_out = sp; + } else + return ENOBUFS; + + new->priv = old->priv; + + return 0; +} + +/* deep-copy a policy in PCB */ +static struct secpolicy * +ipsec_deepcopy_policy(src) + struct secpolicy *src; +{ + struct ipsecrequest *newchain = NULL; + struct ipsecrequest *p; + struct ipsecrequest **q; + struct ipsecrequest *r; + struct secpolicy *dst; + + dst = key_newsp(); + if (src == NULL || dst == NULL) + return NULL; + + /* + * deep-copy IPsec request chain. This is required since struct + * ipsecrequest is not reference counted. + */ + q = &newchain; + for (p = src->req; p; p = p->next) { + *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest), + M_SECA, M_NOWAIT); + if (*q == NULL) + goto fail; + bzero(*q, sizeof(**q)); + (*q)->next = NULL; + + (*q)->saidx.proto = p->saidx.proto; + (*q)->saidx.mode = p->saidx.mode; + (*q)->level = p->level; + (*q)->saidx.reqid = p->saidx.reqid; + + bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src)); + bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst)); + + (*q)->sav = NULL; + (*q)->sp = dst; + + q = &((*q)->next); + } + + dst->req = newchain; + dst->state = src->state; + dst->policy = src->policy; + /* do not touch the refcnt fields */ + + return dst; + +fail: + for (p = newchain; p; p = r) { + r = p->next; + _FREE(p, M_SECA); + p = NULL; + } + return NULL; +} + +/* set policy and ipsec request if present. 
*/ +static int +ipsec_set_policy(pcb_sp, optname, request, len, priv) + struct secpolicy **pcb_sp; + int optname; + caddr_t request; + size_t len; + int priv; +{ + struct sadb_x_policy *xpl; + struct secpolicy *newsp = NULL; + int error; + + /* sanity check. */ + if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) + return EINVAL; + if (len < sizeof(*xpl)) + return EINVAL; + xpl = (struct sadb_x_policy *)request; + + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_set_policy: passed policy\n"); + kdebug_sadb_x_policy((struct sadb_ext *)xpl)); + + /* check policy type */ + /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */ + if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD + || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) + return EINVAL; + + /* check privileged socket */ + if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) + return EACCES; + + /* allocation new SP entry */ + if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) + return error; + + newsp->state = IPSEC_SPSTATE_ALIVE; + + /* clear old SP and set new SP */ + key_freesp(*pcb_sp); + *pcb_sp = newsp; + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_set_policy: new policy\n"); + kdebug_secpolicy(newsp)); + + return 0; +} + +static int +ipsec_get_policy(pcb_sp, mp) + struct secpolicy *pcb_sp; + struct mbuf **mp; +{ + + /* sanity check. */ + if (pcb_sp == NULL || mp == NULL) + return EINVAL; + + *mp = key_sp2msg(pcb_sp); + if (!*mp) { + ipseclog((LOG_DEBUG, "ipsec_get_policy: No more memory.\n")); + return ENOBUFS; + } + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + (*mp)->m_type = MT_DATA; +#else + (*mp)->m_type = MT_SOOPTS; +#endif + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_get_policy:\n"); + kdebug_mbuf(*mp)); + + return 0; +} + +int +ipsec4_set_policy(inp, optname, request, len, priv) + struct inpcb *inp; + int optname; + caddr_t request; + size_t len; + int priv; +{ + struct sadb_x_policy *xpl; + struct secpolicy **pcb_sp; + + /* sanity check. 
*/ + if (inp == NULL || request == NULL) + return EINVAL; + if (len < sizeof(*xpl)) + return EINVAL; + xpl = (struct sadb_x_policy *)request; + + /* select direction */ + switch (xpl->sadb_x_policy_dir) { + case IPSEC_DIR_INBOUND: + pcb_sp = &inp->inp_sp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + pcb_sp = &inp->inp_sp->sp_out; + break; + default: + ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n", + xpl->sadb_x_policy_dir)); + return EINVAL; + } + + return ipsec_set_policy(pcb_sp, optname, request, len, priv); +} + +int +ipsec4_get_policy(inp, request, len, mp) + struct inpcb *inp; + caddr_t request; + size_t len; + struct mbuf **mp; +{ + struct sadb_x_policy *xpl; + struct secpolicy *pcb_sp; + + /* sanity check. */ + if (inp == NULL || request == NULL || mp == NULL) + return EINVAL; + if (inp->inp_sp == NULL) + panic("policy in PCB is NULL\n"); + if (len < sizeof(*xpl)) + return EINVAL; + xpl = (struct sadb_x_policy *)request; + + /* select direction */ + switch (xpl->sadb_x_policy_dir) { + case IPSEC_DIR_INBOUND: + pcb_sp = inp->inp_sp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + pcb_sp = inp->inp_sp->sp_out; + break; + default: + ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n", + xpl->sadb_x_policy_dir)); + return EINVAL; + } + + return ipsec_get_policy(pcb_sp, mp); +} + +/* delete policy in PCB */ +int +ipsec4_delete_pcbpolicy(inp) + struct inpcb *inp; +{ + /* sanity check. 
*/ + if (inp == NULL) + panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n"); + + if (inp->inp_sp == NULL) + return 0; + + if (inp->inp_sp->sp_in != NULL) { + key_freesp(inp->inp_sp->sp_in); + inp->inp_sp->sp_in = NULL; + } + + if (inp->inp_sp->sp_out != NULL) { + key_freesp(inp->inp_sp->sp_out); + inp->inp_sp->sp_out = NULL; + } + + ipsec_delpcbpolicy(inp->inp_sp); + inp->inp_sp = NULL; + + return 0; +} + +#if INET6 +int +ipsec6_set_policy(in6p, optname, request, len, priv) + struct in6pcb *in6p; + int optname; + caddr_t request; + size_t len; + int priv; +{ + struct sadb_x_policy *xpl; + struct secpolicy **pcb_sp; + + /* sanity check. */ + if (in6p == NULL || request == NULL) + return EINVAL; + if (len < sizeof(*xpl)) + return EINVAL; + xpl = (struct sadb_x_policy *)request; + + /* select direction */ + switch (xpl->sadb_x_policy_dir) { + case IPSEC_DIR_INBOUND: + pcb_sp = &in6p->in6p_sp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + pcb_sp = &in6p->in6p_sp->sp_out; + break; + default: + ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n", + xpl->sadb_x_policy_dir)); + return EINVAL; + } + + return ipsec_set_policy(pcb_sp, optname, request, len, priv); +} + +int +ipsec6_get_policy(in6p, request, len, mp) + struct in6pcb *in6p; + caddr_t request; + size_t len; + struct mbuf **mp; +{ + struct sadb_x_policy *xpl; + struct secpolicy *pcb_sp; + + /* sanity check. 
*/ + if (in6p == NULL || request == NULL || mp == NULL) + return EINVAL; + if (in6p->in6p_sp == NULL) + panic("policy in PCB is NULL\n"); + if (len < sizeof(*xpl)) + return EINVAL; + xpl = (struct sadb_x_policy *)request; + + /* select direction */ + switch (xpl->sadb_x_policy_dir) { + case IPSEC_DIR_INBOUND: + pcb_sp = in6p->in6p_sp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + pcb_sp = in6p->in6p_sp->sp_out; + break; + default: + ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n", + xpl->sadb_x_policy_dir)); + return EINVAL; + } + + return ipsec_get_policy(pcb_sp, mp); +} + +int +ipsec6_delete_pcbpolicy(in6p) + struct in6pcb *in6p; +{ + /* sanity check. */ + if (in6p == NULL) + panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n"); + + if (in6p->in6p_sp == NULL) + return 0; + + if (in6p->in6p_sp->sp_in != NULL) { + key_freesp(in6p->in6p_sp->sp_in); + in6p->in6p_sp->sp_in = NULL; + } + + if (in6p->in6p_sp->sp_out != NULL) { + key_freesp(in6p->in6p_sp->sp_out); + in6p->in6p_sp->sp_out = NULL; + } + + ipsec_delpcbpolicy(in6p->in6p_sp); + in6p->in6p_sp = NULL; + + return 0; +} +#endif + +/* + * return current level. + * Either IPSEC_LEVEL_USE or IPSEC_LEVEL_REQUIRE are always returned. + */ +u_int +ipsec_get_reqlevel(isr) + struct ipsecrequest *isr; +{ + u_int level = 0; + u_int esp_trans_deflev, esp_net_deflev, ah_trans_deflev, ah_net_deflev; + + /* sanity check */ + if (isr == NULL || isr->sp == NULL) + panic("ipsec_get_reqlevel: NULL pointer is passed.\n"); + if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family + != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) + panic("ipsec_get_reqlevel: family mismatched.\n"); + +/* XXX note that we have ipseclog() expanded here - code sync issue */ +#define IPSEC_CHECK_DEFAULT(lev) \ + (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \ + && (lev) != IPSEC_LEVEL_UNIQUE) \ + ? (ipsec_debug \ + ? 
log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\ + (lev), IPSEC_LEVEL_REQUIRE) \ + : 0), \ + (lev) = IPSEC_LEVEL_REQUIRE, \ + (lev) \ + : (lev)) + + /* set default level */ + switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) { +#if INET + case AF_INET: + esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_trans_deflev); + esp_net_deflev = IPSEC_CHECK_DEFAULT(ip4_esp_net_deflev); + ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_trans_deflev); + ah_net_deflev = IPSEC_CHECK_DEFAULT(ip4_ah_net_deflev); + break; +#endif +#if INET6 + case AF_INET6: + esp_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_trans_deflev); + esp_net_deflev = IPSEC_CHECK_DEFAULT(ip6_esp_net_deflev); + ah_trans_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_trans_deflev); + ah_net_deflev = IPSEC_CHECK_DEFAULT(ip6_ah_net_deflev); + break; +#endif /* INET6 */ + default: + panic("key_get_reqlevel: Unknown family. %d\n", + ((struct sockaddr *)&isr->sp->spidx.src)->sa_family); + } + +#undef IPSEC_CHECK_DEFAULT(lev) + + /* set level */ + switch (isr->level) { + case IPSEC_LEVEL_DEFAULT: + switch (isr->saidx.proto) { + case IPPROTO_ESP: + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) + level = esp_net_deflev; + else + level = esp_trans_deflev; + break; + case IPPROTO_AH: + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) + level = ah_net_deflev; + else + level = ah_trans_deflev; + case IPPROTO_IPCOMP: + /* + * we don't really care, as IPcomp document says that + * we shouldn't compress small packets + */ + level = IPSEC_LEVEL_USE; + break; + default: + panic("ipsec_get_reqlevel: " + "Illegal protocol defined %u\n", + isr->saidx.proto); + } + break; + + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + level = isr->level; + break; + case IPSEC_LEVEL_UNIQUE: + level = IPSEC_LEVEL_REQUIRE; + break; + + default: + panic("ipsec_get_reqlevel: Illegal IPsec level %u\n", + isr->level); + } + + return level; +} + +/* + * Check AH/ESP integrity. 
+ * OUT: + * 0: valid + * 1: invalid + */ +static int +ipsec_in_reject(sp, m) + struct secpolicy *sp; + struct mbuf *m; +{ + struct ipsecrequest *isr; + u_int level; + int need_auth, need_conf, need_icv; + + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec_in_reject: using SP\n"); + kdebug_secpolicy(sp)); + + /* check policy */ + switch (sp->policy) { + case IPSEC_POLICY_DISCARD: + return 1; + case IPSEC_POLICY_BYPASS: + case IPSEC_POLICY_NONE: + return 0; + + case IPSEC_POLICY_IPSEC: + break; + + case IPSEC_POLICY_ENTRUST: + default: + panic("ipsec_hdrsiz: Invalid policy found. %d\n", sp->policy); + } + + need_auth = 0; + need_conf = 0; + need_icv = 0; + + for (isr = sp->req; isr != NULL; isr = isr->next) { + + /* get current level */ + level = ipsec_get_reqlevel(isr); + + switch (isr->saidx.proto) { + case IPPROTO_ESP: + if (level == IPSEC_LEVEL_REQUIRE) { + need_conf++; + + if (isr->sav != NULL + && isr->sav->flags == SADB_X_EXT_NONE + && isr->sav->alg_auth != SADB_AALG_NONE) + need_icv++; + } + break; + case IPPROTO_AH: + if (level == IPSEC_LEVEL_REQUIRE) { + need_auth++; + need_icv++; + } + break; + case IPPROTO_IPCOMP: + /* + * we don't really care, as IPcomp document says that + * we shouldn't compress small packets + */ + break; + } + } + + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, + printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n", + need_auth, need_conf, need_icv, m->m_flags)); + + if ((need_conf && !(m->m_flags & M_DECRYPTED)) + || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM)) + || (need_auth && !(m->m_flags & M_AUTHIPHDR))) + return 1; + + return 0; +} + +/* + * Check AH/ESP integrity. + * This function is called from tcp_input(), udp_input(), + * and {ah,esp}4_input for tunnel mode + */ +int +ipsec4_in_reject_so(m, so) + struct mbuf *m; + struct socket *so; +{ + struct secpolicy *sp = NULL; + int error; + int result; + + /* sanity check */ + if (m == NULL) + return 0; /* XXX should be panic ? */ + + /* get SP for this packet. 
+ * When we are called from ip_forward(), we call + * ipsec4_getpolicybyaddr() with IP_FORWARDING flag. + */ + if (so == NULL) + sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error); + else + sp = ipsec4_getpolicybysock(m, IPSEC_DIR_INBOUND, so, &error); + + if (sp == NULL) + return 0; /* XXX should be panic ? + * -> No, there may be error. */ + + result = ipsec_in_reject(sp, m); + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_in_reject_so call free SP:%p\n", sp)); + key_freesp(sp); + + return result; +} + +int +ipsec4_in_reject(m, inp) + struct mbuf *m; + struct inpcb *inp; +{ + if (inp == NULL) + return ipsec4_in_reject_so(m, NULL); + else { + if (inp->inp_socket) + return ipsec4_in_reject_so(m, inp->inp_socket); + else + panic("ipsec4_in_reject: invalid inpcb/socket"); + } +} + +#if INET6 +/* + * Check AH/ESP integrity. + * This function is called from tcp6_input(), udp6_input(), + * and {ah,esp}6_input for tunnel mode + */ +int +ipsec6_in_reject_so(m, so) + struct mbuf *m; + struct socket *so; +{ + struct secpolicy *sp = NULL; + int error; + int result; + + /* sanity check */ + if (m == NULL) + return 0; /* XXX should be panic ? */ + + /* get SP for this packet. + * When we are called from ip_forward(), we call + * ipsec6_getpolicybyaddr() with IP_FORWARDING flag. + */ + if (so == NULL) + sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error); + else + sp = ipsec6_getpolicybysock(m, IPSEC_DIR_INBOUND, so, &error); + + if (sp == NULL) + return 0; /* XXX should be panic ? 
*/ + + result = ipsec_in_reject(sp, m); + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_in_reject_so call free SP:%p\n", sp)); + key_freesp(sp); + + return result; +} + +int +ipsec6_in_reject(m, in6p) + struct mbuf *m; + struct in6pcb *in6p; +{ + if (in6p == NULL) + return ipsec6_in_reject_so(m, NULL); + else { + if (in6p->in6p_socket) + return ipsec6_in_reject_so(m, in6p->in6p_socket); + else + panic("ipsec6_in_reject: invalid in6p/socket"); + } +} +#endif + +/* + * compute the byte size to be occupied by IPsec header. + * in case it is tunneled, it includes the size of outer IP header. + * NOTE: SP passed is free in this function. + */ +static size_t +ipsec_hdrsiz(sp) + struct secpolicy *sp; +{ + struct ipsecrequest *isr; + size_t siz, clen; + + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec_in_reject: using SP\n"); + kdebug_secpolicy(sp)); + + /* check policy */ + switch (sp->policy) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_BYPASS: + case IPSEC_POLICY_NONE: + return 0; + + case IPSEC_POLICY_IPSEC: + break; + + case IPSEC_POLICY_ENTRUST: + default: + panic("ipsec_hdrsiz: Invalid policy found. 
%d\n", sp->policy); + } + + siz = 0; + + for (isr = sp->req; isr != NULL; isr = isr->next) { + + clen = 0; + + switch (isr->saidx.proto) { + case IPPROTO_ESP: +#if IPSEC_ESP + clen = esp_hdrsiz(isr); +#else + clen = 0; /*XXX*/ +#endif + break; + case IPPROTO_AH: + clen = ah_hdrsiz(isr); + break; + case IPPROTO_IPCOMP: + clen = sizeof(struct ipcomp); + break; + } + + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { + switch (((struct sockaddr *)&isr->saidx.dst)->sa_family) { + case AF_INET: + clen += sizeof(struct ip); + break; +#if INET6 + case AF_INET6: + clen += sizeof(struct ip6_hdr); + break; +#endif + default: + ipseclog((LOG_ERR, "ipsec_hdrsiz: " + "unknown AF %d in IPsec tunnel SA\n", + ((struct sockaddr *)&isr->saidx.dst)->sa_family)); + break; + } + } + siz += clen; + } + + return siz; +} + +/* This function is called from ip_forward() and ipsec4_hdrsize_tcp(). */ +size_t +ipsec4_hdrsiz(m, dir, inp) + struct mbuf *m; + u_int dir; + struct inpcb *inp; +{ + struct secpolicy *sp = NULL; + int error; + size_t size; + + /* sanity check */ + if (m == NULL) + return 0; /* XXX should be panic ? */ + if (inp != NULL && inp->inp_socket == NULL) + panic("ipsec4_hdrsize: why is socket NULL but there is PCB."); + + /* get SP for this packet. + * When we are called from ip_forward(), we call + * ipsec4_getpolicybyaddr() with IP_FORWARDING flag. + */ + if (inp == NULL) + sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error); + else + sp = ipsec4_getpolicybysock(m, dir, inp->inp_socket, &error); + + if (sp == NULL) + return 0; /* XXX should be panic ? 
*/ + + size = ipsec_hdrsiz(sp); + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_hdrsiz call free SP:%p\n", sp)); + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec4_hdrsiz: size:%lu.\n", (unsigned long)size)); + key_freesp(sp); + + return size; +} + +#if INET6 +/* This function is called from ipsec6_hdrsize_tcp(), + * and maybe from ip6_forward.() + */ +size_t +ipsec6_hdrsiz(m, dir, in6p) + struct mbuf *m; + u_int dir; + struct in6pcb *in6p; +{ + struct secpolicy *sp = NULL; + int error; + size_t size; + + /* sanity check */ + if (m == NULL) + return 0; /* XXX shoud be panic ? */ + if (in6p != NULL && in6p->in6p_socket == NULL) + panic("ipsec6_hdrsize: why is socket NULL but there is PCB."); + + /* get SP for this packet */ + /* XXX Is it right to call with IP_FORWARDING. */ + if (in6p == NULL) + sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error); + else + sp = ipsec6_getpolicybysock(m, dir, in6p->in6p_socket, &error); + + if (sp == NULL) + return 0; + size = ipsec_hdrsiz(sp); + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_hdrsiz call free SP:%p\n", sp)); + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec6_hdrsiz: size:%lu.\n", (unsigned long)size)); + key_freesp(sp); + + return size; +} +#endif /*INET6*/ + +#if INET +/* + * encapsulate for ipsec tunnel. + * ip->ip_src must be fixed later on. + */ +static int +ipsec4_encapsulate(m, sav) + struct mbuf *m; + struct secasvar *sav; +{ + struct ip *oip; + struct ip *ip; + size_t hlen; + size_t plen; + + /* can't tunnel between different AFs */ + if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family + || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) { + m_freem(m); + return EINVAL; + } +#if 0 + /* XXX if the dst is myself, perform nothing. 
*/ + if (key_ismyaddr(AF_INET, _INADDRBYSA(&sav->sah->saidx.dst))) { + m_freem(m); + return EINVAL; + } +#endif + + if (m->m_len < sizeof(*ip)) + panic("ipsec4_encapsulate: assumption failed (first mbuf length)"); + + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + + if (m->m_len != hlen) + panic("ipsec4_encapsulate: assumption failed (first mbuf length)"); + + /* generate header checksum */ + ip->ip_sum = 0; +#ifdef _IP_VHL + ip->ip_sum = in_cksum(m, hlen); +#else + ip->ip_sum = in_cksum(m, hlen); +#endif + + plen = m->m_pkthdr.len; + + /* + * grow the mbuf to accomodate the new IPv4 header. + * NOTE: IPv4 options will never be copied. + */ + if (M_LEADINGSPACE(m->m_next) < hlen) { + struct mbuf *n; + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + m_freem(m); + return ENOBUFS; + } + n->m_len = hlen; + n->m_next = m->m_next; + m->m_next = n; + m->m_pkthdr.len += hlen; + oip = mtod(n, struct ip *); + } else { + m->m_next->m_len += hlen; + m->m_next->m_data -= hlen; + m->m_pkthdr.len += hlen; + oip = mtod(m->m_next, struct ip *); + } + ip = mtod(m, struct ip *); + ovbcopy((caddr_t)ip, (caddr_t)oip, hlen); + m->m_len = sizeof(struct ip); + m->m_pkthdr.len -= (hlen - sizeof(struct ip)); + + /* construct new IPv4 header. see RFC 2401 5.1.2.1 */ + /* ECN consideration. 
*/ + ip_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &oip->ip_tos); +#ifdef _IP_VHL + ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2); +#else + ip->ip_hl = sizeof(struct ip) >> 2; +#endif + ip->ip_off &= htons(~IP_OFFMASK); + ip->ip_off &= htons(~IP_MF); + switch (ip4_ipsec_dfbit) { + case 0: /*clear DF bit*/ + ip->ip_off &= htons(~IP_DF); + break; + case 1: /*set DF bit*/ + ip->ip_off |= htons(IP_DF); + break; + default: /*copy DF bit*/ + break; + } + ip->ip_p = IPPROTO_IPIP; + if (plen + sizeof(struct ip) < IP_MAXPACKET) + ip->ip_len = htons(plen + sizeof(struct ip)); + else { + ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: " + "leave ip_len as is (invalid packet)\n")); + } + ip->ip_id = htons(ip_id++); + bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr, + &ip->ip_src, sizeof(ip->ip_src)); + bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr, + &ip->ip_dst, sizeof(ip->ip_dst)); + + /* XXX Should ip_src be updated later ? */ + + return 0; +} +#endif /*INET*/ + +#if INET6 +static int +ipsec6_encapsulate(m, sav) + struct mbuf *m; + struct secasvar *sav; +{ + struct ip6_hdr *oip6; + struct ip6_hdr *ip6; + size_t plen; + + /* can't tunnel between different AFs */ + if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family + || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) { + m_freem(m); + return EINVAL; + } +#if 0 + /* XXX if the dst is myself, perform nothing. */ + if (key_ismyaddr(AF_INET6, _INADDRBYSA(&sav->sah->saidx.dst))) { + m_freem(m); + return EINVAL; + } +#endif + + plen = m->m_pkthdr.len; + + /* + * grow the mbuf to accomodate the new IPv6 header. 
+ */ + if (m->m_len != sizeof(struct ip6_hdr)) + panic("ipsec6_encapsulate: assumption failed (first mbuf length)"); + if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) { + struct mbuf *n; + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + m_freem(m); + return ENOBUFS; + } + n->m_len = sizeof(struct ip6_hdr); + n->m_next = m->m_next; + m->m_next = n; + m->m_pkthdr.len += sizeof(struct ip6_hdr); + oip6 = mtod(n, struct ip6_hdr *); + } else { + m->m_next->m_len += sizeof(struct ip6_hdr); + m->m_next->m_data -= sizeof(struct ip6_hdr); + m->m_pkthdr.len += sizeof(struct ip6_hdr); + oip6 = mtod(m->m_next, struct ip6_hdr *); + } + ip6 = mtod(m, struct ip6_hdr *); + ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr)); + + /* Fake link-local scope-class addresses */ + if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) + oip6->ip6_src.s6_addr16[1] = 0; + if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) + oip6->ip6_dst.s6_addr16[1] = 0; + + /* construct new IPv6 header. see RFC 2401 5.1.2.2 */ + /* ECN consideration. */ + ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow); + if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) + ip6->ip6_plen = htons(plen); + else { + /* ip6->ip6_plen will be updated in ip6_output() */ + } + ip6->ip6_nxt = IPPROTO_IPV6; + bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr, + &ip6->ip6_src, sizeof(ip6->ip6_src)); + bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr, + &ip6->ip6_dst, sizeof(ip6->ip6_dst)); + + /* XXX Should ip6_src be updated later ? */ + + return 0; +} +#endif /*INET6*/ + +/* + * Check the variable replay window. + * ipsec_chkreplay() performs replay check before ICV verification. + * ipsec_updatereplay() updates replay bitmap. This must be called after + * ICV verification (it also performs replay check, which is usually done + * beforehand). + * 0 (zero) is returned if packet disallowed, 1 if packet permitted. + * + * based on RFC 2401. 
+ */ +int +ipsec_chkreplay(seq, sav) + u_int32_t seq; + struct secasvar *sav; +{ + const struct secreplay *replay; + u_int32_t diff; + int fr; + u_int32_t wsizeb; /* constant: bits of window size */ + int frlast; /* constant: last frame */ + + /* sanity check */ + if (sav == NULL) + panic("ipsec_chkreplay: NULL pointer was passed.\n"); + + replay = sav->replay; + + if (replay->wsize == 0) + return 1; /* no need to check replay. */ + + /* constant */ + frlast = replay->wsize - 1; + wsizeb = replay->wsize << 3; + + /* sequence number of 0 is invalid */ + if (seq == 0) + return 0; + + /* first time is always okay */ + if (replay->count == 0) + return 1; + + if (seq > replay->lastseq) { + /* larger sequences are okay */ + return 1; + } else { + /* seq is equal or less than lastseq. */ + diff = replay->lastseq - seq; + + /* over range to check, i.e. too old or wrapped */ + if (diff >= wsizeb) + return 0; + + fr = frlast - diff / 8; + + /* this packet already seen ? */ + if ((replay->bitmap)[fr] & (1 << (diff % 8))) + return 0; + + /* out of order but good */ + return 1; + } +} + +/* + * check replay counter whether to update or not. + * OUT: 0: OK + * 1: NG + */ +int +ipsec_updatereplay(seq, sav) + u_int32_t seq; + struct secasvar *sav; +{ + struct secreplay *replay; + u_int32_t diff; + int fr; + u_int32_t wsizeb; /* constant: bits of window size */ + int frlast; /* constant: last frame */ + + /* sanity check */ + if (sav == NULL) + panic("ipsec_chkreplay: NULL pointer was passed.\n"); + + replay = sav->replay; + + if (replay->wsize == 0) + goto ok; /* no need to check replay. */ + + /* constant */ + frlast = replay->wsize - 1; + wsizeb = replay->wsize << 3; + + /* sequence number of 0 is invalid */ + if (seq == 0) + return 1; + + /* first time */ + if (replay->count == 0) { + replay->lastseq = seq; + bzero(replay->bitmap, replay->wsize); + (replay->bitmap)[frlast] = 1; + goto ok; + } + + if (seq > replay->lastseq) { + /* seq is larger than lastseq. 
*/ + diff = seq - replay->lastseq; + + /* new larger sequence number */ + if (diff < wsizeb) { + /* In window */ + /* set bit for this packet */ + vshiftl(replay->bitmap, diff, replay->wsize); + (replay->bitmap)[frlast] |= 1; + } else { + /* this packet has a "way larger" */ + bzero(replay->bitmap, replay->wsize); + (replay->bitmap)[frlast] = 1; + } + replay->lastseq = seq; + + /* larger is good */ + } else { + /* seq is equal or less than lastseq. */ + diff = replay->lastseq - seq; + + /* over range to check, i.e. too old or wrapped */ + if (diff >= wsizeb) + return 1; + + fr = frlast - diff / 8; + + /* this packet already seen ? */ + if ((replay->bitmap)[fr] & (1 << (diff % 8))) + return 1; + + /* mark as seen */ + (replay->bitmap)[fr] |= (1 << (diff % 8)); + + /* out of order but good */ + } + +ok: + if (replay->count == ~0) { + + /* set overflow flag */ + replay->overflow++; + + /* don't increment, no more packets accepted */ + if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) + return 1; + + ipseclog((LOG_WARNING, "replay counter made %d cycle. %s\n", + replay->overflow, ipsec_logsastr(sav))); + } + + replay->count++; + + return 0; +} + +/* + * shift variable length bunffer to left. + * IN: bitmap: pointer to the buffer + * nbit: the number of to shift. + * wsize: buffer size (bytes). + */ +static void +vshiftl(bitmap, nbit, wsize) + unsigned char *bitmap; + int nbit, wsize; +{ + int s, j, i; + unsigned char over; + + for (j = 0; j < nbit; j += 8) { + s = (nbit - j < 8) ? 
(nbit - j): 8; + bitmap[0] <<= s; + for (i = 1; i < wsize; i++) { + over = (bitmap[i] >> (8 - s)); + bitmap[i] <<= s; + bitmap[i-1] |= over; + } + } + + return; +} + +const char * +ipsec4_logpacketstr(ip, spi) + struct ip *ip; + u_int32_t spi; +{ + static char buf[256]; + char *p; + u_int8_t *s, *d; + + s = (u_int8_t *)(&ip->ip_src); + d = (u_int8_t *)(&ip->ip_dst); + + p = buf; + snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi)); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), "src=%d.%d.%d.%d", + s[0], s[1], s[2], s[3]); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), " dst=%d.%d.%d.%d", + d[0], d[1], d[2], d[3]); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), ")"); + + return buf; +} + +#if INET6 +const char * +ipsec6_logpacketstr(ip6, spi) + struct ip6_hdr *ip6; + u_int32_t spi; +{ + static char buf[256]; + char *p; + + p = buf; + snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi)); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), "src=%s", + ip6_sprintf(&ip6->ip6_src)); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), " dst=%s", + ip6_sprintf(&ip6->ip6_dst)); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), ")"); + + return buf; +} +#endif /*INET6*/ + +const char * +ipsec_logsastr(sav) + struct secasvar *sav; +{ + static char buf[256]; + char *p; + struct secasindex *saidx = &sav->sah->saidx; + + /* validity check */ + if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) + panic("ipsec_logsastr: family mismatched.\n"); + + p = buf; + snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi)); + while (p && *p) + p++; + if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) { + u_int8_t *s, *d; + s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr; + d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr; + snprintf(p, sizeof(buf) - (p - 
buf), + "src=%d.%d.%d.%d dst=%d.%d.%d.%d", + s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]); + } +#if INET6 + else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) { + snprintf(p, sizeof(buf) - (p - buf), + "src=%s", + ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr)); + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), + " dst=%s", + ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr)); + } +#endif + while (p && *p) + p++; + snprintf(p, sizeof(buf) - (p - buf), ")"); + + return buf; +} + +void +ipsec_dumpmbuf(m) + struct mbuf *m; +{ + int totlen; + int i; + u_char *p; + + totlen = 0; + printf("---\n"); + while (m) { + p = mtod(m, u_char *); + for (i = 0; i < m->m_len; i++) { + printf("%02x ", p[i]); + totlen++; + if (totlen % 16 == 0) + printf("\n"); + } + m = m->m_next; + } + if (totlen % 16 != 0) + printf("\n"); + printf("---\n"); +} + +/* + * IPsec output logic for IPv4. + */ +int +ipsec4_output(state, sp, flags) + struct ipsec_output_state *state; + struct secpolicy *sp; + int flags; +{ + struct ip *ip = NULL; + struct ipsecrequest *isr = NULL; + struct secasindex saidx; + int s; + int error; +#if IPSEC_SRCSEL + struct in_ifaddr *ia; +#endif + struct sockaddr_in *dst4; + struct sockaddr *sa; + + if (!state) + panic("state == NULL in ipsec4_output"); + if (!state->m) + panic("state->m == NULL in ipsec4_output"); + if (!state->ro) + panic("state->ro == NULL in ipsec4_output"); + if (!state->dst) + panic("state->dst == NULL in ipsec4_output"); + + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec4_output: applyed SP\n"); + kdebug_secpolicy(sp)); + + for (isr = sp->req; isr != NULL; isr = isr->next) { + +#if 0 /* give up to check restriction of transport mode */ + /* XXX but should be checked somewhere */ + /* + * some of the IPsec operation must be performed only in + * originating case. 
+ */ + if (isr->saidx.mode == IPSEC_MODE_TRANSPORT + && (flags & IP_FORWARDING)) + continue; +#endif + + /* make SA index for search proper SA */ + ip = mtod(state->m, struct ip *); + bcopy(&isr->saidx, &saidx, sizeof(saidx)); + sa = (struct sockaddr *)&saidx.src; + if (sa->sa_len == 0) { + sa->sa_len = _SALENBYAF(AF_INET); + sa->sa_family = AF_INET; + _INPORTBYSA(&saidx.src) = IPSEC_PORT_ANY; + bcopy(&ip->ip_src, _INADDRBYSA(&saidx.src), + sizeof(ip->ip_src)); + } + sa = (struct sockaddr *)&saidx.dst; + if (sa->sa_len == 0) { + sa->sa_len = _SALENBYAF(AF_INET); + sa->sa_family = AF_INET; + _INPORTBYSA(&saidx.dst) = IPSEC_PORT_ANY; + bcopy(&ip->ip_dst, _INADDRBYSA(&saidx.dst), + sizeof(ip->ip_dst)); + } + + if ((error = key_checkrequest(isr, &saidx)) != 0) { + /* + * IPsec processing is required, but no SA found. + * I assume that key_acquire() had been called + * to get/establish the SA. Here I discard + * this packet because it is responsibility for + * upper layer to retransmit the packet. + */ + ipsecstat.out_nosa++; + goto bad; + } + + /* validity check */ + if (isr->sav == NULL) { + switch (ipsec_get_reqlevel(isr)) { + case IPSEC_LEVEL_USE: + continue; + case IPSEC_LEVEL_REQUIRE: + /* must be not reached here. */ + panic("ipsec4_output: no SA found, but required."); + } + } + + /* + * If there is no valid SA, we give up to process any + * more. In such a case, the SA's status is changed + * from DYING to DEAD after allocating. If a packet + * send to the receiver by dead SA, the receiver can + * not decode a packet because SA has been dead. + */ + if (isr->sav->state != SADB_SASTATE_MATURE + && isr->sav->state != SADB_SASTATE_DYING) { + ipsecstat.out_nosa++; + error = EINVAL; + goto bad; + } + + /* + * There may be the case that SA status will be changed when + * we are refering to one. So calling splsoftnet(). + */ +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { + /* + * build IPsec tunnel. 
+ */ + /* XXX should be processed with other familiy */ + if (((struct sockaddr *)&isr->sav->sah->saidx.src)->sa_family != AF_INET) { + ipseclog((LOG_ERR, "ipsec4_output: " + "family mismatched between inner and outer spi=%u\n", + (u_int32_t)ntohl(isr->sav->spi))); + splx(s); + error = EAFNOSUPPORT; + goto bad; + } + + ip = mtod(state->m, struct ip *); + + state->m = ipsec4_splithdr(state->m); + if (!state->m) { + splx(s); + error = ENOMEM; + goto bad; + } + error = ipsec4_encapsulate(state->m, isr->sav); + splx(s); + if (error) { + state->m = NULL; + goto bad; + } + ip = mtod(state->m, struct ip *); + + state->ro = &isr->sav->sah->sa_route; + state->dst = (struct sockaddr *)&state->ro->ro_dst; + dst4 = (struct sockaddr_in *)state->dst; + if (state->ro->ro_rt + && ((state->ro->ro_rt->rt_flags & RTF_UP) == 0 + || dst4->sin_addr.s_addr != ip->ip_dst.s_addr)) { + RTFREE(state->ro->ro_rt); + bzero((caddr_t)state->ro, sizeof (*state->ro)); + } + if (state->ro->ro_rt == 0) { + dst4->sin_family = AF_INET; + dst4->sin_len = sizeof(*dst4); + dst4->sin_addr = ip->ip_dst; + rtalloc(state->ro); + } + if (state->ro->ro_rt == 0) { + ipstat.ips_noroute++; + error = EHOSTUNREACH; + goto bad; + } + +#if IPSEC_SRCSEL + /* + * Which address in SA or in routing table should I + * select from ? But I had set from SA at + * ipsec4_encapsulate(). 
+ */ + ia = (struct in_ifaddr *)(state->ro->ro_rt->rt_ifa); + if (state->ro->ro_rt->rt_flags & RTF_GATEWAY) { + state->dst = (struct sockaddr *)state->ro->ro_rt->rt_gateway; + dst4 = (struct sockaddr_in *)state->dst; + } + ip->ip_src = IA_SIN(ia)->sin_addr; +#endif + } else + splx(s); + + state->m = ipsec4_splithdr(state->m); + if (!state->m) { + error = ENOMEM; + goto bad; + } + switch (isr->saidx.proto) { + case IPPROTO_ESP: +#if IPSEC_ESP + if ((error = esp4_output(state->m, isr)) != 0) { + state->m = NULL; + goto bad; + } + break; +#else + m_freem(state->m); + state->m = NULL; + error = EINVAL; + goto bad; +#endif + case IPPROTO_AH: + if ((error = ah4_output(state->m, isr)) != 0) { + state->m = NULL; + goto bad; + } + break; + case IPPROTO_IPCOMP: + if ((error = ipcomp4_output(state->m, isr)) != 0) { + state->m = NULL; + goto bad; + } + break; + default: + ipseclog((LOG_ERR, + "ipsec4_output: unknown ipsec protocol %d\n", + isr->saidx.proto)); + m_freem(state->m); + state->m = NULL; + error = EINVAL; + goto bad; + } + + if (state->m == 0) { + error = ENOMEM; + goto bad; + } + ip = mtod(state->m, struct ip *); + } + + return 0; + +bad: + m_freem(state->m); + state->m = NULL; + return error; +} + +#if INET6 +/* + * IPsec output logic for IPv6, transport mode. 
+ */ +int +ipsec6_output_trans(state, nexthdrp, mprev, sp, flags, tun) + struct ipsec_output_state *state; + u_char *nexthdrp; + struct mbuf *mprev; + struct secpolicy *sp; + int flags; + int *tun; +{ + struct ip6_hdr *ip6; + struct ipsecrequest *isr = NULL; + struct secasindex saidx; + int error = 0; + int plen; + struct sockaddr *sa; + + if (!state) + panic("state == NULL in ipsec6_output"); + if (!state->m) + panic("state->m == NULL in ipsec6_output"); + if (!nexthdrp) + panic("nexthdrp == NULL in ipsec6_output"); + if (!mprev) + panic("mprev == NULL in ipsec6_output"); + if (!sp) + panic("sp == NULL in ipsec6_output"); + if (!tun) + panic("tun == NULL in ipsec6_output"); + + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec6_output_trans: applyed SP\n"); + kdebug_secpolicy(sp)); + + *tun = 0; + for (isr = sp->req; isr; isr = isr->next) { + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { + /* the rest will be handled by ipsec6_output_tunnel() */ + break; + } + + /* make SA index for search proper SA */ + ip6 = mtod(state->m, struct ip6_hdr *); + bcopy(&isr->saidx, &saidx, sizeof(saidx)); + sa = (struct sockaddr *)&saidx.src; + if (sa->sa_len == 0) { + sa->sa_len = _SALENBYAF(AF_INET6); + sa->sa_family = AF_INET6; + _INPORTBYSA(&saidx.src) = IPSEC_PORT_ANY; + bcopy(&ip6->ip6_src, _INADDRBYSA(&saidx.src), + sizeof(ip6->ip6_src)); + } + sa = (struct sockaddr *)&saidx.dst; + if (sa->sa_len == 0) { + sa->sa_len = _SALENBYAF(AF_INET6); + sa->sa_family = AF_INET6; + _INPORTBYSA(&saidx.dst) = IPSEC_PORT_ANY; + bcopy(&ip6->ip6_dst, _INADDRBYSA(&saidx.dst), + sizeof(ip6->ip6_dst)); + } + + if (key_checkrequest(isr, &saidx) == ENOENT) { + /* + * IPsec processing is required, but no SA found. + * I assume that key_acquire() had been called + * to get/establish the SA. Here I discard + * this packet because it is responsibility for + * upper layer to retransmit the packet. 
+ */ + ipsec6stat.out_nosa++; + error = ENOENT; + goto bad; + } + + /* validity check */ + if (isr->sav == NULL) { + switch (ipsec_get_reqlevel(isr)) { + case IPSEC_LEVEL_USE: + continue; + case IPSEC_LEVEL_REQUIRE: + /* must be not reached here. */ + panic("ipsec6_output_trans: no SA found, but required."); + } + } + + /* + * If there is no valid SA, we give up to process. + * see same place at ipsec4_output(). + */ + if (isr->sav->state != SADB_SASTATE_MATURE + && isr->sav->state != SADB_SASTATE_DYING) { + ipsec6stat.out_nosa++; + error = EINVAL; + goto bad; + } + + switch (isr->saidx.proto) { + case IPPROTO_ESP: +#if IPSEC_ESP + error = esp6_output(state->m, nexthdrp, mprev->m_next, isr); +#else + m_freem(state->m); + error = EINVAL; +#endif + break; + case IPPROTO_AH: + error = ah6_output(state->m, nexthdrp, mprev->m_next, isr); + break; + case IPPROTO_IPCOMP: + error = ipcomp6_output(state->m, nexthdrp, mprev->m_next, isr); + break; + default: + ipseclog((LOG_ERR, "ipsec6_output_trans: " + "unknown ipsec protocol %d\n", isr->saidx.proto)); + m_freem(state->m); + ipsec6stat.out_inval++; + error = EINVAL; + break; + } + if (error) { + state->m = NULL; + goto bad; + } + plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr); + if (plen > IPV6_MAXPACKET) { + ipseclog((LOG_ERR, "ipsec6_output_trans: " + "IPsec with IPv6 jumbogram is not supported\n")); + ipsec6stat.out_inval++; + error = EINVAL; /*XXX*/ + goto bad; + } + ip6 = mtod(state->m, struct ip6_hdr *); + ip6->ip6_plen = htons(plen); + } + + /* if we have more to go, we need a tunnel mode processing */ + if (isr != NULL) + *tun = 1; + + return 0; + +bad: + m_freem(state->m); + state->m = NULL; + return error; +} + +/* + * IPsec output logic for IPv6, tunnel mode. 
+ */ +int +ipsec6_output_tunnel(state, sp, flags) + struct ipsec_output_state *state; + struct secpolicy *sp; + int flags; +{ + struct ip6_hdr *ip6; + struct ipsecrequest *isr = NULL; + struct secasindex saidx; + int error = 0; + int plen; +#if IPSEC_SRCSEL + struct in6_addr *ia6; +#endif + struct sockaddr_in6* dst6; + int s; + + if (!state) + panic("state == NULL in ipsec6_output"); + if (!state->m) + panic("state->m == NULL in ipsec6_output"); + if (!sp) + panic("sp == NULL in ipsec6_output"); + + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("ipsec6_output_tunnel: applyed SP\n"); + kdebug_secpolicy(sp)); + + /* + * transport mode ipsec (before the 1st tunnel mode) is already + * processed by ipsec6_output_trans(). + */ + for (isr = sp->req; isr; isr = isr->next) { + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) + break; + } + + for (/*already initialized*/; isr; isr = isr->next) { + /* When tunnel mode, SA peers must be specified. */ + bcopy(&isr->saidx, &saidx, sizeof(saidx)); + if (key_checkrequest(isr, &saidx) == ENOENT) { + /* + * IPsec processing is required, but no SA found. + * I assume that key_acquire() had been called + * to get/establish the SA. Here I discard + * this packet because it is responsibility for + * upper layer to retransmit the packet. + */ + ipsec6stat.out_nosa++; + error = ENOENT; + goto bad; + } + + /* validity check */ + if (isr->sav == NULL) { + switch (ipsec_get_reqlevel(isr)) { + case IPSEC_LEVEL_USE: + continue; + case IPSEC_LEVEL_REQUIRE: + /* must be not reached here. */ + panic("ipsec6_output_tunnel: no SA found, but required."); + } + } + + /* + * If there is no valid SA, we give up to process. + * see same place at ipsec4_output(). + */ + if (isr->sav->state != SADB_SASTATE_MATURE + && isr->sav->state != SADB_SASTATE_DYING) { + ipsec6stat.out_nosa++; + error = EINVAL; + goto bad; + } + + /* + * There may be the case that SA status will be changed when + * we are refering to one. So calling splsoftnet(). 
+ */ +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { + /* + * build IPsec tunnel. + */ + /* XXX should be processed with other familiy */ + if (((struct sockaddr *)&isr->sav->sah->saidx.src)->sa_family != AF_INET6) { + ipseclog((LOG_ERR, "ipsec6_output_tunnel: " + "family mismatched between inner and outer, spi=%u\n", + (u_int32_t)ntohl(isr->sav->spi))); + splx(s); + ipsec6stat.out_inval++; + error = EAFNOSUPPORT; + goto bad; + } + + ip6 = mtod(state->m, struct ip6_hdr *); + + state->m = ipsec6_splithdr(state->m); + if (!state->m) { + splx(s); + ipsec6stat.out_nomem++; + error = ENOMEM; + goto bad; + } + error = ipsec6_encapsulate(state->m, isr->sav); + splx(s); + if (error) { + state->m = 0; + goto bad; + } + ip6 = mtod(state->m, struct ip6_hdr *); + + state->ro = &isr->sav->sah->sa_route; + state->dst = (struct sockaddr *)&state->ro->ro_dst; + dst6 = (struct sockaddr_in6 *)state->dst; + if (state->ro->ro_rt + && ((state->ro->ro_rt->rt_flags & RTF_UP) == 0 + || !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst))) { + RTFREE(state->ro->ro_rt); + bzero((caddr_t)state->ro, sizeof (*state->ro)); + } + if (state->ro->ro_rt == 0) { + bzero(dst6, sizeof(*dst6)); + dst6->sin6_family = AF_INET6; + dst6->sin6_len = sizeof(*dst6); + dst6->sin6_addr = ip6->ip6_dst; + rtalloc(state->ro); + } + if (state->ro->ro_rt == 0) { + ip6stat.ip6s_noroute++; + ipsec6stat.out_noroute++; + error = EHOSTUNREACH; + goto bad; + } +#if 0 /* XXX Is the following need ? */ + if (state->ro->ro_rt->rt_flags & RTF_GATEWAY) { + state->dst = (struct sockaddr *)state->ro->ro_rt->rt_gateway; + dst6 = (struct sockaddr_in6 *)state->dst; + } +#endif +#if IPSEC_SRCSEL + /* + * Which address in SA or in routing table should I + * select from ? But I had set from SA at + * ipsec6_encapsulate(). 
+ */ + ia6 = in6_selectsrc(dst6, NULL, NULL, + (struct route_in6 *)state->ro, + NULL, &error); + if (ia6 == NULL) { + ip6stat.ip6s_noroute++; + ipsec6stat.out_noroute++; + goto bad; + } + ip6->ip6_src = *ia6; +#endif + } else + splx(s); + + state->m = ipsec6_splithdr(state->m); + if (!state->m) { + ipsec6stat.out_nomem++; + error = ENOMEM; + goto bad; + } + ip6 = mtod(state->m, struct ip6_hdr *); + switch (isr->saidx.proto) { + case IPPROTO_ESP: +#if IPSEC_ESP + error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, isr); +#else + m_freem(state->m); + error = EINVAL; +#endif + break; + case IPPROTO_AH: + error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, isr); + break; + case IPPROTO_IPCOMP: + /* XXX code should be here */ + /*FALLTHROUGH*/ + default: + ipseclog((LOG_ERR, "ipsec6_output_tunnel: " + "unknown ipsec protocol %d\n", isr->saidx.proto)); + m_freem(state->m); + ipsec6stat.out_inval++; + error = EINVAL; + break; + } + if (error) { + state->m = NULL; + goto bad; + } + plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr); + if (plen > IPV6_MAXPACKET) { + ipseclog((LOG_ERR, "ipsec6_output_tunnel: " + "IPsec with IPv6 jumbogram is not supported\n")); + ipsec6stat.out_inval++; + error = EINVAL; /*XXX*/ + goto bad; + } + ip6 = mtod(state->m, struct ip6_hdr *); + ip6->ip6_plen = htons(plen); + } + + return 0; + +bad: + m_freem(state->m); + state->m = NULL; + return error; +} +#endif /*INET6*/ + +/* + * Chop IP header and option off from the payload. 
+ */ +static struct mbuf * +ipsec4_splithdr(m) + struct mbuf *m; +{ + struct mbuf *mh; + struct ip *ip; + int hlen; + + if (m->m_len < sizeof(struct ip)) + panic("ipsec4_splithdr: first mbuf too short"); + ip = mtod(m, struct ip *); +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + if (m->m_len > hlen) { + MGETHDR(mh, M_DONTWAIT, MT_HEADER); + if (!mh) { + m_freem(m); + return NULL; + } + M_COPY_PKTHDR(mh, m); + MH_ALIGN(mh, hlen); + m->m_flags &= ~M_PKTHDR; + m->m_len -= hlen; + m->m_data += hlen; + mh->m_next = m; + m = mh; + m->m_len = hlen; + bcopy((caddr_t)ip, mtod(m, caddr_t), hlen); + } else if (m->m_len < hlen) { + m = m_pullup(m, hlen); + if (!m) + return NULL; + } + return m; +} + +#if INET6 +static struct mbuf * +ipsec6_splithdr(m) + struct mbuf *m; +{ + struct mbuf *mh; + struct ip6_hdr *ip6; + int hlen; + + if (m->m_len < sizeof(struct ip6_hdr)) + panic("ipsec6_splithdr: first mbuf too short"); + ip6 = mtod(m, struct ip6_hdr *); + hlen = sizeof(struct ip6_hdr); + if (m->m_len > hlen) { + MGETHDR(mh, M_DONTWAIT, MT_HEADER); + if (!mh) { + m_freem(m); + return NULL; + } + M_COPY_PKTHDR(mh, m); + MH_ALIGN(mh, hlen); + m->m_flags &= ~M_PKTHDR; + m->m_len -= hlen; + m->m_data += hlen; + mh->m_next = m; + m = mh; + m->m_len = hlen; + bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen); + } else if (m->m_len < hlen) { + m = m_pullup(m, hlen); + if (!m) + return NULL; + } + return m; +} +#endif + +/* validate inbound IPsec tunnel packet. 
*/ +int +ipsec4_tunnel_validate(ip, nxt0, sav) + struct ip *ip; + u_int nxt0; + struct secasvar *sav; +{ + u_int8_t nxt = nxt0 & 0xff; + struct sockaddr_in *sin; + int hlen; + + if (nxt != IPPROTO_IPV4) + return 0; +#ifdef _IP_VHL + hlen = IP_VHL_HL(ip->ip_vhl) << 2; +#else + hlen = ip->ip_hl << 2; +#endif + if (hlen != sizeof(struct ip)) + return 0; + switch (((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) { + case AF_INET: + sin = (struct sockaddr_in *)&sav->sah->saidx.dst; + if (bcmp(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst)) != 0) + return 0; + break; +#if INET6 + case AF_INET6: + /* should be supported, but at this moment we don't. */ + /*FALLTHROUGH*/ +#endif + default: + return 0; + } + + return 1; +} + +#if INET6 +/* validate inbound IPsec tunnel packet. */ +int +ipsec6_tunnel_validate(ip6, nxt0, sav) + struct ip6_hdr *ip6; + u_int nxt0; + struct secasvar *sav; +{ + u_int8_t nxt = nxt0 & 0xff; + struct sockaddr_in6 *sin6; + + if (nxt != IPPROTO_IPV6) + return 0; + switch (((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) { + case AF_INET6: + sin6 = ((struct sockaddr_in6 *)&sav->sah->saidx.dst); + if (!IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &sin6->sin6_addr)) + return 0; + break; + case AF_INET: + /* should be supported, but at this moment we don't. */ + /*FALLTHROUGH*/ + default: + return 0; + } + + return 1; +} +#endif + +/* + * Make a mbuf chain for encryption. + * If the original mbuf chain contains a mbuf with a cluster, + * allocate a new cluster and copy the data to the new cluster. + * XXX: this hack is inefficient, but is necessary to handle cases + * of TCP retransmission... + */ +struct mbuf * +ipsec_copypkt(m) + struct mbuf *m; +{ + struct mbuf *n, **mpp, *mnew; + + for (n = m, mpp = &m; n; n = n->m_next) { + if (n->m_flags & M_EXT) { + /* + * Make a copy only if there are more than one references + * to the cluster. + * XXX: is this approach effective? 
+ */ + if ( +#if __bsdi__ + n->m_ext.ext_func || +#else + n->m_ext.ext_free || +#endif +#if __NetBSD__ + MCLISREFERENCED(n) +#else + mclrefcnt[mtocl(n->m_ext.ext_buf)] > 1 +#endif + ) + { + int remain, copied; + struct mbuf *mm; + + if (n->m_flags & M_PKTHDR) { + MGETHDR(mnew, M_DONTWAIT, MT_HEADER); + if (mnew == NULL) + goto fail; + mnew->m_pkthdr = n->m_pkthdr; +#if 0 + if (n->m_pkthdr.aux) { + mnew->m_pkthdr.aux = + m_copym(n->m_pkthdr.aux, + 0, M_COPYALL, M_DONTWAIT); + } +#endif + M_COPY_PKTHDR(mnew, n); + mnew->m_flags = n->m_flags & M_COPYFLAGS; + } + else { + MGET(mnew, M_DONTWAIT, MT_DATA); + if (mnew == NULL) + goto fail; + } + mnew->m_len = 0; + mm = mnew; + + /* + * Copy data. If we don't have enough space to + * store the whole data, allocate a cluster + * or additional mbufs. + * XXX: we don't use m_copyback(), since the + * function does not use clusters and thus is + * inefficient. + */ + remain = n->m_len; + copied = 0; + while(1) { + int len; + struct mbuf *mn; + + if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) + len = remain; + else { /* allocate a cluster */ + MCLGET(mm, M_DONTWAIT); + if (!(mm->m_flags & M_EXT)) { + m_free(mm); + goto fail; + } + len = remain < MCLBYTES ? + remain : MCLBYTES; + } + + bcopy(n->m_data + copied, mm->m_data, + len); + + copied += len; + remain -= len; + mm->m_len = len; + + if (remain <= 0) /* completed? 
*/ + break; + + /* need another mbuf */ + MGETHDR(mn, M_DONTWAIT, MT_HEADER); + if (mn == NULL) + goto fail; + mm->m_next = mn; + mm = mn; + } + + /* adjust chain */ + mm->m_next = m_free(n); + n = mm; + *mpp = mnew; + mpp = &n->m_next; + + continue; + } + } + *mpp = n; + mpp = &n->m_next; + } + + return(m); + fail: + m_freem(m); + return(NULL); +} + +void +ipsec_setsocket(m, so) + struct mbuf *m; + struct socket *so; +{ + struct mbuf *n; + + n = m_aux_find(m, AF_INET, IPPROTO_ESP); + if (so && !n) + n = m_aux_add(m, AF_INET, IPPROTO_ESP); + if (n) { + if (so) { + *mtod(n, struct socket **) = so; + /* + * XXX think again about it when we put decryption + * histrory into aux mbuf + */ + n->m_len = sizeof(struct socket *); + } else + m_aux_delete(m, n); + } +} + +struct socket * +ipsec_getsocket(m) + struct mbuf *m; +{ + struct mbuf *n; + + n = m_aux_find(m, AF_INET, IPPROTO_ESP); + if (n && n->m_len >= sizeof(struct socket *)) + return *mtod(n, struct socket **); + else + return NULL; +} + +#ifdef __bsdi__ +/* + * System control for IP + */ +u_char ipsecctlerrmap[PRC_NCMDS] = { + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, + ENOPROTOOPT +}; + +int *ipsec_sysvars[] = IPSECCTL_VARS; + +int +ipsec_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + if (name[0] >= IPSECCTL_MAXID) + return (EOPNOTSUPP); + + switch (name[0]) { + case IPSECCTL_STATS: + return sysctl_rdtrunc(oldp, oldlenp, newp, &ipsecstat, + sizeof(ipsecstat)); + case IPSECCTL_DEF_POLICY: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_NONE: + break; + default: + return EINVAL; + } + } + return (sysctl_int_arr(ipsec_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + case IPSECCTL_DEF_ESP_TRANSLEV: + case 
IPSECCTL_DEF_ESP_NETLEV: + case IPSECCTL_DEF_AH_TRANSLEV: + case IPSECCTL_DEF_AH_NETLEV: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + break; + default: + return EINVAL; + } + } + return (sysctl_int_arr(ipsec_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + default: + return (sysctl_int_arr(ipsec_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + } +} + +#if INET6 +/* + * System control for IP6 + */ +u_char ipsec6ctlerrmap[PRC_NCMDS] = { + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, + ENOPROTOOPT +}; + +int *ipsec6_sysvars[] = IPSEC6CTL_VARS; + +int +ipsec6_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + if (name[0] >= IPSECCTL_MAXID) /* xxx no 6 in this definition */ + return (EOPNOTSUPP); + + switch (name[0]) { + case IPSECCTL_STATS: /* xxx no 6 in this definition */ + return sysctl_rdtrunc(oldp, oldlenp, newp, &ipsec6stat, + sizeof(ipsec6stat)); + case IPSECCTL_DEF_POLICY: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_NONE: + break; + default: + return EINVAL; + } + } + return (sysctl_int_arr(ipsec6_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + case IPSECCTL_DEF_ESP_TRANSLEV: + case IPSECCTL_DEF_ESP_NETLEV: + case IPSECCTL_DEF_AH_TRANSLEV: + case IPSECCTL_DEF_AH_NETLEV: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + break; + default: + return EINVAL; + } + } + return (sysctl_int_arr(ipsec6_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + default: + return (sysctl_int_arr(ipsec6_sysvars, name, namelen, + oldp, oldlenp, newp, newlen)); + } +} +#endif /*INET6*/ +#endif /*__bsdi__*/ 
+ + +#if __NetBSD__ +/* + * System control for IPSEC + */ +u_char ipsecctlermap[PRC_NCMDS] = { + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, + ENOPROTOOPT +}; + +int *ipsec_sysvars[] = IPSECCTL_VARS; + +int +ipsec_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + /* All sysctl names at this level are terminal. */ + if (namelen != 1) + return ENOTDIR; + + /* common sanity checks */ + switch (name[0]) { + case IPSECCTL_DEF_ESP_TRANSLEV: + case IPSECCTL_DEF_ESP_NETLEV: + case IPSECCTL_DEF_AH_TRANSLEV: + case IPSECCTL_DEF_AH_NETLEV: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + break; + default: + return EINVAL; + } + } + } + + switch (name[0]) { + + case IPSECCTL_STATS: + return sysctl_struct(oldp, oldlenp, newp, newlen, + &ipsecstat, sizeof(ipsecstat)); + case IPSECCTL_DEF_POLICY: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_NONE: + break; + default: + return EINVAL; + } + } + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_def_policy.policy); + case IPSECCTL_DEF_ESP_TRANSLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_esp_trans_deflev); + case IPSECCTL_DEF_ESP_NETLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_esp_net_deflev); + case IPSECCTL_DEF_AH_TRANSLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_ah_trans_deflev); + case IPSECCTL_DEF_AH_NETLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_ah_net_deflev); + case IPSECCTL_INBOUND_CALL_IKE: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_inbound_call_ike); + case IPSECCTL_AH_CLEARTOS: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_ah_cleartos); + case IPSECCTL_AH_OFFSETMASK: + 
return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_ah_offsetmask); + case IPSECCTL_DFBIT: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip4_ipsec_dfbit); + case IPSECCTL_ECN: + return sysctl_int(oldp, oldlenp, newp, newlen, &ip4_ipsec_ecn); + case IPSECCTL_DEBUG: + return sysctl_int(oldp, oldlenp, newp, newlen, &ipsec_debug); + default: + return EOPNOTSUPP; + } + /* NOTREACHED */ +} + +#if INET6 +/* + * System control for IPSEC6 + */ +u_char ipsec6ctlermap[PRC_NCMDS] = { + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, + ENOPROTOOPT +}; + +int *ipsec6_sysvars[] = IPSEC6CTL_VARS; + +int +ipsec6_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + /* All sysctl names at this level are terminal. */ + if (namelen != 1) + return ENOTDIR; + + /* common sanity checks */ + switch (name[0]) { + case IPSECCTL_DEF_ESP_TRANSLEV: + case IPSECCTL_DEF_ESP_NETLEV: + case IPSECCTL_DEF_AH_TRANSLEV: + case IPSECCTL_DEF_AH_NETLEV: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + break; + default: + return EINVAL; + } + } + } + + switch (name[0]) { + + case IPSECCTL_STATS: + return sysctl_struct(oldp, oldlenp, newp, newlen, + &ipsec6stat, sizeof(ipsec6stat)); + case IPSECCTL_DEF_POLICY: + if (newp != NULL && newlen == sizeof(int)) { + switch (*(int *)newp) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_NONE: + break; + default: + return EINVAL; + } + } + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_def_policy.policy); + case IPSECCTL_DEF_ESP_TRANSLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_esp_trans_deflev); + case IPSECCTL_DEF_ESP_NETLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_esp_net_deflev); + case IPSECCTL_DEF_AH_TRANSLEV: + return sysctl_int(oldp, 
oldlenp, newp, newlen, + &ip6_ah_trans_deflev); + case IPSECCTL_DEF_AH_NETLEV: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_ah_net_deflev); + case IPSECCTL_INBOUND_CALL_IKE: + return sysctl_int(oldp, oldlenp, newp, newlen, + &ip6_inbound_call_ike); + case IPSECCTL_ECN: + return sysctl_int(oldp, oldlenp, newp, newlen, &ip6_ipsec_ecn); + case IPSECCTL_DEBUG: + return sysctl_int(oldp, oldlenp, newp, newlen, &ipsec_debug); + default: + return EOPNOTSUPP; + } + /* NOTREACHED */ +} +#endif /*INET6*/ + +#endif /* __NetBSD__ */ diff --git a/bsd/netinet6/ipsec.h b/bsd/netinet6/ipsec.h new file mode 100644 index 000000000..b800edc6f --- /dev/null +++ b/bsd/netinet6/ipsec.h @@ -0,0 +1,436 @@ +/* $KAME: ipsec.h,v 1.28 2000/03/15 13:07:57 sakane Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * IPsec controller part. + */ + +#ifndef _NETINET6_IPSEC_H_ +#define _NETINET6_IPSEC_H_ + +#include +#include + +#if KERNEL +/* + * Security Policy Index + * NOTE: Encure to be same address family and upper layer protocol. + * NOTE: ul_proto, port number, uid, gid: + * ANY: reserved for waldcard. + * 0 to (~0 - 1): is one of the number of each value. + */ +struct secpolicyindex { + u_int8_t dir; /* direction of packet flow, see blow */ + struct sockaddr_storage src; /* IP src address for SP */ + struct sockaddr_storage dst; /* IP dst address for SP */ + u_int8_t prefs; /* prefix length in bits for src */ + u_int8_t prefd; /* prefix length in bits for dst */ + u_int16_t ul_proto; /* upper layer Protocol */ +#ifdef notyet + uid_t uids; + uid_t uidd; + gid_t gids; + gid_t gidd; +#endif +}; + +/* Security Policy Data Base */ +struct secpolicy { + LIST_ENTRY(secpolicy) chain; + + int refcnt; /* reference count */ + struct secpolicyindex spidx; /* selector */ + u_int32_t id; /* It's unique number on the system. 
*/ + u_int state; /* 0: dead, others: alive */ +#define IPSEC_SPSTATE_DEAD 0 +#define IPSEC_SPSTATE_ALIVE 1 + + u_int policy; /* DISCARD, NONE or IPSEC, see keyv2.h */ + struct ipsecrequest *req; + /* pointer to the ipsec request tree, */ + /* if policy == IPSEC else this value == NULL.*/ +}; + +/* Request for IPsec */ +struct ipsecrequest { + struct ipsecrequest *next; + /* pointer to next structure */ + /* If NULL, it means the end of chain. */ + struct secasindex saidx;/* hint for search proper SA */ + /* if __ss_len == 0 then no address specified.*/ + u_int level; /* IPsec level defined below. */ + + struct secasvar *sav; /* place holder of SA for use */ + struct secpolicy *sp; /* back pointer to SP */ +}; + +/* security policy in PCB */ +struct inpcbpolicy { + struct secpolicy *sp_in; + struct secpolicy *sp_out; + int priv; /* privileged socket ? */ +}; + +/* SP acquiring list table. */ +struct secspacq { + LIST_ENTRY(secspacq) chain; + + struct secpolicyindex spidx; + + u_int32_t tick; /* for lifetime */ + int count; /* for lifetime */ + /* XXX: here is mbuf place holder to be sent ? */ +}; +#endif /*KERNEL*/ + +/* according to IANA assignment, port 0x0000 and proto 0xff are reserved. */ +#define IPSEC_PORT_ANY 0 +#define IPSEC_ULPROTO_ANY 255 +#define IPSEC_PROTO_ANY 255 + +/* mode of security protocol */ +/* NOTE: DON'T use IPSEC_MODE_ANY at SPD. It's only use in SAD */ +#define IPSEC_MODE_ANY 0 /* i.e. wildcard. */ +#define IPSEC_MODE_TRANSPORT 1 +#define IPSEC_MODE_TUNNEL 2 + +/* + * Direction of security policy. + * NOTE: Since INVALID is used just as flag. + * The other are used for loop counter too. + */ +#define IPSEC_DIR_ANY 0 +#define IPSEC_DIR_INBOUND 1 +#define IPSEC_DIR_OUTBOUND 2 +#define IPSEC_DIR_MAX 3 +#define IPSEC_DIR_INVALID 4 + +/* Policy level */ +/* + * IPSEC, ENTRUST and BYPASS are allowd for setsockopt() in PCB, + * DISCARD, IPSEC and NONE are allowd for setkey() in SPD. + * DISCARD and NONE are allowd for system default. 
+ */ +#define IPSEC_POLICY_DISCARD 0 /* discarding packet */ +#define IPSEC_POLICY_NONE 1 /* through IPsec engine */ +#define IPSEC_POLICY_IPSEC 2 /* do IPsec */ +#define IPSEC_POLICY_ENTRUST 3 /* consulting SPD if present. */ +#define IPSEC_POLICY_BYPASS 4 /* only for privileged socket. */ + +/* Security protocol level */ +#define IPSEC_LEVEL_DEFAULT 0 /* reference to system default */ +#define IPSEC_LEVEL_USE 1 /* use SA if present. */ +#define IPSEC_LEVEL_REQUIRE 2 /* require SA. */ +#define IPSEC_LEVEL_UNIQUE 3 /* unique SA. */ + +#define IPSEC_MANUAL_REQID_MAX 0x3fff + /* + * if security policy level == unique, this id + * indicate to a relative SA for use, else is + * zero. + * 1 - 0x3fff are reserved for manual keying. + * 0 are reserved for above reason. Others is + * for kernel use. + * Note that this id doesn't identify SA + * by only itself. + */ +#define IPSEC_REPLAYWSIZE 32 + +/* statistics for ipsec processing */ +struct ipsecstat { + u_quad_t in_success; /* succeeded inbound process */ + u_quad_t in_polvio; + /* security policy violation for inbound process */ + u_quad_t in_nosa; /* inbound SA is unavailable */ + u_quad_t in_inval; /* inbound processing failed due to EINVAL */ + u_quad_t in_nomem; /* inbound processing failed due to ENOBUFS */ + u_quad_t in_badspi; /* failed getting a SPI */ + u_quad_t in_ahreplay; /* AH replay check failed */ + u_quad_t in_espreplay; /* ESP replay check failed */ + u_quad_t in_ahauthsucc; /* AH authentication success */ + u_quad_t in_ahauthfail; /* AH authentication failure */ + u_quad_t in_espauthsucc; /* ESP authentication success */ + u_quad_t in_espauthfail; /* ESP authentication failure */ + u_quad_t in_esphist[256]; + u_quad_t in_ahhist[256]; + u_quad_t in_comphist[256]; + u_quad_t out_success; /* succeeded outbound process */ + u_quad_t out_polvio; + /* security policy violation for outbound process */ + u_quad_t out_nosa; /* outbound SA is unavailable */ + u_quad_t out_inval; /* outbound process failed due 
to EINVAL */ + u_quad_t out_nomem; /* inbound processing failed due to ENOBUFS */ + u_quad_t out_noroute; /* there is no route */ + u_quad_t out_esphist[256]; + u_quad_t out_ahhist[256]; + u_quad_t out_comphist[256]; +}; + +/* + * Definitions for IPsec & Key sysctl operations. + */ +/* + * Names for IPsec & Key sysctl objects + */ +#define IPSECCTL_STATS 1 /* stats */ +#define IPSECCTL_DEF_POLICY 2 +#define IPSECCTL_DEF_ESP_TRANSLEV 3 /* int; ESP transport mode */ +#define IPSECCTL_DEF_ESP_NETLEV 4 /* int; ESP tunnel mode */ +#define IPSECCTL_DEF_AH_TRANSLEV 5 /* int; AH transport mode */ +#define IPSECCTL_DEF_AH_NETLEV 6 /* int; AH tunnel mode */ +#define IPSECCTL_INBOUND_CALL_IKE 7 +#define IPSECCTL_AH_CLEARTOS 8 +#define IPSECCTL_AH_OFFSETMASK 9 +#define IPSECCTL_DFBIT 10 +#define IPSECCTL_ECN 11 +#define IPSECCTL_DEBUG 12 +#define IPSECCTL_MAXID 13 + +#define IPSECCTL_NAMES { \ + { 0, 0 }, \ + { 0, 0 }, \ + { "def_policy", CTLTYPE_INT }, \ + { "esp_trans_deflev", CTLTYPE_INT }, \ + { "esp_net_deflev", CTLTYPE_INT }, \ + { "ah_trans_deflev", CTLTYPE_INT }, \ + { "ah_net_deflev", CTLTYPE_INT }, \ + { "inbound_call_ike", CTLTYPE_INT }, \ + { "ah_cleartos", CTLTYPE_INT }, \ + { "ah_offsetmask", CTLTYPE_INT }, \ + { "dfbit", CTLTYPE_INT }, \ + { "ecn", CTLTYPE_INT }, \ + { "debug", CTLTYPE_INT }, \ +} + +#define IPSEC6CTL_NAMES { \ + { 0, 0 }, \ + { 0, 0 }, \ + { "def_policy", CTLTYPE_INT }, \ + { "esp_trans_deflev", CTLTYPE_INT }, \ + { "esp_net_deflev", CTLTYPE_INT }, \ + { "ah_trans_deflev", CTLTYPE_INT }, \ + { "ah_net_deflev", CTLTYPE_INT }, \ + { "inbound_call_ike", CTLTYPE_INT }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { 0, 0 }, \ + { "ecn", CTLTYPE_INT }, \ + { "debug", CTLTYPE_INT }, \ +} + +#ifdef __bsdi__ +#define IPSECCTL_VARS { \ + 0, \ + 0, \ + &ip4_def_policy.policy, \ + &ip4_esp_trans_deflev, \ + &ip4_esp_net_deflev, \ + &ip4_ah_trans_deflev, \ + &ip4_ah_net_deflev, \ + &ip4_inbound_call_ike, \ + &ip4_ah_cleartos, \ + &ip4_ah_offsetmask, \ + 
&ip4_ipsec_dfbit, \ + &ip4_ipsec_ecn, \ + &ipsec_debug, \ +} + +#define IPSEC6CTL_VARS { \ + 0, \ + 0, \ + &ip6_def_policy.policy, \ + &ip6_esp_trans_deflev, \ + &ip6_esp_net_deflev, \ + &ip6_ah_trans_deflev, \ + &ip6_ah_net_deflev, \ + &ip6_inbound_call_ike, \ + 0, \ + 0, \ + 0, \ + &ip6_ipsec_ecn, \ + &ipsec_debug, \ +} +#endif + +#if KERNEL +struct ipsec_output_state { + struct mbuf *m; + struct route *ro; + struct sockaddr *dst; +}; + +extern int ipsec_debug; + +#if INET +extern struct ipsecstat ipsecstat; +extern struct secpolicy ip4_def_policy; +extern int ip4_esp_trans_deflev; +extern int ip4_esp_net_deflev; +extern int ip4_ah_trans_deflev; +extern int ip4_ah_net_deflev; +extern int ip4_inbound_call_ike; +extern int ip4_ah_cleartos; +extern int ip4_ah_offsetmask; +extern int ip4_ipsec_dfbit; +extern int ip4_ipsec_ecn; +#endif + +#if INET6 +extern struct ipsecstat ipsec6stat; +extern struct secpolicy ip6_def_policy; +extern int ip6_esp_trans_deflev; +extern int ip6_esp_net_deflev; +extern int ip6_ah_trans_deflev; +extern int ip6_ah_net_deflev; +extern int ip6_inbound_call_ike; +extern int ip6_ipsec_ecn; +#endif + +#define ipseclog(x) do { if (ipsec_debug) log x; } while (0) + +extern struct secpolicy *ipsec4_getpolicybysock + __P((struct mbuf *, u_int, struct socket *, int *)); +extern struct secpolicy *ipsec4_getpolicybyaddr + __P((struct mbuf *, u_int, int, int *)); + +#if INET6 +extern struct secpolicy *ipsec6_getpolicybysock + __P((struct mbuf *, u_int, struct socket *, int *)); +extern struct secpolicy *ipsec6_getpolicybyaddr + __P((struct mbuf *, u_int, int, int *)); +#endif /*INET6*/ + +struct inpcb; +#if INET6 +struct in6pcb; +#endif +extern int ipsec_init_policy __P((struct socket *so, struct inpcbpolicy **)); +extern int ipsec_copy_policy + __P((struct inpcbpolicy *, struct inpcbpolicy *)); +extern u_int ipsec_get_reqlevel __P((struct ipsecrequest *)); + +extern int ipsec4_set_policy __P((struct inpcb *inp, int optname, + caddr_t request, size_t 
len, int priv)); +extern int ipsec4_get_policy __P((struct inpcb *inpcb, caddr_t request, + size_t len, struct mbuf **mp)); +extern int ipsec4_delete_pcbpolicy __P((struct inpcb *)); +extern int ipsec4_in_reject_so __P((struct mbuf *, struct socket *)); +extern int ipsec4_in_reject __P((struct mbuf *, struct inpcb *)); + +#if INET6 +extern int ipsec6_in_reject_so __P((struct mbuf *, struct socket *)); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +extern int ipsec6_delete_pcbpolicy __P((struct inpcb *)); +extern int ipsec6_set_policy __P((struct inpcb *inp, int optname, + caddr_t request, size_t len, int priv)); +extern int ipsec6_get_policy + __P((struct inpcb *inp, caddr_t request, size_t len, struct mbuf **mp)); +extern int ipsec6_in_reject __P((struct mbuf *, struct inpcb *)); +#else +extern int ipsec6_delete_pcbpolicy __P((struct in6pcb *)); +extern int ipsec6_set_policy __P((struct in6pcb *in6p, int optname, + caddr_t request, size_t len, int priv)); +extern int ipsec6_get_policy __P((struct in6pcb *in6p, caddr_t request, + size_t len, struct mbuf **mp)); +extern int ipsec6_in_reject __P((struct mbuf *, struct in6pcb *)); +#endif +#endif /*INET6*/ + +struct secas; +struct tcpcb; +struct tcp6cb; +extern int ipsec_chkreplay __P((u_int32_t, struct secasvar *)); +extern int ipsec_updatereplay __P((u_int32_t, struct secasvar *)); + +extern size_t ipsec4_hdrsiz __P((struct mbuf *, u_int, struct inpcb *)); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +extern size_t ipsec_hdrsiz_tcp __P((struct tcpcb *, int)); +#else +extern size_t ipsec4_hdrsiz_tcp __P((struct tcpcb *)); +#endif +#if INET6 +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +extern size_t ipsec6_hdrsiz __P((struct mbuf *, u_int, struct inpcb *)); +#else +extern size_t ipsec6_hdrsiz __P((struct mbuf *, u_int, struct in6pcb *)); +#if defined(__NetBSD__) && !defined(TCP6) +extern size_t ipsec6_hdrsiz_tcp __P((struct tcpcb *)); +#else 
+extern size_t ipsec6_hdrsiz_tcp __P((struct tcp6cb *)); +#endif +#endif +#endif + +struct ip; +#if INET6 +struct ip6_hdr; +#endif +extern const char *ipsec4_logpacketstr __P((struct ip *, u_int32_t)); +#if INET6 +extern const char *ipsec6_logpacketstr __P((struct ip6_hdr *, u_int32_t)); +#endif +extern const char *ipsec_logsastr __P((struct secasvar *)); + +extern void ipsec_dumpmbuf __P((struct mbuf *)); + +extern int ipsec4_output __P((struct ipsec_output_state *, struct secpolicy *, + int)); +#if INET6 +extern int ipsec6_output_trans __P((struct ipsec_output_state *, u_char *, + struct mbuf *, struct secpolicy *, int, int *)); +extern int ipsec6_output_tunnel __P((struct ipsec_output_state *, + struct secpolicy *, int)); +#endif +extern int ipsec4_tunnel_validate __P((struct ip *, u_int, struct secasvar *)); +#if INET6 +extern int ipsec6_tunnel_validate __P((struct ip6_hdr *, u_int, + struct secasvar *)); +#endif +extern struct mbuf *ipsec_copypkt __P((struct mbuf *)); +extern void ipsec_setsocket __P((struct mbuf *, struct socket *)); +extern struct socket *ipsec_getsocket __P((struct mbuf *)); + +#if defined(__bsdi__) || defined(__NetBSD__) +extern int ipsec_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); +extern int ipsec6_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); +#endif /* __bsdi__ || __NetBSD__ */ + +#endif /*KERNEL*/ + +#ifndef KERNEL +extern caddr_t ipsec_set_policy __P((char *policy, int buflen)); +extern int ipsec_get_policylen __P((caddr_t buf)); +extern char *ipsec_dump_policy __P((caddr_t buf, char *delimiter)); + +extern char *ipsec_strerror __P((void)); +#endif /*!KERNEL*/ + +#endif /*_NETINET6_IPSEC_H_*/ diff --git a/bsd/netinet6/mip6.c b/bsd/netinet6/mip6.c new file mode 100644 index 000000000..24c805913 --- /dev/null +++ b/bsd/netinet6/mip6.c @@ -0,0 +1,3156 @@ +/* $KAME: mip6.c,v 1.20 2000/03/18 03:05:38 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, 1998, 1999 and 2000 WIDE Project. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. 
+ * + * Author: Conny Larsson + * Mattias Pettersson + * + */ + +/* + * TODO: nuke calls to in6_control, it is not supposed to be called from + * softintr + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#if MIP6_DEBUG +#include +#include +#include +#include +#include +#endif + +#include + +int (*mip6_rec_ra_hook)(struct mbuf *, int) = 0; + +struct in6_addr * (*mip6_global_addr_hook)(struct in6_addr *) = 0; +struct mip6_subopt_hal * (*mip6_hal_dynamic_hook)(struct in6_addr *) = 0; +int (*mip6_write_config_data_ha_hook)(u_long, void *) = 0; +int (*mip6_clear_config_data_ha_hook)(u_long, void *) = 0; +int (*mip6_enable_func_ha_hook)(u_long, caddr_t) = 0; + +int (*mip6_rec_ba_hook)(struct mbuf *, int) = 0; +int (*mip6_rec_br_hook)(struct mbuf *, int) = 0; +void (*mip6_stop_bu_hook)(struct in6_addr *) = 0; +int (*mip6_write_config_data_mn_hook)(u_long, void *) = 0; +int (*mip6_clear_config_data_mn_hook)(u_long, caddr_t) = 0; +int (*mip6_enable_func_mn_hook)(u_long, caddr_t) = 0; + + +#if MIP6_DEBUG +int mip6_debug_is_enabled = 0; +#endif + + +/* Declaration of Global variables. */ +struct mip6_bc *mip6_bcq = NULL; /* First entry in BC list */ +struct mip6_na *mip6_naq = NULL; /* First entry in NA retrans. 
list */ +struct mip6_prefix *mip6_pq = NULL; /* Ptr to prefix queue */ +struct mip6_config mip6_config; /* Config parameters for MIPv6 */ +struct mip6_link_list *mip6_llq = NULL; /* List of links receiving RA's */ + + +#if 0 /* Phasing out MIP6_HA and MIP6_MN */ +#if MIP6_HA +u_int8_t mip6_module = MIP6_HA_MODULE; /* Info about loaded modules (HA) */ +#elif defined(MIP6_MN) +u_int8_t mip6_module = MIP6_MN_MODULE; /* Info about loaded modules (MN) */ +#else +u_int8_t mip6_module = 0; /* Info about loaded modules (CN) */ +#endif +#else /* 0 */ +u_int8_t mip6_module = 0; /* Info about loaded modules (CN) */ +#endif /* 0 */ + +extern struct ip6protosw mip6_tunnel_protosw; + + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +struct callout_handle mip6_timer_na_handle; +struct callout_handle mip6_timer_bc_handle; +struct callout_handle mip6_timer_prefix_handle; +#endif + + +/* Definitions of some costant IP6 addresses. */ +struct in6_addr in6addr_linklocal; +struct in6_addr in6addr_sitelocal; +struct in6_addr in6addr_aha_64; +struct in6_addr in6addr_aha_nn; + + +/* + ############################################################################## + # + # INITIALIZATION AND EXIT FUNCTIONS + # These functions are executed when the MIPv6 code is activated and de- + # activated respectively. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_init + * Description: Initialization of MIPv6 variables that must be initialized + * before the code is executed. + ****************************************************************************** + */ +void +mip6_init(void) +{ + static int mip6_init_done = 0; + + if (mip6_init_done) + return; + + /* Initialize global addresses. 
*/ + in6addr_linklocal.s6_addr32[0] = MIP6_ADDR_INT32_ULL; + in6addr_linklocal.s6_addr32[1] = 0x00000000; + in6addr_linklocal.s6_addr32[2] = 0x00000000; + in6addr_linklocal.s6_addr32[3] = 0x00000000; + + in6addr_sitelocal.s6_addr32[0] = MIP6_ADDR_INT32_USL; + in6addr_sitelocal.s6_addr32[1] = 0x00000000; + in6addr_sitelocal.s6_addr32[2] = 0x00000000; + in6addr_sitelocal.s6_addr32[3] = 0x00000000; + + in6addr_aha_64.s6_addr32[0] = 0x00000000; + in6addr_aha_64.s6_addr32[1] = 0xffffffff; + in6addr_aha_64.s6_addr32[2] = MIP6_ADDR_INT32_AHA2; + in6addr_aha_64.s6_addr32[3] = MIP6_ADDR_INT32_AHA1; + + in6addr_aha_nn.s6_addr32[0] = 0x00000000; + in6addr_aha_nn.s6_addr32[1] = 0xffffffff; + in6addr_aha_nn.s6_addr32[2] = 0xffffffff; + in6addr_aha_nn.s6_addr32[3] = MIP6_ADDR_INT32_AHA1; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + /* Initialize handle for timer functions. */ + callout_handle_init(&mip6_timer_na_handle); + callout_handle_init(&mip6_timer_bc_handle); + callout_handle_init(&mip6_timer_prefix_handle); +#endif + + /* Initialize global variable */ + bzero(&mip6_config, sizeof(struct mip6_config)); + + /* Set default values for MIP6 configuration parameters. */ + LIST_INIT(&mip6_config.fna_list); + + mip6_config.bu_lifetime = 600; + mip6_config.br_update = 60; + mip6_config.hr_lifetime = 3600; + mip6_config.enable_outq = 1; + + mip6_enable_hooks(MIP6_GENERIC_HOOKS); + mip6_enable_hooks(MIP6_CONFIG_HOOKS); + + mip6_init_done = 1; + printf("%s: MIP6 initialized\n", __FUNCTION__); +} + + + +/* + ****************************************************************************** + * Function: mip6_exit + * Description: This function is called when the module is unloaded (relesed) + * from the kernel. + ****************************************************************************** + */ +void +mip6_exit() +{ + struct mip6_na *nap, *nap_tmp; + struct mip6_bc *bcp, *bcp_nxt; + struct mip6_prefix *prefix; + int s; + + /* Cancel outstanding timeout function calls. 
*/ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_na, (void *)NULL, mip6_timer_na_handle); + untimeout(mip6_timer_bc, (void *)NULL , mip6_timer_bc_handle); + untimeout(mip6_timer_prefix, (void *)NULL, mip6_timer_prefix_handle); +#else + untimeout(mip6_timer_na, (void *)NULL); + untimeout(mip6_timer_bc, (void *)NULL); + untimeout(mip6_timer_prefix, (void *)NULL); +#endif + + /* Remove each entry in every queue. */ + s = splnet(); + for (nap = mip6_naq; nap;) { + nap_tmp = nap; + nap = nap->next; + _FREE(nap_tmp, M_TEMP); + } + mip6_naq = NULL; + + for (bcp = mip6_bcq; bcp;) { + mip6_bc_delete(bcp, &bcp_nxt); + bcp = bcp_nxt; + } + mip6_bcq = NULL; + + for (prefix = mip6_pq; prefix;) + prefix = mip6_prefix_delete(prefix); + mip6_pq = NULL; + splx(s); +} + + + +/* + ############################################################################## + # + # RECEIVING FUNCTIONS + # These functions receives the incoming IPv6 packet and further processing of + # the packet depends on the content in the packet. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_rec_ctrl_sig + * Description: This function receives incoming signals and calls the approp- + * riate function for further processing of the destination + * option. + * Ret value: 0 Everything is OK. + * IPPROTO_DONE Error code used when something went wrong. 
+ ****************************************************************************** + */ +int +mip6_rec_ctrl_sig(m_in, off) +struct mbuf *m_in; /* Mbuf containing the entire IPv6 packet */ +int off; /* Offset (bytes) from beginning of mbuf to start of + destination option */ +{ + register struct ip6_hdr *ip6; /* IPv6 header */ + int res; /* Result of function call */ + +#if MIP6_DEBUG + static int count = 0; + + count += 1; + mip6_debug("\nMIPv6 Start processing a control signal (%d)\n", count); +#endif + + res = 0; + if (mip6_inp == NULL) { + log(LOG_ERR, "%s: Variabel mip6_inp is NULL\n", + __FUNCTION__); + return IPPROTO_DONE; + } + ip6 = mtod(m_in, struct ip6_hdr *); + + /* Store necessary data from IPv6 header */ + mip6_inp->ip6_src = ip6->ip6_src; + mip6_inp->ip6_dst = ip6->ip6_dst; + + /* Process incoming signal (BU, BA, BR and/or Home Address option) */ + if (mip6_inp->optflag & MIP6_DSTOPT_BU) { + res = mip6_rec_bu(m_in, off); + if (res != 0) { +#if MIP6_DEBUG + mip6_debug("\nMIPv6 Error processing control " + "signal BU (%d)\n", count); +#endif + return res; + } + } + + if (MIP6_IS_MN_ACTIVE) { + if (mip6_inp->optflag & MIP6_DSTOPT_BA) { + if (mip6_rec_ba_hook) + res = (*mip6_rec_ba_hook)(m_in, off); + if (res != 0) { +#if MIP6_DEBUG + mip6_debug("\nMIPv6 Error processing control " + "signal BA (%d)\n", count); +#endif + return res; + } + } + } + + if (MIP6_IS_MN_ACTIVE) { + if (mip6_inp->optflag & MIP6_DSTOPT_BR) { + if (mip6_rec_br_hook) + res = (*mip6_rec_br_hook)(m_in, off); + if (res != 0) { +#if MIP6_DEBUG + mip6_debug("\nMIPv6 Error processing control " + "signal BR (%d)\n", count); +#endif + return res; + } + } + } + + if (mip6_inp->optflag & MIP6_DSTOPT_HA) + mip6_ha2srcaddr(m_in); + +#if MIP6_DEBUG + mip6_debug("\nMIPv6 Finished processing a control signal (%d)\n", + count); +#endif + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_icmp6_input + * Description: Every ICMP6 
message must be checked for errors. If a Router + * Advertisement is included the Home Agent List must be up- + * dated. + * The check of the Router Advertisement can not be done in + * function nd6_ra_input since this function only deals with + * configuration issues. + * Ret value: 0 Everything is OK. + * IPPROTO_DONE Error code used when something went wrong. + ****************************************************************************** + */ +int +mip6_icmp6_input(m, off) +struct mbuf *m; /* Mbuf containing the entire IPv6 packet */ +int off; /* Offset from start of mbuf to icmp6 message */ +{ + struct ip6_hdr *ip6; /* IPv6 header */ + struct ip6_hdr *ip6_icmp; /* IPv6 header in icmpv6 packet */ + struct icmp6_hdr *icmp6; /* ICMP6 header */ + struct mip6_bc *bcp; /* Binding Cache list entry */ + struct mip6_bc *bcp_nxt; /* Binding Cache list entry */ + struct nd_router_advert *ra; /* Router Advertisement */ + u_int8_t *err_ptr; /* Octet offset for error */ + int icmp6len, err_off, res = 0; + + ip6 = mtod(m, struct ip6_hdr *); + icmp6len = m->m_pkthdr.len - off; + icmp6 = (struct icmp6_hdr *)((caddr_t)ip6 + off); + + switch (icmp6->icmp6_type) { + case ICMP6_DST_UNREACH: + /* First we have to find the destination address + from the original IPv6 packet. Make sure that + the IPv6 packet is included in the ICMPv6 packet. */ + if ((off + sizeof(struct icmp6_hdr) + + sizeof(struct ip6_hdr)) >= m->m_pkthdr.len) + return 0; + + ip6_icmp = (struct ip6_hdr *) ((caddr_t)icmp6 + + sizeof(struct icmp6_hdr)); + + /* Remove BC entry if present */ + bcp = mip6_bc_find(&ip6_icmp->ip6_dst); + if (bcp && !bcp->hr_flag) + mip6_bc_delete(bcp, &bcp_nxt); + break; + + case ICMP6_PARAM_PROB: + if (icmp6->icmp6_code != ICMP6_PARAMPROB_OPTION) + break; + + /* First we have to find the destination address + from the original IPv6 packet. Make sure that + the ptr is within the ICMPv6 packet. 
*/ + err_off = ntohl(icmp6->icmp6_data32[0]); + if ((off + sizeof(struct icmp6_hdr) + err_off) >= + m->m_pkthdr.len) + return 0; + + ip6_icmp = (struct ip6_hdr *)((caddr_t)icmp6 + + sizeof(struct icmp6_hdr)); + + /* Check which option that failed */ + err_ptr = (u_int8_t *) ((caddr_t)icmp6 + + sizeof(struct icmp6_hdr) + + err_off); + + if (MIP6_IS_MN_ACTIVE && (*err_ptr == IP6OPT_BINDING_UPDATE)) { + if (mip6_stop_bu_hook) + (*mip6_stop_bu_hook)(&ip6_icmp->ip6_dst); + } + + if (*err_ptr == IP6OPT_HOME_ADDRESS) { + log(LOG_ERR, + "Node %s does not recognize Home Address option\n", + ip6_sprintf(&ip6_icmp->ip6_dst)); + /* The message is discarded by the icmp code. */ + } + break; + + case ND_ROUTER_ADVERT: + if (icmp6->icmp6_code != 0) + break; + if (icmp6len < sizeof(struct nd_router_advert)) + break; + + ra = (struct nd_router_advert *)icmp6; + if ((ra->nd_ra_flags_reserved & ND_RA_FLAG_HA) == 0) + break; + + if (mip6_rec_ra_hook) { + res = mip6_rec_ra_hook(m, off); + if (res) return res; + break; + } + } + return 0; +} + + + +/* + ############################################################################## + # + # CONTROL SIGNAL FUNCTIONS + # Functions for processing of incoming control signals (Binding Update and + # Home Address option). + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_rec_bu + * Description: Receive a Binding Update option and evaluate the contents. + * Ret value: 0 Everything is OK. + * IPPROTO_DONE Error code used when something went wrong. 
+ ****************************************************************************** + */ +int +mip6_rec_bu(m_in, off) +struct mbuf *m_in; /* Mbuf containing the entire IPv6 packet */ +int off; /* Offset from start of mbuf to start of dest option */ +{ + struct in6_addr *src_addr; /* Src addr for HA sending BU */ + struct mip6_subopt_hal *hal; /* Home Agents List sub-option */ + struct mip6_bc *bcp; /* Binding Cache list entry */ + struct mip6_bc *bcp_nxt; + struct in6_addr *coa; /* COA of the MN sending the BU */ + struct mip6_subbuf *subbuf; /* Buffer containing sub-options */ + struct in6_addr ll_allnode; /* Link local all nodes address */ + u_int32_t min_time; /* Minimum lifetime to be sent in BA */ + u_long na_flags = 0; /* Flags for NA message */ + int send_na; /* If node becomes HA for MN, broadcast NA */ + int res, error; + u_int8_t rtr; +#if MIP6_DEBUG + u_int8_t var; + int offset, ii; +#endif + + subbuf = NULL; + + /* Find the care-of address used by the MN when sending the BU. */ + if (mip6_inp->coa) + coa = &mip6_inp->coa->coa; + else + coa = &mip6_inp->ip6_src; + + /* Make sure that the BU contains a valid AH or ESP header. */ +#if IPSEC +#ifndef __OpenBSD__ + if ( !((m_in->m_flags & M_AUTHIPHDR && m_in->m_flags & M_AUTHIPDGM) || + (m_in->m_flags & M_AUTHIPDGM && m_in->m_flags & M_DECRYPTED))) { + ip6stat.ip6s_badoptions++; + log(LOG_INFO, + "%s: No AH or ESP header in BU from host %s\n", + __FUNCTION__, + ip6_sprintf(coa)); + return IPPROTO_DONE; + } +#endif +#endif + + /* Make sure that the BU contains a valid Home Address option. */ + if ((mip6_inp->optflag & MIP6_DSTOPT_HA) == 0) { + ip6stat.ip6s_badoptions++; + log(LOG_INFO, + "%s: No Home Address option included in BU from host %s\n", + __FUNCTION__, ip6_sprintf(coa)); + return IPPROTO_DONE; + } + + /* Make sure that the length field in the BU is >= 8. 
*/ + if (mip6_inp->bu_opt->len < IP6OPT_BULEN) { + ip6stat.ip6s_badoptions++; + log(LOG_INFO, + "%s: Length field to short (%d) in BU from host %s\n", + __FUNCTION__, mip6_inp->bu_opt->len, ip6_sprintf(coa)); + return IPPROTO_DONE; + } + + /* The sequence no in the BU must be greater than or equal to the + sequence number in the previous BU recieved (modulo 2^^16). */ + send_na = 0; + bcp = mip6_bc_find(&mip6_inp->ha_opt->home_addr); + if (bcp != NULL) { + if (MIP6_LEQ(mip6_inp->bu_opt->seqno, bcp->seqno)) { + ip6stat.ip6s_badoptions++; + log(LOG_INFO, + "%s: Received sequence number (%d) <= " + "current (%d) in BU from host %s\n", + __FUNCTION__, mip6_inp->bu_opt->seqno, + bcp->seqno, ip6_sprintf(coa)); + return IPPROTO_DONE; + } + if (!bcp->hr_flag) + send_na = 1; + } else + send_na = 1; + +#if MIP6_DEBUG + mip6_debug("\nReceived Binding Update\n"); + mip6_debug("IP Header Src: %s\n", + ip6_sprintf(&mip6_inp->ip6_src)); + mip6_debug("IP Header Dst: %s\n", + ip6_sprintf(&mip6_inp->ip6_dst)); + mip6_debug("Type/Length/Flags: %x / %u / ", + mip6_inp->bu_opt->type, mip6_inp->bu_opt->len); + if (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG) + mip6_debug("A "); + if (mip6_inp->bu_opt->flags & MIP6_BU_HFLAG) + mip6_debug("H "); + if (mip6_inp->bu_opt->flags & MIP6_BU_RFLAG) + mip6_debug("R "); + mip6_debug("\n"); + mip6_debug("Seq no/Life time: %u / %u\n", + mip6_inp->bu_opt->seqno, + mip6_inp->bu_opt->lifetime); + mip6_debug("Prefix length: %u\n", + mip6_inp->bu_opt->prefix_len); + + if (mip6_inp->bu_opt->len > IP6OPT_BULEN) { + offset = mip6_opt_offset(m_in, off, IP6OPT_BINDING_UPDATE); + if (offset == 0) goto end_debug; + + mip6_debug("Sub-options present (TLV coded)\n"); + for (ii = IP6OPT_BULEN; ii < mip6_inp->bu_opt->len; ii++) { + if ((ii - IP6OPT_BULEN) % 16 == 0) + mip6_debug("\t0x:"); + if ((ii - IP6OPT_BULEN) % 4 == 0) + mip6_debug(" "); + m_copydata(m_in, offset + 2 + ii, sizeof(var), + (caddr_t)&var); + mip6_debug("%02x", var); + if ((ii - IP6OPT_BULEN + 1) % 16 
== 0) + mip6_debug("\n"); + } + if ((ii - IP6OPT_BULEN) % 16) + mip6_debug("\n"); + } + end_debug: +#endif + + /* Shall Dynamic Home Agent Address Discovery be performed? */ + src_addr = NULL; + hal = NULL; + + if (MIP6_IS_HA_ACTIVE) { + if ((mip6_inp->ip6_dst.s6_addr8[15] & 0x7f) == + MIP6_ADDR_ANYCAST_HA) { + if (mip6_global_addr_hook) + src_addr = (*mip6_global_addr_hook) + (&mip6_inp->ip6_dst); + if (src_addr == NULL) { + log(LOG_ERR, + "%s: No global source address found\n", + __FUNCTION__); + return IPPROTO_DONE; + } + + if (mip6_hal_dynamic_hook) + hal = (*mip6_hal_dynamic_hook)(src_addr); + if (mip6_store_subopt(&subbuf, (caddr_t)hal)) { + if (subbuf) _FREE(subbuf, M_TEMP); + return IPPROTO_DONE; + } + error = mip6_send_ba(src_addr, + &mip6_inp->ha_opt->home_addr, + coa, subbuf, MIP6_BA_STATUS_DHAAD, + mip6_inp->bu_opt->seqno, 0); + return error; + } + } + + /* Check if BU includes Unique Identifier sub-option is present. */ + /* XXX Code have to be added. */ + + /* Check if this is a request to cache a binding for the MN. */ + if ((mip6_inp->bu_opt->lifetime != 0) && + (! IN6_ARE_ADDR_EQUAL(&mip6_inp->ha_opt->home_addr, coa))) { + /* The request to cache the binding depends on if the H-bit + is set or not in the BU. */ + error = 0; + if (mip6_inp->bu_opt->flags & MIP6_BU_HFLAG) { + /* The H-bit is set. Register the primary coa. Is the + node is a router implementing HA functionality */ + if ((!ip6_forwarding || !MIP6_IS_HA_ACTIVE) && + (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG)) { + error = mip6_send_ba( + &mip6_inp->ip6_dst, + &mip6_inp->ha_opt->home_addr, + coa, NULL, MIP6_BA_STATUS_HOMEREGNOSUP, + mip6_inp->bu_opt->seqno, 0); + return error; + } + + /* Verify that the home address is an on-link IPv6 + address and that the prefix length is correct. 
*/ + res = mip6_addr_on_link(&mip6_inp->ha_opt->home_addr, + mip6_inp->bu_opt->prefix_len); + if ((res != 0) && + (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG)) { + error = mip6_send_ba( + &mip6_inp->ip6_dst, + &mip6_inp->ha_opt->home_addr, + coa, NULL, res, + mip6_inp->bu_opt->seqno, 0); + return error; + } + + /* Other reject reasons may be added, e.g. + insufficient resources to serve a MN. */ + /* XXX Code may be added. */ + + /* The BU is OK and this node becomes the HA for + the MN. Find out which lifetime to use in the BA */ + min_time = mip6_min_lifetime( + &mip6_inp->ha_opt->home_addr, + mip6_inp->bu_opt->prefix_len); + min_time = min(min_time, + mip6_inp->bu_opt->lifetime); + + /* Create a new or update an existing BC entry. */ + rtr = mip6_inp->bu_opt->flags & MIP6_BU_RFLAG; + bcp = mip6_bc_find(&mip6_inp->ha_opt->home_addr); + if (bcp) + mip6_bc_update(bcp, coa, min_time, 1, rtr, + mip6_inp->bu_opt->prefix_len, + mip6_inp->bu_opt->seqno, + bcp->info, bcp->lasttime); + else { + bcp = mip6_bc_create( + &mip6_inp->ha_opt->home_addr, + coa, min_time, 1, rtr, + mip6_inp->bu_opt->prefix_len, + mip6_inp->bu_opt->seqno); + if (bcp == NULL) + return IPPROTO_DONE; + } + + /* Send a BA to the mobile node if the A-bit is + set in the BU. */ + if (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG) { + error = mip6_send_ba(&mip6_inp->ip6_dst, + &bcp->home_addr, + &bcp->coa, + NULL, + MIP6_BA_STATUS_ACCEPT, + bcp->seqno, + bcp->lifetime); + if (error) + return error; + } + + /* The HA shall act as a proxy for the MN while it + is at a FN. Create a new or move an existing + tunnel to the MN. 
*/ + error = mip6_tunnel(&mip6_inp->ip6_dst, + &bcp->coa, + MIP6_TUNNEL_MOVE, MIP6_NODE_HA, + (void *)bcp); + if (error) + return IPPROTO_DONE; + error = mip6_proxy(&bcp->home_addr, + &mip6_inp->ip6_dst, RTM_ADD); + if (error) { +#if MIP6_DEBUG + mip6_debug("%s: set proxy error = %d\n", + __FUNCTION__, error); +#endif + return IPPROTO_DONE; + } + + /* Create a NA for the MN if the HA did not already + have a BC entry for this MN marked as a "home + registration". + The first NA will be sent in the create function, + the remaining NAs are sent by the timer function. */ + if (send_na) { + ll_allnode = in6addr_linklocal_allnodes; + na_flags |= ND_NA_FLAG_OVERRIDE; + if (mip6_inp->bu_opt->flags & MIP6_BU_RFLAG) + na_flags |= ND_NA_FLAG_ROUTER; + + mip6_na_create(&mip6_inp->ha_opt->home_addr, + &ll_allnode, + &mip6_inp->ha_opt->home_addr, + mip6_inp->bu_opt->prefix_len, + na_flags, 1); + } + } else { + /* The H-bit is NOT set. Request to cache a binding. + Create a new or update an existing BC entry. */ + rtr = mip6_inp->bu_opt->flags & MIP6_BU_RFLAG; + bcp = mip6_bc_find(&mip6_inp->ha_opt->home_addr); + if (bcp) + mip6_bc_update(bcp, coa, + mip6_inp->bu_opt->lifetime, + 0, rtr, + mip6_inp->bu_opt->prefix_len, + mip6_inp->bu_opt->seqno, + bcp->info, bcp->lasttime); + else { + bcp = mip6_bc_create( + &mip6_inp->ha_opt->home_addr, + coa, mip6_inp->bu_opt->lifetime, + 0, rtr, mip6_inp->bu_opt->prefix_len, + mip6_inp->bu_opt->seqno); + if (bcp == NULL) + return IPPROTO_DONE; + } + + /* Send a BA to the mobile node if the A-bit is + set in the BU. */ + if (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG) { + error = mip6_send_ba(&mip6_inp->ip6_dst, + &bcp->home_addr, + &bcp->coa, NULL, + MIP6_BA_STATUS_ACCEPT, + bcp->seqno, + bcp->lifetime); + return error; + } + } + return 0; + } + + /* Check if this is a request to delete a binding for the MN. 
*/ + if ((mip6_inp->bu_opt->lifetime == 0) || + (IN6_ARE_ADDR_EQUAL(&mip6_inp->ha_opt->home_addr, coa))) { + /* The request to delete the binding depends on if the + H-bit is set or not in the BU. */ + if (mip6_inp->bu_opt->flags & MIP6_BU_HFLAG) { + /* The H-bit is set. Make sure that there is an + entry in the BC marked as "home registration" + for this MN. */ + error = 0; + if (((bcp == NULL) || (bcp->hr_flag == 0)) && + (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG)) { + error = mip6_send_ba( + &mip6_inp->ip6_dst, + &mip6_inp->ha_opt->home_addr, + coa, NULL, MIP6_BA_STATUS_NOTHA, + mip6_inp->bu_opt->seqno, 0); + return error; + } + + /* The HA should delete BC entry, remove tunnel and + stop acting as a proxy for the MN. */ + error = mip6_bc_delete(bcp, &bcp_nxt); + if (error) + return IPPROTO_DONE; + + /* Send a BA to the MN if the A-bit is set. */ + if (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG) { + error = mip6_send_ba( + &mip6_inp->ip6_dst, + &mip6_inp->ha_opt->home_addr, + coa, NULL, MIP6_BA_STATUS_ACCEPT, + mip6_inp->bu_opt->seqno, 0); + if (error) + return error; + } + } else { + /* The H-bit is NOT set. Request the CN to delete + the binding. */ + if (bcp != NULL) { + error = mip6_bc_delete(bcp, &bcp_nxt); + if (error) + return IPPROTO_DONE; + } + + if (mip6_inp->bu_opt->flags & MIP6_BU_AFLAG) { + error = mip6_send_ba( + &mip6_inp->ip6_dst, + &mip6_inp->ha_opt->home_addr, + coa, NULL, MIP6_BA_STATUS_ACCEPT, + mip6_inp->bu_opt->seqno, 0); + if (error) + return error; + } + } + return 0; + } + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_ha2srcaddr + * Description: Copy Home Address option to IPv6 header source address, i.e + * replacing the existing source address. 
+ ****************************************************************************** + */ +void +mip6_ha2srcaddr(m) +struct mbuf *m; /* The entire IPv6 packet */ +{ + register struct ip6_hdr *ip6; /* IPv6 header */ + +#if MIP6_DEBUG + mip6_debug("\nReceived Home Address Option\n"); + mip6_debug("Type/Length: %x / %u\n", mip6_inp->ha_opt->type, + mip6_inp->ha_opt->len); + mip6_debug("Home Address: %s\n", + ip6_sprintf(&mip6_inp->ha_opt->home_addr)); +#endif + + /* Copy the Home Address option address to the Source Address */ + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_src = mip6_inp->ha_opt->home_addr; +} + + + +/* + ############################################################################## + # + # SENDING FUNCTIONS + # These functions are called when an IPv6 packet has been created internally + # by MIPv6 and shall be sent directly to its destination or when an option + # (BU, BA, BR) has been created and shall be stored in the mipv6 output queue + # for piggybacking on the first outgoing packet sent to the node. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_send_ba + * Description: Send a Binding Acknowledgement back to the Mobile Node. A new + * IPv6 packet is built including a IPv6 header, a Routing header + * and a Destination header (where the BA is stored). + * Ret value: 0 OK + * IPPROTO_DONE If anything goes wrong. 
+ ****************************************************************************** + */ +int +mip6_send_ba(ip6_src, ip6_dst, coa, subbuf, status, seqno, lifetime) +struct in6_addr *ip6_src; /* Source address for packet */ +struct in6_addr *ip6_dst; /* Destination address for packet */ +struct in6_addr *coa; /* Care-of address for MN */ +struct mip6_subbuf *subbuf; /* Home Agents List sub-option */ +u_int8_t status; /* Result of the Binding Update request */ +u_int16_t seqno; /* Seq no in the BU being acknowledged */ +u_int32_t lifetime; /* Proposed lifetime in the BU */ +{ + struct mbuf *m_ip6; /* IPv6 header stored in a mbuf */ + struct mip6_opt_ba *ba_opt; /* BA allocated in this function */ + struct ip6_pktopts *opt; /* Options for IPv6 packet */ + int error; +#if MIP6_DEBUG + u_int8_t var; + int ii; +#endif + + opt = (struct ip6_pktopts *)MALLOC ip6_pktopts), + M_TEMP, M_WAITOK); + if (opt == NULL) + return IPPROTO_DONE; + bzero(opt, sizeof(struct ip6_pktopts)); + + opt->ip6po_hlim = -1; /* -1 means to use default hop limit */ + m_ip6 = mip6_create_ip6hdr(ip6_src, ip6_dst, IPPROTO_NONE); + if(m_ip6 == NULL) + return IPPROTO_DONE; + + opt->ip6po_rhinfo.ip6po_rhi_rthdr = mip6_create_rh(coa, + IPPROTO_DSTOPTS); + if(opt->ip6po_rhinfo.ip6po_rhi_rthdr == NULL) + return IPPROTO_DONE; + + ba_opt = mip6_create_ba(status, seqno, lifetime); + if (ba_opt == NULL) + return IPPROTO_DONE; + + opt->ip6po_dest2 = mip6_create_dh((void *)ba_opt, subbuf, + IPPROTO_NONE); + if(opt->ip6po_dest2 == NULL) + return IPPROTO_DONE; + + mip6_config.enable_outq = 0; + error = ip6_output(m_ip6, opt, NULL, 0, NULL, NULL); + if (error) { + _FREE(opt->ip6po_rhinfo.ip6po_rhi_rthdr, M_TEMP); + _FREE(opt->ip6po_dest2, M_TEMP); + _FREE(ba_opt, M_TEMP); + mip6_config.enable_outq = 1; + log(LOG_ERR, + "%s: ip6_output function failed to send BA, error = %d\n", + __FUNCTION__, error); + return error; + } + mip6_config.enable_outq = 1; + +#if MIP6_DEBUG + mip6_debug("\nSent Binding Acknowledgement\n"); + 
mip6_debug("IP Header Src: %s\n", ip6_sprintf(ip6_src)); + mip6_debug("IP Header Dst: %s\n", ip6_sprintf(ip6_dst)); + mip6_debug("Type/Length/Status: %x / %u / %u\n", + ba_opt->type, ba_opt->len, ba_opt->status); + mip6_debug("Seq no/Life time: %u / %u\n", + ba_opt->seqno, ba_opt->lifetime); + mip6_debug("Refresh time: %u\n", ba_opt->refresh); + + if (subbuf) { + mip6_debug("Sub-options present (TLV coded)\n"); + for (ii = 0; ii < subbuf->len; ii++) { + if (ii % 16 == 0) + mip6_debug("\t0x:"); + if (ii % 4 == 0) + mip6_debug(" "); + bcopy((caddr_t)&subbuf->buffer[ii], (caddr_t)&var, 1); + mip6_debug("%02x", var); + if ((ii + 1) % 16 == 0) + mip6_debug("\n"); + } + if (ii % 16) + mip6_debug("\n"); + } +#endif + + _FREE(opt->ip6po_rhinfo.ip6po_rhi_rthdr, M_TEMP); + _FREE(opt->ip6po_dest2, M_TEMP); + _FREE(ba_opt, M_TEMP); + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_send_na + * Description: Sends a Neighbor Advertisement for a specific prefix. If the + * address is a aggregatable unicast address, i.e. prefix length + * is 64, a NA is sent to the site local and link local addresse + * as well. + * Ret value: - + ****************************************************************************** + */ +void +mip6_send_na(nap) +struct mip6_na *nap; /* Neighbor Advertisement sent */ +{ + struct mip6_prefix *pq; + struct nd_prefix *pr; /* Prefix list entry */ + struct in6_addr new_addr; /* New constructed address */ + struct in6_addr sl_addr; /* Site local address */ + + nap->no -= 1; + +#if MIP6_DEBUG + mip6_debug("\nSent Neighbor Advertisement (0x%x)\n", nap); +#endif + + /* Send NA for specified address if length equal to 0, otherwise for + each prefix with the same length as the address. + Different prefix list is used for HA and MN. 
*/ + if (nap->prefix_len == 0) { + nd6_na_output(nap->ifp, &nap->dst_addr, &nap->target_addr, + nap->flags, nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&nap->target_addr)); +#endif + } + + if ((MIP6_IS_HA_ACTIVE) && (nap->prefix_len != 0)) { + for (pq = mip6_pq; pq; pq = pq->next) { + if ((nap->prefix_len == pq->prefix_len) && + in6_are_prefix_equal(&pq->prefix, + &nap->target_addr, + pq->prefix_len)) { + mip6_build_in6addr(&new_addr, + &nap->target_addr, + &pq->prefix, + pq->prefix_len); + nd6_na_output(nap->ifp, &nap->dst_addr, + &new_addr, nap->flags, + nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&new_addr)); +#endif + } else + continue; + + if (nap->prefix_len == 64) { + /* NA for the site-local address is + only sent if length equals to 64. */ + bcopy((caddr_t)&in6addr_sitelocal, + (caddr_t)&sl_addr, 6); + bcopy((caddr_t)&nap->target_addr + 6, + (caddr_t)&sl_addr + 6, 2); + mip6_build_in6addr(&new_addr, + &nap->target_addr, + &sl_addr, + nap->prefix_len); + nd6_na_output(nap->ifp, + &nap->dst_addr, + &new_addr, + nap->flags, + nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&new_addr)); +#endif + + /* NA for the link-local address is + only sent if length equals to 64. 
*/ + mip6_build_in6addr(&new_addr, + &nap->target_addr, + &in6addr_linklocal, + nap->prefix_len); + nd6_na_output(nap->ifp, + &nap->dst_addr, + &new_addr, + nap->flags, + nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&new_addr)); +#endif + } + } + } else { + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if ((nap->prefix_len == pr->ndpr_plen) && + in6_are_prefix_equal(&nap->target_addr, + &pr->ndpr_addr, + pr->ndpr_plen)) { + mip6_build_in6addr( + &new_addr, + &nap->target_addr, + &pr->ndpr_prefix.sin6_addr, + pr->ndpr_plen); + nd6_na_output(nap->ifp, + &nap->dst_addr, + &new_addr, + nap->flags, + nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&new_addr)); +#endif + } else + continue; + + if (nap->prefix_len == 64) { + /* NA for the site-local address is + only sent if length equals to 64. */ + bcopy((caddr_t)&in6addr_sitelocal, + (caddr_t)&sl_addr, 6); + bcopy((caddr_t)&nap->target_addr + 6, + (caddr_t)&sl_addr + 6, 2); + mip6_build_in6addr(&new_addr, + &nap->target_addr, + &sl_addr, + nap->prefix_len); + nd6_na_output(nap->ifp, + &nap->dst_addr, + &new_addr, + nap->flags, + nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&new_addr)); +#endif + + /* NA for the link-local address is + only sent if length equals to 64. */ + mip6_build_in6addr(&new_addr, + &nap->target_addr, + &in6addr_linklocal, + nap->prefix_len); + nd6_na_output(nap->ifp, + &nap->dst_addr, + &new_addr, + nap->flags, + nap->use_link_opt, NULL); +#if MIP6_DEBUG + mip6_debug("Target Address: %s\n", + ip6_sprintf(&new_addr)); +#endif + } + } + } + return; +} + + + +/* + ############################################################################## + # + # UTILITY FUNCTIONS + # Miscellaneous functions needed for the internal processing of incoming and + # outgoing control signals. 
+ # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_create_ip6hdr + * Description: Create and fill in data for an IPv6 header to be used by + * packets originating from MIPv6. + * Ret value: NULL if a IPv6 header could not be created. + * Otherwise, pointer to a mbuf including the IPv6 header. + ****************************************************************************** + */ +struct mbuf * +mip6_create_ip6hdr(ip6_src, ip6_dst, next) +struct in6_addr *ip6_src; /* Source address for packet */ +struct in6_addr *ip6_dst; /* Destination address for packet */ +u_int8_t next; /* Next header following the IPv6 header */ +{ + struct ip6_hdr *ip6; /* IPv6 header */ + struct mbuf *m; /* Ptr to mbuf allocated for output data */ + + /* Allocate memory for the IPv6 header and fill it with data */ + ip6 = (struct ip6_hdr *)MALLOC ip6_hdr), + M_TEMP, M_WAITOK); + if (ip6 == NULL) + return NULL; + bzero(ip6, sizeof(struct ip6_hdr)); + + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + ip6->ip6_plen = 0; + ip6->ip6_nxt = next; + ip6->ip6_hlim = IPV6_DEFHLIM; + + ip6->ip6_src = *ip6_src; + ip6->ip6_dst = *ip6_dst; + + /* Allocate memory for mbuf and copy IPv6 header to mbuf. */ + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == NULL) { + return NULL; + } + + m->m_len = sizeof(*ip6); + m->m_pkthdr.len = m->m_len; + m->m_pkthdr.rcvif = NULL; + bcopy((caddr_t)ip6, mtod(m, caddr_t), sizeof(*ip6)); + _FREE(ip6, M_TEMP); + return m; +} + + + +/* + ****************************************************************************** + * Function: mip6_create_rh + * Description: Create a routing header of type 0 and add the COA for the MN. + * Ret value: A pointer to the ip6_rthdr structure if everything is OK. + * Otherwise NULL. 
+ ****************************************************************************** + */ +struct ip6_rthdr * +mip6_create_rh(coa, next) +struct in6_addr *coa; /* Care-of address for the MN */ +u_int8_t next; /* Next header following the routing header */ +{ + struct ip6_rthdr0 *rthdr0; /* Routing header type 0 */ + int len; + + len = sizeof(struct ip6_rthdr0) + sizeof(struct in6_addr); + rthdr0 = (struct ip6_rthdr0 *)MALLOC M_TEMP, M_WAITOK); + if (rthdr0 == NULL) + return NULL; + bzero(rthdr0, len); + + rthdr0->ip6r0_nxt = next; + rthdr0->ip6r0_len = 2; + rthdr0->ip6r0_type = 0; + rthdr0->ip6r0_segleft = 1; + rthdr0->ip6r0_reserved = 0; + bcopy((caddr_t)coa, (caddr_t)rthdr0 + sizeof(struct ip6_rthdr0), + sizeof(struct in6_addr)); + return (struct ip6_rthdr *)rthdr0; +} + + + +/* + ****************************************************************************** + * Function: mip6_create_ba + * Description: Create a Binding Acknowledgement option for transmission. + * Ret value: NULL if a BA option could not be created. + * Otherwise, pointer to the BA option. 
+ ****************************************************************************** + */ +struct mip6_opt_ba * +mip6_create_ba(status, seqno, lifetime) +u_int8_t status; /* Result of the Binding Update request */ +u_int16_t seqno; /* Sequence number in the BU being acknowledged */ +u_int32_t lifetime; /* Proposed lifetime in the BU */ +{ + struct mip6_opt_ba *ba_opt; /* BA allocated in this function */ + + /* Allocate a Binding Aknowledgement option and set values */ + ba_opt = (struct mip6_opt_ba *)MALLOC mip6_opt_ba), + M_TEMP, M_WAITOK); + if (ba_opt == NULL) + return NULL; + bzero(ba_opt, sizeof(struct mip6_opt_ba)); + + ba_opt->type = IP6OPT_BINDING_ACK; + ba_opt->len = IP6OPT_BALEN; + ba_opt->status = status; + ba_opt->seqno = seqno; + ba_opt->lifetime = lifetime; + + /* Calculate value for refresh time */ + if (MIP6_IS_HA_ACTIVE) + ba_opt->refresh = (ba_opt->lifetime * 8) / 10; + else + ba_opt->refresh = ba_opt->lifetime; + + return ba_opt; +} + + + +/* + ****************************************************************************** + * Function: mip6_create_dh + * Description: Create a destination header and add either a BA or BU option. + * Ret value: A pointer to the ip6_dest structure if everything is OK. + * Otherwise NULL. 
+ ****************************************************************************** + */ +struct ip6_dest * +mip6_create_dh(arg_opt, arg_sub, next) +void *arg_opt; /* BU or a BA option */ +struct mip6_subbuf *arg_sub; /* BU or BA sub-option (NULL if not present) */ +u_int8_t next; /* Next header following the dest header */ +{ + struct mip6_opt *opt; /* Destination option */ + struct ip6_dest *dest; /* Destination header */ + int off; /* Offset from start of Dest Header (byte) */ + int error; /* Error code from function call */ + + opt = (struct mip6_opt *)arg_opt; + dest = NULL; + if (opt->type == IP6OPT_BINDING_ACK) { + off = 3; + error = mip6_add_ba(&dest, &off, + (struct mip6_opt_ba *)opt, arg_sub); + if (error) { + if (dest != NULL) + _FREE(dest, M_TEMP); + return NULL; + } + dest->ip6d_nxt = next; + } else if (opt->type == IP6OPT_BINDING_UPDATE) { + off = 2; + error = mip6_add_bu(&dest, &off, + (struct mip6_opt_bu *)opt, arg_sub); + if (error) { + if (dest != NULL) + _FREE(dest, M_TEMP); + return NULL; + } + dest->ip6d_nxt = next; + } + return dest; +} + + + +/* + ****************************************************************************** + * Function: mip6_opt_offset + * Description: Find offset for BU, BA or BR option in the Destination Header. + * The option type is specified as input parameter and the offset + * to start of the first option of the specified type is returned. + * Ret value: Offset (bytes) to specified option from beginning of m_in. + * If no option is found a length of 0 is returned indicating an + * error. 
+ ****************************************************************************** + */ +int +mip6_opt_offset(m_in, off, type) +struct mbuf *m_in; /* Mbuf containing the entire IPv6 packet */ +int off; /* Offset from start of mbuf to start of dest option */ +int type; /* Type of option to look for */ +{ + int ii; /* Internal counter */ + u_int8_t opttype; /* Option type found in Destination Header*/ + u_int8_t optlen; /* Option length incl type and length */ + u_int32_t len; /* Length of Destination Header in bytes */ + u_int8_t len8; /* Length of Destination Header in bytes */ + u_int32_t offset; /* Offset to BU option from beginning of m_in */ + + m_copydata(m_in, off + 1, sizeof(len8), (caddr_t)&len8); + len = (len8 + 1) << 3; + + offset = 0; + for (ii = 2; ii < len;) { + m_copydata(m_in, off + ii, sizeof(opttype), (caddr_t)&opttype); + if (opttype == type) { + offset = off + ii; + break; + } else if (opttype == IP6OPT_PAD1) { + ii += 1; + continue; + } else { + ii += 1; + } + + m_copydata(m_in, off + ii, sizeof(optlen), (caddr_t)&optlen); + ii += 1 + optlen; + } + return offset; +} + + + +/* + ****************************************************************************** + * Function: mip6_addr_on_link + * Description: Check if an address is an on-link IPv6 address with respect to + * the home agent's current prefix list. + * Ret value: 0 = OK + * 133 = Not home subnet + * 136 = Incorrect interface identifier length + ****************************************************************************** + */ +int +mip6_addr_on_link(addr, prefix_len) +struct in6_addr *addr; /* IPv6 address to check */ +int prefix_len; /* Prefix length for the address */ +{ + struct mip6_prefix *pr; /* Pointer to entries in the prexix list */ + + for (pr = mip6_pq; pr; pr = pr->next) { + /* Check if the IPv6 prefixes are equal, i.e. of the same + IPv6 type of address. */ + /* If they are, verify that the prefix length is correct. 
*/ + if (in6_are_prefix_equal(addr, &pr->prefix, pr->prefix_len)) { + if (prefix_len == 0) + return 0; + + if (pr->prefix_len == prefix_len) + return 0; + else + return MIP6_BA_STATUS_IFLEN; + } + } + return MIP6_BA_STATUS_SUBNET; +} + + + +/* + ****************************************************************************** + * Function: mip6_min_lifetime + * Description: Decide the remaining valid lifetime for a home address. If the + * prefix length is zero the lifetime is the lifetime of the + * prefix list entry for this prefix. + * If the prefix length is non-zero the lifetime is the minimum + * remaining valid lifetime for all subnet prefixes on the mobile + * node's home link. + * Note: This function is only used by the Home Agent. + * Ret value: Lifetime + ****************************************************************************** + */ +u_int32_t +mip6_min_lifetime(addr, prefix_len) +struct in6_addr *addr; /* IPv6 address to check */ +int prefix_len; /* Prefix length for the address */ +{ + struct mip6_prefix *pr; /* Ptr to entries in the prexix list */ + u_int32_t min_time; /* Minimum life time */ + + min_time = 0xffffffff; + + for (pr = mip6_pq; pr; pr = pr->next) { + /* Different handling depending on the prefix length. */ + if (prefix_len == 0) { + if (in6_are_prefix_equal(addr, &pr->prefix, + pr->prefix_len)) { + return pr->valid_time; + } + } else + min_time = min(min_time, pr->valid_time); + } + return min_time; +} + + + +/* + ****************************************************************************** + * Function: mip6_build_in6addr + * Description: Build an in6 address from a prefix and the interface id. + * The length of the different parts is decided by prefix_len. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_build_in6addr(new_addr, id, prefix, prefix_len) +struct in6_addr *new_addr; /* New address built in this function */ +struct in6_addr *id; /* Interface id part of the address */ +const struct in6_addr *prefix; /* Prefix part of the address */ +int prefix_len; /* Prefix length (bits) */ +{ + u_int8_t byte_pr, byte_id; + int ii, jj; + + for (ii = 0; ii < prefix_len / 8; ii++) + new_addr->s6_addr8[ii] = prefix->s6_addr8[ii]; + + if (prefix_len % 8) { + /* Add the last bits of the prefix to the common byte. */ + byte_pr = prefix->s6_addr8[ii]; + byte_pr = byte_pr >> (8 - (prefix_len % 8)); + byte_pr = byte_pr << (8 - (prefix_len % 8)); + + /* Then, add the first bits of the interface id to the + common byte. */ + byte_id = id->s6_addr8[ii]; + byte_id = byte_id << (prefix_len % 8); + byte_id = byte_id >> (prefix_len % 8); + new_addr->s6_addr8[ii] = byte_pr | byte_id; + ii += 1; + } + + for (jj = ii; jj < 16; jj++) + new_addr->s6_addr8[jj] = id->s6_addr8[jj]; +} + + + +/* + ****************************************************************************** + * Function: mip6_build_ha_anycast + * Description: Build an mobile IPv6 Home-Agents anycast address from a prefix + * and the prefix length. The interface id is according to + * RFC2526. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_build_ha_anycast(new_addr, prefix, prefix_len) +struct in6_addr *new_addr; /* New address built in this function */ +const struct in6_addr *prefix; /* Prefix part of the address */ +int prefix_len; /* Prefix length (bits) */ +{ + struct in6_addr addr; + + + if (prefix->s6_addr8[0] == 0xff) { + *new_addr = in6addr_any; + return; + } + + if (((prefix->s6_addr8[0] & 0xe0) != 0) && (prefix_len != 64)) { + *new_addr = in6addr_any; + return; + } + + if (((prefix->s6_addr8[0] & 0xe0) != 0) && (prefix_len == 64)) + addr = in6addr_aha_64; + else + addr = in6addr_aha_nn; + + mip6_build_in6addr(new_addr, &addr, prefix, prefix_len); +} + + + +/* + ****************************************************************************** + * Function: mip6_add_ifaddr + * Description: Similar to "ifconfig prefixlen ". + * Ret value: Standard error codes. + ****************************************************************************** + */ +int +mip6_add_ifaddr(struct in6_addr *addr, + struct ifnet *ifp, + int plen, + int flags) /* Note: IN6_IFF_NODAD available flag */ +{ + struct in6_aliasreq *ifra, dummy; + struct sockaddr_in6 *sa6; + struct sockaddr_in6 oldaddr; + struct in6_ifaddr *ia, *oia; + struct in6_addrlifetime *lt; + int error = 0, hostIsNew, prefixIsNew; + int s; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + struct ifaddr *ifa; +#endif +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) + time_t time_second = (time_t)time.tv_sec; +#endif + + bzero(&dummy, sizeof(dummy)); + ifra = &dummy; + + ifra->ifra_addr.sin6_len = sizeof(ifra->ifra_addr); + ifra->ifra_addr.sin6_family = AF_INET6; + ifra->ifra_addr.sin6_addr = *addr; + + if (plen != 0) { + ifra->ifra_prefixmask.sin6_len = + sizeof(ifra->ifra_prefixmask); + ifra->ifra_prefixmask.sin6_family = AF_INET6; + in6_prefixlen2mask(&ifra->ifra_prefixmask.sin6_addr, plen); + /* XXXYYY Should the prefix 
also change its prefixmask? */ + } + + ifra->ifra_flags = flags; + ifra->ifra_lifetime.ia6t_vltime = ND6_INFINITE_LIFETIME; + ifra->ifra_lifetime.ia6t_pltime = ND6_INFINITE_LIFETIME; + + sa6 = &ifra->ifra_addr; + + /* "ifconfig ifp inet6 Home_Address prefixlen 64/128 (alias?)" */ + if (ifp == 0) + return EOPNOTSUPP; + + s = splnet(); + + /* + * Code recycled from in6_control(). + */ + + /* + * Find address for this interface, if it exists. + */ + if (IN6_IS_ADDR_LINKLOCAL(&sa6->sin6_addr)) { + if (sa6->sin6_addr.s6_addr16[1] == 0) { + /* interface ID is not embedded by the user */ + sa6->sin6_addr.s6_addr16[1] = + htons(ifp->if_index); + } + else if (sa6->sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + splx(s); + return(EINVAL); /* ifid is contradict */ + } + if (sa6->sin6_scope_id) { + if (sa6->sin6_scope_id != + (u_int32_t)ifp->if_index) { + splx(s); + return(EINVAL); + } + sa6->sin6_scope_id = 0; /* XXX: good way? */ + } + } + ia = in6ifa_ifpwithaddr(ifp, &sa6->sin6_addr); + + if (ia == 0) { + ia = (struct in6_ifaddr *) + MALLOC M_IFADDR, M_WAITOK); + if (ia == NULL) { + splx(s); + return (ENOBUFS); + } + bzero((caddr_t)ia, sizeof(*ia)); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + ia->ia_ifa.ifa_dstaddr + = (struct sockaddr *)&ia->ia_dstaddr; + ia->ia_ifa.ifa_netmask + = (struct sockaddr *)&ia->ia_prefixmask; + + ia->ia_ifp = ifp; + if ((oia = in6_ifaddr) != NULL) { + for ( ; oia->ia_next; oia = oia->ia_next) + continue; + oia->ia_next = ia; + } else + in6_ifaddr = ia; + ia->ia_ifa.ifa_refcnt++; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if ((ifa = ifp->if_addrlist) != NULL) { + for ( ; ifa->ifa_next; ifa = ifa->ifa_next) + continue; + ifa->ifa_next = &ia->ia_ifa; + } else + ifp->if_addrlist = &ia->ia_ifa; +#else + TAILQ_INSERT_TAIL(&ifp->if_addrlist, &ia->ia_ifa, + ifa_list); +#endif + ia->ia_ifa.ifa_refcnt++; + } + + /* sanity for overflow - beware unsigned */ + lt = &ifra->ifra_lifetime; + if (lt->ia6t_vltime 
!= ND6_INFINITE_LIFETIME + && lt->ia6t_vltime + time_second < time_second) { + splx(s); + return EINVAL; + } + if (lt->ia6t_pltime != ND6_INFINITE_LIFETIME + && lt->ia6t_pltime + time_second < time_second) { + splx(s); + return EINVAL; + } + prefixIsNew = 0; + hostIsNew = 1; + + if (ifra->ifra_addr.sin6_len == 0) { + ifra->ifra_addr = ia->ia_addr; + hostIsNew = 0; + } else if (IN6_ARE_ADDR_EQUAL(&ifra->ifra_addr.sin6_addr, + &ia->ia_addr.sin6_addr)) + hostIsNew = 0; + + if (ifra->ifra_prefixmask.sin6_len) { + in6_ifscrub(ifp, ia); + ia->ia_prefixmask = ifra->ifra_prefixmask; + prefixIsNew = 1; + } + if ((ifp->if_flags & IFF_POINTOPOINT) && + (ifra->ifra_dstaddr.sin6_family == AF_INET6)) { + in6_ifscrub(ifp, ia); + oldaddr = ia->ia_dstaddr; + ia->ia_dstaddr = ifra->ifra_dstaddr; + /* link-local index check: should be a separate function? */ + if (IN6_IS_ADDR_LINKLOCAL(&ia->ia_dstaddr.sin6_addr)) { + if (ia->ia_dstaddr.sin6_addr.s6_addr16[1] == 0) { + /* + * interface ID is not embedded by + * the user + */ + ia->ia_dstaddr.sin6_addr.s6_addr16[1] + = htons(ifp->if_index); + } else if (ia->ia_dstaddr.sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + ia->ia_dstaddr = oldaddr; + splx(s); + return(EINVAL); /* ifid is contradict */ + } + } + prefixIsNew = 1; /* We lie; but effect's the same */ + } + if (ifra->ifra_addr.sin6_family == AF_INET6 && + (hostIsNew || prefixIsNew)) + { + error = in6_ifinit(ifp, ia, &ifra->ifra_addr, 0); + } + if (ifra->ifra_addr.sin6_family == AF_INET6 + && hostIsNew && (ifp->if_flags & IFF_MULTICAST)) { + int error_local = 0; + + /* + * join solicited multicast addr for new host id + */ + struct in6_addr llsol; + bzero(&llsol, sizeof(struct in6_addr)); + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr32[3] = + ifra->ifra_addr.sin6_addr.s6_addr32[3]; + llsol.s6_addr8[12] = 0xff; + (void)in6_addmulti(&llsol, ifp, &error_local); + if 
(error == 0) + error = error_local; + } + + ia->ia6_flags = ifra->ifra_flags; + ia->ia6_flags &= ~IN6_IFF_DUPLICATED; /*safety*/ + ia->ia6_flags &= ~IN6_IFF_NODAD; /* Mobile IPv6 */ + + ia->ia6_lifetime = ifra->ifra_lifetime; + /* for sanity */ + if (ia->ia6_lifetime.ia6t_vltime != ND6_INFINITE_LIFETIME) { + ia->ia6_lifetime.ia6t_expire = + time_second + ia->ia6_lifetime.ia6t_vltime; + } else + ia->ia6_lifetime.ia6t_expire = 0; + if (ia->ia6_lifetime.ia6t_pltime != ND6_INFINITE_LIFETIME) { + ia->ia6_lifetime.ia6t_preferred = + time_second + ia->ia6_lifetime.ia6t_pltime; + } else + ia->ia6_lifetime.ia6t_preferred = 0; + + /* + * Perform DAD, if needed. + * XXX It may be of use, if we can administratively + * disable DAD. + */ + switch (ifp->if_type) { + case IFT_ARCNET: + case IFT_ETHER: + case IFT_FDDI: +#if 0 + case IFT_ATM: + case IFT_SLIP: + case IFT_PPP: +#endif + /* Mobile IPv6 modification */ + if ((ifra->ifra_flags & IN6_IFF_NODAD) == 0) { + ia->ia6_flags |= IN6_IFF_TENTATIVE; + nd6_dad_start((struct ifaddr *)ia, NULL); + } + break; + case IFT_DUMMY: + case IFT_FAITH: + case IFT_GIF: + case IFT_LOOP: + default: + break; + } + + if (hostIsNew) { + int iilen; + int error_local = 0; + + iilen = (sizeof(ia->ia_prefixmask.sin6_addr) << 3) - + in6_mask2len(&ia->ia_prefixmask.sin6_addr); + error_local = in6_prefix_add_ifid(iilen, ia); + if (error == 0) + error = error_local; + } + + splx(s); + return error; + + +} + + + +/* + ****************************************************************************** + * Function: mip6_tunnel_output + * Description: Encapsulates packet in an outer header which is determined + * of the Binding Cache entry provided. Note that packet is + * (currently) not sent here, but should be sent by the caller. + * Ret value: != 0 if failure. It's up to the caller to free the mbuf chain. 
+ ****************************************************************************** + */ +int +mip6_tunnel_output(mp, bc) + struct mbuf **mp; + struct mip6_bc *bc; +{ + struct sockaddr_in6 dst; + const struct encaptab *ep = bc->ep; + struct mbuf *m = *mp; + struct sockaddr_in6 *sin6_src = (struct sockaddr_in6 *)&ep->src; + struct sockaddr_in6 *sin6_dst = (struct sockaddr_in6 *)&ep->dst; + struct ip6_hdr *ip6; + u_int8_t itos; + int len; + + bzero(&dst, sizeof(dst)); + dst.sin6_len = sizeof(struct sockaddr_in6); + dst.sin6_family = AF_INET6; + dst.sin6_addr = bc->coa; + + if (ep->af != AF_INET6 || ep->dst.ss_len != dst.sin6_len || + bcmp(&ep->dst, &dst, dst.sin6_len) != 0 ) + return EFAULT; + + /* Recursion problems? */ + + if (IN6_IS_ADDR_UNSPECIFIED(&sin6_src->sin6_addr)) { + return EFAULT; + } + + len = m->m_pkthdr.len; + + if (m->m_len < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) + return ENOBUFS; + } + ip6 = mtod(m, struct ip6_hdr *); + itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; + + + /* prepend new IP header */ + M_PREPEND(m, sizeof(struct ip6_hdr), M_DONTWAIT); + if (m && m->m_len < sizeof(struct ip6_hdr)) + m = m_pullup(m, sizeof(struct ip6_hdr)); + if (m == NULL) { +#if MIP6_DEBUG + printf("ENOBUFS in mip6_tunnel_output %d\n", __LINE__); +#endif + return ENOBUFS; + } + + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + ip6->ip6_plen = htons((u_short)len); + ip6->ip6_nxt = IPPROTO_IPV6; + ip6->ip6_hlim = ip6_gif_hlim; /* Same? */ + ip6->ip6_src = sin6_src->sin6_addr; + + /* bidirectional configured tunnel mode */ + if (!IN6_IS_ADDR_UNSPECIFIED(&sin6_dst->sin6_addr)) + ip6->ip6_dst = sin6_dst->sin6_addr; + else + return ENETUNREACH; + + *mp = m; + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_tunnel_input + * Description: similar to gif_input() and in6_gif_input(). 
+ * Ret value: standard error codes. + ****************************************************************************** + */ +int +mip6_tunnel_input(mp, offp, proto) +struct mbuf **mp; +int *offp, proto; +{ + struct mbuf *m = *mp; + struct ip6_hdr *ip6; + int s, af = 0; + u_int32_t otos; + + ip6 = mtod(m, struct ip6_hdr *); + otos = ip6->ip6_flow; + m_adj(m, *offp); + + switch (proto) { + case IPPROTO_IPV6: + { + struct ip6_hdr *ip6; + af = AF_INET6; + if (m->m_len < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) + return IPPROTO_DONE; + } + m->m_flags |= M_MIP6TUNNEL; /* Tell MN that this packet + was tunnelled. */ + ip6 = mtod(m, struct ip6_hdr *); + + s = splimp(); + if (IF_QFULL(&ip6intrq)) { + IF_DROP(&ip6intrq); /* update statistics */ + m_freem(m); + splx(s); + return IPPROTO_DONE; + } + IF_ENQUEUE(&ip6intrq, m); +#if 0 + /* we don't need it as we tunnel IPv6 in IPv6 only. */ + schednetisr(NETISR_IPV6); +#endif + splx(s); + break; + } + default: +#if MIP6_DEBUG + mip6_debug("%s: protocol %d not supported.\n", __FUNCTION__, + proto); +#endif + m_freem(m); + return IPPROTO_DONE; + } + + return IPPROTO_DONE; +} + + + +/* + ****************************************************************************** + * Function: mip6_tunnel + * Description: Create, move or delete a tunnel from the Home Agent to the MN + * or from the Mobile Node to the Home Agent. + * Ret value: Standard error codes. 
+ ****************************************************************************** + */ +int +mip6_tunnel(ip6_src, ip6_dst, action, start, entry) +struct in6_addr *ip6_src; /* Tunnel start point */ +struct in6_addr *ip6_dst; /* Tunnel end point */ +int action; /* Action: MIP6_TUNNEL_{ADD,MOVE,DEL} */ +int start; /* Either the Home Agent or the Mobile Node */ +void *entry; /* BC or ESM depending on start variable */ +{ + const struct encaptab *ep; /* Encapsulation entry */ + const struct encaptab **ep_store; /* Where to store encap reference */ + struct sockaddr_in6 src, srcm; + struct sockaddr_in6 dst, dstm; + struct in6_addr mask; + int mask_len = 128; + + ep_store = NULL; + if ((start == MIP6_NODE_MN) && (entry != NULL)) + ep_store = &((struct mip6_esm *)entry)->ep; + else if ((start == MIP6_NODE_HA) && (entry != NULL)) + ep_store = &((struct mip6_bc *)entry)->ep; + else { +#if MIP6_DEBUG + printf("%s: Tunnel not modified\n", __FUNCTION__); +#endif + return 0; + } + + if (action == MIP6_TUNNEL_DEL) { + /* Moving to Home network. Remove tunnel. */ + if (ep_store && *ep_store) { + encap_detach(*ep_store); + *ep_store = NULL; + } + return 0; + } + + if ((action == MIP6_TUNNEL_ADD) || (action == MIP6_TUNNEL_MOVE)) { + if (action == MIP6_TUNNEL_MOVE && ep_store && *ep_store) { + /* Remove the old encapsulation entry first. 
*/ + encap_detach(*ep_store); + *ep_store = NULL; + } + + bzero(&src, sizeof(src)); + src.sin6_family = AF_INET6; + src.sin6_len = sizeof(struct sockaddr_in6); + src.sin6_addr = *ip6_src; + + in6_prefixlen2mask(&mask, mask_len); + bzero(&srcm, sizeof(srcm)); + srcm.sin6_family = AF_INET6; + srcm.sin6_len = sizeof(struct sockaddr_in6); + srcm.sin6_addr = mask; + + bzero(&dst, sizeof(dst)); + dst.sin6_family = AF_INET6; + dst.sin6_len = sizeof(struct sockaddr_in6); + dst.sin6_addr = *ip6_dst; + + in6_prefixlen2mask(&mask, mask_len); + bzero(&dstm, sizeof(dstm)); + dstm.sin6_family = AF_INET6; + dstm.sin6_len = sizeof(struct sockaddr_in6); + dstm.sin6_addr = mask; + + ep = encap_attach(AF_INET6, -1, + (struct sockaddr *)&src, + (struct sockaddr *)&srcm, + (struct sockaddr *)&dst, + (struct sockaddr *)&dstm, + (struct protosw *)&mip6_tunnel_protosw, + NULL); + if (ep == NULL) + return EINVAL; + *ep_store = ep; + return 0; + } + return EINVAL; +} + + + +/* + ****************************************************************************** + * Function: mip6_proxy + * Description: Set or delete address to act proxy for. + * Ret value: Standard error codes. 
+ ******************************************************************************
+ */
+int
+mip6_proxy(struct in6_addr* addr,
+           struct in6_addr* local,
+           int cmd)
+{
+	struct sockaddr_in6 mask /* = {sizeof(mask), AF_INET6 }*/;
+	struct sockaddr_in6 sa6;
+	struct sockaddr_dl *sdl;
+	struct ifaddr *ifa;
+	struct ifnet *ifp;
+	int flags, error;
+	struct rtentry *nrt;
+
+	if (cmd == RTM_DELETE) {
+		struct rtentry *rt;
+
+		bzero(&sa6, sizeof(sa6));
+		sa6.sin6_family = AF_INET6;
+		sa6.sin6_len = sizeof(sa6);
+		sa6.sin6_addr = *addr;
+
+#if defined(__FreeBSD__) || defined (__APPLE__)
+		rt = rtalloc1((struct sockaddr *)&sa6, 1, 0UL);
+#else
+		rt = rtalloc1((struct sockaddr *)&sa6, 1);
+#endif
+		if (rt == NULL)
+			return EHOSTUNREACH;
+
+		error = rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0,
+				  rt_mask(rt), 0, (struct rtentry **)0);
+		rt->rt_refcnt--;
+		rt = NULL;
+		return error;
+	}
+
+	/* Create sa6 */
+	bzero(&sa6, sizeof(sa6));
+	sa6.sin6_family = AF_INET6;
+	sa6.sin6_len = sizeof(sa6);
+	sa6.sin6_addr = *local;
+
+	ifa = ifa_ifwithaddr((struct sockaddr *)&sa6);
+	if (ifa == NULL)
+		return EINVAL;
+
+	sa6.sin6_addr = *addr;
+
+	/* Create sdl */
+	ifp = ifa->ifa_ifp;
+
+#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3)
+	for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
+#else
+	for (ifa = ifp->if_addrlist.tqh_first; ifa;
+	     ifa = ifa->ifa_list.tqe_next)
+#endif
+		if (ifa->ifa_addr->sa_family == AF_LINK)
+			break;
+
+	if (!ifa)
+		return EINVAL;
+
+	MALLOC(sdl, struct sockaddr_dl *, ifa->ifa_addr->sa_len, M_IFMADDR,
+	       M_WAITOK);
+	bcopy((struct sockaddr_dl *)ifa->ifa_addr, sdl, ifa->ifa_addr->sa_len);
+
+	/* Create mask */
+	bzero(&mask, sizeof(mask));
+	mask.sin6_family = AF_INET6;
+	mask.sin6_len = sizeof(mask);
+
+	in6_len2mask(&mask.sin6_addr, 128);
+
+	flags = (RTF_STATIC | RTF_ANNOUNCE | RTA_NETMASK);
+
+	error = rtrequest(RTM_ADD, (struct sockaddr *)&sa6,
+			  (struct sockaddr *)sdl,
+			  (struct sockaddr *)&mask, flags, &nrt);
+
+	if (error == 0) { 
+ /* avoid expiration */ + if (nrt) { + nrt->rt_rmx.rmx_expire = 0; + nrt->rt_genmask = NULL; + nrt->rt_refcnt--; + } + else + error = EINVAL; + } + _FREE(sdl, M_IFMADDR); + return error; +} + + + +/* + ############################################################################## + # + # LIST FUNCTIONS + # The correspondent node maintains a Binding Cache list for each node from + # which it has received a BU. + # It also maintains a list of Neighbor Advertisements that shall be sent + # either by the home agent when start acting as a proxy for the mobile node + # or by the mobile node when returning to the home network. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_bc_find + * Description: Find an entry in the Binding Cache list. + * Ret value: Pointer to Binding Cache entry or NULL if no entry found. + ****************************************************************************** + */ +struct mip6_bc * +mip6_bc_find(home_addr) +struct in6_addr *home_addr; /* Home Address of the MN for which the BC + entry is searched */ +{ + struct mip6_bc *bcp; /* Entry in the Binding Cache list */ + + for (bcp = mip6_bcq; bcp; bcp = bcp->next) { + if (IN6_ARE_ADDR_EQUAL(home_addr, &bcp->home_addr)) + return bcp; + } + return NULL; +} + + + +/* + ****************************************************************************** + * Function: mip6_bc_create + * Description: Create a new Binding Cache entry, add it first to the Binding + * Cache list and set parameters for the entry. + * Ret value: Pointer to the created BC entry or NULL. + * Note 1: If the BC timeout function has not been started it is started. + * The BC timeout function will be called once every second until + * there are no more entries in the BC list. + * Note 2: The gif i/f is created/updated in function mip6_tunnel and + * should not be taken care of here. 
+ ******************************************************************************
+ */
+struct mip6_bc *
+mip6_bc_create(home_addr, coa, lifetime, hr, rtr, prefix_len, seqno)
+struct in6_addr *home_addr; /* Home Address for the mobile node */
+struct in6_addr *coa; /* COA for the mobile node */
+u_int32_t lifetime; /* Remaining lifetime for this BC entry */
+u_int8_t hr; /* Flag for home registration (0/1) */
+u_int8_t rtr; /* MN is router (0/1) */
+u_int8_t prefix_len; /* Prefix length for Home Address */
+u_int16_t seqno; /* Sequence number in the received BU */
+{
+	struct mip6_bc *bcp; /* Created BC list entry*/
+	int s;
+
+	bcp = (struct mip6_bc *)_MALLOC(sizeof(struct mip6_bc),
+					M_TEMP, M_WAITOK);
+	if (bcp == NULL)
+		return NULL;
+	bzero((caddr_t)bcp, sizeof(struct mip6_bc));
+
+	bcp->next = NULL;
+	bcp->home_addr = *home_addr;
+	bcp->coa = *coa;
+	bcp->lifetime = lifetime;
+	bcp->hr_flag = hr;
+	bcp->prefix_len = prefix_len;
+	bcp->seqno = seqno;
+	bcp->lasttime = 0;
+	bcp->ep = NULL;
+
+	if (bcp->hr_flag)
+		bcp->rtr_flag = rtr;
+	else {
+		bcp->rtr_flag = 0;
+
+		if (mip6_config.br_update > 60)
+			bcp->info.br_interval = 60;
+		else if (mip6_config.br_update < 2)
+			bcp->info.br_interval = 2;
+		else
+			bcp->info.br_interval = mip6_config.br_update;
+	}
+
+	/* Insert the entry as the first entry in the Binding Cache list. 
*/ + s = splnet(); + if (mip6_bcq == NULL) { + mip6_bcq = bcp; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_bc_handle = +#endif + timeout(mip6_timer_bc, (void *)0, hz); + } else { + bcp->next = mip6_bcq; + mip6_bcq = bcp; + } + splx(s); + +#if MIP6_DEBUG + mip6_debug("\nBinding Cache Entry created (0x%x)\n", bcp); + mip6_debug("Home Addr/Prefix len: %s / %u\n", + ip6_sprintf(&bcp->home_addr), bcp->prefix_len); + mip6_debug("Care-of Address: %s\n", ip6_sprintf(&bcp->coa)); + mip6_debug("Remaining lifetime: %u\n", bcp->lifetime); + mip6_debug("Sequence number: %u\n", bcp->seqno); + mip6_debug("Home reg/Router: "); + if (bcp->hr_flag) + mip6_debug("TRUE / "); + else + mip6_debug("FALSE / "); + + if (bcp->rtr_flag) + mip6_debug("TRUE\n"); + else + mip6_debug("FALSE\n"); +#endif + return bcp; +} + + + +/* + ****************************************************************************** + * Function: mip6_bc_update + * Description: Update an existing Binding Cache entry + * Ret value: - + * Note: The gif i/f is created/updated in function mip6_tunnel and + * should not be taken care of here. 
+ ****************************************************************************** + */ +void +mip6_bc_update(bcp, coa, lifetime, hr, rtr, prefix_len, seqno, info, lasttime) +struct mip6_bc *bcp; /* BC entry being allocated or updated */ +struct in6_addr *coa; /* COA for the mobile node */ +u_int32_t lifetime; /* Remaining lifetime for this BC entry */ +u_int8_t hr; /* Flag for home registration (0/1) */ +u_int8_t rtr; /* MN is router (0/1) */ +u_int8_t prefix_len; /* Prefix length for Home Address */ +u_int16_t seqno; /* Sequence number in the received BU */ +struct bc_info info; /* Usage info for cache replacement policy */ +time_t lasttime; /* The time at which a BR was last sent */ +{ + bcp->coa = *coa; + bcp->lifetime = lifetime; + bcp->hr_flag = hr; + bcp->prefix_len = prefix_len; + bcp->seqno = seqno; + + if (bcp->hr_flag) { + bcp->rtr_flag = rtr; + bzero((caddr_t)&bcp->info, sizeof(struct bc_info)); + } else { + bcp->rtr_flag = 0; + + if (info.br_interval > 60) + bcp->info.br_interval = 60; + else if (info.br_interval < 2) + bcp->info.br_interval = 2; + else + bcp->info.br_interval = info.br_interval; + } + bcp->lasttime = lasttime; + +#if MIP6_DEBUG + mip6_debug("\nBinding Cache Entry updated (0x%x)\n", bcp); + mip6_debug("Home Addr/Prefix len: %s / %u\n", + ip6_sprintf(&bcp->home_addr), bcp->prefix_len); + mip6_debug("Care-of Address: %s\n", ip6_sprintf(&bcp->coa)); + mip6_debug("Remaining lifetime: %u\n", bcp->lifetime); + mip6_debug("Sequence number: %u\n", bcp->seqno); + mip6_debug("Home reg/Router: "); + if (bcp->hr_flag) + mip6_debug("TRUE / "); + else + mip6_debug("FALSE / "); + + if (bcp->rtr_flag) + mip6_debug("TRUE\n"); + else + mip6_debug("FALSE\n"); +#endif + return; +} + + + +/* + ****************************************************************************** + * Function: mip6_bc_delete + * Description: Delete an entry in the Binding Cache list. + * Ret value: Error code + * Pointer to next entry in list or NULL if last entry removed. 
+ ****************************************************************************** + */ +int +mip6_bc_delete(bcp_del, bcp_nxt) +struct mip6_bc *bcp_del; /* Pointer to BC entry to delete */ +struct mip6_bc **bcp_nxt; /* Returns next entry in the list */ +{ + struct mip6_bc *bcp; /* Current entry in the BC list */ + struct mip6_bc *bcp_prev; /* Previous entry in the BC list */ + struct mip6_bc *bcp_next; /* Next entry in the BC list */ + int s, error = 0; + + s = splnet(); + bcp_prev = NULL; + bcp_next = NULL; + for (bcp = mip6_bcq; bcp; bcp = bcp->next) { + bcp_next = bcp->next; + if (bcp != bcp_del) { + bcp_prev = bcp; + continue; + } + + /* Make sure that the list pointers are correct. */ + if (bcp_prev == NULL) + mip6_bcq = bcp->next; + else + bcp_prev->next = bcp->next; + + if (bcp->hr_flag) { + /* The HA should stop acting as a proxy for the MN. */ + error = mip6_proxy(&bcp->home_addr, NULL, RTM_DELETE); + if (error) { +#if MIP6_DEBUG + mip6_debug("%s: delete proxy error = %d\n", + __FUNCTION__, error); +#endif + *bcp_nxt = bcp_next; + return error; + } + + /* Delete the existing tunnel to the MN. */ + mip6_tunnel(NULL, NULL, MIP6_TUNNEL_DEL, MIP6_NODE_HA, + (void *)bcp); + } + +#if MIP6_DEBUG + mip6_debug("\nBinding Cache Entry deleted (0x%x)\n", bcp); +#endif + _FREE(bcp, M_TEMP); + + /* Remove the timer if the BC queue is empty */ + if (mip6_bcq == NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_bc, (void *)NULL, + mip6_timer_bc_handle); + callout_handle_init(&mip6_timer_bc_handle); +#else + untimeout(mip6_timer_bc, (void *)NULL); +#endif + } + break; + } + splx(s); + + *bcp_nxt = bcp_next; + return error; +} + + + +/* + ****************************************************************************** + * Function: mip6_na_create + * Description: Create a NA entry and add it to the list of Neighbor Adver- + * tisements. 
The NA will be repeatedly sent by either the
+ * Mobile Node when returning to its home link or by the Home
+ * Agent when acting as a proxy for a Mobile Node while away
+ * from its home network.
+ * Note: The first Neighbor Advertisement is sent by this function.
+ * Ret value: Pointer to the created entry or NULL in case of error.
+ ******************************************************************************
+ */
+struct mip6_na *
+mip6_na_create(home_addr, dst_addr, target_addr, prefix_len,
+               flags, use_link_opt)
+struct in6_addr *home_addr; /* Home address of the mobile node */
+struct in6_addr *dst_addr; /* Destination address */
+struct in6_addr *target_addr; /* Target address */
+u_int8_t prefix_len; /* Prefix length of the home address */
+u_long flags; /* Flags for the NA message */
+int use_link_opt; /* Include Target link layer address option or
+                     not (0 = Do not include, 1 = Include) */
+{
+	struct mip6_na *nap; /* Created NA message */
+	struct mip6_link_list *llp; /* Link list entry */
+	struct mip6_ha_list *halp; /* Home agent list entry */
+	struct mip6_addr_list *addrp; /* Address list entry */
+	struct nd_prefix *pr; /* Prefix list entry */
+	int s, start_timer = 0;
+
+	llp = NULL;
+	halp = NULL;
+	addrp = NULL;
+	pr = NULL;
+
+	if (mip6_naq == NULL)
+		start_timer = 1;
+
+	nap = (struct mip6_na *)_MALLOC(sizeof(struct mip6_na),
+					M_TEMP, M_WAITOK);
+	if (nap == NULL)
+		return NULL;
+	bzero(nap, sizeof(struct mip6_na));
+
+	nap->next = NULL;
+	nap->home_addr = *home_addr;
+	nap->dst_addr = *dst_addr;
+	nap->target_addr = *target_addr;
+	nap->prefix_len = prefix_len;
+	nap->flags = flags;
+	nap->use_link_opt = use_link_opt;
+	nap->no = MIP6_MAX_ADVERT_REXMIT;
+
+	/* The interface that shall be used may not be assumed to be the
+	   interface of the incoming packet, but must be the interface stated
+	   in the prefix that matches the home address. 
*/ + if (MIP6_IS_HA_ACTIVE) { + for (llp = mip6_llq; llp; llp = llp->next) { + for (halp = llp->ha_list; halp; halp = halp->next) { + for (addrp = halp->addr_list; addrp; + addrp = addrp->next) { + if (in6_are_prefix_equal( + home_addr, + &addrp->ip6_addr, + addrp->prefix_len)) + break; + } + if (addrp != NULL) + break; + } + if (addrp != NULL) + break; + } + if (addrp == NULL) { + log(LOG_ERR, + "%s: No interface found for sending Neighbor " + "Advertisements at\n", __FUNCTION__); + return NULL; + } + nap->ifp = llp->ifp; + } + + if (MIP6_IS_MN_ACTIVE) { + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if (!pr->ndpr_stateflags.onlink) + continue; + if (in6_are_prefix_equal(home_addr, + &pr->ndpr_prefix.sin6_addr, + pr->ndpr_plen)) + break; + } + if (pr == NULL) { + log(LOG_ERR, + "%s: No interface found for sending Neighbor " + "Advertisements at\n", __FUNCTION__); + return NULL; + } + nap->ifp = pr->ndpr_ifp; + } + + /* Add the new na entry first to the list. */ + s = splnet(); + nap->next = mip6_naq; + mip6_naq = nap; + splx(s); + +#if MIP6_DEBUG + mip6_debug("\nCreated Neighbor Advertisement List entry (0x%x)\n", + nap); + mip6_debug("Interface being used: %s\n", if_name(nap->ifp)); + mip6_debug("Home Addr/Prefix len: %s / %d\n", + ip6_sprintf(&nap->home_addr), nap->prefix_len); + mip6_debug("Destination Address: %s\n", ip6_sprintf(&nap->dst_addr)); + mip6_debug("Target Address: %s\n", + ip6_sprintf(&nap->target_addr)); + if (nap->use_link_opt) + mip6_debug("Incl Target ll_addr : TRUE\n"); + else + mip6_debug("Incl Target ll_addr : FALSE\n"); +#endif + + /* Send the Neighbor Advertisment entry to speed up cache changes. 
*/ + mip6_send_na(nap); + + if (start_timer) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_na_handle = +#endif + timeout(mip6_timer_na, (void *)0, hz); + } + return nap; +} + + + +/* + ****************************************************************************** + * Function: mip6_na_delete + * Description: Delete an entry in the NA list. + * Ret value: Pointer to next entry in list or NULL if last entry removed. + ****************************************************************************** + */ +struct mip6_na * +mip6_na_delete(nap_del) +struct mip6_na *nap_del; /* Pointer to NA entry to delete */ +{ + struct mip6_na *nap; /* Current entry in the NA list */ + struct mip6_na *nap_prev; /* Previous entry in the NA list */ + struct mip6_na *nap_next; /* Next entry in the NA list */ + int s; + + s = splnet(); + nap_prev = NULL; + nap_next = NULL; + for (nap = mip6_naq; nap; nap = nap->next) { + nap_next = nap->next; + if (nap == nap_del) { + if (nap_prev == NULL) + mip6_naq = nap->next; + else + nap_prev->next = nap->next; + +#if MIP6_DEBUG + mip6_debug("\nNeighbor Advertisement Entry " + "deleted (0x%x)\n", nap); +#endif + _FREE(nap, M_TEMP); + + /* Remove the timer if the NA queue is empty */ + if (mip6_naq == NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_na, (void *)NULL, + mip6_timer_na_handle); + callout_handle_init(&mip6_timer_na_handle); +#else + untimeout(mip6_timer_na, (void *)NULL); +#endif + } + break; + } + nap_prev = nap; + } + splx(s); + return nap_next; +} + + + +/* + ****************************************************************************** + * Function: mip6_prefix_find + * Description: Try to find an existing prefix entry in the prefix list. + * Ret value: Pointer to found prefix list entry or NULL. 
+ ******************************************************************************
+ */
+struct mip6_prefix *
+mip6_prefix_find(prefix, prefix_len)
+struct in6_addr *prefix; /* Prefix to search for */
+u_int8_t prefix_len; /* Prefix length */
+{
+	struct mip6_prefix *pq;
+
+	for (pq = mip6_pq; pq; pq = pq->next) {
+		if (in6_are_prefix_equal(&pq->prefix, prefix, prefix_len))
+			return pq;
+	}
+	return NULL;
+}
+
+
+
+/*
+ ******************************************************************************
+ * Function: mip6_prefix_create
+ * Description: Create a prefix and add it as the first entry in the list.
+ * Start the timer if not started already.
+ * Ret value: Pointer to created prefix list entry or NULL.
+ ******************************************************************************
+ */
+struct mip6_prefix *
+mip6_prefix_create(ifp, prefix, prefix_len, valid_time)
+struct ifnet *ifp; /* Outgoing interface */
+struct in6_addr *prefix; /* Prefix to search for */
+u_int8_t prefix_len; /* Prefix length */
+u_int32_t valid_time; /* Time (s) that the prefix is valid */
+{
+	struct mip6_prefix *pq;
+	int s, start_timer = 0;
+
+	if (mip6_pq == NULL)
+		start_timer = 1;
+
+	pq = (struct mip6_prefix *)_MALLOC(sizeof(struct mip6_prefix),
+					   M_TEMP, M_WAITOK);
+	if (pq == NULL)
+		return NULL;
+	bzero(pq, sizeof(struct mip6_prefix));
+
+	s = splnet();
+	pq->next = mip6_pq;
+	pq->ifp = ifp;
+	pq->prefix = *prefix;
+	pq->prefix_len = prefix_len;
+	pq->valid_time = valid_time;
+	mip6_pq = pq;
+	splx(s);
+
+#if MIP6_DEBUG
+	mip6_debug("\nInternal Prefix list entry created (0x%x)\n", pq);
+	mip6_debug("Interface: %s\n", if_name(ifp));
+	mip6_debug("Prefix: %s\n", ip6_sprintf(&pq->prefix));
+	mip6_debug("Prefix len: %d\n", pq->prefix_len);
+	mip6_debug("Life time: %d\n", htonl(pq->valid_time));
+#endif
+
+	if (start_timer) {
+#if defined(__FreeBSD__) && __FreeBSD__ >= 3
+		mip6_timer_prefix_handle =
+#endif
+		timeout(mip6_timer_prefix, (void *)0, hz);
+	}
+	return pq;
+}
+
+
+
+/*
+ 
****************************************************************************** + * Function: mip6_prefix_delete + * Description: Delete the requested prefix list entry. + * Ret value: Ptr to next entry in list or NULL if last entry removed. + ****************************************************************************** + */ +struct mip6_prefix * +mip6_prefix_delete(pre_del) +struct mip6_prefix *pre_del; /* Prefix list entry to be deleted */ +{ + struct mip6_prefix *pre; /* Current entry in the list */ + struct mip6_prefix *pre_prev; /* Previous entry in the list */ + struct mip6_prefix *pre_next; /* Next entry in the list */ + int s; + + /* Find the requested entry in the link list. */ + s = splnet(); + pre_next = NULL; + pre_prev = NULL; + for (pre = mip6_pq; pre; pre = pre->next) { + pre_next = pre->next; + if (pre == pre_del) { + if (pre_prev == NULL) + mip6_pq = pre->next; + else + pre_prev->next = pre->next; + +#if MIP6_DEBUG + mip6_debug("\nMIPv6 prefix entry deleted (0x%x)\n", pre); +#endif + _FREE(pre, M_TEMP); + + /* Remove the timer if the prefix queue is empty */ + if (mip6_pq == NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_prefix, (void *)NULL, + mip6_timer_prefix_handle); + callout_handle_init(&mip6_timer_prefix_handle); +#else + untimeout(mip6_timer_prefix, (void *)NULL); +#endif + } + break; + } + pre_prev = pre; + } + splx(s); + return pre_next; +} + + + +/* + ############################################################################## + # + # TIMER FUNCTIONS + # These functions are called at regular basis. They operate on the lists, e.g. + # reducing timer counters and removing entries from the list if needed. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_timer_na + * Description: Called once every second. 
For each entry in the list a Neighbor + * Advertisement is sent until the counter value reaches 0. Then + * the entry is removed. + * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_na(arg) +void *arg; /* Not used */ +{ + struct mip6_na *nap; /* Neighbor Advertisement entry */ + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* Go through the entire list of Neighbor Advertisement entries. */ + s = splnet(); + for (nap = mip6_naq; nap;) { + mip6_send_na(nap); + if (nap->no <= 0) + nap = mip6_na_delete(nap); + else + nap = nap->next; + } + splx(s); + + if (mip6_naq != NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_na_handle = +#endif + timeout(mip6_timer_na, (void *)0, hz); + } +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + + + +/* + ****************************************************************************** + * Function: mip6_timer_bc + * Description: Called once every second. For each entry in the BC list, a + * counter is reduced by 1 until it reaches the value of zero, + * then the entry is removed. + * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_bc(arg) +void *arg; /* Not used */ +{ + struct mip6_bc *bcp; /* Current entry in the BC list */ + struct mip6_bc *bcp_nxt; /* Next BC list entry */ + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* Go through the entire list of Binding Cache entries. */ + s = splnet(); + for (bcp = mip6_bcq; bcp;) { + bcp->lifetime -= 1; + if (bcp->lifetime == 0) { + mip6_bc_delete(bcp, &bcp_nxt); + bcp = bcp_nxt; + } else + bcp = bcp->next; + } + splx(s); + + /* XXX */ + /* Code have to be added to take care of bc_info.br_interval + variable. 
*/ + /* We have to send a BR when the mip6_bc.lifetime == + mip6_bc.bc_info.br_interval. */ + if (mip6_bcq != NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_bc_handle = +#endif + timeout(mip6_timer_bc, (void *)0, hz); + } +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif + return; +} + + + +/* + ****************************************************************************** + * Function: mip6_timer_prefix + * Description: Called once every second. Search the list of prefixes and if + * a prefix has timed out it is removed from the list. + * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_prefix(arg) +void *arg; /* Not used */ +{ + struct mip6_prefix *pq_entry; /* Current entry in the prefix list */ + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* Go through the entire list of prefix entries. */ + s = splnet(); + for (pq_entry = mip6_pq; pq_entry;) { + pq_entry->valid_time -= 1; + if (pq_entry->valid_time == 0) + pq_entry = mip6_prefix_delete(pq_entry); + else + pq_entry = pq_entry->next; + } + splx(s); + + if (mip6_pq != NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_prefix_handle = +#endif + timeout(mip6_timer_prefix, (void *)0, hz); + } +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif + return; +} + + + +/* + ############################################################################## + # + # IOCTL AND DEBUG FUNCTIONS + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_ioctl + * Description: The ioctl handler for MIPv6. These are used by the + * configuration program to set and get various parameters. 
+ * Ret value: 0 or error code + ****************************************************************************** + */ +int +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) +mip6_ioctl(so, cmd, data, ifp, p) +struct socket *so; +u_long cmd; +caddr_t data; +struct ifnet *ifp; +struct proc *p; +#else +mip6_ioctl(so, cmd, data, ifp) +struct socket *so; +u_long cmd; +caddr_t data; +struct ifnet *ifp; +#endif +{ + int res; + + /* Note: privileges already checked in in6_control(). */ + + res = 0; + switch (cmd) { + case SIOCSBCFLUSH_MIP6: + case SIOCSDEFCONFIG_MIP6: + res = mip6_clear_config_data(cmd, data); + return res; + + case SIOCSBRUPDATE_MIP6: + res = mip6_write_config_data(cmd, data); + return res; + + case SIOCSHAPREF_MIP6: + /* Note: this one can be run before attach. */ + if (mip6_write_config_data_ha_hook) + res = (*mip6_write_config_data_ha_hook) + (cmd, data); + break; + + case SIOCACOADDR_MIP6: + case SIOCAHOMEADDR_MIP6: + case SIOCSBULIFETIME_MIP6: + case SIOCSHRLIFETIME_MIP6: + case SIOCDCOADDR_MIP6: + /* Note: these can be run before attach. */ + if (mip6_write_config_data_mn_hook) + res = (*mip6_write_config_data_mn_hook) + (cmd, data); + break; + + case SIOCSDEBUG_MIP6: + case SIOCSENABLEBR_MIP6: + case SIOCSATTACH_MIP6: + res = mip6_enable_func(cmd, data); + return res; + + case SIOCSFWDSLUNICAST_MIP6: + case SIOCSFWDSLMULTICAST_MIP6: + /* Note: these can be run before attach. */ + if (mip6_enable_func_ha_hook) + res = (*mip6_enable_func_ha_hook)(cmd, data); + break; + + case SIOCSPROMMODE_MIP6: + case SIOCSBU2CN_MIP6: + case SIOCSREVTUNNEL_MIP6: + case SIOCSAUTOCONFIG_MIP6: + case SIOCSEAGERMD_MIP6: + /* Note: these can be run before attach. 
*/ + if (mip6_enable_func_mn_hook) + res = (*mip6_enable_func_mn_hook)(cmd, data); + break; + + case SIOCSRELEASE_MIP6: + mip6_release(); + return res; + + default: + res = EOPNOTSUPP; + break; + } + + if (MIP6_IS_HA_ACTIVE) { + res = 0; + switch (cmd) { + case SIOCSHALISTFLUSH_MIP6: + if (mip6_clear_config_data_ha_hook) + res = (*mip6_clear_config_data_ha_hook) + (cmd, data); + break; + + default: + res = EOPNOTSUPP; + break; + } + } + + if (MIP6_IS_MN_ACTIVE) { + res = 0; + switch (cmd) { + case SIOCSFORADDRFLUSH_MIP6: + case SIOCSHADDRFLUSH_MIP6: + case SIOCSBULISTFLUSH_MIP6: + if (mip6_clear_config_data_mn_hook) + res = (*mip6_clear_config_data_mn_hook) + (cmd, data); + break; + + default: + res = EOPNOTSUPP; + break; + } + } + if (res) { +#if MIP6_DEBUG + printf("%s: unknown command: %lu\n", __FUNCTION__, (u_long)cmd); +#endif + } + return res; +} + + + +/* + ****************************************************************************** + * Function: mip6_debug + * Description: This function displays MIPv6 debug messages to the console + * if activated with the configuration program. Note that this + * is included only when "options MIP6_DEBUG" is defined. + * Ret value: - + ****************************************************************************** + */ +#if MIP6_DEBUG +void mip6_debug(char *fmt, ...) +{ +#ifndef __bsdi__ + va_list ap; + + if (!mip6_debug_is_enabled) + return; + + va_start(ap, fmt); + vprintf(fmt, ap); + va_end(ap); +#endif +} + + + +void +mip6_enable_debug(int status) +{ + mip6_debug_is_enabled = status; +} +#endif /* MIP6_DEBUG */ + + + +/* + ****************************************************************************** + * Function: mip6_write_config_data + * Description: This function is called to write certain config values for + * MIPv6. The data is written into the global config structure. 
+ * Ret value: - + ****************************************************************************** + */ +int mip6_write_config_data(u_long cmd, caddr_t data) +{ + int retval = 0; + + switch (cmd) { + case SIOCSBRUPDATE_MIP6: + mip6_config.br_update = *(u_int8_t *)data; + break; + } + return retval; +} + + + +/* + ****************************************************************************** + * Function: mip6_clear_config_data + * Description: This function is called to clear internal lists handled by + * MIPv6. + * Ret value: - + ****************************************************************************** + */ +int mip6_clear_config_data(u_long cmd, caddr_t data) +{ + int s, retval = 0; + struct mip6_bc *bcp, *bcp_nxt; + + s = splnet(); + switch (cmd) { + case SIOCSBCFLUSH_MIP6: + for (bcp = mip6_bcq; bcp;) { + if(!bcp->hr_flag) { + mip6_bc_delete(bcp, &bcp_nxt); + bcp = bcp_nxt; + } else + bcp = bcp->next; + } + break; + + case SIOCSDEFCONFIG_MIP6: + mip6_config.bu_lifetime = 600; + mip6_config.br_update = 60; + mip6_config.hr_lifetime = 3600; + mip6_config.enable_outq = 1; + break; + } + splx(s); + return retval; +} + + + +/* + ****************************************************************************** + * Function: mip6_enable_func + * Description: This function is called to enable or disable certain functions + * in mip6. The data is written into the global config struct. 
+ * Ret value: - + ****************************************************************************** + */ +int mip6_enable_func(u_long cmd, caddr_t data) +{ + int enable; + int retval = 0; + + enable = ((struct mip6_input_data *)data)->value; + + switch (cmd) { + case SIOCSDEBUG_MIP6: +#if MIP6_DEBUG + mip6_enable_debug(enable); +#else + printf("No Mobile IPv6 debug information available!\n"); +#endif + break; + + case SIOCSENABLEBR_MIP6: + mip6_config.enable_br = enable; + break; + + case SIOCSATTACH_MIP6: + printf("%s: attach %d\n", __FUNCTION__, enable); /* RM */ + retval = mip6_attach(enable); + break; + } + return retval; +} diff --git a/bsd/netinet6/mip6.h b/bsd/netinet6/mip6.h new file mode 100644 index 000000000..328968380 --- /dev/null +++ b/bsd/netinet6/mip6.h @@ -0,0 +1,861 @@ +/* $KAME: mip6.h,v 1.8 2000/03/18 03:05:39 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETINET6_MIP6_H_ +#define _NETINET6_MIP6_H_ + +#include +#include + +struct ifnet; + +/* + * Definition For Mobile Internet Protocol Version 6. + * Draft draft-ietf-mobileip-ipv6-09.txt + */ + +/* Definition of MIPv6 states for the Event-State machine */ +#define MIP6_STATE_UNDEF 0x01 +#define MIP6_STATE_HOME 0x02 +#define MIP6_STATE_DEREG 0x03 +#define MIP6_STATE_NOTREG 0x04 +#define MIP6_STATE_REG 0x05 +#define MIP6_STATE_REREG 0x06 +#define MIP6_STATE_REGNEWCOA 0x07 + + +/* Definition of states used by the move detection algorithm used by MIPv6. */ +#define MIP6_MD_BOOT 0x01 +#define MIP6_MD_UNDEFINED 0x02 +#define MIP6_MD_HOME 0x03 +#define MIP6_MD_FOREIGN 0x04 + + +/* Definition of Home Address route states used by the move detection + algorithm used by MIPv6. */ +#define MIP6_ROUTE_NET 0x01 +#define MIP6_ROUTE_HOST 0x02 + + +/* Type of node calling mip6_tunnel */ +#define MIP6_NODE_MN 0x01 +#define MIP6_NODE_HA 0x02 + + +/* Movement Detection default values */ +#define MIP6_MAX_LOST_ADVINTS 3 + + +/* Scope for hook activation */ +#define MIP6_GENERIC_HOOKS 0x01 +#define MIP6_SPECIFIC_HOOKS 0x02 +#define MIP6_CONFIG_HOOKS 0x03 + + +/* Definition of states for tunnels set up by the Home Agent and the MN. 
*/ +#define MIP6_TUNNEL_ADD 0 +#define MIP6_TUNNEL_MOVE 1 +#define MIP6_TUNNEL_DEL 2 + + +/* Definition of length for different destination options */ +#define IP6OPT_BULEN 8 /* Length of BU option */ +#define IP6OPT_BALEN 11 /* Length of BA option */ +#define IP6OPT_BRLEN 0 /* Length of BR option */ +#define IP6OPT_HALEN 16 /* Length of HA option */ +#define IP6OPT_UIDLEN 2 /* Length of Unique Identifier sub-option */ +#define IP6OPT_HALISTLEN 16 /* Length of HA List sub-ption */ +#define IP6OPT_COALEN 16 /* Length of Alternate COA sub-option */ + + +/* Definition of minimum length of MIPv6 destination options. + Length includes option Type and Length. */ +#define IP6OPT_BAMINLEN (IP6OPT_MINLEN + IP6OPT_BALEN) +#define IP6OPT_BRMINLEN (IP6OPT_MINLEN + IP6OPT_BRLEN) +#define IP6OPT_BUMINLEN (IP6OPT_MINLEN + IP6OPT_BULEN) +#define IP6OPT_HAMINLEN (IP6OPT_MINLEN + IP6OPT_HALEN) + + +/* Definition of sub-options used by the Destination Options */ +#define IP6SUBOPT_UNIQUEID 0x02 /* Unique Identifier (BU, BR) */ +#define IP6SUBOPT_HALIST 0x03 /* Home Agents List (BA) */ +#define IP6SUBOPT_ALTCOA 0x04 /* Alternate COA (BU) */ + + +/* Definition of MIPv6 Binding Update option flags */ +#define MIP6_BU_AFLAG 0x80 /* BU Acknowledgement flag present */ +#define MIP6_BU_HFLAG 0x40 /* BU Home Registration flag present */ +#define MIP6_BU_RFLAG 0x20 /* BU MN is Router flag present */ + + +/* Definition of flags used for indication of options present in a + destination header (mip6_indata->optflag) */ +#define MIP6_DSTOPT_BU 0x80 /* BU Option present */ +#define MIP6_DSTOPT_BA 0x40 /* BA Option present */ +#define MIP6_DSTOPT_BR 0x20 /* BR Option present */ +#define MIP6_DSTOPT_HA 0x10 /* HA Option present */ +#define MIP6_DSTOPT_UID 0x08 /* Sub-option Unique Id present */ +#define MIP6_DSTOPT_COA 0x04 /* Sub-option Alternate COA present */ +#define MIP6_DSTOPT_HAL 0x02 /* Sub-option HAs List present */ + + +#if 0 +/* Definition of flags for Home Agent */ +#define ND_RA_FLAG_HA 
0x20 /* RA indicates that router works as HA */ +#define ND_OPT_PI_FLAG_RADDR 0x20 /* Prefix Information option incl. global + IP address */ +#endif + + +/* Definition of timers for signals */ +#define MIP6_BU_LIFETIME 600 /* Lifetime for BU (s) */ +#define MIP6_BU_LIFETIME_DEFRTR 60 /* Lifetime for BU sent to previous def + router (s) */ +#define MIP6_BU_LIFETIME_DHAAD 16 /* Lifetime for BU when Dynamic Home + Agent Address Discovery (s) */ +#define MIP6_MAX_FAST_UPDATES 5 /* Max number of fast updates (BUs) + being sent */ +#define MIP6_MAX_UPDATE_RATE 1 /* Rate limiting for sending successive + fast BUs (sec) */ +#define MIP6_SLOW_UPDATE_RATE 10 /* Rate limiting for sending successive + slow BUs (sec) */ +#define MIP6_MAX_BINDACK_TIMEOUT 256 /* Max time to wait for a BA */ +#define MIP6_MAX_ADVERT_REXMIT 3 /* Max retransmission of NA when retur- + ning to home link */ +#define MIP6_OUTQ_LIFETIME 20 /* Max number of 0.1s units that an entry + is stored in the output queue */ +#define MIP6_OUTQ_INTERVAL 5 /* Interval in units of 0.1s that the out + queue is searched */ + + +/* Definition of Binding Acknowledgement status field */ +#define MIP6_BA_STATUS_ACCEPT 0 /* Binding Update accepted */ +#define MIP6_BA_STATUS_UNSPEC 128 /* Reason unspecified */ +#define MIP6_BA_STATUS_PROHIBIT 130 /* Administratively prohibited */ +#define MIP6_BA_STATUS_RESOURCE 131 /* Insufficient resources */ +#define MIP6_BA_STATUS_HOMEREGNOSUP 132 /* Home registration not supported */ +#define MIP6_BA_STATUS_SUBNET 133 /* Not home subnet */ +#define MIP6_BA_STATUS_DHAAD 135 /* Dynamic home agent address + discovery response */ +#define MIP6_BA_STATUS_IFLEN 136 /* Incorrect interface id length */ +#define MIP6_BA_STATUS_NOTHA 137 /* Not home agent for this MN */ + + +/* Macro for modulo 2^^16 comparison */ +#define MIP6_LEQ(a,b) ((int16_t)((a)-(b)) <= 0) + + +/* Macros started with MIP6_ADDR is Mobile IPv6 local */ +#define MIP6_ADDR_ANYCAST_HA 0x7e + +#if BYTE_ORDER == BIG_ENDIAN 
+#define MIP6_ADDR_INT32_ULL 0xfe800000 /* Unicast Link Local */ +#define MIP6_ADDR_INT32_USL 0xfec00000 /* Unicast Site Local */ +#define MIP6_ADDR_INT32_AHA1 0xfffffffe /* Anycast Home Agent bit 97-128 */ +#define MIP6_ADDR_INT32_AHA2 0xfdffffff /* Anycast Home Agent bit 65-96 */ +#elif BYTE_ORDER == LITTLE_ENDIAN +#define MIP6_ADDR_INT32_ULL 0x000080fe +#define MIP6_ADDR_INT32_USL 0x0000c0fe +#define MIP6_ADDR_INT32_AHA1 0xfeffffff +#define MIP6_ADDR_INT32_AHA2 0xfffffffd +#endif + + +/* Definition of some useful macros to handle IP6 addresses */ +extern struct in6_addr in6addr_linklocal; +extern struct in6_addr in6addr_sitelocal; +extern struct in6_addr in6addr_aha_64; /* 64 bits identifier */ +extern struct in6_addr in6addr_aha_nn; /* 121-nn bits identifier */ + + +/* Definition of states for flag in queue for outgoing packets. */ +enum send_state {NOT_SENT, SENT}; + + +/* Definition of event-state machine type. */ +enum esm_type {PERMANENT, TEMPORARY}; + + +/* Configuration parameters needed for MIPv6. Controlled by the user */ +struct mip6_static_addr { + LIST_ENTRY(mip6_static_addr) addr_entry; /* Next IPv6 address list */ + struct ifnet *ifp; /* Interface */ + u_int8_t prefix_len; /* Prefix length for address */ + struct in6_addr ip6_addr; /* Address to be used at foreign network */ +}; + + +/* + * fna_list List of pre-assigned care-of addresses to be used at + * foreign networks that the MN might visit + * bu_lifetime Used by the MN when sending a BU to the CN if it wants + * to use a smaller value than received in the home + * registration acknowledgement + * br_update Indicates when the CN sends a BR to the MN. 
The value + * should be given as percentage of the bu_lifetime + * ha_pref Preference for the Home Agent + * hr_lifetime Default life time for home registration (only sent to the + * Home Agent) + * fwd_sl_unicast Enable forwarding of site local unicast dest addresses + * fwd_sl_multicast Enable forwarding of site local multicast dest addresses + * enable_prom_mode Enable link layer promiscus mode (used by move detection) + * enable_bu_to_cn Enable BU being sent to the CN (Route optimization on/off) + * enable_rev_tunnel Enable tunneling of packets from MN to CN via Home Agent + * enable_br Enable sending BR to the MN + * autoconfig Only enable MIP6 if the mip6 deamon is running + * eager_md Enable eager Movement Detection + * enable_outq Enable reading from the MIP6 output queue for piggy + * backing (Not configurable, handled internally) + */ +struct mip6_config { + LIST_HEAD(fna_list, mip6_static_addr) fna_list; + u_int32_t bu_lifetime; + u_int8_t br_update; + int16_t ha_pref; + u_int32_t hr_lifetime; + u_int8_t fwd_sl_unicast; + u_int8_t fwd_sl_multicast; + u_int8_t enable_prom_mode; + u_int8_t enable_bu_to_cn; + u_int8_t enable_rev_tunnel; + u_int8_t enable_br; + u_int8_t autoconfig; + u_int8_t eager_md; + u_int8_t enable_outq; +}; + + +/* Generic option format */ +struct mip6_bu_data { + u_int8_t prefix_len; /* Prefix length for a Home Address */ + u_int8_t ack; /* Acknowledgement flag */ +}; + + +/* Generic option format */ +struct mip6_opt { + u_int8_t type; /* Option type */ + u_int8_t len; /* Option length (octets) excl. type and length */ +} __attribute__ ((packed)); + + +/* List of prefixes extracted from Router Advertisments being sent by + the Home Agent. 
*/ +struct mip6_prefix { + struct mip6_prefix *next; /* Ptr to next entry in the list */ + struct ifnet *ifp; /* Outgoing interface */ + struct in6_addr prefix; /* Announced prefix (on-link) */ + u_int8_t prefix_len; /* Prefix length for IP address */ + u_int32_t valid_time; /* Remaining (s) until prefix expires */ +} __attribute__ ((packed)); + + +/* Binding Update destination option format */ +struct mip6_opt_bu { + u_int8_t type; /* Option type */ + u_int8_t len; /* Option length excluding Type and length */ + u_int8_t flags; /* Flags (A, H and R) */ + u_int8_t prefix_len; /* Prefix length for IP address */ + u_int16_t seqno; /* Sequence number */ + u_int32_t lifetime; /* Seconds remaining until the binding expires */ +} __attribute__ ((packed)); + + +/* Binding Acknowledgement destination option format */ +struct mip6_opt_ba { + u_int8_t type; /* Option type */ + u_int8_t len; /* Option length (octets) excl. type and length */ + u_int8_t status; /* Result of the BU */ + u_int16_t seqno; /* Sequence number */ + u_int32_t lifetime; /* Granted lifetime (s) for the BU in the BC */ + u_int32_t refresh; /* Interval for MN to send BU to refresh BC */ +} __attribute__ ((packed)); + + +/* Binding Request destination option format */ +struct mip6_opt_br { + u_int8_t type; /* Option type */ + u_int8_t len; /* Option length (octets) excl. type and length */ +} __attribute__ ((packed)); + + +/* Home Address option format */ +struct mip6_opt_ha { + u_int8_t type; /* Option type */ + u_int8_t len; /* Option length excl. type and length */ + struct in6_addr home_addr; /* Home Addr of the MN sending the packet */ +} __attribute__ ((packed)); + + +/* Unique Identifier sub-option format */ +struct mip6_subopt_id { + u_int8_t type; /* Sub-option type */ + u_int8_t len; /* Sub-option length (octets) excl. 
type and length */ + u_int16_t id; /* Unique identifier */ +} __attribute__ ((packed)); + + +/* Home Agents list sub-option format */ +struct mip6_subopt_hal { + u_int8_t type; /* Sub-option type */ + u_int8_t len; /* Sub-option length excl. type and length */ + struct in6_addr halist[1]; /* List of HA's on the home link */ +} __attribute__ ((packed)); + + +/* Alternate Care-of Address sub-option format */ +struct mip6_subopt_coa { + u_int8_t type; /* Sub-option type */ + u_int8_t len; /* Length (octets) excl. type and len fields */ + struct in6_addr coa; /* Alternate COA */ +} __attribute__ ((packed)); + + +/* Buffer for storing a consequtive sequence of sub-options */ +struct mip6_subbuf { + u_int16_t len; /* # of used bytes in buffer */ + char buffer[512]; +}; + + +/* The event-state machine must be maintained for each Home Address. */ +struct mip6_dad { + struct mip6_subopt_hal *hal; /* Home Agents list */ + int index; /* Next entry in list to try */ +}; + +struct mip6_hafn { + time_t time; /* Absolute expire time */ + int16_t pref; /* Preference for this HA */ + u_int8_t prefix_len; /* Prefix_len for HA Address */ + struct in6_addr addr; /* FN Home Agent global unicast address */ +}; + +struct mip6_esm { + struct mip6_esm *next; /* Ptr to next entry in the list */ + struct ifnet *ifp; /* I/f where home address is applied */ + const struct encaptab *ep; /* Encapsulation attach (MN -> HA) */ + int state; /* State for the home address */ + enum esm_type type; /* Type of event-state machine */ + struct in6_addr home_addr; /* Home address */ + u_int8_t prefix_len; /* Prefix_len for Home Address */ + u_int16_t lifetime; /* if type=PERMANENT 0xFFFF, else x */ + struct in6_addr ha_hn; /* Home agent address (home network) */ + struct in6_addr coa; /* Current primary care-of address */ + struct mip6_hafn *ha_fn; /* Home agent address (foreign network) */ + struct mip6_dad *dad; /* For Dynamic HA Address Discovery */ +}; + + +/* Binding Cache parameters. 
Bindings for other IPv6 nodes. */ +/* Maintained by each node. */ +struct bc_info { + u_int32_t br_interval; /* % of mip6_lifetime, max 60s, min 2s */ + u_int8_t no_of_sent_br; /* Number of sent BR to a Mobile Node */ + u_int8_t max_advert; /* ? */ + u_int8_t ra_tunneled; /* RA being tunneled to MN */ + u_int8_t ra_interval; /* Interval for sending RA */ +}; + +struct mip6_bc { + struct mip6_bc *next; /* Ptr to next entry in the list */ + struct in6_addr home_addr; /* Home Address of the MN for which this is + the BC entry */ + struct in6_addr coa; /* COA for MN indicated by the HA field */ + u_int32_t lifetime; /* Remaining lifetime for this BC entry */ + u_int8_t hr_flag; /* Flag for home registration entry (T/F) */ + u_int8_t rtr_flag; /* MN is a router (T/F) */ + u_int8_t prefix_len; /* Prefix length in last received BU */ + u_int16_t seqno; /* Maximum value of the sequence number */ + struct bc_info info; /* Usage info for cache replacement policy */ + time_t lasttime; /* The time at which a BR was last sent */ + const struct encaptab *ep; /* Encapsulation attach (HA -> MN) */ +}; + + + +/* Binding Update List parameters. Information for each BU sent by this MN */ +/* Each MN maintains this list. */ +struct mip6_retrans { + struct mip6_opt_bu *bu_opt; /* BU option in case of retransmission */ + struct mip6_subbuf *bu_subopt; /* BU sub-option in case of retrans. 
*/ + u_int8_t ba_timeout; /* Exponential back-off starting at 1 */ + u_int8_t time_left; /* Time left until next retransmission */ +}; + +struct mip6_bul { + struct mip6_bul *next; /* Ptr to next entry in the list */ + struct in6_addr dst_addr; /* Destination address for sent BU */ + struct in6_addr bind_addr; /* Home Address or previous COA */ + struct in6_addr coa; /* Care-of address sent in the BU */ + u_int32_t lifetime; /* Remaining binding lifetime */ + u_int32_t refreshtime; /* Refresh time for the BU */ + u_int16_t seqno; /* Last value for sent seq number */ + time_t lasttime; /* Time at which a BU was last sent */ + u_int32_t no_of_sent_bu; /* Number of sent BU to a MN */ + struct mip6_retrans *state; /* Status for BU being acknowledged */ + u_int8_t bu_flag; /* Flag for sending future BU (T/F) */ + u_int8_t hr_flag; /* Flag for home reg (True / False) */ + u_int8_t update_rate; /* Seconds between consequtive BUs */ +}; + + +/* Home Agent List parameters. Information about each other HA on the link + that this node is serving as a HA. One HA list for each link it is + serving. */ +/* Each HA maintains this list. 
*/ +struct mip6_addr_list { + struct mip6_addr_list *next; /* Ptr to next entry in the list */ + struct in6_addr ip6_addr; /* IPv6 address */ + u_int8_t prefix_len; +}; + +struct mip6_ha_list { + struct mip6_ha_list *next; /* Ptr to next entry in the list */ + struct in6_addr ll_addr; /* Link-local IP-addr of a node on + the home link */ + u_int16_t lifetime; /* Remaining lifetime of this HA + list entry */ + int16_t pref; /* Preference for this HA */ + struct mip6_addr_list *addr_list; /* List of global IP addresses for + this HA */ +}; + +struct mip6_link_list { + struct mip6_link_list *next; /* Ptr to next entry in the list */ + struct mip6_ha_list *ha_list; /* List of Home Agents for the link */ + struct ifnet *ifp; /* Interface */ + char ifname[IFNAMSIZ+1]; /* Link identifier */ +}; + + +/* Neighbor Advertisement information stored for retransmission when the + Mobile Node is returning to its Home Network or the Home Agent is + requested to act as a proxy for the Mobile Node when it is moving to a + Foreign Network. */ +struct mip6_na +{ + struct mip6_na *next; /* Ptr to next entry in the list */ + struct ifnet *ifp; /* Interface for sending the NA */ + struct in6_addr home_addr; /* Home address of the mobile node */ + struct in6_addr dst_addr; /* Destination address */ + struct in6_addr target_addr; /* Target address */ + u_int8_t prefix_len; /* Prefix length for home address */ + u_long flags; /* Flags for the NA message */ + int use_link_opt; /* Include Target link layer address + option or not + (0 = Do not include, 1 = Include) */ + int no; /* Remaining no of times to send the NA */ +}; + + +/* Definition of global variable used by Mobile IPv6. All variables are + stored in node byte order. 
*/ +struct mip6_indata { + u_int8_t flag; /* How to handle tunneled packets */ + u_int8_t optflag; /* Dest options and sub-options flag */ + struct in6_addr ip6_src; /* Orig src addr from IPv6 header */ + struct in6_addr ip6_dst; /* Orig dst addr from IPv6 header */ + struct mip6_opt_bu *bu_opt; /* BU option present */ + struct mip6_opt_ba *ba_opt; /* BA option present */ + struct mip6_opt_br *br_opt; /* BR option present */ + struct mip6_opt_ha *ha_opt; /* HA option present */ + struct mip6_subopt_id *uid; /* Sub-option Unique ID present */ + struct mip6_subopt_coa *coa; /* Sub-option alt coa present */ + struct mip6_subopt_hal *hal; /* Sub-option HAs List present */ +}; + + +/* Queue of outgoing packets that are waiting to be sent. */ +struct mip6_output { + struct mip6_output *next; /* Ptr to next option in chain */ + void *opt; /* BU, BA or BR dest option to be sent */ + struct mip6_subbuf *subopt; /* Sub-option to be sent (if present) */ + struct in6_addr ip6_dst; /* Destination address for IPv6 packet */ + struct in6_addr ip6_src; /* Source address for IPv6 packet */ + enum send_state flag; /* Has packet been sent or not? */ + u_int32_t lifetime; /* Time remaining for entry in output queue + (units of 0.1s) */ +}; + +#ifdef KERNEL + +/* + * Macro MIP6_FREEINDATA free memory allocated for the global variable + * mip6_inp and its members. Set the variable to point at NULL when + * the memory has been freed. 
+ */ +#define MIP6_FREEINDATA \ +do { \ + if (mip6_inp != NULL) { \ + if (mip6_inp->bu_opt != NULL) \ + _FREE(mip6_inp->bu_opt, M_TEMP); \ + if (mip6_inp->ba_opt != NULL) \ + _FREE(mip6_inp->ba_opt, M_TEMP); \ + if (mip6_inp->br_opt != NULL) \ + _FREE(mip6_inp->br_opt, M_TEMP); \ + if (mip6_inp->ha_opt != NULL) \ + _FREE(mip6_inp->ha_opt, M_TEMP); \ + if (mip6_inp->uid != NULL) \ + _FREE(mip6_inp->uid, M_TEMP); \ + if (mip6_inp->coa != NULL) \ + _FREE(mip6_inp->coa, M_TEMP); \ + if (mip6_inp->hal != NULL) \ + _FREE(mip6_inp->hal, M_TEMP); \ + _FREE(mip6_inp, M_TEMP); \ + mip6_inp = NULL; \ + } \ +} while (0) + +#define MIP6_IS_MN_ACTIVE ((mip6_module & MIP6_MN_MODULE) == MIP6_MN_MODULE) +#define MIP6_IS_HA_ACTIVE ((mip6_module & MIP6_HA_MODULE) == MIP6_HA_MODULE) + + +/* External Declaration of Global variables. */ +extern struct mip6_indata *mip6_inp; /* Input data rec in one packet */ +extern struct mip6_output *mip6_outq; /* Ptr to output queue */ +extern struct mip6_esm *mip6_esmq; /* Ptr to list of Home Addresses */ +extern struct mip6_bc *mip6_bcq; /* First entry in the BC list */ +extern struct mip6_prefix *mip6_pq; /* First entry in prefix list */ +extern struct mip6_config mip6_config; /* Config parameters for MIP6 */ +extern struct mip6_bul *mip6_bulq; +extern struct mip6_link_list *mip6_llq; +extern struct nd_prefix *mip6_home_prefix; +extern struct nd_prefix *mip6_primary_prefix; + +extern u_int8_t mip6_module; /* Info about loaded modules (MN/HA) */ +extern int mip6_md_state; /* Movement Detection state */ +extern int mip6_route_state; /* Home Address route state */ +extern int mip6_max_lost_advints; /* No. 
lost Adv before start of NUD */ +extern int mip6_nd6_delay; +extern int mip6_nd6_umaxtries; + + +/* External declaration of function prototypes (mip6_io.c) */ +extern int mip6_new_packet + __P((struct mbuf *)); +extern int mip6_store_dstopt_pre + __P((struct mbuf *, u_int8_t *, u_int8_t, u_int8_t)); +extern int mip6_store_dstopt + __P((struct mbuf *, u_int8_t *, u_int8_t)); +extern int mip6_store_dstsubopt + __P((struct mbuf *, u_int8_t *, u_int8_t, int, int)); +extern int mip6_output + __P((struct mbuf *, struct ip6_pktopts **)); +extern int mip6_add_rh + __P((struct ip6_pktopts **, struct mip6_bc *)); +extern void mip6_align + __P((struct ip6_dest *, int *)); +extern void mip6_dest_offset + __P((struct ip6_dest *, int *)); +extern int mip6_add_ha + __P((struct ip6_dest **, int *, struct in6_addr *, struct in6_addr *)); +extern int mip6_add_bu + __P((struct ip6_dest **, int *, struct mip6_opt_bu *, + struct mip6_subbuf *)); +extern int mip6_add_ba + __P((struct ip6_dest **, int *, struct mip6_opt_ba *, + struct mip6_subbuf *)); +extern int mip6_add_br + __P((struct ip6_dest **, int *, struct mip6_opt_br *, + struct mip6_subbuf *)); +extern int mip6_store_subopt + __P((struct mip6_subbuf **, caddr_t)); + + +/* External declaration of function prototypes (mip6.c) */ +extern void mip6_init + __P((void)); +extern void mip6_exit + __P((void)); +extern int mip6_rec_ctrl_sig + __P((struct mbuf *, int)); +extern int mip6_icmp6_input + __P((struct mbuf *, int)); +extern int mip6_rec_bu + __P((struct mbuf *, int)); +extern void mip6_ha2srcaddr + __P((struct mbuf *)); +extern int mip6_send_ba + __P((struct in6_addr *, struct in6_addr *, struct in6_addr *, + struct mip6_subbuf *, u_int8_t, u_int16_t, u_int32_t)); +extern void mip6_send_na + __P((struct mip6_na *)); +extern struct mbuf *mip6_create_ip6hdr + __P((struct in6_addr *, struct in6_addr *, u_int8_t)); +extern struct ip6_rthdr *mip6_create_rh + __P((struct in6_addr *, u_int8_t)); +extern struct mip6_opt_ba 
*mip6_create_ba + __P((u_int8_t, u_int16_t, u_int32_t)); +extern struct ip6_dest *mip6_create_dh + __P((void *, struct mip6_subbuf *, u_int8_t)); +extern int mip6_opt_offset + __P((struct mbuf *, int, int)); +extern int mip6_addr_on_link + __P((struct in6_addr *, int)); +extern u_int32_t mip6_min_lifetime + __P((struct in6_addr *, int)); +extern void mip6_build_in6addr + __P((struct in6_addr *, struct in6_addr *, const struct in6_addr *, + int)); +extern void mip6_build_ha_anycast + __P((struct in6_addr *, const struct in6_addr *, int)); +extern int mip6_add_ifaddr + __P((struct in6_addr *addr, struct ifnet *ifp, int plen, int flags)); +extern int mip6_tunnel_output + __P((struct mbuf **, struct mip6_bc *)); +extern int mip6_tunnel_input + __P((struct mbuf **, int *, int)); +extern int mip6_tunnel + __P((struct in6_addr *, struct in6_addr *, int, int, void *)); +extern int mip6_proxy + __P((struct in6_addr*, struct in6_addr*, int)); +extern struct mip6_bc *mip6_bc_find + __P((struct in6_addr *)); +extern struct mip6_bc *mip6_bc_create + __P((struct in6_addr *, struct in6_addr *, u_int32_t, u_int8_t, + u_int8_t, u_int8_t, u_int16_t)); +extern void mip6_bc_update + __P((struct mip6_bc *, struct in6_addr *, u_int32_t, u_int8_t, + u_int8_t, u_int8_t, u_int16_t, struct bc_info, time_t)); +extern int mip6_bc_delete + __P((struct mip6_bc *, struct mip6_bc **)); +extern struct mip6_na *mip6_na_create + __P((struct in6_addr *, struct in6_addr *, struct in6_addr *, + u_int8_t, u_long, int)); +extern struct mip6_na *mip6_na_delete + __P((struct mip6_na *)); +extern struct mip6_prefix *mip6_prefix_find + __P((struct in6_addr *, u_int8_t)); +extern struct mip6_prefix *mip6_prefix_create + __P((struct ifnet *, struct in6_addr *, u_int8_t, u_int32_t)); +extern struct mip6_prefix *mip6_prefix_delete + __P((struct mip6_prefix *)); +extern void mip6_timer_na + __P((void *)); +extern void mip6_timer_bc + __P((void *)); +extern void mip6_timer_prefix + __P((void *)); + +#if 
!defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) +extern int mip6_ioctl __P((struct socket *, u_long, caddr_t, struct ifnet *, + struct proc *)); +#else +extern int mip6_ioctl __P((struct socket *, u_long, caddr_t, struct ifnet *)); +#endif + +#if MIP6_DEBUG +void mip6_debug __P((char *, ...)); +#endif + +extern void mip6_enable_debug + __P((int)); +extern int mip6_write_config_data + __P((u_long, caddr_t)); +extern int mip6_clear_config_data + __P((u_long, caddr_t)); +extern int mip6_enable_func + __P((u_long, caddr_t)); + + +/* External declaration of function prototypes (mip6_md.c) */ +extern void mip6_md_init + __P((void)); +extern void mip6_select_defrtr + __P((void)); +extern void mip6_prelist_update + __P((struct nd_prefix *, struct nd_defrouter *)); +extern void mip6_eager_md + __P((int enable)); +extern void mip6_expired_defrouter + __P((struct nd_defrouter *dr)); +extern void mip6_probe_defrouter + __P((struct nd_defrouter *dr)); +extern void mip6_probe_pfxrtrs + __P((void)); +extern void mip6_store_advint + __P((struct nd_opt_advint *, struct nd_defrouter *)); +extern int mip6_delete_ifaddr + __P((struct in6_addr *addr, struct ifnet *ifp)); +extern struct nd_prefix *mip6_get_home_prefix + __P((void)); +extern int mip6_get_md_state + __P((void)); +extern void mip6_md_exit + __P((void)); + + +/* External declaration of function prototypes (mip6_mn.c) */ +extern void mip6_mn_init + __P((void)); +extern void mip6_mn_exit + __P((void)); +extern void mip6_new_defrtr + __P((int, struct nd_prefix *, struct nd_prefix *, + struct nd_defrouter *)); +extern int mip6_rec_ba + __P((struct mbuf *, int)); +extern int mip6_rec_br + __P((struct mbuf *, int)); +extern int mip6_rec_hal + __P((struct in6_addr *, struct in6_addr *, struct mip6_subopt_hal *)); +extern int mip6_rec_ramn + __P((struct mbuf *, int)); +extern int mip6_route_optimize + __P((struct mbuf *)); +extern int mip6_send_bu + __P((struct mip6_bul *, struct mip6_bu_data *, struct mip6_subbuf 
*)); +extern void mip6_send_bu2fn + __P((struct in6_addr *, struct mip6_hafn *, struct in6_addr *, + struct ifnet *, u_int32_t)); +extern void mip6_update_cns + __P((struct in6_addr *, struct in6_addr *, u_int8_t, u_int32_t)); +extern void mip6_queue_bu + __P((struct mip6_bul *, struct in6_addr *, struct in6_addr *, + u_int8_t, u_int32_t)); +extern struct mip6_opt_bu *mip6_create_bu + __P((u_int8_t, int, int, u_int16_t, u_int32_t)); +extern void mip6_stop_bu + __P((struct in6_addr *)); +extern int mip6_ba_error + __P((struct in6_addr *, struct in6_addr *, struct in6_addr *, + u_int8_t)); +extern u_int32_t mip6_prefix_lifetime + __P((struct in6_addr *)); +extern struct mip6_retrans * mip6_create_retrans + __P((struct mip6_bul *)); +extern void mip6_clear_retrans + __P((struct mip6_bul *)); +extern struct mip6_bul *mip6_bul_find + __P((struct in6_addr *, struct in6_addr *)); +extern struct mip6_bul *mip6_bul_create + __P((struct in6_addr *, struct in6_addr *, struct in6_addr *, + u_int32_t, u_int8_t)); +extern struct mip6_bul *mip6_bul_delete + __P((struct mip6_bul *)); +extern struct mip6_esm *mip6_esm_find + __P((struct in6_addr *)); +extern struct mip6_esm *mip6_esm_create + __P((struct ifnet *, struct in6_addr *, struct in6_addr *, + struct in6_addr *, u_int8_t, int, enum esm_type, u_int16_t)); +extern struct mip6_esm *mip6_esm_delete + __P((struct mip6_esm *)); +extern int mip6_outq_create + __P((void *, struct mip6_subbuf *, struct in6_addr *, + struct in6_addr *, enum send_state)); +extern struct mip6_output *mip6_outq_delete + __P((struct mip6_output *)); +extern void mip6_outq_flush + __P((void)); +extern void mip6_timer_outqueue + __P((void *)); +extern void mip6_timer_bul + __P((void *)); +extern void mip6_timer_esm + __P((void *)); +extern int mip6_write_config_data_mn + __P((u_long, void *)); +extern int mip6_clear_config_data_mn + __P((u_long, caddr_t)); +extern int mip6_enable_func_mn + __P((u_long, caddr_t)); + + +/* External declaration of function 
prototypes (mip6_ha.c). */ +extern void mip6_ha_init + __P((void)); +extern void mip6_ha_exit + __P((void)); +extern int mip6_rec_raha + __P((struct mbuf *, int)); +extern int mip6_ra_options + __P((struct mip6_ha_list *, caddr_t, int)); +extern struct mip6_subopt_hal * mip6_hal_dynamic + __P((struct in6_addr *)); +extern struct in6_addr *mip6_global_addr + __P((struct in6_addr *)); +extern void mip6_icmp6_output + __P((struct mbuf *)); +extern void mip6_prefix_examine + __P((struct mip6_ha_list *, struct ifnet *, caddr_t, int)); +extern struct mip6_link_list *mip6_ll_find + __P((char *)); +extern struct mip6_link_list *mip6_ll_create + __P((char *, struct ifnet *)); +extern struct mip6_link_list *mip6_ll_delete + __P((struct mip6_link_list *)); +extern struct mip6_ha_list *mip6_hal_find + __P((struct mip6_ha_list *, struct in6_addr *)); +extern struct mip6_ha_list *mip6_hal_create + __P((struct mip6_ha_list **, struct in6_addr *, u_int32_t, int16_t)); +extern void mip6_hal_sort + __P((struct mip6_ha_list **)); +extern struct mip6_ha_list *mip6_hal_delete + __P((struct mip6_ha_list **, struct mip6_ha_list *)); +extern void mip6_timer_ll + __P((void *)); +extern int mip6_write_config_data_ha + __P((u_long, void *)); +extern int mip6_clear_config_data_ha + __P((u_long, void *)); +extern int mip6_enable_func_ha + __P((u_long, caddr_t)); + + +/* External declaration of function prototypes (mip6_hooks.c). 
*/ +extern void mip6_minus_a_case + __P((struct nd_prefix *)); +extern struct nd_prefix *mip6_find_auto_home_addr + __P((void)); +extern void mip6_enable_hooks + __P((int)); +extern void mip6_disable_hooks + __P((int)); +extern int mip6_attach + __P((int)); +extern int mip6_release + __P((void)); + + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +extern struct callout_handle mip6_timer_na_handle; +extern struct callout_handle mip6_timer_bc_handle; +extern struct callout_handle mip6_timer_outqueue_handle; +extern struct callout_handle mip6_timer_bul_handle; +extern struct callout_handle mip6_timer_esm_handle; +extern struct callout_handle mip6_timer_prefix_handle; +extern struct callout_handle mip6_timer_ll_handle; +#endif + +#endif /* _KERNEL */ + +#endif /* not _NETINET6_MIP6_H_ */ diff --git a/bsd/netinet6/mip6_common.h b/bsd/netinet6/mip6_common.h new file mode 100644 index 000000000..70f3a3d6f --- /dev/null +++ b/bsd/netinet6/mip6_common.h @@ -0,0 +1,141 @@ +/* $KAME: mip6_common.h,v 1.9 2000/03/25 07:23:50 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. + * + * Author: Hesham Soliman + * Martti Kuparinen + */ + + +#ifndef _NETINET6_MIP6_COMMON_H_ +#define _NETINET6_MIP6_COMMON_H_ + + + +/* SIOCs used for communication between kernel and user space. 
+ * + * SIOCSDEBUG_MIP6 Set MIP6 debug on/off + * + * SIOCSBCFLUSH_MIP6 Remove list of BC + * + * SIOCSDEFCONFIG_MIP6 Restore default configuration + * + * SIOCSBRUPDATE_MIP6 Set time when CN should send Binding request + * + * SIOCSENABLEBR_MIP6 Enable sending BR to the MN + * + * SIOCSHALISTFLUSH_MIP6 Remove list of Home Agents + * + * SIOCSHAPREF_MIP6 HA preference + * + * SIOCSFWDSLUNICAST_MIP6 Enable forwarding of SL Unicast dest addresses + * + * SIOCSFWDSLMULTICAST_MIP6 Enable forwarding of SL Multicast dest addresses + * + * SIOCSFORADDRFLUSH_MIP6 Remove default foreign address from list + * + * SIOCSHADDRFLUSH_MIP6 Remove Home Address + * + * SIOCSBULISTFLUSH_MIP6 Remove Binding Update list + * + * SIOCACOADDR_MIP6 Set Default foreign IP Address + * + * SIOCAHOMEADDR_MIP6 Add home address + * + * SIOCSBULIFETIME_MIP6 Set default BU lifetime + * + * SIOCSHRLIFETIME_MIP6 Set default lifetime for home registration, not BU + * + * SIOCDCOADDR_MIP6 Remove default foreign address from list + * + * SIOCSPROMMODE_MIP6 Enable link layer promiscuous mode + * + * SIOCSBU2CN_MIP6 Enable sending BU to CN, i.e. 
Route opt on/off + * + * SIOCSREVTUNNEL_MIP6 Enable tunneling of packets from MN to CN via HA + * + * SIOCSAUTOCONFIG_MIP6 Allow autoconfiguration of Home address + * + * SIOCSEAGERMD_MIP6 Enable eager Movement Detection + * + */ +#define SIOCSDEBUG_MIP6 _IOWR('M', 1, struct mip6_input_data) +#define SIOCSBCFLUSH_MIP6 _IOWR('M', 2, int) +#define SIOCSDEFCONFIG_MIP6 _IOWR('M', 3, int) +#define SIOCSBRUPDATE_MIP6 _IOWR('M', 4, u_int8_t) +#define SIOCSENABLEBR_MIP6 _IOWR('M', 5, u_int8_t) + +#define SIOCSHALISTFLUSH_MIP6 _IOWR('M', 6, int) +#define SIOCSHAPREF_MIP6 _IOWR('M', 7, int) +#define SIOCSFWDSLUNICAST_MIP6 _IOWR('M', 8, int) +#define SIOCSFWDSLMULTICAST_MIP6 _IOWR('M', 9, int) + +#define SIOCSFORADDRFLUSH_MIP6 _IOWR('M', 10, int) +#define SIOCSHADDRFLUSH_MIP6 _IOWR('M', 11, int) +#define SIOCSBULISTFLUSH_MIP6 _IOWR('M', 12, int) +#define SIOCACOADDR_MIP6 _IOWR('M', 13, struct mip6_input_data) +#define SIOCAHOMEADDR_MIP6 _IOWR('M', 14, struct mip6_input_data) +#define SIOCSBULIFETIME_MIP6 _IOWR('M', 15, struct mip6_input_data) +#define SIOCSHRLIFETIME_MIP6 _IOWR('M', 16, struct mip6_input_data) +#define SIOCDCOADDR_MIP6 _IOWR('M', 17, struct mip6_input_data) +#define SIOCSPROMMODE_MIP6 _IOWR('M', 18, struct mip6_input_data) +#define SIOCSBU2CN_MIP6 _IOWR('M', 19, struct mip6_input_data) +#define SIOCSREVTUNNEL_MIP6 _IOWR('M', 20, struct mip6_input_data) +#define SIOCSAUTOCONFIG_MIP6 _IOWR('M', 21, struct mip6_input_data) +#define SIOCSEAGERMD_MIP6 _IOWR('M', 22, struct mip6_input_data) +#define SIOCSATTACH_MIP6 _IOWR('M', 23, struct mip6_input_data) +#define SIOCSRELEASE_MIP6 _IOWR('M', 24, struct mip6_input_data) + + +/* + * Information about which module that has been compiled into the kernel or + * loaded as a module. + */ +#define MIP6_MN_MODULE 0x01 +#define MIP6_HA_MODULE 0x02 + + +/* + * Generic message to pass configuration parameters from mip6config to + * kernel. 
 */
struct mip6_input_data {
	char            if_name[IFNAMSIZ];  /* Interface name */
	u_int8_t        prefix_len;         /* Prefix length for address */
	struct in6_addr ip6_addr;           /* Address */
	struct in6_addr ha_addr;            /* Corresponding Home Agent */
	u_int32_t       value;              /* Command-specific value, e.g.
	                                       a preference or an on/off
	                                       flag (see the ioctl users) */
};

#endif /* not _NETINET6_MIP6_COMMON_H_ */
diff --git a/bsd/netinet6/mip6_ha.c b/bsd/netinet6/mip6_ha.c
new file mode 100644
index 000000000..d582cee9e
--- /dev/null
+++ b/bsd/netinet6/mip6_ha.c
@@ -0,0 +1,1190 @@
/* $KAME: mip6_ha.c,v 1.8 2000/03/18 03:05:40 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, 1998, 1999 and 2000 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. + * + * Author: Conny Larsson + * + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +/* + * Mobile IPv6 Home Agent + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +/* Declaration of Global variables. */ +struct callout_handle mip6_timer_ll_handle; +#endif + + +/* + ############################################################################## + # + # INITIALIZATION AND EXIT FUNCTIONS + # These functions are executed when the MIPv6 code is activated and de- + # activated respectively. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_ha_init + * Description: Initialization of MIPv6 variables that must be initialized + * before the HA code is executed. + ****************************************************************************** + */ +void +mip6_ha_init(void) +{ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + /* Initialize handle for timer functions. 
*/ + callout_handle_init(&mip6_timer_ll_handle); +#endif +} + + + +/* + ****************************************************************************** + * Function: mip6_ha_exit + * Description: This function is called when the HA module is unloaded + * (relesed) from the kernel. + ****************************************************************************** + */ +void +mip6_ha_exit() +{ + struct mip6_link_list *llp; + int s; + + /* Cancel outstanding timeout function calls. */ +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_ll, (void *)NULL, mip6_timer_ll_handle); +#else + untimeout(mip6_timer_ll, (void *)NULL); +#endif + + /* Remove each entry in every queue. */ + s = splnet(); + for (llp = mip6_llq; llp;) + llp = mip6_ll_delete(llp); + mip6_llq = NULL; + splx(s); +} + + + +/* + ############################################################################## + # + # RECEIVING FUNCTIONS + # These functions receives the incoming IPv6 packet and further processing of + # the packet depends on the content in the packet. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_rec_raha + * Description: Processed by a Home Agent. Includes a Router Advertisement + * with a H-bit set in the flags variable (checked by the calling + * function). + * A link list entry and a Home Agent List entry are created or + * modified if needed. + * Ret value: 0 Everything is OK. Otherwise appropriate error code. 
+ ****************************************************************************** + */ +int +mip6_rec_raha(m, off) +struct mbuf *m; /* Mbuf containing the entire IPv6 packet */ +int off; /* Offset from start of mbuf to start of RA */ +{ + struct ifnet *ifp; /* Receiving interface */ + struct ip6_hdr *ip6; /* IPv6 header */ + struct nd_router_advert *ra; /* Router Advertisement */ + struct mip6_link_list *llp; /* Link list entry */ + struct mip6_ha_list *halp; /* Home Agent list entry */ + caddr_t icmp6msg; /* Copy of mbuf (consequtively) */ + char ifname[IFNAMSIZ+1]; /* Interface name */ + int res, s, icmp6len; + + /* Find out if the RA can be processed */ + ip6 = mtod(m, struct ip6_hdr *); + if (ip6->ip6_hlim != 255) { + log(LOG_INFO, + "%s: Invalid hlim %d in Router Advertisement\n", + __FUNCTION__, ip6->ip6_hlim); + return 0; + } + + if (!IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src)) { + log(LOG_INFO, + "%s: Source Address %s is not link-local\n", + __FUNCTION__, ip6_sprintf(&ip6->ip6_src)); + return 0; + } + + /* Find out which interface the RA arrived at */ + ifp = m->m_pkthdr.rcvif; + sprintf(ifname, "%s", if_name(ifp)); + + llp = mip6_ll_find(ifname); + if (llp == NULL) { + llp = mip6_ll_create(ifname, ifp); + if (llp == NULL) + return ENOBUFS; + } + + /* The mbuf data must be stored consequtively to be able to + cast data from it. */ + icmp6len = m->m_pkthdr.len - off; + icmp6msg = (caddr_t)MALLOC(icmp6len, M_TEMP, M_NOWAIT); + if (icmp6msg == NULL) + return IPPROTO_DONE; + + m_copydata(m, off, icmp6len, icmp6msg); + ra = (struct nd_router_advert *)icmp6msg; + + /* Find the Home Agent sending the RA and read its options. + This section must have high priority since the Home Agent + list entry lifetime is initialized to 0 and could be + removed by the timer function before the RA options have + been evaluated. 
*/ + s = splnet(); + halp = mip6_hal_find(llp->ha_list, &ip6->ip6_src); + if (halp == NULL) { + halp = mip6_hal_create(&llp->ha_list, &ip6->ip6_src, + ntohl(ra->nd_ra_router_lifetime), 0); + if (halp == NULL) { + splx(s); + return ENOBUFS; + } + } else { + halp->lifetime = ntohl(ra->nd_ra_router_lifetime); + halp->pref = 0; + } + + res = mip6_ra_options(halp, icmp6msg, icmp6len); + if (res) { + splx(s); + return res; + } + splx(s); + return 0; +} + + + +/* + ############################################################################## + # + # UTILITY FUNCTIONS + # Miscellaneous functions needed for the internal processing of incoming and + # outgoing control signals. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_ra_options + * Description: Search through all the options in the Router Advertisement + * and store them in the Home Agent List. + * Ret value: 0 Everything is OK. Otherwise appropriate error code. 
+ ****************************************************************************** + */ +int +mip6_ra_options(halp, icmp6msg, icmp6len) +struct mip6_ha_list *halp; /* Home Agent list entry */ +caddr_t icmp6msg; /* icmp6 message */ +int icmp6len; /* Length of icmp6 message */ +{ + struct mip6_addr_list *ap; /* Address list entry */ + struct nd_opt_hai *hai; /* Home Agent information option */ + struct nd_opt_advint *ai; /* Advertisement Interval option */ + struct nd_opt_prefix_info *pi; /* Ptr to prefix information */ + u_int8_t *optp; /* Ptr to current option in RA */ + int cur_off; /* Cur offset from start of RA */ + + /* Process each option in the RA */ + cur_off = sizeof(struct nd_router_advert); + while (cur_off < icmp6len) { + optp = ((caddr_t)icmp6msg + cur_off); + if (*optp == ND_OPT_PREFIX_INFORMATION) { + /* Check the prefix information option */ + pi = (struct nd_opt_prefix_info *)optp; + if (pi->nd_opt_pi_len != 4) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + + if (!(pi->nd_opt_pi_flags_reserved & + ND_OPT_PI_FLAG_RTADDR)) { + cur_off += 4 * 8; + continue; + } + + if (IN6_IS_ADDR_MULTICAST(&pi->nd_opt_pi_prefix) || + IN6_IS_ADDR_LINKLOCAL(&pi->nd_opt_pi_prefix)) { + cur_off += 4 * 8; + continue; + } + + /* Aggregatable unicast address, rfc2374 */ + if (((pi->nd_opt_pi_prefix.s6_addr8[0] & 0xe0) > 0x10) + && (pi->nd_opt_pi_prefix_len != 64)) { + cur_off += 4 * 8; + continue; + } + + /* Store the address if not already present */ + for (ap = halp->addr_list; ap; ap = ap->next) { + if (IN6_ARE_ADDR_EQUAL(&ap->ip6_addr, + &pi->nd_opt_pi_prefix)) + break; + } + + if (ap == NULL) { + /* Create a new address list entry. 
*/ + ap = (struct mip6_addr_list *) + MALLOC(sizeof(struct mip6_addr_list), + M_TEMP, M_WAITOK); + if (ap == NULL) + return ENOBUFS; + bzero(ap, sizeof(struct mip6_addr_list)); + + ap->next = halp->addr_list; + ap->ip6_addr = pi->nd_opt_pi_prefix; + ap->prefix_len = pi->nd_opt_pi_prefix_len; + halp->addr_list = ap; + } + cur_off += 4 * 8; + continue; + } else if (*optp == ND_OPT_ADV_INTERVAL) { + /* Check the advertisement interval option */ + ai = (struct nd_opt_advint *)optp; + if (ai->nd_opt_int_len != 1) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + + /* XXX. Function call to move detection */ + cur_off += 8; + continue; + } else if (*optp == ND_OPT_HA_INFORMATION) { + /* Check the home agent information option */ + hai = (struct nd_opt_hai *)optp; + if (hai->nd_opt_hai_len != 1) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + + halp->pref = ntohs(hai->nd_opt_hai_pref); + halp->lifetime = ntohs(hai->nd_opt_hai_lifetime); + cur_off += 8; + continue; + } else { + if (*(optp + 1) == 0) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + cur_off += *(optp + 1) * 8; + } + } + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_hal_dynamic + * Description: Search through all the link lists and home agents list and + * create a Home Agents List sub-option to be used in dynamic + * home agent address discovery. + * If my own global source address is included in the first + * home agents list entry, leave it. It will be in the source + * address of the outgoing packet anyway. + * Ret value: Ptr to the sub-option or NULL. 
+ ****************************************************************************** + */ +struct mip6_subopt_hal * +mip6_hal_dynamic(own_addr) +struct in6_addr *own_addr; /* Own global unicast source address used */ +{ + struct mip6_link_list *llp; /* Link list entry */ + struct mip6_ha_list *halp; /* Home Agent list entry */ + struct mip6_subopt_hal *opt; /* Home Agents list sub-option */ + struct mip6_addr_list *addrp; /* Address list entry */ + struct mip6_addr_list *tmp_addrp; /* Temporary address list entry */ + struct ifaddr *if_addr; /* Interface data */ + struct sockaddr_in6 sin6; + char ifname[IFNAMSIZ+1]; /* Interface name */ + int ii, len, found; + + /* Find the interface */ + bzero(&sin6, sizeof(struct sockaddr_in6)); + sin6.sin6_len = sizeof(struct sockaddr_in6); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = *own_addr; + + if_addr = ifa_ifwithaddr((struct sockaddr *)&sin6); + if (if_addr == NULL) + return NULL; + + sprintf(ifname, "%s", if_name(if_addr->ifa_ifp)); + + llp = mip6_ll_find(ifname); + if (llp == NULL) + return NULL; + + /* Allocate memory for home agent list sub option */ + opt = (struct mip6_subopt_hal *)MALLOC(sizeof(struct mip6_subopt_hal) + + 31 * sizeof(struct in6_addr), + M_TEMP, M_WAITOK); + if (opt == NULL) + return NULL; + + opt->type = IP6SUBOPT_HALIST; + opt->len = 0; + + /* Search the home agents list for the specific link. 
*/ + /* First, sort the Home Agent list in decending order */ + mip6_hal_sort(&llp->ha_list); + ii = 0; + for (halp = llp->ha_list; halp; halp = halp->next) { + tmp_addrp = NULL; + found = 0; + for (addrp = halp->addr_list; addrp; addrp = addrp->next) { + len = addrp->prefix_len; + if (in6_are_prefix_equal(own_addr, &addrp->ip6_addr, len)) { + if (IN6_ARE_ADDR_EQUAL(own_addr, &addrp->ip6_addr)) { + found = 1; + break; + } else if (tmp_addrp == NULL) + tmp_addrp = addrp; + } + } + + if (found && (ii != 0)) { + opt->halist[ii] = addrp->ip6_addr; + opt->len += IP6OPT_HALISTLEN; + ii += 1; + } else if (tmp_addrp != NULL) { + opt->halist[ii] = tmp_addrp->ip6_addr; + opt->len += IP6OPT_HALISTLEN; + ii += 1; + } + } + + if (opt->len != 0) + return opt; + else { + _FREE(opt, M_TEMP); + return NULL; + } +} + + + +/* + ****************************************************************************** + * Function: mip6_global_addr + * Description: Search the list of IP addresses and find the interface for + * the anycast address. Find a link local address and use this + * address while searching through the list of home agents. + * When my own home agent is found, pick the first global address + * which matches the aycast prefix. + * Ret value: Ptr to the global unicast address or NULL. 
+ ****************************************************************************** + */ +struct in6_addr * +mip6_global_addr(anycast_addr) +struct in6_addr *anycast_addr; /* Home Agents anycast address */ +{ + struct in6_ifaddr *ia; /* I/f address for anycast address */ + struct in6_ifaddr *ia_ll; /* I/f address for link local address */ + struct ifnet *ifp; /* Interface */ + struct mip6_ha_list *halp; /* Home Agent list entry */ + struct mip6_addr_list *addrp; /* Address list entry */ + struct mip6_link_list *llp; /* Link list entry for anycast address */ + char ifname[IFNAMSIZ+1]; /* Interface name */ + + /* Find out the interface for the anycast address */ + for (ia = in6_ifaddr; ia; ia = ia->ia_next) + { + if (ia->ia_addr.sin6_family != AF_INET6) + continue; + if ((ia->ia6_flags & IN6_IFF_ANYCAST) && + IN6_ARE_ADDR_EQUAL(anycast_addr, &ia->ia_addr.sin6_addr)) + break; + } + + if (ia == NULL) + return NULL; + + ifp = ia->ia_ifa.ifa_ifp; + sprintf(ifname, "%s", if_name(ifp)); + llp = mip6_ll_find(ifname); + if (llp == NULL) + return NULL; + + /* Use link local address to identify my own home agent list entry */ + /* XXX: I'm not sure if the 2nd arg is OK(jinmei@kame) */ + ia_ll = in6ifa_ifpforlinklocal(ifp, 0); + if (ia_ll == NULL) + return NULL; + halp = mip6_hal_find(llp->ha_list, &ia_ll->ia_addr.sin6_addr); + if (halp == NULL) + return NULL; + + /* Find my global address */ + for (addrp = halp->addr_list; addrp; addrp = addrp->next) { + if (in6_are_prefix_equal(anycast_addr, &addrp->ip6_addr, + addrp->prefix_len)) + return &addrp->ip6_addr; + } + return NULL; +} + + + +/* + ****************************************************************************** + * Function: mip6_icmp6_output + * Description: Takes care of an outgoing Router Advertisement. It finds the + * outgoing interface and add each prefix to the home agents list. + * Each prefix is also added to the internal prefix list used + * when a BU is received to decide whether the MN is on-link or + * not. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_icmp6_output(m) +struct mbuf *m; /* Mbuf chain with IPv6 packet */ +{ + struct ip6_hdr *ip6; /* IPv6 header */ + struct icmp6_hdr *icmp6; /* ICMP6 header */ + struct nd_router_advert *ra; /* Router Advertisement */ + struct ifaddr *if_addr; /* Interface address */ + struct mip6_link_list *llp; /* Link list entry */ + struct mip6_ha_list *halp; /* Home Agent list entry */ + struct sockaddr_in6 sin6; + caddr_t icmp6msg; /* Copy of mbuf (consequtively) */ + char ifname[IFNAMSIZ+1]; /* Interface name */ + int icmp6len, s, res; + + /* Check if the packet shall be processed */ + if (!MIP6_IS_HA_ACTIVE) + return; + + ip6 = mtod(m, struct ip6_hdr *); + if (ip6->ip6_nxt != IPPROTO_ICMPV6) + return; + + /* The mbuf data must be stored consequtively to be able to cast data + from it. */ + icmp6len = m->m_pkthdr.len - sizeof(struct ip6_hdr); + icmp6msg = (caddr_t)MALLOC(icmp6len, M_TEMP, M_WAITOK); + if (icmp6msg == NULL) + return; + + m_copydata(m, sizeof(struct ip6_hdr), icmp6len, icmp6msg); + icmp6 = (struct icmp6_hdr *)icmp6msg; + + /* Check if the packet shall be processed */ + if (icmp6->icmp6_type != ND_ROUTER_ADVERT) { + _FREE(icmp6msg, M_TEMP); + return; + } + + if (icmp6->icmp6_code != 0) { + _FREE(icmp6msg, M_TEMP); + return; + } + + if (icmp6len < sizeof(struct nd_router_advert)) { + _FREE(icmp6msg, M_TEMP); + return; + } + + /* Find the outgoing interface */ + bzero(&sin6, sizeof(struct sockaddr_in6)); + sin6.sin6_len = sizeof(struct sockaddr_in6); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = ip6->ip6_src; + + if_addr = ifa_ifwithaddr((struct sockaddr *)&sin6); + if (if_addr == NULL) { + _FREE(icmp6msg, M_TEMP); + return; + } + + sprintf(ifname, "%s", if_name(if_addr->ifa_ifp)); + + llp = mip6_ll_find(ifname); + if (llp == NULL) { + llp = mip6_ll_create(ifname, if_addr->ifa_ifp); + if (llp == NULL) { + _FREE(icmp6msg, M_TEMP); + return; + } + } + 
+ /* Find the Home Agent sending the RA and read its options. + This section must have high priority since the Home Agent list + entry lifetime is initialized to 0 and could be removed by the + timer function before the RA options have been evaluated. */ + s = splnet(); + ra = (struct nd_router_advert *)icmp6; + halp = mip6_hal_find(llp->ha_list, &ip6->ip6_src); + if (halp == NULL) { + halp = mip6_hal_create(&llp->ha_list, &ip6->ip6_src, + ntohl(ra->nd_ra_router_lifetime), 0); + if (halp == NULL) { + _FREE(icmp6msg, M_TEMP); + splx(s); + return; + } + } else { + halp->lifetime = ntohl(ra->nd_ra_router_lifetime); + halp->pref = 0; + } + + res = mip6_ra_options(halp, icmp6msg, icmp6len); + if (res) { + _FREE(icmp6msg, M_TEMP); + splx(s); + return; + } + splx(s); + + /* Add the prefix to prefix list and the anycast address to the + interface. */ + mip6_prefix_examine(halp, if_addr->ifa_ifp, icmp6msg, icmp6len); + _FREE(icmp6msg, M_TEMP); + return; +} + + + +/* + ****************************************************************************** + * Function: mip6_prefix_examine + * Description: Add each prefix in a RA to the internal prefix list. Make + * sure that the Home-Agents anycast address for the prefix + * has been assigned to the interface. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_prefix_examine(halp, ifp, icmp6msg, icmp6len) +struct mip6_ha_list *halp; /* Home Agent list entry */ +struct ifnet *ifp; /* Outgoing i/f for prefixes */ +caddr_t icmp6msg; /* icmp6 message */ +int icmp6len; /* Length of icmp6 message */ +{ + struct nd_opt_prefix_info *pi; /* Ptr to prefix information */ + struct mip6_prefix *pq; /* Prefix queue entry */ + struct in6_addr anycast_addr; + int cur_off; /* Cur offset from start of mbuf */ + u_int8_t *opt_ptr; /* Ptr to current option in RA */ + + /* Process each option in the RA */ + cur_off = sizeof(struct nd_router_advert); + while (cur_off < icmp6len) { + opt_ptr = ((caddr_t)icmp6msg + cur_off); + if (*opt_ptr == ND_OPT_PREFIX_INFORMATION) { + /* Check the prefix information option */ + pi = (struct nd_opt_prefix_info *)opt_ptr; + if (pi->nd_opt_pi_len != 4) + return; + + if (!(pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_ONLINK)) { + cur_off += 4 * 8; + continue; + } + + if (IN6_IS_ADDR_MULTICAST(&pi->nd_opt_pi_prefix) || + IN6_IS_ADDR_LINKLOCAL(&pi->nd_opt_pi_prefix)) { + cur_off += 4 * 8; + continue; + } + + /* Aggregatable unicast address, rfc2374 */ + if (((pi->nd_opt_pi_prefix.s6_addr8[0] & 0xe0) > 0x10) && + (pi->nd_opt_pi_prefix_len != 64)) { + cur_off += 4 * 8; + continue; + } + + /* Store the prefix if not already present */ + pq = mip6_prefix_find(&pi->nd_opt_pi_prefix, + pi->nd_opt_pi_prefix_len); + if (pq == NULL) { + int error; + pq = mip6_prefix_create(ifp, &pi->nd_opt_pi_prefix, + pi->nd_opt_pi_prefix_len, + pi->nd_opt_pi_valid_time); + if (pq == NULL) + return; + + /* Create an Home Agent anycast address, add it to the + interface */ + mip6_build_ha_anycast(&anycast_addr, + &pi->nd_opt_pi_prefix, + pi->nd_opt_pi_prefix_len); + error = mip6_add_ifaddr(&anycast_addr, ifp, + pi->nd_opt_pi_prefix_len, + IN6_IFF_ANYCAST); + if (error) + printf("%s: address assignment error (errno = 
%d).\n", + __FUNCTION__, error); + + } else + pq->valid_time = ntohl(pi->nd_opt_pi_valid_time); + + cur_off += 4 * 8; + continue; + } else { + if (*(opt_ptr + 1) == 0) { + return; + } + cur_off += *(opt_ptr + 1) * 8; + } + } +} + + + +/* + ############################################################################## + # + # LIST FUNCTIONS + # The Home Agent maintains three lists (link list, home agent list and global + # address list) which are integrated into each other. Besides from this an + # internal prefix list is maintained in order to know which prefixes it is + # supposed to be home network for. The functions in this section are used for + # maintenance (create, find, delete and update entries) of these lists. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_ll_find + * Description: For each physical interface, i.e. link, that a Home Agent + * send and receive Router Advertisements at, a link list entry + * is maintained. + * Ret value: Pointer to found link list entry or NULL. + ****************************************************************************** + */ +struct mip6_link_list * +mip6_ll_find(ifname) +char *ifname; +{ + struct mip6_link_list *llp; + + for (llp = mip6_llq; llp; llp = llp->next) { + if (strcmp(ifname, llp->ifname) == 0) + return llp; + } + return NULL; +} + + + +/* + ****************************************************************************** + * Function: mip6_ll_create + * Description: Create a new link list entry and add it first to the link + * list. Start the timer if not already started. + * Ret value: Pointer to created link list entry or NULL. 
+ ****************************************************************************** + */ +struct mip6_link_list * +mip6_ll_create(ifname, ifp) +char *ifname; +struct ifnet *ifp; +{ + struct mip6_link_list *llp; + int s, start_timer = 0; + + if (mip6_llq == NULL) + start_timer = 1; + + llp = (struct mip6_link_list *)MALLOC(sizeof(struct mip6_link_list), + M_TEMP, M_WAITOK); + if (llp == NULL) + return NULL; + bzero(llp, sizeof(struct mip6_link_list)); + + /* Add the new link list entry first to the list. */ + s = splnet(); + llp->next = mip6_llq; + strcpy(llp->ifname, ifname); + llp->ifp = ifp; + llp->ha_list = NULL; + mip6_llq = llp; + splx(s); + + if (start_timer) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_ll_handle = +#endif + timeout(mip6_timer_ll, (void *)0, hz); + } + return llp; +} + + + +/* + ****************************************************************************** + * Function: mip6_ll_delete + * Description: Delete the requested link list entry. + * Ret value: Ptr to next entry in list or NULL if last entry removed. + ****************************************************************************** + */ +struct mip6_link_list * +mip6_ll_delete(llp_del) +struct mip6_link_list *llp_del; /* Link list entry to be deleted */ +{ + struct mip6_link_list *llp; /* Current entry in the list */ + struct mip6_link_list *llp_prev; /* Previous entry in the list */ + struct mip6_link_list *llp_next; /* Next entry in the list */ + struct mip6_ha_list *halp; /* Home Agents list */ + int s; + + /* Find the requested entry in the link list. 
*/ + s = splnet(); + llp_next = NULL; + llp_prev = NULL; + for (llp = mip6_llq; llp; llp = llp->next) { + llp_next = llp->next; + if (llp == llp_del) { + if (llp_prev == NULL) + mip6_llq = llp->next; + else + llp_prev->next = llp->next; + + if (llp->ha_list) { + for (halp = llp->ha_list; halp;) + halp = mip6_hal_delete(&llp->ha_list, halp); + } + +#if MIP6_DEBUG + mip6_debug("\nLink List entry deleted (0x%x)\n", llp); +#endif + _FREE(llp, M_TEMP); + + /* Remove the timer if the BC queue is empty */ + if (mip6_llq == NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_ll, (void *)NULL, mip6_timer_ll_handle); + callout_handle_init(&mip6_timer_ll_handle); +#else + untimeout(mip6_timer_ll, (void *)NULL); +#endif + } + break; + } + llp_prev = llp; + } + splx(s); + return llp_next; +} + + + +/* + ****************************************************************************** + * Function: mip6_hal_find + * Description: Find a Home Agent list entry at a specific link. There will + * be one entry for each node sending a Router Advertisement + * with the H-bit set including a Prefix Information option + * with the R-bit set, for which the Router lifetime or the + * Home Agent lifetime (included in a separate option) is not 0. + * Ret value: Pointer to found Home Agent list entry or NULL. + ****************************************************************************** + */ +struct mip6_ha_list * +mip6_hal_find(hal_start, ll_addr) +struct mip6_ha_list *hal_start; /* First entry in the Home Agents list */ +struct in6_addr *ll_addr; /* Link local address to search for */ +{ + struct mip6_ha_list *halp; + + for (halp = hal_start; halp; halp = halp->next) { + if (IN6_ARE_ADDR_EQUAL(&halp->ll_addr, ll_addr)) + return halp; + } + return NULL; +} + + + +/* + ****************************************************************************** + * Function: mip6_hal_create + * Description: Create a Home Agent list entry for a specific link. 
+ * Ret value: Pointer to created Home Agent list entry or NULL. + ****************************************************************************** + */ +struct mip6_ha_list * +mip6_hal_create(hal_start, ll_addr, lifetime, pref) +struct mip6_ha_list **hal_start; /* First entry in the Home Agents list */ +struct in6_addr *ll_addr; /* Link local address to search for */ +u_int32_t lifetime; /* Node lifetime */ +int16_t pref; /* Node preference */ +{ + struct mip6_ha_list *halp; + int s; + + halp = (struct mip6_ha_list *)MALLOC(sizeof(struct mip6_ha_list), + M_TEMP, M_WAITOK); + if (halp == NULL) + return NULL; + bzero(halp, sizeof(struct mip6_ha_list)); + + /* Add the new home agent list entry first to the list. */ + s = splnet(); + halp->next = *hal_start; + halp->ll_addr = *ll_addr; + halp->lifetime = lifetime; + halp->pref = pref; + halp->addr_list = NULL; + *hal_start = halp; + splx(s); + return halp; +} + + + +/* + ****************************************************************************** + * Function: mip6_hal_sort + * Description: Sort the Home Agent list in decending order. Uses a temporary + * list where all the existing elements are moved. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_hal_sort(ha_head) +struct mip6_ha_list **ha_head; /* Start of Home Agent list */ +{ + struct mip6_ha_list *start, *halp; + struct mip6_ha_list *halp_prev, *halp_before, *halp_move; + struct mip6_ha_list *local_start, *local_last; + int16_t last_pref; + int s; + + if (*ha_head == NULL) + return; + + s = splnet(); + start = *ha_head; + local_start = NULL; + local_last = NULL; + + while (1) { + /* Find entry with highest preference */ + last_pref = SHRT_MIN; + halp_prev = NULL; + for (halp = start; halp; halp = halp->next) { + if (halp->pref > last_pref) { + last_pref = halp->pref; + halp_move = halp; + halp_before = halp_prev; + } + halp_prev = halp; + } + + /* Move it to the new list */ + if (local_start == NULL) + local_start = halp_move; + else + local_last->next = halp_move; + local_last = halp_move; + + /* Update the existing list */ + if (halp_before == NULL) + start = halp_move->next; + else + halp_before->next = halp_move->next; + + if (start == NULL) + break; + } + *ha_head = local_start; + splx(s); + return; +} + + + +/* + ****************************************************************************** + * Function: mip6_hal_delete + * Description: Delete a Home Agent list entry. If there are any address list + * entries associated with the Home Agent entry they are deleted + * as well. + * Ret value: Pointer to the next Home Agent list entry. + * NULL if the remaining list is empty or end of list reached. 
+ ****************************************************************************** + */ +struct mip6_ha_list * +mip6_hal_delete(ha_start, ha_delete) +struct mip6_ha_list **ha_start; /* First list entry of HAs for a link */ +struct mip6_ha_list *ha_delete; /* Home Agent entry to delete */ +{ + struct mip6_ha_list *halp; /* Current HA list entry */ + struct mip6_ha_list *halp_prev; /* Previous HA list entry */ + struct mip6_addr_list *addrp; /* Address list entry */ + struct mip6_addr_list *addr_delete; /* Address list entry to delete */ + int s; + + s = splnet(); + halp_prev = NULL; + for (halp = *ha_start; halp; halp = halp->next) { + if (halp != ha_delete) { + halp_prev = halp; + continue; + } + + /* Search the address list and remove each entry */ + for (addrp = halp->addr_list; addrp;) { + addr_delete = addrp; + addrp = addrp->next; + _FREE(addr_delete, M_TEMP); + } + + /* Make sure that the pointer to the first entry is correct */ + if (halp == *ha_start) { + *ha_start = halp->next; + _FREE(halp, M_TEMP); + splx(s); + return *ha_start; + } else { + halp_prev->next = halp->next; + _FREE(halp, M_TEMP); + splx(s); + return halp_prev->next; + } + } + splx(s); + return NULL; +} + + + +/* + ############################################################################## + # + # TIMER FUNCTIONS + # These functions are called at regular basis. They operate on the lists, + # e.g. reducing timer counters and removing entries from the list if needed. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_timer_ll + * Description: Search the Home Agent list for each link and delete entries for + * which the timer has expired. + * If there are more entries left in the Home Agent list, call + * this fuction again once every second until the list is empty. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_ll(arg) +void *arg; /* Not used */ +{ + struct mip6_link_list *llp; /* Current Link list entry */ + struct mip6_ha_list *halp; /* Current Home Agent list entry */ + int s; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* Go through the entire Home Agent List and delete all entries + for which the time has expired. */ + s = splnet(); + for (llp = mip6_llq; llp;) { + for (halp = llp->ha_list; halp;) { + halp->lifetime -= 1; + if (halp->lifetime == 0) + halp = mip6_hal_delete(&llp->ha_list, halp); + else + halp = halp->next; + } + + if (llp->ha_list == NULL) + llp = mip6_ll_delete(llp); + else + llp = llp->next; + } + splx(s); + + if (mip6_llq != NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_ll_handle = +#endif + timeout(mip6_timer_ll, (void *)0, hz); + } +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + + + +/* + ############################################################################## + # + # IOCTL FUNCTIONS + # These functions are called from mip6_ioctl. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_write_config_data_ha + * Description: This function is called to write certain config values for + * MIPv6. The data is written into the global config structure. 
+ * Ret value: - + ****************************************************************************** + */ +int mip6_write_config_data_ha(u_long cmd, void *arg) +{ + int retval = 0; + + switch (cmd) { + case SIOCSHAPREF_MIP6: + mip6_config.ha_pref = ((struct mip6_input_data *)arg)->value; + break; + } + return retval; +} + + + +/* + ****************************************************************************** + * Function: mip6_clear_config_data_ha + * Description: This function is called to clear internal lists handled by + * MIPv6. + * Ret value: - + ****************************************************************************** + */ +int mip6_clear_config_data_ha(u_long cmd, void *data) +{ + int retval = 0; + int s; + struct mip6_link_list *llp; + + s = splnet(); + switch (cmd) { + case SIOCSHALISTFLUSH_MIP6: + for (llp = mip6_llq; llp;) + llp = mip6_ll_delete(llp); + break; + } + splx(s); + return retval; +} + + + +/* + ****************************************************************************** + * Function: mip6_enable_func_ha + * Description: This function is called to enable or disable certain functions + * in mip6. The data is written into the global config struct. + * Ret value: - + ****************************************************************************** + */ +int mip6_enable_func_ha(u_long cmd, caddr_t data) +{ + int enable; + int retval = 0; + + enable = ((struct mip6_input_data *)data)->value; + + switch (cmd) { + case SIOCSFWDSLUNICAST_MIP6: + mip6_config.fwd_sl_unicast = enable; + break; + + case SIOCSFWDSLMULTICAST_MIP6: + mip6_config.fwd_sl_multicast = enable; + break; + } + return retval; +} diff --git a/bsd/netinet6/mip6_hooks.c b/bsd/netinet6/mip6_hooks.c new file mode 100644 index 000000000..25a05263f --- /dev/null +++ b/bsd/netinet6/mip6_hooks.c @@ -0,0 +1,428 @@ +/* $KAME: mip6_hooks.c,v 1.8 2000/03/25 07:23:51 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, 1998, 1999 and 2000 WIDE Project. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. 
+ * + * Author: Mattias Pettersson + * Hesham Soliman + * Martti Kuparinen + * + */ +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * These are defined in sys/netinet6/ + */ +extern int (*mip6_store_dstopt_pre_hook)(struct mbuf *, u_int8_t *, + u_int8_t, u_int8_t); +extern int (*mip6_rec_ctrl_sig_hook)(struct mbuf *, int); +extern int (*mip6_new_packet_hook)(struct mbuf *); +extern int (*mip6_icmp6_input_hook)(struct mbuf *, int); +extern int (*mip6_output_hook)(struct mbuf *, struct ip6_pktopts **); +extern int (*mip6_rec_ra_hook)(struct mbuf *, int); + +/* Home Agent-specific hooks */ +extern struct in6_addr * (*mip6_global_addr_hook)(struct in6_addr *); +extern struct mip6_subopt_hal * (*mip6_hal_dynamic_hook)(struct in6_addr *); +extern int (*mip6_write_config_data_ha_hook)(u_long, void *); +extern int (*mip6_clear_config_data_ha_hook)(u_long, void *); +extern int (*mip6_enable_func_ha_hook)(u_long, caddr_t); +extern void (*mip6_icmp6_output_hook)(struct mbuf *); + +/* Mobile Node-specific hooks */ +extern int (*mip6_route_optimize_hook)(struct mbuf *); +extern void (*mip6_select_defrtr_hook)(void); +extern struct nd_prefix * (*mip6_get_home_prefix_hook)(void); +extern void (*mip6_prelist_update_hook)(struct nd_prefix *, + struct nd_defrouter *); +extern void (*mip6_expired_defrouter_hook)(struct nd_defrouter *); +extern void (*mip6_probe_pfxrtrs_hook)(void); +extern void (*mip6_store_advint_hook)(struct nd_opt_advint *, + struct nd_defrouter *); +extern int (*mip6_get_md_state_hook)(void); +extern int (*mip6_rec_ba_hook)(struct mbuf *, int); +extern int (*mip6_rec_br_hook)(struct mbuf *, int); +extern void (*mip6_stop_bu_hook)(struct in6_addr *); +extern int (*mip6_write_config_data_mn_hook)(u_long, void *); +extern int 
(*mip6_clear_config_data_mn_hook)(u_long, caddr_t);
extern int (*mip6_enable_func_mn_hook)(u_long, caddr_t);
extern void (*mip6_minus_a_case_hook)(struct nd_prefix *);
extern struct mip6_esm * (*mip6_esm_find_hook)(struct in6_addr *);


/* Late start of Mobile IPv6 when an autoconfigured global address becomes
   available for use as Home Address (the "minus a" configuration case).
   Rejects unspecified, multicast and link-local prefix addresses. */
void
mip6_minus_a_case(struct nd_prefix *pr)
{
	struct in6_addr addr;

	if (IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr) ||
	    IN6_IS_ADDR_MULTICAST(&pr->ndpr_addr) ||
	    IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_addr)) {
		return;
	}

	addr = in6addr_any;
	mip6_esm_create(pr->ndpr_ifp, NULL, &addr, &pr->ndpr_addr,
			pr->ndpr_plen, MIP6_STATE_UNDEF, PERMANENT, 0xFFFF);
#if MIP6_DEBUG
	mip6_debug("Late Home Address %s found for autoconfig'd case. Starting"
		   " Mobile IPv6.\n", ip6_sprintf(&pr->ndpr_addr));
#endif
	/* One-shot: disarm this hook before enabling the rest. */
	mip6_minus_a_case_hook = 0;
	mip6_enable_hooks(MIP6_SPECIFIC_HOOKS);
	mip6_md_init();
}

/* Scan the prefix list for the first usable (global) prefix address and
   return it, or NULL when none is available yet. */
struct nd_prefix *
mip6_find_auto_home_addr(void)
{
	struct nd_prefix *pr;
#if 0
	struct in6_ifaddr *ia6;
#endif

	for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
#if MIP6_DEBUG
		mip6_debug("%s: scanning prefix %s (pr = %p)\n", __FUNCTION__,
			   ip6_sprintf(&pr->ndpr_prefix.sin6_addr), pr);
#endif
		if (IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr) ||
		    IN6_IS_ADDR_MULTICAST(&pr->ndpr_addr) ||
		    IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_addr)) {
			continue;
		}
#if 0
		ia6 = in6ifa_ifpwithaddr(pr->ndpr_ifp, &pr->ndpr_addr);
		if (ia6 && (ia6->ia6_flags | IN6_IFF_DETACHED))
			continue;
		else
			break;	/* XXXYYY Remove in v2.0. */
#else
#if MIP6_DEBUG
		mip6_debug("%s: skipping detached test on prefix %s "
			   "(pr = %p)\n", __FUNCTION__,
			   ip6_sprintf(&pr->ndpr_prefix.sin6_addr), pr);
#endif
		/* First acceptable prefix wins. */
		break;
#endif
#if 0	/* XXXYYY Add in v2.0 */
		for (pfxrtr = pr->ndpr_advrtrs.lh_first; pfxrtr;
		     pfxrtr = pfxrtr->pfr_next) {
			if ((pfxrtr->router->flags & ND_RA_FLAG_HA)
			    == ND_RA_FLAG_HA)
				break;
		}
#endif /* 0 */
	}
	if (pr) {
#if MIP6_DEBUG
		mip6_debug("Found an autoconfigured home address "
			   "immediately: %s\n", ip6_sprintf(&pr->ndpr_addr));
#endif
	}
	else {
#if MIP6_DEBUG
		mip6_debug("Couldn't find an autoconfigured home address "
			   "immediately.\n");
#endif
	}
	return pr;
}


/* Install the MIP6 hook function pointers for the requested scope
   (MIP6_GENERIC_HOOKS, MIP6_CONFIG_HOOKS or MIP6_SPECIFIC_HOOKS). */
void
mip6_enable_hooks(int scope)
{
	int s;

	/*
	 * Activate the hook functions. After this some packets might come
	 * to the module...
	 * Note: mip6_minus_a_case_hook() is an exception and is not handled
	 * here.
	 */
	s = splimp();
	if (scope == MIP6_GENERIC_HOOKS) {
		mip6_store_dstopt_pre_hook = mip6_store_dstopt_pre;
		mip6_rec_ctrl_sig_hook = mip6_rec_ctrl_sig;
		mip6_new_packet_hook = mip6_new_packet;
		mip6_icmp6_input_hook = mip6_icmp6_input;
		mip6_output_hook = mip6_output;
	}

	if (scope == MIP6_CONFIG_HOOKS) {
		/* Activate Home Agent-specific hooks */
		mip6_write_config_data_ha_hook = mip6_write_config_data_ha;
		mip6_clear_config_data_ha_hook = mip6_clear_config_data_ha;
		mip6_enable_func_ha_hook = mip6_enable_func_ha;

		/* Activate Mobile Node-specific hooks */
		mip6_write_config_data_mn_hook = mip6_write_config_data_mn;
		mip6_clear_config_data_mn_hook = mip6_clear_config_data_mn;
		mip6_enable_func_mn_hook = mip6_enable_func_mn;
	}

	if (scope == MIP6_SPECIFIC_HOOKS) {
		/* Activate Home Agent-specific hooks */
		if (MIP6_IS_HA_ACTIVE) {
			mip6_rec_ra_hook = mip6_rec_raha;
			mip6_global_addr_hook = mip6_global_addr;
			mip6_hal_dynamic_hook = mip6_hal_dynamic;
			mip6_icmp6_output_hook = mip6_icmp6_output;
		}

		/* Activate Mobile Node-specific hooks */
		if (MIP6_IS_MN_ACTIVE) {
			mip6_route_optimize_hook = mip6_route_optimize;
			mip6_rec_ra_hook = mip6_rec_ramn;
			mip6_select_defrtr_hook = mip6_select_defrtr;
			mip6_get_home_prefix_hook = mip6_get_home_prefix;
			mip6_prelist_update_hook = mip6_prelist_update;
			mip6_expired_defrouter_hook = mip6_expired_defrouter;
			mip6_probe_pfxrtrs_hook = mip6_probe_pfxrtrs;
			mip6_store_advint_hook = mip6_store_advint;
			mip6_get_md_state_hook = mip6_get_md_state;
			mip6_rec_ba_hook = mip6_rec_ba;
			/* NOTE(review): the Binding Request hook is assigned
			   mip6_rec_bu here, while mip6_disable_hooks() clears
			   it as mip6_rec_br_hook -- confirm mip6_rec_bu is
			   the intended handler and not mip6_rec_br. */
			mip6_rec_br_hook = mip6_rec_bu;
			mip6_stop_bu_hook = mip6_stop_bu;
			mip6_esm_find_hook = mip6_esm_find;
		}
	}
	splx(s);
	return;
}


/* Clear the MIP6 hook function pointers for the requested scope; the
   inverse of mip6_enable_hooks(). */
void
mip6_disable_hooks(int scope)
{
	int s;

	/*
	 * Deactivate the hook functions. After this some packets might not
	 * come to the module...
	 */
	s = splimp();

	if (scope == MIP6_GENERIC_HOOKS) {
		mip6_store_dstopt_pre_hook = 0;
		mip6_rec_ctrl_sig_hook = 0;
		mip6_new_packet_hook = 0;
		mip6_icmp6_input_hook = 0;
		mip6_output_hook = 0;
	}

	if (scope == MIP6_SPECIFIC_HOOKS) {

		/* De-activate Home Agent-specific hooks */
		if (MIP6_IS_HA_ACTIVE) {
			mip6_rec_ra_hook = 0;
			mip6_global_addr_hook = 0;
			mip6_hal_dynamic_hook = 0;
			mip6_write_config_data_ha_hook = 0;
			mip6_clear_config_data_ha_hook = 0;
			mip6_enable_func_ha_hook = 0;
		}

		/* De-activate Mobile Node-specific hooks */
		if (MIP6_IS_MN_ACTIVE) {
			mip6_route_optimize_hook = 0;
			mip6_rec_ra_hook = 0;
			mip6_select_defrtr_hook = 0;
			mip6_get_home_prefix_hook = 0;
			mip6_prelist_update_hook = 0;
			mip6_expired_defrouter_hook = 0;
			mip6_probe_pfxrtrs_hook = 0;
			mip6_store_advint_hook = 0;
			mip6_get_md_state_hook = 0;
			mip6_rec_ba_hook = 0;
			mip6_rec_br_hook = 0;
			mip6_stop_bu_hook = 0;
			mip6_write_config_data_mn_hook = 0;
			mip6_clear_config_data_mn_hook = 0;
			mip6_enable_func_mn_hook = 0;
			mip6_esm_find_hook = 0;
			mip6_minus_a_case_hook = 0;
		}
	}
	splx(s);
	return;
}


int
mip6_attach(int module)
{
	/*
	 * Important that necessary
settings have been done _before_ calling + * mip6_attach(), e.g. home address specified or autoconfig set. + * mip6config program sees to that. + */ + +/* + No support for modules here yet. XXXYYY + + Old check (not valid any longer): + #if (defined(MIP6_MN) || defined (MIP6_HA) || defined(MIP6_MODULES)) +*/ + if (mip6_module) { +#if MIP6_DEBUG + char *old = "?", *new = "?"; + if (mip6_module == MIP6_HA_MODULE) + strcpy(old, "Home Agent"); + if (mip6_module == MIP6_MN_MODULE) + strcpy(old, "Mobile Node"); + if (module == MIP6_HA_MODULE) + strcpy(new, "Home Agent"); + if (module == MIP6_MN_MODULE) + strcpy(new, "Mobile Node"); + + mip6_debug("Can't switch operation mode from %s to %s \n" + "- please deactivate first (\"mip6config -x\")\n", + old, new); +#endif + return EINVAL; + } + + switch (module) { + case MIP6_HA_MODULE: + printf("%s: attach ha\n", __FUNCTION__); /* RM */ + mip6_module = module; + mip6_ha_init(); + break; + + case MIP6_MN_MODULE: + printf("%s: attach mn\n", __FUNCTION__); /* RM */ + mip6_module = module; + mip6_mn_init(); + break; + + default: +#if MIP6_DEBUG + mip6_debug("%s: illegal attach (module = %d)\n", __FUNCTION__, + module); +#endif + return EINVAL; + } + + if (MIP6_IS_MN_ACTIVE) { + if(mip6_get_home_prefix_hook) /* Test arbitrary hook */ + return 0; + + /* + * If autoconfig state: find a global address to use as Home + * Address. + * - Take first available on any interface, else if no found: + * - Enable hook to wait for a Router Advertisement to give + * us one. 
+ */ + if (mip6_config.autoconfig) { + struct nd_prefix *pr; + struct in6_addr addr; + + addr = in6addr_any; + if ((pr = mip6_find_auto_home_addr()) != NULL) { + mip6_esm_create(pr->ndpr_ifp, &addr, &addr, + &pr->ndpr_addr,pr->ndpr_plen, + MIP6_STATE_UNDEF, PERMANENT, + 0xFFFF); + mip6_enable_hooks(MIP6_SPECIFIC_HOOKS); + mip6_md_init(); + } + else { +#if MIP6_DEBUG + mip6_debug("Waiting for Router Advertisement " + "to give me an address.\n"); +#endif + mip6_minus_a_case_hook = mip6_minus_a_case; + } + } + else { + /* Manual config */ + mip6_enable_hooks(MIP6_SPECIFIC_HOOKS); + mip6_md_init(); + } + } + + if (MIP6_IS_HA_ACTIVE) { + /* XXXYYY Build anycast or is it done? */ + mip6_enable_hooks(MIP6_SPECIFIC_HOOKS); + } + return 0; +} + + +int +mip6_release(void) +{ + /* Disable the hooks */ + mip6_disable_hooks(MIP6_SPECIFIC_HOOKS); + + if (MIP6_IS_MN_ACTIVE) { + mip6_mn_exit(); + mip6_md_exit(); + } + + if (MIP6_IS_HA_ACTIVE) + mip6_ha_exit(); + +/* + Correspondent Node functionality is never terminated. + mip6_disable_hooks(MIP6_GENERIC_HOOKS); + mip6_exit(); +*/ + + mip6_module = 0; /* Make HA or MN inactive */ + + return 0; +} diff --git a/bsd/netinet6/mip6_io.c b/bsd/netinet6/mip6_io.c new file mode 100644 index 000000000..739a1177f --- /dev/null +++ b/bsd/netinet6/mip6_io.c @@ -0,0 +1,1507 @@ +/* $KAME: mip6_io.c,v 1.7 2000/03/25 07:23:53 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, 1998, 1999 and 2000 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. + * + * Author: Conny Larsson + * + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +void (*mip6_icmp6_output_hook)(struct mbuf *) = 0; +struct mip6_esm * (*mip6_esm_find_hook)(struct in6_addr *) = 0; + + +/* Declaration of Global variables. */ +struct mip6_indata *mip6_inp = NULL; +struct mip6_output *mip6_outq = NULL; + + + +/* + ############################################################################## + # + # RECEIVING FUNCTIONS + # These functions receives the incoming IPv6 packet and further processing of + # the packet depends on the content in the packet. 
 #
 ##############################################################################
 */

/*
 ******************************************************************************
 * Function:    mip6_new_packet
 * Description: Called once when a new IPv6 packet is received. Resets the
 *              mip6_inp variable needed later when options in the dest-
 *              ination header are validated.
 * Ret value:   0 if OK. Otherwise IPPROTO_DONE.
 * Note:        A prerequisite for this function is that the AH or ESP header
 *              is included in the same IPv6 packet as the destination header,
 *              i.e we are using transport mode and not tunneling mode.
 ******************************************************************************
 */
int
mip6_new_packet(m)
struct mbuf *m;  /* Mbuf containing IPv6 header */
{
	/* If memory for global variable mip6_indata already allocated,
	   discard it.  Each member is freed individually since any subset
	   may have been allocated by the previous packet. */
	if (mip6_inp != NULL) {
		if (mip6_inp->bu_opt != NULL)
			FREE(mip6_inp->bu_opt, M_TEMP);
		if (mip6_inp->ba_opt != NULL)
			FREE(mip6_inp->ba_opt, M_TEMP);
		if (mip6_inp->br_opt != NULL)
			FREE(mip6_inp->br_opt, M_TEMP);
		if (mip6_inp->ha_opt != NULL)
			FREE(mip6_inp->ha_opt, M_TEMP);
		if (mip6_inp->uid != NULL)
			FREE(mip6_inp->uid, M_TEMP);
		if (mip6_inp->coa != NULL)
			FREE(mip6_inp->coa, M_TEMP);
		if (mip6_inp->hal != NULL)
			FREE(mip6_inp->hal, M_TEMP);
		FREE(mip6_inp, M_TEMP);
		mip6_inp = NULL;
	}

	/* Allocate memory for global variable mip6_inp */
	mip6_inp = (struct mip6_indata *)
		MALLOC(sizeof(struct mip6_indata), M_TEMP, M_WAITOK);
	if (mip6_inp == NULL)
		panic("%s: We should not come here !!!!", __FUNCTION__);
	bzero(mip6_inp, sizeof(struct mip6_indata));

	return 0;
}



/*
 ******************************************************************************
 * Function:    mip6_store_dstopt_pre
 * Description: Pre-processing used by the hook function.
 * Ret value:   0 if OK.
Otherwise IPPROTO_DONE + ****************************************************************************** + */ +int +mip6_store_dstopt_pre(m, opt, off, dstlen) +struct mbuf *m; /* Pointer to the beginning of mbuf */ +u_int8_t *opt; /* Pointer to the beginning of current option in mbuf */ +u_int8_t off; /* Offset from beginning of mbuf to end of dest header */ +u_int8_t dstlen; /* Remaining length of Destination header */ +{ + u_int8_t type; /* Destination option type */ + + type = *opt; + if (type == IP6OPT_BINDING_UPDATE) { + if (dstlen < IP6OPT_BUMINLEN) { + ip6stat.ip6s_toosmall++; + return IPPROTO_DONE; + } + + if (mip6_store_dstopt(m, opt, off-dstlen) != 0) + return IPPROTO_DONE; + } else if (type == IP6OPT_BINDING_ACK) { + if (dstlen < IP6OPT_BAMINLEN) { + ip6stat.ip6s_toosmall++; + return IPPROTO_DONE; + } + + if (mip6_store_dstopt(m, opt, off-dstlen) != 0) + return IPPROTO_DONE; + } else if (type == IP6OPT_BINDING_REQ) { + if (dstlen < IP6OPT_BRMINLEN) { + ip6stat.ip6s_toosmall++; + return IPPROTO_DONE; + } + + if (mip6_store_dstopt(m, opt, off-dstlen) != 0) + return IPPROTO_DONE; + } else if (type == IP6OPT_HOME_ADDRESS) { + if (dstlen < IP6OPT_HAMINLEN) { + ip6stat.ip6s_toosmall++; + return IPPROTO_DONE; + } + + if (mip6_store_dstopt(m, opt, off-dstlen) != 0) + return IPPROTO_DONE; + } + + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_store_dstopt + * Description: Save each MIPv6 option from the Destination header continously. + * They will be evaluated when the entire destination header has + * been read. 
+ * Ret value: 0 if OK + * Otherwise protocol error code from netinet/in.h + ****************************************************************************** + */ +int +mip6_store_dstopt(mp, opt, optoff) +struct mbuf *mp; /* Pointer to the beginning of mbuf */ +u_int8_t *opt; /* Pointer to the beginning of current option in mbuf */ +u_int8_t optoff; /* Offset from beginning of mbuf to start of current + option */ +{ + struct mip6_opt_bu *bu_opt; /* Ptr to BU option data */ + struct mip6_opt_ba *ba_opt; /* Ptr to BA option data */ + struct mip6_opt_br *br_opt; /* Ptr to BR option data */ + struct mip6_opt_ha *ha_opt; /* Ptr to HA option data */ + int tmplen; /* Tmp length for positioning in option */ + int totlen; /* Total length of option + sub-option */ + int error; + + /* Find out what kind of buffer we are dealing with */ + switch (*opt) { + case IP6OPT_BINDING_UPDATE: + /* Allocate and store Binding Update option data */ + mip6_inp->bu_opt = (struct mip6_opt_bu *) + MALLOC(sizeof(struct mip6_opt_bu), M_TEMP, M_WAITOK); + if (mip6_inp->bu_opt == NULL) + return ENOBUFS; + bzero(mip6_inp->bu_opt, sizeof(struct mip6_opt_bu)); + + bu_opt = mip6_inp->bu_opt; + m_copydata(mp, optoff, sizeof(bu_opt->type), + (caddr_t)&bu_opt->type); + tmplen = sizeof(bu_opt->type); + m_copydata(mp, optoff + tmplen, sizeof(bu_opt->len), + (caddr_t)&bu_opt->len); + tmplen += sizeof(bu_opt->len); + m_copydata(mp, optoff + tmplen, sizeof(bu_opt->flags), + (caddr_t)&bu_opt->flags); + tmplen += sizeof(bu_opt->flags); + m_copydata(mp, optoff + tmplen, sizeof(bu_opt->prefix_len), + (caddr_t)&bu_opt->prefix_len); + tmplen += sizeof(bu_opt->prefix_len); + m_copydata(mp, optoff + tmplen, sizeof(bu_opt->seqno), + (caddr_t)&bu_opt->seqno); + tmplen += sizeof(bu_opt->seqno); + m_copydata(mp, optoff + tmplen, sizeof(bu_opt->lifetime), + (caddr_t)&bu_opt->lifetime); + tmplen += sizeof(bu_opt->lifetime); + + bu_opt->seqno = ntohs(bu_opt->seqno); + bu_opt->lifetime = ntohl(bu_opt->lifetime); + + /* Set 
the BU option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_BU; + + /* If sub-options are present, store them as well. */ + if (bu_opt->len > IP6OPT_BULEN) { + totlen = bu_opt->len + 2; + error = mip6_store_dstsubopt(mp, opt, optoff, totlen, tmplen); + if (error) + return error; + } + break; + case IP6OPT_BINDING_ACK: + /* Allocate and store all Binding Acknowledgement option data */ + mip6_inp->ba_opt = (struct mip6_opt_ba *) + MALLOC(sizeof(struct mip6_opt_ba), M_TEMP, M_WAITOK); + if (mip6_inp->ba_opt == NULL) + return ENOBUFS; + bzero(mip6_inp->ba_opt, sizeof(struct mip6_opt_ba)); + + ba_opt = mip6_inp->ba_opt; + m_copydata(mp, optoff, sizeof(ba_opt->type), + (caddr_t)&ba_opt->type); + tmplen = sizeof(ba_opt->type); + m_copydata(mp, optoff + tmplen, sizeof(ba_opt->len), + (caddr_t)&ba_opt->len); + tmplen += sizeof(ba_opt->len); + m_copydata(mp, optoff + tmplen, sizeof(ba_opt->status), + (caddr_t)&ba_opt->status); + tmplen += sizeof(ba_opt->status); + m_copydata(mp, optoff + tmplen, sizeof(ba_opt->seqno), + (caddr_t)&ba_opt->seqno); + tmplen += sizeof(ba_opt->seqno); + m_copydata(mp, optoff + tmplen, sizeof(ba_opt->lifetime), + (caddr_t)&ba_opt->lifetime); + tmplen += sizeof(ba_opt->lifetime); + m_copydata(mp, optoff + tmplen, sizeof(ba_opt->refresh), + (caddr_t)&ba_opt->refresh); + tmplen += sizeof(ba_opt->refresh); + + ba_opt->seqno = ntohs(ba_opt->seqno); + ba_opt->lifetime = ntohl(ba_opt->lifetime); + ba_opt->refresh = ntohl(ba_opt->refresh); + + /* Set the BA option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_BA; + + /* If sub-options are present, store them as well */ + if (ba_opt->len > IP6OPT_BALEN) { + totlen = ba_opt->len + 2; + error = mip6_store_dstsubopt(mp, opt, optoff, totlen, tmplen); + if (error) + return error; + } + break; + case IP6OPT_BINDING_REQ: + /* Allocate and store Binding Update option data */ + mip6_inp->br_opt = (struct mip6_opt_br *) + MALLOC(sizeof(struct mip6_opt_br), M_TEMP, M_WAITOK); + if (mip6_inp->br_opt == NULL) 
+ return ENOBUFS; + bzero(mip6_inp->br_opt, sizeof(struct mip6_opt_br)); + + br_opt = mip6_inp->br_opt; + m_copydata(mp, optoff, sizeof(br_opt->type), + (caddr_t)&br_opt->type); + tmplen = sizeof(br_opt->type); + m_copydata(mp, optoff + tmplen, sizeof(br_opt->len), + (caddr_t)&br_opt->len); + tmplen += sizeof(br_opt->len); + + /* Set the BR option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_BR; + + /* If sub-options are present, store them as well. */ + if (br_opt->len > IP6OPT_BRLEN) { + totlen = br_opt->len + 2; + error = mip6_store_dstsubopt(mp, opt, optoff, totlen, tmplen); + if (error) + return error; + } + break; + case IP6OPT_HOME_ADDRESS: + /* Allocate and store Home Address option data */ + mip6_inp->ha_opt = (struct mip6_opt_ha *) + MALLOC(sizeof(struct mip6_opt_ha), M_TEMP, M_WAITOK); + if (mip6_inp->ha_opt == NULL) + return ENOBUFS; + bzero(mip6_inp->ha_opt, sizeof(struct mip6_opt_ha)); + + /* Store Home Address option data */ + ha_opt = mip6_inp->ha_opt; + m_copydata(mp, optoff, sizeof(ha_opt->type), + (caddr_t)&ha_opt->type); + tmplen = sizeof(ha_opt->type); + m_copydata(mp, optoff + tmplen, sizeof(ha_opt->len), + (caddr_t)&ha_opt->len); + tmplen += sizeof(ha_opt->len); + m_copydata(mp, optoff + tmplen, sizeof(ha_opt->home_addr), + (caddr_t)&ha_opt->home_addr); + tmplen += sizeof(ha_opt->home_addr); + + /* Set the HA option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_HA; + break; + default: + /* We will not come here since the calling function knows + which options to call this function for. */ + } + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_store_dstsubopt + * Description: Save each MIPv6 suboption from the Destination header. + * They will be evaluated when the entire destination header has + * been read. 
+ * Ret value: 0 if OK + * Otherwise protocol error code from netinet/in.h + ****************************************************************************** + */ +int +mip6_store_dstsubopt(mp, opt, optoff, totlen, tmplen) +struct mbuf *mp; /* Pointer to start of mbuf */ +u_int8_t *opt; /* Pointer to start of current option in mbuf */ +u_int8_t optoff; /* Offset from start of mbuf to current option */ +int totlen; /* Total length for option + sub-options */ +int tmplen; /* Tmp length for positioning in option */ +{ + struct mip6_subopt_hal *hal; + struct mip6_subopt_coa *coa; + int ii, len; + + /* Loop over the sub-options. */ + while (tmplen < totlen) { + switch (*(opt + tmplen)) { + case IP6OPT_PAD1: + tmplen += 1; + break; + case IP6OPT_PADN: + tmplen += *(opt + tmplen + 1) + 2; + break; + case IP6SUBOPT_UNIQUEID: + /* Make sure that the length is OK */ + if (*(opt + tmplen + 1) != IP6OPT_UIDLEN) { + MIP6_FREEINDATA; + return EIO; + } + + /* Allocate and store additional sub-option data */ + mip6_inp->uid = (struct mip6_subopt_id *) + MALLOC(sizeof(struct mip6_subopt_id), M_TEMP, M_WAITOK); + if (mip6_inp->uid == NULL) + return ENOBUFS; + bzero(mip6_inp->uid, sizeof(struct mip6_subopt_id)); + + m_copydata(mp, optoff + tmplen, sizeof(struct mip6_subopt_id), + (caddr_t)mip6_inp->uid); + tmplen += sizeof(struct mip6_subopt_id); + mip6_inp->uid->id = ntohs(mip6_inp->uid->id); + + /* Set the Unique Id sub-option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_UID; + break; + case IP6SUBOPT_HALIST: + /* Make sure that the length is OK */ + if (*(opt + tmplen + 1) % IP6OPT_HALISTLEN) { + MIP6_FREEINDATA; + return EIO; + } + + /* Allocate and store additional sub-option data */ + len = *(opt + tmplen +1) / IP6OPT_HALISTLEN; + mip6_inp->hal = (struct mip6_subopt_hal *) + MALLOC(sizeof(struct mip6_subopt_hal) + + (len - 1) * sizeof(struct in6_addr), + M_TEMP, M_WAITOK); + if (mip6_inp->hal == NULL) { + MIP6_FREEINDATA; + return ENOMEM; + } + + hal = mip6_inp->hal; + 
m_copydata(mp, optoff + tmplen, sizeof(hal->type), + (caddr_t)&hal->type); + tmplen += sizeof(hal->type); + m_copydata(mp, optoff + tmplen, sizeof(hal->len), + (caddr_t)&hal->len); + tmplen += sizeof(hal->len); + + /* Loop over the addresses */ + for (ii = 0; ii < len; ii++) { + m_copydata(mp, optoff, tmplen, (caddr_t)&hal->halist[ii]); + tmplen += sizeof(struct in6_addr); + } + + /* Set the BA HA List sub-option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_HAL; + break; + case IP6SUBOPT_ALTCOA: + /* Make sure that the length is OK */ + if (*(opt + tmplen + 1) != IP6OPT_COALEN) { + MIP6_FREEINDATA; + return EIO; + } + + /* Allocate and store additional sub-option data */ + mip6_inp->coa = (struct mip6_subopt_coa *) + MALLOC(sizeof(struct mip6_subopt_coa), M_TEMP, M_WAITOK); + if (mip6_inp->coa == NULL) + return ENOBUFS; + bzero(mip6_inp->coa, sizeof(struct mip6_subopt_coa)); + + coa = mip6_inp->coa; + m_copydata(mp, optoff + tmplen, sizeof(coa->type), + (caddr_t)&coa->type); + tmplen += sizeof(coa->type); + m_copydata(mp, optoff + tmplen, sizeof(coa->len), + (caddr_t)&coa->len); + tmplen += sizeof(coa->len); + m_copydata(mp, optoff + tmplen, sizeof(coa->coa), + (caddr_t)&coa->coa); + tmplen += sizeof(coa->coa); + + /* Set the Alternate COA sub-option present flag */ + mip6_inp->optflag |= MIP6_DSTOPT_COA; + break; + default: + /* Quietly ignore and skip over the sub-option. + No statistics done. */ + tmplen += *(opt + tmplen + 1) + 2; + } + } + return 0; +} + + + +/* + ############################################################################## + # + # SENDING FUNCTIONS + # Functions used for processing of the outgoing IPv6 packet. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_output + * Description: This function is always called by function ip6_output. 
If there
 *              are any Destination Header options they will be added. A Home
 *              Address option MUST be added if the MN is roaming. Otherwise
 *              nothing is done.
 *              The options are stored in an output queue as a chain of mbufs
 *              associated with a destination address. This approach makes it
 *              possible to send it in any IPv6 packet carrying any payload,
 *              i.e piggy backing.
 * Ret value:   0 if OK
 *              Otherwise any appropriate error code
 ******************************************************************************
 */
int
mip6_output(m, pktopt)
struct mbuf *m;               /* Includes IPv6 header */
struct ip6_pktopts **pktopt;  /* Packet Extension headers, options and data */
{
	struct ip6_pktopts *opt;      /* Packet Extension headers (local) */
	struct mip6_output *outp;     /* Ptr to mip6 output element */
	struct mip6_esm    *esp;      /* Ptr to entry in event state list */
	struct ip6_hdr     *ip6;      /* IPv6 header */
	struct mip6_bc     *bcp;      /* Binding Cache list entry */
	struct mip6_bul    *bulp;
	struct mip6_bul    *bulp_hr;
	struct in6_addr    *dst_addr; /* Original dst address for the packet */
	int       error;   /* Error code from function call */
	int       off;     /* Offset from start of Destination Header in bytes */
	u_int8_t  opttype; /* Option type */

	ip6 = mtod(m, struct ip6_hdr *);
	opt = *pktopt;

	/* We have to maintain a list of all prefixes announced by the
	   rtadvd deamon (for on-link determination). */
	if (MIP6_IS_HA_ACTIVE) {
		if (ip6->ip6_nxt == IPPROTO_ICMPV6)
			if (mip6_icmp6_output_hook) (*mip6_icmp6_output_hook)(m);
	}

	/* If a COA for the destination address exist, i.e a BC entry is
	   found, then add a Routing Header and change the destination
	   address to the MN's COA.
	   NOTE(review): dst_addr is set to bcp->home_addr here -- presumably
	   identical to ip6_dst when a BC entry matched; the actual COA
	   rewrite appears to be handled inside mip6_add_rh/ip6_output,
	   confirm against those functions. */
	dst_addr = &ip6->ip6_dst;
	bcp = mip6_bc_find(&ip6->ip6_dst);
	if (bcp != NULL) {
		dst_addr = &bcp->home_addr;
		if ((error = mip6_add_rh(&opt, bcp)) != 0)
			return error;
	}

	/* If this is a MN and the source address is one of the home addresses
	   for the MN then a Home Address option must be inserted. */
	esp = NULL;
	if (MIP6_IS_MN_ACTIVE) {
		if (mip6_esm_find_hook)
			esp = (*mip6_esm_find_hook)(&ip6->ip6_src);

		if ((esp != NULL) && (esp->state >= MIP6_STATE_DEREG)) {
			/* Lazily allocate the packet-options block. */
			if (opt == NULL) {
				opt = (struct ip6_pktopts *)
					MALLOC(sizeof(struct ip6_pktopts),
					       M_TEMP, M_WAITOK);
				if (opt == NULL)
					return ENOBUFS;
				bzero(opt, sizeof(struct ip6_pktopts));
				opt->ip6po_hlim = -1;  /* -1 means to use default hop limit */
			}

			mip6_dest_offset(opt->ip6po_dest2, &off);
			if ((error = mip6_add_ha(&opt->ip6po_dest2,
						 &off, &ip6->ip6_src,
						 &esp->coa)) != 0)
				return error;

			/* If the MN initiate the traffic it should add a BU
			   option to the packet if no BUL entry exist and
			   there is a BUL "home registration" entry. */
			bulp = mip6_bul_find(dst_addr, &esp->home_addr);
			bulp_hr = mip6_bul_find(NULL, &esp->home_addr);
			if ((bulp == NULL) && (bulp_hr != NULL)) {
				/* Create BUL entry and BU option. */
				bulp = mip6_bul_create(dst_addr,
						       &esp->home_addr,
						       &esp->coa,
						       bulp_hr->lifetime, 0);
				if (bulp == NULL)
					return ENOBUFS;
				mip6_queue_bu(bulp, &esp->home_addr,
					      &esp->coa, 0,
					      bulp_hr->lifetime);
			}
		}
	}

	/* BU, BR and BA should not be sent to link-local, loop-back and
	   multicast addresses. */
	if (IN6_IS_ADDR_LINKLOCAL(dst_addr) || IN6_IS_ADDR_LOOPBACK(dst_addr) ||
	    IN6_IS_ADDR_MULTICAST(dst_addr)) {
		*pktopt = opt;
		return 0;
	}

	/* If the packet has not been generated completely by MIP6 the
	   output queue is searched for a pending, not-yet-sent option
	   destined to this address (piggy backing). */
	outp = NULL;
	if (mip6_config.enable_outq) {
		for (outp = mip6_outq; outp; outp = outp->next) {
			if ((outp->flag == NOT_SENT) &&
			    (IN6_ARE_ADDR_EQUAL(&outp->ip6_dst, dst_addr)))
				break;
		}
	}
	if (outp == NULL) {
		*pktopt = opt;
		return 0;
	}

	/* Destination option (either BU, BR or BA) found in the output list.
	   Add it to the existing destination options. */
	if (opt == NULL) {
		opt = (struct ip6_pktopts *)MALLOC(sizeof(struct ip6_pktopts),
						   M_TEMP, M_WAITOK);
		if (opt == NULL)
			return ENOBUFS;
		bzero(opt, sizeof(struct ip6_pktopts));
		opt->ip6po_hlim = -1;  /* -1 means to use default hop limit */
	}

	/* The first byte of the queued option is its type code. */
	mip6_dest_offset(opt->ip6po_dest2, &off);
	bcopy((caddr_t)outp->opt, (caddr_t)&opttype, 1);
	if (opttype == IP6OPT_BINDING_UPDATE) {
		/* Add my Binding Update option to the Destination Header */
		error = mip6_add_bu(&opt->ip6po_dest2, &off,
				    (struct mip6_opt_bu *)outp->opt,
				    (struct mip6_subbuf *)outp->subopt);
		if (error)
			return error;
	} else if (opttype == IP6OPT_BINDING_ACK) {
		/* Add my BA option to the Destination Header */
		error = mip6_add_ba(&opt->ip6po_dest2, &off,
				    (struct mip6_opt_ba *)outp->opt,
				    (struct mip6_subbuf *)outp->subopt);
		if (error)
			return error;
	} else if (opttype == IP6OPT_BINDING_REQ) {
		/* Add my BR option to the Destination Header */
		error = mip6_add_br(&opt->ip6po_dest2, &off,
				    (struct mip6_opt_br *)outp->opt,
				    (struct mip6_subbuf *)outp->subopt);
		if (error)
			return error;
	}

	/* Set flag for entry in output queueu to indicate that it has
	   been sent. */
	outp->flag = SENT;
	*pktopt = opt;
	return 0;
}



/*
 ##############################################################################
 #
 # UTILITY FUNCTIONS
 # Miscellaneous functions needed for the internal processing of incoming and
 # outgoing control signals.
 #
 ##############################################################################
 */

/*
 ******************************************************************************
 * Function:    mip6_add_rh
 * Description: Add a Routing Header type 0 to the outgoing packet, if it's
 *              not already present, and add the COA for the MN.
 *              If a Routing Header type 0 exists, but contains no data, or
 *              the COA for the MN is missing, it is added to the Routing
 *              Header.
 *              If the Routing Header is not of type 0 the function returns.
 * Ret value:   0 OK. Routing Header might have been added
 *              ENOBUFS No memory available
 * Note:        The destination address for the outgoing packet is not
 *              changed since this is taken care of in the ip6_output
 *              function.
 ******************************************************************************
 */
int
mip6_add_rh(opt, bcp)
struct ip6_pktopts **opt; /* Packet Ext headers, options and data */
struct mip6_bc *bcp;      /* Binding Cache list entry */
{
	struct ip6_pktopts *opt_local; /* Pkt Ext headers, options & data */
	struct ip6_rthdr0 *rthdr0;     /* Routing header type 0 */
	struct in6_addr *ip6rt_addr;   /* IPv6 routing address(es) */
	caddr_t ptr;                   /* Temporary pointer */
	int ii, len, new_len, idx;

	/* A Multicast address must not appear in a Routing Header. */
	if (IN6_IS_ADDR_MULTICAST(&bcp->coa))
		return 0;

	opt_local = *opt;
	if (opt_local == NULL) {
		/* No Packet options present at all. Add a Routing Header. */
		opt_local = (struct ip6_pktopts *)MALLOC(sizeof(struct ip6_pktopts),
							 M_TEMP, M_WAITOK);
		if (opt_local == NULL)
			return ENOBUFS;
		bzero(opt_local, sizeof(struct ip6_pktopts));
		opt_local->ip6po_hlim = -1; /* -1 means to use default hop limit */

		opt_local->ip6po_rhinfo.ip6po_rhi_rthdr =
			mip6_create_rh(&bcp->coa, IPPROTO_IP);
		if(opt_local->ip6po_rhinfo.ip6po_rhi_rthdr == NULL)
			return ENOBUFS;
	} else if (opt_local->ip6po_rhinfo.ip6po_rhi_rthdr == NULL) {
		/* Packet extension header allocated but no RH present, add one. */
		opt_local->ip6po_rhinfo.ip6po_rhi_rthdr =
			mip6_create_rh(&bcp->coa, IPPROTO_IP);
		if(opt_local->ip6po_rhinfo.ip6po_rhi_rthdr == NULL)
			return ENOBUFS;
	} else {
		/* A RH exists. Don't do anything if the type is not 0. */
		if (opt_local->ip6po_rhinfo.ip6po_rhi_rthdr->ip6r_type !=
		    IPV6_RTHDR_TYPE_0)
			return 0;

		/* If the outgoing packet contains a BA the Routing Header is
		   correctly generated by MIP6. No further action is needed. */
		if (opt_local->ip6po_dest2 == NULL)
			return 0;

		/* Walk the TLV options in the second destination header,
		   looking for a Binding Acknowledgement. */
		len = (opt_local->ip6po_dest2->ip6d_len + 1) << 3;
		ii = 2;
		ptr = (caddr_t)opt_local->ip6po_dest2 + 2;
		while (ii < len) {
			if (*ptr == IP6OPT_PAD1) {
				ii += 1;
				ptr += 1;
				continue;
			}
			if (*ptr == IP6OPT_BINDING_ACK)
				return 0;
			/* *(ptr + 1) is the TLV length byte. */
			ii += *(ptr + 1) + 2;
			ptr += *(ptr + 1) + 2;
		}

		/* A routing header exists and the outgoing packet does not
		   include a BA. The routing header has been generated by a
		   user and must be checked. If the last segment is not equal
		   to the MN's COA, add it.  ip6r_len counts 8-byte units;
		   each address is 2 such units. */
		len = opt_local->ip6po_rhinfo.ip6po_rhi_rthdr->ip6r_len;
		if (len == 0)
			new_len = 2;
		else {
			new_len = len + 2;
			idx = (len / 2) - 1;
			rthdr0 = (struct ip6_rthdr0 *)
				opt_local->ip6po_rhinfo.ip6po_rhi_rthdr;
			ptr = (caddr_t)rthdr0 + sizeof(struct ip6_rthdr0);
			ip6rt_addr = (struct in6_addr *)ptr;
			if (IN6_ARE_ADDR_EQUAL(&bcp->coa, ip6rt_addr + idx))
				return 0;
		}

		rthdr0 = (struct ip6_rthdr0 *)
			MALLOC(sizeof(struct ip6_rthdr0) +
			       (new_len / 2) * sizeof(struct in6_addr), M_TEMP, M_WAITOK);
		if (rthdr0 == NULL)
			return ENOBUFS;

		/* Copy old header + addresses, then append the COA. */
		bcopy((caddr_t)opt_local->ip6po_rhinfo.ip6po_rhi_rthdr,
		      (caddr_t)rthdr0, (len + 1) * 8);
		bcopy((caddr_t)&bcp->coa, (caddr_t)rthdr0 + (len + 1) * 8,
		      sizeof(struct in6_addr));
		rthdr0->ip6r0_len = new_len;
		rthdr0->ip6r0_segleft = new_len / 2;

		/* NOTE(review): the old RH (presumably allocated by
		   ip6_setpktoptions with M_IP6OPT) is freed with M_IP6OPT,
		   but the replacement above is M_TEMP — verify the
		   malloc-tag pairing when this one is later freed. */
		FREE(opt_local->ip6po_rhinfo.ip6po_rhi_rthdr, M_IP6OPT);
		opt_local->ip6po_rhinfo.ip6po_rhi_rthdr =
			(struct ip6_rthdr *)rthdr0;
	}

	/* Change the IP destination address to the COA for the MN.
*/ + *opt = opt_local; + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_align + * Description: Align the outgoing Destination Header to 8-byte + * Ret value: - + ****************************************************************************** + */ +void +mip6_align(dstopt, off) +struct ip6_dest *dstopt; /* IPv6 destination options for the packet */ +int *off; /* Offset from start of Destination Header (byte) */ +{ + int rest; /* Rest of modulo division */ + u_int8_t padlen; /* Number of bytes to pad */ + u_int8_t padn; /* Number for option type PADN */ + + padn = IP6OPT_PADN; + rest = *off % 8; + if (rest) { + padlen = 8 - rest; + if (rest == 7) { + /* Add a PAD1 option */ + bzero((caddr_t)dstopt + *off, 1); + *off += 1; + } else { + /* Add a PADN option */ + bzero((caddr_t)dstopt + *off, padlen); + bcopy(&padn, (caddr_t)dstopt + *off, 1); + padlen = padlen - 2; + bcopy(&padlen, (caddr_t)dstopt + *off + 1, 1); + *off += padlen + 2; + } + } +} + + + +/* + ****************************************************************************** + * Function: mip6_dest_offset + * Description: Calculate offset for new data in the Destination Header. + * Additional options will be added beginning at the offset. 
+ ****************************************************************************** + */ +void +mip6_dest_offset(dstopt, off) +struct ip6_dest *dstopt; /* IPv6 destination options for the packet */ +int *off; /* Offset from start of Destination Header (byte) */ +{ + int ii; /* Internal counter */ + u_int8_t opttype; /* Option type found in Destination Header*/ + u_int8_t optlen; /* Option length incl type and length */ + u_int32_t len; /* Length of Destination Header in bytes */ + + if (dstopt == NULL) { + *off = 0; + return; + } + + len = (dstopt->ip6d_len + 1) << 3; + *off = 2; + + for (ii = 2; ii < len;) { + bcopy((caddr_t)dstopt + ii, (caddr_t)&opttype, 1); + if (opttype == IP6OPT_PAD1) { + *off = ii; + ii += 1; + continue; + } + bcopy((caddr_t)dstopt + ii + 1, (caddr_t)&optlen, 1); + if (opttype == IP6OPT_PADN) { + *off = ii; + ii += 2 + optlen; + } else { + ii += 2 + optlen; + *off = ii; + } + } +} + + + +/* + ****************************************************************************** + * Function: mip6_add_ha + * Description: Add Home Address option to the Destination Header. Change the + * IPv6 source address to the care-of address of the MN. 
 * Ret value:   0 if OK
 *              Otherwise any appropriate error code
 ******************************************************************************
 */
int
mip6_add_ha(dstopt, off, src_addr, coa)
struct ip6_dest **dstopt;  /* IPv6 destination options for the packet */
int *off;                  /* Offset from start of Dest Header (byte) */
struct in6_addr *src_addr; /* IPv6 header source address */
struct in6_addr *coa;      /* MN's care-of address */
{
	struct ip6_dest *new_opt; /* Old dest options + Home address option */
	struct ip6_dest *dest;    /* Local variable for destination option */
	int ii;                   /* Internal counter */
	int rest;                 /* Rest of modulo division */
	u_int8_t padn;            /* Number for option type PADN */
	u_int8_t opttype;         /* Option type */
	u_int8_t optlen;          /* Option length excluding type and length */
	u_int8_t dstlen;          /* Destination Header length in 8-bytes */
	u_int32_t len;            /* Length of Destination Header in bytes */

	/* Allocate memory for the Home Address option */
	dest = *dstopt;
	if (dest == NULL) {
		dest = (struct ip6_dest *)MALLOC(sizeof(struct ip6_dest) +
						 sizeof(struct mip6_opt_ha),
						 M_TEMP, M_WAITOK);
		if (dest == NULL)
			return ENOBUFS;
		bzero(dest, sizeof(struct ip6_dest) + sizeof(struct mip6_opt_ha));
		*off = 2;
	} else {
		/* Grow the existing header: copy into a bigger buffer with
		   room for the Home Address option, free the old one. */
		len = (dest->ip6d_len + 1) << 3;
		new_opt = (struct ip6_dest *)MALLOC(len +
						    sizeof(struct mip6_opt_ha),
						    M_TEMP, M_WAITOK);
		if (new_opt == NULL)
			return ENOBUFS;
		bzero(new_opt, len + sizeof(struct mip6_opt_ha));
		bcopy((caddr_t)dest, (caddr_t)new_opt, len);
		/* NOTE(review): freed with M_IP6OPT but reallocated here
		   with M_TEMP — verify the malloc-tag pairing. */
		FREE(dest, M_IP6OPT);
		dest = new_opt;
	}

	/* Make sure that the offset is correct for adding a Home Address
	   option.  All branches leave *off % 4 == 2 before the type byte;
	   NOTE(review): this is only mod-4 alignment — confirm it meets
	   the HA option's 8n+6 requirement in the Mobile IPv6 draft. */
	padn = IP6OPT_PADN;
	rest = *off % 4;
	if (rest == 0) {
		/* Add a PADN option with length 0 */
		bzero((caddr_t)dest + *off, 2);
		bcopy(&padn, (caddr_t)dest + *off, 1);
		*off += 2;
	} else if (rest == 1) {
		/* Add a PAD1 option */
		bzero((caddr_t)dest + *off, 1);
		*off += 1;
	} else if (rest == 3) {
		/* Add a PADN option with length 1 (reuses padn == 1 as the
		   length byte; the pad data byte comes from the bzero). */
		bzero((caddr_t)dest + *off, 3);
		bcopy(&padn, (caddr_t)dest + *off, 1);
		bcopy(&padn, (caddr_t)dest + *off + 1, 1);
		*off += 3;
	}

	/* Add the option type and length, then the home address (the
	   original, pre-care-of, source address). */
	opttype = IP6OPT_HOME_ADDRESS;
	optlen = IP6OPT_HALEN;

	bcopy(&opttype, (caddr_t)dest + *off, 1);
	*off += 1;
	bcopy(&optlen, (caddr_t)dest + *off, 1);
	*off += 1;

	for (ii = 0; ii < 4; ii++) {
		bcopy((caddr_t)&src_addr->s6_addr32[ii], (caddr_t)dest + *off, 4);
		*off += 4;
	}

	/* Align the Destination Header to 8-byte */
	mip6_align(dest, off);

	/* Change the total length of the Destination header */
	dstlen = (*off >> 3) - 1;
	bcopy(&dstlen, (caddr_t)dest + 1, 1);

	/* Change the IP6 source address to the care-of address */
	src_addr->s6_addr32[0] = coa->s6_addr32[0];
	src_addr->s6_addr32[1] = coa->s6_addr32[1];
	src_addr->s6_addr32[2] = coa->s6_addr32[2];
	src_addr->s6_addr32[3] = coa->s6_addr32[3];
	*dstopt = dest;
	return 0;
}



/*
 ******************************************************************************
 * Function:    mip6_add_bu
 * Description: Copy BU option and sub-option (if present) to a Destination
 *              Header.
 *              Memory in the Destination Header for the BU is created, the
 *              header is aligned to 8-byte alignment and the total length of
 *              the header is updated.
+ * Ret value: 0 if OK + * Otherwise any appropriate error code + ****************************************************************************** + */ +int +mip6_add_bu(dstopt, off, optbu, subopt) +struct ip6_dest **dstopt; /* IPv6 destination options for the packet */ +int *off; /* Offset from start of Dest Header (byte) */ +struct mip6_opt_bu *optbu; /* BU option data */ +struct mip6_subbuf *subopt; /* BU sub-option data (NULL if not present) */ +{ + struct ip6_dest *new_opt; /* Old destination options + BU option */ + struct ip6_dest *dest; /* Local variable for destination option */ + u_int8_t padn; /* Number for option type PADN */ + u_int8_t dstlen; /* Destination Header length in 8-bytes */ + int offlen; /* Offset for option length in the buffer */ + int rest; /* Rest of modulo division */ + int optlen; /* Length of BU option incl sub-options */ + int tmp16; /* Temporary converting of 2-byte */ + int tmp32; /* Temporary converting of 4-byte */ + int len; /* Length of allocated memory */ + int after, before; + + /* Verify input */ + if (optbu == NULL) + return 0; + + /* Allocate memory for the BU option and sub-option (if present). */ + dest = *dstopt; + if (dest == NULL) { + len = sizeof(struct ip6_dest) + sizeof(struct mip6_opt_bu) + 8; + if (subopt != NULL) + len += subopt->len; + + dest = (struct ip6_dest *)MALLOC(len, M_TEMP, M_WAITOK); + if (dest == NULL) + return ENOBUFS; + bzero(dest, len); + *off = 2; + } else { + len = (dest->ip6d_len + 1) << 3; + len += sizeof(struct mip6_opt_bu) + 8; + if (subopt != NULL) + len += subopt->len; + + new_opt = (struct ip6_dest *)MALLOC(len, M_TEMP, M_WAITOK); + if (new_opt == NULL) + return ENOBUFS; + + bzero(new_opt, len); + bcopy((caddr_t)dest, (caddr_t)new_opt, (dest->ip6d_len + 1) << 3); + FREE(dest, M_IP6OPT); + dest = new_opt; + } + + /* Compensate for the alignment requirement. 
*/ + padn = IP6OPT_PADN; + rest = *off % 4; + if (rest == 0) { + /* Add a PADN option with length 0 */ + bzero((caddr_t)dest + *off, 2); + bcopy(&padn, (caddr_t)dest + *off, 1); + *off += 2; + } else if (rest == 1) { + /* Add a PAD1 option */ + bzero((caddr_t)dest + *off, 1); + *off += 1; + } else if (rest == 3) { + /* Add a PADN option with length 1 */ + bzero((caddr_t)dest + *off, 3); + bcopy(&padn, (caddr_t)dest + *off, 1); + bcopy(&padn, (caddr_t)dest + *off + 1, 1); + *off += 3; + } + offlen = *off + 1; + + /* Reset BU option length in case of retransmission. */ + optbu->len = IP6OPT_BULEN; + + /* Copy the BU data from the internal structure to the Dest Header */ + bcopy((caddr_t)&optbu->type, (caddr_t)dest + *off, sizeof(optbu->type)); + *off += sizeof(optbu->type); + bcopy((caddr_t)&optbu->len, (caddr_t)dest + *off, sizeof(optbu->len)); + *off += sizeof(optbu->len); + bcopy((caddr_t)&optbu->flags, (caddr_t)dest + *off, sizeof(optbu->flags)); + *off += sizeof(optbu->flags); + bcopy((caddr_t)&optbu->prefix_len, (caddr_t)dest + *off, + sizeof(optbu->prefix_len)); + *off += sizeof(optbu->prefix_len); + tmp16 = htons(optbu->seqno); + bcopy((caddr_t)&tmp16, (caddr_t)dest + *off, sizeof(optbu->seqno)); + *off += sizeof(optbu->seqno); + tmp32 = htonl(optbu->lifetime); + bcopy((caddr_t)&tmp32, (caddr_t)dest + *off, sizeof(optbu->lifetime)); + *off += sizeof(optbu->lifetime); + + /* If sub-options are present, add them as well. */ + optlen = optbu->len; + if (subopt) { + /* Align the Destination Header to 8-byte before sub-options + are added. */ + before = *off; + mip6_align(dest, off); + after = *off; + optlen += after - before; + + bcopy((caddr_t)subopt->buffer, (caddr_t)dest + *off, subopt->len); + *off += subopt->len; + optlen += subopt->len; + optbu->len += subopt->len; + } + + /* Make sure that the option length is correct. 
*/ + bcopy((caddr_t)&optlen, (caddr_t)dest + offlen, 1); + + /* Align the Destination Header to 8-byte */ + mip6_align(dest, off); + + /* Change the total length of the Destination header */ + dstlen = (*off >> 3) - 1; + bcopy(&dstlen, (caddr_t)dest + 1, 1); + *dstopt = dest; + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_add_ba + * Description: Copy BA option and sub-option (if present) to a Destination + * Header. + * Memory in the Destination Header for the BU is created, the + * header is aligned to 8-byte alignment and the total length of + * the header is updated. + * Ret value: 0 if OK + * Otherwise any appropriate error code + ****************************************************************************** + */ +int +mip6_add_ba(dstopt, off, optba, subopt) +struct ip6_dest **dstopt; /* IPv6 dest options for the packet */ +int *off; /* Offset from start of dest Header (byte) */ +struct mip6_opt_ba *optba; /* BA option data */ +struct mip6_subbuf *subopt; /* BA sub-option data (NULL if not present) */ +{ + struct ip6_dest *new_opt; /* Old destination options + BA option */ + struct ip6_dest *dest; /* Local variable for destination option */ + u_int8_t padn; /* Number for option type PADN */ + u_int8_t dstlen; /* Destination Header length in 8-bytes */ + int offlen; /* Offset for option length in the buffer */ + int optlen; /* Length of BA option incl sub-options */ + int rest; /* Rest of modulo division */ + int tmp16; /* Temporary converting of 2-byte */ + int tmp32; /* Temporary converting of 4-byte */ + int len; /* Length of allocated memory */ + int after, before; + + /* Verify input */ + if (optba == NULL) + return 0; + + /* Allocate memory for the BA option and sub-option (if present). 
*/ + dest = *dstopt; + if (dest == NULL) { + len = sizeof(struct ip6_dest) + sizeof(struct mip6_opt_ba) + 8; + if (subopt != NULL) + len += subopt->len; + + dest = (struct ip6_dest *)MALLOC(len, M_TEMP, M_WAITOK); + if (dest == NULL) + return ENOBUFS; + bzero(dest, len); + *off = 2; + } else { + len = (dest->ip6d_len + 1) << 3; + len += sizeof(struct mip6_opt_ba) + 8; + if (subopt != NULL) + len += subopt->len; + + new_opt = (struct ip6_dest *)MALLOC(len, M_TEMP, M_WAITOK); + if (new_opt == NULL) + return ENOBUFS; + bzero(new_opt, len); + bcopy((caddr_t)dest, (caddr_t)new_opt, (dest->ip6d_len + 1) << 3); + FREE(dest, M_IP6OPT); + dest = new_opt; + } + + /* Compensate for the alignment requirement. */ + padn = IP6OPT_PADN; + rest = *off % 4; + if (rest == 1) { + /* Add a PADN option with length 0 */ + bzero((caddr_t)dest + *off, 2); + bcopy(&padn, (caddr_t)dest + *off, 1); + *off += 2; + } else if (rest == 2) { + /* Add a PAD1 option */ + bzero((caddr_t)dest + *off, 1); + *off += 1; + } else if (rest == 0) { + /* Add a PADN option with length 1 */ + bzero((caddr_t)dest + *off, 3); + bcopy(&padn, (caddr_t)dest + *off, 1); + bcopy(&padn, (caddr_t)dest + *off + 1, 1); + *off += 3; + } + offlen = *off + 1; + + /* Copy the BA data from the internal structure to mbuf */ + bcopy((caddr_t)&optba->type, (caddr_t)dest + *off, sizeof(optba->type)); + *off += sizeof(optba->type); + bcopy((caddr_t)&optba->len, (caddr_t)dest + *off, sizeof(optba->len)); + *off += sizeof(optba->len); + bcopy((caddr_t)&optba->status, (caddr_t)dest + *off, + sizeof(optba->status)); + *off += sizeof(optba->status); + tmp16 = htons(optba->seqno); + bcopy((caddr_t)&tmp16, (caddr_t)dest + *off, sizeof(optba->seqno)); + *off += sizeof(optba->seqno); + tmp32 = htonl(optba->lifetime); + bcopy((caddr_t)&tmp32, (caddr_t)dest + *off, sizeof(optba->lifetime)); + *off += sizeof(optba->lifetime); + tmp32 = htonl(optba->refresh); + bcopy((caddr_t)&tmp32, (caddr_t)dest + *off, sizeof(optba->refresh)); + *off += 
sizeof(optba->refresh); + + /* If sub-options are present, add them as well. */ + optlen = IP6OPT_BALEN; + if (subopt) { + /* Align the Destination Header to 8-byte before sub-options + are added. */ + before = *off; + mip6_align(dest, off); + after = *off; + optlen += after - before; + + bcopy((caddr_t)subopt->buffer, (caddr_t)dest + *off, subopt->len); + *off += subopt->len; + optlen += subopt->len; + optba->len += subopt->len; + } + + /* Make sure that the option length is correct. */ + bcopy((caddr_t)&optlen, (caddr_t)dest + offlen, 1); + + /* Align the Destination Header to 8-byte */ + mip6_align(dest, off); + + /* Change the total length of the Destination header */ + dstlen = (*off >> 3) - 1; + bcopy(&dstlen, (caddr_t)dest + 1, 1); + *dstopt = dest; + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_add_br + * Description: Copy BR option and sub-option (if present) to a Destination + * Header. + * Memory in the Destination Header for the BU is created, the + * header is aligned to 8-byte alignment and the total length of + * the header is updated. 
+ * Ret value: 0 if OK + * Otherwise any appropriate error code + ****************************************************************************** + */ +int +mip6_add_br(dstopt, off, optbr, subopt) +struct ip6_dest **dstopt; /* IPv6 destination options for the packet */ +int *off; /* Offset from start of Dest Header (byte) */ +struct mip6_opt_br *optbr; /* BR option data */ +struct mip6_subbuf *subopt; /* BR sub-option data (NULL if not present) */ +{ + struct ip6_dest *new_opt; /* Old destination options + BU option */ + struct ip6_dest *dest; /* Local variable for destination option */ + u_int8_t dstlen; /* Destination Header length in 8-bytes */ + int offlen; /* Offset for option length in the buffer */ + int rest; /* Rest of modulo division */ + int optlen; /* Length of BR option incl sub-options */ + int len; /* Length of allocated memory */ + int after, before; + + /* Verify input */ + if (optbr == NULL) + return 0; + + /* Allocate memory for the BR option and sub-option (if present). */ + dest = *dstopt; + if (dest == NULL) { + len = sizeof(struct ip6_dest) + sizeof(struct mip6_opt_br) + 8; + if (subopt != NULL) + len += subopt->len; + + dest = (struct ip6_dest *)MALLOC(len, M_TEMP, M_WAITOK); + if (dest == NULL) + return ENOBUFS; + + bzero(dest, len); + *off = 2; + } else { + len = (dest->ip6d_len + 1) << 3; + len += sizeof(struct mip6_opt_br) + 8; + if (subopt != NULL) + len += subopt->len; + + new_opt = (struct ip6_dest *)MALLOC(len, M_TEMP, M_WAITOK); + if (new_opt == NULL) + return ENOBUFS; + + bzero(new_opt, len); + bcopy((caddr_t)dest, (caddr_t)new_opt, (dest->ip6d_len + 1) << 3); + FREE(dest, M_IP6OPT); + dest = new_opt; + } + + /* Compensate for the alignment requirement. 
*/ + rest = *off % 4; + if ((rest == 1) || (rest == 3)) { + /* Add a PAD1 option */ + bzero((caddr_t)dest + *off, 1); + *off += 1; + } + offlen = *off +1; + + /* Copy the BR data from the internal structure to mbuf */ + bcopy((caddr_t)&optbr->type, (caddr_t)dest + *off, sizeof(optbr->type)); + *off += sizeof(optbr->type); + bcopy((caddr_t)&optbr->len, (caddr_t)dest + *off, sizeof(optbr->len)); + *off += sizeof(optbr->len); + + + /* If sub-options are present, add them as well. */ + optlen = IP6OPT_BRLEN; + if (subopt) { + /* Align the Destination Header to 8-byte before sub-options + are added. */ + before = *off; + mip6_align(dest, off); + after = *off; + optlen += after - before; + + bcopy((caddr_t)subopt->buffer, (caddr_t)dest + *off, subopt->len); + *off += subopt->len; + optlen += subopt->len; + optbr->len += subopt->len; + } + + /* Make sure that the option length is correct. */ + bcopy((caddr_t)&optlen, (caddr_t)dest + offlen, 1); + + /* Align the Destination Header to 8-byte */ + mip6_align(dest, off); + + /* Change the total length of the Destination header */ + dstlen = (*off >> 3) - 1; + bcopy(&dstlen, (caddr_t)dest + 1, 1); + *dstopt = dest; + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_store_subopt + * Description: Store a sub-option in a buffer. The buffer must be allocated + * by the calling function and big enough to hold all the sub- + * options that may be added to an option (BU, BR or BA). + * Alignement requirement for the different sub-options are taken + * care of before its added to the buffer. + * Ret value: 0 if OK. 
Otherwise 1 + ****************************************************************************** + */ +int +mip6_store_subopt(subbuf, subopt) +struct mip6_subbuf **subbuf; /* Buffert containing sub-options */ +caddr_t subopt; /* TLV coded sub-option */ +{ + struct mip6_subopt_id *uid; + struct mip6_subopt_hal *hal; + struct mip6_subopt_coa *altcoa; + struct mip6_subbuf *buf; + u_int8_t pad1, padn; + u_int16_t tmp16; + int rest, no, ii, padlen; + + /* Make sure that a sub-option is present. */ + if (subopt == NULL) + return 0; + + /* Allocate memory for buffer if not already allocated. */ + buf = *subbuf; + if (buf == NULL) { + buf = (struct mip6_subbuf *)MALLOC(sizeof(struct mip6_subbuf), + M_TEMP, M_WAITOK); + if (buf == NULL) + return 1; + bzero(buf, sizeof(struct mip6_subbuf)); + } + + /* Find offset in the current buffer */ + padn = IP6OPT_PADN; + pad1 = IP6OPT_PAD1; + + switch (*subopt) { + case IP6SUBOPT_UNIQUEID: + /* Make sure that the length is OK */ + uid = (struct mip6_subopt_id *)subopt; + if (uid->len != IP6OPT_UIDLEN) + return 1; + + /* Compensate for the alignment requirement. */ + rest = buf->len % 2; + if (rest == 1) { + bcopy(&pad1, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + } + + /* Copy the sub-option to the buffer. */ + bcopy(&uid->type, (caddr_t)buf->buffer + buf->len, + sizeof(uid->type)); + buf->len += sizeof(uid->type); + + bcopy(&uid->len, (caddr_t)buf->buffer + buf->len, + sizeof(uid->len)); + buf->len += sizeof(uid->len); + + tmp16 = htons(uid->id); + bcopy(&tmp16, (caddr_t)buf->buffer + buf->len, sizeof(tmp16)); + buf->len += sizeof(tmp16); + break; + case IP6SUBOPT_HALIST: + /* Make sure that the length is OK */ + hal = (struct mip6_subopt_hal *)subopt; + if (hal->len % IP6OPT_HALISTLEN) + return 1; + + /* Compensate for the alignment requirement. 
*/ + rest = buf->len % 8; + if (rest > 3) { + padlen = rest - 4; + bcopy(&padn, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bcopy(&padlen, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bzero((caddr_t)buf->buffer + buf->len, padlen); + buf->len += padlen; + } else if (rest == 3) { + bcopy(&pad1, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + } else if (rest <= 1) { + padlen = rest + 4; + bcopy(&padn, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bcopy(&padlen, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bzero((caddr_t)buf->buffer + buf->len, padlen); + buf->len += padlen; + } + + /* Copy the sub-option to the buffer. */ + bcopy(&hal->type, (caddr_t)buf->buffer + buf->len, + sizeof(hal->type)); + buf->len += sizeof(hal->type); + + bcopy(&hal->len, (caddr_t)buf->buffer + buf->len, + sizeof(hal->len)); + buf->len += sizeof(hal->len); + + /* Loop over the addresses */ + no = hal->len / IP6OPT_HALISTLEN; + for (ii = 0; ii < no; ii++) { + bcopy(&hal->halist[ii], (caddr_t)buf->buffer + buf->len, + sizeof(hal->halist)); + buf->len += sizeof(hal->halist); + } + break; + case IP6SUBOPT_ALTCOA: + /* Make sure that the length is OK */ + altcoa = (struct mip6_subopt_coa *)subopt; + if (altcoa->len % IP6OPT_COALEN) + return 1; + + /* Compensate for the alignment requirement. 
*/ + rest = buf->len % 8; + if (rest > 3) { + padlen = rest - 4; + bcopy(&padn, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bcopy(&padlen, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bzero((caddr_t)buf->buffer + buf->len, padlen); + buf->len += padlen; + } else if (rest == 3) { + bcopy(&pad1, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + } else if (rest <= 1) { + padlen = rest + 4; + bcopy(&padn, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bcopy(&padlen, (caddr_t)buf->buffer + buf->len, 1); + buf->len += 1; + bzero((caddr_t)buf->buffer + buf->len, padlen); + buf->len += padlen; + } + + /* Copy the sub-option to the buffer. */ + bcopy(&altcoa->type, (caddr_t)buf->buffer + buf->len, + sizeof(altcoa->type)); + buf->len += sizeof(altcoa->type); + + bcopy(&altcoa->len, (caddr_t)buf->buffer + buf->len, + sizeof(altcoa->len)); + buf->len += sizeof(altcoa->len); + + bcopy(&altcoa->coa, (caddr_t)buf->buffer + buf->len, + sizeof(altcoa->coa)); + buf->len += sizeof(altcoa->coa); + break; + default: + } + *subbuf = buf; + return 0; +} diff --git a/bsd/netinet6/mip6_md.c b/bsd/netinet6/mip6_md.c new file mode 100644 index 000000000..16b670388 --- /dev/null +++ b/bsd/netinet6/mip6_md.c @@ -0,0 +1,1293 @@ +/* $KAME: mip6_md.c,v 1.14 2000/03/25 07:23:53 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, 1998, 1999 and 2000 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. + * + * Author: Mattias Pettersson + * + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +/* + * Mobile IPv6 Movement Detection for Mobile Nodes + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif +#include +#include + +#include + +struct nd_prefix *mip6_home_prefix; +struct nd_prefix *mip6_primary_prefix; +struct in6_addr mip6_primary_defrtr; +int mip6_md_state = MIP6_MD_UNDEFINED; +/* + * Mobile IPv6 Home Address route state for the Mobile Node. + * route_state NET == MD_HOME == network route. + * route_state HOST == MD_FOREIGN|UNDEFINED == host route. 
+ */ +int mip6_route_state = MIP6_ROUTE_NET; /* According to MD_UNDEFINED state. */ +int mip6_max_lost_advints = MIP6_MAX_LOST_ADVINTS; +int mip6_nd6_delay = 0; +int mip6_nd6_umaxtries = 0; + + +/* + ****************************************************************************** + * Function: mip6_tell_em + * Description: Print state change and tell event-state machine. + * Ret value: - + ****************************************************************************** + */ +static void +mip6_tell_em(int state, + struct nd_prefix *hp, + struct nd_prefix *pp, + struct nd_defrouter *dr) +{ +#if MIP6_DEBUG + mip6_debug("\nNew state: "); + switch (state) { + case MIP6_MD_HOME: + mip6_debug("HOME!\n"); + break; + case MIP6_MD_FOREIGN: + mip6_debug("FOREIGN!\n"); + break; + case MIP6_MD_UNDEFINED: + mip6_debug("UNDEFINED!\n"); + break; + } + mip6_debug("Home Prefix = %s\n", hp ? ip6_sprintf( + &hp->ndpr_prefix.sin6_addr) : "NULL"); + mip6_debug("Primary Prefix = %s\n", pp ? ip6_sprintf( + &pp->ndpr_prefix.sin6_addr) : "NULL"); + mip6_debug("Default Router = %s\n", dr ? ip6_sprintf( + &dr->rtaddr) : "NULL"); +#endif + mip6_new_defrtr(state, hp, pp, dr); +} + + +/* + ****************************************************************************** + * Function: mip6_md_init + * Description: Scan through the Event-State Machine List. + * Create a Home Prefix and a Home Address for the Mobile Node + * and add it to the prefix list (or just update it if the prefix + * is already existing). Detect which initial Movement Detection + * state we are in (HOME, FOREIGN or UNDEFINED) and tell the + * event-state machine. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_md_init() +{ + struct nd_prefix *pr, *existing_pr = NULL; + struct nd_defrouter *dr; + struct in6_ifaddr *ia; + struct mip6_esm *esp; /* Entry in the Event State machine list */ + int i, s, error; + + for (esp = mip6_esmq; esp; esp = esp->next) { + + /* + * Add the home prefix statically to the prefix list. + * Code taken from prelist_update(), prelist_add() and + * in6_ifadd(). + */ + pr = (struct nd_prefix *)MALLOC(sizeof(*pr), M_TEMP, M_WAITOK); + if (pr == NULL) { + log(LOG_ERR, "mip6_md_init: no mem for home prefix\n"); + } else { + bzero(pr, sizeof(*pr)); + + pr->ndpr_ifp = esp->ifp; + pr->ndpr_plen = esp->prefix_len; + + pr->ndpr_prefix.sin6_family = AF_INET6; + pr->ndpr_prefix.sin6_len = sizeof(pr->ndpr_prefix); + pr->ndpr_prefix.sin6_addr = esp->home_addr; + in6_prefixlen2mask(&pr->ndpr_mask, pr->ndpr_plen); + + /* make prefix in the canonical form */ + for (i = 0; i < 4; i++) + pr->ndpr_prefix.sin6_addr.s6_addr32[i] &= + pr->ndpr_mask.s6_addr32[i]; + + /* TODO: link into interface prefix list */ + + /* Default settings for unadvertised home prefix */ + pr->ndpr_raf_onlink = 0; + pr->ndpr_raf_auto = 0; + + /* + * If home prefix already exists in prefix list, use that + * entry instead. + */ + if ( (existing_pr = prefix_lookup(pr)) ) { + _FREE(pr, M_TEMP); + pr = existing_pr; + } + + /* Update (or set) certain fields in the home prefix */ + pr->ndpr_vltime = ND6_INFINITE_LIFETIME; + pr->ndpr_pltime = ND6_INFINITE_LIFETIME; + + if (in6_init_prefix_ltimes(pr)) { + log(LOG_ERR, "mip6_md_init: bad lifetimes\n"); + goto failure; + } + + + s = splnet(); /* Must be before goto statement */ + + if (existing_pr != NULL) { +#if MIP6_DEBUG + mip6_debug("mip6_md_init: Home prefix already exists, " + "no need to create new prefix.\n"); +#endif + goto skip_initialization; + } + + /* New prefix, fix all initialization. 
*/ + + pr->ndpr_statef_onlink = 0; /* Should be 0 since there + are no adv rtrs for + this pfx yet */ + LIST_INIT(&pr->ndpr_advrtrs); + + skip_initialization: + + /* If an autoconfigured address exists for pr, delete it */ + if (existing_pr != NULL) { + if (!IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) { + ia = in6ifa_ifpwithaddr(pr->ndpr_ifp, + &pr->ndpr_addr); + if (ia) { + error = mip6_delete_ifaddr( + &ia->ia_addr.sin6_addr, + pr->ndpr_ifp); + if (error) + printf("%s: address assignment" + " error " + "(errno = %d).\n", + __FUNCTION__, error); + } + } + } + + pr->ndpr_addr = esp->home_addr; + + if (existing_pr == NULL) { + /* link ndpr_entry to nd_prefix list */ + LIST_INSERT_HEAD(&nd_prefix, pr, ndpr_entry); + } + + splx(s); + } + if (esp != mip6_esmq) { +#if MIP6_DEBUG + mip6_debug("%s: Only supporting one home address in this " + "version.\n", __FUNCTION__); +#endif + } + mip6_home_prefix = pr; + + dr = TAILQ_FIRST(&nd_defrouter); +/* XXXYYY Add check for probably reachable router here as well. Mattias */ + if (pr->ndpr_advrtrs.lh_first && dr && + pfxrtr_lookup(pr, dr)) { + /* If we have home pfxrtrs and defrtr is one of these, then + we're home. */ + mip6_md_state = MIP6_MD_HOME; + /* XXX BUG ALERT: missing curly brace? 
*/ + if ((error = mip6_add_ifaddr(&pr->ndpr_addr, pr->ndpr_ifp, 64, + IN6_IFF_NODAD)) != 0) + printf("%s: address assignment error (errno = %d).\n", + __FUNCTION__, error); + mip6_route_state = MIP6_ROUTE_NET; + mip6_primary_prefix = mip6_home_prefix; + mip6_primary_defrtr = dr->rtaddr; + + mip6_tell_em(MIP6_MD_HOME, mip6_home_prefix, NULL, dr); + } + else { + if (dr) { + mip6_md_state = MIP6_MD_FOREIGN; + if ((error = mip6_add_ifaddr( + &pr->ndpr_addr, pr->ndpr_ifp, 128, + IN6_IFF_NODAD)) != 0) + printf("%s: address assignment error " + "(errno = %d).\n", + __FUNCTION__, error); + mip6_route_state = MIP6_ROUTE_HOST; + + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if ((pfxrtr_lookup(pr, dr) != NULL) && + !IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)&& + !IN6_IS_ADDR_MULTICAST(&pr->ndpr_addr) && + !IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_addr)) { + break; + } + } + if (pr) { + mip6_primary_prefix = pr; + mip6_primary_defrtr = dr->rtaddr; + mip6_tell_em(MIP6_MD_FOREIGN, mip6_home_prefix, + pr, dr); + } + else { +#if MIP6_DEBUG + mip6_debug("%s: At FOREIGN, but no primary " + "prefix found!\n", __FUNCTION__); +#endif + goto undefined; + } + } + else { + undefined: + mip6_md_state = MIP6_MD_UNDEFINED; + if ((error = mip6_add_ifaddr(&pr->ndpr_addr, + pr->ndpr_ifp, 64, + IN6_IFF_NODAD)) != 0) + printf("%s: address assignment error " + "(errno = %d).\n", __FUNCTION__, error); + mip6_route_state = MIP6_ROUTE_NET; + mip6_primary_defrtr = in6addr_any; + mip6_primary_prefix = NULL; + + mip6_tell_em(MIP6_MD_UNDEFINED, mip6_home_prefix, + NULL, NULL); + } + } + failure: + } +} + + +/* + ****************************************************************************** + * Function: mip6_select_defrtr + * Description: Usually called as an extension to defrtrlist_del() when the + * previous primary default router times out. Tries to select a + * new default router that announces the Home Prefix if available. 
+ * Manages the Movement Detection state transitions and + * reconfigures the Home Address with host or network route. + * Finally informs the event-state machine about any transitions + * and new default routers. + * Ret value: - + ****************************************************************************** + */ +void +mip6_select_defrtr() +{ + struct nd_prefix *pr = NULL/*, *prev_primary_prefix*/; + struct nd_defrouter *dr, anydr; + struct nd_pfxrouter *pfxrtr; + struct rtentry *rt = NULL; + struct llinfo_nd6 *ln = NULL; + int s = splnet(), error, state; + + pr = mip6_primary_prefix; + /* Only for sanity check */ + dr = mip6_primary_prefix ? + defrouter_lookup(&mip6_primary_defrtr, + mip6_primary_prefix->ndpr_ifp) : NULL; + state = mip6_md_state; + +#if MIP6_DEBUG + mip6_debug("\n"); +#endif +#if MIP6_DEBUG + mip6_debug("%s: previous primary dr = %s.\n", __FUNCTION__, + ip6_sprintf(&mip6_primary_defrtr)); + mip6_debug("%s: dr = %s.\n", __FUNCTION__, + dr ? ip6_sprintf(&dr->rtaddr) : "NULL"); +#endif + + if ( (mip6_md_state == MIP6_MD_HOME) || + (mip6_md_state == MIP6_MD_UNDEFINED) ) { + if ((pr = mip6_home_prefix) == NULL){ + log(LOG_ERR, "mip6_select_defrtr: no home prefix\n"); + splx(s); + return; + } + + if ((pfxrtr = find_pfxlist_reachable_router(pr)) != NULL) { +#if MIP6_DEBUG + mip6_debug("%s: there are (reachable) pfxrtrs at " + "home.\n", __FUNCTION__); +#endif + if (!IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr) && + !(IN6_IS_ADDR_MULTICAST(&pr->ndpr_addr) || + IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_addr))) { + + /* Pick first reachable pfxrtr. */ + state = MIP6_MD_HOME; + + dr = pfxrtr->router; + + /* Place dr first since its prim. 
*/ + TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_INSERT_HEAD(&nd_defrouter, dr, dr_entry); + +#if MIP6_DEBUG + mip6_debug("%s: picking %s as default router " + "on home subnet.\n", + __FUNCTION__, + ip6_sprintf(&(dr->rtaddr))); +#endif + goto found; + } + } + + if (pr->ndpr_advrtrs.lh_first == NULL) { +#if MIP6_DEBUG + mip6_debug("%s: there are no pfxrtrs at home, trying " + "non-home instead.\n", __FUNCTION__); +#endif + } + + /* + * No home prefix defrtr found, just drop through and pick + * one by the ordinary procedure below. + */ +#if MIP6_DEBUG + mip6_debug("%s: no home prefix router found.\n", __FUNCTION__); +#endif + } + + /* + * Go through the Default Router List in search for a (probably) + * reachable router that advertises a prefix and with an associated + * Care-of Address. This is a merge from defrouter_select(). + */ + if (TAILQ_FIRST(&nd_defrouter)) { + for (dr = TAILQ_FIRST(&nd_defrouter); dr; + dr = TAILQ_NEXT(dr, dr_entry)) { + + if ((rt = nd6_lookup(&dr->rtaddr, 0, dr->ifp)) && + (ln = (struct llinfo_nd6 *)rt->rt_llinfo) && + ND6_IS_LLINFO_PROBREACH(ln)) { + + /* + * Find a Care-of Address from a prefix + * announced by this router. + */ + for (pr = nd_prefix.lh_first; pr; + pr = pr->ndpr_next) { + if ((pfxrtr_lookup(pr, dr) != NULL) && + !IN6_IS_ADDR_UNSPECIFIED( + &pr->ndpr_addr) && + !IN6_IS_ADDR_MULTICAST( + &pr->ndpr_addr) && + !IN6_IS_ADDR_LINKLOCAL( + &pr->ndpr_addr)) { + state = MIP6_MD_FOREIGN; + +#if MIP6_DEBUG + mip6_debug("%s: new probably reachable defrtr %s on foreign subnet selected.\n", __FUNCTION__, ip6_sprintf(&dr->rtaddr)); +#endif + + /* + * Place dr first since + * its prim. + */ + TAILQ_REMOVE(&nd_defrouter, + dr, dr_entry); + TAILQ_INSERT_HEAD( + &nd_defrouter, + dr, dr_entry); + + goto found; + } + } + } + } + + /* + * No (probably) reachable router found that matched our requirements. 
+ * Go through the Default Router List again in search for any + * router that advertises a prefix and with an associated + * Care-of Address. This is a merge from defrouter_select(). + */ + for(dr = TAILQ_FIRST(&nd_defrouter); dr; dr = TAILQ_NEXT(dr, dr_entry)){ + /* + * Find a Care-of Address from a prefix announced by + * this router. + */ + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if ((pfxrtr_lookup(pr, dr) != NULL) && + !IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)&& + !IN6_IS_ADDR_MULTICAST(&pr->ndpr_addr) && + !IN6_IS_ADDR_LINKLOCAL(&pr->ndpr_addr)) { + state = MIP6_MD_FOREIGN; + +#if MIP6_DEBUG + mip6_debug("%s: new (unreachable?) " + "defrtr %s on foreign subnet " + "selected.\n", __FUNCTION__, + ip6_sprintf(&dr->rtaddr)); +#endif + + /* Place dr first since its prim. */ + TAILQ_REMOVE(&nd_defrouter, dr, + dr_entry); + TAILQ_INSERT_HEAD(&nd_defrouter, dr, + dr_entry); + goto found; + } + } + } + } + + /* + * No new defrtr or no with an associated Care-of Address found + * -> State = undefined + */ + pr = NULL; + dr = NULL; + state = MIP6_MD_UNDEFINED; +#if MIP6_DEBUG + mip6_debug("%s: no new good defrtr found.\n", __FUNCTION__); +#endif + + found: + /* XXXYYY Hope this merge is correct now... Fingers crossed. Mattias */ +#if MIP6_DEBUG + mip6_debug("%s: found: dr = %s.\n", __FUNCTION__, dr ? ip6_sprintf(&dr->rtaddr) : "NULL"); +#endif + if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) { +#if MIP6_DEBUG + mip6_debug("%s: TAILQ: dr = %s.\n", __FUNCTION__, dr ? ip6_sprintf(&dr->rtaddr) : "NULL"); +#endif + /* + * De-install the previous default gateway and install + * a new one. + * Note that if there is no reachable router in the list, + * the head entry will be used anyway. + * XXX: do we have to check the current routing table entry? + */ + bzero(&anydr, sizeof(anydr)); + defrouter_delreq(&anydr, 0); + defrouter_addreq(dr); + } + else { + /* + * The Default Router List is empty, so install the default + * route to an inteface. 
+ * XXX: The specification does not say this mechanism should + * be restricted to hosts, but this would be not useful + * (even harmful) for routers. + */ + if (!ip6_forwarding) { + /* + * De-install the current default route + * in advance. + */ + bzero(&anydr, sizeof(anydr)); + defrouter_delreq(&anydr, 0); + if (nd6_defifp) { + /* + * Install a route to the default interface + * as default route. + */ + defrouter_addifreq(nd6_defifp); + } + else /* noisy log? */ + log(LOG_INFO, "defrouter_select: " + "there's no default router and no default" + " interface\n"); + } + } + + + /* + * If we grab a (unreachable) defrouter that actually is a home + * prefix router, we should consider ourself at home rather than + * default foreign. + */ + if (dr) { + struct nd_pfxrouter *pfxrtr; + + pfxrtr = pfxrtr_lookup(mip6_home_prefix, dr); + if (pfxrtr && dr == pfxrtr->router) { +#if MIP6_DEBUG + mip6_debug("%s: dr = %s is obviously a home pfxrtr.\n", __FUNCTION__, dr ? ip6_sprintf(&dr->rtaddr) : "NULL"); +#endif + state = MIP6_MD_HOME; + pr = mip6_home_prefix; + } + } + + /* + * First case: same router as last time. + * Second case: coming from UNDEFINED, we might have had a router, but + * we didn't have a care-of address. + */ + if (IN6_ARE_ADDR_EQUAL(&mip6_primary_defrtr, + (dr ? &dr->rtaddr : &in6addr_any)) && + !(dr && mip6_primary_prefix == NULL)) { +#if MIP6_DEBUG + mip6_debug("%s: Warning: Primary default router hasn't " + "changed! 
No action taken.\n", __FUNCTION__); +#endif + return; + } + + /* + * Switch between network and host route for the Home Address + * in the following cases: + * + * md_state route_state + * + * HOME -> FOREIGN NET -> HOST + * UNDEFINED -> FOREIGN NET -> HOST + * FOREIGN -> HOME HOST -> NET + * FOREIGN -> UNDEFINED HOST -> NET + */ + + if ((state == MIP6_MD_HOME || state == MIP6_MD_UNDEFINED) + && mip6_route_state == MIP6_ROUTE_HOST) { + error = mip6_add_ifaddr(&mip6_home_prefix->ndpr_addr, + mip6_home_prefix->ndpr_ifp, 64, + IN6_IFF_NODAD); + if (error) + printf("%s: address assignment error (errno = %d).\n", + __FUNCTION__, error); + mip6_route_state = MIP6_ROUTE_NET; + } + else if (state == MIP6_MD_FOREIGN && + mip6_route_state == MIP6_ROUTE_NET) { + error = mip6_add_ifaddr(&mip6_home_prefix->ndpr_addr, + mip6_home_prefix->ndpr_ifp, 128, + IN6_IFF_NODAD); + if (error) + printf("%s: address assignment error (errno = %d).\n", + __FUNCTION__, error); + mip6_route_state = MIP6_ROUTE_HOST; + } + + /* + * If the Mobile Node has changed its primary prefix (probably due to + * a move to a different subnet), clear the Neighbor Cache from entries + * cloned from the previous primary prefix. This does not happen when + * we keep the same prefix but change default router. + */ +#if MIP6_DEBUG + mip6_debug("mip6_primary_prefix = %s\n", mip6_primary_prefix ? ip6_sprintf(&mip6_primary_prefix->ndpr_prefix.sin6_addr) : "NULL"); + mip6_debug("pr = %s\n", pr ? 
ip6_sprintf(&pr->ndpr_prefix.sin6_addr) : "NULL"); +#endif + if (mip6_primary_prefix && (pr != mip6_primary_prefix)) { + register struct llinfo_nd6 *ln; + + /* Taken from nd6_timer() */ + ln = llinfo_nd6.ln_next; + /* XXX BSD/OS separates this code -- itojun */ + while (ln && ln != &llinfo_nd6) { + struct rtentry *rt; + struct ifnet *ifp; + struct sockaddr_in6 *dst; + struct llinfo_nd6 *next = ln->ln_next; + + if ((rt = ln->ln_rt) == NULL) { + ln = next; + continue; + } + if ((ifp = rt->rt_ifp) == NULL) { + ln = next; + continue; + } + dst = (struct sockaddr_in6 *)rt_key(rt); + /* sanity check */ + if (!rt) + panic("rt=0 in %s(ln=%p)\n", __FUNCTION__, ln); + if (!dst) + panic("dst=0 in %s(ln=%p)\n", __FUNCTION__, ln); + + /* Skip if the address belongs to us */ + if (ln->ln_expire == 0) { + ln = next; + continue; + } + +#if MIP6_DEBUG + mip6_debug("Checking neighbor %s\n", dst ? ip6_sprintf(&dst->sin6_addr) : "NULL"); +#endif + if (in6_are_prefix_equal(&dst->sin6_addr, + &mip6_primary_prefix-> + ndpr_prefix.sin6_addr, + mip6_primary_prefix-> + ndpr_plen)) { + + /* Fake an INCOMPLETE neighbor that we're giving up */ + struct mbuf *m = ln->ln_hold; + if (m) { + m_freem(m); + } + ln->ln_hold = NULL; + +#if MIP6_DEBUG + mip6_debug("Deleting Neighbor %s.\n", + ip6_sprintf(&(satosin6( + rt_key(rt))->sin6_addr))); +#endif + +#if IPSEC +#ifndef __OpenBSD__ + key_sa_routechange(rt_key(rt)); +#endif +#endif + +#if MIP6_DEBUG + mip6_debug("Ref count = %d, now pfctlinput\n", + rt->rt_refcnt); +#endif + + /* New era */ + pfctlinput(PRC_REDIRECT_HOST, rt_key(rt)); + +#if 0 +#if MIP6_DEBUG + mip6_debug("Ref count = %d, now rt_mip6msg\n", + rt->rt_refcnt); +#endif + + rt_mip6msg(RTM_DELETE, ifp, rt); /* Useless? */ +#endif /* 0 */ +#if MIP6_DEBUG + mip6_debug("Ref count = %d, now RTM_DELETE\n", + rt->rt_refcnt); +#endif + nd6_free(rt); + } + ln = next; + /* + * XXX Also remove the link-local addresses which + * aren't ours? 
+ */ + } + + ln = llinfo_nd6.ln_next; + while (ln && ln != &llinfo_nd6) { + struct rtentry *rt; + struct ifnet *ifp; + struct sockaddr_dl *sdl; + struct sockaddr_in6 *dst; + struct llinfo_nd6 *next = ln->ln_next; + + if ((rt = ln->ln_rt) == NULL) { + ln = next; + continue; + } + if ((ifp = rt->rt_ifp) == NULL) { + ln = next; + continue; + } + dst = (struct sockaddr_in6 *)rt_key(rt); + /* sanity check */ + if (!rt) + panic("rt=0 in %s(ln=%p)\n", __FUNCTION__, ln); + if (!dst) + panic("dst=0 in %s(ln=%p)\n", __FUNCTION__, ln); + + /* Skip if the address belongs to us */ + if (ln->ln_expire == 0) { + ln = next; + continue; + } + +#if MIP6_DEBUG + mip6_debug("Checking neighbor %s round 2\n", dst ? ip6_sprintf(&dst->sin6_addr) : "NULL"); +#endif + if (in6_are_prefix_equal(&dst->sin6_addr, + &mip6_primary_prefix-> + ndpr_prefix.sin6_addr, + mip6_primary_prefix-> + ndpr_plen)) { + +#if MIP6_DEBUG + mip6_debug("Deleting Neighbor %s round 2.\n", + ip6_sprintf(&(satosin6( + rt_key(rt))->sin6_addr))); +#endif + +#if MIP6_DEBUG + mip6_debug("Ref count = %d, now RTM_DELETE\n", + rt->rt_refcnt); +#endif + if (rt && rt->rt_gateway && + rt->rt_gateway->sa_family == AF_LINK) { + sdl = (struct sockaddr_dl *)rt-> + rt_gateway; + rtrequest(RTM_DELETE, rt_key(rt), + (struct sockaddr *)0, + rt_mask(rt), 0, + (struct rtentry **)0); + } + } + ln = next; + /* + * XXX Also remove the link-local addresses which + * aren't ours? + */ + } + } + + /* + * Make decision permanent. + * Primary Default Router is already set above. + */ + mip6_md_state = state; + mip6_primary_prefix = pr; /* Other depend on this */ + /* + * Save rtaddr for next mip6_select_defrtr session. + */ + mip6_primary_defrtr = dr ? 
dr->rtaddr : in6addr_any; + + /* + * Assumptions made below: + * - dr is the chosen Default Router + * - pr is the new Primary Prefix if we're not home + */ + switch (mip6_md_state) { + case MIP6_MD_HOME: + mip6_tell_em(mip6_md_state, mip6_home_prefix, NULL, dr); + break; + + case MIP6_MD_FOREIGN: + mip6_tell_em(mip6_md_state, mip6_home_prefix, pr, dr); + break; + case MIP6_MD_UNDEFINED: + /* + * Note: we pass dr == NULL, but we might have a Default + * Router anyway, but with no prefix/Care-of Address + * associated. + */ + mip6_tell_em(mip6_md_state, mip6_home_prefix, NULL, NULL); + break; + } + splx(s); + return; +} + + +/* + ****************************************************************************** + * Function: mip6_prelist_update(pr, dr) + * Description: A hook to ND's prelist_update(). Checks if the Home Prefix + * was announced and in that case tries to force the Mobile Node + * to select that default router. If the Mobile Node was in + * UNDEFINED state we want to select that router immediately, no + * matter what the prefix was. + * Ret value: - + ****************************************************************************** + */ +void +mip6_prelist_update(pr, dr) + struct nd_prefix *pr; + struct nd_defrouter *dr; +{ + if (dr == NULL) { + return; + } + if (pr == mip6_home_prefix) { + /* It was the Home Prefix that was advertised. */ + + if (mip6_md_state != MIP6_MD_HOME) { + /* + * We're not home but here's a router advertising + * our home prefix => make it primary defrtr and + * we're home! + */ +#if MIP6_DEBUG + mip6_debug("%s: returning home.\n", __FUNCTION__); +#endif + mip6_md_state = MIP6_MD_HOME; + + /* State must be home before call. */ + if (TAILQ_FIRST(&nd_defrouter) != NULL) { + defrouter_select(); + } + else { +#if MIP6_DEBUG + mip6_debug("%s: Undef -> Home: no previous " + "router available " + "at this stage.\n", __FUNCTION__); +#endif + /* XXXYYY or use defrouter_select()? 
*/ + mip6_select_defrtr(); + } + } + } + else if (mip6_md_state == MIP6_MD_UNDEFINED) { + /* + * Take care of transitions from UNDEFINED to FOREIGN, when the + * prefix is already known. + */ + if (TAILQ_FIRST(&nd_defrouter) != NULL) { + defrouter_select(); + } + else { +#if MIP6_DEBUG + mip6_debug("%s: Strange, no default router available" + "at this stage.\n", __FUNCTION__); +#endif + /* XXXYYY or use defrouter_select()? */ + mip6_select_defrtr(); + } + } +} + + +/* + ****************************************************************************** + * Function: mip6_eager_md() + * Description: If eager Movement Detection is chosen, trim parameters to a + * really fast hand-off. The disadvantage is that the detection + * becomes very exposed to go into state UNDEFINED if one single + * packet is lost. + * Ret value: - + ****************************************************************************** + */ +void +mip6_eager_md(int enable) +{ + mip6_config.eager_md = enable; + if (enable) { + mip6_max_lost_advints = 1; /* Aggressive values */ + if (!mip6_nd6_delay) { + mip6_nd6_delay = nd6_delay; /* Store */ + mip6_nd6_umaxtries = nd6_umaxtries; /* Store */ + } + nd6_delay = 1; /* Aggressive values */ + nd6_umaxtries = 1; + } + else { + mip6_max_lost_advints = MIP6_MAX_LOST_ADVINTS; + if (mip6_nd6_delay) { + nd6_delay = mip6_nd6_delay; /* Restore */ + nd6_umaxtries = mip6_nd6_umaxtries; /* Restore */ + mip6_nd6_delay = 0; + mip6_nd6_umaxtries = 0; + } + } +} + + +/* + ****************************************************************************** + * Function: mip6_expired_defrouter() + * Description: If the field advint_expire (which is parallel to field + * expire for router lifetime) times out, allow a small number + * of lost Router Advertisements before doubting if this + * particular default router is still reachable. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_expired_defrouter(struct nd_defrouter *dr) +{ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + if (!dr) + return; + + if (dr->advint_expire && dr->advint_expire < time_second) { + if (++(dr->advints_lost) < mip6_max_lost_advints) { + /* advints_lost starts at 0. max = 1 (or more). */ + dr->advint_expire = time_second + dr->advint / 1000; +#if MIP6_DEBUG + mip6_debug("Adv Int #%d lost from router %s.\n", + dr->advints_lost, ip6_sprintf(&dr->rtaddr)); +#endif + } + else { + dr->advint_expire = 0; +#if MIP6_DEBUG + mip6_debug("Adv Int #%d lost from router %s.\n", + dr->advints_lost, ip6_sprintf(&dr->rtaddr)); +#endif + mip6_probe_defrouter(dr); + } + } +} + + +/* + ****************************************************************************** + * Function: mip6_probe_defrouter() + * Description: Probes a default router to see if it is still reachable. + * Ordinary Neigbor Discovery routines (NUD) takes care of the + * rest. Puts this router into ND state PROBE. + * Ret value: - + ****************************************************************************** + */ +void +mip6_probe_defrouter(struct nd_defrouter *dr) +{ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + long time_second = time.tv_sec; +#endif + struct rtentry *rt; + struct llinfo_nd6 *ln; + + if (!dr) + return; + + if (!(rt = nd6_lookup(&dr->rtaddr, 0, NULL))) + return; + + if ((rt->rt_flags & RTF_GATEWAY) + || (rt->rt_flags & RTF_LLINFO) == 0 + || !rt->rt_llinfo + || !rt->rt_gateway + || rt->rt_gateway->sa_family != AF_LINK) { + /* This is not a host route. 
*/ + return; + } + + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + if ((ln->ln_state == ND6_LLINFO_INCOMPLETE) + || (ln->ln_state == ND6_LLINFO_PROBE) + || (ln->ln_state == ND6_LLINFO_WAITDELETE) + || (ln->ln_state == ND6_LLINFO_NOSTATE)) + return; + + /* Force state to PROBE, simulate DELAY->PROBE */ + ln->ln_asked = 1; + ln->ln_state = ND6_LLINFO_PROBE; + ln->ln_expire = time_second + + nd_ifinfo[rt->rt_ifp->if_index].retrans / 1000; + nd6_ns_output(rt->rt_ifp, &dr->rtaddr, &dr->rtaddr, + ln, 0); +#if MIP6_DEBUG + mip6_debug("Probing defrouter %s\n", ip6_sprintf(&dr->rtaddr)); +#endif +} + + +/* + ****************************************************************************** + * Function: mip6_probe_pfxrtrs() + * Description: If a new or previously detached prefix is heard, probe (NUD) + * all prefix routers on the current primary prefix in order to + * quickly detect if we have moved. This is only enabled in + * eager Movement Detection. + * Ret value: - + ****************************************************************************** + */ +void +mip6_probe_pfxrtrs() +{ + struct nd_pfxrouter *pfr; + if (!mip6_config.eager_md) + return; + + if (!mip6_primary_prefix) + return; + +#if MIP6_DEBUG + mip6_debug("New or detached prefix received, probe old routers:\n"); +#endif + for (pfr = mip6_primary_prefix->ndpr_advrtrs.lh_first; + pfr; pfr = pfr->pfr_next) { + mip6_probe_defrouter(pfr->router); + } +} + + +/* + ****************************************************************************** + * Function: mip6_store_advint(ai, dr) + * Description: If Advertisement Interval option is available in Router + * Advertisements, keep a timer for this expiry parallel to the + * ordinary Router lifetime timer. 
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_store_advint(struct nd_opt_advint *ai, + struct nd_defrouter *dr) +{ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + /* Check the advertisement interval option */ + if (ai->nd_opt_int_len != 1) { + log(LOG_INFO, "%s: bad Advertisement Interval Option " + "length\n", __FUNCTION__); + } + else if (dr) { + dr->advint = ntohl(ai->nd_opt_int_interval); /* milliseconds */ + + /* Sorry for delay between reception and this setting */ + dr->advint_expire = time_second + dr->advint / 1000; + dr->advints_lost = 0; + } +} + + +/* + ****************************************************************************** + * Function: mip6_delete_ifaddr + * Description: Similar to "ifconfig delete". + * Ret value: - + ****************************************************************************** + */ +int +mip6_delete_ifaddr(struct in6_addr *addr, + struct ifnet *ifp) +{ + struct in6_aliasreq *ifra, dummy; + struct sockaddr_in6 *sa6; + struct in6_ifaddr *ia, *oia; + int s; +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) || defined (__APPLE__) + struct ifaddr *ifa; +#endif + + bzero(&dummy, sizeof(dummy)); + ifra = &dummy; + + ifra->ifra_addr.sin6_len = sizeof(ifra->ifra_addr); + ifra->ifra_addr.sin6_family = AF_INET6; + ifra->ifra_addr.sin6_addr = *addr; + + sa6 = &ifra->ifra_addr; + + if (ifp == 0) + return(EOPNOTSUPP); + + s = splnet(); + + /* + * Code recycled from in6_control(). + */ + + /* + * Find address for this interface, if it exists. 
+ */ + if (IN6_IS_ADDR_LINKLOCAL(&sa6->sin6_addr)) { + if (sa6->sin6_addr.s6_addr16[1] == 0) { + /* interface ID is not embedded by the user */ + sa6->sin6_addr.s6_addr16[1] = + htons(ifp->if_index); + } + else if (sa6->sin6_addr.s6_addr16[1] != + htons(ifp->if_index)) { + splx(s); + return(EINVAL); /* ifid is contradict */ + } + if (sa6->sin6_scope_id) { + if (sa6->sin6_scope_id != + (u_int32_t)ifp->if_index) { + splx(s); + return(EINVAL); + } + sa6->sin6_scope_id = 0; /* XXX: good way? */ + } + } + ia = in6ifa_ifpwithaddr(ifp, &ifra->ifra_addr.sin6_addr); + + /* + * for IPv4, we look for existing in6_ifaddr here to allow + * "ifconfig if0 delete" to remove first IPv4 address on the + * interface. For IPv6, as the spec allow multiple interface + * address from the day one, we consider "remove the first one" + * semantics to be not preferrable. + */ + if (ia == 0) { + splx(s); + return(EADDRNOTAVAIL); + } + /* FALLTHROUGH */ + + if (ia == 0) { + ia = (struct in6_ifaddr *) + MALLOC(sizeof(*ia), M_IFADDR, M_WAITOK); + if (ia == NULL) { + splx(s); + return (ENOBUFS); + } + bzero((caddr_t)ia, sizeof(*ia)); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + ia->ia_ifa.ifa_dstaddr + = (struct sockaddr *)&ia->ia_dstaddr; + ia->ia_ifa.ifa_netmask + = (struct sockaddr *)&ia->ia_prefixmask; + + ia->ia_ifp = ifp; + if ((oia = in6_ifaddr) != NULL) { + for ( ; oia->ia_next; oia = oia->ia_next) + continue; + oia->ia_next = ia; + } else + in6_ifaddr = ia; + ia->ia_ifa.ifa_refcnt++; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) || defined (__APPLE__) + if ((ifa = ifp->if_addrlist) != NULL) { + for ( ; ifa->ifa_next; ifa = ifa->ifa_next) + continue; + ifa->ifa_next = &ia->ia_ifa; + } else + ifp->if_addrlist = &ia->ia_ifa; +#else + TAILQ_INSERT_TAIL(&ifp->if_addrlist, &ia->ia_ifa, + ifa_list); +#endif + ia->ia_ifa.ifa_refcnt++; + } + + in6_purgeaddr(&ia->ia_ifa, ifp); + + splx(s); + return(0); +} + + +#if 0 +/* + 
****************************************************************************** + * Function: mip6_delete_ifaddr + * Description: Similar to "ifconfig delete". + * Ret value: - + ****************************************************************************** + */ +void +mip6_delete_ifaddr(struct in6_addr *addr, + struct ifnet *ifp) +{ + struct in6_aliasreq in6_addreq; + int s, error = 0; + + bzero(&in6_addreq, sizeof(in6_addreq)); + in6_addreq.ifra_addr.sin6_len = sizeof(in6_addreq.ifra_addr); + in6_addreq.ifra_addr.sin6_family = AF_INET6; + in6_addreq.ifra_addr.sin6_addr = *addr; + + s =splnet(); + error = in6_control(NULL, SIOCDIFADDR_IN6, (caddr_t)&in6_addreq, ifp +#if !defined(__bsdi__) && !(defined(__FreeBSD__) && __FreeBSD__ < 3) && !define (__APPLE__) + , NULL +#endif + ); + splx(s); + if (error) { +#if MIP6_DEBUG + mip6_debug("%s: Attempt to delete addr %s failed.\n", __FUNCTION__, + ip6_sprintf(addr)); +#endif + } +} +#endif /* 0 */ + +struct nd_prefix * +mip6_get_home_prefix(void) +{ + return(mip6_home_prefix); +} + + +int +mip6_get_md_state(void) +{ + return(mip6_md_state); +} + + +/* + ****************************************************************************** + * Function: mip6_md_exit + * Description: Tidy up after the Mobile IPv6 Movement Detection. This is + * used when releasing the kernel module. The Home Prefix is + * deleted (even if we're home) since it's parameters might be + * way wrong. The Home Address is released as well. If at home, + * the prefix and address will be automagically configured as + * specified by ND. + * Ret value: - + ****************************************************************************** + */ +void +mip6_md_exit() +{ + struct nd_prefix *pr; + + /* + * XXXYYY Should use mip6_esmq when multiple Home Addresses are + * supported. 
+ */ + pr = mip6_home_prefix; + if (pr && pr->ndpr_ifp && !IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) { + mip6_delete_ifaddr(&pr->ndpr_addr, pr->ndpr_ifp); + + prelist_remove(pr); + mip6_home_prefix = NULL; + +#if MIP6_DEBUG + mip6_debug("Home Prefix and Home Address removed.\n"); +#endif + } +} diff --git a/bsd/netinet6/mip6_mn.c b/bsd/netinet6/mip6_mn.c new file mode 100644 index 000000000..d058c9a83 --- /dev/null +++ b/bsd/netinet6/mip6_mn.c @@ -0,0 +1,3106 @@ +/* $KAME: mip6_mn.c,v 1.11 2000/03/18 03:05:42 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, 1998, 1999 and 2000 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1999 and 2000 Ericsson Radio Systems AB + * All rights reserved. + * + * Author: Conny Larsson + * + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +/* + * Mobile IPv6 Mobile Nodes + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Declaration of Global variables. */ +struct mip6_bul *mip6_bulq = NULL; /* First entry in Binding Update list */ +struct mip6_esm *mip6_esmq = NULL; /* List of event-state machines */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +struct callout_handle mip6_timer_outqueue_handle; +struct callout_handle mip6_timer_bul_handle; +struct callout_handle mip6_timer_esm_handle; +#endif + + +/* + ############################################################################## + # + # INITIALIZATION AND EXIT FUNCTIONS + # These functions are executed when the MIPv6 code is activated and de- + # activated respectively. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_mn_init + * Description: Initialization of MIPv6 variables that must be initialized + * before the MN code is executed. 
 ******************************************************************************
 */
void
mip6_mn_init(void)
{
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
	/* Initialize the callout handles for the three MN timers (output
	   queue, Binding Update list, event-state machines) so the first
	   timeout()/untimeout() calls operate on valid handles. */
	callout_handle_init(&mip6_timer_outqueue_handle);
	callout_handle_init(&mip6_timer_bul_handle);
	callout_handle_init(&mip6_timer_esm_handle);
#endif

	printf("%s: MIP6 Mobile Node initialized\n", __FUNCTION__);
}



/*
 ******************************************************************************
 * Function:    mip6_mn_exit
 * Description: This function is called when the MN module is unloaded
 *              (released) from the kernel.  It cancels the outstanding
 *              timeout callbacks and then frees every entry in the output
 *              queue, the Binding Update list and the event-state machine
 *              list.
 ******************************************************************************
 */
void
mip6_mn_exit()
{
	struct mip6_output  *outp, *outp_tmp;
	struct mip6_bul     *bulp;
	struct mip6_esm     *esp;
	int                  s;

	/* Cancel outstanding timeout function calls before freeing the
	   queues they walk. */
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
	untimeout(mip6_timer_outqueue, (void *)NULL,
		  mip6_timer_outqueue_handle);
	untimeout(mip6_timer_bul, (void *)NULL, mip6_timer_bul_handle);
	untimeout(mip6_timer_esm, (void *)NULL, mip6_timer_esm_handle);
#else
	untimeout(mip6_timer_outqueue, (void *)NULL);
	untimeout(mip6_timer_bul, (void *)NULL);
	untimeout(mip6_timer_esm, (void *)NULL);
#endif

	/* Remove each entry in every queue.  Done at splnet() so the lists
	   cannot be modified from the network interrupt level while they
	   are being torn down. */
	s = splnet();
	for (outp = mip6_outq; outp;) {
		outp_tmp = outp;
		outp = outp->next;
		/* Free the option and sub-option buffers owned by the
		   entry before the entry itself. */
		if (outp_tmp->opt)
			_FREE(outp_tmp->opt, M_TEMP);
		if (outp_tmp->subopt)
			_FREE(outp_tmp->subopt, M_TEMP);
		_FREE(outp_tmp, M_TEMP);
	}
	mip6_outq = NULL;

	/* mip6_bul_delete()/mip6_esm_delete() unlink one entry and return
	   the next one, so these loops drain the whole list. */
	for (bulp = mip6_bulq; bulp;)
		bulp = mip6_bul_delete(bulp);
	mip6_bulq = NULL;

	for (esp = mip6_esmq; esp;)
		esp = mip6_esm_delete(esp);
	mip6_esmq = NULL;
	splx(s);
}



/*
 ##############################################################################
 #
 # RECEIVING FUNCTIONS
 # These functions receive the incoming IPv6 packet and further processing of
 # the packet depends on the content in the packet.
 #
 ##############################################################################
 */

/*
 ******************************************************************************
 * Function:    mip6_new_defrtr
 * Description: Called from the move detection algorithm when it has decided
 *              to change default router, i.e the network that we were
 *              connected to has changed.
 * Ret value:   -
 ******************************************************************************
 */
void
mip6_new_defrtr(state, home_prefix, prim_prefix, def_router)
int state;                       /* State from move detection algorithm */
struct nd_prefix *home_prefix;   /* Prefix for Home Address */
struct nd_prefix *prim_prefix;   /* Prefix for primary care-of address */
struct nd_defrouter *def_router; /* New default router being used.
				    NOTE(review): currently unused in this
				    function body. */
{
	struct in6_addr      *home_addr;  /* Home Address for Mobile Node */
	struct in6_addr      *prim_addr;  /* Primary Care-of Address for MN */
	struct mip6_esm      *esp;        /* Home address entry */
	struct mip6_bul      *bulp;       /* Entry in the BU list */
	struct ifaddr        *if_addr;    /* Interface address */
	struct mip6_bu_data   bu_data;    /* Data used when a BU is created */
	struct in6_addr       ll_all_addr;/* Link local all nodes address */
	struct in6_addr       old_coa;
	struct sockaddr_in6   sin6;
	u_int32_t             lifetime;   /* Lifetime used in BU */
	u_long                na_flags;   /* Flags for NA message */

	/* Check incoming parameters */
	if (home_prefix != NULL)
		home_addr = &home_prefix->ndpr_addr;
	else {
		log(LOG_ERR, "%s: No home address configured\n", __FUNCTION__);
		return;
	}

	esp = mip6_esm_find(home_addr);
	if (esp == NULL) {
		log(LOG_ERR,
		    "%s: No event-state machine found\n", __FUNCTION__);
		return;
	}

	if (prim_prefix != NULL)
		prim_addr = &prim_prefix->ndpr_addr;
	else
		prim_addr = NULL;

	/* Decide how the mobile node has moved.  Three cases: lost all
	   connectivity, returned home, or moved to a (new) foreign net. */
	if ((prim_prefix == NULL) && (state == MIP6_MD_UNDEFINED)) {
		/* The Mobile Node is not connected to a network */
		esp->state = MIP6_STATE_UNDEF;
		esp->coa = in6addr_any;
		if (esp->ha_fn != NULL) {
			_FREE(esp->ha_fn, M_TEMP);
			esp->ha_fn = NULL;
		}
		if (mip6_tunnel(NULL, NULL, MIP6_TUNNEL_DEL, MIP6_NODE_MN,
				(void *)esp))
			return;
	} else if ((prim_prefix == NULL) && (state == MIP6_MD_HOME)) {
		/* The Mobile Node is returning to the home link.  Change the
		   parameters for the event-state machine. */
		esp->state = MIP6_STATE_DEREG;
		old_coa = esp->coa;
		esp->coa = esp->home_addr;

		/* Send a BU de-registration to the Home Agent. */
		bulp = mip6_bul_find(NULL, home_addr);
		if (bulp == NULL) {
			/* The event-state machine was in state undefined. */
			esp->state = MIP6_STATE_HOME;

			/* When returning home and no home registration exists
			   we can not assume the home address to be unique.
			   Perform DAD, but find the i/f address first. */
			bzero(&sin6, sizeof(struct sockaddr_in6));
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_family = AF_INET6;
			sin6.sin6_addr = esp->home_addr;

			if_addr = ifa_ifwithaddr((struct sockaddr *)&sin6);
			if (if_addr == NULL)
				return;

			((struct in6_ifaddr *)if_addr)->ia6_flags |=
				IN6_IFF_TENTATIVE;
			nd6_dad_start(if_addr, NULL);
			return;
		}

		/* De-registration: the care-of address is set equal to the
		   binding (home) address. */
		bulp->lifetime = mip6_config.hr_lifetime;
		bulp->refreshtime = bulp->lifetime;
		bulp->coa = bulp->bind_addr;

		bu_data.prefix_len = esp->prefix_len;
		bu_data.ack = 1;

		if (mip6_send_bu(bulp, &bu_data, NULL) != 0)
			return;

		/* Send a BU to the previous foreign network. */
		if ( !IN6_IS_ADDR_UNSPECIFIED(&old_coa) &&
		     (esp->ha_fn != NULL)) {
			/* Find lifetime used for the BU to the def router. */
			lifetime = mip6_prefix_lifetime(&old_coa);
			lifetime = min(lifetime, MIP6_BU_LIFETIME_DEFRTR);

			/* Create a tunnel used by the MN to receive
			   incoming tunneled packets. */
			if (mip6_tunnel(home_addr, &esp->ha_fn->addr,
					MIP6_TUNNEL_ADD,
					MIP6_NODE_MN, (void *)esp))
				return;

			mip6_send_bu2fn(&old_coa, esp->ha_fn, home_addr,
					esp->ifp, lifetime);
			_FREE(esp->ha_fn, M_TEMP);
			esp->ha_fn = NULL;
		}

		/* The Mobile Node must send a Neighbor Advertisement to
		   inform other nodes that it has arrived back at its home
		   network.  The first NA will be sent in the create
		   function, the remaining NAs are sent by the timer
		   function. */
		ll_all_addr = in6addr_linklocal_allnodes;
		na_flags = ND_NA_FLAG_OVERRIDE;
		mip6_na_create(home_addr, &ll_all_addr, home_addr,
			       esp->prefix_len, na_flags, 1);
	} else if ((prim_prefix != NULL) && (state == MIP6_MD_FOREIGN)) {
		/* If no Home Agent Address exists, build an anycast
		   address from the home prefix (DHAAD destination). */
		if (IN6_IS_ADDR_UNSPECIFIED(&esp->ha_hn)) {
			mip6_build_ha_anycast(&esp->ha_hn, &esp->home_addr,
					      esp->prefix_len);
			if (IN6_IS_ADDR_UNSPECIFIED(&esp->ha_hn)) {
				log(LOG_ERR,
				    "%s: Could not create anycast address "
				    "for Mobile Node, wrong prefix length\n",
				    __FUNCTION__);
				return;
			}
		}

		if ((esp->state == MIP6_STATE_UNDEF) ||
		    (esp->state == MIP6_STATE_HOME) ||
		    (esp->state == MIP6_STATE_DEREG)) {
			/* Home Network --> Foreign Network */
			/* Update state information for the home address. */
			esp->state = MIP6_STATE_NOTREG;
			esp->coa = *prim_addr;
			if (esp->ha_fn != NULL) {
				_FREE(esp->ha_fn, M_TEMP);
				esp->ha_fn = NULL;
			}

			/* Find an existing or create a new BUL entry. */
			bulp = mip6_bul_find(NULL, &esp->home_addr);
			if (bulp == NULL) {
				bulp = mip6_bul_create(&esp->ha_hn,
						       &esp->home_addr,
						       prim_addr,
						       mip6_config.hr_lifetime,
						       1);
				if (bulp == NULL)
					return;
			} else {
				bulp->coa = *prim_addr;
				bulp->lifetime = mip6_config.hr_lifetime;
				bulp->refreshtime = bulp->lifetime;
			}

			/* Send a BU registration to the Home Agent.
			   (NOTE(review): coa/lifetime/refreshtime are
			   assigned twice on the else path above; harmless
			   redundancy.) */
			bulp->coa = *prim_addr;
			bulp->lifetime = mip6_config.hr_lifetime;
			bulp->refreshtime = mip6_config.hr_lifetime;

			bu_data.prefix_len = esp->prefix_len;
			bu_data.ack = 1;

			if (mip6_send_bu(bulp, &bu_data, NULL) != 0)
				return;
		} else if (esp->state == MIP6_STATE_REG ||
			   esp->state == MIP6_STATE_REREG ||
			   esp->state == MIP6_STATE_REGNEWCOA ||
			   esp->state == MIP6_STATE_NOTREG) {
			/* Foreign Network --> New Foreign Network */
			/* Update state information for the home address. */
			esp->state = MIP6_STATE_REGNEWCOA;
			old_coa = esp->coa;
			esp->coa = *prim_addr;

			/* Find an existing or create a new BUL entry. */
			bulp = mip6_bul_find(NULL, &esp->home_addr);
			if (bulp == NULL) {
				bulp = mip6_bul_create(&esp->ha_hn,
						       &esp->home_addr,
						       prim_addr,
						       mip6_config.hr_lifetime,
						       1);
				if (bulp == NULL)
					return;
			}

			/* Send a BU registration to the Home Agent. */
			bulp->coa = *prim_addr;
			bulp->lifetime = mip6_config.hr_lifetime;
			bulp->refreshtime = mip6_config.hr_lifetime;
			bulp->no_of_sent_bu = 0;

			bu_data.prefix_len = esp->prefix_len;
			bu_data.ack = 1;

			if (mip6_send_bu(bulp, &bu_data, NULL) != 0)
				return;

			/* Send a BU registration to the previous default
			   router. */
			if ( !IN6_IS_ADDR_UNSPECIFIED(&old_coa) &&
			     (esp->ha_fn)) {
				/* Find lifetime to be used for the BU to
				   the def router. */
				lifetime = mip6_prefix_lifetime(&old_coa);
				lifetime = min(lifetime,
					       MIP6_BU_LIFETIME_DEFRTR);

				/* Create a tunnel used by the MN to receive
				   incoming tunneled packets. */
				if (mip6_tunnel(prim_addr, &esp->ha_fn->addr,
						MIP6_TUNNEL_MOVE,
						MIP6_NODE_MN, (void *)esp))
					return;

				mip6_send_bu2fn(&old_coa, esp->ha_fn,
						prim_addr,
						esp->ifp, lifetime);
				_FREE(esp->ha_fn, M_TEMP);
				esp->ha_fn = NULL;
			}
		}
	} else
		esp->state = MIP6_STATE_UNDEF;
}



/*
 ##############################################################################
 #
 # CONTROL SIGNAL FUNCTIONS
 # Functions for processing of incoming control signals (Binding Acknowledge-
 # ment and Binding Request option) and sub-options (Home Agents list).
 #
 ##############################################################################
 */

/*
 ******************************************************************************
 * Function:    mip6_rec_ba
 * Description: Receive a BA option and evaluate the contents.
 * Ret value:   0             Everything is OK.
 *              IPPROTO_DONE  Error code used when something went wrong.
 ******************************************************************************
 */
int
mip6_rec_ba(m_in, off)
struct mbuf *m_in;  /* Mbuf containing the entire IPv6 packet */
int off;            /* Offset from start of mbuf to start of dest option */
{
	struct mip6_esm  *esp;       /* Home address entry */
	struct mip6_bul  *bulp;      /* Entry in the Binding Update list */
	struct in6_addr  *from_src;  /* Source address in received packet */
	struct in6_addr   bind_addr; /* Binding addr in BU causing this BA */
	u_int8_t          hr_flag;
	int               error;
#if MIP6_DEBUG
	u_int8_t          var;
	int               ii, offset;
#endif

	/* Make sure that the BA contains a valid AH or ESP header. */
#if IPSEC
#ifndef __OpenBSD__
	if ( !((m_in->m_flags & M_AUTHIPHDR && m_in->m_flags & M_AUTHIPDGM) ||
	       (m_in->m_flags & M_AUTHIPDGM && m_in->m_flags & M_DECRYPTED))) {
		ip6stat.ip6s_badoptions++;
		log(LOG_ERR, "%s: No AH or ESP included in BA\n",
		    __FUNCTION__);
		return IPPROTO_DONE;
	}
#endif
#endif

	/* Make sure that the length field in the BA is >= 11. */
	if (mip6_inp->ba_opt->len < IP6OPT_BALEN) {
		ip6stat.ip6s_badoptions++;
		log(LOG_ERR, "%s: Length field in BA < 11\n", __FUNCTION__);
		return IPPROTO_DONE;
	}

	/* Make sure that the sent BU sequence number == received BA sequence
	   number.  But first, find the source address for the incoming
	   packet (it may include a home address option). */
	if (mip6_inp->optflag & MIP6_DSTOPT_HA)
		from_src = &mip6_inp->ha_opt->home_addr;
	else
		from_src = &mip6_inp->ip6_src;

	bulp = mip6_bul_find(from_src, &mip6_inp->ip6_dst);
	if (bulp == NULL) {
		log(LOG_ERR, "%s: No Binding Update List entry found\n",
		    __FUNCTION__);
		return IPPROTO_DONE;
	}

	if (mip6_inp->ba_opt->seqno != bulp->seqno) {
		ip6stat.ip6s_badoptions++;
		log(LOG_ERR,
		    "%s: Received sequence number not equal to sent\n",
		    __FUNCTION__);
		return IPPROTO_DONE;
	}

#if MIP6_DEBUG
	mip6_debug("\nReceived Binding Acknowledgement\n");
	mip6_debug("IP Header Src:      %s\n", ip6_sprintf(from_src));
	mip6_debug("IP Header Dst:      %s\n",
		   ip6_sprintf(&mip6_inp->ip6_dst));
	mip6_debug("Type/Length/Status: %x / %u / %u\n",
		   mip6_inp->ba_opt->type,
		   mip6_inp->ba_opt->len, mip6_inp->ba_opt->status);
	mip6_debug("Seq no/Life time:   %u / %u\n", mip6_inp->ba_opt->seqno,
		   mip6_inp->ba_opt->lifetime);
	mip6_debug("Refresh time:       %u\n", mip6_inp->ba_opt->refresh);

	if (mip6_inp->ba_opt->len > IP6OPT_BALEN) {
		offset = mip6_opt_offset(m_in, off, IP6OPT_BINDING_ACK);
		if (offset == 0)
			goto end_debug;

		/* Hex dump of the TLV coded sub-options, 16 bytes/row. */
		mip6_debug("Sub-options present (TLV coded)\n");
		for (ii = IP6OPT_BALEN; ii < mip6_inp->ba_opt->len; ii++) {
			if ((ii - IP6OPT_BALEN) % 16 == 0)
				mip6_debug("\t0x:");
			if ((ii - IP6OPT_BALEN) % 4 == 0)
				mip6_debug(" ");
			m_copydata(m_in, offset + 2 + ii, sizeof(var),
				   (caddr_t)&var);
			mip6_debug("%02x", var);
			if ((ii - IP6OPT_BALEN + 1) % 16 == 0)
				mip6_debug("\n");
		}
		if ((ii - IP6OPT_BALEN) % 16)
			mip6_debug("\n");
	}
  end_debug:
#endif

	/* Check the status field in the BA.  Values >= 128 indicate that
	   the Binding Update was rejected. */
	if (mip6_inp->ba_opt->status >= 128) {
		/* Remove the BUL entry and process the error
		   (order is important: the entry's fields are saved
		   before the entry is deleted). */
		bind_addr = bulp->bind_addr;
		hr_flag = bulp->hr_flag;
		mip6_bul_delete(bulp);

		error = mip6_ba_error(from_src, &mip6_inp->ip6_dst,
				      &bind_addr, hr_flag);
		return error;
	}

	/* BA was accepted.  Update corresponding entry in the BUL.
	   Stop retransmitting the BU. */
	bulp->no_of_sent_bu = 0;
	bulp->update_rate = MIP6_MAX_UPDATE_RATE;
	mip6_clear_retrans(bulp);

	/* If the BA was received from the Home Agent the state
	   of the event state machine shall be updated. */
	if (bulp->hr_flag) {
		esp = mip6_esm_find(&bulp->bind_addr);
		if (esp == NULL) {
			log(LOG_ERR, "%s: No event-state machine found\n",
			    __FUNCTION__);
			return IPPROTO_DONE;
		}

		/* If Dynamic Home Agent Address Discovery, change
		   HA address and remove the esp->dad entry. */
		if (esp->dad) {
			esp->ha_hn = *from_src;
			bulp->dst_addr = *from_src;
			if (esp->dad->hal)
				_FREE(esp->dad->hal, M_TEMP);
			_FREE(esp->dad, M_TEMP);
			esp->dad = NULL;
		}

		/* Update the state for the home address. */
		if (esp->state == MIP6_STATE_DEREG) {
			mip6_bul_delete(bulp);

			/* Remove the tunnel for the MN */
			mip6_tunnel(NULL, NULL, MIP6_TUNNEL_DEL,
				    MIP6_NODE_MN, (void *)esp);

			/* Send BU to each CN in the BUL to remove its
			   BC entry. */
			mip6_update_cns(&esp->home_addr,
					&esp->home_addr, 0, 0);
			mip6_outq_flush();

			/* Don't set the state until BUs have been sent to
			   all CNs, otherwise the Home Address option will
			   not be added for the outgoing packet. */
			esp->state = MIP6_STATE_HOME;
			esp->coa = in6addr_any;
		} else {
			esp->state = MIP6_STATE_REG;

			/* Create or modify a tunnel used by the MN to
			   receive incoming tunneled packets. */
			if (mip6_tunnel(&esp->coa, &esp->ha_hn,
					MIP6_TUNNEL_MOVE, MIP6_NODE_MN,
					(void *)esp))
				return IPPROTO_DONE;

			/* Send BU to each CN in the BUL to update its
			   BC entry. */
			bulp->lifetime = mip6_inp->ba_opt->lifetime;
			bulp->refreshtime = mip6_inp->ba_opt->refresh;
			mip6_update_cns(&esp->home_addr, &esp->coa, 0,
					bulp->lifetime);
		}
	}
	return 0;
}



/*
 ******************************************************************************
 * Function:    mip6_rec_br
 * Description: Receive a Binding Request option and evaluate the contents.
 * Ret value:   0             Everything is OK.
 *              IPPROTO_DONE  Error code used when something went wrong.
 ******************************************************************************
 */
int
mip6_rec_br(m_in, off)
struct mbuf *m_in;  /* Mbuf containing the entire IPv6 packet */
int off;            /* Offset from start of mbuf to start of dest option */
{
	struct mip6_opt_bu    *bu_opt;         /* BU allocated in function */
	struct in6_addr       *from_src;       /* Src address in rec packet */
	struct mip6_esm       *esp;            /* Home address entry */
	struct mip6_bul       *bulp_cn;        /* CN entry in the BU list */
	struct mip6_bul       *bulp_ha;        /* HA entry in the BU list */
	struct mip6_subbuf    *subbuf = NULL;  /* Sub-options for an option */
	struct mip6_subopt_coa altcoa;         /* Alternate care-of address */
#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__)
	long time_second = time.tv_sec;
#endif
#if MIP6_DEBUG
	const struct mbuf *m = (const struct mbuf *)m_in;
	u_int8_t var;
	int ii, offset;
#endif

	/* Make sure this really is a Binding Request option. */
	if (mip6_inp->br_opt->type != IP6OPT_BINDING_REQ) {
		ip6stat.ip6s_badoptions++;
		return IPPROTO_DONE;
	}

#if MIP6_DEBUG
	mip6_debug("\nReceived Binding Request\n");
	mip6_debug("Type/Length: %x / %u\n", mip6_inp->br_opt->type,
		   mip6_inp->br_opt->len);

	if (mip6_inp->br_opt->len > IP6OPT_BRLEN) {
		offset = mip6_opt_offset(m_in, off, IP6OPT_BINDING_REQ);
		if (offset == 0)
			goto end_debug;

		/* Hex dump of the TLV coded sub-options, 16 bytes/row. */
		mip6_debug("Sub-options present (TLV coded)\n");
		for (ii = IP6OPT_BRLEN; ii < mip6_inp->br_opt->len; ii++) {
			if (m->m_len < offset + 2 + ii + 1)
				break;
			if ((ii - IP6OPT_BRLEN) % 16 == 0)
				mip6_debug("\t0x:");
			if ((ii - IP6OPT_BRLEN) % 4 == 0)
				mip6_debug(" ");
			m_copydata(m_in, offset + 2 + ii, sizeof(var),
				   (caddr_t)&var);
			mip6_debug("%02x", var);
			if ((ii - IP6OPT_BRLEN + 1) % 16 == 0)
				mip6_debug("\n");
		}
		if ((ii - IP6OPT_BRLEN) % 16)
			mip6_debug("\n");
	}
  end_debug:
#endif

	/* Check if the BR includes a Unique Identifier sub-option. */
	if (mip6_inp->br_opt->len > IP6OPT_BRLEN) {
		/* Received tunneled Router Advertisement when the MN's home
		   subnet is renumbered while the MN is away from home. */
		/* XXX Code has to be added. */
	} else {
		/* A CN is requesting the MN to send a BU to update its BC. */
		/* Find the source address for the incoming packet (it may
		   include a home address option). */
		if (mip6_inp->optflag & MIP6_DSTOPT_HA)
			from_src = &mip6_inp->ha_opt->home_addr;
		else
			from_src = &mip6_inp->ip6_src;

		/* Find out which lifetime to use in the BU */
		bulp_cn = mip6_bul_find(from_src, &mip6_inp->ip6_dst);
		if (bulp_cn == NULL)
			return IPPROTO_DONE;

		esp = mip6_esm_find(&mip6_inp->ip6_dst);
		if (esp == NULL) {
			log(LOG_ERR, "%s: no event-state machine found\n",
			    __FUNCTION__);
			return IPPROTO_DONE;
		}

		bulp_ha = mip6_bul_find(&esp->ha_hn, &mip6_inp->ip6_dst);
		if (bulp_ha == NULL)
			return IPPROTO_DONE;

		/* Only refresh the CN binding if the home registration
		   outlives it. */
		if (bulp_ha->lifetime > bulp_cn->lifetime) {
			/* Queue a new BU for this CN. */
			bulp_cn->seqno += 1;
			bu_opt = mip6_create_bu(0, 0, 0, bulp_cn->seqno,
						bulp_ha->lifetime);
			if (bu_opt == NULL)
				return IPPROTO_DONE;

			altcoa.type = IP6SUBOPT_ALTCOA;
			altcoa.len = IP6OPT_COALEN;
			altcoa.coa = bulp_cn->coa;
			if (mip6_store_subopt(&subbuf, (caddr_t)&altcoa)
			    != 0) {
				if (subbuf)
					_FREE(subbuf, M_TEMP);
				return IPPROTO_DONE;
			}

			/* Queue the BU for piggybacking on the next packet
			   to this CN. */
			mip6_outq_create(bu_opt, subbuf, &esp->home_addr,
					 from_src, NOT_SENT);

			bulp_cn->lifetime = bulp_ha->lifetime;
			bulp_cn->refreshtime = bulp_ha->lifetime;
			bulp_cn->lasttime = time_second;
			bulp_cn->no_of_sent_bu = 0;
			bulp_cn->update_rate = MIP6_MAX_UPDATE_RATE;
			mip6_clear_retrans(bulp_cn);
		}
	}
	return 0;
}



/*
 ******************************************************************************
 * Function:    mip6_rec_hal
 * Description: Performs Dynamic Home Agent Address Discovery. Called when a
 *              list of global home agent addresses is received. Checks if the
 *              received packets source address is in the list. If not it shall
 *              be added as the first entry in the list.
 *              Save the home agent address list in the event-state machine
 *              and send a BU to the first address in the list.
+ * Note: The timeout used in the BU is a trade off between how long + * time it shall wait before the next entry in the list is picked + * and, if successful first registration, the time to perform + * next registration. I believe 16 - 32 seconds will be fine. + * Ret value: 0 Everything is OK. + * IPPROTO_DONE Error code used when something went wrong. + ****************************************************************************** + */ +int +mip6_rec_hal(src, dst, hal) +struct in6_addr *src; /* Incoming packet source address */ +struct in6_addr *dst; /* Incoming packet destination address */ +struct mip6_subopt_hal *hal; /* List of HA's on the home link */ +{ + struct mip6_esm *esp; /* Event-state machine */ + struct mip6_bul *bulp; /* Entry in the Binding Update list */ + struct mip6_subbuf *subbuf; /* Buffer containing sub-options */ + struct mip6_bu_data bu_data; /* Data used when a BU is created */ + int found, ii, new_len, index; + + subbuf = NULL; + + /* Find the event-state machine */ + esp = mip6_esm_find(dst); + if (esp == NULL) { + log(LOG_ERR, + "%s: Couldn't find an event-state machine for " + "home address %s\n", + __FUNCTION__, ip6_sprintf(dst)); + return IPPROTO_DONE; + } + + /* If the incoming source address is not in the list of home + agents it is treated as the HA with highest preference. + Otherwise, the HA's are tried in the listed order. */ + found = 0; + if (hal == NULL) + new_len = IP6OPT_HALEN; + else { + index = hal->len / IP6OPT_HALEN; + for (ii = 0; ii < index; ii++) { + if (IN6_ARE_ADDR_EQUAL(&hal->halist[ii], src)) { + found = 1; + break; + } + } + if (found) + new_len = hal->len; + else + new_len = hal->len + IP6OPT_HALEN; + } + + /* Store the home agents list in the event-state machine. Add the + incoming packets source address if necessary. 
*/ + esp->dad = (struct mip6_dad *)MALLOC(sizeof(struct mip6_dad), + M_TEMP, M_WAITOK); + if (esp->dad == NULL) + return IPPROTO_DONE; + bzero(esp->dad, sizeof(struct mip6_dad)); + + index = new_len / IP6OPT_HALEN; + esp->dad->hal = (struct mip6_subopt_hal *) + MALLOC(sizeof(struct mip6_subopt_hal) + + ((index - 1) * sizeof(struct in6_addr)), + M_TEMP, M_WAITOK); + if (esp->dad->hal == NULL) + return IPPROTO_DONE; + + esp->dad->hal->type = IP6SUBOPT_HALIST; + esp->dad->hal->len = new_len; + if (found) { + for (ii = 0; ii < index; ii++) { + bcopy(&hal->halist[ii], &esp->dad->hal->halist[ii], + sizeof(struct in6_addr)); + } + } else { + bcopy(src, &esp->dad->hal->halist[0], sizeof(struct in6_addr)); + for (ii = 0; ii < index - 1; ii++) { + bcopy(&hal->halist[ii], &esp->dad->hal->halist[ii+1], + sizeof(struct in6_addr)); + } + } + + /* Create a BUL entry. If there exist one already something is + wrong and an error message is sent to the console. */ + bulp = mip6_bul_find(src, dst); + if (bulp != NULL) { + log(LOG_ERR, + "%s: A BUL entry found but it shouldn't have been. " + "Internal error that must be looked into\n", __FUNCTION__); + return IPPROTO_DONE; + } + + bulp = mip6_bul_create(&esp->dad->hal->halist[0], &esp->home_addr, + &esp->coa, MIP6_BU_LIFETIME_DHAAD, 1); + if (bulp == NULL) + return IPPROTO_DONE; + + /* Send a BU registration to the Home Agent with highest preference. */ + bu_data.prefix_len = esp->prefix_len; + bu_data.ack = 1; + + if (mip6_send_bu(bulp, &bu_data, subbuf) != 0) + return IPPROTO_DONE; + + /* Set index to next entry to be used in the list. + Starts at 0 (which has been sent in this function) */ + if ((esp->dad->hal->len / IP6OPT_HALEN) == 1) + esp->dad->index = 0; + else + esp->dad->index = 1; + + return 0; +}; + + + +/* + ****************************************************************************** + * Function: mip6_rec_ramn + * Description: Processed by a Mobile Node. 
Includes a Router Advertisement + * with a H-bit set in the flags variable (checked by the calling + * function). + * The global unicast address for the home agent with the highest + * preference and the time when it expires are stored. + * Ret value: 0 Everything is OK. Otherwise appropriate error code. + ****************************************************************************** + */ +int +mip6_rec_ramn(m, off) +struct mbuf *m; /* Mbuf containing the entire IPv6 packet */ +int off; /* Offset from start of mbuf to start of RA */ +{ + struct ip6_hdr *ip6; /* IPv6 header */ + struct nd_router_advert *ra; /* Router Advertisement */ + struct mip6_esm *esp; /* Event-state machine */ + struct nd_opt_hai *hai; /* Home Agent information option */ + struct nd_opt_prefix_info *pi; /* Ptr to prefix information */ + u_int8_t *opt_ptr; /* Ptr to current option in RA */ + int cur_off; /* Cur offset from start of RA */ + caddr_t icmp6msg; /* Copy of mbuf (consequtively) */ + int16_t tmp_pref; + time_t tmp_lifetime; + int icmp6len; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + /* Find out if the RA can be processed */ + ip6 = mtod(m, struct ip6_hdr *); + if (ip6->ip6_hlim != 255) { + log(LOG_INFO, + "%s: Invalid hlim %d in Router Advertisement\n", + __FUNCTION__, + ip6->ip6_hlim); + return 0; + } + + if (!IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src)) { + log(LOG_INFO, + "%s: Source address %s is not link-local\n", __FUNCTION__, + ip6_sprintf(&ip6->ip6_src)); + return 0; + } + + /* The mbuf data must be stored consequtively to be able to + cast data from it. */ + icmp6len = m->m_pkthdr.len - off; + icmp6msg = (caddr_t)MALLOC(icmp6len, M_TEMP, M_WAITOK); + if (icmp6msg == NULL) + return IPPROTO_DONE; + + m_copydata(m, off, icmp6len, icmp6msg); + ra = (struct nd_router_advert *)icmp6msg; + + /* First, if a Home Agent Information option is present then the Home + Agent preference and lifetime is taken from the option. 
*/ + cur_off = sizeof(struct nd_router_advert); + tmp_lifetime = ntohl(ra->nd_ra_router_lifetime); + tmp_pref = 0; + + while (cur_off < icmp6len) { + opt_ptr = ((caddr_t)icmp6msg + cur_off); + if (*opt_ptr == ND_OPT_HA_INFORMATION) { + /* Check the home agent information option */ + hai = (struct nd_opt_hai *)opt_ptr; + if (hai->nd_opt_hai_len != 1) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + + tmp_pref = ntohs(hai->nd_opt_hai_pref); + tmp_lifetime = ntohs(hai->nd_opt_hai_lifetime); + cur_off += 8; + continue; + } else { + if (*(opt_ptr + 1) == 0) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + cur_off += *(opt_ptr + 1) * 8; + } + } + + /* Go through all prefixes and store global address for the Home + Agent with the highest preference. */ + cur_off = sizeof(struct nd_router_advert); + while (cur_off < icmp6len) { + opt_ptr = ((caddr_t)icmp6msg + cur_off); + if (*opt_ptr == ND_OPT_PREFIX_INFORMATION) { + /* Check the prefix information option */ + pi = (struct nd_opt_prefix_info *)opt_ptr; + if (pi->nd_opt_pi_len != 4) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + + if (!(pi->nd_opt_pi_flags_reserved & + ND_OPT_PI_FLAG_RTADDR)) { + cur_off += 4 * 8; + continue; + } + + if (IN6_IS_ADDR_MULTICAST(&pi->nd_opt_pi_prefix) || + IN6_IS_ADDR_LINKLOCAL(&pi->nd_opt_pi_prefix)) { + cur_off += 4 * 8; + continue; + } + + /* Aggregatable unicast address, rfc2374 */ + if (((pi->nd_opt_pi_prefix.s6_addr8[0] & 0xe0) > + 0x10) && (pi->nd_opt_pi_prefix_len != 64)) { + cur_off += 4 * 8; + continue; + } + + /* Only save the address if it's equal to the coa. 
*/ + for (esp = mip6_esmq; esp; esp = esp->next) { + if (in6_are_prefix_equal( + &pi->nd_opt_pi_prefix, + &esp->coa, + pi->nd_opt_pi_prefix_len)) { + if (esp->ha_fn == NULL) { + esp->ha_fn = (struct mip6_hafn *) + MALLOC(sizeof(struct mip6_hafn), M_TEMP, M_WAITOK); + if (esp->ha_fn == NULL) + return ENOBUFS; + bzero(esp->ha_fn, sizeof(struct mip6_hafn)); + + esp->ha_fn->addr = pi->nd_opt_pi_prefix; + esp->ha_fn->prefix_len = pi->nd_opt_pi_prefix_len; + esp->ha_fn->pref = tmp_pref; + esp->ha_fn->time = time_second + tmp_lifetime; + } else { + if (tmp_pref > esp->ha_fn->pref) { + esp->ha_fn->addr = pi->nd_opt_pi_prefix; + esp->ha_fn->prefix_len = pi->nd_opt_pi_prefix_len; + esp->ha_fn->pref = tmp_pref; + esp->ha_fn->time = time_second + tmp_lifetime; + } else + esp->ha_fn->time = time_second + tmp_lifetime; + } + } + } + + cur_off += 4 * 8; + continue; + } else { + if (*(opt_ptr + 1) == 0) { + ip6stat.ip6s_badoptions++; + return IPPROTO_DONE; + } + cur_off += *(opt_ptr + 1) * 8; + } + } + return 0; +} + + +/* + ****************************************************************************** + * Function: mip6_route_optimize + * Description: When a tunneled packet is received a BU shall be sent to the + * CN if no Binding Update List entry exist or if the rate limit + * for sending BUs for an existing BUL entry is not exceded. + * Ret value: 0 Everything is OK. + * IPPROTO_DONE Error code used when something went wrong. 
 ******************************************************************************
 */
int
mip6_route_optimize(m)
struct mbuf *m;  /* Mbuf containing the entire IPv6 packet */
{
	struct ip6_hdr         *ip6;
	struct mip6_esm        *esp;
	struct mip6_bul        *bulp, *bulp_hr;
	struct mip6_subbuf     *subbuf;   /* Buffer containing sub-options */
	struct mip6_bu_data     bu_data;  /* Data used when a BU is created */
	struct mip6_subopt_coa  altcoa;   /* Alternate care-of address */
	time_t                  t;
#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__)
	long time_second = time.tv_sec;
#endif

	/* Make sure that all requirements are met for sending a BU to
	   the original sender of the packet: the packet must have
	   arrived through the home agent tunnel and we must have an
	   event-state machine for its destination (our home address). */
	if (!(m->m_flags & M_MIP6TUNNEL))
		return 0;

	ip6 = mtod(m, struct ip6_hdr *);
	esp = mip6_esm_find(&ip6->ip6_dst);
	if (esp == NULL)
		return 0;

	/* Try to find an existing BUL entry. */
	bulp = mip6_bul_find(&ip6->ip6_src, &esp->home_addr);
	if (bulp == NULL) {
		/* Some information needed from the BU home registration */
		bulp_hr = mip6_bul_find(NULL, &esp->home_addr);
		if (bulp_hr == NULL)
			return 0;
		bulp = mip6_bul_create(&ip6->ip6_src, &esp->home_addr,
				       &esp->coa, bulp_hr->lifetime, 0);
		if (bulp == NULL)
			return IPPROTO_DONE;
	} else {
		/* If the existing BUL entry is waiting for an ack or
		   has disabled sending BU, no BU shall be sent. */
		if ((bulp->state) || (bulp->bu_flag == 0))
			return 0;

		/* Check the rate limiting for sending Binding Updates */
		t = (time_t)time_second;
#if MIP6_DEBUG
		mip6_debug("%s: Rate limiting for sending BU\n", __FUNCTION__);
		mip6_debug("(time - bulp->lasttime) < bulp->update_rate\n");
		mip6_debug("time = %lu\n", (u_long)t);
		mip6_debug("bulp->lasttimetime = %lu\n", bulp->lasttime);
		mip6_debug("bulp->update_rate = %d\n", bulp->update_rate);
#endif
		if ((t - bulp->lasttime) < bulp->update_rate)
			return 0;
	}

	/* OK we have to send a BU.  Build an alternate care-of address
	   sub-option carrying the current care-of address. */
	subbuf = NULL;
	bu_data.prefix_len = esp->prefix_len;
	bu_data.ack = 0;

	altcoa.type = IP6SUBOPT_ALTCOA;
	altcoa.len = IP6OPT_COALEN;
	altcoa.coa = bulp->coa;
	if (mip6_store_subopt(&subbuf, (caddr_t)&altcoa)) {
		if (subbuf) _FREE(subbuf, M_TEMP);
		return IPPROTO_DONE;
	}

	/* NOTE(review): if mip6_send_bu() fails, subbuf is not freed here;
	   presumably mip6_send_bu() takes ownership -- verify against its
	   implementation. */
	if (mip6_send_bu(bulp, &bu_data, subbuf) != 0)
		return IPPROTO_DONE;
	return 0;
}



/*
 ##############################################################################
 #
 # SENDING FUNCTIONS
 # These functions are called when an IPv6 packet has been created internally
 # by MIPv6 and shall be sent directly to its destination or when an option
 # (BU, BA, BR) has been created and shall be stored in the mipv6 output queue
 # for piggybacking on the first outgoing packet sent to the node.
 #
 ##############################################################################
 */

/*
 ******************************************************************************
 * Function:    mip6_send_bu
 * Description: Send a Binding Update option to a node (CN, HA or MN). A new
 *              IPv6 packet is built including an IPv6 header and a Destination
 *              header (where the BU is stored).
 * Arguments:   bulp   - BUL entry for which the BU is sent.
 *              data   - BU data needed when the BU option is created. NULL
 *                       if the BU option stored in the BUL entry is used.
 *              subopt - Sub-options for the BU. NULL if the BU sub-options
 *                       stored in the BUL entry is used.
 * Note:        The following combinations of indata are possible:
 *              data == NULL && subbuf == NULL  Use existing data, i.e used for
 *                                              retransmission
 *              data != NULL && subbuf == NULL  Clear existing data and send a
 *                                              new BU without sub-options
 *              data != NULL && subbuf != NULL  Clear existing data and send a
 *                                              new BU with new sub-options
 * Ret value:   0 if everything OK. Otherwise appropriate error code.
+ ****************************************************************************** + */ +int +mip6_send_bu(bulp, data, subbuf) +struct mip6_bul *bulp; +struct mip6_bu_data *data; +struct mip6_subbuf *subbuf; +{ + struct mbuf *m_ip6; /* IPv6 header stored in a mbuf */ + struct ip6_pktopts *pktopt; /* Options for IPv6 packet */ + struct mip6_opt_bu *bu_opt; /* Binding Update option */ + struct mip6_subbuf *bu_subopt; /* Binding Update sub-options */ + struct mip6_esm *esp; /* Home address entry */ + int error; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__) + long time_second = time.tv_sec; +#endif +#if MIP6_DEBUG + int ii; + u_int8_t var; +#endif + + /* Make sure that it's allowed to send a BU */ + if (bulp == NULL) + return 0; + + if (!bulp->bu_flag) { + log(LOG_INFO, + "%s: BU not sent to host %s due to an ICMP Parameter " + "Problem, Code 2, when a BU was sent previously\n", + __FUNCTION__, ip6_sprintf(&bulp->dst_addr)); + return 0; + } + + /* Only send BU if we are not in state UNDEFINED */ + esp = mip6_esm_find(&bulp->bind_addr); + if (esp == NULL) { + log(LOG_ERR, "%s: We should never come here\n", __FUNCTION__); + return 0; + } else if (esp->state == MIP6_STATE_UNDEF) { + log(LOG_INFO, + "%s: Mobile Node with home address %s not connected to " + "any network. 
Binding Update could not be sent.\n", + __FUNCTION__, ip6_sprintf(&bulp->bind_addr)); + return 0; + } + + /* Evaluate parameters according to the note in the function header */ + if ((data == NULL) && (subbuf == NULL)) { + if ((bulp->state == NULL) || (bulp->state->bu_opt == NULL)) { + log(LOG_ERR, + "%s: No existing BU option to send\n", + __FUNCTION__); + return 0; + } + bulp->seqno += 1; + bu_opt = bulp->state->bu_opt; + bu_opt->seqno = bulp->seqno; + bu_subopt = bulp->state->bu_subopt; + } else if (data != NULL) { + mip6_clear_retrans(bulp); + if (data->ack) { + bulp->state = mip6_create_retrans(bulp); + if (bulp->state == NULL) + return ENOBUFS; + } + + bulp->seqno += 1; + bu_opt = mip6_create_bu(data->prefix_len, data->ack, + bulp->hr_flag, + bulp->seqno, bulp->lifetime); + if (bu_opt == NULL) { + mip6_clear_retrans(bulp); + bulp->seqno -= 1; + return ENOBUFS; + } + + if (data->ack) { + bulp->state->bu_opt = bu_opt; + bulp->state->bu_subopt = subbuf; + bu_subopt = bulp->state->bu_subopt; + } else + bu_subopt = subbuf; + } else { + log(LOG_ERR, + "%s: Function parameter error. 
We should not come here\n", + __FUNCTION__); + return 0; + } + + /* Allocate necessary memory and send the BU */ + pktopt = (struct ip6_pktopts *)MALLOC(sizeof(struct ip6_pktopts), + M_TEMP, M_NOWAIT); + if (pktopt == NULL) + return ENOBUFS; + bzero(pktopt, sizeof(struct ip6_pktopts)); + + pktopt->ip6po_hlim = -1; /* -1 means to use default hop limit */ + m_ip6 = mip6_create_ip6hdr(&bulp->bind_addr, &bulp->dst_addr, + IPPROTO_NONE); + if(m_ip6 == NULL) { + _FREE(pktopt, M_TEMP); + return ENOBUFS; + } + + pktopt->ip6po_dest2 = mip6_create_dh((void *)bu_opt, bu_subopt, + IPPROTO_NONE); + if(pktopt->ip6po_dest2 == NULL) { + _FREE(pktopt, M_TEMP); + _FREE(m_ip6, M_TEMP); + return ENOBUFS; + } + + mip6_config.enable_outq = 0; + error = ip6_output(m_ip6, pktopt, NULL, 0, NULL, NULL); + if (error) { + _FREE(pktopt->ip6po_dest2, M_TEMP); + _FREE(pktopt, M_TEMP); + mip6_config.enable_outq = 1; + log(LOG_ERR, + "%s: ip6_output function failed to send BU, error = %d\n", + __FUNCTION__, error); + return error; + } + mip6_config.enable_outq = 1; + + /* Update Binding Update List variables. 
*/ + bulp->lasttime = time_second; + bulp->no_of_sent_bu += 1; + + if ( !(bu_opt->flags & MIP6_BU_AFLAG)) { + if (bulp->no_of_sent_bu >= MIP6_MAX_FAST_UPDATES) + bulp->update_rate = MIP6_SLOW_UPDATE_RATE; + } + +#if MIP6_DEBUG + mip6_debug("\nSent Binding Update option (0x%x)\n", bu_opt); + mip6_debug("IP Header Src: %s\n", ip6_sprintf(&bulp->bind_addr)); + mip6_debug("IP Header Dst: %s\n", ip6_sprintf(&bulp->dst_addr)); + mip6_debug("Type/Length/Flags: %x / %u / ", bu_opt->type, bu_opt->len); + if (bu_opt->flags & MIP6_BU_AFLAG) + mip6_debug("A "); + if (bu_opt->flags & MIP6_BU_HFLAG) + mip6_debug("H "); + if (bu_opt->flags & MIP6_BU_RFLAG) + mip6_debug("R "); + mip6_debug("\n"); + mip6_debug("Seq no/Life time: %u / %u\n", bu_opt->seqno, + bu_opt->lifetime); + mip6_debug("Prefix length: %u\n", bu_opt->prefix_len); + + if (bu_subopt) { + mip6_debug("Sub-options present (TLV coded)\n"); + for (ii = 0; ii < bu_subopt->len; ii++) { + if (ii % 16 == 0) + mip6_debug("\t0x:"); + if (ii % 4 == 0) + mip6_debug(" "); + bcopy((caddr_t)&bu_subopt->buffer[ii], + (caddr_t)&var, 1); + mip6_debug("%02x", var); + if ((ii + 1) % 16 == 0) + mip6_debug("\n"); + } + if (ii % 16) + mip6_debug("\n"); + } +#endif + + _FREE(pktopt->ip6po_dest2, M_TEMP); + _FREE(pktopt, M_TEMP); + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_send_bu2fn + * Description: Create a new or modify an existing Binding Update List entry, + * create a Bindig Update option and a new temporary event-state + * machine and send the Binding Update option to a Home Agent at + * the previous foreign network. 
+ * Ret value: -
+ ******************************************************************************
+ */
+void
+mip6_send_bu2fn(old_coa, old_ha, coa, esm_ifp, lifetime)
+struct in6_addr *old_coa; /* Previous care-of address */
+struct mip6_hafn *old_ha; /* Previous Home Agent address */
+struct in6_addr *coa; /* Current coa or home address */
+struct ifnet *esm_ifp; /* Physical i/f used by event-state machine */
+u_int32_t lifetime; /* Lifetime for BU */
+{
+ struct mip6_esm *esp; /* ESM for prev COA */
+ struct mip6_bul *bulp; /* BU list entry*/
+ struct mip6_subbuf *subbuf; /* Buffer containing sub-options */
+ struct mip6_bu_data bu_data; /* Data used when a BU is created */
+ struct mip6_subopt_coa altcoa; /* Alternate care-of address */
+#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__)
+ /* Older BSDs/Darwin lack the time_second global; shadow it locally. */
+ long time_second = time.tv_sec;
+#endif
+
+ /* Make sure that the Home Agent at the previous network exist and
+ that it's still valid. */
+ if (old_ha == NULL)
+ return;
+ else {
+ if (time_second > old_ha->time) {
+ log(LOG_INFO,
+ "%s: Timer had expired for Home Agent on "
+ "previous network. No BU sent\n",
+ __FUNCTION__);
+ return;
+ }
+ }
+
+ /* Find an existing or create a new BUL entry. */
+ bulp = mip6_bul_find(NULL, old_coa);
+ if (bulp == NULL) {
+ bulp = mip6_bul_create(&old_ha->addr, old_coa, coa,
+ lifetime, 1);
+ if (bulp == NULL)
+ return;
+ } else {
+ /* Refresh the existing entry and drop any pending
+ retransmission state. */
+ bulp->dst_addr = old_ha->addr;
+ bulp->coa = *coa;
+ bulp->lifetime = lifetime;
+ bulp->refreshtime = lifetime;
+ mip6_clear_retrans(bulp);
+ }
+
+ /* Create an event-state machine to be used when the home address
+ option is created for outgoing packets. The event-state machine
+ must be removed when the BUL entry is removed. */
+ esp = mip6_esm_create(esm_ifp, &old_ha->addr, coa, old_coa, 0,
+ MIP6_STATE_NOTREG, TEMPORARY,
+ MIP6_BU_LIFETIME_DEFRTR);
+ if (esp == NULL)
+ return;
+
+ /* Send the Binding Update option */
+ subbuf = NULL;
+
+ bu_data.prefix_len = 0;
+ bu_data.ack = 0;
+
+ altcoa.type = IP6SUBOPT_ALTCOA;
+ altcoa.len = IP6OPT_COALEN;
+ altcoa.coa = *coa;
+ if (mip6_store_subopt(&subbuf, (caddr_t)&altcoa)) {
+ if (subbuf)
+ _FREE(subbuf, M_TEMP);
+ return;
+ }
+
+ if (mip6_send_bu(bulp, &bu_data, subbuf) != 0)
+ return;
+}
+
+
+
+/*
+ ******************************************************************************
+ * Function: mip6_update_cns
+ * Description: Search the BUL for each entry with a matching home address for
+ * which no Binding Update has been sent for the new COA.
+ * Call a function for queueing the BU.
+ * Note: Since this BU is stored in the MN for a couple of seconds
+ * before it is piggybacked or flashed from the queue it may
+ * not have the ack-bit set.
+ * Ret value: -
+ ******************************************************************************
+ */
+void
+mip6_update_cns(home_addr, coa, prefix_len, lifetime)
+struct in6_addr *home_addr; /* Home Address for MN */
+struct in6_addr *coa; /* New Primary COA for MN */
+u_int8_t prefix_len; /* Prefix length for Home Address */
+u_int32_t lifetime; /* Lifetime for BU registration */
+{
+ struct mip6_bul *bulp; /* Entry in the Binding Update List */
+
+ /* Try to find existing entry in the BUL. Home address must match. */
+ for (bulp = mip6_bulq; bulp;) {
+ if (IN6_ARE_ADDR_EQUAL(home_addr, &bulp->bind_addr) &&
+ !IN6_ARE_ADDR_EQUAL(coa, &bulp->coa)) {
+ /* Queue a BU for transmission to the node. */
+ mip6_queue_bu(bulp, home_addr, coa,
+ prefix_len, lifetime);
+
+ /* Remove BUL entry if it's a de-registration.
+ mip6_bul_delete returns the next entry, so the
+ loop pointer stays valid. */
+ if (IN6_ARE_ADDR_EQUAL(home_addr, coa) ||
+ (lifetime == 0))
+ bulp = mip6_bul_delete(bulp);
+ else
+ bulp = bulp->next;
+ } else
+ bulp = bulp->next;
+ }
+}
+
+
+
+/*
+ ******************************************************************************
+ * Function: mip6_queue_bu
+ * Description: Create a BU and a sub-option (alternate care-of address).
+ * Update the BUL entry and store it in the output queue for
+ * piggy-backing.
+ * Note: Since this BU is stored in the MN for a couple of seconds
+ * before it is piggybacked or flashed from the queue it may
+ * not have the ack-bit set.
+ * Ret value: -
+ ******************************************************************************
+ */
+void
+mip6_queue_bu(bulp, home_addr, coa, prefix_len, lifetime)
+struct mip6_bul *bulp; /* Entry in the Binding Update List */
+struct in6_addr *home_addr; /* Home Address for MN */
+struct in6_addr *coa; /* New Primary COA for MN */
+u_int8_t prefix_len; /* Prefix length for Home Address */
+u_int32_t lifetime; /* Lifetime for BU registration */
+{
+ struct mip6_opt_bu *bu_opt; /* BU allocated in this function */
+ struct mip6_subbuf *subbuf; /* Buffer containing sub-options */
+ struct mip6_subopt_coa altcoa; /* Alternate care-of address */
+#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined (__APPLE__)
+ /* Older BSDs/Darwin lack the time_second global; shadow it locally. */
+ long time_second = time.tv_sec;
+#endif
+
+ /* Check if it's allowed to send a BU to this node. */
+ if ((coa == NULL) || (bulp == NULL))
+ return;
+
+ if (bulp->bu_flag == 0) {
+ log(LOG_INFO,
+ "%s: BU not sent to host %s due to an ICMP Parameter "
+ "Problem, Code 2, when a BU was sent previously\n",
+ __FUNCTION__, ip6_sprintf(&bulp->dst_addr));
+ return;
+ }
+
+ /* Create the sub-option */
+ subbuf = NULL;
+ altcoa.type = IP6SUBOPT_ALTCOA;
+ altcoa.len = IP6OPT_COALEN;
+ altcoa.coa = *coa;
+ if (mip6_store_subopt(&subbuf, (caddr_t)&altcoa)) {
+ if (subbuf)
+ _FREE(subbuf, M_TEMP);
+ return;
+ }
+
+ /* Create a BU. No ack and no home-registration flag: this BU is
+ destined for a correspondent node. */
+ bulp->seqno += 1;
+ bu_opt = mip6_create_bu(prefix_len, 0, 0, bulp->seqno, lifetime);
+ if (bu_opt == NULL) {
+ log(LOG_ERR, "%s: Could not create a BU\n", __FUNCTION__);
+ return;
+ }
+
+ /* Update BUL entry */
+ bulp->coa = *coa;
+ bulp->lifetime = lifetime;
+ bulp->refreshtime = lifetime;
+ bulp->lasttime = time_second;
+ bulp->no_of_sent_bu += 1;
+ mip6_clear_retrans(bulp);
+
+ /* Add entry to the output queue for transmission to the CN. */
+ mip6_outq_create(bu_opt, subbuf, home_addr, &bulp->dst_addr, NOT_SENT);
+}
+
+
+
+/*
+ ##############################################################################
+ #
+ # UTILITY FUNCTIONS
+ # Miscellaneous functions needed for processing of incoming control signals
+ # or events originated from the move detection algorithm.
+ #
+ ##############################################################################
+ */
+
+/*
+ ******************************************************************************
+ * Function: mip6_create_bu
+ * Description: Create a Binding Update option for transmission.
+ * Ret value: Pointer to the BU option or NULL.
+ * Note: Variable seqno and lifetime set in function
+ * mip6_update_bul_entry.
+ ****************************************************************************** + */ +struct mip6_opt_bu * +mip6_create_bu(prefix_len, ack, hr, seqno, lifetime) +u_int8_t prefix_len; /* Prefix length for Home Address */ +int ack; /* Ack required (0 = FALSE otherwise TRUE) */ +int hr; /* Home Registration (0 = FALSE otherwise TRUE) */ +u_int16_t seqno; /* Sequence number */ +u_int32_t lifetime; /* Suggested lifetime for the BU registration */ +{ + struct mip6_opt_bu *bu_opt; /* BU allocated in this function */ + + /* Allocate and store Binding Update option data */ + bu_opt = (struct mip6_opt_bu *)MALLOC(sizeof(struct mip6_opt_bu), + M_TEMP, M_WAITOK); + if (bu_opt == NULL) + return NULL; + bzero(bu_opt, sizeof(struct mip6_opt_bu)); + + bu_opt->type = IP6OPT_BINDING_UPDATE; + bu_opt->len = IP6OPT_BULEN; + bu_opt->seqno = seqno; + bu_opt->lifetime = lifetime; + + /* The prefix length field is valid only for "home registration" BU. */ + if (hr) { + bu_opt->flags |= MIP6_BU_HFLAG; + bu_opt->prefix_len = prefix_len; + if (ip6_forwarding) + bu_opt->flags |= MIP6_BU_RFLAG; + } else + bu_opt->prefix_len = 0; + + if (ack) + bu_opt->flags |= MIP6_BU_AFLAG; + +#if MIP6_DEBUG + mip6_debug("\nBinding Update option created (0x%x)\n", bu_opt); +#endif + return bu_opt; +} + + + +/* + ****************************************************************************** + * Function: mip6_stop_bu + * Description: Stop sending a Binding Update to the host that has generated + * the icmp error message. + * Ret value: - + ****************************************************************************** + */ +void +mip6_stop_bu(ip6_dst) +struct in6_addr *ip6_dst; /* Host that generated ICMP error message */ +{ + struct mip6_bul *bulp; /* Entry in the BU list */ + + /* No future BU shall be sent to this destination. 
*/ + for (bulp = mip6_bulq; bulp; bulp = bulp->next) { + if (IN6_ARE_ADDR_EQUAL(ip6_dst, &bulp->dst_addr)) + bulp->bu_flag = 0; + } +} + + + +/* + ****************************************************************************** + * Function: mip6_ba_error + * Description: Each incoming BA error is taken care of by this function. + * If a registration to the Home Agent failed then dynamic home + * agent address discovery shall be performed. If a de-regi- + * stration failed then perform the same actions as when a + * BA with status equals to 0 is received. + * If a registration or de-registration to the CN failed then + * the error is logged, no further action is taken. + * If dynamic home agent address discovery already has been + * done then take the next entry in the list. If its just one + * entry in the list discard it and send a BU with destination + * address equals to Home Agents anycast address. + * Ret value: 0 Everything is OK. + * IPPROTO_DONE Error code used when something went wrong. + ****************************************************************************** + */ +int +mip6_ba_error(src, dst, bind_addr, hr_flag) +struct in6_addr *src; /* Src address for received BA option */ +struct in6_addr *dst; /* Dst address for received BA option */ +struct in6_addr *bind_addr; /* Binding addr in BU causing this error */ +u_int8_t hr_flag; /* Home reg flag in BU causing this error */ +{ + struct mip6_bul *bulp; /* New BUL entry*/ + struct mip6_esm *esp; /* Home address entry */ + struct in6_addr *dst_addr; + struct mip6_bu_data bu_data; /* Data used when a BU is created */ + u_int32_t lifetime; + int error, max_index; + + if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_UNSPEC) { + /* Reason unspecified + Received when either a Home Agent or Correspondent Node + was not able to process the BU. 
*/ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Reason unspecified) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_PROHIBIT) { + /* Administratively prohibited */ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Administratively prohibited) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + log(LOG_INFO, "Contact your system administrator\n"); + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_RESOURCE) { + /* Insufficient resources + Received when a Home Agent receives a BU with the H-bit + set and insufficient space exist or can be reclaimed + (sec. 8.7). */ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Insufficient resources) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_HOMEREGNOSUP) { + /* Home registration not supported + Received when a primary care-of address registration + (sec. 9.3) is done and the node is not a router + implementing Home Agent functionality. */ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Home registration not supported) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_SUBNET) { + /* Not home subnet + Received when a primary care-of address registration + (sec. 9.3) is done and the home address for the binding + is not an on-link IPv6 address with respect to the Home + Agent's current prefix list. */ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Not home subnet) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_DHAAD) { + /* Dynamic Home Agent Address Discovery + Received when a Mobile Node is trying to find out the + global address of the home agents on its home subnetwork + (sec 9.2). 
*/ + error = mip6_rec_hal(src, dst, mip6_inp->hal); + return error; + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_IFLEN) { + /* Incorrect subnet prefix length + Received when a primary care-of address registration + (sec. 9.3) is done and the prefix length in the BU + differs from the length of the home agent's own knowledge + of the subnet prefix length on the home link. */ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Incorrect subnet prefix length) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } else if (mip6_inp->ba_opt->status == MIP6_BA_STATUS_NOTHA) { + /* Not Home Agent for this Mobile Node + Received when a primary care-of address de-registration + (sec. 9.4) is done and the Home Agent has no entry for + this mobil node marked as "home registration" in its + Binding Cache. */ + log(LOG_INFO, + "\nBinding Acknowledgement error = %d " + "(Not Home Agent for this Mobile Node) from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } else { + log(LOG_INFO, + "\nBinding Acknowledgement error = %d (Unknown) " + "from host %s\n", + mip6_inp->ba_opt->status, ip6_sprintf(src)); + } + + /* Furthr processing according to the desription in the header. */ + if (hr_flag) { + esp = mip6_esm_find(bind_addr); + if (esp == NULL) { + log(LOG_ERR, + "%s: No event-state machine found\n", + __FUNCTION__); + return IPPROTO_DONE; + } + + /* If it's a de-registration, clear up the ESM. */ + if (esp->state == MIP6_STATE_DEREG) { + /* Remove the tunnel for the MN */ + mip6_tunnel(NULL, NULL, MIP6_TUNNEL_DEL, + MIP6_NODE_MN, (void *)esp); + + /* Send BU to each entry (CN) in the BUL to remove + the BC entry. */ + mip6_update_cns(&esp->home_addr, &esp->home_addr, + 0, 0); + mip6_outq_flush(); + + /* Don't set the state until BUs have been sent + to all CNs, otherwise the Home Address option + will not be added for the outgoing packet. 
*/ + esp->state = MIP6_STATE_HOME; + esp->coa = in6addr_any; + return 0; + } + + /* If it's a registration, perform dynamic home agent address + discovery or use the existing. */ + if (esp->dad) { + if (esp->dad->hal->len == IP6OPT_HALEN) { + if (esp->dad->hal) + _FREE(esp->dad->hal, M_TEMP); + _FREE(esp->dad, M_TEMP); + + /* Build an anycast address */ + mip6_build_ha_anycast(&esp->ha_hn, + &esp->home_addr, + esp->prefix_len); + if (IN6_IS_ADDR_UNSPECIFIED(&esp->ha_hn)) { + log(LOG_ERR, + "%s: Could not create anycast " + "address for Mobile Node, " + "wrong prefix length\n", + __FUNCTION__); + return IPPROTO_DONE; + } + dst_addr = &esp->ha_hn; + lifetime = mip6_config.hr_lifetime; + } else { + dst_addr = &esp->dad->hal->halist[esp->dad->index]; + max_index = (esp->dad->hal->len / IP6OPT_HALEN) - 1; + if (esp->dad->index == max_index) + esp->dad->index = 0; + else + esp->dad->index += 1; + lifetime = MIP6_BU_LIFETIME_DHAAD; + } + } else { + /* Build an anycast address */ + mip6_build_ha_anycast(&esp->ha_hn, &esp->home_addr, + esp->prefix_len); + if (IN6_IS_ADDR_UNSPECIFIED(&esp->ha_hn)) { + log(LOG_ERR, + "%s: Could not create anycast address for Mobile " + "Node, wrong prefix length\n", __FUNCTION__); + return IPPROTO_DONE; + } + dst_addr = &esp->ha_hn; + lifetime = mip6_config.hr_lifetime; + } + + /* Create a new BUL entry and send a BU to the Home Agent */ + bulp = mip6_bul_create(dst_addr, &esp->home_addr, &esp->coa, + lifetime, 1); + if (bulp == NULL) + return IPPROTO_DONE; + + bu_data.prefix_len = esp->prefix_len; + bu_data.ack = 1; + + if (mip6_send_bu(bulp, &bu_data, NULL) != 0) + return IPPROTO_DONE; + } + return 0; +} + + + +/* + ****************************************************************************** + * Function: mip6_prefix_lifetime + * Description: Decide the remaining valid lifetime for a home address. Search + * the prefix list for a match and use this lifetime value. 
+ * Note: This function is used by the MN since no test of the on-link
+ * flag is done.
+ * Ret value: Lifetime
+ ******************************************************************************
+ */
+u_int32_t
+mip6_prefix_lifetime(addr)
+struct in6_addr *addr; /* IPv6 address to check */
+{
+ struct nd_prefix *pr; /* Entries in the prefix list */
+ u_int32_t min_time; /* Minimum life time */
+
+ /* NOTE(review): min_time is never updated; the lifetime of the
+ FIRST matching prefix is returned, and min_time (0xffffffff,
+ i.e. infinite) only when no prefix matches -- confirm this is
+ the intended behaviour despite the variable's name. */
+ min_time = 0xffffffff;
+ for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) {
+ if (in6_are_prefix_equal(addr, &pr->ndpr_prefix.sin6_addr,
+ pr->ndpr_plen)) {
+ return pr->ndpr_vltime;
+ }
+ }
+ return min_time;
+}
+
+
+
+/*
+ ******************************************************************************
+ * Function: mip6_create_retrans
+ * Description: Removes the current content of the bulp->state variable and
+ * allocates new memory.
+ * Ret value: Pointer to the allocated memory or NULL.
+ ******************************************************************************
+ */
+struct mip6_retrans *
+mip6_create_retrans(bulp)
+struct mip6_bul *bulp;
+{
+ if (bulp == NULL)
+ return NULL;
+
+ mip6_clear_retrans(bulp);
+ bulp->state = (struct mip6_retrans *)MALLOC(
+ sizeof(struct mip6_retrans),
+ M_TEMP, M_WAITOK);
+ if (bulp->state == NULL)
+ return NULL;
+ bzero(bulp->state, sizeof(struct mip6_retrans));
+
+ bulp->state->bu_opt = NULL;
+ bulp->state->bu_subopt = NULL;
+ /* Initial BA timeout is 2 (seconds); doubled elsewhere on
+ retransmission. */
+ bulp->state->ba_timeout = 2;
+ bulp->state->time_left = 2;
+ return bulp->state;
+}
+
+
+
+/*
+ ******************************************************************************
+ * Function: mip6_clear_retrans
+ * Description: Removes the current content of the bulp->state variable and
+ * sets it to NULL.
+ * Ret value: -
+ ******************************************************************************
+ */
+void
+mip6_clear_retrans(bulp)
+struct mip6_bul *bulp;
+{
+ if (bulp == NULL)
+ return;
+
+ /* Free the stored BU option and sub-options before releasing the
+ retransmission state itself. */
+ if (bulp->state) {
+ if (bulp->state->bu_opt)
+ _FREE(bulp->state->bu_opt, M_TEMP);
+ if (bulp->state->bu_subopt)
+ _FREE(bulp->state->bu_subopt, M_TEMP);
+ _FREE(bulp->state, M_TEMP);
+ bulp->state = NULL;
+ }
+ return;
+}
+
+
+
+/*
+ ##############################################################################
+ #
+ # LIST FUNCTIONS
+ # The Mobile Node maintains a Binding Update List (BUL) for each node to which
+ # a BU has been sent.
+ # Besides from this a list of event-state machines, one for each home address
+ # is handled by the Mobile Node and the Correspondent Node since it may
+ # become mobile at any time.
+ # An output queue for piggybacking of options (BU, BA, BR) on the first
+ # outgoing packet sent to the node is also maintained. If the option has not
+ # been sent with a packet within MIP6_OUTQ_LIFETIME it will be sent in a
+ # separate packet.
+ #
+ ##############################################################################
+ */
+
+/*
+ ******************************************************************************
+ * Function: mip6_bul_find
+ * Description: Find a Binding Update List entry for which a matching can be
+ * found for both the destination and binding address.
+ * If variable dst_addr is NULL an entry for home registration
+ * will be searched for.
+ * Ret value: Pointer to Binding Update List entry or NULL + ****************************************************************************** + */ +struct mip6_bul * +mip6_bul_find(dst_addr, bind_addr) +struct in6_addr *dst_addr; /* Destination Address for Binding Update */ +struct in6_addr *bind_addr; /* Home Address for MN or previous COA */ +{ + struct mip6_bul *bulp; /* Entry in the Binding Update list */ + + if (dst_addr == NULL) { + for (bulp = mip6_bulq; bulp; bulp = bulp->next) { + if (IN6_ARE_ADDR_EQUAL(bind_addr, &bulp->bind_addr) && + (bulp->hr_flag)) + break; + } + } else { + for (bulp = mip6_bulq; bulp; bulp = bulp->next) { + if (IN6_ARE_ADDR_EQUAL(dst_addr, &bulp->dst_addr) && + IN6_ARE_ADDR_EQUAL(bind_addr, &bulp->bind_addr)) + break; + } + if (bulp != NULL) + return bulp; + + /* It might be that the dest address for the BU was the Home + Agent anycast address and in that case we try to find it. */ + for (bulp = mip6_bulq; bulp; bulp = bulp->next) { + if ((bulp->dst_addr.s6_addr8[15] & 0x7f) == + MIP6_ADDR_ANYCAST_HA && + IN6_ARE_ADDR_EQUAL(bind_addr, &bulp->bind_addr)) { + break; + } + } + } + return bulp; +} + + + + +/* + ****************************************************************************** + * Function: mip6_bul_create + * Description: Create a new Binding Update List entry and insert it as the + * first entry in the list. + * Ret value: Pointer to Binding Update List entry or NULL. + * Note: If the BUL timeout function has not been started it is started. + * The BUL timeout function will be called once every second until + * there are no more entries in the BUL. 
+ ****************************************************************************** + */ +struct mip6_bul * +mip6_bul_create(dst_addr, bind_addr, coa, lifetime, hr) +struct in6_addr *dst_addr; /* Dst address for Binding Update */ +struct in6_addr *bind_addr; /* Home Address for MN or previous COA */ +struct in6_addr *coa; /* Primary COA for MN */ +u_int32_t lifetime; /* Lifetime for BU */ +u_int8_t hr; /* Home registration flag */ +{ + struct mip6_bul *bulp; /* New Binding Update list entry */ + int s; + + bulp = (struct mip6_bul *)MALLOC(sizeof(struct mip6_bul), + M_TEMP, M_WAITOK); + if (bulp == NULL) + return NULL; + bzero(bulp, sizeof(struct mip6_bul)); + + bulp->next = NULL; + bulp->dst_addr = *dst_addr; + bulp->bind_addr = *bind_addr; + bulp->coa = *coa; + bulp->lifetime = lifetime; + bulp->refreshtime = lifetime; + bulp->seqno = 0; + bulp->lasttime = 0; + bulp->no_of_sent_bu = 0; + bulp->state = NULL; + bulp->bu_flag = 1; + bulp->hr_flag = hr; + bulp->update_rate = MIP6_MAX_UPDATE_RATE; + + /* Insert the entry as the first entry in the BUL. 
*/ + s = splnet(); + if (mip6_bulq == NULL) { + mip6_bulq = bulp; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_bul_handle = +#endif + timeout(mip6_timer_bul, (void *)0, hz); + } else { + bulp->next = mip6_bulq; + mip6_bulq = bulp; + } + splx(s); + +#if MIP6_DEBUG + mip6_debug("\nBinding Update List Entry created (0x%x)\n", bulp); + mip6_debug("Destination Address: %s\n", ip6_sprintf(&bulp->dst_addr)); + mip6_debug("Binding Address: %s\n", ip6_sprintf(&bulp->bind_addr)); + mip6_debug("Care-of Address: %s\n", ip6_sprintf(&bulp->coa)); + mip6_debug("Life/Refresh time: %u / %u\n", bulp->lifetime, + bulp->refreshtime); + mip6_debug("Seq no/Home reg: %u / ", bulp->seqno); + if (bulp->hr_flag) + mip6_debug("TRUE\n"); + else + mip6_debug("FALSE\n"); +#endif + return bulp; +} + + + +/* + ****************************************************************************** + * Function: mip6_bul_delete + * Description: Delete the requested Binding Update list entry. + * Ret value: Ptr to next entry in list or NULL if last entry removed. + ****************************************************************************** + */ +struct mip6_bul * +mip6_bul_delete(bul_remove) +struct mip6_bul *bul_remove; /* BUL entry to be deleted */ +{ + struct mip6_bul *bulp; /* Current entry in the BU list */ + struct mip6_bul *bulp_prev; /* Previous entry in the BU list */ + struct mip6_bul *bulp_next; /* Next entry in the BU list */ + int s; + + /* Find the requested entry in the BUL. 
*/ + s = splnet(); + bulp_next = NULL; + bulp_prev = NULL; + for (bulp = mip6_bulq; bulp; bulp = bulp->next) { + bulp_next = bulp->next; + if (bulp == bul_remove) { + if (bulp_prev == NULL) + mip6_bulq = bulp->next; + else + bulp_prev->next = bulp->next; +#if MIP6_DEBUG + mip6_debug("\nBU List Entry deleted (0x%x)\n", bulp); +#endif + mip6_clear_retrans(bulp); + _FREE(bulp, M_TEMP); + + /* Remove the timer if the BUL queue is empty */ + if (mip6_bulq == NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_bul, (void *)NULL, + mip6_timer_bul_handle); + callout_handle_init(&mip6_timer_bul_handle); +#else + untimeout(mip6_timer_bul, (void *)NULL); +#endif + } + break; + } + bulp_prev = bulp; + } + splx(s); + return bulp_next; +} + + + +/* + ****************************************************************************** + * Function: mip6_esm_find + * Description: Find an event-state machine for which the Mobile Nodes home + * address matches and the type is correct. + * Ret value: Pointer to event-state machine entry or NULL + ****************************************************************************** + */ +struct mip6_esm * +mip6_esm_find(home_addr) +struct in6_addr *home_addr; /* MNs home address */ +{ + struct mip6_esm *esp; + + for (esp = mip6_esmq; esp; esp = esp->next) { + if (IN6_ARE_ADDR_EQUAL(home_addr, &esp->home_addr)) + return esp; + } + return NULL; +} + + + +/* + ****************************************************************************** + * Function: mip6_esm_create + * Description: Create an event-state machine entry and add it first to the + * list. If type is PERMANENT the lifetime will be set to 0xFFFF, + * otherwise it will be set to the specified lifetime. If type is + * TEMPORARY the timer will be started if not already started. + * Ret value: Pointer to an event-state machine or NULL. 
+ ****************************************************************************** + */ +struct mip6_esm * +mip6_esm_create(ifp, ha_hn, coa, home_addr, prefix_len, state, + type, lifetime) +struct ifnet *ifp; /* Physical i/f used by this home address */ +struct in6_addr *ha_hn; /* Home agent address (home network) */ +struct in6_addr *coa; /* Current care-of address */ +struct in6_addr *home_addr; /* Home address */ +u_int8_t prefix_len; /* Prefix length for the home address */ +int state; /* State of the home address */ +enum esm_type type; /* Permanent or Temporary esm */ +u_int16_t lifetime; /* Lifetime for event-state machine */ +{ + struct mip6_esm *esp, *esp_tmp; + int start_timer, s; + + esp = (struct mip6_esm *)MALLOC(sizeof(struct mip6_esm), + M_TEMP, M_WAITOK); + if (esp == NULL) { + log(LOG_ERR, + "%s: Could not create an event-state machine\n", + __FUNCTION__); + return NULL; + } + bzero(esp, sizeof(struct mip6_esm)); + + esp->next = NULL; + esp->ifp = ifp; + esp->ep = NULL; + esp->state = state; + esp->type = type; + esp->home_addr = *home_addr; + esp->prefix_len = prefix_len; + esp->ha_hn = *ha_hn; + esp->coa = *coa; + esp->ha_fn = NULL; + esp->dad = NULL; + + if (type == PERMANENT) { + esp->lifetime = 0xFFFF; + start_timer = 0; + } else { + esp->lifetime = lifetime; + start_timer = 1; + } + + /* If no TEMPORARY already exist and the new is TEMPORARY, start + the timer. 
*/ + for (esp_tmp = mip6_esmq; esp_tmp; esp_tmp = esp_tmp->next) { + if (esp_tmp->type == TEMPORARY) + start_timer = 0; + } + + /* Insert entry as the first entry in the event-state machine list */ + s = splnet(); + if (mip6_esmq == NULL) + mip6_esmq = esp; + else { + esp->next = mip6_esmq; + mip6_esmq = esp; + } + splx(s); + + if (start_timer) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_esm_handle = +#endif + timeout(mip6_timer_esm, (void *)0, hz); + } + return esp; +} + + + +/* + ****************************************************************************** + * Function: mip6_esm_delete + * Description: Delete the requested event-state machine. + * Ret value: Ptr to next entry in list or NULL if last entry removed. + ****************************************************************************** + */ +struct mip6_esm * +mip6_esm_delete(esm_remove) +struct mip6_esm *esm_remove; /* Event-state machine to be deleted */ +{ + struct mip6_esm *esp; /* Current entry in event-state list */ + struct mip6_esm *esp_prev; /* Previous entry in event-state list */ + struct mip6_esm *esp_next; /* Next entry in the event-state list */ + int s; + + /* Find the requested entry in the event-state list. 
*/ + s = splnet(); + esp_next = NULL; + esp_prev = NULL; + for (esp = mip6_esmq; esp; esp = esp->next) { + esp_next = esp->next; + if (esp == esm_remove) { + if (esp_prev == NULL) + mip6_esmq = esp->next; + else + esp_prev->next = esp->next; + + mip6_tunnel(NULL, NULL, MIP6_TUNNEL_DEL, MIP6_NODE_MN, + (void *)esp); + + if (esp->dad) { + if (esp->dad->hal) + _FREE(esp->dad->hal, M_TEMP); + _FREE(esp->dad, M_TEMP); + } + + if (esp->ha_fn) { + _FREE(esp->ha_fn, M_TEMP); + esp->ha_fn = NULL; + } + +#if MIP6_DEBUG + mip6_debug("\nEvent-state machine deleted (0x%x)\n", + esp); +#endif + _FREE(esp, M_TEMP); + + /* Remove the timer if the ESM queue is empty */ + if (mip6_esmq == NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + untimeout(mip6_timer_esm, (void *)NULL, + mip6_timer_esm_handle); + callout_handle_init(&mip6_timer_esm_handle); +#else + untimeout(mip6_timer_esm, (void *)NULL); +#endif + } + break; + } + esp_prev = esp; + } + splx(s); + return esp_next; +} + + + +/* + ****************************************************************************** + * Function: mip6_outq_create + * Description: Add an entry to the output queue and store the destination + * option and the sub-option (if present) to this entry. + * Ret value: 0 Everything is OK + * Otherwise appropriate error code + * Note: If the outqueue timeout function has not been started it is + * started. The outqueue timeout function will be called once + * every MIP6_OUTQ_INTERVAL second until there are no more entries + * in the list. 
 ******************************************************************************
 */
int
mip6_outq_create(opt, subbuf, src_addr, dst_addr, flag)
void *opt;			/* Destination option (BU, BR or BA) */
struct mip6_subbuf *subbuf;	/* Buffer containing destination sub-options */
struct in6_addr *src_addr;	/* Source address for the option */
struct in6_addr *dst_addr;	/* Destination address for the option */
enum send_state flag;		/* Flag indicating the state of the entry */
{
	struct mip6_output *outp;	/* Pointer to output list entry */
	int s;

	/* Allocate and zero-initialize the new output queue entry. */
	outp = (struct mip6_output *)MALLOC(sizeof(struct mip6_output),
					    M_TEMP, M_WAITOK);
	if (outp == NULL)
		return ENOBUFS;
	bzero(outp, sizeof(struct mip6_output));

	outp->next = NULL;
	outp->opt = opt;
	outp->subopt = subbuf;
	outp->ip6_dst = *dst_addr;
	outp->ip6_src = *src_addr;
	outp->flag = flag;
	outp->lifetime = MIP6_OUTQ_LIFETIME;

	s = splnet();
	if (mip6_outq == NULL) {
		/* First entry in the queue: start the outqueue timer too. */
		mip6_outq = outp;
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
		mip6_timer_outqueue_handle =
#endif
			timeout(mip6_timer_outqueue, (void *)0,
				hz * (MIP6_OUTQ_INTERVAL/10));
	} else {
		/* Add this entry as the first entry in the queue. */
		outp->next = mip6_outq;
		mip6_outq = outp;
	}
	splx(s);
	return 0;
}



/*
 ******************************************************************************
 * Function:    mip6_outq_delete
 * Description: Delete the requested output queue entry.
 * Ret value:   Ptr to next entry in list or NULL if last entry removed.
 ******************************************************************************
 */
struct mip6_output *
mip6_outq_delete(oqp_remove)
struct mip6_output *oqp_remove;	/* Output queue entry to be deleted */
{
	struct mip6_output *oqp;	/* Current entry in output queue */
	struct mip6_output *oqp_prev;	/* Previous entry in output queue */
	struct mip6_output *oqp_next;	/* Next entry in the output queue */
	int s;

	/* Find the requested entry in the output queue. */
	s = splnet();
	oqp_next = NULL;
	oqp_prev = NULL;
	for (oqp = mip6_outq; oqp; oqp = oqp->next) {
		oqp_next = oqp->next;
		if (oqp == oqp_remove) {
			/* Unlink the entry, then free the option buffers
			   it owns before freeing the entry itself. */
			if (oqp_prev == NULL)
				mip6_outq = oqp->next;
			else
				oqp_prev->next = oqp->next;

			if (oqp->opt)
				_FREE(oqp->opt, M_TEMP);

			if (oqp->subopt)
				_FREE(oqp->subopt, M_TEMP);

#if MIP6_DEBUG
			mip6_debug("\nOutput Queue entry deleted (0x%x)\n",
				   oqp);
#endif
			_FREE(oqp, M_TEMP);

			/* Remove the timer if the output queue is empty */
			if (mip6_outq == NULL) {
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
				untimeout(mip6_timer_outqueue, (void *)NULL,
					  mip6_timer_outqueue_handle);
				callout_handle_init(
					&mip6_timer_outqueue_handle);
#else
				untimeout(mip6_timer_outqueue, (void *)NULL);
#endif
			}
			break;
		}
		oqp_prev = oqp;
	}
	splx(s);
	return oqp_next;
}



/*
 ******************************************************************************
 * Function:    mip6_outq_flush
 * Description: All entries in the output queue that have not been sent are
 *              sent and then removed. No consideration of the time left for
 *              the entry is taken.
 * Ret value:   -
 * XXX The code is almost the same as in mip6_timer_outqueue
 ******************************************************************************
 */
void
mip6_outq_flush()
{
	struct mip6_output *outp;	/* Ptr to current mip6 output element */
	struct ip6_pktopts *pktopt;	/* Packet Ext headers, options and data */
	struct mip6_opt *opt;		/* Destination option */
	struct mbuf *m_ip6;		/* IPv6 header stored in a mbuf */
	int error;			/* Error code from function call */
	int off;			/* Offset from start of DH (byte) */
	int s;

	/* Go through the entire output queue and send all packets that
	   have not been sent. */
	s = splnet();
	for (outp = mip6_outq; outp;) {
		if (outp->flag == NOT_SENT) {
			m_ip6 = mip6_create_ip6hdr(&outp->ip6_src,
						   &outp->ip6_dst,
						   IPPROTO_NONE);
			if (m_ip6 == NULL) {
				outp = outp->next;
				continue;
			}

			/* Allocate packet extension header. */
			pktopt = (struct ip6_pktopts *)
				MALLOC(sizeof(struct ip6_pktopts),
				       M_TEMP, M_WAITOK);
			if (pktopt == NULL) {
				/* NOTE(review): m_ip6 is a struct mbuf *
				   but is released with _FREE(); mbufs are
				   normally released with m_freem() — confirm
				   how mip6_create_ip6hdr allocates it. */
				_FREE(m_ip6, M_TEMP);
				outp = outp->next;
				continue;
			}
			bzero(pktopt, sizeof(struct ip6_pktopts));
			pktopt->ip6po_hlim = -1;	/* -1 use def hop limit */

			opt = (struct mip6_opt *)outp->opt;
			off = 2;
			if (opt->type == IP6OPT_BINDING_UPDATE) {
				/* Add my BU option to the Dest Header */
				error = mip6_add_bu(&pktopt->ip6po_dest2,
						    &off,
						    (struct mip6_opt_bu *)
						    outp->opt,
						    outp->subopt);
				if (error) {
					_FREE(m_ip6, M_TEMP);
					_FREE(pktopt, M_TEMP);
					outp = outp->next;
					continue;
				}
			} else if (opt->type == IP6OPT_BINDING_ACK) {
				/* Add my BA option to the Dest Header */
				error = mip6_add_ba(&pktopt->ip6po_dest2,
						    &off,
						    (struct mip6_opt_ba *)
						    outp->opt,
						    outp->subopt);
				if (error) {
					_FREE(m_ip6, M_TEMP);
					_FREE(pktopt, M_TEMP);
					outp = outp->next;
					continue;
				}
			} else if (opt->type == IP6OPT_BINDING_REQ) {
				/* Add my BR option to the Dest Header */
				error = mip6_add_br(&pktopt->ip6po_dest2,
						    &off,
						    (struct mip6_opt_br *)
						    outp->opt,
						    outp->subopt);
				if (error) {
					_FREE(m_ip6, M_TEMP);
					_FREE(pktopt, M_TEMP);
					outp = outp->next;
					continue;
				}
			}

			/* Disable the search of the output queue to make
			   sure that we do not end up in an infinite loop. */
			mip6_config.enable_outq = 0;
			error = ip6_output(m_ip6, pktopt, NULL, 0, NULL, NULL);
			if (error) {
				/* NOTE(review): ip6_output may already free
				   the mbuf on error — verify this _FREE does
				   not double-free m_ip6. */
				_FREE(m_ip6, M_TEMP);
				_FREE(pktopt, M_TEMP);
				mip6_config.enable_outq = 1;
				outp = outp->next;
				log(LOG_ERR,
				    "%s: ip6_output function failed, "
				    "error = %d\n", __FUNCTION__, error);
				continue;
			}
			mip6_config.enable_outq = 1;
			outp->flag = SENT;
			/* NOTE(review): pktopt (and the ip6po_dest2 buffer
			   built by mip6_add_*) appears to be leaked on the
			   success path — confirm ownership with ip6_output. */
#if MIP6_DEBUG
			mip6_debug("\nEntry from Output Queue sent\n");
#endif
		}

		/* Remove entry from the queue that has been sent. */
		if (outp->flag == SENT)
			outp = mip6_outq_delete(outp);
		else
			outp = outp->next;

		/* Remove the timer if the output queue is empty */
		if (mip6_outq == NULL) {
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
			untimeout(mip6_timer_outqueue, (void *)NULL,
				  mip6_timer_outqueue_handle);
			callout_handle_init(&mip6_timer_outqueue_handle);
#else
			untimeout(mip6_timer_outqueue, (void *)NULL);
#endif
		}
	}
	splx(s);
}



/*
 ##############################################################################
 #
 # TIMER FUNCTIONS
 # These functions are called on a regular basis. They operate on the lists,
 # e.g. reducing timer counters and removing entries from the list if needed.
 #
 ##############################################################################
 */

/*
 ******************************************************************************
 * Function:    mip6_timer_outqueue
 * Description: Search the outqueue for entries that have not been sent yet and
 *              for which the lifetime has expired.
 *              If there are more entries left in the output queue, call this
 *              function again every MIP6_OUTQ_INTERVAL until the queue is
 *              empty.
+ * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_outqueue(arg) +void *arg; /* Not used */ +{ + struct mip6_output *outp; /* Ptr to current mip6 output element */ + struct ip6_pktopts *pktopt; /* Packet Ext headers, options and data */ + struct mip6_opt *opt; /* Destination option */ + struct mbuf *m_ip6; /* IPv6 header stored in a mbuf */ + int error; /* Error code from function call */ + int off; /* Offset from start of DH (byte) */ + +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + /* Go through the entire output queue and send all packets that + have not been sent. */ + for (outp = mip6_outq; outp;) { + if (outp->flag == NOT_SENT) + outp->lifetime -= MIP6_OUTQ_INTERVAL; + + if ((outp->flag == NOT_SENT) && (outp->lifetime <= 0)) { + m_ip6 = mip6_create_ip6hdr(&outp->ip6_src, + &outp->ip6_dst, + IPPROTO_NONE); + if (m_ip6 == NULL) { + outp = outp->next; + continue; + } + + /* Allocate packet extension header. 
*/ + pktopt = (struct ip6_pktopts *) + MALLOC(sizeof(struct ip6_pktopts), + M_TEMP, M_WAITOK); + if (pktopt == NULL) { + _FREE(m_ip6, M_TEMP); + outp = outp->next; + continue; + } + bzero(pktopt, sizeof(struct ip6_pktopts)); + pktopt->ip6po_hlim = -1; /* -1 default hop limit */ + + opt = (struct mip6_opt *)outp->opt; + off = 2; + if (opt->type == IP6OPT_BINDING_UPDATE) { + /* Add my BU option to the Dest Header */ + error = mip6_add_bu(&pktopt->ip6po_dest2, + &off, + (struct mip6_opt_bu *) + outp->opt, + outp->subopt); + if (error) { + _FREE(m_ip6, M_TEMP); + _FREE(pktopt, M_TEMP); + outp = outp->next; + continue; + } + } else if (opt->type == IP6OPT_BINDING_ACK) { + /* Add my BA option to the Dest Header */ + error = mip6_add_ba(&pktopt->ip6po_dest2, + &off, + (struct mip6_opt_ba *) + outp->opt, + outp->subopt); + if (error) { + _FREE(m_ip6, M_TEMP); + _FREE(pktopt, M_TEMP); + outp = outp->next; + continue; + } + } else if (opt->type == IP6OPT_BINDING_REQ) { + /* Add my BR option to the Dest Header */ + error = mip6_add_br(&pktopt->ip6po_dest2, + &off, + (struct mip6_opt_br *) + outp->opt, + outp->subopt); + if (error) { + _FREE(m_ip6, M_TEMP); + _FREE(pktopt, M_TEMP); + outp = outp->next; + continue; + } + } + + /* Disable the search of the output queue to make + sure that we not end up in an infinite loop. */ + mip6_config.enable_outq = 0; + error = ip6_output(m_ip6, pktopt, NULL, 0, NULL, NULL); + if (error) { + _FREE(m_ip6, M_TEMP); + _FREE(pktopt, M_TEMP); + mip6_config.enable_outq = 1; + outp = outp->next; + log(LOG_ERR, + "%s: ip6_output function failed, " + "error = %d\n", __FUNCTION__, error); + continue; + } + mip6_config.enable_outq = 1; + outp->flag = SENT; +#if MIP6_DEBUG + mip6_debug("\nEntry from Output Queue sent\n"); +#endif + } + + /* Remove entry from the queue that has been sent. 
*/ + if (outp->flag == SENT) + outp = mip6_outq_delete(outp); + else + outp = outp->next; + } + + if (mip6_outq != NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_outqueue_handle = +#endif + timeout(mip6_timer_outqueue, (void *)0, + hz * (MIP6_OUTQ_INTERVAL/10)); + } +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + + + +/* + ****************************************************************************** + * Function: mip6_timer_bul + * Description: Search the Binding Update list for entries for which the life- + * time or refresh time has expired. + * If there are more entries left in the output queue, call this + * fuction again once every second until the queue is empty. + * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_bul(arg) +void *arg; /* Not used */ +{ + struct mip6_bul *bulp; /* Ptr to current BUL element */ + struct mip6_bul *new_bulp; /* Pointer to new BUL entry */ + struct mip6_esm *esp; /* Home address entry */ + struct mip6_opt_bu *bu_opt; /* BU option to be sent */ + struct in6_addr *dst_addr; /* Destination address for BU */ + struct mip6_subbuf *subbuf; /* Buffer containing sub-options */ + struct mip6_bu_data bu_data; /* Data used when a BU is created */ + struct mip6_subopt_coa altcoa; /* Alternate care-of address */ + u_int32_t lifetime; + int max_index, s; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) + long time_second = time.tv_sec; +#endif +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* Go through the entire BUL and check if any BU have to be sent. */ + subbuf = NULL; + s = splnet(); + for (bulp = mip6_bulq; bulp;) { + /* Find the correct event-state machine */ + esp = mip6_esm_find(&bulp->bind_addr); + if (esp == NULL) { + bulp = bulp->next; + continue; + } + + /* If infinity lifetime, don't decrement it. 
*/ + if (bulp->lifetime == 0xffffffff) { + bulp = bulp->next; + continue; + } + + bulp->lifetime -= 1; + if (bulp->lifetime == 0) { + if ((bulp->hr_flag) && (esp->type == PERMANENT)) { + /* If this BUL entry is for the Home Agent + a new one must be created before the old + is deleted. The new entry shall try to + register the MN again. + This is not done for the previous default + router. */ + if ((esp->state == MIP6_STATE_REG) || + (esp->state == MIP6_STATE_REREG) || + (esp->state == MIP6_STATE_REGNEWCOA) || + (esp->state == MIP6_STATE_NOTREG)) + esp->state = MIP6_STATE_NOTREG; + else if ((esp->state == MIP6_STATE_HOME) || + (esp->state == MIP6_STATE_DEREG)) + esp->state = MIP6_STATE_DEREG; + else + esp->state = MIP6_STATE_UNDEF; + + /* If Dynamic Home Agent Address Discovery, + pick the dst address from the esp->dad list + and set index. */ + if (esp->dad) { + dst_addr = &esp->dad->hal-> + halist[esp->dad->index]; + max_index = (esp->dad->hal->len / + IP6OPT_HALEN) - 1; + if (esp->dad->index == max_index) + esp->dad->index = 0; + else + esp->dad->index += 1; + lifetime = MIP6_BU_LIFETIME_DHAAD; + } else { + dst_addr = &esp->ha_hn; + lifetime = mip6_config.hr_lifetime; + } + + /* Send BU to the decided destination */ + new_bulp = mip6_bul_create(dst_addr, + &esp->home_addr, + &bulp->coa, + lifetime, 1); + if (new_bulp == NULL) + break; + + bu_data.prefix_len = esp->prefix_len; + bu_data.ack = 1; + + if (mip6_send_bu(new_bulp, &bu_data, NULL) + != 0) + break; + } + + /* The BUL entry must be deleted. */ + bulp = mip6_bul_delete(bulp); + continue; + } + + if (bulp->refreshtime > 0) + bulp->refreshtime -= 1; + + /* Skip the bul entry if its not allowed to send any further + BUs to the host. */ + if (bulp->bu_flag == 0) { + bulp = bulp->next; + continue; + } + + /* Check if a BU has already been sent to the destination. 
*/ + if (bulp->state != NULL) { + bulp->state->time_left -= 1; + if (bulp->state->time_left == 0) { + if (bulp->hr_flag) { + /* This is a BUL entry for the HA */ + bulp->state->bu_opt->lifetime = + bulp->lifetime; + bulp->state->bu_opt->seqno++; + if (mip6_send_bu(bulp, NULL, NULL) + != 0) + break; + + if (bulp->state->ba_timeout < + MIP6_MAX_BINDACK_TIMEOUT) + bulp->state->ba_timeout = + 2 * bulp->state-> + ba_timeout; + else + bulp->state->ba_timeout = + (u_int8_t)MIP6_MAX_BINDACK_TIMEOUT; + + bulp->state->time_left = bulp->state->ba_timeout; + } else { + /* This is a BUL entry for a Correspondent Node */ + if (bulp->state->ba_timeout >= MIP6_MAX_BINDACK_TIMEOUT) { + /* Do NOT continue to retransmit the BU */ + bulp->no_of_sent_bu = 0; + mip6_clear_retrans(bulp); + } else { + bulp->state->bu_opt->lifetime = bulp->lifetime; + bulp->state->bu_opt->seqno++; + if (mip6_send_bu(bulp, NULL, NULL) != 0) + break; + + bulp->state->ba_timeout = 2 * bulp->state->ba_timeout; + bulp->state->time_left = bulp->state->ba_timeout; + } + } + } + bulp = bulp->next; + continue; + } + + /* Refreshtime has expired and no BU has been sent to the HA + so far. Then we do it. */ + if (bulp->refreshtime == 0) { + /* Store sub-option for BU option. */ + altcoa.type = IP6SUBOPT_ALTCOA; + altcoa.len = IP6OPT_COALEN; + altcoa.coa = bulp->coa; + if (mip6_store_subopt(&subbuf, (caddr_t)&altcoa)) { + if (subbuf) + _FREE(subbuf, M_TEMP); + break; + } + + if (bulp->hr_flag) { + /* Since this is an entry for the Home Agent a new BU + is being sent for which we require the receiver to + respond with a BA. */ + bu_data.prefix_len = esp->prefix_len; + bu_data.ack = 1; + + bulp->lifetime = mip6_config.hr_lifetime; + if (mip6_send_bu(bulp, &bu_data, subbuf) != 0) + break; + } else { + /* This is an entry for a CN that has requested a BU to be + sent when the refreshtime expires. We will NOT require + this BU to be acknowledged. 
*/ + bulp->seqno += 1; + bu_opt = mip6_create_bu(0, 0, 0, bulp->seqno, + mip6_config.hr_lifetime); + if (bu_opt == NULL) + break; + + bulp->lasttime = time_second; + mip6_outq_create(bu_opt, subbuf, &bulp->bind_addr, + &bulp->dst_addr, NOT_SENT); + } + bulp = bulp->next; + continue; + } + bulp = bulp->next; + } + + if (mip6_bulq != NULL) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_bul_handle = +#endif + timeout(mip6_timer_bul, (void *)0, hz); + } + splx(s); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + + + +/* + ****************************************************************************** + * Function: mip6_timer_esm + * Description: This function is called when an event-state machine has been + * created for sending a BU to the previous default router. The + * event-state machine entry is needed for the correct addition + * of the home address option for outgoing packets. + * When the life time for the BU expires the event-state machine + * is removed as well. + * Ret value: - + ****************************************************************************** + */ +void +mip6_timer_esm(arg) +void *arg; /* Not used */ +{ + struct mip6_esm *esp; /* Current event-state machine entry */ + int s, start_timer; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + /* Go through the entire list of event-state machines. */ + s = splnet(); + for (esp = mip6_esmq; esp;) { + if (esp->type == TEMPORARY) { + esp->lifetime -= 1; + + if (esp->lifetime == 0) + esp = mip6_esm_delete(esp); + else + esp = esp->next; + continue; + } + esp = esp->next; + } + + /* Only start the timer if there is a TEMPORARY machine in the list. 
*/ + start_timer = 0; + for (esp = mip6_esmq; esp; esp = esp->next) { + if (esp->type == TEMPORARY) { + start_timer = 1; + break; + } + } + + if (start_timer) { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + mip6_timer_esm_handle = +#endif + timeout(mip6_timer_esm, (void *)0, hz); + } + splx(s); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + + + +/* + ############################################################################## + # + # IOCTL FUNCTIONS + # These functions are called from mip6_ioctl. + # + ############################################################################## + */ + +/* + ****************************************************************************** + * Function: mip6_write_config_data_mn + * Description: This function is called to write certain config values for + * MIPv6. The data is written into the global config structure. + * Ret value: - + ****************************************************************************** + */ +int mip6_write_config_data_mn(u_long cmd, void *arg) +{ + struct mip6_esm *p; + struct ifnet *ifp; + struct mip6_input_data *input; + struct mip6_static_addr *np; + char ifn[10]; + int retval = 0; + struct in6_addr any = in6addr_any; + + switch (cmd) { + case SIOCACOADDR_MIP6: + input = (struct mip6_input_data *) arg; + np = (struct mip6_static_addr *) + MALLOC(sizeof(struct mip6_static_addr), + M_TEMP, M_WAITOK); + if (np == NULL) + return ENOBUFS; + + np->ip6_addr = input->ip6_addr; + np->prefix_len = input->prefix_len; + np->ifp = ifunit(input->if_name); + if (np->ifp == NULL) { + strncpy(ifn, input->if_name, sizeof(ifn)); + return EINVAL; + } + LIST_INSERT_HEAD(&mip6_config.fna_list, np, addr_entry); + break; + + case SIOCAHOMEADDR_MIP6: + input = (struct mip6_input_data *) arg; + ifp = ifunit(input->if_name); + if (ifp == NULL) + return EINVAL; + + p = mip6_esm_create(ifp, &input->ha_addr, &any, + &input->ip6_addr, input->prefix_len, + MIP6_STATE_UNDEF, PERMANENT, 0xFFFF); + if (p == 
NULL) + return EINVAL; /*XXX*/ + + break; + + case SIOCSBULIFETIME_MIP6: + mip6_config.bu_lifetime = ((struct mip6_input_data *)arg)->value; + break; + + case SIOCSHRLIFETIME_MIP6: + mip6_config.hr_lifetime = ((struct mip6_input_data *)arg)->value; + break; + + case SIOCDCOADDR_MIP6: + input = (struct mip6_input_data *) arg; + for (np = mip6_config.fna_list.lh_first; np != NULL; + np = np->addr_entry.le_next){ + if (IN6_ARE_ADDR_EQUAL(&input->ip6_addr, &np->ip6_addr)) + break; + } + if (np == NULL){ + retval = EADDRNOTAVAIL; + return retval; + } + LIST_REMOVE(np, addr_entry); + break; + } + return retval; +} + + + +/* + ****************************************************************************** + * Function: mip6_clear_config_data_mn + * Description: This function is called to clear internal lists handled by + * MIPv6. + * Ret value: - + ****************************************************************************** + */ +int mip6_clear_config_data_mn(u_long cmd, caddr_t data) +{ + int retval = 0; + int s; + + struct mip6_static_addr *np; + struct mip6_bul *bulp; + + s = splnet(); + switch (cmd) { + case SIOCSFORADDRFLUSH_MIP6: + for (np = LIST_FIRST(&mip6_config.fna_list); np; + np = LIST_NEXT(np, addr_entry)) { + LIST_REMOVE(np, addr_entry); + } + break; + + case SIOCSHADDRFLUSH_MIP6: + retval = EINVAL; + break; + + case SIOCSBULISTFLUSH_MIP6: + for (bulp = mip6_bulq; bulp;) + bulp = mip6_bul_delete(bulp); + break; + } + splx(s); + return retval; +} + + + +/* + ****************************************************************************** + * Function: mip6_enable_func_mn + * Description: This function is called to enable or disable certain functions + * in mip6. The data is written into the global config struct. 
+ * Ret value: - + ****************************************************************************** + */ +int mip6_enable_func_mn(u_long cmd, caddr_t data) +{ + int enable; + int retval = 0; + + enable = ((struct mip6_input_data *)data)->value; + + switch (cmd) { + case SIOCSPROMMODE_MIP6: + mip6_config.enable_prom_mode = enable; + break; + + case SIOCSBU2CN_MIP6: + mip6_config.enable_bu_to_cn = enable; + break; + + case SIOCSREVTUNNEL_MIP6: + mip6_config.enable_rev_tunnel = enable; + break; + + case SIOCSAUTOCONFIG_MIP6: + mip6_config.autoconfig = enable; + break; + + case SIOCSEAGERMD_MIP6: + mip6_eager_md(enable); + break; + } + return retval; +} diff --git a/bsd/netinet6/mld6.c b/bsd/netinet6/mld6.c new file mode 100644 index 000000000..6117d50bc --- /dev/null +++ b/bsd/netinet6/mld6.c @@ -0,0 +1,494 @@ +/* $KAME: mld6.c,v 1.17 2000/03/01 12:37:25 itojun Exp $ */ + +/* + * Copyright (C) 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1988 Stephen Deering. + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Stephen Deering of Stanford University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)igmp.c 8.1 (Berkeley) 7/19/93 + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +/* + * Protocol constants + */ + +/* denotes that the MLD max response delay field specifies time in milliseconds */ +#define MLD6_TIMER_SCALE 1000 +/* + * time between repetitions of a node's initial report of interest in a + * multicast address(in seconds) + */ +#define MLD6_UNSOLICITED_REPORT_INTERVAL 10 + +static struct ip6_pktopts ip6_opts; +static int mld6_timers_are_running; +/* XXX: These are necessary for KAME's link-local hack */ +static struct in6_addr mld6_all_nodes_linklocal = IN6ADDR_LINKLOCAL_ALLNODES_INIT; +static struct in6_addr mld6_all_routers_linklocal = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; + +static void mld6_sendpkt __P((struct in6_multi *, int, const struct in6_addr *)); + +void +mld6_init() +{ + static u_int8_t hbh_buf[8]; + struct ip6_hbh *hbh = (struct ip6_hbh *)hbh_buf; + u_int16_t rtalert_code = htons((u_int16_t)IP6OPT_RTALERT_MLD); + + mld6_timers_are_running = 0; + + /* ip6h_nxt will be fill in later */ + hbh->ip6h_len = 0; /* (8 >> 3) - 1 */ + + /* XXX: grotty hard coding... 
*/ + hbh_buf[2] = IP6OPT_PADN; /* 2 byte padding */ + hbh_buf[3] = 0; + hbh_buf[4] = IP6OPT_RTALERT; + hbh_buf[5] = IP6OPT_RTALERT_LEN - 2; + bcopy((caddr_t)&rtalert_code, &hbh_buf[6], sizeof(u_int16_t)); + + ip6_opts.ip6po_hbh = hbh; + /* We will specify the hoplimit by a multicast option. */ + ip6_opts.ip6po_hlim = -1; +} + +void +mld6_start_listening(in6m) + struct in6_multi *in6m; +{ +#ifdef __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + + /* + * RFC2710 page 10: + * The node never sends a Report or Done for the link-scope all-nodes + * address. + * MLD messages are never sent for multicast addresses whose scope is 0 + * (reserved) or 1 (node-local). + */ + mld6_all_nodes_linklocal.s6_addr16[1] = + htons(in6m->in6m_ifp->if_index); /* XXX */ + if (IN6_ARE_ADDR_EQUAL(&in6m->in6m_addr, &mld6_all_nodes_linklocal) || + IPV6_ADDR_MC_SCOPE(&in6m->in6m_addr) < IPV6_ADDR_SCOPE_LINKLOCAL) { + in6m->in6m_timer = 0; + in6m->in6m_state = MLD6_OTHERLISTENER; + } else { + mld6_sendpkt(in6m, MLD6_LISTENER_REPORT, NULL); + in6m->in6m_timer = MLD6_RANDOM_DELAY( + MLD6_UNSOLICITED_REPORT_INTERVAL * PR_FASTHZ); + in6m->in6m_state = MLD6_IREPORTEDLAST; + mld6_timers_are_running = 1; + } + splx(s); +} + +void +mld6_stop_listening(in6m) + struct in6_multi *in6m; +{ + mld6_all_nodes_linklocal.s6_addr16[1] = + htons(in6m->in6m_ifp->if_index); /* XXX */ + mld6_all_routers_linklocal.s6_addr16[1] = + htons(in6m->in6m_ifp->if_index); /* XXX: necessary when mrouting */ + + if (in6m->in6m_state == MLD6_IREPORTEDLAST && + (!IN6_ARE_ADDR_EQUAL(&in6m->in6m_addr, &mld6_all_nodes_linklocal)) && + IPV6_ADDR_MC_SCOPE(&in6m->in6m_addr) > IPV6_ADDR_SCOPE_NODELOCAL) + mld6_sendpkt(in6m, MLD6_LISTENER_DONE, + &mld6_all_routers_linklocal); +} + +void +mld6_input(m, off) + struct mbuf *m; + int off; +{ + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + struct mld6_hdr *mldh; + struct ifnet *ifp = m->m_pkthdr.rcvif; + struct in6_multi *in6m; + struct in6_ifaddr *ia; +#if 
defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + struct ifmultiaddr *ifma; +#endif + int timer; /* timer value in the MLD query header */ + + /* source address validation */ + if (!IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src)) { + log(LOG_ERR, + "mld6_input: src %s is not link-local\n", + ip6_sprintf(&ip6->ip6_src)); + /* + * spec (RFC2710) does not explicitly + * specify to discard the packet from a non link-local + * source address. But we believe it's expected to do so. + */ + m_freem(m); + return; + } + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, sizeof(*mldh),); + mldh = (struct mld6_hdr *)(mtod(m, caddr_t) + off); +#else + IP6_EXTHDR_GET(mldh, struct mld6_hdr *, m, off, sizeof(*mldh)); + if (mldh == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } +#endif + + /* + * In the MLD6 specification, there are 3 states and a flag. + * + * In Non-Listener state, we simply don't have a membership record. + * In Delaying Listener state, our timer is running (in6m->in6m_timer) + * In Idle Listener state, our timer is not running (in6m->in6m_timer==0) + * + * The flag is in6m->in6m_state, it is set to MLD6_OTHERLISTENER if + * we have heard a report from another member, or MLD6_IREPORTEDLAST + * if we sent the last report. + */ + switch(mldh->mld6_type) { + case MLD6_LISTENER_QUERY: + if (ifp->if_flags & IFF_LOOPBACK) + break; + + if (!IN6_IS_ADDR_UNSPECIFIED(&mldh->mld6_addr) && + !IN6_IS_ADDR_MULTICAST(&mldh->mld6_addr)) + break; /* print error or log stat? */ + if (IN6_IS_ADDR_MC_LINKLOCAL(&mldh->mld6_addr)) + mldh->mld6_addr.s6_addr16[1] = + htons(ifp->if_index); /* XXX */ + + /* + * - Start the timers in all of our membership records + * that the query applies to for the interface on + * which the query arrived excl. those that belong + * to the "all-nodes" group (ff02::1). + * - Restart any timer that is already running but has + * A value longer than the requested timeout. 
+ * - Use the value specified in the query message as + * the maximum timeout. + */ + IFP_TO_IA6(ifp, ia); + if (ia == NULL) + break; + + /* + * XXX: System timer resolution is too low to handle Max + * Response Delay, so set 1 to the internal timer even if + * the calculated value equals to zero when Max Response + * Delay is positive. + */ + timer = ntohs(mldh->mld6_maxdelay)*PR_FASTHZ/MLD6_TIMER_SCALE; + if (timer == 0 && mldh->mld6_maxdelay) + timer = 1; + mld6_all_nodes_linklocal.s6_addr16[1] = + htons(ifp->if_index); /* XXX */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) +#else + for (in6m = ia->ia6_multiaddrs.lh_first; + in6m; + in6m = in6m->in6m_entry.le_next) +#endif + { +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + if (ifma->ifma_addr->sa_family != AF_INET6) + continue; + in6m = (struct in6_multi *)ifma->ifma_protospec; + if (IN6_ARE_ADDR_EQUAL(&in6m->in6m_addr, + &mld6_all_nodes_linklocal) || + IPV6_ADDR_MC_SCOPE(&in6m->in6m_addr) < + IPV6_ADDR_SCOPE_LINKLOCAL) + continue; +#else + if (IN6_ARE_ADDR_EQUAL(&in6m->in6m_addr, + &mld6_all_nodes_linklocal) || + IPV6_ADDR_MC_SCOPE(&in6m->in6m_addr) < + IPV6_ADDR_SCOPE_LINKLOCAL) + continue; +#endif + + if (IN6_IS_ADDR_UNSPECIFIED(&mldh->mld6_addr) || + IN6_ARE_ADDR_EQUAL(&mldh->mld6_addr, + &in6m->in6m_addr)) + { + if (timer == 0) { + /* send a report immediately */ + mld6_sendpkt(in6m, MLD6_LISTENER_REPORT, + NULL); + in6m->in6m_timer = 0; /* reset timer */ + in6m->in6m_state = MLD6_IREPORTEDLAST; + } + else if (in6m->in6m_timer == 0 || /*idle state*/ + in6m->in6m_timer > timer) { + in6m->in6m_timer = + MLD6_RANDOM_DELAY(timer); + mld6_timers_are_running = 1; + } + } + } + + if (IN6_IS_ADDR_MC_LINKLOCAL(&mldh->mld6_addr)) + mldh->mld6_addr.s6_addr16[1] = 0; /* XXX */ + break; + case MLD6_LISTENER_REPORT: + /* + * For fast leave to work, we have to know that we are the + * last person to send a report for 
this group. Reports + * can potentially get looped back if we are a multicast + * router, so discard reports sourced by me. + * Note that it is impossible to check IFF_LOOPBACK flag of + * ifp for this purpose, since ip6_mloopback pass the physical + * interface to looutput. + */ + if (m->m_flags & M_LOOP) /* XXX: grotty flag, but efficient */ + break; + + if (!IN6_IS_ADDR_MULTICAST(&mldh->mld6_addr)) + break; + + if (IN6_IS_ADDR_MC_LINKLOCAL(&mldh->mld6_addr)) + mldh->mld6_addr.s6_addr16[1] = + htons(ifp->if_index); /* XXX */ + /* + * If we belong to the group being reported, stop + * our timer for that group. + */ + IN6_LOOKUP_MULTI(mldh->mld6_addr, ifp, in6m); + if (in6m) { + in6m->in6m_timer = 0; /* transit to idle state */ + in6m->in6m_state = MLD6_OTHERLISTENER; /* clear flag */ + } + + if (IN6_IS_ADDR_MC_LINKLOCAL(&mldh->mld6_addr)) + mldh->mld6_addr.s6_addr16[1] = 0; /* XXX */ + break; + default: /* this is impossible */ + log(LOG_ERR, "mld6_input: illegal type(%d)", mldh->mld6_type); + break; + } + + m_freem(m); +} + +void +mld6_fasttimeo() +{ + register struct in6_multi *in6m; + struct in6_multistep step; + int s; + + /* + * Quick check to see if any work needs to be done, in order + * to minimize the overhead of fasttimo processing. 
+ */ + if (!mld6_timers_are_running) + return; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + mld6_timers_are_running = 0; + IN6_FIRST_MULTI(step, in6m); + while (in6m != NULL) { + if (in6m->in6m_timer == 0) { + /* do nothing */ + } else if (--in6m->in6m_timer == 0) { + mld6_sendpkt(in6m, MLD6_LISTENER_REPORT, NULL); + in6m->in6m_state = MLD6_IREPORTEDLAST; + } else { + mld6_timers_are_running = 1; + } + IN6_NEXT_MULTI(step, in6m); + } + splx(s); +} + +static void +mld6_sendpkt(in6m, type, dst) + struct in6_multi *in6m; + int type; + const struct in6_addr *dst; +{ + struct mbuf *mh, *md; + struct mld6_hdr *mldh; + struct ip6_hdr *ip6; + struct ip6_moptions im6o; + struct in6_ifaddr *ia; + struct ifnet *ifp = in6m->in6m_ifp; + struct ifnet *outif = NULL; + + /* + * At first, find a link local address on the outgoing interface + * to use as the source address of the MLD packet. + */ + if ((ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST)) + == NULL) + return; + + /* + * Allocate mbufs to store ip6 header and MLD header. + * We allocate 2 mbufs and make chain in advance because + * it is more convenient when inserting the hop-by-hop option later. + */ + MGETHDR(mh, M_DONTWAIT, MT_HEADER); + if (mh == NULL) + return; + MGET(md, M_DONTWAIT, MT_DATA); + if (md == NULL) { + m_free(mh); + return; + } + mh->m_next = md; + + mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld6_hdr); + mh->m_len = sizeof(struct ip6_hdr); + MH_ALIGN(mh, sizeof(struct ip6_hdr)); + + /* fill in the ip6 header */ + ip6 = mtod(mh, struct ip6_hdr *); + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + /* ip6_plen will be set later */ + ip6->ip6_nxt = IPPROTO_ICMPV6; + /* ip6_hlim will be set by im6o.im6o_multicast_hlim */ + ip6->ip6_src = ia->ia_addr.sin6_addr; + ip6->ip6_dst = dst ? 
*dst : in6m->in6m_addr; + + /* fill in the MLD header */ + md->m_len = sizeof(struct mld6_hdr); + mldh = mtod(md, struct mld6_hdr *); + mldh->mld6_type = type; + mldh->mld6_code = 0; + mldh->mld6_cksum = 0; + /* XXX: we assume the function will not be called for query messages */ + mldh->mld6_maxdelay = 0; + mldh->mld6_reserved = 0; + mldh->mld6_addr = in6m->in6m_addr; + if (IN6_IS_ADDR_MC_LINKLOCAL(&mldh->mld6_addr)) + mldh->mld6_addr.s6_addr16[1] = 0; /* XXX */ + mldh->mld6_cksum = in6_cksum(mh, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), + sizeof(struct mld6_hdr)); + + /* construct multicast option */ + bzero(&im6o, sizeof(im6o)); + im6o.im6o_multicast_ifp = ifp; + im6o.im6o_multicast_hlim = 1; + + /* + * Request loopback of the report if we are acting as a multicast + * router, so that the process-level routing daemon can hear it. + */ + im6o.im6o_multicast_loop = (ip6_mrouter != NULL); + + /* increment output statictics */ + icmp6stat.icp6s_outhist[type]++; + + ip6_output(mh, &ip6_opts, NULL, 0, &im6o, &outif); + if (outif) { + icmp6_ifstat_inc(outif, ifs6_out_msg); + switch(type) { + case MLD6_LISTENER_QUERY: + icmp6_ifstat_inc(outif, ifs6_out_mldquery); + break; + case MLD6_LISTENER_REPORT: + icmp6_ifstat_inc(outif, ifs6_out_mldreport); + break; + case MLD6_LISTENER_DONE: + icmp6_ifstat_inc(outif, ifs6_out_mlddone); + break; + } + } +} diff --git a/bsd/netinet6/mld6_var.h b/bsd/netinet6/mld6_var.h new file mode 100644 index 000000000..a9f467e4b --- /dev/null +++ b/bsd/netinet6/mld6_var.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _NETINET6_MLD6_VAR_H_ +#define _NETINET6_MLD6_VAR_H_ + +#if KERNEL + +#define MLD6_RANDOM_DELAY(X) (random() % (X) + 1) + +/* + * States for MLD stop-listening processing + */ +#define MLD6_OTHERLISTENER 0 +#define MLD6_IREPORTEDLAST 1 + +void mld6_init __P((void)); +void mld6_input __P((struct mbuf *, int)); +void mld6_start_listening __P((struct in6_multi *)); +void mld6_stop_listening __P((struct in6_multi *)); +void mld6_fasttimeo __P((void)); +#endif /* KERNEL */ + +#endif /* _NETINET6_MLD6_VAR_H_ */ diff --git a/bsd/netinet6/natpt_defs.h b/bsd/netinet6/natpt_defs.h new file mode 100644 index 000000000..39fa41c4f --- /dev/null +++ b/bsd/netinet6/natpt_defs.h @@ -0,0 +1,319 @@ +/* $KAME: natpt_defs.h,v 1.7 2000/03/25 07:23:54 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#define SAME (0) + +#define NATPT_MAXHASH (397) +#define MAXTSLOTENTRY (4096) + +#define SZSIN6 sizeof(struct sockaddr_in6) +#define SZSIN sizeof(struct sockaddr_in) + +#define CAR(p) ((p)->car) +#define CDR(p) ((p)->cdr) +#define CAAR(p) (CAR(CAR(p))) +#define CADR(p) (CAR(CDR(p))) +#define CDAR(p) (CDR(CAR(p))) +#define CDDR(p) (CDR(CDR(p))) + +#ifndef TCP6 +#define tcp6hdr tcphdr +#endif + + +#if defined(NATPT_ASSERT) && (NATPT_ASSERT != 0) +# if defined(__STDC__) +# define ASSERT(e) ((e) ? (void)0 : natpt_assert(__FILE__, __LINE__, #e)) +# else /* PCC */ +# define ASSERT(e) ((e) ? 
(void)0 : natpt_assert(__FILE__, __LINE__, "e")) +# endif +#else +# undef NATPT_ASSERT +# define ASSERT(e) ((void)0) +#endif + + +#define IN4_ARE_ADDR_EQUAL(a, b) \ + ((a)->s_addr == (b)->s_addr) + + +#define ReturnEnobufs(m) if (m == NULL) { errno = ENOBUFS; return (NULL); } + + +#if (defined(KERNEL)) || (defined(_KERNEL)) + +#define isDebug(d) (natpt_debug & (d)) +#define isDump(d) (natpt_dump & (d)) + +#define D_DIVEIN4 0x00000001 +#define D_PEEKOUTGOINGV4 0x00000002 +#define D_TRANSLATINGIPV4 0x00000010 +#define D_TRANSLATEDIPV4 0x00001000 + +#define D_DIVEIN6 0x00010000 +#define D_IN6REJECT 0x00020000 +#define D_IN6ACCEPT 0x00040000 +#define D_PEEKOUTGOINGV6 0x00080000 +#define D_TRANSLATINGIPV6 0x00100000 +#define D_TRANSLATEDIPV6 0x01000000 + +#define fixSuMiReICMPBug (1) + +#ifdef fixSuMiReICMPBug +#define IPDST (0xc48db2cb) /* == 203.178.141.196 XXX */ +#define ICMPSRC (0x02c410ac) /* == 172.16.196.2 XXX */ +#endif + +#endif /* defined(KERNEL) */ + +/* + * OS dependencies + */ + +#ifdef KERNEL + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#define rcb_list list +#endif + +#ifdef __NetBSD__ +/* + * Macros for type conversion + * dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX) + */ +#define dtom(x) ((struct mbuf *)((long)(x) & ~(MSIZE-1))) +#endif + +#endif /* _KERNEL */ + + +/* + * Structure definitions. + */ + +typedef struct _cell +{ + struct _cell *car; + struct _cell *cdr; +} Cell; + + +/* Interface Box structure */ + +struct ifBox +{ + int side; +#define noSide (0) +#define inSide (1) +#define outSide (2) + char ifName[IFNAMSIZ]; + struct ifnet *ifnet; +}; + + +/* IP ... 
*/ + +struct _cv /* 28[byte] */ +{ + u_char ip_p; /* IPPROTO_(ICMP[46]|TCP|UDP) */ + u_char ip_payload; /* IPPROTO_(ICMP|TCP|UDP) */ + + u_char inout; +/* #define NATPT_UNSPEC (0) */ +/* #define NATPT_INBOUND (1) */ +/* #define NATPT_OUTBOUND (2) */ + + u_char flags; +#define NATPT_TRACEROUTE (0x01) +#define NATPT_NEEDFRAGMENT (0x02) + + int poff; /* payload offset */ + int plen; /* payload length */ + + struct mbuf *m; + struct _tSlot *ats; + union + { + struct ip *_ip4; + struct ip6_hdr *_ip6; + } _ip; + union + { + caddr_t _caddr; + struct icmp *_icmp4; + struct icmp6_hdr *_icmp6; + struct tcphdr *_tcp4; + struct tcp6hdr *_tcp6; + struct udphdr *_udp; + } _payload; +}; + + +/* IP address structure */ + +union inaddr /* sizeof(): 16[byte] */ +{ + struct in_addr in4; + struct in6_addr in6; +}; + + +struct pAddr /* sizeof(): 44[byte] */ +{ + u_char ip_p; /* protocol family (within struct _tSlot) */ + u_char sa_family; /* address family (within struct _cSlot) */ + + u_short port[2]; +#define _port0 port[0] +#define _port1 port[1] + +#define _sport port[0] +#define _dport port[1] +#define _eport port[1] + + union inaddr addr[2]; + +#define in4src addr[0].in4 +#define in4dst addr[1].in4 +#define in4Addr addr[0].in4 +#define in4Mask addr[1].in4 +#define in4RangeStart addr[0].in4 +#define in4RangeEnd addr[1].in4 + +#define in6src addr[0].in6 +#define in6dst addr[1].in6 +#define in6Addr addr[0].in6 +#define in6Mask addr[1].in6 + + struct + { + u_char type; +#define ADDR_ANY (0) +#define ADDR_SINGLE (1) +#define ADDR_MASK (2) +#define ADDR_RANGE (3) +#define ADDR_FAITH (4) + + u_char prefix; + } ad; +}; + + +/* Configuration slot entry */ + +struct _cSlot /* sizeof(): 100[byte] */ +{ + u_char flags; +#define NATPT_STATIC (1) /* Rule was set statically */ +#define NATPT_DYNAMIC (2) /* Rule was set dynamically */ +#define NATPT_FAITH (3) + + u_char dir; +#define NATPT_UNSPEC (0) +#define NATPT_INBOUND (1) +#define NATPT_OUTBOUND (2) + + u_char map; +#define NATPT_PORT_MAP 
(0x01) /* Mapping dest port */ +#define NATPT_PORT_MAP_DYNAMIC (0x02) /* Mapping dest port dynamically */ +#define NATPT_ADDR_MAP (0x04) /* Mapping dest addr */ +#define NATPT_ADDR_MAP_DYNAMIC (0x08) /* Mapping dest addr dynamically */ + + u_char proto; + + u_short prefix; + u_short cport; /* current port */ + + struct pAddr local, remote; + struct _cSlotAux *aux; /* place holder */ +}; + + +#if 0 +/* Configuration slot auxiliary entry */ +/* currently not used */ + +struct _cSlotAux /* sizeof(): 0[byte] */ +{ +}; +#endif + + +/* Translation slot entry */ + +struct _tSlot /* sizeof(): 104[byte] */ +{ + u_char ip_payload; + + u_char session; +/* #define NATPT_UNSPEC (0) */ +/* #define NATPT_INBOUND (1) */ +/* #define NATPT_OUTBOUND (2) */ + + u_char remap; +/* #define NATPT_PORT_REMAP (0x01) */ +/* #define NATPT_ADDR_REMAP (0x02) */ + +/* #define NATPT_STATIC (0x1) */ +/* #define NATPT_DYNAMIC (0x2) */ +/* #define NATPT_FAITH (0x3) */ + + struct pAddr local; + struct pAddr remote; + time_t tstamp; + int lcount; + + union + { + struct _idseq + { + n_short icd_id; + n_short icd_seq; + } ih_idseq; + struct _tcpstate *tcp; + } suit; +}; + + +struct _tcpstate /* sizeof(): 28[byte] */ +{ + short _state; + short _session; + u_long _ip_id[2]; /* IP packet Identification */ + /* [0]: current packet */ + /* [1]: just before packet */ + u_short _port[2]; /* [0]:outGoing srcPort, [1]:inComing dstPort */ +/* u_long _iss; initial send sequence number */ + u_long _delta[3]; /* Sequence delta */ + /* [0]: current (cumulative) */ + /* [1]: just before (cumulative) */ + /* [2]: (this time) */ +}; diff --git a/bsd/netinet6/natpt_dispatch.c b/bsd/netinet6/natpt_dispatch.c new file mode 100644 index 000000000..22d775f3f --- /dev/null +++ b/bsd/netinet6/natpt_dispatch.c @@ -0,0 +1,718 @@ +/* $KAME: natpt_dispatch.c,v 1.9 2000/03/25 07:23:54 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#ifdef __FreeBSD__ +# include +#endif + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + + +/* + * + */ + +u_int natpt_debug; +u_int natpt_dump; + +static struct _cell *ifBox; + +struct ifnet *natpt_ip6src; + +struct in6_addr faith_prefix + = {{{0x00000000, 0x00000000, 0x00000000, 0x00000000}}}; +struct in6_addr faith_prefixmask + = {{{0x00000000, 0x00000000, 0x00000000, 0x00000000}}}; +struct in6_addr natpt_prefix + = {{{0x00000000, 0x00000000, 0x00000000, 0x00000000}}}; +struct in6_addr natpt_prefixmask + = {{{0x00000000, 0x00000000, 0x00000000, 0x00000000}}}; + +int natpt_in4 __P((struct mbuf *, struct mbuf **)); +int natpt_in6 __P((struct mbuf *, struct mbuf **)); +int natpt_out4 __P((struct mbuf *, struct mbuf **)); +int natpt_out6 __P((struct mbuf *, struct mbuf **)); +int natpt_incomingIPv4 __P((int, struct mbuf *, struct mbuf **)); +int natpt_outgoingIPv4 __P((int, struct mbuf *, struct mbuf **)); +int natpt_incomingIPv6 __P((int, struct mbuf *, struct mbuf **)); +int natpt_outgoingIPv6 __P((int, struct mbuf *, struct mbuf **)); + +int configCv4 __P((int, struct mbuf *, struct _cv *)); +int configCv6 __P((int, struct mbuf *, struct _cv *)); +caddr_t foundFinalPayload __P((struct mbuf *, int *, int *)); +int sanityCheckIn4 __P((struct _cv *)); +int sanityCheckOut6 __P((struct _cv *)); +int checkMTU __P((struct _cv *)); + + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static MALLOC_DEFINE(M_NATPT, "NATPT", "Network Address Translation - Protocol Translation"); +#endif + + +/* + * + */ + +int +natpt_in4(struct mbuf *m4, struct mbuf **m6) +{ + Cell *p; + struct ifnet *ifnet; + struct ifBox *ifb; + int rv = IPPROTO_IP; + + if (natpt_initialized == 0) + return (IPPROTO_IP); /* goto ours */ + + if (isDump(D_DIVEIN4)) + natpt_logMBuf(LOG_DEBUG, m4, "dive into natpt_in4."); + + ifnet = m4->m_pkthdr.rcvif; + 
for (p = ifBox; p; p = CDR(p)) + { + ifb = (struct ifBox *)CAR(p); + if (ifb->ifnet == ifnet) + { + if (ifb->side == outSide) + rv = natpt_incomingIPv4(NATPT_INBOUND, m4, m6); + else + rv = natpt_outgoingIPv4(NATPT_OUTBOUND, m4, m6); + goto exit; + } + } + + exit:; + return (rv); +} + + +int +natpt_in6(struct mbuf *m6, struct mbuf **m4) +{ + Cell *p; + struct ifnet *ifnet; + struct ifBox *ifb; + struct ip6_hdr *ip6; + struct in6_addr cand; + int rv = IPPROTO_IP; + + if (natpt_initialized == 0) + return (IPPROTO_IP); /* goto mcastcheck */ + + if (isDump(D_DIVEIN6)) + natpt_logMBuf(LOG_DEBUG, m6, "dive into natpt_in6."); + + ip6 = mtod(m6, struct ip6_hdr *); + + cand.s6_addr32[0] = ip6->ip6_dst.s6_addr32[0] & natpt_prefixmask.s6_addr32[0]; + cand.s6_addr32[1] = ip6->ip6_dst.s6_addr32[1] & natpt_prefixmask.s6_addr32[1]; + cand.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] & natpt_prefixmask.s6_addr32[2]; + cand.s6_addr32[3] = ip6->ip6_dst.s6_addr32[3] & natpt_prefixmask.s6_addr32[3]; + + if ((cand.s6_addr32[0] != natpt_prefix.s6_addr32[0]) + || (cand.s6_addr32[1] != natpt_prefix.s6_addr32[1]) + || (cand.s6_addr32[2] != natpt_prefix.s6_addr32[2]) + || (cand.s6_addr32[3] != natpt_prefix.s6_addr32[3])) + { + if (isDump(D_IN6REJECT)) + natpt_logMBuf(LOG_DEBUG, m6, "v6 translation rejected."); + + return (IPPROTO_IP); /* goto mcastcheck */ + } + + if (isDump(D_IN6ACCEPT)) + natpt_logMBuf(LOG_DEBUG, m6, "v6 translation start."); + + ifnet = m6->m_pkthdr.rcvif; + for (p = ifBox; p; p = CDR(p)) + { + ifb = (struct ifBox *)CAR(p); + if (ifb->ifnet == ifnet) + { + if (ifb->side == outSide) + rv = natpt_incomingIPv6(NATPT_INBOUND, m6, m4); + else + rv = natpt_outgoingIPv6(NATPT_OUTBOUND, m6, m4); + goto exit; + } + } + + exit:; + return (rv); +} + + +int +natpt_out4(struct mbuf *m4, struct mbuf **m6) +{ + Cell *p; + struct ifnet *ifnet; + struct ifBox *ifb; + int rv = IPPROTO_IP; + + ifnet = m4->m_pkthdr.rcvif; + for (p = ifBox; p; p = CDR(p)) + { + ifb = (struct ifBox *)CAR(p); + if 
(ifb->ifnet == ifnet) + { + if (ifb->side == outSide) + rv = natpt_outgoingIPv4(NATPT_OUTBOUND, m4, m6); + else + rv = natpt_incomingIPv4(NATPT_INBOUND, m4, m6); + goto exit; + } + } + + exit:; + return (rv); +} + + + +int +natpt_out6(struct mbuf *m6, struct mbuf **m4) +{ + Cell *p; + struct ifnet *ifnet; + struct ifBox *ifb; + int rv = IPPROTO_IP; + + ifnet = m6->m_pkthdr.rcvif; + for (p = ifBox; p; p = CDR(p)) + { + ifb = (struct ifBox *)CAR(p); + if (ifb->ifnet == ifnet) + { + if (ifb->side == outSide) + rv = natpt_outgoingIPv6(NATPT_OUTBOUND, m6, m4); + else + rv = natpt_incomingIPv6(NATPT_INBOUND, m6, m4); + goto exit; + } + } + + exit:; + return (rv); +} + + +int +natpt_incomingIPv4(int sess, struct mbuf *m4, struct mbuf **m6) +{ + int rv; + struct _cv cv; + struct _cSlot *acs; + struct _tSlot *ats; + + if ((rv = configCv4(sess, m4, &cv)) == IPPROTO_MAX) + return (IPPROTO_MAX); /* discard this packet */ + + if ((rv = sanityCheckIn4(&cv)) != IPPROTO_IPV4) + return (IPPROTO_DONE); /* discard this packet without free */ + + cv.ats = lookingForIncomingV4Hash(&cv); + if ((ats = checkTraceroute6Return(&cv)) != NULL) + cv.ats = ats; + + if (cv.ats == NULL) + { + if ((acs = lookingForIncomingV4Rule(&cv)) == NULL) + return (IPPROTO_IP); /* goto ours */ + + if ((cv.ats = internIncomingV4Hash(sess, acs, &cv)) == NULL) + return (IPPROTO_IP); /* goto ours */ + } + + if (checkMTU(&cv) != IPPROTO_IPV4) + return (IPPROTO_DONE); /* discard this packet without free */ + +#ifdef NATPT_NAT + if (cv.ats->local.sa_family == AF_INET) + { + if ((*m6 = translatingIPv4To4(&cv, &cv.ats->local)) != NULL) + return (IPPROTO_IPV4); + } + else +#endif + { + if ((*m6 = translatingIPv4To6(&cv, &cv.ats->local)) != NULL) + return (IPPROTO_IPV6); + } + + return (IPPROTO_MAX); /* discard this packet */ +} + + +int +natpt_outgoingIPv4(int sess, struct mbuf *m4, struct mbuf **m6) +{ + int rv; + struct _cv cv; + struct _cSlot *acs; + struct ip *ip4; + + if ((rv = configCv4(sess, m4, &cv)) == 
IPPROTO_MAX) + return (IPPROTO_MAX); /* discard this packet */ + + if ((cv.ats = lookingForOutgoingV4Hash(&cv)) == NULL) + { + if ((acs = lookingForOutgoingV4Rule(&cv)) == NULL) + return (IPPROTO_IP); /* goto ours */ + + ip4 = mtod(m4, struct ip *); + if (ip4->ip_ttl <= IPTTLDEC) + { + n_long dest = 0; + + icmp_error(m4, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); + return (IPPROTO_MAX); /* discard this packet */ + } + + if ((cv.ats = internOutgoingV4Hash(sess, acs, &cv)) == NULL) + return (IPPROTO_IP); /* goto ours */ + } + +#ifdef NATPT_NAT + if (cv.ats->remote.sa_family == AF_INET) + { + if ((*m6 = translatingIPv4To4(&cv, &cv.ats->remote)) != NULL) + return (IPPROTO_IPV4); + } + else +#endif + { + if ((*m6 = translatingIPv4To6(&cv, &cv.ats->remote)) != NULL) + return (IPPROTO_IPV6); + } + + return (IPPROTO_MAX); /* discard this packet */ +} + + +int +natpt_incomingIPv6(int sess, struct mbuf *m6, struct mbuf **m4) +{ + int rv; + struct _cv cv; + struct _cSlot *acs; + struct ip6_hdr *ip6; + + rv = configCv6(sess, m6, &cv); + if ((rv == IPPROTO_IP) || (rv == IPPROTO_MAX) || (rv == IPPROTO_DONE)) + return (rv); + + if ((cv.ats = lookingForIncomingV6Hash(&cv)) == NULL) + { + if ((acs = lookingForIncomingV6Rule(&cv)) == NULL) + return (IPPROTO_IP); /* goto mcastcheck */ + + ip6 = mtod(m6, struct ip6_hdr *); + if (ip6->ip6_hlim <= IPV6_HLIMDEC) + { + icmp6_error(m6, ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_TRANSIT, 0); + return (IPPROTO_MAX); /* discard this packet */ + } + + if ((cv.ats = internIncomingV6Hash(sess, acs, &cv)) == NULL) + return (IPPROTO_IP); /* goto mcastcheck */ + } + + if ((*m4 = translatingIPv6To4(&cv, &cv.ats->local)) != NULL) + return (IPPROTO_IPV4); + + return (IPPROTO_MAX); /* discard this packet */ +} + + +int +natpt_outgoingIPv6(int sess, struct mbuf *m6, struct mbuf **m4) +{ + int rv; + struct _cv cv6; + struct _cSlot *acs; + + rv = configCv6(sess, m6, &cv6); + if ((rv == IPPROTO_IP) || (rv == IPPROTO_MAX) || (rv == IPPROTO_DONE)) + return 
(rv); + + if ((rv = sanityCheckOut6(&cv6)) != IPPROTO_IPV6) + return (IPPROTO_DONE); /* discard this packet */ + + if (isDump(D_PEEKOUTGOINGV6)) + natpt_logIp6(LOG_DEBUG, cv6._ip._ip6); + + if ((cv6.ats = lookingForOutgoingV6Hash(&cv6)) == NULL) + { + if ((acs = lookingForOutgoingV6Rule(&cv6)) == NULL) + return (IPPROTO_IP); /* goto mcastcheck */ + + if ((cv6.ats = internOutgoingV6Hash(sess, acs, &cv6)) == NULL) + return (IPPROTO_IP); /* goto mcastcheck */ + } + + if ((*m4 = translatingIPv6To4(&cv6, &cv6.ats->remote)) != NULL) + return (IPPROTO_IPV4); + + return (IPPROTO_MAX); /* discard this packet */ +} + + +int +configCv4(int sess, struct mbuf *m, struct _cv *cv) +{ + struct ip *ip = mtod(m, struct ip *); + + bzero(cv, sizeof(struct _cv)); + cv->ip_p = ip->ip_p; + cv->m = m; + cv->_ip._ip4 = ip; + cv->inout = sess; + + switch (ip->ip_p) + { + case IPPROTO_ICMP: + case IPPROTO_TCP: + case IPPROTO_UDP: + cv->ip_payload = ip->ip_p; + cv->_payload._caddr = (caddr_t)((u_long *)ip + ip->ip_hl); + cv->poff = cv->_payload._caddr - (caddr_t)cv->_ip._ip4; + cv->plen = (caddr_t)m->m_data + m->m_len - cv->_payload._caddr; + return (ip->ip_p); + } + + return (IPPROTO_MAX); +} + + +int +configCv6(int sess, struct mbuf *m, struct _cv *cv) +{ + int proto; + int offset; + caddr_t tcpudp; + + bzero(cv, sizeof(struct _cv)); + cv->m = m; + cv->_ip._ip6 = mtod(m, struct ip6_hdr *); + cv->inout = sess; + + if ((tcpudp = foundFinalPayload(m, &proto, &offset))) + { + switch (proto) + { + case IPPROTO_ICMP: + case IPPROTO_ICMPV6: + case IPPROTO_TCP: + case IPPROTO_UDP: + cv->ip_p = proto; + cv->ip_payload = proto; + if (proto == IPPROTO_ICMPV6) + cv->ip_payload = IPPROTO_ICMP; + cv->_payload._caddr = tcpudp; + cv->poff = offset; + cv->plen = (caddr_t)m->m_data + m->m_len - cv->_payload._caddr; + return (proto); + } + } + + return (proto); +} + + +caddr_t +foundFinalPayload(struct mbuf *m, int *proto, int *offset) +{ + int nxt; + int off; + struct ip6_hdr *ip6; + struct ip6_ext *ip6ext; 
+ + ip6 = mtod(m, struct ip6_hdr *); + nxt = ip6->ip6_nxt; + off = sizeof(struct ip6_hdr); + ip6ext = (struct ip6_ext *)((struct ip6_hdr *)(ip6 + 1)); + while (nxt != IPPROTO_NONE && off + sizeof(*ip6ext) < m->m_len) + { + switch (nxt) + { + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: +#if 0 + case IPPROTO_FRAGMENT: +#endif + case IPPROTO_DSTOPTS: + nxt = ip6ext->ip6e_nxt; + off += ip6ext->ip6e_len; + ip6ext = (struct ip6_ext *)(((caddr_t)ip6ext) + ip6ext->ip6e_len); + break; + + case IPPROTO_ICMPV6: + case IPPROTO_TCP: + case IPPROTO_UDP: + *proto = nxt; + *offset = off; + return ((caddr_t)ip6ext); + + default: + *proto = IPPROTO_IP; /* goto mcastcheck */ + *offset = off; + return (NULL); + } + } + + *proto = IPPROTO_IP; /* goto mcastcheck */ + *offset = off; + return (NULL); +} + + +int +sanityCheckIn4(struct _cv *cv4) +{ + struct mbuf *m4 = cv4->m; + struct ip *ip4 = mtod(m4, struct ip *); + + if (ip4->ip_ttl <= IPTTLDEC) + { + n_long dest = 0; + + icmp_error(m4, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, dest, 0); + return (IPPROTO_DONE); /* discard this packet without free */ + } + + return (IPPROTO_IPV4); +} + + +int +sanityCheckOut6(struct _cv *cv6) +{ + struct mbuf *m6 = cv6->m; + struct ip6_hdr *ip6 = mtod(m6, struct ip6_hdr *); + + if (ip6->ip6_hlim <= IPV6_HLIMDEC) + { + icmp6_error(m6, ICMP6_TIME_EXCEEDED, ICMP6_TIME_EXCEED_TRANSIT, 0); + return (IPPROTO_DONE); /* discard this packet */ + } + + return (IPPROTO_IPV6); +} + + +int +checkMTU(struct _cv *cv4) +{ + int mmtu; + struct mbuf *m4 = cv4->m; + struct ip *ip4 = mtod(m4, struct ip *); + + mmtu = IPV6_MMTU - sizeof(struct ip6_hdr) - sizeof(struct ip6_frag); + /* This should be 1232[byte] */ + + if ((m4->m_flags & M_PKTHDR) + && (m4->m_pkthdr.len >= mmtu)) + { + if (ip4->ip_off & IP_DF) + { + n_long dest = 0; + struct ifnet destif; + + bzero(&destif, sizeof(struct ifnet)); + destif.if_mtu = mmtu; + +#ifdef fixSuMiReICMPBug + ip4->ip_dst.s_addr = IPDST; /* XXX */ +#endif + + icmp_error(m4, ICMP_UNREACH, 
ICMP_UNREACH_NEEDFRAG, dest, &destif); + return (IPPROTO_DONE); /* discard this packet without free */ + } + + cv4->flags |= NATPT_NEEDFRAGMENT; /* fragment, then translate */ + } + + return (IPPROTO_IPV4); +} + + +/* + * + */ + + +struct ifBox * +natpt_asIfBox(char *ifName) +{ + Cell *p; + + for (p = ifBox; p; p = CDR(p)) + { + if (strcmp(ifName, ((struct ifBox *)CAR(p))->ifName) == SAME) + return ((struct ifBox *)CAR(p)); + } + + return (NULL); +} + + +struct ifBox * +natpt_setIfBox(char *ifName) +{ + struct ifnet *p; + struct ifBox *q; + char Wow[IFNAMSIZ]; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (p = ifnet; p; p = p->if_next) +#else + for (p = TAILQ_FIRST(&ifnet); p; p = TAILQ_NEXT(p, if_list)) +#endif + { +#ifdef __NetBSD__ + sprintf(Wow, "%s%c", p->if_xname, '\0'); +#else + sprintf(Wow, "%s%d%c", p->if_name, p->if_unit, '\0'); +#endif + if (strcmp(ifName, Wow) != SAME) + continue; + + natpt_ip6src = p; + + MALLOC(q, struct ifBox *, sizeof(struct ifBox), M_NATPT, M_WAITOK); + bzero(q, sizeof(struct ifBox)); + + q->ifnet = p; +#ifdef __NetBSD__ + sprintf(q->ifName, "%s%c", p->if_xname, '\0'); +#else + sprintf(q->ifName, "%s%d%c", p->if_name, p->if_unit, '\0'); +#endif + + LST_hookup_list((Cell**)&ifBox, q); + return (q); + } + return (NULL); +} + + +/* + * + */ + +void +natpt_debugProbe() +{ + printf("DebugProbe"); +} + + +void +natpt_assert(const char *file, int line, const char *failedexpr) +{ + (void)printf("natpt assertion \"%s\" failed: file \"%s\", line %d\n", + failedexpr, file, line); + panic("natpt assertion"); + /* NOTREACHED */ +} + + +/* + * + */ + +void +natpt_initialize() +{ + struct ifnet *ifn; + struct ifaddr *ifa; + struct ifBox *ibox; + + if (natpt_initialized) + return; + + natpt_initialized = 1; + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifn = ifnet; ifn; ifn = ifn->if_next) +#else + for (ifn = TAILQ_FIRST(&ifnet); ifn; ifn = TAILQ_NEXT(ifn, if_list)) +#endif + { +#if 
defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifn->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifn->if_addrlist.tqh_first; ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + if (((ifa->ifa_addr->sa_family) == AF_INET) + || ((ifa->ifa_addr->sa_family) == AF_INET6)) + { + MALLOC(ibox, struct ifBox *, sizeof(struct ifBox), M_TEMP, M_WAITOK); +#ifdef __NetBSD__ + sprintf(ibox->ifName, "%s", ifn->if_xname); +#else + sprintf(ibox->ifName, "%s%d", ifn->if_name, ifn->if_unit); +#endif + ibox->ifnet = ifn; + ibox->side = NULL; + LST_hookup_list(&ifBox, ibox); + goto nextif; + } + } + nextif: + } +} diff --git a/bsd/netinet6/natpt_list.c b/bsd/netinet6/natpt_list.c new file mode 100644 index 000000000..6a9fb33aa --- /dev/null +++ b/bsd/netinet6/natpt_list.c @@ -0,0 +1,54 @@ +/* $KAME: natpt_list.c,v 1.6 2000/03/25 07:23:55 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#if (defined(KERNEL)) || (defined(_KERNEL)) +/* #include */ +#include +#include +#include +#include + +#ifdef __FreeBSD__ +# include +#endif + +#include + +#include +#include + +#include + +#define INCLUDE_NATPT_LIST_C 1 +#include + +#endif /* defined(KERNEL) */ diff --git a/bsd/netinet6/natpt_list.h b/bsd/netinet6/natpt_list.h new file mode 100644 index 000000000..10a77de75 --- /dev/null +++ b/bsd/netinet6/natpt_list.h @@ -0,0 +1,254 @@ +/* $KAME: natpt_list.h,v 1.5 2000/03/25 07:23:55 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#define CELL_FREE_MARKER ((Cell *)0xdeadface) +#define CELL_WEIRD_ADDR ((Cell *)0xdeadbeef) + +Cell *LST_cons __P((void *, void *)); +void LST_free __P((Cell *)); +Cell *LST_last __P((Cell *)); +int LST_length __P((Cell *)); +Cell *LST_hookup __P((Cell *, void *)); +Cell *LST_hookup_list __P((Cell **, void *)); +Cell *LST_remove_elem __P((Cell **, void *)); + + +#ifdef INCLUDE_NATPT_LIST_C + +/* + * Typedefs and Miscellaneous definitions + */ + +#ifndef NULL +#define NULL 0 +#endif + +#define CELL_NUMS 64 +#define CELL_PAGE (CELL_NUMS * sizeof(Cell)) + + +/* + * Typedefs and Miscellaneous definitions + */ + +static int _cell_used; +static int _cell_free; +static Cell *_cell_freeList; +static Cell *_cell_mallBlock; + +static Cell *_getCell __P((void)); +static Cell *_getEmptyCell __P((void)); + + +#ifdef KERNEL +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static MALLOC_DEFINE(M_NATPT, "NATPT", "Network Address Translation - Protocol Translation"); +#endif /* defined(__FreeBSD__) && __FreeBSD__ >= 3 */ +#endif /* defined(_KERNEL) */ + + +/* + * + */ + +Cell * +LST_cons(void *c_car, void *c_cdr) +{ + Cell *ptr = NULL; + + ptr = _getCell(); + CAR(ptr) = c_car; + CDR(ptr) = c_cdr; + + 
_cell_used++; + _cell_free--; + + return (ptr); +} + + +void +LST_free(Cell *cell) +{ + if (CAR(cell) != CELL_FREE_MARKER) + { + CAR(cell) = CELL_FREE_MARKER; + CDR(cell) = _cell_freeList; + _cell_freeList = cell; + + _cell_used--; + _cell_free++; + } +} + + +Cell * +LST_last(Cell *list) +{ + register Cell *ptr = NULL; + + if (list == NULL) + ptr = NULL; + else + for (ptr = list; CDR(ptr) != NULL; ptr = CDR(ptr)) ; + + return (ptr); +} + + +int +LST_length(Cell *list) +{ + register int retval = 0; + + if (list == NULL) + retval = 0; + else + { + register Cell *ptr; + + for (ptr = list; ptr; retval++, ptr = CDR(ptr)) ; + } + + return (retval); +} + + +Cell * +LST_hookup(Cell *list, void *elem) +{ + register Cell *ptr = NULL; + + if (list == NULL) + ptr = LST_cons(elem, NULL); + else + CDR(LST_last(list)) = LST_cons(elem, NULL); + + return (ptr); +} + + +Cell * +LST_hookup_list(Cell **list, void *elem) +{ + register Cell *ptr = NULL; + + if (*list == NULL) + *list = LST_cons(elem, NULL); + else + CDR(LST_last(*list)) = LST_cons(elem, NULL); + + return (ptr); +} + + +Cell * +LST_remove_elem(Cell **list, void *elem) +{ + register Cell *p, *q; + + if (*list == NULL) + return (NULL); + + for (p = *list, q = NULL; p; q = p, p = CDR(p)) + { + if (CAR(p) == elem) + { + if (q == NULL) + *list = CDR(p); + else + CDR(q) = CDR(p); + + LST_free(p); + return (elem); + } + } + + return (NULL); +} + + +/* + * + */ + +static Cell * +_getCell() +{ + Cell *ptr = NULL; + + if (_cell_freeList == NULL) + _cell_freeList = _getEmptyCell(); + + ptr = _cell_freeList; + _cell_freeList = CDR(_cell_freeList); + + return (ptr); +} + + +static Cell * +_getEmptyCell() +{ + register int iter; + register Cell *ptr = NULL; + register Cell *p; + +#if (defined(KERNEL)) || (defined(_KERNEL)) + MALLOC(ptr, Cell *, CELL_PAGE, M_NATPT, M_NOWAIT); +#else + ptr = (Cell *)malloc(CELL_PAGE); +#endif /* defined(KERNEL) */ + if (ptr == NULL) + { + printf("ENOBUFS in _getEmptyCell %d\n", __LINE__); + return 
(ptr); + } + + CAR(ptr) = (Cell *)ptr; + CDR(ptr) = NULL; + + if (_cell_mallBlock == NULL) + _cell_mallBlock = ptr; + else + CDR(LST_last(_cell_mallBlock)) = ptr; + + ptr++; + for (iter = CELL_NUMS - 2 , p = ptr; iter; iter-- , p++) + CAR(p) = CELL_WEIRD_ADDR, CDR(p) = p + 1; + CAR(p) = CELL_WEIRD_ADDR; + CDR(p) = NULL; + _cell_free += CELL_NUMS - 1; + + return (ptr); +} + +#endif /* defined(INCLUDE_NATPT_LIST_C) */ diff --git a/bsd/netinet6/natpt_log.c b/bsd/netinet6/natpt_log.c new file mode 100644 index 000000000..b7460b969 --- /dev/null +++ b/bsd/netinet6/natpt_log.c @@ -0,0 +1,170 @@ +/* $KAME: natpt_log.c,v 1.6 2000/03/25 07:23:55 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include + + +/* + * + */ + +static struct sockaddr _natpt_dst = {2, PF_INET}; +static struct sockaddr _natpt_src = {2, PF_INET}; + + +struct mbuf *natpt_lbuf __P((int type, int priorities, size_t size)); + + +/* + * + */ + +void +natpt_logMsg(int priorities, void *item, size_t size) +{ + natpt_log(LOG_MSG, priorities, item, size); +} + + +void +natpt_logMBuf(int priorities, struct mbuf *m, char *msg) +{ + if (msg) + natpt_log(LOG_MSG, priorities, (void *)msg, strlen(msg)+1); + natpt_log(LOG_MBUF, priorities, (void *)m->m_data, min(m->m_len, LBFSZ)); +} + + +void +natpt_logIp4(int priorities, struct ip *ip4) +{ + natpt_log(LOG_IP4, priorities, (void *)ip4, sizeof(struct ip)+8); +} + + +void +natpt_logIp6(int priorities, struct ip6_hdr *ip6) +{ + natpt_log(LOG_IP6, priorities, (void *)ip6, sizeof(struct ip6_hdr)+8); +} + + +int +natpt_log(int type, int priorities, void *item, size_t size) +{ + struct sockproto proto; + struct mbuf *m; + struct lbuf *p; + + if ((m = natpt_lbuf(type, priorities, size)) == NULL) + return (ENOBUFS); + + p = (struct lbuf *)m->m_data; + m_copyback(m, sizeof(struct l_hdr), p->l_hdr.lh_size, (caddr_t)item); + + proto.sp_family = AF_INET; + proto.sp_protocol = IPPROTO_AHIP; + natpt_input(m, &proto, &_natpt_src, &_natpt_dst); + + return (0); +} + + +int 
+natpt_logIN6addr(int priorities, char *msg, struct in6_addr *sin6addr) +{ + int size, msgsz; + struct mbuf *m; + struct lbuf *p; + + msgsz = strlen(msg)+1; + size = sizeof(struct l_hdr) + IN6ADDRSZ + msgsz; + + m = natpt_lbuf(LOG_IN6ADDR, priorities, size); + if (m == NULL) + return (ENOBUFS); + + { + struct sockproto proto; + + p = (struct lbuf *)m->m_pktdat; + bcopy(sin6addr, p->l_addr.in6addr, sizeof(struct in6_addr)); + strncpy(p->l_msg, msg, min(msgsz, MSGSZ-1)); + p->l_msg[MSGSZ-1] = '\0'; + + proto.sp_family = AF_INET; + proto.sp_protocol = IPPROTO_AHIP; + natpt_input(m, &proto, &_natpt_src, &_natpt_dst); + } + + return (0); +} + + +struct mbuf * +natpt_lbuf(int type, int priorities, size_t size) +{ + struct mbuf *m; + struct lbuf *p; + + MGETHDR(m, M_NOWAIT, MT_DATA); + if (m == NULL) + return (NULL); + + m->m_pkthdr.len = m->m_len = MHLEN; + m->m_pkthdr.rcvif = NULL; + + p = (struct lbuf *)m->m_data; + p->l_hdr.lh_type = type; + p->l_hdr.lh_pri = priorities; + p->l_hdr.lh_size = size; + microtime((struct timeval *)&p->l_hdr.lh_sec); + + return (m); +} diff --git a/bsd/netinet6/natpt_log.h b/bsd/netinet6/natpt_log.h new file mode 100644 index 000000000..70014115f --- /dev/null +++ b/bsd/netinet6/natpt_log.h @@ -0,0 +1,129 @@ +/* $KAME: natpt_log.h,v 1.5 2000/03/25 07:23:55 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NATPT_LOG_H +#define _NATPT_LOG_H + + +#if (defined(KERNEL)) || (defined(_KERNEL)) + +/* Header at beginning of logged packet. */ + +struct l_pkt +{ + char ifName[IFNAMSIZ]; + char __buf[4]; +}; + + +/* Header at beginning of active Transration Table */ + +struct l_att +{ + u_int _stub; +#define ATT_ALLOC (0) +#define ATT_REMOVE (1) +#define ATT_FASTEN (2) +#define ATT_UNFASTEN (3) +#define ATT_REGIST (4) + caddr_t _addr; +#if 0 + struct _aTT _att; + struct _tcpstate _state; +#endif +}; +#endif /* defined(KERNEL) */ + + +/* Header at beginning of each lbuf. 
*/ + +#ifndef IN6ADDRSZ +#define IN6ADDRSZ 16 /* IPv6 T_AAAA */ +#define INT16SZ 2 /* for systems without 16-bit ints */ +#endif /* !defined(IN6ADDRSZ) */ + +#define LBFSZ (MHLEN - sizeof(struct l_hdr)) /* LBUF payload within MBUF */ +#define MSGSZ (LBFSZ - IN6ADDRSZ) /* max message size */ + + +enum +{ + LOG_MSG, + LOG_MBUF, + LOG_IP4, + LOG_IP6, + LOG_IN4ADDR, + LOG_IN6ADDR, + LOG_CSLOT, + LOG_TSLOT, + LOG_RULE +}; + + +struct l_hdr +{ + u_short lh_type; /* Type of data in this lbuf */ + u_short lh_pri; /* Priorities of thie message */ + size_t lh_size; /* Amount of data in this lbuf */ + u_long lh_sec; /* Timestamp in second */ + u_long lh_usec; /* Timestamp in microsecond */ +}; + + +struct l_addr +{ + char in6addr[IN6ADDRSZ]; + char __msg[MSGSZ]; +}; + + +/* Definition of whole lbuf */ + +struct lbuf +{ + struct l_hdr l_hdr; + union + { +#ifdef _KERNEL + struct l_pkt l_pkt; + struct l_att l_att; +#endif /* defined(_KERNEL) */ + struct l_addr __laddr; + char __buf[LBFSZ]; + } l_dat; +}; + + +#define l_addr l_dat.__laddr +#define l_msg l_dat.__laddr.__msg + + +#endif /* !_NATPT_LOG_H */ diff --git a/bsd/netinet6/natpt_rule.c b/bsd/netinet6/natpt_rule.c new file mode 100644 index 000000000..d045e40d4 --- /dev/null +++ b/bsd/netinet6/natpt_rule.c @@ -0,0 +1,497 @@ +/* $KAME: natpt_rule.c,v 1.9 2000/03/25 07:23:56 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#if (defined(__FreeBSD__) && __FreeBSD__ < 3) || defined(__bsdi__) +#include +#endif + +#include +#include +#include +#include +#include + + +/* + * + */ + +Cell *natptStatic; /* list of struct _cSlot */ +Cell *natptDynamic; /* list of struct _cSlot */ +Cell *natptFaith; /* list of struct _cSlot */ + +int matchIn4addr __P((struct _cv *, struct pAddr *)); +int matchIn6addr __P((struct _cv *, struct pAddr *)); +static void _flushPtrRules __P((struct _cell **)); + + +extern struct in6_addr faith_prefix; +extern struct in6_addr faith_prefixmask; +extern struct in6_addr natpt_prefix; +extern struct in6_addr natpt_prefixmask; + +extern void in4_len2mask __P((struct in_addr *, int)); +extern void in6_len2mask __P((struct in6_addr *, int)); + + +/* + * + */ + +struct _cSlot * +lookingForIncomingV4Rule(struct _cv *cv) +{ + Cell *p; + struct _cSlot *acs; + + for (p = 
natptStatic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_INBOUND) + && ((acs->proto == 0) + || (acs->proto == cv->ip_payload)) + && (matchIn4addr(cv, &acs->remote) != 0)) + return (acs); + } + + for (p = natptDynamic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_INBOUND) + && ((acs->proto == 0) + || (acs->proto == cv->ip_payload)) + && (matchIn4addr(cv, &acs->remote) != 0)) + return (acs); + } + + return (NULL); +} + + +struct _cSlot * +lookingForOutgoingV4Rule(struct _cv *cv) +{ + Cell *p; + struct _cSlot *acs; + + for (p = natptStatic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + + if ((acs->dir == NATPT_OUTBOUND) + && (matchIn4addr(cv, &acs->local) != 0)) + return (acs); + } + + for (p = natptDynamic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_OUTBOUND) + && (matchIn4addr(cv, &acs->local) != 0)) + return (acs); + } + + return (NULL); +} + + +struct _cSlot * +lookingForIncomingV6Rule(struct _cv *cv) +{ + Cell *p; + struct _cSlot *acs; + + for (p = natptStatic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_INBOUND) + && (matchIn6addr(cv, &acs->remote)) != 0) + return (acs); + } + + for (p = natptDynamic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_INBOUND) + && (matchIn6addr(cv, &acs->remote)) != 0) + return (acs); + } + + return (NULL); +} + + +struct _cSlot * +lookingForOutgoingV6Rule(struct _cv *cv) +{ + Cell *p; + struct _cSlot *acs; + + for (p = natptStatic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_OUTBOUND) + && ((acs->proto == 0) + || (acs->proto == cv->ip_payload)) + && (matchIn6addr(cv, &acs->local)) != 0) + return (acs); + } + + for (p = natptDynamic; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_OUTBOUND) + && ((acs->proto == 0) + || (acs->proto == cv->ip_payload)) + && (matchIn6addr(cv, &acs->local)) != 0) + return 
(acs); + } + + for (p = natptFaith; p; p = CDR(p)) + { + acs = (struct _cSlot *)CAR(p); + if ((acs->dir == NATPT_OUTBOUND) + && ((acs->proto == 0) + || (acs->proto == cv->ip_payload)) + && (matchIn6addr(cv, &acs->local)) != 0) + return (acs); + } + + return (NULL); +} + + +int +matchIn4addr(struct _cv *cv4, struct pAddr *from) +{ + struct in_addr in4from = cv4->_ip._ip4->ip_src; + struct in_addr in4masked; + + if (from->sa_family != AF_INET) + return (0); + + switch (from->ad.type) + { + case ADDR_ANY: goto port; + + case ADDR_SINGLE: + if (in4from.s_addr == from->in4Addr.s_addr) goto port; + return (0); + + case ADDR_MASK: + in4masked.s_addr = in4from.s_addr & from->in4Mask.s_addr; + if (in4masked.s_addr == from->in4Addr.s_addr) goto port; + return (0); + + case ADDR_RANGE: + if ((in4from.s_addr >= from->in4RangeStart.s_addr) + && (in4from.s_addr <= from->in4RangeEnd.s_addr)) goto port; + return (0); + + default: + return (0); + } + +port:; + if ((cv4->ip_payload != IPPROTO_UDP) + && (cv4->ip_payload != IPPROTO_TCP)) return (1); + + if (from->_port0 == 0) return (1); + + if (from->_port1 == 0) + { + if ((cv4->_payload._tcp4->th_dport == from->_port0)) return (1); + } + else + { + u_short dport = ntohs(cv4->_payload._tcp4->th_dport); + u_short port0 = ntohs(from->_port0); + u_short port1 = ntohs(from->_port1); + + if ((dport >= port0) + && (dport <= port1)) return (1); + } + + return (0); +} + + +int +matchIn6addr(struct _cv *cv6, struct pAddr *from) +{ + struct in6_addr *in6from = &cv6->_ip._ip6->ip6_src; + struct in6_addr in6masked; + + if (from->sa_family != AF_INET6) + return (0); + + switch (from->ad.type) + { + case ADDR_ANY: goto port; + + case ADDR_SINGLE: + if (IN6_ARE_ADDR_EQUAL(in6from, &from->in6Addr)) goto port; + return (0); + + case ADDR_MASK: + in6masked.s6_addr32[0] = in6from->s6_addr32[0] & from->in6Mask.s6_addr32[0]; + in6masked.s6_addr32[1] = in6from->s6_addr32[1] & from->in6Mask.s6_addr32[1]; + in6masked.s6_addr32[2] = in6from->s6_addr32[2] & 
from->in6Mask.s6_addr32[2]; + in6masked.s6_addr32[3] = in6from->s6_addr32[3] & from->in6Mask.s6_addr32[3]; + + if (IN6_ARE_ADDR_EQUAL(&in6masked, &from->in6Addr)) goto port; + return (0); + + default: + return (0); + } + +port:; + if ((cv6->ip_payload != IPPROTO_UDP) + && (cv6->ip_payload != IPPROTO_TCP)) return (1); + + if (from->_port0 == 0) return (1); + + if (from->_port1 == 0) + { + if (cv6->_payload._tcp6->th_dport == from->_port0) return (1); + } + else + { + u_short dport = ntohs(cv6->_payload._tcp6->th_dport); +#ifdef UnusedVariable + u_short port0 = ntohs(from->_port0); + u_short port1 = ntohs(from->_port1); +#endif + + if ((dport >= from->_port0) + && (dport <= from->_port1)) return (1); + } + + + return (0); +} +/* + * + */ + +int +_natptEnableTrans(caddr_t addr) +{ + char Wow[64]; + + sprintf(Wow, "map enable"); + natpt_logMsg(LOG_INFO, Wow, strlen(Wow)); + + ip6_protocol_tr = 1; + return (0); +} + + +int +_natptDisableTrans(caddr_t addr) +{ + char Wow[64]; + + sprintf(Wow, "map disable"); + natpt_logMsg(LOG_INFO, Wow, strlen(Wow)); + + ip6_protocol_tr = 0; + return (0); +} + + +int +_natptSetRule(caddr_t addr) +{ + struct natpt_msgBox *mbx = (struct natpt_msgBox *)addr; + struct _cSlot *cst; + Cell **anchor; + +#if 0 + if (((ifb = natpt_asIfBox(mbx->m_ifName)) == NULL) + && ((ifb = natpt_setIfBox(mbx->m_ifName)) == NULL)) + return (ENXIO); +#endif + + if (mbx->flags == NATPT_FAITH) + return (_natptSetFaithRule(addr)); + + MALLOC(cst, struct _cSlot *, sizeof(struct _cSlot), M_TEMP, M_WAITOK); + copyin(mbx->freight, cst, sizeof(struct _cSlot)); + + { + struct pAddr *from; + + from = &cst->local; + if (cst->dir == NATPT_INBOUND) + from = &cst->remote; + + if (from->sa_family == AF_INET) + { + in4_len2mask(&from->in4Mask, cst->prefix); + from->in4Addr.s_addr &= from->in4Mask.s_addr; + } + else + { + in6_len2mask(&from->in6Mask, cst->prefix); + from->in6Addr.s6_addr32[0] + = from->in6Addr.s6_addr32[0] & from->in6Mask.s6_addr32[0]; + 
from->in6Addr.s6_addr32[1] + = from->in6Addr.s6_addr32[1] & from->in6Mask.s6_addr32[1]; + from->in6Addr.s6_addr32[2] + = from->in6Addr.s6_addr32[2] & from->in6Mask.s6_addr32[2]; + from->in6Addr.s6_addr32[3] + = from->in6Addr.s6_addr32[3] & from->in6Mask.s6_addr32[3]; + } + } + + natpt_log(LOG_CSLOT, LOG_DEBUG, (void *)cst, sizeof(struct _cSlot)); + + anchor = &natptStatic; + if (cst->flags == NATPT_DYNAMIC) + anchor = &natptDynamic; + + LST_hookup_list(anchor, cst); + + return (0); +} + + +int +_natptSetFaithRule(caddr_t addr) +{ + struct natpt_msgBox *mbx = (struct natpt_msgBox *)addr; + struct _cSlot *cst; + + MALLOC(cst, struct _cSlot *, sizeof(struct _cSlot), M_TEMP, M_WAITOK); + copyin(mbx->freight, cst, sizeof(struct _cSlot)); + + LST_hookup_list(&natptFaith, cst); + + return (0); +} + + +int +_natptFlushRule(caddr_t addr) +{ + struct natpt_msgBox *mbx = (struct natpt_msgBox *)addr; + + if (mbx->flags & FLUSH_STATIC) + _flushPtrRules(&natptStatic); + + if (mbx->flags & FLUSH_DYNAMIC) + _flushPtrRules(&natptDynamic); + + return (0); +} + + +int +_natptSetPrefix(caddr_t addr) +{ + struct natpt_msgBox *mbx = (struct natpt_msgBox *)addr; + struct pAddr *load; + + MALLOC(load, struct pAddr *, sizeof(struct pAddr), M_TEMP, M_WAITOK); + copyin(mbx->freight, load, SZSIN6 * 2); + + if (mbx->flags & PREFIX_FAITH) + { + faith_prefix = load->addr[0].in6; + faith_prefixmask = load->addr[1].in6; + + natpt_logIN6addr(LOG_INFO, "FAITH prefix: ", &faith_prefix); + natpt_logIN6addr(LOG_INFO, "FAITH prefixmask: ", &faith_prefixmask); + } + else if (mbx->flags & PREFIX_NATPT) + { + natpt_prefix = load->addr[0].in6; + natpt_prefixmask = load->addr[1].in6; + + natpt_logIN6addr(LOG_INFO, "NATPT prefix: ", &natpt_prefix); + natpt_logIN6addr(LOG_INFO, "NATPT prefixmask: ", &natpt_prefixmask); + } + + FREE(load, M_TEMP); + return (0); +} + + +int +_natptBreak() +{ + printf("break"); + + return (0); +} + + +/* + * + */ + +static void +_flushPtrRules(struct _cell **anchor) +{ + struct 
_cell *p0, *p1; + struct _cSlot *cslt; + + p0 = *anchor; + while (p0) + { + p1 = p0; + p0 = CDR(p0); + + cslt = (struct _cSlot *)CAR(p1); + FREE(cslt, M_TEMP); + LST_free(p1); + } + + *anchor = NULL; +} diff --git a/bsd/netinet6/natpt_soctl.h b/bsd/netinet6/natpt_soctl.h new file mode 100644 index 000000000..10204316f --- /dev/null +++ b/bsd/netinet6/natpt_soctl.h @@ -0,0 +1,91 @@ +/* $KAME: natpt_soctl.h,v 1.8 2000/03/25 07:23:56 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* cmd for use with ioctl at the socket */ +/* _IO() no parameters */ +/* _IOR() copy out parameters */ +/* _IOW() copy in parameters */ +/* _IOWR() copy in/out parameters */ + +#define SIOCSETIF _IOW ('n', 0, struct natpt_msgBox) /* Set interface side */ +#define SIOCGETIF _IOWR('n', 1, struct natpt_msgBox) /* Get interface sidde */ +#define SIOCENBTRANS _IOW ('n', 2, struct natpt_msgBox) /* Enable translation */ +#define SIOCDSBTRANS _IOW ('n', 3, struct natpt_msgBox) /* Disable translation */ +#define SIOCSETRULE _IOW ('n', 4, struct natpt_msgBox) /* Set rule */ +#define SIOCGETRULE _IOWR('n', 5, struct natpt_msgBox) /* Get rule */ +#define SIOCFLUSHRULE _IOW ('n', 6, struct natpt_msgBox) /* Flush rule */ +#define SIOCSETPREFIX _IOW ('n', 8, struct natpt_msgBox) /* Set prefix */ +#define SIOCGETPREFIX _IOWR('n', 9, struct natpt_msgBox) /* Get prefix */ +#define SIOCSETVALUE _IOW ('n', 10, struct natpt_msgBox) /* Set value */ +#define SIOCGETVALUE _IOW ('n', 11, struct natpt_msgBox) /* Get value */ + +#define SIOCTESTLOG _IOW ('n', 12, struct natpt_msgBox) /* Test log */ + +#define SIOCBREAK _IO ('n', 255) /* stop */ + + +typedef struct natpt_msgBox /* sizeof(): 44[byte] */ +{ + int flags; +/* in case SIOC(GET|SET)IF */ +#define IF_EXTERNAL (0x01) +#define IF_INTERNAL (0x02) + +/* in case SIOT(SET|GET)RULE */ +#ifndef NATPT_STATIC +#define NATPT_STATIC (0x01) +#define NATPT_DYNAMIC (0x02) +#define NATPT_FAITH (0x03) +#endif + +/* 
in case SIOCFLUSHRULE ... bitwise */ +#define FLUSH_STATIC (0x01) +#define FLUSH_DYNAMIC (0x02) + +/* in case SIOC(GET|SET)PREFIX */ +#define PREFIX_FAITH (0x01) +#define PREFIX_NATPT (0x02) + +/* in case SIOC(GET|SET)VALUE */ +#define NATPT_DEBUG (0x01) /* natpt_debug := */ +#define NATPT_DUMP (0x02) /* natpt_dump := */ + + int size; /* sizeof(*freight) */ + char *freight; + union + { + char M_ifName[IFNAMSIZ]; + char M_aux[32]; + } M_dat; +} natpt_msgBox; + +#define m_ifName M_dat.M_ifName +#define m_aux M_dat.M_aux diff --git a/bsd/netinet6/natpt_trans.c b/bsd/netinet6/natpt_trans.c new file mode 100644 index 000000000..03af7ea2c --- /dev/null +++ b/bsd/netinet6/natpt_trans.c @@ -0,0 +1,1612 @@ +/* $KAME: natpt_trans.c,v 1.12 2000/03/25 07:23:56 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#ifdef __FreeBSD__ +# include +#endif + +#include +#ifdef __bsdi__ +#include +#endif + +#include +#include +#include + +#if defined(__bsdi__) || defined(__NetBSD__) +#include /* netinet/in_pcb.h line 71 make happy. */ +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#if !defined(__NetBSD__) && (!defined(__FreeBSD__) || (__FreeBSD__ < 3)) && !defined(__APPLE__) +#include +#endif + +#include +#include +#include + + +#define recalculateTCP4Checksum 1 +#define recalculateTCP6Checksum 1 + + +/* + * + */ + +int errno; +int natpt_initialized; +int ip6_protocol_tr; + +extern struct in6_addr natpt_prefix; +extern struct in6_addr natpt_prefixmask; + +struct mbuf *translatingTCPUDPv4To4 __P((struct _cv *, struct pAddr *, struct _cv *)); + +void tr_icmp4EchoReply __P((struct _cv *, struct _cv *)); +void tr_icmp4Unreach __P((struct _cv *, struct _cv *, struct pAddr *)); +void tr_icmp4Echo __P((struct _cv *, struct _cv *)); +void tr_icmp4Timxceed __P((struct _cv *, struct _cv *, struct pAddr *)); +void tr_icmp4Paramprob __P((struct _cv *, struct _cv *)); +void tr_icmp4MimicPayload __P((struct _cv *, struct _cv *, struct pAddr *)); + +void tr_icmp6DstUnreach __P((struct _cv *, struct _cv *)); +void tr_icmp6PacketTooBig __P((struct _cv *, struct _cv *)); +void tr_icmp6TimeExceed __P((struct _cv *, struct _cv *)); 
#ifdef NATPT_NAT
/*
 * Translating From IPv4 to IPv4
 */

/*
 * NAT an IPv4 packet into a new IPv4 packet using the addresses/ports
 * held in `pad'.  Refreshes the translation-slot timestamp, dispatches
 * on the transport protocol, and recomputes the IPv4 header checksum
 * over the result.  Returns the translated mbuf, or NULL if the
 * payload protocol is not handled (or a helper failed).
 */
struct mbuf *
translatingIPv4To4(struct _cv *cv4, struct pAddr *pad)
{
    struct timeval atv;
    struct mbuf *m4 = NULL;

    if (isDump(D_TRANSLATINGIPV4))
	natpt_logIp4(LOG_DEBUG, cv4->_ip._ip4);

    /* mark the slot as recently used so it is not garbage-collected */
    microtime(&atv);
    cv4->ats->tstamp = atv.tv_sec;

    switch (cv4->ip_payload)
    {
      case IPPROTO_ICMP:
	m4 = translatingICMPv4To4(cv4, pad);
	break;

      case IPPROTO_TCP:
	m4 = translatingTCPv4To4(cv4, pad);
	break;

      case IPPROTO_UDP:
	m4 = translatingUDPv4To4(cv4, pad);
	break;
    }

    if (m4)
    {
	struct ip *ip4;

	/* recompute header checksum after address rewrite */
	ip4 = mtod(m4, struct ip *);
	ip4->ip_sum = 0;			/* Header checksum */
	ip4->ip_sum = in_cksum(m4, sizeof(struct ip));
	m4->m_pkthdr.rcvif = cv4->m->m_pkthdr.rcvif;

	m4->m_pkthdr.len = cv4->m->m_pkthdr.len;
    }

    return (m4);
}


/*
 * NAT an ICMPv4 packet.  Only echo request/reply pass through; every
 * other ICMP type frees the copy and returns NULL.  Addresses are
 * rewritten from `pad'.
 * NOTE(review): the copy comes from m_copym(), which may share cluster
 * storage with the original; the header stores written below assume the
 * copy is writable — confirm.
 */
struct mbuf *
translatingICMPv4To4(struct _cv *cv4from, struct pAddr *pad)
{
    struct _cv cv4to;
    struct mbuf *m4;
    struct ip *ip4from, *ip4to;
    struct icmp *icmp4from;

    ip4from = mtod(cv4from->m, struct ip *);
    icmp4from = cv4from->_payload._icmp4;

    m4 = m_copym(cv4from->m, 0, M_COPYALL, M_NOWAIT);
    ReturnEnobufs(m4);

    bzero(&cv4to, sizeof(struct _cv));

    cv4to.m = m4;
    cv4to._ip._ip4 = ip4to = mtod(m4, struct ip *);
    cv4to._payload._caddr = (caddr_t)cv4to._ip._ip4 + (ip4from->ip_hl << 2);

    ip4to->ip_src = pad->in4src;		/* source address */
    ip4to->ip_dst = pad->in4dst;		/* destination address */

    switch (icmp4from->icmp_type)
    {
      case ICMP_ECHOREPLY:
      case ICMP_ECHO:
	break;

      default:
	m_freem(m4);				/* unsupported type: drop */
	return (NULL);
    }

    m4->m_len = cv4from->m->m_len;
    return (m4);
}


/*
 * NAT a TCP/IPv4 packet: rewrite addresses/ports, advance the tracked
 * TCP state machine, then adjust (and optionally fully recompute) the
 * TCP checksum.
 * NOTE(review): if translatingTCPUDPv4To4() returns NULL (ENOBUFS),
 * cv4to is left zeroed and the helpers below dereference it — confirm
 * callers cannot hit this under memory pressure.
 */
struct mbuf *
translatingTCPv4To4(struct _cv *cv4from, struct pAddr *pad)
{
    struct _cv cv4to;
    struct mbuf *m4;

    bzero(&cv4to, sizeof(struct _cv));
    m4 = translatingTCPUDPv4To4(cv4from, pad, &cv4to);
    cv4to.ip_p = cv4to.ip_payload = IPPROTO_TCP;

    updateTcpStatus(&cv4to);
    adjustUpperLayerChecksum(IPPROTO_IPV4, IPPROTO_TCP, cv4from, &cv4to);

#ifdef recalculateTCP4Checksum
    _recalculateTCP4Checksum(&cv4to);
#endif

    return (m4);
}


/*
 * NAT a UDP/IPv4 packet: rewrite addresses/ports and adjust the UDP
 * checksum incrementally.
 */
struct mbuf *
translatingUDPv4To4(struct _cv *cv4from, struct pAddr *pad)
{
    struct _cv cv4to;
    struct mbuf *m4;

    bzero(&cv4to, sizeof(struct _cv));
    m4 = translatingTCPUDPv4To4(cv4from, pad, &cv4to);
    cv4to.ip_p = cv4to.ip_payload = IPPROTO_UDP;

    adjustUpperLayerChecksum(IPPROTO_IPV4, IPPROTO_UDP, cv4from, &cv4to);

    return (m4);
}


/*
 * Common TCP/UDP v4->v4 rewrite: copy the packet and overwrite the
 * addresses and the two 16-bit port fields.
 * NOTE(review): the payload is cast to struct tcphdr even for UDP;
 * this relies on th_sport/th_dport lying at the same offsets as
 * uh_sport/uh_dport — confirm.
 */
struct mbuf *
translatingTCPUDPv4To4(struct _cv *cv4from, struct pAddr *pad, struct _cv *cv4to)
{
    struct mbuf *m4;
    struct ip *ip4to;
    struct tcphdr *tcp4to;

    m4 = m_copym(cv4from->m, 0, M_COPYALL, M_NOWAIT);
    ReturnEnobufs(m4);

    ip4to = mtod(m4, struct ip *);

    ip4to->ip_src = pad->in4src;
    ip4to->ip_dst = pad->in4dst;

    tcp4to = (struct tcphdr *)((caddr_t)ip4to + (ip4to->ip_hl << 2));
    tcp4to->th_sport = pad->_sport;
    tcp4to->th_dport = pad->_dport;

    cv4to->m = m4;
    cv4to->_ip._ip4 = ip4to;
    cv4to->_payload._tcp4 = tcp4to;
    cv4to->ats = cv4from->ats;

    return (m4);
}

#endif /* ifdef NATPT_NAT */
/*
 * Translate an IPv4 packet into an IPv6 packet (NAT-PT v4->v6 path).
 * Refreshes the translation-slot timestamp and dispatches on the
 * transport protocol.  Returns the new IPv6 mbuf, or NULL if the
 * payload protocol is not handled (or a helper failed).
 */
struct mbuf *
translatingIPv4To6(struct _cv *cv4, struct pAddr *pad)
{
    struct timeval atv;
    struct mbuf *m6 = NULL;

    if (isDump(D_TRANSLATINGIPV4))
	natpt_logIp4(LOG_DEBUG, cv4->_ip._ip4);

    /* mark the slot as recently used */
    microtime(&atv);
    cv4->ats->tstamp = atv.tv_sec;

    switch (cv4->ip_payload)
    {
      case IPPROTO_ICMP:
	m6 = translatingICMPv4To6(cv4, pad);
	break;

      case IPPROTO_TCP:
	m6 = translatingTCPv4To6(cv4, pad);
	break;

      case IPPROTO_UDP:
	m6 = translatingUDPv4To6(cv4, pad);
	break;
    }

    if (m6)
	m6->m_pkthdr.rcvif = cv4->m->m_pkthdr.rcvif;

    return (m6);
}


/*
 * Translate an ICMPv4 message into ICMPv6.  Builds a fresh IPv6 header,
 * embeds the v4 source address in the low 32 bits of the configured
 * NAT-PT prefix (or of ::/96 when no prefix is set), converts the ICMP
 * type/code via the tr_icmp4* helpers, and computes the ICMPv6 checksum.
 * Single-hop and ICMPv6-obsolete message types are silently dropped.
 * NOTE(review): MCLGET failure is not checked; a too-large message with
 * no cluster available would overrun MHLEN — confirm.
 */
struct mbuf *
translatingICMPv4To6(struct _cv *cv4, struct pAddr *pad)
{
    struct _cv cv6;
    struct mbuf *m6;
    struct ip *ip4;
    struct ip6_hdr *ip6;
    struct icmp *icmp4;
    struct icmp6_hdr *icmp6;

    ip4 = mtod(cv4->m, struct ip *);
    icmp4 = cv4->_payload._icmp4;

    {
	caddr_t icmp4end;
	int icmp4len;

	icmp4end = (caddr_t)ip4 + cv4->m->m_pkthdr.len;
	icmp4len = icmp4end - (caddr_t)cv4->_payload._icmp4;

	MGETHDR(m6, M_NOWAIT, MT_HEADER);
	if (m6 == NULL)
	{
	    errno = ENOBUFS;
	    return (NULL);
	}
	if (MHLEN < (sizeof(struct ip6_hdr) + icmp4len))
	    MCLGET(m6, M_NOWAIT);
    }

    cv6.m = m6;
    cv6._ip._ip6 = mtod(m6, struct ip6_hdr *);
    cv6._payload._caddr = (caddr_t)cv6._ip._ip6 + sizeof(struct ip6_hdr);

    ip6 = mtod(cv6.m, struct ip6_hdr *);
    icmp6 = cv6._payload._icmp6;;

    ip6->ip6_flow = 0;
    ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
    ip6->ip6_vfc |= IPV6_VERSION;
    ip6->ip6_plen = 0;				/* XXX */
    ip6->ip6_nxt = IPPROTO_ICMPV6;
    ip6->ip6_hlim = ip4->ip_ttl -1;		/* hop consumed by translator */
    ip6->ip6_dst = pad->in6dst;
    ip6->ip6_src = pad->in6src;
    /* embed the IPv4 source under the NAT-PT prefix (or ::/96) */
    if (natpt_prefix.s6_addr32[0] != 0)
    {
	ip6->ip6_src.s6_addr32[0] = natpt_prefix.s6_addr32[0];
	ip6->ip6_src.s6_addr32[1] = natpt_prefix.s6_addr32[1];
	ip6->ip6_src.s6_addr32[2] = natpt_prefix.s6_addr32[2];
    }
    else
    {
	ip6->ip6_src.s6_addr32[0] = 0;
	ip6->ip6_src.s6_addr32[1] = 0;
	ip6->ip6_src.s6_addr32[2] = 0;
    }
    ip6->ip6_src.s6_addr32[3] = ip4->ip_src.s_addr;

    switch (icmp4->icmp_type)
    {
      case ICMP_ECHOREPLY:
	tr_icmp4EchoReply(cv4, &cv6);
	break;

      case ICMP_UNREACH:
	tr_icmp4Unreach(cv4, &cv6, pad);
	break;

      case ICMP_ECHO:
	tr_icmp4Echo(cv4, &cv6);
	break;

      case ICMP_TIMXCEED:
	tr_icmp4Timxceed(cv4, &cv6, pad);
	break;

      case ICMP_PARAMPROB:
	tr_icmp4Paramprob(cv4, &cv6);
	break;

      case ICMP_REDIRECT:
      case ICMP_ROUTERADVERT:
      case ICMP_ROUTERSOLICIT:
	m_freem(m6);		/* Single hop message. Silently drop. */
	return (NULL);

      case ICMP_SOURCEQUENCH:
      case ICMP_TSTAMP:
      case ICMP_TSTAMPREPLY:
      case ICMP_IREQ:
      case ICMP_IREQREPLY:
      case ICMP_MASKREQ:
      case ICMP_MASKREPLY:
	m_freem(m6);		/* Obsoleted in ICMPv6. Silently drop. */
	return (NULL);

      default:
	m_freem(m6);		/* Silently drop. */
	return (NULL);
    }

    /* ICMPv6 checksum covers the pseudo-header; plen set by the helper */
    icmp6->icmp6_cksum = 0;
    icmp6->icmp6_cksum = in6_cksum(cv6.m, IPPROTO_ICMPV6,
				   sizeof(struct ip6_hdr), ntohs(ip6->ip6_plen));

    return (m6);
}


/*
 * Convert ICMPv4 echo reply -> ICMPv6 echo reply, copying id/seq and the
 * echo payload, then fix up the v6 lengths.
 * NOTE(review): ip6_plen is set with ntohs() and read back with htons();
 * the two are the same swap, so this is symmetric — but htons() on store
 * would be the conventional spelling; confirm intent.
 */
void
tr_icmp4EchoReply(struct _cv *cv4, struct _cv *cv6)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp6->icmp6_type = ICMP6_ECHO_REPLY;
    icmp6->icmp6_code = 0;
    icmp6->icmp6_id = icmp4->icmp_id;
    icmp6->icmp6_seq = icmp4->icmp_seq;

    {
	int dlen;
	struct ip *ip4 = cv4->_ip._ip4;
	struct ip6_hdr *ip6 = cv6->_ip._ip6;
	caddr_t icmp4off, icmp6off;
	caddr_t icmp4end = (caddr_t)ip4 + cv4->m->m_pkthdr.len;
	int icmp4len = icmp4end - (caddr_t)cv4->_payload._icmp4;

	dlen = icmp4len - ICMP_MINLEN;
	icmp4off = (caddr_t)(cv4->_payload._icmp4) + ICMP_MINLEN;
	icmp6off = (caddr_t)(cv6->_payload._icmp6) + sizeof(struct icmp6_hdr);
	bcopy(icmp4off, icmp6off, dlen);

	ip6->ip6_plen = ntohs(sizeof(struct icmp6_hdr) + dlen);
	cv6->m->m_pkthdr.len
	    = cv6->m->m_len
	    = sizeof(struct ip6_hdr) + htons(ip6->ip6_plen);
    }
}
cv6->_payload._icmp6; + + icmp6->icmp6_type = ICMP6_DST_UNREACH; + icmp6->icmp6_code = 0; + icmp6->icmp6_id = icmp4->icmp_id; + icmp6->icmp6_seq = icmp4->icmp_seq; + + switch (icmp4->icmp_code) + { + case ICMP_UNREACH_NET: + case ICMP_UNREACH_HOST: + icmp6->icmp6_code = ICMP6_DST_UNREACH_NOROUTE; + break; + + case ICMP_UNREACH_PROTOCOL: /* do more */ + icmp6->icmp6_type = ICMP6_PARAM_PROB; + icmp6->icmp6_code = ICMP6_PARAMPROB_NEXTHEADER; /* xxx */ + break; + + case ICMP_UNREACH_PORT: + icmp6->icmp6_code = ICMP6_DST_UNREACH_NOPORT; + break; + + case ICMP_UNREACH_NEEDFRAG: /* do more */ + icmp6->icmp6_type = ICMP6_PACKET_TOO_BIG; + icmp6->icmp6_code = ICMP6_PARAMPROB_HEADER; + break; + + case ICMP_UNREACH_SRCFAIL: + icmp6->icmp6_code = ICMP6_DST_UNREACH_NOTNEIGHBOR; + break; + + case ICMP_UNREACH_NET_UNKNOWN: + case ICMP_UNREACH_HOST_UNKNOWN: + icmp6->icmp6_code = ICMP6_DST_UNREACH_NOROUTE; + break; + + case ICMP_UNREACH_ISOLATED: + icmp6->icmp6_code = ICMP6_DST_UNREACH_NOROUTE; + break; + + case ICMP_UNREACH_NET_PROHIB: + case ICMP_UNREACH_HOST_PROHIB: + icmp6->icmp6_code = ICMP6_DST_UNREACH_ADMIN; + break; + + case ICMP_UNREACH_TOSNET: + case ICMP_UNREACH_TOSHOST: + icmp6->icmp6_code = ICMP6_DST_UNREACH_NOROUTE; + break; + + default: + break; + } + + tr_icmp4MimicPayload(cv4, cv6, pad); +} + + +void +tr_icmp4Echo(struct _cv *cv4, struct _cv *cv6) +{ + struct icmp *icmp4 = cv4->_payload._icmp4; + struct icmp6_hdr *icmp6 = cv6->_payload._icmp6; + + icmp6->icmp6_type = ICMP6_ECHO_REQUEST; + icmp6->icmp6_code = 0; + icmp6->icmp6_id = icmp4->icmp_id; + icmp6->icmp6_seq = icmp4->icmp_seq; + + { + int dlen; + struct ip *ip4 = cv4->_ip._ip4; + struct ip6_hdr *ip6 = cv6->_ip._ip6; + caddr_t icmp4off, icmp6off; + caddr_t icmp4end = (caddr_t)ip4 + cv4->m->m_pkthdr.len; + int icmp4len = icmp4end - (caddr_t)cv4->_payload._icmp4; + + dlen = icmp4len - ICMP_MINLEN; + icmp4off = (caddr_t)(cv4->_payload._icmp4) + ICMP_MINLEN; + icmp6off = (caddr_t)(cv6->_payload._icmp6) + 
sizeof(struct icmp6_hdr); + bcopy(icmp4off, icmp6off, dlen); + + ip6->ip6_plen = ntohs(sizeof(struct icmp6_hdr) + dlen); + cv6->m->m_pkthdr.len + = cv6->m->m_len + = sizeof(struct ip6_hdr) + htons(ip6->ip6_plen); + } +} + + +void +tr_icmp4Timxceed(struct _cv *cv4, struct _cv *cv6, struct pAddr *pad) +{ + struct icmp *icmp4 = cv4->_payload._icmp4; + struct icmp6_hdr *icmp6 = cv6->_payload._icmp6; + + icmp6->icmp6_type = ICMP6_TIME_EXCEEDED; + icmp6->icmp6_code = 0; + icmp6->icmp6_id = icmp4->icmp_id; + icmp6->icmp6_seq = icmp4->icmp_seq; + + tr_icmp4MimicPayload(cv4, cv6, pad); +} + + +void +tr_icmp4Paramprob(struct _cv *cv4, struct _cv *cv6) +{ + struct icmp *icmp4 = cv4->_payload._icmp4; + struct icmp6_hdr *icmp6 = cv6->_payload._icmp6; + + icmp6->icmp6_type = ICMP6_PARAM_PROB; + icmp6->icmp6_code = 0; + icmp6->icmp6_id = icmp4->icmp_id; + icmp6->icmp6_seq = icmp4->icmp_seq; +} + + +void +tr_icmp4MimicPayload(struct _cv *cv4, struct _cv *cv6, struct pAddr *pad) +{ + int dgramlen; + int icmp6dlen, icmp6rest; + struct ip *ip4 = cv6->_ip._ip4; + struct ip6_hdr *ip6 = cv6->_ip._ip6; + struct ip6_hdr *icmpip6; + caddr_t icmp4off, icmp4dgramoff; + caddr_t icmp6off, icmp6dgramoff; + caddr_t icmp4end = (caddr_t)ip4 + cv4->m->m_pkthdr.len; + int icmp4len = icmp4end - (caddr_t)cv4->_payload._icmp4; + + icmp6rest = MHLEN - sizeof(struct ip6_hdr) * 2 - sizeof(struct icmp6_hdr); + dgramlen = icmp4len - ICMP_MINLEN - sizeof(struct ip); + dgramlen = min(icmp6rest, dgramlen); + + icmp4off = (caddr_t)(cv4->_payload._icmp4) + ICMP_MINLEN; + icmp6off = (caddr_t)(cv6->_payload._icmp6) + sizeof(struct icmp6_hdr); + icmp4dgramoff = icmp4off + sizeof(struct ip); + icmp6dgramoff = icmp6off + sizeof(struct ip6_hdr); + + icmpip6 = (struct ip6_hdr *)icmp6off; + bzero(icmpip6, sizeof(struct ip6_hdr)); + bcopy(icmp4dgramoff, icmp6dgramoff, dgramlen); + + icmpip6->ip6_flow = 0; + icmpip6->ip6_vfc &= ~IPV6_VERSION_MASK; + icmpip6->ip6_vfc |= IPV6_VERSION; + icmpip6->ip6_plen = 0; + 
icmpip6->ip6_nxt = IPPROTO_UDP; + icmpip6->ip6_hlim = 0; + icmpip6->ip6_src = pad->in6dst; + icmpip6->ip6_dst = pad->in6src; + + icmp6dlen = sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr) + dgramlen; + ip6->ip6_plen = ntohs(icmp6dlen); + cv6->m->m_pkthdr.len + = cv6->m->m_len + = sizeof(struct ip6_hdr) + htons(ip6->ip6_plen); + + if (cv4->flags & NATPT_TRACEROUTE) + { + struct udphdr *icmpudp6; + + icmpudp6 = (struct udphdr *)((caddr_t)icmpip6 + sizeof(struct ip6_hdr)); + icmpudp6->uh_sport = cv4->ats->local._dport; + icmpudp6->uh_dport = cv4->ats->local._sport; + } +} + + +struct mbuf * +translatingTCPv4To6(struct _cv *cv4, struct pAddr *pad) +{ + int cksumOrg; + struct _cv cv6; + struct mbuf *m6; + + bzero(&cv6, sizeof(struct _cv)); + m6 = translatingTCPUDPv4To6(cv4, pad, &cv6); + cv6.ip_p = cv6.ip_payload = IPPROTO_TCP; + cksumOrg = ntohs(cv4->_payload._tcp4->th_sum); + + updateTcpStatus(cv4); + adjustUpperLayerChecksum(IPPROTO_IPV4, IPPROTO_TCP, &cv6, cv4); + +#ifdef recalculateTCP6Checksum + { + int cksumAdj, cksumCks; + struct tcp6hdr *th; + + cksumAdj = cv6._payload._tcp6->th_sum; + + th = cv6._payload._tcp6; + th->th_sum = 0; + th->th_sum = in6_cksum(cv6.m, IPPROTO_TCP, sizeof(struct ip6_hdr), + cv6.m->m_pkthdr.len - sizeof(struct ip6_hdr)); + + cksumCks = th->th_sum; +#if 0 + printf("translatingTCPv4To6: TCP4->TCP6: %04x, %04x, %04x %d\n", + cksumOrg, cksumAdj, cksumCks, cv6.m->m_pkthdr.len); +#endif + } +#endif + + return (m6); +} + + +struct mbuf * +translatingUDPv4To6(struct _cv *cv4, struct pAddr *pad) +{ + struct _cv cv6; + struct mbuf *m6; + + bzero(&cv6, sizeof(struct _cv)); + m6 = translatingTCPUDPv4To6(cv4, pad, &cv6); + cv6.ip_p = cv6.ip_payload = IPPROTO_UDP; + + return (m6); +} + + +struct mbuf * +translatingTCPUDPv4To6(struct _cv *cv4, struct pAddr *pad, struct _cv *cv6) +{ + struct mbuf *m6; + struct ip *ip4; + struct ip6_hdr *ip6; + struct tcp6hdr *tcp6; + + if (cv4->m->m_flags & M_EXT) + { + if (cv4->plen + sizeof(struct ip6_hdr) > 
/*
 * Common TCP/UDP v4->v6 rewrite.  Builds a new mbuf carrying an IPv6
 * header followed by the (copied) transport payload, choosing one of
 * four layouts depending on whether the source sits in a cluster and
 * whether header+payload fit in MHLEN.  On success fills in *cv6 and
 * returns the new chain; returns NULL with errno=ENOBUFS on failure.
 *
 * NOTE(review): ip6_nxt is set to IPPROTO_TCP unconditionally, even when
 * the caller is the UDP path — confirm whether a later step rewrites it.
 * NOTE(review): the payload is addressed via _payload._tcp4 for UDP too;
 * only the port fields (same offsets) are touched.
 */
struct mbuf *
translatingTCPUDPv4To6(struct _cv *cv4, struct pAddr *pad, struct _cv *cv6)
{
    struct mbuf *m6;
    struct ip *ip4;
    struct ip6_hdr *ip6;
    struct tcp6hdr *tcp6;

    if (cv4->m->m_flags & M_EXT)
    {
	if (cv4->plen + sizeof(struct ip6_hdr) > MHLEN)
	{
	    /* cluster source, too big for one header mbuf: share the
	       payload via m_copym() and prepend a header-only mbuf */
	    struct mbuf *m6next;

	    m6next = m_copym(cv4->m, 0, M_COPYALL, M_NOWAIT);
	    ReturnEnobufs(m6next);

	    /* advance past the old IPv4 header; payload only */
	    m6next->m_data += cv4->poff;
	    m6next->m_len -= cv4->poff;

	    MGETHDR(m6, M_NOWAIT, MT_HEADER);
	    ReturnEnobufs(m6);

	    m6->m_next = m6next;
	    m6->m_data += (MHLEN - sizeof(struct ip6_hdr));
	    m6->m_len = sizeof(struct ip6_hdr);
	    m6->m_pkthdr.len = sizeof(struct ip6_hdr) + cv4->plen;
	    ip6 = mtod(m6, struct ip6_hdr *);

	    cv6->m = m6;
	    cv6->_ip._ip6 = mtod(m6, struct ip6_hdr *);
	    cv6->_payload._caddr = m6next->m_data;
	    cv6->plen = cv4->plen;
	    cv6->poff = 0;			/* payload is in the 2nd mbuf */
	}
	else /* (sizeof(struct ip6_hdr) + cv4->plen <= MHLEN) */
	{
	    /* cluster source but small: copy payload into one new mbuf */
	    caddr_t tcp4;
	    caddr_t tcp6;

	    MGETHDR(m6, M_NOWAIT, MT_HEADER);
	    if (m6 == NULL)
	    {
		errno = ENOBUFS;
		return (NULL);
	    }

	    ip6 = mtod(m6, struct ip6_hdr *);
	    tcp4 = (caddr_t)cv4->_payload._tcp4;
	    tcp6 = (caddr_t)ip6 + sizeof(struct ip6_hdr);
	    bcopy(tcp4, tcp6, cv4->plen);

	    m6->m_pkthdr.len
		= m6->m_len
		= sizeof(struct ip6_hdr) + cv4->plen;

	    cv6->m = m6;
	    cv6->_ip._ip6 = mtod(m6, struct ip6_hdr *);
	    cv6->_payload._caddr = (caddr_t)cv6->_ip._ip6 + sizeof(struct ip6_hdr);
	    cv6->plen = cv4->plen;
	    cv6->poff = cv6->_payload._caddr - (caddr_t)cv6->_ip._ip6;
	}
    }
    else if (cv4->plen + sizeof(struct ip6_hdr) > MHLEN)
    {
	/* non-cluster source, too big for MHLEN: allocate a cluster */
	caddr_t tcp4;
	caddr_t tcp6;

	MGETHDR(m6, M_NOWAIT, MT_HEADER);
	ReturnEnobufs(m6);
	MCLGET(m6, M_NOWAIT);

	m6->m_data += 128;	/* make struct ether_header{} space. -- too many? */
	m6->m_pkthdr.len = m6->m_len = sizeof(struct ip6_hdr) + cv4->plen;
	ip6 = mtod(m6, struct ip6_hdr *);

	tcp4 = (caddr_t)cv4->_payload._tcp4;
	tcp6 = (caddr_t)ip6 + sizeof(struct ip6_hdr);
	bcopy(tcp4, tcp6, cv4->plen);

	cv6->m = m6;
	cv6->_ip._ip6 = mtod(m6, struct ip6_hdr *);
	cv6->_payload._caddr = tcp6;
	cv6->plen = cv4->plen;
	cv6->poff = cv6->_payload._caddr - (caddr_t)cv6->_ip._ip6;
    }
    else
    {
	/* small non-cluster source: single header mbuf holds everything */
	caddr_t tcp4;
	caddr_t tcp6;

	MGETHDR(m6, M_NOWAIT, MT_HEADER);
	if (m6 == NULL)
	{
	    errno = ENOBUFS;
	    return (NULL);
	}

	cv6->m = m6;
	ip6 = mtod(m6, struct ip6_hdr *);
	tcp4 = (caddr_t)cv4->_payload._tcp4;
	tcp6 = (caddr_t)ip6 + sizeof(struct ip6_hdr);
	bcopy(tcp4, tcp6, cv4->plen);

	m6->m_pkthdr.len
	    = m6->m_len
	    = sizeof(struct ip6_hdr) + cv4->plen;

	cv6->_ip._ip6 = mtod(m6, struct ip6_hdr *);
	cv6->_payload._caddr = (caddr_t)cv6->_ip._ip6 + sizeof(struct ip6_hdr);
	cv6->plen = cv4->plen;
	cv6->poff = cv6->_payload._caddr - (caddr_t)cv6->_ip._ip6;
    }

    cv6->ats = cv4->ats;

    /* fill in the fresh IPv6 header from the v4 header and `pad' */
    ip4 = mtod(cv4->m, struct ip *);
    ip6->ip6_flow = 0;
    ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
    ip6->ip6_vfc |= IPV6_VERSION;
    ip6->ip6_plen = htons(cv4->plen);
    ip6->ip6_nxt = IPPROTO_TCP;
    ip6->ip6_hlim = ip4->ip_ttl -1;		/* hop consumed by translator */
    ip6->ip6_src = pad->in6src;
    ip6->ip6_dst = pad->in6dst;

    tcp6 = cv6->_payload._tcp6;
    tcp6->th_sport = pad->_sport;
    tcp6->th_dport = pad->_dport;

    return (m6);
}
/*
 * Translating Form IPv6 To IPv4
 */

/*
 * Translate an IPv6 packet into an IPv4 packet (NAT-PT v6->v4 path).
 * Refreshes the translation-slot timestamp, dispatches on the transport
 * protocol, recomputes the IPv4 header checksum and the packet-header
 * length (summed over the whole chain).  Returns NULL when the payload
 * protocol is not handled or a helper failed.
 */
struct mbuf *
translatingIPv6To4(struct _cv *cv6, struct pAddr *pad)
{
    struct timeval atv;
    struct mbuf *m4 = NULL;

    if (isDump(D_TRANSLATINGIPV6))
	natpt_logIp6(LOG_DEBUG, cv6->_ip._ip6);

    /* mark the slot as recently used */
    microtime(&atv);
    cv6->ats->tstamp = atv.tv_sec;

    /* NOTE(review): IPPROTO_ICMP here for a v6 packet implies ip_payload
       was normalized (ICMPv6 -> ICMP) by the caller — confirm. */
    switch (cv6->ip_payload)
    {
      case IPPROTO_ICMP:
	m4 = translatingICMPv6To4(cv6, pad);
	break;

      case IPPROTO_TCP:
	m4 = translatingTCPv6To4(cv6, pad);
	break;

      case IPPROTO_UDP:
	m4 = translatingUDPv6To4(cv6, pad);
	break;
    }

    if (m4)
    {
	int mlen;
	struct mbuf *mm;
	struct ip *ip4;

	ip4 = mtod(m4, struct ip *);
	ip4->ip_sum = 0;			/* Header checksum */
	ip4->ip_sum = in_cksum(m4, sizeof(struct ip));
	m4->m_pkthdr.rcvif = cv6->m->m_pkthdr.rcvif;

	/* pkthdr.len must equal the sum of the chain's m_len */
	for (mlen = 0, mm = m4; mm; mm = mm->m_next)
	{
	    mlen += mm->m_len;
	}

	m4->m_pkthdr.len = mlen;

	if (isDump(D_TRANSLATEDIPV4))
	    natpt_logIp4(LOG_DEBUG, ip4);
    }

    return (m4);
}


/*
 * Translate an ICMPv6 message into ICMPv4.  Builds a fresh IPv4 header,
 * converts type/code via the tr_icmp6* helpers, and recomputes the
 * ICMPv4 checksum (temporarily hiding the IP header from in_cksum by
 * sliding m_data).  MLD and unknown types are silently dropped.
 * NOTE(review): ip_len is set to htons(ip6->ip6_plen) although ip6_plen
 * is already network order, yet it is later used as host order in
 * `ip4->ip_len - hlen` — flag for confirmation.
 * NOTE(review): MCLGET failure is not checked (same as the v4->v6 side).
 */
struct mbuf *
translatingICMPv6To4(struct _cv *cv6, struct pAddr *pad)
{
    struct _cv cv4;
    struct mbuf *m4;
    struct ip *ip4;
    struct ip6_hdr *ip6;
    struct icmp *icmp4;
    struct icmp6_hdr *icmp6;

    ip6 = mtod(cv6->m, struct ip6_hdr *);
    icmp6 = cv6->_payload._icmp6;

    {
	caddr_t icmp6end = (caddr_t)ip6 + cv6->m->m_pkthdr.len;
	int icmp6len = icmp6end - (caddr_t)cv6->_payload._icmp6;

	MGETHDR(m4, M_NOWAIT, MT_HEADER);
	if (m4 == NULL)
	{
	    errno = ENOBUFS;
	    return (NULL);
	}
	if (MHLEN < (sizeof(struct ip) + icmp6len))
	    MCLGET(m4, M_NOWAIT);
    }

    cv4.m = m4;
    cv4._ip._ip4 = mtod(m4, struct ip *);
    cv4._payload._caddr = (caddr_t)cv4._ip._ip4 + sizeof(struct ip);

    ip4 = mtod(cv4.m, struct ip *);
    icmp4 = cv4._payload._icmp4;

    ip4->ip_v = IPVERSION;			/* IP version */
    ip4->ip_hl = 5;				/* header length (no IPv4 option) */
    ip4->ip_tos = 0;				/* Type Of Service */
    ip4->ip_len = htons(ip6->ip6_plen);		/* Payload length */
    ip4->ip_id = 0;				/* Identification */
    ip4->ip_off = 0;				/* flag and fragment offset */
    ip4->ip_ttl = ip6->ip6_hlim - 1;		/* Time To Live */
    ip4->ip_p = cv6->ip_payload;		/* Final Payload */
    ip4->ip_src = pad->in4src;			/* source addresss */
    ip4->ip_dst = pad->in4dst;			/* destination address */

    switch (icmp6->icmp6_type)
    {
      case ICMP6_DST_UNREACH:
	tr_icmp6DstUnreach(cv6, &cv4);
	break;

      case ICMP6_PACKET_TOO_BIG:
	tr_icmp6PacketTooBig(cv6, &cv4);
	break;

      case ICMP6_TIME_EXCEEDED:
	tr_icmp6TimeExceed(cv6, &cv4);
	break;

      case ICMP6_PARAM_PROB:
	tr_icmp6ParamProb(cv6, &cv4);
	break;

      case ICMP6_ECHO_REQUEST:
	tr_icmp6EchoRequest(cv6, &cv4);
	break;

      case ICMP6_ECHO_REPLY:
	tr_icmp6EchoReply(cv6, &cv4);
	break;

      case MLD6_LISTENER_QUERY:
      case MLD6_LISTENER_REPORT:
      case MLD6_LISTENER_DONE:
	m_freem(m4);		/* Single hop message. Silently drop. */
	return (NULL);

      default:
	m_freem(m4);		/* Silently drop. */
	return (NULL);
    }

    {
	int hlen;
	struct mbuf *m4 = cv4.m;
	struct ip *ip4 = cv4._ip._ip4;

	/* checksum the ICMP part only: temporarily strip the IP header */
	hlen = ip4->ip_hl << 2;
	m4->m_data += hlen;
	m4->m_len -= hlen;
	icmp4->icmp_cksum = 0;
	icmp4->icmp_cksum = in_cksum(cv4.m, ip4->ip_len - hlen);
	m4->m_data -= hlen;
	m4->m_len += hlen;
    }

    return (m4);
}
/*
 * Convert ICMPv6 destination-unreachable codes to their ICMPv4
 * counterparts.
 */
void
tr_icmp6DstUnreach(struct _cv *cv6, struct _cv *cv4)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp4->icmp_type = ICMP_UNREACH;
    icmp4->icmp_code = 0;
    icmp4->icmp_id = icmp6->icmp6_id;
    icmp4->icmp_seq = icmp6->icmp6_seq;

    switch (icmp6->icmp6_code)
    {
      case ICMP6_DST_UNREACH_NOROUTE:
	icmp4->icmp_code = ICMP_UNREACH_HOST;
	break;

      case ICMP6_DST_UNREACH_ADMIN:
	icmp4->icmp_code = ICMP_UNREACH_HOST_PROHIB;
	break;

      case ICMP6_DST_UNREACH_NOTNEIGHBOR:
	icmp4->icmp_code = ICMP_UNREACH_SRCFAIL;
	break;

      case ICMP6_DST_UNREACH_ADDR:
	icmp4->icmp_code = ICMP_UNREACH_HOST;
	break;

      case ICMP6_DST_UNREACH_NOPORT:
	icmp4->icmp_code = ICMP_UNREACH_PORT;
	break;
    }
}


/*
 * Convert ICMPv6 packet-too-big into ICMPv4 fragmentation-needed.
 * The MTU value is not propagated here ("do more").
 */
void
tr_icmp6PacketTooBig(struct _cv *cv6, struct _cv *cv4)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp4->icmp_type = ICMP_UNREACH;
    icmp4->icmp_code = ICMP_UNREACH_NEEDFRAG;	/* do more */
    icmp4->icmp_id = icmp6->icmp6_id;
    icmp4->icmp_seq = icmp6->icmp6_seq;
}


/*
 * Convert ICMPv6 time-exceeded to ICMPv4 time-exceeded; the code value
 * has identical meaning in both protocols, so it is carried over.
 */
void
tr_icmp6TimeExceed(struct _cv *cv6, struct _cv *cv4)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp4->icmp_type = ICMP_TIMXCEED;
    icmp4->icmp_code = icmp6->icmp6_code;	/* code unchanged. */
    icmp4->icmp_id = icmp6->icmp6_id;
    icmp4->icmp_seq = icmp6->icmp6_seq;
}


/*
 * Convert ICMPv6 parameter-problem.  An unrecognized-next-header code
 * becomes ICMPv4 protocol-unreachable; other codes map to a generic
 * parameter problem ("do more": pointer not translated).
 */
void
tr_icmp6ParamProb(struct _cv *cv6, struct _cv *cv4)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp4->icmp_type = ICMP_PARAMPROB;		/* do more */
    icmp4->icmp_code = 0;
    icmp4->icmp_id = icmp6->icmp6_id;
    icmp4->icmp_seq = icmp6->icmp6_seq;

    if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER)
    {
	icmp4->icmp_type = ICMP_UNREACH;
	icmp4->icmp_code = ICMP_UNREACH_PROTOCOL;
    }
}


/*
 * Convert ICMPv6 echo request -> ICMPv4 echo, copying id/seq and the
 * echo payload, then set the v4 lengths.
 */
void
tr_icmp6EchoRequest(struct _cv *cv6, struct _cv *cv4)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp4->icmp_type = ICMP_ECHO;
    icmp4->icmp_code = 0;
    icmp4->icmp_id = icmp6->icmp6_id;
    icmp4->icmp_seq = icmp6->icmp6_seq;

    {
	int dlen;
	struct ip *ip4 = cv4->_ip._ip4;
	struct ip6_hdr *ip6 = cv6->_ip._ip6;
	caddr_t icmp6off, icmp4off;
	caddr_t icmp6end = (caddr_t)ip6 + cv6->m->m_pkthdr.len;
	int icmp6len = icmp6end - (caddr_t)cv6->_payload._icmp6;

	dlen = icmp6len - sizeof(struct icmp6_hdr);
	icmp6off = (caddr_t)(cv6->_payload._icmp6) + sizeof(struct icmp6_hdr);
	icmp4off = (caddr_t)(cv4->_payload._icmp4) + ICMP_MINLEN;
	bcopy(icmp6off, icmp4off, dlen);

	ip4->ip_len = cv4->m->m_len = sizeof(struct ip) + ICMP_MINLEN + dlen;
    }
}


/*
 * Convert ICMPv6 echo reply -> ICMPv4 echo reply, copying id/seq and the
 * echo payload, then set the v4 lengths.
 */
void
tr_icmp6EchoReply(struct _cv *cv6, struct _cv *cv4)
{
    struct icmp *icmp4 = cv4->_payload._icmp4;
    struct icmp6_hdr *icmp6 = cv6->_payload._icmp6;

    icmp4->icmp_type = ICMP_ECHOREPLY;
    icmp4->icmp_code = 0;
    icmp4->icmp_id = icmp6->icmp6_id;
    icmp4->icmp_seq = icmp6->icmp6_seq;

    {
	int dlen;
	struct ip *ip4 = cv4->_ip._ip4;
	struct ip6_hdr *ip6 = cv6->_ip._ip6;
	caddr_t icmp6off, icmp4off;
	caddr_t icmp6end = (caddr_t)ip6 + cv6->m->m_pkthdr.len;
	int icmp6len = icmp6end - (caddr_t)cv6->_payload._icmp6;

	dlen = icmp6len - sizeof(struct icmp6_hdr);
	icmp6off = (caddr_t)(cv6->_payload._icmp6) + sizeof(struct icmp6_hdr);
	icmp4off = (caddr_t)(cv4->_payload._icmp4) + ICMP_MINLEN;
	bcopy(icmp6off, icmp4off, dlen);

	ip4->ip_len = cv4->m->m_len = sizeof(struct ip) + ICMP_MINLEN + dlen;
    }
}
/*
 * Translate a TCP/IPv6 packet to TCP/IPv4: rewrite headers, advance the
 * tracked TCP state machine, adjust the checksum incrementally and then
 * (while recalculateTCP4Checksum is defined) recompute it from scratch.
 * NOTE(review): if translatingTCPUDPv6To4() returns NULL (ENOBUFS),
 * cv4 stays zeroed and the helpers below dereference it — confirm.
 */
struct mbuf *
translatingTCPv6To4(struct _cv *cv6, struct pAddr *pad)
{
    int cksumOrg;
    struct _cv cv4;
    struct mbuf *m4;

    bzero(&cv4, sizeof(struct _cv));
    m4 = translatingTCPUDPv6To4(cv6, pad, &cv4);
    cv4.ip_p = cv4.ip_payload = IPPROTO_TCP;
    cksumOrg = ntohs(cv6->_payload._tcp6->th_sum);

    updateTcpStatus(cv6);
    adjustUpperLayerChecksum(IPPROTO_IPV6, IPPROTO_TCP, cv6, &cv4);

#ifdef recalculateTCP4Checksum
    _recalculateTCP4Checksum(&cv4);
#endif

    return (m4);
}


/*
 * Translate a UDP/IPv6 packet to UDP/IPv4.  After the incremental
 * checksum adjustment the UDP checksum is recomputed from scratch over
 * a temporary pseudo-header (the "#if 1" block), mirroring
 * _recalculateTCP4Checksum(); the IPv4 header is saved and restored
 * around the overlay.
 */
struct mbuf *
translatingUDPv6To4(struct _cv *cv6, struct pAddr *pad)
{
    struct _cv cv4;
    struct mbuf *m4;

    bzero(&cv4, sizeof(struct _cv));
    m4 = translatingTCPUDPv6To4(cv6, pad, &cv4);
    cv4.ip_p = cv4.ip_payload = IPPROTO_UDP;

    adjustUpperLayerChecksum(IPPROTO_IPV6, IPPROTO_UDP, cv6, &cv4);

#if 1
    {
	int cksumAdj, cksumCks;
	int iphlen;
	struct ip *ip4 = cv4._ip._ip4;
	struct ip save_ip;
	struct udpiphdr *ui;

	cksumAdj = cv4._payload._tcp4->th_sum;

	ui = mtod(cv4.m, struct udpiphdr *);
	iphlen = ip4->ip_hl << 2;

	/* overlay a UDP pseudo-header where the IP header sits */
	save_ip = *cv4._ip._ip4;
	bzero(ui, sizeof(struct udpiphdr));
	ui->ui_pr = IPPROTO_UDP;
	ui->ui_len = htons(cv4.m->m_pkthdr.len - iphlen);
	ui->ui_src = save_ip.ip_src;
	ui->ui_dst = save_ip.ip_dst;

	ui->ui_sum = 0;
	ui->ui_sum = in_cksum(cv4.m, cv4.m->m_pkthdr.len);
	*cv4._ip._ip4 = save_ip;		/* restore the real header */

	cksumCks = ui->ui_sum;
#if 0
	printf("translatingUDPv6To4: UDP6->UDP4: %04x, %04x %d\n",
	       cksumAdj, cksumCks, cv4.m->m_pkthdr.len);
#endif
    }
#endif

    return (m4);
}


/*
 * Common TCP/UDP v6->v4 rewrite: copy the packet, slide m_data forward
 * by the header-size difference so the IPv4 header lands immediately
 * before the unchanged transport payload, then fill in the new header
 * and ports from `pad'.
 * NOTE(review): TTL is copied without decrement here, unlike the other
 * translation paths which use hlim/ttl - 1 — confirm intent.
 */
struct mbuf *
translatingTCPUDPv6To4(struct _cv *cv6, struct pAddr *pad, struct _cv *cv4)
{
    struct mbuf *m4;
    struct ip *ip4;
    struct ip6_hdr *ip6;
    struct tcphdr *th;

    m4 = m_copym(cv6->m, 0, M_COPYALL, M_NOWAIT);
    ReturnEnobufs(m4);

    /* reuse the v6 header space: IPv4 header is 20 bytes smaller */
    m4->m_data += sizeof(struct ip6_hdr) - sizeof(struct ip);
    m4->m_pkthdr.len = m4->m_len = sizeof(struct ip) + cv6->plen;

    cv4->m = m4;
    cv4->plen = cv6->plen;
    cv4->poff = sizeof(struct ip);
    cv4->_ip._ip4 = mtod(m4, struct ip *);
    cv4->_payload._caddr = (caddr_t)cv4->_ip._ip4 + sizeof(struct ip);

    cv4->ats = cv6->ats;

    ip4 = mtod(m4, struct ip *);
    ip6 = mtod(cv6->m, struct ip6_hdr *);
    ip4->ip_v = IPVERSION;			/* IP version */
    ip4->ip_hl = 5;				/* header length (no IPv4 option) */
    ip4->ip_tos = 0;				/* Type Of Service */
    ip4->ip_len = sizeof(struct ip) + ntohs(ip6->ip6_plen);
						/* Payload length */
    ip4->ip_id = 0;				/* Identification */
    ip4->ip_off = 0;				/* flag and fragment offset */
    ip4->ip_ttl = ip6->ip6_hlim;		/* Time To Live */
    ip4->ip_p = cv6->ip_payload;		/* Final Payload */
    ip4->ip_src = pad->in4src;			/* source addresss */
    ip4->ip_dst = pad->in4dst;			/* destination address */

    th = (struct tcphdr *)(ip4 + 1);
    th->th_sport = pad->_sport;
    th->th_dport = pad->_dport;

    return (m4);
}
/*
 * Itojun said 'code fragment in "#ifdef recalculateTCP4Checksum"
 * does not make sense to me'. I agree, but
 * adjustUpperLayerChecksum() cause checksum error sometime but
 * not always, so I left its code. After I fixed it, this code
 * will become vanish.
 */

/*
 * Recompute a translated packet's TCP checksum from scratch by
 * overlaying a TCP/IP pseudo-header on the IPv4 header (saved and
 * restored around the computation).  Belt-and-braces check for the
 * incremental adjustUpperLayerChecksum() — see the note above.
 * NOTE(review): the "#if 0" printf references cksumOrg, which is not in
 * scope here; it would not compile if enabled.
 */
static void
_recalculateTCP4Checksum(struct _cv *cv4)
{
    int cksumAdj, cksumCks;
    int iphlen;
    struct ip *ip4 = cv4->_ip._ip4;
    struct ip save_ip;
    struct tcpiphdr *ti;

    cksumAdj = cv4->_payload._tcp4->th_sum;

    ti = mtod(cv4->m, struct tcpiphdr *);
    iphlen = ip4->ip_hl << 2;

    /* overlay the pseudo-header; zeroing differs with the tcpiphdr
       layout in use (ti_next/ti_prev vs. the 9-byte ti_x1 pad) */
    save_ip = *cv4->_ip._ip4;
#ifdef ti_next
    ti->ti_next = ti->ti_prev = 0;
    ti->ti_x1 = 0;
#else
    bzero(ti->ti_x1, 9);
#endif
    ti->ti_pr = IPPROTO_TCP;
    ti->ti_len = htons(cv4->m->m_pkthdr.len - iphlen);
    ti->ti_src = save_ip.ip_src;
    ti->ti_dst = save_ip.ip_dst;

    ti->ti_sum = 0;
    ti->ti_sum = in_cksum(cv4->m, cv4->m->m_pkthdr.len);
    *cv4->_ip._ip4 = save_ip;			/* restore the real header */

    cksumCks = ti->ti_sum;
#if 0
    printf("translatingTCPv6To4: TCP6->TCP4: %04x, %04x, %04x %d\n",
	   cksumOrg, cksumAdj, cksumCks, cv4->m->m_pkthdr.len);
#endif
}


/*
 *
 */

/*
 * Advance the per-slot TCP state machine for the packet in `cv'.
 * Lazily allocates the _tcpstate record (initial state CLOSED) on first
 * TCP packet for the slot.  Non-TCP slots are ignored.  Always returns
 * 0 (the "XXX" returns also report success on allocation failure).
 */
static int
updateTcpStatus(struct _cv *cv)
{
    struct _tSlot *ats = cv->ats;
    struct _tcpstate *ts;

    if (ats->ip_payload != IPPROTO_TCP)
	return (0);				/* XXX */

    if ((ts = ats->suit.tcp) == NULL)
    {
	MALLOC(ts, struct _tcpstate *, sizeof(struct _tcpstate), M_NATPT, M_NOWAIT);
	if (ts == NULL)
	{
	    return (0);				/* XXX */
	}

	bzero(ts, sizeof(struct _tcpstate));

	ts->_state = TCPS_CLOSED;
	ats->suit.tcp = ts;
    }

    ts->_state
	= _natpt_tcpfsm(ats->session, cv->inout, ts->_state, cv->_payload._tcp4->th_flags);

    return (0);
}


/*
 * Top-level TCP FSM step: RST forces CLOSED from any state; otherwise
 * delegate to the outbound- or inbound-initiated session automaton.
 */
static int
_natpt_tcpfsm(int session, int inout, u_short state, u_char flags)
{
    int rv;

    if (flags & TH_RST)
	return (TCPS_CLOSED);

    if (session == NATPT_OUTBOUND)
	rv = _natpt_tcpfsmSessOut(inout, state, flags);
    else
	rv = _natpt_tcpfsmSessIn (inout, state, flags);

    return (rv);
}
ESTABLISHED + delta(ESTABLISHED, TH_FIN) -> FIN_WAIT_1 + delta(FIN_WAIT_1, in TH_FIN | TH_ACK) -> TIME_WAIT + delta(FIN_WAIT_1, in TH_ACK) -> FIN_WAIT_2 + delta(FIN_WAIT_1, in TH_FIN) -> CLOSING + delta(FIN_WAIT_2, in TH_FIN) -> TIME_WAIT + delta(CLOSING, TH_ACK) -> TIME_WAIT + delta(TIME_WAIT, eps) -> CLOSED + +//#------------------------------------------------------------------------ +*/ + +static int +_natpt_tcpfsmSessOut(int inout, short state, u_char flags) +{ + int rv = state; + + switch (state) + { + case TCPS_CLOSED: + if ((inout == NATPT_OUTBOUND) + && (((flags & TH_SYN) != 0) + && (flags & TH_ACK) == 0)) + rv = TCPS_SYN_SENT; + break; + + case TCPS_SYN_SENT: + if ((inout == NATPT_INBOUND) + && (flags & (TH_SYN | TH_ACK))) + rv = TCPS_SYN_RECEIVED; + break; + + case TCPS_SYN_RECEIVED: + if ((inout == NATPT_OUTBOUND) + && (flags & TH_ACK)) + rv = TCPS_ESTABLISHED; + break; + + case TCPS_ESTABLISHED: + if ((inout == NATPT_OUTBOUND) + && (flags & TH_FIN)) + rv = TCPS_FIN_WAIT_1; + break; + + case TCPS_FIN_WAIT_1: + if (inout == NATPT_INBOUND) + { + if (flags & (TH_FIN | TH_ACK)) rv = TCPS_TIME_WAIT; + else if (flags & TH_ACK) rv = TCPS_FIN_WAIT_2; + else if (flags & TH_FIN) rv = TCPS_CLOSING; + } + break; + + case TCPS_CLOSING: + if ((inout == NATPT_OUTBOUND) + && (flags & TH_ACK)) + rv = TCPS_TIME_WAIT; + break; + + case TCPS_FIN_WAIT_2: + if ((inout == NATPT_INBOUND) + && (flags & TH_FIN)) + rv = TCPS_TIME_WAIT; + break; + } + + return (rv); +} + + +/* +//## +//#------------------------------------------------------------------------ +//# _natpt_tcpfsmSessIn + + delta(start, eps) -> CLOSED + delta(CLOSED, TH_SYN & !TH_ACK) -> SYN_RCVD + delta(SYN_RCVD, TH_ACK) -> ESTABLISHED + delta(ESTABLISHED, in TH_FIN) -> CLOSE_WAIT + delta(ESTABLISHED, out TH_FIN) -> FIN_WAIT_1 + delta(CLOSE_WAIT, out TH_FIN) -> LAST_ACK + delta(FIN_WAIT_1, TH_FIN & TH_ACK) -> TIME_WAIT + delta(FIN_WAIT_1, TH_FIN) -> CLOSING + delta(FIN_WAIT_1, TH_ACK) -> FIN_WAIT_2 + delta(CLOSING, 
TH_ACK) -> TIME_WAIT + delta(LAST_ACK), TH_ACK) -> CLOSED + delta(FIN_WAIT_2, TH_FIN) -> TIME_WAIT + delta(TIME_WAIT, eps) -> CLOSED + +//#------------------------------------------------------------------------ +*/ + +static int +_natpt_tcpfsmSessIn(int inout, short state, u_char flags) +{ + int rv = state; + + switch (state) + { + case TCPS_CLOSED: + if ((inout == NATPT_INBOUND) + && (((flags & TH_SYN) != 0) + && (flags & TH_ACK) == 0)) + rv = TCPS_SYN_RECEIVED; + break; + + case TCPS_SYN_RECEIVED: + if ((inout == NATPT_INBOUND) + && (flags & TH_ACK)) + rv = TCPS_ESTABLISHED; + break; + + case TCPS_ESTABLISHED: + if ((inout == NATPT_INBOUND) + && (flags & TH_FIN)) + rv = TCPS_CLOSE_WAIT; + if ((inout == NATPT_OUTBOUND) + && (flags & TH_FIN)) + rv = TCPS_FIN_WAIT_1; + break; + + case TCPS_CLOSE_WAIT: + if ((inout == NATPT_OUTBOUND) + && (flags & TH_FIN)) + rv = TCPS_LAST_ACK; + break; + + case TCPS_FIN_WAIT_1: + if (inout == NATPT_INBOUND) + { + if (flags & (TH_FIN | TH_ACK)) rv = TCPS_TIME_WAIT; + else if (flags & TH_FIN) rv = TCPS_CLOSING; + else if (flags & TH_ACK) rv = TCPS_FIN_WAIT_2; + } + break; + + case TCPS_CLOSING: + if ((inout == NATPT_INBOUND) + && (flags & TH_ACK)) + rv = TCPS_TIME_WAIT; + break; + + case TCPS_LAST_ACK: + if ((inout == NATPT_INBOUND) + && (flags & TH_ACK)) + rv = TCPS_CLOSED; + break; + + case TCPS_FIN_WAIT_2: + if ((inout == NATPT_INBOUND) + && (flags & TH_FIN)) + rv = TCPS_TIME_WAIT; + break; + } + + return (rv); +} + + +/* + * + */ + +static void +adjustUpperLayerChecksum(int header, int proto, struct _cv *cv6, struct _cv *cv4) +{ + u_short cksum; + struct ipovly ip4; + struct ulc + { + struct in6_addr ulc_src; + struct in6_addr ulc_dst; + u_long ulc_len; + u_char ulc_zero[3]; + u_char ulc_nxt; + } ulc; + + bzero(&ulc, sizeof(struct ulc)); + bzero(&ip4, sizeof(struct ipovly)); + + ulc.ulc_src = cv6->_ip._ip6->ip6_src; + ulc.ulc_dst = cv6->_ip._ip6->ip6_dst; + ulc.ulc_len = htonl(cv6->plen); + ulc.ulc_nxt = cv6->ip_p; + + ip4.ih_src 
= cv4->_ip._ip4->ip_src; + ip4.ih_dst = cv4->_ip._ip4->ip_dst; + ip4.ih_len = htons(cv4->plen); + ip4.ih_pr = cv4->ip_p; + + switch (proto) + { + case IPPROTO_TCP: + if (header == IPPROTO_IPV6) + { + cksum = adjustChecksum(ntohs(cv6->_payload._tcp6->th_sum), + (u_char *)&ulc, sizeof(struct ulc), + (u_char *)&ip4, sizeof(struct ipovly)); + cv4->_payload._tcp4->th_sum = htons(cksum); + } + else + { + cksum = adjustChecksum(ntohs(cv4->_payload._tcp4->th_sum), + (u_char *)&ip4, sizeof(struct ipovly), + (u_char *)&ulc, sizeof(struct ulc)); + cv6->_payload._tcp6->th_sum = htons(cksum); + } + break; + + case IPPROTO_UDP: + if (header == IPPROTO_IPV6) + { + cksum = adjustChecksum(ntohs(cv6->_payload._udp->uh_sum), + (u_char *)&ulc, sizeof(struct ulc), + (u_char *)&ip4, sizeof(struct ipovly)); + cv4->_payload._udp->uh_sum = htons(cksum); + } + else + { + cksum = adjustChecksum(ntohs(cv4->_payload._udp->uh_sum), + (u_char *)&ip4, sizeof(struct ipovly), + (u_char *)&ulc, sizeof(struct ulc)); + cv6->_payload._udp->uh_sum = htons(cksum); + } + break; + + default: + } +} + + +static int +adjustChecksum(int cksum, u_char *optr, int olen, u_char *nptr, int nlen) +{ + long x, old, new; + + x = ~cksum & 0xffff; + + while (olen) + { + if (olen == 1) + { + old = optr[0] * 256 + optr[1]; + x -= old & 0xff00; + if ( x <= 0 ) { x--; x &= 0xffff; } + break; + } + else + { + old = optr[0] * 256 + optr[1]; + x -= old & 0xffff; + if ( x <= 0 ) { x--; x &= 0xffff; } + optr += 2; + olen -= 2; + } + } + + while (nlen) + { + if (nlen == 1) + { + new = nptr[0] * 256 + nptr[1]; + x += new & 0xff00; + if (x & 0x10000) { x++; x &= 0xffff; } + break; + } + else + { + new = nptr[0] * 256 + nptr[1]; + x += new & 0xffff; + if (x & 0x10000) { x++; x &= 0xffff; } + nptr += 2; + nlen -= 2; + } + } + + return (~x & 0xffff); +} diff --git a/bsd/netinet6/natpt_tslot.c b/bsd/netinet6/natpt_tslot.c new file mode 100644 index 000000000..c8beef536 --- /dev/null +++ b/bsd/netinet6/natpt_tslot.c @@ -0,0 +1,1043 @@ 
+/* $KAME: natpt_tslot.c,v 1.8 2000/03/25 07:23:56 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#if !defined(__NetBSD__) && (!defined(__FreeBSD__) || (__FreeBSD__ < 3)) && !defined(__APPLE__) +#include +#endif + +#include +#include +#include +#include + + +/* + * + */ + +static Cell *_insideHash [NATPT_MAXHASH]; +static Cell *_outsideHash[NATPT_MAXHASH]; + +static Cell *tSlotEntry; +static int tSlotEntryMax; +static int tSlotEntryUsed; + +static time_t tSlotTimer; +static time_t maxTTLany; +static time_t maxTTLicmp; +static time_t maxTTLudp; +static time_t maxTTLtcp; + +static time_t _natpt_TCPT_2MSL; +static time_t _natpt_tcp_maxidle; + +extern struct in6_addr natpt_prefix; +extern struct in6_addr natpt_prefixmask; +extern struct in6_addr faith_prefix; +extern struct in6_addr faith_prefixmask; + +static struct pAddr *fillupOutgoingV6local __P((struct _cSlot *, struct _cv *, struct pAddr *)); +static struct pAddr *fillupOutgoingV6Remote __P((struct _cSlot *, struct _cv *, struct pAddr *)); + +static struct _tSlot *registTSlotEntry __P((struct _tSlot *)); +static void _expireTSlot __P((void *)); +static void _expireTSlotEntry __P((struct timeval *)); +static void _removeTSlotEntry __P((struct _cell *, struct _cell *)); +static int _removeHash __P((struct _cell *(*table)[], int, caddr_t)); + +static int _hash_ip4 __P((struct _cv *)); +static int _hash_ip6 __P((struct _cv *)); +static int _hash_pat4 __P((struct pAddr *)); +static int _hash_pat6 __P((struct pAddr *)); +static int _hash_sockaddr4 __P((struct sockaddr_in *)); +static int _hash_sockaddr6 __P((struct sockaddr_in6 *)); +static int _hash_pjw __P((u_char *, int)); + + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +static MALLOC_DEFINE(M_NATPT, "NATPT", "Network Address Translation - Protocol Translation"); +#endif + + +/* + * + */ + +struct _tSlot * +lookingForIncomingV4Hash(struct _cv *cv) +{ + register Cell *p; + register struct 
_tSlot *ats; + register struct ip *ip4; + + int hv = _hash_ip4(cv); + + for (p = _outsideHash[hv]; p; p = CDR(p)) + { + ats = (struct _tSlot *)CAR(p); + + if ((ats->remote.ip_p != IPPROTO_IPV4) + || (cv->ip_payload != ats->ip_payload)) continue; + + if ((cv->ip_payload == IPPROTO_TCP) + || (cv->ip_payload == IPPROTO_UDP)) + { + if (cv->_payload._tcp4->th_sport!= ats->remote._dport) continue; + if (cv->_payload._tcp4->th_dport!= ats->remote._sport) continue; + } + + ip4 = cv->_ip._ip4; + if ((ip4->ip_src.s_addr == ats->remote.in4dst.s_addr) + && (ip4->ip_dst.s_addr == ats->remote.in4src.s_addr)) + return (ats); + } + + return (NULL); +} + + +struct _tSlot * +lookingForOutgoingV4Hash(struct _cv *cv) +{ + register Cell *p; + register struct _tSlot *ats; + register struct ip *ip4; + + int hv = _hash_ip4(cv); + + for (p = _insideHash[hv]; p; p = CDR(p)) + { + ats = (struct _tSlot *)CAR(p); + + if ((ats->local.ip_p != IPPROTO_IPV4) + || (cv->ip_payload != ats->ip_payload)) continue; + + if ((cv->ip_payload == IPPROTO_TCP) + || (cv->ip_payload == IPPROTO_UDP)) + { + if (cv->_payload._tcp4->th_sport != ats->local._dport) continue; + if (cv->_payload._tcp4->th_dport != ats->local._sport) continue; + } + + ip4 = cv->_ip._ip4; + if ((ip4->ip_src.s_addr == ats->local.in4dst.s_addr) + && (ip4->ip_dst.s_addr == ats->local.in4src.s_addr)) + return (ats); + } + + return (NULL); +} + + +struct _tSlot * +lookingForIncomingV6Hash(struct _cv *cv) +{ + register Cell *p; + register struct _tSlot *ats; + register struct ip6_hdr *ip6; + + int hv = _hash_ip6(cv); + + for (p = _outsideHash[hv]; p; p = CDR(p)) + { + ats = (struct _tSlot *)CAR(p); + + if ((ats->remote.ip_p != IPPROTO_IPV6) + || (cv->ip_payload != ats->ip_payload)) continue; + + if ((cv->ip_payload == IPPROTO_TCP) + || (cv->ip_payload == IPPROTO_UDP)) + { + if (cv->_payload._tcp6->th_sport != ats->remote._dport) continue; + if (cv->_payload._tcp6->th_dport != ats->remote._sport) continue; + } + + ip6 = cv->_ip._ip6; + if 
((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ats->remote.in6dst)) + && (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ats->remote.in6src))) + return (ats); + } + + return (NULL); +} + + +struct _tSlot * +lookingForOutgoingV6Hash(struct _cv *cv) +{ + register Cell *p; + register struct _tSlot *ats; + register struct ip6_hdr *ip6; + + int hv = _hash_ip6(cv); + + for (p = _insideHash[hv]; p; p = CDR(p)) + { + ats = (struct _tSlot *)CAR(p); + + if ((ats->local.ip_p != IPPROTO_IPV6) + || (cv->ip_payload != ats->ip_payload)) continue; + + if ((cv->ip_payload == IPPROTO_TCP) + || (cv->ip_payload == IPPROTO_UDP)) + { + if (cv->_payload._tcp6->th_sport != ats->local._dport) continue; + if (cv->_payload._tcp6->th_dport != ats->local._sport) continue; + } + + ip6 = cv->_ip._ip6; + if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ats->local.in6dst)) + && (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ats->local.in6src))) + return (ats); + } + + return (NULL); +} + + +struct _tSlot * +internIncomingV4Hash(int sess, struct _cSlot *acs, struct _cv *cv4) +{ + int s, hv4, hv6; + struct pAddr *local, *remote; + struct _tSlot *ats; + + MALLOC(ats, struct _tSlot *, sizeof(struct _tSlot), M_TEMP, M_NOWAIT); + if (ats == NULL) + { + printf("ENOBUFS in internIncomingV4Hash %d\n", __LINE__); + return (NULL); + } + + bzero(ats, sizeof(struct _tSlot)); + + local = &ats->local; + remote = &ats->remote; + +#ifdef NATPT_NAT + if (acs->local.sa_family == AF_INET) + { + local->ip_p = IPPROTO_IPV4; + local->sa_family = AF_INET; + local->in4src = cv4->_ip._ip4->ip_src; + local->in4dst = acs->local.in4Addr; + if ((cv4->ip_payload == IPPROTO_TCP) + || (cv4->ip_payload == IPPROTO_UDP)) + { + local->_sport = cv4->_payload._tcp4->th_sport; + local->_dport = cv4->_payload._tcp4->th_dport; + if (acs->map & NATPT_PORT_MAP) + { + local->_dport = acs->local._port0; + } + } + } + else +#else + { + local->ip_p = IPPROTO_IPV6; + local->sa_family = AF_INET6; + local->in6src = natpt_prefix; + local->in6src.s6_addr32[3] = cv4->_ip._ip4->ip_src.s_addr; 
+ local->in6dst = acs->local.in6src; + if ((cv4->ip_payload == IPPROTO_TCP) + || (cv4->ip_payload == IPPROTO_UDP)) + { + local->_sport = cv4->_payload._tcp4->th_sport; + local->_dport = cv4->_payload._tcp4->th_dport; + + if (acs->map & NATPT_PORT_MAP) + { + local->_dport = acs->local._port0; + } + } + } +#endif + + remote = &ats->remote; + remote->ip_p = IPPROTO_IPV4; + remote->sa_family = AF_INET; + remote->in4src = acs->remote.in4src; + remote->in4dst = cv4->_ip._ip4->ip_src; + if (acs->remote.ad.type == ADDR_ANY) + { + remote->in4src = cv4->_ip._ip4->ip_dst; + } + + if ((cv4->ip_payload == IPPROTO_TCP) + || (cv4->ip_payload == IPPROTO_UDP)) + { + remote->_sport = cv4->_payload._tcp4->th_dport; + remote->_dport = cv4->_payload._tcp4->th_sport; + } + + ats->ip_payload = cv4->ip_payload; + ats->session = sess; + registTSlotEntry(ats); /* XXX */ + + hv4 = _hash_pat4(remote); +#ifdef NATPT_NAT + if (acs->local.sa_family == AF_INET) + hv6 = _hash_pat4(local); + else +#else + hv6 = _hash_pat6(local); +#endif + + s = splnet(); + LST_hookup_list(&_insideHash [hv6], ats); + LST_hookup_list(&_outsideHash[hv4], ats); + splx(s); + + return (ats); +} + + +struct _tSlot * +internOutgoingV4Hash(int sess, struct _cSlot *acs, struct _cv *cv4) +{ + int s, hv4, hv6; + struct pAddr *local, *remote; + struct _tSlot *ats; + + MALLOC(ats, struct _tSlot *, sizeof(struct _tSlot), M_TEMP, M_NOWAIT); + if (ats == NULL) + { + printf("ENOBUFS in internOutgoingV4Hash %d\n", __LINE__); + return (NULL); + } + + bzero(ats, sizeof(struct _tSlot)); + + local = &ats->local; + local->ip_p = IPPROTO_IPV4; + local->sa_family = AF_INET; + if ((cv4->ip_payload == IPPROTO_TCP) + || (cv4->ip_payload == IPPROTO_UDP)) + { + local->_sport = cv4->_payload._tcp4->th_dport; + local->_dport = cv4->_payload._tcp4->th_sport; + } + + local->in4src = cv4->_ip._ip4->ip_dst; + local->in4dst = cv4->_ip._ip4->ip_src; + + remote = &ats->remote; +#ifdef NATPT_NAT + if (acs->remote.sa_family == AF_INET) + { + remote->ip_p 
= IPPROTO_IPV4; + remote->sa_family = AF_INET; + if ((cv4->ip_payload == IPPROTO_TCP) + || (cv4->ip_payload == IPPROTO_UDP)) + { + remote->_sport = cv4->_payload._tcp4->th_sport; + remote->_dport = cv4->_payload._tcp4->th_dport; + } + remote->in4src = acs->remote.in4src; + remote->in4dst = cv4->_ip._ip4->ip_dst; + } + else +#else /* need check */ + { + remote->ip_p = IPPROTO_IPV6; + remote->sa_family = AF_INET6; + if ((cv4->ip_payload == IPPROTO_TCP) + || (cv4->ip_payload == IPPROTO_UDP)) + { + remote->_sport = cv4->_payload._tcp4->th_sport; + remote->_dport = cv4->_payload._tcp4->th_dport; + } + + if (acs->flags == NATPT_FAITH) + { + struct in6_ifaddr *ia6; + + remote->in6dst.s6_addr32[0] = faith_prefix.s6_addr32[0]; + remote->in6dst.s6_addr32[1] = faith_prefix.s6_addr32[1]; + remote->in6dst.s6_addr32[3] = cv4->_ip._ip4->ip_dst.s_addr; + + ia6 = in6_ifawithscope(natpt_ip6src, &remote->in6dst); + remote->in6src = ia6->ia_addr.sin6_addr; + } + else + { + remote->in6src.s6_addr32[3] = cv4->_ip._ip4->ip_src.s_addr; + remote->in6dst = acs->remote.in6src; + } + } +#endif + + ats->ip_payload = cv4->ip_payload; + ats->session = sess; + registTSlotEntry(ats); /* XXX */ + + hv4 = _hash_pat4(local); +#ifdef NATPT_NAT + if (acs->remote.sa_family == AF_INET) + hv6 = _hash_pat4(remote); + else +#else + hv6 = _hash_pat6(remote); +#endif + + s = splnet(); + LST_hookup_list(&_insideHash [hv4], ats); + LST_hookup_list(&_outsideHash[hv6], ats); + splx(s); + + return (ats); +} + + +struct _tSlot * +internIncomingV6Hash(int sess, struct _cSlot *acs, struct _cv *cv6) +{ + int s, hv4, hv6; + struct pAddr *local, *remote; + struct _tSlot *ats; + + MALLOC(ats, struct _tSlot *, sizeof(struct _tSlot), M_TEMP, M_NOWAIT); + if (ats == NULL) + { + printf("ENOBUFS in internIncomingV6Hash %d\n", __LINE__); + return (NULL); + } + + bzero(ats, sizeof(struct _tSlot)); + + local = &ats->local; + local->ip_p = IPPROTO_IPV4; + local->sa_family = AF_INET; + if ((cv6->ip_payload == IPPROTO_TCP) + || 
(cv6->ip_payload == IPPROTO_UDP)) + { + local->_sport = cv6->_payload._tcp6->th_sport; + local->_dport = cv6->_payload._tcp6->th_dport; + } + local->in4src = acs->local.in4src; + local->in4dst.s_addr = cv6->_ip._ip6->ip6_dst.s6_addr32[3]; + local->sa_family = AF_INET; + local->ip_p = IPPROTO_IPV4; + + remote = &ats->remote; + remote->ip_p = IPPROTO_IPV6; + if ((cv6->ip_payload == IPPROTO_TCP) + || (cv6->ip_payload == IPPROTO_UDP)) + { + remote->_sport = cv6->_payload._tcp6->th_dport; + remote->_dport = cv6->_payload._tcp6->th_sport; + } + remote->in6src = cv6->_ip._ip6->ip6_dst; + remote->in6dst = acs->remote.in6dst; + remote->sa_family = AF_INET6; + remote->ip_p = IPPROTO_IPV6; + + ats->ip_payload = cv6->ip_payload; + ats->session = sess; + registTSlotEntry(ats); /* XXX */ + + hv6 = _hash_pat6(remote); + hv4 = _hash_pat4(local); + + s = splnet(); + LST_hookup_list(&_outsideHash[hv6], ats); + LST_hookup_list(&_insideHash [hv4], ats); + splx(s); + + return (ats); +} + + +struct _tSlot * +internOutgoingV6Hash(int sess, struct _cSlot *acs, struct _cv *cv6) +{ + int s, hv4, hv6; + struct pAddr *local, *remote; + struct _tSlot *ats; + + natpt_logIp6(LOG_DEBUG, cv6->_ip._ip6); + + MALLOC(ats, struct _tSlot *, sizeof(struct _tSlot), M_TEMP, M_NOWAIT); + if (ats == NULL) + { + printf("ENOBUFS in internOutgoingV6Hash %d\n", __LINE__); + return (NULL); + } + + bzero(ats, sizeof(struct _tSlot)); + + local = fillupOutgoingV6local(acs, cv6, &ats->local); + if ((remote = fillupOutgoingV6Remote(acs, cv6, &ats->remote)) == 0) + { + FREE(ats, M_TEMP); + return (NULL); + } + + ats->ip_payload = cv6->ip_payload; + ats->session = sess; + registTSlotEntry(ats); /* XXX */ + + hv6 = _hash_pat6(local); + hv4 = _hash_pat4(remote); + + s = splnet(); + LST_hookup_list(&_insideHash [hv6], ats); + LST_hookup_list(&_outsideHash[hv4], ats); + splx(s); + + return (ats); +} + + +struct _tSlot * +checkTraceroute6Return(struct _cv *cv4) +{ + int hv; + Cell *p; + struct ip *icmpip4; + struct udphdr 
*icmpudp4; + struct sockaddr_in src, dst; + struct _tSlot *ats; + + if ((cv4->ip_payload != IPPROTO_ICMP) + || ((cv4->_payload._icmp4->icmp_type != ICMP_UNREACH) + && (cv4->_payload._icmp4->icmp_type != ICMP_TIMXCEED))) + return (NULL); + + icmpip4 = &cv4->_payload._icmp4->icmp_ip; + if (icmpip4->ip_p != IPPROTO_UDP) + return (NULL); + +#ifdef fixSuMiReICMPBug + icmpip4->ip_src.s_addr = ICMPSRC; /* XXX */ +#endif + + icmpudp4 = (struct udphdr *)((caddr_t)icmpip4 + (icmpip4->ip_hl << 2)); + + bzero(&src, sizeof(struct sockaddr_in)); + bzero(&dst, sizeof(struct sockaddr_in)); + src.sin_addr = icmpip4->ip_src; + src.sin_port = icmpudp4->uh_sport; + dst.sin_addr = icmpip4->ip_dst; + dst.sin_port = icmpudp4->uh_dport; + hv = ((_hash_sockaddr4(&src) + _hash_sockaddr4(&dst)) % NATPT_MAXHASH); + for (p = _outsideHash[hv]; p; p = CDR(p)) + { + ats = (struct _tSlot *)CAR(p); + + if (ats->remote.ip_p != IPPROTO_IPV4) continue; + if (ats->ip_payload != IPPROTO_UDP) continue; + + if (icmpip4->ip_src.s_addr != ats->remote.in4src.s_addr) continue; + if (icmpip4->ip_dst.s_addr != ats->remote.in4dst.s_addr) continue; + + if (icmpudp4->uh_sport != ats->remote._sport) continue; + if (icmpudp4->uh_dport != ats->remote._dport) continue; + + cv4->flags |= NATPT_TRACEROUTE; + return (ats); + } + + return (NULL); +} + + +static struct pAddr * +fillupOutgoingV6local(struct _cSlot *acs, struct _cv *cv6, struct pAddr *local) +{ + local->ip_p = IPPROTO_IPV6; + local->sa_family = AF_INET6; + local->in6src = cv6->_ip._ip6->ip6_dst; + local->in6dst = cv6->_ip._ip6->ip6_src; + + if ((cv6->ip_payload == IPPROTO_TCP) + || (cv6->ip_payload == IPPROTO_UDP)) + { + local->_sport = cv6->_payload._tcp6->th_dport; + local->_dport = cv6->_payload._tcp6->th_sport; + } + + return (local); +} + + +static struct pAddr * +fillupOutgoingV6Remote(struct _cSlot *acs, struct _cv *cv6, struct pAddr *remote) +{ + remote->ip_p = IPPROTO_IPV4; + remote->sa_family = AF_INET; + remote->in4src = acs->remote.in4src; + 
remote->in4dst.s_addr = cv6->_ip._ip6->ip6_dst.s6_addr32[3]; + + if ((cv6->ip_payload == IPPROTO_TCP) + || (cv6->ip_payload == IPPROTO_UDP)) + { + remote->_sport = cv6->_payload._tcp6->th_sport; + remote->_dport = cv6->_payload._tcp6->th_dport; + + /* + * In case mappoing port number, + * acs->remote.port[0..1] has source port mapping range (from command line). + * remote->port[0..1] has actual translation slot info. + */ + if (acs->map & NATPT_PORT_MAP_DYNAMIC) + { + int firsttime = 0; + u_short cport, sport, eport; + struct pAddr pata; /* pata.{s,d}port hold network byte order */ + + cport = ntohs(acs->cport); + sport = ntohs(acs->remote._sport); + eport = ntohs(acs->remote._eport); + + if (cport == 0) + cport = sport - 1; + + bzero(&pata, sizeof(pata)); + pata.ip_p = IPPROTO_IPV4; + pata.sa_family = AF_INET; + pata.in4src = acs->remote.in4src; + pata.in4dst.s_addr = cv6->_ip._ip6->ip6_dst.s6_addr32[3]; + pata._dport = remote->_dport; + + for (;;) + { + while (++cport <= eport) + { + pata._sport = htons(cport); + if (_outsideHash[_hash_pat4(&pata)] == NULL) + goto found; + } + + if (firsttime == 0) + firsttime++, + cport = sport - 1; + else + return (NULL); + } + + found:; + remote->_sport = acs->cport = htons(cport); + } + } + + return (remote); +} + + +static struct _tSlot * +registTSlotEntry(struct _tSlot *ats) +{ + int s; + Cell *p; + struct timeval atv; + + if (tSlotEntryUsed >= tSlotEntryMax) + return (NULL); + + tSlotEntryUsed++; + + microtime(&atv); + ats->tstamp = atv.tv_sec; + + p = LST_cons(ats, NULL); + + s = splnet(); + + if (tSlotEntry == NULL) + tSlotEntry = p; + else + CDR(p) = tSlotEntry, tSlotEntry = p; + + splx(s); + + return (ats); +} + + +/* + * + */ + +static void +_expireTSlot(void *ignored_arg) +{ + struct timeval atv; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + + timeout(_expireTSlot, (caddr_t)0, tSlotTimer); + microtime(&atv); + + _expireTSlotEntry(&atv); +#ifdef __APPLE__ + (void) 
thread_set_funneled(funnel_state); +#endif +} + + +static void +_expireTSlotEntry(struct timeval *atv) +{ + struct _cell *p0, *p1, *q; + struct _tSlot *tsl; + + p0 = tSlotEntry; + q = NULL; + while (p0) + { + tsl = (struct _tSlot *)CAR(p0); + p1 = CDR(p0); + + switch (tsl->ip_payload) + { + case IPPROTO_ICMP: + if ((atv->tv_sec - tsl->tstamp) >= maxTTLicmp) + _removeTSlotEntry(p0, q); + break; + + case IPPROTO_UDP: + if ((atv->tv_sec - tsl->tstamp) >= maxTTLudp) + _removeTSlotEntry(p0, q); + break; + + case IPPROTO_TCP: + switch (tsl->suit.tcp->_state) + { + case TCPS_CLOSED: + if ((atv->tv_sec - tsl->tstamp) >= _natpt_TCPT_2MSL) + _removeTSlotEntry(p0, q); + break; + + case TCPS_SYN_SENT: + case TCPS_SYN_RECEIVED: + if ((atv->tv_sec - tsl->tstamp) >= _natpt_tcp_maxidle) + _removeTSlotEntry(p0, q); + break; + + case TCPS_ESTABLISHED: + if ((atv->tv_sec - tsl->tstamp) >= maxTTLtcp) + _removeTSlotEntry(p0, q); + break; + + case TCPS_FIN_WAIT_1: + case TCPS_FIN_WAIT_2: + if ((atv->tv_sec - tsl->tstamp) >= _natpt_tcp_maxidle) + _removeTSlotEntry(p0, q); + break; + + case TCPS_TIME_WAIT: + if ((atv->tv_sec - tsl->tstamp) >= _natpt_TCPT_2MSL) + _removeTSlotEntry(p0, q); + break; + + default: + if ((atv->tv_sec - tsl->tstamp) >= maxTTLtcp) + _removeTSlotEntry(p0, q); + break; + } + break; + + default: + if ((atv->tv_sec - tsl->tstamp) >= maxTTLany) + _removeTSlotEntry(p0, q); + break; + } + + if (CAR(p0) != CELL_FREE_MARKER) /* p0 may not removed */ + q = p0; + + p0 = p1; + } +} + + +static void +_removeTSlotEntry(struct _cell *p, struct _cell *q) +{ + int s; + int hvin, hvout; + struct _tSlot *tsl = (struct _tSlot *)CAR(p); + + if ((tsl->ip_payload == IPPROTO_TCP) + && (tsl->suit.tcp != NULL)) + { + FREE(tsl->suit.tcp, M_NATPT); + } + + if (tsl->local.ip_p == IPPROTO_IPV4) + hvin = _hash_pat4(&tsl->local); + else + hvin = _hash_pat6(&tsl->local); + + if (tsl->remote.ip_p == IPPROTO_IPV4) + hvout = _hash_pat4(&tsl->remote); + else + hvout = _hash_pat6(&tsl->remote); + + s 
= splnet();

    /* Unhook the slot from both lookup tables, then from the global
       tSlotEntry list (q is the list predecessor, or NULL at head). */
    _removeHash(&_insideHash, hvin, (caddr_t)tsl);
    _removeHash(&_outsideHash, hvout, (caddr_t)tsl);

    if (q != NULL)
	CDR(q) = CDR(p);
    else
	tSlotEntry = CDR(p);

    splx(s);

    LST_free(p);
    /* NOTE(review): tSlot entries are MALLOCed with type M_TEMP in the
       intern*Hash functions but FREEd here with M_NATPT — confirm this
       type mismatch is intentional. */
    FREE(tsl, M_NATPT);

    tSlotEntryUsed--;
}


/*
 * Remove the cell whose CAR is `node' from hash bucket `hv' of the
 * given table, freeing the cell.  Always returns 0, whether or not
 * the node was found.
 */
static int
_removeHash(Cell *(*table)[], int hv, caddr_t node)
{
    register Cell *p, *q;

    if ((p = (*table)[hv]) == NULL)
	return (0);

    /* Single-element bucket: empty it only if it holds the node. */
    if (CDR(p) == NULL)
    {
	if (CAR(p) == (Cell *)node)
	{
	    LST_free(p);
	    (*table)[hv] = NULL;
	}
	return (0);
    }

    /* Multi-element bucket: walk with trailing pointer q and splice. */
    for (p = (*table)[hv], q = NULL; p; q = p, p = CDR(p))
    {
	if (CAR(p) != (Cell *)node)
	    continue;

	if (q == NULL)
	    (*table)[hv] = CDR(p);
	else
	    CDR(q) = CDR(p);

	LST_free(p);
	return (0);
    }

    return (0);
}


/*
 * Hash helpers: each builds zeroed sockaddr pairs from the packet or
 * pAddr and combines per-endpoint hashes modulo NATPT_MAXHASH.
 */

/* Hash an IPv4 packet by (src addr, dst addr) plus TCP/UDP ports. */
static int
_hash_ip4(struct _cv *cv)
{
    struct ip *ip;
    struct sockaddr_in src, dst;

    bzero(&src, sizeof(struct sockaddr_in));
    bzero(&dst, sizeof(struct sockaddr_in));

    ip = cv->_ip._ip4;
    src.sin_addr = ip->ip_src;
    dst.sin_addr = ip->ip_dst;

    if ((ip->ip_p == IPPROTO_TCP) || (ip->ip_p == IPPROTO_UDP))
    {
	struct tcphdr *tcp = cv->_payload._tcp4;

	/* UDP ports occupy the same offsets as TCP's, so the tcphdr
	   view works for both protocols. */
	src.sin_port = tcp->th_sport;
	dst.sin_port = tcp->th_dport;
    }

    return ((_hash_sockaddr4(&src) + _hash_sockaddr4(&dst)) % NATPT_MAXHASH);
}


/* Hash an IPv6 packet by (src addr, dst addr) plus TCP/UDP ports. */
static int
_hash_ip6(struct _cv *cv)
{
    struct ip6_hdr *ip6;
    struct sockaddr_in6 src, dst;

    bzero(&src, sizeof(struct sockaddr_in6));
    bzero(&dst, sizeof(struct sockaddr_in6));

    ip6 = cv->_ip._ip6;
    src.sin6_addr = ip6->ip6_src;
    dst.sin6_addr = ip6->ip6_dst;

    if ((cv->ip_payload == IPPROTO_TCP) || (cv->ip_payload == IPPROTO_UDP))
    {
	struct tcp6hdr *tcp6 = cv->_payload._tcp6;

	src.sin6_port = tcp6->th_sport;
	dst.sin6_port = tcp6->th_dport;
    }

    return ((_hash_sockaddr6(&src) + _hash_sockaddr6(&dst)) % NATPT_MAXHASH);
}


/* Hash a v4 pAddr by its (port, addr) pairs. */
static int
_hash_pat4(struct pAddr *pat4)
{
    struct sockaddr_in src, dst;

    bzero(&src, sizeof(struct sockaddr_in));
    bzero(&dst,
sizeof(struct sockaddr_in)); + + src.sin_port = pat4->_sport; + src.sin_addr = pat4->in4src; + dst.sin_port = pat4->_dport; + dst.sin_addr = pat4->in4dst; + + return ((_hash_sockaddr4(&src) + _hash_sockaddr4(&dst)) % NATPT_MAXHASH); +} + + +static int +_hash_pat6(struct pAddr *pat6) +{ + struct sockaddr_in6 src, dst; + + bzero(&src, sizeof(struct sockaddr_in6)); + bzero(&dst, sizeof(struct sockaddr_in6)); + + src.sin6_port = pat6->_sport; + src.sin6_addr = pat6->in6src; + dst.sin6_port = pat6->_dport; + dst.sin6_addr = pat6->in6dst; + + return ((_hash_sockaddr6(&src) + _hash_sockaddr6(&dst)) % NATPT_MAXHASH); +} + + +static int +_hash_sockaddr4(struct sockaddr_in *sin4) +{ + int byte; + + byte = sizeof(sin4->sin_port) + sizeof(sin4->sin_addr); + return (_hash_pjw((char *)&sin4->sin_port, byte)); +} + + +static int +_hash_sockaddr6(struct sockaddr_in6 *sin6) +{ + int byte; + + sin6->sin6_flowinfo = 0; + byte = sizeof(sin6->sin6_port) + + sizeof(sin6->sin6_flowinfo) + + sizeof(sin6->sin6_addr); + return (_hash_pjw((char *)&sin6->sin6_port, byte)); +} + + +/* CAUTION */ +/* This hash routine is byte order sensitive. Be Careful. 
*/ + +static int +_hash_pjw(register u_char *s, int len) +{ + register u_int c; + register u_int h, g; + + for (c = h = g = 0; c < len; c++, s++) + { + h = (h << 4) + (*s); + if ((g = h & 0xf0000000)) + { + h ^= (g >> 24); + h ^= g; + } + } + return (h % NATPT_MAXHASH); +} + + +/* + * + */ + +void +init_hash() +{ + bzero((caddr_t)_insideHash, sizeof(_insideHash)); + bzero((caddr_t)_outsideHash, sizeof(_outsideHash)); +} + + +void +init_tslot() +{ + tSlotEntry = NULL; + tSlotEntryMax = MAXTSLOTENTRY; + tSlotEntryUsed = 0; + + tSlotTimer = 60 * hz; + timeout(_expireTSlot, (caddr_t)0, tSlotTimer); + + _natpt_TCPT_2MSL = 120; /* [sec] */ + _natpt_tcp_maxidle = 600; /* [sec] */ + + maxTTLicmp = maxTTLudp = _natpt_TCPT_2MSL; + maxTTLtcp = maxTTLany = 86400; /* [sec] */ +} diff --git a/bsd/netinet6/natpt_usrreq.c b/bsd/netinet6/natpt_usrreq.c new file mode 100644 index 000000000..06e4a3afd --- /dev/null +++ b/bsd/netinet6/natpt_usrreq.c @@ -0,0 +1,581 @@ +/* $KAME: natpt_usrreq.c,v 1.9 2000/03/25 07:23:57 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +/* FreeBSD330 compiler complain that do not #include ioctl.h in the kernel, */ +/* Include xxxio.h instead */ +/* #include */ +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include + + +/* + * + */ + +#define NATPTSNDQ (8192) +#define NATPTRCVQ (8192) + +u_long natpt_sendspace = NATPTSNDQ; +u_long natpt_recvspace = NATPTRCVQ; + +#if defined(__bsdi__) || defined(__FreeBSD__) && __FreeBSD__ <= 2 +static struct rawcb ptrcb; +#else +LIST_HEAD(, rawcb) ptrcb; +#endif + +static struct sockaddr natpt_dst = {2, PF_INET}; +#ifdef notused +static struct sockaddr natpt_src = {2, PF_INET}; +#endif + +#if 0 +int natpt_sosetopt __P((struct socket *, int, struct mbuf *)); +int natpt_sogetopt __P((struct socket *, int, struct mbuf *)); +#endif + +static int _natptSetIf __P((caddr_t)); +static int _natptGetIf __P((caddr_t)); +static int _natptSetValue __P((caddr_t)); +static int _natptTestLog __P((caddr_t)); + +void natpt_init __P((void)); + +#ifdef __bsdi__ +int natpt_usrreq 
__P((struct socket *, int, + struct mbuf *, struct mbuf *, struct mbuf *)); +#elif defined(__NetBSD__) +int natpt_usrreq __P((struct socket *, int, + struct mbuf *, struct mbuf *, struct mbuf *, struct proc *)); +#endif /* defined(__bsdi__) || defined(__NetBSD__) */ + + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int natpt_uabort __P((struct socket *)); +int natpt_uattach __P((struct socket *, int, struct proc *)); +int natpt_ubind __P((struct socket *, struct sockaddr *, struct proc *)); +int natpt_uconnect __P((struct socket *, struct sockaddr *, struct proc *)); +int natpt_udetach __P((struct socket *)); +int natpt_ucontrol __P((struct socket *, u_long, caddr_t, struct ifnet *, struct proc *)); +#endif /* defined(__FreeBSD__) && __FreeBSD__ >= 3 */ + +int natpt_attach __P((struct socket *, int)); +int natpt_control __P((struct socket *, int, caddr_t, struct ifnet *)); +int natpt_detach __P((struct socket *)); +int natpt_disconnect __P((struct socket *)); + + +#ifdef __FreeBSD__ +#if __FreeBSD__ >= 3 +struct pr_usrreqs natpt_usrreqs = +{ + natpt_uabort, NULL, natpt_uattach, natpt_ubind, + natpt_uconnect, NULL, natpt_ucontrol, natpt_udetach, + natpt_disconnect, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, sosend, soreceive, sopoll +}; +#else +struct pr_usrreqs natpt_usrreqs = +{ + NULL, NULL, natpt_attach, NULL, + NULL, NULL, natpt_control, natpt_detach, + natpt_disconnect, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL +}; +#endif /* __FreeBSD__ >= 3 */ +#endif /* __FreeBSD__ */ + + +/* + * + */ + +void +natpt_init() +{ + natpt_initialized = 0; + ip6_protocol_tr = 0; + + init_tslot(); + +#if defined(__bsdi__) || defined(__FreeBSD__) && __FreeBSD__ <= 2 + ptrcb.rcb_next = ptrcb.rcb_prev = &ptrcb; +#else + LIST_INIT(&ptrcb); +#endif + + printf("NATPT: initialized.\n"); +} + + +void +natpt_input(struct mbuf *m0, struct sockproto *proto, + struct sockaddr *src, struct sockaddr *dst) +{ + struct rawcb *rp; + struct mbuf *m = m0; + 
struct socket *last;
+    int		sockets = 0;
+
+    last = 0;
+#if defined(__bsdi__) || defined(__FreeBSD__) && __FreeBSD__ <= 2
+    for (rp = ptrcb.rcb_next; rp != &ptrcb; rp = rp->rcb_next)
+#else
+    for (rp = ptrcb.lh_first; rp != 0; rp = rp->rcb_list.le_next)
+#endif
+    {
+	if (rp->rcb_proto.sp_family != proto->sp_family)
+	    continue;
+	if (rp->rcb_proto.sp_protocol
+	    && (rp->rcb_proto.sp_protocol != proto->sp_protocol))
+	    continue;
+
+#define	equal(a1, a2)	(bcmp((caddr_t)(a1), (caddr_t)(a2), a1->sa_len) == 0)
+
+	if (rp->rcb_laddr && !equal(rp->rcb_laddr, dst))
+	    continue;
+	if (rp->rcb_faddr && !equal(rp->rcb_faddr, src))
+	    continue;
+
+	if (last)
+	{
+	    struct mbuf *n;
+
+	    if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL)
+	    {
+		if (sbappendaddr(&last->so_rcv, src, n, (struct mbuf *)NULL) == 0)
+		    m_freem(n);		/* should notify about lost packet */
+		else
+		{
+		    sorwakeup(last);
+		    sockets++;
+		}
+	    }
+	}
+	last = rp->rcb_socket;
+    }
+
+    if (last)
+    {
+	if (sbappendaddr(&last->so_rcv, src, m, (struct mbuf *)NULL) == 0)
+	    m_freem(m);
+	else
+	{
+	    sorwakeup(last);
+	    sockets++;
+	}
+    }
+    else
+	m_freem(m);
+}
+
+
+#if defined(__bsdi__) || defined(__NetBSD__)
+int
+natpt_usrreq(struct socket *so, int req,
+	     struct mbuf *m, struct mbuf *nam, struct mbuf *control
+#ifdef __NetBSD__
+	     ,struct proc *p
+#endif
+	     )
+{
+    struct rawcb	*rp = sotorawcb(so);
+    int			 error = 0;
+
+    if ((rp == NULL) && (req != PRU_ATTACH))
+    {
+	m_freem(m);
+	return (EINVAL);
+    }
+
+    switch (req)
+    {
+     case PRU_ATTACH:
+	error = natpt_attach(so, (int)nam);
+	break;
+
+     case PRU_DETACH:
+	error = natpt_detach(so);
+	break;
+
+     case PRU_DISCONNECT:
+	if (rp->rcb_faddr == NULL)
+	{
+	    error = ENOTCONN;
+	    break;
+	}
+	rp->rcb_faddr = NULL;
+	raw_disconnect(rp);
+	soisdisconnected(so);
+	break;
+
+     case PRU_SEND:
+     case PRU_BIND:
+     case PRU_LISTEN:
+     case PRU_CONNECT:
+     case PRU_ACCEPT:
+     case PRU_SHUTDOWN:
+     case PRU_RCVD:
+     case PRU_ABORT:
+	error = EOPNOTSUPP;
+	break;
+
+     case PRU_CONTROL:
+	error = natpt_control(so,
(int)m, (caddr_t)nam, (struct ifnet *)NULL); + return (error); + break; + + case PRU_SENSE: + case PRU_RCVOOB: + case PRU_SENDOOB: + case PRU_SOCKADDR: + case PRU_PEERADDR: + case PRU_CONNECT2: + case PRU_FASTTIMO: + case PRU_SLOWTIMO: + case PRU_PROTORCV: + case PRU_PROTOSEND: + error = EOPNOTSUPP; + break; + + default: + panic("raw_usrreq"); + } + + if (m != NULL) + m_freem(m); + + return (error); +} +#endif /* defined(__bsdi__) || defined(__NetBSD__) */ + + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +int +natpt_uabort(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return (EINVAL); + + raw_disconnect(rp); + sofree(so); + soisdisconnected(so); + + return (0); +} + + +int +natpt_uattach(struct socket *so, int proto, struct proc *p) +{ + int error; + +#if ISFB31 + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return (error); +#else + if ((so->so_state & SS_PRIV) != 0) + return (EPERM); +#endif + + return (natpt_attach(so, proto)); +} + + +int +natpt_ubind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + return (EINVAL); +} + + +int +natpt_uconnect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + return (EINVAL); +} + + +int +natpt_ucontrol(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, + struct proc *p) +{ + return (natpt_control(so, cmd, data, ifp)); +} + + +int +natpt_udetach(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == 0) + return (EINVAL); + + return (natpt_detach(so)); +} + +#endif /* defined(__FreeBSD__) && __FreeBSD__ >= 3 */ + + +int +natpt_attach(struct socket *so, int proto) +{ + struct rawcb *rp; + int error; + + if (so->so_pcb == NULL) + { + MALLOC(rp, struct rawcb *, sizeof(*rp), M_PCB, M_WAITOK); + so->so_pcb = (caddr_t)rp; + bzero(rp, sizeof(*rp)); + } + + if ((rp = sotorawcb(so)) == NULL) + return (ENOBUFS); + if ((error = soreserve(so, natpt_sendspace, natpt_recvspace))) + return (error); + + rp->rcb_socket = so; 
+ rp->rcb_proto.sp_family = so->so_proto->pr_domain->dom_family; + rp->rcb_proto.sp_protocol = proto; +#if defined(__bsdi__) || defined(__FreeBSD__) && __FreeBSD__ <= 2 + insque(rp, &ptrcb); +#else + LIST_INSERT_HEAD(&ptrcb, rp, rcb_list); +#endif + + /* The socket is always "connected" because + we always know "where" to send the packet */ + rp->rcb_faddr = &natpt_dst; + soisconnected(so); + + return (0); +} + + +int +natpt_detach(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == NULL) + return (ENOTCONN); + + so->so_pcb = NULL; + sofree(so); + +#if defined(__bsdi__) || defined(__FreeBSD__) && __FreeBSD__ <= 2 + remque(rp); +#else + LIST_REMOVE(rp, rcb_list); +#endif + if (rp->rcb_laddr) + m_freem(dtom(rp->rcb_laddr)); + if (rp->rcb_faddr) + m_freem(dtom(rp->rcb_faddr)); + FREE(rp, M_PCB); + + return (0); +} + + +int +natpt_disconnect(struct socket *so) +{ + struct rawcb *rp = sotorawcb(so); + + if (rp == NULL) + return (EINVAL); + + if (rp->rcb_faddr == NULL) + return (ENOTCONN); + + rp->rcb_faddr = NULL; + raw_disconnect(rp); + soisdisconnected(so); + + return (0); +} + + +int +natpt_control(struct socket *so, int cmd, caddr_t data, struct ifnet *ifp) +{ + if (natpt_initialized == 0) + natpt_initialize(); + + switch (cmd) + { + case SIOCSETIF: return (_natptSetIf(data)); + case SIOCGETIF: return (_natptGetIf(data)); + case SIOCENBTRANS: return (_natptEnableTrans(data)); + case SIOCDSBTRANS: return (_natptDisableTrans(data)); + case SIOCSETRULE: return (_natptSetRule(data)); + case SIOCFLUSHRULE: return (_natptFlushRule(data)); + case SIOCSETPREFIX: return (_natptSetPrefix(data)); + case SIOCSETVALUE: return (_natptSetValue(data)); + + case SIOCTESTLOG: return (_natptTestLog(data)); + + case SIOCBREAK: return (_natptBreak()); + } + + return (EINVAL); +} + + +/* + * + */ + +static int +_natptSetIf(caddr_t addr) +{ + struct natpt_msgBox *mbx = (struct natpt_msgBox *)addr; + struct ifBox *ifb; + + if (((ifb = natpt_asIfBox(mbx->m_ifName)) == 
NULL)
+	&& ((ifb = natpt_setIfBox(mbx->m_ifName)) == NULL))
+	return (ENXIO);
+
+    if (ifb->side != noSide)
+    {
+	char	WoW[LBFSZ];
+
+	sprintf(WoW, "[natpt]: interface `%s\' already configured.", mbx->m_ifName);
+	natpt_logMsg(LOG_WARNING, WoW, strlen(WoW));
+	return (EALREADY);
+    }
+
+    {
+	char	WoW[LBFSZ];
+	char	*s;
+
+	natpt_ip6src = ifb->ifnet;
+	if (mbx->flags == IF_EXTERNAL)
+	    ifb->side = outSide, s = "outside";
+	else
+	    ifb->side = inSide,  s = "inside";
+
+	sprintf(WoW, "[natpt]: interface `%s\' set as %s.", mbx->m_ifName, s);
+	natpt_logMsg(LOG_INFO, WoW, strlen(WoW));
+    }
+
+    return (0);
+}
+
+
+static int
+_natptGetIf(caddr_t addr)
+{
+    struct natpt_msgBox	*mbx = (struct natpt_msgBox *)addr;
+    struct ifBox	*ifb;
+
+    if (((ifb = natpt_asIfBox(mbx->m_ifName)) == NULL)
+	&& ((ifb = natpt_setIfBox(mbx->m_ifName)) == NULL))
+	return (ENXIO);
+
+    {
+	switch (ifb->side)
+	{
+	 case outSide:	mbx->flags |= IF_EXTERNAL;	break;
+	 case inSide:	mbx->flags |= IF_INTERNAL;	break;
+	 default:	mbx->flags = -1;		break;
+	}
+    }
+
+    return (0);
+}
+
+
+static int
+_natptSetValue(caddr_t addr)
+{
+    struct natpt_msgBox	*mbx = (struct natpt_msgBox *)addr;
+
+    switch (mbx->flags)
+    {
+     case NATPT_DEBUG:
+	natpt_debug = *((u_int *)mbx->m_aux);
+	break;
+
+     case NATPT_DUMP:
+	natpt_dump = *((u_int *)mbx->m_aux);
+	break;
+    }
+
+    return (0);
+}
+
+
+static int
+_natptTestLog(caddr_t addr)
+{
+    char		*fragile;
+    struct natpt_msgBox	*mbox = (struct natpt_msgBox *)addr;
+
+    MALLOC(fragile, char *, mbox->size, M_TEMP, M_WAITOK);
+    if (copyin(mbox->freight, fragile, mbox->size) != 0) { FREE(fragile, M_TEMP); return (EFAULT); }
+
+    natpt_logMsg(LOG_DEBUG, fragile, mbox->size);
+
+    FREE(fragile, M_TEMP);
+    return (0);
+}
+
diff --git a/bsd/netinet6/natpt_var.h b/bsd/netinet6/natpt_var.h
new file mode 100644
index 000000000..473793907
--- /dev/null
+++ b/bsd/netinet6/natpt_var.h
@@ -0,0 +1,112 @@
+/* $KAME: natpt_var.h,v 1.6 2000/03/25 07:23:57 sumikawa Exp $ */
+
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + + +extern int natpt_initialized; +extern int ip6_protocol_tr; +extern u_int natpt_debug; +extern u_int natpt_dump; + +extern struct ifnet *natpt_ip6src; + +/* natpt_log.c */ +void natpt_logMsg __P((int, void *, size_t)); +void natpt_logMBuf __P((int, struct mbuf *, char *)); +void natpt_logIp4 __P((int, struct ip *)); +void natpt_logIp6 __P((int, struct ip6_hdr *)); +int natpt_log __P((int, int, void *, size_t)); +int natpt_logIN6addr __P((int, char *, struct in6_addr *)); + +void natpt_debugProbe __P((void)); +void natpt_assert __P((const char *, int, const char *)); +void natpt_initialize __P((void)); + + +/* natpt_rule.c */ +struct _cSlot *lookingForIncomingV4Rule __P((struct _cv *)); +struct _cSlot *lookingForOutgoingV4Rule __P((struct _cv *)); +struct _cSlot *lookingForIncomingV6Rule __P((struct _cv *)); +struct _cSlot *lookingForOutgoingV6Rule __P((struct _cv *)); +int _natptEnableTrans __P((caddr_t)); +int _natptDisableTrans __P((caddr_t)); +int _natptSetRule __P((caddr_t)); +int _natptSetFaithRule __P((caddr_t)); +int _natptFlushRule __P((caddr_t)); +int _natptSetPrefix __P((caddr_t)); + +int _natptBreak __P((void)); + + +struct ifBox *natpt_asIfBox __P((char *)); +struct ifBox *natpt_setIfBox __P((char *)); + + +/* natpt_trans.c */ +#ifdef NATPT_NAT +struct mbuf *translatingIPv4To4 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingICMPv4To4 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingTCPv4To4 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingUDPv4To4 __P((struct _cv *, struct pAddr *)); +#endif + +struct mbuf *translatingIPv4To6 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingICMPv4To6 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingTCPv4To6 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingUDPv4To6 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingTCPUDPv4To6 __P((struct _cv *, struct pAddr *, struct _cv *)); + +struct mbuf *translatingIPv6To4 __P((struct 
_cv *, struct pAddr *)); +struct mbuf *translatingICMPv6To4 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingTCPv6To4 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingUDPv6To4 __P((struct _cv *, struct pAddr *)); +struct mbuf *translatingTCPUDPv6To4 __P((struct _cv *, struct pAddr *, struct _cv *)); + + +/* natpt_tslot.c */ +struct _tSlot *lookingForOutgoingV4Hash __P((struct _cv *)); +struct _tSlot *lookingForIncomingV4Hash __P((struct _cv *)); +struct _tSlot *lookingForOutgoingV6Hash __P((struct _cv *)); +struct _tSlot *lookingForIncomingV6Hash __P((struct _cv *)); +struct _tSlot *internIncomingV4Hash __P((int, struct _cSlot *, struct _cv *)); +struct _tSlot *internOutgoingV4Hash __P((int, struct _cSlot *, struct _cv *)); +struct _tSlot *internIncomingV6Hash __P((int, struct _cSlot *, struct _cv *)); +struct _tSlot *internOutgoingV6Hash __P((int, struct _cSlot *, struct _cv *)); + +struct _tSlot *checkTraceroute6Return __P((struct _cv *)); + +void init_hash __P((void)); +void init_tslot __P((void)); + + +/* natpt_usrreq.c */ +void natpt_input __P((struct mbuf *, struct sockproto *, + struct sockaddr *src, struct sockaddr *dst)); + diff --git a/bsd/netinet6/nd6.c b/bsd/netinet6/nd6.c new file mode 100644 index 000000000..ca1bcb13d --- /dev/null +++ b/bsd/netinet6/nd6.c @@ -0,0 +1,2065 @@ +/* $KAME: nd6.c,v 1.51.2.1 2000/04/13 11:59:29 jinmei Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * XXX + * KAME 970409 note: + * BSD/OS version heavily modifies this code, related to llinfo. + * Since we don't have BSD/OS version of net/route.c in our hand, + * I left the code mostly as it was in 970310. 
-- itojun + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include +#endif +#include +#include +#include + +#include +#include +#include +#if !(defined(__bsdi__) && _BSDI_VERSION >= 199802) && !defined(__APPLE__) +#include +#endif +#include +#include + +#include +#ifndef __NetBSD__ +#include +#if __FreeBSD__ +#include +#endif +#ifdef __bsdi__ +#include +#endif +#else /* __NetBSD__ */ +#include +#include +#include +#endif /* __NetBSD__ */ +#include +#include +#include +#include +#include +#include + +#ifndef __bsdi__ +#include "loop.h" +#endif +#if defined(__NetBSD__) || defined(__OpenBSD__) +extern struct ifnet loif[NLOOP]; +#endif + +#include + +#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */ +#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */ + +#define SIN6(s) ((struct sockaddr_in6 *)s) +#define SDL(s) ((struct sockaddr_dl *)s) + +/* timer values */ +int nd6_prune = 1; /* walk list every 1 seconds */ +int nd6_delay = 5; /* delay first probe time 5 second */ +int nd6_umaxtries = 3; /* maximum unicast query */ +int nd6_mmaxtries = 3; /* maximum multicast query */ +int nd6_useloopback = 1; /* use loopback interface for local traffic */ + +/* preventing too many loops in ND option parsing */ +int nd6_maxndopt = 10; /* max # of ND options allowed */ + +/* for debugging? 
*/ +static int nd6_inuse, nd6_allocated; + +struct llinfo_nd6 llinfo_nd6 = {&llinfo_nd6, &llinfo_nd6}; +struct nd_ifinfo *nd_ifinfo = NULL; +struct nd_drhead nd_defrouter; +struct nd_prhead nd_prefix = { 0 }; + +int nd6_recalc_reachtm_interval = ND6_RECALC_REACHTM_INTERVAL; +static struct sockaddr_in6 all1_sa; + +static void nd6_slowtimo __P((void *)); + +#if MIP6 +void (*mip6_expired_defrouter_hook)(struct nd_defrouter *dr) = 0; +#endif + +void +nd6_init() +{ + static int nd6_init_done = 0; + int i; + + if (nd6_init_done) { + log(LOG_NOTICE, "nd6_init called more than once(ignored)\n"); + return; + } + + all1_sa.sin6_family = AF_INET6; + all1_sa.sin6_len = sizeof(struct sockaddr_in6); + for (i = 0; i < sizeof(all1_sa.sin6_addr); i++) + all1_sa.sin6_addr.s6_addr[i] = 0xff; + + /* initialization of the default router list */ + TAILQ_INIT(&nd_defrouter); + + nd6_init_done = 1; + + /* start timer */ + timeout(nd6_slowtimo, (caddr_t)0, ND6_SLOWTIMER_INTERVAL * hz); +} + +void +nd6_ifattach(ifp) + struct ifnet *ifp; +{ + static size_t if_indexlim = 8; + + /* + * We have some arrays that should be indexed by if_index. + * since if_index will grow dynamically, they should grow too. + */ + if (nd_ifinfo == NULL || if_index >= if_indexlim) { + size_t n; + caddr_t q; + + while (if_index >= if_indexlim) + if_indexlim <<= 1; + + /* grow nd_ifinfo */ + n = if_indexlim * sizeof(struct nd_ifinfo); + q = (caddr_t)_MALLOC(n, M_IP6NDP, M_WAITOK); + bzero(q, n); + if (nd_ifinfo) { + bcopy((caddr_t)nd_ifinfo, q, n/2); + _FREE((caddr_t)nd_ifinfo, M_IP6NDP); + } + nd_ifinfo = (struct nd_ifinfo *)q; + } + +#define ND nd_ifinfo[ifp->if_index] + ND.linkmtu = ifindex2ifnet[ifp->if_index]->if_mtu; + ND.chlim = IPV6_DEFHLIM; + ND.basereachable = REACHABLE_TIME; + ND.reachable = ND_COMPUTE_RTIME(ND.basereachable); + ND.retrans = RETRANS_TIMER; + ND.receivedra = 0; + ND.flags = ND6_IFF_PERFORMNUD; + nd6_setmtu(ifp); +#undef ND +} + +/* + * Reset ND level link MTU. 
This function is called when the physical MTU + * changes, which means we might have to adjust the ND level MTU. + */ +void +nd6_setmtu(ifp) + struct ifnet *ifp; +{ + struct nd_ifinfo *ndi = &nd_ifinfo[ifp->if_index]; + u_long oldmaxmtu = ndi->maxmtu; + u_long oldlinkmtu = ndi->linkmtu; + + switch(ifp->if_type) { + case IFT_ARCNET: /* XXX MTU handling needs more work */ + ndi->maxmtu = MIN(60480, ifp->if_mtu); + break; + case IFT_ETHER: + ndi->maxmtu = MIN(ETHERMTU, ifp->if_mtu); + break; +#if defined(__FreeBSD__) || defined(__bsdi__) + case IFT_FDDI: + ndi->maxmtu = MIN(FDDIIPMTU, ifp->if_mtu); + break; +#endif +#if !(defined(__bsdi__) && _BSDI_VERSION >= 199802) && !defined (__APPLE__) + case IFT_ATM: + ndi->maxmtu = MIN(ATMMTU, ifp->if_mtu); + break; +#endif + default: + ndi->maxmtu = ifp->if_mtu; + break; + } + + if (oldmaxmtu != ndi->maxmtu) { + /* + * If the ND level MTU is not set yet, or if the maxmtu + * is reset to a smaller value than the ND level MTU, + * also reset the ND level MTU. + */ + if (ndi->linkmtu == 0 || + ndi->maxmtu < ndi->linkmtu) { + ndi->linkmtu = ndi->maxmtu; + /* also adjust in6_maxmtu if necessary. */ + if (oldlinkmtu == 0) { + /* + * XXX: the case analysis is grotty, but + * it is not efficient to call in6_setmaxmtu() + * here when we are during the initialization + * procedure. + */ + if (in6_maxmtu < ndi->linkmtu) + in6_maxmtu = ndi->linkmtu; + } + else + in6_setmaxmtu(); + } + } +#undef MIN +} + +void +nd6_option_init(opt, icmp6len, ndopts) + void *opt; + int icmp6len; + union nd_opts *ndopts; +{ + bzero(ndopts, sizeof(*ndopts)); + ndopts->nd_opts_search = (struct nd_opt_hdr *)opt; + ndopts->nd_opts_last + = (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len); + + if (icmp6len == 0) { + ndopts->nd_opts_done = 1; + ndopts->nd_opts_search = NULL; + } +} + +/* + * Take one ND option. 
+ */ +struct nd_opt_hdr * +nd6_option(ndopts) + union nd_opts *ndopts; +{ + struct nd_opt_hdr *nd_opt; + int olen; + + if (!ndopts) + panic("ndopts == NULL in nd6_option\n"); + if (!ndopts->nd_opts_last) + panic("uninitialized ndopts in nd6_option\n"); + if (!ndopts->nd_opts_search) + return NULL; + if (ndopts->nd_opts_done) + return NULL; + + nd_opt = ndopts->nd_opts_search; + + olen = nd_opt->nd_opt_len << 3; + if (olen == 0) { + /* + * Message validation requires that all included + * options have a length that is greater than zero. + */ + bzero(ndopts, sizeof(*ndopts)); + return NULL; + } + + ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen); + if (!(ndopts->nd_opts_search < ndopts->nd_opts_last)) { + ndopts->nd_opts_done = 1; + ndopts->nd_opts_search = NULL; + } + return nd_opt; +} + +/* + * Parse multiple ND options. + * This function is much easier to use, for ND routines that do not need + * multiple options of the same type. + */ +int +nd6_options(ndopts) + union nd_opts *ndopts; +{ + struct nd_opt_hdr *nd_opt; + int i = 0; + + if (!ndopts) + panic("ndopts == NULL in nd6_options\n"); + if (!ndopts->nd_opts_last) + panic("uninitialized ndopts in nd6_options\n"); + if (!ndopts->nd_opts_search) + return 0; + + while (1) { + nd_opt = nd6_option(ndopts); + if (!nd_opt && !ndopts->nd_opts_last) { + /* + * Message validation requires that all included + * options have a length that is greater than zero. + */ + bzero(ndopts, sizeof(*ndopts)); + return -1; + } + + if (!nd_opt) + goto skip1; + + switch (nd_opt->nd_opt_type) { + case ND_OPT_SOURCE_LINKADDR: + case ND_OPT_TARGET_LINKADDR: + case ND_OPT_MTU: + case ND_OPT_REDIRECTED_HEADER: + case ND_OPT_ADV_INTERVAL: + if (ndopts->nd_opt_array[nd_opt->nd_opt_type]) { + printf("duplicated ND6 option found " + "(type=%d)\n", nd_opt->nd_opt_type); + /* XXX bark? 
*/ + } else { + ndopts->nd_opt_array[nd_opt->nd_opt_type] + = nd_opt; + } + break; + case ND_OPT_PREFIX_INFORMATION: + if (ndopts->nd_opt_array[nd_opt->nd_opt_type] == 0) { + ndopts->nd_opt_array[nd_opt->nd_opt_type] + = nd_opt; + } + ndopts->nd_opts_pi_end = + (struct nd_opt_prefix_info *)nd_opt; + break; + case ND_OPT_HA_INFORMATION: + break; + default: + /* + * Unknown options must be silently ignored, + * to accomodate future extension to the protocol. + */ + log(LOG_DEBUG, + "nd6_options: unsupported option %d - " + "option ignored\n", nd_opt->nd_opt_type); + } + +skip1: + i++; + if (i > nd6_maxndopt) { + icmp6stat.icp6s_nd_toomanyopt++; + printf("too many loop in nd opt\n"); + break; + } + + if (ndopts->nd_opts_done) + break; + } + + return 0; +} + +/* + * ND6 timer routine to expire default route list and prefix list + */ +void +nd6_timer(ignored_arg) + void *ignored_arg; +{ + int s; + register struct llinfo_nd6 *ln; + register struct nd_defrouter *dr; + register struct nd_prefix *pr; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + timeout(nd6_timer, (caddr_t)0, nd6_prune * hz); + + ln = llinfo_nd6.ln_next; + /* XXX BSD/OS separates this code -- itojun */ + while (ln && ln != &llinfo_nd6) { + struct rtentry *rt; + struct ifnet *ifp; + struct sockaddr_in6 *dst; + struct llinfo_nd6 *next = ln->ln_next; + /* XXX: used for the DELAY case only: */ + struct nd_ifinfo *ndi = NULL; + + if ((rt = ln->ln_rt) == NULL) { + ln = next; + continue; + } + if ((ifp = rt->rt_ifp) == NULL) { + ln = next; + continue; + } + ndi = &nd_ifinfo[ifp->if_index]; + dst = (struct sockaddr_in6 *)rt_key(rt); + + if (ln->ln_expire > time_second) { + ln = next; + continue; + } + + /* sanity check */ + if (!rt) + panic("rt=0 in nd6_timer(ln=%p)\n", ln); + if 
(rt->rt_llinfo && (struct llinfo_nd6 *)rt->rt_llinfo != ln) + panic("rt_llinfo(%p) is not equal to ln(%p)\n", + rt->rt_llinfo, ln); + if (!dst) + panic("dst=0 in nd6_timer(ln=%p)\n", ln); + + switch (ln->ln_state) { + case ND6_LLINFO_INCOMPLETE: + if (ln->ln_asked < nd6_mmaxtries) { + ln->ln_asked++; + ln->ln_expire = time_second + + nd_ifinfo[ifp->if_index].retrans / 1000; + nd6_ns_output(ifp, NULL, &dst->sin6_addr, + ln, 0); + } else { + struct mbuf *m = ln->ln_hold; + if (m) { + if (rt->rt_ifp) { + /* + * Fake rcvif to make ICMP error + * more helpful in diagnosing + * for the receiver. + * XXX: should we consider + * older rcvif? + */ + m->m_pkthdr.rcvif = rt->rt_ifp; + } + icmp6_error(m, ICMP6_DST_UNREACH, + ICMP6_DST_UNREACH_ADDR, 0); + ln->ln_hold = NULL; + } + nd6_free(rt); + } + break; + case ND6_LLINFO_REACHABLE: + if (ln->ln_expire) + ln->ln_state = ND6_LLINFO_STALE; + break; + /* + * ND6_LLINFO_STALE state requires nothing for timer + * routine. + */ + case ND6_LLINFO_DELAY: + if (ndi && (ndi->flags & ND6_IFF_PERFORMNUD) != 0) { + /* We need NUD */ + ln->ln_asked = 1; + ln->ln_state = ND6_LLINFO_PROBE; + ln->ln_expire = time_second + + ndi->retrans / 1000; + nd6_ns_output(ifp, &dst->sin6_addr, + &dst->sin6_addr, + ln, 0); + } + else + ln->ln_state = ND6_LLINFO_STALE; /* XXX */ + break; + case ND6_LLINFO_PROBE: + if (ln->ln_asked < nd6_umaxtries) { + ln->ln_asked++; + ln->ln_expire = time_second + + nd_ifinfo[ifp->if_index].retrans / 1000; + nd6_ns_output(ifp, &dst->sin6_addr, + &dst->sin6_addr, ln, 0); + } else { + nd6_free(rt); + } + break; + case ND6_LLINFO_WAITDELETE: + nd6_free(rt); + break; + } + ln = next; + } + + /* expire */ + dr = TAILQ_FIRST(&nd_defrouter); + while (dr) { + if (dr->expire && dr->expire < time_second) { + struct nd_defrouter *t; + t = TAILQ_NEXT(dr, dr_entry); + defrtrlist_del(dr); + dr = t; + } else { +#if MIP6 + if (mip6_expired_defrouter_hook) + (*mip6_expired_defrouter_hook)(dr); +#endif /* MIP6 */ + dr = TAILQ_NEXT(dr, 
dr_entry); + } + } + pr = nd_prefix.lh_first; + while (pr) { + struct in6_ifaddr *ia6; + struct in6_addrlifetime *lt6; + + if (IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + ia6 = NULL; + else + ia6 = in6ifa_ifpwithaddr(pr->ndpr_ifp, &pr->ndpr_addr); + + if (ia6) { + /* check address lifetime */ + lt6 = &ia6->ia6_lifetime; + if (lt6->ia6t_preferred && lt6->ia6t_preferred < time_second) + ia6->ia6_flags |= IN6_IFF_DEPRECATED; + if (lt6->ia6t_expire && lt6->ia6t_expire < time_second) { + if (!IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + in6_ifdel(pr->ndpr_ifp, &pr->ndpr_addr); + /* xxx ND_OPT_PI_FLAG_ONLINK processing */ + } + } + + /* + * check prefix lifetime. + * since pltime is just for autoconf, pltime processing for + * prefix is not necessary. + * + * we offset expire time by NDPR_KEEP_EXPIRE, so that we + * can use the old prefix information to validate the + * next prefix information to come. See prelist_update() + * for actual validation. + */ + if (pr->ndpr_expire + && pr->ndpr_expire + NDPR_KEEP_EXPIRED < time_second) { + struct nd_prefix *t; + t = pr->ndpr_next; + + /* + * address expiration and prefix expiration are + * separate. NEVER perform in6_ifdel here. + */ + + prelist_remove(pr); + pr = t; + } else + pr = pr->ndpr_next; + } + splx(s); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + +/* + * Nuke neighbor cache/prefix/default router management table, right before + * ifp goes away. + */ +void +nd6_purge(ifp) + struct ifnet *ifp; +{ + struct llinfo_nd6 *ln, *nln; + struct nd_defrouter *dr, *ndr, drany; + struct nd_prefix *pr, *npr; + + /* Nuke default router list entries toward ifp */ + if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) { + /* + * The first entry of the list may be stored in + * the routing table, so we'll delete it later. 
+ */ + for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = ndr) { + ndr = TAILQ_NEXT(dr, dr_entry); + if (dr->ifp == ifp) + defrtrlist_del(dr); + } + dr = TAILQ_FIRST(&nd_defrouter); + if (dr->ifp == ifp) + defrtrlist_del(dr); + } + + /* Nuke prefix list entries toward ifp */ + for (pr = nd_prefix.lh_first; pr; pr = npr) { + npr = pr->ndpr_next; + if (pr->ndpr_ifp == ifp) { + if (!IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + in6_ifdel(pr->ndpr_ifp, &pr->ndpr_addr); + prelist_remove(pr); + } + } + + /* cancel default outgoing interface setting */ + if (nd6_defifindex == ifp->if_index) + nd6_setdefaultiface(0); + + /* refresh default router list */ + bzero(&drany, sizeof(drany)); + defrouter_delreq(&drany, 0); + defrouter_select(); + + /* + * Nuke neighbor cache entries for the ifp. + * Note that rt->rt_ifp may not be the same as ifp, + * due to KAME goto ours hack. See RTM_RESOLVE case in + * nd6_rtrequest(), and ip6_input(). + */ + ln = llinfo_nd6.ln_next; + while (ln && ln != &llinfo_nd6) { + struct rtentry *rt; + struct sockaddr_dl *sdl; + + nln = ln->ln_next; + rt = ln->ln_rt; + if (rt && rt->rt_gateway && + rt->rt_gateway->sa_family == AF_LINK) { + sdl = (struct sockaddr_dl *)rt->rt_gateway; + if (sdl->sdl_index == ifp->if_index) + nd6_free(rt); + } + ln = nln; + } + + /* + * Neighbor cache entry for interface route will be retained + * with ND6_LLINFO_WAITDELETE state, by nd6_free(). Nuke it. 
+ */ + ln = llinfo_nd6.ln_next; + while (ln && ln != &llinfo_nd6) { + struct rtentry *rt; + struct sockaddr_dl *sdl; + + nln = ln->ln_next; + rt = ln->ln_rt; + if (rt && rt->rt_gateway && + rt->rt_gateway->sa_family == AF_LINK) { + sdl = (struct sockaddr_dl *)rt->rt_gateway; + if (sdl->sdl_index == ifp->if_index) { + rtrequest(RTM_DELETE, rt_key(rt), + (struct sockaddr *)0, rt_mask(rt), 0, + (struct rtentry **)0); + } + } + ln = nln; + } +} + +struct rtentry * +nd6_lookup(addr6, create, ifp) + struct in6_addr *addr6; + int create; + struct ifnet *ifp; +{ + struct rtentry *rt; + struct sockaddr_in6 sin6; + + bzero(&sin6, sizeof(sin6)); + sin6.sin6_len = sizeof(struct sockaddr_in6); + sin6.sin6_family = AF_INET6; + sin6.sin6_addr = *addr6; + rt = rtalloc1((struct sockaddr *)&sin6, create +#if __FreeBSD__ || defined (__APPLE__) + , 0UL +#endif /*__FreeBSD__*/ + ); + if (rt && (rt->rt_flags & RTF_LLINFO) == 0) { + /* + * This is the case for the default route. + * If we want to create a neighbor cache for the address, we + * should free the route for the destination and allocate an + * interface route. + */ + if (create) { + RTFREE(rt); + rt = 0; + } + } + if (!rt) { + if (create && ifp) { + int e; + + /* + * If no route is available and create is set, + * we allocate a host route for the destination + * and treat it like an interface route. + * This hack is necessary for a neighbor which can't + * be covered by our own prefix. + */ + struct ifaddr *ifa = + ifaof_ifpforaddr((struct sockaddr *)&sin6, ifp); + if (ifa == NULL) + return(NULL); + + /* + * Create a new route. RTF_LLINFO is necessary + * to create a Neighbor Cache entry for the + * destination in nd6_rtrequest which will be + * called in rtequest via ifa->ifa_rtrequest. 
+ */ + if ((e = rtrequest(RTM_ADD, (struct sockaddr *)&sin6, + ifa->ifa_addr, + (struct sockaddr *)&all1_sa, + (ifa->ifa_flags | + RTF_HOST | RTF_LLINFO) & + ~RTF_CLONING, + &rt)) != 0) + log(LOG_ERR, + "nd6_lookup: failed to add route for a " + "neighbor(%s), errno=%d\n", + ip6_sprintf(addr6), e); + if (rt == NULL) + return(NULL); + if (rt->rt_llinfo) { + struct llinfo_nd6 *ln = + (struct llinfo_nd6 *)rt->rt_llinfo; + ln->ln_state = ND6_LLINFO_NOSTATE; + } + } + else + return(NULL); + } + rt->rt_refcnt--; + /* + * Validation for the entry. + * XXX: we can't use rt->rt_ifp to check for the interface, since + * it might be the loopback interface if the entry is for our + * own address on a non-loopback interface. Instead, we should + * use rt->rt_ifa->ifa_ifp, which would specify the REAL interface. + */ + if ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 || + rt->rt_gateway->sa_family != AF_LINK || + (ifp && rt->rt_ifa->ifa_ifp != ifp)) { + if (create) { + log(LOG_DEBUG, "nd6_lookup: failed to lookup %s (if = %s)\n", + ip6_sprintf(addr6), ifp ? if_name(ifp) : "unspec"); + /* xxx more logs... kazu */ + } + return(0); + } + return(rt); +} + +/* + * Detect if a given IPv6 address identifies a neighbor on a given link. + * XXX: should take care of the destination of a p2p link? + */ +int +nd6_is_addr_neighbor(addr, ifp) + struct in6_addr *addr; + struct ifnet *ifp; +{ + register struct ifaddr *ifa; + int i; + +#define IFADDR6(a) ((((struct in6_ifaddr *)(a))->ia_addr).sin6_addr) +#define IFMASK6(a) ((((struct in6_ifaddr *)(a))->ia_prefixmask).sin6_addr) + + /* A link-local address is always a neighbor. */ + if (IN6_IS_ADDR_LINKLOCAL(addr)) + return(1); + + /* + * If the address matches one of our addresses, + * it should be a neighbor. 
+ */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) +#else + for (ifa = ifp->if_addrlist.tqh_first; + ifa; + ifa = ifa->ifa_list.tqe_next) +#endif + { + if (ifa->ifa_addr->sa_family != AF_INET6) + next: continue; + + for (i = 0; i < 4; i++) { + if ((IFADDR6(ifa).s6_addr32[i] ^ addr->s6_addr32[i]) & + IFMASK6(ifa).s6_addr32[i]) + goto next; + } + return(1); + } + + /* + * Even if the address matches none of our addresses, it might be + * in the neighbor cache. + */ + if (nd6_lookup(addr, 0, ifp)) + return(1); + + return(0); +#undef IFADDR6 +#undef IFMASK6 +} + +/* + * Free an nd6 llinfo entry. + */ +void +nd6_free(rt) + struct rtentry *rt; +{ + struct llinfo_nd6 *ln = (struct llinfo_nd6 *)rt->rt_llinfo; + struct sockaddr_dl *sdl; + struct in6_addr in6 = ((struct sockaddr_in6 *)rt_key(rt))->sin6_addr; + struct nd_defrouter *dr; + + /* + * Clear all destination cache entries for the neighbor. + * XXX: is it better to restrict this to hosts? + */ + pfctlinput(PRC_HOSTDEAD, rt_key(rt)); + + if (!ip6_forwarding && ip6_accept_rtadv) { /* XXX: too restrictive? */ + int s; +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + dr = defrouter_lookup(&((struct sockaddr_in6 *)rt_key(rt))->sin6_addr, + rt->rt_ifp); + if (ln->ln_router || dr) { + /* + * rt6_flush must be called whether or not the neighbor + * is in the Default Router List. + * See a corresponding comment in nd6_na_input(). + */ + rt6_flush(&in6, rt->rt_ifp); + } + + if (dr) { + /* + * Unreachability of a router might affect the default + * router selection and on-link detection of advertised + * prefixes. + */ + + /* + * Temporarily fake the state to choose a new default + * router and to perform on-link determination of + * prefixes correctly. + * Below the state will be set correctly, + * or the entry itself will be deleted. 
+ */ + ln->ln_state = ND6_LLINFO_INCOMPLETE; + + if (dr == TAILQ_FIRST(&nd_defrouter)) { + /* + * It is used as the current default router, + * so we have to move it to the end of the + * list and choose a new one. + * XXX: it is not very efficient if this is + * the only router. + */ + TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_INSERT_TAIL(&nd_defrouter, dr, dr_entry); + + defrouter_select(); + } + pfxlist_onlink_check(); + } + splx(s); + } + + if (rt->rt_refcnt > 0 && (sdl = SDL(rt->rt_gateway)) && + sdl->sdl_family == AF_LINK) { + sdl->sdl_alen = 0; + ln->ln_state = ND6_LLINFO_WAITDELETE; + ln->ln_asked = 0; + rt->rt_flags &= ~RTF_REJECT; + return; + } + + rtrequest(RTM_DELETE, rt_key(rt), (struct sockaddr *)0, + rt_mask(rt), 0, (struct rtentry **)0); +} + +/* + * Upper-layer reachability hint for Neighbor Unreachability Detection. + * + * XXX cost-effective metods? + */ +void +nd6_nud_hint(rt, dst6) + struct rtentry *rt; + struct in6_addr *dst6; +{ + struct llinfo_nd6 *ln; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + /* + * If the caller specified "rt", use that. Otherwise, resolve the + * routing table by supplied "dst6". + */ + if (!rt) { + if (!dst6) + return; + if (!(rt = nd6_lookup(dst6, 0, NULL))) + return; + } + + if ((rt->rt_flags & RTF_GATEWAY) + || (rt->rt_flags & RTF_LLINFO) == 0 + || !rt->rt_llinfo + || !rt->rt_gateway + || rt->rt_gateway->sa_family != AF_LINK) { + /* This is not a host route. */ + return; + } + + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + if (ln->ln_state < ND6_LLINFO_REACHABLE) + return; + + ln->ln_state = ND6_LLINFO_REACHABLE; + if (ln->ln_expire && nd_ifinfo) + ln->ln_expire = time_second + + (rt->rt_ifp ? nd_ifinfo[rt->rt_ifp->if_index].reachable : 0) ; +} + +#if OLDIP6OUTPUT +/* + * Resolve an IP6 address into an ethernet address. If success, + * desten is filled in. 
If there is no entry in ndptab, + * set one up and multicast a solicitation for the IP6 address. + * Hold onto this mbuf and resend it once the address + * is finally resolved. A return value of 1 indicates + * that desten has been filled in and the packet should be sent + * normally; a 0 return indicates that the packet has been + * taken over here, either now or for later transmission. + */ +int +nd6_resolve(ifp, rt, m, dst, desten) + struct ifnet *ifp; + struct rtentry *rt; + struct mbuf *m; + struct sockaddr *dst; + u_char *desten; +{ + struct llinfo_nd6 *ln = (struct llinfo_nd6 *)NULL; + struct sockaddr_dl *sdl; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + if (m->m_flags & M_MCAST) { + switch (ifp->if_type) { + case IFT_ETHER: + case IFT_FDDI: + ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, + desten); + return(1); + break; + case IFT_ARCNET: + *desten = 0; + return(1); + break; + default: + return(0); + } + } + if (rt && (rt->rt_flags & RTF_LLINFO) != 0) + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + else { + if ((rt = nd6_lookup(&(SIN6(dst)->sin6_addr), 1, ifp)) != NULL) + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + } + if (!ln || !rt) { + log(LOG_DEBUG, "nd6_resolve: can't allocate llinfo for %s\n", + ip6_sprintf(&(SIN6(dst)->sin6_addr))); + m_freem(m); + return(0); + } + sdl = SDL(rt->rt_gateway); + /* + * Check that the address family and length are valid and the address + * is resolved; otherwise, try to resolve. + */ + if (ln->ln_state >= ND6_LLINFO_REACHABLE + && sdl->sdl_family == AF_LINK + && sdl->sdl_alen != 0) { + bcopy(LLADDR(sdl), desten, sdl->sdl_alen); + if (ln->ln_state == ND6_LLINFO_STALE) { + ln->ln_asked = 0; + ln->ln_state = ND6_LLINFO_DELAY; + ln->ln_expire = time_second + nd6_delay; + } + return(1); + } + /* + * There is an ndp entry, but no ethernet address + * response yet. Replace the held mbuf with this + * latest one. 
+ * + * XXX Does the code conform to rate-limiting rule? + * (RFC 2461 7.2.2) + */ + if (ln->ln_state == ND6_LLINFO_WAITDELETE || + ln->ln_state == ND6_LLINFO_NOSTATE) + ln->ln_state = ND6_LLINFO_INCOMPLETE; + if (ln->ln_hold) + m_freem(ln->ln_hold); + ln->ln_hold = m; + if (ln->ln_expire) { + rt->rt_flags &= ~RTF_REJECT; + if (ln->ln_asked < nd6_mmaxtries && + ln->ln_expire < time_second) { + ln->ln_asked++; + ln->ln_expire = time_second + + nd_ifinfo[ifp->if_index].retrans / 1000; + nd6_ns_output(ifp, NULL, &(SIN6(dst)->sin6_addr), + ln, 0); + } + } + return(0); +} +#endif /* OLDIP6OUTPUT */ + +void +#if defined(__bsdi__) && _BSDI_VERSION >= 199802 +nd6_rtrequest(req, rt, info) + int req; + struct rtentry *rt; + struct rt_addrinfo *info; /* xxx unused */ +#else +nd6_rtrequest(req, rt, sa) + int req; + struct rtentry *rt; + struct sockaddr *sa; /* xxx unused */ +#endif +{ + struct sockaddr *gate = rt->rt_gateway; + struct llinfo_nd6 *ln = (struct llinfo_nd6 *)rt->rt_llinfo; + static struct sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK}; + struct ifnet *ifp = rt->rt_ifp; + struct ifaddr *ifa; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + if (rt->rt_flags & RTF_GATEWAY) + return; + + switch (req) { + case RTM_ADD: + /* + * There is no backward compatibility :) + * + * if ((rt->rt_flags & RTF_HOST) == 0 && + * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff) + * rt->rt_flags |= RTF_CLONING; + */ + if (rt->rt_flags & (RTF_CLONING | RTF_LLINFO)) { + /* + * Case 1: This route should come from + * a route to interface. RTF_LLINFO flag is set + * for a host route whose destination should be + * treated as on-link. 
+ */ + rt_setgate(rt, rt_key(rt), + (struct sockaddr *)&null_sdl); + gate = rt->rt_gateway; + SDL(gate)->sdl_type = ifp->if_type; + SDL(gate)->sdl_index = ifp->if_index; + if (ln) + ln->ln_expire = time_second; +#if 1 + if (ln && ln->ln_expire == 0) { + /* cludge for desktops */ +#if 0 + printf("nd6_request: time.tv_sec is zero; " + "treat it as 1\n"); +#endif + ln->ln_expire = 1; + } +#endif + if (rt->rt_flags & RTF_CLONING) + break; + } + /* + * In IPv4 code, we try to annonuce new RTF_ANNOUNCE entry here. + * We don't do that here since llinfo is not ready yet. + * + * There are also couple of other things to be discussed: + * - unsolicited NA code needs improvement beforehand + * - RFC2461 says we MAY send multicast unsolicited NA + * (7.2.6 paragraph 4), however, it also says that we + * SHOULD provide a mechanism to prevent multicast NA storm. + * we don't have anything like it right now. + * note that the mechanism need a mutual agreement + * between proxies, which means that we need to implement + * a new protocol, or new kludge. + * - from RFC2461 6.2.4, host MUST NOT send unsolicited NA. + * we need to check ip6forwarding before sending it. + * (or should we allow proxy ND configuration only for + * routers? there's no mention about proxy ND from hosts) + */ +#if 0 + /* XXX it does not work */ + if (rt->rt_flags & RTF_ANNOUNCE) + nd6_na_output(ifp, + &SIN6(rt_key(rt))->sin6_addr, + &SIN6(rt_key(rt))->sin6_addr, + ip6_forwarding ? ND_NA_FLAG_ROUTER : 0, + 1, NULL); +#endif + /* FALLTHROUGH */ + case RTM_RESOLVE: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) { + /* + * Address resolution isn't necessary for a point to + * point link, so we can skip this test for a p2p link. 
+ */ + if (gate->sa_family != AF_LINK || + gate->sa_len < sizeof(null_sdl)) { + log(LOG_DEBUG, + "nd6_rtrequest: bad gateway value\n"); + break; + } + SDL(gate)->sdl_type = ifp->if_type; + SDL(gate)->sdl_index = ifp->if_index; + } + if (ln != NULL) + break; /* This happens on a route change */ + /* + * Case 2: This route may come from cloning, or a manual route + * add with a LL address. + */ + R_Malloc(ln, struct llinfo_nd6 *, sizeof(*ln)); + rt->rt_llinfo = (caddr_t)ln; + if (!ln) { + log(LOG_DEBUG, "nd6_rtrequest: malloc failed\n"); + break; + } + nd6_inuse++; + nd6_allocated++; + Bzero(ln, sizeof(*ln)); + ln->ln_rt = rt; + /* this is required for "ndp" command. - shin */ + if (req == RTM_ADD) { + /* + * gate should have some valid AF_LINK entry, + * and ln->ln_expire should have some lifetime + * which is specified by ndp command. + */ + ln->ln_state = ND6_LLINFO_REACHABLE; + } else { + /* + * When req == RTM_RESOLVE, rt is created and + * initialized in rtrequest(), so rt_expire is 0. + */ + ln->ln_state = ND6_LLINFO_NOSTATE; + ln->ln_expire = time_second; + } + rt->rt_flags |= RTF_LLINFO; + ln->ln_next = llinfo_nd6.ln_next; + llinfo_nd6.ln_next = ln; + ln->ln_prev = &llinfo_nd6; + ln->ln_next->ln_prev = ln; + + /* + * check if rt_key(rt) is one of my address assigned + * to the interface. + */ + ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp, + &SIN6(rt_key(rt))->sin6_addr); + if (ifa) { + caddr_t macp = nd6_ifptomac(ifp); + ln->ln_expire = 0; + ln->ln_state = ND6_LLINFO_REACHABLE; + if (macp) { + Bcopy(macp, LLADDR(SDL(gate)), ifp->if_addrlen); + SDL(gate)->sdl_alen = ifp->if_addrlen; + } + if (nd6_useloopback) { +#ifdef __bsdi__ +#if _BSDI_VERSION >= 199802 + extern struct ifnet *loifp; + rt->rt_ifp = loifp; /*XXX*/ +#else + extern struct ifnet loif; + rt->rt_ifp = &loif; /*XXX*/ +#endif +#else /* non-bsdi */ + rt->rt_ifp = &loif[0]; /*XXX*/ +#endif + /* + * Make sure rt_ifa be equal to the ifaddr + * corresponding to the address. 
+ * We need this because when we refer + * rt_ifa->ia6_flags in ip6_input, we assume + * that the rt_ifa points to the address instead + * of the loopback address. + */ + if (ifa != rt->rt_ifa) { + rt->rt_ifa->ifa_refcnt--; + ifa->ifa_refcnt++; + rt->rt_ifa = ifa; + } + } + } else if (rt->rt_flags & RTF_ANNOUNCE) { + ln->ln_expire = 0; + ln->ln_state = ND6_LLINFO_REACHABLE; + + /* join solicited node multicast for proxy ND */ + if (ifp->if_flags & IFF_MULTICAST) { + struct in6_addr llsol; + int error; + + llsol = SIN6(rt_key(rt))->sin6_addr; + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr8[12] = 0xff; + + (void)in6_addmulti(&llsol, ifp, &error); + if (error) + printf( +"nd6_rtrequest: could not join solicited node multicast (errno=%d)\n", error); + } + } + break; + + case RTM_DELETE: + if (!ln) + break; + /* leave from solicited node multicast for proxy ND */ + if ((rt->rt_flags & RTF_ANNOUNCE) != 0 && + (ifp->if_flags & IFF_MULTICAST) != 0) { + struct in6_addr llsol; + struct in6_multi *in6m; + + llsol = SIN6(rt_key(rt))->sin6_addr; + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr8[12] = 0xff; + + IN6_LOOKUP_MULTI(llsol, ifp, in6m); + if (in6m) + in6_delmulti(in6m); + } + nd6_inuse--; + ln->ln_next->ln_prev = ln->ln_prev; + ln->ln_prev->ln_next = ln->ln_next; + ln->ln_prev = NULL; + rt->rt_llinfo = 0; + rt->rt_flags &= ~RTF_LLINFO; + if (ln->ln_hold) + m_freem(ln->ln_hold); + Free((caddr_t)ln); + } +} + +void +#if defined(__bsdi__) && _BSDI_VERSION >= 199802 +nd6_p2p_rtrequest(req, rt, info) + int req; + struct rtentry *rt; + struct rt_addrinfo *info; /* xxx unused */ +#else +nd6_p2p_rtrequest(req, rt, sa) + int req; + struct rtentry *rt; + struct sockaddr *sa; /* xxx unused */ +#endif +{ + struct sockaddr *gate = rt->rt_gateway; + static struct 
sockaddr_dl null_sdl = {sizeof(null_sdl), AF_LINK}; + struct ifnet *ifp = rt->rt_ifp; + struct ifaddr *ifa; + + if (rt->rt_flags & RTF_GATEWAY) + return; + + switch (req) { + case RTM_ADD: + /* + * There is no backward compatibility :) + * + * if ((rt->rt_flags & RTF_HOST) == 0 && + * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff) + * rt->rt_flags |= RTF_CLONING; + */ + if (rt->rt_flags & RTF_CLONING) { + /* + * Case 1: This route should come from + * a route to interface. + */ + rt_setgate(rt, rt_key(rt), + (struct sockaddr *)&null_sdl); + gate = rt->rt_gateway; + SDL(gate)->sdl_type = ifp->if_type; + SDL(gate)->sdl_index = ifp->if_index; + break; + } + /* Announce a new entry if requested. */ + if (rt->rt_flags & RTF_ANNOUNCE) + nd6_na_output(ifp, + &SIN6(rt_key(rt))->sin6_addr, + &SIN6(rt_key(rt))->sin6_addr, + ip6_forwarding ? ND_NA_FLAG_ROUTER : 0, + 1, NULL); + /* FALLTHROUGH */ + case RTM_RESOLVE: + /* + * check if rt_key(rt) is one of my address assigned + * to the interface. + */ + ifa = (struct ifaddr *)in6ifa_ifpwithaddr(rt->rt_ifp, + &SIN6(rt_key(rt))->sin6_addr); + if (ifa) { + if (nd6_useloopback) { +#ifdef __bsdi__ +#if _BSDI_VERSION >= 199802 + extern struct ifnet *loifp; + rt->rt_ifp = loifp; /*XXX*/ +#else + extern struct ifnet loif; + rt->rt_ifp = &loif; /*XXX*/ +#endif +#else + rt->rt_ifp = &loif[0]; /*XXX*/ +#endif /*__bsdi__*/ + } + } + break; + } +} + +int +nd6_ioctl(cmd, data, ifp) + u_long cmd; + caddr_t data; + struct ifnet *ifp; +{ + struct in6_drlist *drl = (struct in6_drlist *)data; + struct in6_prlist *prl = (struct in6_prlist *)data; + struct in6_ndireq *ndi = (struct in6_ndireq *)data; + struct in6_nbrinfo *nbi = (struct in6_nbrinfo *)data; + struct in6_ndifreq *ndif = (struct in6_ndifreq *)data; + struct nd_defrouter *dr, any; + struct nd_prefix *pr; + struct rtentry *rt; + int i = 0, error = 0; + int s; + + switch (cmd) { + case SIOCGDRLST_IN6: + bzero(drl, sizeof(*drl)); +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = 
splnet(); +#endif + dr = TAILQ_FIRST(&nd_defrouter); + while (dr && i < DRLSTSIZ) { + drl->defrouter[i].rtaddr = dr->rtaddr; + if (IN6_IS_ADDR_LINKLOCAL(&drl->defrouter[i].rtaddr)) { + /* XXX: need this hack for the KAME stack */ + drl->defrouter[i].rtaddr.s6_addr16[1] = 0; + } + else + log(LOG_ERR, + "default router list contains a " + "non-linklocal address(%s)\n", + ip6_sprintf(&drl->defrouter[i].rtaddr)); + + drl->defrouter[i].flags = dr->flags; + drl->defrouter[i].rtlifetime = dr->rtlifetime; + drl->defrouter[i].expire = dr->expire; + drl->defrouter[i].if_index = dr->ifp->if_index; + i++; + dr = TAILQ_NEXT(dr, dr_entry); + } + splx(s); + break; + case SIOCGPRLST_IN6: + /* + * XXX meaning of fields, especially "raflags", is very + * different between RA prefix list and RR/static prefix list. + * how about separating ioctls into two? + */ + bzero(prl, sizeof(*prl)); +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + pr = nd_prefix.lh_first; + while (pr && i < PRLSTSIZ) { + struct nd_pfxrouter *pfr; + int j; + + prl->prefix[i].prefix = pr->ndpr_prefix.sin6_addr; + prl->prefix[i].raflags = pr->ndpr_raf; + prl->prefix[i].prefixlen = pr->ndpr_plen; + prl->prefix[i].vltime = pr->ndpr_vltime; + prl->prefix[i].pltime = pr->ndpr_pltime; + prl->prefix[i].if_index = pr->ndpr_ifp->if_index; + prl->prefix[i].expire = pr->ndpr_expire; + + pfr = pr->ndpr_advrtrs.lh_first; + j = 0; + while(pfr) { + if (j < DRLSTSIZ) { +#define RTRADDR prl->prefix[i].advrtr[j] + RTRADDR = pfr->router->rtaddr; + if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) { + /* XXX: hack for KAME */ + RTRADDR.s6_addr16[1] = 0; + } + else + log(LOG_ERR, + "a router(%s) advertises " + "a prefix with " + "non-link local address\n", + ip6_sprintf(&RTRADDR)); +#undef RTRADDR + } + j++; + pfr = pfr->pfr_next; + } + prl->prefix[i].advrtrs = j; + prl->prefix[i].origin = PR_ORIG_RA; + + i++; + pr = pr->ndpr_next; + } + { + struct rr_prefix *rpp; + + for (rpp = LIST_FIRST(&rr_prefix); rpp; + rpp = 
LIST_NEXT(rpp, rp_entry)) { + if (i >= PRLSTSIZ) + break; + prl->prefix[i].prefix = rpp->rp_prefix.sin6_addr; + prl->prefix[i].raflags = rpp->rp_raf; + prl->prefix[i].prefixlen = rpp->rp_plen; + prl->prefix[i].vltime = rpp->rp_vltime; + prl->prefix[i].pltime = rpp->rp_pltime; + prl->prefix[i].if_index = rpp->rp_ifp->if_index; + prl->prefix[i].expire = rpp->rp_expire; + prl->prefix[i].advrtrs = 0; + prl->prefix[i].origin = rpp->rp_origin; + i++; + } + } + splx(s); + + break; + case SIOCGIFINFO_IN6: + ndi->ndi = nd_ifinfo[ifp->if_index]; + break; + case SIOCSIFINFO_FLAGS: + /* XXX: almost all other fields of ndi->ndi is unused */ + nd_ifinfo[ifp->if_index].flags = ndi->ndi.flags; + break; + case SIOCSNDFLUSH_IN6: /* XXX: the ioctl name is confusing... */ + /* flush default router list */ + /* + * xxx sumikawa: should not delete route if default + * route equals to the top of default router list + */ + bzero(&any, sizeof(any)); + defrouter_delreq(&any, 0); + defrouter_select(); + /* xxx sumikawa: flush prefix list */ + break; + case SIOCSPFXFLUSH_IN6: + { + /* flush all the prefix advertised by routers */ + struct nd_prefix *pr, *next; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + for (pr = nd_prefix.lh_first; pr; pr = next) { + next = pr->ndpr_next; + if (!IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + in6_ifdel(pr->ndpr_ifp, &pr->ndpr_addr); + prelist_remove(pr); + } + splx(s); + break; + } + case SIOCSRTRFLUSH_IN6: + { + /* flush all the default routers */ + struct nd_defrouter *dr, *next; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + if ((dr = TAILQ_FIRST(&nd_defrouter)) != NULL) { + /* + * The first entry of the list may be stored in + * the routing table, so we'll delete it later. 
+ */ + for (dr = TAILQ_NEXT(dr, dr_entry); dr; dr = next) { + next = TAILQ_NEXT(dr, dr_entry); + defrtrlist_del(dr); + } + defrtrlist_del(TAILQ_FIRST(&nd_defrouter)); + } + splx(s); + break; + } + case SIOCGNBRINFO_IN6: + { + struct llinfo_nd6 *ln; + struct in6_addr nb_addr = nbi->addr; /* make local for safety */ + + /* + * XXX: KAME specific hack for scoped addresses + * XXXX: for other scopes than link-local? + */ + if (IN6_IS_ADDR_LINKLOCAL(&nbi->addr) || + IN6_IS_ADDR_MC_LINKLOCAL(&nbi->addr)) { + u_int16_t *idp = (u_int16_t *)&nb_addr.s6_addr[2]; + + if (*idp == 0) + *idp = htons(ifp->if_index); + } + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + if ((rt = nd6_lookup(&nb_addr, 0, ifp)) == NULL) { + error = EINVAL; + splx(s); + break; + } + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + nbi->state = ln->ln_state; + nbi->asked = ln->ln_asked; + nbi->isrouter = ln->ln_router; + nbi->expire = ln->ln_expire; + splx(s); + + break; + } + case SIOCGDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */ + ndif->ifindex = nd6_defifindex; + break; + case SIOCSDEFIFACE_IN6: /* XXX: should be implemented as a sysctl? */ + return(nd6_setdefaultiface(ndif->ifindex)); + break; + } + return(error); +} + +/* + * Create neighbor cache entry and cache link-layer address, + * on reception of inbound ND6 packets. 
(RS/RA/NS/redirect) + */ +struct rtentry * +nd6_cache_lladdr(ifp, from, lladdr, lladdrlen, type, code) + struct ifnet *ifp; + struct in6_addr *from; + char *lladdr; + int lladdrlen; + int type; /* ICMP6 type */ + int code; /* type dependent information */ +{ + struct rtentry *rt = NULL; + struct llinfo_nd6 *ln = NULL; + int is_newentry; + struct sockaddr_dl *sdl = NULL; + int do_update; + int olladdr; + int llchange; + int newstate = 0; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + if (!ifp) + panic("ifp == NULL in nd6_cache_lladdr"); + if (!from) + panic("from == NULL in nd6_cache_lladdr"); + + /* nothing must be updated for unspecified address */ + if (IN6_IS_ADDR_UNSPECIFIED(from)) + return NULL; + + /* + * Validation about ifp->if_addrlen and lladdrlen must be done in + * the caller. + * + * XXX If the link does not have link-layer adderss, what should + * we do? (ifp->if_addrlen == 0) + * Spec says nothing in sections for RA, RS and NA. There's small + * description on it in NS section (RFC 2461 7.2.3). + */ + + rt = nd6_lookup(from, 0, ifp); + if (!rt) { +#if 0 + /* nothing must be done if there's no lladdr */ + if (!lladdr || !lladdrlen) + return NULL; +#endif + + rt = nd6_lookup(from, 1, ifp); + is_newentry = 1; + } else + is_newentry = 0; + + if (!rt) + return NULL; + if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) { +fail: + nd6_free(rt); + return NULL; + } + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + if (!ln) + goto fail; + if (!rt->rt_gateway) + goto fail; + if (rt->rt_gateway->sa_family != AF_LINK) + goto fail; + sdl = SDL(rt->rt_gateway); + + olladdr = (sdl->sdl_alen) ? 
1 : 0; + if (olladdr && lladdr) { + if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) + llchange = 1; + else + llchange = 0; + } else + llchange = 0; + + /* + * newentry olladdr lladdr llchange (*=record) + * 0 n n -- (1) + * 0 y n -- (2) + * 0 n y -- (3) * STALE + * 0 y y n (4) * + * 0 y y y (5) * STALE + * 1 -- n -- (6) NOSTATE(= PASSIVE) + * 1 -- y -- (7) * STALE + */ + + if (lladdr) { /*(3-5) and (7)*/ + /* + * Record source link-layer address + * XXX is it dependent to ifp->if_type? + */ + sdl->sdl_alen = ifp->if_addrlen; + bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen); + } + + if (!is_newentry) { + if ((!olladdr && lladdr) /*(3)*/ + || (olladdr && lladdr && llchange)) { /*(5)*/ + do_update = 1; + newstate = ND6_LLINFO_STALE; + } else /*(1-2,4)*/ + do_update = 0; + } else { + do_update = 1; + if (!lladdr) /*(6)*/ + newstate = ND6_LLINFO_NOSTATE; + else /*(7)*/ + newstate = ND6_LLINFO_STALE; + } + + if (do_update) { + /* + * Update the state of the neighbor cache. + */ + ln->ln_state = newstate; + + if (ln->ln_state == ND6_LLINFO_STALE) { + rt->rt_flags &= ~RTF_REJECT; + if (ln->ln_hold) { +#if OLDIP6OUTPUT + (*ifp->if_output)(ifp, ln->ln_hold, + rt_key(rt), rt); +#else + nd6_output(ifp, ln->ln_hold, + (struct sockaddr_in6 *)rt_key(rt), + rt); +#endif + ln->ln_hold = 0; + } + } else if (ln->ln_state == ND6_LLINFO_INCOMPLETE) { + /* probe right away */ + ln->ln_expire = time_second; + } + } + + /* + * ICMP6 type dependent behavior. + * + * NS: clear IsRouter if new entry + * RS: clear IsRouter + * RA: set IsRouter if there's lladdr + * redir: clear IsRouter if new entry + * + * RA case, (1): + * The spec says that we must set IsRouter in the following cases: + * - If lladdr exist, set IsRouter. This means (1-5). + * - If it is old entry (!newentry), set IsRouter. This means (7). + * So, based on the spec, in (1-5) and (7) cases we must set IsRouter. + * A quetion arises for (1) case. (1) case has no lladdr in the + * neighbor cache, this is similar to (6). 
+ * This case is rare but we figured that we MUST NOT set IsRouter. + * + * newentry olladdr lladdr llchange NS RS RA redir + * D R + * 0 n n -- (1) c ? s + * 0 y n -- (2) c s s + * 0 n y -- (3) c s s + * 0 y y n (4) c s s + * 0 y y y (5) c s s + * 1 -- n -- (6) c c c s + * 1 -- y -- (7) c c s c s + * + * (c=clear s=set) + */ + switch (type & 0xff) { + case ND_NEIGHBOR_SOLICIT: + /* + * New entry must have is_router flag cleared. + */ + if (is_newentry) /*(6-7)*/ + ln->ln_router = 0; + break; + case ND_REDIRECT: + /* + * If the icmp is a redirect to a better router, always set the + * is_router flag. Otherwise, if the entry is newly created, + * clear the flag. [RFC 2461, sec 8.3] + * + */ + if (code == ND_REDIRECT_ROUTER) + ln->ln_router = 1; + else if (is_newentry) /*(6-7)*/ + ln->ln_router = 0; + break; + case ND_ROUTER_SOLICIT: + /* + * is_router flag must always be cleared. + */ + ln->ln_router = 0; + break; + case ND_ROUTER_ADVERT: + /* + * Mark an entry with lladdr as a router. + */ + if ((!is_newentry && (olladdr || lladdr)) /*(2-5)*/ + || (is_newentry && lladdr)) { /*(7)*/ + ln->ln_router = 1; + } + break; + } + + return rt; +} + +static void +nd6_slowtimo(ignored_arg) + void *ignored_arg; +{ + int s; + register int i; + register struct nd_ifinfo *nd6if; +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + + timeout(nd6_slowtimo, (caddr_t)0, ND6_SLOWTIMER_INTERVAL * hz); + for (i = 1; i < if_index + 1; i++) { + nd6if = &nd_ifinfo[i]; + if (nd6if->basereachable && /* already initialized */ + (nd6if->recalctm -= ND6_SLOWTIMER_INTERVAL) <= 0) { + /* + * Since reachable time rarely changes by router + * advertisements, we SHOULD insure that a new random + * value gets recomputed at least once every few hours. 
+ * (RFC 2461, 6.3.4) + */ + nd6if->recalctm = nd6_recalc_reachtm_interval; + nd6if->reachable = ND_COMPUTE_RTIME(nd6if->basereachable); + } + } + splx(s); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif + +} + +#define senderr(e) { error = (e); goto bad;} +int +nd6_output(ifp, m0, dst, rt0) + register struct ifnet *ifp; + struct mbuf *m0; + struct sockaddr_in6 *dst; + struct rtentry *rt0; +{ + register struct mbuf *m = m0; + register struct rtentry *rt = rt0; + struct llinfo_nd6 *ln = NULL; + int error = 0; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr)) + goto sendpkt; + + /* + * XXX: we currently do not make neighbor cache on any interface + * other than ARCnet, Ethernet, FDDI and GIF. + * + * draft-ietf-ngtrans-mech-04.txt says: + * - unidirectional tunnels needs no ND + */ + switch (ifp->if_type) { + case IFT_ARCNET: + case IFT_ETHER: + case IFT_FDDI: + case IFT_GIF: /* XXX need more cases? */ + break; + default: + goto sendpkt; + } + + if ((ifp->if_flags & IFF_POINTOPOINT) != 0 && + (nd_ifinfo[ifp->if_index].flags & ND6_IFF_PERFORMNUD) == 0) + goto sendpkt; + + /* + * next hop determination. This routine is derived from ether_outpout. + */ + if (rt) { + if ((rt->rt_flags & RTF_UP) == 0) { +#if __FreeBSD__ || defined (__APPLE__) + if ((rt0 = rt = rtalloc1((struct sockaddr *)dst, 1, 0UL)) != + NULL) +#else + if ((rt0 = rt = rtalloc1((struct sockaddr *)dst, 1)) != + NULL) +#endif + { + rt->rt_refcnt--; + if (rt->rt_ifp != ifp) + return nd6_output(ifp, m0, dst, rt); /* XXX: loop care? 
*/ + } else + senderr(EHOSTUNREACH); + } + if (rt->rt_flags & RTF_GATEWAY) { + if (rt->rt_gwroute == 0) + goto lookup; + if (((rt = rt->rt_gwroute)->rt_flags & RTF_UP) == 0) { + rtfree(rt); rt = rt0; +#if __FreeBSD__ || defined (__APPLE__) + lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1, 0UL); +#else + lookup: rt->rt_gwroute = rtalloc1(rt->rt_gateway, 1); +#endif + if ((rt = rt->rt_gwroute) == 0) + senderr(EHOSTUNREACH); +#ifdef __bsdi__ + /* the "G" test below also prevents rt == rt0 */ + if ((rt->rt_flags & RTF_GATEWAY) || + (rt->rt_ifp != ifp)) { + rt->rt_refcnt--; + rt0->rt_gwroute = 0; + senderr(EHOSTUNREACH); + } +#endif + } + } + if (rt->rt_flags & RTF_REJECT) + senderr(rt == rt0 ? EHOSTDOWN : EHOSTUNREACH); + } + + /* + * Address resolution or Neighbor Unreachability Detection + * for the next hop. + * At this point, the destination of the packet must be a unicast + * or an anycast address(i.e. not a multicast). + */ + + /* Look up the neighbor cache for the nexthop */ + if (rt && (rt->rt_flags & RTF_LLINFO) != 0) + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + else { + if ((rt = nd6_lookup(&dst->sin6_addr, 1, ifp)) != NULL) + ln = (struct llinfo_nd6 *)rt->rt_llinfo; + } + if (!ln || !rt) { + log(LOG_DEBUG, "nd6_output: can't allocate llinfo for %s " + "(ln=%p, rt=%p)\n", + ip6_sprintf(&dst->sin6_addr), ln, rt); + senderr(EIO); /* XXX: good error? */ + } + + /* We don't have to do link-layer address resolution on a p2p link. */ + if ((ifp->if_flags & IFF_POINTOPOINT) != 0 && + ln->ln_state < ND6_LLINFO_REACHABLE) + ln->ln_state = ND6_LLINFO_STALE; + + /* + * The first time we send a packet to a neighbor whose entry is + * STALE, we have to change the state to DELAY and a sets a timer to + * expire in DELAY_FIRST_PROBE_TIME seconds to ensure do + * neighbor unreachability detection on expiration. 
+ * (RFC 2461 7.3.3) + */ + if (ln->ln_state == ND6_LLINFO_STALE) { + ln->ln_asked = 0; + ln->ln_state = ND6_LLINFO_DELAY; + ln->ln_expire = time_second + nd6_delay; + } + + /* + * If the neighbor cache entry has a state other than INCOMPLETE + * (i.e. its link-layer address is already reloved), just + * send the packet. + */ + if (ln->ln_state > ND6_LLINFO_INCOMPLETE) + goto sendpkt; + + /* + * There is a neighbor cache entry, but no ethernet address + * response yet. Replace the held mbuf (if any) with this + * latest one. + * + * XXX Does the code conform to rate-limiting rule? + * (RFC 2461 7.2.2) + */ + if (ln->ln_state == ND6_LLINFO_WAITDELETE || + ln->ln_state == ND6_LLINFO_NOSTATE) + ln->ln_state = ND6_LLINFO_INCOMPLETE; + if (ln->ln_hold) + m_freem(ln->ln_hold); + ln->ln_hold = m; + if (ln->ln_expire) { + rt->rt_flags &= ~RTF_REJECT; + if (ln->ln_asked < nd6_mmaxtries && + ln->ln_expire < time_second) { + ln->ln_asked++; + ln->ln_expire = time_second + + nd_ifinfo[ifp->if_index].retrans / 1000; + nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, 0); + } + } + return(0); + + sendpkt: +#ifdef __APPLE__ + return (dlil_output(ifptodlt(ifp, PF_INET6), m, rt, (struct sockaddr *)dst, 0)); +#else + return((*ifp->if_output)(ifp, m, (struct sockaddr *)dst, rt)); +#endif + + bad: + if (m) + m_freem(m); + return (error); +} +#undef senderr + +int +nd6_storelladdr(ifp, rt, m, dst, desten) + struct ifnet *ifp; + struct rtentry *rt; + struct mbuf *m; + struct sockaddr *dst; + u_char *desten; +{ + struct sockaddr_dl *sdl; + + if (m->m_flags & M_MCAST) { + switch (ifp->if_type) { + case IFT_ETHER: + case IFT_FDDI: + ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, + desten); + return(1); + break; + case IFT_ARCNET: + *desten = 0; + return(1); + default: + return(0); + } + } + + if (rt == NULL || + rt->rt_gateway->sa_family != AF_LINK) { + printf("nd6_storelladdr: something odd happens\n"); + return(0); + } + sdl = SDL(rt->rt_gateway); + if (sdl->sdl_alen == 0) { + /* this 
should be impossible, but we bark here for debugging */ + printf("nd6_storelladdr: sdl_alen == 0\n"); + return(0); + } + + bcopy(LLADDR(sdl), desten, sdl->sdl_alen); + return(1); +} diff --git a/bsd/netinet6/nd6.h b/bsd/netinet6/nd6.h new file mode 100644 index 000000000..aa83aa8aa --- /dev/null +++ b/bsd/netinet6/nd6.h @@ -0,0 +1,349 @@ +/* $KAME: nd6.h,v 1.18 2000/03/16 11:58:32 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _NETINET6_ND6_H_ +#define _NETINET6_ND6_H_ + +/* see net/route.h, or net/if_inarp.h */ +#ifndef RTF_ANNOUNCE +#define RTF_ANNOUNCE RTF_PROTO2 +#endif + +#include + +struct llinfo_nd6 { + struct llinfo_nd6 *ln_next; + struct llinfo_nd6 *ln_prev; + struct rtentry *ln_rt; + struct mbuf *ln_hold; /* last packet until resolved/timeout */ + long ln_asked; /* number of queries already sent for this addr */ + u_long ln_expire; /* lifetime for NDP state transition */ + short ln_state; /* reachability state */ + short ln_router; /* 2^0: ND6 router bit */ +}; + +#define ND6_LLINFO_NOSTATE -2 +#define ND6_LLINFO_WAITDELETE -1 +#define ND6_LLINFO_INCOMPLETE 0 +#define ND6_LLINFO_REACHABLE 1 +#define ND6_LLINFO_STALE 2 +#define ND6_LLINFO_DELAY 3 +#define ND6_LLINFO_PROBE 4 + +#define ND6_IS_LLINFO_PROBREACH(n) ((n)->ln_state > ND6_LLINFO_INCOMPLETE) + +struct nd_ifinfo { + u_int32_t linkmtu; /* LinkMTU */ + u_int32_t maxmtu; /* Upper bound of LinkMTU */ + u_int32_t basereachable; /* BaseReachableTime */ + u_int32_t reachable; /* Reachable Time */ + u_int32_t retrans; /* Retrans Timer */ + u_int32_t flags; /* Flags */ + int recalctm; /* BaseReachable re-calculation timer */ + u_int8_t chlim; /* CurHopLimit */ + u_int8_t receivedra; +}; + +#define ND6_IFF_PERFORMNUD 0x1 + +struct in6_nbrinfo { + char ifname[IFNAMSIZ]; /* if name, e.g. 
"en0" */ + struct in6_addr addr; /* IPv6 address of the neighbor */ + long asked; /* number of queries already sent for this addr */ + int isrouter; /* if it acts as a router */ + int state; /* reachability state */ + int expire; /* lifetime for NDP state transition */ +}; + +#define DRLSTSIZ 10 +#define PRLSTSIZ 10 +struct in6_drlist { + char ifname[IFNAMSIZ]; + struct { + struct in6_addr rtaddr; + u_char flags; + u_short rtlifetime; + u_long expire; + u_short if_index; + } defrouter[DRLSTSIZ]; +}; + +struct in6_prlist { + char ifname[IFNAMSIZ]; + struct { + struct in6_addr prefix; + struct prf_ra raflags; + u_char prefixlen; + u_char origin; + u_long vltime; + u_long pltime; + u_long expire; + u_short if_index; + u_short advrtrs; /* number of advertisement routers */ + struct in6_addr advrtr[DRLSTSIZ]; /* XXX: explicit limit */ + } prefix[PRLSTSIZ]; +}; + +struct in6_ndireq { + char ifname[IFNAMSIZ]; + struct nd_ifinfo ndi; +}; + +struct in6_ndifreq { + char ifname[IFNAMSIZ]; + u_long ifindex; +}; + + +/* protocol constants */ +#define MAX_RTR_SOLICITATION_DELAY 1 /*1sec*/ +#define RTR_SOLICITATION_INTERVAL 4 /*4sec*/ +#define MAX_RTR_SOLICITATIONS 3 + +#define ND6_INFINITE_LIFETIME 0xffffffff + +#if KERNEL +/* node constants */ +#define MAX_REACHABLE_TIME 3600000 /* msec */ +#define REACHABLE_TIME 30000 /* msec */ +#define RETRANS_TIMER 1000 /* msec */ +#define MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */ +#define MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */ +#ifndef __OpenBSD__ +#define ND_COMPUTE_RTIME(x) \ + (((MIN_RANDOM_FACTOR * (x >> 10)) + (random() & \ + ((MAX_RANDOM_FACTOR - MIN_RANDOM_FACTOR) * (x >> 10)))) /1000) +#else +#define ND_COMPUTE_RTIME(x) \ + (((MIN_RANDOM_FACTOR * (x >> 10)) + (arc4random() & \ + ((MAX_RANDOM_FACTOR - MIN_RANDOM_FACTOR) * (x >> 10)))) /1000) +#endif + +TAILQ_HEAD(nd_drhead, nd_defrouter); +struct nd_defrouter { + TAILQ_ENTRY(nd_defrouter) dr_entry; + struct in6_addr rtaddr; + u_char flags; + u_short rtlifetime; + u_long expire; + 
u_long advint; /* Mobile IPv6 addition (milliseconds) */ + u_long advint_expire; /* Mobile IPv6 addition */ + int advints_lost; /* Mobile IPv6 addition */ + struct ifnet *ifp; +}; + +struct nd_prefix { + struct ifnet *ndpr_ifp; + LIST_ENTRY(nd_prefix) ndpr_entry; + struct sockaddr_in6 ndpr_prefix; /* prefix */ + struct in6_addr ndpr_mask; /* netmask derived from the prefix */ + struct in6_addr ndpr_addr; /* address that is derived from the prefix */ + u_int32_t ndpr_vltime; /* advertised valid lifetime */ + u_int32_t ndpr_pltime; /* advertised preferred lifetime */ + time_t ndpr_expire; /* expiration time of the prefix */ + time_t ndpr_preferred; /* preferred time of the prefix */ + struct prf_ra ndpr_flags; + /* list of routers that advertise the prefix: */ + LIST_HEAD(pr_rtrhead, nd_pfxrouter) ndpr_advrtrs; + u_char ndpr_plen; + struct ndpr_stateflags { + /* if this prefix can be regarded as on-link */ + u_char onlink : 1; + } ndpr_stateflags; +}; + +#define ndpr_next ndpr_entry.le_next + +#define ndpr_raf ndpr_flags +#define ndpr_raf_onlink ndpr_flags.onlink +#define ndpr_raf_auto ndpr_flags.autonomous + +#define ndpr_statef_onlink ndpr_stateflags.onlink +#define ndpr_statef_addmark ndpr_stateflags.addmark + +/* + * We keep expired prefix for certain amount of time, for validation purposes. 
+ * 1800s = MaxRtrAdvInterval + */ +#define NDPR_KEEP_EXPIRED (1800 * 2) + +/* + * Message format for use in obtaining information about prefixes + * from inet6 sysctl function + */ +struct inet6_ndpr_msghdr { + u_short inpm_msglen; /* to skip over non-understood messages */ + u_char inpm_version; /* future binary compatibility */ + u_char inpm_type; /* message type */ + struct in6_addr inpm_prefix; + u_long prm_vltim; + u_long prm_pltime; + u_long prm_expire; + u_long prm_preferred; + struct in6_prflags prm_flags; + u_short prm_index; /* index for associated ifp */ + u_char prm_plen; /* length of prefix in bits */ +}; + +#define prm_raf_onlink prm_flags.prf_ra.onlink +#define prm_raf_auto prm_flags.prf_ra.autonomous + +#define prm_statef_onlink prm_flags.prf_state.onlink + +#define prm_rrf_decrvalid prm_flags.prf_rr.decrvalid +#define prm_rrf_decrprefd prm_flags.prf_rr.decrprefd + +#define ifpr2ndpr(ifpr) ((struct nd_prefix *)(ifpr)) +#define ndpr2ifpr(ndpr) ((struct ifprefix *)(ndpr)) + +struct nd_pfxrouter { + LIST_ENTRY(nd_pfxrouter) pfr_entry; +#define pfr_next pfr_entry.le_next + struct nd_defrouter *router; +}; + +LIST_HEAD(nd_prhead, nd_prefix); + +/* nd6.c */ +extern int nd6_prune; +extern int nd6_delay; +extern int nd6_umaxtries; +extern int nd6_mmaxtries; +extern int nd6_useloopback; +extern struct llinfo_nd6 llinfo_nd6; +extern struct nd_ifinfo *nd_ifinfo; +extern struct nd_drhead nd_defrouter; +extern struct nd_prhead nd_prefix; + +/* nd6_rtr.c */ +extern struct ifnet *nd6_defifp; /* XXXYYY */ +extern int nd6_defifindex; + +union nd_opts { + struct nd_opt_hdr *nd_opt_array[9]; /*max = home agent info*/ + struct { + struct nd_opt_hdr *zero; + struct nd_opt_hdr *src_lladdr; + struct nd_opt_hdr *tgt_lladdr; + struct nd_opt_prefix_info *pi_beg;/* multiple opts, start */ + struct nd_opt_rd_hdr *rh; + struct nd_opt_mtu *mtu; + struct nd_opt_hdr *six; + struct nd_opt_advint *adv; + struct nd_opt_hai *hai; + struct nd_opt_hdr *search; /* multiple opts */ + 
struct nd_opt_hdr *last; /* multiple opts */ + int done; + struct nd_opt_prefix_info *pi_end;/* multiple opts, end */ + } nd_opt_each; +}; +#define nd_opts_src_lladdr nd_opt_each.src_lladdr +#define nd_opts_tgt_lladdr nd_opt_each.tgt_lladdr +#define nd_opts_pi nd_opt_each.pi_beg +#define nd_opts_pi_end nd_opt_each.pi_end +#define nd_opts_rh nd_opt_each.rh +#define nd_opts_mtu nd_opt_each.mtu +#define nd_opts_adv nd_opt_each.adv +#define nd_opts_hai nd_opt_each.hai +#define nd_opts_search nd_opt_each.search +#define nd_opts_last nd_opt_each.last +#define nd_opts_done nd_opt_each.done + +/* XXX: need nd6_var.h?? */ +/* nd6.c */ +void nd6_init __P((void)); +void nd6_ifattach __P((struct ifnet *)); +int nd6_is_addr_neighbor __P((struct in6_addr *, struct ifnet *)); +void nd6_option_init __P((void *, int, union nd_opts *)); +struct nd_opt_hdr *nd6_option __P((union nd_opts *)); +int nd6_options __P((union nd_opts *)); +struct rtentry *nd6_lookup __P((struct in6_addr *, int, struct ifnet *)); +void nd6_setmtu __P((struct ifnet *)); +void nd6_timer __P((void *)); +void nd6_purge __P((struct ifnet *)); +void nd6_free __P((struct rtentry *)); +void nd6_nud_hint __P((struct rtentry *, struct in6_addr *)); +int nd6_resolve __P((struct ifnet *, struct rtentry *, + struct mbuf *, struct sockaddr *, u_char *)); +#if defined(__bsdi__) && _BSDI_VERSION >= 199802 +void nd6_rtrequest __P((int, struct rtentry *, struct rt_addrinfo *)); +void nd6_p2p_rtrequest __P((int, struct rtentry *, struct rt_addrinfo *)); +#else +void nd6_rtrequest __P((int, struct rtentry *, struct sockaddr *)); +void nd6_p2p_rtrequest __P((int, struct rtentry *, struct sockaddr *)); +#endif +int nd6_ioctl __P((u_long, caddr_t, struct ifnet *)); +struct rtentry *nd6_cache_lladdr __P((struct ifnet *, struct in6_addr *, + char *, int, int, int)); +/* for test */ +int nd6_output __P((struct ifnet *, struct mbuf *, struct sockaddr_in6 *, + struct rtentry *)); +int nd6_storelladdr __P((struct ifnet *, struct rtentry 
*, struct mbuf *, + struct sockaddr *, u_char *)); + +/* nd6_nbr.c */ +void nd6_na_input __P((struct mbuf *, int, int)); +void nd6_na_output __P((struct ifnet *, struct in6_addr *, + struct in6_addr *, u_long, int, struct sockaddr *)); +void nd6_ns_input __P((struct mbuf *, int, int)); +void nd6_ns_output __P((struct ifnet *, struct in6_addr *, + struct in6_addr *, struct llinfo_nd6 *, int)); +caddr_t nd6_ifptomac __P((struct ifnet *)); +void nd6_dad_start __P((struct ifaddr *, int *)); +void nd6_dad_duplicated __P((struct ifaddr *)); + +/* nd6_rtr.c */ +void nd6_rs_input __P((struct mbuf *, int, int)); +void nd6_ra_input __P((struct mbuf *, int, int)); +void prelist_del __P((struct nd_prefix *)); +void defrouter_addreq __P((struct nd_defrouter *)); +void defrouter_delreq __P((struct nd_defrouter *, int)); +void defrouter_select __P((void)); +void defrtrlist_del __P((struct nd_defrouter *)); +void prelist_remove __P((struct nd_prefix *)); +int prelist_update __P((struct nd_prefix *, struct nd_defrouter *, + struct mbuf *)); +struct nd_pfxrouter *find_pfxlist_reachable_router __P((struct nd_prefix *)); /* XXXYYY */ +void pfxlist_onlink_check __P((void)); +void defrouter_addifreq __P((struct ifnet *)); /* XXXYYY */ +struct nd_defrouter *defrouter_lookup __P((struct in6_addr *, + struct ifnet *)); +struct nd_prefix *prefix_lookup __P((struct nd_prefix *)); /* XXXYYY */ +int in6_ifdel __P((struct ifnet *, struct in6_addr *)); +struct nd_pfxrouter *pfxrtr_lookup __P((struct nd_prefix *, + struct nd_defrouter *)); /* XXXYYY */ +int in6_init_prefix_ltimes __P((struct nd_prefix *ndpr)); +void rt6_flush __P((struct in6_addr *, struct ifnet *)); +int nd6_setdefaultiface __P((int)); + +#endif /* KERNEL */ + +#endif /* _NETINET6_ND6_H_ */ diff --git a/bsd/netinet6/nd6_nbr.c b/bsd/netinet6/nd6_nbr.c new file mode 100644 index 000000000..eca3501f1 --- /dev/null +++ b/bsd/netinet6/nd6_nbr.c @@ -0,0 +1,1372 @@ +/* $KAME: nd6_nbr.c,v 1.32 2000/03/21 11:37:30 itojun Exp $ */ + +/* + 
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#if __NetBSD__ /*XXX*/ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include +#endif +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __OpenBSD__ /*don't confuse KAME ipsec with OpenBSD ipsec*/ +#undef IPSEC +#endif + +#if IPSEC +#include +#endif + +#include + +#define SDL(s) ((struct sockaddr_dl *)s) + +struct dadq; +static struct dadq *nd6_dad_find __P((struct ifaddr *)); +static void nd6_dad_timer __P((struct ifaddr *)); +static void nd6_dad_ns_output __P((struct dadq *, struct ifaddr *)); +static void nd6_dad_ns_input __P((struct ifaddr *)); +static void nd6_dad_na_input __P((struct ifaddr *)); + +static int dad_ignore_ns = 0; /* ignore NS in DAD - specwise incorrect*/ +static int dad_maxtry = 15; /* max # of *tries* to transmit DAD packet */ + +/* + * Input an Neighbor Solicitation Message. + * + * Based on RFC 2461 + * Based on RFC 2462 (duplicated address detection) + */ +void +nd6_ns_input(m, off, icmp6len) + struct mbuf *m; + int off, icmp6len; +{ + struct ifnet *ifp = m->m_pkthdr.rcvif; + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + struct nd_neighbor_solicit *nd_ns; + struct in6_addr saddr6 = ip6->ip6_src; + struct in6_addr daddr6 = ip6->ip6_dst; + struct in6_addr taddr6; + struct in6_addr myaddr6; + char *lladdr = NULL; + struct ifaddr *ifa; + int lladdrlen = 0; + int anycast = 0, proxy = 0, tentative = 0; + int tlladdr; + union nd_opts ndopts; + struct sockaddr_dl *proxydl = NULL; + + if (ip6->ip6_hlim != 255) { + log(LOG_ERR, + "nd6_ns_input: invalid hlim %d\n", ip6->ip6_hlim); + goto freeit; + } + + if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { + /* dst has to be solicited node multicast address. 
*/ + if (daddr6.s6_addr16[0] == IPV6_ADDR_INT16_MLL + /*don't check ifindex portion*/ + && daddr6.s6_addr32[1] == 0 + && daddr6.s6_addr32[2] == IPV6_ADDR_INT32_ONE + && daddr6.s6_addr8[12] == 0xff) { + ; /*good*/ + } else { + log(LOG_INFO, "nd6_ns_input: bad DAD packet " + "(wrong ip6 dst)\n"); + goto bad; + } + } + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, icmp6len,); + nd_ns = (struct nd_neighbor_solicit *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(nd_ns, struct nd_neighbor_solicit *, m, off, icmp6len); + if (nd_ns == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } +#endif + taddr6 = nd_ns->nd_ns_target; + + if (IN6_IS_ADDR_MULTICAST(&taddr6)) { + log(LOG_INFO, "nd6_ns_input: bad NS target (multicast)\n"); + goto bad; + } + + if (IN6_IS_SCOPE_LINKLOCAL(&taddr6)) + taddr6.s6_addr16[1] = htons(ifp->if_index); + + icmp6len -= sizeof(*nd_ns); + nd6_option_init(nd_ns + 1, icmp6len, &ndopts); + if (nd6_options(&ndopts) < 0) { + log(LOG_INFO, "nd6_ns_input: invalid ND option, ignored\n"); + goto bad; + } + + if (ndopts.nd_opts_src_lladdr) { + lladdr = (char *)(ndopts.nd_opts_src_lladdr +1); + lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3; + } + + if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src) && lladdr) { + log(LOG_INFO, "nd6_ns_input: bad DAD packet " + "(link-layer address option)\n"); + goto bad; + } + + /* + * Attaching target link-layer address to the NA? + * (RFC 2461 7.2.4) + * + * NS IP dst is unicast/anycast MUST NOT add + * NS IP dst is solicited-node multicast MUST add + * + * In implementation, we add target link-layer address by default. + * We do not add one in MUST NOT cases. + */ +#if 0 /* too much! 
*/ + ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &daddr6); + if (ifa && (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST)) + tlladdr = 0; + else +#endif + if (!IN6_IS_ADDR_MULTICAST(&daddr6)) + tlladdr = 0; + else + tlladdr = 1; + + /* + * Target address (taddr6) must be either: + * (1) Valid unicast/anycast address for my receiving interface, + * (2) Unicast address for which I'm offering proxy service, or + * (3) "tentative" address on which DAD is being performed. + */ + /* (1) and (3) check. */ + ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6); + + /* (2) check. */ + if (!ifa) { + struct rtentry *rt; + struct sockaddr_in6 tsin6; + + bzero(&tsin6, sizeof tsin6); + tsin6.sin6_len = sizeof(struct sockaddr_in6); + tsin6.sin6_family = AF_INET6; + tsin6.sin6_addr = taddr6; + + rt = rtalloc1((struct sockaddr *)&tsin6, 0 +#if __FreeBSD__ || defined (__APPLE__) + , 0 +#endif /* __FreeBSD__ */ + ); + if (rt && (rt->rt_flags & RTF_ANNOUNCE) != 0 && + rt->rt_gateway->sa_family == AF_LINK) { + /* + * proxy NDP for single entry + */ + ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, + IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); + if (ifa) { + proxy = 1; + proxydl = SDL(rt->rt_gateway); + } + } + if (rt) + rtfree(rt); + } + if (!ifa) { + /* + * We've got a NS packet, and we don't have that address + * assigned for us. We MUST silently ignore it. + * See RFC2461 7.2.3. 
+ */ + goto freeit; + } + myaddr6 = *IFA_IN6(ifa); + anycast = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_ANYCAST; + tentative = ((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_TENTATIVE; + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_DUPLICATED) + goto freeit; + + if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { + log(LOG_INFO, + "nd6_ns_input: lladdrlen mismatch for %s " + "(if %d, NS packet %d)\n", + ip6_sprintf(&taddr6), ifp->if_addrlen, lladdrlen - 2); + } + + if (IN6_ARE_ADDR_EQUAL(&myaddr6, &saddr6)) { + log(LOG_INFO, + "nd6_ns_input: duplicate IP6 address %s\n", + ip6_sprintf(&saddr6)); + goto freeit; + } + + /* + * We have neighbor solicitation packet, with target address equals to + * one of my tentative address. + * + * src addr how to process? + * --- --- + * multicast of course, invalid (rejected in ip6_input) + * unicast somebody is doing address resolution -> ignore + * unspec dup address detection + * + * The processing is defined in RFC 2462. + */ + if (tentative) { + /* + * If source address is unspecified address, it is for + * duplicated address detection. + * + * If not, the packet is for addess resolution; + * silently ignore it. + */ + if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) + nd6_dad_ns_input(ifa); + + goto freeit; + } + + /* + * If the source address is unspecified address, entries must not + * be created or updated. + * It looks that sender is performing DAD. Output NA toward + * all-node multicast address, to tell the sender that I'm using + * the address. + * S bit ("solicited") must be zero. + */ + if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) { + saddr6 = in6addr_linklocal_allnodes; + saddr6.s6_addr16[1] = htons(ifp->if_index); + nd6_na_output(ifp, &saddr6, &taddr6, + ((anycast || proxy || !tlladdr) + ? 0 : ND_NA_FLAG_OVERRIDE) + | (ip6_forwarding ? 
ND_NA_FLAG_ROUTER : 0), + tlladdr, (struct sockaddr *)proxydl); + goto freeit; + } + + nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen, ND_NEIGHBOR_SOLICIT, 0); + + nd6_na_output(ifp, &saddr6, &taddr6, + ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) + | (ip6_forwarding ? ND_NA_FLAG_ROUTER : 0) + | ND_NA_FLAG_SOLICITED, + tlladdr, (struct sockaddr *)proxydl); + freeit: + m_freem(m); + return; + + bad: + log(LOG_ERR, "nd6_ns_input: src=%s\n", ip6_sprintf(&saddr6)); + log(LOG_ERR, "nd6_ns_input: dst=%s\n", ip6_sprintf(&daddr6)); + log(LOG_ERR, "nd6_ns_input: tgt=%s\n", ip6_sprintf(&taddr6)); + m_freem(m); +} + +/* + * Output an Neighbor Solicitation Message. Caller specifies: + * - ICMP6 header source IP6 address + * - ND6 header target IP6 address + * - ND6 header source datalink address + * + * Based on RFC 2461 + * Based on RFC 2462 (duplicated address detection) + */ +void +nd6_ns_output(ifp, daddr6, taddr6, ln, dad) + struct ifnet *ifp; + struct in6_addr *daddr6, *taddr6; + struct llinfo_nd6 *ln; /* for source address determination */ + int dad; /* duplicated address detection */ +{ + struct mbuf *m; + struct ip6_hdr *ip6; + struct nd_neighbor_solicit *nd_ns; + struct in6_ifaddr *ia = NULL; + struct ip6_moptions im6o; + int icmp6len; + int maxlen; + caddr_t mac; + struct ifnet *outif = NULL; + + if (IN6_IS_ADDR_MULTICAST(taddr6)) + return; + + /* estimate the size of message */ + maxlen = sizeof(*ip6) + sizeof(*nd_ns); + maxlen += (sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7; + if (max_linkhdr + maxlen >= MCLBYTES) { +#ifdef DIAGNOSTIC + printf("nd6_ns_output: max_linkhdr + maxlen >= MCLBYTES " + "(%d + %d > %d)\n", max_linkhdr, maxlen, MCLBYTES); +#endif + return; + } + + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m && max_linkhdr + maxlen >= MHLEN) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + m = NULL; + } + } + if (m == NULL) + return; + + if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) { + m->m_flags |= 
M_MCAST; + im6o.im6o_multicast_ifp = ifp; + im6o.im6o_multicast_hlim = 255; + im6o.im6o_multicast_loop = 0; + } + + icmp6len = sizeof(*nd_ns); + m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len; + m->m_data += max_linkhdr; /*or MH_ALIGN() equivalent?*/ + + /* fill neighbor solicitation packet */ + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + /* ip6->ip6_plen will be set later */ + ip6->ip6_nxt = IPPROTO_ICMPV6; + ip6->ip6_hlim = 255; + if (daddr6) + ip6->ip6_dst = *daddr6; + else { + ip6->ip6_dst.s6_addr16[0] = IPV6_ADDR_INT16_MLL; + ip6->ip6_dst.s6_addr16[1] = htons(ifp->if_index); + ip6->ip6_dst.s6_addr32[1] = 0; + ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_ONE; + ip6->ip6_dst.s6_addr32[3] = taddr6->s6_addr32[3]; + ip6->ip6_dst.s6_addr8[12] = 0xff; + } + if (!dad) { +#if 0 /* KAME way, exact address scope match */ + /* + * Select a source whose scope is the same as that of the dest. + * Typically, the dest is link-local solicitation multicast + * (i.e. neighbor discovery) or link-local/global unicast + * (i.e. neighbor un-reachability detection). + */ + ia = in6_ifawithifp(ifp, &ip6->ip6_dst); + if (ia == NULL) { + m_freem(m); + return; + } + ip6->ip6_src = ia->ia_addr.sin6_addr; +#else /* spec-wise correct */ + /* + * RFC2461 7.2.2: + * "If the source address of the packet prompting the + * solicitation is the same as one of the addresses assigned + * to the outgoing interface, that address SHOULD be placed + * in the IP Source Address of the outgoing solicitation. + * Otherwise, any one of the addresses assigned to the + * interface should be used." + * + * We use the source address for the prompting packet + * (saddr6), if: + * - saddr6 is given from the caller (by giving "ln"), and + * - saddr6 belongs to the outgoing interface. + * Otherwise, we perform a scope-wise match. 
+ */ + struct ip6_hdr *hip6; /*hold ip6*/ + struct in6_addr *saddr6; + + if (ln && ln->ln_hold) { + hip6 = mtod(ln->ln_hold, struct ip6_hdr *); + /* XXX pullup? */ + if (sizeof(*hip6) < ln->ln_hold->m_len) + saddr6 = &hip6->ip6_src; + else + saddr6 = NULL; + } else + saddr6 = NULL; + if (saddr6 && in6ifa_ifpwithaddr(ifp, saddr6)) + bcopy(saddr6, &ip6->ip6_src, sizeof(*saddr6)); + else { + ia = in6_ifawithifp(ifp, &ip6->ip6_dst); + if (ia == NULL) { + m_freem(m); /*XXX*/ + return; + } + ip6->ip6_src = ia->ia_addr.sin6_addr; + } +#endif + } else { + /* + * Source address for DAD packet must always be IPv6 + * unspecified address. (0::0) + */ + bzero(&ip6->ip6_src, sizeof(ip6->ip6_src)); + } + nd_ns = (struct nd_neighbor_solicit *)(ip6 + 1); + nd_ns->nd_ns_type = ND_NEIGHBOR_SOLICIT; + nd_ns->nd_ns_code = 0; + nd_ns->nd_ns_reserved = 0; + nd_ns->nd_ns_target = *taddr6; + + if (IN6_IS_SCOPE_LINKLOCAL(&nd_ns->nd_ns_target)) + nd_ns->nd_ns_target.s6_addr16[1] = 0; + + /* + * Add source link-layer address option. + * + * spec implementation + * --- --- + * DAD packet MUST NOT do not add the option + * there's no link layer address: + * impossible do not add the option + * there's link layer address: + * Multicast NS MUST add one add the option + * Unicast NS SHOULD add one add the option + */ + if (!dad && (mac = nd6_ifptomac(ifp))) { + int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen; + struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_ns + 1); + /* 8 byte alignments... 
*/ + optlen = (optlen + 7) & ~7; + + m->m_pkthdr.len += optlen; + m->m_len += optlen; + icmp6len += optlen; + bzero((caddr_t)nd_opt, optlen); + nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR; + nd_opt->nd_opt_len = optlen >> 3; + bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen); + } + + ip6->ip6_plen = htons((u_short)icmp6len); + nd_ns->nd_ns_cksum = 0; + nd_ns->nd_ns_cksum + = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), icmp6len); + +#if IPSEC + /* Don't lookup socket */ + ipsec_setsocket(m, NULL); +#endif + ip6_output(m, NULL, NULL, dad ? IPV6_DADOUTPUT : 0, &im6o, &outif); + if (outif) { + icmp6_ifstat_inc(outif, ifs6_out_msg); + icmp6_ifstat_inc(outif, ifs6_out_neighborsolicit); + } + icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]++; +} + +/* + * Neighbor advertisement input handling. + * + * Based on RFC 2461 + * Based on RFC 2462 (duplicated address detection) + * + * the following items are not implemented yet: + * - proxy advertisement delay rule (RFC2461 7.2.8, last paragraph, SHOULD) + * - anycast advertisement delay rule (RFC2461 7.2.7, SHOULD) + */ +void +nd6_na_input(m, off, icmp6len) + struct mbuf *m; + int off, icmp6len; +{ + struct ifnet *ifp = m->m_pkthdr.rcvif; + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + struct nd_neighbor_advert *nd_na; +#if 0 + struct in6_addr saddr6 = ip6->ip6_src; +#endif + struct in6_addr daddr6 = ip6->ip6_dst; + struct in6_addr taddr6; + int flags; + int is_router; + int is_solicited; + int is_override; + char *lladdr = NULL; + int lladdrlen = 0; + struct ifaddr *ifa; + struct llinfo_nd6 *ln; + struct rtentry *rt; + struct sockaddr_dl *sdl; + union nd_opts ndopts; + + if (ip6->ip6_hlim != 255) { + log(LOG_ERR, + "nd6_na_input: invalid hlim %d\n", ip6->ip6_hlim); + goto freeit; + } + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, icmp6len,); + nd_na = (struct nd_neighbor_advert *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(nd_na, struct nd_neighbor_advert *, m, off, icmp6len); + if (nd_na == NULL) { + 
icmp6stat.icp6s_tooshort++; + return; + } +#endif + taddr6 = nd_na->nd_na_target; + flags = nd_na->nd_na_flags_reserved; + is_router = ((flags & ND_NA_FLAG_ROUTER) != 0); + is_solicited = ((flags & ND_NA_FLAG_SOLICITED) != 0); + is_override = ((flags & ND_NA_FLAG_OVERRIDE) != 0); + + if (IN6_IS_SCOPE_LINKLOCAL(&taddr6)) + taddr6.s6_addr16[1] = htons(ifp->if_index); + + if (IN6_IS_ADDR_MULTICAST(&taddr6)) { + log(LOG_ERR, + "nd6_na_input: invalid target address %s\n", + ip6_sprintf(&taddr6)); + goto freeit; + } + if (IN6_IS_ADDR_MULTICAST(&daddr6)) + if (is_solicited) { + log(LOG_ERR, + "nd6_na_input: a solicited adv is multicasted\n"); + goto freeit; + } + + icmp6len -= sizeof(*nd_na); + nd6_option_init(nd_na + 1, icmp6len, &ndopts); + if (nd6_options(&ndopts) < 0) { + log(LOG_INFO, "nd6_na_input: invalid ND option, ignored\n"); + goto freeit; + } + + if (ndopts.nd_opts_tgt_lladdr) { + lladdr = (char *)(ndopts.nd_opts_tgt_lladdr + 1); + lladdrlen = ndopts.nd_opts_tgt_lladdr->nd_opt_len << 3; + } + + ifa = (struct ifaddr *)in6ifa_ifpwithaddr(ifp, &taddr6); + + /* + * Target address matches one of my interface address. + * + * If my address is tentative, this means that there's somebody + * already using the same address as mine. This indicates DAD failure. + * This is defined in RFC 2462. + * + * Otherwise, process as defined in RFC 2461. + */ + if (ifa + && (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_TENTATIVE)) { + nd6_dad_na_input(ifa); + goto freeit; + } + + /* Just for safety, maybe unnecessary. */ + if (ifa) { + log(LOG_ERR, + "nd6_na_input: duplicate IP6 address %s\n", + ip6_sprintf(&taddr6)); + goto freeit; + } + + if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { + log(LOG_INFO, + "nd6_na_input: lladdrlen mismatch for %s " + "(if %d, NA packet %d)\n", + ip6_sprintf(&taddr6), ifp->if_addrlen, lladdrlen - 2); + } + + /* + * If no neighbor cache entry is found, NA SHOULD silently be discarded. 
+ */ + rt = nd6_lookup(&taddr6, 0, ifp); + if ((rt == NULL) || + ((ln = (struct llinfo_nd6 *)rt->rt_llinfo) == NULL) || + ((sdl = SDL(rt->rt_gateway)) == NULL)) + goto freeit; + + if (ln->ln_state == ND6_LLINFO_INCOMPLETE) { + /* + * If the link-layer has address, and no lladdr option came, + * discard the packet. + */ + if (ifp->if_addrlen && !lladdr) + goto freeit; + + /* + * Record link-layer address, and update the state. + */ + sdl->sdl_alen = ifp->if_addrlen; + bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen); + if (is_solicited) { + ln->ln_state = ND6_LLINFO_REACHABLE; + if (ln->ln_expire) +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + ln->ln_expire = time.tv_sec + +#else + ln->ln_expire = time_second + +#endif + nd_ifinfo[rt->rt_ifp->if_index].reachable; + } else + ln->ln_state = ND6_LLINFO_STALE; + ln->ln_router = is_router; + } else { + int llchange; + + /* + * Check if the link-layer address has changed or not. + */ + if (!lladdr) + llchange = 0; + else { + if (sdl->sdl_alen) { + if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) + llchange = 1; + else + llchange = 0; + } else + llchange = 1; + } + + /* + * This is VERY complex. Look at it with care. + * + * override solicit lladdr llchange action + * (L: record lladdr) + * + * 0 0 n -- (2c) + * 0 0 y n (2b) L + * 0 0 y y (1) REACHABLE->STALE + * 0 1 n -- (2c) *->REACHABLE + * 0 1 y n (2b) L *->REACHABLE + * 0 1 y y (1) REACHABLE->STALE + * 1 0 n -- (2a) + * 1 0 y n (2a) L + * 1 0 y y (2a) L *->STALE + * 1 1 n -- (2a) *->REACHABLE + * 1 1 y n (2a) L *->REACHABLE + * 1 1 y y (2a) L *->REACHABLE + */ + if (!is_override && (lladdr && llchange)) { /* (1) */ + /* + * If state is REACHABLE, make it STALE. + * no other updates should be done. 
+ */ + if (ln->ln_state == ND6_LLINFO_REACHABLE) + ln->ln_state = ND6_LLINFO_STALE; + goto freeit; + } else if (is_override /* (2a) */ + || (!is_override && (lladdr && !llchange)) /* (2b) */ + || !lladdr) { /* (2c) */ + /* + * Update link-local address, if any. + */ + if (lladdr) { + sdl->sdl_alen = ifp->if_addrlen; + bcopy(lladdr, LLADDR(sdl), ifp->if_addrlen); + } + + /* + * If solicited, make the state REACHABLE. + * If not solicited and the link-layer address was + * changed, make it STALE. + */ + if (is_solicited) { + ln->ln_state = ND6_LLINFO_REACHABLE; + if (ln->ln_expire) { +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) + ln->ln_expire = time.tv_sec + +#else + ln->ln_expire = time_second + +#endif + nd_ifinfo[ifp->if_index].reachable; + } + } else { + if (lladdr && llchange) + ln->ln_state = ND6_LLINFO_STALE; + } + } + + if (ln->ln_router && !is_router) { + /* + * The peer dropped the router flag. + * Remove the sender from the Default Router List and + * update the Destination Cache entries. + */ + struct nd_defrouter *dr; + struct in6_addr *in6; + int s; + + in6 = &((struct sockaddr_in6 *)rt_key(rt))->sin6_addr; +#if __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + dr = defrouter_lookup(in6, rt->rt_ifp); + if (dr) + defrtrlist_del(dr); + else if (!ip6_forwarding && ip6_accept_rtadv) { + /* + * Even if the neighbor is not in the default + * router list, the neighbor may be used + * as a next hop for some destinations + * (e.g. redirect case). So we must + * call rt6_flush explicitly. + */ + rt6_flush(&ip6->ip6_src, rt->rt_ifp); + } + splx(s); + } + ln->ln_router = is_router; + } + rt->rt_flags &= ~RTF_REJECT; + ln->ln_asked = 0; + if (ln->ln_hold) { +#if OLDIP6OUTPUT + (*ifp->if_output)(ifp, ln->ln_hold, rt_key(rt), rt); +#else + nd6_output(ifp, ln->ln_hold, + (struct sockaddr_in6 *)rt_key(rt), rt); +#endif + ln->ln_hold = 0; + } + + freeit: + m_freem(m); +} + +/* + * Neighbor advertisement output handling. 
+ * + * Based on RFC 2461 + * + * the following items are not implemented yet: + * - proxy advertisement delay rule (RFC2461 7.2.8, last paragraph, SHOULD) + * - anycast advertisement delay rule (RFC2461 7.2.7, SHOULD) + */ +void +nd6_na_output(ifp, daddr6, taddr6, flags, tlladdr, sdl0) + struct ifnet *ifp; + struct in6_addr *daddr6, *taddr6; + u_long flags; + int tlladdr; /* 1 if include target link-layer address */ + struct sockaddr *sdl0; /* sockaddr_dl (= proxy NA) or NULL */ +{ + struct mbuf *m; + struct ip6_hdr *ip6; + struct nd_neighbor_advert *nd_na; + struct in6_ifaddr *ia = NULL; + struct ip6_moptions im6o; + int icmp6len; + int maxlen; + caddr_t mac; + struct ifnet *outif = NULL; + + /* estimate the size of message */ + maxlen = sizeof(*ip6) + sizeof(*nd_na); + maxlen += (sizeof(struct nd_opt_hdr) + ifp->if_addrlen + 7) & ~7; + if (max_linkhdr + maxlen >= MCLBYTES) { +#ifdef DIAGNOSTIC + printf("nd6_na_output: max_linkhdr + maxlen >= MCLBYTES " + "(%d + %d > %d)\n", max_linkhdr, maxlen, MCLBYTES); +#endif + return; + } + + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m && max_linkhdr + maxlen >= MHLEN) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + m = NULL; + } + } + if (m == NULL) + return; + + if (IN6_IS_ADDR_MULTICAST(daddr6)) { + m->m_flags |= M_MCAST; + im6o.im6o_multicast_ifp = ifp; + im6o.im6o_multicast_hlim = 255; + im6o.im6o_multicast_loop = 0; + } + + icmp6len = sizeof(*nd_na); + m->m_pkthdr.len = m->m_len = sizeof(struct ip6_hdr) + icmp6len; + m->m_data += max_linkhdr; /*or MH_ALIGN() equivalent?*/ + + /* fill neighbor advertisement packet */ + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + ip6->ip6_nxt = IPPROTO_ICMPV6; + ip6->ip6_hlim = 255; + if (IN6_IS_ADDR_UNSPECIFIED(daddr6)) { + /* reply to DAD */ + ip6->ip6_dst.s6_addr16[0] = IPV6_ADDR_INT16_MLL; + ip6->ip6_dst.s6_addr16[1] = htons(ifp->if_index); + ip6->ip6_dst.s6_addr32[1] = 0; + 
ip6->ip6_dst.s6_addr32[2] = 0; + ip6->ip6_dst.s6_addr32[3] = IPV6_ADDR_INT32_ONE; + flags &= ~ND_NA_FLAG_SOLICITED; + } else + ip6->ip6_dst = *daddr6; + + /* + * Select a source whose scope is the same as that of the dest. + */ + ia = in6_ifawithifp(ifp, &ip6->ip6_dst); + if (ia == NULL) { + m_freem(m); + return; + } + ip6->ip6_src = ia->ia_addr.sin6_addr; + nd_na = (struct nd_neighbor_advert *)(ip6 + 1); + nd_na->nd_na_type = ND_NEIGHBOR_ADVERT; + nd_na->nd_na_code = 0; + nd_na->nd_na_target = *taddr6; + if (IN6_IS_SCOPE_LINKLOCAL(&nd_na->nd_na_target)) + nd_na->nd_na_target.s6_addr16[1] = 0; + + /* + * "tlladdr" indicates NS's condition for adding tlladdr or not. + * see nd6_ns_input() for details. + * Basically, if NS packet is sent to unicast/anycast addr, + * target lladdr option SHOULD NOT be included. + */ + if (tlladdr) { + mac = NULL; + /* + * sdl0 != NULL indicates proxy NA. If we do proxy, use + * lladdr in sdl0. If we are not proxying (sending NA for + * my address) use lladdr configured for the interface. + */ + if (sdl0 == NULL) + mac = nd6_ifptomac(ifp); + else if (sdl0->sa_family == AF_LINK) { + struct sockaddr_dl *sdl; + sdl = (struct sockaddr_dl *)sdl0; + if (sdl->sdl_alen == ifp->if_addrlen) + mac = LLADDR(sdl); + } + } + if (tlladdr && mac) { + int optlen = sizeof(struct nd_opt_hdr) + ifp->if_addrlen; + struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)(nd_na + 1); + + /* roundup to 8 bytes alignment! 
*/ + optlen = (optlen + 7) & ~7; + + m->m_pkthdr.len += optlen; + m->m_len += optlen; + icmp6len += optlen; + bzero((caddr_t)nd_opt, optlen); + nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; + nd_opt->nd_opt_len = optlen >> 3; + bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen); + } else + flags &= ~ND_NA_FLAG_OVERRIDE; + + ip6->ip6_plen = htons((u_short)icmp6len); + nd_na->nd_na_flags_reserved = flags; + nd_na->nd_na_cksum = 0; + nd_na->nd_na_cksum = + in6_cksum(m, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), icmp6len); + +#if IPSEC + /* Don't lookup socket */ + ipsec_setsocket(m, NULL); +#endif + ip6_output(m, NULL, NULL, 0, &im6o, &outif); + if (outif) { + icmp6_ifstat_inc(outif, ifs6_out_msg); + icmp6_ifstat_inc(outif, ifs6_out_neighboradvert); + } + icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]++; +} + +caddr_t +nd6_ifptomac(ifp) + struct ifnet *ifp; +{ + switch (ifp->if_type) { + case IFT_ARCNET: + case IFT_ETHER: + case IFT_FDDI: +#if __NetBSD__ + return LLADDR(ifp->if_sadl); +#else + return ((caddr_t)(ifp + 1)); +#endif + break; + default: + return NULL; + } +} + +TAILQ_HEAD(dadq_head, dadq); +struct dadq { + TAILQ_ENTRY(dadq) dad_list; + struct ifaddr *dad_ifa; + int dad_count; /* max NS to send */ + int dad_ns_tcount; /* # of trials to send NS */ + int dad_ns_ocount; /* NS sent so far */ + int dad_ns_icount; + int dad_na_icount; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + struct callout_handle dad_timer; +#endif +}; + +static struct dadq_head dadq; + +static struct dadq * +nd6_dad_find(ifa) + struct ifaddr *ifa; +{ + struct dadq *dp; + + for (dp = dadq.tqh_first; dp; dp = dp->dad_list.tqe_next) { + if (dp->dad_ifa == ifa) + return dp; + } + return NULL; +} + +/* + * Start Duplicated Address Detection (DAD) for specified interface address. 
+ */ +void +nd6_dad_start(ifa, tick) + struct ifaddr *ifa; + int *tick; /* minimum delay ticks for IFF_UP event */ +{ + struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; + struct dadq *dp; + static int dad_init = 0; + + if (!dad_init) { + TAILQ_INIT(&dadq); + dad_init++; + } + + /* + * If we don't need DAD, don't do it. + * There are several cases: + * - DAD is disabled (ip6_dad_count == 0) + * - the interface address is anycast + */ + if (!(ia->ia6_flags & IN6_IFF_TENTATIVE)) { + log(LOG_DEBUG, + "nd6_dad_start: called with non-tentative address " + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); + return; + } + if (ia->ia6_flags & IN6_IFF_ANYCAST) { + ia->ia6_flags &= ~IN6_IFF_TENTATIVE; + return; + } + if (!ip6_dad_count) { + ia->ia6_flags &= ~IN6_IFF_TENTATIVE; + return; + } + if (!ifa->ifa_ifp) + panic("nd6_dad_start: ifa->ifa_ifp == NULL"); + if (!(ifa->ifa_ifp->if_flags & IFF_UP)) + return; + if (nd6_dad_find(ifa) != NULL) { + /* DAD already in progress */ + return; + } + + dp = _MALLOC(sizeof(*dp), M_IP6NDP, M_NOWAIT); + if (dp == NULL) { + log(LOG_ERR, "nd6_dad_start: memory allocation failed for " + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); + return; + } + bzero(dp, sizeof(*dp)); + TAILQ_INSERT_TAIL(&dadq, (struct dadq *)dp, dad_list); + +#if ND6_DEBUG + log(LOG_DEBUG, "%s: starting DAD for %s\n", if_name(ifa->ifa_ifp), + ip6_sprintf(&ia->ia_addr.sin6_addr)); +#endif + + /* + * Send NS packet for DAD, ip6_dad_count times. + * Note that we must delay the first transmission, if this is the + * first packet to be sent from the interface after interface + * (re)initialization. 
+ */ + dp->dad_ifa = ifa; + ifa->ifa_refcnt++; /*just for safety*/ + dp->dad_count = ip6_dad_count; + dp->dad_ns_icount = dp->dad_na_icount = 0; + dp->dad_ns_ocount = dp->dad_ns_tcount = 0; + if (!tick) { + nd6_dad_ns_output(dp, ifa); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + dp->dad_timer = +#endif + timeout((void (*) __P((void *)))nd6_dad_timer, (void *)ifa, + nd_ifinfo[ifa->ifa_ifp->if_index].retrans * hz / 1000); + } else { + int ntick; + + if (*tick == 0) + ntick = random() % (MAX_RTR_SOLICITATION_DELAY * hz); + else + ntick = *tick + random() % (hz / 2); + *tick = ntick; +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + dp->dad_timer = +#endif + timeout((void (*) __P((void *)))nd6_dad_timer, (void *)ifa, + ntick); + } +} + +static void +nd6_dad_timer(ifa) + struct ifaddr *ifa; +{ + int s; + struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; + struct dadq *dp; + +#ifdef __APPLE__ + boolean_t funnel_state; + funnel_state = thread_set_funneled(TRUE); +#endif +#if __NetBSD__ + s = splsoftnet(); /*XXX*/ +#else + s = splnet(); /*XXX*/ +#endif + + /* Sanity check */ + if (ia == NULL) { + log(LOG_ERR, "nd6_dad_timer: called with null parameter\n"); + goto done; + } + dp = nd6_dad_find(ifa); + if (dp == NULL) { + log(LOG_ERR, "nd6_dad_timer: DAD structure not found\n"); + goto done; + } + if (ia->ia6_flags & IN6_IFF_DUPLICATED) { + log(LOG_ERR, "nd6_dad_timer: called with duplicated address " + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); + goto done; + } + if ((ia->ia6_flags & IN6_IFF_TENTATIVE) == 0) { + log(LOG_ERR, "nd6_dad_timer: called with non-tentative address " + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? 
if_name(ifa->ifa_ifp) : "???"); + goto done; + } + + /* timeouted with IFF_{RUNNING,UP} check */ + if (dp->dad_ns_tcount > dad_maxtry) { + log(LOG_ERR, "%s: could not run DAD, driver problem?\n", + if_name(ifa->ifa_ifp)); + + TAILQ_REMOVE(&dadq, (struct dadq *)dp, dad_list); + _FREE(dp, M_IP6NDP); + dp = NULL; + IFAFREE(ifa); + goto done; + } + + /* Need more checks? */ + if (dp->dad_ns_ocount < dp->dad_count) { + /* + * We have more NS to go. Send NS packet for DAD. + */ + nd6_dad_ns_output(dp, ifa); +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + dp->dad_timer = +#endif + timeout((void (*) __P((void *)))nd6_dad_timer, (void *)ifa, + nd_ifinfo[ifa->ifa_ifp->if_index].retrans * hz / 1000); + } else { + /* + * We have transmitted sufficient number of DAD packets. + * See what we've got. + */ + int duplicate; + + duplicate = 0; + + if (dp->dad_na_icount) { + /* + * the check is in nd6_dad_na_input(), + * but just in case + */ + duplicate++; + } + + if (dp->dad_ns_icount) { +#if 0 /*heuristics*/ + /* + * if + * - we have sent many(?) DAD NS, and + * - the number of NS we sent equals to the + * number of NS we've got, and + * - we've got no NA + * we may have a faulty network card/driver which + * loops back multicasts to myself. + */ + if (3 < dp->dad_count + && dp->dad_ns_icount == dp->dad_count + && dp->dad_na_icount == 0) { + log(LOG_INFO, "DAD questionable for %s(%s): " + "network card loops back multicast?\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + if_name(ifa->ifa_ifp)); + /* XXX consider it a duplicate or not? */ + /* duplicate++; */ + } else { + /* We've seen NS, means DAD has failed. */ + duplicate++; + } +#else + /* We've seen NS, means DAD has failed. */ + duplicate++; +#endif + } + + if (duplicate) { + /* (*dp) will be freed in nd6_dad_duplicated() */ + dp = NULL; + nd6_dad_duplicated(ifa); + } else { + /* + * We are done with DAD. No NA came, no NS came. + * duplicated address found. 
+ */ + ia->ia6_flags &= ~IN6_IFF_TENTATIVE; + +#if ND6_DEBUG + log(LOG_INFO, + "%s: DAD complete for %s - no duplicates found\n", + if_name(ifa->ifa_ifp), + ip6_sprintf(&ia->ia_addr.sin6_addr)); +#endif + + TAILQ_REMOVE(&dadq, (struct dadq *)dp, dad_list); + _FREE(dp, M_IP6NDP); + dp = NULL; + IFAFREE(ifa); + } + } + +done: + splx(s); +#ifdef __APPLE__ + (void) thread_set_funneled(funnel_state); +#endif +} + +void +nd6_dad_duplicated(ifa) + struct ifaddr *ifa; +{ + struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; + struct dadq *dp; + + dp = nd6_dad_find(ifa); + if (dp == NULL) { + log(LOG_ERR, "nd6_dad_duplicated: DAD structure not found\n"); + return; + } + + log(LOG_ERR, "%s: DAD detected duplicate IPv6 address %s: %d NS, " + "%d NA\n", if_name(ifa->ifa_ifp), + ip6_sprintf(&ia->ia_addr.sin6_addr), + dp->dad_ns_icount, dp->dad_na_icount); + + ia->ia6_flags &= ~IN6_IFF_TENTATIVE; + ia->ia6_flags |= IN6_IFF_DUPLICATED; + + /* We are done with DAD, with duplicated address found. (failure) */ + untimeout((void (*) __P((void *)))nd6_dad_timer, (void *)ifa +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 + , dp->dad_timer +#endif + ); + + log(LOG_ERR, "%s: DAD complete for %s - duplicate found\n", + if_name(ifa->ifa_ifp), ip6_sprintf(&ia->ia_addr.sin6_addr)); + log(LOG_ERR, "%s: manual intervention required\n", + if_name(ifa->ifa_ifp)); + + TAILQ_REMOVE(&dadq, (struct dadq *)dp, dad_list); + _FREE(dp, M_IP6NDP); + dp = NULL; + IFAFREE(ifa); +} + +static void +nd6_dad_ns_output(dp, ifa) + struct dadq *dp; + struct ifaddr *ifa; +{ + struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; + struct ifnet *ifp = ifa->ifa_ifp; + + dp->dad_ns_tcount++; + if ((ifp->if_flags & IFF_UP) == 0) { +#if 0 + printf("%s: interface down?\n", if_name(ifp)); +#endif + return; + } + if ((ifp->if_flags & IFF_RUNNING) == 0) { +#if 0 + printf("%s: interface not running?\n", if_name(ifp)); +#endif + return; + } + + dp->dad_ns_ocount++; + nd6_ns_output(ifp, NULL, &ia->ia_addr.sin6_addr, NULL, 1); +} + 
+static void +nd6_dad_ns_input(ifa) + struct ifaddr *ifa; +{ + struct in6_ifaddr *ia; + struct ifnet *ifp; + struct in6_addr *taddr6; + struct dadq *dp; + int duplicate; + + if (!ifa) + panic("ifa == NULL in nd6_dad_ns_input"); + + ia = (struct in6_ifaddr *)ifa; + ifp = ifa->ifa_ifp; + taddr6 = &ia->ia_addr.sin6_addr; + duplicate = 0; + dp = nd6_dad_find(ifa); + + /* + * If it is from myself, ignore this. + */ + if (ifp && (ifp->if_flags & IFF_LOOPBACK)) + return; + + /* Quickhack - completely ignore DAD NS packets */ + if (dad_ignore_ns) { + log(LOG_INFO, "nd6_dad_ns_input: ignoring DAD NS packet for " + "address %s(%s)\n", ip6_sprintf(taddr6), + if_name(ifa->ifa_ifp)); + return; + } + + /* + * if I'm yet to start DAD, someone else started using this address + * first. I have a duplicate and you win. + */ + if (!dp || dp->dad_ns_ocount == 0) + duplicate++; + + /* XXX more checks for loopback situation - see nd6_dad_timer too */ + + if (duplicate) { + dp = NULL; /* will be freed in nd6_dad_duplicated() */ + nd6_dad_duplicated(ifa); + } else { + /* + * not sure if I got a duplicate. + * increment ns count and see what happens. + */ + if (dp) + dp->dad_ns_icount++; + } +} + +static void +nd6_dad_na_input(ifa) + struct ifaddr *ifa; +{ + struct dadq *dp; + + if (!ifa) + panic("ifa == NULL in nd6_dad_na_input"); + + dp = nd6_dad_find(ifa); + if (dp) + dp->dad_na_icount++; + + /* remove the address. */ + nd6_dad_duplicated(ifa); +} diff --git a/bsd/netinet6/nd6_rtr.c b/bsd/netinet6/nd6_rtr.c new file mode 100644 index 000000000..47c588de4 --- /dev/null +++ b/bsd/netinet6/nd6_rtr.c @@ -0,0 +1,1725 @@ +/* $KAME: nd6_rtr.c,v 1.30 2000/03/21 11:37:31 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) +#include +#endif +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if MIP6 +#include +#endif + +#include + +#define SDL(s) ((struct sockaddr_dl *)s) + +static struct nd_defrouter *defrtrlist_update __P((struct nd_defrouter *)); +static int prelist_add __P((struct nd_prefix *, struct nd_defrouter *)); +/* static struct nd_prefix *prefix_lookup __P((struct nd_prefix *)); XXXYYY */ +static struct in6_ifaddr *in6_ifadd __P((struct ifnet *, struct in6_addr *, + struct in6_addr *, int)); +/*static struct nd_pfxrouter *pfxrtr_lookup __P((struct nd_prefix *, + struct nd_defrouter *)); XXXYYYY */ +static void pfxrtr_add __P((struct nd_prefix *, struct nd_defrouter *)); +static void pfxrtr_del __P((struct nd_pfxrouter *)); +/*static struct nd_pfxrouter *find_pfxlist_reachable_router __P((struct nd_prefix *)); XXXYYY */ +static void nd6_detach_prefix __P((struct nd_prefix *)); +static void nd6_attach_prefix __P((struct nd_prefix *)); +/* static void defrouter_addifreq __P((struct ifnet *)); XXXYYY */ + +static void in6_init_address_ltimes __P((struct nd_prefix *ndpr, + struct in6_addrlifetime *lt6, + int update_vltime)); + +static int rt6_deleteroute __P((struct radix_node *, void *)); + +extern int nd6_recalc_reachtm_interval; + +struct ifnet *nd6_defifp; +int nd6_defifindex; + + +#if MIP6 +void (*mip6_select_defrtr_hook)(void) = NULL; +struct nd_prefix * (*mip6_get_home_prefix_hook)(void) = NULL; +void (*mip6_prelist_update_hook)(struct nd_prefix *pr, + struct nd_defrouter *dr) = NULL; +void (*mip6_probe_pfxrtrs_hook)(void) = NULL; +void (*mip6_store_advint_hook)(struct nd_opt_advint *ai, + struct nd_defrouter *dr) = NULL; +int (*mip6_get_md_state_hook)(void) = 0; +void (*mip6_minus_a_case_hook)(struct nd_prefix *) = NULL; +#endif /* MIP6 */ + +/* + * 
Receive Router Solicitation Message - just for routers. + * Router solicitation/advertisement is mostly managed by userland program + * (rtadvd) so here we have no function like nd6_ra_output(). + * + * Based on RFC 2461 + */ +void +nd6_rs_input(m, off, icmp6len) + struct mbuf *m; + int off, icmp6len; +{ + struct ifnet *ifp = m->m_pkthdr.rcvif; + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + struct nd_router_solicit *nd_rs; + struct in6_addr saddr6 = ip6->ip6_src; +#if 0 + struct in6_addr daddr6 = ip6->ip6_dst; +#endif + char *lladdr = NULL; + int lladdrlen = 0; +#if 0 + struct sockaddr_dl *sdl = (struct sockaddr_dl *)NULL; + struct llinfo_nd6 *ln = (struct llinfo_nd6 *)NULL; + struct rtentry *rt = NULL; + int is_newentry; +#endif + union nd_opts ndopts; + + /* If I'm not a router, ignore it. */ + if (ip6_accept_rtadv != 0 || ip6_forwarding != 1) + goto freeit; + + /* Sanity checks */ + if (ip6->ip6_hlim != 255) { + log(LOG_ERR, + "nd6_rs_input: invalid hlim %d\n", ip6->ip6_hlim); + goto freeit; + } + + /* + * Don't update the neighbor cache, if src = ::. + * This indicates that the src has no IP address assigned yet. 
+ */ + if (IN6_IS_ADDR_UNSPECIFIED(&saddr6)) + goto freeit; + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, icmp6len,); + nd_rs = (struct nd_router_solicit *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(nd_rs, struct nd_router_solicit *, m, off, icmp6len); + if (nd_rs == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } +#endif + + icmp6len -= sizeof(*nd_rs); + nd6_option_init(nd_rs + 1, icmp6len, &ndopts); + if (nd6_options(&ndopts) < 0) { + log(LOG_INFO, "nd6_rs_input: invalid ND option, ignored\n"); + goto freeit; + } + + if (ndopts.nd_opts_src_lladdr) { + lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1); + lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3; + } + + if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { + log(LOG_INFO, + "nd6_rs_input: lladdrlen mismatch for %s " + "(if %d, RS packet %d)\n", + ip6_sprintf(&saddr6), ifp->if_addrlen, lladdrlen - 2); + } + + nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen, ND_ROUTER_SOLICIT, 0); + + freeit: + m_freem(m); +} + +/* + * Receive Router Advertisement Message. 
+ * + * Based on RFC 2461 + * TODO: on-link bit on prefix information + * TODO: ND_RA_FLAG_{OTHER,MANAGED} processing + */ +void +nd6_ra_input(m, off, icmp6len) + struct mbuf *m; + int off, icmp6len; +{ + struct ifnet *ifp = m->m_pkthdr.rcvif; + struct nd_ifinfo *ndi = &nd_ifinfo[ifp->if_index]; + struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + struct nd_router_advert *nd_ra; + struct in6_addr saddr6 = ip6->ip6_src; +#if 0 + struct in6_addr daddr6 = ip6->ip6_dst; + int flags; /* = nd_ra->nd_ra_flags_reserved; */ + int is_managed = ((flags & ND_RA_FLAG_MANAGED) != 0); + int is_other = ((flags & ND_RA_FLAG_OTHER) != 0); +#endif + union nd_opts ndopts; + struct nd_defrouter *dr; + + if (ip6_accept_rtadv == 0) + goto freeit; + + if (ip6->ip6_hlim != 255) { + log(LOG_ERR, + "nd6_ra_input: invalid hlim %d\n", ip6->ip6_hlim); + goto freeit; + } + + if (!IN6_IS_ADDR_LINKLOCAL(&saddr6)) { + log(LOG_ERR, + "nd6_ra_input: src %s is not link-local\n", + ip6_sprintf(&saddr6)); + goto freeit; + } + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, icmp6len,); + nd_ra = (struct nd_router_advert *)((caddr_t)ip6 + off); +#else + IP6_EXTHDR_GET(nd_ra, struct nd_router_advert *, m, off, icmp6len); + if (nd_ra == NULL) { + icmp6stat.icp6s_tooshort++; + return; + } +#endif + + icmp6len -= sizeof(*nd_ra); + nd6_option_init(nd_ra + 1, icmp6len, &ndopts); + if (nd6_options(&ndopts) < 0) { + log(LOG_INFO, "nd6_ra_input: invalid ND option, ignored\n"); + goto freeit; + } + + { + struct nd_defrouter dr0; + u_int32_t advreachable = nd_ra->nd_ra_reachable; +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + dr0.rtaddr = saddr6; + dr0.flags = nd_ra->nd_ra_flags_reserved; + dr0.rtlifetime = ntohs(nd_ra->nd_ra_router_lifetime); + dr0.expire = time_second + dr0.rtlifetime; + dr0.ifp = ifp; + dr0.advint = 0; /* Mobile IPv6 */ + dr0.advint_expire = 0; /* Mobile IPv6 */ + dr0.advints_lost = 0; /* Mobile IPv6 */ + /* unspecified or 
not? (RFC 2461 6.3.4) */ + if (advreachable) { + NTOHL(advreachable); + if (advreachable <= MAX_REACHABLE_TIME && + ndi->basereachable != advreachable) { + ndi->basereachable = advreachable; + ndi->reachable = ND_COMPUTE_RTIME(ndi->basereachable); + ndi->recalctm = nd6_recalc_reachtm_interval; /* reset */ + } + } + if (nd_ra->nd_ra_retransmit) + ndi->retrans = ntohl(nd_ra->nd_ra_retransmit); + if (nd_ra->nd_ra_curhoplimit) + ndi->chlim = nd_ra->nd_ra_curhoplimit; + dr = defrtrlist_update(&dr0); + } + + /* + * prefix + */ + if (ndopts.nd_opts_pi) { + struct nd_opt_hdr *pt; + struct nd_opt_prefix_info *pi; + struct nd_prefix pr; + + for (pt = (struct nd_opt_hdr *)ndopts.nd_opts_pi; + pt <= (struct nd_opt_hdr *)ndopts.nd_opts_pi_end; + pt = (struct nd_opt_hdr *)((caddr_t)pt + + (pt->nd_opt_len << 3))) { + if (pt->nd_opt_type != ND_OPT_PREFIX_INFORMATION) + continue; + pi = (struct nd_opt_prefix_info *)pt; + + if (pi->nd_opt_pi_len != 4) { + log(LOG_INFO, "nd6_ra_input: invalid option " + "len %d for prefix information option, " + "ignored\n", pi->nd_opt_pi_len); + continue; + } + + if (128 < pi->nd_opt_pi_prefix_len) { + log(LOG_INFO, "nd6_ra_input: invalid prefix " + "len %d for prefix information option, " + "ignored\n", pi->nd_opt_pi_prefix_len); + continue; + } + + if (IN6_IS_ADDR_MULTICAST(&pi->nd_opt_pi_prefix) + || IN6_IS_ADDR_LINKLOCAL(&pi->nd_opt_pi_prefix)) { + log(LOG_INFO, "nd6_ra_input: invalid prefix " + "%s, ignored\n", + ip6_sprintf(&pi->nd_opt_pi_prefix)); + continue; + } + + /* aggregatable unicast address, rfc2374 */ + if ((pi->nd_opt_pi_prefix.s6_addr8[0] & 0xe0) == 0x20 + && pi->nd_opt_pi_prefix_len != 64) { + log(LOG_INFO, "nd6_ra_input: invalid prefixlen " + "%d for rfc2374 prefix %s, ignored\n", + pi->nd_opt_pi_prefix_len, + ip6_sprintf(&pi->nd_opt_pi_prefix)); + continue; + } + + bzero(&pr, sizeof(pr)); + pr.ndpr_prefix.sin6_family = AF_INET6; + pr.ndpr_prefix.sin6_len = sizeof(pr.ndpr_prefix); + pr.ndpr_prefix.sin6_addr = 
pi->nd_opt_pi_prefix; + pr.ndpr_ifp = (struct ifnet *)m->m_pkthdr.rcvif; + + pr.ndpr_raf_onlink = (pi->nd_opt_pi_flags_reserved & + ND_OPT_PI_FLAG_ONLINK) ? 1 : 0; + pr.ndpr_raf_auto = (pi->nd_opt_pi_flags_reserved & + ND_OPT_PI_FLAG_AUTO) ? 1 : 0; + pr.ndpr_plen = pi->nd_opt_pi_prefix_len; + pr.ndpr_vltime = ntohl(pi->nd_opt_pi_valid_time); + pr.ndpr_pltime = + ntohl(pi->nd_opt_pi_preferred_time); + + if (in6_init_prefix_ltimes(&pr)) + continue; /* prefix lifetime init failed */ + + (void)prelist_update(&pr, dr, m); + } + } + + /* + * MTU + */ + if (ndopts.nd_opts_mtu && ndopts.nd_opts_mtu->nd_opt_mtu_len == 1) { + u_int32_t mtu = ntohl(ndopts.nd_opts_mtu->nd_opt_mtu_mtu); + + /* lower bound */ + if (mtu < IPV6_MMTU) { + log(LOG_INFO, "nd6_ra_input: bogus mtu option " + "mtu=%d sent from %s, ignoring\n", + mtu, ip6_sprintf(&ip6->ip6_src)); + goto skip; + } + + /* upper bound */ + if (ndi->maxmtu) { + if (mtu <= ndi->maxmtu) { + int change = (ndi->linkmtu != mtu); + + ndi->linkmtu = mtu; + if (change) /* in6_maxmtu may change */ + in6_setmaxmtu(); + } else { + log(LOG_INFO, "nd6_ra_input: bogus mtu " + "mtu=%d sent from %s; " + "exceeds maxmtu %d, ignoring\n", + mtu, ip6_sprintf(&ip6->ip6_src), + ndi->maxmtu); + } + } else { + log(LOG_INFO, "nd6_ra_input: mtu option " + "mtu=%d sent from %s; maxmtu unknown, " + "ignoring\n", + mtu, ip6_sprintf(&ip6->ip6_src)); + } + } + + skip: + + /* + * Src linkaddress + */ + { + char *lladdr = NULL; + int lladdrlen = 0; + + if (ndopts.nd_opts_src_lladdr) { + lladdr = (char *)(ndopts.nd_opts_src_lladdr + 1); + lladdrlen = ndopts.nd_opts_src_lladdr->nd_opt_len << 3; + } + + if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { + log(LOG_INFO, + "nd6_ra_input: lladdrlen mismatch for %s " + "(if %d, RA packet %d)\n", + ip6_sprintf(&saddr6), ifp->if_addrlen, lladdrlen - 2); + } + + nd6_cache_lladdr(ifp, &saddr6, lladdr, lladdrlen, ND_ROUTER_ADVERT, 0); + + /* + * Installing a link-layer address might change the state of the 
+ * router's neighbor cache, which might also affect our on-link + * detection of adveritsed prefixes. + */ + pfxlist_onlink_check(); + } + +#if MIP6 + if (mip6_store_advint_hook) { + if (ndopts.nd_opts_adv) + (*mip6_store_advint_hook)(ndopts.nd_opts_adv, dr); + } +#endif + +freeit: + m_freem(m); +} + +/* + * default router list proccessing sub routines + */ +void +defrouter_addreq(new) + struct nd_defrouter *new; +{ + struct sockaddr_in6 def, mask, gate; + int s; + + Bzero(&def, sizeof(def)); + Bzero(&mask, sizeof(mask)); + Bzero(&gate, sizeof(gate)); + + def.sin6_len = mask.sin6_len = gate.sin6_len + = sizeof(struct sockaddr_in6); + def.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; + gate.sin6_addr = new->rtaddr; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + (void)rtrequest(RTM_ADD, (struct sockaddr *)&def, + (struct sockaddr *)&gate, (struct sockaddr *)&mask, + RTF_GATEWAY, NULL); + splx(s); + return; +} + +/* Add a route to a given interface as default */ +void +defrouter_addifreq(ifp) + struct ifnet *ifp; +{ + struct sockaddr_in6 def, mask; + struct ifaddr *ifa; + int error, flags; + + bzero(&def, sizeof(def)); + bzero(&mask, sizeof(mask)); + + def.sin6_len = mask.sin6_len = sizeof(struct sockaddr_in6); + def.sin6_family = mask.sin6_family = AF_INET6; + + /* + * Search for an ifaddr beloging to the specified interface. + * XXX: An IPv6 address are required to be assigned on the interface. + */ + if ((ifa = ifaof_ifpforaddr((struct sockaddr *)&def, ifp)) == NULL) { + log(LOG_ERR, /* better error? 
*/ + "defrouter_addifreq: failed to find an ifaddr " + "to install a route to interface %s\n", + if_name(ifp)); + return; + } + + flags = ifa->ifa_flags; + if ((ifp->if_flags & IFF_POINTOPOINT) != 0) + flags &= ~RTF_CLONING; + if ((error = rtrequest(RTM_ADD, (struct sockaddr *)&def, + ifa->ifa_addr, (struct sockaddr *)&mask, + flags, NULL)) != 0) { + log(LOG_ERR, + "defrouter_addifreq: failed to install a route to " + "interface %s (errno = %d)\n", + if_name(ifp), error); + } +} + +struct nd_defrouter * +defrouter_lookup(addr, ifp) + struct in6_addr *addr; + struct ifnet *ifp; +{ + struct nd_defrouter *dr; + + for (dr = TAILQ_FIRST(&nd_defrouter); dr; + dr = TAILQ_NEXT(dr, dr_entry)) { + if (dr->ifp == ifp && IN6_ARE_ADDR_EQUAL(addr, &dr->rtaddr)) + return(dr); + } + + return(NULL); /* search failed */ +} + +void +defrouter_delreq(dr, dofree) + struct nd_defrouter *dr; + int dofree; +{ + struct sockaddr_in6 def, mask, gate; + + Bzero(&def, sizeof(def)); + Bzero(&mask, sizeof(mask)); + Bzero(&gate, sizeof(gate)); + + def.sin6_len = mask.sin6_len = gate.sin6_len + = sizeof(struct sockaddr_in6); + def.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; + gate.sin6_addr = dr->rtaddr; + + rtrequest(RTM_DELETE, (struct sockaddr *)&def, + (struct sockaddr *)&gate, + (struct sockaddr *)&mask, + RTF_GATEWAY, (struct rtentry **)0); + + if (dofree) /* XXX: necessary? */ + _FREE(dr, M_IP6NDP); +} + +void +defrtrlist_del(dr) + struct nd_defrouter *dr; +{ + struct nd_defrouter *deldr = NULL; + struct nd_prefix *pr; + + /* + * Flush all the routing table entries that use the router + * as a next hop. + */ + if (!ip6_forwarding && ip6_accept_rtadv) { + /* above is a good condition? */ + rt6_flush(&dr->rtaddr, dr->ifp); + } + + if (dr == TAILQ_FIRST(&nd_defrouter)) + deldr = dr; /* The router is primary. */ + + TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + + /* + * Also delete all the pointers to the router in each prefix lists. 
+ */ + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + struct nd_pfxrouter *pfxrtr; + if ((pfxrtr = pfxrtr_lookup(pr, dr)) != NULL) + pfxrtr_del(pfxrtr); + } + pfxlist_onlink_check(); + + /* + * If the router is the primary one, choose a new one. + * Note that defrouter_select() will remove the current gateway + * from the routing table. + */ + if (deldr) + defrouter_select(); + + _FREE(dr, M_IP6NDP); +} + +/* + * Default Router Selection according to Section 6.3.6 of RFC 2461: + * 1) Routers that are reachable or probably reachable should be + * preferred. + * 2) When no routers on the list are known to be reachable or + * probably reachable, routers SHOULD be selected in a round-robin + * fashion. + * 3) If the Default Router List is empty, assume that all + * destinations are on-link. + */ +void +defrouter_select() +{ +#ifdef __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + struct nd_defrouter *dr, anydr; + struct rtentry *rt = NULL; + struct llinfo_nd6 *ln = NULL; + +#if MIP6 + /* Mobile IPv6 alternative routine */ + if (mip6_select_defrtr_hook) { + (*mip6_select_defrtr_hook)(); /* XXXYYY Temporary? */ + splx(s); + return; + } + /* End of Mobile IPv6 */ +#endif /* MIP6 */ + + /* + * Search for a (probably) reachable router from the list. + */ + for (dr = TAILQ_FIRST(&nd_defrouter); dr; + dr = TAILQ_NEXT(dr, dr_entry)) { + if ((rt = nd6_lookup(&dr->rtaddr, 0, dr->ifp)) && + (ln = (struct llinfo_nd6 *)rt->rt_llinfo) && + ND6_IS_LLINFO_PROBREACH(ln)) { + /* Got it, and move it to the head */ + TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); + TAILQ_INSERT_HEAD(&nd_defrouter, dr, dr_entry); + break; + } + } + + if ((dr = TAILQ_FIRST(&nd_defrouter))) { + /* + * De-install the previous default gateway and install + * a new one. + * Note that if there is no reachable router in the list, + * the head entry will be used anyway. + * XXX: do we have to check the current routing table entry? 
+ */ + bzero(&anydr, sizeof(anydr)); + defrouter_delreq(&anydr, 0); + defrouter_addreq(dr); + } + else { + /* + * The Default Router List is empty, so install the default + * route to an inteface. + * XXX: The specification does not say this mechanism should + * be restricted to hosts, but this would be not useful + * (even harmful) for routers. + */ + if (!ip6_forwarding) { + /* + * De-install the current default route + * in advance. + */ + bzero(&anydr, sizeof(anydr)); + defrouter_delreq(&anydr, 0); + if (nd6_defifp) { + /* + * Install a route to the default interface + * as default route. + */ + defrouter_addifreq(nd6_defifp); + } +#if ND6_DEBUG + else /* noisy log? */ + log(LOG_INFO, "defrouter_select: " + "there's no default router and no default" + " interface\n"); +#endif + } + } + + splx(s); + return; +} + +static struct nd_defrouter * +defrtrlist_update(new) + struct nd_defrouter *new; +{ + struct nd_defrouter *dr, *n; +#ifdef __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + + if ((dr = defrouter_lookup(&new->rtaddr, new->ifp)) != NULL) { + /* entry exists */ + if (new->rtlifetime == 0) { + defrtrlist_del(dr); + dr = NULL; + } else { + /* override */ + dr->flags = new->flags; /* xxx flag check */ + dr->rtlifetime = new->rtlifetime; + dr->expire = new->expire; + } + splx(s); + return(dr); + } + + /* entry does not exist */ + if (new->rtlifetime == 0) { + splx(s); + return(NULL); + } + + n = (struct nd_defrouter *)_MALLOC(sizeof(*n), M_IP6NDP, M_NOWAIT); + if (n == NULL) { + splx(s); + return(NULL); + } + bzero(n, sizeof(*n)); + *n = *new; + + /* + * Insert the new router at the end of the Default Router List. + * If there is no other router, install it anyway. Otherwise, + * just continue to use the current default router. 
+ */ + TAILQ_INSERT_TAIL(&nd_defrouter, n, dr_entry); + if (TAILQ_FIRST(&nd_defrouter) == n) + defrouter_select(); + splx(s); + + return(n); +} + +struct nd_pfxrouter * +pfxrtr_lookup(pr, dr) + struct nd_prefix *pr; + struct nd_defrouter *dr; +{ + struct nd_pfxrouter *search; + + for (search = pr->ndpr_advrtrs.lh_first; search; search = search->pfr_next) { + if (search->router == dr) + break; + } + + return(search); +} + +static void +pfxrtr_add(pr, dr) + struct nd_prefix *pr; + struct nd_defrouter *dr; +{ + struct nd_pfxrouter *new; + + new = (struct nd_pfxrouter *)_MALLOC(sizeof(*new), M_IP6NDP, M_NOWAIT); + if (new == NULL) + return; + bzero(new, sizeof(*new)); + new->router = dr; + + LIST_INSERT_HEAD(&pr->ndpr_advrtrs, new, pfr_entry); + + pfxlist_onlink_check(); +} + +static void +pfxrtr_del(pfr) + struct nd_pfxrouter *pfr; +{ + LIST_REMOVE(pfr, pfr_entry); + _FREE(pfr, M_IP6NDP); +} + +struct nd_prefix * +prefix_lookup(pr) + struct nd_prefix *pr; +{ + struct nd_prefix *search; + + for (search = nd_prefix.lh_first; search; search = search->ndpr_next) { + if (pr->ndpr_ifp == search->ndpr_ifp && + pr->ndpr_plen == search->ndpr_plen && + in6_are_prefix_equal(&pr->ndpr_prefix.sin6_addr, + &search->ndpr_prefix.sin6_addr, + pr->ndpr_plen) + ) { + break; + } + } + + return(search); +} + +static int +prelist_add(pr, dr) + struct nd_prefix *pr; + struct nd_defrouter *dr; +{ + struct nd_prefix *new; + int i, s; + + new = (struct nd_prefix *)_MALLOC(sizeof(*new), M_IP6NDP, M_NOWAIT); + if (new == NULL) + return ENOMEM; + bzero(new, sizeof(*new)); + *new = *pr; + + /* initilization */ + new->ndpr_statef_onlink = pr->ndpr_statef_onlink; + LIST_INIT(&new->ndpr_advrtrs); + in6_prefixlen2mask(&new->ndpr_mask, new->ndpr_plen); + /* make prefix in the canonical form */ + for (i = 0; i < 4; i++) + new->ndpr_prefix.sin6_addr.s6_addr32[i] &= + new->ndpr_mask.s6_addr32[i]; + + /* xxx ND_OPT_PI_FLAG_ONLINK processing */ + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); 
+#endif + /* link ndpr_entry to nd_prefix list */ + LIST_INSERT_HEAD(&nd_prefix, new, ndpr_entry); + splx(s); + + if (dr) { + pfxrtr_add(new, dr); +#if MIP6 + if (mip6_get_md_state_hook) { + /* + * If we are in UNDEFINED state and a router appears, + * select that router and change state. + * This case takes care of transitions from UNDEFINED + * to FOREIGN when the prefix is not known from before. + */ + if ((*mip6_get_md_state_hook)() == MIP6_MD_UNDEFINED) { + if (mip6_select_defrtr_hook) + (*mip6_select_defrtr_hook)(); + } + } +#endif /* MIP6 */ + } + + return 0; +} + +void +prelist_remove(pr) + struct nd_prefix *pr; +{ + struct nd_pfxrouter *pfr, *next; + int s; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + /* unlink ndpr_entry from nd_prefix list */ + LIST_REMOVE(pr, ndpr_entry); + splx(s); + + /* free list of routers that adversed the prefix */ + for (pfr = pr->ndpr_advrtrs.lh_first; pfr; pfr = next) { + next = pfr->pfr_next; + + _FREE(pfr, M_IP6NDP); + } + _FREE(pr, M_IP6NDP); + + pfxlist_onlink_check(); +} + +/* + * NOTE: We set address lifetime to keep + * address lifetime <= prefix lifetime + * invariant. This is to simplify on-link determination code. + * If onlink determination is udated, this routine may have to be updated too. + */ +int +prelist_update(new, dr, m) + struct nd_prefix *new; + struct nd_defrouter *dr; /* may be NULL */ + struct mbuf *m; +{ + struct in6_ifaddr *ia6 = NULL; + struct nd_prefix *pr; +#ifdef __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + int error = 0; + int auth; + struct in6_addrlifetime *lt6; + u_char onlink; /* Mobile IPv6 */ + + auth = 0; + if (m) { + /* + * Authenticity for NA consists authentication for + * both IP header and IP datagrams, doesn't it ? + */ +#if defined(M_AUTHIPHDR) && defined(M_AUTHIPDGM) + auth = (m->m_flags & M_AUTHIPHDR + && m->m_flags & M_AUTHIPDGM) ? 
1 : 0; +#endif + } + + if ((pr = prefix_lookup(new)) != NULL) { + if (pr->ndpr_ifp != new->ndpr_ifp) { + error = EADDRNOTAVAIL; + goto end; + } + +#if MIP6 + if (mip6_get_home_prefix_hook) { + /* + * The home prefix should be kept away from updates. + * XXXYYY Tunneled RA? New Home Prefix? Unless + * configured, the code below will be executed. + */ + if (pr == (*mip6_get_home_prefix_hook)()) + goto noautoconf1; + } +#endif /* MIP6 */ + + /* update prefix information */ + pr->ndpr_flags = new->ndpr_flags; + pr->ndpr_vltime = new->ndpr_vltime; + pr->ndpr_pltime = new->ndpr_pltime; + pr->ndpr_preferred = new->ndpr_preferred; + pr->ndpr_expire = new->ndpr_expire; + + /* + * RFC 2462 5.5.3 (d) or (e) + * We got a prefix which we have seen in the past. + */ + if (!new->ndpr_raf_auto) + goto noautoconf1; + + if (IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + ia6 = NULL; + else + ia6 = in6ifa_ifpwithaddr(pr->ndpr_ifp, &pr->ndpr_addr); + + if (ia6 == NULL) { + /* + * Special case: + * (1) We have seen the prefix advertised before, but + * we have never performed autoconfig for this prefix. + * This is because Autonomous bit was 0 previously, or + * autoconfig failed due to some other reasons. + * (2) We have seen the prefix advertised before and + * we have performed autoconfig in the past, but + * we seem to have no interface address right now. + * This is because the interface address have expired. + * + * This prefix is fresh, with respect to autoconfig + * process. + * + * Add an address based on RFC 2462 5.5.3 (d). 
+ */ + ia6 = in6_ifadd(pr->ndpr_ifp, + &pr->ndpr_prefix.sin6_addr, &pr->ndpr_addr, + new->ndpr_plen); + if (!ia6) { + error = EADDRNOTAVAIL; + log(LOG_ERR, "prelist_update: failed to add a " + "new address\n"); + goto noautoconf1; + } + + lt6 = &ia6->ia6_lifetime; + + /* address lifetime <= prefix lifetime */ + lt6->ia6t_vltime = new->ndpr_vltime; + lt6->ia6t_pltime = new->ndpr_pltime; + in6_init_address_ltimes(new, lt6, 1); + } else { +#define TWOHOUR (120*60) + /* + * We have seen the prefix before, and we have added + * interface address in the past. We still have + * the interface address assigned. + * + * update address lifetime based on RFC 2462 + * 5.5.3 (e). + */ + int update = 0; + + lt6 = &ia6->ia6_lifetime; + +#if 0 /* RFC 2462 5.5.3 (e) */ + lt6->ia6t_pltime = new->ndpr_pltime; + if (TWOHOUR < new->ndpr_vltime + || lt6pr->nd < new->ndpr_vltime) { + lt6->ia6t_vltime = new->ndpr_vltime; + update++; + } else if (auth + && lt6->ia6t_vltime <= TWOHOUR0 + && new->ndpr_vltime <= lt6->ia6t_vltime) { + lt6->ia6t_vltime = new->ndpr_vltime; + update++; + } else { + lt6->ia6t_vltime = TWOHOUR; + update++; + } + + /* 2 hour rule is not imposed for pref lifetime */ + new->ndpr_apltime = new->ndpr_pltime; + lt6->ia6t_pltime = new->ndpr_pltime; +#else /* update from Jim Bound, (ipng 6712) */ + if (TWOHOUR < new->ndpr_vltime + || lt6->ia6t_vltime < new->ndpr_vltime) { + lt6->ia6t_vltime = new->ndpr_vltime; + update++; + } else if (auth) { + lt6->ia6t_vltime = new->ndpr_vltime; + update++; + } + + /* jim bound rule is not imposed for pref lifetime */ + lt6->ia6t_pltime = new->ndpr_pltime; +#endif + in6_init_address_ltimes(new, lt6, update); + } + + noautoconf1: + +#if 0 + /* address lifetime expire processing, RFC 2462 5.5.4. 
*/ + if (pr->ndpr_preferred && pr->ndpr_preferred < time_second) { + struct in6_ifaddr *ia6; + + ia6 = in6ifa_ifpwithaddr(pr->ndpr_ifp, &pr->ndpr_addr); + if (ia6) + ia6->ia6_flags &= ~IN6_IFF_DEPRECATED; + } +#endif + + onlink = pr->ndpr_statef_onlink; /* Mobile IPv6 */ + + if (dr && pfxrtr_lookup(pr, dr) == NULL) + pfxrtr_add(pr, dr); + +#if MIP6 + if (mip6_prelist_update_hook) { + /* + * Check for home prefix. It can't be a fresh prefix + * (since it's static), so check here. + */ + (*mip6_prelist_update_hook)(pr, dr); + } + + if (mip6_probe_pfxrtrs_hook) { + /* + * If this prefix previously was detached, maybe we + * have moved. + */ + if (!onlink) + (*mip6_probe_pfxrtrs_hook)(); + } +#endif /* MIP6 */ + + } else { + int error_tmp; + + if (new->ndpr_vltime == 0) goto end; + + bzero(&new->ndpr_addr, sizeof(struct in6_addr)); + + /* + * RFC 2462 5.5.3 (d) + * We got a fresh prefix. Perform some sanity checks + * and add an interface address by appending interface ID + * to the advertised prefix. + */ + if (!new->ndpr_raf_auto) + goto noautoconf2; + + ia6 = in6_ifadd(new->ndpr_ifp, &new->ndpr_prefix.sin6_addr, + &new->ndpr_addr, new->ndpr_plen); + if (!ia6) { + error = EADDRNOTAVAIL; + log(LOG_ERR, "prelist_update: " + "failed to add a new address\n"); + goto noautoconf2; + } + /* set onlink bit if an interface route is configured */ + new->ndpr_statef_onlink = (ia6->ia_flags & IFA_ROUTE) ? 1 : 0; + + lt6 = &ia6->ia6_lifetime; + + /* address lifetime <= prefix lifetime */ + lt6->ia6t_vltime = new->ndpr_vltime; + lt6->ia6t_pltime = new->ndpr_pltime; + in6_init_address_ltimes(new, lt6, 1); + + noautoconf2: + error_tmp = prelist_add(new, dr); + error = error_tmp ? error_tmp : error; + +#if MIP6 + if (mip6_probe_pfxrtrs_hook) { + /* This is a new prefix, maybe we have moved. 
*/ + (*mip6_probe_pfxrtrs_hook)(); + } + + if (mip6_minus_a_case_hook) { + /* + * If we are still looking for an autoconfigured home + * address when we are in "minus a" case, here's a new + * prefix and hopefully we can use the address derived + *from that. + */ + if (ia6) + (*mip6_minus_a_case_hook)(new); + } +#endif /* MIP6 */ + + } + + end: + splx(s); + return error; +} + +/* + * A supplement function used in the on-link detection below; + * detect if a given prefix has a (probably) reachable advertising router. + * XXX: lengthy function name... + */ +struct nd_pfxrouter * +find_pfxlist_reachable_router(pr) + struct nd_prefix *pr; +{ + struct nd_pfxrouter *pfxrtr; + struct rtentry *rt; + struct llinfo_nd6 *ln; + + for (pfxrtr = LIST_FIRST(&pr->ndpr_advrtrs); pfxrtr; + pfxrtr = LIST_NEXT(pfxrtr, pfr_entry)) { + if ((rt = nd6_lookup(&pfxrtr->router->rtaddr, 0, + pfxrtr->router->ifp)) && + (ln = (struct llinfo_nd6 *)rt->rt_llinfo) && + ND6_IS_LLINFO_PROBREACH(ln)) + break; /* found */ + } + + return(pfxrtr); + +} + +/* + * Check if each prefix in the prefix list has at least one available router + * that advertised the prefix (A router is "available" if its neighbor cache + * entry has reachable or probably reachable). + * If the check fails, the prefix may be off-link, because, for example, + * we have moved from the network but the lifetime of the prefix has not + * been expired yet. So we should not use the prefix if there is another + * prefix that has an available router. + * But if there is no prefix that has an available router, we still regards + * all the prefixes as on-link. This is because we can't tell if all the + * routers are simply dead or if we really moved from the network and there + * is no router around us. + */ +void +pfxlist_onlink_check() +{ + struct nd_prefix *pr; + + /* + * Check if there is a prefix that has a reachable advertising + * router. 
+ */ + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if (find_pfxlist_reachable_router(pr)) + break; + } + + if (pr) { + /* + * There is at least one prefix that has a reachable router. + * First, detach prefixes which has no reachable advertising + * router and then attach other prefixes. + * The order is important since an attached prefix and a + * detached prefix may have a same interface route. + */ + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if (find_pfxlist_reachable_router(pr) == NULL && + pr->ndpr_statef_onlink) { + pr->ndpr_statef_onlink = 0; + nd6_detach_prefix(pr); + } + } + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { + if (find_pfxlist_reachable_router(pr) && + pr->ndpr_statef_onlink == 0) + nd6_attach_prefix(pr); + } + } + else { + /* there is no prefix that has a reachable router */ + for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) + if (pr->ndpr_statef_onlink == 0) + nd6_attach_prefix(pr); + } +} + +static void +nd6_detach_prefix(pr) + struct nd_prefix *pr; +{ + struct in6_ifaddr *ia6; + struct sockaddr_in6 sa6, mask6; + + /* + * Delete the interface route associated with the prefix. + */ + bzero(&sa6, sizeof(sa6)); + sa6.sin6_family = AF_INET6; + sa6.sin6_len = sizeof(sa6); + bcopy(&pr->ndpr_prefix.sin6_addr, &sa6.sin6_addr, + sizeof(struct in6_addr)); + bzero(&mask6, sizeof(mask6)); + mask6.sin6_family = AF_INET6; + mask6.sin6_len = sizeof(sa6); + bcopy(&pr->ndpr_mask, &mask6.sin6_addr, sizeof(struct in6_addr)); + { + int e; + + e = rtrequest(RTM_DELETE, (struct sockaddr *)&sa6, NULL, + (struct sockaddr *)&mask6, 0, NULL); + if (e) { + log(LOG_ERR, + "nd6_detach_prefix: failed to delete route: " + "%s/%d (errno = %d)\n", + ip6_sprintf(&sa6.sin6_addr), + pr->ndpr_plen, + e); + } + } + + /* + * Mark the address derived from the prefix detached so that + * it won't be used as a source address for a new connection. 
+ */ + if (IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + ia6 = NULL; + else + ia6 = in6ifa_ifpwithaddr(pr->ndpr_ifp, &pr->ndpr_addr); + if (ia6) +#if MIP6 + if (mip6_get_home_prefix_hook) + if (pr != (*mip6_get_home_prefix_hook)()) + ia6->ia6_flags |= IN6_IFF_DETACHED; +#else + ia6->ia6_flags |= IN6_IFF_DETACHED; +#endif +} + +static void +nd6_attach_prefix(pr) + struct nd_prefix *pr; +{ + struct ifaddr *ifa; + struct in6_ifaddr *ia6; + + /* + * Add the interface route associated with the prefix(if necessary) + * Should we consider if the L bit is set in pr->ndpr_flags? + */ + ifa = ifaof_ifpforaddr((struct sockaddr *)&pr->ndpr_prefix, + pr->ndpr_ifp); + if (ifa == NULL) { + log(LOG_ERR, + "nd6_attach_prefix: failed to find any ifaddr" + " to add route for a prefix(%s/%d)\n", + ip6_sprintf(&pr->ndpr_addr), pr->ndpr_plen); + } + else { + int e; + struct sockaddr_in6 mask6; + + bzero(&mask6, sizeof(mask6)); + mask6.sin6_family = AF_INET6; + mask6.sin6_len = sizeof(mask6); + mask6.sin6_addr = pr->ndpr_mask; + e = rtrequest(RTM_ADD, (struct sockaddr *)&pr->ndpr_prefix, + ifa->ifa_addr, (struct sockaddr *)&mask6, + ifa->ifa_flags, NULL); + if (e == 0) + pr->ndpr_statef_onlink = 1; + else { + log(LOG_ERR, + "nd6_attach_prefix: failed to add route for" + " a prefix(%s/%d), errno = %d\n", + ip6_sprintf(&pr->ndpr_addr), pr->ndpr_plen, e); + } + } + + /* + * Now the address derived from the prefix can be used as a source + * for a new connection, so clear the detached flag. 
+ */ + if (IN6_IS_ADDR_UNSPECIFIED(&pr->ndpr_addr)) + ia6 = NULL; + else + ia6 = in6ifa_ifpwithaddr(pr->ndpr_ifp, &pr->ndpr_addr); + if (ia6) { + ia6->ia6_flags &= ~IN6_IFF_DETACHED; + if (pr->ndpr_statef_onlink) + ia6->ia_flags |= IFA_ROUTE; + } +} + +static struct in6_ifaddr * +in6_ifadd(ifp, in6, addr, prefixlen) + struct ifnet *ifp; + struct in6_addr *in6; + struct in6_addr *addr; + int prefixlen; /* prefix len of the new prefix in "in6" */ +{ + struct ifaddr *ifa; + struct in6_ifaddr *ia, *ib, *oia; + int s, error; + struct in6_addr mask; + + in6_len2mask(&mask, prefixlen); + + /* find link-local address (will be interface ID) */ + ifa = (struct ifaddr *)in6ifa_ifpforlinklocal(ifp, 0);/* 0 is OK? */ + if (ifa) + ib = (struct in6_ifaddr *)ifa; + else + return NULL; + +#if 0 /* don't care link local addr state, and always do DAD */ + /* if link-local address is not eligible, do not autoconfigure. */ + if (((struct in6_ifaddr *)ifa)->ia6_flags & IN6_IFF_NOTREADY) { + printf("in6_ifadd: link-local address not ready\n"); + return NULL; + } +#endif + + /* prefixlen + ifidlen must be equal to 128 */ + if (prefixlen != in6_mask2len(&ib->ia_prefixmask.sin6_addr)) { + log(LOG_ERR, "in6_ifadd: wrong prefixlen for %s" + "(prefix=%d ifid=%d)\n", if_name(ifp), + prefixlen, + 128 - in6_mask2len(&ib->ia_prefixmask.sin6_addr)); + return NULL; + } + + /* make ifaddr */ + ia = (struct in6_ifaddr *)_MALLOC(sizeof(*ia), M_IFADDR, M_DONTWAIT); + if (ia == NULL) { + printf("ENOBUFS in in6_ifadd %d\n", __LINE__); + return NULL; + } + + bzero((caddr_t)ia, sizeof(*ia)); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + if (ifp->if_flags & IFF_POINTOPOINT) + ia->ia_ifa.ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr; + else + ia->ia_ifa.ifa_dstaddr = NULL; + ia->ia_ifa.ifa_netmask = (struct sockaddr *)&ia->ia_prefixmask; + ia->ia_ifp = ifp; + + /* link to in6_ifaddr */ + if ((oia = in6_ifaddr) != NULL) { + for( ; oia->ia_next; oia = oia->ia_next) + continue; + oia->ia_next = 
ia; + } else + in6_ifaddr = ia; + ia->ia_ifa.ifa_refcnt++; + + /* link to if_addrlist */ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if ((ifa = ifp->if_addrlist) != NULL) { + for ( ; ifa->ifa_next; ifa = ifa->ifa_next) + continue; + ifa->ifa_next = (struct ifaddr *)ia; + } +#else + if (ifp->if_addrlist.tqh_first != NULL) { + TAILQ_INSERT_TAIL(&ifp->if_addrlist, (struct ifaddr *)ia, + ifa_list); + ia->ia_ifa.ifa_refcnt++; + } +#endif +#if 0 + else { + /* + * this should not be the case because there is at least one + * link-local address(see the beginning of the function). + */ + TAILQ_INIT(&ifp->if_addrlist); + } +#endif + + /* new address */ + ia->ia_addr.sin6_len = sizeof(struct sockaddr_in6); + ia->ia_addr.sin6_family = AF_INET6; + /* prefix */ + bcopy(in6, &ia->ia_addr.sin6_addr, sizeof(ia->ia_addr.sin6_addr)); + ia->ia_addr.sin6_addr.s6_addr32[0] &= mask.s6_addr32[0]; + ia->ia_addr.sin6_addr.s6_addr32[1] &= mask.s6_addr32[1]; + ia->ia_addr.sin6_addr.s6_addr32[2] &= mask.s6_addr32[2]; + ia->ia_addr.sin6_addr.s6_addr32[3] &= mask.s6_addr32[3]; + /* interface ID */ + ia->ia_addr.sin6_addr.s6_addr32[0] + |= (ib->ia_addr.sin6_addr.s6_addr32[0] & ~mask.s6_addr32[0]); + ia->ia_addr.sin6_addr.s6_addr32[1] + |= (ib->ia_addr.sin6_addr.s6_addr32[1] & ~mask.s6_addr32[1]); + ia->ia_addr.sin6_addr.s6_addr32[2] + |= (ib->ia_addr.sin6_addr.s6_addr32[2] & ~mask.s6_addr32[2]); + ia->ia_addr.sin6_addr.s6_addr32[3] + |= (ib->ia_addr.sin6_addr.s6_addr32[3] & ~mask.s6_addr32[3]); + + /* new prefix */ + ia->ia_prefixmask.sin6_len = sizeof(struct sockaddr_in6); + ia->ia_prefixmask.sin6_family = AF_INET6; + bcopy(&mask, &ia->ia_prefixmask.sin6_addr, + sizeof(ia->ia_prefixmask.sin6_addr)); + + /* same routine */ + ia->ia_ifa.ifa_rtrequest = + (ifp->if_type == IFT_PPP) ? 
nd6_p2p_rtrequest : nd6_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + ia->ia_ifa.ifa_metric = ifp->if_metric; + + /* add interface route */ + if ((error = rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_UP|RTF_CLONING))) { + log(LOG_NOTICE, "in6_ifadd: failed to add an interface route " + "for %s/%d on %s, errno = %d\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), prefixlen, + if_name(ifp), error); + } + else + ia->ia_flags |= IFA_ROUTE; + + *addr = ia->ia_addr.sin6_addr; + + if (ifp->if_flags & IFF_MULTICAST) { + int error; /* not used */ + struct in6_addr sol6; + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + /* Restore saved multicast addresses(if any). */ + in6_restoremkludge(ia, ifp); +#endif + + /* join solicited node multicast address */ + bzero(&sol6, sizeof(sol6)); + sol6.s6_addr16[0] = htons(0xff02); + sol6.s6_addr16[1] = htons(ifp->if_index); + sol6.s6_addr32[1] = 0; + sol6.s6_addr32[2] = htonl(1); + sol6.s6_addr32[3] = ia->ia_addr.sin6_addr.s6_addr32[3]; + sol6.s6_addr8[12] = 0xff; + (void)in6_addmulti(&sol6, ifp, &error); + } + + ia->ia6_flags |= IN6_IFF_TENTATIVE; + + /* + * To make the interface up. Only AF_INET6 in ia is used... + */ + s = splimp(); + error = dlil_ioctl(PF_INET6, ifp, SIOCSIFADDR, (caddr_t)ia); + if (error == EOPNOTSUPP) + error = 0; + if (error) { + + splx(s); + return NULL; + } + splx(s); + + /* Perform DAD, if needed. 
*/ + nd6_dad_start((struct ifaddr *)ia, NULL); + + return ia; +} + +int +in6_ifdel(ifp, in6) + struct ifnet *ifp; + struct in6_addr *in6; +{ +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + struct ifaddr *ifa; +#endif + struct in6_ifaddr *ia = (struct in6_ifaddr *)NULL; + struct in6_ifaddr *oia = (struct in6_ifaddr *)NULL; + + if (!ifp) + return -1; + + ia = in6ifa_ifpwithaddr(ifp, in6); + if (!ia) + return -1; + + if (ifp->if_flags & IFF_MULTICAST) { + /* + * delete solicited multicast addr for deleting host id + */ + struct in6_multi *in6m; + struct in6_addr llsol; + bzero(&llsol, sizeof(struct in6_addr)); + llsol.s6_addr16[0] = htons(0xff02); + llsol.s6_addr16[1] = htons(ifp->if_index); + llsol.s6_addr32[1] = 0; + llsol.s6_addr32[2] = htonl(1); + llsol.s6_addr32[3] = + ia->ia_addr.sin6_addr.s6_addr32[3]; + llsol.s6_addr8[12] = 0xff; + + IN6_LOOKUP_MULTI(llsol, ifp, in6m); + if (in6m) + in6_delmulti(in6m); + } + + if (ia->ia_flags & IFA_ROUTE) { + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, 0); + ia->ia_flags &= ~IFA_ROUTE; + } + +#if defined(__bsdi__) || (defined(__FreeBSD__) && __FreeBSD__ < 3) + if ((ifa = ifp->if_addrlist) == (struct ifaddr *)ia) { + ifp->if_addrlist = ifa->ifa_next; + } else { + while (ifa->ifa_next && + (ifa->ifa_next != (struct ifaddr *)ia)) + ifa = ifa->ifa_next; + if (ifa->ifa_next) + ifa->ifa_next = ((struct ifaddr *)ia)->ifa_next; + else + return -1; + } +#else + TAILQ_REMOVE(&ifp->if_addrlist, (struct ifaddr *)ia, ifa_list); +#endif + IFAFREE(&ia->ia_ifa); + + /* lladdr is never deleted */ + oia = ia; + if (oia == (ia = in6_ifaddr)) + in6_ifaddr = ia->ia_next; + else { + while (ia->ia_next && (ia->ia_next != oia)) + ia = ia->ia_next; + if (ia->ia_next) + ia->ia_next = oia->ia_next; + else + return -1; + } + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + in6_savemkludge(oia); +#endif + IFAFREE((&oia->ia_ifa)); +/* xxx + rtrequest(RTM_DELETE, + (struct sockaddr *)&ia->ia_addr, + (struct 
sockaddr *)0 + (struct sockaddr *)&ia->ia_prefixmask, + RTF_UP|RTF_CLONING, + (struct rtentry **)0); +*/ + return 0; +} + +int +in6_init_prefix_ltimes(struct nd_prefix *ndpr) +{ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + /* check if preferred lifetime > valid lifetime */ + if (ndpr->ndpr_pltime > ndpr->ndpr_vltime) { + log(LOG_INFO, "in6_init_prefix_ltimes: preferred lifetime" + "(%d) is greater than valid lifetime(%d)\n", + (u_int)ndpr->ndpr_pltime, (u_int)ndpr->ndpr_vltime); + return (EINVAL); + } + if (ndpr->ndpr_pltime == ND6_INFINITE_LIFETIME) + ndpr->ndpr_preferred = 0; + else + ndpr->ndpr_preferred = time_second + ndpr->ndpr_pltime; + if (ndpr->ndpr_vltime == ND6_INFINITE_LIFETIME) + ndpr->ndpr_expire = 0; + else + ndpr->ndpr_expire = time_second + ndpr->ndpr_vltime; + + return 0; +} + +static void +in6_init_address_ltimes(struct nd_prefix *new, + struct in6_addrlifetime *lt6, + int update_vltime) +{ +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined (__APPLE__) + long time_second = time.tv_sec; +#endif + + /* Valid lifetime must not be updated unless explicitly specified. */ + if (update_vltime) { + /* init ia6t_expire */ + if (lt6->ia6t_vltime == ND6_INFINITE_LIFETIME) + lt6->ia6t_expire = 0; + else { + lt6->ia6t_expire = time_second; + lt6->ia6t_expire += lt6->ia6t_vltime; + } + /* Ensure addr lifetime <= prefix lifetime. */ + if (new->ndpr_expire && lt6->ia6t_expire && + new->ndpr_expire < lt6->ia6t_expire) + lt6->ia6t_expire = new->ndpr_expire; + } + + /* init ia6t_preferred */ + if (lt6->ia6t_pltime == ND6_INFINITE_LIFETIME) + lt6->ia6t_preferred = 0; + else { + lt6->ia6t_preferred = time_second; + lt6->ia6t_preferred += lt6->ia6t_pltime; + } + /* Ensure addr lifetime <= prefix lifetime. 
*/ + if (new->ndpr_preferred && lt6->ia6t_preferred + && new->ndpr_preferred < lt6->ia6t_preferred) + lt6->ia6t_preferred = new->ndpr_preferred; +} + +/* + * Delete all the routing table entries that use the specified gateway. + * XXX: this function causes search through all entries of routing table, so + * it shouldn't be called when acting as a router. + */ +void +rt6_flush(gateway, ifp) + struct in6_addr *gateway; + struct ifnet *ifp; +{ + struct radix_node_head *rnh = rt_tables[AF_INET6]; +#ifdef __NetBSD__ + int s = splsoftnet(); +#else + int s = splnet(); +#endif + + /* We'll care only link-local addresses */ + if (!IN6_IS_ADDR_LINKLOCAL(gateway)) { + splx(s); + return; + } + /* XXX: hack for KAME's link-local address kludge */ + gateway->s6_addr16[1] = htons(ifp->if_index); + + rnh->rnh_walktree(rnh, rt6_deleteroute, (void *)gateway); + splx(s); +} + +static int +rt6_deleteroute(rn, arg) + struct radix_node *rn; + void *arg; +{ +#define SIN6(s) ((struct sockaddr_in6 *)s) + struct rtentry *rt = (struct rtentry *)rn; + struct in6_addr *gate = (struct in6_addr *)arg; + + if (rt->rt_gateway == NULL || rt->rt_gateway->sa_family != AF_INET6) + return(0); + + if (!IN6_ARE_ADDR_EQUAL(gate, &SIN6(rt->rt_gateway)->sin6_addr)) + return(0); + + /* + * We delete only host route. This means, in particular, we don't + * delete default route. 
+ */ + if ((rt->rt_flags & RTF_HOST) == 0) + return(0); + + return(rtrequest(RTM_DELETE, rt_key(rt), + rt->rt_gateway, rt_mask(rt), rt->rt_flags, 0)); +#undef SIN6 +} + +int +nd6_setdefaultiface(ifindex) + int ifindex; +{ + int error = 0; + + if (ifindex < 0 || if_index < ifindex) + return(EINVAL); + + if (nd6_defifindex != ifindex) { + nd6_defifindex = ifindex; + if (nd6_defifindex > 0) + nd6_defifp = ifindex2ifnet[nd6_defifindex]; + else + nd6_defifp = NULL; + + /* + * If the Default Router List is empty, install a route + * to the specified interface as default or remove the default + * route when the default interface becomes canceled. + * The check for the queue is actually redundant, but + * we do this here to avoid re-install the default route + * if the list is NOT empty. + */ + if (TAILQ_FIRST(&nd_defrouter) == NULL) + defrouter_select(); + } + + return(error); +} diff --git a/bsd/netinet6/pim6.h b/bsd/netinet6/pim6.h new file mode 100644 index 000000000..81b2ab6c5 --- /dev/null +++ b/bsd/netinet6/pim6.h @@ -0,0 +1,68 @@ +/* $KAME: pim6.h,v 1.2 2000/02/22 14:04:33 itojun Exp $ */ + +/* + * Copyright (C) 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * Protocol Independent Multicast (PIM) definitions + * + * Written by Ahmed Helmy, SGI, July 1996 + * + * MULTICAST + */ + +/* + * PIM packet header + */ +#define PIM_VERSION 2 +struct pim { +#if defined(BYTE_ORDER) && (BYTE_ORDER == LITTLE_ENDIAN) + u_char pim_type:4, /* the PIM message type, currently they are: + * Hello, Register, Register-Stop, Join/Prune, + * Bootstrap, Assert, Graft (PIM-DM only), + * Graft-Ack (PIM-DM only), C-RP-Adv + */ + pim_ver:4; /* PIM version number; 2 for PIMv2 */ +#else + u_char pim_ver:4, /* PIM version */ + pim_type:4; /* PIM type */ +#endif + u_char pim_rsv; /* Reserved */ + u_short pim_cksum; /* IP style check sum */ +}; + +#define PIM_MINLEN 8 /* The header min. 
length is 8 */ +#define PIM6_REG_MINLEN (PIM_MINLEN+40) /* Register message + inner IP6 header */ + +/* + * Message types + */ +#define PIM_REGISTER 1 /* PIM Register type is 1 */ + +/* second bit in reg_head is the null bit */ +#define PIM_NULL_REGISTER 0x40000000 diff --git a/bsd/netinet6/pim6_var.h b/bsd/netinet6/pim6_var.h new file mode 100644 index 000000000..4b0530f82 --- /dev/null +++ b/bsd/netinet6/pim6_var.h @@ -0,0 +1,70 @@ +/* $KAME: pim6_var.h,v 1.5 2000/03/25 07:23:59 sumikawa Exp $ */ + +/* + * Copyright (C) 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETINET6_PIM6_VAR_H_ +#define _NETINET6_PIM6_VAR_H_ + +/* + * Protocol Independent Multicast (PIM), + * implementation-specific definitions. + * + * Written by George Edmond Eddy (Rusty), ISI, February 1998 + * Modified by Pavlin Ivanov Radoslavov, USC/ISI, May 1998 + */ + +struct pim6stat { + u_quad_t pim6s_rcv_total; /* total PIM messages received */ + u_quad_t pim6s_rcv_tooshort; /* received with too few bytes */ + u_quad_t pim6s_rcv_badsum; /* received with bad checksum */ + u_quad_t pim6s_rcv_badversion; /* received bad PIM version */ + u_quad_t pim6s_rcv_registers; /* received registers */ + u_quad_t pim6s_rcv_badregisters; /* received invalid registers */ + u_quad_t pim6s_snd_registers; /* sent registers */ +}; + +#if (defined(KERNEL)) || (defined(_KERNEL)) +extern struct pim6stat pim6stat; + +int pim6_input __P((struct mbuf **, int*, int)); +#endif /* KERNEL */ + +/* + * Names for PIM sysctl objects + */ +#define PIMCTL_STATS 1 /* statistics (read-only) */ +#define PIMCTL_MAXID 2 + +#define PIMCTL_NAMES { \ + { 0, 0 }, \ + { 0, 0 }, \ +} + +#endif /* _NETINET6_PIM6_VAR_H_ */ diff --git a/bsd/netinet6/raw_ip6.c b/bsd/netinet6/raw_ip6.c new file mode 100644 index 000000000..ea812b1de --- /dev/null +++ b/bsd/netinet6/raw_ip6.c @@ -0,0 +1,695 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)raw_ip.c 8.2 (Berkeley) 1/4/94 + */ +#if BSD310 +#include "opt_inet.h" +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif +#include +#include +#include + +#if IPSEC +#include +#endif /*IPSEC*/ + + +#include "faith.h" + +#define satosin6(sa) ((struct sockaddr_in6 *)(sa)) +#define ifatoia6(ifa) ((struct in6_ifaddr *)(ifa)) + +/* + * Raw interface to IP6 protocol. + */ + +extern struct inpcbhead ripcb; +extern struct inpcbinfo ripcbinfo; +extern u_long rip_sendspace; +extern u_long rip_recvspace; + +/* + * Setup generic address and protocol structures + * for raw_input routine, then pass them along with + * mbuf chain. + */ +int +rip6_input(mp, offp, proto) + struct mbuf **mp; + int *offp, proto; +{ + struct mbuf *m = *mp; + register struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); + register struct inpcb *in6p; + struct inpcb *last = 0; + struct ip6_recvpktopts opts; + struct sockaddr_in6 rip6src; + +#if defined(NFAITH) && 0 < NFAITH + if (m->m_pkthdr.rcvif) { + if (m->m_pkthdr.rcvif->if_type == IFT_FAITH) { + /* XXX send icmp6 host/port unreach? 
*/ + m_freem(m); + return IPPROTO_DONE; + } + } +#endif + init_sin6(&rip6src, m); /* general init */ + bzero(&opts, sizeof(opts)); + + LIST_FOREACH(in6p, &ripcb, inp_list) { + if ((in6p->in6p_vflag & INP_IPV6) == NULL) + continue; + if (in6p->in6p_ip6_nxt && + in6p->in6p_ip6_nxt != proto) + continue; + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) && + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) + continue; + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) + continue; + if (in6p->in6p_cksum != -1 + && in6_cksum(m, ip6->ip6_nxt, *offp, + m->m_pkthdr.len - *offp)) { + /* XXX bark something */ + continue; + } + if (last) { + struct mbuf *n = m_copy(m, 0, (int)M_COPYALL); + if (n) { + if (last->in6p_flags & IN6P_CONTROLOPTS || + last->in6p_socket->so_options & SO_TIMESTAMP) + ip6_savecontrol(last, ip6, n, &opts, + NULL); + /* strip intermediate headers */ + m_adj(n, *offp); + if (sbappendaddr(&last->in6p_socket->so_rcv, + (struct sockaddr *)&rip6src, + n, opts.head) == 0) { + /* should notify about lost packet */ + m_freem(n); + if (opts.head) + m_freem(opts.head); + } else + sorwakeup(last->in6p_socket); + bzero(&opts, sizeof(opts)); + } + } + last = in6p; + } + if (last) { + if (last->in6p_flags & IN6P_CONTROLOPTS || + last->in6p_socket->so_options & SO_TIMESTAMP) + ip6_savecontrol(last, ip6, m, &opts, NULL); + /* strip intermediate headers */ + m_adj(m, *offp); + if (sbappendaddr(&last->in6p_socket->so_rcv, + (struct sockaddr *)&rip6src, m, opts.head) == 0) { + m_freem(m); + if (opts.head) + m_freem(opts.head); + } else + sorwakeup(last->in6p_socket); + } else { + if (proto == IPPROTO_NONE) + m_freem(m); + else { + char *prvnxtp = ip6_get_prevhdr(m, *offp); /* XXX */ + icmp6_error(m, ICMP6_PARAM_PROB, + ICMP6_PARAMPROB_NEXTHEADER, + prvnxtp - mtod(m, char *)); + } + ip6stat.ip6s_delivered--; + } + return IPPROTO_DONE; +} + +void +rip6_ctlinput(cmd, sa, d) + int cmd; + struct sockaddr *sa; + 
void *d; +{ + struct sockaddr_in6 sa6; + struct ip6_hdr *ip6; + struct mbuf *m; + int off = 0; + void (*notify) __P((struct inpcb *, int)) = in6_rtchange; + + if (sa->sa_family != AF_INET6 || + sa->sa_len != sizeof(struct sockaddr_in6)) + return; + + if ((unsigned)cmd >= PRC_NCMDS) + return; + if (PRC_IS_REDIRECT(cmd)) + notify = in6_rtchange, d = NULL; + else if (cmd == PRC_HOSTDEAD) + d = NULL; + else if (inet6ctlerrmap[cmd] == 0) + return; + + /* if the parameter is from icmp6, decode it. */ + if (d != NULL) { + struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d; + m = ip6cp->ip6c_m; + ip6 = ip6cp->ip6c_ip6; + off = ip6cp->ip6c_off; + } else { + m = NULL; + ip6 = NULL; + } + + /* translate addresses into internal form */ + sa6 = *(struct sockaddr_in6 *)sa; + if (IN6_IS_ADDR_LINKLOCAL(&sa6.sin6_addr) && m && m->m_pkthdr.rcvif) + sa6.sin6_addr.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + + if (ip6) { + /* + * XXX: We assume that when IPV6 is non NULL, + * M and OFF are valid. + */ + struct in6_addr s; + + /* translate addresses into internal form */ + memcpy(&s, &ip6->ip6_src, sizeof(s)); + if (IN6_IS_ADDR_LINKLOCAL(&s)) + s.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + + (void) in6_pcbnotify(&ripcb, (struct sockaddr *)&sa6, + 0, &s, 0, cmd, notify); + } else + (void) in6_pcbnotify(&ripcb, (struct sockaddr *)&sa6, 0, + &zeroin6_addr, 0, cmd, notify); +} + +/* + * Generate IPv6 header and pass packet to ip6_output. + * Tack on options user may have setup with control call. 
+ */ +int +rip6_output(m, so, dstsock, control) + register struct mbuf *m; + struct socket *so; + struct sockaddr_in6 *dstsock; + struct mbuf *control; +{ + struct in6_addr *dst; + struct ip6_hdr *ip6; + struct inpcb *in6p; + u_int plen = m->m_pkthdr.len; + int error = 0; + struct ip6_pktopts opt, *optp = 0; + struct ifnet *oifp = NULL; + int type = 0, code = 0; /* for ICMPv6 output statistics only */ + int priv = 0; + + in6p = sotoin6pcb(so); + + priv = 0; +#if !defined(__APPLE__) + { + struct proc *p = current_proc(); /* XXX */ + + if (p && !suser(p->p_ucred, &p->p_acflag)) + priv = 1; + } +#else + if ((so->so_state & SS_PRIV) != 0) + priv = 1; +#endif + dst = &dstsock->sin6_addr; + if (control) { + if ((error = ip6_setpktoptions(control, &opt, priv, 0)) != 0) + goto bad; + optp = &opt; + } else + optp = in6p->in6p_outputopts; + + /* + * For an ICMPv6 packet, we should know its type and code + * to update statistics. + */ + if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) { + struct icmp6_hdr *icmp6; + if (m->m_len < sizeof(struct icmp6_hdr) && + (m = m_pullup(m, sizeof(struct icmp6_hdr))) == NULL) { + error = ENOBUFS; + goto bad; + } + icmp6 = mtod(m, struct icmp6_hdr *); + type = icmp6->icmp6_type; + code = icmp6->icmp6_code; + } + + M_PREPEND(m, sizeof(*ip6), M_WAIT); + ip6 = mtod(m, struct ip6_hdr *); + + /* + * Next header might not be ICMP6 but use its pseudo header anyway. + */ + ip6->ip6_dst = *dst; + + /* + * If the scope of the destination is link-local, embed the interface + * index in the address. + * + * XXX advanced-api value overrides sin6_scope_id + */ + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) { + struct in6_pktinfo *pi; + + /* + * XXX Boundary check is assumed to be already done in + * ip6_setpktoptions(). 
+ */ + if (optp && (pi = optp->ip6po_pktinfo) && pi->ipi6_ifindex) { + ip6->ip6_dst.s6_addr16[1] = htons(pi->ipi6_ifindex); + oifp = ifindex2ifnet[pi->ipi6_ifindex]; + } + else if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) && + in6p->in6p_moptions && + in6p->in6p_moptions->im6o_multicast_ifp) { + oifp = in6p->in6p_moptions->im6o_multicast_ifp; + ip6->ip6_dst.s6_addr16[1] = htons(oifp->if_index); + } else if (dstsock->sin6_scope_id) { + /* boundary check */ + if (dstsock->sin6_scope_id < 0 + || if_index < dstsock->sin6_scope_id) { + error = ENXIO; /* XXX EINVAL? */ + goto bad; + } + ip6->ip6_dst.s6_addr16[1] + = htons(dstsock->sin6_scope_id & 0xffff);/*XXX*/ + } + } + + /* + * Source address selection. + */ + { + struct in6_addr *in6a; + + if ((in6a = in6_selectsrc(dstsock, optp, + in6p->in6p_moptions, + &in6p->in6p_route, + &in6p->in6p_laddr, + &error)) == 0) { + if (error == 0) + error = EADDRNOTAVAIL; + goto bad; + } + ip6->ip6_src = *in6a; + if (in6p->in6p_route.ro_rt) + oifp = ifindex2ifnet[in6p->in6p_route.ro_rt->rt_ifp->if_index]; + } + + ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK; + ip6->ip6_vfc |= IPV6_VERSION; +#if 0 /* ip6_plen will be filled in ip6_output. 
*/ + ip6->ip6_plen = htons((u_short)plen); +#endif + ip6->ip6_nxt = in6p->in6p_ip6_nxt; + ip6->ip6_hlim = in6_selecthlim(in6p, oifp); + + if (so->so_proto->pr_protocol == IPPROTO_ICMPV6 || + in6p->in6p_cksum != -1) { + struct mbuf *n; + int off; + u_int16_t *p; + +#define offsetof(type, member) ((size_t)(&((type *)0)->member)) /* XXX */ + + /* compute checksum */ + if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) + off = offsetof(struct icmp6_hdr, icmp6_cksum); + else + off = in6p->in6p_cksum; + if (plen < off + 1) { + error = EINVAL; + goto bad; + } + off += sizeof(struct ip6_hdr); + + n = m; + while (n && n->m_len <= off) { + off -= n->m_len; + n = n->m_next; + } + if (!n) + goto bad; + p = (u_int16_t *)(mtod(n, caddr_t) + off); + *p = 0; + *p = in6_cksum(m, ip6->ip6_nxt, sizeof(*ip6), plen); + } + +#if IPSEC + ipsec_setsocket(m, so); +#endif /*IPSEC*/ + + error = ip6_output(m, optp, &in6p->in6p_route, 0, in6p->in6p_moptions, + &oifp); + if (so->so_proto->pr_protocol == IPPROTO_ICMPV6) { + if (oifp) + icmp6_ifoutstat_inc(oifp, type, code); + icmp6stat.icp6s_outhist[type]++; + } + + goto freectl; + + bad: + if (m) + m_freem(m); + + freectl: + if (optp == &opt && optp->ip6po_rthdr && optp->ip6po_route.ro_rt) + RTFREE(optp->ip6po_route.ro_rt); + if (control) { + if (optp == &opt) + ip6_clearpktopts(optp, 0, -1); + m_freem(control); + } + return(error); +} + +/* + * Raw IPv6 socket option processing. + */ +int +rip6_ctloutput(so, sopt) + struct socket *so; + struct sockopt *sopt; +{ + int error; + + if (sopt->sopt_level == IPPROTO_ICMPV6) + /* + * XXX: is it better to call icmp6_ctloutput() directly + * from protosw? 
+ */ + return(icmp6_ctloutput(so, sopt)); + else if (sopt->sopt_level != IPPROTO_IPV6) + return (EINVAL); + + error = 0; + + switch (sopt->sopt_dir) { + case SOPT_GET: + switch (sopt->sopt_name) { + case MRT6_INIT: + case MRT6_DONE: + case MRT6_ADD_MIF: + case MRT6_DEL_MIF: + case MRT6_ADD_MFC: + case MRT6_DEL_MFC: + case MRT6_PIM: + error = ip6_mrouter_get(so, sopt); + break; + default: + error = ip6_ctloutput(so, sopt); + break; + } + break; + + case SOPT_SET: + switch (sopt->sopt_name) { + case MRT6_INIT: + case MRT6_DONE: + case MRT6_ADD_MIF: + case MRT6_DEL_MIF: + case MRT6_ADD_MFC: + case MRT6_DEL_MFC: + case MRT6_PIM: + error = ip6_mrouter_set(so, sopt); + break; + default: + error = ip6_ctloutput(so, sopt); + break; + } + break; + } + + return (error); +} + +static int +rip6_attach(struct socket *so, int proto, struct proc *p) +{ + struct inpcb *inp; + int error, s; + + inp = sotoinpcb(so); + if (inp) + panic("rip6_attach"); + if (p && (error = suser(p->p_ucred, &p->p_acflag)) != 0) + return error; + + if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { + error = soreserve(so, rip_sendspace, rip_recvspace); + if (error) + return error; + } + s = splnet(); + error = in_pcballoc(so, &ripcbinfo, p); + splx(s); + if (error) + return error; + inp = (struct inpcb *)so->so_pcb; + inp->inp_vflag |= INP_IPV6; + inp->in6p_ip6_nxt = (long)proto; + inp->in6p_hops = -1; /* use kernel default */ + inp->in6p_cksum = -1; +#if IPSEC + error = ipsec_init_policy(so, &inp->in6p_sp); + if (error != 0) { + in6_pcbdetach(inp); + return (error); + } +#endif /*IPSEC*/ + MALLOC(inp->in6p_icmp6filt, struct icmp6_filter *, + sizeof(struct icmp6_filter), M_PCB, M_NOWAIT); + ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt); + return 0; +} + +static int +rip6_detach(struct socket *so) +{ + struct inpcb *inp; + + inp = sotoinpcb(so); + if (inp == 0) + panic("rip6_detach"); + if (so == ip6_mrouter) + ip6_mrouter_done(); + /* xxx: RSVP */ + if (inp->in6p_icmp6filt) { + 
FREE(inp->in6p_icmp6filt, M_PCB); + inp->in6p_icmp6filt = NULL; + } + in6_pcbdetach(inp); + return 0; +} + +static int +rip6_abort(struct socket *so) +{ + soisdisconnected(so); + return rip6_detach(so); +} + +static int +rip6_disconnect(struct socket *so) +{ + struct inpcb *inp = sotoinpcb(so); + + if ((so->so_state & SS_ISCONNECTED) == 0) + return ENOTCONN; + inp->in6p_faddr = in6addr_any; + return rip6_abort(so); +} + +static int +rip6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp = sotoinpcb(so); + struct sockaddr_in6 *addr = (struct sockaddr_in6 *)nam; + struct ifaddr *ia = NULL; + + if (nam->sa_len != sizeof(*addr)) + return EINVAL; + + if (TAILQ_EMPTY(&ifnet) || addr->sin6_family != AF_INET6) + return EADDRNOTAVAIL; + if (!IN6_IS_ADDR_UNSPECIFIED(&addr->sin6_addr) && + (ia = ifa_ifwithaddr((struct sockaddr *)addr)) == 0) + return EADDRNOTAVAIL; + if (ia && + ((struct in6_ifaddr *)ia)->ia6_flags & + (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY| + IN6_IFF_DETACHED|IN6_IFF_DEPRECATED)) { + return(EADDRNOTAVAIL); + } + inp->in6p_laddr = addr->sin6_addr; + return 0; +} + +static int +rip6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp = sotoinpcb(so); + struct sockaddr_in6 *addr = (struct sockaddr_in6 *)nam; + struct in6_addr *in6a = NULL; + int error = 0; + + if (nam->sa_len != sizeof(*addr)) + return EINVAL; + if (TAILQ_EMPTY(&ifnet)) + return EADDRNOTAVAIL; + if (addr->sin6_family != AF_INET6) + return EAFNOSUPPORT; + + /* Source address selection. XXX: need pcblookup? */ + in6a = in6_selectsrc(addr, inp->in6p_outputopts, + inp->in6p_moptions, &inp->in6p_route, + &inp->in6p_laddr, &error); + if (in6a == NULL) + return (error ? 
error : EADDRNOTAVAIL); + inp->in6p_laddr = *in6a; + inp->in6p_faddr = addr->sin6_addr; + soisconnected(so); + return 0; +} + +static int +rip6_shutdown(struct socket *so) +{ + socantsendmore(so); + return 0; +} + +static int +rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, + struct mbuf *control, struct proc *p) +{ + struct inpcb *inp = sotoinpcb(so); + struct sockaddr_in6 tmp; + struct sockaddr_in6 *dst; + + if (so->so_state & SS_ISCONNECTED) { + if (nam) { + m_freem(m); + return EISCONN; + } + /* XXX */ + bzero(&tmp, sizeof(tmp)); + tmp.sin6_family = AF_INET6; + tmp.sin6_len = sizeof(struct sockaddr_in6); + bcopy(&inp->in6p_faddr, &tmp.sin6_addr, + sizeof(struct in6_addr)); + dst = &tmp; + } else { + if (nam == NULL) { + m_freem(m); + return ENOTCONN; + } + dst = (struct sockaddr_in6 *)nam; + } + return rip6_output(m, so, dst, control); +} + +struct pr_usrreqs rip6_usrreqs = { + rip6_abort, pru_accept_notsupp, rip6_attach, rip6_bind, rip6_connect, + pru_connect2_notsupp, in6_control, rip6_detach, rip6_disconnect, + pru_listen_notsupp, in6_setpeeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, rip6_send, pru_sense_null, rip6_shutdown, + in6_setsockaddr, sosend, soreceive, sopoll +}; diff --git a/bsd/netinet6/route6.c b/bsd/netinet6/route6.c new file mode 100644 index 000000000..25d86cf55 --- /dev/null +++ b/bsd/netinet6/route6.c @@ -0,0 +1,193 @@ +/* $KAME: route6.c,v 1.10 2000/02/22 14:04:34 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include + +#if MIP6 +#include +#include +#endif + +static int ip6_rthdr0 __P((struct mbuf *, struct ip6_hdr *, + struct ip6_rthdr0 *)); + + +int +route6_input(mp, offp, proto) + struct mbuf **mp; + int *offp, proto; /* proto is unused */ +{ + register struct ip6_hdr *ip6; + register struct mbuf *m = *mp; + register struct ip6_rthdr *rh; + int off = *offp, rhlen; + +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, sizeof(*rh), IPPROTO_DONE); + ip6 = mtod(m, struct ip6_hdr *); + rh = (struct ip6_rthdr *)((caddr_t)ip6 + off); +#else + ip6 = mtod(m, struct ip6_hdr *); + IP6_EXTHDR_GET(rh, struct ip6_rthdr *, m, off, sizeof(*rh)); + if (rh == NULL) { + ip6stat.ip6s_tooshort++; + return IPPROTO_DONE; + } +#endif + + switch(rh->ip6r_type) { + case IPV6_RTHDR_TYPE_0: + rhlen = (rh->ip6r_len + 1) << 3; +#ifndef PULLDOWN_TEST + IP6_EXTHDR_CHECK(m, off, rhlen, IPPROTO_DONE); +#else + IP6_EXTHDR_GET(rh, struct ip6_rthdr *, m, off, rhlen); + if (rh == NULL) { + ip6stat.ip6s_tooshort++; + return IPPROTO_DONE; + } +#endif + if (ip6_rthdr0(m, ip6, (struct ip6_rthdr0 *)rh)) + return(IPPROTO_DONE); + break; + default: + /* unknown routing type */ + if (rh->ip6r_segleft == 0) { + rhlen = (rh->ip6r_len + 1) << 3; + break; /* Final dst. Just ignore the header. */ + } + ip6stat.ip6s_badoptions++; + icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, + (caddr_t)&rh->ip6r_type - (caddr_t)ip6); + return(IPPROTO_DONE); + } + + *offp += rhlen; + return(rh->ip6r_nxt); +} + +/* + * Type0 routing header processing + */ +static int +ip6_rthdr0(m, ip6, rh0) + struct mbuf *m; + struct ip6_hdr *ip6; + struct ip6_rthdr0 *rh0; +{ + int addrs, index; + struct in6_addr *nextaddr, tmpaddr; + + if (rh0->ip6r0_segleft == 0) + return(0); + + if (rh0->ip6r0_len % 2 +#if COMPAT_RFC1883 + || rh0->ip6r0_len > 46 +#endif + ) { + /* + * Type 0 routing header can't contain more than 23 addresses. 
+ * RFC 2462: this limitation was removed since strict/loose + * bitmap field was deleted. + */ + ip6stat.ip6s_badoptions++; + icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, + (caddr_t)&rh0->ip6r0_len - (caddr_t)ip6); + return(-1); + } + + if ((addrs = rh0->ip6r0_len / 2) < rh0->ip6r0_segleft) { + ip6stat.ip6s_badoptions++; + icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, + (caddr_t)&rh0->ip6r0_segleft - (caddr_t)ip6); + return(-1); + } + + index = addrs - rh0->ip6r0_segleft; + rh0->ip6r0_segleft--; + nextaddr = ((struct in6_addr *)(rh0 + 1)) + index; + + /* + * reject invalid addresses. be proactive about malicious use of + * IPv4 mapped/compat address. + * XXX need more checks? + */ + if (IN6_IS_ADDR_MULTICAST(nextaddr) || + IN6_IS_ADDR_UNSPECIFIED(nextaddr) || + IN6_IS_ADDR_V4MAPPED(nextaddr) || + IN6_IS_ADDR_V4COMPAT(nextaddr)) { + ip6stat.ip6s_badoptions++; + m_freem(m); + return(-1); + } + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || + IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst) || + IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst) || + IN6_IS_ADDR_V4COMPAT(nextaddr)) { + ip6stat.ip6s_badoptions++; + m_freem(m); + return(-1); + } + + /* + * Swap the IPv6 destination address and nextaddr. Forward the packet. 
+ */ + tmpaddr = *nextaddr; + *nextaddr = ip6->ip6_dst; + if (IN6_IS_ADDR_LINKLOCAL(nextaddr)) + nextaddr->s6_addr16[1] = 0; + ip6->ip6_dst = tmpaddr; + if (IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst)) + ip6->ip6_dst.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + +#if COMPAT_RFC1883 + if (rh0->ip6r0_slmap[index / 8] & (1 << (7 - (index % 8)))) + ip6_forward(m, IPV6_SRCRT_NEIGHBOR); + else + ip6_forward(m, IPV6_SRCRT_NOTNEIGHBOR); +#else + ip6_forward(m, 1); +#endif + + return(-1); /* m would be freed in ip6_forward() */ +} diff --git a/bsd/netinet6/udp6.h b/bsd/netinet6/udp6.h new file mode 100644 index 000000000..8e8b9b028 --- /dev/null +++ b/bsd/netinet6/udp6.h @@ -0,0 +1,38 @@ +/* $KAME: udp6.h,v 1.2 2000/02/22 14:04:37 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETINET6_UDP6_H_ +#define _NETINET6_UDP6_H_ + +#include + +#endif /*_NETINET6_UDP6_H_*/ + diff --git a/bsd/netinet6/udp6_usrreq.c b/bsd/netinet6/udp6_usrreq.c new file mode 100644 index 000000000..c6bfcc685 --- /dev/null +++ b/bsd/netinet6/udp6_usrreq.c @@ -0,0 +1,861 @@ +/* $KAME: udp6_usrreq.c,v 1.25 2000/04/04 11:18:10 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)udp_var.h 8.1 (Berkeley) 6/10/93 + */ +#if BSD310 +#include "opt_inet.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if IPSEC +#include +#endif /*IPSEC*/ + +#include "faith.h" + +/* + * UDP protocol implementation. + * Per RFC 768, August, 1980. 
+ */ + +extern struct protosw inetsw[]; +static int in6_mcmatch __P((struct inpcb *, struct in6_addr *, struct ifnet *)); +static int udp6_detach __P((struct socket *so)); + +static int +in6_mcmatch(in6p, ia6, ifp) + struct inpcb *in6p; + register struct in6_addr *ia6; + struct ifnet *ifp; +{ + struct ip6_moptions *im6o = in6p->in6p_moptions; + struct in6_multi_mship *imm; + + if (im6o == NULL) + return 0; + + for (imm = im6o->im6o_memberships.lh_first; imm != NULL; + imm = imm->i6mm_chain.le_next) { + if ((ifp == NULL || + imm->i6mm_maddr->in6m_ifp == ifp) && + IN6_ARE_ADDR_EQUAL(&imm->i6mm_maddr->in6m_addr, + ia6)) + return 1; + } + return 0; +} + +int +udp6_input(mp, offp, proto) + struct mbuf **mp; + int *offp, proto; +{ + struct mbuf *m = *mp; + register struct ip6_hdr *ip6; + register struct udphdr *uh; + register struct inpcb *in6p; + struct ip6_recvpktopts opts; + int off = *offp; + int plen, ulen; + struct sockaddr_in6 udp_in6; + +#if defined(NFAITH) && 0 < NFAITH + if (m->m_pkthdr.rcvif) { + if (m->m_pkthdr.rcvif->if_type == IFT_FAITH) { + /* XXX send icmp6 host/port unreach? */ + m_freem(m); + return IPPROTO_DONE; + } + } +#endif + udpstat.udps_ipackets++; + bzero(&opts, sizeof(opts)); + + IP6_EXTHDR_CHECK(m, off, sizeof(struct udphdr), IPPROTO_DONE); + + ip6 = mtod(m, struct ip6_hdr *); + plen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6); + uh = (struct udphdr *)((caddr_t)ip6 + off); + ulen = ntohs((u_short)uh->uh_ulen); + + if (plen != ulen) { + udpstat.udps_badlen++; + goto bad; + } + + /* + * Checksum extended UDP header and data. + */ +// if (uh->uh_sum == 0) +// udpstat.udps_nosum++; + else if (in6_cksum(m, IPPROTO_UDP, off, ulen) != 0) { + udpstat.udps_badsum++; + goto bad; + } + + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { + struct inpcb *last; + + /* + * Deliver a multicast datagram to all sockets + * for which the local and remote addresses and ports match + * those of the incoming datagram. 
This allows more than + * one process to receive multicasts on the same port. + * (This really ought to be done for unicast datagrams as + * well, but that would cause problems with existing + * applications that open both address-specific sockets and + * a wildcard socket listening to the same port -- they would + * end up receiving duplicates of every unicast datagram. + * Those applications open the multiple sockets to overcome an + * inadequacy of the UDP socket interface, but for backwards + * compatibility we avoid the problem here rather than + * fixing the interface. Maybe 4.5BSD will remedy this?) + */ + + /* + * In a case that laddr should be set to the link-local + * address (this happens in RIPng), the multicast address + * specified in the received packet does not match with + * laddr. To cure this situation, the matching is relaxed + * if the receiving interface is the same as one specified + * in the socket and if the destination multicast address + * matches one of the multicast groups specified in the socket. + */ + + /* + * Construct sockaddr format source address. + */ + init_sin6(&udp_in6, m); /* general init */ + udp_in6.sin6_port = uh->uh_sport; + /* + * KAME note: usually we drop udphdr from mbuf here. + * We need udphdr for IPsec processing so we do that later. + */ + + /* + * Locate pcb(s) for datagram. + * (Algorithm copied from raw_intr().) 
+ */ + last = NULL; + LIST_FOREACH(in6p, &udb, inp_list) { + if ((in6p->inp_vflag & INP_IPV6) == 0) + continue; + if (in6p->in6p_lport != uh->uh_dport) + continue; + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr)) { + if (!IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, + &ip6->ip6_dst) && + !in6_mcmatch(in6p, &ip6->ip6_dst, + m->m_pkthdr.rcvif)) + continue; + } + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) { + if (!IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, + &ip6->ip6_src) || + in6p->in6p_fport != uh->uh_sport) + continue; + } + + if (last != NULL) { + struct mbuf *n; + +#if IPSEC + /* + * Check AH/ESP integrity. + */ + if (ipsec6_in_reject_so(m, last->inp_socket)) + ipsec6stat.in_polvio++; + /* do not inject data into pcb */ + else +#endif /*IPSEC*/ + if ((n = m_copy(m, 0, M_COPYALL)) != NULL) { + /* + * KAME NOTE: do not + * m_copy(m, offset, ...) above. + * sbappendaddr() expects M_PKTHDR, + * and m_copy() will copy M_PKTHDR + * only if offset is 0. + */ + if (last->in6p_flags & IN6P_CONTROLOPTS + || last->in6p_socket->so_options & SO_TIMESTAMP) + ip6_savecontrol(last, ip6, n, + &opts, NULL); + + m_adj(n, off + sizeof(struct udphdr)); + if (sbappendaddr(&last->in6p_socket->so_rcv, + (struct sockaddr *)&udp_in6, + n, opts.head) == 0) { + m_freem(n); + if (opts.head) + m_freem(opts.head); + udpstat.udps_fullsock++; + } else + sorwakeup(last->in6p_socket); + bzero(&opts, sizeof(opts)); + } + } + last = in6p; + /* + * Don't look for additional matches if this one does + * not have either the SO_REUSEPORT or SO_REUSEADDR + * socket options set. This heuristic avoids searching + * through all pcbs in the common case of a non-shared + * port. It assumes that an application will never + * clear these options after setting them. + */ + if ((last->in6p_socket->so_options & + (SO_REUSEPORT|SO_REUSEADDR)) == 0) + break; + } + + if (last == NULL) { + /* + * No matching pcb found; discard datagram. 
+ * (No need to send an ICMP Port Unreachable + * for a broadcast or multicast datgram.) + */ + udpstat.udps_noport++; +// udpstat.udps_noportmcast++; + goto bad; + } +#if IPSEC + /* + * Check AH/ESP integrity. + */ + if (ipsec6_in_reject_so(m, last->inp_socket)) { + ipsec6stat.in_polvio++; + goto bad; + } +#endif /*IPSEC*/ + if (last->in6p_flags & IN6P_CONTROLOPTS + || last->in6p_socket->so_options & SO_TIMESTAMP) + ip6_savecontrol(last, ip6, m, &opts, NULL); + + m_adj(m, off + sizeof(struct udphdr)); + if (sbappendaddr(&last->in6p_socket->so_rcv, + (struct sockaddr *)&udp_in6, + m, opts.head) == 0) { + udpstat.udps_fullsock++; + goto bad; + } + sorwakeup(last->in6p_socket); + return IPPROTO_DONE; + } + /* + * Locate pcb for datagram. + */ + in6p = in6_pcblookup_hash(&udbinfo, &ip6->ip6_src, uh->uh_sport, + &ip6->ip6_dst, uh->uh_dport, 1, + m->m_pkthdr.rcvif); + if (in6p == 0) { + if (log_in_vain) { + char buf[INET6_ADDRSTRLEN]; + + strcpy(buf, ip6_sprintf(&ip6->ip6_dst)); + log(LOG_INFO, + "Connection attempt to UDP %s:%d from %s:%d\n", + buf, ntohs(uh->uh_dport), + ip6_sprintf(&ip6->ip6_src), ntohs(uh->uh_sport)); + } + udpstat.udps_noport++; + if (m->m_flags & M_MCAST) { + printf("UDP6: M_MCAST is set in a unicast packet.\n"); +// udpstat.udps_noportmcast++; + goto bad; + } + icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT, 0); + return IPPROTO_DONE; + } +#if IPSEC + /* + * Check AH/ESP integrity. + */ + if (ipsec6_in_reject_so(m, in6p->in6p_socket)) { + ipsec6stat.in_polvio++; + goto bad; + } +#endif /*IPSEC*/ + + /* + * Construct sockaddr format source address. + * Stuff source address and datagram in user buffer. 
+ */ + init_sin6(&udp_in6, m); /* general init */ + udp_in6.sin6_port = uh->uh_sport; + if (in6p->in6p_flags & IN6P_CONTROLOPTS + || in6p->in6p_socket->so_options & SO_TIMESTAMP) + ip6_savecontrol(in6p, ip6, m, &opts, NULL); + m_adj(m, off + sizeof(struct udphdr)); + if (sbappendaddr(&in6p->in6p_socket->so_rcv, + (struct sockaddr *)&udp_in6, + m, opts.head) == 0) { + udpstat.udps_fullsock++; + goto bad; + } + sorwakeup(in6p->in6p_socket); + return IPPROTO_DONE; +bad: + if (m) + m_freem(m); + if (opts.head) + m_freem(opts.head); + return IPPROTO_DONE; +} + +void +udp6_ctlinput(cmd, sa, d) + int cmd; + struct sockaddr *sa; + void *d; +{ + register struct udphdr *uhp; + struct udphdr uh; + struct sockaddr_in6 sa6; + struct ip6_hdr *ip6; + struct mbuf *m; + int off = 0; + void (*notify) __P((struct inpcb *, int)) = udp_notify; + + if (sa->sa_family != AF_INET6 || + sa->sa_len != sizeof(struct sockaddr_in6)) + return; + + if ((unsigned)cmd >= PRC_NCMDS) + return; + if (PRC_IS_REDIRECT(cmd)) + notify = in6_rtchange, d = NULL; + else if (cmd == PRC_HOSTDEAD) + d = NULL; + else if (inet6ctlerrmap[cmd] == 0) + return; + + /* if the parameter is from icmp6, decode it. */ + if (d != NULL) { + struct ip6ctlparam *ip6cp = (struct ip6ctlparam *)d; + m = ip6cp->ip6c_m; + ip6 = ip6cp->ip6c_ip6; + off = ip6cp->ip6c_off; + } else { + m = NULL; + ip6 = NULL; + } + + /* translate addresses into internal form */ + sa6 = *(struct sockaddr_in6 *)sa; + if (IN6_IS_ADDR_LINKLOCAL(&sa6.sin6_addr) && m && m->m_pkthdr.rcvif) + sa6.sin6_addr.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + + if (ip6) { + /* + * XXX: We assume that when IPV6 is non NULL, + * M and OFF are valid. + */ + struct in6_addr s; + + /* translate addresses into internal form */ + memcpy(&s, &ip6->ip6_src, sizeof(s)); + if (IN6_IS_ADDR_LINKLOCAL(&s)) + s.s6_addr16[1] = htons(m->m_pkthdr.rcvif->if_index); + + if (m->m_len < off + sizeof(uh)) { + /* + * this should be rare case, + * so we compromise on this copy... 
+ */ + m_copydata(m, off, sizeof(uh), (caddr_t)&uh); + uhp = &uh; + } else + uhp = (struct udphdr *)(mtod(m, caddr_t) + off); + (void) in6_pcbnotify(&udb, (struct sockaddr *)&sa6, + uhp->uh_dport, &s, + uhp->uh_sport, cmd, notify); + } else + (void) in6_pcbnotify(&udb, (struct sockaddr *)&sa6, 0, + &zeroin6_addr, 0, cmd, notify); +} +#if 0 +static int +udp6_getcred SYSCTL_HANDLER_ARGS +{ + struct sockaddr_in6 addrs[2]; + struct inpcb *inp; + int error, s; + + error = suser(req->p->p_ucred, &req->p->p_acflag); + if (error) + return (error); + + if (req->newlen != sizeof(addrs)) + return (EINVAL); + if (req->oldlen != sizeof(struct ucred)) + return (EINVAL); + error = SYSCTL_IN(req, addrs, sizeof(addrs)); + if (error) + return (error); + s = splnet(); + inp = in6_pcblookup_hash(&udbinfo, &addrs[1].sin6_addr, + addrs[1].sin6_port, + &addrs[0].sin6_addr, addrs[0].sin6_port, + 1, NULL); + if (!inp || !inp->inp_socket || !inp->inp_socket->so_cred) { + error = ENOENT; + goto out; + } + error = SYSCTL_OUT(req, inp->inp_socket->so_cred->pc_ucred, + sizeof(struct ucred)); + +out: + splx(s); + return (error); +} + +SYSCTL_PROC(_net_inet6_udp6, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW, + 0, 0, + udp6_getcred, "S,ucred", "Get the ucred of a UDP6 connection"); +#endif +int +udp6_output(in6p, m, addr6, control, p) + register struct inpcb *in6p; + register struct mbuf *m; + struct sockaddr *addr6; + struct mbuf *control; + struct proc *p; +{ + register int ulen = m->m_pkthdr.len; + int plen = sizeof(struct udphdr) + ulen; + struct ip6_hdr *ip6; + struct udphdr *udp6; + struct in6_addr laddr6; + int s = 0, error = 0; + struct ip6_pktopts opt, *stickyopt = in6p->in6p_outputopts; + int flags; + + if (control) { + if (error = ip6_setpktoptions(control, &opt, + p && + suser(p->p_ucred, &p->p_acflag), 0)) + goto release; + in6p->in6p_outputopts = &opt; + } + + if (addr6) { + laddr6 = in6p->in6p_laddr; + if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) { + error = EISCONN; + goto 
release; + } + /* + * Must block input while temporarily connected. + */ + s = splnet(); + /* + * XXX: the user might want to overwrite the local address + * via an ancillary data. + */ + bzero(&in6p->in6p_laddr, sizeof(struct in6_addr)); + error = in6_pcbconnect(in6p, addr6, p); + if (error) { + splx(s); + goto release; + } + } else { + if (IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) { + error = ENOTCONN; + goto release; + } + } + /* + * Calculate data length and get a mbuf + * for UDP and IP6 headers. + */ + M_PREPEND(m, sizeof(struct ip6_hdr) + sizeof(struct udphdr), + M_DONTWAIT); + if (m == 0) { + error = ENOBUFS; + if (addr6) + splx(s); + goto release; + } + + /* + * Stuff checksum and output datagram. + */ + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_flow = in6p->in6p_flowinfo & IPV6_FLOWINFO_MASK; + ip6->ip6_vfc |= IPV6_VERSION; +#if 0 /* ip6_plen will be filled in ip6_output. */ + ip6->ip6_plen = htons((u_short)plen); +#endif + ip6->ip6_nxt = IPPROTO_UDP; + ip6->ip6_hlim = in6_selecthlim(in6p, + in6p->in6p_route.ro_rt ? 
+ in6p->in6p_route.ro_rt->rt_ifp : + NULL); + ip6->ip6_src = in6p->in6p_laddr; + ip6->ip6_dst = in6p->in6p_faddr; + + udp6 = (struct udphdr *)(ip6 + 1); + udp6->uh_sport = in6p->in6p_lport; + udp6->uh_dport = in6p->in6p_fport; + udp6->uh_ulen = htons((u_short)plen); + udp6->uh_sum = 0; + + if ((udp6->uh_sum = in6_cksum(m, IPPROTO_UDP, + sizeof(struct ip6_hdr), plen)) == 0) { + udp6->uh_sum = 0xffff; + } + + flags = 0; + if (in6p->in6p_flags & IN6P_MINMTU) + flags |= IPV6_MINMTU; + + udpstat.udps_opackets++; + +#if IPSEC + ipsec_setsocket(m, in6p->in6p_socket); +#endif /*IPSEC*/ + error = ip6_output(m, in6p->in6p_outputopts, &in6p->in6p_route, + flags, in6p->in6p_moptions, NULL); + + if (addr6) { + in6_pcbdisconnect(in6p); + in6p->in6p_laddr = laddr6; + splx(s); + } + goto releaseopt; + +release: + m_freem(m); + +releaseopt: + if (control) { + ip6_clearpktopts(in6p->in6p_outputopts, 0, -1); + in6p->in6p_outputopts = stickyopt; + m_freem(control); + } + return(error); +} + +static int +udp6_abort(struct socket *so) +{ + struct inpcb *inp; + int s; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; /* ??? possible? panic instead? 
*/ + soisdisconnected(so); + s = splnet(); + in6_pcbdetach(inp); + splx(s); + return 0; +} + +static int +udp6_attach(struct socket *so, int proto, struct proc *p) +{ + struct inpcb *inp; + int s, error; + + inp = sotoinpcb(so); + if (inp != 0) + return EINVAL; + + if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { + error = soreserve(so, udp_sendspace, udp_recvspace); + if (error) + return error; + } + s = splnet(); + error = in_pcballoc(so, &udbinfo, p); + splx(s); + if (error) + return error; + inp = (struct inpcb *)so->so_pcb; + inp->inp_vflag |= INP_IPV6; + inp->in6p_hops = -1; /* use kernel default */ + inp->in6p_cksum = -1; /* just to be sure */ +#if IPSEC + error = ipsec_init_policy(so, &inp->in6p_sp); + if (error != 0) { + in6_pcbdetach(inp); + return (error); + } +#endif /*IPSEC*/ + return 0; +} + +static int +udp6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp; + int s, error; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + + inp->inp_vflag &= ~INP_IPV4; + inp->inp_vflag |= INP_IPV6; + if (ip6_mapped_addr_on && (inp->inp_flags & IN6P_BINDV6ONLY) == 0) { + struct sockaddr_in6 *sin6_p; + + sin6_p = (struct sockaddr_in6 *)nam; + + if (IN6_IS_ADDR_UNSPECIFIED(&sin6_p->sin6_addr)) + inp->inp_vflag |= INP_IPV4; + else if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { + struct sockaddr_in sin; + + in6_sin6_2_sin(&sin, sin6_p); + inp->inp_vflag |= INP_IPV4; + inp->inp_vflag &= ~INP_IPV6; + s = splnet(); + error = in_pcbbind(inp, (struct sockaddr *)&sin, p); + splx(s); + return error; + } + } + + s = splnet(); + error = in6_pcbbind(inp, nam, p); + splx(s); + return error; +} + +static int +udp6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + struct inpcb *inp; + int s, error; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + + if (ip6_mapped_addr_on) { + struct sockaddr_in6 *sin6_p; + + sin6_p = (struct sockaddr_in6 *)nam; + if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { + 
struct sockaddr_in sin; + + if (inp->inp_faddr.s_addr != INADDR_ANY) + return EISCONN; + in6_sin6_2_sin(&sin, sin6_p); + s = splnet(); + error = in_pcbconnect(inp, (struct sockaddr *)&sin, p); + splx(s); + if (error == 0) { + inp->inp_vflag |= INP_IPV4; + inp->inp_vflag &= ~INP_IPV6; + soisconnected(so); + } + return error; + } + } + + if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) + return EISCONN; + s = splnet(); + error = in6_pcbconnect(inp, nam, p); + if (ip6_auto_flowlabel) { + inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK; + inp->in6p_flowinfo |= + (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK); + } + splx(s); + if (error == 0) { + if (ip6_mapped_addr_on) { /* should be non mapped addr */ + inp->inp_vflag &= ~INP_IPV4; + inp->inp_vflag |= INP_IPV6; + } + soisconnected(so); + } + return error; +} + +static int +udp6_detach(struct socket *so) +{ + struct inpcb *inp; + int s; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + s = splnet(); + in6_pcbdetach(inp); + splx(s); + return 0; +} + +static int +udp6_disconnect(struct socket *so) +{ + struct inpcb *inp; + int s; + + inp = sotoinpcb(so); + if (inp == 0) + return EINVAL; + + if (inp->inp_vflag & INP_IPV4) { + struct pr_usrreqs *pru; + + pru = ip_protox[IPPROTO_UDP]->pr_usrreqs; + return ((*pru->pru_disconnect)(so)); + } + + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) + return ENOTCONN; + + s = splnet(); + in6_pcbdisconnect(inp); + inp->in6p_laddr = in6addr_any; + splx(s); + so->so_state &= ~SS_ISCONNECTED; /* XXX */ + return 0; +} + +static int +udp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, + struct mbuf *control, struct proc *p) +{ + struct inpcb *inp; + + inp = sotoinpcb(so); + if (inp == 0) { + m_freem(m); + return EINVAL; + } + + if (ip6_mapped_addr_on) { + int hasv4addr; + struct sockaddr_in6 *sin6 = 0; + + if (addr == 0) + hasv4addr = (inp->inp_vflag & INP_IPV4); + else { + sin6 = (struct sockaddr_in6 *)addr; + hasv4addr = IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr) 
+ ? 1 : 0; + } + if (hasv4addr) { + struct pr_usrreqs *pru; + int error; + + if (sin6) + in6_sin6_2_sin_in_sock(addr); + pru = ip_protox[IPPROTO_UDP]->pr_usrreqs; + error = ((*pru->pru_send)(so, flags, m, addr, control, + p)); + /* addr will just be freed in sendit(). */ + return error; + } + } + + return udp6_output(inp, m, addr, control, p); +} + +struct pr_usrreqs udp6_usrreqs = { + udp6_abort, pru_accept_notsupp, udp6_attach, udp6_bind, udp6_connect, + pru_connect2_notsupp, in6_control, udp6_detach, udp6_disconnect, + pru_listen_notsupp, in6_mapped_peeraddr, pru_rcvd_notsupp, + pru_rcvoob_notsupp, udp6_send, pru_sense_null, udp_shutdown, + in6_mapped_sockaddr, sosend, soreceive, sopoll +}; diff --git a/bsd/netinet6/udp6_var.h b/bsd/netinet6/udp6_var.h new file mode 100644 index 000000000..d779920ec --- /dev/null +++ b/bsd/netinet6/udp6_var.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)udp_var.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _NETINET6_UDP6_VAR_H_ +#define _NETINET6_UDP6_VAR_H_ + +#ifdef KERNEL +extern struct pr_usrreqs udp6_usrreqs; + +void udp6_ctlinput __P((int, struct sockaddr *, void *)); +int udp6_input __P((struct mbuf **, int *, int)); +int udp6_output __P((struct inpcb *inp, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p)); +#endif /* KERNEL */ + +#endif /*_NETINET6_UDP6_VAR_H_*/ diff --git a/bsd/netiso/Makefile b/bsd/netiso/Makefile new file mode 100644 index 000000000..9f704c042 --- /dev/null +++ b/bsd/netiso/Makefile @@ -0,0 +1,42 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + argo_debug.h clnl.h clnp.h clnp_stat.h cltp_var.h cons.h \ + cons_pcb.h eonvar.h esis.h iso.h iso_errno.h iso_pcb.h \ + iso_snpac.h iso_var.h tp_clnp.h tp_events.h tp_ip.h tp_meas.h \ + tp_param.h tp_pcb.h tp_seq.h tp_stat.h tp_states.h tp_timer.h \ + tp_tpdu.h tp_trace.h tp_user.h tuba_table.h + + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = netiso + +EXPORT_MI_LIST = ${DATAFILES} + 
+EXPORT_MI_DIR = netiso + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/netiso/argo_debug.h b/bsd/netiso/argo_debug.h new file mode 100644 index 000000000..90cc23b34 --- /dev/null +++ b/bsd/netiso/argo_debug.h @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)argo_debug.h 8.1 (Berkeley) 6/10/93 + */ + +/***************************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. 
+ +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#ifndef __ARGO_DEBUG__ +#define __ARGO_DEBUG__ + +#define dump_buf(a, b) Dump_buf((caddr_t)(a), (int)(b)) + +/*********************************************** + * Lint stuff + **********************************************/ +#if defined(lint) +/* + * lint can't handle the flaky vacuous definitions + * of IFDEBUG, ENDDEBUG, etc. + */ +#endif /* defined(lint) */ + +/*********************************************** + * DEBUG ON: + **********************************************/ +#ifndef ARGO_DEBUG +#define ARGO_DEBUG +#endif /* ARGO_DEBUG */ + + +#ifdef ARGO_DEBUG +#if 0 + #ifndef TPPT + #define TPPT + #endif /* TPPT */ + + #ifndef TP_PERF_MEAS + #define TP_PERF_MEAS + #endif /* TP_PERF_MEAS */ +#endif /* 0 */ + +unsigned char argo_debug[128]; + +#define IFDEBUG(ascii) \ + if(argo_debug[ascii]) { +#define ENDDEBUG ; } + +#else /* ARGO_DEBUG */ + +/*********************************************** + * DEBUG OFF: + **********************************************/ + +#ifndef STAR +#define STAR * +#endif /* STAR */ +#define IFDEBUG(ascii) //*beginning of comment*/STAR +#define ENDDEBUG STAR/*end of comment*// + +#endif /* ARGO_DEBUG */ + +/*********************************************** + * ASSERT + **********************************************/ +#ifdef ARGO_DEBUG + +#ifndef lint +#define ASSERT(phrase) \ +if( !(phrase) ) printf("ASSERTION NOT VALID at line %d file %s\n",__LINE__,__FILE__) 
+#else /* lint */ +#define ASSERT(phrase) /* phrase */ +#endif /* lint */ + +#else /* ARGO_DEBUG */ + +#define ASSERT(phrase) /* phrase */ + +#endif /* ARGO_DEBUG */ + + +/*********************************************** + * CLNP DEBUG OPTIONS + **********************************************/ +#define D_INPUT '\1' +/* clnp input */ +#define D_OUTPUT '\2' +/* clnp output */ +#define D_ROUTE '\3' +/* clnp routing */ +#define D_CTLINPUT '\4' +/* clnp control input */ +#define D_CTLOUTPUT '\5' +/* clnp control output */ +#define D_OPTIONS '\6' +/* clnp options */ +#define D_IOCTL '\7' +/* iso ioctls */ +#define D_ETHER '\10' +/* clnp over ethernet */ +#define D_TOKEN '\11' +/* clnp over token ring */ +#define D_ADCOM '\12' +/* clnp over the adcom */ +#define D_ISO '\13' +/* iso address family */ +#define D_FORWARD '\14' +/* clnp forwarding */ +#define D_DUMPOUT '\15' +/* dump clnp outgoing packets */ +#define D_DUMPIN '\16' +/* dump clnp input packets */ +#define D_DISCARD '\17' +/* debug clnp packet discard/er function */ +#define D_FRAG '\20' +/* clnp fragmentation */ +#define D_REASS '\21' +/* clnp reassembly */ + +char *clnp_iso_addrp(); + +/*********************************************** + * ESIS DEBUG OPTIONS + **********************************************/ +#define D_ESISOUTPUT '\30' +#define D_ESISINPUT '\31' +#define D_SNPA '\32' + +/*********************************************** + * ISIS DEBUG OPTIONS + **********************************************/ +#define D_ISISOUTPUT '\40' +#define D_ISISINPUT '\41' + +/*********************************************** + * EON DEBUG OPTION + **********************************************/ +#define D_EON '\57' + +/*********************************************** + * CONS DEBUG OPTIONS + **********************************************/ + +#define D_ECNWORK '\60' +#define D_ECNOUT '\61' +#define D_ECNFIN '\62' +#define D_ECNDWN '\63' +#define D_ECNUTIL '\64' + +#define D_INCOMING '\70' +#define D_CDATA '\71' +#define D_CFIND 
'\72' +#define D_CDUMP_REQ '\73' +#define D_CADDR '\74' +#define D_CCONS '\75' +#define D_CCONN '\76' + + +/*********************************************** + * TP DEBUG OPTIONS + **********************************************/ + +#define D_SETPARAMS '\137' +#define D_RTT '\140' + +#define D_ACKRECV '\141' +#define D_ACKSEND '\142' +#define D_CONN '\143' +#define D_CREDIT '\144' +#define D_DATA '\145' +#define D_DRIVER '\146' + +#define D_EMIT '\147' +#define D_ERROR_EMIT '\150' +#define D_TPINPUT '\151' +#define D_INDICATION '\152' +#define D_CHKSUM '\153' + +#define D_RENEG '\154' +#define D_PERF_MEAS '\155' +#define D_MBUF_MEAS '\156' +#define D_RTC '\157' +#define D_SB '\160' + +#define D_DISASTER_CHECK '\161' +#define D_REQUEST '\162' +#define D_STASH '\163' +#define D_NEWSOCK '\164' +#define D_TIMER '\165' + +#define D_TPIOCTL '\166' +#define D_SIZE_CHECK '\167' +#define D_2ER '\170' +#define D_DISASTER_CHECK_W '\171' + +#define D_XPD '\172' +#define D_SYSCALL '\173' +#define D_DROP '\174' +#define D_ZDREF '\175' +#define D_TPISO '\176' +#define D_QUENCH '\177' + +void dump_mbuf(); + +/*********************************************** + * New mbuf types for debugging w/ netstat -m + * This messes up 4.4 malloc for now. need bigger + * mbtypes array for now. 
+ **********************************************/ +#ifdef notdef + +#define TPMT_DATA 0x21 +#define TPMT_RCVRTC 0x42 +#define TPMT_SNDRTC 0x41 +#define TPMT_TPHDR 0x22 +#define TPMT_IPHDR 0x32 +#define TPMT_SONAME 0x28 +#define TPMT_EOT 0x40 +#define TPMT_XPD 0x44 +#define TPMT_PCB 0x23 +#define TPMT_PERF 0x45 + +#else /* ARGO_DEBUG */ + +#define TPMT_DATA MT_DATA +#define TPMT_RCVRTC MT_DATA +#define TPMT_SNDRTC MT_DATA +#define TPMT_IPHDR MT_HEADER +#define TPMT_TPHDR MT_HEADER +#define TPMT_SONAME MT_SONAME +/* MT_EOT and MT_XPD are defined in tp_param.h */ +#define TPMT_XPD MT_OOBDATA +#define TPMT_PCB MT_PCB +#define TPMT_PERF MT_PCB + +#endif /* ARGO_DEBUG */ + +#endif /* __ARGO_DEBUG__ */ diff --git a/bsd/netiso/clnl.h b/bsd/netiso/clnl.h new file mode 100644 index 000000000..8e00cebfd --- /dev/null +++ b/bsd/netiso/clnl.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)clnl.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +struct clnl_protosw { + int (*clnl_input)(); /* input routine */ +}; diff --git a/bsd/netiso/clnp.h b/bsd/netiso/clnp.h new file mode 100644 index 000000000..265fac8f2 --- /dev/null +++ b/bsd/netiso/clnp.h @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clnp.h 8.2 (Berkeley) 4/16/94 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +/* should be config option but cpp breaks with too many #defines */ +#define DECBIT + +/* + * Return true if the mbuf is a cluster mbuf + */ +#define IS_CLUSTER(m) ((m)->m_flags & M_EXT) + +/* + * Move the halfword into the two characters + */ +#define HTOC(msb, lsb, hword)\ + (msb) = (u_char)((hword) >> 8);\ + (lsb) = (u_char)((hword) & 0xff) +/* + * Move the two charcters into the halfword + */ +#define CTOH(msb, lsb, hword)\ + (hword) = ((msb) << 8) | (lsb) + +/* + * Return true if the checksum has been set - ie. the checksum is + * not zero + */ +#define CKSUM_REQUIRED(clnp)\ + (((clnp)->cnf_cksum_msb != 0) || ((clnp)->cnf_cksum_lsb != 0)) + +/* + * Fixed part of clnp header + */ +struct clnp_fixed { + u_char cnf_proto_id; /* network layer protocol identifier */ + u_char cnf_hdr_len; /* length indicator (octets) */ + u_char cnf_vers; /* version/protocol identifier extension */ + u_char cnf_ttl; /* lifetime (500 milliseconds) */ + u_char cnf_type; /* type code */ + /* Includes err_ok, more_segs, and seg_ok */ + u_char cnf_seglen_msb; /* pdu segment length (octets) high byte */ + u_char cnf_seglen_lsb; /* pdu segment length (octets) low byte */ + u_char cnf_cksum_msb; /* checksum high byte */ + u_char cnf_cksum_lsb; /* checksum low byte */ +}; +#define CNF_TYPE 0x1f +#define CNF_ERR_OK 0x20 +#define CNF_MORE_SEGS 0x40 +#define CNF_SEG_OK 0x80 + +#define CLNP_CKSUM_OFF 0x07 /* offset of checksum */ + +#define clnl_fixed clnp_fixed + +/* + * Segmentation part of clnp header + */ +struct clnp_segment { + u_short cng_id; /* data unit identifier */ + u_short cng_off; /* segment offset */ + u_short cng_tot_len; /* total length */ +}; + +/* + * Clnp fragment reassembly structures: + * + * All packets undergoing reassembly are linked together in + * clnp_fragl structures. 
Each clnp_fragl structure contains a + * pointer to the original clnp packet header, as well as a + * list of packet fragments. Each packet fragment + * is headed by a clnp_frag structure. This structure contains the + * offset of the first and last byte of the fragment, as well as + * a pointer to the data (an mbuf chain) of the fragment. + */ + +/* + * NOTE: + * The clnp_frag structure is stored in an mbuf immedately preceeding + * the fragment data. Since there are words in this struct, + * it must be word aligned. + * + * NOTE: + * All the fragment code assumes that the entire clnp header is + * contained in the first mbuf. + */ +struct clnp_frag { + u_int cfr_first; /* offset of first byte of this frag */ + u_int cfr_last; /* offset of last byte of this frag */ + u_int cfr_bytes; /* bytes to shave to get to data */ + struct mbuf *cfr_data; /* ptr to data for this frag */ + struct clnp_frag *cfr_next; /* next fragment in list */ +}; + +struct clnp_fragl { + struct iso_addr cfl_src; /* source of the pkt */ + struct iso_addr cfl_dst; /* destination of the pkt */ + u_short cfl_id; /* id of the pkt */ + u_char cfl_ttl; /* current ttl of pkt */ + u_short cfl_last; /* offset of last byte of packet */ + struct mbuf *cfl_orighdr; /* ptr to original header */ + struct clnp_frag *cfl_frags; /* linked list of fragments for pkt */ + struct clnp_fragl *cfl_next; /* next pkt being reassembled */ +}; + +/* + * The following structure is used to index into an options section + * of a clnp datagram. These values can be used without worry that + * offset or length fields are invalid or too big, etc. That is, + * the consistancy of the options will be guaranteed before this + * structure is filled in. Any pointer (field ending in p) is + * actually the offset from the beginning of the mbuf the option + * is contained in. A value of NULL for any pointer + * means that the option is not present. The length any option + * does not include the option code or option length fields. 
+ */ +struct clnp_optidx { + u_short cni_securep; /* ptr to beginning of security option */ + char cni_secure_len; /* length of entire security option */ + + u_short cni_srcrt_s; /* offset of start of src rt option */ + u_short cni_srcrt_len; /* length of entire src rt option */ + + u_short cni_recrtp; /* ptr to beginning of recrt option */ + char cni_recrt_len; /* length of entire recrt option */ + + char cni_priorp; /* ptr to priority option */ + + u_short cni_qos_formatp; /* ptr to format of qos option */ + char cni_qos_len; /* length of entire qos option */ + + u_char cni_er_reason; /* reason from ER pdu option */ + + /* ESIS options */ + + u_short cni_esct; /* value from ISH ESCT option */ + + u_short cni_netmaskp; /* ptr to beginning of netmask option */ + char cni_netmask_len; /* length of entire netmask option */ + + u_short cni_snpamaskp; /* ptr to beginning of snpamask option */ + char cni_snpamask_len; /* length of entire snpamask option */ + +}; + +#define ER_INVALREAS 0xff /* code for invalid ER pdu discard reason */ + +/* given an mbuf and addr of option, return offset from data of mbuf */ +#define CLNP_OPTTOOFF(m, opt)\ + ((u_short) (opt - mtod(m, caddr_t))) + +/* given an mbuf and offset of option, return address of option */ +#define CLNP_OFFTOOPT(m, off)\ + ((caddr_t) (mtod(m, caddr_t) + off)) + +/* return true iff src route is valid */ +#define CLNPSRCRT_VALID(oidx)\ + ((oidx) && (oidx->cni_srcrt_s)) + +/* return the offset field of the src rt */ +#define CLNPSRCRT_OFF(oidx, options)\ + (*((u_char *)(CLNP_OFFTOOPT(options, oidx->cni_srcrt_s) + 1))) + +/* return the type field of the src rt */ +#define CLNPSRCRT_TYPE(oidx, options)\ + ((u_char)(*(CLNP_OFFTOOPT(options, oidx->cni_srcrt_s)))) + +/* return the length of the current address */ +#define CLNPSRCRT_CLEN(oidx, options)\ + ((u_char)(*(CLNP_OFFTOOPT(options, oidx->cni_srcrt_s) + CLNPSRCRT_OFF(oidx, options) - 1))) + +/* return the address of the current address */ +#define 
CLNPSRCRT_CADDR(oidx, options)\ + ((caddr_t)(CLNP_OFFTOOPT(options, oidx->cni_srcrt_s) + CLNPSRCRT_OFF(oidx, options))) + +/* + * return true if the src route has run out of routes + * this is true if the offset of next route is greater than the end of the rt + */ +#define CLNPSRCRT_TERM(oidx, options)\ + (CLNPSRCRT_OFF(oidx, options) > oidx->cni_srcrt_len) + +/* + * Options a user can set/get + */ +#define CLNPOPT_FLAGS 0x01 /* flags: seg permitted, no er xmit, etc */ +#define CLNPOPT_OPTS 0x02 /* datagram options */ + +/* + * Values for particular datagram options + */ +#define CLNPOVAL_PAD 0xcc /* padding */ +#define CLNPOVAL_SECURE 0xc5 /* security */ +#define CLNPOVAL_SRCRT 0xc8 /* source routing */ +#define CLNPOVAL_RECRT 0xcb /* record route */ +#define CLNPOVAL_QOS 0xc3 /* quality of service */ +#define CLNPOVAL_PRIOR 0xcd /* priority */ +#define CLNPOVAL_ERREAS 0xc1 /* ER PDU ONLY: reason for discard */ + +#define CLNPOVAL_SRCSPEC 0x40 /* source address specific */ +#define CLNPOVAL_DSTSPEC 0x80 /* destination address specific */ +#define CLNPOVAL_GLOBAL 0xc0 /* globally unique */ + +/* Globally Unique QOS */ +#define CLNPOVAL_SEQUENCING 0x10 /* sequencing preferred */ +#define CLNPOVAL_CONGESTED 0x08 /* congestion experienced */ +#define CLNPOVAL_LOWDELAY 0x04 /* low transit delay */ + +#define CLNPOVAL_PARTRT 0x00 /* partial source routing */ +#define CLNPOVAL_COMPRT 0x01 /* complete source routing */ + +/* + * Clnp flags used in a control block flags field. 
+ * NOTE: these must be out of the range of bits defined in ../net/raw_cb.h + */ +#define CLNP_NO_SEG 0x010 /* segmentation not permitted */ +#define CLNP_NO_ER 0x020 /* do not generate ERs */ +#define CLNP_SEND_RAW 0x080 /* send pkt as RAW DT rather than TP DT */ +#define CLNP_NO_CKSUM 0x100 /* don't use clnp checksum */ +#define CLNP_ECHO 0x200 /* send echo request */ +#define CLNP_NOCACHE 0x400 /* don't store cache information */ +#define CLNP_ECHOR 0x800 /* send echo reply */ + +/* valid clnp flags */ +#define CLNP_VFLAGS (CLNP_SEND_RAW|CLNP_NO_SEG|CLNP_NO_ER|CLNP_NO_CKSUM\ + |CLNP_ECHO|CLNP_NOCACHE|CLNP_ECHOR) + +/* + * Constants used by clnp + */ +#define CLNP_HDR_MIN (sizeof (struct clnp_fixed)) +#define CLNP_HDR_MAX (254) +#define CLNP_TTL_UNITS 2 /* 500 milliseconds */ +#define CLNP_TTL 15*CLNP_TTL_UNITS /* time to live (seconds) */ +#define ISO8473_V1 0x01 + +/* + * Clnp packet types + * In order to test raw clnp and tp/clnp simultaneously, a third type of + * packet has been defined: CLNP_RAW. This is done so that the input + * routine can switch to the correct input routine (rclnp_input or + * tpclnp_input) based on the type field. If clnp had a higher level protocol + * field, this would not be necessary. 
+ */ +#define CLNP_DT 0x1C /* normal data */ +#define CLNP_ER 0x01 /* error report */ +#define CLNP_RAW 0x1D /* debug only */ +#define CLNP_EC 0x1E /* echo packet */ +#define CLNP_ECR 0x1F /* echo reply */ + +/* + * ER pdu error codes + */ +#define GEN_NOREAS 0x00 /* reason not specified */ +#define GEN_PROTOERR 0x01 /* protocol procedure error */ +#define GEN_BADCSUM 0x02 /* incorrect checksum */ +#define GEN_CONGEST 0x03 /* pdu discarded due to congestion */ +#define GEN_HDRSYNTAX 0x04 /* header syntax error */ +#define GEN_SEGNEEDED 0x05 /* segmentation needed, but not permitted */ +#define GEN_INCOMPLETE 0x06 /* incomplete pdu received */ +#define GEN_DUPOPT 0x07 /* duplicate option */ + +/* address errors */ +#define ADDR_DESTUNREACH 0x80 /* destination address unreachable */ +#define ADDR_DESTUNKNOWN 0x81 /* destination address unknown */ + +/* source routing */ +#define SRCRT_UNSPECERR 0x90 /* unspecified src rt error */ +#define SRCRT_SYNTAX 0x91 /* syntax error in src rt field */ +#define SRCRT_UNKNOWNADDR 0x92 /* unknown addr in src rt field */ +#define SRCRT_BADPATH 0x93 /* path not acceptable */ + +/* lifetime */ +#define TTL_EXPTRANSIT 0xa0 /* lifetime expired during transit */ +#define TTL_EXPREASS 0xa1 /* lifetime expired during reassembly */ + +/* pdu discarded */ +#define DISC_UNSUPPOPT 0xb0 /* unsupported option not specified? 
*/ +#define DISC_UNSUPPVERS 0xb1 /* unsupported protocol version */ +#define DISC_UNSUPPSECURE 0xb2 /* unsupported security option */ +#define DISC_UNSUPPSRCRT 0xb3 /* unsupported src rt option */ +#define DISC_UNSUPPRECRT 0xb4 /* unsupported rec rt option */ + +/* reassembly */ +#define REASS_INTERFERE 0xc0 /* reassembly interference */ +#define CLNP_ERRORS 22 + + +#ifdef KERNEL +int clnp_er_index(); +#endif + +#ifdef CLNP_ER_CODES +u_char clnp_er_codes[CLNP_ERRORS] = { +GEN_NOREAS, GEN_PROTOERR, GEN_BADCSUM, GEN_CONGEST, +GEN_HDRSYNTAX, GEN_SEGNEEDED, GEN_INCOMPLETE, GEN_DUPOPT, +ADDR_DESTUNREACH, ADDR_DESTUNKNOWN, +SRCRT_UNSPECERR, SRCRT_SYNTAX, SRCRT_UNKNOWNADDR, SRCRT_BADPATH, +TTL_EXPTRANSIT, TTL_EXPREASS, +DISC_UNSUPPOPT, DISC_UNSUPPVERS, DISC_UNSUPPSECURE, +DISC_UNSUPPSRCRT, DISC_UNSUPPRECRT, REASS_INTERFERE }; +#endif + +#ifdef TROLL + +#define TR_DUPEND 0x01 /* duplicate end of fragment */ +#define TR_DUPPKT 0x02 /* duplicate entire packet */ +#define TR_DROPPKT 0x04 /* drop packet on output */ +#define TR_TRIM 0x08 /* trim bytes from packet */ +#define TR_CHANGE 0x10 /* change bytes in packet */ +#define TR_MTU 0x20 /* delta to change device mtu */ +#define TR_CHUCK 0x40 /* drop packet in rclnp_input */ +#define TR_BLAST 0x80 /* force rclnp_output to blast many packet */ +#define TR_RAWLOOP 0x100 /* make if_loop call clnpintr directly */ +struct troll { + int tr_ops; /* operations to perform */ + float tr_dup_size; /* % to duplicate */ + float tr_dup_freq; /* frequency to duplicate packets */ + float tr_drop_freq; /* frequence to drop packets */ + int tr_mtu_adj; /* delta to adjust if mtu */ + int tr_blast_cnt; /* # of pkts to blast out */ +}; + +#define SN_OUTPUT(clcp, m)\ + troll_output(clcp->clc_ifp, m, clcp->clc_firsthop, clcp->clc_rt) + +#define SN_MTU(ifp, rt) (((rt && rt->rt_rmx.rmx_mtu) ?\ + rt->rt_rmx.rmx_mtu : clnp_badmtu(ifp, rt, __LINE__, __FILE__))\ + - trollctl.tr_mtu_adj) + +#ifdef KERNEL +extern float troll_random; +#endif + +#else /* NO 
TROLL */ + +#define SN_OUTPUT(clcp, m)\ + (*clcp->clc_ifp->if_output)(clcp->clc_ifp, m, clcp->clc_firsthop, clcp->clc_rt) + +#define SN_MTU(ifp, rt) (((rt && rt->rt_rmx.rmx_mtu) ?\ + rt->rt_rmx.rmx_mtu : clnp_badmtu(ifp, rt, __LINE__, __FILE__))) + +#endif /* TROLL */ + +/* + * Macro to remove an address from a clnp header + */ +#define CLNP_EXTRACT_ADDR(isoa, hoff, hend)\ + {\ + isoa.isoa_len = (u_char)*hoff;\ + if ((((++hoff) + isoa.isoa_len) > hend) ||\ + (isoa.isoa_len > 20) || (isoa.isoa_len == 0)) {\ + hoff = (caddr_t)0;\ + } else {\ + (void) bcopy(hoff, (caddr_t)isoa.isoa_genaddr, isoa.isoa_len);\ + hoff += isoa.isoa_len;\ + }\ + } + +/* + * Macro to insert an address into a clnp header + */ +#define CLNP_INSERT_ADDR(hoff, isoa)\ + *hoff++ = (isoa).isoa_len;\ + (void) bcopy((caddr_t)((isoa).isoa_genaddr), hoff, (isoa).isoa_len);\ + hoff += (isoa).isoa_len; + +/* + * Clnp hdr cache. Whenever a clnp packet is sent, a copy of the + * header is made and kept in this cache. In addition to a copy of + * the cached clnp hdr, the cache contains + * information necessary to determine whether the new packet + * to send requires a new header to be built. + */ +struct clnp_cache { + /* these fields are used to check the validity of the cache */ + struct iso_addr clc_dst; /* destination of packet */ + struct mbuf *clc_options; /* ptr to options mbuf */ + int clc_flags; /* flags passed to clnp_output */ + + /* these fields are state that clnp_output requires to finish the pkt */ + int clc_segoff; /* offset of seg part of header */ + struct rtentry *clc_rt; /* ptr to rtentry (points into + the route structure) */ + struct sockaddr *clc_firsthop; /* first hop of packet */ + struct ifnet *clc_ifp; /* ptr to interface structure */ + struct iso_ifaddr *clc_ifa; /* ptr to interface address */ + struct mbuf *clc_hdr; /* cached pkt hdr (finally)! 
*/ +}; + +#ifndef satosiso +#define satosiso(sa)\ + ((struct sockaddr_iso *)(sa)) +#endif + +#ifdef KERNEL +caddr_t clnp_insert_addr(); +struct iso_addr *clnp_srcaddr(); +struct mbuf *clnp_reass(); +#ifdef TROLL +struct troll trollctl; +#endif /* TROLL */ +#endif /* KERNEL */ diff --git a/bsd/netiso/clnp_debug.c b/bsd/netiso/clnp_debug.c new file mode 100644 index 000000000..538cb3006 --- /dev/null +++ b/bsd/netiso/clnp_debug.c @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)clnp_debug.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+
+******************************************************************/
+
+/*
+ * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
+ */
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/errno.h>
+
+#include <net/if.h>
+#include <net/route.h>
+
+#include <netiso/iso.h>
+#include <netiso/iso_var.h>
+#include <netiso/clnp.h>
+#include <netiso/clnp_stat.h>
+
+#ifdef ARGO_DEBUG
+
+#ifdef TESTDEBUG
+#ifdef notdef
+struct addr_37 u_37 = {
+ {0x00, 0x02, 0x00, 0x10, 0x20, 0x30, 0x35},
+ {0x01, 0x02, 0x03, 0x04, 0x50, 0x60, 0x70, 0x80, 0x90}
+};
+struct addr_osinet u_osinet = {
+ {0x00, 0x04},
+ {0x00, 0x02, 0x00, 0x01, 0x23, 0x42, 0x78, 0x20, 0x01, 0x05, 0x00}
+};
+#endif /* notdef */
+struct addr_rfc986 u_rfc986 = {
+ {0x00, 0x06},
+ {0x01, 0xc0, 0x0c, 0x0c, 0xab, 0x11}
+};
+struct addr_rfc986 u_bad = {
+ {0x00, 0x01},
+ {0x01, 0xc0, 0x0c, 0x0c, 0xab, 0x11}
+};
+#include <stdio.h>
+main()
+{
+ struct iso_addr a;
+
+ a.isoa_afi = AFI_37;
+ a.isoa_u.addr_37 = u_37;
+ a.isoa_len = 17;
+ printf("type 37: %s\n", clnp_iso_addrp(&a));
+
+ a.isoa_afi = AFI_OSINET;
+ a.isoa_u.addr_osinet = u_osinet;
+ a.isoa_len = 14;
+ printf("type osinet: %s\n", clnp_iso_addrp(&a));
+
+ a.isoa_afi = AFI_RFC986;
+ a.isoa_u.addr_rfc986 = u_rfc986;
+ a.isoa_len = 9;
+ printf("type rfc986: %s\n", clnp_iso_addrp(&a));
+
+ a.isoa_afi = 12;
+ a.isoa_u.addr_rfc986 = u_rfc986;
+ a.isoa_len = 9;
+ printf("type bad afi: %s\n", clnp_iso_addrp(&a));
+
+ a.isoa_afi = AFI_RFC986;
+ a.isoa_u.addr_rfc986 = u_bad;
+ a.isoa_len = 9;
+ printf("type bad idi: %s\n", clnp_iso_addrp(&a));
+}
+#endif /* TESTDEBUG */
+
+unsigned int clnp_debug;
+static char letters[] = "0123456789abcdef";
+
+/*
+ * Print buffer in hex, return addr of where we left off.
+ * Do not null terminate.
+ */
+char *
+clnp_hexp(src, len, where)
+char *src; /* src of data to print */
+int len; /* length of src */
+char *where; /* where to put data */
+{
+ int i;
+
+ for (i=0; i<len; i++) {
+ register u_char j = *src++;
+ *where++ = letters[j >> 4];
+ *where++ = letters[j & 0x0f];
+ }
+ return where;
+}
+
+/*
+ * Return a ptr to a human readable form of an iso addr
+ */
+static char iso_addr_b[50];
+#define DELIM '.';
+
+char *
+clnp_iso_addrp(isoa)
+struct iso_addr *isoa;
+{
+ char *cp;
+
+ /* print length */
+ sprintf(iso_addr_b, "[%d] ", isoa->isoa_len);
+
+ /* set cp to end of what we have */
+ cp = iso_addr_b;
+ while (*cp)
+ cp++;
+
+ /* print afi */
+ cp = clnp_hexp(isoa->isoa_genaddr, (int)isoa->isoa_len, cp);
+#ifdef notdef
+ *cp++ = DELIM;
+
+ /* print type specific part */
+ switch(isoa->isoa_afi) {
+ case AFI_37:
+ cp = clnp_hexp(isoa->t37_idi, ADDR37_IDI_LEN, cp);
+ *cp++ = DELIM;
+ cp = clnp_hexp(isoa->t37_dsp, ADDR37_DSP_LEN, cp);
+ break;
+
+/* case AFI_OSINET:*/
+ case AFI_RFC986: {
+ u_short idi;
+
+ /* osinet and rfc986 have idi in the same place */
+ /* print idi */
+ cp = clnp_hexp(isoa->rfc986_idi, ADDROSINET_IDI_LEN, cp);
+ *cp++ = DELIM;
+ CTOH(isoa->rfc986_idi[0], isoa->rfc986_idi[1], idi);
+
+ if (idi == IDI_OSINET) {
+ struct ovl_osinet *oosi = (struct ovl_osinet *)isoa;
+ cp = clnp_hexp(oosi->oosi_orgid, OVLOSINET_ORGID_LEN, cp);
+ *cp++ = DELIM;
+ cp = clnp_hexp(oosi->oosi_snetid, OVLOSINET_SNETID_LEN, cp);
+ *cp++ = DELIM;
+ cp = clnp_hexp(oosi->oosi_snpa, OVLOSINET_SNPA_LEN, cp);
+ *cp++ = DELIM;
+ cp = clnp_hexp(oosi->oosi_nsap, OVLOSINET_NSAP_LEN, cp);
+ } else if (idi == IDI_RFC986) {
+ struct ovl_rfc986 *o986 = (struct ovl_rfc986 *)isoa;
+ cp = clnp_hexp(&o986->o986_vers, 1, cp);
+ *cp++ = DELIM;
+#ifdef vax
+ sprintf(cp, "%d.%d.%d.%d.%d",
+ o986->o986_inetaddr[0] & 0xff,
+ o986->o986_inetaddr[1] & 0xff,
+ o986->o986_inetaddr[2] & 0xff,
+ o986->o986_inetaddr[3] & 0xff,
+ o986->o986_upid & 0xff);
+ return(iso_addr_b);
+#else
+ cp = clnp_hexp(&o986->o986_inetaddr[0], 1, cp);
+ *cp++ = DELIM;
+ cp = 
clnp_hexp(&o986->o986_inetaddr[1], 1, cp); + *cp++ = DELIM; + cp = clnp_hexp(&o986->o986_inetaddr[2], 1, cp); + *cp++ = DELIM; + cp = clnp_hexp(&o986->o986_inetaddr[3], 1, cp); + *cp++ = DELIM; + cp = clnp_hexp(&o986->o986_upid, 1, cp); +#endif /* vax */ + } + + } break; + + default: + *cp++ = '?'; + break; + } +#endif /* notdef */ + *cp = (char)0; + + return(iso_addr_b); +} + +char * +clnp_saddr_isop(s) +register struct sockaddr_iso *s; +{ + register char *cp = clnp_iso_addrp(&s->siso_addr); + + while (*cp) cp++; + *cp++ = '('; + cp = clnp_hexp(TSEL(s), (int)s->siso_tlen, cp); + *cp++ = ')'; + *cp++ = 0; + return (iso_addr_b); +} + +#endif /* ARGO_DEBUG */ diff --git a/bsd/netiso/clnp_er.c b/bsd/netiso/clnp_er.c new file mode 100644 index 000000000..ed8446984 --- /dev/null +++ b/bsd/netiso/clnp_er.c @@ -0,0 +1,394 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ *
+ * @(#)clnp_er.c 8.1 (Berkeley) 6/10/93
+ */
+
+/***********************************************************
+ Copyright IBM Corporation 1987
+
+ All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of IBM not be
+used in advertising or publicity pertaining to distribution of the
+software without specific, written prior permission.
+
+IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
+ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
+IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
+ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
+ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+SOFTWARE.
+
+******************************************************************/
+
+/*
+ * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
+ */
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/domain.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/errno.h>
+
+#include <net/if.h>
+#include <net/route.h>
+
+#include <netiso/iso.h>
+#include <netiso/iso_var.h>
+#include <netiso/iso_pcb.h>
+#define CLNP_ER_CODES
+#include <netiso/clnp.h>
+#include <netiso/clnp_stat.h>
+#include <netiso/argo_debug.h>
+
+static struct clnp_fixed er_template = {
+ ISO8473_CLNP, /* network identifier */
+ 0, /* length */
+ ISO8473_V1, /* version */
+ CLNP_TTL, /* ttl */
+ CLNP_ER, /* type */
+ 0, /* segment length */
+ 0 /* checksum */
+};
+
+/*
+ * FUNCTION: clnp_er_input
+ *
+ * PURPOSE: Process an ER pdu.
+ * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: + */ +clnp_er_input(m, src, reason) +struct mbuf *m; /* ptr to packet itself */ +struct iso_addr *src; /* ptr to src of er */ +u_char reason; /* reason code of er */ +{ + int cmd = -1; + extern u_char clnp_protox[]; + + IFDEBUG(D_CTLINPUT) + printf("clnp_er_input: m x%x, src %s, reason x%x\n", m, + clnp_iso_addrp(src), reason); + ENDDEBUG + + INCSTAT(cns_er_inhist[clnp_er_index(reason)]); + switch (reason) { + case GEN_NOREAS: + case GEN_PROTOERR: + break; + case GEN_BADCSUM: + cmd = PRC_PARAMPROB; + break; + case GEN_CONGEST: + cmd = PRC_QUENCH; + break; + case GEN_HDRSYNTAX: + cmd = PRC_PARAMPROB; + break; + case GEN_SEGNEEDED: + cmd = PRC_MSGSIZE; + break; + case GEN_INCOMPLETE: + cmd = PRC_PARAMPROB; + break; + case GEN_DUPOPT: + cmd = PRC_PARAMPROB; + break; + case ADDR_DESTUNREACH: + cmd = PRC_UNREACH_HOST; + break; + case ADDR_DESTUNKNOWN: + cmd = PRC_UNREACH_PROTOCOL; + break; + case SRCRT_UNSPECERR: + case SRCRT_SYNTAX: + case SRCRT_UNKNOWNADDR: + case SRCRT_BADPATH: + cmd = PRC_UNREACH_SRCFAIL; + break; + case TTL_EXPTRANSIT: + cmd = PRC_TIMXCEED_INTRANS; + break; + case TTL_EXPREASS: + cmd = PRC_TIMXCEED_REASS; + break; + case DISC_UNSUPPOPT: + case DISC_UNSUPPVERS: + case DISC_UNSUPPSECURE: + case DISC_UNSUPPSRCRT: + case DISC_UNSUPPRECRT: + cmd = PRC_PARAMPROB; + break; + case REASS_INTERFERE: + cmd = PRC_TIMXCEED_REASS; + break; + } + + /* + * tpclnp_ctlinput1 is called directly so that we don't + * have to build an iso_sockaddr out of src. + */ + if (cmd >= 0) + tpclnp_ctlinput1(cmd, src); + + m_freem(m); +} + +/* + * FUNCTION: clnp_discard + * + * PURPOSE: Discard a clnp datagram + * + * RETURNS: nothing + * + * SIDE EFFECTS: Will emit an ER pdu if possible + * + * NOTES: This code assumes that we have previously tried to pull + * up the header of the datagram into one mbuf. 
+ */ +clnp_discard(m, reason) +struct mbuf *m; /* header of packet to discard */ +char reason; /* reason for discard */ +{ + IFDEBUG(D_DISCARD) + printf("clnp_discard: m x%x, reason x%x\n", m, reason); + ENDDEBUG + + if (m != NULL) { + if (m->m_len >= sizeof(struct clnp_fixed)) { + register struct clnp_fixed *clnp = mtod(m, struct clnp_fixed *); + + if (((clnp->cnf_type & CNF_TYPE) != CLNP_ER) && + (clnp->cnf_type & CNF_ERR_OK)) { + clnp_emit_er(m, reason); + return; + } + } + m_freem(m); + } +} + +/* + * FUNCTION: clnp_emit_er + * + * PURPOSE: Send an ER pdu. + * The src of the of the ER pdu is the host that is sending + * the ER (ie. us), *not* the original destination of the + * packet. + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: Takes responsibility for freeing mbuf passed + * This function may be called with a packet that + * was created by us; in this case, do not send + * an ER. + */ +clnp_emit_er(m, reason) +struct mbuf *m; /* header of packet to discard */ +char reason; /* reason for discard */ +{ + register struct clnp_fixed *clnp = mtod(m, struct clnp_fixed *); + register struct clnp_fixed *er; + struct route_iso route; + struct ifnet *ifp; + struct sockaddr *first_hop; + struct iso_addr src, dst, *our_addr; + caddr_t hoff, hend; + int total_len; /* total len of dg */ + struct mbuf *m0; /* contains er pdu hdr */ + struct iso_ifaddr *ia = 0; + + IFDEBUG(D_DISCARD) + printf("clnp_emit_er: m x%x, hdr len %d\n", m, clnp->cnf_hdr_len); + ENDDEBUG + + bzero((caddr_t)&route, sizeof(route)); + + /* + * If header length is incorrect, or entire header is not contained + * in this mbuf, we punt + */ + if ((clnp->cnf_hdr_len < CLNP_HDR_MIN) || + (clnp->cnf_hdr_len > CLNP_HDR_MAX) || + (clnp->cnf_hdr_len > m->m_len)) + goto bad; + + /* extract src, dest address */ + hend = (caddr_t)clnp + clnp->cnf_hdr_len; + hoff = (caddr_t)clnp + sizeof(struct clnp_fixed); + CLNP_EXTRACT_ADDR(dst, hoff, hend); + if (hoff == (caddr_t)0) { + goto bad; + } + 
CLNP_EXTRACT_ADDR(src, hoff, hend); + if (hoff == (caddr_t)0) { + goto bad; + } + + /* + * Do not send ER if we generated the packet. + */ + if (clnp_ours(&src)) + goto bad; + + /* + * Trim mbuf to hold only the header. + * This mbuf will be the 'data' of the er pdu + */ + if (m->m_next != NULL) { + m_freem(m->m_next); + m->m_next = NULL; + } + + if (m->m_len > clnp->cnf_hdr_len) + m_adj(m, (int)-(m->m_len - (int)clnp->cnf_hdr_len)); + + /* route er pdu: note we send pkt to src of original packet */ + if (clnp_route(&src, &route, /* flags */0, &first_hop, &ia) != 0) + goto bad; + + /* compute our address based upon firsthop/ifp */ + if (ia) + our_addr = &ia->ia_addr.siso_addr; + else + goto bad; + ifp = ia->ia_ifp; + + IFDEBUG(D_DISCARD) + printf("clnp_emit_er: to %s", clnp_iso_addrp(&src)); + printf(" from %s\n", clnp_iso_addrp(our_addr)); + ENDDEBUG + + IFDEBUG(D_DISCARD) + printf("clnp_emit_er: packet routed to %s\n", + clnp_iso_addrp(&((struct sockaddr_iso *)first_hop)->siso_addr)); + ENDDEBUG + + /* allocate mbuf for er pdu header: punt on no space */ + MGET(m0, M_DONTWAIT, MT_HEADER); + if (m0 == 0) + goto bad; + + m0->m_next = m; + er = mtod(m0, struct clnp_fixed *); + *er = er_template; + + /* setup src/dst on er pdu */ + /* NOTE REVERSAL OF SRC/DST */ + hoff = (caddr_t)er + sizeof(struct clnp_fixed); + CLNP_INSERT_ADDR(hoff, src); + CLNP_INSERT_ADDR(hoff, *our_addr); + + /* + * TODO: if complete src rt was specified, then reverse path, and + * copy into er as option. 
+ */ + + /* add er option */ + *hoff++ = CLNPOVAL_ERREAS; /* code */ + *hoff++ = 2; /* length */ + *hoff++ = reason; /* discard reason */ + *hoff++ = 0; /* error localization = not specified */ + + /* set length */ + er->cnf_hdr_len = m0->m_len = (u_char)(hoff - (caddr_t)er); + total_len = m0->m_len + m->m_len; + HTOC(er->cnf_seglen_msb, er->cnf_seglen_lsb, total_len); + + /* compute checksum (on header only) */ + iso_gen_csum(m0, CLNP_CKSUM_OFF, (int)er->cnf_hdr_len); + + /* trim packet if too large for interface */ + if (total_len > ifp->if_mtu) + m_adj(m0, -(total_len - ifp->if_mtu)); + + /* send packet */ + INCSTAT(cns_er_outhist[clnp_er_index(reason)]); + (void) (*ifp->if_output)(ifp, m0, first_hop, route.ro_rt); + goto done; + +bad: + m_freem(m); + +done: + /* free route if it is a temp */ + if (route.ro_rt != NULL) + RTFREE(route.ro_rt); +} + +clnp_er_index(p) +u_char p; +{ + register u_char *cp = clnp_er_codes + CLNP_ERRORS; + while (cp > clnp_er_codes) { + cp--; + if (*cp == p) + return (cp - clnp_er_codes); + } + return (CLNP_ERRORS + 1); +} diff --git a/bsd/netiso/clnp_frag.c b/bsd/netiso/clnp_frag.c new file mode 100644 index 000000000..f2db3ef1b --- /dev/null +++ b/bsd/netiso/clnp_frag.c @@ -0,0 +1,878 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clnp_frag.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +/* all fragments are hung off this list */ +struct clnp_fragl *clnp_frags = NULL; + +struct mbuf *clnp_comp_pdu(); + + +/* + * FUNCTION: clnp_fragment + * + * PURPOSE: Fragment a datagram, and send the itty bitty pieces + * out over an interface. + * + * RETURNS: success - 0 + * failure - unix error code + * + * SIDE EFFECTS: + * + * NOTES: If there is an error sending the packet, clnp_discard + * is called to discard the packet and send an ER. If + * clnp_fragment was called from clnp_output, then + * we generated the packet, and should not send an + * ER -- clnp_emit_er will check for this. Otherwise, + * the packet was fragmented during forwarding. In this + * case, we ought to send an ER back. 
+ */ +clnp_fragment(ifp, m, first_hop, total_len, segoff, flags, rt) +struct ifnet *ifp; /* ptr to outgoing interface */ +struct mbuf *m; /* ptr to packet */ +struct sockaddr *first_hop; /* ptr to first hop */ +int total_len; /* length of datagram */ +int segoff; /* offset of segpart in hdr */ +int flags; /* flags passed to clnp_output */ +struct rtentry *rt; /* route if direct ether */ +{ + struct clnp_fixed *clnp = mtod(m, struct clnp_fixed *); + int hdr_len = (int)clnp->cnf_hdr_len; + int frag_size = (SN_MTU(ifp, rt) - hdr_len) & ~7; + + total_len -= hdr_len; + if ((clnp->cnf_type & CNF_SEG_OK) && + (total_len >= 8) && + (frag_size > 8 || (frag_size == 8 && !(total_len & 7)))) { + + struct mbuf *hdr = NULL; /* save copy of clnp hdr */ + struct mbuf *frag_hdr = NULL; + struct mbuf *frag_data = NULL; + struct clnp_segment seg_part; /* segmentation header */ + int frag_base; + int error = 0; + + + INCSTAT(cns_fragmented); + (void) bcopy(segoff + mtod(m, caddr_t), (caddr_t)&seg_part, + sizeof(seg_part)); + frag_base = ntohs(seg_part.cng_off); + /* + * Duplicate header, and remove from packet + */ + if ((hdr = m_copy(m, 0, hdr_len)) == NULL) { + clnp_discard(m, GEN_CONGEST); + return(ENOBUFS); + } + m_adj(m, hdr_len); + + while (total_len > 0) { + int remaining, last_frag; + + IFDEBUG(D_FRAG) + struct mbuf *mdump = frag_hdr; + int tot_mlen = 0; + printf("clnp_fragment: total_len %d:\n", total_len); + while (mdump != NULL) { + printf("\tmbuf x%x, m_len %d\n", + mdump, mdump->m_len); + tot_mlen += mdump->m_len; + mdump = mdump->m_next; + } + printf("clnp_fragment: sum of mbuf chain %d:\n", tot_mlen); + ENDDEBUG + + frag_size = min(total_len, frag_size); + if ((remaining = total_len - frag_size) == 0) + last_frag = 1; + else { + /* + * If this fragment will cause the last one to + * be less than 8 bytes, shorten this fragment a bit. + * The obscure test on frag_size above ensures that + * frag_size will be positive. 
+ */ + last_frag = 0; + if (remaining < 8) + frag_size -= 8; + } + + + IFDEBUG(D_FRAG) + printf("clnp_fragment: seg off %d, size %d, remaining %d\n", + ntohs(seg_part.cng_off), frag_size, total_len-frag_size); + if (last_frag) + printf("clnp_fragment: last fragment\n"); + ENDDEBUG + + if (last_frag) { + /* + * this is the last fragment; we don't need to get any other + * mbufs. + */ + frag_hdr = hdr; + frag_data = m; + } else { + /* duplicate header and data mbufs */ + if ((frag_hdr = m_copy(hdr, 0, (int)M_COPYALL)) == NULL) { + clnp_discard(hdr, GEN_CONGEST); + m_freem(m); + return(ENOBUFS); + } + if ((frag_data = m_copy(m, 0, frag_size)) == NULL) { + clnp_discard(hdr, GEN_CONGEST); + m_freem(m); + m_freem(frag_hdr); + return(ENOBUFS); + } + INCSTAT(cns_fragments); + } + clnp = mtod(frag_hdr, struct clnp_fixed *); + + if (!last_frag) + clnp->cnf_type |= CNF_MORE_SEGS; + + /* link together */ + m_cat(frag_hdr, frag_data); + + /* insert segmentation part; updated below */ + bcopy((caddr_t)&seg_part, mtod(frag_hdr, caddr_t) + segoff, + sizeof(struct clnp_segment)); + + { + int derived_len = hdr_len + frag_size; + HTOC(clnp->cnf_seglen_msb, clnp->cnf_seglen_lsb, derived_len); + if ((frag_hdr->m_flags & M_PKTHDR) == 0) + panic("clnp_frag:lost header"); + frag_hdr->m_pkthdr.len = derived_len; + } + /* compute clnp checksum (on header only) */ + if (flags & CLNP_NO_CKSUM) { + HTOC(clnp->cnf_cksum_msb, clnp->cnf_cksum_lsb, 0); + } else { + iso_gen_csum(frag_hdr, CLNP_CKSUM_OFF, hdr_len); + } + + IFDEBUG(D_DUMPOUT) + struct mbuf *mdump = frag_hdr; + printf("clnp_fragment: sending dg:\n"); + while (mdump != NULL) { + printf("\tmbuf x%x, m_len %d\n", mdump, mdump->m_len); + mdump = mdump->m_next; + } + ENDDEBUG + +#ifdef TROLL + error = troll_output(ifp, frag_hdr, first_hop, rt); +#else + error = (*ifp->if_output)(ifp, frag_hdr, first_hop, rt); +#endif /* TROLL */ + + /* + * Tough situation: if the error occured on the last + * fragment, we can not send an ER, as the 
if_output + * routine consumed the packet. If the error occured + * on any intermediate packets, we can send an ER + * because we still have the original header in (m). + */ + if (error) { + if (frag_hdr != hdr) { + /* + * The error was not on the last fragment. We must + * free hdr and m before returning + */ + clnp_discard(hdr, GEN_NOREAS); + m_freem(m); + } + return(error); + } + + /* bump segment offset, trim data mbuf, and decrement count left */ +#ifdef TROLL + /* + * Decrement frag_size by some fraction. This will cause the + * next fragment to start 'early', thus duplicating the end + * of the current fragment. troll.tr_dup_size controls + * the fraction. If positive, it specifies the fraction. If + * negative, a random fraction is used. + */ + if ((trollctl.tr_ops & TR_DUPEND) && (!last_frag)) { + int num_bytes = frag_size; + + if (trollctl.tr_dup_size > 0) + num_bytes *= trollctl.tr_dup_size; + else + num_bytes *= troll_random(); + frag_size -= num_bytes; + } +#endif /* TROLL */ + total_len -= frag_size; + if (!last_frag) { + frag_base += frag_size; + seg_part.cng_off = htons(frag_base); + m_adj(m, frag_size); + } + } + return(0); + } else { + cantfrag: + INCSTAT(cns_cantfrag); + clnp_discard(m, GEN_SEGNEEDED); + return(EMSGSIZE); + } +} + +/* + * FUNCTION: clnp_reass + * + * PURPOSE: Attempt to reassemble a clnp packet given the current + * fragment. If reassembly succeeds (all the fragments + * are present), then return a pointer to an mbuf chain + * containing the reassembled packet. This packet will + * appear in the mbufs as if it had just arrived in + * one piece. + * + * If reassembly fails, then save this fragment and + * return 0. + * + * RETURNS: Ptr to assembled packet, or 0 + * + * SIDE EFFECTS: + * + * NOTES: + * clnp_slowtimo can not affect this code because clnpintr, and thus + * this code, is called at a higher priority than clnp_slowtimo. 
+ */ +struct mbuf * +clnp_reass(m, src, dst, seg) +struct mbuf *m; /* new fragment */ +struct iso_addr *src; /* src of new fragment */ +struct iso_addr *dst; /* dst of new fragment */ +struct clnp_segment *seg; /* segment part of fragment header */ +{ + register struct clnp_fragl *cfh; + + /* look for other fragments of this datagram */ + for (cfh = clnp_frags; cfh != NULL; cfh = cfh->cfl_next) { + if (seg->cng_id == cfh->cfl_id && + iso_addrmatch1(src, &cfh->cfl_src) && + iso_addrmatch1(dst, &cfh->cfl_dst)) { + IFDEBUG(D_REASS) + printf("clnp_reass: found packet\n"); + ENDDEBUG + /* + * There are other fragments here already. Lets see if + * this fragment is of any help + */ + clnp_insert_frag(cfh, m, seg); + if (m = clnp_comp_pdu(cfh)) { + register struct clnp_fixed *clnp = mtod(m, struct clnp_fixed *); + HTOC(clnp->cnf_seglen_msb, clnp->cnf_seglen_lsb, + seg->cng_tot_len); + } + return (m); + } + } + + IFDEBUG(D_REASS) + printf("clnp_reass: new packet!\n"); + ENDDEBUG + + /* + * This is the first fragment. If src is not consuming too many + * resources, then create a new fragment list and add + * this fragment to the list. + */ + /* TODO: don't let one src hog all the reassembly buffers */ + if (!clnp_newpkt(m, src, dst, seg) /* || this src is a hog */) { + INCSTAT(cns_fragdropped); + clnp_discard(m, GEN_CONGEST); + } + + return(NULL); +} + +/* + * FUNCTION: clnp_newpkt + * + * PURPOSE: Create the necessary structures to handle a new + * fragmented clnp packet. + * + * RETURNS: non-zero if it succeeds, zero if fails. + * + * SIDE EFFECTS: + * + * NOTES: Failure is only due to insufficient resources. 
+ */ +clnp_newpkt(m, src, dst, seg) +struct mbuf *m; /* new fragment */ +struct iso_addr *src; /* src of new fragment */ +struct iso_addr *dst; /* dst of new fragment */ +struct clnp_segment *seg; /* segment part of fragment header */ +{ + register struct clnp_fragl *cfh; + register struct clnp_fixed *clnp; + struct mbuf *m0; + + clnp = mtod(m, struct clnp_fixed *); + + /* + * Allocate new clnp fragl structure to act as header of all fragments + * for this datagram. + */ + MGET(m0, M_DONTWAIT, MT_FTABLE); + if (m0 == NULL) { + return (0); + } + cfh = mtod(m0, struct clnp_fragl *); + + /* + * Duplicate the header of this fragment, and save in cfh. + * Free m0 and return if m_copy does not succeed. + */ + if ((cfh->cfl_orighdr = m_copy(m, 0, (int)clnp->cnf_hdr_len)) == NULL) { + m_freem(m0); + return (0); + } + + /* Fill in rest of fragl structure */ + bcopy((caddr_t)src, (caddr_t)&cfh->cfl_src, sizeof(struct iso_addr)); + bcopy((caddr_t)dst, (caddr_t)&cfh->cfl_dst, sizeof(struct iso_addr)); + cfh->cfl_id = seg->cng_id; + cfh->cfl_ttl = clnp->cnf_ttl; + cfh->cfl_last = (seg->cng_tot_len - clnp->cnf_hdr_len) - 1; + cfh->cfl_frags = NULL; + cfh->cfl_next = NULL; + + /* Insert into list of packets */ + cfh->cfl_next = clnp_frags; + clnp_frags = cfh; + + /* Insert this fragment into list headed by cfh */ + clnp_insert_frag(cfh, m, seg); + return(1); +} + +/* + * FUNCTION: clnp_insert_frag + * + * PURPOSE: Insert fragment into list headed by 'cf'. + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: This is the 'guts' of the reassembly algorithm. + * Each fragment in this list contains a clnp_frag + * structure followed by the data of the fragment. + * The clnp_frag structure actually lies on top of + * part of the old clnp header. 
+ */ +clnp_insert_frag(cfh, m, seg) +struct clnp_fragl *cfh; /* header of list of packet fragments */ +struct mbuf *m; /* new fragment */ +struct clnp_segment *seg; /* segment part of fragment header */ +{ + register struct clnp_fixed *clnp; /* clnp hdr of fragment */ + register struct clnp_frag *cf; /* generic fragment ptr */ + register struct clnp_frag *cf_sub = NULL; /* frag subsequent to new one */ + register struct clnp_frag *cf_prev = NULL; /* frag previous to new one */ + u_short first; /* offset of first byte of initial pdu*/ + u_short last; /* offset of last byte of initial pdu */ + u_short fraglen;/* length of fragment */ + + clnp = mtod(m, struct clnp_fixed *); + first = seg->cng_off; + CTOH(clnp->cnf_seglen_msb, clnp->cnf_seglen_lsb, fraglen); + fraglen -= clnp->cnf_hdr_len; + last = (first + fraglen) - 1; + + IFDEBUG(D_REASS) + printf("clnp_insert_frag: New fragment: [%d ... %d], len %d\n", + first, last, fraglen); + printf("clnp_insert_frag: current fragments:\n"); + for (cf = cfh->cfl_frags; cf != NULL; cf = cf->cfr_next) { + printf("\tcf x%x: [%d ... %d]\n", cf, cf->cfr_first, cf->cfr_last); + } + ENDDEBUG + + if (cfh->cfl_frags != NULL) { + /* + * Find fragment which begins after the new one + */ + for (cf = cfh->cfl_frags; cf != NULL; cf_prev = cf, cf = cf->cfr_next) { + if (cf->cfr_first > first) { + cf_sub = cf; + break; + } + } + + IFDEBUG(D_REASS) + printf("clnp_insert_frag: Previous frag is "); + if (cf_prev == NULL) + printf("NULL\n"); + else + printf("[%d ... %d]\n", cf_prev->cfr_first, cf_prev->cfr_last); + printf("clnp_insert_frag: Subsequent frag is "); + if (cf_sub == NULL) + printf("NULL\n"); + else + printf("[%d ... %d]\n", cf_sub->cfr_first, cf_sub->cfr_last); + ENDDEBUG + + /* + * If there is a fragment before the new one, check if it + * overlaps the new one. If so, then trim the end of the + * previous one. 
+ */ + if (cf_prev != NULL) { + if (cf_prev->cfr_last > first) { + u_short overlap = cf_prev->cfr_last - first; + + IFDEBUG(D_REASS) + printf("clnp_insert_frag: previous overlaps by %d\n", + overlap); + ENDDEBUG + + if (overlap > fraglen) { + /* + * The new fragment is entirely contained in the + * preceeding one. We can punt on the new frag + * completely. + */ + m_freem(m); + return; + } else { + /* Trim data off of end of previous fragment */ + /* inc overlap to prevent duplication of last byte */ + overlap++; + m_adj(cf_prev->cfr_data, -(int)overlap); + cf_prev->cfr_last -= overlap; + } + } + } + + /* + * For all fragments past the new one, check if any data on + * the new one overlaps data on existing fragments. If so, + * then trim the extra data off the end of the new one. + */ + for (cf = cf_sub; cf != NULL; cf = cf->cfr_next) { + if (cf->cfr_first < last) { + u_short overlap = last - cf->cfr_first; + + IFDEBUG(D_REASS) + printf("clnp_insert_frag: subsequent overlaps by %d\n", + overlap); + ENDDEBUG + + if (overlap > fraglen) { + /* + * The new fragment is entirely contained in the + * succeeding one. This should not happen, because + * early on in this code we scanned for the fragment + * which started after the new one! + */ + m_freem(m); + printf("clnp_insert_frag: internal error!\n"); + return; + } else { + /* Trim data off of end of new fragment */ + /* inc overlap to prevent duplication of last byte */ + overlap++; + m_adj(m, -(int)overlap); + last -= overlap; + } + } + } + } + + /* + * Insert the new fragment beween cf_prev and cf_sub + * + * Note: the clnp hdr is still in the mbuf. + * If the data of the mbuf is not word aligned, shave off enough + * so that it is. Then, cast the clnp_frag structure on top + * of the clnp header. + * The clnp_hdr will not be used again (as we already have + * saved a copy of it). + * + * Save in cfr_bytes the number of bytes to shave off to get to + * the data of the packet. 
This is used when we coalesce fragments; + * the clnp_frag structure must be removed before joining mbufs. + */ + { + int pad; + u_int bytes; + + /* determine if header is not word aligned */ + pad = (int)clnp % 4; + if (pad < 0) + pad = -pad; + + /* bytes is number of bytes left in front of data */ + bytes = clnp->cnf_hdr_len - pad; + + IFDEBUG(D_REASS) + printf("clnp_insert_frag: clnp x%x requires %d alignment\n", + clnp, pad); + ENDDEBUG + + /* make it word aligned if necessary */ + if (pad) + m_adj(m, pad); + + cf = mtod(m, struct clnp_frag *); + cf->cfr_bytes = bytes; + + IFDEBUG(D_REASS) + printf("clnp_insert_frag: cf now x%x, cfr_bytes %d\n", cf, + cf->cfr_bytes); + ENDDEBUG + } + cf->cfr_first = first; + cf->cfr_last = last; + + + /* + * The data is the mbuf itself, although we must remember that the + * first few bytes are actually a clnp_frag structure + */ + cf->cfr_data = m; + + /* link into place */ + cf->cfr_next = cf_sub; + if (cf_prev == NULL) + cfh->cfl_frags = cf; + else + cf_prev->cfr_next = cf; +} + +/* + * FUNCTION: clnp_comp_pdu + * + * PURPOSE: Scan the list of fragments headed by cfh. Merge + * any contigious fragments into one. If, after + * traversing all the fragments, it is determined that + * the packet is complete, then return a pointer to + * the packet (with header prepended). Otherwise, + * return NULL. + * + * RETURNS: NULL, or a pointer to the assembled pdu in an mbuf chain. + * + * SIDE EFFECTS: Will colapse contigious fragments into one. + * + * NOTES: This code assumes that there are no overlaps of + * fragment pdus. + */ +struct mbuf * +clnp_comp_pdu(cfh) +struct clnp_fragl *cfh; /* fragment header */ +{ + register struct clnp_frag *cf = cfh->cfl_frags; + + while (cf->cfr_next != NULL) { + register struct clnp_frag *cf_next = cf->cfr_next; + + IFDEBUG(D_REASS) + printf("clnp_comp_pdu: comparing: [%d ... %d] to [%d ... 
%d]\n", + cf->cfr_first, cf->cfr_last, cf_next->cfr_first, + cf_next->cfr_last); + ENDDEBUG + + if (cf->cfr_last == (cf_next->cfr_first - 1)) { + /* + * Merge fragment cf and cf_next + * + * - update cf header + * - trim clnp_frag structure off of cf_next + * - append cf_next to cf + */ + struct clnp_frag cf_next_hdr; + struct clnp_frag *next_frag; + + cf_next_hdr = *cf_next; + next_frag = cf_next->cfr_next; + + IFDEBUG(D_REASS) + struct mbuf *mdump; + int l; + printf("clnp_comp_pdu: merging fragments\n"); + printf("clnp_comp_pdu: 1st: [%d ... %d] (bytes %d)\n", + cf->cfr_first, cf->cfr_last, cf->cfr_bytes); + mdump = cf->cfr_data; + l = 0; + while (mdump != NULL) { + printf("\tmbuf x%x, m_len %d\n", mdump, mdump->m_len); + l += mdump->m_len; + mdump = mdump->m_next; + } + printf("\ttotal len: %d\n", l); + printf("clnp_comp_pdu: 2nd: [%d ... %d] (bytes %d)\n", + cf_next->cfr_first, cf_next->cfr_last, cf_next->cfr_bytes); + mdump = cf_next->cfr_data; + l = 0; + while (mdump != NULL) { + printf("\tmbuf x%x, m_len %d\n", mdump, mdump->m_len); + l += mdump->m_len; + mdump = mdump->m_next; + } + printf("\ttotal len: %d\n", l); + ENDDEBUG + + cf->cfr_last = cf_next->cfr_last; + /* + * After this m_adj, the cf_next ptr is useless because we + * have adjusted the clnp_frag structure away... + */ + IFDEBUG(D_REASS) + printf("clnp_comp_pdu: shaving off %d bytes\n", + cf_next_hdr.cfr_bytes); + ENDDEBUG + m_adj(cf_next_hdr.cfr_data, (int)cf_next_hdr.cfr_bytes); + m_cat(cf->cfr_data, cf_next_hdr.cfr_data); + cf->cfr_next = next_frag; + } else { + cf = cf->cfr_next; + } + } + + cf = cfh->cfl_frags; + + IFDEBUG(D_REASS) + struct mbuf *mdump = cf->cfr_data; + printf("clnp_comp_pdu: first frag now: [%d ... 
%d]\n", cf->cfr_first, + cf->cfr_last); + printf("clnp_comp_pdu: data for frag:\n"); + while (mdump != NULL) { + printf("mbuf x%x, m_len %d\n", mdump, mdump->m_len); +/* dump_buf(mtod(mdump, caddr_t), mdump->m_len);*/ + mdump = mdump->m_next; + } + ENDDEBUG + + /* Check if datagram is complete */ + if ((cf->cfr_first == 0) && (cf->cfr_last == cfh->cfl_last)) { + /* + * We have a complete pdu! + * - Remove the frag header from (only) remaining fragment + * (which is not really a fragment anymore, as the datagram is + * complete). + * - Prepend a clnp header + */ + struct mbuf *data = cf->cfr_data; + struct mbuf *hdr = cfh->cfl_orighdr; + struct clnp_fragl *scan; + + IFDEBUG(D_REASS) + printf("clnp_comp_pdu: complete pdu!\n"); + ENDDEBUG + + m_adj(data, (int)cf->cfr_bytes); + m_cat(hdr, data); + + IFDEBUG(D_DUMPIN) + struct mbuf *mdump = hdr; + printf("clnp_comp_pdu: pdu is:\n"); + while (mdump != NULL) { + printf("mbuf x%x, m_len %d\n", mdump, mdump->m_len); +/* dump_buf(mtod(mdump, caddr_t), mdump->m_len);*/ + mdump = mdump->m_next; + } + ENDDEBUG + + /* + * Remove cfh from the list of fragmented pdus + */ + if (clnp_frags == cfh) { + clnp_frags = cfh->cfl_next; + } else { + for (scan = clnp_frags; scan != NULL; scan = scan->cfl_next) { + if (scan->cfl_next == cfh) { + scan->cfl_next = cfh->cfl_next; + break; + } + } + } + + /* free cfh */ + m_freem(dtom(cfh)); + + return(hdr); + } + + return(NULL); +} +#ifdef TROLL +static int troll_cnt; +#include +/* + * FUNCTION: troll_random + * + * PURPOSE: generate a pseudo-random number between 0 and 1 + * + * RETURNS: the random number + * + * SIDE EFFECTS: + * + * NOTES: This is based on the clock. + */ +float troll_random() +{ + extern struct timeval time; + long t = time.tv_usec % 100; + + return((float)t / (float) 100); +} + +/* + * FUNCTION: troll_output + * + * PURPOSE: Do something sneaky with the datagram passed. 
Possible + * operations are: + * Duplicate the packet + * Drop the packet + * Trim some number of bytes from the packet + * Munge some byte in the packet + * + * RETURNS: 0, or unix error code + * + * SIDE EFFECTS: + * + * NOTES: The operation of this procedure is regulated by the + * troll control structure (Troll). + */ +troll_output(ifp, m, dst, rt) +struct ifnet *ifp; +struct mbuf *m; +struct sockaddr *dst; +struct rtentry *rt; +{ + int err = 0; + troll_cnt++; + + if (trollctl.tr_ops & TR_DUPPKT) { + /* + * Duplicate every Nth packet + * TODO: random? + */ + float f_freq = troll_cnt * trollctl.tr_dup_freq; + int i_freq = troll_cnt * trollctl.tr_dup_freq; + if (i_freq == f_freq) { + struct mbuf *dup = m_copy(m, 0, (int)M_COPYALL); + if (dup != NULL) + err = (*ifp->if_output)(ifp, dup, dst, rt); + } + if (!err) + err = (*ifp->if_output)(ifp, m, dst, rt); + return(err); + } else if (trollctl.tr_ops & TR_DROPPKT) { + } else if (trollctl.tr_ops & TR_CHANGE) { + struct clnp_fixed *clnp = mtod(m, struct clnp_fixed *); + clnp->cnf_cksum_msb = 0; + err = (*ifp->if_output)(ifp, m, dst, rt); + return(err); + } else { + err = (*ifp->if_output)(ifp, m, dst, rt); + return(err); + } +} + +#endif /* TROLL */ diff --git a/bsd/netiso/clnp_input.c b/bsd/netiso/clnp_input.c new file mode 100644 index 000000000..7bf537d7b --- /dev/null +++ b/bsd/netiso/clnp_input.c @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clnp_input.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 

******************************************************************/

/*
 * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
 */

/*
 * NOTE(review): the targets of these #include directives are missing —
 * the angle-bracketed header names appear to have been stripped during
 * extraction.  TODO: restore them from the upstream bsd/netiso/clnp_input.c.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if ISO
/* Map from ISO protocol number to an index into the isosw protosw table. */
u_char clnp_protox[ISOPROTO_MAX];
/* Dispatch table for CLNL (connectionless network layer) protocol ids. */
struct clnl_protosw clnl_protox[256];
int clnpqmaxlen = IFQ_MAXLEN; /* RAH? why is this a variable */
struct mbuf *clnp_data_ck();

int clnp_input();

int esis_input();

#ifdef ISO_X25ESIS
int x25esis_input();
#endif /* ISO_X25ESIS */

/*
 * FUNCTION:		clnp_init
 *
 * PURPOSE:		clnp initialization. Fill in clnp switch tables.
 *
 * RETURNS:		none
 *
 * SIDE EFFECTS:	fills in clnp_protox table with correct offsets into
 *			the isosw table.
 *
 * NOTES:
 */
clnp_init()
{
	register struct protosw *pr;

	/*
	 * CLNP protox initialization
	 */
	if ((pr = pffindproto(PF_ISO, ISOPROTO_RAW, SOCK_RAW)) == 0)
		printf("clnl_init: no raw CLNP\n");
	else
		/* store the protosw's index within isosw[] for later dispatch */
		clnp_protox[ISOPROTO_RAW] = pr - isosw;

	if ((pr = pffindproto(PF_ISO, ISOPROTO_TP, SOCK_SEQPACKET)) == 0)
		printf("clnl_init: no tp/clnp\n");
	else
		clnp_protox[ISOPROTO_TP] = pr - isosw;

	/*
	 * CLNL protox initialization: route ISO 8473 PDUs to clnp_input.
	 */
	clnl_protox[ISO8473_CLNP].clnl_input = clnp_input;

	clnlintrq.ifq_maxlen = clnpqmaxlen;
}

/*
 * FUNCTION:		clnlintr
 *
 * PURPOSE:		Process a packet on the clnl input queue
 *
 * RETURNS:		nothing.
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
clnlintr()
{
	register struct mbuf *m;		/* ptr to first mbuf of pkt */
	register struct clnl_fixed *clnl;	/* ptr to fixed part of clnl hdr */
	int s;					/* save and restore priority */
	struct clnl_protosw *clnlsw;		/* ptr to protocol switch */
	struct snpa_hdr sh;			/* subnetwork hdr */

	/*
	 * Get next datagram off clnl input queue
	 */
next:
	s = splimp();	/* block network interrupts while touching the ifqueue */
	/* IF_DEQUEUESNPAHDR(&clnlintrq, m, sh);*/
	IF_DEQUEUE(&clnlintrq, m);
	splx(s);


	if (m == 0)		/* nothing to do */
		return;
	if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.rcvif == 0) {
		m_freem(m);
		goto next;
	} else {
		/* drop the packet unless the receiving interface has an ISO address */
		register struct ifaddr *ifa;
		for (ifa = m->m_pkthdr.rcvif->if_addrlist; ifa; ifa = ifa->ifa_next)
			if (ifa->ifa_addr->sa_family == AF_ISO)
				break;
		if (ifa == 0) {
			m_freem(m);
			goto next;
		}
	}
	bzero((caddr_t)&sh, sizeof(sh));
	sh.snh_flags = m->m_flags & (M_MCAST|M_BCAST);
	switch((sh.snh_ifp = m->m_pkthdr.rcvif)->if_type) {
		extern int ether_output();
	case IFT_EON:
		/*
		 * EON (CLNP over IP): copy the IP src/dst into the snpa
		 * header and strip the encapsulation before dispatch.
		 */
		bcopy(mtod(m, caddr_t), (caddr_t)sh.snh_dhost, sizeof(u_long));
		bcopy(sizeof(u_long) + mtod(m, caddr_t),
		      (caddr_t)sh.snh_shost, sizeof(u_long));
		sh.snh_dhost[4] = mtod(m, u_char *)[sizeof(struct ip) +
			_offsetof(struct eon_hdr, eonh_class)];
		m->m_data += EONIPLEN;
		m->m_len -= EONIPLEN;
		m->m_pkthdr.len -= EONIPLEN;
		break;

	default:
		/* ethernet-like: record MAC addresses, strip link header */
		if (sh.snh_ifp->if_output == ether_output) {
			bcopy((caddr_t)(mtod(m, struct ether_header *)->ether_dhost),
				(caddr_t)sh.snh_dhost, 2*sizeof(sh.snh_dhost));
			m->m_data += sizeof (struct ether_header);
			m->m_len -= sizeof (struct ether_header);
			m->m_pkthdr.len -= sizeof (struct ether_header);
		}
	}
	IFDEBUG(D_INPUT)
		int i;
		printf("clnlintr: src:");
		for (i=0; i<6; i++)
			printf("%x%c", sh.snh_shost[i] & 0xff, (i<5) ? ':' : ' ');
		printf(" dst:");
		for (i=0; i<6; i++)
			printf("%x%c", sh.snh_dhost[i] & 0xff, (i<5) ? ':' : ' ');
		printf("\n");
	ENDDEBUG

	/*
	 * Get the fixed part of the clnl header into the first mbuf.
	 * Drop the packet if this fails.
	 * Do not call m_pullup if we have a cluster mbuf or the
	 * data is not there.
	 */
	if ((IS_CLUSTER(m) || (m->m_len < sizeof(struct clnl_fixed))) &&
		((m = m_pullup(m, sizeof(struct clnl_fixed))) == 0)) {
		INCSTAT(cns_toosmall);	/* TODO: use clnl stats */
		goto next;		/* m_pullup discards mbuf */
	}

	clnl = mtod(m, struct clnl_fixed *);

	/*
	 * Drop packet if the length of the header is not reasonable.
	 */
	if ((clnl->cnf_hdr_len < CLNP_HDR_MIN) ||
		(clnl->cnf_hdr_len > CLNP_HDR_MAX)) {
		INCSTAT(cns_badhlen);	/* TODO: use clnl stats */
		m_freem(m);
		goto next;
	}

	/*
	 * If the header is not contained in this mbuf, make it so.
	 * Drop packet if this fails.
	 * Note: m_pullup will allocate a cluster mbuf if necessary
	 */
	if (clnl->cnf_hdr_len > m->m_len) {
		if ((m = m_pullup(m, (int)clnl->cnf_hdr_len)) == 0) {
			INCSTAT(cns_badhlen);	/* TODO: use clnl stats */
			goto next;		/* m_pullup discards mbuf */
		}
		clnl = mtod(m, struct clnl_fixed *);
	}

	/* dispatch on the CLNL protocol id (e.g. ISO8473_CLNP -> clnp_input) */
	clnlsw = &clnl_protox[clnl->cnf_proto_id];


	if (clnlsw->clnl_input)
		(*clnlsw->clnl_input) (m, &sh);
	else
		m_freem(m);

	goto next;
}

/*
 * FUNCTION:		clnp_input
 *
 * PURPOSE:		process an incoming clnp packet
 *
 * RETURNS:		nothing
 *
 * SIDE EFFECTS:	increments fields of clnp_stat structure.
 *
 * NOTES:
 *	TODO: I would like to make seg_part a pointer into the mbuf, but
 *	will it be correctly aligned?
 */
clnp_input(m, shp)
struct mbuf *m;		/* ptr to first mbuf of pkt */
struct snpa_hdr *shp;	/* subnetwork header */
{
	register struct clnp_fixed *clnp;	/* ptr to fixed part of header */
	struct sockaddr_iso source;		/* source address of pkt */
	struct sockaddr_iso target;		/* destination address of pkt */
/* shorthand aliases for the embedded iso addresses of source/target */
#define src source.siso_addr
#define dst target.siso_addr
	caddr_t hoff;			/* current offset in packet */
	caddr_t hend;			/* address of end of header info */
	struct clnp_segment seg_part;	/* segment part of hdr */
	int seg_off=0;			/* offset of segment part of hdr */
	int seg_len;	/* length of packet data&hdr in bytes */
	struct clnp_optidx oidx, *oidxp = NULL;	/* option index */
	extern int iso_systype;			/* used by ESIS config resp */
	extern struct sockaddr_iso blank_siso;	/* used for initializing */
	int need_afrin = 0;
		/* true if congestion experienced */
		/* which means you need afrin nose */
		/* spray. How clever! */

	IFDEBUG(D_INPUT)
		printf(
		"clnp_input: proccessing dg; First mbuf m_len %d, m_type x%x, %s\n",
			m->m_len, m->m_type, IS_CLUSTER(m) ? "cluster" : "normal");
	ENDDEBUG
	need_afrin = 0;

	/*
	 * If no iso addresses have been set, there is nothing
	 * to do with the packet.
	 */
	if (iso_ifaddr == NULL) {
		clnp_discard(m, ADDR_DESTUNREACH);
		return;
	}

	INCSTAT(cns_total);
	clnp = mtod(m, struct clnp_fixed *);

	IFDEBUG(D_DUMPIN)
		struct mbuf *mhead;
		int total_len = 0;
		printf("clnp_input: clnp header:\n");
		dump_buf(mtod(m, caddr_t), clnp->cnf_hdr_len);
		printf("clnp_input: mbuf chain:\n");
		for (mhead = m; mhead != NULL; mhead=mhead->m_next) {
			printf("m x%x, len %d\n", mhead, mhead->m_len);
			total_len += mhead->m_len;
		}
		printf("clnp_input: total length of mbuf chain %d:\n", total_len);
	ENDDEBUG

	/*
	 * Compute checksum (if necessary) and drop packet if
	 * checksum does not match
	 */
	if (CKSUM_REQUIRED(clnp) && iso_check_csum(m, (int)clnp->cnf_hdr_len)) {
		INCSTAT(cns_badcsum);
		clnp_discard(m, GEN_BADCSUM);
		return;
	}

	if (clnp->cnf_vers != ISO8473_V1) {
		INCSTAT(cns_badvers);
		clnp_discard(m, DISC_UNSUPPVERS);
		return;
	}


	/* check mbuf data length: clnp_data_ck will free mbuf upon error */
	/* CTOH assembles the 16-bit segment-length field from its two octets */
	CTOH(clnp->cnf_seglen_msb, clnp->cnf_seglen_lsb, seg_len);
	if ((m = clnp_data_ck(m, seg_len)) == 0)
		return;

	clnp = mtod(m, struct clnp_fixed *);
	hend = (caddr_t)clnp + clnp->cnf_hdr_len;

	/*
	 * extract the source and destination address
	 * drop packet on failure
	 */
	source = target = blank_siso;

	hoff = (caddr_t)clnp + sizeof(struct clnp_fixed);
	/* CLNP_EXTRACT_ADDR advances hoff, or zeroes it on a bad address */
	CLNP_EXTRACT_ADDR(dst, hoff, hend);
	if (hoff == (caddr_t)0) {
		INCSTAT(cns_badaddr);
		clnp_discard(m, GEN_INCOMPLETE);
		return;
	}
	CLNP_EXTRACT_ADDR(src, hoff, hend);
	if (hoff == (caddr_t)0) {
		INCSTAT(cns_badaddr);
		clnp_discard(m, GEN_INCOMPLETE);
		return;
	}

	IFDEBUG(D_INPUT)
		printf("clnp_input: from %s", clnp_iso_addrp(&src));
		printf(" to %s\n", clnp_iso_addrp(&dst));
	ENDDEBUG

	/*
	 * extract the segmentation information, if it is present.
	 * drop packet on failure
	 */
	if (((clnp->cnf_type & CNF_TYPE) != CLNP_ER) &&
		(clnp->cnf_type & CNF_SEG_OK)) {
		if (hoff + sizeof(struct clnp_segment) > hend) {
			INCSTAT(cns_noseg);
			clnp_discard(m, GEN_INCOMPLETE);
			return;
		} else {
			(void) bcopy(hoff, (caddr_t)&seg_part, sizeof(struct clnp_segment));
			/* make sure segmentation fields are in host order */
			seg_part.cng_id = ntohs(seg_part.cng_id);
			seg_part.cng_off = ntohs(seg_part.cng_off);
			seg_part.cng_tot_len = ntohs(seg_part.cng_tot_len);
			seg_off = hoff - (caddr_t)clnp;
			hoff += sizeof(struct clnp_segment);
		}
	}

	/*
	 * process options if present. If clnp_opt_sanity returns
	 * false (indicating an error was found in the options) or
	 * an unsupported option was found
	 * then drop packet and emit an ER.
	 */
	if (hoff < hend) {
		int errcode;

		oidxp = &oidx;
		errcode = clnp_opt_sanity(m, hoff, hend-hoff, oidxp);

		/* we do not support security */
		if ((errcode == 0) && (oidxp->cni_securep))
			errcode = DISC_UNSUPPSECURE;

		/* the er option is valid with ER pdus only */
		if ((errcode == 0) && (oidxp->cni_er_reason != ER_INVALREAS) &&
			((clnp->cnf_type & CNF_TYPE) != CLNP_ER))
			errcode = DISC_UNSUPPOPT;

#ifdef DECBIT
		/* check if the congestion experienced bit is set */
		if (oidxp->cni_qos_formatp) {
			caddr_t qosp = CLNP_OFFTOOPT(m, oidxp->cni_qos_formatp);
			u_char qos = *qosp;

			need_afrin = ((qos & (CLNPOVAL_GLOBAL|CLNPOVAL_CONGESTED)) ==
				(CLNPOVAL_GLOBAL|CLNPOVAL_CONGESTED));
			if (need_afrin)
				INCSTAT(cns_congest_rcvd);
		}
#endif /* DECBIT */

		if (errcode != 0) {
			clnp_discard(m, (char)errcode);
			IFDEBUG(D_INPUT)
				printf("clnp_input: dropped (err x%x) due to bad options\n",
					errcode);
			ENDDEBUG
			return;
		}
	}

	/*
	 * check if this packet is for us. if not, then forward
	 */
	if (clnp_ours(&dst) == 0) {
		IFDEBUG(D_INPUT)
			printf("clnp_input: forwarding packet not for us\n");
		ENDDEBUG
		clnp_forward(m, seg_len, &dst, oidxp, seg_off, shp);
		return;
	}

	/*
	 * ESIS Configuration Response Function
	 *
	 * If the packet received was sent to the multicast address
	 * all end systems, then send an esh to the source
	 */
	if ((shp->snh_flags & M_MCAST) && (iso_systype == SNPA_ES)) {
		extern short esis_holding_time;

		esis_shoutput(shp->snh_ifp, ESIS_ESH, esis_holding_time,
			shp->snh_shost, 6, &dst);
	}

	/*
	 * If this is a fragment, then try to reassemble it. If clnp_reass
	 * returns non NULL, the packet has been reassembled, and should
	 * be give to TP. Otherwise the fragment has been delt with
	 * by the reassembly code (either stored or deleted). In either case
	 * we should have nothing more to do with it.
	 */
	if (((clnp->cnf_type & CNF_TYPE) != CLNP_ER) &&
		(clnp->cnf_type & CNF_SEG_OK) &&
		(seg_len != seg_part.cng_tot_len)) {
		struct mbuf *m0;

		if ((m0 = clnp_reass(m, &src, &dst, &seg_part)) != NULL) {
			m = m0;
			clnp = mtod(m, struct clnp_fixed *);
			INCSTAT(cns_reassembled);
		} else {
			return;
		}
	}

	/*
	 * give the packet to the higher layer
	 *
	 * Note: the total length of packet
	 * is the total length field of the segmentation part,
	 * or, if absent, the segment length field of the
	 * header.
	 */
	INCSTAT(cns_delivered);
	switch (clnp->cnf_type & CNF_TYPE) {
	case CLNP_ER:
		/*
		 * This ER must have the er option.
		 * If the option is not present, discard datagram.
		 */
		if (oidxp == NULL || oidxp->cni_er_reason == ER_INVALREAS) {
			clnp_discard(m, GEN_HDRSYNTAX);
		} else {
			clnp_er_input(m, &src, oidxp->cni_er_reason);
		}
		break;

	case CLNP_DT:
		/* need_afrin tells TP whether congestion was experienced */
		(*isosw[clnp_protox[ISOPROTO_TP]].pr_input)(m, &source, &target,
			clnp->cnf_hdr_len, need_afrin);
		break;

	case CLNP_RAW:
	case CLNP_ECR:
		IFDEBUG(D_INPUT)
			printf("clnp_input: raw input of %d bytes\n",
				clnp->cnf_type & CNF_SEG_OK ? seg_part.cng_tot_len : seg_len);
		ENDDEBUG
		(*isosw[clnp_protox[ISOPROTO_RAW]].pr_input)(m, &source, &target,
			clnp->cnf_hdr_len);
		break;

	case CLNP_EC:
		IFDEBUG(D_INPUT)
			printf("clnp_input: echoing packet\n");
		ENDDEBUG
		(void)clnp_echoreply(m,
			(clnp->cnf_type & CNF_SEG_OK ? (int)seg_part.cng_tot_len : seg_len),
			&source, &target, oidxp);
		break;

	default:
		printf("clnp_input: unknown clnp pkt type %d\n",
			clnp->cnf_type & CNF_TYPE);
		clnp_stat.cns_delivered--;
		clnp_stat.cns_noproto++;
		clnp_discard(m, GEN_HDRSYNTAX);
		break;
	}
}
#endif /* ISO */
diff --git a/bsd/netiso/clnp_options.c b/bsd/netiso/clnp_options.c
new file mode 100644
index 000000000..fe9498d22
--- /dev/null
+++ b/bsd/netiso/clnp_options.c
@@ -0,0 +1,551 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clnp_options.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 

******************************************************************/

/*
 * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
 */

#if ISO

/*
 * NOTE(review): the targets of these #include directives are missing —
 * the angle-bracketed header names appear to have been stripped during
 * extraction.  TODO: restore them from the upstream bsd/netiso/clnp_options.c.
 */
#include
#include
#include
#include
#include
#include
#include

#include
#include

#include
#include
#include
#include

/*
 * FUNCTION:		clnp_update_srcrt
 *
 * PURPOSE:		Process src rt option accompanying a clnp datagram.
 *			- bump src route ptr if src routing and
 *			  we appear current in src route list.
 *
 * RETURNS:		none
 *
 * SIDE EFFECTS:
 *
 * NOTES:		If source routing has been terminated, do nothing.
 */
clnp_update_srcrt(options, oidx)
struct mbuf *options;		/* ptr to options mbuf */
struct clnp_optidx *oidx;	/* ptr to option index */
{
	u_char len;		/* length of current address */
	struct iso_addr isoa;	/* copy current address into here */

	if (CLNPSRCRT_TERM(oidx, options)) {
		IFDEBUG(D_OPTIONS)
			printf("clnp_update_srcrt: src rt terminated\n");
		ENDDEBUG
		return;
	}

	/* copy the current source-route entry into a local iso_addr */
	len = CLNPSRCRT_CLEN(oidx, options);
	bcopy(CLNPSRCRT_CADDR(oidx, options), (caddr_t)&isoa, len);
	isoa.isoa_len = len;

	IFDEBUG(D_OPTIONS)
		printf("clnp_update_srcrt: current src rt: %s\n",
			clnp_iso_addrp(&isoa));
	ENDDEBUG

	/* only advance the route pointer if the current entry names us */
	if (clnp_ours(&isoa)) {
		IFDEBUG(D_OPTIONS)
			printf("clnp_update_srcrt: updating src rt\n");
		ENDDEBUG

		/* update pointer to next src route */
		len++;	/* count length byte too! */
		CLNPSRCRT_OFF(oidx, options) += len;
	}
}

/*
 * FUNCTION:		clnp_dooptions
 *
 * PURPOSE:		Process options accompanying a clnp datagram.
 *			Processing includes
 *			- log our address if recording route
 *
 * RETURNS:		none
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
clnp_dooptions(options, oidx, ifp, isoa)
struct mbuf *options;		/* ptr to options mbuf */
struct clnp_optidx *oidx;	/* ptr to option index */
struct ifnet *ifp;		/* ptr to interface pkt is leaving on */
struct iso_addr *isoa;		/* ptr to our address for this ifp */
{
	/*
	 * If record route is specified, move all
	 * existing records over, and insert the address of
	 * interface passed
	 */
	if (oidx->cni_recrtp) {
		char *opt;	/* ptr to beginning of recrt option */
		u_char off;	/* offset from opt of first free byte */
		char *rec_start;	/* beginning of new rt recorded */

		opt = CLNP_OFFTOOPT(options, oidx->cni_recrtp);
		off = *(opt + 1);
		/* -1: off is 1-based relative to the option value area */
		rec_start = opt + off - 1;

		IFDEBUG(D_OPTIONS)
			printf("clnp_dooptions: record route: option x%x for %d bytes\n",
				opt, oidx->cni_recrt_len);
			printf("\tfree slot offset x%x\n", off);
			printf("clnp_dooptions: recording %s\n", clnp_iso_addrp(isoa));
			printf("clnp_dooptions: option dump:\n");
			dump_buf(opt, oidx->cni_recrt_len);
		ENDDEBUG

		/* proceed only if recording has not been terminated */
		if (off != 0xff) {
			int new_addrlen = isoa->isoa_len + 1;
			/*
			 * if there is insufficient room to store the next address,
			 * then terminate recording. Plus 1 on isoa_len is for the
			 * length byte itself
			 */
			if (oidx->cni_recrt_len - (off - 1) < new_addrlen) {
				*(opt + 1) = 0xff;	/* terminate recording */
			} else {
				IFDEBUG(D_OPTIONS)
					printf("clnp_dooptions: new addr at x%x for %d\n",
						rec_start, new_addrlen);
				ENDDEBUG

				/* copies length byte + address (iso_addr layout) */
				bcopy((caddr_t)isoa, rec_start, new_addrlen);

				/* update offset field */
				*(opt + 1) += new_addrlen;

				IFDEBUG(D_OPTIONS)
					printf("clnp_dooptions: new option dump:\n");
					dump_buf(opt, oidx->cni_recrt_len);
				ENDDEBUG
			}
		}
	}
}

/*
 * FUNCTION:		clnp_set_opts
 *
 * PURPOSE:		Check the data mbuf passed for option sanity.
If it is
 *			ok, then set the options ptr to address the data mbuf.
 *			If an options mbuf exists, free it. This implies that
 *			any old options will be lost. If data is NULL, simply
 *			free any old options.
 *
 * RETURNS:		unix error code
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
clnp_set_opts(options, data)
struct mbuf **options;	/* target for option information */
struct mbuf **data;	/* source of option information */
{
	int error = 0;			/* error return value */
	struct clnp_optidx dummy;	/* dummy index - not used */

	/*
	 * remove any existing options
	 */
	if (*options != NULL) {
		m_freem(*options);
		*options = NULL;
	}

	if (*data != NULL) {
		/*
		 * Insure that the options are reasonable.
		 *
		 * Also, we do not support security, priority,
		 * nor do we allow one to send an ER option
		 *
		 * The QOS parameter is checked for the DECBIT.
		 */
		if ((clnp_opt_sanity(*data, mtod(*data, caddr_t), (*data)->m_len,
			&dummy) != 0) ||
			(dummy.cni_securep) ||
			(dummy.cni_priorp) ||
			(dummy.cni_er_reason != ER_INVALREAS)) {
			error = EINVAL;
		} else {
			/* take ownership of the caller's mbuf */
			*options = *data;
			*data = NULL;	/* so caller won't free mbuf @ *data */
		}
	}
	return error;
}

/*
 * FUNCTION:		clnp_opt_sanity
 *
 * PURPOSE:		Check the options (beginning at opts for len bytes) for
 *			sanity. In addition, fill in the option index structure
 *			in with information about each option discovered.
 *
 * RETURNS:		success (options check out) - 0
 *			failure - an ER pdu error code describing failure
 *
 * SIDE EFFECTS:
 *
 * NOTES:		Each pointer field of the option index is filled in with
 *			the offset from the beginning of the mbuf data, not the
 *			actual address.
+ */ +clnp_opt_sanity(m, opts, len, oidx) +struct mbuf *m; /* mbuf options reside in */ +caddr_t opts; /* ptr to buffer containing options */ +int len; /* length of buffer */ +struct clnp_optidx *oidx; /* RETURN: filled in with option idx info */ +{ + u_char opcode; /* code of particular option */ + u_char oplen; /* length of a particular option */ + caddr_t opts_end; /* ptr to end of options */ + u_char pad = 0, secure = 0, srcrt = 0, recrt = 0, qos = 0, prior = 0; + /* flags for catching duplicate options */ + + IFDEBUG(D_OPTIONS) + printf("clnp_opt_sanity: checking %d bytes of data:\n", len); + dump_buf(opts, len); + ENDDEBUG + + /* clear option index field if passed */ + bzero((caddr_t)oidx, sizeof(struct clnp_optidx)); + + /* + * We need to indicate whether the ER option is present. This is done + * by overloading the er_reason field to also indicate presense of + * the option along with the option value. I would like ER_INVALREAS + * to have value 0, but alas, 0 is a valid er reason... 
+ */ + oidx->cni_er_reason = ER_INVALREAS; + + opts_end = opts + len; + while (opts < opts_end) { + /* must have at least 2 bytes per option (opcode and len) */ + if (opts + 2 > opts_end) + return(GEN_INCOMPLETE); + + opcode = *opts++; + oplen = *opts++; + IFDEBUG(D_OPTIONS) + printf("clnp_opt_sanity: opcode is %x and oplen %d\n", + opcode, oplen); + printf("clnp_opt_sanity: clnpoval_SRCRT is %x\n", CLNPOVAL_SRCRT); + + switch (opcode) { + case CLNPOVAL_PAD: { + printf("CLNPOVAL_PAD\n"); + } break; + case CLNPOVAL_SECURE: { + printf("CLNPOVAL_SECURE\n"); + } break; + case CLNPOVAL_SRCRT: { + printf("CLNPOVAL_SRCRT\n"); + } break; + case CLNPOVAL_RECRT: { + printf("CLNPOVAL_RECRT\n"); + } break; + case CLNPOVAL_QOS: { + printf("CLNPOVAL_QOS\n"); + } break; + case CLNPOVAL_PRIOR: { + printf("CLNPOVAL_PRIOR\n"); + } break; + case CLNPOVAL_ERREAS: { + printf("CLNPOVAL_ERREAS\n"); + } break; + default: + printf("UKNOWN option %x\n", opcode); + } + ENDDEBUG + + /* don't allow crazy length values */ + if (opts + oplen > opts_end) + return(GEN_INCOMPLETE); + + switch (opcode) { + case CLNPOVAL_PAD: + /* + * Padding: increment pointer by length of padding + */ + if (pad++) /* duplicate ? */ + return(GEN_DUPOPT); + opts += oplen; + break; + + case CLNPOVAL_SECURE: { + u_char format = *opts; + + if (secure++) /* duplicate ? */ + return(GEN_DUPOPT); + /* + * Security: high 2 bits of first octet indicate format + * (00 in high bits is reserved). + * Remaining bits must be 0. Remaining octets indicate + * actual security + */ + if (((format & 0x3f) > 0) || /* low 6 bits set ? */ + ((format & 0xc0) == 0)) /* high 2 bits zero ? 
*/ + return(GEN_HDRSYNTAX); + + oidx->cni_securep = CLNP_OPTTOOFF(m, opts); + oidx->cni_secure_len = oplen; + opts += oplen; + } break; + + case CLNPOVAL_SRCRT: { + u_char type, offset; /* type of rt, offset of start */ + caddr_t route_end; /* address of end of route option */ + + IFDEBUG(D_OPTIONS) + printf("clnp_opt_sanity: SRC RT\n"); + ENDDEBUG + + if (srcrt++) /* duplicate ? */ + return(GEN_DUPOPT); + /* + * source route: There must be 2 bytes following the length + * field: type and offset. The type must be either + * partial route or complete route. The offset field must + * be within the option. A single exception is made, however. + * The offset may be 1 greater than the length. This case + * occurs when the last source route record is consumed. + * In this case, we ignore the source route option. + * RAH? You should be able to set offset to 'ff' like in record + * route! + * Following this is a series of address fields. + * Each address field is composed of a (length, address) pair. 
+ * Insure that the offset and each address length is reasonable + */ + route_end = opts + oplen; + + if (opts + 2 > route_end) + return(SRCRT_SYNTAX); + + type = *opts; + offset = *(opts+1); + + + /* type must be partial or complete */ + if (!((type == CLNPOVAL_PARTRT) || (type == CLNPOVAL_COMPRT))) + return(SRCRT_SYNTAX); + + oidx->cni_srcrt_s = CLNP_OPTTOOFF(m, opts); + oidx->cni_srcrt_len = oplen; + + opts += offset-1; /*set opts to first addr in rt */ + + /* + * Offset must be reasonable: + * less than end of options, or equal to end of options + */ + if (opts >= route_end) { + if (opts == route_end) { + IFDEBUG(D_OPTIONS) + printf("clnp_opt_sanity: end of src route info\n"); + ENDDEBUG + break; + } else + return(SRCRT_SYNTAX); + } + + while (opts < route_end) { + u_char addrlen = *opts++; + if (opts + addrlen > route_end) + return(SRCRT_SYNTAX); + opts += addrlen; + } + } break; + case CLNPOVAL_RECRT: { + u_char type, offset; /* type of rt, offset of start */ + caddr_t record_end; /* address of end of record option */ + + if (recrt++) /* duplicate ? */ + return(GEN_DUPOPT); + /* + * record route: after the length field, expect a + * type and offset. Type must be partial or complete. + * Offset indicates where to start recording. Insure it + * is within the option. All ones for offset means + * recording is terminated. + */ + record_end = opts + oplen; + + oidx->cni_recrtp = CLNP_OPTTOOFF(m, opts); + oidx->cni_recrt_len = oplen; + + if (opts + 2 > record_end) + return(GEN_INCOMPLETE); + + type = *opts; + offset = *(opts+1); + + /* type must be partial or complete */ + if (!((type == CLNPOVAL_PARTRT) || (type == CLNPOVAL_COMPRT))) + return(GEN_HDRSYNTAX); + + /* offset must be reasonable */ + if ((offset < 0xff) && (opts + offset > record_end)) + return(GEN_HDRSYNTAX); + opts += oplen; + } break; + case CLNPOVAL_QOS: { + u_char format = *opts; + + if (qos++) /* duplicate ? 
*/ + return(GEN_DUPOPT); + /* + * qos: high 2 bits of first octet indicate format + * (00 in high bits is reserved). + * Remaining bits must be 0 (unless format indicates + * globally unique qos, in which case remaining bits indicate + * qos (except bit 6 which is reserved)). Otherwise, + * remaining octets indicate actual qos. + */ + if (((format & 0xc0) == 0) || /* high 2 bits zero ? */ + (((format & 0xc0) != CLNPOVAL_GLOBAL) && + ((format & 0x3f) > 0))) /* not global,low bits used ? */ + return(GEN_HDRSYNTAX); + + oidx->cni_qos_formatp = CLNP_OPTTOOFF(m, opts); + oidx->cni_qos_len = oplen; + + opts += oplen; + } break; + + case CLNPOVAL_PRIOR: { + if (prior++) /* duplicate ? */ + return(GEN_DUPOPT); + /* + * priority: value must be one byte long + */ + if (oplen != 1) + return(GEN_HDRSYNTAX); + + oidx->cni_priorp = CLNP_OPTTOOFF(m, opts); + + opts += oplen; + } break; + + case CLNPOVAL_ERREAS: { + /* + * er reason: value must be two bytes long + */ + if (oplen != 2) + return(GEN_HDRSYNTAX); + + oidx->cni_er_reason = *opts; + + opts += oplen; + } break; + + default: { + IFDEBUG(D_OPTIONS) + printf("clnp_opt_sanity: UNKNOWN OPTION 0x%x\n", opcode); + ENDDEBUG + return(DISC_UNSUPPOPT); + } + } + } + IFDEBUG(D_OPTIONS) + printf("clnp_opt_sanity: return(0)\n", opcode); + ENDDEBUG + return(0); +} +#endif /* ISO */ diff --git a/bsd/netiso/clnp_output.c b/bsd/netiso/clnp_output.c new file mode 100644 index 000000000..e425f0b43 --- /dev/null +++ b/bsd/netiso/clnp_output.c @@ -0,0 +1,580 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clnp_output.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 

******************************************************************/

/*
 * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
 */

/*
 * NOTE(review): the header names on these #include lines were lost in
 * extraction.  The original bsd/netiso/clnp_output.c pulls in the usual
 * sys/ (param, systm, mbuf, domain, socket, socketvar, protosw, errno),
 * net/ (if, route) and netiso/ (iso, iso_var, iso_pcb, clnp, clnp_stat,
 * argo_debug) headers -- confirm against upstream before compiling.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include

#include
#include
#include
#include
#include
#include

/*
 * Prototype fixed-part CLNP headers, one per PDU type clnp_output can
 * emit.  Each is copied wholesale into a fresh header mbuf; length,
 * segment length, and checksum fields are filled in later.
 */
static struct clnp_fixed dt_template = {
	ISO8473_CLNP,		/* network identifier */
	0,			/* length */
	ISO8473_V1,		/* version */
	CLNP_TTL,		/* ttl */
	CLNP_DT|CNF_SEG_OK|CNF_ERR_OK,	/* type */
	0,			/* segment length */
	0			/* checksum */
};

static struct clnp_fixed raw_template = {
	ISO8473_CLNP,		/* network identifier */
	0,			/* length */
	ISO8473_V1,		/* version */
	CLNP_TTL,		/* ttl */
	CLNP_RAW|CNF_SEG_OK|CNF_ERR_OK,	/* type */
	0,			/* segment length */
	0			/* checksum */
};

static struct clnp_fixed echo_template = {
	ISO8473_CLNP,		/* network identifier */
	0,			/* length */
	ISO8473_V1,		/* version */
	CLNP_TTL,		/* ttl */
	CLNP_EC|CNF_SEG_OK|CNF_ERR_OK,	/* type */
	0,			/* segment length */
	0			/* checksum */
};

static struct clnp_fixed echor_template = {
	ISO8473_CLNP,		/* network identifier */
	0,			/* length */
	ISO8473_V1,		/* version */
	CLNP_TTL,		/* ttl */
	CLNP_ECR|CNF_SEG_OK|CNF_ERR_OK,	/* type */
	0,			/* segment length */
	0			/* checksum */
};

#ifdef	DECBIT
/* Globally-unique QOS option, with room for the congestion-experienced bit */
u_char qos_option[] = {CLNPOVAL_QOS, 1,
	CLNPOVAL_GLOBAL|CLNPOVAL_SEQUENCING|CLNPOVAL_LOWDELAY};
#endif /* DECBIT */

int clnp_id = 0;		/* id for segmented dgrams */

/*
 * FUNCTION:		clnp_output
 *
 * PURPOSE:		output the data in the mbuf as a clnp datagram
 *
 *			The data specified by m0 is sent as a clnp datagram.
 *			The mbuf chain m0 will be freed when this routine has
 *			returned.
 *
 *			If options is non-null, it points to an mbuf which contains
 *			options to be sent with the datagram. The options must
 *			be formatted in the mbuf according to clnp rules. Options
 *			will not be freed.
 *
 *	Datalen specifies the length of the data in m0.
 *
 *	Src and dst are the addresses for the packet.
 *
 *	If route is non-null, it is used as the route for
 *	the packet.
 *
 *	By default, a DT is sent. However, if flags & CLNP_SEND_ER
 *	then an ER will be sent. If flags & CLNP_SEND_RAW, then
 *	the packet will be sent as raw clnp.
 *
 * RETURNS:		0	success
 *			appropriate error code
 *
 * SIDE EFFECTS:	none
 *
 * NOTES:
 *	Flags are interpreted as follows:
 *		CLNP_NO_SEG - do not allow this pkt to be segmented.
 *		CLNP_NO_ER - have pkt request ER suppression.
 *		CLNP_SEND_RAW - send pkt as RAW DT rather than TP DT
 *		CLNP_NO_CKSUM - don't compute clnp checksum
 *		CLNP_ECHO - send as ECHO packet
 *
 *	When checking for a cached packet, clnp checks
 *	that the route taken is still up. It does not
 *	check that the route is still to the same destination.
 *	This means that any entity that alters an existing
 *	route for an isopcb (such as when a redirect arrives)
 *	must invalidate the clnp cache. It might be preferable
 *	to have clnp check that the route has the same dest, but
 *	by avoiding this check, we save a call to iso_addrmatch1.
+ */ +clnp_output(m0, isop, datalen, flags) +struct mbuf *m0; /* data for the packet */ +struct isopcb *isop; /* iso pcb */ +int datalen; /* number of bytes of data in m0 */ +int flags; /* flags */ +{ + int error = 0; /* return value of function */ + register struct mbuf *m = m0; /* mbuf for clnp header chain */ + register struct clnp_fixed *clnp; /* ptr to fixed part of hdr */ + register caddr_t hoff; /* offset into header */ + int total_len; /* total length of packet */ + struct iso_addr *src; /* ptr to source address */ + struct iso_addr *dst; /* ptr to destination address */ + struct clnp_cache clc; /* storage for cache information */ + struct clnp_cache *clcp = NULL; /* ptr to clc */ + int hdrlen = 0; + + dst = &isop->isop_faddr->siso_addr; + if (isop->isop_laddr == 0) { + struct iso_ifaddr *ia = 0; + clnp_route(dst, &isop->isop_route, flags, 0, &ia); + if (ia == 0 || ia->ia_ifa.ifa_addr->sa_family != AF_ISO) + return (ENETUNREACH); + src = &ia->ia_addr.siso_addr; + } else + src = &isop->isop_laddr->siso_addr; + + IFDEBUG(D_OUTPUT) + printf("clnp_output: to %s", clnp_iso_addrp(dst)); + printf(" from %s of %d bytes\n", clnp_iso_addrp(src), datalen); + printf("\toptions x%x, flags x%x, isop_clnpcache x%x\n", + isop->isop_options, flags, isop->isop_clnpcache); + ENDDEBUG + + if (isop->isop_clnpcache != NULL) { + clcp = mtod(isop->isop_clnpcache, struct clnp_cache *); + } + + /* + * Check if cache is valid ... 
+ */ + IFDEBUG(D_OUTPUT) + printf("clnp_output: ck cache: clcp %x\n", clcp); + if (clcp != NULL) { + printf("\tclc_dst %s\n", clnp_iso_addrp(&clcp->clc_dst)); + printf("\tisop_opts x%x, clc_opts x%x\n", isop->isop_options, + clcp->clc_options); + if (isop->isop_route.ro_rt) + printf("\tro_rt x%x, rt_flags x%x\n", + isop->isop_route.ro_rt, isop->isop_route.ro_rt->rt_flags); + printf("\tflags x%x, clc_flags x%x\n", flags, clcp->clc_flags); + printf("\tclc_hdr x%x\n", clcp->clc_hdr); + } + ENDDEBUG + if ((clcp != NULL) && /* cache exists */ + (isop->isop_options == clcp->clc_options) && /* same options */ + (iso_addrmatch1(dst, &clcp->clc_dst)) && /* dst still same */ + (isop->isop_route.ro_rt != NULL) && /* route exists */ + (isop->isop_route.ro_rt == clcp->clc_rt) && /* and is cached */ + (isop->isop_route.ro_rt->rt_flags & RTF_UP) && /* route still up */ + (flags == clcp->clc_flags) && /* same flags */ + (clcp->clc_hdr != NULL)) { /* hdr mbuf exists */ + /* + * The cache is valid + */ + + IFDEBUG(D_OUTPUT) + printf("clnp_output: using cache\n"); + ENDDEBUG + + m = m_copy(clcp->clc_hdr, 0, (int)M_COPYALL); + if (m == NULL) { + /* + * No buffers left to copy cached packet header. Use + * the cached packet header this time, and + * mark the hdr as vacant + */ + m = clcp->clc_hdr; + clcp->clc_hdr = NULL; + } + m->m_next = m0; /* ASSUMES pkt hdr is 1 mbuf long */ + clnp = mtod(m, struct clnp_fixed *); + } else { + struct clnp_optidx *oidx = NULL; /* index to clnp options */ + + /* + * The cache is not valid. Allocate an mbuf (if necessary) + * to hold cached info. If one is not available, then + * don't bother with the cache + */ + INCSTAT(cns_cachemiss); + if (flags & CLNP_NOCACHE) { + clcp = &clc; + } else { + if (isop->isop_clnpcache == NULL) { + /* + * There is no clnpcache. Allocate an mbuf to hold one + */ + if ((isop->isop_clnpcache = m_get(M_DONTWAIT, MT_HEADER)) + == NULL) { + /* + * No mbufs available. Pretend that we don't want + * caching this time. 
+ */ + IFDEBUG(D_OUTPUT) + printf("clnp_output: no mbufs to allocate to cache\n"); + ENDDEBUG + flags |= CLNP_NOCACHE; + clcp = &clc; + } else { + clcp = mtod(isop->isop_clnpcache, struct clnp_cache *); + } + } else { + /* + * A clnpcache mbuf exists. If the clc_hdr is not null, + * we must free it, as a new one is about to be created. + */ + clcp = mtod(isop->isop_clnpcache, struct clnp_cache *); + if (clcp->clc_hdr != NULL) { + /* + * The clc_hdr is not null but a clnpcache mbuf exists. + * This means that there was a cache, but the existing + * copy of the hdr is no longer valid. Free it now + * before we lose the pointer to it. + */ + IFDEBUG(D_OUTPUT) + printf("clnp_output: freeing old clc_hdr 0x%x\n", + clcp->clc_hdr); + ENDDEBUG + m_free(clcp->clc_hdr); + IFDEBUG(D_OUTPUT) + printf("clnp_output: freed old clc_hdr (done)\n"); + ENDDEBUG + } + } + } + IFDEBUG(D_OUTPUT) + printf("clnp_output: NEW clcp x%x\n",clcp); + ENDDEBUG + bzero((caddr_t)clcp, sizeof(struct clnp_cache)); + + if (isop->isop_optindex) + oidx = mtod(isop->isop_optindex, struct clnp_optidx *); + + /* + * Don't allow packets with security, quality of service, + * priority, or error report options to be sent. 
+ */ + if ((isop->isop_options) && (oidx)) { + if ((oidx->cni_securep) || + (oidx->cni_priorp) || + (oidx->cni_qos_formatp) || + (oidx->cni_er_reason != ER_INVALREAS)) { + IFDEBUG(D_OUTPUT) + printf("clnp_output: pkt dropped - option unsupported\n"); + ENDDEBUG + m_freem(m0); + return(EINVAL); + } + } + + /* + * Don't allow any invalid flags to be set + */ + if ((flags & (CLNP_VFLAGS)) != flags) { + IFDEBUG(D_OUTPUT) + printf("clnp_output: packet dropped - flags unsupported\n"); + ENDDEBUG + INCSTAT(cns_odropped); + m_freem(m0); + return(EINVAL); + } + + /* + * Don't allow funny lengths on dst; src may be zero in which + * case we insert the source address based upon the interface + */ + if ((src->isoa_len > sizeof(struct iso_addr)) || + (dst->isoa_len == 0) || + (dst->isoa_len > sizeof(struct iso_addr))) { + m_freem(m0); + INCSTAT(cns_odropped); + return(ENAMETOOLONG); + } + + /* + * Grab mbuf to contain header + */ + MGETHDR(m, M_DONTWAIT, MT_HEADER); + if (m == 0) { + m_freem(m0); + INCSTAT(cns_odropped); + return(ENOBUFS); + } + INCSTAT(cns_sent); + m->m_next = m0; + clnp = mtod(m, struct clnp_fixed *); + clcp->clc_segoff = 0; + + /* + * Fill in all of fixed hdr except lengths and checksum + */ + if (flags & CLNP_SEND_RAW) { + *clnp = raw_template; + } else if (flags & CLNP_ECHO) { + *clnp = echo_template; + } else if (flags & CLNP_ECHOR) { + *clnp = echor_template; + } else { + *clnp = dt_template; + } + if (flags & CLNP_NO_SEG) + clnp->cnf_type &= ~CNF_SEG_OK; + if (flags & CLNP_NO_ER) + clnp->cnf_type &= ~CNF_ERR_OK; + + /* + * Route packet; special case for source rt + */ + if ((isop->isop_options) && CLNPSRCRT_VALID(oidx)) { + IFDEBUG(D_OUTPUT) + printf("clnp_output: calling clnp_srcroute\n"); + ENDDEBUG + error = clnp_srcroute(isop->isop_options, oidx, &isop->isop_route, + &clcp->clc_firsthop, &clcp->clc_ifa, dst); + } else { + IFDEBUG(D_OUTPUT) + ENDDEBUG + error = clnp_route(dst, &isop->isop_route, flags, + &clcp->clc_firsthop, &clcp->clc_ifa); + } + if 
(error || (clcp->clc_ifa == 0)) { + IFDEBUG(D_OUTPUT) + printf("clnp_output: route failed, errno %d\n", error); + printf("@clcp:\n"); + dump_buf(clcp, sizeof (struct clnp_cache)); + ENDDEBUG + goto bad; + } + clcp->clc_rt = isop->isop_route.ro_rt; /* XXX */ + clcp->clc_ifp = clcp->clc_ifa->ia_ifp; /* XXX */ + + IFDEBUG(D_OUTPUT) + printf("clnp_output: packet routed to %s\n", + clnp_iso_addrp( + &((struct sockaddr_iso *)clcp->clc_firsthop)->siso_addr)); + ENDDEBUG + + /* + * If src address is not yet specified, use address of + * interface. NOTE: this will now update the laddr field in + * the isopcb. Is this desirable? RAH? + */ + if (src->isoa_len == 0) { + src = &(clcp->clc_ifa->ia_addr.siso_addr); + IFDEBUG(D_OUTPUT) + printf("clnp_output: new src %s\n", clnp_iso_addrp(src)); + ENDDEBUG + } + + /* + * Insert the source and destination address, + */ + hoff = (caddr_t)clnp + sizeof(struct clnp_fixed); + CLNP_INSERT_ADDR(hoff, *dst); + CLNP_INSERT_ADDR(hoff, *src); + + /* + * Leave room for the segment part, if segmenting is selected + */ + if (clnp->cnf_type & CNF_SEG_OK) { + clcp->clc_segoff = hoff - (caddr_t)clnp; + hoff += sizeof(struct clnp_segment); + } + + clnp->cnf_hdr_len = m->m_len = (u_char)(hoff - (caddr_t)clnp); + hdrlen = clnp->cnf_hdr_len; + +#ifdef DECBIT + /* + * Add the globally unique QOS (with room for congestion experienced + * bit). I can safely assume that this option is not in the options + * mbuf below because I checked that the option was not specified + * previously + */ + if ((m->m_len + sizeof(qos_option)) < MLEN) { + bcopy((caddr_t)qos_option, hoff, sizeof(qos_option)); + clnp->cnf_hdr_len += sizeof(qos_option); + hdrlen += sizeof(qos_option); + m->m_len += sizeof(qos_option); + } +#endif /* DECBIT */ + + /* + * If an options mbuf is present, concatenate a copy to the hdr mbuf. 
+ */ + if (isop->isop_options) { + struct mbuf *opt_copy = m_copy(isop->isop_options, 0, (int)M_COPYALL); + if (opt_copy == NULL) { + error = ENOBUFS; + goto bad; + } + /* Link in place */ + opt_copy->m_next = m->m_next; + m->m_next = opt_copy; + + /* update size of header */ + clnp->cnf_hdr_len += opt_copy->m_len; + hdrlen += opt_copy->m_len; + } + + if (hdrlen > CLNP_HDR_MAX) { + error = EMSGSIZE; + goto bad; + } + + /* + * Now set up the cache entry in the pcb + */ + if ((flags & CLNP_NOCACHE) == 0) { + if (clcp->clc_hdr = m_copy(m, 0, (int)clnp->cnf_hdr_len)) { + clcp->clc_dst = *dst; + clcp->clc_flags = flags; + clcp->clc_options = isop->isop_options; + } + } + } + /* + * If small enough for interface, send directly + * Fill in segmentation part of hdr if using the full protocol + */ + total_len = clnp->cnf_hdr_len + datalen; + if (clnp->cnf_type & CNF_SEG_OK) { + struct clnp_segment seg_part; /* segment part of hdr */ + seg_part.cng_id = htons(clnp_id++); + seg_part.cng_off = htons(0); + seg_part.cng_tot_len = htons(total_len); + (void) bcopy((caddr_t)&seg_part, (caddr_t) clnp + clcp->clc_segoff, + sizeof(seg_part)); + } + if (total_len <= SN_MTU(clcp->clc_ifp, clcp->clc_rt)) { + HTOC(clnp->cnf_seglen_msb, clnp->cnf_seglen_lsb, total_len); + m->m_pkthdr.len = total_len; + /* + * Compute clnp checksum (on header only) + */ + if (flags & CLNP_NO_CKSUM) { + HTOC(clnp->cnf_cksum_msb, clnp->cnf_cksum_lsb, 0); + } else { + iso_gen_csum(m, CLNP_CKSUM_OFF, (int)clnp->cnf_hdr_len); + } + + IFDEBUG(D_DUMPOUT) + struct mbuf *mdump = m; + printf("clnp_output: sending dg:\n"); + while (mdump != NULL) { + dump_buf(mtod(mdump, caddr_t), mdump->m_len); + mdump = mdump->m_next; + } + ENDDEBUG + + error = SN_OUTPUT(clcp, m); + goto done; + } else { + /* + * Too large for interface; fragment if possible. 
+ */ + error = clnp_fragment(clcp->clc_ifp, m, clcp->clc_firsthop, + total_len, clcp->clc_segoff, flags, clcp->clc_rt); + goto done; + } +bad: + m_freem(m); +done: + if (error) { + clnp_stat.cns_sent--; + clnp_stat.cns_odropped++; + } + return (error); +} + +int clnp_ctloutput() +{ +} diff --git a/bsd/netiso/clnp_raw.c b/bsd/netiso/clnp_raw.c new file mode 100644 index 000000000..264196757 --- /dev/null +++ b/bsd/netiso/clnp_raw.c @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)clnp_raw.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include /* XXX -- defines SOL_NETWORK */ + +struct sockproto rclnp_proto = { PF_ISO, 0 }; +/* + * FUNCTION: rclnp_input + * + * PURPOSE: Setup generic address an protocol structures for + * raw input routine, then pass them along with the + * mbuf chain. + * + * RETURNS: none + * + * SIDE EFFECTS: + * + * NOTES: The protocol field of rclnp_proto is set to zero indicating + * no protocol. 
+ */ +rclnp_input(m, src, dst, hdrlen) +struct mbuf *m; /* ptr to packet */ +struct sockaddr_iso *src; /* ptr to src address */ +struct sockaddr_iso *dst; /* ptr to dest address */ +int hdrlen; /* length (in bytes) of clnp header */ +{ +#ifdef TROLL + if (trollctl.tr_ops & TR_CHUCK) { + m_freem(m); + return; + } +#endif /* TROLL */ + + raw_input(m, &rclnp_proto, (struct sockaddr *)src, (struct sockaddr *)dst); +} + +/* + * FUNCTION: rclnp_output + * + * PURPOSE: Prepare to send a raw clnp packet. Setup src and dest + * addresses, count the number of bytes to send, and + * call clnp_output. + * + * RETURNS: success - 0 + * failure - an appropriate error code + * + * SIDE EFFECTS: + * + * NOTES: + */ +rclnp_output(m0, so) +struct mbuf *m0; /* packet to send */ +struct socket *so; /* socket to send from */ +{ + register struct mbuf *m; /* used to scan a chain */ + int len = 0; /* store length of chain here */ + struct rawisopcb *rp = sotorawisopcb(so); /* ptr to raw cb */ + int error; /* return value of function */ + int flags; /* flags for clnp_output */ + + if (0 == (m0->m_flags & M_PKTHDR)) + return (EINVAL); + /* + * Set up src address. If user has bound socket to an address, use it. + * Otherwise, do not specify src (clnp_output will fill it in). + */ + if (rp->risop_rcb.rcb_laddr) { + if (rp->risop_isop.isop_sladdr.siso_family != AF_ISO) { +bad: + m_freem(m0); + return(EAFNOSUPPORT); + } + } + /* set up dest address */ + if (rp->risop_rcb.rcb_faddr == 0) + goto bad; + rp->risop_isop.isop_sfaddr = + *(struct sockaddr_iso *)rp->risop_rcb.rcb_faddr; + rp->risop_isop.isop_faddr = &rp->risop_isop.isop_sfaddr; + + /* get flags and ship it off */ + flags = rp->risop_flags & CLNP_VFLAGS; + + error = clnp_output(m0, &rp->risop_isop, m0->m_pkthdr.len, + flags|CLNP_NOCACHE); + + return (error); +} + +/* + * FUNCTION: rclnp_ctloutput + * + * PURPOSE: Raw clnp socket option processing + * All options are stored inside an mbuf. 
+ * + * RETURNS: success - 0 + * failure - unix error code + * + * SIDE EFFECTS: If the options mbuf does not exist, it the mbuf passed + * is used. + * + * NOTES: + */ +rclnp_ctloutput(op, so, level, optname, m) +int op; /* type of operation */ +struct socket *so; /* ptr to socket */ +int level; /* level of option */ +int optname; /* name of option */ +struct mbuf **m; /* ptr to ptr to option data */ +{ + int error = 0; + register struct rawisopcb *rp = sotorawisopcb(so);/* raw cb ptr */ + + IFDEBUG(D_CTLOUTPUT) + printf("rclnp_ctloutput: op = x%x, level = x%x, name = x%x\n", + op, level, optname); + if (*m != NULL) { + printf("rclnp_ctloutput: %d bytes of mbuf data\n", (*m)->m_len); + dump_buf(mtod((*m), caddr_t), (*m)->m_len); + } + ENDDEBUG + +#ifdef SOL_NETWORK + if (level != SOL_NETWORK) + error = EINVAL; + else switch (op) { +#else + switch (op) { +#endif /* SOL_NETWORK */ + case PRCO_SETOPT: + switch (optname) { + case CLNPOPT_FLAGS: { + u_short usr_flags; + /* + * Insure that the data passed has exactly one short in it + */ + if ((*m == NULL) || ((*m)->m_len != sizeof(short))) { + error = EINVAL; + break; + } + + /* + * Don't allow invalid flags to be set + */ + usr_flags = (*mtod((*m), short *)); + + if ((usr_flags & (CLNP_VFLAGS)) != usr_flags) { + error = EINVAL; + } else + rp->risop_flags |= usr_flags; + + } break; + + case CLNPOPT_OPTS: + if (error = clnp_set_opts(&rp->risop_isop.isop_options, m)) + break; + rp->risop_isop.isop_optindex = m_get(M_WAIT, MT_SOOPTS); + (void) clnp_opt_sanity(rp->risop_isop.isop_options, + mtod(rp->risop_isop.isop_options, caddr_t), + rp->risop_isop.isop_options->m_len, + mtod(rp->risop_isop.isop_optindex, + struct clnp_optidx *)); + break; + } + break; + + case PRCO_GETOPT: +#ifdef notdef + /* commented out to keep hi C quiet */ + switch (optname) { + default: + error = EINVAL; + break; + } +#endif /* notdef */ + break; + default: + error = EINVAL; + break; + } + if (op == PRCO_SETOPT) { + /* note: m_freem does not barf 
is *m is NULL */ + m_freem(*m); + *m = NULL; + } + + return error; +} + +/*ARGSUSED*/ +clnp_usrreq(so, req, m, nam, control) + register struct socket *so; + int req; + struct mbuf *m, *nam, *control; +{ + register int error = 0; + register struct rawisopcb *rp = sotorawisopcb(so); + + rp = sotorawisopcb(so); + switch (req) { + + case PRU_ATTACH: + if (rp) + panic("rip_attach"); + MALLOC(rp, struct rawisopcb *, sizeof *rp, M_PCB, M_WAITOK); + if (rp == 0) + return (ENOBUFS); + bzero((caddr_t)rp, sizeof *rp); + so->so_pcb = (caddr_t)rp; + break; + + case PRU_DETACH: + if (rp == 0) + panic("rip_detach"); + if (rp->risop_isop.isop_options) + m_freem(rp->risop_isop.isop_options); + if (rp->risop_isop.isop_route.ro_rt) + RTFREE(rp->risop_isop.isop_route.ro_rt); + if (rp->risop_rcb.rcb_laddr) + rp->risop_rcb.rcb_laddr = 0; + /* free clnp cached hdr if necessary */ + if (rp->risop_isop.isop_clnpcache != NULL) { + struct clnp_cache *clcp = + mtod(rp->risop_isop.isop_clnpcache, struct clnp_cache *); + if (clcp->clc_hdr != NULL) { + m_free(clcp->clc_hdr); + } + m_free(rp->risop_isop.isop_clnpcache); + } + if (rp->risop_isop.isop_optindex != NULL) + m_free(rp->risop_isop.isop_optindex); + + break; + + case PRU_BIND: + { + struct sockaddr_iso *addr = mtod(nam, struct sockaddr_iso *); + + if (nam->m_len != sizeof(*addr)) + return (EINVAL); + if ((ifnet == 0) || + (addr->siso_family != AF_ISO) || + (addr->siso_addr.isoa_len && + ifa_ifwithaddr((struct sockaddr *)addr) == 0)) + return (EADDRNOTAVAIL); + rp->risop_isop.isop_sladdr = *addr; + rp->risop_rcb.rcb_laddr = (struct sockaddr *) + (rp->risop_isop.isop_laddr = &rp->risop_isop.isop_sladdr); + return (0); + } + case PRU_CONNECT: + { + struct sockaddr_iso *addr = mtod(nam, struct sockaddr_iso *); + + if ((nam->m_len > sizeof(*addr)) || (addr->siso_len > sizeof(*addr))) + return (EINVAL); + if (ifnet == 0) + return (EADDRNOTAVAIL); + if (addr->siso_family != AF_ISO) + rp->risop_isop.isop_sfaddr = *addr; + rp->risop_rcb.rcb_faddr 
= (struct sockaddr *) + (rp->risop_isop.isop_faddr = &rp->risop_isop.isop_sfaddr); + soisconnected(so); + return (0); + } + } + error = raw_usrreq(so, req, m, nam, control); + + if (error && req == PRU_ATTACH && so->so_pcb) + FREE((caddr_t)rp, M_PCB); + return (error); +} diff --git a/bsd/netiso/clnp_stat.h b/bsd/netiso/clnp_stat.h new file mode 100644 index 000000000..901840c38 --- /dev/null +++ b/bsd/netiso/clnp_stat.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)clnp_stat.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + + +#ifndef __CLNP_STAT__ +#define __CLNP_STAT__ + +struct clnp_stat { + int cns_total; /* total pkts received */ + int cns_toosmall; /* fixed part of header too small */ + int cns_badhlen; /* header length is not reasonable */ + int cns_badcsum; /* checksum on packet failed */ + int cns_badaddr; /* address fields were not reasonable */ + int cns_badvers; /* incorrect version */ + int cns_noseg; /* segment information forgotten */ + int cns_noproto; /* incorrect protocol id */ + int cns_delivered; /* packets consumed by protocol */ + int cns_ttlexpired; /* ttl has expired */ + int cns_forward; /* forwarded packets */ + int cns_sent; /* total packets sent */ + int cns_odropped; /* o.k. packets discarded, e.g. 
ENOBUFS */ + int cns_cantforward; /* non-forwarded packets */ + int cns_fragmented; /* packets fragmented */ + int cns_fragments; /* fragments received */ + int cns_fragdropped; /* fragments discarded */ + int cns_fragtimeout; /* fragments timed out */ + int cns_ofragments; /* fragments generated */ + int cns_cantfrag; /* fragmentation prohibited */ + int cns_reassembled; /* packets reconstructed */ + int cns_cachemiss; /* cache misses */ + int cns_congest_set; /* congestion experienced bit set */ + int cns_congest_rcvd; /* congestion experienced bit received */ + int cns_er_inhist[CLNP_ERRORS + 1]; + int cns_er_outhist[CLNP_ERRORS + 1]; +} clnp_stat ; + +#ifdef INCSTAT +#undef INCSTAT +#endif /* INCSTAT */ +#define INCSTAT(x) clnp_stat./**/x/**/++ + +#endif /* __CLNP_STAT__ */ diff --git a/bsd/netiso/clnp_subr.c b/bsd/netiso/clnp_subr.c new file mode 100644 index 000000000..11e1ad742 --- /dev/null +++ b/bsd/netiso/clnp_subr.c @@ -0,0 +1,677 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)clnp_subr.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#if ISO + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * FUNCTION: clnp_data_ck + * + * PURPOSE: Check that the amount of data in the mbuf chain is + * at least as much as the clnp header would have us + * expect. Trim mbufs if longer than expected, drop + * packet if shorter than expected. 
 *
 * RETURNS:		success - ptr to mbuf chain
 *			failure - 0
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
struct mbuf *
clnp_data_ck(m, length)
register struct mbuf	*m;		/* ptr to mbuf chain containing hdr & data */
int			length;		/* length (in bytes) of packet */
{
	register int		len;		/* length of data */
	register struct mbuf	*mhead;		/* ptr to head of chain */

	/*
	 * Walk the whole chain summing m_len.  Starting the accumulator
	 * at -length leaves len == (bytes actually present) - (bytes the
	 * clnp header claims), so 0 means "exactly right".
	 */
	len = -length;
	mhead = m;
	for (;;) {
		len += m->m_len;
		if (m->m_next == 0)
			break;
		m = m->m_next;
	}
	if (len != 0) {
		if (len < 0) {
			/* chain is shorter than the header claims: drop it */
			INCSTAT(cns_toosmall);
			clnp_discard(mhead, GEN_INCOMPLETE);
			return 0;
		}
		/* chain is longer than expected: trim the excess */
		if (len <= m->m_len)
			m->m_len -= len;	/* excess entirely in last mbuf */
		else
			m_adj(mhead, -len);	/* excess spans mbufs; trim from tail */
	}
	return mhead;
}

#ifdef	notdef
/*
 * FUNCTION:		clnp_extract_addr
 *
 * PURPOSE:		Extract the source and destination address from the
 *			supplied buffer. Place them in the supplied address buffers.
 *			If insufficient data is supplied, then fail.
 *
 * RETURNS:		success - Address of first byte in the packet past
 *			the address part.
 *			failure - 0
 *
 * SIDE EFFECTS:
 *
 * NOTES:		Addresses are encoded as a length octet followed by
 *			that many address octets (destination first).
 */
caddr_t
clnp_extract_addr(bufp, buflen, srcp, destp)
caddr_t				bufp;	/* ptr to buffer containing addresses */
int				buflen;	/* length of buffer */
register struct iso_addr	*srcp;	/* ptr to source address buffer */
register struct iso_addr	*destp;	/* ptr to destination address buffer */
{
	int	len;		/* argument to bcopy */

	/*
	 * check that we have enough data. Plus1 is for length octet
	 */
	if ((u_char)*bufp + 1 > buflen) {
		return((caddr_t)0);
	}
	len = destp->isoa_len = (u_char)*bufp++;
	(void) bcopy(bufp, (caddr_t)destp, len);
	buflen -= len;
	bufp += len;

	/*
	 * check that we have enough data. Plus1 is for length octet
	 */
	if ((u_char)*bufp + 1 > buflen) {
		return((caddr_t)0);
	}
	len = srcp->isoa_len = (u_char)* bufp++;
	(void) bcopy(bufp, (caddr_t)srcp, len);
	bufp += len;

	/*
	 * Insure that the addresses make sense
	 */
	if (iso_ck_addr(srcp) && iso_ck_addr(destp))
		return bufp;
	else
		return (caddr_t) 0;
}
#endif	/* notdef */

/*
 * FUNCTION:		clnp_ours
 *
 * PURPOSE:		Decide whether the supplied packet is destined for
 *			us, or that it should be forwarded on.
 *
 * RETURNS:		packet is for us - 1
 *			packet is not for us - 0
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
clnp_ours(dst)
register struct iso_addr *dst;		/* ptr to destination address */
{
	register struct iso_ifaddr *ia;	/* scan through interface addresses */

	for (ia = iso_ifaddr; ia; ia = ia->ia_next) {
		IFDEBUG(D_ROUTE)
			printf("clnp_ours: ia_sis x%x, dst x%x\n", &ia->ia_addr,
				dst);
		ENDDEBUG
		/*
		 * XXX Warning:
		 * We are overloading siso_tlen in the if's address, as an nsel length.
		 * The bcmp below therefore compares only the network part of the
		 * address, excluding the trailing siso_tlen (nsel) octets.
		 */
		if (dst->isoa_len == ia->ia_addr.siso_nlen &&
			bcmp((caddr_t)ia->ia_addr.siso_addr.isoa_genaddr,
				(caddr_t)dst->isoa_genaddr,
				ia->ia_addr.siso_nlen - ia->ia_addr.siso_tlen) == 0)
			return 1;
	}
	return 0;
}

/* Dec bit set if ifp qlen is greater than congest_threshold */
int	congest_threshold = 0;

/*
 * FUNCTION:		clnp_forward
 *
 * PURPOSE:		Forward the datagram passed
 *			clnpintr guarantees that the header will be
 *			contigious (a cluster mbuf will be used if necessary).
 *
 *			If oidx is NULL, no options are present.
 *
 * RETURNS:		nothing
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
clnp_forward(m, len, dst, oidx, seg_off, inbound_shp)
struct mbuf		*m;	/* pkt to forward */
int			len;	/* length of pkt */
struct iso_addr		*dst;	/* destination address */
struct clnp_optidx	*oidx;	/* option index */
int			seg_off;/* offset of segmentation part */
struct snpa_hdr		*inbound_shp;	/* subnetwork header of inbound packet */
{
	struct clnp_fixed	*clnp;		/* ptr to fixed part of header */
	int			error;		/* return value of route function */
	struct sockaddr		*next_hop;	/* next hop for dgram */
	struct ifnet		*ifp;		/* ptr to outgoing interface */
	struct iso_ifaddr	*ia = 0;	/* ptr to iso name for ifp */
	struct route_iso	route;		/* filled in by clnp_route */
	extern int		iso_systype;

	clnp = mtod(m, struct clnp_fixed *);
	/*
	 * NOTE(review): the label referred to below is actually "done:", not
	 * "bad:" — the comment looks stale.  The zeroing is still required so
	 * that the cleanup code can safely test route.ro_rt on every exit path.
	 */
	bzero((caddr_t)&route, sizeof(route)); /* MUST be done before "bad:" */

	/*
	 * Don't forward multicast or broadcast packets
	 */
	if ((inbound_shp) && (IS_MULTICAST(inbound_shp->snh_dhost))) {
		IFDEBUG(D_FORWARD)
			printf("clnp_forward: dropping multicast packet\n");
		ENDDEBUG
		clnp->cnf_type &= ~CNF_ERR_OK; /* so we don't generate an ER */
		clnp_discard(m, 0);
		INCSTAT(cns_cantforward);
		goto done;
	}

	IFDEBUG(D_FORWARD)
		printf("clnp_forward: %d bytes, to %s, options x%x\n", len,
			clnp_iso_addrp(dst), oidx);
	ENDDEBUG

	/*
	 * Decrement ttl, and if zero drop datagram
	 * Can't compare ttl as less than zero 'cause its a unsigned
	 */
	if ((clnp->cnf_ttl == 0) || (--clnp->cnf_ttl == 0)) {
		IFDEBUG(D_FORWARD)
			printf("clnp_forward: discarding datagram because ttl is zero\n");
		ENDDEBUG
		INCSTAT(cns_ttlexpired);
		clnp_discard(m, TTL_EXPTRANSIT);
		goto done;
	}
	/*
	 * Route packet; special case for source rt
	 */
	if CLNPSRCRT_VALID(oidx) {
		/*
		 * Update src route first
		 */
		clnp_update_srcrt(m, oidx);
		error = clnp_srcroute(m, oidx, &route, &next_hop, &ia, dst);
	} else {
		error = clnp_route(dst, &route, 0, &next_hop, &ia);
	}
	if (error || ia == 0) {
		IFDEBUG(D_FORWARD)
			printf("clnp_forward: can't route packet (errno %d)\n", error);
		ENDDEBUG
		clnp_discard(m, ADDR_DESTUNREACH);
		INCSTAT(cns_cantforward);
		goto done;
	}
	ifp = ia->ia_ifp;

	IFDEBUG(D_FORWARD)
		printf("clnp_forward: packet routed to %s\n",
			clnp_iso_addrp(&((struct sockaddr_iso *)next_hop)->siso_addr));
	ENDDEBUG

	INCSTAT(cns_forward);

	/*
	 * If we are an intermediate system and
	 * we are routing outbound on the same ifp that the packet
	 * arrived upon, and we know the next hop snpa,
	 * then generate a redirect request
	 */
	if ((iso_systype & SNPA_IS) && (inbound_shp) &&
		(ifp == inbound_shp->snh_ifp))
		esis_rdoutput(inbound_shp, m, oidx, dst, route.ro_rt);
	/*
	 * If options are present, update them
	 */
	if (oidx) {
		struct iso_addr	*mysrc = &ia->ia_addr.siso_addr;
		/*
		 * NOTE(review): mysrc is the address of a struct member of a
		 * pointer already checked non-zero above, so this NULL test
		 * can never be true — the error arm is dead code.
		 */
		if (mysrc == NULL) {
			clnp_discard(m, ADDR_DESTUNREACH);
			INCSTAT(cns_cantforward);
			clnp_stat.cns_forward--;	/* undo the count taken above */
			goto done;
		} else {
			(void) clnp_dooptions(m, oidx, ifp, mysrc);
		}
	}

#ifdef	DECBIT
	if (ifp->if_snd.ifq_len > congest_threshold) {
		/*
		 * Congestion! Set the Dec Bit and thank Dave Oran
		 */
		IFDEBUG(D_FORWARD)
			printf("clnp_forward: congestion experienced\n");
		ENDDEBUG
		if ((oidx) && (oidx->cni_qos_formatp)) {
			caddr_t	qosp = CLNP_OFFTOOPT(m, oidx->cni_qos_formatp);
			u_char	qos = *qosp;
			IFDEBUG(D_FORWARD)
				printf("clnp_forward: setting congestion bit (qos x%x)\n", qos);
			ENDDEBUG
			/* only set the congestion bit in a globally-unique QOS option */
			if ((qos & CLNPOVAL_GLOBAL) == CLNPOVAL_GLOBAL) {
				qos |= CLNPOVAL_CONGESTED;
				INCSTAT(cns_congest_set);
				*qosp = qos;
			}
		}
	}
#endif	/* DECBIT */

	/*
	 * Dispatch the datagram if it is small enough, otherwise fragment
	 */
	if (len <= SN_MTU(ifp, route.ro_rt)) {
		/* regenerate the checksum: the ttl (and maybe options) changed */
		iso_gen_csum(m, CLNP_CKSUM_OFF, (int)clnp->cnf_hdr_len);
		(void) (*ifp->if_output)(ifp, m, next_hop, route.ro_rt);
	} else {
		(void) clnp_fragment(ifp, m, next_hop, len, seg_off, /* flags */0, route.ro_rt);
	}

done:
	/*
	 * Free route
	 */
	if (route.ro_rt != NULL) {
		RTFREE(route.ro_rt);
	}
}

#ifdef	notdef
/*
 * FUNCTION:		clnp_insert_addr
 *
 * PURPOSE:		Insert the address part into a clnp datagram.
 *
 * RETURNS:		Address of first byte after address part in datagram.
 *
 * SIDE EFFECTS:
 *
 * NOTES:		Assume that there is enough space for the address part.
 */
caddr_t
clnp_insert_addr(bufp, srcp, dstp)
caddr_t				bufp;	/* address of where addr part goes */
register struct iso_addr	*srcp;	/* ptr to src addr */
register struct iso_addr	*dstp;	/* ptr to dst addr */
{
	/* encoding is: dst length octet, dst octets, src length octet, src octets */
	*bufp++ = dstp->isoa_len;
	(void) bcopy((caddr_t)dstp, bufp, dstp->isoa_len);
	bufp += dstp->isoa_len;

	*bufp++ = srcp->isoa_len;
	(void) bcopy((caddr_t)srcp, bufp, srcp->isoa_len);
	bufp += srcp->isoa_len;

	return bufp;
}

#endif	/* notdef */

/*
 * FUNCTION:		clnp_route
 *
 * PURPOSE:		Route a clnp datagram to the first hop toward its
 *			destination. In many cases, the first hop will be
 *			the destination. The address of a route
 *			is specified.
If a routing entry is present in
 *			that route, and it is still up to the same destination,
 *			then no further action is necessary. Otherwise, a
 *			new routing entry will be allocated.
 *
 * RETURNS:		route found - 0
 *			unix error code
 *
 * SIDE EFFECTS:
 *
 * NOTES:		It is up to the caller to free the routing entry
 *			allocated in route.
 */
clnp_route(dst, ro, flags, first_hop, ifa)
	struct iso_addr	*dst;			/* ptr to datagram destination */
	register struct route_iso *ro;		/* existing route structure */
	int		flags;			/* flags for routing */
	struct sockaddr	**first_hop;		/* result: fill in with ptr to firsthop */
	struct iso_ifaddr **ifa;		/* result: fill in with ptr to interface */
{
	if (flags & SO_DONTROUTE) {
		/*
		 * Bypass the routing table entirely: the destination must be
		 * directly reachable through a local interface.
		 */
		struct iso_ifaddr *ia;

		if (ro->ro_rt) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = 0;
		}
		bzero((caddr_t)&ro->ro_dst, sizeof(ro->ro_dst));
		/* the +1 copies the isoa_len octet along with the address body */
		bcopy((caddr_t)dst, (caddr_t)&ro->ro_dst.siso_addr,
			1 + (unsigned)dst->isoa_len);
		ro->ro_dst.siso_family = AF_ISO;
		ro->ro_dst.siso_len = sizeof(ro->ro_dst);
		ia = iso_localifa(&ro->ro_dst);
		if (ia == 0)
			return EADDRNOTAVAIL;
		if (ifa)
			*ifa = ia;
		if (first_hop)
			*first_hop = (struct sockaddr *)&ro->ro_dst;
		return 0;
	}
	/*
	 * If there is a cached route, check that it is still up and to
	 * the same destination. If not, free it and try again.
	 */
	if (ro->ro_rt && ((ro->ro_rt->rt_flags & RTF_UP) == 0 ||
		(Bcmp(ro->ro_dst.siso_data, dst->isoa_genaddr, dst->isoa_len)))) {
		IFDEBUG(D_ROUTE)
			printf("clnp_route: freeing old route: ro->ro_rt 0x%x\n",
				ro->ro_rt);
			printf("clnp_route: old route refcnt: 0x%x\n",
				ro->ro_rt->rt_refcnt);
		ENDDEBUG

		/* free old route entry */
		RTFREE(ro->ro_rt);
		ro->ro_rt = (struct rtentry *)0;
	} else {
		IFDEBUG(D_ROUTE)
			printf("clnp_route: OK route exists\n");
		ENDDEBUG
	}

	if (ro->ro_rt == 0) {
		/* set up new route structure */
		bzero((caddr_t)&ro->ro_dst, sizeof(ro->ro_dst));
		ro->ro_dst.siso_len = sizeof(ro->ro_dst);
		ro->ro_dst.siso_family = AF_ISO;
		Bcopy(dst, &ro->ro_dst.siso_addr, 1 + dst->isoa_len);
		/* allocate new route */
		IFDEBUG(D_ROUTE)
			printf("clnp_route: allocating new route to %s\n",
				clnp_iso_addrp(dst));
		ENDDEBUG
		rtalloc((struct route *)ro);
	}
	if (ro->ro_rt == 0)
		return(ENETUNREACH);	/* rtalloc failed */
	ro->ro_rt->rt_use++;
	if (ifa)
		if ((*ifa = (struct iso_ifaddr *)ro->ro_rt->rt_ifa) == 0)
			panic("clnp_route");	/* route without an interface addr */
	if (first_hop) {
		if (ro->ro_rt->rt_flags & RTF_GATEWAY)
			*first_hop = ro->ro_rt->rt_gateway;
		else
			*first_hop = (struct sockaddr *)&ro->ro_dst;
	}
	return(0);
}

/*
 * FUNCTION:		clnp_srcroute
 *
 * PURPOSE:		Source route the datagram. If complete source
 *			routing is specified but not possible, then
 *			return an error. If src routing is terminated, then
 *			try routing on destination.
 *			Usage of first_hop,
 *			ifp, and error return is identical to clnp_route.
 *
 * RETURNS:		0 or unix error code
 *
 * SIDE EFFECTS:
 *
 * NOTES:		Remember that option index pointers are really
 *			offsets from the beginning of the mbuf.
 */
clnp_srcroute(options, oidx, ro, first_hop, ifa, final_dst)
struct mbuf		*options;	/* ptr to options */
struct clnp_optidx	*oidx;		/* index to options */
struct route_iso	*ro;		/* route structure */
struct sockaddr		**first_hop;	/* RETURN: fill in with ptr to firsthop */
struct iso_ifaddr	**ifa;		/* RETURN: fill in with ptr to interface */
struct iso_addr		*final_dst;	/* final destination */
{
	struct iso_addr	dst;		/* first hop specified by src rt */
	int		error = 0;	/* return code */

	/*
	 * Check if we have run out of routes
	 * If so, then try to route on destination.
	 */
	if CLNPSRCRT_TERM(oidx, options) {
		dst.isoa_len = final_dst->isoa_len;
		bcopy(final_dst->isoa_genaddr, dst.isoa_genaddr, dst.isoa_len);
	} else {
		/*
		 * setup dst based on src rt specified
		 */
		dst.isoa_len = CLNPSRCRT_CLEN(oidx, options);
		bcopy(CLNPSRCRT_CADDR(oidx, options), dst.isoa_genaddr, dst.isoa_len);
	}

	/*
	 * try to route it
	 */
	error = clnp_route(&dst, ro, 0, first_hop, ifa);
	if (error != 0)
		return error;

	/*
	 * If complete src rt, first hop must be equal to dst
	 */
	if ((CLNPSRCRT_TYPE(oidx, options) == CLNPOVAL_COMPRT) &&
		(!iso_addrmatch1(&(*(struct sockaddr_iso **)first_hop)->siso_addr,&dst))){
		IFDEBUG(D_OPTIONS)
			printf("clnp_srcroute: complete src route failed\n");
		ENDDEBUG
		return EHOSTUNREACH; /* RAH? would like ESRCRTFAILED */
	}

	return error;
}

/*
 * FUNCTION:		clnp_echoreply
 *
 * PURPOSE:		generate an echo reply packet and transmit
 *
 * RETURNS:		result of clnp_output
 *
 * SIDE EFFECTS:
 */
clnp_echoreply(ec_m, ec_len, ec_src, ec_dst, ec_oidxp)
struct mbuf		*ec_m;		/* echo request */
int			ec_len;		/* length of ec */
struct sockaddr_iso	*ec_src;	/* src of ec */
struct sockaddr_iso	*ec_dst;	/* destination of ec (i.e., us) */
struct clnp_optidx	*ec_oidxp;	/* options index to ec packet */
{
	struct isopcb	isopcb;		/* stack-local pcb: never inserted in any list */
	int		flags = CLNP_NOCACHE|CLNP_ECHOR;
	int		ret;

	/* fill in fake isopcb to pass to output function */
	bzero(&isopcb, sizeof(isopcb));
	/* reply swaps the roles: local = original dst (us), foreign = requester */
	isopcb.isop_laddr = ec_dst;
	isopcb.isop_faddr = ec_src;

	/* forget copying the options for now. If implemented, need only
	 * copy record route option, but it must be reset to zero length */

	ret = clnp_output(ec_m, &isopcb, ec_len, flags);

	IFDEBUG(D_OUTPUT)
		printf("clnp_echoreply: output returns %d\n", ret);
	ENDDEBUG
	return ret;
}

/*
 * FUNCTION:		clnp_badmtu
 *
 * PURPOSE:		print notice of route with mtu not initialized.
 *
 * RETURNS:		mtu of ifp.
 *
 * SIDE EFFECTS:	prints notice, slows down system.
 */
clnp_badmtu(ifp, rt, line, file)
struct ifnet	*ifp;	/* outgoing interface */
struct rtentry	*rt;	/* dst route */
int		line;	/* where the dirty deed occured */
char		*file;	/* where the dirty deed occured */
{
	printf("sending on route 0x%x with no mtu, line %d of file %s\n",
		rt, line, file);
#ifdef ARGO_DEBUG
	printf("route dst is ");
	dump_isoaddr(rt_key(rt));
#endif
	/* fall back on the interface's mtu when the route carries none */
	return ifp->if_mtu;
}

/*
 * FUNCTION:		clnp_ypocb - backwards bcopy
 *
 * PURPOSE:		bcopy starting at end of src rather than beginning.
+ * + * RETURNS: none + * + * SIDE EFFECTS: + * + * NOTES: No attempt has been made to make this efficient + */ +clnp_ypocb(from, to, len) +caddr_t from; /* src buffer */ +caddr_t to; /* dst buffer */ +u_int len; /* number of bytes */ +{ + while (len--) + *(to + len) = *(from + len); +} +#endif /* ISO */ diff --git a/bsd/netiso/clnp_timer.c b/bsd/netiso/clnp_timer.c new file mode 100644 index 000000000..5117dfd91 --- /dev/null +++ b/bsd/netiso/clnp_timer.c @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)clnp_timer.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
 
******************************************************************/

/*
 * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
 */

/*
 * NOTE(review): the header names on the following #include lines were
 * lost when this patch was extracted (angle-bracketed text stripped).
 * Presumably the usual clnp_timer.c set (sys/param.h, sys/systm.h,
 * sys/mbuf.h, ... plus net/ and netiso/ headers) — verify against the
 * original xnu/4.4BSD source before use.
 */
#include
#include
#include
#include
#include
#include
#include

#include
#include

#include
#include
#include
#include

extern struct clnp_fragl *clnp_frags;

/*
 * FUNCTION:		clnp_freefrags
 *
 * PURPOSE:		Free the resources associated with a fragment
 *
 * RETURNS:		pointer to next fragment in list of fragments
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 *	TODO: send ER back to source
 */
struct clnp_fragl *
clnp_freefrags(cfh)
register struct clnp_fragl	*cfh;	/* fragment header to delete */
{
	struct clnp_fragl	*next = cfh->cfl_next;
	struct clnp_frag	*cf;

	/* free any frags hanging around */
	cf = cfh->cfl_frags;
	while (cf != NULL) {
		struct clnp_frag	*cf_next = cf->cfr_next;
		INCSTAT(cns_fragdropped);
		m_freem(cf->cfr_data);
		cf = cf_next;
	}

	/* free the copy of the header */
	INCSTAT(cns_fragdropped);
	m_freem(cfh->cfl_orighdr);

	/* unlink cfh from the reassembly list */
	if (clnp_frags == cfh) {
		clnp_frags = cfh->cfl_next;
	} else {
		struct clnp_fragl	*scan;

		for (scan = clnp_frags; scan != NULL; scan = scan->cfl_next) {
			if (scan->cfl_next == cfh) {
				scan->cfl_next = cfh->cfl_next;
				break;
			}
		}
	}

	/* free the fragment header (cfh lives inside an mbuf) */
	m_freem(dtom(cfh));

	return(next);
}

/*
 * FUNCTION:		clnp_slowtimo
 *
 * PURPOSE:		clnp timer processing; if the ttl expires on a
 *			packet on the reassembly queue, discard it.
 *
 * RETURNS:		none
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 */
clnp_slowtimo()
{
	register struct clnp_fragl	*cfh = clnp_frags;
	int s = splnet();		/* block network interrupts while scanning */

	while (cfh != NULL) {
		if (--cfh->cfl_ttl == 0) {
			/* freefrags unlinks cfh and returns its successor */
			cfh = clnp_freefrags(cfh);
			INCSTAT(cns_fragtimeout);
		} else {
			cfh = cfh->cfl_next;
		}
	}
	splx(s);
}

/*
 * FUNCTION:		clnp_drain
 *
 * PURPOSE:		drain off all datagram fragments
 *
 * RETURNS:		none
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 *	TODO: should send back ER
 */
clnp_drain()
{
	register struct clnp_fragl	*cfh = clnp_frags;

	while (cfh != NULL)
		cfh = clnp_freefrags(cfh);
}
diff --git a/bsd/netiso/cltp_usrreq.c b/bsd/netiso/cltp_usrreq.c
new file mode 100644
index 000000000..3f3deff7b
--- /dev/null
+++ b/bsd/netiso/cltp_usrreq.c
@@ -0,0 +1,426 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California. All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
 *
 * @(#)cltp_usrreq.c	8.1 (Berkeley) 6/10/93
 */

#ifndef CLTPOVAL_SRC /* XXX -- till files gets changed */
/*
 * NOTE(review): the header names on the following #include lines were
 * lost when this patch was extracted — verify against the original
 * xnu/4.4BSD cltp_usrreq.c before use.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include

#include
#include
#include
#include
#include
#include
#endif

/*
 * CLTP protocol implementation.
 * Per ISO 8602, December, 1987.
 */
cltp_init()
{
	/* start with an empty circular pcb list headed by cltb */
	cltb.isop_next = cltb.isop_prev = &cltb;
}

int cltp_cksum = 1;	/* nonzero: generate checksums on output */


/* ARGUSED */
cltp_input(m0, srcsa, dstsa, cons_channel, output)
	struct mbuf *m0;
	struct sockaddr *srcsa, *dstsa;
	u_int cons_channel;
	int (*output)();
{
	register struct isopcb *isop;
	register struct mbuf *m = m0;
	register u_char *up = mtod(m, u_char *);
	register struct sockaddr_iso *src = (struct sockaddr_iso *)srcsa;
	int len, hdrlen = *up + 1, dlen = 0;	/* first octet = header length - 1 */
	u_char *uplim = up + hdrlen;
	caddr_t dtsap;		/* dst TSAP; only valid once dlen != 0 */

	for (len = 0; m; m = m->m_next)
		len += m->m_len;
	up += 2;		/* skip header */
	while (up < uplim) switch (*up) {	/* process options */

	case CLTPOVAL_SRC:
		/* record the source TSEL; grow src into its own mbuf if the
		 * caller's sockaddr is too small to hold it */
		src->siso_tlen = up[1];
		src->siso_len = up[1] + TSEL(src) - (caddr_t)src;
		if (src->siso_len < sizeof(*src))
			src->siso_len = sizeof(*src);
		else if (src->siso_len > sizeof(*src)) {
			MGET(m, M_DONTWAIT, MT_SONAME);
			if (m == 0)
				goto bad;
			m->m_len = src->siso_len;
			src = mtod(m, struct sockaddr_iso *);
			bcopy((caddr_t)srcsa, (caddr_t)src, srcsa->sa_len);
		}
		bcopy((caddr_t)up + 2, TSEL(src), up[1]);
		up += 2 + src->siso_tlen;
		continue;

	case CLTPOVAL_DST:
		dtsap = 2 + (caddr_t)up;
		dlen = up[1];
		up += 2 + dlen;
		continue;

	case CLTPOVAL_CSM:
		if (iso_check_csum(m0, len)) {
			cltpstat.cltps_badsum++;
			goto bad;
		}
		up += 4;
		continue;

	default:
		printf("clts: unknown option (%x)\n", up[0]);
		cltpstat.cltps_hdrops++;
		goto bad;
	}
	/* both TSAP options are mandatory (ISO 8602) */
	if (dlen == 0 || src->siso_tlen == 0)
		goto bad;
	/* find the pcb bound to the destination TSAP */
	for (isop = cltb.isop_next;; isop = isop->isop_next) {
		if (isop == &cltb) {
			cltpstat.cltps_noport++;
			goto bad;
		}
		if (isop->isop_laddr &&
			bcmp(TSEL(isop->isop_laddr), dtsap, dlen) == 0)
			break;
	}
	m = m0;
	m->m_len -= hdrlen;
	m->m_data += hdrlen;
	if (sbappendaddr(&isop->isop_socket->so_rcv, (struct sockaddr *)src,
			m, (struct mbuf *)0) == 0)
		goto bad;
	cltpstat.cltps_ipackets++;
	sorwakeup(isop->isop_socket);
	m0 = 0;		/* mbuf now owned by the socket buffer */
	/* deliberate fall through into the cleanup code below */
bad:
	if (src != (struct sockaddr_iso *)srcsa)
		m_freem(dtom(src));	/* free the grown sockaddr mbuf */
	if (m0)
		m_freem(m0);
	return 0;
}

/*
 * Notify a cltp user of an asynchronous error;
 * just wake up so that he can collect error status.
 */
cltp_notify(isop)
	register struct isopcb *isop;
{

	sorwakeup(isop->isop_socket);
	sowwakeup(isop->isop_socket);
}

cltp_ctlinput(cmd, sa)
	int cmd;
	struct sockaddr *sa;
{
	extern u_char inetctlerrmap[];
	struct sockaddr_iso *siso;
	int iso_rtchange();

	/* NOTE(review): bound check uses > PRC_NCMDS, not >= — confirm
	 * inetctlerrmap really has PRC_NCMDS+1 entries */
	if ((unsigned)cmd > PRC_NCMDS)
		return;
	if (sa->sa_family != AF_ISO && sa->sa_family != AF_CCITT)
		return;
	siso = (struct sockaddr_iso *)sa;
	if (siso == 0 || siso->siso_nlen == 0)
		return;

	switch (cmd) {

	case PRC_ROUTEDEAD:
	case PRC_REDIRECT_NET:
	case PRC_REDIRECT_HOST:
	case PRC_REDIRECT_TOSNET:
	case PRC_REDIRECT_TOSHOST:
		/* routing-related events: update routes rather than wake users */
		iso_pcbnotify(&cltb, siso,
				(int)inetctlerrmap[cmd], iso_rtchange);
		break;

	default:
		if (inetctlerrmap[cmd] == 0)
			return;		/* XXX */
		iso_pcbnotify(&cltb, siso, (int)inetctlerrmap[cmd],
				cltp_notify);
	}
}

cltp_output(isop, m)
	register struct isopcb *isop;
	register struct mbuf *m;
{
	register int len;
	register struct sockaddr_iso *siso;
	int hdrlen, error = 0, docsum;
	register u_char *up;

	if (isop->isop_laddr == 0 || isop->isop_faddr == 0) {
		error = ENOTCONN;
		goto bad;
	}
	/*
	 * Calculate data length and get a mbuf for CLTP header.
	 * Layout: 2 fixed octets + (2 + tlen) for each TSAP option,
	 * + 4 for the optional checksum parameter.
	 */
	hdrlen = 2 + 2 + isop->isop_laddr->siso_tlen +
				2 + isop->isop_faddr->siso_tlen;
	if (docsum = /*isop->isop_flags & CLNP_NO_CKSUM*/ cltp_cksum)
		hdrlen += 4;
	M_PREPEND(m, hdrlen, M_WAIT);
	len = m->m_pkthdr.len;
	/*
	 * Fill in mbuf with extended CLTP header
	 */
	up = mtod(m, u_char *);
	up[0] = hdrlen - 1;		/* length indicator excludes itself */
	up[1] = UD_TPDU_type;
	up[2] = CLTPOVAL_SRC;
	up[3] = (siso = isop->isop_laddr)->siso_tlen;
	up += 4;
	bcopy(TSEL(siso), (caddr_t)up, siso->siso_tlen);
	up += siso->siso_tlen;
	up[0] = CLTPOVAL_DST;
	up[1] = (siso = isop->isop_faddr)->siso_tlen;
	up += 2;
	bcopy(TSEL(siso), (caddr_t)up, siso->siso_tlen);
	/*
	 * Stuff checksum and output datagram.
	 */
	if (docsum) {
		up += siso->siso_tlen;
		up[0] = CLTPOVAL_CSM;
		up[1] = 2;
		iso_gen_csum(m, 2 + up - mtod(m, u_char *), len);
	}
	cltpstat.cltps_opackets++;
	return (tpclnp_output(isop, m, len, !docsum));
bad:
	m_freem(m);
	return (error);
}

u_long	cltp_sendspace = 9216;		/* really max datagram size */
u_long	cltp_recvspace = 40 * (1024 + sizeof(struct sockaddr_iso));
					/* 40 1K datagrams */


/*ARGSUSED*/
cltp_usrreq(so, req, m, nam, control)
	struct socket *so;
	int req;
	struct mbuf *m, *nam, *control;
{
	register struct isopcb *isop = sotoisopcb(so);
	int s, error = 0;	/* s is only set/used on the PRU_SEND nam path */

	if (req == PRU_CONTROL)
		return (iso_control(so, (int)m, (caddr_t)nam,
			(struct ifnet *)control));
	if ((isop == NULL && req != PRU_ATTACH) ||
		(control && control->m_len)) {
		error = EINVAL;
		goto release;
	}
	switch (req) {

	case PRU_ATTACH:
		if (isop != NULL) {
			error = EINVAL;
			break;
		}
		error = iso_pcballoc(so, &cltb);
		if (error)
			break;
		error = soreserve(so, cltp_sendspace, cltp_recvspace);
		if (error)
			break;
		break;

	case PRU_DETACH:
		iso_pcbdetach(isop);
		break;

	case PRU_BIND:
		error = iso_pcbbind(isop, nam);
		break;

	case PRU_LISTEN:
		error = EOPNOTSUPP;
		break;

	case PRU_CONNECT:
		if (isop->isop_faddr) {
			error = EISCONN;
			break;
		}
		error = iso_pcbconnect(isop, nam);
		if (error == 0)
			soisconnected(so);
		break;

	case PRU_CONNECT2:
		error = EOPNOTSUPP;
		break;

	case PRU_ACCEPT:
		error = EOPNOTSUPP;
		break;

	case PRU_DISCONNECT:
		if (isop->isop_faddr == 0) {
			error = ENOTCONN;
			break;
		}
		iso_pcbdisconnect(isop);
		so->so_state &= ~SS_ISCONNECTED;	/* XXX */
		break;

	case PRU_SHUTDOWN:
		socantsendmore(so);
		break;

	case PRU_SEND:
		if (nam) {
			if (isop->isop_faddr) {
				error = EISCONN;
				break;
			}
			/*
			 * Must block input while temporarily connected.
			 */
			s = splnet();
			error = iso_pcbconnect(isop, nam);
			if (error) {
				splx(s);
				break;
			}
		} else {
			if (isop->isop_faddr == 0) {
				error = ENOTCONN;
				break;
			}
		}
		error = cltp_output(isop, m);
		m = 0;		/* cltp_output consumed the mbuf */
		if (nam) {
			iso_pcbdisconnect(isop);
			splx(s);
		}
		break;

	case PRU_ABORT:
		soisdisconnected(so);
		iso_pcbdetach(isop);
		break;

	case PRU_SOCKADDR:
		if (isop->isop_laddr)
			bcopy((caddr_t)isop->isop_laddr, mtod(m, caddr_t),
				nam->m_len = isop->isop_laddr->siso_len);
		break;

	case PRU_PEERADDR:
		if (isop->isop_faddr)
			bcopy((caddr_t)isop->isop_faddr, mtod(m, caddr_t),
				nam->m_len = isop->isop_faddr->siso_len);
		break;

	case PRU_SENSE:
		/*
		 * stat: don't bother with a blocksize.
		 */
		return (0);

	case PRU_SENDOOB:
	case PRU_FASTTIMO:
	case PRU_SLOWTIMO:
	case PRU_PROTORCV:
	case PRU_PROTOSEND:
		error = EOPNOTSUPP;
		break;

	case PRU_RCVD:
	case PRU_RCVOOB:
		return (EOPNOTSUPP);	/* do not free mbuf's */

	default:
		panic("cltp_usrreq");
	}
release:
	if (control != NULL)
		m_freem(control);
	if (m != NULL)
		m_freem(m);
	return (error);
}
diff --git a/bsd/netiso/cltp_var.h b/bsd/netiso/cltp_var.h
new file mode 100644
index 000000000..f0edca949
--- /dev/null
+++ b/bsd/netiso/cltp_var.h
@@ -0,0 +1,75 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cltp_var.h 8.1 (Berkeley) 6/10/93 + */ + +#define UD_TPDU_type 0x40 /* packet type */ + +#define CLTPOVAL_SRC 0xc1 /* Source TSAP -- required */ +#define CLTPOVAL_DST 0xc2 /* Destination TSAP -- required */ +#define CLTPOVAL_CSM 0xc3 /* Checksum parameter -- optional */ + +struct cltpstat { + int cltps_hdrops; + int cltps_badsum; + int cltps_badlen; + int cltps_noport; + int cltps_ipackets; + int cltps_opackets; +}; + +#ifdef KERNEL +struct isopcb cltb; +struct cltpstat cltpstat; +#endif diff --git a/bsd/netiso/cons.h b/bsd/netiso/cons.h new file mode 100644 index 000000000..cb984d302 --- /dev/null +++ b/bsd/netiso/cons.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cons.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * + * interface between TP and CONS + */ + +#define CONSOPT_X25CRUD 0x01 /* set x.25 call request user data */ + +struct dte_addr { + u_char dtea_addr[7]; + u_char dtea_niblen; +}; + +#ifdef KERNEL + +#define CONN_OPEN 0x33 +#define CONN_CONFIRM 0x30 +#define CONN_REFUSE 0x31 +#define CONN_CLOSE 0x32 + +#define CONS_IS_DGM 0x1 +#define CONS_NOT_DGM 0x0 + +#ifndef PRC_NCMDS +#include +#endif /* PRC_NCMDS */ + +#define PRC_CONS_SEND_DONE 2 /* something unused in protosw.h */ + +#endif /* KERNEL */ diff --git a/bsd/netiso/cons_pcb.h b/bsd/netiso/cons_pcb.h new file mode 100644 index 000000000..9a5c9c0e7 --- /dev/null +++ b/bsd/netiso/cons_pcb.h @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)cons_pcb.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +/* + * protocol control block for the connection oriented network service + */ + +/* + * legit port #s for cons "transport" are 0..23 for su users only, and + * 1024..1099 for public users + */ +#define X25_SBSIZE 512 +#define X25_PORT_RESERVED 24 +#define X25_PORT_USERMAX 1099 +#define X25_FACIL_LEN_MAX 109 +#define X25_PARTIAL_PKT_LEN_MAX (MLEN - sizeof(struct cons_pcb)) + +#ifndef ARGO_DEBUG +#define X25_TTL 600 /* 5 min */ +#else /* ARGO_DEBUG */ +#define X25_TTL 120 /* 1 min */ +#endif /* ARGO_DEBUG */ + +struct cons_pcb { + struct isopcb _co_isopcb; +#define co_next _co_isopcb.isop_next +/* prev used for netstat only */ +#define co_prev _co_isopcb.isop_prev +#define co_head _co_isopcb.isop_head +#define co_laddr _co_isopcb.isop_laddr +#define co_faddr _co_isopcb.isop_faddr +#define co_lport _co_isopcb.isop_laddr.siso_tsuffix +#define co_fport _co_isopcb.isop_faddr.siso_tsuffix +#define co_route _co_isopcb.isop_route +#define co_socket _co_isopcb.isop_socket +#define co_chanmask _co_isopcb.isop_chanmask +#define co_negchanmask _co_isopcb.isop_negchanmask +#define co_x25crud _co_isopcb.isop_x25crud +#define co_x25crud_len _co_isopcb.isop_x25crud_len + u_short co_state; + u_char co_flags; + u_short co_ttl; /* time to live timer */ + u_short co_init_ttl; /* initial value of ttl */ + int co_channel; /* logical channel */ + struct ifnet * co_ifp; /* interface */ + struct protosw *co_proto; + + struct ifqueue co_pending; /* queue data to send when connection + completes*/ +#define MAX_DTE_LEN 0x7 /* 17 bcd digits */ + struct dte_addr co_peer_dte; + struct cons_pcb *co_myself; /* DEBUGGING AID */ +}; + +/* + * X.25 Packet types + */ +#define XPKT_DATA 1 +#define XPKT_INTERRUPT 2 +#define XPKT_FLOWCONTROL 3 /* not delivered? 
*/ + +/* + * pcb xtates + */ + +#define CLOSED 0x0 +#define LISTENING 0x1 +#define CLOSING 0x2 +/* USABLE STATES MUST BE LAST */ +#define CONNECTING 0x3 +#define ACKWAIT 0x4 +#define OPEN 0x5 +#define MIN_USABLE_STATE CONNECTING + +#define cons_NSTATES 0x6 + + +/* type */ +#define CONSF_OCRE 0x40 /* created on OUTPUT */ +#define CONSF_ICRE 0x20 /* created on INPUT */ +#define CONSF_unused 0x10 /* not used */ +#define CONSF_unused2 0x08 /* not used */ +#define CONSF_DGM 0x04 /* for dgm use only */ +#define CONSF_XTS 0x02 /* for cons-as-transport-service */ +#define CONSF_LOOPBACK 0x01 /* loopback was on when connection commenced */ + +#define X_NOCHANNEL 0x80 + + +struct cons_stat { + u_int co_intr; /* input from eicon board */ + u_int co_restart; /* ecn_restart() request issued to board */ + u_int co_slowtimo; /* times slowtimo called */ + u_int co_timedout; /* connections closed by slowtimo */ + u_int co_ack; /* ECN_ACK indication came from eicon board */ + u_int co_receive; /* ECN_RECEIVE indication came from eicon board */ + u_int co_send; /* ECN_SEND request issued to board */ + u_int co_reset_in; /* ECN_RESET indication came from eicon board */ + u_int co_reset_out; /* ECN_RESET issued to the eicon board */ + u_int co_clear_in; /* ECN_CLEAR indication came from eicon board */ + u_int co_clear_out; /* ECN_CLEAR request issued to board */ + u_int co_refuse; /* ECN_REFUSE indication came from eicon board */ + u_int co_accept; /* ECN_ACCEPT indication came from eicon board */ + u_int co_connect; /* ECN_CONNECT indication came from eicon board */ + u_int co_call; /* ECN_CALL request issued to board */ + u_int co_Rdrops; /* bad pkt came from ll */ + u_int co_Xdrops; /* can't keep up */ + + u_int co_intrpt_pkts_in; /* interrupt packets in */ + u_int co_avg_qlen; + u_int co_avg_qdrop; + u_int co_active; + + u_int co_noresources; + u_int co_parse_facil_err; + u_int co_addr_proto_consist_err; + u_int co_no_copcb; +} cons_stat; + +u_char x25_error_stats[CONL_ERROR_MAX + 
1]; + +struct ifqueue consintrq; + +/* reasons for clear are in a data mbuf chained to a clear ecn_request */ +struct e_clear_data { + u_char ecd_cause; + u_char ecd_diagnostic; +}; + +#ifdef KERNEL +#define IncStat(XYZ) cons_stat.XYZ++ +#endif /* KERNEL */ diff --git a/bsd/netiso/eonvar.h b/bsd/netiso/eonvar.h new file mode 100644 index 000000000..11a47e88b --- /dev/null +++ b/bsd/netiso/eonvar.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)eonvar.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. 
+ +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#define EON_986_VERSION 0x3 +#define EON_VERSION 0x1 + +#define EON_CACHESIZE 30 + +#define E_FREE 1 +#define E_LINK 2 +#define E_ES 3 +#define E_IS 4 + + +/* + * this overlays a sockaddr_iso + */ + +struct sockaddr_eon { + u_char seon_len; /* Length */ + u_char seon_family; /* AF_ISO */ + u_char seon_status; /* overlays session suffixlen */ +#define EON_ESLINK_UP 0x1 +#define EON_ESLINK_DOWN 0x2 +#define EON_ISLINK_UP 0x10 +#define EON_ISLINK_DOWN 0x20 +/* no change is neither up or down */ + u_char seon_pad1; /* 0, overlays tsfxlen */ + u_char seon_adrlen; + u_char seon_afi; /* 47 */ + u_char seon_idi[2]; /* 0006 */ + u_char seon_vers; /* 03 */ + u_char seon_glbnum[2]; /* see RFC 1069 */ + u_char seon_RDN[2]; /* see RFC 1070 */ + u_char seon_pad2[3]; /* see RFC 1070 */ + u_char seon_LAREA[2]; /* see RFC 1070 */ + u_char seon_pad3[2]; /* see RFC 1070 */ + /* right now ip addr is aligned -- be careful -- + * future revisions may have it u_char[4] + */ + u_int seon_ipaddr; /* a.b.c.d */ + u_char seon_protoid; /* NSEL */ +}; + +#ifdef EON_TEMPLATE +struct sockaddr_eon eon_template = { + sizeof (eon_template), AF_ISO, 0, 0, 0x14, + 0x47, 0x0, 0x6, 0x3, 0 +}; +#endif + +#define DOWNBITS ( EON_ESLINK_DOWN | EON_ISLINK_DOWN ) +#define UPBITS ( EON_ESLINK_UP | EON_ISLINK_UP ) + +#define SIOCSEONCORE _IOWR('i',10, struct iso_ifreq) /* EON core member */ +#define SIOCGEONCORE 
_IOWR('i',11, struct iso_ifreq) /* EON core member */ + +struct eon_hdr { + u_char eonh_vers; /* value 1 */ + u_char eonh_class; /* address multicast class, below */ +#define EON_NORMAL_ADDR 0x0 +#define EON_MULTICAST_ES 0x1 +#define EON_MULTICAST_IS 0x2 +#define EON_BROADCAST 0x3 + u_short eonh_csum; /* osi checksum (choke)*/ +}; +struct eon_iphdr { + struct ip ei_ip; + struct eon_hdr ei_eh; +}; +#define EONIPLEN (sizeof(struct eon_hdr) + sizeof(struct ip)) + +/* stole these 2 fields of the flags for I-am-ES and I-am-IS */ +#define IFF_ES 0x400 +#define IFF_IS 0x800 + +struct eon_stat { + int es_in_multi_es; + int es_in_multi_is; + int es_in_broad; + int es_in_normal; + int es_out_multi_es; + int es_out_multi_is; + int es_out_broad; + int es_out_normal; + int es_ipout; + + int es_icmp[PRC_NCMDS]; + /* errors */ + int es_badcsum; + int es_badhdr; +} eonstat; + +#undef IncStat +#define IncStat(xxx) eonstat.xxx++ + +typedef struct qhdr { + struct qhdr *link, *rlink; +} *queue_t; + +struct eon_llinfo { + struct qhdr el_qhdr; /* keep all in a list */ + int el_flags; /* cache valid ? */ + int el_snpaoffset; /* IP address contained in dst nsap */ + struct rtentry *el_rt; /* back pointer to parent route */ + struct eon_iphdr el_ei; /* precomputed portion of hdr */ + struct route el_iproute; /* if direct route cache IP info */ + /* if gateway, cache secondary route */ +}; +#define el_iphdr el_ei.ei_ip +#define el_eonhdr el_ei.ei_eh diff --git a/bsd/netiso/esis.c b/bsd/netiso/esis.c new file mode 100644 index 000000000..a387c075e --- /dev/null +++ b/bsd/netiso/esis.c @@ -0,0 +1,1084 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)esis.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#if ISO + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Global variables to esis implementation + * + * esis_holding_time - the holding time (sec) parameter for outgoing pdus + * esis_config_time - the frequency (sec) that hellos are generated + * esis_esconfig_time - suggested es configuration time placed in the + * ish. + * + */ +struct rawcb esis_pcb; +void esis_config(), snpac_age(); +int esis_sendspace = 2048; +int esis_recvspace = 2048; +short esis_holding_time = ESIS_HT; +short esis_config_time = ESIS_CONFIG; +short esis_esconfig_time = ESIS_CONFIG; +extern int iso_systype; +struct sockaddr_dl esis_dl = { sizeof(esis_dl), AF_LINK }; +extern char all_es_snpa[], all_is_snpa[]; + +#define EXTEND_PACKET(m, mhdr, cp)\ + if (((m)->m_next = m_getclr(M_DONTWAIT, MT_HEADER)) == NULL) {\ + esis_stat.es_nomem++;\ + m_freem(mhdr);\ + return;\ + } else {\ + (m) = (m)->m_next;\ + (cp) = mtod((m), caddr_t);\ + } +/* + * FUNCTION: esis_init + * + * PURPOSE: Initialize the kernel portion of esis protocol + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +esis_init() +{ + extern struct clnl_protosw clnl_protox[256]; + int esis_input(), isis_input(); +#ifdef ISO_X25ESIS + int x25esis_input(); +#endif /* ISO_X25ESIS */ + + esis_pcb.rcb_next = esis_pcb.rcb_prev = &esis_pcb; + llinfo_llc.lc_next = llinfo_llc.lc_prev = &llinfo_llc; + + timeout(snpac_age, (caddr_t)0, hz); + timeout(esis_config, (caddr_t)0, hz); + + clnl_protox[ISO9542_ESIS].clnl_input = esis_input; + clnl_protox[ISO10589_ISIS].clnl_input = isis_input; +#ifdef ISO_X25ESIS + clnl_protox[ISO9542X25_ESIS].clnl_input = x25esis_input; +#endif /* ISO_X25ESIS */ 
+} + +/* + * FUNCTION: esis_usrreq + * + * PURPOSE: Handle user level esis requests + * + * RETURNS: 0 or appropriate errno + * + * SIDE EFFECTS: + * + */ +/*ARGSUSED*/ +esis_usrreq(so, req, m, nam, control) +struct socket *so; /* socket: used only to get to this code */ +int req; /* request */ +struct mbuf *m; /* data for request */ +struct mbuf *nam; /* optional name */ +struct mbuf *control; /* optional control */ +{ + struct rawcb *rp = sotorawcb(so); + int error = 0; + + if ((so->so_state & SS_PRIV) == 0) { + error = EACCES; + goto release; + } + if (rp == NULL && req != PRU_ATTACH) { + error = EINVAL; + goto release; + } + + switch (req) { + case PRU_ATTACH: + if (rp != NULL) { + error = EINVAL; + break; + } + MALLOC(rp, struct rawcb *, sizeof(*rp), M_PCB, M_WAITOK); + if (so->so_pcb = (caddr_t)rp) { + bzero(so->so_pcb, sizeof(*rp)); + insque(rp, &esis_pcb); + rp->rcb_socket = so; + error = soreserve(so, esis_sendspace, esis_recvspace); + } else + error = ENOBUFS; + break; + + case PRU_SEND: + if (nam == NULL) { + error = EINVAL; + break; + } + /* error checking here */ + error = isis_output(mtod(nam,struct sockaddr_dl *), m); + m = NULL; + break; + + case PRU_DETACH: + raw_detach(rp); + break; + + case PRU_SHUTDOWN: + socantsendmore(so); + break; + + case PRU_ABORT: + soisdisconnected(so); + raw_detach(rp); + break; + + case PRU_SENSE: + return (0); + + default: + return (EOPNOTSUPP); + } +release: + if (m != NULL) + m_freem(m); + + return (error); +} + +/* + * FUNCTION: esis_input + * + * PURPOSE: Process an incoming esis packet + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +esis_input(m0, shp) +struct mbuf *m0; /* ptr to first mbuf of pkt */ +struct snpa_hdr *shp; /* subnetwork header */ +{ + register struct esis_fixed *pdu = mtod(m0, struct esis_fixed *); + register int type; + + /* + * check checksum if necessary + */ + if (ESIS_CKSUM_REQUIRED(pdu) && iso_check_csum(m0, (int)pdu->esis_hdr_len)) { + esis_stat.es_badcsum++; + goto bad; 
+ } + + /* check version */ + if (pdu->esis_vers != ESIS_VERSION) { + esis_stat.es_badvers++; + goto bad; + } + type = pdu->esis_type & 0x1f; + switch (type) { + case ESIS_ESH: + esis_eshinput(m0, shp); + break; + + case ESIS_ISH: + esis_ishinput(m0, shp); + break; + + case ESIS_RD: + esis_rdinput(m0, shp); + break; + + default: + esis_stat.es_badtype++; + } + +bad: + if (esis_pcb.rcb_next != &esis_pcb) + isis_input(m0, shp); + else + m_freem(m0); +} + +/* + * FUNCTION: esis_rdoutput + * + * PURPOSE: Transmit a redirect pdu + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: Assumes there is enough space for fixed part of header, + * DA, BSNPA and NET in first mbuf. + */ +esis_rdoutput(inbound_shp, inbound_m, inbound_oidx, rd_dstnsap, rt) +struct snpa_hdr *inbound_shp; /* snpa hdr from incoming packet */ +struct mbuf *inbound_m; /* incoming pkt itself */ +struct clnp_optidx *inbound_oidx; /* clnp options assoc with incoming pkt */ +struct iso_addr *rd_dstnsap; /* ultimate destination of pkt */ +struct rtentry *rt; /* snpa cache info regarding next hop of + pkt */ +{ + struct mbuf *m, *m0; + caddr_t cp; + struct esis_fixed *pdu; + int len, total_len = 0; + struct sockaddr_iso siso; + struct ifnet *ifp = inbound_shp->snh_ifp; + struct sockaddr_dl *sdl; + struct iso_addr *rd_gwnsap; + + if (rt->rt_flags & RTF_GATEWAY) { + rd_gwnsap = &((struct sockaddr_iso *)rt->rt_gateway)->siso_addr; + rt = rtalloc1(rt->rt_gateway, 0); + } else + rd_gwnsap = &((struct sockaddr_iso *)rt_key(rt))->siso_addr; + if (rt == 0 || (sdl = (struct sockaddr_dl *)rt->rt_gateway) == 0 || + sdl->sdl_family != AF_LINK) { + /* maybe we should have a function that you + could put in the iso_ifaddr structure + which could translate iso_addrs into snpa's + where there is a known mapping for that address type */ + esis_stat.es_badtype++; + return; + } + esis_stat.es_rdsent++; + IFDEBUG(D_ESISOUTPUT) + printf("esis_rdoutput: ifp x%x (%s%d), ht %d, m x%x, oidx x%x\n", + ifp, ifp->if_name, 
ifp->if_unit, esis_holding_time, inbound_m, + inbound_oidx); + printf("\tdestination: %s\n", clnp_iso_addrp(rd_dstnsap)); + printf("\tredirected toward:%s\n", clnp_iso_addrp(rd_gwnsap)); + ENDDEBUG + + if ((m0 = m = m_gethdr(M_DONTWAIT, MT_HEADER)) == NULL) { + esis_stat.es_nomem++; + return; + } + bzero(mtod(m, caddr_t), MHLEN); + + pdu = mtod(m, struct esis_fixed *); + cp = (caddr_t)(pdu + 1); /*pointer arith.; 1st byte after header */ + len = sizeof(struct esis_fixed); + + /* + * Build fixed part of header + */ + pdu->esis_proto_id = ISO9542_ESIS; + pdu->esis_vers = ESIS_VERSION; + pdu->esis_type = ESIS_RD; + HTOC(pdu->esis_ht_msb, pdu->esis_ht_lsb, esis_holding_time); + + /* Insert destination address */ + (void) esis_insert_addr(&cp, &len, rd_dstnsap, m, 0); + + /* Insert the snpa of better next hop */ + *cp++ = sdl->sdl_alen; + bcopy(LLADDR(sdl), cp, sdl->sdl_alen); + cp += sdl->sdl_alen; + len += (sdl->sdl_alen + 1); + + /* + * If the next hop is not the destination, then it ought to be + * an IS and it should be inserted next. Else, set the + * NETL to 0 + */ + /* PHASE2 use mask from ifp of outgoing interface */ + if (!iso_addrmatch1(rd_dstnsap, rd_gwnsap)) { + /* this should not happen: + if ((nhop_sc->sc_flags & SNPA_IS) == 0) { + printf("esis_rdoutput: next hop is not dst and not an IS\n"); + m_freem(m0); + return; + } */ + (void) esis_insert_addr(&cp, &len, rd_gwnsap, m, 0); + } else { + *cp++ = 0; /* NETL */ + len++; + } + m->m_len = len; + + /* + * PHASE2 + * If redirect is to an IS, add an address mask. The mask to be + * used should be the mask present in the routing entry used to + * forward the original data packet. 
+ */ + + /* + * Copy Qos, priority, or security options present in original npdu + */ + if (inbound_oidx) { + /* THIS CODE IS CURRENTLY (mostly) UNTESTED */ + int optlen = 0; + if (inbound_oidx->cni_qos_formatp) + optlen += (inbound_oidx->cni_qos_len + 2); + if (inbound_oidx->cni_priorp) /* priority option is 1 byte long */ + optlen += 3; + if (inbound_oidx->cni_securep) + optlen += (inbound_oidx->cni_secure_len + 2); + if (M_TRAILINGSPACE(m) < optlen) { + EXTEND_PACKET(m, m0, cp); + m->m_len = 0; + /* assumes MLEN > optlen */ + } + /* assume MLEN-len > optlen */ + /* + * When copying options, copy from ptr - 2 in order to grab + * the option code and length + */ + if (inbound_oidx->cni_qos_formatp) { + bcopy(mtod(inbound_m, caddr_t) + inbound_oidx->cni_qos_formatp - 2, + cp, (unsigned)(inbound_oidx->cni_qos_len + 2)); + cp += inbound_oidx->cni_qos_len + 2; + } + if (inbound_oidx->cni_priorp) { + bcopy(mtod(inbound_m, caddr_t) + inbound_oidx->cni_priorp - 2, + cp, 3); + cp += 3; + } + if (inbound_oidx->cni_securep) { + bcopy(mtod(inbound_m, caddr_t) + inbound_oidx->cni_securep - 2, cp, + (unsigned)(inbound_oidx->cni_secure_len + 2)); + cp += inbound_oidx->cni_secure_len + 2; + } + m->m_len += optlen; + len += optlen; + } + + pdu->esis_hdr_len = m0->m_pkthdr.len = len; + iso_gen_csum(m0, ESIS_CKSUM_OFF, (int)pdu->esis_hdr_len); + + bzero((caddr_t)&siso, sizeof(siso)); + siso.siso_family = AF_ISO; + siso.siso_data[0] = AFI_SNA; + siso.siso_nlen = 6 + 1; /* should be taken from snpa_hdr */ + /* +1 is for AFI */ + bcopy(inbound_shp->snh_shost, siso.siso_data + 1, 6); + (ifp->if_output)(ifp, m0, (struct sockaddr *)&siso, 0); +} + +/* + * FUNCTION: esis_insert_addr + * + * PURPOSE: Insert an iso_addr into a buffer + * + * RETURNS: true if buffer was big enough, else false + * + * SIDE EFFECTS: Increment buf & len according to size of iso_addr + * + * NOTES: Plus 1 here is for length byte + */ +esis_insert_addr(buf, len, isoa, m, nsellen) +register caddr_t *buf; /* ptr to 
buffer to put address into */
+int *len; /* ptr to length of buffer so far */
+register struct iso_addr *isoa; /* ptr to address */
+register struct mbuf *m; /* determine if there remains space */
+int nsellen;
+{
+ register int newlen, result = 0;
+
+ isoa->isoa_len -= nsellen;
+ newlen = isoa->isoa_len + 1;
+ if (newlen <= M_TRAILINGSPACE(m)) {
+ bcopy((caddr_t)isoa, *buf, newlen);
+ *len += newlen;
+ *buf += newlen;
+ m->m_len += newlen;
+ result = 1;
+ }
+ isoa->isoa_len += nsellen;
+ return (result);
+}
+
+#define ESIS_EXTRACT_ADDR(d, b) { d = (struct iso_addr *)(b); b += (1 + *b); \
+ if (b > buflim) {esis_stat.es_toosmall++; goto bad;}}
+#define ESIS_NEXT_OPTION(b) { b += (2 + b[1]); \
+ if (b > buflim) {esis_stat.es_toosmall++; goto bad;}}
+int ESHonly = 0;
+
+/*
+ * FUNCTION: esis_eshinput
+ *
+ * PURPOSE: Process an incoming ESH pdu
+ *
+ * RETURNS: nothing
+ *
+ * SIDE EFFECTS:
+ *
+ * NOTES:
+ */
+esis_eshinput(m, shp)
+struct mbuf *m; /* esh pdu */
+struct snpa_hdr *shp; /* subnetwork header */
+{
+ struct esis_fixed *pdu = mtod(m, struct esis_fixed *);
+ u_short ht; /* holding time */
+ struct iso_addr *nsap;
+ int naddr;
+ u_char *buf = (u_char *)(pdu + 1);
+ u_char *buflim = pdu->esis_hdr_len + (u_char *)pdu;
+ int new_entry = 0;
+
+ esis_stat.es_eshrcvd++;
+
+ CTOH(pdu->esis_ht_msb, pdu->esis_ht_lsb, ht);
+
+ naddr = *buf++;
+ if (buf >= buflim)
+ goto bad;
+ if (naddr == 1) {
+ ESIS_EXTRACT_ADDR(nsap, buf);
+ new_entry = snpac_add(shp->snh_ifp,
+ nsap, shp->snh_shost, SNPA_ES, ht, 0);
+ } else {
+ int nsellength = 0, nlen = 0;
+ {
+ /* See if we want to compress out multiple nsaps differing
+ only by nsel */
+ register struct ifaddr *ifa = shp->snh_ifp->if_addrlist;
+ for (; ifa; ifa = ifa->ifa_next)
+ if (ifa->ifa_addr->sa_family == AF_ISO) {
+ nsellength = ((struct iso_ifaddr *)ifa)->ia_addr.siso_tlen;
+ break;
+ }
+ }
+ IFDEBUG(D_ESISINPUT)
+ printf("esis_eshinput: esh: ht %d, naddr %d nsellength %d\n",
+ ht, naddr, nsellength);
+ ENDDEBUG
+ while (naddr-- > 0) { + struct iso_addr *nsap2; u_char *buf2; + ESIS_EXTRACT_ADDR(nsap, buf); + /* see if there is at least one more nsap in ESH differing + only by nsel */ + if (nsellength != 0) for (buf2 = buf; buf2 < buflim;) { + ESIS_EXTRACT_ADDR(nsap2, buf2); + IFDEBUG(D_ESISINPUT) + printf("esis_eshinput: comparing %s ", + clnp_iso_addrp(nsap)); + printf("and %s\n", clnp_iso_addrp(nsap2)); + ENDDEBUG + if (Bcmp(nsap->isoa_genaddr, nsap2->isoa_genaddr, + nsap->isoa_len - nsellength) == 0) { + nlen = nsellength; + break; + } + } + new_entry |= snpac_add(shp->snh_ifp, + nsap, shp->snh_shost, SNPA_ES, ht, nlen); + nlen = 0; + } + } + IFDEBUG(D_ESISINPUT) + printf("esis_eshinput: nsap %s is %s\n", + clnp_iso_addrp(nsap), new_entry ? "new" : "old"); + ENDDEBUG + if (new_entry && (iso_systype & SNPA_IS)) + esis_shoutput(shp->snh_ifp, ESIS_ISH, esis_holding_time, + shp->snh_shost, 6, (struct iso_addr *)0); +bad: + return; +} + +/* + * FUNCTION: esis_ishinput + * + * PURPOSE: process an incoming ISH pdu + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: + */ +esis_ishinput(m, shp) +struct mbuf *m; /* esh pdu */ +struct snpa_hdr *shp; /* subnetwork header */ +{ + struct esis_fixed *pdu = mtod(m, struct esis_fixed *); + u_short ht, newct; /* holding time */ + struct iso_addr *nsap; /* Network Entity Title */ + register u_char *buf = (u_char *) (pdu + 1); + register u_char *buflim = pdu->esis_hdr_len + (u_char *)pdu; + int new_entry; + + esis_stat.es_ishrcvd++; + CTOH(pdu->esis_ht_msb, pdu->esis_ht_lsb, ht); + + IFDEBUG(D_ESISINPUT) + printf("esis_ishinput: ish: ht %d\n", ht); + ENDDEBUG + if (ESHonly) + goto bad; + + ESIS_EXTRACT_ADDR(nsap, buf); + + while (buf < buflim) { + switch (*buf) { + case ESISOVAL_ESCT: + if (iso_systype & SNPA_IS) + break; + if (buf[1] != 2) + goto bad; + CTOH(buf[2], buf[3], newct); + if (esis_config_time != newct) { + untimeout(esis_config,0); + esis_config_time = newct; + esis_config(); + } + break; + + default: + printf("Unknown ISH 
option: %x\n", *buf); + } + ESIS_NEXT_OPTION(buf); + } + new_entry = snpac_add(shp->snh_ifp, nsap, shp->snh_shost, SNPA_IS, ht, 0); + IFDEBUG(D_ESISINPUT) + printf("esis_ishinput: nsap %s is %s\n", + clnp_iso_addrp(nsap), new_entry ? "new" : "old"); + ENDDEBUG + + if (new_entry) + esis_shoutput(shp->snh_ifp, + iso_systype & SNPA_ES ? ESIS_ESH : ESIS_ISH, + esis_holding_time, shp->snh_shost, 6, (struct iso_addr *)0); +bad: + return; +} + +/* + * FUNCTION: esis_rdinput + * + * PURPOSE: Process an incoming RD pdu + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: + */ +esis_rdinput(m0, shp) +struct mbuf *m0; /* esh pdu */ +struct snpa_hdr *shp; /* subnetwork header */ +{ + struct esis_fixed *pdu = mtod(m0, struct esis_fixed *); + u_short ht; /* holding time */ + struct iso_addr *da, *net = 0, *netmask = 0, *snpamask = 0; + register struct iso_addr *bsnpa; + register u_char *buf = (u_char *)(pdu + 1); + register u_char *buflim = pdu->esis_hdr_len + (u_char *)pdu; + + esis_stat.es_rdrcvd++; + + /* intermediate systems ignore redirects */ + if (iso_systype & SNPA_IS) + return; + if (ESHonly) + return; + + CTOH(pdu->esis_ht_msb, pdu->esis_ht_lsb, ht); + if (buf >= buflim) + return; + + /* Extract DA */ + ESIS_EXTRACT_ADDR(da, buf); + + /* Extract better snpa */ + ESIS_EXTRACT_ADDR(bsnpa, buf); + + /* Extract NET if present */ + if (buf < buflim) { + if (*buf == 0) + buf++; /* no NET present, skip NETL anyway */ + else + ESIS_EXTRACT_ADDR(net, buf); + } + + /* process options */ + while (buf < buflim) { + switch (*buf) { + case ESISOVAL_SNPAMASK: + if (snpamask) /* duplicate */ + return; + snpamask = (struct iso_addr *)(buf + 1); + break; + + case ESISOVAL_NETMASK: + if (netmask) /* duplicate */ + return; + netmask = (struct iso_addr *)(buf + 1); + break; + + default: + printf("Unknown option in ESIS RD (0x%x)\n", buf[-1]); + } + ESIS_NEXT_OPTION(buf); + } + + IFDEBUG(D_ESISINPUT) + printf("esis_rdinput: rd: ht %d, da %s\n", ht, clnp_iso_addrp(da)); + if (net) + 
printf("\t: net %s\n", clnp_iso_addrp(net)); + ENDDEBUG + /* + * If netl is zero, then redirect is to an ES. We need to add an entry + * to the snpa cache for (destination, better snpa). + * If netl is not zero, then the redirect is to an IS. In this + * case, add an snpa cache entry for (net, better snpa). + * + * If the redirect is to an IS, add a route entry towards that + * IS. + */ + if (net == 0 || net->isoa_len == 0 || snpamask) { + /* redirect to an ES */ + snpac_add(shp->snh_ifp, da, + bsnpa->isoa_genaddr, SNPA_ES, ht, 0); + } else { + snpac_add(shp->snh_ifp, net, + bsnpa->isoa_genaddr, SNPA_IS, ht, 0); + snpac_addrt(shp->snh_ifp, da, net, netmask); + } +bad: ; /* Needed by ESIS_NEXT_OPTION */ +} + +/* + * FUNCTION: esis_config + * + * PURPOSE: Report configuration + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: Called every esis_config_time seconds + */ +void +esis_config() +{ + register struct ifnet *ifp; + + timeout(esis_config, (caddr_t)0, hz * esis_config_time); + + /* + * Report configuration for each interface that + * - is UP + * - has BROADCAST capability + * - has an ISO address + */ + /* Todo: a better way would be to construct the esh or ish + * once and copy it out for all devices, possibly calling + * a method in the iso_ifaddr structure to encapsulate and + * transmit it. This could work to advantage for non-broadcast media + */ + + for (ifp = ifnet; ifp; ifp = ifp->if_next) { + if ((ifp->if_flags & IFF_UP) && + (ifp->if_flags & IFF_BROADCAST)) { + /* search for an ISO address family */ + struct ifaddr *ia; + + for (ia = ifp->if_addrlist; ia; ia = ia->ifa_next) { + if (ia->ifa_addr->sa_family == AF_ISO) { + esis_shoutput(ifp, + iso_systype & SNPA_ES ? ESIS_ESH : ESIS_ISH, + esis_holding_time, + (caddr_t)(iso_systype & SNPA_ES ? 
all_is_snpa : + all_es_snpa), 6, (struct iso_addr *)0); + break; + } + } + } + } +} + +/* + * FUNCTION: esis_shoutput + * + * PURPOSE: Transmit an esh or ish pdu + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +esis_shoutput(ifp, type, ht, sn_addr, sn_len, isoa) +struct ifnet *ifp; +int type; +short ht; +caddr_t sn_addr; +int sn_len; +struct iso_addr *isoa; +{ + struct mbuf *m, *m0; + caddr_t cp, naddrp; + int naddr = 0; + struct esis_fixed *pdu; + struct iso_ifaddr *ia; + int len; + struct sockaddr_iso siso; + + if (type == ESIS_ESH) + esis_stat.es_eshsent++; + else if (type == ESIS_ISH) + esis_stat.es_ishsent++; + else { + printf("esis_shoutput: bad pdu type\n"); + return; + } + + IFDEBUG(D_ESISOUTPUT) + int i; + printf("esis_shoutput: ifp x%x (%s%d), %s, ht %d, to: [%d] ", + ifp, ifp->if_name, ifp->if_unit, type == ESIS_ESH ? "esh" : "ish", + ht, sn_len); + for (i=0; iesis_proto_id = ISO9542_ESIS; + pdu->esis_vers = ESIS_VERSION; + pdu->esis_type = type; + HTOC(pdu->esis_ht_msb, pdu->esis_ht_lsb, ht); + + if (type == ESIS_ESH) { + cp++; + len++; + } + + m->m_len = len; + if (isoa) { + /* + * Here we are responding to a clnp packet sent to an NSAP + * that is ours which was sent to the MAC addr all_es's. + * It is possible that we did not specifically advertise this + * NSAP, even though it is ours, so we will respond + * directly to the sender that we are here. If we do have + * multiple NSEL's we'll tack them on so he can compress them out. + */ + (void) esis_insert_addr(&cp, &len, isoa, m, 0); + naddr = 1; + } + for (ia = iso_ifaddr; ia; ia = ia->ia_next) { + int nsellen = (type == ESIS_ISH ? 
ia->ia_addr.siso_tlen : 0); + int n = ia->ia_addr.siso_nlen; + register struct iso_ifaddr *ia2; + + if (type == ESIS_ISH && naddr > 0) + break; + for (ia2 = iso_ifaddr; ia2 != ia; ia2 = ia2->ia_next) + if (Bcmp(ia->ia_addr.siso_data, ia2->ia_addr.siso_data, n) == 0) + break; + if (ia2 != ia) + continue; /* Means we have previously copied this nsap */ + if (isoa && Bcmp(ia->ia_addr.siso_data, isoa->isoa_genaddr, n) == 0) { + isoa = 0; + continue; /* Ditto */ + } + IFDEBUG(D_ESISOUTPUT) + printf("esis_shoutput: adding NSAP %s\n", + clnp_iso_addrp(&ia->ia_addr.siso_addr)); + ENDDEBUG + if (!esis_insert_addr(&cp, &len, + &ia->ia_addr.siso_addr, m, nsellen)) { + EXTEND_PACKET(m, m0, cp); + (void) esis_insert_addr(&cp, &len, &ia->ia_addr.siso_addr, m, + nsellen); + } + naddr++; + } + + if (type == ESIS_ESH) + *naddrp = naddr; + else { + /* add suggested es config timer option to ISH */ + if (M_TRAILINGSPACE(m) < 4) { + printf("esis_shoutput: extending packet\n"); + EXTEND_PACKET(m, m0, cp); + } + *cp++ = ESISOVAL_ESCT; + *cp++ = 2; + HTOC(*cp, *(cp+1), esis_esconfig_time); + len += 4; + m->m_len += 4; + IFDEBUG(D_ESISOUTPUT) + printf("m0 0x%x, m 0x%x, data 0x%x, len %d, cp 0x%x\n", + m0, m, m->m_data, m->m_len, cp); + ENDDEBUG + } + + m0->m_pkthdr.len = len; + pdu->esis_hdr_len = len; + iso_gen_csum(m0, ESIS_CKSUM_OFF, (int)pdu->esis_hdr_len); + + bzero((caddr_t)&siso, sizeof(siso)); + siso.siso_family = AF_ISO; + siso.siso_data[0] = AFI_SNA; + siso.siso_nlen = sn_len + 1; + bcopy(sn_addr, siso.siso_data + 1, (unsigned)sn_len); + (ifp->if_output)(ifp, m0, (struct sockaddr *)&siso, 0); +} + +/* + * FUNCTION: isis_input + * + * PURPOSE: Process an incoming isis packet + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +isis_input(m0, shp) +struct mbuf *m0; /* ptr to first mbuf of pkt */ +struct snpa_hdr *shp; /* subnetwork header */ +{ + register int type; + register struct rawcb *rp, *first_rp = 0; + struct ifnet *ifp = shp->snh_ifp; + char workbuf[16]; + 
struct mbuf *mm; + + IFDEBUG(D_ISISINPUT) + int i; + + printf("isis_input: pkt on ifp x%x (%s%d): from:", ifp, + ifp->if_name, ifp->if_unit); + for (i=0; i<6; i++) + printf("%x%c", shp->snh_shost[i]&0xff, (i<5) ? ':' : ' '); + printf(" to:"); + for (i=0; i<6; i++) + printf("%x%c", shp->snh_dhost[i]&0xff, (i<5) ? ':' : ' '); + printf("\n"); + ENDDEBUG + esis_dl.sdl_alen = ifp->if_addrlen; + esis_dl.sdl_index = ifp->if_index; + bcopy(shp->snh_shost, (caddr_t)esis_dl.sdl_data, esis_dl.sdl_alen); + for (rp = esis_pcb.rcb_next; rp != &esis_pcb; rp = rp->rcb_next) { + if (first_rp == 0) { + first_rp = rp; + continue; + } + if (mm = m_copy(m0, 0, M_COPYALL)) { /*can't block at interrupt level */ + if (sbappendaddr(&rp->rcb_socket->so_rcv, + &esis_dl, mm, (struct mbuf *)0) != 0) { + sorwakeup(rp->rcb_socket); + } else { + IFDEBUG(D_ISISINPUT) + printf("Error in sbappenaddr, mm = 0x%x\n", mm); + ENDDEBUG + m_freem(mm); + } + } + } + if (first_rp && sbappendaddr(&first_rp->rcb_socket->so_rcv, + &esis_dl, m0, (struct mbuf *)0) != 0) { + sorwakeup(first_rp->rcb_socket); + return; + } + m_freem(m0); +} + +isis_output(sdl, m) +register struct sockaddr_dl *sdl; +struct mbuf *m; +{ + register struct ifnet *ifp; + struct ifaddr *ifa, *ifa_ifwithnet(); + struct sockaddr_iso siso; + int error = 0; + unsigned sn_len; + + ifa = ifa_ifwithnet((struct sockaddr *)sdl); /* get ifp from sdl */ + if (ifa == 0) { + IFDEBUG(D_ISISOUTPUT) + printf("isis_output: interface not found\n"); + ENDDEBUG + error = EINVAL; + goto release; + } + ifp = ifa->ifa_ifp; + sn_len = sdl->sdl_alen; + IFDEBUG(D_ISISOUTPUT) + u_char *cp = (u_char *)LLADDR(sdl), *cplim = cp + sn_len; + printf("isis_output: ifp 0x%x (%s%d), to: ", + ifp, ifp->if_name, ifp->if_unit); + while (cp < cplim) { + printf("%x", *cp++); + printf("%c", (cp < cplim) ? 
':' : ' '); + } + printf("\n"); + ENDDEBUG + bzero((caddr_t)&siso, sizeof(siso)); + siso.siso_family = AF_ISO; /* This convention may be useful for X.25 */ + siso.siso_data[0] = AFI_SNA; + siso.siso_nlen = sn_len + 1; + bcopy(LLADDR(sdl), siso.siso_data + 1, sn_len); + error = (ifp->if_output)(ifp, m, (struct sockaddr *)&siso, 0); + if (error) { + IFDEBUG(D_ISISOUTPUT) + printf("isis_output: error from ether_output is %d\n", error); + ENDDEBUG + } + return (error); + +release: + if (m != NULL) + m_freem(m); + return(error); +} + + +/* + * FUNCTION: esis_ctlinput + * + * PURPOSE: Handle the PRC_IFDOWN transition + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: Calls snpac_flush for interface specified. + * The loop through iso_ifaddr is stupid because + * back in if_down, we knew the ifp... + */ +esis_ctlinput(req, siso) +int req; /* request: we handle only PRC_IFDOWN */ +struct sockaddr_iso *siso; /* address of ifp */ +{ + register struct iso_ifaddr *ia; /* scan through interface addresses */ + + if (req == PRC_IFDOWN) + for (ia = iso_ifaddr; ia; ia = ia->ia_next) { + if (iso_addrmatch(IA_SIS(ia), siso)) + snpac_flushifp(ia->ia_ifp); + } +} + +#endif /* ISO */ diff --git a/bsd/netiso/esis.h b/bsd/netiso/esis.h new file mode 100644 index 000000000..f9a5ad998 --- /dev/null +++ b/bsd/netiso/esis.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)esis.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#ifndef BYTE_ORDER +/* + * Definitions for byte order, + * according to byte significance from low address to high. 
+ */ +#define LITTLE_ENDIAN 1234 /* least-significant byte first (vax) */ +#define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ + +#ifdef vax +#define BYTE_ORDER LITTLE_ENDIAN +#else +#define BYTE_ORDER BIG_ENDIAN /* mc68000, tahoe, most others */ +#endif +#endif /* BYTE_ORDER */ + +#define SNPAC_AGE 60 /* seconds */ +#define ESIS_CONFIG 60 /* seconds */ +#define ESIS_HT (ESIS_CONFIG * 2) + +/* + * Fixed part of an ESIS header + */ +struct esis_fixed { + u_char esis_proto_id; /* network layer protocol identifier */ + u_char esis_hdr_len; /* length indicator (octets) */ + u_char esis_vers; /* version/protocol identifier extension */ + u_char esis_res1; /* reserved */ + u_char esis_type; /* type code */ +/* technically, type should be &='d 0x1f */ +#define ESIS_ESH 0x02 /* End System Hello */ +#define ESIS_ISH 0x04 /* Intermediate System Hello */ +#define ESIS_RD 0x06 /* Redirect */ + u_char esis_ht_msb; /* holding time (seconds) high byte */ + u_char esis_ht_lsb; /* holding time (seconds) low byte */ + u_char esis_cksum_msb; /* checksum high byte */ + u_char esis_cksum_lsb; /* checksum low byte */ +}; +/* + * Values for ESIS datagram options + */ +#define ESISOVAL_NETMASK 0xe1 /* address mask option, RD PDU only */ +#define ESISOVAL_SNPAMASK 0xe2 /* snpa mask option, RD PDU only */ +#define ESISOVAL_ESCT 0xc6 /* end system conf. 
timer, ISH PDU only */ + + +#define ESIS_CKSUM_OFF 0x07 +#define ESIS_CKSUM_REQUIRED(pdu)\ + ((pdu->esis_cksum_msb != 0) || (pdu->esis_cksum_lsb != 0)) + +#define ESIS_VERSION 1 + +struct esis_stat { + u_short es_nomem; /* insufficient memory to send hello */ + u_short es_badcsum; /* incorrect checksum */ + u_short es_badvers; /* incorrect version number */ + u_short es_badtype; /* unknown pdu type field */ + u_short es_toosmall; /* packet too small */ + u_short es_eshsent; /* ESH sent */ + u_short es_eshrcvd; /* ESH rcvd */ + u_short es_ishsent; /* ISH sent */ + u_short es_ishrcvd; /* ISH rcvd */ + u_short es_rdsent; /* RD sent */ + u_short es_rdrcvd; /* RD rcvd */ +}; + +#ifdef KERNEL +struct esis_stat esis_stat; +#endif /* KERNEL */ diff --git a/bsd/netiso/idrp_usrreq.c b/bsd/netiso/idrp_usrreq.c new file mode 100644 index 000000000..1f57da43f --- /dev/null +++ b/bsd/netiso/idrp_usrreq.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)idrp_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +int idrp_input(); +struct isopcb idrp_isop; +static struct sockaddr_iso idrp_addrs[2] = +{ { sizeof(idrp_addrs), AF_ISO, }, { sizeof(idrp_addrs[1]), AF_ISO, } }; +/* + * IDRP initialization + */ +idrp_init() +{ + extern struct clnl_protosw clnl_protox[256]; + + idrp_isop.isop_next = idrp_isop.isop_prev = &idrp_isop; + idrp_isop.isop_faddr = &idrp_isop.isop_sfaddr; + idrp_isop.isop_laddr = &idrp_isop.isop_sladdr; + idrp_isop.isop_sladdr = idrp_addrs[1]; + idrp_isop.isop_sfaddr = idrp_addrs[1]; + clnl_protox[ISO10747_IDRP].clnl_input = idrp_input; +} + +/* + * CALLED FROM: + * tpclnp_input(). + * FUNCTION and ARGUMENTS: + * Take a packet (m) from clnp, strip off the clnp header + * and mke suitable for the idrp socket. + * No return value. + */ +idrp_input(m, src, dst) + register struct mbuf *m; + struct sockaddr_iso *src, *dst; +{ + if (idrp_isop.isop_socket == 0) { + bad: m_freem(m); + return 0; + } + bzero(idrp_addrs[0].siso_data, sizeof(idrp_addrs[0].siso_data)); + bcopy((caddr_t)&(src->siso_addr), (caddr_t)&idrp_addrs[0].siso_addr, + 1 + src->siso_nlen); + bzero(idrp_addrs[1].siso_data, sizeof(idrp_addrs[1].siso_data)); + bcopy((caddr_t)&(dst->siso_addr), (caddr_t)&idrp_addrs[1].siso_addr, + 1 + dst->siso_nlen); + if (sbappendaddr(&idrp_isop.isop_socket->so_rcv, + (struct sockaddr *)idrp_addrs, m, (struct mbuf *)0) == 0) + goto bad; + sorwakeup(idrp_isop.isop_socket); + return 0; +} + +idrp_output(m, addr) + struct mbuf *m, *addr; +{ + register struct sockaddr_iso *siso = mtod(addr, struct sockaddr_iso *); + int s = splnet(), i; + + bcopy((caddr_t)&(siso->siso_addr), + (caddr_t)&idrp_isop.isop_sfaddr.siso_addr, 1 + siso->siso_nlen); + siso++; + bcopy((caddr_t)&(siso->siso_addr), + (caddr_t)&idrp_isop.isop_sladdr.siso_addr, 1 + 
siso->siso_nlen); + i = clnp_output(m, idrp_isop, m->m_pkthdr.len, 0); + splx(s); + return (i); +} + +u_long idrp_sendspace = 3072; /* really max datagram size */ +u_long idrp_recvspace = 40 * 1024; /* 40 1K datagrams */ + +/*ARGSUSED*/ +idrp_usrreq(so, req, m, addr, control) + struct socket *so; + int req; + struct mbuf *m, *addr, *control; +{ + int error = 0; + + /* Note: need to block idrp_input while changing + * the udp pcb queue and/or pcb addresses. + */ + switch (req) { + + case PRU_ATTACH: + if (idrp_isop.isop_socket != NULL) { + error = ENXIO; + break; + } + idrp_isop.isop_socket = so; + error = soreserve(so, idrp_sendspace, idrp_recvspace); + break; + + case PRU_SHUTDOWN: + socantsendmore(so); + break; + + case PRU_SEND: + return (idrp_output(m, addr)); + + case PRU_ABORT: + soisdisconnected(so); + case PRU_DETACH: + idrp_isop.isop_socket = 0; + break; + + + case PRU_SENSE: + /* + * stat: don't bother with a blocksize. + */ + return (0); + + default: + return (EOPNOTSUPP); /* do not free mbuf's */ + } + +release: + if (control) { + printf("idrp control data unexpectedly retained\n"); + m_freem(control); + } + if (m) + m_freem(m); + return (error); +} diff --git a/bsd/netiso/if_cons.c b/bsd/netiso/if_cons.c new file mode 100644 index 000000000..48c289107 --- /dev/null +++ b/bsd/netiso/if_cons.c @@ -0,0 +1,978 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_cons.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * cons.c - Connection Oriented Network Service: + * including support for a) user transport-level service, + * b) COSNS below CLNP, and c) CONS below TP. 
+ */ + +#if TPCONS +#ifdef KERNEL +#ifdef ARGO_DEBUG +#define Static +unsigned LAST_CALL_PCB; +#else /* ARGO_DEBUG */ +#define Static static +#endif /* ARGO_DEBUG */ + +#ifndef SOCK_STREAM +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#endif + +#ifdef ARGO_DEBUG +#define MT_XCONN 0x50 +#define MT_XCLOSE 0x51 +#define MT_XCONFIRM 0x52 +#define MT_XDATA 0x53 +#define MT_XHEADER 0x54 +#else +#define MT_XCONN MT_DATA +#define MT_XCLOSE MT_DATA +#define MT_XCONFIRM MT_DATA +#define MT_XDATA MT_DATA +#define MT_XHEADER MT_HEADER +#endif /* ARGO_DEBUG */ + +#define DONTCLEAR -1 + +/********************************************************************* + * cons.c - CONS interface to the x.25 layer + * + * TODO: figure out what resources we might run out of besides mbufs. + * If we run out of any of them (including mbufs) close and recycle + * lru x% of the connections, for some parameter x. + * + * There are 2 interfaces from above: + * 1) from TP0: + * cons CO network service + * TP associates a transport connection with a network connection. + * cons_output( isop, m, len, isdgm==0 ) + * co_flags == 0 + * 2) from TP4: + * It's a datagram service, like clnp is. - even though it calls + * cons_output( isop, m, len, isdgm==1 ) + * it eventually goes through + * cosns_output(ifp, m, dst). + * TP4 permits multiplexing (reuse, possibly simultaneously) of the + * network connections. + * This means that many sockets (many tpcbs) may be associated with + * this pklcd, hence cannot have a back ptr from pklcd to a tpcb. + * co_flags & CONSF_DGM + * co_socket is null since there may be many sockets that use this pklcd. + * +NOTE: + streams would really be nice. sigh. +NOTE: + PVCs could be handled by config-ing a cons with an address and with the + IFF_POINTTOPOINT flag on. 
This code would then have to skip the + connection setup stuff for pt-to-pt links. + + + *********************************************************************/ + + +#define CONS_IFQMAXLEN 5 + + +/* protosw pointers for getting to higher layer */ +Static struct protosw *CLNP_proto; +Static struct protosw *TP_proto; +Static struct protosw *X25_proto; +Static int issue_clear_req(); + +#ifndef PHASEONE +extern struct ifaddr *ifa_ifwithnet(); +#endif /* PHASEONE */ + +extern struct ifaddr *ifa_ifwithaddr(); + +extern struct isopcb tp_isopcb; /* chain of all TP pcbs */ + + +Static int parse_facil(), NSAPtoDTE(), make_partial_x25_packet(); +Static int FACILtoNSAP(), DTEtoNSAP(); +Static struct pklcd *cons_chan_to_pcb(); + +#define HIGH_NIBBLE 1 +#define LOW_NIBBLE 0 + +/* + * NAME: nibble_copy() + * FUNCTION and ARGUMENTS: + * copies (len) nibbles from (src_octet), high or low nibble + * to (dst_octet), high or low nibble, + * src_nibble & dst_nibble should be: + * HIGH_NIBBLE (1) if leftmost 4 bits/ most significant nibble + * LOW_NIBBLE (0) if rightmost 4 bits/ least significant nibble + * RETURNS: VOID + */ +void +nibble_copy(src_octet, src_nibble, dst_octet, dst_nibble, len) + register char *src_octet; + register char *dst_octet; + register unsigned src_nibble; + register unsigned dst_nibble; + int len; +{ + + register i; + register unsigned dshift, sshift; + + IFDEBUG(D_CADDR) + printf("nibble_copy ( 0x%x, 0x%x, 0x%x, 0x%x 0x%x)\n", + src_octet, src_nibble, dst_octet, dst_nibble, len); + ENDDEBUG +#define SHIFT 0x4 + + dshift = dst_nibble << 2; + sshift = src_nibble << 2; + + for (i=0; i> sshift))<< dshift; + + dshift ^= SHIFT; + sshift ^= SHIFT; + src_nibble = 1-src_nibble; + dst_nibble = 1-dst_nibble; + src_octet += src_nibble; + dst_octet += dst_nibble; + } + IFDEBUG(D_CADDR) + printf("nibble_copy DONE\n"); + ENDDEBUG +} + +/* + * NAME: nibble_match() + * FUNCTION and ARGUMENTS: + * compares src_octet/src_nibble and dst_octet/dst_nibble for len nibbles. 
+ * RETURNS: 0 if they differ, 1 if they are the same. + */ +int +nibble_match( src_octet, src_nibble, dst_octet, dst_nibble, len) + register char *src_octet; + register char *dst_octet; + register unsigned src_nibble; + register unsigned dst_nibble; + int len; +{ + + register i; + register unsigned dshift, sshift; + u_char nibble_a, nibble_b; + + IFDEBUG(D_CADDR) + printf("nibble_match ( 0x%x, 0x%x, 0x%x, 0x%x 0x%x)\n", + src_octet, src_nibble, dst_octet, dst_nibble, len); + ENDDEBUG +#define SHIFT 0x4 + + dshift = dst_nibble << 2; + sshift = src_nibble << 2; + + for (i=0; i>dshift) & 0xf; + nibble_a = ( 0xf & (*src_octet >> sshift)); + if (nibble_b != nibble_a) + return 0; + + dshift ^= SHIFT; + sshift ^= SHIFT; + src_nibble = 1-src_nibble; + dst_nibble = 1-dst_nibble; + src_octet += src_nibble; + dst_octet += dst_nibble; + } + IFDEBUG(D_CADDR) + printf("nibble_match DONE\n"); + ENDDEBUG + return 1; +} + +/* + **************************** NET PROTOCOL cons *************************** + */ +/* + * NAME: cons_init() + * CALLED FROM: + * autoconf + * FUNCTION: + * initialize the protocol + */ +cons_init() +{ + int tp_incoming(), clnp_incoming(); + + + CLNP_proto = pffindproto(AF_ISO, ISOPROTO_CLNP, SOCK_DGRAM); + X25_proto = pffindproto(AF_ISO, ISOPROTO_X25, SOCK_STREAM); + TP_proto = pffindproto(AF_ISO, ISOPROTO_TP0, SOCK_SEQPACKET); + IFDEBUG(D_CCONS) + printf("cons_init end : cnlp_proto 0x%x cons proto 0x%x tp proto 0x%x\n", + CLNP_proto, X25_proto, TP_proto); + ENDDEBUG +#ifdef notdef + pk_protolisten(0x81, 0, clnp_incoming); + pk_protolisten(0x82, 0, esis_incoming); + pk_protolisten(0x84, 0, tp8878_A_incoming); + pk_protolisten(0, 0, tp_incoming); +#endif +} + +tp_incoming(lcp, m) +struct pklcd *lcp; +register struct mbuf *m; +{ + register struct isopcb *isop; + int cons_tpinput(); + + if (iso_pcballoc((struct socket *)0, &tp_isopcb)) { + pk_close(lcp); + return; + } + isop = tp_isopcb.isop_next; + lcp->lcd_upper = cons_tpinput; + lcp->lcd_upnext = 
(caddr_t)isop; + lcp->lcd_send(lcp); /* Confirms call */ + isop->isop_chan = (caddr_t)lcp; + isop->isop_laddr = &isop->isop_sladdr; + isop->isop_faddr = &isop->isop_sfaddr; + DTEtoNSAP(isop->isop_laddr, &lcp->lcd_laddr); + DTEtoNSAP(isop->isop_faddr, &lcp->lcd_faddr); + parse_facil(lcp, isop, &(mtod(m, struct x25_packet *)->packet_data), + m->m_pkthdr.len - PKHEADERLN); +} + +cons_tpinput(lcp, m0) +struct mbuf *m0; +struct pklcd *lcp; +{ + register struct isopcb *isop = (struct isopcb *)lcp->lcd_upnext; + register struct x25_packet *xp; + int cmd, ptype = CLEAR; + + if (isop == 0) + return; + if (m0 == 0) + goto dead; + switch(m0->m_type) { + case MT_DATA: + case MT_OOBDATA: + tpcons_input(m0, isop->isop_faddr, isop->isop_laddr, (caddr_t)lcp); + return; + + case MT_CONTROL: + switch (ptype = pk_decode(mtod(m0, struct x25_packet *))) { + + case RR: + cmd = PRC_CONS_SEND_DONE; + break; + + case CALL_ACCEPTED: + if (lcp->lcd_sb.sb_mb) + lcp->lcd_send(lcp); /* XXX - fix this */ + /*FALLTHROUGH*/ + default: + return; + + dead: + case CLEAR: + case CLEAR_CONF: + lcp->lcd_upper = 0; + lcp->lcd_upnext = 0; + isop->isop_chan = 0; + case RESET: + cmd = PRC_ROUTEDEAD; + } + tpcons_ctlinput(cmd, isop->isop_faddr, isop); + if (cmd = PRC_ROUTEDEAD && isop->isop_refcnt == 0) + iso_pcbdetach(isop); + } +} + +/* + * NAME: cons_connect() + * CALLED FROM: + * tpcons_pcbconnect() when opening a new connection. + * FUNCTION anD ARGUMENTS: + * Figures out which device to use, finding a route if one doesn't + * already exist. 
+ * RETURN VALUE: + * returns E* + */ +cons_connect(isop) + register struct isopcb *isop; +{ + register struct pklcd *lcp = (struct pklcd *)isop->isop_chan; + register struct mbuf *m; + struct ifaddr *ifa; + int error; + + IFDEBUG(D_CCONN) + printf("cons_connect(0x%x): ", isop); + dump_isoaddr(isop->isop_faddr); + printf("myaddr: "); + dump_isoaddr(isop->isop_laddr); + printf("\n" ); + ENDDEBUG + NSAPtoDTE(isop->isop_faddr, &lcp->lcd_faddr); + lcp->lcd_upper = cons_tpinput; + lcp->lcd_upnext = (caddr_t)isop; + IFDEBUG(D_CCONN) + printf( + "calling make_partial_x25_packet( 0x%x, 0x%x, 0x%x)\n", + &lcp->lcd_faddr, &lcp->lcd_laddr, + isop->isop_socket->so_proto->pr_protocol); + ENDDEBUG + if ((error = make_partial_x25_packet(isop, lcp, m)) == 0) + error = pk_connect(lcp, &lcp->lcd_faddr); + return error; +} + +/* + **************************** DEVICE cons *************************** + */ + + +/* + * NAME: cons_ctlinput() + * CALLED FROM: + * lower layer when ECN_CLEAR occurs : this routine is here + * for consistency - cons subnet service calls its higher layer + * through the protosw entry. + * FUNCTION & ARGUMENTS: + * cmd is a PRC_* command, list found in ../sys/protosw.h + * copcb is the obvious. + * This serves the higher-layer cons service. + * NOTE: this takes 3rd arg. because cons uses it to inform itself + * of things (timeouts, etc) but has a pcb instead of an address. 
+ */ +cons_ctlinput(cmd, sa, copcb) + int cmd; + struct sockaddr *sa; + register struct pklcd *copcb; +{ +} + + +find_error_reason( xp ) + register struct x25_packet *xp; +{ + extern u_char x25_error_stats[]; + int error, cause; + + if (xp) { + cause = 4[(char *)xp]; + switch (cause) { + case 0x00: + case 0x80: + /* DTE originated; look at the diagnostic */ + error = (CONL_ERROR_MASK | cause); + goto done; + + case 0x01: /* number busy */ + case 0x81: + case 0x09: /* Out of order */ + case 0x89: + case 0x11: /* Remot Procedure Error */ + case 0x91: + case 0x19: /* reverse charging accept not subscribed */ + case 0x99: + case 0x21: /* Incampat destination */ + case 0xa1: + case 0x29: /* fast select accept not subscribed */ + case 0xa9: + case 0x39: /* ship absent */ + case 0xb9: + case 0x03: /* invalid facil request */ + case 0x83: + case 0x0b: /* access barred */ + case 0x8b: + case 0x13: /* local procedure error */ + case 0x93: + case 0x05: /* network congestion */ + case 0x85: + case 0x8d: /* not obtainable */ + case 0x0d: + case 0x95: /* RPOA out of order */ + case 0x15: + /* take out bit 8 + * so we don't have to have so many perror entries + */ + error = (CONL_ERROR_MASK | 0x100 | (cause & ~0x80)); + goto done; + + case 0xc1: /* gateway-detected proc error */ + case 0xc3: /* gateway congestion */ + + error = (CONL_ERROR_MASK | 0x100 | cause); + goto done; + } + } + /* otherwise, a *hopefully* valid perror exists in the e_reason field */ + error = xp->packet_data; + if (error = 0) { + printf("Incoming PKT TYPE 0x%x with reason 0x%x\n", + pk_decode(xp), + cause); + error = E_CO_HLI_DISCA; + } + +done: + return error; +} + + + +#endif /* KERNEL */ + +/* + * NAME: make_partial_x25_packet() + * + * FUNCTION and ARGUMENTS: + * Makes part of an X.25 call packet, for use by x25. + * (src) and (dst) are the NSAP-addresses of source and destination. + * (buf) is a ptr to a buffer into which to write this partial header. 
+ * + * 0 Facility length (in octets) + * 1 Facility field, which is a set of: + * m facil code + * m+1 facil param len (for >2-byte facilities) in octets + * m+2..p facil param field + * q user data (protocol identification octet) + * + * + * RETURNS: + * 0 if OK + * E* if failed. + * + * SIDE EFFECTS: + * Stores facilites mbuf in X.25 control block, where the connect + * routine knows where to look for it. + */ + +#ifdef X25_1984 +int cons_use_facils = 1; +#else /* X25_1984 */ +int cons_use_facils = 0; +#endif /* X25_1984 */ + +int cons_use_udata = 1; /* KLUDGE FOR DEBUGGING */ + +Static int +make_partial_x25_packet(isop, lcp) + struct isopcb *isop; + struct pklcd *lcp; +{ + u_int proto; + int flag; + caddr_t buf; + register caddr_t ptr; + register int len = 0; + int buflen =0; + caddr_t facil_len; + int oddness = 0; + struct mbuf *m; + + + IFDEBUG(D_CCONN) + printf("make_partial_x25_packet(0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", + isop->isop_laddr, isop->isop_faddr, proto, m, flag); + ENDDEBUG + if (cons_use_udata) { + if (isop->isop_x25crud_len > 0) { + /* + * The user specified something. 
Stick it in + */ + bcopy(isop->isop_x25crud, lcp->lcd_faddr.x25_udata, + isop->isop_x25crud_len); + lcp->lcd_faddr.x25_udlen = isop->isop_x25crud_len; + } + } + + if (cons_use_facils == 0) { + lcp->lcd_facilities = 0; + return 0; + } + MGETHDR(m, MT_DATA, M_WAITOK); + if (m == 0) + return ENOBUFS; + buf = mtod(m, caddr_t); + ptr = buf; + + /* ptr now points to facil length (len of whole facil field in OCTETS */ + facil_len = ptr ++; + m->m_len = 0; + pk_build_facilities(m, &lcp->lcd_faddr, 0); + + IFDEBUG(D_CADDR) + printf("make_partial calling: ptr 0x%x, len 0x%x\n", ptr, + isop->isop_laddr->siso_addr.isoa_len); + ENDDEBUG + if (cons_use_facils) { + *ptr++ = 0; /* Marker to separate X.25 facitilies from CCITT ones */ + *ptr++ = 0x0f; + *ptr = 0xcb; /* calling facility code */ + ptr ++; + ptr ++; /* leave room for facil param len (in OCTETS + 1) */ + ptr ++; /* leave room for the facil param len (in nibbles), + * high two bits of which indicate full/partial NSAP + */ + len = isop->isop_laddr->siso_addr.isoa_len; + bcopy( isop->isop_laddr->siso_data, ptr, len); + *(ptr-2) = len+1; /* facil param len in octets */ + *(ptr-1) = len<<1; /* facil param len in nibbles */ + ptr += len; + + IFDEBUG(D_CADDR) + printf("make_partial called: ptr 0x%x, len 0x%x\n", ptr, + isop->isop_faddr->siso_addr.isoa_len); + ENDDEBUG + *ptr = 0xc9; /* called facility code */ + ptr ++; + ptr ++; /* leave room for facil param len (in OCTETS + 1) */ + ptr ++; /* leave room for the facil param len (in nibbles), + * high two bits of which indicate full/partial NSAP + */ + len = isop->isop_faddr->siso_nlen; + bcopy(isop->isop_faddr->siso_data, ptr, len); + *(ptr-2) = len+1; /* facil param len = addr len + 1 for each of these + * two length fields, in octets */ + *(ptr-1) = len<<1; /* facil param len in nibbles */ + ptr += len; + + } + *facil_len = ptr - facil_len - 1; + if (*facil_len > MAX_FACILITIES) + return E_CO_PNA_LONG; + + buflen = (int)(ptr - buf); + + IFDEBUG(D_CDUMP_REQ) + register int 
i; + + printf("ECN_CONNECT DATA buf 0x%x len %d (0x%x)\n", + buf, buflen, buflen); + for( i=0; i < buflen; ) { + printf("+%d: %x %x %x %x %x %x %x %x\n", + i, + *(buf+i), *(buf+i+1), *(buf+i+2), *(buf+i+3), + *(buf+i+4), *(buf+i+5), *(buf+i+6), *(buf+i+7)); + i+=8; + } + ENDDEBUG + IFDEBUG(D_CADDR) + printf("make_partial returns buf 0x%x size 0x%x bytes\n", + mtod(m, caddr_t), buflen); + ENDDEBUG + + if (buflen > MHLEN) + return E_CO_PNA_LONG; + + m->m_pkthdr.len = m->m_len = buflen; + lcp->lcd_facilities = m; + return 0; +} + +/* + * NAME: NSAPtoDTE() + * CALLED FROM: + * make_partial_x25_packet() + * FUNCTION and ARGUMENTS: + * get a DTE address from an NSAP-address (struct sockaddr_iso) + * (dst_octet) is the octet into which to begin stashing the DTE addr + * (dst_nibble) takes 0 or 1. 1 means begin filling in the DTE addr + * in the high-order nibble of dst_octet. 0 means low-order nibble. + * (addr) is the NSAP-address + * (flag) is true if the transport suffix is to become the + * last two digits of the DTE address + * A DTE address is a series of ASCII digits + * + * A DTE address may have leading zeros. The are significant. + * 1 digit per nibble, may be an odd number of nibbles. + * + * An NSAP-address has the DTE address in the IDI. Leading zeros are + * significant. Trailing hex f indicates the end of the DTE address. + * The IDI is a series of BCD digits, one per nibble. + * + * RETURNS + * # significant digits in the DTE address, -1 if error. 
+ */ + +Static int +NSAPtoDTE(siso, sx25) + register struct sockaddr_iso *siso; + register struct sockaddr_x25 *sx25; +{ + int dtelen = -1; + + IFDEBUG(D_CADDR) + printf("NSAPtoDTE: nsap: %s\n", clnp_iso_addrp(&siso->siso_addr)); + ENDDEBUG + + if (siso->siso_data[0] == AFI_37) { + register char *out = sx25->x25_addr; + register char *in = siso->siso_data + 1; + register int nibble; + char *lim = siso->siso_data + siso->siso_nlen; + char *olim = out+15; + int lowNibble = 0; + + while (in < lim) { + nibble = ((lowNibble ? *in++ : (*in >> 4)) & 0xf) | 0x30; + lowNibble ^= 1; + if (nibble != 0x3f && out < olim) + *out++ = nibble; + } + dtelen = out - sx25->x25_addr; + *out++ = 0; + } else { + /* error = iso_8208snparesolve(addr, x121string, &x121strlen);*/ + register struct rtentry *rt; + extern struct sockaddr_iso blank_siso; + struct sockaddr_iso nsiso; + + nsiso = blank_siso; + bcopy(nsiso.siso_data, siso->siso_data, + nsiso.siso_nlen = siso->siso_nlen); + if (rt = rtalloc1(&nsiso, 1)) { + register struct sockaddr_x25 *sxx = + (struct sockaddr_x25 *)rt->rt_gateway; + register char *in = sxx->x25_addr; + + rt->rt_use--; + if (sxx && sxx->x25_family == AF_CCITT) { + bcopy(sx25->x25_addr, sxx->x25_addr, sizeof(sx25->x25_addr)); + while (*in++) {} + dtelen = in - sxx->x25_addr; + } + } + } + return dtelen; +} + +/* + * NAME: FACILtoNSAP() + * CALLED FROM: + * parse_facil() + * FUNCTION and ARGUMENTS: + * Creates and NSAP in the sockaddr_iso (addr) from the + * x.25 facility found at buf - 1. + * RETURNS: + * 0 if ok, -1 if error. 
+ */ + +Static int +FACILtoNSAP(addr, buf) + register u_char *buf; + register struct sockaddr_iso *addr; +{ + int len_in_nibbles = *++buf & 0x3f; + u_char buf_len = (len_in_nibbles + 1) >> 1;; /* in bytes */ + + IFDEBUG(D_CADDR) + printf("FACILtoNSAP( 0x%x, 0x%x, 0x%x )\n", + buf, buf_len, addr ); + ENDDEBUG + + len_in_nibbles = *buf & 0x3f; + /* despite the fact that X.25 makes us put a length in nibbles + * here, the NSAP-addrs are always in full octets + */ + switch (*buf++ & 0xc0) { + case 0: + /* Entire OSI NSAP address */ + bcopy((caddr_t)buf, addr->siso_data, addr->siso_nlen = buf_len); + break; + + case 40: + /* Partial OSI NSAP address, assume trailing */ + if (buf_len + addr->siso_nlen > sizeof(addr->siso_addr)) + return -1; + bcopy((caddr_t)buf, TSEL(addr), buf_len); + addr->siso_nlen += buf_len; + break; + + default: + /* Rather than blow away the connection, just ignore and use + NSAP from DTE */; + } + return 0; +} + +Static +init_siso(siso) +register struct sockaddr_iso *siso; +{ + siso->siso_len = sizeof (*siso); + siso->siso_family = AF_ISO; + siso->siso_data[0] = AFI_37; + siso->siso_nlen = 8; +} + +/* + * NAME: DTEtoNSAP() + * CALLED FROM: + * parse_facil() + * FUNCTION and ARGUMENTS: + * Creates a type 37 NSAP in the sockaddr_iso (addr) + * from a DTE address found in a sockaddr_x25. + * + * RETURNS: + * 0 if ok; E* otherwise. 
+ */ + +Static int +DTEtoNSAP(addr, sx) + struct sockaddr_iso *addr; + struct sockaddr_x25 *sx; +{ + register char *in, *out; + register int first; + int pad_tail = 0; + int src_len; + + + init_siso(addr); + in = sx->x25_addr; + src_len = strlen(in); + addr->siso_nlen = (src_len + 3) / 2; + out = addr->siso_data; + *out++ = 0x37; + if (src_len & 1) { + pad_tail = 0xf; + src_len++; + } + for (first = 0; src_len > 0; src_len--) { + first |= 0xf & *in++; + if (src_len & 1) { + *out++ = first; + first = 0; + } + else first <<= 4; + } + if (pad_tail) + out[-1] |= 0xf; + return 0; /* ok */ +} + +/* + * FUNCTION and ARGUMENTS: + * parses (buf_len) bytes beginning at (buf) and finds + * a called nsap, a calling nsap, and protocol identifier. + * RETURNS: + * 0 if ok, E* otherwise. + */ + +Static int +parse_facil(lcp, isop, buf, buf_len) + caddr_t buf; + u_char buf_len; /* in bytes */ + struct isopcb *isop; + struct pklcd *lcp; +{ + register int i; + register u_char *ptr = (u_char *)buf; + u_char *ptr_lim, *facil_lim; + int facil_param_len, facil_len; + + IFDEBUG(D_CADDR) + printf("parse_facil(0x%x, 0x%x, 0x%x, 0x%x)\n", + lcp, isop, buf, buf_len); + dump_buf(buf, buf_len); + ENDDEBUG + + /* find the beginnings of the facility fields in buf + * by skipping over the called & calling DTE addresses + * i <- # nibbles in called + # nibbles in calling + * i += 1 so that an odd nibble gets rounded up to even + * before dividing by 2, then divide by two to get # octets + */ + i = (int)(*ptr >> 4) + (int)(*ptr&0xf); + i++; + ptr += i >> 1; + ptr ++; /* plus one for the DTE lengths byte */ + + /* ptr now is at facil_length field */ + facil_len = *ptr++; + facil_lim = ptr + facil_len; + IFDEBUG(D_CADDR) + printf("parse_facils: facil length is 0x%x\n", (int) facil_len); + ENDDEBUG + + while (ptr < facil_lim) { + /* get NSAP addresses from facilities */ + switch (*ptr++) { + case 0xcb: + /* calling NSAP */ + facil_param_len = FACILtoNSAP(isop->isop_faddr, ptr); + break; + case 0xc9: + 
/* called NSAP */ + facil_param_len = FACILtoNSAP(isop->isop_laddr, ptr); + break; + + /* from here to default are legit cases that I ignore */ + /* variable length */ + case 0xca: /* end-to-end transit delay negot */ + case 0xc6: /* network user id */ + case 0xc5: /* charging info : indicating monetary unit */ + case 0xc2: /* charging info : indicating segment count */ + case 0xc1: /* charging info : indicating call duration */ + case 0xc4: /* RPOA extended format */ + case 0xc3: /* call redirection notification */ + facil_param_len = 0; + break; + + /* 1 octet */ + case 0x0a: /* min. throughput class negot */ + case 0x02: /* throughput class */ + case 0x03: case 0x47: /* CUG stuff */ + case 0x0b: /* expedited data negot */ + case 0x01: /* Fast select or reverse charging + (example of intelligent protocol design) */ + case 0x04: /* charging info : requesting service */ + case 0x08: /* called line addr modified notification */ + case 0x00: /* marker to indicate beginning of CCITT facils */ + facil_param_len = 1; + break; + + /* any 2 octets */ + case 0x42: /* pkt size */ + case 0x43: /* win size */ + case 0x44: /* RPOA basic format */ + case 0x41: /* bilateral CUG stuff */ + case 0x49: /* transit delay selection and indication */ + facil_param_len = 2; + break; + + default: + printf( +"BOGUS FACILITY CODE facil_lim 0x%x facil_len %d, ptr 0x%x *ptr 0x%x\n", + facil_lim, facil_len, ptr - 1, ptr[-1]); + /* facil that we don't handle + return E_CO_HLI_REJI; */ + switch (ptr[-1] & 0xc0) { + case 0x00: facil_param_len = 1; break; + case 0x40: facil_param_len = 2; break; + case 0x80: facil_param_len = 3; break; + case 0xc0: facil_param_len = 0; break; + } + } + if (facil_param_len == -1) + return E_CO_REG_ICDA; + if (facil_param_len == 0) /* variable length */ + facil_param_len = (int)*ptr++; /* 1 + the real facil param */ + ptr += facil_param_len; + } + return 0; +} + +#endif /* TPCONS */ diff --git a/bsd/netiso/if_eon.c b/bsd/netiso/if_eon.c new file mode 100644 index 
000000000..40c8716f7 --- /dev/null +++ b/bsd/netiso/if_eon.c @@ -0,0 +1,627 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if_eon.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. 
+ +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * EON rfc + * Layer between IP and CLNL + * + * TODO: + * Put together a current rfc986 address format and get the right offset + * for the nsel + */ + +#if EON +#define NEON 1 + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern struct timeval time; +extern struct ifnet loif; + +#define EOK 0 + +int eoninput(); +int eonoutput(); +int eonioctl(); +int eonattach(); +int eoninit(); +void eonrtrequest(); +struct ifnet eonif[1]; + +eonprotoinit() { + (void) eonattach(); +} + +struct eon_llinfo eon_llinfo; +#define PROBE_OK 0; + + +/* + * FUNCTION: eonattach + * + * PURPOSE: autoconf attach routine + * + * RETURNS: void + */ + +eonattach() +{ + register struct ifnet *ifp = eonif; + + IFDEBUG(D_EON) + printf("eonattach()\n"); + ENDDEBUG + ifp->if_unit = 0; + ifp->if_name = "eon"; + ifp->if_mtu = ETHERMTU; + /* since everything will go out over ether or token ring */ + + ifp->if_init = eoninit; + ifp->if_ioctl = eonioctl; + ifp->if_output = eonoutput; + ifp->if_type = IFT_EON; + ifp->if_addrlen = 5; + ifp->if_hdrlen = EONIPLEN; + ifp->if_flags = IFF_BROADCAST; + if_attach(ifp); + eonioctl(ifp, SIOCSIFADDR, (caddr_t)ifp->if_addrlist); + 
eon_llinfo.el_qhdr.link = + eon_llinfo.el_qhdr.rlink = &(eon_llinfo.el_qhdr); + + IFDEBUG(D_EON) + printf("eonattach()\n"); + ENDDEBUG +} + + +/* + * FUNCTION: eonioctl + * + * PURPOSE: io controls - ifconfig + * need commands to + * link-UP (core addr) (flags: ES, IS) + * link-DOWN (core addr) (flags: ES, IS) + * must be callable from kernel or user + * + * RETURNS: nothing + */ +eonioctl(ifp, cmd, data) + register struct ifnet *ifp; + int cmd; + register caddr_t data; +{ + int s = splimp(); + register int error = 0; + + IFDEBUG(D_EON) + printf("eonioctl (cmd 0x%x) \n", cmd); + ENDDEBUG + + switch (cmd) { + register struct ifaddr *ifa; + + case SIOCSIFADDR: + if (ifa = (struct ifaddr *)data) { + ifp->if_flags |= IFF_UP; + if (ifa->ifa_addr->sa_family != AF_LINK) + ifa->ifa_rtrequest = eonrtrequest; + } + break; + } + splx(s); + return(error); +} + + +eoniphdr(hdr, loc, ro, class, zero) +struct route *ro; +register struct eon_iphdr *hdr; +caddr_t loc; +{ + struct mbuf mhead; + register struct sockaddr_in *sin = (struct sockaddr_in *)&ro->ro_dst; + if (zero) { + bzero((caddr_t)hdr, sizeof (*hdr)); + bzero((caddr_t)ro, sizeof (*ro)); + } + sin->sin_family = AF_INET; + sin->sin_len = sizeof (*sin); + bcopy(loc, (caddr_t)&sin->sin_addr, sizeof(struct in_addr)); + /* + * If there is a cached route, + * check that it is to the same destination + * and is still up. If not, free it and try again. 
+ */ + if (ro->ro_rt) { + struct sockaddr_in *dst = + (struct sockaddr_in *)rt_key(ro->ro_rt); + if ((ro->ro_rt->rt_flags & RTF_UP) == 0 || + sin->sin_addr.s_addr != dst->sin_addr.s_addr) { + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + } + } + rtalloc(ro); + if (ro->ro_rt) + ro->ro_rt->rt_use++; + hdr->ei_ip.ip_dst = sin->sin_addr; + hdr->ei_ip.ip_p = IPPROTO_EON; + hdr->ei_ip.ip_ttl = MAXTTL; + hdr->ei_eh.eonh_class = class; + hdr->ei_eh.eonh_vers = EON_VERSION; + hdr->ei_eh.eonh_csum = 0; + mhead.m_data = (caddr_t) &hdr->ei_eh; + mhead.m_len = sizeof(struct eon_hdr); + mhead.m_next = 0; + IFDEBUG(D_EON) + printf("eonoutput : gen csum (0x%x, offset %d, datalen %d)\n", + &mhead, + _offsetof(struct eon_hdr, eonh_csum), sizeof(struct eon_hdr)); + ENDDEBUG + iso_gen_csum(&mhead, + _offsetof(struct eon_hdr, eonh_csum), sizeof(struct eon_hdr)); +} +/* + * FUNCTION: eonrtrequest + * + * PURPOSE: maintains list of direct eon recipients. + * sets up IP route for rest. + * + * RETURNS: nothing + */ +void +eonrtrequest(cmd, rt, gate) +register struct rtentry *rt; +register struct sockaddr *gate; +{ + unsigned long zerodst = 0; + caddr_t ipaddrloc = (caddr_t) &zerodst; + register struct eon_llinfo *el = (struct eon_llinfo *)rt->rt_llinfo; + + /* + * Common Housekeeping + */ + switch (cmd) { + case RTM_DELETE: + if (el) { + remque(&(el->el_qhdr)); + if (el->el_iproute.ro_rt) + RTFREE(el->el_iproute.ro_rt); + Free(el); + rt->rt_llinfo = 0; + } + return; + + case RTM_ADD: + case RTM_RESOLVE: + rt->rt_rmx.rmx_mtu = loif.if_mtu; /* unless better below */ + R_Malloc(el, struct eon_llinfo *, sizeof(*el)); + rt->rt_llinfo = (caddr_t)el; + if (el == 0) + return; + Bzero(el, sizeof(*el)); + insque(&(el->el_qhdr), &eon_llinfo.el_qhdr); + el->el_rt = rt; + break; + } + if (gate || (gate = rt->rt_gateway)) switch (gate->sa_family) { + case AF_LINK: +#define SDL(x) ((struct sockaddr_dl *)x) + if (SDL(gate)->sdl_alen == 1) + el->el_snpaoffset = *(u_char *)LLADDR(SDL(gate)); + 
else + ipaddrloc = LLADDR(SDL(gate)); + break; + case AF_INET: +#define SIN(x) ((struct sockaddr_in *)x) + ipaddrloc = (caddr_t) &SIN(gate)->sin_addr; + break; + default: + return; + } + el->el_flags |= RTF_UP; + eoniphdr(&el->el_ei, ipaddrloc, &el->el_iproute, EON_NORMAL_ADDR, 0); + if (el->el_iproute.ro_rt) + rt->rt_rmx.rmx_mtu = el->el_iproute.ro_rt->rt_rmx.rmx_mtu + - sizeof(el->el_ei); +} + +/* + * FUNCTION: eoninit + * + * PURPOSE: initialization + * + * RETURNS: nothing + */ + +eoninit(unit) + int unit; +{ + printf("eon driver-init eon%d\n", unit); +} + + +/* + * FUNCTION: eonoutput + * + * PURPOSE: prepend an eon header and hand to IP + * ARGUMENTS: (ifp) is points to the ifnet structure for this unit/device + * (m) is an mbuf *, *m is a CLNL packet + * (dst) is a destination address - have to interp. as + * multicast or broadcast or real address. + * + * RETURNS: unix error code + * + * NOTES: + * + */ +eonoutput(ifp, m, dst, rt) + struct ifnet *ifp; + register struct mbuf *m; /* packet */ + struct sockaddr_iso *dst; /* destination addr */ + struct rtentry *rt; +{ + register struct eon_llinfo *el; + register struct eon_iphdr *ei; + struct route *ro; + int datalen; + struct mbuf *mh; + int error = 0, class = 0, alen = 0; + caddr_t ipaddrloc; + static struct eon_iphdr eon_iphdr; + static struct route route; + + IFDEBUG(D_EON) + printf("eonoutput \n" ); + ENDDEBUG + + ifp->if_lastchange = time; + ifp->if_opackets++; + if (rt == 0 || (el = (struct eon_llinfo *)rt->rt_llinfo) == 0) { + if (dst->siso_family == AF_LINK) { + register struct sockaddr_dl *sdl = (struct sockaddr_dl *)dst; + + ipaddrloc = LLADDR(sdl); + alen = sdl->sdl_alen; + } else if (dst->siso_family == AF_ISO && dst->siso_data[0] == AFI_SNA) { + alen = dst->siso_nlen - 1; + ipaddrloc = (caddr_t) dst->siso_data + 1; + } + switch (alen) { + case 5: + class = 4[(u_char *)ipaddrloc]; + case 4: + ro = &route; + ei = &eon_iphdr; + eoniphdr(ei, ipaddrloc, ro, class, 1); + goto send; + } +einval: + error 
= EINVAL; + goto flush; + } + if ((el->el_flags & RTF_UP) == 0) { + eonrtrequest(RTM_CHANGE, rt, (struct sockaddr *)0); + if ((el->el_flags & RTF_UP) == 0) { + error = EHOSTUNREACH; + goto flush; + } + } + if ((m->m_flags & M_PKTHDR) == 0) { + printf("eon: got non headered packet\n"); + goto einval; + } + ei = &el->el_ei; + ro = &el->el_iproute; + if (el->el_snpaoffset) { + if (dst->siso_family == AF_ISO) { + bcopy((caddr_t) &dst->siso_data[el->el_snpaoffset], + (caddr_t) &ei->ei_ip.ip_dst, sizeof(ei->ei_ip.ip_dst)); + } else + goto einval; + } +send: + /* put an eon_hdr in the buffer, prepended by an ip header */ + datalen = m->m_pkthdr.len + EONIPLEN; + MGETHDR(mh, M_DONTWAIT, MT_HEADER); + if(mh == (struct mbuf *)0) + goto flush; + mh->m_next = m; + m = mh; + MH_ALIGN(m, sizeof(struct eon_iphdr)); + m->m_len = sizeof(struct eon_iphdr); + ifp->if_obytes += + (ei->ei_ip.ip_len = (u_short)(m->m_pkthdr.len = datalen)); + *mtod(m, struct eon_iphdr *) = *ei; + + IFDEBUG(D_EON) + printf("eonoutput dst ip addr : %x\n", ei->ei_ip.ip_dst.s_addr); + printf("eonoutput ip_output : eonip header:\n"); + dump_buf(ei, sizeof(struct eon_iphdr)); + ENDDEBUG + + error = ip_output(m, (struct mbuf *)0, ro, 0, NULL); + m = 0; + if (error) { + ifp->if_oerrors++; + ifp->if_opackets--; + ifp->if_obytes -= datalen; + } +flush: + if (m) + m_freem(m); + return error; +} + +eoninput(m, iphlen) + register struct mbuf *m; + int iphlen; +{ + register struct eon_hdr *eonhdr; + register struct ip *iphdr; + struct ifnet *eonifp; + int s; + + eonifp = &eonif[0]; /* kludge - really want to give CLNP + * the ifp for eon, not for the real device + */ + + IFDEBUG(D_EON) + printf("eoninput() 0x%x m_data 0x%x m_len 0x%x dequeued\n", + m, m?m->m_data:0, m?m->m_len:0); + ENDDEBUG + + if (m == 0) + return; + if (iphlen > sizeof (struct ip)) + ip_stripoptions(m, (struct mbuf *)0); + if (m->m_len < EONIPLEN) { + if ((m = m_pullup(m, EONIPLEN)) == 0) { + IncStat(es_badhdr); +drop: + IFDEBUG(D_EON) + 
printf("eoninput: DROP \n" ); + ENDDEBUG + eonifp->if_ierrors ++; + m_freem(m); + return; + } + } + eonif->if_ibytes += m->m_pkthdr.len; + eonif->if_lastchange = time; + iphdr = mtod(m, struct ip *); + /* do a few checks for debugging */ + if( iphdr->ip_p != IPPROTO_EON ) { + IncStat(es_badhdr); + goto drop; + } + /* temporarily drop ip header from the mbuf */ + m->m_data += sizeof(struct ip); + eonhdr = mtod(m, struct eon_hdr *); + if( iso_check_csum( m, sizeof(struct eon_hdr) ) != EOK ) { + IncStat(es_badcsum); + goto drop; + } + m->m_data -= sizeof(struct ip); + + IFDEBUG(D_EON) + printf("eoninput csum ok class 0x%x\n", eonhdr->eonh_class ); + printf("eoninput: eon header:\n"); + dump_buf(eonhdr, sizeof(struct eon_hdr)); + ENDDEBUG + + /* checks for debugging */ + if( eonhdr->eonh_vers != EON_VERSION) { + IncStat(es_badhdr); + goto drop; + } + m->m_flags &= ~(M_BCAST|M_MCAST); + switch( eonhdr->eonh_class) { + case EON_BROADCAST: + IncStat(es_in_broad); + m->m_flags |= M_BCAST; + break; + case EON_NORMAL_ADDR: + IncStat(es_in_normal); + break; + case EON_MULTICAST_ES: + IncStat(es_in_multi_es); + m->m_flags |= M_MCAST; + break; + case EON_MULTICAST_IS: + IncStat(es_in_multi_is); + m->m_flags |= M_MCAST; + break; + } + eonifp->if_ipackets++; + + { + /* put it on the CLNP queue and set soft interrupt */ + struct ifqueue *ifq; + extern struct ifqueue clnlintrq; + + m->m_pkthdr.rcvif = eonifp; /* KLUDGE */ + IFDEBUG(D_EON) + printf("eoninput to clnl IFQ\n"); + ENDDEBUG + ifq = &clnlintrq; + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); + m_freem(m); + eonifp->if_iqdrops++; + eonifp->if_ipackets--; + splx(s); + return; + } + IF_ENQUEUE(ifq, m); + IFDEBUG(D_EON) + printf( + "0x%x enqueued on clnp Q: m_len 0x%x m_type 0x%x m_data 0x%x\n", + m, m->m_len, m->m_type, m->m_data); + dump_buf(mtod(m, caddr_t), m->m_len); + ENDDEBUG + schednetisr(NETISR_ISO); + splx(s); + } +} + +int +eonctlinput(cmd, sin) + int cmd; + struct sockaddr_in *sin; +{ + extern u_char 
inetctlerrmap[]; + + IFDEBUG(D_EON) + printf("eonctlinput: cmd 0x%x addr: ", cmd); + dump_isoaddr(sin); + printf("\n"); + ENDDEBUG + + if (cmd < 0 || cmd > PRC_NCMDS) + return 0; + + IncStat(es_icmp[cmd]); + switch (cmd) { + + case PRC_QUENCH: + case PRC_QUENCH2: + /* TODO: set the dec bit */ + break; + case PRC_TIMXCEED_REASS: + case PRC_ROUTEDEAD: + case PRC_HOSTUNREACH: + case PRC_UNREACH_NET: + case PRC_IFDOWN: + case PRC_UNREACH_HOST: + case PRC_HOSTDEAD: + case PRC_TIMXCEED_INTRANS: + /* TODO: mark the link down */ + break; + + case PRC_UNREACH_PROTOCOL: + case PRC_UNREACH_PORT: + case PRC_UNREACH_SRCFAIL: + case PRC_REDIRECT_NET: + case PRC_REDIRECT_HOST: + case PRC_REDIRECT_TOSNET: + case PRC_REDIRECT_TOSHOST: + case PRC_MSGSIZE: + case PRC_PARAMPROB: + /* printf("eonctlinput: ICMP cmd 0x%x\n", cmd );*/ + break; + } + return 0; +} + +#endif diff --git a/bsd/netiso/iso.c b/bsd/netiso/iso.c new file mode 100644 index 000000000..c625d013a --- /dev/null +++ b/bsd/netiso/iso.c @@ -0,0 +1,938 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)iso.c 8.2 (Berkeley) 11/15/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * iso.c: miscellaneous routines to support the iso address family + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#if TUBA +#include +#endif + +#if ISO + +int iso_interfaces = 0; /* number of external interfaces */ +extern struct ifnet loif; /* loopback interface */ +int ether_output(); +void llc_rtrequest(); + +/* + * FUNCTION: iso_addrmatch1 + * + * PURPOSE: decide if the two iso_addrs passed are equal + * + * RETURNS: true if the addrs match, false if they do not + * + * SIDE EFFECTS: + * + * NOTES: + */ +iso_addrmatch1(isoaa, isoab) +register struct iso_addr *isoaa, *isoab; /* addresses to check */ +{ + u_int compare_len; + + IFDEBUG(D_ROUTE) + printf("iso_addrmatch1: comparing lengths: %d to %d\n", isoaa->isoa_len, + isoab->isoa_len); + printf("a:\n"); + dump_buf(isoaa->isoa_genaddr, isoaa->isoa_len); + printf("b:\n"); + dump_buf(isoab->isoa_genaddr, isoab->isoa_len); + ENDDEBUG + + if ((compare_len = isoaa->isoa_len) != isoab->isoa_len) { + IFDEBUG(D_ROUTE) + printf("iso_addrmatch1: returning false because of lengths\n"); + ENDDEBUG + return 0; + } + +#ifdef notdef + /* TODO : generalize this to all afis with masks */ + if( isoaa->isoa_afi == AFI_37 ) { + /* must not compare 2 least significant digits, or for + * that matter, the DSP + */ + compare_len = ADDR37_IDI_LEN - 1; + } +#endif + + IFDEBUG(D_ROUTE) + int i; + char *a, *b; + + a = isoaa->isoa_genaddr; + b = isoab->isoa_genaddr; + + for (i=0; i", a[i]&0xff, b[i]&0xff); + if (a[i] != b[i]) { + printf("\naddrs are not equal at byte %d\n", i); + return(0); + } + } + printf("\n"); + printf("addrs are equal\n"); + return (1); + ENDDEBUG + return (!bcmp(isoaa->isoa_genaddr, isoab->isoa_genaddr, 
compare_len)); +} + +/* + * FUNCTION: iso_addrmatch + * + * PURPOSE: decide if the two sockadrr_isos passed are equal + * + * RETURNS: true if the addrs match, false if they do not + * + * SIDE EFFECTS: + * + * NOTES: + */ +iso_addrmatch(sisoa, sisob) +struct sockaddr_iso *sisoa, *sisob; /* addresses to check */ +{ + return(iso_addrmatch1(&sisoa->siso_addr, &sisob->siso_addr)); +} +#ifdef notdef +/* + * FUNCTION: iso_netmatch + * + * PURPOSE: similar to iso_addrmatch but takes sockaddr_iso + * as argument. + * + * RETURNS: true if same net, false if not + * + * SIDE EFFECTS: + * + * NOTES: + */ +iso_netmatch(sisoa, sisob) +struct sockaddr_iso *sisoa, *sisob; +{ + u_char bufa[sizeof(struct sockaddr_iso)]; + u_char bufb[sizeof(struct sockaddr_iso)]; + register int lena, lenb; + + lena = iso_netof(&sisoa->siso_addr, bufa); + lenb = iso_netof(&sisob->siso_addr, bufb); + + IFDEBUG(D_ROUTE) + printf("iso_netmatch: comparing lengths: %d to %d\n", lena, lenb); + printf("a:\n"); + dump_buf(bufa, lena); + printf("b:\n"); + dump_buf(bufb, lenb); + ENDDEBUG + + return ((lena == lenb) && (!bcmp(bufa, bufb, lena))); +} +#endif /* notdef */ + +/* + * FUNCTION: iso_hashchar + * + * PURPOSE: Hash all character in the buffer specified into + * a long. Return the long. + * + * RETURNS: The hash value. + * + * SIDE EFFECTS: + * + * NOTES: The hash is achieved by exclusive ORing 4 byte + * quantities. 
+ */ +u_long +iso_hashchar(buf, len) +register caddr_t buf; /* buffer to pack from */ +register int len; /* length of buffer */ +{ + register u_long h = 0; + register int i; + + for (i=0; isiso_addr, buf); + hp->afh_nethash = iso_hashchar((caddr_t)buf, bufsize); + + IFDEBUG(D_ROUTE) + printf("iso_hash: iso_netof: bufsize = %d\n", bufsize); + ENDDEBUG + + hp->afh_hosthash = iso_hashchar((caddr_t)&siso->siso_addr, + siso->siso_addr.isoa_len); + + IFDEBUG(D_ROUTE) + printf("iso_hash: %s: nethash = x%x, hosthash = x%x\n", + clnp_iso_addrp(&siso->siso_addr), hp->afh_nethash, + hp->afh_hosthash); + ENDDEBUG +} +/* + * FUNCTION: iso_netof + * + * PURPOSE: Extract the network portion of the iso address. + * The network portion of the iso address varies depending + * on the type of address. The network portion of the + * address will include the IDP. The network portion is: + * + * TYPE DESC + * t37 The AFI and x.121 (IDI) + * osinet The AFI, orgid, snetid + * rfc986 The AFI, vers and network part of + * internet address. + * + * RETURNS: number of bytes placed into buf. + * + * SIDE EFFECTS: + * + * NOTES: Buf is assumed to be big enough + */ +iso_netof(isoa, buf) +struct iso_addr *isoa; /* address */ +caddr_t buf; /* RESULT: network portion of address here */ +{ + u_int len = 1; /* length of afi */ + + switch (isoa->isoa_afi) { + case AFI_37: + /* + * Due to classic x.25 tunnel vision, there is no + * net portion of an x.121 address. For our purposes + * the AFI will do, so that all x.25 -type addresses + * map to the single x.25 SNPA. (Cannot have more than + * one, obviously). + */ + + break; + +/* case AFI_OSINET:*/ + case AFI_RFC986: { + u_short idi; /* value of idi */ + + /* osinet and rfc986 have idi in the same place */ + CTOH(isoa->rfc986_idi[0], isoa->rfc986_idi[1], idi); + + if (idi == IDI_OSINET) +/* + * Network portion of OSINET address can only be the IDI. Clearly, + * with one x25 interface, one could get to several orgids, and + * several snetids. 
+ len += (ADDROSINET_IDI_LEN + OVLOSINET_ORGID_LEN + + OVLOSINET_SNETID_LEN); + */ + len += ADDROSINET_IDI_LEN; + else if (idi == IDI_RFC986) { + u_long inetaddr; + struct ovl_rfc986 *o986 = (struct ovl_rfc986 *)isoa; + + /* bump len to include idi and version (1 byte) */ + len += ADDRRFC986_IDI_LEN + 1; + + /* get inet addr long aligned */ + bcopy(o986->o986_inetaddr, &inetaddr, sizeof(inetaddr)); + inetaddr = ntohl(inetaddr); /* convert to host byte order */ + + IFDEBUG(D_ROUTE) + printf("iso_netof: isoa "); + dump_buf(isoa, sizeof(*isoa)); + printf("iso_netof: inetaddr 0x%x ", inetaddr); + ENDDEBUG + + /* bump len by size of network portion of inet address */ + if (IN_CLASSA(inetaddr)) { + len += 4-IN_CLASSA_NSHIFT/8; + IFDEBUG(D_ROUTE) + printf("iso_netof: class A net len is now %d\n", len); + ENDDEBUG + } else if (IN_CLASSB(inetaddr)) { + len += 4-IN_CLASSB_NSHIFT/8; + IFDEBUG(D_ROUTE) + printf("iso_netof: class B net len is now %d\n", len); + ENDDEBUG + } else { + len += 4-IN_CLASSC_NSHIFT/8; + IFDEBUG(D_ROUTE) + printf("iso_netof: class C net len is now %d\n", len); + ENDDEBUG + } + } else + len = 0; + } break; + + default: + len = 0; + } + + bcopy((caddr_t)isoa, buf, len); + IFDEBUG(D_ROUTE) + printf("iso_netof: isoa "); + dump_buf(isoa, len); + printf("iso_netof: net "); + dump_buf(buf, len); + ENDDEBUG + return len; +} +#endif /* notdef */ +/* + * Generic iso control operations (ioctl's). + * Ifp is 0 if not an interface-specific ioctl. + */ +/* ARGSUSED */ +iso_control(so, cmd, data, ifp) + struct socket *so; + int cmd; + caddr_t data; + register struct ifnet *ifp; +{ + register struct iso_ifreq *ifr = (struct iso_ifreq *)data; + register struct iso_ifaddr *ia = 0; + register struct ifaddr *ifa; + struct iso_ifaddr *oia; + struct iso_aliasreq *ifra = (struct iso_aliasreq *)data; + int error, hostIsNew, maskIsNew; + + /* + * Find address for this interface, if it exists. 
+ */ + if (ifp) + for (ia = iso_ifaddr; ia; ia = ia->ia_next) + if (ia->ia_ifp == ifp) + break; + + switch (cmd) { + + case SIOCAIFADDR_ISO: + case SIOCDIFADDR_ISO: + if (ifra->ifra_addr.siso_family == AF_ISO) + for (oia = ia; ia; ia = ia->ia_next) { + if (ia->ia_ifp == ifp && + SAME_ISOADDR(&ia->ia_addr, &ifra->ifra_addr)) + break; + } + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); + if (ifp == 0) + panic("iso_control"); + if (ia == (struct iso_ifaddr *)0) { + struct iso_ifaddr *nia; + if (cmd == SIOCDIFADDR_ISO) + return (EADDRNOTAVAIL); +#if TUBA + /* XXXXXX can't be done in the proto init routines */ + if (tuba_tree == 0) + tuba_table_init(); +#endif + MALLOC(nia, struct iso_ifaddr *, sizeof(*nia), + M_IFADDR, M_WAITOK); + if (nia == (struct iso_ifaddr *)0) + return (ENOBUFS); + bzero((caddr_t)nia, sizeof(*nia)); + if (ia = iso_ifaddr) { + for ( ; ia->ia_next; ia = ia->ia_next) + ; + ia->ia_next = nia; + } else + iso_ifaddr = nia; + ia = nia; + if (ifa = ifp->if_addrlist) { + for ( ; ifa->ifa_next; ifa = ifa->ifa_next) + ; + ifa->ifa_next = (struct ifaddr *) ia; + } else + ifp->if_addrlist = (struct ifaddr *) ia; + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + ia->ia_ifa.ifa_dstaddr + = (struct sockaddr *)&ia->ia_dstaddr; + ia->ia_ifa.ifa_netmask + = (struct sockaddr *)&ia->ia_sockmask; + ia->ia_ifp = ifp; + if (ifp != &loif) + iso_interfaces++; + } + break; + +#define cmdbyte(x) (((x) >> 8) & 0xff) + default: + if (cmdbyte(cmd) == 'a') + return (snpac_ioctl(so, cmd, data)); + if (ia == (struct iso_ifaddr *)0) + return (EADDRNOTAVAIL); + break; + } + switch (cmd) { + + case SIOCGIFADDR_ISO: + ifr->ifr_Addr = ia->ia_addr; + break; + + case SIOCGIFDSTADDR_ISO: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return (EINVAL); + ifr->ifr_Addr = ia->ia_dstaddr; + break; + + case SIOCGIFNETMASK_ISO: + ifr->ifr_Addr = ia->ia_sockmask; + break; + + case SIOCAIFADDR_ISO: + maskIsNew = 0; hostIsNew = 1; error = 0; + if (ia->ia_addr.siso_family == 
AF_ISO) { + if (ifra->ifra_addr.siso_len == 0) { + ifra->ifra_addr = ia->ia_addr; + hostIsNew = 0; + } else if (SAME_ISOADDR(&ia->ia_addr, &ifra->ifra_addr)) + hostIsNew = 0; + } + if (ifra->ifra_mask.siso_len) { + iso_ifscrub(ifp, ia); + ia->ia_sockmask = ifra->ifra_mask; + maskIsNew = 1; + } + if ((ifp->if_flags & IFF_POINTOPOINT) && + (ifra->ifra_dstaddr.siso_family == AF_ISO)) { + iso_ifscrub(ifp, ia); + ia->ia_dstaddr = ifra->ifra_dstaddr; + maskIsNew = 1; /* We lie; but the effect's the same */ + } + if (ifra->ifra_addr.siso_family == AF_ISO && + (hostIsNew || maskIsNew)) { + error = iso_ifinit(ifp, ia, &ifra->ifra_addr, 0); + } + if (ifra->ifra_snpaoffset) + ia->ia_snpaoffset = ifra->ifra_snpaoffset; + return (error); + + case SIOCDIFADDR_ISO: + iso_ifscrub(ifp, ia); + if ((ifa = ifp->if_addrlist) == (struct ifaddr *)ia) + ifp->if_addrlist = ifa->ifa_next; + else { + while (ifa->ifa_next && + (ifa->ifa_next != (struct ifaddr *)ia)) + ifa = ifa->ifa_next; + if (ifa->ifa_next) + ifa->ifa_next = ((struct ifaddr *)ia)->ifa_next; + else + printf("Couldn't unlink isoifaddr from ifp\n"); + } + oia = ia; + if (oia == (ia = iso_ifaddr)) { + iso_ifaddr = ia->ia_next; + } else { + while (ia->ia_next && (ia->ia_next != oia)) { + ia = ia->ia_next; + } + if (ia->ia_next) + ia->ia_next = oia->ia_next; + else + printf("Didn't unlink isoifadr from list\n"); + } + IFAFREE((&oia->ia_ifa)); + break; + + default: + if (ifp == 0 || ifp->if_ioctl == 0) + return (EOPNOTSUPP); + return ((*ifp->if_ioctl)(ifp, cmd, data)); + } + return (0); +} + +/* + * Delete any existing route for an interface. 
+ */ +iso_ifscrub(ifp, ia) + register struct ifnet *ifp; + register struct iso_ifaddr *ia; +{ + int nsellength = ia->ia_addr.siso_tlen; + if ((ia->ia_flags & IFA_ROUTE) == 0) + return; + ia->ia_addr.siso_tlen = 0; + if (ifp->if_flags & IFF_LOOPBACK) + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + else if (ifp->if_flags & IFF_POINTOPOINT) + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + else { + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, 0); + } + ia->ia_addr.siso_tlen = nsellength; + ia->ia_flags &= ~IFA_ROUTE; +} + +/* + * Initialize an interface's internet address + * and routing table entry. + */ +iso_ifinit(ifp, ia, siso, scrub) + register struct ifnet *ifp; + register struct iso_ifaddr *ia; + struct sockaddr_iso *siso; +{ + struct sockaddr_iso oldaddr; + int s = splimp(), error, nsellength; + + oldaddr = ia->ia_addr; + ia->ia_addr = *siso; + /* + * Give the interface a chance to initialize + * if this is its first address, + * and to validate the address if necessary. + */ + if (ifp->if_ioctl && + (error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, (caddr_t)ia))) { + splx(s); + ia->ia_addr = oldaddr; + return (error); + } + if (scrub) { + ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr; + iso_ifscrub(ifp, ia); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + } + /* XXX -- The following is here temporarily out of laziness + in not changing every ethernet driver's if_ioctl routine */ + if (ifp->if_output == ether_output) { + ia->ia_ifa.ifa_rtrequest = llc_rtrequest; + ia->ia_ifa.ifa_flags |= RTF_CLONING; + } + /* + * Add route for the network. 
+ */ + nsellength = ia->ia_addr.siso_tlen; + ia->ia_addr.siso_tlen = 0; + if (ifp->if_flags & IFF_LOOPBACK) { + ia->ia_ifa.ifa_dstaddr = ia->ia_ifa.ifa_addr; + error = rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_HOST|RTF_UP); + } else if (ifp->if_flags & IFF_POINTOPOINT && + ia->ia_dstaddr.siso_family == AF_ISO) + error = rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_HOST|RTF_UP); + else { + rt_maskedcopy(ia->ia_ifa.ifa_addr, ia->ia_ifa.ifa_dstaddr, + ia->ia_ifa.ifa_netmask); + ia->ia_dstaddr.siso_nlen = + min(ia->ia_addr.siso_nlen, (ia->ia_sockmask.siso_len - 6)); + error = rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_UP); + } + ia->ia_addr.siso_tlen = nsellength; + ia->ia_flags |= IFA_ROUTE; + splx(s); + return (error); +} +#ifdef notdef + +struct ifaddr * +iso_ifwithidi(addr) + register struct sockaddr *addr; +{ + register struct ifnet *ifp; + register struct ifaddr *ifa; + register u_int af = addr->sa_family; + + if (af != AF_ISO) + return (0); + IFDEBUG(D_ROUTE) + printf(">>> iso_ifwithidi addr\n"); + dump_isoaddr( (struct sockaddr_iso *)(addr)); + printf("\n"); + ENDDEBUG + for (ifp = ifnet; ifp; ifp = ifp->if_next) { + IFDEBUG(D_ROUTE) + printf("iso_ifwithidi ifnet %s\n", ifp->if_name); + ENDDEBUG + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) { + IFDEBUG(D_ROUTE) + printf("iso_ifwithidi address "); + dump_isoaddr( (struct sockaddr_iso *)(ifa->ifa_addr)); + ENDDEBUG + if (ifa->ifa_addr->sa_family != addr->sa_family) + continue; + +#define IFA_SIS(ifa)\ + ((struct sockaddr_iso *)((ifa)->ifa_addr)) + + IFDEBUG(D_ROUTE) + printf(" af same, args to iso_eqtype:\n"); + printf("0x%x ", IFA_SIS(ifa)->siso_addr); + printf(" 0x%x\n", + &(((struct sockaddr_iso *)addr)->siso_addr)); + ENDDEBUG + + if (iso_eqtype(&(IFA_SIS(ifa)->siso_addr), + &(((struct sockaddr_iso *)addr)->siso_addr))) { + IFDEBUG(D_ROUTE) + printf("ifa_ifwithidi: ifa found\n"); + ENDDEBUG + return (ifa); + } + IFDEBUG(D_ROUTE) + printf(" iso_eqtype failed\n"); + ENDDEBUG + } + } + return ((struct ifaddr 
*)0); +} + +#endif /* notdef */ +/* + * FUNCTION: iso_ck_addr + * + * PURPOSE: return true if the iso_addr passed is + * within the legal size limit for an iso address. + * + * RETURNS: true or false + * + * SIDE EFFECTS: + * + */ +iso_ck_addr(isoa) +struct iso_addr *isoa; /* address to check */ +{ + return (isoa->isoa_len <= 20); + +} + +#ifdef notdef +/* + * FUNCTION: iso_eqtype + * + * PURPOSE: Determine if two iso addresses are of the same type. + * This is flaky. Really we should consider all type 47 addrs to be the + * same - but there do exist different structures for 47 addrs. + * Gosip adds a 3rd. + * + * RETURNS: true if the addresses are the same type + * + * SIDE EFFECTS: + * + * NOTES: By type, I mean rfc986, t37, or osinet + * + * This will first compare afis. If they match, then + * if the addr is not t37, the idis must be compared. + */ +iso_eqtype(isoaa, isoab) +struct iso_addr *isoaa; /* first addr to check */ +struct iso_addr *isoab; /* other addr to check */ +{ + if (isoaa->isoa_afi == isoab->isoa_afi) { + if (isoaa->isoa_afi == AFI_37) + return(1); + else + return (!bcmp(&isoaa->isoa_u, &isoab->isoa_u, 2)); + } + return(0); +} +#endif /* notdef */ +/* + * FUNCTION: iso_localifa() + * + * PURPOSE: Find an interface addresss having a given destination + * or at least matching the net. + * + * RETURNS: ptr to an interface address + * + * SIDE EFFECTS: + * + * NOTES: + */ +struct iso_ifaddr * +iso_localifa(siso) + register struct sockaddr_iso *siso; +{ + register struct iso_ifaddr *ia; + register char *cp1, *cp2, *cp3; + register struct ifnet *ifp; + struct iso_ifaddr *ia_maybe = 0; + /* + * We make one pass looking for both net matches and an exact + * dst addr. 
+ */ + for (ia = iso_ifaddr; ia; ia = ia->ia_next) { + if ((ifp = ia->ia_ifp) == 0 || ((ifp->if_flags & IFF_UP) == 0)) + continue; + if (ifp->if_flags & IFF_POINTOPOINT) { + if ((ia->ia_dstaddr.siso_family == AF_ISO) && + SAME_ISOADDR(&ia->ia_dstaddr, siso)) + return (ia); + else + if (SAME_ISOADDR(&ia->ia_addr, siso)) + ia_maybe = ia; + continue; + } + if (ia->ia_sockmask.siso_len) { + char *cplim = ia->ia_sockmask.siso_len + (char *)&ia->ia_sockmask; + cp1 = ia->ia_sockmask.siso_data; + cp2 = siso->siso_data; + cp3 = ia->ia_addr.siso_data; + while (cp1 < cplim) + if (*cp1++ & (*cp2++ ^ *cp3++)) + goto next; + ia_maybe = ia; + } + if (SAME_ISOADDR(&ia->ia_addr, siso)) + return ia; + next:; + } + return ia_maybe; +} + +#if TPCONS +#include +#endif /* TPCONS */ +/* + * FUNCTION: iso_nlctloutput + * + * PURPOSE: Set options at the network level + * + * RETURNS: E* + * + * SIDE EFFECTS: + * + * NOTES: This could embody some of the functions of + * rclnp_ctloutput and cons_ctloutput. + */ +iso_nlctloutput(cmd, optname, pcb, m) +int cmd; /* command:set or get */ +int optname; /* option of interest */ +caddr_t pcb; /* nl pcb */ +struct mbuf *m; /* data for set, buffer for get */ +{ + struct isopcb *isop = (struct isopcb *)pcb; + int error = 0; /* return value */ + caddr_t data; /* data for option */ + int data_len; /* data's length */ + + IFDEBUG(D_ISO) + printf("iso_nlctloutput: cmd %x, opt %x, pcb %x, m %x\n", + cmd, optname, pcb, m); + ENDDEBUG + + if ((cmd != PRCO_GETOPT) && (cmd != PRCO_SETOPT)) + return(EOPNOTSUPP); + + data = mtod(m, caddr_t); + data_len = (m)->m_len; + + IFDEBUG(D_ISO) + printf("iso_nlctloutput: data is:\n"); + dump_buf(data, data_len); + ENDDEBUG + + switch (optname) { + +#if TPCONS + case CONSOPT_X25CRUD: + if (cmd == PRCO_GETOPT) { + error = EOPNOTSUPP; + break; + } + + if (data_len > MAXX25CRUDLEN) { + error = EINVAL; + break; + } + + IFDEBUG(D_ISO) + printf("iso_nlctloutput: setting x25 crud\n"); + ENDDEBUG + + bcopy(data, 
(caddr_t)isop->isop_x25crud, (unsigned)data_len); + isop->isop_x25crud_len = data_len; + break; +#endif /* TPCONS */ + + default: + error = EOPNOTSUPP; + } + if (cmd == PRCO_SETOPT) + m_freem(m); + return error; +} +#endif /* ISO */ + +#ifdef ARGO_DEBUG + +/* + * FUNCTION: dump_isoaddr + * + * PURPOSE: debugging + * + * RETURNS: nada + * + */ +dump_isoaddr(s) + struct sockaddr_iso *s; +{ + char *clnp_saddr_isop(); + register int i; + + if( s->siso_family == AF_ISO) { + printf("ISO address: suffixlen %d, %s\n", + s->siso_tlen, clnp_saddr_isop(s)); + } else if( s->siso_family == AF_INET) { + /* hack */ + struct sockaddr_in *sin = (struct sockaddr_in *)s; + + printf("%d.%d.%d.%d: %d", + (sin->sin_addr.s_addr>>24)&0xff, + (sin->sin_addr.s_addr>>16)&0xff, + (sin->sin_addr.s_addr>>8)&0xff, + (sin->sin_addr.s_addr)&0xff, + sin->sin_port); + } +} + +#endif /* ARGO_DEBUG */ diff --git a/bsd/netiso/iso.h b/bsd/netiso/iso.h new file mode 100644 index 000000000..a1bd2d16b --- /dev/null +++ b/bsd/netiso/iso.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)iso.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#ifndef __ISO__ +#define __ISO__ + +/* + * Return true if this is a multicast address + * This assumes that the bit transmission is lsb first. This + * assumption is valid for 802.3 but not 802.5. There is a + * kludge to get around this for 802.5 -- see if_lan.c + * where subnetwork header is setup. 
+ */ +#define IS_MULTICAST(snpa)\ + ((snpa)[0] & 0x01) + +/* + * Protocols + */ +#define ISOPROTO_TCP 6 /* IETF experiment */ +#define ISOPROTO_UDP 17 /* IETF experiment */ +#define ISOPROTO_TP0 25 /* connection oriented transport protocol */ +#define ISOPROTO_TP1 26 /* not implemented */ +#define ISOPROTO_TP2 27 /* not implemented */ +#define ISOPROTO_TP3 28 /* not implemented */ +#define ISOPROTO_TP4 29 /* connection oriented transport protocol */ +#define ISOPROTO_TP ISOPROTO_TP4 /* tp-4 with negotiation */ +#define ISOPROTO_CLTP 30 /* connectionless transport (not yet impl.) */ +#define ISOPROTO_CLNP 31 /* connectionless internetworking protocol */ +#define ISOPROTO_X25 32 /* cons */ +#define ISOPROTO_INACT_NL 33 /* inactive network layer! */ +#define ISOPROTO_ESIS 34 /* ES-IS protocol */ +#define ISOPROTO_INTRAISIS 35 /* IS-IS protocol */ +#define ISOPROTO_IDRP 36 /* Interdomain Routing Protocol */ + +#define ISOPROTO_RAW 255 /* raw clnp */ +#define ISOPROTO_MAX 256 + +#define ISO_PORT_RESERVED 1024 +#define ISO_PORT_USERRESERVED 5000 +/* + * Port/socket numbers: standard network functions + * NOT PRESENTLY USED + */ +#define ISO_PORT_MAINT 501 +#define ISO_PORT_ECHO 507 +#define ISO_PORT_DISCARD 509 +#define ISO_PORT_SYSTAT 511 +#define ISO_PORT_NETSTAT 515 +/* + * Port/socket numbers: non-standard application functions + */ +#define ISO_PORT_LOGIN 513 +/* + * Port/socket numbers: public use + */ +#define ISO_PORT_PUBLIC 1024 /* high bit set --> public */ + +/* + * Network layer protocol identifiers + */ +#define ISO8473_CLNP 0x81 +#define ISO9542_ESIS 0x82 +#define ISO9542X25_ESIS 0x8a +#define ISO10589_ISIS 0x83 +#define ISO8878A_CONS 0x84 +#define ISO10747_IDRP 0x85 + + +#ifndef IN_CLASSA_NET +#include +#endif /* IN_CLASSA_NET */ + + + +/* The following looks like a sockaddr + * to facilitate using tree lookup routines */ +struct iso_addr { + u_char isoa_len; /* length (in bytes) */ + char isoa_genaddr[20]; /* general opaque address */ +}; + +struct 
sockaddr_iso { + u_char siso_len; /* length */ + u_char siso_family; /* family */ + u_char siso_plen; /* presentation selector length */ + u_char siso_slen; /* session selector length */ + u_char siso_tlen; /* transport selector length */ + struct iso_addr siso_addr; /* network address */ + u_char siso_pad[6]; /* space for gosip v2 sels */ + /* makes struct 32 bytes long */ +}; +#define siso_nlen siso_addr.isoa_len +#define siso_data siso_addr.isoa_genaddr + +#define TSEL(s) ((caddr_t)((s)->siso_data + (s)->siso_nlen)) + +#define SAME_ISOADDR(a, b) \ + (bcmp((a)->siso_data, (b)->siso_data, (unsigned)(a)->siso_nlen)==0) +/* + * The following are specific values for siso->siso_data[0], + * otherwise known as the AFI: + */ +#define AFI_37 0x37 /* bcd of "37" */ +#define AFI_OSINET 0x47 /* bcd of "47" */ +#define AFI_RFC986 0x47 /* bcd of "47" */ +#define AFI_SNA 0x00 /* SubNetwork Address; invalid really...*/ + +#ifdef KERNEL + +extern int iso_netmatch(); +extern int iso_hash(); +extern int iso_addrmatch(); +extern struct iso_ifaddr *iso_iaonnetof(); +extern struct domain isodomain; +extern struct protosw isosw[]; + +#else +/* user utilities definitions from the iso library */ + +#include + +__BEGIN_DECLS +struct iso_addr *iso_addr __P((const char *)); +char *iso_ntoa __P((const struct iso_addr *)); + +/* THESE DON'T EXIST YET */ +struct hostent *iso_gethostbyname(), *iso_gethostbyaddr(); +__END_DECLS + +#endif /* KERNEL */ + +#define _offsetof(t, m) ((int)((caddr_t)&((t *)0)->m)) +#endif /* __ISO__ */ diff --git a/bsd/netiso/iso_chksum.c b/bsd/netiso/iso_chksum.c new file mode 100644 index 000000000..ab0ac9e65 --- /dev/null +++ b/bsd/netiso/iso_chksum.c @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_chksum.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ISO CHECKSUM + * + * The checksum generation and check routines are here. + * The checksum is 2 bytes such that the sum of all the bytes b(i) == 0 + * and the sum of i * b(i) == 0. 
+ * The whole thing is complicated by the fact that the data are in mbuf + * chains. + * Furthermore, there is the possibility of wraparound in the running + * sums after adding up 4102 octets. In order to avoid doing a mod + * operation after EACH add, we have restricted this implementation to + * negotiating a maximum of 4096-octets per TPDU (for the transport layer). + * The routine iso_check_csum doesn't need to know where the checksum + * octets are. + * The routine iso_gen_csum takes a pointer to an mbuf chain (logically + * a chunk of data), an offset into the chunk at which the 2 octets are to + * be stuffed, and the length of the chunk. The 2 octets have to be + * logically adjacent, but may be physically located in separate mbufs. + */ + +#include +#include + +#if ISO +#include +#include +#endif /* ISO */ + +#ifndef MNULL +#define MNULL (struct mbuf *)0 +#endif /* MNULL */ + +/* + * FUNCTION: iso_check_csum + * + * PURPOSE: To check the checksum of the packet in the mbuf chain (m). + * The total length of the packet is (len). + * Called from tp_input() and clnp_intr() + * + * RETURNS: TRUE (something non-zero) if there is a checksum error, + * FALSE if there was NO checksum error. + * + * SIDE EFFECTS: none + * + * NOTES: It might be possible to gain something by optimizing + * this routine (unrolling loops, etc). But it is such + * a horrible thing to fiddle with anyway, it probably + * isn't worth it. 
+ */ +int +iso_check_csum(m, len) + struct mbuf *m; + int len; +{ + register u_char *p = mtod(m, u_char *); + register u_long c0=0, c1=0; + register int i=0; + int cume = 0; /* cumulative length */ + int l; + + l = len; + len = min(m->m_len, len); + i = 0; + + IFDEBUG(D_CHKSUM) + printf("iso_check_csum: m x%x, l x%x, m->m_len x%x\n", m, l, m->m_len); + ENDDEBUG + + while( im_next; + IFDEBUG(D_CHKSUM) + printf("iso_check_csum: new mbuf\n"); + if(l-i < m->m_len) + printf( + "bad mbuf chain in check csum l 0x%x i 0x%x m_data 0x%x", + l,i,m->m_data); + ENDDEBUG + ASSERT( m != MNULL); + len = min( m->m_len, l-i); + p = mtod(m, u_char *); + } + } + if ( ((int)c0 % 255) || ((int)c1 % 255) ) { + IFDEBUG(D_CHKSUM) + printf("BAD iso_check_csum l 0x%x cume 0x%x len 0x%x, i 0x%x", + l, cume, len, i); + ENDDEBUG + return ((int)c0 % 255)<<8 | ((int)c1 % 255); + } + return 0; +} + +/* + * FUNCTION: iso_gen_csum + * + * PURPOSE: To generate the checksum of the packet in the mbuf chain (m). + * The first of the 2 (logically) adjacent checksum bytes + * (x and y) go at offset (n). + * (n) is an offset relative to the beginning of the data, + * not the beginning of the mbuf. + * (l) is the length of the total mbuf chain's data. + * Called from tp_emit(), tp_error_emit() + * clnp_emit_er(), clnp_forward(), clnp_output(). + * + * RETURNS: Rien + * + * SIDE EFFECTS: Puts the 2 checksum bytes into the packet. + * + * NOTES: Ditto the note for iso_check_csum(). 
+ */ + +void +iso_gen_csum(m,n,l) + struct mbuf *m; + int n; /* offset of 2 checksum bytes */ + int l; +{ + register u_char *p = mtod(m, u_char *); + register int c0=0, c1=0; + register int i=0; + int loc = n++, len=0; /* n is position, loc is offset */ + u_char *xloc; + u_char *yloc; + int cume=0; /* cume == cumulative length */ + + IFDEBUG(D_CHKSUM) + printf("enter gen csum m 0x%x n 0x%x l 0x%x\n",m, n-1 ,l ); + ENDDEBUG + + while(i < l) { + len = min(m->m_len, CLBYTES); + /* RAH: don't cksum more than l bytes */ + len = min(len, l - i); + + cume +=len; + p = mtod(m, u_char *); + + if(loc>=0) { + if (loc < len) { + xloc = loc + mtod(m, u_char *); + IFDEBUG(D_CHKSUM) + printf("1: zeroing xloc 0x%x loc 0x%x\n",xloc, loc ); + ENDDEBUG + *xloc = (u_char)0; + if (loc+1 < len) { + /* both xloc and yloc are in same mbuf */ + yloc = 1 + xloc; + IFDEBUG(D_CHKSUM) + printf("2: zeroing yloc 0x%x loc 0x%x\n",yloc, loc ); + ENDDEBUG + *yloc = (u_char)0; + } else { + /* crosses boundary of mbufs */ + yloc = mtod(m->m_next, u_char *); + IFDEBUG(D_CHKSUM) + printf("3: zeroing yloc 0x%x \n",yloc ); + ENDDEBUG + *yloc = (u_char)0; + } + } + loc -= len; + } + + while(i < cume) { + c0 = (c0 + *p); + c1 += c0 ; + i++; + p++; + } + m = m->m_next; + } + IFDEBUG(D_CHKSUM) + printf("gen csum final xloc 0x%x yloc 0x%x\n",xloc, yloc ); + ENDDEBUG + + c1 = (((c0 * (l-n))-c1)%255) ; + *xloc = (u_char) ((c1 < 0)? c1+255 : c1); + + c1 = (-(int)(c1+c0))%255; + *yloc = (u_char) (c1 < 0? c1 + 255 : c1); + + IFDEBUG(D_CHKSUM) + printf("gen csum end \n"); + ENDDEBUG +} + +/* + * FUNCTION: m_datalen + * + * PURPOSE: returns length of the mbuf chain. + * used all over the iso code. 
+ * + * RETURNS: integer + * + * SIDE EFFECTS: none + * + * NOTES: + */ + +int +m_datalen (m) + register struct mbuf *m; +{ + register int datalen; + + for (datalen = 0; m; m = m->m_next) + datalen += m->m_len; + return datalen; +} + +int +m_compress(in, out) + register struct mbuf *in, **out; +{ + register int datalen = 0; + int s = splimp(); + + if( in->m_next == MNULL ) { + *out = in; + IFDEBUG(D_REQUEST) + printf("m_compress returning 0x%x: A\n", in->m_len); + ENDDEBUG + splx(s); + return in->m_len; + } + MGET((*out), M_DONTWAIT, MT_DATA); + if((*out) == MNULL) { + *out = in; + IFDEBUG(D_REQUEST) + printf("m_compress returning -1: B\n"); + ENDDEBUG + splx(s); + return -1; + } + (*out)->m_len = 0; + (*out)->m_act = MNULL; + + while (in) { + IFDEBUG(D_REQUEST) + printf("m_compress in 0x%x *out 0x%x\n", in, *out); + printf("m_compress in: len 0x%x, off 0x%x\n", in->m_len, in->m_data); + printf("m_compress *out: len 0x%x, off 0x%x\n", (*out)->m_len, + (*out)->m_data); + ENDDEBUG + if (in->m_flags & M_EXT) { + ASSERT(in->m_len == 0); + } + if ( in->m_len == 0) { + in = in->m_next; + continue; + } + if (((*out)->m_flags & M_EXT) == 0) { + int len; + + len = M_TRAILINGSPACE(*out); + len = min(len, in->m_len); + datalen += len; + + IFDEBUG(D_REQUEST) + printf("m_compress copying len %d\n", len); + ENDDEBUG + bcopy(mtod(in, caddr_t), mtod((*out), caddr_t) + (*out)->m_len, + (unsigned)len); + + (*out)->m_len += len; + in->m_len -= len; + continue; + } else { + /* (*out) is full */ + if(( (*out)->m_next = m_get(M_DONTWAIT, MT_DATA) ) == MNULL) { + m_freem(*out); + *out = in; + IFDEBUG(D_REQUEST) + printf("m_compress returning -1: B\n"); + ENDDEBUG + splx(s); + return -1; + } + (*out)->m_len = 0; + (*out)->m_act = MNULL; + *out = (*out)->m_next; + } + } + m_freem(in); + IFDEBUG(D_REQUEST) + printf("m_compress returning 0x%x: A\n", datalen); + ENDDEBUG + splx(s); + return datalen; +} diff --git a/bsd/netiso/iso_errno.h b/bsd/netiso/iso_errno.h new file mode 100644 index 
000000000..ed1efb45c --- /dev/null +++ b/bsd/netiso/iso_errno.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_errno.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. 
+ +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#ifndef __ISO_ERRNO__ +#define __ISO_ERRNO__ + +#define ISO_ERROR_MASK 0x8000 +#define BSD_ERROR_MASK 0x0000 +#define TP_ERROR_MASK 0x8800 /* transport layer */ +#define CONL_ERROR_MASK 0x8400 /* co network layer */ +#define CLNL_ERROR_MASK 0x8200 /* cl network layer */ +#define TP_ERROR_SNDC 0x10000 /* kludge to force DC's on certain errors */ + +#define E_CO_NOERROR (CONL_ERROR_MASK | 0x0) /* no add'l info */ + +/******************************************************************************/ +/* */ +/* */ +/* Transport Layer */ +/* */ +/* */ +/******************************************************************************/ + +#define E_TP_DR_NO_REAS (TP_ERROR_MASK | 0x0) /* dr reason not specified*/ +#define E_TP_CONGEST (TP_ERROR_MASK | 0x1) /* dr reason congestion */ +#define E_TP_NO_SESSION (TP_ERROR_MASK | 0x2) /* dr reason no sess ent */ +#define E_TP_ADDR_UNK (TP_ERROR_MASK | 0x3) /* dr reason addr unknown */ + +#define E_TP_ER_NO_REAS (TP_ERROR_MASK | 0x40) /* er reas not specified */ +#define E_TP_INV_PCODE (TP_ERROR_MASK | 0x41) /* er reas invalid parm code */ +#define E_TP_INV_TPDU (TP_ERROR_MASK | 0x42) /* er reas invalid tpdu type */ +#define E_TP_INV_PVAL (TP_ERROR_MASK | 0x43) /* er reas invalid parm value*/ + +#define E_TP_NORMAL_DISC (TP_ERROR_MASK | 0x80) /* dr reas normal disc */ +#define E_TP_CONGEST_2 (TP_ERROR_MASK | 0x81) /* dr reason congestion */ 
+#define E_TP_NEGOT_FAILED (TP_ERROR_MASK | 0x82) /* dr negotiation failed */ +#define E_TP_DUPL_SRCREF (TP_ERROR_MASK | 0x83) /* dr duplicate src ref */ +#define E_TP_MISM_REFS (TP_ERROR_MASK | 0x84) /* dr mismatched references*/ +#define E_TP_PROTO_ERR (TP_ERROR_MASK | 0x85) /* dr protocol error*/ +/* 0x86 not used */ +#define E_TP_REF_OVERFLOW (TP_ERROR_MASK | 0x87) /* dr reference overflow */ +#define E_TP_NO_CR_ON_NC (TP_ERROR_MASK | 0x88) /* dr cr refused on this nc */ +/* 0x89 not used */ +#define E_TP_LENGTH_INVAL (TP_ERROR_MASK | 0x8a) /* dr inval length in hdr*/ + +/******************************************************************************/ +/* */ +/* */ +/* Connection Less Network Layer */ +/* */ +/* */ +/******************************************************************************/ +#ifdef notdef /* Doesn't look like legal C and is causing + * compiler problems */ +#define E_CLNL_??? (CLNL_ERROR_MASK | 0x1) /* explanation */ +#endif + +/******************************************************************************/ +/* */ +/* */ +/* Connection Oriented Network Layer */ +/* */ +/* */ +/******************************************************************************/ + /* see p. 
149 of ISO 8208 */ +#define E_CO_NOERROR (CONL_ERROR_MASK | 0x0) /* no add'l info */ +#define E_CO_INV_PS (CONL_ERROR_MASK | 0x1) /* invalid p(s) */ +#define E_CO_INV_PR (CONL_ERROR_MASK | 0x2) /* invalid p(r) */ + /* dot dot dot */ +#define E_CO_INV_PKT_TYPE (CONL_ERROR_MASK | 0x10) /* packet type invalid*/ +#define E_CO_INV_PKT_R1 (CONL_ERROR_MASK | 0x11) /* for state r1 */ +#define E_CO_INV_PKT_R2 (CONL_ERROR_MASK | 0x12) /* for state r2 */ +#define E_CO_INV_PKT_R3 (CONL_ERROR_MASK | 0x13) /* for state r3 */ +#define E_CO_INV_PKT_P1 (CONL_ERROR_MASK | 0x14) /* for state p1 */ +#define E_CO_INV_PKT_P2 (CONL_ERROR_MASK | 0x15) /* for state p2 */ +#define E_CO_INV_PKT_P3 (CONL_ERROR_MASK | 0x16) /* for state p3 */ +#define E_CO_INV_PKT_P4 (CONL_ERROR_MASK | 0x17) /* for state p4 */ +#define E_CO_INV_PKT_P5 (CONL_ERROR_MASK | 0x18) /* for state p5 */ +#define E_CO_INV_PKT_P6 (CONL_ERROR_MASK | 0x19) /* for state p6 */ +#define E_CO_INV_PKT_P7 (CONL_ERROR_MASK | 0x1a) /* for state p7 */ +#define E_CO_INV_PKT_D1 (CONL_ERROR_MASK | 0x1b) /* for state d1 */ +#define E_CO_INV_PKT_D2 (CONL_ERROR_MASK | 0x1c) /* for state d2 */ +#define E_CO_INV_PKT_D3 (CONL_ERROR_MASK | 0x1d) /* for state d3 */ + /* dot dot dot */ +#define E_CO_PKT_NOT_ALWD (CONL_ERROR_MASK | 0x20) /* packet not allowed */ +#define E_CO_PNA_UNIDENT (CONL_ERROR_MASK | 0x21) /* unidentifiable pkt */ +#define E_CO_PNA_ONEWAY (CONL_ERROR_MASK | 0x22) /* call on 1-way lc */ +#define E_CO_PNA_PVC (CONL_ERROR_MASK | 0x23) /* inv pkt type on a pvc */ +#define E_CO_PNA_UNASSLC (CONL_ERROR_MASK | 0x24) /* pkt on unassigned lc */ +#define E_CO_PNA_REJECT (CONL_ERROR_MASK | 0x25) /* REJ not subscribed to*/ +#define E_CO_PNA_SHORT (CONL_ERROR_MASK | 0x26) /* pkt too short */ +#define E_CO_PNA_LONG (CONL_ERROR_MASK | 0x27) /* pkt too long */ +#define E_CO_PNA_INVGFI (CONL_ERROR_MASK | 0x28) /* inv gen format id */ +#define E_CO_PNA_NZLCI (CONL_ERROR_MASK | 0x29) \ + /* restart or reg pkt with nonzero logical channel 
identifier */ +#define E_CO_PNA_FACIL (CONL_ERROR_MASK | 0x2a) \ + /* pkt type not compat with facility */ +#define E_CO_PNA_UINTCON (CONL_ERROR_MASK | 0x2b) /* unauthor intrpt conf */ +#define E_CO_PNA_UINTRPT (CONL_ERROR_MASK | 0x2c) /* unauthorized intrpt */ +#define E_CO_PNA_UREJECT (CONL_ERROR_MASK | 0x2d) /* unauthorized reject */ + +#define E_CO_TMR_EXP (CONL_ERROR_MASK | 0x30) /* timer expired */ +#define E_CO_TMR_CALR (CONL_ERROR_MASK | 0x31) /* inc. call or call req */ +#define E_CO_TMR_CLRI (CONL_ERROR_MASK | 0x32) /* clear indication */ +#define E_CO_TMR_RSTI (CONL_ERROR_MASK | 0x33) /* reset indication */ +#define E_CO_TMR_RRTI (CONL_ERROR_MASK | 0x34) /* restart indication */ + +#define E_CO_REG_PROB (CONL_ERROR_MASK | 0x40)\ + /* call setup, clear, or registration problem */ +#define E_CO_REG_CODE (CONL_ERROR_MASK | 0x41) /* code not allowed */ +#define E_CO_REG_PARM (CONL_ERROR_MASK | 0x42) /* parameter not allowed */ +#define E_CO_REG_ICDA (CONL_ERROR_MASK | 0x43) /* invalid called addr */ +#define E_CO_REG_ICGA (CONL_ERROR_MASK | 0x44) /* invalid calling addr */ +#define E_CO_REG_ILEN (CONL_ERROR_MASK | 0x45) /* invalid facil length */ +#define E_CO_REG_IBAR (CONL_ERROR_MASK | 0x46) /* incoming call barred */ +#define E_CO_REG_NOLC (CONL_ERROR_MASK | 0x47) /* no logical chan avail*/ +#define E_CO_REG_COLL (CONL_ERROR_MASK | 0x48) /* call collision */ +#define E_CO_REG_DUPF (CONL_ERROR_MASK | 0x49) /* dupl facil requested */ +#define E_CO_REG_NZAL (CONL_ERROR_MASK | 0x4a) /* non-zero addr length */ +#define E_CO_REG_NZFL (CONL_ERROR_MASK | 0x4b) /* non-zero facil length */ +#define E_CO_REG_EFNP (CONL_ERROR_MASK | 0x4c) \ + /* expected facil not provided */ +#define E_CO_REG_ICCITT (CONL_ERROR_MASK | 0x4d) \ + /* invalid CCITT-specified DTE facil */ + +#define E_CO_MISC (CONL_ERROR_MASK | 0x50) /* miscellaneous */ +#define E_CO_MISC_CAUSE (CONL_ERROR_MASK | 0x51) /* improper cause code */ +#define E_CO_MISC_ALIGN (CONL_ERROR_MASK | 0x52) /* not 
octet-aligned */ +#define E_CO_MISC_IQBS (CONL_ERROR_MASK | 0x53) \ + /* inconsistent Q bit settings */ + +#define E_CO_INTL (CONL_ERROR_MASK | 0x70) /* international problem */ +#define E_CO_IREMNWK (CONL_ERROR_MASK | 0x71) /* remote network problem */ +#define E_CO_INPROTO (CONL_ERROR_MASK | 0x72) /* int'l protocol problem */ +#define E_CO_ILINKDWN (CONL_ERROR_MASK | 0x73) /* int'l link down */ +#define E_CO_ILINKBSY (CONL_ERROR_MASK | 0x74) /* int'l link busy */ +#define E_CO_IXNETFAC (CONL_ERROR_MASK | 0x75) /* transit netwk facil */ +#define E_CO_IRNETFAC (CONL_ERROR_MASK | 0x76) /* remote netwk facil */ +#define E_CO_IROUTING (CONL_ERROR_MASK | 0x77) /* int'l routing prob */ +#define E_CO_ITMPRTG (CONL_ERROR_MASK | 0x78) /* temporary routing prob */ +#define E_CO_IUNKDNIC (CONL_ERROR_MASK | 0x79) /* unknown called DNIC */ +#define E_CO_IMAINT (CONL_ERROR_MASK | 0x7a) /* maintenance action */ + +#define E_CO_TIMO (CONL_ERROR_MASK | 0x90) \ + /* timer expired or retransmission count surpassed */ +#define E_CO_TIM_INTRP (CONL_ERROR_MASK | 0x91) /* for interrupt */ +#define E_CO_TIM_DATA (CONL_ERROR_MASK | 0x92) /* for data */ +#define E_CO_TIM_REJ (CONL_ERROR_MASK | 0x93) /* for reject */ + +#define E_CO_DTE_SPEC (CONL_ERROR_MASK | 0xa0) /* DTE-specific */ +#define E_CO_DTE_OK (CONL_ERROR_MASK | 0xa1) /* DTE operational */ +#define E_CO_DTE_NOK (CONL_ERROR_MASK | 0xa2) /* DTE not operational */ +#define E_CO_DTE_RSRC (CONL_ERROR_MASK | 0xa3) /* DTE resource constraint*/ +#define E_CO_DTE_FSLCT (CONL_ERROR_MASK | 0xa4) /* fast select not subsc */ +#define E_CO_DTE_PFPKT (CONL_ERROR_MASK | 0xa5) /* partially full pkt */ +#define E_CO_DTE_DBIT (CONL_ERROR_MASK | 0xa6) /* D-bit proc not supp */ +#define E_CO_DTE_RCCON (CONL_ERROR_MASK | 0xa7) /* reg/canell confirmed */ + +#define E_CO_OSI_NSP (CONL_ERROR_MASK | 0xe0) /* OSI net svc problem */ +#define E_CO_OSI_DISCT (CONL_ERROR_MASK | 0xe1) /* disconnect transient */ +#define E_CO_OSI_DISCP (CONL_ERROR_MASK | 0xe2) 
/* disconnect permanent */ +#define E_CO_OSI_REJT (CONL_ERROR_MASK | 0xe3) /* reject transient */ +#define E_CO_OSI_REJP (CONL_ERROR_MASK | 0xe4) /* reject permanent */ +#define E_CO_OSI_QOST (CONL_ERROR_MASK | 0xe5) /* reject QOS transient */ +#define E_CO_OSI_QOSP (CONL_ERROR_MASK | 0xe6) /* reject QOS permanent */ +#define E_CO_OSI_NSAPT (CONL_ERROR_MASK | 0xe7) /* NSAP unreach transient */ +#define E_CO_OSI_NSAPP (CONL_ERROR_MASK | 0xe8) /* NSAP unreach permanent */ +#define E_CO_OSI_RESET (CONL_ERROR_MASK | 0xe9) /* reset no reason */ +#define E_CO_OSI_CONGEST (CONL_ERROR_MASK | 0xea) /* reset congestion */ +#define E_CO_OSI_UNSAP (CONL_ERROR_MASK | 0xeb) /* unknown NSAP permanent */ + +#define E_CO_HLI_INIT (CONL_ERROR_MASK | 0xf0) /* higher level initiated*/ +#define E_CO_HLI_DISCN (CONL_ERROR_MASK | 0xf1) /* disconnect normal */ +#define E_CO_HLI_DISCA (CONL_ERROR_MASK | 0xf2) /* disconnect abnormal */ +#define E_CO_HLI_DISCI (CONL_ERROR_MASK | 0xf3) /* disconnect incompatible*/ +#define E_CO_HLI_REJT (CONL_ERROR_MASK | 0xf4) /* reject transient */ +#define E_CO_HLI_REJP (CONL_ERROR_MASK | 0xf5) /* reject permanent */ +#define E_CO_HLI_QOST (CONL_ERROR_MASK | 0xf6) /* reject QOS transient */ +#define E_CO_HLI_QOSP (CONL_ERROR_MASK | 0xf7) /* reject QOS permanent */ +#define E_CO_HLI_REJI (CONL_ERROR_MASK | 0xf8) /* reject incompatible */ +#define E_CO_HLI_PROTOID (CONL_ERROR_MASK | 0xf9) /* unrecog proto id */ +#define E_CO_HLI_RESYNC (CONL_ERROR_MASK | 0xfa) /* reset - user resync */ + +/* Cause on 8208 CLEAR field */ +#define E_CO_NUMBERBUSY (CONL_ERROR_MASK | 0x101) /* Number busy */ +#define E_CO_INVFACREQ (CONL_ERROR_MASK | 0x103) /* invalid facil req */ +#define E_CO_NETCONGEST (CONL_ERROR_MASK | 0x105) /* Network congestion */ +#define E_CO_OUTOFORDER (CONL_ERROR_MASK | 0x109) /* Out of order */ +#define E_CO_ACCESSBAR (CONL_ERROR_MASK | 0x10b) /* access barred */ +#define E_CO_NOTOBTAIN (CONL_ERROR_MASK | 0x10d) /* not obtainable */ +#define 
E_CO_REMPROCERR (CONL_ERROR_MASK | 0x111) /* Remote procedure err */ +#define E_CO_LOCPROCERR (CONL_ERROR_MASK | 0x113) /* Local procedure err */ +#define E_CO_RPOAOOO (CONL_ERROR_MASK | 0x115) /* RPOA out of order */ +#define E_CO_NOREVCHG (CONL_ERROR_MASK | 0x119) /* Revs chg not accepted*/ +#define E_CO_INCOMPAT (CONL_ERROR_MASK | 0x121) /* Incompatible dest */ +#define E_CO_NOFASTSEL (CONL_ERROR_MASK | 0x129) + /* Fast select accpt not subscribed */ +#define E_CO_NOSHIP (CONL_ERROR_MASK | 0x139) /* ship absent */ +#define E_CO_GWPROCERR (CONL_ERROR_MASK | 0x1c1) /* Gateway-detected err*/ +#define E_CO_GWCONGEST (CONL_ERROR_MASK | 0x1c3) /* Gateway congestion*/ + +/* ARGO only */ +#define E_CO_QFULL (CONL_ERROR_MASK | 0x100) /* dropped packet - queue full*/ +#define E_CO_AIWP (CONL_ERROR_MASK | 0x102) /* addr incompat w/proto */ +#define E_CO_CHAN (CONL_ERROR_MASK | 0x104) /* bad channel number */ + +/* ARGO only; driver specific */ +#define E_CO_NORESOURCES (CONL_ERROR_MASK | 0x1b0) /* eicon clogged */ +#define E_CO_PDNDOWN (CONL_ERROR_MASK | 0x1b1) /* physical net down */ +#define E_CO_DRVRCLRESET (CONL_ERROR_MASK | 0x1b2) /* driver clear/reset */ +#define E_CO_PDNCLRESET (CONL_ERROR_MASK | 0x1b3) /* PDN clear/reset */ +#define E_CO_DTECLRESET (CONL_ERROR_MASK | 0x1b4) /* board clear/reset */ +#define E_CO_UNKCLRESET (CONL_ERROR_MASK | 0x1b5) /* unexpected clr/rst */ + +#define CONL_ERROR_MAX 0x1c3 + +#endif /* __ISO_ERRNO__ */ diff --git a/bsd/netiso/iso_pcb.c b/bsd/netiso/iso_pcb.c new file mode 100644 index 000000000..defe1658e --- /dev/null +++ b/bsd/netiso/iso_pcb.c @@ -0,0 +1,635 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_pcb.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * Iso address family net-layer(s) pcb stuff. 
NEH 1/29/87 + */ + +#if ISO + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if TPCONS +#include +#include +#include +#endif + +#define PCBNULL (struct isopcb *)0 +struct iso_addr zeroiso_addr = { + 0 +}; + + +/* + * FUNCTION: iso_pcballoc + * + * PURPOSE: creates an isopcb structure in an mbuf, + * with socket (so), and + * puts it in the queue with head (head) + * + * RETURNS: 0 if OK, ENOBUFS if can't alloc the necessary mbuf + */ +int +iso_pcballoc(so, head) + struct socket *so; + struct isopcb *head; +{ + register struct isopcb *isop; + + IFDEBUG(D_ISO) + printf("iso_pcballoc(so 0x%x)\n", so); + ENDDEBUG + MALLOC(isop, struct isopcb *, sizeof(*isop), M_PCB, M_NOWAIT); + if (isop == NULL) + return ENOBUFS; + bzero((caddr_t)isop, sizeof(*isop)); + isop->isop_head = head; + isop->isop_socket = so; + insque(isop, head); + if (so) + so->so_pcb = (caddr_t)isop; + return 0; +} + +/* + * FUNCTION: iso_pcbbind + * + * PURPOSE: binds the address given in *(nam) to the socket + * specified by the isopcb in *(isop) + * If the given address is zero, it makes sure the + * address isn't already in use and if it's got a network + * portion, we look for an interface with that network + * address. If the address given is zero, we allocate + * a port and stuff it in the (nam) structure. + * + * RETURNS: errno E* or 0 if ok. + * + * SIDE EFFECTS: increments head->isop_lport if it allocates a port # + * + * NOTES: + */ +#define satosiso(sa) ((struct sockaddr_iso *)(sa)) +int +iso_pcbbind(isop, nam) + register struct isopcb *isop; + struct mbuf *nam; +{ + register struct isopcb *head = isop->isop_head; + register struct sockaddr_iso *siso; + struct iso_ifaddr *ia; + union { + char data[2]; + u_short s; + } suf; + + IFDEBUG(D_ISO) + printf("iso_pcbbind(isop 0x%x, nam 0x%x)\n", isop, nam); + ENDDEBUG + suf.s = 0; + if (iso_ifaddr == 0) /* any interfaces attached? 
*/ + return EADDRNOTAVAIL; + if (isop->isop_laddr) /* already bound */ + return EADDRINUSE; + if(nam == (struct mbuf *)0) { + isop->isop_laddr = &isop->isop_sladdr; + isop->isop_sladdr.siso_len = sizeof(struct sockaddr_iso); + isop->isop_sladdr.siso_family = AF_ISO; + isop->isop_sladdr.siso_tlen = 2; + isop->isop_sladdr.siso_nlen = 0; + isop->isop_sladdr.siso_slen = 0; + isop->isop_sladdr.siso_plen = 0; + goto noname; + } + siso = mtod(nam, struct sockaddr_iso *); + IFDEBUG(D_ISO) + printf("iso_pcbbind(name len 0x%x)\n", nam->m_len); + printf("The address is %s\n", clnp_iso_addrp(&siso->siso_addr)); + ENDDEBUG + /* + * We would like sort of length check but since some OSI addrs + * do not have fixed length, we can't really do much. + * The ONLY thing we can say is that an osi addr has to have + * at LEAST an afi and one more byte and had better fit into + * a struct iso_addr. + * However, in fact the size of the whole thing is a struct + * sockaddr_iso, so probably this is what we should check for. 
+ */ + if( (nam->m_len < 2) || (nam->m_len < siso->siso_len)) { + return ENAMETOOLONG; + } + if (siso->siso_nlen) { + /* non-zero net addr- better match one of our interfaces */ + IFDEBUG(D_ISO) + printf("iso_pcbbind: bind to NOT zeroisoaddr\n"); + ENDDEBUG + for (ia = iso_ifaddr; ia; ia = ia->ia_next) + if (SAME_ISOADDR(siso, &ia->ia_addr)) + break; + if (ia == 0) + return EADDRNOTAVAIL; + } + if (siso->siso_len <= sizeof (isop->isop_sladdr)) { + isop->isop_laddr = &isop->isop_sladdr; + } else { + if ((nam = m_copy(nam, 0, (int)M_COPYALL)) == 0) + return ENOBUFS; + isop->isop_laddr = mtod(nam, struct sockaddr_iso *); + } + bcopy((caddr_t)siso, (caddr_t)isop->isop_laddr, siso->siso_len); + if (siso->siso_tlen == 0) + goto noname; + if ((isop->isop_socket->so_options & SO_REUSEADDR) == 0 && + iso_pcblookup(head, 0, (caddr_t)0, isop->isop_laddr)) + return EADDRINUSE; + if (siso->siso_tlen <= 2) { + bcopy(TSEL(siso), suf.data, sizeof(suf.data)); + suf.s = ntohs(suf.s); + if((suf.s < ISO_PORT_RESERVED) && + (isop->isop_socket->so_state && SS_PRIV) == 0) + return EACCES; + } else { + register char *cp; +noname: + cp = TSEL(isop->isop_laddr); + IFDEBUG(D_ISO) + printf("iso_pcbbind noname\n"); + ENDDEBUG + do { + if (head->isop_lport++ < ISO_PORT_RESERVED || + head->isop_lport > ISO_PORT_USERRESERVED) + head->isop_lport = ISO_PORT_RESERVED; + suf.s = htons(head->isop_lport); + cp[0] = suf.data[0]; + cp[1] = suf.data[1]; + } while (iso_pcblookup(head, 0, (caddr_t)0, isop->isop_laddr)); + } + IFDEBUG(D_ISO) + printf("iso_pcbbind returns 0, suf 0x%x\n", suf); + ENDDEBUG + return 0; +} +/* + * FUNCTION: iso_pcbconnect + * + * PURPOSE: Make the isopcb (isop) look like it's connected. + * In other words, give it the peer address given in + * the mbuf * (nam). Make sure such a combination + * of local, peer addresses doesn't already exist + * for this protocol. Internet mentality prevails here, + * wherein a src,dst pair uniquely identifies a connection. 
+ * Both net address and port must be specified in argument + * (nam). + * If we don't have a local address for this socket yet, + * we pick one by calling iso_pcbbind(). + * + * RETURNS: errno E* or 0 if ok. + * + * SIDE EFFECTS: Looks up a route, which may cause one to be left + * in the isopcb. + * + * NOTES: + */ +int +iso_pcbconnect(isop, nam) + register struct isopcb *isop; + struct mbuf *nam; +{ + register struct sockaddr_iso *siso = mtod(nam, struct sockaddr_iso *); + int local_zero, error = 0; + struct iso_ifaddr *ia; + + IFDEBUG(D_ISO) + printf("iso_pcbconnect(isop 0x%x sock 0x%x nam 0x%x", + isop, isop->isop_socket, nam); + printf("nam->m_len 0x%x), addr:\n", nam->m_len); + dump_isoaddr(siso); + ENDDEBUG + if (nam->m_len < siso->siso_len) + return EINVAL; + if (siso->siso_family != AF_ISO) + return EAFNOSUPPORT; + if (siso->siso_nlen == 0) { + if (ia = iso_ifaddr) { + int nlen = ia->ia_addr.siso_nlen; + ovbcopy(TSEL(siso), nlen + TSEL(siso), + siso->siso_plen + siso->siso_tlen + siso->siso_slen); + bcopy((caddr_t)&ia->ia_addr.siso_addr, + (caddr_t)&siso->siso_addr, nlen + 1); + /* includes siso->siso_nlen = nlen; */ + } else + return EADDRNOTAVAIL; + } + /* + * Local zero means either not bound, or bound to a TSEL, but no + * particular local interface. So, if we want to send somebody + * we need to choose a return address. + */ + local_zero = + ((isop->isop_laddr == 0) || (isop->isop_laddr->siso_nlen == 0)); + if (local_zero) { + int flags; + + IFDEBUG(D_ISO) + printf("iso_pcbconnect localzero 1\n"); + ENDDEBUG + /* + * If route is known or can be allocated now, + * our src addr is taken from the i/f, else punt. 
+ */ + flags = isop->isop_socket->so_options & SO_DONTROUTE; + if (error = clnp_route(&siso->siso_addr, &isop->isop_route, flags, + (struct sockaddr **)0, &ia)) + return error; + IFDEBUG(D_ISO) + printf("iso_pcbconnect localzero 2, ro->ro_rt 0x%x", + isop->isop_route.ro_rt); + printf(" ia 0x%x\n", ia); + ENDDEBUG + } + IFDEBUG(D_ISO) + printf("in iso_pcbconnect before lookup isop 0x%x isop->sock 0x%x\n", + isop, isop->isop_socket); + ENDDEBUG + if (local_zero) { + int nlen, tlen, totlen; caddr_t oldtsel, newtsel; + siso = isop->isop_laddr; + if (siso == 0 || siso->siso_tlen == 0) + (void)iso_pcbbind(isop, (struct mbuf *)0); + /* + * Here we have problem of squezeing in a definite network address + * into an existing sockaddr_iso, which in fact may not have room + * for it. This gets messy. + */ + siso = isop->isop_laddr; + oldtsel = TSEL(siso); + tlen = siso->siso_tlen; + nlen = ia->ia_addr.siso_nlen; + totlen = tlen + nlen + _offsetof(struct sockaddr_iso, siso_data[0]); + if ((siso == &isop->isop_sladdr) && + (totlen > sizeof(isop->isop_sladdr))) { + struct mbuf *m = m_get(MT_SONAME, M_DONTWAIT); + if (m == 0) + return ENOBUFS; + m->m_len = totlen; + isop->isop_laddr = siso = mtod(m, struct sockaddr_iso *); + } + siso->siso_nlen = ia->ia_addr.siso_nlen; + newtsel = TSEL(siso); + ovbcopy(oldtsel, newtsel, tlen); + bcopy(ia->ia_addr.siso_data, siso->siso_data, nlen); + siso->siso_tlen = tlen; + siso->siso_family = AF_ISO; + siso->siso_len = totlen; + siso = mtod(nam, struct sockaddr_iso *); + } + IFDEBUG(D_ISO) + printf("in iso_pcbconnect before bcopy isop 0x%x isop->sock 0x%x\n", + isop, isop->isop_socket); + ENDDEBUG + /* + * If we had to allocate space to a previous big foreign address, + * and for some reason we didn't free it, we reuse it knowing + * that is going to be big enough, as sockaddrs are delivered in + * 128 byte mbufs. + * If the foreign address is small enough, we use default space; + * otherwise, we grab an mbuf to copy into. 
+ */ + if (isop->isop_faddr == 0 || isop->isop_faddr == &isop->isop_sfaddr) { + if (siso->siso_len <= sizeof(isop->isop_sfaddr)) + isop->isop_faddr = &isop->isop_sfaddr; + else { + struct mbuf *m = m_get(MT_SONAME, M_DONTWAIT); + if (m == 0) + return ENOBUFS; + isop->isop_faddr = mtod(m, struct sockaddr_iso *); + } + } + bcopy((caddr_t)siso, (caddr_t)isop->isop_faddr, siso->siso_len); + IFDEBUG(D_ISO) + printf("in iso_pcbconnect after bcopy isop 0x%x isop->sock 0x%x\n", + isop, isop->isop_socket); + printf("iso_pcbconnect connected to addr:\n"); + dump_isoaddr(isop->isop_faddr); + printf("iso_pcbconnect end: src addr:\n"); + dump_isoaddr(isop->isop_laddr); + ENDDEBUG + return 0; +} + +/* + * FUNCTION: iso_pcbdisconnect() + * + * PURPOSE: washes away the peer address info so the socket + * appears to be disconnected. + * If there's no file descriptor associated with the socket + * it detaches the pcb. + * + * RETURNS: Nada. + * + * SIDE EFFECTS: May detach the pcb. + * + * NOTES: + */ +void +iso_pcbdisconnect(isop) + struct isopcb *isop; +{ + void iso_pcbdetach(); + register struct sockaddr_iso *siso; + + IFDEBUG(D_ISO) + printf("iso_pcbdisconnect(isop 0x%x)\n", isop); + ENDDEBUG + /* + * Preserver binding infnormation if already bound. + */ + if ((siso = isop->isop_laddr) && siso->siso_nlen && siso->siso_tlen) { + caddr_t otsel = TSEL(siso); + siso->siso_nlen = 0; + ovbcopy(otsel, TSEL(siso), siso->siso_tlen); + } + if (isop->isop_faddr && isop->isop_faddr != &isop->isop_sfaddr) + m_freem(dtom(isop->isop_faddr)); + isop->isop_faddr = 0; + if (isop->isop_socket->so_state & SS_NOFDREF) + iso_pcbdetach(isop); +} + +/* + * FUNCTION: iso_pcbdetach + * + * PURPOSE: detach the pcb at *(isop) from it's socket and free + * the mbufs associated with the pcb.. + * Dequeues (isop) from its head. + * + * RETURNS: Nada. 
+ * + * SIDE EFFECTS: + * + * NOTES: + */ +void +iso_pcbdetach(isop) + struct isopcb *isop; +{ + struct socket *so = isop->isop_socket; + + IFDEBUG(D_ISO) + printf("iso_pcbdetach(isop 0x%x socket 0x%x so 0x%x)\n", + isop, isop->isop_socket, so); + ENDDEBUG +#if TPCONS + if (isop->isop_chan) { + register struct pklcd *lcp = (struct pklcd *)isop->isop_chan; + if (--isop->isop_refcnt > 0) + return; + if (lcp && lcp->lcd_state == DATA_TRANSFER) { + lcp->lcd_upper = 0; + lcp->lcd_upnext = 0; + pk_disconnect(lcp); + } + isop->isop_chan = 0; + } +#endif + if (so) { /* in the x.25 domain, we sometimes have no socket */ + so->so_pcb = 0; + sofree(so); + } + IFDEBUG(D_ISO) + printf("iso_pcbdetach 2 \n"); + ENDDEBUG + if (isop->isop_options) + (void)m_free(isop->isop_options); + IFDEBUG(D_ISO) + printf("iso_pcbdetach 3 \n"); + ENDDEBUG + if (isop->isop_route.ro_rt) + rtfree(isop->isop_route.ro_rt); + IFDEBUG(D_ISO) + printf("iso_pcbdetach 3.1\n"); + ENDDEBUG + if (isop->isop_clnpcache != NULL) { + struct clnp_cache *clcp = + mtod(isop->isop_clnpcache, struct clnp_cache *); + IFDEBUG(D_ISO) + printf("iso_pcbdetach 3.2: clcp 0x%x freeing clc_hdr x%x\n", + clcp, clcp->clc_hdr); + ENDDEBUG + if (clcp->clc_hdr != NULL) + m_free(clcp->clc_hdr); + IFDEBUG(D_ISO) + printf("iso_pcbdetach 3.3: freeing cache x%x\n", + isop->isop_clnpcache); + ENDDEBUG + m_free(isop->isop_clnpcache); + } + IFDEBUG(D_ISO) + printf("iso_pcbdetach 4 \n"); + ENDDEBUG + remque(isop); + IFDEBUG(D_ISO) + printf("iso_pcbdetach 5 \n"); + ENDDEBUG + if (isop->isop_laddr && (isop->isop_laddr != &isop->isop_sladdr)) + m_freem(dtom(isop->isop_laddr)); + FREE((caddr_t)isop, M_PCB); +} + + +/* + * FUNCTION: iso_pcbnotify + * + * PURPOSE: notify all connections in this protocol's queue (head) + * that have peer address (dst) of the problem (errno) + * by calling (notify) on the connections' isopcbs. + * + * RETURNS: Rien. + * + * SIDE EFFECTS: + * + * NOTES: (notify) is called at splimp! 
+ */ +void +iso_pcbnotify(head, siso, errno, notify) + struct isopcb *head; + register struct sockaddr_iso *siso; + int errno, (*notify)(); +{ + register struct isopcb *isop; + int s = splimp(); + + IFDEBUG(D_ISO) + printf("iso_pcbnotify(head 0x%x, notify 0x%x) dst:\n", head, notify); + ENDDEBUG + for (isop = head->isop_next; isop != head; isop = isop->isop_next) { + if (isop->isop_socket == 0 || isop->isop_faddr == 0 || + !SAME_ISOADDR(siso, isop->isop_faddr)) { + IFDEBUG(D_ISO) + printf("iso_pcbnotify: CONTINUE isop 0x%x, sock 0x%x\n" , + isop, isop->isop_socket); + printf("addrmatch cmp'd with (0x%x):\n", isop->isop_faddr); + dump_isoaddr(isop->isop_faddr); + ENDDEBUG + continue; + } + if (errno) + isop->isop_socket->so_error = errno; + if (notify) + (*notify)(isop); + } + splx(s); + IFDEBUG(D_ISO) + printf("END OF iso_pcbnotify\n" ); + ENDDEBUG +} + + +/* + * FUNCTION: iso_pcblookup + * + * PURPOSE: looks for a given combination of (faddr), (fport), + * (lport), (laddr) in the queue named by (head). + * Argument (flags) is ignored. + * + * RETURNS: ptr to the isopcb if it finds a connection matching + * these arguments, o.w. returns zero. 
+ * + * SIDE EFFECTS: + * + * NOTES: + */ +struct isopcb * +iso_pcblookup(head, fportlen, fport, laddr) + struct isopcb *head; + register struct sockaddr_iso *laddr; + caddr_t fport; + int fportlen; +{ + register struct isopcb *isop; + register caddr_t lp = TSEL(laddr); + unsigned int llen = laddr->siso_tlen; + + IFDEBUG(D_ISO) + printf("iso_pcblookup(head 0x%x laddr 0x%x fport 0x%x)\n", + head, laddr, fport); + ENDDEBUG + for (isop = head->isop_next; isop != head; isop = isop->isop_next) { + if (isop->isop_laddr == 0 || isop->isop_laddr == laddr) + continue; + if (isop->isop_laddr->siso_tlen != llen) + continue; + if (bcmp(lp, TSEL(isop->isop_laddr), llen)) + continue; + if (fportlen && isop->isop_faddr && + bcmp(fport, TSEL(isop->isop_faddr), (unsigned)fportlen)) + continue; + /* PHASE2 + * addrmatch1 should be iso_addrmatch(a, b, mask) + * where mask is taken from isop->isop_laddrmask (new field) + * isop_lnetmask will also be available in isop + if (laddr != &zeroiso_addr && + !iso_addrmatch1(laddr, &(isop->isop_laddr.siso_addr))) + continue; + */ + if (laddr->siso_nlen && (!SAME_ISOADDR(laddr, isop->isop_laddr))) + continue; + return (isop); + } + return (struct isopcb *)0; +} +#endif /* ISO */ diff --git a/bsd/netiso/iso_pcb.h b/bsd/netiso/iso_pcb.h new file mode 100644 index 000000000..d5cf7d4e1 --- /dev/null +++ b/bsd/netiso/iso_pcb.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_pcb.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#define MAXX25CRUDLEN 16 /* 16 bytes of call request user data */ + +/* + * Common structure pcb for argo protocol implementation. 
+ */ +struct isopcb { + struct isopcb *isop_next,*isop_prev; /* pointers to other pcb's */ + struct isopcb *isop_head; /* pointer back to chain of pcbs for + this protocol */ + struct socket *isop_socket; /* back pointer to socket */ + struct sockaddr_iso *isop_laddr; + struct sockaddr_iso *isop_faddr; + struct route_iso { + struct rtentry *ro_rt; + struct sockaddr_iso ro_dst; + } isop_route; /* CLNP routing entry */ + struct mbuf *isop_options; /* CLNP options */ + struct mbuf *isop_optindex; /* CLNP options index */ + struct mbuf *isop_clnpcache; /* CLNP cached hdr */ + caddr_t isop_chan; /* actually struct pklcb * */ + u_short isop_refcnt; /* mult TP4 tpcb's -> here */ + u_short isop_lport; /* MISLEADLING work var */ + u_short isop_tuba_cached; /* for tuba address ref cnts */ + int isop_x25crud_len; /* x25 call request ud */ + char isop_x25crud[MAXX25CRUDLEN]; + struct ifaddr *isop_ifa; /* ESIS interface assoc w/sock */ + struct sockaddr_iso isop_sladdr, /* preallocated laddr */ + isop_sfaddr; /* preallocated faddr */ +}; + +#ifdef sotorawcb +/* + * Common structure pcb for raw clnp protocol access. + * Here are clnp specific extensions to the raw control block, + * and space is allocated to the necessary sockaddrs. + */ +struct rawisopcb { + struct rawcb risop_rcb; /* common control block prefix */ + int risop_flags; /* flags, e.g. raw sockopts */ + struct isopcb risop_isop; /* space for bound addresses, routes etc.*/ +}; +#endif + +#define sotoisopcb(so) ((struct isopcb *)(so)->so_pcb) +#define sotorawisopcb(so) ((struct rawisopcb *)(so)->so_pcb) + +#ifdef KERNEL +struct isopcb *iso_pcblookup(); +#endif diff --git a/bsd/netiso/iso_proto.c b/bsd/netiso/iso_proto.c new file mode 100644 index 000000000..2fba142f3 --- /dev/null +++ b/bsd/netiso/iso_proto.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_proto.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * iso_proto.c : protocol switch tables in the ISO domain + * + * ISO protocol family includes TP, CLTP, CLNP, 8208 + * TP and CLNP are implemented here. + */ + +#if ISO +#include +#include +#include +#include +#include + +#include + +#include + +int clnp_output(), clnp_init(),clnp_slowtimo(),clnp_drain(); +int rclnp_input(), rclnp_output(), rclnp_ctloutput(), raw_usrreq(); +int clnp_usrreq(); + +int tp_ctloutput(), tpclnp_ctlinput(), tpclnp_input(), tp_usrreq(); +int tp_init(), tp_fasttimo(), tp_slowtimo(), tp_drain(); +int cons_init(), tpcons_input(); + +int isis_input(); +int esis_input(), esis_ctlinput(), esis_init(), esis_usrreq(); +int idrp_input(), idrp_init(), idrp_usrreq(); +int cltp_input(), cltp_ctlinput(), cltp_init(), cltp_usrreq(), cltp_output(); + +#if TUBA +int tuba_usrreq(), tuba_ctloutput(), tuba_init(), tuba_tcpinput(); +int tuba_slowtimo(), tuba_fasttimo(); +#endif + +struct protosw isosw[] = { +/* + * We need a datagram entry through which net mgmt programs can get + * to the iso_control procedure (iso ioctls). Thus, a minimal + * SOCK_DGRAM interface is provided here. + * THIS ONE MUST BE FIRST: Kludge city : socket() says if(!proto) call + * pffindtype, which gets the first entry that matches the type. + * sigh. + */ +{ SOCK_DGRAM, &isodomain, ISOPROTO_CLTP, PR_ATOMIC|PR_ADDR, + 0, cltp_output, 0, 0, + cltp_usrreq, + cltp_init, 0, 0, 0 +}, + +/* + * A datagram interface for clnp cannot co-exist with TP/CLNP + * because CLNP has no way to discriminate incoming TP packets from + * packets coming in for any other higher layer protocol. + * Old way: set it up so that pffindproto(... dgm, clnp) fails. + * New way: let pffindproto work (for x.25, thank you) but create + * a clnp_usrreq() that returns error on PRU_ATTACH. 
+ */ +{SOCK_DGRAM, &isodomain, ISOPROTO_CLNP, 0, + 0, clnp_output, 0, 0, + clnp_usrreq, + clnp_init, 0, clnp_slowtimo, clnp_drain, +}, + +/* raw clnp */ +{ SOCK_RAW, &isodomain, ISOPROTO_RAW, PR_ATOMIC|PR_ADDR, + rclnp_input, rclnp_output, 0, rclnp_ctloutput, + clnp_usrreq, + 0, 0, 0, 0 +}, + +/* ES-IS protocol */ +{ SOCK_DGRAM, &isodomain, ISOPROTO_ESIS, PR_ATOMIC|PR_ADDR, + esis_input, 0, esis_ctlinput, 0, + esis_usrreq, + esis_init, 0, 0, 0 +}, + +/* ISOPROTO_INTRAISIS */ +{ SOCK_DGRAM, &isodomain, ISOPROTO_INTRAISIS, PR_ATOMIC|PR_ADDR, + isis_input, 0, 0, 0, + esis_usrreq, + 0, 0, 0, 0 +}, + +/* ISOPROTO_IDRP */ +{ SOCK_DGRAM, &isodomain, ISOPROTO_IDRP, PR_ATOMIC|PR_ADDR, + idrp_input, 0, 0, 0, + idrp_usrreq, + idrp_init, 0, 0, 0 +}, + +/* ISOPROTO_TP */ +{ SOCK_SEQPACKET, &isodomain, ISOPROTO_TP, PR_CONNREQUIRED|PR_WANTRCVD, + tpclnp_input, 0, tpclnp_ctlinput, tp_ctloutput, + tp_usrreq, + tp_init, tp_fasttimo, tp_slowtimo, tp_drain, +}, + +#if TUBA +{ SOCK_STREAM, &isodomain, ISOPROTO_TCP, PR_CONNREQUIRED|PR_WANTRCVD, + tuba_tcpinput, 0, 0, tuba_ctloutput, + tuba_usrreq, + tuba_init, tuba_fasttimo, tuba_fasttimo, 0 +}, +#endif + +#if TPCONS +/* ISOPROTO_TP */ +{ SOCK_SEQPACKET, &isodomain, ISOPROTO_TP0, PR_CONNREQUIRED|PR_WANTRCVD, + tpcons_input, 0, 0, tp_ctloutput, + tp_usrreq, + cons_init, 0, 0, 0, +}, +#endif + +}; + + +struct domain isodomain = { + AF_ISO, /* family */ + "iso-domain", /* name */ + 0, /* initialize routine */ + 0, /* externalize access rights */ + 0, /* dispose of internalized rights */ + isosw, /* protosw */ + &isosw[sizeof(isosw)/sizeof(isosw[0])], /* NPROTOSW */ + 0, /* next */ + rn_inithead, /* rtattach */ + 48, /* rtoffset */ + sizeof(struct sockaddr_iso) /* maxkeylen */ +}; +#endif /* ISO */ diff --git a/bsd/netiso/iso_snpac.c b/bsd/netiso/iso_snpac.c new file mode 100644 index 000000000..23e0edb7c --- /dev/null +++ b/bsd/netiso/iso_snpac.c @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_snpac.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#include +#include + +#if ISO +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +int iso_systype = SNPA_ES; /* default to be an ES */ +extern short esis_holding_time, esis_config_time, esis_esconfig_time; +extern struct timeval time; +extern void esis_config(); +extern int hz; +static void snpac_fixdstandmask(); + +struct sockaddr_iso blank_siso = {sizeof(blank_siso), AF_ISO}; +extern u_long iso_hashchar(); +static struct sockaddr_iso + dst = {sizeof(dst), AF_ISO}, + gte = {sizeof(dst), AF_ISO}, + src = {sizeof(dst), AF_ISO}, + msk = {sizeof(dst), AF_ISO}, + zmk = {0}; +#define zsi blank_siso +#define zero_isoa zsi.siso_addr +#define zap_isoaddr(a, b) {Bzero(&a.siso_addr, sizeof(*r)); r = b; \ + Bcopy(r, &a.siso_addr, 1 + (r)->isoa_len);} +#define S(x) ((struct sockaddr *)&(x)) + +static struct sockaddr_dl blank_dl = {sizeof(blank_dl), AF_LINK}; +static struct sockaddr_dl gte_dl; +#define zap_linkaddr(a, b, c, i) \ + (*a = blank_dl, bcopy(b, a->sdl_data, a->sdl_alen = c), a->sdl_index = i) + +/* + * We only keep track of a single IS at a time. + */ +struct rtentry *known_is; + +/* + * Addresses taken from NBS agreements, December 1987. + * + * These addresses assume on-the-wire transmission of least significant + * bit first. This is the method used by 802.3. When these + * addresses are passed to the token ring driver, (802.5), they + * must be bit-swaped because 802.5 transmission order is MSb first. + * + * Furthermore, according to IBM Austin, these addresses are not + * true token ring multicast addresses. More work is necessary + * to get multicast to work right on token ring. 
+ * + * Currently, the token ring driver does not handle multicast, so + * these addresses are converted into the broadcast address in + * lan_output() That means that if these multicast addresses change + * the token ring driver must be altered. + */ +char all_es_snpa[] = { 0x09, 0x00, 0x2b, 0x00, 0x00, 0x04 }; +char all_is_snpa[] = { 0x09, 0x00, 0x2b, 0x00, 0x00, 0x05 }; +char all_l1is_snpa[] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x14}; +char all_l2is_snpa[] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x15}; + +union sockunion { + struct sockaddr_iso siso; + struct sockaddr_dl sdl; + struct sockaddr sa; +}; + +/* + * FUNCTION: llc_rtrequest + * + * PURPOSE: Manage routing table entries specific to LLC for ISO. + * + * NOTES: This does a lot of obscure magic; + */ +llc_rtrequest(req, rt, sa) +int req; +register struct rtentry *rt; +struct sockaddr *sa; +{ + register union sockunion *gate = (union sockunion *)rt->rt_gateway; + register struct llinfo_llc *lc = (struct llinfo_llc *)rt->rt_llinfo, *lc2; + struct rtentry *rt2; + struct ifnet *ifp = rt->rt_ifp; + int addrlen = ifp->if_addrlen; +#define LLC_SIZE 3 /* XXXXXX do this right later */ + + IFDEBUG (D_SNPA) + printf("llc_rtrequest(%d, %x, %x)\n", req, rt, sa); + ENDDEBUG + if (rt->rt_flags & RTF_GATEWAY) + return; + else switch (req) { + case RTM_ADD: + /* + * Case 1: This route may come from a route to iface with mask + * or from a default route. + */ + if (rt->rt_flags & RTF_CLONING) { + iso_setmcasts(ifp, req); + rt_setgate(rt, rt_key(rt), &blank_dl); + return; + } + if (lc != 0) + return; /* happens on a route change */ + /* FALLTHROUGH */ + case RTM_RESOLVE: + /* + * Case 2: This route may come from cloning, or a manual route + * add with a LL address. 
+ */ + if (gate->sdl.sdl_family != AF_LINK) { + log(LOG_DEBUG, "llc_rtrequest: got non-link non-gateway route\n"); + break; + } + R_Malloc(lc, struct llinfo_llc *, sizeof (*lc)); + rt->rt_llinfo = (caddr_t)lc; + if (lc == 0) { + log(LOG_DEBUG, "llc_rtrequest: malloc failed\n"); + break; + } + Bzero(lc, sizeof(*lc)); + lc->lc_rt = rt; + rt->rt_flags |= RTF_LLINFO; + insque(lc, &llinfo_llc); + if (gate->sdl.sdl_alen == sizeof(struct esis_req) + addrlen) { + gate->sdl.sdl_alen -= sizeof(struct esis_req); + bcopy(addrlen + LLADDR(&gate->sdl), + (caddr_t)&lc->lc_er, sizeof(lc->lc_er)); + } else if (gate->sdl.sdl_alen == addrlen) + lc->lc_flags = (SNPA_ES | SNPA_VALID | SNPA_PERM); + break; + case RTM_DELETE: + if (rt->rt_flags & RTF_CLONING) + iso_setmcasts(ifp, req); + if (lc == 0) + return; + remque(lc); + Free(lc); + rt->rt_llinfo = 0; + rt->rt_flags &= ~RTF_LLINFO; + break; + } + if (rt->rt_rmx.rmx_mtu == 0) { + rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu - LLC_SIZE; + } +} +/* + * FUNCTION: iso_setmcasts + * + * PURPOSE: Enable/Disable ESIS/ISIS multicast reception on interfaces. 
+ * + * NOTES: This also does a lot of obscure magic; + */ +iso_setmcasts(ifp, req) + struct ifnet *ifp; + int req; +{ + static char *addrlist[] = + { all_es_snpa, all_is_snpa, all_l1is_snpa, all_l2is_snpa, 0}; + struct ifreq ifr; + register caddr_t *cpp; + int doreset = 0; + + bzero((caddr_t)&ifr, sizeof(ifr)); + for (cpp = (caddr_t *)addrlist; *cpp; cpp++) { + bcopy(*cpp, (caddr_t)ifr.ifr_addr.sa_data, 6); + if (req == RTM_ADD) + if (ether_addmulti(&ifr, (struct arpcom *)ifp) == ENETRESET) + doreset++; + else + if (ether_delmulti(&ifr, (struct arpcom *)ifp) == ENETRESET) + doreset++; + } + if (doreset) { + if (ifp->if_reset) + (*ifp->if_reset)(ifp->if_unit); + else + printf("iso_setmcasts: %s%d needs reseting to receive iso mcasts\n", + ifp->if_name, ifp->if_unit); + } +} +/* + * FUNCTION: iso_snparesolve + * + * PURPOSE: Resolve an iso address into snpa address + * + * RETURNS: 0 if addr is resolved + * errno if addr is unknown + * + * SIDE EFFECTS: + * + * NOTES: Now that we have folded the snpa cache into the routing + * table, we know there is no snpa address known for this + * destination. If we know of a default IS, then the address + * of the IS is returned. If no IS is known, then return the + * multi-cast address for "all ES" for this interface. + * + * NB: the last case described above constitutes the + * query configuration function 9542, sec 6.5 + * A mechanism is needed to prevent this function from + * being invoked if the system is an IS. 
+ */ +iso_snparesolve(ifp, dest, snpa, snpa_len) +struct ifnet *ifp; /* outgoing interface */ +struct sockaddr_iso *dest; /* destination */ +caddr_t snpa; /* RESULT: snpa to be used */ +int *snpa_len; /* RESULT: length of snpa */ +{ + struct llinfo_llc *sc; /* ptr to snpa table entry */ + caddr_t found_snpa; + int addrlen; + + /* + * This hack allows us to send esis packets that have the destination snpa + * addresss embedded in the destination nsap address + */ + if (dest->siso_data[0] == AFI_SNA) { + /* + * This is a subnetwork address. Return it immediately + */ + IFDEBUG(D_SNPA) + printf("iso_snparesolve: return SN address\n"); + ENDDEBUG + addrlen = dest->siso_nlen - 1; /* subtract size of AFI */ + found_snpa = (caddr_t) dest->siso_data + 1; + /* + * If we are an IS, we can't do much with the packet; + * Check if we know about an IS. + */ + } else if (iso_systype != SNPA_IS && known_is != 0 && + (sc = (struct llinfo_llc *)known_is->rt_llinfo) && + (sc->lc_flags & SNPA_VALID)) { + register struct sockaddr_dl *sdl = + (struct sockaddr_dl *)(known_is->rt_gateway); + found_snpa = LLADDR(sdl); + addrlen = sdl->sdl_alen; + } else if (ifp->if_flags & IFF_BROADCAST) { + /* + * no IS, no match. Return "all es" multicast address for this + * interface, as per Query Configuration Function (9542 sec 6.5) + * + * Note: there is a potential problem here. 
If the destination + * is on the subnet and it does not respond with a ESH, but + * does send back a TP CC, a connection could be established + * where we always transmit the CLNP packet to "all es" + */ + addrlen = ifp->if_addrlen; + found_snpa = (caddr_t)all_es_snpa; + } else + return (ENETUNREACH); + bcopy(found_snpa, snpa, *snpa_len = addrlen); + return (0); +} + + +/* + * FUNCTION: snpac_free + * + * PURPOSE: free an entry in the iso address map table + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: If there is a route entry associated with cache + * entry, then delete that as well + */ +snpac_free(lc) +register struct llinfo_llc *lc; /* entry to free */ +{ + register struct rtentry *rt = lc->lc_rt; + register struct iso_addr *r; + + if (known_is == rt) + known_is = 0; + if (rt && (rt->rt_flags & RTF_UP) && + (rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED))) { + RTFREE(rt); + rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway, rt_mask(rt), + rt->rt_flags, (struct rtentry **)0); + RTFREE(rt); + } +} + +/* + * FUNCTION: snpac_add + * + * PURPOSE: Add an entry to the snpa cache + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: If entry already exists, then update holding time. 
+ */ +snpac_add(ifp, nsap, snpa, type, ht, nsellength) +struct ifnet *ifp; /* interface info is related to */ +struct iso_addr *nsap; /* nsap to add */ +caddr_t snpa; /* translation */ +char type; /* SNPA_IS or SNPA_ES */ +u_short ht; /* holding time (in seconds) */ +int nsellength; /* nsaps may differ only in trailing bytes */ +{ + register struct llinfo_llc *lc; + register struct rtentry *rt; + struct rtentry *mrt = 0; + register struct iso_addr *r; /* for zap_isoaddr macro */ + int snpalen = min(ifp->if_addrlen, MAX_SNPALEN); + int new_entry = 0, index = ifp->if_index, iftype = ifp->if_type; + + IFDEBUG(D_SNPA) + printf("snpac_add(%x, %x, %x, %x, %x, %x)\n", + ifp, nsap, snpa, type, ht, nsellength); + ENDDEBUG + zap_isoaddr(dst, nsap); + rt = rtalloc1(S(dst), 0); + IFDEBUG(D_SNPA) + printf("snpac_add: rtalloc1 returns %x\n", rt); + ENDDEBUG + if (rt == 0) { + struct sockaddr *netmask; + int flags; + add: + if (nsellength) { + netmask = S(msk); flags = RTF_UP; + snpac_fixdstandmask(nsellength); + } else { + netmask = 0; flags = RTF_UP | RTF_HOST; + } + new_entry = 1; + zap_linkaddr((>e_dl), snpa, snpalen, index); + gte_dl.sdl_type = iftype; + if (rtrequest(RTM_ADD, S(dst), S(gte_dl), netmask, flags, &mrt) || + mrt == 0) + return (0); + rt = mrt; + rt->rt_refcnt--; + } else { + register struct sockaddr_dl *sdl = (struct sockaddr_dl *)rt->rt_gateway; + rt->rt_refcnt--; + if ((rt->rt_flags & RTF_LLINFO) == 0) + goto add; + if (nsellength && (rt->rt_flags & RTF_HOST)) { + if (rt->rt_refcnt == 0) { + rtrequest(RTM_DELETE, S(dst), (struct sockaddr *)0, + (struct sockaddr *)0, 0, (struct rtentry *)0); + rt = 0; + goto add; + } else { + static struct iso_addr nsap2; register char *cp; + nsap2 = *nsap; + cp = nsap2.isoa_genaddr + nsap->isoa_len - nsellength; + while (cp < (char *)(1 + &nsap2)) + *cp++ = 0; + (void) snpac_add(ifp, &nsap2, snpa, type, ht, nsellength); + } + } + if (sdl->sdl_family != AF_LINK || sdl->sdl_alen == 0) { + int old_sdl_len = sdl->sdl_len; + if 
(old_sdl_len < sizeof(*sdl)) { + log(LOG_DEBUG, "snpac_add: cant make room for lladdr\n"); + return (0); + } + zap_linkaddr(sdl, snpa, snpalen, index); + sdl->sdl_len = old_sdl_len; + sdl->sdl_type = iftype; + new_entry = 1; + } + } + if ((lc = (struct llinfo_llc *)rt->rt_llinfo) == 0) + panic("snpac_rtrequest"); + rt->rt_rmx.rmx_expire = ht + time.tv_sec; + lc->lc_flags = SNPA_VALID | type; + if ((type & SNPA_IS) && !(iso_systype & SNPA_IS)) + snpac_logdefis(rt); + return (new_entry); +} + +static void +snpac_fixdstandmask(nsellength) +{ + register char *cp = msk.siso_data, *cplim; + + cplim = cp + (dst.siso_nlen -= nsellength); + msk.siso_len = cplim - (char *)&msk; + msk.siso_nlen = 0; + while (cp < cplim) + *cp++ = -1; + while (cp < (char *)msk.siso_pad) + *cp++ = 0; + for (cp = dst.siso_data + dst.siso_nlen; cp < (char *)dst.siso_pad; ) + *cp++ = 0; +} + +/* + * FUNCTION: snpac_ioctl + * + * PURPOSE: Set/Get the system type and esis parameters + * + * RETURNS: 0 on success, or unix error code + * + * SIDE EFFECTS: + * + * NOTES: + */ +snpac_ioctl (so, cmd, data) +struct socket *so; +int cmd; /* ioctl to process */ +caddr_t data; /* data for the cmd */ +{ + register struct systype_req *rq = (struct systype_req *)data; + + IFDEBUG(D_IOCTL) + if (cmd == SIOCSSTYPE) + printf("snpac_ioctl: cmd set, type x%x, ht %d, ct %d\n", + rq->sr_type, rq->sr_holdt, rq->sr_configt); + else + printf("snpac_ioctl: cmd get\n"); + ENDDEBUG + + if (cmd == SIOCSSTYPE) { + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); + if ((rq->sr_type & (SNPA_ES|SNPA_IS)) == (SNPA_ES|SNPA_IS)) + return(EINVAL); + if (rq->sr_type & SNPA_ES) { + iso_systype = SNPA_ES; + } else if (rq->sr_type & SNPA_IS) { + iso_systype = SNPA_IS; + } else { + return(EINVAL); + } + esis_holding_time = rq->sr_holdt; + esis_config_time = rq->sr_configt; + if (esis_esconfig_time != rq->sr_esconfigt) { + untimeout(esis_config, (caddr_t)0); + esis_esconfig_time = rq->sr_esconfigt; + esis_config(); + } + } else if 
(cmd == SIOCGSTYPE) { + rq->sr_type = iso_systype; + rq->sr_holdt = esis_holding_time; + rq->sr_configt = esis_config_time; + rq->sr_esconfigt = esis_esconfig_time; + } else { + return (EINVAL); + } + return (0); +} + +/* + * FUNCTION: snpac_logdefis + * + * PURPOSE: Mark the IS passed as the default IS + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +snpac_logdefis(sc) +register struct rtentry *sc; +{ + register struct iso_addr *r; + register struct sockaddr_dl *sdl = (struct sockaddr_dl *)sc->rt_gateway; + register struct rtentry *rt; + + if (known_is == sc || !(sc->rt_flags & RTF_HOST)) + return; + if (known_is) { + RTFREE(known_is); + } + known_is = sc; + RTHOLD(sc); + rt = rtalloc1((struct sockaddr *)&zsi, 0); + if (rt == 0) + rtrequest(RTM_ADD, S(zsi), rt_key(sc), S(zmk), + RTF_DYNAMIC|RTF_GATEWAY, 0); + else { + if ((rt->rt_flags & RTF_DYNAMIC) && + (rt->rt_flags & RTF_GATEWAY) && rt_mask(rt)->sa_len == 0) + rt_setgate(rt, rt_key(rt), rt_key(sc)); + } +} + +/* + * FUNCTION: snpac_age + * + * PURPOSE: Time out snpac entries + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: When encountering an entry for the first time, snpac_age + * may delete up to SNPAC_AGE too many seconds. Ie. + * if the entry is added a moment before snpac_age is + * called, the entry will immediately have SNPAC_AGE + * seconds taken off the holding time, even though + * it has only been held a brief moment. + * + * The proper way to do this is set an expiry timeval + * equal to current time + holding time. Then snpac_age + * would time out entries where expiry date is older + * than the current time. 
+ */ +void +snpac_age() +{ + register struct llinfo_llc *lc, *nlc; + register struct rtentry *rt; + + timeout(snpac_age, (caddr_t)0, SNPAC_AGE * hz); + + for (lc = llinfo_llc.lc_next; lc != & llinfo_llc; lc = nlc) { + nlc = lc->lc_next; + if (lc->lc_flags & SNPA_VALID) { + rt = lc->lc_rt; + if (rt->rt_rmx.rmx_expire && rt->rt_rmx.rmx_expire < time.tv_sec) + snpac_free(lc); + } + } +} + +/* + * FUNCTION: snpac_ownmulti + * + * PURPOSE: Determine if the snpa address is a multicast address + * of the same type as the system. + * + * RETURNS: true or false + * + * SIDE EFFECTS: + * + * NOTES: Used by interface drivers when not in eavesdrop mode + * as interm kludge until + * real multicast addresses can be configured + */ +snpac_ownmulti(snpa, len) +caddr_t snpa; +u_int len; +{ + return (((iso_systype & SNPA_ES) && + (!bcmp(snpa, (caddr_t)all_es_snpa, len))) || + ((iso_systype & SNPA_IS) && + (!bcmp(snpa, (caddr_t)all_is_snpa, len)))); +} + +/* + * FUNCTION: snpac_flushifp + * + * PURPOSE: Flush entries associated with specific ifp + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +snpac_flushifp(ifp) +struct ifnet *ifp; +{ + register struct llinfo_llc *lc; + + for (lc = llinfo_llc.lc_next; lc != & llinfo_llc; lc = lc->lc_next) { + if (lc->lc_rt->rt_ifp == ifp && (lc->lc_flags & SNPA_VALID)) + snpac_free(lc); + } +} + +/* + * FUNCTION: snpac_rtrequest + * + * PURPOSE: Make a routing request + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: In the future, this should make a request of a user + * level routing daemon. 
+ */ +snpac_rtrequest(req, host, gateway, netmask, flags, ret_nrt) +int req; +struct iso_addr *host; +struct iso_addr *gateway; +struct iso_addr *netmask; +short flags; +struct rtentry **ret_nrt; +{ + register struct iso_addr *r; + + IFDEBUG(D_SNPA) + printf("snpac_rtrequest: "); + if (req == RTM_ADD) + printf("add"); + else if (req == RTM_DELETE) + printf("delete"); + else + printf("unknown command"); + printf(" dst: %s\n", clnp_iso_addrp(host)); + printf("\tgateway: %s\n", clnp_iso_addrp(gateway)); + ENDDEBUG + + + zap_isoaddr(dst, host); + zap_isoaddr(gte, gateway); + if (netmask) { + zap_isoaddr(msk, netmask); + msk.siso_nlen = 0; + msk.siso_len = msk.siso_pad - (u_char *)&msk; + } + + rtrequest(req, S(dst), S(gte), (netmask ? S(msk) : (struct sockaddr *)0), + flags, ret_nrt); +} + +/* + * FUNCTION: snpac_addrt + * + * PURPOSE: Associate a routing entry with an snpac entry + * + * RETURNS: nothing + * + * SIDE EFFECTS: + * + * NOTES: If a cache entry exists for gateway, then + * make a routing entry (host, gateway) and associate + * with gateway. + * + * If a route already exists and is different, first delete + * it. + * + * This could be made more efficient by checking + * the existing route before adding a new one. + */ +snpac_addrt(ifp, host, gateway, netmask) +struct ifnet *ifp; +struct iso_addr *host, *gateway, *netmask; +{ + register struct iso_addr *r; + + zap_isoaddr(dst, host); + zap_isoaddr(gte, gateway); + if (netmask) { + zap_isoaddr(msk, netmask); + msk.siso_nlen = 0; + msk.siso_len = msk.siso_pad - (u_char *)&msk; + rtredirect(S(dst), S(gte), S(msk), RTF_DONE, S(gte), 0); + } else + rtredirect(S(dst), S(gte), (struct sockaddr *)0, + RTF_DONE | RTF_HOST, S(gte), 0); +} +#endif /* ISO */ diff --git a/bsd/netiso/iso_snpac.h b/bsd/netiso/iso_snpac.h new file mode 100644 index 000000000..7f84b777f --- /dev/null +++ b/bsd/netiso/iso_snpac.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_snpac.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +#define MAX_SNPALEN 8 /* curiously equal to sizeof x.121 ( + plus 1 for nibble len) addr */ +struct snpa_req { + struct iso_addr sr_isoa; /* nsap address */ + u_char sr_len; /* length of snpa */ + u_char sr_snpa[MAX_SNPALEN]; /* snpa associated + with nsap address */ + u_char sr_flags; /* true if entry is valid */ + u_short sr_ht; /* holding time */ +}; + +#define SNPA_VALID 0x01 +#define SNPA_ES 0x02 +#define SNPA_IS 0x04 +#define SNPA_PERM 0x10 + +struct systype_req { + short sr_holdt; /* holding timer */ + short sr_configt; /* configuration timer */ + short sr_esconfigt; /* suggested ES configuration timer */ + char sr_type; /* SNPA_ES or SNPA_IS */ +}; + +struct esis_req { + short er_ht; /* holding time */ + u_char er_flags; /* type and validity */ +}; +/* + * Space for this structure gets added onto the end of a route + * going to an ethernet or other 802.[45x] device. + */ + +struct llinfo_llc { + struct llinfo_llc *lc_next; /* keep all llc routes linked */ + struct llinfo_llc *lc_prev; /* keep all llc routes linked */ + struct rtentry *lc_rt; /* backpointer to route */ + struct esis_req lc_er; /* holding time, etc */ +#define lc_ht lc_er.er_ht +#define lc_flags lc_er.er_flags +}; + + +/* ISO arp IOCTL data structures */ + +#define SIOCSSTYPE _IOW('a', 39, struct systype_req) /* set system type */ +#define SIOCGSTYPE _IOR('a', 40, struct systype_req) /* get system type */ + +#ifdef KERNEL +struct llinfo_llc llinfo_llc; /* head for linked lists */ +#endif /* KERNEL */ diff --git a/bsd/netiso/iso_var.h b/bsd/netiso/iso_var.h new file mode 100644 index 000000000..a7420c08f --- /dev/null +++ b/bsd/netiso/iso_var.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)iso_var.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ + +/* + * Interface address, iso version. One of these structures is + * allocated for each interface with an osi address. The ifaddr + * structure conatins the protocol-independent part + * of the structure, and is assumed to be first. + */ +struct iso_ifaddr { + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + int ia_snpaoffset; + struct iso_ifaddr *ia_next; /* next in list of iso addresses */ + struct sockaddr_iso ia_addr; /* reserve space for interface name */ + struct sockaddr_iso ia_dstaddr; /* reserve space for broadcast addr */ +#define ia_broadaddr ia_dstaddr + struct sockaddr_iso ia_sockmask; /* reserve space for general netmask */ +}; + +struct iso_aliasreq { + char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct sockaddr_iso ifra_addr; + struct sockaddr_iso ifra_dstaddr; + struct sockaddr_iso ifra_mask; + int ifra_snpaoffset; +}; + +struct iso_ifreq { + char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct sockaddr_iso ifr_Addr; +}; + +/* + * Given a pointer to an iso_ifaddr (ifaddr), + * return a pointer to the addr as a sockaddr_iso + */ +/* +#define IA_SIS(ia) ((struct sockaddr_iso *)(ia.ia_ifa->ifa_addr)) + * works if sockaddr_iso becomes variable sized. + */ +#define IA_SIS(ia) (&(((struct iso_ifaddr *)ia)->ia_addr)) + +#define SIOCDIFADDR_ISO _IOW('i',25, struct iso_ifreq) /* delete IF addr */ +#define SIOCAIFADDR_ISO _IOW('i',26, struct iso_aliasreq)/* add/chg IFalias */ +#define SIOCGIFADDR_ISO _IOWR('i',33, struct iso_ifreq) /* get ifnet address */ +#define SIOCGIFDSTADDR_ISO _IOWR('i',34, struct iso_ifreq) /* get dst address */ +#define SIOCGIFNETMASK_ISO _IOWR('i',37, struct iso_ifreq) /* get dst address */ + +/* + * This stuff should go in if.h or if_llc.h or someplace else, + * but for now . . . 
+ */ + +struct llc_etherhdr { + char dst[6]; + char src[6]; + char len[2]; + char llc_dsap; + char llc_ssap; + char llc_ui_byte; +}; + +struct snpa_hdr { + struct ifnet *snh_ifp; + char snh_dhost[6]; + char snh_shost[6]; + short snh_flags; +}; +#ifdef KERNEL +struct iso_ifaddr *iso_ifaddr; /* linked list of iso address ifaces */ +struct iso_ifaddr *iso_localifa(); /* linked list of iso address ifaces */ +struct ifqueue clnlintrq; /* clnl packet input queue */ +#endif /* KERNEL */ diff --git a/bsd/netiso/tp_astring.c b/bsd/netiso/tp_astring.c new file mode 100644 index 000000000..ee70ea369 --- /dev/null +++ b/bsd/netiso/tp_astring.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
/*
 * @(#)tp_astring.c	8.1 (Berkeley) 6/10/93
 */

/*
 * Printable names for the TP finite-state-machine states, indexed by
 * state number.  Used by the trace/debug code only.
 */
char *tp_sstring[] = {
	"ST_ERROR(0x0)",
	"TP_CLOSED(0x1)",
	"TP_CRSENT(0x2)",
	"TP_AKWAIT(0x3)",
	"TP_OPEN(0x4)",
	"TP_CLOSING(0x5)",
	"TP_REFWAIT(0x6)",
	"TP_LISTENING(0x7)",
	"TP_CONFIRMING(0x8)",
};

/*
 * Printable names for TP driver events (timers, incoming TPDU types
 * and user requests), indexed by event number.
 */
char *tp_estring[] = {
	"TM_inact(0x0)",
	"TM_retrans(0x1)",
	"TM_sendack(0x2)",
	"TM_notused(0x3)",
	"TM_reference(0x4)",
	"TM_data_retrans(0x5)",
	"ER_TPDU(0x6)",
	"CR_TPDU(0x7)",
	"DR_TPDU(0x8)",
	"DC_TPDU(0x9)",
	"CC_TPDU(0xa)",
	"AK_TPDU(0xb)",
	"DT_TPDU(0xc)",
	"XPD_TPDU(0xd)",
	"XAK_TPDU(0xe)",
	"T_CONN_req(0xf)",
	"T_DISC_req(0x10)",
	"T_LISTEN_req(0x11)",
	"T_DATA_req(0x12)",
	"T_XPD_req(0x13)",
	"T_USR_rcvd(0x14)",
	"T_USR_Xrcvd(0x15)",
	"T_DETACH(0x16)",
	"T_NETRESET(0x17)",
	"T_ACPT_req(0x18)",
};

/*
 * (patch continues: bsd/netiso/tp_clnp.h, preceded by its Apple Public
 * Source License 1.1 and Berkeley license headers exactly as in the
 * original patch.)
 */
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_clnp.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * AF_ISO net-dependent structures and include files + * + */ + + +#ifndef __TP_CLNP__ +#define __TP_CLNP__ + +#ifndef SOCK_STREAM +#include +#endif /* SOCK_STREAM */ + +#ifndef RTFREE +#include +#endif +#include +#include +#include +#ifndef IF_DEQUEUE +#include +#endif +#include + +struct isopcb tp_isopcb; + /* queue of active inpcbs for tp ; for tp with dod ip */ + +#endif /* __TP_CLNP__ */ diff --git a/bsd/netiso/tp_cons.c b/bsd/netiso/tp_cons.c new file mode 100644 index 000000000..400acfb60 --- /dev/null +++ b/bsd/netiso/tp_cons.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_cons.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
/*
 ******************************************************************/

/*
 * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison
 */
/*
 * ARGO TP
 * Here is where you find the iso- and cons-dependent code.  We've tried
 * to keep all net-level and (primarily) address-family-dependent stuff
 * out of the tp source, and everything here is reached indirectly
 * through a switch table (struct nl_protosw *) tpcb->tp_nlproto
 * (see tp_pcb.c).
 * The routines here are:
 *	tpcons_input: pullup and call tp_input w/ correct arguments
 *	tpcons_output: package a pkt for cons given an isopcb & some data
 *	cons_chan_to_tpcb: find a tpcb based on the channel #
 */

#if ISO
#if TPCONS

/*
 * NOTE(review): the #include targets below were lost when this patch
 * was extracted; the header names are reconstructed from the 4.4BSD
 * tp_cons.c and must be verified against the original xnu sources.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/route.h>

#include <netiso/iso.h>
#include <netiso/iso_errno.h>
#include <netiso/iso_pcb.h>
#include <netiso/cons.h>
#include <netiso/tp_param.h>
#include <netiso/tp_stat.h>
#include <netiso/tp_pcb.h>
#include <netiso/tp_tpdu.h>
#include <netiso/tp_trace.h>
#include <netiso/tp_meas.h>
#include <netiso/tp_seq.h>
#include <netiso/argo_debug.h>

#undef FALSE
#undef TRUE
#include <netccitt/x25.h>
#include <netccitt/pk.h>
#include <netccitt/pk_var.h>

#include <netiso/iso_var.h>
int tpcons_output();

/*
 * CALLED FROM:
 *	tp_route_to() for PRU_CONNECT
 * FUNCTION, ARGUMENTS, SIDE EFFECTS and RETURN VALUE:
 *	X.25 (CONS) version of iso_pcbconnect(): binds the pcb to the
 *	peer address in nam, attaches a packet-level channel (pklcd) and
 *	initiates the network connection.  Returns 0 or an errno; on
 *	failure the channel is released and isop_chan cleared.
 */
tpcons_pcbconnect(isop, nam)
	struct isopcb *isop;
	register struct mbuf *nam;
{
	int error;

	if (error = iso_pcbconnect(isop, nam))
		return error;
	if ((isop->isop_chan = (caddr_t) pk_attach((struct socket *)0)) == 0) {
		IFDEBUG(D_CCONS)
			/* FIX: original printed a stale 'error' here (always 0) */
			printf("tpcons_pcbconnect: no pklcd; returning ENOBUFS\n");
		ENDDEBUG
		return ENOBUFS;
	}
	if (error = cons_connect(isop)) {	/* if it doesn't work */
		/* oh, dear, throw packet away */
		pk_disconnect((struct pklcd *)isop->isop_chan);
		isop->isop_chan = 0;
	} else
		isop->isop_refcnt = 1;
	return error;
}


/*
 * CALLED FROM:
 *	cons (network-event notification)
 * FUNCTION and ARGUMENTS:
 *	Handle a control event (cmd) for the connection described by
 *	isop.  PRC_CONS_SEND_DONE fakes an ack for class-0 connections;
 *	PRC_ROUTEDEAD resets class-0 connections; everything else is
 *	passed on to tpclnp_ctlinput().
 *	THIS MAYBE BELONGS IN SOME OTHER PLACE??? but i think not -
 */
ProtoHook
tpcons_ctlinput(cmd, siso, isop)
	int cmd;
	struct sockaddr_iso *siso;
	struct isopcb *isop;
{
	register struct tp_pcb *tpcb = 0;

	if (isop->isop_socket)
		tpcb = (struct tp_pcb *)isop->isop_socket->so_pcb;
	switch (cmd) {

	case PRC_CONS_SEND_DONE:
		if (tpcb) {
			struct tp_event E;
			int error = 0;

			if (tpcb->tp_class == TP_CLASS_0) {
				/*
				 * Only if class is exactly class zero, not
				 * still in class negotiation: fake an ack.
				 * This pseudo-ack may happen before the CC
				 * arrives, but we HAVE to adjust the snduna
				 * as a result of the ack, WHENEVER it arrives.
				 */
				register SeqNum seq =
				    SEQ_ADD(tpcb, tpcb->tp_snduna, 1);

				IFTRACE(D_DATA)
					tptrace(TPPTmisc, "FAKE ACK seq cdt 1",
						seq, 0, 0, 0);
				ENDTRACE
				IFDEBUG(D_DATA)
					printf("FAKE ACK seq 0x%x cdt 1\n", seq);
				ENDDEBUG
				E.ATTR(AK_TPDU).e_cdt = 1;
				E.ATTR(AK_TPDU).e_seq = seq;
				E.ATTR(AK_TPDU).e_subseq = 0;
				E.ATTR(AK_TPDU).e_fcc_present = 0;
				error = DoEvent(AK_TPDU);
				if (error)
					tpcb->tp_sock->so_error = error;
			} /* else ignore it */
		}
		break;
	case PRC_ROUTEDEAD:
		if (tpcb && tpcb->tp_class == TP_CLASS_0) {
			tpiso_reset(isop);
			break;
		} /* else fall through */
	default:
		(void) tpclnp_ctlinput(cmd, siso);
		break;
	}
	return 0;
}

/*
 * CALLED FROM:
 *	cons's intr routine
 * FUNCTION and ARGUMENTS:
 *	Take a packet (m) from cons, pullup m as required by tp,
 *	ignore the socket argument, and call tp_input.
 *	No return value.
 */
ProtoHook
tpcons_input(m, faddr, laddr, channel)
	struct mbuf *m;
	struct sockaddr_iso *faddr, *laddr;
	caddr_t channel;
{
	if (m == MNULL)
		return 0;

	m = (struct mbuf *)tp_inputprep(m);

	IFDEBUG(D_TPINPUT)
		printf("tpcons_input before tp_input(m 0x%x)\n", m);
		dump_buf(m, 12 + m->m_len);
	ENDDEBUG
	tp_input(m, faddr, laddr, channel, tpcons_output, 0);
	return 0;
}


/*
 * CALLED FROM:
 *	tp_emit()
 * FUNCTION and ARGUMENTS:
 *	Take a packet (m0) from tp and package it so that cons will
 *	accept it: ensure a packet header mbuf, set the packet length,
 *	and hand it to pk_send().  isop is the isopcb structure; datalen
 *	is the length of the data in the mbuf string m0.
 * RETURN VALUE:
 *	whatever (E*) is returned from the net layer output routine.
 */
int
tpcons_output(isop, m0, datalen, nochksum)
	struct isopcb *isop;
	struct mbuf *m0;
	int datalen;
	int nochksum;
{
	register struct mbuf *m = m0;
	int error = 0;

	IFDEBUG(D_EMIT)
		printf(
		"tpcons_output(isop 0x%x, m 0x%x, len 0x%x socket 0x%x\n",
			isop, m0, datalen, isop->isop_socket);
	ENDDEBUG
	if (m == MNULL)
		return 0;
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* tp handed us a bare chain; prepend a packet-header mbuf */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			return ENOBUFS;
		m->m_next = m0;
	}
	m->m_pkthdr.len = datalen;
	if (isop->isop_chan == 0) {
		/* got a restart maybe?  try to re-attach and reconnect */
		if ((isop->isop_chan =
		    (caddr_t) pk_attach((struct socket *)0)) == 0) {
			IFDEBUG(D_CCONS)
				printf("tpcons_output: no pklcd\n");
			ENDDEBUG
			/*
			 * BUG FIX: the original code noted ENOBUFS here but
			 * fell through and called cons_connect() on the null
			 * channel; fail the output immediately instead.
			 */
			return ENOBUFS;
		}
		if (error = cons_connect(isop)) {
			pk_disconnect((struct pklcd *)isop->isop_chan);
			isop->isop_chan = 0;
			IFDEBUG(D_CCONS)
				printf("tpcons_output: can't reconnect\n");
			ENDDEBUG
		}
		/*
		 * NOTE(review): as in the original, the packet is not
		 * transmitted on this (re)connect path; presumably tp's
		 * retransmission machinery resends it — confirm upstream.
		 */
	} else {
		error = pk_send(isop->isop_chan, m);
		IncStat(ts_tpdu_sent);
	}
	return error;
}
/*
 * CALLED FROM:
 *	tp_error_emit()
 * FUNCTION and ARGUMENTS:
 *	Take a packet (m0) from tp and package it so that cons will
 *	accept it.  chan is the cons channel to use; datalen is the
 *	length of the data in the mbuf string m0.
 * RETURN VALUE:
 *	whatever (E*) is returned from the net layer output routine.
 */
int
tpcons_dg_output(chan, m0, datalen)
	caddr_t chan;
	struct mbuf *m0;
	int datalen;
{
	return tpcons_output(((struct pklcd *)chan)->lcd_upnext, m0,
	    datalen, 0);
}
#endif /* TPCONS */
#endif /* ISO */

/*
 * (patch continues: bsd/netiso/tp_driver.c, preceded by its Apple
 * Public Source License 1.1 header exactly as in the original patch.)
 */
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef lint +static char *rcsid = "$Header/**/$"; +#endif lint +#define _XEBEC_PG static + +#include "tp_states.h" + +static struct act_ent { + int a_newstate; + int a_action; +} statetable[] = { {0,0}, +#include "tp_states.init" +}; + +/* @(#)tp.trans 8.1 (Berkeley) 6/10/93 */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVERTRACE TPPTdriver +#define sbwakeup(sb) sowakeup(p->tp_sock, sb); +#define MCPY(d, w) (d ? 
m_copym(d, 0, (int)M_COPYALL, w): 0) + +static trick_hc = 1; + +int tp_emit(), + tp_goodack(), tp_goodXack(), + tp_stash() +; +void tp_indicate(), tp_getoptions(), + tp_soisdisconnecting(), tp_soisdisconnected(), + tp_recycle_tsuffix(), +#ifdef TP_DEBUG_TIMERS + tp_etimeout(), tp_euntimeout(), + tp_ctimeout(), tp_cuntimeout(), + tp_ctimeout_MIN(), +#endif + tp_freeref(), tp_detach(), + tp0_stash(), tp0_send(), + tp_netcmd(), tp_send() +; + +typedef struct tp_pcb tpcb_struct; + + + +typedef tpcb_struct tp_PCB_; + +#include "tp_events.h" + +_XEBEC_PG int _Xebec_action(a,e,p) +int a; +struct tp_event *e; +tp_PCB_ *p; +{ +switch(a) { +case -1: return tp_protocol_error(e,p); +case 0x1: + { + (void) tp_emit(DC_TPDU_type, p, 0, 0, MNULL); + } + break; +case 0x2: + { +# ifdef TP_DEBUG + if( e->ev_number != AK_TPDU ) + printf("TPDU 0x%x in REFWAIT!!!!\n", e->ev_number); +# endif TP_DEBUG + } + break; +case 0x3: + { + /* oh, man is this grotesque or what? */ + (void) tp_goodack(p, e->ev_union.EV_AK_TPDU.e_cdt, e->ev_union.EV_AK_TPDU.e_seq, e->ev_union.EV_AK_TPDU.e_subseq); + /* but it's necessary because this pseudo-ack may happen + * before the CC arrives, but we HAVE to adjust the + * snduna as a result of the ack, WHENEVER it arrives + */ + } + break; +case 0x4: + { + tp_detach(p); + } + break; +case 0x5: + { + p->tp_refstate = REF_OPEN; /* has timers ??? 
*/ + } + break; +case 0x6: + { + IFTRACE(D_CONN) + tptrace(TPPTmisc, "CR datalen data", e->ev_union.EV_CR_TPDU.e_datalen, e->ev_union.EV_CR_TPDU.e_data,0,0); + ENDTRACE + IFDEBUG(D_CONN) + printf("CR datalen 0x%x data 0x%x", e->ev_union.EV_CR_TPDU.e_datalen, e->ev_union.EV_CR_TPDU.e_data); + ENDDEBUG + p->tp_refstate = REF_OPEN; /* has timers */ + p->tp_fcredit = e->ev_union.EV_CR_TPDU.e_cdt; + + if (e->ev_union.EV_CR_TPDU.e_datalen > 0) { + /* n/a for class 0 */ + ASSERT(p->tp_Xrcv.sb_cc == 0); + sbappendrecord(&p->tp_Xrcv, e->ev_union.EV_CR_TPDU.e_data); + e->ev_union.EV_CR_TPDU.e_data = MNULL; + } + } + break; +case 0x7: + { + IncStat(ts_tp0_conn); + IFTRACE(D_CONN) + tptrace(TPPTmisc, "Confiming", p, 0,0,0); + ENDTRACE + IFDEBUG(D_CONN) + printf("Confirming connection: p" ); + ENDDEBUG + soisconnected(p->tp_sock); + (void) tp_emit(CC_TPDU_type, p, 0,0, MNULL) ; + p->tp_fcredit = 1; + } + break; +case 0x8: + { + IncStat(ts_tp4_conn); /* even though not quite open */ + IFTRACE(D_CONN) + tptrace(TPPTmisc, "Confiming", p, 0,0,0); + ENDTRACE + IFDEBUG(D_CONN) + printf("Confirming connection: p" ); + ENDDEBUG + tp_getoptions(p); + soisconnecting(p->tp_sock); + if ((p->tp_rx_strat & TPRX_FASTSTART) && (p->tp_fcredit > 0)) + p->tp_cong_win = p->tp_fcredit * p->tp_l_tpdusize; + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_cc_ticks); + } + break; +case 0x9: + { + IFDEBUG(D_CONN) + printf("event: CR_TPDU emit CC failed done " ); + ENDDEBUG + soisdisconnected(p->tp_sock); + tp_recycle_tsuffix(p); + tp_freeref(p->tp_lref); + tp_detach(p); + } + break; +case 0xa: + { + int error; + struct mbuf *data = MNULL; + + IFTRACE(D_CONN) + tptrace(TPPTmisc, "T_CONN_req flags ucddata", (int)p->tp_flags, + p->tp_ucddata, 0, 0); + ENDTRACE + data = MCPY(p->tp_ucddata, M_WAIT); + if (data) { + IFDEBUG(D_CONN) + printf("T_CONN_req.trans m_copy cc 0x%x\n", + p->tp_ucddata); + dump_mbuf(data, "sosnd @ T_CONN_req"); + ENDDEBUG + } + + if (error = 
tp_emit(CR_TPDU_type, p, 0, 0, data) ) + return error; /* driver WON'T change state; will return error */ + + p->tp_refstate = REF_OPEN; /* has timers */ + if(p->tp_class != TP_CLASS_0) { + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_cr_ticks); + } + } + break; +case 0xb: + { + sbflush(&p->tp_Xrcv); /* purge non-delivered data data */ + if (e->ev_union.EV_DR_TPDU.e_datalen > 0) { + sbappendrecord(&p->tp_Xrcv, e->ev_union.EV_DR_TPDU.e_data); + e->ev_union.EV_DR_TPDU.e_data = MNULL; + } + if (p->tp_state == TP_OPEN) + tp_indicate(T_DISCONNECT, p, 0); + else { + int so_error = ECONNREFUSED; + if (e->ev_union.EV_DR_TPDU.e_reason != (E_TP_NO_SESSION ^ TP_ERROR_MASK) && + e->ev_union.EV_DR_TPDU.e_reason != (E_TP_NO_CR_ON_NC ^ TP_ERROR_MASK) && + e->ev_union.EV_DR_TPDU.e_reason != (E_TP_REF_OVERFLOW ^ TP_ERROR_MASK)) + so_error = ECONNABORTED; + tp_indicate(T_DISCONNECT, p, so_error); + } + tp_soisdisconnected(p); + if (p->tp_class != TP_CLASS_0) { + if (p->tp_state == TP_OPEN ) { + tp_euntimeout(p, TM_data_retrans); /* all */ + tp_cuntimeout(p, TM_retrans); + tp_cuntimeout(p, TM_inact); + tp_cuntimeout(p, TM_sendack); + p->tp_flags &= ~TPF_DELACK; + } + tp_cuntimeout(p, TM_retrans); + if( e->ev_union.EV_DR_TPDU.e_sref != 0 ) + (void) tp_emit(DC_TPDU_type, p, 0, 0, MNULL); + } + } + break; +case 0xc: + { + if( e->ev_union.EV_DR_TPDU.e_sref != 0 ) + (void) tp_emit(DC_TPDU_type, p, 0, 0, MNULL); + /* reference timer already set - reset it to be safe (???) 
*/ + tp_euntimeout(p, TM_reference); /* all */ + tp_etimeout(p, TM_reference, (int)p->tp_refer_ticks); + } + break; +case 0xd: + { + tp_cuntimeout(p, TM_retrans); + tp_indicate(ER_TPDU, p, e->ev_union.EV_ER_TPDU.e_reason); + tp_soisdisconnected(p); + } + break; +case 0xe: + { + tp_cuntimeout(p, TM_retrans); + tp_soisdisconnected(p); + } + break; +case 0xf: + { + tp_indicate(ER_TPDU, p, e->ev_union.EV_ER_TPDU.e_reason); + tp_cuntimeout(p, TM_retrans); + tp_soisdisconnected(p); + } + break; +case 0x10: + { + tp_cuntimeout(p, TM_retrans); + tp_soisdisconnected(p); + } + break; +case 0x11: + { /* don't ask me why we have to do this - spec says so */ + (void) tp_emit(DR_TPDU_type, p, 0, E_TP_NO_SESSION, MNULL); + /* don't bother with retransmissions of the DR */ + } + break; +case 0x12: + { + tp_soisdisconnecting(p->tp_sock); + tp_indicate(ER_TPDU, p, e->ev_union.EV_ER_TPDU.e_reason); + tp_soisdisconnected(p); + tp_netcmd( p, CONN_CLOSE ); + } + break; +case 0x13: + { + if (p->tp_state == TP_OPEN) { + tp_euntimeout(p, TM_data_retrans); /* all */ + tp_cuntimeout(p, TM_inact); + tp_cuntimeout(p, TM_sendack); + } + tp_soisdisconnecting(p->tp_sock); + tp_indicate(ER_TPDU, p, e->ev_union.EV_ER_TPDU.e_reason); + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_dr_ticks); + (void) tp_emit(DR_TPDU_type, p, 0, E_TP_PROTO_ERR, MNULL); + } + break; +case 0x14: + { + tp_cuntimeout(p, TM_retrans); + IncStat(ts_tp0_conn); + p->tp_fcredit = 1; + soisconnected(p->tp_sock); + } + break; +case 0x15: + { + IFDEBUG(D_CONN) + printf("trans: CC_TPDU in CRSENT state flags 0x%x\n", + (int)p->tp_flags); + ENDDEBUG + IncStat(ts_tp4_conn); + p->tp_fref = e->ev_union.EV_CC_TPDU.e_sref; + p->tp_fcredit = e->ev_union.EV_CC_TPDU.e_cdt; + if ((p->tp_rx_strat & TPRX_FASTSTART) && (e->ev_union.EV_CC_TPDU.e_cdt > 0)) + p->tp_cong_win = e->ev_union.EV_CC_TPDU.e_cdt * p->tp_l_tpdusize; + tp_getoptions(p); + tp_cuntimeout(p, TM_retrans); + if (p->tp_ucddata) { + IFDEBUG(D_CONN) + 
printf("dropping user connect data cc 0x%x\n", + p->tp_ucddata->m_len); + ENDDEBUG + m_freem(p->tp_ucddata); + p->tp_ucddata = 0; + } + soisconnected(p->tp_sock); + if (e->ev_union.EV_CC_TPDU.e_datalen > 0) { + ASSERT(p->tp_Xrcv.sb_cc == 0); /* should be empty */ + sbappendrecord(&p->tp_Xrcv, e->ev_union.EV_CC_TPDU.e_data); + e->ev_union.EV_CC_TPDU.e_data = MNULL; + } + + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL); + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + } + break; +case 0x16: + { + struct mbuf *data = MNULL; + int error; + + IncStat(ts_retrans_cr); + p->tp_cong_win = 1 * p->tp_l_tpdusize; + data = MCPY(p->tp_ucddata, M_NOWAIT); + if(p->tp_ucddata) { + IFDEBUG(D_CONN) + printf("TM_retrans.trans m_copy cc 0x%x\n", data); + dump_mbuf(p->tp_ucddata, "sosnd @ TM_retrans"); + ENDDEBUG + if( data == MNULL ) + return ENOBUFS; + } + + p->tp_retrans --; + if( error = tp_emit(CR_TPDU_type, p, 0, 0, data) ) { + p->tp_sock->so_error = error; + } + tp_ctimeout(p, TM_retrans, (int)p->tp_cr_ticks); + } + break; +case 0x17: + { + IncStat(ts_conn_gaveup); + p->tp_sock->so_error = ETIMEDOUT; + tp_indicate(T_DISCONNECT, p, ETIMEDOUT); + tp_soisdisconnected(p); + } + break; +case 0x18: + { + int error; + struct mbuf *data = MCPY(p->tp_ucddata, M_WAIT); + + if( error = tp_emit(CC_TPDU_type, p, 0, 0, data) ) { + p->tp_sock->so_error = error; + } + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_cc_ticks); + } + break; +case 0x19: + { + int doack; + + /* + * Get rid of any confirm or connect data, so that if we + * crash or close, it isn't thought of as disconnect data. 
+ */ + if (p->tp_ucddata) { + m_freem(p->tp_ucddata); + p->tp_ucddata = 0; + } + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + tp_cuntimeout(p, TM_retrans); + soisconnected(p->tp_sock); + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + + /* see also next 2 transitions, if you make any changes */ + + doack = tp_stash(p, e); + IFDEBUG(D_DATA) + printf("tp_stash returns %d\n",doack); + ENDDEBUG + + if (doack) { + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL ); + tp_ctimeout(p, TM_sendack, (int)p->tp_keepalive_ticks); + } else + tp_ctimeout( p, TM_sendack, (int)p->tp_sendack_ticks); + + IFDEBUG(D_DATA) + printf("after stash calling sbwakeup\n"); + ENDDEBUG + } + break; +case 0x1a: + { + tp0_stash(p, e); + sbwakeup( &p->tp_sock->so_rcv ); + + IFDEBUG(D_DATA) + printf("after stash calling sbwakeup\n"); + ENDDEBUG + } + break; +case 0x1b: + { + int doack; /* tells if we must ack immediately */ + + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + sbwakeup( &p->tp_sock->so_rcv ); + + doack = tp_stash(p, e); + IFDEBUG(D_DATA) + printf("tp_stash returns %d\n",doack); + ENDDEBUG + + if(doack) + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL ); + else + tp_ctimeout_MIN( p, TM_sendack, (int)p->tp_sendack_ticks); + + IFDEBUG(D_DATA) + printf("after stash calling sbwakeup\n"); + ENDDEBUG + } + break; +case 0x1c: + { + IFTRACE(D_DATA) + tptrace(TPPTmisc, "NIW seq rcvnxt lcredit ", + e->ev_union.EV_DT_TPDU.e_seq, p->tp_rcvnxt, p->tp_lcredit, 0); + ENDTRACE + IncStat(ts_dt_niw); + m_freem(e->ev_union.EV_DT_TPDU.e_data); + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL ); + } + break; +case 0x1d: + { + if (p->tp_ucddata) { + m_freem(p->tp_ucddata); + p->tp_ucddata = 0; + } + (void) tp_goodack(p, e->ev_union.EV_AK_TPDU.e_cdt, e->ev_union.EV_AK_TPDU.e_seq, e->ev_union.EV_AK_TPDU.e_subseq); + tp_cuntimeout(p, TM_retrans); + + soisconnected(p->tp_sock); + IFTRACE(D_CONN) + struct socket *so = 
p->tp_sock; + tptrace(TPPTmisc, + "called sosiconn: so so_state rcv.sb_sel rcv.sb_flags", + so, so->so_state, so->so_rcv.sb_sel, so->so_rcv.sb_flags); + tptrace(TPPTmisc, + "called sosiconn 2: so_qlen so_error so_rcv.sb_cc so_head", + so->so_qlen, so->so_error, so->so_rcv.sb_cc, so->so_head); + ENDTRACE + + tp_ctimeout(p, TM_sendack, (int)p->tp_keepalive_ticks); + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + } + break; +case 0x1e: + { + if( p->tp_state == TP_AKWAIT ) { + if (p->tp_ucddata) { + m_freem(p->tp_ucddata); + p->tp_ucddata = 0; + } + tp_cuntimeout(p, TM_retrans); + soisconnected(p->tp_sock); + tp_ctimeout(p, TM_sendack, (int)p->tp_keepalive_ticks); + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + } + IFTRACE(D_XPD) + tptrace(TPPTmisc, "XPD tpdu accepted Xrcvnxt, e_seq datalen m_len\n", + p->tp_Xrcvnxt,e->ev_union.EV_XPD_TPDU.e_seq, e->ev_union.EV_XPD_TPDU.e_datalen, e->ev_union.EV_XPD_TPDU.e_data->m_len); + ENDTRACE + + p->tp_sock->so_state |= SS_RCVATMARK; + postevent(p->tp_sock, 0, EV_OOB); + e->ev_union.EV_XPD_TPDU.e_data->m_flags |= M_EOR; + sbinsertoob(&p->tp_Xrcv, e->ev_union.EV_XPD_TPDU.e_data); + IFDEBUG(D_XPD) + dump_mbuf(e->ev_union.EV_XPD_TPDU.e_data, "XPD TPDU: tp_Xrcv"); + ENDDEBUG + tp_indicate(T_XDATA, p, 0); + sbwakeup( &p->tp_Xrcv ); + + (void) tp_emit(XAK_TPDU_type, p, p->tp_Xrcvnxt, 0, MNULL); + SEQ_INC(p, p->tp_Xrcvnxt); + } + break; +case 0x1f: + { + if( p->tp_Xrcv.sb_cc == 0 ) { + /* kludge for select(): */ + /* p->tp_sock->so_state &= ~SS_OOBAVAIL; */ + } + } + break; +case 0x20: + { + IFTRACE(D_XPD) + tptrace(TPPTmisc, "XPD tpdu niw (Xrcvnxt, e_seq) or not cdt (cc)\n", + p->tp_Xrcvnxt, e->ev_union.EV_XPD_TPDU.e_seq, p->tp_Xrcv.sb_cc , 0); + ENDTRACE + if( p->tp_Xrcvnxt != e->ev_union.EV_XPD_TPDU.e_seq ) + IncStat(ts_xpd_niw); + if( p->tp_Xrcv.sb_cc ) { + /* might as well kick 'em again */ + tp_indicate(T_XDATA, p, 0); + IncStat(ts_xpd_dup); + } + m_freem(e->ev_union.EV_XPD_TPDU.e_data); + tp_ctimeout(p, TM_inact, 
(int)p->tp_inact_ticks); + /* don't send an xack because the xak gives "last one received", not + * "next one i expect" (dumb) + */ + } + break; +case 0x21: + { + struct socket *so = p->tp_sock; + + /* detach from parent socket so it can finish closing */ + if (so->so_head) { + if (!soqremque(so, 0) && !soqremque(so, 1)) + panic("tp: T_DETACH"); + so->so_head = 0; + } + tp_soisdisconnecting(p->tp_sock); + tp_netcmd( p, CONN_CLOSE); + tp_soisdisconnected(p); + } + break; +case 0x22: + { + struct socket *so = p->tp_sock; + struct mbuf *data = MNULL; + + /* detach from parent socket so it can finish closing */ + if (so->so_head) { + if (!soqremque(so, 0) && !soqremque(so, 1)) + panic("tp: T_DETACH"); + so->so_head = 0; + } + if (p->tp_state != TP_CLOSING) { + tp_soisdisconnecting(p->tp_sock); + data = MCPY(p->tp_ucddata, M_NOWAIT); + (void) tp_emit(DR_TPDU_type, p, 0, E_TP_NORMAL_DISC, data); + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_dr_ticks); + } + } + break; +case 0x23: + { + tp_soisdisconnecting(p->tp_sock); + tp_netcmd( p, CONN_CLOSE); + tp_soisdisconnected(p); + } + break; +case 0x24: + { + struct mbuf *data = MCPY(p->tp_ucddata, M_WAIT); + + if(p->tp_state == TP_OPEN) { + tp_euntimeout(p, TM_data_retrans); /* all */ + tp_cuntimeout(p, TM_inact); + tp_cuntimeout(p, TM_sendack); + p->tp_flags &= ~TPF_DELACK; + } + if (data) { + IFDEBUG(D_CONN) + printf("T_DISC_req.trans tp_ucddata 0x%x\n", + p->tp_ucddata); + dump_mbuf(data, "ucddata @ T_DISC_req"); + ENDDEBUG + } + tp_soisdisconnecting(p->tp_sock); + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_dr_ticks); + + if( trick_hc ) + return tp_emit(DR_TPDU_type, p, 0, e->ev_union.EV_T_DISC_req.e_reason, data); + } + break; +case 0x25: + { + int error; + struct mbuf *data = MCPY(p->tp_ucddata, M_WAIT); + + IncStat(ts_retrans_cc); + p->tp_retrans --; + p->tp_cong_win = 1 * p->tp_l_tpdusize; + + if( error = tp_emit(CC_TPDU_type, p, 0, 0, data) ) + 
p->tp_sock->so_error = error; + tp_ctimeout(p, TM_retrans, (int)p->tp_cc_ticks); + } + break; +case 0x26: + { + IncStat(ts_conn_gaveup); + tp_soisdisconnecting(p->tp_sock); + p->tp_sock->so_error = ETIMEDOUT; + tp_indicate(T_DISCONNECT, p, ETIMEDOUT); + (void) tp_emit(DR_TPDU_type, p, 0, E_TP_CONGEST, MNULL); + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_dr_ticks); + } + break; +case 0x27: + { + tp_euntimeout(p, TM_data_retrans); /* all */ + tp_cuntimeout(p, TM_inact); + tp_cuntimeout(p, TM_sendack); + + IncStat(ts_conn_gaveup); + tp_soisdisconnecting(p->tp_sock); + p->tp_sock->so_error = ETIMEDOUT; + tp_indicate(T_DISCONNECT, p, ETIMEDOUT); + (void) tp_emit(DR_TPDU_type, p, 0, E_TP_CONGEST_2, MNULL); + p->tp_retrans = p->tp_Nretrans; + tp_ctimeout(p, TM_retrans, (int)p->tp_dr_ticks); + } + break; +case 0x28: + { + p->tp_cong_win = 1 * p->tp_l_tpdusize; + /* resume XPD */ + if ( p->tp_Xsnd.sb_mb ) { + struct mbuf *m = m_copy(p->tp_Xsnd.sb_mb, 0, (int)p->tp_Xsnd.sb_cc); + int shift; + + IFTRACE(D_XPD) + tptrace(TPPTmisc, "XPD retrans: Xuna Xsndnxt sndnxt snduna", + p->tp_Xuna, p->tp_Xsndnxt, p->tp_sndnxt, + p->tp_snduna); + ENDTRACE + IFDEBUG(D_XPD) + dump_mbuf(m, "XPD retrans emitting M"); + ENDDEBUG + IncStat(ts_retrans_xpd); + p->tp_retrans --; + shift = max(p->tp_Nretrans - p->tp_retrans, 6); + (void) tp_emit(XPD_TPDU_type, p, p->tp_Xuna, 1, m); + tp_ctimeout(p, TM_retrans, ((int)p->tp_dt_ticks) << shift); + } + } + break; +case 0x29: + { + p->tp_rxtshift++; + (void) tp_data_retrans(p); + } + break; +case 0x2a: + { + p->tp_retrans --; + (void) tp_emit(DR_TPDU_type, p, 0, E_TP_DR_NO_REAS, MNULL); + IncStat(ts_retrans_dr); + tp_ctimeout(p, TM_retrans, (int)p->tp_dr_ticks); + } + break; +case 0x2b: + { + p->tp_sock->so_error = ETIMEDOUT; + p->tp_refstate = REF_FROZEN; + tp_recycle_tsuffix( p ); + tp_etimeout(p, TM_reference, (int)p->tp_refer_ticks); + } + break; +case 0x2c: + { + tp_freeref(p->tp_lref); + tp_detach(p); + } + break; +case 
0x2d: + { + if( p->tp_class != TP_CLASS_0) { + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + if ( e->ev_number == CC_TPDU ) + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL); + } + /* ignore it if class 0 - state tables are blank for this */ + } + break; +case 0x2e: + { + IFTRACE(D_DATA) + tptrace(TPPTmisc, "T_DATA_req sndnxt snduna fcredit, tpcb", + p->tp_sndnxt, p->tp_snduna, p->tp_fcredit, p); + ENDTRACE + + tp_send(p); + } + break; +case 0x2f: + { + int error = 0; + + /* resume XPD */ + if ( p->tp_Xsnd.sb_mb ) { + struct mbuf *m = m_copy(p->tp_Xsnd.sb_mb, 0, (int)p->tp_Xsnd.sb_cc); + /* m_copy doesn't preserve the m_xlink field, but at this pt. + * that doesn't matter + */ + + IFTRACE(D_XPD) + tptrace(TPPTmisc, "XPD req: Xuna Xsndnxt sndnxt snduna", + p->tp_Xuna, p->tp_Xsndnxt, p->tp_sndnxt, + p->tp_snduna); + ENDTRACE + IFDEBUG(D_XPD) + printf("T_XPD_req: sb_cc 0x%x\n", p->tp_Xsnd.sb_cc); + dump_mbuf(m, "XPD req emitting M"); + ENDDEBUG + error = + tp_emit(XPD_TPDU_type, p, p->tp_Xuna, 1, m); + p->tp_retrans = p->tp_Nretrans; + + tp_ctimeout(p, TM_retrans, (int)p->tp_rxtcur); + SEQ_INC(p, p->tp_Xsndnxt); + } + if(trick_hc) + return error; + } + break; +case 0x30: + { + struct sockbuf *sb = &p->tp_sock->so_snd; + + IFDEBUG(D_ACKRECV) + printf("GOOD ACK seq 0x%x cdt 0x%x\n", e->ev_union.EV_AK_TPDU.e_seq, e->ev_union.EV_AK_TPDU.e_cdt); + ENDDEBUG + if( p->tp_class != TP_CLASS_0) { + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + } + sbwakeup(sb); + IFDEBUG(D_ACKRECV) + printf("GOOD ACK new sndnxt 0x%x\n", p->tp_sndnxt); + ENDDEBUG + } + break; +case 0x31: + { + IFTRACE(D_ACKRECV) + tptrace(TPPTmisc, "BOGUS ACK fcc_present, tp_r_subseq e_subseq", + e->ev_union.EV_AK_TPDU.e_fcc_present, p->tp_r_subseq, e->ev_union.EV_AK_TPDU.e_subseq, 0); + ENDTRACE + if( p->tp_class != TP_CLASS_0 ) { + + if ( !e->ev_union.EV_AK_TPDU.e_fcc_present ) { + /* send ACK with FCC */ + IncStat( ts_ackreason[_ACK_FCC_] ); + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 
1, MNULL); + } + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + } + } + break; +case 0x32: + { + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + tp_cuntimeout(p, TM_retrans); + + sbwakeup( &p->tp_sock->so_snd ); + + /* resume normal data */ + tp_send(p); + } + break; +case 0x33: + { + IFTRACE(D_ACKRECV) + tptrace(TPPTmisc, "BOGUS XACK eventtype ", e->ev_number, 0, 0,0); + ENDTRACE + if( p->tp_class != TP_CLASS_0 ) { + tp_ctimeout(p, TM_inact, (int)p->tp_inact_ticks); + } + } + break; +case 0x34: + { + int timo; + IFTRACE(D_TIMER) + tptrace(TPPTsendack, -1, p->tp_lcredit, p->tp_sent_uwe, + p->tp_sent_lcdt, 0); + ENDTRACE + IncPStat(p, tps_n_TMsendack); + (void) tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL); + if (p->tp_fcredit == 0) { + if (p->tp_rxtshift < TP_MAXRXTSHIFT) + p->tp_rxtshift++; + timo = (p->tp_dt_ticks) << p->tp_rxtshift; + } else + timo = p->tp_sendack_ticks; + tp_ctimeout(p, TM_sendack, timo); + } + break; +case 0x35: + { + if (sbspace(&p->tp_sock->so_rcv) > 0) + tp0_openflow(p); + } + break; +case 0x36: + { + if( trick_hc ) { + SeqNum ack_thresh; + /* + * If the upper window edge has advanced a reasonable + * amount beyond what was known, send an ACK. + * A reasonable amount is 2 packets, unless the max window + * is only 1 or 2 packets, in which case we + * should send an ack for any advance in the upper window edge. + */ + LOCAL_CREDIT(p); + ack_thresh = SEQ_SUB(p, p->tp_lcredit + p->tp_rcvnxt, + (p->tp_maxlcredit > 2 ? 
2 : 1)); + if (SEQ_GT(p, ack_thresh, p->tp_sent_uwe)) { + IncStat(ts_ackreason[_ACK_USRRCV_]); + p->tp_flags &= ~TPF_DELACK; + return tp_emit(AK_TPDU_type, p, p->tp_rcvnxt, 0, MNULL); + } + } + } + break; +case 0x37: + { + if(trick_hc) + return ECONNABORTED; + } + break; +case 0x38: + { + ASSERT( p->tp_state != TP_LISTENING ); + tp_indicate(T_DISCONNECT, p, ECONNRESET); + tp_soisdisconnected(p); + } + break; + } +return 0; +} + +_XEBEC_PG int +_Xebec_index( e,p ) + struct tp_event *e; + tp_PCB_ *p; +{ +switch( (e->ev_number<<4)+(p->tp_state) ) { +case 0x12: + if ( p->tp_retrans > 0 ) return 0x1e; + else return 0x1f; +case 0x13: + if ( p->tp_retrans > 0 ) return 0x2f; + else return 0x30; +case 0x14: + if ( p->tp_retrans > 0 ) return 0x32; + else return 0x31; +case 0x15: + if ( p->tp_retrans > 0 ) return 0x34; + else return 0x35; +case 0x54: + if (p->tp_rxtshift < TP_NRETRANS) return 0x33; + else return 0x31; +case 0x64: + if (p->tp_class == TP_CLASS_0) return 0x1a; + else return 0x1b; +case 0x77: + if ( p->tp_class == TP_CLASS_0) return 0xd; + else return 0xe; +case 0x86: + if ( e->ev_union.EV_DR_TPDU.e_sref != 0 ) return 0x2; + else return 0x3; +case 0xa2: + if (p->tp_class == TP_CLASS_0) return 0x1c; + else return 0x1d; +case 0xb2: + if (p->tp_class == TP_CLASS_0) return 0x5; + else return 0x0; +case 0xb4: + if ( tp_goodack(p, e->ev_union.EV_AK_TPDU.e_cdt, e->ev_union.EV_AK_TPDU.e_seq, e->ev_union.EV_AK_TPDU.e_subseq) ) return 0x3a; + else return 0x3b; +case 0xc3: + if ( IN_RWINDOW( p, e->ev_union.EV_DT_TPDU.e_seq, + p->tp_rcvnxt, SEQ(p, p->tp_rcvnxt + p->tp_lcredit)) ) return 0x21; + else return 0x24; +case 0xc4: + if ( p->tp_class == TP_CLASS_0 ) return 0x22; + else if ( IN_RWINDOW( p, e->ev_union.EV_DT_TPDU.e_seq, + p->tp_rcvnxt, SEQ(p, p->tp_rcvnxt + p->tp_lcredit)) ) return 0x23; + else return 0x25; +case 0xd3: + if (p->tp_Xrcvnxt == e->ev_union.EV_XPD_TPDU.e_seq) return 0x27; + else return 0x2a; +case 0xd4: + if (p->tp_Xrcvnxt == 
e->ev_union.EV_XPD_TPDU.e_seq) return 0x27; + else return 0x29; +case 0xe4: + if ( tp_goodXack(p, e->ev_union.EV_XAK_TPDU.e_seq) ) return 0x3c; + else return 0x3d; +case 0x102: + if ( p->tp_class == TP_CLASS_0 ) return 0x2d; + else return 0x2e; +case 0x104: + if ( p->tp_class == TP_CLASS_0 ) return 0x2d; + else return 0x2e; +case 0x144: + if (p->tp_class == TP_CLASS_0) return 0x3f; + else return 0x40; +case 0x162: + if (p->tp_class == TP_CLASS_0) return 0x2b; + else return 0x2c; +case 0x172: + if ( p->tp_class != TP_CLASS_4 ) return 0x42; + else return 0x46; +case 0x174: + if ( p->tp_class != TP_CLASS_4 ) return 0x42; + else return 0x47; +case 0x177: + if ( p->tp_class != TP_CLASS_4 ) return 0x42; + else return 0x43; +case 0x188: + if ( p->tp_class == TP_CLASS_0 ) return 0xf; + else if (tp_emit(CC_TPDU_type, p, 0,0, MCPY(p->tp_ucddata, M_NOWAIT)) == 0) return 0x10; + else return 0x11; +default: return 0; +} /* end switch */ +} /* _Xebec_index() */ +static int inx[26][9] = { {0,0,0,0,0,0,0,0,0,}, + {0x0,0x0,0x0,0x0,0x31,0x0,0x0,0x0,0x0, }, + {0x0,0x0,-1,-1,-1,-1,0x0,0x0,0x0, }, + {0x0,0x0,0x0,0x0,0x3e,0x0,0x0,0x0,0x0, }, + {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0, }, + {0x0,0x0,0x0,0x0,0x0,0x0,0x36,0x0,0x0, }, + {0x0,0x0,0x0,0x0,-1,0x0,0x0,0x0,0x0, }, + {0x0,0x7,0x15,0x1b,-1,0x17,0x3,0xa,0x0, }, + {0x0,0x19,0x6,0x20,0x37,0x8,0x3,-1,0x0, }, + {0x0,0x14,0x13,0x13,0x13,0x16,-1,0xa,0x0, }, + {0x0,0x7,0x6,0x1,0x9,0x18,0x3,0xa,0x0, }, + {0x0,0x19,-1,0x1,0x37,0x8,0x3,0xa,0x0, }, + {0x0,0x7,-1,0x26,-1,0x8,0x3,0xa,0x0, }, + {0x0,0x7,0x6,-1,-1,0x8,0x3,0xa,0x0, }, + {0x0,0x7,0x6,-1,-1,0x8,0x3,0xa,0x0, }, + {0x0,0x7,0x6,0x1,-1,0x8,0x3,0xa,0x0, }, + {0x0,0x12,0x0,0x0,0x0,0x0,0x0,0x0,0x0, }, + {0x0,0x0,-1,0x2e,-1,0x0,0x4,0x0,0x2e, }, + {0x0,0xb,0x0,0x0,0x0,0x0,0x0,0x0,0x0, }, + {0x0,0x0,0x0,0x0,0x38,0x0,0x0,0x0,0x0, }, + {0x0,0x0,0x0,0x0,0x39,0x0,0x0,0x0,0x0, }, + {0x0,0x0,0x0,0x0,-1,0x0,0x41,0x0,0x0, }, + {0x0,0x0,0x0,0x0,0x28,0x0,0x41,0x0,0x0, }, + 
{0x0,0xc,-1,0x2c,0x0,0x2c,0x4,0xc,0x2c, }, + {0x0,0x49,-1,0x45,-1,0x44,0x48,-1,0x0, }, + {0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,-1, }, +}; +tp_driver(p, e) +register tp_PCB_ *p; +register struct tp_event *e; +{ + register int index, error=0; + struct act_ent *a; + static struct act_ent erroraction = {0,-1}; + + index = inx[1 + e->ev_number][p->tp_state]; + if(index<0) index=_Xebec_index(e, p); + if (index==0) { + a = &erroraction; + } else + a = &statetable[index]; + + if(a->a_action) + error = _Xebec_action( a->a_action, e, p ); + IFTRACE(D_DRIVER) + tptrace(DRIVERTRACE, a->a_newstate, p->tp_state, e->ev_number, a->a_action, 0); + ENDTRACE + if(error==0) + p->tp_state = a->a_newstate; + return error; +} diff --git a/bsd/netiso/tp_emit.c b/bsd/netiso/tp_emit.c new file mode 100644 index 000000000..07cfdd0d8 --- /dev/null +++ b/bsd/netiso/tp_emit.c @@ -0,0 +1,1015 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_emit.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * This file contains tp_emit() and tp_error_emit(), which + * form TPDUs and hand them to ip. + * They take data in the form of mbuf chain, allocate mbufs as + * necessary for headers, and set the fields as appropriate from + * information found in the tpcb and net-level pcb. + * + * The worst thing about this code is adding the variable-length + * options on a machine that requires alignment for any memory access + * that isn't of size 1. See the macro ADDOPTION() below. + * + * We don't do any concatenation. (There's a kludge to test the + * basic mechanism of separation under the 'w' tpdebug option, that's all.) 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef TRUE +#undef FALSE +#undef TRUE +#endif +#include +#include +#include + +void iso_gen_csum(); + + +/* Here is a mighty kludge. The token ring misorders packets if you + * fire them at it too fast, and TP sans checksum is "too fast", so + * we have introduced a delay when checksumming isn't used. + */ +char tp_delay = 0x00; /* delay to keep token ring from blowing it */ + +/* + * NAME: tp_emit() + * + * CALLED FROM: tp.trans and from tp_sbsend() + * + * FUNCTION and ARGUMENTS: + * Emits one tpdu of the type (dutype), of the format appropriate + * to the connection described by the pcb (tpcb), with sequence + * number (seq) (where appropriate), end-of-tsdu bit (eot) where + * appropriate, and with the data in the mbuf chain (data). + * For DR and ER tpdus, the argument (eot) is + * the reason for issuing the tpdu rather than an end-of-tsdu indicator. + * + * RETURNS: + * 0 OK + * ENOBUFS + * E* returned from net layer output rtn + * + * SIDE EFFECTS: + * + * NOTES: + * + * WE ASSUME that the tp header + all options will fit in ONE mbuf. + * If mbufs are 256 this will most likely be true, but if they are 128 it's + * possible that they won't. + * If you used every option on the CR + max. user data you'd overrun + * 112 but unless you used > 115 bytes for the security + * parameter, it would fit in a 256-byte mbuf (240 bytes for the header) + * We don't support the security parameter, so this isn't a problem. + * If security is added, we ought to remove this assumption. + * + * We do not implement the flow control confirmation "element of procedure". 
+ * A) it should not affect interoperability, + * B) it should not be necessary - the protocol will eventually + * straighten things out w/o FCC, as long as we don't have severely + * mismatched keepalive and inactivity timers, and + * C) it appears not to be REQUIRED, and + * D) it's incredibly grotesque, and no doubt will lengthen a few + * critical paths. + * HOWEVER, we're thinking about putting it in anyway, for + * completeness, just like we did with ack subsequencing. + */ + +int +tp_emit(dutype, tpcb, seq, eot, data) + int dutype; + struct tp_pcb *tpcb; + SeqNum seq; + u_int eot; + struct mbuf *data; +{ + register struct tpdu *hdr; + register struct mbuf *m; + int csum_offset=0; + int datalen = 0; + int error = 0; + SeqNum olduwe; + int acking_ooo; + + /* NOTE: + * here we treat tpdu_li as if it DID include the li field, up until + * the end, at which time we subtract 1 + * THis is because if we subtract 1 right away, we end up adding + * one every time we add an option. + */ + IFDEBUG(D_EMIT) + printf( + "tp_emit dutype 0x%x, tpcb 0x%x, eot 0x%x, seq 0x%x, data 0x%x", + dutype, tpcb, eot, seq, data); + ENDDEBUG + + if (dutype == CR_TPDU || dutype == CC_TPDU) { +// m = (struct mbuf *) malloc((u_long)256, M_MBUF, M_DONTWAIT); + MALLOC(m, struct mbuf *, 256, M_MBUF, M_NOWAIT); + if (m) { + m->m_type = TPMT_TPHDR; + mbstat.m_mtypes[TPMT_TPHDR]++; + m->m_next = MNULL; + m->m_nextpkt = MNULL; + m->m_data = m->m_pktdat; + m->m_flags = M_PKTHDR; + } + } else { + MGETHDR(m, M_DONTWAIT, TPMT_TPHDR); + } + m->m_data += max_hdr; + if (m == NULL) { + if(data != (struct mbuf *)0) + m_freem(data); + error = ENOBUFS; + goto done; + } + m->m_len = sizeof(struct tpdu); + m->m_act = MNULL; + + hdr = mtod(m, struct tpdu *); + bzero((caddr_t)hdr, sizeof(struct tpdu)); + + { + int tp_headersize(); + + hdr->tpdu_type = dutype; + hdr->tpdu_li = tp_headersize(dutype, tpcb); + /* + * class 0 doesn't use this for DT + * it'll just get overwritten below + */ + hdr->tpdu_dref = 
htons(tpcb->tp_fref); + if( tpcb->tp_use_checksum || + (dutype == CR_TPDU_type && (tpcb->tp_class & TP_CLASS_4) )) { + csum_offset = hdr->tpdu_li + 2; /* DOESN'T include csum */ + ADDOPTION(TPP_checksum, hdr, 2, eot /* dummy arg */); + IFDEBUG(D_CHKSUM) + printf( + "tp_emit: csum_offset 0x%x, hdr->tpdu_li 0x%x\n", + csum_offset, hdr->tpdu_li); + ENDDEBUG + } + /* + * VARIABLE PARTS... + */ + switch( dutype ) { + + case CR_TPDU_type: + hdr->tpdu_CRdref_0 = 0; /* must be zero */ + case CC_TPDU_type: + if (!tpcb->tp_cebit_off) { + tpcb->tp_win_recv = tp_start_win << 8; + LOCAL_CREDIT(tpcb); + CONG_INIT_SAMPLE(tpcb); + } else + LOCAL_CREDIT(tpcb); + +/* Case CC_TPDU_type used to be here */ + { + u_char x; + + hdr->tpdu_CCsref = htons(tpcb->tp_lref); /* same as CRsref */ + + if( tpcb->tp_class > TP_CLASS_1 ) { + tpcb->tp_sent_uwe = tpcb->tp_lcredit -1; + tpcb->tp_sent_rcvnxt = 1; + tpcb->tp_sent_lcdt = tpcb->tp_lcredit; + hdr->tpdu_cdt = tpcb->tp_lcredit; + } else { +#if TPCONS + if (tpcb->tp_netservice == ISO_CONS) { + struct isopcb *isop = (struct isopcb *)tpcb->tp_npcb; + struct pklcd *lcp = (struct pklcd *)(isop->isop_chan); + lcp->lcd_flags &= ~X25_DG_CIRCUIT; + } +#endif + hdr->tpdu_cdt = 0; + } + hdr->tpdu_CCclass = tp_mask_to_num(tpcb->tp_class); + hdr->tpdu_CCoptions = + (tpcb->tp_xtd_format? TPO_XTD_FMT:0) | + (tpcb->tp_use_efc? 
TPO_USE_EFC:0); + + IFPERF(tpcb) + u_char perf_meas = tpcb->tp_perf_on; + ADDOPTION(TPP_perf_meas, hdr, sizeof(perf_meas), perf_meas); + ENDPERF + + if( dutype == CR_TPDU_type ) { + IncStat(ts_CR_sent); + + ASSERT( tpcb->tp_lsuffixlen > 0 ); + ASSERT( tpcb->tp_fsuffixlen > 0 ); + + ADDOPTION(TPP_calling_sufx, hdr, + tpcb->tp_lsuffixlen, tpcb->tp_lsuffix[0]); + ADDOPTION(TPP_called_sufx, hdr, + tpcb->tp_fsuffixlen, tpcb->tp_fsuffix[0]); + } else { + IncStat(ts_CC_sent); + } + + ADDOPTION(TPP_tpdu_size, hdr, + sizeof(tpcb->tp_tpdusize), tpcb->tp_tpdusize); + + if (tpcb->tp_class != TP_CLASS_0) { + short millisec = 500*(tpcb->tp_sendack_ticks); + + millisec = htons(millisec); + ADDOPTION(TPP_acktime, hdr, sizeof(short), millisec); + + x = (tpcb->tp_use_nxpd? TPAO_USE_NXPD: 0) + | (tpcb->tp_use_rcc? TPAO_USE_RCC : 0) + | (tpcb->tp_use_checksum?0: TPAO_NO_CSUM) + | (tpcb->tp_xpd_service? TPAO_USE_TXPD: 0); + ADDOPTION(TPP_addl_opt, hdr, 1, x); + + if ((tpcb->tp_l_tpdusize ^ (1 << tpcb->tp_tpdusize)) != 0) { + u_short size_s = tpcb->tp_l_tpdusize >> 7; + u_char size_c = size_s; + ASSERT(tpcb->tp_l_tpdusize < 65536 * 128); + if (dutype == CR_TPDU_type) + tpcb->tp_ptpdusize = size_s; + if (size_s < 256) { + ADDOPTION(TPP_ptpdu_size, hdr, 1, size_c); + } else { + size_s = htons(size_s); + ADDOPTION(TPP_ptpdu_size, hdr, 2, size_s); + } + } + } + + if( (dutype == CR_TPDU_type) && (tpcb->tp_class != TP_CLASS_0)){ + + ASSERT( 1 == sizeof(tpcb->tp_vers) ); + ADDOPTION(TPP_vers, hdr, 1, tpcb->tp_vers); + + /* for each alt protocol class x, + * x = x<<4; + * option = concat(option, x); + * Well, for now we only have TP0 for an + * alternative so... this is easy. + * + * HOWEVER... There should be NO alt protocol + * class over CLNS. Need to see if the route suggests + * CONS, and iff so add alt class. 
+ */ + x = 0; + ADDOPTION(TPP_alt_class, hdr, 1, x); + } + + if( hdr->tpdu_li > MLEN) + panic("tp_emit CR/CC"); + } + break; + + case DR_TPDU_type: + if( hdr->tpdu_DRdref == 0 ) { + /* don't issue the DR */ + goto done; + } + hdr->tpdu_cdt = 0; + hdr->tpdu_DRsref = htons(tpcb->tp_lref); + hdr->tpdu_DRreason = (u_char)eot; /* WHICH BYTE OF THIS??? */ + + /* forget the add'l information variable part */ + IncStat(ts_DR_sent); + break; + + case DC_TPDU_type: /* not used in class 0 */ + ASSERT( tpcb->tp_class != TP_CLASS_0); + hdr->tpdu_DCsref = htons(tpcb->tp_lref); + hdr->tpdu_cdt = 0; + data = (struct mbuf *)0; + IncStat(ts_DC_sent); + break; + + case XAK_TPDU_type: /* xak not used in class 0 */ + ASSERT( tpcb->tp_class != TP_CLASS_0); /* fall through */ + hdr->tpdu_cdt = 0; + + IFTRACE(D_XPD) + tptraceTPCB(TPPTXack, seq, 0, 0, 0, 0); + ENDTRACE + data = (struct mbuf *)0; + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seq = seq; + seqeotX.s_eot = 1; + hdr->tpdu_seqeotX = htonl(seqeotX.s_seqeot); +#else + hdr->tpdu_XAKseqX = seq; +#endif /* BYTE_ORDER */ + } else { + hdr->tpdu_XAKseq = seq; + } + IncStat(ts_XAK_sent); + IncPStat(tpcb, tps_XAK_sent); + break; + + case XPD_TPDU_type: /* xpd not used in class 0 */ + ASSERT( tpcb->tp_class != TP_CLASS_0); /* fall through */ + hdr->tpdu_cdt = 0; + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seq = seq; + seqeotX.s_eot = 1; + hdr->tpdu_seqeotX = htonl(seqeotX.s_seqeot); +#else + hdr->tpdu_XPDseqX = seq; + hdr->tpdu_XPDeotX = 1; /* always 1 for XPD tpdu */ +#endif /* BYTE_ORDER */ + } else { + hdr->tpdu_XPDseq = seq; + hdr->tpdu_XPDeot = 1; /* always 1 for XPD tpdu */ + } + IncStat(ts_XPD_sent); + IncPStat(tpcb, tps_XPD_sent); + + /* kludge to test the input size checking */ + IFDEBUG(D_SIZE_CHECK) + /*if(data->m_len <= 16 && data->m_off < (MLEN-18) ) { + printf("Sending too much data on XPD: 18 bytes\n"); + data->m_len = 18; + }*/ + ENDDEBUG 
+ break; + + case DT_TPDU_type: + hdr->tpdu_cdt = 0; + IFTRACE(D_DATA) + tptraceTPCB(TPPTmisc, "emit DT: eot seq tpdu_li", eot, seq, + hdr->tpdu_li, 0); + ENDTRACE + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seq = seq; + seqeotX.s_eot = eot; + hdr->tpdu_seqeotX = htonl(seqeotX.s_seqeot); +#else + hdr->tpdu_DTseqX = seq; + hdr->tpdu_DTeotX = eot; +#endif /* BYTE_ORDER */ + } else if (tpcb->tp_class == TP_CLASS_0) { + IFDEBUG(D_EMIT) + printf("DT tpdu: class 0 m 0x%x hdr 0x%x\n", m, hdr); + dump_buf( hdr, hdr->tpdu_li + 1 ); + ENDDEBUG + ((struct tp0du *)hdr)->tp0du_eot = eot; + ((struct tp0du *)hdr)->tp0du_mbz = 0; + IFDEBUG(D_EMIT) + printf("DT 2 tpdu: class 0 m 0x%x hdr 0x%x\n", m, hdr); + dump_buf( hdr, hdr->tpdu_li + 1 ); + ENDDEBUG + } else { + hdr->tpdu_DTseq = seq; + hdr->tpdu_DTeot = eot; + } + if(eot) { + IncStat(ts_EOT_sent); + } + IncStat(ts_DT_sent); + IncPStat(tpcb, tps_DT_sent); + break; + + case AK_TPDU_type:/* ak not used in class 0 */ + ASSERT( tpcb->tp_class != TP_CLASS_0); + data = (struct mbuf *)0; + olduwe = tpcb->tp_sent_uwe; + + if (seq != tpcb->tp_sent_rcvnxt || tpcb->tp_rsycnt == 0) { + LOCAL_CREDIT( tpcb ); + tpcb->tp_sent_uwe = + SEQ(tpcb,tpcb->tp_rcvnxt + tpcb->tp_lcredit -1); + tpcb->tp_sent_lcdt = tpcb->tp_lcredit; + acking_ooo = 0; + } else + acking_ooo = 1; + + IFDEBUG(D_RENEG) + /* occasionally fake a reneging so + you can test subsequencing */ + if( olduwe & 0x1 ) { + tpcb->tp_reneged = 1; + IncStat(ts_ldebug); + } + ENDDEBUG + /* Are we about to reneg on credit? + * When might we do so? + * a) when using optimistic credit (which we no longer do). + * b) when drain() gets implemented (not in the plans). + * c) when D_RENEG is on. + * d) when DEC BIT response is implemented. 
+ * (not- when we do this, we'll need to implement flow control + * confirmation) + */ + if( SEQ_LT(tpcb, tpcb->tp_sent_uwe, olduwe) ) { + tpcb->tp_reneged = 1; + IncStat(ts_lcdt_reduced); + IFTRACE(D_CREDIT) + tptraceTPCB(TPPTmisc, + "RENEG: olduwe newuwe lcredit rcvnxt", + olduwe, + tpcb->tp_sent_uwe, tpcb->tp_lcredit, + tpcb->tp_rcvnxt); + ENDTRACE + } + IFPERF(tpcb) + /* new lwe is less than old uwe means we're + * acking before we received a whole window full + */ + if( SEQ_LT( tpcb, tpcb->tp_rcvnxt, olduwe) ) { + /* tmp1 = number of pkts fewer than the full window */ + register int tmp1 = + (int) SEQ_SUB( tpcb, olduwe, tpcb->tp_rcvnxt); + + if(tmp1 > TP_PM_MAX) + tmp1 = TP_PM_MAX; + IncPStat( tpcb, tps_ack_early[tmp1] ); + + /* tmp1 = amt of new cdt we're advertising */ + tmp1 = SEQ_SUB( tpcb, seq, tpcb->tp_sent_rcvnxt); + if(tmp1 > TP_PM_MAX ) + tmp1 = TP_PM_MAX; + + IncPStat( tpcb, + tps_cdt_acked [ tmp1 ] + [ ((tpcb->tp_lcredit > TP_PM_MAX)? + TP_PM_MAX:tpcb->tp_lcredit) ] ); + + } + ENDPERF + + IFTRACE(D_ACKSEND) + tptraceTPCB(TPPTack, seq, tpcb->tp_lcredit, tpcb->tp_sent_uwe, + tpcb->tp_r_subseq, 0); + ENDTRACE + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seq = seq; + seqeotX.s_eot = 0; + hdr->tpdu_seqeotX = htonl(seqeotX.s_seqeot); + hdr->tpdu_AKcdtX = htons(tpcb->tp_lcredit); +#else + hdr->tpdu_cdt = 0; + hdr->tpdu_AKseqX = seq; + hdr->tpdu_AKcdtX = tpcb->tp_lcredit; +#endif /* BYTE_ORDER */ + } else { + hdr->tpdu_AKseq = seq; + hdr->tpdu_AKcdt = tpcb->tp_lcredit; + } + if ((tpcb->tp_class == TP_CLASS_4) && + (tpcb->tp_reneged || acking_ooo)) { + /* + * Ack subsequence parameter req'd if WE reneged on + * credit offered. (ISO 8073, 12.2.3.8.2, p. 74) + */ + IFDEBUG(D_RENEG) + printf("Adding subseq 0x%x\n", tpcb->tp_s_subseq); + ENDDEBUG + tpcb->tp_s_subseq++; + /* + * add tmp subseq and do a htons on it. 
+ */ + ADDOPTION(TPP_subseq, hdr, + sizeof(tpcb->tp_s_subseq), tpcb->tp_s_subseq); + } else + tpcb->tp_s_subseq = 0; + + if ( tpcb->tp_sendfcc || eot ) /* overloaded to mean SEND FCC */ { + /* + * Rules for sending FCC ("should" send when) : + * %a) received an ack from peer with NO NEWS whatsoever, + * and it did not contain an FCC + * b) received an ack from peer that opens its closed window. + * c) received an ack from peer after it reneged on its + * offered credit, AND this ack raises UWE but LWE is same + * and below UWE at time of reneging (reduction) + * Now, ISO 8073 12.2.3.8.3 says + * that a retransmitted AK shall not contain the FCC + * parameter. Now, how the hell you tell the difference + * between a retransmitted ack and an ack that's sent in + * response to a received ack, I don't know, because without + * any local activity, and w/o any received DTs, they + * will contain exactly the same credit/seq# information. + * Anyway, given that the "retransmission of acks" + * procedure (ISO 8073 12.2.3.8.3) is optional, and we + * don't do it (although the peer can't tell that), we + * ignore this last rule. + * + * We send FCC for reasons a) and b) only. + * To add reason c) would require a ridiculous amount of state. 
+ * + */ + u_short bogus[4]; /* lwe(32), subseq(16), cdt(16) */ + SeqNum lwe; + u_short subseq, fcredit; + + tpcb->tp_sendfcc = 0; + + lwe = (SeqNum) htonl(tpcb->tp_snduna); + subseq = htons(tpcb->tp_r_subseq); + fcredit = htons(tpcb->tp_fcredit); + + bcopy((caddr_t) &lwe, (caddr_t)&bogus[0], sizeof(SeqNum)); + bcopy((caddr_t) &subseq, (caddr_t)&bogus[2], sizeof(u_short)); + bcopy((caddr_t) &fcredit, (caddr_t)&bogus[3], sizeof(u_short)); + + IFTRACE(D_ACKSEND) + tptraceTPCB(TPPTmisc, + "emit w/FCC: snduna r_subseq fcredit", + tpcb->tp_snduna, tpcb->tp_r_subseq, + tpcb->tp_fcredit, 0); + ENDTRACE + + IFDEBUG(D_ACKSEND) + printf("Calling ADDOPTION 0x%x, 0x%x, 0x%x,0x%x\n", + TPP_flow_cntl_conf, + hdr, sizeof(bogus), bogus[0]); + ENDDEBUG + ADDOPTION(TPP_flow_cntl_conf, hdr, sizeof(bogus), bogus[0]); + IFDEBUG(D_ACKSEND) + printf("after ADDOPTION hdr 0x%x hdr->tpdu_li 0x%x\n", + hdr, hdr->tpdu_li); + printf( + "after ADDOPTION csum_offset 0x%x, hdr->tpdu_li 0x%x\n", + csum_offset, hdr->tpdu_li); + ENDDEBUG + + } + tpcb->tp_reneged = 0; + tpcb->tp_sent_rcvnxt = seq; + if (tpcb->tp_fcredit == 0) { + int timo = tpcb->tp_keepalive_ticks; + if (tpcb->tp_rxtshift < TP_MAXRXTSHIFT) + tpcb->tp_rxtshift++; + timo = min(timo, ((int)tpcb->tp_dt_ticks) << tpcb->tp_rxtshift); + tp_ctimeout(tpcb, TM_sendack, timo); + } else + tp_ctimeout(tpcb, TM_sendack, tpcb->tp_keepalive_ticks); + IncStat(ts_AK_sent); + IncPStat(tpcb, tps_AK_sent); + IFDEBUG(D_ACKSEND) + printf( + "2 after rADDOPTION csum_offset 0x%x, hdr->tpdu_li 0x%x\n", + csum_offset, hdr->tpdu_li); + ENDDEBUG + break; + + case ER_TPDU_type: + hdr->tpdu_ERreason = eot; + hdr->tpdu_cdt = 0; + /* no user data */ + data = (struct mbuf *)0; + IncStat(ts_ER_sent); + break; + } + + } + ASSERT( ((int)hdr->tpdu_li > 0) && ((int)hdr->tpdu_li < MLEN) ); + + m->m_next = data; + + ASSERT( hdr->tpdu_li < MLEN ); /* leave this in */ + ASSERT( hdr->tpdu_li != 0 ); /* leave this in */ + + m->m_len = hdr->tpdu_li ; + hdr->tpdu_li --; /* 
doesn't include the li field */ + + datalen = m_datalen( m ); /* total len */ + + ASSERT( datalen <= tpcb->tp_l_tpdusize ); /* may become a problem + when CLNP is used; leave in here for the time being */ + IFDEBUG(D_ACKSEND) + printf( + "4 after rADDOPTION csum_offset 0x%x, hdr->tpdu_li 0x%x\n", + csum_offset, hdr->tpdu_li); + ENDDEBUG + if( datalen > tpcb->tp_l_tpdusize ) { + printf("data len 0x%x tpcb->tp_l_tpdusize 0x%x\n", + datalen, tpcb->tp_l_tpdusize); + } + IFDEBUG(D_EMIT) + printf( + "tp_emit before gen_csum m_len 0x%x, csum_offset 0x%x, datalen 0x%x\n", + m->m_len, csum_offset, datalen); + ENDDEBUG + if( tpcb->tp_use_checksum || + (dutype == CR_TPDU_type && (tpcb->tp_class & TP_CLASS_4)) ) { + iso_gen_csum(m, csum_offset, datalen); + } + + IFDEBUG(D_EMIT) + printf("tp_emit before tpxxx_output tpcb 0x%x, dutype 0x%x, datalen 0x%x\n", + tpcb, dutype, datalen); + dump_buf(mtod(m, caddr_t), datalen); + ENDDEBUG + + IFPERF(tpcb) + if( dutype == DT_TPDU_type ) { + PStat(tpcb, Nb_to_ll) += (datalen - m->m_len); + tpmeas( tpcb->tp_lref, TPtime_to_ll, (struct timeval *)0, + seq, PStat(tpcb, Nb_to_ll), (datalen - m->m_len)); + } + ENDPERF + + IFTRACE(D_EMIT) + tptraceTPCB(TPPTtpduout, dutype, hdr, hdr->tpdu_li+1, datalen, 0); + ENDTRACE + IFDEBUG(D_EMIT) + printf("OUTPUT: tpcb 0x%x, isop 0x%x, so 0x%x\n", + tpcb, tpcb->tp_npcb, tpcb->tp_sock); + ENDDEBUG + + { extern char tp_delay; + + if( tp_delay ) + if( tpcb->tp_use_checksum == 0 ) { + register u_int i = tp_delay; + for (; i!= 0; i--) + (void) iso_check_csum(m, datalen); + } + } + ASSERT( m->m_len > 0 ); + error = (tpcb->tp_nlproto->nlp_output)(tpcb->tp_npcb, m, datalen, + !tpcb->tp_use_checksum); + IFDEBUG(D_EMIT) + printf("OUTPUT: returned 0x%x\n", error); + ENDDEBUG + IFTRACE(D_EMIT) + tptraceTPCB(TPPTmisc, + "tp_emit nlproto->output netservice returns datalen", + tpcb->tp_nlproto->nlp_output, tpcb->tp_netservice, error, datalen); + ENDTRACE +done: + if (error) { + if (dutype == AK_TPDU_type) + 
tp_ctimeout(tpcb, TM_sendack, 1); + if (error == E_CO_QFULL) { + tp_quench(tpcb, PRC_QUENCH); + return 0; + } + } + return error; +} +/* + * NAME: tp_error_emit() + * CALLED FROM: tp_input() when a DR or ER is to be issued in + * response to an input error. + * FUNCTION and ARGUMENTS: + * The error type is the first argument. + * The argument (sref) is the source reference on the bad incoming tpdu, + * and is used for a destination reference on the outgoing packet. + * (faddr) and (laddr) are the foreign and local addresses for this + * connection. + * (erdata) is a ptr to the errant incoming tpdu, and is copied into the + * outgoing ER, if an ER is to be issued. + * (erlen) is the number of octets of the errant tpdu that we should + * try to copy. + * (tpcb) is the pcb that describes the connection for which the bad tpdu + * arrived. + * RETURN VALUES: + * 0 OK + * ENOBUFS + * E* from net layer datagram output routine + * SIDE EFFECTS: + * + * NOTES: + */ + +int +tp_error_emit(error, sref, faddr, laddr, erdata, erlen, tpcb, cons_channel, + dgout_routine) + int error; + u_long sref; + struct sockaddr_iso *faddr, *laddr; + struct mbuf *erdata; + int erlen; + struct tp_pcb *tpcb; + caddr_t cons_channel; + int (*dgout_routine)(); +{ + int dutype; + int datalen = 0; + register struct tpdu *hdr; + register struct mbuf *m; + int csum_offset; + + IFTRACE(D_ERROR_EMIT) + tptrace(TPPTmisc, "tp_error_emit error sref tpcb erlen", + error, sref, tpcb, erlen); + ENDTRACE + IFDEBUG(D_ERROR_EMIT) + printf( + "tp_error_emit error 0x%x sref 0x%x tpcb 0x%x erlen 0x%x chan 0x%x\n", + error, sref, tpcb, erlen, cons_channel); + ENDDEBUG + + MGET(m, M_DONTWAIT, TPMT_TPHDR); + if (m == NULL) { + return ENOBUFS; + } + m->m_len = sizeof(struct tpdu); + m->m_act = MNULL; + + hdr = mtod(m, struct tpdu *); + + IFDEBUG(D_ERROR_EMIT) + printf("[error 0x%x] [error&0xff 0x%x] [(char)error 0x%x]\n", + error, error&0xff, (char)error); + ENDDEBUG + + + if (error & TP_ERROR_SNDC) + dutype = 
DC_TPDU_type; + else if (error & 0x40) { + error &= ~0x40; + dutype = ER_TPDU_type; + } else + dutype = DR_TPDU_type; + error &= 0xff; + + hdr->tpdu_type = dutype; + hdr->tpdu_cdt = 0; + + switch( dutype ) { + + case DC_TPDU_type: + IncStat(ts_DC_sent); + hdr->tpdu_li = 6; + hdr->tpdu_DCdref = htons(sref); + hdr->tpdu_DCsref = tpcb ? htons(tpcb->tp_lref) : 0; + IFDEBUG(D_ERROR_EMIT) + printf("DC case:\n"); + dump_buf( hdr, 6); + ENDDEBUG + /* forget the add'l information variable part */ + break; + + case DR_TPDU_type: + IncStat(ts_DR_sent); + hdr->tpdu_li = 7; + hdr->tpdu_DRdref = htons(sref); + hdr->tpdu_DRsref = 0; + hdr->tpdu_DRreason = (char)error; + IFDEBUG(D_ERROR_EMIT) + printf("DR case:\n"); + dump_buf( hdr, 7); + ENDDEBUG + /* forget the add'l information variable part */ + break; + + case ER_TPDU_type: + IncStat(ts_ER_sent); + hdr->tpdu_li = 5; + hdr->tpdu_ERreason = (char)error; + hdr->tpdu_ERdref = htons(sref); + break; + + default: + ASSERT(0); + printf("TP PANIC: bad dutype 0x%x\n", dutype); + } + + if(tpcb) + if( tpcb->tp_use_checksum ) { + ADDOPTION(TPP_checksum, hdr, 2, csum_offset /* dummy argument */); + csum_offset = hdr->tpdu_li - 2; + } + + ASSERT( hdr->tpdu_li < MLEN ); + + if (dutype == ER_TPDU_type) { + /* copy the errant tpdu into another 'variable part' */ + register caddr_t P; + + IFTRACE(D_ERROR_EMIT) + tptrace(TPPTmisc, "error_emit ER len tpduli", erlen, hdr->tpdu_li, + 0,0); + ENDTRACE + IFDEBUG(D_ERROR_EMIT) + printf("error_emit ER len 0x%x tpduli 0x%x\n", erlen, hdr->tpdu_li); + ENDDEBUG + + /* copy at most as many octets for which you have room */ + if (erlen + hdr->tpdu_li + 2 > TP_MAX_HEADER_LEN) + erlen = TP_MAX_HEADER_LEN - hdr->tpdu_li - 2; + + /* add the "invalid tpdu" parameter : required in class 0 */ + P = (caddr_t)hdr + (int)(hdr->tpdu_li); + vbptr(P)->tpv_code = TPP_invalid_tpdu; /* parameter code */ + vbptr(P)->tpv_len = erlen; /* parameter length */ + m->m_len = hdr->tpdu_li + 2; /* 1 for code, 1 for length */ + + /* 
tp_input very likely handed us an mbuf chain w/ nothing in + * the first mbuf and the data following the empty mbuf + */ + if(erdata->m_len == 0) { + erdata = m_free(erdata); /* returns the next mbuf on the chain */ + } + /* + * copy only up to the bad octet + * (or max that will fit in a header + */ + m->m_next = m_copy(erdata, 0, erlen); + hdr->tpdu_li += erlen + 2; + m_freem(erdata); + } else { + IFDEBUG(D_ERROR_EMIT) + printf("error_emit DR error tpduli 0x%x\n", error, hdr->tpdu_li); + dump_buf( (char *)hdr, hdr->tpdu_li ); + ENDDEBUG + m->m_len = hdr->tpdu_li ; + m_freem(erdata); + } + + hdr->tpdu_li --; + IFTRACE(D_ERROR_EMIT) + tptrace(TPPTtpduout, 2, hdr, hdr->tpdu_li+1, 0, 0); + ENDTRACE + + datalen = m_datalen( m); + if (tpcb) { + if( tpcb->tp_use_checksum ) { + IFTRACE(D_ERROR_EMIT) + tptrace(TPPTmisc, "before gen csum datalen", datalen,0,0,0); + ENDTRACE + IFDEBUG(D_ERROR_EMIT) + printf("before gen csum datalen 0x%x, csum_offset 0x%x\n", + datalen, csum_offset); + ENDDEBUG + + iso_gen_csum(m, csum_offset, datalen); + } + + IFDEBUG(D_ERROR_EMIT) + printf("OUTPUT: tpcb 0x%x, isop 0x%x, so 0x%x\n", + tpcb, tpcb->tp_npcb, tpcb->tp_sock); + ENDDEBUG + } + if (cons_channel) { +#if TPCONS + struct pklcd *lcp = (struct pklcd *)cons_channel; + struct isopcb *isop = (struct isopcb *)lcp->lcd_upnext; + + tpcons_dg_output(cons_channel, m, datalen); + /* was if (tpcb == 0) iso_pcbdetach(isop); */ + /* but other side may want to try again over same VC, + so, we'll depend on him closing it, but in case it gets forgotten + we'll mark it for garbage collection */ + lcp->lcd_flags |= X25_DG_CIRCUIT; + IFDEBUG(D_ERROR_EMIT) + printf("OUTPUT: dutype 0x%x channel 0x%x\n", + dutype, cons_channel); + ENDDEBUG +#else + printf("TP panic! 
cons channel 0x%x but not cons configured\n", + cons_channel); +#endif + } else if (tpcb) { + + IFDEBUG(D_ERROR_EMIT) + printf("tp_error_emit 1 sending DG: Laddr\n"); + dump_addr((struct sockaddr *)laddr); + printf("Faddr\n"); + dump_addr((struct sockaddr *)faddr); + ENDDEBUG + return (tpcb->tp_nlproto->nlp_dgoutput)( + &laddr->siso_addr, + &faddr->siso_addr, + m, datalen, + /* no route */ (caddr_t)0, !tpcb->tp_use_checksum); + } else if (dgout_routine) { + IFDEBUG(D_ERROR_EMIT) + printf("tp_error_emit sending DG: Laddr\n"); + dump_addr((struct sockaddr *)laddr); + printf("Faddr\n"); + dump_addr((struct sockaddr *)faddr); + ENDDEBUG + return (*dgout_routine)( &laddr->siso_addr, &faddr->siso_addr, + m, datalen, /* no route */ + (caddr_t)0, /* nochecksum==false */0); + } else { + IFDEBUG(D_ERROR_EMIT) + printf("tp_error_emit DROPPING \n", m); + ENDDEBUG + IncStat(ts_send_drop); + m_freem(m); + return 0; + } +} diff --git a/bsd/netiso/tp_events.h b/bsd/netiso/tp_events.h new file mode 100644 index 000000000..739629b66 --- /dev/null +++ b/bsd/netiso/tp_events.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +struct tp_event { + int ev_number; + struct timeval e_time; +#define TM_inact 0x0 +#define TM_retrans 0x1 +#define TM_sendack 0x2 +#define TM_notused 0x3 + + union{ +struct { SeqNum e_low; SeqNum e_high; int e_retrans; } EV_TM_reference; + +#define TM_reference 0x4 +struct { SeqNum e_low; SeqNum e_high; int e_retrans; } EV_TM_data_retrans; + +#define TM_data_retrans 0x5 +struct { + u_char e_reason; + } EV_ER_TPDU; + +#define ER_TPDU 0x6 +struct { struct mbuf *e_data; /* first field */ + int e_datalen; /* 2nd field */ + u_int e_cdt; + } EV_CR_TPDU; + +#define CR_TPDU 0x7 +struct { struct mbuf *e_data; /* first field */ + int e_datalen; /* 2nd field */ + u_short e_sref; + u_char e_reason; + } EV_DR_TPDU; + +#define DR_TPDU 0x8 +#define DC_TPDU 0x9 +struct { struct mbuf *e_data; /* first field */ + int e_datalen; /* 2nd field */ + u_short e_sref; + u_int e_cdt; + } EV_CC_TPDU; + +#define CC_TPDU 0xa +struct { u_int e_cdt; + SeqNum e_seq; + SeqNum e_subseq; + u_char e_fcc_present; + } EV_AK_TPDU; + +#define AK_TPDU 0xb +struct { struct mbuf *e_data; /* first field */ + int e_datalen; /* 2nd field */ + u_int e_eot; + SeqNum e_seq; + } EV_DT_TPDU; + +#define DT_TPDU 0xc +struct { struct mbuf *e_data; /* first field */ + int e_datalen; /* 2nd field */ + SeqNum e_seq; + } EV_XPD_TPDU; + +#define XPD_TPDU 0xd +struct { SeqNum e_seq; } EV_XAK_TPDU; + +#define XAK_TPDU 0xe +#define T_CONN_req 0xf +struct { u_char e_reason; } EV_T_DISC_req; + +#define T_DISC_req 0x10 +#define T_LISTEN_req 0x11 +#define T_DATA_req 0x12 +#define T_XPD_req 0x13 +#define T_USR_rcvd 0x14 +#define T_USR_Xrcvd 0x15 +#define T_DETACH 0x16 +#define T_NETRESET 0x17 +#define T_ACPT_req 0x18 + }ev_union; +};/* end struct event */ + +#define tp_NEVENTS 0x19 + +#define ATTR(X)ev_union.EV_/**/X/**/ diff --git a/bsd/netiso/tp_inet.c b/bsd/netiso/tp_inet.c new 
file mode 100644 index 000000000..1c128b47a --- /dev/null +++ b/bsd/netiso/tp_inet.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_inet.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. 
+ +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * Here is where you find the inet-dependent code. We've tried to + * keep all net-level and (primarily) address-family-dependent stuff + * out of the tp source, and everything here is reached indirectly + * through a switch table (struct nl_protosw *) tpcb->tp_nlproto + * (see tp_pcb.c). + * The routines here are: + * in_getsufx: gets transport suffix out of an inpcb structure. + * in_putsufx: put transport suffix into an inpcb structure. + * in_putnetaddr: put a whole net addr into an inpcb. + * in_getnetaddr: get a whole net addr from an inpcb. + * in_cmpnetaddr: compare a whole net addr from an inpcb. + * in_recycle_suffix: clear suffix for reuse in inpcb + * tpip_mtu: figure out what size tpdu to use + * tpip_input: take a pkt from ip, strip off its ip header, give to tp + * tpip_output_dg: package a pkt for ip given 2 addresses & some data + * tpip_output: package a pkt for ip given an inpcb & some data + */ + +#if INET + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef ISO +#include +#endif + +/* + * NAME: in_getsufx() + + * CALLED FROM: pr_usrreq() on PRU_BIND, + * PRU_CONNECT, PRU_ACCEPT, and PRU_PEERADDR + * + * FUNCTION, ARGUMENTS, and RETURN VALUE: + * Get a transport suffix from an inpcb structure (inp). 
+ * The argument (which) takes the value TP_LOCAL or TP_FOREIGN. + * + * RETURNS: internet port / transport suffix + * (CAST TO AN INT) + * + * SIDE EFFECTS: + * + * NOTES: + */ +in_getsufx(inp, lenp, data_out, which) + struct inpcb *inp; + u_short *lenp; + caddr_t data_out; + int which; +{ + *lenp = sizeof(u_short); + switch (which) { + case TP_LOCAL: + *(u_short *)data_out = inp->inp_lport; + return; + + case TP_FOREIGN: + *(u_short *)data_out = inp->inp_fport; + } + +} + +/* + * NAME: in_putsufx() + * + * CALLED FROM: tp_newsocket(); i.e., when a connection + * is being established by an incoming CR_TPDU. + * + * FUNCTION, ARGUMENTS: + * Put a transport suffix (found in name) into an inpcb structure (inp). + * The argument (which) takes the value TP_LOCAL or TP_FOREIGN. + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ +/*ARGSUSED*/ +void +in_putsufx(inp, sufxloc, sufxlen, which) + struct inpcb *inp; + caddr_t sufxloc; + int which; +{ + if (which == TP_FOREIGN) { + bcopy(sufxloc, (caddr_t)&inp->inp_fport, sizeof(inp->inp_fport)); + } +} + +/* + * NAME: in_recycle_tsuffix() + * + * CALLED FROM: tp.trans whenever we go into REFWAIT state. + * + * FUNCTION and ARGUMENT: + * Called when a ref is frozen, to allow the suffix to be reused. + * (inp) is the net level pcb. + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: This really shouldn't have to be done in a NET level pcb + * but... for the internet world that just the way it is done in BSD... + * The alternative is to have the port unusable until the reference + * timer goes off. + */ +void +in_recycle_tsuffix(inp) + struct inpcb *inp; +{ + inp->inp_fport = inp->inp_lport = 0; +} + +/* + * NAME: in_putnetaddr() + * + * CALLED FROM: + * tp_newsocket(); i.e., when a connection is being established by an + * incoming CR_TPDU. + * + * FUNCTION and ARGUMENTS: + * Copy a whole net addr from a struct sockaddr (name). + * into an inpcb (inp). 
+ * The argument (which) takes values TP_LOCAL or TP_FOREIGN + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ +void +in_putnetaddr(inp, name, which) + register struct inpcb *inp; + struct sockaddr_in *name; + int which; +{ + switch (which) { + case TP_LOCAL: + bcopy((caddr_t)&name->sin_addr, + (caddr_t)&inp->inp_laddr, sizeof(struct in_addr)); + /* won't work if the dst address (name) is INADDR_ANY */ + + break; + case TP_FOREIGN: + if( name != (struct sockaddr_in *)0 ) { + bcopy((caddr_t)&name->sin_addr, + (caddr_t)&inp->inp_faddr, sizeof(struct in_addr)); + } + } +} + +/* + * NAME: in_cmpnetaddr() + * + * CALLED FROM: + * tp_input() when a connection is being established by an + * incoming CR_TPDU, and considered for interception. + * + * FUNCTION and ARGUMENTS: + * Compare a whole net addr from a struct sockaddr (name), + * with that implicitly stored in an inpcb (inp). + * The argument (which) takes values TP_LOCAL or TP_FOREIGN + * + * RETURNS: 1 if the addresses match, 0 otherwise + * + * SIDE EFFECTS: + * + * NOTES: + */ +in_cmpnetaddr(inp, name, which) + register struct inpcb *inp; + register struct sockaddr_in *name; + int which; +{ + if (which == TP_LOCAL) { + if (name->sin_port && name->sin_port != inp->inp_lport) + return 0; + return (name->sin_addr.s_addr == inp->inp_laddr.s_addr); + } + if (name->sin_port && name->sin_port != inp->inp_fport) + return 0; + return (name->sin_addr.s_addr == inp->inp_faddr.s_addr); +} + +/* + * NAME: in_getnetaddr() + * + * CALLED FROM: + * pr_usrreq() PRU_SOCKADDR, PRU_ACCEPT, PRU_PEERADDR + * FUNCTION and ARGUMENTS: + * Copy a whole net addr from an inpcb (inp) into + * an mbuf (name); + * The argument (which) takes values TP_LOCAL or TP_FOREIGN. 
+ * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ + +void +in_getnetaddr( inp, name, which) + register struct mbuf *name; + struct inpcb *inp; + int which; +{ + register struct sockaddr_in *sin = mtod(name, struct sockaddr_in *); + bzero((caddr_t)sin, sizeof(*sin)); + switch (which) { + case TP_LOCAL: + sin->sin_addr = inp->inp_laddr; + sin->sin_port = inp->inp_lport; + break; + case TP_FOREIGN: + sin->sin_addr = inp->inp_faddr; + sin->sin_port = inp->inp_fport; + break; + default: + return; + } + name->m_len = sin->sin_len = sizeof (*sin); + sin->sin_family = AF_INET; +} + +/* + * NAME: tpip_mtu() + * + * CALLED FROM: + * tp_route_to() on incoming CR, CC, and pr_usrreq() for PRU_CONNECT + * + * FUNCTION, ARGUMENTS, and RETURN VALUE: + * + * Perform subnetwork dependent part of determining MTU information. + * It appears that setting a double pointer to the rtentry associated with + * the destination, and returning the header size for the network protocol + * suffices. + * + * SIDE EFFECTS: + * Sets tp_routep pointer in pcb. + * + * NOTES: + */ + +tpip_mtu(tpcb) +register struct tp_pcb *tpcb; +{ + struct inpcb *inp = (struct inpcb *)tpcb->tp_npcb; + + IFDEBUG(D_CONN) + printf("tpip_mtu(tpcb)\n", tpcb); + printf("tpip_mtu routing to addr 0x%x\n", inp->inp_faddr.s_addr); + ENDDEBUG + tpcb->tp_routep = &(inp->inp_route.ro_rt); + return (sizeof (struct ip)); + +} + +/* + * NAME: tpip_output() + * + * CALLED FROM: tp_emit() + * + * FUNCTION and ARGUMENTS: + * Take a packet(m0) from tp and package it so that ip will accept it. + * This means prepending space for the ip header and filling in a few + * of the fields. + * inp is the inpcb structure; datalen is the length of the data in the + * mbuf string m0. + * RETURNS: + * whatever (E*) is returned from the net layer output routine. 
+ * + * SIDE EFFECTS: + * + * NOTES: + */ + +int +tpip_output(inp, m0, datalen, nochksum) + struct inpcb *inp; + struct mbuf *m0; + int datalen; + int nochksum; +{ + return tpip_output_dg( &inp->inp_laddr, &inp->inp_faddr, m0, datalen, + &inp->inp_route, nochksum); +} + +/* + * NAME: tpip_output_dg() + * + * CALLED FROM: tp_error_emit() + * + * FUNCTION and ARGUMENTS: + * This is a copy of tpip_output that takes the addresses + * instead of a pcb. It's used by the tp_error_emit, when we + * don't have an in_pcb with which to call the normal output rtn. + * + * RETURNS: ENOBUFS or whatever (E*) is + * returned from the net layer output routine. + * + * SIDE EFFECTS: + * + * NOTES: + */ + +/*ARGSUSED*/ +int +tpip_output_dg(laddr, faddr, m0, datalen, ro, nochksum) + struct in_addr *laddr, *faddr; + struct mbuf *m0; + int datalen; + struct route *ro; + int nochksum; +{ + register struct mbuf *m; + register struct ip *ip; + int error; + + IFDEBUG(D_EMIT) + printf("tpip_output_dg datalen 0x%x m0 0x%x\n", datalen, m0); + ENDDEBUG + + + MGETHDR(m, M_DONTWAIT, TPMT_IPHDR); + if (m == 0) { + error = ENOBUFS; + goto bad; + } + m->m_next = m0; + MH_ALIGN(m, sizeof(struct ip)); + m->m_len = sizeof(struct ip); + + ip = mtod(m, struct ip *); + bzero((caddr_t)ip, sizeof *ip); + + ip->ip_p = IPPROTO_TP; + m->m_pkthdr.len = ip->ip_len = sizeof(struct ip) + datalen; + ip->ip_ttl = MAXTTL; + /* don't know why you need to set ttl; + * overlay doesn't even make this available + */ + + ip->ip_src = *laddr; + ip->ip_dst = *faddr; + + IncStat(ts_tpdu_sent); + IFDEBUG(D_EMIT) + dump_mbuf(m, "tpip_output_dg before ip_output\n"); + ENDDEBUG + + error = ip_output(m, (struct mbuf *)0, ro, IP_ALLOWBROADCAST, NULL); + + IFDEBUG(D_EMIT) + printf("tpip_output_dg after ip_output\n"); + ENDDEBUG + + return error; + +bad: + m_freem(m); + IncStat(ts_send_drop); + return error; +} + +/* + * NAME: tpip_input() + * + * CALLED FROM: + * ip's input routine, indirectly through the protosw. 
+ * + * FUNCTION and ARGUMENTS: + * Take a packet (m) from ip, strip off the ip header and give it to tp + * + * RETURNS: No return value. + * + * SIDE EFFECTS: + * + * NOTES: + */ +ProtoHook +tpip_input(m, iplen) + struct mbuf *m; + int iplen; +{ + struct sockaddr_in src, dst; + register struct ip *ip; + int s = splnet(), hdrlen; + + IncStat(ts_pkt_rcvd); + + /* + * IP layer has already pulled up the IP header, + * but the first byte after the IP header may not be there, + * e.g. if you came in via loopback, so you have to do an + * m_pullup to before you can even look to see how much you + * really need. The good news is that m_pullup will round + * up to almost the next mbuf's worth. + */ + + + if((m = m_pullup(m, iplen + 1)) == MNULL) + goto discard; + CHANGE_MTYPE(m, TPMT_DATA); + + /* + * Now pull up the whole tp header: + * Unfortunately, there may be IP options to skip past so we + * just fetch it as an unsigned char. + */ + hdrlen = iplen + 1 + mtod(m, u_char *)[iplen]; + + if( m->m_len < hdrlen ) { + if((m = m_pullup(m, hdrlen)) == MNULL){ + IFDEBUG(D_TPINPUT) + printf("tp_input, pullup 2!\n"); + ENDDEBUG + goto discard; + } + } + /* + * cannot use tp_inputprep() here 'cause you don't + * have quite the same situation + */ + + IFDEBUG(D_TPINPUT) + dump_mbuf(m, "after tpip_input both pullups"); + ENDDEBUG + /* + * m_pullup may have returned a different mbuf + */ + ip = mtod(m, struct ip *); + + /* + * drop the ip header from the front of the mbuf + * this is necessary for the tp checksum + */ + m->m_len -= iplen; + m->m_data += iplen; + + src.sin_addr = *(struct in_addr *)&(ip->ip_src); + src.sin_family = AF_INET; + src.sin_len = sizeof(src); + dst.sin_addr = *(struct in_addr *)&(ip->ip_dst); + dst.sin_family = AF_INET; + dst.sin_len = sizeof(dst); + + (void) tp_input(m, (struct sockaddr *)&src, (struct sockaddr *)&dst, + 0, tpip_output_dg, 0); + return 0; + +discard: + IFDEBUG(D_TPINPUT) + printf("tpip_input DISCARD\n"); + ENDDEBUG + IFTRACE(D_TPINPUT) + 
tptrace(TPPTmisc, "tpip_input DISCARD m", m,0,0,0); + ENDTRACE + m_freem(m); + IncStat(ts_recv_drop); + splx(s); + return 0; +} + + +#include +#include + +extern void tp_quench(); +/* + * NAME: tpin_quench() + * + * CALLED FROM: tpip_ctlinput() + * + * FUNCTION and ARGUMENTS: find the tpcb pointer and pass it to tp_quench + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ + +void +tpin_quench(inp) + struct inpcb *inp; +{ + tp_quench((struct tp_pcb *)inp->inp_socket->so_pcb, PRC_QUENCH); +} + +/* + * NAME: tpip_ctlinput() + * + * CALLED FROM: + * The network layer through the protosw table. + * + * FUNCTION and ARGUMENTS: + * When clnp gets an ICMP msg this gets called. + * It either returns an error status to the user or + * causes all connections on this address to be aborted + * by calling the appropriate xx_notify() routine. + * (cmd) is the type of ICMP error. + * (sa) the address of the sender + * + * RETURNS: Nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ +ProtoHook +tpip_ctlinput(cmd, sin) + int cmd; + struct sockaddr_in *sin; +{ + extern u_char inetctlerrmap[]; + extern struct in_addr zeroin_addr; + void tp_quench __P((struct inpcb *,int)); + void tpin_abort __P((struct inpcb *,int)); + + if (sin->sin_family != AF_INET && sin->sin_family != AF_IMPLINK) + return 0; + if (sin->sin_addr.s_addr == INADDR_ANY) + return 0; + if (cmd < 0 || cmd > PRC_NCMDS) + return 0; + switch (cmd) { + + case PRC_QUENCH: + in_pcbnotify(&tp_inpcb, (struct sockaddr *)sin, 0, + zeroin_addr, 0, cmd, tp_quench); + break; + + case PRC_ROUTEDEAD: + case PRC_HOSTUNREACH: + case PRC_UNREACH_NET: + case PRC_IFDOWN: + case PRC_HOSTDEAD: + in_pcbnotify(&tp_inpcb, (struct sockaddr *)sin, 0, + zeroin_addr, 0, cmd, in_rtchange); + break; + + default: + /* + case PRC_MSGSIZE: + case PRC_UNREACH_HOST: + case PRC_UNREACH_PROTOCOL: + case PRC_UNREACH_PORT: + case PRC_UNREACH_NEEDFRAG: + case PRC_UNREACH_SRCFAIL: + case PRC_REDIRECT_NET: + case PRC_REDIRECT_HOST: + case 
PRC_REDIRECT_TOSNET: + case PRC_REDIRECT_TOSHOST: + case PRC_TIMXCEED_INTRANS: + case PRC_TIMXCEED_REASS: + case PRC_PARAMPROB: + */ + in_pcbnotify(&tp_inpcb, (struct sockaddr *)sin, 0, + zeroin_addr, 0, cmd, tpin_abort); + } + return 0; +} + +/* + * NAME: tpin_abort() + * + * CALLED FROM: + * xxx_notify() from tp_ctlinput() when + * net level gets some ICMP-equiv. type event. + * + * FUNCTION and ARGUMENTS: + * Cause the connection to be aborted with some sort of error + * reason indicating that the network layer caused the abort. + * Fakes an ER TPDU so we can go through the driver. + * + * RETURNS: Nothing + * + * SIDE EFFECTS: + * + * NOTES: + */ + +ProtoHook +tpin_abort(inp) + struct inpcb *inp; +{ + struct tp_event e; + + e.ev_number = ER_TPDU; + e.ATTR(ER_TPDU).e_reason = ENETRESET; + (void) tp_driver((struct tp_pcb *)inp->inp_ppcb, &e); + return 0; +} + +#ifdef ARGO_DEBUG +dump_inaddr(addr) + register struct sockaddr_in *addr; +{ + printf("INET: port 0x%x; addr 0x%x\n", addr->sin_port, addr->sin_addr); +} +#endif /* ARGO_DEBUG */ +#endif /* INET */ diff --git a/bsd/netiso/tp_input.c b/bsd/netiso/tp_input.c new file mode 100644 index 000000000..ceeecf5f1 --- /dev/null +++ b/bsd/netiso/tp_input.c @@ -0,0 +1,1642 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_input.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * tp_input() gets an mbuf chain from ip. 
Actually, not directly + * from ip, because ip calls a net-level routine that strips off + * the net header and then calls tp_input(), passing the proper type + * of addresses for the address family in use (how it figures out + * which AF is not yet determined.) + * + * Decomposing the tpdu is some of the most laughable code. The variable-length + * parameters and the problem of non-aligned memory references + * necessitates such abominations as the macros WHILE_OPTIONS (q.v. below) + * to loop through the header and decompose it. + * + * The routine tp_newsocket() is called when a CR comes in for a listening + * socket. tp_input calls sonewconn() and tp_newsocket() to set up the + * "child" socket. Most tpcb values are copied from the parent tpcb into + * the child. + * + * Also in here is tp_headersize() (grot) which tells the expected size + * of a tp header, to be used by other layers. It's in here because it + * uses the static structure tpdu_info. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#ifdef TRUE +#undef FALSE +#undef TRUE +#endif +#include +#include +#include + +int iso_check_csum(), tp_driver(), tp_headersize(), tp_error_emit(); + +/* + #ifdef lint + #undef ATTR + #define ATTR(X)ev_number + #endif lint +*/ + +struct mbuf * +tp_inputprep(m) + register struct mbuf *m; +{ + int hdrlen; + + IFDEBUG(D_TPINPUT) + printf("tp_inputprep: m 0x%x\n", m) ; + ENDDEBUG + + while( m->m_len < 1 ) { + /* The "m_free" logic + * if( (m = m_free(m)) == MNULL ) + * return (struct mbuf *)0; + * would cause a system crash if ever executed. + * This logic will be executed if the first mbuf + * in the chain only contains a CLNP header. The m_free routine + * will release the mbuf containing the CLNP header from the + * chain and the new head of the chain will not have the + * M_PKTHDR bit set. 
This routine, tp_inputprep, will + * eventually call the "sbappendaddr" routine. "sbappendaddr" + * calls "panic" if M_PKTHDR is not set. m_pullup is a cheap + * way of keeping the head of the chain from being freed. + */ + if((m = m_pullup(m, 1)) == MNULL) + return (MNULL); + } + if(((int)m->m_data) & 0x3) { + /* If we are not 4-byte aligned, we have to be + * above the beginning of the mbuf, and it is ok just + * to slide it back. + */ + caddr_t ocp = m->m_data; + + m->m_data = (caddr_t)(((int)m->m_data) & ~0x3); + bcopy(ocp, m->m_data, (unsigned)m->m_len); + } + CHANGE_MTYPE(m, TPMT_DATA); + + /* we KNOW that there is at least 1 byte in this mbuf + and that it is hdr->tpdu_li XXXXXXX! */ + + hdrlen = 1 + *mtod( m, u_char *); + + /* + * now pull up the whole tp header + */ + if ( m->m_len < hdrlen) { + if ((m = m_pullup(m, hdrlen)) == MNULL ) { + IncStat(ts_recv_drop); + return (struct mbuf *)0; + } + } + IFDEBUG(D_INPUT) + printf( + " at end: m 0x%x hdr->tpdu_li 0x%x m_len 0x%x\n",m, + hdrlen, m->m_len); + ENDDEBUG + return m; +} + +/* begin groan + * -- this array and the following macros allow you to step through the + * parameters of the variable part of a header + * note that if for any reason the values of the **_TPDU macros (in tp_events.h) + * should change, this array has to be rearranged + */ + +#define TP_LEN_CLASS_0_INDEX 2 +#define TP_MAX_DATA_INDEX 3 + +static u_char tpdu_info[][4] = +{ +/* length max data len */ +/* reg fmt xtd fmt class 0 */ + /* UNUSED 0x0 */ 0x0 , 0x0, 0x0, 0x0, + /* XPD_TPDU_type 0x1 */ 0x5, 0x8, 0x0, TP_MAX_XPD_DATA, + /* XAK_TPDU_type 0x2 */ 0x5 , 0x8, 0x0, 0x0, + /* GR_TPDU_type 0x3 */ 0x0 , 0x0, 0x0, 0x0, + /* UNUSED 0x4 */ 0x0 , 0x0, 0x0, 0x0, + /* UNUSED 0x5 */ 0x0 , 0x0, 0x0, 0x0, + /* AK_TPDU_type 0x6 */ 0x5, 0xa, 0x0, 0x0, + /* ER_TPDU_type 0x7 */ 0x5, 0x5, 0x0, 0x0, + /* DR_TPDU_type 0x8 */ 0x7, 0x7, 0x7, TP_MAX_DR_DATA, + /* UNUSED 0x9 */ 0x0 , 0x0, 0x0, 0x0, + /* UNUSED 0xa */ 0x0 , 0x0, 0x0, 0x0, + /* UNUSED 0xb */ 
0x0 , 0x0, 0x0, 0x0, + /* DC_TPDU_type 0xc */ 0x6, 0x6, 0x0, 0x0, + /* CC_TPDU_type 0xd */ 0x7, 0x7, 0x7, TP_MAX_CC_DATA, + /* CR_TPDU_type 0xe */ 0x7, 0x7, 0x7, TP_MAX_CR_DATA, + /* DT_TPDU_type 0xf */ 0x5, 0x8, 0x3, 0x0, +}; + +#define CHECK(Phrase, Erval, Stat, Whattodo, Loc)\ + if (Phrase) {error = (Erval); errlen = (int)(Loc); IncStat(Stat);\ + goto Whattodo; } + +/* + * WHENEVER YOU USE THE FOLLOWING MACRO, + * BE SURE THE TPDUTYPE IS A LEGIT VALUE FIRST! + */ + +#define WHILE_OPTIONS(P, hdr, format)\ +{ register caddr_t P = tpdu_info[(hdr)->tpdu_type][(format)] + (caddr_t)hdr;\ + caddr_t PLIM = 1 + hdr->tpdu_li + (caddr_t)hdr;\ + for (;; P += 2 + ((struct tp_vbp *)P)->tpv_len) {\ + CHECK((P > PLIM), E_TP_LENGTH_INVAL, ts_inv_length,\ + respond, P - (caddr_t)hdr);\ + if (P == PLIM) break; + +#define END_WHILE_OPTIONS(P) } } + +/* end groan */ + +/* + * NAME: tp_newsocket() + * + * CALLED FROM: + * tp_input() on incoming CR, when a socket w/ the called suffix + * is awaiting a connection request + * + * FUNCTION and ARGUMENTS: + * Create a new socket structure, attach to it a new transport pcb, + * using a copy of the net level pcb for the parent socket. + * (so) is the parent socket. + * (fname) is the foreign address (all that's used is the nsap portion) + * + * RETURN VALUE: + * a new socket structure, being this end of the newly formed connection. 
+ * + * SIDE EFFECTS: + * Sets a few things in the tpcb and net level pcb + * + * NOTES: + */ +static struct socket * +tp_newsocket(so, fname, cons_channel, class_to_use, netservice) + struct socket *so; + struct sockaddr *fname; + caddr_t cons_channel; + u_char class_to_use; + u_int netservice; +{ + register struct tp_pcb *tpcb = sototpcb(so); /* old tpcb, needed below */ + register struct tp_pcb *newtpcb; + + /* + * sonewconn() gets a new socket structure, + * a new lower layer pcb and a new tpcb, + * but the pcbs are unnamed (not bound) + */ + IFTRACE(D_NEWSOCK) + tptraceTPCB(TPPTmisc, "newsock: listg_so, _tpcb, so_head", + so, tpcb, so->so_head, 0); + ENDTRACE + + if ((so = sonewconn(so, SS_ISCONFIRMING)) == (struct socket *)0) + return so; + IFTRACE(D_NEWSOCK) + tptraceTPCB(TPPTmisc, "newsock: after newconn so, so_head", + so, so->so_head, 0, 0); + ENDTRACE + + IFDEBUG(D_NEWSOCK) + printf("tp_newsocket(channel 0x%x) after sonewconn so 0x%x \n", + cons_channel, so); + dump_addr(fname); + { + struct socket *t, *head ; + + head = so->so_head; + t = so; + printf("so 0x%x so_head 0x%x so_q0 0x%x, q0len %d\n", + t, t->so_head, t->so_q0, t->so_q0len); + while( (t=t->so_q0) && t!= so && t!= head) + printf("so 0x%x so_head 0x%x so_q0 0x%x, q0len %d\n", + t, t->so_head, t->so_q0, t->so_q0len); + } + ENDDEBUG + + /* + * before we clobber the old tpcb ptr, get these items from the parent pcb + */ + newtpcb = sototpcb(so); + newtpcb->_tp_param = tpcb->_tp_param; + newtpcb->tp_flags = tpcb->tp_flags; + newtpcb->tp_lcredit = tpcb->tp_lcredit; + newtpcb->tp_l_tpdusize = tpcb->tp_l_tpdusize; + newtpcb->tp_lsuffixlen = tpcb->tp_lsuffixlen; + bcopy( tpcb->tp_lsuffix, newtpcb->tp_lsuffix, newtpcb->tp_lsuffixlen); + + if( /* old */ tpcb->tp_ucddata) { + /* + * These data are the connect- , confirm- or disconnect- data. 
+ */ + struct mbuf *conndata; + + conndata = m_copy(tpcb->tp_ucddata, 0, (int)M_COPYALL); + IFDEBUG(D_CONN) + dump_mbuf(conndata, "conndata after mcopy"); + ENDDEBUG + newtpcb->tp_ucddata = conndata; + } + + tpcb = newtpcb; + tpcb->tp_state = TP_LISTENING; + tpcb->tp_class = class_to_use; + tpcb->tp_netservice = netservice; + + + ASSERT( fname != 0 ) ; /* just checking */ + if ( fname ) { + /* + * tp_route_to takes its address argument in the form of an mbuf. + */ + struct mbuf *m; + int err; + + MGET(m, M_DONTWAIT, MT_SONAME); /* mbuf type used is confusing */ + if (m) { + /* + * this seems a bit grotesque, but tp_route_to expects + * an mbuf * instead of simply a sockaddr; it calls the ll + * pcb_connect, which expects the name/addr in an mbuf as well. + * sigh. + */ + bcopy((caddr_t)fname, mtod(m, caddr_t), fname->sa_len); + m->m_len = fname->sa_len; + + /* grot : have to say the kernel can override params in + * the passive open case + */ + tpcb->tp_dont_change_params = 0; + err = tp_route_to( m, tpcb, cons_channel); + m_free(m); + + if (!err) + goto ok; + } + IFDEBUG(D_CONN) + printf("tp_route_to FAILED! detaching tpcb 0x%x, so 0x%x\n", + tpcb, so); + ENDDEBUG + (void) tp_detach(tpcb); + return 0; + } +ok: + IFDEBUG(D_TPINPUT) + printf("tp_newsocket returning so 0x%x, sototpcb(so) 0x%x\n", + so, sototpcb(so)); + ENDDEBUG + return so; +} + +#ifndef TPCONS +tpcons_output() +{ + return(0); +} +#endif /* !CONS */ + +/* + * NAME: tp_input() + * + * CALLED FROM: + * net layer input routine + * + * FUNCTION and ARGUMENTS: + * Process an incoming TPDU (m), finding the associated tpcb if there + * is one. Create the appropriate type of event and call the driver. + * (faddr) and (laddr) are the foreign and local addresses. + * + * When tp_input() is called we KNOW that the ENTIRE TP HEADER + * has been m_pullup-ed. 
+ * + * RETURN VALUE: Nada + * + * SIDE EFFECTS: + * When using COSNS it may affect the state of the net-level pcb + * + * NOTE: + * The initial value of acktime is 2 so that we will never + * have a 0 value for tp_peer_acktime. It gets used in the + * computation of the retransmission timer value, and so it + * mustn't be zero. + * 2 seems like a reasonable minimum. + */ +ProtoHook +tp_input(m, faddr, laddr, cons_channel, dgout_routine, ce_bit) + register struct mbuf *m; + struct sockaddr *faddr, *laddr; /* NSAP addresses */ + caddr_t cons_channel; + int (*dgout_routine)(); + int ce_bit; + +{ + register struct tp_pcb *tpcb; + register struct tpdu *hdr; + struct socket *so; + struct tp_event e; + int error; + unsigned dutype; + u_short dref, sref, acktime, subseq; + u_char preferred_class, class_to_use, pdusize; + u_char opt, dusize, addlopt, version; +#ifdef TP_PERF_MEAS + u_char perf_meas; +#endif /* TP_PERF_MEAS */ + u_char fsufxlen, lsufxlen; + caddr_t fsufxloc, lsufxloc; + int tpdu_len; + u_int takes_data; + u_int fcc_present; + int errlen; + struct tp_conn_param tpp; + int tpcons_output(); + +again: + hdr = mtod(m, struct tpdu *); + tpcb = 0; + error = errlen = tpdu_len = 0; + takes_data = fcc_present = FALSE; + acktime = 2; sref = subseq = 0; + fsufxloc = lsufxloc = NULL; + fsufxlen = lsufxlen = + preferred_class = class_to_use = pdusize = addlopt = 0; + dusize = TP_DFL_TPDUSIZE; +#ifdef TP_PERF_MEAS + GET_CUR_TIME( &e.e_time ); perf_meas = 0; +#endif /* TP_PERF_MEAS */ + + IFDEBUG(D_TPINPUT) + printf("tp_input(0x%x, ... 0x%x)\n", m, cons_channel); + ENDDEBUG + + + /* + * get the actual tpdu length - necessary for monitoring + * and for checksumming + * + * Also, maybe measure the mbuf chain lengths and sizes. 
+ */ + + { register struct mbuf *n=m; +# ifdef ARGO_DEBUG + int chain_length = 0; +# endif ARGO_DEBUG + + for(;;) { + tpdu_len += n->m_len; + IFDEBUG(D_MBUF_MEAS) + if( n->m_flags & M_EXT) { + IncStat(ts_mb_cluster); + } else { + IncStat(ts_mb_small); + } + chain_length ++; + ENDDEBUG + if (n->m_next == MNULL ) { + break; + } + n = n->m_next; + } + IFDEBUG(D_MBUF_MEAS) + if(chain_length > 16) + chain_length = 0; /* zero used for anything > 16 */ + tp_stat.ts_mb_len_distr[chain_length] ++; + ENDDEBUG + } + IFTRACE(D_TPINPUT) + tptraceTPCB(TPPTtpduin, hdr->tpdu_type, hdr, hdr->tpdu_li+1, tpdu_len, + 0); + ENDTRACE + + dref = ntohs((short)hdr->tpdu_dref); + sref = ntohs((short)hdr->tpdu_sref); + dutype = (int)hdr->tpdu_type; + + IFDEBUG(D_TPINPUT) + printf("input: dutype 0x%x cons_channel 0x%x dref 0x%x\n", dutype, + cons_channel, dref); + printf("input: dref 0x%x sref 0x%x\n", dref, sref); + ENDDEBUG + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "channel dutype dref ", + cons_channel, dutype, dref, 0); + ENDTRACE + + +#ifdef ARGO_DEBUG + if( (dutype < TP_MIN_TPDUTYPE) || (dutype > TP_MAX_TPDUTYPE)) { + printf("BAD dutype! 
0x%x, channel 0x%x dref 0x%x\n", + dutype, cons_channel, dref); + dump_buf (m, sizeof( struct mbuf )); + + IncStat(ts_inv_dutype); + goto discard; + } +#endif /* ARGO_DEBUG */ + + CHECK( (dutype < TP_MIN_TPDUTYPE || dutype > TP_MAX_TPDUTYPE), + E_TP_INV_TPDU, ts_inv_dutype, respond, + 2 ); + /* unfortunately we can't take the address of the tpdu_type field, + * since it's a bit field - so we just use the constant offset 2 + */ + + /* Now this isn't very neat but since you locate a pcb one way + * at the beginning of connection establishment, and by + * the dref for each tpdu after that, we have to treat CRs differently + */ + if ( dutype == CR_TPDU_type ) { + u_char alt_classes = 0; + + preferred_class = 1 << hdr->tpdu_CRclass; + opt = hdr->tpdu_CRoptions; + + WHILE_OPTIONS(P, hdr, 1 ) /* { */ + + switch( vbptr(P)->tpv_code ) { + + case TPP_tpdu_size: + vb_getval(P, u_char, dusize); + IFDEBUG(D_TPINPUT) + printf("CR dusize 0x%x\n", dusize); + ENDDEBUG + /* COS tests: NBS IA (Dec. 1987) Sec. 
4.5.2.1 */ + if (dusize < TP_MIN_TPDUSIZE || dusize > TP_MAX_TPDUSIZE) + dusize = TP_DFL_TPDUSIZE; + break; + case TPP_ptpdu_size: + switch (vbptr(P)->tpv_len) { + case 1: pdusize = vbval(P, u_char); break; + case 2: pdusize = ntohs(vbval(P, u_short)); break; + default: ; + IFDEBUG(D_TPINPUT) + printf("malformed prefered TPDU option\n"); + ENDDEBUG + } + break; + case TPP_addl_opt: + vb_getval(P, u_char, addlopt); + break; + case TPP_calling_sufx: + /* could use vb_getval, but we want to save the loc & len + * for later use + */ + fsufxloc = (caddr_t) &vbptr(P)->tpv_val; + fsufxlen = vbptr(P)->tpv_len; + IFDEBUG(D_TPINPUT) + printf("CR fsufx:"); + { register int j; + for(j=0; jtpv_val; + lsufxlen = vbptr(P)->tpv_len; + IFDEBUG(D_TPINPUT) + printf("CR lsufx:"); + { register int j; + for(j=0; jtpv_val - (caddr_t)hdr) ); + setversion: + version = vbval(P, u_char); + break; + case TPP_acktime: + vb_getval(P, u_short, acktime); + acktime = ntohs(acktime); + acktime = acktime/500; /* convert to slowtimo ticks */ + if((short)acktime <=0 ) + acktime = 2; /* don't allow a bad peer to foul us up */ + IFDEBUG(D_TPINPUT) + printf("CR acktime 0x%x\n", acktime); + ENDDEBUG + break; + + case TPP_alt_class: + { + u_char *aclass = 0; + register int i; + static u_char bad_alt_classes[5] = + { ~0, ~3, ~5, ~0xf, ~0x1f}; + + aclass = + (u_char *) &(((struct tp_vbp *)P)->tpv_val); + for (i = ((struct tp_vbp *)P)->tpv_len; i>0; i--) { + alt_classes |= (1<<((*aclass++)>>4)); + } + CHECK( (bad_alt_classes[hdr->tpdu_CRclass] & alt_classes), + E_TP_INV_PVAL, ts_inv_aclass, respond, + ((caddr_t)aclass) - (caddr_t)hdr); + IFDEBUG(D_TPINPUT) + printf("alt_classes 0x%x\n", alt_classes); + ENDDEBUG + } + break; + + case TPP_security: + case TPP_residER: + case TPP_priority: + case TPP_transdelay: + case TPP_throughput: + case TPP_addl_info: + case TPP_subseq: + default: + IFDEBUG(D_TPINPUT) + printf("param ignored CR_TPDU code= 0x%x\n", + vbptr(P)->tpv_code); + ENDDEBUG + 
IncStat(ts_param_ignored); + break; + + case TPP_checksum: + IFDEBUG(D_TPINPUT) + printf("CR before cksum\n"); + ENDDEBUG + + CHECK( iso_check_csum(m, tpdu_len), + E_TP_INV_PVAL, ts_bad_csum, discard, 0) + + IFDEBUG(D_TPINPUT) + printf("CR before cksum\n"); + ENDDEBUG + break; + } + + /* } */ END_WHILE_OPTIONS(P) + + if (lsufxlen == 0) { + /* can't look for a tpcb w/o any called sufx */ + error = E_TP_LENGTH_INVAL; + IncStat(ts_inv_sufx); + goto respond; + } else { + register struct tp_pcb *t; + /* + * The intention here is to trap all CR requests + * to a given nsap, for constructing transport + * service bridges at user level; so these + * intercepts should precede the normal listens. + * Phrasing the logic in this way also allows for + * mop-up listeners, which we don't currently implement. + * We also wish to have a single socket be able to + * listen over any network service provider, + * (cons or clns or ip). + */ + for (t = tp_listeners; t ; t = t->tp_nextlisten) + if ((t->tp_lsuffixlen == 0 || + (lsufxlen == t->tp_lsuffixlen && + bcmp(lsufxloc, t->tp_lsuffix, lsufxlen) == 0)) && + ((t->tp_flags & TPF_GENERAL_ADDR) || + (laddr->sa_family == t->tp_domain && + (*t->tp_nlproto->nlp_cmpnetaddr) + (t->tp_npcb, laddr, TP_LOCAL)))) + break; + + CHECK(t == 0, E_TP_NO_SESSION, ts_inv_sufx, respond, + (1 + 2 + (caddr_t)&hdr->_tpduf - (caddr_t)hdr)) + /* _tpduf is the fixed part; add 2 to get the dref bits of + * the fixed part (can't take the address of a bit field) + */ + IFDEBUG(D_TPINPUT) + printf("checking if dup CR\n"); + ENDDEBUG + tpcb = t; + for (t = tpcb->tp_next; t != tpcb; t = t->tp_next) { + if (sref != t->tp_fref) + continue; + if ((*tpcb->tp_nlproto->nlp_cmpnetaddr)( + t->tp_npcb, faddr, TP_FOREIGN)) { + IFDEBUG(D_TPINPUT) + printf("duplicate CR discarded\n"); + ENDDEBUG + goto discard; + } + } + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "tp_input: tpcb *lsufxloc tpstate", + tpcb, *lsufxloc, tpcb->tp_state, 0); + ENDTRACE + } + + /* + * WE HAVE A TPCB + * 
already know that the classes in the CR match at least + * one class implemented, but we don't know yet if they + * include any classes permitted by this server. + */ + + IFDEBUG(D_TPINPUT) + printf("HAVE A TPCB 1: 0x%x\n", tpcb); + ENDDEBUG + IFDEBUG(D_CONN) + printf( +"CR: bef CHKS: flags 0x%x class_to_use 0x%x alt 0x%x opt 0x%x tp_class 0x%x\n", + tpcb->tp_flags, class_to_use, alt_classes, opt, tpcb->tp_class); + ENDDEBUG + /* tpcb->tp_class doesn't include any classes not implemented */ + class_to_use = (preferred_class & tpcb->tp_class); + if( (class_to_use = preferred_class & tpcb->tp_class) == 0 ) + class_to_use = alt_classes & tpcb->tp_class; + + class_to_use = 1 << tp_mask_to_num(class_to_use); + + { + tpp = tpcb->_tp_param; + tpp.p_class = class_to_use; + tpp.p_tpdusize = dusize; + tpp.p_ptpdusize = pdusize; + tpp.p_xtd_format = (opt & TPO_XTD_FMT) == TPO_XTD_FMT; + tpp.p_xpd_service = (addlopt & TPAO_USE_TXPD) == TPAO_USE_TXPD; + tpp.p_use_checksum = (tpp.p_class == TP_CLASS_0)?0: + (addlopt & TPAO_NO_CSUM) == 0; + tpp.p_version = version; +#ifdef notdef + tpp.p_use_efc = (opt & TPO_USE_EFC) == TPO_USE_EFC; + tpp.p_use_nxpd = (addlopt & TPAO_USE_NXPD) == TPAO_USE_NXPD; + tpp.p_use_rcc = (addlopt & TPAO_USE_RCC) == TPAO_USE_RCC; +#endif /* notdef */ + + CHECK( + tp_consistency(tpcb, 0 /* not force or strict */, &tpp) != 0, + E_TP_NEGOT_FAILED, ts_negotfailed, clear_parent_tcb, + (1 + 2 + (caddr_t)&hdr->_tpdufr.CRCC - (caddr_t)hdr) + /* ^ more or less the location of class */ + ) + } + IFTRACE(D_CONN) + tptrace(TPPTmisc, + "after 1 consist class_to_use class, out, tpconsout", + class_to_use, + tpcb->tp_class, dgout_routine, tpcons_output + ); + ENDTRACE + CHECK( + ((class_to_use == TP_CLASS_0)&&(dgout_routine != tpcons_output)), + E_TP_NEGOT_FAILED, ts_negotfailed, clear_parent_tcb, + (1 + 2 + (caddr_t)&hdr->_tpdufr.CRCC - (caddr_t)hdr) + /* ^ more or less the location of class */ + ) + IFDEBUG(D_CONN) + printf("CR: after CRCCCHECKS: tpcb 0x%x, flags 
0x%x\n", + tpcb, tpcb->tp_flags); + ENDDEBUG + takes_data = TRUE; + e.ATTR(CR_TPDU).e_cdt = hdr->tpdu_CRcdt; + e.ev_number = CR_TPDU; + + so = tpcb->tp_sock; + if (so->so_options & SO_ACCEPTCONN) { + struct tp_pcb *parent_tpcb = tpcb; + /* + * Create a socket, tpcb, ll pcb, etc. + * for this newborn connection, and fill in all the values. + */ + IFDEBUG(D_CONN) + printf("abt to call tp_newsocket(0x%x, 0x%x, 0x%x, 0x%x)\n", + so, laddr, faddr, cons_channel); + ENDDEBUG + if( (so = + tp_newsocket(so, faddr, cons_channel, + class_to_use, + ((tpcb->tp_netservice == IN_CLNS) ? IN_CLNS : + (dgout_routine == tpcons_output)?ISO_CONS:ISO_CLNS)) + ) == (struct socket *)0 ) { + /* note - even if netservice is IN_CLNS, as far as + * the tp entity is concerned, the only differences + * are CO vs CL + */ + IFDEBUG(D_CONN) + printf("tp_newsocket returns 0\n"); + ENDDEBUG + goto discard; + clear_parent_tcb: + tpcb = 0; + goto respond; + } + tpcb = sototpcb(so); + insque(tpcb, parent_tpcb); + + /* + * Stash the addresses in the net level pcb + * kind of like a pcbconnect() but don't need + * or want all those checks. 
+ */ + (tpcb->tp_nlproto->nlp_putnetaddr)(tpcb->tp_npcb, faddr, TP_FOREIGN); + (tpcb->tp_nlproto->nlp_putnetaddr)(tpcb->tp_npcb, laddr, TP_LOCAL); + + /* stash the f suffix in the new tpcb */ + if (tpcb->tp_fsuffixlen = fsufxlen) { + bcopy(fsufxloc, tpcb->tp_fsuffix, fsufxlen); + (tpcb->tp_nlproto->nlp_putsufx) + (tpcb->tp_npcb, fsufxloc, fsufxlen, TP_FOREIGN); + } + /* stash the l suffix in the new tpcb */ + tpcb->tp_lsuffixlen = lsufxlen; + bcopy(lsufxloc, tpcb->tp_lsuffix, lsufxlen); + (tpcb->tp_nlproto->nlp_putsufx) + (tpcb->tp_npcb, lsufxloc, lsufxlen, TP_LOCAL); +#ifdef TP_PERF_MEAS + if( tpcb->tp_perf_on = perf_meas ) { /* assignment */ + /* ok, let's create an mbuf for stashing the + * statistics if one doesn't already exist + */ + (void) tp_setup_perf(tpcb); + } +#endif /* TP_PERF_MEAS */ + tpcb->tp_fref = sref; + + /* We've already checked for consistency with the options + * set in tpp, but we couldn't set them earlier because + * we didn't want to change options in the LISTENING tpcb. + * Now we set the options in the new socket's tpcb. + */ + (void) tp_consistency( tpcb, TP_FORCE, &tpp); + + if(!tpcb->tp_use_checksum) + IncStat(ts_csum_off); + if(tpcb->tp_xpd_service) + IncStat(ts_use_txpd); + if(tpcb->tp_xtd_format) + IncStat(ts_xtd_fmt); + + tpcb->tp_peer_acktime = acktime; + + /* + * The following kludge is used to test retransmissions and + * timeout during connection establishment. + */ + IFDEBUG(D_ZDREF) + IncStat(ts_zdebug); + /*tpcb->tp_fref = 0;*/ + ENDDEBUG + } + LOCAL_CREDIT(tpcb); + IncStat(ts_CR_rcvd); + if (!tpcb->tp_cebit_off) { + tpcb->tp_win_recv = tp_start_win << 8; + tpcb->tp_cong_sample.cs_size = 0; + CONG_INIT_SAMPLE(tpcb); + CONG_UPDATE_SAMPLE(tpcb, ce_bit); + } + } else if ( dutype == ER_TPDU_type ) { + /* + * ER TPDUs have to be recognized separately + * because they don't necessarily have a tpcb + * with them and we don't want err out looking for such + * a beast. 
+ * We could put a bunch of little kludges in the + * next section of code so it would avoid references to tpcb + * if dutype == ER_TPDU_type but we don't want code for ERs to + * mess up code for data transfer. + */ + IncStat(ts_ER_rcvd); + e.ev_number = ER_TPDU; + e.ATTR(ER_TPDU).e_reason = (u_char)hdr->tpdu_ERreason; + CHECK (((int)dref <= 0 || dref >= tp_refinfo.tpr_size || + (tpcb = tp_ref[dref].tpr_pcb ) == (struct tp_pcb *) 0 || + tpcb->tp_refstate == REF_FREE || + tpcb->tp_refstate == REF_FROZEN), + E_TP_MISM_REFS, ts_inv_dref, discard, 0) + + } else { + /* tpdu type is CC, XPD, XAK, GR, AK, DR, DC, or DT */ + + /* In the next 4 checks, + * _tpduf is the fixed part; add 2 to get the dref bits of + * the fixed part (can't take the address of a bit field) + */ +#if TPCONS + if (cons_channel && dutype == DT_TPDU_type) { + struct isopcb *isop = ((struct isopcb *) + ((struct pklcd *)cons_channel)->lcd_upnext); + if (isop && isop->isop_refcnt == 1 && isop->isop_socket && + (tpcb = sototpcb(isop->isop_socket)) && + (tpcb->tp_class == TP_CLASS_0/* || == CLASS_1 */)) { + IFDEBUG(D_TPINPUT) + printf("tpinput_dt: class 0 short circuit\n"); + ENDDEBUG + dref = tpcb->tp_lref; + sref = tpcb->tp_fref; + CHECK( (tpcb->tp_refstate == REF_FREE), + E_TP_MISM_REFS,ts_inv_dref, nonx_dref, + (1 + 2 + (caddr_t)&hdr->_tpduf - (caddr_t)hdr)) + goto tp0_data; + } + + } +#endif + { + + CHECK( ((int)dref <= 0 || dref >= tp_refinfo.tpr_size) , + E_TP_MISM_REFS,ts_inv_dref, nonx_dref, + (1 + 2 + (caddr_t)&hdr->_tpduf - (caddr_t)hdr)) + CHECK( ((tpcb = tp_ref[dref].tpr_pcb ) == (struct tp_pcb *) 0 ), + E_TP_MISM_REFS,ts_inv_dref, nonx_dref, + (1 + 2 + (caddr_t)&hdr->_tpduf - (caddr_t)hdr)) + CHECK( (tpcb->tp_refstate == REF_FREE), + E_TP_MISM_REFS,ts_inv_dref, nonx_dref, + (1 + 2 + (caddr_t)&hdr->_tpduf - (caddr_t)hdr)) + } + + IFDEBUG(D_TPINPUT) + printf("HAVE A TPCB 2: 0x%x\n", tpcb); + ENDDEBUG + + /* causes a DR to be sent for CC; ER for all else */ + CHECK( (tpcb->tp_refstate == 
REF_FROZEN), + (dutype == CC_TPDU_type?E_TP_NO_SESSION:E_TP_MISM_REFS), + ts_inv_dref, respond, + (1 + 2 + (caddr_t)&hdr->_tpduf - (caddr_t)hdr)) + + IFDEBUG(D_TPINPUT) + printf("state of dref %d ok, tpcb 0x%x\n", dref,tpcb); + ENDDEBUG + /* + * At this point the state of the dref could be + * FROZEN: tpr_pcb == NULL, has ( reference only) timers + * for example, DC may arrive after the close() has detached + * the tpcb (e.g., if user turned off SO_LISTEN option) + * OPENING : a tpcb exists but no timers yet + * OPEN : tpcb exists & timers are outstanding + */ + + if (!tpcb->tp_cebit_off) + CONG_UPDATE_SAMPLE(tpcb, ce_bit); + + dusize = tpcb->tp_tpdusize; + pdusize = tpcb->tp_ptpdusize; + + dutype = hdr->tpdu_type << 8; /* for the switch below */ + + WHILE_OPTIONS(P, hdr, tpcb->tp_xtd_format) /* { */ + +#define caseof(x,y) case (((x)<<8)+(y)) + switch( dutype | vbptr(P)->tpv_code ) { + + caseof( CC_TPDU_type, TPP_addl_opt ): + /* not in class 0; 1 octet */ + vb_getval(P, u_char, addlopt); + break; + caseof( CC_TPDU_type, TPP_tpdu_size ): + { + u_char odusize = dusize; + vb_getval(P, u_char, dusize); + CHECK( (dusize < TP_MIN_TPDUSIZE || + dusize > TP_MAX_TPDUSIZE || dusize > odusize), + E_TP_INV_PVAL, ts_inv_pval, respond, + (1 + (caddr_t)&vbptr(P)->tpv_val - (caddr_t)hdr) ) + IFDEBUG(D_TPINPUT) + printf("CC dusize 0x%x\n", dusize); + ENDDEBUG + } + break; + caseof( CC_TPDU_type, TPP_ptpdu_size ): + { + u_short opdusize = pdusize; + switch (vbptr(P)->tpv_len) { + case 1: pdusize = vbval(P, u_char); break; + case 2: pdusize = ntohs(vbval(P, u_short)); break; + default: ; + IFDEBUG(D_TPINPUT) + printf("malformed prefered TPDU option\n"); + ENDDEBUG + } + CHECK( (pdusize == 0 || + (opdusize && (pdusize > opdusize))), + E_TP_INV_PVAL, ts_inv_pval, respond, + (1 + (caddr_t)&vbptr(P)->tpv_val - (caddr_t)hdr) ) + } + break; + caseof( CC_TPDU_type, TPP_calling_sufx): + IFDEBUG(D_TPINPUT) + printf("CC calling (local) sufxlen 0x%x\n", lsufxlen); + ENDDEBUG + lsufxloc = 
(caddr_t) &vbptr(P)->tpv_val; + lsufxlen = vbptr(P)->tpv_len; + break; + caseof( CC_TPDU_type, TPP_acktime ): + /* class 4 only, 2 octets */ + vb_getval(P, u_short, acktime); + acktime = ntohs(acktime); + acktime = acktime/500; /* convert to slowtimo ticks */ + if( (short)acktime <=0 ) + acktime = 2; + break; + caseof( CC_TPDU_type, TPP_called_sufx): + fsufxloc = (caddr_t) &vbptr(P)->tpv_val; + fsufxlen = vbptr(P)->tpv_len; + IFDEBUG(D_TPINPUT) + printf("CC called (foreign) sufx len %d\n", fsufxlen); + ENDDEBUG + break; + + caseof( CC_TPDU_type, TPP_checksum): + caseof( DR_TPDU_type, TPP_checksum): + caseof( DT_TPDU_type, TPP_checksum): + caseof( XPD_TPDU_type, TPP_checksum): + if( tpcb->tp_use_checksum ) { + CHECK( iso_check_csum(m, tpdu_len), + E_TP_INV_PVAL, ts_bad_csum, discard, 0) + } + break; + + /* this is different from the above because in the context + * of concat/ sep tpdu_len might not be the same as hdr len + */ + caseof( AK_TPDU_type, TPP_checksum): + caseof( XAK_TPDU_type, TPP_checksum): + caseof( DC_TPDU_type, TPP_checksum): + if( tpcb->tp_use_checksum ) { + CHECK( iso_check_csum(m, (int)hdr->tpdu_li + 1), + E_TP_INV_PVAL, ts_bad_csum, discard, 0) + } + break; +#ifdef notdef + caseof( DR_TPDU_type, TPP_addl_info ): + /* ignore - its length and meaning are + * user defined and there's no way + * to pass this info to the user anyway + */ + break; +#endif /* notdef */ + + caseof( AK_TPDU_type, TPP_subseq ): + /* used after reduction of window */ + vb_getval(P, u_short, subseq); + subseq = ntohs(subseq); + IFDEBUG(D_ACKRECV) + printf("AK dref 0x%x Subseq 0x%x\n", dref, subseq); + ENDDEBUG + break; + + caseof( AK_TPDU_type, TPP_flow_cntl_conf ): + { + u_int ylwe; + u_short ysubseq, ycredit; + + fcc_present = TRUE; + vb_getval(P, u_int, ylwe); + vb_getval(P, u_short, ysubseq); + vb_getval(P, u_short, ycredit); + ylwe = ntohl(ylwe); + ysubseq = ntohs(ysubseq); + ycredit = ntohs(ycredit); + IFDEBUG(D_ACKRECV) + printf("%s%x, subseq 0x%x, cdt 0x%x dref 
0x%x\n", + "AK FCC lwe 0x", ylwe, ysubseq, ycredit, dref); + ENDDEBUG + } + break; + + default: + IFDEBUG(D_TPINPUT) + printf("param ignored dutype 0x%x, code 0x%x\n", + dutype, vbptr(P)->tpv_code); + ENDDEBUG + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "param ignored dutype code ", + dutype, vbptr(P)->tpv_code ,0,0); + ENDTRACE + IncStat(ts_param_ignored); + break; +#undef caseof + } + /* } */ END_WHILE_OPTIONS(P) + + /* NOTE: the variable dutype has been shifted left! */ + + switch( hdr->tpdu_type ) { + case CC_TPDU_type: + /* If CC comes back with an unacceptable class + * respond with a DR or ER + */ + + opt = hdr->tpdu_CCoptions; /* 1 byte */ + + { + tpp = tpcb->_tp_param; + tpp.p_class = (1<tpdu_CCclass); + tpp.p_tpdusize = dusize; + tpp.p_ptpdusize = pdusize; + tpp.p_dont_change_params = 0; + tpp.p_xtd_format = (opt & TPO_XTD_FMT) == TPO_XTD_FMT; + tpp.p_xpd_service = (addlopt & TPAO_USE_TXPD) == TPAO_USE_TXPD; + tpp.p_use_checksum = (addlopt & TPAO_NO_CSUM) == 0; +#ifdef notdef + tpp.p_use_efc = (opt & TPO_USE_EFC) == TPO_USE_EFC; + tpp.p_use_nxpd = (addlopt & TPAO_USE_NXPD) == TPAO_USE_NXPD; + tpp.p_use_rcc = (addlopt & TPAO_USE_RCC) == TPAO_USE_RCC; +#endif /* notdef */ + + CHECK( + tp_consistency(tpcb, TP_FORCE, &tpp) != 0, + E_TP_NEGOT_FAILED, ts_negotfailed, respond, + (1 + 2 + (caddr_t)&hdr->_tpdufr.CRCC - (caddr_t)hdr) + /* ^ more or less the location of class */ + ) + IFTRACE(D_CONN) + tptrace(TPPTmisc, + "after 1 consist class, out, tpconsout", + tpcb->tp_class, dgout_routine, tpcons_output, 0 + ); + ENDTRACE + CHECK( + ((class_to_use == TP_CLASS_0)&& + (dgout_routine != tpcons_output)), + E_TP_NEGOT_FAILED, ts_negotfailed, respond, + (1 + 2 + (caddr_t)&hdr->_tpdufr.CRCC - (caddr_t)hdr) + /* ^ more or less the location of class */ + ) +#if TPCONS + if (tpcb->tp_netservice == ISO_CONS && + class_to_use == TP_CLASS_0) { + struct isopcb *isop = (struct isopcb *)tpcb->tp_npcb; + struct pklcd *lcp = (struct pklcd *)isop->isop_chan; + lcp->lcd_flags &= 
~X25_DG_CIRCUIT; + } +#endif + } + if( ! tpcb->tp_use_checksum) + IncStat(ts_csum_off); + if(tpcb->tp_xpd_service) + IncStat(ts_use_txpd); + if(tpcb->tp_xtd_format) + IncStat(ts_xtd_fmt); + + IFTRACE(D_CONN) + tptrace(TPPTmisc, "after CC class flags dusize CCclass", + tpcb->tp_class, tpcb->tp_flags, tpcb->tp_tpdusize, + hdr->tpdu_CCclass); + ENDTRACE + + /* if called or calling suffices appeared on the CC, + * they'd better jive with what's in the pcb + */ + if( fsufxlen ) { + CHECK( ((tpcb->tp_fsuffixlen != fsufxlen) || + bcmp(fsufxloc, tpcb->tp_fsuffix, fsufxlen)), + E_TP_INV_PVAL,ts_inv_sufx, respond, + (1+fsufxloc - (caddr_t)hdr)) + } + if( lsufxlen ) { + CHECK( ((tpcb->tp_lsuffixlen != lsufxlen) || + bcmp(lsufxloc, tpcb->tp_lsuffix, lsufxlen)), + E_TP_INV_PVAL,ts_inv_sufx, respond, + (1+lsufxloc - (caddr_t)hdr)) + } + + e.ATTR(CC_TPDU).e_sref = sref; + e.ATTR(CC_TPDU).e_cdt = hdr->tpdu_CCcdt; + takes_data = TRUE; + e.ev_number = CC_TPDU; + IncStat(ts_CC_rcvd); + break; + + case DC_TPDU_type: + if (sref != tpcb->tp_fref) + printf("INPUT: inv sufx DCsref 0x%x, tp_fref 0x%x\n", + sref, tpcb->tp_fref); + + CHECK( (sref != tpcb->tp_fref), + E_TP_MISM_REFS, ts_inv_sufx, discard, + (1 + (caddr_t)&hdr->tpdu_DCsref - (caddr_t)hdr)) + + e.ev_number = DC_TPDU; + IncStat(ts_DC_rcvd); + break; + + case DR_TPDU_type: + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "DR recvd", hdr->tpdu_DRreason, 0, 0, 0); + ENDTRACE + if (sref != tpcb->tp_fref) { + printf("INPUT: inv sufx DRsref 0x%x tp_fref 0x%x\n", + sref, tpcb->tp_fref); + } + + CHECK( (sref != 0 && sref != tpcb->tp_fref && + tpcb->tp_state != TP_CRSENT), + (TP_ERROR_SNDC | E_TP_MISM_REFS),ts_inv_sufx, respond, + (1 + (caddr_t)&hdr->tpdu_DRsref - (caddr_t)hdr)) + + e.ATTR(DR_TPDU).e_reason = hdr->tpdu_DRreason; + e.ATTR(DR_TPDU).e_sref = (u_short)sref; + takes_data = TRUE; + e.ev_number = DR_TPDU; + IncStat(ts_DR_rcvd); + break; + + case ER_TPDU_type: + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "ER recvd", 
hdr->tpdu_ERreason,0,0,0); + ENDTRACE + e.ev_number = ER_TPDU; + e.ATTR(ER_TPDU).e_reason = hdr->tpdu_ERreason; + IncStat(ts_ER_rcvd); + break; + + case AK_TPDU_type: + + e.ATTR(AK_TPDU).e_subseq = subseq; + e.ATTR(AK_TPDU).e_fcc_present = fcc_present; + + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seqeot = ntohl(hdr->tpdu_seqeotX); + e.ATTR(AK_TPDU).e_seq = seqeotX.s_seq; + e.ATTR(AK_TPDU).e_cdt = ntohs(hdr->tpdu_AKcdtX); +#else + e.ATTR(AK_TPDU).e_cdt = hdr->tpdu_AKcdtX; + e.ATTR(AK_TPDU).e_seq = hdr->tpdu_AKseqX; +#endif /* BYTE_ORDER */ + } else { + e.ATTR(AK_TPDU).e_cdt = hdr->tpdu_AKcdt; + e.ATTR(AK_TPDU).e_seq = hdr->tpdu_AKseq; + } + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "AK recvd seq cdt subseq fcc_pres", + e.ATTR(AK_TPDU).e_seq, e.ATTR(AK_TPDU).e_cdt, + subseq, fcc_present); + ENDTRACE + + e.ev_number = AK_TPDU; + IncStat(ts_AK_rcvd); + IncPStat(tpcb, tps_AK_rcvd); + break; + + case XAK_TPDU_type: + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seqeot = ntohl(hdr->tpdu_seqeotX); + e.ATTR(XAK_TPDU).e_seq = seqeotX.s_seq; +#else + e.ATTR(XAK_TPDU).e_seq = hdr->tpdu_XAKseqX; +#endif /* BYTE_ORDER */ + } else { + e.ATTR(XAK_TPDU).e_seq = hdr->tpdu_XAKseq; + } + e.ev_number = XAK_TPDU; + IncStat(ts_XAK_rcvd); + IncPStat(tpcb, tps_XAK_rcvd); + break; + + case XPD_TPDU_type: + if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seqeot = ntohl(hdr->tpdu_seqeotX); + e.ATTR(XPD_TPDU).e_seq = seqeotX.s_seq; +#else + e.ATTR(XPD_TPDU).e_seq = hdr->tpdu_XPDseqX; +#endif /* BYTE_ORDER */ + } else { + e.ATTR(XPD_TPDU).e_seq = hdr->tpdu_XPDseq; + } + takes_data = TRUE; + e.ev_number = XPD_TPDU; + IncStat(ts_XPD_rcvd); + IncPStat(tpcb, tps_XPD_rcvd); + break; + + case DT_TPDU_type: + { /* the y option will cause occasional packets to be dropped. + * A little crude but it works. 
+ */ + + IFDEBUG(D_DROP) + if(time.tv_usec & 0x4 && hdr->tpdu_DTseq & 0x1) { + IncStat(ts_ydebug); + goto discard; + } + ENDDEBUG + } + if (tpcb->tp_class == TP_CLASS_0) { + tp0_data: + e.ATTR(DT_TPDU).e_seq = 0; /* actually don't care */ + e.ATTR(DT_TPDU).e_eot = (((struct tp0du *)hdr)->tp0du_eot); + } else if (tpcb->tp_xtd_format) { +#ifdef BYTE_ORDER + union seq_type seqeotX; + + seqeotX.s_seqeot = ntohl(hdr->tpdu_seqeotX); + e.ATTR(DT_TPDU).e_seq = seqeotX.s_seq; + e.ATTR(DT_TPDU).e_eot = seqeotX.s_eot; +#else + e.ATTR(DT_TPDU).e_seq = hdr->tpdu_DTseqX; + e.ATTR(DT_TPDU).e_eot = hdr->tpdu_DTeotX; +#endif /* BYTE_ORDER */ + } else { + e.ATTR(DT_TPDU).e_seq = hdr->tpdu_DTseq; + e.ATTR(DT_TPDU).e_eot = hdr->tpdu_DTeot; + } + if(e.ATTR(DT_TPDU).e_eot) + IncStat(ts_eot_input); + takes_data = TRUE; + e.ev_number = DT_TPDU; + IncStat(ts_DT_rcvd); + IncPStat(tpcb, tps_DT_rcvd); + break; + + case GR_TPDU_type: + tp_indicate(T_DISCONNECT, tpcb, ECONNABORTED); + /* drop through */ + default: + /* this should NEVER happen because there is a + * check for dutype well above here + */ + error = E_TP_INV_TPDU; /* causes an ER */ + IFDEBUG(D_TPINPUT) + printf("INVALID dutype 0x%x\n", hdr->tpdu_type); + ENDDEBUG + IncStat(ts_inv_dutype); + goto respond; + } + } + /* peel off the tp header; + * remember that the du_li doesn't count itself. + * This may leave us w/ an empty mbuf at the front of a chain. 
+ * We can't just throw away the empty mbuf because hdr still points + * into the mbuf's data area and we're still using hdr (the tpdu header) + */ + m->m_len -= ((int)hdr->tpdu_li + 1); + m->m_data += ((int)hdr->tpdu_li + 1); + + if (takes_data) { + int max = tpdu_info[ hdr->tpdu_type ] [TP_MAX_DATA_INDEX]; + int datalen = tpdu_len - hdr->tpdu_li - 1, mbtype = MT_DATA; + struct { + struct tp_disc_reason dr; + struct cmsghdr x_hdr; + } x; +#define c_hdr x.x_hdr + register struct mbuf *n; + + CHECK( (max && datalen > max), E_TP_LENGTH_INVAL, + ts_inv_length, respond, (max + hdr->tpdu_li + 1) ); + switch( hdr->tpdu_type ) { + + case CR_TPDU_type: + c_hdr.cmsg_type = TPOPT_CONN_DATA; + goto make_control_msg; + + case CC_TPDU_type: + c_hdr.cmsg_type = TPOPT_CFRM_DATA; + goto make_control_msg; + + case DR_TPDU_type: + x.dr.dr_hdr.cmsg_len = sizeof(x) - sizeof(c_hdr); + x.dr.dr_hdr.cmsg_type = TPOPT_DISC_REASON; + x.dr.dr_hdr.cmsg_level = SOL_TRANSPORT; + x.dr.dr_reason = hdr->tpdu_DRreason; + c_hdr.cmsg_type = TPOPT_DISC_DATA; + make_control_msg: + datalen += sizeof(c_hdr); + c_hdr.cmsg_len = datalen; + c_hdr.cmsg_level = SOL_TRANSPORT; + mbtype = MT_CONTROL; + MGET(n, M_DONTWAIT, MT_DATA); + if (n == 0) + {m_freem(m); m = 0; datalen = 0; goto invoke; } + if (hdr->tpdu_type == DR_TPDU_type) { + datalen += sizeof(x) - sizeof(c_hdr); + bcopy((caddr_t)&x, mtod(n, caddr_t), n->m_len = sizeof(x)); + } else + bcopy((caddr_t)&c_hdr, mtod(n, caddr_t), + n->m_len = sizeof(c_hdr)); + n->m_next = m; + m = n; + /* FALLTHROUGH */ + + case XPD_TPDU_type: + if (mbtype != MT_CONTROL) + mbtype = MT_OOBDATA; + m->m_flags |= M_EOR; + /* FALLTHROUGH */ + + case DT_TPDU_type: + for (n = m; n; n = n->m_next) { + MCHTYPE(n, mbtype); + } + invoke: + e.ATTR(DT_TPDU).e_datalen = datalen; + e.ATTR(DT_TPDU).e_data = m; + break; + + default: + printf( + "ERROR in tp_input! 
hdr->tpdu_type 0x%x takes_data 0x%x m 0x%x\n", + hdr->tpdu_type, takes_data, m); + break; + } + /* prevent m_freem() after tp_driver() from throwing it all away */ + m = MNULL; + } + + IncStat(ts_tpdu_rcvd); + + IFDEBUG(D_TPINPUT) + printf( "tp_input: before driver, state 0x%x event 0x%x m 0x%x", + tpcb->tp_state, e.ev_number, m ); + printf(" e.e_data 0x%x\n", e.ATTR(DT_TPDU).e_data); + printf("takes_data 0x%x m_len 0x%x, tpdu_len 0x%x\n", + takes_data, (m==MNULL)?0:m->m_len, tpdu_len); + ENDDEBUG + + error = tp_driver(tpcb, &e); + + ASSERT(tpcb != (struct tp_pcb *)0); + ASSERT(tpcb->tp_sock != (struct socket *)0); + if( tpcb->tp_sock->so_error == 0 ) + tpcb->tp_sock->so_error = error; + + /* Kludge to keep the state tables under control (adding + * data on connect & disconnect & freeing the mbuf containing + * the data would have exploded the tables and made a big mess ). + */ + switch(e.ev_number) { + case CC_TPDU: + case DR_TPDU: + case CR_TPDU: + m = e.ATTR(CC_TPDU).e_data; /* same field for all three dutypes */ + IFDEBUG(D_TPINPUT) + printf("after driver, restoring m to 0x%x, takes_data 0x%x\n", + m, takes_data); + ENDDEBUG + break; + default: + break; + } + /* Concatenated sequences are terminated by any tpdu that + * carries data: CR, CC, DT, XPD, DR. + * All other tpdu types may be concatenated: AK, XAK, DC, ER. + */ + +separate: + if ( takes_data == 0 ) { + ASSERT( m != MNULL ); + /* + * we already peeled off the prev. 
tp header so + * we can just pull up some more and repeat + */ + + if( m = tp_inputprep(m) ) { + IFDEBUG(D_TPINPUT) + hdr = mtod(m, struct tpdu *); + printf("tp_input @ separate: hdr 0x%x size %d m 0x%x\n", + hdr, (int) hdr->tpdu_li + 1, m); + dump_mbuf(m, "tp_input after driver, at separate"); + ENDDEBUG + + IncStat(ts_concat_rcvd); + goto again; + } + } + if ( m != MNULL ) { + IFDEBUG(D_TPINPUT) + printf("tp_input : m_freem(0x%x)\n", m); + ENDDEBUG + m_freem(m); + IFDEBUG(D_TPINPUT) + printf("tp_input : after m_freem 0x%x\n", m); + ENDDEBUG + } + return (ProtoHook) tpcb; + +discard: + /* class 4: drop the tpdu */ + /* class 2,0: Should drop the net connection, if you can figure out + * to which connection it applies + */ + IFDEBUG(D_TPINPUT) + printf("tp_input DISCARD\n"); + ENDDEBUG + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "tp_input DISCARD m", m,0,0,0); + ENDTRACE + m_freem(m); + IncStat(ts_recv_drop); + return (ProtoHook)0; + +nonx_dref: + switch (dutype) { + default: + goto discard; + case CC_TPDU_type: + /* error = E_TP_MISM_REFS; */ + break; + case DR_TPDU_type: + error |= TP_ERROR_SNDC; + } +respond: + IFDEBUG(D_TPINPUT) + printf("RESPOND: error 0x%x, errlen 0x%x\n", error, errlen); + ENDDEBUG + IFTRACE(D_TPINPUT) + tptrace(TPPTmisc, "tp_input RESPOND m error sref", m, error, sref, 0); + ENDTRACE + if (sref == 0) + goto discard; + (void) tp_error_emit(error, (u_long)sref, (struct sockaddr_iso *)faddr, + (struct sockaddr_iso *)laddr, m, errlen, tpcb, + cons_channel, dgout_routine); + IFDEBUG(D_ERROR_EMIT) + printf("tp_input after error_emit\n"); + ENDDEBUG + +#ifdef lint + printf("",sref,opt); +#endif /* lint */ + IncStat(ts_recv_drop); + return (ProtoHook)0; +} + + +/* + * NAME: tp_headersize() + * + * CALLED FROM: + * tp_emit() and tp_sbsend() + * TP needs to know the header size so it can figure out how + * much data to put in each tpdu. 
+ * + * FUNCTION, ARGUMENTS, and RETURN VALUE: + * For a given connection, represented by (tpcb), and + * tpdu type (dutype), return the size of a tp header. + * + * RETURNS: the expected size of the heade in bytesr + * + * SIDE EFFECTS: + * + * NOTES: It would be nice if it got the network header size as well. + */ +int +tp_headersize(dutype, tpcb) + int dutype; + struct tp_pcb *tpcb; +{ + register int size = 0; + + IFTRACE(D_CONN) + tptrace(TPPTmisc, "tp_headersize dutype class xtd_format", + dutype, tpcb->tp_class, tpcb->tp_xtd_format, 0); + ENDTRACE + if( !( (tpcb->tp_class == TP_CLASS_0) || + (tpcb->tp_class == TP_CLASS_4) || + (dutype == DR_TPDU_type) || + (dutype == CR_TPDU_type) )) { + printf("tp_headersize:dutype 0x%x, class 0x%x", + dutype, tpcb->tp_class); + /* TODO: identify this and GET RID OF IT */ + } + ASSERT( (tpcb->tp_class == TP_CLASS_0) || + (tpcb->tp_class == TP_CLASS_4) || + (dutype == DR_TPDU_type) || + (dutype == CR_TPDU_type) ); + + if( tpcb->tp_class == TP_CLASS_0 ) { + size = tpdu_info[ dutype ] [TP_LEN_CLASS_0_INDEX]; + } else { + size = tpdu_info[ dutype ] [tpcb->tp_xtd_format]; + } + return size; + /* caller must get network level header size separately */ +} diff --git a/bsd/netiso/tp_ip.h b/bsd/netiso/tp_ip.h new file mode 100644 index 000000000..51b3111b7 --- /dev/null +++ b/bsd/netiso/tp_ip.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_ip.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * internet IP-dependent structures and include files + * + */ + + +#ifndef __TP_IP__ +#define __TP_IP__ + +#ifndef SOCK_STREAM +#include +#endif + +#include +#include +#include +#include +#include +#include + + +struct inpcb tp_inpcb; + /* queue of active inpcbs for tp ; for tp with dod ip */ + +#endif /* __TP_IP__ */ diff --git a/bsd/netiso/tp_iso.c b/bsd/netiso/tp_iso.c new file mode 100644 index 000000000..22a739984 --- /dev/null +++ b/bsd/netiso/tp_iso.c @@ -0,0 +1,712 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_iso.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * Here is where you find the iso-dependent code. We've tried + * keep all net-level and (primarily) address-family-dependent stuff + * out of the tp source, and everthing here is reached indirectly + * through a switch table (struct nl_protosw *) tpcb->tp_nlproto + * (see tp_pcb.c). + * The routines here are: + * iso_getsufx: gets transport suffix out of an isopcb structure. + * iso_putsufx: put transport suffix into an isopcb structure. + * iso_putnetaddr: put a whole net addr into an isopcb. + * iso_getnetaddr: get a whole net addr from an isopcb. + * iso_cmpnetaddr: compare a whole net addr from an isopcb. 
+ * iso_recycle_suffix: clear suffix for reuse in isopcb + * tpclnp_ctlinput: handle ER CNLPdu : icmp-like stuff + * tpclnp_mtu: figure out what size tpdu to use + * tpclnp_input: take a pkt from clnp, strip off its clnp header, + * give to tp + * tpclnp_output_dg: package a pkt for clnp given 2 addresses & some data + * tpclnp_output: package a pkt for clnp given an isopcb & some data + */ + +#if ISO + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * CALLED FROM: + * pr_usrreq() on PRU_BIND, PRU_CONNECT, PRU_ACCEPT, and PRU_PEERADDR + * FUNCTION, ARGUMENTS: + * The argument (which) takes the value TP_LOCAL or TP_FOREIGN. + */ + +iso_getsufx(isop, lenp, data_out, which) + struct isopcb *isop; + u_short *lenp; + caddr_t data_out; + int which; +{ + register struct sockaddr_iso *addr = 0; + + switch (which) { + case TP_LOCAL: + addr = isop->isop_laddr; + break; + + case TP_FOREIGN: + addr = isop->isop_faddr; + } + if (addr) + bcopy(TSEL(addr), data_out, (*lenp = addr->siso_tlen)); +} + +/* CALLED FROM: + * tp_newsocket(); i.e., when a connection is being established by an + * incoming CR_TPDU. + * + * FUNCTION, ARGUMENTS: + * Put a transport suffix (found in name) into an isopcb structure (isop). + * The argument (which) takes the value TP_LOCAL or TP_FOREIGN. 
+ */ +void +iso_putsufx(isop, sufxloc, sufxlen, which) + struct isopcb *isop; + caddr_t sufxloc; + int sufxlen, which; +{ + struct sockaddr_iso **dst, *backup; + register struct sockaddr_iso *addr; + struct mbuf *m; + int len; + + switch (which) { + default: + return; + + case TP_LOCAL: + dst = &isop->isop_laddr; + backup = &isop->isop_sladdr; + break; + + case TP_FOREIGN: + dst = &isop->isop_faddr; + backup = &isop->isop_sfaddr; + } + if ((addr = *dst) == 0) { + addr = *dst = backup; + addr->siso_nlen = 0; + addr->siso_slen = 0; + addr->siso_plen = 0; + printf("iso_putsufx on un-initialized isopcb\n"); + } + len = sufxlen + addr->siso_nlen + + (sizeof(*addr) - sizeof(addr->siso_data)); + if (addr == backup) { + if (len > sizeof(*addr)) { + m = m_getclr(M_DONTWAIT, MT_SONAME); + if (m == 0) + return; + addr = *dst = mtod(m, struct sockaddr_iso *); + *addr = *backup; + m->m_len = len; + } + } + bcopy(sufxloc, TSEL(addr), sufxlen); + addr->siso_tlen = sufxlen; + addr->siso_len = len; +} + +/* + * CALLED FROM: + * tp.trans whenever we go into REFWAIT state. + * FUNCTION and ARGUMENT: + * Called when a ref is frozen, to allow the suffix to be reused. + * (isop) is the net level pcb. This really shouldn't have to be + * done in a NET level pcb but... for the internet world that just + * the way it is done in BSD... + * The alternative is to have the port unusable until the reference + * timer goes off. + */ +void +iso_recycle_tsuffix(isop) + struct isopcb *isop; +{ + isop->isop_laddr->siso_tlen = isop->isop_faddr->siso_tlen = 0; +} + +/* + * CALLED FROM: + * tp_newsocket(); i.e., when a connection is being established by an + * incoming CR_TPDU. + * + * FUNCTION and ARGUMENTS: + * Copy a whole net addr from a struct sockaddr (name). + * into an isopcb (isop). 
+ * The argument (which) takes values TP_LOCAL or TP_FOREIGN + */ +void +iso_putnetaddr(isop, name, which) + register struct isopcb *isop; + struct sockaddr_iso *name; + int which; +{ + struct sockaddr_iso **sisop, *backup; + register struct sockaddr_iso *siso; + + switch (which) { + default: + printf("iso_putnetaddr: should panic\n"); + return; + case TP_LOCAL: + sisop = &isop->isop_laddr; + backup = &isop->isop_sladdr; + break; + case TP_FOREIGN: + sisop = &isop->isop_faddr; + backup = &isop->isop_sfaddr; + } + siso = ((*sisop == 0) ? (*sisop = backup) : *sisop); + IFDEBUG(D_TPISO) + printf("ISO_PUTNETADDR\n"); + dump_isoaddr(isop->isop_faddr); + ENDDEBUG + siso->siso_addr = name->siso_addr; +} + +/* + * CALLED FROM: + * tp_input() when a connection is being established by an + * incoming CR_TPDU, and considered for interception. + * + * FUNCTION and ARGUMENTS: + * compare a whole net addr from a struct sockaddr (name), + * with that implicitly stored in an isopcb (isop). + * The argument (which) takes values TP_LOCAL or TP_FOREIGN. + */ +iso_cmpnetaddr(isop, name, which) + register struct isopcb *isop; + register struct sockaddr_iso *name; + int which; +{ + struct sockaddr_iso **sisop, *backup; + register struct sockaddr_iso *siso; + + switch (which) { + default: + printf("iso_cmpnetaddr: should panic\n"); + return 0; + case TP_LOCAL: + sisop = &isop->isop_laddr; + backup = &isop->isop_sladdr; + break; + case TP_FOREIGN: + sisop = &isop->isop_faddr; + backup = &isop->isop_sfaddr; + } + siso = ((*sisop == 0) ? 
(*sisop = backup) : *sisop); + IFDEBUG(D_TPISO) + printf("ISO_CMPNETADDR\n"); + dump_isoaddr(siso); + ENDDEBUG + if (name->siso_tlen && bcmp(TSEL(name), TSEL(siso), name->siso_tlen)) + return (0); + return (bcmp((caddr_t)name->siso_data, + (caddr_t)siso->siso_data, name->siso_nlen) == 0); +} + +/* + * CALLED FROM: + * pr_usrreq() PRU_SOCKADDR, PRU_ACCEPT, PRU_PEERADDR + * FUNCTION and ARGUMENTS: + * Copy a whole net addr from an isopcb (isop) into + * a struct sockaddr (name). + * The argument (which) takes values TP_LOCAL or TP_FOREIGN. + */ + +void +iso_getnetaddr( isop, name, which) + struct isopcb *isop; + struct mbuf *name; + int which; +{ + struct sockaddr_iso *siso = + (which == TP_LOCAL ? isop->isop_laddr : isop->isop_faddr); + if (siso) + bcopy((caddr_t)siso, mtod(name, caddr_t), + (unsigned)(name->m_len = siso->siso_len)); + else + name->m_len = 0; +} +/* + * NAME: tpclnp_mtu() + * + * CALLED FROM: + * tp_route_to() on incoming CR, CC, and pr_usrreq() for PRU_CONNECT + * + * FUNCTION, ARGUMENTS, and RETURN VALUE: + * + * Perform subnetwork dependent part of determining MTU information. + * It appears that setting a double pointer to the rtentry associated with + * the destination, and returning the header size for the network protocol + * suffices. + * + * SIDE EFFECTS: + * Sets tp_routep pointer in pcb. + * + * NOTES: + */ +tpclnp_mtu(tpcb) +register struct tp_pcb *tpcb; +{ + struct isopcb *isop = (struct isopcb *)tpcb->tp_npcb; + + IFDEBUG(D_CONN) + printf("tpclnp_mtu(tpcb)\n", tpcb); + ENDDEBUG + tpcb->tp_routep = &(isop->isop_route.ro_rt); + if (tpcb->tp_netservice == ISO_CONS) + return 0; + else + return (sizeof(struct clnp_fixed) + sizeof(struct clnp_segment) + + 2 * sizeof(struct iso_addr)); + +} + +/* + * CALLED FROM: + * tp_emit() + * FUNCTION and ARGUMENTS: + * Take a packet(m0) from tp and package it so that clnp will accept it. + * This means prepending space for the clnp header and filling in a few + * of the fields. 
+ * isop is the isopcb structure; datalen is the length of the data in the + * mbuf string m0. + * RETURN VALUE: + * whatever (E*) is returned form the net layer output routine. + */ + +int +tpclnp_output(isop, m0, datalen, nochksum) + struct isopcb *isop; + struct mbuf *m0; + int datalen; + int nochksum; +{ + register struct mbuf *m = m0; + IncStat(ts_tpdu_sent); + + IFDEBUG(D_TPISO) + struct tpdu *hdr = mtod(m0, struct tpdu *); + + printf( +"abt to call clnp_output: datalen 0x%x, hdr.li 0x%x, hdr.dutype 0x%x nocsum x%x dst addr:\n", + datalen, + (int)hdr->tpdu_li, (int)hdr->tpdu_type, nochksum); + dump_isoaddr(isop->isop_faddr); + printf("\nsrc addr:\n"); + dump_isoaddr(isop->isop_laddr); + dump_mbuf(m0, "at tpclnp_output"); + ENDDEBUG + + return + clnp_output(m0, isop, datalen, /* flags */nochksum ? CLNP_NO_CKSUM : 0); +} + +/* + * CALLED FROM: + * tp_error_emit() + * FUNCTION and ARGUMENTS: + * This is a copy of tpclnp_output that takes the addresses + * instead of a pcb. It's used by the tp_error_emit, when we + * don't have an iso_pcb with which to call the normal output rtn. + * RETURN VALUE: + * ENOBUFS or + * whatever (E*) is returned form the net layer output routine. + */ + +int +tpclnp_output_dg(laddr, faddr, m0, datalen, ro, nochksum) + struct iso_addr *laddr, *faddr; + struct mbuf *m0; + int datalen; + struct route *ro; + int nochksum; +{ + struct isopcb tmppcb; + int err; + int flags; + register struct mbuf *m = m0; + + IFDEBUG(D_TPISO) + printf("tpclnp_output_dg datalen 0x%x m0 0x%x\n", datalen, m0); + ENDDEBUG + + /* + * Fill in minimal portion of isopcb so that clnp can send the + * packet. 
+ */ + bzero((caddr_t)&tmppcb, sizeof(tmppcb)); + tmppcb.isop_laddr = &tmppcb.isop_sladdr; + tmppcb.isop_laddr->siso_addr = *laddr; + tmppcb.isop_faddr = &tmppcb.isop_sfaddr; + tmppcb.isop_faddr->siso_addr = *faddr; + + IFDEBUG(D_TPISO) + printf("tpclnp_output_dg faddr: \n"); + dump_isoaddr(&tmppcb.isop_sfaddr); + printf("\ntpclnp_output_dg laddr: \n"); + dump_isoaddr(&tmppcb.isop_sladdr); + printf("\n"); + ENDDEBUG + + /* + * Do not use packet cache since this is a one shot error packet + */ + flags = (CLNP_NOCACHE|(nochksum?CLNP_NO_CKSUM:0)); + + IncStat(ts_tpdu_sent); + + err = clnp_output(m0, &tmppcb, datalen, flags); + + /* + * Free route allocated by clnp (if the route was indeed allocated) + */ + if (tmppcb.isop_route.ro_rt) + RTFREE(tmppcb.isop_route.ro_rt); + + return(err); +} +/* + * CALLED FROM: + * clnp's input routine, indirectly through the protosw. + * FUNCTION and ARGUMENTS: + * Take a packet (m) from clnp, strip off the clnp header and give it to tp + * No return value. + */ +ProtoHook +tpclnp_input(m, src, dst, clnp_len, ce_bit) + register struct mbuf *m; + struct sockaddr_iso *src, *dst; + int clnp_len, ce_bit; +{ + struct mbuf *tp_inputprep(); + int tp_input(), cltp_input(), (*input)() = tp_input; + + IncStat(ts_pkt_rcvd); + + IFDEBUG(D_TPINPUT) + printf("tpclnp_input: m 0x%x clnp_len 0x%x\n", m, clnp_len); + dump_mbuf(m, "at tpclnp_input"); + ENDDEBUG + /* + * CLNP gives us an mbuf chain WITH the clnp header pulled up, + * and the length of the clnp header. + * First, strip off the Clnp header. leave the mbuf there for the + * pullup that follows. 
+ */ + m->m_len -= clnp_len; + m->m_data += clnp_len; + m->m_pkthdr.len -= clnp_len; + /* XXXX: should probably be in clnp_input */ + switch (dst->siso_data[dst->siso_nlen - 1]) { +#if TUBA + case ISOPROTO_TCP: + return (tuba_tcpinput(m, src, dst)); +#endif + case 0: + if (m->m_len == 0 && (m = m_pullup(m, 1)) == 0) + return 0; + if (*(mtod(m, u_char *)) == ISO10747_IDRP) + return (idrp_input(m, src, dst)); + } + m = tp_inputprep(m); + if (m == 0) + return 0; + if (mtod(m, u_char *)[1] == UD_TPDU_type) + input = cltp_input; + + IFDEBUG(D_TPINPUT) + dump_mbuf(m, "after tpclnp_input both pullups"); + ENDDEBUG + + IFDEBUG(D_TPISO) + printf("calling %sinput : src 0x%x, dst 0x%x, src addr:\n", + (input == tp_input ? "tp_" : "clts_"), src, dst); + dump_isoaddr(src); + printf(" dst addr:\n"); + dump_isoaddr(dst); + ENDDEBUG + + (void) (*input)(m, (struct sockaddr *)src, (struct sockaddr *)dst, + 0, tpclnp_output_dg, ce_bit); + + IFDEBUG(D_QUENCH) + { + if(time.tv_usec & 0x4 && time.tv_usec & 0x40) { + printf("tpclnp_input: FAKING %s\n", + tp_stat.ts_pkt_rcvd & 0x1?"QUENCH":"QUENCH2"); + if(tp_stat.ts_pkt_rcvd & 0x1) { + tpclnp_ctlinput(PRC_QUENCH, &src); + } else { + tpclnp_ctlinput(PRC_QUENCH2, &src); + } + } + } + ENDDEBUG + + return 0; +} + +ProtoHook +iso_rtchange() +{ + return 0; +} + +/* + * CALLED FROM: + * tpclnp_ctlinput() + * FUNCTION and ARGUMENTS: + * find the tpcb pointer and pass it to tp_quench + */ +void +tpiso_decbit(isop) + struct isopcb *isop; +{ + tp_quench((struct tp_pcb *)isop->isop_socket->so_pcb, PRC_QUENCH2); +} +/* + * CALLED FROM: + * tpclnp_ctlinput() + * FUNCTION and ARGUMENTS: + * find the tpcb pointer and pass it to tp_quench + */ +void +tpiso_quench(isop) + struct isopcb *isop; +{ + tp_quench((struct tp_pcb *)isop->isop_socket->so_pcb, PRC_QUENCH); +} + +/* + * CALLED FROM: + * The network layer through the protosw table. + * FUNCTION and ARGUMENTS: + * When clnp an ICMP-like msg this gets called. 
+ * It either returns an error status to the user or + * it causes all connections on this address to be aborted + * by calling the appropriate xx_notify() routine. + * (cmd) is the type of ICMP error. + * (siso) is the address of the guy who sent the ER CLNPDU + */ +ProtoHook +tpclnp_ctlinput(cmd, siso) + int cmd; + struct sockaddr_iso *siso; +{ + extern u_char inetctlerrmap[]; + extern ProtoHook tpiso_abort(); + extern ProtoHook iso_rtchange(); + extern ProtoHook tpiso_reset(); + void iso_pcbnotify(); + + IFDEBUG(D_TPINPUT) + printf("tpclnp_ctlinput1: cmd 0x%x addr: \n", cmd); + dump_isoaddr(siso); + ENDDEBUG + + if (cmd < 0 || cmd > PRC_NCMDS) + return 0; + if (siso->siso_family != AF_ISO) + return 0; + switch (cmd) { + + case PRC_QUENCH2: + iso_pcbnotify(&tp_isopcb, siso, 0, (int (*)())tpiso_decbit); + break; + + case PRC_QUENCH: + iso_pcbnotify(&tp_isopcb, siso, 0, (int (*)())tpiso_quench); + break; + + case PRC_TIMXCEED_REASS: + case PRC_ROUTEDEAD: + iso_pcbnotify(&tp_isopcb, siso, 0, tpiso_reset); + break; + + case PRC_HOSTUNREACH: + case PRC_UNREACH_NET: + case PRC_IFDOWN: + case PRC_HOSTDEAD: + iso_pcbnotify(&tp_isopcb, siso, + (int)inetctlerrmap[cmd], iso_rtchange); + break; + + default: + /* + case PRC_MSGSIZE: + case PRC_UNREACH_HOST: + case PRC_UNREACH_PROTOCOL: + case PRC_UNREACH_PORT: + case PRC_UNREACH_NEEDFRAG: + case PRC_UNREACH_SRCFAIL: + case PRC_REDIRECT_NET: + case PRC_REDIRECT_HOST: + case PRC_REDIRECT_TOSNET: + case PRC_REDIRECT_TOSHOST: + case PRC_TIMXCEED_INTRANS: + case PRC_PARAMPROB: + */ + iso_pcbnotify(&tp_isopcb, siso, (int)inetctlerrmap[cmd], tpiso_abort); + break; + } + return 0; +} +/* + * XXX - Variant which is called by clnp_er.c with an isoaddr rather + * than a sockaddr_iso. 
+ */ + +static struct sockaddr_iso siso = {sizeof(siso), AF_ISO}; +tpclnp_ctlinput1(cmd, isoa) + int cmd; + struct iso_addr *isoa; +{ + bzero((caddr_t)&siso.siso_addr, sizeof(siso.siso_addr)); + bcopy((caddr_t)isoa, (caddr_t)&siso.siso_addr, isoa->isoa_len); + tpclnp_ctlinput(cmd, &siso); +} + +/* + * These next 2 routines are + * CALLED FROM: + * xxx_notify() from tp_ctlinput() when + * net level gets some ICMP-equiv. type event. + * FUNCTION and ARGUMENTS: + * Cause the connection to be aborted with some sort of error + * reason indicating that the network layer caused the abort. + * Fakes an ER TPDU so we can go through the driver. + * abort always aborts the TP connection. + * reset may or may not, depending on the TP class that's in use. + */ +ProtoHook +tpiso_abort(isop) + struct isopcb *isop; +{ + struct tp_event e; + + IFDEBUG(D_CONN) + printf("tpiso_abort 0x%x\n", isop); + ENDDEBUG + e.ev_number = ER_TPDU; + e.ATTR(ER_TPDU).e_reason = ECONNABORTED; + return tp_driver((struct tp_pcb *)isop->isop_socket->so_pcb, &e); +} + +ProtoHook +tpiso_reset(isop) + struct isopcb *isop; +{ + struct tp_event e; + + e.ev_number = T_NETRESET; + return tp_driver((struct tp_pcb *)isop->isop_socket->so_pcb, &e); + +} + +#endif /* ISO */ diff --git a/bsd/netiso/tp_meas.c b/bsd/netiso/tp_meas.c new file mode 100644 index 000000000..a0981f088 --- /dev/null +++ b/bsd/netiso/tp_meas.c @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_meas.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * tp_meas.c : create a performance measurement event + * in the circular buffer tp_Meas[] + */ + +#include +#include + +#include +#include + +extern struct timeval time; + +#ifdef TP_PERF_MEAS +int tp_Measn = 0; +struct tp_Meas tp_Meas[TPMEASN]; + +/* + * NAME: tpmeas() + * + * CALLED FROM: tp_emit(), tp_soisdisconecting(), tp_soisdisconnected() + * tp0_stash(), tp_stash(), tp_send(), tp_goodack(), tp_usrreq() + * + * FUNCTION and ARGUMENTS: + * stashes a performance-measurement event for the given reference (ref) + * (kind) tells which kind of event, timev is the time to be stored + * with this event, (seq), (win), and (size) are integers that usually + * refer to the sequence number, window number (on send) and + * size of tpdu or window. + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ +void +Tpmeas(ref, kind, timev, seq, win, size) + u_int ref; + u_int kind; + struct timeval *timev; + u_int seq, win, size; +{ + register struct tp_Meas *tpm; + static int mseq; + + tpm = &tp_Meas[tp_Measn++]; + tp_Measn %= TPMEASN; + + tpm->tpm_kind = kind; + tpm->tpm_tseq = mseq++; + tpm->tpm_ref = ref; + if(kind == TPtime_from_ll) + bcopy((caddr_t)timev, (caddr_t)&tpm->tpm_time, sizeof(struct timeval)); + else + bcopy( (caddr_t)&time, + (caddr_t)&tpm->tpm_time, sizeof(struct timeval) ); + tpm->tpm_seq = seq; + tpm->tpm_window = win; + tpm->tpm_size = size; +} + +#endif /* TP_PERF_MEAS */ diff --git a/bsd/netiso/tp_meas.h b/bsd/netiso/tp_meas.h new file mode 100644 index 000000000..b5bcf65c2 --- /dev/null +++ b/bsd/netiso/tp_meas.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_meas.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +#ifdef TP_PERF_MEAS +#define tpmeas(a, b, t, c, d, e) \ + Tpmeas((u_int)(a), (u_int)(b), t, (u_int)(c), (u_int)(d), (u_int)(e)) + +struct tp_Meas { + int tpm_tseq; + u_char tpm_kind; + u_short tpm_ref; + u_short tpm_size; + u_short tpm_window; + u_int tpm_seq; + struct timeval tpm_time; +}; + +#define TPMEASN 4000 +extern int tp_Measn; +extern struct tp_Meas tp_Meas[]; + +/* + * the kinds of events for packet tracing are: + */ +#define TPtime_from_session 0x01 +#define TPtime_to_session 0x02 +#define TPtime_ack_rcvd 0x03 +#define TPtime_ack_sent 0x04 +#define TPtime_from_ll 0x05 +#define TPtime_to_ll 0x06 +#define TPsbsend 0x07 +#define TPtime_open 0x08 +#define TPtime_open_X 0x28 /* xtd format */ +#define TPtime_close 0x09 + +#endif /* TP_PERF_MEAS */ diff --git a/bsd/netiso/tp_output.c b/bsd/netiso/tp_output.c new file mode 100644 index 000000000..4f97ba160 --- /dev/null +++ b/bsd/netiso/tp_output.c @@ -0,0 +1,730 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_output.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * In here is tp_ctloutput(), the guy called by [sg]etsockopt(), + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TPDUSIZESHIFT 24 +#define CLASSHIFT 16 + +/* + * NAME: tp_consistency() + * + * CALLED FROM: + * tp_ctloutput(), tp_input() + * + * FUNCTION and ARGUMENTS: + * Checks the consistency of options and tpdusize with class, + * using the parameters passed in via (param). + * (cmd) may be TP_STRICT or TP_FORCE or both. + * Force means it will set all the values in (tpcb) to those in + * the input arguements iff no errors were encountered. 
+ * Strict means that no inconsistency will be tolerated. If it's + * not used, checksum and tpdusize inconsistencies will be tolerated. + * The reason for this is that in some cases, when we're negotiating down + * from class 4, these options should be changed but should not + * cause negotiation to fail. + * + * RETURNS + * E* or EOK + * E* if the various parms aren't ok for a given class + * EOK if they are ok for a given class + */ + +int +tp_consistency( tpcb, cmd, param ) + u_int cmd; + struct tp_conn_param *param; + struct tp_pcb *tpcb; +{ + register int error = EOK; + int class_to_use = tp_mask_to_num(param->p_class); + + IFTRACE(D_SETPARAMS) + tptrace(TPPTmisc, + "tp_consist enter class_to_use dontchange param.class cmd", + class_to_use, param->p_dont_change_params, param->p_class, cmd); + ENDTRACE + IFDEBUG(D_SETPARAMS) + printf("tp_consistency %s %s\n", + cmd& TP_FORCE? "TP_FORCE": "", + cmd& TP_STRICT? "TP_STRICT":""); + ENDDEBUG + if ((cmd & TP_FORCE) && (param->p_dont_change_params)) { + cmd &= ~TP_FORCE; + } + /* can switch net services within a domain, but + * cannot switch domains + */ + switch( param->p_netservice) { + case ISO_CONS: + case ISO_CLNS: + case ISO_COSNS: + /* param->p_netservice in ISO DOMAIN */ + if(tpcb->tp_domain != AF_ISO ) { + error = EINVAL; goto done; + } + break; + case IN_CLNS: + /* param->p_netservice in INET DOMAIN */ + if( tpcb->tp_domain != AF_INET ) { + error = EINVAL; goto done; + } + break; + /* no others not possible-> netservice is a 2-bit field! 
*/ + } + + IFDEBUG(D_SETPARAMS) + printf("p_class 0x%x, class_to_use 0x%x\n", param->p_class, + class_to_use); + ENDDEBUG + if((param->p_netservice < 0) || (param->p_netservice > TP_MAX_NETSERVICES)){ + error = EINVAL; goto done; + } + if( (param->p_class & TP_CLASSES_IMPLEMENTED) == 0 ) { + error = EINVAL; goto done; + } + IFDEBUG(D_SETPARAMS) + printf("Nretrans 0x%x\n", param->p_Nretrans ); + ENDDEBUG + if( ( param->p_Nretrans < 1 ) || + (param->p_cr_ticks < 1) || (param->p_cc_ticks < 1) ) { + /* bad for any class because negot has to be done a la class 4 */ + error = EINVAL; goto done; + } + IFDEBUG(D_SETPARAMS) + printf("use_csum 0x%x\n", param->p_use_checksum ); + printf("xtd_format 0x%x\n", param->p_xtd_format ); + printf("xpd_service 0x%x\n", param->p_xpd_service ); + printf("tpdusize 0x%x\n", param->p_tpdusize ); + printf("tpcb->flags 0x%x\n", tpcb->tp_flags ); + ENDDEBUG + switch( class_to_use ) { + + case 0: + /* do not use checksums, xtd format, or XPD */ + + if( param->p_use_checksum | param->p_xtd_format | param->p_xpd_service ) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_use_checksum = 0; + param->p_xtd_format = 0; + param->p_xpd_service = 0; + } + break; + } + + if (param->p_tpdusize < TP_MIN_TPDUSIZE) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_tpdusize = TP_MIN_TPDUSIZE; + } + break; + } + if (param->p_tpdusize > TP0_TPDUSIZE) { + if (cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_tpdusize = TP0_TPDUSIZE; + } + break; + } + + /* connect/disc data not allowed for class 0 */ + if (tpcb->tp_ucddata) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else if(cmd & TP_FORCE) { + m_freem(tpcb->tp_ucddata); + tpcb->tp_ucddata = 0; + } + } + break; + + case 4: + IFDEBUG(D_SETPARAMS) + printf("dt_ticks 0x%x\n", param->p_dt_ticks ); + printf("x_ticks 0x%x\n", param->p_x_ticks ); + printf("dr_ticks 0x%x\n", param->p_dr_ticks ); + printf("keepalive 0x%x\n", param->p_keepalive_ticks ); + printf("sendack 
0x%x\n", param->p_sendack_ticks ); + printf("inact 0x%x\n", param->p_inact_ticks ); + printf("ref 0x%x\n", param->p_ref_ticks ); + ENDDEBUG + if( (param->p_class & TP_CLASS_4 ) && ( + (param->p_dt_ticks < 1) || (param->p_dr_ticks < 1) || + (param->p_x_ticks < 1) || (param->p_keepalive_ticks < 1) || + (param->p_sendack_ticks < 1) || (param->p_ref_ticks < 1) || + (param->p_inact_ticks < 1) ) ) { + error = EINVAL; + break; + } + IFDEBUG(D_SETPARAMS) + printf("rx_strat 0x%x\n", param->p_rx_strat ); + ENDDEBUG + if(param->p_rx_strat > + ( TPRX_USE_CW | TPRX_EACH | TPRX_FASTSTART) ) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_rx_strat = TPRX_USE_CW; + } + break; + } + IFDEBUG(D_SETPARAMS) + printf("ack_strat 0x%x\n", param->p_ack_strat ); + ENDDEBUG + if((param->p_ack_strat != 0) && (param->p_ack_strat != 1)) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_ack_strat = TPACK_WINDOW; + } + break; + } + if (param->p_tpdusize < TP_MIN_TPDUSIZE) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_tpdusize = TP_MIN_TPDUSIZE; + } + break; + } + if (param->p_tpdusize > TP_TPDUSIZE) { + if(cmd & TP_STRICT) { + error = EINVAL; + } else { + param->p_tpdusize = TP_TPDUSIZE; + } + break; + } + break; + } + + if ((error==0) && (cmd & TP_FORCE)) { + long dusize = ((long)param->p_ptpdusize) << 7; + /* Enforce Negotation rules below */ + tpcb->tp_class = param->p_class; + if (tpcb->tp_use_checksum || param->p_use_checksum) + tpcb->tp_use_checksum = 1; + if (!tpcb->tp_xpd_service || !param->p_xpd_service) + tpcb->tp_xpd_service = 0; + if (!tpcb->tp_xtd_format || !param->p_xtd_format) + tpcb->tp_xtd_format = 0; + if (dusize) { + if (tpcb->tp_l_tpdusize > dusize) + tpcb->tp_l_tpdusize = dusize; + if (tpcb->tp_ptpdusize == 0 || + tpcb->tp_ptpdusize > param->p_ptpdusize) + tpcb->tp_ptpdusize = param->p_ptpdusize; + } else { + if (param->p_tpdusize != 0 && + tpcb->tp_tpdusize > param->p_tpdusize) + tpcb->tp_tpdusize = param->p_tpdusize; + 
tpcb->tp_l_tpdusize = 1 << tpcb->tp_tpdusize; + } + } +done: + + IFTRACE(D_CONN) + tptrace(TPPTmisc, "tp_consist returns class xtdfmt cmd", + error, tpcb->tp_class, tpcb->tp_xtd_format, cmd); + ENDTRACE + IFDEBUG(D_CONN) + printf( + "tp_consist rtns 0x%x class 0x%x xtd_fmt 0x%x cmd 0x%x\n", + error, tpcb->tp_class, tpcb->tp_xtd_format, cmd); + ENDDEBUG + return error; +} + +/* + * NAME: tp_ctloutput() + * + * CALLED FROM: + * [sg]etsockopt(), via so[sg]etopt(). + * + * FUNCTION and ARGUMENTS: + * Implements the socket options at transport level. + * (cmd) is either PRCO_SETOPT or PRCO_GETOPT (see ../sys/protosw.h). + * (so) is the socket. + * (level) is SOL_TRANSPORT (see ../sys/socket.h) + * (optname) is the particular command or option to be set. + * (**mp) is an mbuf structure. + * + * RETURN VALUE: + * ENOTSOCK if the socket hasn't got an associated tpcb + * EINVAL if + * trying to set window too big + * trying to set illegal max tpdu size + * trying to set illegal credit fraction + * trying to use unknown or unimplemented class of TP + * structure passed to set timer values is wrong size + * illegal combination of command/GET-SET option, + * e.g., GET w/ TPOPT_CDDATA_CLEAR: + * EOPNOTSUPP if the level isn't transport, or command is neither GET nor SET + * or if the transport-specific command is not implemented + * EISCONN if trying a command that isn't allowed after a connection + * is established + * ENOTCONN if trying a command that is allowed only if a connection is + * established + * EMSGSIZE if trying to give too much data on connect/disconnect + * + * SIDE EFFECTS: + * + * NOTES: + */ +ProtoHook +tp_ctloutput(cmd, so, level, optname, mp) + int cmd, level, optname; + struct socket *so; + struct mbuf **mp; +{ + struct tp_pcb *tpcb = sototpcb(so); + int s = splnet(); + caddr_t value; + unsigned val_len; + int error = 0; + + IFTRACE(D_REQUEST) + tptrace(TPPTmisc, "tp_ctloutput cmd so optname mp", + cmd, so, optname, mp); + ENDTRACE + IFDEBUG(D_REQUEST) + 
printf( + "tp_ctloutput so 0x%x cmd 0x%x optname 0x%x, mp 0x%x *mp 0x%x tpcb 0x%x\n", + so, cmd, optname, mp, mp?*mp:0, tpcb); + ENDDEBUG + if( tpcb == (struct tp_pcb *)0 ) { + error = ENOTSOCK; goto done; + } + if(*mp == MNULL) { + register struct mbuf *m; + + MGET(m, M_DONTWAIT, TPMT_SONAME); /* does off, type, next */ + if (m == NULL) { + splx(s); + return ENOBUFS; + } + m->m_len = 0; + m->m_act = 0; + *mp = m; + } + + /* + * Hook so one can set network options via a tp socket. + */ + if ( level == SOL_NETWORK ) { + if ((tpcb->tp_nlproto == NULL) || (tpcb->tp_npcb == NULL)) + error = ENOTSOCK; + else if (tpcb->tp_nlproto->nlp_ctloutput == NULL) + error = EOPNOTSUPP; + else + return ((tpcb->tp_nlproto->nlp_ctloutput)(cmd, optname, + tpcb->tp_npcb, *mp)); + goto done; + } else if ( level == SOL_SOCKET) { + if (optname == SO_RCVBUF && cmd == PRCO_SETOPT) { + u_long old_credit = tpcb->tp_maxlcredit; + tp_rsyset(tpcb); + if (tpcb->tp_rhiwat != so->so_rcv.sb_hiwat && + tpcb->tp_state == TP_OPEN && + (old_credit < tpcb->tp_maxlcredit)) + tp_emit(AK_TPDU_type, tpcb, + tpcb->tp_rcvnxt, 0, MNULL); + tpcb->tp_rhiwat = so->so_rcv.sb_hiwat; + } + goto done; + } else if ( level != SOL_TRANSPORT ) { + error = EOPNOTSUPP; goto done; + } + if (cmd != PRCO_GETOPT && cmd != PRCO_SETOPT) { + error = EOPNOTSUPP; goto done; + } + if ( so->so_error ) { + error = so->so_error; goto done; + } + + /* The only options allowed after connection is established + * are GET (anything) and SET DISC DATA and SET PERF MEAS + */ + if ( ((so->so_state & SS_ISCONNECTING)||(so->so_state & SS_ISCONNECTED)) + && + (cmd == PRCO_SETOPT && + optname != TPOPT_DISC_DATA && + optname != TPOPT_CFRM_DATA && + optname != TPOPT_PERF_MEAS && + optname != TPOPT_CDDATA_CLEAR ) ) { + error = EISCONN; goto done; + } + /* The only options allowed after disconnection are GET DISC DATA, + * and TPOPT_PSTATISTICS + * and they're not allowed if the ref timer has gone off, because + * the tpcb is gone + */ + if 
((so->so_state & (SS_ISCONNECTED | SS_ISCONFIRMING)) == 0) { + if ( so->so_pcb == (caddr_t)0 ) { + error = ENOTCONN; goto done; + } + if ( (tpcb->tp_state == TP_REFWAIT || tpcb->tp_state == TP_CLOSING) && + (optname != TPOPT_DISC_DATA && optname != TPOPT_PSTATISTICS)) { + error = ENOTCONN; goto done; + } + } + + value = mtod(*mp, caddr_t); /* it's aligned, don't worry, + * but lint complains about it + */ + val_len = (*mp)->m_len; + + switch (optname) { + + case TPOPT_INTERCEPT: +#define INA(t) (((struct inpcb *)(t->tp_npcb))->inp_laddr.s_addr) +#define ISOA(t) (((struct isopcb *)(t->tp_npcb))->isop_laddr->siso_addr) + + if ((so->so_state & SS_PRIV) == 0) { + error = EPERM; + } else if (cmd != PRCO_SETOPT || tpcb->tp_state != TP_CLOSED || + (tpcb->tp_flags & TPF_GENERAL_ADDR) || + tpcb->tp_next == 0) + error = EINVAL; + else { + register struct tp_pcb *t; + error = EADDRINUSE; + for (t = tp_listeners; t; t = t->tp_nextlisten) + if ((t->tp_flags & TPF_GENERAL_ADDR) == 0 && + t->tp_domain == tpcb->tp_domain) + switch (tpcb->tp_domain) { + default: + goto done; +#if INET + case AF_INET: + if (INA(t) == INA(tpcb)) + goto done; + continue; +#endif +#if ISO + case AF_ISO: + if (bcmp(ISOA(t).isoa_genaddr, ISOA(tpcb).isoa_genaddr, + ISOA(t).isoa_len) == 0) + goto done; + continue; +#endif + } + tpcb->tp_lsuffixlen = 0; + tpcb->tp_state = TP_LISTENING; + error = 0; + remque(tpcb); + tpcb->tp_next = tpcb->tp_prev = tpcb; + tpcb->tp_nextlisten = tp_listeners; + tp_listeners = tpcb; + } + break; + + case TPOPT_MY_TSEL: + if ( cmd == PRCO_GETOPT ) { + ASSERT( tpcb->tp_lsuffixlen <= MAX_TSAP_SEL_LEN ); + bcopy((caddr_t)tpcb->tp_lsuffix, value, tpcb->tp_lsuffixlen); + (*mp)->m_len = tpcb->tp_lsuffixlen; + } else /* cmd == PRCO_SETOPT */ { + if( (val_len > MAX_TSAP_SEL_LEN) || (val_len <= 0 )) { + printf("val_len 0x%x (*mp)->m_len 0x%x\n", val_len, (*mp)); + error = EINVAL; + } else { + bcopy(value, (caddr_t)tpcb->tp_lsuffix, val_len); + tpcb->tp_lsuffixlen = val_len; + } + } + 
break; + + case TPOPT_PEER_TSEL: + if ( cmd == PRCO_GETOPT ) { + ASSERT( tpcb->tp_fsuffixlen <= MAX_TSAP_SEL_LEN ); + bcopy((caddr_t)tpcb->tp_fsuffix, value, tpcb->tp_fsuffixlen); + (*mp)->m_len = tpcb->tp_fsuffixlen; + } else /* cmd == PRCO_SETOPT */ { + if( (val_len > MAX_TSAP_SEL_LEN) || (val_len <= 0 )) { + printf("val_len 0x%x (*mp)->m_len 0x%x\n", val_len, (*mp)); + error = EINVAL; + } else { + bcopy(value, (caddr_t)tpcb->tp_fsuffix, val_len); + tpcb->tp_fsuffixlen = val_len; + } + } + break; + + case TPOPT_FLAGS: + IFDEBUG(D_REQUEST) + printf("%s TPOPT_FLAGS value 0x%x *value 0x%x, flags 0x%x \n", + cmd==PRCO_GETOPT?"GET":"SET", + value, + *value, + tpcb->tp_flags); + ENDDEBUG + + if ( cmd == PRCO_GETOPT ) { + *(int *)value = (int)tpcb->tp_flags; + (*mp)->m_len = sizeof(u_int); + } else /* cmd == PRCO_SETOPT */ { + error = EINVAL; goto done; + } + break; + + case TPOPT_PARAMS: + /* This handles: + * timer values, + * class, use of transport expedited data, + * max tpdu size, checksum, xtd format and + * disconnect indications, and may get rid of connect/disc data + */ + IFDEBUG(D_SETPARAMS) + printf("TPOPT_PARAMS value 0x%x, cmd %s \n", value, + cmd==PRCO_GETOPT?"GET":"SET"); + ENDDEBUG + IFDEBUG(D_REQUEST) + printf("TPOPT_PARAMS value 0x%x, cmd %s \n", value, + cmd==PRCO_GETOPT?"GET":"SET"); + ENDDEBUG + + if ( cmd == PRCO_GETOPT ) { + *(struct tp_conn_param *)value = tpcb->_tp_param; + (*mp)->m_len = sizeof(tpcb->_tp_param); + } else /* cmd == PRCO_SETOPT */ { + if( (error = + tp_consistency(tpcb, TP_STRICT | TP_FORCE, + (struct tp_conn_param *)value))==0) { + /* + * tp_consistency doesn't copy the whole set of params + */ + tpcb->_tp_param = *(struct tp_conn_param *)value; + (*mp)->m_len = sizeof(tpcb->_tp_param); + } + } + break; + + case TPOPT_PSTATISTICS: +#ifdef TP_PERF_MEAS + if (cmd == PRCO_SETOPT) { + error = EINVAL; goto done; + } + IFPERF(tpcb) + if (*mp) { + struct mbuf * n; + do { + MFREE(*mp, n); + *mp = n; + } while (n); + } + *mp = 
m_copym(tpcb->tp_p_mbuf, (int)M_COPYALL, M_WAITOK); + ENDPERF + else { + error = EINVAL; goto done; + } + break; +#else + error = EOPNOTSUPP; + goto done; +#endif /* TP_PERF_MEAS */ + + case TPOPT_CDDATA_CLEAR: + if (cmd == PRCO_GETOPT) { + error = EINVAL; + } else { + if (tpcb->tp_ucddata) { + m_freem(tpcb->tp_ucddata); + tpcb->tp_ucddata = 0; + } + } + break; + + case TPOPT_CFRM_DATA: + case TPOPT_DISC_DATA: + case TPOPT_CONN_DATA: + if( tpcb->tp_class == TP_CLASS_0 ) { + error = EOPNOTSUPP; + break; + } + IFDEBUG(D_REQUEST) + printf("%s\n", optname==TPOPT_DISC_DATA?"DISC data":"CONN data"); + printf("m_len 0x%x, vallen 0x%x so_snd.cc 0x%x\n", + (*mp)->m_len, val_len, so->so_snd.sb_cc); + dump_mbuf(so->so_snd.sb_mb, "tp_ctloutput: sosnd "); + ENDDEBUG + if (cmd == PRCO_SETOPT) { + int len = tpcb->tp_ucddata ? tpcb->tp_ucddata->m_len : 0; + /* can append connect data in several calls */ + if (len + val_len > + (optname==TPOPT_CONN_DATA?TP_MAX_CR_DATA:TP_MAX_DR_DATA) ) { + error = EMSGSIZE; goto done; + } + (*mp)->m_next = MNULL; + (*mp)->m_act = 0; + if (tpcb->tp_ucddata) + m_cat(tpcb->tp_ucddata, *mp); + else + tpcb->tp_ucddata = *mp; + IFDEBUG(D_REQUEST) + dump_mbuf(tpcb->tp_ucddata, "tp_ctloutput after CONN_DATA"); + ENDDEBUG + IFTRACE(D_REQUEST) + tptrace(TPPTmisc,"C/D DATA: flags snd.sbcc val_len", + tpcb->tp_flags, so->so_snd.sb_cc,val_len,0); + ENDTRACE + *mp = MNULL; + if (optname == TPOPT_CFRM_DATA && (so->so_state & SS_ISCONFIRMING)) + (void) tp_confirm(tpcb); + } + break; + + case TPOPT_PERF_MEAS: +#ifdef TP_PERF_MEAS + if (cmd == PRCO_GETOPT) { + *value = (u_int)tpcb->tp_perf_on; + (*mp)->m_len = sizeof(u_int); + } else if (cmd == PRCO_SETOPT) { + (*mp)->m_len = 0; + if ((*value) != 0 && (*value) != 1 ) + error = EINVAL; + else tpcb->tp_perf_on = (*value); + } + if( tpcb->tp_perf_on ) + error = tp_setup_perf(tpcb); +#else /* TP_PERF_MEAS */ + error = EOPNOTSUPP; +#endif /* TP_PERF_MEAS */ + break; + + default: + error = EOPNOTSUPP; + } + +done: + 
IFDEBUG(D_REQUEST) + dump_mbuf(so->so_snd.sb_mb, "tp_ctloutput sosnd at end"); + dump_mbuf(*mp, "tp_ctloutput *mp"); + ENDDEBUG + /* + * sigh: getsockopt looks only at m_len : all output data must + * reside in the first mbuf + */ + if (*mp) { + if (cmd == PRCO_SETOPT) { + m_freem(*mp); + *mp = MNULL; + } else { + ASSERT ( m_compress(*mp, mp) <= MLEN ); + if (error) + (*mp)->m_len = 0; + IFDEBUG(D_REQUEST) + dump_mbuf(*mp, "tp_ctloutput *mp after compress"); + ENDDEBUG + } + } + splx(s); + return error; +} diff --git a/bsd/netiso/tp_param.h b/bsd/netiso/tp_param.h new file mode 100644 index 000000000..59dffc1ca --- /dev/null +++ b/bsd/netiso/tp_param.h @@ -0,0 +1,385 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_param.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + */ + +#ifndef __TP_PARAM__ +#define __TP_PARAM__ + + +/****************************************************** + * compile time parameters that can be changed + *****************************************************/ + +#define TP_CLASSES_IMPLEMENTED 0x11 /* zero and 4 */ + +#define TP_DECBIT_CLEAR_COUNT 3 + +/*#define N_TPREF 100 */ +#ifdef KERNEL +extern int N_TPREF; +#endif + +#define TP_SOCKBUFSIZE ((u_long)4096) +#define TP0_SOCKBUFSIZE ((u_long)512) +#define MAX_TSAP_SEL_LEN 64 + +/* maximum tpdu size we'll accept: */ +#define TP_TPDUSIZE 0xc /* 4096 octets for classes 1-4*/ +#define TP0_TPDUSIZE 0xb /* 2048 octets for class 0 */ +#define TP_DFL_TPDUSIZE 0x7 /* 128 octets default */ + /* NOTE: don't ever negotiate 8192 because could get + * wraparound in checksumming + * (No mtu is likely to be larger than 4K anyway...) + */ +#define TP_NRETRANS 12 /* TCP_MAXRXTSHIFT + 1 */ +#define TP_MAXRXTSHIFT 6 /* factor of 64 */ +#define TP_MAXPORT 0xefff + +/* ALPHA: to be used in the context: gain= 1/(2**alpha), or + * put another way, gaintimes(x) (x)>>alpha (forgetting the case alpha==0) + */ +#define TP_RTT_ALPHA 3 +#define TP_RTV_ALPHA 2 +#define TP_REXMTVAL(tpcb)\ + ((tp_rttadd + (tpcb)->tp_rtt + ((tpcb)->tp_rtv) << 2) / tp_rttdiv) +#define TP_RANGESET(tv, value, min, max) \ + ((tv = value) > (max) ? (tv = max) : (tv < min ? 
tv = min : tv)) + +/* + * not sure how to treat data on disconnect + */ +#define T_CONN_DATA 0x1 +#define T_DISCONNECT 0x2 +#define T_DISC_DATA 0x4 +#define T_XDATA 0x8 + +#define ISO_CLNS 0 +#define IN_CLNS 1 +#define ISO_CONS 2 +#define ISO_COSNS 3 +#define TP_MAX_NETSERVICES 3 + +/* Indices into tp stats ackreason[i] */ +#define _ACK_DONT_ 0 +#define _ACK_STRAT_EACH_ 0x1 +#define _ACK_STRAT_FULLWIN_ 0x2 +#define _ACK_DUP_ 0x3 +#define _ACK_EOT_ 0x4 +#define _ACK_REORDER_ 0x5 +#define _ACK_USRRCV_ 0x6 +#define _ACK_FCC_ 0x7 +#define _ACK_NUM_REASONS_ 0x8 + +/* masks for use in tp_stash() */ +#define ACK_DONT 0 +#define ACK_STRAT_EACH (1<< _ACK_STRAT_EACH_) +#define ACK_STRAT_FULLWIN (1<< _ACK_STRAT_FULLWIN_) +#define ACK_DUP (1<< _ACK_DUP_) +#define ACK_EOT (1<< _ACK_EOT_) +#define ACK_REORDER (1<< _ACK_REORDER_) + +/****************************************************** + * constants used in the protocol + *****************************************************/ + +#define TP_VERSION 0x1 + +#define TP_MAX_HEADER_LEN 256 + +#define TP_MIN_TPDUSIZE 0x7 /* 128 octets */ +#define TP_MAX_TPDUSIZE 0xd /* 8192 octets */ + +#define TP_MAX_XPD_DATA 0x10 /* 16 octets */ +#define TP_MAX_CC_DATA 0x20 /* 32 octets */ +#define TP_MAX_CR_DATA TP_MAX_CC_DATA +#define TP_MAX_DR_DATA 0x40 /* 64 octets */ + +#define TP_XTD_FMT_BIT 0x80000000 +#define TP_XTD_FMT_MASK 0x7fffffff +#define TP_NML_FMT_BIT 0x80 +#define TP_NML_FMT_MASK 0x7f + +/* + * values for the tpdu_type field, 2nd byte in a tpdu + */ + +#define TP_MIN_TPDUTYPE 0x1 + +#define XPD_TPDU_type 0x1 +#define XAK_TPDU_type 0x2 +#define GR_TPDU_type 0x3 +#define AK_TPDU_type 0x6 +#define ER_TPDU_type 0x7 +#define DR_TPDU_type 0x8 +#define DC_TPDU_type 0xc +#define CC_TPDU_type 0xd +#define CR_TPDU_type 0xe +#define DT_TPDU_type 0xf + +#define TP_MAX_TPDUTYPE 0xf + +/* + * identifiers for the variable-length options in tpdus + */ + +#define TPP_acktime 0x85 +#define TPP_residER 0x86 +#define TPP_priority 0x87 +#define 
TPP_transdelay 0x88 +#define TPP_throughput 0x89 +#define TPP_subseq 0x8a +#define TPP_flow_cntl_conf 0x8c /* not implemented */ +#define TPP_addl_info 0xe0 +#define TPP_tpdu_size 0xc0 +#define TPP_calling_sufx 0xc1 +#define TPP_invalid_tpdu 0xc1 /* the bozos used a value twice */ +#define TPP_called_sufx 0xc2 +#define TPP_checksum 0xc3 +#define TPP_vers 0xc4 +#define TPP_security 0xc5 +#define TPP_addl_opt 0xc6 +#define TPP_alt_class 0xc7 +#define TPP_perf_meas 0xc8 /* local item : perf meas on, svp */ +#define TPP_ptpdu_size 0xf0 /* preferred TPDU size */ +#define TPP_inact_time 0xf2 /* inactivity time exchanged */ + + +/****************************************************** + * Some fundamental data types + *****************************************************/ +#ifndef TRUE +#define TRUE 1 +#endif /* TRUE */ + +#ifndef FALSE +#define FALSE 0 +#endif /* FALSE */ + +#define TP_LOCAL 22 +#define TP_FOREIGN 33 + +#ifndef EOK +#define EOK 0 +#endif /* EOK */ + +#define TP_CLASS_0 (1<<0) +#define TP_CLASS_1 (1<<1) +#define TP_CLASS_2 (1<<2) +#define TP_CLASS_3 (1<<3) +#define TP_CLASS_4 (1<<4) + +#define TP_FORCE 0x1 +#define TP_STRICT 0x2 + +#ifndef MNULL +#define MNULL (struct mbuf *)0 +#endif /* MNULL */ + /* if ../sys/mbuf.h gets MT_types up to 0x40, these will + * have to be changed: + */ +#define MT_XPD 0x44 +#define MT_EOT 0x40 + +#define TP_ENOREF 0x80000000 + +typedef unsigned int SeqNum; +typedef unsigned short RefNum; +typedef int ProtoHook; + +/****************************************************** + * Macro used all over, for driver + *****************************************************/ + +#define DoEvent(x) \ + ((E.ev_number=(x)),(tp_driver(tpcb,&E))) + +/****************************************************** + * Some macros used all over, for timestamping + *****************************************************/ + +#define GET_CUR_TIME(tvalp) ((*tvalp) = time) + +#define GET_TIME_SINCE(oldtvalp, diffp) {\ + (diffp)->tv_sec = time.tv_sec - 
(oldtvalp)->tv_sec;\ + (diffp)->tv_usec = time.tv_usec - (oldtvalp)->tv_usec;\ + if( (diffp)->tv_usec <0 ) {\ + (diffp)->tv_sec --;\ + (diffp)->tv_usec = 1000000 - (diffp)->tv_usec;\ + }\ +} + +/****************************************************** + * Some macros used for address families + *****************************************************/ + +#define satosiso(ADDR) ((struct sockaddr_iso *)(ADDR)) +#define satosin(ADDR) ((struct sockaddr_in *)(ADDR)) + +/****************************************************** + * Macro used for changing types of mbufs + *****************************************************/ + +#define CHANGE_MTYPE(m, TYPE)\ + if((m)->m_type != TYPE) { \ + mbstat.m_mtypes[(m)->m_type]--; mbstat.m_mtypes[TYPE]++; \ + (m)->m_type = TYPE; \ + } + +/****************************************************** + * Macros used for adding options to a tpdu header and for + * parsing the headers. + * Options are variable-length and must be bcopy-d because on the + * RT your assignments must be N-word aligned for objects of length + * N. Such a drag. 
+ *****************************************************/ + +struct tp_vbp { + u_char tpv_code; + char tpv_len; + char tpv_val; +}; +#define vbptr(x) ((struct tp_vbp *)(x)) +#define vbval(x,type) (*((type *)&(((struct tp_vbp *)(x))->tpv_val))) +#define vbcode(x) (vbptr(x)->tpv_code) +#define vblen(x) (vbptr(x)->tpv_len) + +#define vb_putval(dst,type,src)\ + bcopy((caddr_t)&(src),(caddr_t)&(((struct tp_vbp *)(dst))->tpv_val),\ + sizeof(type)) + +#define vb_getval(src,type,dst)\ +bcopy((caddr_t)&(((struct tp_vbp *)(src))->tpv_val),(caddr_t)&(dst),sizeof(type)) + +#define ADDOPTION(type, DU, len, src)\ +{ register caddr_t P;\ + P = (caddr_t)(DU) + (int)((DU)->tpdu_li);\ + vbptr(P)->tpv_code = type;\ + vbptr(P)->tpv_len = len;\ + bcopy((caddr_t)&src, (caddr_t)&(vbptr(P)->tpv_val), (unsigned)len);\ + DU->tpdu_li += len+2;/* 1 for code, 1 for length */\ +} +/****************************************************** + * Macro for the local credit: + * uses max transmission unit for the ll + * (as modified by the max TPDU size negotiated) + *****************************************************/ + +#if defined(ARGO_DEBUG)&&!defined(LOCAL_CREDIT_EXPAND) +#define LOCAL_CREDIT(tpcb) tp_local_credit(tpcb) +#else +#define LOCAL_CREDIT(tpcb) { if (tpcb->tp_rsycnt == 0) {\ + register struct sockbuf *xxsb = &((tpcb)->tp_sock->so_rcv);\ + register int xxi = sbspace(xxsb);\ + xxi = (xxi<0) ? 
0 : ((xxi) / (tpcb)->tp_l_tpdusize);\ + xxi = min(xxi, (tpcb)->tp_maxlcredit); \ + if (!(tpcb->tp_cebit_off)) { \ + (tpcb)->tp_lcredit = ROUND((tpcb)->tp_win_recv); \ + if (xxi < (tpcb)->tp_lcredit) { \ + (tpcb)->tp_lcredit = xxi; \ + } \ + } else \ + (tpcb)->tp_lcredit = xxi; \ +} } +#endif /* ARGO_DEBUG */ + +#ifdef KERNEL +extern int tp_rttadd, tp_rttdiv; +#include +#define printf logpri(LOG_DEBUG),addlog + +#ifndef tp_NSTATES + +#include +#include +#if defined(__STDC__) || defined(__cplusplus) +#undef ATTR +#define ATTR(X) ev_union.EV_ ## X +#endif /* defined(__STDC__) || defined(__cplusplus) */ + +#endif /* tp_NSTATES */ +#endif /* KERNEL */ + +#endif /* __TP_PARAM__ */ diff --git a/bsd/netiso/tp_pcb.c b/bsd/netiso/tp_pcb.c new file mode 100644 index 000000000..a4b91724f --- /dev/null +++ b/bsd/netiso/tp_pcb.c @@ -0,0 +1,1018 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_pcb.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * + * This is the initialization and cleanup stuff - + * for the tp machine in general as well as for the individual pcbs. + * tp_init() is called at system startup. tp_attach() and tp_getref() are + * called when a socket is created. tp_detach() and tp_freeref() + * are called during the closing stage and/or when the reference timer + * goes off. + * tp_soisdisconnecting() and tp_soisdisconnected() are tp-specific + * versions of soisconnect* + * and are called (obviously) during the closing phase. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* ticks are in units of: + * 500 nano-fortnights ;-) or + * 500 ms or + * 1/2 second + */ + +struct tp_conn_param tp_conn_param[] = { + /* ISO_CLNS: TP4 CONNECTION LESS */ + { + TP_NRETRANS, /* short p_Nretrans; */ + 20, /* 10 sec */ /* short p_dr_ticks; */ + + 20, /* 10 sec */ /* short p_cc_ticks; */ + 20, /* 10 sec */ /* short p_dt_ticks; */ + + 40, /* 20 sec */ /* short p_x_ticks; */ + 80, /* 40 sec */ /* short p_cr_ticks;*/ + + 240, /* 2 min */ /* short p_keepalive_ticks;*/ + 10, /* 5 sec */ /* short p_sendack_ticks; */ + + 600, /* 5 min */ /* short p_ref_ticks; */ + 360, /* 3 min */ /* short p_inact_ticks; */ + + (short) 100, /* short p_lcdtfract */ + (short) TP_SOCKBUFSIZE, /* short p_winsize */ + TP_TPDUSIZE, /* u_char p_tpdusize */ + + TPACK_WINDOW, /* 4 bits p_ack_strat */ + TPRX_USE_CW | TPRX_FASTSTART, + /* 4 bits p_rx_strat*/ + TP_CLASS_4 | TP_CLASS_0,/* 5 bits p_class */ + 1, /* 1 bit xtd format */ + 1, /* 1 bit xpd service */ + 1, /* 1 bit use_checksum */ + 0, /* 1 bit use net xpd */ + 0, /* 1 bit use rcc */ + 0, /* 1 bit use efc */ + 1, /* no disc indications */ + 0, /* don't change params */ + ISO_CLNS, /* p_netservice */ + }, + /* IN_CLNS: TP4 CONNECTION LESS */ + { + TP_NRETRANS, /* short p_Nretrans; */ + 20, /* 10 sec */ /* short p_dr_ticks; */ + + 20, /* 10 sec */ /* short p_cc_ticks; */ + 20, /* 10 sec */ /* short p_dt_ticks; */ + + 40, /* 20 sec */ /* short p_x_ticks; */ + 80, /* 40 sec */ /* short p_cr_ticks;*/ + + 240, /* 2 min */ /* short p_keepalive_ticks;*/ + 10, /* 5 sec */ /* short p_sendack_ticks; */ + + 600, /* 5 min */ /* short p_ref_ticks; */ + 360, /* 3 min */ /* short p_inact_ticks; */ + + (short) 100, /* short p_lcdtfract */ + (short) TP_SOCKBUFSIZE, /* short p_winsize */ + TP_TPDUSIZE, /* u_char p_tpdusize */ + + 
TPACK_WINDOW, /* 4 bits p_ack_strat */ + TPRX_USE_CW | TPRX_FASTSTART, + /* 4 bits p_rx_strat*/ + TP_CLASS_4, /* 5 bits p_class */ + 1, /* 1 bit xtd format */ + 1, /* 1 bit xpd service */ + 1, /* 1 bit use_checksum */ + 0, /* 1 bit use net xpd */ + 0, /* 1 bit use rcc */ + 0, /* 1 bit use efc */ + 1, /* no disc indications */ + 0, /* don't change params */ + IN_CLNS, /* p_netservice */ + }, + /* ISO_CONS: TP0 CONNECTION MODE */ + { + TP_NRETRANS, /* short p_Nretrans; */ + 0, /* n/a */ /* short p_dr_ticks; */ + + 40, /* 20 sec */ /* short p_cc_ticks; */ + 0, /* n/a */ /* short p_dt_ticks; */ + + 0, /* n/a */ /* short p_x_ticks; */ + 360, /* 3 min */ /* short p_cr_ticks;*/ + + 0, /* n/a */ /* short p_keepalive_ticks;*/ + 0, /* n/a */ /* short p_sendack_ticks; */ + + 600, /* for cr/cc to clear *//* short p_ref_ticks; */ + 0, /* n/a */ /* short p_inact_ticks; */ + + /* Use tp4 defaults just in case the user changes ONLY + * the class + */ + (short) 100, /* short p_lcdtfract */ + (short) TP0_SOCKBUFSIZE, /* short p_winsize */ + TP0_TPDUSIZE, /* 8 bits p_tpdusize */ + + 0, /* 4 bits p_ack_strat */ + 0, /* 4 bits p_rx_strat*/ + TP_CLASS_0, /* 5 bits p_class */ + 0, /* 1 bit xtd format */ + 0, /* 1 bit xpd service */ + 0, /* 1 bit use_checksum */ + 0, /* 1 bit use net xpd */ + 0, /* 1 bit use rcc */ + 0, /* 1 bit use efc */ + 0, /* no disc indications */ + 0, /* don't change params */ + ISO_CONS, /* p_netservice */ + }, + /* ISO_COSNS: TP4 CONNECTION LESS SERVICE over CONSNS */ + { + TP_NRETRANS, /* short p_Nretrans; */ + 40, /* 20 sec */ /* short p_dr_ticks; */ + + 40, /* 20 sec */ /* short p_cc_ticks; */ + 80, /* 40 sec */ /* short p_dt_ticks; */ + + 120, /* 1 min */ /* short p_x_ticks; */ + 360, /* 3 min */ /* short p_cr_ticks;*/ + + 360, /* 3 min */ /* short p_keepalive_ticks;*/ + 20, /* 10 sec */ /* short p_sendack_ticks; */ + + 600, /* 5 min */ /* short p_ref_ticks; */ + 480, /* 4 min */ /* short p_inact_ticks; */ + + (short) 100, /* short p_lcdtfract */ + (short) 
TP0_SOCKBUFSIZE, /* short p_winsize */ + TP0_TPDUSIZE, /* u_char p_tpdusize */ + + TPACK_WINDOW, /* 4 bits p_ack_strat */ + TPRX_USE_CW , /* No fast start */ + /* 4 bits p_rx_strat*/ + TP_CLASS_4 | TP_CLASS_0,/* 5 bits p_class */ + 0, /* 1 bit xtd format */ + 1, /* 1 bit xpd service */ + 1, /* 1 bit use_checksum */ + 0, /* 1 bit use net xpd */ + 0, /* 1 bit use rcc */ + 0, /* 1 bit use efc */ + 0, /* no disc indications */ + 0, /* don't change params */ + ISO_COSNS, /* p_netservice */ + }, +}; + +#if INET +int in_putnetaddr(); +int in_getnetaddr(); +int in_cmpnetaddr(); +int in_putsufx(); +int in_getsufx(); +int in_recycle_tsuffix(); +int tpip_mtu(); +int in_pcbbind(); +int in_pcbconnect(); +int in_pcbdisconnect(); +int in_pcbdetach(); +int in_pcballoc(); +int tpip_output(); +int tpip_output_dg(); +struct inpcb tp_inpcb; +#endif /* INET */ +#if ISO +int iso_putnetaddr(); +int iso_getnetaddr(); +int iso_cmpnetaddr(); +int iso_putsufx(); +int iso_getsufx(); +int iso_recycle_tsuffix(); +int tpclnp_mtu(); +int iso_pcbbind(); +int iso_pcbconnect(); +int iso_pcbdisconnect(); +int iso_pcbdetach(); +int iso_pcballoc(); +int tpclnp_output(); +int tpclnp_output_dg(); +int iso_nlctloutput(); +struct isopcb tp_isopcb; +#endif /* ISO */ +#if TPCONS +int iso_putnetaddr(); +int iso_getnetaddr(); +int iso_cmpnetaddr(); +int iso_putsufx(); +int iso_getsufx(); +int iso_recycle_tsuffix(); +int iso_pcbbind(); +int tpcons_pcbconnect(); +int tpclnp_mtu(); +int iso_pcbdisconnect(); +int iso_pcbdetach(); +int iso_pcballoc(); +int tpcons_output(); +struct isopcb tp_isopcb; +#endif /* TPCONS */ + + +struct nl_protosw nl_protosw[] = { + /* ISO_CLNS */ +#if ISO + { AF_ISO, iso_putnetaddr, iso_getnetaddr, iso_cmpnetaddr, + iso_putsufx, iso_getsufx, + iso_recycle_tsuffix, + tpclnp_mtu, iso_pcbbind, iso_pcbconnect, + iso_pcbdisconnect, iso_pcbdetach, + iso_pcballoc, + tpclnp_output, tpclnp_output_dg, iso_nlctloutput, + (caddr_t) &tp_isopcb, + }, +#else + { 0 }, +#endif /* ISO */ + /* IN_CLNS */ 
+#if INET + { AF_INET, in_putnetaddr, in_getnetaddr, in_cmpnetaddr, + in_putsufx, in_getsufx, + in_recycle_tsuffix, + tpip_mtu, in_pcbbind, in_pcbconnect, + in_pcbdisconnect, in_pcbdetach, + in_pcballoc, + tpip_output, tpip_output_dg, /* nl_ctloutput */ NULL, + (caddr_t) &tp_inpcb, + }, +#else + { 0 }, +#endif /* INET */ + /* ISO_CONS */ +#if defined(ISO) && defined(TPCONS) + { AF_ISO, iso_putnetaddr, iso_getnetaddr, iso_cmpnetaddr, + iso_putsufx, iso_getsufx, + iso_recycle_tsuffix, + tpclnp_mtu, iso_pcbbind, tpcons_pcbconnect, + iso_pcbdisconnect, iso_pcbdetach, + iso_pcballoc, + tpcons_output, tpcons_output, iso_nlctloutput, + (caddr_t) &tp_isopcb, + }, +#else + { 0 }, +#endif /* ISO_CONS */ + /* End of protosw marker */ + { 0 } +}; + +u_long tp_sendspace = 1024 * 4; +u_long tp_recvspace = 1024 * 4; + +/* + * NAME: tp_init() + * + * CALLED FROM: + * autoconf through the protosw structure + * + * FUNCTION: + * initialize tp machine + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ +int +tp_init() +{ + static int init_done=0; + void tp_timerinit(); + + if (init_done++) + return 0; + + + /* FOR INET */ + tp_inpcb.inp_next = tp_inpcb.inp_prev = &tp_inpcb; + /* FOR ISO */ + tp_isopcb.isop_next = tp_isopcb.isop_prev = &tp_isopcb; + + tp_start_win = 2; + + tp_timerinit(); + bzero((caddr_t)&tp_stat, sizeof(struct tp_stat)); + return 0; +} + +/* + * NAME: tp_soisdisconnecting() + * + * CALLED FROM: + * tp.trans + * + * FUNCTION and ARGUMENTS: + * Set state of the socket (so) to reflect that fact that we're disconnectING + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + * This differs from the regular soisdisconnecting() in that the latter + * also sets the SS_CANTRECVMORE and SS_CANTSENDMORE flags. + * We don't want to set those flags because those flags will cause + * a SIGPIPE to be delivered in sosend() and we don't like that. + * If anyone else is sleeping on this socket, wake 'em up. 
+ */ +void +tp_soisdisconnecting(so) + register struct socket *so; +{ + soisdisconnecting(so); + so->so_state &= ~SS_CANTSENDMORE; + IFPERF(sototpcb(so)) + register struct tp_pcb *tpcb = sototpcb(so); + u_int fsufx, lsufx; + + bcopy ((caddr_t)tpcb->tp_fsuffix, (caddr_t)&fsufx, sizeof(u_int) ); + bcopy ((caddr_t)tpcb->tp_lsuffix, (caddr_t)&lsufx, sizeof(u_int) ); + + tpmeas(tpcb->tp_lref, TPtime_close, &time, fsufx, lsufx, tpcb->tp_fref); + tpcb->tp_perf_on = 0; /* turn perf off */ + ENDPERF +} + + +/* + * NAME: tp_soisdisconnected() + * + * CALLED FROM: + * tp.trans + * + * FUNCTION and ARGUMENTS: + * Set state of the socket (so) to reflect that fact that we're disconnectED + * Set the state of the reference structure to closed, and + * recycle the suffix. + * Start a reference timer. + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + * This differs from the regular soisdisconnected() in that the latter + * also sets the SS_CANTRECVMORE and SS_CANTSENDMORE flags. + * We don't want to set those flags because those flags will cause + * a SIGPIPE to be delivered in sosend() and we don't like that. + * If anyone else is sleeping on this socket, wake 'em up. 
+ */ +void +tp_soisdisconnected(tpcb) + register struct tp_pcb *tpcb; +{ + register struct socket *so = tpcb->tp_sock; + + soisdisconnecting(so); + so->so_state &= ~SS_CANTSENDMORE; + IFPERF(tpcb) + register struct tp_pcb *ttpcb = sototpcb(so); + u_int fsufx, lsufx; + + /* CHOKE */ + bcopy ((caddr_t)ttpcb->tp_fsuffix, (caddr_t)&fsufx, sizeof(u_int) ); + bcopy ((caddr_t)ttpcb->tp_lsuffix, (caddr_t)&lsufx, sizeof(u_int) ); + + tpmeas(ttpcb->tp_lref, TPtime_close, + &time, &lsufx, &fsufx, ttpcb->tp_fref); + tpcb->tp_perf_on = 0; /* turn perf off */ + ENDPERF + + tpcb->tp_refstate = REF_FROZEN; + tp_recycle_tsuffix(tpcb); + tp_etimeout(tpcb, TM_reference, (int)tpcb->tp_refer_ticks); +} + +/* + * NAME: tp_freeref() + * + * CALLED FROM: + * tp.trans when the reference timer goes off, and + * from tp_attach() and tp_detach() when a tpcb is partially set up but not + * set up enough to have a ref timer set for it, and it's discarded + * due to some sort of error or an early close() + * + * FUNCTION and ARGUMENTS: + * Frees the reference represented by (r) for re-use. + * + * RETURNS: Nothing + * + * SIDE EFFECTS: + * + * NOTES: better be called at clock priority !!!!! 
+ */ +void +tp_freeref(n) +RefNum n; +{ + register struct tp_ref *r = tp_ref + n; + register struct tp_pcb *tpcb; + + tpcb = r->tpr_pcb; + IFDEBUG(D_TIMER) + printf("tp_freeref called for ref %d pcb %x maxrefopen %d\n", + n, tpcb, tp_refinfo.tpr_maxopen); + ENDDEBUG + IFTRACE(D_TIMER) + tptrace(TPPTmisc, "tp_freeref ref maxrefopen pcb", + n, tp_refinfo.tpr_maxopen, tpcb, 0); + ENDTRACE + if (tpcb == 0) + return; + IFDEBUG(D_CONN) + printf("tp_freeref: CLEARING tpr_pcb 0x%x\n", tpcb); + ENDDEBUG + r->tpr_pcb = (struct tp_pcb *)0; + tpcb->tp_refstate = REF_FREE; + + for (r = tp_ref + tp_refinfo.tpr_maxopen; r > tp_ref; r--) + if (r->tpr_pcb) + break; + tp_refinfo.tpr_maxopen = r - tp_ref; + tp_refinfo.tpr_numopen--; + + IFDEBUG(D_TIMER) + printf("tp_freeref ends w/ maxrefopen %d\n", tp_refinfo.tpr_maxopen); + ENDDEBUG +} + +/* + * NAME: tp_getref() + * + * CALLED FROM: + * tp_attach() + * + * FUNCTION and ARGUMENTS: + * obtains the next free reference and allocates the appropriate + * ref structure, links that structure to (tpcb) + * + * RETURN VALUE: + * a reference number + * or TP_ENOREF + * + * SIDE EFFECTS: + * + * NOTES: + */ +u_long +tp_getref(tpcb) + register struct tp_pcb *tpcb; +{ + register struct tp_ref *r, *rlim; + register int i; + caddr_t obase; + unsigned size; + + if (++tp_refinfo.tpr_numopen < tp_refinfo.tpr_size) + for (r = tp_refinfo.tpr_base, rlim = r + tp_refinfo.tpr_size; + ++r < rlim; ) /* tp_ref[0] is never used */ + if (r->tpr_pcb == 0) + goto got_one; + /* else have to allocate more space */ + + obase = (caddr_t)tp_refinfo.tpr_base; + size = tp_refinfo.tpr_size * sizeof(struct tp_ref); +// r = (struct tp_ref *) malloc(size + size, M_PCB, M_NOWAIT); + MALLOC(r, struct tp_ref *, size + size, M_PCB, M_NOWAIT); + if (r == 0) + return (--tp_refinfo.tpr_numopen, TP_ENOREF); + tp_refinfo.tpr_base = tp_ref = r; + tp_refinfo.tpr_size *= 2; + bcopy(obase, (caddr_t)r, size); + FREE(obase, M_PCB); + r = (struct tp_ref *)(size + (caddr_t)r); + 
bzero((caddr_t)r, size); + +got_one: + r->tpr_pcb = tpcb; + tpcb->tp_refstate = REF_OPENING; + i = r - tp_refinfo.tpr_base; + if (tp_refinfo.tpr_maxopen < i) + tp_refinfo.tpr_maxopen = i; + return (u_long)i; +} + +/* + * NAME: tp_set_npcb() + * + * CALLED FROM: + * tp_attach(), tp_route_to() + * + * FUNCTION and ARGUMENTS: + * given a tpcb, allocate an appropriate lower-lever npcb, freeing + * any old ones that might need re-assigning. + */ +tp_set_npcb(tpcb) +register struct tp_pcb *tpcb; +{ + register struct socket *so = tpcb->tp_sock; + int error; + + if (tpcb->tp_nlproto && tpcb->tp_npcb) { + short so_state = so->so_state; + so->so_state &= ~SS_NOFDREF; + tpcb->tp_nlproto->nlp_pcbdetach(tpcb->tp_npcb); + so->so_state = so_state; + } + tpcb->tp_nlproto = &nl_protosw[tpcb->tp_netservice]; + /* xx_pcballoc sets so_pcb */ + error = tpcb->tp_nlproto->nlp_pcballoc(so, tpcb->tp_nlproto->nlp_pcblist); + tpcb->tp_npcb = so->so_pcb; + so->so_pcb = (caddr_t)tpcb; + return (error); +} +/* + * NAME: tp_attach() + * + * CALLED FROM: + * tp_usrreq, PRU_ATTACH + * + * FUNCTION and ARGUMENTS: + * given a socket (so) and a protocol family (dom), allocate a tpcb + * and ref structure, initialize everything in the structures that + * needs to be initialized. + * + * RETURN VALUE: + * 0 ok + * EINVAL if DEBUG(X) in is on and a disaster has occurred + * ENOPROTOOPT if TP hasn't been configured or if the + * socket wasn't created with tp as its protocol + * EISCONN if this socket is already part of a connection + * ETOOMANYREFS if ran out of tp reference numbers. 
+ * E* whatever error is returned from soreserve() + * for from the network-layer pcb allocation routine + * + * SIDE EFFECTS: + * + * NOTES: + */ +tp_attach(so, protocol) + struct socket *so; + int protocol; +{ + register struct tp_pcb *tpcb; + int error = 0; + int dom = so->so_proto->pr_domain->dom_family; + u_long lref; + extern struct tp_conn_param tp_conn_param[]; + + IFDEBUG(D_CONN) + printf("tp_attach:dom 0x%x so 0x%x ", dom, so); + ENDDEBUG + IFTRACE(D_CONN) + tptrace(TPPTmisc, "tp_attach:dom so", dom, so, 0, 0); + ENDTRACE + + if (so->so_pcb != NULL) { + return EISCONN; /* socket already part of a connection*/ + } + + if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) + error = soreserve(so, tp_sendspace, tp_recvspace); + /* later an ioctl will allow reallocation IF still in closed state */ + + if (error) + goto bad2; + + MALLOC(tpcb, struct tp_pcb *, sizeof(*tpcb), M_PCB, M_NOWAIT); + if (tpcb == NULL) { + error = ENOBUFS; + goto bad2; + } + bzero( (caddr_t)tpcb, sizeof (struct tp_pcb) ); + + if ( ((lref = tp_getref(tpcb)) & TP_ENOREF) != 0 ) { + error = ETOOMANYREFS; + goto bad3; + } + tpcb->tp_lref = lref; + tpcb->tp_sock = so; + tpcb->tp_domain = dom; + tpcb->tp_rhiwat = so->so_rcv.sb_hiwat; + /* tpcb->tp_proto = protocol; someday maybe? */ + if (protocol && protocoltp_netservice = ISO_CONS; + tpcb->tp_snduna = (SeqNum) -1;/* kludge so the pseudo-ack from the CR/CC + * will generate correct fake-ack values + */ + } else { + tpcb->tp_netservice = (dom== AF_INET)?IN_CLNS:ISO_CLNS; + /* the default */ + } + tpcb->_tp_param = tp_conn_param[tpcb->tp_netservice]; + + tpcb->tp_state = TP_CLOSED; + tpcb->tp_vers = TP_VERSION; + tpcb->tp_notdetached = 1; + + /* Spec says default is 128 octets, + * that is, if the tpdusize argument never appears, use 128. + * As the initiator, we will always "propose" the 2048 + * size, that is, we will put this argument in the CR + * always, but accept what the other side sends on the CC. 
+ * If the initiator sends us something larger on a CR, + * we'll respond w/ this. + * Our maximum is 4096. See tp_chksum.c comments. + */ + tpcb->tp_cong_win = + tpcb->tp_l_tpdusize = 1 << tpcb->tp_tpdusize; + + tpcb->tp_seqmask = TP_NML_FMT_MASK; + tpcb->tp_seqbit = TP_NML_FMT_BIT; + tpcb->tp_seqhalf = tpcb->tp_seqbit >> 1; + + /* attach to a network-layer protoswitch */ + if ( error = tp_set_npcb(tpcb)) + goto bad4; + ASSERT( tpcb->tp_nlproto->nlp_afamily == tpcb->tp_domain); + + /* nothing to do for iso case */ + if( dom == AF_INET ) + sotoinpcb(so)->inp_ppcb = (caddr_t) tpcb; + + return 0; + +bad4: + IFDEBUG(D_CONN) + printf("BAD4 in tp_attach, so 0x%x\n", so); + ENDDEBUG + tp_freeref(tpcb->tp_lref); + +bad3: + IFDEBUG(D_CONN) + printf("BAD3 in tp_attach, so 0x%x\n", so); + ENDDEBUG + + FREE((caddr_t)tpcb, M_PCB); /* never a cluster */ + +bad2: + IFDEBUG(D_CONN) + printf("BAD2 in tp_attach, so 0x%x\n", so); + ENDDEBUG + so->so_pcb = 0; + +/*bad:*/ + IFDEBUG(D_CONN) + printf("BAD in tp_attach, so 0x%x\n", so); + ENDDEBUG + return error; +} + +/* + * NAME: tp_detach() + * + * CALLED FROM: + * tp.trans, on behalf of a user close request + * and when the reference timer goes off + * (if the disconnect was initiated by the protocol entity + * rather than by the user) + * + * FUNCTION and ARGUMENTS: + * remove the tpcb structure from the list of active or + * partially active connections, recycle all the mbufs + * associated with the pcb, ref structure, sockbufs, etc. + * Only free the ref structure if you know that a ref timer + * wasn't set for this tpcb. 
+ * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + * tp_soisdisconnected() was already when this is called + */ +void +tp_detach(tpcb) + register struct tp_pcb *tpcb; +{ + void tp_freeref(), tp_rsyflush(); + register struct socket *so = tpcb->tp_sock; + + IFDEBUG(D_CONN) + printf("tp_detach(tpcb 0x%x, so 0x%x)\n", + tpcb,so); + ENDDEBUG + IFTRACE(D_CONN) + tptraceTPCB(TPPTmisc, "tp_detach tpcb so lsufx", + tpcb, so, *(u_short *)(tpcb->tp_lsuffix), 0); + ENDTRACE + + IFDEBUG(D_CONN) + printf("so_snd at 0x%x so_rcv at 0x%x\n", &so->so_snd, &so->so_rcv); + dump_mbuf(so->so_snd.sb_mb, "so_snd at detach "); + printf("about to call LL detach, nlproto 0x%x, nl_detach 0x%x\n", + tpcb->tp_nlproto, tpcb->tp_nlproto->nlp_pcbdetach); + ENDDEBUG + + if (tpcb->tp_Xsnd.sb_mb) { + printf("Unsent Xdata on detach; would panic"); + sbflush(&tpcb->tp_Xsnd); + } + if (tpcb->tp_ucddata) + m_freem(tpcb->tp_ucddata); + + IFDEBUG(D_CONN) + printf("reassembly info cnt %d rsyq 0x%x\n", + tpcb->tp_rsycnt, tpcb->tp_rsyq); + ENDDEBUG + if (tpcb->tp_rsyq) + tp_rsyflush(tpcb); + + if (tpcb->tp_next) { + remque(tpcb); + tpcb->tp_next = tpcb->tp_prev = 0; + } + tpcb->tp_notdetached = 0; + + IFDEBUG(D_CONN) + printf("calling (...nlproto->...)(0x%x, so 0x%x)\n", + tpcb->tp_npcb, so); + printf("so 0x%x so_head 0x%x, qlen %d q0len %d qlimit %d\n", + so, so->so_head, + so->so_q0len, so->so_qlen, so->so_qlimit); + ENDDEBUG + + (tpcb->tp_nlproto->nlp_pcbdetach)(tpcb->tp_npcb); + /* does an so->so_pcb = 0; sofree(so) */ + + IFDEBUG(D_CONN) + printf("after xxx_pcbdetach\n"); + ENDDEBUG + + if (tpcb->tp_state == TP_LISTENING) { + register struct tp_pcb **tt; + for (tt = &tp_listeners; *tt; tt = &((*tt)->tp_nextlisten)) + if (*tt == tpcb) + break; + if (*tt) + *tt = tpcb->tp_nextlisten; + else + printf("tp_detach from listen: should panic\n"); + } + if (tpcb->tp_refstate == REF_OPENING ) { + /* no connection existed here so no reference timer will be called */ + IFDEBUG(D_CONN) + printf("SETTING ref 
%d to REF_FREE\n", tpcb->tp_lref); + ENDDEBUG + + tp_freeref(tpcb->tp_lref); + } +#ifdef TP_PERF_MEAS + /* + * Get rid of the cluster mbuf allocated for performance measurements, if + * there is one. Note that tpcb->tp_perf_on says nothing about whether or + * not a cluster mbuf was allocated, so you have to check for a pointer + * to one (that is, we need the TP_PERF_MEASs around the following section + * of code, not the IFPERFs) + */ + if (tpcb->tp_p_mbuf) { + register struct mbuf *m = tpcb->tp_p_mbuf; + struct mbuf *n; + IFDEBUG(D_PERF_MEAS) + printf("freeing tp_p_meas 0x%x ", tpcb->tp_p_meas); + ENDDEBUG + do { + MFREE(m, n); + m = n; + } while (n); + tpcb->tp_p_meas = 0; + tpcb->tp_p_mbuf = 0; + } +#endif /* TP_PERF_MEAS */ + + IFDEBUG(D_CONN) + printf( "end of detach, NOT single, tpcb 0x%x\n", tpcb); + ENDDEBUG + /* FREE((caddr_t)tpcb, M_PCB); WHere to put this ? */ +} + +struct que { + struct tp_pcb *next; + struct tp_pcb *prev; +} tp_bound_pcbs = +{(struct tp_pcb *)&tp_bound_pcbs, (struct tp_pcb *)&tp_bound_pcbs}; + +u_short tp_unique; + +tp_tselinuse(tlen, tsel, siso, reuseaddr) +caddr_t tsel; +register struct sockaddr_iso *siso; +{ + struct tp_pcb *b = tp_bound_pcbs.next, *l = tp_listeners; + register struct tp_pcb *t; + + for (;;) { + if (b != (struct tp_pcb *)&tp_bound_pcbs) { + t = b; b = t->tp_next; + } else if (l) { + t = l; l = t->tp_nextlisten; + } else + break; + if (tlen == t->tp_lsuffixlen && bcmp(tsel, t->tp_lsuffix, tlen) == 0) { + if (t->tp_flags & TPF_GENERAL_ADDR) { + if (siso == 0 || reuseaddr == 0) + return 1; + } else if (siso) { + if (siso->siso_family == t->tp_domain && + t->tp_nlproto->nlp_cmpnetaddr(t->tp_npcb, siso, TP_LOCAL)) + return 1; + } else if (reuseaddr == 0) + return 1; + } + } + return 0; + +} + + +tp_pcbbind(tpcb, nam) +register struct tp_pcb *tpcb; +register struct mbuf *nam; +{ + register struct sockaddr_iso *siso = 0; + int tlen = 0, wrapped = 0; + caddr_t tsel; + u_short tutil; + + if (tpcb->tp_state != TP_CLOSED) + 
return (EINVAL); + if (nam) { + siso = mtod(nam, struct sockaddr_iso *); + switch (siso->siso_family) { + default: + return (EAFNOSUPPORT); +#if ISO + case AF_ISO: + tlen = siso->siso_tlen; + tsel = TSEL(siso); + if (siso->siso_nlen == 0) + siso = 0; + break; +#endif +#if INET + case AF_INET: + tsel = (caddr_t)&tutil; + if (tutil = ((struct sockaddr_in *)siso)->sin_port) { + tlen = 2; + } + if (((struct sockaddr_in *)siso)->sin_addr.s_addr == 0) + siso = 0; + } +#endif + } + if (tpcb->tp_lsuffixlen == 0) { + if (tlen) { + if (tp_tselinuse(tlen, tsel, siso, + tpcb->tp_sock->so_options & SO_REUSEADDR)) + return (EINVAL); + } else { + for (tsel = (caddr_t)&tutil, tlen = 2;;){ + if (tp_unique++ < ISO_PORT_RESERVED || + tp_unique > ISO_PORT_USERRESERVED) { + if (wrapped++) + return ESRCH; + tp_unique = ISO_PORT_RESERVED; + } + tutil = htons(tp_unique); + if (tp_tselinuse(tlen, tsel, siso, 0) == 0) + break; + } + if (siso) switch (siso->siso_family) { +#if ISO + case AF_ISO: + bcopy(tsel, TSEL(siso), tlen); + siso->siso_tlen = tlen; + break; +#endif +#if INET + case AF_INET: + ((struct sockaddr_in *)siso)->sin_port = tutil; +#endif + } + } + bcopy(tsel, tpcb->tp_lsuffix, (tpcb->tp_lsuffixlen = tlen)); + insque(tpcb, &tp_bound_pcbs); + } else { + if (tlen || siso == 0) + return (EINVAL); + } + if (siso == 0) { + tpcb->tp_flags |= TPF_GENERAL_ADDR; + return (0); + } + return tpcb->tp_nlproto->nlp_pcbbind(tpcb->tp_npcb, nam); +} diff --git a/bsd/netiso/tp_pcb.h b/bsd/netiso/tp_pcb.h new file mode 100644 index 000000000..5318883da --- /dev/null +++ b/bsd/netiso/tp_pcb.h @@ -0,0 +1,374 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_pcb.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * + * This file defines the transport protocol control block (tpcb). + * and a bunch of #define values that are used in the tpcb. 
+ */ + +#ifndef __TP_PCB__ +#define __TP_PCB__ + +#include <netiso/tp_param.h> +#include <netiso/tp_timer.h> +#include <netiso/tp_user.h> +#ifndef sblock +#include <sys/socketvar.h> +#endif /* sblock */ + +/* NOTE: the code depends on REF_CLOSED > REF_OPEN > the rest, and + * on REF_FREE being zero + * + * Possible improvement: + * think about merging the tp_ref w/ the tpcb and doing a search + * through the tpcb list, from tpb. This would slow down lookup + * during data transfer + * It would be a little nicer also to have something based on the + * clock (like top n bits of the reference is part of the clock, to + * minimize the likelihood of reuse after a crash) + * also, need to keep the timer servicing part to a minimum (although + * the cost of this is probably independent of whether the timers are + * in the pcb or in an array.. + * Last, would have to make the number of timers a function of the amount of + * mbufs available, plus some for the frozen references. + * + * Possible improvement: + * Might not need the ref_state stuff either... + * REF_FREE could correspond to tp_state == CLOSED or nonexistend tpcb, + * REF_OPEN to tp_state anywhere from AK_WAIT or CR_SENT to CLOSING + * REF_OPENING could correspond to LISTENING, because that's the + * way it's used, not because the correspondence is exact. 
+ * REF_CLOSED could correspond to REFWAIT + */ +#define REF_FROZEN 3 /* has ref timer only */ +#define REF_OPEN 2 /* has timers, possibly active */ +#define REF_OPENING 1 /* in use (has a pcb) but no timers */ +#define REF_FREE 0 /* free to reallocate */ + +#define TM_NTIMERS 6 + +struct tp_ref { + struct tp_pcb *tpr_pcb; /* back ptr to PCB */ +}; + +/* PER system stuff (one static structure instead of a bunch of names) */ +struct tp_refinfo { + struct tp_ref *tpr_base; + int tpr_size; + int tpr_maxopen; + int tpr_numopen; +}; + +struct nl_protosw { + int nlp_afamily; /* address family */ + int (*nlp_putnetaddr)(); /* puts addresses in nl pcb */ + int (*nlp_getnetaddr)(); /* gets addresses from nl pcb */ + int (*nlp_cmpnetaddr)(); /* compares address in pcb with sockaddr */ + int (*nlp_putsufx)(); /* puts transport suffixes in nl pcb */ + int (*nlp_getsufx)(); /* gets transport suffixes from nl pcb */ + int (*nlp_recycle_suffix)();/* clears suffix from nl pcb */ + int (*nlp_mtu)(); /* figures out mtu based on nl used */ + int (*nlp_pcbbind)(); /* bind to pcb for net level */ + int (*nlp_pcbconn)(); /* connect for net level */ + int (*nlp_pcbdisc)(); /* disconnect net level */ + int (*nlp_pcbdetach)(); /* detach net level pcb */ + int (*nlp_pcballoc)(); /* allocate a net level pcb */ + int (*nlp_output)(); /* prepare a packet to give to nl */ + int (*nlp_dgoutput)(); /* prepare a packet to give to nl */ + int (*nlp_ctloutput)(); /* hook for network set/get options */ + caddr_t nlp_pcblist; /* list of xx_pcb's for connections */ +}; + + +struct tp_pcb { + struct tp_pcb *tp_next; + struct tp_pcb *tp_prev; + struct tp_pcb *tp_nextlisten; /* chain all listeners */ + struct socket *tp_sock; /* back ptr */ + u_short tp_state; /* state of fsm */ + short tp_retrans; /* # times can still retrans */ + caddr_t tp_npcb; /* to lower layer pcb */ + struct nl_protosw *tp_nlproto; /* lower-layer dependent routines */ + struct rtentry **tp_routep; /* obtain mtu; inside npcb */ + + 
+ RefNum tp_lref; /* local reference */ + RefNum tp_fref; /* foreign reference */ + + u_int tp_seqmask; /* mask for seq space */ + u_int tp_seqbit; /* bit for seq number wraparound */ + u_int tp_seqhalf; /* half the seq space */ + + struct mbuf *tp_ucddata; /* user connect/disconnect data */ + + /* credit & sequencing info for SENDING */ + u_short tp_fcredit; /* current remote credit in # packets */ + u_short tp_maxfcredit; /* max remote credit in # packets */ + u_short tp_dupacks; /* intuit packet loss before rxt timo */ + u_long tp_cong_win; /* congestion window in bytes. + * see profuse comments in TCP code + */ + u_long tp_ssthresh; /* cong_win threshold for slow start + * exponential to linear switch + */ + SeqNum tp_snduna; /* seq # of lowest unacked DT */ + SeqNum tp_sndnew; /* seq # of lowest unsent DT */ + SeqNum tp_sndnum; /* next seq # to be assigned */ + SeqNum tp_sndnxt; /* what to do next; poss. rxt */ + struct mbuf *tp_sndnxt_m; /* packet corres. to sndnxt*/ + int tp_Nwindow; /* for perf. measurement */ + + /* credit & sequencing info for RECEIVING */ + SeqNum tp_rcvnxt; /* next DT seq # expect to recv */ + SeqNum tp_sent_lcdt; /* cdt according to last ack sent */ + SeqNum tp_sent_uwe; /* uwe according to last ack sent */ + SeqNum tp_sent_rcvnxt; /* rcvnxt according to last ack sent + * needed for perf measurements only + */ + u_short tp_lcredit; /* current local credit in # packets */ + u_short tp_maxlcredit; /* needed for reassembly queue */ + struct mbuf **tp_rsyq; /* unacked stuff recvd out of order */ + int tp_rsycnt; /* number of packets "" "" "" "" */ + u_long tp_rhiwat; /* remember original RCVBUF size */ + + /* receiver congestion state stuff ... 
*/ + u_int tp_win_recv; + + /* receive window as a scaled int (8 bit fraction part) */ + + struct cong_sample { + ushort cs_size; /* current window size */ + ushort cs_received; /* PDUs received in this sample */ + ushort cs_ce_set; /* PDUs received in this sample with CE bit set */ + } tp_cong_sample; + + + /* parameters per-connection controllable by user */ + struct tp_conn_param _tp_param; + +#define tp_Nretrans _tp_param.p_Nretrans +#define tp_dr_ticks _tp_param.p_dr_ticks +#define tp_cc_ticks _tp_param.p_cc_ticks +#define tp_dt_ticks _tp_param.p_dt_ticks +#define tp_xpd_ticks _tp_param.p_x_ticks +#define tp_cr_ticks _tp_param.p_cr_ticks +#define tp_keepalive_ticks _tp_param.p_keepalive_ticks +#define tp_sendack_ticks _tp_param.p_sendack_ticks +#define tp_refer_ticks _tp_param.p_ref_ticks +#define tp_inact_ticks _tp_param.p_inact_ticks +#define tp_xtd_format _tp_param.p_xtd_format +#define tp_xpd_service _tp_param.p_xpd_service +#define tp_ack_strat _tp_param.p_ack_strat +#define tp_rx_strat _tp_param.p_rx_strat +#define tp_use_checksum _tp_param.p_use_checksum +#define tp_use_efc _tp_param.p_use_efc +#define tp_use_nxpd _tp_param.p_use_nxpd +#define tp_use_rcc _tp_param.p_use_rcc +#define tp_tpdusize _tp_param.p_tpdusize +#define tp_class _tp_param.p_class +#define tp_winsize _tp_param.p_winsize +#define tp_no_disc_indications _tp_param.p_no_disc_indications +#define tp_dont_change_params _tp_param.p_dont_change_params +#define tp_netservice _tp_param.p_netservice +#define tp_version _tp_param.p_version +#define tp_ptpdusize _tp_param.p_ptpdusize + + int tp_l_tpdusize; + /* whereas tp_tpdusize is log2(the negotiated max size) + * l_tpdusize is the size we'll use when sending, in # chars + */ + + int tp_rtv; /* max round-trip time variance */ + int tp_rtt; /* smoothed round-trip time */ + SeqNum tp_rttseq; /* packet being timed */ + int tp_rttemit; /* when emitted, in ticks */ + int tp_idle; /* last activity, in ticks */ + short tp_rxtcur; /* current 
retransmit value */ + short tp_rxtshift; /* log(2) of rexmt exp. backoff */ + u_char tp_cebit_off; /* real DEC bit algorithms not in use */ + u_char tp_oktonagle; /* Last unsent pckt may be append to */ + u_char tp_flags; /* values: */ +#define TPF_NLQOS_PDN TPFLAG_NLQOS_PDN +#define TPF_PEER_ON_SAMENET TPFLAG_PEER_ON_SAMENET +#define TPF_GENERAL_ADDR TPFLAG_GENERAL_ADDR +#define TPF_DELACK 0x8 +#define TPF_ACKNOW 0x10 + +#define PEER_IS_LOCAL(t) (((t)->tp_flags & TPF_PEER_ON_SAME_NET) != 0) +#define USES_PDN(t) (((t)->tp_flags & TPF_NLQOS_PDN) != 0) + + + unsigned + tp_sendfcc:1, /* shall next ack include FCC parameter? */ + tp_trace:1, /* is this pcb being traced? (not used yet) */ + tp_perf_on:1, /* 0/1 -> performance measuring on */ + tp_reneged:1, /* have we reneged on cdt since last ack? */ + tp_decbit:3, /* dec bit was set, we're in reneg mode */ + tp_notdetached:1; /* Call tp_detach before freeing XXXXXXX */ + +#ifdef TP_PERF_MEAS + /* performance stats - see tp_stat.h */ + struct tp_pmeas *tp_p_meas; + struct mbuf *tp_p_mbuf; +#endif /* TP_PERF_MEAS */ + + /* addressing */ + u_short tp_domain; /* domain (INET, ISO) */ + /* for compatibility with the *old* way and with INET, be sure that + * that lsuffix and fsuffix are aligned to a short addr. + * having them follow the u_short *suffixlen should suffice (choke) + */ + u_short tp_fsuffixlen; /* foreign suffix */ + char tp_fsuffix[MAX_TSAP_SEL_LEN]; + u_short tp_lsuffixlen; /* local suffix */ + char tp_lsuffix[MAX_TSAP_SEL_LEN]; +#define SHORT_LSUFXP(tpcb) ((short *)((tpcb)->tp_lsuffix)) +#define SHORT_FSUFXP(tpcb) ((short *)((tpcb)->tp_fsuffix)) + + /* Timer stuff */ + u_char tp_vers; /* protocol version */ + u_char tp_peer_acktime; /* used for DT retrans time */ + u_char tp_refstate; /* values REF_FROZEN, etc. 
above */ + struct tp_pcb *tp_fasttimeo; /* limit pcbs to examine */ + u_int tp_timer[TM_NTIMERS]; /* C timers */ + + struct sockbuf tp_Xsnd; /* for expedited data */ +/* struct sockbuf tp_Xrcv; /* for expedited data */ +#define tp_Xrcv tp_sock->so_rcv + SeqNum tp_Xsndnxt; /* next XPD seq # to send */ + SeqNum tp_Xuna; /* seq # of unacked XPD */ + SeqNum tp_Xrcvnxt; /* next XPD seq # expect to recv */ + + /* AK subsequencing */ + u_short tp_s_subseq; /* next subseq to send */ + u_short tp_r_subseq; /* highest recv subseq */ + +}; + +u_int tp_start_win; + +#define ROUND(scaled_int) (((scaled_int) >> 8) + (((scaled_int) & 0x80) ? 1:0)) + +/* to round off a scaled int with an 8 bit fraction part */ + +#define CONG_INIT_SAMPLE(pcb) \ + pcb->tp_cong_sample.cs_received = \ + pcb->tp_cong_sample.cs_ce_set = 0; \ + pcb->tp_cong_sample.cs_size = max(pcb->tp_lcredit, 1) << 1; + +#define CONG_UPDATE_SAMPLE(pcb, ce_bit) \ + pcb->tp_cong_sample.cs_received++; \ + if (ce_bit) { \ + pcb->tp_cong_sample.cs_ce_set++; \ + } \ + if (pcb->tp_cong_sample.cs_size <= pcb->tp_cong_sample.cs_received) { \ + if ((pcb->tp_cong_sample.cs_ce_set << 1) >= \ + pcb->tp_cong_sample.cs_size ) { \ + pcb->tp_win_recv -= pcb->tp_win_recv >> 3; /* multiply by .875 */ \ + pcb->tp_win_recv = max(1 << 8, pcb->tp_win_recv); \ + } \ + else { \ + pcb->tp_win_recv += (1 << 8); /* add one to the scaled int */ \ + } \ + pcb->tp_lcredit = ROUND(pcb->tp_win_recv); \ + CONG_INIT_SAMPLE(pcb); \ + } + +#ifdef KERNEL +extern struct tp_refinfo tp_refinfo; +extern struct timeval time; +extern struct tp_ref *tp_ref; +extern struct tp_param tp_param; +extern struct nl_protosw nl_protosw[]; +extern struct tp_pcb *tp_listeners; +extern struct tp_pcb *tp_ftimeolist; +#endif + +#define sototpcb(so) ((struct tp_pcb *)(so->so_pcb)) +#define sototpref(so) ((sototpcb(so)->tp_ref)) +#define tpcbtoso(tp) ((struct socket *)((tp)->tp_sock)) +#define tpcbtoref(tp) ((struct tp_ref *)((tp)->tp_ref)) + +#endif /* __TP_PCB__ */ diff --git 
a/bsd/netiso/tp_seq.h b/bsd/netiso/tp_seq.h new file mode 100644 index 000000000..c07eea6a4 --- /dev/null +++ b/bsd/netiso/tp_seq.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_seq.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. 
+ +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * These macros perform sequence number arithmetic modulo (2**7 or 2**31). + * The relevant fields in the tpcb are: + * tp_seqmask : the mask of bits that define the sequence space. + * tp_seqbit : 1 + tp_seqmask + * tp_seqhalf : tp_seqbit / 2 or half the sequence space (rounded up) + * Not exactly fast, but at least it's maintainable. + */ + +#ifndef __TP_SEQ__ +#define __TP_SEQ__ + +#define SEQ(tpcb,x) \ + ((x) & (tpcb)->tp_seqmask) + +#define SEQ_GT(tpcb, seq, operand ) \ +( ((int)((seq)-(operand)) > 0)\ +? ((int)((seq)-(operand)) < (int)(tpcb)->tp_seqhalf)\ +: !(-((int)(seq)-(operand)) < (int)(tpcb)->tp_seqhalf)) + +#define SEQ_GEQ(tpcb, seq, operand ) \ +( ((int)((seq)-(operand)) >= 0)\ +? ((int)((seq)-(operand)) < (int)(tpcb)->tp_seqhalf)\ +: !((-((int)(seq)-(operand))) < (int)(tpcb)->tp_seqhalf)) + +#define SEQ_LEQ(tpcb, seq, operand ) \ +( ((int)((seq)-(operand)) <= 0)\ +? ((-(int)((seq)-(operand))) < (int)(tpcb)->tp_seqhalf)\ +: !(((int)(seq)-(operand)) < (int)(tpcb)->tp_seqhalf)) + +#define SEQ_LT(tpcb, seq, operand ) \ +( ((int)((seq)-(operand)) < 0)\ +? ((-(int)((seq)-(operand))) < (int)(tpcb)->tp_seqhalf)\ +: !(((int)(seq)-(operand)) < (int)(tpcb)->tp_seqhalf)) + +#define SEQ_MIN(tpcb, a, b) ( SEQ_GT(tpcb, a, b) ? b : a) + +#define SEQ_MAX(tpcb, a, b) ( SEQ_GT(tpcb, a, b) ? 
a : b) + +#define SEQ_INC(tpcb, Seq) ((++Seq), ((Seq) &= (tpcb)->tp_seqmask)) + +#define SEQ_DEC(tpcb, Seq)\ + ((Seq) = (((Seq)+(unsigned)((int)(tpcb)->tp_seqbit - 1))&(tpcb)->tp_seqmask)) + +/* (amt) had better be less than the seq bit ! */ + +#define SEQ_SUB(tpcb, Seq, amt)\ + (((Seq) + (unsigned)((int)(tpcb)->tp_seqbit - amt)) & (tpcb)->tp_seqmask) +#define SEQ_ADD(tpcb, Seq, amt) (((Seq) + (unsigned)amt) & (tpcb)->tp_seqmask) + + +#define IN_RWINDOW(tpcb, seq, lwe, uwe)\ + ( SEQ_GEQ(tpcb, seq, lwe) && SEQ_LT(tpcb, seq, uwe) ) + +#define IN_SWINDOW(tpcb, seq, lwe, uwe)\ + ( SEQ_GT(tpcb, seq, lwe) && SEQ_LEQ(tpcb, seq, uwe) ) + +#endif /* __TP_SEQ__ */ diff --git a/bsd/netiso/tp_stat.h b/bsd/netiso/tp_stat.h new file mode 100644 index 000000000..c22e40850 --- /dev/null +++ b/bsd/netiso/tp_stat.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_stat.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * Here are the data structures in which the global + * statistics(counters) are gathered. 
+ */ + +#ifndef __TP_STAT__ +#define __TP_STAT__ + +struct tp_stat { + u_long ts_param_ignored; + u_long ts_unused3; + u_long ts_bad_csum; + + u_long ts_inv_length; + u_long ts_inv_pcode; + u_long ts_inv_dutype; + u_long ts_negotfailed; + u_long ts_inv_dref; + u_long ts_inv_pval; + u_long ts_inv_sufx; + u_long ts_inv_aclass; + + u_long ts_xtd_fmt; + u_long ts_use_txpd; + u_long ts_csum_off; + u_long ts_send_drop; + u_long ts_recv_drop; + + u_long ts_xpd_intheway;/* xpd mark caused data flow to stop */ + u_long ts_xpdmark_del; /* xpd markers thrown away */ + u_long ts_dt_ooo; /* dt tpdus received out of order */ + u_long ts_dt_niw; /* dt tpdus received & not in window */ + u_long ts_xpd_niw; /* xpd tpdus received & not in window */ + u_long ts_xpd_dup; + u_long ts_dt_dup; /* dt tpdus received & are duplicates */ + + u_long ts_zfcdt; /* # times f credit went down to 0 */ + u_long ts_lcdt_reduced; /* + # times local cdt reduced on an acknowledgement. + */ + + u_long ts_pkt_rcvd; /* from ip */ + u_long ts_tpdu_rcvd; /* accepted as a TPDU in tp_input */ + u_long ts_tpdu_sent; + u_long ts_unused2; + + u_long ts_retrans_cr; + u_long ts_retrans_cc; + u_long ts_retrans_dr; + u_long ts_retrans_dt; + u_long ts_retrans_xpd; + u_long ts_conn_gaveup; + + u_long ts_ER_sent; + u_long ts_DT_sent; + u_long ts_XPD_sent; + u_long ts_AK_sent; + u_long ts_XAK_sent; + u_long ts_DR_sent; + u_long ts_DC_sent; + u_long ts_CR_sent; + u_long ts_CC_sent; + + u_long ts_ER_rcvd; + u_long ts_DT_rcvd; + u_long ts_XPD_rcvd; + u_long ts_AK_rcvd; + u_long ts_XAK_rcvd; + u_long ts_DR_rcvd; + u_long ts_DC_rcvd; + u_long ts_CR_rcvd; + u_long ts_CC_rcvd; + + u_long ts_Eticks; + u_long ts_Eexpired; + u_long ts_Eset; + u_long ts_Ecan_act; + u_long ts_Cticks; + u_long ts_Cexpired; + u_long ts_Cset; + u_long ts_Ccan_act; + u_long ts_Ccan_inact; + u_long ts_Fdelack; + u_long ts_Fpruned; + + u_long ts_concat_rcvd; + + u_long ts_zdebug; /* zero dref to test timeout on conn estab tp_input.c */ + u_long 
ts_ydebug; /* throw away pseudo-random pkts tp_input.c */ + u_long ts_unused5; + u_long ts_unused; /* kludged concat to test separation tp_emit.c */ + u_long ts_vdebug; /* kludge to test input size checking tp_emit.c */ + u_long ts_unused4; + u_long ts_ldebug; /* faked a renegging of credit */ + + u_long ts_mb_small; + u_long ts_mb_cluster; + u_long ts_mb_len_distr[17]; + + u_long ts_eot_input; + u_long ts_eot_user; + u_long ts_EOT_sent; + u_long ts_tp0_conn; + u_long ts_tp4_conn; + u_long ts_quench; + u_long ts_rcvdecbit; + +#define NRTT_CATEGORIES 4 + /* The 4 categories are: + * 0 --> tp_flags: ~TPF_PEER_ON_SAMENET | TPF_NL_PDN + * 1 --> tp_flags: ~TPF_PEER_ON_SAMENET | ~TPF_NL_PDN + * 2 --> tp_flags: TPF_PEER_ON_SAMENET | ~TPF_NL_PDN + * 3 --> tp_flags: TPF_PEER_ON_SAMENET | TPF_NL_PDN + */ + int ts_rtt[NRTT_CATEGORIES]; + int ts_rtv[NRTT_CATEGORIES]; + + u_long ts_ackreason[_ACK_NUM_REASONS_]; + /* ACK_DONT 0 / ACK_STRAT_EACH 0x1 / ACK_STRAT_FULLWIN 0x4 + * ACK_DUP 0x8 / ACK_EOT 0x10 / ACK_REORDER 0x20 + * ACK_USRRCV ** + * ACK_FCC ** + */ +} tp_stat ; +#define TP_PM_MAX 0xa /* 10 decimal */ + +#define IncStat(x) tp_stat./**/x/**/++ + +#ifdef TP_PERF_MEAS + +#define PStat(Tpcb, X) (Tpcb)->tp_p_meas->/**/X/**/ +#define IncPStat(Tpcb, X) if((Tpcb)->tp_perf_on) (Tpcb)->tp_p_meas->/**/X/**/++ + +/* BEWARE OF MACROS like this ^^^ must be sure it's surrounded by {} if + * it's used in an if-else statement. + */ + + +/* for perf measurement stuff: maximum window size it can handle */ + +struct tp_pmeas { + /* the first few are distributions as a fn of window size + * only keep enough space for normal format plus 1 slot for + * extended format, in case any windows larger than 15 are used + */ + + /* + * tps_npdusent: for each call to tp_sbsend, we inc the + * element representing the number of pdus sent in this call + */ + int tps_win_lim_by_cdt[TP_PM_MAX+1]; + int tps_win_lim_by_data[TP_PM_MAX+1]; + /* + * tps_sendtime: Each call to tp_sbsend() is timed. 
For + * Each window size, we keep the running average of the time + * taken by tp_sbsend() for each window size. + */ + int tps_sendtime[TP_PM_MAX+1]; + /* + * n_TMsendack: # times ack sent because timer went off + * n_ack_cuz_eot: # times ack sent due to EOTSDU on incoming packet + * n_ack_cuz_dup: # times ack sent for receiving a duplicate pkt. + * n_ack_cuz_fullwin: # times ack sent for receiving the full window. + * n_ack_cuz_doack: # times ack sent for having just reordered data. + */ + int tps_n_TMsendack; + int tps_n_ack_cuz_eot; + int tps_n_ack_cuz_fullwin; + int tps_n_ack_cuz_reorder; + int tps_n_ack_cuz_dup; + int tps_n_ack_cuz_strat; + /* + * when we send an ack: how much less than the "expected" window + * did we actually ack. For example: if we last sent a credit + * of 10, and we're acking now for whatever reason, and have + * only received 6 since our last credit advertisement, we'll + * keep the difference, 4, in this variable. + */ + int tps_ack_early[TP_PM_MAX+1]; + /* + * when we ack, for the # pkts we actually acked w/ this ack, + * how much cdt are we advertising? + * [ size of window acknowledged ] [ cdt we're giving ] + */ + int tps_cdt_acked[TP_PM_MAX+1][TP_PM_MAX+1]; + + int tps_AK_sent; + int tps_XAK_sent; + int tps_DT_sent; + int tps_XPD_sent; + int tps_AK_rcvd; + int tps_XAK_rcvd; + int tps_DT_rcvd; + int tps_XPD_rcvd; + + int Nb_from_sess; + int Nb_to_sess; + int Nb_to_ll; + int Nb_from_ll; +}; + +#define IFPERF(tpcb) if (tpcb->tp_perf_on && tpcb->tp_p_meas) { +#define ENDPERF } + +#else + +int PStat_Junk; +#define PStat(tpcb, x) PStat_Junk +#define IncPStat(tpcb, x) /* no-op */ +#define tpmeas(a,b,c,d,e,f) 0 + +#define IFPERF(x) if (0) { +#define ENDPERF } + +#endif /* TP_PERF_MEAS */ + +#endif /* __TP_STAT__ */ diff --git a/bsd/netiso/tp_states.h b/bsd/netiso/tp_states.h new file mode 100644 index 000000000..69b5456f9 --- /dev/null +++ b/bsd/netiso/tp_states.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#define ST_ERROR 0x0 +#define TP_CLOSED 0x1 +#define TP_CRSENT 0x2 +#define TP_AKWAIT 0x3 +#define TP_OPEN 0x4 +#define TP_CLOSING 0x5 +#define TP_REFWAIT 0x6 +#define TP_LISTENING 0x7 +#define TP_CONFIRMING 0x8 + +#define tp_NSTATES 0x9 diff --git a/bsd/netiso/tp_subr.c b/bsd/netiso/tp_subr.c new file mode 100644 index 000000000..409f5dbf7 --- /dev/null +++ b/bsd/netiso/tp_subr.c @@ -0,0 +1,967 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_subr.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * The main work of data transfer is done here. + * These routines are called from tp.trans. 
+ * They include the routines that check the validity of acks and Xacks, + * (tp_goodack() and tp_goodXack() ) + * take packets from socket buffers and send them (tp_send()), + * drop the data from the socket buffers (tp_sbdrop()), + * and put incoming packet data into socket buffers (tp_stash()). + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int tp_emit(), tp_sbdrop(); +int tprexmtthresh = 3; +extern int ticks; +void tp_send(); + +/* + * CALLED FROM: + * tp.trans, when an XAK arrives + * FUNCTION and ARGUMENTS: + * Determines if the sequence number (seq) from the XAK + * acks anything new. If so, drop the appropriate tpdu + * from the XPD send queue. + * RETURN VALUE: + * Returns 1 if it did this, 0 if the ack caused no action. + */ +int +tp_goodXack(tpcb, seq) + struct tp_pcb *tpcb; + SeqNum seq; +{ + + IFTRACE(D_XPD) + tptraceTPCB(TPPTgotXack, + seq, tpcb->tp_Xuna, tpcb->tp_Xsndnxt, tpcb->tp_sndnew, + tpcb->tp_snduna); + ENDTRACE + + if ( seq == tpcb->tp_Xuna ) { + tpcb->tp_Xuna = tpcb->tp_Xsndnxt; + + /* DROP 1 packet from the Xsnd socket buf - just so happens + * that only one packet can be there at any time + * so drop the whole thing. 
If you allow > 1 packet + * the socket buffer, then you'll have to keep + * track of how many characters went w/ each XPD tpdu, so this + * will get messier + */ + IFDEBUG(D_XPD) + dump_mbuf(tpcb->tp_Xsnd.sb_mb, + "tp_goodXack Xsnd before sbdrop"); + ENDDEBUG + + IFTRACE(D_XPD) + tptraceTPCB(TPPTmisc, + "goodXack: dropping cc ", + (int)(tpcb->tp_Xsnd.sb_cc), + 0,0,0); + ENDTRACE + sbdroprecord(&tpcb->tp_Xsnd); + return 1; + } + return 0; +} + +/* + * CALLED FROM: + * tp_good_ack() + * FUNCTION and ARGUMENTS: + * updates + * smoothed average round trip time (*rtt) + * roundtrip time variance (*rtv) - actually deviation, not variance + * given the new value (diff) + * RETURN VALUE: + * void + */ + +void +tp_rtt_rtv(tpcb) +register struct tp_pcb *tpcb; +{ + int old = tpcb->tp_rtt; + int delta, elapsed = ticks - tpcb->tp_rttemit; + + if (tpcb->tp_rtt != 0) { + /* + * rtt is the smoothed round trip time in machine clock ticks (hz). + * It is stored as a fixed point number, unscaled (unlike the tcp + * srtt). The rationale here is that it is only significant to the + * nearest unit of slowtimo, which is at least 8 machine clock ticks + * so there is no need to scale. The smoothing is done according + * to the same formula as TCP (rtt = rtt*7/8 + measured_rtt/8). + */ + delta = elapsed - tpcb->tp_rtt; + if ((tpcb->tp_rtt += (delta >> TP_RTT_ALPHA)) <= 0) + tpcb->tp_rtt = 1; + /* + * rtv is a smoothed accumulated mean difference, unscaled + * for reasons expressed above. + * It is smoothed with an alpha of .75, and the round trip timer + * will be set to rtt + 4*rtv, also as TCP does. + */ + if (delta < 0) + delta = -delta; + if ((tpcb->tp_rtv += ((delta - tpcb->tp_rtv) >> TP_RTV_ALPHA)) <= 0) + tpcb->tp_rtv = 1; + } else { + /* + * No rtt measurement yet - use the unsmoothed rtt. 
+ * Set the variance to half the rtt (so our first + * retransmit happens at 3*rtt) + */ + tpcb->tp_rtt = elapsed; + tpcb->tp_rtv = elapsed >> 1; + } + tpcb->tp_rttemit = 0; + tpcb->tp_rxtshift = 0; + /* + * Quoting TCP: "the retransmit should happen at rtt + 4 * rttvar. + * Because of the way we do the smoothing, srtt and rttvar + * will each average +1/2 tick of bias. When we compute + * the retransmit timer, we want 1/2 tick of rounding and + * 1 extra tick because of +-1/2 tick uncertainty in the + * firing of the timer. The bias will give us exactly the + * 1.5 tick we need. But, because the bias is + * statistical, we have to test that we don't drop below + * the minimum feasible timer (which is 2 ticks)." + */ + TP_RANGESET(tpcb->tp_dt_ticks, TP_REXMTVAL(tpcb), + tpcb->tp_peer_acktime, 128 /* XXX */); + IFDEBUG(D_RTT) + printf("%s tpcb 0x%x, elapsed %d, delta %d, rtt %d, rtv %d, old %d\n", + "tp_rtt_rtv:",tpcb,elapsed,delta,tpcb->tp_rtt,tpcb->tp_rtv,old); + ENDDEBUG + tpcb->tp_rxtcur = tpcb->tp_dt_ticks; +} + +/* + * CALLED FROM: + * tp.trans when an AK arrives + * FUNCTION and ARGUMENTS: + * Given (cdt), the credit from the AK tpdu, and + * (seq), the sequence number from the AK tpdu, + * tp_goodack() determines if the AK acknowledges something in the send + * window, and if so, drops the appropriate packets from the retransmission + * list, computes the round trip time, and updates the retransmission timer + * based on the new smoothed round trip time. + * RETURN VALUE: + * Returns 1 if + * EITHER it actually acked something heretofore unacknowledged + * OR no news but the credit should be processed. + * If something heretofore unacked was acked with this sequence number, + * the appropriate tpdus are dropped from the retransmission control list, + * by calling tp_sbdrop(). + * No need to see the tpdu itself. 
+ */ +int +tp_goodack(tpcb, cdt, seq, subseq) + register struct tp_pcb *tpcb; + u_int cdt; + register SeqNum seq; + u_int subseq; +{ + int old_fcredit; + int bang = 0; /* bang --> ack for something heretofore unacked */ + u_int bytes_acked; + + IFDEBUG(D_ACKRECV) + printf("goodack tpcb 0x%x seq 0x%x cdt %d una 0x%x new 0x%x nxt 0x%x\n", + tpcb, seq, cdt, tpcb->tp_snduna, tpcb->tp_sndnew, tpcb->tp_sndnxt); + ENDDEBUG + IFTRACE(D_ACKRECV) + tptraceTPCB(TPPTgotack, + seq,cdt, tpcb->tp_snduna,tpcb->tp_sndnew,subseq); + ENDTRACE + + IFPERF(tpcb) + tpmeas(tpcb->tp_lref, TPtime_ack_rcvd, (struct timeval *)0, seq, 0, 0); + ENDPERF + + if (seq == tpcb->tp_snduna) { + if (subseq < tpcb->tp_r_subseq || + (subseq == tpcb->tp_r_subseq && cdt <= tpcb->tp_fcredit)) { + discard_the_ack: + IFDEBUG(D_ACKRECV) + printf("goodack discard : tpcb 0x%x subseq %d r_subseq %d\n", + tpcb, subseq, tpcb->tp_r_subseq); + ENDDEBUG + goto done; + } + if (cdt == tpcb->tp_fcredit /*&& thus subseq > tpcb->tp_r_subseq */) { + tpcb->tp_r_subseq = subseq; + if (tpcb->tp_timer[TM_data_retrans] == 0) + tpcb->tp_dupacks = 0; + else if (++tpcb->tp_dupacks == tprexmtthresh) { + /* partner went out of his way to signal with different + subsequences that he has the same lack of an expected + packet. 
This may be an early indiciation of a loss */ + + SeqNum onxt = tpcb->tp_sndnxt; + struct mbuf *onxt_m = tpcb->tp_sndnxt_m; + u_int win = min(tpcb->tp_fcredit, + tpcb->tp_cong_win / tpcb->tp_l_tpdusize) / 2; + IFDEBUG(D_ACKRECV) + printf("%s tpcb 0x%x seq 0x%x rttseq 0x%x onxt 0x%x\n", + "goodack dupacks:", tpcb, seq, tpcb->tp_rttseq, onxt); + ENDDEBUG + if (win < 2) + win = 2; + tpcb->tp_ssthresh = win * tpcb->tp_l_tpdusize; + tpcb->tp_timer[TM_data_retrans] = 0; + tpcb->tp_rttemit = 0; + tpcb->tp_sndnxt = tpcb->tp_snduna; + tpcb->tp_sndnxt_m = 0; + tpcb->tp_cong_win = tpcb->tp_l_tpdusize; + tp_send(tpcb); + tpcb->tp_cong_win = tpcb->tp_ssthresh + + tpcb->tp_dupacks * tpcb->tp_l_tpdusize; + if (SEQ_GT(tpcb, onxt, tpcb->tp_sndnxt)) { + tpcb->tp_sndnxt = onxt; + tpcb->tp_sndnxt_m = onxt_m; + } + + } else if (tpcb->tp_dupacks > tprexmtthresh) { + tpcb->tp_cong_win += tpcb->tp_l_tpdusize; + } + goto done; + } + } else if (SEQ_LT(tpcb, seq, tpcb->tp_snduna)) + goto discard_the_ack; + /* + * If the congestion window was inflated to account + * for the other side's cached packets, retract it. + */ + if (tpcb->tp_dupacks > tprexmtthresh && + tpcb->tp_cong_win > tpcb->tp_ssthresh) + tpcb->tp_cong_win = tpcb->tp_ssthresh; + tpcb->tp_r_subseq = subseq; + old_fcredit = tpcb->tp_fcredit; + tpcb->tp_fcredit = cdt; + if (cdt > tpcb->tp_maxfcredit) + tpcb->tp_maxfcredit = cdt; + tpcb->tp_dupacks = 0; + + if (IN_SWINDOW(tpcb, seq, tpcb->tp_snduna, tpcb->tp_sndnew)) { + + tpsbcheck(tpcb, 0); + bytes_acked = tp_sbdrop(tpcb, seq); + tpsbcheck(tpcb, 1); + /* + * If transmit timer is running and timed sequence + * number was acked, update smoothed round trip time. + * Since we now have an rtt measurement, cancel the + * timer backoff (cf., Phil Karn's retransmit alg.). + * Recompute the initial retransmit timer. + */ + if (tpcb->tp_rttemit && SEQ_GT(tpcb, seq, tpcb->tp_rttseq)) + tp_rtt_rtv(tpcb); + /* + * If all outstanding data is acked, stop retransmit timer. 
+ * If there is more data to be acked, restart retransmit + * timer, using current (possibly backed-off) value. + * OSI combines the keepalive and persistance functions. + * So, there is no persistance timer per se, to restart. + */ + if (tpcb->tp_class != TP_CLASS_0) + tpcb->tp_timer[TM_data_retrans] = + (seq == tpcb->tp_sndnew) ? 0 : tpcb->tp_rxtcur; + /* + * When new data is acked, open the congestion window. + * If the window gives us less than ssthresh packets + * in flight, open exponentially (maxseg per packet). + * Otherwise open linearly: maxseg per window + * (maxseg^2 / cwnd per packet), plus a constant + * fraction of a packet (maxseg/8) to help larger windows + * open quickly enough. + */ + { + u_int cw = tpcb->tp_cong_win, incr = tpcb->tp_l_tpdusize; + + incr = min(incr, bytes_acked); + if (cw > tpcb->tp_ssthresh) + incr = incr * incr / cw + incr / 8; + tpcb->tp_cong_win = + min(cw + incr, tpcb->tp_sock->so_snd.sb_hiwat); + } + tpcb->tp_snduna = seq; + if (SEQ_LT(tpcb, tpcb->tp_sndnxt, seq)) { + tpcb->tp_sndnxt = seq; + tpcb->tp_sndnxt_m = 0; + } + bang++; + } + + if( cdt != 0 && old_fcredit == 0 ) { + tpcb->tp_sendfcc = 1; + } + if (cdt == 0) { + if (old_fcredit != 0) + IncStat(ts_zfcdt); + /* The following might mean that the window shrunk */ + if (tpcb->tp_timer[TM_data_retrans]) { + tpcb->tp_timer[TM_data_retrans] = 0; + tpcb->tp_timer[TM_sendack] = tpcb->tp_dt_ticks; + if (tpcb->tp_sndnxt != tpcb->tp_snduna) { + tpcb->tp_sndnxt = tpcb->tp_snduna; + tpcb->tp_sndnxt_m = 0; + } + } + } + tpcb->tp_fcredit = cdt; + bang |= (old_fcredit < cdt); + +done: + IFDEBUG(D_ACKRECV) + printf("goodack returns 0x%x, cdt 0x%x ocdt 0x%x cwin 0x%x\n", + bang, cdt, old_fcredit, tpcb->tp_cong_win); + ENDDEBUG + /* if (bang) XXXXX Very bad to remove this test, but somethings broken */ + tp_send(tpcb); + return (bang); +} + +/* + * CALLED FROM: + * tp_goodack() + * FUNCTION and ARGUMENTS: + * drops everything up TO but not INCLUDING seq # (seq) + * from the 
retransmission queue. + */ +tp_sbdrop(tpcb, seq) + register struct tp_pcb *tpcb; + SeqNum seq; +{ + struct sockbuf *sb = &tpcb->tp_sock->so_snd; + register int i = SEQ_SUB(tpcb, seq, tpcb->tp_snduna); + int oldcc = sb->sb_cc, oldi = i; + + if (i >= tpcb->tp_seqhalf) + printf("tp_spdropping too much -- should panic"); + while (i-- > 0) + sbdroprecord(sb); + IFDEBUG(D_ACKRECV) + printf("tp_sbdroping %d pkts %d bytes on %x at 0x%x\n", + oldi, oldcc - sb->sb_cc, tpcb, seq); + ENDDEBUG + if (sb->sb_flags & SB_NOTIFY) + sowwakeup(tpcb->tp_sock); + return (oldcc - sb->sb_cc); +} + +/* + * CALLED FROM: + * tp.trans on user send request, arrival of AK and arrival of XAK + * FUNCTION and ARGUMENTS: + * Emits tpdus starting at sequence number (tpcb->tp_sndnxt). + * Emits until a) runs out of data, or b) runs into an XPD mark, or + * c) it hits seq number (highseq) limited by cong or credit. + * + * If you want XPD to buffer > 1 du per socket buffer, you can + * modifiy this to issue XPD tpdus also, but then it'll have + * to take some argument(s) to distinguish between the type of DU to + * hand tp_emit. + * + * When something is sent for the first time, its time-of-send + * is stashed (in system clock ticks rather than pf_slowtimo ticks). + * When the ack arrives, the smoothed round-trip time is figured + * using this value. 
+ */ +void +tp_send(tpcb) + register struct tp_pcb *tpcb; +{ + register int len; + register struct mbuf *m; + struct mbuf *mb = 0; + struct sockbuf *sb = &tpcb->tp_sock->so_snd; + unsigned int eotsdu = 0; + SeqNum highseq, checkseq; + int idle, idleticks, off, cong_win; +#ifdef TP_PERF_MEAS + int send_start_time = ticks; + SeqNum oldnxt = tpcb->tp_sndnxt; +#endif /* TP_PERF_MEAS */ + + idle = (tpcb->tp_snduna == tpcb->tp_sndnew); + if (idle) { + idleticks = tpcb->tp_inact_ticks - tpcb->tp_timer[TM_inact]; + if (idleticks > tpcb->tp_dt_ticks) + /* + * We have been idle for "a while" and no acks are + * expected to clock out any data we send -- + * slow start to get ack "clock" running again. + */ + tpcb->tp_cong_win = tpcb->tp_l_tpdusize; + } + + cong_win = tpcb->tp_cong_win; + highseq = SEQ(tpcb, tpcb->tp_fcredit + tpcb->tp_snduna); + if (tpcb->tp_Xsnd.sb_mb) + highseq = SEQ_MIN(tpcb, highseq, tpcb->tp_sndnew); + + IFDEBUG(D_DATA) + printf("tp_send enter tpcb 0x%x nxt 0x%x win %d high 0x%x\n", + tpcb, tpcb->tp_sndnxt, cong_win, highseq); + ENDDEBUG + IFTRACE(D_DATA) + tptraceTPCB( TPPTmisc, "tp_send sndnew snduna", + tpcb->tp_sndnew, tpcb->tp_snduna, 0, 0); + tptraceTPCB( TPPTmisc, "tp_send tpcb->tp_sndnxt win fcredit congwin", + tpcb->tp_sndnxt, cong_win, tpcb->tp_fcredit, tpcb->tp_cong_win); + ENDTRACE + IFTRACE(D_DATA) + tptraceTPCB( TPPTmisc, "tp_send 2 nxt high fcredit congwin", + tpcb->tp_sndnxt, highseq, tpcb->tp_fcredit, cong_win); + ENDTRACE + + if (tpcb->tp_sndnxt_m) + m = tpcb->tp_sndnxt_m; + else { + off = SEQ_SUB(tpcb, tpcb->tp_sndnxt, tpcb->tp_snduna); + for (m = sb->sb_mb; m && off > 0; m = m->m_next) + off--; + } +send: + /* + * Avoid silly window syndrome here . . . figure out how! + */ + checkseq = tpcb->tp_sndnum; + if (idle && SEQ_LT(tpcb, tpcb->tp_sndnum, highseq)) + checkseq = highseq; /* i.e. 
DON'T retain highest assigned packet */ + + while ((SEQ_LT(tpcb, tpcb->tp_sndnxt, highseq)) && m && cong_win > 0) { + + eotsdu = (m->m_flags & M_EOR) != 0; + len = m->m_pkthdr.len; + if (tpcb->tp_sndnxt == checkseq && eotsdu == 0 && + len < (tpcb->tp_l_tpdusize / 2)) + break; /* Nagle . . . . . */ + cong_win -= len; + /* make a copy - mb goes into the retransmission list + * while m gets emitted. m_copy won't copy a zero-length mbuf. + */ + mb = m; + m = m_copy(mb, 0, M_COPYALL); + if (m == MNULL) + break; + IFTRACE(D_STASH) + tptraceTPCB( TPPTmisc, + "tp_send mcopy nxt high eotsdu len", + tpcb->tp_sndnxt, highseq, eotsdu, len); + ENDTRACE + + IFDEBUG(D_DATA) + printf("tp_sending tpcb 0x%x nxt 0x%x\n", + tpcb, tpcb->tp_sndnxt); + ENDDEBUG + /* when headers are precomputed, may need to fill + in checksum here */ + if (tpcb->tp_sock->so_error = + tp_emit(DT_TPDU_type, tpcb, tpcb->tp_sndnxt, eotsdu, m)) { + /* error */ + break; + } + m = mb->m_nextpkt; + tpcb->tp_sndnxt_m = m; + if (tpcb->tp_sndnxt == tpcb->tp_sndnew) { + SEQ_INC(tpcb, tpcb->tp_sndnew); + /* + * Time this transmission if not a retransmission and + * not currently timing anything. + */ + if (tpcb->tp_rttemit == 0) { + tpcb->tp_rttemit = ticks; + tpcb->tp_rttseq = tpcb->tp_sndnxt; + } + tpcb->tp_sndnxt = tpcb->tp_sndnew; + } else + SEQ_INC(tpcb, tpcb->tp_sndnxt); + /* + * Set retransmit timer if not currently set. + * Initial value for retransmit timer is smoothed + * round-trip time + 2 * round-trip time variance. + * Initialize shift counter which is used for backoff + * of retransmit time. 
+ */ + if (tpcb->tp_timer[TM_data_retrans] == 0 && + tpcb->tp_class != TP_CLASS_0) { + tpcb->tp_timer[TM_data_retrans] = tpcb->tp_dt_ticks; + tpcb->tp_timer[TM_sendack] = tpcb->tp_keepalive_ticks; + tpcb->tp_rxtshift = 0; + } + } + if (SEQ_GT(tpcb, tpcb->tp_sndnew, tpcb->tp_sndnum)) + tpcb->tp_oktonagle = 0; +#ifdef TP_PERF_MEAS + IFPERF(tpcb) + { + register int npkts; + int elapsed = ticks - send_start_time, *t; + struct timeval now; + + npkts = SEQ_SUB(tpcb, tpcb->tp_sndnxt, oldnxt); + + if (npkts > 0) + tpcb->tp_Nwindow++; + + if (npkts > TP_PM_MAX) + npkts = TP_PM_MAX; + + t = &(tpcb->tp_p_meas->tps_sendtime[npkts]); + *t += (t - elapsed) >> TP_RTT_ALPHA; + + if (mb == 0) { + IncPStat(tpcb, tps_win_lim_by_data[npkts] ); + } else { + IncPStat(tpcb, tps_win_lim_by_cdt[npkts] ); + /* not true with congestion-window being used */ + } + now.tv_sec = elapsed / hz; + now.tv_usec = (elapsed - (hz * now.tv_sec)) * 1000000 / hz; + tpmeas( tpcb->tp_lref, + TPsbsend, &elapsed, newseq, tpcb->tp_Nwindow, npkts); + } + ENDPERF +#endif /* TP_PERF_MEAS */ + + + IFTRACE(D_DATA) + tptraceTPCB( TPPTmisc, + "tp_send at end: new nxt eotsdu error", + tpcb->tp_sndnew, tpcb->tp_sndnxt, eotsdu, tpcb->tp_sock->so_error); + + ENDTRACE +} + +int TPNagleok; +int TPNagled; + +tp_packetize(tpcb, m, eotsdu) +register struct tp_pcb *tpcb; +register struct mbuf *m; +int eotsdu; +{ + register struct mbuf *n; + register struct sockbuf *sb = &tpcb->tp_sock->so_snd; + int maxsize = tpcb->tp_l_tpdusize + - tp_headersize(DT_TPDU_type, tpcb) + - (tpcb->tp_use_checksum?4:0) ; + int totlen = m->m_pkthdr.len; + struct mbuf *m_split(); + /* + * Pre-packetize the data in the sockbuf + * according to negotiated mtu. Do it here + * where we can safely wait for mbufs. + * + * This presumes knowledge of sockbuf conventions. + * TODO: allocate space for header and fill it in (once!). 
+ */ + IFDEBUG(D_DATA) + printf("SEND BF: maxsize %d totlen %d eotsdu %d sndnum 0x%x\n", + maxsize, totlen, eotsdu, tpcb->tp_sndnum); + ENDTRACE + if (tpcb->tp_oktonagle) { + if ((n = sb->sb_mb) == 0) + panic("tp_packetize"); + while (n->m_act) + n = n->m_act; + if (n->m_flags & M_EOR) + panic("tp_packetize 2"); + SEQ_INC(tpcb, tpcb->tp_sndnum); + if (totlen + n->m_pkthdr.len < maxsize) { + /* There is an unsent packet with space, combine data */ + struct mbuf *old_n = n; + tpsbcheck(tpcb,3); + n->m_pkthdr.len += totlen; + while (n->m_next) + n = n->m_next; + sbcompress(sb, m, n); + tpsbcheck(tpcb,4); + n = old_n; + TPNagled++; + goto out; + } + } + while (m) { + n = m; + if (totlen > maxsize) { + if ((m = m_split(n, maxsize, M_WAIT)) == 0) + panic("tp_packetize"); + } else + m = 0; + totlen -= maxsize; + tpsbcheck(tpcb, 5); + sbappendrecord(sb, n); + tpsbcheck(tpcb, 6); + SEQ_INC(tpcb, tpcb->tp_sndnum); + } +out: + if (eotsdu) { + n->m_flags |= M_EOR; /* XXX belongs at end */ + tpcb->tp_oktonagle = 0; + } else { + SEQ_DEC(tpcb, tpcb->tp_sndnum); + tpcb->tp_oktonagle = 1; + TPNagleok++; + } + IFDEBUG(D_DATA) + printf("SEND out: oktonagle %d sndnum 0x%x\n", + tpcb->tp_oktonagle, tpcb->tp_sndnum); + ENDTRACE + return 0; +} + + +/* + * NAME: tp_stash() + * CALLED FROM: + * tp.trans on arrival of a DT tpdu + * FUNCTION, ARGUMENTS, and RETURN VALUE: + * Returns 1 if + * a) something new arrived and it's got eotsdu_reached bit on, + * b) this arrival was caused other out-of-sequence things to be + * accepted, or + * c) this arrival is the highest seq # for which we last gave credit + * (sender just sent a whole window) + * In other words, returns 1 if tp should send an ack immediately, 0 if + * the ack can wait a while. + * + * Note: this implementation no longer renegs on credit, (except + * when debugging option D_RENEG is on, for the purpose of testing + * ack subsequencing), so we don't need to check for incoming tpdus + * being in a reneged portion of the window. 
+ */ + +tp_stash(tpcb, e) + register struct tp_pcb *tpcb; + register struct tp_event *e; +{ + register int ack_reason= tpcb->tp_ack_strat & ACK_STRAT_EACH; + /* 0--> delay acks until full window */ + /* 1--> ack each tpdu */ +#ifndef lint +#define E e->ATTR(DT_TPDU) +#else /* lint */ +#define E e->ev_union.EV_DT_TPDU +#endif /* lint */ + + if ( E.e_eot ) { + register struct mbuf *n = E.e_data; + n->m_flags |= M_EOR; + n->m_act = 0; + } + IFDEBUG(D_STASH) + dump_mbuf(tpcb->tp_sock->so_rcv.sb_mb, + "stash: so_rcv before appending"); + dump_mbuf(E.e_data, + "stash: e_data before appending"); + ENDDEBUG + + IFPERF(tpcb) + PStat(tpcb, Nb_from_ll) += E.e_datalen; + tpmeas(tpcb->tp_lref, TPtime_from_ll, &e->e_time, + E.e_seq, (u_int)PStat(tpcb, Nb_from_ll), (u_int)E.e_datalen); + ENDPERF + + if (E.e_seq == tpcb->tp_rcvnxt) { + + IFDEBUG(D_STASH) + printf("stash EQ: seq 0x%x datalen 0x%x eot 0x%x\n", + E.e_seq, E.e_datalen, E.e_eot); + ENDDEBUG + + IFTRACE(D_STASH) + tptraceTPCB(TPPTmisc, "stash EQ: seq len eot", + E.e_seq, E.e_datalen, E.e_eot, 0); + ENDTRACE + + SET_DELACK(tpcb); + + sbappend(&tpcb->tp_sock->so_rcv, E.e_data); + + SEQ_INC( tpcb, tpcb->tp_rcvnxt ); + /* + * move chains from the reassembly queue to the socket buffer + */ + if (tpcb->tp_rsycnt) { + register struct mbuf **mp; + struct mbuf **mplim; + + mp = tpcb->tp_rsyq + (tpcb->tp_rcvnxt % tpcb->tp_maxlcredit); + mplim = tpcb->tp_rsyq + tpcb->tp_maxlcredit; + + while (tpcb->tp_rsycnt && *mp) { + sbappend(&tpcb->tp_sock->so_rcv, *mp); + tpcb->tp_rsycnt--; + *mp = 0; + SEQ_INC(tpcb, tpcb->tp_rcvnxt); + ack_reason |= ACK_REORDER; + if (++mp == mplim) + mp = tpcb->tp_rsyq; + } + } + IFDEBUG(D_STASH) + dump_mbuf(tpcb->tp_sock->so_rcv.sb_mb, + "stash: so_rcv after appending"); + ENDDEBUG + + } else { + register struct mbuf **mp; + SeqNum uwe; + + IFTRACE(D_STASH) + tptraceTPCB(TPPTmisc, "stash Reseq: seq rcvnxt lcdt", + E.e_seq, tpcb->tp_rcvnxt, tpcb->tp_lcredit, 0); + ENDTRACE + + if (tpcb->tp_rsyq == 0) + 
tp_rsyset(tpcb); + uwe = SEQ(tpcb, tpcb->tp_rcvnxt + tpcb->tp_maxlcredit); + if (tpcb->tp_rsyq == 0 || + !IN_RWINDOW(tpcb, E.e_seq, tpcb->tp_rcvnxt, uwe)) { + ack_reason = ACK_DONT; + m_freem(E.e_data); + } else if (*(mp = tpcb->tp_rsyq + (E.e_seq % tpcb->tp_maxlcredit))) { + IFDEBUG(D_STASH) + printf("tp_stash - drop & ack\n"); + ENDDEBUG + + /* retransmission - drop it and force an ack */ + IncStat(ts_dt_dup); + IFPERF(tpcb) + IncPStat(tpcb, tps_n_ack_cuz_dup); + ENDPERF + + m_freem(E.e_data); + ack_reason |= ACK_DUP; + } else { + *mp = E.e_data; + tpcb->tp_rsycnt++; + ack_reason = ACK_DONT; + } + } + /* there were some comments of historical interest here. */ + { + LOCAL_CREDIT(tpcb); + + if ( E.e_seq == tpcb->tp_sent_uwe ) + ack_reason |= ACK_STRAT_FULLWIN; + + IFTRACE(D_STASH) + tptraceTPCB(TPPTmisc, + "end of stash, eot, ack_reason, sent_uwe ", + E.e_eot, ack_reason, tpcb->tp_sent_uwe, 0); + ENDTRACE + + if ( ack_reason == ACK_DONT ) { + IncStat( ts_ackreason[ACK_DONT] ); + return 0; + } else { + IFPERF(tpcb) + if(ack_reason & ACK_STRAT_EACH) { + IncPStat(tpcb, tps_n_ack_cuz_strat); + } else if(ack_reason & ACK_STRAT_FULLWIN) { + IncPStat(tpcb, tps_n_ack_cuz_fullwin); + } else if(ack_reason & ACK_REORDER) { + IncPStat(tpcb, tps_n_ack_cuz_reorder); + } + tpmeas(tpcb->tp_lref, TPtime_ack_sent, 0, + SEQ_ADD(tpcb, E.e_seq, 1), 0, 0); + ENDPERF + { + register int i; + + /* keep track of all reasons that apply */ + for( i=1; i<_ACK_NUM_REASONS_ ;i++) { + if( ack_reason & (1<tp_rsycnt) { + for (mp == tpcb->tp_rsyq + tpcb->tp_maxlcredit; + --mp >= tpcb->tp_rsyq; ) + if (*mp) { + tpcb->tp_rsycnt--; + m_freem(*mp); + } + if (tpcb->tp_rsycnt) { + printf("tp_rsyflush %x\n", tpcb); + tpcb->tp_rsycnt = 0; + } + } + FREE((caddr_t)tpcb->tp_rsyq, M_PCB); + tpcb->tp_rsyq = 0; +} + +tp_rsyset(tpcb) +register struct tp_pcb *tpcb; +{ + register struct socket *so = tpcb->tp_sock; + int maxcredit = tpcb->tp_xtd_format ? 
0xffff : 0xf; + int old_credit = tpcb->tp_maxlcredit; + caddr_t rsyq; + + tpcb->tp_maxlcredit = maxcredit = min(maxcredit, + (so->so_rcv.sb_hiwat + tpcb->tp_l_tpdusize)/ tpcb->tp_l_tpdusize); + + if (old_credit == tpcb->tp_maxlcredit && tpcb->tp_rsyq != 0) + return; + maxcredit *= sizeof(struct mbuf *); + if (tpcb->tp_rsyq) + tp_rsyflush(tpcb); +// if (rsyq = (caddr_t)malloc(maxcredit, M_PCB, M_NOWAIT)) + MALLOC(rsyq, caddr_t, maxcredit, M_PCB, M_NOWAIT); + if (rsyq) + bzero(rsyq, maxcredit); + tpcb->tp_rsyq = (struct mbuf **)rsyq; +} + +tpsbcheck(tpcb, i) +struct tp_pcb *tpcb; +{ + register struct mbuf *n, *m; + register int len = 0, mbcnt = 0, pktlen; + struct sockbuf *sb = &tpcb->tp_sock->so_snd; + + for (n = sb->sb_mb; n; n = n->m_nextpkt) { + if ((n->m_flags & M_PKTHDR) == 0) + panic("tpsbcheck nohdr"); + pktlen = len + n->m_pkthdr.len; + for (m = n; m; m = m->m_next) { + len += m->m_len; + mbcnt += MSIZE; + if (m->m_flags & M_EXT) + mbcnt += m->m_ext.ext_size; + } + if (len != pktlen) { + printf("test %d; len %d != pktlen %d on mbuf 0x%x\n", + i, len, pktlen, n); + panic("tpsbcheck short"); + } + } + if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) { + printf("test %d: cc %d != %d || mbcnt %d != %d\n", i, len, sb->sb_cc, + mbcnt, sb->sb_mbcnt); + panic("tpsbcheck"); + } +} diff --git a/bsd/netiso/tp_subr2.c b/bsd/netiso/tp_subr2.c new file mode 100644 index 000000000..755552db5 --- /dev/null +++ b/bsd/netiso/tp_subr2.c @@ -0,0 +1,898 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_subr2.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * Some auxiliary routines: + * tp_protocol_error: required by xebec- called when a combo of state, + * event, predicate isn't covered for by the transition file. 
+ * tp_indicate: gives indications(signals) to the user process + * tp_getoptions: initializes variables that are affected by the options + * chosen. + */ + +/* this def'n is to cause the expansion of this macro in the + * routine tp_local_credit : + */ +#define LOCAL_CREDIT_EXPAND + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef MNULL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#ifdef TRUE +#undef FALSE +#undef TRUE +#endif +#include +#include +#include + +void tp_rsyset(); + +/* + * NAME: tp_local_credit() + * + * CALLED FROM: + * tp_emit(), tp_usrreq() + * + * FUNCTION and ARGUMENTS: + * Computes the local credit and stashes it in tpcb->tp_lcredit. + * It's a macro in the production system rather than a procdure. + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: + * This doesn't actually get called in a production system - + * the macro gets expanded instead in place of calls to this proc. + * But for debugging, we call this and that allows us to add + * debugging messages easily here. + */ +void +tp_local_credit(tpcb) + struct tp_pcb *tpcb; +{ + LOCAL_CREDIT(tpcb); + IFDEBUG(D_CREDIT) + printf("ref 0x%x lcdt 0x%x l_tpdusize 0x%x decbit 0x%x\n", + tpcb->tp_lref, + tpcb->tp_lcredit, + tpcb->tp_l_tpdusize, + tpcb->tp_decbit, + tpcb->tp_cong_win + ); + ENDDEBUG + IFTRACE(D_CREDIT) + tptraceTPCB(TPPTmisc, + "lcdt tpdusz \n", + tpcb->tp_lcredit, tpcb->tp_l_tpdusize, 0, 0); + ENDTRACE +} + +/* + * NAME: tp_protocol_error() + * + * CALLED FROM: + * tp_driver(), when it doesn't know what to do with + * a combo of event, state, predicate + * + * FUNCTION and ARGUMENTS: + * print error mesg + * + * RETURN VALUE: + * EIO - always + * + * SIDE EFFECTS: + * + * NOTES: + */ +int +tp_protocol_error(e,tpcb) + struct tp_event *e; + struct tp_pcb *tpcb; +{ + printf("TP PROTOCOL ERROR! 
tpcb 0x%x event 0x%x, state 0x%x\n", + tpcb, e->ev_number, tpcb->tp_state); + IFTRACE(D_DRIVER) + tptraceTPCB(TPPTmisc, "PROTOCOL ERROR tpcb event state", + tpcb, e->ev_number, tpcb->tp_state, 0 ); + ENDTRACE + return EIO; /* for lack of anything better */ +} + + +/* Not used at the moment */ +ProtoHook +tp_drain() +{ + return 0; +} + + +/* + * NAME: tp_indicate() + * + * CALLED FROM: + * tp.trans when XPD arrive, when a connection is being disconnected by + * the arrival of a DR or ER, and when a connection times out. + * + * FUNCTION and ARGUMENTS: + * (ind) is the type of indication : T_DISCONNECT, T_XPD + * (error) is an E* value that will be put in the socket structure + * to be passed along to the user later. + * Gives a SIGURG to the user process or group indicated by the socket + * attached to the tpcb. + * + * RETURNS: Rien + * + * SIDE EFFECTS: + * + * NOTES: + */ +void +tp_indicate(ind, tpcb, error) + int ind; + u_short error; + register struct tp_pcb *tpcb; +{ + register struct socket *so = tpcb->tp_sock; + IFTRACE(D_INDICATION) + tptraceTPCB(TPPTindicate, ind, *(u_short *)(tpcb->tp_lsuffix), + *(u_short *)(tpcb->tp_fsuffix), error,so->so_pgid); + ENDTRACE + IFDEBUG(D_INDICATION) + char *ls, *fs; + ls = tpcb->tp_lsuffix, + fs = tpcb->tp_fsuffix, + + printf( +"indicate 0x%x lsuf 0x%02x%02x fsuf 0x%02x%02x err 0x%x noind 0x%x ref 0x%x\n", + ind, + *ls, *(ls+1), *fs, *(fs+1), + error, /*so->so_pgrp,*/ + tpcb->tp_no_disc_indications, + tpcb->tp_lref); + ENDDEBUG + + if (ind == ER_TPDU) { + register struct mbuf *m; + struct tp_disc_reason x; + + if ((so->so_state & SS_CANTRCVMORE) == 0 && + (m = m_get(M_DONTWAIT, MT_OOBDATA)) != 0) { + + x.dr_hdr.cmsg_len = m->m_len = sizeof(x); + x.dr_hdr.cmsg_level = SOL_TRANSPORT; + x.dr_hdr.cmsg_type= TPOPT_DISC_REASON; + x.dr_reason = error; + *mtod(m, struct tp_disc_reason *) = x; + sbappendrecord(&tpcb->tp_Xrcv, m); + error = 0; + } else + error = ECONNRESET; + } + so->so_error = error; + + if (ind == T_DISCONNECT) { 
+ if (error == 0) + so->so_error = ENOTCONN; + if ( tpcb->tp_no_disc_indications ) + return; + } + IFTRACE(D_INDICATION) + tptraceTPCB(TPPTmisc, "doing sohasoutofband(so)", so,0,0,0); + ENDTRACE + sohasoutofband(so); +} + +/* + * NAME : tp_getoptions() + * + * CALLED FROM: + * tp.trans whenever we go into OPEN state + * + * FUNCTION and ARGUMENTS: + * sets the proper flags and values in the tpcb, to control + * the appropriate actions for the given class, options, + * sequence space, etc, etc. + * + * RETURNS: Nada + * + * SIDE EFFECTS: + * + * NOTES: + */ +void +tp_getoptions(tpcb) +struct tp_pcb *tpcb; +{ + tpcb->tp_seqmask = + tpcb->tp_xtd_format ? TP_XTD_FMT_MASK : TP_NML_FMT_MASK ; + tpcb->tp_seqbit = + tpcb->tp_xtd_format ? TP_XTD_FMT_BIT : TP_NML_FMT_BIT ; + tpcb->tp_seqhalf = tpcb->tp_seqbit >> 1; + tpcb->tp_dt_ticks = + max(tpcb->tp_dt_ticks, (tpcb->tp_peer_acktime + 2)); + tp_rsyset(tpcb); + +} + +/* + * NAME: tp_recycle_tsuffix() + * + * CALLED FROM: + * Called when a ref is frozen. + * + * FUNCTION and ARGUMENTS: + * allows the suffix to be reused. + * + * RETURNS: zilch + * + * SIDE EFFECTS: + * + * NOTES: + */ +void +tp_recycle_tsuffix(tpcb) + struct tp_pcb *tpcb; +{ + bzero((caddr_t)tpcb->tp_lsuffix, sizeof( tpcb->tp_lsuffix)); + bzero((caddr_t)tpcb->tp_fsuffix, sizeof( tpcb->tp_fsuffix)); + tpcb->tp_fsuffixlen = tpcb->tp_lsuffixlen = 0; + + (tpcb->tp_nlproto->nlp_recycle_suffix)(tpcb->tp_npcb); +} + +/* + * NAME: tp_quench() + * + * CALLED FROM: + * tp{af}_quench() when ICMP source quench or similar thing arrives. + * + * FUNCTION and ARGUMENTS: + * Drop the congestion window back to 1. + * Congestion window scheme: + * Initial value is 1. ("slow start" as Nagle, et. al. call it) + * For each good ack that arrives, the congestion window is increased + * by 1 (up to max size of logical infinity, which is to say, + * it doesn't wrap around). + * Source quench causes it to drop back to 1. 
+ * tp_send() uses the smaller of (regular window, congestion window). + * One retransmission strategy option is to have any retransmission + * cause reset the congestion window back to 1. + * + * (cmd) is either PRC_QUENCH: source quench, or + * PRC_QUENCH2: dest. quench (dec bit) + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: + */ +void +tp_quench( tpcb, cmd ) + struct tp_pcb *tpcb; + int cmd; +{ + IFDEBUG(D_QUENCH) + printf("tp_quench tpcb 0x%x ref 0x%x sufx 0x%x\n", + tpcb, tpcb->tp_lref, *(u_short *)(tpcb->tp_lsuffix)); + printf("cong_win 0x%x decbit 0x%x \n", + tpcb->tp_cong_win, tpcb->tp_decbit); + ENDDEBUG + switch(cmd) { + case PRC_QUENCH: + tpcb->tp_cong_win = tpcb->tp_l_tpdusize; + IncStat(ts_quench); + break; + case PRC_QUENCH2: + tpcb->tp_cong_win = tpcb->tp_l_tpdusize; /* might as well quench source also */ + tpcb->tp_decbit = TP_DECBIT_CLEAR_COUNT; + IncStat(ts_rcvdecbit); + break; + } +} + + +/* + * NAME: tp_netcmd() + * + * CALLED FROM: + * + * FUNCTION and ARGUMENTS: + * + * RETURNS: + * + * SIDE EFFECTS: + * + * NOTES: + */ +tp_netcmd( tpcb, cmd ) + struct tp_pcb *tpcb; + int cmd; +{ +#if TPCONS + struct isopcb *isop; + struct pklcd *lcp; + + if (tpcb->tp_netservice != ISO_CONS) + return; + isop = (struct isopcb *)tpcb->tp_npcb; + lcp = (struct pklcd *)isop->isop_chan; + switch (cmd) { + + case CONN_CLOSE: + case CONN_REFUSE: + if (isop->isop_refcnt == 1) { + /* This is really superfluous, since it would happen + anyway in iso_pcbdetach, although it is a courtesy + to free up the x.25 channel before the refwait timer + expires. 
*/ + lcp->lcd_upper = 0; + lcp->lcd_upnext = 0; + pk_disconnect(lcp); + isop->isop_chan = 0; + isop->isop_refcnt = 0; + } + break; + + default: + printf("tp_netcmd(0x%x, 0x%x) NOT IMPLEMENTED\n", tpcb, cmd); + break; + } +#else /* TPCONS */ + printf("tp_netcmd(): X25 NOT CONFIGURED!!\n"); +#endif +} +/* + * CALLED FROM: + * tp_ctloutput() and tp_emit() + * FUNCTION and ARGUMENTS: + * Convert a class mask to the highest numeric value it represents. + */ + +int +tp_mask_to_num(x) + u_char x; +{ + register int j; + + for(j = 4; j>=0 ;j--) { + if(x & (1<p_tpdusize = src->p_tpdusize; + dst->p_ack_strat = src->p_ack_strat; + dst->p_rx_strat = src->p_rx_strat; +#undef COPYSIZE +} +/* + * Determine a reasonable value for maxseg size. + * If the route is known, check route for mtu. + * We also initialize the congestion/slow start + * window to be a single segment if the destination isn't local. + * While looking at the routing entry, we also initialize other path-dependent + * parameters from pre-set or cached values in the routing entry. + */ +void +tp_mss(tpcb, nhdr_size) + register struct tp_pcb *tpcb; + int nhdr_size; +{ + register struct rtentry *rt; + struct ifnet *ifp; + register int rtt, mss; + u_long bufsize; + int i, ssthresh = 0, rt_mss; + struct socket *so; + + if (tpcb->tp_ptpdusize) + mss = tpcb->tp_ptpdusize << 7; + else + mss = 1 << tpcb->tp_tpdusize; + so = tpcb->tp_sock; + if ((rt = *(tpcb->tp_routep)) == 0) { + bufsize = so->so_rcv.sb_hiwat; + goto punt_route; + } + ifp = rt->rt_ifp; + +#ifdef RTV_MTU /* if route characteristics exist ... */ + /* + * While we're here, check if there's an initial rtt + * or rttvar. Convert from the route-table units + * to hz ticks for the smoothed timers and slow-timeout units + * for other inital variables. 
+ */ + if (tpcb->tp_rtt == 0 && (rtt = rt->rt_rmx.rmx_rtt)) { + tpcb->tp_rtt = rtt * hz / RTM_RTTUNIT; + if (rt->rt_rmx.rmx_rttvar) + tpcb->tp_rtv = rt->rt_rmx.rmx_rttvar + * hz / RTM_RTTUNIT; + else + tpcb->tp_rtv = tpcb->tp_rtt; + } + /* + * if there's an mtu associated with the route, use it + */ + if (rt->rt_rmx.rmx_mtu) + rt_mss = rt->rt_rmx.rmx_mtu - nhdr_size; + else +#endif /* RTV_MTU */ + rt_mss = (ifp->if_mtu - nhdr_size); + if (tpcb->tp_ptpdusize == 0 || /* assume application doesn't care */ + mss > rt_mss /* network won't support what was asked for */) + mss = rt_mss; + /* can propose mtu which are multiples of 128 */ + mss &= ~0x7f; + /* + * If there's a pipesize, change the socket buffer + * to that size. + */ +#ifdef RTV_SPIPE + if ((bufsize = rt->rt_rmx.rmx_sendpipe) > 0) { +#endif + bufsize = min(bufsize, so->so_snd.sb_hiwat); + (void) sbreserve(&so->so_snd, bufsize); + } +#ifdef RTV_SPIPE + if ((bufsize = rt->rt_rmx.rmx_recvpipe) > 0) { +#endif + bufsize = min(bufsize, so->so_rcv.sb_hiwat); + (void) sbreserve(&so->so_rcv, bufsize); + } else + bufsize = so->so_rcv.sb_hiwat; +#ifdef RTV_SSTHRESH + /* + * There's some sort of gateway or interface + * buffer limit on the path. Use this to set + * the slow start threshhold, but set the + * threshold to no less than 2*mss. + */ + ssthresh = rt->rt_rmx.rmx_ssthresh; +punt_route: + /* + * The current mss is initialized to the default value. + * If we compute a smaller value, reduce the current mss. + * If we compute a larger value, return it for use in sending + * a max seg size option. + * If we received an offer, don't exceed it. + * However, do not accept offers under 128 bytes. + */ + if (tpcb->tp_l_tpdusize) + mss = min(mss, tpcb->tp_l_tpdusize); + /* + * We want a minimum recv window of 4 packets to + * signal packet loss by duplicate acks. + */ + mss = min(mss, bufsize >> 2) & ~0x7f; + mss = max(mss, 128); /* sanity */ + tpcb->tp_cong_win = + (rt == 0 || (rt->rt_flags & RTF_GATEWAY)) ? 
mss : bufsize; + tpcb->tp_l_tpdusize = mss; + tp_rsyset(tpcb); + tpcb->tp_ssthresh = max(2 * mss, ssthresh); + /* Calculate log2 of mss */ + for (i = TP_MIN_TPDUSIZE + 1; i <= TP_MAX_TPDUSIZE; i++) + if ((1 << i) > mss) + break; + i--; + tpcb->tp_tpdusize = i; +#endif /* RTV_MTU */ +} + +/* + * CALLED FROM: + * tp_usrreq on PRU_CONNECT and tp_input on receipt of CR + * + * FUNCTION and ARGUMENTS: + * -- An mbuf containing the peer's network address. + * -- Our control block, which will be modified + * -- In the case of cons, a control block for that layer. + * + * + * RETURNS: + * errno value : + * EAFNOSUPPORT if can't find an nl_protosw for x.25 (really could panic) + * ECONNREFUSED if trying to run TP0 with non-type 37 address + * possibly other E* returned from cons_netcmd() + * + * SIDE EFFECTS: + * Determines recommended tpdusize, buffering and intial delays + * based on information cached on the route. + */ +int +tp_route_to( m, tpcb, channel) + struct mbuf *m; + register struct tp_pcb *tpcb; + caddr_t channel; +{ + register struct sockaddr_iso *siso; /* NOTE: this may be a sockaddr_in */ + extern struct tp_conn_param tp_conn_param[]; + int error = 0, save_netservice = tpcb->tp_netservice; + register struct rtentry *rt = 0; + int nhdr_size, mtu, bufsize; + + siso = mtod(m, struct sockaddr_iso *); + IFTRACE(D_CONN) + tptraceTPCB(TPPTmisc, + "route_to: so afi netservice class", + tpcb->tp_sock, siso->siso_addr.isoa_genaddr[0], tpcb->tp_netservice, + tpcb->tp_class); + ENDTRACE + IFDEBUG(D_CONN) + printf("tp_route_to( m x%x, channel 0x%x, tpcb 0x%x netserv 0x%x)\n", + m, channel, tpcb, tpcb->tp_netservice); + printf("m->mlen x%x, m->m_data:\n", m->m_len); + dump_buf(mtod(m, caddr_t), m->m_len); + ENDDEBUG + if (channel) { +#if TPCONS + struct pklcd *lcp = (struct pklcd *)channel; + struct isopcb *isop = (struct isopcb *)lcp->lcd_upnext, + *isop_new = (struct isopcb *)tpcb->tp_npcb; + /* The next 2 lines believe that you haven't + set any network level options 
or done a pcbconnect + and XXXXXXX'edly apply to both inpcb's and isopcb's */ + remque(isop_new); + FREE(isop_new, M_PCB); + tpcb->tp_npcb = (caddr_t)isop; + tpcb->tp_netservice = ISO_CONS; + tpcb->tp_nlproto = nl_protosw + ISO_CONS; + if (isop->isop_refcnt++ == 0) { + iso_putsufx(isop, tpcb->tp_lsuffix, tpcb->tp_lsuffixlen, TP_LOCAL); + isop->isop_socket = tpcb->tp_sock; + } else + /* there are already connections sharing this */; +#endif + } else { + switch (siso->siso_family) { + default: + error = EAFNOSUPPORT; + goto done; +#if ISO + case AF_ISO: + { + struct isopcb *isop = (struct isopcb *)tpcb->tp_npcb; + int flags = tpcb->tp_sock->so_options & SO_DONTROUTE; + tpcb->tp_netservice = ISO_CLNS; + if (clnp_route(&siso->siso_addr, &isop->isop_route, + flags, (void **)0, (void **)0) == 0) { + rt = isop->isop_route.ro_rt; + if (rt && rt->rt_flags & RTF_PROTO1) + tpcb->tp_netservice = ISO_CONS; + } + } break; +#endif +#if INET + case AF_INET: + tpcb->tp_netservice = IN_CLNS; +#endif + } + if (tpcb->tp_nlproto->nlp_afamily != siso->siso_family) { + IFDEBUG(D_CONN) + printf("tp_route_to( CHANGING nlproto old 0x%x new 0x%x)\n", + save_netservice, tpcb->tp_netservice); + ENDDEBUG + if (error = tp_set_npcb(tpcb)) + goto done; + } + IFDEBUG(D_CONN) + printf("tp_route_to calling nlp_pcbconn, netserv %d\n", + tpcb->tp_netservice); + ENDDEBUG + tpcb->tp_nlproto = nl_protosw + tpcb->tp_netservice; + error = (tpcb->tp_nlproto->nlp_pcbconn)(tpcb->tp_npcb, m); + } + if (error) + goto done; + nhdr_size = tpcb->tp_nlproto->nlp_mtu(tpcb); /* only gets common info */ + tp_mss(tpcb, nhdr_size); +done: + IFDEBUG(D_CONN) + printf("tp_route_to returns 0x%x\n", error); + ENDDEBUG + IFTRACE(D_CONN) + tptraceTPCB(TPPTmisc, "route_to: returns: error netserv class", error, + tpcb->tp_netservice, tpcb->tp_class, 0); + ENDTRACE + return error; +} + + +/* class zero version */ +void +tp0_stash( tpcb, e ) + register struct tp_pcb *tpcb; + register struct tp_event *e; +{ +#ifndef lint +#define E 
e->ATTR(DT_TPDU) +#else /* lint */ +#define E e->ev_union.EV_DT_TPDU +#endif /* lint */ + + register struct sockbuf *sb = &tpcb->tp_sock->so_rcv; + register struct isopcb *isop = (struct isopcb *)tpcb->tp_npcb; + + IFPERF(tpcb) + PStat(tpcb, Nb_from_ll) += E.e_datalen; + tpmeas(tpcb->tp_lref, TPtime_from_ll, &e->e_time, + E.e_seq, PStat(tpcb, Nb_from_ll), E.e_datalen); + ENDPERF + + IFDEBUG(D_STASH) + printf("stash EQ: seq 0x%x datalen 0x%x eot 0x%x", + E.e_seq, E.e_datalen, E.e_eot); + ENDDEBUG + + IFTRACE(D_STASH) + tptraceTPCB(TPPTmisc, "stash EQ: seq len eot", + E.e_seq, E.e_datalen, E.e_eot, 0); + ENDTRACE + + if ( E.e_eot ) { + register struct mbuf *n = E.e_data; + n->m_flags |= M_EOR; + n->m_act = MNULL; /* set on tp_input */ + } + sbappend(sb, E.e_data); + IFDEBUG(D_STASH) + dump_mbuf(sb->sb_mb, "stash 0: so_rcv after appending"); + ENDDEBUG + if (tpcb->tp_netservice != ISO_CONS) + printf("tp0_stash: tp running over something wierd\n"); + else { + register struct pklcd *lcp = (struct pklcd *)isop->isop_chan; + pk_flowcontrol(lcp, sbspace(sb) <= 0, 1); + } +} + +void +tp0_openflow(tpcb) +register struct tp_pcb *tpcb; +{ + register struct isopcb *isop = (struct isopcb *)tpcb->tp_npcb; + if (tpcb->tp_netservice != ISO_CONS) + printf("tp0_openflow: tp running over something wierd\n"); + else { + register struct pklcd *lcp = (struct pklcd *)isop->isop_chan; + if (lcp->lcd_rxrnr_condition) + pk_flowcontrol(lcp, 0, 0); + } +} +#ifndef TPCONS +static +pk_flowcontrol() {} +#endif + +#ifdef TP_PERF_MEAS +/* + * CALLED FROM: + * tp_ctloutput() when the user sets TPOPT_PERF_MEAS on + * and tp_newsocket() when a new connection is made from + * a listening socket with tp_perf_on == true. + * FUNCTION and ARGUMENTS: + * (tpcb) is the usual; this procedure gets a clear cluster mbuf for + * a tp_pmeas structure, and makes tpcb->tp_p_meas point to it. + * RETURN VALUE: + * ENOBUFS if it cannot get a cluster mbuf. 
+ */ + +int +tp_setup_perf(tpcb) + register struct tp_pcb *tpcb; +{ + register struct mbuf *q; + + if( tpcb->tp_p_meas == 0 ) { + MGET(q, M_WAIT, MT_PCB); + if (q == 0) + return ENOBUFS; + MCLGET(q, M_WAIT); + if ((q->m_flags & M_EXT) == 0) { + (void) m_free(q); + return ENOBUFS; + } + q->m_len = sizeof (struct tp_pmeas); + tpcb->tp_p_mbuf = q; + tpcb->tp_p_meas = mtod(q, struct tp_pmeas *); + bzero( (caddr_t)tpcb->tp_p_meas, sizeof (struct tp_pmeas) ); + IFDEBUG(D_PERF_MEAS) + printf( + "tpcb 0x%x so 0x%x ref 0x%x tp_p_meas 0x%x tp_perf_on 0x%x\n", + tpcb, tpcb->tp_sock, tpcb->tp_lref, + tpcb->tp_p_meas, tpcb->tp_perf_on); + ENDDEBUG + tpcb->tp_perf_on = 1; + } + return 0; +} +#endif /* TP_PERF_MEAS */ + +#ifdef ARGO_DEBUG +dump_addr (addr) + register struct sockaddr *addr; +{ + switch( addr->sa_family ) { + case AF_INET: + dump_inaddr((struct sockaddr_in *)addr); + break; +#if ISO + case AF_ISO: + dump_isoaddr((struct sockaddr_iso *)addr); + break; +#endif /* ISO */ + default: + printf("BAD AF: 0x%x\n", addr->sa_family); + break; + } +} + +#define MAX_COLUMNS 8 +/* + * Dump the buffer to the screen in a readable format. Format is: + * + * hex/dec where hex is the hex format, dec is the decimal format. + * columns of hex/dec numbers will be printed, followed by the + * character representations (if printable). 
+ */ +Dump_buf(buf, len) +caddr_t buf; +int len; +{ + int i,j; +#define Buf ((u_char *)buf) + printf("Dump buf 0x%x len 0x%x\n", buf, len); + for (i = 0; i < len; i += MAX_COLUMNS) { + printf("+%d:\t", i); + for (j = 0; j < MAX_COLUMNS; j++) { + if (i + j < len) { + printf("%x/%d\t", Buf[i+j], Buf[i+j]); + } else { + printf(" "); + } + } + + for (j = 0; j < MAX_COLUMNS; j++) { + if (i + j < len) { + if (((Buf[i+j]) > 31) && ((Buf[i+j]) < 128)) + printf("%c", Buf[i+j]); + else + printf("."); + } + } + printf("\n"); + } +} +#endif /* ARGO_DEBUG */ diff --git a/bsd/netiso/tp_timer.c b/bsd/netiso/tp_timer.c new file mode 100644 index 000000000..34c2738d9 --- /dev/null +++ b/bsd/netiso/tp_timer.c @@ -0,0 +1,397 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_timer.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +struct tp_ref *tp_ref; +int tp_rttdiv, tp_rttadd, N_TPREF = 127; +struct tp_refinfo tp_refinfo; +struct tp_pcb *tp_ftimeolist = (struct tp_pcb *)&tp_ftimeolist; + +/* + * CALLED FROM: + * at autoconfig time from tp_init() + * a combo of event, state, predicate + * FUNCTION and ARGUMENTS: + * initialize data structures for the timers + */ +void +tp_timerinit() +{ + register int s; + /* + * Initialize storage + */ + if (tp_refinfo.tpr_base) + return; + tp_refinfo.tpr_size = N_TPREF + 1; /* Need to start somewhere */ + s = sizeof(*tp_ref) * tp_refinfo.tpr_size; +// if ((tp_ref = (struct tp_ref *) malloc(s, M_PCB, M_NOWAIT)) == 0) + MALLOC(tp_ref, struct tp_ref *, s, M_PCB, M_NOWAIT); + if (tp_ref == 0) + panic("tp_timerinit"); + bzero((caddr_t)tp_ref, (unsigned) s); + tp_refinfo.tpr_base = tp_ref; + tp_rttdiv = hz / PR_SLOWHZ; + tp_rttadd = (2 * tp_rttdiv) - 1; +} +#ifdef TP_DEBUG_TIMERS +/********************** e timers *************************/ + +/* + * CALLED FROM: + * tp.trans all over + * FUNCTION and ARGUMENTS: + * Set an E type timer. 
+ */ +void +tp_etimeout(tpcb, fun, ticks) + register struct tp_pcb *tpcb; + int fun; /* function to be called */ + int ticks; +{ + + register u_int *callp; + IFDEBUG(D_TIMER) + printf("etimeout pcb 0x%x state 0x%x\n", tpcb, tpcb->tp_state); + ENDDEBUG + IFTRACE(D_TIMER) + tptrace(TPPTmisc, "tp_etimeout ref refstate tks Etick", tpcb->tp_lref, + tpcb->tp_state, ticks, tp_stat.ts_Eticks); + ENDTRACE + if (tpcb == 0) + return; + IncStat(ts_Eset); + if (ticks == 0) + ticks = 1; + callp = tpcb->tp_timer + fun; + if (*callp == 0 || *callp > ticks) + *callp = ticks; +} + +/* + * CALLED FROM: + * tp.trans all over + * FUNCTION and ARGUMENTS: + * Cancel all occurrences of E-timer function (fun) for reference (refp) + */ +void +tp_euntimeout(tpcb, fun) + register struct tp_pcb *tpcb; + int fun; +{ + IFTRACE(D_TIMER) + tptrace(TPPTmisc, "tp_euntimeout ref", tpcb->tp_lref, 0, 0, 0); + ENDTRACE + + if (tpcb) + tpcb->tp_timer[fun] = 0; +} + +/**************** c timers ********************** + * + * These are not chained together; they sit + * in the tp_ref structure. they are the kind that + * are typically cancelled so it's faster not to + * mess with the chains + */ +#endif +/* + * CALLED FROM: + * the clock, every 500 ms + * FUNCTION and ARGUMENTS: + * Look for open references with active timers. + * If they exist, call the appropriate timer routines to update + * the timers and possibly generate events. 
+ */ +ProtoHook +tp_slowtimo() +{ + register u_int *cp; + register struct tp_ref *rp; + struct tp_pcb *tpcb; + struct tp_event E; + int s = splnet(), t; + + /* check only open reference structures */ + IncStat(ts_Cticks); + /* tp_ref[0] is never used */ + for (rp = tp_ref + tp_refinfo.tpr_maxopen; rp > tp_ref; rp--) { + if ((tpcb = rp->tpr_pcb) == 0 || tpcb->tp_refstate < REF_OPEN) + continue; + /* check the timers */ + for (t = 0; t < TM_NTIMERS; t++) { + cp = tpcb->tp_timer + t; + if (*cp && --(*cp) <= 0 ) { + *cp = 0; + E.ev_number = t; + IFDEBUG(D_TIMER) + printf("tp_slowtimo: pcb 0x%x t %d\n", + tpcb, t); + ENDDEBUG + IncStat(ts_Cexpired); + tp_driver(tpcb, &E); + if (t == TM_reference && tpcb->tp_state == TP_CLOSED) { + if (tpcb->tp_notdetached) { + IFDEBUG(D_CONN) + printf("PRU_DETACH: not detached\n"); + ENDDEBUG + tp_detach(tpcb); + } + /* XXX wart; where else to do it? */ + FREE((caddr_t)tpcb, M_PCB); + } + } + } + } + splx(s); + return 0; +} + +/* + * Called From: tp.trans from tp_slowtimo() -- retransmission timer went off. + */ +tp_data_retrans(tpcb) +register struct tp_pcb *tpcb; +{ + int rexmt, win; + tpcb->tp_rttemit = 0; /* cancel current round trip time */ + tpcb->tp_dupacks = 0; + tpcb->tp_sndnxt = tpcb->tp_snduna; + if (tpcb->tp_fcredit == 0) { + /* + * We transmitted new data, started timing it and the window + * got shrunk under us. This can only happen if all data + * that they wanted us to send got acked, so don't + * bother shrinking the congestion windows, et. al. 
+ * The retransmission timer should have been reset in goodack() + */ + IFDEBUG(D_ACKRECV) + printf("tp_data_retrans: 0 window tpcb 0x%x una 0x%x\n", + tpcb, tpcb->tp_snduna); + ENDDEBUG + tpcb->tp_rxtshift = 0; + tpcb->tp_timer[TM_data_retrans] = 0; + tpcb->tp_timer[TM_sendack] = tpcb->tp_dt_ticks; + return; + } + rexmt = tpcb->tp_dt_ticks << min(tpcb->tp_rxtshift, TP_MAXRXTSHIFT); + win = min(tpcb->tp_fcredit, (tpcb->tp_cong_win / tpcb->tp_l_tpdusize / 2)); + win = max(win, 2); + tpcb->tp_cong_win = tpcb->tp_l_tpdusize; /* slow start again. */ + tpcb->tp_ssthresh = win * tpcb->tp_l_tpdusize; + /* We're losing; our srtt estimate is probably bogus. + * Clobber it so we'll take the next rtt measurement as our srtt; + * Maintain current rxt times until then. + */ + if (++tpcb->tp_rxtshift > TP_NRETRANS / 4) { + /* tpcb->tp_nlprotosw->nlp_losing(tpcb->tp_npcb) someday */ + tpcb->tp_rtt = 0; + } + TP_RANGESET(tpcb->tp_rxtcur, rexmt, tpcb->tp_peer_acktime, 128); + tpcb->tp_timer[TM_data_retrans] = tpcb->tp_rxtcur; + tp_send(tpcb); +} + +int +tp_fasttimo() +{ + register struct tp_pcb *t; + int s = splnet(); + struct tp_event E; + + E.ev_number = TM_sendack; + while ((t = tp_ftimeolist) != (struct tp_pcb *)&tp_ftimeolist) { + if (t == 0) { + printf("tp_fasttimeo: should panic"); + tp_ftimeolist = (struct tp_pcb *)&tp_ftimeolist; + } else { + if (t->tp_flags & TPF_DELACK) { + IncStat(ts_Fdelack); + tp_driver(t, &E); + t->tp_flags &= ~TPF_DELACK; + } else + IncStat(ts_Fpruned); + tp_ftimeolist = t->tp_fasttimeo; + t->tp_fasttimeo = 0; + } + } + splx(s); +} + +#ifdef TP_DEBUG_TIMERS +/* + * CALLED FROM: + * tp.trans, tp_emit() + * FUNCTION and ARGUMENTS: + * Set a C type timer of type (which) to go off after (ticks) time. 
+ */ +void +tp_ctimeout(tpcb, which, ticks) + register struct tp_pcb *tpcb; + int which, ticks; +{ + + IFTRACE(D_TIMER) + tptrace(TPPTmisc, "tp_ctimeout ref which tpcb active", + tpcb->tp_lref, which, tpcb, tpcb->tp_timer[which]); + ENDTRACE + if(tpcb->tp_timer[which]) + IncStat(ts_Ccan_act); + IncStat(ts_Cset); + if (ticks <= 0) + ticks = 1; + tpcb->tp_timer[which] = ticks; +} + +/* + * CALLED FROM: + * tp.trans + * FUNCTION and ARGUMENTS: + * Version of tp_ctimeout that resets the C-type time if the + * parameter (ticks) is > the current value of the timer. + */ +void +tp_ctimeout_MIN(tpcb, which, ticks) + register struct tp_pcb *tpcb; + int which, ticks; +{ + IFTRACE(D_TIMER) + tptrace(TPPTmisc, "tp_ctimeout_MIN ref which tpcb active", + tpcb->tp_lref, which, tpcb, tpcb->tp_timer[which]); + ENDTRACE + IncStat(ts_Cset); + if (tpcb->tp_timer[which]) { + tpcb->tp_timer[which] = min(ticks, tpcb->tp_timer[which]); + IncStat(ts_Ccan_act); + } else + tpcb->tp_timer[which] = ticks; +} + +/* + * CALLED FROM: + * tp.trans + * FUNCTION and ARGUMENTS: + * Cancel the (which) timer in the ref structure indicated by (refp). + */ +void +tp_cuntimeout(tpcb, which) + register struct tp_pcb *tpcb; + int which; +{ + IFDEBUG(D_TIMER) + printf("tp_cuntimeout(0x%x, %d) active %d\n", + tpcb, which, tpcb->tp_timer[which]); + ENDDEBUG + + IFTRACE(D_TIMER) + tptrace(TPPTmisc, "tp_cuntimeout ref which, active", refp-tp_ref, + which, tpcb->tp_timer[which], 0); + ENDTRACE + + if (tpcb->tp_timer[which]) + IncStat(ts_Ccan_act); + else + IncStat(ts_Ccan_inact); + tpcb->tp_timer[which] = 0; +} +#endif diff --git a/bsd/netiso/tp_timer.h b/bsd/netiso/tp_timer.h new file mode 100644 index 000000000..f2b364c38 --- /dev/null +++ b/bsd/netiso/tp_timer.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_timer.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. 
+ +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * ARGO TP + * The callout structures used by the tp timers. + */ + +#ifndef __TP_TIMER__ +#define __TP_TIMER__ + +#define SET_DELACK(t) {\ + (t)->tp_flags |= TPF_DELACK; \ + if ((t)->tp_fasttimeo == 0)\ + { (t)->tp_fasttimeo = tp_ftimeolist; tp_ftimeolist = (t); } } + +#ifdef ARGO_DEBUG +#define TP_DEBUG_TIMERS +#endif + +#ifndef TP_DEBUG_TIMERS +#define tp_ctimeout(tpcb, which, timo) ((tpcb)->tp_timer[which] = (timo)) +#define tp_cuntimeout(tpcb, which) ((tpcb)->tp_timer[which] = 0) +#define tp_etimeout tp_ctimeout +#define tp_euntimeout tp_cuntimeout +#define tp_ctimeout_MIN(p, w, t) \ + { if((p)->tp_timer[w] > (t)) (p)->tp_timer[w] = (t);} +#endif /* TP_DEBUG_TIMERS */ + +#endif /* __TP_TIMER__ */ diff --git a/bsd/netiso/tp_tpdu.h b/bsd/netiso/tp_tpdu.h new file mode 100644 index 000000000..a94ea8fbc --- /dev/null +++ b/bsd/netiso/tp_tpdu.h @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_tpdu.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * This ghastly set of macros makes it possible to + * refer to tpdu structures without going mad. + */ + +#ifndef __TP_TPDU__ +#define __TP_TPDU__ + +#ifndef BYTE_ORDER +/* + * Definitions for byte order, + * according to byte significance from low address to high. 
+ */ +#define LITTLE_ENDIAN 1234 /* least-significant byte first (vax) */ +#define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ + +#ifdef vax +#define BYTE_ORDER LITTLE_ENDIAN +#else +#define BYTE_ORDER BIG_ENDIAN /* mc68000, tahoe, most others */ +#endif +#endif /* BYTE_ORDER */ + +/* This much of a tpdu is the same for all types of tpdus (except + * DT tpdus in class 0; their exceptions are handled by the data + * structure below + */ +struct tpdu_fixed { + u_char _tpduf_li:8, /* length indicator */ +#if BYTE_ORDER == LITTLE_ENDIAN + _tpduf_cdt: 4, /* credit */ + _tpduf_type: 4; /* type of tpdu (DT, CR, etc.) */ +#endif +#if BYTE_ORDER == BIG_ENDIAN + _tpduf_type: 4, /* type of tpdu (DT, CR, etc.) */ + _tpduf_cdt: 4; /* credit */ +#endif + u_short _tpduf_dref; /* destination ref; not in DT in class 0 */ +}; + +#define tpdu_li _tpduf._tpduf_li +#define tpdu_type _tpduf._tpduf_type +#define tpdu_cdt _tpduf._tpduf_cdt +#define tpdu_dref _tpduf._tpduf_dref + +struct tp0du { + u_char _tp0_li, + _tp0_cdt_type, /* same as in tpdu_fixed */ +#if BYTE_ORDER == BIG_ENDIAN + _tp0_eot: 1, /* eot */ + _tp0_mbz: 7, /* must be zero */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN + _tp0_mbz: 7, /* must be zero */ + _tp0_eot: 1, /* eot */ +#endif + _tp0_notused: 8; /* data begins on this octet */ +}; + +#define tp0du_eot _tp0_eot +#define tp0du_mbz _tp0_mbz + +/* + * This is used when the extended format seqence numbers are + * being sent and received. + */ + /* + * the seqeot field is an int that overlays the seq + * and eot fields, this allows the htonl operation + * to be applied to the entire 32 bit quantity, and + * simplifies the structure definitions. 
+ */ +union seq_type { + struct { +#if BYTE_ORDER == BIG_ENDIAN + unsigned int st_eot:1, /* end-of-tsdu */ + st_seq:31; /* 31 bit sequence number */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned int st_seq:31, /* 31 bit sequence number */ + st_eot:1; /* end-of-tsdu */ +#endif + } st; + unsigned int s_seqeot; +#define s_eot st.st_eot +#define s_seq st.st_seq +}; + +/* Then most tpdu types have a portion that is always present but + * differs among the tpdu types : + */ +union tpdu_fixed_rest { + + struct { + u_short _tpdufr_sref, /* source reference */ +#if BYTE_ORDER == BIG_ENDIAN + _tpdufr_class: 4, /* class [ ISO 8073 13.3.3.e ] */ + _tpdufr_opt: 4, /* options [ ISO 8073 13.3.3.e ] */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN + _tpdufr_opt: 4, /* options [ ISO 8073 13.3.3.e ] */ + _tpdufr_class: 4, /* class [ ISO 8073 13.3.3.e ] */ +#endif + _tpdufr_xx: 8; /* unused */ + } CRCC; + +#define tpdu_CRli _tpduf._tpduf_li +#define tpdu_CRtype _tpduf._tpduf_type +#define tpdu_CRcdt _tpduf._tpduf_cdt +#define tpdu_CRdref_0 _tpduf._tpduf_dref +#define tpdu_CRsref _tpdufr.CRCC._tpdufr_sref +#define tpdu_sref _tpdufr.CRCC._tpdufr_sref +#define tpdu_CRclass _tpdufr.CRCC._tpdufr_class +#define tpdu_CRoptions _tpdufr.CRCC._tpdufr_opt + +#define tpdu_CCli _tpduf._tpduf_li +#define tpdu_CCtype _tpduf._tpduf_type +#define tpdu_CCcdt _tpduf._tpduf_cdt +#define tpdu_CCdref _tpduf._tpduf_dref +#define tpdu_CCsref _tpdufr.CRCC._tpdufr_sref +#define tpdu_CCclass _tpdufr.CRCC._tpdufr_class +#define tpdu_CCoptions _tpdufr.CRCC._tpdufr_opt + +/* OPTIONS and ADDL OPTIONS bits */ +#define TPO_USE_EFC 0x1 +#define TPO_XTD_FMT 0x2 +#define TPAO_USE_TXPD 0x1 +#define TPAO_NO_CSUM 0x2 +#define TPAO_USE_RCC 0x4 +#define TPAO_USE_NXPD 0x8 + + struct { + unsigned short _tpdufr_sref; /* source reference */ + unsigned char _tpdufr_reason; /* [ ISO 8073 13.5.3.d ] */ + } DR; +#define tpdu_DRli _tpduf._tpduf_li +#define tpdu_DRtype _tpduf._tpduf_type +#define tpdu_DRdref _tpduf._tpduf_dref 
+#define tpdu_DRsref _tpdufr.DR._tpdufr_sref +#define tpdu_DRreason _tpdufr.DR._tpdufr_reason + + unsigned short _tpdufr_sref; /* source reference */ + +#define tpdu_DCli _tpduf._tpduf_li +#define tpdu_DCtype _tpduf._tpduf_type +#define tpdu_DCdref _tpduf._tpduf_dref +#define tpdu_DCsref _tpdufr._tpdufr_sref + + struct { +#if BYTE_ORDER == BIG_ENDIAN + unsigned char _tpdufr_eot:1, /* end-of-tsdu */ + _tpdufr_seq:7; /* 7 bit sequence number */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned char _tpdufr_seq:7, /* 7 bit sequence number */ + _tpdufr_eot:1; /* end-of-tsdu */ +#endif + }SEQEOT; + struct { +#if BYTE_ORDER == BIG_ENDIAN + unsigned int _tpdufr_Xeot:1, /* end-of-tsdu */ + _tpdufr_Xseq:31; /* 31 bit sequence number */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned int _tpdufr_Xseq:31, /* 31 bit sequence number */ + _tpdufr_Xeot:1; /* end-of-tsdu */ +#endif + }SEQEOT31; + unsigned int _tpdufr_Xseqeot; +#define tpdu_seqeotX _tpdufr._tpdufr_Xseqeot + +#define tpdu_DTli _tpduf._tpduf_li +#define tpdu_DTtype _tpduf._tpduf_type +#define tpdu_DTdref _tpduf._tpduf_dref +#define tpdu_DTseq _tpdufr.SEQEOT._tpdufr_seq +#define tpdu_DTeot _tpdufr.SEQEOT._tpdufr_eot +#define tpdu_DTseqX _tpdufr.SEQEOT31._tpdufr_Xseq +#define tpdu_DTeotX _tpdufr.SEQEOT31._tpdufr_Xeot + +#define tpdu_XPDli _tpduf._tpduf_li +#define tpdu_XPDtype _tpduf._tpduf_type +#define tpdu_XPDdref _tpduf._tpduf_dref +#define tpdu_XPDseq _tpdufr.SEQEOT._tpdufr_seq +#define tpdu_XPDeot _tpdufr.SEQEOT._tpdufr_eot +#define tpdu_XPDseqX _tpdufr.SEQEOT31._tpdufr_Xseq +#define tpdu_XPDeotX _tpdufr.SEQEOT31._tpdufr_Xeot + + struct { +#if BYTE_ORDER == BIG_ENDIAN + unsigned _tpdufr_yrseq0:1, /* always zero */ + _tpdufr_yrseq:31; /* [ ISO 8073 13.9.3.d ] */ +#endif +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned _tpdufr_yrseq:31, /* [ ISO 8073 13.9.3.d ] */ + _tpdufr_yrseq0:1; /* always zero */ +#endif + unsigned short _tpdufr_cdt; /* [ ISO 8073 13.9.3.b ] */ + } AK31; + +#define tpdu_AKli _tpduf._tpduf_li 
+#define tpdu_AKtype _tpduf._tpduf_type +#define tpdu_AKdref _tpduf._tpduf_dref +#define tpdu_AKseq _tpdufr.SEQEOT._tpdufr_seq +#define tpdu_AKseqX _tpdufr.AK31._tpdufr_yrseq +/* location of cdt depends on size of seq. numbers */ +#define tpdu_AKcdt _tpduf._tpduf_cdt +#define tpdu_AKcdtX _tpdufr.AK31._tpdufr_cdt + +#define tpdu_XAKli _tpduf._tpduf_li +#define tpdu_XAKtype _tpduf._tpduf_type +#define tpdu_XAKdref _tpduf._tpduf_dref +#define tpdu_XAKseq _tpdufr.SEQEOT._tpdufr_seq +#define tpdu_XAKseqX _tpdufr.SEQEOT31._tpdufr_Xseq + + unsigned char _tpdu_ERreason; /* [ ISO 8073 13.12.3.c ] */ + +#define tpdu_ERli _tpduf._tpduf_li +#define tpdu_ERtype _tpduf._tpduf_type +#define tpdu_ERdref _tpduf._tpduf_dref +#define tpdu_ERreason _tpdufr._tpdu_ERreason + +}; + +struct tpdu { + struct tpdu_fixed _tpduf; + union tpdu_fixed_rest _tpdufr; +}; + +#endif /* __TP_TPDU__ */ diff --git a/bsd/netiso/tp_trace.c b/bsd/netiso/tp_trace.c new file mode 100644 index 000000000..a2d2f689c --- /dev/null +++ b/bsd/netiso/tp_trace.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_trace.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * The whole protocol trace module. + * We keep a circular buffer of trace structures, which are big + * unions of different structures we might want to see. + * Unfortunately this gets too big pretty easily. Pcbs were removed + * from the tracing when the kernel got too big to boot. + */ + +#define TP_TRACEFILE + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef TPPT +static tp_seq = 0; +u_char tp_traceflags[128]; + +/* + * The argument tpcb is the obvious. + * event here is just the type of trace event - TPPTmisc, etc. + * The rest of the arguments have different uses depending + * on the type of trace event. 
 */
/*ARGSUSED*/
/*VARARGS*/

/*
 * Record one event in the circular tp_Trace ring.
 * tpcb   - pcb the event belongs to (may be null; only tp_lref is read)
 * event  - TPPT* event code; selects how the remaining args are stored
 * arg    - event-dependent; for TPPTusrreq/TPPTmisc it carries a string
 *          pointer smuggled through a u_int
 *          NOTE(review): pointer-in-u_int truncates on LP64 -- confirm
 *          this file is only built for 32-bit targets.
 * src,len,arg4,arg5 - event-dependent payload words
 */
void
tpTrace(tpcb, event, arg, src, len, arg4, arg5)
	struct tp_pcb *tpcb;
	u_int event, arg;
	u_int src;
	u_int len;
	u_int arg4;
	u_int arg5;
{
	register struct tp_Trace *tp;

	/* claim the next ring slot; the ring wraps at TPTRACEN entries */
	tp = &tp_Trace[tp_Tracen++];
	tp_Tracen %= TPTRACEN;

	tp->tpt_event = event;
	tp->tpt_tseq = tp_seq++;	/* global sequence stamp for ordering */
	tp->tpt_arg = arg;
	if(tpcb)
		tp->tpt_arg2 = tpcb->tp_lref;
	/* timestamp from the kernel's global time-of-day */
	bcopy( (caddr_t)&time, (caddr_t)&tp->tpt_time, sizeof(struct timeval) );

	switch(event) {

	case TPPTertpdu:
		/* copy the raw ER tpdu, clipped to the union's size */
		bcopy((caddr_t)src, (caddr_t)&tp->tpt_ertpdu,
			(unsigned)MIN((int)len, sizeof(struct tp_Trace)));
		break;

	case TPPTusrreq:
	case TPPTmisc:

		/* arg is a string */
		bcopy((caddr_t)arg, (caddr_t)tp->tpt_str,
			(unsigned)MIN(1+strlen((caddr_t) arg), TPTRACE_STRLEN));
		tp->tpt_m2 = src;
		tp->tpt_m3 = len;
		tp->tpt_m4 = arg4;
		tp->tpt_m1 = arg5;
		break;

	/* all the ack-ish events (and anything unrecognized, via the
	 * embedded default:) just store the five words verbatim */
	case TPPTgotXack:
	case TPPTXack:
	case TPPTsendack:
	case TPPTgotack:
	case TPPTack:
	case TPPTindicate:
	default:
	case TPPTdriver:
		tp->tpt_m2 = arg;
		tp->tpt_m3 = src;
		tp->tpt_m4 = len;
		tp->tpt_m5 = arg4;
		tp->tpt_m1 = arg5;
		break;
	case TPPTparam:
		bcopy((caddr_t)src, (caddr_t)&tp->tpt_param, sizeof(struct tp_param));
		break;
	case TPPTref:
		bcopy((caddr_t)src, (caddr_t)&tp->tpt_ref, sizeof(struct tp_ref));
		break;

	case TPPTtpduin:
	case TPPTtpduout:
		/* snapshot the tpdu header, clipped to the union's size */
		tp->tpt_arg2 = arg4;
		bcopy((caddr_t)src, (caddr_t)&tp->tpt_tpdu,
			(unsigned)MIN((int)len, sizeof(struct tp_Trace)));
		break;
	}
}
#endif /* TPPT */
diff --git a/bsd/netiso/tp_trace.h b/bsd/netiso/tp_trace.h
new file mode 100644
index 000000000..1e5d96aa1
--- /dev/null
+++ b/bsd/netiso/tp_trace.h
@@ -0,0 +1,215 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tp_trace.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * Definitions needed for the protocol trace mechanism. 
+ */ + +#ifndef __TP_TRACE__ +#define __TP_TRACE__ + + +#define TPPTsendack 1 +#define TPPTgotack 2 +#define TPPTXack 3 +#define TPPTgotXack 4 +#define TPPTack 5 +#define TPPTindicate 6 +#define TPPTusrreq 7 +#define TPPTmisc 8 +#define TPPTpcb 9 +#define TPPTref 10 +#define TPPTtpduin 11 +#define TPPTparam 12 +#define TPPTertpdu 13 +#define TPPTdriver 14 +#define TPPTtpduout 15 + +#include + +/* this #if is to avoid lint */ + +#if defined(TP_TRACEFILE)||!defined(KERNEL) + +#include + +#define TPTRACE_STRLEN 50 + + +/* for packet tracing */ +struct tp_timeval { + SeqNum tptv_seq; + u_int tptv_kind; + u_int tptv_window; + u_int tptv_size; +}; + +struct tp_Trace { + u_int tpt_event; + u_int tpt_arg; + u_int tpt_arg2; + int tpt_tseq; + struct timeval tpt_time; + union { + struct inpcb tpt_Inpcb; /* protocol control block */ + struct tp_ref tpt_Ref; /* ref part of pcb */ + struct tpdu tpt_Tpdu; /* header*/ + struct tp_refinfo tpt_Param; /* ?? bytes, make sure < 128??*/ + struct tp_timeval tpt_Time; + struct { + u_int tptm_2; + u_int tptm_3; + u_int tptm_4; + u_int tptm_5; + char tpt_Str[TPTRACE_STRLEN]; + u_int tptm_1; + } tptmisc; + u_char tpt_Ertpdu; /* use rest of structure */ + } tpt_stuff; +}; +#define tpt_inpcb tpt_stuff.tpt_Inpcb +#define tpt_pcb tpt_stuff.tpt_Pcb +#define tpt_ref tpt_stuff.tpt_Ref +#define tpt_tpdu tpt_stuff.tpt_Tpdu +#define tpt_param tpt_stuff.tpt_Param +#define tpt_ertpdu tpt_stuff.tpt_Ertpdu +#define tpt_str tpt_stuff.tptmisc.tpt_Str +#define tpt_m1 tpt_stuff.tptmisc.tptm_1 +#define tpt_m2 tpt_stuff.tptmisc.tptm_2 +#define tpt_m3 tpt_stuff.tptmisc.tptm_3 +#define tpt_m4 tpt_stuff.tptmisc.tptm_4 +#define tpt_m5 tpt_stuff.tptmisc.tptm_5 + +#define tpt_seq tpt_stuff.tpt_Time.tptv_seq +#define tpt_kind tpt_stuff.tpt_Time.tptv_kind +#define tpt_window tpt_stuff.tpt_Time.tptv_window +#define tpt_size tpt_stuff.tpt_Time.tptv_size + +#endif /* defined(TP_TRACEFILE)||!defined(KERNEL) */ + + +#ifdef TPPT + +#define TPTRACEN 300 + +#define 
tptrace(A,B,C,D,E,F) \ + tpTrace((struct tp_pcb *)0,\ + (u_int)(A),(u_int)(B),(u_int)(C),(u_int)(D),(u_int)(E),(u_int)(F)) + +#define tptraceTPCB(A,B,C,D,E,F) \ + tpTrace(tpcb,\ + (u_int)(A),(u_int)(B),(u_int)(C),(u_int)(D),(u_int)(E),(u_int)(F)) + +extern void tpTrace(); +extern struct tp_Trace tp_Trace[]; +extern u_char tp_traceflags[]; +int tp_Tracen = 0; + +#define IFTRACE(ascii)\ + if(tp_traceflags[ascii]) { +/* + * for some reason lint complains about tp_param being undefined no + * matter where or how many times I define it. + */ +#define ENDTRACE } + + +#else /* TPPT */ + +/*********************************************** + * NO TPPT TRACE STUFF + **********************************************/ +#define TPTRACEN 1 + +#define tptrace(A,B,C,D,E,F) 0 +#define tptraceTPCB(A,B,C,D,E,F) 0 + +#define IFTRACE(ascii) if (0) { +#define ENDTRACE } + +#endif /* TPPT */ + + + +#endif /* __TP_TRACE__ */ diff --git a/bsd/netiso/tp_user.h b/bsd/netiso/tp_user.h new file mode 100644 index 000000000..2468ee10e --- /dev/null +++ b/bsd/netiso/tp_user.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_user.h 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * These are the values a real-live user ;-) needs. 
+ */ + +#ifndef _TYPES_ +#include +#endif + +#ifndef __TP_USER__ +#define __TP_USER__ + +struct tp_conn_param { + /* PER CONNECTION parameters */ + short p_Nretrans; + short p_dr_ticks; + + short p_cc_ticks; + short p_dt_ticks; + + short p_x_ticks; + short p_cr_ticks; + + short p_keepalive_ticks; + short p_sendack_ticks; + + short p_ref_ticks; + short p_inact_ticks; + + short p_ptpdusize; /* preferred tpdusize/128 */ + short p_winsize; + + u_char p_tpdusize; /* log 2 of size */ + + u_char p_ack_strat; /* see comments in tp_pcb.h */ + u_char p_rx_strat; /* see comments in tp_pcb.h */ + u_char p_class; /* class bitmask */ + u_char p_xtd_format; + u_char p_xpd_service; + u_char p_use_checksum; + u_char p_use_nxpd; /* netwk expedited data: not implemented */ + u_char p_use_rcc; /* receipt confirmation: not implemented */ + u_char p_use_efc; /* explicit flow control: not implemented */ + u_char p_no_disc_indications; /* don't deliver indic on disc */ + u_char p_dont_change_params; /* use these params as they are */ + u_char p_netservice; + u_char p_version; /* only here for checking */ +}; + +/* + * These sockopt level definitions should be considered for socket.h + */ +#define SOL_TRANSPORT 0xfffe +#define SOL_NETWORK 0xfffd + +/* get/set socket opt commands */ +#define TPACK_WINDOW 0x0 /* ack only on full window */ +#define TPACK_EACH 0x1 /* ack every packet */ + +#define TPRX_USE_CW 0x8 /* use congestion window transmit */ +#define TPRX_EACH 0x4 /* retrans each packet of a set */ +#define TPRX_FASTSTART 0x1 /* don't use slow start */ + +#define TPOPT_INTERCEPT 0x200 +#define TPOPT_FLAGS 0x300 +#define TPOPT_CONN_DATA 0x400 +#define TPOPT_DISC_DATA 0x500 +#define TPOPT_CFRM_DATA 0x600 +#define TPOPT_CDDATA_CLEAR 0x700 +#define TPOPT_MY_TSEL 0x800 +#define TPOPT_PEER_TSEL 0x900 +#define TPOPT_PERF_MEAS 0xa00 +#define TPOPT_PSTATISTICS 0xb00 +#define TPOPT_PARAMS 0xc00 /* to replace a bunch of the others */ +#define TPOPT_DISC_REASON 0xe00 + +struct tp_disc_reason { + 
struct cmsghdr dr_hdr; + u_int dr_reason; +}; + +/* + ***********************flags********************************** + */ + +/* read only flags */ +#define TPFLAG_NLQOS_PDN (u_char)0x01 +#define TPFLAG_PEER_ON_SAMENET (u_char)0x02 +#define TPFLAG_GENERAL_ADDR (u_char)0x04 /* bound to wildcard addr */ + + +/* + ***********************end flags****************************** + */ + + +#endif /* __TP_USER__ */ diff --git a/bsd/netiso/tp_usrreq.c b/bsd/netiso/tp_usrreq.c new file mode 100644 index 000000000..984d52791 --- /dev/null +++ b/bsd/netiso/tp_usrreq.c @@ -0,0 +1,774 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)tp_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +/*********************************************************** + Copyright IBM Corporation 1987 + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of IBM not be +used in advertising or publicity pertaining to distribution of the +software without specific, written prior permission. + +IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL +IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, +ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS +SOFTWARE. + +******************************************************************/ + +/* + * ARGO Project, Computer Sciences Dept., University of Wisconsin - Madison + */ +/* + * ARGO TP + * + * tp_usrreq(), the fellow that gets called from most of the socket code. + * Pretty straighforward. + * THe only really awful stuff here is the OOB processing, which is done + * wholly here. + * tp_rcvoob() and tp_sendoob() are contained here and called by tp_usrreq(). + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int tp_attach(), tp_driver(), tp_pcbbind(); +int TNew; +int TPNagle1, TPNagle2; +struct tp_pcb *tp_listeners, *tp_intercepts; + +#ifdef ARGO_DEBUG +/* + * CALLED FROM: + * anywhere you want to debug... 
+ * FUNCTION and ARGUMENTS: + * print (str) followed by the control info in the mbufs of an mbuf chain (n) + */ +void +dump_mbuf(n, str) + struct mbuf *n; + char *str; +{ + struct mbuf *nextrecord; + + printf("dump %s\n", str); + + if (n == MNULL) { + printf("EMPTY:\n"); + return; + } + + while (n) { + nextrecord = n->m_act; + printf("RECORD:\n"); + while (n) { + printf("%x : Len %x Data %x A %x Nx %x Tp %x\n", + n, n->m_len, n->m_data, n->m_act, n->m_next, n->m_type); +#ifdef notdef + { + register char *p = mtod(n, char *); + register int i; + + printf("data: "); + for (i = 0; i < n->m_len; i++) { + if (i%8 == 0) + printf("\n"); + printf("0x%x ", *(p+i)); + } + printf("\n"); + } +#endif /* notdef */ + if (n->m_next == n) { + printf("LOOP!\n"); + return; + } + n = n->m_next; + } + n = nextrecord; + } + printf("\n"); +} + +#endif /* ARGO_DEBUG */ + +/* + * CALLED FROM: + * tp_usrreq(), PRU_RCVOOB + * FUNCTION and ARGUMENTS: + * Copy data from the expedited data socket buffer into + * the pre-allocated mbuf m. + * There is an isomorphism between XPD TPDUs and expedited data TSDUs. + * XPD tpdus are limited to 16 bytes of data so they fit in one mbuf. + * RETURN VALUE: + * EINVAL if debugging is on and a disaster has occurred + * ENOTCONN if the socket isn't connected + * EWOULDBLOCK if the socket is in non-blocking mode and there's no + * xpd data in the buffer + * E* whatever is returned from the fsm. 
+ */ +tp_rcvoob(tpcb, so, m, outflags, inflags) + struct tp_pcb *tpcb; + register struct socket *so; + register struct mbuf *m; + int *outflags; + int inflags; +{ + register struct mbuf *n; + register struct sockbuf *sb = &so->so_rcv; + struct tp_event E; + int error = 0; + register struct mbuf **nn; + + IFDEBUG(D_XPD) + printf("PRU_RCVOOB, sostate 0x%x\n", so->so_state); + ENDDEBUG + + /* if you use soreceive */ + if (m == MNULL) + return ENOBUFS; + +restart: + if ((((so->so_state & SS_ISCONNECTED) == 0) + || (so->so_state & SS_ISDISCONNECTING) != 0) && + (so->so_proto->pr_flags & PR_CONNREQUIRED)) { + return ENOTCONN; + } + + /* Take the first mbuf off the chain. + * Each XPD TPDU gives you a complete TSDU so the chains don't get + * coalesced, but one TSDU may span several mbufs. + * Nevertheless, since n should have a most 16 bytes, it + * will fit into m. (size was checked in tp_input() ) + */ + + /* + * Code for excision of OOB data should be added to + * uipc_socket2.c (like sbappend). 
+ */ + + sblock(sb, M_WAIT); + for (nn = &sb->sb_mb; n = *nn; nn = &n->m_act) + if (n->m_type == MT_OOBDATA) + break; + + if (n == 0) { + IFDEBUG(D_XPD) + printf("RCVOOB: empty queue!\n"); + ENDDEBUG + sbunlock(sb); + if (so->so_state & SS_NBIO) { + return EWOULDBLOCK; + } + sbwait(sb); + goto restart; + } + m->m_len = 0; + + /* Assuming at most one xpd tpdu is in the buffer at once */ + while (n != MNULL) { + m->m_len += n->m_len; + bcopy(mtod(n, caddr_t), mtod(m, caddr_t), (unsigned)n->m_len); + m->m_data += n->m_len; /* so mtod() in bcopy() above gives right addr */ + n = n->m_next; + } + m->m_data = m->m_dat; + m->m_flags |= M_EOR; + + IFDEBUG(D_XPD) + printf("tp_rcvoob: xpdlen 0x%x\n", m->m_len); + dump_mbuf(so->so_rcv.sb_mb, "RCVOOB: Rcv socketbuf"); + dump_mbuf(sb->sb_mb, "RCVOOB: Xrcv socketbuf"); + ENDDEBUG + + if ((inflags & MSG_PEEK) == 0) { + n = *nn; + *nn = n->m_act; + for (; n; n = m_free(n)) + sbfree(sb, n); + } + +release: + sbunlock(sb); + + IFTRACE(D_XPD) + tptraceTPCB(TPPTmisc, "PRU_RCVOOB @ release sb_cc m_len", + tpcb->tp_Xrcv.sb_cc, m->m_len, 0, 0); + ENDTRACE + if (error == 0) + error = DoEvent(T_USR_Xrcvd); + return error; +} + +/* + * CALLED FROM: + * tp_usrreq(), PRU_SENDOOB + * FUNCTION and ARGUMENTS: + * Send what's in the mbuf chain (m) as an XPD TPDU. + * The mbuf may not contain more then 16 bytes of data. + * XPD TSDUs aren't segmented, so they translate into + * exactly one XPD TPDU, with EOT bit set. + * RETURN VALUE: + * EWOULDBLOCK if socket is in non-blocking mode and the previous + * xpd data haven't been acked yet. + * EMSGSIZE if trying to send > max-xpd bytes (16) + * ENOBUFS if ran out of mbufs + */ +tp_sendoob(tpcb, so, xdata, outflags) + struct tp_pcb *tpcb; + register struct socket *so; + register struct mbuf *xdata; + int *outflags; /* not used */ +{ + /* + * Each mbuf chain represents a sequence # in the XPD seq space. + * The first one in the queue has sequence # tp_Xuna. 
+ * When we add to the XPD queue, we stuff a zero-length + * mbuf (mark) into the DATA queue, with its sequence number in m_next + * to be assigned to this XPD tpdu, so data xfer can stop + * when it reaches the zero-length mbuf if this XPD TPDU hasn't + * yet been acknowledged. + */ + register struct sockbuf *sb = &(tpcb->tp_Xsnd); + register struct mbuf *xmark; + register int len=0; + struct tp_event E; + + IFDEBUG(D_XPD) + printf("tp_sendoob:"); + if (xdata) + printf("xdata len 0x%x\n", xdata->m_len); + ENDDEBUG + /* DO NOT LOCK the Xsnd buffer!!!! You can have at MOST one + * socket buf locked at any time!!! (otherwise you might + * sleep() in sblock() w/ a signal pending and cause the + * system call to be aborted w/ a locked socketbuf, which + * is a problem. So the so_snd buffer lock + * (done in sosend()) serves as the lock for Xpd. + */ + if (sb->sb_mb) { /* Anything already in eXpedited data sockbuf? */ + if (so->so_state & SS_NBIO) { + return EWOULDBLOCK; + } + while (sb->sb_mb) { + sbunlock(&so->so_snd); /* already locked by sosend */ + sbwait(&so->so_snd); + sblock(&so->so_snd, M_WAIT); /* sosend will unlock on return */ + } + } + + if (xdata == (struct mbuf *)0) { + /* empty xpd packet */ + MGETHDR(xdata, M_WAIT, MT_OOBDATA); + if (xdata == NULL) { + return ENOBUFS; + } + xdata->m_len = 0; + xdata->m_pkthdr.len = 0; + } + IFDEBUG(D_XPD) + printf("tp_sendoob 1:"); + if (xdata) + printf("xdata len 0x%x\n", xdata->m_len); + ENDDEBUG + xmark = xdata; /* temporary use of variable xmark */ + while (xmark) { + len += xmark->m_len; + xmark = xmark->m_next; + } + if (len > TP_MAX_XPD_DATA) { + return EMSGSIZE; + } + IFDEBUG(D_XPD) + printf("tp_sendoob 2:"); + if (xdata) + printf("xdata len 0x%x\n", len); + ENDDEBUG + + + IFTRACE(D_XPD) + tptraceTPCB(TPPTmisc, "XPD mark m_next ", xdata->m_next, 0, 0, 0); + ENDTRACE + + sbappendrecord(sb, xdata); + + IFDEBUG(D_XPD) + printf("tp_sendoob len 0x%x\n", len); + dump_mbuf(so->so_snd.sb_mb, "XPD request Regular 
sndbuf:"); + dump_mbuf(tpcb->tp_Xsnd.sb_mb, "XPD request Xsndbuf:"); + ENDDEBUG + return DoEvent(T_XPD_req); +} + +/* + * CALLED FROM: + * the socket routines + * FUNCTION and ARGUMENTS: + * Handles all "user requests" except the [gs]ockopts() requests. + * The argument (req) is the request type (PRU*), + * (m) is an mbuf chain, generally used for send and + * receive type requests only. + * (nam) is used for addresses usually, in particular for the bind request. + * + */ +/*ARGSUSED*/ +ProtoHook +tp_usrreq(so, req, m, nam, controlp) + struct socket *so; + u_int req; + struct mbuf *m, *nam, *controlp; +{ + register struct tp_pcb *tpcb = sototpcb(so); + int s = splnet(); + int error = 0; + int flags, *outflags = &flags; + u_long eotsdu = 0; + struct tp_event E; + + IFDEBUG(D_REQUEST) + printf("usrreq(0x%x,%d,0x%x,0x%x,0x%x)\n",so,req,m,nam,outflags); + if (so->so_error) + printf("WARNING!!! so->so_error is 0x%x\n", so->so_error); + ENDDEBUG + IFTRACE(D_REQUEST) + tptraceTPCB(TPPTusrreq, "req so m state [", req, so, m, + tpcb?tpcb->tp_state:0); + ENDTRACE + + if ((u_int)tpcb == 0 && req != PRU_ATTACH) { + IFTRACE(D_REQUEST) + tptraceTPCB(TPPTusrreq, "req failed NO TPCB[", 0, 0, 0, 0); + ENDTRACE + splx(s); + return ENOTCONN; + } + + switch (req) { + + case PRU_ATTACH: + if (tpcb) { + error = EISCONN; + } else if ((error = tp_attach(so, (int)nam)) == 0) + tpcb = sototpcb(so); + break; + + case PRU_ABORT: /* called from close() */ + /* called for each incoming connect queued on the + * parent (accepting) socket + */ + if (tpcb->tp_state == TP_OPEN || tpcb->tp_state == TP_CONFIRMING) { + E.ATTR(T_DISC_req).e_reason = E_TP_NO_SESSION; + error = DoEvent(T_DISC_req); /* pretend it was a close() */ + break; + } /* else DROP THROUGH */ + + case PRU_DETACH: /* called from close() */ + /* called only after disconnect was called */ + error = DoEvent(T_DETACH); + if (tpcb->tp_state == TP_CLOSED) { + if (tpcb->tp_notdetached) { + IFDEBUG(D_CONN) + printf("PRU_DETACH: not 
detached\n"); + ENDDEBUG + tp_detach(tpcb); + } + FREE((caddr_t)tpcb, M_PCB); + tpcb = 0; + } + break; + + case PRU_SHUTDOWN: + /* recv end may have been released; local credit might be zero */ + case PRU_DISCONNECT: + E.ATTR(T_DISC_req).e_reason = E_TP_NORMAL_DISC; + error = DoEvent(T_DISC_req); + break; + + case PRU_BIND: + error = tp_pcbbind(tpcb, nam); + break; + + case PRU_LISTEN: + if (tpcb->tp_state != TP_CLOSED || tpcb->tp_lsuffixlen == 0 || + tpcb->tp_next == 0) + error = EINVAL; + else { + register struct tp_pcb **tt; + remque(tpcb); + tpcb->tp_next = tpcb->tp_prev = tpcb; + for (tt = &tp_listeners; *tt; tt = &((*tt)->tp_nextlisten)) + if ((*tt)->tp_lsuffixlen) + break; + tpcb->tp_nextlisten = *tt; + *tt = tpcb; + error = DoEvent(T_LISTEN_req); + } + break; + + case PRU_CONNECT2: + error = EOPNOTSUPP; /* for unix domain sockets */ + break; + + case PRU_CONNECT: + IFTRACE(D_CONN) + tptraceTPCB(TPPTmisc, + "PRU_CONNECT: so 0x%x *SHORT_LSUFXP(tpcb) 0x%x lsuflen 0x%x, class 0x%x", + tpcb->tp_sock, *SHORT_LSUFXP(tpcb), tpcb->tp_lsuffixlen, + tpcb->tp_class); + ENDTRACE + IFDEBUG(D_CONN) + printf("PRU_CONNECT: so *SHORT_LSUFXP(tpcb) 0x%x lsuflen 0x%x, class 0x%x", + tpcb->tp_sock, *SHORT_LSUFXP(tpcb), tpcb->tp_lsuffixlen, + tpcb->tp_class); + ENDDEBUG + if (tpcb->tp_lsuffixlen == 0) { + if (error = tp_pcbbind(tpcb, MNULL)) { + IFDEBUG(D_CONN) + printf("pcbbind returns error 0x%x\n", error); + ENDDEBUG + break; + } + } + IFDEBUG(D_CONN) + printf("isop 0x%x isop->isop_socket offset 12 :\n", tpcb->tp_npcb); + dump_buf(tpcb->tp_npcb, 16); + ENDDEBUG + if (error = tp_route_to(nam, tpcb, /* channel */0)) + break; + IFDEBUG(D_CONN) + printf( + "PRU_CONNECT after tpcb 0x%x so 0x%x npcb 0x%x flags 0x%x\n", + tpcb, so, tpcb->tp_npcb, tpcb->tp_flags); + printf("isop 0x%x isop->isop_socket offset 12 :\n", tpcb->tp_npcb); + dump_buf(tpcb->tp_npcb, 16); + ENDDEBUG + if (tpcb->tp_fsuffixlen == 0) { + /* didn't set peer extended suffix */ + 
(tpcb->tp_nlproto->nlp_getsufx)(tpcb->tp_npcb, &tpcb->tp_fsuffixlen, + tpcb->tp_fsuffix, TP_FOREIGN); + } + if (tpcb->tp_state == TP_CLOSED) { + soisconnecting(so); + error = DoEvent(T_CONN_req); + } else { + (tpcb->tp_nlproto->nlp_pcbdisc)(tpcb->tp_npcb); + error = EISCONN; + } + IFPERF(tpcb) + u_int lsufx, fsufx; + lsufx = *(u_short *)(tpcb->tp_lsuffix); + fsufx = *(u_short *)(tpcb->tp_fsuffix); + + tpmeas(tpcb->tp_lref, + TPtime_open | (tpcb->tp_xtd_format << 4), + &time, lsufx, fsufx, tpcb->tp_fref); + ENDPERF + break; + + case PRU_ACCEPT: + (tpcb->tp_nlproto->nlp_getnetaddr)(tpcb->tp_npcb, nam, TP_FOREIGN); + IFDEBUG(D_REQUEST) + printf("ACCEPT PEERADDDR:"); + dump_buf(mtod(nam, char *), nam->m_len); + ENDDEBUG + IFPERF(tpcb) + u_int lsufx, fsufx; + lsufx = *(u_short *)(tpcb->tp_lsuffix); + fsufx = *(u_short *)(tpcb->tp_fsuffix); + + tpmeas(tpcb->tp_lref, TPtime_open, + &time, lsufx, fsufx, tpcb->tp_fref); + ENDPERF + break; + + case PRU_RCVD: + if (so->so_state & SS_ISCONFIRMING) { + if (tpcb->tp_state == TP_CONFIRMING) + error = tp_confirm(tpcb); + break; + } + IFTRACE(D_DATA) + tptraceTPCB(TPPTmisc, + "RCVD BF: lcredit sent_lcdt cc hiwat \n", + tpcb->tp_lcredit, tpcb->tp_sent_lcdt, + so->so_rcv.sb_cc, so->so_rcv.sb_hiwat); + LOCAL_CREDIT(tpcb); + tptraceTPCB(TPPTmisc, + "PRU_RCVD AF sbspace lcredit hiwat cc", + sbspace(&so->so_rcv), tpcb->tp_lcredit, + so->so_rcv.sb_cc, so->so_rcv.sb_hiwat); + ENDTRACE + IFDEBUG(D_REQUEST) + printf("RCVD: cc %d space %d hiwat %d\n", + so->so_rcv.sb_cc, sbspace(&so->so_rcv), + so->so_rcv.sb_hiwat); + ENDDEBUG + if (((int)nam) & MSG_OOB) + error = DoEvent(T_USR_Xrcvd); + else + error = DoEvent(T_USR_rcvd); + break; + + case PRU_RCVOOB: + if ((so->so_state & SS_ISCONNECTED) == 0) { + error = ENOTCONN; + break; + } + if (! 
tpcb->tp_xpd_service) { + error = EOPNOTSUPP; + break; + } + /* kludge - nam is really flags here */ + error = tp_rcvoob(tpcb, so, m, outflags, (int)nam); + break; + + case PRU_SEND: + case PRU_SENDOOB: + if (controlp) { + error = tp_snd_control(controlp, so, &m); + controlp = NULL; + if (error) + break; + } + if ((so->so_state & SS_ISCONFIRMING) && + (tpcb->tp_state == TP_CONFIRMING) && + (error = tp_confirm(tpcb))) + break; + if (req == PRU_SENDOOB) { + error = (tpcb->tp_xpd_service == 0) ? + EOPNOTSUPP : tp_sendoob(tpcb, so, m, outflags); + break; + } + if (m == 0) + break; + if (m->m_flags & M_EOR) { + eotsdu = 1; + m->m_flags &= ~M_EOR; + } + if (eotsdu == 0 && m->m_pkthdr.len == 0) + break; + if (tpcb->tp_state != TP_AKWAIT && tpcb->tp_state != TP_OPEN) { + error = ENOTCONN; + break; + } + /* + * The protocol machine copies mbuf chains, + * prepends headers, assigns seq numbers, and + * puts the packets on the device. + * When they are acked they are removed from the socket buf. + * + * sosend calls this up until sbspace goes negative. + * Sbspace may be made negative by appending this mbuf chain, + * possibly by a whole cluster. 
+ */ + { + /* + * Could have eotsdu and no data.(presently MUST have + * an mbuf though, even if its length == 0) + */ + int totlen = m->m_pkthdr.len; + struct sockbuf *sb = &so->so_snd; + IFPERF(tpcb) + PStat(tpcb, Nb_from_sess) += totlen; + tpmeas(tpcb->tp_lref, TPtime_from_session, 0, 0, + PStat(tpcb, Nb_from_sess), totlen); + ENDPERF + IFDEBUG(D_SYSCALL) + printf( + "PRU_SEND: eot %d before sbappend 0x%x len 0x%x to sb @ 0x%x\n", + eotsdu, m, totlen, sb); + dump_mbuf(sb->sb_mb, "so_snd.sb_mb"); + dump_mbuf(m, "m : to be added"); + ENDDEBUG + tp_packetize(tpcb, m, eotsdu); + IFDEBUG(D_SYSCALL) + printf("PRU_SEND: eot %d after sbappend 0x%x\n", eotsdu, m); + dump_mbuf(sb->sb_mb, "so_snd.sb_mb"); + ENDDEBUG + if (tpcb->tp_state == TP_OPEN) + error = DoEvent(T_DATA_req); + IFDEBUG(D_SYSCALL) + printf("PRU_SEND: after driver error 0x%x \n",error); + printf("so_snd 0x%x cc 0t%d mbcnt 0t%d\n", + sb, sb->sb_cc, sb->sb_mbcnt); + dump_mbuf(sb->sb_mb, "so_snd.sb_mb after driver"); + ENDDEBUG + } + break; + + case PRU_SOCKADDR: + (tpcb->tp_nlproto->nlp_getnetaddr)(tpcb->tp_npcb, nam, TP_LOCAL); + break; + + case PRU_PEERADDR: + (tpcb->tp_nlproto->nlp_getnetaddr)(tpcb->tp_npcb, nam, TP_FOREIGN); + break; + + case PRU_CONTROL: + error = EOPNOTSUPP; + break; + + case PRU_PROTOSEND: + case PRU_PROTORCV: + case PRU_SENSE: + case PRU_SLOWTIMO: + case PRU_FASTTIMO: + error = EOPNOTSUPP; + break; + + default: +#ifdef ARGO_DEBUG + printf("tp_usrreq UNKNOWN PRU %d\n", req); +#endif /* ARGO_DEBUG */ + error = EOPNOTSUPP; + } + + IFDEBUG(D_REQUEST) + printf("%s, so 0x%x, tpcb 0x%x, error %d, state %d\n", + "returning from tp_usrreq", so, tpcb, error, + tpcb ? tpcb->tp_state : 0); + ENDDEBUG + IFTRACE(D_REQUEST) + tptraceTPCB(TPPTusrreq, "END req so m state [", req, so, m, + tpcb ? 
tpcb->tp_state : 0); + ENDTRACE + if (controlp) { + m_freem(controlp); + printf("control data unexpectedly retained in tp_usrreq()"); + } + splx(s); + return error; +} +tp_ltrace(so, uio) +struct socket *so; +struct uio *uio; +{ + IFTRACE(D_DATA) + register struct tp_pcb *tpcb = sototpcb(so); + if (tpcb) { + tptraceTPCB(TPPTmisc, "sosend so resid iovcnt", so, + uio->uio_resid, uio->uio_iovcnt, 0); + } + ENDTRACE +} + +tp_confirm(tpcb) +register struct tp_pcb *tpcb; +{ + struct tp_event E; + if (tpcb->tp_state == TP_CONFIRMING) + return DoEvent(T_ACPT_req); + printf("Tp confirm called when not confirming; tpcb 0x%x, state 0x%x\n", + tpcb, tpcb->tp_state); + return 0; +} + +/* + * Process control data sent with sendmsg() + */ +tp_snd_control(m, so, data) + struct mbuf *m; + struct socket *so; + register struct mbuf **data; +{ + register struct cmsghdr *ch; + int error = 0; + + if (m && m->m_len) { + ch = mtod(m, struct cmsghdr *); + m->m_len -= sizeof (*ch); + m->m_data += sizeof (*ch); + error = tp_ctloutput(PRCO_SETOPT, + so, ch->cmsg_level, ch->cmsg_type, &m); + if (ch->cmsg_type == TPOPT_DISC_DATA) { + if (data && *data) { + m_freem(*data); + *data = 0; + } + error = tp_usrreq(so, PRU_DISCONNECT, (struct mbuf *)0, + (caddr_t)0, (struct mbuf *)0); + } + } + if (m) + m_freem(m); + return error; +} diff --git a/bsd/netiso/tuba_subr.c b/bsd/netiso/tuba_subr.c new file mode 100644 index 000000000..00939d742 --- /dev/null +++ b/bsd/netiso/tuba_subr.c @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tuba_subr.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static struct sockaddr_iso null_siso = { sizeof(null_siso), AF_ISO, }; +extern int tuba_table_size, tcp_keepidle, tcp_keepintvl, tcp_maxidle; +extern int tcppcbcachemiss, tcppredack, tcppreddat, tcprexmtthresh; +extern struct tcpiphdr tcp_saveti; +struct inpcb tuba_inpcb; +struct inpcb *tuba_last_inpcb = &tuba_inpcb; +struct isopcb tuba_isopcb; +/* + * Tuba initialization + */ +tuba_init() +{ +#define TUBAHDRSIZE (3 /*LLC*/ + 9 /*CLNP Fixed*/ + 42 /*Addresses*/ \ + + 6 /*CLNP Segment*/ + 20 /*TCP*/) + + tuba_inpcb.inp_next = tuba_inpcb.inp_prev = &tuba_inpcb; + tuba_isopcb.isop_next = tuba_isopcb.isop_prev = &tuba_isopcb; + tuba_isopcb.isop_faddr = &tuba_isopcb.isop_sfaddr; + tuba_isopcb.isop_laddr = &tuba_isopcb.isop_sladdr; + if (max_protohdr < TUBAHDRSIZE) + max_protohdr = TUBAHDRSIZE; + if (max_linkhdr + TUBAHDRSIZE > MHLEN) + panic("tuba_init"); +} + +struct addr_arg { + int error; + int offset; + u_long sum; +}; + +/* + * Calculate contribution to fudge factor for TCP checksum, + * and coincidentally set pointer for convenience of clnp_output + * if we are are responding when 
there is no isopcb around. + */ +static void +tuba_getaddr(arg, siso, index) + register struct addr_arg *arg; + struct sockaddr_iso **siso; + u_long index; +{ + register struct tuba_cache *tc; + if (index <= tuba_table_size && (tc = tuba_table[index])) { + if (siso) + *siso = &tc->tc_siso; + arg->sum += (arg->offset & 1 ? tc->tc_ssum : tc->tc_sum) + + (0xffff ^ index); + arg->offset += tc->tc_siso.siso_nlen + 1; + } else + arg->error = 1; +} + +tuba_output(m, tp) + register struct mbuf *m; + struct tcpcb *tp; +{ + register struct tcpiphdr *n; + struct isopcb *isop; + struct addr_arg arg; + + if (tp == 0 || (n = tp->t_template) == 0 || + (isop = (struct isopcb *)tp->t_tuba_pcb) == 0) { + isop = &tuba_isopcb; + n = mtod(m, struct tcpiphdr *); + arg.error = arg.sum = arg.offset = 0; + tuba_getaddr(&arg, &tuba_isopcb.isop_faddr, n->ti_dst.s_addr); + tuba_getaddr(&arg, &tuba_isopcb.isop_laddr, n->ti_src.s_addr); + REDUCE(arg.sum, arg.sum); + goto adjust; + } + if (n->ti_sum == 0) { + arg.error = arg.sum = arg.offset = 0; + tuba_getaddr(&arg, (struct sockaddr_iso **)0, n->ti_dst.s_addr); + tuba_getaddr(&arg, (struct sockaddr_iso **)0, n->ti_src.s_addr); + REDUCE(arg.sum, arg.sum); + n->ti_sum = arg.sum; + n = mtod(m, struct tcpiphdr *); + adjust: + if (arg.error) { + m_freem(m); + return (EADDRNOTAVAIL); + } + REDUCE(n->ti_sum, n->ti_sum + (0xffff ^ arg.sum)); + } + m->m_len -= sizeof (struct ip); + m->m_pkthdr.len -= sizeof (struct ip); + m->m_data += sizeof (struct ip); + return (clnp_output(m, isop, m->m_pkthdr.len, 0)); +} + +tuba_refcnt(isop, delta) + struct isopcb *isop; +{ + register struct tuba_cache *tc; + unsigned index, sum; + + if (delta != 1) + delta = -1; + if (isop == 0 || isop->isop_faddr == 0 || isop->isop_laddr == 0 || + (delta == -1 && isop->isop_tuba_cached == 0) || + (delta == 1 && isop->isop_tuba_cached != 0)) + return; + isop->isop_tuba_cached = (delta == 1); + if ((index = tuba_lookup(isop->isop_faddr, M_DONTWAIT)) != 0 && + (tc = 
tuba_table[index]) != 0 && (delta == 1 || tc->tc_refcnt > 0)) + tc->tc_refcnt += delta; + if ((index = tuba_lookup(isop->isop_laddr, M_DONTWAIT)) != 0 && + (tc = tuba_table[index]) != 0 && (delta == 1 || tc->tc_refcnt > 0)) + tc->tc_refcnt += delta; +} + +tuba_pcbdetach(isop) + struct isopcb *isop; +{ + if (isop == 0) + return; + tuba_refcnt(isop, -1); + isop->isop_socket = 0; + iso_pcbdetach(isop); +} + +/* + * Avoid in_pcbconnect in faked out tcp_input() + */ +tuba_pcbconnect(inp, nam) + register struct inpcb *inp; + struct mbuf *nam; +{ + register struct sockaddr_iso *siso; + struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *); + struct tcpcb *tp = intotcpcb(inp); + struct isopcb *isop = (struct isopcb *)tp->t_tuba_pcb; + int error; + + /* hardwire iso_pcbbind() here */ + siso = isop->isop_laddr = &isop->isop_sladdr; + *siso = tuba_table[inp->inp_laddr.s_addr]->tc_siso; + siso->siso_tlen = sizeof(inp->inp_lport); + bcopy((caddr_t)&inp->inp_lport, TSEL(siso), sizeof(inp->inp_lport)); + + /* hardwire in_pcbconnect() here without assigning route */ + inp->inp_fport = sin->sin_port; + inp->inp_faddr = sin->sin_addr; + + /* reuse nam argument to call iso_pcbconnect() */ + nam->m_len = sizeof(*siso); + siso = mtod(nam, struct sockaddr_iso *); + *siso = tuba_table[inp->inp_faddr.s_addr]->tc_siso; + siso->siso_tlen = sizeof(inp->inp_fport); + bcopy((caddr_t)&inp->inp_fport, TSEL(siso), sizeof(inp->inp_fport)); + + if ((error = iso_pcbconnect(isop, nam)) == 0) + tuba_refcnt(isop, 1); + return (error); +} + +/* + * CALLED FROM: + * clnp's input routine, indirectly through the protosw. + * FUNCTION and ARGUMENTS: + * Take a packet (m) from clnp, strip off the clnp header + * and do tcp input processing. + * No return value. 
+ */ +tuba_tcpinput(m, src, dst) + register struct mbuf *m; + struct sockaddr_iso *src, *dst; +{ + unsigned long sum, lindex, findex; + register struct tcpiphdr *ti; + register struct inpcb *inp; + caddr_t optp = NULL; + int optlen; + int len, tlen, off; + register struct tcpcb *tp = 0; + int tiflags; + struct socket *so; + int todrop, acked, ourfinisacked, needoutput = 0; + short ostate; + struct in_addr laddr; + int dropsocket = 0, iss = 0; + u_long tiwin, ts_val, ts_ecr; + int ts_present = 0; + + if ((m->m_flags & M_PKTHDR) == 0) + panic("tuba_tcpinput"); + /* + * Do some housekeeping looking up CLNP addresses. + * If we are out of space might as well drop the packet now. + */ + tcpstat.tcps_rcvtotal++; + lindex = tuba_lookup(dst, M_DONTWAIT); + findex = tuba_lookup(src, M_DONTWAIT); + if (lindex == 0 || findex == 0) + goto drop; + /* + * CLNP gave us an mbuf chain WITH the clnp header pulled up, + * but the data pointer pushed past it. + */ + len = m->m_len; + tlen = m->m_pkthdr.len; + m->m_data -= sizeof(struct ip); + m->m_len += sizeof(struct ip); + m->m_pkthdr.len += sizeof(struct ip); + m->m_flags &= ~(M_MCAST|M_BCAST); /* XXX should do this in clnp_input */ + /* + * The reassembly code assumes it will be overwriting a useless + * part of the packet, which is why we need to have it point + * into the packet itself. + * + * Check to see if the data is properly alligned + * so that we can save copying the tcp header. + * This code knows way too much about the structure of mbufs! + */ + off = ((sizeof (long) - 1) & ((m->m_flags & M_EXT) ? 
+ (m->m_data - m->m_ext.ext_buf) : (m->m_data - m->m_pktdat))); + if (off || len < sizeof(struct tcphdr)) { + struct mbuf *m0 = m; + + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == 0) { + m = m0; + goto drop; + } + m->m_next = m0; + m->m_data += max_linkhdr; + m->m_pkthdr = m0->m_pkthdr; + m->m_flags = m0->m_flags & M_COPYFLAGS; + if (len < sizeof(struct tcphdr)) { + m->m_len = 0; + if ((m = m_pullup(m, sizeof(struct tcpiphdr))) == 0) { + tcpstat.tcps_rcvshort++; + return; + } + } else { + bcopy(mtod(m0, caddr_t) + sizeof(struct ip), + mtod(m, caddr_t) + sizeof(struct ip), + sizeof(struct tcphdr)); + m0->m_len -= sizeof(struct tcpiphdr); + m0->m_data += sizeof(struct tcpiphdr); + m->m_len = sizeof(struct tcpiphdr); + } + } + /* + * Calculate checksum of extended TCP header and data, + * replacing what would have been IP addresses by + * the IP checksum of the CLNP addresses. + */ + ti = mtod(m, struct tcpiphdr *); + ti->ti_dst.s_addr = tuba_table[lindex]->tc_sum; + if (dst->siso_nlen & 1) + ti->ti_src.s_addr = tuba_table[findex]->tc_sum; + else + ti->ti_src.s_addr = tuba_table[findex]->tc_ssum; + ti->ti_prev = ti->ti_next = 0; + ti->ti_x1 = 0; ti->ti_pr = ISOPROTO_TCP; + ti->ti_len = htons((u_short)tlen); + if (ti->ti_sum = in_cksum(m, m->m_pkthdr.len)) { + tcpstat.tcps_rcvbadsum++; + goto drop; + } + ti->ti_src.s_addr = findex; + ti->ti_dst.s_addr = lindex; + /* + * Now include the rest of TCP input + */ +#define TUBA_INCLUDE +#define in_pcbconnect tuba_pcbconnect +#define tcb tuba_inpcb +#define tcp_last_inpcb tuba_last_inpcb + +#include +} + +#define tcp_slowtimo tuba_slowtimo +#define tcp_fasttimo tuba_fasttimo + +#include diff --git a/bsd/netiso/tuba_table.c b/bsd/netiso/tuba_table.c new file mode 100644 index 000000000..3d8a005c2 --- /dev/null +++ b/bsd/netiso/tuba_table.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tuba_table.c 8.2 (Berkeley) 11/15/93 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +int tuba_table_size; +struct tuba_cache **tuba_table; +struct radix_node_head *tuba_tree; +extern int arpt_keep, arpt_prune; /* use same values as arp cache */ + +void +tuba_timer() +{ + int s = splnet(); + int i; + register struct tuba_cache *tc; + long timelimit = time.tv_sec - arpt_keep; + + timeout(tuba_timer, (caddr_t)0, arpt_prune * hz); + for (i = tuba_table_size; i > 0; i--) + if ((tc = tuba_table[i]) && (tc->tc_refcnt == 0) && + (tc->tc_time < timelimit)) { + tuba_table[i] = 0; + rn_delete(&tc->tc_siso.siso_addr, NULL, tuba_tree); + FREE((caddr_t)tc, M_RTABLE); + } + splx(s); +} + +tuba_table_init() +{ + rn_inithead((void **)&tuba_tree, 40); + timeout(tuba_timer, (caddr_t)0, arpt_prune * hz); +} + +int +tuba_lookup(siso, wait) + register struct sockaddr_iso *siso; +{ + struct radix_node *rn, *rn_match(); + register struct tuba_cache *tc; + struct tuba_cache **new; + int dupentry = 0, sum_a = 0, sum_b = 0, old_size, i; + + if ((rn = rn_match((caddr_t)&siso->siso_addr, 
tuba_tree->rnh_treetop)) + && ((rn->rn_flags & RNF_ROOT) == 0)) { + tc = (struct tuba_cache *)rn; + tc->tc_time = time.tv_sec; + return (tc->tc_index); + } +// if ((tc = (struct tuba_cache *)malloc(sizeof(*tc), M_RTABLE, wait)) +// == NULL) + if (wait == M_DONTWAIT) + MALLOC(tc, struct tuba_cache *, sizeof(*tc), M_RTABLE, M_NOWAIT); + else + MALLOC(tc, struct tuba_cache *, sizeof(*tc), M_RTABLE, M_WAITOK); + if (tc == NULL) + return (0); + bzero((caddr_t)tc, sizeof (*tc)); + bcopy(siso->siso_data, tc->tc_siso.siso_data, + tc->tc_siso.siso_nlen = siso->siso_nlen); + rn_insert(&tc->tc_siso.siso_addr, tuba_tree, &dupentry, tc->tc_nodes); + if (dupentry) + panic("tuba_lookup 1"); + tc->tc_siso.siso_family = AF_ISO; + tc->tc_siso.siso_len = sizeof(tc->tc_siso); + tc->tc_time = time.tv_sec; + for (i = sum_a = tc->tc_siso.siso_nlen; --i >= 0; ) + (i & 1 ? sum_a : sum_b) += (u_char)tc->tc_siso.siso_data[i]; + REDUCE(tc->tc_sum, (sum_a << 8) + sum_b); + HTONS(tc->tc_sum); + SWAB(tc->tc_ssum, tc->tc_sum); + for (i = tuba_table_size; i > 0; i--) + if (tuba_table[i] == 0) + goto fixup; + old_size = tuba_table_size; + if (tuba_table_size == 0) + tuba_table_size = 15; + if (tuba_table_size > 0x7fff) + return (0); + tuba_table_size = 1 + 2 * tuba_table_size; + i = (tuba_table_size + 1) * sizeof(tc); +// new = (struct tuba_cache **)malloc((unsigned)i, M_RTABLE, wait); + if (wait == M_DONTWAIT) + MALLOC(new, struct tuba_cache **, i, M_RTABLE, M_NOWAIT); + else + MALLOC(new, struct tuba_cache **, i, M_RTABLE, M_WAITOK); + if (new == 0) { + tuba_table_size = old_size; + rn_delete(&tc->tc_siso.siso_addr, NULL, tuba_tree); + FREE((caddr_t)tc, M_RTABLE); + return (0); + } + bzero((caddr_t)new, (unsigned)i); + if (tuba_table) { + bcopy((caddr_t)tuba_table, (caddr_t)new, i >> 1); + FREE((caddr_t)tuba_table, M_RTABLE); + } + tuba_table = new; + i = tuba_table_size; +fixup: + tuba_table[i] = tc; + tc->tc_index = i; + return (tc->tc_index); +} diff --git a/bsd/netiso/tuba_table.h 
b/bsd/netiso/tuba_table.h new file mode 100644 index 000000000..9d66ef9c6 --- /dev/null +++ b/bsd/netiso/tuba_table.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tuba_table.h 8.1 (Berkeley) 6/10/93 + */ + +struct tuba_cache { + struct radix_node tc_nodes[2]; /* convenient lookup */ + int tc_refcnt; + int tc_time; /* last looked up */ + int tc_flags; +#define TCF_PERM 1 + int tc_index; + u_short tc_sum; /* cksum of nsap inc. length */ + u_short tc_ssum; /* swab(tc_sum) */ + struct sockaddr_iso tc_siso; /* for responding */ +}; + +#define ADDCARRY(x) (x >= 65535 ? 
x -= 65535 : x) +#define REDUCE(a, b) { union { u_short s[2]; long l;} l_util; long x; \ + l_util.l = (b); x = l_util.s[0] + l_util.s[1]; ADDCARRY(x); \ + if (x == 0) x = 0xffff; a = x;} +#define SWAB(a, b) { union { u_char c[2]; u_short s;} s; u_char t; \ + s.s = (b); t = s.c[0]; s.c[0] = s.c[1]; s.c[1] = t; a = s.s;} + +#ifdef KERNEL +extern int tuba_table_size; +extern struct tuba_cache **tuba_table; +extern struct radix_node_head *tuba_tree; +#endif diff --git a/bsd/netiso/tuba_usrreq.c b/bsd/netiso/tuba_usrreq.c new file mode 100644 index 000000000..b3e5b60b1 --- /dev/null +++ b/bsd/netiso/tuba_usrreq.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tuba_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +/* + * TCP protocol interface to socket abstraction. + */ +extern char *tcpstates[]; +extern struct inpcb tuba_inpcb; +extern struct isopcb tuba_isopcb; + +/* + * Process a TCP user request for TCP tb. 
If this is a send request + * then m is the mbuf chain of send data. If this is a timer expiration + * (called from the software clock routine), then timertype tells which timer. + */ +/*ARGSUSED*/ +tuba_usrreq(so, req, m, nam, control) + struct socket *so; + int req; + struct mbuf *m, *nam, *control; +{ + register struct inpcb *inp; + register struct isopcb *isop; + register struct tcpcb *tp; + int s; + int error = 0; + int ostate; + struct sockaddr_iso *siso; + + if (req == PRU_CONTROL) + return (iso_control(so, (int)m, (caddr_t)nam, + (struct ifnet *)control)); + + s = splnet(); + inp = sotoinpcb(so); + /* + * When a TCP is attached to a socket, then there will be + * a (struct inpcb) pointed at by the socket, and this + * structure will point at a subsidary (struct tcpcb). + */ + if (inp == 0 && req != PRU_ATTACH) { + splx(s); + return (EINVAL); /* XXX */ + } + if (inp) { + tp = intotcpcb(inp); + if (tp == 0) + panic("tuba_usrreq"); + ostate = tp->t_state; + isop = (struct isopcb *)tp->t_tuba_pcb; + if (isop == 0) + panic("tuba_usrreq 2"); + } else + ostate = 0; + switch (req) { + + /* + * TCP attaches to socket via PRU_ATTACH, reserving space, + * and an internet control block. We also need to + * allocate an isopcb and separate the control block from + * tcp/ip ones. + */ + case PRU_ATTACH: + if (error = iso_pcballoc(so, &tuba_isopcb)) + break; + isop = (struct isopcb *)so->so_pcb; + so->so_pcb = 0; + if (error = tcp_usrreq(so, req, m, nam, control)) { + isop->isop_socket = 0; + iso_pcbdetach(isop); + } else { + inp = sotoinpcb(so); + remque(inp); + insque(inp, &tuba_inpcb); + inp->inp_head = &tuba_inpcb; + tp = intotcpcb(inp); + if (tp == 0) + panic("tuba_usrreq 3"); + tp->t_tuba_pcb = (caddr_t) isop; + } + goto notrace; + + /* + * PRU_DETACH detaches the TCP protocol from the socket. 
+ * If the protocol state is non-embryonic, then can't + * do this directly: have to initiate a PRU_DISCONNECT, + * which may finish later; embryonic TCB's can just + * be discarded here. + */ + case PRU_DETACH: + if (tp->t_state > TCPS_LISTEN) + tp = tcp_disconnect(tp); + else + tp = tcp_close(tp); + if (tp == 0) + tuba_pcbdetach(isop); + break; + + /* + * Give the socket an address. + */ + case PRU_BIND: + siso = mtod(nam, struct sockaddr_iso *); + if (siso->siso_tlen && siso->siso_tlen != 2) { + error = EINVAL; + break; + } + if ((error = iso_pcbbind(isop, nam)) || + (siso = isop->isop_laddr) == 0) + break; + bcopy(TSEL(siso), &inp->inp_lport, 2); + if (siso->siso_nlen && + !(inp->inp_laddr.s_addr = tuba_lookup(siso, M_WAITOK))) + error = ENOBUFS; + break; + + /* + * Prepare to accept connections. + */ + case PRU_CONNECT: + case PRU_LISTEN: + if (inp->inp_lport == 0 && + (error = iso_pcbbind(isop, (struct mbuf *)0))) + break; + bcopy(TSEL(isop->isop_laddr), &inp->inp_lport, 2); + if (req == PRU_LISTEN) { + tp->t_state = TCPS_LISTEN; + break; + } + /*FALLTHROUGH*/ + /* + * Initiate connection to peer. + * Create a template for use in transmissions on this connection. + * Enter SYN_SENT state, and mark socket as connecting. + * Start keep-alive timer, and seed output sequence space. + * Send initial segment on connection. 
+ */ + /* case PRU_CONNECT: */ + if (error = iso_pcbconnect(isop, nam)) + break; + if ((siso = isop->isop_laddr) && siso->siso_nlen > 1) + siso->siso_data[siso->siso_nlen - 1] = ISOPROTO_TCP; + else + panic("tuba_usrreq: connect"); + siso = mtod(nam, struct sockaddr_iso *); + if (!(inp->inp_faddr.s_addr = tuba_lookup(siso, M_WAITOK))) { + unconnect: + iso_pcbdisconnect(isop); + error = ENOBUFS; + break; + } + bcopy(TSEL(isop->isop_faddr), &inp->inp_fport, 2); + if (inp->inp_laddr.s_addr == 0 && + (inp->inp_laddr.s_addr = + tuba_lookup(isop->isop_laddr, M_WAITOK)) == 0) + goto unconnect; + if ((tp->t_template = tcp_template(tp)) == 0) + goto unconnect; + soisconnecting(so); + tcpstat.tcps_connattempt++; + tp->t_state = TCPS_SYN_SENT; + tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT; + tp->iss = tcp_iss; tcp_iss += TCP_ISSINCR/2; + tcp_sendseqinit(tp); + error = tcp_output(tp); + tuba_refcnt(isop, 1); + break; + + /* + * Initiate disconnect from peer. + * If connection never passed embryonic stage, just drop; + * else if don't need to let data drain, then can just drop anyways, + * else have to begin TCP shutdown process: mark socket disconnecting, + * drain unread data, state switch to reflect user close, and + * send segment (e.g. FIN) to peer. Socket will be really disconnected + * when peer sends FIN and acks ours. + * + * SHOULD IMPLEMENT LATER PRU_CONNECT VIA REALLOC TCPCB. + */ + case PRU_DISCONNECT: + if ((tp = tcp_disconnect(tp)) == 0) + tuba_pcbdetach(isop); + break; + + /* + * Accept a connection. Essentially all the work is + * done at higher levels; just return the address + * of the peer, storing through addr. + */ + case PRU_ACCEPT: + bcopy((caddr_t)isop->isop_faddr, mtod(nam, caddr_t), + nam->m_len = isop->isop_faddr->siso_len); + break; + + /* + * Mark the connection as being incapable of further output. 
+ */ + case PRU_SHUTDOWN: + socantsendmore(so); + tp = tcp_usrclosed(tp); + if (tp) + error = tcp_output(tp); + else + tuba_pcbdetach(isop); + break; + /* + * Abort the TCP. + */ + case PRU_ABORT: + if ((tp = tcp_drop(tp, ECONNABORTED)) == 0) + tuba_pcbdetach(isop); + break; + + + case PRU_SOCKADDR: + if (isop->isop_laddr) + bcopy((caddr_t)isop->isop_laddr, mtod(nam, caddr_t), + nam->m_len = isop->isop_laddr->siso_len); + break; + + case PRU_PEERADDR: + if (isop->isop_faddr) + bcopy((caddr_t)isop->isop_faddr, mtod(nam, caddr_t), + nam->m_len = isop->isop_faddr->siso_len); + break; + + default: + error = tcp_usrreq(so, req, m, nam, control); + goto notrace; + } + if (tp && (so->so_options & SO_DEBUG)) + tcp_trace(TA_USER, ostate, tp, (struct tcpiphdr *)0, req); +notrace: + splx(s); + return(error); +} + +tuba_ctloutput(op, so, level, optname, mp) + int op; + struct socket *so; + int level, optname; + struct mbuf **mp; +{ + int clnp_ctloutput(), tcp_ctloutput(); + + return ((level != IPPROTO_TCP ? 
clnp_ctloutput : tcp_ctloutput) + (op, so, level, optname, mp)); +} diff --git a/bsd/netkey/Makefile b/bsd/netkey/Makefile new file mode 100644 index 000000000..0ef16274a --- /dev/null +++ b/bsd/netkey/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + key.h key_debug.h keydb.h keysock.h keyv2.h key_var.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = netkey + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = netkey + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/netkey/key.c b/bsd/netkey/key.c new file mode 100644 index 000000000..9db82dcfd --- /dev/null +++ b/bsd/netkey/key.c @@ -0,0 +1,6899 @@ +/* $KAME: key.c,v 1.76 2000/03/27 05:11:04 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + * This code is referd to RFC 2367 + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#ifdef __NetBSD__ +#include "opt_ipsec.h" +#endif +#endif + +/* this is for backward compatibility. we should not touch those. */ +#define ss_len __ss_len +#define ss_family __ss_family + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(__FreeBSD__) || defined (__APPLE__) +#include +#endif +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#if INET6 +#include +#include +#include +#endif /* INET6 */ + +#if INET +#include +#endif +#if INET6 +#if !(defined(__bsdi__) && _BSDI_VERSION >= 199802) +#include +#endif +#endif /* INET6 */ + +#include +#include +#include +#include +#include + +#include +#include +#if IPSEC_ESP +#include +#endif +#include + +#include + +/* + * Note on SA reference counting: + * - SAs that are not in DEAD state will have (total external reference + 1) + * following value in reference count field. they cannot be freed and are + * referenced from SA header. 
+ * - SAs that are in DEAD state will have (total external reference) + * in reference count field. they are ready to be freed. reference from + * SA header will be removed in key_delsav(), when the reference count + * field hits 0 (= no external reference other than from SA header. + */ + +u_int32_t key_debug_level = 0; //### our sysctl is not dynamic +static u_int key_spi_trycnt = 1000; +static u_int32_t key_spi_minval = 0x100; +static u_int32_t key_spi_maxval = 0x0fffffff; /* XXX */ +static u_int32_t policy_id = 0; +static u_int key_int_random = 60; /*interval to initialize randseed,1(m)*/ +static u_int key_larval_lifetime = 30; /* interval to expire acquiring, 30(s)*/ +static int key_blockacq_count = 10; /* counter for blocking SADB_ACQUIRE.*/ +static int key_blockacq_lifetime = 20; /* lifetime for blocking SADB_ACQUIRE.*/ + +static u_int32_t acq_seq = 0; +static int key_tick_init_random = 0; + +static LIST_HEAD(_sptree, secpolicy) sptree[IPSEC_DIR_MAX]; /* SPD */ +static LIST_HEAD(_sahtree, secashead) sahtree; /* SAD */ +static LIST_HEAD(_regtree, secreg) regtree[SADB_SATYPE_MAX + 1]; + /* registed list */ +#ifndef IPSEC_NONBLOCK_ACQUIRE +static LIST_HEAD(_acqtree, secacq) acqtree; /* acquiring list */ +#endif +static LIST_HEAD(_spacqtree, secspacq) spacqtree; /* SP acquiring list */ + +struct key_cb key_cb; + +/* search order for SAs */ +static u_int saorder_state_valid[] = { + SADB_SASTATE_DYING, SADB_SASTATE_MATURE, + /* + * This order is important because we must select a oldest SA + * for outbound processing. For inbound, This is not important. 
+ */ +}; +static u_int saorder_state_alive[] = { + /* except DEAD */ + SADB_SASTATE_MATURE, SADB_SASTATE_DYING, SADB_SASTATE_LARVAL +}; +static u_int saorder_state_any[] = { + SADB_SASTATE_MATURE, SADB_SASTATE_DYING, + SADB_SASTATE_LARVAL, SADB_SASTATE_DEAD +}; + +#if defined(__FreeBSD__) || defined (__APPLE__) +SYSCTL_DECL(_net_key); +//#if defined(IPSEC_DEBUG) +SYSCTL_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug, CTLFLAG_RW, \ + &key_debug_level, 0, ""); +//#endif /* defined(IPSEC_DEBUG) */ + +/* max count of trial for the decision of spi value */ +SYSCTL_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt, CTLFLAG_RW, \ + &key_spi_trycnt, 0, ""); + +/* minimum spi value to allocate automatically. */ +SYSCTL_INT(_net_key, KEYCTL_SPI_MIN_VALUE, spi_minval, CTLFLAG_RW, \ + &key_spi_minval, 0, ""); + +/* maximun spi value to allocate automatically. */ +SYSCTL_INT(_net_key, KEYCTL_SPI_MAX_VALUE, spi_maxval, CTLFLAG_RW, \ + &key_spi_maxval, 0, ""); + +/* interval to initialize randseed */ +SYSCTL_INT(_net_key, KEYCTL_RANDOM_INT, int_random, CTLFLAG_RW, \ + &key_int_random, 0, ""); + +/* lifetime for larval SA */ +SYSCTL_INT(_net_key, KEYCTL_LARVAL_LIFETIME, larval_lifetime, CTLFLAG_RW, \ + &key_larval_lifetime, 0, ""); + +/* counter for blocking to send SADB_ACQUIRE to IKEd */ +SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count, CTLFLAG_RW, \ + &key_blockacq_count, 0, ""); + +/* lifetime for blocking to send SADB_ACQUIRE to IKEd */ +SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime, CTLFLAG_RW, \ + &key_blockacq_lifetime, 0, ""); + +#endif /* __FreeBSD__ */ + +#ifndef LIST_FOREACH +#define LIST_FOREACH(elm, head, field) \ + for (elm = LIST_FIRST(head); elm; elm = LIST_NEXT(elm, field)) +#endif +#define __LIST_CHAINED(elm) \ + (!((elm)->chain.le_next == NULL && (elm)->chain.le_prev == NULL)) +#define LIST_INSERT_TAIL(head, elm, type, field) \ +do {\ + struct type *curelm = LIST_FIRST(head); \ + if (curelm == NULL) {\ + LIST_INSERT_HEAD(head, elm, field); \ + } 
else { \ + while (LIST_NEXT(curelm, field)) \ + curelm = LIST_NEXT(curelm, field);\ + LIST_INSERT_AFTER(curelm, elm, field);\ + }\ +} while (0) + +#define KEY_CHKSASTATE(head, sav, name) \ +do { \ + if ((head) != (sav)) { \ + printf("%s: state mismatched (TREE=%d SA=%d)\n", \ + (name), (head), (sav)); \ + continue; \ + } \ +} while (0) + +#define KEY_CHKSPDIR(head, sp, name) \ +do { \ + if ((head) != (sp)) { \ + printf("%s: direction mismatched (TREE=%d SP=%d), " \ + "anyway continue.\n", \ + (name), (head), (sp)); \ + } \ +} while (0) + +#if 1 +#define KMALLOC(p, t, n) \ + ((p) = (t) _MALLOC((unsigned long)(n), M_SECA, M_NOWAIT)) +#define KFREE(p) \ + _FREE((caddr_t)(p), M_SECA); +#else +#define KMALLOC(p, t, n) \ +do { \ + ((p) = (t)_MALLOC((unsigned long)(n), M_SECA, M_NOWAIT)); \ + printf("%s %d: %p <- KMALLOC(%s, %d)\n", \ + __FILE__, __LINE__, (p), #t, n); \ +} while (0) + +#define KFREE(p) \ + do { \ + printf("%s %d: %p -> KFREE()\n", __FILE__, __LINE__, (p)); \ + _FREE((caddr_t)(p), M_SECA); \ + } while (0) +#endif + +/* + * set parameters into secpolicyindex buffer. + * Must allocate secpolicyindex buffer passed to this function. + */ +#define KEY_SETSECSPIDX(_dir, s, d, ps, pd, ulp, idx) \ +do { \ + bzero((idx), sizeof(struct secpolicyindex)); \ + (idx)->dir = (_dir); \ + (idx)->prefs = (ps); \ + (idx)->prefd = (pd); \ + (idx)->ul_proto = (ulp); \ + bcopy((s), &(idx)->src, ((struct sockaddr *)(s))->sa_len); \ + bcopy((d), &(idx)->dst, ((struct sockaddr *)(d))->sa_len); \ +} while (0) + +/* + * set parameters into secasindex buffer. + * Must allocate secasindex buffer before calling this function. 
+ */ +#define KEY_SETSECASIDX(p, m, s, d, idx) \ +do { \ + bzero((idx), sizeof(struct secasindex)); \ + (idx)->proto = (p); \ + (idx)->mode = (m)->sadb_msg_mode; \ + (idx)->reqid = (m)->sadb_msg_reqid; \ + bcopy((s), &(idx)->src, ((struct sockaddr *)(s))->sa_len); \ + bcopy((d), &(idx)->dst, ((struct sockaddr *)(d))->sa_len); \ +} while (0) + +/* key statistics */ +struct _keystat { + u_long getspi_count; /* the avarage of count to try to get new SPI */ +} keystat; + +static struct secasvar *key_allocsa_policy __P((struct secasindex *saidx)); +static void key_freesp_so __P((struct secpolicy **sp)); +static struct secasvar *key_do_allocsa_policy __P((struct secashead *sah, + u_int state)); +static void key_delsp __P((struct secpolicy *sp)); +static struct secpolicy *key_getsp __P((struct secpolicyindex *spidx)); +static struct secpolicy *key_getspbyid __P((u_int32_t id)); +static u_int32_t key_newreqid __P((void)); +static struct sadb_msg *key_spdadd __P((caddr_t *mhp)); +static u_int32_t key_getnewspid __P((void)); +static struct sadb_msg *key_spddelete __P((caddr_t *mhp)); +static struct sadb_msg *key_spddelete2 __P((caddr_t *mhp)); +static int key_spdget __P((caddr_t *mhp, struct socket *so, int target)); +static struct sadb_msg *key_spdflush __P((caddr_t *mhp)); +static int key_spddump __P((caddr_t *mhp, struct socket *so, int target)); +static struct mbuf *key_setdumpsp __P((struct secpolicy *sp, + u_int8_t type, u_int32_t seq, u_int32_t pid)); +static u_int key_getspmsglen __P((struct secpolicy *sp)); +static u_int key_getspreqmsglen __P((struct secpolicy *sp)); +static struct secashead *key_newsah __P((struct secasindex *saidx)); +static void key_delsah __P((struct secashead *sah)); +static struct secasvar *key_newsav __P((caddr_t *mhp, struct secashead *sah)); +static void key_delsav __P((struct secasvar *sav)); +static struct secashead *key_getsah __P((struct secasindex *saidx)); +static struct secasvar *key_checkspidup __P((struct secasindex *saidx, + 
u_int32_t spi)); +static struct secasvar *key_getsavbyspi __P((struct secashead *sah, + u_int32_t spi)); +static int key_setsaval __P((struct secasvar *sav, caddr_t *mhp)); +static u_int key_getmsglen __P((struct secasvar *sav)); +static int key_mature __P((struct secasvar *sav)); +static u_int key_setdumpsa __P((struct sadb_msg *newmsg, struct secasvar *sav, + u_int8_t type, u_int8_t satype, + u_int32_t seq, u_int32_t pid)); +#if 1 +static int key_setsadbmsg_m __P((struct mbuf *, u_int8_t type, int tlen, + u_int8_t satype, u_int32_t seq, pid_t pid, + u_int8_t mode, u_int32_t reqid, + u_int8_t reserved1, u_int32_t reserved2)); +#endif +static caddr_t key_setsadbmsg __P((caddr_t buf, u_int8_t type, int tlen, + u_int8_t satype, u_int32_t seq, pid_t pid, + u_int8_t mode, u_int32_t reqid, + u_int8_t reserved1, u_int32_t reserved2)); +static caddr_t key_setsadbsa __P((caddr_t buf, struct secasvar *sav)); +#if 1 +static int key_setsadbaddr_m __P((struct mbuf *m, u_int16_t exttype, + struct sockaddr *saddr, u_int8_t prefixlen, u_int16_t ul_proto)); +#endif +static caddr_t key_setsadbaddr __P((caddr_t buf, u_int16_t exttype, + struct sockaddr *saddr, u_int8_t prefixlen, u_int16_t ul_proto)); +static caddr_t key_setsadbident + __P((caddr_t buf, u_int16_t exttype, u_int16_t idtype, + caddr_t string, int stringlen, u_int64_t id)); +static caddr_t key_setsadbxpolicy + __P((caddr_t buf, u_int16_t type, u_int8_t dir, u_int32_t id)); +static caddr_t key_setsadbext __P((caddr_t p, caddr_t ext)); +static void *key_newbuf __P((void *src, u_int len)); +#if INET6 +static int key_ismyaddr6 __P((caddr_t addr)); +#endif +#if 0 +static int key_isloopback __P((u_int family, caddr_t addr)); +#endif +static int key_cmpsaidx_exactly + __P((struct secasindex *saidx0, struct secasindex *saidx1)); +static int key_cmpsaidx_withmode + __P((struct secasindex *saidx0, struct secasindex *saidx1)); +static int key_cmpspidx_exactly + __P((struct secpolicyindex *spidx0, struct secpolicyindex *spidx1)); 
+static int key_cmpspidx_withmask + __P((struct secpolicyindex *spidx0, struct secpolicyindex *spidx1)); +static int key_bbcmp __P((caddr_t p1, caddr_t p2, u_int bits)); +static u_int16_t key_satype2proto __P((u_int8_t satype)); +static u_int8_t key_proto2satype __P((u_int16_t proto)); + +static struct sadb_msg *key_getspi __P((caddr_t *mhp)); +static u_int32_t key_do_getnewspi __P((struct sadb_spirange *spirange, + struct secasindex *saidx)); +static struct sadb_msg *key_update __P((caddr_t *mhp)); +#ifdef IPSEC_DOSEQCHECK +static struct secasvar *key_getsavbyseq __P((struct secashead *sah, + u_int32_t seq)); +#endif +static struct sadb_msg *key_add __P((caddr_t *mhp)); +static int key_setident __P((struct secashead *sah, caddr_t *mhp)); +static struct sadb_msg *key_getmsgbuf_x1 __P((caddr_t *mhp)); +static struct sadb_msg *key_delete __P((caddr_t *mhp)); +static struct sadb_msg *key_get __P((caddr_t *mhp)); +static int key_acquire __P((struct secasindex *, struct secpolicy *)); +static struct secacq *key_newacq __P((struct secasindex *saidx)); +static struct secacq *key_getacq __P((struct secasindex *saidx)); +static struct secacq *key_getacqbyseq __P((u_int32_t seq)); +static struct secspacq *key_newspacq __P((struct secpolicyindex *spidx)); +static struct secspacq *key_getspacq __P((struct secpolicyindex *spidx)); +static struct sadb_msg *key_acquire2 __P((caddr_t *mhp)); +static struct sadb_msg *key_register __P((caddr_t *mhp, struct socket *so)); +static int key_expire __P((struct secasvar *sav)); +static struct sadb_msg *key_flush __P((caddr_t *mhp)); +static int key_dump __P((caddr_t *mhp, struct socket *so, int target)); +static void key_promisc __P((caddr_t *mhp, struct socket *so)); +static int key_sendall __P((struct sadb_msg *msg, u_int len)); +static int key_align __P((struct sadb_msg *msg, caddr_t *mhp)); +#if 0 +static const char *key_getfqdn __P((void)); +static const char *key_getuserfqdn __P((void)); +#endif +static void key_sa_chgstate 
__P((struct secasvar *sav, u_int8_t state)); +static caddr_t key_appendmbuf __P((struct mbuf *, int)); + +/* %%% IPsec policy management */ +/* + * allocating a SP for OUTBOUND or INBOUND packet. + * Must call key_freesp() later. + * OUT: NULL: not found + * others: found and return the pointer. + */ +struct secpolicy * +key_allocsp(spidx, dir) + struct secpolicyindex *spidx; + u_int dir; +{ + struct secpolicy *sp; + int s; + + /* sanity check */ + if (spidx == NULL) + panic("key_allocsp: NULL pointer is passed.\n"); + + /* check direction */ + switch (dir) { + case IPSEC_DIR_INBOUND: + case IPSEC_DIR_OUTBOUND: + break; + default: + panic("key_allocsp: Invalid direction is passed.\n"); + } + + /* get a SP entry */ +#ifdef __NetBSD__ + s = splsoftnet(); /*called from softclock()*/ +#else + s = splnet(); /*called from softclock()*/ +#endif + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("*** objects\n"); + kdebug_secpolicyindex(spidx)); + + LIST_FOREACH(sp, &sptree[dir], chain) { + KEYDEBUG(KEYDEBUG_IPSEC_DATA, + printf("*** in SPD\n"); + kdebug_secpolicyindex(&sp->spidx)); + + if (sp->state == IPSEC_SPSTATE_DEAD) + continue; + if (key_cmpspidx_withmask(&sp->spidx, spidx)) + goto found; + } + + splx(s); + return NULL; + +found: + /* sanity check */ + KEY_CHKSPDIR(sp->spidx.dir, dir, "key_allocsp"); + + /* found a SPD entry */ + sp->refcnt++; + splx(s); + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP key_allocsp cause refcnt++:%d SP:%p\n", + sp->refcnt, sp)); + + return sp; +} + +/* + * allocating a SA entry for a *OUTBOUND* packet. + * checking each request entries in SP, and acquire SA if need. + * OUT: 0: there are valid requests. + * ENOENT: policy may be valid, but SA with REQUIRE is on acquiring. 
+ */ +int +key_checkrequest(isr, saidx) + struct ipsecrequest *isr; + struct secasindex *saidx; +{ + u_int level; + int error; + + /* sanity check */ + if (isr == NULL || saidx == NULL) + panic("key_checkrequest: NULL pointer is passed.\n"); + + /* check mode */ + switch (saidx->mode) { + case IPSEC_MODE_TRANSPORT: + case IPSEC_MODE_TUNNEL: + break; + case IPSEC_MODE_ANY: + default: + panic("key_checkrequest: Invalid policy defined.\n"); + } + + /* get current level */ + level = ipsec_get_reqlevel(isr); + +#if 0 + /* + * We do allocate new SA only if the state of SA in the holder is + * SADB_SASTATE_DEAD. The SA for outbound must be the oldest. + */ + if (isr->sav != NULL) { + if (isr->sav->sah == NULL) + panic("key_checkrequest: sah is null.\n"); + if (isr->sav == (struct secasvar *)LIST_FIRST( + &isr->sav->sah->savtree[SADB_SASTATE_DEAD])) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP checkrequest calls free SA:%p\n", + isr->sav)); + key_freesav(isr->sav); + isr->sav = NULL; + } + } +#else + /* + * we free any SA stashed in the IPsec request because a different + * SA may be involved each time this request is checked, either + * because new SAs are being configured, or this request is + * associated with an unconnected datagram socket, or this request + * is associated with a system default policy. + * + * The operation may have negative impact to performance. We may + * want to check cached SA carefully, rather than picking new SA + * every time. + */ + if (isr->sav != NULL) { + key_freesav(isr->sav); + isr->sav = NULL; + } +#endif + + /* + * new SA allocation if no SA found. + * key_allocsa_policy should allocate the oldest SA available. + * See key_do_allocsa_policy(), and draft-jenkins-ipsec-rekeying-03.txt. + */ + if (isr->sav == NULL) + isr->sav = key_allocsa_policy(saidx); + + /* When there is SA. */ + if (isr->sav != NULL) + return 0; + + /* there is no SA */ + if ((error = key_acquire(saidx, isr->sp)) != 0) { + /* XXX What I do ? 
*/ +#ifdef IPSEC_DEBUG + printf("key_checkrequest: error %d returned " + "from key_acquire.\n", error); +#endif + return error; + } + + return level == IPSEC_LEVEL_REQUIRE ? ENOENT : 0; +} + +/* + * allocating a SA for policy entry from SAD. + * NOTE: searching SAD of aliving state. + * OUT: NULL: not found. + * others: found and return the pointer. + */ +static struct secasvar * +key_allocsa_policy(saidx) + struct secasindex *saidx; +{ + struct secashead *sah; + struct secasvar *sav; + u_int stateidx, state; + + LIST_FOREACH(sah, &sahtree, chain) { + if (sah->state == SADB_SASTATE_DEAD) + continue; + if (key_cmpsaidx_withmode(&sah->saidx, saidx)) + goto found; + } + + return NULL; + + found: + + /* search valid state */ + for (stateidx = 0; + stateidx < _ARRAYLEN(saorder_state_valid); + stateidx++) { + + state = saorder_state_valid[stateidx]; + + sav = key_do_allocsa_policy(sah, state); + if (sav != NULL) + return sav; + } + + return NULL; +} + +/* + * searching SAD with direction, protocol, mode and state. + * called by key_allocsa_policy(). + * OUT: + * NULL : not found + * others : found, pointer to a SA. + */ +static struct secasvar * +key_do_allocsa_policy(sah, state) + struct secashead *sah; + u_int state; +{ + struct secasvar *sav, *candidate; + + /* initilize */ + candidate = NULL; + + LIST_FOREACH(sav, &sah->savtree[state], chain) { + + /* sanity check */ + KEY_CHKSASTATE(sav->state, state, "key_do_allocsa_policy"); + + /* initialize */ + if (candidate == NULL) { + candidate = sav; + continue; + } + + /* Which SA is the better ? */ + + /* sanity check 2 */ + if (candidate->lft_c == NULL || sav->lft_c == NULL) + panic("key_do_allocsa_policy: " + "lifetime_current is NULL.\n"); + + /* XXX What the best method is to compare ? 
*/ + if (candidate->lft_c->sadb_lifetime_addtime > + sav->lft_c->sadb_lifetime_addtime) { + candidate = sav; + continue; + } + } + + if (candidate) { + candidate->refcnt++; + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP allocsa_policy cause " + "refcnt++:%d SA:%p\n", + candidate->refcnt, candidate)); + } + return candidate; +} + +/* + * allocating a SA entry for a *INBOUND* packet. + * Must call key_freesav() later. + * OUT: positive: pointer to a sav. + * NULL: not found, or error occured. + * + * In the comparison, source address will be ignored for RFC2401 conformance. + * To quote, from section 4.1: + * A security association is uniquely identified by a triple consisting + * of a Security Parameter Index (SPI), an IP Destination Address, and a + * security protocol (AH or ESP) identifier. + * Note that, however, we do need to keep source address in IPsec SA. + * IPsec SA. IKE specification and PF_KEY specification do assume that we + * keep source address in IPsec SA. We see a tricky situation here. + */ +struct secasvar * +key_allocsa(family, src, dst, proto, spi) + u_int family, proto; + caddr_t src, dst; + u_int32_t spi; +{ + struct secashead *sah; + struct secasvar *sav; + u_int stateidx, state; + int s; + + /* sanity check */ + if (src == NULL || dst == NULL) + panic("key_allocsa: NULL pointer is passed.\n"); + + /* + * searching SAD. + * XXX: to be checked internal IP header somewhere. Also when + * IPsec tunnel packet is received. But ESP tunnel mode is + * encrypted so we can't check internal IP header. 
+ */ +#ifdef __NetBSD__ + s = splsoftnet(); /*called from softclock()*/ +#else + s = splnet(); /*called from softclock()*/ +#endif + LIST_FOREACH(sah, &sahtree, chain) { + + /* search valid state */ + for (stateidx = 0; + stateidx < _ARRAYLEN(saorder_state_valid); + stateidx++) { + + state = saorder_state_valid[stateidx]; + LIST_FOREACH(sav, &sah->savtree[state], chain) { + + /* sanity check */ + KEY_CHKSASTATE(sav->state, state, "key_allocsav"); + if (proto != sav->sah->saidx.proto) + continue; + if (spi != sav->spi) + continue; + +#if 0 /* don't check src */ + if (!key_bbcmp(src, + _INADDRBYSA(&sav->sah->saidx.src), + _INALENBYAF(sav->sah->saidx.src.ss_family) << 3)) + continue; +#endif + if (!key_bbcmp(dst, + _INADDRBYSA(&sav->sah->saidx.dst), + _INALENBYAF(sav->sah->saidx.dst.ss_family) << 3)) + continue; + + goto found; + } + } + } + + /* not found */ + splx(s); + return NULL; + +found: + sav->refcnt++; + splx(s); + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP allocsa cause refcnt++:%d SA:%p\n", + sav->refcnt, sav)); + return sav; +} + +/* + * Must be called after calling key_allocsp(). + * For both the packet without socket and key_freeso(). + */ +void +key_freesp(sp) + struct secpolicy *sp; +{ + /* sanity check */ + if (sp == NULL) + panic("key_freesp: NULL pointer is passed.\n"); + + sp->refcnt--; + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP freesp cause refcnt--:%d SP:%p\n", + sp->refcnt, sp)); + + if (sp->refcnt == 0) + key_delsp(sp); + + return; +} + +/* + * Must be called after calling key_allocsp(). + * For the packet with socket. + */ +void +key_freeso(so) + struct socket *so; +{ + /* sanity check */ + if (so == NULL) + panic("key_freeso: NULL pointer is passed.\n"); + + switch (so->so_proto->pr_domain->dom_family) { +#if INET + case PF_INET: + { + struct inpcb *pcb = sotoinpcb(so); + + /* Does it have a PCB ? 
*/ + if (pcb == NULL) + return; + key_freesp_so(&pcb->inp_sp->sp_in); + key_freesp_so(&pcb->inp_sp->sp_out); + } + break; +#endif +#if INET6 + case PF_INET6: + { +#if HAVE_NRL_INPCB + struct inpcb *pcb = sotoinpcb(so); + + /* Does it have a PCB ? */ + if (pcb == NULL) + return; + key_freesp_so(&pcb->inp_sp->sp_in); + key_freesp_so(&pcb->inp_sp->sp_out); +#else + struct in6pcb *pcb = sotoin6pcb(so); + + /* Does it have a PCB ? */ + if (pcb == NULL) + return; + key_freesp_so(&pcb->in6p_sp->sp_in); + key_freesp_so(&pcb->in6p_sp->sp_out); +#endif + } + break; +#endif /* INET6 */ + default: +#if IPSEC_DEBUG + printf("key_freeso: unknown address family=%d.\n", + so->so_proto->pr_domain->dom_family); +#endif + return; + } + + return; +} + +static void +key_freesp_so(sp) + struct secpolicy **sp; +{ + /* sanity check */ + if (sp == NULL || *sp == NULL) + panic("key_freesp_so: sp == NULL\n"); + + switch ((*sp)->policy) { + case IPSEC_POLICY_IPSEC: + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP freeso calls free SP:%p\n", *sp)); + key_freesp(*sp); + *sp = NULL; + break; + case IPSEC_POLICY_ENTRUST: + case IPSEC_POLICY_BYPASS: + return; + default: + panic("key_freesp_so: Invalid policy found %d", (*sp)->policy); + } + + return; +} + +/* + * Must be called after calling key_allocsa(). + * This function is called by key_freesp() to free some SA allocated + * for a policy. + */ +void +key_freesav(sav) + struct secasvar *sav; +{ + /* sanity check */ + if (sav == NULL) + panic("key_freesav: NULL pointer is passed.\n"); + + sav->refcnt--; + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP freesav cause refcnt--:%d SA:%p SPI %d\n", + sav->refcnt, sav, (u_int32_t)ntohl(sav->spi))); + + if (sav->refcnt == 0) + key_delsav(sav); + + return; +} + +/* %%% SPD management */ +/* + * free security policy entry. 
+ */ +static void +key_delsp(sp) + struct secpolicy *sp; +{ + int s; + + /* sanity check */ + if (sp == NULL) + panic("key_delsp: NULL pointer is passed.\n"); + + sp->state = IPSEC_SPSTATE_DEAD; + + if (sp->refcnt > 0) + return; /* can't free */ + +#if __NetBSD__ + s = splsoftnet(); /*called from softclock()*/ +#else + s = splnet(); /*called from softclock()*/ +#endif + /* remove from SP index */ + if (__LIST_CHAINED(sp)) + LIST_REMOVE(sp, chain); + + { + struct ipsecrequest *isr = sp->req, *nextisr; + + while (isr != NULL) { + if (isr->sav != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP delsp calls free SA:%p\n", + isr->sav)); + key_freesav(isr->sav); + isr->sav = NULL; + } + + nextisr = isr->next; + KFREE(isr); + isr = nextisr; + } + } + + keydb_delsecpolicy(sp); + + splx(s); + + return; +} + +/* + * search SPD + * OUT: NULL : not found + * others : found, pointer to a SP. + */ +static struct secpolicy * +key_getsp(spidx) + struct secpolicyindex *spidx; +{ + struct secpolicy *sp; + + /* sanity check */ + if (spidx == NULL) + panic("key_getsp: NULL pointer is passed.\n"); + + LIST_FOREACH(sp, &sptree[spidx->dir], chain) { + if (sp->state == IPSEC_SPSTATE_DEAD) + continue; + if (key_cmpspidx_exactly(spidx, &sp->spidx)) { + sp->refcnt++; + return sp; + } + } + + return NULL; +} + +/* + * get SP by index. + * OUT: NULL : not found + * others : found, pointer to a SP. 
+ */ +static struct secpolicy * +key_getspbyid(id) + u_int32_t id; +{ + struct secpolicy *sp; + + LIST_FOREACH(sp, &sptree[IPSEC_DIR_INBOUND], chain) { + if (sp->state == IPSEC_SPSTATE_DEAD) + continue; + if (sp->id == id) { + sp->refcnt++; + return sp; + } + } + + LIST_FOREACH(sp, &sptree[IPSEC_DIR_OUTBOUND], chain) { + if (sp->state == IPSEC_SPSTATE_DEAD) + continue; + if (sp->id == id) { + sp->refcnt++; + return sp; + } + } + + return NULL; +} + +struct secpolicy * +key_newsp() +{ + struct secpolicy *newsp = NULL; + + newsp = keydb_newsecpolicy(); + if (!newsp) + return newsp; + + newsp->refcnt = 1; + newsp->req = NULL; + + return newsp; +} + +/* + * create secpolicy structure from sadb_x_policy structure. + * NOTE: `state', `secpolicyindex' in secpolicy structure are not set, + * so must be set properly later. + */ +struct secpolicy * +key_msg2sp(xpl0, len, error) + struct sadb_x_policy *xpl0; + size_t len; + int *error; +{ + struct secpolicy *newsp; + + /* sanity check */ + if (xpl0 == NULL) + panic("key_msg2sp: NULL pointer was passed.\n"); + if (len < sizeof(*xpl0)) + panic("key_msg2sp: invalid length.\n"); + if (len != PFKEY_EXTLEN(xpl0)) { +#if IPSEC_DEBUG + printf("key_msg2sp: Invalid msg length.\n"); +#endif + *error = EINVAL; + return NULL; + } + + if ((newsp = key_newsp()) == NULL) { + *error = ENOBUFS; + return NULL; + } + + newsp->spidx.dir = xpl0->sadb_x_policy_dir; + newsp->policy = xpl0->sadb_x_policy_type; + + /* check policy */ + switch (xpl0->sadb_x_policy_type) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_NONE: + case IPSEC_POLICY_ENTRUST: + case IPSEC_POLICY_BYPASS: + newsp->req = NULL; + break; + + case IPSEC_POLICY_IPSEC: + { + int tlen; + struct sadb_x_ipsecrequest *xisr; + struct ipsecrequest **p_isr = &newsp->req; + + /* validity check */ + if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) { +#if IPSEC_DEBUG + printf("key_msg2sp: Invalid msg length.\n"); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + + tlen = 
PFKEY_EXTLEN(xpl0) - sizeof(*xpl0); + xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1); + + while (tlen > 0) { + + /* length check */ + if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) { +#if IPSEC_DEBUG + printf("key_msg2sp: " + "invalid ipsecrequest length.\n"); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + + /* allocate request buffer */ + KMALLOC(*p_isr, struct ipsecrequest *, sizeof(**p_isr)); + if ((*p_isr) == NULL) { +#if IPSEC_DEBUG + printf("key_msg2sp: No more memory.\n"); +#endif + key_freesp(newsp); + *error = ENOBUFS; + return NULL; + } + bzero(*p_isr, sizeof(**p_isr)); + + /* set values */ + (*p_isr)->next = NULL; + + switch (xisr->sadb_x_ipsecrequest_proto) { + case IPPROTO_ESP: + case IPPROTO_AH: +#if 1 /*nonstandard*/ + case IPPROTO_IPCOMP: +#endif + break; + default: +#if IPSEC_DEBUG + printf("key_msg2sp: invalid proto type=%u\n", + xisr->sadb_x_ipsecrequest_proto); +#endif + key_freesp(newsp); + *error = EPROTONOSUPPORT; + return NULL; + } + (*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto; + + switch (xisr->sadb_x_ipsecrequest_mode) { + case IPSEC_MODE_TRANSPORT: + case IPSEC_MODE_TUNNEL: + break; + case IPSEC_MODE_ANY: + default: +#if IPSEC_DEBUG + printf("key_msg2sp: invalid mode=%u\n", + xisr->sadb_x_ipsecrequest_mode); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + (*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode; + + switch (xisr->sadb_x_ipsecrequest_level) { + case IPSEC_LEVEL_DEFAULT: + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + break; + case IPSEC_LEVEL_UNIQUE: + /* validity check */ + /* + * If range violation of reqid, kernel will + * update it, don't refuse it. + */ + if (xisr->sadb_x_ipsecrequest_reqid + > IPSEC_MANUAL_REQID_MAX) { +#if IPSEC_DEBUG + printf("key_msg2sp: reqid=%d " + "range violation, " + "updated by kernel.\n", + xisr->sadb_x_ipsecrequest_reqid); +#endif + xisr->sadb_x_ipsecrequest_reqid = 0; + } + + /* allocate new reqid id if reqid is zero. 
*/ + if (xisr->sadb_x_ipsecrequest_reqid == 0) { + u_int32_t reqid; + if ((reqid = key_newreqid()) == 0) { + key_freesp(newsp); + *error = ENOBUFS; + return NULL; + } + (*p_isr)->saidx.reqid = reqid; + xisr->sadb_x_ipsecrequest_reqid = reqid; + } else { + /* set it for manual keying. */ + (*p_isr)->saidx.reqid = + xisr->sadb_x_ipsecrequest_reqid; + } + break; + + default: +#if IPSEC_DEBUG + printf("key_msg2sp: invalid level=%u\n", + xisr->sadb_x_ipsecrequest_level); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + (*p_isr)->level = xisr->sadb_x_ipsecrequest_level; + + /* set IP addresses if there */ + if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) { + struct sockaddr *paddr; + + paddr = (struct sockaddr *)(xisr + 1); + + /* validity check */ + if (paddr->sa_len + > sizeof((*p_isr)->saidx.src)) { +#if IPSEC_DEBUG + printf("key_msg2sp: invalid request " + "address length.\n"); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + bcopy(paddr, &(*p_isr)->saidx.src, + paddr->sa_len); + + paddr = (struct sockaddr *)((caddr_t)paddr + + paddr->sa_len); + + /* validity check */ + if (paddr->sa_len + > sizeof((*p_isr)->saidx.dst)) { +#if IPSEC_DEBUG + printf("key_msg2sp: invalid request " + "address length.\n"); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + bcopy(paddr, &(*p_isr)->saidx.dst, + paddr->sa_len); + } + + (*p_isr)->sav = NULL; + (*p_isr)->sp = newsp; + + /* initialization for the next. 
*/ + p_isr = &(*p_isr)->next; + tlen -= xisr->sadb_x_ipsecrequest_len; + + /* validity check */ + if (tlen < 0) { +#if IPSEC_DEBUG + printf("key_msg2sp: becoming tlen < 0.\n"); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + + xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr + + xisr->sadb_x_ipsecrequest_len); + } + } + break; + default: +#if IPSEC_DEBUG + printf("key_msg2sp: invalid policy type.\n"); +#endif + key_freesp(newsp); + *error = EINVAL; + return NULL; + } + + *error = 0; + return newsp; +} + +static u_int32_t +key_newreqid() +{ + static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1; + + auto_reqid = (auto_reqid == ~0 + ? IPSEC_MANUAL_REQID_MAX + 1 : auto_reqid + 1); + + /* XXX should be unique check */ + + return auto_reqid; +} + +/* + * copy secpolicy struct to sadb_x_policy structure indicated. + */ +struct mbuf * +key_sp2msg(sp) + struct secpolicy *sp; +{ + struct sadb_x_policy *xpl; + int tlen; + caddr_t p; + struct mbuf *m; + + /* sanity check. */ + if (sp == NULL) + panic("key_sp2msg: NULL pointer was passed.\n"); + + tlen = key_getspreqmsglen(sp); + + MGET(m, M_DONTWAIT, MT_DATA); + if (m && MLEN < tlen) { + MCLGET(m, M_DONTWAIT); + if ((m->m_flags & M_EXT) == 0) { + m_free(m); + m = NULL; + } + } + m->m_len = 0; + if (!m || M_TRAILINGSPACE(m) < tlen) { +#if IPSEC_DEBUG + printf("key_sp2msg: No more memory.\n"); +#endif + if (m) + m_free(m); + return NULL; + } + + m->m_len = tlen; + m->m_next = NULL; + xpl = mtod(m, struct sadb_x_policy *); + bzero(xpl, tlen); + + xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen); + xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY; + xpl->sadb_x_policy_type = sp->policy; + xpl->sadb_x_policy_dir = sp->spidx.dir; + xpl->sadb_x_policy_id = sp->id; + p = (caddr_t)xpl + sizeof(*xpl); + + /* if is the policy for ipsec ? 
*/ + if (sp->policy == IPSEC_POLICY_IPSEC) { + struct sadb_x_ipsecrequest *xisr; + struct ipsecrequest *isr; + + for (isr = sp->req; isr != NULL; isr = isr->next) { + + xisr = (struct sadb_x_ipsecrequest *)p; + + xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto; + xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode; + xisr->sadb_x_ipsecrequest_level = isr->level; + xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid; + + p += sizeof(*xisr); + bcopy(&isr->saidx.src, p, isr->saidx.src.ss_len); + p += isr->saidx.src.ss_len; + bcopy(&isr->saidx.dst, p, isr->saidx.dst.ss_len); + p += isr->saidx.src.ss_len; + + xisr->sadb_x_ipsecrequest_len = + PFKEY_ALIGN8(sizeof(*xisr) + + isr->saidx.src.ss_len + + isr->saidx.dst.ss_len); + } + } + + return m; +} + +/* + * SADB_X_SPDADD, SADB_X_SPDSETIDX or SADB_X_SPDUPDATE processing + * add a entry to SP database, when received + * + * from the user(?). + * Adding to SP database, + * and send + * + * to the socket which was send. + * + * SPDADD set a unique policy entry. + * SPDSETIDX like SPDADD without a part of policy requests. + * SPDUPDATE replace a unique policy entry. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. 
+ * + */ +static struct sadb_msg * +key_spdadd(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_address *src0, *dst0; + struct sadb_x_policy *xpl0; + struct secpolicyindex spidx; + struct secpolicy *newsp; + int error; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_spdadd: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + if (mhp[SADB_EXT_ADDRESS_SRC] == NULL + || mhp[SADB_EXT_ADDRESS_DST] == NULL + || mhp[SADB_X_EXT_POLICY] == NULL) { +#if IPSEC_DEBUG + printf("key_spdadd: invalid message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + src0 = (struct sadb_address *)mhp[SADB_EXT_ADDRESS_SRC]; + dst0 = (struct sadb_address *)mhp[SADB_EXT_ADDRESS_DST]; + xpl0 = (struct sadb_x_policy *)mhp[SADB_X_EXT_POLICY]; + + /* make secindex */ + KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, + src0 + 1, + dst0 + 1, + src0->sadb_address_prefixlen, + dst0->sadb_address_prefixlen, + src0->sadb_address_proto, + &spidx); + + /* checking the direciton. */ + switch (xpl0->sadb_x_policy_dir) { + case IPSEC_DIR_INBOUND: + case IPSEC_DIR_OUTBOUND: + break; + default: +#if IPSEC_DEBUG + printf("key_spdadd: Invalid SP direction.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* check policy */ + /* key_spdadd() accepts DISCARD, NONE and IPSEC. */ + if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST + || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) { +#if IPSEC_DEBUG + printf("key_spdadd: Invalid policy type.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* policy requests are mandatory when action is ipsec. */ + if (msg0->sadb_msg_type != SADB_X_SPDSETIDX + && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC + && PFKEY_EXTLEN(xpl0) <= sizeof(*xpl0)) { +#if IPSEC_DEBUG + printf("key_spdadd: some policy requests part required.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* + * checking there is SP already or not. 
+ * If type is SPDUPDATE and no SP found, then error. + * If type is either SPDADD or SPDSETIDX and SP found, then error. + */ + newsp = key_getsp(&spidx); + if (msg0->sadb_msg_type == SADB_X_SPDUPDATE) { + if (newsp == NULL) { +#if IPSEC_DEBUG + printf("key_spdadd: no SP found.\n"); +#endif + msg0->sadb_msg_errno = ENOENT; + return NULL; + } + + newsp->state = IPSEC_SPSTATE_DEAD; + key_freesp(newsp); + } else { + if (newsp != NULL) { + key_freesp(newsp); +#if IPSEC_DEBUG + printf("key_spdadd: a SP entry exists already.\n"); +#endif + msg0->sadb_msg_errno = EEXIST; + return NULL; + } + } + + /* allocation new SP entry */ + if ((newsp = key_msg2sp(xpl0, PFKEY_EXTLEN(xpl0), &error)) == NULL) { + msg0->sadb_msg_errno = error; + return NULL; + } + + if ((newsp->id = key_getnewspid()) == 0) { + msg0->sadb_msg_errno = ENOBUFS; + keydb_delsecpolicy(newsp); + return NULL; + } + + KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, + src0 + 1, + dst0 + 1, + src0->sadb_address_prefixlen, + dst0->sadb_address_prefixlen, + src0->sadb_address_proto, + &newsp->spidx); + + /* sanity check on addr pair */ + if (((struct sockaddr *)(src0 + 1))->sa_family != + ((struct sockaddr *)(dst0+ 1))->sa_family) { + msg0->sadb_msg_errno = EINVAL; + keydb_delsecpolicy(newsp); + return NULL; + } +#if 1 + if (newsp->req && newsp->req->saidx.src.ss_family) { + struct sockaddr *sa; + sa = (struct sockaddr *)(src0 + 1); + if (sa->sa_family != newsp->req->saidx.src.ss_family) { + msg0->sadb_msg_errno = EINVAL; + keydb_delsecpolicy(newsp); + return NULL; + } + } + if (newsp->req && newsp->req->saidx.dst.ss_family) { + struct sockaddr *sa; + sa = (struct sockaddr *)(dst0 + 1); + if (sa->sa_family != newsp->req->saidx.dst.ss_family) { + msg0->sadb_msg_errno = EINVAL; + keydb_delsecpolicy(newsp); + return NULL; + } + } +#endif + + newsp->refcnt = 1; /* do not reclaim until I say I do */ + newsp->state = IPSEC_SPSTATE_ALIVE; + LIST_INSERT_HEAD(&sptree[newsp->spidx.dir], newsp, chain); + + /* delete the entry in 
spacqtree */ + if (msg0->sadb_msg_type == SADB_X_SPDUPDATE) { + struct secspacq *spacq; + if ((spacq = key_getspacq(&spidx)) != NULL) { + /* reset counter in order to deletion by timehander. */ + spacq->tick = key_blockacq_lifetime; + spacq->count = 0; + } + } + + { + struct sadb_msg *newmsg; + u_int len; + caddr_t p; + + /* create new sadb_msg to reply. */ + len = sizeof(struct sadb_msg) + + PFKEY_EXTLEN(mhp[SADB_X_EXT_POLICY]) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_SRC]) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_DST]); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_spdadd: No more memory.\n"); +#endif + /* newsp persists in the kernel */ + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)msg0, (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + p = (caddr_t)newmsg + sizeof(*msg0); + + /* + * reqid may had been updated at key_msg2sp() if reqid's + * range violation. + */ + ((struct sadb_x_policy *)mhp[SADB_X_EXT_POLICY])->sadb_x_policy_id = newsp->id; + p = key_setsadbext(p, mhp[SADB_X_EXT_POLICY]); + + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_SRC]); + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_DST]); + + return newmsg; + } +} + +/* + * get new policy id. + * OUT: + * 0: failure. + * others: success. + */ +static u_int32_t +key_getnewspid() +{ + u_int32_t newid = 0; + int count = key_spi_trycnt; /* XXX */ + struct secpolicy *sp; + + /* when requesting to allocate spi ranged */ + while (count--) { + newid = (policy_id = (policy_id == ~0 ? 
1 : ++policy_id)); + + if ((sp = key_getspbyid(newid)) == NULL) + break; + + key_freesp(sp); + } + + if (count == 0 || newid == 0) { +#if IPSEC_DEBUG + printf("key_getnewspid: to allocate policy id is failed.\n"); +#endif + return 0; + } + + return newid; +} + +/* + * SADB_SPDDELETE processing + * receive + * + * from the user(?), and set SADB_SASTATE_DEAD, + * and send, + * + * to the ikmpd. + * policy(*) including direction of policy. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: other if success, return pointer to the message to send. + * 0 if fail. + */ +static struct sadb_msg * +key_spddelete(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_address *src0, *dst0; + struct sadb_x_policy *xpl0; + struct secpolicyindex spidx; + struct secpolicy *sp; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_spddelete: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + if (mhp[SADB_EXT_ADDRESS_SRC] == NULL + || mhp[SADB_EXT_ADDRESS_DST] == NULL + || mhp[SADB_X_EXT_POLICY] == NULL) { +#if IPSEC_DEBUG + printf("key_spddelete: invalid message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]); + dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]); + xpl0 = (struct sadb_x_policy *)mhp[SADB_X_EXT_POLICY]; + + /* make secindex */ + KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, + src0 + 1, + dst0 + 1, + src0->sadb_address_prefixlen, + dst0->sadb_address_prefixlen, + src0->sadb_address_proto, + &spidx); + + /* checking the direciton. */ + switch (xpl0->sadb_x_policy_dir) { + case IPSEC_DIR_INBOUND: + case IPSEC_DIR_OUTBOUND: + break; + default: +#if IPSEC_DEBUG + printf("key_spddelete: Invalid SP direction.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* Is there SP in SPD ? 
*/ + if ((sp = key_getsp(&spidx)) == NULL) { +#if IPSEC_DEBUG + printf("key_spddelete: no SP found.\n"); +#endif + msg0->sadb_msg_errno = ENOENT; + return NULL; + } + + /* save policy id to buffer to be returned. */ + xpl0->sadb_x_policy_id = sp->id; + + sp->state = IPSEC_SPSTATE_DEAD; + key_freesp(sp); + + { + struct sadb_msg *newmsg; + u_int len; + caddr_t p; + + /* create new sadb_msg to reply. */ + len = sizeof(struct sadb_msg) + + PFKEY_EXTLEN(mhp[SADB_X_EXT_POLICY]) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_SRC]) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_DST]); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_spddelete: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + p = (caddr_t)newmsg + sizeof(*msg0); + + p = key_setsadbext(p, mhp[SADB_X_EXT_POLICY]); + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_SRC]); + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_DST]); + + return newmsg; + } +} + +/* + * SADB_SPDDELETE2 processing + * receive + * + * from the user(?), and set SADB_SASTATE_DEAD, + * and send, + * + * to the ikmpd. + * policy(*) including direction of policy. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: other if success, return pointer to the message to send. + * 0 if fail. 
+ */ +static struct sadb_msg * +key_spddelete2(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + u_int32_t id; + struct secpolicy *sp; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_spddelete2: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + if (mhp[SADB_X_EXT_POLICY] == NULL) { +#if IPSEC_DEBUG + printf("key_spddelete2: invalid message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + id = ((struct sadb_x_policy *)mhp[SADB_X_EXT_POLICY])->sadb_x_policy_id; + + /* Is there SP in SPD ? */ + if ((sp = key_getspbyid(id)) == NULL) { +#if IPSEC_DEBUG + printf("key_spddelete2: no SP found id:%u.\n", id); +#endif + msg0->sadb_msg_errno = ENOENT; + return NULL; + } + + sp->state = IPSEC_SPSTATE_DEAD; + key_freesp(sp); + + { + struct sadb_msg *newmsg; + u_int len; + caddr_t p; + + /* create new sadb_msg to reply. */ + len = sizeof(struct sadb_msg) + + PFKEY_EXTLEN(mhp[SADB_X_EXT_POLICY]); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_spddelete2: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + p = (caddr_t)newmsg + sizeof(*msg0); + + p = key_setsadbext(p, mhp[SADB_X_EXT_POLICY]); + + return newmsg; + } +} + +/* + * SADB_X_GET processing + * receive + * + * from the user(?), + * and send, + * + * to the ikmpd. + * policy(*) including direction of policy. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: other if success, return pointer to the message to send. + * 0 if fail. 
+ */ +static int +key_spdget(mhp, so, target) + caddr_t *mhp; + struct socket *so; + int target; +{ + struct sadb_msg *msg0; + u_int32_t id; + struct secpolicy *sp; + struct mbuf *m; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_spdget: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + if (mhp[SADB_X_EXT_POLICY] == NULL) { +#if IPSEC_DEBUG + printf("key_spdget: invalid message is passed.\n"); +#endif + return EINVAL; + } + + id = ((struct sadb_x_policy *)mhp[SADB_X_EXT_POLICY])->sadb_x_policy_id; + + /* Is there SP in SPD ? */ + if ((sp = key_getspbyid(id)) == NULL) { +#if IPSEC_DEBUG + printf("key_spdget: no SP found id:%u.\n", id); +#endif + return ENOENT; + } + + m = key_setdumpsp(sp, SADB_X_SPDGET, 0, msg0->sadb_msg_pid); + if (m != NULL) + key_sendup_mbuf(so, m, target); + + return 0; +} + +/* + * SADB_X_SPDACQUIRE processing. + * Acquire policy and SA(s) for a *OUTBOUND* packet. + * send + * + * to KMD, and expect to receive + * with SADB_X_SPDACQUIRE if error occured, + * or + * + * with SADB_X_SPDUPDATE from KMD by PF_KEY. + * policy(*) is without policy requests. + * + * 0 : succeed + * others: error number + */ +int +key_spdacquire(sp) + struct secpolicy *sp; +{ + struct secspacq *newspacq; + int error; + + /* sanity check */ + if (sp == NULL) + panic("key_spdacquire: NULL pointer is passed.\n"); + if (sp->req != NULL) + panic("key_spdacquire: called but there is request.\n"); + if (sp->policy != IPSEC_POLICY_IPSEC) + panic("key_spdacquire: policy mismathed. IPsec is expected.\n"); + + /* get a entry to check whether sent message or not. */ + if ((newspacq = key_getspacq(&sp->spidx)) != NULL) { + if (key_blockacq_count < newspacq->count) { + /* reset counter and do send message. */ + newspacq->count = 0; + } else { + /* increment counter and do nothing. */ + newspacq->count++; + return 0; + } + } else { + /* make new entry for blocking to send SADB_ACQUIRE. 
*/ + if ((newspacq = key_newspacq(&sp->spidx)) == NULL) + return ENOBUFS; + + /* add to acqtree */ + LIST_INSERT_HEAD(&spacqtree, newspacq, chain); + } + + { + struct sadb_msg *newmsg = NULL; + union sadb_x_ident_id id; + u_int len; + caddr_t p; + + /* create new sadb_msg to reply. */ + len = sizeof(struct sadb_msg) + + sizeof(struct sadb_ident) + + PFKEY_ALIGN8(sp->spidx.src.ss_len) + + sizeof(struct sadb_ident) + + PFKEY_ALIGN8(sp->spidx.dst.ss_len); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == 0) { +#if IPSEC_DEBUG + printf("key_spdacquire: No more memory.\n"); +#endif + return ENOBUFS; + } + bzero((caddr_t)newmsg, len); + + newmsg->sadb_msg_version = PF_KEY_V2; + newmsg->sadb_msg_type = SADB_X_SPDACQUIRE; + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_satype = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + newmsg->sadb_msg_mode = 0; + newmsg->sadb_msg_reqid = 0; + newmsg->sadb_msg_seq = 0; + newmsg->sadb_msg_pid = 0; + p = (caddr_t)newmsg + sizeof(struct sadb_msg); + + /* set sadb_address for spidx's. */ + bzero(&id, sizeof(id)); + id.sadb_x_ident_id_addr.prefix = sp->spidx.prefs; + id.sadb_x_ident_id_addr.ul_proto = sp->spidx.ul_proto; + p = key_setsadbident(p, + SADB_EXT_IDENTITY_SRC, + SADB_X_IDENTTYPE_ADDR, + (caddr_t)&sp->spidx.src, + sp->spidx.src.ss_len, + *(u_int64_t *)&id); + + bzero(&id, sizeof(id)); + id.sadb_x_ident_id_addr.prefix = sp->spidx.prefd; + id.sadb_x_ident_id_addr.ul_proto = sp->spidx.ul_proto; + p = key_setsadbident(p, + SADB_EXT_IDENTITY_DST, + SADB_X_IDENTTYPE_ADDR, + (caddr_t)&sp->spidx.dst, + sp->spidx.dst.ss_len, + *(u_int64_t *)&id); + + error = key_sendall(newmsg, len); +#if IPSEC_DEBUG + if (error != 0) + printf("key_spdacquire: key_sendall returned %d\n", error); +#endif + return error; + } + + return 0; +} + +/* + * SADB_SPDFLUSH processing + * receive + * + * from the user, and free all entries in secpctree. + * and send, + * + * to the user. + * NOTE: what to do is only marking SADB_SASTATE_DEAD. 
 *
 * IN:	mhp: pointer to the pointer to each header.
 * OUT:	other if success, return pointer to the message to send.
 *	0 if fail.
 */
static struct sadb_msg *
key_spdflush(mhp)
	caddr_t *mhp;
{
	struct sadb_msg *msg0;
	struct secpolicy *sp;
	u_int dir;

	/* sanity check */
	if (mhp == NULL || mhp[0] == NULL)
		panic("key_spdflush: NULL pointer is passed.\n");

	msg0 = (struct sadb_msg *)mhp[0];

	/* mark every SP in every direction DEAD; actual reclamation
	 * happens elsewhere once references drain. */
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
		LIST_FOREACH(sp, &sptree[dir], chain) {
			sp->state = IPSEC_SPSTATE_DEAD;
		}
	}

    {
	struct sadb_msg *newmsg;
	u_int len;

	/* create new sadb_msg to reply. */
	len = sizeof(struct sadb_msg);

	KMALLOC(newmsg, struct sadb_msg *, len);
	if (newmsg == NULL) {
#if IPSEC_DEBUG
		printf("key_spdflush: No more memory.\n");
#endif
		msg0->sadb_msg_errno = ENOBUFS;
		return NULL;
	}
	bzero((caddr_t)newmsg, len);

	/* echo the request header back with errno cleared. */
	bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0));
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(len);

	return(newmsg);
    }
}

/*
 * SADB_SPDDUMP processing
 * receive
 *
 * from the user, and dump all SP leaves
 * and send,
 * .....
 * to the ikmpd.
 *
 * IN:	mhp: pointer to the pointer to each header.
 * OUT:	other if success, return pointer to the message to send.
 *	0 if fail.
 */
static int
key_spddump(mhp, so, target)
	caddr_t *mhp;
	struct socket *so;
	int target;
{
	struct sadb_msg *msg0;
	struct secpolicy *sp;
	int cnt;		/* entries remaining; sent as the seq field */
	u_int dir;
	struct mbuf *m;

	/* sanity check */
	if (mhp == NULL || mhp[0] == NULL)
		panic("key_spddump: NULL pointer is passed.\n");

	msg0 = (struct sadb_msg *)mhp[0];

	/* search SPD entry and get buffer size. */
	cnt = 0;
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
		LIST_FOREACH(sp, &sptree[dir], chain) {
			cnt++;
		}
	}

	if (cnt == 0)
		return ENOENT;

	/* second pass: send one message per SP, counting down so the
	 * receiver can tell when the dump is complete (cnt reaches 0). */
	for (dir = 0; dir < IPSEC_DIR_MAX; dir++) {
		LIST_FOREACH(sp, &sptree[dir], chain) {
			--cnt;
			m = key_setdumpsp(sp, SADB_X_SPDDUMP,
				cnt, msg0->sadb_msg_pid);

			if (m)
				key_sendup_mbuf(so, m, target);
		}
	}

	return 0;
}

/*
 * Build a PF_KEY message describing one SP as an mbuf chain:
 * sadb_msg header, SRC/DST addresses, then the policy extension.
 * Returns NULL on allocation failure.
 */
static struct mbuf *
key_setdumpsp(sp, type, seq, pid)
	struct secpolicy *sp;
	u_int8_t type;
	u_int32_t seq, pid;
{
	struct mbuf *m;
	u_int tlen;

	/* XXX it would be better to avoid pre-computing length */
	tlen = key_getspmsglen(sp);

	/* XXX maybe it's a wrong idea to insist on cluster? */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		}
	}
	if (m == NULL)
		return NULL;	/*ENOBUFS*/

	m->m_pkthdr.len = m->m_len = 0;
	m->m_next = NULL;

	/* sadb_msg->sadb_msg_len must be filled afterwards */
	if (key_setsadbmsg_m(m, type, 0, SADB_SATYPE_UNSPEC, seq, pid,
	    IPSEC_MODE_ANY, 0, 0, sp->refcnt) != 0) {
		m_freem(m);
		return NULL;
	}

	if (key_setsadbaddr_m(m, SADB_EXT_ADDRESS_SRC,
	    (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs,
	    sp->spidx.ul_proto) != 0) {
		m_freem(m);
		return NULL;
	}

	if (key_setsadbaddr_m(m, SADB_EXT_ADDRESS_DST,
	    (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd,
	    sp->spidx.ul_proto) != 0) {
		m_freem(m);
		return NULL;
	}

    {
	struct mbuf *n;
	struct sadb_x_policy *tmp;

	n = key_sp2msg(sp);
	if (!n || n->m_len < sizeof(*tmp)) {
#if IPSEC_DEBUG
		printf("key_setdumpsp: No more memory.\n");
#endif
		m_freem(m);
		if (n)
			m_freem(n);
		return NULL;
	}

	tmp = mtod(n, struct sadb_x_policy *);

	/* validity check */
	if (key_getspreqmsglen(sp) != PFKEY_UNUNIT64(tmp->sadb_x_policy_len)
	 || n->m_len != PFKEY_UNUNIT64(tmp->sadb_x_policy_len))
		panic("key_setdumpsp: length mismatch."
			"sp:%d msg:%d\n",
			key_getspreqmsglen(sp),
			PFKEY_UNUNIT64(tmp->sadb_x_policy_len));

	m_cat(m, n);
	m->m_pkthdr.len += n->m_len;
    }

	/* patch the final length into the leading sadb_msg header. */
	if (m->m_len < sizeof(struct sadb_msg)) {
		m = m_pullup(m, sizeof(struct sadb_msg));
		if (m == NULL)
			return NULL;
	}
	mtod(m, struct sadb_msg *)->sadb_msg_len =
	    PFKEY_UNIT64(m->m_pkthdr.len);

	return m;
}

/* get sadb message length for a SP. */
static u_int
key_getspmsglen(sp)
	struct secpolicy *sp;
{
	u_int tlen;

	/* sanity check */
	if (sp == NULL)
		panic("key_getspmsglen: NULL pointer is passed.\n");

	/* header + SRC address + DST address, each 8-byte aligned */
	tlen = (sizeof(struct sadb_msg)
	    + sizeof(struct sadb_address)
	    + PFKEY_ALIGN8(_SALENBYAF(sp->spidx.src.ss_family))
	    + sizeof(struct sadb_address)
	    + PFKEY_ALIGN8(_SALENBYAF(sp->spidx.dst.ss_family)));

	tlen += key_getspreqmsglen(sp);

	return tlen;
}

/*
 * get PFKEY message length for security policy and request.
 */
static u_int
key_getspreqmsglen(sp)
	struct secpolicy *sp;
{
	u_int tlen;

	tlen = sizeof(struct sadb_x_policy);

	/* if is the policy for ipsec ? */
	if (sp->policy != IPSEC_POLICY_IPSEC)
		return tlen;

	/* get length of ipsec requests */
    {
	struct ipsecrequest *isr;
	int len;

	for (isr = sp->req; isr != NULL; isr = isr->next) {
		len = sizeof(struct sadb_x_ipsecrequest)
			+ isr->saidx.src.ss_len
			+ isr->saidx.dst.ss_len;

		tlen += PFKEY_ALIGN8(len);
	}
    }

	return tlen;
}

/* %%% SAD management */
/*
 * allocating a memory for new SA head, and copy from the values of mhp.
 * OUT:	NULL	: failure due to the lack of memory.
 *	others	: pointer to new SA head.
+ */ +static struct secashead * +key_newsah(saidx) + struct secasindex *saidx; +{ + struct secashead *newsah; + + /* sanity check */ + if (saidx == NULL) + panic("key_newsaidx: NULL pointer is passed.\n"); + + newsah = keydb_newsecashead(); + if (newsah == NULL) + return NULL; + + bcopy(saidx, &newsah->saidx, sizeof(newsah->saidx)); + + /* add to saidxtree */ + newsah->state = SADB_SASTATE_MATURE; + LIST_INSERT_HEAD(&sahtree, newsah, chain); + + return(newsah); +} + +/* + * delete SA index and all SA registerd. + */ +static void +key_delsah(sah) + struct secashead *sah; +{ + struct secasvar *sav, *nextsav; + u_int stateidx, state; + int s; + int zombie = 0; + + /* sanity check */ + if (sah == NULL) + panic("key_delsah: NULL pointer is passed.\n"); + +#ifdef __NetBSD__ + s = splsoftnet(); /*called from softclock()*/ +#else + s = splnet(); /*called from softclock()*/ +#endif + + /* searching all SA registerd in the secindex. */ + for (stateidx = 0; + stateidx < _ARRAYLEN(saorder_state_any); + stateidx++) { + + state = saorder_state_any[stateidx]; + for (sav = (struct secasvar *)LIST_FIRST(&sah->savtree[state]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); + + if (sav->refcnt > 0) { + /* give up to delete this sa */ + zombie++; + continue; + } + + /* sanity check */ + KEY_CHKSASTATE(state, sav->state, "key_delsah"); + + key_freesav(sav); + + /* remove back pointer */ + sav->sah = NULL; + sav = NULL; + } + } + + /* don't delete sah only if there are savs. */ + if (zombie) { + splx(s); + return; + } + + if (sah->sa_route.ro_rt) { + RTFREE(sah->sa_route.ro_rt); + sah->sa_route.ro_rt = (struct rtentry *)NULL; + } + + /* remove from tree of SA index */ + if (__LIST_CHAINED(sah)) + LIST_REMOVE(sah, chain); + + KFREE(sah); + + splx(s); + return; +} + +/* + * allocating a new SA with LARVAL state. key_add() and key_getspi() call, + * and copy the values of mhp into new buffer. 
 * When SAD message type is GETSPI:
 *	to set sequence number from acq_seq++,
 *	to set zero to SPI.
 *	not to call key_setsava().
 * OUT:	NULL	: fail
 *	others	: pointer to new secasvar.
 */
static struct secasvar *
key_newsav(mhp, sah)
	caddr_t *mhp;
	struct secashead *sah;
{
	struct secasvar *newsav;
	struct sadb_msg *msg0;

	/* sanity check */
	if (mhp == NULL || mhp[0] == NULL || sah == NULL)
		panic("key_newsa: NULL pointer is passed.\n");

	msg0 = (struct sadb_msg *)mhp[0];

	KMALLOC(newsav, struct secasvar *, sizeof(struct secasvar));
	if (newsav == NULL) {
#if IPSEC_DEBUG
		printf("key_newsa: No more memory.\n");
#endif
		msg0->sadb_msg_errno = ENOBUFS;
		return NULL;
	}
	bzero((caddr_t)newsav, sizeof(struct secasvar));

	/* SPI/seq initialization depends on which request created the SA. */
	switch (msg0->sadb_msg_type) {
	case SADB_GETSPI:
		/* SPI is assigned later; start with zero. */
		newsav->spi = 0;

#if IPSEC_DOSEQCHECK
		/* sync sequence number */
		if (msg0->sadb_msg_seq == 0)
			newsav->seq =
				(acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq));
		else
#endif
			newsav->seq = msg0->sadb_msg_seq;
		break;

	case SADB_ADD:
		/* sanity check */
		if (mhp[SADB_EXT_SA] == NULL) {
			KFREE(newsav);
#if IPSEC_DEBUG
			printf("key_newsa: invalid message is passed.\n");
#endif
			msg0->sadb_msg_errno = EINVAL;
			return NULL;
		}
		newsav->spi = ((struct sadb_sa *)mhp[SADB_EXT_SA])->sadb_sa_spi;
		newsav->seq = msg0->sadb_msg_seq;
		break;
	default:
		KFREE(newsav);
		msg0->sadb_msg_errno = EINVAL;
		return NULL;
	}

	/* copy sav values (skipped for GETSPI: no payload to copy yet) */
	if (msg0->sadb_msg_type != SADB_GETSPI && key_setsaval(newsav, mhp)) {
		KFREE(newsav);
		/* msg0->sadb_msg_errno is set at key_setsaval. */
		return NULL;
	}

	/* reset tick */
	newsav->tick = 0;

	newsav->pid = msg0->sadb_msg_pid;

	/* add to satree */
	newsav->sah = sah;
	newsav->refcnt = 1;
	newsav->state = SADB_SASTATE_LARVAL;
	LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav,
			secasvar, chain);

	return newsav;
}

/*
 * free() SA variable entry.
 */
static void
key_delsav(sav)
	struct secasvar *sav;
{
	/* sanity check */
	if (sav == NULL)
		panic("key_delsav: NULL pointer is passed.\n");

	if (sav->refcnt > 0)
		return;		/* can't free */

	/* remove from SA header */
	if (__LIST_CHAINED(sav))
		LIST_REMOVE(sav, chain);

	/* release every buffer hanging off the sav. */
	if (sav->key_auth != NULL)
		KFREE(sav->key_auth);
	if (sav->key_enc != NULL)
		KFREE(sav->key_enc);
	if (sav->replay != NULL)
		keydb_delsecreplay(sav->replay);
	if (sav->lft_c != NULL)
		KFREE(sav->lft_c);
	if (sav->lft_h != NULL)
		KFREE(sav->lft_h);
	if (sav->lft_s != NULL)
		KFREE(sav->lft_s);
	if (sav->iv != NULL)
		KFREE(sav->iv);
#if notyet
	if (sav->misc1 != NULL)
		KFREE(sav->misc1);
	if (sav->misc2 != NULL)
		KFREE(sav->misc2);
	if (sav->misc3 != NULL)
		KFREE(sav->misc3);
#endif

	KFREE(sav);

	return;
}

/*
 * search SAD.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
 */
static struct secashead *
key_getsah(saidx)
	struct secasindex *saidx;
{
	struct secashead *sah;

	/* linear scan; DEAD heads are skipped. */
	LIST_FOREACH(sah, &sahtree, chain) {
		if (sah->state == SADB_SASTATE_DEAD)
			continue;
		if (key_cmpsaidx_exactly(&sah->saidx, saidx))
			return(sah);
	}

	return NULL;
}

/*
 * check not to be duplicated SPI.
 * NOTE: this function is too slow due to searching all SAD.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
 */
static struct secasvar *
key_checkspidup(saidx, spi)
	struct secasindex *saidx;
	u_int32_t spi;
{
	struct secashead *sah;
	struct secasvar *sav;

	/* check address family */
	if (saidx->src.ss_family != saidx->dst.ss_family) {
#if IPSEC_DEBUG
		printf("key_checkspidup: address family mismatched.\n");
#endif
		return NULL;
	}

	/* check all SAD; only heads whose destination is a local
	 * address are considered. */
	LIST_FOREACH(sah, &sahtree, chain) {
		if (!key_ismyaddr(sah->saidx.dst.ss_family,
		    _INADDRBYSA(&sah->saidx.dst)))
			continue;
		sav = key_getsavbyspi(sah, spi);
		if (sav != NULL)
			return sav;
	}

	return NULL;
}

/*
 * search SAD litmited alive SA, protocol, SPI.
 * OUT:
 *	NULL	: not found
 *	others	: found, pointer to a SA.
 */
static struct secasvar *
key_getsavbyspi(sah, spi)
	struct secashead *sah;
	u_int32_t spi;
{
	struct secasvar *sav;
	u_int stateidx, state;

	/* search all status */
	for (stateidx = 0;
	     stateidx < _ARRAYLEN(saorder_state_alive);
	     stateidx++) {

		state = saorder_state_alive[stateidx];
		LIST_FOREACH(sav, &sah->savtree[state], chain) {

			/* sanity check */
			if (sav->state != state) {
#if IPSEC_DEBUG
				printf("key_getsavbyspi: "
					"invalid sav->state "
					"(queue: %d SA: %d)\n",
					state, sav->state);
#endif
				continue;
			}

			if (sav->spi == spi)
				return sav;
		}
	}

	return NULL;
}

/*
 * copy SA values from PF_KEY message except *SPI, SEQ, PID, STATE and TYPE*.
 * You must update these if need.
 * OUT:	0:	success.
 *	1:	failure. set errno to (mhp[0])->sadb_msg_errno.
 */
static int
key_setsaval(sav, mhp)
	struct secasvar *sav;
	caddr_t *mhp;
{
	struct sadb_msg *msg0;
	int error = 0;

	/* sanity check */
	if (mhp == NULL || mhp[0] == NULL)
		panic("key_setsaval: NULL pointer is passed.\n");

	msg0 = (struct sadb_msg *)mhp[0];

	/* initialization: every owned pointer starts NULL so the err
	 * path below can free unconditionally. */
	sav->replay = NULL;
	sav->key_auth = NULL;
	sav->key_enc = NULL;
	sav->iv = NULL;
	sav->lft_c = NULL;
	sav->lft_h = NULL;
	sav->lft_s = NULL;
#if notyet
	sav->misc1 = NULL;
	sav->misc2 = NULL;
	sav->misc3 = NULL;
#endif

	/* SA */
	if (mhp[SADB_EXT_SA] != NULL) {
		struct sadb_sa *sa0 = (struct sadb_sa *)mhp[SADB_EXT_SA];

		sav->alg_auth = sa0->sadb_sa_auth;
		sav->alg_enc = sa0->sadb_sa_encrypt;
		sav->flags = sa0->sadb_sa_flags;

		/* replay window (not used for old-style SAs) */
		if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) {
			sav->replay = keydb_newsecreplay(sa0->sadb_sa_replay);
			if (sav->replay == NULL) {
#if IPSEC_DEBUG
				printf("key_setsaval: No more memory.\n");
#endif
				error = ENOBUFS;
				goto err;
			}
		}
	}

	/* Authentication keys */
	if (mhp[SADB_EXT_KEY_AUTH] != NULL) {
		struct sadb_key *key0;
		u_int len;

		key0 = (struct sadb_key *)mhp[SADB_EXT_KEY_AUTH];
		len = PFKEY_UNUNIT64(key0->sadb_key_len);

		/* validate key length against SA type: a zero-length key
		 * is only acceptable with the null auth algorithm. */
		error = 0;
		if (len < sizeof(struct sadb_key))
			error = EINVAL;
		switch (msg0->sadb_msg_satype) {
		case SADB_SATYPE_AH:
		case SADB_SATYPE_ESP:
			if (len == sizeof(struct sadb_key)
			 && sav->alg_auth != SADB_AALG_NULL) {
				error = EINVAL;
			}
			break;
		case SADB_X_SATYPE_IPCOMP:
			error = EINVAL;
			break;
		default:
			error = EINVAL;
			break;
		}
		if (error) {
#if IPSEC_DEBUG
			printf("key_setsaval: invalid key_auth values.\n");
#endif
			goto err;
		}

		sav->key_auth = (struct sadb_key *)key_newbuf(key0, len);
		if (sav->key_auth == NULL) {
#if IPSEC_DEBUG
			printf("key_setsaval: No more memory.\n");
#endif
			error = ENOBUFS;
			goto err;
		}

		/* make length shift up for kernel*/
		sav->key_auth->sadb_key_len = len;
	}

	/* Encryption key */
	if (mhp[SADB_EXT_KEY_ENCRYPT] != NULL) {
		struct sadb_key *key0;
		u_int len;

		key0 = (struct sadb_key *)mhp[SADB_EXT_KEY_ENCRYPT];
		len = PFKEY_UNUNIT64(key0->sadb_key_len);

		/* same validation as above; AH never carries an
		 * encryption key, IPCOMP carries no key material. */
		error = 0;
		if (len < sizeof(struct sadb_key))
			error = EINVAL;
		switch (msg0->sadb_msg_satype) {
		case SADB_SATYPE_ESP:
			if (len == sizeof(struct sadb_key)
			 && sav->alg_enc != SADB_EALG_NULL) {
				error = EINVAL;
			}
			break;
		case SADB_SATYPE_AH:
			error = EINVAL;
			break;
		case SADB_X_SATYPE_IPCOMP:
			break;
		default:
			error = EINVAL;
			break;
		}
		if (error) {
#if IPSEC_DEBUG
			printf("key_setsatval: invalid key_enc value.\n");
#endif
			goto err;
		}

		sav->key_enc = (struct sadb_key *)key_newbuf(key0, len);
		if (sav->key_enc == NULL) {
#if IPSEC_DEBUG
			printf("key_setsaval: No more memory.\n");
#endif
			error = ENOBUFS;
			goto err;
		}

		/* make length shift up for kernel*/
		sav->key_enc->sadb_key_len = len;
	}

	/* set iv */
	sav->ivlen = 0;

	switch (msg0->sadb_msg_satype) {
	case SADB_SATYPE_ESP:
#if IPSEC_ESP
	    {
		struct esp_algorithm *algo;

		/* IV length is algorithm-dependent. */
		algo = &esp_algorithms[sav->alg_enc];
		if (algo && algo->ivlen)
			sav->ivlen = (*algo->ivlen)(sav);
		KMALLOC(sav->iv, caddr_t, sav->ivlen);
		if (sav->iv == 0) {
#if IPSEC_DEBUG
			printf("key_setsaval: No more memory.\n");
#endif
			error = ENOBUFS;
			goto err;
		}
		/* initialize ? */
		break;
	    }
#else
		break;
#endif
	case SADB_SATYPE_AH:
#if 1	/*nonstandard*/
	case SADB_X_SATYPE_IPCOMP:
#endif
		break;
	default:
#if IPSEC_DEBUG
		printf("key_setsaval: invalid SA type.\n");
#endif
		error = EINVAL;
		goto err;
	}

	/* reset tick */
	sav->tick = 0;

	/* make lifetime for CURRENT */
    {
	struct timeval tv;

	KMALLOC(sav->lft_c, struct sadb_lifetime *,
		sizeof(struct sadb_lifetime));
	if (sav->lft_c == NULL) {
#if IPSEC_DEBUG
		printf("key_setsaval: No more memory.\n");
#endif
		error = ENOBUFS;
		goto err;
	}

	microtime(&tv);

	sav->lft_c->sadb_lifetime_len =
		PFKEY_UNIT64(sizeof(struct sadb_lifetime));
	sav->lft_c->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	sav->lft_c->sadb_lifetime_allocations = 0;
	sav->lft_c->sadb_lifetime_bytes = 0;
	sav->lft_c->sadb_lifetime_addtime = tv.tv_sec;
	sav->lft_c->sadb_lifetime_usetime = 0;
    }

	/* lifetimes for HARD and SOFT */
    {
	struct sadb_lifetime *lft0;

	lft0 = (struct sadb_lifetime *)mhp[SADB_EXT_LIFETIME_HARD];
	if (lft0 != NULL) {
		sav->lft_h = (struct sadb_lifetime *)key_newbuf(lft0,
			sizeof(*lft0));
		if (sav->lft_h == NULL) {
#if IPSEC_DEBUG
			printf("key_setsaval: No more memory.\n");
#endif
			error = ENOBUFS;
			goto err;
		}
		/* to be initialize ? */
	}

	lft0 = (struct sadb_lifetime *)mhp[SADB_EXT_LIFETIME_SOFT];
	if (lft0 != NULL) {
		sav->lft_s = (struct sadb_lifetime *)key_newbuf(lft0,
			sizeof(*lft0));
		if (sav->lft_s == NULL) {
#if IPSEC_DEBUG
			printf("key_setsaval: No more memory.\n");
#endif
			error = ENOBUFS;
			goto err;
		}
		/* to be initialize ? */
	}
    }

#if notyet
	/* pre-processing for DES */
	switch (sav->alg_enc) {
	case SADB_EALG_DESCBC:
		if (des_key_sched((C_Block *)_KEYBUF(sav->key_enc),
		                  (des_key_schedule)sav->misc1) != 0) {
#if IPSEC_DEBUG
			printf("key_setsaval: error des_key_sched.\n");
#endif
			sav->misc1 = NULL;
			/* THROUGH */
		}
		break;
	case SADB_EALG_3DESCBC:
		if (des_key_sched((C_Block *)_KEYBUF(sav->key_enc),
		                  (des_key_schedule)sav->misc1) != 0
		 || des_key_sched((C_Block *)(_KEYBUF(sav->key_enc) + 8),
		                  (des_key_schedule)sav->misc2) != 0
		 || des_key_sched((C_Block *)(_KEYBUF(sav->key_enc) + 16),
		                  (des_key_schedule)sav->misc3) != 0) {
#if IPSEC_DEBUG
			printf("key_setsaval: error des_key_sched.\n");
#endif
			sav->misc1 = NULL;
			sav->misc2 = NULL;
			sav->misc3 = NULL;
			/* THROUGH */
		}
	}
#endif

	msg0->sadb_msg_errno = 0;
	return 0;

    err:
	/* initialization: unwind everything allocated above. */
	if (sav->replay != NULL)
		keydb_delsecreplay(sav->replay);
	if (sav->key_auth != NULL)
		KFREE(sav->key_auth);
	if (sav->key_enc != NULL)
		KFREE(sav->key_enc);
	if (sav->iv != NULL)
		KFREE(sav->iv);
	if (sav->lft_c != NULL)
		KFREE(sav->lft_c);
	if (sav->lft_h != NULL)
		KFREE(sav->lft_h);
	if (sav->lft_s != NULL)
		KFREE(sav->lft_s);
#if notyet
	if (sav->misc1 != NULL)
		KFREE(sav->misc1);
	if (sav->misc2 != NULL)
		KFREE(sav->misc2);
	if (sav->misc3 != NULL)
		KFREE(sav->misc3);
#endif

	msg0->sadb_msg_errno = error;
	return 1;
}

/*
 * get message buffer length.
 */
static u_int
key_getmsglen(sav)
	struct secasvar *sav;
{
	int len = sizeof(struct sadb_msg);

	/* fixed parts: SA extension plus aligned SRC/DST addresses. */
	len += sizeof(struct sadb_sa);
	len += (sizeof(struct sadb_address)
		+ PFKEY_ALIGN8(_SALENBYAF(sav->sah->saidx.src.ss_family)));
	len += (sizeof(struct sadb_address)
		+ PFKEY_ALIGN8(_SALENBYAF(sav->sah->saidx.dst.ss_family)));

	/* optional parts, counted only when present on the sav. */
	if (sav->key_auth != NULL)
		len += sav->key_auth->sadb_key_len;
	if (sav->key_enc != NULL)
		len += sav->key_enc->sadb_key_len;

	if (sav->lft_c != NULL)
		len += sizeof(struct sadb_lifetime);
	if (sav->lft_h != NULL)
		len += sizeof(struct sadb_lifetime);
	if (sav->lft_s != NULL)
		len += sizeof(struct sadb_lifetime);

	return len;
}

/*
 * validation with a secasvar entry, and set SADB_SATYPE_MATURE.
 * OUT:	0:	valid
 *	other:	errno
 */
static int
key_mature(sav)
	struct secasvar *sav;
{
	int mature;
	int checkmask = 0;	/* 2^0: ealg  2^1: aalg  2^2: calg */
	int mustmask = 0;	/* 2^0: ealg  2^1: aalg  2^2: calg */

	mature = 0;

	/* check SPI value: 0-255 are reserved */
	if (ntohl(sav->spi) >= 0 && ntohl(sav->spi) <= 255) {
#if IPSEC_DEBUG
		printf("key_mature: illegal range of SPI %d.\n", sav->spi);
#endif
		return EINVAL;
	}

	/* check satype: decide which algorithm classes must be
	 * checked (checkmask) and which must validate (mustmask). */
	switch (sav->sah->saidx.proto) {
	case IPPROTO_ESP:
		/* check flags */
		if ((sav->flags & SADB_X_EXT_OLD)
		 && (sav->flags & SADB_X_EXT_DERIV)) {
#if IPSEC_DEBUG
			printf("key_mature: "
				"invalid flag (derived) given to old-esp.\n");
#endif
			return EINVAL;
		}
		checkmask = 3;
		mustmask = 1;
		break;
	case IPPROTO_AH:
		/* check flags */
		if (sav->flags & SADB_X_EXT_DERIV) {
#if IPSEC_DEBUG
			printf("key_mature: "
				"invalid flag (derived) given to AH SA.\n");
#endif
			return EINVAL;
		}
		if (sav->alg_enc != SADB_EALG_NONE) {
#if IPSEC_DEBUG
			printf("key_mature: "
				"protocol and algorithm mismated.\n");
#endif
			return(EINVAL);
		}
		checkmask = 2;
		mustmask = 2;
		break;
#if 1	/*nonstandard*/
	case IPPROTO_IPCOMP:
		if (sav->alg_auth != SADB_AALG_NONE) {
#if IPSEC_DEBUG
			printf("key_mature: "
				"protocol and algorithm mismated.\n");
#endif
			return(EINVAL);
		}
		if ((sav->flags & SADB_X_EXT_RAWCPI) == 0
		 && ntohl(sav->spi) >= 0x10000) {
#if IPSEC_DEBUG
			printf("key_mature: invalid cpi for IPComp.\n");
#endif
			return(EINVAL);
		}
		checkmask = 4;
		mustmask = 4;
		break;
#endif
	default:
#if IPSEC_DEBUG
		printf("key_mature: Invalid satype.\n");
#endif
		return EPROTONOSUPPORT;
	}

	/* check authentication algorithm */
	if ((checkmask & 2) != 0) {
		struct ah_algorithm *algo;
		int keylen;

		/* XXX: should use algorithm map to check. */
		switch (sav->alg_auth) {
		case SADB_AALG_NONE:
		case SADB_AALG_MD5HMAC:
		case SADB_AALG_SHA1HMAC:
		case SADB_AALG_MD5:
		case SADB_AALG_SHA:
		case SADB_AALG_NULL:
			break;
		default:
#if IPSEC_DEBUG
			printf("key_mature: "
				"unknown authentication algorithm.\n");
#endif
			return EINVAL;
		}

		/* algorithm-dependent check */
		algo = &ah_algorithms[sav->alg_auth];

		if (sav->key_auth)
			keylen = sav->key_auth->sadb_key_bits;
		else
			keylen = 0;
		if (keylen < algo->keymin || algo->keymax < keylen) {
#if IPSEC_DEBUG
			printf("key_mature: invalid AH key length %d "
				"(%d-%d allowed)\n", keylen,
				algo->keymin, algo->keymax);
#endif
			return EINVAL;
		}

		if (algo->mature) {
			if ((*algo->mature)(sav)) {
				/* message generated in per-algorithm function*/
				return EINVAL;
			} else
				mature = SADB_SATYPE_AH;
		}

		if ((mustmask & 2) != 0 && mature != SADB_SATYPE_AH) {
#if IPSEC_DEBUG
			printf("key_mature: no satisfy algorithm for AH\n");
#endif
			return EINVAL;
		}
	}

	/* check encryption algorithm */
	if ((checkmask & 1) != 0) {
#if IPSEC_ESP
		struct esp_algorithm *algo;
		int keylen;

		switch (sav->alg_enc) {
		case SADB_EALG_NONE:
		case SADB_EALG_DESCBC:
		case SADB_EALG_3DESCBC:
		case SADB_EALG_NULL:
		case SADB_EALG_BLOWFISHCBC:
		case SADB_EALG_CAST128CBC:
		case SADB_EALG_RC5CBC:
			break;
		default:
#if IPSEC_DEBUG
			printf("key_mature: unknown encryption algorithm.\n");
#endif
			return EINVAL;
		}

		/* algorithm-dependent check */
		algo = &esp_algorithms[sav->alg_enc];

		if (sav->key_enc)
			keylen = sav->key_enc->sadb_key_bits;
		else
			keylen = 0;
		if (keylen < algo->keymin || algo->keymax < keylen) {
#if IPSEC_DEBUG
			printf("key_mature: invalid ESP key length %d "
				"(%d-%d allowed)\n", keylen,
				algo->keymin, algo->keymax);
#endif
			return EINVAL;
		}

		if (algo->mature) {
			if ((*algo->mature)(sav)) {
				/* message generated in per-algorithm function*/
				return EINVAL;
			} else
				mature = SADB_SATYPE_ESP;
		}

		if ((mustmask & 1) != 0 && mature != SADB_SATYPE_ESP) {
#if IPSEC_DEBUG
			printf("key_mature: no satisfy algorithm for ESP\n");
#endif
			return EINVAL;
		}
#else /*IPSEC_ESP*/
#if IPSEC_DEBUG
		printf("key_mature: ESP not supported in this configuration\n");
#endif
		return EINVAL;
#endif
	}

	/* check compression algorithm
	 * (NB: for IPComp the compression algorithm id is carried in
	 * alg_enc, as set up by key_setsaval) */
	if ((checkmask & 4) != 0) {
		struct ipcomp_algorithm *algo;

		switch (sav->alg_enc) {
		case SADB_X_CALG_NONE:
		case SADB_X_CALG_OUI:
		case SADB_X_CALG_DEFLATE:
		case SADB_X_CALG_LZS:
			break;
		default:
#if IPSEC_DEBUG
			printf("key_mature: unknown compression algorithm.\n");
#endif
			return EINVAL;
		}

		/* algorithm-dependent check */
		algo = &ipcomp_algorithms[sav->alg_enc];

		if (!(algo->compress && algo->decompress)) {
#if IPSEC_DEBUG
			printf("key_mature: "
				"unsupported compression algorithm.\n");
#endif
			return EINVAL;
		}
	}

	key_sa_chgstate(sav, SADB_SASTATE_MATURE);

	return 0;
}

/*
 * subroutine for SADB_GET and SADB_DUMP.
 * the buf must be allocated sufficent space.
+ */ +static u_int +key_setdumpsa(newmsg, sav, type, satype, seq, pid) + struct sadb_msg *newmsg; + struct secasvar *sav; + u_int8_t type, satype; + u_int32_t seq, pid; +{ + u_int tlen; + caddr_t p; + int i; + + tlen = key_getmsglen(sav); + + p = key_setsadbmsg((caddr_t)newmsg, type, tlen, + satype, seq, pid, + sav->sah->saidx.mode, sav->sah->saidx.reqid, + 0, sav->refcnt); + + for (i = 1; i <= SADB_EXT_MAX; i++) { + switch (i) { + case SADB_EXT_SA: + p = key_setsadbsa(p, sav); + break; + + case SADB_EXT_ADDRESS_SRC: + p = key_setsadbaddr(p, + SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&sav->sah->saidx.src, + _INALENBYAF(sav->sah->saidx.src.ss_family) << 3, + IPSEC_ULPROTO_ANY); + break; + + case SADB_EXT_ADDRESS_DST: + p = key_setsadbaddr(p, + SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&sav->sah->saidx.dst, + _INALENBYAF(sav->sah->saidx.dst.ss_family) << 3, + IPSEC_ULPROTO_ANY); + break; + + case SADB_EXT_KEY_AUTH: + { + u_int len; + if (sav->key_auth == NULL) break; + len = sav->key_auth->sadb_key_len; /* real length */ + bcopy((caddr_t)sav->key_auth, p, len); + ((struct sadb_ext *)p)->sadb_ext_len = PFKEY_UNIT64(len); + p += len; + } + break; + + case SADB_EXT_KEY_ENCRYPT: + { + u_int len; + if (sav->key_enc == NULL) break; + len = sav->key_enc->sadb_key_len; /* real length */ + bcopy((caddr_t)sav->key_enc, p, len); + ((struct sadb_ext *)p)->sadb_ext_len = PFKEY_UNIT64(len); + p += len; + } + break;; + + case SADB_EXT_LIFETIME_CURRENT: + if (sav->lft_c == NULL) break; + p = key_setsadbext(p, (caddr_t)sav->lft_c); + break; + + case SADB_EXT_LIFETIME_HARD: + if (sav->lft_h == NULL) break; + p = key_setsadbext(p, (caddr_t)sav->lft_h); + break; + + case SADB_EXT_LIFETIME_SOFT: + if (sav->lft_s == NULL) break; + p = key_setsadbext(p, (caddr_t)sav->lft_s); + break; + + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + /* XXX: should we brought from SPD ? 
*/ + case SADB_EXT_SENSITIVITY: + default: + break; + } + } + + return tlen; +} + +#if 1 +static int +key_setsadbmsg_m(m, type, tlen, satype, seq, pid, mode, reqid, + reserved1, reserved2) + struct mbuf *m; + u_int8_t type, satype; + u_int16_t tlen; + u_int32_t seq; + pid_t pid; + u_int8_t mode; + u_int32_t reqid; + u_int8_t reserved1; + u_int32_t reserved2; +{ + caddr_t p; + const size_t len = sizeof(struct sadb_msg); + + p = key_appendmbuf(m, len); + if (p == NULL) + return ENOBUFS; + + if (key_setsadbmsg(p, type, tlen, satype, seq, pid, mode, reqid, + reserved1, reserved2)) + return 0; + else + return EINVAL; +} +#endif + +/* + * set data into sadb_msg. + * `buf' must has been allocated sufficiently. + */ +static caddr_t +key_setsadbmsg(buf, type, tlen, satype, seq, pid, mode, reqid, + reserved1, reserved2) + caddr_t buf; + u_int8_t type, satype; + u_int16_t tlen; + u_int32_t seq; + pid_t pid; + u_int8_t mode; + u_int32_t reqid; + u_int8_t reserved1; + u_int32_t reserved2; +{ + struct sadb_msg *p; + u_int len; + + p = (struct sadb_msg *)buf; + len = sizeof(struct sadb_msg); + + bzero(p, len); + p->sadb_msg_version = PF_KEY_V2; + p->sadb_msg_type = type; + p->sadb_msg_errno = 0; + p->sadb_msg_satype = satype; + p->sadb_msg_len = PFKEY_UNIT64(tlen); + p->sadb_msg_mode = mode; + p->sadb_msg_reserved1 = reserved1; + p->sadb_msg_seq = seq; + p->sadb_msg_pid = (u_int32_t)pid; + p->sadb_msg_reqid = reqid; + p->sadb_msg_reserved2 = reserved2; + + return(buf + len); +} + +/* + * copy secasvar data into sadb_address. + * `buf' must has been allocated sufficiently. + */ +static caddr_t +key_setsadbsa(buf, sav) + caddr_t buf; + struct secasvar *sav; +{ + struct sadb_sa *p; + u_int len; + + p = (struct sadb_sa *)buf; + len = sizeof(struct sadb_sa); + + bzero(p, len); + p->sadb_sa_len = PFKEY_UNIT64(len); + p->sadb_sa_exttype = SADB_EXT_SA; + p->sadb_sa_spi = sav->spi; + p->sadb_sa_replay = (sav->replay != NULL ? 
sav->replay->wsize : 0); + p->sadb_sa_state = sav->state; + p->sadb_sa_auth = sav->alg_auth; + p->sadb_sa_encrypt = sav->alg_enc; + p->sadb_sa_flags = sav->flags; + + return(buf + len); +} + +#if 1 +static int +key_setsadbaddr_m(m, exttype, saddr, prefixlen, ul_proto) + struct mbuf *m; + u_int16_t exttype; + struct sockaddr *saddr; + u_int8_t prefixlen; + u_int16_t ul_proto; +{ + caddr_t p; + const size_t len = + sizeof(struct sadb_address) + PFKEY_ALIGN8(saddr->sa_len); + + p = key_appendmbuf(m, len); + if (p == NULL) + return ENOBUFS; + + if (key_setsadbaddr(p, exttype, saddr, prefixlen, ul_proto)) + return 0; + else + return EINVAL; +} +#endif + +/* + * set data into sadb_address. + * `buf' must has been allocated sufficiently. + */ +static caddr_t +key_setsadbaddr(buf, exttype, saddr, prefixlen, ul_proto) + caddr_t buf; + u_int16_t exttype; + struct sockaddr *saddr; + u_int8_t prefixlen; + u_int16_t ul_proto; +{ + struct sadb_address *p; + size_t len; + + p = (struct sadb_address *)buf; + len = sizeof(struct sadb_address) + PFKEY_ALIGN8(saddr->sa_len); + + bzero(p, len); + p->sadb_address_len = PFKEY_UNIT64(len); + p->sadb_address_exttype = exttype; + p->sadb_address_proto = ul_proto; + p->sadb_address_prefixlen = prefixlen; + p->sadb_address_reserved = 0; + + bcopy(saddr, p + 1, saddr->sa_len); + + return(buf + len); +} + +/* + * set data into sadb_ident. + * `buf' must has been allocated sufficiently. 
+ */ +static caddr_t +key_setsadbident(buf, exttype, idtype, string, stringlen, id) + caddr_t buf; + u_int16_t exttype, idtype; + caddr_t string; + int stringlen; + u_int64_t id; +{ + struct sadb_ident *p; + u_int len; + + p = (struct sadb_ident *)buf; + len = sizeof(struct sadb_ident) + PFKEY_ALIGN8(stringlen); + + bzero(p, len); + p->sadb_ident_len = PFKEY_UNIT64(len); + p->sadb_ident_exttype = exttype; + p->sadb_ident_type = idtype; + p->sadb_ident_reserved = 0; + p->sadb_ident_id = id; + + bcopy(string, p + 1, stringlen); + + return(buf + len); +} + +/* + * set data into sadb_x_policy + * `buf' must has been allocated sufficiently. + */ +static caddr_t +key_setsadbxpolicy(buf, type, dir, id) + caddr_t buf; + u_int16_t type; + u_int8_t dir; + u_int32_t id; +{ + struct sadb_x_policy *p; + u_int len; + + p = (struct sadb_x_policy *)buf; + len = sizeof(struct sadb_x_policy); + + bzero(p, len); + p->sadb_x_policy_len = PFKEY_UNIT64(len); + p->sadb_x_policy_exttype = SADB_X_EXT_POLICY; + p->sadb_x_policy_type = type; + p->sadb_x_policy_dir = dir; + p->sadb_x_policy_id = id; + + return(buf + len); +} + +/* + * copy buffer of any sadb extension type into sadb_ext. + * assume that sadb_ext_len shifted down >> 3. + * i.e. shift length up when setting length of extension. + */ +static caddr_t +key_setsadbext(p, ext) + caddr_t p, ext; +{ + u_int len; + + len = PFKEY_UNUNIT64(((struct sadb_ext *)ext)->sadb_ext_len); + + bcopy(ext, p, len); + + return(p + len); +} + +/* %%% utilities */ +/* + * copy a buffer into the new buffer allocated. + */ +static void * +key_newbuf(src, len) + void *src; + u_int len; +{ + caddr_t new; + + KMALLOC(new, caddr_t, len); + if (new == NULL) { +#if IPSEC_DEBUG + printf("key_newbuf: No more memory.\n"); +#endif + return NULL; + } + bcopy((caddr_t)src, new, len); + + return new; +} + +/* compare my own address + * OUT: 1: true, i.e. my address. 
+ * 0: false + */ +int +key_ismyaddr(family, addr) + u_int family; + caddr_t addr; +{ + /* sanity check */ + if (addr == NULL) + panic("key_ismyaddr: NULL pointer is passed.\n"); + + switch (family) { + case AF_INET: + { + struct in_ifaddr *ia; + +#ifdef __NetBSD__ + for (ia = in_ifaddr.tqh_first; ia; ia = ia->ia_list.tqe_next) +#elif defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) +#else + for (ia = in_ifaddr; ia; ia = ia->ia_next) +#endif + if (bcmp(addr, + (caddr_t)&ia->ia_addr.sin_addr, + _INALENBYAF(family)) == 0) + return 1; + } + break; +#if INET6 + case AF_INET6: + return key_ismyaddr6(addr); +#endif + } + + return 0; +} + +#if INET6 +/* + * compare my own address for IPv6. + * 1: ours + * 0: other + * NOTE: derived ip6_input() in KAME. This is necessary to modify more. + */ +#include + +static int +key_ismyaddr6(addr) + caddr_t addr; +{ + struct in6_addr *a = (struct in6_addr *)addr; + struct in6_ifaddr *ia; + + for (ia = in6_ifaddr; ia; ia = ia->ia_next) { + if (bcmp(addr, (caddr_t)&ia->ia_addr.sin6_addr, + _INALENBYAF(AF_INET6)) == 0) { + return 1; + } + + /* XXX Multicast */ + { + struct in6_multi *in6m = 0; + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + IN6_LOOKUP_MULTI(*(struct in6_addr *)addr, ia->ia_ifp, in6m); +#else + for ((in6m) = ia->ia6_multiaddrs.lh_first; + (in6m) != NULL && + !IN6_ARE_ADDR_EQUAL(&(in6m)->in6m_addr, a); + (in6m) = in6m->in6m_entry.le_next) + continue; +#endif + if (in6m) + return 1; + } + } + + /* loopback, just for safety */ + if (IN6_IS_ADDR_LOOPBACK(a)) + return 1; + +#if 0 + /* FAITH */ + if (ip6_keepfaith && + (a->s6_addr32[0] == ip6_faith_prefix.s6_addr32[0] && + a->s6_addr32[1] == ip6_faith_prefix.s6_addr32[1] && + a->s6_addr32[2] == ip6_faith_prefix.s6_addr32[2])) + return 1; +#endif + + /* XXX anycast */ + + return 0; +} +#endif /*INET6*/ + +#if 0 +/* checking address is whether loopback or not. 
+ * OUT: 1: true + * 0: false + */ +static int +key_isloopback(family, addr) + u_int family; + caddr_t addr; +{ + switch (family) { + case PF_INET: + if (((caddr_t)addr)[0] == IN_LOOPBACKNET) + return 1; + break; +#if INET6 + case PF_INET6: + if (IN6_IS_ADDR_LOOPBACK((struct in6_addr *)addr)) + return 1; + break; +#endif /* INET6 */ + default: +#if IPSEC_DEBUG + printf("key_isloopback: unknown address family=%d.\n", family); +#endif + return 0; + } + + return 0; +} +#endif + +/* + * compare two secasindex structure exactly. + * IN: + * saidx0: source, it can be in SAD. + * saidx1: object. + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_cmpsaidx_exactly(saidx0, saidx1) + struct secasindex *saidx0, *saidx1; +{ + /* sanity */ + if (saidx0 == NULL && saidx1 == NULL) + return 1; + + if (saidx0 == NULL || saidx1 == NULL) + return 0; + + if (saidx0->proto != saidx1->proto + || saidx0->mode != saidx1->mode + || saidx0->reqid != saidx1->reqid) + return 0; + + if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.ss_len) != 0 + || bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.ss_len) != 0) + return 0; + + return 1; +} + +/* + * compare two secasindex structure with consideration mode. + * don't compare port. + * IN: + * saidx0: source, it is often in SAD. + * saidx1: object, it is often from SPD. + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_cmpsaidx_withmode(saidx0, saidx1) + struct secasindex *saidx0, *saidx1; +{ + /* sanity */ + if (saidx0 == NULL && saidx1 == NULL) + return 1; + + if (saidx0 == NULL || saidx1 == NULL) + return 0; + + if (saidx0->proto != saidx1->proto + || saidx0->src.ss_family != saidx1->src.ss_family + || saidx0->dst.ss_family != saidx1->dst.ss_family) + return 0; + + /* + * If reqid of SPD is non-zero, unique SA is required. + * The result must be of same reqid in this case. 
+ */ + if (saidx1->reqid != 0 + && saidx0->reqid != saidx1->reqid) + return 0; + + if (saidx0->mode != IPSEC_MODE_ANY + && saidx0->mode != saidx1->mode) + return 0; + + { + int sa_len = _INALENBYAF(saidx0->src.ss_family); + + if (bcmp(_INADDRBYSA(&saidx0->src), _INADDRBYSA(&saidx1->src), sa_len) + || bcmp(_INADDRBYSA(&saidx0->dst), _INADDRBYSA(&saidx1->dst), sa_len)) + return 0; + } + + return 1; +} + +/* + * compare two secindex structure exactly. + * IN: + * spidx0: source, it is often in SPD. + * spidx1: object, it is often from PFKEY message. + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_cmpspidx_exactly(spidx0, spidx1) + struct secpolicyindex *spidx0, *spidx1; +{ + /* sanity */ + if (spidx0 == NULL && spidx1 == NULL) + return 1; + + if (spidx0 == NULL || spidx1 == NULL) + return 0; + + if (spidx0->prefs != spidx1->prefs + || spidx0->prefd != spidx1->prefd + || spidx0->ul_proto != spidx1->ul_proto) + return 0; + + if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.ss_len) != 0 + || bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.ss_len) != 0) + return 0; + + return 1; +} + +/* + * compare two secindex structure with mask. + * IN: + * spidx0: source, it is often in SPD. + * spidx1: object, it is often from IP header. + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_cmpspidx_withmask(spidx0, spidx1) + struct secpolicyindex *spidx0, *spidx1; +{ + /* sanity */ + if (spidx0 == NULL && spidx1 == NULL) + return 1; + + if (spidx0 == NULL || spidx1 == NULL) + return 0; + + if (spidx0->src.ss_family != spidx1->src.ss_family + || spidx0->dst.ss_family != spidx1->dst.ss_family) + return 0; + + /* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. 
*/ + if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY + && spidx0->ul_proto != spidx1->ul_proto) + return 0; + + if (_INPORTBYSA(&spidx0->src) != IPSEC_PORT_ANY + && _INPORTBYSA(&spidx0->src) != _INPORTBYSA(&spidx1->src)) + return 0; + + if (_INPORTBYSA(&spidx0->dst) != IPSEC_PORT_ANY + && _INPORTBYSA(&spidx0->dst) != _INPORTBYSA(&spidx1->dst)) + return 0; + + if (!key_bbcmp(_INADDRBYSA(&spidx0->src), + _INADDRBYSA(&spidx1->src), + spidx0->prefs)) + return 0; + + if (!key_bbcmp(_INADDRBYSA(&spidx0->dst), + _INADDRBYSA(&spidx1->dst), + spidx0->prefd)) + return 0; + + /* XXX Do we check other field ? e.g. flowinfo, scope_id. */ + + return 1; +} + +/* + * compare two buffers with mask. + * IN: + * addr1: source + * addr2: object + * bits: Number of bits to compare + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_bbcmp(p1, p2, bits) + register caddr_t p1, p2; + register u_int bits; +{ + u_int8_t mask; + + /* XXX: This could be considerably faster if we compare a word + * at a time, but it is complicated on LSB Endian machines */ + + /* Handle null pointers */ + if (p1 == NULL || p2 == NULL) + return (p1 == p2); + + while (bits >= 8) { + if (*p1++ != *p2++) + return 0; + bits -= 8; + } + + if (bits > 0) { + mask = ~((1<<(8-bits))-1); + if ((*p1 & mask) != (*p2 & mask)) + return 0; + } + return 1; /* Match! */ +} + +/* + * time handler. + * scanning SPD and SAD to check status for each entries, + * and do to remove or to expire. 
+ */ +void +key_timehandler(void) +{ + u_int dir; + int s; + + thread_funnel_set(network_flock, TRUE); +#if __NetBSD__ + s = splsoftnet(); /*called from softclock()*/ +#else + s = splnet(); /*called from softclock()*/ +#endif + + /* SPD */ + { + struct secpolicy *sp, *nextsp; + + for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { + for (sp = LIST_FIRST(&sptree[dir]); + sp != NULL; + sp = nextsp) { + + nextsp = LIST_NEXT(sp, chain); + + if (sp->state == IPSEC_SPSTATE_DEAD) + key_freesp(sp); + } + } + } + + /* SAD */ + { + struct secashead *sah, *nextsah; + struct secasvar *sav, *nextsav; + + for (sah = LIST_FIRST(&sahtree); + sah != NULL; + sah = nextsah) { + + nextsah = LIST_NEXT(sah, chain); + + /* if sah has been dead, then delete it and process next sah. */ + if (sah->state == SADB_SASTATE_DEAD) { + key_delsah(sah); + continue; + } + + /* if LARVAL entry doesn't become MATURE, delete it. */ + for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_LARVAL]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); + + sav->tick++; + + if (key_larval_lifetime < sav->tick) { + key_freesav(sav); + } + } + + /* + * check MATURE entry to start to send expire message + * whether or not. + */ + for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_MATURE]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); + + sav->tick++; + + /* we don't need to check. */ + if (sav->lft_s == NULL) + continue; + + /* sanity check */ + if (sav->lft_c == NULL) { +#if IPSEC_DEBUG + printf("key_timehandler: " + "There is no CURRENT time, why?\n"); +#endif + continue; + } + + /* compare SOFT lifetime and tick */ + if (sav->lft_s->sadb_lifetime_addtime != 0 + && sav->lft_s->sadb_lifetime_addtime < sav->tick) { + /* + * check SA to be used whether or not. + * when SA hasn't been used, delete it. 
+ */ + if (sav->lft_c->sadb_lifetime_usetime == 0) { + key_sa_chgstate(sav, SADB_SASTATE_DEAD); + key_freesav(sav); + sav = NULL; + } else { + key_sa_chgstate(sav, SADB_SASTATE_DYING); + /* + * XXX If we keep to send expire + * message in the status of + * DYING. Do remove below code. + */ + key_expire(sav); + } + } + /* check SOFT lifetime by bytes */ + /* + * XXX I don't know the way to delete this SA + * when new SA is installed. Caution when it's + * installed too big lifetime by time. + */ + else if (sav->lft_s->sadb_lifetime_bytes != 0 + && sav->lft_s->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) { + + key_sa_chgstate(sav, SADB_SASTATE_DYING); + /* + * XXX If we keep to send expire + * message in the status of + * DYING. Do remove below code. + */ + key_expire(sav); + } + } + + /* check DYING entry to change status to DEAD. */ + for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_DYING]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); + + sav->tick++; + + /* we don't need to check. */ + if (sav->lft_h == NULL) + continue; + + /* sanity check */ + if (sav->lft_c == NULL) { +#if IPSEC_DEBUG + printf("key_timehandler: " + "There is no CURRENT time, why?\n"); +#endif + continue; + } + + /* compare HARD lifetime and tick */ + if (sav->lft_h->sadb_lifetime_addtime != 0 + && sav->lft_h->sadb_lifetime_addtime < sav->tick) { + key_sa_chgstate(sav, SADB_SASTATE_DEAD); + key_freesav(sav); + sav = NULL; + } +#if 0 /* XXX Should we keep to send expire message until HARD lifetime ? */ + else if (sav->lft_s != NULL + && sav->lft_s->sadb_lifetime_addtime != 0 + && sav->lft_s->sadb_lifetime_addtime < sav->tick) { + /* + * XXX: should be checked to be + * installed the valid SA. + */ + + /* + * If there is no SA then sending + * expire message. 
+ */ + key_expire(sav); + } +#endif + /* check HARD lifetime by bytes */ + else if (sav->lft_h->sadb_lifetime_bytes != 0 + && sav->lft_h->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) { + key_sa_chgstate(sav, SADB_SASTATE_DEAD); + key_freesav(sav); + sav = NULL; + } + } + + /* delete entry in DEAD */ + for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_DEAD]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); + + /* sanity check */ + if (sav->state != SADB_SASTATE_DEAD) { +#if IPSEC_DEBUG + printf("key_timehandler: " + "invalid sav->state " + "(queue: %d SA: %d): " + "kill it anyway\n", + SADB_SASTATE_DEAD, sav->state); +#endif + } + + /* + * do not call key_freesav() here. + * sav should already be freed, and sav->refcnt + * shows other references to sav + * (such as from SPD). + */ + } + } + } + +#ifndef IPSEC_NONBLOCK_ACQUIRE + /* ACQ tree */ + { + struct secacq *acq, *nextacq; + + for (acq = LIST_FIRST(&acqtree); + acq != NULL; + acq = nextacq) { + + nextacq = LIST_NEXT(acq, chain); + + acq->tick++; + + if (key_blockacq_lifetime < acq->tick && __LIST_CHAINED(acq)) { + LIST_REMOVE(acq, chain); + KFREE(acq); + } + } + } +#endif + + /* SP ACQ tree */ + { + struct secspacq *acq, *nextacq; + + for (acq = LIST_FIRST(&spacqtree); + acq != NULL; + acq = nextacq) { + + nextacq = LIST_NEXT(acq, chain); + + acq->tick++; + + if (key_blockacq_lifetime < acq->tick && __LIST_CHAINED(acq)) { + LIST_REMOVE(acq, chain); + KFREE(acq); + } + } + } + + /* initialize random seed */ + if (key_tick_init_random++ > key_int_random) { + key_tick_init_random = 0; + key_srandom(); + } + +#ifndef IPSEC_DEBUG2 + /* do exchange to tick time !! 
*/ + (void)timeout((void *)key_timehandler, (void *)0, 100); +#endif /* IPSEC_DEBUG2 */ + + splx(s); + thread_funnel_set(network_funnel, FALSE); + return; +} + +/* + * to initialize a seed for random() + */ +void +key_srandom() +{ + struct timeval tv; +#ifdef __bsdi__ + extern long randseed; /* it's defined at i386/i386/random.s */ +#endif /* __bsdi__ */ + + microtime(&tv); + +#if defined(__FreeBSD__) + srandom(tv.tv_usec); +#endif /* __FreeBSD__ */ +#if defined(__APPLE__) + random(); +#endif +#ifdef __bsdi__ + randseed = tv.tv_usec; +#endif /* __bsdi__ */ + + return; +} + +/* + * map SADB_SATYPE_* to IPPROTO_*. + * if satype == SADB_SATYPE then satype is mapped to ~0. + * OUT: + * 0: invalid satype. + */ +static u_int16_t +key_satype2proto(satype) + u_int8_t satype; +{ + switch (satype) { + case SADB_SATYPE_UNSPEC: + return IPSEC_PROTO_ANY; + case SADB_SATYPE_AH: + return IPPROTO_AH; + case SADB_SATYPE_ESP: + return IPPROTO_ESP; +#if 1 /*nonstandard*/ + case SADB_X_SATYPE_IPCOMP: + return IPPROTO_IPCOMP; + break; +#endif + default: + return 0; + } + /* NOTREACHED */ +} + +/* + * map IPPROTO_* to SADB_SATYPE_* + * OUT: + * 0: invalid protocol type. + */ +static u_int8_t +key_proto2satype(proto) + u_int16_t proto; +{ + switch (proto) { + case IPPROTO_AH: + return SADB_SATYPE_AH; + case IPPROTO_ESP: + return SADB_SATYPE_ESP; +#if 1 /*nonstandard*/ + case IPPROTO_IPCOMP: + return SADB_X_SATYPE_IPCOMP; + break; +#endif + default: + return 0; + } + /* NOTREACHED */ +} + +/* %%% PF_KEY */ +/* + * SADB_GETSPI processing is to receive + * + * from the IKMPd, to assign a unique spi value, to hang on the INBOUND + * tree with the status of LARVAL, and send + * + * to the IKMPd. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. 
+ */ +static struct sadb_msg * +key_getspi(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_address *src0, *dst0; + struct secasindex saidx; + struct secashead *newsah; + struct secasvar *newsav; + u_int8_t proto; + u_int32_t spi; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_getspi: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + if (mhp[SADB_EXT_ADDRESS_SRC] == NULL + || mhp[SADB_EXT_ADDRESS_DST] == NULL) { +#if IPSEC_DEBUG + printf("key_getspi: invalid message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]); + dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]); + + /* map satype to proto */ + if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) { +#if IPSEC_DEBUG + printf("key_getspi: invalid satype is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + KEY_SETSECASIDX(proto, msg0, src0+1, dst0+1, &saidx); + + /* SPI allocation */ + spi = key_do_getnewspi((struct sadb_spirange *)mhp[SADB_EXT_SPIRANGE], + &saidx); + if (spi == 0) { + msg0->sadb_msg_errno = EEXIST; + return NULL; + } + + /* get a SA index */ + if ((newsah = key_getsah(&saidx)) == NULL) { + + /* create a new SA index */ + if ((newsah = key_newsah(&saidx)) == NULL) { +#if IPSEC_DEBUG + printf("key_getspi: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + } + + /* get a new SA */ + if ((newsav = key_newsav(mhp, newsah)) == NULL) { + msg0->sadb_msg_errno = ENOBUFS; + /* XXX don't free new SA index allocated in above. */ + return NULL; + } + + /* set spi */ + newsav->spi = htonl(spi); + +#ifndef IPSEC_NONBLOCK_ACQUIRE + /* delete the entry in acqtree */ + if (msg0->sadb_msg_seq != 0) { + struct secacq *acq; + if ((acq = key_getacqbyseq(msg0->sadb_msg_seq)) != NULL) { + /* reset counter in order to deletion by timehander. 
*/ + acq->tick = key_blockacq_lifetime; + acq->count = 0; + } + } +#endif + + { + struct sadb_msg *newmsg; + u_int len; + caddr_t p; + + /* create new sadb_msg to reply. */ + len = sizeof(struct sadb_msg) + + sizeof(struct sadb_sa) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_SRC]) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_DST]); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_getspi: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_seq = newsav->seq; + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + p = (caddr_t)newmsg + sizeof(*msg0); + + { + struct sadb_sa *m_sa; + m_sa = (struct sadb_sa *)p; + m_sa->sadb_sa_len = PFKEY_UNIT64(sizeof(struct sadb_sa)); + m_sa->sadb_sa_exttype = SADB_EXT_SA; + m_sa->sadb_sa_spi = htonl(spi); + p += sizeof(struct sadb_sa); + } + + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_SRC]); + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_DST]); + + return newmsg; + } +} + +/* + * allocating new SPI + * called by key_getspi(). + * OUT: + * 0: failure. + * others: success. 
+ */ +static u_int32_t +key_do_getnewspi(spirange, saidx) + struct sadb_spirange *spirange; + struct secasindex *saidx; +{ + u_int32_t newspi; + u_int32_t min, max; + int count = key_spi_trycnt; + + /* set spi range to allocate */ + if (spirange != NULL) { + min = spirange->sadb_spirange_min; + max = spirange->sadb_spirange_max; + } else { + min = key_spi_minval; + max = key_spi_maxval; + } + /* IPCOMP needs 2-byte SPI */ + if (saidx->proto == IPPROTO_IPCOMP) { + u_int32_t t; + if (min >= 0x10000) + min = 0xffff; + if (max >= 0x10000) + max = 0xffff; + if (min > max) { + t = min; min = max; max = t; + } + } + + if (min == max) { + if (key_checkspidup(saidx, min) != NULL) { +#if IPSEC_DEBUG + printf("key_do_getnewspi: SPI %u exists already.\n", min); +#endif + return 0; + } + + count--; /* taking one cost. */ + newspi = min; + + } else { + + /* init SPI */ + newspi = 0; + + /* when requesting to allocate spi ranged */ + while (count--) { + /* generate pseudo-random SPI value ranged. */ + newspi = min + (random() % ( max - min + 1 )); + + if (key_checkspidup(saidx, newspi) == NULL) + break; + } + + if (count == 0 || newspi == 0) { +#if IPSEC_DEBUG + printf("key_do_getnewspi: to allocate spi is failed.\n"); +#endif + return 0; + } + } + + /* statistics */ + keystat.getspi_count = + (keystat.getspi_count + key_spi_trycnt - count) / 2; + + return newspi; +} + +/* + * SADB_UPDATE processing + * receive + * + * from the ikmpd, and update a secasvar entry whose status is SADB_SASTATE_LARVAL. + * and send + * + * to the ikmpd. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. 
+ */ +static struct sadb_msg * +key_update(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_sa *sa0; + struct sadb_address *src0, *dst0; + struct secasindex saidx; + struct secashead *sah; + struct secasvar *sav; + u_int16_t proto; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_update: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* map satype to proto */ + if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) { +#if IPSEC_DEBUG + printf("key_update: invalid satype is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + if (mhp[SADB_EXT_SA] == NULL + || mhp[SADB_EXT_ADDRESS_SRC] == NULL + || mhp[SADB_EXT_ADDRESS_DST] == NULL + || (msg0->sadb_msg_satype == SADB_SATYPE_ESP + && mhp[SADB_EXT_KEY_ENCRYPT] == NULL) + || (msg0->sadb_msg_satype == SADB_SATYPE_AH + && mhp[SADB_EXT_KEY_AUTH] == NULL) + || (mhp[SADB_EXT_LIFETIME_HARD] != NULL + && mhp[SADB_EXT_LIFETIME_SOFT] == NULL) + || (mhp[SADB_EXT_LIFETIME_HARD] == NULL + && mhp[SADB_EXT_LIFETIME_SOFT] != NULL)) { +#if IPSEC_DEBUG + printf("key_update: invalid message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + sa0 = (struct sadb_sa *)mhp[SADB_EXT_SA]; + src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]); + dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]); + + KEY_SETSECASIDX(proto, msg0, src0+1, dst0+1, &saidx); + + /* get a SA header */ + if ((sah = key_getsah(&saidx)) == NULL) { +#if IPSEC_DEBUG + printf("key_update: no SA index found.\n"); +#endif + msg0->sadb_msg_errno = ENOENT; + return NULL; + } + + /* set spidx if there */ + if (key_setident(sah, mhp) < 0) + return NULL; + + /* find a SA with sequence number. 
*/ +#if IPSEC_DOSEQCHECK + if (msg0->sadb_msg_seq != 0 + && (sav = key_getsavbyseq(sah, msg0->sadb_msg_seq)) == NULL) { +#if IPSEC_DEBUG + printf("key_update: no larval SA with sequence %u exists.\n", + msg0->sadb_msg_seq); +#endif + msg0->sadb_msg_errno = ENOENT; + return NULL; + } +#else + if ((sav = key_getsavbyspi(sah, sa0->sadb_sa_spi)) == NULL) { +#if IPSEC_DEBUG + printf("key_update: no such a SA found (spi:%u)\n", + (u_int32_t)ntohl(sa0->sadb_sa_spi)); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } +#endif + + /* validity check */ + if (sav->sah->saidx.proto != proto) { +#if IPSEC_DEBUG + printf("key_update: protocol mismatched (DB=%u param=%u)\n", + sav->sah->saidx.proto, proto); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } +#if IPSEC_DOSEQCHECK + if (sav->spi != sa0->sadb_sa_spi) { +#if IPSEC_DEBUG + printf("key_update: SPI mismatched (DB:%u param:%u)\n", + (u_int32_t)ntohl(sav->spi), + (u_int32_t)ntohl(sa0->sadb_sa_spi)); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } +#endif + if (sav->pid != msg0->sadb_msg_pid) { +#if IPSEC_DEBUG + printf("key_update: pid mismatched (DB:%u param:%u)\n", + sav->pid, msg0->sadb_msg_pid); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* copy sav values */ + if (key_setsaval(sav, mhp)) { + key_freesav(sav); + return NULL; + } + + /* check SA values to be mature. */ + if ((msg0->sadb_msg_errno = key_mature(sav)) != 0) { + key_freesav(sav); + return NULL; + } + + { + struct sadb_msg *newmsg; + + /* set msg buf from mhp */ + if ((newmsg = key_getmsgbuf_x1(mhp)) == NULL) { +#if IPSEC_DEBUG + printf("key_update: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + return newmsg; + } +} + +/* + * search SAD with sequence for a SA which state is SADB_SASTATE_LARVAL. + * only called by key_update(). + * OUT: + * NULL : not found + * others : found, pointer to a SA. 
+ */ +#if IPSEC_DOSEQCHECK +static struct secasvar * +key_getsavbyseq(sah, seq) + struct secashead *sah; + u_int32_t seq; +{ + struct secasvar *sav; + u_int state; + + state = SADB_SASTATE_LARVAL; + + /* search SAD with sequence number ? */ + LIST_FOREACH(sav, &sah->savtree[state], chain) { + + KEY_CHKSASTATE(state, sav->state, "key_getsabyseq"); + + if (sav->seq == seq) { + sav->refcnt++; + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP key_getsavbyseq cause " + "refcnt++:%d SA:%p\n", + sav->refcnt, sav)); + return sav; + } + } + + return NULL; +} +#endif + +/* + * SADB_ADD processing + * add a entry to SA database, when received + * + * from the ikmpd, + * and send + * + * to the ikmpd. + * + * IGNORE identity and sensitivity messages. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. + */ +static struct sadb_msg * +key_add(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_sa *sa0; + struct sadb_address *src0, *dst0; + struct secasindex saidx; + struct secashead *newsah; + struct secasvar *newsav; + u_int16_t proto; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_add: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* map satype to proto */ + if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) { +#if IPSEC_DEBUG + printf("key_add: invalid satype is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + if (mhp[SADB_EXT_SA] == NULL + || mhp[SADB_EXT_ADDRESS_SRC] == NULL + || mhp[SADB_EXT_ADDRESS_DST] == NULL + || (msg0->sadb_msg_satype == SADB_SATYPE_ESP + && mhp[SADB_EXT_KEY_ENCRYPT] == NULL) + || (msg0->sadb_msg_satype == SADB_SATYPE_AH + && mhp[SADB_EXT_KEY_AUTH] == NULL) + || (mhp[SADB_EXT_LIFETIME_HARD] != NULL + && mhp[SADB_EXT_LIFETIME_SOFT] == NULL) + || (mhp[SADB_EXT_LIFETIME_HARD] == NULL + && mhp[SADB_EXT_LIFETIME_SOFT] != NULL)) { +#if IPSEC_DEBUG + printf("key_add: invalid 
message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + sa0 = (struct sadb_sa *)mhp[SADB_EXT_SA]; + src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]); + dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]); + + KEY_SETSECASIDX(proto, msg0, src0+1, dst0+1, &saidx); + + /* get a SA header */ + if ((newsah = key_getsah(&saidx)) == NULL) { + + /* create a new SA header */ + if ((newsah = key_newsah(&saidx)) == NULL) { +#if IPSEC_DEBUG + printf("key_add: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + } + + /* set spidx if there */ + if (key_setident(newsah, mhp) < 0) + return NULL; + + /* create new SA entry. */ + /* We can create new SA only if SPI is differenct. */ + if (key_getsavbyspi(newsah, sa0->sadb_sa_spi)) { +#if IPSEC_DEBUG + printf("key_add: SA already exists.\n"); +#endif + msg0->sadb_msg_errno = EEXIST; + return NULL; + } + if ((newsav = key_newsav(mhp, newsah)) == NULL) + return NULL; + + /* check SA values to be mature. */ + if ((msg0->sadb_msg_errno = key_mature(newsav)) != NULL) { + key_freesav(newsav); + return NULL; + } + + /* + * don't call key_freesav() here, as we would like to keep the SA + * in the database on success. 
+ */ + + { + struct sadb_msg *newmsg; + + /* set msg buf from mhp */ + if ((newmsg = key_getmsgbuf_x1(mhp)) == NULL) { +#if IPSEC_DEBUG + printf("key_add: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + + return newmsg; + } +} + +static int +key_setident(sah, mhp) + struct secashead *sah; + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_ident *idsrc, *iddst; + int idsrclen, iddstlen; + + /* sanity check */ + if (sah == NULL || mhp == NULL || mhp[0] == NULL) + panic("key_setident: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* don't make buffer if not there */ + if (mhp[SADB_EXT_IDENTITY_SRC] == NULL + && mhp[SADB_EXT_IDENTITY_DST] == NULL) { + sah->idents = NULL; + sah->identd = NULL; + return 0; + } + + if (mhp[SADB_EXT_IDENTITY_SRC] == NULL + || mhp[SADB_EXT_IDENTITY_DST] == NULL) { +#if IPSEC_DEBUG + printf("key_setident: invalid identity.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return -1; + } + + idsrc = (struct sadb_ident *)mhp[SADB_EXT_IDENTITY_SRC]; + iddst = (struct sadb_ident *)mhp[SADB_EXT_IDENTITY_DST]; + idsrclen = PFKEY_UNUNIT64(idsrc->sadb_ident_len); + iddstlen = PFKEY_UNUNIT64(idsrc->sadb_ident_len); + + /* validity check */ + if (idsrc->sadb_ident_type != iddst->sadb_ident_type) { +#if IPSEC_DEBUG + printf("key_setident: ident type mismatch.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return -1; + } + + switch (idsrc->sadb_ident_type) { + case SADB_X_IDENTTYPE_ADDR: + if (idsrclen != + sizeof(*idsrc) + ((struct sockaddr *)(idsrc + 1))->sa_len + || iddstlen != + sizeof(*iddst) + ((struct sockaddr *)(iddst + 1))->sa_len) { +#if IPSEC_DEBUG + printf("key_setident: invalid length is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return -1; + } + if (((struct sockaddr *)(idsrc + 1))->sa_len > + sizeof(struct sockaddr_storage) + || ((struct sockaddr *)(iddst + 1))->sa_len > + sizeof(struct sockaddr_storage)) { +#if IPSEC_DEBUG + printf("key_setident: invalid 
sa_len is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return -1; + } +#define __IDENTXID(a) ((union sadb_x_ident_id *)&(a)->sadb_ident_id) + if (__IDENTXID(idsrc)->sadb_x_ident_id_addr.ul_proto + != __IDENTXID(iddst)->sadb_x_ident_id_addr.ul_proto) { +#if IPSEC_DEBUG + printf("key_setident: ul_proto mismatch.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return -1; + } +#undef __IDENTXID(a) + break; + case SADB_IDENTTYPE_PREFIX: + case SADB_IDENTTYPE_FQDN: + case SADB_IDENTTYPE_USERFQDN: + default: + /* XXX do nothing */ + sah->idents = NULL; + sah->identd = NULL; + return 0; + } + + /* make structure */ + KMALLOC(sah->idents, struct sadb_ident *, idsrclen); + if (sah->idents == NULL) { +#if IPSEC_DEBUG + printf("key_setident: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return -1; + } + KMALLOC(sah->identd, struct sadb_ident *, iddstlen); + if (sah->identd == NULL) { + KFREE(sah->idents); +#if IPSEC_DEBUG + printf("key_setident: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return -1; + } + bcopy(idsrc, sah->idents, idsrclen); + bcopy(iddst, sah->identd, iddstlen); + + return 0; +} + +static struct sadb_msg * +key_getmsgbuf_x1(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_msg *newmsg; + u_int len; + caddr_t p; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_getmsgbuf_x1: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* create new sadb_msg to reply. */ + len = sizeof(struct sadb_msg) + + sizeof(struct sadb_sa) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_SRC]) + + PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_DST]) + + (mhp[SADB_EXT_LIFETIME_HARD] == NULL + ? 0 : sizeof(struct sadb_lifetime)) + + (mhp[SADB_EXT_LIFETIME_SOFT] == NULL + ? 0 : sizeof(struct sadb_lifetime)) + + (mhp[SADB_EXT_IDENTITY_SRC] == NULL + ? 0 : PFKEY_EXTLEN(mhp[SADB_EXT_IDENTITY_SRC])) + + (mhp[SADB_EXT_IDENTITY_DST] == NULL + ? 
0 : PFKEY_EXTLEN(mhp[SADB_EXT_IDENTITY_DST])); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) + return NULL; + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + p = (caddr_t)newmsg + sizeof(*msg0); + + p = key_setsadbext(p, mhp[SADB_EXT_SA]); + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_SRC]); + p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_DST]); + + if (mhp[SADB_EXT_LIFETIME_HARD] != NULL) + p = key_setsadbext(p, mhp[SADB_EXT_LIFETIME_HARD]); + + if (mhp[SADB_EXT_LIFETIME_SOFT] != NULL) + p = key_setsadbext(p, mhp[SADB_EXT_LIFETIME_SOFT]); + + if (mhp[SADB_EXT_IDENTITY_SRC] != NULL) + p = key_setsadbext(p, mhp[SADB_EXT_IDENTITY_SRC]); + if (mhp[SADB_EXT_IDENTITY_DST] != NULL) + p = key_setsadbext(p, mhp[SADB_EXT_IDENTITY_DST]); + + return newmsg; +} + +/* + * SADB_DELETE processing + * receive + * + * from the ikmpd, and set SADB_SASTATE_DEAD, + * and send, + * + * to the ikmpd. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. 
 */
static struct sadb_msg *
key_delete(mhp)
	caddr_t *mhp;
{
	struct sadb_msg *msg0;
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav;
	u_int16_t proto;

	/* sanity check */
	if (mhp == NULL || mhp[0] == NULL)
		panic("key_delete: NULL pointer is passed.\n");

	msg0 = (struct sadb_msg *)mhp[0];

	/* map satype to proto */
	if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) {
#if IPSEC_DEBUG
		printf("key_delete: invalid satype is passed.\n");
#endif
		msg0->sadb_msg_errno = EINVAL;
		return NULL;
	}

	/* SA and both addresses are mandatory to identify the victim SA */
	if (mhp[SADB_EXT_SA] == NULL
	 || mhp[SADB_EXT_ADDRESS_SRC] == NULL
	 || mhp[SADB_EXT_ADDRESS_DST] == NULL) {
#if IPSEC_DEBUG
		printf("key_delete: invalid message is passed.\n");
#endif
		msg0->sadb_msg_errno = EINVAL;
		return NULL;
	}
	sa0 = (struct sadb_sa *)mhp[SADB_EXT_SA];
	src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]);

	/* src0+1 / dst0+1 skip the header to the sockaddr that follows it */
	KEY_SETSECASIDX(proto, msg0, src0+1, dst0+1, &saidx);

	/* get a SA header */
	if ((sah = key_getsah(&saidx)) == NULL) {
#if IPSEC_DEBUG
		printf("key_delete: no SA found.\n");
#endif
		msg0->sadb_msg_errno = ENOENT;
		return NULL;
	}

	/* get a SA with SPI. */
	sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
	if (sav == NULL) {
#if IPSEC_DEBUG
		printf("key_delete: no alive SA found.\n");
#endif
		msg0->sadb_msg_errno = ENOENT;
		return NULL;
	}

	/* mark DEAD and drop both the lookup ref and the tree's ref */
	key_sa_chgstate(sav, SADB_SASTATE_DEAD);
	key_freesav(sav);
	sav = NULL;

    {
	struct sadb_msg *newmsg;
	u_int len;
	caddr_t p;

	/* create new sadb_msg to reply. */
	len = sizeof(struct sadb_msg)
		+ sizeof(struct sadb_sa)
		+ PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_SRC])
		+ PFKEY_EXTLEN(mhp[SADB_EXT_ADDRESS_DST]);

	KMALLOC(newmsg, struct sadb_msg *, len);
	if (newmsg == NULL) {
#if IPSEC_DEBUG
		printf("key_delete: No more memory.\n");
#endif
		msg0->sadb_msg_errno = ENOBUFS;
		return NULL;
	}
	bzero((caddr_t)newmsg, len);

	/* echo the request header followed by SA and address extensions */
	bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0));
	newmsg->sadb_msg_errno = 0;
	newmsg->sadb_msg_len = PFKEY_UNIT64(len);
	p = (caddr_t)newmsg + sizeof(*msg0);

	p = key_setsadbext(p, mhp[SADB_EXT_SA]);
	p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_SRC]);
	p = key_setsadbext(p, mhp[SADB_EXT_ADDRESS_DST]);

	return newmsg;
    }
}

/*
 * SADB_GET processing
 * receive
 *
 * from the ikmpd, and get a SP and a SA to respond,
 * and send,
 *
 * to the ikmpd.
 *
 * IN: mhp: pointer to the pointer to each header.
 * OUT: NULL if fail.
 *	other if success, return pointer to the message to send.
 */
static struct sadb_msg *
key_get(mhp)
	caddr_t *mhp;
{
	struct sadb_msg *msg0;
	struct sadb_sa *sa0;
	struct sadb_address *src0, *dst0;
	struct secasindex saidx;
	struct secashead *sah;
	struct secasvar *sav;
	u_int16_t proto;

	/* sanity check */
	if (mhp == NULL || mhp[0] == NULL)
		panic("key_get: NULL pointer is passed.\n");

	msg0 = (struct sadb_msg *)mhp[0];

	/* map satype to proto */
	if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) {
#if IPSEC_DEBUG
		printf("key_get: invalid satype is passed.\n");
#endif
		msg0->sadb_msg_errno = EINVAL;
		return NULL;
	}

	/* SA and both addresses are mandatory to identify the SA */
	if (mhp[SADB_EXT_SA] == NULL
	 || mhp[SADB_EXT_ADDRESS_SRC] == NULL
	 || mhp[SADB_EXT_ADDRESS_DST] == NULL) {
#if IPSEC_DEBUG
		printf("key_get: invalid message is passed.\n");
#endif
		msg0->sadb_msg_errno = EINVAL;
		return NULL;
	}
	sa0 = (struct sadb_sa *)mhp[SADB_EXT_SA];
	src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]);
	dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]);

	KEY_SETSECASIDX(proto, msg0, src0+1, dst0+1, &saidx);

	/* get a SA header */
	if ((sah = key_getsah(&saidx)) == NULL) {
#if IPSEC_DEBUG
		printf("key_get: no SA found.\n");
#endif
		msg0->sadb_msg_errno = ENOENT;
		return NULL;
	}

	/* get a SA with SPI. */
	sav = key_getsavbyspi(sah, sa0->sadb_sa_spi);
	if (sav == NULL) {
#if IPSEC_DEBUG
		printf("key_get: no SA with state of mature found.\n");
#endif
		msg0->sadb_msg_errno = ENOENT;
		return NULL;
	}

    {
	struct sadb_msg *newmsg;
	u_int len;
	u_int8_t satype;

	/* map proto to satype */
	if ((satype = key_proto2satype(sah->saidx.proto)) == 0) {
#if IPSEC_DEBUG
		printf("key_get: there was invalid proto in SAD.\n");
#endif
		msg0->sadb_msg_errno = EINVAL;
		return NULL;
	}

	/* calculate a length of message buffer */
	len = key_getmsglen(sav);

	KMALLOC(newmsg, struct sadb_msg *, len);
	if (newmsg == NULL) {
#if IPSEC_DEBUG
		printf("key_get: No more memory.\n");
#endif
		msg0->sadb_msg_errno = ENOBUFS;
		return NULL;
	}

	/* create new sadb_msg to reply. */
	(void)key_setdumpsa(newmsg, sav, SADB_GET,
		satype, msg0->sadb_msg_seq, msg0->sadb_msg_pid);

	return newmsg;
    }
}

/*
 * SADB_ACQUIRE processing called by key_checkrequest() and key_acquire2().
 * send
 *
 * to KMD, and expect to receive
 * with SADB_ACQUIRE if error occured,
 * or
 * with SADB_GETSPI
 * from KMD by PF_KEY.
 *
 * sensitivity is not supported.
+ * + * OUT: + * 0 : succeed + * others: error number + */ +static int +key_acquire(saidx, sp) + struct secasindex *saidx; + struct secpolicy *sp; +{ +#ifndef IPSEC_NONBLOCK_ACQUIRE + struct secacq *newacq; +#endif + struct secpolicyindex *spidx = NULL; + u_int8_t satype; + int error; + + /* sanity check */ + if (saidx == NULL || sp == NULL) + panic("key_acquire: NULL pointer is passed.\n"); + if ((satype = key_proto2satype(saidx->proto)) == 0) + panic("key_acquire: invalid proto is passed.\n"); + + spidx = &sp->spidx; + +#ifndef IPSEC_NONBLOCK_ACQUIRE + /* + * We never do anything about acquirng SA. There is anather + * solution that kernel blocks to send SADB_ACQUIRE message until + * getting something message from IKEd. In later case, to be + * managed with ACQUIRING list. + */ + /* get a entry to check whether sending message or not. */ + if ((newacq = key_getacq(saidx)) != NULL) { + if (key_blockacq_count < newacq->count) { + /* reset counter and do send message. */ + newacq->count = 0; + } else { + /* increment counter and do nothing. */ + newacq->count++; + return 0; + } + } else { + /* make new entry for blocking to send SADB_ACQUIRE. */ + if ((newacq = key_newacq(saidx)) == NULL) + return ENOBUFS; + + /* add to acqtree */ + LIST_INSERT_HEAD(&acqtree, newacq, chain); + } +#endif + + { + struct sadb_msg *newmsg = NULL; + union sadb_x_ident_id id; + u_int len; + caddr_t p; + + /* create new sadb_msg to reply. 
*/ + len = sizeof(struct sadb_msg) + + sizeof(struct sadb_address) + + PFKEY_ALIGN8(saidx->src.ss_len) + + sizeof(struct sadb_address) + + PFKEY_ALIGN8(saidx->dst.ss_len) + + sizeof(struct sadb_x_policy) + + sizeof(struct sadb_ident) + + PFKEY_ALIGN8(spidx->src.ss_len) + + sizeof(struct sadb_ident) + + PFKEY_ALIGN8(spidx->dst.ss_len) + + sizeof(struct sadb_prop) + + sizeof(struct sadb_comb); /* XXX to be multiple */ + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == 0) { +#if IPSEC_DEBUG + printf("key_acquire: No more memory.\n"); +#endif + return ENOBUFS; + } + bzero((caddr_t)newmsg, len); + + newmsg->sadb_msg_version = PF_KEY_V2; + newmsg->sadb_msg_type = SADB_ACQUIRE; + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_satype = satype; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + newmsg->sadb_msg_mode = saidx->mode; + newmsg->sadb_msg_reqid = saidx->reqid; + +#ifndef IPSEC_NONBLOCK_ACQUIRE + newmsg->sadb_msg_seq = newacq->seq; +#else + newmsg->sadb_msg_seq = (acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq)); +#endif + + newmsg->sadb_msg_pid = 0; + + p = (caddr_t)newmsg + sizeof(struct sadb_msg); + + /* set sadb_address for saidx's. */ + p = key_setsadbaddr(p, + SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&saidx->src, + _INALENBYAF(saidx->src.ss_family) << 3, + IPSEC_ULPROTO_ANY); + p = key_setsadbaddr(p, + SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&saidx->dst, + _INALENBYAF(saidx->dst.ss_family) << 3, + IPSEC_ULPROTO_ANY); + + /* set sadb_x_policy */ + p = key_setsadbxpolicy(p, sp->policy, sp->spidx.dir, sp->id); + + /* set sadb_address for spidx's. 
*/ + bzero(&id, sizeof(id)); + id.sadb_x_ident_id_addr.prefix = spidx->prefs; + id.sadb_x_ident_id_addr.ul_proto = spidx->ul_proto; + p = key_setsadbident(p, + SADB_EXT_IDENTITY_SRC, + SADB_X_IDENTTYPE_ADDR, + (caddr_t)&spidx->src, + spidx->src.ss_len, + *(u_int64_t *)&id); + + bzero(&id, sizeof(id)); + id.sadb_x_ident_id_addr.prefix = spidx->prefd; + id.sadb_x_ident_id_addr.ul_proto = spidx->ul_proto; + p = key_setsadbident(p, + SADB_EXT_IDENTITY_DST, + SADB_X_IDENTTYPE_ADDR, + (caddr_t)&spidx->dst, + spidx->dst.ss_len, + *(u_int64_t *)&id); + + /* create proposal extension */ + /* set combination extension */ + /* XXX: to be defined by proposal database */ + { + struct sadb_prop *prop; + struct sadb_comb *comb; + + prop = (struct sadb_prop *)p; + prop->sadb_prop_len = PFKEY_UNIT64(sizeof(*prop) + sizeof(*comb)); + /* XXX to be multiple */ + prop->sadb_prop_exttype = SADB_EXT_PROPOSAL; + prop->sadb_prop_replay = 32; /* XXX be variable ? */ + p += sizeof(struct sadb_prop); + + comb = (struct sadb_comb *)p; + comb->sadb_comb_auth = SADB_AALG_SHA1HMAC; /* XXX ??? */ + comb->sadb_comb_encrypt = SADB_EALG_DESCBC; /* XXX ??? 
*/ + comb->sadb_comb_flags = 0; + comb->sadb_comb_auth_minbits = 8; /* XXX */ + comb->sadb_comb_auth_maxbits = 1024; /* XXX */ + comb->sadb_comb_encrypt_minbits = 64; /* XXX */ + comb->sadb_comb_encrypt_maxbits = 64; /* XXX */ + comb->sadb_comb_soft_allocations = 0; + comb->sadb_comb_hard_allocations = 0; + comb->sadb_comb_soft_bytes = 0; + comb->sadb_comb_hard_bytes = 0; + comb->sadb_comb_soft_addtime = 0; + comb->sadb_comb_hard_addtime = 0; + comb->sadb_comb_soft_usetime = 0; + comb->sadb_comb_hard_usetime = 0; + + p += sizeof(*comb); + } + +#if 0 /* XXX Do it ?*/ + if (idexttype && fqdn) { + /* create identity extension (FQDN) */ + struct sadb_ident *id; + int fqdnlen; + + fqdnlen = strlen(fqdn) + 1; /* +1 for terminating-NUL */ + id = (struct sadb_ident *)p; + bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); + id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); + id->sadb_ident_exttype = idexttype; + id->sadb_ident_type = SADB_IDENTTYPE_FQDN; + bcopy(fqdn, id + 1, fqdnlen); + p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen); + } + + if (idexttype) { + /* create identity extension (USERFQDN) */ + struct sadb_ident *id; + int userfqdnlen; + + if (userfqdn) { + /* +1 for terminating-NUL */ + userfqdnlen = strlen(userfqdn) + 1; + } else + userfqdnlen = 0; + id = (struct sadb_ident *)p; + bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); + id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); + id->sadb_ident_exttype = idexttype; + id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN; + /* XXX is it correct? 
*/ + if (curproc && curproc->p_cred) + id->sadb_ident_id = curproc->p_cred->p_ruid; + if (userfqdn && userfqdnlen) + bcopy(userfqdn, id + 1, userfqdnlen); + p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen); + } +#endif + + error = key_sendall(newmsg, len); +#if IPSEC_DEBUG + if (error != 0) + printf("key_acquire: key_sendall returned %d\n", error); +#endif + return error; + } + + return 0; +} + +#ifndef IPSEC_NONBLOCK_ACQUIRE +static struct secacq * +key_newacq(saidx) + struct secasindex *saidx; +{ + struct secacq *newacq; + + /* get new entry */ + KMALLOC(newacq, struct secacq *, sizeof(struct secacq)); + if (newacq == NULL) { +#if IPSEC_DEBUG + printf("key_newacq: No more memory.\n"); +#endif + return NULL; + } + bzero(newacq, sizeof(*newacq)); + + /* copy secindex */ + bcopy(saidx, &newacq->saidx, sizeof(newacq->saidx)); + newacq->seq = (acq_seq == ~0 ? 1 : ++acq_seq); + newacq->tick = 0; + newacq->count = 0; + + return newacq; +} + +static struct secacq * +key_getacq(saidx) + struct secasindex *saidx; +{ + struct secacq *acq; + + LIST_FOREACH(acq, &acqtree, chain) { + if (key_cmpsaidx_exactly(saidx, &acq->saidx)) + return acq; + } + + return NULL; +} + +static struct secacq * +key_getacqbyseq(seq) + u_int32_t seq; +{ + struct secacq *acq; + + LIST_FOREACH(acq, &acqtree, chain) { + if (acq->seq == seq) + return acq; + } + + return NULL; +} +#endif + +static struct secspacq * +key_newspacq(spidx) + struct secpolicyindex *spidx; +{ + struct secspacq *acq; + + /* get new entry */ + KMALLOC(acq, struct secspacq *, sizeof(struct secspacq)); + if (acq == NULL) { +#if IPSEC_DEBUG + printf("key_newspacq: No more memory.\n"); +#endif + return NULL; + } + bzero(acq, sizeof(*acq)); + + /* copy secindex */ + bcopy(spidx, &acq->spidx, sizeof(acq->spidx)); + acq->tick = 0; + acq->count = 0; + + return acq; +} + +static struct secspacq * +key_getspacq(spidx) + struct secpolicyindex *spidx; +{ + struct secspacq *acq; + + LIST_FOREACH(acq, &spacqtree, chain) { + if 
(key_cmpspidx_exactly(spidx, &acq->spidx)) + return acq; + } + + return NULL; +} + +/* + * SADB_ACQUIRE processing, + * in first situation, is receiving + * + * from the ikmpd, and clear sequence of its secasvar entry. + * + * In second situation, is receiving + * + * from a user land process, and return + * + * to the socket. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. + */ +static struct sadb_msg * +key_acquire2(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct sadb_address *src0, *dst0; + struct secasindex saidx; + struct secashead *sah; + u_int16_t proto; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_acquire2: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* + * Error message from KMd. + * We assume that if error was occured in IKEd, the length of PFKEY + * message is equal to the size of sadb_msg structure. + * We return ~0 even if error occured in this function. + */ + if (msg0->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) { + +#ifndef IPSEC_NONBLOCK_ACQUIRE + struct secacq *acq; + + /* check sequence number */ + if (msg0->sadb_msg_seq == 0) { +#if IPSEC_DEBUG + printf("key_acquire2: must specify sequence number.\n"); +#endif + return (struct sadb_msg *)~0; + } + + if ((acq = key_getacqbyseq(msg0->sadb_msg_seq)) == NULL) { +#if IPSEC_DEBUG + printf("key_acquire2: " + "invalid sequence number is passed.\n"); +#endif + return (struct sadb_msg *)~0; + } + + /* reset acq counter in order to deletion by timehander. */ + acq->tick = key_blockacq_lifetime; + acq->count = 0; +#endif + return (struct sadb_msg *)~0; + /* NOTREACHED */ + } + + /* + * This message is from user land. 
+ */ + + /* map satype to proto */ + if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) { +#if IPSEC_DEBUG + printf("key_acquire2: invalid satype is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + if (mhp[SADB_EXT_ADDRESS_SRC] == NULL + || mhp[SADB_EXT_ADDRESS_DST] == NULL + || mhp[SADB_EXT_PROPOSAL] == NULL) { + /* error */ +#if IPSEC_DEBUG + printf("key_acquire2: invalid message is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]); + dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]); + + KEY_SETSECASIDX(proto, msg0, src0+1, dst0+1, &saidx); + + /* get a SA index */ + if ((sah = key_getsah(&saidx)) != NULL) { +#if IPSEC_DEBUG + printf("key_acquire2: a SA exists already.\n"); +#endif + msg0->sadb_msg_errno = EEXIST; + return NULL; + } + + msg0->sadb_msg_errno = key_acquire(&saidx, NULL); + if (msg0->sadb_msg_errno != 0) { +#if IPSEC_DEBUG + printf("key_acquire2: error %d returned " + "from key_acquire.\n", msg0->sadb_msg_errno); +#endif + return NULL; + } + + { + struct sadb_msg *newmsg; + u_int len; + + /* create new sadb_msg to reply. */ + len = PFKEY_UNUNIT64(msg0->sadb_msg_len); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_acquire2: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy(mhp[0], (caddr_t)newmsg, len); + + return newmsg; + } +} + +/* + * SADB_REGISTER processing. + * If SATYPE_UNSPEC has been passed as satype, only return sabd_supported. + * receive + * + * from the ikmpd, and register a socket to send PF_KEY messages, + * and send + * + * to KMD by PF_KEY. + * If socket is detached, must free from regnode. 
+ * OUT: + * 0 : succeed + * others: error number + */ +static struct sadb_msg * +key_register(mhp, so) + caddr_t *mhp; + struct socket *so; +{ + struct sadb_msg *msg0; + struct secreg *reg, *newreg = 0; + + /* sanity check */ + if (mhp == NULL || so == NULL || mhp[0] == NULL) + panic("key_register: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* check for invalid register message */ + if (msg0->sadb_msg_satype >= sizeof(regtree)/sizeof(regtree[0])) { + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* When SATYPE_UNSPEC is specified, only return sabd_supported. */ + if (msg0->sadb_msg_satype == SADB_SATYPE_UNSPEC) + goto setmsg; + + /* check whether existing or not */ + LIST_FOREACH(reg, ®tree[msg0->sadb_msg_satype], chain) { + if (reg->so == so) { +#if IPSEC_DEBUG + printf("key_register: socket exists already.\n"); +#endif + msg0->sadb_msg_errno = EEXIST; + return NULL; + } + } + + /* create regnode */ + KMALLOC(newreg, struct secreg *, sizeof(struct secreg)); + if (newreg == NULL) { +#if IPSEC_DEBUG + printf("key_register: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newreg, sizeof(struct secreg)); + + newreg->so = so; + ((struct keycb *)sotorawcb(so))->kp_registered++; + + /* add regnode to regtree. */ + LIST_INSERT_HEAD(®tree[msg0->sadb_msg_satype], newreg, chain); + + setmsg: + { + struct sadb_msg *newmsg; + struct sadb_supported *sup; + u_int len, alen, elen; + caddr_t p; + + /* create new sadb_msg to reply. 
*/ + alen = sizeof(struct sadb_supported) + + ((SADB_AALG_MAX - 1) * sizeof(struct sadb_alg)); + +#if IPSEC_ESP + elen = sizeof(struct sadb_supported) + + ((SADB_EALG_MAX - 1) * sizeof(struct sadb_alg)); +#else + elen = 0; +#endif + + len = sizeof(struct sadb_msg) + + alen + + elen; + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_register: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + p = (caddr_t)newmsg + sizeof(*msg0); + + /* for authentication algorithm */ + sup = (struct sadb_supported *)p; + sup->sadb_supported_len = PFKEY_UNIT64(alen); + sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH; + p += sizeof(*sup); + + { + int i; + struct sadb_alg *alg; + struct ah_algorithm *algo; + + for (i = 1; i < SADB_AALG_MAX; i++) { + algo = &ah_algorithms[i]; + alg = (struct sadb_alg *)p; + alg->sadb_alg_id = i; + alg->sadb_alg_ivlen = 0; + alg->sadb_alg_minbits = algo->keymin; + alg->sadb_alg_maxbits = algo->keymax; + p += sizeof(struct sadb_alg); + } + } + +#if IPSEC_ESP + /* for encryption algorithm */ + sup = (struct sadb_supported *)p; + sup->sadb_supported_len = PFKEY_UNIT64(elen); + sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT; + p += sizeof(*sup); + + { + int i; + struct sadb_alg *alg; + struct esp_algorithm *algo; + + for (i = 1; i < SADB_EALG_MAX; i++) { + algo = &esp_algorithms[i]; + + alg = (struct sadb_alg *)p; + alg->sadb_alg_id = i; + if (algo && algo->ivlen) { + /* + * give NULL to get the value preferred by algorithm + * XXX SADB_X_EXT_DERIV ? 
+ */ + alg->sadb_alg_ivlen = (*algo->ivlen)(NULL); + } else + alg->sadb_alg_ivlen = 0; + alg->sadb_alg_minbits = algo->keymin; + alg->sadb_alg_maxbits = algo->keymax; + p += sizeof(struct sadb_alg); + } + } +#endif + + return newmsg; + } +} + +/* + * free secreg entry registered. + * XXX: I want to do free a socket marked done SADB_RESIGER to socket. + */ +void +key_freereg(so) + struct socket *so; +{ + struct secreg *reg; + int i; + + /* sanity check */ + if (so == NULL) + panic("key_freereg: NULL pointer is passed.\n"); + + /* + * check whether existing or not. + * check all type of SA, because there is a potential that + * one socket is registered to multiple type of SA. + */ + for (i = 0; i <= SADB_SATYPE_MAX; i++) { + LIST_FOREACH(reg, ®tree[i], chain) { + if (reg->so == so + && __LIST_CHAINED(reg)) { + LIST_REMOVE(reg, chain); + KFREE(reg); + break; + } + } + } + + return; +} + +/* + * SADB_EXPIRE processing + * send + * + * to KMD by PF_KEY. + * NOTE: We send only soft lifetime extension. + * + * OUT: 0 : succeed + * others : error number + */ +static int +key_expire(sav) + struct secasvar *sav; +{ + int s; + int satype; + + /* XXX: Why do we lock ? */ +#ifdef __NetBSD__ + s = splsoftnet(); /*called from softclock()*/ +#else + s = splnet(); /*called from softclock()*/ +#endif + + /* sanity check */ + if (sav == NULL) + panic("key_expire: NULL pointer is passed.\n"); + if (sav->sah == NULL) + panic("key_expire: Why was SA index in SA NULL.\n"); + if ((satype = key_proto2satype(sav->sah->saidx.proto)) == 0) + panic("key_expire: invalid proto is passed.\n"); + + { + struct sadb_msg *newmsg = NULL; + u_int len; + caddr_t p; + int error; + + /* create new sadb_msg to reply. 
*/ + len = sizeof(struct sadb_msg) + + sizeof(struct sadb_sa) + + sizeof(struct sadb_lifetime) + + sizeof(struct sadb_lifetime) + + sizeof(struct sadb_address) + + PFKEY_ALIGN8(sav->sah->saidx.src.ss_len) + + sizeof(struct sadb_address) + + PFKEY_ALIGN8(sav->sah->saidx.dst.ss_len); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_expire: No more memory.\n"); +#endif + splx(s); + return ENOBUFS; + } + bzero((caddr_t)newmsg, len); + + /* set msg header */ + p = key_setsadbmsg((caddr_t)newmsg, SADB_EXPIRE, len, + satype, sav->seq, 0, + sav->sah->saidx.mode, sav->sah->saidx.reqid, + 0, sav->refcnt); + + /* create SA extension */ + p = key_setsadbsa(p, sav); + + /* create lifetime extension */ + { + struct sadb_lifetime *m_lt = (struct sadb_lifetime *)p; + + m_lt->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); + m_lt->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; + m_lt->sadb_lifetime_allocations = sav->lft_c->sadb_lifetime_allocations; + m_lt->sadb_lifetime_bytes = sav->lft_c->sadb_lifetime_bytes; + m_lt->sadb_lifetime_addtime = sav->lft_c->sadb_lifetime_addtime; + m_lt->sadb_lifetime_usetime = sav->lft_c->sadb_lifetime_usetime; + p += sizeof(struct sadb_lifetime); + + /* copy SOFT lifetime extension. */ + bcopy(sav->lft_s, p, sizeof(struct sadb_lifetime)); + p += sizeof(struct sadb_lifetime); + } + + /* set sadb_address for source */ + p = key_setsadbaddr(p, + SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&sav->sah->saidx.src, + _INALENBYAF(sav->sah->saidx.src.ss_family) << 3, + IPSEC_ULPROTO_ANY); + + /* set sadb_address for destination */ + p = key_setsadbaddr(p, + SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&sav->sah->saidx.dst, + _INALENBYAF(sav->sah->saidx.dst.ss_family) << 3, + IPSEC_ULPROTO_ANY); + + error = key_sendall(newmsg, len); + splx(s); + return error; + } +} + +/* + * SADB_FLUSH processing + * receive + * + * from the ikmpd, and free all entries in secastree. 
+ * and send, + * + * to the ikmpd. + * NOTE: to do is only marking SADB_SASTATE_DEAD. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: NULL if fail. + * other if success, return pointer to the message to send. + */ +static struct sadb_msg * +key_flush(mhp) + caddr_t *mhp; +{ + struct sadb_msg *msg0; + struct secashead *sah, *nextsah; + struct secasvar *sav, *nextsav; + u_int16_t proto; + u_int8_t state; + u_int stateidx; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_flush: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* map satype to proto */ + if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) { +#if IPSEC_DEBUG + printf("key_flush: invalid satype is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* no SATYPE specified, i.e. flushing all SA. */ + for (sah = LIST_FIRST(&sahtree); + sah != NULL; + sah = nextsah) { + + nextsah = LIST_NEXT(sah, chain); + + if (msg0->sadb_msg_satype != SADB_SATYPE_UNSPEC + && proto != sah->saidx.proto) + continue; + + for (stateidx = 0; + stateidx < _ARRAYLEN(saorder_state_alive); + stateidx++) { + + state = saorder_state_any[stateidx]; + for (sav = LIST_FIRST(&sah->savtree[state]); + sav != NULL; + sav = nextsav) { + + nextsav = LIST_NEXT(sav, chain); + + key_sa_chgstate(sav, SADB_SASTATE_DEAD); + key_freesav(sav); + } + } + + sah->state = SADB_SASTATE_DEAD; + } + + { + struct sadb_msg *newmsg; + u_int len; + + /* create new sadb_msg to reply. 
*/ + len = sizeof(struct sadb_msg); + + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_flush: No more memory.\n"); +#endif + msg0->sadb_msg_errno = ENOBUFS; + return NULL; + } + bzero((caddr_t)newmsg, len); + + bcopy((caddr_t)mhp[0], (caddr_t)newmsg, sizeof(*msg0)); + newmsg->sadb_msg_errno = 0; + newmsg->sadb_msg_len = PFKEY_UNIT64(len); + + return newmsg; + } +} + +/* + * SADB_DUMP processing + * dump all entries including status of DEAD in SAD. + * receive + * + * from the ikmpd, and dump all secasvar leaves + * and send, + * ..... + * to the ikmpd. + * + * IN: mhp: pointer to the pointer to each header. + * OUT: error code. 0 on success. + */ +static int +key_dump(mhp, so, target) + caddr_t *mhp; + struct socket *so; + int target; +{ + struct sadb_msg *msg0; + struct secashead *sah; + struct secasvar *sav; + u_int16_t proto; + u_int stateidx; + u_int8_t satype; + u_int8_t state; + int len, cnt; + struct sadb_msg *newmsg; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_dump: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + + /* map satype to proto */ + if ((proto = key_satype2proto(msg0->sadb_msg_satype)) == 0) { +#if IPSEC_DEBUG + printf("key_dump: invalid satype is passed.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + /* count sav entries to be sent to the userland. */ + cnt = 0; + LIST_FOREACH(sah, &sahtree, chain) { + + if (msg0->sadb_msg_satype != SADB_SATYPE_UNSPEC + && proto != sah->saidx.proto) + continue; + + for (stateidx = 0; + stateidx < _ARRAYLEN(saorder_state_any); + stateidx++) { + + state = saorder_state_any[stateidx]; + LIST_FOREACH(sav, &sah->savtree[state], chain) { + cnt++; + } + } + } + + if (cnt == 0) + return ENOENT; + + /* send this to the userland, one at a time. 
*/ + newmsg = NULL; + LIST_FOREACH(sah, &sahtree, chain) { + + if (msg0->sadb_msg_satype != SADB_SATYPE_UNSPEC + && proto != sah->saidx.proto) + continue; + + /* map proto to satype */ + if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { +#if IPSEC_DEBUG + printf("key_dump: there was invalid proto in SAD.\n"); +#endif + msg0->sadb_msg_errno = EINVAL; + return NULL; + } + + for (stateidx = 0; + stateidx < _ARRAYLEN(saorder_state_any); + stateidx++) { + + state = saorder_state_any[stateidx]; + LIST_FOREACH(sav, &sah->savtree[state], chain) { + + len = key_getmsglen(sav); + KMALLOC(newmsg, struct sadb_msg *, len); + if (newmsg == NULL) { +#if IPSEC_DEBUG + printf("key_dump: No more memory.\n"); +#endif + return ENOBUFS; + } + bzero((caddr_t)newmsg, len); + + --cnt; + (void)key_setdumpsa(newmsg, sav, SADB_DUMP, + satype, cnt, msg0->sadb_msg_pid); + + key_sendup(so, newmsg, len, target); + KFREE(newmsg); + newmsg = NULL; + } + } + } + + return 0; +} + +/* + * SADB_X_PROMISC processing + */ +static void +key_promisc(mhp, so) + caddr_t *mhp; + struct socket *so; +{ + struct sadb_msg *msg0; + int olen; + + /* sanity check */ + if (mhp == NULL || mhp[0] == NULL) + panic("key_promisc: NULL pointer is passed.\n"); + + msg0 = (struct sadb_msg *)mhp[0]; + olen = PFKEY_UNUNIT64(msg0->sadb_msg_len); + + if (olen < sizeof(struct sadb_msg)) { + return; + } else if (olen == sizeof(struct sadb_msg)) { + /* enable/disable promisc mode */ + struct keycb *kp; + int target = 0; + + target = KEY_SENDUP_ONE; + + if (so == NULL) { + return; + } + if ((kp = (struct keycb *)sotorawcb(so)) == NULL) { + msg0->sadb_msg_errno = EINVAL; + goto sendorig; + } + msg0->sadb_msg_errno = 0; + if (msg0->sadb_msg_satype == 1 || msg0->sadb_msg_satype == 0) { + kp->kp_promisc = msg0->sadb_msg_satype; + } else { + msg0->sadb_msg_errno = EINVAL; + goto sendorig; + } + + /* send the original message back to everyone */ + msg0->sadb_msg_errno = 0; + target = KEY_SENDUP_ALL; +sendorig: + key_sendup(so, 
msg0, PFKEY_UNUNIT64(msg0->sadb_msg_len), target); + } else { + /* send packet as is */ + struct sadb_msg *msg; + int len; + + len = olen - sizeof(struct sadb_msg); + KMALLOC(msg, struct sadb_msg *, len); + if (msg == NULL) { + msg0->sadb_msg_errno = ENOBUFS; + key_sendup(so, msg0, PFKEY_UNUNIT64(msg0->sadb_msg_len), + KEY_SENDUP_ONE); /*XXX*/ + } + + /* XXX if sadb_msg_seq is specified, send to specific pid */ + key_sendup(so, msg, len, KEY_SENDUP_ALL); + KFREE(msg); + } +} + +/* + * send message to the socket. + * OUT: + * 0 : success + * others : fail + */ +static int +key_sendall(msg, len) + struct sadb_msg *msg; + u_int len; +{ + struct secreg *reg; + int error = 0; + + /* sanity check */ + if (msg == NULL) + panic("key_sendall: NULL pointer is passed.\n"); + + /* search table registerd socket to send a message. */ + LIST_FOREACH(reg, ®tree[msg->sadb_msg_satype], chain) { + error = key_sendup(reg->so, msg, len, KEY_SENDUP_ONE); + if (error != 0) { +#if IPSEC_DEBUG + if (error == ENOBUFS) + printf("key_sendall: No more memory.\n"); + else { + printf("key_sendall: key_sendup returned %d\n", + error); + } +#endif + KFREE(msg); + return error; + } + } + + KFREE(msg); + return 0; +} + +/* + * parse sadb_msg buffer to process PFKEYv2, + * and create a data to response if needed. + * I think to be dealed with mbuf directly. + * IN: + * msgp : pointer to pointer to a received buffer pulluped. + * This is rewrited to response. + * so : pointer to socket. + * OUT: + * length for buffer to send to user process. 
 */
int
key_parse(msgp, so, targetp)
	struct sadb_msg **msgp;
	struct socket *so;
	int *targetp;
{
	struct sadb_msg *msg = *msgp, *newmsg = NULL;
	caddr_t mhp[SADB_EXT_MAX + 1];	/* one slot per extension type */
	u_int orglen;
	int error;

	/* sanity check */
	if (msg == NULL || so == NULL)
		panic("key_parse: NULL pointer is passed.\n");

	KEYDEBUG(KEYDEBUG_KEY_DUMP,
		printf("key_parse: passed sadb_msg\n");
		kdebug_sadb(msg));

	/* original (request) length; returned on every error path so the
	 * caller echoes the request back with sadb_msg_errno set. */
	orglen = PFKEY_UNUNIT64(msg->sadb_msg_len);

	if (targetp)
		*targetp = KEY_SENDUP_ONE;

	/* check version */
	if (msg->sadb_msg_version != PF_KEY_V2) {
#if IPSEC_DEBUG
		printf("key_parse: PF_KEY version %u is mismatched.\n",
		    msg->sadb_msg_version);
#endif
		pfkeystat.out_invver++;
		msg->sadb_msg_errno = EINVAL;
		return orglen;
	}

	/* check type */
	if (msg->sadb_msg_type > SADB_MAX) {
#if IPSEC_DEBUG
		printf("key_parse: invalid type %u is passed.\n",
		    msg->sadb_msg_type);
#endif
		msg->sadb_msg_errno = EINVAL;
		pfkeystat.out_invmsgtype++;
		return orglen;
	}

	/* align message: fill mhp[] with a pointer per extension. */
	if (key_align(msg, mhp) != 0) {
		msg->sadb_msg_errno = EINVAL;
		return orglen;
	}

	/* check SA type: reject satype/msg-type combinations that make
	 * no sense before dispatching. */
	switch (msg->sadb_msg_satype) {
	case SADB_SATYPE_UNSPEC:
		/* SA-specific operations require an explicit satype */
		switch (msg->sadb_msg_type) {
		case SADB_GETSPI:
		case SADB_UPDATE:
		case SADB_ADD:
		case SADB_DELETE:
		case SADB_GET:
		case SADB_ACQUIRE:
		case SADB_EXPIRE:
#if IPSEC_DEBUG
			printf("key_parse: must specify satype "
			    "when msg type=%u.\n",
			    msg->sadb_msg_type);
#endif
			msg->sadb_msg_errno = EINVAL;
			pfkeystat.out_invsatype++;
			return orglen;
		}
		break;
	case SADB_SATYPE_AH:
	case SADB_SATYPE_ESP:
#if 1 /*nonstandard*/
	case SADB_X_SATYPE_IPCOMP:
#endif
		/* conversely, SPD operations must not carry an SA satype */
		switch (msg->sadb_msg_type) {
		case SADB_X_SPDADD:
		case SADB_X_SPDDELETE:
		case SADB_X_SPDGET:
		case SADB_X_SPDDUMP:
		case SADB_X_SPDFLUSH:
		case SADB_X_SPDSETIDX:
		case SADB_X_SPDUPDATE:
		case SADB_X_SPDDELETE2:
#if IPSEC_DEBUG
			printf("key_parse: illegal satype=%u\n",
			    msg->sadb_msg_type);
#endif
			msg->sadb_msg_errno = EINVAL;
			pfkeystat.out_invsatype++;
			return orglen;
		}
		break;
	case SADB_SATYPE_RSVP:
	case SADB_SATYPE_OSPFV2:
	case SADB_SATYPE_RIPV2:
	case SADB_SATYPE_MIP:
#if IPSEC_DEBUG
		printf("key_parse: type %u isn't supported.\n",
		    msg->sadb_msg_satype);
#endif
		msg->sadb_msg_errno = EOPNOTSUPP;
		pfkeystat.out_invsatype++;
		return orglen;
	case 1:	/* XXX: What does it do ?
		 * NOTE(review): 1 == SADB_SATYPE_UNSPEC+1; presumably a
		 * promisc-mode marker used by SADB_X_PROMISC -- confirm. */
		if (msg->sadb_msg_type == SADB_X_PROMISC)
			break;
		/*FALLTHROUGH*/
	default:
#if IPSEC_DEBUG
		printf("key_parse: invalid type %u is passed.\n",
		    msg->sadb_msg_satype);
#endif
		msg->sadb_msg_errno = EINVAL;
		pfkeystat.out_invsatype++;
		return orglen;
	}

	/* check field of upper layer protocol and address family */
	if (mhp[SADB_EXT_ADDRESS_SRC] != NULL
	 && mhp[SADB_EXT_ADDRESS_DST] != NULL) {
		struct sadb_address *src0, *dst0;
		u_int prefix;

		src0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_SRC]);
		dst0 = (struct sadb_address *)(mhp[SADB_EXT_ADDRESS_DST]);

		/* check upper layer protocol */
		if (src0->sadb_address_proto != dst0->sadb_address_proto) {
#if IPSEC_DEBUG
			printf("key_parse: upper layer protocol mismatched.\n");
#endif
			msg->sadb_msg_errno = EINVAL;
			pfkeystat.out_invaddr++;
			return orglen;
		}

		/* check family */
		if (PFKEY_ADDR_SADDR(src0)->sa_family
		 != PFKEY_ADDR_SADDR(dst0)->sa_family) {
#if IPSEC_DEBUG
			printf("key_parse: address family mismatched.\n");
#endif
			msg->sadb_msg_errno = EINVAL;
			pfkeystat.out_invaddr++;
			return orglen;
		}

		/* maximum prefix length in bits for this family */
		prefix = _INALENBYAF(PFKEY_ADDR_SADDR(src0)->sa_family) << 3;

		/* check max prefixlen */
		if (prefix < src0->sadb_address_prefixlen
		 || prefix < dst0->sadb_address_prefixlen) {
#if IPSEC_DEBUG
			printf("key_parse: illegal prefixlen.\n");
#endif
			msg->sadb_msg_errno = EINVAL;
			pfkeystat.out_invaddr++;
			return orglen;
		}

		switch (PFKEY_ADDR_SADDR(src0)->sa_family) {
		case AF_INET:
		case AF_INET6:
			break;
		default:
#if IPSEC_DEBUG
			printf("key_parse: invalid address family.\n");
#endif
			msg->sadb_msg_errno = EINVAL;
			pfkeystat.out_invaddr++;
			return orglen;
		}

		/*
		 * prefixlen == 0 is valid because there can be a case when
		 * all addresses are matched.
		 */
	}

	/* dispatch on message type.  Handlers return NULL on failure after
	 * setting msg->sadb_msg_errno themselves. */
	switch (msg->sadb_msg_type) {
	case SADB_GETSPI:
		if ((newmsg = key_getspi(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_UPDATE:
		if ((newmsg = key_update(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_ADD:
		if ((newmsg = key_add(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_DELETE:
		if ((newmsg = key_delete(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_GET:
		if ((newmsg = key_get(mhp)) == NULL)
			return orglen;
		break;

	case SADB_ACQUIRE:
		if ((newmsg = key_acquire2(mhp)) == NULL)
			return orglen;

		/* ~0 is a sentinel: handled, but no reply should be sent */
		if (newmsg == (struct sadb_msg *)~0) {
			/*
			 * No need to reply because the message was
			 * reporting an error that occurred in the KMd.
			 */
			KFREE(msg);
			return 0;
		}
		break;

	case SADB_REGISTER:
		if ((newmsg = key_register(mhp, so)) == NULL)
			return orglen;
#if 1
		if (targetp)
			*targetp = KEY_SENDUP_REGISTERED;
#else
		/* send result to all registered sockets */
		KFREE(msg);
		key_sendall(newmsg, PFKEY_UNUNIT64(newmsg->sadb_msg_len));
		return 0;
#endif
		break;

	case SADB_EXPIRE:
		/* EXPIRE is kernel-to-userland only */
#if IPSEC_DEBUG
		printf("key_parse: why is SADB_EXPIRE received ?\n");
#endif
		msg->sadb_msg_errno = EINVAL;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		pfkeystat.out_invmsgtype++;
		return orglen;

	case SADB_FLUSH:
		if ((newmsg = key_flush(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_DUMP:
		/* key_dump will call key_sendup() on her own */
		error = key_dump(mhp, so, KEY_SENDUP_ONE);
		if (error) {
			msg->sadb_msg_errno = error;
			return orglen;
		} else {
			KFREE(msg);
			return 0;
		}
		break;

	case SADB_X_PROMISC:
		/* everything is handled in key_promisc() */
		key_promisc(mhp, so);
		KFREE(msg);
		return 0;	/*nothing to reply*/

	case SADB_X_PCHANGE:
#if IPSEC_DEBUG
		printf("key_parse: SADB_X_PCHANGE isn't supported.\n");
#endif
		msg->sadb_msg_errno = EINVAL;
		pfkeystat.out_invmsgtype++;
		return orglen;
#if 0
		if (targetp)
			*targetp = KEY_SENDUP_REGISTERED;
#endif

	case SADB_X_SPDADD:
	case SADB_X_SPDSETIDX:
	case SADB_X_SPDUPDATE:
		if ((newmsg = key_spdadd(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_X_SPDDELETE:
		if ((newmsg = key_spddelete(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_X_SPDDELETE2:
		if ((newmsg = key_spddelete2(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	case SADB_X_SPDGET:
		/* key_spdget will call key_sendup() on her own */
		error = key_spdget(mhp, so, KEY_SENDUP_ONE);
		if (error) {
			msg->sadb_msg_errno = error;
			return orglen;
		} else {
			KFREE(msg);
			return 0;
		}
		break;

	case SADB_X_SPDDUMP:
		/* key_spddump will call key_sendup() on her own */
		error = key_spddump(mhp, so, KEY_SENDUP_ONE);
		if (error) {
			msg->sadb_msg_errno = error;
			return orglen;
		} else {
			KFREE(msg);
			return 0;
		}
		break;

	case SADB_X_SPDFLUSH:
		if ((newmsg = key_spdflush(mhp)) == NULL)
			return orglen;
		if (targetp)
			*targetp = KEY_SENDUP_ALL;
		break;

	default:
		msg->sadb_msg_errno = EOPNOTSUPP;
		return orglen;
	}

	/* switch from old sadb_msg to new one if success. */
	KFREE(msg);
	*msgp = newmsg;

	return PFKEY_UNUNIT64((*msgp)->sadb_msg_len);
}

/*
 * set the pointer to each header into message buffer.
 * IN:	msg: pointer to message buffer.
+ * mhp: pointer to the buffer allocated like below: + * caddr_t mhp[SADB_EXT_MAX + 1]; + * OUT: 0: + * EINVAL: + */ +static int +key_align(msg, mhp) + struct sadb_msg *msg; + caddr_t *mhp; +{ + struct sadb_ext *ext; + int tlen, extlen; + int i; + + /* sanity check */ + if (msg == NULL || mhp == NULL) + panic("key_align: NULL pointer is passed.\n"); + + /* initialize */ + for (i = 0; i < SADB_EXT_MAX + 1; i++) + mhp[i] = NULL; + + mhp[0] = (caddr_t)msg; + + tlen = PFKEY_UNUNIT64(msg->sadb_msg_len) - sizeof(struct sadb_msg); + ext = (struct sadb_ext *)((caddr_t)msg + sizeof(struct sadb_msg)); + + while (tlen > 0) { + /* set pointer */ + switch (ext->sadb_ext_type) { + case SADB_EXT_SA: + case SADB_EXT_LIFETIME_CURRENT: + case SADB_EXT_LIFETIME_HARD: + case SADB_EXT_LIFETIME_SOFT: + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + case SADB_EXT_KEY_AUTH: + case SADB_EXT_KEY_ENCRYPT: + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + case SADB_EXT_SENSITIVITY: + case SADB_EXT_PROPOSAL: + case SADB_EXT_SUPPORTED_AUTH: + case SADB_EXT_SUPPORTED_ENCRYPT: + case SADB_EXT_SPIRANGE: + case SADB_X_EXT_POLICY: + /* duplicate check */ + /* + * XXX Are there duplication payloads of either + * KEY_AUTH or KEY_ENCRYPT ? 
+ */ + if (mhp[ext->sadb_ext_type] != NULL) { +#if IPSEC_DEBUG + printf("key_align: duplicate ext_type %u " + "is passed.\n", + ext->sadb_ext_type); +#endif + pfkeystat.out_dupext++; + return EINVAL; + } + mhp[ext->sadb_ext_type] = (caddr_t)ext; + break; + default: +#if IPSEC_DEBUG + printf("key_align: invalid ext_type %u is passed.\n", + ext->sadb_ext_type); +#endif + pfkeystat.out_invexttype++; + return EINVAL; + } + + extlen = PFKEY_UNUNIT64(ext->sadb_ext_len); + tlen -= extlen; + ext = (struct sadb_ext *)((caddr_t)ext + extlen); + } + + return 0; +} + +void +key_init() +{ + int i; + + bzero((caddr_t)&key_cb, sizeof(key_cb)); + + for (i = 0; i < IPSEC_DIR_MAX; i++) { + LIST_INIT(&sptree[i]); + } + + LIST_INIT(&sahtree); + + for (i = 0; i <= SADB_SATYPE_MAX; i++) { + LIST_INIT(®tree[i]); + } + +#ifndef IPSEC_NONBLOCK_ACQUIRE + LIST_INIT(&acqtree); +#endif + LIST_INIT(&spacqtree); + + /* system default */ + ip4_def_policy.policy = IPSEC_POLICY_NONE; + ip4_def_policy.refcnt++; /*never reclaim this*/ +#if INET6 + ip6_def_policy.policy = IPSEC_POLICY_NONE; + ip6_def_policy.refcnt++; /*never reclaim this*/ +#endif + +#ifndef IPSEC_DEBUG2 + timeout((void *)key_timehandler, (void *)0, hz); +#endif /*IPSEC_DEBUG2*/ + + /* initialize key statistics */ + keystat.getspi_count = 1; + + printf("IPsec: Initialized Security Association Processing.\n"); + + return; +} + +/* + * XXX: maybe This function is called after INBOUND IPsec processing. + * + * Special check for tunnel-mode packets. + * We must make some checks for consistency between inner and outer IP header. 
 *
 * xxx more checks to be provided
 */
int
key_checktunnelsanity(sav, family, src, dst)
	struct secasvar *sav;
	u_int family;
	caddr_t src;
	caddr_t dst;
{
	/* sanity check */
	if (sav->sah == NULL)
		panic("sav->sah == NULL at key_checktunnelsanity");

	/* XXX: check inner IP header */

	/* always accepts for now: the inner-header check is unimplemented */
	return 1;
}

#if 0
#ifdef __FreeBSD__
#define hostnamelen	strlen(hostname)
#endif

/*
 * Get FQDN for the host.
 * If the administrator configured hostname (by hostname(1)) without
 * domain name, returns nothing.
 */
static const char *
key_getfqdn()
{
	int i;
	int hasdot;
	static char fqdn[MAXHOSTNAMELEN + 1];

	if (!hostnamelen)
		return NULL;

	/* check if it comes with domain name. */
	hasdot = 0;
	for (i = 0; i < hostnamelen; i++) {
		if (hostname[i] == '.')
			hasdot++;
	}
	if (!hasdot)
		return NULL;

	/* NOTE: hostname may not be NUL-terminated. */
	bzero(fqdn, sizeof(fqdn));
	bcopy(hostname, fqdn, hostnamelen);
	fqdn[hostnamelen] = '\0';
	return fqdn;
}

/*
 * get username@FQDN for the host/user.
 */
static const char *
key_getuserfqdn()
{
	const char *host;
	static char userfqdn[MAXHOSTNAMELEN + MAXLOGNAME + 2];
	struct proc *p = curproc;
	char *q;

	if (!p || !p->p_pgrp || !p->p_pgrp->pg_session)
		return NULL;
	if (!(host = key_getfqdn()))
		return NULL;

	/* NOTE: s_login may not be-NUL terminated. */
	bzero(userfqdn, sizeof(userfqdn));
	bcopy(p->p_pgrp->pg_session->s_login, userfqdn, MAXLOGNAME);
	userfqdn[MAXLOGNAME] = '\0';	/* safeguard */
	q = userfqdn + strlen(userfqdn);
	*q++ = '@';
	bcopy(host, q, strlen(host));
	q += strlen(host);
	*q++ = '\0';

	return userfqdn;
}
#endif

/* record data transfer on SA, and update timestamps */
void
key_sa_recordxfer(sav, m)
	struct secasvar *sav;
	struct mbuf *m;
{
	if (!sav)
		panic("key_sa_recordxfer called with sav == NULL");
	if (!m)
		panic("key_sa_recordxfer called with m == NULL");
	/* SA with no current-lifetime record: nothing to account against */
	if (!sav->lft_c)
		return;

	/*
	 * XXX Currently, there is a difference of bytes size
	 * between inbound and outbound processing.
	 */
	sav->lft_c->sadb_lifetime_bytes += m->m_pkthdr.len;
	/* to check bytes lifetime is done in key_timehandler(). */

	/*
	 * We use the number of packets as the unit of
	 * sadb_lifetime_allocations. We increment the variable
	 * whenever {esp,ah}_{in,out}put is called.
	 */
	sav->lft_c->sadb_lifetime_allocations++;
	/* XXX check for expires? */

	/*
	 * NOTE: We record CURRENT sadb_lifetime_usetime by using wall clock,
	 * in seconds.  HARD and SOFT lifetime are measured by the time
	 * difference (again in seconds) from sadb_lifetime_usetime.
	 *
	 *	usetime
	 *	v     expire   expire
	 * -----+-----+--------+---> t
	 *	<--------------> HARD
	 *	<-----> SOFT
	 */
	{
	struct timeval tv;
	microtime(&tv);
	sav->lft_c->sadb_lifetime_usetime = tv.tv_sec;
	/* XXX check for expires? */
	}

	return;
}

/* dumb version */
/* Invalidate any cached route in the SAD that points at dst. */
void
key_sa_routechange(dst)
	struct sockaddr *dst;
{
	struct secashead *sah;
	struct route *ro;

	LIST_FOREACH(sah, &sahtree, chain) {
		ro = &sah->sa_route;
		if (ro->ro_rt && dst->sa_len == ro->ro_dst.sa_len
		 && bcmp(dst, &ro->ro_dst, dst->sa_len) == 0) {
			RTFREE(ro->ro_rt);
			ro->ro_rt = (struct rtentry *)NULL;
		}
	}

	return;
}

/* Move sav to the savtree list that matches its new state. */
static void
key_sa_chgstate(sav, state)
	struct secasvar *sav;
	u_int8_t state;
{
	if (sav == NULL)
		panic("key_sa_chgstate called with sav == NULL");

	if (sav->state == state)
		return;

	/* unlink from the old state's list before relinking */
	if (__LIST_CHAINED(sav))
		LIST_REMOVE(sav, chain);

	sav->state = state;
	LIST_INSERT_HEAD(&sav->sah->savtree[state], sav, chain);
}

/* returns NULL on error, m0 will be left unchanged */
/*
 * Reserve len contiguous bytes at the tail of chain m0, growing the chain
 * with a cluster mbuf if the last mbuf lacks trailing space.  Returns a
 * pointer to the reserved region, or NULL (EINVAL/ENOBUFS).
 */
static caddr_t
key_appendmbuf(m0, len)
	struct mbuf *m0;
	int len;
{
	caddr_t p;
	struct mbuf *m;
	struct mbuf *n;

	if (!m0 || (m0->m_flags & M_PKTHDR) == 0)
		return NULL;	/*EINVAL*/
	if (len > MCLBYTES)
		return NULL;	/*EINVAL*/

	/* find the last mbuf of the chain */
	for (m = m0; m && m->m_next; m = m->m_next)
		;
	if (len <= M_TRAILINGSPACE(m)) {
		/* it fits in place; extend the last mbuf */
		p = mtod(m, caddr_t) + m->m_len;
		m->m_len += len;
		m0->m_pkthdr.len += len;

		return p;
	}
	/* need a new cluster mbuf appended to the chain */
	MGET(n, M_DONTWAIT, m->m_type);
	if (n != NULL) {
		MCLGET(n, M_DONTWAIT);
		if ((n->m_flags & M_EXT) == 0) {
			m_freem(n);
			n = NULL;
		}
	}
	if (n == NULL)
		return NULL;	/*ENOBUFS*/
	n->m_next = NULL;
	m->m_next = n;
	n->m_len = len;
	m0->m_pkthdr.len += len;

	return mtod(n, caddr_t);
}

#ifdef __bsdi__
/* NOTE(review): the two header names below were lost when this patch was
 * transcribed (angle-bracketed names stripped); restore from the bsdi
 * original before building. */
#include
#include

int *key_sysvars[] = KEYCTL_VARS;

/* sysctl handler for the net.key tree (BSD/OS variant). */
int
key_sysctl(name, namelen, oldp, oldlenp, newp, newlen)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
{
	if (name[0] >= KEYCTL_MAXID)
		return EOPNOTSUPP;
	switch (name[0]) {
	default:
		return sysctl_int_arr(key_sysvars, name, namelen,
		    oldp, oldlenp, newp, newlen);
	}
}
#endif /*__bsdi__*/

#ifdef __NetBSD__
/* NOTE(review): include targets stripped in patch transcription -- see above. */
#include
#include
+ +static int *key_sysvars[] = KEYCTL_VARS; + +int +key_sysctl(name, namelen, oldp, oldlenp, newp, newlen) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; +{ + if (name[0] >= KEYCTL_MAXID) + return EOPNOTSUPP; + if (!key_sysvars[name[0]]) + return EOPNOTSUPP; + switch (name[0]) { + default: + return sysctl_int(oldp, oldlenp, newp, newlen, + key_sysvars[name[0]]); + } +} +#endif /*__NetBSD__*/ diff --git a/bsd/netkey/key.h b/bsd/netkey/key.h new file mode 100644 index 000000000..a8e387c70 --- /dev/null +++ b/bsd/netkey/key.h @@ -0,0 +1,82 @@ +/* $KAME: key.h,v 1.11 2000/03/25 07:24:12 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETKEY_KEY_H_ +#define _NETKEY_KEY_H_ + +#ifdef KERNEL + +extern struct key_cb key_cb; + +struct secpolicy; +struct secpolicyindex; +struct ipsecrequest; +struct secasvar; +struct sockaddr; +struct socket; +struct sadb_msg; +struct sadb_x_policy; + +extern struct secpolicy *key_allocsp __P((struct secpolicyindex *spidx, + u_int dir)); +extern int key_checkrequest + __P((struct ipsecrequest *isr, struct secasindex *saidx)); +extern struct secasvar *key_allocsa __P((u_int family, caddr_t src, caddr_t dst, + u_int proto, u_int32_t spi)); +extern void key_freesp __P((struct secpolicy *sp)); +extern void key_freeso __P((struct socket *so)); +extern void key_freesav __P((struct secasvar *sav)); +extern struct secpolicy *key_newsp __P((void)); +extern struct secpolicy *key_msg2sp __P((struct sadb_x_policy *xpl0, + size_t len, int *error)); +extern struct mbuf *key_sp2msg __P((struct secpolicy *sp)); +extern int key_ismyaddr __P((u_int family, caddr_t addr)); +extern void key_timehandler __P((void)); +extern void key_srandom __P((void)); +extern void key_freereg __P((struct socket *so)); +extern int key_parse __P((struct sadb_msg **msgp, struct socket *so, + int *targetp)); +extern void key_init __P((void)); +extern int key_checktunnelsanity __P((struct secasvar *sav, u_int family, + caddr_t src, caddr_t dst)); +extern void key_sa_recordxfer __P((struct secasvar *sav, struct mbuf *m)); +extern void key_sa_routechange __P((struct 
sockaddr *dst)); + +#if MALLOC_DECLARE +MALLOC_DECLARE(M_SECA); +#endif /* MALLOC_DECLARE */ + +#if defined(__bsdi__) || defined(__NetBSD__) +extern int key_sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); +#endif + +#endif /* defined(KERNEL) */ +#endif /* _NETKEY_KEY_H_ */ diff --git a/bsd/netkey/key_debug.c b/bsd/netkey/key_debug.c new file mode 100644 index 000000000..ee55d4789 --- /dev/null +++ b/bsd/netkey/key_debug.c @@ -0,0 +1,733 @@ +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* KAME @(#)$Id: key_debug.c,v 1.2 2000/09/14 20:35:26 lindak Exp $ */ + +#ifdef KERNEL +# define _KERNEL +#endif + +#ifdef KERNEL +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#endif +#endif + +#include +#include +#if defined(KERNEL) +#include +#include +#endif +#include + +#include + +#include +#include + +#include +#include + +#if !defined(KERNEL) +#include +#include +#include +#endif /* defined(KERNEL) */ + +#if !defined(_KERNEL) || (defined(_KERNEL) && defined(IPSEC_DEBUG)) + +static void kdebug_sadb_prop __P((struct sadb_ext *)); +static void kdebug_sadb_identity __P((struct sadb_ext *)); +static void kdebug_sadb_supported __P((struct sadb_ext *)); +static void kdebug_sadb_lifetime __P((struct sadb_ext *)); +static void kdebug_sadb_sa __P((struct sadb_ext *)); +static void kdebug_sadb_address __P((struct sadb_ext *)); +static void kdebug_sadb_key __P((struct sadb_ext *)); + +#ifdef KERNEL +static void kdebug_secreplay __P((struct secreplay *)); +#endif + +#ifndef KERNEL +#define panic(param) { printf(param); exit(-1); } +#endif + +/* NOTE: host byte order */ + +/* %%%: about struct sadb_msg */ +void +kdebug_sadb(base) + struct sadb_msg *base; +{ + struct sadb_ext *ext; + int tlen, extlen; + + /* sanity check */ + if (base == NULL) + panic("kdebug_sadb: NULL pointer was passed.\n"); + + printf("sadb_msg{ version=%u type=%u errno=%u satype=%u\n", + base->sadb_msg_version, 
base->sadb_msg_type, + base->sadb_msg_errno, base->sadb_msg_satype); + printf(" len=%u mode=%u seq=%u pid=%u reqid=%u\n", + base->sadb_msg_len, base->sadb_msg_mode, + base->sadb_msg_seq, base->sadb_msg_pid, base->sadb_msg_reqid); + printf(" reserved1=%u reserved2=%u\n", + base->sadb_msg_reserved1, base->sadb_msg_reserved2); + + tlen = PFKEY_UNUNIT64(base->sadb_msg_len) - sizeof(struct sadb_msg); + ext = (struct sadb_ext *)((caddr_t)base + sizeof(struct sadb_msg)); + + while (tlen > 0) { + printf("sadb_ext{ len=%u type=%u }\n", + ext->sadb_ext_len, ext->sadb_ext_type); + + if (ext->sadb_ext_len == 0) { + printf("kdebug_sadb: invalid ext_len=0 was passed.\n"); + return; + } + if (ext->sadb_ext_len > tlen) { + printf("kdebug_sadb: ext_len exceeds end of buffer.\n"); + return; + } + + switch (ext->sadb_ext_type) { + case SADB_EXT_SA: + kdebug_sadb_sa(ext); + break; + case SADB_EXT_LIFETIME_CURRENT: + case SADB_EXT_LIFETIME_HARD: + case SADB_EXT_LIFETIME_SOFT: + kdebug_sadb_lifetime(ext); + break; + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + kdebug_sadb_address(ext); + break; + case SADB_EXT_KEY_AUTH: + case SADB_EXT_KEY_ENCRYPT: + kdebug_sadb_key(ext); + break; + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + kdebug_sadb_identity(ext); + break; + case SADB_EXT_SENSITIVITY: + break; + case SADB_EXT_PROPOSAL: + kdebug_sadb_prop(ext); + break; + case SADB_EXT_SUPPORTED_AUTH: + case SADB_EXT_SUPPORTED_ENCRYPT: + kdebug_sadb_supported(ext); + break; + case SADB_EXT_SPIRANGE: + case SADB_X_EXT_KMPRIVATE: + break; + case SADB_X_EXT_POLICY: + kdebug_sadb_x_policy(ext); + break; + default: + printf("kdebug_sadb: invalid ext_type %u was passed.\n", + ext->sadb_ext_type); + return; + } + + extlen = PFKEY_UNUNIT64(ext->sadb_ext_len); + tlen -= extlen; + ext = (struct sadb_ext *)((caddr_t)ext + extlen); + } + + return; +} + +static void +kdebug_sadb_prop(ext) + struct sadb_ext *ext; +{ + struct sadb_prop *prop = (struct 
sadb_prop *)ext; + struct sadb_comb *comb; + int len; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_prop: NULL pointer was passed.\n"); + + len = (PFKEY_UNUNIT64(prop->sadb_prop_len) - sizeof(*prop)) + / sizeof(*comb); + comb = (struct sadb_comb *)(prop + 1); + printf("sadb_prop{ replay=%u\n", prop->sadb_prop_replay); + + while (len--) { + printf("sadb_comb{ auth=%u encrypt=%u " + "flags=0x%04x reserved=0x%08x\n", + comb->sadb_comb_auth, comb->sadb_comb_encrypt, + comb->sadb_comb_flags, comb->sadb_comb_reserved); + + printf(" auth_minbits=%u auth_maxbits=%u " + "encrypt_minbits=%u encrypt_maxbits=%u\n", + comb->sadb_comb_auth_minbits, + comb->sadb_comb_auth_maxbits, + comb->sadb_comb_encrypt_minbits, + comb->sadb_comb_encrypt_maxbits); + + printf(" soft_alloc=%u hard_alloc=%u " + "soft_bytes=%lu hard_bytes=%lu\n", + comb->sadb_comb_soft_allocations, + comb->sadb_comb_hard_allocations, + (unsigned long)comb->sadb_comb_soft_bytes, + (unsigned long)comb->sadb_comb_hard_bytes); + + printf(" soft_alloc=%lu hard_alloc=%lu " + "soft_bytes=%lu hard_bytes=%lu }\n", + (unsigned long)comb->sadb_comb_soft_addtime, + (unsigned long)comb->sadb_comb_hard_addtime, + (unsigned long)comb->sadb_comb_soft_usetime, + (unsigned long)comb->sadb_comb_hard_usetime); + comb++; + } + printf("}\n"); + + return; +} + +static void +kdebug_sadb_identity(ext) + struct sadb_ext *ext; +{ + struct sadb_ident *id = (struct sadb_ident *)ext; + int len; + union sadb_x_ident_id *aid; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_identity: NULL pointer was passed.\n"); + + len = PFKEY_UNUNIT64(id->sadb_ident_len) - sizeof(*id); + printf("sadb_ident_%s{", + id->sadb_ident_exttype == SADB_EXT_IDENTITY_SRC ? 
"src" : "dst"); + switch (id->sadb_ident_type) { + case SADB_X_IDENTTYPE_ADDR: + aid = (union sadb_x_ident_id *)&id->sadb_ident_id; + + printf(" type=%d prefix=%u ul_proto=%u\n", + id->sadb_ident_type, + aid->sadb_x_ident_id_addr.prefix, + aid->sadb_x_ident_id_addr.ul_proto); + kdebug_sockaddr((struct sockaddr *)(id + 1)); + break; + + default: + printf(" type=%d id=%lu", + id->sadb_ident_type, (u_long)id->sadb_ident_id); + if (len) { +#ifdef KERNEL + ipsec_hexdump((caddr_t)(id + 1), len); /*XXX cast ?*/ +#else + char *p, *ep; + printf("\n str=\""); + p = (char *)(id + 1); + ep = p + len; + for (/*nothing*/; *p && p < ep; p++) { + if (isprint(*p)) + printf("%c", *p & 0xff); + else + printf("\\%03o", *p & 0xff); + } +#endif + printf("\""); + } + break; + } + + printf(" }\n"); + + return; +} + +static void +kdebug_sadb_supported(ext) + struct sadb_ext *ext; +{ + struct sadb_supported *sup = (struct sadb_supported *)ext; + struct sadb_alg *alg; + int len; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_supported: NULL pointer was passed.\n"); + + len = (PFKEY_UNUNIT64(sup->sadb_supported_len) - sizeof(*sup)) + / sizeof(*alg); + alg = (struct sadb_alg *)(sup + 1); + printf("sadb_sup{\n"); + while (len--) { + printf(" { id=%d ivlen=%d min=%d max=%d }\n", + alg->sadb_alg_id, alg->sadb_alg_ivlen, + alg->sadb_alg_minbits, alg->sadb_alg_maxbits); + alg++; + } + printf("}\n"); + + return; +} + +static void +kdebug_sadb_lifetime(ext) + struct sadb_ext *ext; +{ + struct sadb_lifetime *lft = (struct sadb_lifetime *)ext; + + /* sanity check */ + if (ext == NULL) + printf("kdebug_sadb_lifetime: NULL pointer was passed.\n"); + + printf("sadb_lifetime{ alloc=%u, bytes=%u\n", + lft->sadb_lifetime_allocations, + (u_int32_t)lft->sadb_lifetime_bytes); + printf(" addtime=%u, usetime=%u }\n", + (u_int32_t)lft->sadb_lifetime_addtime, + (u_int32_t)lft->sadb_lifetime_usetime); + + return; +} + +static void +kdebug_sadb_sa(ext) + struct sadb_ext *ext; +{ + struct sadb_sa *sa = 
(struct sadb_sa *)ext; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_sa: NULL pointer was passed.\n"); + + printf("sadb_sa{ spi=%u replay=%u state=%u\n", + (u_int32_t)ntohl(sa->sadb_sa_spi), sa->sadb_sa_replay, + sa->sadb_sa_state); + printf(" auth=%u encrypt=%u flags=0x%08x }\n", + sa->sadb_sa_auth, sa->sadb_sa_encrypt, sa->sadb_sa_flags); + + return; +} + +static void +kdebug_sadb_address(ext) + struct sadb_ext *ext; +{ + struct sadb_address *addr = (struct sadb_address *)ext; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_address: NULL pointer was passed.\n"); + + printf("sadb_address{ proto=%u prefixlen=%u reserved=0x%02x%02x }\n", + addr->sadb_address_proto, addr->sadb_address_prefixlen, + ((u_char *)&addr->sadb_address_reserved)[0], + ((u_char *)&addr->sadb_address_reserved)[1]); + + kdebug_sockaddr((struct sockaddr *)((caddr_t)ext + sizeof(*addr))); + + return; +} + +static void +kdebug_sadb_key(ext) + struct sadb_ext *ext; +{ + struct sadb_key *key = (struct sadb_key *)ext; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_key: NULL pointer was passed.\n"); + + printf("sadb_key{ bits=%u reserved=%u\n", + key->sadb_key_bits, key->sadb_key_reserved); + printf(" key="); + + /* sanity check 2 */ + if ((key->sadb_key_bits >> 3) > + (PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key))) { + printf("kdebug_sadb_key: key length mismatch, bit:%d len:%ld.\n", + key->sadb_key_bits >> 3, + (long)PFKEY_UNUNIT64(key->sadb_key_len) - sizeof(struct sadb_key)); + } + + ipsec_hexdump((caddr_t)key + sizeof(struct sadb_key), + key->sadb_key_bits >> 3); + printf(" }\n"); + return; +} + +void +kdebug_sadb_x_policy(ext) + struct sadb_ext *ext; +{ + struct sadb_x_policy *xpl = (struct sadb_x_policy *)ext; + struct sockaddr *addr; + + /* sanity check */ + if (ext == NULL) + panic("kdebug_sadb_x_policy: NULL pointer was passed.\n"); + + printf("sadb_x_policy{ type=%u dir=%u reserved=%x }\n", + xpl->sadb_x_policy_type, 
xpl->sadb_x_policy_dir, + xpl->sadb_x_policy_reserved); + + if (xpl->sadb_x_policy_type == IPSEC_POLICY_IPSEC) { + int tlen; + struct sadb_x_ipsecrequest *xisr; + + tlen = PFKEY_UNUNIT64(xpl->sadb_x_policy_len) - sizeof(*xpl); + xisr = (struct sadb_x_ipsecrequest *)(xpl + 1); + + while (tlen > 0) { + printf(" { len=%u proto=%u mode=%u level=%u reqid=%u\n", + xisr->sadb_x_ipsecrequest_len, + xisr->sadb_x_ipsecrequest_proto, + xisr->sadb_x_ipsecrequest_mode, + xisr->sadb_x_ipsecrequest_level, + xisr->sadb_x_ipsecrequest_reqid); + + if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) { + addr = (struct sockaddr *)(xisr + 1); + kdebug_sockaddr(addr); + addr = (struct sockaddr *)((caddr_t)addr + + addr->sa_len); + kdebug_sockaddr(addr); + } + + printf(" }\n"); + + /* prevent infinite loop */ + if (xisr->sadb_x_ipsecrequest_len <= 0) { + printf("kdebug_sadb_x_policy: wrong policy struct.\n"); + return; + } + /* prevent overflow */ + if (xisr->sadb_x_ipsecrequest_len > tlen) { + printf("invalid ipsec policy length\n"); + return; + } + + tlen -= xisr->sadb_x_ipsecrequest_len; + + xisr = (struct sadb_x_ipsecrequest *)((caddr_t)xisr + + xisr->sadb_x_ipsecrequest_len); + } + + if (tlen != 0) + panic("kdebug_sadb_x_policy: wrong policy struct.\n"); + } + + return; +} + +#ifdef KERNEL +/* %%%: about SPD and SAD */ +void +kdebug_secpolicy(sp) + struct secpolicy *sp; +{ + /* sanity check */ + if (sp == NULL) + panic("kdebug_secpolicy: NULL pointer was passed.\n"); + + printf("secpolicy{ refcnt=%u state=%u policy=%u\n", + sp->refcnt, sp->state, sp->policy); + + kdebug_secpolicyindex(&sp->spidx); + + switch (sp->policy) { + case IPSEC_POLICY_DISCARD: + printf(" type=discard }\n"); + break; + case IPSEC_POLICY_NONE: + printf(" type=none }\n"); + break; + case IPSEC_POLICY_IPSEC: + { + struct ipsecrequest *isr; + for (isr = sp->req; isr != NULL; isr = isr->next) { + + printf(" level=%u\n", isr->level); + kdebug_secasindex(&isr->saidx); + + if (isr->sav != NULL) + 
kdebug_secasv(isr->sav); + } + printf(" }\n"); + } + break; + case IPSEC_POLICY_BYPASS: + printf(" type=bypass }\n"); + break; + case IPSEC_POLICY_ENTRUST: + printf(" type=entrust }\n"); + break; + default: + printf("kdebug_secpolicy: Invalid policy found. %d\n", + sp->policy); + break; + } + + return; +} + +void +kdebug_secpolicyindex(spidx) + struct secpolicyindex *spidx; +{ + /* sanity check */ + if (spidx == NULL) + panic("kdebug_secpolicyindex: NULL pointer was passed.\n"); + + printf("secpolicyindex{ dir=%u prefs=%u prefd=%u ul_proto=%u\n", + spidx->dir, spidx->prefs, spidx->prefd, spidx->ul_proto); + + ipsec_hexdump((caddr_t)&spidx->src, + ((struct sockaddr *)&spidx->src)->sa_len); + printf("\n"); + ipsec_hexdump((caddr_t)&spidx->dst, + ((struct sockaddr *)&spidx->dst)->sa_len); + printf("}\n"); + + return; +} + +void +kdebug_secasindex(saidx) + struct secasindex *saidx; +{ + /* sanity check */ + if (saidx == NULL) + panic("kdebug_secpolicyindex: NULL pointer was passed.\n"); + + printf("secasindex{ mode=%u proto=%u\n", + saidx->mode, saidx->proto); + + ipsec_hexdump((caddr_t)&saidx->src, + ((struct sockaddr *)&saidx->src)->sa_len); + printf("\n"); + ipsec_hexdump((caddr_t)&saidx->dst, + ((struct sockaddr *)&saidx->dst)->sa_len); + printf("\n"); + + return; +} + +void +kdebug_secasv(sav) + struct secasvar *sav; +{ + /* sanity check */ + if (sav == NULL) + panic("kdebug_secasv: NULL pointer was passed.\n"); + + printf("secas{"); + kdebug_secasindex(&sav->sah->saidx); + + printf(" refcnt=%u state=%u auth=%u enc=%u\n", + sav->refcnt, sav->state, sav->alg_auth, sav->alg_enc); + printf(" spi=%u flags=%u\n", + (u_int32_t)ntohl(sav->spi), sav->flags); + + if (sav->key_auth != NULL) + kdebug_sadb_key((struct sadb_ext *)sav->key_auth); + if (sav->key_enc != NULL) + kdebug_sadb_key((struct sadb_ext *)sav->key_enc); + if (sav->iv != NULL) { + printf(" iv="); + ipsec_hexdump(sav->iv, sav->ivlen ? 
sav->ivlen : 8); + printf("\n"); + } + + if (sav->replay != NULL) + kdebug_secreplay(sav->replay); + if (sav->lft_c != NULL) + kdebug_sadb_lifetime((struct sadb_ext *)sav->lft_c); + if (sav->lft_h != NULL) + kdebug_sadb_lifetime((struct sadb_ext *)sav->lft_h); + if (sav->lft_s != NULL) + kdebug_sadb_lifetime((struct sadb_ext *)sav->lft_s); + +#if notyet + /* XXX: misc[123] ? */ +#endif + + return; +} + +static void +kdebug_secreplay(rpl) + struct secreplay *rpl; +{ + int len, l; + + /* sanity check */ + if (rpl == NULL) + panic("kdebug_secreplay: NULL pointer was passed.\n"); + + printf(" secreplay{ count=%u wsize=%u seq=%u lastseq=%u", + rpl->count, rpl->wsize, rpl->seq, rpl->lastseq); + + if (rpl->bitmap == NULL) { + printf(" }\n"); + return; + } + + printf("\n bitmap { "); + + for (len = 0; len < rpl->wsize; len++) { + for (l = 7; l >= 0; l--) + printf("%u", (((rpl->bitmap)[len] >> l) & 1) ? 1 : 0); + } + printf(" }\n"); + + return; +} + +void +kdebug_mbufhdr(m) + struct mbuf *m; +{ + /* sanity check */ + if (m == NULL) + panic("debug_mbufhdr: NULL pointer was passed.\n"); + + printf("mbuf(%p){ m_next:%p m_nextpkt:%p m_data:%p " + "m_len:%d m_type:0x%02x m_flags:0x%02x }\n", + m, m->m_next, m->m_nextpkt, m->m_data, + m->m_len, m->m_type, m->m_flags); + + if (m->m_flags & M_PKTHDR) { + printf(" m_pkthdr{ len:%d rcvif:%p }\n", + m->m_pkthdr.len, m->m_pkthdr.rcvif); + } + +#ifdef __FreeBSD__ + if (m->m_flags & M_EXT) { + printf(" m_ext{ ext_buf:%p ext_free:%p " + "ext_size:%u ext_ref:%p }\n", + m->m_ext.ext_buf, m->m_ext.ext_free, + m->m_ext.ext_size, m->m_ext.ext_ref); + } +#endif + + return; +} + +void +kdebug_mbuf(m0) + struct mbuf *m0; +{ + struct mbuf *m = m0; + int i, j; + + kdebug_mbufhdr(m); + printf(" m_data=\n"); + for (j = 0; m; m = m->m_next) { + for (i = 0; i < m->m_len; i++) { + if (i != 0 && i % 32 == 0) printf("\n"); + if (i % 4 == 0) printf(" "); + printf("%02x", mtod(m, u_char *)[i]); + j++; + } + } + + printf("\n"); + + return; +} +#endif /* 
KERNEL */ + +void +kdebug_sockaddr(addr) + struct sockaddr *addr; +{ + /* sanity check */ + if (addr == NULL) + panic("kdebug_sockaddr: NULL pointer was passed.\n"); + + /* NOTE: We deal with port number as host byte order. */ + printf("sockaddr{ len=%u family=%u port=%u\n", + addr->sa_len, addr->sa_family, ntohs(_INPORTBYSA(addr))); + +#ifdef INET6 + if (addr->sa_family == PF_INET6) { + struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr; + printf(" flowinfo=0x%08x, scope_id=0x%08x\n", + in6->sin6_flowinfo, in6->sin6_scope_id); + } +#endif + + ipsec_hexdump(_INADDRBYSA(addr), _INALENBYAF(addr->sa_family)); + + printf(" }\n"); + + return; +} + +#endif /* !defined(KERNEL) || (defined(KERNEL) && defined(IPSEC_DEBUG)) */ + +void +ipsec_bindump(buf, len) + caddr_t buf; + int len; +{ + int i; + + for (i = 0; i < len; i++) + printf("%c", (unsigned char)buf[i]); + + return; +} + + +void +ipsec_hexdump(buf, len) + caddr_t buf; + int len; +{ + int i; + + for (i = 0; i < len; i++) { + if (i != 0 && i % 32 == 0) printf("\n"); + if (i % 4 == 0) printf(" "); + printf("%02x", (unsigned char)buf[i]); + } +#if 0 + if (i % 32 != 0) printf("\n"); +#endif + + return; +} + diff --git a/bsd/netkey/key_debug.h b/bsd/netkey/key_debug.h new file mode 100644 index 000000000..750a570d0 --- /dev/null +++ b/bsd/netkey/key_debug.h @@ -0,0 +1,95 @@ +/* $KAME: key_debug.h,v 1.5 2000/03/25 07:24:12 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _NETKEY_KEY_DEBUG_H_ +#define _NETKEY_KEY_DEBUG_H_ + +#if !defined(KERNEL) || (defined(KERNEL) && defined(IPSEC_DEBUG)) + +/* debug flags */ +#define KEYDEBUG_STAMP 0x00000001 /* path */ +#define KEYDEBUG_DATA 0x00000002 /* data */ +#define KEYDEBUG_DUMP 0x00000004 /* dump */ + +#define KEYDEBUG_KEY 0x00000010 /* key processing */ +#define KEYDEBUG_ALG 0x00000020 /* ciph & auth algorithm */ +#define KEYDEBUG_IPSEC 0x00000040 /* ipsec processing */ + +#define KEYDEBUG_KEY_STAMP (KEYDEBUG_KEY | KEYDEBUG_STAMP) +#define KEYDEBUG_KEY_DATA (KEYDEBUG_KEY | KEYDEBUG_DATA) +#define KEYDEBUG_KEY_DUMP (KEYDEBUG_KEY | KEYDEBUG_DUMP) +#define KEYDEBUG_ALG_STAMP (KEYDEBUG_ALG | KEYDEBUG_STAMP) +#define KEYDEBUG_ALG_DATA (KEYDEBUG_ALG | KEYDEBUG_DATA) +#define KEYDEBUG_ALG_DUMP (KEYDEBUG_ALG | KEYDEBUG_DUMP) +#define KEYDEBUG_IPSEC_STAMP (KEYDEBUG_IPSEC | KEYDEBUG_STAMP) +#define KEYDEBUG_IPSEC_DATA (KEYDEBUG_IPSEC | KEYDEBUG_DATA) +#define KEYDEBUG_IPSEC_DUMP (KEYDEBUG_IPSEC | KEYDEBUG_DUMP) + +#define KEYDEBUG(lev,arg) if ((key_debug_level & (lev)) == (lev)) { arg; } + +#ifdef KERNEL +extern u_int32_t key_debug_level; +#endif /*KERNEL*/ + +struct sadb_msg; +struct sadb_ext; +extern void kdebug_sadb __P((struct sadb_msg *)); +extern void kdebug_sadb_x_policy __P((struct sadb_ext *)); + +#ifdef KERNEL +struct secpolicy; +struct secpolicyindex; +struct secasindex; +struct secasvar; +struct secreplay; +struct mbuf; +extern void kdebug_secpolicy __P((struct secpolicy *)); +extern void kdebug_secpolicyindex __P((struct secpolicyindex *)); +extern void kdebug_secasindex __P((struct secasindex *)); +extern void kdebug_secasv __P((struct secasvar *)); +extern void kdebug_mbufhdr __P((struct mbuf *)); +extern void kdebug_mbuf __P((struct mbuf *)); +#endif /*KERNEL*/ + +struct sockaddr; +extern void kdebug_sockaddr __P((struct sockaddr *)); + +#else + +#define KEYDEBUG(lev,arg) + +#endif /*!defined(KERNEL) || (defined(KERNEL) && defined(IPSEC_DEBUG))*/ + +extern void 
ipsec_hexdump __P((caddr_t, int)); +extern void ipsec_bindump __P((caddr_t, int)); + +#endif /* _NETKEY_KEY_DEBUG_H_ */ + diff --git a/bsd/netkey/key_var.h b/bsd/netkey/key_var.h new file mode 100644 index 000000000..ac785a140 --- /dev/null +++ b/bsd/netkey/key_var.h @@ -0,0 +1,133 @@ +/* $KAME: key_var.h,v 1.5 2000/02/22 14:06:41 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _NETKEY_KEY_VAR_H_ +#define _NETKEY_KEY_VAR_H_ + +#ifdef __NetBSD__ +#if defined(_KERNEL) && !defined(_LKM) +#include "opt_inet.h" +#endif +#endif + +/* sysctl */ +#define KEYCTL_DEBUG_LEVEL 1 +#define KEYCTL_SPI_TRY 2 +#define KEYCTL_SPI_MIN_VALUE 3 +#define KEYCTL_SPI_MAX_VALUE 4 +#define KEYCTL_RANDOM_INT 5 +#define KEYCTL_LARVAL_LIFETIME 6 +#define KEYCTL_BLOCKACQ_COUNT 7 +#define KEYCTL_BLOCKACQ_LIFETIME 8 +#define KEYCTL_MAXID 9 + +#define KEYCTL_NAMES { \ + { 0, 0 }, \ + { "debug", CTLTYPE_INT }, \ + { "spi_try", CTLTYPE_INT }, \ + { "spi_min_value", CTLTYPE_INT }, \ + { "spi_max_value", CTLTYPE_INT }, \ + { "random_int", CTLTYPE_INT }, \ + { "larval_lifetime", CTLTYPE_INT }, \ + { "blockacq_count", CTLTYPE_INT }, \ + { "blockacq_lifetime", CTLTYPE_INT }, \ +} + +//#if IPSEC_DEBUG +#define KEYCTL_VARS { \ + 0, \ + &key_debug_level, \ + &key_spi_trycnt, \ + &key_spi_minval, \ + &key_spi_maxval, \ + &key_int_random, \ + &key_larval_lifetime, \ + &key_blockacq_count, \ + &key_blockacq_lifetime, \ +} +//#else +//#define KEYCTL_VARS { \ +// 0, \ +// 0, \ +// &key_spi_trycnt, \ +// &key_spi_minval, \ +// &key_spi_maxval, \ +// &key_int_random, \ +// &key_larval_lifetime, \ +// &key_blockacq_count, \ +// &key_blockacq_lifetime, \ +//} +//#endif + +#define _ARRAYLEN(p) (sizeof(p)/sizeof(p[0])) +#define _KEYLEN(key) ((u_int)((key)->sadb_key_bits >> 3)) +#define _KEYBITS(key) ((u_int)((key)->sadb_key_bits)) +#define _KEYBUF(key) ((caddr_t)((caddr_t)(key) + sizeof(struct sadb_key))) + +#define _INADDR(in) ((struct sockaddr_in *)(in)) + +#if defined(INET6) +#define _IN6ADDR(in6) ((struct sockaddr_in6 *)(in6)) +#define _SALENBYAF(family) \ + (((family) == AF_INET) ? \ + (u_int)sizeof(struct sockaddr_in) : \ + (u_int)sizeof(struct sockaddr_in6)) +#define _INALENBYAF(family) \ + (((family) == AF_INET) ? 
\ + (u_int)sizeof(struct in_addr) : \ + (u_int)sizeof(struct in6_addr)) +#define _INADDRBYSA(saddr) \ + ((((struct sockaddr *)(saddr))->sa_family == AF_INET) ? \ + (caddr_t)&((struct sockaddr_in *)(saddr))->sin_addr : \ + (caddr_t)&((struct sockaddr_in6 *)(saddr))->sin6_addr) +#define _INPORTBYSA(saddr) \ + ((((struct sockaddr *)(saddr))->sa_family == AF_INET) ? \ + ((struct sockaddr_in *)(saddr))->sin_port : \ + ((struct sockaddr_in6 *)(saddr))->sin6_port) +#if 0 +#define _SADDRBYSA(saddr) \ + ((((struct sockaddr *)(saddr))->sa_family == AF_INET) ? \ + (caddr_t)&((struct sockaddr_in *)(saddr))->sin_addr.s_addr : \ + (caddr_t)&((struct sockaddr_in6 *)(saddr))->sin6_addr.s6_addr) +#endif +#else +#define _IN6ADDR(in6) "#error" +#define _SALENBYAF(family) sizeof(struct sockaddr_in) +#define _INALENBYAF(family) sizeof(struct in_addr) +#define _INADDRBYSA(saddr) ((caddr_t)&((struct sockaddr_in *)(saddr))->sin_addr) +#define _INPORTBYSA(saddr) (((struct sockaddr_in *)(saddr))->sin_port) +#if 0 +#define _SADDRBYSA(saddr) \ + ((caddr_t)&((struct sockaddr_in *)(saddr))->sin_addr.s_addr) +#endif +#endif /* defined(INET6) */ + +#endif /* _NETKEY_KEY_VAR_H_ */ diff --git a/bsd/netkey/keydb.c b/bsd/netkey/keydb.c new file mode 100644 index 000000000..9c2bebccf --- /dev/null +++ b/bsd/netkey/keydb.c @@ -0,0 +1,229 @@ +/* $KAME: keydb.c,v 1.61 2000/03/25 07:24:13 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if (defined(__FreeBSD__) && __FreeBSD__ >= 3) || defined(__NetBSD__) +#include "opt_inet.h" +#ifdef __NetBSD__ +#include "opt_ipsec.h" +#endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + +#include + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +MALLOC_DEFINE(M_SECA, "key mgmt", "security associations, key management"); +#endif + +static void keydb_delsecasvar __P((struct secasvar *)); + +/* + * secpolicy management + */ +struct secpolicy * +keydb_newsecpolicy() +{ + struct secpolicy *p; + + p = (struct secpolicy *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT); + if (!p) + return p; + bzero(p, sizeof(*p)); + return p; +} + +void +keydb_delsecpolicy(p) + struct secpolicy *p; +{ + + _FREE(p, M_SECA); +} + +/* + * secashead management + */ +struct secashead * +keydb_newsecashead() +{ + struct secashead *p; + int i; + + p = (struct secashead *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT); + if (!p) + return 
p; + bzero(p, sizeof(*p)); + for (i = 0; i < sizeof(p->savtree)/sizeof(p->savtree[0]); i++) + LIST_INIT(&p->savtree[i]); + return p; +} + +void +keydb_delsecashead(p) + struct secashead *p; +{ + + _FREE(p, M_SECA); +} + +/* + * secasvar management (reference counted) + */ +struct secasvar * +keydb_newsecasvar() +{ + struct secasvar *p; + + p = (struct secasvar *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT); + if (!p) + return p; + bzero(p, sizeof(*p)); + p->refcnt = 1; + return p; +} + +void +keydb_refsecasvar(p) + struct secasvar *p; +{ + int s; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + p->refcnt++; + splx(s); +} + +void +keydb_freesecasvar(p) + struct secasvar *p; +{ + int s; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + p->refcnt--; + if (p->refcnt == 0) + keydb_delsecasvar(p); + splx(s); +} + +static void +keydb_delsecasvar(p) + struct secasvar *p; +{ + + if (p->refcnt) + panic("keydb_delsecasvar called with refcnt != 0"); + + _FREE(p, M_SECA); +} + +/* + * secreplay management + */ +struct secreplay * +keydb_newsecreplay(wsize) + size_t wsize; +{ + struct secreplay *p; + + p = (struct secreplay *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT); + if (!p) + return p; + + bzero(p, sizeof(*p)); + if (wsize != 0) { + p->bitmap = (caddr_t)_MALLOC(wsize, M_SECA, M_NOWAIT); + if (!p->bitmap) { + _FREE(p, M_SECA); + return NULL; + } + bzero(p->bitmap, wsize); + } + p->wsize = wsize; + return p; +} + +void +keydb_delsecreplay(p) + struct secreplay *p; +{ + + if (p->bitmap) + _FREE(p->bitmap, M_SECA); + _FREE(p, M_SECA); +} + +/* + * secreg management + */ +struct secreg * +keydb_newsecreg() +{ + struct secreg *p; + + p = (struct secreg *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT); + if (p) + bzero(p, sizeof(*p)); + return p; +} + +void +keydb_delsecreg(p) + struct secreg *p; +{ + + _FREE(p, M_SECA); +} diff --git a/bsd/netkey/keydb.h b/bsd/netkey/keydb.h new file mode 100644 index 000000000..4e2160a75 --- /dev/null +++ 
b/bsd/netkey/keydb.h @@ -0,0 +1,164 @@ +/* $KAME: keydb.h,v 1.9 2000/02/22 14:06:41 itojun Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef _NETKEY_KEYDB_H_ +#define _NETKEY_KEYDB_H_ + +#ifdef KERNEL + +#include + +/* Security Association Index */ +/* NOTE: Ensure to be same address family */ +struct secasindex { + struct sockaddr_storage src; /* source address for SA */ + struct sockaddr_storage dst; /* destination address for SA */ + u_int16_t proto; /* IPPROTO_ESP or IPPROTO_AH */ + u_int8_t mode; /* mode of protocol, see ipsec.h */ + u_int32_t reqid; /* reqid id who owned this SA */ + /* see IPSEC_MANUAL_REQID_MAX. */ +}; + +/* Security Association Data Base */ +struct secashead { + LIST_ENTRY(secashead) chain; + + struct secasindex saidx; + + struct sadb_ident *idents; /* source identity */ + struct sadb_ident *identd; /* destination identity */ + /* XXX I don't know how to use them. */ + + u_int8_t state; /* MATURE or DEAD. */ + LIST_HEAD(_satree, secasvar) savtree[SADB_SASTATE_MAX+1]; + /* SA chain */ + /* The first of this list is newer SA */ + + struct route sa_route; /* route cache */ +}; + +/* Security Association */ +struct secasvar { + LIST_ENTRY(secasvar) chain; + + int refcnt; /* reference count */ + u_int8_t state; /* Status of this Association */ + + u_int8_t alg_auth; /* Authentication Algorithm Identifier*/ + u_int8_t alg_enc; /* Cipher Algorithm Identifier */ + u_int32_t spi; /* SPI Value, network byte order */ + u_int32_t flags; /* holder for SADB_KEY_FLAGS */ + + struct sadb_key *key_auth; /* Key for Authentication */ + /* length has been shifted up to 3. */ + struct sadb_key *key_enc; /* Key for Encryption */ + /* length has been shifted up to 3. */ + caddr_t iv; /* Initialization Vector */ + u_int ivlen; /* length of IV */ +#if 0 + caddr_t misc1; + caddr_t misc2; + caddr_t misc3; +#endif + + struct secreplay *replay; /* replay prevention */ + u_int32_t tick; /* for lifetime */ + + struct sadb_lifetime *lft_c; /* CURRENT lifetime, it's constant. 
*/ + struct sadb_lifetime *lft_h; /* HARD lifetime */ + struct sadb_lifetime *lft_s; /* SOFT lifetime */ + + u_int32_t seq; /* sequence number */ + pid_t pid; /* message's pid */ + + struct secashead *sah; /* back pointer to the secashead */ +}; + +/* replay prevention */ +struct secreplay { + u_int32_t count; + u_int wsize; /* window size, e.g. 4 bytes */ + u_int32_t seq; /* used by sender */ + u_int32_t lastseq; /* used by receiver */ + caddr_t bitmap; /* used by receiver */ + int overflow; /* overflow flag */ +}; + +/* socket table due to send PF_KEY messages. */ +struct secreg { + LIST_ENTRY(secreg) chain; + + struct socket *so; +}; + +#ifndef IPSEC_NONBLOCK_ACQUIRE +/* acquiring list table. */ +struct secacq { + LIST_ENTRY(secacq) chain; + + struct secasindex saidx; + + u_int32_t seq; /* sequence number */ + u_int32_t tick; /* for lifetime */ + int count; /* for lifetime */ +}; +#endif + +/* Sensitivity Level Specification */ +/* nothing */ + +#define SADB_KILL_INTERVAL 600 /* ten minutes (600 one-second ticks) */ + +struct key_cb { + int key_count; + int any_count; +}; + +/* secpolicy */ +extern struct secpolicy *keydb_newsecpolicy __P((void)); +extern void keydb_delsecpolicy __P((struct secpolicy *)); +/* secashead */ +extern struct secashead *keydb_newsecashead __P((void)); +extern void keydb_delsecashead __P((struct secashead *)); +/* secasvar */ +extern struct secasvar *keydb_newsecasvar __P((void)); +extern void keydb_refsecasvar __P((struct secasvar *)); +extern void keydb_freesecasvar __P((struct secasvar *)); +/* secreplay */ +extern struct secreplay *keydb_newsecreplay __P((size_t)); +extern void keydb_delsecreplay __P((struct secreplay *)); +/* secreg */ +extern struct secreg *keydb_newsecreg __P((void)); +extern void keydb_delsecreg __P((struct secreg *)); + +#endif /* KERNEL */ + +#endif /* _NETKEY_KEYDB_H_ */ diff --git a/bsd/netkey/keysock.c b/bsd/netkey/keysock.c new file mode 100644 index 000000000..6f38b7424 --- /dev/null +++ b/bsd/netkey/keysock.c @@ -0,0 +1,770 
@@ +/* $KAME: keysock.c,v 1.13 2000/03/25 07:24:13 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 +#include "opt_inet.h" +#endif + +/* This code has derived from sys/net/rtsock.c on FreeBSD2.2.5 */ + +#if defined(__NetBSD__) || defined (__APPLE__) +# ifdef _KERNEL +# define KERNEL +# endif +#endif + +#include +#include +#include +#include +#if defined(__FreeBSD__) || defined (__APPLE__) +#include +#endif +#include +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +#include +#endif +#include +#include +#include +#include +#include +#ifdef __NetBSD__ +#include +#include +#endif + +#ifdef __FreeBSD__ +#if __FreeBSD__ >= 3 +#include +#else +#include +#endif +#endif + +#include +#include + +#include +#include +#include +#include +#include + + +struct sockaddr key_dst = { 2, PF_KEY, }; +struct sockaddr key_src = { 2, PF_KEY, }; +struct sockproto key_proto = { PF_KEY, PF_KEY_V2 }; + +static int key_sendup0 __P((struct rawcb *, struct mbuf *, int)); + +struct pfkeystat pfkeystat; + +#if !(defined(__FreeBSD__) && __FreeBSD__ >= 3) && !defined(__APPLE__) +/* + * key_usrreq() + * derived from net/rtsock.c:route_usrreq() + */ +#ifndef __NetBSD__ +int +key_usrreq(so, req, m, nam, control) + register struct socket *so; + int req; + struct mbuf *m, *nam, *control; +#else +int +key_usrreq(so, req, m, nam, control, p) + register struct socket *so; + int req; + struct mbuf *m, *nam, *control; + struct proc *p; +#endif /*__NetBSD__*/ +{ + register int error = 0; + register struct keycb *kp = (struct keycb *)sotorawcb(so); + int s; + +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + if (req == PRU_ATTACH) { + kp = (struct keycb *)_MALLOC(sizeof(*kp), M_PCB, M_WAITOK); + so->so_pcb = (caddr_t)kp; + if (so->so_pcb) + bzero(so->so_pcb, sizeof(*kp)); + } + if (req == PRU_DETACH && kp) { + int af = kp->kp_raw.rcb_proto.sp_protocol; + if (af == PF_KEY) /* XXX: AF_KEY */ + key_cb.key_count--; + key_cb.any_count--; + + key_freereg(so); + } + +#ifndef __NetBSD__ + error = raw_usrreq(so, req, m, 
nam, control); +#else + error = raw_usrreq(so, req, m, nam, control, p); +#endif + m = control = NULL; /* reclaimed in raw_usrreq */ + kp = (struct keycb *)sotorawcb(so); + if (req == PRU_ATTACH && kp) { + int af = kp->kp_raw.rcb_proto.sp_protocol; + if (error) { +#if IPSEC_DEBUG + printf("key_usrreq: key_usrreq results %d\n", error); +#endif + pfkeystat.sockerr++; + _FREE((caddr_t)kp, M_PCB); + so->so_pcb = (caddr_t) 0; + splx(s); + return(error); + } + + kp->kp_promisc = kp->kp_registered = 0; + + if (af == PF_KEY) /* XXX: AF_KEY */ + key_cb.key_count++; + key_cb.any_count++; +#ifndef __bsdi__ + kp->kp_raw.rcb_laddr = &key_src; + kp->kp_raw.rcb_faddr = &key_dst; +#else + /* + * XXX rcb_faddr must be dynamically allocated, otherwise + * raw_disconnect() will be angry. + */ + { + struct mbuf *m, *n; + MGET(m, M_WAITOK, MT_DATA); + if (!m) { + error = ENOBUFS; + pfkeystat.in_nomem++; + _FREE((caddr_t)kp, M_PCB); + so->so_pcb = (caddr_t) 0; + splx(s); + return(error); + } + MGET(n, M_WAITOK, MT_DATA); + if (!n) { + error = ENOBUFS; + m_freem(m); + pfkeystat.in_nomem++; + _FREE((caddr_t)kp, M_PCB); + so->so_pcb = (caddr_t) 0; + splx(s); + return(error); + } + m->m_len = sizeof(key_src); + kp->kp_raw.rcb_laddr = mtod(m, struct sockaddr *); + bcopy(&key_src, kp->kp_raw.rcb_laddr, sizeof(key_src)); + n->m_len = sizeof(key_dst); + kp->kp_raw.rcb_faddr = mtod(n, struct sockaddr *); + bcopy(&key_dst, kp->kp_raw.rcb_faddr, sizeof(key_dst)); + } +#endif + soisconnected(so); + so->so_options |= SO_USELOOPBACK; + } + splx(s); + return(error); +} +#endif /* other than FreeBSD >= 3 */ + +/* + * key_output() + */ +int +key_output(m, so) + register struct mbuf *m; + struct socket *so; +{ + struct sadb_msg *msg = NULL; + int len, error = 0; + int s; + int target; + + if (m == 0) + panic("key_output: NULL pointer was passed.\n"); + + pfkeystat.out_total++; + pfkeystat.out_bytes += m->m_pkthdr.len; + + len = m->m_pkthdr.len; + if (len < sizeof(struct sadb_msg)) { +#if IPSEC_DEBUG + 
printf("key_output: Invalid message length.\n"); +#endif + pfkeystat.out_tooshort++; + error = EINVAL; + goto end; + } + + if (m->m_len < sizeof(struct sadb_msg)) { + if ((m = m_pullup(m, sizeof(struct sadb_msg))) == 0) { +#if IPSEC_DEBUG + printf("key_output: can't pullup mbuf\n"); +#endif + pfkeystat.out_nomem++; + error = ENOBUFS; + goto end; + } + } + + if ((m->m_flags & M_PKTHDR) == 0) + panic("key_output: not M_PKTHDR ??"); + +#if defined(IPSEC_DEBUG) + KEYDEBUG(KEYDEBUG_KEY_DUMP, kdebug_mbuf(m)); +#endif /* defined(IPSEC_DEBUG) */ + + msg = mtod(m, struct sadb_msg *); + pfkeystat.out_msgtype[msg->sadb_msg_type]++; + if (len != PFKEY_UNUNIT64(msg->sadb_msg_len)) { +#if IPSEC_DEBUG + printf("key_output: Invalid message length.\n"); +#endif + pfkeystat.out_invlen++; + error = EINVAL; + goto end; + } + + /* + * allocate memory for sadb_msg, and copy to sadb_msg from mbuf + * XXX: To be processed directly without a copy. + */ + msg = (struct sadb_msg *)_MALLOC(len, M_SECA, M_NOWAIT); + if (msg == NULL) { +#if IPSEC_DEBUG + printf("key_output: No more memory.\n"); +#endif + error = ENOBUFS; + pfkeystat.out_nomem++; + goto end; + /* or do panic ? */ + } + m_copydata(m, 0, len, (caddr_t)msg); + + /*XXX giant lock*/ +#ifdef __NetBSD__ + s = splsoftnet(); +#else + s = splnet(); +#endif + if ((len = key_parse(&msg, so, &target)) == 0) { + /* discard. i.e. no need to reply. */ + /* msg has been freed at key_parse() */ + error = 0; + splx(s); + goto end; + } + + /* send up message to the socket */ + error = key_sendup(so, msg, len, target); + splx(s); + _FREE(msg, M_SECA); +end: + m_freem(m); + return (error); +} + +/* + * send message to the socket. 
+ */ +static int +key_sendup0(rp, m, promisc) + struct rawcb *rp; + struct mbuf *m; + int promisc; +{ + if (promisc) { + struct sadb_msg *pmsg; + + M_PREPEND(m, sizeof(struct sadb_msg), M_NOWAIT); + if (m && m->m_len < sizeof(struct sadb_msg)) + m = m_pullup(m, sizeof(struct sadb_msg)); + if (!m) { +#if IPSEC_DEBUG + printf("key_sendup0: cannot pullup\n"); +#endif + pfkeystat.in_nomem++; + m_freem(m); + return ENOBUFS; + } + m->m_pkthdr.len += sizeof(*pmsg); + + pmsg = mtod(m, struct sadb_msg *); + bzero(pmsg, sizeof(*pmsg)); + pmsg->sadb_msg_version = PF_KEY_V2; + pmsg->sadb_msg_type = SADB_X_PROMISC; + pmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); + /* pid and seq? */ + + pfkeystat.in_msgtype[pmsg->sadb_msg_type]++; + } + + if (!sbappendaddr(&rp->rcb_socket->so_rcv, + (struct sockaddr *)&key_src, m, NULL)) { +#if IPSEC_DEBUG + printf("key_sendup0: sbappendaddr failed\n"); +#endif + pfkeystat.in_nomem++; + m_freem(m); + return ENOBUFS; + } + sorwakeup(rp->rcb_socket); + return 0; +} + +/* XXX this interface should be obsoleted. */ +int +key_sendup(so, msg, len, target) + struct socket *so; + struct sadb_msg *msg; + u_int len; + int target; /*target of the resulting message*/ +{ + struct mbuf *m, *n, *mprev; + int tlen; + + /* sanity check */ + if (so == 0 || msg == 0) + panic("key_sendup: NULL pointer was passed.\n"); + + KEYDEBUG(KEYDEBUG_KEY_DUMP, + printf("key_sendup: \n"); + kdebug_sadb(msg)); + + /* + * we increment statistics here, just in case we have ENOBUFS + * in this function. + */ + pfkeystat.in_total++; + pfkeystat.in_bytes += len; + pfkeystat.in_msgtype[msg->sadb_msg_type]++; + + /* + * Get mbuf chain whenever possible (not clusters), + * to save socket buffer. We'll be generating many SADB_ACQUIRE + * messages to listening key sockets. If we simmply allocate clusters, + * sbappendaddr() will raise ENOBUFS due to too little sbspace(). + * sbspace() computes # of actual data bytes AND mbuf region. 
+ * + * TODO: SADB_ACQUIRE filters should be implemented. + */ + tlen = len; + m = mprev = NULL; + while (tlen > 0) { + if (tlen == len) { + MGETHDR(n, M_DONTWAIT, MT_DATA); + n->m_len = MHLEN; + } else { + MGET(n, M_DONTWAIT, MT_DATA); + n->m_len = MLEN; + } + if (!n) { + pfkeystat.in_nomem++; + return ENOBUFS; + } + if (tlen >= MCLBYTES) { /*XXX better threshold? */ + MCLGET(n, M_DONTWAIT); + if ((n->m_flags & M_EXT) == 0) { + m_free(n); + m_freem(m); + pfkeystat.in_nomem++; + return ENOBUFS; + } + n->m_len = MCLBYTES; + } + + if (tlen < n->m_len) + n->m_len = tlen; + n->m_next = NULL; + if (m == NULL) + m = mprev = n; + else { + mprev->m_next = n; + mprev = n; + } + tlen -= n->m_len; + n = NULL; + } + m->m_pkthdr.len = len; + m->m_pkthdr.rcvif = NULL; + m_copyback(m, 0, len, (caddr_t)msg); + + /* avoid duplicated statistics */ + pfkeystat.in_total--; + pfkeystat.in_bytes -= len; + pfkeystat.in_msgtype[msg->sadb_msg_type]--; + + return key_sendup_mbuf(so, m, target); +} + +int +key_sendup_mbuf(so, m, target) + struct socket *so; + struct mbuf *m; + int target; +{ + struct mbuf *n; + struct keycb *kp; + int sendup; + struct rawcb *rp; + int error; + + if (so == NULL || m == NULL) + panic("key_sendup_mbuf: NULL pointer was passed.\n"); + + pfkeystat.in_total++; + pfkeystat.in_bytes += m->m_pkthdr.len; + if (m->m_len < sizeof(struct sadb_msg)) { +#if 1 + m = m_pullup(m, sizeof(struct sadb_msg)); + if (m == NULL) { + pfkeystat.in_nomem++; + return ENOBUFS; + } +#else + /* don't bother pulling it up just for stats */ +#endif + } + if (m->m_len >= sizeof(struct sadb_msg)) { + struct sadb_msg *msg; + msg = mtod(m, struct sadb_msg *); + pfkeystat.in_msgtype[msg->sadb_msg_type]++; + } + +#ifdef __NetBSD__ + for (rp = rawcb.lh_first; rp; rp = rp->rcb_list.le_next) +#elif defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) + LIST_FOREACH(rp, &rawcb_list, list) +#else + for (rp = rawcb.rcb_next; rp != &rawcb; rp = rp->rcb_next) +#endif + { + if 
(rp->rcb_proto.sp_family != PF_KEY) + continue; + if (rp->rcb_proto.sp_protocol + && rp->rcb_proto.sp_protocol != PF_KEY_V2) { + continue; + } + + kp = (struct keycb *)rp; + + /* + * If you are in promiscuous mode, and when you get broadcasted + * reply, you'll get two PF_KEY messages. + * (based on pf_key@inner.net message on 14 Oct 1998) + */ + if (((struct keycb *)rp)->kp_promisc) { + if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) { + (void)key_sendup0(rp, n, 1); + n = NULL; + } + } + + /* the exact target will be processed later */ + if (sotorawcb(so) == rp) + continue; + + sendup = 0; + switch (target) { + case KEY_SENDUP_ONE: + /* the statement has no effect */ + if (sotorawcb(so) == rp) + sendup++; + break; + case KEY_SENDUP_ALL: + sendup++; + break; + case KEY_SENDUP_REGISTERED: + if (kp->kp_registered) + sendup++; + break; + } + pfkeystat.in_msgtarget[target]++; + + if (!sendup) + continue; + + if ((n = m_copy(m, 0, (int)M_COPYALL)) == NULL) { +#if IPSEC_DEBUG + printf("key_sendup: m_copy fail\n"); +#endif + m_freem(m); + pfkeystat.in_nomem++; + return ENOBUFS; + } + + if ((error = key_sendup0(rp, n, 0)) != 0) { + m_freem(m); + return error; + } + + n = NULL; + } + + error = key_sendup0(sotorawcb(so), m, 0); + m = NULL; + return error; +} + +#if defined(__FreeBSD__) && __FreeBSD__ >= 3 || defined (__APPLE__) +/* + * key_abort() + * derived from net/rtsock.c:rts_abort() + */ +static int +key_abort(struct socket *so) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_abort(so); + splx(s); + return error; +} + +/* + * key_attach() + * derived from net/rtsock.c:rts_attach() + */ +static int +key_attach(struct socket *so, int proto, struct proc *p) +{ + struct keycb *kp; + int s, error; + + if (sotorawcb(so) != 0) + return EISCONN; /* XXX panic? 
*/ + kp = (struct keycb *)_MALLOC(sizeof *kp, M_PCB, M_WAITOK); /* XXX */ + if (kp == 0) + return ENOBUFS; + bzero(kp, sizeof *kp); + + /* + * The splnet() is necessary to block protocols from sending + * error notifications (like RTM_REDIRECT or RTM_LOSING) while + * this PCB is extant but incompletely initialized. + * Probably we should try to do more of this work beforehand and + * eliminate the spl. + */ + s = splnet(); + so->so_pcb = (caddr_t)kp; + error = raw_usrreqs.pru_attach(so, proto, p); + kp = (struct keycb *)sotorawcb(so); + if (error) { + _FREE(kp, M_PCB); + so->so_pcb = (caddr_t) 0; + splx(s); + printf("key_usrreq: key_usrreq results %d\n", error); + return error; + } + + kp->kp_promisc = kp->kp_registered = 0; + + if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) /* XXX: AF_KEY */ + key_cb.key_count++; + key_cb.any_count++; + kp->kp_raw.rcb_laddr = &key_src; + kp->kp_raw.rcb_faddr = &key_dst; + soisconnected(so); + so->so_options |= SO_USELOOPBACK; + + splx(s); + return 0; +} + +/* + * key_bind() + * derived from net/rtsock.c:rts_bind() + */ +static int +key_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */ + splx(s); + return error; +} + +/* + * key_connect() + * derived from net/rtsock.c:rts_connect() + */ +static int +key_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */ + splx(s); + return error; +} + +/* + * key_detach() + * derived from net/rtsock.c:rts_detach() + */ +static int +key_detach(struct socket *so) +{ + struct keycb *kp = (struct keycb *)sotorawcb(so); + int s, error; + + s = splnet(); + if (kp != 0) { + if (kp->kp_raw.rcb_proto.sp_protocol + == PF_KEY) /* XXX: AF_KEY */ + key_cb.key_count--; + key_cb.any_count--; + + key_freereg(so); + } + error = raw_usrreqs.pru_detach(so); + splx(s); + return error; +} + 
+/* + * key_disconnect() + * derived from net/rtsock.c:key_disconnect() + */ +static int +key_disconnect(struct socket *so) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_disconnect(so); + splx(s); + return error; +} + +/* + * key_peeraddr() + * derived from net/rtsock.c:rts_peeraddr() + */ +static int +key_peeraddr(struct socket *so, struct sockaddr **nam) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_peeraddr(so, nam); + splx(s); + return error; +} + +/* + * key_send() + * derived from net/rtsock.c:rts_send() + */ +static int +key_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, + struct mbuf *control, struct proc *p) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_send(so, flags, m, nam, control, p); + splx(s); + return error; +} + +/* + * key_shutdown() + * derived from net/rtsock.c:rts_shutdown() + */ +static int +key_shutdown(struct socket *so) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_shutdown(so); + splx(s); + return error; +} + +/* + * key_sockaddr() + * derived from net/rtsock.c:rts_sockaddr() + */ +static int +key_sockaddr(struct socket *so, struct sockaddr **nam) +{ + int s, error; + s = splnet(); + error = raw_usrreqs.pru_sockaddr(so, nam); + splx(s); + return error; +} + +struct pr_usrreqs key_usrreqs = { + key_abort, pru_accept_notsupp, key_attach, key_bind, + key_connect, + pru_connect2_notsupp, pru_control_notsupp, key_detach, + key_disconnect, pru_listen_notsupp, key_peeraddr, + pru_rcvd_notsupp, + pru_rcvoob_notsupp, key_send, pru_sense_null, key_shutdown, + key_sockaddr, sosend, soreceive, sopoll +}; +#endif /* __FreeBSD__ >= 3 */ + +#if __FreeBSD__ || defined (__APPLE__) +/* sysctl */ +SYSCTL_NODE(_net, PF_KEY, key, CTLFLAG_RW, 0, "Key Family"); +#endif + +/* + * Definitions of protocols supported in the KEY domain. 
+ */ + +extern struct domain keydomain; + +struct protosw keysw[] = { +{ SOCK_RAW, &keydomain, PF_KEY_V2, PR_ATOMIC|PR_ADDR, + 0, key_output, raw_ctlinput, 0, + 0, + raw_init, 0, 0, 0, + 0, &key_usrreqs +} +}; + +struct domain keydomain = + { PF_KEY, "key", key_init, 0, 0, + keysw, 0, + 0,0, + sizeof(struct key_cb), 0 + }; + +DOMAIN_SET(key); diff --git a/bsd/netkey/keysock.h b/bsd/netkey/keysock.h new file mode 100644 index 000000000..051f059c9 --- /dev/null +++ b/bsd/netkey/keysock.h @@ -0,0 +1,86 @@ +/* $KAME: keysock.h,v 1.7 2000/03/25 07:24:14 sumikawa Exp $ */ + +/* + * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _NETKEY_KEYSOCK_H_ +#define _NETKEY_KEYSOCK_H_ + +/* statistics for pfkey socket */ +struct pfkeystat { + /* kernel -> userland */ + u_quad_t out_total; /* # of total calls */ + u_quad_t out_bytes; /* total bytecount */ + u_quad_t out_msgtype[256]; /* message type histogram */ + u_quad_t out_invlen; /* invalid length field */ + u_quad_t out_invver; /* invalid version field */ + u_quad_t out_invmsgtype; /* invalid message type field */ + u_quad_t out_tooshort; /* msg too short */ + u_quad_t out_nomem; /* memory allocation failure */ + u_quad_t out_dupext; /* duplicate extension */ + u_quad_t out_invexttype; /* invalid extension type */ + u_quad_t out_invsatype; /* invalid sa type */ + u_quad_t out_invaddr; /* invalid address extension */ + /* userland -> kernel */ + u_quad_t in_total; /* # of total calls */ + u_quad_t in_bytes; /* total bytecount */ + u_quad_t in_msgtype[256]; /* message type histogram */ + u_quad_t in_msgtarget[3]; /* one/all/registered */ + u_quad_t in_nomem; /* memory allocation failure */ + /* others */ + u_quad_t sockerr; /* # of socket related errors */ +}; + +#define KEY_SENDUP_ONE 0 +#define KEY_SENDUP_ALL 1 +#define KEY_SENDUP_REGISTERED 2 + +#ifdef KERNEL +struct keycb { + struct rawcb kp_raw; /* rawcb */ + int kp_promisc; /* promiscuous mode */ + int kp_registered; /* registered socket */ +}; + +extern struct pfkeystat pfkeystat; + +extern int key_output __P((struct mbuf *, struct socket *)); 
+#ifndef __NetBSD__ +extern int key_usrreq __P((struct socket *, + int, struct mbuf *, struct mbuf *, struct mbuf *)); +#else +extern int key_usrreq __P((struct socket *, + int, struct mbuf *, struct mbuf *, struct mbuf *, struct proc *)); +#endif + +extern int key_sendup __P((struct socket *, struct sadb_msg *, u_int, int)); +extern int key_sendup_mbuf __P((struct socket *, struct mbuf *, int)); +#endif /* KERNEL */ + +#endif /*_NETKEY_KEYSOCK_H_*/ diff --git a/bsd/netkey/keyv2.h b/bsd/netkey/keyv2.h new file mode 100644 index 000000000..35db59d10 --- /dev/null +++ b/bsd/netkey/keyv2.h @@ -0,0 +1,4 @@ +/* $KAME$ */ + +/* to be nuked shortly */ +#error "obsolete include file, include net/pfkeyv2.h instead" diff --git a/bsd/netns/Makefile b/bsd/netns/Makefile new file mode 100644 index 000000000..c9a2a65c4 --- /dev/null +++ b/bsd/netns/Makefile @@ -0,0 +1,38 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + idp.h idp_var.h ns.h ns_error.h ns_if.h ns_pcb.h \ + sp.h spidp.h spp_debug.h spp_timer.h spp_var.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = netns + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = netns + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/netns/idp.h b/bsd/netns/idp.h new file mode 100644 index 000000000..e4cd666ad --- /dev/null +++ b/bsd/netns/idp.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)idp.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Definitions for NS(tm) Internet Datagram Protocol + */ +struct idp { + u_short idp_sum; /* Checksum */ + u_short idp_len; /* Length, in bytes, including header */ + u_char idp_tc; /* Transport Crontrol (i.e. hop count) */ + u_char idp_pt; /* Packet Type (i.e. level 2 protocol) */ + struct ns_addr idp_dna; /* Destination Network Address */ + struct ns_addr idp_sna; /* Source Network Address */ +}; diff --git a/bsd/netns/idp_usrreq.c b/bsd/netns/idp_usrreq.c new file mode 100644 index 000000000..7814c0cc3 --- /dev/null +++ b/bsd/netns/idp_usrreq.c @@ -0,0 +1,587 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)idp_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * IDP protocol implementation. + */ + +struct sockaddr_ns idp_ns = { sizeof(idp_ns), AF_NS }; + +/* + * This may also be called for raw listeners. + */ +idp_input(m, nsp) + struct mbuf *m; + register struct nspcb *nsp; +{ + register struct idp *idp = mtod(m, struct idp *); + struct ifnet *ifp = m->m_pkthdr.rcvif; + + if (nsp==0) + panic("No nspcb"); + /* + * Construct sockaddr format source address. + * Stuff source address and datagram in user buffer. + */ + idp_ns.sns_addr = idp->idp_sna; + if (ns_neteqnn(idp->idp_sna.x_net, ns_zeronet) && ifp) { + register struct ifaddr *ifa; + + for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family == AF_NS) { + idp_ns.sns_addr.x_net = + IA_SNS(ifa)->sns_addr.x_net; + break; + } + } + } + nsp->nsp_rpt = idp->idp_pt; + if ( ! 
(nsp->nsp_flags & NSP_RAWIN) ) { + m->m_len -= sizeof (struct idp); + m->m_pkthdr.len -= sizeof (struct idp); + m->m_data += sizeof (struct idp); + } + if (sbappendaddr(&nsp->nsp_socket->so_rcv, (struct sockaddr *)&idp_ns, + m, (struct mbuf *)0) == 0) + goto bad; + sorwakeup(nsp->nsp_socket); + return; +bad: + m_freem(m); +} + +idp_abort(nsp) + struct nspcb *nsp; +{ + struct socket *so = nsp->nsp_socket; + + ns_pcbdisconnect(nsp); + soisdisconnected(so); +} +/* + * Drop connection, reporting + * the specified error. + */ +struct nspcb * +idp_drop(nsp, errno) + register struct nspcb *nsp; + int errno; +{ + struct socket *so = nsp->nsp_socket; + + /* + * someday, in the xerox world + * we will generate error protocol packets + * announcing that the socket has gone away. + */ + /*if (TCPS_HAVERCVDSYN(tp->t_state)) { + tp->t_state = TCPS_CLOSED; + (void) tcp_output(tp); + }*/ + so->so_error = errno; + ns_pcbdisconnect(nsp); + soisdisconnected(so); +} + +int noIdpRoute; +idp_output(nsp, m0) + struct nspcb *nsp; + struct mbuf *m0; +{ + register struct mbuf *m; + register struct idp *idp; + register struct socket *so; + register int len = 0; + register struct route *ro; + struct mbuf *mprev; + extern int idpcksum; + + /* + * Calculate data length. + */ + for (m = m0; m; m = m->m_next) { + mprev = m; + len += m->m_len; + } + /* + * Make sure packet is actually of even length. + */ + + if (len & 1) { + m = mprev; + if ((m->m_flags & M_EXT) == 0 && + (m->m_len + m->m_data < &m->m_dat[MLEN])) { + m->m_len++; + } else { + struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA); + + if (m1 == 0) { + m_freem(m0); + return (ENOBUFS); + } + m1->m_len = 1; + * mtod(m1, char *) = 0; + m->m_next = m1; + } + m0->m_pkthdr.len++; + } + + /* + * Fill in mbuf with extended IDP header + * and addresses and length put into network format. 
+ */ + m = m0; + if (nsp->nsp_flags & NSP_RAWOUT) { + idp = mtod(m, struct idp *); + } else { + M_PREPEND(m, sizeof (struct idp), M_DONTWAIT); + if (m == 0) + return (ENOBUFS); + idp = mtod(m, struct idp *); + idp->idp_tc = 0; + idp->idp_pt = nsp->nsp_dpt; + idp->idp_sna = nsp->nsp_laddr; + idp->idp_dna = nsp->nsp_faddr; + len += sizeof (struct idp); + } + + idp->idp_len = htons((u_short)len); + + if (idpcksum) { + idp->idp_sum = 0; + len = ((len - 1) | 1) + 1; + idp->idp_sum = ns_cksum(m, len); + } else + idp->idp_sum = 0xffff; + + /* + * Output datagram. + */ + so = nsp->nsp_socket; + if (so->so_options & SO_DONTROUTE) + return (ns_output(m, (struct route *)0, + (so->so_options & SO_BROADCAST) | NS_ROUTETOIF)); + /* + * Use cached route for previous datagram if + * possible. If the previous net was the same + * and the interface was a broadcast medium, or + * if the previous destination was identical, + * then we are ok. + * + * NB: We don't handle broadcasts because that + * would require 3 subroutine calls. + */ + ro = &nsp->nsp_route; +#ifdef ancient_history + /* + * I think that this will all be handled in ns_pcbconnect! + */ + if (ro->ro_rt) { + if(ns_neteq(nsp->nsp_lastdst, idp->idp_dna)) { + /* + * This assumes we have no GH type routes + */ + if (ro->ro_rt->rt_flags & RTF_HOST) { + if (!ns_hosteq(nsp->nsp_lastdst, idp->idp_dna)) + goto re_route; + + } + if ((ro->ro_rt->rt_flags & RTF_GATEWAY) == 0) { + register struct ns_addr *dst = + &satons_addr(ro->ro_dst); + dst->x_host = idp->idp_dna.x_host; + } + /* + * Otherwise, we go through the same gateway + * and dst is already set up. 
+ */ + } else { + re_route: + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + } + } + nsp->nsp_lastdst = idp->idp_dna; +#endif /* ancient_history */ + if (noIdpRoute) ro = 0; + return (ns_output(m, ro, so->so_options & SO_BROADCAST)); +} +/* ARGSUSED */ +idp_ctloutput(req, so, level, name, value) + int req, level; + struct socket *so; + int name; + struct mbuf **value; +{ + register struct mbuf *m; + struct nspcb *nsp = sotonspcb(so); + int mask, error = 0; + extern long ns_pexseq; + + if (nsp == NULL) + return (EINVAL); + + switch (req) { + + case PRCO_GETOPT: + if (value==NULL) + return (EINVAL); + m = m_get(M_DONTWAIT, MT_DATA); + if (m==NULL) + return (ENOBUFS); + switch (name) { + + case SO_ALL_PACKETS: + mask = NSP_ALL_PACKETS; + goto get_flags; + + case SO_HEADERS_ON_INPUT: + mask = NSP_RAWIN; + goto get_flags; + + case SO_HEADERS_ON_OUTPUT: + mask = NSP_RAWOUT; + get_flags: + m->m_len = sizeof(short); + *mtod(m, short *) = nsp->nsp_flags & mask; + break; + + case SO_DEFAULT_HEADERS: + m->m_len = sizeof(struct idp); + { + register struct idp *idp = mtod(m, struct idp *); + idp->idp_len = 0; + idp->idp_sum = 0; + idp->idp_tc = 0; + idp->idp_pt = nsp->nsp_dpt; + idp->idp_dna = nsp->nsp_faddr; + idp->idp_sna = nsp->nsp_laddr; + } + break; + + case SO_SEQNO: + m->m_len = sizeof(long); + *mtod(m, long *) = ns_pexseq++; + break; + + default: + error = EINVAL; + } + *value = m; + break; + + case PRCO_SETOPT: + switch (name) { + int *ok; + + case SO_ALL_PACKETS: + mask = NSP_ALL_PACKETS; + goto set_head; + + case SO_HEADERS_ON_INPUT: + mask = NSP_RAWIN; + goto set_head; + + case SO_HEADERS_ON_OUTPUT: + mask = NSP_RAWOUT; + set_head: + if (value && *value) { + ok = mtod(*value, int *); + if (*ok) + nsp->nsp_flags |= mask; + else + nsp->nsp_flags &= ~mask; + } else error = EINVAL; + break; + + case SO_DEFAULT_HEADERS: + { + register struct idp *idp + = mtod(*value, struct idp *); + nsp->nsp_dpt = idp->idp_pt; + } + break; +#ifdef NSIP + + case SO_NSIP_ROUTE: + 
error = nsip_route(*value); + break; +#endif /* NSIP */ + default: + error = EINVAL; + } + if (value && *value) + m_freem(*value); + break; + } + return (error); +} + +/*ARGSUSED*/ +idp_usrreq(so, req, m, nam, control) + struct socket *so; + int req; + struct mbuf *m, *nam, *control; +{ + struct nspcb *nsp = sotonspcb(so); + int error = 0; + + if (req == PRU_CONTROL) + return (ns_control(so, (int)m, (caddr_t)nam, + (struct ifnet *)control)); + if (control && control->m_len) { + error = EINVAL; + goto release; + } + if (nsp == NULL && req != PRU_ATTACH) { + error = EINVAL; + goto release; + } + switch (req) { + + case PRU_ATTACH: + if (nsp != NULL) { + error = EINVAL; + break; + } + error = ns_pcballoc(so, &nspcb); + if (error) + break; + error = soreserve(so, (u_long) 2048, (u_long) 2048); + if (error) + break; + break; + + case PRU_DETACH: + if (nsp == NULL) { + error = ENOTCONN; + break; + } + ns_pcbdetach(nsp); + break; + + case PRU_BIND: + error = ns_pcbbind(nsp, nam); + break; + + case PRU_LISTEN: + error = EOPNOTSUPP; + break; + + case PRU_CONNECT: + if (!ns_nullhost(nsp->nsp_faddr)) { + error = EISCONN; + break; + } + error = ns_pcbconnect(nsp, nam); + if (error == 0) + soisconnected(so); + break; + + case PRU_CONNECT2: + error = EOPNOTSUPP; + break; + + case PRU_ACCEPT: + error = EOPNOTSUPP; + break; + + case PRU_DISCONNECT: + if (ns_nullhost(nsp->nsp_faddr)) { + error = ENOTCONN; + break; + } + ns_pcbdisconnect(nsp); + soisdisconnected(so); + break; + + case PRU_SHUTDOWN: + socantsendmore(so); + break; + + case PRU_SEND: + { + struct ns_addr laddr; + int s; + + if (nam) { + laddr = nsp->nsp_laddr; + if (!ns_nullhost(nsp->nsp_faddr)) { + error = EISCONN; + break; + } + /* + * Must block input while temporarily connected. 
+ */ + s = splnet(); + error = ns_pcbconnect(nsp, nam); + if (error) { + splx(s); + break; + } + } else { + if (ns_nullhost(nsp->nsp_faddr)) { + error = ENOTCONN; + break; + } + } + error = idp_output(nsp, m); + m = NULL; + if (nam) { + ns_pcbdisconnect(nsp); + splx(s); + nsp->nsp_laddr.x_host = laddr.x_host; + nsp->nsp_laddr.x_port = laddr.x_port; + } + } + break; + + case PRU_ABORT: + ns_pcbdetach(nsp); + sofree(so); + soisdisconnected(so); + break; + + case PRU_SOCKADDR: + ns_setsockaddr(nsp, nam); + break; + + case PRU_PEERADDR: + ns_setpeeraddr(nsp, nam); + break; + + case PRU_SENSE: + /* + * stat: don't bother with a blocksize. + */ + return (0); + + case PRU_SENDOOB: + case PRU_FASTTIMO: + case PRU_SLOWTIMO: + case PRU_PROTORCV: + case PRU_PROTOSEND: + error = EOPNOTSUPP; + break; + + case PRU_CONTROL: + case PRU_RCVD: + case PRU_RCVOOB: + return (EOPNOTSUPP); /* do not free mbuf's */ + + default: + panic("idp_usrreq"); + } +release: + if (control != NULL) + m_freem(control); + if (m != NULL) + m_freem(m); + return (error); +} +/*ARGSUSED*/ +idp_raw_usrreq(so, req, m, nam, control) + struct socket *so; + int req; + struct mbuf *m, *nam, *control; +{ + int error = 0; + struct nspcb *nsp = sotonspcb(so); + extern struct nspcb nsrawpcb; + + switch (req) { + + case PRU_ATTACH: + + if (!(so->so_state & SS_PRIV) || (nsp != NULL)) { + error = EINVAL; + break; + } + error = ns_pcballoc(so, &nsrawpcb); + if (error) + break; + error = soreserve(so, (u_long) 2048, (u_long) 2048); + if (error) + break; + nsp = sotonspcb(so); + nsp->nsp_faddr.x_host = ns_broadhost; + nsp->nsp_flags = NSP_RAWIN | NSP_RAWOUT; + break; + default: + error = idp_usrreq(so, req, m, nam, control); + } + return (error); +} + diff --git a/bsd/netns/idp_var.h b/bsd/netns/idp_var.h new file mode 100644 index 000000000..d04ad07c7 --- /dev/null +++ b/bsd/netns/idp_var.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)idp_var.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * IDP Kernel Structures and Variables + */ +struct idpstat { + int idps_badsum; /* checksum bad */ + int idps_tooshort; /* packet too short */ + int idps_toosmall; /* not enough data */ + int idps_badhlen; /* ip header length < data size */ + int idps_badlen; /* ip length < ip header length */ +}; + +#ifdef KERNEL +struct idpstat idpstat; +#endif diff --git a/bsd/netns/ns.c b/bsd/netns/ns.c new file mode 100644 index 000000000..12688f479 --- /dev/null +++ b/bsd/netns/ns.c @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns.c 8.2 (Berkeley) 11/15/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#ifdef NS + +struct ns_ifaddr *ns_ifaddr; +int ns_interfaces; +extern struct sockaddr_ns ns_netmask, ns_hostmask; + +/* + * Generic internet control operations (ioctl's). + */ +/* ARGSUSED */ +ns_control(so, cmd, data, ifp) + struct socket *so; + int cmd; + caddr_t data; + register struct ifnet *ifp; +{ + register struct ifreq *ifr = (struct ifreq *)data; + register struct ns_aliasreq *ifra = (struct ns_aliasreq *)data; + register struct ns_ifaddr *ia; + struct ifaddr *ifa; + struct ns_ifaddr *oia; + int error, dstIsNew, hostIsNew; + + /* + * Find address for this interface, if it exists. 
+ */ + if (ifp == 0) + return (EADDRNOTAVAIL); + for (ia = ns_ifaddr; ia; ia = ia->ia_next) + if (ia->ia_ifp == ifp) + break; + + switch (cmd) { + + case SIOCGIFADDR: + if (ia == (struct ns_ifaddr *)0) + return (EADDRNOTAVAIL); + *(struct sockaddr_ns *)&ifr->ifr_addr = ia->ia_addr; + return (0); + + + case SIOCGIFBRDADDR: + if (ia == (struct ns_ifaddr *)0) + return (EADDRNOTAVAIL); + if ((ifp->if_flags & IFF_BROADCAST) == 0) + return (EINVAL); + *(struct sockaddr_ns *)&ifr->ifr_dstaddr = ia->ia_broadaddr; + return (0); + + case SIOCGIFDSTADDR: + if (ia == (struct ns_ifaddr *)0) + return (EADDRNOTAVAIL); + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return (EINVAL); + *(struct sockaddr_ns *)&ifr->ifr_dstaddr = ia->ia_dstaddr; + return (0); + } + + if ((so->so_state & SS_PRIV) == 0) + return (EPERM); + + switch (cmd) { + case SIOCAIFADDR: + case SIOCDIFADDR: + if (ifra->ifra_addr.sns_family == AF_NS) + for (oia = ia; ia; ia = ia->ia_next) { + if (ia->ia_ifp == ifp && + ns_neteq(ia->ia_addr.sns_addr, + ifra->ifra_addr.sns_addr)) + break; + } + if (cmd == SIOCDIFADDR && ia == 0) + return (EADDRNOTAVAIL); + /* FALLTHROUGH */ + + case SIOCSIFADDR: + case SIOCSIFDSTADDR: + if (ia == (struct ns_ifaddr *)0) { +// oia = (struct ns_ifaddr *) +// malloc(sizeof *ia, M_IFADDR, M_WAITOK); + MALLOC(oia, struct ns_ifaddr *, sizeof *ia, M_IFADDR, M_WAITOK); + if (oia == (struct ns_ifaddr *)NULL) + return (ENOBUFS); + bzero((caddr_t)oia, sizeof(*oia)); + if (ia = ns_ifaddr) { + for ( ; ia->ia_next; ia = ia->ia_next) + ; + ia->ia_next = oia; + } else + ns_ifaddr = oia; + ia = oia; + if (ifa = ifp->if_addrlist) { + for ( ; ifa->ifa_next; ifa = ifa->ifa_next) + ; + ifa->ifa_next = (struct ifaddr *) ia; + } else + ifp->if_addrlist = (struct ifaddr *) ia; + ia->ia_ifp = ifp; + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + + ia->ia_ifa.ifa_netmask = + (struct sockaddr *)&ns_netmask; + + ia->ia_ifa.ifa_dstaddr = + (struct sockaddr *)&ia->ia_dstaddr; + if (ifp->if_flags & 
IFF_BROADCAST) { + ia->ia_broadaddr.sns_family = AF_NS; + ia->ia_broadaddr.sns_len = sizeof(ia->ia_addr); + ia->ia_broadaddr.sns_addr.x_host = ns_broadhost; + } + ns_interfaces++; + } + } + + switch (cmd) { + int error; + + case SIOCSIFDSTADDR: + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + return (EINVAL); + if (ia->ia_flags & IFA_ROUTE) { + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + ia->ia_flags &= ~IFA_ROUTE; + } + if (ifp->if_ioctl) { + error = (*ifp->if_ioctl)(ifp, SIOCSIFDSTADDR, ia); + if (error) + return (error); + } + *(struct sockaddr *)&ia->ia_dstaddr = ifr->ifr_dstaddr; + return (0); + + case SIOCSIFADDR: + return (ns_ifinit(ifp, ia, + (struct sockaddr_ns *)&ifr->ifr_addr, 1)); + + case SIOCDIFADDR: + ns_ifscrub(ifp, ia); + if ((ifa = ifp->if_addrlist) == (struct ifaddr *)ia) + ifp->if_addrlist = ifa->ifa_next; + else { + while (ifa->ifa_next && + (ifa->ifa_next != (struct ifaddr *)ia)) + ifa = ifa->ifa_next; + if (ifa->ifa_next) + ifa->ifa_next = ((struct ifaddr *)ia)->ifa_next; + else + printf("Couldn't unlink nsifaddr from ifp\n"); + } + oia = ia; + if (oia == (ia = ns_ifaddr)) { + ns_ifaddr = ia->ia_next; + } else { + while (ia->ia_next && (ia->ia_next != oia)) { + ia = ia->ia_next; + } + if (ia->ia_next) + ia->ia_next = oia->ia_next; + else + printf("Didn't unlink nsifadr from list\n"); + } + IFAFREE((&oia->ia_ifa)); + if (0 == --ns_interfaces) { + /* + * We reset to virginity and start all over again + */ + ns_thishost = ns_zerohost; + } + return (0); + + case SIOCAIFADDR: + dstIsNew = 0; hostIsNew = 1; + if (ia->ia_addr.sns_family == AF_NS) { + if (ifra->ifra_addr.sns_len == 0) { + ifra->ifra_addr = ia->ia_addr; + hostIsNew = 0; + } else if (ns_neteq(ifra->ifra_addr.sns_addr, + ia->ia_addr.sns_addr)) + hostIsNew = 0; + } + if ((ifp->if_flags & IFF_POINTOPOINT) && + (ifra->ifra_dstaddr.sns_family == AF_NS)) { + if (hostIsNew == 0) + ns_ifscrub(ifp, ia); + ia->ia_dstaddr = ifra->ifra_dstaddr; + dstIsNew = 1; + } + if 
(ifra->ifra_addr.sns_family == AF_NS && + (hostIsNew || dstIsNew)) + error = ns_ifinit(ifp, ia, &ifra->ifra_addr, 0); + return (error); + + default: + if (ifp->if_ioctl == 0) + return (EOPNOTSUPP); + return ((*ifp->if_ioctl)(ifp, cmd, data)); + } +} + +/* +* Delete any previous route for an old address. +*/ +ns_ifscrub(ifp, ia) + register struct ifnet *ifp; + register struct ns_ifaddr *ia; +{ + if (ia->ia_flags & IFA_ROUTE) { + if (ifp->if_flags & IFF_POINTOPOINT) { + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); + } else + rtinit(&(ia->ia_ifa), (int)RTM_DELETE, 0); + ia->ia_flags &= ~IFA_ROUTE; + } +} +/* + * Initialize an interface's internet address + * and routing table entry. + */ +ns_ifinit(ifp, ia, sns, scrub) + register struct ifnet *ifp; + register struct ns_ifaddr *ia; + register struct sockaddr_ns *sns; +{ + struct sockaddr_ns oldaddr; + register union ns_host *h = &ia->ia_addr.sns_addr.x_host; + int s = splimp(), error; + + /* + * Set up new addresses. + */ + oldaddr = ia->ia_addr; + ia->ia_addr = *sns; + /* + * The convention we shall adopt for naming is that + * a supplied address of zero means that "we don't care". + * if there is a single interface, use the address of that + * interface as our 6 byte host address. + * if there are multiple interfaces, use any address already + * used. + * + * Give the interface a chance to initialize + * if this is its first address, + * and to validate the address if necessary. 
+ */ + if (ns_hosteqnh(ns_thishost, ns_zerohost)) { + if (ifp->if_ioctl && + (error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, ia))) { + ia->ia_addr = oldaddr; + splx(s); + return (error); + } + ns_thishost = *h; + } else if (ns_hosteqnh(sns->sns_addr.x_host, ns_zerohost) + || ns_hosteqnh(sns->sns_addr.x_host, ns_thishost)) { + *h = ns_thishost; + if (ifp->if_ioctl && + (error = (*ifp->if_ioctl)(ifp, SIOCSIFADDR, ia))) { + ia->ia_addr = oldaddr; + splx(s); + return (error); + } + if (!ns_hosteqnh(ns_thishost,*h)) { + ia->ia_addr = oldaddr; + splx(s); + return (EINVAL); + } + } else { + ia->ia_addr = oldaddr; + splx(s); + return (EINVAL); + } + ia->ia_ifa.ifa_metric = ifp->if_metric; + /* + * Add route for the network. + */ + if (scrub) { + ia->ia_ifa.ifa_addr = (struct sockaddr *)&oldaddr; + ns_ifscrub(ifp, ia); + ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; + } + if (ifp->if_flags & IFF_POINTOPOINT) + rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_HOST|RTF_UP); + else { + ia->ia_broadaddr.sns_addr.x_net = ia->ia_addr.sns_addr.x_net; + rtinit(&(ia->ia_ifa), (int)RTM_ADD, RTF_UP); + } + ia->ia_flags |= IFA_ROUTE; + return (0); +} + +/* + * Return address info for specified internet network. 
+ */ +struct ns_ifaddr * +ns_iaonnetof(dst) + register struct ns_addr *dst; +{ + register struct ns_ifaddr *ia; + register struct ns_addr *compare; + register struct ifnet *ifp; + struct ns_ifaddr *ia_maybe = 0; + union ns_net net = dst->x_net; + + for (ia = ns_ifaddr; ia; ia = ia->ia_next) { + if (ifp = ia->ia_ifp) { + if (ifp->if_flags & IFF_POINTOPOINT) { + compare = &satons_addr(ia->ia_dstaddr); + if (ns_hosteq(*dst, *compare)) + return (ia); + if (ns_neteqnn(net, ia->ia_addr.sns_addr.x_net)) + ia_maybe = ia; + } else { + if (ns_neteqnn(net, ia->ia_addr.sns_addr.x_net)) + return (ia); + } + } + } + return (ia_maybe); +} +#endif diff --git a/bsd/netns/ns.h b/bsd/netns/ns.h new file mode 100644 index 000000000..acc1a9bca --- /dev/null +++ b/bsd/netns/ns.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ns.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Constants and Structures defined by the Xerox Network Software + * per "Internet Transport Protocols", XSIS 028112, December 1981 + */ + +/* + * Protocols + */ +#define NSPROTO_RI 1 /* Routing Information */ +#define NSPROTO_ECHO 2 /* Echo Protocol */ +#define NSPROTO_ERROR 3 /* Error Protocol */ +#define NSPROTO_PE 4 /* Packet Exchange */ +#define NSPROTO_SPP 5 /* Sequenced Packet */ +#define NSPROTO_RAW 255 /* Placemarker*/ +#define NSPROTO_MAX 256 /* Placemarker*/ + + +/* + * Port/Socket numbers: network standard functions + */ + +#define NSPORT_RI 1 /* Routing Information */ +#define NSPORT_ECHO 2 /* Echo */ +#define NSPORT_RE 3 /* Router Error */ + +/* + * Ports < NSPORT_RESERVED are reserved for priveleged + * processes (e.g. root). + */ +#define NSPORT_RESERVED 3000 + +/* flags passed to ns_output as last parameter */ + +#define NS_FORWARDING 0x1 /* most of idp header exists */ +#define NS_ROUTETOIF 0x10 /* same as SO_DONTROUTE */ +#define NS_ALLOWBROADCAST SO_BROADCAST /* can send broadcast packets */ + +#define NS_MAXHOPS 15 + +/* flags passed to get/set socket option */ +#define SO_HEADERS_ON_INPUT 1 +#define SO_HEADERS_ON_OUTPUT 2 +#define SO_DEFAULT_HEADERS 3 +#define SO_LAST_HEADER 4 +#define SO_NSIP_ROUTE 5 +#define SO_SEQNO 6 +#define SO_ALL_PACKETS 7 +#define SO_MTU 8 + + +/* + * NS addressing + */ +union ns_host { + u_char c_host[6]; + u_short s_host[3]; +}; + +union ns_net { + u_char c_net[4]; + u_short s_net[2]; +}; + +union ns_net_u { + union ns_net net_e; + u_long long_e; +}; + +struct ns_addr { + union ns_net x_net; + union ns_host x_host; + u_short x_port; +}; + +/* + * Socket address, Xerox style + */ +struct sockaddr_ns { + u_char sns_len; + u_char sns_family; + struct ns_addr sns_addr; + char sns_zero[2]; +}; +#define sns_port sns_addr.x_port + +#ifdef vax +#define ns_netof(a) (*(long *) & ((a).x_net)) /* XXX - not needed */ +#endif +#define ns_neteqnn(a,b) 
(((a).s_net[0]==(b).s_net[0]) && \ + ((a).s_net[1]==(b).s_net[1])) +#define ns_neteq(a,b) ns_neteqnn((a).x_net, (b).x_net) +#define satons_addr(sa) (((struct sockaddr_ns *)&(sa))->sns_addr) +#define ns_hosteqnh(s,t) ((s).s_host[0] == (t).s_host[0] && \ + (s).s_host[1] == (t).s_host[1] && (s).s_host[2] == (t).s_host[2]) +#define ns_hosteq(s,t) (ns_hosteqnh((s).x_host,(t).x_host)) +#define ns_nullhost(x) (((x).x_host.s_host[0]==0) && \ + ((x).x_host.s_host[1]==0) && ((x).x_host.s_host[2]==0)) + +#ifdef KERNEL +extern struct domain nsdomain; +union ns_host ns_thishost; +union ns_host ns_zerohost; +union ns_host ns_broadhost; +union ns_net ns_zeronet; +union ns_net ns_broadnet; +u_short ns_cksum(); +#else + +#include + +__BEGIN_DECLS +extern struct ns_addr ns_addr __P((const char *)); +extern char *ns_ntoa __P((struct ns_addr)); +__END_DECLS + +#endif diff --git a/bsd/netns/ns_cksum.c b/bsd/netns/ns_cksum.c new file mode 100644 index 000000000..9c13af8c5 --- /dev/null +++ b/bsd/netns/ns_cksum.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ns_cksum.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include + +/* + * Checksum routine for Network Systems Protocol Packets (Big-Endian). + * + * This routine is very heavily used in the network + * code and should be modified for each CPU to be as fast as possible. + */ + +#define ADDCARRY(x) { if ((x) > 65535) (x) -= 65535; } +#define FOLD(x) {l_util.l = (x); (x) = l_util.s[0] + l_util.s[1]; ADDCARRY(x);} + +u_short +ns_cksum(m, len) + register struct mbuf *m; + register int len; +{ + register u_short *w; + register int sum = 0; + register int mlen = 0; + register int sum2; + + union { + u_short s[2]; + long l; + } l_util; + + for (;m && len; m = m->m_next) { + if (m->m_len == 0) + continue; + /* + * Each trip around loop adds in + * word from one mbuf segment. + */ + w = mtod(m, u_short *); + if (mlen == -1) { + /* + * There is a byte left from the last segment; + * ones-complement add it into the checksum. + */ +#if BYTE_ORDER == BIG_ENDIAN + sum += *(u_char *)w; +#else + sum += *(u_char *)w << 8; +#endif + sum += sum; + w = (u_short *)(1 + (char *)w); + mlen = m->m_len - 1; + len--; + FOLD(sum); + } else + mlen = m->m_len; + if (len < mlen) + mlen = len; + len -= mlen; + /* + * We can do a 16 bit ones complement sum using + * 32 bit arithmetic registers for adding, + * with carries from the low added + * into the high (by normal carry-chaining) + * so long as we fold back before 16 carries have occured. 
+ */ + if (1 & (int) w) + goto uuuuglyy; +#ifndef TINY +/* -DTINY reduces the size from 1250 to 550, but slows it down by 22% */ + while ((mlen -= 32) >= 0) { + sum += w[0]; sum += sum; sum += w[1]; sum += sum; + sum += w[2]; sum += sum; sum += w[3]; sum += sum; + sum += w[4]; sum += sum; sum += w[5]; sum += sum; + sum += w[6]; sum += sum; sum += w[7]; sum += sum; + FOLD(sum); + sum += w[8]; sum += sum; sum += w[9]; sum += sum; + sum += w[10]; sum += sum; sum += w[11]; sum += sum; + sum += w[12]; sum += sum; sum += w[13]; sum += sum; + sum += w[14]; sum += sum; sum += w[15]; sum += sum; + FOLD(sum); + w += 16; + } + mlen += 32; +#endif + while ((mlen -= 8) >= 0) { + sum += w[0]; sum += sum; sum += w[1]; sum += sum; + sum += w[2]; sum += sum; sum += w[3]; sum += sum; + FOLD(sum); + w += 4; + } + mlen += 8; + while ((mlen -= 2) >= 0) { + sum += *w++; sum += sum; + } + goto commoncase; +uuuuglyy: +#if BYTE_ORDER == BIG_ENDIAN +#define ww(n) (((u_char *)w)[n + n + 1]) +#define vv(n) (((u_char *)w)[n + n]) +#else +#if BYTE_ORDER == LITTLE_ENDIAN +#define vv(n) (((u_char *)w)[n + n + 1]) +#define ww(n) (((u_char *)w)[n + n]) +#endif +#endif + sum2 = 0; +#ifndef TINY + while ((mlen -= 32) >= 0) { + sum += ww(0); sum += sum; sum += ww(1); sum += sum; + sum += ww(2); sum += sum; sum += ww(3); sum += sum; + sum += ww(4); sum += sum; sum += ww(5); sum += sum; + sum += ww(6); sum += sum; sum += ww(7); sum += sum; + FOLD(sum); + sum += ww(8); sum += sum; sum += ww(9); sum += sum; + sum += ww(10); sum += sum; sum += ww(11); sum += sum; + sum += ww(12); sum += sum; sum += ww(13); sum += sum; + sum += ww(14); sum += sum; sum += ww(15); sum += sum; + FOLD(sum); + sum2 += vv(0); sum2 += sum2; sum2 += vv(1); sum2 += sum2; + sum2 += vv(2); sum2 += sum2; sum2 += vv(3); sum2 += sum2; + sum2 += vv(4); sum2 += sum2; sum2 += vv(5); sum2 += sum2; + sum2 += vv(6); sum2 += sum2; sum2 += vv(7); sum2 += sum2; + FOLD(sum2); + sum2 += vv(8); sum2 += sum2; sum2 += vv(9); sum2 += sum2; + sum2 += 
vv(10); sum2 += sum2; sum2 += vv(11); sum2 += sum2; + sum2 += vv(12); sum2 += sum2; sum2 += vv(13); sum2 += sum2; + sum2 += vv(14); sum2 += sum2; sum2 += vv(15); sum2 += sum2; + FOLD(sum2); + w += 16; + } + mlen += 32; +#endif + while ((mlen -= 8) >= 0) { + sum += ww(0); sum += sum; sum += ww(1); sum += sum; + sum += ww(2); sum += sum; sum += ww(3); sum += sum; + FOLD(sum); + sum2 += vv(0); sum2 += sum2; sum2 += vv(1); sum2 += sum2; + sum2 += vv(2); sum2 += sum2; sum2 += vv(3); sum2 += sum2; + FOLD(sum2); + w += 4; + } + mlen += 8; + while ((mlen -= 2) >= 0) { + sum += ww(0); sum += sum; + sum2 += vv(0); sum2 += sum2; + w++; + } + sum += (sum2 << 8); +commoncase: + if (mlen == -1) { +#if BYTE_ORDER == BIG_ENDIAN + sum += *(u_char *)w << 8; +#else + sum += *(u_char *)w; +#endif + } + FOLD(sum); + } + if (mlen == -1) { + /* We had an odd number of bytes to sum; assume a garbage + byte of zero and clean up */ + sum += sum; + FOLD(sum); + } + /* + * sum has already been kept to low sixteen bits. + * just examine result and exit. + */ + if(sum==0xffff) sum = 0; + return (sum); +} diff --git a/bsd/netns/ns_error.c b/bsd/netns/ns_error.c new file mode 100644 index 000000000..ea71eee6d --- /dev/null +++ b/bsd/netns/ns_error.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_error.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#ifdef lint +#define NS_ERRPRINTFS 1 +#endif + +#ifdef NS_ERRPRINTFS +/* + * NS_ERR routines: error generation, receive packet processing, and + * routines to turnaround packets back to the originator. + */ +int ns_errprintfs = 0; +#endif + +ns_err_x(c) +{ + register u_short *w, *lim, *base = ns_errstat.ns_es_codes; + u_short x = c; + + /* + * zero is a legit error code, handle specially + */ + if (x == 0) + return (0); + lim = base + NS_ERR_MAX - 1; + for (w = base + 1; w < lim; w++) { + if (*w == 0) + *w = x; + if (*w == x) + break; + } + return (w - base); +} + +/* + * Generate an error packet of type error + * in response to bad packet. + */ + +ns_error(om, type, param) + struct mbuf *om; + int type; +{ + register struct ns_epidp *ep; + struct mbuf *m; + struct idp *nip; + register struct idp *oip = mtod(om, struct idp *); + extern int idpcksum; + + /* + * If this packet was sent to the echo port, + * and nobody was there, just echo it. + * (Yes, this is a wart!) + */ + if (type == NS_ERR_NOSOCK && + oip->idp_dna.x_port == htons(2) && + (type = ns_echo(om))==0) + return; + +#ifdef NS_ERRPRINTFS + if (ns_errprintfs) + printf("ns_err_error(%x, %d, %d)\n", oip, type, param); +#endif + /* + * Don't Generate error packets in response to multicasts. 
+ */ + if (oip->idp_dna.x_host.c_host[0] & 1) + goto freeit; + + ns_errstat.ns_es_error++; + /* + * Make sure that the old IDP packet had 30 bytes of data to return; + * if not, don't bother. Also don't EVER error if the old + * packet protocol was NS_ERR. + */ + if (oip->idp_len < sizeof(struct idp)) { + ns_errstat.ns_es_oldshort++; + goto freeit; + } + if (oip->idp_pt == NSPROTO_ERROR) { + ns_errstat.ns_es_oldns_err++; + goto freeit; + } + + /* + * First, formulate ns_err message + */ + m = m_gethdr(M_DONTWAIT, MT_HEADER); + if (m == NULL) + goto freeit; + m->m_len = sizeof(*ep); + MH_ALIGN(m, m->m_len); + ep = mtod(m, struct ns_epidp *); + if ((u_int)type > NS_ERR_TOO_BIG) + panic("ns_err_error"); + ns_errstat.ns_es_outhist[ns_err_x(type)]++; + ep->ns_ep_errp.ns_err_num = htons((u_short)type); + ep->ns_ep_errp.ns_err_param = htons((u_short)param); + bcopy((caddr_t)oip, (caddr_t)&ep->ns_ep_errp.ns_err_idp, 42); + nip = &ep->ns_ep_idp; + nip->idp_len = sizeof(*ep); + nip->idp_len = htons((u_short)nip->idp_len); + nip->idp_pt = NSPROTO_ERROR; + nip->idp_tc = 0; + nip->idp_dna = oip->idp_sna; + nip->idp_sna = oip->idp_dna; + if (idpcksum) { + nip->idp_sum = 0; + nip->idp_sum = ns_cksum(m, sizeof(*ep)); + } else + nip->idp_sum = 0xffff; + (void) ns_output(m, (struct route *)0, 0); + +freeit: + m_freem(om); +} + +ns_printhost(p) +register struct ns_addr *p; +{ + + printf("", + p->x_net.s_net[0], + p->x_net.s_net[1], + p->x_host.s_host[0], + p->x_host.s_host[1], + p->x_host.s_host[2], + p->x_port); + +} + +/* + * Process a received NS_ERR message. + */ +ns_err_input(m) + struct mbuf *m; +{ + register struct ns_errp *ep; + register struct ns_epidp *epidp = mtod(m, struct ns_epidp *); + register int i; + int type, code, param; + + /* + * Locate ns_err structure in mbuf, and check + * that not corrupted and of at least minimum length. 
+ */ +#ifdef NS_ERRPRINTFS + if (ns_errprintfs) { + printf("ns_err_input from "); + ns_printhost(&epidp->ns_ep_idp.idp_sna); + printf("len %d\n", ntohs(epidp->ns_ep_idp.idp_len)); + } +#endif + i = sizeof (struct ns_epidp); + if (((m->m_flags & M_EXT) || m->m_len < i) && + (m = m_pullup(m, i)) == 0) { + ns_errstat.ns_es_tooshort++; + return; + } + ep = &(mtod(m, struct ns_epidp *)->ns_ep_errp); + type = ntohs(ep->ns_err_num); + param = ntohs(ep->ns_err_param); + ns_errstat.ns_es_inhist[ns_err_x(type)]++; + +#ifdef NS_ERRPRINTFS + /* + * Message type specific processing. + */ + if (ns_errprintfs) + printf("ns_err_input, type %d param %d\n", type, param); +#endif + if (type >= NS_ERR_TOO_BIG) { + goto badcode; + } + ns_errstat.ns_es_outhist[ns_err_x(type)]++; + switch (type) { + + case NS_ERR_UNREACH_HOST: + code = PRC_UNREACH_NET; + goto deliver; + + case NS_ERR_TOO_OLD: + code = PRC_TIMXCEED_INTRANS; + goto deliver; + + case NS_ERR_TOO_BIG: + code = PRC_MSGSIZE; + goto deliver; + + case NS_ERR_FULLUP: + code = PRC_QUENCH; + goto deliver; + + case NS_ERR_NOSOCK: + code = PRC_UNREACH_PORT; + goto deliver; + + case NS_ERR_UNSPEC_T: + case NS_ERR_BADSUM_T: + case NS_ERR_BADSUM: + case NS_ERR_UNSPEC: + code = PRC_PARAMPROB; + goto deliver; + + deliver: + /* + * Problem with datagram; advise higher level routines. 
+ */ +#ifdef NS_ERRPRINTFS + if (ns_errprintfs) + printf("deliver to protocol %d\n", + ep->ns_err_idp.idp_pt); +#endif + switch(ep->ns_err_idp.idp_pt) { + case NSPROTO_SPP: + spp_ctlinput(code, (caddr_t)ep); + break; + + default: + idp_ctlinput(code, (caddr_t)ep); + } + + goto freeit; + + default: + badcode: + ns_errstat.ns_es_badcode++; + goto freeit; + + } +freeit: + m_freem(m); +} + +#ifdef notdef +u_long +nstime() +{ + int s = splclock(); + u_long t; + + t = (time.tv_sec % (24*60*60)) * 1000 + time.tv_usec / 1000; + splx(s); + return (htonl(t)); +} +#endif + +ns_echo(m) +struct mbuf *m; +{ + register struct idp *idp = mtod(m, struct idp *); + register struct echo { + struct idp ec_idp; + u_short ec_op; /* Operation, 1 = request, 2 = reply */ + } *ec = (struct echo *)idp; + struct ns_addr temp; + + if (idp->idp_pt!=NSPROTO_ECHO) return(NS_ERR_NOSOCK); + if (ec->ec_op!=htons(1)) return(NS_ERR_UNSPEC); + + ec->ec_op = htons(2); + + temp = idp->idp_dna; + idp->idp_dna = idp->idp_sna; + idp->idp_sna = temp; + + if (idp->idp_sum != 0xffff) { + idp->idp_sum = 0; + idp->idp_sum = ns_cksum(m, + (int)(((ntohs(idp->idp_len) - 1)|1)+1)); + } + (void) ns_output(m, (struct route *)0, NS_FORWARDING); + return(0); +} diff --git a/bsd/netns/ns_error.h b/bsd/netns/ns_error.h new file mode 100644 index 000000000..45de80a93 --- /dev/null +++ b/bsd/netns/ns_error.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_error.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Xerox NS error messages + */ + +struct ns_errp { + u_short ns_err_num; /* Error Number */ + u_short ns_err_param; /* Error Parameter */ + struct idp ns_err_idp; /* Initial segment of offending + packet */ + u_char ns_err_lev2[12]; /* at least this much higher + level protocol */ +}; +struct ns_epidp { + struct idp ns_ep_idp; + struct ns_errp ns_ep_errp; +}; + +#define NS_ERR_UNSPEC 0 /* Unspecified Error detected at dest. */ +#define NS_ERR_BADSUM 1 /* Bad Checksum detected at dest */ +#define NS_ERR_NOSOCK 2 /* Specified socket does not exist at dest*/ +#define NS_ERR_FULLUP 3 /* Dest. refuses packet due to resource lim.*/ +#define NS_ERR_UNSPEC_T 0x200 /* Unspec. Error occured before reaching dest*/ +#define NS_ERR_BADSUM_T 0x201 /* Bad Checksum detected in transit */ +#define NS_ERR_UNREACH_HOST 0x202 /* Dest cannot be reached from here*/ +#define NS_ERR_TOO_OLD 0x203 /* Packet x'd 15 routers without delivery*/ +#define NS_ERR_TOO_BIG 0x204 /* Packet too large to be forwarded through + some intermediate gateway. The error + parameter field contains the max packet + size that can be accommodated */ +#define NS_ERR_MAX 20 + +/* + * Variables related to this implementation + * of the network systems error message protocol. 
+ */ +struct ns_errstat { +/* statistics related to ns_err packets generated */ + int ns_es_error; /* # of calls to ns_error */ + int ns_es_oldshort; /* no error 'cuz old ip too short */ + int ns_es_oldns_err; /* no error 'cuz old was ns_err */ + int ns_es_outhist[NS_ERR_MAX]; +/* statistics related to input messages processed */ + int ns_es_badcode; /* ns_err_code out of range */ + int ns_es_tooshort; /* packet < IDP_MINLEN */ + int ns_es_checksum; /* bad checksum */ + int ns_es_badlen; /* calculated bound mismatch */ + int ns_es_reflect; /* number of responses */ + int ns_es_inhist[NS_ERR_MAX]; + u_short ns_es_codes[NS_ERR_MAX];/* which error code for outhist + since we might not know all */ +}; + +#ifdef KERNEL +struct ns_errstat ns_errstat; +#endif diff --git a/bsd/netns/ns_if.h b/bsd/netns/ns_if.h new file mode 100644 index 000000000..5549575e3 --- /dev/null +++ b/bsd/netns/ns_if.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_if.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Interface address, xerox version. One of these structures + * is allocated for each interface with an internet address. 
+ * The ifaddr structure contains the protocol-independent part + * of the structure and is assumed to be first. + */ + +struct ns_ifaddr { + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + struct ns_ifaddr *ia_next; /* next in list of xerox addresses */ + struct sockaddr_ns ia_addr; /* reserve space for my address */ + struct sockaddr_ns ia_dstaddr; /* space for my broadcast address */ +#define ia_broadaddr ia_dstaddr + struct sockaddr_ns ia_netmask; /* space for my network mask */ +}; + +struct ns_aliasreq { + char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct sockaddr_ns ifra_addr; + struct sockaddr_ns ifra_broadaddr; +#define ifra_dstaddr ifra_broadaddr +}; +/* + * Given a pointer to an ns_ifaddr (ifaddr), + * return a pointer to the addr as a sockadd_ns. + */ + +#define IA_SNS(ia) (&(((struct ns_ifaddr *)(ia))->ia_addr)) + +/* This is not the right place for this but where is? */ +#define ETHERTYPE_NS 0x0600 + +#ifdef NSIP +struct nsip_req { + struct sockaddr rq_ns; /* must be ns format destination */ + struct sockaddr rq_ip; /* must be ip format gateway */ + short rq_flags; +}; +#endif + +#ifdef KERNEL +struct ns_ifaddr *ns_ifaddr; +struct ns_ifaddr *ns_iaonnetof(); +struct ifqueue nsintrq; /* XNS input packet queue */ +#endif diff --git a/bsd/netns/ns_input.c b/bsd/netns/ns_input.c new file mode 100644 index 000000000..1f85ee06e --- /dev/null +++ b/bsd/netns/ns_input.c @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_input.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * NS initialization. + */ +union ns_host ns_thishost; +union ns_host ns_zerohost; +union ns_host ns_broadhost; +union ns_net ns_zeronet; +union ns_net ns_broadnet; +struct sockaddr_ns ns_netmask, ns_hostmask; + +static u_short allones[] = {-1, -1, -1}; + +struct nspcb nspcb; +struct nspcb nsrawpcb; + +struct ifqueue nsintrq; +int nsqmaxlen = IFQ_MAXLEN; + +int idpcksum = 1; +long ns_pexseq; + +ns_init() +{ + extern struct timeval time; + + ns_broadhost = * (union ns_host *) allones; + ns_broadnet = * (union ns_net *) allones; + nspcb.nsp_next = nspcb.nsp_prev = &nspcb; + nsrawpcb.nsp_next = nsrawpcb.nsp_prev = &nsrawpcb; + nsintrq.ifq_maxlen = nsqmaxlen; + ns_pexseq = time.tv_usec; + ns_netmask.sns_len = 6; + ns_netmask.sns_addr.x_net = ns_broadnet; + ns_hostmask.sns_len = 12; + ns_hostmask.sns_addr.x_net = ns_broadnet; + ns_hostmask.sns_addr.x_host = ns_broadhost; +} + +/* + * Idp input routine. Pass to next level. 
+ */ +int nsintr_getpck = 0; +int nsintr_swtch = 0; +nsintr() +{ + register struct idp *idp; + register struct mbuf *m; + register struct nspcb *nsp; + register int i; + int len, s, error; + char oddpacketp; + +next: + /* + * Get next datagram off input queue and get IDP header + * in first mbuf. + */ + s = splimp(); + IF_DEQUEUE(&nsintrq, m); + splx(s); + nsintr_getpck++; + if (m == 0) + return; + if ((m->m_flags & M_EXT || m->m_len < sizeof (struct idp)) && + (m = m_pullup(m, sizeof (struct idp))) == 0) { + idpstat.idps_toosmall++; + goto next; + } + + /* + * Give any raw listeners a crack at the packet + */ + for (nsp = nsrawpcb.nsp_next; nsp != &nsrawpcb; nsp = nsp->nsp_next) { + struct mbuf *m1 = m_copy(m, 0, (int)M_COPYALL); + if (m1) idp_input(m1, nsp); + } + + idp = mtod(m, struct idp *); + len = ntohs(idp->idp_len); + if (oddpacketp = len & 1) { + len++; /* If this packet is of odd length, + preserve garbage byte for checksum */ + } + + /* + * Check that the amount of data in the buffers + * is as at least much as the IDP header would have us expect. + * Trim mbufs if longer than we expect. + * Drop packet if shorter than we expect. + */ + if (m->m_pkthdr.len < len) { + idpstat.idps_tooshort++; + goto bad; + } + if (m->m_pkthdr.len > len) { + if (m->m_len == m->m_pkthdr.len) { + m->m_len = len; + m->m_pkthdr.len = len; + } else + m_adj(m, len - m->m_pkthdr.len); + } + if (idpcksum && ((i = idp->idp_sum)!=0xffff)) { + idp->idp_sum = 0; + if (i != (idp->idp_sum = ns_cksum(m, len))) { + idpstat.idps_badsum++; + idp->idp_sum = i; + if (ns_hosteqnh(ns_thishost, idp->idp_dna.x_host)) + error = NS_ERR_BADSUM; + else + error = NS_ERR_BADSUM_T; + ns_error(m, error, 0); + goto next; + } + } + /* + * Is this a directed broadcast? 
+ */ + if (ns_hosteqnh(ns_broadhost,idp->idp_dna.x_host)) { + if ((!ns_neteq(idp->idp_dna, idp->idp_sna)) && + (!ns_neteqnn(idp->idp_dna.x_net, ns_broadnet)) && + (!ns_neteqnn(idp->idp_sna.x_net, ns_zeronet)) && + (!ns_neteqnn(idp->idp_dna.x_net, ns_zeronet)) ) { + /* + * Look to see if I need to eat this packet. + * Algorithm is to forward all young packets + * and prematurely age any packets which will + * by physically broadcasted. + * Any very old packets eaten without forwarding + * would die anyway. + * + * Suggestion of Bill Nesheim, Cornell U. + */ + if (idp->idp_tc < NS_MAXHOPS) { + idp_forward(m); + goto next; + } + } + /* + * Is this our packet? If not, forward. + */ + } else if (!ns_hosteqnh(ns_thishost,idp->idp_dna.x_host)) { + idp_forward(m); + goto next; + } + /* + * Locate pcb for datagram. + */ + nsp = ns_pcblookup(&idp->idp_sna, idp->idp_dna.x_port, NS_WILDCARD); + /* + * Switch out to protocol's input routine. + */ + nsintr_swtch++; + if (nsp) { + if (oddpacketp) { + m_adj(m, -1); + } + if ((nsp->nsp_flags & NSP_ALL_PACKETS)==0) + switch (idp->idp_pt) { + + case NSPROTO_SPP: + spp_input(m, nsp); + goto next; + + case NSPROTO_ERROR: + ns_err_input(m); + goto next; + } + idp_input(m, nsp); + } else { + ns_error(m, NS_ERR_NOSOCK, 0); + } + goto next; + +bad: + m_freem(m); + goto next; +} + +u_char nsctlerrmap[PRC_NCMDS] = { + ECONNABORTED, ECONNABORTED, 0, 0, + 0, 0, EHOSTDOWN, EHOSTUNREACH, + ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, 0, 0, 0, + 0, 0, 0, 0 +}; + +int idp_donosocks = 1; + +idp_ctlinput(cmd, arg) + int cmd; + caddr_t arg; +{ + struct ns_addr *ns; + struct nspcb *nsp; + struct ns_errp *errp; + int idp_abort(); + extern struct nspcb *idp_drop(); + int type; + + if (cmd < 0 || cmd > PRC_NCMDS) + return; + if (nsctlerrmap[cmd] == 0) + return; /* XXX */ + type = NS_ERR_UNREACH_HOST; + switch (cmd) { + struct sockaddr_ns *sns; + + case PRC_IFDOWN: + case PRC_HOSTDEAD: + case PRC_HOSTUNREACH: + sns = (struct 
sockaddr_ns *)arg; + if (sns->sns_family != AF_NS) + return; + ns = &sns->sns_addr; + break; + + default: + errp = (struct ns_errp *)arg; + ns = &errp->ns_err_idp.idp_dna; + type = errp->ns_err_num; + type = ntohs((u_short)type); + } + switch (type) { + + case NS_ERR_UNREACH_HOST: + ns_pcbnotify(ns, (int)nsctlerrmap[cmd], idp_abort, (long)0); + break; + + case NS_ERR_NOSOCK: + nsp = ns_pcblookup(ns, errp->ns_err_idp.idp_sna.x_port, + NS_WILDCARD); + if(nsp && idp_donosocks && ! ns_nullhost(nsp->nsp_faddr)) + (void) idp_drop(nsp, (int)nsctlerrmap[cmd]); + } +} + +int idpprintfs = 0; +int idpforwarding = 1; +/* + * Forward a packet. If some error occurs return the sender + * an error packet. Note we can't always generate a meaningful + * error message because the NS errors don't have a large enough repetoire + * of codes and types. + */ +struct route idp_droute; +struct route idp_sroute; + +idp_forward(m) +struct mbuf *m; +{ + register struct idp *idp = mtod(m, struct idp *); + register int error, type, code; + struct mbuf *mcopy = NULL; + int agedelta = 1; + int flags = NS_FORWARDING; + int ok_there = 0; + int ok_back = 0; + + if (idpprintfs) { + printf("forward: src "); + ns_printhost(&idp->idp_sna); + printf(", dst "); + ns_printhost(&idp->idp_dna); + printf("hop count %d\n", idp->idp_tc); + } + if (idpforwarding == 0) { + /* can't tell difference between net and host */ + type = NS_ERR_UNREACH_HOST, code = 0; + goto senderror; + } + idp->idp_tc++; + if (idp->idp_tc > NS_MAXHOPS) { + type = NS_ERR_TOO_OLD, code = 0; + goto senderror; + } + /* + * Save at most 42 bytes of the packet in case + * we need to generate an NS error message to the src. 
+ */ + mcopy = m_copy(m, 0, imin((int)ntohs(idp->idp_len), 42)); + + if ((ok_there = idp_do_route(&idp->idp_dna,&idp_droute))==0) { + type = NS_ERR_UNREACH_HOST, code = 0; + goto senderror; + } + /* + * Here we think about forwarding broadcast packets, + * so we try to insure that it doesn't go back out + * on the interface it came in on. Also, if we + * are going to physically broadcast this, let us + * age the packet so we can eat it safely the second time around. + */ + if (idp->idp_dna.x_host.c_host[0] & 0x1) { + struct ns_ifaddr *ia = ns_iaonnetof(&idp->idp_dna); + struct ifnet *ifp; + if (ia) { + /* I'm gonna hafta eat this packet */ + agedelta += NS_MAXHOPS - idp->idp_tc; + idp->idp_tc = NS_MAXHOPS; + } + if ((ok_back = idp_do_route(&idp->idp_sna,&idp_sroute))==0) { + /* error = ENETUNREACH; He'll never get it! */ + m_freem(m); + goto cleanup; + } + if (idp_droute.ro_rt && + (ifp=idp_droute.ro_rt->rt_ifp) && + idp_sroute.ro_rt && + (ifp!=idp_sroute.ro_rt->rt_ifp)) { + flags |= NS_ALLOWBROADCAST; + } else { + type = NS_ERR_UNREACH_HOST, code = 0; + goto senderror; + } + } + /* need to adjust checksum */ + if (idp->idp_sum!=0xffff) { + union bytes { + u_char c[4]; + u_short s[2]; + long l; + } x; + register int shift; + x.l = 0; x.c[0] = agedelta; + shift = (((((int)ntohs(idp->idp_len))+1)>>1)-2) & 0xf; + x.l = idp->idp_sum + (x.s[0] << shift); + x.l = x.s[0] + x.s[1]; + x.l = x.s[0] + x.s[1]; + if (x.l==0xffff) idp->idp_sum = 0; else idp->idp_sum = x.l; + } + if ((error = ns_output(m, &idp_droute, flags)) && + (mcopy!=NULL)) { + idp = mtod(mcopy, struct idp *); + type = NS_ERR_UNSPEC_T, code = 0; + switch (error) { + + case ENETUNREACH: + case EHOSTDOWN: + case EHOSTUNREACH: + case ENETDOWN: + case EPERM: + type = NS_ERR_UNREACH_HOST; + break; + + case EMSGSIZE: + type = NS_ERR_TOO_BIG; + code = 576; /* too hard to figure out mtu here */ + break; + + case ENOBUFS: + type = NS_ERR_UNSPEC_T; + break; + } + mcopy = NULL; + senderror: + ns_error(m, type, code); + 
} +cleanup: + if (ok_there) + idp_undo_route(&idp_droute); + if (ok_back) + idp_undo_route(&idp_sroute); + if (mcopy != NULL) + m_freem(mcopy); +} + +idp_do_route(src, ro) +struct ns_addr *src; +struct route *ro; +{ + + struct sockaddr_ns *dst; + + bzero((caddr_t)ro, sizeof (*ro)); + dst = (struct sockaddr_ns *)&ro->ro_dst; + + dst->sns_len = sizeof(*dst); + dst->sns_family = AF_NS; + dst->sns_addr = *src; + dst->sns_addr.x_port = 0; + rtalloc(ro); + if (ro->ro_rt == 0 || ro->ro_rt->rt_ifp == 0) { + return (0); + } + ro->ro_rt->rt_use++; + return (1); +} + +idp_undo_route(ro) +register struct route *ro; +{ + if (ro->ro_rt) {RTFREE(ro->ro_rt);} +} + +ns_watch_output(m, ifp) +struct mbuf *m; +struct ifnet *ifp; +{ + register struct nspcb *nsp; + register struct ifaddr *ifa; + /* + * Give any raw listeners a crack at the packet + */ + for (nsp = nsrawpcb.nsp_next; nsp != &nsrawpcb; nsp = nsp->nsp_next) { + struct mbuf *m0 = m_copy(m, 0, (int)M_COPYALL); + if (m0) { + register struct idp *idp; + + M_PREPEND(m0, sizeof (*idp), M_DONTWAIT); + if (m0 == NULL) + continue; + idp = mtod(m0, struct idp *); + idp->idp_sna.x_net = ns_zeronet; + idp->idp_sna.x_host = ns_thishost; + if (ifp && (ifp->if_flags & IFF_POINTOPOINT)) + for(ifa = ifp->if_addrlist; ifa; + ifa = ifa->ifa_next) { + if (ifa->ifa_addr->sa_family==AF_NS) { + idp->idp_sna = IA_SNS(ifa)->sns_addr; + break; + } + } + idp->idp_len = ntohl(m0->m_pkthdr.len); + idp_input(m0, nsp); + } + } +} diff --git a/bsd/netns/ns_ip.c b/bsd/netns/ns_ip.c new file mode 100644 index 000000000..cd093a377 --- /dev/null +++ b/bsd/netns/ns_ip.c @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_ip.c 8.1 (Berkeley) 6/10/93 + */ + +/* + * Software interface driver for encapsulating ns in ip. + */ + +#ifdef NSIP +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include + +struct ifnet_en { + struct ifnet ifen_ifnet; + struct route ifen_route; + struct in_addr ifen_src; + struct in_addr ifen_dst; + struct ifnet_en *ifen_next; +}; + +int nsipoutput(), nsipioctl(), nsipstart(); +#define LOMTU (1024+512); + +struct ifnet nsipif; +struct ifnet_en *nsip_list; /* list of all hosts and gateways or + broadcast addrs */ + +struct ifnet_en * +nsipattach() +{ + register struct ifnet_en *m; + register struct ifnet *ifp; + + if (nsipif.if_mtu == 0) { + ifp = &nsipif; + ifp->if_name = "nsip"; + ifp->if_mtu = LOMTU; + ifp->if_ioctl = nsipioctl; + ifp->if_output = nsipoutput; + ifp->if_start = nsipstart; + ifp->if_flags = IFF_POINTOPOINT; + } + + MALLOC((m), struct ifnet_en *, sizeof(*m), M_PCB, M_NOWAIT); + if (m == NULL) return (NULL); + m->ifen_next = nsip_list; + nsip_list = m; + ifp = &m->ifen_ifnet; + + ifp->if_name = "nsip"; + ifp->if_mtu = LOMTU; + ifp->if_ioctl = nsipioctl; + ifp->if_output = nsipoutput; + ifp->if_start = nsipstart; + ifp->if_flags = IFF_POINTOPOINT; + ifp->if_unit = nsipif.if_unit++; + if_attach(ifp); + + return (m); +} + + +/* + * Process an ioctl 
request. + */ +/* ARGSUSED */ +nsipioctl(ifp, cmd, data) + register struct ifnet *ifp; + int cmd; + caddr_t data; +{ + int error = 0; + struct ifreq *ifr; + + switch (cmd) { + + case SIOCSIFADDR: + ifp->if_flags |= IFF_UP; + /* fall into: */ + + case SIOCSIFDSTADDR: + /* + * Everything else is done at a higher level. + */ + break; + + case SIOCSIFFLAGS: + ifr = (struct ifreq *)data; + if ((ifr->ifr_flags & IFF_UP) == 0) + error = nsip_free(ifp); + + + default: + error = EINVAL; + } + return (error); +} + +struct mbuf *nsip_badlen; +struct mbuf *nsip_lastin; +int nsip_hold_input; + +idpip_input(m, ifp) + register struct mbuf *m; + struct ifnet *ifp; +{ + register struct ip *ip; + register struct idp *idp; + register struct ifqueue *ifq = &nsintrq; + int len, s; + + if (nsip_hold_input) { + if (nsip_lastin) { + m_freem(nsip_lastin); + } + nsip_lastin = m_copym(m, 0, (int)M_COPYALL, M_DONTWAIT); + } + /* + * Get IP and IDP header together in first mbuf. + */ + nsipif.if_ipackets++; + s = sizeof (struct ip) + sizeof (struct idp); + if (((m->m_flags & M_EXT) || m->m_len < s) && + (m = m_pullup(m, s)) == 0) { + nsipif.if_ierrors++; + return; + } + ip = mtod(m, struct ip *); + if (ip->ip_hl > (sizeof (struct ip) >> 2)) { + ip_stripoptions(m, (struct mbuf *)0); + if (m->m_len < s) { + if ((m = m_pullup(m, s)) == 0) { + nsipif.if_ierrors++; + return; + } + ip = mtod(m, struct ip *); + } + } + + /* + * Make mbuf data length reflect IDP length. + * If not enough data to reflect IDP length, drop. 
+ */ + m->m_data += sizeof (struct ip); + m->m_len -= sizeof (struct ip); + m->m_pkthdr.len -= sizeof (struct ip); + idp = mtod(m, struct idp *); + len = ntohs(idp->idp_len); + if (len & 1) len++; /* Preserve Garbage Byte */ + if (ip->ip_len != len) { + if (len > ip->ip_len) { + nsipif.if_ierrors++; + if (nsip_badlen) m_freem(nsip_badlen); + nsip_badlen = m; + return; + } + /* Any extra will be trimmed off by the NS routines */ + } + + /* + * Place interface pointer before the data + * for the receiving protocol. + */ + m->m_pkthdr.rcvif = ifp; + /* + * Deliver to NS + */ + s = splimp(); + if (IF_QFULL(ifq)) { + IF_DROP(ifq); +bad: + m_freem(m); + splx(s); + return; + } + IF_ENQUEUE(ifq, m); + schednetisr(NETISR_NS); + splx(s); + return; +} + +/* ARGSUSED */ +nsipoutput(ifn, m, dst) + struct ifnet_en *ifn; + register struct mbuf *m; + struct sockaddr *dst; +{ + + register struct ip *ip; + register struct route *ro = &(ifn->ifen_route); + register int len = 0; + register struct idp *idp = mtod(m, struct idp *); + int error; + + ifn->ifen_ifnet.if_opackets++; + nsipif.if_opackets++; + + + /* + * Calculate data length and make space + * for IP header. + */ + len = ntohs(idp->idp_len); + if (len & 1) len++; /* Preserve Garbage Byte */ + /* following clause not necessary on vax */ + if (3 & (int)m->m_data) { + /* force longword alignment of ip hdr */ + struct mbuf *m0 = m_gethdr(MT_HEADER, M_DONTWAIT); + if (m0 == 0) { + m_freem(m); + return (ENOBUFS); + } + MH_ALIGN(m0, sizeof (struct ip)); + m0->m_flags = m->m_flags & M_COPYFLAGS; + m0->m_next = m; + m0->m_len = sizeof (struct ip); + m0->m_pkthdr.len = m0->m_len + m->m_len; + m->m_flags &= ~M_PKTHDR; + } else { + M_PREPEND(m, sizeof (struct ip), M_DONTWAIT); + if (m == 0) + return (ENOBUFS); + } + /* + * Fill in IP header. 
+ */ + ip = mtod(m, struct ip *); + *(long *)ip = 0; + ip->ip_p = IPPROTO_IDP; + ip->ip_src = ifn->ifen_src; + ip->ip_dst = ifn->ifen_dst; + ip->ip_len = (u_short)len + sizeof (struct ip); + ip->ip_ttl = MAXTTL; + + /* + * Output final datagram. + */ + error = (ip_output(m, (struct mbuf *)0, ro, SO_BROADCAST, NULL)); + if (error) { + ifn->ifen_ifnet.if_oerrors++; + ifn->ifen_ifnet.if_ierrors = error; + } + return (error); +bad: + m_freem(m); + return (ENETUNREACH); +} + +nsipstart(ifp) +struct ifnet *ifp; +{ + panic("nsip_start called\n"); +} + +struct ifreq ifr = {"nsip0"}; + +nsip_route(m) + register struct mbuf *m; +{ + register struct nsip_req *rq = mtod(m, struct nsip_req *); + struct sockaddr_ns *ns_dst = (struct sockaddr_ns *)&rq->rq_ns; + struct sockaddr_in *ip_dst = (struct sockaddr_in *)&rq->rq_ip; + struct route ro; + struct ifnet_en *ifn; + struct sockaddr_in *src; + + /* + * First, make sure we already have an ns address: + */ + if (ns_hosteqnh(ns_thishost, ns_zerohost)) + return (EADDRNOTAVAIL); + /* + * Now, determine if we can get to the destination + */ + bzero((caddr_t)&ro, sizeof (ro)); + ro.ro_dst = *(struct sockaddr *)ip_dst; + rtalloc(&ro); + if (ro.ro_rt == 0 || ro.ro_rt->rt_ifp == 0) { + return (ENETUNREACH); + } + + /* + * And see how he's going to get back to us: + * i.e., what return ip address do we use? + */ + { + register struct in_ifaddr *ia; + struct ifnet *ifp = ro.ro_rt->rt_ifp; + + for (ia = in_ifaddr; ia; ia = ia->ia_next) + if (ia->ia_ifp == ifp) + break; + if (ia == 0) + ia = in_ifaddr; + if (ia == 0) { + RTFREE(ro.ro_rt); + return (EADDRNOTAVAIL); + } + src = (struct sockaddr_in *)&ia->ia_addr; + } + + /* + * Is there a free (pseudo-)interface or space? 
+ */ + for (ifn = nsip_list; ifn; ifn = ifn->ifen_next) { + if ((ifn->ifen_ifnet.if_flags & IFF_UP) == 0) + break; + } + if (ifn == NULL) + ifn = nsipattach(); + if (ifn == NULL) { + RTFREE(ro.ro_rt); + return (ENOBUFS); + } + ifn->ifen_route = ro; + ifn->ifen_dst = ip_dst->sin_addr; + ifn->ifen_src = src->sin_addr; + + /* + * now configure this as a point to point link + */ + ifr.ifr_name[4] = '0' + nsipif.if_unit - 1; + ifr.ifr_dstaddr = * (struct sockaddr *) ns_dst; + (void)ns_control((struct socket *)0, (int)SIOCSIFDSTADDR, (caddr_t)&ifr, + (struct ifnet *)ifn); + satons_addr(ifr.ifr_addr).x_host = ns_thishost; + return (ns_control((struct socket *)0, (int)SIOCSIFADDR, (caddr_t)&ifr, + (struct ifnet *)ifn)); +} + +nsip_free(ifp) +struct ifnet *ifp; +{ + register struct ifnet_en *ifn = (struct ifnet_en *)ifp; + struct route *ro = & ifn->ifen_route; + + if (ro->ro_rt) { + RTFREE(ro->ro_rt); + ro->ro_rt = 0; + } + ifp->if_flags &= ~IFF_UP; + return (0); +} + +nsip_ctlinput(cmd, sa) + int cmd; + struct sockaddr *sa; +{ + extern u_char inetctlerrmap[]; + struct sockaddr_in *sin; + int in_rtchange(); + + if ((unsigned)cmd >= PRC_NCMDS) + return; + if (sa->sa_family != AF_INET && sa->sa_family != AF_IMPLINK) + return; + sin = (struct sockaddr_in *)sa; + if (sin->sin_addr.s_addr == INADDR_ANY) + return; + + switch (cmd) { + + case PRC_ROUTEDEAD: + case PRC_REDIRECT_NET: + case PRC_REDIRECT_HOST: + case PRC_REDIRECT_TOSNET: + case PRC_REDIRECT_TOSHOST: + nsip_rtchange(&sin->sin_addr); + break; + } +} + +nsip_rtchange(dst) + register struct in_addr *dst; +{ + register struct ifnet_en *ifn; + + for (ifn = nsip_list; ifn; ifn = ifn->ifen_next) { + if (ifn->ifen_dst.s_addr == dst->s_addr && + ifn->ifen_route.ro_rt) { + RTFREE(ifn->ifen_route.ro_rt); + ifn->ifen_route.ro_rt = 0; + } + } +} +#endif diff --git a/bsd/netns/ns_output.c b/bsd/netns/ns_output.c new file mode 100644 index 000000000..276de6c0a --- /dev/null +++ b/bsd/netns/ns_output.c @@ -0,0 +1,181 @@ +/* + * 
Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_output.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef vax +#include +#endif +int ns_hold_output = 0; +int ns_copy_output = 0; +int ns_output_cnt = 0; +struct mbuf *ns_lastout; + +ns_output(m0, ro, flags) + struct mbuf *m0; + struct route *ro; + int flags; +{ + register struct idp *idp = mtod(m0, struct idp *); + register struct ifnet *ifp = 0; + int error = 0; + struct route idproute; + struct sockaddr_ns *dst; + extern int idpcksum; + + if (ns_hold_output) { + if (ns_lastout) { + (void)m_free(ns_lastout); + } + ns_lastout = m_copy(m0, 0, (int)M_COPYALL); + } + /* + * Route packet. 
+ */ + if (ro == 0) { + ro = &idproute; + bzero((caddr_t)ro, sizeof (*ro)); + } + dst = (struct sockaddr_ns *)&ro->ro_dst; + if (ro->ro_rt == 0) { + dst->sns_family = AF_NS; + dst->sns_len = sizeof (*dst); + dst->sns_addr = idp->idp_dna; + dst->sns_addr.x_port = 0; + /* + * If routing to interface only, + * short circuit routing lookup. + */ + if (flags & NS_ROUTETOIF) { + struct ns_ifaddr *ia = ns_iaonnetof(&idp->idp_dna); + + if (ia == 0) { + error = ENETUNREACH; + goto bad; + } + ifp = ia->ia_ifp; + goto gotif; + } + rtalloc(ro); + } else if ((ro->ro_rt->rt_flags & RTF_UP) == 0) { + /* + * The old route has gone away; try for a new one. + */ + rtfree(ro->ro_rt); + ro->ro_rt = NULL; + rtalloc(ro); + } + if (ro->ro_rt == 0 || (ifp = ro->ro_rt->rt_ifp) == 0) { + error = ENETUNREACH; + goto bad; + } + ro->ro_rt->rt_use++; + if (ro->ro_rt->rt_flags & (RTF_GATEWAY|RTF_HOST)) + dst = (struct sockaddr_ns *)ro->ro_rt->rt_gateway; +gotif: + + /* + * Look for multicast addresses and + * and verify user is allowed to send + * such a packet. + */ + if (dst->sns_addr.x_host.c_host[0]&1) { + if ((ifp->if_flags & IFF_BROADCAST) == 0) { + error = EADDRNOTAVAIL; + goto bad; + } + if ((flags & NS_ALLOWBROADCAST) == 0) { + error = EACCES; + goto bad; + } + } + + if (htons(idp->idp_len) <= ifp->if_mtu) { + ns_output_cnt++; + if (ns_copy_output) { + ns_watch_output(m0, ifp); + } + error = (*ifp->if_output)(ifp, m0, + (struct sockaddr *)dst, ro->ro_rt); + goto done; + } else error = EMSGSIZE; + + +bad: + if (ns_copy_output) { + ns_watch_output(m0, ifp); + } + m_freem(m0); +done: + if (ro == &idproute && (flags & NS_ROUTETOIF) == 0 && ro->ro_rt) { + RTFREE(ro->ro_rt); + ro->ro_rt = 0; + } + return (error); +} diff --git a/bsd/netns/ns_pcb.c b/bsd/netns/ns_pcb.c new file mode 100644 index 000000000..c02bb6a5c --- /dev/null +++ b/bsd/netns/ns_pcb.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_pcb.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +struct ns_addr zerons_addr; + +ns_pcballoc(so, head) + struct socket *so; + struct nspcb *head; +{ + struct mbuf *m; + register struct nspcb *nsp; + + m = m_getclr(M_DONTWAIT, MT_PCB); + if (m == NULL) + return (ENOBUFS); + nsp = mtod(m, struct nspcb *); + nsp->nsp_socket = so; + insque(nsp, head); + so->so_pcb = (caddr_t)nsp; + return (0); +} + +ns_pcbbind(nsp, nam) + register struct nspcb *nsp; + struct mbuf *nam; +{ + register struct sockaddr_ns *sns; + u_short lport = 0; + + if (nsp->nsp_lport || !ns_nullhost(nsp->nsp_laddr)) + return (EINVAL); + if (nam == 0) + goto noname; + sns = mtod(nam, struct sockaddr_ns *); + if (nam->m_len != sizeof (*sns)) + return (EINVAL); + if (!ns_nullhost(sns->sns_addr)) { + int tport = sns->sns_port; + + sns->sns_port = 0; /* yech... 
*/ + if (ifa_ifwithaddr((struct sockaddr *)sns) == 0) + return (EADDRNOTAVAIL); + sns->sns_port = tport; + } + lport = sns->sns_port; + if (lport) { + u_short aport = ntohs(lport); + + if (aport < NSPORT_RESERVED && + (nsp->nsp_socket->so_state & SS_PRIV) == 0) + return (EACCES); + if (ns_pcblookup(&zerons_addr, lport, 0)) + return (EADDRINUSE); + } + nsp->nsp_laddr = sns->sns_addr; +noname: + if (lport == 0) + do { + if (nspcb.nsp_lport++ < NSPORT_RESERVED) + nspcb.nsp_lport = NSPORT_RESERVED; + lport = htons(nspcb.nsp_lport); + } while (ns_pcblookup(&zerons_addr, lport, 0)); + nsp->nsp_lport = lport; + return (0); +} + +/* + * Connect from a socket to a specified address. + * Both address and port must be specified in argument sns. + * If don't have a local address for this socket yet, + * then pick one. + */ +ns_pcbconnect(nsp, nam) + struct nspcb *nsp; + struct mbuf *nam; +{ + struct ns_ifaddr *ia; + register struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *); + register struct ns_addr *dst; + register struct route *ro; + struct ifnet *ifp; + + if (nam->m_len != sizeof (*sns)) + return (EINVAL); + if (sns->sns_family != AF_NS) + return (EAFNOSUPPORT); + if (sns->sns_port==0 || ns_nullhost(sns->sns_addr)) + return (EADDRNOTAVAIL); + /* + * If we haven't bound which network number to use as ours, + * we will use the number of the outgoing interface. + * This depends on having done a routing lookup, which + * we will probably have to do anyway, so we might + * as well do it now. On the other hand if we are + * sending to multiple destinations we may have already + * done the lookup, so see if we can use the route + * from before. In any case, we only + * chose a port number once, even if sending to multiple + * destinations. 
+ */ + ro = &nsp->nsp_route; + dst = &satons_addr(ro->ro_dst); + if (nsp->nsp_socket->so_options & SO_DONTROUTE) + goto flush; + if (!ns_neteq(nsp->nsp_lastdst, sns->sns_addr)) + goto flush; + if (!ns_hosteq(nsp->nsp_lastdst, sns->sns_addr)) { + if (ro->ro_rt && ! (ro->ro_rt->rt_flags & RTF_HOST)) { + /* can patch route to avoid rtalloc */ + *dst = sns->sns_addr; + } else { + flush: + if (ro->ro_rt) + RTFREE(ro->ro_rt); + ro->ro_rt = (struct rtentry *)0; + nsp->nsp_laddr.x_net = ns_zeronet; + } + }/* else cached route is ok; do nothing */ + nsp->nsp_lastdst = sns->sns_addr; + if ((nsp->nsp_socket->so_options & SO_DONTROUTE) == 0 && /*XXX*/ + (ro->ro_rt == (struct rtentry *)0 || + ro->ro_rt->rt_ifp == (struct ifnet *)0)) { + /* No route yet, so try to acquire one */ + ro->ro_dst.sa_family = AF_NS; + ro->ro_dst.sa_len = sizeof(ro->ro_dst); + *dst = sns->sns_addr; + dst->x_port = 0; + rtalloc(ro); + } + if (ns_neteqnn(nsp->nsp_laddr.x_net, ns_zeronet)) { + /* + * If route is known or can be allocated now, + * our src addr is taken from the i/f, else punt. 
+ */ + + ia = (struct ns_ifaddr *)0; + /* + * If we found a route, use the address + * corresponding to the outgoing interface + */ + if (ro->ro_rt && (ifp = ro->ro_rt->rt_ifp)) + for (ia = ns_ifaddr; ia; ia = ia->ia_next) + if (ia->ia_ifp == ifp) + break; + if (ia == 0) { + u_short fport = sns->sns_addr.x_port; + sns->sns_addr.x_port = 0; + ia = (struct ns_ifaddr *) + ifa_ifwithdstaddr((struct sockaddr *)sns); + sns->sns_addr.x_port = fport; + if (ia == 0) + ia = ns_iaonnetof(&sns->sns_addr); + if (ia == 0) + ia = ns_ifaddr; + if (ia == 0) + return (EADDRNOTAVAIL); + } + nsp->nsp_laddr.x_net = satons_addr(ia->ia_addr).x_net; + } + if (ns_pcblookup(&sns->sns_addr, nsp->nsp_lport, 0)) + return (EADDRINUSE); + if (ns_nullhost(nsp->nsp_laddr)) { + if (nsp->nsp_lport == 0) + (void) ns_pcbbind(nsp, (struct mbuf *)0); + nsp->nsp_laddr.x_host = ns_thishost; + } + nsp->nsp_faddr = sns->sns_addr; + /* Includes nsp->nsp_fport = sns->sns_port; */ + return (0); +} + +ns_pcbdisconnect(nsp) + struct nspcb *nsp; +{ + + nsp->nsp_faddr = zerons_addr; + if (nsp->nsp_socket->so_state & SS_NOFDREF) + ns_pcbdetach(nsp); +} + +ns_pcbdetach(nsp) + struct nspcb *nsp; +{ + struct socket *so = nsp->nsp_socket; + + so->so_pcb = 0; + sofree(so); + if (nsp->nsp_route.ro_rt) + rtfree(nsp->nsp_route.ro_rt); + remque(nsp); + (void) m_free(dtom(nsp)); +} + +ns_setsockaddr(nsp, nam) + register struct nspcb *nsp; + struct mbuf *nam; +{ + register struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *); + + nam->m_len = sizeof (*sns); + sns = mtod(nam, struct sockaddr_ns *); + bzero((caddr_t)sns, sizeof (*sns)); + sns->sns_len = sizeof(*sns); + sns->sns_family = AF_NS; + sns->sns_addr = nsp->nsp_laddr; +} + +ns_setpeeraddr(nsp, nam) + register struct nspcb *nsp; + struct mbuf *nam; +{ + register struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *); + + nam->m_len = sizeof (*sns); + sns = mtod(nam, struct sockaddr_ns *); + bzero((caddr_t)sns, sizeof (*sns)); + sns->sns_len = sizeof(*sns); + 
sns->sns_family = AF_NS; + sns->sns_addr = nsp->nsp_faddr; +} + +/* + * Pass some notification to all connections of a protocol + * associated with address dst. Call the + * protocol specific routine to handle each connection. + * Also pass an extra paramter via the nspcb. (which may in fact + * be a parameter list!) + */ +ns_pcbnotify(dst, errno, notify, param) + register struct ns_addr *dst; + long param; + int errno, (*notify)(); +{ + register struct nspcb *nsp, *oinp; + int s = splimp(); + + for (nsp = (&nspcb)->nsp_next; nsp != (&nspcb);) { + if (!ns_hosteq(*dst,nsp->nsp_faddr)) { + next: + nsp = nsp->nsp_next; + continue; + } + if (nsp->nsp_socket == 0) + goto next; + if (errno) + nsp->nsp_socket->so_error = errno; + oinp = nsp; + nsp = nsp->nsp_next; + oinp->nsp_notify_param = param; + (*notify)(oinp); + } + splx(s); +} + +#ifdef notdef +/* + * After a routing change, flush old routing + * and allocate a (hopefully) better one. + */ +ns_rtchange(nsp) + struct nspcb *nsp; +{ + if (nsp->nsp_route.ro_rt) { + rtfree(nsp->nsp_route.ro_rt); + nsp->nsp_route.ro_rt = 0; + /* + * A new route can be allocated the next time + * output is attempted. 
+ */ + } + /* SHOULD NOTIFY HIGHER-LEVEL PROTOCOLS */ +} +#endif + +struct nspcb * +ns_pcblookup(faddr, lport, wildp) + struct ns_addr *faddr; + u_short lport; +{ + register struct nspcb *nsp, *match = 0; + int matchwild = 3, wildcard; + u_short fport; + + fport = faddr->x_port; + for (nsp = (&nspcb)->nsp_next; nsp != (&nspcb); nsp = nsp->nsp_next) { + if (nsp->nsp_lport != lport) + continue; + wildcard = 0; + if (ns_nullhost(nsp->nsp_faddr)) { + if (!ns_nullhost(*faddr)) + wildcard++; + } else { + if (ns_nullhost(*faddr)) + wildcard++; + else { + if (!ns_hosteq(nsp->nsp_faddr, *faddr)) + continue; + if (nsp->nsp_fport != fport) { + if (nsp->nsp_fport != 0) + continue; + else + wildcard++; + } + } + } + if (wildcard && wildp==0) + continue; + if (wildcard < matchwild) { + match = nsp; + matchwild = wildcard; + if (wildcard == 0) + break; + } + } + return (match); +} diff --git a/bsd/netns/ns_pcb.h b/bsd/netns/ns_pcb.h new file mode 100644 index 000000000..47b2d62e6 --- /dev/null +++ b/bsd/netns/ns_pcb.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_pcb.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Ns protocol interface control block. 
+ */ +struct nspcb { + struct nspcb *nsp_next; /* doubly linked list */ + struct nspcb *nsp_prev; + struct nspcb *nsp_head; + struct socket *nsp_socket; /* back pointer to socket */ + struct ns_addr nsp_faddr; /* destination address */ + struct ns_addr nsp_laddr; /* socket's address */ + caddr_t nsp_pcb; /* protocol specific stuff */ + struct route nsp_route; /* routing information */ + struct ns_addr nsp_lastdst; /* validate cached route for dg socks*/ + long nsp_notify_param; /* extra info passed via ns_pcbnotify*/ + short nsp_flags; + u_char nsp_dpt; /* default packet type for idp_output*/ + u_char nsp_rpt; /* last received packet type by + idp_input() */ +}; + +/* possible flags */ + +#define NSP_IN_ABORT 0x1 /* calling abort through socket */ +#define NSP_RAWIN 0x2 /* show headers on input */ +#define NSP_RAWOUT 0x4 /* show header on output */ +#define NSP_ALL_PACKETS 0x8 /* Turn off higher proto processing */ + +#define NS_WILDCARD 1 + +#define nsp_lport nsp_laddr.x_port +#define nsp_fport nsp_faddr.x_port + +#define sotonspcb(so) ((struct nspcb *)((so)->so_pcb)) + +/* + * Nominal space allocated to a ns socket. + */ +#define NSSNDQ 2048 +#define NSRCVQ 2048 + + +#ifdef KERNEL +struct nspcb nspcb; /* head of list */ +struct nspcb *ns_pcblookup(); +#endif diff --git a/bsd/netns/ns_proto.c b/bsd/netns/ns_proto.c new file mode 100644 index 000000000..04068d49c --- /dev/null +++ b/bsd/netns/ns_proto.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ns_proto.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include + +#include + +#include + +/* + * NS protocol family: IDP, ERR, PE, SPP, ROUTE. + */ +int ns_init(); +int idp_input(), idp_output(), idp_ctlinput(), idp_usrreq(); +int idp_raw_usrreq(), idp_ctloutput(); +int spp_input(), spp_ctlinput(); +int spp_usrreq(), spp_usrreq_sp(), spp_ctloutput(); +int spp_init(), spp_fasttimo(), spp_slowtimo(); +extern int raw_usrreq(); + +extern struct domain nsdomain; + +struct protosw nssw[] = { +{ 0, &nsdomain, 0, 0, + 0, idp_output, 0, 0, + 0, + ns_init, 0, 0, 0, +}, +{ SOCK_DGRAM, &nsdomain, 0, PR_ATOMIC|PR_ADDR, + 0, 0, idp_ctlinput, idp_ctloutput, + idp_usrreq, + 0, 0, 0, 0, +}, +{ SOCK_STREAM, &nsdomain, NSPROTO_SPP, PR_CONNREQUIRED|PR_WANTRCVD, + spp_input, 0, spp_ctlinput, spp_ctloutput, + spp_usrreq, + spp_init, spp_fasttimo, spp_slowtimo, 0, +}, +{ SOCK_SEQPACKET,&nsdomain, NSPROTO_SPP, PR_CONNREQUIRED|PR_WANTRCVD|PR_ATOMIC, + spp_input, 0, spp_ctlinput, spp_ctloutput, + spp_usrreq_sp, + 0, 0, 0, 0, +}, +{ SOCK_RAW, &nsdomain, NSPROTO_RAW, PR_ATOMIC|PR_ADDR, + idp_input, idp_output, 0, idp_ctloutput, + idp_raw_usrreq, + 0, 0, 0, 0, +}, +{ SOCK_RAW, &nsdomain, NSPROTO_ERROR, PR_ATOMIC|PR_ADDR, + idp_ctlinput, idp_output, 0, idp_ctloutput, + idp_raw_usrreq, + 0, 0, 0, 0, +}, +}; + +#if 0 +/*need to look at other init functions, use net_add_proto() to assure + things are init'd properly*/ 
+LINK_PROTOS(nssw); +#endif + +struct domain nsdomain = + { AF_NS, "network systems", link_nssw_protos, 0, 0, + nssw, 0, + rn_inithead, 16, sizeof(struct sockaddr_ns)}; + diff --git a/bsd/netns/sp.h b/bsd/netns/sp.h new file mode 100644 index 000000000..e756f5fc8 --- /dev/null +++ b/bsd/netns/sp.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)sp.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Definitions for Xerox NS style sequenced packet protocol + */ + +struct sphdr { + u_char sp_cc; /* connection control */ + u_char sp_dt; /* datastream type */ +#define SP_SP 0x80 /* system packet */ +#define SP_SA 0x40 /* send acknowledgement */ +#define SP_OB 0x20 /* attention (out of band data) */ +#define SP_EM 0x10 /* end of message */ + u_short sp_sid; /* source connection identifier */ + u_short sp_did; /* destination connection identifier */ + u_short sp_seq; /* sequence number */ + u_short sp_ack; /* acknowledge number */ + u_short sp_alo; /* allocation number */ +}; diff --git a/bsd/netns/spidp.h b/bsd/netns/spidp.h new file mode 100644 index 000000000..8967590a9 --- /dev/null +++ b/bsd/netns/spidp.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)spidp.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Definitions for NS(tm) Internet Datagram Protocol + * containing a Sequenced Packet Protocol packet. 
+ */ +struct spidp { + struct idp si_i; + struct sphdr si_s; +}; +struct spidp_q { + struct spidp_q *si_next; + struct spidp_q *si_prev; +}; +#define SI(x) ((struct spidp *)x) +#define si_sum si_i.idp_sum +#define si_len si_i.idp_len +#define si_tc si_i.idp_tc +#define si_pt si_i.idp_pt +#define si_dna si_i.idp_dna +#define si_sna si_i.idp_sna +#define si_sport si_i.idp_sna.x_port +#define si_cc si_s.sp_cc +#define si_dt si_s.sp_dt +#define si_sid si_s.sp_sid +#define si_did si_s.sp_did +#define si_seq si_s.sp_seq +#define si_ack si_s.sp_ack +#define si_alo si_s.sp_alo diff --git a/bsd/netns/spp_debug.c b/bsd/netns/spp_debug.c new file mode 100644 index 000000000..52e025464 --- /dev/null +++ b/bsd/netns/spp_debug.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)spp_debug.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#define SPPTIMERS +#include +#include +#define SANAMES +#include + +int sppconsdebug = 0; +/* + * spp debug routines + */ +spp_trace(act, ostate, sp, si, req) + short act; + u_char ostate; + struct sppcb *sp; + struct spidp *si; + int req; +{ +#ifdef INET +#ifdef TCPDEBUG + u_short seq, ack, len, alo; + unsigned long iptime(); + int flags; + struct spp_debug *sd = &spp_debug[spp_debx++]; + extern char *prurequests[]; + extern char *sanames[]; + extern char *tcpstates[]; + extern char *spptimers[]; + + if (spp_debx == SPP_NDEBUG) + spp_debx = 0; + sd->sd_time = iptime(); + sd->sd_act = act; + sd->sd_ostate = ostate; + sd->sd_cb = (caddr_t)sp; + if (sp) + sd->sd_sp = *sp; + else + bzero((caddr_t)&sd->sd_sp, sizeof (*sp)); + if (si) + sd->sd_si = *si; + else + bzero((caddr_t)&sd->sd_si, sizeof (*si)); + sd->sd_req = req; + if (sppconsdebug == 0) + return; + if (ostate >= TCP_NSTATES) ostate = 0; + if (act >= SA_DROP) act = SA_DROP; + if (sp) + printf("%x %s:", sp, tcpstates[ostate]); + else + printf("???????? 
"); + printf("%s ", sanames[act]); + switch (act) { + + case SA_RESPOND: + case SA_INPUT: + case SA_OUTPUT: + case SA_DROP: + if (si == 0) + break; + seq = si->si_seq; + ack = si->si_ack; + alo = si->si_alo; + len = si->si_len; + if (act == SA_OUTPUT) { + seq = ntohs(seq); + ack = ntohs(ack); + alo = ntohs(alo); + len = ntohs(len); + } +#ifndef lint +#define p1(f) { printf("%s = %x, ", "f", f); } + p1(seq); p1(ack); p1(alo); p1(len); +#endif + flags = si->si_cc; + if (flags) { + char *cp = "<"; +#ifndef lint +#define pf(f) { if (flags&SP_/**/f) { printf("%s%s", cp, "f"); cp = ","; } } + pf(SP); pf(SA); pf(OB); pf(EM); +#else + cp = cp; +#endif + printf(">"); + } +#ifndef lint +#define p2(f) { printf("%s = %x, ", "f", si->si_/**/f); } + p2(sid);p2(did);p2(dt);p2(pt); +#endif + ns_printhost(&si->si_sna); + ns_printhost(&si->si_dna); + + if (act==SA_RESPOND) { + printf("idp_len = %x, ", + ((struct idp *)si)->idp_len); + } + break; + + case SA_USER: + printf("%s", prurequests[req&0xff]); + if ((req & 0xff) == PRU_SLOWTIMO) + printf("<%s>", spptimers[req>>8]); + break; + } + if (sp) + printf(" -> %s", tcpstates[sp->s_state]); + /* print out internal state of sp !?! */ + printf("\n"); + if (sp == 0) + return; +#ifndef lint +#define p3(f) { printf("%s = %x, ", "f", sp->s_/**/f); } + printf("\t"); p3(rack);p3(ralo);p3(smax);p3(flags); printf("\n"); +#endif +#endif +#endif +} diff --git a/bsd/netns/spp_debug.h b/bsd/netns/spp_debug.h new file mode 100644 index 000000000..8e665c622 --- /dev/null +++ b/bsd/netns/spp_debug.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)spp_debug.h 8.1 (Berkeley) 6/10/93 + */ + +struct spp_debug { + u_long sd_time; + short sd_act; + short sd_ostate; + caddr_t sd_cb; + short sd_req; + struct spidp sd_si; + struct sppcb sd_sp; +}; + +#define SA_INPUT 0 +#define SA_OUTPUT 1 +#define SA_USER 2 +#define SA_RESPOND 3 +#define SA_DROP 4 + +#ifdef SANAMES +char *sanames[] = + { "input", "output", "user", "respond", "drop" }; +#endif + +#define SPP_NDEBUG 100 +struct spp_debug spp_debug[SPP_NDEBUG]; +int spp_debx; diff --git a/bsd/netns/spp_timer.h b/bsd/netns/spp_timer.h new file mode 100644 index 000000000..85c562f4f --- /dev/null +++ b/bsd/netns/spp_timer.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)spp_timer.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Definitions of the SPP timers. These timers are counted + * down PR_SLOWHZ times a second. + */ +#define SPPT_NTIMERS 4 + +#define SPPT_REXMT 0 /* retransmit */ +#define SPPT_PERSIST 1 /* retransmit persistence */ +#define SPPT_KEEP 2 /* keep alive */ +#define SPPT_2MSL 3 /* 2*msl quiet time timer */ + +/* + * The SPPT_REXMT timer is used to force retransmissions. + * The SPP has the SPPT_REXMT timer set whenever segments + * have been sent for which ACKs are expected but not yet + * received. If an ACK is received which advances tp->snd_una, + * then the retransmit timer is cleared (if there are no more + * outstanding segments) or reset to the base value (if there + * are more ACKs expected). Whenever the retransmit timer goes off, + * we retransmit one unacknowledged segment, and do a backoff + * on the retransmit timer. + * + * The SPPT_PERSIST timer is used to keep window size information + * flowing even if the window goes shut. If all previous transmissions + * have been acknowledged (so that there are no retransmissions in progress), + * and the window is too small to bother sending anything, then we start + * the SPPT_PERSIST timer. When it expires, if the window is nonzero, + * we go to transmit state. Otherwise, at intervals send a single byte + * into the peer's window to force him to update our window information. + * We do this at most as often as SPPTV_PERSMIN time intervals, + * but no more frequently than the current estimate of round-trip + * packet time. The SPPT_PERSIST timer is cleared whenever we receive + * a window update from the peer. + * + * The SPPT_KEEP timer is used to keep connections alive. If a + * connection is idle (no segments received) for SPPTV_KEEP amount of time, + * but not yet established, then we drop the connection. 
If the connection + * is established, then we force the peer to send us a segment by sending: + * + * This segment is (deliberately) outside the window, and should elicit + * an ack segment in response from the peer. If, despite the SPPT_KEEP + * initiated segments we cannot elicit a response from a peer in SPPTV_MAXIDLE + * amount of time, then we drop the connection. + */ + +#define SPP_TTL 30 /* default time to live for SPP segs */ +/* + * Time constants. + */ +#define SPPTV_MSL ( 15*PR_SLOWHZ) /* max seg lifetime */ +#define SPPTV_SRTTBASE 0 /* base roundtrip time; + if 0, no idea yet */ +#define SPPTV_SRTTDFLT ( 3*PR_SLOWHZ) /* assumed RTT if no info */ + +#define SPPTV_PERSMIN ( 5*PR_SLOWHZ) /* retransmit persistence */ +#define SPPTV_PERSMAX ( 60*PR_SLOWHZ) /* maximum persist interval */ + +#define SPPTV_KEEP ( 75*PR_SLOWHZ) /* keep alive - 75 secs */ +#define SPPTV_MAXIDLE ( 8*SPPTV_KEEP) /* maximum allowable idle + time before drop conn */ + +#define SPPTV_MIN ( 1*PR_SLOWHZ) /* minimum allowable value */ +#define SPPTV_REXMTMAX ( 64*PR_SLOWHZ) /* max allowable REXMT value */ + +#define SPP_LINGERTIME 120 /* linger at most 2 minutes */ + +#define SPP_MAXRXTSHIFT 12 /* maximum retransmits */ + +#ifdef SPPTIMERS +char *spptimers[] = + { "REXMT", "PERSIST", "KEEP", "2MSL" }; +#endif + +/* + * Force a time value to be in a certain range. + */ +#define SPPT_RANGESET(tv, value, tvmin, tvmax) { \ + (tv) = (value); \ + if ((tv) < (tvmin)) \ + (tv) = (tvmin); \ + else if ((tv) > (tvmax)) \ + (tv) = (tvmax); \ +} + +#ifdef KERNEL +extern int spp_backoff[]; +#endif diff --git a/bsd/netns/spp_usrreq.c b/bsd/netns/spp_usrreq.c new file mode 100644 index 000000000..2178f9a19 --- /dev/null +++ b/bsd/netns/spp_usrreq.c @@ -0,0 +1,1827 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)spp_usrreq.c 8.1 (Berkeley) 6/10/93 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * SP protocol implementation. + */ +spp_init() +{ + + spp_iss = 1; /* WRONG !! 
should fish it out of TODR */ +} +struct spidp spp_savesi; +int traceallspps = 0; +extern int sppconsdebug; +int spp_hardnosed; +int spp_use_delack = 0; +u_short spp_newchecks[50]; + +/*ARGSUSED*/ +spp_input(m, nsp) + register struct mbuf *m; + register struct nspcb *nsp; +{ + register struct sppcb *cb; + register struct spidp *si = mtod(m, struct spidp *); + register struct socket *so; + short ostate; + int dropsocket = 0; + + + sppstat.spps_rcvtotal++; + if (nsp == 0) { + panic("No nspcb in spp_input\n"); + return; + } + + cb = nstosppcb(nsp); + if (cb == 0) goto bad; + + if (m->m_len < sizeof(*si)) { + if ((m = m_pullup(m, sizeof(*si))) == 0) { + sppstat.spps_rcvshort++; + return; + } + si = mtod(m, struct spidp *); + } + si->si_seq = ntohs(si->si_seq); + si->si_ack = ntohs(si->si_ack); + si->si_alo = ntohs(si->si_alo); + + so = nsp->nsp_socket; + if (so->so_options & SO_DEBUG || traceallspps) { + ostate = cb->s_state; + spp_savesi = *si; + } + if (so->so_options & SO_ACCEPTCONN) { + struct sppcb *ocb = cb; + + so = sonewconn(so, 0); + if (so == 0) { + goto drop; + } + /* + * This is ugly, but .... + * + * Mark socket as temporary until we're + * committed to keeping it. The code at + * ``drop'' and ``dropwithreset'' check the + * flag dropsocket to see if the temporary + * socket created here should be discarded. + * We mark the socket as discardable until + * we're committed to it below in TCPS_LISTEN. + */ + dropsocket++; + nsp = (struct nspcb *)so->so_pcb; + nsp->nsp_laddr = si->si_dna; + cb = nstosppcb(nsp); + cb->s_mtu = ocb->s_mtu; /* preserve sockopts */ + cb->s_flags = ocb->s_flags; /* preserve sockopts */ + cb->s_flags2 = ocb->s_flags2; /* preserve sockopts */ + cb->s_state = TCPS_LISTEN; + } + + /* + * Packet received on connection. 
+ * reset idle time and keep-alive timer; + */ + cb->s_idle = 0; + cb->s_timer[SPPT_KEEP] = SPPTV_KEEP; + + switch (cb->s_state) { + + case TCPS_LISTEN:{ + struct mbuf *am; + register struct sockaddr_ns *sns; + struct ns_addr laddr; + + /* + * If somebody here was carying on a conversation + * and went away, and his pen pal thinks he can + * still talk, we get the misdirected packet. + */ + if (spp_hardnosed && (si->si_did != 0 || si->si_seq != 0)) { + spp_istat.gonawy++; + goto dropwithreset; + } + am = m_get(M_DONTWAIT, MT_SONAME); + if (am == NULL) + goto drop; + am->m_len = sizeof (struct sockaddr_ns); + sns = mtod(am, struct sockaddr_ns *); + sns->sns_len = sizeof(*sns); + sns->sns_family = AF_NS; + sns->sns_addr = si->si_sna; + laddr = nsp->nsp_laddr; + if (ns_nullhost(laddr)) + nsp->nsp_laddr = si->si_dna; + if (ns_pcbconnect(nsp, am)) { + nsp->nsp_laddr = laddr; + (void) m_free(am); + spp_istat.noconn++; + goto drop; + } + (void) m_free(am); + spp_template(cb); + dropsocket = 0; /* committed to socket */ + cb->s_did = si->si_sid; + cb->s_rack = si->si_ack; + cb->s_ralo = si->si_alo; +#define THREEWAYSHAKE +#ifdef THREEWAYSHAKE + cb->s_state = TCPS_SYN_RECEIVED; + cb->s_force = 1 + SPPT_KEEP; + sppstat.spps_accepts++; + cb->s_timer[SPPT_KEEP] = SPPTV_KEEP; + } + break; + /* + * This state means that we have heard a response + * to our acceptance of their connection + * It is probably logically unnecessary in this + * implementation. + */ + case TCPS_SYN_RECEIVED: { + if (si->si_did!=cb->s_sid) { + spp_istat.wrncon++; + goto drop; + } +#endif + nsp->nsp_fport = si->si_sport; + cb->s_timer[SPPT_REXMT] = 0; + cb->s_timer[SPPT_KEEP] = SPPTV_KEEP; + soisconnected(so); + cb->s_state = TCPS_ESTABLISHED; + sppstat.spps_accepts++; + } + break; + + /* + * This state means that we have gotten a response + * to our attempt to establish a connection. 
+ * We fill in the data from the other side, + * telling us which port to respond to, instead of the well- + * known one we might have sent to in the first place. + * We also require that this is a response to our + * connection id. + */ + case TCPS_SYN_SENT: + if (si->si_did!=cb->s_sid) { + spp_istat.notme++; + goto drop; + } + sppstat.spps_connects++; + cb->s_did = si->si_sid; + cb->s_rack = si->si_ack; + cb->s_ralo = si->si_alo; + cb->s_dport = nsp->nsp_fport = si->si_sport; + cb->s_timer[SPPT_REXMT] = 0; + cb->s_flags |= SF_ACKNOW; + soisconnected(so); + cb->s_state = TCPS_ESTABLISHED; + /* Use roundtrip time of connection request for initial rtt */ + if (cb->s_rtt) { + cb->s_srtt = cb->s_rtt << 3; + cb->s_rttvar = cb->s_rtt << 1; + SPPT_RANGESET(cb->s_rxtcur, + ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1, + SPPTV_MIN, SPPTV_REXMTMAX); + cb->s_rtt = 0; + } + } + if (so->so_options & SO_DEBUG || traceallspps) + spp_trace(SA_INPUT, (u_char)ostate, cb, &spp_savesi, 0); + + m->m_len -= sizeof (struct idp); + m->m_pkthdr.len -= sizeof (struct idp); + m->m_data += sizeof (struct idp); + + if (spp_reass(cb, si)) { + (void) m_freem(m); + } + if (cb->s_force || (cb->s_flags & (SF_ACKNOW|SF_WIN|SF_RXT))) + (void) spp_output(cb, (struct mbuf *)0); + cb->s_flags &= ~(SF_WIN|SF_RXT); + return; + +dropwithreset: + if (dropsocket) + (void) soabort(so); + si->si_seq = ntohs(si->si_seq); + si->si_ack = ntohs(si->si_ack); + si->si_alo = ntohs(si->si_alo); + ns_error(dtom(si), NS_ERR_NOSOCK, 0); + if (cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || traceallspps) + spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0); + return; + +drop: +bad: + if (cb == 0 || cb->s_nspcb->nsp_socket->so_options & SO_DEBUG || + traceallspps) + spp_trace(SA_DROP, (u_char)ostate, cb, &spp_savesi, 0); + m_freem(m); +} + +int spprexmtthresh = 3; + +/* + * This is structurally similar to the tcp reassembly routine + * but its function is somewhat different: It merely queues + * packets up, and 
 * suppresses duplicates.
 */
/*
 * spp_reass(cb, si) -- process the SP header of packet si on connection cb
 * (ack/window bookkeeping, round-trip timing) and queue its data in
 * sequence for delivery to the socket.
 * NOTE(review): judging by the call site in spp_input(), a non-zero return
 * means the caller still owns the mbuf and must m_freem() it; 0 means the
 * packet was queued here, or was handed to ns_error() -- presumably
 * ns_error() consumes the mbuf, confirm.
 */
spp_reass(cb, si)
register struct sppcb *cb;
register struct spidp *si;
{
    register struct spidp_q *q;
    register struct mbuf *m;
    register struct socket *so = cb->s_nspcb->nsp_socket;
    char packetp = cb->s_flags & SF_HI; /* packet-interface mode: headers shown to user */
    int incr;
    char wakeup = 0;

    /* si == 0 means: just deliver whatever is already in sequence. */
    if (si == SI(0))
        goto present;
    /*
     * Update our news from them.
     */
    if (si->si_cc & SP_SA)
        cb->s_flags |= (spp_use_delack ? SF_DELACK : SF_ACKNOW);
    if (SSEQ_GT(si->si_alo, cb->s_ralo))
        cb->s_flags |= SF_WIN;
    if (SSEQ_LEQ(si->si_ack, cb->s_rack)) {
        if ((si->si_cc & SP_SP) && cb->s_rack != (cb->s_smax + 1)) {
            sppstat.spps_rcvdupack++;
            /*
             * If this is a completely duplicate ack
             * and other conditions hold, we assume
             * a packet has been dropped and retransmit
             * it exactly as in tcp_input().
             */
            if (si->si_ack != cb->s_rack ||
                si->si_alo != cb->s_ralo)
                cb->s_dupacks = 0;
            else if (++cb->s_dupacks == spprexmtthresh) {
                /* Fast retransmit: resend from the acked point. */
                u_short onxt = cb->s_snxt;
                int cwnd = cb->s_cwnd;

                cb->s_snxt = si->si_ack;
                cb->s_cwnd = CUNIT;
                cb->s_force = 1 + SPPT_REXMT;
                (void) spp_output(cb, (struct mbuf *)0);
                cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
                cb->s_rtt = 0;
                if (cwnd >= 4 * CUNIT)
                    cb->s_cwnd = cwnd / 2;
                if (SSEQ_GT(onxt, cb->s_snxt))
                    cb->s_snxt = onxt;
                return (1);
            }
        } else
            cb->s_dupacks = 0;
        goto update_window;
    }
    cb->s_dupacks = 0;
    /*
     * If our correspondent acknowledges data we haven't sent
     * TCP would drop the packet after acking.  We'll be a little
     * more permissive
     */
    if (SSEQ_GT(si->si_ack, (cb->s_smax + 1))) {
        sppstat.spps_rcvacktoomuch++;
        si->si_ack = cb->s_smax + 1;
    }
    sppstat.spps_rcvackpack++;
    /*
     * If transmit timer is running and timed sequence
     * number was acked, update smoothed round trip time.
     * See discussion of algorithm in tcp_input.c
     */
    if (cb->s_rtt && SSEQ_GT(si->si_ack, cb->s_rtseq)) {
        sppstat.spps_rttupdated++;
        if (cb->s_srtt != 0) {
            register short delta;
            delta = cb->s_rtt - (cb->s_srtt >> 3);
            if ((cb->s_srtt += delta) <= 0)
                cb->s_srtt = 1;
            if (delta < 0)
                delta = -delta;
            delta -= (cb->s_rttvar >> 2);
            if ((cb->s_rttvar += delta) <= 0)
                cb->s_rttvar = 1;
        } else {
            /*
             * No rtt measurement yet
             */
            cb->s_srtt = cb->s_rtt << 3;
            cb->s_rttvar = cb->s_rtt << 1;
        }
        cb->s_rtt = 0;
        cb->s_rxtshift = 0;
        SPPT_RANGESET(cb->s_rxtcur,
            ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1,
            SPPTV_MIN, SPPTV_REXMTMAX);
    }
    /*
     * If all outstanding data is acked, stop retransmit
     * timer and remember to restart (more output or persist).
     * If there is more data to be acked, restart retransmit
     * timer, using current (possibly backed-off) value;
     */
    if (si->si_ack == cb->s_smax + 1) {
        cb->s_timer[SPPT_REXMT] = 0;
        cb->s_flags |= SF_RXT;
    } else if (cb->s_timer[SPPT_PERSIST] == 0)
        cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
    /*
     * When new data is acked, open the congestion window.
     * If the window gives us less than ssthresh packets
     * in flight, open exponentially (maxseg at a time).
     * Otherwise open linearly (maxseg^2 / cwnd at a time).
     */
    incr = CUNIT;
    if (cb->s_cwnd > cb->s_ssthresh)
        incr = max(incr * incr / cb->s_cwnd, 1);
    cb->s_cwnd = min(cb->s_cwnd + incr, cb->s_cwmx);
    /*
     * Trim Acked data from output queue.
     */
    while ((m = so->so_snd.sb_mb) != NULL) {
        if (SSEQ_LT((mtod(m, struct spidp *))->si_seq, si->si_ack))
            sbdroprecord(&so->so_snd);
        else
            break;
    }
    sowwakeup(so);
    cb->s_rack = si->si_ack;
update_window:
    if (SSEQ_LT(cb->s_snxt, cb->s_rack))
        cb->s_snxt = cb->s_rack;
    /* Window update rule (relies on && binding tighter than ||). */
    if (SSEQ_LT(cb->s_swl1, si->si_seq) || cb->s_swl1 == si->si_seq &&
        (SSEQ_LT(cb->s_swl2, si->si_ack) ||
         cb->s_swl2 == si->si_ack && SSEQ_LT(cb->s_ralo, si->si_alo))) {
        /* keep track of pure window updates */
        if ((si->si_cc & SP_SP) && cb->s_swl2 == si->si_ack
            && SSEQ_LT(cb->s_ralo, si->si_alo)) {
            sppstat.spps_rcvwinupd++;
            sppstat.spps_rcvdupack--;
        }
        cb->s_ralo = si->si_alo;
        cb->s_swl1 = si->si_seq;
        cb->s_swl2 = si->si_ack;
        cb->s_swnd = (1 + si->si_alo - si->si_ack);
        if (cb->s_swnd > cb->s_smxw)
            cb->s_smxw = cb->s_swnd;
        cb->s_flags |= SF_WIN;
    }
    /*
     * If this packet number is higher than that which
     * we have allocated refuse it, unless urgent
     */
    if (SSEQ_GT(si->si_seq, cb->s_alo)) {
        if (si->si_cc & SP_SP) {
            sppstat.spps_rcvwinprobe++;
            return (1);
        } else
            sppstat.spps_rcvpackafterwin++;
        if (si->si_cc & SP_OB) {
            if (SSEQ_GT(si->si_seq, cb->s_alo + 60)) {
                ns_error(dtom(si), NS_ERR_FULLUP, 0);
                return (0);
            } /* else queue this packet; */
        } else {
            /*register struct socket *so = cb->s_nspcb->nsp_socket;
            if (so->so_state && SS_NOFDREF) {
                ns_error(dtom(si), NS_ERR_NOSOCK, 0);
                (void)spp_close(cb);
            } else
                   would crash system*/
            spp_istat.notyet++;
            ns_error(dtom(si), NS_ERR_FULLUP, 0);
            return (0);
        }
    }
    /*
     * If this is a system packet, we don't need to
     * queue it up, and won't update acknowledge #
     */
    if (si->si_cc & SP_SP) {
        return (1);
    }
    /*
     * We have already seen this packet, so drop.
     */
    if (SSEQ_LT(si->si_seq, cb->s_ack)) {
        spp_istat.bdreas++;
        sppstat.spps_rcvduppack++;
        if (si->si_seq == cb->s_ack - 1)
            spp_istat.lstdup++;
        return (1);
    }
    /*
     * Loop through all packets queued up to insert in
     * appropriate sequence.
     */
    for (q = cb->s_q.si_next; q!=&cb->s_q; q = q->si_next) {
        if (si->si_seq == SI(q)->si_seq) {
            sppstat.spps_rcvduppack++;
            return (1);
        }
        if (SSEQ_LT(si->si_seq, SI(q)->si_seq)) {
            sppstat.spps_rcvoopack++;
            break;
        }
    }
    insque(si, q->si_prev);
    /*
     * If this packet is urgent, inform process
     */
    if (si->si_cc & SP_OB) {
        cb->s_iobc = ((char *)si)[1 + sizeof(*si)];
        sohasoutofband(so);
        cb->s_oobflags |= SF_IOOB;
    }
present:
#define SPINC sizeof(struct sphdr)
    /*
     * Loop through all packets queued up to update acknowledge
     * number, and present all acknowledged data to user;
     * If in packet interface mode, show packet headers.
     */
    for (q = cb->s_q.si_next; q!=&cb->s_q; q = q->si_next) {
        if (SI(q)->si_seq == cb->s_ack) {
            cb->s_ack++;
            m = dtom(q);
            if (SI(q)->si_cc & SP_OB) {
                cb->s_oobflags &= ~SF_IOOB;
                if (so->so_rcv.sb_cc)
                    so->so_oobmark = so->so_rcv.sb_cc;
                else
                    so->so_state |= SS_RCVATMARK;
                postevent(so, 0, EV_OOB);
            }
            q = q->si_prev;
            remque(q->si_next);
            wakeup = 1;
            sppstat.spps_rcvpack++;
#ifdef SF_NEWCALL
            if (cb->s_flags2 & SF_NEWCALL) {
                struct sphdr *sp = mtod(m, struct sphdr *);
                u_char dt = sp->sp_dt;
                spp_newchecks[4]++;
                /* Datastream-type change: synthesize a control record. */
                if (dt != cb->s_rhdr.sp_dt) {
                    struct mbuf *mm =
                        m_getclr(M_DONTWAIT, MT_CONTROL);
                    spp_newchecks[0]++;
                    if (mm != NULL) {
                        u_short *s =
                            mtod(mm, u_short *);
                        cb->s_rhdr.sp_dt = dt;
                        mm->m_len = 5; /*XXX*/
                        s[0] = 5;
                        s[1] = 1;
                        *(u_char *)(&s[2]) = dt;
                        sbappend(&so->so_rcv, mm);
                    }
                }
                if (sp->sp_cc & SP_OB) {
                    MCHTYPE(m, MT_OOBDATA);
                    spp_newchecks[1]++;
                    so->so_oobmark = 0;
                    so->so_state &= ~SS_RCVATMARK;
                }
                if (packetp == 0) {
                    /* Strip the SP header before delivery. */
                    m->m_data += SPINC;
                    m->m_len -= SPINC;
                    m->m_pkthdr.len -= SPINC;
                }
                if ((sp->sp_cc & SP_EM) || packetp) {
                    sbappendrecord(&so->so_rcv, m);
                    spp_newchecks[9]++;
                } else
                    sbappend(&so->so_rcv, m);
            } else
#endif
            if (packetp) {
                sbappendrecord(&so->so_rcv, m);
            } else {
                cb->s_rhdr = *mtod(m, struct sphdr *);
                m->m_data += SPINC;
                m->m_len -= SPINC;
                m->m_pkthdr.len -= SPINC;
                sbappend(&so->so_rcv, m);
            }
        } else
            break;
    }
    if (wakeup) sorwakeup(so);
    return (0);
}

/*
 * Control-input handler: dispatch NS error/routing notifications (cmd)
 * to the affected SPP connections.
 * NOTE(review): the bound check "cmd > PRC_NCMDS" looks off-by-one for an
 * array indexed by cmd (nsctlerrmap[cmd]); 4.4BSD uses
 * (unsigned)cmd >= PRC_NCMDS -- confirm before changing.
 */
spp_ctlinput(cmd, arg)
    int cmd;
    caddr_t arg;
{
    struct ns_addr *na;
    extern u_char nsctlerrmap[];
    extern spp_abort(), spp_quench();
    extern struct nspcb *idp_drop();
    struct ns_errp *errp;
    struct nspcb *nsp;
    struct sockaddr_ns *sns;
    int type;

    if (cmd < 0 || cmd > PRC_NCMDS)
        return;
    type = NS_ERR_UNREACH_HOST;

    switch (cmd) {

    case PRC_ROUTEDEAD:
        return;

    case PRC_IFDOWN:
    case PRC_HOSTDEAD:
    case PRC_HOSTUNREACH:
        sns = (struct sockaddr_ns *)arg;
        if (sns->sns_family != AF_NS)
            return;
        na = &sns->sns_addr;
        break;

    default:
        /* Remaining commands carry an NS error packet, not a sockaddr. */
        errp = (struct ns_errp *)arg;
        na = &errp->ns_err_idp.idp_dna;
        type = errp->ns_err_num;
        type = ntohs((u_short)type);
    }
    switch (type) {

    case NS_ERR_UNREACH_HOST:
        ns_pcbnotify(na, (int)nsctlerrmap[cmd], spp_abort, (long) 0);
        break;

    case NS_ERR_TOO_BIG:
    case NS_ERR_NOSOCK:
        /*
         * Only reachable via the default: arm above, so errp is set
         * whenever type can be one of these two values.
         */
        nsp = ns_pcblookup(na, errp->ns_err_idp.idp_sna.x_port,
            NS_WILDCARD);
        if (nsp) {
            if(nsp->nsp_pcb)
                (void) spp_drop((struct sppcb *)nsp->nsp_pcb,
                        (int)nsctlerrmap[cmd]);
            else
                (void) idp_drop(nsp, (int)nsctlerrmap[cmd]);
        }
        break;

    case NS_ERR_FULLUP:
        ns_pcbnotify(na, 0, spp_quench, (long) 0);
    }
}
/*
 * When a source quench is received, close congestion window
 * to one packet.  We will gradually open it again as we proceed.
 */
spp_quench(nsp)
    struct nspcb *nsp;
{
    struct sppcb *cb = nstosppcb(nsp);

    if (cb)
        cb->s_cwnd = CUNIT;
}

#ifdef notdef
int
spp_fixmtu(nsp)
register struct nspcb *nsp;
{
    register struct sppcb *cb = (struct sppcb *)(nsp->nsp_pcb);
    register struct mbuf *m;
    register struct spidp *si;
    struct ns_errp *ep;
    struct sockbuf *sb;
    int badseq, len;
    struct mbuf *firstbad, *m0;

    if (cb) {
        /*
         * The notification that we have sent
         * too much is bad news -- we will
         * have to go through queued up so far
         * splitting ones which are too big and
         * reassigning sequence numbers and checksums.
         * we should then retransmit all packets from
         * one above the offending packet to the last one
         * we had sent (or our allocation)
         * then the offending one so that any queued
         * data at our destination will be discarded.
         */
        ep = (struct ns_errp *)nsp->nsp_notify_param;
        sb = &nsp->nsp_socket->so_snd;
        cb->s_mtu = ep->ns_err_param;
        badseq = SI(&ep->ns_err_idp)->si_seq;
        for (m = sb->sb_mb; m; m = m->m_act) {
            si = mtod(m, struct spidp *);
            if (si->si_seq == badseq)
                break;
        }
        if (m == 0) return;
        firstbad = m;
        /*for (;;) {*/
        /* calculate length */
        for (m0 = m, len = 0; m ; m = m->m_next)
            len += m->m_len;
        if (len > cb->s_mtu) {
        }
        /* FINISH THIS
        } */
    }
}
#endif

/*
 * Queue m0 (if any) for transmission on connection cb, then transmit
 * whatever the send/congestion windows currently allow (data, ack, or
 * window probe).  Returns 0 or an errno.
 */
spp_output(cb, m0)
    register struct sppcb *cb;
    struct mbuf *m0;
{
    struct socket *so = cb->s_nspcb->nsp_socket;
    register struct mbuf *m;
    register struct spidp *si = (struct spidp *) 0;
    register struct sockbuf *sb = &so->so_snd;
    int len = 0, win, rcv_win;
    short span, off, recordp = 0;
    u_short alo;
    int error = 0, sendalot;
#ifdef notdef
    int idle;
#endif
    struct mbuf *mprev;
    extern int idpcksum;

    if (m0) {
        int mtu = cb->s_mtu;
        int datalen;
        /*
         * Make sure that packet isn't too big.
         */
        for (m = m0; m ; m = m->m_next) {
            mprev = m;
            len += m->m_len;
            if (m->m_flags & M_EOR)
                recordp = 1;
        }
        datalen = (cb->s_flags & SF_HO) ?
                len - sizeof (struct sphdr) : len;
        if (datalen > mtu) {
            if (cb->s_flags & SF_PI) {
                m_freem(m0);
                return (EMSGSIZE);
            } else {
                /* Split oversized data into mtu-sized packets,
                 * recursing for each piece. */
                int oldEM = cb->s_cc & SP_EM;

                cb->s_cc &= ~SP_EM;
                while (len > mtu) {
                    /*
                     * Here we are only being called
                     * from usrreq(), so it is OK to
                     * block.
                     */
                    m = m_copym(m0, 0, mtu, M_WAIT);
                    if (cb->s_flags & SF_NEWCALL) {
                        struct mbuf *mm = m;
                        spp_newchecks[7]++;
                        while (mm) {
                            mm->m_flags &= ~M_EOR;
                            mm = mm->m_next;
                        }
                    }
                    error = spp_output(cb, m);
                    if (error) {
                        cb->s_cc |= oldEM;
                        m_freem(m0);
                        return(error);
                    }
                    m_adj(m0, mtu);
                    len -= mtu;
                }
                cb->s_cc |= oldEM;
            }
        }
        /*
         * Force length even, by adding a "garbage byte" if
         * necessary.
         */
        if (len & 1) {
            m = mprev;
            if (M_TRAILINGSPACE(m) >= 1)
                m->m_len++;
            else {
                struct mbuf *m1 = m_get(M_DONTWAIT, MT_DATA);

                if (m1 == 0) {
                    m_freem(m0);
                    return (ENOBUFS);
                }
                m1->m_len = 1;
                *(mtod(m1, u_char *)) = 0;
                m->m_next = m1;
            }
        }
        m = m_gethdr(M_DONTWAIT, MT_HEADER);
        if (m == 0) {
            m_freem(m0);
            return (ENOBUFS);
        }
        /*
         * Fill in mbuf with extended SP header
         * and addresses and length put into network format.
         */
        MH_ALIGN(m, sizeof (struct spidp));
        m->m_len = sizeof (struct spidp);
        m->m_next = m0;
        si = mtod(m, struct spidp *);
        si->si_i = *cb->s_idp;
        si->si_s = cb->s_shdr;
        if ((cb->s_flags & SF_PI) && (cb->s_flags & SF_HO)) {
            /* User supplied the SP header; lift it into si. */
            register struct sphdr *sh;
            if (m0->m_len < sizeof (*sh)) {
                if((m0 = m_pullup(m0, sizeof(*sh))) == NULL) {
                    (void) m_free(m);
                    m_freem(m0);
                    return (EINVAL);
                }
                m->m_next = m0;
            }
            sh = mtod(m0, struct sphdr *);
            si->si_dt = sh->sp_dt;
            si->si_cc |= sh->sp_cc & SP_EM;
            m0->m_len -= sizeof (*sh);
            m0->m_data += sizeof (*sh);
            len -= sizeof (*sh);
        }
        len += sizeof(*si);
        if ((cb->s_flags2 & SF_NEWCALL) && recordp) {
            si->si_cc |= SP_EM;
            spp_newchecks[8]++;
        }
        if (cb->s_oobflags & SF_SOOB) {
            /*
             * Per jqj@cornell:
             * make sure OB packets convey exactly 1 byte.
             * If the packet is 1 byte or larger, we
             * have already guaranteed there to be at least
             * one garbage byte for the checksum, and
             * extra bytes shouldn't hurt!
             */
            if (len > sizeof(*si)) {
                si->si_cc |= SP_OB;
                len = (1 + sizeof(*si));
            }
        }
        si->si_len = htons((u_short)len);
        /* Round the packet length up to even. */
        m->m_pkthdr.len = ((len - 1) | 1) + 1;
        /*
         * queue stuff up for output
         */
        sbappendrecord(sb, m);
        cb->s_seq++;
    }
#ifdef notdef
    idle = (cb->s_smax == (cb->s_rack - 1));
#endif
again:
    sendalot = 0;
    off = cb->s_snxt - cb->s_rack;
    win = min(cb->s_swnd, (cb->s_cwnd/CUNIT));

    /*
     * If in persist timeout with window of 0, send a probe.
     * Otherwise, if window is small but nonzero
     * and timer expired, send what we can and go into
     * transmit state.
     */
    if (cb->s_force == 1 + SPPT_PERSIST) {
        if (win != 0) {
            cb->s_timer[SPPT_PERSIST] = 0;
            cb->s_rxtshift = 0;
        }
    }
    span = cb->s_seq - cb->s_rack;
    len = min(span, win) - off;

    if (len < 0) {
        /*
         * Window shrank after we went into it.
         * If window shrank to 0, cancel pending
         * retransmission and pull s_snxt back
         * to (closed) window.  We will enter persist
         * state below.  If the window didn't close completely,
         * just wait for an ACK.
         */
        len = 0;
        if (win == 0) {
            cb->s_timer[SPPT_REXMT] = 0;
            cb->s_snxt = cb->s_rack;
        }
    }
    if (len > 1)
        sendalot = 1;
    rcv_win = sbspace(&so->so_rcv);

    /*
     * Send if we owe peer an ACK.
     */
    if (cb->s_oobflags & SF_SOOB) {
        /*
         * must transmit this out of band packet
         * (jumps into the send block below; si still points at the
         * OOB packet queued in the m0 branch above)
         */
        cb->s_oobflags &= ~ SF_SOOB;
        sendalot = 1;
        sppstat.spps_sndurg++;
        goto found;
    }
    if (cb->s_flags & SF_ACKNOW)
        goto send;
    if (cb->s_state < TCPS_ESTABLISHED)
        goto send;
    /*
     * Silly window can't happen in spp.
     * Code from tcp deleted.
     */
    if (len)
        goto send;
    /*
     * Compare available window to amount of window
     * known to peer (as advertised window less
     * next expected input.)  If the difference is at least two
     * packets or at least 35% of the maximum possible window,
     * then want to send a window update to peer.
     */
    if (rcv_win > 0) {
        u_short delta = 1 + cb->s_alo - cb->s_ack;
        int adv = rcv_win - (delta * cb->s_mtu);

        if ((so->so_rcv.sb_cc == 0 && adv >= (2 * cb->s_mtu)) ||
            (100 * adv / so->so_rcv.sb_hiwat >= 35)) {
            sppstat.spps_sndwinup++;
            cb->s_flags |= SF_ACKNOW;
            goto send;
        }

    }
    /*
     * Many comments from tcp_output.c are appropriate here
     * including . . .
     * If send window is too small, there is data to transmit, and no
     * retransmit or persist is pending, then go to persist state.
     * If nothing happens soon, send when timer expires:
     * if window is nonzero, transmit what we can,
     * otherwise send a probe.
     */
    if (so->so_snd.sb_cc && cb->s_timer[SPPT_REXMT] == 0 &&
        cb->s_timer[SPPT_PERSIST] == 0) {
            cb->s_rxtshift = 0;
            spp_setpersist(cb);
    }
    /*
     * No reason to send a packet, just return.
     */
    cb->s_outx = 1;
    return (0);

send:
    /*
     * Find requested packet.
     */
    si = 0;
    if (len > 0) {
        cb->s_want = cb->s_snxt;
        for (m = sb->sb_mb; m; m = m->m_act) {
            si = mtod(m, struct spidp *);
            if (SSEQ_LEQ(cb->s_snxt, si->si_seq))
                break;
        }
    found:
        if (si) {
            if (si->si_seq == cb->s_snxt)
                    cb->s_snxt++;
            else
                sppstat.spps_sndvoid++, si = 0;
        }
    }
    /*
     * update window
     */
    if (rcv_win < 0)
        rcv_win = 0;
    alo = cb->s_ack - 1 + (rcv_win / ((short)cb->s_mtu));
    if (SSEQ_LT(alo, cb->s_alo))
        alo = cb->s_alo;

    if (si) {
        /*
         * must make a copy of this packet for
         * idp_output to monkey with
         */
        m = m_copy(dtom(si), 0, (int)M_COPYALL);
        if (m == NULL) {
            return (ENOBUFS);
        }
        si = mtod(m, struct spidp *);
        if (SSEQ_LT(si->si_seq, cb->s_smax))
            sppstat.spps_sndrexmitpack++;
        else
            sppstat.spps_sndpack++;
    } else if (cb->s_force || cb->s_flags & SF_ACKNOW) {
        /*
         * Must send an acknowledgement or a probe
         */
        if (cb->s_force)
            sppstat.spps_sndprobe++;
        if (cb->s_flags & SF_ACKNOW)
            sppstat.spps_sndacks++;
        m = m_gethdr(M_DONTWAIT, MT_HEADER);
        if (m == 0)
            return (ENOBUFS);
        /*
         * Fill in mbuf with extended SP header
         * and addresses and length put into network format.
         */
        MH_ALIGN(m, sizeof (struct spidp));
        m->m_len = sizeof (*si);
        m->m_pkthdr.len = sizeof (*si);
        si = mtod(m, struct spidp *);
        si->si_i = *cb->s_idp;
        si->si_s = cb->s_shdr;
        si->si_seq = cb->s_smax + 1;
        si->si_len = htons(sizeof (*si));
        si->si_cc |= SP_SP;
    } else {
        cb->s_outx = 3;
        if (so->so_options & SO_DEBUG || traceallspps)
            spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);
        return (0);
    }
    /*
     * Stuff checksum and output datagram.
     */
    if ((si->si_cc & SP_SP) == 0) {
        if (cb->s_force != (1 + SPPT_PERSIST) ||
            cb->s_timer[SPPT_PERSIST] == 0) {
            /*
             * If this is a new packet and we are not currently
             * timing anything, time this one.
             */
            if (SSEQ_LT(cb->s_smax, si->si_seq)) {
                cb->s_smax = si->si_seq;
                if (cb->s_rtt == 0) {
                    sppstat.spps_segstimed++;
                    cb->s_rtseq = si->si_seq;
                    cb->s_rtt = 1;
                }
            }
            /*
             * Set rexmt timer if not currently set,
             * Initial value for retransmit timer is smoothed
             * round-trip time + 2 * round-trip time variance.
             * Initialize shift counter which is used for backoff
             * of retransmit time.
             */
            if (cb->s_timer[SPPT_REXMT] == 0 &&
                cb->s_snxt != cb->s_rack) {
                cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
                if (cb->s_timer[SPPT_PERSIST]) {
                    cb->s_timer[SPPT_PERSIST] = 0;
                    cb->s_rxtshift = 0;
                }
            }
        } else if (SSEQ_LT(cb->s_smax, si->si_seq)) {
            cb->s_smax = si->si_seq;
        }
    } else if (cb->s_state < TCPS_ESTABLISHED) {
        if (cb->s_rtt == 0)
            cb->s_rtt = 1; /* Time initial handshake */
        if (cb->s_timer[SPPT_REXMT] == 0)
            cb->s_timer[SPPT_REXMT] = cb->s_rxtcur;
    }
    {
        /*
         * Do not request acks when we ack their data packets or
         * when we do a gratuitous window update.
         */
        if (((si->si_cc & SP_SP) == 0) || cb->s_force)
            si->si_cc |= SP_SA;
        si->si_seq = htons(si->si_seq);
        si->si_alo = htons(alo);
        si->si_ack = htons(cb->s_ack);

        if (idpcksum) {
            si->si_sum = 0;
            len = ntohs(si->si_len);
            if (len & 1)
                len++;
            si->si_sum = ns_cksum(m, len);
        } else
            si->si_sum = 0xffff;

        cb->s_outx = 4;
        if (so->so_options & SO_DEBUG || traceallspps)
            spp_trace(SA_OUTPUT, cb->s_state, cb, si, 0);

        if (so->so_options & SO_DONTROUTE)
            error = ns_output(m, (struct route *)0, NS_ROUTETOIF);
        else
            error = ns_output(m, &cb->s_nspcb->nsp_route, 0);
    }
    if (error) {
        return (error);
    }
    sppstat.spps_sndtotal++;
    /*
     * Data sent (as far as we can tell).
     * If this advertises a larger window than any other segment,
     * then remember the size of the advertised window.
     * Any pending ACK has now been sent.
     */
    cb->s_force = 0;
    cb->s_flags &= ~(SF_ACKNOW|SF_DELACK);
    if (SSEQ_GT(alo, cb->s_alo))
        cb->s_alo = alo;
    if (sendalot)
        goto again;
    cb->s_outx = 5;
    return (0);
}

int spp_do_persist_panics = 0;

/*
 * Arm the persist timer with the backed-off smoothed-rtt estimate and
 * bump the backoff shift.
 */
spp_setpersist(cb)
    register struct sppcb *cb;
{
    register t = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1;
    extern int spp_backoff[];

    if (cb->s_timer[SPPT_REXMT] && spp_do_persist_panics)
        panic("spp_output REXMT");
    /*
     * Start/restart persistence timer.
     */
    SPPT_RANGESET(cb->s_timer[SPPT_PERSIST],
        t*spp_backoff[cb->s_rxtshift],
        SPPTV_PERSMIN, SPPTV_PERSMAX);
    if (cb->s_rxtshift < SPP_MAXRXTSHIFT)
        cb->s_rxtshift++;
}
/*
 * Get/set SPP-level socket options.
 * NOTE(review): 'level' is accepted as an implicit-int K&R parameter
 * (no declaration line for it below) -- confirm intended.
 */
/*ARGSUSED*/
spp_ctloutput(req, so, level, name, value)
    int req;
    struct socket *so;
    int name;
    struct mbuf **value;
{
    register struct mbuf *m;
    struct nspcb *nsp = sotonspcb(so);
    register struct sppcb *cb;
    int mask, error = 0;

    if (level != NSPROTO_SPP) {
        /* This will have to be changed when we do more general
           stacking of protocols */
        return (idp_ctloutput(req, so, level, name, value));
    }
    if (nsp == NULL) {
        error = EINVAL;
        goto release;
    } else
        cb = nstosppcb(nsp);

    switch (req) {

    case PRCO_GETOPT:
        if (value == NULL)
            return (EINVAL);
        m = m_get(M_DONTWAIT, MT_DATA);
        if (m == NULL)
            return (ENOBUFS);
        switch (name) {

        case SO_HEADERS_ON_INPUT:
            mask = SF_HI;
            goto get_flags;

        case SO_HEADERS_ON_OUTPUT:
            mask = SF_HO;
        get_flags:
            m->m_len = sizeof(short);
            *mtod(m, short *) = cb->s_flags & mask;
            break;

        case SO_MTU:
            m->m_len = sizeof(u_short);
            *mtod(m, short *) = cb->s_mtu;
            break;

        case SO_LAST_HEADER:
            m->m_len = sizeof(struct sphdr);
            *mtod(m, struct sphdr *) = cb->s_rhdr;
            break;

        case SO_DEFAULT_HEADERS:
            /*
             * NOTE(review): m_len is set to sizeof(struct spidp) but
             * only a struct sphdr is stored -- looks inconsistent,
             * confirm against the SO_DEFAULT_HEADERS consumers.
             */
            m->m_len = sizeof(struct spidp);
            *mtod(m, struct sphdr *) = cb->s_shdr;
            break;

        default:
            error = EINVAL;
        }
        *value = m;
        break;

    case PRCO_SETOPT:
        if (value == 0 || *value == 0) {
            error = EINVAL;
            break;
        }
        switch (name) {
            int *ok;

        case SO_HEADERS_ON_INPUT:
            mask = SF_HI;
            goto set_head;

        case SO_HEADERS_ON_OUTPUT:
            mask = SF_HO;
        set_head:
            if (cb->s_flags & SF_PI) {
                ok = mtod(*value, int *);
                if (*ok)
                    cb->s_flags |= mask;
                else
                    cb->s_flags &= ~mask;
            } else error = EINVAL;
            break;

        case SO_MTU:
            cb->s_mtu = *(mtod(*value, u_short *));
            break;

#ifdef SF_NEWCALL
        case SO_NEWCALL:
            ok = mtod(*value, int *);
            if (*ok) {
                cb->s_flags2 |= SF_NEWCALL;
                spp_newchecks[5]++;
            } else {
                cb->s_flags2 &= ~SF_NEWCALL;
                spp_newchecks[6]++;
            }
            break;
#endif

        case SO_DEFAULT_HEADERS:
            {
                register struct sphdr *sp
                    = mtod(*value, struct sphdr *);
                cb->s_dt = sp->sp_dt;
                cb->s_cc = sp->sp_cc & SP_EM;
            }
            break;

        default:
            error = EINVAL;
        }
        m_freem(*value);
        break;
    }
    release:
        return (error);
}

/*
 * SPP user-request dispatcher (the protocol's pr_usrreq entry).
 * NOTE(review): splnet() is taken before the PRU_CONTROL early return,
 * which returns without splx(s) -- apparent spl leak on that path, and it
 * also makes the PRU_CONTROL case label in the switch unreachable.
 * Confirm against later BSD fixes before touching.
 */
/*ARGSUSED*/
spp_usrreq(so, req, m, nam, controlp)
    struct socket *so;
    int req;
    struct mbuf *m, *nam, *controlp;
{
    struct nspcb *nsp = sotonspcb(so);
    register struct sppcb *cb;
    int s = splnet();
    int error = 0, ostate;
    struct mbuf *mm;
    register struct sockbuf *sb;

    if (req == PRU_CONTROL)
        return (ns_control(so, (int)m, (caddr_t)nam,
            (struct ifnet *)controlp));
    if (nsp == NULL) {
        if (req != PRU_ATTACH) {
            error = EINVAL;
            goto release;
        }
    } else
        cb = nstosppcb(nsp);

    ostate = cb ? cb->s_state : 0;

    switch (req) {

    case PRU_ATTACH:
        if (nsp != NULL) {
            error = EISCONN;
            break;
        }
        error = ns_pcballoc(so, &nspcb);
        if (error)
            break;
        if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
            error = soreserve(so, (u_long) 3072, (u_long) 3072);
            if (error)
                break;
        }
        nsp = sotonspcb(so);

        mm = m_getclr(M_DONTWAIT, MT_PCB);
        sb = &so->so_snd;

        if (mm == NULL) {
            error = ENOBUFS;
            break;
        }
        cb = mtod(mm, struct sppcb *);
        mm = m_getclr(M_DONTWAIT, MT_HEADER);
        if (mm == NULL) {
            /*
             * NOTE(review): frees dtom(m), but m is the (usually null)
             * request mbuf; the MT_PCB buffer just allocated for cb
             * looks leaked -- dtom(cb) seems intended, confirm.
             */
            (void) m_free(dtom(m));
            error = ENOBUFS;
            break;
        }
        cb->s_idp = mtod(mm, struct idp *);
        cb->s_state = TCPS_LISTEN;
        cb->s_smax = -1;
        cb->s_swl1 = -1;
        cb->s_q.si_next = cb->s_q.si_prev = &cb->s_q;
        cb->s_nspcb = nsp;
        cb->s_mtu = 576 - sizeof (struct spidp);
        cb->s_cwnd = sbspace(sb) * CUNIT / cb->s_mtu;
        cb->s_ssthresh = cb->s_cwnd;
        cb->s_cwmx = sbspace(sb) * CUNIT /
                (2 * sizeof (struct spidp));
        /* Above is recomputed when connecting to account
           for changed buffering or mtu's */
        cb->s_rtt = SPPTV_SRTTBASE;
        cb->s_rttvar = SPPTV_SRTTDFLT << 2;
        SPPT_RANGESET(cb->s_rxtcur,
            ((SPPTV_SRTTBASE >> 2) + (SPPTV_SRTTDFLT << 2)) >> 1,
            SPPTV_MIN, SPPTV_REXMTMAX);
        nsp->nsp_pcb = (caddr_t) cb;
        break;

    case PRU_DETACH:
        if (nsp == NULL) {
            error = ENOTCONN;
            break;
        }
        if (cb->s_state > TCPS_LISTEN)
            cb = spp_disconnect(cb);
        else
            cb = spp_close(cb);
        break;

    case PRU_BIND:
        error = ns_pcbbind(nsp, nam);
        break;

    case PRU_LISTEN:
        if (nsp->nsp_lport == 0)
            error = ns_pcbbind(nsp, (struct mbuf *)0);
        if (error == 0)
            cb->s_state = TCPS_LISTEN;
        break;

    /*
     * Initiate connection to peer.
     * Enter SYN_SENT state, and mark socket as connecting.
     * Start keep-alive timer, setup prototype header,
     * Send initial system packet requesting connection.
     */
    case PRU_CONNECT:
        if (nsp->nsp_lport == 0) {
            error = ns_pcbbind(nsp, (struct mbuf *)0);
            if (error)
                break;
        }
        error = ns_pcbconnect(nsp, nam);
        if (error)
            break;
        soisconnecting(so);
        sppstat.spps_connattempt++;
        cb->s_state = TCPS_SYN_SENT;
        cb->s_did = 0;
        spp_template(cb);
        cb->s_timer[SPPT_KEEP] = SPPTV_KEEP;
        cb->s_force = 1 + SPPTV_KEEP;
        /*
         * Other party is required to respond to
         * the port I send from, but he is not
         * required to answer from where I am sending to,
         * so allow wildcarding.
         * original port I am sending to is still saved in
         * cb->s_dport.
         */
        nsp->nsp_fport = 0;
        error = spp_output(cb, (struct mbuf *) 0);
        break;

    case PRU_CONNECT2:
        error = EOPNOTSUPP;
        break;

    /*
     * We may decide later to implement connection closing
     * handshaking at the spp level optionally.
     * here is the hook to do it:
     */
    case PRU_DISCONNECT:
        cb = spp_disconnect(cb);
        break;

    /*
     * Accept a connection.  Essentially all the work is
     * done at higher levels; just return the address
     * of the peer, storing through addr.
     */
    case PRU_ACCEPT: {
        struct sockaddr_ns *sns = mtod(nam, struct sockaddr_ns *);

        nam->m_len = sizeof (struct sockaddr_ns);
        sns->sns_family = AF_NS;
        sns->sns_addr = nsp->nsp_faddr;
        break;
    }

    case PRU_SHUTDOWN:
        socantsendmore(so);
        cb = spp_usrclosed(cb);
        if (cb)
            error = spp_output(cb, (struct mbuf *) 0);
        break;

    /*
     * After a receive, possibly send acknowledgment
     * updating allocation.
     */
    case PRU_RCVD:
        cb->s_flags |= SF_RVD;
        (void) spp_output(cb, (struct mbuf *) 0);
        cb->s_flags &= ~SF_RVD;
        break;

    case PRU_ABORT:
        (void) spp_drop(cb, ECONNABORTED);
        break;

    case PRU_SENSE:
    case PRU_CONTROL:
        m = NULL;
        error = EOPNOTSUPP;
        break;

    case PRU_RCVOOB:
        if ((cb->s_oobflags & SF_IOOB) || so->so_oobmark ||
            (so->so_state & SS_RCVATMARK)) {
            m->m_len = 1;
            *mtod(m, caddr_t) = cb->s_iobc;
            break;
        }
        error = EINVAL;
        break;

    case PRU_SENDOOB:
        if (sbspace(&so->so_snd) < -512) {
            error = ENOBUFS;
            break;
        }
        cb->s_oobflags |= SF_SOOB;
        /* fall into */
    case PRU_SEND:
        if (controlp) {
            u_short *p = mtod(controlp, u_short *);
            spp_newchecks[2]++;
            if ((p[0] == 5) && p[1] == 1) { /* XXXX, for testing */
                cb->s_shdr.sp_dt = *(u_char *)(&p[2]);
                spp_newchecks[3]++;
            }
            m_freem(controlp);
        }
        controlp = NULL;
        error = spp_output(cb, m);
        m = NULL;
        break;

    case PRU_SOCKADDR:
        ns_setsockaddr(nsp, nam);
        break;

    case PRU_PEERADDR:
        ns_setpeeraddr(nsp, nam);
        break;

    case PRU_SLOWTIMO:
        /* nam carries the timer index; encode it into req for tracing. */
        cb = spp_timers(cb, (int)nam);
        req |= ((int)nam) << 8;
        break;

    case PRU_FASTTIMO:
    case PRU_PROTORCV:
    case PRU_PROTOSEND:
        error = EOPNOTSUPP;
        break;

    default:
        panic("sp_usrreq"); /* NOTE(review): message reads "sp_", not "spp_" */
    }
    if (cb && (so->so_options & SO_DEBUG || traceallspps))
        spp_trace(SA_USER, (u_char)ostate, cb, (struct spidp *)0, req);
release:
    if (controlp != NULL)
        m_freem(controlp);
    if (m != NULL)
        m_freem(m);
    splx(s);
    return (error);
}

/*
 * usrreq wrapper for the "SP" packet-interface protocol variant:
 * same as spp_usrreq, but a freshly attached pcb gets the
 * headers-on-input/output and packet-interface flags set.
 */
spp_usrreq_sp(so, req, m, nam, controlp)
    struct socket *so;
    int req;
    struct mbuf *m, *nam, *controlp;
{
    int error = spp_usrreq(so, req, m, nam, controlp);

    if (req == PRU_ATTACH && error == 0) {
        struct nspcb *nsp = sotonspcb(so);
        ((struct sppcb *)nsp->nsp_pcb)->s_flags |=
            (SF_HI | SF_HO | SF_PI);
    }
    return (error);
}

/*
 * Create template to be used to send spp packets on a connection.
+ * Called after host entry created, fills + * in a skeletal spp header (choosing connection id), + * minimizing the amount of work necessary when the connection is used. + */ +spp_template(cb) + register struct sppcb *cb; +{ + register struct nspcb *nsp = cb->s_nspcb; + register struct idp *idp = cb->s_idp; + register struct sockbuf *sb = &(nsp->nsp_socket->so_snd); + + idp->idp_pt = NSPROTO_SPP; + idp->idp_sna = nsp->nsp_laddr; + idp->idp_dna = nsp->nsp_faddr; + cb->s_sid = htons(spp_iss); + spp_iss += SPP_ISSINCR/2; + cb->s_alo = 1; + cb->s_cwnd = (sbspace(sb) * CUNIT) / cb->s_mtu; + cb->s_ssthresh = cb->s_cwnd; /* Try to expand fast to full complement + of large packets */ + cb->s_cwmx = (sbspace(sb) * CUNIT) / (2 * sizeof(struct spidp)); + cb->s_cwmx = max(cb->s_cwmx, cb->s_cwnd); + /* But allow for lots of little packets as well */ +} + +/* + * Close a SPIP control block: + * discard spp control block itself + * discard ns protocol control block + * wake up any sleepers + */ +struct sppcb * +spp_close(cb) + register struct sppcb *cb; +{ + register struct spidp_q *s; + struct nspcb *nsp = cb->s_nspcb; + struct socket *so = nsp->nsp_socket; + register struct mbuf *m; + + s = cb->s_q.si_next; + while (s != &(cb->s_q)) { + s = s->si_next; + m = dtom(s->si_prev); + remque(s->si_prev); + m_freem(m); + } + (void) m_free(dtom(cb->s_idp)); + (void) m_free(dtom(cb)); + nsp->nsp_pcb = 0; + soisdisconnected(so); + ns_pcbdetach(nsp); + sppstat.spps_closed++; + return ((struct sppcb *)0); +} +/* + * Someday we may do level 3 handshaking + * to close a connection or send a xerox style error. + * For now, just close. + */ +struct sppcb * +spp_usrclosed(cb) + register struct sppcb *cb; +{ + return (spp_close(cb)); +} +struct sppcb * +spp_disconnect(cb) + register struct sppcb *cb; +{ + return (spp_close(cb)); +} +/* + * Drop connection, reporting + * the specified error. 
+ */ +struct sppcb * +spp_drop(cb, errno) + register struct sppcb *cb; + int errno; +{ + struct socket *so = cb->s_nspcb->nsp_socket; + + /* + * someday, in the xerox world + * we will generate error protocol packets + * announcing that the socket has gone away. + */ + if (TCPS_HAVERCVDSYN(cb->s_state)) { + sppstat.spps_drops++; + cb->s_state = TCPS_CLOSED; + /*(void) tcp_output(cb);*/ + } else + sppstat.spps_conndrops++; + so->so_error = errno; + return (spp_close(cb)); +} + +spp_abort(nsp) + struct nspcb *nsp; +{ + + (void) spp_close((struct sppcb *)nsp->nsp_pcb); +} + +int spp_backoff[SPP_MAXRXTSHIFT+1] = + { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; +/* + * Fast timeout routine for processing delayed acks + */ +spp_fasttimo() +{ + register struct nspcb *nsp; + register struct sppcb *cb; + int s = splnet(); + + nsp = nspcb.nsp_next; + if (nsp) + for (; nsp != &nspcb; nsp = nsp->nsp_next) + if ((cb = (struct sppcb *)nsp->nsp_pcb) && + (cb->s_flags & SF_DELACK)) { + cb->s_flags &= ~SF_DELACK; + cb->s_flags |= SF_ACKNOW; + sppstat.spps_delack++; + (void) spp_output(cb, (struct mbuf *) 0); + } + splx(s); +} + +/* + * spp protocol timeout routine called every 500 ms. + * Updates the timers in all active pcb's and + * causes finite state machine actions if timers expire. + */ +spp_slowtimo() +{ + register struct nspcb *ip, *ipnxt; + register struct sppcb *cb; + int s = splnet(); + register int i; + + /* + * Search through tcb's and update active timers. 
+ */ + ip = nspcb.nsp_next; + if (ip == 0) { + splx(s); + return; + } + while (ip != &nspcb) { + cb = nstosppcb(ip); + ipnxt = ip->nsp_next; + if (cb == 0) + goto tpgone; + for (i = 0; i < SPPT_NTIMERS; i++) { + if (cb->s_timer[i] && --cb->s_timer[i] == 0) { + (void) spp_usrreq(cb->s_nspcb->nsp_socket, + PRU_SLOWTIMO, (struct mbuf *)0, + (struct mbuf *)i, (struct mbuf *)0, + (struct mbuf *)0); + if (ipnxt->nsp_prev != ip) + goto tpgone; + } + } + cb->s_idle++; + if (cb->s_rtt) + cb->s_rtt++; +tpgone: + ip = ipnxt; + } + spp_iss += SPP_ISSINCR/PR_SLOWHZ; /* increment iss */ + splx(s); +} +/* + * SPP timer processing. + */ +struct sppcb * +spp_timers(cb, timer) + register struct sppcb *cb; + int timer; +{ + long rexmt; + int win; + + cb->s_force = 1 + timer; + switch (timer) { + + /* + * 2 MSL timeout in shutdown went off. TCP deletes connection + * control block. + */ + case SPPT_2MSL: + printf("spp: SPPT_2MSL went off for no reason\n"); + cb->s_timer[timer] = 0; + break; + + /* + * Retransmission timer went off. Message has not + * been acked within retransmit interval. Back off + * to a longer retransmit interval and retransmit one packet. + */ + case SPPT_REXMT: + if (++cb->s_rxtshift > SPP_MAXRXTSHIFT) { + cb->s_rxtshift = SPP_MAXRXTSHIFT; + sppstat.spps_timeoutdrop++; + cb = spp_drop(cb, ETIMEDOUT); + break; + } + sppstat.spps_rexmttimeo++; + rexmt = ((cb->s_srtt >> 2) + cb->s_rttvar) >> 1; + rexmt *= spp_backoff[cb->s_rxtshift]; + SPPT_RANGESET(cb->s_rxtcur, rexmt, SPPTV_MIN, SPPTV_REXMTMAX); + cb->s_timer[SPPT_REXMT] = cb->s_rxtcur; + /* + * If we have backed off fairly far, our srtt + * estimate is probably bogus. Clobber it + * so we'll take the next rtt measurement as our srtt; + * move the current srtt into rttvar to keep the current + * retransmit times until then. + */ + if (cb->s_rxtshift > SPP_MAXRXTSHIFT / 4 ) { + cb->s_rttvar += (cb->s_srtt >> 2); + cb->s_srtt = 0; + } + cb->s_snxt = cb->s_rack; + /* + * If timing a packet, stop the timer. 
+ */ + cb->s_rtt = 0; + /* + * See very long discussion in tcp_timer.c about congestion + * window and ssthresh + */ + win = min(cb->s_swnd, (cb->s_cwnd/CUNIT)) / 2; + if (win < 2) + win = 2; + cb->s_cwnd = CUNIT; + cb->s_ssthresh = win * CUNIT; + (void) spp_output(cb, (struct mbuf *) 0); + break; + + /* + * Persistence timer into zero window. + * Force a probe to be sent. + */ + case SPPT_PERSIST: + sppstat.spps_persisttimeo++; + spp_setpersist(cb); + (void) spp_output(cb, (struct mbuf *) 0); + break; + + /* + * Keep-alive timer went off; send something + * or drop connection if idle for too long. + */ + case SPPT_KEEP: + sppstat.spps_keeptimeo++; + if (cb->s_state < TCPS_ESTABLISHED) + goto dropit; + if (cb->s_nspcb->nsp_socket->so_options & SO_KEEPALIVE) { + if (cb->s_idle >= SPPTV_MAXIDLE) + goto dropit; + sppstat.spps_keepprobe++; + (void) spp_output(cb, (struct mbuf *) 0); + } else + cb->s_idle = 0; + cb->s_timer[SPPT_KEEP] = SPPTV_KEEP; + break; + dropit: + sppstat.spps_keepdrops++; + cb = spp_drop(cb, ETIMEDOUT); + break; + } + return (cb); +} +#ifndef lint +int SppcbSize = sizeof (struct sppcb); +int NspcbSize = sizeof (struct nspcb); +#endif /* lint */ diff --git a/bsd/netns/spp_var.h b/bsd/netns/spp_var.h new file mode 100644 index 000000000..f68a84fec --- /dev/null +++ b/bsd/netns/spp_var.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1984, 1985, 1986, 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)spp_var.h 8.1 (Berkeley) 6/10/93 + */ + +/* + * Sp control block, one per connection + */ +struct sppcb { + struct spidp_q s_q; /* queue for out-of-order receipt */ + struct nspcb *s_nspcb; /* backpointer to internet pcb */ + u_char s_state; + u_char s_flags; +#define SF_ACKNOW 0x01 /* Ack peer immediately */ +#define SF_DELACK 0x02 /* Ack, but try to delay it */ +#define SF_HI 0x04 /* Show headers on input */ +#define SF_HO 0x08 /* Show headers on output */ +#define SF_PI 0x10 /* Packet (datagram) interface */ +#define SF_WIN 0x20 /* Window info changed */ +#define SF_RXT 0x40 /* Rxt info changed */ +#define SF_RVD 0x80 /* Calling from read usrreq routine */ + u_short s_mtu; /* Max packet size for this stream */ +/* use sequence fields in headers to store sequence numbers for this + connection */ + struct idp *s_idp; + struct sphdr s_shdr; /* prototype header to transmit */ +#define s_cc s_shdr.sp_cc /* connection control (for EM bit) */ +#define s_dt s_shdr.sp_dt /* datastream type */ +#define s_sid s_shdr.sp_sid /* source connection identifier */ +#define s_did s_shdr.sp_did /* destination connection identifier */ +#define s_seq s_shdr.sp_seq /* sequence number */ +#define s_ack s_shdr.sp_ack /* acknowledge number */ +#define s_alo s_shdr.sp_alo /* allocation number */ +#define s_dport s_idp->idp_dna.x_port /* where we are sending */ + struct sphdr s_rhdr; /* last received header (in effect!)*/ + u_short s_rack; /* their 
acknowledge number */ + u_short s_ralo; /* their allocation number */ + u_short s_smax; /* highest packet # we have sent */ + u_short s_snxt; /* which packet to send next */ + +/* congestion control */ +#define CUNIT 1024 /* scaling for ... */ + int s_cwnd; /* Congestion-controlled window */ + /* in packets * CUNIT */ + short s_swnd; /* == tcp snd_wnd, in packets */ + short s_smxw; /* == tcp max_sndwnd */ + /* difference of two spp_seq's can be + no bigger than a short */ + u_short s_swl1; /* == tcp snd_wl1 */ + u_short s_swl2; /* == tcp snd_wl2 */ + int s_cwmx; /* max allowable cwnd */ + int s_ssthresh; /* s_cwnd size threshold for + * slow start exponential-to- + * linear switch */ +/* transmit timing stuff + * srtt and rttvar are stored as fixed point, for convenience in smoothing. + * srtt has 3 bits to the right of the binary point, rttvar has 2. + */ + short s_idle; /* time idle */ + short s_timer[SPPT_NTIMERS]; /* timers */ + short s_rxtshift; /* log(2) of rexmt exp. backoff */ + short s_rxtcur; /* current retransmit value */ + u_short s_rtseq; /* packet being timed */ + short s_rtt; /* timer for round trips */ + short s_srtt; /* averaged timer */ + short s_rttvar; /* variance in round trip time */ + char s_force; /* which timer expired */ + char s_dupacks; /* counter to intuit xmt loss */ + +/* out of band data */ + char s_oobflags; +#define SF_SOOB 0x08 /* sending out of band data */ +#define SF_IOOB 0x10 /* receiving out of band data */ + char s_iobc; /* input characters */ +/* debug stuff */ + u_short s_want; /* Last candidate for sending */ + char s_outx; /* exit taken from spp_output */ + char s_inx; /* exit taken from spp_input */ + u_short s_flags2; /* more flags for testing */ +#define SF_NEWCALL 0x100 /* for new_recvmsg */ +#define SO_NEWCALL 10 /* for new_recvmsg */ +}; + +#define nstosppcb(np) ((struct sppcb *)(np)->nsp_pcb) +#define sotosppcb(so) (nstosppcb(sotonspcb(so))) + +struct sppstat { + long spps_connattempt; /* connections initiated */ 
+ long spps_accepts; /* connections accepted */ + long spps_connects; /* connections established */ + long spps_drops; /* connections dropped */ + long spps_conndrops; /* embryonic connections dropped */ + long spps_closed; /* conn. closed (includes drops) */ + long spps_segstimed; /* segs where we tried to get rtt */ + long spps_rttupdated; /* times we succeeded */ + long spps_delack; /* delayed acks sent */ + long spps_timeoutdrop; /* conn. dropped in rxmt timeout */ + long spps_rexmttimeo; /* retransmit timeouts */ + long spps_persisttimeo; /* persist timeouts */ + long spps_keeptimeo; /* keepalive timeouts */ + long spps_keepprobe; /* keepalive probes sent */ + long spps_keepdrops; /* connections dropped in keepalive */ + + long spps_sndtotal; /* total packets sent */ + long spps_sndpack; /* data packets sent */ + long spps_sndbyte; /* data bytes sent */ + long spps_sndrexmitpack; /* data packets retransmitted */ + long spps_sndrexmitbyte; /* data bytes retransmitted */ + long spps_sndacks; /* ack-only packets sent */ + long spps_sndprobe; /* window probes sent */ + long spps_sndurg; /* packets sent with URG only */ + long spps_sndwinup; /* window update-only packets sent */ + long spps_sndctrl; /* control (SYN|FIN|RST) packets sent */ + long spps_sndvoid; /* couldn't find requested packet*/ + + long spps_rcvtotal; /* total packets received */ + long spps_rcvpack; /* packets received in sequence */ + long spps_rcvbyte; /* bytes received in sequence */ + long spps_rcvbadsum; /* packets received with ccksum errs */ + long spps_rcvbadoff; /* packets received with bad offset */ + long spps_rcvshort; /* packets received too short */ + long spps_rcvduppack; /* duplicate-only packets received */ + long spps_rcvdupbyte; /* duplicate-only bytes received */ + long spps_rcvpartduppack; /* packets with some duplicate data */ + long spps_rcvpartdupbyte; /* dup. bytes in part-dup. 
packets */ + long spps_rcvoopack; /* out-of-order packets received */ + long spps_rcvoobyte; /* out-of-order bytes received */ + long spps_rcvpackafterwin; /* packets with data after window */ + long spps_rcvbyteafterwin; /* bytes rcvd after window */ + long spps_rcvafterclose; /* packets rcvd after "close" */ + long spps_rcvwinprobe; /* rcvd window probe packets */ + long spps_rcvdupack; /* rcvd duplicate acks */ + long spps_rcvacktoomuch; /* rcvd acks for unsent data */ + long spps_rcvackpack; /* rcvd ack packets */ + long spps_rcvackbyte; /* bytes acked by rcvd acks */ + long spps_rcvwinupd; /* rcvd window update packets */ +}; +struct spp_istat { + short hdrops; + short badsum; + short badlen; + short slotim; + short fastim; + short nonucn; + short noconn; + short notme; + short wrncon; + short bdreas; + short gonawy; + short notyet; + short lstdup; + struct sppstat newstats; +}; + +#ifdef KERNEL +struct spp_istat spp_istat; + +/* Following was struct sppstat sppstat; */ +#ifndef sppstat +#define sppstat spp_istat.newstats +#endif + +u_short spp_iss; +extern struct sppcb *spp_close(), *spp_disconnect(), + *spp_usrclosed(), *spp_timers(), *spp_drop(); +#endif + +#define SPP_ISSINCR 128 +/* + * SPP sequence numbers are 16 bit integers operated + * on with modular arithmetic. These macros can be + * used to compare such integers. 
+ */ +#ifdef sun +short xnsCbug; +#define SSEQ_LT(a,b) ((xnsCbug = (short)((a)-(b))) < 0) +#define SSEQ_LEQ(a,b) ((xnsCbug = (short)((a)-(b))) <= 0) +#define SSEQ_GT(a,b) ((xnsCbug = (short)((a)-(b))) > 0) +#define SSEQ_GEQ(a,b) ((xnsCbug = (short)((a)-(b))) >= 0) +#else +#define SSEQ_LT(a,b) (((short)((a)-(b))) < 0) +#define SSEQ_LEQ(a,b) (((short)((a)-(b))) <= 0) +#define SSEQ_GT(a,b) (((short)((a)-(b))) > 0) +#define SSEQ_GEQ(a,b) (((short)((a)-(b))) >= 0) +#endif diff --git a/bsd/nfs/Makefile b/bsd/nfs/Makefile new file mode 100644 index 000000000..35118f8fd --- /dev/null +++ b/bsd/nfs/Makefile @@ -0,0 +1,39 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + krpc.h nfs.h nfsdiskless.h nfsm_subs.h nfsmount.h nfsnode.h \ + nfsproto.h nfsrtt.h nfsrvcache.h nqnfs.h rpcv2.h xdr_subs.h + + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = nfs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = nfs + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/nfs/krpc.h b/bsd/nfs/krpc.h new file mode 100644 index 000000000..f4e4701a1 --- /dev/null +++ b/bsd/nfs/krpc.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + */ + +#include + +int krpc_call __P((struct sockaddr_in *sin, + u_int prog, u_int vers, u_int func, + struct mbuf **data, struct sockaddr_in **from)); + +int krpc_portmap __P((struct sockaddr_in *sin, + u_int prog, u_int vers, u_int16_t *portp)); + + +/* + * RPC definitions for the portmapper + */ +#define PMAPPORT 111 +#define PMAPPROG 100000 +#define PMAPVERS 2 +#define PMAPPROC_NULL 0 +#define PMAPPROC_SET 1 +#define PMAPPROC_UNSET 2 +#define PMAPPROC_GETPORT 3 +#define PMAPPROC_DUMP 4 +#define PMAPPROC_CALLIT 5 + + +/* + * RPC definitions for bootparamd + */ +#define BOOTPARAM_PROG 100026 +#define BOOTPARAM_VERS 1 +#define BOOTPARAM_WHOAMI 1 +#define BOOTPARAM_GETFILE 2 + diff --git a/bsd/nfs/krpc_subr.c b/bsd/nfs/krpc_subr.c new file mode 100644 index 000000000..98a7eee68 --- /dev/null +++ b/bsd/nfs/krpc_subr.c @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1994 Gordon Ross, Adam Glass + * Copyright (c) 1992 Regents of the University of California. + * All rights reserved. + * + * This software was developed by the Computer Systems Engineering group + * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and + * contributed to Berkeley. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Lawrence Berkeley Laboratory and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +/* + * Kernel support for Sun RPC + * + * Used currently for bootstrapping in nfs diskless configurations. + * + * Note: will not work on variable-sized rpc args/results. + * implicit size-limit of an mbuf. 
+ */ + +/* + * Generic RPC headers + */ + +struct auth_info { + u_int32_t rp_atype; /* auth type */ + u_int32_t rp_alen; /* auth length */ +}; + +struct rpc_call { + u_int32_t rp_xid; /* request transaction id */ + int32_t rp_direction; /* call direction (0) */ + u_int32_t rp_rpcvers; /* rpc version (2) */ + u_int32_t rp_prog; /* program */ + u_int32_t rp_vers; /* version */ + u_int32_t rp_proc; /* procedure */ + struct auth_info rp_auth; + struct auth_info rp_verf; +}; + +struct rpc_reply { + u_int32_t rp_xid; /* request transaction id */ + int32_t rp_direction; /* call direction (1) */ + int32_t rp_astatus; /* accept status (0: accepted) */ + union { + u_int32_t rpu_errno; + struct { + struct auth_info rp_auth; + u_int32_t rp_rstatus; + } rpu_ok; + } rp_u; +}; + +#define MIN_REPLY_HDR 16 /* xid, dir, astat, errno */ + +/* + * What is the longest we will wait before re-sending a request? + * Note this is also the frequency of "RPC timeout" messages. + * The re-send loop counts up linearly to this maximum, so the + * first complaint will happen after (1+2+3+4+5)=15 seconds. + */ +#define MAX_RESEND_DELAY 5 /* seconds */ + +/* copied over from nfs_boot.c for printf format. could put in .h file... */ +#define IP_FORMAT "%d.%d.%d.%d" +#define IP_CH(ip) ((u_char *)ip) +#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] + + +/* + * Call portmap to lookup a port number for a particular rpc program + * Returns non-zero error on failure. + */ +int +krpc_portmap(sin, prog, vers, portp) + struct sockaddr_in *sin; /* server address */ + u_int prog, vers; /* host order */ + u_int16_t *portp; /* network order */ +{ + struct sdata { + u_int32_t prog; /* call program */ + u_int32_t vers; /* call version */ + u_int32_t proto; /* call protocol */ + u_int32_t port; /* call port (unused) */ + } *sdata; + struct rdata { + u_int16_t pad; + u_int16_t port; + } *rdata; + struct mbuf *m; + int error; + + /* The portmapper port is fixed. 
*/ + if (prog == PMAPPROG) { + *portp = htons(PMAPPORT); + return 0; + } + + m = m_gethdr(M_WAIT, MT_DATA); + if (m == NULL) + return ENOBUFS; + m->m_len = sizeof(*sdata); + m->m_pkthdr.len = m->m_len; + sdata = mtod(m, struct sdata *); + + /* Do the RPC to get it. */ + sdata->prog = htonl(prog); + sdata->vers = htonl(vers); + sdata->proto = htonl(IPPROTO_UDP); + sdata->port = 0; + + sin->sin_port = htons(PMAPPORT); + error = krpc_call(sin, PMAPPROG, PMAPVERS, + PMAPPROC_GETPORT, &m, NULL); + if (error) + return error; + + rdata = mtod(m, struct rdata *); + *portp = rdata->port; + + m_freem(m); + return 0; +} + +/* + * Do a remote procedure call (RPC) and wait for its reply. + * If from_p is non-null, then we are doing broadcast, and + * the address from whence the response came is saved there. + */ +int +krpc_call(sa, prog, vers, func, data, from_p) + struct sockaddr_in *sa; + u_int prog, vers, func; + struct mbuf **data; /* input/output */ + struct sockaddr_in **from_p; /* output */ +{ + struct socket *so; + struct sockaddr_in *sin; + struct mbuf *m, *nam, *mhead, *mhck; + struct rpc_call *call; + struct rpc_reply *reply; + struct uio auio; + int error, rcvflg, timo, secs, len; + static u_int32_t xid = ~0xFF; + u_int16_t tport; + struct sockopt sopt; + + /* + * Validate address family. + * Sorry, this is INET specific... + */ + if (sa->sin_family != AF_INET) + return (EAFNOSUPPORT); + + /* Free at end if not null. */ + nam = mhead = NULL; + if (from_p) + *from_p = 0; + + /* + * Create socket and set its recieve timeout. + */ + if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0))) + goto out; + + { + struct timeval tv; + + tv.tv_sec = 1; + tv.tv_usec = 0; + bzero(&sopt, sizeof sopt); + sopt.sopt_level = SOL_SOCKET; + sopt.sopt_name = SO_RCVTIMEO; + sopt.sopt_val = &tv; + sopt.sopt_valsize = sizeof tv; + + if (error = sosetopt(so, &sopt)) + goto out; + + } + + /* + * Enable broadcast if necessary. 
+ */ + + if (from_p) { + int on = 1; + sopt.sopt_name = SO_BROADCAST; + sopt.sopt_val = &on; + sopt.sopt_valsize = sizeof on; + if (error = sosetopt(so, &sopt)) + goto out; + } + + /* + * Bind the local endpoint to a reserved port, + * because some NFS servers refuse requests from + * non-reserved (non-privileged) ports. + */ + m = m_getclr(M_WAIT, MT_SONAME); + sin = mtod(m, struct sockaddr_in *); + sin->sin_len = m->m_len = sizeof(*sin); + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = INADDR_ANY; + tport = IPPORT_RESERVED; + do { + tport--; + sin->sin_port = htons(tport); + error = sobind(so, mtod(m, struct sockaddr *)); + } while (error == EADDRINUSE && + tport > IPPORT_RESERVED / 2); + m_freem(m); + if (error) { + printf("bind failed\n"); + goto out; + } + + /* + * Setup socket address for the server. + */ + nam = m_get(M_WAIT, MT_SONAME); + if (nam == NULL) { + error = ENOBUFS; + goto out; + } + sin = mtod(nam, struct sockaddr_in *); + bcopy((caddr_t)sa, (caddr_t)sin, (nam->m_len = sa->sin_len)); + + /* + * Prepend RPC message header. + */ + m = *data; + *data = NULL; +#if DIAGNOSTIC + if ((m->m_flags & M_PKTHDR) == 0) + panic("krpc_call: send data w/o pkthdr"); + if (m->m_pkthdr.len < m->m_len) + panic("krpc_call: pkthdr.len not set"); +#endif + mhead = m_prepend(m, sizeof(*call), M_WAIT); + if (mhead == NULL) { + error = ENOBUFS; + goto out; + } + mhead->m_pkthdr.len += sizeof(*call); + mhead->m_pkthdr.rcvif = NULL; + + /* + * Fill in the RPC header + */ + call = mtod(mhead, struct rpc_call *); + bzero((caddr_t)call, sizeof(*call)); + xid++; + call->rp_xid = htonl(xid); + /* call->rp_direction = 0; */ + call->rp_rpcvers = htonl(2); + call->rp_prog = htonl(prog); + call->rp_vers = htonl(vers); + call->rp_proc = htonl(func); + /* call->rp_auth = 0; */ + /* call->rp_verf = 0; */ + + /* + * Send it, repeatedly, until a reply is received, + * but delay each re-send by an increasing amount. + * If the delay hits the maximum, start complaining. 
+ */ + timo = 0; + for (;;) { + /* Send RPC request (or re-send). */ + m = m_copym(mhead, 0, M_COPYALL, M_WAIT); + if (m == NULL) { + error = ENOBUFS; + goto out; + } + error = sosend(so, mtod(nam, struct sockaddr *), NULL, m, NULL, 0); + if (error) { + printf("krpc_call: sosend: %d\n", error); + goto out; + } + m = NULL; + + /* Determine new timeout. */ + if (timo < MAX_RESEND_DELAY) + timo++; + else + printf("RPC timeout for server " IP_FORMAT "\n", + IP_LIST(&(sin->sin_addr.s_addr))); + + /* + * Wait for up to timo seconds for a reply. + * The socket receive timeout was set to 1 second. + */ + secs = timo; + while (secs > 0) { + if ((from_p) && (*from_p)){ + FREE(*from_p, M_SONAME); + *from_p = NULL; + } + + if (m) { + m_freem(m); + m = NULL; + } + auio.uio_resid = len = 1<<16; + rcvflg = 0; + + error = soreceive(so, (struct sockaddr **) from_p, &auio, &m, NULL, &rcvflg); + + if (error == EWOULDBLOCK) { + secs--; + continue; + } + if (error) + goto out; + len -= auio.uio_resid; + + /* Does the reply contain at least a header? */ + if (len < MIN_REPLY_HDR) + continue; + if (m->m_len < MIN_REPLY_HDR) + continue; + reply = mtod(m, struct rpc_reply *); + + /* Is it the right reply? */ + if (reply->rp_direction != htonl(RPC_REPLY)) + continue; + + if (reply->rp_xid != htonl(xid)) + continue; + + /* Was RPC accepted? (authorization OK) */ + if (reply->rp_astatus != 0) { + error = ntohl(reply->rp_u.rpu_errno); + printf("rpc denied, error=%d\n", error); + continue; + } + + /* Did the call succeed? */ + if ((error = ntohl(reply->rp_u.rpu_ok.rp_rstatus)) != 0) { + printf("rpc status=%d\n", error); + continue; + } + + goto gotreply; /* break two levels */ + + } /* while secs */ + } /* forever send/receive */ + + error = ETIMEDOUT; + goto out; + + gotreply: + + /* + * Pull as much as we can into first mbuf, to make + * result buffer contiguous. Note that if the entire + * result won't fit into one mbuf, you're out of luck. 
+ * XXX - Should not rely on making the entire reply + * contiguous (fix callers instead). -gwr + */ +#if DIAGNOSTIC + if ((m->m_flags & M_PKTHDR) == 0) + panic("krpc_call: received pkt w/o header?"); +#endif + len = m->m_pkthdr.len; + if (m->m_len < len) { + m = m_pullup(m, len); + if (m == NULL) { + error = ENOBUFS; + goto out; + } + reply = mtod(m, struct rpc_reply *); + } + + /* + * Strip RPC header + */ + len = sizeof(*reply); + if (reply->rp_u.rpu_ok.rp_auth.rp_atype != 0) { + len += ntohl(reply->rp_u.rpu_ok.rp_auth.rp_alen); + len = (len + 3) & ~3; /* XXX? */ + } + m_adj(m, len); + + /* result */ + *data = m; + out: + if (nam) m_freem(nam); + if (mhead) m_freem(mhead); + soclose(so); + return error; +} diff --git a/bsd/nfs/nfs.h b/bsd/nfs/nfs.h new file mode 100644 index 000000000..00cc5f468 --- /dev/null +++ b/bsd/nfs/nfs.h @@ -0,0 +1,841 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)nfs.h 8.4 (Berkeley) 5/1/95 + * FreeBSD-Id: nfs.h,v 1.32 1997/10/12 20:25:38 phk Exp $ + */ + +#ifndef _NFS_NFS_H_ +#define _NFS_NFS_H_ + +/* + * Tunable constants for nfs + */ + +#define NFS_MAXIOVEC 34 +#define NFS_TICKINTVL 5 /* Desired time for a tick (msec) */ +#define NFS_HZ (hz / nfs_ticks) /* Ticks/sec */ +#define NFS_TIMEO (1 * NFS_HZ) /* Default timeout = 1 second */ +#define NFS_MINTIMEO (1 * NFS_HZ) /* Min timeout to use */ +#define NFS_MAXTIMEO (60 * NFS_HZ) /* Max timeout to backoff to */ +#define NFS_MINIDEMTIMEO (5 * NFS_HZ) /* Min timeout for non-idempotent ops*/ +#define NFS_MAXREXMIT 100 /* Stop counting after this many */ +#define NFS_MAXWINDOW 1024 /* Max number of outstanding requests */ +#define NFS_RETRANS 10 /* Num of retrans for soft mounts */ +#define NFS_MAXGRPS 16 /* Max. size of groups list */ +#ifndef NFS_MINATTRTIMO +#define NFS_MINATTRTIMO 5 /* Attribute cache timeout in sec */ +#endif +#ifndef NFS_MAXATTRTIMO +#define NFS_MAXATTRTIMO 60 +#endif +#define NFS_WSIZE 8192 /* Def. write data size <= 8192 */ +#define NFS_RSIZE 8192 /* Def. read data size <= 8192 */ +#define NFS_READDIRSIZE 8192 /* Def. readdir size */ +#define NFS_DEFRAHEAD 1 /* Def. read ahead # blocks */ +#define NFS_MAXRAHEAD 4 /* Max. read ahead # blocks */ +#define NFS_MAXUIDHASH 64 /* Max. # of hashed uid entries/mp */ +#define NFS_MAXASYNCDAEMON 20 /* Max. number async_daemons runnable */ +#define NFS_MAXGATHERDELAY 100 /* Max. write gather delay (msec) */ +#ifndef NFS_GATHERDELAY +#define NFS_GATHERDELAY 10 /* Default write gather delay (msec) */ +#endif +#define NFS_DIRBLKSIZ 4096 /* Must be a multiple of DIRBLKSIZ */ + +/* + * Oddballs + */ +#define NMOD(a) ((a) % nfs_asyncdaemons) +#define NFS_CMPFH(n, f, s) \ + ((n)->n_fhsize == (s) && !bcmp((caddr_t)(n)->n_fhp, (caddr_t)(f), (s))) +#define NFS_ISV3(v) (VFSTONFS((v)->v_mount)->nm_flag & NFSMNT_NFSV3) +#define NFS_SRVMAXDATA(n) \ + (((n)->nd_flag & ND_NFSV3) ? (((n)->nd_nam2) ? 
+ \ + NFS_MAXDGRAMDATA : NFS_MAXDATA) : NFS_V2MAXDATA) + +/* + * XXX + * The B_INVAFTERWRITE flag should be set to whatever is required by the + * buffer cache code to say "Invalidate the block after it is written back". + */ +#ifdef __FreeBSD__ +#define B_INVAFTERWRITE B_NOCACHE +#else +#define B_INVAFTERWRITE B_INVAL +#endif + +/* + * The IO_METASYNC flag should be implemented for local file systems. + * (Until then, it is nothing at all.) + */ +#ifndef IO_METASYNC +#define IO_METASYNC 0 +#endif + +/* + * Set the attribute timeout based on how recently the file has been modified. + */ +#define NFS_ATTRTIMEO(np) \ + ((((np)->n_flag & NMODIFIED) || \ + (time.tv_sec - (np)->n_mtime) / 10 < NFS_MINATTRTIMO) ? NFS_MINATTRTIMO : \ + ((time.tv_sec - (np)->n_mtime) / 10 > NFS_MAXATTRTIMO ? NFS_MAXATTRTIMO : \ + (time.tv_sec - (np)->n_mtime) / 10)) + +/* + * Expected allocation sizes for major data structures. If the actual size + * of the structure exceeds these sizes, then malloc() will be allocating + * almost twice the memory required. This is used in nfs_init() to warn + * the sysadmin that the size of a structure should be reduced. + * (These sizes are always a power of 2. If the kernel malloc() changes + * to one that does not allocate space in powers of 2 size, then this all + * becomes bunk!). + * Note that some of these structures come out of their own nfs zones. 
+*/ +#define NFS_NODEALLOC 512 +#define NFS_MNTALLOC 512 +#define NFS_SVCALLOC 256 +#define NFS_UIDALLOC 128 + +/* + * Arguments to mount NFS + */ +#define NFS_ARGSVERSION 3 /* change when nfs_args changes */ +struct nfs_args { + int version; /* args structure version number */ + struct sockaddr *addr; /* file server address */ + int addrlen; /* length of address */ + int sotype; /* Socket type */ + int proto; /* and Protocol */ + u_char *fh; /* File handle to be mounted */ + int fhsize; /* Size, in bytes, of fh */ + int flags; /* flags */ + int wsize; /* write size in bytes */ + int rsize; /* read size in bytes */ + int readdirsize; /* readdir size in bytes */ + int timeo; /* initial timeout in .1 secs */ + int retrans; /* times to retry send */ + int maxgrouplist; /* Max. size of group list */ + int readahead; /* # of blocks to readahead */ + int leaseterm; /* Term (sec) of lease */ + int deadthresh; /* Retrans threshold */ + char *hostname; /* server's name */ +}; + +/* + * NFS mount option flags + */ +#define NFSMNT_SOFT 0x00000001 /* soft mount (hard is default) */ +#define NFSMNT_WSIZE 0x00000002 /* set write size */ +#define NFSMNT_RSIZE 0x00000004 /* set read size */ +#define NFSMNT_TIMEO 0x00000008 /* set initial timeout */ +#define NFSMNT_RETRANS 0x00000010 /* set number of request retries */ +#define NFSMNT_MAXGRPS 0x00000020 /* set maximum grouplist size */ +#define NFSMNT_INT 0x00000040 /* allow interrupts on hard mount */ +#define NFSMNT_NOCONN 0x00000080 /* Don't Connect the socket */ +#define NFSMNT_NQNFS 0x00000100 /* Use Nqnfs protocol */ +#define NFSMNT_NFSV3 0x00000200 /* Use NFS Version 3 protocol */ +#define NFSMNT_KERB 0x00000400 /* Use Kerberos authentication */ +#define NFSMNT_DUMBTIMR 0x00000800 /* Don't estimate rtt dynamically */ +#define NFSMNT_LEASETERM 0x00001000 /* set lease term (nqnfs) */ +#define NFSMNT_READAHEAD 0x00002000 /* set read ahead */ +#define NFSMNT_DEADTHRESH 0x00004000 /* set dead server retry thresh */ +#define 
NFSMNT_RESVPORT 0x00008000 /* Allocate a reserved port */ +#define NFSMNT_RDIRPLUS 0x00010000 /* Use Readdirplus for V3 */ +#define NFSMNT_READDIRSIZE 0x00020000 /* Set readdir size */ +#define NFSMNT_INTERNAL 0xfffc0000 /* Bits set internally */ +#define NFSMNT_HASWRITEVERF 0x00040000 /* Has write verifier for V3 */ +#define NFSMNT_GOTPATHCONF 0x00080000 /* Got the V3 pathconf info */ +#define NFSMNT_GOTFSINFO 0x00100000 /* Got the V3 fsinfo */ +#define NFSMNT_MNTD 0x00200000 /* Mnt server for mnt point */ +#define NFSMNT_DISMINPROG 0x00400000 /* Dismount in progress */ +#define NFSMNT_DISMNT 0x00800000 /* Dismounted */ +#define NFSMNT_SNDLOCK 0x01000000 /* Send socket lock */ +#define NFSMNT_WANTSND 0x02000000 /* Want above */ +#define NFSMNT_RCVLOCK 0x04000000 /* Rcv socket lock */ +#define NFSMNT_WANTRCV 0x08000000 /* Want above */ +#define NFSMNT_WAITAUTH 0x10000000 /* Wait for authentication */ +#define NFSMNT_HASAUTH 0x20000000 /* Has authenticator */ +#define NFSMNT_WANTAUTH 0x40000000 /* Wants an authenticator */ +#define NFSMNT_AUTHERR 0x80000000 /* Authentication error */ + +/* + * Structures for the nfssvc(2) syscall. Not that anyone but nfsd and mount_nfs + * should ever try and use it. + */ +struct nfsd_args { + int sock; /* Socket to serve */ + caddr_t name; /* Client addr for connection based sockets */ + int namelen; /* Length of name */ +}; + +struct nfsd_srvargs { + struct nfsd *nsd_nfsd; /* Pointer to in kernel nfsd struct */ + uid_t nsd_uid; /* Effective uid mapped to cred */ + u_long nsd_haddr; /* Ip address of client */ + struct ucred nsd_cr; /* Cred. 
uid maps to */ + int nsd_authlen; /* Length of auth string (ret) */ + u_char *nsd_authstr; /* Auth string (ret) */ + int nsd_verflen; /* and the verfier */ + u_char *nsd_verfstr; + struct timeval nsd_timestamp; /* timestamp from verifier */ + u_long nsd_ttl; /* credential ttl (sec) */ + NFSKERBKEY_T nsd_key; /* Session key */ +}; + +struct nfsd_cargs { + char *ncd_dirp; /* Mount dir path */ + uid_t ncd_authuid; /* Effective uid */ + int ncd_authtype; /* Type of authenticator */ + int ncd_authlen; /* Length of authenticator string */ + u_char *ncd_authstr; /* Authenticator string */ + int ncd_verflen; /* and the verifier */ + u_char *ncd_verfstr; + NFSKERBKEY_T ncd_key; /* Session key */ +}; + +/* + * XXX to allow amd to include nfs.h without nfsproto.h + */ +#ifdef NFS_NPROCS +/* + * Stats structure + */ +struct nfsstats { + int attrcache_hits; + int attrcache_misses; + int lookupcache_hits; + int lookupcache_misses; + int direofcache_hits; + int direofcache_misses; + int biocache_reads; + int read_bios; + int read_physios; + int biocache_writes; + int write_bios; + int write_physios; + int biocache_readlinks; + int readlink_bios; + int biocache_readdirs; + int readdir_bios; + int rpccnt[NFS_NPROCS]; + int rpcretries; + int srvrpccnt[NFS_NPROCS]; + int srvrpc_errs; + int srv_errs; + int rpcrequests; + int rpctimeouts; + int rpcunexpected; + int rpcinvalid; + int srvcache_inproghits; + int srvcache_idemdonehits; + int srvcache_nonidemdonehits; + int srvcache_misses; + int srvnqnfs_leases; + int srvnqnfs_maxleases; + int srvnqnfs_getleases; + int srvvop_writes; + int pageins; + int pageouts; +}; +#endif + +/* + * Flags for nfssvc() system call. 
+ */ +#define NFSSVC_BIOD 0x002 +#define NFSSVC_NFSD 0x004 +#define NFSSVC_ADDSOCK 0x008 +#define NFSSVC_AUTHIN 0x010 +#define NFSSVC_GOTAUTH 0x040 +#define NFSSVC_AUTHINFAIL 0x080 +#define NFSSVC_MNTD 0x100 + +/* + * fs.nfs sysctl(3) identifiers + */ +#define NFS_NFSSTATS 1 /* struct: struct nfsstats */ +#define NFS_NFSPRIVPORT 2 /* int: prohibit nfs to resvports */ + +#define FS_NFS_NAMES { \ + { 0, 0 }, \ + { "nfsstats", CTLTYPE_STRUCT }, \ + { "nfsprivport", CTLTYPE_INT }, \ +} + +#ifndef NFS_MUIDHASHSIZ +#define NFS_MUIDHASHSIZ 63 /* Tune the size of nfsmount with this */ +#endif +#ifndef NFS_WDELAYHASHSIZ +#define NFS_WDELAYHASHSIZ 16 /* and with this */ +#endif + +/* + * The set of signals the interrupt an I/O in progress for NFSMNT_INT mounts. + * What should be in this set is open to debate, but I believe that since + * I/O system calls on ufs are never interrupted by signals the set should + * be minimal. My reasoning is that many current programs that use signals + * such as SIGALRM will not expect file I/O system calls to be interrupted + * by them and break. + */ +#ifdef KERNEL + +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_NFSREQ); +MALLOC_DECLARE(M_NFSMNT); +MALLOC_DECLARE(M_NFSDIROFF); +MALLOC_DECLARE(M_NFSRVDESC); +MALLOC_DECLARE(M_NFSUID); +MALLOC_DECLARE(M_NQLEASE); +MALLOC_DECLARE(M_NFSD); +MALLOC_DECLARE(M_NFSBIGFH); +#endif + +struct uio; struct buf; struct vattr; struct nameidata; /* XXX */ + +#define NFSINT_SIGMASK (sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGKILL)| \ + sigmask(SIGHUP)|sigmask(SIGQUIT)) + +/* + * Socket errors ignored for connectionless sockets?? 
+ * For now, ignore them all + */ +#define NFSIGNORE_SOERROR(s, e) \ + ((e) != EINTR && (e) != ERESTART && (e) != EWOULDBLOCK && \ + ((s) & PR_CONNREQUIRED) == 0) + +/* + * Nfs outstanding request list element + */ +struct nfsreq { + TAILQ_ENTRY(nfsreq) r_chain; + struct mbuf *r_mreq; + struct mbuf *r_mrep; + struct mbuf *r_md; + caddr_t r_dpos; + struct nfsmount *r_nmp; + struct vnode *r_vp; + u_long r_xid; + int r_flags; /* flags on request, see below */ + int r_retry; /* max retransmission count */ + int r_rexmit; /* current retrans count */ + int r_timer; /* tick counter on reply */ + u_int32_t r_procnum; /* NFS procedure number */ + int r_rtt; /* RTT for rpc */ + struct proc *r_procp; /* Proc that did I/O system call */ +}; + +/* + * Queue head for nfsreq's + */ +extern TAILQ_HEAD(nfs_reqq, nfsreq) nfs_reqq; + +/* Flag values for r_flags */ +#define R_TIMING 0x01 /* timing request (in mntp) */ +#define R_SENT 0x02 /* request has been sent */ +#define R_SOFTTERM 0x04 /* soft mnt, too many retries */ +#define R_INTR 0x08 /* intr mnt, signal pending */ +#define R_SOCKERR 0x10 /* Fatal error on socket */ +#define R_TPRINTFMSG 0x20 /* Did a tprintf msg. */ +#define R_MUSTRESEND 0x40 /* Must resend request */ +#define R_GETONEREP 0x80 /* Probe for one reply only */ + +/* + * A list of nfssvc_sock structures is maintained with all the sockets + * that require service by the nfsd. + * The nfsuid structs hang off of the nfssvc_sock structs in both lru + * and uid hash lists. 
+ */ +#ifndef NFS_UIDHASHSIZ +#define NFS_UIDHASHSIZ 29 /* Tune the size of nfssvc_sock with this */ +#endif +#define NUIDHASH(sock, uid) \ + (&(sock)->ns_uidhashtbl[(uid) % NFS_UIDHASHSIZ]) +#define NWDELAYHASH(sock, f) \ + (&(sock)->ns_wdelayhashtbl[(*((u_long *)(f))) % NFS_WDELAYHASHSIZ]) +#define NMUIDHASH(nmp, uid) \ + (&(nmp)->nm_uidhashtbl[(uid) % NFS_MUIDHASHSIZ]) +#define NFSNOHASH(fhsum) \ + (&nfsnodehashtbl[(fhsum) & nfsnodehash]) + +/* + * Network address hash list element + */ +union nethostaddr { + u_long had_inetaddr; + struct mbuf *had_nam; +}; + +struct nfsuid { + TAILQ_ENTRY(nfsuid) nu_lru; /* LRU chain */ + LIST_ENTRY(nfsuid) nu_hash; /* Hash list */ + int nu_flag; /* Flags */ + union nethostaddr nu_haddr; /* Host addr. for dgram sockets */ + struct ucred nu_cr; /* Cred uid mapped to */ + int nu_expire; /* Expiry time (sec) */ + struct timeval nu_timestamp; /* Kerb. timestamp */ + u_long nu_nickname; /* Nickname on server */ + NFSKERBKEY_T nu_key; /* and session key */ +}; + +#define nu_inetaddr nu_haddr.had_inetaddr +#define nu_nam nu_haddr.had_nam +/* Bits for nu_flag */ +#define NU_INETADDR 0x1 +#define NU_NAM 0x2 +#define NU_NETFAM(u) (((u)->nu_flag & NU_INETADDR) ? 
AF_INET : AF_ISO) + +#ifdef notyet +/* XXX CSM 12/2/97 When/if we merge queue.h */ +struct nfsrv_rec { + STAILQ_ENTRY(nfsrv_rec) nr_link; + struct sockaddr *nr_address; + struct mbuf *nr_packet; +}; +#endif + +struct nfssvc_sock { + TAILQ_ENTRY(nfssvc_sock) ns_chain; /* List of all nfssvc_sock's */ + TAILQ_HEAD(, nfsuid) ns_uidlruhead; + struct file *ns_fp; + struct socket *ns_so; + struct mbuf *ns_nam; + struct mbuf *ns_raw; + struct mbuf *ns_rawend; + struct mbuf *ns_rec; + struct mbuf *ns_recend; + struct mbuf *ns_frag; + int ns_flag; + int ns_solock; + int ns_cc; + int ns_reclen; + int ns_numuids; + u_long ns_sref; + LIST_HEAD(, nfsrv_descript) ns_tq; /* Write gather lists */ + LIST_HEAD(, nfsuid) ns_uidhashtbl[NFS_UIDHASHSIZ]; + LIST_HEAD(nfsrvw_delayhash, nfsrv_descript) ns_wdelayhashtbl[NFS_WDELAYHASHSIZ]; +}; + +/* Bits for "ns_flag" */ +#define SLP_VALID 0x01 +#define SLP_DOREC 0x02 +#define SLP_NEEDQ 0x04 +#define SLP_DISCONN 0x08 +#define SLP_GETSTREAM 0x10 +#define SLP_LASTFRAG 0x20 +#define SLP_ALLFLAGS 0xff + +extern TAILQ_HEAD(nfssvc_sockhead, nfssvc_sock) nfssvc_sockhead; +extern int nfssvc_sockhead_flag; +#define SLP_INIT 0x01 +#define SLP_WANTINIT 0x02 + +/* + * One of these structures is allocated for each nfsd. + */ +struct nfsd { + TAILQ_ENTRY(nfsd) nfsd_chain; /* List of all nfsd's */ + int nfsd_flag; /* NFSD_ flags */ + struct nfssvc_sock *nfsd_slp; /* Current socket */ + int nfsd_authlen; /* Authenticator len */ + u_char nfsd_authstr[RPCAUTH_MAXSIZ]; /* Authenticator data */ + int nfsd_verflen; /* and the Verifier */ + u_char nfsd_verfstr[RPCVERF_MAXSIZ]; + struct proc *nfsd_procp; /* Proc ptr */ + struct nfsrv_descript *nfsd_nd; /* Associated nfsrv_descript */ +}; + +/* Bits for "nfsd_flag" */ +#define NFSD_WAITING 0x01 +#define NFSD_REQINPROG 0x02 +#define NFSD_NEEDAUTH 0x04 +#define NFSD_AUTHFAIL 0x08 + +/* + * This structure is used by the server for describing each request. 
+ * Some fields are used only when write request gathering is performed. + */ +struct nfsrv_descript { + u_quad_t nd_time; /* Write deadline (usec) */ + off_t nd_off; /* Start byte offset */ + off_t nd_eoff; /* and end byte offset */ + LIST_ENTRY(nfsrv_descript) nd_hash; /* Hash list */ + LIST_ENTRY(nfsrv_descript) nd_tq; /* and timer list */ + LIST_HEAD(,nfsrv_descript) nd_coalesce; /* coalesced writes */ + struct mbuf *nd_mrep; /* Request mbuf list */ + struct mbuf *nd_md; /* Current dissect mbuf */ + struct mbuf *nd_mreq; /* Reply mbuf list */ + struct mbuf *nd_nam; /* and socket addr */ + struct mbuf *nd_nam2; /* return socket addr */ + caddr_t nd_dpos; /* Current dissect pos */ + u_int32_t nd_procnum; /* RPC # */ + int nd_stable; /* storage type */ + int nd_flag; /* nd_flag */ + int nd_len; /* Length of this write */ + int nd_repstat; /* Reply status */ + u_long nd_retxid; /* Reply xid */ + u_long nd_duration; /* Lease duration */ + struct timeval nd_starttime; /* Time RPC initiated */ + fhandle_t nd_fh; /* File handle */ + struct ucred nd_cr; /* Credentials */ +}; + +/* Bits for "nd_flag" */ +#define ND_READ LEASE_READ +#define ND_WRITE LEASE_WRITE +#define ND_CHECK 0x04 +#define ND_LEASE (ND_READ | ND_WRITE | ND_CHECK) +#define ND_NFSV3 0x08 +#define ND_NQNFS 0x10 +#define ND_KERBNICK 0x20 +#define ND_KERBFULL 0x40 +#define ND_KERBAUTH (ND_KERBNICK | ND_KERBFULL) + +extern TAILQ_HEAD(nfsd_head, nfsd) nfsd_head; +extern int nfsd_head_flag; +#define NFSD_CHECKSLP 0x01 + +/* + * These macros compare nfsrv_descript structures. 
+ */ +#define NFSW_CONTIG(o, n) \ + ((o)->nd_eoff >= (n)->nd_off && \ + !bcmp((caddr_t)&(o)->nd_fh, (caddr_t)&(n)->nd_fh, NFSX_V3FH)) + +#define NFSW_SAMECRED(o, n) \ + (((o)->nd_flag & ND_KERBAUTH) == ((n)->nd_flag & ND_KERBAUTH) && \ + !bcmp((caddr_t)&(o)->nd_cr, (caddr_t)&(n)->nd_cr, \ + sizeof (struct ucred))) + +/* + * Defines for WebNFS + */ + +#define WEBNFS_ESC_CHAR '%' +#define WEBNFS_SPECCHAR_START 0x80 + +#define WEBNFS_NATIVE_CHAR 0x80 +/* + * .. + * Possibly more here in the future. + */ + +/* + * Macro for converting escape characters in WebNFS pathnames. + * Should really be in libkern. + */ + +#define HEXTOC(c) \ + ((c) >= 'a' ? ((c) - ('a' - 10)) : \ + ((c) >= 'A' ? ((c) - ('A' - 10)) : ((c) - '0'))) +#define HEXSTRTOI(p) \ + ((HEXTOC(p[0]) << 4) + HEXTOC(p[1])) + +#define NFSDIAG 0 +#if NFSDIAG + +extern int nfs_debug; +#define NFS_DEBUG_ASYNCIO 1 /* asynchronous i/o */ +#define NFS_DEBUG_WG 2 /* server write gathering */ +#define NFS_DEBUG_RC 4 /* server request caching */ +#define NFS_DEBUG_SILLY 8 /* nfs_sillyrename (.nfsXXX aka turd files) */ +#define NFS_DEBUG_DUP 16 /* debug duplicate requests */ +#define NFS_DEBUG_ATTR 32 + +#define NFS_DPF(cat, args) \ + do { \ + if (nfs_debug & NFS_DEBUG_##cat) kprintf args; \ + } while (0) + +#else + +#define NFS_DPF(cat, args) + +#endif /* NFSDIAG */ + +int nfs_init __P((struct vfsconf *vfsp)); +int nfs_reply __P((struct nfsreq *)); +int nfs_getreq __P((struct nfsrv_descript *,struct nfsd *,int)); +int nfs_send __P((struct socket *, struct mbuf *, struct mbuf *, + struct nfsreq *)); +int nfs_rephead __P((int, struct nfsrv_descript *, struct nfssvc_sock *, + int, int, u_quad_t *, struct mbuf **, struct mbuf **, + caddr_t *)); +int nfs_sndlock __P((int *, struct nfsreq *)); +void nfs_sndunlock __P((int *flagp)); +int nfs_disct __P((struct mbuf **, caddr_t *, int, int, caddr_t *)); +int nfs_vinvalbuf __P((struct vnode *, int, struct ucred *, struct proc *, + int)); +int nfs_readrpc __P((struct vnode *, 
struct uio *, struct ucred *)); +int nfs_writerpc __P((struct vnode *, struct uio *, struct ucred *, int *, + int *)); +int nfs_readdirrpc __P((struct vnode *, struct uio *, struct ucred *)); +int nfs_asyncio __P((struct buf *, struct ucred *)); +int nfs_doio __P((struct buf *, struct ucred *, struct proc *)); +int nfs_readlinkrpc __P((struct vnode *, struct uio *, struct ucred *)); +int nfs_sigintr __P((struct nfsmount *, struct nfsreq *, struct proc *)); +int nfs_readdirplusrpc __P((struct vnode *, struct uio *, struct ucred *)); +int nfsm_disct __P((struct mbuf **, caddr_t *, int, int, caddr_t *)); +void nfsm_srvfattr __P((struct nfsrv_descript *, struct vattr *, + struct nfs_fattr *)); +void nfsm_srvwcc __P((struct nfsrv_descript *, int, struct vattr *, int, + struct vattr *, struct mbuf **, char **)); +void nfsm_srvpostopattr __P((struct nfsrv_descript *, int, struct vattr *, + struct mbuf **, char **)); +int netaddr_match __P((int, union nethostaddr *, struct mbuf *)); +int nfs_request __P((struct vnode *, struct mbuf *, int, struct proc *, + struct ucred *, struct mbuf **, struct mbuf **, + caddr_t *)); +int nfs_loadattrcache __P((struct vnode **, struct mbuf **, caddr_t *, + struct vattr *)); +int nfs_namei __P((struct nameidata *, fhandle_t *, int, + struct nfssvc_sock *, struct mbuf *, struct mbuf **, + caddr_t *, struct vnode **, struct proc *, int, int)); +void nfsm_adj __P((struct mbuf *, int, int)); +int nfsm_mbuftouio __P((struct mbuf **, struct uio *, int, caddr_t *)); +void nfsrv_initcache __P((void)); +int nfs_getauth __P((struct nfsmount *, struct nfsreq *, struct ucred *, + char **, int *, char *, int *, NFSKERBKEY_T)); +int nfs_getnickauth __P((struct nfsmount *, struct ucred *, char **, + int *, char *, int)); +int nfs_savenickauth __P((struct nfsmount *, struct ucred *, int, + NFSKERBKEY_T, struct mbuf **, char **, + struct mbuf *)); +int nfs_adv __P((struct mbuf **, caddr_t *, int, int)); +void nfs_nhinit __P((void)); +void nfs_timer 
__P((void*)); +u_long nfs_hash __P((nfsfh_t *, int)); +int nfsrv_dorec __P((struct nfssvc_sock *, struct nfsd *, + struct nfsrv_descript **)); +int nfsrv_getcache __P((struct nfsrv_descript *, struct nfssvc_sock *, + struct mbuf **)); +void nfsrv_updatecache __P((struct nfsrv_descript *, int, struct mbuf *)); +void nfsrv_cleancache __P((void)); +int nfs_connect __P((struct nfsmount *, struct nfsreq *)); +void nfs_disconnect __P((struct nfsmount *)); +int nfs_getattrcache __P((struct vnode *, struct vattr *)); +int nfsm_strtmbuf __P((struct mbuf **, char **, char *, long)); +int nfs_bioread __P((struct vnode *, struct uio *, int, struct ucred *, + int)); +int nfsm_uiotombuf __P((struct uio *, struct mbuf **, int, caddr_t *)); +void nfsrv_init __P((int)); +void nfs_clearcommit __P((struct mount *)); +int nfsrv_errmap __P((struct nfsrv_descript *, int)); +void nfsrvw_sort __P((gid_t *, int)); +void nfsrv_setcred __P((struct ucred *, struct ucred *)); +int nfs_writebp __P((struct buf *, int)); +int nfsrv_object_create __P((struct vnode *)); +void nfsrv_wakenfsd __P((struct nfssvc_sock *slp)); +int nfsrv_writegather __P((struct nfsrv_descript **, struct nfssvc_sock *, + struct proc *, struct mbuf **)); +int nfs_fsinfo __P((struct nfsmount *, struct vnode *, struct ucred *, + struct proc *p)); + +int nfsrv3_access __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_commit __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_create __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_fhtovp __P((fhandle_t *, int, struct vnode **, struct ucred *, + struct nfssvc_sock *, struct mbuf *, int *, + int, int)); +int nfsrv_setpublicfs __P((struct mount *, struct netexport *, + struct export_args *)); +int nfs_ispublicfh __P((fhandle_t *)); +int nfsrv_fsinfo __P((struct nfsrv_descript *nfsd, struct 
nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_getattr __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_link __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_lookup __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_mkdir __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_mknod __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_noop __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_null __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_pathconf __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, struct proc *procp, + struct mbuf **mrq)); +int nfsrv_read __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_readdir __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_readdirplus __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, struct proc *procp, + struct mbuf **mrq)); +int nfsrv_readlink __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, struct proc *procp, + struct mbuf **mrq)); +int nfsrv_remove __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_rename __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_rmdir __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_setattr __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int 
nfsrv_statfs __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_symlink __P((struct nfsrv_descript *nfsd, + struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +int nfsrv_write __P((struct nfsrv_descript *nfsd, struct nfssvc_sock *slp, + struct proc *procp, struct mbuf **mrq)); +void nfsrv_rcv __P((struct socket *so, caddr_t arg, int waitflag)); +void nfsrv_slpderef __P((struct nfssvc_sock *slp)); + +#if NFSDIAG + +extern int nfstraceindx; +#define NFSTBUFSIZ 8912 +struct nfstracerec { uint i1, i2, i3, i4; }; +extern struct nfstracerec nfstracebuf[NFSTBUFSIZ]; +extern uint nfstracemask; /* 32 bits - trace points over 31 are unconditional */ + +/* 0x0000000f nfs_getattrcache trace points */ +#define NFSTRC_GAC_MISS 0x00 /* 0x00000001 cache miss */ +#define NFSTRC_GAC_HIT 0x01 /* 0x00000002 cache hit */ +#define NFSTRC_GAC_NP 0x02 /* 0x00000004 np size mismatch - vp... */ +/* 0x00000038 nfs_loadattrcache trace points */ +#define NFSTRC_LAC 0x03 /* 0x00000008 function entry point - vp */ +#define NFSTRC_LAC_INIT 0x04 /* 0x00000010 new vp & init n_mtime - vp */ +#define NFSTRC_LAC_NP 0x05 /* 0x00000020 np size mismatch - vp... */ +/* 0x000000c0 nfs_getattr trace points */ +#define NFSTRC_GA_INV 0x06 /* 0x00000040 times mismatch - vp */ +#define NFSTRC_GA_INV1 0x07 /* 0x00000080 invalidate ok - vp */ +/* 0x00000100 vmp_invalidate trace points */ +#define NFSTRC_VMP_INV 0x08 /* 0x00000100 function entry point - vmp */ +/* 0x00000200 nfs_request trace points */ +#define NFSTRC_REQ 0x09 /* 0x00000200 - alternates vp and procnum */ +/* 0x00000c00 vmp_push_range trace points */ +#define NFSTRC_VPR 0xa /* 0x00000400 entry point - vp... 
*/ +#define NFSTRC_VPR_DONE 0xb /* 0x00000800 tail exit - error # */ +/* 0x00003000 nfs_doio trace points */ +#define NFSTRC_DIO 0xc /* 0x00001000 entry point - vp */ +#define NFSTRC_DIO_DONE 0xd /* 0x00002000 exit points - vp */ +/* 0x000fc000 congestion window trace points */ +#define NFSTRC_CWND_INIT 0xe +#define NFSTRC_CWND_REPLY 0xf +#define NFSTRC_CWND_TIMER 0x10 +#define NFSTRC_CWND_REQ1 0x11 +#define NFSTRC_CWND_REQ2 0x12 +#define NFSTRC_CWND_SOFT 0x13 +/* 0xfff00000 nfs_rcvlock & nfs_rcvunlock trace points */ +#define NFSTRC_ECONN 0x14 +#define NFSTRC_RCVERR 0x15 +#define NFSTRC_REQFREE 0x16 +#define NFSTRC_NOTMINE 0x17 +#define NFSTRC_6 0x18 +#define NFSTRC_7 0x19 +#define NFSTRC_RCVLCKINTR 0x1a +#define NFSTRC_RCVALREADY 0x1b +#define NFSTRC_RCVLCKW 0x1c /* 0x10000000 seeking recieve lock (waiting) */ +#define NFSTRC_RCVLCK 0x1d /* 0x20000000 getting recieve lock */ +#define NFSTRC_RCVUNLW 0x1e /* 0x40000000 releasing rcv lock w/ wakeup */ +#define NFSTRC_RCVUNL 0x1f /* 0x80000000 releasing rcv lock w/o wakeup */ +/* trace points beyond 31 are on if any of above points are on */ +#define NFSTRC_GA_INV2 0x20 /* nfs_getattr invalidate - error# */ +#define NFSTRC_VBAD 0x21 +#define NFSTRC_REQERR 0x22 +#define NFSTRC_RPCERR 0x23 +#define NFSTRC_DISSECTERR 0x24 +#define NFSTRC_CONTINUE 0xff /* continuation record for previous entry */ + +#define NFSTRACEX(a1, a2, a3, a4) \ +( \ + nfstracebuf[nfstraceindx].i1 = (uint)(a1), \ + nfstracebuf[nfstraceindx].i2 = (uint)(a2), \ + nfstracebuf[nfstraceindx].i3 = (uint)(a3), \ + nfstracebuf[nfstraceindx].i4 = (uint)(a4), \ + nfstraceindx = (nfstraceindx + 1) % NFSTBUFSIZ, \ + 1 \ +) + +#define NFSTRACE(cnst, fptr) \ +( \ + (nfstracemask && ((cnst) > 31 || nfstracemask & 1<<(cnst))) ? \ + NFSTRACEX((cnst), (fptr), current_thread(), \ + clock_get_system_value().tv_nsec) : \ + 0 \ +) + +#define NFSTRACE4(cnst, fptr, a2, a3, a4) \ +( \ + NFSTRACE(cnst,fptr) ? 
\ + NFSTRACEX(NFSTRC_CONTINUE, a2, a3, a4) : \ + 0 \ +) + +#else /* NFSDIAG */ + + #define NFSTRACE(cnst, fptr) + #define NFSTRACE4(cnst, fptr, a2, a3, a4) + +#endif /* NFSDIAG */ + +#endif /* KERNEL */ + +#endif diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c new file mode 100644 index 000000000..e341a0441 --- /dev/null +++ b/bsd/nfs/nfs_bio.c @@ -0,0 +1,1300 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95 + * FreeBSD-Id: nfs_bio.c,v 1.44 1997/09/10 19:52:25 phk Exp $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size, + struct proc *p, int operation)); +static struct buf *nfs_getwriteblk __P((struct vnode *vp, daddr_t bn, + int size, struct proc *p, + struct ucred *cred, int off, int len)); + +extern int nfs_numasync; +extern struct nfsstats nfsstats; + +/* + * Vnode op for read using bio + * Any similarity to readip() is purely coincidental + */ +int +nfs_bioread(vp, uio, ioflag, cred, getpages) + register struct vnode *vp; + register struct uio *uio; + int ioflag; + struct ucred *cred; + int getpages; +{ + register struct nfsnode *np = VTONFS(vp); + register int biosize, diff, i; + struct buf *bp = 0, *rabp; + struct vattr vattr; + struct proc *p; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + daddr_t lbn, rabn; + int bufsize; + int nra, error = 0, n = 0, on = 0, not_readin; + int operation = (getpages? BLK_PAGEIN : BLK_READ); + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_READ) + panic("nfs_read mode"); +#endif + if (uio->uio_resid == 0) + return (0); + if (uio->uio_offset < 0) + return (EINVAL); + p = uio->uio_procp; + if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + (void)nfs_fsinfo(nmp, vp, cred, p); + /*due to getblk/vm interractions, use vm page size or less values */ + biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); + /* + * For nfs, cache consistency can only be maintained approximately. + * Although RFC1094 does not specify the criteria, the following is + * believed to be compatible with the reference port. + * For nqnfs, full cache consistency is maintained within the loop. 
+ * For nfs: + * If the file's modify time on the server has changed since the + * last read rpc or you have written to the file, + * you may have lost data cache consistency with the + * server, so flush all of the file's data out of the cache. + * Then force a getattr rpc to ensure that you have up to date + * attributes. + * NB: This implies that cache data can be read when up to + * NFS_ATTRTIMEO seconds out of date. If you find that you need current + * attributes this could be forced by setting n_attrstamp to 0 before + * the VOP_GETATTR() call. + */ + if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) { + if (np->n_flag & NMODIFIED) { + if (vp->v_type != VREG) { + if (vp->v_type != VDIR) + panic("nfs: bioread, not dir"); + nfs_invaldir(vp); + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + } + np->n_attrstamp = 0; + error = VOP_GETATTR(vp, &vattr, cred, p); + if (error) + return (error); + np->n_mtime = vattr.va_mtime.tv_sec; + } else { + error = VOP_GETATTR(vp, &vattr, cred, p); + if (error) + return (error); + if (np->n_mtime != vattr.va_mtime.tv_sec) { + if (vp->v_type == VDIR) + nfs_invaldir(vp); + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + np->n_mtime = vattr.va_mtime.tv_sec; + } + } + } + do { + + /* + * Get a valid lease. If cached data is stale, flush it. 
+ */ + if (nmp->nm_flag & NFSMNT_NQNFS) { + if (NQNFS_CKINVALID(vp, np, ND_READ)) { + do { + error = nqnfs_getlease(vp, ND_READ, cred, p); + } while (error == NQNFS_EXPIRED); + if (error) + return (error); + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE) || + ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) { + if (vp->v_type == VDIR) + nfs_invaldir(vp); + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + np->n_brev = np->n_lrev; + } + } else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) { + nfs_invaldir(vp); + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + } + } + if (np->n_flag & NQNFSNONCACHE) { + switch (vp->v_type) { + case VREG: + return (nfs_readrpc(vp, uio, cred)); + case VLNK: + return (nfs_readlinkrpc(vp, uio, cred)); + case VDIR: + break; + default: + printf(" NQNFSNONCACHE: type %x unexpected\n", + vp->v_type); + }; + } + switch (vp->v_type) { + case VREG: + nfsstats.biocache_reads++; + lbn = uio->uio_offset / biosize; + on = uio->uio_offset & (biosize - 1); + not_readin = 1; + + /* + * Start the read ahead(s), as required. + */ + if (nfs_numasync > 0 && nmp->nm_readahead > 0) { + for (nra = 0; nra < nmp->nm_readahead && + (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) { + rabn = lbn + 1 + nra; + if (!incore(vp, rabn)) { + rabp = nfs_getcacheblk(vp, rabn, biosize, p, operation); + if (!rabp) + return (EINTR); + if (!ISSET(rabp->b_flags, (B_CACHE|B_DELWRI))) { + SET(rabp->b_flags, (B_READ | B_ASYNC)); + if (nfs_asyncio(rabp, cred)) { + SET(rabp->b_flags, (B_INVAL|B_ERROR)); + rabp->b_error = EIO; + brelse(rabp); + } + } else + brelse(rabp); + } + } + } + + /* + * If the block is in the cache and has the required data + * in a valid region, just copy it out. + * Otherwise, get the block and write back/read in, + * as required. 
+ */ +again: + bufsize = biosize; + if ((off_t)(lbn + 1) * biosize > np->n_size && + (off_t)(lbn + 1) * biosize - np->n_size < biosize) { + bufsize = np->n_size - lbn * biosize; + bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); + } + bp = nfs_getcacheblk(vp, lbn, bufsize, p, operation); + if (!bp) + return (EINTR); + + if (!ISSET(bp->b_flags, B_CACHE)) { + SET(bp->b_flags, B_READ); + CLR(bp->b_flags, (B_DONE | B_ERROR | B_INVAL)); + not_readin = 0; + error = nfs_doio(bp, cred, p); + if (error) { + brelse(bp); + return (error); + } + } + if (bufsize > on) { + n = min((unsigned)(bufsize - on), uio->uio_resid); + } else { + n = 0; + } + diff = np->n_size - uio->uio_offset; + if (diff < n) + n = diff; + if (not_readin && n > 0) { + if (on < bp->b_validoff || (on + n) > bp->b_validend) { + SET(bp->b_flags, (B_NOCACHE|B_INVAFTERWRITE)); + if (bp->b_dirtyend > 0) { + if (!ISSET(bp->b_flags, B_DELWRI)) + panic("nfsbioread"); + if (VOP_BWRITE(bp) == EINTR) + return (EINTR); + } else + brelse(bp); + goto again; + } + } + vp->v_lastr = lbn; + diff = (on >= bp->b_validend) ? 
0 : (bp->b_validend - on); + if (diff < n) + n = diff; + break; + case VLNK: + nfsstats.biocache_readlinks++; + bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p, operation); + if (!bp) + return (EINTR); + if (!ISSET(bp->b_flags, B_CACHE)) { + SET(bp->b_flags, B_READ); + error = nfs_doio(bp, cred, p); + if (error) { + SET(bp->b_flags, B_ERROR); + brelse(bp); + return (error); + } + } + n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid); + on = 0; + break; + case VDIR: + nfsstats.biocache_readdirs++; + if (np->n_direofoffset + && uio->uio_offset >= np->n_direofoffset) { + return (0); + } + lbn = uio->uio_offset / NFS_DIRBLKSIZ; + on = uio->uio_offset & (NFS_DIRBLKSIZ - 1); + bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, p, operation); + if (!bp) + return (EINTR); + if (!ISSET(bp->b_flags, B_CACHE)) { + SET(bp->b_flags, B_READ); + error = nfs_doio(bp, cred, p); + if (error) { + brelse(bp); + while (error == NFSERR_BAD_COOKIE) { + nfs_invaldir(vp); + error = nfs_vinvalbuf(vp, 0, cred, p, 1); + /* + * Yuck! The directory has been modified on the + * server. The only way to get the block is by + * reading from the beginning to get all the + * offset cookies. + */ + for (i = 0; i <= lbn && !error; i++) { + if (np->n_direofoffset + && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) + return (0); + bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, p, operation); + if (!bp) + return (EINTR); + if (!ISSET(bp->b_flags, B_DONE)) { + SET(bp->b_flags, B_READ); + error = nfs_doio(bp, cred, p); + if (error) { + brelse(bp); + } else if (i < lbn) + brelse(bp); + } + } + } + if (error) + return (error); + } + } + + /* + * If not eof and read aheads are enabled, start one. + * (You need the current block first, so that you have the + * directory offset cookie of the next block.) 
+ */ + if (nfs_numasync > 0 && nmp->nm_readahead > 0 && + (np->n_direofoffset == 0 || + (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) && + !(np->n_flag & NQNFSNONCACHE) && + !incore(vp, lbn + 1)) { + rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, p, operation); + if (rabp) { + if (!ISSET(rabp->b_flags, (B_CACHE|B_DELWRI))) { + SET(rabp->b_flags, (B_READ | B_ASYNC)); + if (nfs_asyncio(rabp, cred)) { + SET(rabp->b_flags, (B_INVAL|B_ERROR)); + rabp->b_error = EIO; + brelse(rabp); + } + } else { + brelse(rabp); + } + } + } + /* + * Make sure we use a signed variant of min() since + * the second term may be negative. + */ + n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on); + break; + default: + printf(" nfs_bioread: type %x unexpected\n",vp->v_type); + break; + }; + + if (n > 0) { + error = uiomove(bp->b_data + on, (int)n, uio); + } + switch (vp->v_type) { + case VREG: + break; + case VLNK: + n = 0; + break; + case VDIR: + if (np->n_flag & NQNFSNONCACHE) + SET(bp->b_flags, B_INVAL); + break; + default: + printf(" nfs_bioread: type %x unexpected\n",vp->v_type); + } + brelse(bp); + } while (error == 0 && uio->uio_resid > 0 && n > 0); + return (error); +} + +/* + * Vnode op for write using bio + */ +int +nfs_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register int biosize; + register struct uio *uio = ap->a_uio; + struct proc *p = uio->uio_procp; + register struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + register struct ucred *cred = ap->a_cred; + int ioflag = ap->a_ioflag; + struct buf *bp; + struct vattr vattr; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + daddr_t lbn; + int bufsize; + int n, on, error = 0, iomode, must_commit; + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_WRITE) + panic("nfs_write mode"); + if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != current_proc()) + panic("nfs_write proc"); +#endif + if (vp->v_type != VREG) + 
return (EIO);
+	if (np->n_flag & NWRITEERR) {
+		np->n_flag &= ~NWRITEERR;
+		return (np->n_error);
+	}
+	if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3)
+		(void)nfs_fsinfo(nmp, vp, cred, p);
+	if (ioflag & (IO_APPEND | IO_SYNC)) {
+		if (np->n_flag & NMODIFIED) {
+			np->n_attrstamp = 0;
+			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
+			if (error)
+				return (error);
+		}
+		if (ioflag & IO_APPEND) {
+			np->n_attrstamp = 0;
+			error = VOP_GETATTR(vp, &vattr, cred, p);
+			if (error)
+				return (error);
+			uio->uio_offset = np->n_size;
+		}
+	}
+	if (uio->uio_offset < 0)
+		return (EINVAL);
+	if (uio->uio_resid == 0)
+		return (0);
+	/*
+	 * Maybe this should be above the vnode op call, but so long as
+	 * file servers have no limits, I don't think it matters
+	 */
+	if (p && uio->uio_offset + uio->uio_resid >
+	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
+		psignal(p, SIGXFSZ);
+		return (EFBIG);
+	}
+	/*
+	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
+	 * will be the same size within a filesystem. nfs_writerpc will
+	 * still use nm_wsize when sizing the rpc's.
+	 */
+	/* due to getblk/vm interactions, use vm page size or less values */
+	biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE);
+
+	do {
+		/*
+		 * Check for a valid write lease.
+ */ + if ((nmp->nm_flag & NFSMNT_NQNFS) && + NQNFS_CKINVALID(vp, np, ND_WRITE)) { + do { + error = nqnfs_getlease(vp, ND_WRITE, cred, p); + } while (error == NQNFS_EXPIRED); + if (error) + return (error); + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE)) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + np->n_brev = np->n_lrev; + } + } + if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) { + iomode = NFSV3WRITE_FILESYNC; + error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit); + if (must_commit) + nfs_clearcommit(vp->v_mount); + return (error); + } + nfsstats.biocache_writes++; + lbn = uio->uio_offset / biosize; + on = uio->uio_offset & (biosize-1); + n = min((unsigned)(biosize - on), uio->uio_resid); +again: + if (uio->uio_offset + n > np->n_size) { + np->n_size = uio->uio_offset + n; + np->n_flag |= NMODIFIED; + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)np->n_size); /* XXX check error */ + } + bufsize = biosize; +#if 0 +/* (removed for UBC) */ + if ((lbn + 1) * biosize > np->n_size) { + bufsize = np->n_size - lbn * biosize; + bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); + } +#endif + bp = nfs_getwriteblk(vp, lbn, bufsize, p, cred, on, n); + if (!bp) + return (EINTR); + if (ISSET(bp->b_flags, B_ERROR)) { + error = bp->b_error; + brelse(bp); + return (error); + } + if (bp->b_wcred == NOCRED) { + crhold(cred); + bp->b_wcred = cred; + } + np->n_flag |= NMODIFIED; + + /* + * Check for valid write lease and get one as required. + * In case getblk() and/or bwrite() delayed us. 
+ */ + if ((nmp->nm_flag & NFSMNT_NQNFS) && + NQNFS_CKINVALID(vp, np, ND_WRITE)) { + do { + error = nqnfs_getlease(vp, ND_WRITE, cred, p); + } while (error == NQNFS_EXPIRED); + if (error) { + brelse(bp); + return (error); + } + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE)) { + brelse(bp); + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + np->n_brev = np->n_lrev; + goto again; + } + } + error = uiomove((char *)bp->b_data + on, n, uio); + if (error) { + SET(bp->b_flags, B_ERROR); + brelse(bp); + return (error); + } + if (bp->b_dirtyend > 0) { + bp->b_dirtyoff = min(on, bp->b_dirtyoff); + bp->b_dirtyend = max((on + n), bp->b_dirtyend); + } else { + bp->b_dirtyoff = on; + bp->b_dirtyend = on + n; + } + if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff || + bp->b_validoff > bp->b_dirtyend) { + bp->b_validoff = bp->b_dirtyoff; + bp->b_validend = bp->b_dirtyend; + } else { + bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff); + bp->b_validend = max(bp->b_validend, bp->b_dirtyend); + } + + /* + * Since this block is being modified, it must be written + * again and not just committed. + */ + CLR(bp->b_flags, B_NEEDCOMMIT); + + /* + * If the lease is non-cachable or IO_SYNC do bwrite(). + */ + if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) { + bp->b_proc = p; + error = VOP_BWRITE(bp); + if (error) + return (error); + if (np->n_flag & NQNFSNONCACHE) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) + return (error); + } + } else if ((n + on) == biosize && + (nmp->nm_flag & NFSMNT_NQNFS) == 0) { + bp->b_proc = (struct proc *)0; + SET(bp->b_flags, B_ASYNC); + (void)nfs_writebp(bp, 0); + } else + bdwrite(bp); + } while (uio->uio_resid > 0 && n > 0); + return (0); +} + +/* + * Get a cache block for writing. The range to be written is + * (off..off+len) within the block. 
This routine ensures that the
+ * block either has no dirty region or that the given range is
+ * contiguous with the existing dirty region.
+ */
+static struct buf *
+nfs_getwriteblk(vp, bn, size, p, cred, off, len)
+	struct vnode *vp;
+	daddr_t bn;
+	int size;
+	struct proc *p;
+	struct ucred *cred;
+	int off, len;
+{
+	struct nfsnode *np = VTONFS(vp);
+	struct buf *bp;
+	int error;
+	struct iovec iov;
+	struct uio uio;
+	off_t boff;
+
+ again:
+	bp = nfs_getcacheblk(vp, bn, size, p, BLK_WRITE);
+	if (!bp)
+		return (NULL);
+	if (bp->b_wcred == NOCRED) {
+		crhold(cred);
+		bp->b_wcred = cred;
+	}
+
+	if ((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend > np->n_size) {
+		bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE);
+	}
+
+	/*
+	 * UBC doesn't (yet) handle partial pages so nfs_biowrite was
+	 * hacked to never bdwrite, to start every little write right away.
+	 * Running IE Avie noticed the performance problem, thus this code,
+	 * which permits those delayed writes by ensuring an initial read
+	 * of the entire page.  The read may hit eof ("short read") but
+	 * we will handle that.
+	 *
+	 * We are quite dependent on the correctness of B_CACHE so check
+	 * that first in case of problems.
+ */ + if (!ISSET(bp->b_flags, B_CACHE) && len < PAGE_SIZE) { + struct nfsnode *np = VTONFS(vp); + + boff = (off_t)bp->b_blkno * DEV_BSIZE; + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + uio.uio_offset = boff; + uio.uio_resid = PAGE_SIZE; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_rw = UIO_READ; + uio.uio_procp = p; + iov.iov_base = bp->b_data; + iov.iov_len = PAGE_SIZE; + error = nfs_readrpc(vp, &uio, cred); + if (error) { + bp->b_error = error; + SET(bp->b_flags, B_ERROR); + printf("nfs_getwriteblk: readrpc returned %d", error); + } + if (uio.uio_resid > 0) + bzero(iov.iov_base, uio.uio_resid); + bp->b_validoff = 0; + bp->b_validend = PAGE_SIZE - uio.uio_resid; + if (np->n_size > boff + bp->b_validend) + bp->b_validend = min(np->n_size - boff, PAGE_SIZE); + bp->b_dirtyoff = 0; + bp->b_dirtyend = 0; + } + + /* + * If the new write will leave a contiguous dirty + * area, just update the b_dirtyoff and b_dirtyend, + * otherwise try to extend the dirty region. + */ + if (bp->b_dirtyend > 0 && + (off > bp->b_dirtyend || (off + len) < bp->b_dirtyoff)) { + off_t start, end; + + boff = (off_t)bp->b_blkno * DEV_BSIZE; + if (off > bp->b_dirtyend) { + start = boff + bp->b_validend; + end = boff + off; + } else { + start = boff + off + len; + end = boff + bp->b_validoff; + } + + /* + * It may be that the valid region in the buffer + * covers the region we want, in which case just + * extend the dirty region. Otherwise we try to + * extend the valid region. + */ + if (end > start) { + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + uio.uio_offset = start; + uio.uio_resid = end - start; + uio.uio_segflg = UIO_SYSSPACE; + uio.uio_rw = UIO_READ; + uio.uio_procp = p; + iov.iov_base = bp->b_data + (start - boff); + iov.iov_len = end - start; + error = nfs_readrpc(vp, &uio, cred); + if (error) { + /* + * If we couldn't read, fall back to writing + * out the old dirty region. 
+ */ + bp->b_proc = p; + if (VOP_BWRITE(bp) == EINTR) + return (NULL); + goto again; + } else { + /* + * The read worked. + */ + if (uio.uio_resid > 0) { + /* + * If there was a short read, + * just zero fill. + */ + bzero(iov.iov_base, + uio.uio_resid); + } + if (off > bp->b_dirtyend) + bp->b_validend = off; + else + bp->b_validoff = off + len; + } + } + + /* + * We now have a valid region which extends up to the + * dirty region which we want. + */ + if (off > bp->b_dirtyend) + bp->b_dirtyend = off; + else + bp->b_dirtyoff = off + len; + } + + return bp; +} + +/* + * Get an nfs cache block. + * Allocate a new one if the block isn't currently in the cache + * and return the block marked busy. If the calling process is + * interrupted by a signal for an interruptible mount point, return + * NULL. + */ +static struct buf * +nfs_getcacheblk(vp, bn, size, p, operation) + struct vnode *vp; + daddr_t bn; + int size; + struct proc *p; + int operation; /* defined in sys/buf.h */ +{ + register struct buf *bp; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + /*due to getblk/vm interractions, use vm page size or less values */ + int biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); + + if (nmp->nm_flag & NFSMNT_INT) { + bp = getblk(vp, bn, size, PCATCH, 0, operation); + while (bp == (struct buf *)0) { + if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) + return ((struct buf *)0); + bp = getblk(vp, bn, size, 0, 2 * hz, operation); + } + } else + bp = getblk(vp, bn, size, 0, 0, operation); + + if( vp->v_type == VREG) + bp->b_blkno = (bn * biosize) / DEV_BSIZE; + + return (bp); +} + +/* + * Flush and invalidate all dirty buffers. If another process is already + * doing the flush, just wait for completion. 
+ */ +int +nfs_vinvalbuf(vp, flags, cred, p, intrflg) + struct vnode *vp; + int flags; + struct ucred *cred; + struct proc *p; + int intrflg; +{ + register struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error = 0, slpflag, slptimeo; + + if ((nmp->nm_flag & NFSMNT_INT) == 0) + intrflg = 0; + if (intrflg) { + slpflag = PCATCH; + slptimeo = 2 * hz; + } else { + slpflag = 0; + slptimeo = 0; + } + /* + * First wait for any other process doing a flush to complete. + */ + while (np->n_flag & NFLUSHINPROG) { + np->n_flag |= NFLUSHWANT; + error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval", + slptimeo); + if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) + return (EINTR); + } + + /* + * Now, flush as required. + */ + np->n_flag |= NFLUSHINPROG; + error = vinvalbuf(vp, flags, cred, p, slpflag, 0); + while (error) { + if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) { + np->n_flag &= ~NFLUSHINPROG; + if (np->n_flag & NFLUSHWANT) { + np->n_flag &= ~NFLUSHWANT; + wakeup((caddr_t)&np->n_flag); + } + return (EINTR); + } + error = vinvalbuf(vp, flags, cred, p, 0, slptimeo); + } + np->n_flag &= ~(NMODIFIED | NFLUSHINPROG); + if (np->n_flag & NFLUSHWANT) { + np->n_flag &= ~NFLUSHWANT; + wakeup((caddr_t)&np->n_flag); + } + (void) ubc_clean(vp, 1); /* get the pages out of vm also */ + return (0); +} + +/* + * Initiate asynchronous I/O. Return an error if no nfsiods are available. + * This is mainly to avoid queueing async I/O requests when the nfsiods + * are all hung on a dead server. + */ +int +nfs_asyncio(bp, cred) + register struct buf *bp; + struct ucred *cred; +{ + struct nfsmount *nmp; + int i; + int gotiod; + int slpflag = 0; + int slptimeo = 0; + int error; + + if (nfs_numasync == 0) + return (EIO); + + nmp = VFSTONFS(bp->b_vp->v_mount); +again: + if (nmp->nm_flag & NFSMNT_INT) + slpflag = PCATCH; + gotiod = FALSE; + + /* + * Find a free iod to process this request. 
+ */ + for (i = 0; i < NFS_MAXASYNCDAEMON; i++) + if (nfs_iodwant[i]) { + /* + * Found one, so wake it up and tell it which + * mount to process. + */ + NFS_DPF(ASYNCIO, + ("nfs_asyncio: waking iod %d for mount %p\n", + i, nmp)); + nfs_iodwant[i] = (struct proc *)0; + nfs_iodmount[i] = nmp; + nmp->nm_bufqiods++; + wakeup((caddr_t)&nfs_iodwant[i]); + gotiod = TRUE; + break; + } + + /* + * If none are free, we may already have an iod working on this mount + * point. If so, it will process our request. + */ + if (!gotiod) { + if (nmp->nm_bufqiods > 0) { + NFS_DPF(ASYNCIO, + ("nfs_asyncio: %d iods are already processing mount %p\n", + nmp->nm_bufqiods, nmp)); + gotiod = TRUE; + } + } + + /* + * If we have an iod which can process the request, then queue + * the buffer. + */ + if (gotiod) { + /* + * Ensure that the queue never grows too large. + */ + while (nmp->nm_bufqlen >= 2*nfs_numasync) { + NFS_DPF(ASYNCIO, + ("nfs_asyncio: waiting for mount %p queue to drain\n", nmp)); + nmp->nm_bufqwant = TRUE; + error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO, + "nfsaio", slptimeo); + if (error) { + if (nfs_sigintr(nmp, NULL, bp->b_proc)) + return (EINTR); + if (slpflag == PCATCH) { + slpflag = 0; + slptimeo = 2 * hz; + } + } + /* + * We might have lost our iod while sleeping, + * so check and loop if nescessary. + */ + if (nmp->nm_bufqiods == 0) { + NFS_DPF(ASYNCIO, + ("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp)); + goto again; + } + } + + if (ISSET(bp->b_flags, B_READ)) { + if (bp->b_rcred == NOCRED && cred != NOCRED) { + crhold(cred); + bp->b_rcred = cred; + } + } else { + SET(bp->b_flags, B_WRITEINPROG); + if (bp->b_wcred == NOCRED && cred != NOCRED) { + crhold(cred); + bp->b_wcred = cred; + } + } + + TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist); + nmp->nm_bufqlen++; + return (0); + } + + /* + * All the iods are busy on other mounts, so return EIO to + * force the caller to process the i/o synchronously. 
+ */ + NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n")); + return (EIO); +} + +/* + * Do an I/O operation to/from a cache block. This may be called + * synchronously or from an nfsiod. + */ +int +nfs_doio(bp, cr, p) + register struct buf *bp; + struct ucred *cr; + struct proc *p; +{ + register struct uio *uiop; + register struct vnode *vp; + struct nfsnode *np; + struct nfsmount *nmp; + int error = 0, diff, len, iomode, must_commit = 0; + struct uio uio; + struct iovec io; + + vp = bp->b_vp; + NFSTRACE(NFSTRC_DIO, vp); + np = VTONFS(vp); + nmp = VFSTONFS(vp->v_mount); + uiop = &uio; + uiop->uio_iov = &io; + uiop->uio_iovcnt = 1; + uiop->uio_segflg = UIO_SYSSPACE; + uiop->uio_procp = p; + + /* + * With UBC, getblk() can return a buf with B_DONE set. + * This indicates that the VM has valid data for that page. + * NFS being stateless, this case poses a problem. + * By definition, the NFS server should always be consulted + * for the data in that page. + * So we choose to clear the B_DONE and to the IO. + * + * XXX revisit this if there is a performance issue. + * XXX In that case, we could play the attribute cache games ... + */ + if (ISSET(bp->b_flags, B_DONE)) { + if (!ISSET(bp->b_flags, B_ASYNC)) + panic("nfs_doio: done and not async"); + CLR(bp->b_flags, B_DONE); + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 256)) | DBG_FUNC_START, + (int)np->n_size, bp->b_blkno * DEV_BSIZE, bp->b_bcount, bp->b_flags, 0); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 257)) | DBG_FUNC_NONE, + bp->b_validoff, bp->b_validend, bp->b_dirtyoff, bp->b_dirtyend, 0); + + /* + * Historically, paging was done with physio, but no more. + */ + if (ISSET(bp->b_flags, B_PHYS)) { + /* + * ...though reading /dev/drum still gets us here. 
+ */ + io.iov_len = uiop->uio_resid = bp->b_bcount; + /* mapping was done by vmapbuf() */ + io.iov_base = bp->b_data; + uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; + if (ISSET(bp->b_flags, B_READ)) { + uiop->uio_rw = UIO_READ; + nfsstats.read_physios++; + error = nfs_readrpc(vp, uiop, cr); + } else { + int com; + + iomode = NFSV3WRITE_DATASYNC; + uiop->uio_rw = UIO_WRITE; + nfsstats.write_physios++; + error = nfs_writerpc(vp, uiop, cr, &iomode, &com); + } + if (error) { + SET(bp->b_flags, B_ERROR); + bp->b_error = error; + } + } else if (ISSET(bp->b_flags, B_READ)) { + io.iov_len = uiop->uio_resid = bp->b_bcount; + io.iov_base = bp->b_data; + uiop->uio_rw = UIO_READ; + switch (vp->v_type) { + case VREG: + uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; + nfsstats.read_bios++; + error = nfs_readrpc(vp, uiop, cr); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 262)) | DBG_FUNC_NONE, + (int)np->n_size, bp->b_blkno * DEV_BSIZE, uiop->uio_resid, error, 0); + + + if (!error) { + bp->b_validoff = 0; + if (uiop->uio_resid) { + /* + * If len > 0, there is a hole in the file and + * no writes after the hole have been pushed to + * the server yet. + * Just zero fill the rest of the valid area. + */ + diff = bp->b_bcount - uiop->uio_resid; + len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE + + diff); + if (len > 0) { + len = min(len, uiop->uio_resid); + bzero((char *)bp->b_data + diff, len); + bp->b_validend = diff + len; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 258)) | DBG_FUNC_NONE, + diff, len, 0, 1, 0); + + } else + bp->b_validend = diff; + } else + bp->b_validend = bp->b_bcount; +#if 1 /* USV + JOE [ */ + if (bp->b_validend < bp->b_bufsize) { + /* + * we're about to release a partial buffer after a read... 
the only + * way we should get here is if this buffer contains the EOF + * before releasing it, we'll zero out to the end of the buffer + * so that if a mmap of this page occurs, we'll see zero's even + * if a ftruncate extends the file in the meantime + */ + bzero((caddr_t)(bp->b_data + bp->b_validend), (bp->b_bufsize - bp->b_validend)); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 258)) | DBG_FUNC_NONE, + bp->b_validend, (bp->b_bufsize - bp->b_validend), 0, 2, 0); + } +#endif /* ] USV + JOE */ + } + if (p && (vp->v_flag & VTEXT) && + (((nmp->nm_flag & NFSMNT_NQNFS) && + NQNFS_CKINVALID(vp, np, ND_READ) && + np->n_lrev != np->n_brev) || + (!(nmp->nm_flag & NFSMNT_NQNFS) && + np->n_mtime != np->n_vattr.va_mtime.tv_sec))) { + uprintf("Process killed due to text file modification\n"); + psignal(p, SIGKILL); + p->p_flag |= P_NOSWAP; + } + break; + case VLNK: + uiop->uio_offset = (off_t)0; + nfsstats.readlink_bios++; + error = nfs_readlinkrpc(vp, uiop, cr); + break; + case VDIR: + nfsstats.readdir_bios++; + uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ; + if (!(nmp->nm_flag & NFSMNT_NFSV3)) + nmp->nm_flag &= ~NFSMNT_RDIRPLUS; /* dk@farm.org */ + if (nmp->nm_flag & NFSMNT_RDIRPLUS) { + error = nfs_readdirplusrpc(vp, uiop, cr); + if (error == NFSERR_NOTSUPP) + nmp->nm_flag &= ~NFSMNT_RDIRPLUS; + } + if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0) + error = nfs_readdirrpc(vp, uiop, cr); + break; + default: + printf("nfs_doio: type %x unexpected\n",vp->v_type); + break; + }; + if (error) { + SET(bp->b_flags, B_ERROR); + bp->b_error = error; + } + } else { + if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size) + bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE); + + if (bp->b_dirtyend > bp->b_dirtyoff) { + + io.iov_len = uiop->uio_resid = bp->b_dirtyend + - bp->b_dirtyoff; + uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + + bp->b_dirtyoff; + io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; + uiop->uio_rw = UIO_WRITE; + + nfsstats.write_bios++; + 
if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC) + iomode = NFSV3WRITE_UNSTABLE; + else + iomode = NFSV3WRITE_FILESYNC; + SET(bp->b_flags, B_WRITEINPROG); + error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit); + if (!error && iomode == NFSV3WRITE_UNSTABLE) + SET(bp->b_flags, B_NEEDCOMMIT); + else + CLR(bp->b_flags, B_NEEDCOMMIT); + CLR(bp->b_flags, B_WRITEINPROG); + + /* + * For an interrupted write, the buffer is still valid + * and the write hasn't been pushed to the server yet, + * so we can't set B_ERROR and report the interruption + * by setting B_EINTR. For the B_ASYNC case, B_EINTR + * is not relevant, so the rpc attempt is essentially + * a noop. For the case of a V3 write rpc not being + * committed to stable storage, the block is still + * dirty and requires either a commit rpc or another + * write rpc with iomode == NFSV3WRITE_FILESYNC before + * the block is reused. This is indicated by setting + * the B_DELWRI and B_NEEDCOMMIT flags. + */ + if (error == EINTR + || (!error && (bp->b_flags & B_NEEDCOMMIT))) { + int s; + + CLR(bp->b_flags, (B_INVAL|B_NOCACHE)); + SET(bp->b_flags, B_DELWRI); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 261)) | DBG_FUNC_NONE, + bp->b_validoff, bp->b_validend, bp->b_bufsize, bp->b_bcount, 0); + + /* + * Since for the B_ASYNC case, nfs_bwrite() has reassigned the + * buffer to the clean list, we have to reassign it back to the + * dirty one. Ugh. + */ + if (ISSET(bp->b_flags, B_ASYNC)) { + s = splbio(); + reassignbuf(bp, vp); + splx(s); + } else { + SET(bp->b_flags, B_EINTR); + } + } else { + if (error) { + SET(bp->b_flags, B_ERROR); + bp->b_error = np->n_error = error; + np->n_flag |= NWRITEERR; + } + bp->b_dirtyoff = bp->b_dirtyend = 0; + +#if 1 /* JOE */ + /* + * validoff and validend represent the real data present in this buffer + * if validoff is non-zero, than we have to invalidate the buffer and kill + * the page when biodone is called... 
the same is also true when validend + * doesn't extend all the way to the end of the buffer and validend doesn't + * equate to the current EOF... eventually we need to deal with this in a + * more humane way (like keeping the partial buffer without making it immediately + * available to the VM page cache). + */ + if (bp->b_validoff) + SET(bp->b_flags, B_INVAL); + else + if (bp->b_validend < bp->b_bufsize) { + if ((((off_t)bp->b_blkno * (off_t)DEV_BSIZE) + bp->b_validend) == np->n_size) { + bzero((caddr_t)(bp->b_data + bp->b_validend), (bp->b_bufsize - bp->b_validend)); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 259)) | DBG_FUNC_NONE, + bp->b_validend, (bp->b_bufsize - bp->b_validend), 0, 0, 0);; + } + else + SET(bp->b_flags, B_INVAL); + } +#endif + } + + } else { + +#if 1 /* JOE */ + if (bp->b_validoff) + SET(bp->b_flags, B_INVAL); + else if (bp->b_validend < bp->b_bufsize) { + if ((((off_t)bp->b_blkno * (off_t)DEV_BSIZE) + bp->b_validend) != np->n_size) + SET(bp->b_flags, B_INVAL); + } + if (bp->b_flags & B_INVAL) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 260)) | DBG_FUNC_NONE, + bp->b_validoff, bp->b_validend, bp->b_bufsize, bp->b_bcount, 0); + } +#endif + bp->b_resid = 0; + biodone(bp); + NFSTRACE(NFSTRC_DIO_DONE, vp); + return (0); + } + } + bp->b_resid = uiop->uio_resid; + if (must_commit) + nfs_clearcommit(vp->v_mount); + + if (bp->b_flags & B_INVAL) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 260)) | DBG_FUNC_NONE, + bp->b_validoff, bp->b_validend, bp->b_bufsize, bp->b_bcount, 0); + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 256)) | DBG_FUNC_END, + bp->b_validoff, bp->b_validend, bp->b_bcount, error, 0); + + biodone(bp); + NFSTRACE(NFSTRC_DIO_DONE, vp); + return (error); +} diff --git a/bsd/nfs/nfs_boot.c b/bsd/nfs/nfs_boot.c new file mode 100644 index 000000000..f7849b02d --- /dev/null +++ b/bsd/nfs/nfs_boot.c @@ -0,0 +1,984 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1994 Adam Glass, Gordon Ross + * All rights reserved. + * + * This software was developed by the Computer Systems Engineering group + * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and + * contributed to Berkeley. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Lawrence Berkeley Laboratory and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * History: + * 14-March-97 Dieter Siegmund (dieter@next.com) + * - Use BOOTP instead of RARP to get the IP address at boot time + * + * 23-May-97 Umesh Vaishampayan (umeshv@apple.com) + * - Added the ability to mount "/private" separately. 
+ * + * 30-May-97 Dieter Siegmund (dieter@next.com) + * - Clear out the ireq structure before using it to prevent + * our sending using a bogus source IP address, we should use + * an IP address of all zeroes + * - Right after BOOTP, get the correct netmask using AUTONETMASK + * 18-Jul-97 Dieter Siegmund (dieter@apple.com) + * - we can't restrict the netmask until we have a default route, + * removed AUTONETMASK call (ifdef'd out) + * 5-Aug-97 Dieter Siegmund (dieter@apple.com) + * - use the default route from the bpwhoami call, enabled autonetmask + * again + * 19-Feb-1999 Dieter Siegmund (dieter@apple.com) + * - use new BOOTP routine to get the subnet mask and router + * and stop using SIOCAUTOADDR + * - don't bother mounting private separately if it's not + * specified or not required because they are substrings of + * one another ie. root=host:/A and private=host:/A/private + * - allow the root path to be specified in the boot variable + * "rp" (AKA "rootpath") + * 19-Jul-1999 Dieter Siegmund (dieter@apple.com) + * - replaced big automatic arrays with MALLOC'd data + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "ether.h" + +#include + +extern char *strchr(const char *str, int ch); + +#if NETHER == 0 + +int nfs_boot_init(nd, procp) + struct nfs_diskless *nd; + struct proc *procp; +{ + panic("nfs_boot_init: no ether"); +} + +#else /* NETHER */ + +/* + * Support for NFS diskless booting, specifically getting information + * about where to boot from, what pathnames, etc. + * + * This implememtation uses RARP and the bootparam RPC. + * We are forced to implement RPC anyway (to get file handles) + * so we might as well take advantage of it for bootparam too. 
+ * + * The diskless boot sequence goes as follows: + * (1) Use RARP to get our interface address + * (2) Use RPC/bootparam/whoami to get our hostname, + * our IP address, and the server's IP address. + * (3) Use RPC/bootparam/getfile to get the root path + * (4) Use RPC/mountd to get the root file handle + * (5) Use RPC/bootparam/getfile to get the swap path + * (6) Use RPC/mountd to get the swap file handle + * + * (This happens to be the way Sun does it too.) + */ + +extern int bootp(struct ifnet * ifp, struct in_addr * iaddr_p, int max_retry, + struct in_addr * netmask_p, struct in_addr * router_p, + struct proc * procp); + +/* bootparam RPC */ +static int bp_whoami __P((struct sockaddr_in *bpsin, + struct in_addr *my_ip, struct in_addr *gw_ip)); +static int bp_getfile __P((struct sockaddr_in *bpsin, char *key, + struct sockaddr_in *mdsin, char *servname, char *path)); + +static boolean_t path_getfile __P((char * image_path, + struct sockaddr_in * sin_p, + char * serv_name, char * pathname)); + +static __inline__ +u_long iptohl(struct in_addr ip) +{ + return (ntohl(ip.s_addr)); +} + +static __inline__ boolean_t +same_subnet(struct in_addr addr1, struct in_addr addr2, struct in_addr mask) +{ + u_long m = iptohl(mask); + if ((iptohl(addr1) & m) != (iptohl(addr2) & m)) + return (FALSE); + return (TRUE); +} + +/* mountd RPC */ +static int md_mount __P((struct sockaddr_in *mdsin, char *path, + u_char *fh)); + +/* other helpers */ +static void get_file_handle __P((char *pathname, struct nfs_dlmount *ndmntp)); + +#define IP_FORMAT "%d.%d.%d.%d" +#define IP_CH(ip) ((u_char *)ip) +#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] +/* + * Called with an empty nfs_diskless struct to be filled in. 
+ */ +int +nfs_boot_init(nd, procp) + struct nfs_diskless *nd; + struct proc *procp; +{ + char * booter_path = NULL; + boolean_t do_bpwhoami = TRUE; + boolean_t do_bpgetfile = TRUE; + struct ifreq ireq; + struct in_addr my_ip; + struct sockaddr_in bp_sin; + struct sockaddr_in *sin; + struct ifnet *ifp; + struct in_addr gw_ip; + struct socket *so; + struct in_addr my_netmask; + int error; + char * root_path = NULL; + + MALLOC(booter_path, char *, MAXPATHLEN, M_TEMP, M_WAITOK); + MALLOC(root_path, char *, MAXPATHLEN, M_TEMP, M_WAITOK); + + /* booter-supplied path */ + if (!PE_parse_boot_arg("rp", booter_path) + && !PE_parse_boot_arg("rootpath", booter_path)) { + booter_path[0] = 0; + } + + root_path[0] = 0; + + gw_ip.s_addr = 0; + + /* clear out the request structure */ + bzero(&ireq, sizeof(ireq)); + + /* + * Find an interface, rarp for its ip address, stuff it, the + * implied broadcast addr, and netmask into a nfs_diskless struct. + * + * This was moved here from nfs_vfsops.c because this procedure + * would be quite different if someone decides to write (i.e.) a + * BOOTP version of this file (might not use RARP, etc.) + */ + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + /* + * Find a network interface. + */ + ifp = NULL; + { /* if the root device is set, use it */ + extern char rootdevice[]; + if (rootdevice[0]) + ifp = ifunit(rootdevice); + } + if (ifp == NULL) { /* search for network device */ + /* for (ifp = ifnet; ifp; ifp = ifp->if_next)*/ + TAILQ_FOREACH(ifp, &ifnet, if_link) + if ((ifp->if_flags & + (IFF_LOOPBACK|IFF_POINTOPOINT)) == 0) + break; + } + if (ifp == NULL) + panic("nfs_boot: no suitable interface"); + sprintf(ireq.ifr_name, "%s%d", ifp->if_name, ifp->if_unit); + printf("nfs_boot: using network interface '%s'\n", ireq.ifr_name); + + /* + * Bring up the interface. 
+ */ + if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0)) != 0) + panic("nfs_boot: socreate, error=%d", error); + ireq.ifr_flags = ifp->if_flags | IFF_UP; + error = ifioctl(so, SIOCSIFFLAGS, (caddr_t)&ireq, procp); + if (error) + panic("nfs_boot: SIFFLAGS, error=%d", error); + +#define DO_BOOTP +#ifdef DO_BOOTP + { /* use BOOTP to retrieve IP address, netmask and router */ + struct sockaddr_in sockin; + struct in_addr router; + struct in_addr netmask; + + my_ip.s_addr = 0; + netmask.s_addr = 0; + router.s_addr = 0; + sockin.sin_family = AF_INET; + sockin.sin_len = sizeof(sockin); + sockin.sin_addr.s_addr = 0; +#define RETRY_COUNT 32 + while ((error = bootp(ifp, &my_ip, RETRY_COUNT, + &netmask, &router, procp))) { + if (error == ETIMEDOUT) + printf("nfs_boot: BOOTP timed out, retrying...\n"); + + else { + printf("nfs_boot: bootp() failed, error = %d\n", error); + panic("nfs_boot"); + } + } + /* clear the netmask */ + ((struct sockaddr_in *)&ireq.ifr_addr)->sin_addr.s_addr = 0; + error = ifioctl(so, SIOCSIFNETMASK, (caddr_t)&ireq, procp); + if (error) + printf("nfs_boot: SIOCSIFNETMASK failed: %d\n", error); + + if (netmask.s_addr) { + /* set our new subnet mask */ + sockin.sin_addr = netmask; + *((struct sockaddr_in *)&ireq.ifr_addr) = sockin; + error = ifioctl(so, SIOCSIFNETMASK, (caddr_t)&ireq, procp); + if (error) + printf("nfs_boot: SIOCSIFNETMASK failed: %d\n", error); + } + + /* set our address */ + sockin.sin_addr = my_ip; + *((struct sockaddr_in *)&ireq.ifr_addr) = sockin; + error = ifioctl(so, SIOCSIFADDR, (caddr_t)&ireq, procp); + if (error) { + printf("SIOCSIFADDR failed: %d\n", error); + panic("nfs_boot.c"); + } + printf("nfs_boot: IP address " IP_FORMAT, IP_LIST(&my_ip)); + if (netmask.s_addr) + printf(" netmask " IP_FORMAT, IP_LIST(&netmask)); + if (router.s_addr) { + gw_ip = router; + printf(" router " IP_FORMAT, IP_LIST(&router)); + } + printf("\n"); + } +#else + /* + * Do RARP for the interface address. 
+ */ + if ((error = revarpwhoami(&my_ip, ifp)) != 0) + panic("revarp failed, error=%d", error); + printf("nfs_boot: client_addr=0x%x\n", ntohl(my_ip.s_addr)); + + /* + * Do enough of ifconfig(8) so that the chosen interface + * can talk to the servers. (just set the address) + */ + sin = (struct sockaddr_in *)&ireq.ifr_addr; + bzero((caddr_t)sin, sizeof(*sin)); + sin->sin_len = sizeof(*sin); + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = my_ip.s_addr; + error = ifioctl(so, SIOCSIFADDR, (caddr_t)&ireq, procp); + if (error) + panic("nfs_boot: set if addr, error=%d", error); +#endif DO_BOOTP + + /* need netmask to determine whether NFS server local */ + sin = (struct sockaddr_in *)&ireq.ifr_addr; + bzero((caddr_t)sin, sizeof(*sin)); + sin->sin_len = sizeof(*sin); + sin->sin_family = AF_INET; + error = ifioctl(so, SIOCGIFNETMASK, (caddr_t)&ireq, procp); + if (error) + panic("nfs_boot: SIOCGIFNETMASK error=%d", error); + my_netmask = sin->sin_addr; + + soclose(so); + + /* check for a booter-specified path */ + if (booter_path[0]) { + nd->nd_root.ndm_saddr.sin_addr.s_addr = 0; + nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; + if (path_getfile(booter_path, &nd->nd_root.ndm_saddr, + nd->nd_root.ndm_host, root_path)) { + do_bpgetfile = FALSE; + printf("nfs_boot: using booter-supplied path '%s'\n", + booter_path); + if (same_subnet(nd->nd_root.ndm_saddr.sin_addr, + my_ip, my_netmask) + || gw_ip.s_addr) { + do_bpwhoami = FALSE; + } + else { + /* do bpwhoami to attempt to get the router */ + } + } + else { + printf("nfs_boot: ignoring badly formed bootpath '%s'\n", + booter_path); + } + } + + if (do_bpwhoami) { + /* + * Get client name and gateway address. + * RPC: bootparam/whoami + * Use the old broadcast address for the WHOAMI + * call because we do not yet know our netmask. + * The server address returned by the WHOAMI call + * is used for all subsequent booptaram RPCs. 
+ */ + bzero((caddr_t)&bp_sin, sizeof(bp_sin)); + bp_sin.sin_len = sizeof(bp_sin); + bp_sin.sin_family = AF_INET; + bp_sin.sin_addr.s_addr = INADDR_BROADCAST; + hostnamelen = MAXHOSTNAMELEN; + + { /* bpwhoami also returns gateway IP address */ + + struct in_addr router; + + router.s_addr = 0; + error = bp_whoami(&bp_sin, &my_ip, &router); + if (error) { + printf("nfs_boot: bootparam whoami, error=%d", error); + panic("nfs_boot: bootparam whoami\n"); + } + /* if not already set by BOOTP, use the one from BPWHOAMI */ + if (gw_ip.s_addr == 0) + gw_ip = router; + } + printf("nfs_boot: BOOTPARAMS server " IP_FORMAT "\n", + IP_LIST(&bp_sin.sin_addr)); + printf("nfs_boot: hostname %s\n", hostname); + } +#define NFS_BOOT_GATEWAY 1 +#ifdef NFS_BOOT_GATEWAY + /* + * DWS 2/18/1999 + * The comment below does not apply to gw_ip discovered + * via BOOTP (see DO_BOOTP loop above) since BOOTP servers + * are supposed to be more trustworthy. + */ + /* + * XXX - This code is conditionally compiled only because + * many bootparam servers (in particular, SunOS 4.1.3) + * always set the gateway address to their own address. + * The bootparam server is not necessarily the gateway. + * We could just believe the server, and at worst you would + * need to delete the incorrect default route before adding + * the correct one, but for simplicity, ignore the gateway. + * If your server is OK, you can turn on this option. + * + * If the gateway address is set, add a default route. + * (The mountd RPCs may go across a gateway.) 
+ */ + if (gw_ip.s_addr) { + struct sockaddr dst, gw, mask; + /* Destination: (default) */ + bzero((caddr_t)&dst, sizeof(dst)); + dst.sa_len = sizeof(dst); + dst.sa_family = AF_INET; + /* Gateway: */ + bzero((caddr_t)&gw, sizeof(gw)); + sin = (struct sockaddr_in *)&gw; + sin->sin_len = sizeof(gw); + sin->sin_family = AF_INET; + sin->sin_addr.s_addr = gw_ip.s_addr; + /* Mask: (zero length) */ + bzero(&mask, sizeof(mask)); + printf("nfs_boot: adding default route " IP_FORMAT "\n", + IP_LIST(&gw_ip)); + /* add, dest, gw, mask, flags, 0 */ + error = rtrequest(RTM_ADD, &dst, (struct sockaddr *)&gw, + &mask, (RTF_UP | RTF_GATEWAY | RTF_STATIC), NULL); + if (error) + printf("nfs_boot: add route, error=%d\n", error); + } +#endif + if (do_bpgetfile) { + error = bp_getfile(&bp_sin, "root", &nd->nd_root.ndm_saddr, + nd->nd_root.ndm_host, root_path); + if (error) { + printf("nfs_boot: bootparam get root: %d\n", error); + panic("nfs_boot: bootparam get root"); + } + } + + get_file_handle(root_path, &nd->nd_root); + +#if !defined(NO_MOUNT_PRIVATE) + if (do_bpgetfile) { /* get private path */ + char * private_path = NULL; + + MALLOC(private_path, char *, MAXPATHLEN, M_TEMP, M_WAITOK); + error = bp_getfile(&bp_sin, "private", + &nd->nd_private.ndm_saddr, + nd->nd_private.ndm_host, private_path); + if (!error) { + char * check_path = NULL; + + MALLOC(check_path, char *, MAXPATHLEN, M_TEMP, M_WAITOK); + sprintf(check_path, "%s/private", root_path); + if ((nd->nd_root.ndm_saddr.sin_addr.s_addr + == nd->nd_private.ndm_saddr.sin_addr.s_addr) + && (strcmp(check_path, private_path) == 0)) { + /* private path is prefix of root path, don't mount */ + nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; + } + else { + get_file_handle(private_path, &nd->nd_private); + } + _FREE(check_path, M_TEMP); + } + else { + /* private key not defined, don't mount */ + nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; + } + _FREE(private_path, M_TEMP); + } +#endif NO_MOUNT_PRIVATE + 
thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + _FREE(booter_path, M_TEMP); + _FREE(root_path, M_TEMP); + return (0); +} + +int +inet_aton(char * cp, struct in_addr * pin) +{ + u_char * b = (char *)pin; + int i; + char * p; + + for (p = cp, i = 0; i < 4; i++) { + u_long l = strtoul(p, 0, 0); + if (l > 255) + return (FALSE); + b[i] = l; + p = strchr(p, '.'); + if (i < 3 && p == NULL) + return (FALSE); + p++; + } + return (TRUE); +} + +/* + * Function: parse_image_path + * Purpose: + * Parse a string of the form "::" into + * the given ip address and host and pathnames. + * Example: + * "17.202.16.17:seaport:/release/.images/Image9/CurrentHera" + */ +static __inline__ boolean_t +parse_image_path(char * c, struct in_addr * iaddr_p, char * hostname, + char * pathname) +{ + char * d; + char * p; +#define TMP_SIZE 128 + char tmp[TMP_SIZE]; + + p = strchr(c, ':'); + if (p == NULL) + return (FALSE); + if ((p - c) >= TMP_SIZE) + return (FALSE); + strncpy(tmp, c, p - c); + tmp[p - c] = 0; + if (inet_aton(tmp, iaddr_p) != 1) + return (FALSE); + p++; + d = strchr(p, ':'); + if (d == NULL) + return (FALSE); + strncpy(hostname, p, d - p); + hostname[d - p] = 0; + d++; + strcpy(pathname, d); + return (TRUE); +} + +static boolean_t +path_getfile(char * image_path, struct sockaddr_in * sin_p, + char * serv_name, char * pathname) +{ + bzero((caddr_t)sin_p, sizeof(*sin_p)); + sin_p->sin_len = sizeof(*sin_p); + sin_p->sin_family = AF_INET; + if (parse_image_path(image_path, &sin_p->sin_addr, serv_name, pathname) + == FALSE) + return (FALSE); + return (TRUE); +} + +static void +get_file_handle(pathname, ndmntp) + char *pathname; /* path on server */ + struct nfs_dlmount *ndmntp; /* output */ +{ + char *sp, *dp, *endp; + int error; + + /* + * Get file handle for "key" (root or swap) + * using RPC to mountd/mount + */ + error = md_mount(&ndmntp->ndm_saddr, pathname, ndmntp->ndm_fh); + if (error) + panic("nfs_boot: mountd, error=%d", error); + + /* Construct remote path (for 
getmntinfo(3)) */ + dp = ndmntp->ndm_host; + endp = dp + MNAMELEN - 1; + dp += strlen(dp); + *dp++ = ':'; + for (sp = pathname; *sp && dp < endp;) + *dp++ = *sp++; + *dp = '\0'; + +} + + +/* + * Get an mbuf with the given length, and + * initialize the pkthdr length field. + */ +static struct mbuf * +m_get_len(int msg_len) +{ + struct mbuf *m; + m = m_gethdr(M_WAIT, MT_DATA); + if (m == NULL) + return NULL; + if (msg_len > MHLEN) { + if (msg_len > MCLBYTES) + panic("nfs_boot: msg_len > MCLBYTES"); + MCLGET(m, M_WAIT); + if (m == NULL) + return NULL; + } + m->m_len = msg_len; + m->m_pkthdr.len = m->m_len; + return (m); +} + + +/* + * String representation for RPC. + */ +struct rpc_string { + u_long len; /* length without null or padding */ + u_char data[4]; /* data (longer, of course) */ + /* data is padded to a long-word boundary */ +}; +/* Compute space used given string length. */ +#define RPC_STR_SIZE(slen) (4 + ((slen + 3) & ~3)) + +/* + * Inet address in RPC messages + * (Note, really four longs, NOT chars. Blech.) + */ +struct bp_inaddr { + u_long atype; + long addr[4]; +}; + + +/* + * RPC: bootparam/whoami + * Given client IP address, get: + * client name (hostname) + * domain name (domainname) + * gateway address + * + * The hostname and domainname are set here for convenience. + * + * Note - bpsin is initialized to the broadcast address, + * and will be replaced with the bootparam server address + * after this call is complete. Have to use PMAP_PROC_CALL + * to make sure we get responses only from a servers that + * know about us (don't want to broadcast a getport call). 
+ */ +static int +bp_whoami(bpsin, my_ip, gw_ip) + struct sockaddr_in *bpsin; + struct in_addr *my_ip; + struct in_addr *gw_ip; +{ + /* RPC structures for PMAPPROC_CALLIT */ + struct whoami_call { + u_long call_prog; + u_long call_vers; + u_long call_proc; + u_long call_arglen; + struct bp_inaddr call_ia; + } *call; + + struct rpc_string *str; + struct bp_inaddr *bia; + struct mbuf *m; + struct sockaddr_in *sin; + int error, msg_len; + int cn_len, dn_len; + u_char *p; + long *lp; + + /* + * Get message buffer of sufficient size. + */ + msg_len = sizeof(*call); + m = m_get_len(msg_len); + if (m == NULL) + return ENOBUFS; + + /* + * Build request message for PMAPPROC_CALLIT. + */ + call = mtod(m, struct whoami_call *); + call->call_prog = htonl(BOOTPARAM_PROG); + call->call_vers = htonl(BOOTPARAM_VERS); + call->call_proc = htonl(BOOTPARAM_WHOAMI); + call->call_arglen = htonl(sizeof(struct bp_inaddr)); + + /* client IP address */ + call->call_ia.atype = htonl(1); + p = (u_char*)my_ip; + lp = call->call_ia.addr; + *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; + + /* RPC: portmap/callit */ + bpsin->sin_port = htons(PMAPPORT); + + error = krpc_call(bpsin, PMAPPROG, PMAPVERS, + PMAPPROC_CALLIT, &m, &sin); + if (error) + return error; + + /* + * Parse result message. + */ + msg_len = m->m_len; + lp = mtod(m, long *); + + /* bootparam server port (also grab from address). 
*/ + if (msg_len < sizeof(*lp)) + goto bad; + msg_len -= sizeof(*lp); + bpsin->sin_port = htons((short)ntohl(*lp++)); + bpsin->sin_addr.s_addr = sin->sin_addr.s_addr; + + /* length of encapsulated results */ + if (msg_len < (ntohl(*lp) + sizeof(*lp))) + goto bad; + msg_len = ntohl(*lp++); + p = (char*)lp; + + /* client name */ + if (msg_len < sizeof(*str)) + goto bad; + str = (struct rpc_string *)p; + cn_len = ntohl(str->len); + if (msg_len < cn_len) + goto bad; + if (cn_len >= MAXHOSTNAMELEN) + goto bad; + bcopy(str->data, hostname, cn_len); + hostname[cn_len] = '\0'; + hostnamelen = cn_len; + p += RPC_STR_SIZE(cn_len); + msg_len -= RPC_STR_SIZE(cn_len); + + /* domain name */ + if (msg_len < sizeof(*str)) + goto bad; + str = (struct rpc_string *)p; + dn_len = ntohl(str->len); + if (msg_len < dn_len) + goto bad; + if (dn_len >= MAXHOSTNAMELEN) + goto bad; + bcopy(str->data, domainname, dn_len); + domainname[dn_len] = '\0'; + domainnamelen = dn_len; + p += RPC_STR_SIZE(dn_len); + msg_len -= RPC_STR_SIZE(dn_len); + + /* gateway address */ + if (msg_len < sizeof(*bia)) + goto bad; + bia = (struct bp_inaddr *)p; + if (bia->atype != htonl(1)) + goto bad; + p = (u_char*)gw_ip; + *p++ = ntohl(bia->addr[0]); + *p++ = ntohl(bia->addr[1]); + *p++ = ntohl(bia->addr[2]); + *p++ = ntohl(bia->addr[3]); + goto out; + +bad: + printf("nfs_boot: bootparam_whoami: bad reply\n"); + error = EBADRPC; + +out: + if (sin) + FREE(sin, M_SONAME); + + m_freem(m); + return(error); +} + + +/* + * RPC: bootparam/getfile + * Given client name and file "key", get: + * server name + * server IP address + * server pathname + */ +static int +bp_getfile(bpsin, key, md_sin, serv_name, pathname) + struct sockaddr_in *bpsin; + char *key; + struct sockaddr_in *md_sin; + char *serv_name; + char *pathname; +{ + struct rpc_string *str; + struct mbuf *m; + struct bp_inaddr *bia; + struct sockaddr_in *sin; + u_char *p, *q; + int error, msg_len; + int cn_len, key_len, sn_len, path_len; + + /* + * Get message 
buffer of sufficient size. + */ + cn_len = hostnamelen; + key_len = strlen(key); + msg_len = 0; + msg_len += RPC_STR_SIZE(cn_len); + msg_len += RPC_STR_SIZE(key_len); + m = m_get_len(msg_len); + if (m == NULL) + return ENOBUFS; + + /* + * Build request message. + */ + p = mtod(m, u_char *); + bzero(p, msg_len); + /* client name (hostname) */ + str = (struct rpc_string *)p; + str->len = htonl(cn_len); + bcopy(hostname, str->data, cn_len); + p += RPC_STR_SIZE(cn_len); + /* key name (root or swap) */ + str = (struct rpc_string *)p; + str->len = htonl(key_len); + bcopy(key, str->data, key_len); + + /* RPC: bootparam/getfile */ + error = krpc_call(bpsin, BOOTPARAM_PROG, BOOTPARAM_VERS, + BOOTPARAM_GETFILE, &m, NULL); + if (error) + return error; + + /* + * Parse result message. + */ + p = mtod(m, u_char *); + msg_len = m->m_len; + + /* server name */ + if (msg_len < sizeof(*str)) + goto bad; + str = (struct rpc_string *)p; + sn_len = ntohl(str->len); + if (msg_len < sn_len) + goto bad; + if (sn_len >= MNAMELEN) + goto bad; + bcopy(str->data, serv_name, sn_len); + serv_name[sn_len] = '\0'; + p += RPC_STR_SIZE(sn_len); + msg_len -= RPC_STR_SIZE(sn_len); + + /* server IP address (mountd) */ + if (msg_len < sizeof(*bia)) + goto bad; + bia = (struct bp_inaddr *)p; + if (bia->atype != htonl(1)) + goto bad; + sin = md_sin; + bzero((caddr_t)sin, sizeof(*sin)); + sin->sin_len = sizeof(*sin); + sin->sin_family = AF_INET; + q = (u_char*) &sin->sin_addr; + *q++ = ntohl(bia->addr[0]); + *q++ = ntohl(bia->addr[1]); + *q++ = ntohl(bia->addr[2]); + *q++ = ntohl(bia->addr[3]); + p += sizeof(*bia); + msg_len -= sizeof(*bia); + + /* server pathname */ + if (msg_len < sizeof(*str)) + goto bad; + str = (struct rpc_string *)p; + path_len = ntohl(str->len); + if (msg_len < path_len) + goto bad; + if (path_len >= MAXPATHLEN) + goto bad; + bcopy(str->data, pathname, path_len); + pathname[path_len] = '\0'; + goto out; + +bad: + printf("nfs_boot: bootparam_getfile: bad reply\n"); + error = 
EBADRPC; + +out: + m_freem(m); + return(0); +} + + +/* + * RPC: mountd/mount + * Given a server pathname, get an NFS file handle. + * Also, sets sin->sin_port to the NFS service port. + */ +static int +md_mount(mdsin, path, fhp) + struct sockaddr_in *mdsin; /* mountd server address */ + char *path; + u_char *fhp; +{ + /* The RPC structures */ + struct rpc_string *str; + struct rdata { + u_long errno; + u_char fh[NFSX_V2FH]; + } *rdata; + struct mbuf *m; + int error, mlen, slen; + + /* Get port number for MOUNTD. */ + error = krpc_portmap(mdsin, RPCPROG_MNT, RPCMNT_VER1, + &mdsin->sin_port); + if (error) return error; + + slen = strlen(path); + mlen = RPC_STR_SIZE(slen); + + m = m_get_len(mlen); + if (m == NULL) + return ENOBUFS; + str = mtod(m, struct rpc_string *); + str->len = htonl(slen); + bcopy(path, str->data, slen); + + /* Do RPC to mountd. */ + error = krpc_call(mdsin, RPCPROG_MNT, RPCMNT_VER1, + RPCMNT_MOUNT, &m, NULL); + if (error) + return error; /* message already freed */ + + mlen = m->m_len; + if (mlen < sizeof(*rdata)) + goto bad; + rdata = mtod(m, struct rdata *); + error = ntohl(rdata->errno); + if (error) + goto bad; + bcopy(rdata->fh, fhp, NFSX_V2FH); + + /* Set port number for NFS use. */ + error = krpc_portmap(mdsin, NFS_PROG, NFS_VER2, + &mdsin->sin_port); + goto out; + +bad: + error = EBADRPC; + +out: + m_freem(m); + return error; +} + +#endif /* NETHER */ diff --git a/bsd/nfs/nfs_node.c b/bsd/nfs/nfs_node.c new file mode 100644 index 000000000..383428171 --- /dev/null +++ b/bsd/nfs/nfs_node.c @@ -0,0 +1,451 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_node.c 8.6 (Berkeley) 5/22/95 + * FreeBSD-Id: nfs_node.c,v 1.22 1997/10/28 14:06:20 bde Exp $ + */ + + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef MALLOC_DEFINE +static MALLOC_DEFINE(M_NFSNODE, "NFS node", "NFS vnode private part"); +#endif + +LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl; +u_long nfsnodehash; + +#define TRUE 1 +#define FALSE 0 + +/* + * Initialize hash links for nfsnodes + * and build nfsnode free list. + */ +void +nfs_nhinit() +{ + nfsnodehashtbl = hashinit(desiredvnodes, M_NFSNODE, &nfsnodehash); +} + +/* + * Compute an entry in the NFS hash table structure + */ +u_long +nfs_hash(fhp, fhsize) + register nfsfh_t *fhp; + int fhsize; +{ + register u_char *fhpp; + register u_long fhsum; + register int i; + + fhpp = &fhp->fh_bytes[0]; + fhsum = 0; + for (i = 0; i < fhsize; i++) + fhsum += *fhpp++; + return (fhsum); +} + +/* + * Look up a vnode/nfsnode by file handle. + * Callers must check for mount points!! + * In all cases, a pointer to a + * nfsnode structure is returned. 
+ */ +int nfs_node_hash_lock; + +int +nfs_nget(mntp, fhp, fhsize, npp) + struct mount *mntp; + register nfsfh_t *fhp; + int fhsize; + struct nfsnode **npp; +{ + struct proc *p = current_proc(); /* XXX */ + struct nfsnode *np; + struct nfsnodehashhead *nhpp; + register struct vnode *vp; + struct vnode *nvp; + int error; + + /* Check for unmount in progress */ + if (mntp->mnt_kern_flag & MNTK_UNMOUNT) { + *npp = 0; + return (EPERM); + } + + nhpp = NFSNOHASH(nfs_hash(fhp, fhsize)); +loop: + for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) { + if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize || + bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize)) + continue; + vp = NFSTOV(np); + if (vget(vp, LK_EXCLUSIVE, p)) + goto loop; + *npp = np; + return(0); + } + /* + * Obtain a lock to prevent a race condition if the getnewvnode() + * or MALLOC() below happens to block. + */ + if (nfs_node_hash_lock) { + while (nfs_node_hash_lock) { + nfs_node_hash_lock = -1; + tsleep(&nfs_node_hash_lock, PVM, "nfsngt", 0); + } + goto loop; + } + nfs_node_hash_lock = 1; + + /* + * Do the MALLOC before the getnewvnode since doing so afterward + * might cause a bogus v_data pointer to get dereferenced + * elsewhere if MALLOC should block. 
+ */ + MALLOC_ZONE(np, struct nfsnode *, sizeof *np, M_NFSNODE, M_WAITOK); + + error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &nvp); + if (error) { + if (nfs_node_hash_lock < 0) + wakeup(&nfs_node_hash_lock); + nfs_node_hash_lock = 0; + *npp = 0; + FREE_ZONE(np, sizeof *np, M_NFSNODE); + return (error); + } + vp = nvp; + bzero((caddr_t)np, sizeof *np); + vp->v_data = np; + np->n_vnode = vp; + /* + * Insert the nfsnode in the hash queue for its new file handle + */ + LIST_INSERT_HEAD(nhpp, np, n_hash); + if (fhsize > NFS_SMALLFH) { + MALLOC_ZONE(np->n_fhp, nfsfh_t *, + fhsize, M_NFSBIGFH, M_WAITOK); + } else + np->n_fhp = &np->n_fh; + bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize); + np->n_fhsize = fhsize; + *npp = np; + + if (nfs_node_hash_lock < 0) + wakeup(&nfs_node_hash_lock); + nfs_node_hash_lock = 0; + + /* + * Lock the new nfsnode. + */ + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + + return (0); +} + +int +nfs_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + register struct nfsnode *np; + register struct sillyrename *sp; + struct proc *p = current_proc(); /* XXX */ + extern int prtactive; + struct ucred *cred; + + np = VTONFS(ap->a_vp); + if (prtactive && ap->a_vp->v_usecount != 0) + vprint("nfs_inactive: pushing active", ap->a_vp); + if (ap->a_vp->v_type != VDIR) { + sp = np->n_sillyrename; + np->n_sillyrename = (struct sillyrename *)0; + } else + sp = (struct sillyrename *)0; + + if (sp) { + /* + * Remove the silly file that was rename'd earlier + */ +#if DIAGNOSTIC + kprintf("nfs_inactive removing %s, dvp=%x, a_vp=%x, ap=%x, np=%x, sp=%x\n", &sp->s_name[0], (unsigned)sp->s_dvp, (unsigned)ap->a_vp, (unsigned)ap, (unsigned)np, (unsigned)sp); +#endif + /* + * We get a reference (vget) to ensure getnewvnode() + * doesn't recycle vp while we're asleep awaiting I/O. + * Note we don't need the reference unless usecount is + * already zero. 
In the case of a forcible unmount it + * wont be zero and doing a vget would fail because + * vclean holds VXLOCK. + */ + if (ap->a_vp->v_usecount > 0) { + VREF(ap->a_vp); + } else if (vget(ap->a_vp, 0, ap->a_p)) + panic("nfs_inactive: vget failed"); + (void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1); + ubc_setsize(ap->a_vp, (off_t)0); + + /* We have a problem. The dvp could have gone away on us + * while in the unmount path. Thus it appears as VBAD and we + * cannot use it. If we tried locking the parent (future), for silly + * rename files, it is unclear where we would lock. The unmount + * code just pulls unlocked vnodes as it goes thru its list and + * yanks them. Could unmount be smarter to see if a busy reg vnode has + * a parent, and not yank it yet? Put in more passes at unmount + * time? In the meantime, just check if it went away on us. Could + * have gone away during the nfs_vinvalbuf or ubc_setsize which block. + * Or perhaps even before nfs_inactive got called. + */ + if ((sp->s_dvp)->v_type != VBAD) + nfs_removeit(sp); /* uses the dvp */ + cred = sp->s_cred; + if (cred != NOCRED) { + sp->s_cred = NOCRED; + crfree(cred); + } + vrele(sp->s_dvp); + FREE_ZONE((caddr_t)sp, sizeof (struct sillyrename), M_NFSREQ); + vrele(ap->a_vp); + } + np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NQNFSEVICTED | + NQNFSNONCACHE | NQNFSWRITE); + VOP_UNLOCK(ap->a_vp, 0, ap->a_p); + return (0); +} + +/* + * Reclaim an nfsnode so that it can be used for other purposes. 
+ */ +int +nfs_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct nfsnode *np = VTONFS(vp); + register struct nfsmount *nmp = VFSTONFS(vp->v_mount); + register struct nfsdmap *dp, *dp2; + extern int prtactive; + + if (prtactive && vp->v_usecount != 0) + vprint("nfs_reclaim: pushing active", vp); + + LIST_REMOVE(np, n_hash); + + /* + * In case we block during FREE_ZONEs below, get the entry out + * of tbe name cache now so subsequent lookups won't find it. + */ + cache_purge(vp); + + /* + * For nqnfs, take it off the timer queue as required. + */ + if ((nmp->nm_flag & NFSMNT_NQNFS) && np->n_timer.cqe_next != 0) { + CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer); + } + + /* + * Free up any directory cookie structures and + * large file handle structures that might be associated with + * this nfs node. + */ + if (vp->v_type == VDIR) { + dp = np->n_cookies.lh_first; + while (dp) { + dp2 = dp; + dp = dp->ndm_list.le_next; + FREE_ZONE((caddr_t)dp2, + sizeof (struct nfsdmap), M_NFSDIROFF); + } + } + if (np->n_fhsize > NFS_SMALLFH) { + FREE_ZONE((caddr_t)np->n_fhp, np->n_fhsize, M_NFSBIGFH); + } + + FREE_ZONE(vp->v_data, sizeof (struct nfsnode), M_NFSNODE); + vp->v_data = (void *)0; + return (0); +} + +#if 0 +/* + * Lock an nfsnode + */ +int +nfs_lock(ap) + struct vop_lock_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + + /* + * Ugh, another place where interruptible mounts will get hung. + * If you make this sleep interruptible, then you have to fix all + * the VOP_LOCK() calls to expect interruptibility. + */ + while (vp->v_flag & VXLOCK) { + vp->v_flag |= VXWANT; + (void) tsleep((caddr_t)vp, PINOD, "nfslck", 0); + } + if (vp->v_tag == VT_NON) + return (ENOENT); + +#if 0 + /* + * Only lock regular files. 
If a server crashed while we were + * holding a directory lock, we could easily end up sleeping + * until the server rebooted while holding a lock on the root. + * Locks are only needed for protecting critical sections in + * VMIO at the moment. + * New vnodes will have type VNON but they should be locked + * since they may become VREG. This is checked in loadattrcache + * and unwanted locks are released there. + */ + if (vp->v_type == VREG || vp->v_type == VNON) { + while (np->n_flag & NLOCKED) { + np->n_flag |= NWANTED; + (void) tsleep((caddr_t) np, PINOD, "nfslck2", 0); + /* + * If the vnode has transmuted into a VDIR while we + * were asleep, then skip the lock. + */ + if (vp->v_type != VREG && vp->v_type != VNON) + return (0); + } + np->n_flag |= NLOCKED; + } +#endif + + return (0); +} + +/* + * Unlock an nfsnode + */ +int +nfs_unlock(ap) + struct vop_unlock_args /* { + struct vnode *a_vp; + } */ *ap; +{ +#if 0 + struct vnode* vp = ap->a_vp; + struct nfsnode* np = VTONFS(vp); + + if (vp->v_type == VREG || vp->v_type == VNON) { + if (!(np->n_flag & NLOCKED)) + panic("nfs_unlock: nfsnode not locked"); + np->n_flag &= ~NLOCKED; + if (np->n_flag & NWANTED) { + np->n_flag &= ~NWANTED; + wakeup((caddr_t) np); + } + } +#endif + + return (0); +} + +/* + * Check for a locked nfsnode + */ +int +nfs_islocked(ap) + struct vop_islocked_args /* { + struct vnode *a_vp; + } */ *ap; +{ + return VTONFS(ap->a_vp)->n_flag & NLOCKED ? 1 : 0; +} +#endif + +/* + * Nfs abort op, called after namei() when a CREATE/DELETE isn't actually + * done. Currently nothing to do. 
+ */ +/* ARGSUSED */ +int +nfs_abortop(ap) + struct vop_abortop_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + } */ *ap; +{ + + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + return (0); +} diff --git a/bsd/nfs/nfs_nqlease.c b/bsd/nfs/nfs_nqlease.c new file mode 100644 index 000000000..3f3d51ef0 --- /dev/null +++ b/bsd/nfs/nfs_nqlease.c @@ -0,0 +1,1323 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_nqlease.c 8.9 (Berkeley) 5/20/95 + * FreeBSD-Id: nfs_nqlease.c,v 1.32 1997/11/07 08:53:23 phk Exp $ + */ + + +/* + * References: + * Cary G. Gray and David R. Cheriton, "Leases: An Efficient Fault-Tolerant + * Mechanism for Distributed File Cache Consistency", + * In Proc. of the Twelfth ACM Symposium on Operating Systems + * Principals, pg. 202-210, Litchfield Park, AZ, Dec. 1989. + * Michael N. Nelson, Brent B. Welch and John K. Ousterhout, "Caching + * in the Sprite Network File System", ACM TOCS 6(1), + * pages 134-154, February 1988. 
+ * V. Srinivasan and Jeffrey C. Mogul, "Spritely NFS: Implementation and + * Performance of Cache-Consistency Protocols", Digital + * Equipment Corporation WRL Research Report 89/5, May 1989. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +time_t nqnfsstarttime = (time_t)0; +int nqsrv_clockskew = NQ_CLOCKSKEW; +int nqsrv_writeslack = NQ_WRITESLACK; +int nqsrv_maxlease = NQ_MAXLEASE; +static int nqsrv_maxnumlease = NQ_MAXNUMLEASE; + +struct vop_lease_args; + +static int nqsrv_cmpnam __P((struct nfssvc_sock *, struct mbuf *, + struct nqhost *)); +extern void nqnfs_lease_updatetime __P((int deltat)); +static int nqnfs_vacated __P((struct vnode *vp, struct ucred *cred)); +static void nqsrv_addhost __P((struct nqhost *lph, struct nfssvc_sock *slp, + struct mbuf *nam)); +static void nqsrv_instimeq __P((struct nqlease *lp, u_long duration)); +static void nqsrv_locklease __P((struct nqlease *lp)); +static void nqsrv_send_eviction __P((struct vnode *vp, struct nqlease *lp, + struct nfssvc_sock *slp, + struct mbuf *nam, struct ucred *cred)); +static void nqsrv_unlocklease __P((struct nqlease *lp)); +static void nqsrv_waitfor_expiry __P((struct nqlease *lp)); + +/* + * Signifies which rpcs can have piggybacked lease requests + */ +int nqnfs_piggy[NFS_NPROCS] = { + 0, + 0, + ND_WRITE, + ND_READ, + 0, + ND_READ, + ND_READ, + ND_WRITE, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + ND_READ, + ND_READ, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +}; + +extern nfstype nfsv2_type[9]; +extern nfstype nfsv3_type[9]; +extern struct nfssvc_sock *nfs_udpsock, *nfs_cltpsock; +extern int nfsd_waiting; +extern struct nfsstats nfsstats; +extern int nfs_mount_type; + +#define TRUE 1 +#define FALSE 0 + +#ifndef NFS_NOSERVER +/* + * Get or check for a lease for "vp", based on ND_CHECK flag. 
+ * The rules are as follows: + * - if a current non-caching lease, reply non-caching + * - if a current lease for same host only, extend lease + * - if a read cachable lease and a read lease request + * add host to list any reply cachable + * - else { set non-cachable for read-write sharing } + * send eviction notice messages to all other hosts that have lease + * wait for lease termination { either by receiving vacated messages + * from all the other hosts or expiry + * via. timeout } + * modify lease to non-cachable + * - else if no current lease, issue new one + * - reply + * - return boolean TRUE iff nam should be m_freem()'d + * NB: Since nqnfs_serverd() is called from a timer, any potential tsleep() + * in here must be framed by nqsrv_locklease() and nqsrv_unlocklease(). + * nqsrv_locklease() is coded such that at least one of LC_LOCKED and + * LC_WANTED is set whenever a process is tsleeping in it. The exception + * is when a new lease is being allocated, since it is not in the timer + * queue yet. 
(Ditto for the splsoftclock() and splx(s) calls) + */ +int +nqsrv_getlease(vp, duration, flags, slp, procp, nam, cachablep, frev, cred) + struct vnode *vp; + u_long *duration; + int flags; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf *nam; + int *cachablep; + u_quad_t *frev; + struct ucred *cred; +{ + register struct nqlease *lp; + register struct nqfhhashhead *lpp = 0; + register struct nqhost *lph = 0; + struct nqlease *tlp; + struct nqm **lphp; + struct vattr vattr; + fhandle_t fh; + int i, ok, error, s; + + if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) + return (0); + if (*duration > nqsrv_maxlease) + *duration = nqsrv_maxlease; + error = VOP_GETATTR(vp, &vattr, cred, procp); + if (error) + return (error); + *frev = vattr.va_filerev; + s = splsoftclock(); + tlp = vp->v_lease; + if ((flags & ND_CHECK) == 0) + nfsstats.srvnqnfs_getleases++; + if (tlp == (struct nqlease *)0) { + + /* + * Find the lease by searching the hash list. + */ + fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid; + error = VFS_VPTOFH(vp, &fh.fh_fid); + if (error) { + splx(s); + return (error); + } + lpp = NQFHHASH(fh.fh_fid.fid_data); + for (lp = lpp->lh_first; lp != 0; lp = lp->lc_hash.le_next) + if (fh.fh_fsid.val[0] == lp->lc_fsid.val[0] && + fh.fh_fsid.val[1] == lp->lc_fsid.val[1] && + !bcmp(fh.fh_fid.fid_data, lp->lc_fiddata, + fh.fh_fid.fid_len - sizeof (long))) { + /* Found it */ + lp->lc_vp = vp; + vp->v_lease = lp; + tlp = lp; + break; + } + } else + lp = tlp; + if (lp) { + if ((lp->lc_flag & LC_NONCACHABLE) || + (lp->lc_morehosts == (struct nqm *)0 && + nqsrv_cmpnam(slp, nam, &lp->lc_host))) + goto doreply; + if ((flags & ND_READ) && (lp->lc_flag & LC_WRITE) == 0) { + if (flags & ND_CHECK) + goto doreply; + if (nqsrv_cmpnam(slp, nam, &lp->lc_host)) + goto doreply; + i = 0; + if (lp->lc_morehosts) { + lph = lp->lc_morehosts->lpm_hosts; + lphp = &lp->lc_morehosts->lpm_next; + ok = 1; + } else { + lphp = &lp->lc_morehosts; + ok = 0; + } + while (ok && 
(lph->lph_flag & LC_VALID)) { + if (nqsrv_cmpnam(slp, nam, lph)) + goto doreply; + if (++i == LC_MOREHOSTSIZ) { + i = 0; + if (*lphp) { + lph = (*lphp)->lpm_hosts; + lphp = &((*lphp)->lpm_next); + } else + ok = 0; + } else + lph++; + } + nqsrv_locklease(lp); + if (!ok) { + MALLOC_ZONE(*lphp, struct nqm *, + sizeof(struct nqm), + M_NQMHOST, M_WAITOK); + bzero((caddr_t)*lphp, sizeof (struct nqm)); + lph = (*lphp)->lpm_hosts; + } + nqsrv_addhost(lph, slp, nam); + nqsrv_unlocklease(lp); + } else { + lp->lc_flag |= LC_NONCACHABLE; + nqsrv_locklease(lp); + nqsrv_send_eviction(vp, lp, slp, nam, cred); + nqsrv_waitfor_expiry(lp); + nqsrv_unlocklease(lp); + } +doreply: + /* + * Update the lease and return + */ + if ((flags & ND_CHECK) == 0) + nqsrv_instimeq(lp, *duration); + if (lp->lc_flag & LC_NONCACHABLE) + *cachablep = 0; + else { + *cachablep = 1; + if (flags & ND_WRITE) + lp->lc_flag |= LC_WRITTEN; + } + splx(s); + return (0); + } + splx(s); + if (flags & ND_CHECK) + return (0); + + /* + * Allocate new lease + * The value of nqsrv_maxnumlease should be set generously, so that + * the following "printf" happens infrequently. 
+ */ + if (nfsstats.srvnqnfs_leases > nqsrv_maxnumlease) { + printf("Nqnfs server, too many leases\n"); + do { + (void) tsleep((caddr_t)&lbolt, PSOCK, + "nqsrvnuml", 0); + } while (nfsstats.srvnqnfs_leases > nqsrv_maxnumlease); + } + MALLOC_ZONE(lp, struct nqlease *, + sizeof (struct nqlease), M_NQLEASE, M_WAITOK); + bzero((caddr_t)lp, sizeof (struct nqlease)); + if (flags & ND_WRITE) + lp->lc_flag |= (LC_WRITE | LC_WRITTEN); + nqsrv_addhost(&lp->lc_host, slp, nam); + lp->lc_vp = vp; + lp->lc_fsid = fh.fh_fsid; + bcopy(fh.fh_fid.fid_data, lp->lc_fiddata, + fh.fh_fid.fid_len - sizeof (long)); + if(!lpp) + panic("nfs_nqlease.c: Phoney lpp"); + LIST_INSERT_HEAD(lpp, lp, lc_hash); + vp->v_lease = lp; + s = splsoftclock(); + nqsrv_instimeq(lp, *duration); + splx(s); + *cachablep = 1; + if (++nfsstats.srvnqnfs_leases > nfsstats.srvnqnfs_maxleases) + nfsstats.srvnqnfs_maxleases = nfsstats.srvnqnfs_leases; + return (0); +} + +/* + * Local lease check for server syscalls. + * Just set up args and let nqsrv_getlease() do the rest. + * nqnfs_vop_lease_check() is the VOP_LEASE() form of the same routine. + * Ifdef'd code in nfsnode.h renames these routines to whatever a particular + * OS needs. + */ +void +nqnfs_lease_check(vp, p, cred, flag) + struct vnode *vp; + struct proc *p; + struct ucred *cred; + int flag; +{ + u_long duration = 0; + int cache; + u_quad_t frev; + + (void) nqsrv_getlease(vp, &duration, ND_CHECK | flag, NQLOCALSLP, + p, (struct mbuf *)0, &cache, &frev, cred); +} + +int +nqnfs_vop_lease_check(ap) + struct vop_lease_args /* { + struct vnode *a_vp; + struct proc *a_p; + struct ucred *a_cred; + int a_flag; + } */ *ap; +{ + u_long duration = 0; + int cache; + u_quad_t frev; + + (void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag, + NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred); + return (0); +} + +#endif /* NFS_NOSERVER */ + +/* + * Add a host to an nqhost structure for a lease. 
+ */ +static void +nqsrv_addhost(lph, slp, nam) + register struct nqhost *lph; + struct nfssvc_sock *slp; + struct mbuf *nam; +{ + register struct sockaddr_in *saddr; + + if (slp == NQLOCALSLP) + lph->lph_flag |= (LC_VALID | LC_LOCAL); + else if (slp == nfs_udpsock) { + saddr = mtod(nam, struct sockaddr_in *); + lph->lph_flag |= (LC_VALID | LC_UDP); + lph->lph_inetaddr = saddr->sin_addr.s_addr; + lph->lph_port = saddr->sin_port; + } else if (slp == nfs_cltpsock) { + lph->lph_nam = m_copym(nam, 0, M_COPYALL, M_WAIT); + lph->lph_flag |= (LC_VALID | LC_CLTP); + } else { + lph->lph_flag |= (LC_VALID | LC_SREF); + lph->lph_slp = slp; + slp->ns_sref++; + } +} + +/* + * Update the lease expiry time and position it in the timer queue correctly. + */ +static void +nqsrv_instimeq(lp, duration) + register struct nqlease *lp; + u_long duration; +{ + register struct nqlease *tlp; + time_t newexpiry; + + newexpiry = time.tv_sec + duration + nqsrv_clockskew; + if (lp->lc_expiry == newexpiry) + return; + if (lp->lc_timer.cqe_next != 0) { + CIRCLEQ_REMOVE(&nqtimerhead, lp, lc_timer); + } + lp->lc_expiry = newexpiry; + + /* + * Find where in the queue it should be. + */ + tlp = nqtimerhead.cqh_last; + while (tlp != (void *)&nqtimerhead && tlp->lc_expiry > newexpiry) + tlp = tlp->lc_timer.cqe_prev; +#ifdef HASNVRAM + if (tlp == nqtimerhead.cqh_last) + NQSTORENOVRAM(newexpiry); +#endif /* HASNVRAM */ + if (tlp == (void *)&nqtimerhead) { + CIRCLEQ_INSERT_HEAD(&nqtimerhead, lp, lc_timer); + } else { + CIRCLEQ_INSERT_AFTER(&nqtimerhead, tlp, lp, lc_timer); + } +} + +/* + * Compare the requesting host address with the lph entry in the lease. + * Return true iff it is the same. + * This is somewhat messy due to the union in the nqhost structure. + * The local host is indicated by the special value of NQLOCALSLP for slp. 
+ */ +static int +nqsrv_cmpnam(slp, nam, lph) + register struct nfssvc_sock *slp; + struct mbuf *nam; + register struct nqhost *lph; +{ + register struct sockaddr_in *saddr; + struct mbuf *addr; + union nethostaddr lhaddr; + int ret; + + if (slp == NQLOCALSLP) { + if (lph->lph_flag & LC_LOCAL) + return (1); + else + return (0); + } + if (slp == nfs_udpsock || slp == nfs_cltpsock) + addr = nam; + else + addr = slp->ns_nam; + if (lph->lph_flag & LC_UDP) + ret = netaddr_match(AF_INET, &lph->lph_haddr, addr); + else if (lph->lph_flag & LC_CLTP) + ret = netaddr_match(AF_ISO, &lph->lph_claddr, addr); + else { + if ((lph->lph_slp->ns_flag & SLP_VALID) == 0) + return (0); + saddr = mtod(lph->lph_slp->ns_nam, struct sockaddr_in *); + if (saddr->sin_family == AF_INET) + lhaddr.had_inetaddr = saddr->sin_addr.s_addr; + else + lhaddr.had_nam = lph->lph_slp->ns_nam; + ret = netaddr_match(saddr->sin_family, &lhaddr, addr); + } + return (ret); +} + +/* + * Send out eviction notice messages to all other hosts for the lease. 
+ */ +static void +nqsrv_send_eviction(vp, lp, slp, nam, cred) + struct vnode *vp; + register struct nqlease *lp; + struct nfssvc_sock *slp; + struct mbuf *nam; + struct ucred *cred; +{ + register struct nqhost *lph = &lp->lc_host; + register struct mbuf *m; + register int siz; + struct nqm *lphnext = lp->lc_morehosts; + struct mbuf *mreq, *mb, *mb2, *mheadend; + struct socket *so; + struct mbuf *nam2; + struct sockaddr_in *saddr; + nfsfh_t nfh; + fhandle_t *fhp; + caddr_t bpos, cp; + u_long xid, *tl; + int len = 1, ok = 1, i = 0; + int sotype, *solockp; + + while (ok && (lph->lph_flag & LC_VALID)) { + if (nqsrv_cmpnam(slp, nam, lph)) + lph->lph_flag |= LC_VACATED; + else if ((lph->lph_flag & (LC_LOCAL | LC_VACATED)) == 0) { + if (lph->lph_flag & LC_UDP) { + MGET(nam2, M_WAIT, MT_SONAME); + saddr = mtod(nam2, struct sockaddr_in *); + nam2->m_len = saddr->sin_len = + sizeof (struct sockaddr_in); + saddr->sin_family = AF_INET; + saddr->sin_addr.s_addr = lph->lph_inetaddr; + saddr->sin_port = lph->lph_port; + so = nfs_udpsock->ns_so; + } else if (lph->lph_flag & LC_CLTP) { + nam2 = lph->lph_nam; + so = nfs_cltpsock->ns_so; + } else if (lph->lph_slp->ns_flag & SLP_VALID) { + nam2 = (struct mbuf *)0; + so = lph->lph_slp->ns_so; + } else + goto nextone; + sotype = so->so_type; + if (so->so_proto->pr_flags & PR_CONNREQUIRED) + solockp = &lph->lph_slp->ns_solock; + else + solockp = (int *)0; + nfsm_reqhead((struct vnode *)0, NQNFSPROC_EVICTED, + NFSX_V3FH + NFSX_UNSIGNED); + fhp = &nfh.fh_generic; + bzero((caddr_t)fhp, sizeof(nfh)); + fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid; + VFS_VPTOFH(vp, &fhp->fh_fid); + nfsm_srvfhtom(fhp, 1); + m = mreq; + siz = 0; + while (m) { + siz += m->m_len; + m = m->m_next; + } + if (siz <= 0 || siz > NFS_MAXPACKET) { + printf("mbuf siz=%d\n",siz); + panic("Bad nfs svc reply"); + } + m = nfsm_rpchead(cred, (NFSMNT_NFSV3 | NFSMNT_NQNFS), + NQNFSPROC_EVICTED, + RPCAUTH_UNIX, 5 * NFSX_UNSIGNED, (char *)0, + 0, (char *)NULL, mreq, siz, 
&mheadend, &xid); + /* + * For stream protocols, prepend a Sun RPC + * Record Mark. + */ + if (sotype == SOCK_STREAM) { + M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); + *mtod(m, u_long *) = htonl(0x80000000 | + (m->m_pkthdr.len - NFSX_UNSIGNED)); + } + if (((lph->lph_flag & (LC_UDP | LC_CLTP)) == 0 && + (lph->lph_slp->ns_flag & SLP_VALID) == 0) || + (solockp && (*solockp & NFSMNT_SNDLOCK))) + m_freem(m); + else { + if (solockp) + *solockp |= NFSMNT_SNDLOCK; + (void) nfs_send(so, nam2, m, + (struct nfsreq *)0); + if (solockp) + nfs_sndunlock(solockp); + } + if (lph->lph_flag & LC_UDP) + MFREE(nam2, m); + } +nextone: + if (++i == len) { + if (lphnext) { + i = 0; + len = LC_MOREHOSTSIZ; + lph = lphnext->lpm_hosts; + lphnext = lphnext->lpm_next; + } else + ok = 0; + } else + lph++; + } +} + +/* + * Wait for the lease to expire. + * This will occur when all clients have sent "vacated" messages to + * this server OR when it expires do to timeout. + */ +static void +nqsrv_waitfor_expiry(lp) + register struct nqlease *lp; +{ + register struct nqhost *lph; + register int i; + struct nqm *lphnext; + int len, ok; + +tryagain: + if (time.tv_sec > lp->lc_expiry) + return; + lph = &lp->lc_host; + lphnext = lp->lc_morehosts; + len = 1; + i = 0; + ok = 1; + while (ok && (lph->lph_flag & LC_VALID)) { + if ((lph->lph_flag & (LC_LOCAL | LC_VACATED)) == 0) { + lp->lc_flag |= LC_EXPIREDWANTED; + (void) tsleep((caddr_t)&lp->lc_flag, PSOCK, + "nqexp", 0); + goto tryagain; + } + if (++i == len) { + if (lphnext) { + i = 0; + len = LC_MOREHOSTSIZ; + lph = lphnext->lpm_hosts; + lphnext = lphnext->lpm_next; + } else + ok = 0; + } else + lph++; + } +} + +#ifndef NFS_NOSERVER + +/* + * Nqnfs server timer that maintains the server lease queue. 
+ * Scan the lease queue for expired entries: + * - when one is found, wakeup anyone waiting for it + * else dequeue and free + */ +void +nqnfs_serverd() +{ + register struct nqlease *lp; + register struct nqhost *lph; + struct nqlease *nextlp; + struct nqm *lphnext, *olphnext; + struct mbuf *n; + int i, len, ok; + + for (lp = nqtimerhead.cqh_first; lp != (void *)&nqtimerhead; + lp = nextlp) { + if (lp->lc_expiry >= time.tv_sec) + break; + nextlp = lp->lc_timer.cqe_next; + if (lp->lc_flag & LC_EXPIREDWANTED) { + lp->lc_flag &= ~LC_EXPIREDWANTED; + wakeup((caddr_t)&lp->lc_flag); + } else if ((lp->lc_flag & (LC_LOCKED | LC_WANTED)) == 0) { + /* + * Make a best effort at keeping a write caching lease long + * enough by not deleting it until it has been explicitly + * vacated or there have been no writes in the previous + * write_slack seconds since expiry and the nfsds are not + * all busy. The assumption is that if the nfsds are not + * all busy now (no queue of nfs requests), then the client + * would have been able to do at least one write to the + * file during the last write_slack seconds if it was still + * trying to push writes to the server. + */ + if ((lp->lc_flag & (LC_WRITE | LC_VACATED)) == LC_WRITE && + ((lp->lc_flag & LC_WRITTEN) || nfsd_waiting == 0)) { + lp->lc_flag &= ~LC_WRITTEN; + nqsrv_instimeq(lp, nqsrv_writeslack); + } else { + CIRCLEQ_REMOVE(&nqtimerhead, lp, lc_timer); + LIST_REMOVE(lp, lc_hash); + /* + * This soft reference may no longer be valid, but + * no harm done. The worst case is if the vnode was + * recycled and has another valid lease reference, + * which is dereferenced prematurely. 
+ */ + lp->lc_vp->v_lease = (struct nqlease *)0; + lph = &lp->lc_host; + lphnext = lp->lc_morehosts; + olphnext = (struct nqm *)0; + len = 1; + i = 0; + ok = 1; + while (ok && (lph->lph_flag & LC_VALID)) { + if (lph->lph_flag & LC_CLTP) + MFREE(lph->lph_nam, n); + if (lph->lph_flag & LC_SREF) + nfsrv_slpderef(lph->lph_slp); + if (++i == len) { + if (olphnext) { + _FREE_ZONE((caddr_t)olphnext, + sizeof (struct nqm), + M_NQMHOST); + olphnext = (struct nqm *)0; + } + if (lphnext) { + olphnext = lphnext; + i = 0; + len = LC_MOREHOSTSIZ; + lph = lphnext->lpm_hosts; + lphnext = lphnext->lpm_next; + } else + ok = 0; + } else + lph++; + } + FREE_ZONE((caddr_t)lp, + sizeof (struct nqlease), M_NQLEASE); + if (olphnext) + _FREE_ZONE((caddr_t)olphnext, + sizeof (struct nqm), M_NQMHOST); + nfsstats.srvnqnfs_leases--; + } + } + } +} + +/* + * Called from nfssvc_nfsd() for a getlease rpc request. + * Do the from/to xdr translation and call nqsrv_getlease() to + * do the real work. + */ +int +nqnfsrv_getlease(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register struct nfs_fattr *fp; + struct vattr va; + register struct vattr *vap = &va; + struct vnode *vp; + nfsfh_t nfh; + fhandle_t *fhp; + register u_long *tl; + register long t1; + u_quad_t frev; + caddr_t bpos; + int error = 0; + char *cp2; + struct mbuf *mb, *mb2, *mreq; + int flags, rdonly, cache; + + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); + flags = fxdr_unsigned(int, *tl++); + nfsd->nd_duration = fxdr_unsigned(int, *tl); + error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, &rdonly, + (nfsd->nd_flag & ND_KERBAUTH), TRUE); + if (error) + nfsm_reply(0); + if (rdonly && flags == ND_WRITE) { + error = EROFS; + vput(vp); + nfsm_reply(0); 
+ } + (void) nqsrv_getlease(vp, &nfsd->nd_duration, flags, slp, procp, + nam, &cache, &frev, cred); + error = VOP_GETATTR(vp, vap, cred, procp); + vput(vp); + nfsm_reply(NFSX_V3FATTR + 4 * NFSX_UNSIGNED); + nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(cache); + *tl++ = txdr_unsigned(nfsd->nd_duration); + txdr_hyper(&frev, tl); + nfsm_build(fp, struct nfs_fattr *, NFSX_V3FATTR); + nfsm_srvfillattr(vap, fp); + nfsm_srvdone; +} + +/* + * Called from nfssvc_nfsd() when a "vacated" message is received from a + * client. Find the entry and expire it. + */ +int +nqnfsrv_vacated(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + register struct nqlease *lp; + register struct nqhost *lph; + struct nqlease *tlp = (struct nqlease *)0; + nfsfh_t nfh; + fhandle_t *fhp; + register u_long *tl; + register long t1; + struct nqm *lphnext; + struct mbuf *mreq, *mb; + int error = 0, i, len, ok, gotit = 0, cache = 0; + char *cp2, *bpos; + u_quad_t frev; + + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + m_freem(mrep); + /* + * Find the lease by searching the hash list. 
+ */ + for (lp = NQFHHASH(fhp->fh_fid.fid_data)->lh_first; lp != 0; + lp = lp->lc_hash.le_next) + if (fhp->fh_fsid.val[0] == lp->lc_fsid.val[0] && + fhp->fh_fsid.val[1] == lp->lc_fsid.val[1] && + !bcmp(fhp->fh_fid.fid_data, lp->lc_fiddata, + MAXFIDSZ)) { + /* Found it */ + tlp = lp; + break; + } + if (tlp) { + lp = tlp; + len = 1; + i = 0; + lph = &lp->lc_host; + lphnext = lp->lc_morehosts; + ok = 1; + while (ok && (lph->lph_flag & LC_VALID)) { + if (nqsrv_cmpnam(slp, nam, lph)) { + lph->lph_flag |= LC_VACATED; + gotit++; + break; + } + if (++i == len) { + if (lphnext) { + len = LC_MOREHOSTSIZ; + i = 0; + lph = lphnext->lpm_hosts; + lphnext = lphnext->lpm_next; + } else + ok = 0; + } else + lph++; + } + if ((lp->lc_flag & LC_EXPIREDWANTED) && gotit) { + lp->lc_flag &= ~LC_EXPIREDWANTED; + wakeup((caddr_t)&lp->lc_flag); + } +nfsmout: + return (EPERM); + } + return (EPERM); +} + +#endif /* NFS_NOSERVER */ + +/* + * Client get lease rpc function. + */ +int +nqnfs_getlease(vp, rwflag, cred, p) + register struct vnode *vp; + int rwflag; + struct ucred *cred; + struct proc *p; +{ + register u_long *tl; + register caddr_t cp; + register long t1, t2; + register struct nfsnode *np; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + caddr_t bpos, dpos, cp2; + time_t reqtime; + int error = 0; + struct mbuf *mreq, *mrep, *md, *mb, *mb2; + int cachable; + u_quad_t frev; + + nfsstats.rpccnt[NQNFSPROC_GETLEASE]++; + mb = mreq = nfsm_reqh(vp, NQNFSPROC_GETLEASE, NFSX_V3FH+2*NFSX_UNSIGNED, + &bpos); + nfsm_fhtom(vp, 1); + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(rwflag); + *tl = txdr_unsigned(nmp->nm_leaseterm); + reqtime = time.tv_sec; + nfsm_request(vp, NQNFSPROC_GETLEASE, p, cred); + np = VTONFS(vp); + nfsm_dissect(tl, u_long *, 4 * NFSX_UNSIGNED); + cachable = fxdr_unsigned(int, *tl++); + reqtime += fxdr_unsigned(int, *tl++); + if (reqtime > time.tv_sec) { + fxdr_hyper(tl, &frev); + nqnfs_clientlease(nmp, np, rwflag, cachable, reqtime, frev); + 
nfsm_loadattr(vp, (struct vattr *)0); + } else + error = NQNFS_EXPIRED; + nfsm_reqdone; + return (error); +} + +/* + * Client vacated message function. + */ +static int +nqnfs_vacated(vp, cred) + register struct vnode *vp; + struct ucred *cred; +{ + register caddr_t cp; + register struct mbuf *m; + register int i; + register u_long *tl; + register long t2; + caddr_t bpos; + u_long xid; + int error = 0; + struct mbuf *mreq, *mb, *mb2, *mheadend; + struct nfsmount *nmp; + struct nfsreq myrep; + + nmp = VFSTONFS(vp->v_mount); + nfsstats.rpccnt[NQNFSPROC_VACATED]++; + nfsm_reqhead(vp, NQNFSPROC_VACATED, NFSX_FH(1)); + nfsm_fhtom(vp, 1); + m = mreq; + i = 0; + while (m) { + i += m->m_len; + m = m->m_next; + } + m = nfsm_rpchead(cred, nmp->nm_flag, NQNFSPROC_VACATED, + RPCAUTH_UNIX, 5 * NFSX_UNSIGNED, (char *)0, + 0, (char *)NULL, mreq, i, &mheadend, &xid); + if (nmp->nm_sotype == SOCK_STREAM) { + M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); + *mtod(m, u_long *) = htonl(0x80000000 | (m->m_pkthdr.len - + NFSX_UNSIGNED)); + } + myrep.r_flags = 0; + myrep.r_nmp = nmp; + if (nmp->nm_soflags & PR_CONNREQUIRED) + (void) nfs_sndlock(&nmp->nm_flag, (struct nfsreq *)0); + (void) nfs_send(nmp->nm_so, nmp->nm_nam, m, &myrep); + if (nmp->nm_soflags & PR_CONNREQUIRED) + nfs_sndunlock(&nmp->nm_flag); +nfsmout: + return (error); +} + +#ifndef NFS_NOSERVER + +/* + * Called for client side callbacks + */ +int +nqnfs_callback(nmp, mrep, md, dpos) + struct nfsmount *nmp; + struct mbuf *mrep, *md; + caddr_t dpos; +{ + register struct vnode *vp; + register u_long *tl; + register long t1; + nfsfh_t nfh; + fhandle_t *fhp; + struct nfsnode *np; + struct nfsd tnfsd; + struct nfssvc_sock *slp; + struct nfsrv_descript ndesc; + register struct nfsrv_descript *nfsd = &ndesc; + struct mbuf **mrq = (struct mbuf **)0, *mb, *mreq; + int error = 0, cache = 0; + char *cp2, *bpos; + u_quad_t frev; + +#ifndef nolint + slp = NULL; +#endif + nfsd->nd_mrep = mrep; + nfsd->nd_md = md; + nfsd->nd_dpos = dpos; + error = 
nfs_getreq(nfsd, &tnfsd, FALSE); + if (error) + return (error); + md = nfsd->nd_md; + dpos = nfsd->nd_dpos; + if (nfsd->nd_procnum != NQNFSPROC_EVICTED) { + m_freem(mrep); + return (EPERM); + } + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + m_freem(mrep); + error = nfs_nget(nmp->nm_mountp, (nfsfh_t *)fhp, NFSX_V3FH, &np); + if (error) + return (error); + vp = NFSTOV(np); + if (np->n_timer.cqe_next != 0) { + np->n_expiry = 0; + np->n_flag |= NQNFSEVICTED; + if (nmp->nm_timerhead.cqh_first != np) { + CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer); + CIRCLEQ_INSERT_HEAD(&nmp->nm_timerhead, np, n_timer); + } + } + vput(vp); + nfsm_srvdone; +} + + +/* + * Nqnfs client helper daemon. Runs once a second to expire leases. + * It also get authorization strings for "kerb" mounts. + * It must start at the beginning of the list again after any potential + * "sleep" since nfs_reclaim() called from vclean() can pull a node off + * the list asynchronously. + */ +int +nqnfs_clientd(nmp, cred, ncd, flag, argp, p) + register struct nfsmount *nmp; + struct ucred *cred; + struct nfsd_cargs *ncd; + int flag; + caddr_t argp; + struct proc *p; +{ + register struct nfsnode *np; + struct vnode *vp; + struct nfsreq myrep; + struct nfsuid *nuidp, *nnuidp; + int error = 0, vpid; + register struct nfsreq *rp; + + /* + * First initialize some variables + */ + + /* + * If an authorization string is being passed in, get it. 
+ */ + if ((flag & NFSSVC_GOTAUTH) && + (nmp->nm_flag & (NFSMNT_WAITAUTH | NFSMNT_DISMNT)) == 0) { + if (nmp->nm_flag & NFSMNT_HASAUTH) + panic("cld kerb"); + if ((flag & NFSSVC_AUTHINFAIL) == 0) { + if (ncd->ncd_authlen <= nmp->nm_authlen && + ncd->ncd_verflen <= nmp->nm_verflen && + !copyin(ncd->ncd_authstr,nmp->nm_authstr,ncd->ncd_authlen)&& + !copyin(ncd->ncd_verfstr,nmp->nm_verfstr,ncd->ncd_verflen)){ + nmp->nm_authtype = ncd->ncd_authtype; + nmp->nm_authlen = ncd->ncd_authlen; + nmp->nm_verflen = ncd->ncd_verflen; +#if NFSKERB + nmp->nm_key = ncd->ncd_key; +#endif + } else + nmp->nm_flag |= NFSMNT_AUTHERR; + } else + nmp->nm_flag |= NFSMNT_AUTHERR; + nmp->nm_flag |= NFSMNT_HASAUTH; + wakeup((caddr_t)&nmp->nm_authlen); + } else + nmp->nm_flag |= NFSMNT_WAITAUTH; + + /* + * Loop every second updating queue until there is a termination sig. + */ + while ((nmp->nm_flag & NFSMNT_DISMNT) == 0) { + if (nmp->nm_flag & NFSMNT_NQNFS) { + /* + * If there are no outstanding requests (and therefore no + * processes in nfs_reply) and there is data in the receive + * queue, poke for callbacks. + */ + if (nfs_reqq.tqh_first == 0 && nmp->nm_so && + nmp->nm_so->so_rcv.sb_cc > 0) { + myrep.r_flags = R_GETONEREP; + myrep.r_nmp = nmp; + myrep.r_mrep = (struct mbuf *)0; + myrep.r_procp = (struct proc *)0; + (void) nfs_reply(&myrep); + } + + /* + * Loop through the leases, updating as required. 
+ */ + np = nmp->nm_timerhead.cqh_first; + while (np != (void *)&nmp->nm_timerhead && + (nmp->nm_flag & NFSMNT_DISMINPROG) == 0) { + vp = NFSTOV(np); + vpid = vp->v_id; + if (np->n_expiry < time.tv_sec) { + if (vget(vp, LK_EXCLUSIVE, p) == 0) { + nmp->nm_inprog = vp; + if (vpid == vp->v_id) { + CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer); + np->n_timer.cqe_next = 0; + if (np->n_flag & (NMODIFIED | NQNFSEVICTED)) { + if (np->n_flag & NQNFSEVICTED) { + if (vp->v_type == VDIR) + nfs_invaldir(vp); + cache_purge(vp); + (void) nfs_vinvalbuf(vp, + V_SAVE, cred, p, 0); + np->n_flag &= ~NQNFSEVICTED; + (void) nqnfs_vacated(vp, cred); + } else if (vp->v_type == VREG) { + (void) VOP_FSYNC(vp, cred, + MNT_WAIT, p); + np->n_flag &= ~NMODIFIED; + } + } + } + vrele(vp); + nmp->nm_inprog = NULLVP; + } + } else if ((np->n_expiry - NQ_RENEWAL) < time.tv_sec) { + if ((np->n_flag & (NQNFSWRITE | NQNFSNONCACHE)) + == NQNFSWRITE && vp->v_dirtyblkhd.lh_first && + vget(vp, LK_EXCLUSIVE, p) == 0) { + nmp->nm_inprog = vp; + if (vpid == vp->v_id && + nqnfs_getlease(vp, ND_WRITE, cred, p)==0) + np->n_brev = np->n_lrev; + vrele(vp); + nmp->nm_inprog = NULLVP; + } + } else + break; + if (np == nmp->nm_timerhead.cqh_first) + break; + np = nmp->nm_timerhead.cqh_first; + } + } + + /* + * Get an authorization string, if required. + */ + if ((nmp->nm_flag & (NFSMNT_WAITAUTH | NFSMNT_DISMNT | NFSMNT_HASAUTH)) == 0) { + ncd->ncd_authuid = nmp->nm_authuid; + if (copyout((caddr_t)ncd, argp, sizeof (struct nfsd_cargs))) + nmp->nm_flag |= NFSMNT_WAITAUTH; + else + return (ENEEDAUTH); + } + + /* + * Wait a bit (no pun) and do it again. + */ + if ((nmp->nm_flag & NFSMNT_DISMNT) == 0 && + (nmp->nm_flag & (NFSMNT_WAITAUTH | NFSMNT_HASAUTH))) { + error = tsleep((caddr_t)&nmp->nm_authstr, PSOCK | PCATCH, + "nqnfstimr", hz / 3); + if (error == EINTR || error == ERESTART) + (void) dounmount(nmp->nm_mountp, 0, p); + } + } + + /* + * Finally, we can free up the mount structure. 
+ */
+	/*
+	 * Tear down the uid<->cred cache for this mount: unhash each
+	 * nfsuid entry, pull it off the LRU, and free it.
+	 */
+	for (nuidp = nmp->nm_uidlruhead.tqh_first; nuidp != 0; nuidp = nnuidp) {
+		nnuidp = nuidp->nu_lru.tqe_next;
+		LIST_REMOVE(nuidp, nu_hash);
+		TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
+		_FREE_ZONE((caddr_t)nuidp, sizeof (struct nfsuid), M_NFSUID);
+	}
+	/*
+	 * Loop through outstanding request list and remove dangling
+	 * references to defunct nfsmount struct
+	 */
+	for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next)
+		if (rp->r_nmp == nmp)
+			rp->r_nmp = (struct nfsmount *)0;
+	/* Nothing references the nfsmount any longer; release it. */
+	_FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT);
+	if (error == EWOULDBLOCK)
+		error = 0;	/* EWOULDBLOCK from tsleep() is not an error to the caller */
+	return (error);
+}
+
+#endif /* NFS_NOSERVER */
+
+/*
+ * Adjust all timer queue expiry times when the time of day clock is changed.
+ * Called from the settimeofday() syscall.
+ */
+void
+nqnfs_lease_updatetime(deltat)
+	register int deltat;
+{
+	struct proc *p = current_proc();	/* XXX */
+	struct nqlease *lp;
+	struct nfsnode *np;
+	struct mount *mp, *nxtmp;
+	struct nfsmount *nmp;
+	int s;
+
+	if (nqnfsstarttime != 0)
+		nqnfsstarttime += deltat;
+	/* Shift every server-side lease expiry by the same delta. */
+	s = splsoftclock();
+	for (lp = nqtimerhead.cqh_first; lp != (void *)&nqtimerhead;
+	    lp = lp->lc_timer.cqe_next)
+		lp->lc_expiry += deltat;
+	splx(s);
+
+	/*
+	 * Search the mount list for all nqnfs mounts and do their timer
+	 * queues.
+	 */
+	simple_lock(&mountlist_slock);
+	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nxtmp) {
+		/*
+		 * NOTE(review): this relies on vfs_busy() releasing
+		 * mountlist_slock when it succeeds (and keeping it when it
+		 * fails); the lock is retaken below before reading cqe_next.
+		 * Confirm against this tree's vfs_busy().
+		 */
+		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
+			nxtmp = mp->mnt_list.cqe_next;
+			continue;
+		}
+		if (mp->mnt_stat.f_type == nfs_mount_type) {
+			nmp = VFSTONFS(mp);
+			if (nmp->nm_flag & NFSMNT_NQNFS) {
+				/* Shift every client-side lease on this mount. */
+				for (np = nmp->nm_timerhead.cqh_first;
+				    np != (void *)&nmp->nm_timerhead;
+				    np = np->n_timer.cqe_next) {
+					np->n_expiry += deltat;
+				}
+			}
+		}
+		simple_lock(&mountlist_slock);
+		nxtmp = mp->mnt_list.cqe_next;
+		vfs_unbusy(mp, p);
+	}
+	simple_unlock(&mountlist_slock);
+}
+
+/*
+ * Lock a server lease.
+ */
+static void
+nqsrv_locklease(lp)
+	struct nqlease *lp;
+{
+
+	/* Sleep until LC_LOCKED clears, then claim it (single-flag mutex). */
+	while (lp->lc_flag & LC_LOCKED) {
+		lp->lc_flag |= LC_WANTED;
+		(void) tsleep((caddr_t)lp, PSOCK, "nqlc", 0);
+	}
+	lp->lc_flag |= LC_LOCKED;
+	lp->lc_flag &= ~LC_WANTED;
+}
+
+/*
+ * Unlock a server lease.
+ */
+static void
+nqsrv_unlocklease(lp)
+	struct nqlease *lp;
+{
+
+	lp->lc_flag &= ~LC_LOCKED;
+	/* Wake anyone blocked in nqsrv_locklease(). */
+	if (lp->lc_flag & LC_WANTED)
+		wakeup((caddr_t)lp);
+}
+
+/*
+ * Update a client lease.
+ * Records the new expiry and lease revision (n_lrev) for the nfsnode and
+ * re-inserts it into the per-mount timer queue, which is kept sorted by
+ * increasing expiry.
+ */
+void
+nqnfs_clientlease(nmp, np, rwflag, cachable, expiry, frev)
+	register struct nfsmount *nmp;
+	register struct nfsnode *np;
+	int rwflag, cachable;
+	time_t expiry;
+	u_quad_t frev;
+{
+	register struct nfsnode *tp;
+
+	if (np->n_timer.cqe_next != 0) {
+		/* Already queued: unlink before re-sorting. */
+		CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer);
+		if (rwflag == ND_WRITE)
+			np->n_flag |= NQNFSWRITE;
+	} else if (rwflag == ND_READ)
+		np->n_flag &= ~NQNFSWRITE;
+	else
+		np->n_flag |= NQNFSWRITE;
+	if (cachable)
+		np->n_flag &= ~NQNFSNONCACHE;
+	else
+		np->n_flag |= NQNFSNONCACHE;
+	np->n_expiry = expiry;
+	np->n_lrev = frev;
+	/* Walk backwards from the tail to find the insertion point. */
+	tp = nmp->nm_timerhead.cqh_last;
+	while (tp != (void *)&nmp->nm_timerhead && tp->n_expiry > np->n_expiry)
+		tp = tp->n_timer.cqe_prev;
+	if (tp == (void *)&nmp->nm_timerhead) {
+		CIRCLEQ_INSERT_HEAD(&nmp->nm_timerhead, np, n_timer);
+	} else {
+		CIRCLEQ_INSERT_AFTER(&nmp->nm_timerhead, tp, np, n_timer);
+	}
+}
diff --git a/bsd/nfs/nfs_serv.c b/bsd/nfs/nfs_serv.c
new file mode 100644
index 000000000..0cbfa4334
--- /dev/null
+++ b/bsd/nfs/nfs_serv.c
@@ -0,0 +1,3543 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_serv.c 8.7 (Berkeley) 5/14/95 + * FreeBSD-Id: nfs_serv.c,v 1.52 1997/10/28 15:59:05 bde Exp $ + */ + +/* + * nfs version 2 and 3 server calls to vnode ops + * - these routines generally have 3 phases + * 1 - break down and validate rpc request in mbuf list + * 2 - do the vnode ops for the request + * (surprisingly ?? many are very similar to syscalls in vfs_syscalls.c) + * 3 - build the rpc reply in an mbuf list + * nb: + * - do not mix the phases, since the nfsm_?? macros can return failures + * on a bad rpc or similar and do not do any vrele() or vput()'s + * + * - the nfsm_reply() macro generates an nfs rpc reply with the nfs + * error number iff error != 0 whereas + * returning an error from the server function implies a fatal error + * such as a badly constructed rpc request that should be dropped without + * a reply. + * For Version 3, nfsm_reply() does not return for the error case, since + * most version 3 rpcs return more than the status for error cases. 
+ */
+
+/*
+ * NOTE(review): the header names on the #include lines below were lost
+ * when this patch text was mangled; restore them from the original
+ * bsd/nfs/nfs_serv.c (they will not preprocess as-is).
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* Map vnode vtype -> NFS file type, for v3 and v2 wire encodings. */
+nfstype nfsv3_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK,
+		      NFFIFO, NFNON };
+#ifndef NFS_NOSERVER
+nfstype nfsv2_type[9] = { NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFNON,
+		      NFCHR, NFNON };
+/* Global vars */
+extern u_long nfs_xdrneg1;
+extern u_long nfs_false, nfs_true;
+extern enum vtype nv3tov_type[8];
+extern struct nfsstats nfsstats;
+
+/* Write-gather delay; compared against nd_time (usec) in nfsrv_writegather. */
+int nfsrvw_procrastinate = NFS_GATHERDELAY * 1000;
+int nfsrvw_procrastinate_v3 = 0;
+
+int nfs_async = 0;
+#ifdef notyet
+/* XXX CSM 11/25/97 Upgrade sysctl.h someday */
+SYSCTL_INT(_vfs_nfs, OID_AUTO, async, CTLFLAG_RW, &nfs_async, 0, "");
+#endif
+
+static int nfsrv_access __P((struct vnode *,int,struct ucred *,int,
+		struct proc *, int));
+static void nfsrvw_coalesce __P((struct nfsrv_descript *,
+		struct nfsrv_descript *));
+
+/*
+ * nfs v3 access service
+ * Tests each access bit the client asked about via nfsrv_access() and
+ * returns the subset that would be granted.
+ */
+int
+nfsrv3_access(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	struct vnode *vp;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	register u_long *tl;
+	register long t1;
+	caddr_t bpos;
+	int error = 0, rdonly, cache, getret;
+	char *cp2;
+	struct mbuf *mb, *mreq, *mb2;
+	struct vattr vattr, *vap = &vattr;
+	u_long testmode, nfsmode;
+	u_quad_t frev;
+
+#ifndef nolint
+	cache = 0;
+#endif
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
+	if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
+	    &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) {
+		nfsm_reply(NFSX_UNSIGNED);
+		nfsm_srvpostop_attr(1, (struct vattr *)0);
+		return (0);
+	}
+	nfsmode = fxdr_unsigned(u_long, *tl);
+	if ((nfsmode & NFSV3ACCESS_READ) &&
+		nfsrv_access(vp, VREAD, cred, rdonly, procp, 0))
+		nfsmode &= ~NFSV3ACCESS_READ;
+	/* Write-class bits differ for directories (DELETE applies there). */
+	if (vp->v_type == VDIR)
+		testmode = (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND |
+			NFSV3ACCESS_DELETE);
+	else
+		testmode = (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND);
+	if ((nfsmode & testmode) &&
+		nfsrv_access(vp, VWRITE, cred, rdonly, procp, 0))
+		nfsmode &= ~testmode;
+	/* LOOKUP for directories, EXECUTE for files; both map to VEXEC. */
+	if (vp->v_type == VDIR)
+		testmode = NFSV3ACCESS_LOOKUP;
+	else
+		testmode = NFSV3ACCESS_EXECUTE;
+	if ((nfsmode & testmode) &&
+		nfsrv_access(vp, VEXEC, cred, rdonly, procp, 0))
+		nfsmode &= ~testmode;
+	getret = VOP_GETATTR(vp, vap, cred, procp);
+	vput(vp);
+	nfsm_reply(NFSX_POSTOPATTR(1) + NFSX_UNSIGNED);
+	nfsm_srvpostop_attr(getret, vap);
+	nfsm_build(tl, u_long *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(nfsmode);
+	nfsm_srvdone;
+}
+
+/*
+ * nfs getattr service
+ */
+int
+nfsrv_getattr(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	register struct nfs_fattr *fp;
+	struct vattr va;
+	register struct vattr *vap = &va;
+	struct vnode *vp;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	register u_long *tl;
+	register long t1;
+	caddr_t bpos;
+	int error = 0, rdonly, cache;
+	char *cp2;
+	struct mbuf *mb, *mb2, *mreq;
+	u_quad_t frev;
+
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
+	    &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) {
+		nfsm_reply(0);
+		return (0);
+	}
+	nqsrv_getl(vp, ND_READ);
+	error = VOP_GETATTR(vp, vap, cred, procp);
+	vput(vp);
+	nfsm_reply(NFSX_FATTR(nfsd->nd_flag & ND_NFSV3));
+	if (error)
+		return (0);
+	nfsm_build(fp, struct nfs_fattr *, NFSX_FATTR(nfsd->nd_flag & ND_NFSV3));
+	nfsm_srvfillattr(vap, fp);
+	nfsm_srvdone;
+}
+
+/*
+ * nfs setattr service
+ */
+int
+nfsrv_setattr(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	struct vattr va, preat;
+	register struct vattr *vap = &va;
+	register struct nfsv2_sattr *sp;
+	register struct nfs_fattr *fp;
+	struct vnode *vp;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	register u_long *tl;
+	register long t1;
+	caddr_t bpos;
+	int error = 0, rdonly, cache, preat_ret = 1, postat_ret = 1;
+	int v3 = (nfsd->nd_flag & ND_NFSV3), gcheck = 0;
+	char *cp2;
+	struct mbuf *mb, *mb2, *mreq;
+	u_quad_t frev;
+	struct timespec guard;
+
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	VATTR_NULL(vap);
+	if (v3) {
+		nfsm_srvsattr(vap);
+		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
+		gcheck = fxdr_unsigned(int, *tl);
+		if (gcheck) {
+			nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
+			fxdr_nfsv3time(tl, &guard);
+		}
+	} else {
+		nfsm_dissect(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
+		/*
+		 * Nah nah nah nah na nah
+		 * There is a bug in the Sun client that puts 0xffff in the mode
+		 * field of sattr when it should put in 0xffffffff. The u_short
+		 * doesn't sign extend.
+		 * --> check the low order 2 bytes for 0xffff
+		 */
+		if ((fxdr_unsigned(int, sp->sa_mode) & 0xffff) != 0xffff)
+			vap->va_mode = nfstov_mode(sp->sa_mode);
+		if (sp->sa_uid != nfs_xdrneg1)
+			vap->va_uid = fxdr_unsigned(uid_t, sp->sa_uid);
+		if (sp->sa_gid != nfs_xdrneg1)
+			vap->va_gid = fxdr_unsigned(gid_t, sp->sa_gid);
+		if (sp->sa_size != nfs_xdrneg1)
+			vap->va_size = fxdr_unsigned(u_quad_t, sp->sa_size);
+		if (sp->sa_atime.nfsv2_sec != nfs_xdrneg1) {
+#ifdef notyet
+			fxdr_nfsv2time(&sp->sa_atime, &vap->va_atime);
+#else
+			vap->va_atime.tv_sec =
+				fxdr_unsigned(long, sp->sa_atime.nfsv2_sec);
+			vap->va_atime.tv_nsec = 0;
+#endif
+		}
+		if (sp->sa_mtime.nfsv2_sec != nfs_xdrneg1)
+			fxdr_nfsv2time(&sp->sa_mtime, &vap->va_mtime);
+
+	}
+
+	/*
+	 * Now that we have all the fields, lets do it.
+	 */
+	if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
+	    &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) {
+		nfsm_reply(2 * NFSX_UNSIGNED);
+		nfsm_srvwcc_data(preat_ret, &preat, postat_ret, vap);
+		return (0);
+	}
+	nqsrv_getl(vp, ND_WRITE);
+	if (v3) {
+		/* v3 guard check: ctime must match or reply NFSERR_NOT_SYNC. */
+		error = preat_ret = VOP_GETATTR(vp, &preat, cred, procp);
+		if (!error && gcheck &&
+			(preat.va_ctime.tv_sec != guard.tv_sec ||
+			 preat.va_ctime.tv_nsec != guard.tv_nsec))
+			error = NFSERR_NOT_SYNC;
+		if (error) {
+			vput(vp);
+			nfsm_reply(NFSX_WCCDATA(v3));
+			nfsm_srvwcc_data(preat_ret, &preat, postat_ret, vap);
+			return (0);
+		}
+	}
+
+	/*
+	 * If the size is being changed write access is required, otherwise
+	 * just check for a read only file system.
+	 */
+	if (vap->va_size == ((u_quad_t)((quad_t) -1))) {
+		if (rdonly || (vp->v_mount->mnt_flag & MNT_RDONLY)) {
+			error = EROFS;
+			goto out;
+		}
+	} else {
+		if (vp->v_type == VDIR) {
+			error = EISDIR;
+			goto out;
+		} else if ((error = nfsrv_access(vp, VWRITE, cred, rdonly,
+			procp, 0)))
+			goto out;
+	}
+	error = VOP_SETATTR(vp, vap, cred, procp);
+	postat_ret = VOP_GETATTR(vp, vap, cred, procp);
+	if (!error)
+		error = postat_ret;
+out:
+	vput(vp);
+	nfsm_reply(NFSX_WCCORFATTR(v3));
+	if (v3) {
+		nfsm_srvwcc_data(preat_ret, &preat, postat_ret, vap);
+		return (0);
+	} else {
+		nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR);
+		nfsm_srvfillattr(vap, fp);
+	}
+	nfsm_srvdone;
+}
+
+/*
+ * nfs lookup rpc
+ */
+int
+nfsrv_lookup(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	register struct nfs_fattr *fp;
+	struct nameidata nd, *ndp = &nd;
+#ifdef notdef
+	struct nameidata ind;
+#endif
+	struct vnode *vp, *dirp;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	register caddr_t cp;
+	register u_long *tl;
+	register long t1;
+	caddr_t bpos;
+	int error = 0, cache, len, dirattr_ret = 1;
+	int v3 = (nfsd->nd_flag & ND_NFSV3), pubflag;
+	char *cp2;
+	struct mbuf *mb, *mb2, *mreq;
+	struct vattr va, dirattr, *vap = &va;
+	u_quad_t frev;
+
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	nfsm_srvnamesiz(len);
+
+	pubflag = nfs_ispublicfh(fhp);
+
+	nd.ni_cnd.cn_cred = cred;
+	nd.ni_cnd.cn_nameiop = LOOKUP;
+	nd.ni_cnd.cn_flags = LOCKLEAF | SAVESTART;
+	error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos,
+		&dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), pubflag);
+
+/* XXX CSM 12/4/97 Revisit when enabling WebNFS */
+#ifdef notyet
+	if (!error && pubflag) {
+		if (nd.ni_vp->v_type == VDIR && nfs_pub.np_index != NULL) {
+			/*
+			 * Setup call to lookup() to see if we can find
+			 * the index file. Arguably, this doesn't belong
+			 * in a kernel.. Ugh.
+			 */
+			ind = nd;
+			VOP_UNLOCK(nd.ni_vp, 0, procp);
+			ind.ni_pathlen = strlen(nfs_pub.np_index);
+			ind.ni_cnd.cn_nameptr = ind.ni_cnd.cn_pnbuf =
+				nfs_pub.np_index;
+			ind.ni_startdir = nd.ni_vp;
+			VREF(ind.ni_startdir);
+			error = lookup(&ind);
+			if (!error) {
+				/*
+				 * Found an index file. Get rid of
+				 * the old references.
+				 */
+				if (dirp)
+					vrele(dirp);
+				dirp = nd.ni_vp;
+				vrele(nd.ni_startdir);
+				ndp = &ind;
+			} else
+				error = 0;
+		}
+		/*
+		 * If the public filehandle was used, check that this lookup
+		 * didn't result in a filehandle outside the publicly exported
+		 * filesystem.
+		 */
+
+		if (!error && ndp->ni_vp->v_mount != nfs_pub.np_mount) {
+			vput(nd.ni_vp);
+			error = EPERM;
+		}
+	}
+#endif
+
+	if (dirp) {
+		if (v3)
+			dirattr_ret = VOP_GETATTR(dirp, &dirattr, cred,
+				procp);
+		vrele(dirp);
+	}
+
+	if (error) {
+		nfsm_reply(NFSX_POSTOPATTR(v3));
+		nfsm_srvpostop_attr(dirattr_ret, &dirattr);
+		return (0);
+	}
+
+	nqsrv_getl(ndp->ni_startdir, ND_READ);
+	vrele(ndp->ni_startdir);
+	FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI);
+	vp = ndp->ni_vp;
+	/* Build the file handle (fsid + fid) for the vnode we found. */
+	bzero((caddr_t)fhp, sizeof(nfh));
+	fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid;
+	error = VFS_VPTOFH(vp, &fhp->fh_fid);
+	if (!error)
+		error = VOP_GETATTR(vp, vap, cred, procp);
+	vput(vp);
+	nfsm_reply(NFSX_SRVFH(v3) + NFSX_POSTOPORFATTR(v3) + NFSX_POSTOPATTR(v3));
+	if (error) {
+		nfsm_srvpostop_attr(dirattr_ret, &dirattr);
+		return (0);
+	}
+	nfsm_srvfhtom(fhp, v3);
+	if (v3) {
+		nfsm_srvpostop_attr(0, vap);
+		nfsm_srvpostop_attr(dirattr_ret, &dirattr);
+	} else {
+		nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR);
+		nfsm_srvfillattr(vap, fp);
+	}
+	nfsm_srvdone;
+}
+
+/*
+ * nfs readlink service
+ */
+int
+nfsrv_readlink(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	struct iovec iv[(NFS_MAXPATHLEN+MLEN-1)/MLEN];
+	register struct iovec *ivp = iv;
+	register struct mbuf *mp;
+	register u_long *tl;
+	register long t1;
+	caddr_t bpos;
+	int error = 0, rdonly, cache, i, tlen, len, getret;
+	int v3 = (nfsd->nd_flag & ND_NFSV3);
+	char *cp2;
+	struct mbuf *mb, *mb2, *mp2, *mp3, *mreq;
+	struct vnode *vp;
+	struct vattr attr;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	struct uio io, *uiop = &io;
+	u_quad_t frev;
+
+#ifndef nolint
+	mp2 = mp3 = (struct mbuf *)0;
+#endif
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	len = 0;
+	i = 0;
+	/* Build an mbuf chain + iovec array large enough for NFS_MAXPATHLEN. */
+	while (len < NFS_MAXPATHLEN) {
+		MGET(mp, M_WAIT, MT_DATA);
+		MCLGET(mp, M_WAIT);
+		mp->m_len = NFSMSIZ(mp);
+		if (len == 0)
+			mp3 = mp2 = mp;
+		else {
+			mp2->m_next = mp;
+			mp2 = mp;
+		}
+		if ((len+mp->m_len) > NFS_MAXPATHLEN) {
+			mp->m_len = NFS_MAXPATHLEN-len;
+			len = NFS_MAXPATHLEN;
+		} else
+			len += mp->m_len;
+		ivp->iov_base = mtod(mp, caddr_t);
+		ivp->iov_len = mp->m_len;
+		i++;
+		ivp++;
+	}
+	uiop->uio_iov = iv;
+	uiop->uio_iovcnt = i;
+	uiop->uio_offset = 0;
+	uiop->uio_resid = len;
+	uiop->uio_rw = UIO_READ;
+	uiop->uio_segflg = UIO_SYSSPACE;
+	uiop->uio_procp = (struct proc *)0;
+	if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
+	    &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) {
+		m_freem(mp3);
+		nfsm_reply(2 * NFSX_UNSIGNED);
+		nfsm_srvpostop_attr(1, (struct vattr *)0);
+		return (0);
+	}
+	if (vp->v_type != VLNK) {
+		if (v3)
+			error = EINVAL;
+		else
+			error = ENXIO;
+		goto out;
+	}
+	nqsrv_getl(vp, ND_READ);
+	error = VOP_READLINK(vp, uiop, cred);
+out:
+	getret = VOP_GETATTR(vp, &attr, cred, procp);
+	vput(vp);
+	if (error)
+		m_freem(mp3);
+	nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_UNSIGNED);
+	if (v3) {
+		nfsm_srvpostop_attr(getret, &attr);
+		if (error)
+			return (0);
+	}
+	if (uiop->uio_resid > 0) {
+		/* Trim unused space off the reply chain, pad to XDR boundary. */
+		len -= uiop->uio_resid;
+		tlen = nfsm_rndup(len);
+		nfsm_adj(mp3, NFS_MAXPATHLEN-tlen, tlen-len);
+	}
+	nfsm_build(tl, u_long *, NFSX_UNSIGNED);
+	*tl = txdr_unsigned(len);
+	mb->m_next = mp3;
+	nfsm_srvdone;
+}
+
+/*
+ * nfs read service
+ */
+int
+nfsrv_read(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	register struct iovec *iv;
+	struct iovec *iv2;
+	register struct mbuf *m;
+	register struct nfs_fattr *fp;
+	register u_long *tl;
+	register long t1;
+	register int i;
+	caddr_t bpos;
+	int error = 0, rdonly, cache, cnt, len, left, siz, tlen, getret;
+	int v3 = (nfsd->nd_flag & ND_NFSV3), reqlen;
+	char *cp2;
+	struct mbuf *mb, *mb2, *mreq;
+	struct mbuf *m2;
+	struct vnode *vp;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	struct uio io, *uiop = &io;
+	struct vattr va, *vap = &va;
+	off_t off;
+	u_quad_t frev;
+
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	if (v3) {
+		nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
+		fxdr_hyper(tl, &off);
+	} else {
+		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
+		off = (off_t)fxdr_unsigned(u_long, *tl);
+	}
+	nfsm_srvstrsiz(reqlen, NFS_SRVMAXDATA(nfsd));
+	if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
+	    &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) {
+		nfsm_reply(2 * NFSX_UNSIGNED);
+		nfsm_srvpostop_attr(1, (struct vattr *)0);
+		return (0);
+	}
+	if (vp->v_type != VREG) {
+		if (v3)
+			error = EINVAL;
+		else
+			error = (vp->v_type == VDIR) ? EISDIR : EACCES;
+	}
+	if (!error) {
+		nqsrv_getl(vp, ND_READ);
+		/* Fall back to VEXEC: execute-only files may still be read. */
+		if ((error = nfsrv_access(vp, VREAD, cred, rdonly, procp, 1)))
+			error = nfsrv_access(vp, VEXEC, cred, rdonly, procp, 1);
+	}
+	getret = VOP_GETATTR(vp, vap, cred, procp);
+	if (!error)
+		error = getret;
+	if (error) {
+		vput(vp);
+		nfsm_reply(NFSX_POSTOPATTR(v3));
+		nfsm_srvpostop_attr(getret, vap);
+		return (0);
+	}
+	/* Clamp the count to what the file actually holds past 'off'. */
+	if (off >= vap->va_size)
+		cnt = 0;
+	else if ((off + reqlen) > vap->va_size)
+		cnt = nfsm_rndup(vap->va_size - off);
+	else
+		cnt = reqlen;
+	nfsm_reply(NFSX_POSTOPORFATTR(v3) + 3 * NFSX_UNSIGNED+nfsm_rndup(cnt));
+	if (v3) {
+		nfsm_build(tl, u_long *, NFSX_V3FATTR + 4 * NFSX_UNSIGNED);
+		*tl++ = nfs_true;
+		fp = (struct nfs_fattr *)tl;
+		tl += (NFSX_V3FATTR / sizeof (u_long));
+	} else {
+		nfsm_build(tl, u_long *, NFSX_V2FATTR + NFSX_UNSIGNED);
+		fp = (struct nfs_fattr *)tl;
+		tl += (NFSX_V2FATTR / sizeof (u_long));
+	}
+	len = left = cnt;
+	if (cnt > 0) {
+		/*
+		 * Generate the mbuf list with the uio_iov ref. to it.
+		 */
+		i = 0;
+		m = m2 = mb;
+		while (left > 0) {
+			siz = min(M_TRAILINGSPACE(m), left);
+			if (siz > 0) {
+				left -= siz;
+				i++;
+			}
+			if (left > 0) {
+				MGET(m, M_WAIT, MT_DATA);
+				MCLGET(m, M_WAIT);
+				m->m_len = 0;
+				m2->m_next = m;
+				m2 = m;
+			}
+		}
+		MALLOC(iv, struct iovec *, i * sizeof (struct iovec),
+		       M_TEMP, M_WAITOK);
+		uiop->uio_iov = iv2 = iv;
+		m = mb;
+		left = cnt;
+		i = 0;
+		/* Second pass: point each iovec at the trailing space. */
+		while (left > 0) {
+			if (m == NULL)
+				panic("nfsrv_read iov");
+			siz = min(M_TRAILINGSPACE(m), left);
+			if (siz > 0) {
+				iv->iov_base = mtod(m, caddr_t) + m->m_len;
+				iv->iov_len = siz;
+				m->m_len += siz;
+				left -= siz;
+				iv++;
+				i++;
+			}
+			m = m->m_next;
+		}
+		uiop->uio_iovcnt = i;
+		uiop->uio_offset = off;
+		uiop->uio_resid = cnt;
+		uiop->uio_rw = UIO_READ;
+		uiop->uio_segflg = UIO_SYSSPACE;
+		error = VOP_READ(vp, uiop, IO_NODELOCKED, cred);
+		off = uiop->uio_offset;
+		FREE((caddr_t)iv2, M_TEMP);
+		/* Though our code replaces error with getret, the way I read
+		 * the v3 spec, it appears you should leave the error alone, but
+		 * still return vap and not assign error = getret. But leaving
+		 * that alone. m_freem(mreq) looks bogus. Taking it out. Should be
+		 * mrep or not there at all. Causes panic. ekn */
+		if (error || (getret = VOP_GETATTR(vp, vap, cred, procp))) {
+			if (!error)
+				error = getret;
+			/* m_freem(mreq);*/
+			vput(vp);
+			nfsm_reply(NFSX_POSTOPATTR(v3));
+			nfsm_srvpostop_attr(getret, vap);
+			return (0);
+		}
+	} else
+		uiop->uio_resid = 0;
+	vput(vp);
+	nfsm_srvfillattr(vap, fp);
+	len -= uiop->uio_resid;
+	tlen = nfsm_rndup(len);
+	if (cnt != tlen || tlen != len)
+		nfsm_adj(mb, cnt - tlen, tlen - len);
+	if (v3) {
+		*tl++ = txdr_unsigned(len);
+		/* v3 eof flag: short read means we hit end of file. */
+		if (len < reqlen)
+			*tl++ = nfs_true;
+		else
+			*tl++ = nfs_false;
+	}
+	*tl = txdr_unsigned(len);
+	nfsm_srvdone;
+}
+
+/*
+ * nfs write service
+ */
+int
+nfsrv_write(nfsd, slp, procp, mrq)
+	struct nfsrv_descript *nfsd;
+	struct nfssvc_sock *slp;
+	struct proc *procp;
+	struct mbuf **mrq;
+{
+	struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md;
+	struct mbuf *nam = nfsd->nd_nam;
+	caddr_t dpos = nfsd->nd_dpos;
+	struct ucred *cred = &nfsd->nd_cr;
+	register struct iovec *ivp;
+	register int i, cnt;
+	register struct mbuf *mp;
+	register struct nfs_fattr *fp;
+	struct iovec *iv;
+	struct vattr va, forat;
+	register struct vattr *vap = &va;
+	register u_long *tl;
+	register long t1;
+	caddr_t bpos;
+	int error = 0, rdonly, cache, len, forat_ret = 1;
+	int ioflags, aftat_ret = 1, retlen, zeroing, adjust;
+	int stable = NFSV3WRITE_FILESYNC;
+	int v3 = (nfsd->nd_flag & ND_NFSV3);
+	char *cp2;
+	struct mbuf *mb, *mb2, *mreq;
+	struct vnode *vp;
+	nfsfh_t nfh;
+	fhandle_t *fhp;
+	struct uio io, *uiop = &io;
+	off_t off;
+	u_quad_t frev;
+
+	if (mrep == NULL) {
+		*mrq = NULL;
+		return (0);
+	}
+	fhp = &nfh.fh_generic;
+	nfsm_srvmtofh(fhp);
+	if (v3) {
+		nfsm_dissect(tl, u_long *, 5 * NFSX_UNSIGNED);
+		fxdr_hyper(tl, &off);
+		tl += 3;
+		stable = fxdr_unsigned(int, *tl++);
+	} else {
+		nfsm_dissect(tl, u_long *, 4 * NFSX_UNSIGNED);
+		off = (off_t)fxdr_unsigned(u_long, *++tl);
+		tl += 2;
+		if (nfs_async)
+			stable = NFSV3WRITE_UNSTABLE;
+	}
+	retlen = len = fxdr_unsigned(long, *tl);
+	cnt = i = 0;
+
+	/*
+	 * For NFS Version 2, it is not obvious what a write of zero length
+	 * should do, but I might as well be consistent with Version 3,
+	 * which is to return ok so long as there are no permission problems.
+	 */
+	if (len > 0) {
+		/* Trim RPC header and trailing slop so mrep holds only data. */
+		zeroing = 1;
+		mp = mrep;
+		while (mp) {
+			if (mp == md) {
+				zeroing = 0;
+				adjust = dpos - mtod(mp, caddr_t);
+				mp->m_len -= adjust;
+				if (mp->m_len > 0 && adjust > 0)
+					NFSMADV(mp, adjust);
+			}
+			if (zeroing)
+				mp->m_len = 0;
+			else if (mp->m_len > 0) {
+				i += mp->m_len;
+				if (i > len) {
+					mp->m_len -= (i - len);
+					zeroing = 1;
+				}
+				if (mp->m_len > 0)
+					cnt++;
+			}
+			mp = mp->m_next;
+		}
+	}
+	if (len > NFS_MAXDATA || len < 0 || i < len) {
+		error = EIO;
+		nfsm_reply(2 * NFSX_UNSIGNED);
+		nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, vap);
+		return (0);
+	}
+	if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam,
+	    &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) {
+		nfsm_reply(2 * NFSX_UNSIGNED);
+		nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, vap);
+		return (0);
+	}
+	if (v3)
+		forat_ret = VOP_GETATTR(vp, &forat, cred, procp);
+	if (vp->v_type != VREG) {
+		if (v3)
+			error = EINVAL;
+		else
+			error = (vp->v_type == VDIR) ? EISDIR : EACCES;
+	}
+	if (!error) {
+		nqsrv_getl(vp, ND_WRITE);
+		error = nfsrv_access(vp, VWRITE, cred, rdonly, procp, 1);
+	}
+	if (error) {
+		vput(vp);
+		nfsm_reply(NFSX_WCCDATA(v3));
+		nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, vap);
+		return (0);
+	}
+
+	if (len > 0) {
+		MALLOC(ivp, struct iovec *, cnt * sizeof (struct iovec), M_TEMP,
+		       M_WAITOK);
+		uiop->uio_iov = iv = ivp;
+		uiop->uio_iovcnt = cnt;
+		mp = mrep;
+		while (mp) {
+			if (mp->m_len > 0) {
+				ivp->iov_base = mtod(mp, caddr_t);
+				ivp->iov_len = mp->m_len;
+				ivp++;
+			}
+			mp = mp->m_next;
+		}
+
+		/*
+		 * XXX
+		 * The IO_METASYNC flag indicates that all metadata (and not just
+		 * enough to ensure data integrity) must be written to stable storage
+		 * synchronously.
+		 * (IO_METASYNC is not yet implemented in 4.4BSD-Lite.)
+		 */
+		if (stable == NFSV3WRITE_UNSTABLE)
+			ioflags = IO_NODELOCKED;
+		else if (stable == NFSV3WRITE_DATASYNC)
+			ioflags = (IO_SYNC | IO_NODELOCKED);
+		else
+			ioflags = (IO_METASYNC | IO_SYNC | IO_NODELOCKED);
+		uiop->uio_resid = len;
+		uiop->uio_rw = UIO_WRITE;
+		uiop->uio_segflg = UIO_SYSSPACE;
+		uiop->uio_procp = (struct proc *)0;
+		uiop->uio_offset = off;
+		error = VOP_WRITE(vp, uiop, ioflags, cred);
+		nfsstats.srvvop_writes++;
+		FREE((caddr_t)iv, M_TEMP);
+	}
+	aftat_ret = VOP_GETATTR(vp, vap, cred, procp);
+	vput(vp);
+	if (!error)
+		error = aftat_ret;
+	nfsm_reply(NFSX_PREOPATTR(v3) + NFSX_POSTOPORFATTR(v3) +
+		2 * NFSX_UNSIGNED + NFSX_WRITEVERF(v3));
+	if (v3) {
+		nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, vap);
+		if (error)
+			return (0);
+		nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED);
+		*tl++ = txdr_unsigned(retlen);
+		/*
+		 * If nfs_async is set, then pretend the write was FILESYNC.
+		 */
+		if (stable == NFSV3WRITE_UNSTABLE && !nfs_async)
+			*tl++ = txdr_unsigned(stable);
+		else
+			*tl++ = txdr_unsigned(NFSV3WRITE_FILESYNC);
+		/*
+		 * Actually, there is no need to txdr these fields,
+		 * but it may make the values more human readable,
+		 * for debugging purposes.
+		 */
+		*tl++ = txdr_unsigned(boottime.tv_sec);
+		*tl = txdr_unsigned(boottime.tv_usec);
+	} else {
+		nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR);
+		nfsm_srvfillattr(vap, fp);
+	}
+	nfsm_srvdone;
+}
+
+/*
+ * NFS write service with write gathering support. Called when
+ * nfsrvw_procrastinate > 0.
+ * See: Chet Juszczak, "Improving the Write Performance of an NFS Server",
+ * in Proc. of the Winter 1994 Usenix Conference, pg. 247-259, San Francisco,
+ * Jan. 1994.
+ */ +int +nfsrv_writegather(ndp, slp, procp, mrq) + struct nfsrv_descript **ndp; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + register struct iovec *ivp; + register struct mbuf *mp; + register struct nfsrv_descript *wp, *nfsd, *owp, *swp; + register struct nfs_fattr *fp; + register int i; + struct iovec *iov; + struct nfsrvw_delayhash *wpp; + struct ucred *cred; + struct vattr va, forat; + register u_long *tl; + register long t1; + caddr_t bpos, dpos; + int error = 0, rdonly, cache, len, forat_ret = 1; + int ioflags, aftat_ret = 1, s, adjust, v3, zeroing; + char *cp2; + struct mbuf *mb, *mb2, *mreq, *mrep, *md; + struct vnode *vp; + struct uio io, *uiop = &io; + u_quad_t frev, cur_usec; + +#ifndef nolint + i = 0; + len = 0; +#endif + *mrq = NULL; + if (*ndp) { + nfsd = *ndp; + *ndp = NULL; + mrep = nfsd->nd_mrep; + md = nfsd->nd_md; + dpos = nfsd->nd_dpos; + cred = &nfsd->nd_cr; + v3 = (nfsd->nd_flag & ND_NFSV3); + LIST_INIT(&nfsd->nd_coalesce); + nfsd->nd_mreq = NULL; + nfsd->nd_stable = NFSV3WRITE_FILESYNC; + cur_usec = (u_quad_t)time.tv_sec * 1000000 + (u_quad_t)time.tv_usec; + nfsd->nd_time = cur_usec + + (v3 ? nfsrvw_procrastinate_v3 : nfsrvw_procrastinate); + + /* + * Now, get the write header.. + */ + nfsm_srvmtofh(&nfsd->nd_fh); + if (v3) { + nfsm_dissect(tl, u_long *, 5 * NFSX_UNSIGNED); + fxdr_hyper(tl, &nfsd->nd_off); + tl += 3; + nfsd->nd_stable = fxdr_unsigned(int, *tl++); + } else { + nfsm_dissect(tl, u_long *, 4 * NFSX_UNSIGNED); + nfsd->nd_off = (off_t)fxdr_unsigned(u_long, *++tl); + tl += 2; + if (nfs_async) + nfsd->nd_stable = NFSV3WRITE_UNSTABLE; + } + len = fxdr_unsigned(long, *tl); + nfsd->nd_len = len; + nfsd->nd_eoff = nfsd->nd_off + len; + + /* + * Trim the header out of the mbuf list and trim off any trailing + * junk so that the mbuf list has only the write data. 
+ */ + zeroing = 1; + i = 0; + mp = mrep; + while (mp) { + if (mp == md) { + zeroing = 0; + adjust = dpos - mtod(mp, caddr_t); + mp->m_len -= adjust; + if (mp->m_len > 0 && adjust > 0) + NFSMADV(mp, adjust); + } + if (zeroing) + mp->m_len = 0; + else { + i += mp->m_len; + if (i > len) { + mp->m_len -= (i - len); + zeroing = 1; + } + } + mp = mp->m_next; + } + if (len > NFS_MAXDATA || len < 0 || i < len) { +nfsmout: + m_freem(mrep); + error = EIO; + nfsm_writereply(2 * NFSX_UNSIGNED, v3); + if (v3) + nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, &va); + nfsd->nd_mreq = mreq; + nfsd->nd_mrep = NULL; + nfsd->nd_time = 0; + } + + /* + * Add this entry to the hash and time queues. + */ + s = splsoftclock(); + owp = NULL; + wp = slp->ns_tq.lh_first; + while (wp && wp->nd_time < nfsd->nd_time) { + owp = wp; + wp = wp->nd_tq.le_next; + } + NFS_DPF(WG, ("Q%03x", nfsd->nd_retxid & 0xfff)); + if (owp) { + LIST_INSERT_AFTER(owp, nfsd, nd_tq); + } else { + LIST_INSERT_HEAD(&slp->ns_tq, nfsd, nd_tq); + } + if (nfsd->nd_mrep) { + wpp = NWDELAYHASH(slp, nfsd->nd_fh.fh_fid.fid_data); + owp = NULL; + wp = wpp->lh_first; + while (wp && + bcmp((caddr_t)&nfsd->nd_fh,(caddr_t)&wp->nd_fh,NFSX_V3FH)) { + owp = wp; + wp = wp->nd_hash.le_next; + } + while (wp && wp->nd_off < nfsd->nd_off && + !bcmp((caddr_t)&nfsd->nd_fh,(caddr_t)&wp->nd_fh,NFSX_V3FH)) { + owp = wp; + wp = wp->nd_hash.le_next; + } + if (owp) { + LIST_INSERT_AFTER(owp, nfsd, nd_hash); + + /* + * Search the hash list for overlapping entries and + * coalesce. + */ + for(; nfsd && NFSW_CONTIG(owp, nfsd); nfsd = wp) { + wp = nfsd->nd_hash.le_next; + if (NFSW_SAMECRED(owp, nfsd)) + nfsrvw_coalesce(owp, nfsd); + } + } else { + LIST_INSERT_HEAD(wpp, nfsd, nd_hash); + } + } + splx(s); + } + + /* + * Now, do VOP_WRITE()s for any one(s) that need to be done now + * and generate the associated reply mbuf list(s). 
+ */ +loop1: + cur_usec = (u_quad_t)time.tv_sec * 1000000 + (u_quad_t)time.tv_usec; + s = splsoftclock(); + for (nfsd = slp->ns_tq.lh_first; nfsd; nfsd = owp) { + owp = nfsd->nd_tq.le_next; + if (nfsd->nd_time > cur_usec) + break; + if (nfsd->nd_mreq) + continue; + NFS_DPF(WG, ("P%03x", nfsd->nd_retxid & 0xfff)); + LIST_REMOVE(nfsd, nd_tq); + LIST_REMOVE(nfsd, nd_hash); + splx(s); + mrep = nfsd->nd_mrep; + nfsd->nd_mrep = NULL; + cred = &nfsd->nd_cr; + v3 = (nfsd->nd_flag & ND_NFSV3); + forat_ret = aftat_ret = 1; + error = nfsrv_fhtovp(&nfsd->nd_fh, 1, &vp, cred, slp, + nfsd->nd_nam, &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE); + if (!error) { + if (v3) + forat_ret = VOP_GETATTR(vp, &forat, cred, procp); + if (vp->v_type != VREG) { + if (v3) + error = EINVAL; + else + error = (vp->v_type == VDIR) ? EISDIR : EACCES; + } + } else + vp = NULL; + if (!error) { + nqsrv_getl(vp, ND_WRITE); + error = nfsrv_access(vp, VWRITE, cred, rdonly, procp, 1); + } + + if (nfsd->nd_stable == NFSV3WRITE_UNSTABLE) + ioflags = IO_NODELOCKED; + else if (nfsd->nd_stable == NFSV3WRITE_DATASYNC) + ioflags = (IO_SYNC | IO_NODELOCKED); + else + ioflags = (IO_METASYNC | IO_SYNC | IO_NODELOCKED); + uiop->uio_rw = UIO_WRITE; + uiop->uio_segflg = UIO_SYSSPACE; + uiop->uio_procp = (struct proc *)0; + uiop->uio_offset = nfsd->nd_off; + uiop->uio_resid = nfsd->nd_eoff - nfsd->nd_off; + if (uiop->uio_resid > 0) { + mp = mrep; + i = 0; + while (mp) { + if (mp->m_len > 0) + i++; + mp = mp->m_next; + } + uiop->uio_iovcnt = i; + MALLOC(iov, struct iovec *, i * sizeof (struct iovec), + M_TEMP, M_WAITOK); + uiop->uio_iov = ivp = iov; + mp = mrep; + while (mp) { + if (mp->m_len > 0) { + ivp->iov_base = mtod(mp, caddr_t); + ivp->iov_len = mp->m_len; + ivp++; + } + mp = mp->m_next; + } + if (!error) { + error = VOP_WRITE(vp, uiop, ioflags, cred); + nfsstats.srvvop_writes++; + } + FREE((caddr_t)iov, M_TEMP); + } + m_freem(mrep); + if (vp) { + aftat_ret = VOP_GETATTR(vp, &va, cred, procp); + vput(vp); + } + + 
/* + * Loop around generating replies for all write rpcs that have + * now been completed. + */ + swp = nfsd; + do { + NFS_DPF(WG, ("R%03x", nfsd->nd_retxid & 0xfff)); + if (error) { + nfsm_writereply(NFSX_WCCDATA(v3), v3); + if (v3) { + nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, &va); + } + } else { + nfsm_writereply(NFSX_PREOPATTR(v3) + + NFSX_POSTOPORFATTR(v3) + 2 * NFSX_UNSIGNED + + NFSX_WRITEVERF(v3), v3); + if (v3) { + nfsm_srvwcc_data(forat_ret, &forat, aftat_ret, &va); + nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(nfsd->nd_len); + *tl++ = txdr_unsigned(swp->nd_stable); + /* + * Actually, there is no need to txdr these fields, + * but it may make the values more human readable, + * for debugging purposes. + */ + *tl++ = txdr_unsigned(boottime.tv_sec); + *tl = txdr_unsigned(boottime.tv_usec); + } else { + nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR); + nfsm_srvfillattr(&va, fp); + } + } + nfsd->nd_mreq = mreq; + if (nfsd->nd_mrep) + panic("nfsrv_write: nd_mrep not free"); + + /* + * Done. Put it at the head of the timer queue so that + * the final phase can return the reply. + */ + s = splsoftclock(); + if (nfsd != swp) { + nfsd->nd_time = 0; + LIST_INSERT_HEAD(&slp->ns_tq, nfsd, nd_tq); + } + nfsd = swp->nd_coalesce.lh_first; + if (nfsd) { + LIST_REMOVE(nfsd, nd_tq); + } + splx(s); + } while (nfsd); + s = splsoftclock(); + swp->nd_time = 0; + LIST_INSERT_HEAD(&slp->ns_tq, swp, nd_tq); + splx(s); + goto loop1; + } + splx(s); + + /* + * Search for a reply to return. + */ + s = splsoftclock(); + for (nfsd = slp->ns_tq.lh_first; nfsd; nfsd = nfsd->nd_tq.le_next) + if (nfsd->nd_mreq) { + NFS_DPF(WG, ("X%03x", nfsd->nd_retxid & 0xfff)); + LIST_REMOVE(nfsd, nd_tq); + *mrq = nfsd->nd_mreq; + *ndp = nfsd; + break; + } + splx(s); + return (0); +} + +/* + * Coalesce the write request nfsd into owp. 
To do this we must: + * - remove nfsd from the queues + * - merge nfsd->nd_mrep into owp->nd_mrep + * - update the nd_eoff and nd_stable for owp + * - put nfsd on owp's nd_coalesce list + * NB: Must be called at splsoftclock(). + */ +static void +nfsrvw_coalesce(owp, nfsd) + register struct nfsrv_descript *owp; + register struct nfsrv_descript *nfsd; +{ + register int overlap; + register struct mbuf *mp; + struct nfsrv_descript *p; + + NFS_DPF(WG, ("C%03x-%03x", + nfsd->nd_retxid & 0xfff, owp->nd_retxid & 0xfff)); + LIST_REMOVE(nfsd, nd_hash); + LIST_REMOVE(nfsd, nd_tq); + if (owp->nd_eoff < nfsd->nd_eoff) { + overlap = owp->nd_eoff - nfsd->nd_off; + if (overlap < 0) + panic("nfsrv_coalesce: bad off"); + if (overlap > 0) + m_adj(nfsd->nd_mrep, overlap); + mp = owp->nd_mrep; + while (mp->m_next) + mp = mp->m_next; + mp->m_next = nfsd->nd_mrep; + owp->nd_eoff = nfsd->nd_eoff; + } else + m_freem(nfsd->nd_mrep); + nfsd->nd_mrep = NULL; + if (nfsd->nd_stable == NFSV3WRITE_FILESYNC) + owp->nd_stable = NFSV3WRITE_FILESYNC; + else if (nfsd->nd_stable == NFSV3WRITE_DATASYNC && + owp->nd_stable == NFSV3WRITE_UNSTABLE) + owp->nd_stable = NFSV3WRITE_DATASYNC; + LIST_INSERT_HEAD(&owp->nd_coalesce, nfsd, nd_tq); + + /* + * If nfsd had anything else coalesced into it, transfer them + * to owp, otherwise their replies will never get sent. + */ + for (p = nfsd->nd_coalesce.lh_first; p; + p = nfsd->nd_coalesce.lh_first) { + LIST_REMOVE(p, nd_tq); + LIST_INSERT_HEAD(&owp->nd_coalesce, p, nd_tq); + } +} + +/* + * Sort the group list in increasing numerical order. + * (Insertion sort by Chris Torek, who was grossed out by the bubble sort + * that used to be here.) + */ +void +nfsrvw_sort(list, num) + register gid_t *list; + register int num; +{ + register int i, j; + gid_t v; + + /* Insertion sort. 
*/ + for (i = 1; i < num; i++) { + v = list[i]; + /* find correct slot for value v, moving others up */ + for (j = i; --j >= 0 && v < list[j];) + list[j + 1] = list[j]; + list[j + 1] = v; + } +} + +/* + * copy credentials making sure that the result can be compared with bcmp(). + */ +void +nfsrv_setcred(incred, outcred) + register struct ucred *incred, *outcred; +{ + register int i; + + bzero((caddr_t)outcred, sizeof (struct ucred)); + outcred->cr_ref = 1; + outcred->cr_uid = incred->cr_uid; + outcred->cr_ngroups = incred->cr_ngroups; + for (i = 0; i < incred->cr_ngroups; i++) + outcred->cr_groups[i] = incred->cr_groups[i]; + nfsrvw_sort(outcred->cr_groups, outcred->cr_ngroups); +} + +/* + * nfs create service + * now does a truncate to 0 length via. setattr if it already exists + */ +int +nfsrv_create(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register struct nfs_fattr *fp; + struct vattr va, dirfor, diraft; + register struct vattr *vap = &va; + register struct nfsv2_sattr *sp; + register u_long *tl; + struct nameidata nd; + register caddr_t cp; + register long t1; + caddr_t bpos; + int error = 0, rdev, cache, len, tsize, dirfor_ret = 1, diraft_ret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3), how, exclusive_flag = 0; + char *cp2; + struct mbuf *mb, *mb2, *mreq; + struct vnode *vp, *dirp = (struct vnode *)0; + nfsfh_t nfh; + fhandle_t *fhp; + u_quad_t frev, tempsize; + u_char cverf[NFSX_V3CREATEVERF]; + +#ifndef nolint + rdev = 0; +#endif + nd.ni_cnd.cn_nameiop = 0; + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvnamesiz(len); + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = CREATE; + nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | SAVESTART; + error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos, + &dirp, procp, 
(nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) { + if (v3) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, + procp); + else { + vrele(dirp); + dirp = (struct vnode *)0; + } + } + if (error) { + nfsm_reply(NFSX_WCCDATA(v3)); + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + if (dirp) + vrele(dirp); + return (0); + } + VATTR_NULL(vap); + if (v3) { + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + how = fxdr_unsigned(int, *tl); + switch (how) { + case NFSV3CREATE_GUARDED: + if (nd.ni_vp) { + error = EEXIST; + break; + } + case NFSV3CREATE_UNCHECKED: + nfsm_srvsattr(vap); + break; + case NFSV3CREATE_EXCLUSIVE: + nfsm_dissect(cp, caddr_t, NFSX_V3CREATEVERF); + bcopy(cp, cverf, NFSX_V3CREATEVERF); + exclusive_flag = 1; + if (nd.ni_vp == NULL) + vap->va_mode = 0; + break; + }; + vap->va_type = VREG; + } else { + nfsm_dissect(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + vap->va_type = IFTOVT(fxdr_unsigned(u_long, sp->sa_mode)); + if (vap->va_type == VNON) + vap->va_type = VREG; + vap->va_mode = nfstov_mode(sp->sa_mode); + switch (vap->va_type) { + case VREG: + tsize = fxdr_unsigned(long, sp->sa_size); + if (tsize != -1) + vap->va_size = (u_quad_t)tsize; + break; + case VCHR: + case VBLK: + case VFIFO: + rdev = fxdr_unsigned(long, sp->sa_size); + break; + }; + } + + /* + * Iff doesn't exist, create it + * otherwise just truncate to 0 length + * should I set the mode too ?? 
+ */ + if (nd.ni_vp == NULL) { + if (vap->va_type == VREG || vap->va_type == VSOCK) { + vrele(nd.ni_startdir); + nqsrv_getl(nd.ni_dvp, ND_WRITE); + error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap); + if (!error) { + nfsrv_object_create(nd.ni_vp); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + if (exclusive_flag) { + exclusive_flag = 0; + VATTR_NULL(vap); + bcopy(cverf, (caddr_t)&vap->va_atime, + NFSX_V3CREATEVERF); + error = VOP_SETATTR(nd.ni_vp, vap, cred, + procp); + } + } + } else if (vap->va_type == VCHR || vap->va_type == VBLK || + vap->va_type == VFIFO) { + if (vap->va_type == VCHR && rdev == 0xffffffff) + vap->va_type = VFIFO; + if (vap->va_type != VFIFO && + (error = suser(cred, (u_short *)0))) { + vrele(nd.ni_startdir); + _FREE_ZONE(nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + vput(nd.ni_dvp); + nfsm_reply(0); + return (error); + } else + vap->va_rdev = (dev_t)rdev; + nqsrv_getl(nd.ni_dvp, ND_WRITE); + if ((error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap))) { + vrele(nd.ni_startdir); + nfsm_reply(0); + } + nd.ni_cnd.cn_nameiop = LOOKUP; + nd.ni_cnd.cn_flags &= ~(LOCKPARENT | SAVESTART); + nd.ni_cnd.cn_proc = procp; + nd.ni_cnd.cn_cred = cred; + if ((error = lookup(&nd))) { + _FREE_ZONE(nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + nfsm_reply(0); + } + nfsrv_object_create(nd.ni_vp); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + if (nd.ni_cnd.cn_flags & ISSYMLINK) { + vrele(nd.ni_dvp); + vput(nd.ni_vp); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + error = EINVAL; + nfsm_reply(0); + } + } else { + vrele(nd.ni_startdir); + _FREE_ZONE(nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + vput(nd.ni_dvp); + error = ENXIO; + } + vp = nd.ni_vp; + } else { + vrele(nd.ni_startdir); + _FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + vp = nd.ni_vp; + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + 
vput(nd.ni_dvp); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (vap->va_size != -1) { + error = nfsrv_access(vp, VWRITE, cred, + (nd.ni_cnd.cn_flags & RDONLY), procp, 0); + if (!error) { + nqsrv_getl(vp, ND_WRITE); + tempsize = vap->va_size; + VATTR_NULL(vap); + vap->va_size = tempsize; + error = VOP_SETATTR(vp, vap, cred, + procp); + } + if (error) + vput(vp); + } else { + if (error) + vput(vp); /* make sure we catch the EEXIST for nfsv3 */ + } + } + if (!error) { + bzero((caddr_t)fhp, sizeof(nfh)); + fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid; + error = VFS_VPTOFH(vp, &fhp->fh_fid); + if (!error) + error = VOP_GETATTR(vp, vap, cred, procp); + vput(vp); + } + if (v3) { + if (exclusive_flag && !error && + bcmp(cverf, (caddr_t)&vap->va_atime, NFSX_V3CREATEVERF)) + error = EEXIST; + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + } + nfsm_reply(NFSX_SRVFH(v3) + NFSX_FATTR(v3) + NFSX_WCCDATA(v3)); + if (v3) { + if (!error) { + nfsm_srvpostop_fh(fhp); + nfsm_srvpostop_attr(0, vap); + } + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + } else { + nfsm_srvfhtom(fhp, v3); + nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR); + nfsm_srvfillattr(vap, fp); + } + return (error); +nfsmout: + if (dirp) + vrele(dirp); + if (nd.ni_cnd.cn_nameiop) { + vrele(nd.ni_startdir); + _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + } + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + if (nd.ni_vp) + vput(nd.ni_vp); + return (error); +} + +/* + * nfs v3 mknod service + */ +int +nfsrv_mknod(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + struct vattr va, dirfor, diraft; + register struct vattr *vap = &va; + register u_long *tl; + 
struct nameidata nd; + register long t1; + caddr_t bpos; + int error = 0, cache, len, dirfor_ret = 1, diraft_ret = 1; + u_long major, minor; + enum vtype vtyp; + char *cp2; + struct mbuf *mb, *mb2, *mreq; + struct vnode *vp, *dirp = (struct vnode *)0; + nfsfh_t nfh; + fhandle_t *fhp; + u_quad_t frev; + + nd.ni_cnd.cn_nameiop = 0; + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvnamesiz(len); + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = CREATE; + nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | SAVESTART; + error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos, + &dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, procp); + if (error) { + nfsm_reply(NFSX_WCCDATA(1)); + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + if (dirp) + vrele(dirp); + return (0); + } + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + vtyp = nfsv3tov_type(*tl); + if (vtyp != VCHR && vtyp != VBLK && vtyp != VSOCK && vtyp != VFIFO) { + vrele(nd.ni_startdir); + _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + error = NFSERR_BADTYPE; + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + vput(nd.ni_dvp); + goto out; + } + VATTR_NULL(vap); + nfsm_srvsattr(vap); + if (vtyp == VCHR || vtyp == VBLK) { + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); + major = fxdr_unsigned(u_long, *tl++); + minor = fxdr_unsigned(u_long, *tl); + vap->va_rdev = makedev(major, minor); + } + + /* + * Iff doesn't exist, create it. 
+ */ + if (nd.ni_vp) { + vrele(nd.ni_startdir); + _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + error = EEXIST; + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + vput(nd.ni_dvp); + goto out; + } + vap->va_type = vtyp; + if (vtyp == VSOCK) { + vrele(nd.ni_startdir); + nqsrv_getl(nd.ni_dvp, ND_WRITE); + error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap); + if (!error) + FREE_ZONE(nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + } else { + if (vtyp != VFIFO && (error = suser(cred, (u_short *)0))) { + vrele(nd.ni_startdir); + _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + vput(nd.ni_dvp); + goto out; + } + nqsrv_getl(nd.ni_dvp, ND_WRITE); + if ((error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap))) { + vrele(nd.ni_startdir); + goto out; + } + nd.ni_cnd.cn_nameiop = LOOKUP; + nd.ni_cnd.cn_flags &= ~(LOCKPARENT | SAVESTART); + nd.ni_cnd.cn_proc = procp; + nd.ni_cnd.cn_cred = procp->p_ucred; + error = lookup(&nd); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + if (error) + goto out; + if (nd.ni_cnd.cn_flags & ISSYMLINK) { + vrele(nd.ni_dvp); + vput(nd.ni_vp); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + error = EINVAL; + } + } +out: + vp = nd.ni_vp; + if (!error) { + bzero((caddr_t)fhp, sizeof(nfh)); + fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid; + error = VFS_VPTOFH(vp, &fhp->fh_fid); + if (!error) + error = VOP_GETATTR(vp, vap, cred, procp); + vput(vp); + } + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + nfsm_reply(NFSX_SRVFH(1) + NFSX_POSTOPATTR(1) + NFSX_WCCDATA(1)); + if (!error) { + nfsm_srvpostop_fh(fhp); + nfsm_srvpostop_attr(0, vap); + } + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); +nfsmout: + if (dirp) + vrele(dirp); + if (nd.ni_cnd.cn_nameiop) { + vrele(nd.ni_startdir); + _FREE_ZONE((caddr_t)nd.ni_cnd.cn_pnbuf, + nd.ni_cnd.cn_pnlen, M_NAMEI); + } + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if 
(nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + if (nd.ni_vp) + vput(nd.ni_vp); + return (error); +} + +/* + * nfs remove service + */ +int +nfsrv_remove(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + struct nameidata nd; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, cache, len, dirfor_ret = 1, diraft_ret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3); + char *cp2; + struct mbuf *mb, *mreq; + struct vnode *vp, *dirp; + struct vattr dirfor, diraft; + nfsfh_t nfh; + fhandle_t *fhp; + u_quad_t frev; + +#ifndef nolint + vp = (struct vnode *)0; +#endif + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvnamesiz(len); + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = DELETE; + nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF; + error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos, + &dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) { + if (v3) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, + procp); + else + vrele(dirp); + } + if (!error) { + vp = nd.ni_vp; + if (vp->v_type == VDIR) { + error = EPERM; /* POSIX */ + goto out; + } + /* + * The root of a mounted filesystem cannot be deleted. 
+ */ + if (vp->v_flag & VROOT) { + error = EBUSY; + goto out; + } +out: + if (!error) { + nqsrv_getl(nd.ni_dvp, ND_WRITE); + nqsrv_getl(vp, ND_WRITE); + + error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd); + + } else { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vput(vp); + } + } + if (dirp && v3) { + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + } + nfsm_reply(NFSX_WCCDATA(v3)); + if (v3) { + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + nfsm_srvdone; +} + +/* + * nfs rename service + */ +int +nfsrv_rename(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, cache, len, len2, fdirfor_ret = 1, fdiraft_ret = 1; + int tdirfor_ret = 1, tdiraft_ret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3); + char *cp2; + struct mbuf *mb, *mreq; + struct nameidata fromnd, tond; + struct vnode *fvp, *tvp, *tdvp, *fdirp = (struct vnode *)0; + struct vnode *tdirp = (struct vnode *)0; + struct vattr fdirfor, fdiraft, tdirfor, tdiraft; + nfsfh_t fnfh, tnfh; + fhandle_t *ffhp, *tfhp; + u_quad_t frev; + uid_t saved_uid; + +#ifndef nolint + fvp = (struct vnode *)0; +#endif + ffhp = &fnfh.fh_generic; + tfhp = &tnfh.fh_generic; + fromnd.ni_cnd.cn_nameiop = 0; + tond.ni_cnd.cn_nameiop = 0; + nfsm_srvmtofh(ffhp); + nfsm_srvnamesiz(len); + /* + * Remember our original uid so that we can reset cr_uid before + * the second nfs_namei() call, in case it is remapped. 
+ */ + saved_uid = cred->cr_uid; + fromnd.ni_cnd.cn_cred = cred; + fromnd.ni_cnd.cn_nameiop = DELETE; + fromnd.ni_cnd.cn_flags = WANTPARENT | SAVESTART; + error = nfs_namei(&fromnd, ffhp, len, slp, nam, &md, + &dpos, &fdirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (fdirp) { + if (v3) + fdirfor_ret = VOP_GETATTR(fdirp, &fdirfor, cred, + procp); + else { + vrele(fdirp); + fdirp = (struct vnode *)0; + } + } + if (error) { + nfsm_reply(2 * NFSX_WCCDATA(v3)); + nfsm_srvwcc_data(fdirfor_ret, &fdirfor, fdiraft_ret, &fdiraft); + nfsm_srvwcc_data(tdirfor_ret, &tdirfor, tdiraft_ret, &tdiraft); + if (fdirp) + vrele(fdirp); + return (0); + } + fvp = fromnd.ni_vp; + nfsm_srvmtofh(tfhp); + nfsm_strsiz(len2, NFS_MAXNAMLEN); + cred->cr_uid = saved_uid; + tond.ni_cnd.cn_cred = cred; + tond.ni_cnd.cn_nameiop = RENAME; + tond.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART; + error = nfs_namei(&tond, tfhp, len2, slp, nam, &md, + &dpos, &tdirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (tdirp) { + if (v3) + tdirfor_ret = VOP_GETATTR(tdirp, &tdirfor, cred, + procp); + else { + vrele(tdirp); + tdirp = (struct vnode *)0; + } + } + if (error) { + VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd); + vrele(fromnd.ni_dvp); + vrele(fvp); + goto out1; + } + tdvp = tond.ni_dvp; + tvp = tond.ni_vp; + if (tvp != NULL) { + if (fvp->v_type == VDIR && tvp->v_type != VDIR) { + if (v3) + error = EEXIST; + else + error = EISDIR; + goto out; + } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) { + if (v3) + error = EEXIST; + else + error = ENOTDIR; + goto out; + } + if (tvp->v_type == VDIR && tvp->v_mountedhere) { + if (v3) + error = EXDEV; + else + error = ENOTEMPTY; + goto out; + } + } + if (fvp->v_type == VDIR && fvp->v_mountedhere) { + if (v3) + error = EXDEV; + else + error = ENOTEMPTY; + goto out; + } + if (fvp->v_mount != tdvp->v_mount) { + if (v3) + error = EXDEV; + else + error = ENOTEMPTY; + goto out; + } + if (fvp == tdvp) + if (v3) + error = EINVAL; + else + 
error = ENOTEMPTY; + /* + * If source is the same as the destination (that is the + * same vnode) then there is nothing to do. + * (fixed to have POSIX semantics - CSM 3/2/98) + */ + if (fvp == tvp) + error = -1; +out: + if (!error) { + nqsrv_getl(fromnd.ni_dvp, ND_WRITE); + nqsrv_getl(tdvp, ND_WRITE); + if (tvp) + nqsrv_getl(tvp, ND_WRITE); + error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd, + tond.ni_dvp, tond.ni_vp, &tond.ni_cnd); + } else { + VOP_ABORTOP(tond.ni_dvp, &tond.ni_cnd); + if (tdvp == tvp) + vrele(tdvp); + else + vput(tdvp); + if (tvp) + vput(tvp); + VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd); + vrele(fromnd.ni_dvp); + vrele(fvp); + if (error == -1) + error = 0; + } + vrele(tond.ni_startdir); + FREE_ZONE(tond.ni_cnd.cn_pnbuf, tond.ni_cnd.cn_pnlen, M_NAMEI); +out1: + if (fdirp) { + fdiraft_ret = VOP_GETATTR(fdirp, &fdiraft, cred, procp); + vrele(fdirp); + } + if (tdirp) { + tdiraft_ret = VOP_GETATTR(tdirp, &tdiraft, cred, procp); + vrele(tdirp); + } + vrele(fromnd.ni_startdir); + FREE_ZONE(fromnd.ni_cnd.cn_pnbuf, fromnd.ni_cnd.cn_pnlen, M_NAMEI); + nfsm_reply(2 * NFSX_WCCDATA(v3)); + if (v3) { + nfsm_srvwcc_data(fdirfor_ret, &fdirfor, fdiraft_ret, &fdiraft); + nfsm_srvwcc_data(tdirfor_ret, &tdirfor, tdiraft_ret, &tdiraft); + } + return (0); + +nfsmout: + if (fdirp) + vrele(fdirp); + if (tdirp) + vrele(tdirp); + if (tond.ni_cnd.cn_nameiop) { + vrele(tond.ni_startdir); + FREE_ZONE(tond.ni_cnd.cn_pnbuf, tond.ni_cnd.cn_pnlen, M_NAMEI); + } + if (fromnd.ni_cnd.cn_nameiop) { + vrele(fromnd.ni_startdir); + FREE_ZONE(fromnd.ni_cnd.cn_pnbuf, + fromnd.ni_cnd.cn_pnlen, M_NAMEI); + VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd); + vrele(fromnd.ni_dvp); + vrele(fvp); + } + return (error); +} + +/* + * nfs link service + */ +int +nfsrv_link(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; 
+ caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + struct nameidata nd; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, rdonly, cache, len, dirfor_ret = 1, diraft_ret = 1; + int getret = 1, v3 = (nfsd->nd_flag & ND_NFSV3); + char *cp2; + struct mbuf *mb, *mreq; + struct vnode *vp, *xp, *dirp = (struct vnode *)0; + struct vattr dirfor, diraft, at; + nfsfh_t nfh, dnfh; + fhandle_t *fhp, *dfhp; + u_quad_t frev; + + fhp = &nfh.fh_generic; + dfhp = &dnfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvmtofh(dfhp); + nfsm_srvnamesiz(len); + if ((error = nfsrv_fhtovp(fhp, FALSE, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_WCCDATA(v3)); + nfsm_srvpostop_attr(getret, &at); + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + if (vp->v_type == VDIR) { + error = EPERM; /* POSIX */ + goto out1; + } + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = CREATE; + nd.ni_cnd.cn_flags = LOCKPARENT; + error = nfs_namei(&nd, dfhp, len, slp, nam, &md, &dpos, + &dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) { + if (v3) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, + procp); + else { + vrele(dirp); + dirp = (struct vnode *)0; + } + } + if (error) + goto out1; + xp = nd.ni_vp; + if (xp != NULL) { + error = EEXIST; + goto out; + } + xp = nd.ni_dvp; + if (vp->v_mount != xp->v_mount) + error = EXDEV; +out: + if (!error) { + nqsrv_getl(vp, ND_WRITE); + nqsrv_getl(xp, ND_WRITE); + error = VOP_LINK(vp, nd.ni_dvp, &nd.ni_cnd); + } else { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + if (nd.ni_vp) + vrele(nd.ni_vp); + } +out1: + if (v3) + getret = VOP_GETATTR(vp, &at, cred, procp); + if (dirp) { + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + } + vrele(vp); + nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_WCCDATA(v3)); + if (v3) { + 
nfsm_srvpostop_attr(getret, &at); + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + nfsm_srvdone; +} + +/* + * nfs symbolic link service + */ +int +nfsrv_symlink(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + struct vattr va, dirfor, diraft; + struct nameidata nd; + register struct vattr *vap = &va; + register u_long *tl; + register long t1; + struct nfsv2_sattr *sp; + char *bpos, *pathcp = (char *)0, *cp2; + struct uio io; + struct iovec iv; + int error = 0, cache, len, len2, dirfor_ret = 1, diraft_ret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3); + struct mbuf *mb, *mreq, *mb2; + struct vnode *dirp = (struct vnode *)0; + nfsfh_t nfh; + fhandle_t *fhp; + u_quad_t frev; + + nd.ni_cnd.cn_nameiop = 0; + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvnamesiz(len); + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = CREATE; + nd.ni_cnd.cn_flags = LOCKPARENT | SAVESTART; + error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos, + &dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) { + if (v3) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, + procp); + else { + vrele(dirp); + dirp = (struct vnode *)0; + } + } + if (error) + goto out; + VATTR_NULL(vap); + if (v3) + nfsm_srvsattr(vap); + nfsm_strsiz(len2, NFS_MAXPATHLEN); + MALLOC(pathcp, caddr_t, len2 + 1, M_TEMP, M_WAITOK); + iv.iov_base = pathcp; + iv.iov_len = len2; + io.uio_resid = len2; + io.uio_offset = 0; + io.uio_iov = &iv; + io.uio_iovcnt = 1; + io.uio_segflg = UIO_SYSSPACE; + io.uio_rw = UIO_READ; + io.uio_procp = (struct proc *)0; + nfsm_mtouio(&io, len2); + if (!v3) { + nfsm_dissect(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + vap->va_mode = fxdr_unsigned(u_short, sp->sa_mode); + } + *(pathcp + len2) = '\0'; + if (nd.ni_vp) 
{ + vrele(nd.ni_startdir); + _FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vrele(nd.ni_vp); + error = EEXIST; + goto out; + } + nqsrv_getl(nd.ni_dvp, ND_WRITE); + error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap, pathcp); + if (error) + vrele(nd.ni_startdir); + else { + if (v3) { + nd.ni_cnd.cn_nameiop = LOOKUP; + nd.ni_cnd.cn_flags &= ~(LOCKPARENT | SAVESTART | FOLLOW); + nd.ni_cnd.cn_flags |= (NOFOLLOW | LOCKLEAF); + nd.ni_cnd.cn_proc = procp; + nd.ni_cnd.cn_cred = cred; + error = lookup(&nd); + if (!error) { + bzero((caddr_t)fhp, sizeof(nfh)); + fhp->fh_fsid = nd.ni_vp->v_mount->mnt_stat.f_fsid; + error = VFS_VPTOFH(nd.ni_vp, &fhp->fh_fid); + if (!error) + error = VOP_GETATTR(nd.ni_vp, vap, cred, + procp); + vput(nd.ni_vp); + } + } else + vrele(nd.ni_startdir); + FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + } +out: + if (pathcp) + FREE(pathcp, M_TEMP); + if (dirp) { + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + } + nfsm_reply(NFSX_SRVFH(v3) + NFSX_POSTOPATTR(v3) + NFSX_WCCDATA(v3)); + if (v3) { + if (!error) { + nfsm_srvpostop_fh(fhp); + nfsm_srvpostop_attr(0, vap); + } + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + } + return (0); +nfsmout: + if (nd.ni_cnd.cn_nameiop) { + vrele(nd.ni_startdir); + _FREE_ZONE(nd.ni_cnd.cn_pnbuf, nd.ni_cnd.cn_pnlen, M_NAMEI); + } + if (dirp) + vrele(dirp); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + if (nd.ni_vp) + vrele(nd.ni_vp); + if (pathcp) + FREE(pathcp, M_TEMP); + return (error); +} + +/* + * nfs mkdir service + */ +int +nfsrv_mkdir(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t 
dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + struct vattr va, dirfor, diraft; + register struct vattr *vap = &va; + register struct nfs_fattr *fp; + struct nameidata nd; + register caddr_t cp; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, cache, len, dirfor_ret = 1, diraft_ret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3); + char *cp2; + struct mbuf *mb, *mb2, *mreq; + struct vnode *vp, *dirp = (struct vnode *)0; + nfsfh_t nfh; + fhandle_t *fhp; + u_quad_t frev; + + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvnamesiz(len); + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = CREATE; + nd.ni_cnd.cn_flags = LOCKPARENT; + error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos, + &dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) { + if (v3) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, + procp); + else { + vrele(dirp); + dirp = (struct vnode *)0; + } + } + if (error) { + nfsm_reply(NFSX_WCCDATA(v3)); + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + if (dirp) + vrele(dirp); + return (0); + } + VATTR_NULL(vap); + if (v3) { + nfsm_srvsattr(vap); + } else { + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + vap->va_mode = nfstov_mode(*tl++); + } + vap->va_type = VDIR; + vp = nd.ni_vp; + if (vp != NULL) { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vrele(vp); + error = EEXIST; + goto out; + } + nqsrv_getl(nd.ni_dvp, ND_WRITE); + error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, vap); + if (!error) { + vp = nd.ni_vp; + bzero((caddr_t)fhp, sizeof(nfh)); + fhp->fh_fsid = vp->v_mount->mnt_stat.f_fsid; + error = VFS_VPTOFH(vp, &fhp->fh_fid); + if (!error) + error = VOP_GETATTR(vp, vap, cred, procp); + vput(vp); + } +out: + if (dirp) { + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + } + nfsm_reply(NFSX_SRVFH(v3) + NFSX_POSTOPATTR(v3) + NFSX_WCCDATA(v3)); + if (v3) { + if (!error) { + nfsm_srvpostop_fh(fhp); + 
nfsm_srvpostop_attr(0, vap); + } + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + } else { + nfsm_srvfhtom(fhp, v3); + nfsm_build(fp, struct nfs_fattr *, NFSX_V2FATTR); + nfsm_srvfillattr(vap, fp); + } + return (0); +nfsmout: + if (dirp) + vrele(dirp); + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + if (nd.ni_vp) + vrele(nd.ni_vp); + return (error); +} + +/* + * nfs rmdir service + */ +int +nfsrv_rmdir(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, cache, len, dirfor_ret = 1, diraft_ret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3); + char *cp2; + struct mbuf *mb, *mreq; + struct vnode *vp, *dirp = (struct vnode *)0; + struct vattr dirfor, diraft; + nfsfh_t nfh; + fhandle_t *fhp; + struct nameidata nd; + u_quad_t frev; + + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_srvnamesiz(len); + nd.ni_cnd.cn_cred = cred; + nd.ni_cnd.cn_nameiop = DELETE; + nd.ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF; + error = nfs_namei(&nd, fhp, len, slp, nam, &md, &dpos, + &dirp, procp, (nfsd->nd_flag & ND_KERBAUTH), FALSE); + if (dirp) { + if (v3) + dirfor_ret = VOP_GETATTR(dirp, &dirfor, cred, + procp); + else { + vrele(dirp); + dirp = (struct vnode *)0; + } + } + if (error) { + nfsm_reply(NFSX_WCCDATA(v3)); + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + if (dirp) + vrele(dirp); + return (0); + } + vp = nd.ni_vp; + if (vp->v_type != VDIR) { + error = ENOTDIR; + goto out; + } + /* + * No rmdir "." please. + */ + if (nd.ni_dvp == vp) { + error = EINVAL; + goto out; + } + /* + * The root of a mounted filesystem cannot be deleted. 
+ */ + if (vp->v_flag & VROOT) + error = EBUSY; +out: + if (!error) { + nqsrv_getl(nd.ni_dvp, ND_WRITE); + nqsrv_getl(vp, ND_WRITE); + error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd); + } else { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == nd.ni_vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vput(vp); + } + if (dirp) { + diraft_ret = VOP_GETATTR(dirp, &diraft, cred, procp); + vrele(dirp); + } + nfsm_reply(NFSX_WCCDATA(v3)); + if (v3) { + nfsm_srvwcc_data(dirfor_ret, &dirfor, diraft_ret, &diraft); + return (0); + } + nfsm_srvdone; +} + +/* + * nfs readdir service + * - mallocs what it thinks is enough to read + * count rounded up to a multiple of NFS_DIRBLKSIZ <= NFS_MAXREADDIR + * - calls VOP_READDIR() + * - loops around building the reply + * if the output generated exceeds count break out of loop + * The nfsm_clget macro is used here so that the reply will be packed + * tightly in mbuf clusters. + * - it only knows that it has encountered eof when the VOP_READDIR() + * reads nothing + * - as such one readdir rpc will return eof false although you are there + * and then the next will return eof + * - it trims out records with d_fileno == 0 + * this doesn't matter for Unix clients, but they might confuse clients + * for other os'. + * NB: It is tempting to set eof to true if the VOP_READDIR() reads less + * than requested, but this may not apply to all filesystems. For + * example, client NFS does not { although it is never remote mounted + * anyhow } + * The alternate call nfsrv_readdirplus() does lookups as well. + * PS: The NFS protocol spec. does not clarify what the "count" byte + * argument is a count of.. just name strings and file id's or the + * entire reply rpc or ... + * I tried just file name and id sizes and it confused the Sun client, + * so I am using the full rpc size now. The "paranoia.." comment refers + * to including the status longwords that are not a part of the dir. + * "entry" structures, but are in the rpc. 
+ */ +struct flrep { + nfsuint64 fl_off; + u_long fl_postopok; + u_long fl_fattr[NFSX_V3FATTR / sizeof (u_long)]; + u_long fl_fhok; + u_long fl_fhsize; + u_long fl_nfh[NFSX_V3FH / sizeof (u_long)]; +}; + +int +nfsrv_readdir(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register char *bp, *be; + register struct mbuf *mp; + register struct dirent *dp; + register caddr_t cp; + register u_long *tl; + register long t1; + caddr_t bpos; + struct mbuf *mb, *mb2, *mreq, *mp2; + char *cpos, *cend, *cp2, *rbuf; + struct vnode *vp; + struct vattr at; + nfsfh_t nfh; + fhandle_t *fhp; + struct uio io; + struct iovec iv; + int len, nlen, rem, xfer, tsiz, i, error = 0, getret = 1; + int siz, cnt, fullsiz, eofflag, rdonly, cache, ncookies = 0; + int v3 = (nfsd->nd_flag & ND_NFSV3); + u_quad_t frev, off, toff, verf; + u_long *cookies = NULL, *cookiep; + + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + if (v3) { + nfsm_dissect(tl, u_long *, 5 * NFSX_UNSIGNED); + fxdr_hyper(tl, &toff); + tl += 2; + fxdr_hyper(tl, &verf); + tl += 2; + } else { + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); + toff = fxdr_unsigned(u_quad_t, *tl++); + } + off = toff; + cnt = fxdr_unsigned(int, *tl); + siz = ((cnt + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); + xfer = NFS_SRVMAXDATA(nfsd); + if (siz > xfer) + siz = xfer; + fullsiz = siz; + if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(NFSX_UNSIGNED); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + nqsrv_getl(vp, ND_READ); + if (v3) { + error = getret = VOP_GETATTR(vp, &at, cred, procp); + if (!error && toff && verf && verf != at.va_filerev) + error = NFSERR_BAD_COOKIE; + } + if (!error) + error = nfsrv_access(vp, VEXEC, cred, rdonly, procp, 
0); + if (error) { + vput(vp); + nfsm_reply(NFSX_POSTOPATTR(v3)); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + VOP_UNLOCK(vp, 0, procp); + MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); +again: + iv.iov_base = rbuf; + iv.iov_len = fullsiz; + io.uio_iov = &iv; + io.uio_iovcnt = 1; + io.uio_offset = (off_t)off; + io.uio_resid = fullsiz; + io.uio_segflg = UIO_SYSSPACE; + io.uio_rw = UIO_READ; + io.uio_procp = (struct proc *)0; + eofflag = 0; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, procp); + if (cookies) { + _FREE((caddr_t)cookies, M_TEMP); + cookies = NULL; + } + error = VOP_READDIR(vp, &io, cred, &eofflag, &ncookies, &cookies); + off = (off_t)io.uio_offset; + /* + * We cannot set the error in the case where there are no cookies + * and no error, only, as FreeBSD. In the scenario the client is + * calling us back being told there were "more" entries on last readdir + * return, and we have no more entries, our VOP_READDIR can give + * cookies = NULL and no error. This is due to a zero size to MALLOC + * returning NULL unlike FreeBSD which returns a pointer. + * With FreeBSD it makes sense if the MALLOC failed and you get in that + * bind. For us, we need something more. Thus, we should make sure we + * had some cookies to return, but no pointer and no error for EPERM case. + * Otherwise, go thru normal processing of sending back the eofflag. This check + * is also legit on first call to the routine by client since . and .. + * should be returned. Make same change to nfsrv_readdirplus. 
+ */ + if ((ncookies != 0) && !cookies && !error) + error = NFSERR_PERM; + + if (v3) { + getret = VOP_GETATTR(vp, &at, cred, procp); + if (!error) + error = getret; + } + VOP_UNLOCK(vp, 0, procp); + if (error) { + vrele(vp); + _FREE((caddr_t)rbuf, M_TEMP); + if (cookies) + _FREE((caddr_t)cookies, M_TEMP); + nfsm_reply(NFSX_POSTOPATTR(v3)); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + if (io.uio_resid) { + siz -= io.uio_resid; + + /* + * If nothing read, return eof + * rpc reply + */ + if (siz == 0) { + vrele(vp); + nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_COOKIEVERF(v3) + + 2 * NFSX_UNSIGNED); + if (v3) { + nfsm_srvpostop_attr(getret, &at); + nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED); + txdr_hyper(&at.va_filerev, tl); + tl += 2; + } else + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + *tl++ = nfs_false; + *tl = nfs_true; + FREE((caddr_t)rbuf, M_TEMP); + FREE((caddr_t)cookies, M_TEMP); + return (0); + } + } + + /* + * Check for degenerate cases of nothing useful read. + * If so go try again + */ + cpos = rbuf; + cend = rbuf + siz; + dp = (struct dirent *)cpos; + cookiep = cookies; +#ifdef __FreeBSD__ + /* + * For some reason FreeBSD's ufs_readdir() chooses to back the + * directory offset up to a block boundary, so it is necessary to + * skip over the records that preceed the requested offset. This + * requires the assumption that file offset cookies monotonically + * increase. 
+ */ + while (cpos < cend && ncookies > 0 && + (dp->d_fileno == 0 || ((u_quad_t)(*cookiep)) <= toff)) { +#else + while (dp->d_fileno == 0 && cpos < cend && ncookies > 0) { +#endif + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + if (cpos >= cend || ncookies == 0) { + toff = off; + siz = fullsiz; + goto again; + } + + len = 3 * NFSX_UNSIGNED; /* paranoia, probably can be 0 */ + nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_COOKIEVERF(v3) + siz); + if (v3) { + nfsm_srvpostop_attr(getret, &at); + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + txdr_hyper(&at.va_filerev, tl); + } + mp = mp2 = mb; + bp = bpos; + be = bp + M_TRAILINGSPACE(mp); + + /* Loop through the records and build reply */ + while (cpos < cend && ncookies > 0) { + if (dp->d_fileno != 0) { + nlen = dp->d_namlen; + rem = nfsm_rndup(nlen)-nlen; + len += (4 * NFSX_UNSIGNED + nlen + rem); + if (v3) + len += 2 * NFSX_UNSIGNED; + if (len > cnt) { + eofflag = 0; + break; + } + /* + * Build the directory record xdr from + * the dirent entry. 
+ */ + nfsm_clget; + *tl = nfs_true; + bp += NFSX_UNSIGNED; + if (v3) { + nfsm_clget; + *tl = 0; + bp += NFSX_UNSIGNED; + } + nfsm_clget; + *tl = txdr_unsigned(dp->d_fileno); + bp += NFSX_UNSIGNED; + nfsm_clget; + *tl = txdr_unsigned(nlen); + bp += NFSX_UNSIGNED; + + /* And loop around copying the name */ + xfer = nlen; + cp = dp->d_name; + while (xfer > 0) { + nfsm_clget; + if ((bp+xfer) > be) + tsiz = be-bp; + else + tsiz = xfer; + bcopy(cp, bp, tsiz); + bp += tsiz; + xfer -= tsiz; + if (xfer > 0) + cp += tsiz; + } + /* And null pad to a long boundary */ + for (i = 0; i < rem; i++) + *bp++ = '\0'; + nfsm_clget; + + /* Finish off the record */ + if (v3) { + *tl = 0; + bp += NFSX_UNSIGNED; + nfsm_clget; + } + *tl = txdr_unsigned(*cookiep); + bp += NFSX_UNSIGNED; + } + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + vrele(vp); + nfsm_clget; + *tl = nfs_false; + bp += NFSX_UNSIGNED; + nfsm_clget; + if (eofflag) + *tl = nfs_true; + else + *tl = nfs_false; + bp += NFSX_UNSIGNED; + if (mp != mb) { + if (bp < be) + mp->m_len = bp - mtod(mp, caddr_t); + } else + mp->m_len += bp - bpos; + FREE((caddr_t)rbuf, M_TEMP); + FREE((caddr_t)cookies, M_TEMP); + nfsm_srvdone; +} + +int +nfsrv_readdirplus(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register char *bp, *be; + register struct mbuf *mp; + register struct dirent *dp; + register caddr_t cp; + register u_long *tl; + register long t1; + caddr_t bpos; + struct mbuf *mb, *mb2, *mreq, *mp2; + char *cpos, *cend, *cp2, *rbuf; + struct vnode *vp, *nvp; + struct flrep fl; + nfsfh_t nfh; + fhandle_t *fhp, *nfhp = (fhandle_t *)fl.fl_nfh; + struct uio io; + struct iovec iv; + struct vattr va, at, *vap = &va; + struct nfs_fattr *fp; + int len, nlen, rem, 
xfer, tsiz, i, error = 0, getret = 1; + int siz, cnt, fullsiz, eofflag, rdonly, cache, dirlen, ncookies = 0; + u_quad_t frev, off, toff, verf; + u_long *cookies = NULL, *cookiep; + + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_dissect(tl, u_long *, 6 * NFSX_UNSIGNED); + fxdr_hyper(tl, &toff); + tl += 2; + fxdr_hyper(tl, &verf); + tl += 2; + siz = fxdr_unsigned(int, *tl++); + cnt = fxdr_unsigned(int, *tl); + off = toff; + siz = ((siz + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); + xfer = NFS_SRVMAXDATA(nfsd); + if (siz > xfer) + siz = xfer; + fullsiz = siz; + if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(NFSX_UNSIGNED); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + error = getret = VOP_GETATTR(vp, &at, cred, procp); + if (!error && toff && verf && verf != at.va_filerev) + error = NFSERR_BAD_COOKIE; + if (!error) { + nqsrv_getl(vp, ND_READ); + error = nfsrv_access(vp, VEXEC, cred, rdonly, procp, 0); + } + if (error) { + vput(vp); + nfsm_reply(NFSX_V3POSTOPATTR); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + VOP_UNLOCK(vp, 0, procp); + MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); +again: + iv.iov_base = rbuf; + iv.iov_len = fullsiz; + io.uio_iov = &iv; + io.uio_iovcnt = 1; + io.uio_offset = (off_t)off; + io.uio_resid = fullsiz; + io.uio_segflg = UIO_SYSSPACE; + io.uio_rw = UIO_READ; + io.uio_procp = (struct proc *)0; + eofflag = 0; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, procp); + if (cookies) { + _FREE((caddr_t)cookies, M_TEMP); + cookies = NULL; + } + error = VOP_READDIR(vp, &io, cred, &eofflag, &ncookies, &cookies); + off = (u_quad_t)io.uio_offset; + getret = VOP_GETATTR(vp, &at, cred, procp); + VOP_UNLOCK(vp, 0, procp); + /* + * See nfsrv_readdir comment above on this + */ + if ((ncookies != 0) && !cookies && !error) + error = NFSERR_PERM; + + if (!error) + error = getret; + if (error) { + vrele(vp); + if (cookies) + _FREE((caddr_t)cookies, M_TEMP); + _FREE((caddr_t)rbuf, 
M_TEMP); + nfsm_reply(NFSX_V3POSTOPATTR); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + if (io.uio_resid) { + siz -= io.uio_resid; + + /* + * If nothing read, return eof + * rpc reply + */ + if (siz == 0) { + vrele(vp); + nfsm_reply(NFSX_V3POSTOPATTR + NFSX_V3COOKIEVERF + + 2 * NFSX_UNSIGNED); + nfsm_srvpostop_attr(getret, &at); + nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED); + txdr_hyper(&at.va_filerev, tl); + tl += 2; + *tl++ = nfs_false; + *tl = nfs_true; + FREE((caddr_t)cookies, M_TEMP); + FREE((caddr_t)rbuf, M_TEMP); + return (0); + } + } + + /* + * Check for degenerate cases of nothing useful read. + * If so go try again + */ + cpos = rbuf; + cend = rbuf + siz; + dp = (struct dirent *)cpos; + cookiep = cookies; +#ifdef __FreeBSD__ + /* + * For some reason FreeBSD's ufs_readdir() chooses to back the + * directory offset up to a block boundary, so it is necessary to + * skip over the records that preceed the requested offset. This + * requires the assumption that file offset cookies monotonically + * increase. + */ + while (cpos < cend && ncookies > 0 && + (dp->d_fileno == 0 || ((u_quad_t)(*cookiep)) <= toff)) { +#else + while (dp->d_fileno == 0 && cpos < cend && ncookies > 0) { +#endif + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + if (cpos >= cend || ncookies == 0) { + toff = off; + siz = fullsiz; + goto again; + } + + /* + * Probe one of the directory entries to see if the filesystem + * supports VGET. 
+ */ + if (VFS_VGET(vp->v_mount, dp->d_fileno, &nvp) == EOPNOTSUPP) { + error = NFSERR_NOTSUPP; + vrele(vp); + _FREE((caddr_t)cookies, M_TEMP); + _FREE((caddr_t)rbuf, M_TEMP); + nfsm_reply(NFSX_V3POSTOPATTR); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + vput(nvp); + + dirlen = len = NFSX_V3POSTOPATTR + NFSX_V3COOKIEVERF + 2 * NFSX_UNSIGNED; + nfsm_reply(cnt); + nfsm_srvpostop_attr(getret, &at); + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + txdr_hyper(&at.va_filerev, tl); + mp = mp2 = mb; + bp = bpos; + be = bp + M_TRAILINGSPACE(mp); + + /* Loop through the records and build reply */ + while (cpos < cend && ncookies > 0) { + if (dp->d_fileno != 0) { + nlen = dp->d_namlen; + rem = nfsm_rndup(nlen)-nlen; + + /* + * For readdir_and_lookup get the vnode using + * the file number. + */ + if (VFS_VGET(vp->v_mount, dp->d_fileno, &nvp)) + goto invalid; + bzero((caddr_t)nfhp, NFSX_V3FH); + nfhp->fh_fsid = + nvp->v_mount->mnt_stat.f_fsid; + if (VFS_VPTOFH(nvp, &nfhp->fh_fid)) { + vput(nvp); + goto invalid; + } + if (VOP_GETATTR(nvp, vap, cred, procp)) { + vput(nvp); + goto invalid; + } + vput(nvp); + + /* + * If either the dircount or maxcount will be + * exceeded, get out now. Both of these lengths + * are calculated conservatively, including all + * XDR overheads. + */ + len += (7 * NFSX_UNSIGNED + nlen + rem + NFSX_V3FH + + NFSX_V3POSTOPATTR); + dirlen += (6 * NFSX_UNSIGNED + nlen + rem); + if (len > cnt || dirlen > fullsiz) { + eofflag = 0; + break; + } + + /* + * Build the directory record xdr from + * the dirent entry. 
+ */ + fp = (struct nfs_fattr *)&fl.fl_fattr; + nfsm_srvfillattr(vap, fp); + fl.fl_fhsize = txdr_unsigned(NFSX_V3FH); + fl.fl_fhok = nfs_true; + fl.fl_postopok = nfs_true; + fl.fl_off.nfsuquad[0] = 0; + fl.fl_off.nfsuquad[1] = txdr_unsigned(*cookiep); + + nfsm_clget; + *tl = nfs_true; + bp += NFSX_UNSIGNED; + nfsm_clget; + *tl = 0; + bp += NFSX_UNSIGNED; + nfsm_clget; + *tl = txdr_unsigned(dp->d_fileno); + bp += NFSX_UNSIGNED; + nfsm_clget; + *tl = txdr_unsigned(nlen); + bp += NFSX_UNSIGNED; + + /* And loop around copying the name */ + xfer = nlen; + cp = dp->d_name; + while (xfer > 0) { + nfsm_clget; + if ((bp + xfer) > be) + tsiz = be - bp; + else + tsiz = xfer; + bcopy(cp, bp, tsiz); + bp += tsiz; + xfer -= tsiz; + if (xfer > 0) + cp += tsiz; + } + /* And null pad to a long boundary */ + for (i = 0; i < rem; i++) + *bp++ = '\0'; + + /* + * Now copy the flrep structure out. + */ + xfer = sizeof (struct flrep); + cp = (caddr_t)&fl; + while (xfer > 0) { + nfsm_clget; + if ((bp + xfer) > be) + tsiz = be - bp; + else + tsiz = xfer; + bcopy(cp, bp, tsiz); + bp += tsiz; + xfer -= tsiz; + if (xfer > 0) + cp += tsiz; + } + } +invalid: + cpos += dp->d_reclen; + dp = (struct dirent *)cpos; + cookiep++; + ncookies--; + } + vrele(vp); + nfsm_clget; + *tl = nfs_false; + bp += NFSX_UNSIGNED; + nfsm_clget; + if (eofflag) + *tl = nfs_true; + else + *tl = nfs_false; + bp += NFSX_UNSIGNED; + if (mp != mb) { + if (bp < be) + mp->m_len = bp - mtod(mp, caddr_t); + } else + mp->m_len += bp - bpos; + FREE((caddr_t)cookies, M_TEMP); + FREE((caddr_t)rbuf, M_TEMP); + nfsm_srvdone; +} + +/* + * nfs commit service + */ +int +nfsrv_commit(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + struct vattr bfor, aft; + struct vnode *vp; + nfsfh_t nfh; + fhandle_t 
*fhp; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, rdonly, for_ret = 1, aft_ret = 1, cnt, cache; + char *cp2; + struct mbuf *mb, *mb2, *mreq; + u_quad_t frev, off; + +#ifndef nolint + cache = 0; +#endif + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); + + /* + * XXX At this time VOP_FSYNC() does not accept offset and byte + * count parameters, so these arguments are useless (someday maybe). + */ + fxdr_hyper(tl, &off); + tl += 2; + cnt = fxdr_unsigned(int, *tl); + if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(2 * NFSX_UNSIGNED); + nfsm_srvwcc_data(for_ret, &bfor, aft_ret, &aft); + return (0); + } + for_ret = VOP_GETATTR(vp, &bfor, cred, procp); + error = VOP_FSYNC(vp, cred, MNT_WAIT, procp); + aft_ret = VOP_GETATTR(vp, &aft, cred, procp); + vput(vp); + nfsm_reply(NFSX_V3WCCDATA + NFSX_V3WRITEVERF); + nfsm_srvwcc_data(for_ret, &bfor, aft_ret, &aft); + if (!error) { + nfsm_build(tl, u_long *, NFSX_V3WRITEVERF); + *tl++ = txdr_unsigned(boottime.tv_sec); + *tl = txdr_unsigned(boottime.tv_usec); + } else + return (0); + nfsm_srvdone; +} + +/* + * nfs statfs service + */ +int +nfsrv_statfs(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register struct statfs *sf; + register struct nfs_statfs *sfp; + register u_long *tl; + register long t1; + caddr_t bpos; + int error = 0, rdonly, cache, getret = 1; + int v3 = (nfsd->nd_flag & ND_NFSV3); + char *cp2; + struct mbuf *mb, *mb2, *mreq; + struct vnode *vp; + struct vattr at; + nfsfh_t nfh; + fhandle_t *fhp; + struct statfs statfs; + u_quad_t frev, tval; + +#ifndef nolint + cache = 0; +#endif + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + 
if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(NFSX_UNSIGNED); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + sf = &statfs; + error = VFS_STATFS(vp->v_mount, sf, procp); + getret = VOP_GETATTR(vp, &at, cred, procp); + vput(vp); + nfsm_reply(NFSX_POSTOPATTR(v3) + NFSX_STATFS(v3)); + if (v3) + nfsm_srvpostop_attr(getret, &at); + if (error) + return (0); + nfsm_build(sfp, struct nfs_statfs *, NFSX_STATFS(v3)); + if (v3) { + tval = (u_quad_t)sf->f_blocks; + tval *= (u_quad_t)sf->f_bsize; + txdr_hyper(&tval, &sfp->sf_tbytes); + tval = (u_quad_t)sf->f_bfree; + tval *= (u_quad_t)sf->f_bsize; + txdr_hyper(&tval, &sfp->sf_fbytes); + tval = (u_quad_t)sf->f_bavail; + tval *= (u_quad_t)sf->f_bsize; + txdr_hyper(&tval, &sfp->sf_abytes); + sfp->sf_tfiles.nfsuquad[0] = 0; + sfp->sf_tfiles.nfsuquad[1] = txdr_unsigned(sf->f_files); + sfp->sf_ffiles.nfsuquad[0] = 0; + sfp->sf_ffiles.nfsuquad[1] = txdr_unsigned(sf->f_ffree); + sfp->sf_afiles.nfsuquad[0] = 0; + sfp->sf_afiles.nfsuquad[1] = txdr_unsigned(sf->f_ffree); + sfp->sf_invarsec = 0; + } else { + sfp->sf_tsize = txdr_unsigned(NFS_MAXDGRAMDATA); + sfp->sf_bsize = txdr_unsigned(sf->f_bsize); + sfp->sf_blocks = txdr_unsigned(sf->f_blocks); + sfp->sf_bfree = txdr_unsigned(sf->f_bfree); + sfp->sf_bavail = txdr_unsigned(sf->f_bavail); + } + nfsm_srvdone; +} + +/* + * nfs fsinfo service + */ +int +nfsrv_fsinfo(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register u_long *tl; + register struct nfsv3_fsinfo *sip; + register long t1; + caddr_t bpos; + int error = 0, rdonly, cache, getret = 1, pref; + char *cp2; + struct mbuf *mb, *mb2, *mreq; + struct vnode *vp; + struct vattr at; + nfsfh_t nfh; + fhandle_t *fhp; + 
u_quad_t frev; + +#ifndef nolint + cache = 0; +#endif + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(NFSX_UNSIGNED); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + getret = VOP_GETATTR(vp, &at, cred, procp); + vput(vp); + nfsm_reply(NFSX_V3POSTOPATTR + NFSX_V3FSINFO); + nfsm_srvpostop_attr(getret, &at); + nfsm_build(sip, struct nfsv3_fsinfo *, NFSX_V3FSINFO); + + /* + * XXX + * There should be file system VFS OP(s) to get this information. + * For now, assume ufs. + */ + if (slp->ns_so->so_type == SOCK_DGRAM) + pref = NFS_MAXDGRAMDATA; + else + pref = NFS_MAXDATA; + sip->fs_rtmax = txdr_unsigned(NFS_MAXDATA); + sip->fs_rtpref = txdr_unsigned(pref); + sip->fs_rtmult = txdr_unsigned(NFS_FABLKSIZE); + sip->fs_wtmax = txdr_unsigned(NFS_MAXDATA); + sip->fs_wtpref = txdr_unsigned(pref); + sip->fs_wtmult = txdr_unsigned(NFS_FABLKSIZE); + sip->fs_dtpref = txdr_unsigned(pref); + sip->fs_maxfilesize.nfsuquad[0] = 0xffffffff; + sip->fs_maxfilesize.nfsuquad[1] = 0xffffffff; + sip->fs_timedelta.nfsv3_sec = 0; + sip->fs_timedelta.nfsv3_nsec = txdr_unsigned(1); + sip->fs_properties = txdr_unsigned(NFSV3FSINFO_LINK | + NFSV3FSINFO_SYMLINK | NFSV3FSINFO_HOMOGENEOUS | + NFSV3FSINFO_CANSETTIME); + nfsm_srvdone; +} + +/* + * nfs pathconf service + */ +int +nfsrv_pathconf(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep, *md = nfsd->nd_md; + struct mbuf *nam = nfsd->nd_nam; + caddr_t dpos = nfsd->nd_dpos; + struct ucred *cred = &nfsd->nd_cr; + register u_long *tl; + register struct nfsv3_pathconf *pc; + register long t1; + caddr_t bpos; + int error = 0, rdonly, cache, getret = 1, linkmax, namemax; + int chownres, notrunc; + char *cp2; + struct mbuf *mb, *mb2, *mreq; + struct vnode *vp; + struct vattr at; + nfsfh_t nfh; + fhandle_t *fhp; + 
u_quad_t frev; + +#ifndef nolint + cache = 0; +#endif + fhp = &nfh.fh_generic; + nfsm_srvmtofh(fhp); + if ((error = nfsrv_fhtovp(fhp, 1, &vp, cred, slp, nam, + &rdonly, (nfsd->nd_flag & ND_KERBAUTH), TRUE))) { + nfsm_reply(NFSX_UNSIGNED); + nfsm_srvpostop_attr(getret, &at); + return (0); + } + error = VOP_PATHCONF(vp, _PC_LINK_MAX, &linkmax); + if (!error) + error = VOP_PATHCONF(vp, _PC_NAME_MAX, &namemax); + if (!error) + error = VOP_PATHCONF(vp, _PC_CHOWN_RESTRICTED, &chownres); + if (!error) + error = VOP_PATHCONF(vp, _PC_NO_TRUNC, ¬runc); + getret = VOP_GETATTR(vp, &at, cred, procp); + vput(vp); + nfsm_reply(NFSX_V3POSTOPATTR + NFSX_V3PATHCONF); + nfsm_srvpostop_attr(getret, &at); + if (error) + return (0); + nfsm_build(pc, struct nfsv3_pathconf *, NFSX_V3PATHCONF); + + pc->pc_linkmax = txdr_unsigned(linkmax); + pc->pc_namemax = txdr_unsigned(namemax); + pc->pc_notrunc = txdr_unsigned(notrunc); + pc->pc_chownrestricted = txdr_unsigned(chownres); + + /* + * These should probably be supported by VOP_PATHCONF(), but + * until msdosfs is exportable (why would you want to?), the + * Unix defaults should be ok. 
+ */ + pc->pc_caseinsensitive = nfs_false; + pc->pc_casepreserving = nfs_true; + nfsm_srvdone; +} + +/* + * Null operation, used by clients to ping server + */ +/* ARGSUSED */ +int +nfsrv_null(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep; + caddr_t bpos; + int error = NFSERR_RETVOID, cache; + struct mbuf *mb, *mreq; + u_quad_t frev; + +#ifndef nolint + cache = 0; +#endif + nfsm_reply(0); + return (0); +} + +/* + * No operation, used for obsolete procedures + */ +/* ARGSUSED */ +int +nfsrv_noop(nfsd, slp, procp, mrq) + struct nfsrv_descript *nfsd; + struct nfssvc_sock *slp; + struct proc *procp; + struct mbuf **mrq; +{ + struct mbuf *mrep = nfsd->nd_mrep; + caddr_t bpos; + int error, cache; + struct mbuf *mb, *mreq; + u_quad_t frev; + +#ifndef nolint + cache = 0; +#endif + if (nfsd->nd_repstat) + error = nfsd->nd_repstat; + else + error = EPROCUNAVAIL; + nfsm_reply(0); + return (0); +} + +/* + * Perform access checking for vnodes obtained from file handles that would + * refer to files already opened by a Unix client. You cannot just use + * vn_writechk() and VOP_ACCESS() for two reasons. + * 1 - You must check for exported rdonly as well as MNT_RDONLY for the write case + * 2 - The owner is to be given access irrespective of mode bits so that + * processes that chmod after opening a file don't break. I don't like + * this because it opens a security hole, but since the nfs server opens + * a security hole the size of a barn door anyhow, what the heck. + + * The exception to rule 2 is EPERM. If a file is IMMUTABLE, VOP_ACCESS() + * will return EPERM instead of EACCESS. EPERM is always an error. 
+ */ + +static int +nfsrv_access(vp, flags, cred, rdonly, p, override) + register struct vnode *vp; + int flags; + register struct ucred *cred; + int rdonly; + struct proc *p; + int override; +{ + struct vattr vattr; + int error; + if (flags & VWRITE) { + /* Just vn_writechk() changed to check rdonly */ + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket or a block or character + * device resident on the file system. + */ + if (rdonly || (vp->v_mount->mnt_flag & MNT_RDONLY)) { + switch (vp->v_type) { + case VREG: case VDIR: case VLNK: case VCPLX: + return (EROFS); + } + } + /* + * If there's shared text associated with + * the inode, we can't allow writing. + */ + if (vp->v_flag & VTEXT) + return (ETXTBSY); + } + if ((error = VOP_GETATTR(vp, &vattr, cred, p))) + return (error); + error = VOP_ACCESS(vp, flags, cred, p); + /* + * Allow certain operations for the owner (reads and writes + * on files that are already open). Picking up from FreeBSD. + */ + if (override && error == EACCES && cred->cr_uid == vattr.va_uid) + error = 0; + return error; +} +#endif /* NFS_NOSERVER */ + diff --git a/bsd/nfs/nfs_socket.c b/bsd/nfs/nfs_socket.c new file mode 100644 index 000000000..22d5a17ba --- /dev/null +++ b/bsd/nfs/nfs_socket.c @@ -0,0 +1,2628 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1991, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95 + * FreeBSD-Id: nfs_socket.c,v 1.30 1997/10/28 15:59:07 bde Exp $ + */ + +/* + * Socket operations for use by nfs + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TRUE 1 +#define FALSE 0 + +/* + * Estimate rto for an nfs rpc sent via. an unreliable datagram. + * Use the mean and mean deviation of rtt for the appropriate type of rpc + * for the frequent rpcs and a default for the others. + * The justification for doing "other" this way is that these rpcs + * happen so infrequently that timer est. would probably be stale. + * Also, since many of these rpcs are + * non-idempotent, a conservative timeout is desired. + * getattr, lookup - A+2D + * read, write - A+4D + * other - nm_timeo + */ +#define NFS_RTO(n, t) \ + ((t) == 0 ? (n)->nm_timeo : \ + ((t) < 3 ? 
\ + (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \ + ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1))) +#define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1] +#define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1] +/* + * External data, mostly RPC constants in XDR form + */ +extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, rpc_auth_unix, + rpc_msgaccepted, rpc_call, rpc_autherr, + rpc_auth_kerb; +extern u_long nfs_prog, nqnfs_prog; +extern time_t nqnfsstarttime; +extern struct nfsstats nfsstats; +extern int nfsv3_procid[NFS_NPROCS]; +extern int nfs_ticks; + +/* + * Defines which timer to use for the procnum. + * 0 - default + * 1 - getattr + * 2 - lookup + * 3 - read + * 4 - write + */ +static int proct[NFS_NPROCS] = { + 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, + 0, 0, 0, +}; + +/* + * There is a congestion window for outstanding rpcs maintained per mount + * point. The cwnd size is adjusted in roughly the way that: + * Van Jacobson, Congestion avoidance and Control, In "Proceedings of + * SIGCOMM '88". ACM, August 1988. + * describes for TCP. The cwnd size is chopped in half on a retransmit timeout + * and incremented by 1/cwnd when each rpc reply is received and a full cwnd + * of rpcs is in progress. + * (The sent count and cwnd are scaled for integer arith.) + * Variants of "slow start" were tried and were found to be too much of a + * performance hit (ave. rtt 3 times larger), + * I suspect due to the large rtt that nfs rpcs have. 
+ */ +#define NFS_CWNDSCALE 256 +#define NFS_MAXCWND (NFS_CWNDSCALE * 32) +static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, }; +int nfsrtton = 0; +struct nfsrtt nfsrtt; + +static int nfs_msg __P((struct proc *,char *,char *)); +static int nfs_rcvlock __P((struct nfsreq *)); +static void nfs_rcvunlock __P((int *flagp)); +static int nfs_receive __P((struct nfsreq *rep, struct mbuf **aname, + struct mbuf **mp)); +static int nfs_reconnect __P((struct nfsreq *rep)); +#ifndef NFS_NOSERVER +static int nfsrv_getstream __P((struct nfssvc_sock *,int)); + +int (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *nd, + struct nfssvc_sock *slp, + struct proc *procp, + struct mbuf **mreqp)) = { + nfsrv_null, + nfsrv_getattr, + nfsrv_setattr, + nfsrv_lookup, + nfsrv3_access, + nfsrv_readlink, + nfsrv_read, + nfsrv_write, + nfsrv_create, + nfsrv_mkdir, + nfsrv_symlink, + nfsrv_mknod, + nfsrv_remove, + nfsrv_rmdir, + nfsrv_rename, + nfsrv_link, + nfsrv_readdir, + nfsrv_readdirplus, + nfsrv_statfs, + nfsrv_fsinfo, + nfsrv_pathconf, + nfsrv_commit, + nqnfsrv_getlease, + nqnfsrv_vacated, + nfsrv_noop, + nfsrv_noop +}; +#endif /* NFS_NOSERVER */ + +#if NFSDIAG +int nfstraceindx = 0; +struct nfstracerec nfstracebuf[NFSTBUFSIZ] = {{0,0,0,0}}; + +#define NFSTRACESUSPENDERS +#ifdef NFSTRACESUSPENDERS +uint nfstracemask = 0xfff00200; +int nfstracexid = -1; +uint onfstracemask = 0; +int nfstracesuspend = -1; +#define NFSTRACE_SUSPEND \ + { \ + if (nfstracemask) { \ + onfstracemask = nfstracemask; \ + nfstracemask = 0; \ + } \ + } +#define NFSTRACE_RESUME \ + { \ + nfstracesuspend = -1; \ + if (!nfstracemask) \ + nfstracemask = onfstracemask; \ + } +#define NFSTRACE_STARTSUSPENDCOUNTDOWN \ + { \ + nfstracesuspend = (nfstraceindx+100) % NFSTBUFSIZ; \ + } +#define NFSTRACE_SUSPENDING (nfstracesuspend != -1) +#define NFSTRACE_SUSPENSEOVER \ + (nfstracesuspend > 100 ? 
\ + (nfstraceindx >= nfstracesuspend || \ + nfstraceindx < nfstracesuspend - 100) : \ + (nfstraceindx >= nfstracesuspend && \ + nfstraceindx < nfstracesuspend + 8192 - 100)) +#else +uint nfstracemask = 0; +#endif /* NFSTRACESUSPENDERS */ + +int nfsprnttimo = 1; + +int nfsodata[1024]; +int nfsoprocnum, nfsolen; +int nfsbt[32], nfsbtlen; + +#if defined(__ppc__) +int +backtrace(int *where, int size) +{ + int register sp, *fp, numsaved; + + __asm__ volatile("mr %0,r1" : "=r" (sp)); + + fp = (int *)*((int *)sp); + size /= sizeof(int); + for (numsaved = 0; numsaved < size; numsaved++) { + *where++ = fp[2]; + if ((int)fp <= 0) + break; + fp = (int *)*fp; + } + return (numsaved); +} +#elif defined(__i386__) +int +backtrace() +{ + return (0); /* Till someone implements a real routine */ +} +#else +#error architecture not implemented. +#endif + +void +nfsdup(struct nfsreq *rep) +{ + int *ip, i, first = 1, end; + char *s, b[240]; + struct mbuf *mb; + + if ((nfs_debug & NFS_DEBUG_DUP) == 0) + return; + /* last mbuf in chain will be nfs content */ + for (mb = rep->r_mreq; mb->m_next; mb = mb->m_next) + ; + if (rep->r_procnum == nfsoprocnum && mb->m_len == nfsolen && + !bcmp((caddr_t)nfsodata, mb->m_data, nfsolen)) { + s = b + sprintf(b, "nfsdup x=%x p=%d h=", rep->r_xid, + rep->r_procnum); + end = (int)(VTONFS(rep->r_vp)->n_fhp); + ip = (int *)(end & ~3); + end += VTONFS(rep->r_vp)->n_fhsize; + while ((int)ip < end) { + i = *ip++; + if (first) { /* avoid leading zeroes */ + if (i == 0) + continue; + first = 0; + s += sprintf(s, "%x", i); + } else + s += sprintf(s, "%08x", i); + } + if (first) + sprintf(s, "%x", 0); + else /* eliminate trailing zeroes */ + while (*--s == '0') + *s = 0; + /* + * set a breakpoint here and you can view the + * current backtrace and the one saved in nfsbt + */ + kprintf("%s\n", b); + } + nfsoprocnum = rep->r_procnum; + nfsolen = mb->m_len; + bcopy(mb->m_data, (caddr_t)nfsodata, mb->m_len); + nfsbtlen = backtrace(&nfsbt, sizeof(nfsbt)); +} +#endif /* 
NFSDIAG */ + +/* + * Initialize sockets and congestion for a new NFS connection. + * We do not free the sockaddr if error. + */ +int +nfs_connect(nmp, rep) + register struct nfsmount *nmp; + struct nfsreq *rep; +{ + register struct socket *so; + int s, error, rcvreserve, sndreserve; + struct sockaddr *saddr; + struct sockaddr_in sin; + u_short tport; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + nmp->nm_so = (struct socket *)0; + saddr = mtod(nmp->nm_nam, struct sockaddr *); + error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype, + nmp->nm_soproto); + if (error) { + goto bad; + } + so = nmp->nm_so; + nmp->nm_soflags = so->so_proto->pr_flags; + + /* + * Some servers require that the client port be a reserved port number. + */ + if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) { + sin.sin_len = sizeof (struct sockaddr_in); + sin.sin_family = AF_INET; + sin.sin_addr.s_addr = INADDR_ANY; + tport = IPPORT_RESERVED - 1; + sin.sin_port = htons(tport); + + while ((error = sobind(so, (struct sockaddr *) &sin) == EADDRINUSE) && + (--tport > IPPORT_RESERVED / 2)) + sin.sin_port = htons(tport); + if (error) { + goto bad; + } + } + + /* + * Protocols that do not require connections may be optionally left + * unconnected for servers that reply from a port other than NFS_PORT. + */ + if (nmp->nm_flag & NFSMNT_NOCONN) { + if (nmp->nm_soflags & PR_CONNREQUIRED) { + error = ENOTCONN; + goto bad; + } + } else { + error = soconnect(so, mtod(nmp->nm_nam, struct sockaddr *)); + if (error) { + goto bad; + } + + /* + * Wait for the connection to complete. Cribbed from the + * connect system call but with the wait timing out so + * that interruptible mounts don't hang here for a long time. 
+ */ + s = splnet(); + while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { + (void) tsleep((caddr_t)&so->so_timeo, PSOCK, + "nfscon", 2 * hz); + if ((so->so_state & SS_ISCONNECTING) && + so->so_error == 0 && rep && + (error = nfs_sigintr(nmp, rep, rep->r_procp))) { + so->so_state &= ~SS_ISCONNECTING; + splx(s); + goto bad; + } + } + if (so->so_error) { + error = so->so_error; + so->so_error = 0; + splx(s); + goto bad; + } + splx(s); + } + if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) { + so->so_rcv.sb_timeo = (5 * hz); + so->so_snd.sb_timeo = (5 * hz); + } else { + so->so_rcv.sb_timeo = 0; + so->so_snd.sb_timeo = 0; + } + if (nmp->nm_sotype == SOCK_DGRAM) { + sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2; + rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2; + } else if (nmp->nm_sotype == SOCK_SEQPACKET) { + sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2; + rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2; + } else { + if (nmp->nm_sotype != SOCK_STREAM) + panic("nfscon sotype"); + + if (so->so_proto->pr_flags & PR_CONNREQUIRED) { + struct sockopt sopt; + int val; + + bzero(&sopt, sizeof sopt); + sopt.sopt_level = SOL_SOCKET; + sopt.sopt_name = SO_KEEPALIVE; + sopt.sopt_val = &val; + sopt.sopt_valsize = sizeof val; + val = 1; + sosetopt(so, &sopt); + } + if (so->so_proto->pr_protocol == IPPROTO_TCP) { + struct sockopt sopt; + int val; + + bzero(&sopt, sizeof sopt); + sopt.sopt_level = IPPROTO_TCP; + sopt.sopt_name = TCP_NODELAY; + sopt.sopt_val = &val; + sopt.sopt_valsize = sizeof val; + val = 1; + sosetopt(so, &sopt); + } + + sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long)) + * 2; + rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long)) + * 2; + } + + error = soreserve(so, sndreserve, rcvreserve); + if (error) { + goto bad; + } + so->so_rcv.sb_flags |= SB_NOINTR; + so->so_snd.sb_flags |= SB_NOINTR; + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + /* Initialize other non-zero congestion variables */ + nmp->nm_srtt[0] 
= nmp->nm_srtt[1] = nmp->nm_srtt[2] = + nmp->nm_srtt[3] = (NFS_TIMEO << 3); + nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] = + nmp->nm_sdrtt[3] = 0; + nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */ + nmp->nm_sent = 0; + NFSTRACE4(NFSTRC_CWND_INIT, nmp, nmp->nm_flag, nmp->nm_soflags, + nmp->nm_cwnd); + nmp->nm_timeouts = 0; + return (0); + +bad: + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + nfs_disconnect(nmp); + return (error); +} + +/* + * Reconnect routine: + * Called when a connection is broken on a reliable protocol. + * - clean up the old socket + * - nfs_connect() again + * - set R_MUSTRESEND for all outstanding requests on mount point + * If this fails the mount point is DEAD! + * nb: Must be called with the nfs_sndlock() set on the mount point. + */ +static int +nfs_reconnect(rep) + register struct nfsreq *rep; +{ + register struct nfsreq *rp; + register struct nfsmount *nmp = rep->r_nmp; + int error; + + nfs_disconnect(nmp); + while ((error = nfs_connect(nmp, rep))) { + if (error == EINTR || error == ERESTART) + return (EINTR); + (void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0); + } + + NFS_DPF(DUP, ("nfs_reconnect RESEND\n")); + /* + * Loop through outstanding request list and fix up all requests + * on old socket. + */ + for (rp = nfs_reqq.tqh_first; rp != 0; rp = rp->r_chain.tqe_next) { + if (rp->r_nmp == nmp) + rp->r_flags |= R_MUSTRESEND; + } + return (0); +} + +/* + * NFS disconnect. Clean up and unlink. + */ +void +nfs_disconnect(nmp) + register struct nfsmount *nmp; +{ + register struct socket *so; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if (nmp->nm_so) { + so = nmp->nm_so; + nmp->nm_so = (struct socket *)0; + soshutdown(so, 2); + soclose(so); + } + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); +} + +/* + * This is the nfs send routine. For connection based socket types, it + * must be called with an nfs_sndlock() on the socket. 
+ * "rep == NULL" indicates that it has been called from a server. + * For the client side: + * - return EINTR if the RPC is terminated, 0 otherwise + * - set R_MUSTRESEND if the send fails for any reason + * - do any cleanup required by recoverable socket errors (???) + * For the server side: + * - return EINTR or ERESTART if interrupted by a signal + * - return EPIPE if a connection is lost for connection based sockets (TCP...) + * - do any cleanup required by recoverable socket errors (???) + */ +int +nfs_send(so, nam, top, rep) + register struct socket *so; + struct mbuf *nam; + register struct mbuf *top; + struct nfsreq *rep; +{ + struct sockaddr *sendnam; + int error, soflags, flags; + int xidqueued = 0; + struct nfsreq *rp; + char savenametolog[MNAMELEN]; + + if (rep) { + if (rep->r_flags & R_SOFTTERM) { + m_freem(top); + return (EINTR); + } + if ((so = rep->r_nmp->nm_so) == NULL) { + rep->r_flags |= R_MUSTRESEND; + m_freem(top); + return (0); + } + rep->r_flags &= ~R_MUSTRESEND; + soflags = rep->r_nmp->nm_soflags; + for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next) + if (rp == rep) + break; + if (rp) + xidqueued = rp->r_xid; + } else + soflags = so->so_proto->pr_flags; + if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED) || + (nam == 0)) + sendnam = (struct sockaddr *)0; + else + sendnam = mtod(nam, struct sockaddr *); + + if (so->so_type == SOCK_SEQPACKET) + flags = MSG_EOR; + else + flags = 0; + +#if NFSDIAG + if (rep) + nfsdup(rep); +#endif + /* + * Save the name here in case mount point goes away when we switch + * funnels. The name is using local stack and is large, but don't + * want to block if we malloc. 
+ */ + if (rep) + strncpy(savenametolog, + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname, + MNAMELEN); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + error = sosend(so, sendnam, (struct uio *)0, top, + (struct mbuf *)0, flags); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + if (error) { + if (rep) { + if (xidqueued) { + for (rp = nfs_reqq.tqh_first; rp; + rp = rp->r_chain.tqe_next) + if (rp == rep && rp->r_xid == xidqueued) + break; + if (!rp) + panic("nfs_send: error %d xid %x gone", + error, xidqueued); + } + log(LOG_INFO, "nfs send error %d for server %s\n", + error, savenametolog); + /* + * Deal with errors for the client side. + */ + if (rep->r_flags & R_SOFTTERM) + error = EINTR; + else { + rep->r_flags |= R_MUSTRESEND; + NFS_DPF(DUP, + ("nfs_send RESEND error=%d\n", error)); + } + } else + log(LOG_INFO, "nfsd send error %d\n", error); + + /* + * Handle any recoverable (soft) socket errors here. (???) + */ + if (error != EINTR && error != ERESTART && + error != EWOULDBLOCK && error != EPIPE) + error = 0; + } + return (error); +} + +/* + * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all + * done by soreceive(), but for SOCK_STREAM we must deal with the Record + * Mark and consolidate the data into a new mbuf list. + * nb: Sometimes TCP passes the data up to soreceive() in long lists of + * small mbufs. + * For SOCK_STREAM we must be very careful to read an entire record once + * we have read any of it, even if the system call has been interrupted. 
+ */ +static int +nfs_receive(rep, aname, mp) + register struct nfsreq *rep; + struct mbuf **aname; + struct mbuf **mp; +{ + register struct socket *so; + struct uio auio; + struct iovec aio; + register struct mbuf *m; + struct mbuf *control; + u_long len; + struct sockaddr **getnam; + struct sockaddr *tmp_nam; + struct mbuf *mhck; + struct sockaddr_in *sin; + int error, sotype, rcvflg; + struct proc *p = current_proc(); /* XXX */ + + /* + * Set up arguments for soreceive() + */ + *mp = (struct mbuf *)0; + *aname = (struct mbuf *)0; + sotype = rep->r_nmp->nm_sotype; + + /* + * For reliable protocols, lock against other senders/receivers + * in case a reconnect is necessary. + * For SOCK_STREAM, first get the Record Mark to find out how much + * more there is to get. + * We must lock the socket against other receivers + * until we have an entire rpc request/reply. + */ + if (sotype != SOCK_DGRAM) { + error = nfs_sndlock(&rep->r_nmp->nm_flag, rep); + if (error) + return (error); +tryagain: + /* + * Check for fatal errors and resending request. + */ + /* + * Ugh: If a reconnect attempt just happened, nm_so + * would have changed. NULL indicates a failed + * attempt that has essentially shut down this + * mount point. 
+ */ + if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) { + nfs_sndunlock(&rep->r_nmp->nm_flag); + return (EINTR); + } + so = rep->r_nmp->nm_so; + if (!so) { + error = nfs_reconnect(rep); + if (error) { + nfs_sndunlock(&rep->r_nmp->nm_flag); + return (error); + } + goto tryagain; + } + while (rep->r_flags & R_MUSTRESEND) { + m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT); + nfsstats.rpcretries++; + NFS_DPF(DUP, + ("nfs_receive RESEND %s\n", + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname)); + error = nfs_send(so, rep->r_nmp->nm_nam, m, rep); + /* + * we also hold rcv lock so rep is still + * legit this point + */ + if (error) { + if (error == EINTR || error == ERESTART || + (error = nfs_reconnect(rep))) { + nfs_sndunlock(&rep->r_nmp->nm_flag); + return (error); + } + goto tryagain; + } + } + nfs_sndunlock(&rep->r_nmp->nm_flag); + if (sotype == SOCK_STREAM) { + aio.iov_base = (caddr_t) &len; + aio.iov_len = sizeof(u_long); + auio.uio_iov = &aio; + auio.uio_iovcnt = 1; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_offset = 0; + auio.uio_resid = sizeof(u_long); + auio.uio_procp = p; + do { + rcvflg = MSG_WAITALL; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + error = soreceive(so, (struct sockaddr **)0, &auio, + (struct mbuf **)0, (struct mbuf **)0, &rcvflg); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (!rep->r_nmp) /* if unmounted then bailout */ + goto shutout; + if (error == EWOULDBLOCK && rep) { + if (rep->r_flags & R_SOFTTERM) + return (EINTR); + } + } while (error == EWOULDBLOCK); + if (!error && auio.uio_resid > 0) { + log(LOG_INFO, + "short receive (%d/%d) from nfs server %s\n", + sizeof(u_long) - auio.uio_resid, + sizeof(u_long), + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); + error = EPIPE; + } + if (error) + goto errout; + len = ntohl(len) & ~0x80000000; + /* + * This is SERIOUS! We are out of sync with the sender + * and forcing a disconnect/reconnect is all I can do. 
+ */ + if (len > NFS_MAXPACKET) { + log(LOG_ERR, "%s (%d) from nfs server %s\n", + "impossible packet length", + len, + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); + error = EFBIG; + goto errout; + } + auio.uio_resid = len; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + do { + rcvflg = MSG_WAITALL; + error = soreceive(so, (struct sockaddr **)0, + &auio, mp, (struct mbuf **)0, &rcvflg); + if (!rep->r_nmp) /* if unmounted then bailout */ { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto shutout; + } + } while (error == EWOULDBLOCK || error == EINTR || + error == ERESTART); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + if (!error && auio.uio_resid > 0) { + log(LOG_INFO, + "short receive (%d/%d) from nfs server %s\n", + len - auio.uio_resid, len, + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); + error = EPIPE; + } + } else { + /* + * NB: Since uio_resid is big, MSG_WAITALL is ignored + * and soreceive() will return when it has either a + * control msg or a data msg. + * We have no use for control msg., but must grab them + * and then throw them away so we know what is going + * on. 
+ */ + auio.uio_resid = len = 100000000; /* Anything Big */ + auio.uio_procp = p; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + do { + rcvflg = 0; + error = soreceive(so, (struct sockaddr **)0, + &auio, mp, &control, &rcvflg); + if (!rep->r_nmp) /* if unmounted then bailout */ { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto shutout; + } + if (control) + m_freem(control); + if (error == EWOULDBLOCK && rep) { + if (rep->r_flags & R_SOFTTERM) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (EINTR); + } + } + } while (error == EWOULDBLOCK || + (!error && *mp == NULL && control)); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + if ((rcvflg & MSG_EOR) == 0) + printf("Egad!!\n"); + if (!error && *mp == NULL) + error = EPIPE; + len -= auio.uio_resid; + } +errout: + if (error && error != EINTR && error != ERESTART) { + m_freem(*mp); + *mp = (struct mbuf *)0; + if (error != EPIPE) + log(LOG_INFO, + "receive error %d from nfs server %s\n", + error, + rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); + error = nfs_sndlock(&rep->r_nmp->nm_flag, rep); + if (!error) + error = nfs_reconnect(rep); + if (!error) + goto tryagain; + } + } else { + if ((so = rep->r_nmp->nm_so) == NULL) + return (EACCES); + if (so->so_state & SS_ISCONNECTED) + getnam = (struct sockaddr **)0; + else + getnam = &tmp_nam;; + auio.uio_resid = len = 1000000; + auio.uio_procp = p; + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + do { + rcvflg = 0; + error = soreceive(so, getnam, &auio, mp, + (struct mbuf **)0, &rcvflg); + + if ((getnam) && (*getnam)) { + MGET(mhck, M_WAIT, MT_SONAME); + mhck->m_len = (*getnam)->sa_len; + sin = mtod(mhck, struct sockaddr_in *); + bcopy(*getnam, sin, sizeof(struct sockaddr_in)); + mhck->m_hdr.mh_len = sizeof(struct sockaddr_in); + FREE(*getnam, M_SONAME); + *aname = mhck; + } + if (!rep->r_nmp) /* if unmounted then bailout */ { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + goto shutout; + } + + if 
(error == EWOULDBLOCK && + (rep->r_flags & R_SOFTTERM)) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (EINTR); + } + } while (error == EWOULDBLOCK); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + len -= auio.uio_resid; + } +shutout: + if (error) { + m_freem(*mp); + *mp = (struct mbuf *)0; + } + return (error); +} + +/* + * Implement receipt of reply on a socket. + * We must search through the list of received datagrams matching them + * with outstanding requests using the xid, until ours is found. + */ +/* ARGSUSED */ +int +nfs_reply(myrep) + struct nfsreq *myrep; +{ + register struct nfsreq *rep; + register struct nfsmount *nmp = myrep->r_nmp; + register long t1; + struct mbuf *mrep, *md; + struct mbuf *nam; + u_long rxid, *tl; + caddr_t dpos, cp2; + int error; + + /* + * Loop around until we get our own reply + */ + for (;;) { + /* + * Lock against other receivers so that I don't get stuck in + * sbwait() after someone else has received my reply for me. + * Also necessary for connection based protocols to avoid + * race conditions during a reconnect. + * If nfs_rcvlock() returns EALREADY, that means that + * the reply has already been recieved by another + * process and we can return immediately. In this + * case, the lock is not taken to avoid races with + * other processes. + */ + error = nfs_rcvlock(myrep); + if (error == EALREADY) + return (0); + if (error) + return (error); + + /* + * This is being checked after nfs_receive, but + * it doesn't hurt to check prior, since nfs_receive + * will dereference r_nmp also. Bullet-proofing code + * since changing funnels since the request to the + * receive can leave us vulnerable for kernel to unmount + * us. + */ + if (!myrep->r_nmp) { + NFSTRACE4(NFSTRC_ECONN, myrep->r_xid, myrep, nmp, 1); + return (ECONNABORTED); + } + /* + * If we slept after putting bits otw, then reply may have + * arrived. 
In which case returning is required, or we + * would hang trying to nfs_receive an already received reply. + */ + if (myrep->r_mrep != NULL) { + nfs_rcvunlock(&nmp->nm_flag); + NFSTRACE4(NFSTRC_RCVALREADY, myrep->r_xid, myrep, + myrep->r_nmp, 2); + return (0); + } + /* + * Get the next Rpc reply off the socket + */ + error = nfs_receive(myrep, &nam, &mrep); + /* + * Bailout asap if nfsmount struct gone (unmounted) + */ + if (!myrep->r_nmp) { + NFSTRACE4(NFSTRC_ECONN, myrep->r_xid, myrep, nmp, 2); + return (ECONNABORTED); + } + if (error) { + NFSTRACE4(NFSTRC_RCVERR, myrep->r_xid, myrep, nmp, + error); + nfs_rcvunlock(&nmp->nm_flag); + + /* + * Ignore routing errors on connectionless protocols?? + */ + if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) { + nmp->nm_so->so_error = 0; + if (myrep->r_flags & R_GETONEREP) + return (0); + continue; + } + return (error); + } + if (nam) + m_freem(nam); + + /* + * We assume all is fine, but if we did not have an error + * and mrep is 0, better not dereference it. nfs_receieve + * calls soreceive which carefully sets error=0 when it got + * errors on sbwait (tsleep). In most cases, I assume that's + * so we could go back again. In tcp case, EPIPE is returned. + * In udp, case nfs_receive gets back here with no error and no + * mrep. Is the right fix to have soreceive check for process + * aborted after sbwait and return something non-zero? Should + * nfs_receive give an EPIPE? Too risky to play with those + * two this late in game for a shutdown problem. Instead, + * just check here and get out. 
(ekn) + */ + if (!mrep) { + NFSTRACE4(NFSTRC_ECONN, myrep->r_xid, myrep, nmp, 3); + return (ECONNABORTED); /* sounds good */ + } + + /* + * Get the xid and check that it is an rpc reply + */ + md = mrep; + dpos = mtod(md, caddr_t); + nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED); + rxid = *tl++; + if (*tl != rpc_reply) { +#ifndef NFS_NOSERVER + if (nmp->nm_flag & NFSMNT_NQNFS) { + if (nqnfs_callback(nmp, mrep, md, dpos)) + nfsstats.rpcinvalid++; + } else { + nfsstats.rpcinvalid++; + m_freem(mrep); + } +#else + nfsstats.rpcinvalid++; + m_freem(mrep); +#endif +nfsmout: + if (nmp->nm_flag & NFSMNT_RCVLOCK) + nfs_rcvunlock(&nmp->nm_flag); + if (myrep->r_flags & R_GETONEREP) + return (0); /* this path used by NQNFS */ + continue; + } + + /* + * Loop through the request list to match up the reply + * Iff no match, just drop the datagram + */ + for (rep = nfs_reqq.tqh_first; rep != 0; + rep = rep->r_chain.tqe_next) { + if (rep->r_mrep == NULL && rxid == rep->r_xid) { + /* Found it.. */ + rep->r_mrep = mrep; + rep->r_md = md; + rep->r_dpos = dpos; + if (nfsrtton) { + struct rttl *rt; + + rt = &nfsrtt.rttl[nfsrtt.pos]; + rt->proc = rep->r_procnum; + rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]); + rt->sent = nmp->nm_sent; + rt->cwnd = nmp->nm_cwnd; + if (proct[rep->r_procnum] == 0) + panic("nfs_reply: proct[%d] is zero", rep->r_procnum); + rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1]; + rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1]; + rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid; + rt->tstamp = time; + if (rep->r_flags & R_TIMING) + rt->rtt = rep->r_rtt; + else + rt->rtt = 1000000; + nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ; + } + /* + * Update congestion window. + * Do the additive increase of + * one rpc/rtt. 
+ */ + NFSTRACE4(NFSTRC_CWND_REPLY, rep->r_xid, rep, + nmp->nm_sent, nmp->nm_cwnd); + if (nmp->nm_cwnd <= nmp->nm_sent) { + nmp->nm_cwnd += + (NFS_CWNDSCALE * NFS_CWNDSCALE + + (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd; + if (nmp->nm_cwnd > NFS_MAXCWND) + nmp->nm_cwnd = NFS_MAXCWND; + } + if (!(rep->r_flags & R_SENT)) + printf("nfs_reply: unsent xid=%x", + rep->r_xid); + rep->r_flags &= ~R_SENT; + nmp->nm_sent -= NFS_CWNDSCALE; + /* + * Update rtt using a gain of 0.125 on the mean + * and a gain of 0.25 on the deviation. + */ + if (rep->r_flags & R_TIMING) { + /* + * Since the timer resolution of + * NFS_HZ is so course, it can often + * result in r_rtt == 0. Since + * r_rtt == N means that the actual + * rtt is between N+dt and N+2-dt ticks, + * add 1. + */ + if (proct[rep->r_procnum] == 0) + panic("nfs_reply: proct[%d] is zero", rep->r_procnum); + t1 = rep->r_rtt + 1; + t1 -= (NFS_SRTT(rep) >> 3); + NFS_SRTT(rep) += t1; + if (t1 < 0) + t1 = -t1; + t1 -= (NFS_SDRTT(rep) >> 2); + NFS_SDRTT(rep) += t1; + } + nmp->nm_timeouts = 0; + break; + } + } + nfs_rcvunlock(&nmp->nm_flag); + /* + * If not matched to a request, drop it. + * If it's mine, get out. + */ + if (rep == 0) { + nfsstats.rpcunexpected++; + m_freem(mrep); + } else if (rep == myrep) { + if (rep->r_mrep == NULL) + panic("nfs_reply: nil r_mrep"); + return (0); + } + NFSTRACE4(NFSTRC_NOTMINE, myrep->r_xid, myrep, rep, + rep ? 
rep->r_xid : myrep->r_flags); + if (myrep->r_flags & R_GETONEREP) + return (0); /* this path used by NQNFS */ + } +} + +/* + * nfs_request - goes something like this + * - fill in request struct + * - links it into list + * - calls nfs_send() for first transmit + * - calls nfs_receive() to get reply + * - break down rpc header and return with nfs reply pointed to + * by mrep or error + * nb: always frees up mreq mbuf list + */ +int +nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp) + struct vnode *vp; + struct mbuf *mrest; + int procnum; + struct proc *procp; + struct ucred *cred; + struct mbuf **mrp; + struct mbuf **mdp; + caddr_t *dposp; +{ + register struct mbuf *m, *mrep; + register struct nfsreq *rep, *rp; + register u_long *tl; + register int i; + struct nfsmount *nmp; + struct mbuf *md, *mheadend; + struct nfsnode *np; + char nickv[RPCX_NICKVERF]; + time_t reqtime, waituntil; + caddr_t dpos, cp2; + int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type; + int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0; + int verf_len, verf_type; + u_long xid; + u_quad_t frev; + char *auth_str, *verf_str; + NFSKERBKEY_T key; /* save session key */ + + nmp = VFSTONFS(vp->v_mount); + MALLOC_ZONE(rep, struct nfsreq *, + sizeof(struct nfsreq), M_NFSREQ, M_WAITOK); + NFSTRACE4(NFSTRC_REQ, vp, procnum, nmp, rep); + + /* + * make sure if we blocked above, that the file system didn't get + * unmounted leaving nmp bogus value to trip on later and crash. + * Note nfs_unmount will set rep->r_nmp if unmounted volume, but we + * aren't that far yet. SO this is best we can do. I wanted to check + * for vp->v_mount = 0 also below, but that caused reboot crash. + * Something must think it's okay for vp-v_mount=0 during booting. + * Thus the best I can do here is see if we still have a vnode. 
+ */ + + if (vp->v_type == VBAD) { + NFSTRACE4(NFSTRC_VBAD, 1, vp, nmp, rep); + _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (EINVAL); + } + rep->r_nmp = nmp; + rep->r_vp = vp; + rep->r_procp = procp; + rep->r_procnum = procnum; + i = 0; + m = mrest; + while (m) { + i += m->m_len; + m = m->m_next; + } + mrest_len = i; + + /* + * Get the RPC header with authorization. + */ +kerbauth: + verf_str = auth_str = (char *)0; + if (nmp->nm_flag & NFSMNT_KERB) { + verf_str = nickv; + verf_len = sizeof (nickv); + auth_type = RPCAUTH_KERB4; + bzero((caddr_t)key, sizeof (key)); + if (failed_auth || nfs_getnickauth(nmp, cred, &auth_str, + &auth_len, verf_str, verf_len)) { + error = nfs_getauth(nmp, rep, cred, &auth_str, + &auth_len, verf_str, &verf_len, key); + if (error) { + _FREE_ZONE((caddr_t)rep, + sizeof (struct nfsreq), M_NFSREQ); + m_freem(mrest); + return (error); + } + } + } else { + auth_type = RPCAUTH_UNIX; + if (cred->cr_ngroups < 1) + panic("nfsreq nogrps"); + auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ? + nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) + + 5 * NFSX_UNSIGNED; + } + m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len, + auth_str, verf_len, verf_str, mrest, mrest_len, &mheadend, &xid); + if (auth_str) + _FREE(auth_str, M_TEMP); + + /* + * For stream protocols, insert a Sun RPC Record Mark. + */ + if (nmp->nm_sotype == SOCK_STREAM) { + M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); + *mtod(m, u_long *) = htonl(0x80000000 | + (m->m_pkthdr.len - NFSX_UNSIGNED)); + } + rep->r_mreq = m; + rep->r_xid = xid; +tryagain: + if (nmp->nm_flag & NFSMNT_SOFT) + rep->r_retry = nmp->nm_retry; + else + rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */ + rep->r_rtt = rep->r_rexmit = 0; + if (proct[procnum] > 0) + rep->r_flags = R_TIMING; + else + rep->r_flags = 0; + rep->r_mrep = NULL; + + /* + * Do the client side RPC. + */ + nfsstats.rpcrequests++; + /* + * Chain request into list of outstanding requests. 
Be sure + * to put it LAST so timer finds oldest requests first. + */ + s = splsoftclock(); + TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain); + + /* Get send time for nqnfs */ + reqtime = time.tv_sec; + + /* + * If backing off another request or avoiding congestion, don't + * send this one now but let timer do it. If not timing a request, + * do it now. + */ + if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM || + (nmp->nm_flag & NFSMNT_DUMBTIMR) || + nmp->nm_sent < nmp->nm_cwnd)) { + splx(s); + if (nmp->nm_soflags & PR_CONNREQUIRED) + error = nfs_sndlock(&nmp->nm_flag, rep); + + /* + * Set the R_SENT before doing the send in case another thread + * processes the reply before the nfs_send returns here + */ + if (!error) { + if ((rep->r_flags & R_MUSTRESEND) == 0) { + NFSTRACE4(NFSTRC_CWND_REQ1, rep->r_xid, rep, + nmp->nm_sent, nmp->nm_cwnd); + nmp->nm_sent += NFS_CWNDSCALE; + rep->r_flags |= R_SENT; + } + + m = m_copym(m, 0, M_COPYALL, M_WAIT); + error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep); + if (nmp->nm_soflags & PR_CONNREQUIRED) + nfs_sndunlock(&nmp->nm_flag); + } + if (error) { + nmp->nm_sent -= NFS_CWNDSCALE; + rep->r_flags &= ~R_SENT; + } + } else { + splx(s); + rep->r_rtt = -1; + } + + /* + * Wait for the reply from our send or the timer's. + */ + if (!error || error == EPIPE) + error = nfs_reply(rep); + + /* + * RPC done, unlink the request. + */ + s = splsoftclock(); + for (rp = nfs_reqq.tqh_first; rp; + rp = rp->r_chain.tqe_next) + if (rp == rep && rp->r_xid == xid) + break; + if (!rp) + panic("nfs_request race, rep %x xid %x", rep, xid); + TAILQ_REMOVE(&nfs_reqq, rep, r_chain); + splx(s); + + /* + * Decrement the outstanding request count. + */ + if (rep->r_flags & R_SENT) { + NFSTRACE4(NFSTRC_CWND_REQ2, rep->r_xid, rep, nmp->nm_sent, + nmp->nm_cwnd); + rep->r_flags &= ~R_SENT; /* paranoia */ + nmp->nm_sent -= NFS_CWNDSCALE; + } + + /* + * If there was a successful reply and a tprintf msg. + * tprintf a response. 
+ */ + if (!error && (rep->r_flags & R_TPRINTFMSG)) + nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname, + "is alive again"); + mrep = rep->r_mrep; + md = rep->r_md; + dpos = rep->r_dpos; + if (error) { + m_freem(rep->r_mreq); + NFSTRACE4(NFSTRC_REQERR, error, rep->r_xid, nmp, rep); + _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (error); + } + + /* + * break down the rpc header and check if ok + */ + nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); + if (*tl++ == rpc_msgdenied) { + if (*tl == rpc_mismatch) + error = EOPNOTSUPP; + else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) { + if (!failed_auth) { + failed_auth++; + mheadend->m_next = (struct mbuf *)0; + m_freem(mrep); + m_freem(rep->r_mreq); + goto kerbauth; + } else + error = EAUTH; + } else + error = EACCES; + m_freem(mrep); + m_freem(rep->r_mreq); + NFSTRACE4(NFSTRC_RPCERR, error, rep->r_xid, nmp, rep); + _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (error); + } + + /* + * Grab any Kerberos verifier, otherwise just throw it away. 
+ */ + verf_type = fxdr_unsigned(int, *tl++); + i = fxdr_unsigned(int, *tl); + if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) { + error = nfs_savenickauth(nmp, cred, i, key, &md, &dpos, mrep); + if (error) + goto nfsmout; + } else if (i > 0) + nfsm_adv(nfsm_rndup(i)); + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + /* 0 == ok */ + if (*tl == 0) { + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + if (*tl != 0) { + error = fxdr_unsigned(int, *tl); + if ((nmp->nm_flag & NFSMNT_NFSV3) && + error == NFSERR_TRYLATER) { + m_freem(mrep); + error = 0; + waituntil = time.tv_sec + trylater_delay; + NFS_DPF(DUP, + ("nfs_request %s flag=%x trylater_cnt=%x waituntil=%lx trylater_delay=%x\n", + nmp->nm_mountp->mnt_stat.f_mntfromname, + nmp->nm_flag, trylater_cnt, waituntil, + trylater_delay)); + while (time.tv_sec < waituntil) + (void)tsleep((caddr_t)&lbolt, + PSOCK, "nqnfstry", 0); + trylater_delay *= nfs_backoff[trylater_cnt]; + if (trylater_cnt < 7) + trylater_cnt++; + goto tryagain; + } + + /* + * If the File Handle was stale, invalidate the + * lookup cache, just in case. 
+ */ + if (error == ESTALE) + cache_purge(vp); + if (nmp->nm_flag & NFSMNT_NFSV3) { + *mrp = mrep; + *mdp = md; + *dposp = dpos; + error |= NFSERR_RETERR; + } else + m_freem(mrep); + m_freem(rep->r_mreq); + NFSTRACE4(NFSTRC_DISSECTERR, error, rep->r_xid, nmp, + rep); + _FREE_ZONE((caddr_t)rep, + sizeof (struct nfsreq), M_NFSREQ); + return (error); + } + + /* + * For nqnfs, get any lease in reply + */ + if (nmp->nm_flag & NFSMNT_NQNFS) { + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + if (*tl) { + np = VTONFS(vp); + nqlflag = fxdr_unsigned(int, *tl); + nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED); + cachable = fxdr_unsigned(int, *tl++); + reqtime += fxdr_unsigned(int, *tl++); + if (reqtime > time.tv_sec) { + fxdr_hyper(tl, &frev); + nqnfs_clientlease(nmp, np, nqlflag, + cachable, reqtime, frev); + } + } + } + *mrp = mrep; + *mdp = md; + *dposp = dpos; + m_freem(rep->r_mreq); + NFSTRACE4(NFSTRC_REQFREE, 0xf0f0f0f0, rep->r_xid, nmp, rep); + FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (0); + } + m_freem(mrep); + error = EPROTONOSUPPORT; +nfsmout: + m_freem(rep->r_mreq); + NFSTRACE4(NFSTRC_REQFREE, error, rep->r_xid, nmp, rep); + _FREE_ZONE((caddr_t)rep, sizeof (struct nfsreq), M_NFSREQ); + return (error); +} + +#ifndef NFS_NOSERVER +/* + * Generate the rpc reply header + * siz arg. is used to decide if adding a cluster is worthwhile + */ +int +nfs_rephead(siz, nd, slp, err, cache, frev, mrq, mbp, bposp) + int siz; + struct nfsrv_descript *nd; + struct nfssvc_sock *slp; + int err; + int cache; + u_quad_t *frev; + struct mbuf **mrq; + struct mbuf **mbp; + caddr_t *bposp; +{ + register u_long *tl; + register struct mbuf *mreq; + caddr_t bpos; + struct mbuf *mb, *mb2; + + MGETHDR(mreq, M_WAIT, MT_DATA); + mb = mreq; + /* + * If this is a big reply, use a cluster else + * try and leave leading space for the lower level headers. 
+ */ + siz += RPC_REPLYSIZ; + if (siz >= MINCLSIZE) { + MCLGET(mreq, M_WAIT); + } else + mreq->m_data += max_hdr; + tl = mtod(mreq, u_long *); + mreq->m_len = 6 * NFSX_UNSIGNED; + bpos = ((caddr_t)tl) + mreq->m_len; + *tl++ = txdr_unsigned(nd->nd_retxid); + *tl++ = rpc_reply; + if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) { + *tl++ = rpc_msgdenied; + if (err & NFSERR_AUTHERR) { + *tl++ = rpc_autherr; + *tl = txdr_unsigned(err & ~NFSERR_AUTHERR); + mreq->m_len -= NFSX_UNSIGNED; + bpos -= NFSX_UNSIGNED; + } else { + *tl++ = rpc_mismatch; + *tl++ = txdr_unsigned(RPC_VER2); + *tl = txdr_unsigned(RPC_VER2); + } + } else { + *tl++ = rpc_msgaccepted; + + /* + * For Kerberos authentication, we must send the nickname + * verifier back, otherwise just RPCAUTH_NULL. + */ + if (nd->nd_flag & ND_KERBFULL) { + register struct nfsuid *nuidp; + struct timeval ktvin, ktvout; + + for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first; + nuidp != 0; nuidp = nuidp->nu_hash.le_next) { + if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid && + (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp), + &nuidp->nu_haddr, nd->nd_nam2))) + break; + } + if (nuidp) { + ktvin.tv_sec = + txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1); + ktvin.tv_usec = + txdr_unsigned(nuidp->nu_timestamp.tv_usec); + + /* + * Encrypt the timestamp in ecb mode using the + * session key. 
+ */ +#if NFSKERB + XXX +#endif + + *tl++ = rpc_auth_kerb; + *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED); + *tl = ktvout.tv_sec; + nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); + *tl++ = ktvout.tv_usec; + *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid); + } else { + *tl++ = 0; + *tl++ = 0; + } + } else { + *tl++ = 0; + *tl++ = 0; + } + switch (err) { + case EPROGUNAVAIL: + *tl = txdr_unsigned(RPC_PROGUNAVAIL); + break; + case EPROGMISMATCH: + *tl = txdr_unsigned(RPC_PROGMISMATCH); + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + if (nd->nd_flag & ND_NQNFS) { + *tl++ = txdr_unsigned(3); + *tl = txdr_unsigned(3); + } else { + *tl++ = txdr_unsigned(2); + *tl = txdr_unsigned(3); + } + break; + case EPROCUNAVAIL: + *tl = txdr_unsigned(RPC_PROCUNAVAIL); + break; + case EBADRPC: + *tl = txdr_unsigned(RPC_GARBAGE); + break; + default: + *tl = 0; + if (err != NFSERR_RETVOID) { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + if (err) + *tl = txdr_unsigned(nfsrv_errmap(nd, err)); + else + *tl = 0; + } + break; + }; + } + + /* + * For nqnfs, piggyback lease as requested. + */ + if ((nd->nd_flag & ND_NQNFS) && err == 0) { + if (nd->nd_flag & ND_LEASE) { + nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(nd->nd_flag & ND_LEASE); + *tl++ = txdr_unsigned(cache); + *tl++ = txdr_unsigned(nd->nd_duration); + txdr_hyper(frev, tl); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = 0; + } + } + if (mrq != NULL) + *mrq = mreq; + *mbp = mb; + *bposp = bpos; + if (err != 0 && err != NFSERR_RETVOID) + nfsstats.srvrpc_errs++; + return (0); +} + + +#endif /* NFS_NOSERVER */ + + +/* + * From FreeBSD 1.58, a Matt Dillon fix... + * Flag a request as being about to terminate. + * The nm_sent count is decremented now to avoid deadlocks when the process + * in soreceive() hasn't yet managed to send its own request. 
+ */ +static void +nfs_softterm(struct nfsreq *rep) +{ + rep->r_flags |= R_SOFTTERM; + if (rep->r_flags & R_SENT) { + NFSTRACE4(NFSTRC_CWND_SOFT, rep->r_xid, rep, + rep->r_nmp->nm_sent, rep->r_nmp->nm_cwnd); + rep->r_nmp->nm_sent -= NFS_CWNDSCALE; + rep->r_flags &= ~R_SENT; + } +} + +void +nfs_timer_funnel(arg) + void * arg; +{ + (void) thread_funnel_set(kernel_flock, TRUE); + nfs_timer(arg); + (void) thread_funnel_set(kernel_flock, FALSE); + +} + +/* + * Nfs timer routine + * Scan the nfsreq list and retranmit any requests that have timed out + * To avoid retransmission attempts on STREAM sockets (in the future) make + * sure to set the r_retry field to 0 (implies nm_retry == 0). + */ +void +nfs_timer(arg) + void *arg; /* never used */ +{ + register struct nfsreq *rep, *rp; + register struct mbuf *m; + register struct socket *so; + register struct nfsmount *nmp; + register int timeo; + int s, error; +#ifndef NFS_NOSERVER + static long lasttime = 0; + register struct nfssvc_sock *slp; + u_quad_t cur_usec; +#endif /* NFS_NOSERVER */ +#if NFSDIAG + int rttdiag; +#endif + int flags, rexmit, cwnd, sent; + u_long xid; + + s = splnet(); + /* + * XXX If preemptable threads are implemented the spls used for the + * outstanding request queue must be replaced with mutexes. 
+ */ +rescan: +#ifdef NFSTRACESUSPENDERS + if (NFSTRACE_SUSPENDING) { + for (rep = nfs_reqq.tqh_first; rep != 0; + rep = rep->r_chain.tqe_next) + if (rep->r_xid == nfstracexid) + break; + if (!rep) { + NFSTRACE_RESUME; + } else if (NFSTRACE_SUSPENSEOVER) { + NFSTRACE_SUSPEND; + } + } +#endif + for (rep = nfs_reqq.tqh_first; rep != 0; rep = rep->r_chain.tqe_next) { +#ifdef NFSTRACESUSPENDERS + if (rep->r_mrep && !NFSTRACE_SUSPENDING) { + nfstracexid = rep->r_xid; + NFSTRACE_STARTSUSPENDCOUNTDOWN; + } +#endif + nmp = rep->r_nmp; + if (!nmp) /* unmounted */ + continue; + if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) + continue; + if (nfs_sigintr(nmp, rep, rep->r_procp)) { + nfs_softterm(rep); + continue; + } + if (rep->r_rtt >= 0) { + rep->r_rtt++; + if (nmp->nm_flag & NFSMNT_DUMBTIMR) + timeo = nmp->nm_timeo; + else + timeo = NFS_RTO(nmp, proct[rep->r_procnum]); + /* ensure 62.5 ms floor */ + while (16 * timeo < hz) + timeo *= 2; + if (nmp->nm_timeouts > 0) + timeo *= nfs_backoff[nmp->nm_timeouts - 1]; + if (rep->r_rtt <= timeo) + continue; + if (nmp->nm_timeouts < 8) + nmp->nm_timeouts++; + } + /* + * Check for server not responding + */ + if ((rep->r_flags & R_TPRINTFMSG) == 0 && + rep->r_rexmit > nmp->nm_deadthresh) { + nfs_msg(rep->r_procp, + nmp->nm_mountp->mnt_stat.f_mntfromname, + "not responding"); + rep->r_flags |= R_TPRINTFMSG; + } + if (rep->r_rexmit >= rep->r_retry) { /* too many */ + nfsstats.rpctimeouts++; + nfs_softterm(rep); + continue; + } + if (nmp->nm_sotype != SOCK_DGRAM) { + if (++rep->r_rexmit > NFS_MAXREXMIT) + rep->r_rexmit = NFS_MAXREXMIT; + continue; + } + if ((so = nmp->nm_so) == NULL) + continue; + + /* + * If there is enough space and the window allows.. + * Resend it + * Set r_rtt to -1 in case we fail to send it now. 
+ */ +#if NFSDIAG + rttdiag = rep->r_rtt; +#endif + rep->r_rtt = -1; + if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len && + ((nmp->nm_flag & NFSMNT_DUMBTIMR) || + (rep->r_flags & R_SENT) || + nmp->nm_sent < nmp->nm_cwnd) && + (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){ + + struct proc *p = current_proc(); + +#if NFSDIAG + if (rep->r_flags & R_SENT && nfsprnttimo && + nmp->nm_timeouts >= nfsprnttimo) { + int t = proct[rep->r_procnum]; + if (t) + NFS_DPF(DUP, ("nfs_timer %s nmtm=%d tms=%d rtt=%d tm=%d p=%d A=%d D=%d\n", nmp->nm_mountp->mnt_stat.f_mntfromname, nmp->nm_timeo, nmp->nm_timeouts, rttdiag, timeo, rep->r_procnum, nmp->nm_srtt[t-1], nmp->nm_sdrtt[t-1])); + else + NFS_DPF(DUP, ("nfs_timer %s nmtm=%d tms=%d rtt=%d tm=%d p=%d\n", nmp->nm_mountp->mnt_stat.f_mntfromname, nmp->nm_timeo, nmp->nm_timeouts, rttdiag, timeo, rep->r_procnum)); + } + nfsdup(rep); +#endif /* NFSDIAG */ + /* + * Iff first send, start timing + * else turn timing off, backoff timer + * and divide congestion window by 2. + * We update these *before* the send to avoid + * racing against receiving the reply. + * We save them so we can restore them on send error. 
+ */ + flags = rep->r_flags; + rexmit = rep->r_rexmit; + cwnd = nmp->nm_cwnd; + sent = nmp->nm_sent; + xid = rep->r_xid; + if (rep->r_flags & R_SENT) { + rep->r_flags &= ~R_TIMING; + if (++rep->r_rexmit > NFS_MAXREXMIT) + rep->r_rexmit = NFS_MAXREXMIT; + nmp->nm_cwnd >>= 1; + if (nmp->nm_cwnd < NFS_CWNDSCALE) + nmp->nm_cwnd = NFS_CWNDSCALE; + nfsstats.rpcretries++; + } else { + rep->r_flags |= R_SENT; + nmp->nm_sent += NFS_CWNDSCALE; + } + NFSTRACE4(NFSTRC_CWND_TIMER, xid, rep, + nmp->nm_sent, nmp->nm_cwnd); + + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + + if ((nmp->nm_flag & NFSMNT_NOCONN) == 0) + error = (*so->so_proto->pr_usrreqs->pru_send) + (so, 0, m, 0, 0, p); + else + error = (*so->so_proto->pr_usrreqs->pru_send) + (so, 0, m, mtod(nmp->nm_nam, struct sockaddr *), 0, p); + + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + + NFSTRACE4(NFSTRC_CWND_TIMER, xid, error, sent, cwnd); + /* + * This is to fix "nfs_sigintr" DSI panics. + * We may have slept during the send so the current + * place in the request queue may have been released. + * Due to zone_gc it may even be part of an + * unrelated newly allocated data structure. + * Restart the list scan from the top if needed... + */ + for (rp = nfs_reqq.tqh_first; rp; + rp = rp->r_chain.tqe_next) + if (rp == rep && rp->r_xid == xid) + break; + if (!rp) { + if (!error) + goto rescan; + panic("nfs_timer: race error %d xid 0x%x\n", + error, xid); + } + + if (error) { + if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) + so->so_error = 0; + rep->r_flags = flags; + rep->r_rexmit = rexmit; + nmp->nm_cwnd = cwnd; + nmp->nm_sent = sent; + if (flags & R_SENT) + nfsstats.rpcretries--; + } else + rep->r_rtt = 0; + } + } +#ifndef NFS_NOSERVER + /* + * Call the nqnfs server timer once a second to handle leases. + */ + if (lasttime != time.tv_sec) { + lasttime = time.tv_sec; + nqnfs_serverd(); + } + + /* + * Scan the write gathering queues for writes that need to be + * completed now. 
+ */ + cur_usec = (u_quad_t)time.tv_sec * 1000000 + (u_quad_t)time.tv_usec; + for (slp = nfssvc_sockhead.tqh_first; slp != 0; + slp = slp->ns_chain.tqe_next) { + if (slp->ns_tq.lh_first && slp->ns_tq.lh_first->nd_time<=cur_usec) + nfsrv_wakenfsd(slp); + } +#endif /* NFS_NOSERVER */ + splx(s); + timeout(nfs_timer_funnel, (void *)0, nfs_ticks); + +} + + +/* + * Test for a termination condition pending on the process. + * This is used for NFSMNT_INT mounts. + */ +int +nfs_sigintr(nmp, rep, p) + struct nfsmount *nmp; + struct nfsreq *rep; + register struct proc *p; +{ + + if (rep && (rep->r_flags & R_SOFTTERM)) + return (EINTR); + if (!(nmp->nm_flag & NFSMNT_INT)) + return (0); + if (p && p->p_siglist && + (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) & + NFSINT_SIGMASK)) + return (EINTR); + return (0); +} + +/* + * Lock a socket against others. + * Necessary for STREAM sockets to ensure you get an entire rpc request/reply + * and also to avoid race conditions between the processes with nfs requests + * in progress when a reconnect is necessary. + */ +int +nfs_sndlock(flagp, rep) + register int *flagp; + struct nfsreq *rep; +{ + struct proc *p; + int slpflag = 0, slptimeo = 0; + + if (rep) { + p = rep->r_procp; + if (rep->r_nmp->nm_flag & NFSMNT_INT) + slpflag = PCATCH; + } else + p = (struct proc *)0; + while (*flagp & NFSMNT_SNDLOCK) { + if (nfs_sigintr(rep->r_nmp, rep, p)) + return (EINTR); + *flagp |= NFSMNT_WANTSND; + (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "nfsndlck", + slptimeo); + if (slpflag == PCATCH) { + slpflag = 0; + slptimeo = 2 * hz; + } + } + *flagp |= NFSMNT_SNDLOCK; + return (0); +} + +/* + * Unlock the stream socket for others. 
+ */ +void +nfs_sndunlock(flagp) + register int *flagp; +{ + + if ((*flagp & NFSMNT_SNDLOCK) == 0) + panic("nfs sndunlock"); + *flagp &= ~NFSMNT_SNDLOCK; + if (*flagp & NFSMNT_WANTSND) { + *flagp &= ~NFSMNT_WANTSND; + wakeup((caddr_t)flagp); + } +} + +static int +nfs_rcvlock(rep) + register struct nfsreq *rep; +{ + register int *flagp = &rep->r_nmp->nm_flag; + int slpflag, slptimeo = 0; + + if (*flagp & NFSMNT_INT) + slpflag = PCATCH; + else + slpflag = 0; + while (*flagp & NFSMNT_RCVLOCK) { + if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp)) { + NFSTRACE4(NFSTRC_RCVLCKINTR, rep->r_xid, rep, + rep->r_nmp, *flagp); + return (EINTR); + } else if (rep->r_mrep != NULL) { + /* + * Don't bother sleeping if reply already arrived + */ + NFSTRACE4(NFSTRC_RCVALREADY, rep->r_xid, rep, + rep->r_nmp, 1); + return (EALREADY); + } + NFSTRACE4(NFSTRC_RCVLCKW, rep->r_xid, rep, rep->r_nmp, *flagp); + *flagp |= NFSMNT_WANTRCV; + (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "nfsrcvlk", + slptimeo); + if (slpflag == PCATCH) { + slpflag = 0; + slptimeo = 2 * hz; + } + } + /* + * nfs_reply will handle it if reply already arrived. + * (We may have slept or been preempted while on network funnel). + */ + NFSTRACE4(NFSTRC_RCVLCK, rep->r_xid, rep, rep->r_nmp, *flagp); + *flagp |= NFSMNT_RCVLOCK; + return (0); +} + +/* + * Unlock the stream socket for others. + */ +static void +nfs_rcvunlock(flagp) + register int *flagp; +{ + + if ((*flagp & NFSMNT_RCVLOCK) == 0) + panic("nfs rcvunlock"); + *flagp &= ~NFSMNT_RCVLOCK; + if (*flagp & NFSMNT_WANTRCV) { + NFSTRACE(NFSTRC_RCVUNLW, flagp); + *flagp &= ~NFSMNT_WANTRCV; + wakeup((caddr_t)flagp); + } else { + NFSTRACE(NFSTRC_RCVUNL, flagp); + } +} + + +#ifndef NFS_NOSERVER +/* + * Socket upcall routine for the nfsd sockets. + * The caddr_t arg is a pointer to the "struct nfssvc_sock". + * Essentially do as much as possible non-blocking, else punt and it will + * be called with M_WAIT from an nfsd. 
+ */ + /* + * Needs to eun under network funnel + */ +void +nfsrv_rcv(so, arg, waitflag) + struct socket *so; + caddr_t arg; + int waitflag; +{ + register struct nfssvc_sock *slp = (struct nfssvc_sock *)arg; + register struct mbuf *m; + struct mbuf *mp, *mhck; + struct sockaddr *nam=0; + struct uio auio; + int flags, error; + struct sockaddr_in *sin; + + if ((slp->ns_flag & SLP_VALID) == 0) + return; +#ifdef notdef + /* + * Define this to test for nfsds handling this under heavy load. + */ + if (waitflag == M_DONTWAIT) { + slp->ns_flag |= SLP_NEEDQ; goto dorecs; + } +#endif + auio.uio_procp = NULL; + if (so->so_type == SOCK_STREAM) { + /* + * If there are already records on the queue, defer soreceive() + * to an nfsd so that there is feedback to the TCP layer that + * the nfs servers are heavily loaded. + */ + if (slp->ns_rec && waitflag == M_DONTWAIT) { + slp->ns_flag |= SLP_NEEDQ; + goto dorecs; + } + + /* + * Do soreceive(). + */ + auio.uio_resid = 1000000000; + flags = MSG_DONTWAIT; + error = soreceive(so, (struct sockaddr **) 0, &auio, &mp, (struct mbuf **)0, &flags); + if (error || mp == (struct mbuf *)0) { + if (error == EWOULDBLOCK) + slp->ns_flag |= SLP_NEEDQ; + else + slp->ns_flag |= SLP_DISCONN; + goto dorecs; + } + m = mp; + if (slp->ns_rawend) { + slp->ns_rawend->m_next = m; + slp->ns_cc += 1000000000 - auio.uio_resid; + } else { + slp->ns_raw = m; + slp->ns_cc = 1000000000 - auio.uio_resid; + } + while (m->m_next) + m = m->m_next; + slp->ns_rawend = m; + + /* + * Now try and parse record(s) out of the raw stream data. 
+ */ + error = nfsrv_getstream(slp, waitflag); + if (error) { + if (error == EPERM) + slp->ns_flag |= SLP_DISCONN; + else + slp->ns_flag |= SLP_NEEDQ; + } + } else { + do { + auio.uio_resid = 1000000000; + flags = MSG_DONTWAIT; + nam = 0; + error = soreceive(so, &nam, &auio, &mp, + (struct mbuf **)0, &flags); + + if (mp) { + if (nam) { + MGET(mhck, M_WAIT, MT_SONAME); + mhck->m_len = nam->sa_len; + sin = mtod(mhck, struct sockaddr_in *); + bcopy(nam, sin, sizeof(struct sockaddr_in)); + mhck->m_hdr.mh_len = sizeof(struct sockaddr_in); + FREE(nam, M_SONAME); + + m = mhck; + m->m_next = mp; + } else + m = mp; + if (slp->ns_recend) + slp->ns_recend->m_nextpkt = m; + else + slp->ns_rec = m; + slp->ns_recend = m; + m->m_nextpkt = (struct mbuf *)0; + } + if (error) { + if ((so->so_proto->pr_flags & PR_CONNREQUIRED) + && error != EWOULDBLOCK) { + slp->ns_flag |= SLP_DISCONN; + goto dorecs; + } + } + } while (mp); + } + + /* + * Now try and process the request records, non-blocking. + */ +dorecs: + if (waitflag == M_DONTWAIT && + (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) { + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + nfsrv_wakenfsd(slp); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + } +} + +/* + * Try and extract an RPC request from the mbuf data list received on a + * stream socket. The "waitflag" argument indicates whether or not it + * can sleep. 
+ */ +static int +nfsrv_getstream(slp, waitflag) + register struct nfssvc_sock *slp; + int waitflag; +{ + register struct mbuf *m, **mpp; + register char *cp1, *cp2; + register int len; + struct mbuf *om, *m2, *recm = 0; + u_long recmark; + + if (slp->ns_flag & SLP_GETSTREAM) + panic("nfs getstream"); + slp->ns_flag |= SLP_GETSTREAM; + for (;;) { + if (slp->ns_reclen == 0) { + if (slp->ns_cc < NFSX_UNSIGNED) { + slp->ns_flag &= ~SLP_GETSTREAM; + return (0); + } + m = slp->ns_raw; + if (m->m_len >= NFSX_UNSIGNED) { + bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED); + m->m_data += NFSX_UNSIGNED; + m->m_len -= NFSX_UNSIGNED; + } else { + cp1 = (caddr_t)&recmark; + cp2 = mtod(m, caddr_t); + while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) { + while (m->m_len == 0) { + m = m->m_next; + cp2 = mtod(m, caddr_t); + } + *cp1++ = *cp2++; + m->m_data++; + m->m_len--; + } + } + slp->ns_cc -= NFSX_UNSIGNED; + recmark = ntohl(recmark); + slp->ns_reclen = recmark & ~0x80000000; + if (recmark & 0x80000000) + slp->ns_flag |= SLP_LASTFRAG; + else + slp->ns_flag &= ~SLP_LASTFRAG; + if (slp->ns_reclen < NFS_MINPACKET || slp->ns_reclen > NFS_MAXPACKET) { + slp->ns_flag &= ~SLP_GETSTREAM; + return (EPERM); + } + } + + /* + * Now get the record part. 
+ */ + if (slp->ns_cc == slp->ns_reclen) { + recm = slp->ns_raw; + slp->ns_raw = slp->ns_rawend = (struct mbuf *)0; + slp->ns_cc = slp->ns_reclen = 0; + } else if (slp->ns_cc > slp->ns_reclen) { + len = 0; + m = slp->ns_raw; + om = (struct mbuf *)0; + while (len < slp->ns_reclen) { + if ((len + m->m_len) > slp->ns_reclen) { + m2 = m_copym(m, 0, slp->ns_reclen - len, + waitflag); + if (m2) { + if (om) { + om->m_next = m2; + recm = slp->ns_raw; + } else + recm = m2; + m->m_data += slp->ns_reclen - len; + m->m_len -= slp->ns_reclen - len; + len = slp->ns_reclen; + } else { + slp->ns_flag &= ~SLP_GETSTREAM; + return (EWOULDBLOCK); + } + } else if ((len + m->m_len) == slp->ns_reclen) { + om = m; + len += m->m_len; + m = m->m_next; + recm = slp->ns_raw; + om->m_next = (struct mbuf *)0; + } else { + om = m; + len += m->m_len; + m = m->m_next; + } + } + slp->ns_raw = m; + slp->ns_cc -= len; + slp->ns_reclen = 0; + } else { + slp->ns_flag &= ~SLP_GETSTREAM; + return (0); + } + + /* + * Accumulate the fragments into a record. + */ + mpp = &slp->ns_frag; + while (*mpp) + mpp = &((*mpp)->m_next); + *mpp = recm; + if (slp->ns_flag & SLP_LASTFRAG) { + if (slp->ns_recend) + slp->ns_recend->m_nextpkt = slp->ns_frag; + else + slp->ns_rec = slp->ns_frag; + slp->ns_recend = slp->ns_frag; + slp->ns_frag = (struct mbuf *)0; + } + } +} + +/* + * Parse an RPC header. 
+ */ +int +nfsrv_dorec(slp, nfsd, ndp) + register struct nfssvc_sock *slp; + struct nfsd *nfsd; + struct nfsrv_descript **ndp; +{ + register struct mbuf *m; + register struct mbuf *nam; + register struct nfsrv_descript *nd; + int error; + + *ndp = NULL; + if ((slp->ns_flag & SLP_VALID) == 0 || + (m = slp->ns_rec) == (struct mbuf *)0) + return (ENOBUFS); + slp->ns_rec = m->m_nextpkt; + if (slp->ns_rec) + m->m_nextpkt = (struct mbuf *)0; + else + slp->ns_recend = (struct mbuf *)0; + if (m->m_type == MT_SONAME) { + nam = m; + m = m->m_next; + nam->m_next = NULL; + } else + nam = NULL; + MALLOC_ZONE(nd, struct nfsrv_descript *, + sizeof (struct nfsrv_descript), M_NFSRVDESC, M_WAITOK); + nd->nd_md = nd->nd_mrep = m; + nd->nd_nam2 = nam; + nd->nd_dpos = mtod(m, caddr_t); + error = nfs_getreq(nd, nfsd, TRUE); + if (error) { + m_freem(nam); + _FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); + return (error); + } + *ndp = nd; + nfsd->nfsd_nd = nd; + return (0); +} + +/* + * Parse an RPC request + * - verify it + * - fill in the cred struct. 
+ */ +int +nfs_getreq(nd, nfsd, has_header) + register struct nfsrv_descript *nd; + struct nfsd *nfsd; + int has_header; +{ + register int len, i; + register u_long *tl; + register long t1; + struct uio uio; + struct iovec iov; + caddr_t dpos, cp2, cp; + u_long nfsvers, auth_type; + uid_t nickuid; + int error = 0, nqnfs = 0, ticklen; + struct mbuf *mrep, *md; + register struct nfsuid *nuidp; + struct timeval tvin, tvout; +#if 0 /* until encrypted keys are implemented */ + NFSKERBKEYSCHED_T keys; /* stores key schedule */ +#endif + + mrep = nd->nd_mrep; + md = nd->nd_md; + dpos = nd->nd_dpos; + if (has_header) { + nfsm_dissect(tl, u_long *, 10 * NFSX_UNSIGNED); + nd->nd_retxid = fxdr_unsigned(u_long, *tl++); + if (*tl++ != rpc_call) { + m_freem(mrep); + return (EBADRPC); + } + } else + nfsm_dissect(tl, u_long *, 8 * NFSX_UNSIGNED); + nd->nd_repstat = 0; + nd->nd_flag = 0; + if (*tl++ != rpc_vers) { + nd->nd_repstat = ERPCMISMATCH; + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + if (*tl != nfs_prog) { + if (*tl == nqnfs_prog) + nqnfs++; + else { + nd->nd_repstat = EPROGUNAVAIL; + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + } + tl++; + nfsvers = fxdr_unsigned(u_long, *tl++); + if (((nfsvers < NFS_VER2 || nfsvers > NFS_VER3) && !nqnfs) || + (nfsvers != NQNFS_VER3 && nqnfs)) { + nd->nd_repstat = EPROGMISMATCH; + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + if (nqnfs) + nd->nd_flag = (ND_NFSV3 | ND_NQNFS); + else if (nfsvers == NFS_VER3) + nd->nd_flag = ND_NFSV3; + nd->nd_procnum = fxdr_unsigned(u_long, *tl++); + if (nd->nd_procnum == NFSPROC_NULL) + return (0); + if (nd->nd_procnum >= NFS_NPROCS || + (!nqnfs && nd->nd_procnum >= NQNFSPROC_GETLEASE) || + (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) { + nd->nd_repstat = EPROCUNAVAIL; + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + if ((nd->nd_flag & ND_NFSV3) == 0) + nd->nd_procnum = nfsv3_procid[nd->nd_procnum]; + auth_type = *tl++; + len = fxdr_unsigned(int, *tl++); + if (len < 0 || len > 
RPCAUTH_MAXSIZ) { + m_freem(mrep); + return (EBADRPC); + } + + nd->nd_flag &= ~ND_KERBAUTH; + /* + * Handle auth_unix or auth_kerb. + */ + if (auth_type == rpc_auth_unix) { + len = fxdr_unsigned(int, *++tl); + if (len < 0 || len > NFS_MAXNAMLEN) { + m_freem(mrep); + return (EBADRPC); + } + nfsm_adv(nfsm_rndup(len)); + nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); + bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred)); + nd->nd_cr.cr_ref = 1; + nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++); + nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++); + len = fxdr_unsigned(int, *tl); + if (len < 0 || len > RPCAUTH_UNIXGIDS) { + m_freem(mrep); + return (EBADRPC); + } + nfsm_dissect(tl, u_long *, (len + 2) * NFSX_UNSIGNED); + for (i = 1; i <= len; i++) + if (i < NGROUPS) + nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++); + else + tl++; + nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1); + if (nd->nd_cr.cr_ngroups > 1) + nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups); + len = fxdr_unsigned(int, *++tl); + if (len < 0 || len > RPCAUTH_MAXSIZ) { + m_freem(mrep); + return (EBADRPC); + } + if (len > 0) + nfsm_adv(nfsm_rndup(len)); + } else if (auth_type == rpc_auth_kerb) { + switch (fxdr_unsigned(int, *tl++)) { + case RPCAKN_FULLNAME: + ticklen = fxdr_unsigned(int, *tl); + *((u_long *)nfsd->nfsd_authstr) = *tl; + uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED; + nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED; + if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) { + m_freem(mrep); + return (EBADRPC); + } + uio.uio_offset = 0; + uio.uio_iov = &iov; + uio.uio_iovcnt = 1; + uio.uio_segflg = UIO_SYSSPACE; + iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4]; + iov.iov_len = RPCAUTH_MAXSIZ - 4; + nfsm_mtouio(&uio, uio.uio_resid); + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); + if (*tl++ != rpc_auth_kerb || + fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) { + printf("Bad kerb verifier\n"); + nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); + nd->nd_procnum = 
NFSPROC_NOOP; + return (0); + } + nfsm_dissect(cp, caddr_t, 4 * NFSX_UNSIGNED); + tl = (u_long *)cp; + if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) { + printf("Not fullname kerb verifier\n"); + nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + cp += NFSX_UNSIGNED; + bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED); + nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED; + nd->nd_flag |= ND_KERBFULL; + nfsd->nfsd_flag |= NFSD_NEEDAUTH; + break; + case RPCAKN_NICKNAME: + if (len != 2 * NFSX_UNSIGNED) { + printf("Kerb nickname short\n"); + nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED); + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + nickuid = fxdr_unsigned(uid_t, *tl); + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); + if (*tl++ != rpc_auth_kerb || + fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) { + printf("Kerb nick verifier bad\n"); + nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); + tvin.tv_sec = *tl++; + tvin.tv_usec = *tl; + + for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first; + nuidp != 0; nuidp = nuidp->nu_hash.le_next) { + if (nuidp->nu_cr.cr_uid == nickuid && + (!nd->nd_nam2 || + netaddr_match(NU_NETFAM(nuidp), + &nuidp->nu_haddr, nd->nd_nam2))) + break; + } + if (!nuidp) { + nd->nd_repstat = + (NFSERR_AUTHERR|AUTH_REJECTCRED); + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + + /* + * Now, decrypt the timestamp using the session key + * and validate it. 
+ */ +#if NFSKERB + XXX +#endif + + tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec); + tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec); + if (nuidp->nu_expire < time.tv_sec || + nuidp->nu_timestamp.tv_sec > tvout.tv_sec || + (nuidp->nu_timestamp.tv_sec == tvout.tv_sec && + nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) { + nuidp->nu_expire = 0; + nd->nd_repstat = + (NFSERR_AUTHERR|AUTH_REJECTVERF); + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr); + nd->nd_flag |= ND_KERBNICK; + }; + } else { + nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED); + nd->nd_procnum = NFSPROC_NOOP; + return (0); + } + + /* + * For nqnfs, get piggybacked lease request. + */ + if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) { + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + nd->nd_flag |= fxdr_unsigned(int, *tl); + if (nd->nd_flag & ND_LEASE) { + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); + nd->nd_duration = fxdr_unsigned(int, *tl); + } else + nd->nd_duration = NQ_MINLEASE; + } else + nd->nd_duration = NQ_MINLEASE; + nd->nd_md = md; + nd->nd_dpos = dpos; + return (0); +nfsmout: + return (error); +} + +/* + * Search for a sleeping nfsd and wake it up. + * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the + * running nfsds will go look for the work in the nfssvc_sock list. 
+ */ +void +nfsrv_wakenfsd(slp) + struct nfssvc_sock *slp; +{ + register struct nfsd *nd; + + if ((slp->ns_flag & SLP_VALID) == 0) + return; + for (nd = nfsd_head.tqh_first; nd != 0; nd = nd->nfsd_chain.tqe_next) { + if (nd->nfsd_flag & NFSD_WAITING) { + nd->nfsd_flag &= ~NFSD_WAITING; + if (nd->nfsd_slp) + panic("nfsd wakeup"); + slp->ns_sref++; + nd->nfsd_slp = slp; + wakeup((caddr_t)nd); + return; + } + } + slp->ns_flag |= SLP_DOREC; + nfsd_head_flag |= NFSD_CHECKSLP; +} +#endif /* NFS_NOSERVER */ + +static int +nfs_msg(p, server, msg) + struct proc *p; + char *server, *msg; +{ + tpr_t tpr; + + if (p) + tpr = tprintf_open(p); + else + tpr = NULL; + tprintf(tpr, "nfs server %s: %s\n", server, msg); + tprintf_close(tpr); + return (0); +} diff --git a/bsd/nfs/nfs_srvcache.c b/bsd/nfs/nfs_srvcache.c new file mode 100644 index 000000000..9e7007ddb --- /dev/null +++ b/bsd/nfs/nfs_srvcache.c @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)nfs_srvcache.c 8.3 (Berkeley) 3/30/95 + * FreeBSD-Id: nfs_srvcache.c,v 1.15 1997/10/12 20:25:46 phk Exp $ + */ + +#ifndef NFS_NOSERVER +/* + * Reference: Chet Juszczak, "Improving the Performance and Correctness + * of an NFS Server", in Proc. Winter 1989 USENIX Conference, + * pages 53-63. San Diego, February 1989. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for dup_sockaddr */ + +#include +#if ISO +#include +#endif +#include +#include +#include +#include + +extern struct nfsstats nfsstats; +extern int nfsv2_procid[NFS_NPROCS]; +long numnfsrvcache; +static long desirednfsrvcache = NFSRVCACHESIZ; + +#define NFSRCHASH(xid) \ + (&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash]) +LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl; +TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead; +u_long nfsrvhash; + +#define TRUE 1 +#define FALSE 0 + +#define NETFAMILY(rp) \ + (((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO) + +/* + * Static array that defines which nfs rpc's are nonidempotent + */ +static int nonidempotent[NFS_NPROCS] = { + FALSE, + FALSE, + TRUE, + FALSE, + FALSE, + FALSE, + FALSE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + TRUE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, +}; + +/* True iff the rpc reply is an nfs status ONLY! 
*/ +static int nfsv2_repstat[NFS_NPROCS] = { + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + FALSE, + TRUE, + TRUE, + TRUE, + TRUE, + FALSE, + TRUE, + FALSE, + FALSE, +}; + +/* + * Initialize the server request cache list + */ +void +nfsrv_initcache() +{ + + nfsrvhashtbl = hashinit(desirednfsrvcache, M_NFSD, &nfsrvhash); + TAILQ_INIT(&nfsrvlruhead); +} + +/* + * Look for the request in the cache + * If found then + * return action and optionally reply + * else + * insert it in the cache + * + * The rules are as follows: + * - if in progress, return DROP request + * - if completed within DELAY of the current time, return DROP it + * - if completed a longer time ago return REPLY if the reply was cached or + * return DOIT + * Update/add new request at end of lru list + */ +int +nfsrv_getcache(nd, slp, repp) + register struct nfsrv_descript *nd; + struct nfssvc_sock *slp; + struct mbuf **repp; +{ + register struct nfsrvcache *rp; + struct mbuf *mb; + struct sockaddr_in *saddr; + caddr_t bpos; + int ret; + + /* + * Don't cache recent requests for reliable transport protocols. + * (Maybe we should for the case of a reconnect, but..) 
+ */ + if (!nd->nd_nam2) + return (RC_DOIT); +loop: + for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0; + rp = rp->rc_hash.le_next) { + if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc && + netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) { + NFS_DPF(RC, ("H%03x", rp->rc_xid & 0xfff)); + if ((rp->rc_flag & RC_LOCKED) != 0) { + rp->rc_flag |= RC_WANTED; + (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 0); + goto loop; + } + rp->rc_flag |= RC_LOCKED; + /* If not at end of LRU chain, move it there */ + if (rp->rc_lru.tqe_next) { + TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru); + TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru); + } + if (rp->rc_state == RC_UNUSED) + panic("nfsrv cache"); + if (rp->rc_state == RC_INPROG) { + nfsstats.srvcache_inproghits++; + ret = RC_DROPIT; + } else if (rp->rc_flag & RC_REPSTATUS) { + nfsstats.srvcache_nonidemdonehits++; + nfs_rephead(0, nd, slp, rp->rc_status, + 0, (u_quad_t *)0, repp, &mb, &bpos); + ret = RC_REPLY; + } else if (rp->rc_flag & RC_REPMBUF) { + nfsstats.srvcache_nonidemdonehits++; + *repp = m_copym(rp->rc_reply, 0, M_COPYALL, + M_WAIT); + ret = RC_REPLY; + } else { + nfsstats.srvcache_idemdonehits++; + rp->rc_state = RC_INPROG; + ret = RC_DOIT; + } + rp->rc_flag &= ~RC_LOCKED; + if (rp->rc_flag & RC_WANTED) { + rp->rc_flag &= ~RC_WANTED; + wakeup((caddr_t)rp); + } + return (ret); + } + } + nfsstats.srvcache_misses++; + NFS_DPF(RC, ("M%03x", nd->nd_retxid & 0xfff)); + if (numnfsrvcache < desirednfsrvcache) { + MALLOC(rp, struct nfsrvcache *, sizeof *rp, M_NFSD, M_WAITOK); + bzero((char *)rp, sizeof *rp); + numnfsrvcache++; + rp->rc_flag = RC_LOCKED; + } else { + rp = nfsrvlruhead.tqh_first; + while ((rp->rc_flag & RC_LOCKED) != 0) { + rp->rc_flag |= RC_WANTED; + (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 0); + rp = nfsrvlruhead.tqh_first; + } + rp->rc_flag |= RC_LOCKED; + LIST_REMOVE(rp, rc_hash); + TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru); + if (rp->rc_flag & RC_REPMBUF) + m_freem(rp->rc_reply); + if 
(rp->rc_flag & RC_NAM) + MFREE(rp->rc_nam, mb); + rp->rc_flag &= (RC_LOCKED | RC_WANTED); + } + TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru); + rp->rc_state = RC_INPROG; + rp->rc_xid = nd->nd_retxid; + saddr = mtod(nd->nd_nam, struct sockaddr_in *); + switch (saddr->sin_family) { + case AF_INET: + rp->rc_flag |= RC_INETADDR; + rp->rc_inetaddr = saddr->sin_addr.s_addr; + break; + case AF_ISO: + default: + rp->rc_flag |= RC_NAM; + rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT); + break; + }; + rp->rc_proc = nd->nd_procnum; + LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash); + rp->rc_flag &= ~RC_LOCKED; + if (rp->rc_flag & RC_WANTED) { + rp->rc_flag &= ~RC_WANTED; + wakeup((caddr_t)rp); + } + return (RC_DOIT); +} + +/* + * Update a request cache entry after the rpc has been done + */ +void +nfsrv_updatecache(nd, repvalid, repmbuf) + register struct nfsrv_descript *nd; + int repvalid; + struct mbuf *repmbuf; +{ + register struct nfsrvcache *rp; + + if (!nd->nd_nam2) + return; +loop: + for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0; + rp = rp->rc_hash.le_next) { + if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc && + netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) { + NFS_DPF(RC, ("U%03x", rp->rc_xid & 0xfff)); + if ((rp->rc_flag & RC_LOCKED) != 0) { + rp->rc_flag |= RC_WANTED; + (void) tsleep((caddr_t)rp, PZERO-1, "nfsrc", 0); + goto loop; + } + rp->rc_flag |= RC_LOCKED; + rp->rc_state = RC_DONE; + /* + * If we have a valid reply update status and save + * the reply for non-idempotent rpc's. 
+ */ + if (repvalid && nonidempotent[nd->nd_procnum]) { + if ((nd->nd_flag & ND_NFSV3) == 0 && + nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) { + rp->rc_status = nd->nd_repstat; + rp->rc_flag |= RC_REPSTATUS; + } else { + rp->rc_reply = m_copym(repmbuf, + 0, M_COPYALL, M_WAIT); + rp->rc_flag |= RC_REPMBUF; + } + } + rp->rc_flag &= ~RC_LOCKED; + if (rp->rc_flag & RC_WANTED) { + rp->rc_flag &= ~RC_WANTED; + wakeup((caddr_t)rp); + } + return; + } + } + NFS_DPF(RC, ("L%03x", nd->nd_retxid & 0xfff)); +} + +/* + * Clean out the cache. Called when the last nfsd terminates. + */ +void +nfsrv_cleancache() +{ + register struct nfsrvcache *rp, *nextrp; + + for (rp = nfsrvlruhead.tqh_first; rp != 0; rp = nextrp) { + nextrp = rp->rc_lru.tqe_next; + LIST_REMOVE(rp, rc_hash); + TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru); + _FREE(rp, M_NFSD); + } + numnfsrvcache = 0; +} + +#endif /* NFS_NOSERVER */ diff --git a/bsd/nfs/nfs_subs.c b/bsd/nfs/nfs_subs.c new file mode 100644 index 000000000..9018b50a6 --- /dev/null +++ b/bsd/nfs/nfs_subs.c @@ -0,0 +1,2192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_subs.c 8.8 (Berkeley) 5/22/95 + * FreeBSD-Id: nfs_subs.c,v 1.47 1997/11/07 08:53:24 phk Exp $ + */ + +/* + * These functions support the macros and help fiddle mbuf chains for + * the nfs op functions. They do things like create the rpc header and + * copy data between mbuf chains and uio lists. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#if ISO +#include +#endif + +/* + * Data items converted to xdr at startup, since they are constant + * This is kinda hokey, but may save a little time doing byte swaps + */ +u_long nfs_xdrneg1; +u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied, rpc_autherr, + rpc_mismatch, rpc_auth_unix, rpc_msgaccepted, + rpc_auth_kerb; +u_long nfs_prog, nqnfs_prog, nfs_true, nfs_false; + +/* And other global data */ +static u_long nfs_xid = 0; +static enum vtype nv2tov_type[8]= { + VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON +}; +enum vtype nv3tov_type[8]= { + VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO +}; + +int nfs_mount_type; +int nfs_ticks; + +struct nfs_reqq nfs_reqq; +struct nfssvc_sockhead nfssvc_sockhead; +int nfssvc_sockhead_flag; +struct nfsd_head nfsd_head; +int nfsd_head_flag; +struct nfs_bufq nfs_bufq; +struct 
nqtimerhead nqtimerhead; +struct nqfhhashhead *nqfhhashtbl; +u_long nqfhhash; + +#ifndef NFS_NOSERVER +/* + * Mapping of old NFS Version 2 RPC numbers to generic numbers. + */ +int nfsv3_procid[NFS_NPROCS] = { + NFSPROC_NULL, + NFSPROC_GETATTR, + NFSPROC_SETATTR, + NFSPROC_NOOP, + NFSPROC_LOOKUP, + NFSPROC_READLINK, + NFSPROC_READ, + NFSPROC_NOOP, + NFSPROC_WRITE, + NFSPROC_CREATE, + NFSPROC_REMOVE, + NFSPROC_RENAME, + NFSPROC_LINK, + NFSPROC_SYMLINK, + NFSPROC_MKDIR, + NFSPROC_RMDIR, + NFSPROC_READDIR, + NFSPROC_FSSTAT, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP, + NFSPROC_NOOP +}; + +#endif /* NFS_NOSERVER */ +/* + * and the reverse mapping from generic to Version 2 procedure numbers + */ +int nfsv2_procid[NFS_NPROCS] = { + NFSV2PROC_NULL, + NFSV2PROC_GETATTR, + NFSV2PROC_SETATTR, + NFSV2PROC_LOOKUP, + NFSV2PROC_NOOP, + NFSV2PROC_READLINK, + NFSV2PROC_READ, + NFSV2PROC_WRITE, + NFSV2PROC_CREATE, + NFSV2PROC_MKDIR, + NFSV2PROC_SYMLINK, + NFSV2PROC_CREATE, + NFSV2PROC_REMOVE, + NFSV2PROC_RMDIR, + NFSV2PROC_RENAME, + NFSV2PROC_LINK, + NFSV2PROC_READDIR, + NFSV2PROC_NOOP, + NFSV2PROC_STATFS, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, + NFSV2PROC_NOOP, +}; + +#ifndef NFS_NOSERVER +/* + * Maps errno values to nfs error numbers. + * Use NFSERR_IO as the catch all for ones not specifically defined in + * RFC 1094. 
+ */ +static u_char nfsrv_v2errmap[ELAST] = { + NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR, + NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO, + NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, +}; + +/* + * Maps errno values to nfs error numbers. + * Although it is not obvious whether or not NFS clients really care if + * a returned error value is in the specified list for the procedure, the + * safest thing to do is filter them appropriately. For Version 2, the + * X/Open XNFS document is the only specification that defines error values + * for each RPC (The RFC simply lists all possible error values for all RPCs), + * so I have decided to not do this for Version 2. + * The first entry is the default error return and the rest are the valid + * errors for that RPC in increasing numeric order. 
+ */ +static short nfsv3err_null[] = { + 0, + 0, +}; + +static short nfsv3err_getattr[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_setattr[] = { + NFSERR_IO, + NFSERR_PERM, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_INVAL, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOT_SYNC, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_lookup[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_NAMETOL, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_access[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_readlink[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_INVAL, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_read[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_NXIO, + NFSERR_ACCES, + NFSERR_INVAL, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_write[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_INVAL, + NFSERR_FBIG, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_create[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NOTDIR, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_NAMETOL, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_mkdir[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NOTDIR, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_NAMETOL, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_symlink[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NOTDIR, + NFSERR_NOSPC, + 
NFSERR_ROFS, + NFSERR_NAMETOL, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_mknod[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NOTDIR, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_NAMETOL, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + NFSERR_BADTYPE, + 0, +}; + +static short nfsv3err_remove[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_ROFS, + NFSERR_NAMETOL, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_rmdir[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_NOTDIR, + NFSERR_INVAL, + NFSERR_ROFS, + NFSERR_NAMETOL, + NFSERR_NOTEMPTY, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_rename[] = { + NFSERR_IO, + NFSERR_NOENT, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_XDEV, + NFSERR_NOTDIR, + NFSERR_ISDIR, + NFSERR_INVAL, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_MLINK, + NFSERR_NAMETOL, + NFSERR_NOTEMPTY, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_link[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_EXIST, + NFSERR_XDEV, + NFSERR_NOTDIR, + NFSERR_INVAL, + NFSERR_NOSPC, + NFSERR_ROFS, + NFSERR_MLINK, + NFSERR_NAMETOL, + NFSERR_DQUOT, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_NOTSUPP, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_readdir[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_BAD_COOKIE, + NFSERR_TOOSMALL, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_readdirplus[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_ACCES, + NFSERR_NOTDIR, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_BAD_COOKIE, + NFSERR_NOTSUPP, + NFSERR_TOOSMALL, + NFSERR_SERVERFAULT, + 
0, +}; + +static short nfsv3err_fsstat[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_fsinfo[] = { + NFSERR_STALE, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_pathconf[] = { + NFSERR_STALE, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short nfsv3err_commit[] = { + NFSERR_IO, + NFSERR_IO, + NFSERR_STALE, + NFSERR_BADHANDLE, + NFSERR_SERVERFAULT, + 0, +}; + +static short *nfsrv_v3errmap[] = { + nfsv3err_null, + nfsv3err_getattr, + nfsv3err_setattr, + nfsv3err_lookup, + nfsv3err_access, + nfsv3err_readlink, + nfsv3err_read, + nfsv3err_write, + nfsv3err_create, + nfsv3err_mkdir, + nfsv3err_symlink, + nfsv3err_mknod, + nfsv3err_remove, + nfsv3err_rmdir, + nfsv3err_rename, + nfsv3err_link, + nfsv3err_readdir, + nfsv3err_readdirplus, + nfsv3err_fsstat, + nfsv3err_fsinfo, + nfsv3err_pathconf, + nfsv3err_commit, +}; + +#endif /* NFS_NOSERVER */ + +extern struct nfsrtt nfsrtt; +extern time_t nqnfsstarttime; +extern int nqsrv_clockskew; +extern int nqsrv_writeslack; +extern int nqsrv_maxlease; +extern struct nfsstats nfsstats; +extern int nqnfs_piggy[NFS_NPROCS]; +extern nfstype nfsv2_type[9]; +extern nfstype nfsv3_type[9]; +extern struct nfsnodehashhead *nfsnodehashtbl; +extern u_long nfsnodehash; + +struct getfh_args; +extern int getfh(struct proc *, struct getfh_args *, int *); +struct nfssvc_args; +extern int nfssvc(struct proc *, struct nfssvc_args *, int *); + +LIST_HEAD(nfsnodehashhead, nfsnode); + +int nfs_webnamei __P((struct nameidata *, struct vnode *, struct proc *)); + +/* + * Create the header for an rpc request packet + * The hsiz is the size of the rest of the nfs request header. 
+ * (just used to decide if a cluster is a good idea) + */ +struct mbuf * +nfsm_reqh(vp, procid, hsiz, bposp) + struct vnode *vp; + u_long procid; + int hsiz; + caddr_t *bposp; +{ + register struct mbuf *mb; + register u_long *tl; + register caddr_t bpos; + struct mbuf *mb2; + struct nfsmount *nmp; + int nqflag; + + MGET(mb, M_WAIT, MT_DATA); + if (hsiz >= MINCLSIZE) + MCLGET(mb, M_WAIT); + mb->m_len = 0; + bpos = mtod(mb, caddr_t); + + /* + * For NQNFS, add lease request. + */ + if (vp) { + nmp = VFSTONFS(vp->v_mount); + if (nmp->nm_flag & NFSMNT_NQNFS) { + nqflag = NQNFS_NEEDLEASE(vp, procid); + if (nqflag) { + nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED); + *tl++ = txdr_unsigned(nqflag); + *tl = txdr_unsigned(nmp->nm_leaseterm); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = 0; + } + } + } + /* Finally, return values */ + *bposp = bpos; + return (mb); +} + +/* + * Build the RPC header and fill in the authorization info. + * The authorization string argument is only used when the credentials + * come from outside of the kernel. + * Returns the head of the mbuf list. 
+ */ +struct mbuf * +nfsm_rpchead(cr, nmflag, procid, auth_type, auth_len, auth_str, verf_len, + verf_str, mrest, mrest_len, mbp, xidp) + register struct ucred *cr; + int nmflag; + int procid; + int auth_type; + int auth_len; + char *auth_str; + int verf_len; + char *verf_str; + struct mbuf *mrest; + int mrest_len; + struct mbuf **mbp; + u_long *xidp; +{ + register struct mbuf *mb; + register u_long *tl; + register caddr_t bpos; + register int i; + struct mbuf *mreq, *mb2; + int siz, grpsiz, authsiz; + struct timeval tv; + static u_long base; + + authsiz = nfsm_rndup(auth_len); + MGETHDR(mb, M_WAIT, MT_DATA); + if ((authsiz + 10 * NFSX_UNSIGNED) >= MINCLSIZE) { + MCLGET(mb, M_WAIT); + } else if ((authsiz + 10 * NFSX_UNSIGNED) < MHLEN) { + MH_ALIGN(mb, authsiz + 10 * NFSX_UNSIGNED); + } else { + MH_ALIGN(mb, 8 * NFSX_UNSIGNED); + } + mb->m_len = 0; + mreq = mb; + bpos = mtod(mb, caddr_t); + + /* + * First the RPC header. + */ + nfsm_build(tl, u_long *, 8 * NFSX_UNSIGNED); + + /* + * derive initial xid from system time + * XXX time is invalid if root not yet mounted + */ + if (!base && (rootvp)) { + microtime(&tv); + base = tv.tv_sec << 12; + nfs_xid = base; + } + /* + * Skip zero xid if it should ever happen. + */ + if (++nfs_xid == 0) + nfs_xid++; + + *tl++ = *xidp = txdr_unsigned(nfs_xid); + *tl++ = rpc_call; + *tl++ = rpc_vers; + if (nmflag & NFSMNT_NQNFS) { + *tl++ = txdr_unsigned(NQNFS_PROG); + *tl++ = txdr_unsigned(NQNFS_VER3); + } else { + *tl++ = txdr_unsigned(NFS_PROG); + if (nmflag & NFSMNT_NFSV3) + *tl++ = txdr_unsigned(NFS_VER3); + else + *tl++ = txdr_unsigned(NFS_VER2); + } + if (nmflag & NFSMNT_NFSV3) + *tl++ = txdr_unsigned(procid); + else + *tl++ = txdr_unsigned(nfsv2_procid[procid]); + + /* + * And then the authorization cred. + */ + *tl++ = txdr_unsigned(auth_type); + *tl = txdr_unsigned(authsiz); + switch (auth_type) { + case RPCAUTH_UNIX: + nfsm_build(tl, u_long *, auth_len); + *tl++ = 0; /* stamp ?? 
*/ + *tl++ = 0; /* NULL hostname */ + *tl++ = txdr_unsigned(cr->cr_uid); + *tl++ = txdr_unsigned(cr->cr_groups[0]); + grpsiz = (auth_len >> 2) - 5; + *tl++ = txdr_unsigned(grpsiz); + for (i = 1; i <= grpsiz; i++) + *tl++ = txdr_unsigned(cr->cr_groups[i]); + break; + case RPCAUTH_KERB4: + siz = auth_len; + while (siz > 0) { + if (M_TRAILINGSPACE(mb) == 0) { + MGET(mb2, M_WAIT, MT_DATA); + if (siz >= MINCLSIZE) + MCLGET(mb2, M_WAIT); + mb->m_next = mb2; + mb = mb2; + mb->m_len = 0; + bpos = mtod(mb, caddr_t); + } + i = min(siz, M_TRAILINGSPACE(mb)); + bcopy(auth_str, bpos, i); + mb->m_len += i; + auth_str += i; + bpos += i; + siz -= i; + } + if ((siz = (nfsm_rndup(auth_len) - auth_len)) > 0) { + for (i = 0; i < siz; i++) + *bpos++ = '\0'; + mb->m_len += siz; + } + break; + }; + + /* + * And the verifier... + */ + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + if (verf_str) { + *tl++ = txdr_unsigned(RPCAUTH_KERB4); + *tl = txdr_unsigned(verf_len); + siz = verf_len; + while (siz > 0) { + if (M_TRAILINGSPACE(mb) == 0) { + MGET(mb2, M_WAIT, MT_DATA); + if (siz >= MINCLSIZE) + MCLGET(mb2, M_WAIT); + mb->m_next = mb2; + mb = mb2; + mb->m_len = 0; + bpos = mtod(mb, caddr_t); + } + i = min(siz, M_TRAILINGSPACE(mb)); + bcopy(verf_str, bpos, i); + mb->m_len += i; + verf_str += i; + bpos += i; + siz -= i; + } + if ((siz = (nfsm_rndup(verf_len) - verf_len)) > 0) { + for (i = 0; i < siz; i++) + *bpos++ = '\0'; + mb->m_len += siz; + } + } else { + *tl++ = txdr_unsigned(RPCAUTH_NULL); + *tl = 0; + } + mb->m_next = mrest; + mreq->m_pkthdr.len = authsiz + 10 * NFSX_UNSIGNED + mrest_len; + mreq->m_pkthdr.rcvif = (struct ifnet *)0; + *mbp = mb; + return (mreq); +} + +/* + * copies mbuf chain to the uio scatter/gather list + */ +int +nfsm_mbuftouio(mrep, uiop, siz, dpos) + struct mbuf **mrep; + register struct uio *uiop; + int siz; + caddr_t *dpos; +{ + register char *mbufcp, *uiocp; + register int xfer, left, len; + register struct mbuf *mp; + long uiosiz, rem; + int error = 0; + + mp 
= *mrep; + mbufcp = *dpos; + len = mtod(mp, caddr_t)+mp->m_len-mbufcp; + rem = nfsm_rndup(siz)-siz; + while (siz > 0) { + if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL) + return (EFBIG); + left = uiop->uio_iov->iov_len; + uiocp = uiop->uio_iov->iov_base; + if (left > siz) + left = siz; + uiosiz = left; + while (left > 0) { + while (len == 0) { + mp = mp->m_next; + if (mp == NULL) + return (EBADRPC); + mbufcp = mtod(mp, caddr_t); + len = mp->m_len; + } + xfer = (left > len) ? len : left; +#ifdef notdef + /* Not Yet.. */ + if (uiop->uio_iov->iov_op != NULL) + (*(uiop->uio_iov->iov_op)) + (mbufcp, uiocp, xfer); + else +#endif + if (uiop->uio_segflg == UIO_SYSSPACE) + bcopy(mbufcp, uiocp, xfer); + else + copyout(mbufcp, uiocp, xfer); + left -= xfer; + len -= xfer; + mbufcp += xfer; + uiocp += xfer; + uiop->uio_offset += xfer; + uiop->uio_resid -= xfer; + } + if (uiop->uio_iov->iov_len <= siz) { + uiop->uio_iovcnt--; + uiop->uio_iov++; + } else { + uiop->uio_iov->iov_base += uiosiz; + uiop->uio_iov->iov_len -= uiosiz; + } + siz -= uiosiz; + } + *dpos = mbufcp; + *mrep = mp; + if (rem > 0) { + if (len < rem) + error = nfs_adv(mrep, dpos, rem, len); + else + *dpos += rem; + } + return (error); +} + +/* + * copies a uio scatter/gather list to an mbuf chain. + * NOTE: can ony handle iovcnt == 1 + */ +int +nfsm_uiotombuf(uiop, mq, siz, bpos) + register struct uio *uiop; + struct mbuf **mq; + int siz; + caddr_t *bpos; +{ + register char *uiocp; + register struct mbuf *mp, *mp2; + register int xfer, left, mlen; + int uiosiz, clflg, rem; + char *cp; + + if (uiop->uio_iovcnt != 1) + panic("nfsm_uiotombuf: iovcnt != 1"); + + if (siz > MLEN) /* or should it >= MCLBYTES ?? 
*/ + clflg = 1; + else + clflg = 0; + rem = nfsm_rndup(siz)-siz; + mp = mp2 = *mq; + while (siz > 0) { + left = uiop->uio_iov->iov_len; + uiocp = uiop->uio_iov->iov_base; + if (left > siz) + left = siz; + uiosiz = left; + while (left > 0) { + mlen = M_TRAILINGSPACE(mp); + if (mlen == 0) { + MGET(mp, M_WAIT, MT_DATA); + if (clflg) + MCLGET(mp, M_WAIT); + mp->m_len = 0; + mp2->m_next = mp; + mp2 = mp; + mlen = M_TRAILINGSPACE(mp); + } + xfer = (left > mlen) ? mlen : left; +#ifdef notdef + /* Not Yet.. */ + if (uiop->uio_iov->iov_op != NULL) + (*(uiop->uio_iov->iov_op)) + (uiocp, mtod(mp, caddr_t)+mp->m_len, xfer); + else +#endif + if (uiop->uio_segflg == UIO_SYSSPACE) + bcopy(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer); + else + copyin(uiocp, mtod(mp, caddr_t)+mp->m_len, xfer); + mp->m_len += xfer; + left -= xfer; + uiocp += xfer; + uiop->uio_offset += xfer; + uiop->uio_resid -= xfer; + } + uiop->uio_iov->iov_base += uiosiz; + uiop->uio_iov->iov_len -= uiosiz; + siz -= uiosiz; + } + if (rem > 0) { + if (rem > M_TRAILINGSPACE(mp)) { + MGET(mp, M_WAIT, MT_DATA); + mp->m_len = 0; + mp2->m_next = mp; + } + cp = mtod(mp, caddr_t)+mp->m_len; + for (left = 0; left < rem; left++) + *cp++ = '\0'; + mp->m_len += rem; + *bpos = cp; + } else + *bpos = mtod(mp, caddr_t)+mp->m_len; + *mq = mp; + return (0); +} + +/* + * Help break down an mbuf chain by setting the first siz bytes contiguous + * pointed to by returned val. + * This is used by the macros nfsm_dissect and nfsm_dissecton for tough + * cases. (The macros use the vars. 
dpos and dpos2) + */ +int +nfsm_disct(mdp, dposp, siz, left, cp2) + struct mbuf **mdp; + caddr_t *dposp; + int siz; + int left; + caddr_t *cp2; +{ + register struct mbuf *mp, *mp2; + register int siz2, xfer; + register caddr_t p; + + mp = *mdp; + while (left == 0) { + *mdp = mp = mp->m_next; + if (mp == NULL) + return (EBADRPC); + left = mp->m_len; + *dposp = mtod(mp, caddr_t); + } + if (left >= siz) { + *cp2 = *dposp; + *dposp += siz; + } else if (mp->m_next == NULL) { + return (EBADRPC); + } else if (siz > MHLEN) { + panic("nfs S too big"); + } else { + MGET(mp2, M_WAIT, MT_DATA); + mp2->m_next = mp->m_next; + mp->m_next = mp2; + mp->m_len -= left; + mp = mp2; + *cp2 = p = mtod(mp, caddr_t); + bcopy(*dposp, p, left); /* Copy what was left */ + siz2 = siz-left; + p += left; + mp2 = mp->m_next; + /* Loop around copying up the siz2 bytes */ + while (siz2 > 0) { + if (mp2 == NULL) + return (EBADRPC); + xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2; + if (xfer > 0) { + bcopy(mtod(mp2, caddr_t), p, xfer); + NFSMADV(mp2, xfer); + mp2->m_len -= xfer; + p += xfer; + siz2 -= xfer; + } + if (siz2 > 0) + mp2 = mp2->m_next; + } + mp->m_len = siz; + *mdp = mp2; + *dposp = mtod(mp2, caddr_t); + } + return (0); +} + +/* + * Advance the position in the mbuf chain. + */ +int +nfs_adv(mdp, dposp, offs, left) + struct mbuf **mdp; + caddr_t *dposp; + int offs; + int left; +{ + register struct mbuf *m; + register int s; + + m = *mdp; + s = left; + while (s < offs) { + offs -= s; + m = m->m_next; + if (m == NULL) + return (EBADRPC); + s = m->m_len; + } + *mdp = m; + *dposp = mtod(m, caddr_t)+offs; + return (0); +} + +/* + * Copy a string into mbufs for the hard cases... 
+ */ +int +nfsm_strtmbuf(mb, bpos, cp, siz) + struct mbuf **mb; + char **bpos; + char *cp; + long siz; +{ + register struct mbuf *m1 = 0, *m2; + long left, xfer, len, tlen; + u_long *tl; + int putsize; + + putsize = 1; + m2 = *mb; + left = M_TRAILINGSPACE(m2); + if (left > 0) { + tl = ((u_long *)(*bpos)); + *tl++ = txdr_unsigned(siz); + putsize = 0; + left -= NFSX_UNSIGNED; + m2->m_len += NFSX_UNSIGNED; + if (left > 0) { + bcopy(cp, (caddr_t) tl, left); + siz -= left; + cp += left; + m2->m_len += left; + left = 0; + } + } + /* Loop around adding mbufs */ + while (siz > 0) { + MGET(m1, M_WAIT, MT_DATA); + if (siz > MLEN) + MCLGET(m1, M_WAIT); + m1->m_len = NFSMSIZ(m1); + m2->m_next = m1; + m2 = m1; + tl = mtod(m1, u_long *); + tlen = 0; + if (putsize) { + *tl++ = txdr_unsigned(siz); + m1->m_len -= NFSX_UNSIGNED; + tlen = NFSX_UNSIGNED; + putsize = 0; + } + if (siz < m1->m_len) { + len = nfsm_rndup(siz); + xfer = siz; + if (xfer < len) + *(tl+(xfer>>2)) = 0; + } else { + xfer = len = m1->m_len; + } + bcopy(cp, (caddr_t) tl, xfer); + m1->m_len = len+tlen; + siz -= xfer; + cp += xfer; + } + *mb = m1; + *bpos = mtod(m1, caddr_t)+m1->m_len; + return (0); +} + +/* + * Called once to initialize data structures... + */ +int +nfs_init(vfsp) + struct vfsconf *vfsp; +{ + register int i; + + /* + * Check to see if major data structures haven't bloated. 
+ */ + if (sizeof (struct nfsnode) > NFS_NODEALLOC) { + printf("struct nfsnode bloated (> %dbytes)\n", NFS_NODEALLOC); + printf("Try reducing NFS_SMALLFH\n"); + } + if (sizeof (struct nfsmount) > NFS_MNTALLOC) { + printf("struct nfsmount bloated (> %dbytes)\n", NFS_MNTALLOC); + printf("Try reducing NFS_MUIDHASHSIZ\n"); + } + if (sizeof (struct nfssvc_sock) > NFS_SVCALLOC) { + printf("struct nfssvc_sock bloated (> %dbytes)\n",NFS_SVCALLOC); + printf("Try reducing NFS_UIDHASHSIZ\n"); + } + if (sizeof (struct nfsuid) > NFS_UIDALLOC) { + printf("struct nfsuid bloated (> %dbytes)\n",NFS_UIDALLOC); + printf("Try unionizing the nu_nickname and nu_flag fields\n"); + } + nfs_mount_type = vfsp->vfc_typenum; + nfsrtt.pos = 0; + rpc_vers = txdr_unsigned(RPC_VER2); + rpc_call = txdr_unsigned(RPC_CALL); + rpc_reply = txdr_unsigned(RPC_REPLY); + rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED); + rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED); + rpc_mismatch = txdr_unsigned(RPC_MISMATCH); + rpc_autherr = txdr_unsigned(RPC_AUTHERR); + rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX); + rpc_auth_kerb = txdr_unsigned(RPCAUTH_KERB4); + nfs_prog = txdr_unsigned(NFS_PROG); + nqnfs_prog = txdr_unsigned(NQNFS_PROG); + nfs_true = txdr_unsigned(TRUE); + nfs_false = txdr_unsigned(FALSE); + nfs_xdrneg1 = txdr_unsigned(-1); + nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000; + if (nfs_ticks < 1) + nfs_ticks = 1; + /* Ensure async daemons disabled */ + for (i = 0; i < NFS_MAXASYNCDAEMON; i++) { + nfs_iodwant[i] = (struct proc *)0; + nfs_iodmount[i] = (struct nfsmount *)0; + } + nfs_nhinit(); /* Init the nfsnode table */ +#ifndef NFS_NOSERVER + nfsrv_init(0); /* Init server data structures */ + nfsrv_initcache(); /* Init the server request cache */ +#endif + + /* + * Initialize the nqnfs server stuff. 
+ */ + if (nqnfsstarttime == 0) { + nqnfsstarttime = boottime.tv_sec + nqsrv_maxlease + + nqsrv_clockskew + nqsrv_writeslack; + NQLOADNOVRAM(nqnfsstarttime); + CIRCLEQ_INIT(&nqtimerhead); + nqfhhashtbl = hashinit(NQLCHSZ, M_NQLEASE, &nqfhhash); + } + + /* + * Initialize reply list and start timer + */ + TAILQ_INIT(&nfs_reqq); + + nfs_timer(0); + + +/* XXX CSM 12/4/97 Where are these declared in FreeBSD? */ +#ifdef notyet + /* + * Set up lease_check and lease_updatetime so that other parts + * of the system can call us, if we are loadable. + */ +#ifndef NFS_NOSERVER + default_vnodeop_p[VOFFSET(vop_lease)] = (vop_t *)nqnfs_vop_lease_check; +#endif + lease_updatetime = nfs_lease_updatetime; +#endif + vfsp->vfc_refcount++; /* make us non-unloadable */ + sysent[SYS_nfssvc].sy_narg = 2; + sysent[SYS_nfssvc].sy_call = nfssvc; +#ifndef NFS_NOSERVER + sysent[SYS_getfh].sy_narg = 2; + sysent[SYS_getfh].sy_call = getfh; +#endif + + return (0); +} + +/* + * Attribute cache routines. + * nfs_loadattrcache() - loads or updates the cache contents from attributes + * that are on the mbuf list + * nfs_getattrcache() - returns valid attributes if found in cache, returns + * error otherwise + */ + +/* + * Load the attribute cache (that lives in the nfsnode entry) with + * the values on the mbuf list and + * Iff vap not NULL + * copy the attributes to *vaper + */ +int +nfs_loadattrcache(vpp, mdp, dposp, vaper) + struct vnode **vpp; + struct mbuf **mdp; + caddr_t *dposp; + struct vattr *vaper; +{ + register struct vnode *vp = *vpp; + register struct vattr *vap; + register struct nfs_fattr *fp; + register struct nfsnode *np; + register long t1; + caddr_t cp2; + int error = 0, rdev; + struct mbuf *md; + enum vtype vtyp; + u_short vmode; + struct timespec mtime; + struct vnode *nvp; + int v3; + + /* this routine is a good place to check for VBAD again. We caught most of them + * in nfsm_request, but postprocessing may indirectly get here, so check again. 
+ */
+ if (vp->v_type==VBAD)
+ return (EINVAL);
+
+ v3 = NFS_ISV3(vp);
+ NFSTRACE(NFSTRC_LAC, vp);
+ md = *mdp;
+ t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
+ if ((error = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, &cp2)))
+ return (error);
+ fp = (struct nfs_fattr *)cp2;
+ if (v3) {
+ vtyp = nfsv3tov_type(fp->fa_type);
+ vmode = fxdr_unsigned(u_short, fp->fa_mode);
+ rdev = makedev(fxdr_unsigned(int, fp->fa3_rdev.specdata1),
+ fxdr_unsigned(int, fp->fa3_rdev.specdata2));
+ fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
+ } else {
+ vtyp = nfsv2tov_type(fp->fa_type);
+ vmode = fxdr_unsigned(u_short, fp->fa_mode);
+ /*
+ * XXX
+ *
+ * The duplicate information returned in fa_type and fa_mode
+ * is an ambiguity in the NFS version 2 protocol.
+ *
+ * VREG should be taken literally as a regular file. If a
+ * server intends to return some type information differently
+ * in the upper bits of the mode field (e.g. for sockets, or
+ * FIFOs), NFSv2 mandates fa_type to be VNON. Anyway, we
+ * leave the examination of the mode bits even in the VREG
+ * case to avoid breakage for bogus servers, but we make sure
+ * that there are actually type bits set in the upper part of
+ * fa_mode (and failing that, trust the va_type field).
+ *
+ * NFSv3 cleared the issue, and requires fa_mode to not
+ * contain any type information (while also introducing sockets
+ * and FIFOs for fa_type).
+ */
+ if (vtyp == VNON || (vtyp == VREG && (vmode & S_IFMT) != 0))
+ vtyp = IFTOVT(vmode);
+ rdev = fxdr_unsigned(long, fp->fa2_rdev);
+ fxdr_nfsv2time(&fp->fa2_mtime, &mtime);
+
+ /*
+ * Really ugly NFSv2 kludge.
+ */
+ if (vtyp == VCHR && rdev == 0xffffffff)
+ vtyp = VFIFO;
+ }
+
+ /*
+ * If v_type == VNON it is a new node, so fill in the v_type,
+ * n_mtime fields. Check to see if it represents a special
+ * device, and if so, check for a possible alias. Once the
+ * correct vnode has been obtained, fill in the rest of the
+ * information. 
+ */ + np = VTONFS(vp); + if (vp->v_type != vtyp) { + vp->v_type = vtyp; + + if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp)) + if (error = ubc_info_init(vp)) /* VREG */ + return(error); + + if (vp->v_type == VFIFO) { + vp->v_op = fifo_nfsv2nodeop_p; + } + if (vp->v_type == VCHR || vp->v_type == VBLK) { + vp->v_op = spec_nfsv2nodeop_p; + nvp = checkalias(vp, (dev_t)rdev, vp->v_mount); + if (nvp) { + /* + * Discard unneeded vnode, but save its nfsnode. + * Since the nfsnode does not have a lock, its + * vnode lock has to be carried over. + */ + nvp->v_vnlock = vp->v_vnlock; + vp->v_vnlock = NULL; + nvp->v_data = vp->v_data; + vp->v_data = NULL; + vp->v_op = spec_vnodeop_p; + vrele(vp); + vgone(vp); + /* + * Reinitialize aliased node. + */ + np->n_vnode = nvp; + *vpp = vp = nvp; + } + } + np->n_mtime = mtime.tv_sec; + NFSTRACE(NFSTRC_LAC_INIT, vp); + } + vap = &np->n_vattr; + vap->va_type = vtyp; + vap->va_mode = (vmode & 07777); + vap->va_rdev = (dev_t)rdev; + vap->va_mtime = mtime; + vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; + if (v3) { + vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink); + vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); + vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); + fxdr_hyper(&fp->fa3_size, &vap->va_size); + vap->va_blocksize = NFS_FABLKSIZE; + fxdr_hyper(&fp->fa3_used, &vap->va_bytes); + vap->va_fileid = fxdr_unsigned(int, fp->fa3_fileid.nfsuquad[1]); + fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime); + fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime); + vap->va_flags = 0; + vap->va_filerev = 0; + } else { + vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink); + vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid); + vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid); + vap->va_size = fxdr_unsigned(u_long, fp->fa2_size); + vap->va_blocksize = fxdr_unsigned(long, fp->fa2_blocksize); + vap->va_bytes = fxdr_unsigned(long, fp->fa2_blocks) * NFS_FABLKSIZE; + vap->va_fileid = fxdr_unsigned(long, fp->fa2_fileid); + fxdr_nfsv2time(&fp->fa2_atime, 
&vap->va_atime); + vap->va_flags = 0; + vap->va_ctime.tv_sec = fxdr_unsigned(long, fp->fa2_ctime.nfsv2_sec); + vap->va_ctime.tv_nsec = 0; + vap->va_gen = fxdr_unsigned(u_long, fp->fa2_ctime.nfsv2_usec); + vap->va_filerev = 0; + } + + if (vap->va_size != np->n_size) { + NFSTRACE4(NFSTRC_LAC_NP, vp, vap->va_size, np->n_size, + (vap->va_type == VREG) | + (np->n_flag & NMODIFIED ? 2 : 0)); + if (vap->va_type == VREG) { + int orig_size; + + orig_size = np->n_size; + + if (np->n_flag & NMODIFIED) { + if (vap->va_size < np->n_size) + vap->va_size = np->n_size; + else + np->n_size = vap->va_size; + } else + np->n_size = vap->va_size; + if (UBCISVALID(vp) && np->n_size > orig_size) + ubc_setsize(vp, (off_t)np->n_size); /* XXX check error */ + } else + np->n_size = vap->va_size; + } + + np->n_attrstamp = time.tv_sec; + if (vaper != NULL) { + bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap)); + if (np->n_flag & NCHG) { + if (np->n_flag & NACC) + vaper->va_atime = np->n_atim; + if (np->n_flag & NUPD) + vaper->va_mtime = np->n_mtim; + } + } + return (0); +} + +/* + * Check the time stamp + * If the cache is valid, copy contents to *vap and return 0 + * otherwise return an error + */ +int +nfs_getattrcache(vp, vaper) + register struct vnode *vp; + struct vattr *vaper; +{ + register struct nfsnode *np = VTONFS(vp); + register struct vattr *vap; + + if ((time.tv_sec - np->n_attrstamp) >= NFS_ATTRTIMEO(np)) { + NFSTRACE(NFSTRC_GAC_MISS, vp); + nfsstats.attrcache_misses++; + return (ENOENT); + } + NFSTRACE(NFSTRC_GAC_HIT, vp); + nfsstats.attrcache_hits++; + vap = &np->n_vattr; + + if (vap->va_size != np->n_size) { + NFSTRACE4(NFSTRC_GAC_NP, vp, vap->va_size, np->n_size, + (vap->va_type == VREG) | + (np->n_flag & NMODIFIED ? 
2 : 0)); + if (vap->va_type == VREG) { + int orig_size; + + orig_size = np->n_size; + + if (np->n_flag & NMODIFIED) { + if (vap->va_size < np->n_size) + vap->va_size = np->n_size; + else + np->n_size = vap->va_size; + } else + np->n_size = vap->va_size; + if (UBCISVALID(vp) && np->n_size > orig_size) + ubc_setsize(vp, (off_t)np->n_size); /* XXX check error */ + } else + np->n_size = vap->va_size; + } + + bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr)); + if (np->n_flag & NCHG) { + if (np->n_flag & NACC) + vaper->va_atime = np->n_atim; + if (np->n_flag & NUPD) + vaper->va_mtime = np->n_mtim; + } + return (0); +} + +#ifndef NFS_NOSERVER +/* + * Set up nameidata for a lookup() call and do it. + * + * If pubflag is set, this call is done for a lookup operation on the + * public filehandle. In that case we allow crossing mountpoints and + * absolute pathnames. However, the caller is expected to check that + * the lookup result is within the public fs, and deny access if + * it is not. + */ +int +nfs_namei(ndp, fhp, len, slp, nam, mdp, dposp, retdirp, p, kerbflag, pubflag) + register struct nameidata *ndp; + fhandle_t *fhp; + int len; + struct nfssvc_sock *slp; + struct mbuf *nam; + struct mbuf **mdp; + caddr_t *dposp; + struct vnode **retdirp; + struct proc *p; + int kerbflag, pubflag; +{ + register int i, rem; + register struct mbuf *md; + register char *fromcp, *tocp, *cp; + struct iovec aiov; + struct uio auio; + struct vnode *dp; + int error, rdonly, linklen; + struct componentname *cnp = &ndp->ni_cnd; + int olen = len; + + *retdirp = (struct vnode *)0; + MALLOC_ZONE(cnp->cn_pnbuf, char *, len + 1, M_NAMEI, M_WAITOK); + cnp->cn_pnlen = len + 1; + + /* + * Copy the name from the mbuf list to ndp->ni_pnbuf + * and set the various ndp fields appropriately. 
+ */ + fromcp = *dposp; + tocp = cnp->cn_pnbuf; + md = *mdp; + rem = mtod(md, caddr_t) + md->m_len - fromcp; + cnp->cn_hash = 0; + for (i = 1; i <= len; i++) { + while (rem == 0) { + md = md->m_next; + if (md == NULL) { + error = EBADRPC; + goto out; + } + fromcp = mtod(md, caddr_t); + rem = md->m_len; + } +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +#ifdef notdef + if (*fromcp == '\0' || (!pubflag && *fromcp == '/')) { +#else + if (*fromcp == '\0' || *fromcp == '/') { +#endif + error = EACCES; + goto out; + } + cnp->cn_hash += (unsigned char)*fromcp * i; + *tocp++ = *fromcp++; + rem--; + } + *tocp = '\0'; + *mdp = md; + *dposp = fromcp; + len = nfsm_rndup(len)-len; + if (len > 0) { + if (rem >= len) + *dposp += len; + else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0) + goto out; + } + + /* + * Extract and set starting directory. + */ + error = nfsrv_fhtovp(fhp, FALSE, &dp, ndp->ni_cnd.cn_cred, slp, + nam, &rdonly, kerbflag, pubflag); + if (error) + goto out; + if (dp->v_type != VDIR) { + vrele(dp); + error = ENOTDIR; + goto out; + } + + if (rdonly) + cnp->cn_flags |= RDONLY; + + *retdirp = dp; + +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +/* XXX debo 12/15/97 Need to fix M_NAMEI allocations to use zone protocol */ +#ifdef notyet + if (pubflag) { + /* + * Oh joy. For WebNFS, handle those pesky '%' escapes, + * and the 'native path' indicator. + */ + MALLOC(cp, char *, olen + 1, M_NAMEI, M_WAITOK); + fromcp = cnp->cn_pnbuf; + tocp = cp; + if ((unsigned char)*fromcp >= WEBNFS_SPECCHAR_START) { + switch ((unsigned char)*fromcp) { + case WEBNFS_NATIVE_CHAR: + /* + * 'Native' path for us is the same + * as a path according to the NFS spec, + * just skip the escape char. + */ + fromcp++; + break; + /* + * More may be added in the future, range 0x80-0xff + */ + default: + error = EIO; + FREE(cp, M_NAMEI); + goto out; + } + } + /* + * Translate the '%' escapes, URL-style. 
+ */ + while (*fromcp != '\0') { + if (*fromcp == WEBNFS_ESC_CHAR) { + if (fromcp[1] != '\0' && fromcp[2] != '\0') { + fromcp++; + *tocp++ = HEXSTRTOI(fromcp); + fromcp += 2; + continue; + } else { + error = ENOENT; + FREE(cp, M_NAMEI); + goto out; + } + } else + *tocp++ = *fromcp++; + } + *tocp = '\0'; + FREE(cnp->cn_pnbuf, M_NAMEI); + cnp->cn_pnbuf = cp; + } +#endif + + ndp->ni_pathlen = (tocp - cnp->cn_pnbuf) + 1; + ndp->ni_segflg = UIO_SYSSPACE; + +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +#ifdef notyet + if (pubflag) { + ndp->ni_rootdir = rootvnode; + ndp->ni_loopcnt = 0; + if (cnp->cn_pnbuf[0] == '/') + dp = rootvnode; + } else { + cnp->cn_flags |= NOCROSSMOUNT; + } +#else + cnp->cn_flags |= NOCROSSMOUNT; +#endif + + cnp->cn_proc = p; + VREF(dp); + + for (;;) { + cnp->cn_nameptr = cnp->cn_pnbuf; + ndp->ni_startdir = dp; + /* + * And call lookup() to do the real work + */ + error = lookup(ndp); + if (error) + break; + /* + * Check for encountering a symbolic link + */ + if ((cnp->cn_flags & ISSYMLINK) == 0) { + nfsrv_object_create(ndp->ni_vp); + if (cnp->cn_flags & (SAVENAME | SAVESTART)) { + cnp->cn_flags |= HASBUF; + return (0); + } + break; + } else { + if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1) + VOP_UNLOCK(ndp->ni_dvp, 0, p); +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +#ifdef notyet + if (!pubflag) { +#endif + vrele(ndp->ni_dvp); + vput(ndp->ni_vp); + ndp->ni_vp = NULL; + error = EINVAL; + break; +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +/* XXX debo 12/15/97 Need to fix M_NAMEI allocations to use zone protocol */ +#ifdef notyet + } + + if (ndp->ni_loopcnt++ >= MAXSYMLINKS) { + error = ELOOP; + break; + } + if (ndp->ni_pathlen > 1) + MALLOC(cp, char *, olen + 1, M_NAMEI, M_WAITOK); + else + cp = cnp->cn_pnbuf; + aiov.iov_base = cp; + aiov.iov_len = MAXPATHLEN; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_procp = 
(struct proc *)0; + auio.uio_resid = MAXPATHLEN; + error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred); + if (error) { + badlink: + if (ndp->ni_pathlen > 1) + FREE(cp, M_NAMEI); + break; + } + linklen = MAXPATHLEN - auio.uio_resid; + if (linklen == 0) { + error = ENOENT; + goto badlink; + } + if (linklen + ndp->ni_pathlen >= MAXPATHLEN) { + error = ENAMETOOLONG; + goto badlink; + } + if (ndp->ni_pathlen > 1) { + bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen); + FREE(cnp->cn_pnbuf, M_NAMEI); + cnp->cn_pnbuf = cp; + } else + cnp->cn_pnbuf[linklen] = '\0'; + ndp->ni_pathlen += linklen; + vput(ndp->ni_vp); + dp = ndp->ni_dvp; + /* + * Check if root directory should replace current directory. + */ + if (cnp->cn_pnbuf[0] == '/') { + vrele(dp); + dp = ndp->ni_rootdir; + VREF(dp); + } +#endif + } + } +out: + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + return (error); +} + +/* + * A fiddled version of m_adj() that ensures null fill to a long + * boundary and only trims off the back end + */ +void +nfsm_adj(mp, len, nul) + struct mbuf *mp; + register int len; + int nul; +{ + register struct mbuf *m; + register int count, i; + register char *cp; + + /* + * Trim from tail. Scan the mbuf chain, + * calculating its length and finding the last mbuf. + * If the adjustment only affects this mbuf, then just + * adjust and return. Otherwise, rescan and truncate + * after the remaining size. + */ + count = 0; + m = mp; + for (;;) { + count += m->m_len; + if (m->m_next == (struct mbuf *)0) + break; + m = m->m_next; + } + if (m->m_len > len) { + m->m_len -= len; + if (nul > 0) { + cp = mtod(m, caddr_t)+m->m_len-nul; + for (i = 0; i < nul; i++) + *cp++ = '\0'; + } + return; + } + count -= len; + if (count < 0) + count = 0; + /* + * Correct length for chain is "count". + * Find the mbuf with last data, adjust its length, + * and toss data from remaining mbufs on chain. 
+ */ + for (m = mp; m; m = m->m_next) { + if (m->m_len >= count) { + m->m_len = count; + if (nul > 0) { + cp = mtod(m, caddr_t)+m->m_len-nul; + for (i = 0; i < nul; i++) + *cp++ = '\0'; + } + break; + } + count -= m->m_len; + } + for (m = m->m_next;m;m = m->m_next) + m->m_len = 0; +} + +/* + * Make these functions instead of macros, so that the kernel text size + * doesn't get too big... + */ +void +nfsm_srvwcc(nfsd, before_ret, before_vap, after_ret, after_vap, mbp, bposp) + struct nfsrv_descript *nfsd; + int before_ret; + register struct vattr *before_vap; + int after_ret; + struct vattr *after_vap; + struct mbuf **mbp; + char **bposp; +{ + register struct mbuf *mb = *mbp, *mb2; + register char *bpos = *bposp; + register u_long *tl; + + if (before_ret) { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } else { + nfsm_build(tl, u_long *, 7 * NFSX_UNSIGNED); + *tl++ = nfs_true; + txdr_hyper(&(before_vap->va_size), tl); + tl += 2; + txdr_nfsv3time(&(before_vap->va_mtime), tl); + tl += 2; + txdr_nfsv3time(&(before_vap->va_ctime), tl); + } + *bposp = bpos; + *mbp = mb; + nfsm_srvpostopattr(nfsd, after_ret, after_vap, mbp, bposp); +} + +void +nfsm_srvpostopattr(nfsd, after_ret, after_vap, mbp, bposp) + struct nfsrv_descript *nfsd; + int after_ret; + struct vattr *after_vap; + struct mbuf **mbp; + char **bposp; +{ + register struct mbuf *mb = *mbp, *mb2; + register char *bpos = *bposp; + register u_long *tl; + register struct nfs_fattr *fp; + + if (after_ret) { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED + NFSX_V3FATTR); + *tl++ = nfs_true; + fp = (struct nfs_fattr *)tl; + nfsm_srvfattr(nfsd, after_vap, fp); + } + *mbp = mb; + *bposp = bpos; +} + +void +nfsm_srvfattr(nfsd, vap, fp) + register struct nfsrv_descript *nfsd; + register struct vattr *vap; + register struct nfs_fattr *fp; +{ + + fp->fa_nlink = txdr_unsigned(vap->va_nlink); + fp->fa_uid = txdr_unsigned(vap->va_uid); + 
fp->fa_gid = txdr_unsigned(vap->va_gid); + if (nfsd->nd_flag & ND_NFSV3) { + fp->fa_type = vtonfsv3_type(vap->va_type); + fp->fa_mode = vtonfsv3_mode(vap->va_mode); + txdr_hyper(&vap->va_size, &fp->fa3_size); + txdr_hyper(&vap->va_bytes, &fp->fa3_used); + fp->fa3_rdev.specdata1 = txdr_unsigned(major(vap->va_rdev)); + fp->fa3_rdev.specdata2 = txdr_unsigned(minor(vap->va_rdev)); + fp->fa3_fsid.nfsuquad[0] = 0; + fp->fa3_fsid.nfsuquad[1] = txdr_unsigned(vap->va_fsid); + fp->fa3_fileid.nfsuquad[0] = 0; + fp->fa3_fileid.nfsuquad[1] = txdr_unsigned(vap->va_fileid); + txdr_nfsv3time(&vap->va_atime, &fp->fa3_atime); + txdr_nfsv3time(&vap->va_mtime, &fp->fa3_mtime); + txdr_nfsv3time(&vap->va_ctime, &fp->fa3_ctime); + } else { + fp->fa_type = vtonfsv2_type(vap->va_type); + fp->fa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode); + fp->fa2_size = txdr_unsigned(vap->va_size); + fp->fa2_blocksize = txdr_unsigned(vap->va_blocksize); + if (vap->va_type == VFIFO) + fp->fa2_rdev = 0xffffffff; + else + fp->fa2_rdev = txdr_unsigned(vap->va_rdev); + fp->fa2_blocks = txdr_unsigned(vap->va_bytes / NFS_FABLKSIZE); + fp->fa2_fsid = txdr_unsigned(vap->va_fsid); + fp->fa2_fileid = txdr_unsigned(vap->va_fileid); + txdr_nfsv2time(&vap->va_atime, &fp->fa2_atime); + txdr_nfsv2time(&vap->va_mtime, &fp->fa2_mtime); + txdr_nfsv2time(&vap->va_ctime, &fp->fa2_ctime); + } +} + +/* + * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked) + * - look up fsid in mount list (if not found ret error) + * - get vp and export rights by calling VFS_FHTOVP() + * - if cred->cr_uid == 0 or MNT_EXPORTANON set it to credanon + * - if not lockflag unlock it with VOP_UNLOCK() + */ +int +nfsrv_fhtovp(fhp, lockflag, vpp, cred, slp, nam, rdonlyp, kerbflag, pubflag) + fhandle_t *fhp; + int lockflag; + struct vnode **vpp; + struct ucred *cred; + struct nfssvc_sock *slp; + struct mbuf *nam; + int *rdonlyp; + int kerbflag; + int pubflag; +{ + struct proc *p = current_proc(); /* XXX */ + register struct mount 
*mp; + register int i; + struct ucred *credanon; + int error, exflags; + + *vpp = (struct vnode *)0; + +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +#ifdef notyet + if (nfs_ispublicfh(fhp)) { + if (!pubflag || !nfs_pub.np_valid) + return (ESTALE); + fhp = &nfs_pub.np_handle; + } +#endif + + mp = vfs_getvfs(&fhp->fh_fsid); + if (!mp) + return (ESTALE); + error = VFS_FHTOVP(mp, &fhp->fh_fid, nam, vpp, &exflags, &credanon); + if (error) + return (error); + /* vnode pointer should be good at this point or ... */ + if (*vpp == NULL) + return (ESTALE); + /* + * Check/setup credentials. + */ + if (exflags & MNT_EXKERB) { + if (!kerbflag) { + vput(*vpp); + return (NFSERR_AUTHERR | AUTH_TOOWEAK); + } + } else if (kerbflag) { + vput(*vpp); + return (NFSERR_AUTHERR | AUTH_TOOWEAK); + } else if (cred->cr_uid == 0 || (exflags & MNT_EXPORTANON)) { + cred->cr_uid = credanon->cr_uid; + for (i = 0; i < credanon->cr_ngroups && i < NGROUPS; i++) + cred->cr_groups[i] = credanon->cr_groups[i]; + cred->cr_ngroups = i; + } + if (exflags & MNT_EXRDONLY) + *rdonlyp = 1; + else + *rdonlyp = 0; + + nfsrv_object_create(*vpp); + + if (!lockflag) + VOP_UNLOCK(*vpp, 0, p); + return (0); +} + + +/* + * WebNFS: check if a filehandle is a public filehandle. For v3, this + * means a length of 0, for v2 it means all zeroes. nfsm_srvmtofh has + * transformed this to all zeroes in both cases, so check for it. + */ +int +nfs_ispublicfh(fhp) + fhandle_t *fhp; +{ + char *cp = (char *)fhp; + int i; + + for (i = 0; i < NFSX_V3FH; i++) + if (*cp++ != 0) + return (FALSE); + return (TRUE); +} + +#endif /* NFS_NOSERVER */ +/* + * This function compares two net addresses by family and returns TRUE + * if they are the same host. + * If there is any doubt, return FALSE. + * The AF_INET family is handled as a special case so that address mbufs + * don't need to be saved to store "struct in_addr", which is only 4 bytes. 
+ */ +int +netaddr_match(family, haddr, nam) + int family; + union nethostaddr *haddr; + struct mbuf *nam; +{ + register struct sockaddr_in *inetaddr; + + switch (family) { + case AF_INET: + inetaddr = mtod(nam, struct sockaddr_in *); + if (inetaddr->sin_family == AF_INET && + inetaddr->sin_addr.s_addr == haddr->had_inetaddr) + return (1); + break; +#if ISO + case AF_ISO: + { + register struct sockaddr_iso *isoaddr1, *isoaddr2; + + isoaddr1 = mtod(nam, struct sockaddr_iso *); + isoaddr2 = mtod(haddr->had_nam, struct sockaddr_iso *); + if (isoaddr1->siso_family == AF_ISO && + isoaddr1->siso_nlen > 0 && + isoaddr1->siso_nlen == isoaddr2->siso_nlen && + SAME_ISOADDR(isoaddr1, isoaddr2)) + return (1); + break; + } +#endif /* ISO */ + default: + break; + }; + return (0); +} + +static nfsuint64 nfs_nullcookie = { 0, 0 }; +/* + * This function finds the directory cookie that corresponds to the + * logical byte offset given. + */ +nfsuint64 * +nfs_getcookie(np, off, add) + register struct nfsnode *np; + off_t off; + int add; +{ + register struct nfsdmap *dp, *dp2; + register int pos; + + pos = off / NFS_DIRBLKSIZ; + if (pos == 0) { +#if DIAGNOSTIC + if (add) + panic("nfs getcookie add at 0"); +#endif + return (&nfs_nullcookie); + } + pos--; + dp = np->n_cookies.lh_first; + if (!dp) { + if (add) { + MALLOC_ZONE(dp, struct nfsdmap *, + sizeof (struct nfsdmap), + M_NFSDIROFF, M_WAITOK); + dp->ndm_eocookie = 0; + LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list); + } else + return ((nfsuint64 *)0); + } + while (pos >= NFSNUMCOOKIES) { + pos -= NFSNUMCOOKIES; + if (dp->ndm_list.le_next) { + if (!add && dp->ndm_eocookie < NFSNUMCOOKIES && + pos >= dp->ndm_eocookie) + return ((nfsuint64 *)0); + dp = dp->ndm_list.le_next; + } else if (add) { + MALLOC_ZONE(dp2, struct nfsdmap *, + sizeof (struct nfsdmap), + M_NFSDIROFF, M_WAITOK); + dp2->ndm_eocookie = 0; + LIST_INSERT_AFTER(dp, dp2, ndm_list); + dp = dp2; + } else + return ((nfsuint64 *)0); + } + if (pos >= dp->ndm_eocookie) { + if 
(add) + dp->ndm_eocookie = pos + 1; + else + return ((nfsuint64 *)0); + } + return (&dp->ndm_cookies[pos]); +} + +/* + * Invalidate cached directory information, except for the actual directory + * blocks (which are invalidated separately). + * Done mainly to avoid the use of stale offset cookies. + */ +void +nfs_invaldir(vp) + register struct vnode *vp; +{ + register struct nfsnode *np = VTONFS(vp); + +#if DIAGNOSTIC + if (vp->v_type != VDIR) + panic("nfs: invaldir not dir"); +#endif + np->n_direofoffset = 0; + np->n_cookieverf.nfsuquad[0] = 0; + np->n_cookieverf.nfsuquad[1] = 0; + if (np->n_cookies.lh_first) + np->n_cookies.lh_first->ndm_eocookie = 0; +} + +/* + * The write verifier has changed (probably due to a server reboot), so all + * B_NEEDCOMMIT blocks will have to be written again. Since they are on the + * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT + * flag. Once done the new write verifier can be set for the mount point. + */ +void +nfs_clearcommit(mp) + struct mount *mp; +{ + register struct vnode *vp, *nvp; + register struct buf *bp, *nbp; + int s; + + s = splbio(); +loop: + for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) { + if (vp->v_mount != mp) /* Paranoia */ + goto loop; + nvp = vp->v_mntvnodes.le_next; + for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) { + nbp = bp->b_vnbufs.le_next; + if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT)) + == (B_DELWRI | B_NEEDCOMMIT)) + bp->b_flags &= ~B_NEEDCOMMIT; + } + } + splx(s); +} + +#ifndef NFS_NOSERVER +/* + * Map errnos to NFS error numbers. For Version 3 also filter out error + * numbers not specified for the associated procedure. 
+ */ +int +nfsrv_errmap(nd, err) + struct nfsrv_descript *nd; + register int err; +{ + register short *defaulterrp, *errp; + + if (nd->nd_flag & ND_NFSV3) { + if (nd->nd_procnum <= NFSPROC_COMMIT) { + errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum]; + while (*++errp) { + if (*errp == err) + return (err); + else if (*errp > err) + break; + } + return ((int)*defaulterrp); + } else + return (err & 0xffff); + } + if (err <= ELAST) + return ((int)nfsrv_v2errmap[err - 1]); + return (NFSERR_IO); +} + +/* XXX CSM 11/25/97 Revisit when Ramesh merges vm with buffer cache */ +#define vfs_object_create(v, p, c, l) (0) + +int +nfsrv_object_create(struct vnode *vp) { + struct proc *curproc = current_proc(); + + if ((vp == NULL) || (vp->v_type != VREG)) + return 1; + return vfs_object_create(vp, curproc, curproc?curproc->p_ucred:NULL, 1); +} +#endif /* NFS_NOSERVER */ + diff --git a/bsd/nfs/nfs_syscalls.c b/bsd/nfs/nfs_syscalls.c new file mode 100644 index 000000000..3ec010a93 --- /dev/null +++ b/bsd/nfs/nfs_syscalls.c @@ -0,0 +1,1302 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_syscalls.c 8.5 (Berkeley) 3/30/95 + * FreeBSD-Id: nfs_syscalls.c,v 1.32 1997/11/07 08:53:25 phk Exp $ + */ + +#include +#include +/* XXX CSM 11/25/97 FreeBSD's generated syscall prototypes */ +#ifdef notyet +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#if ISO +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Global defs. 
*/ +extern int (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *nd, + struct nfssvc_sock *slp, + struct proc *procp, + struct mbuf **mreqp)); +extern int nfs_numasync; +extern time_t nqnfsstarttime; +extern int nqsrv_writeslack; +extern int nfsrtton; +extern struct nfsstats nfsstats; +extern int nfsrvw_procrastinate; +extern int nfsrvw_procrastinate_v3; +struct nfssvc_sock *nfs_udpsock, *nfs_cltpsock; +static int nuidhash_max = NFS_MAXUIDHASH; + +static void nfsrv_zapsock __P((struct nfssvc_sock *slp)); +static int nfssvc_iod __P((struct proc *)); + +#define TRUE 1 +#define FALSE 0 + +static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON]; + +#ifndef NFS_NOSERVER +int nfsd_waiting = 0; +static struct nfsdrt nfsdrt; +static int nfs_numnfsd = 0; +static int notstarted = 1; +static int modify_flag = 0; +static void nfsd_rt __P((int sotype, struct nfsrv_descript *nd, + int cacherep)); +static int nfssvc_addsock __P((struct file *, struct mbuf *, + struct proc *)); +static int nfssvc_nfsd __P((struct nfsd_srvargs *,caddr_t,struct proc *)); + +static int nfs_privport = 0; +/* XXX CSM 11/25/97 Upgrade sysctl.h someday */ +#ifdef notyet +SYSCTL_INT(_vfs_nfs, NFS_NFSPRIVPORT, nfs_privport, CTLFLAG_RW, &nfs_privport, 0, ""); +SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay, CTLFLAG_RW, &nfsrvw_procrastinate, 0, ""); +SYSCTL_INT(_vfs_nfs, OID_AUTO, gatherdelay_v3, CTLFLAG_RW, &nfsrvw_procrastinate_v3, 0, ""); +#endif + +/* + * NFS server system calls + * getfh() lives here too, but maybe should move to kern/vfs_syscalls.c + */ + +/* + * Get file handle system call + */ +#ifndef _SYS_SYSPROTO_H_ +struct getfh_args { + char *fname; + fhandle_t *fhp; +}; +#endif +int +getfh(p, uap) + struct proc *p; + register struct getfh_args *uap; +{ + register struct vnode *vp; + fhandle_t fh; + int error; + struct nameidata nd; + + /* + * Must be super user + */ + error = suser(p->p_ucred, &p->p_acflag); + if(error) + return (error); + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, 
uap->fname, p);
+ error = namei(&nd);
+ if (error)
+ return (error);
+ vp = nd.ni_vp;
+ bzero((caddr_t)&fh, sizeof(fh));
+ fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
+ error = VFS_VPTOFH(vp, &fh.fh_fid);
+ vput(vp);
+ if (error)
+ return (error);
+ error = copyout((caddr_t)&fh, (caddr_t)uap->fhp, sizeof (fh));
+ return (error);
+}
+
+#endif /* NFS_NOSERVER */
+/*
+ * Nfs server pseudo system call for the nfsd's
+ * Based on the flag value it either:
+ * - adds a socket to the selection list
+ * - remains in the kernel as an nfsd
+ * - remains in the kernel as an nfsiod
+ */
+#ifndef _SYS_SYSPROTO_H_
+struct nfssvc_args {
+ int flag;
+ caddr_t argp;
+};
+#endif
+int
+nfssvc(p, uap)
+ struct proc *p;
+ register struct nfssvc_args *uap;
+{
+#ifndef NFS_NOSERVER
+ struct nameidata nd;
+ struct file *fp;
+ struct mbuf *nam;
+ struct nfsd_args nfsdarg;
+ struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
+ struct nfsd_cargs ncd;
+ struct nfsd *nfsd;
+ struct nfssvc_sock *slp;
+ struct nfsuid *nuidp;
+ struct nfsmount *nmp;
+#endif /* NFS_NOSERVER */
+ int error;
+
+ /*
+ * Must be super user
+ */
+ error = suser(p->p_ucred, &p->p_acflag);
+ if(error)
+ return (error);
+ while (nfssvc_sockhead_flag & SLP_INIT) {
+ nfssvc_sockhead_flag |= SLP_WANTINIT;
+ (void) tsleep((caddr_t)&nfssvc_sockhead, PSOCK, "nfsd init", 0);
+ }
+ if (uap->flag & NFSSVC_BIOD)
+ error = nfssvc_iod(p);
+#ifdef NFS_NOSERVER
+ else
+ error = ENXIO;
+#else /* !NFS_NOSERVER */
+ else if (uap->flag & NFSSVC_MNTD) {
+ error = copyin(uap->argp, (caddr_t)&ncd, sizeof (ncd));
+ if (error)
+ return (error);
+ NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
+ ncd.ncd_dirp, p);
+ error = namei(&nd);
+ if (error)
+ return (error);
+ if ((nd.ni_vp->v_flag & VROOT) == 0)
+ error = EINVAL;
+ nmp = VFSTONFS(nd.ni_vp->v_mount);
+ vput(nd.ni_vp);
+ if (error)
+ return (error);
+
+ /* disable split funnels now */
+ thread_funnel_merge(kernel_flock, network_flock);
+
+ if ((nmp->nm_flag & NFSMNT_MNTD) &&
+ 
(uap->flag & NFSSVC_GOTAUTH) == 0) + return (0); + nmp->nm_flag |= NFSMNT_MNTD; + error = nqnfs_clientd(nmp, p->p_ucred, &ncd, uap->flag, + uap->argp, p); + } else if (uap->flag & NFSSVC_ADDSOCK) { + error = copyin(uap->argp, (caddr_t)&nfsdarg, sizeof(nfsdarg)); + if (error) + return (error); + error = getsock(p->p_fd, nfsdarg.sock, &fp); + if (error) + return (error); + /* + * Get the client address for connected sockets. + */ + if (nfsdarg.name == NULL || nfsdarg.namelen == 0) + nam = (struct mbuf *)0; + else { + error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen, + MT_SONAME); + if (error) + return (error); + } + error = nfssvc_addsock(fp, nam, p); + } else { + error = copyin(uap->argp, (caddr_t)nsd, sizeof (*nsd)); + if (error) + return (error); + + /* disable split funnels now */ + thread_funnel_merge(kernel_flock, network_flock); + + if ((uap->flag & NFSSVC_AUTHIN) && ((nfsd = nsd->nsd_nfsd)) && + (nfsd->nfsd_slp->ns_flag & SLP_VALID)) { + slp = nfsd->nfsd_slp; + + /* + * First check to see if another nfsd has already + * added this credential. + */ + for (nuidp = NUIDHASH(slp,nsd->nsd_cr.cr_uid)->lh_first; + nuidp != 0; nuidp = nuidp->nu_hash.le_next) { + if (nuidp->nu_cr.cr_uid == nsd->nsd_cr.cr_uid && + (!nfsd->nfsd_nd->nd_nam2 || + netaddr_match(NU_NETFAM(nuidp), + &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2))) + break; + } + if (nuidp) { + nfsrv_setcred(&nuidp->nu_cr,&nfsd->nfsd_nd->nd_cr); + nfsd->nfsd_nd->nd_flag |= ND_KERBFULL; + } else { + /* + * Nope, so we will. 
+ */ + if (slp->ns_numuids < nuidhash_max) { + slp->ns_numuids++; + nuidp = (struct nfsuid *) + _MALLOC_ZONE(sizeof (struct nfsuid), + M_NFSUID, M_WAITOK); + } else + nuidp = (struct nfsuid *)0; + if ((slp->ns_flag & SLP_VALID) == 0) { + if (nuidp) + _FREE_ZONE((caddr_t)nuidp, + sizeof (struct nfsuid), M_NFSUID); + } else { + if (nuidp == (struct nfsuid *)0) { + nuidp = slp->ns_uidlruhead.tqh_first; + LIST_REMOVE(nuidp, nu_hash); + TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, + nu_lru); + if (nuidp->nu_flag & NU_NAM) + m_freem(nuidp->nu_nam); + } + nuidp->nu_flag = 0; + nuidp->nu_cr = nsd->nsd_cr; + if (nuidp->nu_cr.cr_ngroups > NGROUPS) + nuidp->nu_cr.cr_ngroups = NGROUPS; + nuidp->nu_cr.cr_ref = 1; + nuidp->nu_timestamp = nsd->nsd_timestamp; + nuidp->nu_expire = time.tv_sec + nsd->nsd_ttl; + /* + * and save the session key in nu_key. + */ + bcopy(nsd->nsd_key, nuidp->nu_key, + sizeof (nsd->nsd_key)); + if (nfsd->nfsd_nd->nd_nam2) { + struct sockaddr_in *saddr; + + saddr = mtod(nfsd->nfsd_nd->nd_nam2, + struct sockaddr_in *); + switch (saddr->sin_family) { + case AF_INET: + nuidp->nu_flag |= NU_INETADDR; + nuidp->nu_inetaddr = + saddr->sin_addr.s_addr; + break; + case AF_ISO: + default: + nuidp->nu_flag |= NU_NAM; + nuidp->nu_nam = m_copym( + nfsd->nfsd_nd->nd_nam2, 0, + M_COPYALL, M_WAIT); + break; + }; + } + TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp, + nu_lru); + LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid), + nuidp, nu_hash); + nfsrv_setcred(&nuidp->nu_cr, + &nfsd->nfsd_nd->nd_cr); + nfsd->nfsd_nd->nd_flag |= ND_KERBFULL; + } + } + } + if ((uap->flag & NFSSVC_AUTHINFAIL) && (nfsd = nsd->nsd_nfsd)) + nfsd->nfsd_flag |= NFSD_AUTHFAIL; + error = nfssvc_nfsd(nsd, uap->argp, p); + } +#endif /* NFS_NOSERVER */ + if (error == EINTR || error == ERESTART) + error = 0; + return (error); +} + +#ifndef NFS_NOSERVER +/* + * Adds a socket to the list for servicing by nfsds. 
+ */ +static int +nfssvc_addsock(fp, mynam, p) + struct file *fp; + struct mbuf *mynam; + struct proc *p; +{ + register struct mbuf *m; + register int siz; + register struct nfssvc_sock *slp; + register struct socket *so; + struct nfssvc_sock *tslp; + int error, s; + + so = (struct socket *)fp->f_data; + tslp = (struct nfssvc_sock *)0; + /* + * Add it to the list, as required. + */ + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + if (so->so_proto->pr_protocol == IPPROTO_UDP) { + tslp = nfs_udpsock; + if (tslp->ns_flag & SLP_VALID) { + m_freem(mynam); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (EPERM); + } +#if ISO + } else if (so->so_proto->pr_protocol == ISOPROTO_CLTP) { + tslp = nfs_cltpsock; + if (tslp->ns_flag & SLP_VALID) { + m_freem(mynam); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (EPERM); + } +#endif /* ISO */ + } + if (so->so_type == SOCK_STREAM) + siz = NFS_MAXPACKET + sizeof (u_long); + else + siz = NFS_MAXPACKET; + error = soreserve(so, siz, siz); + if (error) { + m_freem(mynam); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + return (error); + } + + /* + * Set protocol specific options { for now TCP only } and + * reserve some space. For datagram sockets, this can get called + * repeatedly for the same socket, but that isn't harmful. 
+ */ + if (so->so_type == SOCK_STREAM) { + struct sockopt sopt; + int val; + + bzero(&sopt, sizeof sopt); + sopt.sopt_level = SOL_SOCKET; + sopt.sopt_name = SO_KEEPALIVE; + sopt.sopt_val = &val; + sopt.sopt_valsize = sizeof val; + val = 1; + sosetopt(so, &sopt); + } + if (so->so_proto->pr_domain->dom_family == AF_INET && + so->so_proto->pr_protocol == IPPROTO_TCP) { + struct sockopt sopt; + int val; + + bzero(&sopt, sizeof sopt); + sopt.sopt_level = IPPROTO_TCP; + sopt.sopt_name = TCP_NODELAY; + sopt.sopt_val = &val; + sopt.sopt_valsize = sizeof val; + val = 1; + sosetopt(so, &sopt); + } + + so->so_rcv.sb_flags &= ~SB_NOINTR; + so->so_rcv.sb_timeo = 0; + so->so_snd.sb_flags &= ~SB_NOINTR; + so->so_snd.sb_timeo = 0; + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + if (tslp) + slp = tslp; + else { + MALLOC(slp, struct nfssvc_sock *, sizeof(struct nfssvc_sock), + M_NFSSVC, M_WAITOK); + bzero((caddr_t)slp, sizeof (struct nfssvc_sock)); + TAILQ_INIT(&slp->ns_uidlruhead); + TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain); + } + slp->ns_so = so; + slp->ns_nam = mynam; + slp->ns_fp = fp; + (void)fref(fp); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + s = splnet(); + so->so_upcallarg = (caddr_t)slp; + so->so_upcall = nfsrv_rcv; + so->so_rcv.sb_flags |= SB_UPCALL; /* required for freebsd merge */ + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + slp->ns_flag = (SLP_VALID | SLP_NEEDQ); + nfsrv_wakenfsd(slp); + splx(s); + return (0); +} + +/* + * Called by nfssvc() for nfsds. Just loops around servicing rpc requests + * until it is killed by a signal. 
+ */ +static int +nfssvc_nfsd(nsd, argp, p) + struct nfsd_srvargs *nsd; + caddr_t argp; + struct proc *p; +{ + register struct mbuf *m; + register int siz; + register struct nfssvc_sock *slp; + register struct socket *so; + register int *solockp; + struct nfsd *nfsd = nsd->nsd_nfsd; + struct nfsrv_descript *nd = NULL; + struct mbuf *mreq; + int error = 0, cacherep, s, sotype, writes_todo; + int procrastinate; + u_quad_t cur_usec; + extern void nfs_aio_thread_init(); + +#ifndef nolint + cacherep = RC_DOIT; + writes_todo = 0; +#endif + s = splnet(); + if (nfsd == (struct nfsd *)0) { + MALLOC(nfsd, struct nfsd *, sizeof(struct nfsd), M_NFSD, M_WAITOK); + nsd->nsd_nfsd = nfsd; + bzero((caddr_t)nfsd, sizeof (struct nfsd)); + nfsd->nfsd_procp = p; + TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain); + nfs_numnfsd++; + nfs_aio_thread_init(); + } + /* + * Loop getting rpc requests until SIGKILL. + */ + for (;;) { + if ((nfsd->nfsd_flag & NFSD_REQINPROG) == 0) { + while (nfsd->nfsd_slp == (struct nfssvc_sock *)0 && + (nfsd_head_flag & NFSD_CHECKSLP) == 0) { + nfsd->nfsd_flag |= NFSD_WAITING; + nfsd_waiting++; + error = tsleep((caddr_t)nfsd, PSOCK | PCATCH, + "nfsd", 0); + nfsd_waiting--; + if (error) + goto done; + } + if (nfsd->nfsd_slp == (struct nfssvc_sock *)0 && + (nfsd_head_flag & NFSD_CHECKSLP) != 0) { + for (slp = nfssvc_sockhead.tqh_first; slp != 0; + slp = slp->ns_chain.tqe_next) { + if ((slp->ns_flag & (SLP_VALID | SLP_DOREC)) + == (SLP_VALID | SLP_DOREC)) { + slp->ns_flag &= ~SLP_DOREC; + slp->ns_sref++; + nfsd->nfsd_slp = slp; + break; + } + } + if (slp == 0) + nfsd_head_flag &= ~NFSD_CHECKSLP; + } + if ((slp = nfsd->nfsd_slp) == (struct nfssvc_sock *)0) + continue; + if (slp->ns_flag & SLP_VALID) { + if (slp->ns_flag & SLP_DISCONN) + nfsrv_zapsock(slp); + else if (slp->ns_flag & SLP_NEEDQ) { + slp->ns_flag &= ~SLP_NEEDQ; + (void) nfs_sndlock(&slp->ns_solock, + (struct nfsreq *)0); + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + nfsrv_rcv(slp->ns_so, 
(caddr_t)slp, + M_WAIT); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + nfs_sndunlock(&slp->ns_solock); + } + error = nfsrv_dorec(slp, nfsd, &nd); + cur_usec = (u_quad_t)time.tv_sec * 1000000 + + (u_quad_t)time.tv_usec; + if (error && slp->ns_tq.lh_first && + slp->ns_tq.lh_first->nd_time <= cur_usec) { + error = 0; + cacherep = RC_DOIT; + writes_todo = 1; + } else + writes_todo = 0; + nfsd->nfsd_flag |= NFSD_REQINPROG; + } + } else { + error = 0; + slp = nfsd->nfsd_slp; + } + if (error || (slp->ns_flag & SLP_VALID) == 0) { + if (nd) { + _FREE_ZONE((caddr_t)nd, + sizeof *nd, M_NFSRVDESC); + nd = NULL; + } + nfsd->nfsd_slp = (struct nfssvc_sock *)0; + nfsd->nfsd_flag &= ~NFSD_REQINPROG; + nfsrv_slpderef(slp); + continue; + } + splx(s); + so = slp->ns_so; + sotype = so->so_type; + if (so->so_proto->pr_flags & PR_CONNREQUIRED) + solockp = &slp->ns_solock; + else + solockp = (int *)0; + if (nd) { + nd->nd_starttime = time; + if (nd->nd_nam2) + nd->nd_nam = nd->nd_nam2; + else + nd->nd_nam = slp->ns_nam; + + /* + * Check to see if authorization is needed. + */ + if (nfsd->nfsd_flag & NFSD_NEEDAUTH) { + nfsd->nfsd_flag &= ~NFSD_NEEDAUTH; + nsd->nsd_haddr = mtod(nd->nd_nam, + struct sockaddr_in *)->sin_addr.s_addr; + nsd->nsd_authlen = nfsd->nfsd_authlen; + nsd->nsd_verflen = nfsd->nfsd_verflen; + if (!copyout(nfsd->nfsd_authstr,nsd->nsd_authstr, + nfsd->nfsd_authlen) && + !copyout(nfsd->nfsd_verfstr, nsd->nsd_verfstr, + nfsd->nfsd_verflen) && + !copyout((caddr_t)nsd, argp, sizeof (*nsd))) + return (ENEEDAUTH); + cacherep = RC_DROPIT; + } else + cacherep = nfsrv_getcache(nd, slp, &mreq); + + /* + * Check for just starting up for NQNFS and send + * fake "try again later" replies to the NQNFS clients. 
+ */ + if (notstarted && nqnfsstarttime <= time.tv_sec) { + if (modify_flag) { + nqnfsstarttime = time.tv_sec + nqsrv_writeslack; + modify_flag = 0; + } else + notstarted = 0; + } + if (notstarted) { + if ((nd->nd_flag & ND_NQNFS) == 0) + cacherep = RC_DROPIT; + else if (nd->nd_procnum != NFSPROC_WRITE) { + nd->nd_procnum = NFSPROC_NOOP; + nd->nd_repstat = NQNFS_TRYLATER; + cacherep = RC_DOIT; + } else + modify_flag = 1; + } else if (nfsd->nfsd_flag & NFSD_AUTHFAIL) { + nfsd->nfsd_flag &= ~NFSD_AUTHFAIL; + nd->nd_procnum = NFSPROC_NOOP; + nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK); + cacherep = RC_DOIT; + } else if (nfs_privport) { + /* Check if source port is privileged */ + u_short port; + struct sockaddr *nam = nd->nd_nam; + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)nam; + port = ntohs(sin->sin_port); + if (port >= IPPORT_RESERVED && + nd->nd_procnum != NFSPROC_NULL) { + nd->nd_procnum = NFSPROC_NOOP; + nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK); + cacherep = RC_DOIT; + printf("NFS request from unprivileged port (%s:%d)\n", + (char *)(inet_ntoa(sin->sin_addr)), port); + } + } + + } + + /* + * Loop to get all the write rpc relies that have been + * gathered together. 
+ */ + do { + switch (cacherep) { + case RC_DOIT: + if (nd && (nd->nd_flag & ND_NFSV3)) + procrastinate = nfsrvw_procrastinate_v3; + else + procrastinate = nfsrvw_procrastinate; + if (writes_todo || (nd->nd_procnum == NFSPROC_WRITE && + procrastinate > 0 && !notstarted)) + error = nfsrv_writegather(&nd, slp, + nfsd->nfsd_procp, &mreq); + else + error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, + slp, nfsd->nfsd_procp, &mreq); + if (mreq == NULL) + break; + if (error) { + if (nd->nd_procnum != NQNFSPROC_VACATED) + nfsstats.srv_errs++; + nfsrv_updatecache(nd, FALSE, mreq); + if (nd->nd_nam2) + m_freem(nd->nd_nam2); + break; + } + nfsstats.srvrpccnt[nd->nd_procnum]++; + nfsrv_updatecache(nd, TRUE, mreq); + nd->nd_mrep = (struct mbuf *)0; + case RC_REPLY: + m = mreq; + siz = 0; + while (m) { + siz += m->m_len; + m = m->m_next; + } + if (siz <= 0 || siz > NFS_MAXPACKET) { + printf("mbuf siz=%d\n",siz); + panic("Bad nfs svc reply"); + } + m = mreq; + m->m_pkthdr.len = siz; + m->m_pkthdr.rcvif = (struct ifnet *)0; + /* + * For stream protocols, prepend a Sun RPC + * Record Mark. 
+ */ + if (sotype == SOCK_STREAM) { + M_PREPEND(m, NFSX_UNSIGNED, M_WAIT); + *mtod(m, u_long *) = htonl(0x80000000 | siz); + } + if (solockp) + (void) nfs_sndlock(solockp, (struct nfsreq *)0); + if (slp->ns_flag & SLP_VALID) + error = nfs_send(so, nd->nd_nam2, m, NULL); + else { + error = EPIPE; + m_freem(m); + } + if (nfsrtton) + nfsd_rt(sotype, nd, cacherep); + if (nd->nd_nam2) + MFREE(nd->nd_nam2, m); + if (nd->nd_mrep) + m_freem(nd->nd_mrep); + if (error == EPIPE) + nfsrv_zapsock(slp); + if (solockp) + nfs_sndunlock(solockp); + if (error == EINTR || error == ERESTART) { + _FREE_ZONE((caddr_t)nd, + sizeof *nd, M_NFSRVDESC); + nfsrv_slpderef(slp); + s = splnet(); + goto done; + } + break; + case RC_DROPIT: + if (nfsrtton) + nfsd_rt(sotype, nd, cacherep); + m_freem(nd->nd_mrep); + m_freem(nd->nd_nam2); + break; + }; + if (nd) { + FREE_ZONE((caddr_t)nd, sizeof *nd, M_NFSRVDESC); + nd = NULL; + } + + /* + * Check to see if there are outstanding writes that + * need to be serviced. + */ + cur_usec = (u_quad_t)time.tv_sec * 1000000 + + (u_quad_t)time.tv_usec; + s = splsoftclock(); + if (slp->ns_tq.lh_first && + slp->ns_tq.lh_first->nd_time <= cur_usec) { + cacherep = RC_DOIT; + writes_todo = 1; + } else + writes_todo = 0; + splx(s); + } while (writes_todo); + s = splnet(); + if (nfsrv_dorec(slp, nfsd, &nd)) { + nfsd->nfsd_flag &= ~NFSD_REQINPROG; + nfsd->nfsd_slp = NULL; + nfsrv_slpderef(slp); + } + } +done: + TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain); + splx(s); + _FREE((caddr_t)nfsd, M_NFSD); + nsd->nsd_nfsd = (struct nfsd *)0; + if (--nfs_numnfsd == 0) + nfsrv_init(TRUE); /* Reinitialize everything */ + return (error); +} +#endif /* NFS_NOSERVER */ + +int nfs_defect = 0; +/* XXX CSM 11/25/97 Upgrade sysctl.h someday */ +#ifdef notyet +SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0, ""); +#endif + +static int nfssvc_iod_continue(int); + +/* + * Asynchronous I/O daemons for client nfs. 
+ * They do read-ahead and write-behind operations on the block I/O cache. + * Never returns unless it fails or gets killed. + */ +static int +nfssvc_iod(p) + struct proc *p; +{ + register struct buf *bp; + register int i, myiod; + struct nfsmount *nmp; + int error = 0; + struct uthread *ut; + + /* + * Assign my position or return error if too many already running + */ + myiod = -1; + for (i = 0; i < NFS_MAXASYNCDAEMON; i++) + if (nfs_asyncdaemon[i] == 0) { + nfs_asyncdaemon[i]++; + myiod = i; + break; + } + if (myiod == -1) + return (EBUSY); + nfs_numasync++; + + /* stuff myiod into uthread to get off local stack for + continuation */ + + ut = get_bsdthread_info(current_act()); + ut->uu_state.uu_nfs_myiod = myiod; /* squirrel away for continuation */ + + nfssvc_iod_continue(0); + /* NOTREACHED */ + +} + +/* + * Continuation for Asynchronous I/O daemons for client nfs. + */ +static int +nfssvc_iod_continue(error) +{ + register struct buf *bp; + register int i, myiod; + struct nfsmount *nmp; + struct uthread *ut; + struct proc *p; + + /* + * real myiod is stored in uthread, recover it + */ + ut = get_bsdthread_info(current_act()); + myiod = ut->uu_state.uu_nfs_myiod; + p = get_bsdtask_info(current_task()); + + /* + * Just loop around doin our stuff until SIGKILL + * - actually we don't loop with continuations... 
+ */ + for (;;) { + while (((nmp = nfs_iodmount[myiod]) == NULL + || nmp->nm_bufq.tqh_first == NULL) + && error == 0) { + if (nmp) + nmp->nm_bufqiods--; + nfs_iodwant[myiod] = p; + nfs_iodmount[myiod] = NULL; + error = tsleep0((caddr_t)&nfs_iodwant[myiod], + PWAIT | PCATCH, "nfsidl", 0, nfssvc_iod_continue); + /* NOTREACHED */ + } + if (error) { + nfs_asyncdaemon[myiod] = 0; + if (nmp) nmp->nm_bufqiods--; + nfs_iodwant[myiod] = NULL; + nfs_iodmount[myiod] = NULL; + nfs_numasync--; + if (error == EINTR || error == ERESTART) + error = 0; +#if defined (__i386__) + return(error); +#else + unix_syscall_return(error); +#endif + } + while ((bp = nmp->nm_bufq.tqh_first) != NULL) { + /* Take one off the front of the list */ + TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist); + nmp->nm_bufqlen--; + if (nmp->nm_bufqwant && nmp->nm_bufqlen < 2 * nfs_numasync) { + nmp->nm_bufqwant = FALSE; + wakeup(&nmp->nm_bufq); + } + if (ISSET(bp->b_flags, B_READ)) + (void) nfs_doio(bp, bp->b_rcred, (struct proc *)0); + else + (void) nfs_doio(bp, bp->b_wcred, (struct proc *)0); + + /* + * If there are more than one iod on this mount, then defect + * so that the iods can be shared out fairly between the mounts + */ + if (nfs_defect && nmp->nm_bufqiods > 1) { + NFS_DPF(ASYNCIO, + ("nfssvc_iod: iod %d defecting from mount %p\n", + myiod, nmp)); + nfs_iodmount[myiod] = NULL; + nmp->nm_bufqiods--; + break; + } + } + } +} + +/* + * Shut down a socket associated with an nfssvc_sock structure. + * Should be called with the send lock set, if required. + * The trick here is to increment the sref at the start, so that the nfsds + * will stop using it and clear ns_flag at the end so that it will not be + * reassigned during cleanup. 
+ */ +static void +nfsrv_zapsock(slp) + register struct nfssvc_sock *slp; +{ + register struct nfsuid *nuidp, *nnuidp; + register struct nfsrv_descript *nwp, *nnwp; + struct socket *so; + struct file *fp; + struct mbuf *m; + int s; + + slp->ns_flag &= ~SLP_ALLFLAGS; + fp = slp->ns_fp; + if (fp) { + slp->ns_fp = (struct file *)0; + so = slp->ns_so; + thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL); + so->so_upcall = NULL; + so->so_rcv.sb_flags &= ~SB_UPCALL; + soshutdown(so, 2); + thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL); + closef(fp, (struct proc *)0); + if (slp->ns_nam) + MFREE(slp->ns_nam, m); + m_freem(slp->ns_raw); + m_freem(slp->ns_rec); + for (nuidp = slp->ns_uidlruhead.tqh_first; nuidp != 0; + nuidp = nnuidp) { + nnuidp = nuidp->nu_lru.tqe_next; + LIST_REMOVE(nuidp, nu_hash); + TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru); + if (nuidp->nu_flag & NU_NAM) + m_freem(nuidp->nu_nam); + _FREE_ZONE((caddr_t)nuidp, + sizeof (struct nfsuid), M_NFSUID); + } + s = splsoftclock(); + for (nwp = slp->ns_tq.lh_first; nwp; nwp = nnwp) { + nnwp = nwp->nd_tq.le_next; + LIST_REMOVE(nwp, nd_tq); + _FREE_ZONE((caddr_t)nwp, sizeof *nwp, M_NFSRVDESC); + } + LIST_INIT(&slp->ns_tq); + splx(s); + } +} + +/* + * Get an authorization string for the uid by having the mount_nfs sitting + * on this mount point porpous out of the kernel and do it. 
+ */ +int +nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key) + register struct nfsmount *nmp; + struct nfsreq *rep; + struct ucred *cred; + char **auth_str; + int *auth_len; + char *verf_str; + int *verf_len; + NFSKERBKEY_T key; /* return session key */ +{ + int error = 0; + + while ((nmp->nm_flag & NFSMNT_WAITAUTH) == 0) { + nmp->nm_flag |= NFSMNT_WANTAUTH; + (void) tsleep((caddr_t)&nmp->nm_authtype, PSOCK, + "nfsauth1", 2 * hz); + error = nfs_sigintr(nmp, rep, rep->r_procp); + if (error) { + nmp->nm_flag &= ~NFSMNT_WANTAUTH; + return (error); + } + } + nmp->nm_flag &= ~(NFSMNT_WAITAUTH | NFSMNT_WANTAUTH); + MALLOC(*auth_str, char *, RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK); + nmp->nm_authstr = *auth_str; + nmp->nm_authlen = RPCAUTH_MAXSIZ; + nmp->nm_verfstr = verf_str; + nmp->nm_verflen = *verf_len; + nmp->nm_authuid = cred->cr_uid; + wakeup((caddr_t)&nmp->nm_authstr); + + /* + * And wait for mount_nfs to do its stuff. + */ + while ((nmp->nm_flag & NFSMNT_HASAUTH) == 0 && error == 0) { + (void) tsleep((caddr_t)&nmp->nm_authlen, PSOCK, + "nfsauth2", 2 * hz); + error = nfs_sigintr(nmp, rep, rep->r_procp); + } + if (nmp->nm_flag & NFSMNT_AUTHERR) { + nmp->nm_flag &= ~NFSMNT_AUTHERR; + error = EAUTH; + } + if (error) + _FREE((caddr_t)*auth_str, M_TEMP); + else { + *auth_len = nmp->nm_authlen; + *verf_len = nmp->nm_verflen; + bcopy((caddr_t)nmp->nm_key, (caddr_t)key, sizeof (key)); + } + nmp->nm_flag &= ~NFSMNT_HASAUTH; + nmp->nm_flag |= NFSMNT_WAITAUTH; + if (nmp->nm_flag & NFSMNT_WANTAUTH) { + nmp->nm_flag &= ~NFSMNT_WANTAUTH; + wakeup((caddr_t)&nmp->nm_authtype); + } + return (error); +} + +/* + * Get a nickname authenticator and verifier. 
+ */ +int +nfs_getnickauth(nmp, cred, auth_str, auth_len, verf_str, verf_len) + struct nfsmount *nmp; + struct ucred *cred; + char **auth_str; + int *auth_len; + char *verf_str; + int verf_len; +{ + register struct nfsuid *nuidp; + register u_long *nickp, *verfp; + struct timeval ktvin, ktvout; + +#if DIAGNOSTIC + if (verf_len < (4 * NFSX_UNSIGNED)) + panic("nfs_getnickauth verf too small"); +#endif + for (nuidp = NMUIDHASH(nmp, cred->cr_uid)->lh_first; + nuidp != 0; nuidp = nuidp->nu_hash.le_next) { + if (nuidp->nu_cr.cr_uid == cred->cr_uid) + break; + } + if (!nuidp || nuidp->nu_expire < time.tv_sec) + return (EACCES); + + /* + * Move to the end of the lru list (end of lru == most recently used). + */ + TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru); + TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru); + + MALLOC(nickp, u_long *, 2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK); + *nickp++ = txdr_unsigned(RPCAKN_NICKNAME); + *nickp = txdr_unsigned(nuidp->nu_nickname); + *auth_str = (char *)nickp; + *auth_len = 2 * NFSX_UNSIGNED; + + /* + * Now we must encrypt the verifier and package it up. + */ + verfp = (u_long *)verf_str; + *verfp++ = txdr_unsigned(RPCAKN_NICKNAME); + if (time.tv_sec > nuidp->nu_timestamp.tv_sec || + (time.tv_sec == nuidp->nu_timestamp.tv_sec && + time.tv_usec > nuidp->nu_timestamp.tv_usec)) + nuidp->nu_timestamp = time; + else + nuidp->nu_timestamp.tv_usec++; + ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec); + ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec); + + /* + * Now encrypt the timestamp verifier in ecb mode using the session + * key. + */ +#if NFSKERB + XXX +#endif + + *verfp++ = ktvout.tv_sec; + *verfp++ = ktvout.tv_usec; + *verfp = 0; + return (0); +} + +/* + * Save the current nickname in a hash list entry on the mount point. 
+ */ +int +nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep) + register struct nfsmount *nmp; + struct ucred *cred; + int len; + NFSKERBKEY_T key; + struct mbuf **mdp; + char **dposp; + struct mbuf *mrep; +{ + register struct nfsuid *nuidp; + register u_long *tl; + register long t1; + struct mbuf *md = *mdp; + struct timeval ktvin, ktvout; + u_long nick; + char *dpos = *dposp, *cp2; + int deltasec, error = 0; + + if (len == (3 * NFSX_UNSIGNED)) { + nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED); + ktvin.tv_sec = *tl++; + ktvin.tv_usec = *tl++; + nick = fxdr_unsigned(u_long, *tl); + + /* + * Decrypt the timestamp in ecb mode. + */ +#if NFSKERB + XXX +#endif + ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec); + ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec); + deltasec = time.tv_sec - ktvout.tv_sec; + if (deltasec < 0) + deltasec = -deltasec; + /* + * If ok, add it to the hash list for the mount point. + */ + if (deltasec <= NFS_KERBCLOCKSKEW) { + if (nmp->nm_numuids < nuidhash_max) { + nmp->nm_numuids++; + MALLOC_ZONE(nuidp, struct nfsuid *, + sizeof (struct nfsuid), + M_NFSUID, M_WAITOK); + } else { + nuidp = nmp->nm_uidlruhead.tqh_first; + LIST_REMOVE(nuidp, nu_hash); + TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, + nu_lru); + } + nuidp->nu_flag = 0; + nuidp->nu_cr.cr_uid = cred->cr_uid; + nuidp->nu_expire = time.tv_sec + NFS_KERBTTL; + nuidp->nu_timestamp = ktvout; + nuidp->nu_nickname = nick; + bcopy(key, nuidp->nu_key, sizeof (key)); + TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, + nu_lru); + LIST_INSERT_HEAD(NMUIDHASH(nmp, cred->cr_uid), + nuidp, nu_hash); + } + } else + nfsm_adv(nfsm_rndup(len)); +nfsmout: + *mdp = md; + *dposp = dpos; + return (error); +} + +#ifndef NFS_NOSERVER + +/* + * Derefence a server socket structure. If it has no more references and + * is no longer valid, you can throw it away. 
+ */ +void +nfsrv_slpderef(slp) + register struct nfssvc_sock *slp; +{ + if (--(slp->ns_sref) == 0 && (slp->ns_flag & SLP_VALID) == 0) { + TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain); + _FREE((caddr_t)slp, M_NFSSVC); + } +} + +/* + * Initialize the data structures for the server. + * Handshake with any new nfsds starting up to avoid any chance of + * corruption. + */ +void +nfsrv_init(terminating) + int terminating; +{ + register struct nfssvc_sock *slp, *nslp; + + if (nfssvc_sockhead_flag & SLP_INIT) + panic("nfsd init"); + nfssvc_sockhead_flag |= SLP_INIT; + if (terminating) { + for (slp = nfssvc_sockhead.tqh_first; slp != 0; slp = nslp) { + nslp = slp->ns_chain.tqe_next; + if (slp->ns_flag & SLP_VALID) + nfsrv_zapsock(slp); + TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain); + _FREE((caddr_t)slp, M_NFSSVC); + } + nfsrv_cleancache(); /* And clear out server cache */ +/* XXX CSM 12/4/97 Revisit when enabling WebNFS */ +#ifdef notyet + } else + nfs_pub.np_valid = 0; +#else + } +#endif + + TAILQ_INIT(&nfssvc_sockhead); + nfssvc_sockhead_flag &= ~SLP_INIT; + if (nfssvc_sockhead_flag & SLP_WANTINIT) { + nfssvc_sockhead_flag &= ~SLP_WANTINIT; + wakeup((caddr_t)&nfssvc_sockhead); + } + + TAILQ_INIT(&nfsd_head); + nfsd_head_flag &= ~NFSD_CHECKSLP; + + MALLOC(nfs_udpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock), + M_NFSSVC, M_WAITOK); + bzero((caddr_t)nfs_udpsock, sizeof (struct nfssvc_sock)); + TAILQ_INIT(&nfs_udpsock->ns_uidlruhead); + TAILQ_INSERT_HEAD(&nfssvc_sockhead, nfs_udpsock, ns_chain); + + MALLOC(nfs_cltpsock, struct nfssvc_sock *, sizeof(struct nfssvc_sock), + M_NFSSVC, M_WAITOK); + bzero((caddr_t)nfs_cltpsock, sizeof (struct nfssvc_sock)); + TAILQ_INIT(&nfs_cltpsock->ns_uidlruhead); + TAILQ_INSERT_TAIL(&nfssvc_sockhead, nfs_cltpsock, ns_chain); +} + +/* + * Add entries to the server monitor log. 
+ */ +static void +nfsd_rt(sotype, nd, cacherep) + int sotype; + register struct nfsrv_descript *nd; + int cacherep; +{ + register struct drt *rt; + + rt = &nfsdrt.drt[nfsdrt.pos]; + if (cacherep == RC_DOIT) + rt->flag = 0; + else if (cacherep == RC_REPLY) + rt->flag = DRT_CACHEREPLY; + else + rt->flag = DRT_CACHEDROP; + if (sotype == SOCK_STREAM) + rt->flag |= DRT_TCP; + if (nd->nd_flag & ND_NQNFS) + rt->flag |= DRT_NQNFS; + else if (nd->nd_flag & ND_NFSV3) + rt->flag |= DRT_NFSV3; + rt->proc = nd->nd_procnum; + if (mtod(nd->nd_nam, struct sockaddr *)->sa_family == AF_INET) + rt->ipadr = mtod(nd->nd_nam, struct sockaddr_in *)->sin_addr.s_addr; + else + rt->ipadr = INADDR_ANY; + rt->resptime = ((time.tv_sec - nd->nd_starttime.tv_sec) * 1000000) + + (time.tv_usec - nd->nd_starttime.tv_usec); + rt->tstamp = time; + nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ; +} +#endif /* NFS_NOSERVER */ diff --git a/bsd/nfs/nfs_vfsops.c b/bsd/nfs/nfs_vfsops.c new file mode 100644 index 000000000..1df71854c --- /dev/null +++ b/bsd/nfs/nfs_vfsops.c @@ -0,0 +1,1194 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfs_vfsops.c 8.12 (Berkeley) 5/20/95 + * FreeBSD-Id: nfs_vfsops.c,v 1.52 1997/11/12 05:42:21 julian Exp $ + * + * History: + * + * + * 23-May-97 Umesh Vaishampayan (umeshv@apple.com) + * Added the ability to mount "/private" separately. + * Fixed bug which caused incorrect reporting of "mounted on" + * directory name in case of nfs root. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#if !defined(NO_MOUNT_PRIVATE) +#include +#endif /* NO_MOUNT_PRIVATE */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int nfs_mountroot __P((void)); + +extern int nfs_ticks; + +struct nfsstats nfsstats; +static int nfs_sysctl(int *, u_int, void *, size_t *, void *, size_t, + struct proc *); +/* XXX CSM 11/25/97 Upgrade sysctl.h someday */ +#ifdef notyet +SYSCTL_NODE(_vfs, MOUNT_NFS, nfs, CTLFLAG_RW, 0, "NFS filesystem"); +SYSCTL_STRUCT(_vfs_nfs, NFS_NFSSTATS, nfsstats, CTLFLAG_RD, + &nfsstats, nfsstats, ""); +#endif +#if NFSDIAG +int nfs_debug; +/* XXX CSM 11/25/97 Upgrade sysctl.h someday */ +#ifdef notyet +SYSCTL_INT(_vfs_nfs, OID_AUTO, debug, CTLFLAG_RW, &nfs_debug, 0, ""); +#endif +#endif + +static int nfs_iosize __P((struct nfsmount *nmp)); +static int mountnfs __P((struct nfs_args *,struct mount *, + struct mbuf 
*,char *,char *,struct vnode **)); +static int nfs_mount __P(( struct mount *mp, char *path, caddr_t data, + struct nameidata *ndp, struct proc *p)); +static int nfs_start __P(( struct mount *mp, int flags, + struct proc *p)); +static int nfs_unmount __P(( struct mount *mp, int mntflags, + struct proc *p)); +static int nfs_root __P(( struct mount *mp, struct vnode **vpp)); +static int nfs_quotactl __P(( struct mount *mp, int cmds, uid_t uid, + caddr_t arg, struct proc *p)); +static int nfs_statfs __P(( struct mount *mp, struct statfs *sbp, + struct proc *p)); +static int nfs_sync __P(( struct mount *mp, int waitfor, + struct ucred *cred, struct proc *p)); +static int nfs_vptofh __P(( struct vnode *vp, struct fid *fhp)); +static int nfs_fhtovp __P((struct mount *mp, struct fid *fhp, + struct mbuf *nam, struct vnode **vpp, + int *exflagsp, struct ucred **credanonp)); +static int nfs_vget __P((struct mount *, ino_t, struct vnode **)); + + +/* + * nfs vfs operations. + */ +struct vfsops nfs_vfsops = { + nfs_mount, + nfs_start, + nfs_unmount, + nfs_root, + nfs_quotactl, + nfs_statfs, + nfs_sync, + nfs_vget, + nfs_fhtovp, + nfs_vptofh, + nfs_init, + nfs_sysctl +}; +/* XXX CSM 11/25/97 Mysterious kernel.h ld crud */ +#ifdef notyet +VFS_SET(nfs_vfsops, nfs, MOUNT_NFS, VFCF_NETWORK); +#endif + +/* + * This structure must be filled in by a primary bootstrap or bootstrap + * server for a diskless/dataless machine. It is initialized below just + * to ensure that it is allocated to initialized data (.data not .bss). 
+ */ +struct nfs_diskless nfs_diskless = { 0 }; +int nfs_diskless_valid = 0; + +/* XXX CSM 11/25/97 Upgrade sysctl.h someday */ +#ifdef notyet +SYSCTL_INT(_vfs_nfs, OID_AUTO, diskless_valid, CTLFLAG_RD, + &nfs_diskless_valid, 0, ""); + +SYSCTL_STRING(_vfs_nfs, OID_AUTO, diskless_rootpath, CTLFLAG_RD, + nfs_diskless.root_hostnam, 0, ""); + +SYSCTL_OPAQUE(_vfs_nfs, OID_AUTO, diskless_rootaddr, CTLFLAG_RD, + &nfs_diskless.root_saddr, sizeof nfs_diskless.root_saddr, + "%Ssockaddr_in", ""); + +SYSCTL_STRING(_vfs_nfs, OID_AUTO, diskless_swappath, CTLFLAG_RD, + nfs_diskless.swap_hostnam, 0, ""); + +SYSCTL_OPAQUE(_vfs_nfs, OID_AUTO, diskless_swapaddr, CTLFLAG_RD, + &nfs_diskless.swap_saddr, sizeof nfs_diskless.swap_saddr, + "%Ssockaddr_in",""); +#endif + + +void nfsargs_ntoh __P((struct nfs_args *)); +static int +nfs_mount_diskless __P((struct nfs_dlmount *, char *, int, struct vnode **, + struct mount **)); +#if !defined(NO_MOUNT_PRIVATE) +static int +nfs_mount_diskless_private __P((struct nfs_dlmount *, char *, int, + struct vnode **, struct mount **)); +#endif /* NO_MOUNT_PRIVATE */ +static void nfs_convert_oargs __P((struct nfs_args *args, + struct onfs_args *oargs)); +#if NFSDIAG +int nfsreqqusers = 0; +extern int nfsbtlen, nfsbtcpu, nfsbtthread, nfsbt[32]; +#endif + +static int nfs_iosize(nmp) + struct nfsmount* nmp; +{ + int iosize; + + /* + * Calculate the size used for io buffers. Use the larger + * of the two sizes to minimise nfs requests but make sure + * that it is at least one VM page to avoid wasting buffer + * space. 
+ */ + iosize = max(nmp->nm_rsize, nmp->nm_wsize); + if (iosize < PAGE_SIZE) iosize = PAGE_SIZE; +#if 0 + /* XXX UPL changes for UBC do not support multiple pages */ + iosize = PAGE_SIZE; /* XXX FIXME */ +#endif + /* return iosize; */ + return (trunc_page(iosize)); +} + +static void nfs_convert_oargs(args,oargs) + struct nfs_args *args; + struct onfs_args *oargs; +{ + args->version = NFS_ARGSVERSION; + args->addr = oargs->addr; + args->addrlen = oargs->addrlen; + args->sotype = oargs->sotype; + args->proto = oargs->proto; + args->fh = oargs->fh; + args->fhsize = oargs->fhsize; + args->flags = oargs->flags; + args->wsize = oargs->wsize; + args->rsize = oargs->rsize; + args->readdirsize = oargs->readdirsize; + args->timeo = oargs->timeo; + args->retrans = oargs->retrans; + args->maxgrouplist = oargs->maxgrouplist; + args->readahead = oargs->readahead; + args->leaseterm = oargs->leaseterm; + args->deadthresh = oargs->deadthresh; + args->hostname = oargs->hostname; +} + +/* + * nfs statfs call + */ +int +nfs_statfs(mp, sbp, p) + struct mount *mp; + register struct statfs *sbp; + struct proc *p; +{ + register struct vnode *vp; + register struct nfs_statfs *sfp; + register caddr_t cp; + register u_long *tl; + register long t1, t2; + caddr_t bpos, dpos, cp2; + struct nfsmount *nmp = VFSTONFS(mp); + int error = 0, v3 = (nmp->nm_flag & NFSMNT_NFSV3), retattr; + struct mbuf *mreq, *mrep, *md, *mb, *mb2; + struct ucred *cred; + u_quad_t tquad; + extern int nfs_mount_type; + +#ifndef nolint + sfp = (struct nfs_statfs *)0; +#endif + vp = nmp->nm_dvp; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + cred = crget(); + cred->cr_ngroups = 1; + if (v3 && (nmp->nm_flag & NFSMNT_GOTFSINFO) == 0) + (void)nfs_fsinfo(nmp, vp, cred, p); + nfsstats.rpccnt[NFSPROC_FSSTAT]++; + nfsm_reqhead(vp, NFSPROC_FSSTAT, NFSX_FH(v3)); + nfsm_fhtom(vp, v3); + nfsm_request(vp, NFSPROC_FSSTAT, p, cred); + if (v3) + nfsm_postop_attr(vp, retattr); + nfsm_dissect(sfp, struct nfs_statfs *, NFSX_STATFS(v3)); + +/* 
XXX CSM 12/2/97 Cleanup when/if we integrate FreeBSD mount.h */ +#ifdef notyet + sbp->f_type = MOUNT_NFS; +#else + sbp->f_type = nfs_mount_type; +#endif + sbp->f_flags = nmp->nm_flag; + sbp->f_iosize = nfs_iosize(nmp); + if (v3) { + sbp->f_bsize = NFS_FABLKSIZE; + fxdr_hyper(&sfp->sf_tbytes, &tquad); + sbp->f_blocks = (long)(tquad / ((u_quad_t)NFS_FABLKSIZE)); + fxdr_hyper(&sfp->sf_fbytes, &tquad); + sbp->f_bfree = (long)(tquad / ((u_quad_t)NFS_FABLKSIZE)); + fxdr_hyper(&sfp->sf_abytes, &tquad); + sbp->f_bavail = (long)(tquad / ((u_quad_t)NFS_FABLKSIZE)); + sbp->f_files = (fxdr_unsigned(long, sfp->sf_tfiles.nfsuquad[1]) + & 0x7fffffff); + sbp->f_ffree = (fxdr_unsigned(long, sfp->sf_ffiles.nfsuquad[1]) + & 0x7fffffff); + } else { + sbp->f_bsize = fxdr_unsigned(long, sfp->sf_bsize); + sbp->f_blocks = fxdr_unsigned(long, sfp->sf_blocks); + sbp->f_bfree = fxdr_unsigned(long, sfp->sf_bfree); + sbp->f_bavail = fxdr_unsigned(long, sfp->sf_bavail); + sbp->f_files = 0; + sbp->f_ffree = 0; + } + if (sbp != &mp->mnt_stat) { + bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN); + bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN); + } + nfsm_reqdone; + VOP_UNLOCK(vp, 0, p); + crfree(cred); + return (error); +} + +/* + * nfs version 3 fsinfo rpc call + */ +int +nfs_fsinfo(nmp, vp, cred, p) + register struct nfsmount *nmp; + register struct vnode *vp; + struct ucred *cred; + struct proc *p; +{ + register struct nfsv3_fsinfo *fsp; + register caddr_t cp; + register long t1, t2; + register u_long *tl, pref, max; + caddr_t bpos, dpos, cp2; + int error = 0, retattr; + struct mbuf *mreq, *mrep, *md, *mb, *mb2; + + nfsstats.rpccnt[NFSPROC_FSINFO]++; + nfsm_reqhead(vp, NFSPROC_FSINFO, NFSX_FH(1)); + nfsm_fhtom(vp, 1); + nfsm_request(vp, NFSPROC_FSINFO, p, cred); + nfsm_postop_attr(vp, retattr); + if (!error) { + nfsm_dissect(fsp, struct nfsv3_fsinfo *, NFSX_V3FSINFO); + pref = fxdr_unsigned(u_long, fsp->fs_wtpref); + if (pref < nmp->nm_wsize) + nmp->nm_wsize = 
(pref + NFS_FABLKSIZE - 1) & + ~(NFS_FABLKSIZE - 1); + max = fxdr_unsigned(u_long, fsp->fs_wtmax); + if (max < nmp->nm_wsize) { + nmp->nm_wsize = max & ~(NFS_FABLKSIZE - 1); + if (nmp->nm_wsize == 0) + nmp->nm_wsize = max; + } + pref = fxdr_unsigned(u_long, fsp->fs_rtpref); + if (pref < nmp->nm_rsize) + nmp->nm_rsize = (pref + NFS_FABLKSIZE - 1) & + ~(NFS_FABLKSIZE - 1); + max = fxdr_unsigned(u_long, fsp->fs_rtmax); + if (max < nmp->nm_rsize) { + nmp->nm_rsize = max & ~(NFS_FABLKSIZE - 1); + if (nmp->nm_rsize == 0) + nmp->nm_rsize = max; + } + pref = fxdr_unsigned(u_long, fsp->fs_dtpref); + if (pref < nmp->nm_readdirsize) + nmp->nm_readdirsize = pref; + if (max < nmp->nm_readdirsize) { + nmp->nm_readdirsize = max; + } + nmp->nm_flag |= NFSMNT_GOTFSINFO; + } + nfsm_reqdone; + return (error); +} + +/* + * Mount a remote root fs via. nfs. This depends on the info in the + * nfs_diskless structure that has been filled in properly by some primary + * bootstrap. + * It goes something like this: + * - do enough of "ifconfig" by calling ifioctl() so that the system + * can talk to the server + * - If nfs_diskless.mygateway is filled in, use that address as + * a default gateway. + * - hand craft the swap nfs vnode hanging off a fake mount point + * if swdevt[0].sw_dev == NODEV + * - build the rootfs mount point and call mountnfs() to do the rest. + */ +int +nfs_mountroot() +{ + struct nfs_diskless nd; + struct vattr attr; + struct mount *mp; + struct vnode *vp; + struct proc *procp; + long n; + int error; +#if !defined(NO_MOUNT_PRIVATE) + struct mount *mppriv; + struct vnode *vppriv; +#endif /* NO_MOUNT_PRIVATE */ + + procp = current_proc(); /* XXX */ + + /* + * Call nfs_boot_init() to fill in the nfs_diskless struct. + * Side effect: Finds and configures a network interface. + */ + bzero((caddr_t) &nd, sizeof(nd)); + nfs_boot_init(&nd, procp); + + /* + * Create the root mount point. 
+ */ +#if !defined(NO_MOUNT_PRIVATE) + if ((error = nfs_mount_diskless(&nd.nd_root, "/", MNT_RDONLY, &vp, &mp))) { +#else + if (error = nfs_mount_diskless(&nd.nd_root, "/", NULL, &vp, &mp)) { +#endif /* NO_MOUNT_PRIVATE */ + return(error); + } + printf("root on %s\n", (char *)&nd.nd_root.ndm_host); + + simple_lock(&mountlist_slock); + CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); + simple_unlock(&mountlist_slock); + vfs_unbusy(mp, procp); + rootvp = vp; + +#if !defined(NO_MOUNT_PRIVATE) + if (nd.nd_private.ndm_saddr.sin_addr.s_addr) { + error = nfs_mount_diskless_private(&nd.nd_private, "/private", + NULL, &vppriv, &mppriv); + if (error) + return(error); + printf("private on %s\n", (char *)&nd.nd_private.ndm_host); + + simple_lock(&mountlist_slock); + CIRCLEQ_INSERT_TAIL(&mountlist, mppriv, mnt_list); + simple_unlock(&mountlist_slock); + vfs_unbusy(mppriv, procp); + } + +#endif /* NO_MOUNT_PRIVATE */ + + /* Get root attributes (for the time). */ + error = VOP_GETATTR(vp, &attr, procp->p_ucred, procp); + if (error) panic("nfs_mountroot: getattr for root"); + n = attr.va_mtime.tv_sec; + inittodr(n); + return (0); +} + +/* + * Internal version of mount system call for diskless setup. + */ +static int +nfs_mount_diskless(ndmntp, mntname, mntflag, vpp, mpp) + struct nfs_dlmount *ndmntp; + char *mntname; + int mntflag; + struct vnode **vpp; + struct mount **mpp; +{ + struct nfs_args args; + struct mount *mp; + struct mbuf *m; + int error; + struct proc *procp; + + procp = current_proc(); /* XXX */ + + if ((error = vfs_rootmountalloc("nfs", ndmntp->ndm_host, &mp))) { + printf("nfs_mountroot: NFS not configured"); + return (error); + } + mp->mnt_flag = mntflag; + + /* Initialize mount args. 
*/ + bzero((caddr_t) &args, sizeof(args)); + args.addr = (struct sockaddr *)&ndmntp->ndm_saddr; + args.addrlen = args.addr->sa_len; + args.sotype = SOCK_DGRAM; + args.fh = ndmntp->ndm_fh; + args.fhsize = NFSX_V2FH; + args.hostname = ndmntp->ndm_host; + args.flags = NFSMNT_RESVPORT; + + MGET(m, M_DONTWAIT, MT_SONAME); + bcopy((caddr_t)args.addr, mtod(m, caddr_t), + (m->m_len = args.addr->sa_len)); + if ((error = mountnfs(&args, mp, m, mntname, args.hostname, vpp))) { + printf("nfs_mountroot: mount %s failed: %d", mntname, error); + mp->mnt_vfc->vfc_refcount--; + vfs_unbusy(mp, procp); + _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + return (error); + } +#if 0 /* Causes incorrect reporting of "mounted on" */ + (void) copystr(args.hostname, mp->mnt_stat.f_mntonname, MNAMELEN - 1, 0); +#endif /* 0 */ + *mpp = mp; + return (0); +} + +#if !defined(NO_MOUNT_PRIVATE) +/* + * Internal version of mount system call to mount "/private" + * separately in diskless setup + */ +static int +nfs_mount_diskless_private(ndmntp, mntname, mntflag, vpp, mpp) + struct nfs_dlmount *ndmntp; + char *mntname; + int mntflag; + struct vnode **vpp; + struct mount **mpp; +{ + struct nfs_args args; + struct mount *mp; + struct mbuf *m; + int error; + struct proc *procp; + struct vfsconf *vfsp; + struct nameidata nd; + struct vnode *vp; + + procp = current_proc(); /* XXX */ + + { + /* + * mimic main()!. Temporarily set up rootvnode and other stuff so + * that namei works. Need to undo this because main() does it, too + */ + struct filedesc *fdp; /* pointer to file descriptor state */ + fdp = procp->p_fd; + mountlist.cqh_first->mnt_flag |= MNT_ROOTFS; + + /* Get the vnode for '/'. Set fdp->fd_cdir to reference it. 
*/ + if (VFS_ROOT(mountlist.cqh_first, &rootvnode)) + panic("cannot find root vnode"); + fdp->fd_cdir = rootvnode; + VREF(fdp->fd_cdir); + VOP_UNLOCK(rootvnode, 0, procp); + fdp->fd_rdir = NULL; + } + + /* + * Get vnode to be covered + */ + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, + mntname, procp); + if ((error = namei(&nd))) { + printf("nfs_mountroot: private namei failed!"); + return (error); + } + { + /* undo VREF in mimic main()! */ + vrele(rootvnode); + } + vp = nd.ni_vp; + if ((error = vinvalbuf(vp, V_SAVE, procp->p_ucred, procp, 0, 0))) { + vput(vp); + return (error); + } + if (vp->v_type != VDIR) { + vput(vp); + return (ENOTDIR); + } + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (!strcmp(vfsp->vfc_name, "nfs")) + break; + if (vfsp == NULL) { + printf("nfs_mountroot: private NFS not configured"); + vput(vp); + return (ENODEV); + } + if (vp->v_mountedhere != NULL) { + vput(vp); + return (EBUSY); + } + + /* + * Allocate and initialize the filesystem. + */ + mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); + bzero((char *)mp, (u_long)sizeof(struct mount)); + lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0); + (void)vfs_busy(mp, LK_NOWAIT, 0, procp); + LIST_INIT(&mp->mnt_vnodelist); + mp->mnt_op = vfsp->vfc_vfsops; + mp->mnt_vfc = vfsp; + vfsp->vfc_refcount++; + mp->mnt_stat.f_type = vfsp->vfc_typenum; + mp->mnt_flag = mntflag; + mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; + strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); + vp->v_mountedhere = mp; + mp->mnt_vnodecovered = vp; + mp->mnt_stat.f_owner = procp->p_ucred->cr_uid; + (void) copystr(mntname, mp->mnt_stat.f_mntonname, MNAMELEN - 1, 0); + (void) copystr(ndmntp->ndm_host, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); + + /* Initialize mount args. 
*/ + bzero((caddr_t) &args, sizeof(args)); + args.addr = (struct sockaddr *)&ndmntp->ndm_saddr; + args.addrlen = args.addr->sa_len; + args.sotype = SOCK_DGRAM; + args.fh = ndmntp->ndm_fh; + args.fhsize = NFSX_V2FH; + args.hostname = ndmntp->ndm_host; + args.flags = NFSMNT_RESVPORT; + + MGET(m, M_DONTWAIT, MT_SONAME); + bcopy((caddr_t)args.addr, mtod(m, caddr_t), + (m->m_len = args.addr->sa_len)); + if ((error = mountnfs(&args, mp, m, mntname, args.hostname, &vp))) { + printf("nfs_mountroot: mount %s failed: %d", mntname, error); + mp->mnt_vfc->vfc_refcount--; + vfs_unbusy(mp, procp); + _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + return (error); + } + + *mpp = mp; + *vpp = vp; + return (0); +} +#endif /* NO_MOUNT_PRIVATE */ + +/* + * VFS Operations. + * + * mount system call + * It seems a bit dumb to copyinstr() the host and path here and then + * bcopy() them in mountnfs(), but I wanted to detect errors before + * doing the sockargs() call because sockargs() allocates an mbuf and + * an error after that means that I have to release the mbuf. + */ +/* ARGSUSED */ +static int +nfs_mount(mp, path, data, ndp, p) + struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + int error; + struct nfs_args args; + struct mbuf *nam; + struct vnode *vp; + char pth[MNAMELEN], hst[MNAMELEN]; + u_int len; + u_char nfh[NFSX_V3FHMAX]; + + error = copyin(data, (caddr_t)&args, sizeof (struct nfs_args)); + if (error) + return (error); + if (args.version != NFS_ARGSVERSION) { +#ifndef NO_COMPAT_PRELITE2 + /* + * If the argument version is unknown, then assume the + * caller is a pre-lite2 4.4BSD client and convert its + * arguments. 
+ */ + struct onfs_args oargs; + error = copyin(data, (caddr_t)&oargs, sizeof (struct onfs_args)); + if (error) + return (error); + nfs_convert_oargs(&args,&oargs); +#else /* NO_COMPAT_PRELITE2 */ + return (EPROGMISMATCH); +#endif /* !NO_COMPAT_PRELITE2 */ + } + if (args.fhsize > NFSX_V3FHMAX) + return (EINVAL); + error = copyin((caddr_t)args.fh, (caddr_t)nfh, args.fhsize); + if (error) + return (error); + error = copyinstr(path, pth, MNAMELEN-1, &len); + if (error) + return (error); + bzero(&pth[len], MNAMELEN - len); + error = copyinstr(args.hostname, hst, MNAMELEN-1, &len); + if (error) + return (error); + bzero(&hst[len], MNAMELEN - len); + /* sockargs() call must be after above copyin() calls */ + error = sockargs(&nam, (caddr_t)args.addr, args.addrlen, MT_SONAME); + if (error) + return (error); + args.fh = nfh; + error = mountnfs(&args, mp, nam, pth, hst, &vp); + return (error); +} + +/* + * Common code for mount and mountroot + */ +static int +mountnfs(argp, mp, nam, pth, hst, vpp) + register struct nfs_args *argp; + register struct mount *mp; + struct mbuf *nam; + char *pth, *hst; + struct vnode **vpp; +{ + register struct nfsmount *nmp; + struct nfsnode *np; + int error, maxio; + struct vattr attrs; + struct proc *curproc; + + /* + * turning off NQNFS until we have further testing + * with UBC changes, in particular, nfs_pagein and nfs_pageout. + * Those have NQNFS defined out in conjunction with this + * returning an error. Remove when fully tested. 
+ */ + if (argp->flags & NFSMNT_NQNFS) { + error = NFSERR_NOTSUPP; + goto bad2; + } + + if (mp->mnt_flag & MNT_UPDATE) { + nmp = VFSTONFS(mp); + /* update paths, file handles, etc, here XXX */ + m_freem(nam); + return (0); + } else { + MALLOC_ZONE(nmp, struct nfsmount *, + sizeof (struct nfsmount), M_NFSMNT, M_WAITOK); + bzero((caddr_t)nmp, sizeof (struct nfsmount)); + TAILQ_INIT(&nmp->nm_uidlruhead); + TAILQ_INIT(&nmp->nm_bufq); + mp->mnt_data = (qaddr_t)nmp; + } + vfs_getnewfsid(mp); + nmp->nm_mountp = mp; + nmp->nm_flag = argp->flags; + if (nmp->nm_flag & NFSMNT_NQNFS) + /* + * We have to set mnt_maxsymlink to a non-zero value so + * that COMPAT_43 routines will know that we are setting + * the d_type field in directories (and can zero it for + * unsuspecting binaries). + */ + mp->mnt_maxsymlinklen = 1; + nmp->nm_timeo = NFS_TIMEO; + nmp->nm_retry = NFS_RETRANS; + nmp->nm_wsize = NFS_WSIZE; + nmp->nm_rsize = NFS_RSIZE; + nmp->nm_readdirsize = NFS_READDIRSIZE; + nmp->nm_numgrps = NFS_MAXGRPS; + nmp->nm_readahead = NFS_DEFRAHEAD; + nmp->nm_leaseterm = NQ_DEFLEASE; + nmp->nm_deadthresh = NQ_DEADTHRESH; + CIRCLEQ_INIT(&nmp->nm_timerhead); + nmp->nm_inprog = NULLVP; + bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN); + bcopy(pth, mp->mnt_stat.f_mntonname, MNAMELEN); + nmp->nm_nam = nam; + + /* + * Silently clear NFSMNT_NOCONN if it's a TCP mount, it makes + * no sense in that context. 
+ */ + if (argp->sotype == SOCK_STREAM) + argp->flags &= ~NFSMNT_NOCONN; + + if ((argp->flags & NFSMNT_TIMEO) && argp->timeo > 0) { + nmp->nm_timeo = (argp->timeo * NFS_HZ + 5) / 10; + if (nmp->nm_timeo < NFS_MINTIMEO) + nmp->nm_timeo = NFS_MINTIMEO; + else if (nmp->nm_timeo > NFS_MAXTIMEO) + nmp->nm_timeo = NFS_MAXTIMEO; + } + + if ((argp->flags & NFSMNT_RETRANS) && argp->retrans > 1) { + nmp->nm_retry = argp->retrans; + if (nmp->nm_retry > NFS_MAXREXMIT) + nmp->nm_retry = NFS_MAXREXMIT; + } + + if (argp->flags & NFSMNT_NFSV3) { + if (argp->sotype == SOCK_DGRAM) + maxio = NFS_MAXDGRAMDATA; + else + maxio = NFS_MAXDATA; + } else + maxio = NFS_V2MAXDATA; + + if ((argp->flags & NFSMNT_WSIZE) && argp->wsize > 0) { + nmp->nm_wsize = argp->wsize; + /* Round down to multiple of blocksize */ + nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1); + if (nmp->nm_wsize <= 0) + nmp->nm_wsize = NFS_FABLKSIZE; + } + if (nmp->nm_wsize > maxio) + nmp->nm_wsize = maxio; + if (nmp->nm_wsize > MAXBSIZE) + nmp->nm_wsize = MAXBSIZE; + + if ((argp->flags & NFSMNT_RSIZE) && argp->rsize > 0) { + nmp->nm_rsize = argp->rsize; + /* Round down to multiple of blocksize */ + nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1); + if (nmp->nm_rsize <= 0) + nmp->nm_rsize = NFS_FABLKSIZE; + } + if (nmp->nm_rsize > maxio) + nmp->nm_rsize = maxio; + if (nmp->nm_rsize > MAXBSIZE) + nmp->nm_rsize = MAXBSIZE; + + if ((argp->flags & NFSMNT_READDIRSIZE) && argp->readdirsize > 0) { + nmp->nm_readdirsize = argp->readdirsize; + } + if (nmp->nm_readdirsize > maxio) + nmp->nm_readdirsize = maxio; + if (nmp->nm_readdirsize > nmp->nm_rsize) + nmp->nm_readdirsize = nmp->nm_rsize; + + if ((argp->flags & NFSMNT_MAXGRPS) && argp->maxgrouplist >= 0 && + argp->maxgrouplist <= NFS_MAXGRPS) + nmp->nm_numgrps = argp->maxgrouplist; + if ((argp->flags & NFSMNT_READAHEAD) && argp->readahead >= 0 && + argp->readahead <= NFS_MAXRAHEAD) + nmp->nm_readahead = argp->readahead; + if ((argp->flags & NFSMNT_LEASETERM) && argp->leaseterm >= 2 && + 
argp->leaseterm <= NQ_MAXLEASE) + nmp->nm_leaseterm = argp->leaseterm; + if ((argp->flags & NFSMNT_DEADTHRESH) && argp->deadthresh >= 1 && + argp->deadthresh <= NQ_NEVERDEAD) + nmp->nm_deadthresh = argp->deadthresh; + /* Set up the sockets and per-host congestion */ + nmp->nm_sotype = argp->sotype; + nmp->nm_soproto = argp->proto; + + /* + * For Connection based sockets (TCP,...) defer the connect until + * the first request, in case the server is not responding. + */ + if (nmp->nm_sotype == SOCK_DGRAM && + (error = nfs_connect(nmp, (struct nfsreq *)0))) + goto bad; + + /* + * This is silly, but it has to be set so that vinifod() works. + * We do not want to do an nfs_statfs() here since we can get + * stuck on a dead server and we are holding a lock on the mount + * point. + */ + mp->mnt_stat.f_iosize = nfs_iosize(nmp); + /* + * A reference count is needed on the nfsnode representing the + * remote root. If this object is not persistent, then backward + * traversals of the mount point (i.e. "..") will not work if + * the nfsnode gets flushed out of the cache. UFS does not have + * this problem, because one can identify root inodes by their + * number == ROOTINO (2). + */ + error = nfs_nget(mp, (nfsfh_t *)argp->fh, argp->fhsize, &np); + if (error) + goto bad; + + /* + * save this vnode pointer. That way nfs_unmount() + * does not need to call nfs_net() just get it to drop + * this vnode reference. + */ + nmp->nm_dvp = *vpp = NFSTOV(np); + + /* + * Get file attributes for the mountpoint. This has the side + * effect of filling in (*vpp)->v_type with the correct value. + */ + curproc = current_proc(); + VOP_GETATTR(*vpp, &attrs, curproc->p_ucred, curproc); + + /* + * Lose the lock but keep the ref. 
+ */ + VOP_UNLOCK(*vpp, 0, curproc); + + return (0); +bad: + nfs_disconnect(nmp); + _FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); +bad2: + m_freem(nam); + return (error); +} + + +/* + * unmount system call + */ +static int +nfs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + register struct nfsmount *nmp; + struct vnode *vp; + int error, flags = 0; + + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + nmp = VFSTONFS(mp); + /* + * Goes something like this.. + * - Check for activity on the root vnode (other than ourselves). + * - Call vflush() to clear out vnodes for this file system, + * except for the root vnode. + * - Decrement reference on the vnode representing remote root. + * - Close the socket + * - Free up the data structures + */ + /* + * We need to decrement the ref. count on the nfsnode representing + * the remote root. See comment in mountnfs(). The VFS unmount() + * has done vput on this vnode, otherwise we would get deadlock! + */ + vp = nmp->nm_dvp; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (vp->v_usecount > 1) { + VOP_UNLOCK(vp, 0, p); + return (EBUSY); + } + + /* + * Must handshake with nqnfs_clientd() if it is active. + */ + nmp->nm_flag |= NFSMNT_DISMINPROG; + while (nmp->nm_inprog != NULLVP) + (void) tsleep((caddr_t)&lbolt, PSOCK, "nfsdism", 0); + error = vflush(mp, vp, flags); + if (error) { + VOP_UNLOCK(vp, 0, p); + nmp->nm_flag &= ~NFSMNT_DISMINPROG; + return (error); + } + + /* + * We are now committed to the unmount. + * For NQNFS, let the server daemon free the nfsmount structure. 
+ */ + if (nmp->nm_flag & (NFSMNT_NQNFS | NFSMNT_KERB)) + nmp->nm_flag |= NFSMNT_DISMNT; + + /* + * Release the root vnode reference held by mountnfs() + */ + vput(vp); + vgone(vp); + nfs_disconnect(nmp); + m_freem(nmp->nm_nam); + + if ((nmp->nm_flag & (NFSMNT_NQNFS | NFSMNT_KERB)) == 0) { + register struct nfsreq *rp; + /* + * Loop through outstanding request list and remove dangling + * references to defunct nfsmount struct + */ +#if NFSDIAG && 0 + if (hw_atomic_add(&nfsreqqusers, 1) != 1) + nfsatompanic("unmount add"); + nfsbtlen = backtrace(&nfsbt, sizeof(nfsbt)); + nfsbtcpu = cpu_number(); + nfsbtthread = (int)(current_thread()); +#endif + + for (rp = nfs_reqq.tqh_first; rp; rp = rp->r_chain.tqe_next) + if (rp->r_nmp == nmp) + rp->r_nmp = (struct nfsmount *)0; +#if NFSDIAG && 0 + if (hw_atomic_sub(&nfsreqqusers, 1) != 0) + nfsatompanic("unmount sub"); +#endif + _FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); + } + return (0); +} + +/* + * Return root of a filesystem + */ +static int +nfs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + register struct vnode *vp; + struct nfsmount *nmp; + int error; + + nmp = VFSTONFS(mp); + vp = nmp->nm_dvp; + error = vget(vp, LK_EXCLUSIVE, current_proc()); + if (error) + return (error); + if (vp->v_type == VNON) + vp->v_type = VDIR; + vp->v_flag |= VROOT; + *vpp = vp; + return (0); +} + +extern int syncprt; + +/* + * Flush out the buffer cache + */ +/* ARGSUSED */ +static int +nfs_sync(mp, waitfor, cred, p) + struct mount *mp; + int waitfor; + struct ucred *cred; + struct proc *p; +{ + register struct vnode *vp; + int error, allerror = 0; + + /* + * Force stale buffer cache information to be flushed. + */ +loop: + for (vp = mp->mnt_vnodelist.lh_first; + vp != NULL; + vp = vp->v_mntvnodes.le_next) { + /* + * If the vnode that we are about to sync is no longer + * associated with this mount point, start over. 
+ */ + if (vp->v_mount != mp) + goto loop; + if (VOP_ISLOCKED(vp) || vp->v_dirtyblkhd.lh_first == NULL) + continue; + if (vget(vp, LK_EXCLUSIVE, p)) + goto loop; + error = VOP_FSYNC(vp, cred, waitfor, p); + if (error) + allerror = error; + vput(vp); + } + return (allerror); +} + +/* + * NFS flat namespace lookup. + * Currently unsupported. + */ +/* ARGSUSED */ +static int +nfs_vget(mp, ino, vpp) + struct mount *mp; + ino_t ino; + struct vnode **vpp; +{ + + return (EOPNOTSUPP); +} + +/* + * At this point, this should never happen + */ +/* ARGSUSED */ +static int +nfs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp) + register struct mount *mp; + struct fid *fhp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred **credanonp; +{ + + return (EINVAL); +} + +/* + * Vnode pointer to File handle, should never happen either + */ +/* ARGSUSED */ +static int +nfs_vptofh(vp, fhp) + struct vnode *vp; + struct fid *fhp; +{ + + return (EINVAL); +} + +/* + * Vfs start routine, a no-op. + */ +/* ARGSUSED */ +static int +nfs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); +} + +/* + * Do operations associated with quotas, not supported + */ +/* ARGSUSED */ +static int +nfs_quotactl(mp, cmd, uid, arg, p) + struct mount *mp; + int cmd; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + + return (EOPNOTSUPP); +} + +/* + * Do that sysctl thang... + */ +static int +nfs_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, + size_t newlen, struct proc *p) +{ + int rv; + + /* + * All names at this level are terminal. 
+ */ + if(namelen > 1) + return ENOTDIR; /* overloaded */ + + switch(name[0]) { + case NFS_NFSSTATS: + if(!oldp) { + *oldlenp = sizeof nfsstats; + return 0; + } + + if(*oldlenp < sizeof nfsstats) { + *oldlenp = sizeof nfsstats; + return ENOMEM; + } + + rv = copyout(&nfsstats, oldp, sizeof nfsstats); + if(rv) return rv; + + if(newp && newlen != sizeof nfsstats) + return EINVAL; + + if(newp) { + return copyin(newp, &nfsstats, sizeof nfsstats); + } + return 0; + + default: + return EOPNOTSUPP; + } +} + diff --git a/bsd/nfs/nfs_vnops.c b/bsd/nfs/nfs_vnops.c new file mode 100644 index 000000000..06f5961e3 --- /dev/null +++ b/bsd/nfs/nfs_vnops.c @@ -0,0 +1,4632 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95 + * FreeBSD-Id: nfs_vnops.c,v 1.72 1997/11/07 09:20:48 phk Exp $ + */ + + +/* + * vnode op calls for Sun NFS version 2 and 3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#define TRUE 1 +#define FALSE 0 + +static int nfsspec_read __P((struct vop_read_args *)); +static int nfsspec_write __P((struct vop_write_args *)); +static int nfsfifo_read __P((struct vop_read_args *)); +static int nfsfifo_write __P((struct vop_write_args *)); +static int nfsspec_close __P((struct vop_close_args *)); +static int nfsfifo_close __P((struct vop_close_args *)); +#define nfs_poll vop_nopoll +static int nfs_ioctl __P((struct vop_ioctl_args *)); +static int nfs_select __P((struct vop_select_args *)); +static int nfs_flush __P((struct vnode *,struct ucred *,int,struct proc *,int)); +static int nfs_setattrrpc __P((struct vnode *,struct vattr *,struct ucred *,struct proc *)); +static int nfs_lookup __P((struct vop_lookup_args *)); +static int nfs_create __P((struct vop_create_args *)); +static int nfs_mknod __P((struct vop_mknod_args *)); +static int nfs_open __P((struct vop_open_args *)); +static int nfs_close __P((struct vop_close_args *)); +static int nfs_access __P((struct vop_access_args *)); +static int nfs_getattr __P((struct vop_getattr_args *)); +static int nfs_setattr __P((struct vop_setattr_args *)); +static int nfs_read __P((struct vop_read_args *)); +static int nfs_mmap __P((struct vop_mmap_args *)); +static int nfs_fsync __P((struct vop_fsync_args *)); +static int nfs_remove __P((struct vop_remove_args *)); +static int nfs_link __P((struct vop_link_args *)); +static 
int nfs_rename __P((struct vop_rename_args *)); +static int nfs_mkdir __P((struct vop_mkdir_args *)); +static int nfs_rmdir __P((struct vop_rmdir_args *)); +static int nfs_symlink __P((struct vop_symlink_args *)); +static int nfs_readdir __P((struct vop_readdir_args *)); +static int nfs_bmap __P((struct vop_bmap_args *)); +static int nfs_strategy __P((struct vop_strategy_args *)); +static int nfs_lookitup __P((struct vnode *,char *,int,struct ucred *,struct proc *,struct nfsnode **)); +static int nfs_sillyrename __P((struct vnode *,struct vnode *,struct componentname *)); +static int nfsspec_access __P((struct vop_access_args *)); +static int nfs_readlink __P((struct vop_readlink_args *)); +static int nfs_print __P((struct vop_print_args *)); +static int nfs_pathconf __P((struct vop_pathconf_args *)); +static int nfs_advlock __P((struct vop_advlock_args *)); +static int nfs_blkatoff __P((struct vop_blkatoff_args *)); +static int nfs_bwrite __P((struct vop_bwrite_args *)); +static int nfs_valloc __P((struct vop_valloc_args *)); +static int nfs_vfree __P((struct vop_vfree_args *)); +static int nfs_truncate __P((struct vop_truncate_args *)); +static int nfs_update __P((struct vop_update_args *)); +static int nfs_pagein __P((struct vop_pagein_args *)); +static int nfs_pageout __P((struct vop_pageout_args *)); +static int nfs_blktooff __P((struct vop_blktooff_args *)); +static int nfs_offtoblk __P((struct vop_offtoblk_args *)); +static int nfs_cmap __P((struct vop_cmap_args *)); + +/* + * Global vfs data structures for nfs + */ +vop_t **nfsv2_vnodeop_p; +static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { + { &vop_default_desc, (vop_t *)vn_default_error }, + { &vop_lookup_desc, (vop_t *)nfs_lookup }, /* lookup */ + { &vop_create_desc, (vop_t *)nfs_create }, /* create */ + { &vop_mknod_desc, (vop_t *)nfs_mknod }, /* mknod */ + { &vop_open_desc, (vop_t *)nfs_open }, /* open */ + { &vop_close_desc, (vop_t *)nfs_close }, /* close */ + { &vop_access_desc, (vop_t 
*)nfs_access }, /* access */ + { &vop_getattr_desc, (vop_t *)nfs_getattr }, /* getattr */ + { &vop_setattr_desc, (vop_t *)nfs_setattr }, /* setattr */ + { &vop_read_desc, (vop_t *)nfs_read }, /* read */ + { &vop_write_desc, (vop_t *)nfs_write }, /* write */ + { &vop_lease_desc, (vop_t *)nfs_lease_check }, /* lease */ + { &vop_ioctl_desc, (vop_t *)nfs_ioctl }, /* ioctl */ + { &vop_select_desc, (vop_t *)nfs_select }, /* select */ + { &vop_revoke_desc, (vop_t *)nfs_revoke }, /* revoke */ + { &vop_mmap_desc, (vop_t *)nfs_mmap }, /* mmap */ + { &vop_fsync_desc, (vop_t *)nfs_fsync }, /* fsync */ + { &vop_seek_desc, (vop_t *)nfs_seek }, /* seek */ + { &vop_remove_desc, (vop_t *)nfs_remove }, /* remove */ + { &vop_link_desc, (vop_t *)nfs_link }, /* link */ + { &vop_rename_desc, (vop_t *)nfs_rename }, /* rename */ + { &vop_mkdir_desc, (vop_t *)nfs_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (vop_t *)nfs_rmdir }, /* rmdir */ + { &vop_symlink_desc, (vop_t *)nfs_symlink }, /* symlink */ + { &vop_readdir_desc, (vop_t *)nfs_readdir }, /* readdir */ + { &vop_readlink_desc, (vop_t *)nfs_readlink }, /* readlink */ + { &vop_abortop_desc, (vop_t *)nfs_abortop }, /* abortop */ + { &vop_inactive_desc, (vop_t *)nfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (vop_t *)nfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (vop_t *)nfs_lock }, /* lock */ + { &vop_unlock_desc, (vop_t *)nfs_unlock }, /* unlock */ + { &vop_bmap_desc, (vop_t *)nfs_bmap }, /* bmap */ + { &vop_strategy_desc, (vop_t *)nfs_strategy }, /* strategy */ + { &vop_print_desc, (vop_t *)nfs_print }, /* print */ + { &vop_islocked_desc, (vop_t *)nfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (vop_t *)nfs_pathconf }, /* pathconf */ + { &vop_advlock_desc, (vop_t *)nfs_advlock }, /* advlock */ + { &vop_blkatoff_desc, (vop_t *)nfs_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (vop_t *)nfs_valloc }, /* valloc */ + { &vop_reallocblks_desc, (vop_t *)nfs_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (vop_t 
*)nfs_vfree }, /* vfree */ + { &vop_truncate_desc, (vop_t *)nfs_truncate }, /* truncate */ + { &vop_update_desc, (vop_t *)nfs_update }, /* update */ + { &vop_bwrite_desc, (vop_t *)nfs_bwrite }, /* bwrite */ + { &vop_pagein_desc, (vop_t *)nfs_pagein }, /* Pagein */ + { &vop_pageout_desc, (vop_t *)nfs_pageout }, /* Pageout */ + { &vop_copyfile_desc, (vop_t *)err_copyfile }, /* Copyfile */ + { &vop_blktooff_desc, (vop_t *)nfs_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (vop_t *)nfs_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (vop_t *)nfs_cmap }, /* cmap */ + { NULL, NULL } +}; +struct vnodeopv_desc nfsv2_vnodeop_opv_desc = + { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries }; +#ifdef __FreeBSD__ +VNODEOP_SET(nfsv2_vnodeop_opv_desc); +#endif + +/* + * Special device vnode ops + */ +vop_t **spec_nfsv2nodeop_p; +static struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = { + { &vop_default_desc, (vop_t *)vn_default_error }, + { &vop_lookup_desc, (vop_t *)spec_lookup }, /* lookup */ + { &vop_create_desc, (vop_t *)spec_create }, /* create */ + { &vop_mknod_desc, (vop_t *)spec_mknod }, /* mknod */ + { &vop_open_desc, (vop_t *)spec_open }, /* open */ + { &vop_close_desc, (vop_t *)nfsspec_close }, /* close */ + { &vop_access_desc, (vop_t *)nfsspec_access }, /* access */ + { &vop_getattr_desc, (vop_t *)nfs_getattr }, /* getattr */ + { &vop_setattr_desc, (vop_t *)nfs_setattr }, /* setattr */ + { &vop_read_desc, (vop_t *)nfsspec_read }, /* read */ + { &vop_write_desc, (vop_t *)nfsspec_write }, /* write */ + { &vop_lease_desc, (vop_t *)spec_lease_check }, /* lease */ + { &vop_ioctl_desc, (vop_t *)spec_ioctl }, /* ioctl */ + { &vop_select_desc, (vop_t *)spec_select }, /* select */ + { &vop_revoke_desc, (vop_t *)spec_revoke }, /* revoke */ + { &vop_mmap_desc, (vop_t *)spec_mmap }, /* mmap */ + { &vop_fsync_desc, (vop_t *)nfs_fsync }, /* fsync */ + { &vop_seek_desc, (vop_t *)spec_seek }, /* seek */ + { &vop_remove_desc, (vop_t *)spec_remove }, /* remove */ + { &vop_link_desc, 
(vop_t *)spec_link }, /* link */ + { &vop_rename_desc, (vop_t *)spec_rename }, /* rename */ + { &vop_mkdir_desc, (vop_t *)spec_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (vop_t *)spec_rmdir }, /* rmdir */ + { &vop_symlink_desc, (vop_t *)spec_symlink }, /* symlink */ + { &vop_readdir_desc, (vop_t *)spec_readdir }, /* readdir */ + { &vop_readlink_desc, (vop_t *)spec_readlink }, /* readlink */ + { &vop_abortop_desc, (vop_t *)spec_abortop }, /* abortop */ + { &vop_inactive_desc, (vop_t *)nfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (vop_t *)nfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (vop_t *)nfs_lock }, /* lock */ + { &vop_unlock_desc, (vop_t *)nfs_unlock }, /* unlock */ + { &vop_bmap_desc, (vop_t *)spec_bmap }, /* bmap */ + { &vop_strategy_desc, (vop_t *)spec_strategy }, /* strategy */ + { &vop_print_desc, (vop_t *)nfs_print }, /* print */ + { &vop_islocked_desc, (vop_t *)nfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (vop_t *)spec_pathconf }, /* pathconf */ + { &vop_advlock_desc, (vop_t *)spec_advlock }, /* advlock */ + { &vop_blkatoff_desc, (vop_t *)spec_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (vop_t *)spec_valloc }, /* valloc */ + { &vop_reallocblks_desc, (vop_t *)spec_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (vop_t *)spec_vfree }, /* vfree */ + { &vop_truncate_desc, (vop_t *)spec_truncate }, /* truncate */ + { &vop_update_desc, (vop_t *)nfs_update }, /* update */ + { &vop_bwrite_desc, (vop_t *)vn_bwrite }, /* bwrite */ + { &vop_devblocksize_desc, (vop_t *)spec_devblocksize }, /* devblocksize */ + { &vop_pagein_desc, (vop_t *)nfs_pagein }, /* Pagein */ + { &vop_pageout_desc, (vop_t *)nfs_pageout }, /* Pageout */ + { &vop_blktooff_desc, (vop_t *)nfs_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (vop_t *)nfs_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (vop_t *)nfs_cmap }, /* cmap */ + { NULL, NULL } +}; +struct vnodeopv_desc spec_nfsv2nodeop_opv_desc = + { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries }; +#ifdef 
__FreeBSD__ +VNODEOP_SET(spec_nfsv2nodeop_opv_desc); +#endif + +vop_t **fifo_nfsv2nodeop_p; +static struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = { + { &vop_default_desc, (vop_t *)vn_default_error }, + { &vop_lookup_desc, (vop_t *)fifo_lookup }, /* lookup */ + { &vop_create_desc, (vop_t *)fifo_create }, /* create */ + { &vop_mknod_desc, (vop_t *)fifo_mknod }, /* mknod */ + { &vop_open_desc, (vop_t *)fifo_open }, /* open */ + { &vop_close_desc, (vop_t *)nfsfifo_close }, /* close */ + { &vop_access_desc, (vop_t *)nfsspec_access }, /* access */ + { &vop_getattr_desc, (vop_t *)nfs_getattr }, /* getattr */ + { &vop_setattr_desc, (vop_t *)nfs_setattr }, /* setattr */ + { &vop_read_desc, (vop_t *)nfsfifo_read }, /* read */ + { &vop_write_desc, (vop_t *)nfsfifo_write }, /* write */ + { &vop_lease_desc, (vop_t *)fifo_lease_check }, /* lease */ + { &vop_ioctl_desc, (vop_t *)fifo_ioctl }, /* ioctl */ + { &vop_select_desc, (vop_t *)fifo_select }, /* select */ + { &vop_revoke_desc, (vop_t *)fifo_revoke }, /* revoke */ + { &vop_mmap_desc, (vop_t *)fifo_mmap }, /* mmap */ + { &vop_fsync_desc, (vop_t *)nfs_fsync }, /* fsync */ + { &vop_seek_desc, (vop_t *)fifo_seek }, /* seek */ + { &vop_remove_desc, (vop_t *)fifo_remove }, /* remove */ + { &vop_link_desc, (vop_t *)fifo_link }, /* link */ + { &vop_rename_desc, (vop_t *)fifo_rename }, /* rename */ + { &vop_mkdir_desc, (vop_t *)fifo_mkdir }, /* mkdir */ + { &vop_rmdir_desc, (vop_t *)fifo_rmdir }, /* rmdir */ + { &vop_symlink_desc, (vop_t *)fifo_symlink }, /* symlink */ + { &vop_readdir_desc, (vop_t *)fifo_readdir }, /* readdir */ + { &vop_readlink_desc, (vop_t *)fifo_readlink }, /* readlink */ + { &vop_abortop_desc, (vop_t *)fifo_abortop }, /* abortop */ + { &vop_inactive_desc, (vop_t *)nfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (vop_t *)nfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (vop_t *)nfs_lock }, /* lock */ + { &vop_unlock_desc, (vop_t *)nfs_unlock }, /* unlock */ + { &vop_bmap_desc, (vop_t *)fifo_bmap 
}, /* bmap */ + { &vop_strategy_desc, (vop_t *)fifo_badop }, /* strategy */ + { &vop_print_desc, (vop_t *)nfs_print }, /* print */ + { &vop_islocked_desc, (vop_t *)nfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (vop_t *)fifo_pathconf }, /* pathconf */ + { &vop_advlock_desc, (vop_t *)fifo_advlock }, /* advlock */ + { &vop_blkatoff_desc, (vop_t *)fifo_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (vop_t *)fifo_valloc }, /* valloc */ + { &vop_reallocblks_desc, (vop_t *)fifo_reallocblks }, /* reallocblks */ + { &vop_vfree_desc, (vop_t *)fifo_vfree }, /* vfree */ + { &vop_truncate_desc, (vop_t *)fifo_truncate }, /* truncate */ + { &vop_update_desc, (vop_t *)nfs_update }, /* update */ + { &vop_bwrite_desc, (vop_t *)vn_bwrite }, /* bwrite */ + { &vop_pagein_desc, (vop_t *)nfs_pagein }, /* Pagein */ + { &vop_pageout_desc, (vop_t *)nfs_pageout }, /* Pageout */ + { &vop_blktooff_desc, (vop_t *)nfs_blktooff }, /* blktooff */ + { &vop_offtoblk_desc, (vop_t *)nfs_offtoblk }, /* offtoblk */ + { &vop_cmap_desc, (vop_t *)nfs_cmap }, /* cmap */ + { NULL, NULL } +}; +struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = + { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries }; +#ifdef __FreeBSD__ +VNODEOP_SET(fifo_nfsv2nodeop_opv_desc); +#endif + +static int nfs_commit __P((struct vnode *vp, u_quad_t offset, int cnt, + struct ucred *cred, struct proc *procp)); +static int nfs_mknodrpc __P((struct vnode *dvp, struct vnode **vpp, + struct componentname *cnp, + struct vattr *vap)); +static int nfs_removerpc __P((struct vnode *dvp, char *name, int namelen, + struct ucred *cred, struct proc *proc)); +static int nfs_renamerpc __P((struct vnode *fdvp, char *fnameptr, + int fnamelen, struct vnode *tdvp, + char *tnameptr, int tnamelen, + struct ucred *cred, struct proc *proc)); +static int nfs_renameit __P((struct vnode *sdvp, + struct componentname *scnp, + struct sillyrename *sp)); + +/* + * Global variables + */ +extern u_long nfs_true, nfs_false; +extern struct nfsstats nfsstats; 
+extern nfstype nfsv3_type[9]; +struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; +struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON]; +int nfs_numasync = 0; +#define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1)) + +static int nfsaccess_cache_timeout = NFS_MAXATTRTIMO; +/* SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_timeout, CTLFLAG_RW, + &nfsaccess_cache_timeout, 0, "NFS ACCESS cache timeout"); +*/ +#define NFSV3ACCESS_ALL (NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY \ + | NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE \ + | NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP) + + + +static int +nfs3_access_otw(struct vnode *vp, + int wmode, + struct proc *p, + struct ucred *cred) +{ + const int v3 = 1; + u_int32_t *tl; + int error = 0, attrflag; + + struct mbuf *mreq, *mrep, *md, *mb, *mb2; + caddr_t bpos, dpos, cp2; + register int32_t t1, t2; + register caddr_t cp; + u_int32_t rmode; + struct nfsnode *np = VTONFS(vp); + + nfsstats.rpccnt[NFSPROC_ACCESS]++; + nfsm_reqhead(vp, NFSPROC_ACCESS, NFSX_FH(v3) + NFSX_UNSIGNED); + nfsm_fhtom(vp, v3); + nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED); + *tl = txdr_unsigned(wmode); + nfsm_request(vp, NFSPROC_ACCESS, p, cred); + nfsm_postop_attr(vp, attrflag); + if (!error) { + nfsm_dissect(tl, u_int32_t *, NFSX_UNSIGNED); + rmode = fxdr_unsigned(u_int32_t, *tl); + np->n_mode = rmode; + np->n_modeuid = cred->cr_uid; + np->n_modestamp = time_second; + } + nfsm_reqdone; + return error; +} + +/* + * nfs access vnode op. + * For nfs version 2, just return ok. File accesses may fail later. + * For nfs version 3, use the access rpc to check accessibility. If file modes + * are changed on the server, accesses might still fail later. 
+ */ +static int +nfs_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + int error = 0; + u_long mode, wmode; + int v3 = NFS_ISV3(vp); + struct nfsnode *np = VTONFS(vp); + + /* + * For nfs v3, do an access rpc, otherwise you are stuck emulating + * ufs_access() locally using the vattr. This may not be correct, + * since the server may apply other access criteria such as + * client uid-->server uid mapping that we do not know about, but + * this is better than just returning anything that is lying about + * in the cache. + */ + if (v3) { + if (ap->a_mode & VREAD) + mode = NFSV3ACCESS_READ; + else + mode = 0; + if (vp->v_type == VDIR) { + if (ap->a_mode & VWRITE) + mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND | + NFSV3ACCESS_DELETE); + if (ap->a_mode & VEXEC) + mode |= NFSV3ACCESS_LOOKUP; + } else { + if (ap->a_mode & VWRITE) + mode |= (NFSV3ACCESS_MODIFY | NFSV3ACCESS_EXTEND); + if (ap->a_mode & VEXEC) + mode |= NFSV3ACCESS_EXECUTE; + } + /* XXX safety belt, only make blanket request if caching */ + if (nfsaccess_cache_timeout > 0) { + wmode = NFSV3ACCESS_READ | NFSV3ACCESS_MODIFY | + NFSV3ACCESS_EXTEND | NFSV3ACCESS_EXECUTE | + NFSV3ACCESS_DELETE | NFSV3ACCESS_LOOKUP; + } else { + wmode = mode; + } + + /* + * Does our cached result allow us to give a definite yes to + * this request? + */ + if ((time_second < (np->n_modestamp + nfsaccess_cache_timeout)) && + (ap->a_cred->cr_uid == np->n_modeuid) && + ((np->n_mode & mode) == mode)) { + /* nfsstats.accesscache_hits++; */ + } else { + /* + * Either a no, or a don't know. Go to the wire. 
+ */ + /* nfsstats.accesscache_misses++; */ + error = nfs3_access_otw(vp, wmode, ap->a_p,ap->a_cred); + if (!error) { + if ((np->n_mode & mode) != mode) + error = EACCES; + } + } + } else + return (nfsspec_access(ap)); /* NFSv2 case checks for EROFS here*/ + /* CSM - moved EROFS check down per NetBSD rev 1.71. So you + * get the correct error value with layered filesystems. + * EKN - moved the return(error) below this so it does get called.*/ + /* + * Disallow write attempts on filesystems mounted read-only; + * unless the file is a socket, fifo, or a block or character + * device resident on the filesystem. + */ + if (!error && (ap->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { + switch (vp->v_type) { + case VREG: case VDIR: case VLNK: + error = EROFS; + default: + break; + } + } + return (error); +} + +/* + * nfs open vnode op + * Check to see if the type is ok + * and that deletion is not in progress. + * For paged in text files, you will need to flush the page cache + * if consistency is lost. + */ +/* ARGSUSED */ +static int +nfs_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + struct nfsnode *np = VTONFS(vp); + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + struct vattr vattr; + int error; + + if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) +{ printf("open eacces vtyp=%d\n",vp->v_type); + return (EACCES); +} + /* + * Get a valid lease. If cached data is stale, flush it. 
+ */ + if (nmp->nm_flag & NFSMNT_NQNFS) { + if (NQNFS_CKINVALID(vp, np, ND_READ)) { + do { + error = nqnfs_getlease(vp, ND_READ, ap->a_cred, + ap->a_p); + } while (error == NQNFS_EXPIRED); + if (error) + return (error); + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE)) { + if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, + ap->a_p, 1)) == EINTR) + return (error); + np->n_brev = np->n_lrev; + } + } + } else { + if (np->n_flag & NMODIFIED) { + if ((error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, + ap->a_p, 1)) == EINTR) + return (error); + np->n_attrstamp = 0; + if (vp->v_type == VDIR) + np->n_direofoffset = 0; + error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); + if (error) + return (error); + np->n_mtime = vattr.va_mtime.tv_sec; + } else { + error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_p); + if (error) + return (error); + if (np->n_mtime != vattr.va_mtime.tv_sec) { + if (vp->v_type == VDIR) + np->n_direofoffset = 0; + if ((error = nfs_vinvalbuf(vp, V_SAVE, + ap->a_cred, ap->a_p, 1)) == EINTR) + return (error); + np->n_mtime = vattr.va_mtime.tv_sec; + } + } + } + if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) + np->n_attrstamp = 0; /* For Open/Close consistency */ + return (0); +} + +/* + * nfs close vnode op + * What an NFS client should do upon close after writing is a debatable issue. + * Most NFS clients push delayed writes to the server upon close, basically for + * two reasons: + * 1 - So that any write errors may be reported back to the client process + * doing the close system call. By far the two most likely errors are + * NFSERR_NOSPC and NFSERR_DQUOT to indicate space allocation failure. + * 2 - To put a worst case upper bound on cache inconsistency between + * multiple clients for the file. + * There is also a consistency problem for Version 2 of the protocol w.r.t. 
+ * not being able to tell if other clients are writing a file concurrently, + * since there is no way of knowing if the changed modify time in the reply + * is only due to the write for this client. + * (NFS Version 3 provides weak cache consistency data in the reply that + * should be sufficient to detect and handle this case.) + * + * The current code does the following: + * for NFS Version 2 - play it safe and flush/invalidate all dirty buffers + * for NFS Version 3 - flush dirty buffers to the server but don't invalidate + * or commit them (this satisfies 1 and 2 except for the + * case where the server crashes after this close but + * before the commit RPC, which is felt to be "good + * enough". Changing the last argument to nfs_flush() to + * a 1 would force a commit operation, if it is felt a + * commit is necessary now. + * for NQNFS - do nothing now, since 2 is dealt with via leases and + * 1 should be dealt with via an fsync() system call for + * cases where write errors are important. 
+ */ +/* ARGSUSED */ +static int +nfs_close(ap) + struct vop_close_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct nfsnode *np = VTONFS(vp); + int error = 0; + + if (vp->v_type == VREG) { +#if DIAGNOSTIC + register struct sillyrename *sp = np->n_sillyrename; + if (sp) + kprintf("nfs_close: %s, dvp=%x, vp=%x, ap=%x, np=%x, sp=%x\n", + &sp->s_name[0], (unsigned)(sp->s_dvp), (unsigned)vp, + (unsigned)ap, (unsigned)np, (unsigned)sp); +#endif + if ((VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) == 0 && + (np->n_flag & NMODIFIED)) { + if (NFS_ISV3(vp)) { + error = nfs_flush(vp, ap->a_cred, MNT_WAIT, ap->a_p, 0); + np->n_flag &= ~NMODIFIED; + } else + error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1); + np->n_attrstamp = 0; + } + if (np->n_flag & NWRITEERR) { + np->n_flag &= ~NWRITEERR; + error = np->n_error; + } + } + return (error); +} + +/* + * nfs getattr call from vfs. + */ +static int +nfs_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct nfsnode *np = VTONFS(vp); + register caddr_t cp; + register u_long *tl; + register int t1, t2; + caddr_t bpos, dpos; + int error = 0; + struct mbuf *mreq, *mrep, *md, *mb, *mb2; + int v3 = NFS_ISV3(vp); + + /* + * Update local times for special files. + */ + if (np->n_flag & (NACC | NUPD)) + np->n_flag |= NCHG; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 513)) | DBG_FUNC_START, + (int)np->n_size, 0, (int)np->n_vattr.va_size, np->n_flag, 0); + + /* + * First look in the cache. 
+ */ + if ((error = nfs_getattrcache(vp, ap->a_vap)) == 0) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 513)) | DBG_FUNC_END, + (int)np->n_size, 0, (int)np->n_vattr.va_size, np->n_flag, 0); + + return (0); + } + if (error != ENOENT) + return (error); + error = 0; + + if (v3 && nfsaccess_cache_timeout > 0) { + /* nfsstats.accesscache_misses++; */ + if (error = nfs3_access_otw(vp, NFSV3ACCESS_ALL, ap->a_p, ap->a_cred)) + return (error); + if ((error = nfs_getattrcache(vp, ap->a_vap)) == 0) + return (0); + if (error != ENOENT) + return (error); + error = 0; + } + + nfsstats.rpccnt[NFSPROC_GETATTR]++; + nfsm_reqhead(vp, NFSPROC_GETATTR, NFSX_FH(v3)); + nfsm_fhtom(vp, v3); + nfsm_request(vp, NFSPROC_GETATTR, ap->a_p, ap->a_cred); + if (!error) { + nfsm_loadattr(vp, ap->a_vap); + if (np->n_mtime != ap->a_vap->va_mtime.tv_sec) { + NFSTRACE(NFSTRC_GA_INV, vp); + if (vp->v_type == VDIR) + nfs_invaldir(vp); + error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, + ap->a_p, 1); + if (!error) { + NFSTRACE(NFSTRC_GA_INV1, vp); + np->n_mtime = ap->a_vap->va_mtime.tv_sec; + } else { + NFSTRACE(NFSTRC_GA_INV2, error); + } + } + } + nfsm_reqdone; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 513)) | DBG_FUNC_END, + (int)np->n_size, -1, (int)np->n_vattr.va_size, error, 0); + + return (error); +} + +/* + * nfs setattr call. + */ +static int +nfs_setattr(ap) + struct vop_setattr_args /* { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct nfsnode *np = VTONFS(vp); + register struct vattr *vap = ap->a_vap; + int error = 0; + u_quad_t tsize; + +#ifndef nolint + tsize = (u_quad_t)0; +#endif + /* + * Disallow write attempts if the filesystem is mounted read-only. 
+ */ + if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || + vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || + vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && + (vp->v_mount->mnt_flag & MNT_RDONLY)) + return (EROFS); + if (vap->va_size != VNOVAL) { + switch (vp->v_type) { + case VDIR: + return (EISDIR); + case VCHR: + case VBLK: + case VSOCK: + case VFIFO: + if (vap->va_mtime.tv_sec == VNOVAL && + vap->va_atime.tv_sec == VNOVAL && + vap->va_mode == (u_short)VNOVAL && + vap->va_uid == (uid_t)VNOVAL && + vap->va_gid == (gid_t)VNOVAL) + return (0); + vap->va_size = VNOVAL; + break; + default: + /* + * Disallow write attempts if the filesystem is + * mounted read-only. + */ + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + np->n_flag |= NMODIFIED; + tsize = np->n_size; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 512)) | DBG_FUNC_START, + (int)np->n_size, (int)vap->va_size, (int)np->n_vattr.va_size, np->n_flag, 0); + + if (vap->va_size == 0) + error = nfs_vinvalbuf(vp, 0, + ap->a_cred, ap->a_p, 1); + else + error = nfs_vinvalbuf(vp, V_SAVE, + ap->a_cred, ap->a_p, 1); + + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)vap->va_size); /* XXX check error */ + + if (error) { + printf("nfs_setattr: nfs_vinvalbuf %d\n", error); + +#if DIAGNOSTIC + kprintf("nfs_setattr: nfs_vinvalbuf %d\n", + error); +#endif /* DIAGNOSTIC */ + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)tsize); /* XXX check error */ + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 512)) | DBG_FUNC_END, + (int)np->n_size, (int)vap->va_size, (int)np->n_vattr.va_size, -1, 0); + + return (error); + } + np->n_size = np->n_vattr.va_size = vap->va_size; + + }; + } else if ((vap->va_mtime.tv_sec != VNOVAL || + vap->va_atime.tv_sec != VNOVAL) && (np->n_flag & NMODIFIED) && + vp->v_type == VREG && + (error = nfs_vinvalbuf(vp, V_SAVE, ap->a_cred, + ap->a_p, 1)) == EINTR) + return (error); + + error = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); + + 
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 512)) | DBG_FUNC_END, + (int)np->n_size, (int)vap->va_size, (int)np->n_vattr.va_size, error, 0); + + if (error && vap->va_size != VNOVAL) { + /* make every effort to resync file size w/ server... */ + int err = 0; /* preserve "error" for return */ + + printf("nfs_setattr: nfs_setattrrpc %d\n", error); +#if DIAGNOSTIC + kprintf("nfs_setattr: nfs_setattrrpc %d\n", error); +#endif /* DIAGNOSTIC */ + np->n_size = np->n_vattr.va_size = tsize; + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)np->n_size); /* XXX check error */ + vap->va_size = tsize; + err = nfs_setattrrpc(vp, vap, ap->a_cred, ap->a_p); + + if (err) + printf("nfs_setattr1: nfs_setattrrpc %d\n", err); +#if DIAGNOSTIC + if (err) + kprintf("nfs_setattr nfs_setattrrpc %d\n", err); +#endif /* DIAGNOSTIC */ + } + return (error); +} + +/* + * Do an nfs setattr rpc. + */ +static int +nfs_setattrrpc(vp, vap, cred, procp) + register struct vnode *vp; + register struct vattr *vap; + struct ucred *cred; + struct proc *procp; +{ + register struct nfsv2_sattr *sp; + register caddr_t cp; + register long t1, t2; + caddr_t bpos, dpos, cp2; + u_long *tl; + int error = 0, wccflag = NFSV3_WCCRATTR; + struct mbuf *mreq, *mrep, *md, *mb, *mb2; + int v3 = NFS_ISV3(vp); + + nfsstats.rpccnt[NFSPROC_SETATTR]++; + nfsm_reqhead(vp, NFSPROC_SETATTR, NFSX_FH(v3) + NFSX_SATTR(v3)); + nfsm_fhtom(vp, v3); + if (v3) { + if (vap->va_mode != (u_short)VNOVAL) { + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + *tl++ = nfs_true; + *tl = txdr_unsigned(vap->va_mode); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } + if (vap->va_uid != (uid_t)VNOVAL) { + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + *tl++ = nfs_true; + *tl = txdr_unsigned(vap->va_uid); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } + if (vap->va_gid != (gid_t)VNOVAL) { + nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED); + *tl++ = nfs_true; + *tl = txdr_unsigned(vap->va_gid); + } else { + 
nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } + if (vap->va_size != VNOVAL) { + nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); + *tl++ = nfs_true; + txdr_hyper(&vap->va_size, tl); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } + if (vap->va_atime.tv_sec != VNOVAL) { + if (vap->va_atime.tv_sec != time.tv_sec) { + nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); + txdr_nfsv3time(&vap->va_atime, tl); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); + } + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); + } + if (vap->va_mtime.tv_sec != VNOVAL) { + if (vap->va_mtime.tv_sec != time.tv_sec) { + nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED); + *tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT); + txdr_nfsv3time(&vap->va_mtime, tl); + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); + } + } else { + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE); + } + nfsm_build(tl, u_long *, NFSX_UNSIGNED); + *tl = nfs_false; + } else { + nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR); + if (vap->va_mode == (u_short)VNOVAL) + sp->sa_mode = VNOVAL; + else + sp->sa_mode = vtonfsv2_mode(vp->v_type, vap->va_mode); + if (vap->va_uid == (uid_t)VNOVAL) + sp->sa_uid = VNOVAL; + else + sp->sa_uid = txdr_unsigned(vap->va_uid); + if (vap->va_gid == (gid_t)VNOVAL) + sp->sa_gid = VNOVAL; + else + sp->sa_gid = txdr_unsigned(vap->va_gid); + sp->sa_size = txdr_unsigned(vap->va_size); + txdr_nfsv2time(&vap->va_atime, &sp->sa_atime); + txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime); + } + nfsm_request(vp, NFSPROC_SETATTR, procp, cred); + if (v3) { + nfsm_wcc_data(vp, wccflag); + if ((!wccflag) && (vp->v_type != VBAD)) /* EINVAL set on VBAD vnode */ + VTONFS(vp)->n_attrstamp = 0; + } else + nfsm_loadattr(vp, (struct vattr 
*)0);
	nfsm_reqdone;
	return (error);
}

/*
 * nfs lookup call, one step at a time...
 * First look in cache
 * If not found, unlock the directory nfsnode and do the rpc
 *
 * VOP_LOOKUP for NFS: resolves the single pathname component in
 * ap->a_cnp against directory ap->a_dvp and returns a referenced
 * vnode in *ap->a_vpp.  Lock state of parent/child on return follows
 * the LOCKPARENT/WANTPARENT/ISLASTCN protocol (see the `Step through'
 * comment in ufs/ufs_lookup.c).  Returns 0 on success, EJUSTRETURN
 * for a creatable missing last component of a CREATE/RENAME, or an
 * errno.
 */
static int
nfs_lookup(ap)
	struct vop_lookup_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	register struct componentname *cnp = ap->a_cnp;
	register struct vnode *dvp = ap->a_dvp;
	register struct vnode **vpp = ap->a_vpp;
	register int flags = cnp->cn_flags;
	register struct vnode *newvp;
	register u_long *tl;			/* scratch used by nfsm_* macros */
	register caddr_t cp;			/* scratch used by nfsm_* macros */
	register long t1, t2;			/* scratch used by nfsm_* macros */
	struct nfsmount *nmp;
	caddr_t bpos, dpos, cp2;		/* mbuf cursors for nfsm_* macros */
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	long len;
	nfsfh_t *fhp;
	struct nfsnode *np;
	int lockparent, wantparent, error = 0, attrflag, fhsize;
	int v3 = NFS_ISV3(dvp);
	struct proc *p = cnp->cn_proc;
	int worldbuildworkaround = 1;		/* see EKN comment below */

	/* Refuse DELETE/RENAME of the last component on a read-only mount. */
	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);
	*vpp = NULLVP;
	if (dvp->v_type != VDIR)
		return (ENOTDIR);
	lockparent = flags & LOCKPARENT;
	wantparent = flags & (LOCKPARENT|WANTPARENT);
	nmp = VFSTONFS(dvp->v_mount);
	np = VTONFS(dvp);

	if (worldbuildworkaround) {
		/*
		 * Temporary workaround for world builds so dvp does not go
		 * VBAD during server calls in this routine.  When the real
		 * ref counting problem is found take this out.  Note: if
		 * this was placed later, before the nfsm_request setup, the
		 * workaround did not work (that attempt also took only one
		 * VREF).  Thus it needs to be above the cache_lookup branch
		 * or needs 2 VREFs; not sure which — can't play with world
		 * builds right now to see which.  VOP_ACCESS could also go
		 * to the server. - EKN
		 */
		VREF(dvp);	/* hang on to this dvp - EKN */
		VREF(dvp);	/* hang on tight - EKN */
	}

	/* First try the name cache; error != ENOENT means a positive hit. */
	if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
		struct vattr vattr;
		int vpid;

		if ((error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p))) {
			*vpp = NULLVP;
			if (worldbuildworkaround) {
				vrele(dvp);	/* end of hanging on tight to dvp - EKN */
				vrele(dvp);	/* end of hanging on tight to dvp - EKN */
			}
			return (error);
		}

		/* got to check to make sure the vnode didn't go away if access went to server */
		if ((*vpp)->v_type == VBAD) {
			if (worldbuildworkaround) {
				vrele(dvp);	/* end of hanging on tight to dvp - EKN */
				vrele(dvp);	/* end of hanging on tight to dvp - EKN */
			}
			return(EINVAL);
		}

		newvp = *vpp;
		vpid = newvp->v_id;	/* capture id to detect recycling across vget */
		/*
		 * See the comment starting `Step through' in ufs/ufs_lookup.c
		 * for an explanation of the locking protocol
		 */
		if (dvp == newvp) {
			/* "." lookup: just bump the reference. */
			VREF(newvp);
			error = 0;
		} else if (flags & ISDOTDOT) {
			/* ".." — unlock parent first to respect lock order. */
			VOP_UNLOCK(dvp, 0, p);
			error = vget(newvp, LK_EXCLUSIVE, p);
			if (!error && lockparent && (flags & ISLASTCN))
				error = vn_lock(dvp, LK_EXCLUSIVE, p);
		} else {
			error = vget(newvp, LK_EXCLUSIVE, p);
			if (!lockparent || error || !(flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
		}
		if (!error) {
			if (vpid == newvp->v_id) {
				/*
				 * Cache entry is only trusted while the file's
				 * ctime matches what we cached at enter time.
				 */
				if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
				    && vattr.va_ctime.tv_sec == VTONFS(newvp)->n_ctime) {
					nfsstats.lookupcache_hits++;
					if (cnp->cn_nameiop != LOOKUP &&
					    (flags & ISLASTCN))
						cnp->cn_flags |= SAVENAME;

					if (worldbuildworkaround) {
						vrele(dvp);	/* end of hanging on tight to dvp - EKN */
						vrele(dvp);	/* end of hanging on tight to dvp - EKN */
					}

					return (0);
				}
				cache_purge(newvp);
			}
			vput(newvp);
			if (lockparent && dvp != newvp && (flags & ISLASTCN))
				VOP_UNLOCK(dvp, 0, p);
		}
		/* Stale cache hit: relock parent and fall through to the RPC. */
		error = vn_lock(dvp, LK_EXCLUSIVE, p);
		*vpp = NULLVP;
		if (error) {
			if (worldbuildworkaround) {
				vrele(dvp);	/* end of hanging on tight to dvp - EKN */
				vrele(dvp);	/* end of hanging on tight to dvp - EKN */
			}
			return (error);
		}
	}

	/*
	 * Got to check to make sure the vnode didn't go away if VOP_GETATTR went to server
	 * or callers prior to this blocked and had it go VBAD.
	 */
	if (dvp->v_type == VBAD) {
		if (worldbuildworkaround) {
			vrele(dvp);	/* end of hanging on tight to dvp - EKN */
			vrele(dvp);	/* end of hanging on tight to dvp - EKN */
		}
		return(EINVAL);
	}

	/* Cache miss: do the LOOKUP RPC. */
	error = 0;
	newvp = NULLVP;
	nfsstats.lookupcache_misses++;
	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	len = cnp->cn_namelen;
	nfsm_reqhead(dvp, NFSPROC_LOOKUP,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_LOOKUP, cnp->cn_proc, cnp->cn_cred);

	/*
	 * These two vrele calls set dvp refcounts back to where they were
	 * before we took the extra 2 VREFs to avoid a VBAD vnode on dvp
	 * during server calls for world builds.  Remove when the real
	 * fix is found. - EKN
	 */
	if (worldbuildworkaround) {
		vrele(dvp);	/* end of hanging on tight to dvp - EKN */
		vrele(dvp);	/* end of hanging on tight to dvp - EKN */
	}

	if (error) {
		nfsm_postop_attr(dvp, attrflag);
		m_freem(mrep);
		goto nfsmout;
	}
	nfsm_getfh(fhp, fhsize, v3);

	/*
	 * Handle RENAME case...
	 */
	if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
		if (NFS_CMPFH(np, fhp, fhsize)) {
			/* Renaming a directory onto itself. */
			m_freem(mrep);
			return (EISDIR);
		}
		if ((error = nfs_nget(dvp->v_mount, fhp, fhsize, &np))) {
			m_freem(mrep);
			return (error);
		}
		newvp = NFSTOV(np);
		if (v3) {
			nfsm_postop_attr(newvp, attrflag);
			nfsm_postop_attr(dvp, attrflag);
		} else
			nfsm_loadattr(newvp, (struct vattr *)0);
		*vpp = newvp;
		m_freem(mrep);
		cnp->cn_flags |= SAVENAME;
		if (!lockparent)
			VOP_UNLOCK(dvp, 0, p);
		return (0);
	}

	if (flags & ISDOTDOT) {
		/* ".." — drop parent lock before nfs_nget to avoid deadlock. */
		VOP_UNLOCK(dvp, 0, p);
		error = nfs_nget(dvp->v_mount, fhp, fhsize, &np);
		if (error) {
			vn_lock(dvp, LK_EXCLUSIVE + LK_RETRY, p);
			return (error);
		}
		newvp = NFSTOV(np);
		if (lockparent && (flags & ISLASTCN) &&
		    (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
			vput(newvp);
			return (error);
		}
	} else if (NFS_CMPFH(np, fhp, fhsize)) {
		/* "." — same file handle as the directory itself. */
		VREF(dvp);
		newvp = dvp;
	} else {
		if ((error = nfs_nget(dvp->v_mount, fhp, fhsize, &np))) {
			m_freem(mrep);
			return (error);
		}
		if (!lockparent || !(flags & ISLASTCN))
			VOP_UNLOCK(dvp, 0, p);
		newvp = NFSTOV(np);
	}
	if (v3) {
		nfsm_postop_attr(newvp, attrflag);
		nfsm_postop_attr(dvp, attrflag);
	} else
		nfsm_loadattr(newvp, (struct vattr *)0);
	if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
		cnp->cn_flags |= SAVENAME;
	/* Enter in name cache unless this is a DELETE of the last component. */
	if ((cnp->cn_flags & MAKEENTRY) &&
	    (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
		np->n_ctime = np->n_vattr.va_ctime.tv_sec;
		cache_enter(dvp, newvp, cnp);
	}
	*vpp = newvp;
	nfsm_reqdone;
	if (error) {
		if (newvp != NULLVP) {
			vrele(newvp);
			*vpp = NULLVP;
		}
		/*
		 * Missing last component of a CREATE/RENAME maps to
		 * EJUSTRETURN so namei lets the caller create it.
		 */
		if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
		    (flags & ISLASTCN) && error == ENOENT) {
			if (!lockparent)
				VOP_UNLOCK(dvp, 0, p);
			if (dvp->v_mount->mnt_flag & MNT_RDONLY)
				error = EROFS;
			else
				error = EJUSTRETURN;
		}
		if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
			cnp->cn_flags |= SAVENAME;
	}
	return (error);
}
/*
 * nfs read call.
 * Just call nfs_bioread() to do the work.
 * Returns EPERM for anything that is not a regular file.
 */
static int
nfs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	if (vp->v_type != VREG)
		return (EPERM);
	return (nfs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
}

/*
 * nfs readlink call
 * Reads the link through the buffer cache via nfs_bioread().
 * Returns EPERM for anything that is not a symlink.
 */
static int
nfs_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;

	if (vp->v_type != VLNK)
		return (EPERM);
	return (nfs_bioread(vp, ap->a_uio, 0, ap->a_cred, 0));
}

/*
 * Do a readlink rpc.
 * Called by nfs_doio() from below the buffer cache.
 * Copies the link text into uiop; returns 0 or an NFS errno.
 */
int
nfs_readlinkrpc(vp, uiop, cred)
	register struct vnode *vp;
	struct uio *uiop;
	struct ucred *cred;
{
	register u_long *tl;		/* scratch for nfsm_* macros */
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, len, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(vp);

	nfsstats.rpccnt[NFSPROC_READLINK]++;
	nfsm_reqhead(vp, NFSPROC_READLINK, NFSX_FH(v3));
	nfsm_fhtom(vp, v3);
	nfsm_request(vp, NFSPROC_READLINK, uiop->uio_procp, cred);
	if (v3)
		nfsm_postop_attr(vp, attrflag);
	if (!error) {
		nfsm_strsiz(len, NFS_MAXPATHLEN);
		if (len == NFS_MAXPATHLEN) {
			/*
			 * Some servers pad the reply to NFS_MAXPATHLEN;
			 * trust the cached size if it is smaller.
			 */
			struct nfsnode *np = VTONFS(vp);
#if DIAGNOSTIC
			if (!np)
				panic("nfs_readlinkrpc: null np");
#endif
			if (np->n_size && np->n_size < NFS_MAXPATHLEN)
				len = np->n_size;
		}
		nfsm_mtouio(uiop, len);
	}
	nfsm_reqdone;
	return (error);
}

/*
 * nfs read rpc call
 * Ditto above
 * Loops issuing READ RPCs of at most nm_rsize until uiop is satisfied
 * or the server reports EOF (v3) / a short read (v2).
 */
int
nfs_readrpc(vp, uiop, cred)
	register struct vnode *vp;
	struct uio *uiop;
	struct ucred *cred;
{
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct nfsmount *nmp;
	int error = 0, len, retlen, tsiz, eof, attrflag;
	int v3 = NFS_ISV3(vp);

#ifndef nolint
	eof = 0;
#endif
	nmp = VFSTONFS(vp->v_mount);
	tsiz = uiop->uio_resid;
	/* v2 offsets are 32 bits; refuse reads past 4GB. */
	if (((u_int64_t)uiop->uio_offset + (unsigned int)tsiz > 0xffffffff) && !v3)
		return (EFBIG);
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_READ]++;
		len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
		nfsm_reqhead(vp, NFSPROC_READ, NFSX_FH(v3) + NFSX_UNSIGNED * 3);
		nfsm_fhtom(vp, v3);
		nfsm_build(tl, u_long *, NFSX_UNSIGNED * 3);
		if (v3) {
			/* v3: 64-bit offset + count. */
			txdr_hyper(&uiop->uio_offset, tl);
			*(tl + 2) = txdr_unsigned(len);
		} else {
			/* v2: offset, count, and unused totalcount. */
			*tl++ = txdr_unsigned(uiop->uio_offset);
			*tl++ = txdr_unsigned(len);
			*tl = 0;
		}
		nfsm_request(vp, NFSPROC_READ, uiop->uio_procp, cred);
		if (v3) {
			nfsm_postop_attr(vp, attrflag);
			if (error) {
				m_freem(mrep);
				goto nfsmout;
			}
			nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
			eof = fxdr_unsigned(int, *(tl + 1));
		} else
			nfsm_loadattr(vp, (struct vattr *)0);
		nfsm_strsiz(retlen, nmp->nm_rsize);
		nfsm_mtouio(uiop, retlen);
		m_freem(mrep);
		tsiz -= retlen;
		if (v3) {
			if (eof || retlen == 0)
				tsiz = 0;
		} else if (retlen < len)
			tsiz = 0;
	}
nfsmout:
	return (error);
}

/*
 * nfs write call
 * Loops issuing WRITE RPCs of at most nm_wsize.  For v3, tracks the
 * lowest commitment level returned (returned via *iomode) and sets
 * *must_commit when the server's write verifier changes, telling the
 * caller to re-commit previously unstable writes.
 */
int
nfs_writerpc(vp, uiop, cred, iomode, must_commit)
	register struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;
	int *iomode, *must_commit;
{
	register u_long *tl;
	register caddr_t cp;
	register int t1, t2, backup;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, len, tsiz, wccflag = NFSV3_WCCRATTR, rlen, commit;
	int v3 = NFS_ISV3(vp), committed = NFSV3WRITE_FILESYNC;

#if DIAGNOSTIC
	if (uiop->uio_iovcnt != 1)
		panic("nfs_writerpc: iovcnt > 1");
#endif
	*must_commit = 0;
	tsiz = uiop->uio_resid;
	/* v2 offsets are 32 bits; refuse writes past 4GB. */
	if (((u_int64_t)uiop->uio_offset + (unsigned int)tsiz > 0xffffffff) && !v3)
		return (EFBIG);
	while (tsiz > 0) {
		nfsstats.rpccnt[NFSPROC_WRITE]++;
		len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
		nfsm_reqhead(vp, NFSPROC_WRITE,
			NFSX_FH(v3) + 5 * NFSX_UNSIGNED + nfsm_rndup(len));
		nfsm_fhtom(vp, v3);
		if (v3) {
			nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED);
			txdr_hyper(&uiop->uio_offset, tl);
			tl += 2;
			*tl++ = txdr_unsigned(len);
			*tl++ = txdr_unsigned(*iomode);
		} else {
			/*
			 * v2 args: beginoffset, offset, totalcount, count.
			 * NOTE(review): *++tl skips the first word, leaving
			 * beginoffset (and one more word) unset — presumably
			 * deliberate since v2 servers ignore those fields;
			 * confirm against the historical 4.4BSD code.
			 */
			nfsm_build(tl, u_long *, 4 * NFSX_UNSIGNED);
			*++tl = txdr_unsigned(uiop->uio_offset);
			tl += 2;
		}
		*tl = txdr_unsigned(len);
		nfsm_uiotom(uiop, len);
		nfsm_request(vp, NFSPROC_WRITE, uiop->uio_procp, cred);
		if (v3) {
			wccflag = NFSV3_WCCCHK;
			nfsm_wcc_data(vp, wccflag);
			if (!error) {
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED +
					NFSX_V3WRITEVERF);
				rlen = fxdr_unsigned(int, *tl++);
				if (rlen <= 0) {
					error = NFSERR_IO;
					break;
				} else if (rlen < len) {
					/*
					 * Short write: back the uio up so the
					 * unwritten tail is retried next loop.
					 */
					backup = len - rlen;
					uiop->uio_iov->iov_base -= backup;
					uiop->uio_iov->iov_len += backup;
					uiop->uio_offset -= backup;
					uiop->uio_resid += backup;
					len = rlen;
				}
				commit = fxdr_unsigned(int, *tl++);

				/*
				 * Return the lowest committment level
				 * obtained by any of the RPCs.
				 */
				if (committed == NFSV3WRITE_FILESYNC)
					committed = commit;
				else if (committed == NFSV3WRITE_DATASYNC &&
					commit == NFSV3WRITE_UNSTABLE)
					committed = commit;
				if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0) {
					bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
						NFSX_V3WRITEVERF);
					nmp->nm_flag |= NFSMNT_HASWRITEVERF;
				} else if (bcmp((caddr_t)tl,
					(caddr_t)nmp->nm_verf, NFSX_V3WRITEVERF)) {
					/* Server rebooted: force a re-commit. */
					*must_commit = 1;
					bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
						NFSX_V3WRITEVERF);
				}
			}
		} else
			nfsm_loadattr(vp, (struct vattr *)0);
		if ((wccflag) && (vp->v_type != VBAD)) /* EINVAL set on VBAD vnode */
			VTONFS(vp)->n_mtime = VTONFS(vp)->n_vattr.va_mtime.tv_sec;
		m_freem(mrep);
		/*
		 * we seem to have a case where we end up looping on shutdown
		 * and taking down nfs servers.  For V3, error cases, there is
		 * no way to terminate loop, if the len was 0, meaning,
		 * nmp->nm_wsize was trashed.  FreeBSD has this fix in it.
		 * Let's try it.
		 */
		if (error)
			break;
		tsiz -= len;
	}
nfsmout:
	/* does it make sense to even say it was committed if we had an error? EKN */
	/* okay well just don't on bad vnodes then.  EINVAL will be returned on bad vnodes */
	if ((vp->v_type != VBAD) && (vp->v_mount->mnt_flag & MNT_ASYNC))
		committed = NFSV3WRITE_FILESYNC;
	*iomode = committed;
	if (error)
		uiop->uio_resid = tsiz;
	return (error);
}

/*
 * nfs mknod rpc
 * For NFS v2 this is a kludge. Use a create rpc but with the IFMT bits of the
 * mode set to specify the file type and the size field for rdev.
 * On success *vpp holds the new node; dvp is consumed (vput) on all paths.
 */
static int
nfs_mknodrpc(dvp, vpp, cnp, vap)
	register struct vnode *dvp;
	register struct vnode **vpp;
	register struct componentname *cnp;
	register struct vattr *vap;
{
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vattr vattr;
	char *cp2;
	caddr_t bpos, dpos;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	u_long rdev;
	int v3 = NFS_ISV3(dvp);

	if (vap->va_type == VCHR || vap->va_type == VBLK)
		rdev = txdr_unsigned(vap->va_rdev);
	else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
		rdev = 0xffffffff;	/* v2 convention: no device number */
	else {
		VOP_ABORTOP(dvp, cnp);
		vput(dvp);
		return (EOPNOTSUPP);
	}
	/* Fetch directory attributes to inherit the group id. */
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc))) {
		VOP_ABORTOP(dvp, cnp);
		vput(dvp);
		return (error);
	}
	nfsstats.rpccnt[NFSPROC_MKNOD]++;
	nfsm_reqhead(dvp, NFSPROC_MKNOD, NFSX_FH(v3) + 4 * NFSX_UNSIGNED +
		+ nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(tl, u_long *, NFSX_UNSIGNED + NFSX_V3SRVSATTR);
		*tl++ = vtonfsv3_type(vap->va_type);
		sp3 = (struct nfsv3_sattr *)tl;
		nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid);
		if (vap->va_type == VCHR || vap->va_type == VBLK) {
			nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(major(vap->va_rdev));
			*tl = txdr_unsigned(minor(vap->va_rdev));
		}
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(vattr.va_gid);
		sp->sa_size = rdev;	/* v2 kludge: rdev travels in sa_size */
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_MKNOD, cnp->cn_proc, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			/* Server gave no handle back: look the name up. */
			if (newvp) {
				vput(newvp);
				newvp = (struct vnode *)0;
			}
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (error) {
		if (newvp)
			vput(newvp);
	} else {
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, newvp, cnp);
		*vpp = newvp;
	}
	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */
		VTONFS(dvp)->n_flag |= NMODIFIED;
		if (!wccflag)
			VTONFS(dvp)->n_attrstamp = 0;
	}
	vput(dvp);
	return (error);
}

/*
 * nfs mknod vop
 * just call nfs_mknodrpc() to do the work.
 */
/* ARGSUSED */
static int
nfs_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct vnode *newvp;
	int error;

	error = nfs_mknodrpc(ap->a_dvp, &newvp, ap->a_cnp, ap->a_vap);
	/* The mknod VOP does not return the vnode; release it on success. */
	if (!error)
		vput(newvp);
	return (error);
}

/* Monotonic counter folded into the v3 exclusive-create verifier. */
static u_long create_verf;
/*
 * nfs file create call
 * VOP_CREATE: issues a CREATE RPC (exclusive when VA_EXCLUSIVE is set,
 * falling back to unchecked if the server lacks exclusive support) and
 * returns the new vnode in *ap->a_vpp.  dvp is consumed (vput).
 */
static int
nfs_create(ap)
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	register struct vnode *dvp = ap->a_dvp;
	register struct vattr *vap = ap->a_vap;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR, gotvp = 0, fmode = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vattr vattr;
	int v3 = NFS_ISV3(dvp);

	/*
	 * Oops, not for me..
	 * Sockets are created through the mknod path instead.
	 */
	if (vap->va_type == VSOCK)
		return (nfs_mknodrpc(dvp, ap->a_vpp, cnp, vap));

	/* Fetch directory attributes to inherit the group id. */
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc))) {
		VOP_ABORTOP(dvp, cnp);
		vput(dvp);
		return (error);
	}
	if (vap->va_vaflags & VA_EXCLUSIVE)
		fmode |= O_EXCL;
again:
	nfsstats.rpccnt[NFSPROC_CREATE]++;
	nfsm_reqhead(dvp, NFSPROC_CREATE, NFSX_FH(v3) + 2 * NFSX_UNSIGNED +
		nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(tl, u_long *, NFSX_UNSIGNED);
		if (fmode & O_EXCL) {
			/*
			 * Exclusive create: the verifier is our primary IP
			 * address (when available) plus a counter, so retries
			 * of the same create match but new creates differ.
			 */
			*tl = txdr_unsigned(NFSV3CREATE_EXCLUSIVE);
			nfsm_build(tl, u_long *, NFSX_V3CREATEVERF);
			if (!TAILQ_EMPTY(&in_ifaddrhead))
				*tl++ = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr;
			else
				*tl++ = create_verf;
			*tl = ++create_verf;
		} else {
			*tl = txdr_unsigned(NFSV3CREATE_UNCHECKED);
			nfsm_build(tl, u_long *, NFSX_V3SRVSATTR);
			sp3 = (struct nfsv3_sattr *)tl;
			nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid);
		}
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(vap->va_type, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(vattr.va_gid);
		sp->sa_size = 0;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_CREATE, cnp->cn_proc, cnp->cn_cred);
	if (!error) {
		nfsm_mtofh(dvp, newvp, v3, gotvp);
		if (!gotvp) {
			/* Server gave no handle back: look the name up. */
			if (newvp) {
				vput(newvp);
				newvp = (struct vnode *)0;
			}
			error = nfs_lookitup(dvp, cnp->cn_nameptr,
			    cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc, &np);
			if (!error)
				newvp = NFSTOV(np);
		}
	}
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (error) {
		/* Server doesn't do exclusive create: retry unchecked. */
		if (v3 && (fmode & O_EXCL) && error == NFSERR_NOTSUPP) {
			fmode &= ~O_EXCL;
			goto again;
		}
		if (newvp)
			vput(newvp);
	} else if (v3 && (fmode & O_EXCL))
		/* Exclusive create ignores sattr; apply attributes now. */
		error = nfs_setattrrpc(newvp, vap, cnp->cn_cred, cnp->cn_proc);
	if (!error) {
		if (cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, newvp, cnp);
		*ap->a_vpp = newvp;
	}
	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */
		VTONFS(dvp)->n_flag |= NMODIFIED;
		if (!wccflag)
			VTONFS(dvp)->n_attrstamp = 0;
	}
	vput(dvp);
	return (error);
}

/*
 * nfs file remove call
 * To try and make nfs semantics closer to ufs semantics, a file that has
 * other processes using the vnode is renamed instead of removed and then
 * removed later on the last close.
 * - If v_usecount > 1
 *	  If a rename is not already in the works
 *	    call nfs_sillyrename() to set it up
 *     else
 *	  do the remove rpc
 */
static int
nfs_remove(ap)
	struct vop_remove_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_dvp;
		struct vnode * a_vp;
		struct componentname * a_cnp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct vnode *dvp = ap->a_dvp;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsnode *np = VTONFS(vp);
	int error = 0;
	struct vattr vattr;
	int file_deleted = 0;

#if DIAGNOSTIC
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("nfs_remove: no name");
	if (vp->v_usecount < 1)
		panic("nfs_remove: bad v_usecount");
#endif
	/*
	 * Remove directly when we hold the only reference (UBC may hold
	 * one extra), or when a sillyrename already exists and the file
	 * still has other links.
	 */
	if (vp->v_usecount == 1 ||
	    (UBCISVALID(vp) && (vp->v_usecount == 2)) ||
	    (np->n_sillyrename &&
	     VOP_GETATTR(vp, &vattr, cnp->cn_cred, cnp->cn_proc) == 0 &&
	     vattr.va_nlink > 1)) {
		/*
		 * Purge the name cache so that the chance of a lookup for
		 * the name succeeding while the remove is in progress is
		 * minimized. Without node locking it can still happen, such
		 * that an I/O op returns ESTALE, but since you get this if
		 * another host removes the file..
		 */
		cache_purge(vp);
		/*
		 * throw away biocache buffers, mainly to avoid
		 * unnecessary delayed writes later.
		 */
		error = nfs_vinvalbuf(vp, 0, cnp->cn_cred, cnp->cn_proc, 1);
		ubc_setsize(vp, (off_t)0);
		/* Do the rpc */
		if (error != EINTR)
			error = nfs_removerpc(dvp, cnp->cn_nameptr,
				cnp->cn_namelen, cnp->cn_cred, cnp->cn_proc);
		/*
		 * Kludge City: If the first reply to the remove rpc is lost..
		 *   the reply to the retransmitted request will be ENOENT
		 *   since the file was in fact removed
		 *   Therefore, we cheat and return success.
		 */
		if (error == ENOENT)
			error = 0;
		file_deleted = 1;
	} else if (!np->n_sillyrename) {
		error = nfs_sillyrename(dvp, vp, cnp);
	}

	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	np->n_attrstamp = 0;	/* force fresh attributes next time */
	vput(dvp);


	if (vp == dvp)
		vrele(vp);
	else
		vput(vp);

	if (file_deleted && UBCINFOEXISTS(vp)) {
		(void) ubc_uncache(vp);
		ubc_release(vp);
		/* WARNING vp may not be valid after this */
	}

	return (error);
}

/*
 * nfs file remove rpc called from nfs_inactive
 * Removes the sillyrenamed placeholder file described by sp.
 */
int
nfs_removeit(sp)
	register struct sillyrename *sp;
{

	return (nfs_removerpc(sp->s_dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		(struct proc *)0));
}

/*
 * Nfs remove rpc, called from nfs_remove() and nfs_removeit().
 */
static int
nfs_removerpc(dvp, name, namelen, cred, proc)
	register struct vnode *dvp;
	char *name;
	int namelen;
	struct ucred *cred;
	struct proc *proc;
{
	register u_long *tl;		/* scratch for nfsm_* macros */
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_REMOVE]++;
	nfsm_reqhead(dvp, NFSPROC_REMOVE,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(namelen));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(name, namelen, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_REMOVE, proc, cred);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */
		VTONFS(dvp)->n_flag |= NMODIFIED;
		if (!wccflag)
			VTONFS(dvp)->n_attrstamp = 0;
	}
	return (error);
}

/*
 * nfs file rename call
 * VOP_RENAME: sillyrenames a busy target, then issues the RENAME RPC.
 * Releases all four vnode references on every path.
 */
static int
nfs_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	register struct vnode *fvp = ap->a_fvp;
	register struct vnode *tvp = ap->a_tvp;
	register struct vnode *fdvp = ap->a_fdvp;
	register struct vnode *tdvp = ap->a_tdvp;
	register struct componentname *tcnp = ap->a_tcnp;
	register struct componentname *fcnp = ap->a_fcnp;
	int error;

#if DIAGNOSTIC
	if ((tcnp->cn_flags & HASBUF) == 0 ||
	    (fcnp->cn_flags & HASBUF) == 0)
		panic("nfs_rename: no name");
#endif
	/* Check for cross-device rename */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * If the tvp exists and is in use, sillyrename it before doing the
	 * rename of the new file over it.
	 * XXX Can't sillyrename a directory.
	 */
	if (tvp && (tvp->v_usecount > (UBCISVALID(tvp) ? 2 : 1)) &&
		!VTONFS(tvp)->n_sillyrename &&
		tvp->v_type != VDIR && !nfs_sillyrename(tdvp, tvp, tcnp)) {
		vput(tvp);
		tvp = NULL;
	}

	error = nfs_renamerpc(fdvp, fcnp->cn_nameptr, fcnp->cn_namelen,
		tdvp, tcnp->cn_nameptr, tcnp->cn_namelen, tcnp->cn_cred,
		tcnp->cn_proc);

	/* Renaming a directory invalidates cached parent entries. */
	if (fvp->v_type == VDIR) {
		if (tvp != NULL && tvp->v_type == VDIR)
			cache_purge(tdvp);
		cache_purge(fdvp);
	}
out:
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	vrele(fdvp);
	vrele(fvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs file rename rpc called from nfs_remove() above
 * Renames scnp's name to the sillyrename placeholder name in sp,
 * within the same directory sdvp.
 */
static int
nfs_renameit(sdvp, scnp, sp)
	struct vnode *sdvp;
	struct componentname *scnp;
	register struct sillyrename *sp;
{
	return (nfs_renamerpc(sdvp, scnp->cn_nameptr, scnp->cn_namelen,
		sdvp, sp->s_name, sp->s_namlen, scnp->cn_cred, scnp->cn_proc));
}

/*
 * Do an nfs rename rpc. Called from nfs_rename() and nfs_renameit().
 */
static int
nfs_renamerpc(fdvp, fnameptr, fnamelen, tdvp, tnameptr, tnamelen, cred, proc)
	register struct vnode *fdvp;
	char *fnameptr;
	int fnamelen;
	register struct vnode *tdvp;
	char *tnameptr;
	int tnamelen;
	struct ucred *cred;
	struct proc *proc;
{
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, fwccflag = NFSV3_WCCRATTR, twccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(fdvp);

	nfsstats.rpccnt[NFSPROC_RENAME]++;
	nfsm_reqhead(fdvp, NFSPROC_RENAME,
		(NFSX_FH(v3) + NFSX_UNSIGNED)*2 + nfsm_rndup(fnamelen) +
		nfsm_rndup(tnamelen));
	nfsm_fhtom(fdvp, v3);
	nfsm_strtom(fnameptr, fnamelen, NFS_MAXNAMLEN);
	nfsm_fhtom(tdvp, v3);
	nfsm_strtom(tnameptr, tnamelen, NFS_MAXNAMLEN);
	nfsm_request(fdvp, NFSPROC_RENAME, proc, cred);
	if (v3) {
		nfsm_wcc_data(fdvp, fwccflag);
		nfsm_wcc_data(tdvp, twccflag);
	}
	nfsm_reqdone;
	if (fdvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */
		VTONFS(fdvp)->n_flag |= NMODIFIED;
		if (!fwccflag)
			VTONFS(fdvp)->n_attrstamp = 0;
	}
	if (tdvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */
		VTONFS(tdvp)->n_flag |= NMODIFIED;
		if (!twccflag)
			VTONFS(tdvp)->n_attrstamp = 0;
	}
	return (error);
}

/*
 * nfs hard link create call
 * VOP_LINK: flushes vp's dirty data first so server-side attributes
 * stay coherent, then issues the LINK RPC.  tdvp is released (vput).
 */
static int
nfs_link(ap)
	struct vop_link_args /* {
		struct vnode *a_vp;
		struct vnode *a_tdvp;
		struct componentname *a_cnp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct vnode *tdvp = ap->a_tdvp;
	register struct componentname *cnp = ap->a_cnp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR, attrflag = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(vp);

	if (vp->v_mount != tdvp->v_mount) {
		VOP_ABORTOP(vp, cnp);
		if (tdvp == vp)
			vrele(tdvp);
		else
			vput(tdvp);
		return (EXDEV);
	}

	/*
	 * Push all writes to the server, so that the attribute cache
	 * doesn't get "out of sync" with the server.
	 * XXX There should be a better way!
	 */
	VOP_FSYNC(vp, cnp->cn_cred, MNT_WAIT, cnp->cn_proc);

	nfsstats.rpccnt[NFSPROC_LINK]++;
	nfsm_reqhead(vp, NFSPROC_LINK,
		NFSX_FH(v3)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
	nfsm_fhtom(vp, v3);
	nfsm_fhtom(tdvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	nfsm_request(vp, NFSPROC_LINK, cnp->cn_proc, cnp->cn_cred);
	if (v3) {
		nfsm_postop_attr(vp, attrflag);
		nfsm_wcc_data(tdvp, wccflag);
	}
	nfsm_reqdone;
	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);

	/*
	 * NOTE(review): unlike the attrstamp updates below, this NMODIFIED
	 * update is not guarded by a VBAD check — confirm intentional.
	 */
	VTONFS(tdvp)->n_flag |= NMODIFIED;
	if ((!attrflag) && (vp->v_type != VBAD)) /* EINVAL set on VBAD vnode */
		VTONFS(vp)->n_attrstamp = 0;
	if ((!wccflag) && (tdvp->v_type != VBAD)) /* EINVAL set on VBAD vnode */
		VTONFS(tdvp)->n_attrstamp = 0;
	vput(tdvp);
	/*
	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
	 */
	if (error == EEXIST)
		error = 0;
	return (error);
}

/*
 * nfs symbolic link create call
 * VOP_SYMLINK: issues the SYMLINK RPC; any vnode the server returns is
 * released, and *ap->a_vpp is not set.  dvp is consumed (vput).
 */
static int
nfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	register struct vnode *dvp = ap->a_dvp;
	register struct vattr *vap = ap->a_vap;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int slen, error = 0, wccflag = NFSV3_WCCRATTR, gotvp;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vnode *newvp = (struct vnode *)0;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_SYMLINK]++;
	slen = strlen(ap->a_target);
	nfsm_reqhead(dvp, NFSPROC_SYMLINK, NFSX_FH(v3) + 2*NFSX_UNSIGNED +
	    nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	/* v3 puts the sattr before the target path; v2 puts it after. */
	if (v3) {
		nfsm_build(sp3, struct nfsv3_sattr *, NFSX_V3SRVSATTR);
		nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid,
			cnp->cn_cred->cr_gid);
	}
	nfsm_strtom(ap->a_target, slen, NFS_MAXPATHLEN);
	if (!v3) {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(VLNK, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(cnp->cn_cred->cr_gid);
		sp->sa_size = -1;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_SYMLINK, cnp->cn_proc, cnp->cn_cred);
	if (v3) {
		if (!error)
			nfsm_mtofh(dvp, newvp, v3, gotvp);
		nfsm_wcc_data(dvp, wccflag);
	}
	nfsm_reqdone;
	if (newvp)
		vput(newvp);
	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	if (dvp->v_type != VBAD) { /* EINVAL set on VBAD vnode */
		VTONFS(dvp)->n_flag |= NMODIFIED;
		if (!wccflag)
			VTONFS(dvp)->n_attrstamp = 0;
	}
	vput(dvp);
	/*
	 * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry.
	 */
	if (error == EEXIST)
		error = 0;
	return (error);
}

/*
 * nfs make dir call
 * VOP_MKDIR: issues the MKDIR RPC and returns the new directory vnode
 * in *ap->a_vpp on success.  dvp is consumed (vput).
 */
static int
nfs_mkdir(ap)
	struct vop_mkdir_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	register struct vnode *dvp = ap->a_dvp;
	register struct vattr *vap = ap->a_vap;
	register struct componentname *cnp = ap->a_cnp;
	register struct nfsv2_sattr *sp;
	register struct nfsv3_sattr *sp3;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register int len;
	struct nfsnode *np = (struct nfsnode *)0;
	struct vnode *newvp = (struct vnode *)0;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	int gotvp = 0;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	struct vattr vattr;
	int v3 = NFS_ISV3(dvp);

	/* Fetch directory attributes to inherit the group id. */
	if ((error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc))) {
		VOP_ABORTOP(dvp, cnp);
		vput(dvp);
		return (error);
	}
	len = cnp->cn_namelen;
	nfsstats.rpccnt[NFSPROC_MKDIR]++;
	nfsm_reqhead(dvp, NFSPROC_MKDIR,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len) + NFSX_SATTR(v3));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, len, NFS_MAXNAMLEN);
	if (v3) {
		nfsm_build(sp3, struct nfsv3_sattr *, NFSX_V3SRVSATTR);
		nfsm_v3sattr(sp3, vap, cnp->cn_cred->cr_uid, vattr.va_gid);
	} else {
		nfsm_build(sp, struct nfsv2_sattr *, NFSX_V2SATTR);
		sp->sa_mode = vtonfsv2_mode(VDIR, vap->va_mode);
		sp->sa_uid = txdr_unsigned(cnp->cn_cred->cr_uid);
		sp->sa_gid = txdr_unsigned(vattr.va_gid);
		sp->sa_size = -1;
		txdr_nfsv2time(&vap->va_atime, &sp->sa_atime);
		txdr_nfsv2time(&vap->va_mtime, &sp->sa_mtime);
	}
	nfsm_request(dvp, NFSPROC_MKDIR, cnp->cn_proc, cnp->cn_cred);
	if (!error)
		nfsm_mtofh(dvp, newvp, v3, gotvp);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	if (dvp->v_type != VBAD) { /* EINVAL set on this case */
		VTONFS(dvp)->n_flag |= NMODIFIED;
		if (!wccflag)
			VTONFS(dvp)->n_attrstamp = 0;
	}
	/*
	 * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry
	 * if we can succeed in looking up the directory.
	 */
	if (error == EEXIST || (!error && !gotvp)) {
		if (newvp) {
			vrele(newvp);
			newvp = (struct vnode *)0;
		}
		error = nfs_lookitup(dvp, cnp->cn_nameptr, len, cnp->cn_cred,
			cnp->cn_proc, &np);
		if (!error) {
			newvp = NFSTOV(np);
			if (newvp->v_type != VDIR)
				error = EEXIST;
		}
	}
	if (error) {
		if (newvp)
			vrele(newvp);
	} else
		*ap->a_vpp = newvp;
	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	vput(dvp);
	return (error);
}

/*
 * nfs remove directory call
 * VOP_RMDIR: issues the RMDIR RPC, purges the name cache for both
 * vnodes, and releases vp and dvp.
 */
static int
nfs_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct vnode *dvp = ap->a_dvp;
	register struct componentname *cnp = ap->a_cnp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_RMDIR]++;
	nfsm_reqhead(dvp, NFSPROC_RMDIR,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(cnp->cn_nameptr, cnp->cn_namelen, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_RMDIR, cnp->cn_proc, cnp->cn_cred);
	if (v3)
		nfsm_wcc_data(dvp, wccflag);
	nfsm_reqdone;
	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	if (dvp->v_type != VBAD) { /* EINVAL set on this case */
		VTONFS(dvp)->n_flag |= NMODIFIED;
		if (!wccflag)
			VTONFS(dvp)->n_attrstamp = 0;
	}
	cache_purge(dvp);
	cache_purge(vp);
	vput(vp);
	vput(dvp);
	/*
	 * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry.
	 */
	if (error == ENOENT)
		error = 0;
	return (error);
}

/*
 * nfs readdir call
 * VOP_READDIR: checks the cached end-of-directory offset before
 * deferring the real work to nfs_bioread().
 */
static int
nfs_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);
	register struct uio *uio = ap->a_uio;
	int tresid, error;
	struct vattr vattr;

	if (vp->v_type != VDIR)
		return (EPERM);
	/*
	 * First, check for hit on the EOF offset cache
	 */
	if (np->n_direofoffset > 0 && uio->uio_offset >= np->n_direofoffset &&
	    (np->n_flag & NMODIFIED) == 0) {
		if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS) {
			if (NQNFS_CKCACHABLE(vp, ND_READ)) {
				nfsstats.direofcache_hits++;
				return (0);
			}
		} else if (VOP_GETATTR(vp, &vattr, ap->a_cred, uio->uio_procp) == 0 &&
			np->n_mtime == vattr.va_mtime.tv_sec) {
			nfsstats.direofcache_hits++;
			return (0);
		}
	}

	/*
	 * Call nfs_bioread() to do the real work.
	 */
	tresid = uio->uio_resid;
	error = nfs_bioread(vp, uio, 0, ap->a_cred, 0);

	/* Unchanged resid means nothing was transferred — a cache miss. */
	if (!error && uio->uio_resid == tresid)
		nfsstats.direofcache_misses++;
	return (error);
}

/*
 * Readdir rpc call.
 * Called from below the buffer cache by nfs_doio().
 */
int
nfs_readdirrpc(vp, uiop, cred)
	struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;

{
	register int len, left;
	register struct dirent *dp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register nfsuint64 *cookiep;
	caddr_t bpos, dpos, cp2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	nfsuint64 cookie;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *dnp = VTONFS(vp);
	u_quad_t fileno;
	int error = 0, tlen, more_dirs = 1, blksiz = 0, bigenough = 1;
	int attrflag;
	int v3 = NFS_ISV3(vp);

#ifndef nolint
	dp = (struct dirent *)0;
#endif
#if DIAGNOSTIC
	/* caller must hand us a single iovec aligned to DIRBLKSIZ */
	if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (NFS_DIRBLKSIZ - 1)) ||
		(uiop->uio_resid & (NFS_DIRBLKSIZ - 1)))
		panic("nfs_readdirrpc: bad uio");
#endif

	/*
	 * If there is no cookie, assume directory was stale.
	 */
	cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep)
		cookie = *cookiep;
	else
		return (NFSERR_BAD_COOKIE);
	/*
	 * Loop around doing readdir rpc's of size nm_readdirsize
	 * truncated to a multiple of DIRBLKSIZ.
	 * The stopping criteria is EOF or buffer full.
	 */
	while (more_dirs && bigenough) {
		nfsstats.rpccnt[NFSPROC_READDIR]++;
		nfsm_reqhead(vp, NFSPROC_READDIR, NFSX_FH(v3) +
			NFSX_READDIR(v3));
		nfsm_fhtom(vp, v3);
		if (v3) {
			/* v3 args: 64-bit cookie + cookie verifier + count */
			nfsm_build(tl, u_long *, 5 * NFSX_UNSIGNED);
			*tl++ = cookie.nfsuquad[0];
			*tl++ = cookie.nfsuquad[1];
			*tl++ = dnp->n_cookieverf.nfsuquad[0];
			*tl++ = dnp->n_cookieverf.nfsuquad[1];
		} else {
			/* v2 args: 32-bit cookie + count */
			nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED);
			*tl++ = cookie.nfsuquad[0];
		}
		*tl = txdr_unsigned(nmp->nm_readdirsize);
		nfsm_request(vp, NFSPROC_READDIR, uiop->uio_procp, cred);
		if (v3) {
			nfsm_postop_attr(vp, attrflag);
			if (!error) {
				/* remember server's cookie verifier for next call */
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
				dnp->n_cookieverf.nfsuquad[0] = *tl++;
				dnp->n_cookieverf.nfsuquad[1] = *tl;
			} else {
				m_freem(mrep);
				goto nfsmout;
			}
		}
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		more_dirs = fxdr_unsigned(int, *tl);

		/* loop thru the dir entries, doctoring them to 4bsd form */
		while (more_dirs && bigenough) {
			if (v3) {
				nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
				fxdr_hyper(tl, &fileno);
				len = fxdr_unsigned(int, *(tl + 2));
			} else {
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
				fileno = fxdr_unsigned(u_quad_t, *tl++);
				len = fxdr_unsigned(int, *tl);
			}
			if (len <= 0 || len > NFS_MAXNAMLEN) {
				error = EBADRPC;
				m_freem(mrep);
				goto nfsmout;
			}
			tlen = nfsm_rndup(len);
			if (tlen == len)
				tlen += 4;	/* To ensure null termination */
			left = DIRBLKSIZ - blksiz;
			if ((tlen + DIRHDSIZ) > left) {
				/* entry won't fit in this dirblk: pad out the
				 * previous record and start a fresh block */
				dp->d_reclen += left;
				uiop->uio_iov->iov_base += left;
				uiop->uio_iov->iov_len -= left;
				uiop->uio_offset += left;
				uiop->uio_resid -= left;
				blksiz = 0;
			}
			if ((tlen + DIRHDSIZ) > uiop->uio_resid)
				bigenough = 0;
			if (bigenough) {
				dp = (struct dirent *)uiop->uio_iov->iov_base;
				dp->d_fileno = (int)fileno;
				dp->d_namlen = len;
				dp->d_reclen = tlen + DIRHDSIZ;
				dp->d_type = DT_UNKNOWN;
				blksiz += dp->d_reclen;
				if (blksiz == DIRBLKSIZ)
					blksiz = 0;
				uiop->uio_offset += DIRHDSIZ;
				uiop->uio_resid -= DIRHDSIZ;
				uiop->uio_iov->iov_base += DIRHDSIZ;
				uiop->uio_iov->iov_len -= DIRHDSIZ;
				/* copy the name out of the mbuf chain */
				nfsm_mtouio(uiop, len);
				cp = uiop->uio_iov->iov_base;
				tlen -= len;
				*cp = '\0';	/* null terminate */
				uiop->uio_iov->iov_base += tlen;
				uiop->uio_iov->iov_len -= tlen;
				uiop->uio_offset += tlen;
				uiop->uio_resid -= tlen;
			} else
				nfsm_adv(nfsm_rndup(len));
			if (v3) {
				nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
			} else {
				nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED);
			}
			if (bigenough) {
				cookie.nfsuquad[0] = *tl++;
				if (v3)
					cookie.nfsuquad[1] = *tl++;
			} else if (v3)
				tl += 2;
			else
				tl++;
			more_dirs = fxdr_unsigned(int, *tl);
		}
		/*
		 * If at end of rpc data, get the eof boolean
		 */
		if (!more_dirs) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			more_dirs = (fxdr_unsigned(int, *tl) == 0);
		}
		m_freem(mrep);
	}
	/*
	 * Fill last record, if any, out to a multiple of DIRBLKSIZ
	 * by increasing d_reclen for the last record.
	 */
	if (blksiz > 0) {
		left = DIRBLKSIZ - blksiz;
		dp->d_reclen += left;
		uiop->uio_iov->iov_base += left;
		uiop->uio_iov->iov_len -= left;
		uiop->uio_offset += left;
		uiop->uio_resid -= left;
	}

	/*
	 * We are now either at the end of the directory or have filled the
	 * block.
	 */
	if (bigenough)
		dnp->n_direofoffset = uiop->uio_offset;
	else {
		if (uiop->uio_resid > 0)
			printf("EEK! readdirrpc resid > 0\n");
		/* stash the continuation cookie for the next block */
		cookiep = nfs_getcookie(dnp, uiop->uio_offset, 1);
		*cookiep = cookie;
	}
nfsmout:
	return (error);
}

/*
 * NFS V3 readdir plus RPC. Used in place of nfs_readdirrpc().
 */
int
nfs_readdirplusrpc(vp, uiop, cred)
	struct vnode *vp;
	register struct uio *uiop;
	struct ucred *cred;
{
	register int len, left;
	register struct dirent *dp;
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	register struct vnode *newvp;
	register nfsuint64 *cookiep;
	caddr_t bpos, dpos, cp2, dpossav1, dpossav2;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2, *mdsav1, *mdsav2;
	struct nameidata nami, *ndp = &nami;
	struct componentname *cnp = &ndp->ni_cnd;
	nfsuint64 cookie;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsnode *dnp = VTONFS(vp), *np;
	nfsfh_t *fhp;
	u_quad_t fileno;
	int error = 0, tlen, more_dirs = 1, blksiz = 0, doit, bigenough = 1, i;
	int attrflag, fhsize;

#ifndef nolint
	dp = (struct dirent *)0;
#endif
#if DIAGNOSTIC
	/* caller must hand us a single iovec aligned to DIRBLKSIZ */
	if (uiop->uio_iovcnt != 1 || (uiop->uio_offset & (DIRBLKSIZ - 1)) ||
		(uiop->uio_resid & (DIRBLKSIZ - 1)))
		panic("nfs_readdirplusrpc: bad uio");
#endif
	ndp->ni_dvp = vp;
	newvp = NULLVP;

	/*
	 * If there is no cookie, assume directory was stale.
	 */
	cookiep = nfs_getcookie(dnp, uiop->uio_offset, 0);
	if (cookiep)
		cookie = *cookiep;
	else
		return (NFSERR_BAD_COOKIE);
	/*
	 * Loop around doing readdir rpc's of size nm_readdirsize
	 * truncated to a multiple of DIRBLKSIZ.
	 * The stopping criteria is EOF or buffer full.
	 */
	while (more_dirs && bigenough) {
		nfsstats.rpccnt[NFSPROC_READDIRPLUS]++;
		nfsm_reqhead(vp, NFSPROC_READDIRPLUS,
			NFSX_FH(1) + 6 * NFSX_UNSIGNED);
		nfsm_fhtom(vp, 1);
		/* args: cookie, cookie verifier, dircount, maxcount */
		nfsm_build(tl, u_long *, 6 * NFSX_UNSIGNED);
		*tl++ = cookie.nfsuquad[0];
		*tl++ = cookie.nfsuquad[1];
		*tl++ = dnp->n_cookieverf.nfsuquad[0];
		*tl++ = dnp->n_cookieverf.nfsuquad[1];
		*tl++ = txdr_unsigned(nmp->nm_readdirsize);
		*tl = txdr_unsigned(nmp->nm_rsize);
		nfsm_request(vp, NFSPROC_READDIRPLUS, uiop->uio_procp, cred);
		nfsm_postop_attr(vp, attrflag);
		if (error) {
			m_freem(mrep);
			goto nfsmout;
		}
		nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
		dnp->n_cookieverf.nfsuquad[0] = *tl++;
		dnp->n_cookieverf.nfsuquad[1] = *tl++;
		more_dirs = fxdr_unsigned(int, *tl);

		/* loop thru the dir entries, doctoring them to 4bsd form */
		while (more_dirs && bigenough) {
			nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
			fxdr_hyper(tl, &fileno);
			len = fxdr_unsigned(int, *(tl + 2));
			if (len <= 0 || len > NFS_MAXNAMLEN) {
				error = EBADRPC;
				m_freem(mrep);
				goto nfsmout;
			}
			tlen = nfsm_rndup(len);
			if (tlen == len)
				tlen += 4;	/* To ensure null termination*/
			left = DIRBLKSIZ - blksiz;
			if ((tlen + DIRHDSIZ) > left) {
				/* entry won't fit: pad previous record out to
				 * the dirblk boundary and start a new block */
				dp->d_reclen += left;
				uiop->uio_iov->iov_base += left;
				uiop->uio_iov->iov_len -= left;
				uiop->uio_offset += left;
				uiop->uio_resid -= left;
				blksiz = 0;
			}
			if ((tlen + DIRHDSIZ) > uiop->uio_resid)
				bigenough = 0;
			if (bigenough) {
				dp = (struct dirent *)uiop->uio_iov->iov_base;
				dp->d_fileno = (int)fileno;
				dp->d_namlen = len;
				dp->d_reclen = tlen + DIRHDSIZ;
				dp->d_type = DT_UNKNOWN;
				blksiz += dp->d_reclen;
				if (blksiz == DIRBLKSIZ)
					blksiz = 0;
				uiop->uio_offset += DIRHDSIZ;
				uiop->uio_resid -= DIRHDSIZ;
				uiop->uio_iov->iov_base += DIRHDSIZ;
				uiop->uio_iov->iov_len -= DIRHDSIZ;
				/* remember where the name lands so it can be
				 * used for the name-cache entry below */
				cnp->cn_nameptr = uiop->uio_iov->iov_base;
				cnp->cn_namelen = len;
				nfsm_mtouio(uiop, len);
				cp = uiop->uio_iov->iov_base;
				tlen -= len;
				*cp = '\0';
				uiop->uio_iov->iov_base += tlen;
				uiop->uio_iov->iov_len -= tlen;
				uiop->uio_offset += tlen;
				uiop->uio_resid -= tlen;
			} else
				nfsm_adv(nfsm_rndup(len));
			nfsm_dissect(tl, u_long *, 3 * NFSX_UNSIGNED);
			if (bigenough) {
				cookie.nfsuquad[0] = *tl++;
				cookie.nfsuquad[1] = *tl++;
			} else
				tl += 2;

			/*
			 * Since the attributes are before the file handle
			 * (sigh), we must skip over the attributes and then
			 * come back and get them.
			 */
			attrflag = fxdr_unsigned(int, *tl);
			if (attrflag) {
				dpossav1 = dpos;
				mdsav1 = md;
				nfsm_adv(NFSX_V3FATTR);
				nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
				doit = fxdr_unsigned(int, *tl);
				if (doit) {
					nfsm_getfh(fhp, fhsize, 1);
					if (NFS_CMPFH(dnp, fhp, fhsize)) {
						/* entry is the directory itself */
						VREF(vp);
						newvp = vp;
						np = dnp;
					} else {
						if ((error = nfs_nget(vp->v_mount, fhp,
							fhsize, &np)))
							doit = 0;
						else
							newvp = NFSTOV(np);
					}
				}
				if (doit) {
					/* rewind to the saved position, load the
					 * attributes, then restore the parse point */
					dpossav2 = dpos;
					dpos = dpossav1;
					mdsav2 = md;
					md = mdsav1;
					nfsm_loadattr(newvp, (struct vattr *)0);
					dpos = dpossav2;
					md = mdsav2;
					dp->d_type =
						IFTODT(VTTOIF(np->n_vattr.va_type));
					ndp->ni_vp = newvp;
					cnp->cn_hash = 0;
					for (cp = cnp->cn_nameptr, i = 1; i <= len;
						i++, cp++)
						cnp->cn_hash += (unsigned char)*cp * i;
					if (cnp->cn_namelen <= NCHNAMLEN)
						cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
				}
			} else {
				/* Just skip over the file handle */
				nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
				i = fxdr_unsigned(int, *tl);
				nfsm_adv(nfsm_rndup(i));
			}
			if (newvp != NULLVP) {
				vrele(newvp);
				newvp = NULLVP;
			}
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			more_dirs = fxdr_unsigned(int, *tl);
		}
		/*
		 * If at end of rpc data, get the eof boolean
		 */
		if (!more_dirs) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			more_dirs = (fxdr_unsigned(int, *tl) == 0);
		}
		m_freem(mrep);
	}
	/*
	 * Fill last record, if any, out to a multiple of NFS_DIRBLKSIZ
	 * by increasing d_reclen for the last record.
 */
static int
nfs_sillyrename(dvp, vp, cnp)
	struct vnode *dvp, *vp;
	struct componentname *cnp;
{
	register struct sillyrename *sp;
	struct nfsnode *np;
	int error;
	short pid;
	struct ucred *cred;

	cache_purge(dvp);
	np = VTONFS(vp);
#if DIAGNOSTIC
	if (vp->v_type == VDIR)
		panic("nfs_sillyrename: dir");
#endif
	MALLOC_ZONE(sp, struct sillyrename *,
		sizeof (struct sillyrename), M_NFSREQ, M_WAITOK);
	sp->s_cred = crdup(cnp->cn_cred);
	sp->s_dvp = dvp;
	VREF(dvp);

	/* Fudge together a funny name */
	pid = cnp->cn_proc->p_pid;
	sp->s_namlen = sprintf(sp->s_name, ".nfsA%04x4.4", pid);

	/* Try lookitups until we get one that isn't there.
	 * Uniquify by bumping s_name[4] (the character after ".nfs");
	 * give up with EINVAL once it walks past 'z'. */
	while (nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		cnp->cn_proc, (struct nfsnode **)0) == 0) {
		sp->s_name[4]++;
		if (sp->s_name[4] > 'z') {
			error = EINVAL;
			goto bad;
		}
	}
	if ((error = nfs_renameit(dvp, cnp, sp)))
		goto bad;
	/* refresh np's file handle to track the renamed object */
	error = nfs_lookitup(dvp, sp->s_name, sp->s_namlen, sp->s_cred,
		cnp->cn_proc, &np);
#if DIAGNOSTIC
	kprintf("sillyrename: %s, vp=%x, np=%x, dvp=%x\n",
		&sp->s_name[0], (unsigned)vp, (unsigned)np, (unsigned)dvp);
#endif
	/* nfs_inactive() will do the deferred remove when np goes idle */
	np->n_sillyrename = sp;
	return (0);
bad:
	vrele(sp->s_dvp);
	cred = sp->s_cred;
	sp->s_cred = NOCRED;
	crfree(cred);
	_FREE_ZONE((caddr_t)sp, sizeof (struct sillyrename), M_NFSREQ);
	return (error);
}

/*
 * Look up a file name and optionally either update the file handle or
 * allocate an nfsnode, depending on the value of npp.
 * npp == NULL --> just do the lookup
 * *npp == NULL --> allocate a new nfsnode and make sure attributes are
 *	handled too
 * *npp != NULL --> update the file handle in the vnode
 *
 * NOTE(review): the nfsm_* macros use the tl/cp/t1/t2/bpos/dpos/cp2/
 * mreq/mrep/md/mb/mb2 locals and may branch on error — see nfsm_subs.h.
 */
static int
nfs_lookitup(dvp, name, len, cred, procp, npp)
	register struct vnode *dvp;
	char *name;
	int len;
	struct ucred *cred;
	struct proc *procp;
	struct nfsnode **npp;
{
	register u_long *tl;
	register caddr_t cp;
	register long t1, t2;
	struct vnode *newvp = (struct vnode *)0;
	struct nfsnode *np, *dnp = VTONFS(dvp);
	caddr_t bpos, dpos, cp2;
	int error = 0, fhlen, attrflag;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;
	nfsfh_t *nfhp;
	int v3 = NFS_ISV3(dvp);

	nfsstats.rpccnt[NFSPROC_LOOKUP]++;
	nfsm_reqhead(dvp, NFSPROC_LOOKUP,
		NFSX_FH(v3) + NFSX_UNSIGNED + nfsm_rndup(len));
	nfsm_fhtom(dvp, v3);
	nfsm_strtom(name, len, NFS_MAXNAMLEN);
	nfsm_request(dvp, NFSPROC_LOOKUP, procp, cred);
	if (npp && !error) {
		nfsm_getfh(nfhp, fhlen, v3);
		if (*npp) {
			/* update the existing nfsnode's file handle,
			 * switching between inline and big-fh storage
			 * as the handle size dictates */
			np = *npp;
			if (np->n_fhsize > NFS_SMALLFH && fhlen <= NFS_SMALLFH) {
				_FREE_ZONE((caddr_t)np->n_fhp,
					np->n_fhsize, M_NFSBIGFH);
				np->n_fhp = &np->n_fh;
			} else if (np->n_fhsize <= NFS_SMALLFH && fhlen > NFS_SMALLFH)
				MALLOC_ZONE(np->n_fhp, nfsfh_t *,
					fhlen, M_NFSBIGFH, M_WAITOK);
			bcopy((caddr_t)nfhp, (caddr_t)np->n_fhp, fhlen);
			np->n_fhsize = fhlen;
			newvp = NFSTOV(np);
		} else if (NFS_CMPFH(dnp, nfhp, fhlen)) {
			/* looked up "." — reuse the directory vnode */
			VREF(dvp);
			newvp = dvp;
		} else {
			error = nfs_nget(dvp->v_mount, nfhp, fhlen, &np);
			if (error) {
				m_freem(mrep);
				return (error);
			}
			newvp = NFSTOV(np);
		}
		if (v3) {
			nfsm_postop_attr(newvp, attrflag);
			if (!attrflag && *npp == NULL) {
				/* new node but no attributes: give it back */
				m_freem(mrep);
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
				return (ENOENT);
			}
		} else
			nfsm_loadattr(newvp, (struct vattr *)0);
	}
	nfsm_reqdone;
	if (npp && *npp == NULL) {
		if (error) {
			if (newvp)
				if (newvp == dvp)
					vrele(newvp);
				else
					vput(newvp);
		} else
			*npp = np;
	}
	return (error);
}

/*
 * Nfs Version 3 commit rpc
 *
 * Asks the server to flush [offset, offset+cnt) to stable storage.
 * Returns 0 immediately if the mount has no write verifier yet; returns
 * NFSERR_STALEWRITEVERF (and records the new verifier) if the server's
 * write verifier changed, meaning the server rebooted and uncommitted
 * writes must be resent.
 */
static int
nfs_commit(vp, offset, cnt, cred, procp)
	register struct vnode *vp;
	u_quad_t offset;
	int cnt;
	struct ucred *cred;
	struct proc *procp;
{
	register caddr_t cp;
	register u_long *tl;
	register int t1, t2;
	register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	caddr_t bpos, dpos, cp2;
	int error = 0, wccflag = NFSV3_WCCRATTR;
	struct mbuf *mreq, *mrep, *md, *mb, *mb2;

	if ((nmp->nm_flag & NFSMNT_HASWRITEVERF) == 0)
		return (0);
	nfsstats.rpccnt[NFSPROC_COMMIT]++;
	nfsm_reqhead(vp, NFSPROC_COMMIT, NFSX_FH(1));
	nfsm_fhtom(vp, 1);
	nfsm_build(tl, u_long *, 3 * NFSX_UNSIGNED);
	txdr_hyper(&offset, tl);
	tl += 2;
	*tl = txdr_unsigned(cnt);
	nfsm_request(vp, NFSPROC_COMMIT, procp, cred);
	nfsm_wcc_data(vp, wccflag);
	if (!error) {
		nfsm_dissect(tl, u_long *, NFSX_V3WRITEVERF);
		if (bcmp((caddr_t)nmp->nm_verf, (caddr_t)tl,
			NFSX_V3WRITEVERF)) {
			bcopy((caddr_t)tl, (caddr_t)nmp->nm_verf,
				NFSX_V3WRITEVERF);
			error = NFSERR_STALEWRITEVERF;
		}
	}
	nfsm_reqdone;
	return (error);
}

/*
 * Kludge City..
 * - make nfs_bmap() essentially a no-op that does no translation
 * - do nfs_strategy() by doing I/O with nfs_readrpc/nfs_writerpc
 * (Maybe I could use the process's page mapping, but I was concerned that
 *  Kernel Write might not be enabled and also figured copyout() would do
 *  a lot more work than bcopy() and also it currently happens in the
 *  context of the swapper process (2).
 */
static int
nfs_bmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct vnode **a_vpp;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	int devBlockSize = DEV_BSIZE;

	if (ap->a_vpp != NULL)
		*ap->a_vpp = vp;
	if (ap->a_bnp != NULL)
		/* scale logical block number by iosize in device blocks */
		*ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize,
			devBlockSize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
#ifdef notyet
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
#endif
	return (0);
}

/*
 * Strategy routine.
 * For async requests when nfsiod(s) are running, queue the request by
 * calling nfs_asyncio(), otherwise just call nfs_doio() to do the
 * request.
 */
static int
nfs_strategy(ap)
	struct vop_strategy_args *ap;
{
	register struct buf *bp = ap->a_bp;
	struct ucred *cr;
	struct proc *p;
	int error = 0;

	if (ISSET(bp->b_flags, B_PHYS))
		panic("nfs_strategy: physio");
	if (ISSET(bp->b_flags, B_ASYNC))
		p = (struct proc *)0;
	else
		p = current_proc();	/* XXX */
	/* pick the credential matching the I/O direction */
	if (ISSET(bp->b_flags, B_READ))
		cr = bp->b_rcred;
	else
		cr = bp->b_wcred;
	/*
	 * If the op is asynchronous and an i/o daemon is waiting
	 * queue the request, wake it up and wait for completion
	 * otherwise just do it ourselves.
	 */
	if (!ISSET(bp->b_flags, B_ASYNC) || nfs_asyncio(bp, NOCRED))
		error = nfs_doio(bp, cr, p);
	return (error);
}

/*
 * Mmap a file
 *
 * NB Currently unsupported.
 */
/* ARGSUSED */
static int
nfs_mmap(ap)
	struct vop_mmap_args /* {
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	return (EINVAL);
}

/*
 * fsync vnode op. Just call nfs_flush() with commit == 1.
 */
/* ARGSUSED */
static int
nfs_fsync(ap)
	struct vop_fsync_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode * a_vp;
		struct ucred * a_cred;
		int  a_waitfor;
		struct proc * a_p;
	} */ *ap;
{

	return (nfs_flush(ap->a_vp, ap->a_cred, ap->a_waitfor, ap->a_p, 1));
}

/*
 * Flush all the blocks associated with a vnode.
 * Walk through the buffer pool and push any dirty pages
 * associated with the vnode.
 */
static int
nfs_flush(vp, cred, waitfor, p, commit)
	register struct vnode *vp;
	struct ucred *cred;
	int waitfor;
	struct proc *p;
	int commit;
{
	register struct nfsnode *np = VTONFS(vp);
	register struct buf *bp;
	register int i;
	struct buf *nbp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int s, error = 0, slptimeo = 0, slpflag = 0, retv, bvecpos, err;
	int passone = 1;
	u_quad_t off, endoff, toff;
	struct ucred* wcred = NULL;
	struct buf **bvec = NULL;
	void * object;
	kern_return_t kret;
	upl_t *upls = NULL;


#ifndef NFS_COMMITBVECSIZ
#define NFS_COMMITBVECSIZ	20
#endif
	struct buf *bvec_on_stack[NFS_COMMITBVECSIZ];
	struct upl_t *upls_on_stack[NFS_COMMITBVECSIZ];
	int bvecsize = 0, bveccount, buplpos;

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	if (!commit)
		passone = 0;

	/*
	 * A b_flags == (B_DELWRI | B_NEEDCOMMIT) block has been written to the
	 * server, but has not been committed to stable storage on the server
	 * yet. On the first pass, the byte range is worked out and the commit
	 * rpc is done. On the second pass, nfs_writebp() is called to do the
	 * job.
	 */
again:
	if (vp->v_dirtyblkhd.lh_first)
		np->n_flag |= NMODIFIED;
	off = (u_quad_t)-1;
	endoff = 0;
	bvecpos = 0;
	buplpos = 0;
	if (NFS_ISV3(vp) && commit) {
		s = splbio();
		/*
		 * Count up how many buffers waiting for a commit.
		 */
		bveccount = 0;
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
				== (B_DELWRI | B_NEEDCOMMIT))
				bveccount++;
		}
		/*
		 * Allocate space to remember the list of bufs to commit. It is
		 * important to use M_NOWAIT here to avoid a race with nfs_write.
		 * If we can't get memory (for whatever reason), we will end up
		 * committing the buffers one-by-one in the loop below.
		 */
		if (bveccount > NFS_COMMITBVECSIZ) {
			if (bvec != NULL && bvec != bvec_on_stack)
				_FREE(bvec, M_TEMP);
			MALLOC(bvec, struct buf **,
				bveccount * sizeof(struct buf *), M_TEMP, M_NOWAIT);
			if (bvec == NULL) {
				/* allocation failed: fall back to stack array */
				bvec = bvec_on_stack;
				bvecsize = NFS_COMMITBVECSIZ;
			} else
				bvecsize = bveccount;
			/* allocate the upl structure before the loop based on buffers to commit */
			if (upls != NULL && upls != upls_on_stack)
				_FREE(upls, M_TEMP);
			MALLOC(upls, struct upl_t *,
				bveccount * sizeof(upl_t), M_TEMP, M_NOWAIT);
			if (upls == NULL)
				upls = upls_on_stack;
		} else {
			if (bvec && bvec != bvec_on_stack)
				_FREE(bvec, M_TEMP);
			bvec = bvec_on_stack;
			bvecsize = NFS_COMMITBVECSIZ;
			if (upls && upls != upls_on_stack)
				_FREE(upls, M_TEMP);
			upls = upls_on_stack;
		}

		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (bvecpos >= bvecsize)
				break;
			if ((bp->b_flags & (B_BUSY | B_DELWRI | B_NEEDCOMMIT))
				!= (B_DELWRI | B_NEEDCOMMIT))
				continue;
			bremfree(bp);
			/*
			 * Work out if all buffers are using the same cred
			 * so we can deal with them all with one commit.
			 */
			if (wcred == NULL)
				wcred = bp->b_wcred;
			else if (wcred != bp->b_wcred)
				wcred = NOCRED;
			SET(bp->b_flags, (B_BUSY | B_WRITEINPROG));

			/*
			 * we need vm_fault_list_request so that if vm decides
			 * to do paging while we are waiting on the commit rpc,
			 * it doesn't pick these pages.
			 */
			if (!ISSET(bp->b_flags, B_PAGELIST)) {
				/* if pagelist exists, assume vm pages are locked/busy already */
				off_t file_offset = ubc_blktooff(vp, bp->b_lblkno);
				object = ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT));
				if (object == (void*)NULL)
					panic("nfs_getcacheblk: NULL vmobject");
				if(bp->b_bufsize & 0xfff)
					panic("nfs_getcacheblk: list request is less than 4k");
				kret = vm_fault_list_request(
					object, (vm_object_offset_t)file_offset,
					bp->b_bufsize, &(upls[buplpos]), NULL, 0,
					(int)(UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |UPL_PRECIOUS |
					UPL_SET_INTERNAL));
				if (kret != KERN_SUCCESS)
					panic("nfs_getcacheblk: get pagelists failed with (%d)", kret);

#ifdef UBC_DEBUG
				upl_ubc_alias_set(pl, ioaddr, 1);
#endif /* UBC_DEBUG */
				buplpos++; /* not same as bvecpos if upl existed already */
			}

			/*
			 * A list of these buffers is kept so that the
			 * second loop knows which buffers have actually
			 * been committed. This is necessary, since there
			 * may be a race between the commit rpc and new
			 * uncommitted writes on the file.
			 */
			bvec[bvecpos++] = bp;
			toff = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
				bp->b_dirtyoff;
			if (toff < off)
				off = toff;
			toff += (u_quad_t)(bp->b_dirtyend - bp->b_dirtyoff);
			if (toff > endoff)
				endoff = toff;
		}
		splx(s);
	}
	if (bvecpos > 0) {
		/*
		 * Commit data on the server, as required.
		 * If all bufs are using the same wcred, then use that with
		 * one call for all of them, otherwise commit each one
		 * separately.
		 */
		if (wcred != NOCRED)
			retv = nfs_commit(vp, off, (int)(endoff - off),
				wcred, p);
		else {
			retv = 0;
			for (i = 0; i < bvecpos; i++) {
				off_t off, size;
				bp = bvec[i];
				off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE +
					bp->b_dirtyoff;
				size = (u_quad_t)(bp->b_dirtyend
					- bp->b_dirtyoff);
				retv = nfs_commit(vp, off, (int)size,
					bp->b_wcred, p);
				if (retv) break;
			}
		}

		if (retv == NFSERR_STALEWRITEVERF)
			nfs_clearcommit(vp->v_mount);

		for (i = 0; i < buplpos; i++) {
			/*
			 * before the VOP_BWRITE and biodone(ASYNC)/brelse, we have to undo
			 * holding the vm page or we will deadlock on another vm_fault_list_request.
			 * Here's a convenient place to put it.
			 * Better if we could hold it by setting the PAGELIST flag and kernel_upl_map
			 * as does nfs_writebp. Then normal biodones and brelse will clean it up and
			 * we can avoid this abort. For now make minimal changes and test this out.
			 */
			err = kernel_upl_abort(upls[i], NULL);
			if (err)
				printf("nfs_flush: kernel_upl_abort %d\n", err);
		}

		/*
		 * Now, either mark the blocks I/O done or mark the
		 * blocks dirty, depending on whether the commit
		 * succeeded.
		 */
		for (i = 0; i < bvecpos; i++) {

			bp = bvec[i];
			CLR(bp->b_flags, (B_NEEDCOMMIT | B_WRITEINPROG));
			if (retv) {
				/* commit failed: leave buffer dirty for pass two */
				brelse(bp);
			} else {
				/* commit succeeded: complete the buffer as clean */
				vp->v_numoutput++;
				SET(bp->b_flags, B_ASYNC);
				s = splbio();
				CLR(bp->b_flags, (B_READ|B_DONE|B_ERROR|B_DELWRI));
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				reassignbuf(bp, vp);
				splx(s);
				biodone(bp);
			}
		}

	}

	/*
	 * Start/do any write(s) that are required.
	 * There is a window here where B_BUSY protects the buffer. The vm pages have been
	 * freed up, yet B_BUSY is set. Don't think you will hit any busy/incore problems while
	 * we sleep, but not absolutely sure. Keep an eye on it. Otherwise we will have to hold
	 * vm page across this locked.
	 * - EKN
	 */
loop:
	if (current_thread_aborted()) {
		error = EINTR;
		goto done;
	}
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if (ISSET(bp->b_flags, B_BUSY)) {
			if (waitfor != MNT_WAIT || passone)
				continue;
			SET(bp->b_flags, B_WANTED);
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"nfsfsync", slptimeo);
			splx(s);
			if (error) {
				if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
					error = EINTR;
					goto done;
				}
				/* first interruption: retry once with a timeout */
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			goto loop;
		}
		if (!ISSET(bp->b_flags, B_DELWRI))
			panic("nfs_fsync: not dirty");
		if ((passone || !commit) && ISSET(bp->b_flags, B_NEEDCOMMIT))
			continue;
		bremfree(bp);
		if (passone || !commit)
			SET(bp->b_flags, (B_BUSY|B_ASYNC));
		else
			SET(bp->b_flags, (B_BUSY|B_ASYNC|B_WRITEINPROG|B_NEEDCOMMIT));

		splx(s);
		VOP_BWRITE(bp);
		goto loop;
	}
	splx(s);
	if (passone) {
		passone = 0;
		goto again;
	}
	if (waitfor == MNT_WAIT) {
		/* wait for all outstanding writes to drain */
		while (vp->v_numoutput) {
			vp->v_flag |= VBWAIT;
			error = tsleep((caddr_t)&vp->v_numoutput,
				slpflag | (PRIBIO + 1), "nfsfsync", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
					error = EINTR;
					goto done;
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
		}
		if (vp->v_dirtyblkhd.lh_first && commit) {
			goto loop;
		}
	}
	if (np->n_flag & NWRITEERR) {
		/* report (and clear) any deferred async write error */
		error = np->n_error;
		np->n_flag &= ~NWRITEERR;
	}
done:
	if (bvec != NULL && bvec != bvec_on_stack)
		_FREE(bvec, M_TEMP);
	if (upls != NULL && upls != upls_on_stack)
		_FREE(upls, M_TEMP);
	return (error);
}

/*
 * Return POSIX pathconf information applicable to nfs.
 *
 * The NFS V2 protocol doesn't support this, so just return EINVAL
 * for V2.
 */
/* ARGSUSED */
static int
nfs_pathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	return (EINVAL);
}

/*
 * NFS advisory byte-level locks.
 * Currently unsupported.
 */
static int
nfs_advlock(ap)
	struct vop_advlock_args /* {
		struct vnode *a_vp;
		caddr_t a_id;
		int a_op;
		struct flock *a_fl;
		int a_flags;
	} */ *ap;
{
#ifdef __FreeBSD__
	register struct nfsnode *np = VTONFS(ap->a_vp);

	/*
	 * The following kludge is to allow diskless support to work
	 * until a real NFS lockd is implemented. Basically, just pretend
	 * that this is a local lock.
	 */
	return (lf_advlock(ap, &(np->n_lockf), np->n_size));
#else
#if DIAGNOSTIC
	printf("nfs_advlock: pid %d comm %s\n", current_proc()->p_pid, current_proc()->p_comm);
#endif
	return (EOPNOTSUPP);
#endif
}

/*
 * Print out the contents of an nfsnode.
 */
static int
nfs_print(ap)
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct nfsnode *np = VTONFS(vp);

	printf("tag VT_NFS, fileid %ld fsid 0x%lx",
		np->n_vattr.va_fileid, np->n_vattr.va_fsid);
	if (vp->v_type == VFIFO)
		fifo_printinfo(vp);
	printf("\n");
	return (0);
}

/*
 * NFS directory offset lookup.
 * Currently unsupported.
 */
static int
nfs_blkatoff(ap)
	struct vop_blkatoff_args /* {
		struct vnode *a_vp;
		off_t a_offset;
		char **a_res;
		struct buf **a_bpp;
	} */ *ap;
{

#if DIAGNOSTIC
	printf("nfs_blkatoff: unimplemented!!");
#endif
	return (EOPNOTSUPP);
}

/*
 * NFS flat namespace allocation.
 * Currently unsupported.
 */
static int
nfs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{

	return (EOPNOTSUPP);
}

/*
 * NFS flat namespace free.
 * Currently unsupported.
 */
static int
nfs_vfree(ap)
	struct vop_vfree_args /* {
		struct vnode *a_pvp;
		ino_t a_ino;
		int a_mode;
	} */ *ap;
{

#if DIAGNOSTIC
	printf("nfs_vfree: unimplemented!!");
#endif
	return (EOPNOTSUPP);
}

/*
 * NFS file truncation.
 * Not implemented; truncation is done through nfs_setattr.
 */
static int
nfs_truncate(ap)
	struct vop_truncate_args /* {
		struct vnode *a_vp;
		off_t a_length;
		int a_flags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{

	/* Use nfs_setattr */
#if DIAGNOSTIC
	printf("nfs_truncate: unimplemented!!");
#endif
	return (EOPNOTSUPP);
}

/*
 * NFS update.
 * Not implemented; timestamp updates are done through nfs_setattr.
 */
static int
nfs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_ta;
		struct timeval *a_tm;
		int a_waitfor;
	} */ *ap;
{

	/* Use nfs_setattr */
#if DIAGNOSTIC
	printf("nfs_update: unimplemented!!");
#endif
	return (EOPNOTSUPP);
}

/* async-write handoff state shared with nfs_bwrite() below */
int nfs_aio_threads = 0;	/* 1 per nfsd (arbitrary) */
struct slock nfs_aio_slock;
TAILQ_HEAD(bqueues, buf) nfs_aio_bufq;
int nfs_aio_bufq_len = 0;	/* diagnostic only */

/*
 * Service loop for an async-write helper thread: pop buffers off
 * nfs_aio_bufq and push them with nfs_writebp(), sleeping when the
 * queue is empty. Never returns.
 */
void
nfs_aio_thread()
{	/* see comment below in nfs_bwrite() for some rationale */
	struct buf *bp;
	boolean_t funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	for(;;) {
		simple_lock(&nfs_aio_slock);
		if ((bp = nfs_aio_bufq.tqh_first)) {
			TAILQ_REMOVE(&nfs_aio_bufq, bp, b_freelist);
			nfs_aio_bufq_len--;
			simple_unlock(&nfs_aio_slock);
			nfs_writebp(bp, 1);
		} else {	/* nothing to do - goodnight */
			assert_wait(&nfs_aio_bufq, THREAD_UNINT);
			simple_unlock(&nfs_aio_slock);
			(void)tsleep((caddr_t)0, PRIBIO+1, "nfs_aio_bufq", 0);
		}
	}
	/* NOTE(review): unreachable — the loop above never exits */
	(void) thread_funnel_set(kernel_flock, FALSE);
}


/*
 * Spawn an async-write helper thread, initializing the shared queue
 * and lock on the first call.
 */
void
nfs_aio_thread_init()
{
	if (nfs_aio_threads++ == 0) {
		simple_lock_init(&nfs_aio_slock);
		TAILQ_INIT(&nfs_aio_bufq);
	}
	kernel_thread(kernel_task, nfs_aio_thread);
}


/*
 * Just call nfs_writebp() with the force argument set to 1.
 */
static int
nfs_bwrite(ap)
	struct vop_bwrite_args /* {
		struct vnode *a_bp;
	} */ *ap;
{
	extern void wakeup_one(caddr_t chan);

	/*
	 * nfs_writebp will issue a synchronous rpc, so if B_ASYNC is set
	 * then, to avoid distributed deadlocks, we hand off the write to
	 * the nfs_aio threads. Doing so allows us to complete the
	 * current request, rather than blocking on a server which may
	 * be ourself (or blocked on ourself).
	 *
	 * Note the loopback deadlocks happened when the thread
	 * invoking us was nfsd, and also when it was the pagedaemon.
	 *
	 * This solution has one known problem. If *ALL* buffers get
	 * on the nfs_aio queue then no forward progress can be made
	 * until one of those writes complete. And if the current
	 * nfs_aio writes-in-progress block due to a non-responsive server we
	 * are in a deadlock circle. Probably the cure is to limit the
	 * async write concurrency in getnewbuf as in FreeBSD 3.2.
	 */
	if (nfs_aio_threads && ISSET(ap->a_bp->b_flags, B_ASYNC)) {
		simple_lock(&nfs_aio_slock);
		nfs_aio_bufq_len++;
		TAILQ_INSERT_TAIL(&nfs_aio_bufq, ap->a_bp, b_freelist);
		simple_unlock(&nfs_aio_slock);
		wakeup_one((caddr_t)&nfs_aio_bufq);
		return (0);
	}
	return (nfs_writebp(ap->a_bp, 1));
}

/*
 * This is a clone of vn_bwrite(), except that B_WRITEINPROG isn't set unless
 * the force flag is one and it also handles the B_NEEDCOMMIT flag.
+ */ +int +nfs_writebp(bp, force) + register struct buf *bp; + int force; +{ + int s; + register int oldflags = bp->b_flags, retv = 1; + off_t off; + upl_t upl; + void * object; + kern_return_t kret; + struct vnode *vp = bp->b_vp; + upl_page_info_t *pl; + + if(!ISSET(bp->b_flags, B_BUSY)) + panic("nfs_writebp: buffer is not busy???"); + + s = splbio(); + CLR(bp->b_flags, (B_READ|B_DONE|B_ERROR|B_DELWRI)); + + if (ISSET(oldflags, (B_ASYNC|B_DELWRI))) { + reassignbuf(bp, vp); + } + + vp->v_numoutput++; + current_proc()->p_stats->p_ru.ru_oublock++; + splx(s); + + /* + * Since the B_BUSY flag is set, we need to lock the page before doing nfs_commit. + * Otherwise we may block and get a busy incore pages during a vm pageout. + * Move the existing code up before the commit. + */ + + if (!ISSET(bp->b_flags, B_META) && UBCISVALID(vp)) { + + if (!ISSET(bp->b_flags, B_PAGELIST)) { + + off_t file_offset = ubc_blktooff(vp, bp->b_lblkno); + + object = ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT)); + if (object == (void*)NULL) + panic("nfs_writebp: NULL vmobject"); + + if(bp->b_bufsize & 0xfff) + panic("nfs_writebp: list request is with less than 4k"); + + kret = vm_fault_list_request(object, (vm_object_offset_t)file_offset, + bp->b_bufsize, &upl, NULL, 0, + (int)(UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_SET_INTERNAL)); + if (kret != KERN_SUCCESS) { + panic("nfs_writebp: get pagelists failed with (%d)", kret); + } + +#ifdef UBC_DEBUG + upl_ubc_alias_set(pl, ioaddr, 2); +#endif /* UBC_DEBUG */ + + s = splbio(); + + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + bp->b_pagelist = upl; + SET(bp->b_flags, B_PAGELIST); + splx(s); + + kret = kernel_upl_map(kernel_map, upl, + (vm_address_t *)&(bp->b_data)); + if (kret != KERN_SUCCESS) { + panic("nfs_writebp: kernel_upl_map() failed with (%d)", kret); + } + if(bp->b_data == 0) + panic("nfs_writebp: upl_map mapped 0"); + if (!upl_page_present(pl, 0)) { + /* + * may be the page got paged out. + * let's just read it in. 
It is marked + * busy so we should not have any one + * yanking this page underneath the fileIO + */ + panic("nfs_writebp: nopage"); + } + } + } + + /* + * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not + * an actual write will have to be scheduled via. VOP_STRATEGY(). + * If B_WRITEINPROG is already set, then push it with a write anyhow. + */ + if ((oldflags & (B_NEEDCOMMIT | B_WRITEINPROG)) == B_NEEDCOMMIT) { + off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; + SET(bp->b_flags, B_WRITEINPROG); + retv = nfs_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff, + bp->b_wcred, bp->b_proc); + CLR(bp->b_flags, B_WRITEINPROG); + if (!retv) { + bp->b_dirtyoff = bp->b_dirtyend = 0; + CLR(bp->b_flags, B_NEEDCOMMIT); + biodone(bp); /* on B_ASYNC will brelse the buffer */ + + } else if (retv == NFSERR_STALEWRITEVERF) + nfs_clearcommit(vp->v_mount); + } + if (retv) { + if (force) + SET(bp->b_flags, B_WRITEINPROG); + + VOP_STRATEGY(bp); + + } + + if( (oldflags & B_ASYNC) == 0) { + int rtval = biowait(bp); + + if (oldflags & B_DELWRI) { + s = splbio(); + reassignbuf(bp, vp); + splx(s); + } + brelse(bp); + return (rtval); + } + + return (0); +} + +/* + * nfs special file access vnode op. + * Essentially just get vattr and then imitate iaccess() since the device is + * local to the client. + */ +static int +nfsspec_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vattr *vap; + register gid_t *gp; + register struct ucred *cred = ap->a_cred; + struct vnode *vp = ap->a_vp; + mode_t mode = ap->a_mode; + struct vattr vattr; + register int i; + int error; + + /* + * Disallow write attempts on filesystems mounted read-only; + * unless the file is a socket, fifo, or a block or character + * device resident on the filesystem. 
+ */ + if ((mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { + switch (vp->v_type) { + case VREG: case VDIR: case VLNK: + return (EROFS); + } + } + /* + * If you're the super-user, + * you always get access. + */ + if (cred->cr_uid == 0) + return (0); + vap = &vattr; + error = VOP_GETATTR(vp, vap, cred, ap->a_p); + if (error) + return (error); + /* + * Access check is based on only one of owner, group, public. + * If not owner, then check group. If not a member of the + * group, then check public access. + */ + if (cred->cr_uid != vap->va_uid) { + mode >>= 3; + gp = cred->cr_groups; + for (i = 0; i < cred->cr_ngroups; i++, gp++) + if (vap->va_gid == *gp) + goto found; + mode >>= 3; +found: + ; + } + error = (vap->va_mode & mode) == mode ? 0 : EACCES; + return (error); +} + +/* + * Read wrapper for special devices. + */ +static int +nfsspec_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register struct nfsnode *np = VTONFS(ap->a_vp); + + /* + * Set access flag. + */ + np->n_flag |= NACC; + np->n_atim.tv_sec = time.tv_sec; + np->n_atim.tv_nsec = time.tv_usec * 1000; + return (VOCALL(spec_vnodeop_p, VOFFSET(vop_read), ap)); +} + +/* + * Write wrapper for special devices. + */ +static int +nfsspec_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register struct nfsnode *np = VTONFS(ap->a_vp); + + /* + * Set update flag. + */ + np->n_flag |= NUPD; + np->n_mtim.tv_sec = time.tv_sec; + np->n_mtim.tv_nsec = time.tv_usec * 1000; + return (VOCALL(spec_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Close wrapper for special devices. + * + * Update the times on the nfsnode then do device close. 
+ */ +static int +nfsspec_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct nfsnode *np = VTONFS(vp); + struct vattr vattr; + + if (np->n_flag & (NACC | NUPD)) { + np->n_flag |= NCHG; + if (vp->v_usecount == 1 && + (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { + VATTR_NULL(&vattr); + if (np->n_flag & NACC) + vattr.va_atime = np->n_atim; + if (np->n_flag & NUPD) + vattr.va_mtime = np->n_mtim; + (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); + } + } + return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); +} + +/* + * Read wrapper for fifos. + */ +static int +nfsfifo_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + extern vop_t **fifo_vnodeop_p; + register struct nfsnode *np = VTONFS(ap->a_vp); + + /* + * Set access flag. + */ + np->n_flag |= NACC; + np->n_atim.tv_sec = time.tv_sec; + np->n_atim.tv_nsec = time.tv_usec * 1000; + return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_read), ap)); +} + +/* + * Write wrapper for fifos. + */ +static int +nfsfifo_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + extern vop_t **fifo_vnodeop_p; + register struct nfsnode *np = VTONFS(ap->a_vp); + + /* + * Set update flag. + */ + np->n_flag |= NUPD; + np->n_mtim.tv_sec = time.tv_sec; + np->n_mtim.tv_nsec = time.tv_usec * 1000; + return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Close wrapper for fifos. + * + * Update the times on the nfsnode then do fifo close. 
+ */ +static int +nfsfifo_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct nfsnode *np = VTONFS(vp); + struct vattr vattr; + extern vop_t **fifo_vnodeop_p; + + if (np->n_flag & (NACC | NUPD)) { + if (np->n_flag & NACC) { + np->n_atim.tv_sec = time.tv_sec; + np->n_atim.tv_nsec = time.tv_usec * 1000; + } + if (np->n_flag & NUPD) { + np->n_mtim.tv_sec = time.tv_sec; + np->n_mtim.tv_nsec = time.tv_usec * 1000; + } + np->n_flag |= NCHG; + if (vp->v_usecount == 1 && + (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { + VATTR_NULL(&vattr); + if (np->n_flag & NACC) + vattr.va_atime = np->n_atim; + if (np->n_flag & NUPD) + vattr.va_mtime = np->n_mtim; + (void)VOP_SETATTR(vp, &vattr, ap->a_cred, ap->a_p); + } + } + return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); +} + +static int +nfs_ioctl(ap) + struct vop_ioctl_args *ap; +{ + + /* + * XXX we were once bogusly enoictl() which returned this (ENOTTY). + * Probably we should return ENODEV. + */ + return (ENOTTY); +} + +static int +nfs_select(ap) + struct vop_select_args *ap; +{ + + /* + * We were once bogusly seltrue() which returns 1. Is this right? 
+ */ + return (1); +} + +/* XXX Eliminate use of struct bp here */ +/* + * Vnode op for pagein using getblk_pages + * derived from nfs_bioread() + * No read aheads are started from pagein operation + */ +static int +nfs_pagein(ap) + struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + upl_t pl = ap->a_pl; + size_t size= ap->a_size; + off_t f_offset = ap->a_f_offset; + vm_offset_t pl_offset = ap->a_pl_offset; + int flags = ap->a_flags; + struct ucred *cred; + register struct nfsnode *np = VTONFS(vp); + register int biosize; + register int xsize; + struct vattr vattr; + struct proc *p = current_proc(); + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + int error = 0; + vm_offset_t ioaddr; + struct uio auio; + struct iovec aiov; + struct uio * uio = &auio; + int nocommit = flags & UPL_NOCOMMIT; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 322)) | DBG_FUNC_NONE, + (int)f_offset, size, pl, pl_offset, 0); + + if (UBCINVALID(vp)) { +#if DIAGNOSTIC + panic("nfs_pagein: invalid vp"); +#endif /* DIAGNOSTIC */ + return (EPERM); + } + + UBCINFOCHECK("nfs_pagein", vp); + if(pl == (upl_t)NULL) { + panic("nfs_pagein: no upl"); + } + + cred = ubc_getcred(vp); + if (cred == NOCRED) + cred = ap->a_cred; + + if (size <= 0) + return (EINVAL); + + if (f_offset < 0 || f_offset >= np->n_size + || (f_offset & PAGE_MASK_64)) { + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + return (EINVAL); + } + + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = f_offset; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_procp = NULL; + + + if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + (void)nfs_fsinfo(nmp, vp, cred, p); + biosize = min(vp->v_mount->mnt_stat.f_iosize, size); + + if (biosize & PAGE_MASK) + 
panic("nfs_pagein(%x): biosize not page aligned", biosize); + +#if 0 /* Why bother? */ +/* DO NOT BOTHER WITH "approximately maintained cache consistency" */ +/* Does not make sense in paging paths -- Umesh*/ + /* + * For nfs, cache consistency can only be maintained approximately. + * Although RFC1094 does not specify the criteria, the following is + * believed to be compatible with the reference port. + * For nqnfs, full cache consistency is maintained within the loop. + * For nfs: + * If the file's modify time on the server has changed since the + * last read rpc or you have written to the file, + * you may have lost data cache consistency with the + * server, so flush all of the file's data out of the cache. + * Then force a getattr rpc to ensure that you have up to date + * attributes. + * NB: This implies that cache data can be read when up to + * NFS_ATTRTIMEO seconds out of date. If you find that you need current + * attributes this could be forced by setting n_attrstamp to 0 before + * the VOP_GETATTR() call. + */ + if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) { + if (np->n_flag & NMODIFIED) { + np->n_attrstamp = 0; + error = VOP_GETATTR(vp, &vattr, cred, p); + if (error) { + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, + size, + UPL_ABORT_ERROR | + UPL_ABORT_FREE_ON_EMPTY); + return (error); + } + np->n_mtime = vattr.va_mtime.tv_sec; + } else { + error = VOP_GETATTR(vp, &vattr, cred, p); + if (error){ + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_ERROR | + UPL_ABORT_FREE_ON_EMPTY); + return (error); + } + if (np->n_mtime != vattr.va_mtime.tv_sec) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error){ + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_ERROR | + UPL_ABORT_FREE_ON_EMPTY); + return (error); + } + np->n_mtime = vattr.va_mtime.tv_sec; + } + } + } +#endif 0 /* Why bother? 
*/ + + kernel_upl_map(kernel_map, pl, &ioaddr); + ioaddr += pl_offset; + xsize = size; + + do { + uio->uio_resid = min(biosize, xsize); + aiov.iov_len = uio->uio_resid; + aiov.iov_base = (caddr_t)ioaddr; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 322)) | DBG_FUNC_NONE, + (int)uio->uio_offset, uio->uio_resid, ioaddr, xsize, 0); + +#warning nfs_pagein does not support NQNFS yet. +#if 0 /* why bother? */ +/* NO RESOURCES TO FIX NQNFS CASE */ +/* We need to deal with this later -- Umesh */ + /* + * Get a valid lease. If cached data is stale, flush it. + */ + if (nmp->nm_flag & NFSMNT_NQNFS) { + if (NQNFS_CKINVALID(vp, np, ND_READ)) { + do { + error = nqnfs_getlease(vp, ND_READ, cred, p); + } while (error == NQNFS_EXPIRED); + if (error){ + kernel_upl_unmap(kernel_map, pl); + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, + size ,UPL_ABORT_ERROR | + UPL_ABORT_FREE_ON_EMPTY); + + return (error); + } + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE)) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) { + kernel_upl_unmap(kernel_map, pl); + if (!nocommit) + kernel_upl_abort_range(pl, + pl_offset,size , + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + return (error); + } + np->n_brev = np->n_lrev; + } + } + } +#endif 0 /* why bother? */ + + if (np->n_flag & NQNFSNONCACHE) { + error = nfs_readrpc(vp, uio, cred); + kernel_upl_unmap(kernel_map, pl); + + if (!nocommit) { + if(error) + kernel_upl_abort_range(pl, pl_offset, size , + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(pl, + pl_offset, size, + UPL_COMMIT_CLEAR_DIRTY + | UPL_COMMIT_FREE_ON_EMPTY, + UPL_GET_INTERNAL_PAGE_LIST(pl), + MAX_UPL_TRANSFER); + } + return (error); + } + + /* + * With UBC we get here only when the file data is not in the VM + * page cache, so go ahead and read in. 
+ */ +#ifdef UBC_DEBUG + upl_ubc_alias_set(pl, ioaddr, 2); +#endif /* UBC_DEBUG */ + nfsstats.pageins++; + error = nfs_readrpc(vp, uio, cred); + + if (!error) { + int zoff; + int zcnt; + + if (uio->uio_resid) { + /* + * If uio_resid > 0, there is a hole in the file and + * no writes after the hole have been pushed to + * the server yet... or we're at the EOF + * Just zero fill the rest of the valid area. + */ + zcnt = uio->uio_resid; + zoff = biosize - zcnt; + bzero((char *)ioaddr + zoff, zcnt); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 324)) | DBG_FUNC_NONE, + (int)uio->uio_offset, zoff, zcnt, ioaddr, 0); + + uio->uio_offset += zcnt; + } + ioaddr += biosize; + xsize -= biosize; + } else + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 322)) | DBG_FUNC_NONE, + (int)uio->uio_offset, uio->uio_resid, error, -1, 0); + + if (p && (vp->v_flag & VTEXT) && + (((nmp->nm_flag & NFSMNT_NQNFS) && + NQNFS_CKINVALID(vp, np, ND_READ) && + np->n_lrev != np->n_brev) || + (!(nmp->nm_flag & NFSMNT_NQNFS) && + np->n_mtime != np->n_vattr.va_mtime.tv_sec))) { + uprintf("Process killed due to text file modification\n"); + psignal(p, SIGKILL); + p->p_flag |= P_NOSWAP; + } + + } while (error == 0 && xsize > 0); + + kernel_upl_unmap(kernel_map, pl); + + if (!nocommit) { + if (error) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(pl, pl_offset, size, + UPL_COMMIT_CLEAR_DIRTY + | UPL_COMMIT_FREE_ON_EMPTY, + UPL_GET_INTERNAL_PAGE_LIST(pl), + MAX_UPL_TRANSFER); + } + + return (error); +} + +/* + * Vnode op for pageout using UPL + * Derived from nfs_write() + * File size changes are not permitted in pageout. 
+ */ +static int +nfs_pageout(ap) + struct vop_pageout_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + upl_t pl = ap->a_pl; + size_t size= ap->a_size; + off_t f_offset = ap->a_f_offset; + vm_offset_t pl_offset = ap->a_pl_offset; + int flags = ap->a_flags; + int ioflag = ap->a_flags; + register int biosize; + struct proc *p = current_proc(); + struct nfsnode *np = VTONFS(vp); + register struct ucred *cred; + struct buf *bp; + struct nfsmount *nmp = VFSTONFS(vp->v_mount); + daddr_t lbn; + int bufsize; + int n = 0, on, error = 0, iomode, must_commit, s; + off_t off; + vm_offset_t ioaddr; + struct uio auio; + struct iovec aiov; + struct uio * uio = &auio; + int nocommit = flags & UPL_NOCOMMIT; + int iosize; + int pgsize; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 323)) | DBG_FUNC_NONE, + (int)f_offset, size, pl, pl_offset, 0); + + if (UBCINVALID(vp)) { +#if DIAGNOSTIC + panic("nfs_pageout: invalid vnode"); +#endif + return (EIO); + } + UBCINFOCHECK("nfs_pageout", vp); + + if (size <= 0) + return (EINVAL); + + if (pl == (upl_t)NULL) { + panic("nfs_pageout: no upl"); + } + + /* + * I use nm_rsize, not nm_wsize so that all buffer cache blocks + * will be the same size within a filesystem. nfs_writerpc will + * still use nm_wsize when sizing the rpc's. 
+ */ + biosize = min(vp->v_mount->mnt_stat.f_iosize, size); + + if (biosize & PAGE_MASK) + panic("nfs_pageout(%x): biosize not page aligned", biosize); + + + /* + * Check to see whether the buffer is incore + * If incore and not busy invalidate it from the cache + * we should not find it BUSY, since we always do a + * vm_fault_list_request in 'getblk' before returning + * which would block on the page busy status + */ + lbn = f_offset / PAGE_SIZE; /* to match the size getblk uses */ + + for (iosize = size; iosize > 0; iosize -= PAGE_SIZE, lbn++) { + + s = splbio(); + if (bp = incore(vp, lbn)) { + if (ISSET(bp->b_flags, B_BUSY)) { + /* don't panic incore. just tell vm we are busy */ + (void) kernel_upl_abort(pl, NULL); + return(EBUSY); + }; + + bremfree(bp); + SET(bp->b_flags, (B_BUSY | B_INVAL)); + brelse(bp); + } + splx(s); + } + + cred = ubc_getcred(vp); + if (cred == NOCRED) + cred = ap->a_cred; + + if (np->n_flag & NWRITEERR) { + np->n_flag &= ~NWRITEERR; + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + return (np->n_error); + } + if ((nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_GOTFSINFO)) == NFSMNT_NFSV3) + (void)nfs_fsinfo(nmp, vp, cred, p); + + if (f_offset < 0 || f_offset >= np->n_size || + (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) { + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + return (EINVAL); + } + + kernel_upl_map(kernel_map, pl, &ioaddr); + + if ((f_offset + size) > np->n_size) + iosize = np->n_size - f_offset; + else + iosize = size; + + pgsize = (iosize + (PAGE_SIZE - 1)) & ~PAGE_MASK; + + if (size > pgsize) { + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset + pgsize, size - pgsize, + UPL_ABORT_FREE_ON_EMPTY); + } + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = f_offset; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_resid = iosize; + auio.uio_procp = NULL; + + aiov.iov_len = iosize; + aiov.iov_base = 
(caddr_t)ioaddr + pl_offset; + + /* + * check for partial page and clear the + * contents past end of the file before + * releasing it in the VM page cache + */ + if ((f_offset < np->n_size) && (f_offset + size) > np->n_size) { + size_t io = np->n_size - f_offset; + + bzero((caddr_t)(ioaddr + pl_offset + io), size - io); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 321)) | DBG_FUNC_NONE, + (int)np->n_size, (int)f_offset, (int)f_offset + io, size - io, 0); + } + + do { + +#warning nfs_pageout does not support NQNFS yet. +#if 0 /* why bother? */ +/* NO RESOURCES TO FIX NQNFS CASE */ +/* We need to deal with this later -- Umesh */ + + /* + * Check for a valid write lease. + */ + if ((nmp->nm_flag & NFSMNT_NQNFS) && + NQNFS_CKINVALID(vp, np, ND_WRITE)) { + do { + error = nqnfs_getlease(vp, ND_WRITE, cred, p); + } while (error == NQNFS_EXPIRED); + if (error) { + kernel_upl_unmap(kernel_map, pl); + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + return (error); + } + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE)) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) { + kernel_upl_unmap(kernel_map, pl); + if (!nocommit) + kernel_upl_abort_range(pl, + pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + return (error); + } + np->n_brev = np->n_lrev; + } + } +#endif 0 /* why bother? 
*/ + + if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) { + iomode = NFSV3WRITE_FILESYNC; + error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit); + if (must_commit) + nfs_clearcommit(vp->v_mount); + kernel_upl_unmap(kernel_map, pl); + + /* see comments below after other nfs_writerpc and ESTALE */ + if (error == ESTALE) { + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY); + } else { + if (!nocommit) { + if(error) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(pl, + pl_offset, size, + UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY, + UPL_GET_INTERNAL_PAGE_LIST(pl), MAX_UPL_TRANSFER); + } + } + return (error); + } + nfsstats.pageouts++; + lbn = uio->uio_offset / biosize; + on = uio->uio_offset & (biosize-1); + n = min((unsigned)(biosize - on), uio->uio_resid); +again: + bufsize = biosize; +#if 0 + if ((lbn + 1) * biosize > np->n_size) { + bufsize = np->n_size - lbn * biosize; + bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); + } +#endif + vp->v_numoutput++; + + np->n_flag |= NMODIFIED; + +#if 0 /* why bother? */ +/* NO RESOURCES TO FIX NQNFS CASE */ +/* We need to deal with this later -- Umesh */ + /* + * Check for valid write lease and get one as required. + * In case getblk() and/or bwrite() delayed us. + */ + if ((nmp->nm_flag & NFSMNT_NQNFS) && + NQNFS_CKINVALID(vp, np, ND_WRITE)) { + do { + error = nqnfs_getlease(vp, ND_WRITE, cred, p); + } while (error == NQNFS_EXPIRED); + if (error) + goto cleanup; + + if (np->n_lrev != np->n_brev || + (np->n_flag & NQNFSNONCACHE)) { + error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1); + if (error) { + kernel_upl_unmap(kernel_map, pl); + if (!nocommit) + kernel_upl_abort_range(pl, + pl_offset, + size, + UPL_ABORT_FREE_ON_EMPTY); + + return (error); + } + np->n_brev = np->n_lrev; + goto again; + } + } +#endif 0 /* why bother? 
*/ + + iomode = NFSV3WRITE_FILESYNC; + error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit); + if (must_commit) + nfs_clearcommit(vp->v_mount); + vp->v_numoutput--; + + if (error) + goto cleanup; + + if (n > 0) { + uio->uio_resid -= n; + uio->uio_offset += n; + uio->uio_iov->iov_base += n; + uio->uio_iov->iov_len -= n; + } + } while (uio->uio_resid > 0 && n > 0); + +cleanup: + kernel_upl_unmap(kernel_map, pl); + /* + * EStale is special. In this case, we want vm to dump out + * the pages. Better yet, sever the object so we don't come + * back here on each page of the object to page out. For now, + * just dump. + * XXX What about !nocommit case? Should ESTALE only be checked + * in that portion? - EKN + */ + if (error == ESTALE) { + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY); + } else { + if (!nocommit) { + if(error) + kernel_upl_abort_range(pl, pl_offset, pgsize, + UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(pl, pl_offset, pgsize, + UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY, + UPL_GET_INTERNAL_PAGE_LIST(pl), MAX_UPL_TRANSFER); + } + } + + return (error); +} + +/* Blktooff derives file offset given a logical block number */ +static int +nfs_blktooff(ap) + struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; + } */ *ap; +{ + int biosize; + register struct vnode *vp = ap->a_vp; + + biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); /* nfs_bio.c */ + + *ap->a_offset = (off_t)(ap->a_lblkno * biosize); + + return (0); +} + +/* Blktooff derives file offset given a logical block number */ +static int +nfs_offtoblk(ap) + struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; + } */ *ap; +{ + int biosize; + register struct vnode *vp = ap->a_vp; + + biosize = min(vp->v_mount->mnt_stat.f_iosize, PAGE_SIZE); /* nfs_bio.c */ + + *ap->a_lblkno = (daddr_t)(ap->a_offset / biosize); + + return (0); +} +static int +nfs_cmap(ap) 
+ struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_offset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; + } */ *ap; +{ + return (EOPNOTSUPP); +} diff --git a/bsd/nfs/nfsdiskless.h b/bsd/nfs/nfsdiskless.h new file mode 100644 index 000000000..b04ee1dd4 --- /dev/null +++ b/bsd/nfs/nfsdiskless.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfsdiskless.h 8.2 (Berkeley) 3/30/95 + * FreeBSD-Id: nfsdiskless.h,v 1.10 1997/09/07 12:56:46 bde Exp $ + */ + + +#ifndef _NFS_NFSDISKLESS_H_ +#define _NFS_NFSDISKLESS_H_ + +/* + * Structure that must be initialized for a diskless nfs client. + * This structure is used by nfs_mountroot() to set up the root and swap + * vnodes plus do a partial ifconfig(8) and route(8) so that the critical net + * interface can communicate with the server. + * The primary bootstrap is expected to fill in the appropriate fields before + * starting the kernel. 
Whether or not the swap area is nfs mounted is + * determined by the value in swdevt[0]. (equal to NODEV --> swap over nfs) + * Currently only works for AF_INET protocols. + * NB: All fields are stored in net byte order to avoid hassles with + * client/server byte ordering differences. + */ + +/* + * I have defined a new structure that can handle an NFS Version 3 file handle + * but the kernel still expects the old Version 2 one to be provided. The + * changes required in nfs_vfsops.c for using the new are documented there in + * comments. (I felt that breaking network booting code by changing this + * structure would not be prudent at this time, since almost all servers are + * still Version 2 anyhow.) + */ +struct nfsv3_diskless { + struct ifaliasreq myif; /* Default interface */ + struct sockaddr_in mygateway; /* Default gateway */ + struct nfs_args swap_args; /* Mount args for swap file */ + int swap_fhsize; /* Size of file handle */ + u_char swap_fh[NFSX_V3FHMAX]; /* Swap file's file handle */ + struct sockaddr_in swap_saddr; /* Address of swap server */ + char swap_hostnam[MNAMELEN]; /* Host name for mount pt */ + int swap_nblks; /* Size of server swap file */ + struct ucred swap_ucred; /* Swap credentials */ + struct nfs_args root_args; /* Mount args for root fs */ + int root_fhsize; /* Size of root file handle */ + u_char root_fh[NFSX_V3FHMAX]; /* File handle of root dir */ + struct sockaddr_in root_saddr; /* Address of root server */ + char root_hostnam[MNAMELEN]; /* Host name for mount pt */ + long root_time; /* Timestamp of root fs */ + char my_hostnam[MAXHOSTNAMELEN]; /* Client host name */ +}; + +struct nfs_dlmount { + struct sockaddr_in ndm_saddr; /* Address of file server */ + char ndm_host[MNAMELEN]; /* Host name for mount pt */ + u_char ndm_fh[NFSX_V2FH]; /* The file's file handle */ +}; + +/* + * Old arguments to mount NFS + */ +struct onfs_args { + struct sockaddr *addr; /* file server address */ + int addrlen; /* length of address */ + int 
sotype; /* Socket type */ + int proto; /* and Protocol */ + u_char *fh; /* File handle to be mounted */ + int fhsize; /* Size, in bytes, of fh */ + int flags; /* flags */ + int wsize; /* write size in bytes */ + int rsize; /* read size in bytes */ + int readdirsize; /* readdir size in bytes */ + int timeo; /* initial timeout in .1 secs */ + int retrans; /* times to retry send */ + int maxgrouplist; /* Max. size of group list */ + int readahead; /* # of blocks to readahead */ + int leaseterm; /* Term (sec) of lease */ + int deadthresh; /* Retrans threshold */ + char *hostname; /* server's name */ +}; + +struct nfs_diskless { + struct nfs_dlmount nd_root; /* Mount info for root */ + struct nfs_dlmount nd_private; /* Mount info for private */ +}; + +#endif diff --git a/bsd/nfs/nfsm_subs.h b/bsd/nfs/nfsm_subs.h new file mode 100644 index 000000000..75f019ce0 --- /dev/null +++ b/bsd/nfs/nfsm_subs.h @@ -0,0 +1,484 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)nfsm_subs.h 8.2 (Berkeley) 3/30/95 + * FreeBSD-Id: nfsm_subs.h,v 1.13 1997/07/16 09:06:30 dfr Exp $ + */ + + +#ifndef _NFS_NFSM_SUBS_H_ +#define _NFS_NFSM_SUBS_H_ + + +/* + * These macros do strange and peculiar things to mbuf chains for + * the assistance of the nfs code. To attempt to use them for any + * other purpose will be dangerous. (they make weird assumptions) + */ + +/* + * First define what the actual subs. return + */ +struct mbuf *nfsm_reqh __P((struct vnode *vp, u_long procid, int hsiz, + caddr_t *bposp)); +struct mbuf *nfsm_rpchead __P((struct ucred *cr, int nmflag, int procid, + int auth_type, int auth_len, char *auth_str, + int verf_len, char *verf_str, + struct mbuf *mrest, int mrest_len, + struct mbuf **mbp, u_long *xidp)); + +#define M_HASCL(m) ((m)->m_flags & M_EXT) +#define NFSMINOFF(m) \ + if (M_HASCL(m)) \ + (m)->m_data = (m)->m_ext.ext_buf; \ + else if ((m)->m_flags & M_PKTHDR) \ + (m)->m_data = (m)->m_pktdat; \ + else \ + (m)->m_data = (m)->m_dat +#define NFSMADV(m, s) (m)->m_data += (s) +#define NFSMSIZ(m) ((M_HASCL(m))?MCLBYTES: \ + (((m)->m_flags & M_PKTHDR)?MHLEN:MLEN)) + +/* + * Now for the macros that do the simple stuff and call the functions + * for the hard stuff. + * These macros use several vars. declared in nfsm_reqhead and these + * vars. must not be used elsewhere unless you are careful not to corrupt + * them. The vars. starting with pN and tN (N=1,2,3,..) are temporaries + * that may be used so long as the value is not expected to retained + * after a macro. + * I know, this is kind of dorkey, but it makes the actual op functions + * fairly clean and deals with the mess caused by the xdr discriminating + * unions. 
+ */ + +#define nfsm_build(a,c,s) \ + { if ((s) > M_TRAILINGSPACE(mb)) { \ + MGET(mb2, M_WAIT, MT_DATA); \ + if ((s) > MLEN) \ + panic("build > MLEN"); \ + mb->m_next = mb2; \ + mb = mb2; \ + mb->m_len = 0; \ + bpos = mtod(mb, caddr_t); \ + } \ + (a) = (c)(bpos); \ + mb->m_len += (s); \ + bpos += (s); } + +#define nfsm_dissect(a, c, s) \ + { t1 = mtod(md, caddr_t)+md->m_len-dpos; \ + if (t1 >= (s)) { \ + (a) = (c)(dpos); \ + dpos += (s); \ + } else if ((t1 = nfsm_disct(&md, &dpos, (s), t1, &cp2))) { \ + error = t1; \ + m_freem(mrep); \ + goto nfsmout; \ + } else { \ + (a) = (c)cp2; \ + } } + +#define nfsm_fhtom(v, v3) \ + { if (v3) { \ + t2 = nfsm_rndup(VTONFS(v)->n_fhsize) + NFSX_UNSIGNED; \ + if (t2 <= M_TRAILINGSPACE(mb)) { \ + nfsm_build(tl, u_long *, t2); \ + *tl++ = txdr_unsigned(VTONFS(v)->n_fhsize); \ + *(tl + ((t2>>2) - 2)) = 0; \ + bcopy((caddr_t)VTONFS(v)->n_fhp,(caddr_t)tl, \ + VTONFS(v)->n_fhsize); \ + } else if ((t2 = nfsm_strtmbuf(&mb, &bpos, \ + (caddr_t)VTONFS(v)->n_fhp, VTONFS(v)->n_fhsize))) { \ + error = t2; \ + m_freem(mreq); \ + goto nfsmout; \ + } \ + } else { \ + nfsm_build(cp, caddr_t, NFSX_V2FH); \ + bcopy((caddr_t)VTONFS(v)->n_fhp, cp, NFSX_V2FH); \ + } } + +#define nfsm_srvfhtom(f, v3) \ + { if (v3) { \ + nfsm_build(tl, u_long *, NFSX_UNSIGNED + NFSX_V3FH); \ + *tl++ = txdr_unsigned(NFSX_V3FH); \ + bcopy((caddr_t)(f), (caddr_t)tl, NFSX_V3FH); \ + } else { \ + nfsm_build(cp, caddr_t, NFSX_V2FH); \ + bcopy((caddr_t)(f), cp, NFSX_V2FH); \ + } } + +#define nfsm_srvpostop_fh(f) \ + { nfsm_build(tl, u_long *, 2 * NFSX_UNSIGNED + NFSX_V3FH); \ + *tl++ = nfs_true; \ + *tl++ = txdr_unsigned(NFSX_V3FH); \ + bcopy((caddr_t)(f), (caddr_t)tl, NFSX_V3FH); \ + } + +#define nfsm_mtofh(d, v, v3, f) \ + { struct nfsnode *ttnp; nfsfh_t *ttfhp; int ttfhsize; \ + if (v3) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + (f) = fxdr_unsigned(int, *tl); \ + } else \ + (f) = 1; \ + if (f) { \ + nfsm_getfh(ttfhp, ttfhsize, (v3)); \ + if ((t1 = 
nfs_nget((d)->v_mount, ttfhp, ttfhsize, \ + &ttnp))) { \ + error = t1; \ + m_freem(mrep); \ + goto nfsmout; \ + } \ + (v) = NFSTOV(ttnp); \ + } \ + if (v3) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (f) \ + (f) = fxdr_unsigned(int, *tl); \ + else if (fxdr_unsigned(int, *tl)) \ + nfsm_adv(NFSX_V3FATTR); \ + } \ + if (f) \ + nfsm_loadattr((v), (struct vattr *)0); \ + } + +#define nfsm_getfh(f, s, v3) \ + { if (v3) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (((s) = fxdr_unsigned(int, *tl)) <= 0 || \ + (s) > NFSX_V3FHMAX) { \ + m_freem(mrep); \ + error = EBADRPC; \ + goto nfsmout; \ + } \ + } else \ + (s) = NFSX_V2FH; \ + nfsm_dissect((f), nfsfh_t *, nfsm_rndup(s)); } + +#define nfsm_loadattr(v, a) \ + { struct vnode *ttvp = (v); \ + if ((t1 = nfs_loadattrcache(&ttvp, &md, &dpos, (a)))) { \ + error = t1; \ + m_freem(mrep); \ + goto nfsmout; \ + } \ + (v) = ttvp; } + +#define nfsm_postop_attr(v, f) \ + { struct vnode *ttvp = (v); \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (((f) = fxdr_unsigned(int, *tl))) { \ + if ((t1 = nfs_loadattrcache(&ttvp, &md, &dpos, \ + (struct vattr *)0))) { \ + error = t1; \ + (f) = 0; \ + m_freem(mrep); \ + goto nfsmout; \ + } \ + (v) = ttvp; \ + } } + +/* Used as (f) for nfsm_wcc_data() */ +#define NFSV3_WCCRATTR 0 +#define NFSV3_WCCCHK 1 + +#define nfsm_wcc_data(v, f) \ + { int ttattrf, ttretf = 0; \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (*tl == nfs_true) { \ + nfsm_dissect(tl, u_long *, 6 * NFSX_UNSIGNED); \ + if (f) \ + ttretf = (VTONFS(v)->n_mtime == \ + fxdr_unsigned(u_long, *(tl + 2))); \ + } \ + nfsm_postop_attr((v), ttattrf); \ + if (f) { \ + (f) = ttretf; \ + } else { \ + (f) = ttattrf; \ + } } + +#define nfsm_v3sattr(s, a, u, g) \ + { (s)->sa_modetrue = nfs_true; \ + (s)->sa_mode = vtonfsv3_mode((a)->va_mode); \ + (s)->sa_uidtrue = nfs_true; \ + (s)->sa_uid = txdr_unsigned(u); \ + (s)->sa_gidtrue = nfs_true; \ + (s)->sa_gid = txdr_unsigned(g); \ + (s)->sa_sizefalse = nfs_false; \ + 
(s)->sa_atimetype = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); \ + (s)->sa_mtimetype = txdr_unsigned(NFSV3SATTRTIME_TOSERVER); \ + } + +#define nfsm_strsiz(s,m) \ + { nfsm_dissect(tl,u_long *,NFSX_UNSIGNED); \ + if (((s) = fxdr_unsigned(long,*tl)) > (m)) { \ + m_freem(mrep); \ + error = EBADRPC; \ + goto nfsmout; \ + } } + +#define nfsm_srvstrsiz(s,m) \ + { nfsm_dissect(tl,u_long *,NFSX_UNSIGNED); \ + if (((s) = fxdr_unsigned(long,*tl)) > (m) || (s) <= 0) { \ + error = EBADRPC; \ + nfsm_reply(0); \ + } } + +#define nfsm_srvnamesiz(s) \ + { nfsm_dissect(tl,u_long *,NFSX_UNSIGNED); \ + if (((s) = fxdr_unsigned(long,*tl)) > NFS_MAXNAMLEN) \ + error = NFSERR_NAMETOL; \ + if ((s) <= 0) \ + error = EBADRPC; \ + if (error) \ + nfsm_reply(0); \ + } + +#define nfsm_mtouio(p,s) \ + if ((s) > 0 && \ + (t1 = nfsm_mbuftouio(&md,(p),(s),&dpos))) { \ + error = t1; \ + m_freem(mrep); \ + goto nfsmout; \ + } + +#define nfsm_uiotom(p,s) \ + if ((t1 = nfsm_uiotombuf((p),&mb,(s),&bpos))) { \ + error = t1; \ + m_freem(mreq); \ + goto nfsmout; \ + } + +#define nfsm_reqhead(v,a,s) \ + mb = mreq = nfsm_reqh((v),(a),(s),&bpos) + +#define nfsm_reqdone m_freem(mrep); \ + nfsmout: + +#define nfsm_rndup(a) (((a)+3)&(~0x3)) + +/* +* We seem to see cases mainly on shutdown where the vnode got recycled +* on use while waiting on server. Maybe nfs vnode locking will help if +* we implement that, but for now, check for bad vnodes and return an +* error. This call spot should catch most of them. Note that NFSv2 +* just goes to nfsmout here, while nfsV3 goes back to caller's next +* line for post-processing. It will do a nfsm_reqdone also making +* m_freem(mrep). Wondering if some of our freeing problems could be +* due to nfsv3 calling nfsm_reqdone unlike nfsv2. Separate problem. 
+*/ +#define nfsm_request(v, t, p, c) \ + { \ + int nfsv3 = (VFSTONFS((v)->v_mount))->nm_flag & NFSMNT_NFSV3; \ + if ((error = nfs_request((v), mreq, (t), (p), \ + (c), &mrep, &md, &dpos))) { \ + if (error & NFSERR_RETERR) \ + error &= ~NFSERR_RETERR; \ + else \ + goto nfsmout; \ + } \ + else if ((v)->v_type==VBAD) { \ + error = EINVAL; \ + if (!nfsv3) \ + goto nfsmout; \ + } \ + } + +#define nfsm_strtom(a,s,m) \ + if ((s) > (m)) { \ + m_freem(mreq); \ + error = ENAMETOOLONG; \ + goto nfsmout; \ + } \ + t2 = nfsm_rndup(s)+NFSX_UNSIGNED; \ + if (t2 <= M_TRAILINGSPACE(mb)) { \ + nfsm_build(tl,u_long *,t2); \ + *tl++ = txdr_unsigned(s); \ + *(tl+((t2>>2)-2)) = 0; \ + bcopy((caddr_t)(a), (caddr_t)tl, (s)); \ + } else if ((t2 = nfsm_strtmbuf(&mb, &bpos, (a), (s)))) { \ + error = t2; \ + m_freem(mreq); \ + goto nfsmout; \ + } + +#define nfsm_srvdone \ + nfsmout: \ + return(error) + +#define nfsm_reply(s) \ + { \ + nfsd->nd_repstat = error; \ + if (error && !(nfsd->nd_flag & ND_NFSV3)) \ + (void) nfs_rephead(0, nfsd, slp, error, cache, &frev, \ + mrq, &mb, &bpos); \ + else \ + (void) nfs_rephead((s), nfsd, slp, error, cache, &frev, \ + mrq, &mb, &bpos); \ + m_freem(mrep); \ + mreq = *mrq; \ + if (error && (!(nfsd->nd_flag & ND_NFSV3) || \ + error == EBADRPC)) \ + return(0); \ + } + +#define nfsm_writereply(s, v3) \ + { \ + nfsd->nd_repstat = error; \ + if (error && !(v3)) \ + (void) nfs_rephead(0, nfsd, slp, error, cache, &frev, \ + &mreq, &mb, &bpos); \ + else \ + (void) nfs_rephead((s), nfsd, slp, error, cache, &frev, \ + &mreq, &mb, &bpos); \ + } + +#define nfsm_adv(s) \ + { t1 = mtod(md, caddr_t)+md->m_len-dpos; \ + if (t1 >= (s)) { \ + dpos += (s); \ + } else if ((t1 = nfs_adv(&md, &dpos, (s), t1))) { \ + error = t1; \ + m_freem(mrep); \ + goto nfsmout; \ + } } + +#define nfsm_srvmtofh(f) \ + { if (nfsd->nd_flag & ND_NFSV3) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (fxdr_unsigned(int, *tl) != NFSX_V3FH) { \ + error = EBADRPC; \ + nfsm_reply(0); \ + } \ 
+ } \ + nfsm_dissect(tl, u_long *, NFSX_V3FH); \ + bcopy((caddr_t)tl, (caddr_t)(f), NFSX_V3FH); \ + if ((nfsd->nd_flag & ND_NFSV3) == 0) \ + nfsm_adv(NFSX_V2FH - NFSX_V3FH); \ + } + +#define nfsm_clget \ + if (bp >= be) { \ + if (mp == mb) \ + mp->m_len += bp-bpos; \ + MGET(mp, M_WAIT, MT_DATA); \ + MCLGET(mp, M_WAIT); \ + mp->m_len = NFSMSIZ(mp); \ + mp2->m_next = mp; \ + mp2 = mp; \ + bp = mtod(mp, caddr_t); \ + be = bp+mp->m_len; \ + } \ + tl = (u_long *)bp + +#define nfsm_srvfillattr(a, f) \ + nfsm_srvfattr(nfsd, (a), (f)) + +#define nfsm_srvwcc_data(br, b, ar, a) \ + nfsm_srvwcc(nfsd, (br), (b), (ar), (a), &mb, &bpos) + +#define nfsm_srvpostop_attr(r, a) \ + nfsm_srvpostopattr(nfsd, (r), (a), &mb, &bpos) + +#define nfsm_srvsattr(a) \ + { nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (*tl == nfs_true) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + (a)->va_mode = nfstov_mode(*tl); \ + } \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (*tl == nfs_true) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + (a)->va_uid = fxdr_unsigned(uid_t, *tl); \ + } \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (*tl == nfs_true) { \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + (a)->va_gid = fxdr_unsigned(gid_t, *tl); \ + } \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + if (*tl == nfs_true) { \ + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); \ + fxdr_hyper(tl, &(a)->va_size); \ + } \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + switch (fxdr_unsigned(int, *tl)) { \ + case NFSV3SATTRTIME_TOCLIENT: \ + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); \ + fxdr_nfsv3time(tl, &(a)->va_atime); \ + break; \ + case NFSV3SATTRTIME_TOSERVER: \ + (a)->va_atime.tv_sec = time.tv_sec; \ + (a)->va_atime.tv_nsec = time.tv_usec * 1000; \ + break; \ + }; \ + nfsm_dissect(tl, u_long *, NFSX_UNSIGNED); \ + switch (fxdr_unsigned(int, *tl)) { \ + case NFSV3SATTRTIME_TOCLIENT: \ + nfsm_dissect(tl, u_long *, 2 * NFSX_UNSIGNED); \ + fxdr_nfsv3time(tl, &(a)->va_mtime); \ 
+ break; \ + case NFSV3SATTRTIME_TOSERVER: \ + (a)->va_mtime.tv_sec = time.tv_sec; \ + (a)->va_mtime.tv_nsec = time.tv_usec * 1000; \ + break; \ + }; } + +#endif diff --git a/bsd/nfs/nfsmount.h b/bsd/nfs/nfsmount.h new file mode 100644 index 000000000..35af84312 --- /dev/null +++ b/bsd/nfs/nfsmount.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfsmount.h 8.3 (Berkeley) 3/30/95 + * FreeBSD-Id: nfsmount.h,v 1.13 1997/08/16 19:16:05 wollman Exp $ + */ + + +#ifndef _NFS_NFSMOUNT_H_ +#define _NFS_NFSMOUNT_H_ + +/* + * Mount structure. + * One allocated on every NFS mount. + * Holds NFS specific information for mount. + */ +struct nfsmount { + int nm_flag; /* Flags for soft/hard... */ + struct mount *nm_mountp; /* Vfs structure for this filesystem */ + int nm_numgrps; /* Max. 
size of groupslist */ + struct vnode *nm_dvp; /* root directory vnode pointer */ + struct socket *nm_so; /* Rpc socket */ + int nm_sotype; /* Type of socket */ + int nm_soproto; /* and protocol */ + int nm_soflags; /* pr_flags for socket protocol */ + struct mbuf *nm_nam; /* Addr of server */ + int nm_timeo; /* Init timer for NFSMNT_DUMBTIMR */ + int nm_retry; /* Max retries */ + int nm_srtt[4]; /* Timers for rpcs */ + int nm_sdrtt[4]; + int nm_sent; /* Request send count */ + int nm_cwnd; /* Request send window */ + int nm_timeouts; /* Request timeouts */ + int nm_deadthresh; /* Threshold of timeouts-->dead server*/ + int nm_rsize; /* Max size of read rpc */ + int nm_wsize; /* Max size of write rpc */ + int nm_readdirsize; /* Size of a readdir rpc */ + int nm_readahead; /* Num. of blocks to readahead */ + int nm_leaseterm; /* Term (sec) for NQNFS lease */ + CIRCLEQ_HEAD(, nfsnode) nm_timerhead; /* Head of lease timer queue */ + struct vnode *nm_inprog; /* Vnode in prog by nqnfs_clientd() */ + uid_t nm_authuid; /* Uid for authenticator */ + int nm_authtype; /* Authenticator type */ + int nm_authlen; /* and length */ + char *nm_authstr; /* Authenticator string */ + char *nm_verfstr; /* and the verifier */ + int nm_verflen; + u_char nm_verf[NFSX_V3WRITEVERF]; /* V3 write verifier */ + NFSKERBKEY_T nm_key; /* and the session key */ + int nm_numuids; /* Number of nfsuid mappings */ + TAILQ_HEAD(, nfsuid) nm_uidlruhead; /* Lists of nfsuid mappings */ + LIST_HEAD(, nfsuid) nm_uidhashtbl[NFS_MUIDHASHSIZ]; + TAILQ_HEAD(, buf) nm_bufq; /* async io buffer queue */ + short nm_bufqlen; /* number of buffers in queue */ + short nm_bufqwant; /* process wants to add to the queue */ + int nm_bufqiods; /* number of iods processing queue */ +}; + +#if defined(KERNEL) +/* + * Convert mount ptr to nfsmount ptr. 
+ */ +#define VFSTONFS(mp) ((struct nfsmount *)((mp)->mnt_data)) + +#endif /* KERNEL */ + +#endif diff --git a/bsd/nfs/nfsnode.h b/bsd/nfs/nfsnode.h new file mode 100644 index 000000000..829508a14 --- /dev/null +++ b/bsd/nfs/nfsnode.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfsnode.h 8.9 (Berkeley) 5/14/95 + * FreeBSD-Id: nfsnode.h,v 1.24 1997/10/28 14:06:25 bde Exp $ + */ + + +#ifndef _NFS_NFSNODE_H_ +#define _NFS_NFSNODE_H_ + +#ifndef _NFS_NFS_H_ +#include +#endif + +/* + * Silly rename structure that hangs off the nfsnode until the name + * can be removed by nfs_inactive() + */ +struct sillyrename { + struct ucred *s_cred; + struct vnode *s_dvp; + long s_namlen; + char s_name[20]; +}; + +/* + * This structure is used to save the logical directory offset to + * NFS cookie mappings. + * The mappings are stored in a list headed + * by n_cookies, as required. + * There is one mapping for each NFS_DIRBLKSIZ bytes of directory information + * stored in increasing logical offset byte order. 
+ */ +#define NFSNUMCOOKIES 31 + +struct nfsdmap { + LIST_ENTRY(nfsdmap) ndm_list; + int ndm_eocookie; + nfsuint64 ndm_cookies[NFSNUMCOOKIES]; +}; + +/* + * The nfsnode is the nfs equivalent to ufs's inode. Any similarity + * is purely coincidental. + * There is a unique nfsnode allocated for each active file, + * each current directory, each mounted-on file, text file, and the root. + * An nfsnode is 'named' by its file handle. (nget/nfs_node.c) + * If this structure exceeds 256 bytes (it is currently 256 using 4.4BSD-Lite + * type definitions), file handles of > 32 bytes should probably be split out + * into a separate MALLOC()'d data structure. (Reduce the size of nfsfh_t by + * changing the definition in sys/mount.h of NFS_SMALLFH.) + * NB: Hopefully the current order of the fields is such that everything will + * be well aligned and, therefore, tightly packed. + */ +struct nfsnode { + LIST_ENTRY(nfsnode) n_hash; /* Hash chain */ + CIRCLEQ_ENTRY(nfsnode) n_timer; /* Nqnfs timer chain */ + u_quad_t n_size; /* Current size of file */ + u_quad_t n_brev; /* Modify rev when cached */ + u_quad_t n_lrev; /* Modify rev for lease */ + struct vattr n_vattr; /* Vnode attribute cache */ + time_t n_attrstamp; /* Attr. cache timestamp */ + u_int32_t n_mode; /* ACCESS mode cache */ + uid_t n_modeuid; /* credentials having mode */ + time_t n_modestamp; /* mode cache timestamp */ + time_t n_mtime; /* Prev modify time. */ + time_t n_ctime; /* Prev create time. */ + time_t n_expiry; /* Lease expiry time */ + nfsfh_t *n_fhp; /* NFS File Handle */ + struct vnode *n_vnode; /* associated vnode */ + struct lockf *n_lockf; /* Locking record of file */ + int n_error; /* Save write error value */ + union { + struct timespec nf_atim; /* Special file times */ + nfsuint64 nd_cookieverf; /* Cookie verifier (dir only) */ + } n_un1; + union { + struct timespec nf_mtim; + off_t nd_direof; /* Dir. 
EOF offset cache */ + } n_un2; + union { + struct sillyrename *nf_silly; /* Ptr to silly rename struct */ + LIST_HEAD(, nfsdmap) nd_cook; /* cookies */ + } n_un3; + short n_fhsize; /* size in bytes, of fh */ + short n_flag; /* Flag for locking.. */ + nfsfh_t n_fh; /* Small File Handle */ +}; + +#define n_atim n_un1.nf_atim +#define n_mtim n_un2.nf_mtim +#define n_sillyrename n_un3.nf_silly +#define n_cookieverf n_un1.nd_cookieverf +#define n_direofoffset n_un2.nd_direof +#define n_cookies n_un3.nd_cook + +/* + * Flags for n_flag + */ +#define NFLUSHWANT 0x0001 /* Want wakeup from a flush in prog. */ +#define NFLUSHINPROG 0x0002 /* Avoid multiple calls to vinvalbuf() */ +#define NMODIFIED 0x0004 /* Might have a modified buffer in bio */ +#define NWRITEERR 0x0008 /* Flag write errors so close will know */ +#define NQNFSNONCACHE 0x0020 /* Non-cachable lease */ +#define NQNFSWRITE 0x0040 /* Write lease */ +#define NQNFSEVICTED 0x0080 /* Has been evicted */ +#define NACC 0x0100 /* Special file accessed */ +#define NUPD 0x0200 /* Special file updated */ +#define NCHG 0x0400 /* Special file times changed */ +#define NLOCKED 0x0800 /* node is locked */ +#define NWANTED 0x0100 /* someone wants to lock */ + +/* + * Convert between nfsnode pointers and vnode pointers + */ +#define VTONFS(vp) ((struct nfsnode *)(vp)->v_data) +#define NFSTOV(np) ((struct vnode *)(np)->n_vnode) + +/* + * Queue head for nfsiod's + */ +extern TAILQ_HEAD(nfs_bufq, buf) nfs_bufq; +extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON]; +extern struct nfsmount *nfs_iodmount[NFS_MAXASYNCDAEMON]; + +#if defined(KERNEL) +typedef int vop_t __P((void *)); +extern vop_t **fifo_nfsv2nodeop_p; +extern vop_t **nfsv2_vnodeop_p; +extern vop_t **spec_nfsv2nodeop_p; + +/* + * Prototypes for NFS vnode operations + */ +int nfs_write __P((struct vop_write_args *)); +#define nfs_lease_check ((int (*) __P((struct vop_lease_args *)))nullop) +#define nqnfs_vop_lease_check lease_check +int nqnfs_vop_lease_check __P((struct 
vop_lease_args *)); +#define nfs_revoke vop_revoke +#define nfs_seek ((int (*) __P((struct vop_seek_args *)))nullop) +int nfs_abortop __P((struct vop_abortop_args *)); +int nfs_inactive __P((struct vop_inactive_args *)); +int nfs_reclaim __P((struct vop_reclaim_args *)); +#define nfs_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock) +#define nfs_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock) +#define nfs_islocked ((int (*) __P((struct vop_islocked_args *)))vop_noislocked) +#define nfs_reallocblks \ + ((int (*) __P((struct vop_reallocblks_args *)))eopnotsupp) + +/* other stuff */ +int nfs_removeit __P((struct sillyrename *)); +int nfs_nget __P((struct mount *,nfsfh_t *,int,struct nfsnode **)); +nfsuint64 *nfs_getcookie __P((struct nfsnode *, off_t, int)); +void nfs_invaldir __P((struct vnode *)); + +#define nqnfs_lease_updatetime lease_updatetime + +#endif /* KERNEL */ + +#endif diff --git a/bsd/nfs/nfsproto.h b/bsd/nfs/nfsproto.h new file mode 100644 index 000000000..63901cda5 --- /dev/null +++ b/bsd/nfs/nfsproto.h @@ -0,0 +1,475 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfsproto.h 8.2 (Berkeley) 3/30/95 + * FreeBSD-Id: nfsproto.h,v 1.3 1997/02/22 09:42:50 peter Exp $ + */ + +#ifndef _NFS_NFSPROTO_H_ +#define _NFS_NFSPROTO_H_ + +/* + * nfs definitions as per the Version 2 and 3 specs + */ + +/* + * Constants as defined in the Sun NFS Version 2 and 3 specs. + * "NFS: Network File System Protocol Specification" RFC1094 + * and in the "NFS: Network File System Version 3 Protocol + * Specification" + */ + +/* Only define these if nfs_prot.h hasn't been included */ +#ifndef NFS_PROGRAM + +#define NFS_PORT 2049 +#define NFS_PROG 100003 +#define NFS_VER2 2 +#define NFS_VER3 3 +#define NFS_V2MAXDATA 8192 +#define NFS_MAXDGRAMDATA 16384 +#define NFS_MAXDATA 32768 +#define NFS_MAXPATHLEN 1024 +#define NFS_MAXNAMLEN 255 +#define NFS_MAXPKTHDR 404 +#define NFS_MAXPACKET (NFS_MAXPKTHDR + NFS_MAXDATA) +#define NFS_MINPACKET 20 +#define NFS_FABLKSIZE 512 /* Size in bytes of a block wrt fa_blocks */ + +/* Stat numbers for rpc returns (version 2 and 3) */ +#define NFS_OK 0 +#define NFSERR_PERM 1 +#define NFSERR_NOENT 2 +#define NFSERR_IO 5 +#define NFSERR_NXIO 6 +#define NFSERR_ACCES 13 +#define NFSERR_EXIST 17 +#define NFSERR_XDEV 18 /* Version 3 only */ +#define NFSERR_NODEV 19 +#define NFSERR_NOTDIR 20 +#define NFSERR_ISDIR 21 +#define NFSERR_INVAL 22 /* Version 3 only */ +#define NFSERR_FBIG 27 +#define NFSERR_NOSPC 28 +#define NFSERR_ROFS 30 +#define NFSERR_MLINK 31 /* Version 3 only */ +#define 
NFSERR_NAMETOL 63 +#define NFSERR_NOTEMPTY 66 +#define NFSERR_DQUOT 69 +#define NFSERR_STALE 70 +#define NFSERR_REMOTE 71 /* Version 3 only */ +#define NFSERR_WFLUSH 99 /* Version 2 only */ +#define NFSERR_BADHANDLE 10001 /* The rest Version 3 only */ +#define NFSERR_NOT_SYNC 10002 +#define NFSERR_BAD_COOKIE 10003 +#define NFSERR_NOTSUPP 10004 +#define NFSERR_TOOSMALL 10005 +#define NFSERR_SERVERFAULT 10006 +#define NFSERR_BADTYPE 10007 +#define NFSERR_JUKEBOX 10008 +#define NFSERR_TRYLATER NFSERR_JUKEBOX +#define NFSERR_STALEWRITEVERF 30001 /* Fake return for nfs_commit() */ + +#define NFSERR_RETVOID 0x20000000 /* Return void, not error */ +#define NFSERR_AUTHERR 0x40000000 /* Mark an authentication error */ +#define NFSERR_RETERR 0x80000000 /* Mark an error return for V3 */ + +#endif /* !NFS_PROGRAM */ + +/* Sizes in bytes of various nfs rpc components */ +#define NFSX_UNSIGNED 4 + +/* specific to NFS Version 2 */ +#define NFSX_V2FH 32 +#define NFSX_V2FATTR 68 +#define NFSX_V2SATTR 32 +#define NFSX_V2COOKIE 4 +#define NFSX_V2STATFS 20 + +/* specific to NFS Version 3 */ +#define NFSX_V3FH (sizeof (fhandle_t)) /* size this server uses */ +#define NFSX_V3FHMAX 64 /* max. allowed by protocol */ +#define NFSX_V3FATTR 84 +#define NFSX_V3SATTR 60 /* max. all fields filled in */ +#define NFSX_V3SRVSATTR (sizeof (struct nfsv3_sattr)) +#define NFSX_V3POSTOPATTR (NFSX_V3FATTR + NFSX_UNSIGNED) +#define NFSX_V3WCCDATA (NFSX_V3POSTOPATTR + 8 * NFSX_UNSIGNED) +#define NFSX_V3COOKIEVERF 8 +#define NFSX_V3WRITEVERF 8 +#define NFSX_V3CREATEVERF 8 +#define NFSX_V3STATFS 52 +#define NFSX_V3FSINFO 48 +#define NFSX_V3PATHCONF 24 + +/* variants for both versions */ +#define NFSX_FH(v3) ((v3) ? (NFSX_V3FHMAX + NFSX_UNSIGNED) : \ + NFSX_V2FH) +#define NFSX_SRVFH(v3) ((v3) ? NFSX_V3FH : NFSX_V2FH) +#define NFSX_FATTR(v3) ((v3) ? NFSX_V3FATTR : NFSX_V2FATTR) +#define NFSX_PREOPATTR(v3) ((v3) ? (7 * NFSX_UNSIGNED) : 0) +#define NFSX_POSTOPATTR(v3) ((v3) ? 
(NFSX_V3FATTR + NFSX_UNSIGNED) : 0) +#define NFSX_POSTOPORFATTR(v3) ((v3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : \ + NFSX_V2FATTR) +#define NFSX_WCCDATA(v3) ((v3) ? NFSX_V3WCCDATA : 0) +#define NFSX_WCCORFATTR(v3) ((v3) ? NFSX_V3WCCDATA : NFSX_V2FATTR) +#define NFSX_SATTR(v3) ((v3) ? NFSX_V3SATTR : NFSX_V2SATTR) +#define NFSX_COOKIEVERF(v3) ((v3) ? NFSX_V3COOKIEVERF : 0) +#define NFSX_WRITEVERF(v3) ((v3) ? NFSX_V3WRITEVERF : 0) +#define NFSX_READDIR(v3) ((v3) ? (5 * NFSX_UNSIGNED) : \ + (2 * NFSX_UNSIGNED)) +#define NFSX_STATFS(v3) ((v3) ? NFSX_V3STATFS : NFSX_V2STATFS) + +/* Only define these if nfs_prot.h hasn't been included */ +#ifndef NFS_PROGRAM + +/* nfs rpc procedure numbers (before version mapping) */ +#define NFSPROC_NULL 0 +#define NFSPROC_GETATTR 1 +#define NFSPROC_SETATTR 2 +#define NFSPROC_LOOKUP 3 +#define NFSPROC_ACCESS 4 +#define NFSPROC_READLINK 5 +#define NFSPROC_READ 6 +#define NFSPROC_WRITE 7 +#define NFSPROC_CREATE 8 +#define NFSPROC_MKDIR 9 +#define NFSPROC_SYMLINK 10 +#define NFSPROC_MKNOD 11 +#define NFSPROC_REMOVE 12 +#define NFSPROC_RMDIR 13 +#define NFSPROC_RENAME 14 +#define NFSPROC_LINK 15 +#define NFSPROC_READDIR 16 +#define NFSPROC_READDIRPLUS 17 +#define NFSPROC_FSSTAT 18 +#define NFSPROC_FSINFO 19 +#define NFSPROC_PATHCONF 20 +#define NFSPROC_COMMIT 21 + +#endif /* !NFS_PROGRAM */ + +/* And leasing (nqnfs) procedure numbers (must be last) */ +#define NQNFSPROC_GETLEASE 22 +#define NQNFSPROC_VACATED 23 +#define NQNFSPROC_EVICTED 24 + +#define NFSPROC_NOOP 25 +#define NFS_NPROCS 26 + +/* Actual Version 2 procedure numbers */ +#define NFSV2PROC_NULL 0 +#define NFSV2PROC_GETATTR 1 +#define NFSV2PROC_SETATTR 2 +#define NFSV2PROC_NOOP 3 +#define NFSV2PROC_ROOT NFSV2PROC_NOOP /* Obsolete */ +#define NFSV2PROC_LOOKUP 4 +#define NFSV2PROC_READLINK 5 +#define NFSV2PROC_READ 6 +#define NFSV2PROC_WRITECACHE NFSV2PROC_NOOP /* Obsolete */ +#define NFSV2PROC_WRITE 8 +#define NFSV2PROC_CREATE 9 +#define NFSV2PROC_REMOVE 10 +#define NFSV2PROC_RENAME 11 
+#define NFSV2PROC_LINK 12 +#define NFSV2PROC_SYMLINK 13 +#define NFSV2PROC_MKDIR 14 +#define NFSV2PROC_RMDIR 15 +#define NFSV2PROC_READDIR 16 +#define NFSV2PROC_STATFS 17 + +/* + * Constants used by the Version 3 protocol for various RPCs + */ +#define NFSV3SATTRTIME_DONTCHANGE 0 +#define NFSV3SATTRTIME_TOSERVER 1 +#define NFSV3SATTRTIME_TOCLIENT 2 + +#define NFSV3ACCESS_READ 0x01 +#define NFSV3ACCESS_LOOKUP 0x02 +#define NFSV3ACCESS_MODIFY 0x04 +#define NFSV3ACCESS_EXTEND 0x08 +#define NFSV3ACCESS_DELETE 0x10 +#define NFSV3ACCESS_EXECUTE 0x20 + +#define NFSV3WRITE_UNSTABLE 0 +#define NFSV3WRITE_DATASYNC 1 +#define NFSV3WRITE_FILESYNC 2 + +#define NFSV3CREATE_UNCHECKED 0 +#define NFSV3CREATE_GUARDED 1 +#define NFSV3CREATE_EXCLUSIVE 2 + +#define NFSV3FSINFO_LINK 0x01 +#define NFSV3FSINFO_SYMLINK 0x02 +#define NFSV3FSINFO_HOMOGENEOUS 0x08 +#define NFSV3FSINFO_CANSETTIME 0x10 + +/* Conversion macros */ +#define vtonfsv2_mode(t,m) \ + txdr_unsigned(((t) == VFIFO) ? MAKEIMODE(VCHR, (m)) : \ + MAKEIMODE((t), (m))) +#define vtonfsv3_mode(m) txdr_unsigned((m) & 07777) +#define nfstov_mode(a) (fxdr_unsigned(u_short, (a))&07777) +#define vtonfsv2_type(a) txdr_unsigned(nfsv2_type[((long)(a))]) +#define vtonfsv3_type(a) txdr_unsigned(nfsv3_type[((long)(a))]) +#define nfsv2tov_type(a) nv2tov_type[fxdr_unsigned(u_long,(a))&0x7] +#define nfsv3tov_type(a) nv3tov_type[fxdr_unsigned(u_long,(a))&0x7] + +/* Only define these if nfs_prot.h hasn't been included */ +#ifndef NFS_PROGRAM + +/* File types */ +typedef enum { NFNON=0, NFREG=1, NFDIR=2, NFBLK=3, NFCHR=4, NFLNK=5, + NFSOCK=6, NFFIFO=7 } nfstype; +#endif /* !NFS_PROGRAM */ + +/* Structs for common parts of the rpc's */ +/* + * File Handle (32 bytes for version 2), variable up to 64 for version 3. + * File Handles of up to NFS_SMALLFH in size are stored directly in the + * nfs node, whereas larger ones are malloc'd. (This never happens when + * NFS_SMALLFH is set to 64.) 
+ * NFS_SMALLFH should be in the range of 32 to 64 and be divisible by 4. + */ +#ifndef NFS_SMALLFH +#define NFS_SMALLFH 64 +#endif +union nfsfh { + fhandle_t fh_generic; + u_char fh_bytes[NFS_SMALLFH]; +}; +typedef union nfsfh nfsfh_t; + +struct nfsv2_time { + u_long nfsv2_sec; + u_long nfsv2_usec; +}; +typedef struct nfsv2_time nfstime2; + +struct nfsv3_time { + u_long nfsv3_sec; + u_long nfsv3_nsec; +}; +typedef struct nfsv3_time nfstime3; + +/* + * Quads are defined as arrays of 2 longs to ensure dense packing for the + * protocol and to facilitate xdr conversion. + */ +struct nfs_uquad { + u_long nfsuquad[2]; +}; +typedef struct nfs_uquad nfsuint64; + +/* + * Used to convert between two u_longs and a u_quad_t. + */ +union nfs_quadconvert { + u_long lval[2]; + u_quad_t qval; +}; +typedef union nfs_quadconvert nfsquad_t; + +/* + * NFS Version 3 special file number. + */ +struct nfsv3_spec { + u_long specdata1; + u_long specdata2; +}; +typedef struct nfsv3_spec nfsv3spec; + +/* + * File attributes and setable attributes. These structures cover both + * NFS version 2 and the version 3 protocol. Note that the union is only + * used so that one pointer can refer to both variants. These structures + * go out on the wire and must be densely packed, so no quad data types + * are used. (all fields are longs or u_longs or structures of same) + * NB: You can't do sizeof(struct nfs_fattr), you must use the + * NFSX_FATTR(v3) macro. 
+ */ +struct nfs_fattr { + u_long fa_type; + u_long fa_mode; + u_long fa_nlink; + u_long fa_uid; + u_long fa_gid; + union { + struct { + u_long nfsv2fa_size; + u_long nfsv2fa_blocksize; + u_long nfsv2fa_rdev; + u_long nfsv2fa_blocks; + u_long nfsv2fa_fsid; + u_long nfsv2fa_fileid; + nfstime2 nfsv2fa_atime; + nfstime2 nfsv2fa_mtime; + nfstime2 nfsv2fa_ctime; + } fa_nfsv2; + struct { + nfsuint64 nfsv3fa_size; + nfsuint64 nfsv3fa_used; + nfsv3spec nfsv3fa_rdev; + nfsuint64 nfsv3fa_fsid; + nfsuint64 nfsv3fa_fileid; + nfstime3 nfsv3fa_atime; + nfstime3 nfsv3fa_mtime; + nfstime3 nfsv3fa_ctime; + } fa_nfsv3; + } fa_un; +}; + +/* and some ugly defines for accessing union components */ +#define fa2_size fa_un.fa_nfsv2.nfsv2fa_size +#define fa2_blocksize fa_un.fa_nfsv2.nfsv2fa_blocksize +#define fa2_rdev fa_un.fa_nfsv2.nfsv2fa_rdev +#define fa2_blocks fa_un.fa_nfsv2.nfsv2fa_blocks +#define fa2_fsid fa_un.fa_nfsv2.nfsv2fa_fsid +#define fa2_fileid fa_un.fa_nfsv2.nfsv2fa_fileid +#define fa2_atime fa_un.fa_nfsv2.nfsv2fa_atime +#define fa2_mtime fa_un.fa_nfsv2.nfsv2fa_mtime +#define fa2_ctime fa_un.fa_nfsv2.nfsv2fa_ctime +#define fa3_size fa_un.fa_nfsv3.nfsv3fa_size +#define fa3_used fa_un.fa_nfsv3.nfsv3fa_used +#define fa3_rdev fa_un.fa_nfsv3.nfsv3fa_rdev +#define fa3_fsid fa_un.fa_nfsv3.nfsv3fa_fsid +#define fa3_fileid fa_un.fa_nfsv3.nfsv3fa_fileid +#define fa3_atime fa_un.fa_nfsv3.nfsv3fa_atime +#define fa3_mtime fa_un.fa_nfsv3.nfsv3fa_mtime +#define fa3_ctime fa_un.fa_nfsv3.nfsv3fa_ctime + +struct nfsv2_sattr { + u_long sa_mode; + u_long sa_uid; + u_long sa_gid; + u_long sa_size; + nfstime2 sa_atime; + nfstime2 sa_mtime; +}; + +/* + * NFS Version 3 sattr structure for the new node creation case. 
+ */ +struct nfsv3_sattr { + u_long sa_modetrue; + u_long sa_mode; + u_long sa_uidtrue; + u_long sa_uid; + u_long sa_gidtrue; + u_long sa_gid; + u_long sa_sizefalse; + u_long sa_atimetype; + u_long sa_mtimetype; +}; + +struct nfs_statfs { + union { + struct { + u_long nfsv2sf_tsize; + u_long nfsv2sf_bsize; + u_long nfsv2sf_blocks; + u_long nfsv2sf_bfree; + u_long nfsv2sf_bavail; + } sf_nfsv2; + struct { + nfsuint64 nfsv3sf_tbytes; + nfsuint64 nfsv3sf_fbytes; + nfsuint64 nfsv3sf_abytes; + nfsuint64 nfsv3sf_tfiles; + nfsuint64 nfsv3sf_ffiles; + nfsuint64 nfsv3sf_afiles; + u_long nfsv3sf_invarsec; + } sf_nfsv3; + } sf_un; +}; + +#define sf_tsize sf_un.sf_nfsv2.nfsv2sf_tsize +#define sf_bsize sf_un.sf_nfsv2.nfsv2sf_bsize +#define sf_blocks sf_un.sf_nfsv2.nfsv2sf_blocks +#define sf_bfree sf_un.sf_nfsv2.nfsv2sf_bfree +#define sf_bavail sf_un.sf_nfsv2.nfsv2sf_bavail +#define sf_tbytes sf_un.sf_nfsv3.nfsv3sf_tbytes +#define sf_fbytes sf_un.sf_nfsv3.nfsv3sf_fbytes +#define sf_abytes sf_un.sf_nfsv3.nfsv3sf_abytes +#define sf_tfiles sf_un.sf_nfsv3.nfsv3sf_tfiles +#define sf_ffiles sf_un.sf_nfsv3.nfsv3sf_ffiles +#define sf_afiles sf_un.sf_nfsv3.nfsv3sf_afiles +#define sf_invarsec sf_un.sf_nfsv3.nfsv3sf_invarsec + +struct nfsv3_fsinfo { + u_long fs_rtmax; + u_long fs_rtpref; + u_long fs_rtmult; + u_long fs_wtmax; + u_long fs_wtpref; + u_long fs_wtmult; + u_long fs_dtpref; + nfsuint64 fs_maxfilesize; + nfstime3 fs_timedelta; + u_long fs_properties; +}; + +struct nfsv3_pathconf { + u_long pc_linkmax; + u_long pc_namemax; + u_long pc_notrunc; + u_long pc_chownrestricted; + u_long pc_caseinsensitive; + u_long pc_casepreserving; +}; + +#endif diff --git a/bsd/nfs/nfsrtt.h b/bsd/nfs/nfsrtt.h new file mode 100644 index 000000000..0a58dd9d4 --- /dev/null +++ b/bsd/nfs/nfsrtt.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfsrtt.h 8.2 (Berkeley) 3/30/95 + * FreeBSD-Id: nfsrtt.h,v 1.8 1997/06/03 17:22:47 dfr Exp $ + */ + + +#ifndef _NFS_NFSRTT_H_ +#define _NFS_NFSRTT_H_ + +/* + * Definitions for performance monitor. + * The client and server logging are turned on by setting the global + * constant "nfsrtton" to 1. + */ +#define NFSRTTLOGSIZ 128 + +/* + * Circular log of client side rpc activity. Each log entry is for one + * rpc filled in upon completion. (ie. in order of completion) + * The "pos" is the table index for the "next" entry, therefore the + * list goes from nfsrtt.rttl[pos] --> nfsrtt.rttl[pos - 1] in + * chronological order of completion. 
+ */ +struct nfsrtt { + int pos; /* Position in array for next entry */ + struct rttl { + u_int32_t proc; /* NFS procedure number */ + int rtt; /* Measured round trip time */ + int rto; /* Round Trip Timeout */ + int sent; /* # rpcs in progress */ + int cwnd; /* Send window */ + int srtt; /* Ave Round Trip Time */ + int sdrtt; /* Ave mean deviation of RTT */ + fsid_t fsid; /* Fsid for mount point */ + struct timeval tstamp; /* Timestamp of log entry */ + } rttl[NFSRTTLOGSIZ]; +}; + +/* + * And definitions for server side performance monitor. + * The log organization is the same as above except it is filled in at the + * time the server sends the rpc reply. + */ + +/* + * Bits for the flags field. + */ +#define DRT_NQNFS 0x01 /* Rpc used Nqnfs protocol */ +#define DRT_TCP 0x02 /* Client used TCP transport */ +#define DRT_CACHEREPLY 0x04 /* Reply was from recent request cache */ +#define DRT_CACHEDROP 0x08 /* Rpc request dropped, due to recent reply */ +#define DRT_NFSV3 0x10 /* Rpc used NFS Version 3 */ + +/* + * Server log structure + * NB: ipadr == INADDR_ANY indicates a client using a non IP protocol. + * (ISO perhaps?) + */ +struct nfsdrt { + int pos; /* Position of next log entry */ + struct drt { + int flag; /* Bits as defined above */ + u_int32_t proc; /* NFS procedure number */ + u_long ipadr; /* IP address of client */ + int resptime; /* Response time (usec) */ + struct timeval tstamp; /* Timestamp of log entry */ + } drt[NFSRTTLOGSIZ]; +}; + +#endif diff --git a/bsd/nfs/nfsrvcache.h b/bsd/nfs/nfsrvcache.h new file mode 100644 index 000000000..54b93d50f --- /dev/null +++ b/bsd/nfs/nfsrvcache.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nfsrvcache.h 8.3 (Berkeley) 3/30/95 + * FreeBSD-Id: nfsrvcache.h,v 1.9 1997/06/03 17:22:47 dfr Exp $ + */ + + +#ifndef _NFS_NFSRVCACHE_H_ +#define _NFS_NFSRVCACHE_H_ + +/* + * Definitions for the server recent request cache + */ + +#define NFSRVCACHESIZ 64 + +struct nfsrvcache { + TAILQ_ENTRY(nfsrvcache) rc_lru; /* LRU chain */ + LIST_ENTRY(nfsrvcache) rc_hash; /* Hash chain */ + u_long rc_xid; /* rpc id number */ + union { + struct mbuf *ru_repmb; /* Reply mbuf list OR */ + int ru_repstat; /* Reply status */ + } rc_un; + union nethostaddr rc_haddr; /* Host address */ + u_int32_t rc_proc; /* rpc proc number */ + u_char rc_state; /* Current state of request */ + u_char rc_flag; /* Flag bits */ +}; + +#define rc_reply rc_un.ru_repmb +#define rc_status rc_un.ru_repstat +#define rc_inetaddr rc_haddr.had_inetaddr +#define rc_nam rc_haddr.had_nam + +/* Cache entry states */ +#define RC_UNUSED 0 +#define RC_INPROG 1 +#define RC_DONE 2 + +/* Return values */ +#define RC_DROPIT 0 +#define RC_REPLY 1 +#define RC_DOIT 2 +#define RC_CHECKIT 3 + +/* Flag bits */ +#define RC_LOCKED 0x01 +#define RC_WANTED 0x02 +#define RC_REPSTATUS 0x04 +#define RC_REPMBUF 0x08 +#define RC_NQNFS 0x10 +#define 
RC_INETADDR 0x20 +#define RC_NAM 0x40 + +#endif diff --git a/bsd/nfs/nqnfs.h b/bsd/nfs/nqnfs.h new file mode 100644 index 000000000..0cbb47564 --- /dev/null +++ b/bsd/nfs/nqnfs.h @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)nqnfs.h 8.3 (Berkeley) 3/30/95 + * FreeBSD-Id: nqnfs.h,v 1.14 1997/08/16 19:16:05 wollman Exp $ + */ + + +#ifndef _NFS_NQNFS_H_ +#define _NFS_NQNFS_H_ + +/* + * Definitions for NQNFS (Not Quite NFS) cache consistency protocol. 
+ */ + +/* Tunable constants */ +#define NQ_CLOCKSKEW 3 /* Clock skew factor (sec) */ +#define NQ_WRITESLACK 5 /* Delay for write cache flushing */ +#define NQ_MAXLEASE 60 /* Max lease duration (sec) */ +#define NQ_MINLEASE 5 /* Min lease duration (sec) */ +#define NQ_DEFLEASE 30 /* Default lease duration (sec) */ +#define NQ_RENEWAL 3 /* Time before expiry (sec) to renew */ +#define NQ_TRYLATERDEL 15 /* Initial try later delay (sec) */ +#define NQ_MAXNUMLEASE 2048 /* Upper bound on number of server leases */ +#define NQ_DEADTHRESH NQ_NEVERDEAD /* Default nm_deadthresh */ +#define NQ_NEVERDEAD 9 /* Greater than max. nm_timeouts */ +#define NQLCHSZ 256 /* Server hash table size */ + +#define NQNFS_PROG 300105 /* As assigned by Sun */ +#define NQNFS_VER3 3 +#define NQNFS_EVICTSIZ 156 /* Size of eviction request in bytes */ + +/* + * Definitions used for saving the "last lease expires" time in Non-volatile + * RAM on the server. The default definitions below assume that NOVRAM is not + * available. + */ +#ifdef HASNVRAM +# undef HASNVRAM +#endif +#define NQSTORENOVRAM(t) +#define NQLOADNOVRAM(t) + +/* + * Defn and structs used on the server to maintain state for current leases. + * The list of host(s) that hold the lease are kept as nqhost structures. + * The first one lives in nqlease and any others are held in a linked + * list of nqm structures hanging off of nqlease. + * + * Each nqlease structure is chained into two lists. The first is a list + * ordered by increasing expiry time for nqsrv_timer() and the second is a chain + * hashed on lc_fh. 
+ */ +#define LC_MOREHOSTSIZ 10 + +struct nqhost { + union { + struct { + u_short udp_flag; + u_short udp_port; + union nethostaddr udp_haddr; + } un_udp; + struct { + u_short connless_flag; + u_short connless_spare; + union nethostaddr connless_haddr; + } un_connless; + struct { + u_short conn_flag; + u_short conn_spare; + struct nfssvc_sock *conn_slp; + } un_conn; + } lph_un; +}; +#define lph_flag lph_un.un_udp.udp_flag +#define lph_port lph_un.un_udp.udp_port +#define lph_haddr lph_un.un_udp.udp_haddr +#define lph_inetaddr lph_un.un_udp.udp_haddr.had_inetaddr +#define lph_claddr lph_un.un_connless.connless_haddr +#define lph_nam lph_un.un_connless.connless_haddr.had_nam +#define lph_slp lph_un.un_conn.conn_slp + +struct nqlease { + LIST_ENTRY(nqlease) lc_hash; /* Fhandle hash list */ + CIRCLEQ_ENTRY(nqlease) lc_timer; /* Timer queue list */ + time_t lc_expiry; /* Expiry time (sec) */ + struct nqhost lc_host; /* Host that got lease */ + struct nqm *lc_morehosts; /* Other hosts that share read lease */ + fsid_t lc_fsid; /* Fhandle */ + char lc_fiddata[MAXFIDSZ]; + struct vnode *lc_vp; /* Soft reference to associated vnode */ +}; +#define lc_flag lc_host.lph_un.un_udp.udp_flag + +/* lc_flag bits */ +#define LC_VALID 0x0001 /* Host address valid */ +#define LC_WRITE 0x0002 /* Write cache */ +#define LC_NONCACHABLE 0x0004 /* Non-cachable lease */ +#define LC_LOCKED 0x0008 /* Locked */ +#define LC_WANTED 0x0010 /* Lock wanted */ +#define LC_EXPIREDWANTED 0x0020 /* Want lease when expired */ +#define LC_UDP 0x0040 /* Host address for udp socket */ +#define LC_CLTP 0x0080 /* Host address for other connectionless */ +#define LC_LOCAL 0x0100 /* Host is server */ +#define LC_VACATED 0x0200 /* Host has vacated lease */ +#define LC_WRITTEN 0x0400 /* Recently wrote to the leased file */ +#define LC_SREF 0x0800 /* Holds a nfssvc_sock reference */ + +struct nqm { + struct nqm *lpm_next; + struct nqhost lpm_hosts[LC_MOREHOSTSIZ]; +}; + +/* + * Special value for slp for local 
server calls. + */ +#define NQLOCALSLP ((struct nfssvc_sock *) -1) + +/* + * Server side macros. + */ +#define nqsrv_getl(v, l) \ + (void) nqsrv_getlease((v), &nfsd->nd_duration, \ + ((nfsd->nd_flag & ND_LEASE) ? (nfsd->nd_flag & ND_LEASE) : \ + ((l) | ND_CHECK)), \ + slp, procp, nfsd->nd_nam, &cache, &frev, cred) + +/* + * Client side macros that check for a valid lease. + */ +#define NQNFS_CKINVALID(v, n, f) \ + ((time.tv_sec > (n)->n_expiry && \ + VFSTONFS((v)->v_mount)->nm_timeouts < VFSTONFS((v)->v_mount)->nm_deadthresh) \ + || ((f) == ND_WRITE && ((n)->n_flag & NQNFSWRITE) == 0)) + +#define NQNFS_CKCACHABLE(v, f) \ + ((time.tv_sec <= VTONFS(v)->n_expiry || \ + VFSTONFS((v)->v_mount)->nm_timeouts >= VFSTONFS((v)->v_mount)->nm_deadthresh) \ + && (VTONFS(v)->n_flag & NQNFSNONCACHE) == 0 && \ + ((f) == ND_READ || (VTONFS(v)->n_flag & NQNFSWRITE))) + +#define NQNFS_NEEDLEASE(v, p) \ + (time.tv_sec > VTONFS(v)->n_expiry ? \ + ((VTONFS(v)->n_flag & NQNFSEVICTED) ? 0 : nqnfs_piggy[p]) : \ + (((time.tv_sec + NQ_RENEWAL) > VTONFS(v)->n_expiry && \ + nqnfs_piggy[p]) ? \ + ((VTONFS(v)->n_flag & NQNFSWRITE) ? \ + ND_WRITE : nqnfs_piggy[p]) : 0)) + +/* + * List head for timer queue. + */ +extern CIRCLEQ_HEAD(nqtimerhead, nqlease) nqtimerhead; + +/* + * List head for the file handle hash table. + */ +#define NQFHHASH(f) \ + (&nqfhhashtbl[(*((u_long *)(f))) & nqfhhash]) +extern LIST_HEAD(nqfhhashhead, nqlease) *nqfhhashtbl; +extern u_long nqfhhash; + +/* + * Nqnfs return status numbers. 
+ */ +#define NQNFS_EXPIRED 500 +#define NQNFS_TRYLATER 501 + +#if defined(KERNEL) +void nqnfs_lease_check __P((struct vnode *, struct proc *, struct ucred *, int)); +void nqnfs_lease_updatetime __P((int)); +int nqsrv_getlease __P((struct vnode *, u_long *, int, + struct nfssvc_sock *, struct proc *, + struct mbuf *, int *, u_quad_t *, + struct ucred *)); +int nqnfs_getlease __P((struct vnode *,int,struct ucred *,struct proc *)); +int nqnfs_callback __P((struct nfsmount *,struct mbuf *,struct mbuf *,caddr_t)); +int nqnfs_clientd __P((struct nfsmount *,struct ucred *,struct nfsd_cargs *,int,caddr_t,struct proc *)); +struct nfsnode; +void nqnfs_clientlease __P((struct nfsmount *, struct nfsnode *, int, int, time_t, u_quad_t)); +void nqnfs_serverd __P((void)); +int nqnfsrv_getlease __P((struct nfsrv_descript *, struct nfssvc_sock *, struct proc *, struct mbuf **)); +int nqnfsrv_vacated __P((struct nfsrv_descript *, struct nfssvc_sock *, struct proc *, struct mbuf **)); +#endif + +#endif diff --git a/bsd/nfs/rpcv2.h b/bsd/nfs/rpcv2.h new file mode 100644 index 000000000..b1d28ff7e --- /dev/null +++ b/bsd/nfs/rpcv2.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)rpcv2.h 8.2 (Berkeley) 3/30/95 + * FreeBSD-Id: rpcv2.h,v 1.8 1997/05/11 18:05:39 tegge Exp $ + */ + + +#ifndef _NFS_RPCV2_H_ +#define _NFS_RPCV2_H_ + +/* + * Definitions for Sun RPC Version 2, from + * "RPC: Remote Procedure Call Protocol Specification" RFC1057 + */ + +/* Version # */ +#define RPC_VER2 2 + +/* Authentication */ +#define RPCAUTH_NULL 0 +#define RPCAUTH_UNIX 1 +#define RPCAUTH_SHORT 2 +#define RPCAUTH_KERB4 4 +#define RPCAUTH_NQNFS 300000 +#define RPCAUTH_MAXSIZ 400 +#define RPCVERF_MAXSIZ 12 /* For Kerb, can actually be 400 */ +#define RPCAUTH_UNIXGIDS 16 + +/* + * Constants associated with authentication flavours. 
+ */ +#define RPCAKN_FULLNAME 0 +#define RPCAKN_NICKNAME 1 + +/* Rpc Constants */ +#define RPC_CALL 0 +#define RPC_REPLY 1 +#define RPC_MSGACCEPTED 0 +#define RPC_MSGDENIED 1 +#define RPC_PROGUNAVAIL 1 +#define RPC_PROGMISMATCH 2 +#define RPC_PROCUNAVAIL 3 +#define RPC_GARBAGE 4 /* I like this one */ +#define RPC_MISMATCH 0 +#define RPC_AUTHERR 1 + +/* Authentication failures */ +#define AUTH_BADCRED 1 +#define AUTH_REJECTCRED 2 +#define AUTH_BADVERF 3 +#define AUTH_REJECTVERF 4 +#define AUTH_TOOWEAK 5 /* Give em wheaties */ + +/* Sizes of rpc header parts */ +#define RPC_SIZ 24 +#define RPC_REPLYSIZ 28 + +/* RPC Prog definitions */ +#define RPCPROG_MNT 100005 +#define RPCMNT_VER1 1 +#define RPCMNT_VER3 3 +#define RPCMNT_MOUNT 1 +#define RPCMNT_DUMP 2 +#define RPCMNT_UMOUNT 3 +#define RPCMNT_UMNTALL 4 +#define RPCMNT_EXPORT 5 +#define RPCMNT_NAMELEN 255 +#define RPCMNT_PATHLEN 1024 +#define RPCPROG_NFS 100003 + +/* + * Structures used for RPCAUTH_KERB4. + */ +struct nfsrpc_fullverf { + u_long t1; + u_long t2; + u_long w2; +}; + +struct nfsrpc_fullblock { + u_long t1; + u_long t2; + u_long w1; + u_long w2; +}; + +struct nfsrpc_nickverf { + u_long kind; + struct nfsrpc_fullverf verf; +}; + +/* + * and their sizes in bytes.. If sizeof (struct nfsrpc_xx) != these + * constants, well then things will break in mount_nfs and nfsd. 
+ */ +#define RPCX_FULLVERF 12 +#define RPCX_FULLBLOCK 16 +#define RPCX_NICKVERF 16 + +#if NFSKERB +XXX +#else +typedef u_char NFSKERBKEY_T[2]; +typedef u_char NFSKERBKEYSCHED_T[2]; +#endif +#define NFS_KERBSRV "rcmd" /* Kerberos Service for NFS */ +#define NFS_KERBTTL (30 * 60) /* Credential ttl (sec) */ +#define NFS_KERBCLOCKSKEW (5 * 60) /* Clock skew (sec) */ +#define NFS_KERBW1(t) (*((u_long *)(&((t).dat[((t).length + 3) & ~0x3])))) +#endif diff --git a/bsd/nfs/xdr_subs.h b/bsd/nfs/xdr_subs.h new file mode 100644 index 000000000..5f73c96d8 --- /dev/null +++ b/bsd/nfs/xdr_subs.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + * This code is derived from software contributed to Berkeley by + * Rick Macklem at The University of Guelph. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)xdr_subs.h 8.3 (Berkeley) 3/30/95 + * FreeBSD-Id: xdr_subs.h,v 1.9 1997/02/22 09:42:53 peter Exp $ + */ + + +#ifndef _NFS_XDR_SUBS_H_ +#define _NFS_XDR_SUBS_H_ + +/* + * Macros used for conversion to/from xdr representation by nfs... 
+ * These use the MACHINE DEPENDENT routines ntohl, htonl + * As defined by "XDR: External Data Representation Standard" RFC1014 + * + * To simplify the implementation, we use ntohl/htonl even on big-endian + * machines, and count on them being `#define'd away. Some of these + * might be slightly more efficient as quad_t copies on a big-endian, + * but we cannot count on their alignment anyway. + */ + +#define fxdr_unsigned(t, v) ((t)ntohl((long)(v))) +#define txdr_unsigned(v) (htonl((long)(v))) + +#define fxdr_nfsv2time(f, t) { \ + (t)->tv_sec = ntohl(((struct nfsv2_time *)(f))->nfsv2_sec); \ + if (((struct nfsv2_time *)(f))->nfsv2_usec != 0xffffffff) \ + (t)->tv_nsec = 1000 * ntohl(((struct nfsv2_time *)(f))->nfsv2_usec); \ + else \ + (t)->tv_nsec = 0; \ +} +#define txdr_nfsv2time(f, t) { \ + ((struct nfsv2_time *)(t))->nfsv2_sec = htonl((f)->tv_sec); \ + if ((f)->tv_nsec != -1) \ + ((struct nfsv2_time *)(t))->nfsv2_usec = htonl((f)->tv_nsec / 1000); \ + else \ + ((struct nfsv2_time *)(t))->nfsv2_usec = 0xffffffff; \ +} + +#define fxdr_nfsv3time(f, t) { \ + (t)->tv_sec = ntohl(((struct nfsv3_time *)(f))->nfsv3_sec); \ + (t)->tv_nsec = ntohl(((struct nfsv3_time *)(f))->nfsv3_nsec); \ +} +#define txdr_nfsv3time(f, t) { \ + ((struct nfsv3_time *)(t))->nfsv3_sec = htonl((f)->tv_sec); \ + ((struct nfsv3_time *)(t))->nfsv3_nsec = htonl((f)->tv_nsec); \ +} + +#define fxdr_hyper(f, t) { \ + ((long *)(t))[_QUAD_HIGHWORD] = ntohl(((long *)(f))[0]); \ + ((long *)(t))[_QUAD_LOWWORD] = ntohl(((long *)(f))[1]); \ +} +#define txdr_hyper(f, t) { \ + ((long *)(t))[0] = htonl(((long *)(f))[_QUAD_HIGHWORD]); \ + ((long *)(t))[1] = htonl(((long *)(f))[_QUAD_LOWWORD]); \ +} + +#endif diff --git a/bsd/ppc/Makefile b/bsd/ppc/Makefile new file mode 100644 index 000000000..b4bd40dda --- /dev/null +++ b/bsd/ppc/Makefile @@ -0,0 +1,26 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export 
MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + cpu.h disklabel.h endian.h exec.h label_t.h param.h profile.h \ + psl.h ptrace.h reboot.h reg.h setjmp.h signal.h spl.h \ + table.h types.h user.h vmparam.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = ppc + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = ppc + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/ppc/cpu.h b/bsd/ppc/cpu.h new file mode 100644 index 000000000..e7a2dc69e --- /dev/null +++ b/bsd/ppc/cpu.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 1993 NeXT Computer, Inc. All rights reserved. + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY + * + */ + +#ifndef _BSD_PPC_CPU_H_ +#define _BSD_PPC_CPU_H_ + +#define cpu_number() (0) + +#endif /* _BSD_PPC_CPU_H_ */ + diff --git a/bsd/ppc/disklabel.h b/bsd/ppc/disklabel.h new file mode 100644 index 000000000..f93d6b294 --- /dev/null +++ b/bsd/ppc/disklabel.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* +* + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + */ + +#ifndef _MACHINE_DISKLABEL_H_ +#define _MACHINE_DISKLABEL_H_ + +#define LABELSECTOR (1024 / DEV_BSIZE) /* sector containing label */ +#define LABELOFFSET 0 /* offset of label in sector */ +#define MAXPARTITIONS 8 /* number of partitions */ +#define RAW_PART 2 /* raw partition: xx?c */ + +/* Just a dummy */ +struct cpu_disklabel { + int cd_dummy; /* must have one element. */ +}; + +#endif /* _MACHINE_DISKLABEL_H_ */ diff --git a/bsd/ppc/endian.h b/bsd/ppc/endian.h new file mode 100644 index 000000000..ea3b815f5 --- /dev/null +++ b/bsd/ppc/endian.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 1995 NeXT Computer, Inc. All rights reserved. + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1987, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)endian.h 8.1 (Berkeley) 6/10/93 + */ + +#ifndef _PPC_ENDIAN_H_ +#define _PPC_ENDIAN_H_ + +/* + * Define the order of 32-bit words in 64-bit words. + */ +#define _QUAD_HIGHWORD 0 +#define _QUAD_LOWWORD 1 + +#if defined(KERNEL) || !defined(_POSIX_SOURCE) +/* + * Definitions for byte order, according to byte significance from low + * address to high. + */ +#define LITTLE_ENDIAN 1234 /* LSB first: i386, vax */ +#define BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net, ppc */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */ + +#define BYTE_ORDER BIG_ENDIAN + +#include + +#ifndef __ASSEMBLER__ +__BEGIN_DECLS +unsigned long htonl __P((unsigned long)); +unsigned short htons __P((unsigned short)); +unsigned long ntohl __P((unsigned long)); +unsigned short ntohs __P((unsigned short)); +__END_DECLS +#endif /* __ASSEMBLER__ */ + +/* + * Macros for network/external number representation conversion. 
+ */ +#if BYTE_ORDER == BIG_ENDIAN && !defined(lint) +#define ntohl(x) (x) +#define ntohs(x) (x) +#define htonl(x) (x) +#define htons(x) (x) + +#define NTOHL(x) (x) +#define NTOHS(x) (x) +#define HTONL(x) (x) +#define HTONS(x) (x) + +#else + +#include + +#define ntohl(x) NXSwapBigLongToHost(x) +#define ntohs(x) NXSwapBigShortToHost(x) +#define htonl(x) NXSwapHostLongToBig(x) +#define htons(x) NXSwapHostShortToBig(x) + +#define NTOHL(x) (x) = ntohl((u_long)x) +#define NTOHS(x) (x) = ntohs((u_short)x) +#define HTONL(x) (x) = htonl((u_long)x) +#define HTONS(x) (x) = htons((u_short)x) +#endif +#endif /* defined(KERNEL) || !defined(_POSIX_SOURCE) */ +#endif /* !_PPC_ENDIAN_H_ */ diff --git a/bsd/ppc/exec.h b/bsd/ppc/exec.h new file mode 100644 index 000000000..9db931975 --- /dev/null +++ b/bsd/ppc/exec.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1994, The University of Utah and + * the Center for Software Science at the University of Utah (CSS). + * All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSS requests users of this software to return to css-dist@cs.utah.edu any + * improvements that they make and grant CSS redistribution rights. + * + */ + +/* Size of a page in an object file. */ +#define __LDPGSZ 4096 + +/* Valid magic number check. */ +#define N_BADMAG(ex) \ + ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \ + (ex).a_magic != ZMAGIC) + +/* Address of the bottom of the text segment. */ +#define N_TXTADDR(X) 0 + +/* Address of the bottom of the data segment. */ +#define N_DATADDR(ex) \ + (N_TXTADDR(ex) + ((ex).a_magic == OMAGIC ? (ex).a_text \ + : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) + +/* Text segment offset. */ +#define N_TXTOFF(ex) \ + ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec)) + +/* Data segment offset. */ +#define N_DATOFF(ex) \ + (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \ + __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) + +/* Symbol table offset. */ +#define N_SYMOFF(ex) \ + (N_TXTOFF(ex) + (ex).a_text + (ex).a_data + (ex).a_trsize + \ + (ex).a_drsize) + +/* String table offset. */ +#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms) + +/* Description of the object file header (a.out format). */ +struct exec { +#define OMAGIC 0407 /* old impure format */ +#define NMAGIC 0410 /* read-only text */ +#define ZMAGIC 0413 /* demand load format */ +#define QMAGIC 0314 /* demand load format. Header in text. 
*/ + unsigned int a_magic; /* magic number */ + + unsigned int a_text; /* text segment size */ + unsigned int a_data; /* initialized data size */ + unsigned int a_bss; /* uninitialized data size */ + unsigned int a_syms; /* symbol table size */ + unsigned int a_entry; /* entry point */ + unsigned int a_trsize; /* text relocation size */ + unsigned int a_drsize; /* data relocation size */ +}; + +/* + * Address of ps_strings structure (in user space). + */ +#define PS_STRINGS \ + ((struct ps_strings *)(USRSTACK - sizeof(struct ps_strings))) + diff --git a/bsd/ppc/label_t.h b/bsd/ppc/label_t.h new file mode 100644 index 000000000..54e9bbc93 --- /dev/null +++ b/bsd/ppc/label_t.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993 NeXT Computer, Inc. + * + * PowerPC Family: For setjmp/longjmp (kernel version). 
+ * + * HISTORY + * + */ + +#ifndef _BSD_PPC_LABEL_T_H_ +#define _BSD_PPC_LABEL_T_H_ + +typedef struct label_t { + int val[59]; +} label_t; + +#endif /* _BSD_PPC_LABEL_T_H_ */ diff --git a/bsd/ppc/param.h b/bsd/ppc/param.h new file mode 100644 index 000000000..b0fb62ded --- /dev/null +++ b/bsd/ppc/param.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1993,1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + */ + +#ifndef _PPC_PARAM_H_ +#define _PPC_PARAM_H_ + +/* + * Round p (pointer or byte index) up to a correctly-aligned value for all + * data types (int, long, ...). The result is u_int and must be cast to + * any desired pointer type. 
+ */ +#define ALIGNBYTES 3 +#define ALIGN(p) (((u_int)(p) + ALIGNBYTES) &~ ALIGNBYTES) + +#define NBPG 4096 /* bytes/page */ +#define PGOFSET (NBPG-1) /* byte offset into page */ +#define PGSHIFT 12 /* LOG2(NBPG) */ + +#define NBSEG 0x40000000 /* bytes/segment (quadrant) */ +#define SEGOFSET (NBSEG-1) /* byte offset into segment */ +#define SEGSHIFT 30 /* LOG2(NBSEG) */ + +#define DEV_BSIZE 512 +#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ +#define BLKDEV_IOSIZE 2048 +#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */ + +#define STACK_GROWTH_UP 0 /* stack grows to lower addresses */ + +#define CLSIZE 1 +#define CLSIZELOG2 0 + +#define STACKSIZE 4 /* pages in kernel stack */ +#define UPAGES (USIZE+STACKSIZE) /* total pages in u-area */ + /* red zone is beyond this */ + +/* + * Constants related to network buffer management. + * MCLBYTES must be no larger than CLBYTES (the software page size), and, + * on machines that exchange pages of input or output buffers with mbuf + * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple + * of the hardware page size. 
+ */ +#define MSIZE 256 /* size of an mbuf */ +#define MCLBYTES 2048 /* large enough for ether MTU */ +#define MCLSHIFT 11 +#define MCLOFSET (MCLBYTES - 1) +#ifndef NMBCLUSTERS +#if GATEWAY +#define NMBCLUSTERS ((1024 * 1024) / MCLBYTES) /* cl map size: 1MB */ +#else +#define NMBCLUSTERS ((1024 * 1024) / MCLBYTES) + /* cl map size was 0.5MB when MSIZE was 128, now it's 1MB*/ +#endif +#endif + +/* pages ("clicks") (NBPG bytes) to disk blocks */ +#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT)) +#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT)) +#define dtob(x) ((x)<>PGSHIFT) +#ifdef __APPLE__ +#define btodb(bytes, devBlockSize) \ + ((unsigned)(bytes) / devBlockSize) +#define dbtob(db, devBlockSize) \ + ((unsigned)(db) * devBlockSize) +#else +#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ + ((unsigned)(bytes) >> DEV_BSHIFT) +#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ + ((unsigned)(db) << DEV_BSHIFT) +#endif + +/* + * Map a ``block device block'' to a file system block. + * This should be device dependent, and should use the bsize + * field from the disk label. + * For now though just use DEV_BSIZE. + */ +#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) + +/* from machdep/ppc/proc_reg.h */ +#if __BIG_ENDIAN__ +#define ENDIAN_MASK(val,size) (1 << (size-1 - val)) +#else +#error code not ported to little endian targets yet +#endif /* __BIG_ENDIAN__ */ + +#ifndef MASK +#define MASK(PART) ENDIAN_MASK(PART ## _BIT, 32) +#endif + +#define MSR_EE_BIT 16 +#define MSR_PR_BIT 17 +#define USERMODE(msr) (msr & MASK(MSR_PR) ? TRUE : FALSE) +#define BASEPRI(msr) (msr & MASK(MSR_EE) ? 
TRUE : FALSE) +/* end of from proc_reg.h */ + +#if defined(KERNEL) || defined(STANDALONE) +#define DELAY(n) delay(n) +#else +#define DELAY(n) { register int N = (n); while (--N > 0); } +#endif /* defined(KERNEL) || defined(STANDALONE) */ + +#define NPIDS 16 /* maximum number of PIDs per process */ +#define NIOPIDS 8 /* maximum number of IO space PIDs */ + +#endif /* _PPC_PARAM_H_ */ diff --git a/bsd/ppc/profile.h b/bsd/ppc/profile.h new file mode 100644 index 000000000..a6f2b3ca1 --- /dev/null +++ b/bsd/ppc/profile.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997, Apple Computer, Inc. All rights reserved. + * + * History : + * 29-Sep-1997 Umesh Vaishampayan + * Created. + */ + +#ifndef _BSD_PPC_PROFILE_H_ +#define _BSD_PPC_PROFILE_H_ + +#ifdef KERNEL +/* + * Block interrupts during mcount so that those interrupts can also be + * counted (as soon as we get done with the current counting). On the + * PPC platfom, can't do splhigh/splx as those are C routines and can + * recursively invoke mcount. 
+ */ +extern unsigned long disable_ee(); +extern void restore_ee(unsigned long smsr); + +#define MCOUNT_INIT register unsigned long smsr; + +#define MCOUNT_ENTER smsr = disable_ee(); + +#define MCOUNT_EXIT restore_ee(smsr); + +#endif /* KERNEL */ + +#endif /* _BSD_PPC_PROFILE_H_ */ diff --git a/bsd/ppc/psl.h b/bsd/ppc/psl.h new file mode 100644 index 000000000..488e97ecf --- /dev/null +++ b/bsd/ppc/psl.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1993 NeXT Computer, Inc. All rights reserved. + * + * File: bsd/ppc/psl.h + * + */ + +#if KERNEL_PRIVATE + +#ifndef _BSD_PPC_PSL_H_ +#define _BSD_PPC_PSL_H_ + +/* empty */ + +#endif /* _BSD_PPC_PSL_H_ */ + +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/ppc/ptrace.h b/bsd/ppc/ptrace.h new file mode 100644 index 000000000..6fd1d3ed8 --- /dev/null +++ b/bsd/ppc/ptrace.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ptrace.h 8.1 (Berkeley) 6/11/93 + */ + +/* + * Machine dependent trace commands. + * + * None for the PowerPC at this time. + */ diff --git a/bsd/ppc/reboot.h b/bsd/ppc/reboot.h new file mode 100644 index 000000000..839e80100 --- /dev/null +++ b/bsd/ppc/reboot.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: next/reboot.h + * Author: Avadis Tevanian, Jr. + * + * NeXT specific reboot flags. + * + * HISTORY + * 28-Feb-90 John Seamons (jks) at NeXT + * Added RB_COMMAND flag that allows a specific reboot command to be used. + * + * 06-Jul-88 Avadis Tevanian (avie) at NeXT, Inc. + * Created. + */ + +#ifndef _BSD_PPC_REBOOT_H_ +#define _BSD_PPC_REBOOT_H_ + +/* + * Empty file (publicly) + */ +#ifdef KERNEL +/* + * Use most significant 16 bits to avoid collisions with + * machine independent flags. + */ +#define RB_POWERDOWN 0x00010000 /* power down on halt */ +#define RB_NOBOOTRC 0x00020000 /* don't run '/etc/rc.boot' */ +#define RB_DEBUG 0x00040000 /* drop into mini monitor on panic */ +#define RB_EJECT 0x00080000 /* eject disks on halt */ +#define RB_COMMAND 0x00100000 /* new boot command specified */ +#define RB_NOFP 0x00200000 /* don't use floating point */ +#define RB_BOOTNEXT 0x00400000 /* reboot into NeXT */ +#define RB_BOOTDOS 0x00800000 /* reboot into DOS */ + +#endif /* KERNEL */ +#endif /* _BSD_PPC_REBOOT_H_ */ diff --git a/bsd/ppc/reg.h b/bsd/ppc/reg.h new file mode 100644 index 000000000..c33c3606d --- /dev/null +++ b/bsd/ppc/reg.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1993, NeXT Computer, Inc. + */ + +#ifndef _BSD_PPC_REG_H_ +#define _BSD_PPC_REG_H_ + +#ifdef KERNEL_PRIVATE + +/* Index into the thread_state */ +#define SP 3 +#define PC 0 + +#endif /* KERNEL_PRIVATE */ +#endif /* _BSD_PPC_REG_H_ */ diff --git a/bsd/ppc/setjmp.h b/bsd/ppc/setjmp.h new file mode 100644 index 000000000..08b834ac4 --- /dev/null +++ b/bsd/ppc/setjmp.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * File: ppc/setjmp.h + * + * Declaration of setjmp routines and data structures. + */ +#ifndef _BSD_PPC_SETJMP_H_ +#define _BSD_PPC_SETJMP_H_ + +#include +#include + +struct _jmp_buf { + struct sigcontext sigcontext; /* kernel state preserved by set/longjmp */ + unsigned long vmask __attribute__((aligned(8))); /* vector mask register */ + unsigned long vreg[32 * 4] __attribute__((aligned(16))); + /* 32 128-bit vector registers */ +}; + +/* + * _JBLEN is number of ints required to save the following: + * r1, r2, r13-r31, lr, cr, ctr, xer, sig == 26 ints + * fr14 - fr31 = 18 doubles = 36 ints + * vmask, 32 vector registers = 129 ints + * 2 ints to get all the elements aligned + */ + +#define _JBLEN (26 + 36 + 129 + 1) + +#if defined(KERNEL) +typedef struct sigcontext jmp_buf[1]; +typedef struct __sigjmp_buf { + int __storage[_JBLEN + 1] __attribute__((aligned(8))); + } sigjmp_buf[1]; +#else +typedef int jmp_buf[_JBLEN]; +typedef int sigjmp_buf[_JBLEN + 1]; +#endif + +__BEGIN_DECLS +extern int setjmp __P((jmp_buf env)); +extern void longjmp __P((jmp_buf env, int val)); + +#ifndef _ANSI_SOURCE +int sigsetjmp __P((sigjmp_buf env, int val)); +void siglongjmp __P((sigjmp_buf env, int val)); +#endif /* _ANSI_SOURCE */ + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +int _setjmp __P((jmp_buf env)); +void _longjmp __P((jmp_buf, int val)); +void longjmperror __P((void)); +#endif /* neither ANSI nor POSIX */ +__END_DECLS + +#endif /* !_BSD_PPC_SETJMP_H_ */ diff --git a/bsd/ppc/signal.h b/bsd/ppc/signal.h new file mode 100644 index 000000000..19c940513 --- /dev/null +++ b/bsd/ppc/signal.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 NeXT Computer, Inc. + * + * HISTORY + * + * Machine specific signal information. + * + * HISTORY + * 25-MAR-97 Umesh Vaishampayan (umeshv@NeXT.com) + * Ported from m98k and hppa. + * + * 13-Jan-92 Peter King (king) at NeXT Computer, Inc. + * Filled out struct sigcontext to hold all registers. + * Added regs_saved_t to specify which regs stored in the + * sigcontext are valid. + * + * 09-Nov-92 Ben Fathi (benf) at NeXT, Inc. + * Ported to m98k. + * + * 09-May-91 Mike DeMoney (mike) at NeXT, Inc. + * Ported to m88k. + */ + +#ifndef _PPC_SIGNAL_ +#define _PPC_SIGNAL_ 1 + +typedef int sig_atomic_t; + +/* + * Machine-dependant flags used in sigvec call. + */ +#define SV_SAVE_REGS 0x1000 /* Save all regs in sigcontext */ + +/* + * regs_saved_t -- Describes which registers beyond what the kernel cares + * about are saved to and restored from this sigcontext. + * + * The default is REGS_SAVED_CALLER, only the caller saved registers + * are saved. If the SV_SAVE_REGS flag was set when the signal + * handler was registered with sigvec() then all the registers will be + * saved in the sigcontext, and REGS_SAVED_ALL will be set. The C + * library uses REGS_SAVED_NONE in order to quickly restore kernel + * state during a longjmp(). 
+ */ +typedef enum { + REGS_SAVED_NONE, /* Only kernel managed regs restored */ + REGS_SAVED_CALLER, /* "Caller saved" regs: rpc, a0-a7, + t0-t4, at, lk0-lk1, xt1-xt20, + xr0-xr1 */ + REGS_SAVED_ALL /* All registers */ +} regs_saved_t; + + +/* + * Information pushed on stack when a signal is delivered. + * This is used by the kernel to restore state following + * execution of the signal handler. It is also made available + * to the handler to allow it to properly restore state if + * a non-standard exit is performed. + */ +struct sigcontext { + int sc_onstack; /* sigstack state to restore */ + int sc_mask; /* signal mask to restore */ + int sc_ir; /* pc */ + int sc_psw; /* processor status word */ + int sc_sp; /* stack pointer if sc_regs == NULL */ + void *sc_regs; /* (kernel private) saved state */ +}; + +#endif /* _PPC_SIGNAL_ */ + diff --git a/bsd/ppc/spl.h b/bsd/ppc/spl.h new file mode 100644 index 000000000..4284f241b --- /dev/null +++ b/bsd/ppc/spl.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_PPC_SPL_H_ +#define _BSD_PPC_SPL_H_ + +#ifdef KERNEL +#ifndef __ASSEMBLER__ +/* + * Machine-dependent SPL definitions. + * + */ +typedef unsigned spl_t; + +extern unsigned sploff(void); +extern unsigned splhigh(void); +extern unsigned splsched(void); +extern unsigned splclock(void); +extern unsigned splpower(void); +extern unsigned splvm(void); +extern unsigned splbio(void); +extern unsigned splimp(void); +extern unsigned spltty(void); +extern unsigned splnet(void); +extern unsigned splsoftclock(void); + +extern void spllo(void); +extern void splon(unsigned level); +extern void splx(unsigned level); +extern void spln(unsigned level); +#define splstatclock() splhigh() + +#endif /* __ASSEMBLER__ */ + +#endif + +#endif /* _BSD_PPC_SPL_H_ */ diff --git a/bsd/ppc/table.h b/bsd/ppc/table.h new file mode 100644 index 000000000..0d0533d90 --- /dev/null +++ b/bsd/ppc/table.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989 Next, Inc. 
+ */ + +#ifndef _BSD_PPC_TABLE_H_ +#define _BSD_PPC_TABLE_H_ + +/* + * No machine dependent table calls for ppc. + */ + +#endif /* _BSD_PPC_TABLE_H_ */ diff --git a/bsd/ppc/types.h b/bsd/ppc/types.h new file mode 100644 index 000000000..538ded1de --- /dev/null +++ b/bsd/ppc/types.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1995 NeXT Computer, Inc. All rights reserved. + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)types.h 8.3 (Berkeley) 1/5/94 + */ + +#ifndef _MACHTYPES_H_ +#define _MACHTYPES_H_ + +#ifndef __ASSEMBLER__ +#include +/* + * Basic integral types. Omit the typedef if + * not possible for a machine/compiler combination. 
+ */ +typedef __signed char int8_t; +typedef unsigned char u_int8_t; +typedef short int16_t; +typedef unsigned short u_int16_t; +typedef int int32_t; +typedef unsigned int u_int32_t; +typedef long long int64_t; +typedef unsigned long long u_int64_t; + +typedef int32_t register_t; + + +typedef long intptr_t; +typedef unsigned long uintptr_t; + +#endif /* __ASSEMBLER__ */ +#endif /* _MACHTYPES_H_ */ diff --git a/bsd/ppc/user.h b/bsd/ppc/user.h new file mode 100644 index 000000000..6a7651a71 --- /dev/null +++ b/bsd/ppc/user.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1989, NeXT, Inc. + * + * bsd/ppc/user.h + * + * We can use the default definition of u, so this file is empty. + */ + +#warning ---- Empty bsd/ppc/user.h ---- diff --git a/bsd/ppc/vmparam.h b/bsd/ppc/vmparam.h new file mode 100644 index 000000000..ce5c25c4e --- /dev/null +++ b/bsd/ppc/vmparam.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY + * 05-Mar-89 Avadis Tevanian, Jr. (avie) at NeXT + * Make MAXDSIZ infinity. + * + * 12-Aug-87 John Seamons (jks) at NeXT + * Ported to NeXT. 
+ */ + +#ifndef _BSD_PPC_VMPARAM_H_ +#define _BSD_PPC_VMPARAM_H_ 1 + +#include <sys/resource.h> + +#define USRSTACK 0xc0000000 + +/* + * Virtual memory related constants, all in bytes + */ +#ifndef DFLDSIZ +#define DFLDSIZ (6*1024*1024) /* initial data size limit */ +#endif +#ifndef MAXDSIZ +#define MAXDSIZ (RLIM_INFINITY) /* max data size */ +#endif +#ifndef DFLSSIZ +#define DFLSSIZ (512*1024) /* initial stack size limit */ +#endif +#ifndef MAXSSIZ +#define MAXSSIZ (64*1024*1024) /* max stack size */ +#endif +#ifndef DFLCSIZ +#define DFLCSIZ (0) /* initial core size limit */ +#endif +#ifndef MAXCSIZ +#define MAXCSIZ (RLIM_INFINITY) /* max core size */ +#endif + +#endif /* _BSD_PPC_VMPARAM_H_ */ diff --git a/bsd/sys/Makefile b/bsd/sys/Makefile new file mode 100644 index 000000000..4853cddfa --- /dev/null +++ b/bsd/sys/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + acct.h attr.h buf.h callout.h cdefs.h cdio.h chio.h clist.h conf.h \ + dir.h dirent.h disklabel.h disktab.h dkbad.h dkstat.h dmap.h domain.h \ + errno.h ev.h exec.h fcntl.h file.h filedesc.h filio.h gmon.h ioccom.h ioctl.h \ + ioctl_compat.h ipc.h kernel.h kern_event.h ktrace.h loadable_fs.h lock.h lockf.h mach_swapon.h malloc.h \ + kdebug.h linker_set.h md5.h\ + mbuf.h mman.h mount.h msgbuf.h mtio.h namei.h netport.h param.h paths.h \ + proc.h protosw.h ptrace.h queue.h reboot.h resource.h resourcevar.h \ + select.h semaphore.h shm.h signal.h signalvar.h socket.h socketvar.h sockio.h stat.h \ + syscall.h sysctl.h syslimits.h syslog.h systm.h termios.h time.h \ + timeb.h times.h tprintf.h trace.h tty.h ttychars.h 
ttycom.h \ + ttydefaults.h ttydev.h types.h ubc.h ucred.h uio.h un.h unistd.h unpcb.h \ + user.h utfconv.h utsname.h ux_exception.h vadvise.h vcmd.h version.h vlimit.h \ + vm.h vmmeter.h vmparam.h vnode.h vnode_if.h vstat.h wait.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = sys + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = sys + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/sys/acct.h b/bsd/sys/acct.h new file mode 100644 index 000000000..2470ca277 --- /dev/null +++ b/bsd/sys/acct.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)acct.h 8.4 (Berkeley) 1/9/95 + */ +#ifndef _SYS_ACCT_H_ +#define _SYS_ACCT_H_ + +/* + * Accounting structures; these use a comp_t type which is a 3 bits base 8 + * exponent, 13 bit fraction ``floating point'' number. Units are 1/AHZ + * seconds. 
+ */ +typedef u_int16_t comp_t; + +struct acct { + char ac_comm[10]; /* command name */ + comp_t ac_utime; /* user time */ + comp_t ac_stime; /* system time */ + comp_t ac_etime; /* elapsed time */ + time_t ac_btime; /* starting time */ + uid_t ac_uid; /* user id */ + gid_t ac_gid; /* group id */ + u_int16_t ac_mem; /* average memory usage */ + comp_t ac_io; /* count of IO blocks */ + dev_t ac_tty; /* controlling tty */ + +#define AFORK 0x01 /* fork'd but not exec'd */ +#define ASU 0x02 /* used super-user permissions */ +#define ACOMPAT 0x04 /* used compatibility mode */ +#define ACORE 0x08 /* dumped core */ +#define AXSIG 0x10 /* killed by a signal */ + u_int8_t ac_flag; /* accounting flags */ +}; + +/* + * 1/AHZ is the granularity of the data encoded in the comp_t fields. + * This is not necessarily equal to hz. + */ +#define AHZ 64 + +#ifdef KERNEL +extern struct vnode *acctp; + +int acct_process __P((struct proc *p)); +#endif + +#endif /* ! _SYS_ACCT_H_ */ diff --git a/bsd/sys/attr.h b/bsd/sys/attr.h new file mode 100644 index 000000000..96881d20c --- /dev/null +++ b/bsd/sys/attr.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
+ Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * attr.h - attribute data structures and interfaces + * + * Copyright (c) 1998, Apple Computer, Inc. All Rights Reserved. + */ + +#ifndef _SYS_ATTR_H_ +#define _SYS_ATTR_H_ + +#ifndef _SYS_TYPES_H_ +#include <sys/types.h> +#endif +#ifndef _SYS_UCRED_H +#include <sys/ucred.h> +#endif +#ifndef _SYS_MOUNT_H_ +#include <sys/mount.h> +#endif +#ifndef _SYS_TIME_H_ +#include <sys/time.h> +#endif + +#define FSOPT_NOFOLLOW 0x00000001 +#define FSOPT_NOINMEMUPDATE 0x00000002 + +typedef u_int32_t text_encoding_t; + +typedef u_int32_t fsobj_type_t; + +typedef u_int32_t fsobj_tag_t; + +typedef u_int32_t fsfile_type_t; + +typedef u_int32_t fsvolid_t; + +typedef struct fsobj_id { + u_int32_t fid_objno; + u_int32_t fid_generation; +} fsobj_id_t; + +typedef u_int32_t attrgroup_t; + +struct attrlist { + u_short bitmapcount; /* number of attr. bit sets in list (should be 5) */ + u_int16_t reserved; /* (to maintain 4-byte alignment) */ + attrgroup_t commonattr; /* common attribute group */ + attrgroup_t volattr; /* Volume attribute group */ + attrgroup_t dirattr; /* directory attribute group */ + attrgroup_t fileattr; /* file attribute group */ + attrgroup_t forkattr; /* fork attribute group */ +}; +#define ATTR_BIT_MAP_COUNT 5 + +typedef struct attribute_set { + attrgroup_t commonattr; /* common attribute group */ + attrgroup_t volattr; /* Volume attribute group */ + attrgroup_t dirattr; /* directory attribute group */ + attrgroup_t fileattr; /* file attribute group */ + attrgroup_t forkattr; /* fork attribute group */ +} attribute_set_t; + +typedef struct attrreference { + long attr_dataoffset; + size_t attr_length; +} attrreference_t; + +/* XXX PPD This is derived from HFSVolumePriv.h and should perhaps be referenced from there? 
*/ + +struct diskextent { + u_int32_t startblock; /* first block allocated */ + u_int32_t blockcount; /* number of blocks allocated */ +}; + +typedef struct diskextent extentrecord[8]; + +typedef u_int32_t vol_capabilities_set_t[4]; + +#define VOL_CAPABILITIES_FORMAT 0 +#define VOL_CAPABILITIES_INTERFACES 1 +#define VOL_CAPABILITIES_RESERVED1 2 +#define VOL_CAPABILITIES_RESERVED2 3 + +typedef struct vol_capabilities_attr { + vol_capabilities_set_t capabilities; + vol_capabilities_set_t valid; +} vol_capabilities_attr_t; + +#define VOL_CAP_FMT_PERSISTENTOBJECTIDS 0x00000001 +#define VOL_CAP_FMT_SYMBOLICLINKS 0x00000002 +#define VOL_CAP_FMT_HARDLINKS 0x00000004 + +#define VOL_CAP_INT_SEARCHFS 0x00000001 +#define VOL_CAP_INT_ATTRLIST 0x00000002 +#define VOL_CAP_INT_NFSEXPORT 0x00000004 + +typedef struct vol_attributes_attr { + attribute_set_t validattr; + attribute_set_t nativeattr; +} vol_attributes_attr_t; + +#define DIR_MNTSTATUS_MNTPOINT 0x00000001 + +#define ATTR_CMN_NAME 0x00000001 +#define ATTR_CMN_DEVID 0x00000002 +#define ATTR_CMN_FSID 0x00000004 +#define ATTR_CMN_OBJTYPE 0x00000008 +#define ATTR_CMN_OBJTAG 0x00000010 +#define ATTR_CMN_OBJID 0x00000020 +#define ATTR_CMN_OBJPERMANENTID 0x00000040 +#define ATTR_CMN_PAROBJID 0x00000080 +#define ATTR_CMN_SCRIPT 0x00000100 +#define ATTR_CMN_CRTIME 0x00000200 +#define ATTR_CMN_MODTIME 0x00000400 +#define ATTR_CMN_CHGTIME 0x00000800 +#define ATTR_CMN_ACCTIME 0x00001000 +#define ATTR_CMN_BKUPTIME 0x00002000 +#define ATTR_CMN_FNDRINFO 0x00004000 +#define ATTR_CMN_OWNERID 0x00008000 +#define ATTR_CMN_GRPID 0x00010000 +#define ATTR_CMN_ACCESSMASK 0x00020000 +#define ATTR_CMN_FLAGS 0x00040000 +#define ATTR_CMN_NAMEDATTRCOUNT 0x00080000 +#define ATTR_CMN_NAMEDATTRLIST 0x00100000 +#define ATTR_CMN_USERACCESS 0x00200000 + +#define ATTR_CMN_VALIDMASK 0x003FFFFF +#define ATTR_CMN_SETMASK 0x0007FF00 +#define ATTR_CMN_VOLSETMASK 0x00006700 + +#define ATTR_VOL_FSTYPE 0x00000001 +#define ATTR_VOL_SIGNATURE 0x00000002 +#define 
ATTR_VOL_SIZE 0x00000004 +#define ATTR_VOL_SPACEFREE 0x00000008 +#define ATTR_VOL_SPACEAVAIL 0x00000010 +#define ATTR_VOL_MINALLOCATION 0x00000020 +#define ATTR_VOL_ALLOCATIONCLUMP 0x00000040 +#define ATTR_VOL_IOBLOCKSIZE 0x00000080 +#define ATTR_VOL_OBJCOUNT 0x00000100 +#define ATTR_VOL_FILECOUNT 0x00000200 +#define ATTR_VOL_DIRCOUNT 0x00000400 +#define ATTR_VOL_MAXOBJCOUNT 0x00000800 +#define ATTR_VOL_MOUNTPOINT 0x00001000 +#define ATTR_VOL_NAME 0x00002000 +#define ATTR_VOL_MOUNTFLAGS 0x00004000 +#define ATTR_VOL_MOUNTEDDEVICE 0x00008000 +#define ATTR_VOL_ENCODINGSUSED 0x00010000 +#define ATTR_VOL_CAPABILITIES 0x00020000 +#define ATTR_VOL_ATTRIBUTES 0x40000000 +#define ATTR_VOL_INFO 0x80000000 + +#define ATTR_VOL_VALIDMASK 0xC003FFFF +#define ATTR_VOL_SETMASK 0x80002000 + + +/* File/directory attributes: */ +#define ATTR_DIR_LINKCOUNT 0x00000001 +#define ATTR_DIR_ENTRYCOUNT 0x00000002 +#define ATTR_DIR_MOUNTSTATUS 0x00000004 + +#define ATTR_DIR_VALIDMASK 0x00000007 +#define ATTR_DIR_SETMASK 0x00000000 + +#define ATTR_FILE_LINKCOUNT 0x00000001 +#define ATTR_FILE_TOTALSIZE 0x00000002 +#define ATTR_FILE_ALLOCSIZE 0x00000004 +#define ATTR_FILE_IOBLOCKSIZE 0x00000008 +#define ATTR_FILE_CLUMPSIZE 0x00000010 +#define ATTR_FILE_DEVTYPE 0x00000020 +#define ATTR_FILE_FILETYPE 0x00000040 +#define ATTR_FILE_FORKCOUNT 0x00000080 +#define ATTR_FILE_FORKLIST 0x00000100 +#define ATTR_FILE_DATALENGTH 0x00000200 +#define ATTR_FILE_DATAALLOCSIZE 0x00000400 +#define ATTR_FILE_DATAEXTENTS 0x00000800 +#define ATTR_FILE_RSRCLENGTH 0x00001000 +#define ATTR_FILE_RSRCALLOCSIZE 0x00002000 +#define ATTR_FILE_RSRCEXTENTS 0x00004000 + +#define ATTR_FILE_VALIDMASK 0x00007FFF +#define ATTR_FILE_SETMASK 0x00000020 + +#define ATTR_FORK_TOTALSIZE 0x00000001 +#define ATTR_FORK_ALLOCSIZE 0x00000002 + +#define ATTR_FORK_VALIDMASK 0x00000003 +#define ATTR_FORK_SETMASK 0x00000000 + +#define SRCHFS_START 0x00000001 +#define SRCHFS_MATCHPARTIALNAMES 0x00000002 +#define SRCHFS_MATCHDIRS 0x00000004 
+#define SRCHFS_MATCHFILES 0x00000008 +#define SRCHFS_NEGATEPARAMS 0x80000000 +#define SRCHFS_VALIDOPTIONSMASK 0x8000000F + +struct fssearchblock { + struct attrlist *returnattrs; + void *returnbuffer; + size_t returnbuffersize; + u_long maxmatches; + struct timeval timelimit; + void *searchparams1; + size_t sizeofsearchparams1; + void *searchparams2; + size_t sizeofsearchparams2; + struct attrlist searchattrs; +}; + + +struct searchstate { + u_char reserved[556]; // sizeof( SearchState ) +}; + + + +#define FST_EOF (-1) /* end-of-file offset */ + +__BEGIN_DECLS +/* XXX PPD This should be moved to dirent.h to be with getdirentries(2) et al. */ +//int getdirentriesattr(int fd, const struct attrlist *attrspec, void *attrbuf, size_t bufsize, long //*cookie); + +__END_DECLS + +#endif /* !_SYS_ATTR_H_ */ diff --git a/bsd/sys/buf.h b/bsd/sys/buf.h new file mode 100644 index 000000000..25a87d6cf --- /dev/null +++ b/bsd/sys/buf.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)buf.h 8.9 (Berkeley) 3/30/95 + */ + +#ifndef _SYS_BUF_H_ +#define _SYS_BUF_H_ +#include +#include + +#include + +#define NOLIST ((struct buf *)0x87654321) + +#include + +/* + * The buffer header describes an I/O operation in the kernel. + */ +struct buf { + LIST_ENTRY(buf) b_hash; /* Hash chain. */ + LIST_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode. */ + TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */ + struct proc *b_proc; /* Associated proc; NULL if kernel. */ + volatile long b_flags; /* B_* flags. */ + int b_error; /* Errno value. */ + long b_bufsize; /* Allocated buffer size. */ + long b_bcount; /* Valid bytes in buffer. */ + long b_resid; /* Remaining I/O. */ + dev_t b_dev; /* Device associated with buffer. */ + struct { + caddr_t b_addr; /* Memory, superblocks, indirect etc.*/ + } b_un; + void *b_saveaddr; /* Original b_addr for physio. */ + daddr_t b_lblkno; /* Logical block number. */ + daddr_t b_blkno; /* Underlying physical block number. */ + /* Function to call upon completion. */ + void (*b_iodone) __P((struct buf *)); + struct vnode *b_vp; /* Device vnode. */ + int b_dirtyoff; /* Offset in buffer of dirty region. */ + int b_dirtyend; /* Offset of end of dirty region. */ + int b_validoff; /* Offset in buffer of valid region. */ + int b_validend; /* Offset of end of valid region. */ + struct ucred *b_rcred; /* Read credentials reference. 
*/ + struct ucred *b_wcred; /* Write credentials reference. */ + int b_timestamp; /* timestamp for queuing operation */ + long b_vectorcount; /* number of vectors in b_vectorlist */ + void *b_vectorlist; /* vector list for I/O */ + void *b_pagelist; /* to save pagelist info */ + long b_vects[2]; /* vectorlist when b_vectorcount is 1 */ + long b_whichq; /* the free list the buffer belongs to */ + TAILQ_ENTRY(buf) b_act; /* Device driver queue when active */ + void *b_drvdata; /* Device driver private use */ +}; + +/* + * For portability with historic industry practice, the cylinder number has + * to be maintained in the `b_resid' field. + */ +#define b_cylinder b_resid /* Cylinder number for disksort(). */ + +/* Device driver compatibility definitions. */ +#define b_active b_bcount /* Driver queue head: drive active. */ +#define b_data b_un.b_addr /* b_un.b_addr is not changeable. */ +#define b_errcnt b_resid /* Retry count while I/O in progress. */ +#define iodone biodone /* Old name for biodone. */ +#define iowait biowait /* Old name for biowait. */ + +/* cluster_io definitions for use with io bufs */ +#define b_uploffset b_bufsize +#define b_trans_head b_freelist.tqe_prev +#define b_trans_next b_freelist.tqe_next +#define b_real_bp b_saveaddr + +/* + * These flags are kept in b_flags. + */ +#define B_AGE 0x00000001 /* Move to age queue when I/O done. */ +#define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */ +#define B_ASYNC 0x00000004 /* Start I/O, do not wait. */ +#define B_BAD 0x00000008 /* Bad block revectoring in progress. */ +#define B_BUSY 0x00000010 /* I/O in progress. */ +#define B_CACHE 0x00000020 /* Bread found us in the cache. */ +#define B_CALL 0x00000040 /* Call b_iodone from biodone. */ +#define B_DELWRI 0x00000080 /* Delay I/O until buffer reused. */ +#define B_DIRTY 0x00000100 /* Dirty page to be pushed out async. */ +#define B_DONE 0x00000200 /* I/O completed. 
*/ +#define B_EINTR 0x00000400 /* I/O was interrupted */ +#define B_ERROR 0x00000800 /* I/O error occurred. */ +#define B_WASDIRTY 0x00001000 /* page was found dirty in the VM cache */ +#define B_INVAL 0x00002000 /* Does not contain valid info. */ +#define B_LOCKED 0x00004000 /* Locked in core (not reusable). */ +#define B_NOCACHE 0x00008000 /* Do not cache block after use. */ +#define B_PAGEOUT 0x00010000 /* Page out indicator... */ +#define B_PGIN 0x00020000 /* Pagein op, so swap() can count it. */ +#define B_PHYS 0x00040000 /* I/O to user memory. */ +#define B_RAW 0x00080000 /* Set by physio for raw transfers. */ +#define B_READ 0x00100000 /* Read buffer. */ +#define B_TAPE 0x00200000 /* Magnetic tape I/O. */ +#define B_PAGELIST 0x00400000 /* Buffer describes pagelist I/O. */ +#define B_WANTED 0x00800000 /* Process wants this buffer. */ +#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */ +#define B_WRITEINPROG 0x01000000 /* Write in progress. */ +#define B_UNUSED0 0x02000000 /* Unused bit */ +#define B_UNUSED1 0x04000000 /* Unused bit */ +#define B_NEED_IODONE 0x08000000 + /* need to do a biodone on the */ + /* real_bp associated with a cluster_io */ +#define B_COMMIT_UPL 0x10000000 + /* commit pages in upl when */ + /* I/O completes/fails */ +#define B_ZALLOC 0x20000000 /* b_data is zalloc()ed */ +#define B_META 0x40000000 /* buffer contains meta-data. */ +#define B_VECTORLIST 0x80000000 /* Used by device drivers. */ + + +/* + * Zero out the buffer's data area. + */ +#define clrbuf(bp) { \ + bzero((bp)->b_data, (u_int)(bp)->b_bcount); \ + (bp)->b_resid = 0; \ +} + +/* Flags to low-level allocation routines. */ +#define B_CLRBUF 0x01 /* Request allocated buffer be cleared. */ +#define B_SYNC 0x02 /* Do all allocations synchronously. 
*/ +#define B_NOBUFF 0x04 /* Do not allocate struct buf */ + +/* Flags for operation type in getblk() */ +#define BLK_READ 0x01 /* buffer for read */ +#define BLK_WRITE 0x02 /* buffer for write */ +#define BLK_PAGEIN 0x04 /* buffer for pagein */ +#define BLK_PAGEOUT 0x08 /* buffer for pageout */ +#define BLK_META 0x10 /* buffer for metadata */ +#define BLK_CLREAD 0x20 /* buffer for cluster read */ +#define BLK_CLWRITE 0x40 /* buffer for cluster write */ + +#ifdef KERNEL +extern int nbuf; /* The number of buffer headers */ +extern struct buf *buf; /* The buffer headers. */ + +/* Macros to clear/set/test flags. */ +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) + +/* + * Definitions for the buffer free lists. + */ +#define BQUEUES 5 /* number of free buffer queues */ + +#define BQ_LOCKED 0 /* super-blocks &c */ +#define BQ_LRU 1 /* lru, useful buffers */ +#define BQ_AGE 2 /* rubbish */ +#define BQ_EMPTY 3 /* buffer headers with no memory */ +#define BQ_META 4 /* buffer containing metadata */ + +__BEGIN_DECLS +int allocbuf __P((struct buf *, int)); +void bawrite __P((struct buf *)); +void bdwrite __P((struct buf *)); +void biodone __P((struct buf *)); +int biowait __P((struct buf *)); +int bread __P((struct vnode *, daddr_t, int, + struct ucred *, struct buf **)); +int meta_bread __P((struct vnode *, daddr_t, int, + struct ucred *, struct buf **)); +int breada __P((struct vnode *, daddr_t, int, daddr_t, int, + struct ucred *, struct buf **)); +int breadn __P((struct vnode *, daddr_t, int, daddr_t *, int *, int, + struct ucred *, struct buf **)); +void brelse __P((struct buf *)); +void bremfree __P((struct buf *)); +void bufinit __P((void)); +int bwrite __P((struct buf *)); +struct buf *getblk __P((struct vnode *, daddr_t, int, int, int, int)); +struct buf *geteblk __P((int)); +struct buf *incore __P((struct vnode *, daddr_t)); +u_int minphys __P((struct buf *bp)); +int physio __P((void (*)(struct buf *), struct buf *, 
dev_t, int , u_int (*)(struct buf *), struct uio *, int )); +int count_busy_buffers __P((void)); +struct buf *alloc_io_buf __P((struct vnode *vp)); +void free_io_buf __P((struct buf *bp)); +__END_DECLS + +/* + * Stats on usefulness of the buffer cache + */ +struct bufstats { + long bufs_incore; /* found incore */ + long bufs_busyincore; /* found incore. was busy */ + long bufs_vmhits; /* not incore. found in VM */ + long bufs_miss; /* not incore. not in VM */ + long bufs_sleeps; /* buffer starvation */ + long bufs_eblk; /* Calls to geteblk */ + long bufs_iobufmax; /* Max. number of IO buffers used */ + long bufs_iobufinuse; /* number of IO buffers in use */ + long bufs_iobufsleeps; /* IO buffer starvation */ +}; + +#endif /* KERNEL */ +#endif /* !_SYS_BUF_H_ */ diff --git a/bsd/sys/callout.h b/bsd/sys/callout.h new file mode 100644 index 000000000..c3faa8f79 --- /dev/null +++ b/bsd/sys/callout.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. 
+ * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)callout.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_CALLOUT_H_ +#define _SYS_CALLOUT_H_ + +#include + + + +#define CALLOUT_PRI_SOFTINT0 0 +#define CALLOUT_PRI_SOFTINT1 1 +#define CALLOUT_PRI_RETRACE 2 +#define CALLOUT_PRI_DSP 3 +#define CALLOUT_PRI_THREAD 4 /* run in a thread */ +#define CALLOUT_PRI_NOW 5 /* must be last */ +#define N_CALLOUT_PRI 6 + + +#endif /* _SYS_CALLOUT_H_ */ diff --git a/bsd/sys/cdefs.h b/bsd/sys/cdefs.h new file mode 100644 index 000000000..5622154f3 --- /dev/null +++ b/bsd/sys/cdefs.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright 1995 NeXT Computer, Inc. All rights reserved. */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Berkeley Software Design, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cdefs.h 8.8 (Berkeley) 1/9/95 + */ + +#ifndef _CDEFS_H_ +#define _CDEFS_H_ + +#if defined(__cplusplus) +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } +#else +#define __BEGIN_DECLS +#define __END_DECLS +#endif + +/* + * The __CONCAT macro is used to concatenate parts of symbol names, e.g. + * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo. + * The __CONCAT macro is a bit tricky -- make sure you don't put spaces + * in between its arguments. __CONCAT can also concatenate double-quoted + * strings produced by the __STRING macro, but this only works with ANSI C. 
+ */ +#if defined(__STDC__) || defined(__cplusplus) +#define __P(protos) protos /* full-blown ANSI C */ +#define __CONCAT(x,y) x ## y +#define __STRING(x) #x + +#define __const const /* define reserved names to standard */ +#define __signed signed +#define __volatile volatile +#if defined(__cplusplus) +#define __inline inline /* convert to C++ keyword */ +#else +#ifndef __GNUC__ +#define __inline /* delete GCC keyword */ +#endif /* !__GNUC__ */ +#endif /* !__cplusplus */ + +#else /* !(__STDC__ || __cplusplus) */ +#define __P(protos) () /* traditional C preprocessor */ +#define __CONCAT(x,y) x/**/y +#define __STRING(x) "x" + +#ifndef __GNUC__ +#define __const /* delete pseudo-ANSI C keywords */ +#define __inline +#define __signed +#define __volatile +#endif /* !__GNUC__ */ + +/* + * In non-ANSI C environments, new programs will want ANSI-only C keywords + * deleted from the program and old programs will want them left alone. + * When using a compiler other than gcc, programs using the ANSI C keywords + * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS. + * When using "gcc -traditional", we assume that this is the intent; if + * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone. + */ +#ifndef NO_ANSI_KEYWORDS +#define const __const /* convert ANSI C keywords */ +#define inline __inline +#define signed __signed +#define volatile __volatile +#endif /* !NO_ANSI_KEYWORDS */ +#endif /* !(__STDC__ || __cplusplus) */ + +/* + * GCC1 and some versions of GCC2 declare dead (non-returning) and + * pure (no side effects) functions using "volatile" and "const"; + * unfortunately, these then cause warnings under "-ansi -pedantic". + * GCC2 uses a new, peculiar __attribute__((attrs)) style. All of + * these work for GNU C++ (modulo a slight glitch in the C++ grammar + * in the distribution version of 2.5.5). 
+ */ +#if !defined(__GNUC__) || __GNUC__ < 2 || \ + (__GNUC__ == 2 && __GNUC_MINOR__ < 5) +#define __attribute__(x) /* delete __attribute__ if non-gcc or gcc1 */ +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +#define __dead __volatile +#define __pure __const +#endif +#endif + +/* Delete pseudo-keywords wherever they are not available or needed. */ +#ifndef __dead +#define __dead +#define __pure +#endif + +#define __IDSTRING(name,string) \ + static const char name[] __attribute__((__unused__)) = string + +#ifndef __COPYRIGHT +#define __COPYRIGHT(s) __IDSTRING(copyright,s) +#endif + +#ifndef __RCSID +#define __RCSID(s) __IDSTRING(rcsid,s) +#endif + +#ifndef __SCCSID +#define __SCCSID(s) __IDSTRING(sccsid,s) +#endif + +#ifndef __PROJECT_VERSION +#define __PROJECT_VERSION(s) __IDSTRING(project_version,s) +#endif + +#endif /* !_CDEFS_H_ */ diff --git a/bsd/sys/cdio.h b/bsd/sys/cdio.h new file mode 100644 index 000000000..edbd57b46 --- /dev/null +++ b/bsd/sys/cdio.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + */ + +#ifndef _SYS_CDIO_H_ +#define _SYS_CDIO_H_ + +/* Shared between kernel & process */ + +struct cd_toc_entry { + u_char nothing1; + u_char control:4; + u_char addr_type:4; + u_char track; + u_char nothing2; + u_char addr[4]; +}; + +struct cd_sub_channel_header { + u_char nothing1; + u_char audio_status; +#define CD_AS_AUDIO_INVALID 0x00 +#define CD_AS_PLAY_IN_PROGRESS 0x11 +#define CD_AS_PLAY_PAUSED 0x12 +#define CD_AS_PLAY_COMPLETED 0x13 +#define CD_AS_PLAY_ERROR 0x14 +#define CD_AS_NO_STATUS 0x15 + u_char data_len[2]; +}; + +struct cd_sub_channel_position_data { + u_char data_format; + u_char control:4; + u_char addr_type:4; + u_char track_number; + u_char index_number; + u_char absaddr[4]; + u_char reladdr[4]; +}; + +struct cd_sub_channel_media_catalog { + u_char data_format; + u_char nothing1; + u_char nothing2; + u_char nothing3; + u_char :7; + u_char mc_valid:1; + u_char mc_number[15]; +}; + +struct cd_sub_channel_track_info { + u_char data_format; + u_char nothing1; + u_char track_number; + u_char nothing2; + u_char :7; + u_char ti_valid:1; + u_char ti_number[15]; +}; + +struct cd_sub_channel_info { + struct cd_sub_channel_header header; + union { + struct cd_sub_channel_position_data position; + struct cd_sub_channel_media_catalog media_catalog; + struct cd_sub_channel_track_info track_info; + } what; +}; + +/* + * Ioctls for the CD drive + */ +struct ioc_play_track { + u_char start_track; + u_char start_index; + u_char end_track; + u_char end_index; +}; + +#define CDIOCPLAYTRACKS _IOW('c', 1, struct ioc_play_track) +struct ioc_play_blocks { + int blk; + int len; +}; +#define CDIOCPLAYBLOCKS _IOW('c', 2, struct ioc_play_blocks) + +struct ioc_read_subchannel { + u_char address_format; +#define CD_LBA_FORMAT 1 +#define CD_MSF_FORMAT 2 + u_char data_format; +#define CD_SUBQ_DATA 0 +#define CD_CURRENT_POSITION 1 +#define 
CD_MEDIA_CATALOG 2 +#define CD_TRACK_INFO 3 + u_char track; + int data_len; + struct cd_sub_channel_info *data; +}; +#define CDIOCREADSUBCHANNEL _IOWR('c', 3, struct ioc_read_subchannel ) + +struct ioc_toc_header { + u_short len; + u_char starting_track; + u_char ending_track; +}; + +#define CDIOREADTOCHEADER _IOR('c', 4, struct ioc_toc_header) + +struct ioc_read_toc_entry { + u_char address_format; + u_char starting_track; + u_short data_len; + struct cd_toc_entry *data; +}; +#define CDIOREADTOCENTRYS _IOWR('c', 5, struct ioc_read_toc_entry) + +struct ioc_patch { + u_char patch[4]; /* one for each channel */ +}; +#define CDIOCSETPATCH _IOW('c', 9, struct ioc_patch) + +struct ioc_vol { + u_char vol[4]; /* one for each channel */ +}; +#define CDIOCGETVOL _IOR('c', 10, struct ioc_vol) +#define CDIOCSETVOL _IOW('c', 11, struct ioc_vol) +#define CDIOCSETMONO _IO('c', 12) +#define CDIOCSETSTEREO _IO('c', 13) +#define CDIOCSETMUTE _IO('c', 14) +#define CDIOCSETLEFT _IO('c', 15) +#define CDIOCSETRIGHT _IO('c', 16) +#define CDIOCSETDEBUG _IO('c', 17) +#define CDIOCCLRDEBUG _IO('c', 18) +#define CDIOCPAUSE _IO('c', 19) +#define CDIOCRESUME _IO('c', 20) +#define CDIOCRESET _IO('c', 21) +#define CDIOCSTART _IO('c', 22) +#define CDIOCSTOP _IO('c', 23) +#define CDIOCEJECT _IO('c', 24) +#define CDIOCALLOW _IO('c', 25) +#define CDIOCPREVENT _IO('c', 26) + +struct ioc_play_msf { + u_char start_m; + u_char start_s; + u_char start_f; + u_char end_m; + u_char end_s; + u_char end_f; +}; +#define CDIOCPLAYMSF _IOW('c', 25, struct ioc_play_msf) + +#endif /* !_SYS_CDIO_H_ */ diff --git a/bsd/sys/chio.h b/bsd/sys/chio.h new file mode 100644 index 000000000..6d35d8529 --- /dev/null +++ b/bsd/sys/chio.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + */ + +#ifndef _SYS_CHIO_H_ +#define _SYS_CHIO_H_ + +/* + * Structures and definitions for changer io control commands + */ + +#define CH_INVERT 0x10000 +#define CH_ADDR_MASK 0xffff +struct chop { + short ch_op; /* operations defined below */ + short result; /* the result */ + union { + struct { + int chm; /* Transport element */ + int from; + int to; + } move; + struct { + int chm; /* Transport element */ + int to; + } position; + struct { + short chmo; /* Offset of first CHM */ + short chms; /* No. of CHM */ + short slots; /* No. of Storage Elements */ + short sloto; /* Offset of first SE */ + short imexs; /* No. of Import/Export Slots */ + short imexo; /* Offset of first IM/EX */ + short drives; /* No. 
of CTS */ +	short driveo;	/* Offset of first CTS */ +	short rot;	/* CHM can rotate */ +	} getparam; +	struct { +	int type; +#define CH_CHM 1 +#define CH_STOR 2 +#define CH_IMEX 3 +#define CH_CTS 4 +	int from; +	struct { +	u_char elema_1; +	u_char elema_0; +	u_char full:1; +	u_char rsvd:1; +	u_char except:1; +	u_char :5; +	u_char rsvd2; +	union { +	struct { +	u_char add_sense_code; +	u_char add_sense_code_qualifier; +	} specs; +	short add_sense; +/* WARNING LSB only */ +#define CH_CHOLDER 0x0290 /* Cartridge holder is missing */ +#define CH_STATUSQ 0x0390 /* Status is questionable */ +#define CH_CTS_CLOSED 0x0490 /* CTS door is closed */ +	} ch_add_sense; +	u_char rsvd3[3]; +	u_char :6; +	u_char invert:1; +	u_char svalid:1; +	u_char source_1; +	u_char source_0; +	u_char rsvd4[4]; +	} elem_data; +	} get_elem_stat; +	} u; +}; + +/* operations */ +#define CHMOVE 1 +#define CHPOSITION 2 +#define CHGETPARAM 3 +#define CHGETELEM 4 + + +/* Changer IO control command */ +#define CHIOOP _IOWR('c', 1, struct chop) /* do a mag tape op */ + +#endif /* !_SYS_CHIO_H_ */ diff --git a/bsd/sys/clist.h b/bsd/sys/clist.h new file mode 100644 index 000000000..1ff1e0dbb --- /dev/null +++ b/bsd/sys/clist.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)clist.h 8.1 (Berkeley) 6/4/93 + */ + +#ifndef _SYS_CLIST_H_ +#define _SYS_CLIST_H_ + +struct cblock { + struct cblock *c_next; /* next cblock in queue */ + char c_quote[CBQSIZE]; /* quoted characters */ + char c_info[CBSIZE]; /* characters */ +}; + +#ifdef KERNEL +extern struct cblock *cfree, *cfreelist; +extern int cfreecount, nclist; +#endif + +#endif /* _SYS_CLIST_H_ */ + diff --git a/bsd/sys/conf.h b/bsd/sys/conf.h new file mode 100644 index 000000000..535881b08 --- /dev/null +++ b/bsd/sys/conf.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)conf.h 8.5 (Berkeley) 1/9/95 + */ + +#ifndef _SYS_CONF_H_ +#define _SYS_CONF_H_ 1 + +/* + * Definitions of device driver entry switches + */ + +struct buf; +struct proc; +struct tty; +struct uio; +struct vnode; + +/* + * Device switch function types. + */ +typedef int open_close_fcn_t __P((dev_t dev, int flags, int devtype, + struct proc *p)); + +typedef struct tty *d_devtotty_t __P((dev_t dev)); + +typedef void strategy_fcn_t __P((struct buf *bp)); +typedef int ioctl_fcn_t __P((dev_t dev, u_long cmd, caddr_t data, + int fflag, struct proc *p)); +typedef int dump_fcn_t (); /* parameters vary by architecture */ +typedef int psize_fcn_t __P((dev_t dev)); +typedef int read_write_fcn_t __P((dev_t dev, struct uio *uio, int ioflag)); +typedef int stop_fcn_t __P((struct tty *tp, int rw)); +typedef int reset_fcn_t __P((int uban)); +typedef int select_fcn_t __P((dev_t dev, int which, struct proc *p)); +typedef int mmap_fcn_t __P(()); +typedef int getc_fcn_t __P((dev_t dev)); +typedef int putc_fcn_t __P((dev_t dev, char c)); +typedef int d_poll_t __P((dev_t dev, int events, struct proc *p)); + +#define d_open_t open_close_fcn_t +#define d_close_t open_close_fcn_t +#define d_read_t read_write_fcn_t +#define d_write_t read_write_fcn_t +#define d_ioctl_t ioctl_fcn_t + +__BEGIN_DECLS +int enodev (); /* avoid actual prototype for multiple use */ +void enodev_strat(); +__END_DECLS + +/* + * Versions of enodev() pointer, cast to appropriate 
function type. For use + * in empty devsw slots. + */ +#define eno_opcl ((open_close_fcn_t *)&enodev) +#define eno_strat ((strategy_fcn_t *)&enodev_strat) +#define eno_ioctl ((ioctl_fcn_t *)&enodev) +#define eno_dump ((dump_fcn_t *)&enodev) +#define eno_psize ((psize_fcn_t *)&enodev) +#define eno_rdwrt ((read_write_fcn_t *)&enodev) +#define eno_stop ((stop_fcn_t *)&enodev) +#define eno_reset ((reset_fcn_t *)&enodev) +#define eno_mmap ((mmap_fcn_t *)&enodev) +#define eno_getc ((getc_fcn_t *)&enodev) +#define eno_putc ((putc_fcn_t *)&enodev) +#define eno_select ((select_fcn_t *)&enodev) + +/* + * Types for d_type. + */ +#define D_TAPE 1 +#define D_DISK 2 +#define D_TTY 3 + +/* + * Block device switch table + */ +struct bdevsw { + open_close_fcn_t *d_open; + open_close_fcn_t *d_close; + strategy_fcn_t *d_strategy; + ioctl_fcn_t *d_ioctl; + dump_fcn_t *d_dump; + psize_fcn_t *d_psize; + int d_type; +}; + +#ifdef KERNEL + +d_devtotty_t nodevtotty; +d_write_t nowrite; + + +extern struct bdevsw bdevsw[]; + +/* + * Contents of empty bdevsw slot. + */ +#define NO_BDEVICE \ + { eno_opcl, eno_opcl, eno_strat, eno_ioctl, \ + eno_dump, eno_psize, 0 } + +#endif /* KERNEL */ + +/* + * Character device switch table + */ +struct cdevsw { + open_close_fcn_t *d_open; + open_close_fcn_t *d_close; + read_write_fcn_t *d_read; + read_write_fcn_t *d_write; + ioctl_fcn_t *d_ioctl; + stop_fcn_t *d_stop; + reset_fcn_t *d_reset; + struct tty **d_ttys; + select_fcn_t *d_select; + mmap_fcn_t *d_mmap; + strategy_fcn_t *d_strategy; + getc_fcn_t *d_getc; + putc_fcn_t *d_putc; + int d_type; +}; + +#ifdef KERNEL + +extern struct cdevsw cdevsw[]; + +/* + * Contents of empty cdevsw slot. 
+ */ + +#define NO_CDEVICE \ + { \ + eno_opcl, eno_opcl, eno_rdwrt, eno_rdwrt, \ + eno_ioctl, eno_stop, eno_reset, 0, \ + seltrue, eno_mmap, eno_strat, eno_getc, \ + eno_putc, 0 \ + } + +#endif /* KERNEL */ + +/* + * Line discipline switch table + */ +struct linesw { + int (*l_open) __P((dev_t dev, struct tty *tp)); + int (*l_close) __P((struct tty *tp, int flags)); + int (*l_read) __P((struct tty *tp, struct uio *uio, + int flag)); + int (*l_write) __P((struct tty *tp, struct uio *uio, + int flag)); + int (*l_ioctl) __P((struct tty *tp, u_long cmd, caddr_t data, + int flag, struct proc *p)); + int (*l_rint) __P((int c, struct tty *tp)); + int (*l_start) __P((struct tty *tp)); + int (*l_modem) __P((struct tty *tp, int flag)); +}; + +#ifdef KERNEL +extern struct linesw linesw[]; +extern int nlinesw; + +int ldisc_register __P((int , struct linesw *)); +void ldisc_deregister __P((int)); +#define LDISC_LOAD -1 /* Loadable line discipline */ +#endif + +/* + * Swap device table + */ +struct swdevt { + dev_t sw_dev; + int sw_flags; + int sw_nblks; + struct vnode *sw_vp; +}; +#define SW_FREED 0x01 +#define SW_SEQUENTIAL 0x02 +#define sw_freed sw_flags /* XXX compat */ + +#ifdef KERNEL +extern struct swdevt swdevt[]; +#endif + +#ifdef KERNEL +/* + * ***_free finds free slot; + * ***_add adds entries to the devsw table + * If int arg is -1; finds a free slot + * Returns the major number if successful + * else -1 + */ +__BEGIN_DECLS +int bdevsw_isfree __P((int)); +int bdevsw_add __P((int, struct bdevsw *)); +int bdevsw_remove __P((int, struct bdevsw *)); +int cdevsw_isfree __P((int)); +int cdevsw_add __P((int, struct cdevsw *)); +int cdevsw_remove __P((int, struct cdevsw *)); +__END_DECLS +#endif + +#endif /* _SYS_CONF_H_ */ diff --git a/bsd/sys/dir.h b/bsd/sys/dir.h new file mode 100644 index 000000000..112b8526c --- /dev/null +++ b/bsd/sys/dir.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)dir.h	8.2 (Berkeley) 1/4/94
+ */
+
+/*
+ * The information in this file should be obtained from <dirent.h>
+ * and is provided solely (and temporarily) for backward compatibility.
+ */
+
+#ifndef _SYS_DIR_H_
+#define _SYS_DIR_H_
+
+#ifdef KERNEL
+#include <sys/dirent.h>
+#else
+#include <dirent.h>
+#endif
+
+/*
+ * Backwards compatibility.
+ */
+#define direct dirent
+
+/*
+ * The DIRSIZ macro gives the minimum record length which will hold
+ * the directory entry. This requires the amount of space in struct direct
+ * without the d_name field, plus enough space for the name with a terminating
+ * null byte (dp->d_namlen+1), rounded up to a 4 byte boundary.
+ */
+#undef DIRSIZ
+#define DIRSIZ(dp) \
+    ((sizeof (struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3))
+
+#endif /* !_SYS_DIR_H_ */
diff --git a/bsd/sys/dirent.h b/bsd/sys/dirent.h
new file mode 100644
index 000000000..8dc0359cc
--- /dev/null
+++ b/bsd/sys/dirent.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dirent.h 8.3 (Berkeley) 8/10/94 + */ + +/* + * The dirent structure defines the format of directory entries returned by + * the getdirentries(2) system call. + * + * A directory entry has a struct dirent at the front of it, containing its + * inode number, the length of the entry, and the length of the name + * contained in the entry. These are followed by the name padded to a 4 + * byte boundary with null bytes. All names are guaranteed null terminated. + * The maximum length of a name in a directory is MAXNAMLEN. 
+ */ + +#ifndef _SYS_DIRENT_H +#define _SYS_DIRENT_H + +struct dirent { + u_int32_t d_fileno; /* file number of entry */ + u_int16_t d_reclen; /* length of this record */ + u_int8_t d_type; /* file type, see below */ + u_int8_t d_namlen; /* length of string in d_name */ +#ifdef _POSIX_SOURCE + char d_name[255 + 1]; /* name must be no longer than this */ +#else +#define MAXNAMLEN 255 + char d_name[MAXNAMLEN + 1]; /* name must be no longer than this */ +#endif +}; + +/* + * File types + */ +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 + +/* + * Convert between stat structure types and directory types. + */ +#define IFTODT(mode) (((mode) & 0170000) >> 12) +#define DTTOIF(dirtype) ((dirtype) << 12) + +#endif /* _SYS_DIRENT_H */ diff --git a/bsd/sys/disklabel.h b/bsd/sys/disklabel.h new file mode 100644 index 000000000..21258ddf0 --- /dev/null +++ b/bsd/sys/disklabel.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1987, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ *
+ * @(#)disklabel.h	8.2 (Berkeley) 7/10/94
+ */
+#ifndef _SYS_DISKLABEL_H_
+#define _SYS_DISKLABEL_H_
+
+/*
+ * Disk description table, see disktab(5)
+ */
+#define _PATH_DISKTAB "/etc/disktab"
+#define DISKTAB "/etc/disktab" /* deprecated */
+
+/*
+ * Each disk has a label which includes information about the hardware
+ * disk geometry, filesystem partitions, and drive specific information.
+ * The location of the label, as well as the number of partitions the
+ * label can describe and the number of the "whole disk" (raw)
+ * paritition are machine dependent.
+ */
+#include <machine/disklabel.h>
+
+/*
+ * The absolute maximum number of disk partitions allowed.
+ * This is the maximum value of MAXPARTITIONS for which 'struct disklabel'
+ * is <= DEV_BSIZE bytes long. If MAXPARTITIONS is greater than this, beware.
+ */
+#define MAXMAXPARTITIONS 22
+#if MAXPARTITIONS > MAXMAXPARTITIONS
+#warning beware: MAXPARTITIONS bigger than MAXMAXPARTITIONS
+#endif
+
+/*
+ * Translate between device numbers and major/disk unit/disk partition.
+ */
+#define DISKUNIT(dev) (minor(dev) / MAXPARTITIONS)
+#define DISKPART(dev) (minor(dev) % MAXPARTITIONS)
+#define MAKEDISKDEV(maj, unit, part) \
+    (makedev((maj), ((unit) * MAXPARTITIONS) + (part)))
+
+#define DISKMAGIC ((u_int32_t)0x82564557) /* The disk magic number */
+
+#ifndef LOCORE
+struct disklabel {
+	u_int32_t d_magic;	/* the magic number */
+	u_int16_t d_type;	/* drive type */
+	u_int16_t d_subtype;	/* controller/d_type specific */
+	char d_typename[16];	/* type name, e.g. "eagle" */
+
+	/*
+	 * d_packname contains the pack identifier and is returned when
+	 * the disklabel is read off the disk or in-core copy.
+	 * d_boot0 and d_boot1 are the (optional) names of the
+	 * primary (block 0) and secondary (block 1-15) bootstraps
+	 * as found in /usr/mdec. These are returned when using
+	 * getdiskbyname(3) to retrieve the values from /etc/disktab.
+ */ + union { + char un_d_packname[16]; /* pack identifier */ + struct { + char *un_d_boot0; /* primary bootstrap name */ + char *un_d_boot1; /* secondary bootstrap name */ + } un_b; + } d_un; +#define d_packname d_un.un_d_packname +#define d_boot0 d_un.un_b.un_d_boot0 +#define d_boot1 d_un.un_b.un_d_boot1 + + /* disk geometry: */ + u_int32_t d_secsize; /* # of bytes per sector */ + u_int32_t d_nsectors; /* # of data sectors per track */ + u_int32_t d_ntracks; /* # of tracks per cylinder */ + u_int32_t d_ncylinders; /* # of data cylinders per unit */ + u_int32_t d_secpercyl; /* # of data sectors per cylinder */ + u_int32_t d_secperunit; /* # of data sectors per unit */ + + /* + * Spares (bad sector replacements) below are not counted in + * d_nsectors or d_secpercyl. Spare sectors are assumed to + * be physical sectors which occupy space at the end of each + * track and/or cylinder. + */ + u_int16_t d_sparespertrack; /* # of spare sectors per track */ + u_int16_t d_sparespercyl; /* # of spare sectors per cylinder */ + /* + * Alternate cylinders include maintenance, replacement, configuration + * description areas, etc. + */ + u_int32_t d_acylinders; /* # of alt. cylinders per unit */ + + /* hardware characteristics: */ + /* + * d_interleave, d_trackskew and d_cylskew describe perturbations + * in the media format used to compensate for a slow controller. + * Interleave is physical sector interleave, set up by the + * formatter or controller when formatting. When interleaving is + * in use, logically adjacent sectors are not physically + * contiguous, but instead are separated by some number of + * sectors. It is specified as the ratio of physical sectors + * traversed per logical sector. Thus an interleave of 1:1 + * implies contiguous layout, while 2:1 implies that logical + * sector 0 is separated by one sector from logical sector 1. + * d_trackskew is the offset of sector 0 on track N relative to + * sector 0 on track N-1 on the same cylinder. 
Finally, d_cylskew + * is the offset of sector 0 on cylinder N relative to sector 0 + * on cylinder N-1. + */ + u_int16_t d_rpm; /* rotational speed */ + u_int16_t d_interleave; /* hardware sector interleave */ + u_int16_t d_trackskew; /* sector 0 skew, per track */ + u_int16_t d_cylskew; /* sector 0 skew, per cylinder */ + u_int32_t d_headswitch; /* head switch time, usec */ + u_int32_t d_trkseek; /* track-to-track seek, usec */ + u_int32_t d_flags; /* generic flags */ +#define NDDATA 5 + u_int32_t d_drivedata[NDDATA]; /* drive-type specific information */ +#define NSPARE 5 + u_int32_t d_spare[NSPARE]; /* reserved for future use */ + u_int32_t d_magic2; /* the magic number (again) */ + u_int16_t d_checksum; /* xor of data incl. partitions */ + + /* filesystem and partition information: */ + u_int16_t d_npartitions; /* number of partitions in following */ + u_int32_t d_bbsize; /* size of boot area at sn0, bytes */ + u_int32_t d_sbsize; /* max size of fs superblock, bytes */ + struct partition { /* the partition table */ + u_int32_t p_size; /* number of sectors in partition */ + u_int32_t p_offset; /* starting sector */ + u_int32_t p_fsize; /* filesystem basic fragment size */ + u_int8_t p_fstype; /* filesystem type, see below */ + u_int8_t p_frag; /* filesystem fragments per block */ + union { + u_int16_t cpg; /* UFS: FS cylinders per group */ + u_int16_t sgs; /* LFS: FS segment shift */ + } __partition_u1; +#define p_cpg __partition_u1.cpg +#define p_sgs __partition_u1.sgs + } d_partitions[MAXPARTITIONS]; /* actually may be more */ +}; +#else /* LOCORE */ + /* + * offsets for asm boot files. 
+ */ + .set d_secsize,40 + .set d_nsectors,44 + .set d_ntracks,48 + .set d_ncylinders,52 + .set d_secpercyl,56 + .set d_secperunit,60 + .set d_end_,276 /* size of disk label */ +#endif /* LOCORE */ + +/* d_type values: */ +#define DTYPE_SMD 1 /* SMD, XSMD; VAX hp/up */ +#define DTYPE_MSCP 2 /* MSCP */ +#define DTYPE_DEC 3 /* other DEC (rk, rl) */ +#define DTYPE_SCSI 4 /* SCSI */ +#define DTYPE_ESDI 5 /* ESDI interface */ +#define DTYPE_ST506 6 /* ST506 etc. */ +#define DTYPE_HPIB 7 /* CS/80 on HP-IB */ +#define DTYPE_HPFL 8 /* HP Fiber-link */ +#define DTYPE_FLOPPY 10 /* floppy */ + +#ifdef DKTYPENAMES +static char *dktypenames[] = { + "unknown", + "SMD", + "MSCP", + "old DEC", + "SCSI", + "ESDI", + "ST506", + "HP-IB", + "HP-FL", + "type 9", + "floppy", + NULL +}; +#define DKMAXTYPES (sizeof(dktypenames) / sizeof(dktypenames[0]) - 1) +#endif + +/* + * Filesystem type and version. + * Used to interpret other filesystem-specific + * per-partition information. + */ +#define FS_UNUSED 0 /* unused */ +#define FS_SWAP 1 /* swap */ +#define FS_V6 2 /* Sixth Edition */ +#define FS_V7 3 /* Seventh Edition */ +#define FS_SYSV 4 /* System V */ +#define FS_V71K 5 /* V7 with 1K blocks (4.1, 2.9) */ +#define FS_V8 6 /* Eighth Edition, 4K blocks */ +#define FS_BSDFFS 7 /* 4.2BSD fast file system */ +#define FS_MSDOS 8 /* MSDOS file system */ +#define FS_BSDLFS 9 /* 4.4BSD log-structured file system */ +#define FS_OTHER 10 /* in use, but unknown/unsupported */ +#define FS_HPFS 11 /* OS/2 high-performance file system */ +#define FS_ISO9660 12 /* ISO 9660, normally CD-ROM */ +#define FS_BOOT 13 /* partition contains bootstrap */ +#define FS_ADOS 14 /* AmigaDOS fast file system */ +#define FS_HFS 15 /* Macintosh HFS */ + +#ifdef DKTYPENAMES +static char *fstypenames[] = { + "unused", + "swap", + "Version 6", + "Version 7", + "System V", + "4.1BSD", + "Eighth Edition", + "4.2BSD", + "MSDOS", + "4.4LFS", + "unknown", + "HPFS", + "ISO9660", + "boot", + "ADOS", + "HFS", + NULL +}; 
+#define FSMAXTYPES (sizeof(fstypenames) / sizeof(fstypenames[0]) - 1) +#endif + +/* + * flags shared by various drives: + */ +#define D_REMOVABLE 0x01 /* removable media */ +#define D_ECC 0x02 /* supports ECC */ +#define D_BADSECT 0x04 /* supports bad sector forw. */ +#define D_RAMDISK 0x08 /* disk emulator */ +#define D_CHAIN 0x10 /* can do back-back transfers */ + +/* + * Drive data for SMD. + */ +#define d_smdflags d_drivedata[0] +#define D_SSE 0x1 /* supports skip sectoring */ +#define d_mindist d_drivedata[1] +#define d_maxdist d_drivedata[2] +#define d_sdist d_drivedata[3] + +/* + * Drive data for ST506. + */ +#define d_precompcyl d_drivedata[0] +#define d_gap3 d_drivedata[1] /* used only when formatting */ + +/* + * Drive data for SCSI. + */ +#define d_blind d_drivedata[0] + +#ifndef LOCORE +/* + * Structure used to perform a format or other raw operation, returning + * data and/or register values. Register identification and format + * are device- and driver-dependent. + */ +struct format_op { + char *df_buf; + int df_count; /* value-result */ + daddr_t df_startblk; + int df_reg[8]; /* result */ +}; + +/* + * Structure used internally to retrieve information about a partition + * on a disk. + */ +struct partinfo { + struct disklabel *disklab; + struct partition *part; +}; + +/* + * Disk-specific ioctls. 
+ */
+	/* get and set disklabel; DIOCGPART used internally */
+#define DIOCGDINFO _IOR('d', 101, struct disklabel)/* get */
+#define DIOCSDINFO _IOW('d', 102, struct disklabel)/* set */
+#define DIOCWDINFO _IOW('d', 103, struct disklabel)/* set, update disk */
+#define DIOCGPART _IOW('d', 104, struct partinfo) /* get partition */
+
+/* do format operation, read or write */
+#define DIOCRFORMAT _IOWR('d', 105, struct format_op)
+#define DIOCWFORMAT _IOWR('d', 106, struct format_op)
+
+#define DIOCSSTEP _IOW('d', 107, int) /* set step rate */
+#define DIOCSRETRIES _IOW('d', 108, int) /* set # of retries */
+#define DIOCWLABEL _IOW('d', 109, int) /* write en/disable label */
+
+#define DIOCSBAD _IOW('d', 110, struct dkbad) /* set kernel dkbad */
+
+#endif /* LOCORE */
+
+#if !defined(KERNEL) && !defined(LOCORE)
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+struct disklabel *getdiskbyname __P((const char *));
+__END_DECLS
+
+#endif
+
+#endif /* ! _SYS_DISKLABEL_H_ */
+
diff --git a/bsd/sys/disktab.h b/bsd/sys/disktab.h
new file mode 100644
index 000000000..ef2a6faa5
--- /dev/null
+++ b/bsd/sys/disktab.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY: + * 16-Mar-88 John Seamons (jks) at NeXT + * Cleaned up to support standard disk label definitions. + * + * 24-Feb-88 Mike DeMoney (mike) at NeXT + * Added d_boot0_blkno to indicate logical block number + * of "block 0" boot. This blkno is in d_secsize sectors. + * Added d_bootfile to indicate the default operating system + * image to be booted by the blk 0 boot. + * Changed d_name and d_type to be char arrays rather than ptrs + * so they are part of label. This limits length of info in + * /etc/disktab, sorry. + */ + +#ifndef _SYS_DISKTAB_ +#define _SYS_DISKTAB_ + +/* + * Disk description table, see disktab(5) + */ +#ifndef KERNEL +#define DISKTAB "/etc/disktab" +#endif /* !KERNEL */ + +#define MAXDNMLEN 24 // drive name length +#define MAXMPTLEN 16 // mount point length +#define MAXFSTLEN 8 // file system type length +#define MAXTYPLEN 24 // drive type length +#define NBOOTS 2 // # of boot blocks +#define MAXBFLEN 24 // bootfile name length +#define MAXHNLEN 32 // host name length +#define NPART 8 // # of partitions + +typedef struct partition { + int p_base; /* base sector# of partition */ + int p_size; /* #sectors in partition */ + short p_bsize; /* block size in bytes */ + short p_fsize; /* frag size in bytes */ + char p_opt; /* 's'pace/'t'ime optimization pref */ + short p_cpg; /* cylinders per group */ + short p_density; /* bytes per inode density */ + char p_minfree; /* minfree (%) */ + char p_newfs; /* run newfs during init */ + char p_mountpt[MAXMPTLEN];/* mount point */ + char p_automnt; /* auto-mount when inserted */ + char p_type[MAXFSTLEN];/* file system type */ +} partition_t; + +typedef struct disktab { + char d_name[MAXDNMLEN]; /* drive name */ + char d_type[MAXTYPLEN]; /* drive type */ + int d_secsize; /* sector size in bytes */ + int d_ntracks; /* # tracks/cylinder */ + int 
d_nsectors; /* # sectors/track */ + int d_ncylinders; /* # cylinders */ + int d_rpm; /* revolutions/minute */ + short d_front; /* size of front porch (sectors) */ + short d_back; /* size of back porch (sectors) */ + short d_ngroups; /* number of alt groups */ + short d_ag_size; /* alt group size (sectors) */ + short d_ag_alts; /* alternate sectors / alt group */ + short d_ag_off; /* sector offset to first alternate */ + int d_boot0_blkno[NBOOTS]; /* "blk 0" boot locations */ + char d_bootfile[MAXBFLEN]; /* default bootfile */ + char d_hostname[MAXHNLEN]; /* host name */ + char d_rootpartition; /* root partition e.g. 'a' */ + char d_rwpartition; /* r/w partition e.g. 'b' */ + partition_t d_partitions[NPART]; +} disktab_t; + +#ifndef KERNEL +struct disktab *getdiskbyname(), *getdiskbydev(); +#endif /* !KERNEL */ + +#endif /* _SYS_DISKTAB_ */ diff --git a/bsd/sys/dkbad.h b/bsd/sys/dkbad.h new file mode 100644 index 000000000..d4aefd847 --- /dev/null +++ b/bsd/sys/dkbad.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)dkbad.h 8.2 (Berkeley) 7/10/94 + */ + +#ifndef _SYS_DKBAD_H_ +#define _SYS_DKBAD_H_ + +/* + * Definitions needed to perform bad sector revectoring ala DEC STD 144. + * + * The bad sector information is located in the first 5 even numbered + * sectors of the last track of the disk pack. There are five identical + * copies of the information, described by the dkbad structure. + * + * Replacement sectors are allocated starting with the first sector before + * the bad sector information and working backwards towards the beginning of + * the disk. A maximum of 126 bad sectors are supported. The position of + * the bad sector in the bad sector table determines which replacement sector + * it corresponds to. + * + * The bad sector information and replacement sectors are conventionally + * only accessible through the 'c' file system partition of the disk. If + * that partition is used for a file system, the user is responsible for + * making sure that it does not overlap the bad sector information or any + * replacement sectors. + */ +struct dkbad { + int32_t bt_csn; /* cartridge serial number */ + u_int16_t bt_mbz; /* unused; should be 0 */ + u_int16_t bt_flag; /* -1 => alignment cartridge */ + struct bt_bad { + u_int16_t bt_cyl; /* cylinder number of bad sector */ + u_int16_t bt_trksec; /* track and sector number */ + } bt_bad[126]; +}; + +#define ECC 0 +#define SSE 1 +#define BSE 2 +#define CONT 3 + +#endif /* _SYS_DKBAD_H_ */ diff --git a/bsd/sys/dkstat.h b/bsd/sys/dkstat.h new file mode 100644 index 000000000..4abbc3e88 --- /dev/null +++ b/bsd/sys/dkstat.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dkstat.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_DKSTAT_H_ +#define _SYS_DKSTAT_H_ + +#define CP_USER 0 +#define CP_NICE 1 +#define CP_SYS 2 +#define CP_INTR 3 +#define CP_IDLE 4 +#define CPUSTATES 5 + +#define DK_NDRIVE 8 + +#ifdef KERNEL +extern long cp_time[CPUSTATES]; +extern long dk_seek[DK_NDRIVE]; +extern long dk_time[DK_NDRIVE]; +extern long dk_wds[DK_NDRIVE]; +extern long dk_wpms[DK_NDRIVE]; +extern long dk_xfer[DK_NDRIVE]; + +extern int dk_busy; +extern int dk_ndrive; + +extern long tk_cancc; +extern long tk_nin; +extern long tk_nout; +extern long tk_rawcc; +#endif +#endif /* _SYS_DKSTAT_H_ */ diff --git a/bsd/sys/dmap.h b/bsd/sys/dmap.h new file mode 100644 index 000000000..befe5710f --- /dev/null +++ b/bsd/sys/dmap.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
 Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dmap.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_DMAP_H_ +#define _SYS_DMAP_H_ + +/* + * Definitions for the mapping of virtual swap space to the physical swap + * area - the disk map. + */ +#define NDMAP 38 /* size of the swap area map */ + +struct dmap { + swblk_t dm_size; /* current size used by process */ + swblk_t dm_alloc; /* amount of physical swap space allocated */ + swblk_t dm_map[NDMAP]; /* first disk block number in each chunk */ +}; +#ifdef KERNEL +extern struct dmap zdmap; +extern int dmmin, dmmax, dmtext; +#endif + +/* The following structure is that ``returned'' from a call to vstodb(). */ +struct dblock { + swblk_t db_base; /* base of physical contig drum block */ + swblk_t db_size; /* size of block */ +}; +#endif /* !_SYS_DMAP_H_ */ diff --git a/bsd/sys/domain.h b/bsd/sys/domain.h new file mode 100644 index 000000000..1fbc76ed5 --- /dev/null +++ b/bsd/sys/domain.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)domain.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_DOMAIN_H_ +#define _SYS_DOMAIN_H_ + +/* + * Structure per communications domain. + */ + +/* + * Forward structure declarations for function prototypes [sic]. 
+ */ +struct mbuf; + +struct domain { + int dom_family; /* AF_xxx */ + char *dom_name; + void (*dom_init) /* initialize domain data structures */ + __P((void)); + int (*dom_externalize) /* externalize access rights */ + __P((struct mbuf *)); + void (*dom_dispose) /* dispose of internalized rights */ + __P((struct mbuf *)); + struct protosw *dom_protosw; /* Chain of protosw's for AF */ + struct domain *dom_next; + int (*dom_rtattach) /* initialize routing table */ + __P((void **, int)); + int dom_rtoffset; /* an arg to rtattach, in bits */ + int dom_maxrtkey; /* for routing layer */ + int dom_protohdrlen; /* Let the protocol tell us */ + int dom_refs; /* # socreates outstanding */ +}; + +#ifdef KERNEL +extern struct domain *domains; +extern struct domain localdomain; +extern void net_add_domain(struct domain *dp); +extern int net_del_domain(struct domain *); + +#define DOMAIN_SET(domain_set) + +/* +#define DOMAIN_SET(name) \ + DATA_SET(domain_set, name ## domain) +*/ + +#endif +#endif /* _SYS_DOMAIN_H_ */ diff --git a/bsd/sys/errno.h b/bsd/sys/errno.h new file mode 100644 index 000000000..86abc959c --- /dev/null +++ b/bsd/sys/errno.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)errno.h 8.5 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_ERRNO_H_ +#define _SYS_ERRNO_H_ + +#if !defined(KERNEL) && !defined(KERNEL_PRIVATE) +#include +__BEGIN_DECLS +extern int * __error __P((void)); +#define errno (*__error()) +__END_DECLS +#endif + +/* + * Error codes + */ + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* Input/output error */ +#define ENXIO 6 /* Device not configured */ +#define E2BIG 7 /* Argument list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file descriptor */ +#define ECHILD 10 /* No child processes */ +#define EDEADLK 11 /* Resource deadlock avoided */ + /* 11 was EAGAIN */ +#define ENOMEM 12 /* Cannot allocate memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#ifndef _POSIX_SOURCE +#define ENOTBLK 15 /* Block device required */ +#endif +#define EBUSY 16 /* Device busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* Operation not supported by device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* Too many open files in system */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Inappropriate ioctl for device */ 
+#ifndef _POSIX_SOURCE +#define ETXTBSY 26 /* Text file busy */ +#endif +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ + +/* math software */ +#define EDOM 33 /* Numerical argument out of domain */ +#define ERANGE 34 /* Result too large */ + +/* non-blocking and interrupt i/o */ +#define EAGAIN 35 /* Resource temporarily unavailable */ +#ifndef _POSIX_SOURCE +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ + +/* ipc/network software -- argument errors */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#endif /* ! 
_POSIX_SOURCE */ +#define ENOTSUP 45 /* Operation not supported */ +#ifndef _POSIX_SOURCE +#define EOPNOTSUPP ENOTSUP /* Operation not supported */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Can't assign requested address */ + +/* ipc/network software -- operational errors */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection on reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Socket is already connected */ +#define ENOTCONN 57 /* Socket is not connected */ +#define ESHUTDOWN 58 /* Can't send after socket shutdown */ +#define ETOOMANYREFS 59 /* Too many references: can't splice */ +#define ETIMEDOUT 60 /* Operation timed out */ +#define ECONNREFUSED 61 /* Connection refused */ + +#define ELOOP 62 /* Too many levels of symbolic links */ +#endif /* _POSIX_SOURCE */ +#define ENAMETOOLONG 63 /* File name too long */ + +/* should be rearranged */ +#ifndef _POSIX_SOURCE +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#endif /* _POSIX_SOURCE */ +#define ENOTEMPTY 66 /* Directory not empty */ + +/* quotas & mush */ +#ifndef _POSIX_SOURCE +#define EPROCLIM 67 /* Too many processes */ +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Disc quota exceeded */ + +/* Network File System */ +#define ESTALE 70 /* Stale NFS file handle */ +#define EREMOTE 71 /* Too many levels of remote in path */ +#define EBADRPC 72 /* RPC struct is bad */ +#define ERPCMISMATCH 73 /* RPC version wrong */ +#define EPROGUNAVAIL 74 /* RPC prog. 
not avail */ +#define EPROGMISMATCH 75 /* Program version wrong */ +#define EPROCUNAVAIL 76 /* Bad procedure for program */ +#endif /* _POSIX_SOURCE */ + +#define ENOLCK 77 /* No locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#ifndef _POSIX_SOURCE +#define EFTYPE 79 /* Inappropriate file type or format */ +#define EAUTH 80 /* Authentication error */ +#define ENEEDAUTH 81 /* Need authenticator */ +#endif /* _POSIX_SOURCE */ + +/* Intelligent device errors */ +#define EPWROFF 82 /* Device power is off */ +#define EDEVERR 83 /* Device error, e.g. paper out */ + +#ifndef _POSIX_SOURCE +#define EOVERFLOW 84 /* Value too large to be stored in data type */ + +/* Program loading errors */ +#define EBADEXEC 85 /* Bad executable */ +#define EBADARCH 86 /* Bad CPU type in executable */ +#define ESHLIBVERS 87 /* Shared library version mismatch */ +#define EBADMACHO 88 /* Malformed Macho file */ + +#define ELAST 88 /* Must be equal largest errno */ +#endif /* _POSIX_SOURCE */ + +#ifdef KERNEL +/* pseudo-errors returned inside kernel to modify return to process */ +#define ERESTART -1 /* restart syscall */ +#define EJUSTRETURN -2 /* don't modify regs, just return */ +#endif +#endif /* _SYS_ERRNO_H_ */ diff --git a/bsd/sys/ev.h b/bsd/sys/ev.h new file mode 100644 index 000000000..2efddee91 --- /dev/null +++ b/bsd/sys/ev.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998 Apple Computer, Inc. All rights reserved */ + +#ifndef _SYS_EV_H_ +#define _SYS_EV_H_ + +#include + +struct eventreq { + int er_type; +#define EV_FD 1 // file descriptor + int er_handle; + void *er_data; + int er_rcnt; + int er_wcnt; + int er_ecnt; + int er_eventbits; +#define EV_RE 1 +#define EV_WR 2 +#define EV_EX 4 +#define EV_RM 8 +#define EV_MASK 0xf +}; + +typedef struct eventreq *er_t; + +#define EV_RBYTES 0x100 +#define EV_WBYTES 0x200 +#define EV_RWBYTES (EV_RBYTES|EV_WBYTES) +#define EV_RCLOSED 0x400 +#define EV_RCONN 0x800 +#define EV_WCLOSED 0x1000 +#define EV_WCONN 0x2000 +#define EV_OOB 0x4000 +#define EV_FIN 0x8000 +#define EV_RESET 0x10000 +#define EV_TIMEOUT 0x20000 +#define EV_DMASK 0xffffff00 + +#ifdef KERNEL + +struct eventqelt { + TAILQ_ENTRY(eventqelt) ee_slist; + TAILQ_ENTRY(eventqelt) ee_plist; + struct eventreq ee_req; + struct proc * ee_proc; + u_int ee_flags; +#define EV_QUEUED 1 + u_int ee_eventmask; + struct socket *ee_sp; +}; + +#endif /* KERNEL */ + +#endif /* _SYS_EV_H_ */ diff --git a/bsd/sys/exec.h b/bsd/sys/exec.h new file mode 100644 index 000000000..3abf356dd --- /dev/null +++ b/bsd/sys/exec.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)exec.h 8.3 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_EXEC_H_ +#define _SYS_EXEC_H_ + +/* + * The following structure is found at the top of the user stack of each + * user process. The ps program uses it to locate argv and environment + * strings. Programs that wish ps to display other information may modify + * it; normally ps_argvstr points to the text for argv[0], and ps_nargvstr + * is the same as the program's argc. The fields ps_envstr and ps_nenvstr + * are the equivalent for the environment. + */ +struct ps_strings { + char *ps_argvstr; /* first of 0 or more argument strings */ + int ps_nargvstr; /* the number of argument strings */ + char *ps_envstr; /* first of 0 or more environment strings */ + int ps_nenvstr; /* the number of environment strings */ +}; + +#include + +#ifdef KERNEL +/* + * Arguments to the exec system call. 
+ */ +struct execve_args { + char *fname; + char **argp; + char **envp; +}; +#endif /* KERNEL */ +#endif /* !_SYS_EXEC_H_ */ + diff --git a/bsd/sys/fcntl.h b/bsd/sys/fcntl.h new file mode 100644 index 000000000..d1af6af91 --- /dev/null +++ b/bsd/sys/fcntl.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1983, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fcntl.h 8.3 (Berkeley) 1/21/94 + */ + + +#ifndef _SYS_FCNTL_H_ +#define _SYS_FCNTL_H_ + +/* + * This file includes the definitions for open and fcntl + * described by POSIX for <fcntl.h>; it also includes + * related kernel definitions. + */ + +#ifndef KERNEL +#include <sys/types.h> +#endif + +/* + * File status flags: these are used by open(2), fcntl(2). 
+ * They are also used (indirectly) in the kernel file structure f_flags, + * which is a superset of the open/fcntl flags. Open flags and f_flags + * are inter-convertible using OFLAGS(fflags) and FFLAGS(oflags). + * Open/fcntl flags begin with O_; kernel-internal flags begin with F. + */ +/* open-only flags */ +#define O_RDONLY 0x0000 /* open for reading only */ +#define O_WRONLY 0x0001 /* open for writing only */ +#define O_RDWR 0x0002 /* open for reading and writing */ +#define O_ACCMODE 0x0003 /* mask for above modes */ + +/* + * Kernel encoding of open mode; separate read and write bits that are + * independently testable: 1 greater than the above. + * + * XXX + * FREAD and FWRITE are excluded from the #ifdef KERNEL so that TIOCFLUSH, + * which was documented to use FREAD/FWRITE, continues to work. + */ +#ifndef _POSIX_SOURCE +#define FREAD 0x0001 +#define FWRITE 0x0002 +#endif +#define O_NONBLOCK 0x0004 /* no delay */ +#define O_APPEND 0x0008 /* set append mode */ +#ifndef _POSIX_SOURCE +#define O_SHLOCK 0x0010 /* open with shared file lock */ +#define O_EXLOCK 0x0020 /* open with exclusive file lock */ +#define O_ASYNC 0x0040 /* signal pgrp when data ready */ +#define O_FSYNC 0x0080 /* synchronous writes */ +#endif +#define O_CREAT 0x0200 /* create if nonexistent */ +#define O_TRUNC 0x0400 /* truncate to zero length */ +#define O_EXCL 0x0800 /* error if already exists */ +#ifdef KERNEL +#define FMARK 0x1000 /* mark during gc() */ +#define FDEFER 0x2000 /* defer for next gc pass */ +#define FHASLOCK 0x4000 /* descriptor holds advisory lock */ +#endif + +/* defined by POSIX 1003.1; BSD default, so no bit required */ +#define O_NOCTTY 0 /* don't assign controlling terminal */ + +#ifdef KERNEL +/* convert from open() flags to/from fflags; convert O_RD/WR to FREAD/FWRITE */ +#define FFLAGS(oflags) ((oflags) + 1) +#define OFLAGS(fflags) ((fflags) - 1) + +/* bits to save after open */ +#define FMASK (FREAD|FWRITE|FAPPEND|FASYNC|FFSYNC|FNONBLOCK) +/* bits settable 
by fcntl(F_SETFL, ...) */ +#define FCNTLFLAGS (FAPPEND|FASYNC|FFSYNC|FNONBLOCK) +#endif + +/* + * The O_* flags used to have only F* names, which were used in the kernel + * and by fcntl. We retain the F* names for the kernel f_flags field + * and for backward compatibility for fcntl. + */ +#ifndef _POSIX_SOURCE +#define FAPPEND O_APPEND /* kernel/compat */ +#define FASYNC O_ASYNC /* kernel/compat */ +#define FFSYNC O_FSYNC /* kernel */ +#define FNONBLOCK O_NONBLOCK /* kernel */ +#define FNDELAY O_NONBLOCK /* compat */ +#define O_NDELAY O_NONBLOCK /* compat */ +#endif + +/* + * Flags used for copyfile(2) + */ + +#ifndef _POSIX_SOURCE +#define CPF_OVERWRITE 1 +#define CPF_IGNORE_MODE 2 +#define CPF_MASK (CPF_OVERWRITE|CPF_IGNORE_MODE) +#endif + +/* + * Constants used for fcntl(2) + */ + +/* command values */ +#define F_DUPFD 0 /* duplicate file descriptor */ +#define F_GETFD 1 /* get file descriptor flags */ +#define F_SETFD 2 /* set file descriptor flags */ +#define F_GETFL 3 /* get file status flags */ +#define F_SETFL 4 /* set file status flags */ +#ifndef _POSIX_SOURCE +#define F_GETOWN 5 /* get SIGIO/SIGURG proc/pgrp */ +#define F_SETOWN 6 /* set SIGIO/SIGURG proc/pgrp */ +#endif +#define F_GETLK 7 /* get record locking information */ +#define F_SETLK 8 /* set record locking information */ +#define F_SETLKW 9 /* F_SETLK; wait if blocked */ +#define F_PREALLOCATE 42 /* Preallocate storage */ +#define F_SETSIZE 43 /* Truncate a file without zeroing space */ +#define F_RDADVISE 44 /* Issue an advisory read async with no copy to user */ +#define F_RDAHEAD 45 /* turn read ahead off/on */ +#define F_READBOOTSTRAP 46 /* Read bootstrap from disk */ +#define F_WRITEBOOTSTRAP 47 /* Write bootstrap on disk */ +#define F_NOCACHE 48 /* turning data caching off/on */ +#define F_LOG2PHYS 49 /* file offset to device offset */ + +/* file descriptor flags (F_GETFD, F_SETFD) */ +#define FD_CLOEXEC 1 /* close-on-exec flag */ + +/* record locking flags (F_GETLK, F_SETLK, F_SETLKW) 
*/ +#define F_RDLCK 1 /* shared or read lock */ +#define F_UNLCK 2 /* unlock */ +#define F_WRLCK 3 /* exclusive or write lock */ +#ifdef KERNEL +#define F_WAIT 0x010 /* Wait until lock is granted */ +#define F_FLOCK 0x020 /* Use flock(2) semantics for lock */ +#define F_POSIX 0x040 /* Use POSIX semantics for lock */ +#endif + +/* allocate flags (F_PREALLOCATE) */ + +#define F_ALLOCATECONTIG 0x00000002 /* allocate contiguous space */ +#define F_ALLOCATEALL 0x00000004 /* allocate all requested space or no space at all */ + +/* Position Modes (fst_posmode) for F_PREALLOCATE */ + +#define F_PEOFPOSMODE 3 /* Make it past all of the SEEK pos modes so that */ + /* we can keep them in sync should we desire */ + +/* + * Advisory file segment locking data type - + * information passed to system by user + */ +struct flock { + off_t l_start; /* starting offset */ + off_t l_len; /* len = 0 means until end of file */ + pid_t l_pid; /* lock owner */ + short l_type; /* lock type: read/write, etc. */ + short l_whence; /* type of l_start */ +}; + + +/* + * advisory file read data type - + * information passed by user to system + */ +struct radvisory { + off_t ra_offset; + int ra_count; +}; + + +#ifndef _POSIX_SOURCE +/* lock operations for flock(2) */ +#define LOCK_SH 0x01 /* shared file lock */ +#define LOCK_EX 0x02 /* exclusive file lock */ +#define LOCK_NB 0x04 /* don't block when locking */ +#define LOCK_UN 0x08 /* unlock file */ +#endif + +/* fstore_t type used by F_DEALLOCATE and F_PREALLOCATE commands */ + +typedef struct fstore { + u_int32_t fst_flags; /* IN: flags word */ + int fst_posmode; /* IN: indicates use of offset field */ + off_t fst_offset; /* IN: start of the region */ + off_t fst_length; /* IN: size of the region */ + off_t fst_bytesalloc; /* OUT: number of bytes allocated */ +} fstore_t; + +/* fbootstraptransfer_t used by F_READBOOTSTRAP and F_WRITEBOOTSTRAP commands */ + +typedef struct fbootstraptransfer { + off_t fbt_offset; /* IN: offset to start read/write 
*/ + size_t fbt_length; /* IN: number of bytes to transfer */ + void *fbt_buffer; /* IN: buffer to be read/written */ +} fbootstraptransfer_t; + +/* + * For F_LOG2PHYS this information is passed back to user + * Currently only devoffset is returned - that is the VOP_BMAP + * result - the disk device address corresponding to the + * current file offset (likely set with an lseek). + * + * The flags could hold an indication of whether the # of + * contiguous bytes reflects the true extent length on disk, + * or is an advisory value that indicates there is at least that + * many bytes contiguous. For some filesystems it might be too + * inefficient to provide anything beyond the advisory value. + * Flags and contiguous bytes return values are not yet implemented. + * For them the fcntl will need to switch from using BMAP to CMAP + * and a per filesystem type flag will be needed to interpret the + * contiguous bytes count result from CMAP. + */ +struct log2phys { + u_int32_t l2p_flags; /* unused so far */ + off_t l2p_contigbytes; /* unused so far */ + off_t l2p_devoffset; /* bytes into device */ +}; + +#ifndef _POSIX_SOURCE +#define O_POPUP 0x80000000 /* force window to popup on open */ +#define O_ALERT 0x20000000 /* small, clean popup window */ +#endif + +#ifndef KERNEL +#include <sys/cdefs.h> + +__BEGIN_DECLS +int open __P((const char *, int, ...)); +int creat __P((const char *, mode_t)); +int fcntl __P((int, int, ...)); +#ifndef _POSIX_SOURCE +int flock __P((int, int)); +#endif /* !_POSIX_SOURCE */ +__END_DECLS +#endif + +#endif /* !_SYS_FCNTL_H_ */ diff --git a/bsd/sys/file.h b/bsd/sys/file.h new file mode 100644 index 000000000..9ddd3affe --- /dev/null +++ b/bsd/sys/file.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)file.h 8.3 (Berkeley) 1/9/95 + */ + +#ifndef _SYS_FILE_H_ +#define _SYS_FILE_H_ + +#include <sys/fcntl.h> +#include <sys/unistd.h> + +#ifdef KERNEL +#include <sys/queue.h> +#include <sys/cdefs.h> + +struct proc; +struct uio; + +/* + * Kernel descriptor table. + * One entry for each open kernel vnode and socket. 
+ */ +struct file { + LIST_ENTRY(file) f_list;/* list of active files */ + short f_flag; /* see fcntl.h */ +#define DTYPE_VNODE 1 /* file */ +#define DTYPE_SOCKET 2 /* communications endpoint */ +#define DTYPE_PSXSHM 3 /* POSIX Shared memory */ +#define DTYPE_PSXSEM 4 /* POSIX Semaphores */ + short f_type; /* descriptor type */ + short f_count; /* reference count */ + short f_msgcount; /* references from message queue */ + struct ucred *f_cred; /* credentials associated with descriptor */ + struct fileops { + int (*fo_read) __P((struct file *fp, struct uio *uio, + struct ucred *cred)); + int (*fo_write) __P((struct file *fp, struct uio *uio, + struct ucred *cred)); + int (*fo_ioctl) __P((struct file *fp, u_long com, + caddr_t data, struct proc *p)); + int (*fo_select) __P((struct file *fp, int which, + struct proc *p)); + int (*fo_close) __P((struct file *fp, struct proc *p)); + } *f_ops; + off_t f_offset; + caddr_t f_data; /* vnode or socket */ +}; + +LIST_HEAD(filelist, file); +extern struct filelist filehead; /* head of list of open files */ +extern int maxfiles; /* kernel limit on number of open files */ +extern int nfiles; /* actual number of open files */ + +__BEGIN_DECLS +int fref __P((struct file *)); /* take a reference on file pointer */ +int frele __P((struct file *)); /* release a reference on file pointer */ +int fcount __P((struct file *)); /* returns the reference count */ +__END_DECLS + +#endif /* KERNEL */ + +#endif /* !_SYS_FILE_H_ */ diff --git a/bsd/sys/filedesc.h b/bsd/sys/filedesc.h new file mode 100644 index 000000000..a0f0bc75a --- /dev/null +++ b/bsd/sys/filedesc.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)filedesc.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_FILEDESC_H_ +#define _SYS_FILEDESC_H_ + +/* + * This structure is used for the management of descriptors. It may be + * shared by multiple processes. + * + * A process is initially started out with NDFILE descriptors [XXXstored within + * this structureXXX], selected to be enough for typical applications based on + * the historical limit of 20 open files (and the usage of descriptors by + * shells). If these descriptors are exhausted, a larger descriptor table + * may be allocated, up to a process' resource limit; [XXXthe internal arrays + * are then unusedXXX]. The initial expansion is set to NDEXTENT; each time + * it runs out, it is doubled until the resource limit is reached. NDEXTENT + * should be selected to be the biggest multiple of OFILESIZE (see below) + * that will fit in a power-of-two sized piece of memory. + */ +#define NDFILE 25 /* 125 bytes */ +#define NDEXTENT 50 /* 250 bytes in 256-byte alloc. */ + +struct filedesc { + struct file **fd_ofiles; /* file structures for open files */ + char *fd_ofileflags; /* per-process open file flags */ + struct vnode *fd_cdir; /* current directory */ + struct vnode *fd_rdir; /* root directory */ + int fd_nfiles; /* number of open files allocated */ + u_short fd_lastfile; /* high-water mark of fd_ofiles */ + u_short fd_freefile; /* approx. 
next free file */ + u_short fd_cmask; /* mask for file creation */ + u_short fd_refcnt; /* reference count */ +}; + +/* + * Per-process open flags. + */ +#define UF_EXCLOSE 0x01 /* auto-close on exec */ +#define UF_MAPPED 0x02 /* mapped from device */ +#define UF_RESERVED 0x04 /* open pending / in progress */ + +/* + * Storage required per open file descriptor. + */ +#define OFILESIZE (sizeof(struct file *) + sizeof(char)) + +#ifdef KERNEL +/* + * Kernel global variables and routines. + */ +extern int dupfdopen __P((struct filedesc *fdp, + int indx, int dfd, int mode, int error)); +extern int fdalloc __P((struct proc *p, int want, int *result)); +extern void fdrelse __P((struct proc *p, int fd)); +extern int fdavail __P((struct proc *p, int n)); +extern int fdgetf __P((struct proc *p, int fd, struct file **resultfp)); +#define fdfile(p, fd) \ + (&(p)->p_fd->fd_ofiles[(fd)]) +#define fdflags(p, fd) \ + (&(p)->p_fd->fd_ofileflags[(fd)]) +extern int falloc __P((struct proc *p, + struct file **resultfp, int *resultfd)); +extern void ffree __P((struct file *fp)); +extern struct filedesc *fdcopy __P((struct proc *p)); +extern void fdfree __P((struct proc *p)); +extern void fdexec __P((struct proc *p)); + +#endif /* KERNEL */ + +#endif /* !_SYS_FILEDESC_H_ */ diff --git a/bsd/sys/filio.h b/bsd/sys/filio.h new file mode 100644 index 000000000..5a29cc8d9 --- /dev/null +++ b/bsd/sys/filio.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)filio.h 8.1 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_FILIO_H_ +#define _SYS_FILIO_H_ + +#include <sys/ioccom.h> + +/* Generic file-descriptor ioctl's. */ +#define FIOCLEX _IO('f', 1) /* set close on exec on fd */ +#define FIONCLEX _IO('f', 2) /* remove close on exec */ +#define FIONREAD _IOR('f', 127, int) /* get # bytes to read */ +#define FIONBIO _IOW('f', 126, int) /* set/clear non-blocking i/o */ +#define FIOASYNC _IOW('f', 125, int) /* set/clear async i/o */ +#define FIOSETOWN _IOW('f', 124, int) /* set owner */ +#define FIOGETOWN _IOR('f', 123, int) /* get owner */ + +#endif /* !_SYS_FILIO_H_ */ diff --git a/bsd/sys/fsctl.h b/bsd/sys/fsctl.h new file mode 100644 index 000000000..63198c7ec --- /dev/null +++ b/bsd/sys/fsctl.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)fsctl.h 8.6 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_FSCTL_H_ +#define _SYS_FSCTL_H_ + +#include <sys/ioccom.h> + +/* get size of mount info struct: */ +#define FSGETMOUNTINFOSIZE _IOR('m', 1, long) + +#ifndef KERNEL + +#include <sys/cdefs.h> + +__BEGIN_DECLS +int fsctl __P((const char *, unsigned long, void *, unsigned long)); +__END_DECLS + +#endif /* !KERNEL */ +#endif /* !_SYS_FSCTL_H_ */ diff --git a/bsd/sys/gmon.h b/bsd/sys/gmon.h new file mode 100644 index 000000000..b368a1fd2 --- /dev/null +++ b/bsd/sys/gmon.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)gmon.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_GMON_H_ +#define _SYS_GMON_H_ + +/* + * Structure prepended to gmon.out profiling data file. + */ +struct gmonhdr { + u_long lpc; /* base pc address of sample buffer */ + u_long hpc; /* max pc address of sampled buffer */ + int ncnt; /* size of sample buffer (plus this header) */ + int version; /* version number */ + int profrate; /* profiling clock rate */ + int spare[3]; /* reserved */ +}; +#define GMONVERSION 0x00051879 + +/* + * histogram counters are unsigned shorts (according to the kernel). + */ +#define HISTCOUNTER unsigned short + +/* + * fraction of text space to allocate for histogram counters here, 1/2 + */ +#define HISTFRACTION 2 + +/* + * Fraction of text space to allocate for from hash buckets. + * The value of HASHFRACTION is based on the minimum number of bytes + * of separation between two subroutine call points in the object code. + * Given MIN_SUBR_SEPARATION bytes of separation the value of + * HASHFRACTION is calculated as: + * + * HASHFRACTION = MIN_SUBR_SEPARATION / (2 * sizeof(short) - 1); + * + * For example, on the VAX, the shortest two call sequence is: + * + * calls $0,(r0) + * calls $0,(r0) + * + * which is separated by only three bytes, thus HASHFRACTION is + * calculated as: + * + * HASHFRACTION = 3 / (2 * 2 - 1) = 1 + * + * Note that the division above rounds down, thus if MIN_SUBR_FRACTION + * is less than three, this algorithm will not work! 
+ * + * In practice, however, call instructions are rarely at a minimal + * distance. Hence, we will define HASHFRACTION to be 2 across all + * architectures. This saves a reasonable amount of space for + * profiling data structures without (in practice) sacrificing + * any granularity. + */ +#define HASHFRACTION 2 + +/* + * percent of text space to allocate for tostructs with a minimum. + */ +#define ARCDENSITY 2 +#define MINARCS 50 +#define MAXARCS ((1 << (8 * sizeof(HISTCOUNTER))) - 2) + +struct tostruct { + u_long selfpc; + long count; + u_short link; + u_short order; +}; + +/* + * a raw arc, with pointers to the calling site and + * the called site and a count. + */ +struct rawarc { + u_long raw_frompc; + u_long raw_selfpc; + long raw_count; +}; + +/* + * general rounding functions. + */ +#define ROUNDDOWN(x,y) (((x)/(y))*(y)) +#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y)) + +/* + * The profiling data structures are housed in this structure. + */ +struct gmonparam { + int state; + u_short *kcount; + u_long kcountsize; + u_short *froms; + u_long fromssize; + struct tostruct *tos; + u_long tossize; + long tolimit; + u_long lowpc; + u_long highpc; + u_long textsize; + u_long hashfraction; +}; +extern struct gmonparam _gmonparam; + +/* + * Possible states of profiling. + */ +#define GMON_PROF_ON 0 +#define GMON_PROF_BUSY 1 +#define GMON_PROF_ERROR 2 +#define GMON_PROF_OFF 3 + +/* + * Sysctl definitions for extracting profiling information from the kernel. + */ +#define GPROF_STATE 0 /* int: profiling enabling variable */ +#define GPROF_COUNT 1 /* struct: profile tick count buffer */ +#define GPROF_FROMS 2 /* struct: from location hash bucket */ +#define GPROF_TOS 3 /* struct: destination/count structure */ +#define GPROF_GMONPARAM 4 /* struct: profiling parameters (see above) */ + +/* + * In order to support more information than in the original mon.out and + * gmon.out files there is an alternate gmon.out file format. 
The alternate + * gmon.out file format starts with a magic number then separates the + * information with gmon_data structs. + */ +#define GMON_MAGIC 0xbeefbabe +struct gmon_data { + unsigned long type; /* constant for type of data following this struct */ + unsigned long size; /* size in bytes of the data following this struct */ +}; + +/* + * The GMONTYPE_SAMPLES gmon_data.type is for the histogram counters described + * above and has a struct gmonhdr followed by the counters. + */ +#define GMONTYPE_SAMPLES 1 +/* + * The GMONTYPE_RAWARCS gmon_data.type is for the raw arcs described above. + */ +#define GMONTYPE_RAWARCS 2 +/* + * The GMONTYPE_ARCS_ORDERS gmon_data.type is for the raw arcs with a call + * order field. The order is a sequence number for the order in which each + * call site was executed. Raw_order values start at 1 not zero. Other than + * the raw_order field this is the same information as in the struct rawarc. + */ +#define GMONTYPE_ARCS_ORDERS 3 +struct rawarc_order { + unsigned long raw_frompc; + unsigned long raw_selfpc; + unsigned long raw_count; + unsigned long raw_order; +}; +/* + * The GMONTYPE_DYLD_STATE gmon_data.type is for the dynamic link editor state + * of the program. + * The information starts with an unsigned long with the count of states: + * image_count + * Then each state follows in the file. The state is made up of + * image_header (the address where dyld loaded this image) + * vmaddr_slide (the amount dyld slid this image from its vmaddress) + * name (the file name dyld loaded this image from) + */ +#define GMONTYPE_DYLD_STATE 4 +#endif /* !_SYS_GMON_H_ */ diff --git a/bsd/sys/ioccom.h b/bsd/sys/ioccom.h new file mode 100644 index 000000000..00e87d9aa --- /dev/null +++ b/bsd/sys/ioccom.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ioccom.h 8.2 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_IOCCOM_H_ +#define _SYS_IOCCOM_H_ + +/* + * Ioctl's have the command encoded in the lower word, and the size of + * any in or out parameters in the upper word. The high 3 bits of the + * upper word are used to encode the in/out status of the parameter. 
+ */ +#define IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */ +#define IOCPARM_LEN(x) (((x) >> 16) & IOCPARM_MASK) +#define IOCBASECMD(x) ((x) & ~(IOCPARM_MASK << 16)) +#define IOCGROUP(x) (((x) >> 8) & 0xff) + +#define IOCPARM_MAX (IOCPARM_MASK + 1) /* max size of ioctl args */ + /* no parameters */ +#define IOC_VOID (unsigned long)0x20000000 + /* copy parameters out */ +#define IOC_OUT (unsigned long)0x40000000 + /* copy parameters in */ +#define IOC_IN (unsigned long)0x80000000 + /* copy parameters in and out */ +#define IOC_INOUT (IOC_IN|IOC_OUT) + /* mask for IN/OUT/VOID */ +#define IOC_DIRMASK (unsigned long)0xe0000000 + +#define _IOC(inout,group,num,len) \ + (inout | ((len & IOCPARM_MASK) << 16) | ((group) << 8) | (num)) +#define _IO(g,n) _IOC(IOC_VOID, (g), (n), 0) +#define _IOR(g,n,t) _IOC(IOC_OUT, (g), (n), sizeof(t)) +#define _IOW(g,n,t) _IOC(IOC_IN, (g), (n), sizeof(t)) +/* this should be _IORW, but stdio got there first */ +#define _IOWR(g,n,t) _IOC(IOC_INOUT, (g), (n), sizeof(t)) + +#endif /* !_SYS_IOCCOM_H_ */ diff --git a/bsd/sys/ioctl.h b/bsd/sys/ioctl.h new file mode 100644 index 000000000..a9dfad6b3 --- /dev/null +++ b/bsd/sys/ioctl.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ioctl.h 8.6 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_IOCTL_H_ +#define _SYS_IOCTL_H_ + +#include + +/* + * Pun for SunOS prior to 3.2. SunOS 3.2 and later support TIOCGWINSZ + * and TIOCSWINSZ (yes, even 3.2-3.5, the fact that it wasn't documented + * nonwithstanding). 
+ */ +struct ttysize { + unsigned short ts_lines; + unsigned short ts_cols; + unsigned short ts_xxx; + unsigned short ts_yyy; +}; +#define TIOCGSIZE TIOCGWINSZ +#define TIOCSSIZE TIOCSWINSZ + +#include + +#include +#include + +#ifndef KERNEL + +#include + +__BEGIN_DECLS +int ioctl __P((int, unsigned long, ...)); +__END_DECLS +#endif /* !KERNEL */ +#endif /* !_SYS_IOCTL_H_ */ + +/* + * Keep outside _SYS_IOCTL_H_ + * Compatibility with old terminal driver + * + * Source level -> #define USE_OLD_TTY + * Kernel level -> options COMPAT_43 or COMPAT_SUNOS + */ +#if defined(USE_OLD_TTY) || COMPAT_43 || defined(COMPAT_SUNOS) || \ + defined(COMPAT_SVR4) || defined(COMPAT_NEXT_3X) +#include +#endif /* !_SYS_IOCTL_H_ */ diff --git a/bsd/sys/ioctl_compat.h b/bsd/sys/ioctl_compat.h new file mode 100644 index 000000000..9ff3a2bc1 --- /dev/null +++ b/bsd/sys/ioctl_compat.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1990, 1993 + * The Regents of the University of California. 
All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ioctl_compat.h 8.4 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_IOCTL_COMPAT_H_ +#define _SYS_IOCTL_COMPAT_H_ + +#include +#include + +struct tchars { + char t_intrc; /* interrupt */ + char t_quitc; /* quit */ + char t_startc; /* start output */ + char t_stopc; /* stop output */ + char t_eofc; /* end-of-file */ + char t_brkc; /* input delimiter (like nl) */ +}; + +struct ltchars { + char t_suspc; /* stop process signal */ + char t_dsuspc; /* delayed stop process signal */ + char t_rprntc; /* reprint line */ + char t_flushc; /* flush output (toggles) */ + char t_werasc; /* word erase */ + char t_lnextc; /* literal next character */ +}; + +/* + * Structure for TIOCGETP and TIOCSETP ioctls. 
+ */ +#ifndef _SGTTYB_ +#define _SGTTYB_ +struct sgttyb { + char sg_ispeed; /* input speed */ + char sg_ospeed; /* output speed */ + char sg_erase; /* erase character */ + char sg_kill; /* kill character */ + short sg_flags; /* mode flags */ +}; +#endif + +#ifdef USE_OLD_TTY +# undef TIOCGETD +# define TIOCGETD _IOR('t', 0, int) /* get line discipline */ +# undef TIOCSETD +# define TIOCSETD _IOW('t', 1, int) /* set line discipline */ +#else +# define OTIOCGETD _IOR('t', 0, int) /* get line discipline */ +# define OTIOCSETD _IOW('t', 1, int) /* set line discipline */ +#endif +#define TIOCHPCL _IO('t', 2) /* hang up on last close */ +#define TIOCGETP _IOR('t', 8,struct sgttyb)/* get parameters -- gtty */ +#define TIOCSETP _IOW('t', 9,struct sgttyb)/* set parameters -- stty */ +#define TIOCSETN _IOW('t',10,struct sgttyb)/* as above, but no flushtty*/ +#define TIOCSETC _IOW('t',17,struct tchars)/* set special characters */ +#define TIOCGETC _IOR('t',18,struct tchars)/* get special characters */ +#define TANDEM 0x00000001 /* send stopc on out q full */ +#define CBREAK 0x00000002 /* half-cooked mode */ +#define LCASE 0x00000004 /* simulate lower case */ +#define ECHO 0x00000008 /* echo input */ +#define CRMOD 0x00000010 /* map \r to \r\n on output */ +#define RAW 0x00000020 /* no i/o processing */ +#define ODDP 0x00000040 /* get/send odd parity */ +#define EVENP 0x00000080 /* get/send even parity */ +#define ANYP 0x000000c0 /* get any parity/send none */ +#define NLDELAY 0x00000300 /* \n delay */ +#define NL0 0x00000000 +#define NL1 0x00000100 /* tty 37 */ +#define NL2 0x00000200 /* vt05 */ +#define NL3 0x00000300 +#define TBDELAY 0x00000c00 /* horizontal tab delay */ +#define TAB0 0x00000000 +#define TAB1 0x00000400 /* tty 37 */ +#define TAB2 0x00000800 +#define XTABS 0x00000c00 /* expand tabs on output */ +#define CRDELAY 0x00003000 /* \r delay */ +#define CR0 0x00000000 +#define CR1 0x00001000 /* tn 300 */ +#define CR2 0x00002000 /* tty 37 */ +#define CR3 0x00003000 
/* concept 100 */ +#define VTDELAY 0x00004000 /* vertical tab delay */ +#define FF0 0x00000000 +#define FF1 0x00004000 /* tty 37 */ +#define BSDELAY 0x00008000 /* \b delay */ +#define BS0 0x00000000 +#define BS1 0x00008000 +#define ALLDELAY (NLDELAY|TBDELAY|CRDELAY|VTDELAY|BSDELAY) +#define CRTBS 0x00010000 /* do backspacing for crt */ +#define PRTERA 0x00020000 /* \ ... / erase */ +#define CRTERA 0x00040000 /* " \b " to wipe out char */ +#define TILDE 0x00080000 /* hazeltine tilde kludge */ +#define MDMBUF 0x00100000 /*start/stop output on carrier*/ +#define LITOUT 0x00200000 /* literal output */ +#define TOSTOP 0x00400000 /*SIGSTOP on background output*/ +#define FLUSHO 0x00800000 /* flush output to terminal */ +#define NOHANG 0x01000000 /* (no-op) was no SIGHUP on carrier drop */ +#define L001000 0x02000000 +#define CRTKIL 0x04000000 /* kill line with " \b " */ +#define PASS8 0x08000000 +#define CTLECH 0x10000000 /* echo control chars as ^X */ +#define PENDIN 0x20000000 /* tp->t_rawq needs reread */ +#define DECCTQ 0x40000000 /* only ^Q starts after ^S */ +#define NOFLSH 0x80000000 /* no output flush on signal */ +#define TIOCLBIS _IOW('t', 127, int) /* bis local mode bits */ +#define TIOCLBIC _IOW('t', 126, int) /* bic local mode bits */ +#define TIOCLSET _IOW('t', 125, int) /* set entire local mode word */ +#define TIOCLGET _IOR('t', 124, int) /* get local modes */ +#define LCRTBS (CRTBS>>16) +#define LPRTERA (PRTERA>>16) +#define LCRTERA (CRTERA>>16) +#define LTILDE (TILDE>>16) +#define LMDMBUF (MDMBUF>>16) +#define LLITOUT (LITOUT>>16) +#define LTOSTOP (TOSTOP>>16) +#define LFLUSHO (FLUSHO>>16) +#define LNOHANG (NOHANG>>16) +#define LCRTKIL (CRTKIL>>16) +#define LPASS8 (PASS8>>16) +#define LCTLECH (CTLECH>>16) +#define LPENDIN (PENDIN>>16) +#define LDECCTQ (DECCTQ>>16) +#define LNOFLSH (NOFLSH>>16) +#define TIOCSLTC _IOW('t',117,struct ltchars)/* set local special chars*/ +#define TIOCGLTC _IOR('t',116,struct ltchars)/* get local special chars*/ +#define 
OTIOCCONS _IO('t', 98) /* for hp300 -- sans int arg */ +#define OTTYDISC 0 +#define NETLDISC 1 +#define NTTYDISC 2 + +#define TIOCGSID _IOR('t', 99, int) /* For svr4 -- get session id */ + +#endif /* !_SYS_IOCTL_COMPAT_H_ */ diff --git a/bsd/sys/ipc.h b/bsd/sys/ipc.h new file mode 100644 index 000000000..2a3f3169a --- /dev/null +++ b/bsd/sys/ipc.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ipc.h 8.4 (Berkeley) 2/19/95 + */ + +/* + * SVID compatible ipc.h file + */ +#ifndef _SYS_IPC_H_ +#define _SYS_IPC_H_ + +struct ipc_perm { + ushort cuid; /* creator user id */ + ushort cgid; /* creator group id */ + ushort uid; /* user id */ + ushort gid; /* group id */ + ushort mode; /* r/w permission */ + ushort seq; /* sequence # (to generate unique msg/sem/shm id) */ + key_t key; /* user specified msg/sem/shm key */ +}; + +/* common mode bits */ +#define IPC_R 000400 /* read permission */ +#define IPC_W 000200 /* write/alter permission */ +#define IPC_M 010000 /* permission to change control info */ + +/* SVID required constants (same values as system 5) */ +#define IPC_CREAT 001000 /* create entry if key does not exist */ +#define IPC_EXCL 002000 /* fail if key exists */ +#define IPC_NOWAIT 004000 /* error if request must wait */ + +#define IPC_PRIVATE (key_t)0 /* private key */ + +#define IPC_RMID 0 /* remove identifier */ +#define IPC_SET 1 /* set options */ +#define IPC_STAT 2 /* get options */ + +#ifdef KERNEL +/* Macros to convert between ipc ids and array indices or sequence ids */ +#define IPCID_TO_IX(id) ((id) & 0xffff) +#define IPCID_TO_SEQ(id) (((id) >> 16) & 0xffff) +#define IXSEQ_TO_IPCID(ix,perm) (((perm.seq) << 16) | (ix & 0xffff)) + +struct ucred; + +int ipcperm __P((struct ucred *, struct ipc_perm *, int)); +#else /* ! KERNEL */ + +/* XXX doesn't really belong here, but has been historical practice in SysV. */ + +#include + +__BEGIN_DECLS +key_t ftok __P((const char *, int)); +__END_DECLS + +#endif /* KERNEL */ + +#endif /* !_SYS_IPC_H_ */ diff --git a/bsd/sys/kdebug.h b/bsd/sys/kdebug.h new file mode 100644 index 000000000..b242cecb4 --- /dev/null +++ b/bsd/sys/kdebug.h @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Copyright (c) 1997 Apple Computer, Inc. All rights reserved. + * + * kdebug.h - kernel_debug definitions + * + */ + +#ifndef BSD_SYS_KDEBUG_H +#define BSD_SYS_KDEBUG_H + +#include +__BEGIN_DECLS + +#include +#if defined(KERNEL_BUILD) +#include +#endif /* KERNEL_BUILD */ + +/* + * types of faults that vm_fault handles + * and creates trace entries for + */ +#define DBG_ZERO_FILL_FAULT 1 +#define DBG_PAGEIN_FAULT 2 +#define DBG_COW_FAULT 3 +#define DBG_CACHE_HIT_FAULT 4 + + +/* The debug code consists of the following +* +* ---------------------------------------------------------------------- +*| | | |Func | +*| Class (8) | SubClass (8) | Code (14) |Qual(2)| +* ---------------------------------------------------------------------- +* The class specifies the higher level +*/ + +/* The Function qualifiers */ +#define DBG_FUNC_START 1 +#define DBG_FUNC_END 2 +#define DBG_FUNC_NONE 0 + + +/* The Kernel Debug Classes */ +#define DBG_MACH 1 +#define DBG_NETWORK 2 +#define DBG_FSYSTEM 3 +#define DBG_BSD 4 +#define DBG_IOKIT 5 +#define DBG_DRIVERS 6 +#define DBG_TRACE 7 +#define DBG_DLIL 8 +#define 
DBG_MISC 20 +#define DBG_MIG 255 + +/* **** The Kernel Debug Sub Classes for Mach (DBG_MACH) **** */ +#define DBG_MACH_EXCP_DFLT 0x03 /* Data Translation Fault */ +#define DBG_MACH_EXCP_IFLT 0x04 /* Inst Translation Fault */ +#define DBG_MACH_EXCP_INTR 0x05 /* Interrupts */ +#define DBG_MACH_EXCP_ALNG 0x06 /* Alignment Exception */ +#define DBG_MACH_EXCP_TRAP 0x07 /* Traps */ +#define DBG_MACH_EXCP_FP 0x08 /* FP Unavail */ +#define DBG_MACH_EXCP_DECI 0x09 /* Decrementer Interrupt */ +#define DBG_MACH_EXCP_SC 0x0C /* System Calls */ +#define DBG_MACH_EXCP_TRACE 0x0D /* Trace exception */ +#define DBG_MACH_IHDLR 0x10 /* Interrupt Handlers */ +#define DBG_MACH_IPC 0x20 /* Inter Process Comm */ +#define DBG_MACH_VM 0x30 /* Virtual Memory */ +#define DBG_MACH_SCHED 0x40 /* Scheduler */ +#define DBG_MACH_MSGID_INVALID 0x50 /* Messages - invalid */ + +/* Codes for Scheduler (DBG_MACH_SCHED) */ +#define MACH_SCHED 0x0 /* Scheduler */ +#define MACH_STACK_ATTACH 0x1 /* stack_attach() */ +#define MACH_STACK_HANDOFF 0x2 /* stack_handoff() */ +#define MACH_CALL_CONT 0x3 /* call_continuation() */ +#define MACH_CALLOUT 0x4 /* callouts */ +#define MACH_STACK_DETACH 0x5 + +/* **** The Kernel Debug Sub Classes for Network (DBG_NETWORK) **** */ +#define DBG_NETIP 1 /* Internet Protocol */ +#define DBG_NETARP 2 /* Address Resolution Protocol */ +#define DBG_NETUDP 3 /* User Datagram Protocol */ +#define DBG_NETTCP 4 /* Transmission Control Protocol */ +#define DBG_NETICMP 5 /* Internet Control Message Protocol */ +#define DBG_NETIGMP 6 /* Internet Group Management Protocol */ +#define DBG_NETRIP 7 /* Routing Information Protocol */ +#define DBG_NETOSPF 8 /* Open Shortest Path First */ +#define DBG_NETISIS 9 /* Intermediate System to Intermediate System */ +#define DBG_NETSNMP 10 /* Simple Network Management Protocol */ +#define DBG_NETSOCK 11 /* Socket Layer */ + +/* For Apple talk */ +#define DBG_NETAARP 100 /* Apple ARP */ +#define DBG_NETDDP 101 /* Datagram Delivery Protocol */ 
+#define DBG_NETNBP 102 /* Name Binding Protocol */ +#define DBG_NETZIP 103 /* Zone Information Protocol */ +#define DBG_NETADSP 104 /* AppleTalk Data Stream Protocol */ +#define DBG_NETATP 105 /* Apple Transaction Protocol */ +#define DBG_NETASP 106 /* Apple Session Protocol */ +#define DBG_NETAFP 107 /* Apple Filing Protocol */ +#define DBG_NETRTMP 108 /* Routing Table Maintenance Protocol */ +#define DBG_NETAURP 109 /* Apple Update Routing Protocol */ + +/* **** The Kernel Debug Sub Classes for IOKIT (DBG_IOKIT) **** */ +#define DBG_IOSCSI 1 /* SCSI */ +#define DBG_IODISK 2 /* Disk layers */ +#define DBG_IONETWORK 3 /* Network layers */ +#define DBG_IOKEYBOARD 4 /* Keyboard */ +#define DBG_IOPOINTING 5 /* Pointing Devices */ +#define DBG_IOAUDIO 6 /* Audio */ +#define DBG_IOFLOPPY 7 /* Floppy */ +#define DBG_IOSERIAL 8 /* Serial */ +#define DBG_IOTTY 9 /* TTY layers */ +#define DBG_IOWORKLOOP 10 /* Work from work loop */ +#define DBG_IOINTES 11 /* Interrupt event source */ +#define DBG_IOCLKES 12 /* Clock event source */ +#define DBG_IOCMDQ 13 /* Command queue latencies */ +#define DBG_IOMCURS 14 /* Memory Cursor */ +#define DBG_IOMDESC 15 /* Memory Descriptors */ + +/* **** The Kernel Debug Sub Classes for Device Drivers (DBG_DRIVERS) **** */ +#define DBG_DRVSCSI 1 /* SCSI */ +#define DBG_DRVDISK 2 /* Disk layers */ +#define DBG_DRVNETWORK 3 /* Network layers */ +#define DBG_DRVKEYBOARD 4 /* Keyboard */ +#define DBG_DRVPOINTING 5 /* Pointing Devices */ +#define DBG_DRVAUDIO 6 /* Audio */ +#define DBG_DRVFLOPPY 7 /* Floppy */ +#define DBG_DRVSERIAL 8 /* Serial */ +#define DBG_DRVSPLT 9 + +/* **** The Kernel Debug Sub Classes for the DLIL Layer (DBG_DLIL) **** */ +#define DBG_DLIL_STATIC 1 /* Static DLIL code */ +#define DBG_DLIL_PR_MOD 2 /* DLIL Protocol Module */ +#define DBG_DLIL_IF_MOD 3 /* DLIL Interface Module */ +#define DBG_DLIL_PR_FLT 4 /* DLIL Protocol Filter */ +#define DBG_DLIL_IF_FLT 5 /* DLIL Interface Filter */ + +/* The Kernel Debug Sub Classes for File 
System */ +#define DBG_FSRW 1 /* reads and writes to the filesystem */ + +/* The Kernel Debug Sub Classes for BSD */ +#define DBG_BSD_EXCP_SC 0x0C /* System Calls */ + +/* The Kernel Debug Sub Classes for DBG_TRACE */ +#define DBG_TRACE_DATA 0 +#define DBG_TRACE_STRING 1 + +/**********************************************************************/ + +#define KDBG_CODE(Class, SubClass, code) (((Class & 0xff) << 24) | ((SubClass & 0xff) << 16) | ((code & 0x3fff) << 2)) + +#define KDBG_MIGCODE(msgid) ((DBG_MIG << 24) | (((msgid) & 0x3fffff) << 2)) + +#define MACHDBG_CODE(SubClass, code) KDBG_CODE(DBG_MACH, SubClass, code) +#define NETDBG_CODE(SubClass, code) KDBG_CODE(DBG_NETWORK, SubClass, code) +#define FSDBG_CODE(SubClass, code) KDBG_CODE(DBG_FSYSTEM, SubClass, code) +#define BSDDBG_CODE(SubClass, code) KDBG_CODE(DBG_BSD, SubClass, code) +#define IOKDBG_CODE(SubClass, code) KDBG_CODE(DBG_IOKIT, SubClass, code) +#define DRVDBG_CODE(SubClass, code) KDBG_CODE(DBG_DRIVERS, SubClass, code) +#define TRACEDBG_CODE(SubClass,code) KDBG_CODE(DBG_TRACE, SubClass, code) +#define MISCDBG_CODE(SubClass,code) KDBG_CODE(DBG_MISC, SubClass, code) +#define DLILDBG_CODE(SubClass,code) KDBG_CODE(DBG_DLIL, SubClass, code) + +/* Usage: +* kernel_debug((KDBG_CODE(DBG_NETWORK, DNET_PROTOCOL, 51) | DBG_FUNC_START), +* offset, 0, 0, 0,0) +* +* For ex, +* +* #include +* +* #define DBG_NETIPINIT NETDBG_CODE(DBG_NETIP,1) +* +* +* void +* ip_init() +* { +* register struct protosw *pr; +* register int i; +* +* KERNEL_DEBUG(DBG_NETIPINIT | DBG_FUNC_START, 0,0,0,0,0) +* -------- +* KERNEL_DEBUG(DBG_NETIPINIT, 0,0,0,0,0) +* -------- +* KERNEL_DEBUG(DBG_NETIPINIT | DBG_FUNC_END, 0,0,0,0,0) +* } +* + +*/ + +extern unsigned int kdebug_enable; +#define KERNEL_DEBUG_CONSTANT(x,a,b,c,d,e) \ +do { \ + if (kdebug_enable) \ + kernel_debug(x,a,b,c,d,e); \ +} while(0) + +extern void kernel_debug(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3, unsigned int arg4, unsigned int arg5); 
+ +extern void kernel_debug1(unsigned int debugid, unsigned int arg1, unsigned int arg2, unsigned int arg3, unsigned int arg4, unsigned int arg5); + +#if KDEBUG + +#define KERNEL_DEBUG(x,a,b,c,d,e) \ +do { \ + if (kdebug_enable) \ + kernel_debug(x,a,b,c,d,e); \ +} while(0) + +#define KERNEL_DEBUG1(x,a,b,c,d,e) \ +do { \ + if (kdebug_enable) \ + kernel_debug1(x,a,b,c,d,e); \ +} while(0) + +#else + +#define KERNEL_DEBUG(x,a,b,c,d,e) +#define KERNEL_DEBUG1(x,a,b,c,d,e) + +#endif + +__END_DECLS + + +#ifdef KERNEL_PRIVATE +/* + * private kernel_debug definitions + */ + +typedef struct { +mach_timespec_t timestamp; +unsigned int arg1; +unsigned int arg2; +unsigned int arg3; +unsigned int arg4; +unsigned int arg5; /* will hold current thread */ +unsigned int debugid; +} kd_buf; + +#define KDBG_THREAD_MASK 0x7fffffff +#define KDBG_CPU_MASK 0x80000000 + +/* Debug Flags */ +#define KDBG_INIT 0x1 +#define KDBG_NOWRAP 0x2 +#define KDBG_FREERUN 0x4 +#define KDBG_WRAPPED 0x8 +#define KDBG_USERFLAGS (KDBG_FREERUN|KDBG_NOWRAP|KDBG_INIT) +#define KDBG_PIDCHECK 0x10 +#define KDBG_MAPINIT 0x20 +#define KDBG_PIDEXCLUDE 0x40 + +typedef struct { + unsigned int type; + unsigned int value1; + unsigned int value2; + unsigned int value3; + unsigned int value4; + +} kd_regtype; + +typedef struct +{ + int nkdbufs; + int nolog; + int flags; + int nkdthreads; +} kbufinfo_t; + +typedef struct +{ + unsigned int thread; + int valid; + char command[20]; +} kd_threadmap; + +#define KDBG_CLASSTYPE 0x10000 +#define KDBG_SUBCLSTYPE 0x20000 +#define KDBG_RANGETYPE 0x40000 +#define KDBG_TYPENONE 0x80000 +#define KDBG_CKTYPES 0xF0000 + +#define KDBG_RANGECHECK 0x100000 +#define KDBG_VALCHECK 0x200000 /* Check up to 4 individual values */ + +#define KDBG_BUFINIT 0x80000000 + +/* Control operations */ +#define KDBG_EFLAGS 1 +#define KDBG_DFLAGS 2 +#define KDBG_ENABLE 3 +#define KDBG_SETNUMBUF 4 +#define KDBG_GETNUMBUF 5 +#define KDBG_SETUP 6 +#define KDBG_REMOVE 7 +#define KDBG_SETREGCODE 8 +#define 
KDBG_GETREGCODE 9 +#define KDBG_READTRACE 10 +#define KDBG_PIDTR 11 +#define KDBG_THRMAP 12 +#define KDBG_PIDEX 14 +#define KDBG_SETRTCDEC 15 + +/* Minimum value allowed when setting decrementer ticks */ +#define KDBG_MINRTCDEC 2500 + + +/* PCSAMPLES control operations */ +#define PCSAMPLE_DISABLE 1 +#define PCSAMPLE_SETNUMBUF 2 +#define PCSAMPLE_GETNUMBUF 3 +#define PCSAMPLE_SETUP 4 +#define PCSAMPLE_REMOVE 5 +#define PCSAMPLE_READBUF 6 +#define PCSAMPLE_SETREG 7 +#define PCSAMPLE_COMM 8 + +#define MAX_PCSAMPLES 1000000 /* Maximum number of pc's in a single buffer */ + + +extern unsigned int pcsample_enable; + +typedef struct +{ + int npcbufs; + int bufsize; + int enable; + unsigned long pcsample_beg; + unsigned long pcsample_end; +} pcinfo_t; + +#endif /* KERNEL_PRIVATE */ + +#endif /* !BSD_SYS_KDEBUG_H */ diff --git a/bsd/sys/kern_event.h b/bsd/sys/kern_event.h new file mode 100644 index 000000000..962005cc2 --- /dev/null +++ b/bsd/sys/kern_event.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. 
All Rights Reserved */ + + +#ifndef SYS_KERN_EVENT_H +#define SYS_KERN_EVENT_H + +#define SYSPROTO_EVENT 1 + +#include + +#define KEVENTS_ON 1 +#define KEV_SNDSPACE (4 * 1024) +#define KEV_RECVSPACE (8 * 1024) + +#define KEV_ANY_VENDOR 0 +#define KEV_ANY_CLASS 0 +#define KEV_ANY_SUBCLASS 0 + +/* + * Vendor Code + */ + +#define KEV_VENDOR_APPLE 1 + + +/* + * Definition of top-level classifications + */ + +#define KEV_NETWORK_CLASS 1 +#define KEV_IOKIT_CLASS 2 + + +struct kern_event_msg { + u_long total_size; /* Size of entire event msg */ + u_long vendor_code; /* For non-Apple extensibility */ + u_long kev_class; /* Layer of event source */ + u_long kev_subclass; /* Component within layer */ + u_long id; /* Monotonically increasing value */ + u_long event_code; /* unique code */ + u_long event_data[1]; /* One or more data longwords */ + +}; + +#define KEV_MSG_HEADER_SIZE (6 * sizeof(u_long)) + + +struct kev_request { + u_long vendor_code; + u_long kev_class; + u_long kev_subclass; +}; + +#define SIOCGKEVID _IOR('e', 1, u_long) +#define SIOCSKEVFILT _IOW('e', 2, struct kev_request) +#define SIOCGKEVFILT _IOR('e', 3, struct kev_request) + +#ifdef KERNEL + +#define N_KEV_VECTORS 5 + +struct kev_d_vectors { + + u_long data_length; /* Length of the event data */ + void *data_ptr; /* Pointer to event data */ +}; + + +struct kev_msg { + u_long vendor_code; /* For non-Apple extensibility */ + u_long kev_class; /* Layer of event source */ + u_long kev_subclass; /* Component within layer */ + u_long event_code; /* The event code */ + struct kev_d_vectors dv[N_KEV_VECTORS]; /* Up to n data vectors */ +}; + + + +LIST_HEAD(kern_event_head, kern_event_pcb); + +struct kern_event_pcb { + LIST_ENTRY(kern_event_pcb) ev_link; /* glue on list of all PCBs */ + struct socket *ev_socket; /* pointer back to socket */ + u_long vendor_code_filter; + u_long class_filter; + u_long subclass_filter; +}; + +#define sotoevpcb(so) ((struct kern_event_pcb *)((so)->so_pcb)) + + +int 
kev_post_msg(struct kev_msg *event); + +#endif + +#endif diff --git a/bsd/sys/kernel.h b/bsd/sys/kernel.h new file mode 100644 index 000000000..91e4c30d7 --- /dev/null +++ b/bsd/sys/kernel.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kernel.h 8.3 (Berkeley) 1/21/94 + */ +#ifndef _SYS_KERNEL_H_ +#define _SYS_KERNEL_H_ + +#ifdef KERNEL + +#include + +/* Global variables for the kernel. 
*/ + +/* 1.1 */ +extern long hostid; +extern char hostname[MAXHOSTNAMELEN]; +extern int hostnamelen; +extern char domainname[MAXHOSTNAMELEN]; +extern int domainnamelen; + +/* 1.2 */ +// extern volatile struct timeval mono_time; +extern struct timeval boottime; +extern struct timeval runtime; +extern volatile struct timeval time; +extern struct timezone tz; /* XXX */ + +extern int tick; /* usec per tick (1000000 / hz) */ +extern int tickfix; /* periodic tick adj. tick not integral */ +extern int tickfixinterval; /* interval at which to apply adjustment */ +extern int tickadj; /* "standard" clock skew, us./tick */ +extern int hz; /* system clock's frequency */ +extern int stathz; /* statistics clock's frequency */ +extern int profhz; /* profiling clock's frequency */ +extern int lbolt; /* once a second sleep address */ +#endif /* KERNEL */ +#endif /* !_SYS_KERNEL_H_ */ + diff --git a/bsd/sys/ktrace.h b/bsd/sys/ktrace.h new file mode 100644 index 000000000..c10ea539b --- /dev/null +++ b/bsd/sys/ktrace.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ktrace.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_KTRACE_H_ +#define _SYS_KTRACE_H_ + +/* + * operations to ktrace system call (KTROP(op)) + */ +#define KTROP_SET 0 /* set trace points */ +#define KTROP_CLEAR 1 /* clear trace points */ +#define KTROP_CLEARFILE 2 /* stop all tracing to file */ +#define KTROP(o) ((o)&3) /* macro to extract operation */ +/* + * flags (ORed in with operation) + */ +#define KTRFLAG_DESCEND 4 /* perform op on all children too */ + +/* + * ktrace record header + */ +struct ktr_header { + int ktr_len; /* length of buf */ + short ktr_type; /* trace record type */ + pid_t ktr_pid; /* process id */ + char ktr_comm[MAXCOMLEN+1]; /* command name */ + struct timeval ktr_time; /* timestamp */ + caddr_t ktr_buf; +}; + +/* + * Test for kernel trace point + */ +#define KTRPOINT(p, type) \ + (((p)->p_traceflag & ((1<<(type))|KTRFAC_ACTIVE)) == (1<<(type))) + +/* + * ktrace record types + */ + +/* + * KTR_SYSCALL - system call record + */ +#define KTR_SYSCALL 1 +struct ktr_syscall { + int ktr_code; /* syscall number */ + int ktr_argsize; /* size of arguments */ + /* + * followed by ktr_argsize/sizeof(register_t) "register_t"s + */ +}; + +/* + * KTR_SYSRET - return from system call record + */ +#define KTR_SYSRET 2 +struct ktr_sysret { + short ktr_code; + short ktr_eosys; + int ktr_error; + int ktr_retval; +}; + +/* + * KTR_NAMEI - namei record + */ +#define KTR_NAMEI 3 + /* record contains pathname */ + +/* + * KTR_GENIO - trace generic process i/o + */ +#define KTR_GENIO 4 +struct ktr_genio { + int ktr_fd; + enum uio_rw ktr_rw; + /* + * followed by data successfully read/written + */ +}; + +/* + * KTR_PSIG - trace processed signal + */ +#define KTR_PSIG 5 +struct ktr_psig { + int signo; + sig_t action; + int mask; + int code; +}; + +/* + * KTR_CSW - trace context switches + */ +#define KTR_CSW 6 +struct ktr_csw { + int out; /* 1 if switch out, 0 if switch in */ + int user; /* 1 if usermode (ivcsw), 0 if kernel (vcsw) */ +}; + +/* + * 
kernel trace points (in p_traceflag) + */ +#define KTRFAC_MASK 0x00ffffff +#define KTRFAC_SYSCALL (1<<KTR_SYSCALL) +#define KTRFAC_SYSRET (1<<KTR_SYSRET) +#define KTRFAC_NAMEI (1<<KTR_NAMEI) +#define KTRFAC_GENIO (1<<KTR_GENIO) +#define KTRFAC_PSIG (1<<KTR_PSIG) +#define KTRFAC_CSW (1<<KTR_CSW) +/* + * trace flags (also in p_traceflag) + */ +#define KTRFAC_ROOT 0x80000000 /* root set this trace */ +#define KTRFAC_INHERIT 0x40000000 /* pass trace flags to children */ +#define KTRFAC_ACTIVE 0x20000000 /* ktrace logging in progress, ignore */ + +#ifndef KERNEL + +#include <sys/cdefs.h> + +__BEGIN_DECLS +int ktrace __P((const char *, int, int, pid_t)); +__END_DECLS + +#endif /* !KERNEL */ +#endif /* !_SYS_KTRACE_H_ */ + diff --git a/bsd/sys/linker_set.h b/bsd/sys/linker_set.h new file mode 100644 index 000000000..e57d1b8d0 --- /dev/null +++ b/bsd/sys/linker_set.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*- + * Copyright (c) 1999 John D. Polstra + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution.
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _SYS_LINKER_SET_H_ +#define _SYS_LINKER_SET_H_ + +/* + * The following macros are used to declare global sets of objects, which + * are collected by the linker into a `struct linker_set' as defined below. + * For ELF, this is done by constructing a separate segment for each set. + * For a.out, it is done automatically by the linker. + */ + +#define __ELF__ +#ifdef __ELF__ + +#define MAKE_SET(seg, set, sym) \ + static void const * const __set_##set##_sym_##sym = &sym; \ + __asm(".section seg, " #set ""); \ + __asm(".long " #sym); + +/* __asm(".previous") */ + + +#define TEXT_SET(set, sym) MAKE_SET(__TEXT, set, sym) +#define DATA_SET(set, sym) MAKE_SET(__DATA, set, sym) +#define BSS_SET(set, sym) MAKE_SET(__BSS, set, sym) +#define ABS_SET(set, sym) MAKE_SET(__ABS, set, sym) + +#else + +/* + * NB: the constants defined below must match those defined in + * nlist.h. Since their calculation requires arithmetic, we + * can't name them symbolically (e.g., 7 is N_DATA | N_EXT). 
+ */ +#define MAKE_SET(set, sym, type) \ + static void const * const __set_##set##_sym_##sym = &sym; \ + __asm(".stabs \"_" #set "\", " #type ", 0, 0, _" #sym) + +#define TEXT_SET(set, sym) MAKE_SET(set, sym, 5) +#define DATA_SET(set, sym) MAKE_SET(set, sym, 7) +#define BSS_SET(set, sym) MAKE_SET(set, sym, 9) +#define ABS_SET(set, sym) MAKE_SET(set, sym, 3) + +#endif + +struct linker_set { + int ls_length; + const void *ls_items[1]; /* really ls_length of them, + * trailing NULL */ +}; + +#endif /* _SYS_LINKER_SET_H_ */ + diff --git a/bsd/sys/loadable_fs.h b/bsd/sys/loadable_fs.h new file mode 100644 index 000000000..5b57f06e5 --- /dev/null +++ b/bsd/sys/loadable_fs.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)loadable_fs.h 2.0 26/06/90 (c) 1990 NeXT */ + +/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + * + * W I L L D R A S T I C A L L Y C H A N G E S O O N + * U S E A T Y O U R O W N R I S K + * + * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ + + +/* + * loadable_fs.h - message struct for loading and initializing loadable + * file systems. + * + * HISTORY + * 26-Jun-90 Doug Mitchell at NeXT + * Created. + * 5-Nov-91 Lee Boynton at NeXT + * Added support for initialization, labels, and WSM options + * 29-Mar-99 A.Ramesh at Apple + * Added to Mac OS X, removed mach message related defns + */ + +#ifndef _LOADABLE_FS_ +#define _LOADABLE_FS_ + + +/* + * Constants for Loadabls FS Utilities (in "/System/Library/Filesystems") + * + * Example of a /usr/filesystems directory + * + * /usr/filesystems/dos.fs/dos.util utility with which WSM + * communicates + * /usr/filesystems/dos.fs/dos.name "DOS Floppy" + * /usr/filesystems/dos.fs/dos_reloc actual loadable filesystem + * /usr/filesystems/dos.fs/dos.openfs.tiff "open folder" icon + * /usr/filesystems/dos.fs/dos.fs.tiff "closed folder" icon + */ +#define FS_DIR_LOCATION "/System/Library/Filesystems" +#define FS_DIR_SUFFIX ".fs" +#define FS_UTIL_SUFFIX ".util" +#define FS_OPEN_SUFFIX ".openfs.tiff" +#define FS_CLOSED_SUFFIX ".fs.tiff" +#define FS_NAME_SUFFIX ".name" +#define FS_LABEL_SUFFIX ".label" + +/* + * .util program commands - all sent in the form "-p" or "-m" ... as argv[1]. 
+ */ +#define FSUC_PROBE 'p' /* probe FS for mount or init */ + /* example usage: foo.util -p fd0 removable writable */ + +#define FSUC_PROBEFORINIT 'P' /* probe FS for init only */ + /* example usage: foo.util -P fd0 removable */ + +#define FSUC_MOUNT 'm' /* mount FS */ + /* example usage: foo.util -m fd0 /bar removable writable */ + +#define FSUC_REPAIR 'r' /* repair ('fsck') FS */ + /* example usage: foo.util -r fd0 removable */ + +#define FSUC_INITIALIZE 'i' /* initialize FS */ + /* example usage: foo.util -i fd0 removable */ + +#define FSUC_UNMOUNT 'u' /* unmount FS */ + /* example usage: foo.util -u fd0 /bar */ + +/* The following is not used by Workspace Manager */ +#define FSUC_MOUNT_FORCE 'M' /* like FSUC_MOUNT, but proceed even on + * error. */ +/* + * Return codes from .util program + */ +#define FSUR_RECOGNIZED (-1) /* response to FSUC_PROBE; implies that + * a mount is possible */ +#define FSUR_UNRECOGNIZED (-2) /* negative response to FSUC_PROBE */ +#define FSUR_IO_SUCCESS (-3) /* mount, unmount, repair succeeded */ +#define FSUR_IO_FAIL (-4) /* unrecoverable I/O error */ +#define FSUR_IO_UNCLEAN (-5) /* mount failed, file system not clean + */ +#define FSUR_INVAL (-6) /* invalid argument */ +#define FSUR_LOADERR (-7) /* kern_loader error */ +#define FSUR_INITRECOGNIZED (-8) /* response to FSUC_PROBE or + * FSUC_PROBEFORINIT, implies that + * initialization is possible */ + +/* + * mount parameters passed from WSM to the .util program. + */ +#define DEVICE_READONLY "readonly" +#define DEVICE_WRITABLE "writable" + +#define DEVICE_REMOVABLE "removable" +#define DEVICE_FIXED "fixed" + +/* + * Additional parameters to the mount command - used by WSM when they + * appear in the /etc/mtab file. + */ +#define MNTOPT_FS "filesystem=" /* e.g. 
"filesystem=DOS" */ +#define MNTOPT_REMOVABLE "removable" + +#endif /* _LOADABLE_FS_ */ diff --git a/bsd/sys/lock.h b/bsd/sys/lock.h new file mode 100644 index 000000000..0e9783854 --- /dev/null +++ b/bsd/sys/lock.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1995 + * The Regents of the University of California. All rights reserved. + * + * This code contains ideas from software contributed to Berkeley by + * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating + * System project at Carnegie-Mellon University. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)lock.h 8.12 (Berkeley) 5/19/95 + */ + +#ifndef _SYS_LOCK_H_ +#define _SYS_LOCK_H_ +#ifdef KERNEL + +#include +#include + +#if defined(simple_lock_init) +#undef simple_lock_init +#endif +#define simple_lock_init(l) usimple_lock_init((l),0) + +#else /* KERNEL */ + +#ifndef _MACHINE_SIMPLE_LOCK_DATA_ +#define _MACHINE_SIMPLE_LOCK_DATA_ + +#include + +#if defined(__ppc__) +struct slock{ + volatile unsigned int lock_data[10]; +}; +#else +struct slock{ + volatile unsigned int lock_data[9]; +}; +#endif +typedef struct slock simple_lock_data_t; +typedef struct slock *simple_lock_t; +#define decl_simple_lock_data(class,name) \ +class simple_lock_data_t name; + +#endif /* _MACHINE_SIMPLE_LOCK_DATA_ */ + +#endif /* KERNEL */ +/* + * The general lock structure. Provides for multiple shared locks, + * upgrading from shared to exclusive, and sleeping until the lock + * can be gained. The simple locks are defined in . + */ +struct lock__bsd__ { + simple_lock_data_t + lk_interlock; /* lock on remaining fields */ + u_int lk_flags; /* see below */ + int lk_sharecount; /* # of accepted shared locks */ + int lk_waitcount; /* # of processes sleeping for lock */ + short lk_exclusivecount; /* # of recursive exclusive locks */ + short lk_prio; /* priority at which to sleep */ + char *lk_wmesg; /* resource sleeping (for tsleep) */ + int lk_timo; /* maximum sleep time (for tsleep) */ + pid_t lk_lockholder; /* pid of exclusive lock holder */ + void *lk_lockthread; /* thread which acquired excl lock */ +}; +/* + * Lock request types: + * LK_SHARED - get one of many possible shared locks. If a process + * holding an exclusive lock requests a shared lock, the exclusive + * lock(s) will be downgraded to shared locks. + * LK_EXCLUSIVE - stop further shared locks, when they are cleared, + * grant a pending upgrade if it exists, then grant an exclusive + * lock. 
Only one exclusive lock may exist at a time, except that + * a process holding an exclusive lock may get additional exclusive + * locks if it explicitly sets the LK_CANRECURSE flag in the lock + * request, or if the LK_CANRECURSE flag was set when the lock was + * initialized. + * LK_UPGRADE - the process must hold a shared lock that it wants to + * have upgraded to an exclusive lock. Other processes may get + * exclusive access to the resource between the time that the upgrade + * is requested and the time that it is granted. + * LK_EXCLUPGRADE - the process must hold a shared lock that it wants to + * have upgraded to an exclusive lock. If the request succeeds, no + * other processes will have gotten exclusive access to the resource + * between the time that the upgrade is requested and the time that + * it is granted. However, if another process has already requested + * an upgrade, the request will fail (see error returns below). + * LK_DOWNGRADE - the process must hold an exclusive lock that it wants + * to have downgraded to a shared lock. If the process holds multiple + * (recursive) exclusive locks, they will all be downgraded to shared + * locks. + * LK_RELEASE - release one instance of a lock. + * LK_DRAIN - wait for all activity on the lock to end, then mark it + * decommissioned. This feature is used before freeing a lock that + * is part of a piece of memory that is about to be freed. + * + * These are flags that are passed to the lockmgr routine. 
+ */ +#define LK_TYPE_MASK 0x0000000f /* type of lock sought */ +#define LK_SHARED 0x00000001 /* shared lock */ +#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */ +#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */ +#define LK_EXCLUPGRADE 0x00000004 /* first shared-to-exclusive upgrade */ +#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */ +#define LK_RELEASE 0x00000006 /* release any type of lock */ +#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */ +/* + * External lock flags. + * + * The first three flags may be set in lock_init to set their mode permanently, + * or passed in as arguments to the lock manager. The LK_REENABLE flag may be + * set only at the release of a lock obtained by drain. + */ +#define LK_EXTFLG_MASK 0x00000070 /* mask of external flags */ +#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */ +#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */ +#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */ +#define LK_REENABLE 0x00000080 /* lock is be reenabled after drain */ +/* + * Internal lock flags. + * + * These flags are used internally to the lock manager. + */ +#define LK_WANT_UPGRADE 0x00000100 /* waiting for share-to-excl upgrade */ +#define LK_WANT_EXCL 0x00000200 /* exclusive lock sought */ +#define LK_HAVE_EXCL 0x00000400 /* exclusive lock obtained */ +#define LK_WAITDRAIN 0x00000800 /* process waiting for lock to drain */ +#define LK_DRAINING 0x00004000 /* lock is being drained */ +#define LK_DRAINED 0x00008000 /* lock has been decommissioned */ +/* + * Control flags + * + * Non-persistent external flags. + */ +#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after + getting lk_interlock */ +#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */ + +/* + * Lock return status. + * + * Successfully obtained locks return 0. 
Locks will always succeed + * unless one of the following is true: + * LK_FORCEUPGRADE is requested and some other process has already + * requested a lock upgrade (returns EBUSY). + * LK_WAIT is set and a sleep would be required (returns EBUSY). + * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK). + * PCATCH is set in lock priority and a signal arrives (returns + * either EINTR or ERESTART if system calls is to be restarted). + * Non-null lock timeout and timeout expires (returns EWOULDBLOCK). + * A failed lock attempt always returns a non-zero error value. No lock + * is held after an error return (in particular, a failed LK_UPGRADE + * or LK_FORCEUPGRADE will have released its shared access lock). + */ + +/* + * Indicator that no process holds exclusive lock + */ +#define LK_KERNPROC ((pid_t) -2) +#define LK_NOPROC ((pid_t) -1) + +struct proc; + +void lockinit __P((struct lock__bsd__ *, int prio, char *wmesg, int timo, + int flags)); +int lockmgr __P((struct lock__bsd__ *, u_int flags, + simple_lock_t, struct proc *p)); +int lockstatus __P((struct lock__bsd__ *)); + +#endif /* _SYS_LOCK_H_ */ diff --git a/bsd/sys/lockf.h b/bsd/sys/lockf.h new file mode 100644 index 000000000..5eaaac6dd --- /dev/null +++ b/bsd/sys/lockf.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Scooter Morris at Genentech Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)lockf.h 8.1 (Berkeley) 6/11/93 + */ + +#ifndef _SYS_LOCKF_H_ +#define _SYS_LOCKF_H_ + +/* + * The lockf structure is a kernel structure which contains the information + * associated with a byte range lock. The lockf structures are linked into + * the inode structure. Locks are sorted by the starting byte of the lock for + * efficiency. + */ +struct lockf { + short lf_flags; /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */ + short lf_type; /* Lock type: F_RDLCK, F_WRLCK */ + off_t lf_start; /* The byte # of the start of the lock */ + off_t lf_end; /* The byte # of the end of the lock (-1=EOF)*/ + caddr_t lf_id; /* The id of the resource holding the lock */ + struct lockf **lf_head; /* Back pointer to the head of lockf list */ + struct lockf *lf_next; /* A pointer to the next lock on this inode */ + struct lockf *lf_block; /* The list of blocked locks */ +}; + +/* Maximum length of sleep chains to traverse to try and detect deadlock. 
*/ +#define MAXDEPTH 50 + +__BEGIN_DECLS +void lf_addblock __P((struct lockf *, struct lockf *)); +int lf_advlock __P((struct lockf **, + off_t, caddr_t, int, struct flock *, int)); +int lf_clearlock __P((struct lockf *)); +int lf_findoverlap __P((struct lockf *, + struct lockf *, int, struct lockf ***, struct lockf **)); +struct lockf * + lf_getblock __P((struct lockf *)); +int lf_getlock __P((struct lockf *, struct flock *)); +int lf_setlock __P((struct lockf *)); +void lf_split __P((struct lockf *, struct lockf *)); +void lf_wakelock __P((struct lockf *)); +__END_DECLS + +#if LOCKF_DEBUG +extern int lockf_debug; + +__BEGIN_DECLS +void lf_print __P((char *, struct lockf *)); +void lf_printlist __P((char *, struct lockf *)); +__END_DECLS +#endif /* LOCKF_DEBUG */ +#endif /* !_SYS_LOCKF_H_ */ diff --git a/bsd/sys/mach_swapon.h b/bsd/sys/mach_swapon.h new file mode 100644 index 000000000..8e76ea9da --- /dev/null +++ b/bsd/sys/mach_swapon.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989,1995 NeXT, Inc. + * All rights reserved. 
+ * + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + */ + +/* + * mach_swapon.h + * + * Definitions for the mach_swapon system call. + * + * HISTORY + * 28-Feb-88 Peter King (king) at NeXT, Inc. + * Created + */ + +#ifndef _MACH_SWAPON_H +#define _MACH_SWAPON_H + +#define MS_PREFER 0x1 /* This device/file is preferred */ + +#endif /* _MACH_SWAPON_H */ diff --git a/bsd/sys/malloc.h b/bsd/sys/malloc.h new file mode 100644 index 000000000..152058bcb --- /dev/null +++ b/bsd/sys/malloc.h @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1987, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)malloc.h 8.5 (Berkeley) 5/3/95 + */ + +#ifndef _SYS_MALLOC_H_ +#define _SYS_MALLOC_H_ + +#define KMEMSTATS + +/* + * flags to malloc + */ +#define M_WAITOK 0x0000 +#define M_NOWAIT 0x0001 + +/* + * Types of memory to be allocated (not all are used by us) + */ +#define M_FREE 0 /* should be on free list */ +#define M_MBUF 1 /* mbuf */ +#define M_DEVBUF 2 /* device driver memory */ +#define M_SOCKET 3 /* socket structure */ +#define M_PCB 4 /* protocol control block */ +#define M_RTABLE 5 /* routing tables */ +#define M_HTABLE 6 /* IMP host tables */ +#define M_FTABLE 7 /* fragment reassembly header */ +#define M_ZOMBIE 8 /* zombie proc status */ +#define M_IFADDR 9 /* interface address */ +#define M_SOOPTS 10 /* socket options */ +#define M_SONAME 11 /* socket name */ +#define M_NAMEI 12 /* namei path name buffer */ +#define M_GPROF 13 /* kernel profiling buffer */ +#define M_IOCTLOPS 14 /* ioctl data buffer */ +#define M_MAPMEM 15 /* mapped memory descriptors */ +#define M_CRED 16 /* credentials */ +#define M_PGRP 17 /* process group header */ +#define M_SESSION 18 /* session header */ +#define M_IOV 19 /* large iov's */ +#define M_MOUNT 20 /* vfs mount struct */ +#define M_FHANDLE 21 /* network file handle */ +#define M_NFSREQ 22 /* NFS request header */ +#define M_NFSMNT 23 /* NFS mount structure */ +#define M_NFSNODE 24 /* NFS vnode private part */ +#define M_VNODE 25 /* Dynamically allocated vnodes */ +#define M_CACHE 26 /* Dynamically allocated cache entries */ +#define M_DQUOT 27 /* UFS quota entries */ +#define M_UFSMNT 28 /* UFS mount structure */ +#define M_SHM 29 /* SVID compatible shared memory segments */ +#define M_VMMAP 30 /* VM map structures */ +#define M_VMMAPENT 31 /* VM map entry structures */ +#define M_VMOBJ 32 /* VM object structure */ +#define M_VMOBJHASH 33 /* VM object hash structure */ +#define M_VMPMAP 34 /* VM pmap */ +#define M_VMPVENT 35 /* VM phys-virt mapping entry */ +#define M_VMPAGER 36 /* XXX: VM pager struct */ 
+#define M_VMPGDATA 37 /* XXX: VM pager private data */ +#define M_FILE 38 /* Open file structure */ +#define M_FILEDESC 39 /* Open file descriptor table */ +#define M_LOCKF 40 /* Byte-range locking structures */ +#define M_PROC 41 /* Proc structures */ +#define M_SUBPROC 42 /* Proc sub-structures */ +#define M_SEGMENT 43 /* Segment for LFS */ +#define M_LFSNODE 44 /* LFS vnode private part */ +#define M_FFSNODE 45 /* FFS vnode private part */ +#define M_MFSNODE 46 /* MFS vnode private part */ +#define M_NQLEASE 47 /* Nqnfs lease */ +#define M_NQMHOST 48 /* Nqnfs host address table */ +#define M_NETADDR 49 /* Export host address structure */ +#define M_NFSSVC 50 /* Nfs server structure */ +#define M_NFSUID 51 /* Nfs uid mapping structure */ +#define M_NFSD 52 /* Nfs server daemon structure */ +#define M_IPMOPTS 53 /* internet multicast options */ +#define M_IPMADDR 54 /* internet multicast address */ +#define M_IFMADDR 55 /* link-level multicast address */ +#define M_MRTABLE 56 /* multicast routing tables */ +#define M_ISOFSMNT 57 /* ISOFS mount structure */ +#define M_ISOFSNODE 58 /* ISOFS vnode private part */ +#define M_NFSRVDESC 59 /* NFS server socket descriptor */ +#define M_NFSDIROFF 60 /* NFS directory offset data */ +#define M_NFSBIGFH 61 /* NFS version 3 file handle */ +#define M_MSDOSFSMNT 62 /* MSDOS FS mount structure */ +#define M_MSDOSFSFAT 63 /* MSDOS FS fat table */ +#define M_MSDOSFSNODE 64 /* MSDOS FS vnode private part */ +#define M_TTYS 65 /* allocated tty structures */ +#define M_EXEC 66 /* argument lists & other mem used by exec */ +#define M_MISCFSMNT 67 /* miscfs mount structures */ +#define M_MISCFSNODE 68 /* miscfs vnode private part */ +#define M_ADOSFSMNT 69 /* adosfs mount structures */ +#define M_ADOSFSNODE 70 /* adosfs vnode private part */ +#define M_ANODE 71 /* adosfs anode structures and tables. 
*/ +#define M_BUFHDR 72 /* File buffer cache headers */ +#define M_OFILETABL 73 /* Open file descriptor table */ +#define M_MCLUST 74 /* mbuf cluster buffers */ +#define M_HFSMNT 75 /* HFS mount structure */ +#define M_HFSNODE 76 /* HFS private node structre */ +#define M_HFSFMETA 77 /* HFS file meta data */ +#define M_VOLFSMNT 78 /* VOLFS mount structure */ +#define M_VOLFSNODE 79 /* VOLFS private node part */ +#define M_TEMP 80 /* misc temporary data buffers */ +#define M_SECA 81 /* security associations, key management */ +#define M_DEVFS 82 +#define M_IPFW 83 /* IP Forwarding/NAT */ +#define M_UDFNODE 84 /* UDF inodes */ +#define M_UDFMNT 85 /* UDF mount structures */ +#define M_IP6NDP 86 /* IPv6 Neighbour Discovery*/ +#define M_IP6OPT 87 /* IPv6 options management */ +#define M_NATPT 88 /* KAME NAT feature */ + +#define M_LAST 89 /* Must be last type + 1 */ + +/* Strings corresponding to types of memory */ +/* Must be in synch with the #defines above */ +#define INITKMEMNAMES { \ + "free", /* 0 M_FREE */ \ + "mbuf", /* 1 M_MBUF */ \ + "devbuf", /* 2 M_DEVBUF */ \ + "socket", /* 3 M_SOCKET */ \ + "pcb", /* 4 M_PCB */ \ + "routetbl", /* 5 M_RTABLE */ \ + "hosttbl", /* 6 M_HTABLE */ \ + "fragtbl", /* 7 M_FTABLE */ \ + "zombie", /* 8 M_ZOMBIE */ \ + "ifaddr", /* 9 M_IFADDR */ \ + "soopts", /* 10 M_SOOPTS */ \ + "soname", /* 11 M_SONAME */ \ + "namei", /* 12 M_NAMEI */ \ + "gprof", /* 13 M_GPROF */ \ + "ioctlops", /* 14 M_IOCTLOPS */ \ + "mapmem", /* 15 M_MAPMEM */ \ + "cred", /* 16 M_CRED */ \ + "pgrp", /* 17 M_PGRP */ \ + "session", /* 18 M_SESSION */ \ + "iov", /* 19 M_IOV */ \ + "mount", /* 20 M_MOUNT */ \ + "fhandle", /* 21 M_FHANDLE */ \ + "NFS req", /* 22 M_NFSREQ */ \ + "NFS mount", /* 23 M_NFSMNT */ \ + "NFS node", /* 24 M_NFSNODE */ \ + "vnodes", /* 25 M_VNODE */ \ + "namecache", /* 26 M_CACHE */ \ + "UFS quota", /* 27 M_DQUOT */ \ + "UFS mount", /* 28 M_UFSMNT */ \ + "shm", /* 29 M_SHM */ \ + "VM map", /* 30 M_VMMAP */ \ + "VM mapent", /* 31 M_VMMAPENT 
*/ \ + "VM object", /* 32 M_VMOBJ */ \ + "VM objhash", /* 33 M_VMOBJHASH */ \ + "VM pmap", /* 34 M_VMPMAP */ \ + "VM pvmap", /* 35 M_VMPVENT */ \ + "VM pager", /* 36 M_VMPAGER */ \ + "VM pgdata", /* 37 M_VMPGDATA */ \ + "file", /* 38 M_FILE */ \ + "file desc", /* 39 M_FILEDESC */ \ + "lockf", /* 40 M_LOCKF */ \ + "proc", /* 41 M_PROC */ \ + "subproc", /* 42 M_SUBPROC */ \ + "LFS segment", /* 43 M_SEGMENT */ \ + "LFS node", /* 44 M_LFSNODE */ \ + "FFS node", /* 45 M_FFSNODE */ \ + "MFS node", /* 46 M_MFSNODE */ \ + "NQNFS Lease", /* 47 M_NQLEASE */ \ + "NQNFS Host", /* 48 M_NQMHOST */ \ + "Export Host", /* 49 M_NETADDR */ \ + "NFS srvsock", /* 50 M_NFSSVC */ \ + "NFS uid", /* 51 M_NFSUID */ \ + "NFS daemon", /* 52 M_NFSD */ \ + "ip_moptions", /* 53 M_IPMOPTS */ \ + "in_multi", /* 54 M_IPMADDR */ \ + "ether_multi", /* 55 M_IFMADDR */ \ + "mrt", /* 56 M_MRTABLE */ \ + "ISOFS mount", /* 57 M_ISOFSMNT */ \ + "ISOFS node", /* 58 M_ISOFSNODE */ \ + "NFSV3 srvdesc",/* 59 M_NFSRVDESC */ \ + "NFSV3 diroff", /* 60 M_NFSDIROFF */ \ + "NFSV3 bigfh", /* 61 M_NFSBIGFH */ \ + "MSDOSFS mount",/* 62 M_MSDOSFSMNT */ \ + "MSDOSFS fat", /* 63 M_MSDOSFSFAT */ \ + "MSDOSFS node", /* 64 M_MSDOSFSNODE */ \ + "ttys", /* 65 M_TTYS */ \ + "exec", /* 66 M_EXEC */ \ + "miscfs mount", /* 67 M_MISCFSMNT */ \ + "miscfs node", /* 68 M_MISCFSNODE */ \ + "adosfs mount", /* 69 M_ADOSFSMNT */ \ + "adosfs node", /* 70 M_ADOSFSNODE */ \ + "adosfs anode", /* 71 M_ANODE */ \ + "buf hdrs", /* 72 M_BUFHDR */ \ + "ofile tabl", /* 73 M_OFILETABL */ \ + "mbuf clust", /* 74 M_MCLUST */ \ + "HFS mount", /* 75 M_HFSMNT */ \ + "HFS node", /* 76 M_HFSNODE */ \ + "HFS fmeta", /* 77 M_HFSFMETA */ \ + "VOLFS mount", /* 78 M_VOLFSMNT */ \ + "VOLFS node", /* 79 M_VOLFSNODE */ \ + "temp", /* 80 M_TEMP */ \ + "key mgmt", /* 81 M_SECA */ \ + "DEVFS", /* 82 M_DEVFS */ \ + "IpFw/IpAcct", /* 83 M_IPFW */ \ + "UDF node", /* 84 M_UDFNODE */ \ + "UDF mount", /* 85 M_UDFMNT */ \ + "IPv6 NDP", /* 86 M_IP6NDP */ \ + "IPv6 options", 
/* 87 M_IP6OPT */ \ + "NATPT", /* 88 M_NATPT */ \ +} + +struct kmemstats { + long ks_inuse; /* # of packets of this type currently + * in use */ + long ks_calls; /* total packets of this type ever allocated */ + long ks_memuse; /* total memory held in bytes */ + u_short ks_limblocks; /* number of times blocked for hitting limit */ + u_short ks_mapblocks; /* number of times blocked for kernel map */ + long ks_maxused; /* maximum number ever used */ + long ks_limit; /* most that are allowed to exist */ + long ks_size; /* sizes of this thing that are allocated */ + long ks_spare; +}; + +#ifdef KERNEL +extern struct kmemstats kmemstats[]; + +/* + * The malloc/free primitives used + * by the BSD kernel code. + */ +#define MALLOC(space, cast, size, type, flags) \ + (space) = (cast)_MALLOC(size, type, flags) + +#define FREE(addr, type) \ + _FREE((void *)addr, type) + +#define MALLOC_ZONE(space, cast, size, type, flags) \ + (space) = (cast)_MALLOC_ZONE(size, type, flags) + +#define FREE_ZONE(addr, size, type) \ + _FREE_ZONE((void *)addr, size, type) + +extern void *_MALLOC __P(( + size_t size, + int type, + int flags)); + +extern void _FREE __P(( + void *addr, + int type)); + +extern void *_MALLOC_ZONE __P(( + size_t size, + int type, + int flags)); + +extern void _FREE_ZONE __P(( + void *elem, + size_t size, + int type)); + +#endif /* KERNEL */ + +#endif /* _SYS_MALLOC_H_ */ diff --git a/bsd/sys/mbuf.h b/bsd/sys/mbuf.h new file mode 100644 index 000000000..7b051386d --- /dev/null +++ b/bsd/sys/mbuf.h @@ -0,0 +1,507 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * Copyright (c) 1994 NeXT Computer, Inc. All rights reserved. + * + * Copyright (c) 1982, 1986, 1988 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mbuf.h 8.3 (Berkeley) 1/21/94 + ********************************************************************** + * HISTORY + * 20-May-95 Mac Gillon (mgillon) at NeXT + * New version based on 4.4 + * Purged old history + */ + +#ifndef _SYS_MBUF_H_ +#define _SYS_MBUF_H_ + +#include + +/* + * Mbufs are of a single size, MSIZE (machine/param.h), which + * includes overhead. An mbuf may add a single "mbuf cluster" of size + * MCLBYTES (also in machine/param.h), which has no additional overhead + * and is used instead of the internal data area; this is done when + * at least MINCLSIZE of data must be stored. 
+ */ + +#define MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */ +#define MHLEN (MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */ + +#define MINCLSIZE (MHLEN + MLEN) /* smallest amount to put in cluster */ +#define M_MAXCOMPRESS (MHLEN / 2) /* max amount to copy for compression */ + +#define NMBPCL (sizeof(union mcluster) / sizeof(struct mbuf)) + + +/* + * Macros for type conversion + * mtod(m,t) - convert mbuf pointer to data pointer of correct type + * dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX) + * mtocl(x) - convert pointer within cluster to cluster index # + * cltom(x) - convert cluster # to ptr to beginning of cluster + */ +#define mtod(m,t) ((t)((m)->m_data)) +#define dtom(x) ((struct mbuf *)((u_long)(x) & ~(MSIZE-1))) +#define mtocl(x) ((union mcluster *)(x) - (union mcluster *)mbutl) +#define cltom(x) ((union mcluster *)(mbutl + (x))) + +#define MCLREF(p) (++mclrefcnt[mtocl(p)]) +#define MCLUNREF(p) (--mclrefcnt[mtocl(p)] == 0) + +/* header at beginning of each mbuf: */ +struct m_hdr { + struct mbuf *mh_next; /* next buffer in chain */ + struct mbuf *mh_nextpkt; /* next chain in queue/record */ + long mh_len; /* amount of data in this mbuf */ + caddr_t mh_data; /* location of data */ + short mh_type; /* type of data in this mbuf */ + short mh_flags; /* flags; see below */ +}; + +/* record/packet header in first mbuf of chain; valid if M_PKTHDR set */ +struct pkthdr { + int len; /* total packet length */ + struct ifnet *rcvif; /* rcv interface */ + + /* variables for ip and tcp reassembly */ + void *header; /* pointer to packet header */ + /* variables for hardware checksum */ + int csum_flags; /* flags regarding checksum */ + int csum_data; /* data field used by csum routines */ + struct mbuf *aux; /* extra data buffer; ipsec/others */ + void *reserved1; /* for future use */ + void *reserved2; /* for future use */ +}; + + +/* description of external storage mapped into mbuf, valid if M_EXT set */ +struct m_ext { + caddr_t 
ext_buf; /* start of buffer */ + void (*ext_free)(); /* free routine if not the usual */ + u_int ext_size; /* size of buffer, for ext_free */ + caddr_t ext_arg; /* additional ext_free argument */ + struct ext_refsq { /* references held */ + struct ext_refsq *forward, *backward; + } ext_refs; +}; + +struct mbuf { + struct m_hdr m_hdr; + union { + struct { + struct pkthdr MH_pkthdr; /* M_PKTHDR set */ + union { + struct m_ext MH_ext; /* M_EXT set */ + char MH_databuf[MHLEN]; + } MH_dat; + } MH; + char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */ + } M_dat; +}; + +#define m_next m_hdr.mh_next +#define m_len m_hdr.mh_len +#define m_data m_hdr.mh_data +#define m_type m_hdr.mh_type +#define m_flags m_hdr.mh_flags +#define m_nextpkt m_hdr.mh_nextpkt +#define m_act m_nextpkt +#define m_pkthdr M_dat.MH.MH_pkthdr +#define m_ext M_dat.MH.MH_dat.MH_ext +#define m_pktdat M_dat.MH.MH_dat.MH_databuf +#define m_dat M_dat.M_databuf + +/* mbuf flags */ +#define M_EXT 0x0001 /* has associated external storage */ +#define M_PKTHDR 0x0002 /* start of record */ +#define M_EOR 0x0004 /* end of record */ +#define M_PROTO1 0x0008 /* protocol-specific */ + +#define M_MIP6TUNNEL 0x0010 /* MIP6 temporary use */ + +/* mbuf pkthdr flags, also in m_flags */ +#define M_BCAST 0x0100 /* send/received as link-level broadcast */ +#define M_MCAST 0x0200 /* send/received as link-level multicast */ +#define M_FRAG 0x0400 /* packet is a fragment of a larger packet */ +#define M_ANYCAST6 0x0800 /* received as IPv6 anycast */ + +/* mbuf pkthdr flags, also in m_flags */ +#define M_AUTHIPHDR 0x1000 /* data origin authentication for IP header */ +#define M_DECRYPTED 0x2000 /* confidentiality */ +#define M_LOOP 0x4000 /* for Mbuf statistics */ +#define M_AUTHIPDGM 0x8000 /* data origin authentication */ + +/* flags copied when copying m_pkthdr */ +#define M_COPYFLAGS (M_PKTHDR|M_EOR|M_BCAST|M_MCAST|M_FRAG|M_ANYCAST6|M_AUTHIPHDR|M_DECRYPTED|M_LOOP|M_AUTHIPDGM) + +/* flags indicating hw checksum support and sw 
checksum requirements [freebsd4.1]*/ +#define CSUM_IP 0x0001 /* will csum IP */ +#define CSUM_TCP 0x0002 /* will csum TCP */ +#define CSUM_UDP 0x0004 /* will csum UDP */ +#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */ +#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */ + +#define CSUM_IP_CHECKED 0x0100 /* did csum IP */ +#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */ +#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */ +#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */ + +#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP) +#define CSUM_DELAY_IP (CSUM_IP) /* XXX add ipv6 here too? */ + + +/* mbuf types */ +#define MT_FREE 0 /* should be on free list */ +#define MT_DATA 1 /* dynamic (data) allocation */ +#define MT_HEADER 2 /* packet header */ +#define MT_SOCKET 3 /* socket structure */ +#define MT_PCB 4 /* protocol control block */ +#define MT_RTABLE 5 /* routing tables */ +#define MT_HTABLE 6 /* IMP host tables */ +#define MT_ATABLE 7 /* address resolution tables */ +#define MT_SONAME 8 /* socket name */ +#define MT_SOOPTS 10 /* socket options */ +#define MT_FTABLE 11 /* fragment reassembly header */ +#define MT_RIGHTS 12 /* access rights */ +#define MT_IFADDR 13 /* interface address */ +#define MT_CONTROL 14 /* extra-data protocol message */ +#define MT_OOBDATA 15 /* expedited data */ +#define MT_MAX 32 /* enough? */ + +/* flags to m_get/MGET */ +/* Need to include malloc.h to get right options for malloc */ +#include + +#define M_DONTWAIT M_NOWAIT +#define M_WAIT M_WAITOK + +/* + * mbuf utility macros: + * + * MBUFLOCK(code) + * prevents a section of code from from being interrupted by network + * drivers. 
+ */ + + +extern +decl_simple_lock_data(, mbuf_slock); +#define MBUF_LOCK() simple_lock(&mbuf_slock); +#define MBUF_UNLOCK() simple_unlock(&mbuf_slock); +#define MBUF_LOCKINIT() simple_lock_init(&mbuf_slock); + + +/* + * mbuf allocation/deallocation macros: + * + * MGET(struct mbuf *m, int how, int type) + * allocates an mbuf and initializes it to contain internal data. + * + * MGETHDR(struct mbuf *m, int how, int type) + * allocates an mbuf and initializes it to contain a packet header + * and internal data. + */ + +#ifdef DIAGNOSE +#define MCHECK(m) if ((m)->m_type != MT_FREE) panic("mget") +#else +#define MCHECK(m) +#endif + +extern struct mbuf *mfree; /* mbuf free list */ +extern simple_lock_data_t mbuf_slock; + +#define _MINTGET(m, type) { \ + MBUF_LOCK(); \ + if (((m) = mfree) != 0) { \ + MCHECK(m); \ + ++mclrefcnt[mtocl(m)]; \ + mbstat.m_mtypes[MT_FREE]--; \ + mbstat.m_mtypes[type]++; \ + mfree = (m)->m_next; \ + } \ + MBUF_UNLOCK(); \ +} + +#define MGET(m, how, type) { \ + _MINTGET(m, type); \ + if (m) { \ + (m)->m_next = (m)->m_nextpkt = 0; \ + (m)->m_type = (type); \ + (m)->m_data = (m)->m_dat; \ + (m)->m_flags = 0; \ + } else \ + (m) = m_retry((how), (type)); \ +} + +#define MGETHDR(m, how, type) { \ + _MINTGET(m, type); \ + if (m) { \ + (m)->m_next = (m)->m_nextpkt = 0; \ + (m)->m_type = (type); \ + (m)->m_data = (m)->m_pktdat; \ + (m)->m_flags = M_PKTHDR; \ + (m)->m_pkthdr.rcvif = NULL; \ + (m)->m_pkthdr.header = NULL; \ + (m)->m_pkthdr.csum_flags = 0; \ + (m)->m_pkthdr.csum_data = 0; \ + (m)->m_pkthdr.aux = (struct mbuf *)NULL; \ + (m)->m_pkthdr.reserved1 = NULL; \ + (m)->m_pkthdr.reserved2 = NULL; \ + } else \ + (m) = m_retryhdr((how), (type)); \ +} + +/* + * Mbuf cluster macros. + * MCLALLOC(caddr_t p, int how) allocates an mbuf cluster. + * MCLGET adds such clusters to a normal mbuf; + * the flag M_EXT is set upon success. 
+ * MCLFREE releases a reference to a cluster allocated by MCLALLOC, + * freeing the cluster if the reference count has reached 0. + * + * Normal mbuf clusters are normally treated as character arrays + * after allocation, but use the first word of the buffer as a free list + * pointer while on the free list. + */ +union mcluster { + union mcluster *mcl_next; + char mcl_buf[MCLBYTES]; +}; + +#define MCLALLOC(p, how) { \ + (void)m_clalloc(1, (how)); \ + if (((p) = (caddr_t)mclfree)) { \ + ++mclrefcnt[mtocl(p)]; \ + mbstat.m_clfree--; \ + mclfree = ((union mcluster *)(p))->mcl_next; \ + } \ + MBUF_UNLOCK(); \ +} + +#define MCLGET(m, how) { \ + MCLALLOC((m)->m_ext.ext_buf, (how)); \ + if ((m)->m_ext.ext_buf) { \ + (m)->m_data = (m)->m_ext.ext_buf; \ + (m)->m_flags |= M_EXT; \ + (m)->m_ext.ext_size = MCLBYTES; \ + (m)->m_ext.ext_free = 0; \ + (m)->m_ext.ext_refs.forward = (m)->m_ext.ext_refs.backward = \ + &(m)->m_ext.ext_refs; \ + } \ +} + +#define MCLFREE(p) { \ + MBUF_LOCK(); \ + if (--mclrefcnt[mtocl(p)] == 0) { \ + ((union mcluster *)(p))->mcl_next = mclfree; \ + mclfree = (union mcluster *)(p); \ + mbstat.m_clfree++; \ + } \ + MBUF_UNLOCK(); \ +} + +#define MCLHASREFERENCE(m) \ + ((m)->m_ext.ext_refs.forward != &((m)->m_ext.ext_refs)) + +/* + * MFREE(struct mbuf *m, struct mbuf *n) + * Free a single mbuf and associated external storage. + * Place the successor, if any, in n. + */ + +#define MFREE(m, n) (n) = m_free(m) + +/* + * Copy mbuf pkthdr from from to to. + * from must have M_PKTHDR set, and to must be empty. + * aux pointer will be moved to `to'. + */ +#define M_COPY_PKTHDR(to, from) { \ + (to)->m_pkthdr = (from)->m_pkthdr; \ + (from)->m_pkthdr.aux = (struct mbuf *)NULL; \ + (to)->m_flags = (from)->m_flags & M_COPYFLAGS; \ + (to)->m_data = (to)->m_pktdat; \ +} + +/* + * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place + * an object of the specified size at the end of the mbuf, longword aligned. 
+ */ +#define M_ALIGN(m, len) \ + { (m)->m_data += (MLEN - (len)) &~ (sizeof(long) - 1); } +/* + * As above, for mbufs allocated with m_gethdr/MGETHDR + * or initialized by M_COPY_PKTHDR. + */ +#define MH_ALIGN(m, len) \ + { (m)->m_data += (MHLEN - (len)) &~ (sizeof(long) - 1); } + +/* + * Compute the amount of space available + * before the current start of data in an mbuf. + * Subroutine - data not available if certain references. + */ +int m_leadingspace(struct mbuf *); +#define M_LEADINGSPACE(m) m_leadingspace(m) + +/* + * Compute the amount of space available + * after the end of data in an mbuf. + * Subroutine - data not available if certain references. + */ +int m_trailingspace(struct mbuf *); +#define M_TRAILINGSPACE(m) m_trailingspace(m) + +/* + * Arrange to prepend space of size plen to mbuf m. + * If a new mbuf must be allocated, how specifies whether to wait. + * If how is M_DONTWAIT and allocation fails, the original mbuf chain + * is freed and m is set to NULL. + */ +#define M_PREPEND(m, plen, how) { \ + if (M_LEADINGSPACE(m) >= (plen)) { \ + (m)->m_data -= (plen); \ + (m)->m_len += (plen); \ + } else \ + (m) = m_prepend((m), (plen), (how)); \ + if ((m) && (m)->m_flags & M_PKTHDR) \ + (m)->m_pkthdr.len += (plen); \ +} + +/* change mbuf to new type */ +#define MCHTYPE(m, t) { \ + MBUF_LOCK(); \ + mbstat.m_mtypes[(m)->m_type]--; \ + mbstat.m_mtypes[t]++; \ + (m)->m_type = t; \ + MBUF_UNLOCK(); \ +} + +/* length to m_copy to copy all */ +#define M_COPYALL 1000000000 + +/* compatibility with 4.3 */ +#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT) + +/* + * Mbuf statistics. 
+ */ +struct mbstat { + u_long m_mbufs; /* mbufs obtained from page pool */ + u_long m_clusters; /* clusters obtained from page pool */ + u_long m_spare; /* spare field */ + u_long m_clfree; /* free clusters */ + u_long m_drops; /* times failed to find space */ + u_long m_wait; /* times waited for space */ + u_long m_drain; /* times drained protocols for space */ + u_short m_mtypes[256]; /* type specific mbuf allocations */ + u_long m_mcfail; /* times m_copym failed */ + u_long m_mpfail; /* times m_pullup failed */ + u_long m_msize; /* length of an mbuf */ + u_long m_mclbytes; /* length of an mbuf cluster */ + u_long m_minclsize; /* min length of data to allocate a cluster */ + u_long m_mlen; /* length of data in an mbuf */ + u_long m_mhlen; /* length of data in a header mbuf */ +}; + +/* + * pkthdr.aux type tags. + */ +struct mauxtag { + int af; + int type; +}; + +#ifdef KERNEL +extern union mcluster *mbutl; /* virtual address of mclusters */ +extern union mcluster *embutl; /* ending virtual address of mclusters */ +extern short *mclrefcnt; /* cluster reference counts */ +extern int *mcl_paddr; /* physical addresses of clusters */ +extern struct mbstat mbstat; /* statistics */ +extern int nmbclusters; /* number of mapped clusters */ +extern union mcluster *mclfree; /* free mapped cluster list */ +extern int max_linkhdr; /* largest link-level header */ +extern int max_protohdr; /* largest protocol header */ +extern int max_hdr; /* largest link+protocol header */ +extern int max_datalen; /* MHLEN - max_hdr */ + +struct mbuf *m_copym __P((struct mbuf *, int, int, int)); +struct mbuf *m_free __P((struct mbuf *)); +struct mbuf *m_get __P((int, int)); +struct mbuf *m_getpacket __P((void)); +struct mbuf *m_getclr __P((int, int)); +struct mbuf *m_gethdr __P((int, int)); +struct mbuf *m_prepend __P((struct mbuf *, int, int)); +struct mbuf *m_pullup __P((struct mbuf *, int)); +struct mbuf *m_retry __P((int, int)); +struct mbuf *m_retryhdr __P((int, int)); +void m_adj 
__P((struct mbuf *, int)); +int m_clalloc __P((int, int)); +void m_freem __P((struct mbuf *)); +int m_freem_list __P((struct mbuf *)); +struct mbuf *m_devget __P((char *, int, int, struct ifnet *, void (*)())); +char *mcl_to_paddr __P((char *)); +struct mbuf *m_aux_add __P((struct mbuf *, int, int)); +struct mbuf *m_aux_find __P((struct mbuf *, int, int)); +void m_aux_delete __P((struct mbuf *, struct mbuf *)); +#endif +#endif /* !_SYS_MBUF_H_ */ diff --git a/bsd/sys/md5.h b/bsd/sys/md5.h new file mode 100644 index 000000000..14ce796cc --- /dev/null +++ b/bsd/sys/md5.h @@ -0,0 +1,50 @@ +/* MD5.H - header file for MD5C.C + * $Id: md5.h,v 1.2 2000/09/14 20:35:28 lindak Exp $ + */ + +/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All +rights reserved. + +License to copy and use this software is granted provided that it +is identified as the "RSA Data Security, Inc. MD5 Message-Digest +Algorithm" in all material mentioning or referencing this software +or this function. + +License is also granted to make and use derivative works provided +that such works are identified as "derived from the RSA Data +Security, Inc. MD5 Message-Digest Algorithm" in all material +mentioning or referencing the derived work. + +RSA Data Security, Inc. makes no representations concerning either +the merchantability of this software or the suitability of this +software for any particular purpose. It is provided "as is" +without express or implied warranty of any kind. + +These notices must be retained in any copies of any part of this +documentation and/or software. + */ + +#ifndef _SYS_MD5_H_ +#define _SYS_MD5_H_ +/* MD5 context. 
*/ +typedef struct MD5Context { + u_int32_t state[4]; /* state (ABCD) */ + u_int32_t count[2]; /* number of bits, modulo 2^64 (lsb first) */ + unsigned char buffer[64]; /* input buffer */ +} MD5_CTX; + +#include + +__BEGIN_DECLS +void MD5Init (MD5_CTX *); +void MD5Update (MD5_CTX *, const unsigned char *, unsigned int); +void MD5Pad (MD5_CTX *); +void MD5Final (unsigned char [16], MD5_CTX *); +char * MD5End(MD5_CTX *, char *); +char * MD5File(const char *, char *); +char * MD5Data(const unsigned char *, unsigned int, char *); +#ifdef KERNEL +void MD5Transform __P((u_int32_t [4], const unsigned char [64])); +#endif +__END_DECLS +#endif /* _SYS_MD5_H_ */ diff --git a/bsd/sys/mman.h b/bsd/sys/mman.h new file mode 100644 index 000000000..3b216759f --- /dev/null +++ b/bsd/sys/mman.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)mman.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_MMAN_H_ +#define _SYS_MMAN_H_ + +#include + +/* + * Protections are chosen from these bits, or-ed together + */ +#define PROT_NONE 0x00 /* no permissions */ +#define PROT_READ 0x01 /* pages can be read */ +#define PROT_WRITE 0x02 /* pages can be written */ +#define PROT_EXEC 0x04 /* pages can be executed */ + +/* + * Flags contain sharing type and options. + * Sharing types; choose one. + */ +#define MAP_SHARED 0x0001 /* share changes */ +#define MAP_PRIVATE 0x0002 /* changes are private */ +#define MAP_COPY MAP_PRIVATE /* Obsolete */ + +/* + * Other flags + */ +#define MAP_FIXED 0x0010 /* map addr must be exactly as requested */ +#define MAP_RENAME 0x0020 /* Sun: rename private pages to file */ +#define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */ +#define MAP_INHERIT 0x0080 /* region is retained after exec */ +#define MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */ +#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */ + +#ifdef _P1003_1B_VISIBLE +/* + * Process memory locking + */ +#define MCL_CURRENT 0x0001 /* Lock only current memory */ +#define MCL_FUTURE 0x0002 /* Lock all future memory as well */ + +#endif /* _P1003_1B_VISIBLE */ + +/* + * Error return from mmap() + */ +#define MAP_FAILED ((void *)-1) + +/* + * msync() flags + */ +#define MS_SYNC 0x0000 /* msync synchronously */ +#define MS_ASYNC 0x0001 /* return immediately */ +#define MS_INVALIDATE 0x0002 /* invalidate all cached data */ + +#ifndef _POSIX_SOURCE +#define MS_KILLPAGES 0x0004 /* invalidate pages, leave mapped */ +#define MS_DEACTIVATE 0x0008 /* deactivate pages, leave mapped */ +#endif + +/* + * Mapping type + */ +#define MAP_FILE 0x0000 /* map from file (default) */ +#define MAP_ANON 0x1000 /* allocated from memory, swap space */ + +/* + * Advice to madvise + */ +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ 
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_DONTNEED 4 /* dont need these pages */ +#define MADV_FREE 5 /* dont need these pages, and junk contents */ + +/* + * Return bits from mincore + */ +#define MINCORE_INCORE 0x1 /* Page is incore */ +#define MINCORE_REFERENCED 0x2 /* Page has been referenced by us */ +#define MINCORE_MODIFIED 0x4 /* Page has been modified by us */ +#define MINCORE_REFERENCED_OTHER 0x8 /* Page has been referenced */ +#define MINCORE_MODIFIED_OTHER 0x10 /* Page has been modified */ + +#ifndef KERNEL + +#include + +__BEGIN_DECLS +#ifdef _P1003_1B_VISIBLE +int mlockall __P((int)); +int munlockall __P((void)); +#endif /* _P1003_1B_VISIBLE */ +int mlock __P((const void *, size_t)); +#ifndef _MMAP_DECLARED +#define _MMAP_DECLARED +void * mmap __P((void *, size_t, int, int, int, off_t)); +#endif +int mprotect __P((const void *, size_t, int)); +int msync __P((void *, size_t, int)); +int munlock __P((const void *, size_t)); +int munmap __P((void *, size_t)); +int shm_open __P((const char *, int, ...)); +int shm_unlink __P((const char *)); +#ifndef _POSIX_SOURCE +int load_shared_file __P((char *, caddr_t, u_long, caddr_t *, int, sf_mapping_t *, int *)); +int reset_shared_file __P((caddr_t *, int, sf_mapping_t *)); +int madvise __P((void *, size_t, int)); +int mincore __P((const void *, size_t, char *)); +int minherit __P((void *, size_t, int)); +#endif +__END_DECLS + +#endif /* !KERNEL */ +#endif /* !_SYS_MMAN_H_ */ diff --git a/bsd/sys/mount.h b/bsd/sys/mount.h new file mode 100644 index 000000000..4a939095c --- /dev/null +++ b/bsd/sys/mount.h @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mount.h 8.21 (Berkeley) 5/20/95 + */ + +#ifndef _SYS_MOUNT_H_ +#define _SYS_MOUNT_H_ + +#ifndef KERNEL +#include +#endif +#include +#include +#include +#include /* XXX for AF_MAX */ + +typedef struct fsid { int32_t val[2]; } fsid_t; /* file system id type */ + +/* + * File identifier. + * These are unique per filesystem on a single machine. + */ +#define MAXFIDSZ 16 + +struct fid { + u_short fid_len; /* length of data in bytes */ + u_short fid_reserved; /* force longword alignment */ + char fid_data[MAXFIDSZ]; /* data (variable length) */ +}; + +/* + * file system statistics + */ + +#define MFSNAMELEN 15 /* length of fs type name, not inc. 
null */ +#define MNAMELEN 90 /* length of buffer for returned name */ + +struct statfs { + short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ + short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ + long f_bsize; /* fundamental file system block size */ + long f_iosize; /* optimal transfer block size */ + long f_blocks; /* total data blocks in file system */ + long f_bfree; /* free blocks in fs */ + long f_bavail; /* free blocks avail to non-superuser */ + long f_files; /* total file nodes in file system */ + long f_ffree; /* free file nodes in fs */ + fsid_t f_fsid; /* file system id */ + uid_t f_owner; /* user that mounted the filesystem */ + short f_reserved1; /* spare for later */ + short f_type; /* type of filesystem */ + long f_flags; /* copy of mount exported flags */ + long f_reserved2[2]; /* reserved for future use */ + char f_fstypename[MFSNAMELEN]; /* fs type name */ + char f_mntonname[MNAMELEN]; /* directory on which mounted */ + char f_mntfromname[MNAMELEN];/* mounted filesystem */ +#if COMPAT_GETFSSTAT + char f_reserved3[0]; /* For alignment */ + long f_reserved4[0]; /* For future use */ +#else + char f_reserved3; /* For alignment */ + long f_reserved4[4]; /* For future use */ +#endif +}; + +/* + * Structure per mounted file system. Each mounted file system has an + * array of operations and an instance record. The file systems are + * put on a doubly linked list. 
+ */ +LIST_HEAD(vnodelst, vnode); + +struct mount { + CIRCLEQ_ENTRY(mount) mnt_list; /* mount list */ + struct vfsops *mnt_op; /* operations on fs */ + struct vfsconf *mnt_vfc; /* configuration info */ + struct vnode *mnt_vnodecovered; /* vnode we mounted on */ + struct vnodelst mnt_vnodelist; /* list of vnodes this mount */ + struct lock__bsd__ mnt_lock; /* mount structure lock */ + int mnt_flag; /* flags */ + int mnt_kern_flag; /* kernel only flags */ + int mnt_maxsymlinklen; /* max size of short symlink */ + struct statfs mnt_stat; /* cache of filesystem stats */ + qaddr_t mnt_data; /* private data */ +}; + +/* + * User specifiable flags. + * + * Unmount uses MNT_FORCE flag. + */ +#define MNT_RDONLY 0x00000001 /* read only filesystem */ +#define MNT_SYNCHRONOUS 0x00000002 /* file system written synchronously */ +#define MNT_NOEXEC 0x00000004 /* can't exec from filesystem */ +#define MNT_NOSUID 0x00000008 /* don't honor setuid bits on fs */ +#define MNT_NODEV 0x00000010 /* don't interpret special files */ +#define MNT_UNION 0x00000020 /* union with underlying filesystem */ +#define MNT_ASYNC 0x00000040 /* file system written asynchronously */ +#define MNT_DONTBROWSE 0x00100000 /* file system is not appropriate path to user data */ +#define MNT_UNKNOWNPERMISSIONS 0x00200000 /* no known mapping for uid/gid in permissions information on disk */ +#define MNT_AUTOMOUNTED 0x00400000 /* filesystem was mounted by automounter */ + +/* + * NFS export related mount flags. + */ +#define MNT_EXRDONLY 0x00000080 /* exported read only */ +#define MNT_EXPORTED 0x00000100 /* file system is exported */ +#define MNT_DEFEXPORTED 0x00000200 /* exported to the world */ +#define MNT_EXPORTANON 0x00000400 /* use anon uid mapping for everyone */ +#define MNT_EXKERB 0x00000800 /* exported with Kerberos uid mapping */ + +/* + * Flags set by internal operations. 
+ */ +#define MNT_LOCAL 0x00001000 /* filesystem is stored locally */ +#define MNT_QUOTA 0x00002000 /* quotas are enabled on filesystem */ +#define MNT_ROOTFS 0x00004000 /* identifies the root filesystem */ +#define MNT_DOVOLFS 0x00008000 /* FS supports volfs */ +#define MNT_FIXEDSCRIPTENCODING 0x10000000 /* FS supports only fixed script encoding [HFS] */ + +/* + * XXX I think that this could now become (~(MNT_CMDFLAGS)) + * but the 'mount' program may need changing to handle this. + */ +#define MNT_VISFLAGMASK (MNT_RDONLY | MNT_SYNCHRONOUS | MNT_NOEXEC | \ + MNT_NOSUID | MNT_NODEV | MNT_UNION | \ + MNT_ASYNC | MNT_EXRDONLY | MNT_EXPORTED | \ + MNT_DEFEXPORTED | MNT_EXPORTANON| MNT_EXKERB | \ + MNT_LOCAL | MNT_QUOTA | \ + MNT_ROOTFS | MNT_DOVOLFS | MNT_DONTBROWSE | \ + MNT_UNKNOWNPERMISSIONS | MNT_AUTOMOUNTED | MNT_FIXEDSCRIPTENCODING ) +/* + * External filesystem command modifier flags. + * Unmount can use the MNT_FORCE flag. + * XXX These are not STATES and really should be somewhere else. + * External filesystem control flags. + */ +#define MNT_UPDATE 0x00010000 /* not a real mount, just an update */ +#define MNT_DELEXPORT 0x00020000 /* delete export host lists */ +#define MNT_RELOAD 0x00040000 /* reload filesystem data */ +#define MNT_FORCE 0x00080000 /* force unmount or readonly change */ +#define MNT_CMDFLAGS (MNT_UPDATE|MNT_DELEXPORT|MNT_RELOAD|MNT_FORCE) + +/* + * Internal filesystem control flags stored in mnt_kern_flag. + * + * MNTK_UNMOUNT locks the mount entry so that name lookup cannot proceed + * past the mount point. This keeps the subtree stable during mounts + * and unmounts. + */ +#define MNTK_UNMOUNT 0x01000000 /* unmount in progress */ +#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */ +#define MNTK_WANTRDWR 0x04000000 /* upgrade to read/write requested */ +#if REV_ENDIAN_FS +#define MNT_REVEND 0x08000000 /* Reverse endian FS */ +#endif /* REV_ENDIAN_FS */ +/* + * Sysctl CTL_VFS definitions. 
+ * + * Second level identifier specifies which filesystem. Second level + * identifier VFS_GENERIC returns information about all filesystems. + */ +#define VFS_GENERIC 0 /* generic filesystem information */ +#define VFS_NUMMNTOPS 1 /* int: total num of vfs mount/unmount operations */ +/* + * Third level identifiers for VFS_GENERIC are given below; third + * level identifiers for specific filesystems are given in their + * mount specific header files. + */ +#define VFS_MAXTYPENUM 1 /* int: highest defined filesystem type */ +#define VFS_CONF 2 /* struct: vfsconf for filesystem given + as next argument */ +/* + * Flags for various system call interfaces. + * + * waitfor flags to vfs_sync() and getfsstat() + */ +#define MNT_WAIT 1 /* synchronously wait for I/O to complete */ +#define MNT_NOWAIT 2 /* start all I/O, but do not wait for it */ + +/* + * Generic file handle + */ +struct fhandle { + fsid_t fh_fsid; /* File system id of mount point */ + struct fid fh_fid; /* File sys specific id */ +}; +typedef struct fhandle fhandle_t; + +/* + * Export arguments for local filesystem mount calls. + */ +struct export_args { + int ex_flags; /* export related flags */ + uid_t ex_root; /* mapping for root uid */ + struct ucred ex_anon; /* mapping for anonymous user */ + struct sockaddr *ex_addr; /* net address to which exported */ + int ex_addrlen; /* and the net address length */ + struct sockaddr *ex_mask; /* mask of valid bits in saddr */ + int ex_masklen; /* and the smask length */ +}; + +/* + * Filesystem configuration information. One of these exists for each + * type of filesystem supported by the kernel. These are searched at + * mount time to identify the requested filesystem. 
+ */ +struct vfsconf { + struct vfsops *vfc_vfsops; /* filesystem operations vector */ + char vfc_name[MFSNAMELEN]; /* filesystem type name */ + int vfc_typenum; /* historic filesystem type number */ + int vfc_refcount; /* number mounted of this type */ + int vfc_flags; /* permanent flags */ + int (*vfc_mountroot)(void); /* if != NULL, routine to mount root */ + struct vfsconf *vfc_next; /* next in list */ +}; + +#ifdef KERNEL + +extern int maxvfsconf; /* highest defined filesystem type */ +extern struct vfsconf *vfsconf; /* head of list of filesystem types */ +extern int maxvfsslots; /* Maximum slots available to be used */ +extern int numused_vfsslots; /* number of slots already used */ + +int vfsconf_add __P((struct vfsconf *)); +int vfsconf_del __P((char *)); + +/* + * Operations supported on mounted file system. + */ +#ifdef __STDC__ +struct nameidata; +struct mbuf; +#endif + +struct vfsops { + int (*vfs_mount) __P((struct mount *mp, char *path, caddr_t data, + struct nameidata *ndp, struct proc *p)); + int (*vfs_start) __P((struct mount *mp, int flags, + struct proc *p)); + int (*vfs_unmount) __P((struct mount *mp, int mntflags, + struct proc *p)); + int (*vfs_root) __P((struct mount *mp, struct vnode **vpp)); + int (*vfs_quotactl) __P((struct mount *mp, int cmds, uid_t uid, + caddr_t arg, struct proc *p)); + int (*vfs_statfs) __P((struct mount *mp, struct statfs *sbp, + struct proc *p)); + int (*vfs_sync) __P((struct mount *mp, int waitfor, + struct ucred *cred, struct proc *p)); + int (*vfs_vget) __P((struct mount *mp, void *ino, + struct vnode **vpp)); + int (*vfs_fhtovp) __P((struct mount *mp, struct fid *fhp, + struct mbuf *nam, struct vnode **vpp, + int *exflagsp, struct ucred **credanonp)); + int (*vfs_vptofh) __P((struct vnode *vp, struct fid *fhp)); + int (*vfs_init) __P((struct vfsconf *)); + int (*vfs_sysctl) __P((int *, u_int, void *, size_t *, void *, + size_t, struct proc *)); +}; + +#define VFS_MOUNT(MP, PATH, DATA, NDP, P) \ + 
(*(MP)->mnt_op->vfs_mount)(MP, PATH, DATA, NDP, P) +#define VFS_START(MP, FLAGS, P) (*(MP)->mnt_op->vfs_start)(MP, FLAGS, P) +#define VFS_UNMOUNT(MP, FORCE, P) (*(MP)->mnt_op->vfs_unmount)(MP, FORCE, P) +#define VFS_ROOT(MP, VPP) (*(MP)->mnt_op->vfs_root)(MP, VPP) +#define VFS_QUOTACTL(MP,C,U,A,P) (*(MP)->mnt_op->vfs_quotactl)(MP, C, U, A, P) +#define VFS_STATFS(MP, SBP, P) (*(MP)->mnt_op->vfs_statfs)(MP, SBP, P) +#define VFS_SYNC(MP, WAIT, C, P) (*(MP)->mnt_op->vfs_sync)(MP, WAIT, C, P) +#define VFS_VGET(MP, INO, VPP) (*(MP)->mnt_op->vfs_vget)(MP, INO, VPP) +#define VFS_FHTOVP(MP, FIDP, NAM, VPP, EXFLG, CRED) \ + (*(MP)->mnt_op->vfs_fhtovp)(MP, FIDP, NAM, VPP, EXFLG, CRED) +#define VFS_VPTOFH(VP, FIDP) (*(VP)->v_mount->mnt_op->vfs_vptofh)(VP, FIDP) + +/* + * Network address lookup element + */ +struct netcred { + struct radix_node netc_rnodes[2]; + int netc_exflags; + struct ucred netc_anon; +}; + +/* + * Network export information + */ +struct netexport { + struct netcred ne_defexported; /* Default export */ + struct radix_node_head *ne_rtable[AF_MAX+1]; /* Individual exports */ +}; + +/* + * exported vnode operations + */ +int vfs_busy __P((struct mount *, int, struct slock *, struct proc *)); +int vfs_export __P((struct mount *, struct netexport *, + struct export_args *)); +struct netcred *vfs_export_lookup __P((struct mount *, struct netexport *, + struct mbuf *)); +void vfs_getnewfsid __P((struct mount *)); +struct mount *vfs_getvfs __P((fsid_t *)); +int vfs_mountedon __P((struct vnode *)); +int vfs_mountroot __P((void)); +int vfs_rootmountalloc __P((char *, char *, struct mount **)); +void vfs_unbusy __P((struct mount *, struct proc *)); +void vfs_unmountall __P((void)); +extern CIRCLEQ_HEAD(mntlist, mount) mountlist; +extern struct slock mountlist_slock; + +#else /* !KERNEL */ + +#include + +__BEGIN_DECLS +int fstatfs __P((int, struct statfs *)); +int getfh __P((const char *, fhandle_t *)); +int getfsstat __P((struct statfs *, long, int)); +int getmntinfo 
__P((struct statfs **, int)); +int mount __P((const char *, const char *, int, void *)); +int statfs __P((const char *, struct statfs *)); +int unmount __P((const char *, int)); +__END_DECLS + +#endif /* KERNEL */ +#endif /* !_SYS_MOUNT_H_ */ diff --git a/bsd/sys/msg.h b/bsd/sys/msg.h new file mode 100644 index 000000000..d639477d8 --- /dev/null +++ b/bsd/sys/msg.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: msg.h,v 1.4 1994/06/29 06:44:43 cgd Exp $ */ + +/* + * SVID compatible msg.h file + * + * Author: Daniel Boulet + * + * Copyright 1993 Daniel Boulet and RTMX Inc. + * + * This system call was implemented by Daniel Boulet under contract from RTMX. + * + * Redistribution and use in source forms, with and without modification, + * are permitted provided that this entire comment appears intact. + * + * Redistribution in binary form may occur without any restrictions. + * Obviously, it would be nice if you gave credit where credit is due + * but requiring it would be too onerous. 
+ * + * This software is provided ``AS IS'' without any warranties of any kind. + */ + +#ifndef _SYS_MSG_H_ +#define _SYS_MSG_H_ + +#include + +/* + * The MSG_NOERROR identifier value, the msqid_ds struct and the msg struct + * are as defined by the SV API Intel 386 Processor Supplement. + */ + +#define MSG_NOERROR 010000 /* don't complain about too long msgs */ + +struct msqid_ds { + struct ipc_perm msg_perm; /* msg queue permission bits */ + struct msg *msg_first; /* first message in the queue */ + struct msg *msg_last; /* last message in the queue */ + u_long msg_cbytes; /* number of bytes in use on the queue */ + u_long msg_qnum; /* number of msgs in the queue */ + u_long msg_qbytes; /* max # of bytes on the queue */ + pid_t msg_lspid; /* pid of last msgsnd() */ + pid_t msg_lrpid; /* pid of last msgrcv() */ + time_t msg_stime; /* time of last msgsnd() */ + long msg_pad1; + time_t msg_rtime; /* time of last msgrcv() */ + long msg_pad2; + time_t msg_ctime; /* time of last msgctl() */ + long msg_pad3; + long msg_pad4[4]; +}; + +struct msg { + struct msg *msg_next; /* next msg in the chain */ + long msg_type; /* type of this message */ + /* >0 -> type of this message */ + /* 0 -> free header */ + u_short msg_ts; /* size of this message */ + short msg_spot; /* location of start of msg in buffer */ +}; + +/* + * Structure describing a message. The SVID doesn't suggest any + * particular name for this structure. There is a reference in the + * msgop man page that reads "The structure mymsg is an example of what + * this user defined buffer might look like, and includes the following + * members:". This sentence is followed by two lines equivalent + * to the mtype and mtext field declarations below. It isn't clear + * if "mymsg" refers to the name of the structure type or the name of an + * instance of the structure... 
+ */ +struct mymsg { + long mtype; /* message type (+ve integer) */ + char mtext[1]; /* message body */ +}; + +/* + * Based on the configuration parameters described in an SVR2 (yes, two) + * config(1m) man page. + * + * Each message is broken up and stored in segments that are msgssz bytes + * long. For efficiency reasons, this should be a power of two. Also, + * it doesn't make sense if it is less than 8 or greater than about 256. + * Consequently, msginit in kern/sysv_msg.c checks that msgssz is a power of + * two between 8 and 1024 inclusive (and panic's if it isn't). + */ +struct msginfo { + int msgmax, /* max chars in a message */ + msgmni, /* max message queue identifiers */ + msgmnb, /* max chars in a queue */ + msgtql, /* max messages in system */ + msgssz, /* size of a message segment (see notes above) */ + msgseg; /* number of message segments */ +}; +#ifdef KERNEL +extern struct msginfo msginfo; + +#ifndef MSGSSZ +#define MSGSSZ 8 /* Each segment must be 2^N long */ +#endif +#ifndef MSGSEG +#define MSGSEG 2048 /* must be less than 32767 */ +#endif +#define MSGMAX (MSGSSZ*MSGSEG) +#ifndef MSGMNB +#define MSGMNB 2048 /* max # of bytes in a queue */ +#endif +#ifndef MSGMNI +#define MSGMNI 40 +#endif +#ifndef MSGTQL +#define MSGTQL 40 +#endif + +/* + * macros to convert between msqid_ds's and msqid's. + * (specific to this implementation) + */ +#define MSQID(ix,ds) ((ix) & 0xffff | (((ds).msg_perm.seq << 16) & 0xffff0000)) +#define MSQID_IX(id) ((id) & 0xffff) +#define MSQID_SEQ(id) (((id) >> 16) & 0xffff) + +/* + * The rest of this file is specific to this particular implementation. 
+ */ + + +/* + * Stuff allocated in machdep.h + */ +struct msgmap { + short next; /* next segment in buffer */ + /* -1 -> available */ + /* 0..(MSGSEG-1) -> index of next segment */ +}; + +extern char *msgpool; /* MSGMAX byte long msg buffer pool */ +extern struct msgmap *msgmaps; /* MSGSEG msgmap structures */ +extern struct msg *msghdrs; /* MSGTQL msg headers */ +extern struct msqid_ds *msqids; /* MSGMNI msqid_ds struct's */ + +#define MSG_LOCKED 01000 /* Is this msqid_ds locked? */ + +#endif /* KERNEL */ + +#ifndef KERNEL +#include + +__BEGIN_DECLS +int msgsys __P((int, ...)); +int msgctl __P((int, int, struct msqid_ds *)); +int msgget __P((key_t, int)); +int msgsnd __P((int, void *, size_t, int)); +int msgrcv __P((int, void*, size_t, long, int)); +__END_DECLS +#endif /* !KERNEL */ + +#endif /* !_SYS_MSG_H_ */ diff --git a/bsd/sys/msgbuf.h b/bsd/sys/msgbuf.h new file mode 100644 index 000000000..15e2dcb86 --- /dev/null +++ b/bsd/sys/msgbuf.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1981, 1984, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)msgbuf.h 8.1 (Berkeley) 6/2/93 + */ +#ifndef _SYS_MSGBUF_H_ +#define _SYS_MSGBUF_H_ + +#define MSG_BSIZE (4096 - 3 * sizeof(long)) +struct msgbuf { +#define MSG_MAGIC 0x063061 + long msg_magic; + long msg_bufx; /* write pointer */ + long msg_bufr; /* read pointer */ + char msg_bufc[MSG_BSIZE]; /* buffer */ +}; +#ifdef KERNEL +extern struct msgbuf *msgbufp; +extern void log_putc(char); +#endif +#endif /* !_SYS_MSGBUF_H_ */ diff --git a/bsd/sys/mtio.h b/bsd/sys/mtio.h new file mode 100644 index 000000000..d7b9006e9 --- /dev/null +++ b/bsd/sys/mtio.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)mtio.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_MTIO_H_ +#define _SYS_MTIO_H_ + +/* + * Structures and definitions for mag tape io control commands + */ + +/* structure for MTIOCTOP - mag tape op command */ +struct mtop { + short mt_op; /* operations defined below */ + daddr_t mt_count; /* how many of them */ +}; + +/* operations */ +#define MTWEOF 0 /* write an end-of-file record */ +#define MTFSF 1 /* forward space file */ +#define MTBSF 2 /* backward space file */ +#define MTFSR 3 /* forward space record */ +#define MTBSR 4 /* backward space record */ +#define MTREW 5 /* rewind */ +#define MTOFFL 6 /* rewind and put the drive offline */ +#define MTNOP 7 /* no operation, sets status only */ +#define MTRETEN 8 /* retension */ +#define MTERASE 9 /* erase entire tape */ +#define MTEOM 10 /* forward to end of media */ +#define MTNBSF 11 /* backward space to beginning of file */ +#define MTCACHE 12 /* enable controller cache */ +#define MTNOCACHE 13 /* disable controller cache */ +#define MTSETBSIZ 14 /* set block size; 0 for variable */ +#define MTSETDNSTY 15 /* set density code for current mode */ + +/* structure for MTIOCGET - mag tape get status command */ + +struct mtget { + short mt_type; /* type of magtape device */ +/* the following two registers are grossly device dependent */ + u_short mt_dsreg; /* ``drive status'' register. SCSI sense byte 0x02. */ + u_short mt_erreg; /* ``error'' register. SCSI sense byte 0x0C. 
*/ + u_short mt_ext_err0; /* SCSI sense bytes 0x13..0x14 */ + u_short mt_ext_err1; /* SCSI sense bytes 0x15..0x16 */ +/* end device-dependent registers */ + short mt_resid; /* residual count */ +/* the following two are not yet implemented */ + daddr_t mt_fileno; /* file number of current position */ + daddr_t mt_blkno; /* block number of current position */ +/* end not yet implemented */ + daddr_t mt_blksiz; /* current block size */ + daddr_t mt_density; /* current density code */ + daddr_t mt_mblksiz[4]; /* block size for different modes */ + daddr_t mt_mdensity[4]; /* density codes for different modes */ +}; + +/* + * Constants for mt_type byte. These are the same + * for controllers compatible with the types listed. + */ +#define MT_ISTS 0x01 /* TS-11 */ +#define MT_ISHT 0x02 /* TM03 Massbus: TE16, TU45, TU77 */ +#define MT_ISTM 0x03 /* TM11/TE10 Unibus */ +#define MT_ISMT 0x04 /* TM78/TU78 Massbus */ +#define MT_ISUT 0x05 /* SI TU-45 emulation on Unibus */ +#define MT_ISCPC 0x06 /* SUN */ +#define MT_ISAR 0x07 /* SUN */ +#define MT_ISTMSCP 0x08 /* DEC TMSCP protocol (TU81, TK50) */ +#define MT_ISCY 0x09 /* CCI Cipher */ +#define MT_ISCT 0x0a /* HP 1/4 tape */ +#define MT_ISFHP 0x0b /* HP 7980 1/2 tape */ +#define MT_ISEXABYTE 0x0c /* Exabyte */ +#define MT_ISEXA8200 0x0c /* Exabyte EXB-8200 */ +#define MT_ISEXA8500 0x0d /* Exabyte EXB-8500 */ +#define MT_ISVIPER1 0x0e /* Archive Viper-150 */ +#define MT_ISPYTHON 0x0f /* Archive Python (DAT) */ +#define MT_ISHPDAT 0x10 /* HP 35450A DAT drive */ +#define MT_ISWANGTEK 0x11 /* WANGTEK 5150ES */ +#define MT_ISCALIPER 0x12 /* Caliper CP150 */ +#define MT_ISWTEK5099 0x13 /* WANGTEK 5099ES */ +#define MT_ISVIPER2525 0x14 /* Archive Viper 2525 */ +#define MT_ISMFOUR 0x11 /* M4 Data 1/2 9track drive */ +#define MT_ISTK50 0x12 /* DEC SCSI TK50 */ +#define MT_ISMT02 0x13 /* Emulex MT02 SCSI tape controller */ +#define MT_ISGS 0x14 /* Generic SCSI Tape */ + +/* mag tape io control commands */ +#define MTIOCTOP _IOW('m', 1, 
struct mtop) /* do a mag tape op */ +#define MTIOCGET _IOR('m', 2, struct mtget) /* get tape status */ +#define MTIOCIEOT _IO('m', 3) /* ignore EOT error */ +#define MTIOCEEOT _IO('m', 4) /* enable EOT error */ + +#ifndef KERNEL +#define DEFTAPE "/dev/rst0" +#endif + +#ifdef KERNEL +/* + * minor device number + */ + +#define T_UNIT 003 /* unit selection */ +#define T_NOREWIND 004 /* no rewind on close */ +#define T_DENSEL 030 /* density select */ +#define T_800BPI 000 /* select 800 bpi */ +#define T_1600BPI 010 /* select 1600 bpi */ +#define T_6250BPI 020 /* select 6250 bpi */ +#define T_BADBPI 030 /* undefined selection */ +#endif +#endif /* !_SYS_MTIO_H_ */ diff --git a/bsd/sys/namei.h b/bsd/sys/namei.h new file mode 100644 index 000000000..1dd8bffad --- /dev/null +++ b/bsd/sys/namei.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1985, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)namei.h 8.4 (Berkeley) 8/20/94 + */ + +#ifndef _SYS_NAMEI_H_ +#define _SYS_NAMEI_H_ + +#include +#include + +/* + * Lookup parameters: this structure describes the subset of + * information from the nameidata structure that is passed + * through the VOP interface. + */ +struct componentname { + /* + * Arguments to lookup. + */ + u_long cn_nameiop; /* namei operation */ + u_long cn_flags; /* flags to namei */ + struct proc *cn_proc; /* process requesting lookup */ + struct ucred *cn_cred; /* credentials */ + /* + * Shared between lookup and commit routines. + */ + char *cn_pnbuf; /* pathname buffer */ + long cn_pnlen; /* length of allocated buffer */ + char *cn_nameptr; /* pointer to looked up name */ + long cn_namelen; /* length of looked up component */ + u_long cn_hash; /* hash value of looked up name */ + long cn_consume; /* chars to consume in lookup() */ +}; + +/* + * Encapsulation of namei parameters. + */ +struct nameidata { + /* + * Arguments to namei/lookup. + */ + caddr_t ni_dirp; /* pathname pointer */ + enum uio_seg ni_segflg; /* location of pathname */ + /* u_long ni_nameiop; namei operation */ + /* u_long ni_flags; flags to namei */ + /* struct proc *ni_proc; process requesting lookup */ + /* + * Arguments to lookup. + */ + /* struct ucred *ni_cred; credentials */ + struct vnode *ni_startdir; /* starting directory */ + struct vnode *ni_rootdir; /* logical root directory */ + /* + * Results: returned from/manipulated by lookup + */ + struct vnode *ni_vp; /* vnode of result */ + struct vnode *ni_dvp; /* vnode of intermediate directory */ + /* + * Shared between namei and lookup/commit routines. 
+ */ + u_int ni_pathlen; /* remaining chars in path */ + char *ni_next; /* next location in pathname */ + u_long ni_loopcnt; /* count of symlinks encountered */ + struct componentname ni_cnd; +}; + +#ifdef KERNEL +/* + * namei operations + */ +#define LOOKUP 0 /* perform name lookup only */ +#define CREATE 1 /* setup for file creation */ +#define DELETE 2 /* setup for file deletion */ +#define RENAME 3 /* setup for file renaming */ +#define OPMASK 3 /* mask for operation */ +/* + * namei operational modifier flags, stored in ni_cnd.flags + */ +#define LOCKLEAF 0x0004 /* lock inode on return */ +#define LOCKPARENT 0x0008 /* want parent vnode returned locked */ +#define WANTPARENT 0x0010 /* want parent vnode returned unlocked */ +#define NOCACHE 0x0020 /* name must not be left in cache */ +#define FOLLOW 0x0040 /* follow symbolic links */ +#define NOFOLLOW 0x0000 /* do not follow symbolic links (pseudo) */ +#define MODMASK 0x00fc /* mask of operational modifiers */ +/* + * Namei parameter descriptors. + * + * SAVENAME may be set by either the callers of namei or by VOP_LOOKUP. + * If the caller of namei sets the flag (for example execve wants to + * know the name of the program that is being executed), then it must + * free the buffer. If VOP_LOOKUP sets the flag, then the buffer must + * be freed by either the commit routine or the VOP_ABORT routine. + * SAVESTART is set only by the callers of namei. It implies SAVENAME + * plus the addition of saving the parent directory that contains the + * name in ni_startdir. It allows repeated calls to lookup for the + * name being sought. The caller is responsible for releasing the + * buffer and for vrele'ing ni_startdir. 
+ */ +#define NOCROSSMOUNT 0x000100 /* do not cross mount points */ +#define RDONLY 0x000200 /* lookup with read-only semantics */ +#define HASBUF 0x000400 /* has allocated pathname buffer */ +#define SAVENAME 0x000800 /* save pathname buffer */ +#define SAVESTART 0x001000 /* save starting directory */ +#define ISDOTDOT 0x002000 /* current component name is .. */ +#define MAKEENTRY 0x004000 /* entry is to be added to name cache */ +#define ISLASTCN 0x008000 /* this is last component of pathname */ +#define ISSYMLINK 0x010000 /* symlink needs interpretation */ +#define ISWHITEOUT 0x020000 /* found whiteout */ +#define DOWHITEOUT 0x040000 /* do whiteouts */ +#define NODELETEBUSY 0x800000 /* do not delete busy files (HFS semantic) */ +#define PARAMASK 0x0fff00 /* mask of parameter descriptors */ +/* + * Initialization of an nameidata structure. + */ +#define NDINIT(ndp, op, flags, segflg, namep, p) { \ + (ndp)->ni_cnd.cn_nameiop = op; \ + (ndp)->ni_cnd.cn_flags = flags; \ + (ndp)->ni_segflg = segflg; \ + (ndp)->ni_dirp = namep; \ + (ndp)->ni_cnd.cn_proc = p; \ +} +#endif /* KERNEL */ + +/* + * This structure describes the elements in the cache of recent + * names looked up by namei. NCHNAMLEN is sized to make structure + * size a power of two to optimize malloc's. Minimum reasonable + * size is 15. 
+ */ + +#define NCHNAMLEN 31 /* maximum name segment length we bother with */ + +struct namecache { + LIST_ENTRY(namecache) nc_hash; /* hash chain */ + TAILQ_ENTRY(namecache) nc_lru; /* LRU chain */ + struct vnode *nc_dvp; /* vnode of parent of name */ + u_long nc_dvpid; /* capability number of nc_dvp */ + struct vnode *nc_vp; /* vnode the name refers to */ + u_long nc_vpid; /* capability number of nc_vp */ + char nc_nlen; /* length of name */ + char nc_name[NCHNAMLEN]; /* segment name */ +}; + +#ifdef KERNEL +extern u_long nextvnodeid; +int namei __P((struct nameidata *ndp)); +int lookup __P((struct nameidata *ndp)); +int relookup __P((struct vnode *dvp, struct vnode **vpp, + struct componentname *cnp)); + +/* namecache function prototypes */ +int cache_lookup __P((struct vnode *dvp, struct vnode **vpp, + struct componentname *cnp)); +void cache_enter __P((struct vnode *dvp, struct vnode *vpp, + struct componentname *cnp)); +void cache_purge __P((struct vnode *vp)); +void cache_purgevfs __P((struct mount *mp)); +#endif /* KERNEL */ + +/* + * Stats on usefulness of namei caches. + */ +struct nchstats { + long ncs_goodhits; /* hits that we can really use */ + long ncs_neghits; /* negative hits that we can use */ + long ncs_badhits; /* hits we must drop */ + long ncs_falsehits; /* hits with id mismatch */ + long ncs_miss; /* misses */ + long ncs_long; /* long names that ignore cache */ + long ncs_pass2; /* names found with passes == 2 */ + long ncs_2passes; /* number of times we attempt it */ +}; +#endif /* !_SYS_NAMEI_H_ */ diff --git a/bsd/sys/netport.h b/bsd/sys/netport.h new file mode 100644 index 000000000..177eeb37a --- /dev/null +++ b/bsd/sys/netport.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1987,1988,1989 Carnegie-Mellon University All rights reserved. + */ +#ifndef _SYS_NETPORT_H_ +#define _SYS_NETPORT_H_ + +typedef unsigned long netaddr_t; + +/* + * Network Port structure. + */ +typedef struct { + long np_uid_high; + long np_uid_low; +} np_uid_t; + +typedef struct { + netaddr_t np_receiver; + netaddr_t np_owner; + np_uid_t np_puid; + np_uid_t np_sid; +} network_port_t; + +#endif /* !_SYS_NETPORT_H_ */ + diff --git a/bsd/sys/param.h b/bsd/sys/param.h new file mode 100644 index 000000000..6a45fc8a4 --- /dev/null +++ b/bsd/sys/param.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)param.h 8.3 (Berkeley) 4/4/95 + */ + +#ifndef _SYS_PARAM_H_ +#define _SYS_PARAM_H_ + +#define BSD 199506 /* System version (year & month). */ +#define BSD4_3 1 +#define BSD4_4 1 + +#define NeXTBSD 1995064 /* NeXTBSD version (year, month, release) */ +#define NeXTBSD4_0 0 /* NeXTBSD 4.0 */ + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef LOCORE +#include +#endif + +/* + * Machine-independent constants (some used in following include files). + * Redefined constants are from POSIX 1003.1 limits file. 
+ * + * MAXCOMLEN should be >= sizeof(ac_comm) (see ) + * MAXLOGNAME should be >= UT_NAMESIZE (see ) + */ +#include + +#define MAXCOMLEN 16 /* max command name remembered */ +#define MAXINTERP 64 /* max interpreter file name length */ +#define MAXLOGNAME 12 /* max login name length */ +#define MAXUPRC CHILD_MAX /* max simultaneous processes */ +#define NCARGS ARG_MAX /* max bytes for an exec function */ +#define NGROUPS NGROUPS_MAX /* max number groups */ +#define NOFILE 256 /* default max open files per process */ +#define NOGROUP 65535 /* marker for empty group set member */ +#define MAXHOSTNAMELEN 256 /* max hostname size */ +#define MAXDOMNAMELEN 256 /* maximum domain name length */ + +/* Machine type dependent parameters. */ +#include +#include + +/* More types and definitions used throughout the kernel. */ +#ifdef KERNEL +#include +#include +#include +#include +#include +#include +#endif + +/* Signals. */ +#include + +/* + * Priorities. Note that with 32 run queues, differences less than 4 are + * insignificant. + */ +#define PSWP 0 +#define PVM 4 +#define PINOD 8 +#define PRIBIO 16 +#define PVFS 20 +#define PZERO 22 /* No longer magic, shouldn't be here. XXX */ +#define PSOCK 24 +#define PWAIT 32 +#define PLOCK 36 +#define PPAUSE 40 +#define PUSER 50 +#define MAXPRI 127 /* Priorities range from 0 through MAXPRI. */ + +#define PRIMASK 0x0ff +#define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ +#define PTTYBLOCK 0x200 /* for tty SIGTTOU and SIGTTIN blocking */ + +#define NZERO 0 /* default "nice" */ + +#define NBPW sizeof(int) /* number of bytes per word (integer) */ + +#define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ +#define NODEV (dev_t)(-1) /* non-existent device */ + +/* + * Clustering of hardware pages on machines with ridiculously small + * page sizes is done here. The paging subsystem deals with units of + * CLSIZE pte's describing NBPG (from machine/param.h) pages each. 
+ */ +#define CLBYTES (CLSIZE*NBPG) +#define CLOFSET (CLSIZE*NBPG-1) /* for clusters, like PGOFSET */ +#define claligned(x) ((((int)(x))&CLOFSET)==0) +#define CLOFF CLOFSET +#define CLSHIFT (PGSHIFT+CLSIZELOG2) + +#if CLSIZE==1 +#define clbase(i) (i) +#define clrnd(i) (i) +#else +/* Give the base virtual address (first of CLSIZE). */ +#define clbase(i) ((i) &~ (CLSIZE-1)) +/* Round a number of clicks up to a whole cluster. */ +#define clrnd(i) (((i) + (CLSIZE-1)) &~ (CLSIZE-1)) +#endif + +#define CBLOCK 64 /* Clist block size, must be a power of 2. */ +#define CBQSIZE (CBLOCK/NBBY) /* Quote bytes/cblock - can do better. */ + /* Data chars/clist. */ +#define CBSIZE (CBLOCK - sizeof(struct cblock *) - CBQSIZE) +#define CROUND (CBLOCK - 1) /* Clist rounding. */ + +/* + * File system parameters and macros. + * + * The file system is made out of blocks of at most MAXBSIZE units, with + * smaller units (fragments) only in the last direct block. MAXBSIZE + * primarily determines the size of buffers in the buffer pool. It may be + * made larger without any effect on existing file systems; however making + * it smaller may make some file systems unmountable. + */ +#define MAXBSIZE MAXPHYS +#define MAXPHYSIO MAXPHYS +#define MAXFRAG 8 + +/* + * MAXPATHLEN defines the longest permissible path length after expanding + * symbolic links. It is used to allocate a temporary buffer from the buffer + * pool in which to do the name expansion, hence should be a power of two, + * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the + * maximum number of symbolic links that may be expanded in a path name. + * It should be set high enough to allow all legitimate uses, but halt + * infinite loops reasonably quickly. + */ +#define MAXPATHLEN PATH_MAX +#define MAXSYMLINKS 32 + +/* Bit map related macros. 
*/ +#define setbit(a,i) (((char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) +#define clrbit(a,i) (((char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) +#define isset(a,i) (((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) +#define isclr(a,i) ((((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) + +/* Macros for counting and rounding. */ +#ifndef howmany +#define howmany(x, y) (((x)+((y)-1))/(y)) +#endif +#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) +#define powerof2(x) ((((x)-1)&(x))==0) + +/* Macros for min/max. */ +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif /* MIN */ +#ifndef MAX +#define MAX(a,b) (((a)>(b))?(a):(b)) +#endif /* MAX */ + +/* + * Constants for setting the parameters of the kernel memory allocator. + * + * 2 ** MINBUCKET is the smallest unit of memory that will be + * allocated. It must be at least large enough to hold a pointer. + * + * Units of memory less or equal to MAXALLOCSAVE will permanently + * allocate physical memory; requests for these size pieces of + * memory are quite fast. Allocations greater than MAXALLOCSAVE must + * always allocate and free physical memory; requests for these + * size allocations should be done infrequently as they will be slow. + * + * Constraints: CLBYTES <= MAXALLOCSAVE <= 2 ** (MINBUCKET + 14), and + * MAXALLOCSIZE must be a power of two. + */ +#define MINBUCKET 4 /* 4 => min allocation of 16 bytes */ +#define MAXALLOCSAVE (2 * CLBYTES) + +/* + * Scale factor for scaled integers used to count %cpu time and load avgs. + * + * The number of CPU `tick's that map to a unique `%age' can be expressed + * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that + * can be calculated (assuming 32 bits) can be closely approximated using + * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). + * + * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', + * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. 
+ */ +#define FSHIFT 11 /* bits to right of fixed binary point */ +#define FSCALE (1< + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#ifndef _SYS_POLL_H_ +#define _SYS_POLL_H_ + +/* + * This file is intended to be compatable with the traditional poll.h. + */ + +/* + * Requestable events. If poll(2) finds any of these set, they are + * copied to revents on return. + * XXX Note that FreeBSD doesn't make much distinction between POLLPRI + * and POLLRDBAND since none of the file types have distinct priority + * bands - and only some have an urgent "mode". 
+ * XXX Note POLLIN isn't really supported in true SYSV terms. Under SYSV + * POLLIN includes all of normal, band and urgent data. Most poll handlers + * on FreeBSD only treat it as "normal" data. + */ +#define POLLIN 0x0001 /* any readable data available */ +#define POLLPRI 0x0002 /* OOB/Urgent readable data */ +#define POLLOUT 0x0004 /* file descriptor is writeable */ +#define POLLRDNORM 0x0040 /* non-OOB/URG data available */ +#define POLLWRNORM POLLOUT /* no write type differentiation */ +#define POLLRDBAND 0x0080 /* OOB/Urgent readable data */ +#define POLLWRBAND 0x0100 /* OOB/Urgent data can be written */ + +/* + * FreeBSD extensions: polling on a regular file might return one + * of these events (currently only supported on UFS). + */ +#define POLLEXTEND 0x0200 /* file may have been extended */ +#define POLLATTRIB 0x0400 /* file attributes may have changed */ +#define POLLNLINK 0x0800 /* (un)link/rename may have happened */ +#define POLLWRITE 0x1000 /* file's contents may have changed */ + +/* + * These events are set if they occur regardless of whether they were + * requested. + */ +#define POLLERR 0x0008 /* some poll error occurred */ +#define POLLHUP 0x0010 /* file descriptor was "hung up" */ +#define POLLNVAL 0x0020 /* requested events "invalid" */ + +#define POLLSTANDARD (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\ + POLLWRBAND|POLLERR|POLLHUP|POLLNVAL) + + +#endif /* !_SYS_POLL_H_ */ diff --git a/bsd/sys/proc.h b/bsd/sys/proc.h new file mode 100644 index 000000000..4ca9147c9 --- /dev/null +++ b/bsd/sys/proc.h @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License.
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)proc.h 8.15 (Berkeley) 5/19/95 + */ + +#ifndef _SYS_PROC_H_ +#define _SYS_PROC_H_ + +#include + +#include /* For struct selinfo. */ +#include +#include +#include + +/* + * One structure allocated per session. + */ +struct session { + int s_count; /* Ref cnt; pgrps in session. */ + struct proc *s_leader; /* Session leader. */ + struct vnode *s_ttyvp; /* Vnode of controlling terminal. */ + struct tty *s_ttyp; /* Controlling terminal. */ + char s_login[MAXLOGNAME]; /* Setlogin() name. */ +}; + +/* + * One structure allocated per process group. + */ +struct pgrp { + LIST_ENTRY(pgrp) pg_hash; /* Hash chain. */ + LIST_HEAD(, proc) pg_members; /* Pointer to pgrp members. */ + struct session *pg_session; /* Pointer to session. */ + pid_t pg_id; /* Pgrp id. */ + int pg_jobc; /* # procs qualifying pgrp for job control */ +}; + +/* + * Description of a process. 
+ * + * This structure contains the information needed to manage a thread of + * control, known in UN*X as a process; it has references to substructures + * containing descriptions of things that the process uses, but may share + * with related processes. The process structure and the substructures + * are always addressible except for those marked "(PROC ONLY)" below, + * which might be addressible only on a processor on which the process + * is running. + */ +struct proc { + LIST_ENTRY(proc) p_list; /* List of all processes. */ + + /* substructures: */ + struct pcred *p_cred; /* Process owner's identity. */ + struct filedesc *p_fd; /* Ptr to open files structure. */ + struct pstats *p_stats; /* Accounting/statistics (PROC ONLY). */ + struct plimit *p_limit; /* Process limits. */ + struct sigacts *p_sigacts; /* Signal actions, state (PROC ONLY). */ + +#define p_ucred p_cred->pc_ucred +#define p_rlimit p_limit->pl_rlimit + + int p_flag; /* P_* flags. */ + char p_stat; /* S* process status. */ + char p_pad1[3]; + + pid_t p_pid; /* Process identifier. */ + LIST_ENTRY(proc) p_pglist; /* List of processes in pgrp. */ + struct proc *p_pptr; /* Pointer to parent process. */ + LIST_ENTRY(proc) p_sibling; /* List of sibling processes. */ + LIST_HEAD(, proc) p_children; /* Pointer to list of children. */ + +/* The following fields are all zeroed upon creation in fork. */ +#define p_startzero p_oppid + + pid_t p_oppid; /* Save parent pid during ptrace. XXX */ + int p_dupfd; /* Sideways return value from fdopen. XXX */ + + /* scheduling */ + u_int p_estcpu; /* Time averaged value of p_cpticks. */ + int p_cpticks; /* Ticks of cpu time. */ + fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ + void *p_wchan; /* Sleep address. */ + char *p_wmesg; /* Reason for sleep. */ + u_int p_swtime; /* Time swapped in or out. */ + u_int p_slptime; /* Time since last blocked. */ + + struct itimerval p_realtimer; /* Alarm timer. */ + struct timeval p_rtime; /* Real time. 
*/ + u_quad_t p_uticks; /* Statclock hits in user mode. */ + u_quad_t p_sticks; /* Statclock hits in system mode. */ + u_quad_t p_iticks; /* Statclock hits processing intr. */ + + int p_traceflag; /* Kernel trace points. */ + struct vnode *p_tracep; /* Trace to vnode. */ + + sigset_t p_siglist; /* Signals arrived but not delivered. */ + + struct vnode *p_textvp; /* Vnode of executable. */ + +/* End area that is zeroed on creation. */ +#define p_endzero p_hash.le_next + + /* + * Not copied, not zero'ed. + * Belongs after p_pid, but here to avoid shifting proc elements. + */ + LIST_ENTRY(proc) p_hash; /* Hash chain. */ + TAILQ_HEAD( ,eventqelt) p_evlist; + +/* The following fields are all copied upon creation in fork. */ +#define p_startcopy p_sigmask + + sigset_t p_sigmask; /* Current signal mask. */ + sigset_t p_sigignore; /* Signals being ignored. */ + sigset_t p_sigcatch; /* Signals being caught by user. */ + + u_char p_priority; /* Process priority. */ + u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ + char p_nice; /* Process "nice" value. */ + char p_comm[MAXCOMLEN+1]; + + struct pgrp *p_pgrp; /* Pointer to process group. */ + +/* End area that is copied on creation. */ +#define p_endcopy p_xstat + + u_short p_xstat; /* Exit status for wait; also stop signal. */ + u_short p_acflag; /* Accounting flags. */ + struct rusage *p_ru; /* Exit information. XXX */ + + int p_debugger; /* 1: can exec set-bit programs if suser */ + + void *task; /* corresponding task */ + void *sigwait_thread; /* 'thread' holding sigwait */ + struct lock__bsd__ signal_lock; /* multiple thread prot for signals */ + boolean_t sigwait; /* indication to suspend */ + void *exit_thread; /* Which thread is exiting? */ + caddr_t user_stack; /* where user stack was allocated */ + void * exitarg; /* exit arg for proc terminate */ + void * vm_shm; /* for sysV shared memory */ + sigset_t p_sigpending; /* pended Signals as traced process is blocked. 
*/ +#if DIAGNOSTIC +#if SIGNAL_DEBUG + unsigned int lockpc[8]; + unsigned int unlockpc[8]; +#endif /* SIGNAL_DEBUG */ +#endif /* DIAGNOSTIC */ +}; + +/* Exported fields for kern sysctls */ +struct extern_proc { + struct proc *p_forw; /* Doubly-linked run/sleep queue. */ + struct proc *p_back; + struct vmspace *p_vmspace; /* Address space. */ + struct sigacts *p_sigacts; /* Signal actions, state (PROC ONLY). */ + int p_flag; /* P_* flags. */ + char p_stat; /* S* process status. */ + pid_t p_pid; /* Process identifier. */ + pid_t p_oppid; /* Save parent pid during ptrace. XXX */ + int p_dupfd; /* Sideways return value from fdopen. XXX */ + /* Mach related */ + caddr_t user_stack; /* where user stack was allocated */ + void *exit_thread; /* XXX Which thread is exiting? */ + int p_debugger; /* allow to debug */ + boolean_t sigwait; /* indication to suspend */ + /* scheduling */ + u_int p_estcpu; /* Time averaged value of p_cpticks. */ + int p_cpticks; /* Ticks of cpu time. */ + fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ + void *p_wchan; /* Sleep address. */ + char *p_wmesg; /* Reason for sleep. */ + u_int p_swtime; /* Time swapped in or out. */ + u_int p_slptime; /* Time since last blocked. */ + struct itimerval p_realtimer; /* Alarm timer. */ + struct timeval p_rtime; /* Real time. */ + u_quad_t p_uticks; /* Statclock hits in user mode. */ + u_quad_t p_sticks; /* Statclock hits in system mode. */ + u_quad_t p_iticks; /* Statclock hits processing intr. */ + int p_traceflag; /* Kernel trace points. */ + struct vnode *p_tracep; /* Trace to vnode. */ + int p_siglist; /* Signals arrived but not delivered. */ + struct vnode *p_textvp; /* Vnode of executable. */ + int p_holdcnt; /* If non-zero, don't swap. */ + sigset_t p_sigmask; /* Current signal mask. */ + sigset_t p_sigignore; /* Signals being ignored. */ + sigset_t p_sigcatch; /* Signals being caught by user. */ + u_char p_priority; /* Process priority. 
*/ + u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ + char p_nice; /* Process "nice" value. */ + char p_comm[MAXCOMLEN+1]; + struct pgrp *p_pgrp; /* Pointer to process group. */ + struct user *p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ + u_short p_xstat; /* Exit status for wait; also stop signal. */ + u_short p_acflag; /* Accounting flags. */ + struct rusage *p_ru; /* Exit information. XXX */ +}; + +#define p_session p_pgrp->pg_session +#define p_pgid p_pgrp->pg_id + +/* Status values. */ +#define SIDL 1 /* Process being created by fork. */ +#define SRUN 2 /* Currently runnable. */ +#define SSLEEP 3 /* Sleeping on an address. */ +#define SSTOP 4 /* Process debugging or suspension. */ +#define SZOMB 5 /* Awaiting collection by parent. */ + +/* These flags are kept in p_flags. */ +#define P_ADVLOCK 0x00001 /* Process may hold a POSIX advisory lock. */ +#define P_CONTROLT 0x00002 /* Has a controlling terminal. */ +#define P_INMEM 0x00004 /* Loaded into memory. */ +#define P_NOCLDSTOP 0x00008 /* No SIGCHLD when children stop. */ +#define P_PPWAIT 0x00010 /* Parent is waiting for child to exec/exit. */ +#define P_PROFIL 0x00020 /* Has started profiling. */ +#define P_SELECT 0x00040 /* Selecting; wakeup/waiting danger. */ +#define P_SINTR 0x00080 /* Sleep is interruptible. */ +#define P_SUGID 0x00100 /* Had set id privileges since last exec. */ +#define P_SYSTEM 0x00200 /* System proc: no sigs, stats or swapping. */ +#define P_TIMEOUT 0x00400 /* Timing out during sleep. */ +#define P_TRACED 0x00800 /* Debugged process being traced. */ +#define P_WAITED 0x01000 /* Debugging process has waited for child. */ +#define P_WEXIT 0x02000 /* Working on exiting. */ +#define P_EXEC 0x04000 /* Process called exec. */ + +/* Should probably be changed into a hold count. */ +#define P_NOSWAP 0x08000 /* Another flag to prevent swap out. */ +#define P_PHYSIO 0x10000 /* Doing physical I/O. */ + +/* Should be moved to machine-dependent areas. 
*/ +#define P_OWEUPC 0x08000 /* Owe process an addupc() call at next ast. */ + +/* XXX Not sure what to do with these, yet. */ +#define P_FSTRACE 0x10000 /* tracing via file system (elsewhere?) */ +#define P_SSTEP 0x20000 /* process needs single-step fixup ??? */ + +#define P_WAITING 0x0040000 /* process has a wait() in progress */ +#define P_KDEBUG 0x0080000 /* kdebug tracing is on for this process */ +#define P_TTYSLEEP 0x0100000 /* blocked due to SIGTTOU or SIGTTIN */ +#define P_REBOOT 0x0200000 /* Process called reboot() */ +#define P_TBE 0x0400000 /* Process is TBE */ +#define P_SIGTHR 0x0800000 /* signal pending handling thread scheduled */ + +/* + * Shareable process credentials (always resident). This includes a reference + * to the current user credentials as well as real and saved ids that may be + * used to change ids. + */ +struct pcred { + struct lock__bsd__ pc_lock; + struct ucred *pc_ucred; /* Current credentials. */ + uid_t p_ruid; /* Real user id. */ + uid_t p_svuid; /* Saved effective user id. */ + gid_t p_rgid; /* Real group id. */ + gid_t p_svgid; /* Saved effective group id. */ + int p_refcnt; /* Number of references. */ +}; + +#define pcred_readlock(p) lockmgr(&(p)->p_cred->pc_lock, \ + LK_SHARED, 0, (p)) +#define pcred_writelock(p) lockmgr(&(p)->p_cred->pc_lock, \ + LK_EXCLUSIVE, 0, (p)) +#define pcred_unlock(p) lockmgr(&(p)->p_cred->pc_lock, \ + LK_RELEASE, 0, (p)) + +#ifdef KERNEL + +__BEGIN_DECLS +/* + * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t, + * as it is used to represent "no process group". 
+ */ +#define PID_MAX 30000 +#define NO_PID 30001 + +#define SESS_LEADER(p) ((p)->p_session->s_leader == (p)) +#define SESSHOLD(s) ((s)->s_count++) +#define SESSRELE(s) sessrele(s) + +#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) +extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; +extern u_long pidhash; + +#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) +extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; +extern u_long pgrphash; + +extern int nprocs, maxproc; /* Current and max number of procs. */ + +LIST_HEAD(proclist, proc); +extern struct proclist allproc; /* List of all processes. */ +extern struct proclist zombproc; /* List of zombie processes. */ +extern struct proc *initproc, *kernproc; + +extern struct proc *pfind __P((pid_t)); /* Find process by id. */ +extern struct pgrp *pgfind __P((pid_t)); /* Find process group by id. */ + +extern int chgproccnt __P((uid_t uid, int diff)); +extern int enterpgrp __P((struct proc *p, pid_t pgid, int mksess)); +extern void fixjobc __P((struct proc *p, struct pgrp *pgrp, int entering)); +extern int inferior __P((struct proc *p)); +extern int leavepgrp __P((struct proc *p)); +extern void mi_switch __P((void)); +extern void pgdelete __P((struct pgrp *pgrp)); +extern void sessrele __P((struct session *sess)); +extern void procinit __P((void)); +extern void resetpriority __P((struct proc *)); +extern void setrunnable __P((struct proc *)); +extern void setrunqueue __P((struct proc *)); +extern int sleep __P((void *chan, int pri)); +extern int tsleep __P((void *chan, int pri, char *wmesg, int timo)); +extern int tsleep0 __P((void *chan, int pri, char *wmesg, int timo, int (*continuation)(int) )); +extern void unsleep __P((struct proc *)); +extern void wakeup __P((void *chan)); +__END_DECLS + +#endif /* KERNEL */ + +#endif /* !_SYS_PROC_H_ */ diff --git a/bsd/sys/protosw.h b/bsd/sys/protosw.h new file mode 100644 index 000000000..b26fefda7 --- /dev/null +++ b/bsd/sys/protosw.h @@ -0,0 +1,378 @@ +/* + * Copyright (c) 
2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)protosw.h 8.1 (Berkeley) 6/2/93 + */ + +/* + * Protocol switch table. + * + * Each protocol has a handle initializing one of these structures, + * which is used for protocol-protocol and system-protocol communication. + * + * A protocol is called through the pr_init entry before any other. + * Thereafter it is called every 200ms through the pr_fasttimo entry and + * every 500ms through the pr_slowtimo for timer based actions. + * The system will call the pr_drain entry if it is low on space and + * this should throw away any non-critical data. + * + * Protocols pass data between themselves as chains of mbufs using + * the pr_input and pr_output hooks. Pr_input passes data up (towards + * UNIX) and pr_output passes it down (towards the imps); control + * information passes up and down on pr_ctlinput and pr_ctloutput. + * The protocol is responsible for the space occupied by any the + * arguments to these entries and must dispose it. 
+ * + * The userreq routine interfaces protocols to the system and is + * described below. + */ + +#ifndef _SYS_PROTOSW_H_ +#define _SYS_PROTOSW_H_ + +#include +#include + +struct protosw { + short pr_type; /* socket type used for */ + struct domain *pr_domain; /* domain protocol a member of */ + short pr_protocol; /* protocol number */ + unsigned int pr_flags; /* see below */ +/* protocol-protocol hooks */ + void (*pr_input) __P((struct mbuf *, int len)); + /* input to protocol (from below) */ + int (*pr_output) __P((struct mbuf *m, struct socket *so)); + /* output to protocol (from above) */ + void (*pr_ctlinput)__P((int, struct sockaddr *, void *)); + /* control input (from below) */ + int (*pr_ctloutput)__P((struct socket *, struct sockopt *)); + /* control output (from above) */ +/* user-protocol hook */ + void *pr_ousrreq; +/* utility hooks */ + void (*pr_init) __P((void)); /* initialization hook */ + void (*pr_fasttimo) __P((void)); + /* fast timeout (200ms) */ + void (*pr_slowtimo) __P((void)); + /* slow timeout (500ms) */ + void (*pr_drain) __P((void)); + /* flush any excess space possible */ + + int (*pr_sysctl)(); /* sysctl for protocol */ + + struct pr_usrreqs *pr_usrreqs; /* supersedes pr_usrreq() */ +/* Implant hooks */ + TAILQ_HEAD(pr_sfilter, NFDescriptor) pr_sfilter; + struct protosw *pr_next; /* Chain for domain */ +}; + +#define PR_SLOWHZ 2 /* 2 slow timeouts per second */ +#define PR_FASTHZ 5 /* 5 fast timeouts per second */ + +/* + * Values for pr_flags. + * PR_ADDR requires PR_ATOMIC; + * PR_ADDR and PR_CONNREQUIRED are mutually exclusive. 
+ */ +#define PR_ATOMIC 0x01 /* exchange atomic messages only */ +#define PR_ADDR 0x02 /* addresses given with messages */ +#define PR_CONNREQUIRED 0x04 /* connection required by protocol */ +#define PR_WANTRCVD 0x08 /* want PRU_RCVD calls */ +#define PR_RIGHTS 0x10 /* passes capabilities */ +#define PR_IMPLOPCL 0x20 /* implied open/close */ + +/* + * The arguments to usrreq are: + * (*protosw[].pr_usrreq)(up, req, m, nam, opt); + * where up is a (struct socket *), req is one of these requests, + * m is a optional mbuf chain containing a message, + * nam is an optional mbuf chain containing an address, + * and opt is a pointer to a socketopt structure or nil. + * The protocol is responsible for disposal of the mbuf chain m, + * the caller is responsible for any space held by nam and opt. + * A non-zero return from usrreq gives an + * UNIX error number which should be passed to higher level software. + */ +#define PRU_ATTACH 0 /* attach protocol to up */ +#define PRU_DETACH 1 /* detach protocol from up */ +#define PRU_BIND 2 /* bind socket to address */ +#define PRU_LISTEN 3 /* listen for connection */ +#define PRU_CONNECT 4 /* establish connection to peer */ +#define PRU_ACCEPT 5 /* accept connection from peer */ +#define PRU_DISCONNECT 6 /* disconnect from peer */ +#define PRU_SHUTDOWN 7 /* won't send any more data */ +#define PRU_RCVD 8 /* have taken data; more room now */ +#define PRU_SEND 9 /* send this data */ +#define PRU_ABORT 10 /* abort (fast DISCONNECT, DETATCH) */ +#define PRU_CONTROL 11 /* control operations on protocol */ +#define PRU_SENSE 12 /* return status into m */ +#define PRU_RCVOOB 13 /* retrieve out of band data */ +#define PRU_SENDOOB 14 /* send out of band data */ +#define PRU_SOCKADDR 15 /* fetch socket's address */ +#define PRU_PEERADDR 16 /* fetch peer's address */ +#define PRU_CONNECT2 17 /* connect two sockets */ +/* begin for protocols internal use */ +#define PRU_FASTTIMO 18 /* 200ms timeout */ +#define PRU_SLOWTIMO 19 /* 500ms 
timeout */ +#define PRU_PROTORCV 20 /* receive from below */ +#define PRU_PROTOSEND 21 /* send to below */ +/* end for protocol's internal use */ +#define PRU_SEND_EOF 22 /* send and close */ +#define PRU_NREQ 22 + +#ifdef PRUREQUESTS +char *prurequests[] = { + "ATTACH", "DETACH", "BIND", "LISTEN", + "CONNECT", "ACCEPT", "DISCONNECT", "SHUTDOWN", + "RCVD", "SEND", "ABORT", "CONTROL", + "SENSE", "RCVOOB", "SENDOOB", "SOCKADDR", + "PEERADDR", "CONNECT2", "FASTTIMO", "SLOWTIMO", + "PROTORCV", "PROTOSEND", + "SEND_EOF", +}; +#endif + +#ifdef KERNEL /* users shouldn't see this decl */ + +struct ifnet; +struct stat; +struct ucred; +struct uio; + +/* + * If the ordering here looks odd, that's because it's alphabetical. + * Having this structure separated out from the main protoswitch is allegedly + * a big (12 cycles per call) lose on high-end CPUs. We will eventually + * migrate this stuff back into the main structure. + */ +struct pr_usrreqs { + int (*pru_abort) __P((struct socket *so)); + int (*pru_accept) __P((struct socket *so, struct sockaddr **nam)); + int (*pru_attach) __P((struct socket *so, int proto, + struct proc *p)); + int (*pru_bind) __P((struct socket *so, struct sockaddr *nam, + struct proc *p)); + int (*pru_connect) __P((struct socket *so, struct sockaddr *nam, + struct proc *p)); + int (*pru_connect2) __P((struct socket *so1, struct socket *so2)); + int (*pru_control) __P((struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p)); + int (*pru_detach) __P((struct socket *so)); + int (*pru_disconnect) __P((struct socket *so)); + int (*pru_listen) __P((struct socket *so, struct proc *p)); + int (*pru_peeraddr) __P((struct socket *so, + struct sockaddr **nam)); + int (*pru_rcvd) __P((struct socket *so, int flags)); + int (*pru_rcvoob) __P((struct socket *so, struct mbuf *m, + int flags)); + int (*pru_send) __P((struct socket *so, int flags, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p)); +#define 
PRUS_OOB 0x1 +#define PRUS_EOF 0x2 +#define PRUS_MORETOCOME 0x4 + int (*pru_sense) __P((struct socket *so, struct stat *sb)); + int (*pru_shutdown) __P((struct socket *so)); + int (*pru_sockaddr) __P((struct socket *so, + struct sockaddr **nam)); + + /* + * These three added later, so they are out of order. They are used + * for shortcutting (fast path input/output) in some protocols. + * XXX - that's a lie, they are not implemented yet + * Rather than calling sosend() etc. directly, calls are made + * through these entry points. For protocols which still use + * the generic code, these just point to those routines. + */ + int (*pru_sosend) __P((struct socket *so, struct sockaddr *addr, + struct uio *uio, struct mbuf *top, + struct mbuf *control, int flags)); + int (*pru_soreceive) __P((struct socket *so, + struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, + struct mbuf **controlp, int *flagsp)); + int (*pru_sopoll) __P((struct socket *so, int events, + struct ucred *cred)); +}; + + +extern int pru_abort_notsupp(struct socket *so); +extern int pru_accept_notsupp(struct socket *so, struct sockaddr **nam); +extern int pru_attach_notsupp(struct socket *so, int proto, + struct proc *p); +extern int pru_bind_notsupp(struct socket *so, struct sockaddr *nam, + struct proc *p); +extern int pru_connect_notsupp(struct socket *so, struct sockaddr *nam, + struct proc *p); +extern int pru_connect2_notsupp(struct socket *so1, struct socket *so2); +extern int pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p); +extern int pru_detach_notsupp(struct socket *so); +extern int pru_disconnect_notsupp(struct socket *so); +extern int pru_listen_notsupp(struct socket *so, struct proc *p); +extern int pru_peeraddr_notsupp(struct socket *so, + struct sockaddr **nam); +extern int pru_rcvd_notsupp(struct socket *so, int flags); +extern int pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, + int flags); +extern int 
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p); +extern int pru_sense_null(struct socket *so, struct stat *sb); +extern int pru_shutdown_notsupp(struct socket *so); +extern int pru_sockaddr_notsupp(struct socket *so, + struct sockaddr **nam); +extern int pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, + struct uio *uio, struct mbuf *top, + struct mbuf *control, int flags); +extern int pru_soreceive_notsupp(struct socket *so, + struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, + struct mbuf **controlp, int *flagsp); +extern int pru_sopoll_notsupp(struct socket *so, int events, + struct ucred *cred); + + +#endif /* KERNEL */ + +/* + * The arguments to the ctlinput routine are + * (*protosw[].pr_ctlinput)(cmd, sa, arg); + * where cmd is one of the commands below, sa is a pointer to a sockaddr, + * and arg is a `void *' argument used within a protocol family. + */ +#define PRC_IFDOWN 0 /* interface transition */ +#define PRC_ROUTEDEAD 1 /* select new route if possible ??? 
*/ +#define PRC_IFUP 2 /* interface has come back up */ +#define PRC_QUENCH2 3 /* DEC congestion bit says slow down */ +#define PRC_QUENCH 4 /* some one said to slow down */ +#define PRC_MSGSIZE 5 /* message size forced drop */ +#define PRC_HOSTDEAD 6 /* host appears to be down */ +#define PRC_HOSTUNREACH 7 /* deprecated (use PRC_UNREACH_HOST) */ +#define PRC_UNREACH_NET 8 /* no route to network */ +#define PRC_UNREACH_HOST 9 /* no route to host */ +#define PRC_UNREACH_PROTOCOL 10 /* dst says bad protocol */ +#define PRC_UNREACH_PORT 11 /* bad port # */ +/* was PRC_UNREACH_NEEDFRAG 12 (use PRC_MSGSIZE) */ +#define PRC_UNREACH_SRCFAIL 13 /* source route failed */ +#define PRC_REDIRECT_NET 14 /* net routing redirect */ +#define PRC_REDIRECT_HOST 15 /* host routing redirect */ +#define PRC_REDIRECT_TOSNET 16 /* redirect for type of service & net */ +#define PRC_REDIRECT_TOSHOST 17 /* redirect for tos & host */ +#define PRC_TIMXCEED_INTRANS 18 /* packet lifetime expired in transit */ +#define PRC_TIMXCEED_REASS 19 /* lifetime expired on reass q */ +#define PRC_PARAMPROB 20 /* header incorrect */ + +#define PRC_NCMDS 21 + +#define PRC_IS_REDIRECT(cmd) \ + ((cmd) >= PRC_REDIRECT_NET && (cmd) <= PRC_REDIRECT_TOSHOST) + +#ifdef PRCREQUESTS +char *prcrequests[] = { + "IFDOWN", "ROUTEDEAD", "IFUP", "DEC-BIT-QUENCH2", + "QUENCH", "MSGSIZE", "HOSTDEAD", "#7", + "NET-UNREACH", "HOST-UNREACH", "PROTO-UNREACH", "PORT-UNREACH", + "#12", "SRCFAIL-UNREACH", "NET-REDIRECT", "HOST-REDIRECT", + "TOSNET-REDIRECT", "TOSHOST-REDIRECT", "TX-INTRANS", "TX-REASS", + "PARAMPROB" +}; +#endif + +/* + * The arguments to ctloutput are: + * (*protosw[].pr_ctloutput)(req, so, level, optname, optval, p); + * req is one of the actions listed below, so is a (struct socket *), + * level is an indication of which protocol layer the option is intended. + * optname is a protocol dependent socket option request, + * optval is a pointer to a mbuf-chain pointer, for value-return results. 
+ * The protocol is responsible for disposal of the mbuf chain *optval + * if supplied, + * the caller is responsible for any space held by *optval, when returned. + * A non-zero return from usrreq gives an + * UNIX error number which should be passed to higher level software. + */ +#define PRCO_GETOPT 0 +#define PRCO_SETOPT 1 + +#define PRCO_NCMDS 2 + +#ifdef PRCOREQUESTS +char *prcorequests[] = { + "GETOPT", "SETOPT", +}; +#endif + +#ifdef KERNEL +void pfctlinput __P((int, struct sockaddr *)); +struct protosw *pffindproto __P((int family, int protocol, int type)); +struct protosw *pffindtype __P((int family, int type)); + +extern int net_add_proto(struct protosw *, struct domain *); +extern int net_del_proto(int, int, struct domain *); + +/* Temp hack to link static domains together */ + +#define LINK_PROTOS(psw) \ +static void link_ ## psw ## _protos() \ +{ \ + int i; \ + \ + for (i=0; i < ((sizeof(psw)/sizeof(psw[0])) - 1); i++) \ + psw[i].pr_next = &psw[i + 1]; \ +} + +#endif +#endif /* !_SYS_PROTOSW_H_ */ diff --git a/bsd/sys/ptrace.h b/bsd/sys/ptrace.h new file mode 100644 index 000000000..5304ecdef --- /dev/null +++ b/bsd/sys/ptrace.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1984, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ptrace.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_PTRACE_H_ +#define _SYS_PTRACE_H_ + +#define PT_TRACE_ME 0 /* child declares it's being traced */ +#define PT_READ_I 1 /* read word in child's I space */ +#define PT_READ_D 2 /* read word in child's D space */ +#define PT_READ_U 3 /* read word in child's user structure */ +#define PT_WRITE_I 4 /* write word in child's I space */ +#define PT_WRITE_D 5 /* write word in child's D space */ +#define PT_WRITE_U 6 /* write word in child's user structure */ +#define PT_CONTINUE 7 /* continue the child */ +#define PT_KILL 8 /* kill the child process */ +#define PT_STEP 9 /* single step the child */ +#define PT_ATTACH 10 /* trace some running process */ +#define PT_DETACH 11 /* stop tracing a process */ + +#define PT_FIRSTMACH 32 /* for machine-specific requests */ +#include <machine/ptrace.h> /* machine-specific requests, if any */ + +#ifdef KERNEL +void proc_reparent __P((struct proc *child, struct proc *newparent)); + +#else /* !KERNEL */ + +#include <sys/cdefs.h> + +__BEGIN_DECLS +int ptrace __P((int _request, pid_t _pid, caddr_t _addr, int _data)); +__END_DECLS + +#endif /* !KERNEL */ + +#endif /* !_SYS_PTRACE_H_ */ diff --git a/bsd/sys/queue.h b/bsd/sys/queue.h new file mode 100644 index 000000000..bd4b21341 --- /dev/null +++ b/bsd/sys/queue.h @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)queue.h 8.5 (Berkeley) 8/20/94 + */ + +#ifndef _SYS_QUEUE_H_ +#define _SYS_QUEUE_H_ + +/* + * This file defines five types of data structures: singly-linked lists, + * singly-linked tail queues, lists, tail queues, and circular queues. + * + * A singly-linked list is headed by a single forward pointer. The elements + * are singly linked for minimum space and pointer manipulation overhead at + * the expense of O(n) removal for arbitrary elements. New elements can be + * added to the list after an existing element or at the head of the list. + * Elements being removed from the head of the list should use the explicit + * macro for this purpose for optimum efficiency. A singly-linked list may + * only be traversed in the forward direction. Singly-linked lists are ideal + * for applications with large datasets and few or no removals or for + * implementing a LIFO queue. + * + * A singly-linked tail queue is headed by a pair of pointers, one to the + * head of the list and the other to the tail of the list. The elements are + * singly linked for minimum space and pointer manipulation overhead at the + * expense of O(n) removal for arbitrary elements.
New elements can be added + * to the list after an existing element, at the head of the list, or at the + * end of the list. Elements being removed from the head of the tail queue + * should use the explicit macro for this purpose for optimum efficiency. + * A singly-linked tail queue may only be traversed in the forward direction. + * Singly-linked tail queues are ideal for applications with large datasets + * and few or no removals or for implementing a FIFO queue. + * + * A list is headed by a single forward pointer (or an array of forward + * pointers for a hash table header). The elements are doubly linked + * so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before + * or after an existing element or at the head of the list. A list + * may only be traversed in the forward direction. + * + * A tail queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or + * after an existing element, at the head of the list, or at the end of + * the list. A tail queue may only be traversed in the forward direction. + * + * A circle queue is headed by a pair of pointers, one to the head of the + * list and the other to the tail of the list. The elements are doubly + * linked so that an arbitrary element can be removed without a need to + * traverse the list. New elements can be added to the list before or after + * an existing element, at the head of the list, or at the end of the list. + * A circle queue may be traversed in either direction, but has a more + * complex end of list detection. + * + * For details on the use of these macros, see the queue(3) manual page. 
+ * + * + * SLIST LIST STAILQ TAILQ CIRCLEQ + * _HEAD + + + + + + * _ENTRY + + + + + + * _INIT + + + + + + * _EMPTY + + + + + + * _FIRST + + + + + + * _NEXT + + + + + + * _PREV - - - + + + * _LAST - - + + + + * _FOREACH + + - + + + * _INSERT_HEAD + + + + + + * _INSERT_BEFORE - + - + + + * _INSERT_AFTER + + + + + + * _INSERT_TAIL - - + + + + * _REMOVE_HEAD + - + - - + * _REMOVE + + + + + + * + */ + +/* + * Singly-linked List definitions. + */ +#define SLIST_HEAD(name, type) \ +struct name { \ + struct type *slh_first; /* first element */ \ +} + +#define SLIST_ENTRY(type) \ +struct { \ + struct type *sle_next; /* next element */ \ +} + +/* + * Singly-linked List functions. + */ +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) + +#define SLIST_FIRST(head) ((head)->slh_first) + +#define SLIST_FOREACH(var, head, field) \ + for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) + +#define SLIST_INIT(head) { \ + (head)->slh_first = NULL; \ +} + +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + (elm)->field.sle_next = (slistelm)->field.sle_next; \ + (slistelm)->field.sle_next = (elm); \ +} while (0) + +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.sle_next = (head)->slh_first; \ + (head)->slh_first = (elm); \ +} while (0) + +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) + +#define SLIST_REMOVE_HEAD(head, field) do { \ + (head)->slh_first = (head)->slh_first->field.sle_next; \ +} while (0) + +#define SLIST_REMOVE(head, elm, type, field) do { \ + if ((head)->slh_first == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = (head)->slh_first; \ + while( curelm->field.sle_next != (elm) ) \ + curelm = curelm->field.sle_next; \ + curelm->field.sle_next = \ + curelm->field.sle_next->field.sle_next; \ + } \ +} while (0) + +/* + * Singly-linked Tail queue definitions. 
+ */ +#define STAILQ_HEAD(name, type) \ +struct name { \ + struct type *stqh_first;/* first element */ \ + struct type **stqh_last;/* addr of last next element */ \ +} + +#define STAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).stqh_first } + +#define STAILQ_ENTRY(type) \ +struct { \ + struct type *stqe_next; /* next element */ \ +} + +/* + * Singly-linked Tail queue functions. + */ +#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) + +#define STAILQ_INIT(head) do { \ + (head)->stqh_first = NULL; \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (0) + +#define STAILQ_FIRST(head) ((head)->stqh_first) +#define STAILQ_LAST(head) (*(head)->stqh_last) + +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (head)->stqh_first = (elm); \ +} while (0) + +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.stqe_next = NULL; \ + *(head)->stqh_last = (elm); \ + (head)->stqh_last = &(elm)->field.stqe_next; \ +} while (0) + +#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ + if (((elm)->field.stqe_next = (tqelm)->field.stqe_next) == NULL)\ + (head)->stqh_last = &(elm)->field.stqe_next; \ + (tqelm)->field.stqe_next = (elm); \ +} while (0) + +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) + +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if (((head)->stqh_first = \ + (head)->stqh_first->field.stqe_next) == NULL) \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (0) + +#define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ + if (((head)->stqh_first = (elm)->field.stqe_next) == NULL) \ + (head)->stqh_last = &(head)->stqh_first; \ +} while (0) + + +#define STAILQ_REMOVE(head, elm, type, field) do { \ + if ((head)->stqh_first == (elm)) { \ + STAILQ_REMOVE_HEAD(head, field); \ + } \ + else { \ + struct type *curelm = (head)->stqh_first; \ + while( curelm->field.stqe_next != (elm) ) \ + curelm = 
curelm->field.stqe_next; \ + if((curelm->field.stqe_next = \ + curelm->field.stqe_next->field.stqe_next) == NULL) \ + (head)->stqh_last = &(curelm)->field.stqe_next; \ + } \ +} while (0) + +/* + * List definitions. + */ +#define LIST_HEAD(name, type) \ +struct name { \ + struct type *lh_first; /* first element */ \ +} + +#define LIST_HEAD_INITIALIZER(head) \ + { NULL } + +#define LIST_ENTRY(type) \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} + +/* + * List functions. + */ + +#define LIST_EMPTY(head) ((head)->lh_first == NULL) + +#define LIST_FIRST(head) ((head)->lh_first) + +#define LIST_FOREACH(var, head, field) \ + for((var) = (head)->lh_first; (var); (var) = (var)->field.le_next) + +#define LIST_INIT(head) do { \ + (head)->lh_first = NULL; \ +} while (0) + +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ + (listelm)->field.le_next->field.le_prev = \ + &(elm)->field.le_next; \ + (listelm)->field.le_next = (elm); \ + (elm)->field.le_prev = &(listelm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + (elm)->field.le_next = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &(elm)->field.le_next; \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.le_next = (head)->lh_first) != NULL) \ + (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ + (head)->lh_first = (elm); \ + (elm)->field.le_prev = &(head)->lh_first; \ +} while (0) + +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_REMOVE(elm, field) do { \ + if ((elm)->field.le_next != NULL) \ + (elm)->field.le_next->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = (elm)->field.le_next; \ +} while (0) + +/* + * Tail queue definitions. 
+ */ +#define TAILQ_HEAD(name, type) \ +struct name { \ + struct type *tqh_first; /* first element */ \ + struct type **tqh_last; /* addr of last next element */ \ +} + +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, &(head).tqh_first } + +#define TAILQ_ENTRY(type) \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ +} + +/* + * Tail queue functions. + */ +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) + +#define TAILQ_FOREACH(var, head, field) \ + for (var = TAILQ_FIRST(head); var; var = TAILQ_NEXT(var, field)) + +#define TAILQ_FOREACH_REVERSE(var, head, field, headname) \ + for (var = TAILQ_LAST(head, headname); \ + var; var = TAILQ_PREV(var, headname, field)) + +#define TAILQ_FIRST(head) ((head)->tqh_first) + +#define TAILQ_LAST(head, headname) \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) + +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) + +#define TAILQ_PREV(elm, headname, field) \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) + +#define TAILQ_INIT(head) do { \ + (head)->tqh_first = NULL; \ + (head)->tqh_last = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ + (head)->tqh_first->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = &(elm)->field.tqe_next; \ + (head)->tqh_first = (elm); \ + (elm)->field.tqe_prev = &(head)->tqh_first; \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.tqe_next = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ + (elm)->field.tqe_next->field.tqe_prev = \ + &(elm)->field.tqe_next; \ + else \ + (head)->tqh_last = 
&(elm)->field.tqe_next; \ + (listelm)->field.tqe_next = (elm); \ + (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ +} while (0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + (elm)->field.tqe_next = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ +} while (0) + +#define TAILQ_REMOVE(head, elm, field) do { \ + if (((elm)->field.tqe_next) != NULL) \ + (elm)->field.tqe_next->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ +} while (0) + +/* + * Circular queue definitions. + */ +#define CIRCLEQ_HEAD(name, type) \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} + +#define CIRCLEQ_ENTRY(type) \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} + +/* + * Circular queue functions. 
+ */ +#define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) + +#define CIRCLEQ_FIRST(head) ((head)->cqh_first) + +#define CIRCLEQ_FOREACH(var, head, field) \ + for((var) = (head)->cqh_first; \ + (var) != (void *)(head); \ + (var) = (var)->field.cqe_next) + +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = (void *)(head); \ + (head)->cqh_last = (void *)(head); \ +} while (0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = (void *)(head); \ + if ((head)->cqh_last == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.cqe_next = (void *)(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ +} while (0) + +#define CIRCLEQ_LAST(head) ((head)->cqh_last) + +#define CIRCLEQ_NEXT(elm,field) ((elm)->field.cqe_next) + +#define CIRCLEQ_PREV(elm,field) ((elm)->field.cqe_prev) + +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + if 
((elm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + (elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ +} while (0) + +#ifdef KERNEL + +#if NOTFB31 + +/* + * XXX insque() and remque() are an old way of handling certain queues. + * They bogusly assumes that all queue heads look alike. + */ + +struct quehead { + struct quehead *qh_link; + struct quehead *qh_rlink; +}; + +#ifdef __GNUC__ + +static __inline void +insque(void *a, void *b) +{ + struct quehead *element = a, *head = b; + + element->qh_link = head->qh_link; + element->qh_rlink = head; + head->qh_link = element; + element->qh_link->qh_rlink = element; +} + +static __inline void +remque(void *a) +{ + struct quehead *element = a; + + element->qh_link->qh_rlink = element->qh_rlink; + element->qh_rlink->qh_link = element->qh_link; + element->qh_rlink = 0; +} + +#else /* !__GNUC__ */ + +void insque __P((void *a, void *b)); +void remque __P((void *a)); + +#endif /* __GNUC__ */ + +#endif +#endif /* KERNEL */ + +#endif /* !_SYS_QUEUE_H_ */ diff --git a/bsd/sys/reboot.h b/bsd/sys/reboot.h new file mode 100644 index 000000000..b7e8a53f2 --- /dev/null +++ b/bsd/sys/reboot.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1988, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)reboot.h 8.3 (Berkeley) 12/13/94 + */ + +#ifndef _SYS_REBOOT_H_ +#define _SYS_REBOOT_H_ + +#ifdef KERNEL_BUILD +#include +#endif /* KERNEL_BUILD */ + +/* + * Arguments to reboot system call. + */ + +#define RB_AUTOBOOT 0 /* flags for system auto-booting itself */ + +#define RB_ASKNAME 0x01 /* ask for file name to reboot from */ +#define RB_SINGLE 0x02 /* reboot to single user only */ +#define RB_NOSYNC 0x04 /* dont sync before reboot */ +#define RB_KDB 0x04 /* load kernel debugger */ +#define RB_HALT 0x08 /* don't reboot, just halt */ +#define RB_INITNAME 0x10 /* name given for /etc/init */ +#define RB_DFLTROOT 0x20 /* use compiled-in rootdev */ +#define RB_ALTBOOT 0x40 /* use /boot.old vs /boot */ +#define RB_UNIPROC 0x80 /* don't start slaves */ +#define RB_PANIC 0 /* reboot due to panic */ +#define RB_BOOT 1 /* reboot due to boot() */ + +/* + * Constants for converting boot-style device number to type, + * adaptor (uba, mba, etc), unit number and partition number. + * Type (== major device number) is in the low byte + * for backward compatibility. Except for that of the "magic + * number", each mask applies to the shifted value. 
+ * Format: + * (4) (4) (4) (4) (8) (8) + * -------------------------------- + * |MA | AD| CT| UN| PART | TYPE | + * -------------------------------- + */ +#define B_ADAPTORSHIFT 24 +#define B_ADAPTORMASK 0x0f +#define B_ADAPTOR(val) (((val) >> B_ADAPTORSHIFT) & B_ADAPTORMASK) +#define B_CONTROLLERSHIFT 20 +#define B_CONTROLLERMASK 0xf +#define B_CONTROLLER(val) (((val)>>B_CONTROLLERSHIFT) & B_CONTROLLERMASK) +#define B_UNITSHIFT 16 +#define B_UNITMASK 0xff +#define B_UNIT(val) (((val) >> B_UNITSHIFT) & B_UNITMASK) +#define B_PARTITIONSHIFT 8 +#define B_PARTITIONMASK 0xff +#define B_PARTITION(val) (((val) >> B_PARTITIONSHIFT) & B_PARTITIONMASK) +#define B_TYPESHIFT 0 +#define B_TYPEMASK 0xff +#define B_TYPE(val) (((val) >> B_TYPESHIFT) & B_TYPEMASK) +#define B_MAGICMASK 0xf0000000 +#define B_DEVMAGIC 0xa0000000 + +#define MAKEBOOTDEV(type, adaptor, controller, unit, partition) \ + (((type) << B_TYPESHIFT) | ((adaptor) << B_ADAPTORSHIFT) | \ + ((controller) << B_CONTROLLERSHIFT) | ((unit) << B_UNITSHIFT) | \ + ((partition) << B_PARTITIONSHIFT) | B_DEVMAGIC) + +#include <machine/reboot.h> + +#endif /* _SYS_REBOOT_H_ */ diff --git a/bsd/sys/resource.h b/bsd/sys/resource.h new file mode 100644 index 000000000..1087aa342 --- /dev/null +++ b/bsd/sys/resource.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)resource.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_RESOURCE_H_ +#define _SYS_RESOURCE_H_ + +/* + * Process priority specifications to get/setpriority. + */ +#define PRIO_MIN -20 +#define PRIO_MAX 20 + +#define PRIO_PROCESS 0 +#define PRIO_PGRP 1 +#define PRIO_USER 2 + +/* + * Resource utilization information. + */ + +#define RUSAGE_SELF 0 +#define RUSAGE_CHILDREN -1 + +struct rusage { + struct timeval ru_utime; /* user time used */ + struct timeval ru_stime; /* system time used */ + long ru_maxrss; /* max resident set size */ +#define ru_first ru_ixrss + long ru_ixrss; /* integral shared memory size */ + long ru_idrss; /* integral unshared data " */ + long ru_isrss; /* integral unshared stack " */ + long ru_minflt; /* page reclaims */ + long ru_majflt; /* page faults */ + long ru_nswap; /* swaps */ + long ru_inblock; /* block input operations */ + long ru_oublock; /* block output operations */ + long ru_msgsnd; /* messages sent */ + long ru_msgrcv; /* messages received */ + long ru_nsignals; /* signals received */ + long ru_nvcsw; /* voluntary context switches */ + long ru_nivcsw; /* involuntary " */ +#define ru_last ru_nivcsw +}; + +/* + * Resource limits + */ +#define RLIMIT_CPU 0 /* cpu time in milliseconds */ +#define RLIMIT_FSIZE 1 /* maximum file size */ +#define RLIMIT_DATA 2 /* data size */ +#define RLIMIT_STACK 3 /* stack size */ +#define RLIMIT_CORE 4 /* core file size */ +#define RLIMIT_RSS 5 /* resident set size */ 
+#define RLIMIT_MEMLOCK 6 /* locked-in-memory address space */ +#define RLIMIT_NPROC 7 /* number of processes */ +#define RLIMIT_NOFILE 8 /* number of open files */ + +#define RLIM_NLIMITS 9 /* number of resource limits */ + +#define RLIM_INFINITY (((u_quad_t)1 << 63) - 1) + +struct orlimit { + int32_t rlim_cur; /* current (soft) limit */ + int32_t rlim_max; /* maximum value for rlim_cur */ +}; + +struct rlimit { + rlim_t rlim_cur; /* current (soft) limit */ + rlim_t rlim_max; /* maximum value for rlim_cur */ +}; + +/* Load average structure. */ +struct loadavg { + fixpt_t ldavg[3]; + long fscale; +}; + +#ifdef KERNEL +extern struct loadavg averunnable; +#define LSCALE 1000 /* scaling for "fixed point" arithmetic */ +#else +#include <sys/cdefs.h> + +__BEGIN_DECLS +int getpriority __P((int, int)); +int getrlimit __P((int, struct rlimit *)); +int getrusage __P((int, struct rusage *)); +int setpriority __P((int, int, int)); +int setrlimit __P((int, const struct rlimit *)); +__END_DECLS + +#endif /* KERNEL */ +#endif /* !_SYS_RESOURCE_H_ */ diff --git a/bsd/sys/resourcevar.h b/bsd/sys/resourcevar.h new file mode 100644 index 000000000..4865dedf7 --- /dev/null +++ b/bsd/sys/resourcevar.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)resourcevar.h 8.4 (Berkeley) 1/9/95 + */ + +#ifndef _SYS_RESOURCEVAR_H_ +#define _SYS_RESOURCEVAR_H_ + +/* + * Kernel per-process accounting / statistics + * (not necessarily resident except when running). + */ +struct pstats { +#define pstat_startzero p_ru + struct rusage p_ru; /* stats for this proc */ + struct rusage p_cru; /* sum of stats for reaped children */ + + struct uprof { /* profile arguments */ + struct uprof *pr_next; /* multiple prof buffers allowed */ + caddr_t pr_base; /* buffer base */ + u_long pr_size; /* buffer size */ + u_long pr_off; /* pc offset */ + u_long pr_scale; /* pc scaling */ + u_long pr_addr; /* temp storage for addr until AST */ + u_long pr_ticks; /* temp storage for ticks until AST */ + } p_prof; +#define pstat_endzero pstat_startcopy + +#define pstat_startcopy p_timer + struct itimerval p_timer[3]; /* virtual-time timers */ +#define pstat_endcopy p_start + struct timeval p_start; /* starting time */ +}; + +/* + * Kernel shareable process resource limits. Because this structure + * is moderately large but changes infrequently, it is normally + * shared copy-on-write after forks. If a group of processes + * ("threads") share modifications, the PL_SHAREMOD flag is set, + * and a copy must be made for the child of a new fork that isn't + * sharing modifications to the limits. 
+ */ +struct plimit { + struct rlimit pl_rlimit[RLIM_NLIMITS]; +#define PL_SHAREMOD 0x01 /* modifications are shared */ + int p_lflags; + int p_refcnt; /* number of references */ +}; + +/* add user profiling from AST */ +#define ADDUPROF(p) \ + addupc_task(p, \ + (p)->p_stats->p_prof.pr_addr, (p)->p_stats->p_prof.pr_ticks) + +#ifdef KERNEL +void addupc_intr __P((struct proc *p, u_long pc, u_int ticks)); +void addupc_task __P((struct proc *p, u_long pc, u_int ticks)); +void calcru __P((struct proc *p, struct timeval *up, struct timeval *sp, + struct timeval *ip)); +struct plimit + *limcopy __P((struct plimit *lim)); +void ruadd __P((struct rusage *ru, struct rusage *ru2)); +#endif + +#endif /* !_SYS_RESOURCEVAR_H_ */ diff --git a/bsd/sys/select.h b/bsd/sys/select.h new file mode 100644 index 000000000..10cf8b78c --- /dev/null +++ b/bsd/sys/select.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)select.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_SELECT_H_ +#define _SYS_SELECT_H_ + +/* + * Used to maintain information about processes that wish to be + * notified when I/O becomes possible. 
+ */ +struct selinfo { + void *si_thread; /* thread to be notified */ + short si_flags; /* see below */ +}; +#define SI_COLL 0x0001 /* collision occurred */ + +#ifdef KERNEL +struct proc; + +void selrecord __P((struct proc *selector, struct selinfo *)); +void selwakeup __P((struct selinfo *)); +void selthreadclear __P((struct selinfo *)); +#endif + +#endif /* !_SYS_SELECT_H_ */ diff --git a/bsd/sys/sem.h b/bsd/sys/sem.h new file mode 100644 index 000000000..dce2afd8d --- /dev/null +++ b/bsd/sys/sem.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: sem.h,v 1.5 1994/06/29 06:45:15 cgd Exp $ */ + +/* + * SVID compatible sem.h file + * + * Author: Daniel Boulet + */ + +#ifndef _SYS_SEM_H_ +#define _SYS_SEM_H_ + +#include + +struct sem { + u_short semval; /* semaphore value */ + pid_t sempid; /* pid of last operation */ + u_short semncnt; /* # awaiting semval > cval */ + u_short semzcnt; /* # awaiting semval = 0 */ +}; + +struct semid_ds { + struct ipc_perm sem_perm; /* operation permission struct */ + struct sem *sem_base; /* pointer to first semaphore in set */ + u_short sem_nsems; /* number of sems in set */ + time_t sem_otime; /* last operation time */ + long sem_pad1; /* SVABI/386 says I need this here */ + time_t sem_ctime; /* last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + long sem_pad2; /* SVABI/386 says I need this here */ + long sem_pad3[4]; /* SVABI/386 says I need this here */ +}; + +/* + * semop's sops parameter structure + */ +struct sembuf { + u_short sem_num; /* semaphore # */ + short sem_op; /* semaphore operation */ + short sem_flg; /* operation flags */ +}; +#define SEM_UNDO 010000 + +#define MAX_SOPS 5 /* maximum # of sembuf's per semop call */ + +/* + * semctl's arg parameter structure + */ +union semun { + int val; /* value for SETVAL */ + struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */ + u_short *array; /* array for GETALL & SETALL */ +}; + +/* + * commands for semctl + */ +#define GETNCNT 3 /* Return the value of semncnt {READ} */ +#define GETPID 4 /* Return the value of sempid {READ} */ +#define GETVAL 5 /* Return the value of semval {READ} */ +#define GETALL 6 /* Return semvals into arg.array {READ} */ +#define GETZCNT 7 /* Return the value of semzcnt {READ} */ +#define SETVAL 8 /* Set the value of semval to arg.val {ALTER} */ +#define SETALL 9 /* Set semvals from arg.array {ALTER} */ + +/* + * Permissions + */ +#define SEM_A 0200 /* alter permission */ +#define SEM_R 0400 /* read 
permission */ + +#ifdef KERNEL +/* + * Kernel implementation stuff + */ +#define SEMVMX 32767 /* semaphore maximum value */ +#define SEMAEM 16384 /* adjust on exit max value */ + + +/* + * Undo structure (one per process) + */ +struct sem_undo { + struct sem_undo *un_next; /* ptr to next active undo structure */ + struct proc *un_proc; /* owner of this structure */ + short un_cnt; /* # of active entries */ + struct undo { + short un_adjval; /* adjust on exit values */ + short un_num; /* semaphore # */ + int un_id; /* semid */ + } un_ent[1]; /* undo entries */ +}; + +/* + * semaphore info struct + */ +struct seminfo { + int semmap, /* # of entries in semaphore map */ + semmni, /* # of semaphore identifiers */ + semmns, /* # of semaphores in system */ + semmnu, /* # of undo structures in system */ + semmsl, /* max # of semaphores per id */ + semopm, /* max # of operations per semop call */ + semume, /* max # of undo entries per process */ + semusz, /* size in bytes of undo structure */ + semvmx, /* semaphore maximum value */ + semaem; /* adjust on exit max value */ +}; +extern struct seminfo seminfo; + +/* internal "mode" bits */ +#define SEM_ALLOC 01000 /* semaphore is allocated */ +#define SEM_DEST 02000 /* semaphore will be destroyed on last detach */ + +/* + * Configuration parameters + */ +#ifndef SEMMNI +#define SEMMNI 10 /* # of semaphore identifiers */ +#endif +#ifndef SEMMNS +#define SEMMNS 60 /* # of semaphores in system */ +#endif +#ifndef SEMUME +#define SEMUME 10 /* max # of undo entries per process */ +#endif +#ifndef SEMMNU +#define SEMMNU 30 /* # of undo structures in system */ +#endif + +/* shouldn't need tuning */ +#ifndef SEMMAP +#define SEMMAP 30 /* # of entries in semaphore map */ +#endif +#ifndef SEMMSL +#define SEMMSL SEMMNS /* max # of semaphores per id */ +#endif +#ifndef SEMOPM +#define SEMOPM 100 /* max # of operations per semop call */ +#endif + +/* + * Due to the way semaphore memory is allocated, we have to ensure that + * SEMUSZ is 
properly aligned. + */ + +#define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1)) + +/* actual size of an undo structure */ +#define SEMUSZ SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME])) + +extern struct semid_ds *sema; /* semaphore id pool */ +extern struct sem *sem; /* semaphore pool */ +extern int *semu; /* undo structure pool */ + +/* + * Macro to find a particular sem_undo vector + */ +#define SEMU(ix) ((struct sem_undo *)(((intptr_t)semu)+ix * seminfo.semusz)) + +/* + * Process sem_undo vectors at proc exit. + */ +void semexit __P((struct proc *p)); + +/* + * Parameters to the semconfig system call + */ +typedef enum { + SEM_CONFIG_FREEZE, /* Freeze the semaphore facility. */ + SEM_CONFIG_THAW /* Thaw the semaphore facility. */ +} semconfig_ctl_t; +#endif /* KERNEL */ + +#ifndef KERNEL +#include <sys/cdefs.h> + +__BEGIN_DECLS +int semsys __P((int, ...)); +int semctl __P((int, int, int, ...)); +int semget __P((key_t, int, int)); +int semop __P((int, struct sembuf *,unsigned)); +__END_DECLS +#endif /* !KERNEL */ + +#endif /* !_SYS_SEM_H_ */ diff --git a/bsd/sys/semaphore.h b/bsd/sys/semaphore.h new file mode 100644 index 000000000..16e0dfc5c --- /dev/null +++ b/bsd/sys/semaphore.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* @(#)semaphore.h 1.0 2/29/00 */ + + + +/* + * semaphore.h - POSIX semaphores + * + * HISTORY + * 29-Feb-00 A.Ramesh at Apple + * Created for Mac OS X + */ + +#ifndef _SYS_SEMAPHORE_H_ +#define _SYS_SEMAPHORE_H_ + +typedef int sem_t; +/* this should go in limits.h> */ +#define SEM_VALUE_MAX 32767 +#define SEM_FAILED -1 + +#ifndef KERNEL +int sem_close(sem_t *); +int sem_destroy(sem_t *); +int sem_getvalue(sem_t *, int *); +int sem_init(sem_t *, int, unsigned int); +sem_t * sem_open(const char *, int, ...); +int sem_post(sem_t *); +int sem_trywait(sem_t *); +int sem_unlink(const char *); +int sem_wait(sem_t *); + +#endif /* KERNEL */ + +#endif /* _SYS_SEMAPHORE_H_ */ diff --git a/bsd/sys/shm.h b/bsd/sys/shm.h new file mode 100644 index 000000000..a1a10f7d5 --- /dev/null +++ b/bsd/sys/shm.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: shm.h,v 1.15 1994/06/29 06:45:17 cgd Exp $ */ + +/* + * Copyright (c) 1994 Adam Glass + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Adam Glass. + * 4. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * As defined+described in "X/Open System Interfaces and Headers" + * Issue 4, p. XXX + */ + +#ifndef _SYS_SHM_H_ +#define _SYS_SHM_H_ + +#include +#include + +#define SHM_RDONLY 010000 /* Attach read-only (else read-write) */ +#define SHM_RND 020000 /* Round attach address to SHMLBA */ +#define SHMLBA NBPG /* Segment low boundary address multiple */ + +/* "official" access mode definitions; somewhat braindead since you have + to specify (SHM_* >> 3) for group and (SHM_* >> 6) for world permissions */ +#define SHM_R (IPC_R) +#define SHM_W (IPC_W) + + +struct shmid_ds { + struct ipc_perm shm_perm; /* operation permission structure */ + int shm_segsz; /* size of segment in bytes */ + pid_t shm_lpid; /* process ID of last shared memory op */ + pid_t shm_cpid; /* process ID of creator */ + short shm_nattch; /* number of current attaches */ + time_t shm_atime; /* time of last shmat() */ + time_t shm_dtime; /* time of last shmdt() */ + time_t shm_ctime; /* time of last change by shmctl() */ + void *shm_internal; /* sysv stupidity */ +}; + +#ifdef KERNEL + +/* + * System 5 style catch-all structure for shared memory constants that + * might be of interest to user programs. Do we really want/need this? 
+ */ +struct shminfo { + int shmmax, /* max shared memory segment size (bytes) */ + shmmin, /* min shared memory segment size (bytes) */ + shmmni, /* max number of shared memory identifiers */ + shmseg, /* max shared memory segments per process */ + shmall; /* max amount of shared memory (pages) */ +}; +extern struct shminfo shminfo; +extern struct shmid_ds *shmsegs; + +struct proc; + +void shmexit __P((struct proc *)); +void shmfork __P((struct proc *, struct proc *)); +#else /* !KERNEL */ + +#include + +__BEGIN_DECLS +int shmsys __P((int, ...)); +void *shmat __P((int, void *, int)); +int shmget __P((key_t, int, int)); +int shmctl __P((int, int, struct shmid_ds *)); +int shmdt __P((void *)); +__END_DECLS + +#endif /* !KERNEL */ + +#endif /* !_SYS_SHM_H_ */ diff --git a/bsd/sys/signal.h b/bsd/sys/signal.h new file mode 100644 index 000000000..136b3d1f0 --- /dev/null +++ b/bsd/sys/signal.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)signal.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_SIGNAL_H_ +#define _SYS_SIGNAL_H_ + +#if !defined(_ANSI_SOURCE) && !defined(_POSIX_SOURCE) +#define NSIG 32 /* counting 0; could be 33 (mask is 1-32) */ +#endif + +#include /* sigcontext; codes for SIGILL, SIGFPE */ + +#define SIGHUP 1 /* hangup */ +#define SIGINT 2 /* interrupt */ +#define SIGQUIT 3 /* quit */ +#define SIGILL 4 /* illegal instruction (not reset when caught) */ +#if !defined(_POSIX_SOURCE) +#define SIGTRAP 5 /* trace trap (not reset when caught) */ +#endif +#define SIGABRT 6 /* abort() */ +#if !defined(_POSIX_SOURCE) +#define SIGIOT SIGABRT /* compatibility */ +#define SIGEMT 7 /* EMT instruction */ +#endif +#define SIGFPE 8 /* floating point exception */ +#define SIGKILL 9 /* kill (cannot be caught or ignored) */ +#if !defined(_POSIX_SOURCE) +#define SIGBUS 10 /* bus error */ +#endif +#define SIGSEGV 11 /* segmentation violation */ +#if !defined(_POSIX_SOURCE) +#define SIGSYS 12 /* bad argument to system call */ +#endif +#define SIGPIPE 13 /* write on a pipe with no one to read it */ +#define SIGALRM 14 /* alarm clock */ +#define SIGTERM 15 /* software termination signal from kill */ +#if !defined(_POSIX_SOURCE) +#define SIGURG 16 /* urgent condition on IO channel */ +#endif +#define SIGSTOP 17 /* sendable stop signal not from tty */ +#define SIGTSTP 18 /* stop signal from tty */ +#define SIGCONT 19 /* continue a stopped process */ +#define SIGCHLD 20 /* to 
parent on child stop or exit */ +#define SIGTTIN 21 /* to readers pgrp upon background tty read */ +#define SIGTTOU 22 /* like TTIN for output if (tp->t_local&LTOSTOP) */ +#if !defined(_POSIX_SOURCE) +#define SIGIO 23 /* input/output possible signal */ +#define SIGXCPU 24 /* exceeded CPU time limit */ +#define SIGXFSZ 25 /* exceeded file size limit */ +#define SIGVTALRM 26 /* virtual time alarm */ +#define SIGPROF 27 /* profiling time alarm */ +#define SIGWINCH 28 /* window size changes */ +#define SIGINFO 29 /* information request */ +#endif +#define SIGUSR1 30 /* user defined signal 1 */ +#define SIGUSR2 31 /* user defined signal 2 */ + +#if defined(_ANSI_SOURCE) || defined(__cplusplus) +/* + * Language spec sez we must list exactly one parameter, even though we + * actually supply three. Ugh! + */ +#define SIG_DFL (void (*)(int))0 +#define SIG_IGN (void (*)(int))1 +#define SIG_ERR (void (*)(int))-1 +#else +#define SIG_DFL (void (*)())0 +#define SIG_IGN (void (*)())1 +#define SIG_ERR (void (*)())-1 +#endif + +#ifndef _ANSI_SOURCE +typedef unsigned int sigset_t; + +/* + * Signal vector "template" used in sigaction call. 
+ */ +struct sigaction { +#if defined(__cplusplus) + void (*sa_handler)(int); /* signal handler */ +#else + void (*sa_handler)(); /* signal handler */ +#endif /* __cplusplus */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ +}; +#if !defined(_POSIX_SOURCE) +#define SA_ONSTACK 0x0001 /* take signal on signal stack */ +#define SA_RESTART 0x0002 /* restart system on signal return */ +#define SA_DISABLE 0x0004 /* disable taking signals on alternate stack */ +#define SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ +#endif +#define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ + +/* + * Flags for sigprocmask: + */ +#define SIG_BLOCK 1 /* block specified signal set */ +#define SIG_UNBLOCK 2 /* unblock specified signal set */ +#define SIG_SETMASK 3 /* set specified signal set */ + +#if !defined(_POSIX_SOURCE) +#include +typedef void (*sig_t) __P((int)); /* type of signal function */ + +/* + * Structure used in sigaltstack call. + */ +struct sigaltstack { + char *ss_sp; /* signal stack base */ + int ss_size; /* signal stack length */ + int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ +}; +#define MINSIGSTKSZ 8192 /* minimum allowable stack */ +#define SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended stack size */ + +/* + * 4.3 compatibility: + * Signal vector "template" used in sigvec call. + */ +struct sigvec { + void (*sv_handler)(); /* signal handler */ + int sv_mask; /* signal mask to apply */ + int sv_flags; /* see signal options below */ +}; + +#define SV_ONSTACK SA_ONSTACK +#define SV_INTERRUPT SA_RESTART /* same bit, opposite sense */ +#define sv_onstack sv_flags /* isn't compatibility wonderful! */ + +/* + * Structure used in sigstack call. + */ +struct sigstack { + char *ss_sp; /* signal stack pointer */ + int ss_onstack; /* current status */ +}; + +/* + * Macro for converting signal number to a mask suitable for + * sigblock(). 
+ */ +#define sigmask(m) (1 << ((m)-1)) + +#ifdef KERNEL + /* + * signals delivered on a per-thread basis. + */ + #define threadmask (sigmask(SIGILL)|sigmask(SIGTRAP)|\ + sigmask(SIGIOT)|sigmask(SIGEMT)|\ + sigmask(SIGFPE)|sigmask(SIGBUS)|\ + sigmask(SIGSEGV)|sigmask(SIGSYS)|\ + sigmask(SIGPIPE)) +#endif /* KERNEL */ + +#define BADSIG SIG_ERR + +#endif /* !_POSIX_SOURCE */ +#endif /* !_ANSI_SOURCE */ + +/* + * For historical reasons; programs expect signal's return value to be + * defined by . + */ +__BEGIN_DECLS +void (*signal __P((int, void (*) __P((int))))) __P((int)); +__END_DECLS +#endif /* !_SYS_SIGNAL_H_ */ diff --git a/bsd/sys/signalvar.h b/bsd/sys/signalvar.h new file mode 100644 index 000000000..0b242758d --- /dev/null +++ b/bsd/sys/signalvar.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)signalvar.h 8.3 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_SIGNALVAR_H_ /* tmp for user.h */ +#define _SYS_SIGNALVAR_H_ + +/* + * Kernel signal definitions and data structures, + * not exported to user programs. 
+ */ + +/* + * Process signal actions and state, needed only within the process + * (not necessarily resident). + */ +struct sigacts { + sig_t ps_sigact[NSIG]; /* disposition of signals */ + sigset_t ps_catchmask[NSIG]; /* signals to be blocked */ + sigset_t ps_sigonstack; /* signals to take on sigstack */ + sigset_t ps_sigintr; /* signals that interrupt syscalls */ + sigset_t ps_oldmask; /* saved mask from before sigpause */ + int ps_flags; /* signal flags, below */ + struct sigaltstack ps_sigstk; /* sp & on stack state variable */ + int ps_sig; /* for core dump/debugger XXX */ + int ps_code; /* for core dump/debugger XXX */ + int ps_addr; /* for core dump/debugger XXX */ + sigset_t ps_usertramp; /* SunOS compat; libc sigtramp XXX */ +}; + +/* signal flags */ +#define SAS_OLDMASK 0x01 /* need to restore mask before pause */ +#define SAS_ALTSTACK 0x02 /* have alternate signal stack */ + +/* additional signal action values, used only temporarily/internally */ +#define SIG_CATCH (void (*)())2 +#define SIG_HOLD (void (*)())3 + + +#define pgsigio(pgid, sig, notused) \ + { \ + struct proc *p; \ + if (pgid < 0) \ + gsignal(-(pgid), sig);\ + else if (pgid > 0 && (p = pfind(pgid)) != 0) \ + psignal(p, sig); \ +} + + +/* + * get signal action for process and signal; currently only for current process + */ +#define SIGACTION(p, sig) (p->p_sigacts->ps_sigact[(sig)]) + +/* + * Determine signal that should be delivered to process p, the current + * process, 0 if none. If there is a pending stop signal with default + * action, the process stops in issig(). + */ + +#define HAVE_SIGNALS(p) \ + ((p)->p_siglist \ + & ~((((p)->p_sigmask) \ + | (((p)->p_flag & P_TRACED) ? 0 : (p)->p_sigignore)) \ + & ~sigcantmask)) + +/* + * Check for per-process and per thread signals. + */ +#define SHOULDissignal(p,uthreadp) \ + (((p)->p_siglist | (uthreadp)->uu_sig) \ + & ~((((p)->p_sigmask) \ + | (((p)->p_flag & P_TRACED) ? 
0 : (p)->p_sigignore)) \ + & ~sigcantmask)) + +/* + * Check for signals and per-thread signals. + * Use in trap() and syscall() before + * exiting kernel. + */ +#define CHECK_SIGNALS(p, thread, uthreadp) \ + (!thread_should_halt(thread) \ + && (SHOULDissignal(p,uthreadp))) + + +/* + * Clear a pending signal from a process. + */ +#define CLRSIG(p, sig) { (p)->p_siglist &= ~sigmask(sig); } + +/* + * Signal properties and actions. + * The array below categorizes the signals and their default actions + * according to the following properties: + */ +#define SA_KILL 0x01 /* terminates process by default */ +#define SA_CORE 0x02 /* ditto and coredumps */ +#define SA_STOP 0x04 /* suspend process */ +#define SA_TTYSTOP 0x08 /* ditto, from tty */ +#define SA_IGNORE 0x10 /* ignore by default */ +#define SA_CONT 0x20 /* continue if suspended */ +#define SA_CANTMASK 0x40 /* non-maskable, catchable */ + +#ifdef SIGPROP +int sigprop[NSIG + 1] = { + 0, /* unused */ + SA_KILL, /* SIGHUP */ + SA_KILL, /* SIGINT */ + SA_KILL|SA_CORE, /* SIGQUIT */ + SA_KILL|SA_CORE, /* SIGILL */ + SA_KILL|SA_CORE, /* SIGTRAP */ + SA_KILL|SA_CORE, /* SIGABRT */ + SA_KILL|SA_CORE, /* SIGEMT */ + SA_KILL|SA_CORE, /* SIGFPE */ + SA_KILL, /* SIGKILL */ + SA_KILL|SA_CORE, /* SIGBUS */ + SA_KILL|SA_CORE, /* SIGSEGV */ + SA_KILL|SA_CORE, /* SIGSYS */ + SA_KILL, /* SIGPIPE */ + SA_KILL, /* SIGALRM */ + SA_KILL, /* SIGTERM */ + SA_IGNORE, /* SIGURG */ + SA_STOP, /* SIGSTOP */ + SA_STOP|SA_TTYSTOP, /* SIGTSTP */ + SA_IGNORE|SA_CONT, /* SIGCONT */ + SA_IGNORE, /* SIGCHLD */ + SA_STOP|SA_TTYSTOP, /* SIGTTIN */ + SA_STOP|SA_TTYSTOP, /* SIGTTOU */ + SA_IGNORE, /* SIGIO */ + SA_KILL, /* SIGXCPU */ + SA_KILL, /* SIGXFSZ */ + SA_KILL, /* SIGVTALRM */ + SA_KILL, /* SIGPROF */ + SA_IGNORE, /* SIGWINCH */ + SA_IGNORE, /* SIGINFO */ + SA_KILL, /* SIGUSR1 */ + SA_KILL, /* SIGUSR2 */ +}; + +#define contsigmask (sigmask(SIGCONT)) +#define stopsigmask (sigmask(SIGSTOP) | sigmask(SIGTSTP) | \ + sigmask(SIGTTIN) | 
sigmask(SIGTTOU)) + +#endif /* SIGPROP */ + +#define sigcantmask (sigmask(SIGKILL) | sigmask(SIGSTOP)) + +#ifdef KERNEL +/* + * Machine-independent functions: + */ +int coredump __P((struct proc *p)); +void execsigs __P((struct proc *p)); +void gsignal __P((int pgid, int sig)); +int issignal __P((struct proc *p)); +int CURSIG __P((struct proc *p)); +int clear_sigbits __P((struct proc *p, int bit)); +void pgsignal __P((struct pgrp *pgrp, int sig, int checkctty)); +void postsig __P((int sig)); +void psignal __P((struct proc *p, int sig)); +void siginit __P((struct proc *p)); +void trapsignal __P((struct proc *p, int sig, unsigned code)); +void pt_setrunnable __P((struct proc *p)); + +/* + * Machine-dependent functions: + */ +void sendsig __P((struct proc *, sig_t action, int sig, int returnmask, u_long code)); +#endif /* KERNEL */ +#endif /* !_SYS_SIGNALVAR_H_ */ diff --git a/bsd/sys/socket.h b/bsd/sys/socket.h new file mode 100644 index 000000000..b14277293 --- /dev/null +++ b/bsd/sys/socket.h @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1985, 1986, 1988, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)socket.h 8.4 (Berkeley) 2/21/94 + */ + +#ifndef _SYS_SOCKET_H_ +#define _SYS_SOCKET_H_ + + +/* + * Definitions related to sockets: types, address families, options. + */ + +/* + * Types + */ +#define SOCK_STREAM 1 /* stream socket */ +#define SOCK_DGRAM 2 /* datagram socket */ +#define SOCK_RAW 3 /* raw-protocol interface */ +#define SOCK_RDM 4 /* reliably-delivered message */ +#define SOCK_SEQPACKET 5 /* sequenced packet stream */ + +/* + * Option flags per-socket. 
+ */ +#define SO_DEBUG 0x0001 /* turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_REUSEADDR 0x0004 /* allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_DONTROUTE 0x0010 /* just use interface addresses */ +#define SO_BROADCAST 0x0020 /* permit sending of broadcast msgs */ +#define SO_USELOOPBACK 0x0040 /* bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present */ +#define SO_OOBINLINE 0x0100 /* leave received OOB data in line */ +#define SO_REUSEPORT 0x0200 /* allow local address & port reuse */ +#define SO_TIMESTAMP 0x0400 /* timestamp received dgram traffic */ + +#define SO_DONTTRUNC 0x2000 /* APPLE: Retain unread data */ + /* (ATOMIC proto) */ +#define SO_WANTMORE 0x4000 /* APPLE: Give hint when more data ready */ +#define SO_WANTOOBFLAG 0x8000 /* APPLE: Want OOB in MSG_FLAG on receive */ + +/* + * Additional options, not kept in so_options. + */ +#define SO_SNDBUF 0x1001 /* send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ +#define SO_PRIVSTATE 0x1009 /* get/deny privileged state */ +#define SO_NREAD 0x1020 /* APPLE: get 1st-packet byte count */ +#define SO_NKE 0x1021 /* APPLE: Install socket-level NKE */ + +/* + * Structure used for manipulating linger option. + */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time */ +}; + +/* + * Level number for (get/set)sockopt() to apply to socket itself. + */ +#define SOL_SOCKET 0xffff /* options for socket level */ + +/* + * Address families. 
+ */ +#define AF_UNSPEC 0 /* unspecified */ +#define AF_LOCAL 1 /* local to host (pipes, portals) */ +#define AF_UNIX AF_LOCAL /* backward compatibility */ +#define AF_INET 2 /* internetwork: UDP, TCP, etc. */ +#define AF_IMPLINK 3 /* arpanet imp addresses */ +#define AF_PUP 4 /* pup protocols: e.g. BSP */ +#define AF_CHAOS 5 /* mit CHAOS protocols */ +#define AF_NS 6 /* XEROX NS protocols */ +#define AF_ISO 7 /* ISO protocols */ +#define AF_OSI AF_ISO +#define AF_ECMA 8 /* european computer manufacturers */ +#define AF_DATAKIT 9 /* datakit protocols */ +#define AF_CCITT 10 /* CCITT protocols, X.25 etc */ +#define AF_SNA 11 /* IBM SNA */ +#define AF_DECnet 12 /* DECnet */ +#define AF_DLI 13 /* DEC Direct data link interface */ +#define AF_LAT 14 /* LAT */ +#define AF_HYLINK 15 /* NSC Hyperchannel */ +#define AF_APPLETALK 16 /* Apple Talk */ +#define AF_ROUTE 17 /* Internal Routing Protocol */ +#define AF_LINK 18 /* Link layer interface */ +#define pseudo_AF_XTP 19 /* eXpress Transfer Protocol (no AF) */ +#define AF_COIP 20 /* connection-oriented IP, aka ST II */ +#define AF_CNT 21 /* Computer Network Technology */ +#define pseudo_AF_RTIP 22 /* Help Identify RTIP packets */ +#define AF_IPX 23 /* Novell Internet Protocol */ +#define AF_SIP 24 /* Simple Internet Protocol */ +#define pseudo_AF_PIP 25 /* Help Identify PIP packets */ + /* Sigh - The following 2 should */ + /* be maintained for MacOSX */ + /* binary compatibility */ +#define pseudo_AF_BLUE 26 /* Identify packets for Blue Box */ +#define AF_NDRV 27 /* Network Driver 'raw' access */ +#define AF_ISDN 28 /* Integrated Services Digital Network*/ +#define AF_E164 AF_ISDN /* CCITT E.164 recommendation */ +#define pseudo_AF_KEY 29 /* Internal key-management function */ +#define AF_INET6 30 /* IPv6 */ +#define AF_NATM 31 /* native ATM access */ +#define AF_SYSTEM 32 /* Kernel event messages */ +#define AF_NETBIOS 33 /* NetBIOS */ + +#define AF_MAX 34 + +/* + * Structure used by kernel to store most + * addresses. 
+ */ +struct sockaddr { + u_char sa_len; /* total length */ + u_char sa_family; /* address family */ + char sa_data[14]; /* actually longer; address value */ +}; +#define SOCK_MAXADDRLEN 255 /* longest possible addresses */ + +/* + * Structure used by kernel to pass protocol + * information in raw sockets. + */ +struct sockproto { + u_short sp_family; /* address family */ + u_short sp_protocol; /* protocol */ +}; + +#if 1 +/* + * bsd-api-new-02a: protocol-independent placeholder for socket addresses + */ +#define _SS_MAXSIZE 128 +#define _SS_ALIGNSIZE (sizeof(int64_t)) +#define _SS_PAD1SIZE (_SS_ALIGNSIZE - sizeof(u_char) * 2) +#define _SS_PAD2SIZE (_SS_MAXSIZE - sizeof(u_char) * 2 - \ + _SS_PAD1SIZE - _SS_ALIGNSIZE) + +struct sockaddr_storage { + u_char ss_len; /* address length */ + u_char ss_family; /* address family */ + char _ss_pad1[_SS_PAD1SIZE]; + int64_t _ss_align; /* force desired structure storage alignment */ + char _ss_pad2[_SS_PAD2SIZE]; +}; +#endif + +/* + * Protocol families, same as address families for now. 
+ */ +#define PF_UNSPEC AF_UNSPEC +#define PF_LOCAL AF_LOCAL +#define PF_UNIX PF_LOCAL /* backward compatibility */ +#define PF_INET AF_INET +#define PF_INET6 AF_INET6 +#define PF_IMPLINK AF_IMPLINK +#define PF_PUP AF_PUP +#define PF_CHAOS AF_CHAOS +#define PF_NS AF_NS +#define PF_ISO AF_ISO +#define PF_OSI AF_ISO +#define PF_ECMA AF_ECMA +#define PF_DATAKIT AF_DATAKIT +#define PF_CCITT AF_CCITT +#define PF_SNA AF_SNA +#define PF_DECnet AF_DECnet +#define PF_DLI AF_DLI +#define PF_LAT AF_LAT +#define PF_HYLINK AF_HYLINK +#define PF_APPLETALK AF_APPLETALK +#define PF_ROUTE AF_ROUTE +#define PF_LINK AF_LINK +#define PF_XTP pseudo_AF_XTP /* really just proto family, no AF */ +#define PF_COIP AF_COIP +#define PF_CNT AF_CNT +#define PF_SIP AF_SIP +#define PF_IPX AF_IPX /* same format as AF_NS */ +#define PF_RTIP pseudo_AF_RTIP /* same format as AF_INET */ +#define PF_PIP pseudo_AF_PIP + +#define PF_NDRV AF_NDRV +#define PF_ISDN AF_ISDN +#define PF_KEY pseudo_AF_KEY +#define PF_INET6 AF_INET6 +#define PF_NATM AF_NATM +#define PF_ATM AF_ATM +#define PF_SYSTEM AF_SYSTEM +#define PF_NETBIOS AF_NETBIOS + +#define PF_MAX AF_MAX + +/* + * Definitions for network related sysctl, CTL_NET. + * + * Second level is protocol family. + * Third level is protocol number. + * + * Further levels are defined by the individual families below. 
+ */ +#define NET_MAXID AF_MAX + +#define CTL_NET_NAMES { \ + { 0, 0 }, \ + { "local", CTLTYPE_NODE }, \ + { "inet", CTLTYPE_NODE }, \ + { "implink", CTLTYPE_NODE }, \ + { "pup", CTLTYPE_NODE }, \ + { "chaos", CTLTYPE_NODE }, \ + { "xerox_ns", CTLTYPE_NODE }, \ + { "iso", CTLTYPE_NODE }, \ + { "emca", CTLTYPE_NODE }, \ + { "datakit", CTLTYPE_NODE }, \ + { "ccitt", CTLTYPE_NODE }, \ + { "ibm_sna", CTLTYPE_NODE }, \ + { "decnet", CTLTYPE_NODE }, \ + { "dec_dli", CTLTYPE_NODE }, \ + { "lat", CTLTYPE_NODE }, \ + { "hylink", CTLTYPE_NODE }, \ + { "appletalk", CTLTYPE_NODE }, \ + { "route", CTLTYPE_NODE }, \ + { "link_layer", CTLTYPE_NODE }, \ + { "xtp", CTLTYPE_NODE }, \ + { "coip", CTLTYPE_NODE }, \ + { "cnt", CTLTYPE_NODE }, \ + { "rtip", CTLTYPE_NODE }, \ + { "ipx", CTLTYPE_NODE }, \ + { "sip", CTLTYPE_NODE }, \ + { "pip", CTLTYPE_NODE }, \ + { "isdn", CTLTYPE_NODE }, \ + { "key", CTLTYPE_NODE }, \ + { "inet6", CTLTYPE_NODE }, \ + { "natm", CTLTYPE_NODE }, \ +} + +/* + * PF_ROUTE - Routing table + * + * Three additional levels are defined: + * Fourth: address family, 0 is wildcard + * Fifth: type of info, defined below + * Sixth: flag(s) to mask with for NET_RT_FLAGS + */ +#define NET_RT_DUMP 1 /* dump; may limit to a.f. */ +#define NET_RT_FLAGS 2 /* by flags, e.g. RESOLVING */ +#define NET_RT_IFLIST 3 /* survey interface list */ +#define NET_RT_MAXID 4 + +#define CTL_NET_RT_NAMES { \ + { 0, 0 }, \ + { "dump", CTLTYPE_STRUCT }, \ + { "flags", CTLTYPE_STRUCT }, \ + { "iflist", CTLTYPE_STRUCT }, \ +} + +/* + * Maximum queue length specifiable by listen. + */ +#define SOMAXCONN 128 + +/* + * Message header for recvmsg and sendmsg calls. + * Used value-result for recvmsg, value only for sendmsg. 
+ */ +struct msghdr { + caddr_t msg_name; /* optional address */ + u_int msg_namelen; /* size of address */ + struct iovec *msg_iov; /* scatter/gather array */ + u_int msg_iovlen; /* # elements in msg_iov */ + caddr_t msg_control; /* ancillary data, see below */ + u_int msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ +}; + +#define MSG_OOB 0x1 /* process out-of-band data */ +#define MSG_PEEK 0x2 /* peek at incoming message */ +#define MSG_DONTROUTE 0x4 /* send without using routing tables */ +#define MSG_EOR 0x8 /* data completes record */ +#define MSG_TRUNC 0x10 /* data discarded before delivery */ +#define MSG_CTRUNC 0x20 /* control data lost before delivery */ +#define MSG_WAITALL 0x40 /* wait for full request or error */ +#define MSG_DONTWAIT 0x80 /* this message should be nonblocking */ +#define MSG_EOF 0x100 /* data completes connection */ +#define MSG_FLUSH 0x400 /* Start of 'hold' seq; dump so_temp */ +#define MSG_HOLD 0x800 /* Hold frag in so_temp */ +#define MSG_SEND 0x1000 /* Send the packet in so_temp */ +#define MSG_HAVEMORE 0x2000 /* Data ready to be read */ +#define MSG_RCVMORE 0x4000 /* Data remains in current pkt */ +#define MSG_COMPAT 0x8000 /* used in sendit() */ + +/* + * Header for ancillary data objects in msg_control buffer. + * Used for additional information with/about a datagram + * not expressible by flags. The format is a sequence + * of message elements headed by cmsghdr structures. + */ +struct cmsghdr { + u_int cmsg_len; /* data byte count, including hdr */ + int cmsg_level; /* originating protocol */ + int cmsg_type; /* protocol-specific type */ +/* followed by u_char cmsg_data[]; */ +}; + +/* given pointer to struct cmsghdr, return pointer to data */ +#define CMSG_DATA(cmsg) ((u_char *)((cmsg) + 1)) + +/* Alignment requirement for CMSG struct manipulation. + * This is different from ALIGN() defined in ARCH/include/param.h. + * XXX think again carefully about architecture dependencies. 
+ */ +#define CMSG_ALIGN(n) (((n) + 3) & ~3) + +/* given pointer to struct cmsghdr, return pointer to next cmsghdr */ +#define CMSG_NXTHDR(mhdr, cmsg) \ + (((caddr_t)(cmsg) + (cmsg)->cmsg_len + sizeof(struct cmsghdr) > \ + (mhdr)->msg_control + (mhdr)->msg_controllen) ? \ + (struct cmsghdr *)NULL : \ + (struct cmsghdr *)((caddr_t)(cmsg) + CMSG_ALIGN((cmsg)->cmsg_len))) + +#define CMSG_FIRSTHDR(mhdr) ((struct cmsghdr *)(mhdr)->msg_control) + +#define CMSG_SPACE(l) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(l)) +#define CMSG_LEN(l) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (l)) + +/* "Socket"-level control message types: */ +#define SCM_RIGHTS 0x01 /* access rights (array of int) */ +#define SCM_TIMESTAMP 0x02 /* timestamp (struct timeval) */ +#define SCM_CREDS 0x03 /* process creds (struct cmsgcred) */ + +/* + * 4.3 compat sockaddr, move to compat file later + */ +struct osockaddr { + u_short sa_family; /* address family */ + char sa_data[14]; /* up to 14 bytes of direct address */ +}; + +/* + * 4.3-compat message header (move to compat file later). + */ +struct omsghdr { + caddr_t msg_name; /* optional address */ + int msg_namelen; /* size of address */ + struct iovec *msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + caddr_t msg_accrights; /* access rights sent/received */ + int msg_accrightslen; +}; + +/* + * howto arguments for shutdown(2), specified by Posix.1g. 
+ */ +#define SHUT_RD 0 /* shut down the reading side */ +#define SHUT_WR 1 /* shut down the writing side */ +#define SHUT_RDWR 2 /* shut down both sides */ + +#if SENDFILE +/* + * sendfile(2) header/trailer struct + */ +struct sf_hdtr { + struct iovec *headers; /* pointer to an array of header struct iovec's */ + int hdr_cnt; /* number of header iovec's */ + struct iovec *trailers; /* pointer to an array of trailer struct iovec's */ + int trl_cnt; /* number of trailer iovec's */ +}; +#endif + +#ifndef KERNEL + +#include + +__BEGIN_DECLS +int accept __P((int, struct sockaddr *, int *)); +int bind __P((int, const struct sockaddr *, int)); +int connect __P((int, const struct sockaddr *, int)); +int getpeername __P((int, struct sockaddr *, int *)); +int getsockname __P((int, struct sockaddr *, int *)); +int getsockopt __P((int, int, int, void *, int *)); +int listen __P((int, int)); +ssize_t recv __P((int, void *, size_t, int)); +ssize_t recvfrom __P((int, void *, size_t, int, struct sockaddr *, int *)); +ssize_t recvmsg __P((int, struct msghdr *, int)); +ssize_t send __P((int, const void *, size_t, int)); +ssize_t sendto __P((int, const void *, + size_t, int, const struct sockaddr *, int)); +ssize_t sendmsg __P((int, const struct msghdr *, int)); +#if SENDFILE +int sendfile __P((int, int, off_t, size_t, struct sf_hdtr *, off_t *, int)); +#endif +int setsockopt __P((int, int, int, const void *, int)); +int shutdown __P((int, int)); +int socket __P((int, int, int)); +int socketpair __P((int, int, int, int *)); +__END_DECLS + +#endif /* !KERNEL */ +#endif /* !_SYS_SOCKET_H_ */ diff --git a/bsd/sys/socketvar.h b/bsd/sys/socketvar.h new file mode 100644 index 000000000..d9e2b7be5 --- /dev/null +++ b/bsd/sys/socketvar.h @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)socketvar.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_SOCKETVAR_H_ +#define _SYS_SOCKETVAR_H_ + +#include /* for struct selinfo */ +#include +#include +#include +/* + * Hacks to get around compiler complaints + */ +struct mbuf; +struct socket; +struct uio; +struct sockbuf; +struct sockaddr; +struct kextcb; +struct protosw; +struct sockif; +struct sockutil; + +/* strings for sleep message: */ +extern char netio[], netcon[], netcls[]; +#define SOCKET_CACHE_ON +#define SO_CACHE_FLUSH_INTERVAL 1 /* Seconds */ +#define SO_CACHE_TIME_LIMIT (120/SO_CACHE_FLUSH_INTERVAL) /* Seconds */ +#define SO_CACHE_MAX_FREE_BATCH 50 +#define MAX_CACHED_SOCKETS 60000 +#define TEMPDEBUG 0 + +/* + * Kernel structure per socket. + * Contains send and receive buffer queues, + * handle on protocol and pointer to protocol + * private data and error information. 
+ */ +typedef u_quad_t so_gen_t; + +struct socket { + int so_zone; /* zone we were allocated from */ + short so_type; /* generic type, see socket.h */ + short so_options; /* from socket call, see socket.h */ + short so_linger; /* time to linger while closing */ + short so_state; /* internal state flags SS_*, below */ + caddr_t so_pcb; /* protocol control block */ + struct protosw *so_proto; /* protocol handle */ +/* + * Variables for connection queueing. + * Socket where accepts occur is so_head in all subsidiary sockets. + * If so_head is 0, socket is not related to an accept. + * For head socket so_q0 queues partially completed connections, + * while so_q is a queue of connections ready to be accepted. + * If a connection is aborted and it has so_head set, then + * it has to be pulled out of either so_q0 or so_q. + * We allow connections to queue up based on current queue lengths + * and limit on number of queued connections for this socket. + */ + struct socket *so_head; /* back pointer to accept socket */ + TAILQ_HEAD(, socket) so_incomp; /* queue of partial unaccepted connections */ + TAILQ_HEAD(, socket) so_comp; /* queue of complete unaccepted connections */ + TAILQ_ENTRY(socket) so_list; /* list of unaccepted connections */ + short so_qlen; /* number of unaccepted connections */ + short so_incqlen; /* number of unaccepted incomplete + connections */ + short so_qlimit; /* max number queued connections */ + short so_timeo; /* connection timeout */ + u_short so_error; /* error affecting connection */ + pid_t so_pgid; /* pgid for signals */ + u_long so_oobmark; /* chars to oob mark */ +/* + * Variables for socket buffering. 
+ */ + struct sockbuf { + u_long sb_cc; /* actual chars in buffer */ + u_long sb_hiwat; /* max actual char count */ + u_long sb_mbcnt; /* chars of mbufs used */ + u_long sb_mbmax; /* max chars of mbufs to use */ + long sb_lowat; /* low water mark */ + struct mbuf *sb_mb; /* the mbuf chain */ + struct socket *sb_so; /* socket back ptr */ + struct selinfo sb_sel; /* process selecting read/write */ + short sb_flags; /* flags, see below */ + short sb_timeo; /* timeout for read/write */ + } so_rcv, so_snd; +#define SB_MAX (256*1024) /* default for max chars in sockbuf */ +#define SB_LOCK 0x01 /* lock on data queue */ +#define SB_WANT 0x02 /* someone is waiting to lock */ +#define SB_WAIT 0x04 /* someone is waiting for data/space */ +#define SB_SEL 0x08 /* someone is selecting */ +#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */ +#define SB_NOTIFY (SB_WAIT|SB_SEL|SB_ASYNC) +#define SB_UPCALL 0x20 /* someone wants an upcall */ +#define SB_NOINTR 0x40 /* operations not interruptible */ +#define SB_RECV 0x8000 /* this is rcv sb */ + + caddr_t so_tpcb; /* Wisc. protocol control block XXX */ + void (*so_upcall) __P((struct socket *so, caddr_t arg, int waitf)); + caddr_t so_upcallarg; /* Arg for above */ + uid_t so_uid; /* who opened the socket */ + /* NB: generation count must not be first; easiest to make it last. */ + so_gen_t so_gencnt; /* generation count */ + TAILQ_HEAD(,eventqelt) so_evlist; + int cached_in_sock_layer; /* Is socket bundled with pcb/pcb.inp_ppcb? */ + struct socket *cache_next; + struct socket *cache_prev; + u_long cache_timestamp; + caddr_t so_saved_pcb; /* Saved pcb when cacheing */ + struct mbuf *so_temp; /* Holding area for outbound frags */ + /* Plug-in support - make the socket interface overridable */ + struct mbuf *so_tail; + struct kextcb *so_ext; /* NKE hook */ +}; + +/* + * Socket state bits. 
+ */ +#define SS_NOFDREF 0x001 /* no file table ref any more */ +#define SS_ISCONNECTED 0x002 /* socket connected to a peer */ +#define SS_ISCONNECTING 0x004 /* in process of connecting to peer */ +#define SS_ISDISCONNECTING 0x008 /* in process of disconnecting */ +#define SS_CANTSENDMORE 0x010 /* can't send more data to peer */ +#define SS_CANTRCVMORE 0x020 /* can't receive more data from peer */ +#define SS_RCVATMARK 0x040 /* at mark on input */ + +#define SS_PRIV 0x080 /* privileged for broadcast, raw... */ +#define SS_NBIO 0x100 /* non-blocking ops */ +#define SS_ASYNC 0x200 /* async i/o notify */ +#define SS_ISCONFIRMING 0x400 /* deciding to accept connection req */ +#define SS_INCOMP 0x800 /* Unaccepted, incomplete connection */ +#define SS_COMP 0x1000 /* unaccepted, complete connection */ + +/* + * Externalized form of struct socket used by the sysctl(3) interface. + */ +struct xsocket { + size_t xso_len; /* length of this structure */ + struct socket *xso_so; /* makes a convenient handle sometimes */ + short so_type; + short so_options; + short so_linger; + short so_state; + caddr_t so_pcb; /* another convenient handle */ + int xso_protocol; + int xso_family; + short so_qlen; + short so_incqlen; + short so_qlimit; + short so_timeo; + u_short so_error; + pid_t so_pgid; + u_long so_oobmark; + struct xsockbuf { + u_long sb_cc; + u_long sb_hiwat; + u_long sb_mbcnt; + u_long sb_mbmax; + long sb_lowat; + short sb_flags; + short sb_timeo; + } so_rcv, so_snd; + uid_t so_uid; /* XXX */ +}; + +/* + * Macros for sockets and socket buffering. + */ +#define sbtoso(sb) (sb->sb_so) + +/* + * Do we need to notify the other side when I/O is possible? + */ +#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT|SB_SEL|SB_ASYNC|SB_UPCALL)) != 0) + +/* + * How much space is there in a socket buffer (so->so_snd or so->so_rcv)? + * This is problematical if the fields are unsigned, as the space might + * still be negative (cc > hiwat or mbcnt > mbmax). 
Should detect + * overflow and return 0. Should use "lmin" but it doesn't exist now. + */ +#define sbspace(sb) \ + ((long) imin((int)((sb)->sb_hiwat - (sb)->sb_cc), \ + (int)((sb)->sb_mbmax - (sb)->sb_mbcnt))) + +/* do we have to send all at once on a socket? */ +#define sosendallatonce(so) \ + ((so)->so_proto->pr_flags & PR_ATOMIC) + +/* can we read something from so? */ +#define soreadable(so) \ + ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \ + ((so)->so_state & SS_CANTRCVMORE) || \ + (so)->so_comp.tqh_first || (so)->so_error) + +/* can we write something to so? */ +#define sowriteable(so) \ + ((sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat && \ + (((so)->so_state&SS_ISCONNECTED) || \ + ((so)->so_proto->pr_flags&PR_CONNREQUIRED)==0)) || \ + ((so)->so_state & SS_CANTSENDMORE) || \ + (so)->so_error) + +/* adjust counters in sb reflecting allocation of m */ +#define sballoc(sb, m) { \ + (sb)->sb_cc += (m)->m_len; \ + (sb)->sb_mbcnt += MSIZE; \ + if ((m)->m_flags & M_EXT) \ + (sb)->sb_mbcnt += (m)->m_ext.ext_size; \ +} + +/* adjust counters in sb reflecting freeing of m */ +#define sbfree(sb, m) { \ + (sb)->sb_cc -= (m)->m_len; \ + (sb)->sb_mbcnt -= MSIZE; \ + if ((m)->m_flags & M_EXT) \ + (sb)->sb_mbcnt -= (m)->m_ext.ext_size; \ +} + +/* + * Set lock on sockbuf sb; sleep if lock is already held. + * Unless SB_NOINTR is set on sockbuf, sleep is interruptible. + * Returns error without lock if sleep is interrupted. + */ +#define sblock(sb, wf) ((sb)->sb_flags & SB_LOCK ? \ + (((wf) == M_WAIT) ? 
sb_lock(sb) : EWOULDBLOCK) : \ + ((sb)->sb_flags |= SB_LOCK), 0) + +/* release lock on sockbuf sb */ +#define sbunlock(sb) { \ + (sb)->sb_flags &= ~SB_LOCK; \ + if ((sb)->sb_flags & SB_WANT) { \ + (sb)->sb_flags &= ~SB_WANT; \ + wakeup((caddr_t)&(sb)->sb_flags); \ + } \ +} + +#define sorwakeup(so) do { \ + if (sb_notify(&(so)->so_rcv)) \ + sowakeup((so), &(so)->so_rcv); \ + } while (0) + +#define sowwakeup(so) do { \ + if (sb_notify(&(so)->so_snd)) \ + sowakeup((so), &(so)->so_snd); \ + } while (0) + + +/* + * Socket extension mechanism: control block hooks: + * This is the "head" of any control block for an extension + * Note: we separate intercept function dispatch vectors from + * the NFDescriptor to permit selective replacement during + * operation, e.g., to disable some functions. + */ +struct kextcb +{ struct kextcb *e_next; /* Next kext control block */ + void *e_fcb; /* Real filter control block */ + struct NFDescriptor *e_nfd; /* NKE Descriptor */ + /* Plug-in support - intercept functions */ + struct sockif *e_soif; /* Socket functions */ + struct sockutil *e_sout; /* Sockbuf utility functions */ +}; +#define EXT_NULL 0x0 /* STATE: Not in use */ +#define sotokextcb(so) (so ? so->so_ext : 0) + +#ifdef KERNEL +/* + * Argument structure for sosetopt et seq. This is in the KERNEL + * section because it will never be visible to user code. + */ +enum sopt_dir { SOPT_GET, SOPT_SET }; +struct sockopt { + enum sopt_dir sopt_dir; /* is this a get or a set? 
*/ + int sopt_level; /* second arg of [gs]etsockopt */ + int sopt_name; /* third arg of [gs]etsockopt */ + void *sopt_val; /* fourth arg of [gs]etsockopt */ + size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ + struct proc *sopt_p; /* calling process or null if kernel */ +}; + +#if SENDFILE + +struct sf_buf { + SLIST_ENTRY(sf_buf) free_list; /* list of free buffer slots */ + int refcnt; /* reference count */ + struct vm_page *m; /* currently mapped page */ + vm_offset_t kva; /* va of mapping */ +}; + +#endif + +#ifdef MALLOC_DECLARE +MALLOC_DECLARE(M_PCB); +MALLOC_DECLARE(M_SONAME); +#endif + +extern int maxsockets; +extern u_long sb_max; +extern int socket_zone; +extern so_gen_t so_gencnt; + +struct file; +struct filedesc; +struct mbuf; +struct sockaddr; +struct stat; +struct ucred; +struct uio; + +/* + * File operations on sockets. + */ +int soo_read __P((struct file *fp, struct uio *uio, struct ucred *cred)); +int soo_write __P((struct file *fp, struct uio *uio, struct ucred *cred)); +int soo_ioctl __P((struct file *fp, u_long cmd, caddr_t data, + struct proc *p)); +int soo_select __P((struct file *fp, int which, struct proc *p)); +int soo_stat __P((struct socket *so, struct stat *ub)); + +int soo_close __P((struct file *fp, struct proc *p)); + + +/* + * From uipc_socket and friends + */ +struct sockaddr *dup_sockaddr __P((struct sockaddr *sa, int canwait)); +int getsock __P((struct filedesc *fdp, int fd, struct file **fpp)); +int sockargs __P((struct mbuf **mp, caddr_t buf, int buflen, int type)); +int getsockaddr __P((struct sockaddr **namp, caddr_t uaddr, size_t len)); +void sbappend __P((struct sockbuf *sb, struct mbuf *m)); +int sbappendaddr __P((struct sockbuf *sb, struct sockaddr *asa, + struct mbuf *m0, struct mbuf *control)); +int sbappendcontrol __P((struct sockbuf *sb, struct mbuf *m0, + struct mbuf *control)); +void sbappendrecord __P((struct sockbuf *sb, struct mbuf *m0)); +void sbcheck __P((struct sockbuf *sb)); +void sbcompress 
__P((struct sockbuf *sb, struct mbuf *m, struct mbuf *n)); +struct mbuf * + sbcreatecontrol __P((caddr_t p, int size, int type, int level)); +void sbdrop __P((struct sockbuf *sb, int len)); +void sbdroprecord __P((struct sockbuf *sb)); +void sbflush __P((struct sockbuf *sb)); +void sbinsertoob __P((struct sockbuf *sb, struct mbuf *m0)); +void sbrelease __P((struct sockbuf *sb)); +int sbreserve __P((struct sockbuf *sb, u_long cc)); +void sbtoxsockbuf __P((struct sockbuf *sb, struct xsockbuf *xsb)); +int sbwait __P((struct sockbuf *sb)); +int sb_lock __P((struct sockbuf *sb)); +int soabort __P((struct socket *so)); +int soaccept __P((struct socket *so, struct sockaddr **nam)); +struct socket *soalloc __P((int waitok, int dom, int type)); +int sobind __P((struct socket *so, struct sockaddr *nam)); +void socantrcvmore __P((struct socket *so)); +void socantsendmore __P((struct socket *so)); +int soclose __P((struct socket *so)); +int soconnect __P((struct socket *so, struct sockaddr *nam)); +int soconnect2 __P((struct socket *so1, struct socket *so2)); +int socreate __P((int dom, struct socket **aso, int type, int proto)); +void sodealloc __P((struct socket *so)); +int sodisconnect __P((struct socket *so)); +void sofree __P((struct socket *so)); +int sogetopt __P((struct socket *so, struct sockopt *sopt)); +void sohasoutofband __P((struct socket *so)); +void soisconnected __P((struct socket *so)); +void soisconnecting __P((struct socket *so)); +void soisdisconnected __P((struct socket *so)); +void soisdisconnecting __P((struct socket *so)); +int solisten __P((struct socket *so, int backlog)); +struct socket * + sodropablereq __P((struct socket *head)); +struct socket * + sonewconn __P((struct socket *head, int connstatus)); +int sooptcopyin __P((struct sockopt *sopt, void *buf, size_t len, + size_t minlen)); +int sooptcopyout __P((struct sockopt *sopt, void *buf, size_t len)); +int sopoll __P((struct socket *so, int events, struct ucred *cred)); +int soreceive 
__P((struct socket *so, struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, + struct mbuf **controlp, int *flagsp)); +int soreserve __P((struct socket *so, u_long sndcc, u_long rcvcc)); +void sorflush __P((struct socket *so)); +int sosend __P((struct socket *so, struct sockaddr *addr, struct uio *uio, + struct mbuf *top, struct mbuf *control, int flags)); + +int sosetopt __P((struct socket *so, struct sockopt *sopt)); + + +int soshutdown __P((struct socket *so, int how)); +void sotoxsocket __P((struct socket *so, struct xsocket *xso)); +void sowakeup __P((struct socket *so, struct sockbuf *sb)); + + +#endif /* KERNEL */ +#endif /* !_SYS_SOCKETVAR_H_ */ diff --git a/bsd/sys/sockio.h b/bsd/sys/sockio.h new file mode 100644 index 000000000..fe2e3a6f5 --- /dev/null +++ b/bsd/sys/sockio.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)sockio.h 8.1 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_SOCKIO_H_ +#define _SYS_SOCKIO_H_ + +#include <sys/ioccom.h> + +/* Socket ioctl's. 
*/ +#define SIOCSHIWAT _IOW('s', 0, int) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, int) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, int) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, int) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, int) /* at oob mark? */ +#define SIOCSPGRP _IOW('s', 8, int) /* set process group */ +#define SIOCGPGRP _IOR('s', 9, int) /* get process group */ + +#define SIOCADDRT _IOW('r', 10, struct ortentry) /* add route */ +#define SIOCDELRT _IOW('r', 11, struct ortentry) /* delete route */ + +#define SIOCSIFADDR _IOW('i', 12, struct ifreq) /* set ifnet address */ +#define OSIOCGIFADDR _IOWR('i', 13, struct ifreq) /* get ifnet address */ +#define SIOCSIFDSTADDR _IOW('i', 14, struct ifreq) /* set p-p address */ +#define OSIOCGIFDSTADDR _IOWR('i', 15, struct ifreq) /* get p-p address */ +#define SIOCSIFFLAGS _IOW('i', 16, struct ifreq) /* set ifnet flags */ +#define SIOCGIFFLAGS _IOWR('i', 17, struct ifreq) /* get ifnet flags */ +#define OSIOCGIFBRDADDR _IOWR('i', 18, struct ifreq) /* get broadcast addr */ +#define SIOCSIFBRDADDR _IOW('i', 19, struct ifreq) /* set broadcast addr */ +#define OSIOCGIFCONF _IOWR('i', 20, struct ifconf) /* get ifnet list */ +#define OSIOCGIFNETMASK _IOWR('i', 21, struct ifreq) /* get net addr mask */ +#define SIOCSIFNETMASK _IOW('i', 22, struct ifreq) /* set net addr mask */ +#define SIOCGIFMETRIC _IOWR('i', 23, struct ifreq) /* get IF metric */ +#define SIOCSIFMETRIC _IOW('i', 24, struct ifreq) /* set IF metric */ +#define SIOCDIFADDR _IOW('i', 25, struct ifreq) /* delete IF addr */ +#define SIOCAIFADDR _IOW('i', 26, struct ifaliasreq)/* add/chg IF alias */ +#define SIOCGETVIFCNT _IOWR('r', 27, struct sioc_vif_req)/* get vif pkt cnt */ +#define SIOCGETSGCNT _IOWR('r', 28, struct sioc_sg_req) /* get s,g pkt cnt */ +#define SIOCALIFADDR _IOW('i', 29, struct if_laddrreq) /* add IF addr */ +#define SIOCGLIFADDR _IOWR('i', 30, struct if_laddrreq) /* get IF addr */ 
+#define SIOCDLIFADDR _IOW('i', 31, struct if_laddrreq) /* delete IF addr */ + + +#define SIOCGIFADDR _IOWR('i', 33, struct ifreq) /* get ifnet address */ +#define SIOCGIFDSTADDR _IOWR('i', 34, struct ifreq) /* get p-p address */ +#define SIOCGIFBRDADDR _IOWR('i', 35, struct ifreq) /* get broadcast addr */ +#define SIOCGIFCONF _IOWR('i', 36, struct ifconf) /* get ifnet list */ +#define SIOCGIFNETMASK _IOWR('i', 37, struct ifreq) /* get net addr mask */ +#define SIOCAUTOADDR _IOWR('i', 38, struct ifreq) /* autoconf address */ +#define SIOCAUTONETMASK _IOW('i', 39, struct ifreq) /* autoconf netmask */ + + +#define SIOCADDMULTI _IOW('i', 49, struct ifreq) /* add m'cast addr */ +#define SIOCDELMULTI _IOW('i', 50, struct ifreq) /* del m'cast addr */ +#define SIOCGIFMTU _IOWR('i', 51, struct ifreq) /* get IF mtu */ +#define SIOCSIFMTU _IOW('i', 52, struct ifreq) /* set IF mtu */ +#define SIOCGIFPHYS _IOWR('i', 53, struct ifreq) /* get IF wire */ +#define SIOCSIFPHYS _IOW('i', 54, struct ifreq) /* set IF wire */ +#define SIOCSIFMEDIA _IOWR('i', 55, struct ifreq) /* set net media */ +#define SIOCGIFMEDIA _IOWR('i', 56, struct ifmediareq) /* get net media */ +#define SIOCSIFGENERIC _IOW('i', 57, struct ifreq) /* generic IF set op */ +#define SIOCGIFGENERIC _IOWR('i', 58, struct ifreq) /* generic IF get op */ +#define SIOCRSLVMULTI _IOWR('i', 59, struct rslvmulti_req) + +#define SIOCSIFPHYADDR _IOW('i', 70, struct ifaliasreq) /* set gif address */ +#define SIOCGIFPSRCADDR _IOWR('i', 71, struct ifreq) /* get gif psrc addr */ +#define SIOCGIFPDSTADDR _IOWR('i', 72, struct ifreq) /* get gif pdst addr */ + +#define SIOCGIFASYNCMAP _IOWR('i', 124, struct ifreq) /* get ppp asyncmap */ +#define SIOCSIFASYNCMAP _IOW('i', 125, struct ifreq) /* set ppp asyncmap */ + + +#define SIOCSETOT _IOW('s', 128, int) /* set socket for LibOT */ + + +#endif /* !_SYS_SOCKIO_H_ */ diff --git a/bsd/sys/stat.h b/bsd/sys/stat.h new file mode 100644 index 000000000..22c79a223 --- /dev/null +++ 
b/bsd/sys/stat.h @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)stat.h 8.9 (Berkeley) 8/17/94 + */ + + +#ifndef _SYS_STAT_H_ +#define _SYS_STAT_H_ + +#include <sys/time.h> + +#ifndef _POSIX_SOURCE +struct ostat { + u_int16_t st_dev; /* inode's device */ + ino_t st_ino; /* inode's number */ + mode_t st_mode; /* inode protection mode */ + nlink_t st_nlink; /* number of hard links */ + u_int16_t st_uid; /* user ID of the file's owner */ + u_int16_t st_gid; /* group ID of the file's group */ + u_int16_t st_rdev; /* device type */ + int32_t st_size; /* file size, in bytes */ + struct timespec st_atimespec; /* time of last access */ + struct timespec st_mtimespec; /* time of last data modification */ + struct timespec st_ctimespec; /* time of last file status change */ + int32_t st_blksize; /* optimal blocksize for I/O */ + int32_t st_blocks; /* blocks allocated for file */ + u_int32_t st_flags; /* user defined flags for file */ + u_int32_t st_gen; /* file generation number */ +}; +#endif /* !_POSIX_SOURCE */ + +struct stat { + dev_t st_dev; /* inode's device */ + ino_t st_ino; /* inode's number */ + mode_t st_mode; /* inode protection mode */ + nlink_t st_nlink; /* number of hard links */ + uid_t st_uid; /* user ID of the file's owner */ + gid_t st_gid; /* group ID of the file's group */ + dev_t st_rdev; /* device type */ +#ifndef _POSIX_SOURCE + struct timespec st_atimespec; /* time of last access */ + struct timespec st_mtimespec; /* time of last data modification */ + struct timespec st_ctimespec; /* time of last file status change */ +#else + time_t st_atime; /* time of last access */ + long st_atimensec; /* nsec of last access */ + time_t st_mtime; /* time of last data modification */ + long st_mtimensec; /* nsec of last data modification */ + time_t st_ctime; /* time of last file status change */ + long st_ctimensec; /* nsec of last file status change */ +#endif + off_t st_size; /* file size, in bytes */ + int64_t st_blocks; /* blocks allocated for file */ + u_int32_t st_blksize; /* optimal blocksize for I/O */ + u_int32_t 
st_flags; /* user defined flags for file */ + u_int32_t st_gen; /* file generation number */ + int32_t st_lspare; + int64_t st_qspare[2]; +}; + + +#ifndef _POSIX_SOURCE +#define st_atime st_atimespec.tv_sec +#define st_mtime st_mtimespec.tv_sec +#define st_ctime st_ctimespec.tv_sec +#endif + +#define S_ISUID 0004000 /* set user id on execution */ +#define S_ISGID 0002000 /* set group id on execution */ +#ifndef _POSIX_SOURCE +#define S_ISTXT 0001000 /* sticky bit */ +#endif + +#define S_IRWXU 0000700 /* RWX mask for owner */ +#define S_IRUSR 0000400 /* R for owner */ +#define S_IWUSR 0000200 /* W for owner */ +#define S_IXUSR 0000100 /* X for owner */ + +#ifndef _POSIX_SOURCE +#define S_IREAD S_IRUSR +#define S_IWRITE S_IWUSR +#define S_IEXEC S_IXUSR +#endif + +#define S_IRWXG 0000070 /* RWX mask for group */ +#define S_IRGRP 0000040 /* R for group */ +#define S_IWGRP 0000020 /* W for group */ +#define S_IXGRP 0000010 /* X for group */ + +#define S_IRWXO 0000007 /* RWX mask for other */ +#define S_IROTH 0000004 /* R for other */ +#define S_IWOTH 0000002 /* W for other */ +#define S_IXOTH 0000001 /* X for other */ + +#ifndef _POSIX_SOURCE +#define S_IFMT 0170000 /* type of file mask */ +#define S_IFIFO 0010000 /* named pipe (fifo) */ +#define S_IFCHR 0020000 /* character special */ +#define S_IFDIR 0040000 /* directory */ +#define S_IFBLK 0060000 /* block special */ +#define S_IFREG 0100000 /* regular */ +#define S_IFLNK 0120000 /* symbolic link */ +#define S_IFSOCK 0140000 /* socket */ +#define S_IFWHT 0160000 /* whiteout */ +#define S_ISVTX 0001000 /* save swapped text even after use */ +#endif + +#define S_ISDIR(m) ((m & 0170000) == 0040000) /* directory */ +#define S_ISCHR(m) ((m & 0170000) == 0020000) /* char special */ +#define S_ISBLK(m) ((m & 0170000) == 0060000) /* block special */ +#define S_ISREG(m) ((m & 0170000) == 0100000) /* regular file */ +#define S_ISFIFO(m) ((m & 0170000) == 0010000 || \ + (m & 0170000) == 0140000) /* fifo or socket */ +#ifndef 
_POSIX_SOURCE +#define S_ISLNK(m) ((m & 0170000) == 0120000) /* symbolic link */ +#define S_ISSOCK(m) ((m & 0170000) == 0010000 || \ + (m & 0170000) == 0140000) /* fifo or socket */ +#define S_ISWHT(m) ((m & 0170000) == 0160000) /* whiteout */ +#endif + +#ifndef _POSIX_SOURCE +#define ACCESSPERMS (S_IRWXU|S_IRWXG|S_IRWXO) /* 0777 */ + /* 7777 */ +#define ALLPERMS (S_ISUID|S_ISGID|S_ISTXT|S_IRWXU|S_IRWXG|S_IRWXO) + /* 0666 */ +#define DEFFILEMODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) + +#define S_BLKSIZE 512 /* block size used in the stat struct */ + +/* + * Definitions of flags stored in file flags word. + * + * Super-user and owner changeable flags. + */ +#define UF_SETTABLE 0x0000ffff /* mask of owner changeable flags */ +#define UF_NODUMP 0x00000001 /* do not dump file */ +#define UF_IMMUTABLE 0x00000002 /* file may not be changed */ +#define UF_APPEND 0x00000004 /* writes to file may only append */ +#define UF_OPAQUE 0x00000008 /* directory is opaque wrt. union */ +/* + * Super-user changeable flags. + */ +#define SF_SETTABLE 0xffff0000 /* mask of superuser changeable flags */ +#define SF_ARCHIVED 0x00010000 /* file is archived */ +#define SF_IMMUTABLE 0x00020000 /* file may not be changed */ +#define SF_APPEND 0x00040000 /* writes to file may only append */ + +#ifdef KERNEL +/* + * Shorthand abbreviations of above. 
+ */ +#define OPAQUE (UF_OPAQUE) +#define APPEND (UF_APPEND | SF_APPEND) +#define IMMUTABLE (UF_IMMUTABLE | SF_IMMUTABLE) +#endif +#endif + +#ifndef KERNEL +#include <sys/cdefs.h> + +__BEGIN_DECLS +int chmod __P((const char *, mode_t)); +int fstat __P((int, struct stat *)); +int mkdir __P((const char *, mode_t)); +int mkfifo __P((const char *, mode_t)); +int stat __P((const char *, struct stat *)); +mode_t umask __P((mode_t)); +#ifndef _POSIX_SOURCE +int chflags __P((const char *, u_long)); +int fchflags __P((int, u_long)); +int fchmod __P((int, mode_t)); +int lstat __P((const char *, struct stat *)); +#endif +__END_DECLS +#endif +#endif /* !_SYS_STAT_H_ */ diff --git a/bsd/sys/subr_prf.h b/bsd/sys/subr_prf.h new file mode 100644 index 000000000..8b6950494 --- /dev/null +++ b/bsd/sys/subr_prf.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1986, 1988, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. 
+ * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * HISTORY + * 23-Oct-1995 Doug Mitchell at NeXT + * Split out from subr_prf.c + */ +#ifdef KERNEL_PRIVATE + +#ifndef _SYS_SUBRPRF_H_ +#define _SYS_SUBRPRF_H_ + +/* + * "flags" argument to prf(). + */ +#define TOCONS 0x01 +#define TOTTY 0x02 +#define TOLOG 0x04 +#define TOSTR 0x8 + +extern int prf(const char *fmt, va_list ap, int flags, struct tty *ttyp); + +#endif /* ! _SYS_SUBRPRF_H_ */ + +#endif /* KERNEL_PRIVATE */ + diff --git a/bsd/sys/syscall.h b/bsd/sys/syscall.h new file mode 100644 index 000000000..9f9d32f89 --- /dev/null +++ b/bsd/sys/syscall.h @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992, 1995-1999 Apple Computer, Inc. All Rights Reserved */ +/* + * + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. + * + */ + + +#define SYS_syscall 0 +#define SYS_exit 1 +#define SYS_fork 2 +#define SYS_read 3 +#define SYS_write 4 +#define SYS_open 5 +#define SYS_close 6 +#define SYS_wait4 7 + /* 8 is old creat */ +#define SYS_link 9 +#define SYS_unlink 10 + /* 11 is obsolete execv */ +#define SYS_chdir 12 +#define SYS_fchdir 13 +#define SYS_mknod 14 +#define SYS_chmod 15 +#define SYS_chown 16 + /* 17 is obsolete sbreak */ +#if COMPAT_GETFSSTAT + /* 18 is old getfsstat */ +#else +#define SYS_getfsstat 18 +#endif + /* 19 is old lseek */ +#define SYS_getpid 20 + /* 21 is obsolete mount */ + /* 22 is obsolete umount */ +#define SYS_setuid 23 +#define SYS_getuid 24 +#define SYS_geteuid 25 +#define SYS_ptrace 26 +#define SYS_recvmsg 27 +#define SYS_sendmsg 28 +#define SYS_recvfrom 29 +#define SYS_accept 30 +#define SYS_getpeername 31 +#define SYS_getsockname 32 +#define SYS_access 33 +#define SYS_chflags 34 +#define SYS_fchflags 35 +#define SYS_sync 36 +#define SYS_kill 37 + /* 38 is old stat */ +#define SYS_getppid 39 + /* 40 is old lstat */ +#define SYS_dup 41 +#define SYS_pipe 42 +#define SYS_getegid 43 +#define SYS_profil 44 +#define SYS_ktrace 45 +#define SYS_sigaction 46 +#define SYS_getgid 47 +#define SYS_sigprocmask 48 +#define SYS_getlogin 49 +#define SYS_setlogin 50 +#define SYS_acct 51 +#define SYS_sigpending 52 +#define SYS_sigaltstack 53 +#define SYS_ioctl 54 +#define SYS_reboot 55 +#define SYS_revoke 56 +#define SYS_symlink 57 +#define SYS_readlink 58 +#define SYS_execve 59 +#define SYS_umask 60 +#define SYS_chroot 61 + /* 62 is old fstat */ + /* 63 is unused */ + /* 64 is old getpagesize */ +#define SYS_msync 65 +#define SYS_vfork 66 + 
/* 67 is obsolete vread */ + /* 68 is obsolete vwrite */ +#define SYS_sbrk 69 +#define SYS_sstk 70 + /* 71 is old mmap */ + /* 72 is obsolete vadvise */ +#define SYS_munmap 73 +#define SYS_mprotect 74 +#define SYS_madvise 75 + /* 76 is obsolete vhangup */ + /* 77 is obsolete vlimit */ +#define SYS_mincore 78 +#define SYS_getgroups 79 +#define SYS_setgroups 80 +#define SYS_getpgrp 81 +#define SYS_setpgid 82 +#define SYS_setitimer 83 + /* 84 is old wait */ +#define SYS_swapon 85 +#define SYS_getitimer 86 + /* 87 is old gethostname */ + /* 88 is old sethostname */ +#define SYS_getdtablesize 89 +#define SYS_dup2 90 +#define SYS_fcntl 92 +#define SYS_select 93 + /* 94 is obsolete setdopt */ +#define SYS_fsync 95 +#define SYS_setpriority 96 +#define SYS_socket 97 +#define SYS_connect 98 + /* 99 is old accept */ +#define SYS_getpriority 100 + /* 101 is old send */ + /* 102 is old recv */ +#define SYS_sigreturn 103 +#define SYS_bind 104 +#define SYS_setsockopt 105 +#define SYS_listen 106 + /* 107 is obsolete vtimes */ + /* 108 is old sigvec */ + /* 109 is old sigblock */ + /* 110 is old sigsetmask */ +#define SYS_sigsuspend 111 + /* 112 is old sigstack */ + /* 113 is old recvmsg */ + /* 114 is old sendmsg */ + /* 115 is obsolete vtrace */ +#define SYS_gettimeofday 116 +#define SYS_getrusage 117 +#define SYS_getsockopt 118 + /* 119 is obsolete resuba */ +#define SYS_readv 120 +#define SYS_writev 121 +#define SYS_settimeofday 122 +#define SYS_fchown 123 +#define SYS_fchmod 124 + /* 125 is old recvfrom */ + /* 126 is old setreuid */ + /* 127 is old setregid */ +#define SYS_rename 128 + /* 129 is old truncate */ + /* 130 is old ftruncate */ +#define SYS_flock 131 +#define SYS_mkfifo 132 +#define SYS_sendto 133 +#define SYS_shutdown 134 +#define SYS_socketpair 135 +#define SYS_mkdir 136 +#define SYS_rmdir 137 +#define SYS_utimes 138 + /* 139 is unused */ +#define SYS_adjtime 140 + /* 141 is old getpeername */ + /* 142 is old gethostid */ + /* 143 is old sethostid */ + /* 144 is 
old getrlimit */ + /* 145 is old setrlimit */ + /* 146 is old killpg */ +#define SYS_setsid 147 + /* 148 is obsolete setquota */ + /* 149 is obsolete quota */ + /* 150 is old getsockname */ + /* 151 is reserved */ +#define SYS_setprivexec 152 + /* 153 is reserved */ + /* 154 is reserved */ +#define SYS_nfssvc 155 + /* 156 is old getdirentries */ +#define SYS_statfs 157 +#define SYS_fstatfs 158 +#define SYS_unmount 159 + /* 160 is obsolete async_daemon */ +#define SYS_getfh 161 + /* 162 is old getdomainname */ + /* 163 is old setdomainname */ + /* 164 is obsolete pcfs_mount */ +#define SYS_quotactl 165 + /* 166 is obsolete exportfs */ +#define SYS_mount 167 + /* 168 is obsolete ustat */ + /* 169 is unused */ +#define SYS_table 170 + /* 171 is old wait_3 */ + /* 172 is obsolete rpause */ + /* 173 is unused */ + /* 174 is obsolete getdents */ +#define SYS_gc_control 175 +#define SYS_add_profil 176 + /* 177 is unused */ + /* 178 is unused */ + /* 179 is unused */ +#define SYS_kdebug_trace 180 +#define SYS_setgid 181 +#define SYS_setegid 182 +#define SYS_seteuid 183 +#define SYS_lfs_bmapv 184 +#define SYS_lfs_markv 185 +#define SYS_lfs_segclean 186 +#define SYS_lfs_segwait 187 +#define SYS_stat 188 +#define SYS_fstat 189 +#define SYS_lstat 190 +#define SYS_pathconf 191 +#define SYS_fpathconf 192 +#if COMPAT_GETFSSTAT +#define SYS_getfsstat 193 +#endif +#define SYS_getrlimit 194 +#define SYS_setrlimit 195 +#define SYS_getdirentries 196 +#define SYS_mmap 197 +#define SYS___syscall 198 +#define SYS_lseek 199 +#define SYS_truncate 200 +#define SYS_ftruncate 201 +#define SYS___sysctl 202 +#define SYS_mlock 203 +#define SYS_munlock 204 +#define SYS_undelete 205 +#define SYS_ATsocket 206 +#define SYS_ATgetmsg 207 +#define SYS_ATputmsg 208 +#define SYS_ATPsndreq 209 +#define SYS_ATPsndrsp 210 +#define SYS_ATPgetreq 211 +#define SYS_ATPgetrsp 212 + /* 213-215 are reserved for AppleTalk */ +#define SYS_mkcomplex 216 +#define SYS_statv 217 +#define SYS_lstatv 218 +#define 
SYS_fstatv 219 +#define SYS_getattrlist 220 +#define SYS_setattrlist 221 +#define SYS_getdirentriesattr 222 +#define SYS_exchangedata 223 +#define SYS_checkuseraccess 224 +#define SYS_searchfs 225 + + /* 226 - 230 are reserved for HFS expansion */ + /* 231 - 249 are reserved */ +#define SYS_minherit 250 +#define SYS_semsys 251 +#define SYS_msgsys 252 +#define SYS_shmsys 253 +#define SYS_semctl 254 +#define SYS_semget 255 +#define SYS_semop 256 +#define SYS_semconfig 257 +#define SYS_msgctl 258 +#define SYS_msgget 259 +#define SYS_msgsnd 260 +#define SYS_msgrcv 261 +#define SYS_shmat 262 +#define SYS_shmctl 263 +#define SYS_shmdt 264 +#define SYS_shmget 265 +#define SYS_shm_open 266 +#define SYS_shm_unlink 267 +#define SYS_sem_open 268 +#define SYS_sem_close 269 +#define SYS_sem_unlink 270 +#define SYS_sem_wait 271 +#define SYS_sem_trywait 272 +#define SYS_sem_post 273 +#define SYS_sem_getvalue 274 +#define SYS_sem_init 275 +#define SYS_sem_destroy 276 + /* 277 - 295 are reserved */ +#define SYS_load_shared_file 296 +#define SYS_reset_shared_file 297 + /* 298 - 323 are reserved */ +#define SYS_mlockall 324 +#define SYS_munlockall 325 + diff --git a/bsd/sys/sysctl.h b/bsd/sys/sysctl.h new file mode 100644 index 000000000..557b4d936 --- /dev/null +++ b/bsd/sys/sysctl.h @@ -0,0 +1,646 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Mike Karels at Berkeley Software Design, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)sysctl.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_SYSCTL_H_ +#define _SYS_SYSCTL_H_ + +/* + * These are for the eproc structure defined below. + */ +#ifndef KERNEL +#include +#include + + +#endif + +#include +#include +#include + +/* + * Definitions for sysctl call. The sysctl call uses a hierarchical name + * for objects that can be examined or modified. The name is expressed as + * a sequence of integers. Like a file path name, the meaning of each + * component depends on its place in the hierarchy. The top-level and kern + * identifiers are defined here, and other identifiers are defined in the + * respective subsystem header files. + */ + +#define CTL_MAXNAME 12 /* largest number of components supported */ + +/* + * Each subsystem defined by sysctl defines a list of variables + * for that subsystem. Each name is either a node with further + * levels defined below it, or it is a leaf of some particular + * type given below. Each sysctl level defines a set of name/type + * pairs to be used by sysctl(1) in manipulating the subsystem. 
+ */ +struct ctlname { + char *ctl_name; /* subsystem name */ + int ctl_type; /* type of name */ +}; + +#define CTLTYPE 0xf /* Mask for the type */ +#define CTLTYPE_NODE 1 /* name is a node */ +#define CTLTYPE_INT 2 /* name describes an integer */ +#define CTLTYPE_STRING 3 /* name describes a string */ +#define CTLTYPE_QUAD 4 /* name describes a 64-bit number */ +#define CTLTYPE_OPAQUE 5 /* name describes a structure */ +#define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */ + +#define CTLFLAG_RD 0x80000000 /* Allow reads of variable */ +#define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */ +#define CTLFLAG_RW (CTLFLAG_RD|CTLFLAG_WR) +#define CTLFLAG_NOLOCK 0x20000000 /* XXX Don't Lock */ +#define CTLFLAG_ANYBODY 0x10000000 /* All users can set this var */ +#define CTLFLAG_SECURE 0x08000000 /* Permit set only if securelevel<=0 */ + +/* + * USE THIS instead of a hardwired number from the categories below + * to get dynamically assigned sysctl entries using the linker-set + * technology. This is the way nearly all new sysctl variables should + * be implemented. + * e.g. SYSCTL_INT(_parent, OID_AUTO, name, CTLFLAG_RW, &variable, 0, ""); + */ +#define OID_AUTO (-1) + +#ifdef KERNEL +#define SYSCTL_HANDLER_ARGS (struct sysctl_oid *oidp, void *arg1, int arg2, \ + struct sysctl_req *req) + +/* + * This describes the access space for a sysctl request. This is needed + * so that we can use the interface from the kernel or from user-space. + */ +struct sysctl_req { + struct proc *p; + int lock; + void *oldptr; + size_t oldlen; + size_t oldidx; + int (*oldfunc)(struct sysctl_req *, const void *, size_t); + void *newptr; + size_t newlen; + size_t newidx; + int (*newfunc)(struct sysctl_req *, void *, size_t); +}; + +SLIST_HEAD(sysctl_oid_list, sysctl_oid); + +/* + * This describes one "oid" in the MIB tree. Potentially more nodes can + * be hidden behind it, expanded by the handler. 
+ */ +struct sysctl_oid { + struct sysctl_oid_list *oid_parent; + SLIST_ENTRY(sysctl_oid) oid_link; + int oid_number; + int oid_kind; + void *oid_arg1; + int oid_arg2; + const char *oid_name; + int (*oid_handler) SYSCTL_HANDLER_ARGS; + const char *oid_fmt; +}; + +#define SYSCTL_IN(r, p, l) (r->newfunc)(r, p, l) +#define SYSCTL_OUT(r, p, l) (r->oldfunc)(r, p, l) + +int sysctl_handle_int SYSCTL_HANDLER_ARGS; +int sysctl_handle_long SYSCTL_HANDLER_ARGS; +int sysctl_handle_intptr SYSCTL_HANDLER_ARGS; +int sysctl_handle_string SYSCTL_HANDLER_ARGS; +int sysctl_handle_opaque SYSCTL_HANDLER_ARGS; + +/* + * These functions are used to add/remove an oid from the mib. + */ +void sysctl_register_oid(struct sysctl_oid *oidp); +void sysctl_unregister_oid(struct sysctl_oid *oidp); + +/* Declare an oid to allow child oids to be added to it. */ +#define SYSCTL_DECL(name) \ + extern struct sysctl_oid_list sysctl_##name##_children + +/* This constructs a "raw" MIB oid. */ +#define SYSCTL_OID(parent, nbr, name, kind, a1, a2, handler, fmt, descr) \ + struct sysctl_oid sysctl_##parent##_##name## = { \ + &sysctl_##parent##_children, { 0 }, \ + nbr, kind, a1, a2, #name, handler, fmt }; + + +/* This constructs a node from which other oids can hang. */ +#define SYSCTL_NODE(parent, nbr, name, access, handler, descr) \ + struct sysctl_oid_list sysctl_##parent##_##name##_children; \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_NODE|access, \ + (void*)&sysctl_##parent##_##name##_children, 0, handler, \ + "N", descr); + +/* Oid for a string. len can be 0 to indicate '\0' termination. */ +#define SYSCTL_STRING(parent, nbr, name, access, arg, len, descr) \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_STRING|access, \ + arg, len, sysctl_handle_string, "A", descr) + +/* Oid for an int. If ptr is NULL, val is returned. */ +#define SYSCTL_INT(parent, nbr, name, access, ptr, val, descr) \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ + ptr, val, sysctl_handle_int, "I", descr) + +/* Oid for a long. 
The pointer must be non NULL. */ +#define SYSCTL_LONG(parent, nbr, name, access, ptr, descr) \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ + ptr, 0, sysctl_handle_long, "L", descr) + +/* Oid for an opaque object. Specified by a pointer and a length. */ +#define SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr) \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|access, \ + ptr, len, sysctl_handle_opaque, fmt, descr) + +/* Oid for a struct. Specified by a pointer and a type. */ +#define SYSCTL_STRUCT(parent, nbr, name, access, ptr, type, descr) \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|access, \ + ptr, sizeof(struct type), sysctl_handle_opaque, \ + "S," #type, descr) + +/* Oid for a procedure. Specified by a pointer and an arg. */ +#define SYSCTL_PROC(parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ + SYSCTL_OID(parent, nbr, name, access, \ + ptr, arg, handler, fmt, descr) +#endif /* KERNEL */ + +/* + * Top-level identifiers + */ +#define CTL_UNSPEC 0 /* unused */ +#define CTL_KERN 1 /* "high kernel": proc, limits */ +#define CTL_VM 2 /* virtual memory */ +#define CTL_VFS 3 /* file system, mount type is next */ +#define CTL_NET 4 /* network, see socket.h */ +#define CTL_DEBUG 5 /* debugging parameters */ +#define CTL_HW 6 /* generic cpu/io */ +#define CTL_MACHDEP 7 /* machine dependent */ +#define CTL_USER 8 /* user-level */ +#define CTL_MAXID 9 /* number of valid top-level ids */ + +#define CTL_NAMES { \ + { 0, 0 }, \ + { "kern", CTLTYPE_NODE }, \ + { "vm", CTLTYPE_NODE }, \ + { "vfs", CTLTYPE_NODE }, \ + { "net", CTLTYPE_NODE }, \ + { "debug", CTLTYPE_NODE }, \ + { "hw", CTLTYPE_NODE }, \ + { "machdep", CTLTYPE_NODE }, \ + { "user", CTLTYPE_NODE }, \ +} + +/* + * CTL_KERN identifiers + */ +#define KERN_OSTYPE 1 /* string: system version */ +#define KERN_OSRELEASE 2 /* string: system release */ +#define KERN_OSREV 3 /* int: system revision */ +#define KERN_VERSION 4 /* string: compile time info */ +#define KERN_MAXVNODES 5 /* 
int: max vnodes */ +#define KERN_MAXPROC 6 /* int: max processes */ +#define KERN_MAXFILES 7 /* int: max open files */ +#define KERN_ARGMAX 8 /* int: max arguments to exec */ +#define KERN_SECURELVL 9 /* int: system security level */ +#define KERN_HOSTNAME 10 /* string: hostname */ +#define KERN_HOSTID 11 /* int: host identifier */ +#define KERN_CLOCKRATE 12 /* struct: struct clockrate */ +#define KERN_VNODE 13 /* struct: vnode structures */ +#define KERN_PROC 14 /* struct: process entries */ +#define KERN_FILE 15 /* struct: file entries */ +#define KERN_PROF 16 /* node: kernel profiling info */ +#define KERN_POSIX1 17 /* int: POSIX.1 version */ +#define KERN_NGROUPS 18 /* int: # of supplemental group ids */ +#define KERN_JOB_CONTROL 19 /* int: is job control available */ +#define KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */ +#define KERN_BOOTTIME 21 /* struct: time kernel was booted */ +#define KERN_NISDOMAINNAME 22 /* string: YP domain name */ +#define KERN_DOMAINNAME KERN_NISDOMAINNAME +#define KERN_MAXPARTITIONS 23 /* int: number of partitions/disk */ +#define KERN_KDEBUG 24 /* int: kernel trace points */ +#define KERN_UPDATEINTERVAL 25 /* int: update process sleep time */ +#define KERN_OSRELDATE 26 /* int: OS release date */ +#define KERN_NTP_PLL 27 /* node: NTP PLL control */ +#define KERN_BOOTFILE 28 /* string: name of booted kernel */ +#define KERN_MAXFILESPERPROC 29 /* int: max open files per proc */ +#define KERN_MAXPROCPERUID 30 /* int: max processes per uid */ +#define KERN_DUMPDEV 31 /* dev_t: device to dump on */ +#define KERN_IPC 32 /* node: anything related to IPC */ +#define KERN_DUMMY 33 /* unused */ +#define KERN_PS_STRINGS 34 /* int: address of PS_STRINGS */ +#define KERN_USRSTACK 35 /* int: address of USRSTACK */ +#define KERN_LOGSIGEXIT 36 /* int: do we log sigexit procs? 
*/ +#define KERN_SYMFILE 37 /* string: kernel symbol filename */ +#define KERN_PROCARGS 38 +#define KERN_PCSAMPLES 39 /* int: pc sampling */ +#define KERN_MAXID 40 /* number of valid kern ids */ + + +/* KERN_KDEBUG types */ +#define KERN_KDEFLAGS 1 +#define KERN_KDDFLAGS 2 +#define KERN_KDENABLE 3 +#define KERN_KDSETBUF 4 +#define KERN_KDGETBUF 5 +#define KERN_KDSETUP 6 +#define KERN_KDREMOVE 7 +#define KERN_KDSETREG 8 +#define KERN_KDGETREG 9 +#define KERN_KDREADTR 10 +#define KERN_KDPIDTR 11 +#define KERN_KDTHRMAP 12 +/* Don't use 13 as it is overloaded with KERN_VNODE */ +#define KERN_KDPIDEX 14 +#define KERN_KDSETRTCDEC 15 + +/* KERN_PCSAMPLES types */ +#define KERN_PCDISABLE 1 +#define KERN_PCSETBUF 2 +#define KERN_PCGETBUF 3 +#define KERN_PCSETUP 4 +#define KERN_PCREMOVE 5 +#define KERN_PCREADBUF 6 +#define KERN_PCSETREG 7 +#define KERN_PCCOMM 8 + +#define CTL_KERN_NAMES { \ + { 0, 0 }, \ + { "ostype", CTLTYPE_STRING }, \ + { "osrelease", CTLTYPE_STRING }, \ + { "osrevision", CTLTYPE_INT }, \ + { "version", CTLTYPE_STRING }, \ + { "maxvnodes", CTLTYPE_INT }, \ + { "maxproc", CTLTYPE_INT }, \ + { "maxfiles", CTLTYPE_INT }, \ + { "argmax", CTLTYPE_INT }, \ + { "securelevel", CTLTYPE_INT }, \ + { "hostname", CTLTYPE_STRING }, \ + { "hostid", CTLTYPE_INT }, \ + { "clockrate", CTLTYPE_STRUCT }, \ + { "vnode", CTLTYPE_STRUCT }, \ + { "proc", CTLTYPE_STRUCT }, \ + { "file", CTLTYPE_STRUCT }, \ + { "profiling", CTLTYPE_NODE }, \ + { "posix1version", CTLTYPE_INT }, \ + { "ngroups", CTLTYPE_INT }, \ + { "job_control", CTLTYPE_INT }, \ + { "saved_ids", CTLTYPE_INT }, \ + { "boottime", CTLTYPE_STRUCT }, \ + { "nisdomainname", CTLTYPE_STRING }, \ + { "maxpartitions", CTLTYPE_INT }, \ + { "kdebug", CTLTYPE_INT }, \ + { "update", CTLTYPE_INT }, \ + { "osreldate", CTLTYPE_INT }, \ + { "ntp_pll", CTLTYPE_NODE }, \ + { "bootfile", CTLTYPE_STRING }, \ + { "maxfilesperproc", CTLTYPE_INT }, \ + { "maxprocperuid", CTLTYPE_INT }, \ + { "dumpdev", CTLTYPE_STRUCT }, /* we lie; don't 
print as int */ \ + { "ipc", CTLTYPE_NODE }, \ + { "dummy", CTLTYPE_INT }, \ + { "ps_strings", CTLTYPE_INT }, \ + { "usrstack", CTLTYPE_INT }, \ + { "logsigexit", CTLTYPE_INT }, \ + { "symfile",CTLTYPE_STRING },\ +} + +/* + * CTL_VFS identifiers + */ +#define CTL_VFS_NAMES { \ + { "vfsconf", CTLTYPE_STRUCT }, \ +} + +/* + * KERN_PROC subtypes + */ +#define KERN_PROC_ALL 0 /* everything */ +#define KERN_PROC_PID 1 /* by process id */ +#define KERN_PROC_PGRP 2 /* by process group id */ +#define KERN_PROC_SESSION 3 /* by session of pid */ +#define KERN_PROC_TTY 4 /* by controlling tty */ +#define KERN_PROC_UID 5 /* by effective uid */ +#define KERN_PROC_RUID 6 /* by real uid */ + +/* + * KERN_PROC subtype ops return arrays of augmented proc structures: + */ +struct kinfo_proc { + struct extern_proc kp_proc; /* proc structure */ + struct eproc { + struct proc *e_paddr; /* address of proc */ + struct session *e_sess; /* session pointer */ + struct pcred e_pcred; /* process credentials */ + struct ucred e_ucred; /* current credentials */ +#ifdef sparc + struct { + segsz_t vm_rssize; /* resident set size */ + segsz_t vm_tsize; /* text size */ + segsz_t vm_dsize; /* data size */ + segsz_t vm_ssize; /* stack size */ + } e_vm; +#else + struct vmspace e_vm; /* address space */ +#endif + pid_t e_ppid; /* parent process id */ + pid_t e_pgid; /* process group id */ + short e_jobc; /* job control counter */ + dev_t e_tdev; /* controlling tty dev */ + pid_t e_tpgid; /* tty process group id */ + struct session *e_tsess; /* tty session pointer */ +#define WMESGLEN 7 + char e_wmesg[WMESGLEN+1]; /* wchan message */ + segsz_t e_xsize; /* text size */ + short e_xrssize; /* text rss */ + short e_xccount; /* text references */ + short e_xswrss; + long e_flag; +#define EPROC_CTTY 0x01 /* controlling tty vnode active */ +#define EPROC_SLEADER 0x02 /* session leader */ + char e_login[MAXLOGNAME]; /* setlogin() name */ + long e_spare[4]; + } kp_eproc; +}; + +/* + * KERN_IPC identifiers + */ 
+#define KIPC_MAXSOCKBUF 1 /* int: max size of a socket buffer */ +#define KIPC_SOCKBUF_WASTE 2 /* int: wastage factor in sockbuf */ +#define KIPC_SOMAXCONN 3 /* int: max length of connection q */ +#define KIPC_MAX_LINKHDR 4 /* int: max length of link header */ +#define KIPC_MAX_PROTOHDR 5 /* int: max length of network header */ +#define KIPC_MAX_HDR 6 /* int: max total length of headers */ +#define KIPC_MAX_DATALEN 7 /* int: max length of data? */ +#define KIPC_MBSTAT 8 /* struct: mbuf usage statistics */ +#define KIPC_NMBCLUSTERS 9 /* int: maximum mbuf clusters */ + +/* + * CTL_VM identifiers + */ +#define VM_METER 1 /* struct vmmeter */ +#define VM_LOADAVG 2 /* struct loadavg */ +#define VM_MAXID 3 /* number of valid vm ids */ +#define VM_MACHFACTOR 4 /* struct loadavg with mach factor*/ + +#define CTL_VM_NAMES { \ + { 0, 0 }, \ + { "vmmeter", CTLTYPE_STRUCT }, \ + { "loadavg", CTLTYPE_STRUCT }, \ +} + +/* + * CTL_HW identifiers + */ +#define HW_MACHINE 1 /* string: machine class */ +#define HW_MODEL 2 /* string: specific machine model */ +#define HW_NCPU 3 /* int: number of cpus */ +#define HW_BYTEORDER 4 /* int: machine byte order */ +#define HW_PHYSMEM 5 /* int: total memory */ +#define HW_USERMEM 6 /* int: non-kernel memory */ +#define HW_PAGESIZE 7 /* int: software page size */ +#define HW_DISKNAMES 8 /* strings: disk drive names */ +#define HW_DISKSTATS 9 /* struct: diskstats[] */ +#define HW_EPOCH 10 /* int: 0 for Legacy, else NewWorld */ +#define HW_FLOATINGPT 11 /* int: has HW floating point? */ +#define HW_MACHINE_ARCH 12 /* string: machine architecture */ +#define HW_VECTORUNIT 13 /* int: has HW vector unit? 
*/ +#define HW_BUS_FREQ 14 /* int: Bus Frequency */ +#define HW_CPU_FREQ 15 /* int: CPU Frequency */ +#define HW_CACHELINE 16 /* int: Cache Line Size in Bytes */ +#define HW_L1ICACHESIZE 17 /* int: L1 I Cache Size in Bytes */ +#define HW_L1DCACHESIZE 18 /* int: L1 D Cache Size in Bytes */ +#define HW_L2SETTINGS 19 /* int: L2 Cache Settings */ +#define HW_L2CACHESIZE 20 /* int: L2 Cache Size in Bytes */ +#define HW_L3SETTINGS 21 /* int: L3 Cache Settings */ +#define HW_L3CACHESIZE 22 /* int: L3 Cache Size in Bytes */ +#define HW_MAXID 23 /* number of valid hw ids */ + +#define CTL_HW_NAMES { \ + { 0, 0 }, \ + { "machine", CTLTYPE_STRING }, \ + { "model", CTLTYPE_STRING }, \ + { "ncpu", CTLTYPE_INT }, \ + { "byteorder", CTLTYPE_INT }, \ + { "physmem", CTLTYPE_INT }, \ + { "usermem", CTLTYPE_INT }, \ + { "pagesize", CTLTYPE_INT }, \ + { "disknames", CTLTYPE_STRUCT }, \ + { "diskstats", CTLTYPE_STRUCT }, \ + { "epoch", CTLTYPE_INT }, \ + { "floatingpoint", CTLTYPE_INT }, \ + { "machinearch", CTLTYPE_STRING }, \ + { "vectorunit", CTLTYPE_INT }, \ + { "busfrequency", CTLTYPE_INT }, \ + { "cpufrequency", CTLTYPE_INT }, \ + { "cachelinesize", CTLTYPE_INT }, \ + { "l1icachesize", CTLTYPE_INT }, \ + { "l1dcachesize", CTLTYPE_INT }, \ + { "l2settings", CTLTYPE_INT }, \ + { "l2cachesize", CTLTYPE_INT }, \ + { "l3settings", CTLTYPE_INT }, \ + { "l3cachesize", CTLTYPE_INT }, \ +} + +/* + * CTL_USER definitions + */ +#define USER_CS_PATH 1 /* string: _CS_PATH */ +#define USER_BC_BASE_MAX 2 /* int: BC_BASE_MAX */ +#define USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */ +#define USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */ +#define USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */ +#define USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */ +#define USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */ +#define USER_LINE_MAX 8 /* int: LINE_MAX */ +#define USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */ +#define USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */ +#define USER_POSIX2_C_BIND 11 /* int: 
POSIX2_C_BIND */ +#define USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */ +#define USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */ +#define USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */ +#define USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */ +#define USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */ +#define USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */ +#define USER_POSIX2_UPE 18 /* int: POSIX2_UPE */ +#define USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */ +#define USER_TZNAME_MAX 20 /* int: POSIX2_TZNAME_MAX */ +#define USER_MAXID 21 /* number of valid user ids */ + +#define CTL_USER_NAMES { \ + { 0, 0 }, \ + { "cs_path", CTLTYPE_STRING }, \ + { "bc_base_max", CTLTYPE_INT }, \ + { "bc_dim_max", CTLTYPE_INT }, \ + { "bc_scale_max", CTLTYPE_INT }, \ + { "bc_string_max", CTLTYPE_INT }, \ + { "coll_weights_max", CTLTYPE_INT }, \ + { "expr_nest_max", CTLTYPE_INT }, \ + { "line_max", CTLTYPE_INT }, \ + { "re_dup_max", CTLTYPE_INT }, \ + { "posix2_version", CTLTYPE_INT }, \ + { "posix2_c_bind", CTLTYPE_INT }, \ + { "posix2_c_dev", CTLTYPE_INT }, \ + { "posix2_char_term", CTLTYPE_INT }, \ + { "posix2_fort_dev", CTLTYPE_INT }, \ + { "posix2_fort_run", CTLTYPE_INT }, \ + { "posix2_localedef", CTLTYPE_INT }, \ + { "posix2_sw_dev", CTLTYPE_INT }, \ + { "posix2_upe", CTLTYPE_INT }, \ + { "stream_max", CTLTYPE_INT }, \ + { "tzname_max", CTLTYPE_INT }, \ +} + + + +/* + * CTL_DEBUG definitions + * + * Second level identifier specifies which debug variable. + * Third level identifier specifies which structure component. + */ +#define CTL_DEBUG_NAME 0 /* string: variable name */ +#define CTL_DEBUG_VALUE 1 /* int: variable value */ +#define CTL_DEBUG_MAXID 20 + +#ifdef KERNEL + +extern struct sysctl_oid_list sysctl__children; +SYSCTL_DECL(_kern); +SYSCTL_DECL(_sysctl); +SYSCTL_DECL(_vm); +SYSCTL_DECL(_vfs); +SYSCTL_DECL(_net); +SYSCTL_DECL(_debug); +SYSCTL_DECL(_hw); +SYSCTL_DECL(_machdep); +SYSCTL_DECL(_user); + + +#ifdef DEBUG +/* + * CTL_DEBUG variables. 
+ * + * These are declared as separate variables so that they can be + * individually initialized at the location of their associated + * variable. The loader prevents multiple use by issuing errors + * if a variable is initialized in more than one place. They are + * aggregated into an array in debug_sysctl(), so that it can + * conveniently locate them when queried. If more debugging + * variables are added, they must also be declared here and also + * entered into the array. + */ +struct ctldebug { + char *debugname; /* name of debugging variable */ + int *debugvar; /* pointer to debugging variable */ +}; +extern struct ctldebug debug0, debug1, debug2, debug3, debug4; +extern struct ctldebug debug5, debug6, debug7, debug8, debug9; +extern struct ctldebug debug10, debug11, debug12, debug13, debug14; +extern struct ctldebug debug15, debug16, debug17, debug18, debug19; +#endif /* DEBUG */ + +extern char machine[]; +extern char osrelease[]; +extern char ostype[]; + +struct linker_set; + +void sysctl_register_set(struct linker_set *lsp); +void sysctl_unregister_set(struct linker_set *lsp); +int kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, + size_t *oldlenp, void *new, size_t newlen, + size_t *retval); +int userland_sysctl(struct proc *p, int *name, u_int namelen, void *old, + size_t *oldlenp, int inkernel, void *new, size_t newlen, + size_t *retval); +/* + * Internal sysctl function calling convention: + * + * (*sysctlfn)(name, namelen, oldval, oldlenp, newval, newlen); + * + * The name parameter points at the next component of the name to be + * interpreted. The namelen parameter is the number of integers in + * the name. 
+ */ +typedef int (sysctlfn) + __P((int *, u_int, void *, size_t *, void *, size_t, struct proc *)); + +int sysctl_int __P((void *, size_t *, void *, size_t, int *)); +int sysctl_rdint __P((void *, size_t *, void *, int)); +int sysctl_string __P((void *, size_t *, void *, size_t, char *, int)); +int sysctl_rdstring __P((void *, size_t *, void *, char *)); +int sysctl_rdstruct __P((void *, size_t *, void *, void *, int)); +void fill_eproc __P((struct proc *, struct eproc *)); + +#else /* !KERNEL */ +#include + +__BEGIN_DECLS +int sysctl __P((int *, u_int, void *, size_t *, void *, size_t)); +int sysctlbyname __P((const char *, void *, size_t *, void *, size_t)); +__END_DECLS +#endif /* KERNEL */ +#endif /* !_SYS_SYSCTL_H_ */ diff --git a/bsd/sys/syslimits.h b/bsd/sys/syslimits.h new file mode 100644 index 000000000..a43c854a1 --- /dev/null +++ b/bsd/sys/syslimits.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: syslimits.h,v 1.15 1997/06/25 00:48:09 lukem Exp $ */ + +/* + * Copyright (c) 1988, 1993 + * The Regents of the University of California. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)syslimits.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_SYSLIMITS_H_ +#define _SYS_SYSLIMITS_H_ + +#if !defined(_ANSI_SOURCE) +#define ARG_MAX (64 * 1024) /* max bytes for an exec function */ +#define CHILD_MAX 100 /* max simultaneous processes */ +#define GID_MAX 2147483647U /* max value for a gid_t (2^31-1) */ +#define LINK_MAX 32767 /* max file link count */ +#define MAX_CANON 255 /* max bytes in term canon input line */ +#define MAX_INPUT 255 /* max bytes in terminal input */ +#define NAME_MAX 255 /* max bytes in a file name */ +#define NGROUPS_MAX 16 /* max supplemental group id's */ +#define UID_MAX 2147483647U /* max value for a uid_t (2^31-1) */ +#define OPEN_MAX 10240 /* max open files per process */ +#define PATH_MAX 1024 /* max bytes in pathname */ +#define PIPE_BUF 512 /* max bytes for atomic pipe writes */ + +#define BC_BASE_MAX INT_MAX /* max ibase/obase values in bc(1) */ +#define BC_DIM_MAX 65535 /* max array elements in bc(1) */ +#define BC_SCALE_MAX INT_MAX /* max scale value in bc(1) */ +#define BC_STRING_MAX INT_MAX /* max const string length in bc(1) */ +#define COLL_WEIGHTS_MAX 2 /* max weights for order keyword */ +#define EQUIV_CLASS_MAX 2 +#define EXPR_NEST_MAX 32 /* max expressions nested in expr(1) */ +#define LINE_MAX 2048 /* max bytes in an input line */ +#define RE_DUP_MAX 255 /* max RE's in interval notation */ +#endif /* !_ANSI_SOURCE */ + +#endif /* !_SYS_SYSLIMITS_H_ */ diff --git a/bsd/sys/syslog.h b/bsd/sys/syslog.h new file mode 100644 index 000000000..ce67c44d8 --- /dev/null +++ b/bsd/sys/syslog.h @@ -0,0 +1,303 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1988, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)syslog.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_SYSLOG_H_ +#define _SYS_SYSLOG_H_ + +#define _PATH_LOG "/var/run/syslog" + +/* + * priorities/facilities are encoded into a single 32-bit quantity, where the + * bottom 3 bits are the priority (0-7) and the top 28 bits are the facility + * (0-big number). Both the priorities and the facilities map roughly + * one-to-one to strings in the syslogd(8) source code. This mapping is + * included in this file. 
+ * + * priorities (these are ordered) + */ +#define LOG_EMERG 0 /* system is unusable */ +#define LOG_ALERT 1 /* action must be taken immediately */ +#define LOG_CRIT 2 /* critical conditions */ +#define LOG_ERR 3 /* error conditions */ +#define LOG_WARNING 4 /* warning conditions */ +#define LOG_NOTICE 5 /* normal but significant condition */ +#define LOG_INFO 6 /* informational */ +#define LOG_DEBUG 7 /* debug-level messages */ + +#define LOG_PRIMASK 0x07 /* mask to extract priority part (internal) */ + /* extract priority */ +#define LOG_PRI(p) ((p) & LOG_PRIMASK) +#define LOG_MAKEPRI(fac, pri) (((fac) << 3) | (pri)) + +#ifdef SYSLOG_NAMES +#define INTERNAL_NOPRI 0x10 /* the "no priority" priority */ + /* mark "facility" */ +#define INTERNAL_MARK LOG_MAKEPRI(LOG_NFACILITIES, 0) +typedef struct _code { + char *c_name; + int c_val; +} CODE; + +CODE prioritynames[] = { + "alert", LOG_ALERT, + "crit", LOG_CRIT, + "debug", LOG_DEBUG, + "emerg", LOG_EMERG, + "err", LOG_ERR, + "error", LOG_ERR, /* DEPRECATED */ + "info", LOG_INFO, + "none", INTERNAL_NOPRI, /* INTERNAL */ + "notice", LOG_NOTICE, + "panic", LOG_EMERG, /* DEPRECATED */ + "warn", LOG_WARNING, /* DEPRECATED */ + "warning", LOG_WARNING, + NULL, -1, +}; +#endif + +/* facility codes */ +#define LOG_KERN (0<<3) /* kernel messages */ +#define LOG_USER (1<<3) /* random user-level messages */ +#define LOG_MAIL (2<<3) /* mail system */ +#define LOG_DAEMON (3<<3) /* system daemons */ +#define LOG_AUTH (4<<3) /* security/authorization messages */ +#define LOG_SYSLOG (5<<3) /* messages generated internally by syslogd */ +#define LOG_LPR (6<<3) /* line printer subsystem */ +#define LOG_NEWS (7<<3) /* network news subsystem */ +#define LOG_UUCP (8<<3) /* UUCP subsystem */ +#define LOG_CRON (9<<3) /* clock daemon */ +#define LOG_AUTHPRIV (10<<3) /* security/authorization messages (private) */ +#define LOG_FTP (11<<3) /* ftp daemon */ +#define LOG_NETINFO (12<<3) /* NetInfo */ +#define LOG_REMOTEAUTH (13<<3) /* remote 
authentication/authorization */ + + /* other codes through 15 reserved for system use */ +#define LOG_LOCAL0 (16<<3) /* reserved for local use */ +#define LOG_LOCAL1 (17<<3) /* reserved for local use */ +#define LOG_LOCAL2 (18<<3) /* reserved for local use */ +#define LOG_LOCAL3 (19<<3) /* reserved for local use */ +#define LOG_LOCAL4 (20<<3) /* reserved for local use */ +#define LOG_LOCAL5 (21<<3) /* reserved for local use */ +#define LOG_LOCAL6 (22<<3) /* reserved for local use */ +#define LOG_LOCAL7 (23<<3) /* reserved for local use */ + +#define LOG_NFACILITIES 24 /* current number of facilities */ +#define LOG_FACMASK 0x03f8 /* mask to extract facility part */ + /* facility of pri */ +#define LOG_FAC(p) (((p) & LOG_FACMASK) >> 3) + +#ifdef SYSLOG_NAMES +CODE facilitynames[] = { + "auth", LOG_AUTH, + "authpriv", LOG_AUTHPRIV, + "cron", LOG_CRON, + "daemon", LOG_DAEMON, + "ftp", LOG_FTP, + "kern", LOG_KERN, + "lpr", LOG_LPR, + "mail", LOG_MAIL, + "mark", INTERNAL_MARK, /* INTERNAL */ + "netinfo", LOG_NETINFO, + "remoteauth", LOG_REMOTEAUTH, + "news", LOG_NEWS, + "security", LOG_AUTH, /* DEPRECATED */ + "syslog", LOG_SYSLOG, + "user", LOG_USER, + "uucp", LOG_UUCP, + "local0", LOG_LOCAL0, + "local1", LOG_LOCAL1, + "local2", LOG_LOCAL2, + "local3", LOG_LOCAL3, + "local4", LOG_LOCAL4, + "local5", LOG_LOCAL5, + "local6", LOG_LOCAL6, + "local7", LOG_LOCAL7, + NULL, -1, +}; +#endif + +#ifdef KERNEL +#define LOG_PRINTF -1 /* pseudo-priority to indicate use of printf */ +#endif + +/* + * arguments to setlogmask. + */ +#define LOG_MASK(pri) (1 << (pri)) /* mask for one priority */ +#define LOG_UPTO(pri) ((1 << ((pri)+1)) - 1) /* all priorities through pri */ + +/* + * Option flags for openlog. + * + * LOG_ODELAY no longer does anything. + * LOG_NDELAY is the inverse of what it used to be. 
+ */
+#define LOG_PID 0x01 /* log the pid with each message */
+#define LOG_CONS 0x02 /* log on the console if errors in sending */
+#define LOG_ODELAY 0x04 /* delay open until first syslog() (default) */
+#define LOG_NDELAY 0x08 /* don't delay open */
+#define LOG_NOWAIT 0x10 /* don't wait for console forks: DEPRECATED */
+#define LOG_PERROR 0x20 /* log to stderr as well */
+
+#include <sys/cdefs.h>
+
+#ifndef KERNEL
+
+/*
+ * Don't use va_list in the vsyslog() prototype. Va_list is typedef'd in two
+ * places (<machine/varargs.h> and <machine/stdarg.h>), so if we include one
+ * of them here we may collide with the utility's includes. It's unreasonable
+ * for utilities to have to include one of them to include syslog.h, so we get
+ * _BSD_VA_LIST_ from <machine/ansi.h> and use it.
+ */
+#include <machine/ansi.h>
+
+__BEGIN_DECLS
+void closelog __P((void));
+void openlog __P((const char *, int, int));
+int setlogmask __P((int));
+void syslog __P((int, const char *, ...));
+void vsyslog __P((int, const char *, _BSD_VA_LIST_));
+__END_DECLS
+
+#else /* !KERNEL */
+
+/*
+ * bit field descriptions for printf %r and %R formats
+ */
+
+/*
+ * printf("%r %R", val, reg_descp);
+ * struct reg_desc *reg_descp;
+ *
+ * the %r and %R formats allow formatted output of bit fields.
+ * reg_descp points to an array of reg_desc structures, each element of the
+ * array describes a range of bits within val. the array should have a
+ * final element with all structure elements 0.
+ * %r outputs a string of the format "<bit field descriptions>"
+ * %R outputs a string of the format "0x%x<bit field descriptions>"
+ *
+ * The fields in a reg_desc are:
+ * unsigned rd_mask; An appropriate mask to isolate the bit field
+ * within a word, and'ed with val
+ *
+ * int rd_shift; A shift amount to be done to the isolated
+ * bit field. done before printing the isolated
+ * bit field with rd_format and before searching
+ * for symbolic value names in rd_values
+ *
+ * char *rd_name; If non-null, a bit field name to label any
+ * output from rd_format or searching rd_values.
+ * if neither rd_format or rd_values is non-null + * rd_name is printed only if the isolated + * bit field is non-null. + * + * char *rd_format; If non-null, the shifted bit field value + * is printed using this format. + * + * struct reg_values *rd_values; If non-null, a pointer to a table + * matching numeric values with symbolic names. + * rd_values are searched and the symbolic + * value is printed if a match is found, if no + * match is found "???" is printed. + * + * printf("%n %N", val, reg_valuesp); + * struct reg_values *reg_valuesp; + * + * the %n and %N formats allow formatted output of symbolic constants + * Reg_valuesp is a pointer to an array of struct reg_values which pairs + * numeric values (rv_value) with symbolic names (rv_name). The array is + * terminated with a reg_values entry that has a null pointer for the + * rv_name field. When %n or %N is used rd_values are searched and the + * symbolic value is printed if a match is found, if no match is found + * "???" is printed. + * + * printf("%C", val); + * int val; + * + * the %C format prints an int as a 4 character string. + * The most significant byte of the int is printed first, the least + * significant byte is printed last. 
+ */ + +/* + * register values + * map between numeric values and symbolic values + */ +struct reg_values { + unsigned rv_value; + char *rv_name; +}; + +/* + * register descriptors are used for formatted prints of register values + * rd_mask and rd_shift must be defined, other entries may be null + */ +struct reg_desc { + unsigned rd_mask; /* mask to extract field */ + int rd_shift; /* shift for extracted value, - >>, + << */ + char *rd_name; /* field name */ + char *rd_format; /* format to print field */ + struct reg_values *rd_values; /* symbolic names of values */ +}; + +void logpri __P((int)); +void log __P((int, const char *, ...)); +void addlog __P((const char *, ...)); + +#endif /* !KERNEL */ +#endif /* !_SYS_SYSLOG_H_ */ diff --git a/bsd/sys/systm.h b/bsd/sys/systm.h new file mode 100644 index 000000000..88057b052 --- /dev/null +++ b/bsd/sys/systm.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1988, 1991, 1993 + * The Regents of the University of California. 
All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)systm.h 8.7 (Berkeley) 3/29/95
+ */
+
+/*
+ * The `securelevel' variable controls the security level of the system.
+ * It can only be decreased by process 1 (/sbin/init).
+ *
+ * Security levels are as follows:
+ * -1 permanently insecure mode - always run system in level 0 mode.
+ * 0 insecure mode - immutable and append-only flags may be turned off.
+ * All devices may be read or written subject to permission modes.
+ * 1 secure mode - immutable and append-only flags may not be changed;
+ * raw disks of mounted filesystems, /dev/mem, and /dev/kmem are
+ * read-only.
+ * 2 highly secure mode - same as (1) plus raw disks are always
+ * read-only whether mounted or not. This level precludes tampering
+ * with filesystems by unmounting them, but also inhibits running
+ * newfs while the system is secured.
+ *
+ * In normal operation, the system runs in level 0 mode while single user
+ * and in level 1 mode while multiuser. If level 2 mode is desired while
+ * running multiuser, it can be set in the multiuser startup script
+ * (/etc/rc.local) using sysctl(1). If it is desired to run the system
+ * in level 0 mode while multiuser, initialize the variable securelevel
+ * in /sys/kern/kern_sysctl.c to -1. Note that it is NOT initialized to
+ * zero as that would allow the vmunix binary to be patched to -1.
+ * Without initialization, securelevel loads in the BSS area which only + * comes into existence when the kernel is loaded and hence cannot be + * patched by a stalking hacker. + */ + +#ifndef _SYS_SYSTM_H_ +#define _SYS_SYSTM_H_ + +#include + +#include +#include +#include +#include +#include +#include +__BEGIN_DECLS +#include +__END_DECLS + +#define KERNEL_FUNNEL 1 +#define NETWORK_FUNNEL 2 + +extern int securelevel; /* system security level */ +extern const char *panicstr; /* panic message */ +extern char version[]; /* system version */ +extern char copyright[]; /* system copyright */ + +extern int nblkdev; /* number of entries in bdevsw */ +extern int nchrdev; /* number of entries in cdevsw */ + +extern dev_t rootdev; /* root device */ +extern struct vnode *rootvp; /* vnode equivalent to above */ + +extern struct sysent { /* system call table */ + int16_t sy_narg; /* number of args */ + int8_t sy_parallel;/* can execute in parallel */ + int8_t sy_funnel; /* funnel type */ + int32_t (*sy_call)(); /* implementing function */ +} sysent[]; +extern int nsysent; + +extern int boothowto; /* reboot flags, from console subsystem */ +extern int show_space; + +extern funnel_t * kernel_flock; +extern funnel_t * network_flock; + +#define SYSINIT(a,b,c,d,e) +#define MALLOC_DEFINE(a,b,c) + + +#define getenv_int(a,b) (*b = 0) +#define KASSERT(exp,msg) + +/* + * General function declarations. 
+ */ +__BEGIN_DECLS +int nullop __P((void)); +int enodev (); /* avoid actual prototype for multiple use */ +void enodev_strat(); +int nulldev(); +int enoioctl __P((void)); +int enxio __P((void)); +int eopnotsupp __P((void)); +int einval __P((void)); +int seltrue __P((dev_t dev, int which, struct proc *p)); +void *hashinit __P((int count, int type, u_long *hashmask)); +int nosys __P((struct proc *, void *, register_t *)); + +#ifdef __GNUC__ +volatile void panic __P((const char *, ...)); +#else +void panic __P((const char *, ...)); +#endif +void tablefull __P((const char *)); +void log __P((int, const char *, ...)); +void kprintf __P((const char *, ...)); +void ttyprintf __P((struct tty *, const char *, ...)); + +int kvprintf __P((char const *, void (*)(int, void*), void *, int, + _BSD_VA_LIST_)); + +int snprintf __P((char *, size_t, const char *, ...)); +int sprintf __P((char *buf, const char *, ...)); +void uprintf __P((const char *, ...)); +void vprintf __P((const char *, _BSD_VA_LIST_)); +int vsnprintf __P((char *, size_t, const char *, _BSD_VA_LIST_)); +int vsprintf __P((char *buf, const char *, _BSD_VA_LIST_)); + + +void bcopy __P((const void *from, void *to, size_t len)); +void ovbcopy __P((const void *from, void *to, size_t len)); +void bzero __P((void *buf, size_t len)); + +int copystr __P((void *kfaddr, void *kdaddr, size_t len, size_t *done)); +int copyinstr __P((void *udaddr, void *kaddr, size_t len, size_t *done)); +int copyoutstr __P((void *kaddr, void *udaddr, size_t len, size_t *done)); +int copyin __P((void *udaddr, void *kaddr, size_t len)); +int copyout __P((void *kaddr, void *udaddr, size_t len)); +int copywithin __P((void *saddr, void *daddr, size_t len)); + +int fubyte __P((void *base)); +#ifdef notdef +int fuibyte __P((void *base)); +#endif +int subyte __P((void *base, int byte)); +int suibyte __P((void *base, int byte)); +long fuword __P((void *base)); +long fuiword __P((void *base)); +int suword __P((void *base, long word)); +int suiword 
__P((void *base, long word)); + +int hzto __P((struct timeval *tv)); +typedef void (*timeout_fcn_t)(void *); +void timeout __P((void (*)(void *), void *arg, int ticks)); +void untimeout __P((void (*)(void *), void *arg)); +void realitexpire __P((void *)); + +void bsd_hardclock __P((boolean_t usermode, caddr_t pc, int numticks)); +void gatherstats __P((boolean_t usermode, caddr_t pc)); + +void initclocks __P((void)); + +void startprofclock __P((struct proc *)); +void stopprofclock __P((struct proc *)); +void setstatclockrate __P((int hzrate)); +void set_fsblocksize __P((struct vnode *)); + +void addlog __P((const char *, ...)); +void printf __P((const char *, ...)); + +extern boolean_t thread_funnel_switch(int oldfnl, int newfnl); + +#include + +#ifdef DDB +/* debugger entry points */ +int Debugger __P((void)); /* in DDB only */ +#endif + +__END_DECLS + +#endif /* !_SYS_SYSTM_H_ */ + diff --git a/bsd/sys/table.h b/bsd/sys/table.h new file mode 100644 index 000000000..ec4da3c58 --- /dev/null +++ b/bsd/sys/table.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1986 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * HISTORY + * 27-Apr-97 A.Ramesh + * added limited set to MacOSX + * + * 05-Sep-91 Doug Mitchell at NeXT + * Made entire contents PRIVATE. + * + * + */ + +#ifdef KERNEL_PRIVATE + +#ifndef _SYS_TABLE_ +#define _SYS_TABLE_ + +#include +#include + +#define TBL_LOADAVG 3 /* (no index) */ +#define TBL_ARGUMENTS 6 /* index by process ID */ +#define TBL_PROCINFO 10 /* index by proc table slot */ +#define TBL_MACHFACTOR 11 /* index by cpu number */ +#define TBL_CPUINFO 12 /* (no index), generic CPU info */ + +/* + * Machine specific table id base + */ +#define TBL_MACHDEP_BASE 0x4000 /* Machine dependent codes start here */ + +/* + * Return codes from machine dependent calls + */ +#define TBL_MACHDEP_NONE 0 /* Not handled by machdep code */ +#define TBL_MACHDEP_OKAY 1 /* Handled by machdep code */ +#define TBL_MACHDEP_BAD -1 /* Bad status from machdep code */ + + + +/* + * TBL_LOADAVG data layout + * (used by TBL_MACHFACTOR too) + */ +struct tbl_loadavg +{ + long tl_avenrun[3]; + int tl_lscale; /* 0 scale when floating point */ +}; + +/* + * TBL_PROCINFO data layout + */ +#define PI_COMLEN 19 /* length of command string */ +struct tbl_procinfo +{ + int pi_uid; /* user ID */ + int pi_pid; /* proc ID */ + int pi_ppid; /* parent proc ID */ + int pi_pgrp; /* proc group ID */ + int pi_ttyd; /* controlling terminal number */ + int pi_status; /* process status: */ +#define PI_EMPTY 0 /* no process */ +#define PI_ACTIVE 1 /* active process */ +#define PI_EXITING 2 /* exiting */ +#define PI_ZOMBIE 3 /* zombie */ + int pi_flag; /* other random flags */ + char pi_comm[PI_COMLEN+1]; + /* short command name */ +}; + +/* + * TBL_CPUINFO data layout + */ +struct tbl_cpuinfo +{ + int ci_swtch; /* # context switches */ + int ci_intr; /* 
# interrupts */ + int ci_syscall; /* # system calls */ + int ci_traps; /* # system traps */ + int ci_hz; /* # ticks per second */ + int ci_phz; /* profiling hz */ + int ci_cptime[CPUSTATES]; /* cpu state times */ +}; + + + +#ifdef KERNEL +/* + * Machine specific procedure prototypes. + */ +int machine_table(int id, int index, caddr_t addr, int nel, u_int lel, int set); +int machine_table_setokay(int id); +#endif /* KERNEL */ + +#endif /* _SYS_TABLE_ */ + +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/sys/termios.h b/bsd/sys/termios.h new file mode 100644 index 000000000..71afa6606 --- /dev/null +++ b/bsd/sys/termios.h @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1988, 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)termios.h 8.3 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_TERMIOS_H_ +#define _SYS_TERMIOS_H_ + +/* + * Special Control Characters + * + * Index into c_cc[] character array. 
+ * + * Name Subscript Enabled by + */ +#define VEOF 0 /* ICANON */ +#define VEOL 1 /* ICANON */ +#ifndef _POSIX_SOURCE +#define VEOL2 2 /* ICANON together with IEXTEN */ +#endif +#define VERASE 3 /* ICANON */ +#ifndef _POSIX_SOURCE +#define VWERASE 4 /* ICANON together with IEXTEN */ +#endif +#define VKILL 5 /* ICANON */ +#ifndef _POSIX_SOURCE +#define VREPRINT 6 /* ICANON together with IEXTEN */ +#endif +/* 7 spare 1 */ +#define VINTR 8 /* ISIG */ +#define VQUIT 9 /* ISIG */ +#define VSUSP 10 /* ISIG */ +#ifndef _POSIX_SOURCE +#define VDSUSP 11 /* ISIG together with IEXTEN */ +#endif +#define VSTART 12 /* IXON, IXOFF */ +#define VSTOP 13 /* IXON, IXOFF */ +#ifndef _POSIX_SOURCE +#define VLNEXT 14 /* IEXTEN */ +#define VDISCARD 15 /* IEXTEN */ +#endif +#define VMIN 16 /* !ICANON */ +#define VTIME 17 /* !ICANON */ +#ifndef _POSIX_SOURCE +#define VSTATUS 18 /* ICANON together with IEXTEN */ +/* 19 spare 2 */ +#endif +#define NCCS 20 + +#ifndef _POSIX_VDISABLE +#define _POSIX_VDISABLE 0xff +#endif + +#ifndef _POSIX_SOURCE +#define CCEQ(val, c) ((c) == (val) ? 
(val) != _POSIX_VDISABLE : 0) +#endif + +/* + * Input flags - software input processing + */ +#define IGNBRK 0x00000001 /* ignore BREAK condition */ +#define BRKINT 0x00000002 /* map BREAK to SIGINTR */ +#define IGNPAR 0x00000004 /* ignore (discard) parity errors */ +#define PARMRK 0x00000008 /* mark parity and framing errors */ +#define INPCK 0x00000010 /* enable checking of parity errors */ +#define ISTRIP 0x00000020 /* strip 8th bit off chars */ +#define INLCR 0x00000040 /* map NL into CR */ +#define IGNCR 0x00000080 /* ignore CR */ +#define ICRNL 0x00000100 /* map CR to NL (ala CRMOD) */ +#define IXON 0x00000200 /* enable output flow control */ +#define IXOFF 0x00000400 /* enable input flow control */ +#ifndef _POSIX_SOURCE +#define IXANY 0x00000800 /* any char will restart after stop */ +#define IMAXBEL 0x00002000 /* ring bell on input queue full */ +#endif /*_POSIX_SOURCE */ + +/* + * Output flags - software output processing + */ +#define OPOST 0x00000001 /* enable following output processing */ +#ifndef _POSIX_SOURCE +#define ONLCR 0x00000002 /* map NL to CR-NL (ala CRMOD) */ +#define OXTABS 0x00000004 /* expand tabs to spaces */ +#define ONOEOT 0x00000008 /* discard EOT's (^D) on output) */ +#endif /*_POSIX_SOURCE */ + +/* + * Control flags - hardware control of terminal + */ +#ifndef _POSIX_SOURCE +#define CIGNORE 0x00000001 /* ignore control flags */ +#endif +#define CSIZE 0x00000300 /* character size mask */ +#define CS5 0x00000000 /* 5 bits (pseudo) */ +#define CS6 0x00000100 /* 6 bits */ +#define CS7 0x00000200 /* 7 bits */ +#define CS8 0x00000300 /* 8 bits */ +#define CSTOPB 0x00000400 /* send 2 stop bits */ +#define CREAD 0x00000800 /* enable receiver */ +#define PARENB 0x00001000 /* parity enable */ +#define PARODD 0x00002000 /* odd parity, else even */ +#define HUPCL 0x00004000 /* hang up on last close */ +#define CLOCAL 0x00008000 /* ignore modem status lines */ +#ifndef _POSIX_SOURCE +#define CCTS_OFLOW 0x00010000 /* CTS flow control of output 
*/ +#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW) +#define CRTS_IFLOW 0x00020000 /* RTS flow control of input */ +#define CDTR_IFLOW 0x00040000 /* DTR flow control of input */ +#define CDSR_OFLOW 0x00080000 /* DSR flow control of output */ +#define CCAR_OFLOW 0x00100000 /* DCD flow control of output */ +#define MDMBUF 0x00100000 /* old name for CCAR_OFLOW */ +#endif + + +/* + * "Local" flags - dumping ground for other state + * + * Warning: some flags in this structure begin with + * the letter "I" and look like they belong in the + * input flag. + */ + +#ifndef _POSIX_SOURCE +#define ECHOKE 0x00000001 /* visual erase for line kill */ +#endif /*_POSIX_SOURCE */ +#define ECHOE 0x00000002 /* visually erase chars */ +#define ECHOK 0x00000004 /* echo NL after line kill */ +#define ECHO 0x00000008 /* enable echoing */ +#define ECHONL 0x00000010 /* echo NL even if ECHO is off */ +#ifndef _POSIX_SOURCE +#define ECHOPRT 0x00000020 /* visual erase mode for hardcopy */ +#define ECHOCTL 0x00000040 /* echo control chars as ^(Char) */ +#endif /*_POSIX_SOURCE */ +#define ISIG 0x00000080 /* enable signals INTR, QUIT, [D]SUSP */ +#define ICANON 0x00000100 /* canonicalize input lines */ +#ifndef _POSIX_SOURCE +#define ALTWERASE 0x00000200 /* use alternate WERASE algorithm */ +#endif /*_POSIX_SOURCE */ +#define IEXTEN 0x00000400 /* enable DISCARD and LNEXT */ +#define EXTPROC 0x00000800 /* external processing */ +#define TOSTOP 0x00400000 /* stop background jobs from output */ +#ifndef _POSIX_SOURCE +#define FLUSHO 0x00800000 /* output being flushed (state) */ +#define NOKERNINFO 0x02000000 /* no kernel output from VSTATUS */ +#define PENDIN 0x20000000 /* XXX retype pending input (state) */ +#endif /*_POSIX_SOURCE */ +#define NOFLSH 0x80000000 /* don't flush after interrupt */ + +typedef unsigned long tcflag_t; +typedef unsigned char cc_t; +typedef long speed_t; /* XXX should be unsigned long */ + +struct termios { + tcflag_t c_iflag; /* input flags */ + tcflag_t c_oflag; /* output 
flags */ + tcflag_t c_cflag; /* control flags */ + tcflag_t c_lflag; /* local flags */ + cc_t c_cc[NCCS]; /* control chars */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* + * Commands passed to tcsetattr() for setting the termios structure. + */ +#define TCSANOW 0 /* make change immediate */ +#define TCSADRAIN 1 /* drain output, then change */ +#define TCSAFLUSH 2 /* drain output, flush input */ +#ifndef _POSIX_SOURCE +#define TCSASOFT 0x10 /* flag - don't alter h.w. state */ +#endif + +/* + * Standard speeds + */ +#define B0 0 +#define B50 50 +#define B75 75 +#define B110 110 +#define B134 134 +#define B150 150 +#define B200 200 +#define B300 300 +#define B600 600 +#define B1200 1200 +#define B1800 1800 +#define B2400 2400 +#define B4800 4800 +#define B9600 9600 +#define B19200 19200 +#define B38400 38400 +#ifndef _POSIX_SOURCE +#define B7200 7200 +#define B14400 14400 +#define B28800 28800 +#define B57600 57600 +#define B76800 76800 +#define B115200 115200 +#define B230400 230400 +#define EXTA 19200 +#define EXTB 38400 +#endif /* !_POSIX_SOURCE */ + +#ifndef KERNEL + +#define TCIFLUSH 1 +#define TCOFLUSH 2 +#define TCIOFLUSH 3 +#define TCOOFF 1 +#define TCOON 2 +#define TCIOFF 3 +#define TCION 4 + +#include + +__BEGIN_DECLS +speed_t cfgetispeed __P((const struct termios *)); +speed_t cfgetospeed __P((const struct termios *)); +int cfsetispeed __P((struct termios *, speed_t)); +int cfsetospeed __P((struct termios *, speed_t)); +int tcgetattr __P((int, struct termios *)); +int tcsetattr __P((int, int, const struct termios *)); +int tcdrain __P((int)); +int tcflow __P((int, int)); +int tcflush __P((int, int)); +int tcsendbreak __P((int, int)); + +#ifndef _POSIX_SOURCE +void cfmakeraw __P((struct termios *)); +int cfsetspeed __P((struct termios *, speed_t)); +#endif /* !_POSIX_SOURCE */ +__END_DECLS + +#endif /* !KERNEL */ + +#ifndef _POSIX_SOURCE + +/* + * Include tty ioctl's that aren't just for backwards compatibility + * 
with the old tty driver. These ioctl definitions were previously + * in . + */ +#include +#endif + +/* + * END OF PROTECTED INCLUDE. + */ +#endif /* !_SYS_TERMIOS_H_ */ + +#ifndef _POSIX_SOURCE +#include +#endif diff --git a/bsd/sys/time.h b/bsd/sys/time.h new file mode 100644 index 000000000..a3c667ea0 --- /dev/null +++ b/bsd/sys/time.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)time.h 8.2 (Berkeley) 7/10/94 + */ + +#ifndef _SYS_TIME_H_ +#define _SYS_TIME_H_ + +#include + +#define getmicrouptime(a) microtime(a) +#define getmicrotime(a) microtime(a) +/* + * Structure returned by gettimeofday(2) system call, + * and used in other calls. + */ +struct timeval { + int32_t tv_sec; /* seconds */ + int32_t tv_usec; /* and microseconds */ +}; + +/* + * Structure defined by POSIX.4 to be like a timeval. 
+ */ +struct timespec { + time_t tv_sec; /* seconds */ + int32_t tv_nsec; /* and nanoseconds */ +}; + +#define TIMEVAL_TO_TIMESPEC(tv, ts) { \ + (ts)->tv_sec = (tv)->tv_sec; \ + (ts)->tv_nsec = (tv)->tv_usec * 1000; \ +} +#define TIMESPEC_TO_TIMEVAL(tv, ts) { \ + (tv)->tv_sec = (ts)->tv_sec; \ + (tv)->tv_usec = (ts)->tv_nsec / 1000; \ +} + +struct timezone { + int tz_minuteswest; /* minutes west of Greenwich */ + int tz_dsttime; /* type of dst correction */ +}; +#define DST_NONE 0 /* not on dst */ +#define DST_USA 1 /* USA style dst */ +#define DST_AUST 2 /* Australian style dst */ +#define DST_WET 3 /* Western European dst */ +#define DST_MET 4 /* Middle European dst */ +#define DST_EET 5 /* Eastern European dst */ +#define DST_CAN 6 /* Canada */ + +#define time_second time.tv_sec + +/* Operations on timevals. */ +#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 +#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) +#define timercmp(tvp, uvp, cmp) \ + (((tvp)->tv_sec == (uvp)->tv_sec) ? \ + ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ + ((tvp)->tv_sec cmp (uvp)->tv_sec)) +#define timeradd(tvp, uvp, vvp) \ + do { \ + (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \ + if ((vvp)->tv_usec >= 1000000) { \ + (vvp)->tv_sec++; \ + (vvp)->tv_usec -= 1000000; \ + } \ + } while (0) +#define timersub(tvp, uvp, vvp) \ + do { \ + (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ + if ((vvp)->tv_usec < 0) { \ + (vvp)->tv_sec--; \ + (vvp)->tv_usec += 1000000; \ + } \ + } while (0) + +/* + * Names of the interval timers, and structure + * defining a timer setting. 
+ */ +#define ITIMER_REAL 0 +#define ITIMER_VIRTUAL 1 +#define ITIMER_PROF 2 + +struct itimerval { + struct timeval it_interval; /* timer interval */ + struct timeval it_value; /* current value */ +}; + +/* + * Getkerninfo clock information structure + */ +struct clockinfo { + int hz; /* clock frequency */ + int tick; /* micro-seconds per hz tick */ + int tickadj; /* clock skew rate for adjtime() */ + int stathz; /* statistics clock frequency */ + int profhz; /* profiling clock frequency */ +}; + +#include + +#ifdef KERNEL +int itimerfix __P((struct timeval *tv)); +int itimerdecr __P((struct itimerval *itp, int usec)); +void microtime __P((struct timeval *tv)); +#else /* !KERNEL */ +#include <time.h> + +#ifndef _POSIX_SOURCE +#include <sys/cdefs.h> + +__BEGIN_DECLS +int adjtime __P((const struct timeval *, struct timeval *)); +int getitimer __P((int, struct itimerval *)); +int gettimeofday __P((struct timeval *, struct timezone *)); +int setitimer __P((int, const struct itimerval *, struct itimerval *)); +int settimeofday __P((const struct timeval *, const struct timezone *)); +int utimes __P((const char *, const struct timeval *)); +__END_DECLS +#endif /* !POSIX */ + +#endif /* !KERNEL */ + +#endif /* !_SYS_TIME_H_ */ diff --git a/bsd/sys/timeb.h b/bsd/sys/timeb.h new file mode 100644 index 000000000..4dd452ddf --- /dev/null +++ b/bsd/sys/timeb.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)timeb.h 8.2 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_TIMEB_H_ +#define _SYS_TIMEB_H_ + +/* The ftime(2) system call structure -- deprecated. */ +struct timeb { + time_t time; /* seconds since the Epoch */ + unsigned short millitm; /* + milliseconds since the Epoch */ + short timezone; /* minutes west of CUT */ + short dstflag; /* DST == non-zero */ +}; + +#endif /* !_SYS_TIMEB_H_ */ diff --git a/bsd/sys/times.h b/bsd/sys/times.h new file mode 100644 index 000000000..01d0a3734 --- /dev/null +++ b/bsd/sys/times.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)times.h 8.4 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_TIMES_H_ +#define _SYS_TIMES_H_ + +#include <machine/ansi.h> + +#ifndef _BSD_CLOCK_T_DEFINED_ +#define _BSD_CLOCK_T_DEFINED_ +typedef _BSD_CLOCK_T_ clock_t; +#endif + +struct tms { + clock_t tms_utime; /* User CPU time */ + clock_t tms_stime; /* System CPU time */ + clock_t tms_cutime; /* User CPU time of terminated child procs */ + clock_t tms_cstime; /* System CPU time of terminated child procs */ +}; + +#ifndef KERNEL +#include <sys/cdefs.h> + +__BEGIN_DECLS +clock_t times __P((struct tms *)); +__END_DECLS +#endif +#endif /* !_SYS_TIMES_H_ */ diff --git a/bsd/sys/tprintf.h b/bsd/sys/tprintf.h new file mode 100644 index 000000000..907230180 --- /dev/null +++ b/bsd/sys/tprintf.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tprintf.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_TPRINTF_H_ +#define _SYS_TPRINTF_H_ + +typedef struct session *tpr_t; + +tpr_t tprintf_open __P((struct proc *)); +void tprintf_close __P((tpr_t)); + +void tprintf __P((tpr_t, const char *fmt, ...)); +#endif /* !_SYS_TPRINTF_H_ */ diff --git a/bsd/sys/trace.h b/bsd/sys/trace.h new file mode 100644 index 000000000..49e230ffd --- /dev/null +++ b/bsd/sys/trace.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)trace.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_TRACE_H_ +#define _SYS_TRACE_H_ + +/* + * File system buffer tracing points; all trace + */ +#define TR_BREADHIT 0 /* buffer read found in cache */ +#define TR_BREADMISS 1 /* buffer read not in cache */ +#define TR_BWRITE 2 /* buffer written */ +#define TR_BREADHITRA 3 /* buffer read-ahead found in cache */ +#define TR_BREADMISSRA 4 /* buffer read-ahead not in cache */ +#define TR_XFODMISS 5 /* exe fod read */ +#define TR_XFODHIT 6 /* exe fod read */ +#define TR_BRELSE 7 /* brelse */ +#define TR_BREALLOC 8 /* expand/contract a buffer */ + +/* + * Memory allocator trace points; all trace the amount of memory involved + */ +#define TR_MALL 10 /* memory allocated */ + +/* + * Paging trace points: all are + */ +#define TR_INTRANS 20 /* page intransit block */ +#define TR_EINTRANS 21 /* page intransit wait done */ +#define TR_FRECLAIM 22 /* reclaim from free list */ +#define TR_RECLAIM 23 /* reclaim from loop */ +#define TR_XSFREC 24 /* reclaim from free list instead of drum */ +#define TR_XIFREC 25 /* reclaim from free list instead of fsys */ +#define TR_WAITMEM 26 /* wait for memory in pagein */ +#define TR_EWAITMEM 27 /* end memory wait in pagein */ +#define TR_ZFOD 28 /* zfod page fault */ +#define TR_EXFOD 29 /* exec fod page fault */ +#define TR_VRFOD 30 /* vread fod page fault */ +#define TR_CACHEFOD 31 /* fod in file system cache */ +#define TR_SWAPIN 32 /* drum page fault */ +#define TR_PGINDONE 33 /* page in done */ +#define TR_SWAPIO 34 /* swap i/o request arrives */ + +/* + * System call trace points. + */ +#define TR_VADVISE 40 /* vadvise occurred with */ + +/* + * Miscellaneous + */ +#define TR_STAMP 45 /* user said vtrace(VTR_STAMP, value); */ + +/* + * This defines the size of the trace flags array. + */ +#define TR_NFLAGS 100 /* generous */ + +#define TRCSIZ 4096 + +/* + * Specifications of the vtrace() system call, which takes one argument. 
+ */ +#define VTRACE 64+51 + +#define VTR_DISABLE 0 /* set a trace flag to 0 */ +#define VTR_ENABLE 1 /* set a trace flag to 1 */ +#define VTR_VALUE 2 /* return value of a trace flag */ +#define VTR_UALARM 3 /* set alarm to go off (sig 16) */ + /* in specified number of hz */ +#define VTR_STAMP 4 /* user specified stamp */ + +#ifdef KERNEL +#if TRACE +extern struct proc *traceproc; +extern int tracewhich, tracebuf[TRCSIZ]; +extern u_int tracex; +extern char traceflags[TR_NFLAGS]; +#define pack(v,b) (((v)->v_mount->mnt_stat.f_fsid.val[0])<<16)|(b) +#define trace(a,b,c) { \ + if (traceflags[a]) \ + trace1(a,b,c); \ +} +#else +#define trace(a,b,c) +#endif +#endif /* KERNEL */ +#endif /* !_SYS_TRACE_H_ */ + diff --git a/bsd/sys/tty.h b/bsd/sys/tty.h new file mode 100644 index 000000000..277f50849 --- /dev/null +++ b/bsd/sys/tty.h @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. 
+ * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)tty.h 8.6 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_TTY_H_ +#define _SYS_TTY_H_ + +#include + +#include +#include /* For struct selinfo. */ + +#ifndef __APPLE__ +/* + * Clists are character lists, which is a variable length linked list + * of cblocks, with a count of the number of characters in the list. + */ +struct clist { + int c_cc; /* Number of characters in the clist. */ + int c_cbcount; /* Number of cblocks. */ + int c_cbmax; /* Max # cblocks allowed for this clist. */ + int c_cbreserved; /* # cblocks reserved for this clist. */ + char *c_cf; /* Pointer to the first cblock. */ + char *c_cl; /* Pointer to the last cblock. */ +}; +#else /* __APPLE__ */ +/* + * NetBSD Clists are actually ring buffers. The c_cc, c_cf, c_cl fields have + * exactly the same behaviour as in true clists. + * if c_cq is NULL, the ring buffer has no TTY_QUOTE functionality + * (but, saves memory and cpu time) + * + * *DON'T* play with c_cs, c_ce, c_cq, or c_cl outside tty_subr.c!!! + */ +struct clist { + int c_cc; /* count of characters in queue */ + int c_cn; /* total ring buffer length */ + u_char *c_cf; /* points to first character */ + u_char *c_cl; /* points to next open character */ + u_char *c_cs; /* start of ring buffer */ + u_char *c_ce; /* c_ce + c_len */ + u_char *c_cq; /* N bits/bytes long, see tty_subr.c */ +}; + +#ifndef TTYCLSIZE +#define TTYCLSIZE 1024 +#endif + +#endif /* __APPLE__ */ + +/* + * Per-tty structure. 
+ * + * Should be split in two, into device and tty drivers. + * Glue could be masks of what to echo and circular buffer + * (low, high, timeout). + */ +struct tty { + struct clist t_rawq; /* Device raw input queue. */ + long t_rawcc; /* Raw input queue statistics. */ + struct clist t_canq; /* Device canonical queue. */ + long t_cancc; /* Canonical queue statistics. */ + struct clist t_outq; /* Device output queue. */ + long t_outcc; /* Output queue statistics. */ + int t_line; /* Interface to device drivers. */ + dev_t t_dev; /* Device. */ + int t_state; /* Device and driver (TS*) state. */ + int t_flags; /* Tty flags. */ + int t_timeout; /* Timeout for ttywait() */ + struct pgrp *t_pgrp; /* Foreground process group. */ + struct session *t_session; /* Enclosing session. */ + struct selinfo t_rsel; /* Tty read/oob select. */ + struct selinfo t_wsel; /* Tty write select. */ + struct termios t_termios; /* Termios state. */ + struct winsize t_winsize; /* Window size. */ + /* Start output. */ + void (*t_oproc) __P((struct tty *)); + /* Stop output. */ + void (*t_stop) __P((struct tty *, int)); + /* Set hardware state. */ + int (*t_param) __P((struct tty *, struct termios *)); + void *t_sc; /* XXX: net/if_sl.c:sl_softc. */ + int t_column; /* Tty output column. */ + int t_rocount, t_rocol; /* Tty. */ + int t_hiwat; /* High water mark. */ + int t_lowat; /* Low water mark. */ + int t_gen; /* Generation number. */ +}; + +#define t_cc t_termios.c_cc +#define t_cflag t_termios.c_cflag +#define t_iflag t_termios.c_iflag +#define t_ispeed t_termios.c_ispeed +#define t_lflag t_termios.c_lflag +#define t_min t_termios.c_min +#define t_oflag t_termios.c_oflag +#define t_ospeed t_termios.c_ospeed +#define t_time t_termios.c_time + +#define TTIPRI 25 /* Sleep priority for tty reads. */ +#define TTOPRI 26 /* Sleep priority for tty writes. */ + +/* + * User data unfortunately has to be copied through buffers on the way to + * and from clists. 
The buffers are on the stack so their sizes must be + * fairly small. + */ +#define IBUFSIZ 384 /* Should be >= max value of MIN. */ +#define OBUFSIZ 100 + +#ifndef TTYHOG +#define TTYHOG 1024 +#endif + +#ifdef KERNEL +#define TTMAXHIWAT roundup(2048, CBSIZE) +#define TTMINHIWAT roundup(100, CBSIZE) +#define TTMAXLOWAT 256 +#define TTMINLOWAT 32 +#endif /* KERNEL */ + +/* These flags are kept in t_state. */ +#define TS_SO_OLOWAT 0x00001 /* Wake up when output <= low water. */ +#define TS_ASYNC 0x00002 /* Tty in async I/O mode. */ +#define TS_BUSY 0x00004 /* Draining output. */ +#define TS_CARR_ON 0x00008 /* Carrier is present. */ +#define TS_FLUSH 0x00010 /* Outq has been flushed during DMA. */ +#define TS_ISOPEN 0x00020 /* Open has completed. */ +#define TS_TBLOCK 0x00040 /* Further input blocked. */ +#define TS_TIMEOUT 0x00080 /* Wait for output char processing. */ +#define TS_TTSTOP 0x00100 /* Output paused. */ +#ifdef notyet +#define TS_WOPEN 0x00200 /* Open in progress. */ +#endif +#define TS_XCLUDE 0x00400 /* Tty requires exclusivity. */ + +/* State for intra-line fancy editing work. */ +#define TS_BKSL 0x00800 /* State for lowercase \ work. */ +#define TS_CNTTB 0x01000 /* Counting tab width, ignore FLUSHO. */ +#define TS_ERASE 0x02000 /* Within a \.../ for PRTRUB. */ +#define TS_LNCH 0x04000 /* Next character is literal. */ +#define TS_TYPEN 0x08000 /* Retyping suspended input (PENDIN). */ +#define TS_LOCAL (TS_BKSL | TS_CNTTB | TS_ERASE | TS_LNCH | TS_TYPEN) + +/* Extras. */ +#define TS_CAN_BYPASS_L_RINT 0x010000 /* Device in "raw" mode. */ +#define TS_CONNECTED 0x020000 /* Connection open. */ +#define TS_SNOOP 0x040000 /* Device is being snooped on. */ +#define TS_SO_OCOMPLETE 0x080000 /* Wake up when output completes. */ +#define TS_ZOMBIE 0x100000 /* Connection lost. */ + +/* Hardware flow-control-invoked bits. */ +#define TS_CAR_OFLOW 0x200000 /* For MDMBUF (XXX handle in driver). */ +#ifdef notyet +#define TS_CTS_OFLOW 0x400000 /* For CCTS_OFLOW. 
*/ +#define TS_DSR_OFLOW 0x800000 /* For CDSR_OFLOW. */ +#endif + +/* Character type information. */ +#define ORDINARY 0 +#define CONTROL 1 +#define BACKSPACE 2 +#define NEWLINE 3 +#define TAB 4 +#define VTAB 5 +#define RETURN 6 + +struct speedtab { + int sp_speed; /* Speed. */ + int sp_code; /* Code. */ +}; + +/* Modem control commands (driver). */ +#define DMSET 0 +#define DMBIS 1 +#define DMBIC 2 +#define DMGET 3 + +/* Flags on a character passed to ttyinput. */ +#define TTY_CHARMASK 0x000000ff /* Character mask */ +#define TTY_QUOTE 0x00000100 /* Character quoted */ +#define TTY_ERRORMASK 0xff000000 /* Error mask */ +#define TTY_FE 0x01000000 /* Framing error */ +#define TTY_PE 0x02000000 /* Parity error */ +#define TTY_OE 0x04000000 /* Overrun error */ +#define TTY_BI 0x08000000 /* Break condition */ + +/* Is tp controlling terminal for p? */ +#define isctty(p, tp) \ + ((p)->p_session == (tp)->t_session && (p)->p_flag & P_CONTROLT) + +/* Is p in background of tp? */ +#define isbackground(p, tp) \ + (isctty((p), (tp)) && (p)->p_pgrp != (tp)->t_pgrp) + +/* Unique sleep addresses. */ +#define TSA_CARR_ON(tp) ((void *)&(tp)->t_rawq) +#define TSA_HUP_OR_INPUT(tp) ((void *)&(tp)->t_rawq.c_cf) +#define TSA_OCOMPLETE(tp) ((void *)&(tp)->t_outq.c_cl) +#define TSA_OLOWAT(tp) ((void *)&(tp)->t_outq) +#define TSA_PTC_READ(tp) ((void *)&(tp)->t_outq.c_cf) +#define TSA_PTC_WRITE(tp) ((void *)&(tp)->t_rawq.c_cl) +#define TSA_PTS_READ(tp) ((void *)&(tp)->t_canq) + +#ifdef KERNEL +__BEGIN_DECLS + +#ifndef __APPLE__ +extern struct tty *constty; /* Temporary virtual console. 
*/ + +int b_to_q __P((char *cp, int cc, struct clist *q)); +void catq __P((struct clist *from, struct clist *to)); +void clist_alloc_cblocks __P((struct clist *q, int ccmax, int ccres)); +void clist_free_cblocks __P((struct clist *q)); +/* void clist_init __P((void)); */ /* defined in systm.h for main() */ +int getc __P((struct clist *q)); +void ndflush __P((struct clist *q, int cc)); +int ndqb __P((struct clist *q, int flag)); +char *nextc __P((struct clist *q, char *cp, int *c)); +int putc __P((int c, struct clist *q)); +int q_to_b __P((struct clist *q, char *cp, int cc)); +int unputc __P((struct clist *q)); + +int ttcompat __P((struct tty *tp, int com, caddr_t data, int flag)); +int ttsetcompat __P((struct tty *tp, int *com, caddr_t data, struct termios *term)); +#else /* __APPLE__ */ +int b_to_q __P((u_char *cp, int cc, struct clist *q)); +void catq __P((struct clist *from, struct clist *to)); +void clist_init __P((void)); +int getc __P((struct clist *q)); +void ndflush __P((struct clist *q, int cc)); +int ndqb __P((struct clist *q, int flag)); +u_char *firstc __P((struct clist *clp, int *c)); +u_char *nextc __P((struct clist *q, u_char *cp, int *c)); +int putc __P((int c, struct clist *q)); +int q_to_b __P((struct clist *q, u_char *cp, int cc)); +int unputc __P((struct clist *q)); +int clalloc __P((struct clist *clp, int size, int quot)); +void clfree __P((struct clist *clp)); + +#ifdef KERNEL_PRIVATE +int ttcompat __P((struct tty *tp, u_long com, caddr_t data, int flag, + struct proc *p)); +int ttsetcompat __P((struct tty *tp, u_long *com, caddr_t data, struct termios *term)); +#endif /* KERNEL_PRIVATE */ +#endif /* __APPLE__ */ + +void termioschars __P((struct termios *t)); +int tputchar __P((int c, struct tty *tp)); +#ifndef __APPLE__ +int ttioctl __P((struct tty *tp, int com, void *data, int flag)); +#else +int ttioctl __P((struct tty *tp, u_long com, caddr_t data, int flag, + struct proc *p)); +#endif +int ttread __P((struct tty *tp, struct uio *uio, int 
flag)); +void ttrstrt __P((void *tp)); +int ttyselect __P((struct tty *tp, int rw, struct proc *p)); +int ttselect __P((dev_t dev, int rw, struct proc *p)); +void ttsetwater __P((struct tty *tp)); +int ttspeedtab __P((int speed, struct speedtab *table)); +int ttstart __P((struct tty *tp)); +void ttwakeup __P((struct tty *tp)); +int ttwrite __P((struct tty *tp, struct uio *uio, int flag)); +void ttwwakeup __P((struct tty *tp)); +void ttyblock __P((struct tty *tp)); +void ttychars __P((struct tty *tp)); +int ttycheckoutq __P((struct tty *tp, int wait)); +int ttyclose __P((struct tty *tp)); +void ttyflush __P((struct tty *tp, int rw)); +void ttyinfo __P((struct tty *tp)); +int ttyinput __P((int c, struct tty *tp)); +int ttylclose __P((struct tty *tp, int flag)); +int ttymodem __P((struct tty *tp, int flag)); +int ttyopen __P((dev_t device, struct tty *tp)); +int ttysleep __P((struct tty *tp, + void *chan, int pri, char *wmesg, int timeout)); +int ttywait __P((struct tty *tp)); +struct tty *ttymalloc __P((void)); +void ttyfree __P((struct tty *)); + +__END_DECLS + +#endif /* KERNEL */ + +#endif /* !_SYS_TTY_H_ */ diff --git a/bsd/sys/ttychars.h b/bsd/sys/ttychars.h new file mode 100644 index 000000000..9e7bffd91 --- /dev/null +++ b/bsd/sys/ttychars.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ttychars.h 8.2 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_TTYCHARS_H_ +#define _SYS_TTYCHARS_H_ + +/* + * 4.3 COMPATIBILITY FILE + * + * User visible structures and constants related to terminal handling. + */ + +struct ttychars { + char tc_erase; /* erase last character */ + char tc_kill; /* erase entire line */ + char tc_intrc; /* interrupt */ + char tc_quitc; /* quit */ + char tc_startc; /* start output */ + char tc_stopc; /* stop output */ + char tc_eofc; /* end-of-file */ + char tc_brkc; /* input delimiter (like nl) */ + char tc_suspc; /* stop process signal */ + char tc_dsuspc; /* delayed stop process signal */ + char tc_rprntc; /* reprint line */ + char tc_flushc; /* flush output (toggles) */ + char tc_werasc; /* word erase */ + char tc_lnextc; /* literal next character */ +}; +#ifdef USE_OLD_TTY +#include /* to pick up character defaults */ +#endif +#endif /* !_SYS_TTYCHARS_H_ */ diff --git a/bsd/sys/ttycom.h b/bsd/sys/ttycom.h new file mode 100644 index 000000000..d2a3ef597 --- /dev/null +++ b/bsd/sys/ttycom.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ttycom.h 8.1 (Berkeley) 3/28/94 + */ + +#ifndef _SYS_TTYCOM_H_ +#define _SYS_TTYCOM_H_ + +#include + +/* + * Tty ioctl's except for those supported only for backwards compatibility + * with the old tty driver. + */ + +/* + * Window/terminal size structure. This information is stored by the kernel + * in order to provide a consistent interface, but is not used by the kernel. 
+ */ +struct winsize { + unsigned short ws_row; /* rows, in characters */ + unsigned short ws_col; /* columns, in characters */ + unsigned short ws_xpixel; /* horizontal size, pixels */ + unsigned short ws_ypixel; /* vertical size, pixels */ +}; + +#define TIOCMODG _IOR('t', 3, int) /* get modem control state */ +#define TIOCMODS _IOW('t', 4, int) /* set modem control state */ +#define TIOCM_LE 0001 /* line enable */ +#define TIOCM_DTR 0002 /* data terminal ready */ +#define TIOCM_RTS 0004 /* request to send */ +#define TIOCM_ST 0010 /* secondary transmit */ +#define TIOCM_SR 0020 /* secondary receive */ +#define TIOCM_CTS 0040 /* clear to send */ +#define TIOCM_CAR 0100 /* carrier detect */ +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RNG 0200 /* ring */ +#define TIOCM_RI TIOCM_RNG +#define TIOCM_DSR 0400 /* data set ready */ + /* 8-10 compat */ +#define TIOCEXCL _IO('t', 13) /* set exclusive use of tty */ +#define TIOCNXCL _IO('t', 14) /* reset exclusive use of tty */ + /* 15 unused */ +#define TIOCFLUSH _IOW('t', 16, int) /* flush buffers */ + /* 17-18 compat */ +#define TIOCGETA _IOR('t', 19, struct termios) /* get termios struct */ +#define TIOCSETA _IOW('t', 20, struct termios) /* set termios struct */ +#define TIOCSETAW _IOW('t', 21, struct termios) /* drain output, set */ +#define TIOCSETAF _IOW('t', 22, struct termios) /* drn out, fls in, set */ +#define TIOCGETD _IOR('t', 26, int) /* get line discipline */ +#define TIOCSETD _IOW('t', 27, int) /* set line discipline */ + /* 127-124 compat */ +#define TIOCSBRK _IO('t', 123) /* set break bit */ +#define TIOCCBRK _IO('t', 122) /* clear break bit */ +#define TIOCSDTR _IO('t', 121) /* set data terminal ready */ +#define TIOCCDTR _IO('t', 120) /* clear data terminal ready */ +#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */ +#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */ + /* 117-116 compat */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ +#define TIOCSTI _IOW('t', 114, 
char) /* simulate terminal input */ +#define TIOCNOTTY _IO('t', 113) /* void tty association */ +#define TIOCPKT _IOW('t', 112, int) /* pty: set/clear packet mode */ +#define TIOCPKT_DATA 0x00 /* data packet */ +#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */ +#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */ +#define TIOCPKT_STOP 0x04 /* stop output */ +#define TIOCPKT_START 0x08 /* start output */ +#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */ +#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */ +#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCMSET _IOW('t', 109, int) /* set all modem bits */ +#define TIOCMBIS _IOW('t', 108, int) /* bis modem bits */ +#define TIOCMBIC _IOW('t', 107, int) /* bic modem bits */ +#define TIOCMGET _IOR('t', 106, int) /* get all modem bits */ +#define TIOCREMOTE _IOW('t', 105, int) /* remote input editing */ +#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */ +#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */ +#define TIOCUCNTL _IOW('t', 102, int) /* pty: set/clr usr cntl mode */ +#define TIOCSTAT _IO('t', 101) /* simulate ^T status message */ +#define UIOCCMD(n) _IO('u', n) /* usr cntl op "n" */ +#define TIOCSCONS _IO('t', 99) /* 4.2 compatibility */ +#define TIOCCONS _IOW('t', 98, int) /* become virtual console */ +#define TIOCSCTTY _IO('t', 97) /* become controlling tty */ +#define TIOCEXT _IOW('t', 96, int) /* pty: external processing */ +#define TIOCSIG _IO('t', 95) /* pty: generate signal */ +#define TIOCDRAIN _IO('t', 94) /* wait till output drained */ +#define TIOCMSDTRWAIT _IOW('t', 91, int) /* modem: set wait on close */ +#define TIOCMGDTRWAIT _IOR('t', 90, int) /* modem: get wait on close */ +#define TIOCTIMESTAMP _IOR('t', 89, struct timeval) /* enable/get timestamp + * of last input event */ +#define TIOCDCDTIMESTAMP _IOR('t', 88, struct 
timeval) /* enable/get timestamp + * of last DCd rise */ +#define TIOCSDRAINWAIT _IOW('t', 87, int) /* set ttywait timeout */ +#define TIOCGDRAINWAIT _IOR('t', 86, int) /* get ttywait timeout */ +#define TIOCDSIMICROCODE _IO('t', 85) /* download microcode to + * DSI Softmodem */ + +#define TTYDISC 0 /* termios tty line discipline */ +#define TABLDISC 3 /* tablet discipline */ +#define SLIPDISC 4 /* serial IP discipline */ +#define PPPDISC 5 /* PPP discipline */ + +#endif /* !_SYS_TTYCOM_H_ */ diff --git a/bsd/sys/ttydefaults.h b/bsd/sys/ttydefaults.h new file mode 100644 index 000000000..5e9a33e45 --- /dev/null +++ b/bsd/sys/ttydefaults.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. 
or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ttydefaults.h 8.4 (Berkeley) 1/21/94 + */ + +/* + * System wide defaults for terminal state. 
+ */ +#ifndef _SYS_TTYDEFAULTS_H_ +#define _SYS_TTYDEFAULTS_H_ + +/* + * Defaults on "first" open. + */ +#define TTYDEF_IFLAG (BRKINT | ICRNL | IMAXBEL | IXON | IXANY) +#define TTYDEF_OFLAG (OPOST | ONLCR) +#define TTYDEF_LFLAG (ECHO | ICANON | ISIG | IEXTEN | ECHOE|ECHOKE|ECHOCTL) +#define TTYDEF_CFLAG (CREAD | CS8 | HUPCL) +#define TTYDEF_SPEED (B9600) + +/* + * Control Character Defaults + */ +#define CTRL(x) (x&037) +#define CEOF CTRL('d') +#define CEOL 0xff /* XXX avoid _POSIX_VDISABLE */ +#define CERASE 0177 +#define CINTR CTRL('c') +#define CSTATUS 0xff /* XXX avoid _POSIX_VDISABLE */ +#define CKILL CTRL('u') +#define CMIN 1 +#define CQUIT 034 /* FS, ^\ */ +#define CSUSP CTRL('z') +#define CTIME 0 +#define CDSUSP CTRL('y') +#define CSTART CTRL('q') +#define CSTOP CTRL('s') +#define CLNEXT CTRL('v') +#define CDISCARD CTRL('o') +#define CWERASE CTRL('w') +#define CREPRINT CTRL('r') +#define CEOT CEOF +/* compat */ +#define CBRK CEOL +#define CRPRNT CREPRINT +#define CFLUSH CDISCARD + +/* PROTECTED INCLUSION ENDS HERE */ +#endif /* !_SYS_TTYDEFAULTS_H_ */ + +/* + * #define TTYDEFCHARS to include an array of default control characters. + */ +#ifdef TTYDEFCHARS +static cc_t ttydefchars[NCCS] = { + CEOF, CEOL, CEOL, CERASE, CWERASE, CKILL, CREPRINT, + _POSIX_VDISABLE, CINTR, CQUIT, CSUSP, CDSUSP, CSTART, CSTOP, CLNEXT, + CDISCARD, CMIN, CTIME, CSTATUS, _POSIX_VDISABLE +}; +#undef TTYDEFCHARS +#endif diff --git a/bsd/sys/ttydev.h b/bsd/sys/ttydev.h new file mode 100644 index 000000000..b08a16e8c --- /dev/null +++ b/bsd/sys/ttydev.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ttydev.h 8.2 (Berkeley) 1/4/94 + */ + +/* COMPATIBILITY HEADER FILE */ + +#ifndef _SYS_TTYDEV_H_ +#define _SYS_TTYDEV_H_ + +#ifdef USE_OLD_TTY +#define B0 0 +#define B50 1 +#define B75 2 +#define B110 3 +#define B134 4 +#define B150 5 +#define B200 6 +#define B300 7 +#define B600 8 +#define B1200 9 +#define B1800 10 +#define B2400 11 +#define B4800 12 +#define B9600 13 +#define EXTA 14 +#define EXTB 15 +#define B57600 16 +#define B115200 17 +#endif /* USE_OLD_TTY */ + +#endif /* !_SYS_TTYDEV_H_ */ diff --git a/bsd/sys/types.h b/bsd/sys/types.h new file mode 100644 index 000000000..dd19de0ec --- /dev/null +++ b/bsd/sys/types.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)types.h 8.4 (Berkeley) 1/21/94 + */ + +#ifndef _SYS_TYPES_H_ +#define _SYS_TYPES_H_ + +#ifndef __ASSEMBLER__ +#include + +/* Machine type dependent parameters. */ +#include + +#include +#include + +#ifndef _POSIX_SOURCE +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; +typedef unsigned short ushort; /* Sys V compatibility */ +typedef unsigned int uint; /* Sys V compatibility */ +#endif + +typedef u_int64_t u_quad_t; /* quads */ +typedef int64_t quad_t; +typedef quad_t * qaddr_t; + +typedef char * caddr_t; /* core address */ +typedef int32_t daddr_t; /* disk address */ +typedef int32_t dev_t; /* device number */ +typedef u_int32_t fixpt_t; /* fixed point number */ +typedef u_int32_t gid_t; /* group id */ +typedef u_int32_t ino_t; /* inode number */ +typedef long key_t; /* IPC key (for Sys V IPC) */ +typedef u_int16_t mode_t; /* permissions */ +typedef u_int16_t nlink_t; /* link count */ +typedef quad_t off_t; /* file offset */ +typedef int32_t pid_t; /* process id */ +typedef quad_t rlim_t; /* resource limit */ +typedef int32_t segsz_t; /* segment size */ +typedef int32_t swblk_t; /* swap offset */ +typedef u_int32_t uid_t; /* user id */ + + +#ifndef _POSIX_SOURCE +/* Major, minor numbers, dev_t's. 
*/ +#define major(x) ((int32_t)(((u_int32_t)(x) >> 24) & 0xff)) +#define minor(x) ((int32_t)((x) & 0xffffff)) +#define makedev(x,y) ((dev_t)(((x) << 24) | (y))) +#endif + +#ifndef _BSD_CLOCK_T_DEFINED_ +#define _BSD_CLOCK_T_DEFINED_ +typedef _BSD_CLOCK_T_ clock_t; +#endif + +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef _BSD_SIZE_T_ size_t; +#endif + +#ifndef _BSD_SSIZE_T_DEFINED_ +#define _BSD_SSIZE_T_DEFINED_ +typedef _BSD_SSIZE_T_ ssize_t; +#endif + +#ifndef _BSD_TIME_T_DEFINED_ +#define _BSD_TIME_T_DEFINED_ +typedef _BSD_TIME_T_ time_t; +#endif + +#ifndef _POSIX_SOURCE +#define NBBY 8 /* number of bits in a byte */ + +/* + * Select uses bit masks of file descriptors in longs. These macros + * manipulate such bit fields (the filesystem macros use chars). + */ +#ifndef FD_SETSIZE +#define FD_SETSIZE 256 +#endif + +typedef int32_t fd_mask; +#define NFDBITS (sizeof(fd_mask) * NBBY) /* bits per mask */ + +#ifndef howmany +#define howmany(x, y) (((x) + ((y) - 1)) / (y)) +#endif + +typedef struct fd_set { + fd_mask fds_bits[howmany(FD_SETSIZE, NFDBITS)]; +} fd_set; + +#define FD_SET(n, p) ((p)->fds_bits[(n)/NFDBITS] |= (1 << ((n) % NFDBITS))) +#define FD_CLR(n, p) ((p)->fds_bits[(n)/NFDBITS] &= ~(1 << ((n) % NFDBITS))) +#define FD_ISSET(n, p) ((p)->fds_bits[(n)/NFDBITS] & (1 << ((n) % NFDBITS))) +#define FD_COPY(f, t) bcopy(f, t, sizeof(*(f))) +#define FD_ZERO(p) bzero(p, sizeof(*(p))) + +#if defined(__STDC__) && defined(KERNEL) +/* + * Forward structure declarations for function prototypes. We include the + * common structures that cross subsystem boundaries here; others are mostly + * used in the same place that the structure is defined. 
+ */ +struct proc; +struct pgrp; +struct ucred; +struct rusage; +struct file; +struct buf; +struct tty; +struct uio; +#endif + +#endif /* !_POSIX_SOURCE */ +#endif /* __ASSEMBLER__ */ + +struct _pthread_handler_rec +{ + void (*routine)(void *); /* Routine to call */ + void *arg; /* Argument to pass */ + struct _pthread_handler_rec *next; +}; + +#ifndef __POSIX_LIB__ + +#define __PTHREAD_SIZE__ 596 +#define __PTHREAD_ATTR_SIZE__ 36 +#define __PTHREAD_MUTEXATTR_SIZE__ 8 +#define __PTHREAD_MUTEX_SIZE__ 40 +#define __PTHREAD_CONDATTR_SIZE__ 4 +#define __PTHREAD_COND_SIZE__ 24 +#define __PTHREAD_ONCE_SIZE__ 4 + + +typedef struct _opaque_pthread_t { long sig; struct _pthread_handler_rec *cleanup_stack; char opaque[__PTHREAD_SIZE__];} *pthread_t; + +typedef struct _opaque_pthread_attr_t { long sig; char opaque[__PTHREAD_ATTR_SIZE__]; } pthread_attr_t; + +typedef struct _opaque_pthread_mutexattr_t { long sig; char opaque[__PTHREAD_MUTEXATTR_SIZE__]; } pthread_mutexattr_t; + +typedef struct _opaque_pthread_mutex_t { long sig; char opaque[__PTHREAD_MUTEX_SIZE__]; } pthread_mutex_t; + +typedef struct _opaque_pthread_condattr_t { long sig; char opaque[__PTHREAD_CONDATTR_SIZE__]; } pthread_condattr_t; + +typedef struct _opaque_pthread_cond_t { long sig; char opaque[__PTHREAD_COND_SIZE__]; } pthread_cond_t; + +typedef struct { long sig; char opaque[__PTHREAD_ONCE_SIZE__]; } pthread_once_t; + +#endif /* __POSIX_LIB__ */ + +typedef unsigned long pthread_key_t; /* Opaque 'pointer' */ + +#endif /* !_SYS_TYPES_H_ */ diff --git a/bsd/sys/ubc.h b/bsd/sys/ubc.h new file mode 100644 index 000000000..0b647d1db --- /dev/null +++ b/bsd/sys/ubc.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 1999, 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: ubc.h + * Author: Umesh Vaishampayan [umeshv@apple.com] + * 05-Aug-1999 umeshv Created. + * + * Header file for Unified Buffer Cache. + * + */ + +#ifndef _SYS_UBC_H_ +#define _SYS_UBC_H_ + +#include +#include +#include + +#include + +#include + +#define UBC_INFO_NULL ((struct ubc_info *) 0) +#define UBC_NOINFO ((struct ubc_info *)0xDEADD1ED) + +extern struct zone *ubc_info_zone; + +/* + * The following data structure keeps the information to associate + * a vnode to the correspondig VM objects. 
+ */ + +struct ubc_info { + void * ui_pager; /* pager */ + void *ui_object; /* VM object corresponding to the pager */ + long ui_flags; /* flags */ + struct vnode *ui_vnode; /* The vnode associated with this ubc_info */ + struct ucred *ui_ucred; /* holds credentials for NFS paging */ + int ui_holdcnt; /* hold the memory object */ + off_t ui_size; /* file size for the vnode */ + long ui_mapped; /* is it currently mapped */ +}; + +/* Defines for ui_flags */ +#define UI_NONE 0x00000000 /* none */ +#define UI_HASPAGER 0x00000001 /* has a pager associated */ +#define UI_INITED 0x00000002 /* newly initialized vnode */ +#define UI_HASOBJREF 0x00000004 /* hold a reference on object */ +#define UI_WASMAPPED 0x00000008 /* vnode was mapped */ +#define UI_DONTCACHE 0x00000010 /* do not cache object */ + +/* + * exported primitives for loadable file systems. + */ + +__BEGIN_DECLS +int ubc_info_init __P((struct vnode *)); +void ubc_info_free __P((struct vnode *)); +int ubc_setsize __P((struct vnode *, off_t)); +off_t ubc_getsize __P((struct vnode *)); +int ubc_uncache __P((struct vnode *)); +int ubc_umount __P((struct mount *)); +void ubc_unmountall __P(()); +int ubc_setcred __P((struct vnode *, struct proc *)); +struct ucred *ubc_getcred __P((struct vnode *)); +void *ubc_getpager __P((struct vnode *)); +void *ubc_getobject __P((struct vnode *, int)); +int ubc_setpager __P((struct vnode *, void *)); +int ubc_setflags __P((struct vnode *, int)); +int ubc_clearflags __P((struct vnode *, int)); +int ubc_issetflags __P((struct vnode *, int)); +off_t ubc_blktooff __P((struct vnode *, daddr_t)); +daddr_t ubc_offtoblk __P((struct vnode *, off_t)); +int ubc_clean __P((struct vnode *, int)); +int ubc_pushdirty __P((struct vnode *)); +int ubc_hold __P((struct vnode *)); +void ubc_rele __P((struct vnode *)); +void ubc_map __P((struct vnode *)); +int ubc_release __P((struct vnode *)); +int ubc_invalidate __P((struct vnode *, off_t, size_t)); +int ubc_isinuse __P((struct vnode *, int)); + 
+/* cluster IO routines */ +int cluster_read __P((struct vnode *, struct uio *, off_t, int, int)); +int advisory_read __P((struct vnode *, off_t, off_t, int, int)); +int cluster_write __P((struct vnode *, struct uio*, off_t, off_t, + off_t, off_t, int, int)); +int cluster_push __P((struct vnode *)); +int cluster_pageout __P((struct vnode *, upl_t, vm_offset_t, off_t, int, + off_t, int, int)); +int cluster_pagein __P((struct vnode *, upl_t, vm_offset_t, off_t, int, + off_t, int, int)); +int cluster_bp __P((struct buf *)); +__END_DECLS + +#define UBCINFOMISSING(vp) \ + ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_INFO_NULL)) + +#define UBCINFORECLAIMED(vp) \ + ((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo == UBC_NOINFO)) + +#define UBCINFOEXISTS(vp) \ + ((vp) && ((vp)->v_type == VREG) && \ + ((vp)->v_ubcinfo) && ((vp)->v_ubcinfo != UBC_NOINFO)) + +#define UBCISVALID(vp) \ + ((vp) && ((vp)->v_type == VREG) && !((vp)->v_flag & VSYSTEM)) + +#define UBCINVALID(vp) \ + (((vp) == NULL) || ((vp) && ((vp)->v_type != VREG)) \ + || ((vp) && ((vp)->v_flag & VSYSTEM))) + +#define UBCINFOCHECK(fun, vp) \ + if ((vp) && ((vp)->v_type == VREG) && \ + (((vp)->v_ubcinfo == UBC_INFO_NULL) \ + || ((vp)->v_ubcinfo == UBC_NOINFO))) \ + panic("%s: lost ubc_info", (fun)); + +/* Flags for ubc_getobject() */ +#define UBC_HOLDOBJECT 0x0001 +#define UBC_NOREACTIVATE 0x0002 +#define UBC_PAGINGOP 0x0004 + + +#endif /* _SYS_UBC_H_ */ + diff --git a/bsd/sys/ucred.h b/bsd/sys/ucred.h new file mode 100644 index 000000000..751d9151b --- /dev/null +++ b/bsd/sys/ucred.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ucred.h 8.4 (Berkeley) 1/9/95 + */ + +#ifndef _SYS_UCRED_H_ +#define _SYS_UCRED_H_ + +#include + +/* + * Credentials. + */ +struct ucred { + u_long cr_ref; /* reference count */ + uid_t cr_uid; /* effective user id */ + short cr_ngroups; /* number of groups */ + gid_t cr_groups[NGROUPS]; /* groups */ +}; +#define cr_gid cr_groups[0] +#define NOCRED ((struct ucred *)0) /* no credential available */ +#define FSCRED ((struct ucred *)-1) /* filesystem credential */ + +#ifdef KERNEL +#define crhold(cr) \ +{ \ + if (++(cr)->cr_ref == 0) \ + panic("crhold"); \ +} + +struct ucred *crcopy __P((struct ucred *cr)); +struct ucred *crdup __P((struct ucred *cr)); +void crfree __P((struct ucred *cr)); +struct ucred *crget __P((void)); +int suser __P((struct ucred *cred, u_short *acflag)); +#endif /* KERNEL */ + +#endif /* !_SYS_UCRED_H_ */ diff --git a/bsd/sys/uio.h b/bsd/sys/uio.h new file mode 100644 index 000000000..c9bd96de6 --- /dev/null +++ b/bsd/sys/uio.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)uio.h 8.5 (Berkeley) 2/22/94 + */ + +#ifndef _SYS_UIO_H_ +#define _SYS_UIO_H_ + +/* + * XXX + * iov_base should be a void *. + */ +struct iovec { + char *iov_base; /* Base address. */ + size_t iov_len; /* Length. */ +}; + +enum uio_rw { UIO_READ, UIO_WRITE }; + +/* Segment flag values. */ +enum uio_seg { + UIO_USERSPACE, /* from user data space */ + UIO_USERISPACE, /* from user I space */ + UIO_SYSSPACE, /* from system space */ + UIO_PHYS_USERSPACE /* kernel address is physical, to/from user data space */ +}; + +#ifdef KERNEL +struct uio { + struct iovec *uio_iov; + int uio_iovcnt; + off_t uio_offset; + int uio_resid; + enum uio_seg uio_segflg; + enum uio_rw uio_rw; + struct proc *uio_procp; +}; + +/* + * Limits + */ +#define UIO_MAXIOV 1024 /* max 1K of iov's */ +#define UIO_SMALLIOV 8 /* 8 on stack, else malloc */ + +extern int uiomove __P((caddr_t cp, int n, struct uio *uio)); +extern int ureadc __P((int c, struct uio *uio)); +extern int uwritec __P((struct uio *uio)); + +#endif /* KERNEL */ + +#ifndef KERNEL +#include + +__BEGIN_DECLS +ssize_t readv __P((int, const struct iovec *, int)); +ssize_t writev __P((int, const struct iovec *, int)); +__END_DECLS +#endif /* !KERNEL */ +#endif /* !_SYS_UIO_H_ */ diff --git a/bsd/sys/un.h b/bsd/sys/un.h new file mode 100644 index 000000000..5cb13ac75 --- /dev/null +++ b/bsd/sys/un.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)un.h 8.3 (Berkeley) 2/19/95 + */ + +#ifndef _SYS_UN_H_ +#define _SYS_UN_H_ + +/* + * Definitions for UNIX IPC domain. + */ +struct sockaddr_un { + u_char sun_len; /* sockaddr len including null */ + u_char sun_family; /* AF_UNIX */ + char sun_path[104]; /* path name (gag) */ +}; + +#ifdef KERNEL +struct mbuf; +struct socket; + +int uipc_usrreq __P((struct socket *so, int req, struct mbuf *m, + struct mbuf *nam, struct mbuf *control)); +int unp_connect2 __P((struct socket *so, struct socket *so2)); +void unp_dispose __P((struct mbuf *m)); +int unp_externalize __P((struct mbuf *rights)); +void unp_init __P((void)); +extern struct pr_usrreqs uipc_usrreqs; +#else /* !KERNEL */ + +/* actual length of an initialized sockaddr_un */ +#define SUN_LEN(su) \ + (sizeof(*(su)) - sizeof((su)->sun_path) + strlen((su)->sun_path)) + +#endif /* KERNEL */ + +#endif /* !_SYS_UN_H_ */ diff --git a/bsd/sys/unistd.h b/bsd/sys/unistd.h new file mode 100644 index 000000000..252212434 --- /dev/null +++ b/bsd/sys/unistd.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)unistd.h 8.2 (Berkeley) 1/7/94 + */ + +#ifndef _SYS_UNISTD_H_ +#define _SYS_UNISTD_H_ + +/* compile-time symbolic constants */ +#define _POSIX_JOB_CONTROL /* implementation supports job control */ + +/* + * Although we have saved user/group IDs, we do not use them in setuid + * as described in POSIX 1003.1, because the feature does not work for + * root. We use the saved IDs in seteuid/setegid, which are not currently + * part of the POSIX 1003.1 specification. 
+ */ +#ifdef _NOT_AVAILABLE +#define _POSIX_SAVED_IDS /* saved set-user-ID and set-group-ID */ +#endif + +#define _POSIX_VERSION 198808L +#define _POSIX2_VERSION 199212L + +/* execution-time symbolic constants */ + /* chown requires appropriate privileges */ +#define _POSIX_CHOWN_RESTRICTED 1 + /* too-long path components generate errors */ +#define _POSIX_NO_TRUNC 1 + /* may disable terminal special characters */ +#ifndef _POSIX_VDISABLE +#define _POSIX_VDISABLE ((unsigned char)'\377') +#endif + +#define _POSIX_THREADS +#define _POSIX_THREAD_ATTR_STACKADDR +#define _POSIX_THREAD_ATTR_STACKSIZE +#define _POSIX_THREAD_PRIORITY_SCHEDULING +#define _POSIX_THREAD_PRIO_INHERIT +#define _POSIX_THREAD_PRIO_PROTECT + +#define _POSIX_THREAD_KEYS_MAX 128 + +/* access function */ +#define F_OK 0 /* test for existence of file */ +#define X_OK 0x01 /* test for execute or search permission */ +#define W_OK 0x02 /* test for write permission */ +#define R_OK 0x04 /* test for read permission */ + +/* whence values for lseek(2) */ +#define SEEK_SET 0 /* set file offset to offset */ +#define SEEK_CUR 1 /* set file offset to current plus offset */ +#define SEEK_END 2 /* set file offset to EOF plus offset */ + +#ifndef _POSIX_SOURCE +/* whence values for lseek(2); renamed by POSIX 1003.1 */ +#define L_SET SEEK_SET +#define L_INCR SEEK_CUR +#define L_XTND SEEK_END +#endif + +/* configurable pathname variables */ +#define _PC_LINK_MAX 1 +#define _PC_MAX_CANON 2 +#define _PC_MAX_INPUT 3 +#define _PC_NAME_MAX 4 +#define _PC_PATH_MAX 5 +#define _PC_PIPE_BUF 6 +#define _PC_CHOWN_RESTRICTED 7 +#define _PC_NO_TRUNC 8 +#define _PC_VDISABLE 9 + +#ifndef _POSIX_SOURCE +#define _PC_NAME_CHARS_MAX 10 +#define _PC_CASE_SENSITIVE 11 +#define _PC_CASE_PRESERVING 12 +#endif + +/* configurable system variables */ +#define _SC_ARG_MAX 1 +#define _SC_CHILD_MAX 2 +#define _SC_CLK_TCK 3 +#define _SC_NGROUPS_MAX 4 +#define _SC_OPEN_MAX 5 +#define _SC_JOB_CONTROL 6 +#define _SC_SAVED_IDS 7 +#define 
_SC_VERSION 8 +#define _SC_BC_BASE_MAX 9 +#define _SC_BC_DIM_MAX 10 +#define _SC_BC_SCALE_MAX 11 +#define _SC_BC_STRING_MAX 12 +#define _SC_COLL_WEIGHTS_MAX 13 +#define _SC_EXPR_NEST_MAX 14 +#define _SC_LINE_MAX 15 +#define _SC_RE_DUP_MAX 16 +#define _SC_2_VERSION 17 +#define _SC_2_C_BIND 18 +#define _SC_2_C_DEV 19 +#define _SC_2_CHAR_TERM 20 +#define _SC_2_FORT_DEV 21 +#define _SC_2_FORT_RUN 22 +#define _SC_2_LOCALEDEF 23 +#define _SC_2_SW_DEV 24 +#define _SC_2_UPE 25 +#define _SC_STREAM_MAX 26 +#define _SC_TZNAME_MAX 27 + +/* configurable system strings */ +#define _CS_PATH 1 + +#endif /* !_SYS_UNISTD_H_ */ diff --git a/bsd/sys/unpcb.h b/bsd/sys/unpcb.h new file mode 100644 index 000000000..bd22c6bee --- /dev/null +++ b/bsd/sys/unpcb.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)unpcb.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_UNPCB_H_ +#define _SYS_UNPCB_H_ + +#include +#include + +/* + * Protocol control block for an active + * instance of a UNIX internal protocol. + * + * A socket may be associated with an vnode in the + * file system. If so, the unp_vnode pointer holds + * a reference count to this vnode, which should be irele'd + * when the socket goes away. 
+ * + * A socket may be connected to another socket, in which + * case the control block of the socket to which it is connected + * is given by unp_conn. + * + * A socket may be referenced by a number of sockets (e.g. several + * sockets may be connected to a datagram socket.) These sockets + * are in a linked list starting with unp_refs, linked through + * unp_nextref and null-terminated. Note that a socket may be referenced + * by a number of other sockets and may also reference a socket (not + * necessarily one which is referencing it). This generates + * the need for unp_refs and unp_nextref to be separate fields. + * + * Stream sockets keep copies of receive sockbuf sb_cc and sb_mbcnt + * so that changes in the sockbuf may be computed to modify + * back pressure on the sender accordingly. + */ +typedef u_quad_t unp_gen_t; +LIST_HEAD(unp_head, unpcb); + +struct unpcb { + LIST_ENTRY(unpcb) unp_link; /* glue on list of all PCBs */ + struct socket *unp_socket; /* pointer back to socket */ + struct vnode *unp_vnode; /* if associated with file */ + ino_t unp_ino; /* fake inode number */ + struct unpcb *unp_conn; /* control block of connected socket */ + struct unp_head unp_refs; /* referencing socket linked list */ + LIST_ENTRY(unpcb) unp_reflink; /* link in unp_refs list */ + struct sockaddr_un *unp_addr; /* bound address of socket */ + int unp_cc; /* copy of rcv.sb_cc */ + int unp_mbcnt; /* copy of rcv.sb_mbcnt */ + unp_gen_t unp_gencnt; /* generation count of this instance */ +}; + +#define sotounpcb(so) ((struct unpcb *)((so)->so_pcb)) + +/* Hack alert -- this structure depends on . 
*/ +#ifdef _SYS_SOCKETVAR_H_ +struct xunpcb { + size_t xu_len; /* length of this structure */ + struct unpcb *xu_unpp; /* to help netstat, fstat */ + struct unpcb xu_unp; /* our information */ + union { + struct sockaddr_un xuu_addr; /* our bound address */ + char xu_dummy1[256]; + } xu_au; +#define xu_addr xu_au.xuu_addr + union { + struct sockaddr_un xuu_caddr; /* their bound address */ + char xu_dummy2[256]; + } xu_cau; +#define xu_caddr xu_cau.xuu_caddr + struct xsocket xu_socket; + u_quad_t xu_alignment_hack; +}; + +struct xunpgen { + size_t xug_len; + u_int xug_count; + unp_gen_t xug_gen; + so_gen_t xug_sogen; +}; +#endif /* _SYS_SOCKETVAR_H_ */ + +#endif /* _SYS_UNPCB_H_ */ diff --git a/bsd/sys/user.h b/bsd/sys/user.h new file mode 100644 index 000000000..0abd1ef5d --- /dev/null +++ b/bsd/sys/user.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)user.h 8.2 (Berkeley) 9/23/93 + */ + +#ifndef _SYS_USER_H_ +#define _SYS_USER_H_ + +#ifndef KERNEL +/* stuff that *used* to be included by user.h, or is now needed */ +#include +#include +#include +#include +#include +#endif +#include +#include +#include /* XXX */ +#include + +#ifdef KERNEL +/* + * Per-thread U area. + * + * It is likely that this structure contains no fields that must be + * saved between system calls. + * + * WARNING: IF THE SIZE OF THIS STRUCT CHANGES YOU MUST CHANGE THE + * CONSTANT IN struct thread_act.bsd_space + */ +struct uthread { + int *uu_ar0; /* address of users saved R0 */ + + /* syscall parameters, results and catches */ + int uu_arg[8]; /* arguments to current system call */ + int *uu_ap; /* pointer to arglist */ + int uu_rval[2]; + + /* thread exception handling */ + int uu_code; /* ``code'' to trap */ + char uu_cursig; /* p_cursig for exc. */ + int uu_sig; /* p_sig for exc. */ + + /* support for syscalls which use continuations */ + union { + struct _select { + u_int32_t *ibits, *obits; /* bits to select on */ + uint nbytes; /* number of bytes in ibits and obits */ + struct timeval atv; + int poll; + int error; + } ss_select; /* saved state for select() */ + struct _wait { + int f; + } ss_wait; /* saved state for wait?() */ + struct _owait { + int pid; + int *status; + int options; + struct rusage *rusage; + } ss_owait; + int uu_nfs_myiod; /* saved state for nfsd */ + } uu_state; + + /* internal support for continuation framework */ + int (*uu_continuation)(int); + int uu_pri; + int uu_timo; + +}; +typedef struct uthread * uthread_t; +#endif /* KERNEL */ + +/* + * Per process structure containing data that isn't needed in core + * when the process isn't running (esp. when swapped out). + * This structure may or may not be at the same kernel address + * in all processes. 
+ */ + +struct user { + /* NOT USED ANYMORE */ +}; + +#endif /* !_SYS_USER_H_ */ diff --git a/bsd/sys/utfconv.h b/bsd/sys/utfconv.h new file mode 100644 index 000000000..f3f2bfacc --- /dev/null +++ b/bsd/sys/utfconv.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _SYS_UTFCONV_H_ +#define _SYS_UTFCONV_H_ + +#ifdef KERNEL +/* + * UTF-8 encode/decode flags + */ +#define UTF_REVERSE_ENDIAN 0x01 /* reverse UCS-2 byte order */ +#define UTF_NO_NULL_TERM 0x02 /* do not add null termination */ +#define UTF_DECOMPOSED 0x04 /* generate fully decomposed UCS-2 */ + +__BEGIN_DECLS +size_t utf8_encodelen __P((const u_int16_t *, size_t, u_int16_t, int)); + +int utf8_encodestr __P((const u_int16_t *, size_t, u_int8_t *, size_t *, + size_t, u_int16_t, int)); + +int utf8_decodestr __P((const u_int8_t *, size_t, u_int16_t *,size_t *, + size_t, u_int16_t, int)); +__END_DECLS + +#endif /* KERNEL */ + +#endif /* !_SYS_UTFCONV_H_ */ diff --git a/bsd/sys/utsname.h b/bsd/sys/utsname.h new file mode 100644 index 000000000..35779be0d --- /dev/null +++ b/bsd/sys/utsname.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright 1993,1995 NeXT Computer Inc. All Rights Reserved */ +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. 
+ * + * This code is derived from software contributed to Berkeley by + * Chuck Karish of Mindcraft, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)utsname.h 8.1 (Berkeley) 1/4/94 + */ + +#ifndef _SYS_UTSNAME_H +#define _SYS_UTSNAME_H + +#define _SYS_NAMELEN 256 + +struct utsname { + char sysname[_SYS_NAMELEN]; /* Name of OS */ + char nodename[_SYS_NAMELEN]; /* Name of this network node */ + char release[_SYS_NAMELEN]; /* Release level */ + char version[_SYS_NAMELEN]; /* Version level */ + char machine[_SYS_NAMELEN]; /* Hardware type */ +}; + +#include + +__BEGIN_DECLS +int uname __P((struct utsname *)); +__END_DECLS + +#endif /* !_SYS_UTSNAME_H */ diff --git a/bsd/sys/ux_exception.h b/bsd/sys/ux_exception.h new file mode 100644 index 000000000..493834456 --- /dev/null +++ b/bsd/sys/ux_exception.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * Copyright (c) 1988 Carnegie-Mellon University + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. 
+ */ +/* + * HISTORY + * + * Revision 1.2.32.1 1998/11/11 21:54:39 aramesh + * Atlas merge + * + * Revision 1.1.1.1 1997/09/30 02:42:22 wsanchez + * Import of kernel from umeshv/kernel + * + * Revision 2.7 89/10/03 19:23:14 rpd + * Change from NeXT: added EXC_UNIX_ABORT. + * [89/08/20 23:16:13 rpd] + * + * Revision 2.6 89/03/09 19:35:07 rpd + * More cleanup. + * + * Revision 2.5 89/02/25 15:01:07 gm0w + * Changes for cleanup. + * + * Revision 2.4 89/02/07 01:01:10 mwyoung + * Relocated from uxkern/ux_exception.h + * + * Revision 2.3 89/01/15 16:35:44 rpd + * Use decl_simple_lock_data. + * [89/01/15 15:19:58 rpd] + * + * Revision 2.2 88/08/24 02:52:12 mwyoung + * Adjusted include file references. + * [88/08/17 02:27:27 mwyoung] + * + * 29-Sep-87 David Black (dlb) at Carnegie-Mellon University + * Created. + * + */ + +/* + * Codes for Unix software exceptions under EXC_SOFTWARE. + */ + +#ifndef _SYS_UX_EXCEPTION_H_ +#define _SYS_UX_EXCEPTION_H_ + +#define EXC_UNIX_BAD_SYSCALL 0x10000 /* SIGSYS */ + +#define EXC_UNIX_BAD_PIPE 0x10001 /* SIGPIPE */ + +#define EXC_UNIX_ABORT 0x10002 /* SIGABRT */ + +#ifdef KERNEL +/* + * Kernel data structures for Unix exception handler. + */ + +#include + +extern mach_port_name_t ux_exception_port; + +#endif /* KERNEL */ + +#endif /* _SYS_UX_EXCEPTION_H_ */ diff --git a/bsd/sys/vadvise.h b/bsd/sys/vadvise.h new file mode 100644 index 000000000..81c1cee89 --- /dev/null +++ b/bsd/sys/vadvise.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vadvise.h 8.1 (Berkeley) 6/2/93 + */ + + +#ifndef _SYS_VADVISE_H_ +#define _SYS_VADVISE_H_ + +/* + * Parameters to vadvise() to tell system of particular paging + * behaviour: + * VA_NORM Normal strategy + * VA_ANOM Sampling page behaviour is not a win, don't bother + * Suitable during GCs in LISP, or sequential or random + * page referencing. + * VA_SEQL Sequential behaviour expected. + * VA_FLUSH Invalidate all page table entries. + */ +#define VA_NORM 0 +#define VA_ANOM 1 +#define VA_SEQL 2 +#define VA_FLUSH 3 + +#endif /* !_SYS_VADVISE_H_ */ diff --git a/bsd/sys/vcmd.h b/bsd/sys/vcmd.h new file mode 100644 index 000000000..9e5b596cd --- /dev/null +++ b/bsd/sys/vcmd.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vcmd.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_VCMD_H_ +#define _SYS_VCMD_H_ + +#include + +#define VPRINT 0100 +#define VPLOT 0200 +#define VPRINTPLOT 0400 + +#define VGETSTATE _IOR('v', 0, int) +#define VSETSTATE _IOW('v', 1, int) +#endif /* !_SYS_VCMD_H_ */ diff --git a/bsd/sys/version.h b/bsd/sys/version.h new file mode 100644 index 000000000..308e5f5f4 --- /dev/null +++ b/bsd/sys/version.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * File: sys/version.h + * + * HISTORY + * 29-Oct-86 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. + */ + +/* + * Each kernel has a major and minor version number. Changes in + * the major number in general indicate a change in exported features. + * Changes in minor number usually correspond to internal-only + * changes that the user need not be aware of (in general). These + * values are stored at boot time in the machine_info structure and + * can be obtained by user programs with the host_info kernel call. + * This mechanism is intended to be the formal way for Mach programs + * to provide for backward compatibility in future releases. + * + * Following is an informal history of the numbers: + * + * 20-Mar-1998 Umesh Vaishampayan + * MacOSX DR2 + * + * 28-Sep-94 ? + * NEXTSTEP Release 4.0. + * + * 03-Sep-91 Doug Mitchell + * Major 3 for NeXT release 3.0. + * + * 04-Mar-90 Avadis Tevanian, Jr. + * Major 2, minor 0 for NeXT release 2.0. + * + * 11-May-89 Avadis Tevanian, Jr. + * Advance version to major 1, minor 0 to conform to NeXT + * release 1.0. + * + * 05-December-88 Avadis Tevanian, Jr. + * Aborted previous numbering, set major to 0, minor to 9 + * to conform to NeXT's 0.9 release. + * + * 25-March-87 Avadis Tevanian, Jr. + * Created version numbering scheme. Started with major 1, + * minor 0. + */ +#ifndef _SYS_VERSION_H_ +#define _SYS_VERSION_H_ + +#define KERNEL_MAJOR_VERSION 10 +#define KERNEL_MINOR_VERSION 0 + +#endif /* ! _SYS_VERSION_H_ */ diff --git a/bsd/sys/vlimit.h b/bsd/sys/vlimit.h new file mode 100644 index 000000000..5c0f0b617 --- /dev/null +++ b/bsd/sys/vlimit.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vlimit.h 8.1 (Berkeley) 6/2/93 + */ + +#ifndef _SYS_VLIMIT_H_ +#define _SYS_VLIMIT_H_ + +/* + * Limits for u.u_limit[i], per process, inherited. + */ +#define LIM_NORAISE 0 /* if <> 0, can't raise limits */ +#define LIM_CPU 1 /* max secs cpu time */ +#define LIM_FSIZE 2 /* max size of file created */ +#define LIM_DATA 3 /* max growth of data space */ +#define LIM_STACK 4 /* max growth of stack */ +#define LIM_CORE 5 /* max size of ``core'' file */ +#define LIM_MAXRSS 6 /* max desired data+stack core usage */ + +#define NLIMITS 6 + +#define INFINITY 0x7fffffff + +#endif /* !_SYS_VLIMIT_H_ */ diff --git a/bsd/sys/vm.h b/bsd/sys/vm.h new file mode 100644 index 000000000..2bb8a30f3 --- /dev/null +++ b/bsd/sys/vm.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vm.h 8.5 (Berkeley) 5/11/95 + */ +/* HISTORY + * 05-Jun-95 Mac Gillon (mgillon) at NeXT + * 4.4 code uses this file to import MACH API + */ + +#ifndef _SYS_VM_H +#define _SYS_VM_H + +/* Machine specific config stuff */ +#if defined(KERNEL) && !defined(MACH_USER_API) +#include +#include +#include +#endif + +/* + * Shareable process virtual address space. + * May eventually be merged with vm_map. + * Several fields are temporary (text, data stuff). 
+ */ +struct vmspace { + int vm_refcnt; /* number of references */ + caddr_t vm_shm; /* SYS5 shared memory private data XXX */ +/* we copy from vm_startcopy to the end of the structure on fork */ +#define vm_startcopy vm_rssize + segsz_t vm_rssize; /* current resident set size in pages */ + segsz_t vm_swrss; /* resident set size before last swap */ + segsz_t vm_tsize; /* text size (pages) XXX */ + segsz_t vm_dsize; /* data size (pages) XXX */ + segsz_t vm_ssize; /* stack size (pages) */ + caddr_t vm_taddr; /* user virtual address of text XXX */ + caddr_t vm_daddr; /* user virtual address of data XXX */ + caddr_t vm_maxsaddr; /* user VA at max stack growth */ +}; + +#ifdef KERNEL + +#ifdef BSD_BUILD +#include +#endif /* BSD_BUILD */ + +struct proc *current_proc(void); + +#endif /* KERNEL */ + +#endif /* _SYS_VM_H */ diff --git a/bsd/sys/vmmeter.h b/bsd/sys/vmmeter.h new file mode 100644 index 000000000..dba664e8e --- /dev/null +++ b/bsd/sys/vmmeter.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/*- + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)vmmeter.h 8.2 (Berkeley) 7/10/94 + */ + +#ifndef _SYS_VMMETER_H_ +#define _SYS_VMMETER_H_ + +/* + * System wide statistics counters. + */ +struct vmmeter { + /* + * General system activity. + */ + u_int v_swtch; /* context switches */ + u_int v_trap; /* calls to trap */ + u_int v_syscall; /* calls to syscall() */ + u_int v_intr; /* device interrupts */ + u_int v_soft; /* software interrupts */ + u_int v_faults; /* total faults taken */ + /* + * Virtual memory activity. + */ + u_int v_lookups; /* object cache lookups */ + u_int v_hits; /* object cache hits */ + u_int v_vm_faults; /* number of address memory faults */ + u_int v_cow_faults; /* number of copy-on-writes */ + u_int v_swpin; /* swapins */ + u_int v_swpout; /* swapouts */ + u_int v_pswpin; /* pages swapped in */ + u_int v_pswpout; /* pages swapped out */ + u_int v_pageins; /* number of pageins */ + u_int v_pageouts; /* number of pageouts */ + u_int v_pgpgin; /* pages paged in */ + u_int v_pgpgout; /* pages paged out */ + u_int v_intrans; /* intransit blocking page faults */ + u_int v_reactivated; /* number of pages reactivated from free list */ + u_int v_rev; /* revolutions of the hand */ + u_int v_scan; /* scans in page out daemon */ + u_int v_dfree; /* pages freed by daemon */ + u_int v_pfree; /* pages freed by exiting processes */ + u_int v_zfod; /* pages zero filled on demand */ + u_int v_nzfod; /* number of zfod's created */ + /* + * Distribution of page usages. 
+ */ + u_int v_page_size; /* page size in bytes */ + u_int v_kernel_pages; /* number of pages in use by kernel */ + u_int v_free_target; /* number of pages desired free */ + u_int v_free_min; /* minimum number of pages desired free */ + u_int v_free_count; /* number of pages free */ + u_int v_wire_count; /* number of pages wired down */ + u_int v_active_count; /* number of pages active */ + u_int v_inactive_target; /* number of pages desired inactive */ + u_int v_inactive_count; /* number of pages inactive */ +}; +#ifdef KERNEL +extern struct vmmeter cnt; +#endif + +/* systemwide totals computed every five seconds */ +struct vmtotal +{ + int16_t t_rq; /* length of the run queue */ + int16_t t_dw; /* jobs in ``disk wait'' (neg priority) */ + int16_t t_pw; /* jobs in page wait */ + int16_t t_sl; /* jobs sleeping in core */ + int16_t t_sw; /* swapped out runnable/short block jobs */ + int32_t t_vm; /* total virtual memory */ + int32_t t_avm; /* active virtual memory */ + int32_t t_rm; /* total real memory in use */ + int32_t t_arm; /* active real memory */ + int32_t t_vmshr; /* shared virtual memory */ + int32_t t_avmshr; /* active shared virtual memory */ + int32_t t_rmshr; /* shared real memory */ + int32_t t_armshr; /* active shared real memory */ + int32_t t_free; /* free memory pages */ +}; +#ifdef KERNEL +extern struct vmtotal total; +#endif + +#endif /* !_SYS_VMMETER_H_ */ diff --git a/bsd/sys/vmparam.h b/bsd/sys/vmparam.h new file mode 100644 index 000000000..d3ada1c8d --- /dev/null +++ b/bsd/sys/vmparam.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * Copyright (c) 1988 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * HISTORY + */ + +#ifndef _SYS_VMPARAM_H_ +#define _SYS_VMPARAM_H_ + +/* + * Machine dependent constants + */ + +#include + +#endif /* _SYS_VMPARAM_H_ */ diff --git a/bsd/sys/vnode.h b/bsd/sys/vnode.h new file mode 100644 index 000000000..03672ac1a --- /dev/null +++ b/bsd/sys/vnode.h @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vnode.h 8.17 (Berkeley) 5/20/95 + */ + +#ifndef _VNODE_H_ +#define _VNODE_H_ + +#include +#include +#include + +#include +#include + +#include +#ifdef KERNEL +#include +#include +#endif + +/* + * The vnode is the focus of all file activity in UNIX. There is a + * unique vnode allocated for each active file, each current directory, + * each mounted-on file, text file, and the root. + */ + +/* + * Vnode types. VNON means no type. + */ +enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD, VSTR, + VCPLX }; + +/* + * Vnode tag types. + * These are for the benefit of external programs only (e.g., pstat) + * and should NEVER be inspected by the kernel. + */ +enum vtagtype { + VT_NON, VT_UFS, VT_NFS, VT_MFS, VT_MSDOSFS, VT_LFS, VT_LOFS, VT_FDESC, + VT_PORTAL, VT_NULL, VT_UMAP, VT_KERNFS, VT_PROCFS, VT_AFS, VT_ISOFS, + VT_UNION, VT_HFS, VT_VOLFS, VT_DEVFS, VT_WEBDAV, VT_UDF, VT_AFP, + VT_CDDA, VT_CIFS,VT_OTHER}; + +/* + * Each underlying filesystem allocates its own private area and hangs + * it from v_data. If non-null, this area is freed in getnewvnode(). + */ +LIST_HEAD(buflists, buf); + +/* + * Reading or writing any of these items requires holding the appropriate lock. + * v_freelist is locked by the global vnode_free_list simple lock. + * v_mntvnodes is locked by the global mntvnodes simple lock. + * v_flag, v_usecount, v_holdcount and v_writecount are + * locked by the v_interlock simple lock. 
+ */ +struct vnode { + u_long v_flag; /* vnode flags (see below) */ + long v_usecount; /* reference count of users */ + long v_holdcnt; /* page & buffer references */ + daddr_t v_lastr; /* last read (read-ahead) */ + u_long v_id; /* capability identifier */ + struct mount *v_mount; /* ptr to vfs we are in */ + int (**v_op)(void *); /* vnode operations vector */ + TAILQ_ENTRY(vnode) v_freelist; /* vnode freelist */ + LIST_ENTRY(vnode) v_mntvnodes; /* vnodes for mount point */ + struct buflists v_cleanblkhd; /* clean blocklist head */ + struct buflists v_dirtyblkhd; /* dirty blocklist head */ + long v_numoutput; /* num of writes in progress */ + enum vtype v_type; /* vnode type */ + union { + struct mount *vu_mountedhere;/* ptr to mounted vfs (VDIR) */ + struct socket *vu_socket; /* unix ipc (VSOCK) */ + struct specinfo *vu_specinfo; /* device (VCHR, VBLK) */ + struct fifoinfo *vu_fifoinfo; /* fifo (VFIFO) */ + } v_un; + struct ubc_info *v_ubcinfo; /* valid for (VREG) */ + struct nqlease *v_lease; /* Soft reference to lease */ + daddr_t v_lastw; /* last write (write cluster) */ + daddr_t v_cstart; /* start block of cluster */ + daddr_t v_ciosiz; /* real size of I/O for cluster */ + int v_clen; /* length of current cluster */ + int v_ralen; /* Read-ahead length */ + daddr_t v_maxra; /* last readahead block */ + simple_lock_data_t v_interlock; /* lock on usecount and flag */ + struct lock__bsd__ *v_vnlock; /* used for non-locking fs's */ + long v_writecount; /* reference count of writers */ + enum vtagtype v_tag; /* type of underlying data */ + void *v_data; /* private data for fs */ +}; +#define v_mountedhere v_un.vu_mountedhere +#define v_socket v_un.vu_socket +#define v_specinfo v_un.vu_specinfo +#define v_fifoinfo v_un.vu_fifoinfo + +/* + * Vnode flags. 
+ */ +#define VROOT 0x000001 /* root of its file system */ +#define VTEXT 0x000002 /* vnode is a pure text prototype */ +#define VSYSTEM 0x000004 /* vnode being used by kernel */ +#define VISTTY 0x000008 /* vnode represents a tty */ +#define VWASMAPPED 0x000010 /* vnode was mapped before */ +#define VTERMINATE 0x000020 /* terminating memory object */ +#define VTERMWANT 0x000040 /* waiting for memory object death */ +#define VMOUNT 0x000080 /* mount operation in progress */ +#define VXLOCK 0x000100 /* vnode is locked to change underlying type */ +#define VXWANT 0x000200 /* process is waiting for vnode */ +#define VBWAIT 0x000400 /* waiting for output to complete */ +#define VALIASED 0x000800 /* vnode has an alias */ +#define VORECLAIM 0x001000 /* vm object is being reclaimed */ +#define VNOCACHE_DATA 0x002000 /* don't keep data cached once it's been consumed */ +#define VSTANDARD 0x004000 /* vnode obtained from common pool */ +#define VAGE 0x008000 /* Insert vnode at head of free list */ +#define VRAOFF 0x010000 /* read ahead disabled */ +#define VUINIT 0x020000 /* ubc_info being initialized */ +#define VUWANT 0x040000 /* process is waiting for VUINIT */ +#define VUINACTIVE 0x080000 /* UBC vnode is on inactive list */ +#define VHASDIRTY 0x100000 /* UBC vnode may have 1 or more */ + /* delayed dirty pages that need to be flushed at the next 'sync' */ +#define VSWAP 0x200000 /* vnode is being used as swapfile */ +/* + * Vnode attributes. A field value of VNOVAL represents a field whose value + * is unavailable (getattr) or which is not to be changed (setattr). 
+ */ +struct vattr { + enum vtype va_type; /* vnode type (for create) */ + u_short va_mode; /* files access mode and type */ + short va_nlink; /* number of references to file */ + uid_t va_uid; /* owner user id */ + gid_t va_gid; /* owner group id */ + long va_fsid; /* file system id (dev for now) */ + long va_fileid; /* file id */ + u_quad_t va_size; /* file size in bytes */ + long va_blocksize; /* blocksize preferred for i/o */ + struct timespec va_atime; /* time of last access */ + struct timespec va_mtime; /* time of last modification */ + struct timespec va_ctime; /* time file changed */ + u_long va_gen; /* generation number of file */ + u_long va_flags; /* flags defined for file */ + dev_t va_rdev; /* device the special file represents */ + u_quad_t va_bytes; /* bytes of disk space held by file */ + u_quad_t va_filerev; /* file modification number */ + u_int va_vaflags; /* operations flags, see below */ + long va_spare; /* remain quad aligned */ +}; + +/* + * Flags for va_vaflags. + */ +#define VA_UTIMES_NULL 0x01 /* utimes argument was NULL */ +#define VA_EXCLUSIVE 0x02 /* exclusive create request */ + +/* + * Flags for ioflag. + */ +#define IO_UNIT 0x01 /* do I/O as atomic unit */ +#define IO_APPEND 0x02 /* append write to end */ +#define IO_SYNC 0x04 /* do I/O synchronously */ +#define IO_NODELOCKED 0x08 /* underlying node already locked */ +#define IO_NDELAY 0x10 /* FNDELAY flag set in file table */ +#define IO_NOZEROFILL 0x20 /* F_SETSIZE fcntl uses to prevent zero filling */ +#define IO_TAILZEROFILL 0x40 /* zero fills at the tail of write */ +#define IO_HEADZEROFILL 0x80 /* zero fills at the head of write */ +#define IO_NOZEROVALID 0x100 /* do not zero fill if valid page */ + +/* + * Modes. Some values same as Ixxx entries from inode.h for now. 
+ */ +#define VSUID 04000 /* set user id on execution */ +#define VSGID 02000 /* set group id on execution */ +#define VSVTX 01000 /* save swapped text even after use */ +#define VREAD 00400 /* read, write, execute permissions */ +#define VWRITE 00200 +#define VEXEC 00100 + +/* + * Token indicating no attribute value yet assigned. + */ +#define VNOVAL (-1) + +#ifdef KERNEL +/* + * Convert between vnode types and inode formats (since POSIX.1 + * defines mode word of stat structure in terms of inode formats). + */ +extern enum vtype iftovt_tab[]; +extern int vttoif_tab[]; +#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12]) +#define VTTOIF(indx) (vttoif_tab[(int)(indx)]) +#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode)) + +/* + * Flags to various vnode functions. + */ +#define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */ +#define FORCECLOSE 0x0002 /* vflush: force file closure */ +#define WRITECLOSE 0x0004 /* vflush: only close writeable files */ +#define SKIPSWAP 0x0008 /* vflush: skip vnodes marked VSWAP */ + +#define DOCLOSE 0x0008 /* vclean: close active files */ + +#define V_SAVE 0x0001 /* vinvalbuf: sync file first */ +#define V_SAVEMETA 0x0002 /* vinvalbuf: leave indirect blocks */ + +#define REVOKEALL 0x0001 /* vop_revoke: revoke all aliases */ + +/* flags for vop_allocate */ +#define PREALLOCATE 0x00000001 /* preallocate allocation blocks */ +#define ALLOCATECONTIG 0x00000002 /* allocate contiguous space */ +#define ALLOCATEALL 0x00000004 /* allocate all requested space */ + /* or no space at all */ +#define FREEREMAINDER 0x00000008 /* deallocate allocated but */ + /* unfilled blocks */ +#define ALLOCATEFROMPEOF 0x00000010 /* allocate from the physical eof */ + +#if DIAGNOSTIC +#define VATTR_NULL(vap) vattr_null(vap) +#define HOLDRELE(vp) holdrele(vp) +#define VHOLD(vp) vhold(vp) + +void holdrele __P((struct vnode *)); +void vattr_null __P((struct vattr *)); +void vhold __P((struct vnode *)); +#else +#define VATTR_NULL(vap) 
(*(vap) = va_null) /* initialize a vattr */ +#define HOLDRELE(vp) holdrele(vp) /* decrease buf or page ref */ +extern __inline void holdrele(struct vnode *vp) +{ + simple_lock(&vp->v_interlock); + vp->v_holdcnt--; + simple_unlock(&vp->v_interlock); +} +#define VHOLD(vp) vhold(vp) /* increase buf or page ref */ +extern __inline void vhold(struct vnode *vp) +{ + simple_lock(&vp->v_interlock); + if (++vp->v_holdcnt <= 0) + panic("vhold: v_holdcnt"); + simple_unlock(&vp->v_interlock); +} +#endif /* DIAGNOSTIC */ + +#define VREF(vp) vref(vp) +void vref __P((struct vnode *)); +#define NULLVP ((struct vnode *)NULL) + +/* + * Global vnode data. + */ +extern struct vnode *rootvnode; /* root (i.e. "/") vnode */ +extern int desiredvnodes; /* number of vnodes desired */ +extern struct vattr va_null; /* predefined null vattr structure */ + +/* + * Macro/function to check for client cache inconsistency w.r.t. leasing. + */ +#define LEASE_READ 0x1 /* Check lease for readers */ +#define LEASE_WRITE 0x2 /* Check lease for modifiers */ + +#endif /* KERNEL */ + +/* + * Mods for extensibility. + */ + +/* + * Flags for vdesc_flags: + */ +#define VDESC_MAX_VPS 16 +/* Low order 16 flag bits are reserved for willrele flags for vp arguments. */ +#define VDESC_VP0_WILLRELE 0x0001 +#define VDESC_VP1_WILLRELE 0x0002 +#define VDESC_VP2_WILLRELE 0x0004 +#define VDESC_VP3_WILLRELE 0x0008 +#define VDESC_NOMAP_VPP 0x0100 +#define VDESC_VPP_WILLRELE 0x0200 + +/* + * VDESC_NO_OFFSET is used to identify the end of the offset list + * and in places where no such field exists. + */ +#define VDESC_NO_OFFSET -1 + +/* + * This structure describes the vnode operation taking place. + */ +struct vnodeop_desc { + int vdesc_offset; /* offset in vector--first for speed */ + char *vdesc_name; /* a readable name for debugging */ + int vdesc_flags; /* VDESC_* flags */ + + /* + * These ops are used by bypass routines to map and locate arguments. 
+ * Creds and procs are not needed in bypass routines, but sometimes + * they are useful to (for example) transport layers. + * Nameidata is useful because it has a cred in it. + */ + int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */ + int vdesc_vpp_offset; /* return vpp location */ + int vdesc_cred_offset; /* cred location, if any */ + int vdesc_proc_offset; /* proc location, if any */ + int vdesc_componentname_offset; /* if any */ + /* + * Finally, we've got a list of private data (about each operation) + * for each transport layer. (Support to manage this list is not + * yet part of BSD.) + */ + caddr_t *vdesc_transports; +}; + +#ifdef KERNEL +/* + * A list of all the operation descs. + */ +extern struct vnodeop_desc *vnodeop_descs[]; + +/* + * Interlock for scanning list of vnodes attached to a mountpoint + */ +extern struct slock mntvnode_slock; + +/* + * This macro is very helpful in defining those offsets in the vdesc struct. + * + * This is stolen from X11R4. I ignored all the fancy stuff for + * Crays, so if you decide to port this to such a serious machine, + * you might want to consult Intrinsic.h's XtOffset{,Of,To}. + */ +#define VOPARG_OFFSET(p_type,field) \ + ((int) (((char *) (&(((p_type)NULL)->field))) - ((char *) NULL))) +#define VOPARG_OFFSETOF(s_type,field) \ + VOPARG_OFFSET(s_type*,field) +#define VOPARG_OFFSETTO(S_TYPE,S_OFFSET,STRUCT_P) \ + ((S_TYPE)(((char*)(STRUCT_P))+(S_OFFSET))) + + +/* + * This structure is used to configure the new vnodeops vector. + */ +struct vnodeopv_entry_desc { + struct vnodeop_desc *opve_op; /* which operation this is */ + int (*opve_impl)(void *); /* code implementing this operation */ +}; +struct vnodeopv_desc { + /* ptr to the ptr to the vector where op should go */ + int (***opv_desc_vector_p)(void *); + struct vnodeopv_entry_desc *opv_desc_ops; /* null terminated list */ +}; + +/* + * A default routine which just returns an error. + */ +int vn_default_error __P((void)); + +/* + * A generic structure. 
+ * This can be used by bypass routines to identify generic arguments. + */ +struct vop_generic_args { + struct vnodeop_desc *a_desc; + /* other random data follows, presumably */ +}; + +/* + * VOCALL calls an op given an ops vector. We break it out because BSD's + * vclean changes the ops vector and then wants to call ops with the old + * vector. + */ +#define VOCALL(OPSV,OFF,AP) (( *((OPSV)[(OFF)])) (AP)) + +/* + * This call works for vnodes in the kernel. + */ +#define VCALL(VP,OFF,AP) VOCALL((VP)->v_op,(OFF),(AP)) +#define VDESC(OP) (& __CONCAT(OP,_desc)) +#define VOFFSET(OP) (VDESC(OP)->vdesc_offset) + +/* + * Finally, include the default set of vnode operations. + */ +#include + +/* + * Public vnode manipulation functions. + */ +struct file; +struct mount; +struct nameidata; +struct ostat; +struct proc; +struct stat; +struct ucred; +struct uio; +struct vattr; +struct vnode; +struct vop_bwrite_args; + +int bdevvp __P((dev_t dev, struct vnode **vpp)); +void cvtstat __P((struct stat *st, struct ostat *ost)); +int getnewvnode __P((enum vtagtype tag, + struct mount *mp, int (**vops)(void *), struct vnode **vpp)); +void insmntque __P((struct vnode *vp, struct mount *mp)); +void vattr_null __P((struct vattr *vap)); +int vcount __P((struct vnode *vp)); +int vflush __P((struct mount *mp, struct vnode *skipvp, int flags)); +int vget __P((struct vnode *vp, int lockflag, struct proc *p)); +void vgone __P((struct vnode *vp)); +int vinvalbuf __P((struct vnode *vp, int save, struct ucred *cred, + struct proc *p, int slpflag, int slptimeo)); +void vprint __P((char *label, struct vnode *vp)); +int vrecycle __P((struct vnode *vp, struct slock *inter_lkp, + struct proc *p)); +int vn_bwrite __P((struct vop_bwrite_args *ap)); +int vn_close __P((struct vnode *vp, + int flags, struct ucred *cred, struct proc *p)); +int vn_closefile __P((struct file *fp, struct proc *p)); +int vn_ioctl __P((struct file *fp, u_long com, caddr_t data, + struct proc *p)); +int vn_lock __P((struct vnode 
*vp, int flags, struct proc *p)); +int vn_open __P((struct nameidata *ndp, int fmode, int cmode)); +int vn_rdwr __P((enum uio_rw rw, struct vnode *vp, caddr_t base, + int len, off_t offset, enum uio_seg segflg, int ioflg, + struct ucred *cred, int *aresid, struct proc *p)); +int vn_read __P((struct file *fp, struct uio *uio, struct ucred *cred)); +int vn_select __P((struct file *fp, int which, struct proc *p)); +int vn_stat __P((struct vnode *vp, struct stat *sb, struct proc *p)); +int vn_write __P((struct file *fp, struct uio *uio, struct ucred *cred)); +int vop_noislocked __P((struct vop_islocked_args *)); +int vop_nolock __P((struct vop_lock_args *)); +int vop_nounlock __P((struct vop_unlock_args *)); +int vop_revoke __P((struct vop_revoke_args *)); +struct vnode * + checkalias __P((struct vnode *vp, dev_t nvp_rdev, struct mount *mp)); +void vput __P((struct vnode *vp)); +void vrele __P((struct vnode *vp)); +int vaccess __P((mode_t file_mode, uid_t uid, gid_t gid, + mode_t acc_mode, struct ucred *cred)); +int getvnode __P((struct proc *p, int fd, struct file **fpp)); + +#endif /* KERNEL */ + +#endif /* !_VNODE_H_ */ diff --git a/bsd/sys/vnode_if.h b/bsd/sys/vnode_if.h new file mode 100644 index 000000000..6f13d346e --- /dev/null +++ b/bsd/sys/vnode_if.h @@ -0,0 +1,1339 @@ + +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved + * Copyright (c) 1992, 1993, 1994, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +/* + * Warning: This file is generated automatically. + * (Modifications made here may easily be lost!) + * + * Created by the script: + * @(#)vnode_if.sh 8.7 (Berkeley) 5/11/95 + */ + + +#ifndef _VNODE_IF_H_ +#define _VNODE_IF_H_ + +extern struct vnodeop_desc vop_default_desc; + + +struct vop_lookup_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; +}; +extern struct vnodeop_desc vop_lookup_desc; +#define VOP_LOOKUP(dvp, vpp, cnp) _VOP_LOOKUP(dvp, vpp, cnp) +static __inline int _VOP_LOOKUP(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) +{ + struct vop_lookup_args a; + a.a_desc = VDESC(vop_lookup); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + return (VCALL(dvp, VOFFSET(vop_lookup), &a)); +} + +struct vop_cachedlookup_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; +}; +extern struct vnodeop_desc vop_cachedlookup_desc; +#define VOP_CACHEDLOOKUP(dvp, vpp, cnp) _VOP_CACHEDLOOKUP(dvp, vpp, cnp) +static __inline int _VOP_CACHEDLOOKUP(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) +{ + struct vop_cachedlookup_args a; + a.a_desc = VDESC(vop_cachedlookup); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + return (VCALL(dvp, VOFFSET(vop_cachedlookup), &a)); +} + +struct vop_create_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode 
**a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +}; +extern struct vnodeop_desc vop_create_desc; +#define VOP_CREATE(dvp, vpp, cnp, vap) _VOP_CREATE(dvp, vpp, cnp, vap) +static __inline int _VOP_CREATE(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) +{ + struct vop_create_args a; + a.a_desc = VDESC(vop_create); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + return (VCALL(dvp, VOFFSET(vop_create), &a)); +} + +struct vop_whiteout_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct componentname *a_cnp; + int a_flags; +}; +extern struct vnodeop_desc vop_whiteout_desc; +#define VOP_WHITEOUT(dvp, cnp, flags) _VOP_WHITEOUT(dvp, cnp, flags) +static __inline int _VOP_WHITEOUT(struct vnode *dvp, struct componentname *cnp, int flags) +{ + struct vop_whiteout_args a; + a.a_desc = VDESC(vop_whiteout); + a.a_dvp = dvp; + a.a_cnp = cnp; + a.a_flags = flags; + return (VCALL(dvp, VOFFSET(vop_whiteout), &a)); +} + +struct vop_mknod_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +}; +extern struct vnodeop_desc vop_mknod_desc; +#define VOP_MKNOD(dvp, vpp, cnp, vap) _VOP_MKNOD(dvp, vpp, cnp, vap) +static __inline int _VOP_MKNOD(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) +{ + struct vop_mknod_args a; + a.a_desc = VDESC(vop_mknod); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + return (VCALL(dvp, VOFFSET(vop_mknod), &a)); +} + +struct vop_mkcomplex_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + u_long a_type; +}; +extern struct vnodeop_desc vop_mkcomplex_desc; +#define VOP_MKCOMPLEX(dvp, vpp, cnp, vap, type) _VOP_MKCOMPLEX(dvp, vpp, cnp, vap, type) +static __inline int _VOP_MKCOMPLEX(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 
struct vattr *vap, u_long type) +{ + struct vop_mkcomplex_args a; + a.a_desc = VDESC(vop_mkcomplex); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + a.a_type = type; + return (VCALL(dvp, VOFFSET(vop_mkcomplex), &a)); +} + +struct vop_open_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_open_desc; +#define VOP_OPEN(vp, mode, cred, p) _VOP_OPEN(vp, mode, cred, p) +static __inline int _VOP_OPEN(struct vnode *vp, int mode, struct ucred *cred, struct proc *p) +{ + struct vop_open_args a; + a.a_desc = VDESC(vop_open); + a.a_vp = vp; + a.a_mode = mode; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_open), &a)); +} + +struct vop_close_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_close_desc; +#define VOP_CLOSE(vp, fflag, cred, p) _VOP_CLOSE(vp, fflag, cred, p) +static __inline int _VOP_CLOSE(struct vnode *vp, int fflag, struct ucred *cred, struct proc *p) +{ + struct vop_close_args a; + a.a_desc = VDESC(vop_close); + a.a_vp = vp; + a.a_fflag = fflag; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_close), &a)); +} + +struct vop_access_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_access_desc; +#define VOP_ACCESS(vp, mode, cred, p) _VOP_ACCESS(vp, mode, cred, p) +static __inline int _VOP_ACCESS(struct vnode *vp, int mode, struct ucred *cred, struct proc *p) +{ + struct vop_access_args a; + a.a_desc = VDESC(vop_access); + a.a_vp = vp; + a.a_mode = mode; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_access), &a)); +} + +struct vop_getattr_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct 
vnodeop_desc vop_getattr_desc; +#define VOP_GETATTR(vp, vap, cred, p) _VOP_GETATTR(vp, vap, cred, p) +static __inline int _VOP_GETATTR(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct proc *p) +{ + struct vop_getattr_args a; + a.a_desc = VDESC(vop_getattr); + a.a_vp = vp; + a.a_vap = vap; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_getattr), &a)); +} + +struct vop_setattr_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_setattr_desc; +#define VOP_SETATTR(vp, vap, cred, p) _VOP_SETATTR(vp, vap, cred, p) +static __inline int _VOP_SETATTR(struct vnode *vp, struct vattr *vap, struct ucred *cred, struct proc *p) +{ + struct vop_setattr_args a; + a.a_desc = VDESC(vop_setattr); + a.a_vp = vp; + a.a_vap = vap; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_setattr), &a)); +} + +struct vop_getattrlist_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct attrlist *a_alist; + struct uio *a_uio; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_getattrlist_desc; +#define VOP_GETATTRLIST(vp, alist, uio, cred, p) _VOP_GETATTRLIST(vp, alist, uio, cred, p) +static __inline int _VOP_GETATTRLIST(struct vnode *vp, struct attrlist *alist, struct uio *uio, struct ucred *cred, struct proc *p) +{ + struct vop_getattrlist_args a; + a.a_desc = VDESC(vop_getattrlist); + a.a_vp = vp; + a.a_alist = alist; + a.a_uio = uio; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_getattrlist), &a)); +} + +struct vop_setattrlist_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct attrlist *a_alist; + struct uio *a_uio; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_setattrlist_desc; +#define VOP_SETATTRLIST(vp, alist, uio, cred, p) _VOP_SETATTRLIST(vp, alist, uio, cred, p) +static __inline int _VOP_SETATTRLIST(struct vnode *vp, 
struct attrlist *alist, struct uio *uio, struct ucred *cred, struct proc *p) +{ + struct vop_setattrlist_args a; + a.a_desc = VDESC(vop_setattrlist); + a.a_vp = vp; + a.a_alist = alist; + a.a_uio = uio; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_setattrlist), &a)); +} + +struct vop_read_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; +}; +extern struct vnodeop_desc vop_read_desc; +#define VOP_READ(vp, uio, ioflag, cred) _VOP_READ(vp, uio, ioflag, cred) +static __inline int _VOP_READ(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) +{ + struct vop_read_args a; + a.a_desc = VDESC(vop_read); + a.a_vp = vp; + a.a_uio = uio; + a.a_ioflag = ioflag; + a.a_cred = cred; + { + int _err; + extern int ubc_hold(); + extern void ubc_rele(); + int _didhold = ubc_hold(vp); + _err = VCALL(vp, VOFFSET(vop_read), &a); + if (_didhold) + ubc_rele(vp); + return (_err); + } +} + +struct vop_write_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; +}; +extern struct vnodeop_desc vop_write_desc; +#define VOP_WRITE(vp, uio, ioflag, cred) _VOP_WRITE(vp, uio, ioflag, cred) +static __inline int _VOP_WRITE(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred) +{ + struct vop_write_args a; + a.a_desc = VDESC(vop_write); + a.a_vp = vp; + a.a_uio = uio; + a.a_ioflag = ioflag; + a.a_cred = cred; + { + int _err; + extern int ubc_hold(); + extern void ubc_rele(); + int _didhold = ubc_hold(vp); + _err = VCALL(vp, VOFFSET(vop_write), &a); + if (_didhold) + ubc_rele(vp); + return (_err); + } +} + +struct vop_lease_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct proc *a_p; + struct ucred *a_cred; + int a_flag; +}; +extern struct vnodeop_desc vop_lease_desc; +#define VOP_LEASE(vp, p, cred, flag) _VOP_LEASE(vp, p, cred, flag) +static __inline int _VOP_LEASE(struct vnode *vp, struct proc *p, 
struct ucred *cred, int flag) +{ + struct vop_lease_args a; + a.a_desc = VDESC(vop_lease); + a.a_vp = vp; + a.a_p = p; + a.a_cred = cred; + a.a_flag = flag; + return (VCALL(vp, VOFFSET(vop_lease), &a)); +} + +struct vop_ioctl_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + u_long a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_ioctl_desc; +#define VOP_IOCTL(vp, command, data, fflag, cred, p) _VOP_IOCTL(vp, command, data, fflag, cred, p) +static __inline int _VOP_IOCTL(struct vnode *vp, u_long command, caddr_t data, int fflag, struct ucred *cred, struct proc *p) +{ + struct vop_ioctl_args a; + a.a_desc = VDESC(vop_ioctl); + a.a_vp = vp; + a.a_command = command; + a.a_data = data; + a.a_fflag = fflag; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_ioctl), &a)); +} + +struct vop_select_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_select_desc; +#define VOP_SELECT(vp, which, fflags, cred, p) _VOP_SELECT(vp, which, fflags, cred, p) +static __inline int _VOP_SELECT(struct vnode *vp, int which, int fflags, struct ucred *cred, struct proc *p) +{ + struct vop_select_args a; + a.a_desc = VDESC(vop_select); + a.a_vp = vp; + a.a_which = which; + a.a_fflags = fflags; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_select), &a)); +} + +struct vop_exchange_args { + struct vnodeop_desc *a_desc; + struct vnode *a_fvp; + struct vnode *a_tvp; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_exchange_desc; +#define VOP_EXCHANGE(fvp, tvp, cred, p) _VOP_EXCHANGE(fvp, tvp, cred, p) +static __inline int _VOP_EXCHANGE(struct vnode *fvp, struct vnode *tvp, struct ucred *cred, struct proc *p) +{ + struct vop_exchange_args a; + a.a_desc = VDESC(vop_exchange); + a.a_fvp = fvp; + a.a_tvp = tvp; + a.a_cred = cred; + a.a_p 
= p; + return (VCALL(fvp, VOFFSET(vop_exchange), &a)); +} + +struct vop_revoke_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_flags; +}; +extern struct vnodeop_desc vop_revoke_desc; +#define VOP_REVOKE(vp, flags) _VOP_REVOKE(vp, flags) +static __inline int _VOP_REVOKE(struct vnode *vp, int flags) +{ + struct vop_revoke_args a; + a.a_desc = VDESC(vop_revoke); + a.a_vp = vp; + a.a_flags = flags; + return (VCALL(vp, VOFFSET(vop_revoke), &a)); +} + +struct vop_mmap_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_mmap_desc; +#define VOP_MMAP(vp, fflags, cred, p) _VOP_MMAP(vp, fflags, cred, p) +static __inline int _VOP_MMAP(struct vnode *vp, int fflags, struct ucred *cred, struct proc *p) +{ + struct vop_mmap_args a; + a.a_desc = VDESC(vop_mmap); + a.a_vp = vp; + a.a_fflags = fflags; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_mmap), &a)); +} + +struct vop_fsync_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct ucred *a_cred; + int a_waitfor; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_fsync_desc; +#define VOP_FSYNC(vp, cred, waitfor, p) _VOP_FSYNC(vp, cred, waitfor, p) +static __inline int _VOP_FSYNC(struct vnode *vp, struct ucred *cred, int waitfor, struct proc *p) +{ + struct vop_fsync_args a; + a.a_desc = VDESC(vop_fsync); + a.a_vp = vp; + a.a_cred = cred; + a.a_waitfor = waitfor; + a.a_p = p; + { + int _err; + extern int ubc_hold(); + extern void ubc_rele(); + int _didhold = ubc_hold(vp); + _err = VCALL(vp, VOFFSET(vop_fsync), &a); + if (_didhold) + ubc_rele(vp); + return (_err); + } +} + +struct vop_seek_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + off_t a_oldoff; + off_t a_newoff; + struct ucred *a_cred; +}; +extern struct vnodeop_desc vop_seek_desc; +#define VOP_SEEK(vp, oldoff, newoff, cred) _VOP_SEEK(vp, oldoff, newoff, cred) +static __inline int _VOP_SEEK(struct 
vnode *vp, off_t oldoff, off_t newoff, struct ucred *cred) +{ + struct vop_seek_args a; + a.a_desc = VDESC(vop_seek); + a.a_vp = vp; + a.a_oldoff = oldoff; + a.a_newoff = newoff; + a.a_cred = cred; + return (VCALL(vp, VOFFSET(vop_seek), &a)); +} + +struct vop_remove_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; +}; +extern struct vnodeop_desc vop_remove_desc; +#define VOP_REMOVE(dvp, vp, cnp) _VOP_REMOVE(dvp, vp, cnp) +static __inline int _VOP_REMOVE(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) +{ + struct vop_remove_args a; + a.a_desc = VDESC(vop_remove); + a.a_dvp = dvp; + a.a_vp = vp; + a.a_cnp = cnp; + return (VCALL(dvp, VOFFSET(vop_remove), &a)); +} + +struct vop_link_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct vnode *a_tdvp; + struct componentname *a_cnp; +}; +extern struct vnodeop_desc vop_link_desc; +#define VOP_LINK(vp, tdvp, cnp) _VOP_LINK(vp, tdvp, cnp) +static __inline int _VOP_LINK(struct vnode *vp, struct vnode *tdvp, struct componentname *cnp) +{ + struct vop_link_args a; + a.a_desc = VDESC(vop_link); + a.a_vp = vp; + a.a_tdvp = tdvp; + a.a_cnp = cnp; + return (VCALL(vp, VOFFSET(vop_link), &a)); +} + +struct vop_rename_args { + struct vnodeop_desc *a_desc; + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; +}; +extern struct vnodeop_desc vop_rename_desc; +#define VOP_RENAME(fdvp, fvp, fcnp, tdvp, tvp, tcnp) _VOP_RENAME(fdvp, fvp, fcnp, tdvp, tvp, tcnp) +static __inline int _VOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp) +{ + struct vop_rename_args a; + a.a_desc = VDESC(vop_rename); + a.a_fdvp = fdvp; + a.a_fvp = fvp; + a.a_fcnp = fcnp; + a.a_tdvp = tdvp; + a.a_tvp = tvp; + a.a_tcnp = tcnp; + return (VCALL(fdvp, VOFFSET(vop_rename), &a)); 
+} + +struct vop_mkdir_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +}; +extern struct vnodeop_desc vop_mkdir_desc; +#define VOP_MKDIR(dvp, vpp, cnp, vap) _VOP_MKDIR(dvp, vpp, cnp, vap) +static __inline int _VOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap) +{ + struct vop_mkdir_args a; + a.a_desc = VDESC(vop_mkdir); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + return (VCALL(dvp, VOFFSET(vop_mkdir), &a)); +} + +struct vop_rmdir_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; +}; +extern struct vnodeop_desc vop_rmdir_desc; +#define VOP_RMDIR(dvp, vp, cnp) _VOP_RMDIR(dvp, vp, cnp) +static __inline int _VOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) +{ + struct vop_rmdir_args a; + a.a_desc = VDESC(vop_rmdir); + a.a_dvp = dvp; + a.a_vp = vp; + a.a_cnp = cnp; + return (VCALL(dvp, VOFFSET(vop_rmdir), &a)); +} + +struct vop_symlink_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + char *a_target; +}; +extern struct vnodeop_desc vop_symlink_desc; +#define VOP_SYMLINK(dvp, vpp, cnp, vap, target) _VOP_SYMLINK(dvp, vpp, cnp, vap, target) +static __inline int _VOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, struct vattr *vap, char *target) +{ + struct vop_symlink_args a; + a.a_desc = VDESC(vop_symlink); + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + a.a_target = target; + return (VCALL(dvp, VOFFSET(vop_symlink), &a)); +} + +struct vop_readdir_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + int *a_ncookies; + u_long **a_cookies; +}; +extern struct vnodeop_desc vop_readdir_desc; +#define VOP_READDIR(vp, uio, 
cred, eofflag, ncookies, cookies) _VOP_READDIR(vp, uio, cred, eofflag, ncookies, cookies) +static __inline int _VOP_READDIR(struct vnode *vp, struct uio *uio, struct ucred *cred, int *eofflag, int *ncookies, u_long **cookies) +{ + struct vop_readdir_args a; + a.a_desc = VDESC(vop_readdir); + a.a_vp = vp; + a.a_uio = uio; + a.a_cred = cred; + a.a_eofflag = eofflag; + a.a_ncookies = ncookies; + a.a_cookies = cookies; + return (VCALL(vp, VOFFSET(vop_readdir), &a)); +} + +struct vop_readdirattr_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct attrlist *a_alist; + struct uio *a_uio; + u_long a_maxcount; + u_long a_options; + u_long *a_newstate; + int *a_eofflag; + u_long *a_actualcount; + u_long **a_cookies; + struct ucred *a_cred; +}; +extern struct vnodeop_desc vop_readdirattr_desc; +#define VOP_READDIRATTR(vp, alist, uio, maxcount, options, newstate, eofflag, actualcount, cookies, cred) _VOP_READDIRATTR(vp, alist, uio, maxcount, options, newstate, eofflag, actualcount, cookies, cred) +static __inline int _VOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, u_long maxcount, u_long options, u_long *newstate, int *eofflag, u_long *actualcount, u_long **cookies, struct ucred *cred) +{ + struct vop_readdirattr_args a; + a.a_desc = VDESC(vop_readdirattr); + a.a_vp = vp; + a.a_alist = alist; + a.a_uio = uio; + a.a_maxcount = maxcount; + a.a_options = options; + a.a_newstate = newstate; + a.a_eofflag = eofflag; + a.a_actualcount = actualcount; + a.a_cookies = cookies; + a.a_cred = cred; + return (VCALL(vp, VOFFSET(vop_readdirattr), &a)); +} + +struct vop_readlink_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; +}; +extern struct vnodeop_desc vop_readlink_desc; +#define VOP_READLINK(vp, uio, cred) _VOP_READLINK(vp, uio, cred) +static __inline int _VOP_READLINK(struct vnode *vp, struct uio *uio, struct ucred *cred) +{ + struct vop_readlink_args a; + a.a_desc = 
VDESC(vop_readlink); + a.a_vp = vp; + a.a_uio = uio; + a.a_cred = cred; + return (VCALL(vp, VOFFSET(vop_readlink), &a)); +} + +struct vop_abortop_args { + struct vnodeop_desc *a_desc; + struct vnode *a_dvp; + struct componentname *a_cnp; +}; +extern struct vnodeop_desc vop_abortop_desc; +#define VOP_ABORTOP(dvp, cnp) _VOP_ABORTOP(dvp, cnp) +static __inline int _VOP_ABORTOP(struct vnode *dvp, struct componentname *cnp) +{ + struct vop_abortop_args a; + a.a_desc = VDESC(vop_abortop); + a.a_dvp = dvp; + a.a_cnp = cnp; + return (VCALL(dvp, VOFFSET(vop_abortop), &a)); +} + +struct vop_inactive_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_inactive_desc; +#define VOP_INACTIVE(vp, p) _VOP_INACTIVE(vp, p) +static __inline int _VOP_INACTIVE(struct vnode *vp, struct proc *p) +{ + struct vop_inactive_args a; + a.a_desc = VDESC(vop_inactive); + a.a_vp = vp; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_inactive), &a)); +} + +struct vop_reclaim_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_reclaim_desc; +#define VOP_RECLAIM(vp, p) _VOP_RECLAIM(vp, p) +static __inline int _VOP_RECLAIM(struct vnode *vp, struct proc *p) +{ + struct vop_reclaim_args a; + a.a_desc = VDESC(vop_reclaim); + a.a_vp = vp; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_reclaim), &a)); +} + +struct vop_lock_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_lock_desc; +#define VOP_LOCK(vp, flags, p) _VOP_LOCK(vp, flags, p) +static __inline int _VOP_LOCK(struct vnode *vp, int flags, struct proc *p) +{ + struct vop_lock_args a; + a.a_desc = VDESC(vop_lock); + a.a_vp = vp; + a.a_flags = flags; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_lock), &a)); +} + +struct vop_unlock_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +}; +extern struct 
vnodeop_desc vop_unlock_desc; +#define VOP_UNLOCK(vp, flags, p) _VOP_UNLOCK(vp, flags, p) +static __inline int _VOP_UNLOCK(struct vnode *vp, int flags, struct proc *p) +{ + struct vop_unlock_args a; + a.a_desc = VDESC(vop_unlock); + a.a_vp = vp; + a.a_flags = flags; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_unlock), &a)); +} + +struct vop_bmap_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + int *a_runp; +}; +extern struct vnodeop_desc vop_bmap_desc; +#define VOP_BMAP(vp, bn, vpp, bnp, runp) _VOP_BMAP(vp, bn, vpp, bnp, runp) +static __inline int _VOP_BMAP(struct vnode *vp, daddr_t bn, struct vnode **vpp, daddr_t *bnp, int *runp) +{ + struct vop_bmap_args a; + a.a_desc = VDESC(vop_bmap); + a.a_vp = vp; + a.a_bn = bn; + a.a_vpp = vpp; + a.a_bnp = bnp; + a.a_runp = runp; + return (VCALL(vp, VOFFSET(vop_bmap), &a)); +} + +struct vop_print_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; +}; +extern struct vnodeop_desc vop_print_desc; +#define VOP_PRINT(vp) _VOP_PRINT(vp) +static __inline int _VOP_PRINT(struct vnode *vp) +{ + struct vop_print_args a; + a.a_desc = VDESC(vop_print); + a.a_vp = vp; + return (VCALL(vp, VOFFSET(vop_print), &a)); +} + +struct vop_islocked_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; +}; +extern struct vnodeop_desc vop_islocked_desc; +#define VOP_ISLOCKED(vp) _VOP_ISLOCKED(vp) +static __inline int _VOP_ISLOCKED(struct vnode *vp) +{ + struct vop_islocked_args a; + a.a_desc = VDESC(vop_islocked); + a.a_vp = vp; + return (VCALL(vp, VOFFSET(vop_islocked), &a)); +} + +struct vop_pathconf_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + int a_name; + register_t *a_retval; +}; +extern struct vnodeop_desc vop_pathconf_desc; +#define VOP_PATHCONF(vp, name, retval) _VOP_PATHCONF(vp, name, retval) +static __inline int _VOP_PATHCONF(struct vnode *vp, int name, register_t *retval) +{ + struct vop_pathconf_args a; + a.a_desc = 
VDESC(vop_pathconf); + a.a_vp = vp; + a.a_name = name; + a.a_retval = retval; + return (VCALL(vp, VOFFSET(vop_pathconf), &a)); +} + +struct vop_advlock_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + caddr_t a_id; + int a_op; + struct flock *a_fl; + int a_flags; +}; +extern struct vnodeop_desc vop_advlock_desc; +#define VOP_ADVLOCK(vp, id, op, fl, flags) _VOP_ADVLOCK(vp, id, op, fl, flags) +static __inline int _VOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags) +{ + struct vop_advlock_args a; + a.a_desc = VDESC(vop_advlock); + a.a_vp = vp; + a.a_id = id; + a.a_op = op; + a.a_fl = fl; + a.a_flags = flags; + return (VCALL(vp, VOFFSET(vop_advlock), &a)); +} + +struct vop_blkatoff_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + off_t a_offset; + char **a_res; + struct buf **a_bpp; +}; +extern struct vnodeop_desc vop_blkatoff_desc; +#define VOP_BLKATOFF(vp, offset, res, bpp) _VOP_BLKATOFF(vp, offset, res, bpp) +static __inline int _VOP_BLKATOFF(struct vnode *vp, off_t offset, char **res, struct buf **bpp) +{ + struct vop_blkatoff_args a; + a.a_desc = VDESC(vop_blkatoff); + a.a_vp = vp; + a.a_offset = offset; + a.a_res = res; + a.a_bpp = bpp; + return (VCALL(vp, VOFFSET(vop_blkatoff), &a)); +} + +struct vop_valloc_args { + struct vnodeop_desc *a_desc; + struct vnode *a_pvp; + int a_mode; + struct ucred *a_cred; + struct vnode **a_vpp; +}; +extern struct vnodeop_desc vop_valloc_desc; +#define VOP_VALLOC(pvp, mode, cred, vpp) _VOP_VALLOC(pvp, mode, cred, vpp) +static __inline int _VOP_VALLOC(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp) +{ + struct vop_valloc_args a; + a.a_desc = VDESC(vop_valloc); + a.a_pvp = pvp; + a.a_mode = mode; + a.a_cred = cred; + a.a_vpp = vpp; + return (VCALL(pvp, VOFFSET(vop_valloc), &a)); +} + +struct vop_reallocblks_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct cluster_save *a_buflist; +}; +extern struct vnodeop_desc vop_reallocblks_desc; 
+#define VOP_REALLOCBLKS(vp, buflist) _VOP_REALLOCBLKS(vp, buflist) +static __inline int _VOP_REALLOCBLKS(struct vnode *vp, struct cluster_save *buflist) +{ + struct vop_reallocblks_args a; + a.a_desc = VDESC(vop_reallocblks); + a.a_vp = vp; + a.a_buflist = buflist; + return (VCALL(vp, VOFFSET(vop_reallocblks), &a)); +} + +struct vop_vfree_args { + struct vnodeop_desc *a_desc; + struct vnode *a_pvp; + ino_t a_ino; + int a_mode; +}; +extern struct vnodeop_desc vop_vfree_desc; +#define VOP_VFREE(pvp, ino, mode) _VOP_VFREE(pvp, ino, mode) +static __inline int _VOP_VFREE(struct vnode *pvp, ino_t ino, int mode) +{ + struct vop_vfree_args a; + a.a_desc = VDESC(vop_vfree); + a.a_pvp = pvp; + a.a_ino = ino; + a.a_mode = mode; + return (VCALL(pvp, VOFFSET(vop_vfree), &a)); +} + +struct vop_truncate_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + off_t a_length; + int a_flags; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_truncate_desc; +#define VOP_TRUNCATE(vp, length, flags, cred, p) _VOP_TRUNCATE(vp, length, flags, cred, p) +static __inline int _VOP_TRUNCATE(struct vnode *vp, off_t length, int flags, struct ucred *cred, struct proc *p) +{ + struct vop_truncate_args a; + a.a_desc = VDESC(vop_truncate); + a.a_vp = vp; + a.a_length = length; + a.a_flags = flags; + a.a_cred = cred; + a.a_p = p; + { + int _err; + extern int ubc_hold(); + extern void ubc_rele(); + int _didhold = ubc_hold(vp); + _err = VCALL(vp, VOFFSET(vop_truncate), &a); + if (_didhold) + ubc_rele(vp); + return (_err); + } +} + +struct vop_allocate_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + off_t a_length; + u_int32_t a_flags; + off_t *a_bytesallocated; + struct ucred *a_cred; + struct proc *a_p; +}; +extern struct vnodeop_desc vop_allocate_desc; +#define VOP_ALLOCATE(vp, length, flags, bytesallocated, cred, p) _VOP_ALLOCATE(vp, length, flags, bytesallocated, cred, p) +static __inline int _VOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t 
flags, off_t *bytesallocated, struct ucred *cred, struct proc *p) +{ + struct vop_allocate_args a; + a.a_desc = VDESC(vop_allocate); + a.a_vp = vp; + a.a_length = length; + a.a_flags = flags; + a.a_bytesallocated = bytesallocated; + a.a_cred = cred; + a.a_p = p; + return (VCALL(vp, VOFFSET(vop_allocate), &a)); +} + +struct vop_update_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct timeval *a_access; + struct timeval *a_modify; + int a_waitfor; +}; +extern struct vnodeop_desc vop_update_desc; +#define VOP_UPDATE(vp, access, modify, waitfor) _VOP_UPDATE(vp, access, modify, waitfor) +static __inline int _VOP_UPDATE(struct vnode *vp, struct timeval *access, struct timeval *modify, int waitfor) +{ + struct vop_update_args a; + a.a_desc = VDESC(vop_update); + a.a_vp = vp; + a.a_access = access; + a.a_modify = modify; + a.a_waitfor = waitfor; + return (VCALL(vp, VOFFSET(vop_update), &a)); +} + +struct vop_pgrd_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; +}; +extern struct vnodeop_desc vop_pgrd_desc; +#define VOP_PGRD(vp, uio, cred) _VOP_PGRD(vp, uio, cred) +static __inline int _VOP_PGRD(struct vnode *vp, struct uio *uio, struct ucred *cred) +{ + struct vop_pgrd_args a; + a.a_desc = VDESC(vop_pgrd); + a.a_vp = vp; + a.a_uio = uio; + a.a_cred = cred; + return (VCALL(vp, VOFFSET(vop_pgrd), &a)); +} + +struct vop_pgwr_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + vm_offset_t a_offset; +}; +extern struct vnodeop_desc vop_pgwr_desc; +#define VOP_PGWR(vp, uio, cred, offset) _VOP_PGWR(vp, uio, cred, offset) +static __inline int _VOP_PGWR(struct vnode *vp, struct uio *uio, struct ucred *cred, vm_offset_t offset) +{ + struct vop_pgwr_args a; + a.a_desc = VDESC(vop_pgwr); + a.a_vp = vp; + a.a_uio = uio; + a.a_cred = cred; + a.a_offset = offset; + return (VCALL(vp, VOFFSET(vop_pgwr), &a)); +} + +struct vop_pagein_args { + struct vnodeop_desc 
*a_desc; + struct vnode *a_vp; + upl_t a_pl; + vm_offset_t a_pl_offset; + off_t a_f_offset; + size_t a_size; + struct ucred *a_cred; + int a_flags; +}; +extern struct vnodeop_desc vop_pagein_desc; +#define VOP_PAGEIN(vp, pl, pl_offset, f_offset, size, cred, flags) _VOP_PAGEIN(vp, pl, pl_offset, f_offset, size, cred, flags) +static __inline int _VOP_PAGEIN(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, struct ucred *cred, int flags) +{ + struct vop_pagein_args a; + a.a_desc = VDESC(vop_pagein); + a.a_vp = vp; + a.a_pl = pl; + a.a_pl_offset = pl_offset; + a.a_f_offset = f_offset; + a.a_size = size; + a.a_cred = cred; + a.a_flags = flags; + return (VCALL(vp, VOFFSET(vop_pagein), &a)); +} + +struct vop_pageout_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + upl_t a_pl; + vm_offset_t a_pl_offset; + off_t a_f_offset; + size_t a_size; + struct ucred *a_cred; + int a_flags; +}; +extern struct vnodeop_desc vop_pageout_desc; +#define VOP_PAGEOUT(vp, pl, pl_offset, f_offset, size, cred, flags) _VOP_PAGEOUT(vp, pl, pl_offset, f_offset, size, cred, flags) +static __inline int _VOP_PAGEOUT(struct vnode *vp, upl_t pl, vm_offset_t pl_offset, off_t f_offset, size_t size, struct ucred *cred, int flags) +{ + struct vop_pageout_args a; + a.a_desc = VDESC(vop_pageout); + a.a_vp = vp; + a.a_pl = pl; + a.a_pl_offset = pl_offset; + a.a_f_offset = f_offset; + a.a_size = size; + a.a_cred = cred; + a.a_flags = flags; + return (VCALL(vp, VOFFSET(vop_pageout), &a)); +} + +struct vop_devblocksize_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + register_t *a_retval; +}; +extern struct vnodeop_desc vop_devblocksize_desc; +#define VOP_DEVBLOCKSIZE(vp, retval) _VOP_DEVBLOCKSIZE(vp, retval) +static __inline int _VOP_DEVBLOCKSIZE(struct vnode *vp, register_t *retval) +{ + struct vop_devblocksize_args a; + a.a_desc = VDESC(vop_devblocksize); + a.a_vp = vp; + a.a_retval = retval; + return (VCALL(vp, VOFFSET(vop_devblocksize), &a)); +} + 
+struct vop_searchfs_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + void *a_searchparams1; + void *a_searchparams2; + struct attrlist *a_searchattrs; + u_long a_maxmatches; + struct timeval *a_timelimit; + struct attrlist *a_returnattrs; + u_long *a_nummatches; + u_long a_scriptcode; + u_long a_options; + struct uio *a_uio; + struct searchstate *a_searchstate; +}; +extern struct vnodeop_desc vop_searchfs_desc; +#define VOP_SEARCHFS(vp, searchparams1, searchparams2, searchattrs, maxmatches, timelimit, returnattrs, nummatches, scriptcode, options, uio, searchstate) _VOP_SEARCHFS(vp, searchparams1, searchparams2, searchattrs, maxmatches, timelimit, returnattrs, nummatches, scriptcode, options, uio, searchstate) +static __inline int _VOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, u_long maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, u_long *nummatches, u_long scriptcode, u_long options, struct uio *uio, struct searchstate *searchstate) +{ + struct vop_searchfs_args a; + a.a_desc = VDESC(vop_searchfs); + a.a_vp = vp; + a.a_searchparams1 = searchparams1; + a.a_searchparams2 = searchparams2; + a.a_searchattrs = searchattrs; + a.a_maxmatches = maxmatches; + a.a_timelimit = timelimit; + a.a_returnattrs = returnattrs; + a.a_nummatches = nummatches; + a.a_scriptcode = scriptcode; + a.a_options = options; + a.a_uio = uio; + a.a_searchstate = searchstate; + return (VCALL(vp, VOFFSET(vop_searchfs), &a)); +} + +struct vop_copyfile_args { + struct vnodeop_desc *a_desc; + struct vnode *a_fvp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; + int a_mode; + int a_flags; +}; +extern struct vnodeop_desc vop_copyfile_desc; +#define VOP_COPYFILE(fvp, tdvp, tvp, tcnp, mode, flags) _VOP_COPYFILE(fvp, tdvp, tvp, tcnp, mode, flags) +static __inline int _VOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, int mode, int 
flags) +{ + struct vop_copyfile_args a; + a.a_desc = VDESC(vop_copyfile); + a.a_fvp = fvp; + a.a_tdvp = tdvp; + a.a_tvp = tvp; + a.a_tcnp = tcnp; + a.a_mode = mode; + a.a_flags = flags; + return (VCALL(fvp, VOFFSET(vop_copyfile), &a)); +} + +struct vop_blktooff_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; +}; +extern struct vnodeop_desc vop_blktooff_desc; +#define VOP_BLKTOOFF(vp, lblkno, offset) _VOP_BLKTOOFF(vp, lblkno, offset) +static __inline int _VOP_BLKTOOFF(struct vnode *vp, daddr_t lblkno, off_t *offset) +{ + struct vop_blktooff_args a; + a.a_desc = VDESC(vop_blktooff); + a.a_vp = vp; + a.a_lblkno = lblkno; + a.a_offset = offset; + return (VCALL(vp, VOFFSET(vop_blktooff), &a)); +} + +struct vop_offtoblk_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; +}; +extern struct vnodeop_desc vop_offtoblk_desc; +#define VOP_OFFTOBLK(vp, offset, lblkno) _VOP_OFFTOBLK(vp, offset, lblkno) +static __inline int _VOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr_t *lblkno) +{ + struct vop_offtoblk_args a; + a.a_desc = VDESC(vop_offtoblk); + a.a_vp = vp; + a.a_offset = offset; + a.a_lblkno = lblkno; + return (VCALL(vp, VOFFSET(vop_offtoblk), &a)); +} + +struct vop_cmap_args { + struct vnodeop_desc *a_desc; + struct vnode *a_vp; + off_t a_foffset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; +}; +extern struct vnodeop_desc vop_cmap_desc; +#define VOP_CMAP(vp, foffset, size, bpn, run, poff) _VOP_CMAP(vp, foffset, size, bpn, run, poff) +static __inline int _VOP_CMAP(struct vnode *vp, off_t foffset, size_t size, daddr_t *bpn, size_t *run, void *poff) +{ + struct vop_cmap_args a; + a.a_desc = VDESC(vop_cmap); + a.a_vp = vp; + a.a_foffset = foffset; + a.a_size = size; + a.a_bpn = bpn; + a.a_run = run; + a.a_poff = poff; + return (VCALL(vp, VOFFSET(vop_cmap), &a)); +} + +/* Special cases: */ +#include +#include + +struct vop_strategy_args { + struct 
vnodeop_desc *a_desc; + struct buf *a_bp; +}; +extern struct vnodeop_desc vop_strategy_desc; +#define VOP_STRATEGY(bp) _VOP_STRATEGY(bp) +static __inline int _VOP_STRATEGY(struct buf *bp) +{ + struct vop_strategy_args a; + a.a_desc = VDESC(vop_strategy); + a.a_bp = bp; + return (VCALL(bp->b_vp, VOFFSET(vop_strategy), &a)); +} + +struct vop_bwrite_args { + struct vnodeop_desc *a_desc; + struct buf *a_bp; +}; +extern struct vnodeop_desc vop_bwrite_desc; +#define VOP_BWRITE(bp) _VOP_BWRITE(bp) +static __inline int _VOP_BWRITE(struct buf *bp) +{ + struct vop_bwrite_args a; + a.a_desc = VDESC(vop_bwrite); + a.a_bp = bp; + return (VCALL(bp->b_vp, VOFFSET(vop_bwrite), &a)); +} + +/* End of special cases. */ + +#endif /* !_VNODE_IF_H_ */ diff --git a/bsd/sys/vstat.h b/bsd/sys/vstat.h new file mode 100644 index 000000000..0d8e745a3 --- /dev/null +++ b/bsd/sys/vstat.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1998 Apple Computer, Inc. All Rights Reserved */ +/*- + * @(#)vstat.h + */ +/* HISTORY + * 01-27-98 chw at Apple Computer, Inc. + * Created. 
+ */ + +#ifndef _SYS_VSTAT_H_ +#define _SYS_VSTAT_H_ + +#include <sys/time.h> +#include <sys/attr.h> + +#ifndef _POSIX_SOURCE + +struct vstat { + fsid_t vst_volid; /* volume identifier */ + fsobj_id_t vst_nodeid; /* object's id */ + fsobj_type_t vst_vnodetype; /* vnode type (VREG, VCPLX, VDIR, etc.) */ + fsobj_tag_t vst_vnodetag; /* vnode tag (HFS, UFS, etc.) */ + mode_t vst_mode; /* inode protection mode */ + nlink_t vst_nlink; /* number of hard links */ + uid_t vst_uid; /* user ID of the file's owner */ + gid_t vst_gid; /* group ID of the file's group */ + dev_t vst_dev; /* inode's device */ + dev_t vst_rdev; /* device type */ +#ifndef _POSIX_SOURCE + struct timespec vst_atimespec; /* time of last access */ + struct timespec vst_mtimespec; /* time of last data modification */ + struct timespec vst_ctimespec; /* time of last file status change */ +#else + time_t vst_atime; /* time of last access */ + long vst_atimensec; /* nsec of last access */ + time_t vst_mtime; /* time of last data modification */ + long vst_mtimensec; /* nsec of last data modification */ + time_t vst_ctime; /* time of last file status change */ + long vst_ctimensec; /* nsec of last file status change */ +#endif + off_t vst_filesize; /* file size, in bytes */ + quad_t vst_blocks; /* bytes allocated for file */ + u_int32_t vst_blksize; /* optimal blocksize for I/O */ + u_int32_t vst_flags; /* user defined flags for file */ +}; + +#endif /* ! _POSIX_SOURCE */ + +#endif /* !_SYS_VSTAT_H_ */ diff --git a/bsd/sys/wait.h b/bsd/sys/wait.h new file mode 100644 index 000000000..76bf41a3b --- /dev/null +++ b/bsd/sys/wait.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)wait.h 8.2 (Berkeley) 7/10/94 + */ + +#ifndef _SYS_WAIT_H_ +#define _SYS_WAIT_H_ + +/* + * This file holds definitions relevant to the wait4 system call + * and the alternate interfaces that use it (wait, wait3, waitpid). + */ + +/* + * Macros to test the exit status returned by wait + * and extract the relevant values. + */ +#ifdef _POSIX_SOURCE +#define _W_INT(i) (i) +#else +#define _W_INT(w) (*(int *)&(w)) /* convert union wait to int */ +#define WCOREFLAG 0200 + +#endif /* _POSIX_SOURCE */ + +#define _WSTATUS(x) (_W_INT(x) & 0177) +#define _WSTOPPED 0177 /* _WSTATUS if process is stopped */ +#define WIFSTOPPED(x) (_WSTATUS(x) == _WSTOPPED) +#define WSTOPSIG(x) (_W_INT(x) >> 8) +#define WIFSIGNALED(x) (_WSTATUS(x) != _WSTOPPED && _WSTATUS(x) != 0) +#define WTERMSIG(x) (_WSTATUS(x)) +#define WIFEXITED(x) (_WSTATUS(x) == 0) +#define WEXITSTATUS(x) (_W_INT(x) >> 8) +#if !defined(_POSIX_SOURCE) +#define WCOREDUMP(x) (_W_INT(x) & WCOREFLAG) + +#define W_EXITCODE(ret, sig) ((ret) << 8 | (sig)) +#define W_STOPCODE(sig) ((sig) << 8 | _WSTOPPED) +#endif /* !defined(_POSIX_SOURCE) */ + +/* + * Option bits for the third argument of wait4. 
WNOHANG causes the + * wait to not hang if there are no stopped or terminated processes, rather + * returning an error indication in this case (pid==0). WUNTRACED + * indicates that the caller should receive status about untraced children + * which stop due to signals. If children are stopped and a wait without + * this option is done, it is as though they were still running... nothing + * about them is returned. + */ +#define WNOHANG 1 /* don't hang in wait */ +#define WUNTRACED 2 /* tell about stopped, untraced children */ + +#if !defined(_POSIX_SOURCE) +/* POSIX extensions and 4.2/4.3 compatability: */ + +/* + * Tokens for special values of the "pid" parameter to wait4. + */ +#define WAIT_ANY (-1) /* any process */ +#define WAIT_MYPGRP 0 /* any process in my process group */ + +#include + +/* + * Deprecated: + * Structure of the information in the status word returned by wait4. + * If w_stopval==WSTOPPED, then the second structure describes + * the information returned, else the first. + */ +union wait { + int w_status; /* used in syscall */ + /* + * Terminated process status. + */ + struct { +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned int w_Termsig:7, /* termination signal */ + w_Coredump:1, /* core dump indicator */ + w_Retcode:8, /* exit code if w_termsig==0 */ + w_Filler:16; /* upper bits filler */ +#endif +#if BYTE_ORDER == BIG_ENDIAN + unsigned int w_Filler:16, /* upper bits filler */ + w_Retcode:8, /* exit code if w_termsig==0 */ + w_Coredump:1, /* core dump indicator */ + w_Termsig:7; /* termination signal */ +#endif + } w_T; + /* + * Stopped process status. Returned + * only for traced children unless requested + * with the WUNTRACED option bit. 
+ */ + struct { +#if BYTE_ORDER == LITTLE_ENDIAN + unsigned int w_Stopval:8, /* == W_STOPPED if stopped */ + w_Stopsig:8, /* signal that stopped us */ + w_Filler:16; /* upper bits filler */ +#endif +#if BYTE_ORDER == BIG_ENDIAN + unsigned int w_Filler:16, /* upper bits filler */ + w_Stopsig:8, /* signal that stopped us */ + w_Stopval:8; /* == W_STOPPED if stopped */ +#endif + } w_S; +}; +#define w_termsig w_T.w_Termsig +#define w_coredump w_T.w_Coredump +#define w_retcode w_T.w_Retcode +#define w_stopval w_S.w_Stopval +#define w_stopsig w_S.w_Stopsig + +#define WSTOPPED _WSTOPPED +#endif /* !defined(_POSIX_SOURCE) */ + +#ifndef KERNEL +#include +#include + +__BEGIN_DECLS +struct rusage; /* forward declaration */ + +pid_t wait __P((int *)); +pid_t waitpid __P((pid_t, int *, int)); +#if !defined(_POSIX_SOURCE) +pid_t wait3 __P((int *, int, struct rusage *)); +pid_t wait4 __P((pid_t, int *, int, struct rusage *)); +#endif /* !defined(_POSIX_SOURCE) */ +__END_DECLS +#endif +#endif /* !_SYS_WAIT_H_ */ diff --git a/bsd/ufs/Makefile b/bsd/ufs/Makefile new file mode 100644 index 000000000..e0bf3fa37 --- /dev/null +++ b/bsd/ufs/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + ffs \ + ufs + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + ffs \ + ufs + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +SETUP_SUBDIRS = \ + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/ufs/ffs/Makefile b/bsd/ufs/ffs/Makefile new file mode 100644 index 000000000..02f68adee --- /dev/null +++ b/bsd/ufs/ffs/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export 
MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + ffs_extern.h fs.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = ufs/ffs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = ufs/ffs + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/ufs/ffs/ffs_alloc.c b/bsd/ufs/ffs/ffs_alloc.c new file mode 100644 index 000000000..c6e462097 --- /dev/null +++ b/bsd/ufs/ffs/ffs_alloc.c @@ -0,0 +1,1648 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ffs_alloc.c 8.18 (Berkeley) 5/26/95 + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +extern u_long nextgennumber; + +static ufs_daddr_t ffs_alloccg __P((struct inode *, int, ufs_daddr_t, int)); +static ufs_daddr_t ffs_alloccgblk __P((struct fs *, struct cg *, ufs_daddr_t)); +static ufs_daddr_t ffs_clusteralloc __P((struct inode *, int, ufs_daddr_t, + int)); +static ino_t ffs_dirpref __P((struct fs *)); +static ufs_daddr_t ffs_fragextend __P((struct inode *, int, long, int, int)); +static void ffs_fserr __P((struct fs *, u_int, char *)); +static u_long ffs_hashalloc + __P((struct inode *, int, long, int, u_int32_t (*)())); +static ino_t ffs_nodealloccg __P((struct inode *, int, ufs_daddr_t, int)); +static ufs_daddr_t ffs_mapsearch __P((struct fs *, struct cg *, ufs_daddr_t, + int)); + +/* + * Allocate a block in the file system. + * + * The size of the requested block is given, which must be some + * multiple of fs_fsize and <= fs_bsize. + * A preference may be optionally specified. If a preference is given + * the following hierarchy is used to allocate a block: + * 1) allocate the requested block. + * 2) allocate a rotationally optimal block in the same cylinder. + * 3) allocate a block in the same cylinder group. + * 4) quadradically rehash into other cylinder groups, until an + * available block is located. + * If no block preference is given the following heirarchy is used + * to allocate a block: + * 1) allocate a block in the cylinder group that contains the + * inode for the file. + * 2) quadradically rehash into other cylinder groups, until an + * available block is located. 
 */
ffs_alloc(ip, lbn, bpref, size, cred, bnp)
	register struct inode *ip;
	ufs_daddr_t lbn, bpref;
	int size;
	struct ucred *cred;
	ufs_daddr_t *bnp;
{
	register struct fs *fs;
	ufs_daddr_t bno;
	int cg, error;
	int devBlockSize=0;	/* device block size for btodb() conversions; set by VOP_DEVBLOCKSIZE below */
	*bnp = 0;
	fs = ip->i_fs;
#if DIAGNOSTIC
	/* Sanity-check the request: size must be a fragment multiple <= fs_bsize. */
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential\n");
#endif /* DIAGNOSTIC */
	/* Fast failure: a full block is wanted but the filesystem has none free. */
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	/* Only the superuser may allocate out of the minfree reserve. */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize);
#if QUOTA
	/* Charge the quota up front; it is refunded below if allocation fails. */
	if (error = chkdq(ip, (long)btodb(size, devBlockSize), cred, 0))
		return (error);
#endif /* QUOTA */
	/* An out-of-range preference means "no preference". */
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	/* Preferred cg first, then quadratic rehash / brute force (see ffs_hashalloc). */
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, size,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		ip->i_blocks += btodb(size, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
#if QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(size, devBlockSize), cred, FORCE);
#endif /* QUOTA */
nospace:
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
ffs_realloccg(ip, lbprev, bpref, osize, nsize, cred, bpp)
	register struct inode *ip;
	ufs_daddr_t lbprev;
	ufs_daddr_t bpref;
	int osize, nsize;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct fs *fs;
	struct buf *bp;
	int cg, request, error;
	ufs_daddr_t bprev, bno;
	int devBlockSize=0;	/* device block size for btodb() conversions; set by VOP_DEVBLOCKSIZE below */

	*bpp = 0;
	fs = ip->i_fs;
#if DIAGNOSTIC
	/* Both old and new sizes must be fragment multiples <= fs_bsize. */
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		    "dev = 0x%x, bsize = %d, osize = %d, nsize = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, osize, nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential\n");
#endif /* DIAGNOSTIC */
	/* Only the superuser may allocate out of the minfree reserve. */
	if (cred->cr_uid != 0 && freespace(fs, fs->fs_minfree) <= 0)
		goto nospace;
	if ((bprev = ip->i_db[lbprev]) == 0) {
		printf("dev = 0x%x, bsize = %d, bprev = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, bprev, fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	/*
	 * Allocate the extra space in the buffer.
	 */
	if (error = bread(ITOV(ip), lbprev, osize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize);

#if QUOTA
	/* Charge only the growth (nsize - osize); refunded below on failure. */
	if (error = chkdq(ip, (long)btodb(nsize - osize, devBlockSize), cred, 0))
	{
		brelse(bp);
		return (error);
	}
#endif /* QUOTA */
	/*
	 * Check for extension in the existing location.
	 */
	cg = dtog(fs, bprev);
	if (bno = ffs_fragextend(ip, cg, (long)bprev, osize, nsize)) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("bad blockno");
		ip->i_blocks += btodb(nsize - osize, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		/* Zero the newly exposed tail of the buffer. */
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree < 5 ||
		    fs->fs_cstotal.cs_nffree >
		    fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = 0x%x, optim = %d, fs = %s\n",
		    ip->i_dev, fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = (ufs_daddr_t)ffs_hashalloc(ip, cg, (long)bpref, request,
	    (u_int32_t (*)())ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		/* Free the old location, and any over-allocation past nsize. */
		ffs_blkfree(ip, bprev, (long)osize);
		if (nsize < request)
			ffs_blkfree(ip, bno + numfrags(fs, nsize),
			    (long)(request - nsize));
		ip->i_blocks += btodb(nsize - osize, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		bzero((char *)bp->b_data + osize, (u_int)nsize - osize);
		*bpp = bp;
		return (0);
	}
#if QUOTA
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, (long)-btodb(nsize - osize, devBlockSize), cred, FORCE);
#endif /* QUOTA */
	brelse(bp);
nospace:
	/*
	 * no space available
	 */
	ffs_fserr(fs, cred->cr_uid, "file system full");
	uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceeding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather
 * the previous block allocation will be used.
 */
/* Note: This routine is unused in UBC cluster I/O */

int doasyncfree = 1;
int doreallocblks = 1;

/*
 * Stub: block-cluster reallocation is disabled in this tree. Always
 * returns ENOSPC, which (per the comment above) callers treat as
 * "keep the previous allocation", not as a user-visible error.
 */
int
ffs_reallocblks(ap)
	struct vop_reallocblks_args *ap;
{
	return (ENOSPC);
}

/*
 * Allocate an inode in the file system.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadradically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following heirarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadradically rehash into other cylinder groups, until an
 *      available inode is located.
 */
int
ffs_valloc(ap)
	struct vop_valloc_args /* {
		struct vnode *a_pvp;
		int a_mode;
		struct ucred *a_cred;
		struct vnode **a_vpp;
	} */ *ap;
{
	register struct vnode *pvp = ap->a_pvp;	/* parent (directory) vnode */
	register struct inode *pip;
	register struct fs *fs;
	register struct inode *ip;
	mode_t mode = ap->a_mode;
	ino_t ino, ipref;
	int cg, error;

	*ap->a_vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	/* Directories get a spread-out cg via ffs_dirpref; files start near the parent. */
	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(fs);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	ino = (ino_t)ffs_hashalloc(pip, cg, (long)ipref, mode, ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, ap->a_vpp);
	if (error) {
		/* Could not get a vnode: release the inode we just allocated. */
		VOP_VFREE(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*ap->a_vpp);
	/* A freshly allocated inode must be clean; anything else is corruption. */
	if (ip->i_mode) {
		printf("mode = 0%o, inum = %d, fs = %s\n",
		    ip->i_mode, ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (ip->i_blocks) {	/* XXX */
		/* Stale block count on a free inode: warn and repair in core. */
		printf("free inode %s/%d had %d blocks\n",
		    fs->fs_fsmnt, ino, ip->i_blocks);
		ip->i_blocks = 0;
	}
	ip->i_flags = 0;
	/*
	 * Set up a new generation number for this inode.
	 * nextgennumber is a file-scope global; kept monotone and at least
	 * the current time so generations are not reused across reboots.
	 */
	if (++nextgennumber < (u_long)time.tv_sec)
		nextgennumber = time.tv_sec;
	ip->i_gen = nextgennumber;
	return (0);
noinodes:
	ffs_fserr(fs, ap->a_cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
+ */ +static ino_t +ffs_dirpref(fs) + register struct fs *fs; +{ + int cg, minndir, mincg, avgifree; + + avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg; + minndir = fs->fs_ipg; + mincg = 0; + for (cg = 0; cg < fs->fs_ncg; cg++) + if (fs->fs_cs(fs, cg).cs_ndir < minndir && + fs->fs_cs(fs, cg).cs_nifree >= avgifree) { + mincg = cg; + minndir = fs->fs_cs(fs, cg).cs_ndir; + } + return ((ino_t)(fs->fs_ipg * mincg)); +} + +/* + * Select the desired position for the next block in a file. The file is + * logically divided into sections. The first section is composed of the + * direct blocks. Each additional section contains fs_maxbpg blocks. + * + * If no blocks have been allocated in the first section, the policy is to + * request a block in the same cylinder group as the inode that describes + * the file. If no blocks have been allocated in any other section, the + * policy is to place the section in a cylinder group with a greater than + * average number of free blocks. An appropriate cylinder group is found + * by using a rotor that sweeps the cylinder groups. When a new group of + * blocks is needed, the sweep begins in the cylinder group following the + * cylinder group from which the previous allocation was made. The sweep + * continues until a cylinder group with greater than the average number + * of free blocks is found. If the allocation is for the first block in an + * indirect block, the information on the previous allocation is unavailable; + * here a best guess is made based upon the logical block number being + * allocated. + * + * If a section is already partially allocated, the policy is to + * contiguously allocate fs_maxcontig blocks. The end of one of these + * contiguous blocks and the beginning of the next is physically separated + * so that the disk head will be in transit between them for at least + * fs_rotdelay milliseconds. This is to allow time for the processor to + * schedule another I/O transfer. 
 */
ufs_daddr_t
ffs_blkpref(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_daddr_t lbn;
	int indx;
	ufs_daddr_t *bap;
{
	register struct fs *fs;
	register int cg;
	int avgbfree, startcg;
	ufs_daddr_t nextblk;
#if REV_ENDIAN_FS
	daddr_t prev=0;
	struct vnode *vp=ITOV(ip);
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
#if REV_ENDIAN_FS
	/*
	 * On a byte-swapped (opposite-endian) volume the block pointer
	 * array bap[] is stored swapped unless it is the in-core direct
	 * block array i_db[]; normalize the previous entry into `prev'.
	 */
	if (indx && bap) {
		if (rev_endian) {
			if (bap != &ip->i_db[0])
				prev = NXSwapLong(bap[indx - 1]);
			else
				prev = bap[indx - 1];
		} else prev = bap[indx - 1];
	}
	if (indx % fs->fs_maxbpg == 0 || prev == 0)
#else /* REV_ENDIAN_FS */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0)
#endif /* REV_ENDIAN_FS */
	{
		/* Start of a new section (or no previous block): pick a cg. */
		if (lbn < NDADDR) {
			cg = ino_to_cg(fs, ip->i_number);
			return (fs->fs_fpg * cg + fs->fs_frag);
		}
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
#if REV_ENDIAN_FS
		if (indx == 0 || prev == 0)
#else /* REV_ENDIAN_FS */
		if (indx == 0 || bap[indx - 1] == 0)
#endif /* REV_ENDIAN_FS */
			startcg =
			    ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
		else
#if REV_ENDIAN_FS
			startcg = dtog(fs, prev) + 1;
#else /* REV_ENDIAN_FS */
			startcg = dtog(fs, bap[indx - 1]) + 1;
#endif /* REV_ENDIAN_FS */
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		/* Sweep forward from startcg, then wrap to the beginning. */
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (fs->fs_fpg * cg + fs->fs_frag);
			}
		return (NULL);	/* no preference could be computed */
	}
	/*
	 * One or more previous blocks have been laid out. If less
	 * than fs_maxcontig previous blocks are contiguous, the
	 * next block is requested contiguously, otherwise it is
	 * requested rotationally delayed by fs_rotdelay milliseconds.
	 */
#if REV_ENDIAN_FS
	if (rev_endian) {
		nextblk = prev + fs->fs_frag;
		if (indx < fs->fs_maxcontig) {
			return (nextblk);
		}
		if (bap != &ip->i_db[0])
			prev = NXSwapLong(bap[indx - fs->fs_maxcontig]);
		else
			prev = bap[indx - fs->fs_maxcontig];
		if (prev + blkstofrags(fs, fs->fs_maxcontig) != nextblk)
			return (nextblk);
	} else {
#endif /* REV_ENDIAN_FS */
		nextblk = bap[indx - 1] + fs->fs_frag;
		if (indx < fs->fs_maxcontig || bap[indx - fs->fs_maxcontig] +
		    blkstofrags(fs, fs->fs_maxcontig) != nextblk)
			return (nextblk);
#if REV_ENDIAN_FS
	}
#endif /* REV_ENDIAN_FS */
	if (fs->fs_rotdelay != 0)
		/*
		 * Here we convert ms of delay to frags as:
		 * (frags) = (ms) * (rev/sec) * (sect/rev) /
		 *	((sect/frag) * (ms/sec))
		 * then round up to the next block.
		 */
		nextblk += roundup(fs->fs_rotdelay * fs->fs_rps * fs->fs_nsect /
		    (NSPF(fs) * 1000), fs->fs_frag);
	return (nextblk);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadradically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
/*VARARGS5*/
static u_long
ffs_hashalloc(ip, cg, pref, size, allocator)
	struct inode *ip;
	int cg;
	long pref;
	int size;	/* size for data blocks, mode for inodes */
	u_int32_t (*allocator)();
{
	register struct fs *fs;
	long result;	/* 0 (or NULL) from the allocator means "no luck in this cg" */
	int i, icg = cg;

	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (NULL);	/* every cylinder group failed */
}

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	int cg;
	long bprev;
	int osize, nsize;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	long bno;
	int frags, bbase;
	int i, error;
#if REV_ENDIAN_FS
	struct vnode *vp=ITOV(ip);
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (NULL);
	frags = numfrags(fs, nsize);	/* number of fragments needed */
	bbase = fragnum(fs, bprev);	/* offset in a frag (it is mod fragsize */
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (NULL);
	}
	/* read corresponding cylinder group info */
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	/* On opposite-endian volumes the on-disk cg must be swapped in,
	 * and swapped back out before any brelse/bdwrite. */
	if (rev_endian) {
		byte_swap_cgin(cgp, fs);
	}
#endif /* REV_ENDIAN_FS */

	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bprev);
	/* All fragments between the old end and the new end must be free. */
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(cg_blksfree(cgp), bno + i)) {
#if REV_ENDIAN_FS
			if (rev_endian)
				byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
			brelse(bp);
			return (NULL);
		}
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(cg_blksfree(cgp), bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize); i < frags; i++) {
		clrbit(cg_blksfree(cgp), bno + i);
		cgp->cg_cs.cs_nffree--;
		fs->fs_cstotal.cs_nffree--;
		fs->fs_cs(fs, cg).cs_nffree--;
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (bprev);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs_daddr_t
ffs_alloccg(ip, cg, bpref, size)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	register int i;
	int error, bno, frags, allocsiz;
#if REV_ENDIAN_FS
	struct vnode *vp=ITOV(ip);
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp,fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	cgp->cg_time = time.tv_sec;
	/* Whole-block request: delegate to the block allocator. */
	if (size == fs->fs_bsize) {
		bno = ffs_alloccgblk(fs, cgp, bpref);
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		bdwrite(bp);
		return (bno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0) {
#if REV_ENDIAN_FS
			if (rev_endian)
				byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
			brelse(bp);
			return (NULL);
		}
		bno = ffs_alloccgblk(fs, cgp, bpref);
		bpref = dtogd(fs, bno);
		/* Mark the unused tail of the block free again as fragments. */
		for (i = frags; i < fs->fs_frag; i++)
			setbit(cg_blksfree(cgp), bpref + i);
		i = fs->fs_frag - frags;
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		fs->fs_fmod = 1;
		cgp->cg_frsum[i]++;
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		bdwrite(bp);
		return (bno);
	}
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}
	for (i = 0; i < frags; i++)
		clrbit(cg_blksfree(cgp), bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	/* Found run was allocsiz frags; any leftover becomes a smaller run. */
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (cg * fs->fs_fpg + bno);
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static ufs_daddr_t
ffs_alloccgblk(fs, cgp, bpref)
	register struct fs *fs;
	register struct cg *cgp;
	ufs_daddr_t bpref;
{
	ufs_daddr_t bno, blkno;
	int cylno, pos, delta;
	short *cylbp;
	register int i;

	/* No usable preference: fall back to this group's rotor. */
	if (bpref == 0 || dtog(fs, bpref) != cgp->cg_cgx) {
		bpref = cgp->cg_rotor;
		goto norot;
	}
	bpref = blknum(fs, bpref);
	bpref = dtogd(fs, bpref);
	/*
	 * if the requested block is available, use it
	 */
	if (ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bpref))) {
		bno = bpref;
		goto gotit;
	}
	if (fs->fs_nrpos <= 1 || fs->fs_cpc == 0) {
		/*
		 * Block layout information is not available.
		 * Leaving bpref unchanged means we take the
		 * next available free block following the one
		 * we just allocated. Hopefully this will at
		 * least hit a track cache on drives of unknown
		 * geometry (e.g. SCSI).
		 */
		goto norot;
	}
	/*
	 * check for a block available on the same cylinder
	 */
	cylno = cbtocylno(fs, bpref);
	if (cg_blktot(cgp)[cylno] == 0)
		goto norot;
	/*
	 * check the summary information to see if a block is
	 * available in the requested cylinder starting at the
	 * requested rotational position and proceeding around.
	 */
	cylbp = cg_blks(fs, cgp, cylno);
	pos = cbtorpos(fs, bpref);
	for (i = pos; i < fs->fs_nrpos; i++)
		if (cylbp[i] > 0)
			break;
	if (i == fs->fs_nrpos)
		for (i = 0; i < pos; i++)
			if (cylbp[i] > 0)
				break;
	if (cylbp[i] > 0) {
		/*
		 * found a rotational position, now find the actual
		 * block. A panic if none is actually there.
		 */
		pos = cylno % fs->fs_cpc;
		bno = (cylno - pos) * fs->fs_spc / NSPB(fs);
		if (fs_postbl(fs, pos)[i] == -1) {
			printf("pos = %d, i = %d, fs = %s\n",
			    pos, i, fs->fs_fsmnt);
			panic("ffs_alloccgblk: cyl groups corrupted");
		}
		/* Walk the rotational-position table (fs_rotbl deltas). */
		for (i = fs_postbl(fs, pos)[i];; ) {
			if (ffs_isblock(fs, cg_blksfree(cgp), bno + i)) {
				bno = blkstofrags(fs, (bno + i));
				goto gotit;
			}
			delta = fs_rotbl(fs)[i];
			if (delta <= 0 ||
			    delta + i > fragstoblks(fs, fs->fs_fpg))
				break;
			i += delta;
		}
		printf("pos = %d, i = %d, fs = %s\n", pos, i, fs->fs_fsmnt);
		panic("ffs_alloccgblk: can't find blk in cyl");
	}
norot:
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 */
	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
	if (bno < 0)
		return (NULL);
	cgp->cg_rotor = bno;
gotit:
	/* Claim the block and update every level of free-count bookkeeping. */
	blkno = fragstoblks(fs, bno);
	ffs_clrblock(fs, cg_blksfree(cgp), (long)blkno);
	ffs_clusteracct(fs, cgp, blkno, -1);
	cgp->cg_cs.cs_nbfree--;
	fs->fs_cstotal.cs_nbfree--;
	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--;
	cylno = cbtocylno(fs, bno);
	cg_blks(fs, cgp, cylno)[cbtorpos(fs, bno)]--;
	cg_blktot(cgp)[cylno]--;
	fs->fs_fmod = 1;
	return (cgp->cg_cgx * fs->fs_fpg + bno);
}

/*
 * Determine whether a cluster can be allocated.
 *
 * We do not currently check for optimal rotational layout if there
 * are multiple choices in the same cylinder group. Instead we just
 * take the first one that we find following bpref.
 */
static ufs_daddr_t
ffs_clusteralloc(ip, cg, bpref, len)
	struct inode *ip;
	int cg;
	ufs_daddr_t bpref;
	int len;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int i, got, run, bno, bit, map;
	u_char *mapp;
	int32_t *lp;
#if REV_ENDIAN_FS
	struct vnode *vp=ITOV(ip);
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	/* Cached per-cg maximum lets us skip groups without reading them. */
	if (fs->fs_maxcluster[cg] < len)
		return (NULL);
	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize,
	    NOCRED, &bp))
		goto fail;
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp,fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		/* swap back before releasing so the cache copy stays on-disk order */
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &cg_clustersum(cgp)[len];
	for (i = len; i <= fs->fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->fs_contigsumsize) {
		/*
		 * This is the first time looking for a cluster in this
		 * cylinder group. Update the cluster summary information
		 * to reflect the true maximum sized cluster so that
		 * future cluster allocation requests can avoid reading
		 * the cylinder group map only to find no clusters.
		 */
		lp = &cg_clustersum(cgp)[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->fs_maxcluster[cg] = i;
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Search the cluster map to find a big enough cluster.
	 * We take the first one that we find, even if it is larger
	 * than we need as we prefer to get one close to the previous
	 * block allocation. We do not search before the current
	 * preference point as we do not want to allocate a block
	 * that is allocated before the previous one (as we will
	 * then have to wait for another pass of the elevator
	 * algorithm before it will be read). We prefer to fail and
	 * be recalled to try an allocation in the next cylinder group.
	 */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	else
		bpref = fragstoblks(fs, dtogd(fs, blknum(fs, bpref)));
	/* Scan the free-cluster bitmap a byte at a time, tracking run length. */
	mapp = &cg_clustersfree(cgp)[bpref / NBBY];
	map = *mapp++;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) {
		if ((map & bit) == 0) {
			run = 0;
		} else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1)) {
			bit <<= 1;
		} else {
			map = *mapp++;
			bit = 1;
		}
	}
	if (got == cgp->cg_nclusterblks) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		goto fail;
	}
	/*
	 * Allocate the cluster that we have found.
	 */
	for (i = 1; i <= len; i++)
		if (!ffs_isblock(fs, cg_blksfree(cgp), got - run + i))
			panic("ffs_clusteralloc: map mismatch");
	bno = cg * fs->fs_fpg + blkstofrags(fs, got - run + 1);
	if (dtog(fs, bno) != cg)
		panic("ffs_clusteralloc: allocated out of group");
	len = blkstofrags(fs, len);
	/* Claim every block of the run; each must come back exactly in place. */
	for (i = 0; i < len; i += fs->fs_frag)
		if ((got = ffs_alloccgblk(fs, cgp, bno + i)) != bno + i)
			panic("ffs_clusteralloc: lost block");
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (bno);

fail:
	brelse(bp);
	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it using the following policy:
 *   1) allocate the requested inode.
 *   2) allocate the next available inode after the requested
 *      inode in the specified cylinder group.
 */
static ino_t
ffs_nodealloccg(ip, cg, ipref, mode)
	struct inode *ip;
	int cg;
	ufs_daddr_t ipref;
	int mode;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	int error, start, len, loc, map, i;
#if REV_ENDIAN_FS
	struct vnode *vp=ITOV(ip);
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	fs = ip->i_fs;
	/* Quick reject from the in-core summary before touching the disk. */
	if (fs->fs_cs(fs, cg).cs_nifree == 0)
		return (NULL);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (NULL);
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp,fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return (NULL);
	}

	cgp->cg_time = time.tv_sec;
	/* Honor the caller's preferred inode if it is free. */
	if (ipref) {
		ipref %= fs->fs_ipg;
		if (isclr(cg_inosused(cgp), ipref))
			goto gotit;
	}
	/*
	 * Otherwise scan the inode-used bitmap from the rotor, wrapping
	 * once; skpc() skips fully-allocated (0xff) bytes.
	 */
	start = cgp->cg_irotor / NBBY;
	len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY);
	loc = skpc(0xff, len, &cg_inosused(cgp)[start]);
	if (loc == 0) {
		len = start + 1;
		start = 0;
		loc = skpc(0xff, len, &cg_inosused(cgp)[0]);
		if (loc == 0) {
			printf("cg = %d, irotor = %d, fs = %s\n",
			    cg, cgp->cg_irotor, fs->fs_fsmnt);
			panic("ffs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	i = start + len - loc;
	map = cg_inosused(cgp)[i];
	ipref = i * NBBY;
	for (i = 1; i < (1 << NBBY); i <<= 1, ipref++) {
		if ((map & i) == 0) {
			cgp->cg_irotor = ipref;
			goto gotit;
		}
	}
	printf("fs = %s\n", fs->fs_fsmnt);
	panic("ffs_nodealloccg: block not in map");
	/* NOTREACHED */
gotit:
	/* Mark the inode used and update all three summary levels. */
	setbit(cg_inosused(cgp), ipref);
	cgp->cg_cs.cs_nifree--;
	fs->fs_cstotal.cs_nifree--;
	fs->fs_cs(fs, cg).cs_nifree--;
	fs->fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		cgp->cg_cs.cs_ndir++;
		fs->fs_cstotal.cs_ndir++;
		fs->fs_cs(fs, cg).cs_ndir++;
	}
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
	return (cg * fs->fs_ipg + ipref);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
ffs_blkfree(ip, bno, size)
	register struct inode *ip;
	ufs_daddr_t bno;
	long size;
{
	register struct fs *fs;
	register struct cg *cgp;
	struct buf *bp;
	ufs_daddr_t blkno;
	int i, error, cg, blk, frags, bbase;
#if REV_ENDIAN_FS
	struct vnode *vp=ITOV(ip);
	struct mount *mp=vp->v_mount;
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */
	fs = ip->i_fs;
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = 0x%x, bsize = %d, size = %d, fs = %s\n",
		    ip->i_dev, fs->fs_bsize, size, fs->fs_fsmnt);
		panic("blkfree: bad size");
	}
	cg = dtog(fs, bno);
	/* Out-of-range block: report and return rather than corrupt the map. */
	if ((u_int)bno >= fs->fs_size) {
		printf("bad block %d, ino %d\n", bno, ip->i_number);
		ffs_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
		(int)fs->fs_cgsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	cgp = (struct cg *)bp->b_data;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgin(cgp,fs);
#endif /* REV_ENDIAN_FS */
	if (!cg_chkmagic(cgp)) {
#if REV_ENDIAN_FS
		if (rev_endian)
			byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
		brelse(bp);
		return;
	}
	cgp->cg_time = time.tv_sec;
	bno = dtogd(fs, bno);
	if (size == fs->fs_bsize) {
		/* Freeing a full block. */
		blkno = fragstoblks(fs, bno);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			printf("dev = 0x%x, block = %d, fs = %s\n",
			    ip->i_dev, bno, fs->fs_fsmnt);
			panic("blkfree: freeing free block");
		}
		ffs_setblock(fs, cg_blksfree(cgp), blkno);
		ffs_clusteracct(fs, cgp, blkno, 1);
		cgp->cg_cs.cs_nbfree++;
		fs->fs_cstotal.cs_nbfree++;
		fs->fs_cs(fs, cg).cs_nbfree++;
		i = cbtocylno(fs, bno);
		cg_blks(fs, cgp, i)[cbtorpos(fs, bno)]++;
		cg_blktot(cgp)[i]++;
	} else {
		bbase = bno - fragnum(fs, bno);
		/*
		 * decrement the counts associated with the old frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/*
		 * deallocate the fragment
		 */
		frags = numfrags(fs, size);
		for (i = 0; i < frags; i++) {
			if (isset(cg_blksfree(cgp), bno + i)) {
				printf("dev = 0x%x, block = %d, fs = %s\n",
				    ip->i_dev, bno + i, fs->fs_fsmnt);
				panic("blkfree: freeing free frag");
			}
			setbit(cg_blksfree(cgp), bno + i);
		}
		/* i == frags here: the number of fragments just freed. */
		cgp->cg_cs.cs_nffree += i;
		fs->fs_cstotal.cs_nffree += i;
		fs->fs_cs(fs, cg).cs_nffree += i;
		/*
		 * add back in counts associated with the new frags
		 */
		blk = blkmap(fs, cg_blksfree(cgp), bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
		/*
		 * if a complete block has been reassembled, account for it
		 */
		blkno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, cg_blksfree(cgp), blkno)) {
			cgp->cg_cs.cs_nffree -= fs->fs_frag;
			fs->fs_cstotal.cs_nffree -= fs->fs_frag;
			fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
			ffs_clusteracct(fs, cgp, blkno, 1);
			cgp->cg_cs.cs_nbfree++;
			fs->fs_cstotal.cs_nbfree++;
			fs->fs_cs(fs, cg).cs_nbfree++;
			i = cbtocylno(fs, bbase);
			cg_blks(fs, cgp, i)[cbtorpos(fs, bbase)]++;
			cg_blktot(cgp)[i]++;
		}
	}
	fs->fs_fmod = 1;
#if REV_ENDIAN_FS
	if (rev_endian)
		byte_swap_cgout(cgp,fs);
#endif /* REV_ENDIAN_FS */
	bdwrite(bp);
}

#if DIAGNOSTIC
/*
 * Verify allocation of a block or fragment. Returns true if block or
 * fragment is allocated, false if it is free.
+ */ +ffs_checkblk(ip, bno, size) + struct inode *ip; + ufs_daddr_t bno; + long size; +{ + struct fs *fs; + struct cg *cgp; + struct buf *bp; + int i, error, frags, free; +#if REV_ENDIAN_FS + struct vnode *vp=ITOV(ip); + struct mount *mp=vp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + fs = ip->i_fs; + if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { + printf("bsize = %d, size = %d, fs = %s\n", + fs->fs_bsize, size, fs->fs_fsmnt); + panic("checkblk: bad size"); + } + if ((u_int)bno >= fs->fs_size) + panic("checkblk: bad block %d", bno); + error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))), + (int)fs->fs_cgsize, NOCRED, &bp); + if (error) { + brelse(bp); + return; + } + cgp = (struct cg *)bp->b_data; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgin(cgp,fs); +#endif /* REV_ENDIAN_FS */ + if (!cg_chkmagic(cgp)) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgout(cgp,fs); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + return; + } + bno = dtogd(fs, bno); + if (size == fs->fs_bsize) { + free = ffs_isblock(fs, cg_blksfree(cgp), fragstoblks(fs, bno)); + } else { + frags = numfrags(fs, size); + for (free = 0, i = 0; i < frags; i++) + if (isset(cg_blksfree(cgp), bno + i)) + free++; + if (free != 0 && free != frags) + panic("checkblk: partially free fragment"); + } +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgout(cgp,fs); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + return (!free); +} +#endif /* DIAGNOSTIC */ + +/* + * Free an inode. + * + * The specified inode is placed back in the free map. 
+ */ +int +ffs_vfree(ap) + struct vop_vfree_args /* { + struct vnode *a_pvp; + ino_t a_ino; + int a_mode; + } */ *ap; +{ + register struct fs *fs; + register struct cg *cgp; + register struct inode *pip; + ino_t ino = ap->a_ino; + struct buf *bp; + int error, cg; +#if REV_ENDIAN_FS + struct vnode *vp=ap->a_pvp; + struct mount *mp=vp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + pip = VTOI(ap->a_pvp); + fs = pip->i_fs; + if ((u_int)ino >= fs->fs_ipg * fs->fs_ncg) + panic("ifree: range: dev = 0x%x, ino = %d, fs = %s\n", + pip->i_dev, ino, fs->fs_fsmnt); + cg = ino_to_cg(fs, ino); + error = bread(pip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), + (int)fs->fs_cgsize, NOCRED, &bp); + if (error) { + brelse(bp); + return (0); + } + cgp = (struct cg *)bp->b_data; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgin(cgp,fs); +#endif /* REV_ENDIAN_FS */ + if (!cg_chkmagic(cgp)) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgout(cgp,fs); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + return (0); + } + cgp->cg_time = time.tv_sec; + ino %= fs->fs_ipg; + if (isclr(cg_inosused(cgp), ino)) { + printf("dev = 0x%x, ino = %d, fs = %s\n", + pip->i_dev, ino, fs->fs_fsmnt); + if (fs->fs_ronly == 0) + panic("ifree: freeing free inode"); + } + clrbit(cg_inosused(cgp), ino); + if (ino < cgp->cg_irotor) + cgp->cg_irotor = ino; + cgp->cg_cs.cs_nifree++; + fs->fs_cstotal.cs_nifree++; + fs->fs_cs(fs, cg).cs_nifree++; + if ((ap->a_mode & IFMT) == IFDIR) { + cgp->cg_cs.cs_ndir--; + fs->fs_cstotal.cs_ndir--; + fs->fs_cs(fs, cg).cs_ndir--; + } + fs->fs_fmod = 1; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgout(cgp,fs); +#endif /* REV_ENDIAN_FS */ + bdwrite(bp); + return (0); +} + +/* + * Find a block of the specified size in the specified cylinder group. + * + * It is a panic if a request is made to find a block if none are + * available. 
+ */ +static ufs_daddr_t +ffs_mapsearch(fs, cgp, bpref, allocsiz) + register struct fs *fs; + register struct cg *cgp; + ufs_daddr_t bpref; + int allocsiz; +{ + ufs_daddr_t bno; + int start, len, loc, i; + int blk, field, subfield, pos; + + /* + * find the fragment by searching through the free block + * map for an appropriate bit pattern + */ + if (bpref) + start = dtogd(fs, bpref) / NBBY; + else + start = cgp->cg_frotor / NBBY; + len = howmany(fs->fs_fpg, NBBY) - start; + loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[start], + (u_char *)fragtbl[fs->fs_frag], + (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); + if (loc == 0) { + len = start + 1; + start = 0; + loc = scanc((u_int)len, (u_char *)&cg_blksfree(cgp)[0], + (u_char *)fragtbl[fs->fs_frag], + (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); + if (loc == 0) { + printf("start = %d, len = %d, fs = %s\n", + start, len, fs->fs_fsmnt); + panic("ffs_alloccg: map corrupted"); + /* NOTREACHED */ + } + } + bno = (start + len - loc) * NBBY; + cgp->cg_frotor = bno; + /* + * found the byte in the map + * sift through the bits to find the selected frag + */ + for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { + blk = blkmap(fs, cg_blksfree(cgp), bno); + blk <<= 1; + field = around[allocsiz]; + subfield = inside[allocsiz]; + for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { + if ((blk & field) == subfield) + return (bno + pos); + field <<= 1; + subfield <<= 1; + } + } + printf("bno = %d, fs = %s\n", bno, fs->fs_fsmnt); + panic("ffs_alloccg: block not in map"); + return (-1); +} + +/* + * Update the cluster map because of an allocation or free. + * + * Cnt == 1 means free; cnt == -1 means allocating. 
+ */ +ffs_clusteracct(fs, cgp, blkno, cnt) + struct fs *fs; + struct cg *cgp; + ufs_daddr_t blkno; + int cnt; +{ + int32_t *sump; + int32_t *lp; + u_char *freemapp, *mapp; + int i, start, end, forw, back, map, bit; + + if (fs->fs_contigsumsize <= 0) + return; + freemapp = cg_clustersfree(cgp); + sump = cg_clustersum(cgp); + /* + * Allocate or clear the actual block. + */ + if (cnt > 0) + setbit(freemapp, blkno); + else + clrbit(freemapp, blkno); + /* + * Find the size of the cluster going forward. + */ + start = blkno + 1; + end = start + fs->fs_contigsumsize; + if (end >= cgp->cg_nclusterblks) + end = cgp->cg_nclusterblks; + mapp = &freemapp[start / NBBY]; + map = *mapp++; + bit = 1 << (start % NBBY); + for (i = start; i < end; i++) { + if ((map & bit) == 0) + break; + if ((i & (NBBY - 1)) != (NBBY - 1)) { + bit <<= 1; + } else { + map = *mapp++; + bit = 1; + } + } + forw = i - start; + /* + * Find the size of the cluster going backward. + */ + start = blkno - 1; + end = start - fs->fs_contigsumsize; + if (end < 0) + end = -1; + mapp = &freemapp[start / NBBY]; + map = *mapp--; + bit = 1 << (start % NBBY); + for (i = start; i > end; i--) { + if ((map & bit) == 0) + break; + if ((i & (NBBY - 1)) != 0) { + bit >>= 1; + } else { + map = *mapp--; + bit = 1 << (NBBY - 1); + } + } + back = start - i; + /* + * Account for old cluster and the possibly new forward and + * back clusters. + */ + i = back + forw + 1; + if (i > fs->fs_contigsumsize) + i = fs->fs_contigsumsize; + sump[i] += cnt; + if (back > 0) + sump[back] -= cnt; + if (forw > 0) + sump[forw] -= cnt; + /* + * Update cluster summary information. + */ + lp = &sump[fs->fs_contigsumsize]; + for (i = fs->fs_contigsumsize; i > 0; i--) + if (*lp-- > 0) + break; + fs->fs_maxcluster[cgp->cg_cgx] = i; +} + +/* + * Fserr prints the name of a file system with an error diagnostic. 
+ * + * The form of the error message is: + * fs: error message + */ +static void +ffs_fserr(fs, uid, cp) + struct fs *fs; + u_int uid; + char *cp; +{ + + log(LOG_ERR, "uid %d on %s: %s\n", uid, fs->fs_fsmnt, cp); +} diff --git a/bsd/ufs/ffs/ffs_balloc.c b/bsd/ufs/ffs/ffs_balloc.c new file mode 100644 index 000000000..a6c61ac8c --- /dev/null +++ b/bsd/ufs/ffs/ffs_balloc.c @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ffs_balloc.c 8.8 (Berkeley) 6/16/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#if REV_ENDIAN_FS +#include +#endif /* REV_ENDIAN_FS */ + +#include + +#include +#include +#include + +#include +#include + +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +/* + * Balloc defines the structure of file system storage + * by allocating the physical blocks on a device given + * the inode and the logical block number in a file. 
+ */ +ffs_balloc(ip, lbn, size, cred, bpp, flags, blk_alloc) + register struct inode *ip; + register ufs_daddr_t lbn; + int size; + struct ucred *cred; + struct buf **bpp; + int flags; + int * blk_alloc; +{ + register struct fs *fs; + register ufs_daddr_t nb; + struct buf *bp, *nbp; + struct vnode *vp = ITOV(ip); + struct indir indirs[NIADDR + 2]; + ufs_daddr_t newb, *bap, pref; + int deallocated, osize, nsize, num, i, error; + ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1]; + int devBlockSize=0; + int alloc_buffer = 1; +#if REV_ENDIAN_FS + struct mount *mp=vp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + *bpp = NULL; + if (lbn < 0) + return (EFBIG); + fs = ip->i_fs; + if (flags & B_NOBUFF) + alloc_buffer = 0; + + if (blk_alloc) + *blk_alloc = 0; + + /* + * If the next write will extend the file into a new block, + * and the file is currently composed of a fragment + * this fragment has to be extended to be a full block. + */ + nb = lblkno(fs, ip->i_size); + if (nb < NDADDR && nb < lbn) { + /* the filesize prior to this write can fit in direct + * blocks (ie. fragmentaion is possibly done) + * we are now extending the file write beyond + * the block which has end of file prior to this write + */ + osize = blksize(fs, ip, nb); + /* osize gives disk allocated size in the last block. 
It is + * either in fragments or a file system block size */ + if (osize < fs->fs_bsize && osize > 0) { + /* few fragments are already allocated,since the + * current extends beyond this block + * allocate the complete block as fragments are only + * in last block + */ + error = ffs_realloccg(ip, nb, + ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]), + osize, (int)fs->fs_bsize, cred, &bp); + if (error) + return (error); + /* adjust the innode size we just grew */ + /* it is in nb+1 as nb starts from 0 */ + ip->i_size = (nb + 1) * fs->fs_bsize; + if (UBCISVALID(vp)) + ubc_setsize(vp, (off_t)ip->i_size); /* XXX check error */ + ip->i_db[nb] = dbtofsb(fs, bp->b_blkno); + ip->i_flag |= IN_CHANGE | IN_UPDATE; + if ((flags & B_SYNC) || (!alloc_buffer)) { + if (!alloc_buffer) + SET(bp->b_flags, B_INVAL); + bwrite(bp); + } else + bawrite(bp); + /* note that bp is already released here */ + } + } + /* + * The first NDADDR blocks are direct blocks + */ + if (lbn < NDADDR) { + nb = ip->i_db[lbn]; + if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) { + if (alloc_buffer) { + error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + return (error); + } + *bpp = bp; + } + return (0); + } + if (nb != 0) { + /* + * Consider need to reallocate a fragment. 
+ */ + osize = fragroundup(fs, blkoff(fs, ip->i_size)); + nsize = fragroundup(fs, size); + if (nsize <= osize) { + if (alloc_buffer) { + error = bread(vp, lbn, osize, NOCRED, &bp); + if (error) { + brelse(bp); + return (error); + } + ip->i_flag |= IN_CHANGE | IN_UPDATE; + *bpp = bp; + return (0); + } + else { + ip->i_flag |= IN_CHANGE | IN_UPDATE; + return (0); + } + } else { + error = ffs_realloccg(ip, lbn, + ffs_blkpref(ip, lbn, (int)lbn, + &ip->i_db[0]), osize, nsize, cred, &bp); + if (error) + return (error); + ip->i_db[lbn] = dbtofsb(fs, bp->b_blkno); + ip->i_flag |= IN_CHANGE | IN_UPDATE; + if(!alloc_buffer) { + SET(bp->b_flags, B_INVAL); + bwrite(bp); + } else + *bpp = bp; + return (0); + + } + } else { + if (ip->i_size < (lbn + 1) * fs->fs_bsize) + nsize = fragroundup(fs, size); + else + nsize = fs->fs_bsize; + error = ffs_alloc(ip, lbn, + ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]), + nsize, cred, &newb); + if (error) + return (error); + if (alloc_buffer) { + bp = getblk(vp, lbn, nsize, 0, 0, BLK_WRITE); + bp->b_blkno = fsbtodb(fs, newb); + if (flags & B_CLRBUF) + clrbuf(bp); + } + ip->i_db[lbn] = newb; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + if (blk_alloc) { + *blk_alloc = nsize; + } + if (alloc_buffer) + *bpp = bp; + return (0); + } + } + /* + * Determine the number of levels of indirection. + */ + pref = 0; + if (error = ufs_getlbns(vp, lbn, indirs, &num)) + return(error); +#if DIAGNOSTIC + if (num < 1) + panic ("ffs_balloc: ufs_bmaparray returned indirect block\n"); +#endif + /* + * Fetch the first indirect block allocating if necessary. 
+ */ + --num; + nb = ip->i_ib[indirs[0].in_off]; + allocib = NULL; + allocblk = allociblk; + if (nb == 0) { + pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0); + if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, + cred, &newb)) + return (error); + nb = newb; + *allocblk++ = nb; + bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META); + bp->b_blkno = fsbtodb(fs, nb); + clrbuf(bp); + /* + * Write synchronously so that indirect blocks + * never point at garbage. + */ + if (error = bwrite(bp)) + goto fail; + allocib = &ip->i_ib[indirs[0].in_off]; + *allocib = nb; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } + /* + * Fetch through the indirect blocks, allocating as necessary. + */ + for (i = 1;;) { + error = meta_bread(vp, + indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + goto fail; + } + bap = (ufs_daddr_t *)bp->b_data; +#if REV_ENDIAN_FS + if (rev_endian) + nb = NXSwapLong(bap[indirs[i].in_off]); + else { +#endif /* REV_ENDIAN_FS */ + nb = bap[indirs[i].in_off]; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + if (i == num) + break; + i += 1; + if (nb != 0) { + brelse(bp); + continue; + } + if (pref == 0) + pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0); + if (error = + ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) { + brelse(bp); + goto fail; + } + nb = newb; + *allocblk++ = nb; + nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META); + nbp->b_blkno = fsbtodb(fs, nb); + clrbuf(nbp); + /* + * Write synchronously so that indirect blocks + * never point at garbage. + */ + if (error = bwrite(nbp)) { + brelse(bp); + goto fail; + } +#if REV_ENDIAN_FS + if (rev_endian) + bap[indirs[i - 1].in_off] = NXSwapLong(nb); + else { +#endif /* REV_ENDIAN_FS */ + bap[indirs[i - 1].in_off] = nb; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + /* + * If required, write synchronously, otherwise use + * delayed write. 
+ */ + if (flags & B_SYNC) { + bwrite(bp); + } else { + bdwrite(bp); + } + } + /* + * Get the data block, allocating if necessary. + */ + if (nb == 0) { + pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]); + if (error = ffs_alloc(ip, + lbn, pref, (int)fs->fs_bsize, cred, &newb)) { + brelse(bp); + goto fail; + } + nb = newb; + *allocblk++ = nb; +#if REV_ENDIAN_FS + if (rev_endian) + bap[indirs[i].in_off] = NXSwapLong(nb); + else { +#endif /* REV_ENDIAN_FS */ + bap[indirs[i].in_off] = nb; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + /* + * If required, write synchronously, otherwise use + * delayed write. + */ + if ((flags & B_SYNC)) { + bwrite(bp); + } else { + bdwrite(bp); + } + if(alloc_buffer ) { + nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE); + nbp->b_blkno = fsbtodb(fs, nb); + if (flags & B_CLRBUF) + clrbuf(nbp); + } + if (blk_alloc) { + *blk_alloc = fs->fs_bsize; + } + if(alloc_buffer) + *bpp = nbp; + + return (0); + } + brelse(bp); + if (alloc_buffer) { + if (flags & B_CLRBUF) { + error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp); + if (error) { + brelse(nbp); + goto fail; + } + } else { + nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, BLK_WRITE); + nbp->b_blkno = fsbtodb(fs, nb); + } + *bpp = nbp; + } + return (0); +fail: + /* + * If we have failed part way through block allocation, we + * have to deallocate any indirect blocks that we have allocated. + */ + for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) { + ffs_blkfree(ip, *blkp, fs->fs_bsize); + deallocated += fs->fs_bsize; + } + if (allocib != NULL) + *allocib = 0; + if (deallocated) { + VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize); + +#if QUOTA + /* + * Restore user's disk quota because allocation failed. 
+ */ + (void) chkdq(ip, (long)-btodb(deallocated, devBlockSize), cred, FORCE); +#endif /* QUOTA */ + ip->i_blocks -= btodb(deallocated, devBlockSize); + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } + return (error); +} + +/* + * ffs_blkalloc allocates a disk block for ffs_pageout(), as a consequence + * it does no breads (that could lead to deadblock as the page may be already + * marked busy as it is being paged out. Also important to note that we are not + * growing the file in pageouts. So ip->i_size cannot increase by this call + * due to the way UBC works. + * This code is derived from ffs_balloc and many cases of that are dealt + * in ffs_balloc are not applicable here + * Do not call with B_CLRBUF flags as this should only be called only + * from pageouts + */ +ffs_blkalloc(ip, lbn, size, cred, flags) + register struct inode *ip; + ufs_daddr_t lbn; + int size; + struct ucred *cred; + int flags; +{ + register struct fs *fs; + register ufs_daddr_t nb; + struct buf *bp, *nbp; + struct vnode *vp = ITOV(ip); + struct indir indirs[NIADDR + 2]; + ufs_daddr_t newb, *bap, pref; + int deallocated, osize, nsize, num, i, error; + ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1]; + int devBlockSize=0; +#if REV_ENDIAN_FS + struct mount *mp=vp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + fs = ip->i_fs; + + if(size > fs->fs_bsize) + panic("ffs_blkalloc: too large for allocation\n"); + + /* + * If the next write will extend the file into a new block, + * and the file is currently composed of a fragment + * this fragment has to be extended to be a full block. 
+ */ + nb = lblkno(fs, ip->i_size); + if (nb < NDADDR && nb < lbn) { + panic("ffs_blkalloc():cannot extend file: i_size %d, lbn %d\n", ip->i_size, lbn); + } + /* + * The first NDADDR blocks are direct blocks + */ + if (lbn < NDADDR) { + nb = ip->i_db[lbn]; + if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) { + /* TBD: trivial case; the block is already allocated */ + return (0); + } + if (nb != 0) { + /* + * Consider need to reallocate a fragment. + */ + osize = fragroundup(fs, blkoff(fs, ip->i_size)); + nsize = fragroundup(fs, size); + if (nsize > osize) { + panic("ffs_allocblk: trying to extend + a fragment \n"); + } + return(0); + } else { + if (ip->i_size < (lbn + 1) * fs->fs_bsize) + nsize = fragroundup(fs, size); + else + nsize = fs->fs_bsize; + error = ffs_alloc(ip, lbn, + ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]), + nsize, cred, &newb); + if (error) + return (error); + ip->i_db[lbn] = newb; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + return (0); + } + } + /* + * Determine the number of levels of indirection. + */ + pref = 0; + if (error = ufs_getlbns(vp, lbn, indirs, &num)) + return(error); + + if(num == 0) { + panic("ffs_blkalloc: file with direct blocks only\n"); + } + + /* + * Fetch the first indirect block allocating if necessary. + */ + --num; + nb = ip->i_ib[indirs[0].in_off]; + allocib = NULL; + allocblk = allociblk; + if (nb == 0) { + pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0); + if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, + cred, &newb)) + return (error); + nb = newb; + *allocblk++ = nb; + bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, BLK_META); + bp->b_blkno = fsbtodb(fs, nb); + clrbuf(bp); + /* + * Write synchronously so that indirect blocks + * never point at garbage. + */ + if (error = bwrite(bp)) + goto fail; + allocib = &ip->i_ib[indirs[0].in_off]; + *allocib = nb; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } + /* + * Fetch through the indirect blocks, allocating as necessary. 
+ */ + for (i = 1;;) { + error = meta_bread(vp, + indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp); + if (error) { + brelse(bp); + goto fail; + } + bap = (ufs_daddr_t *)bp->b_data; +#if REV_ENDIAN_FS + if (rev_endian) + nb = NXSwapLong(bap[indirs[i].in_off]); + else { +#endif /* REV_ENDIAN_FS */ + nb = bap[indirs[i].in_off]; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + if (i == num) + break; + i += 1; + if (nb != 0) { + brelse(bp); + continue; + } + if (pref == 0) + pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0); + if (error = + ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) { + brelse(bp); + goto fail; + } + nb = newb; + *allocblk++ = nb; + nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, BLK_META); + nbp->b_blkno = fsbtodb(fs, nb); + clrbuf(nbp); + /* + * Write synchronously so that indirect blocks + * never point at garbage. + */ + if (error = bwrite(nbp)) { + brelse(bp); + goto fail; + } +#if REV_ENDIAN_FS + if (rev_endian) + bap[indirs[i - 1].in_off] = NXSwapLong(nb); + else { +#endif /* REV_ENDIAN_FS */ + bap[indirs[i - 1].in_off] = nb; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + /* + * If required, write synchronously, otherwise use + * delayed write. + */ + if (flags & B_SYNC) { + bwrite(bp); + } else { + bdwrite(bp); + } + } + /* + * Get the data block, allocating if necessary. + */ + if (nb == 0) { + pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]); + if (error = ffs_alloc(ip, + lbn, pref, (int)fs->fs_bsize, cred, &newb)) { + brelse(bp); + goto fail; + } + nb = newb; + *allocblk++ = nb; +#if REV_ENDIAN_FS + if (rev_endian) + bap[indirs[i].in_off] = NXSwapLong(nb); + else { +#endif /* REV_ENDIAN_FS */ + bap[indirs[i].in_off] = nb; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + /* + * If required, write synchronously, otherwise use + * delayed write. 
+ */ + if (flags & B_SYNC) { + bwrite(bp); + } else { + bdwrite(bp); + } + return (0); + } + brelse(bp); + return (0); +fail: + /* + * If we have failed part way through block allocation, we + * have to deallocate any indirect blocks that we have allocated. + */ + for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) { + ffs_blkfree(ip, *blkp, fs->fs_bsize); + deallocated += fs->fs_bsize; + } + if (allocib != NULL) + *allocib = 0; + if (deallocated) { + VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize); + +#if QUOTA + /* + * Restore user's disk quota because allocation failed. + */ + (void) chkdq(ip, (long)-btodb(deallocated, devBlockSize), cred, FORCE); +#endif /* QUOTA */ + ip->i_blocks -= btodb(deallocated, devBlockSize); + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } + return (error); +} diff --git a/bsd/ufs/ffs/ffs_extern.h b/bsd/ufs/ffs/ffs_extern.h new file mode 100644 index 000000000..c4e660c43 --- /dev/null +++ b/bsd/ufs/ffs/ffs_extern.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ + +/*- + * Copyright (c) 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ffs_extern.h 8.6 (Berkeley) 3/30/95 + */ + +/* + * Sysctl values for the fast filesystem. 
+ */ +#define FFS_CLUSTERREAD 1 /* cluster reading enabled */ +#define FFS_CLUSTERWRITE 2 /* cluster writing enabled */ +#define FFS_REALLOCBLKS 3 /* block reallocation enabled */ +#define FFS_ASYNCFREE 4 /* asynchronous block freeing enabled */ +#define FFS_MAXID 5 /* number of valid ffs ids */ + +#define FFS_NAMES { \ + { 0, 0 }, \ + { "doclusterread", CTLTYPE_INT }, \ + { "doclusterwrite", CTLTYPE_INT }, \ + { "doreallocblks", CTLTYPE_INT }, \ + { "doasyncfree", CTLTYPE_INT }, \ +} + +struct buf; +struct fid; +struct fs; +struct inode; +struct mount; +struct nameidata; +struct proc; +struct statfs; +struct timeval; +struct ucred; +struct uio; +struct vnode; +struct mbuf; +struct vfsconf; + +__BEGIN_DECLS +int ffs_alloc __P((struct inode *, + ufs_daddr_t, ufs_daddr_t, int, struct ucred *, ufs_daddr_t *)); +int ffs_balloc __P((struct inode *, + ufs_daddr_t, int, struct ucred *, struct buf **, int, int *)); +int ffs_blkatoff __P((struct vop_blkatoff_args *)); +int ffs_blkfree __P((struct inode *, ufs_daddr_t, long)); +ufs_daddr_t ffs_blkpref __P((struct inode *, ufs_daddr_t, int, ufs_daddr_t *)); +int ffs_bmap __P((struct vop_bmap_args *)); +void ffs_clrblock __P((struct fs *, u_char *, ufs_daddr_t)); +int ffs_fhtovp __P((struct mount *, struct fid *, struct mbuf *, + struct vnode **, int *, struct ucred **)); +void ffs_fragacct __P((struct fs *, int, int32_t [], int)); +int ffs_fsync __P((struct vop_fsync_args *)); +int ffs_init __P((struct vfsconf *)); +int ffs_isblock __P((struct fs *, u_char *, ufs_daddr_t)); +int ffs_mount __P((struct mount *, + char *, caddr_t, struct nameidata *, struct proc *)); +int ffs_mountfs __P((struct vnode *, struct mount *, struct proc *)); +int ffs_mountroot __P((void)); +int ffs_read __P((struct vop_read_args *)); +int ffs_reallocblks __P((struct vop_reallocblks_args *)); +int ffs_realloccg __P((struct inode *, + ufs_daddr_t, ufs_daddr_t, int, int, struct ucred *, struct buf **)); +int ffs_reclaim __P((struct vop_reclaim_args *)); 
+void ffs_setblock __P((struct fs *, u_char *, ufs_daddr_t)); +int ffs_statfs __P((struct mount *, struct statfs *, struct proc *)); +int ffs_sync __P((struct mount *, int, struct ucred *, struct proc *)); +int ffs_sysctl __P((int *, u_int, void *, size_t *, void *, size_t, + struct proc *)); +int ffs_truncate __P((struct vop_truncate_args *)); +int ffs_unmount __P((struct mount *, int, struct proc *)); +int ffs_update __P((struct vop_update_args *)); +int ffs_valloc __P((struct vop_valloc_args *)); +int ffs_vfree __P((struct vop_vfree_args *)); +int ffs_vget __P((struct mount *, ino_t, struct vnode **)); +int ffs_vptofh __P((struct vnode *, struct fid *)); +int ffs_write __P((struct vop_write_args *)); +int ffs_pagein __P((struct vop_pagein_args *)); +int ffs_pageout __P((struct vop_pageout_args *)); +int ffs_blktooff __P((struct vop_blktooff_args *)); +int ffs_offtoblk __P((struct vop_offtoblk_args *)); + +#if DIAGNOSTIC +void ffs_checkoverlap __P((struct buf *, struct inode *)); +#endif +__END_DECLS + +extern int (**ffs_vnodeop_p)(void *); +extern int (**ffs_specop_p)(void *); +#if FIFO +extern int (**ffs_fifoop_p)(void *); +#define FFS_FIFOOPS ffs_fifoop_p +#else +#define FFS_FIFOOPS NULL +#endif diff --git a/bsd/ufs/ffs/ffs_inode.c b/bsd/ufs/ffs/ffs_inode.c new file mode 100644 index 000000000..3b7b72985 --- /dev/null +++ b/bsd/ufs/ffs/ffs_inode.c @@ -0,0 +1,588 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ffs_inode.c 8.13 (Berkeley) 4/21/95 + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include + +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +static int ffs_indirtrunc __P((struct inode *, ufs_daddr_t, ufs_daddr_t, + ufs_daddr_t, int, long *)); + +/* + * Update the access, modified, and inode change times as specified by the + * IACCESS, IUPDATE, and ICHANGE flags respectively. The IMODIFIED flag is + * used to specify that the inode needs to be updated but that the times have + * already been set. The access and modified times are taken from the second + * and third parameters; the inode change time is always taken from the current + * time. If waitfor is set, then wait for the disk write of the inode to + * complete. 
+ */ +int +ffs_update(ap) + struct vop_update_args /* { + struct vnode *a_vp; + struct timeval *a_access; + struct timeval *a_modify; + int a_waitfor; + } */ *ap; +{ + register struct fs *fs; + struct buf *bp; + struct inode *ip; + int error; +#if REV_ENDIAN_FS + struct mount *mp=(ap->a_vp)->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + ip = VTOI(ap->a_vp); + if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) { + ip->i_flag &= + ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + return (0); + } + if ((ip->i_flag & + (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) + return (0); + if (ip->i_flag & IN_ACCESS) + ip->i_atime = ap->a_access->tv_sec; + if (ip->i_flag & IN_UPDATE) { + ip->i_mtime = ap->a_modify->tv_sec; + ip->i_modrev++; + } + if (ip->i_flag & IN_CHANGE) + ip->i_ctime = time.tv_sec; + ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE); + fs = ip->i_fs; + /* + * Ensure that uid and gid are correct. This is a temporary + * fix until fsck has been changed to do the update. 
+ */ + if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ + ip->i_din.di_ouid = ip->i_uid; /* XXX */ + ip->i_din.di_ogid = ip->i_gid; /* XXX */ + } /* XXX */ + if (error = bread(ip->i_devvp, + fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), + (int)fs->fs_bsize, NOCRED, &bp)) { + brelse(bp); + return (error); + } +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_inode_out(ip, ((struct dinode *)bp->b_data + ino_to_fsbo(fs, ip->i_number))); + else { +#endif /* REV_ENDIAN_FS */ + *((struct dinode *)bp->b_data + + ino_to_fsbo(fs, ip->i_number)) = ip->i_din; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + + if (ap->a_waitfor && (ap->a_vp->v_mount->mnt_flag & MNT_ASYNC) == 0) + return (bwrite(bp)); + else { + bdwrite(bp); + return (0); + } +} + +#define SINGLE 0 /* index of single indirect block */ +#define DOUBLE 1 /* index of double indirect block */ +#define TRIPLE 2 /* index of triple indirect block */ +/* + * Truncate the inode oip to at most length size, freeing the + * disk blocks. + */ +ffs_truncate(ap) + struct vop_truncate_args /* { + struct vnode *a_vp; + off_t a_length; + int a_flags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *ovp = ap->a_vp; + ufs_daddr_t lastblock; + register struct inode *oip; + ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR]; + ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR]; + off_t length = ap->a_length; + register struct fs *fs; + struct buf *bp; + int offset, size, level; + long count, nblocks, vflags, blocksreleased = 0; + struct timeval tv; + register int i; + int aflags, error, allerror; + off_t osize; + int devBlockSize=0; + + if (length < 0) + return (EINVAL); + + oip = VTOI(ovp); + fs = oip->i_fs; + + if (length > fs->fs_maxfilesize) + return (EFBIG); + + tv = time; + if (ovp->v_type == VLNK && + oip->i_size < ovp->v_mount->mnt_maxsymlinklen) { +#if DIAGNOSTIC + if (length != 0) + panic("ffs_truncate: partial truncate of symlink"); +#endif + bzero((char 
*)&oip->i_shortlink, (u_int)oip->i_size); + oip->i_size = 0; + oip->i_flag |= IN_CHANGE | IN_UPDATE; + return (VOP_UPDATE(ovp, &tv, &tv, 1)); + } + + if (oip->i_size == length) { + oip->i_flag |= IN_CHANGE | IN_UPDATE; + return (VOP_UPDATE(ovp, &tv, &tv, 0)); + } +#if QUOTA + if (error = getinoquota(oip)) + return (error); +#endif + osize = oip->i_size; + + /* + * Lengthen the size of the file. We must ensure that the + * last byte of the file is allocated. Since the smallest + * value of osize is 0, length will be at least 1. + */ + if (osize < length) { + offset = blkoff(fs, length - 1); + lbn = lblkno(fs, length - 1); + aflags = B_CLRBUF; + if (ap->a_flags & IO_SYNC) + aflags |= B_SYNC; + if (error = ffs_balloc(oip, lbn, offset + 1, ap->a_cred, &bp, + aflags , 0)) + return (error); + oip->i_size = length; + + if (UBCINFOEXISTS(ovp)) { + bp->b_flags |= B_INVAL; + bwrite(bp); + ubc_setsize(ovp, (off_t)length); + } else { + if (aflags & B_SYNC) + bwrite(bp); + else + bawrite(bp); + } + oip->i_flag |= IN_CHANGE | IN_UPDATE; + return (VOP_UPDATE(ovp, &tv, &tv, 1)); + } + /* + * Shorten the size of the file. If the file is not being + * truncated to a block boundry, the contents of the + * partial block following the end of the file must be + * zero'ed in case it ever become accessable again because + * of subsequent file growth. + */ + if (UBCINFOEXISTS(ovp)) + ubc_setsize(ovp, (off_t)length); + + vflags = ((length > 0) ? 
V_SAVE : 0) | V_SAVEMETA; + allerror = vinvalbuf(ovp, vflags, ap->a_cred, ap->a_p, 0, 0); + + + offset = blkoff(fs, length); + if (offset == 0) { + oip->i_size = length; + } else { + lbn = lblkno(fs, length); + aflags = B_CLRBUF; + if (ap->a_flags & IO_SYNC) + aflags |= B_SYNC; + if (error = ffs_balloc(oip, lbn, offset, ap->a_cred, &bp, + aflags, 0)) + return (error); + oip->i_size = length; + size = blksize(fs, oip, lbn); + bzero((char *)bp->b_data + offset, (u_int)(size - offset)); + allocbuf(bp, size); + if (UBCINFOEXISTS(ovp)) { + bp->b_flags |= B_INVAL; + bwrite(bp); + } else { + if (aflags & B_SYNC) + bwrite(bp); + else + bawrite(bp); + } + } + /* + * Calculate index into inode's block list of + * last direct and indirect blocks (if any) + * which we want to keep. Lastblock is -1 when + * the file is truncated to 0. + */ + lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1; + lastiblock[SINGLE] = lastblock - NDADDR; + lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs); + lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs); + VOP_DEVBLOCKSIZE(oip->i_devvp,&devBlockSize); + nblocks = btodb(fs->fs_bsize, devBlockSize); + + /* + * Update file and block pointers on disk before we start freeing + * blocks. If we crash before free'ing blocks below, the blocks + * will be returned to the free list. lastiblock values are also + * normalized to -1 for calls to ffs_indirtrunc below. + */ + bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks); + for (level = TRIPLE; level >= SINGLE; level--) + if (lastiblock[level] < 0) { + oip->i_ib[level] = 0; + lastiblock[level] = -1; + } + for (i = NDADDR - 1; i > lastblock; i--) + oip->i_db[i] = 0; + oip->i_flag |= IN_CHANGE | IN_UPDATE; + if (error = VOP_UPDATE(ovp, &tv, &tv, MNT_WAIT)) + allerror = error; + /* + * Having written the new inode to disk, save its new configuration + * and put back the old block pointers long enough to process them. 
+ * Note that we save the new block configuration so we can check it + * when we are done. + */ + bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks); + bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks); + oip->i_size = osize; + vflags = ((length > 0) ? V_SAVE : 0) | V_SAVEMETA; + allerror = vinvalbuf(ovp, vflags, ap->a_cred, ap->a_p, 0, 0); + + /* + * Indirect blocks first. + */ + indir_lbn[SINGLE] = -NDADDR; + indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1; + indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1; + for (level = TRIPLE; level >= SINGLE; level--) { + bn = oip->i_ib[level]; + if (bn != 0) { + error = ffs_indirtrunc(oip, indir_lbn[level], + fsbtodb(fs, bn), lastiblock[level], level, &count); + if (error) + allerror = error; + blocksreleased += count; + if (lastiblock[level] < 0) { + oip->i_ib[level] = 0; + ffs_blkfree(oip, bn, fs->fs_bsize); + blocksreleased += nblocks; + } + } + if (lastiblock[level] >= 0) + goto done; + } + + /* + * All whole direct blocks or frags. + */ + for (i = NDADDR - 1; i > lastblock; i--) { + register long bsize; + + bn = oip->i_db[i]; + if (bn == 0) + continue; + oip->i_db[i] = 0; + bsize = blksize(fs, oip, i); + ffs_blkfree(oip, bn, bsize); + blocksreleased += btodb(bsize, devBlockSize); + } + if (lastblock < 0) + goto done; + + /* + * Finally, look for a change in size of the + * last direct block; release any frags. + */ + bn = oip->i_db[lastblock]; + if (bn != 0) { + long oldspace, newspace; + + /* + * Calculate amount of space we're giving + * back as old block size minus new block size. + */ + oldspace = blksize(fs, oip, lastblock); + oip->i_size = length; + newspace = blksize(fs, oip, lastblock); + if (newspace == 0) + panic("itrunc: newspace"); + if (oldspace - newspace > 0) { + /* + * Block number of space to be free'd is + * the old block # plus the number of frags + * required for the storage we're keeping. 
+ */ + bn += numfrags(fs, newspace); + ffs_blkfree(oip, bn, oldspace - newspace); + blocksreleased += btodb(oldspace - newspace, devBlockSize); + } + } +done: +#if DIAGNOSTIC + for (level = SINGLE; level <= TRIPLE; level++) + if (newblks[NDADDR + level] != oip->i_ib[level]) + panic("itrunc1"); + for (i = 0; i < NDADDR; i++) + if (newblks[i] != oip->i_db[i]) + panic("itrunc2"); + if (length == 0 && + (ovp->v_dirtyblkhd.lh_first || ovp->v_cleanblkhd.lh_first)) + panic("itrunc3"); +#endif /* DIAGNOSTIC */ + /* + * Put back the real size. + */ + oip->i_size = length; + oip->i_blocks -= blocksreleased; + if (oip->i_blocks < 0) /* sanity */ + oip->i_blocks = 0; + oip->i_flag |= IN_CHANGE; +#if QUOTA + (void) chkdq(oip, -blocksreleased, NOCRED, 0); +#endif + return (allerror); +} + +/* + * Release blocks associated with the inode ip and stored in the indirect + * block bn. Blocks are free'd in LIFO order up to (but not including) + * lastbn. If level is greater than SINGLE, the block is an indirect block + * and recursive calls to indirtrunc must be used to cleanse other indirect + * blocks. + * + * NB: triple indirect blocks are untested. + */ +static int +ffs_indirtrunc(ip, lbn, dbn, lastbn, level, countp) + register struct inode *ip; + ufs_daddr_t lbn, lastbn; + ufs_daddr_t dbn; + int level; + long *countp; +{ + register int i; + struct buf *bp; + struct buf *tbp; + register struct fs *fs = ip->i_fs; + register ufs_daddr_t *bap; + struct vnode *vp=ITOV(ip); + ufs_daddr_t *copy, nb, nlbn, last; + long blkcount, factor; + int nblocks, blocksreleased = 0; + int error = 0, allerror = 0; + int devBlockSize=0; +#if REV_ENDIAN_FS + struct mount *mp=vp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + /* + * Calculate index in current block of last + * block to be kept. -1 indicates the entire + * block so we need not calculate the index. 
+ */ + factor = 1; + for (i = SINGLE; i < level; i++) + factor *= NINDIR(fs); + last = lastbn; + if (lastbn > 0) + last /= factor; + VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize); + nblocks = btodb(fs->fs_bsize, devBlockSize); + + /* Doing a MALLOC here is asking for trouble. We can still + * deadlock on pagerfile lock, in case we are running + * low on memory and block in MALLOC + */ + + tbp = geteblk(fs->fs_bsize); + copy = (ufs_daddr_t *)tbp->b_data; + + /* + * Get buffer of block pointers, zero those entries corresponding + * to blocks to be free'd, and update on disk copy first. Since + * double(triple) indirect before single(double) indirect, calls + * to bmap on these blocks will fail. However, we already have + * the on disk address, so we have to set the b_blkno field + * explicitly instead of letting bread do everything for us. + */ + + vp = ITOV(ip); + bp = getblk(vp, lbn, (int)fs->fs_bsize, 0, 0, BLK_META); + if (bp->b_flags & (B_DONE | B_DELWRI)) { + /* Braces must be here in case trace evaluates to nothing. */ + trace(TR_BREADHIT, pack(vp, fs->fs_bsize), lbn); + } else { + trace(TR_BREADMISS, pack(vp, fs->fs_bsize), lbn); + current_proc()->p_stats->p_ru.ru_inblock++; /* pay for read */ + bp->b_flags |= B_READ; + if (bp->b_bcount > bp->b_bufsize) + panic("ffs_indirtrunc: bad buffer size"); + bp->b_blkno = dbn; + VOP_STRATEGY(bp); + error = biowait(bp); + } + if (error) { + brelse(bp); + *countp = 0; + brelse(tbp); + return (error); + } + + bap = (ufs_daddr_t *)bp->b_data; + bcopy((caddr_t)bap, (caddr_t)copy, (u_int)fs->fs_bsize); + bzero((caddr_t)&bap[last + 1], + (u_int)(NINDIR(fs) - (last + 1)) * sizeof (ufs_daddr_t)); + if (last == -1) + bp->b_flags |= B_INVAL; + error = bwrite(bp); + if (error) + allerror = error; + bap = copy; + + /* + * Recursively free totally unused blocks. 
+ */ + for (i = NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last; + i--, nlbn += factor) { +#if REV_ENDIAN_FS + if (rev_endian) + nb = NXSwapLong(bap[i]); + else { +#endif /* REV_ENDIAN_FS */ + nb = bap[i]; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + if (nb == 0) + continue; + if (level > SINGLE) { + if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb), + (ufs_daddr_t)-1, level - 1, &blkcount)) + allerror = error; + blocksreleased += blkcount; + } + ffs_blkfree(ip, nb, fs->fs_bsize); + blocksreleased += nblocks; + } + + /* + * Recursively free last partial block. + */ + if (level > SINGLE && lastbn >= 0) { + last = lastbn % factor; +#if REV_ENDIAN_FS + if (rev_endian) + nb = NXSwapLong(bap[i]); + else { +#endif /* REV_ENDIAN_FS */ + nb = bap[i]; +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + if (nb != 0) { + if (error = ffs_indirtrunc(ip, nlbn, fsbtodb(fs, nb), + last, level - 1, &blkcount)) + allerror = error; + blocksreleased += blkcount; + } + } + brelse(tbp); + *countp = blocksreleased; + return (allerror); +} + diff --git a/bsd/ufs/ffs/ffs_subr.c b/bsd/ufs/ffs/ffs_subr.c new file mode 100644 index 000000000..2431d8d2b --- /dev/null +++ b/bsd/ufs/ffs/ffs_subr.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ffs_subr.c 8.5 (Berkeley) 3/21/95 + */ + +#include +#include +#if REV_ENDIAN_FS +#include +#endif /* REV_ENDIAN_FS */ + +#ifndef KERNEL +#include +#include +#else + +#include +#include +#include +#include +#include +#include +#include +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +/* + * Return buffer with the contents of block "offset" from the beginning of + * directory "ip". If "res" is non-zero, fill it in with a pointer to the + * remaining space in the directory. + */ +int +ffs_blkatoff(ap) + struct vop_blkatoff_args /* { + struct vnode *a_vp; + off_t a_offset; + char **a_res; + struct buf **a_bpp; + } */ *ap; +{ + struct inode *ip; + register struct fs *fs; + struct buf *bp; + ufs_daddr_t lbn; + int bsize, error; +#if REV_ENDIAN_FS + struct mount *mp=(ap->a_vp)->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + ip = VTOI(ap->a_vp); + fs = ip->i_fs; + lbn = lblkno(fs, ap->a_offset); + bsize = blksize(fs, ip, lbn); + + *ap->a_bpp = NULL; + if (error = bread(ap->a_vp, lbn, bsize, NOCRED, &bp)) { + brelse(bp); + return (error); + } +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_dir_block_in(bp->b_data, bp->b_bcount); +#endif /* REV_ENDIAN_FS */ + + if (ap->a_res) + *ap->a_res = (char *)bp->b_data + blkoff(fs, ap->a_offset); + *ap->a_bpp = bp; + return (0); +} +#endif + +/* + * Update the frsum fields to reflect addition or deletion + * of some frags. 
+ */ +void +ffs_fragacct(fs, fragmap, fraglist, cnt) + struct fs *fs; + int fragmap; + int32_t fraglist[]; + int cnt; +{ + int inblk; + register int field, subfield; + register int siz, pos; + + inblk = (int)(fragtbl[fs->fs_frag][fragmap]) << 1; + fragmap <<= 1; + for (siz = 1; siz < fs->fs_frag; siz++) { + if ((inblk & (1 << (siz + (fs->fs_frag % NBBY)))) == 0) + continue; + field = around[siz]; + subfield = inside[siz]; + for (pos = siz; pos <= fs->fs_frag; pos++) { + if ((fragmap & field) == subfield) { + fraglist[siz] += cnt; + pos += siz; + field <<= siz; + subfield <<= siz; + } + field <<= 1; + subfield <<= 1; + } + } +} + +#if defined(KERNEL) && DIAGNOSTIC +void +ffs_checkoverlap(bp, ip) + struct buf *bp; + struct inode *ip; +{ + register struct buf *ebp, *ep; + register ufs_daddr_t start, last; + struct vnode *vp; +#ifdef NeXT + int devBlockSize=0; +#endif /* NeXT */ + + ebp = &buf[nbuf]; + start = bp->b_blkno; +#ifdef NeXT + VOP_DEVBLOCKSIZE(ip->i_devvp,&devBlockSize); + last = start + btodb(bp->b_bcount, devBlockSize) - 1; +#else + last = start + btodb(bp->b_bcount) - 1; +#endif /* NeXT */ + for (ep = buf; ep < ebp; ep++) { + if (ep == bp || (ep->b_flags & B_INVAL) || + ep->b_vp == NULLVP) + continue; + if (VOP_BMAP(ep->b_vp, (ufs_daddr_t)0, &vp, (ufs_daddr_t)0, + NULL)) + continue; + if (vp != ip->i_devvp) + continue; + /* look for overlap */ +#ifdef NeXT + if (ep->b_bcount == 0 || ep->b_blkno > last || + ep->b_blkno + btodb(ep->b_bcount, devBlockSize) <= start) + continue; + vprint("Disk overlap", vp); + (void)printf("\tstart %d, end %d overlap start %d, end %d\n", + start, last, ep->b_blkno, + ep->b_blkno + btodb(ep->b_bcount, devBlockSize) - 1); +#else + if (ep->b_bcount == 0 || ep->b_blkno > last || + ep->b_blkno + btodb(ep->b_bcount) <= start) + continue; + vprint("Disk overlap", vp); + (void)printf("\tstart %d, end %d overlap start %d, end %d\n", + start, last, ep->b_blkno, + ep->b_blkno + btodb(ep->b_bcount) - 1); +#endif /* NeXT */ + panic("Disk 
buffer overlap"); + } +} +#endif /* DIAGNOSTIC */ + +/* + * block operations + * + * check if a block is available + */ +int +ffs_isblock(fs, cp, h) + struct fs *fs; + unsigned char *cp; + ufs_daddr_t h; +{ + unsigned char mask; + + switch ((int)fs->fs_frag) { + case 8: + return (cp[h] == 0xff); + case 4: + mask = 0x0f << ((h & 0x1) << 2); + return ((cp[h >> 1] & mask) == mask); + case 2: + mask = 0x03 << ((h & 0x3) << 1); + return ((cp[h >> 2] & mask) == mask); + case 1: + mask = 0x01 << (h & 0x7); + return ((cp[h >> 3] & mask) == mask); + default: + panic("ffs_isblock"); + } +} + +/* + * take a block out of the map + */ +void +ffs_clrblock(fs, cp, h) + struct fs *fs; + u_char *cp; + ufs_daddr_t h; +{ + + switch ((int)fs->fs_frag) { + case 8: + cp[h] = 0; + return; + case 4: + cp[h >> 1] &= ~(0x0f << ((h & 0x1) << 2)); + return; + case 2: + cp[h >> 2] &= ~(0x03 << ((h & 0x3) << 1)); + return; + case 1: + cp[h >> 3] &= ~(0x01 << (h & 0x7)); + return; + default: + panic("ffs_clrblock"); + } +} + +/* + * put a block into the map + */ +void +ffs_setblock(fs, cp, h) + struct fs *fs; + unsigned char *cp; + ufs_daddr_t h; +{ + + switch ((int)fs->fs_frag) { + + case 8: + cp[h] = 0xff; + return; + case 4: + cp[h >> 1] |= (0x0f << ((h & 0x1) << 2)); + return; + case 2: + cp[h >> 2] |= (0x03 << ((h & 0x3) << 1)); + return; + case 1: + cp[h >> 3] |= (0x01 << (h & 0x7)); + return; + default: + panic("ffs_setblock"); + } +} diff --git a/bsd/ufs/ffs/ffs_tables.c b/bsd/ufs/ffs/ffs_tables.c new file mode 100644 index 000000000..b0bec64c9 --- /dev/null +++ b/bsd/ufs/ffs/ffs_tables.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ffs_tables.c 8.1 (Berkeley) 6/11/93 + */ + +#include + +/* + * Bit patterns for identifying fragments in the block map + * used as ((map & around) == inside) + */ +int around[9] = { + 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff +}; +int inside[9] = { + 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe +}; + +/* + * Given a block map bit pattern, the frag tables tell whether a + * particular size fragment is available. + * + * used as: + * if ((1 << (size - 1)) & fragtbl[fs->fs_frag][map] { + * at least one fragment of the indicated size is available + * } + * + * These tables are used by the scanc instruction on the VAX to + * quickly find an appropriate fragment. 
+ */ +u_char fragtbl124[256] = { + 0x00, 0x16, 0x16, 0x2a, 0x16, 0x16, 0x26, 0x4e, + 0x16, 0x16, 0x16, 0x3e, 0x2a, 0x3e, 0x4e, 0x8a, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x2a, 0x3e, 0x3e, 0x2a, 0x3e, 0x3e, 0x2e, 0x6e, + 0x3e, 0x3e, 0x3e, 0x3e, 0x2a, 0x3e, 0x6e, 0xaa, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x26, 0x36, 0x36, 0x2e, 0x36, 0x36, 0x26, 0x6e, + 0x36, 0x36, 0x36, 0x3e, 0x2e, 0x3e, 0x6e, 0xae, + 0x4e, 0x5e, 0x5e, 0x6e, 0x5e, 0x5e, 0x6e, 0x4e, + 0x5e, 0x5e, 0x5e, 0x7e, 0x6e, 0x7e, 0x4e, 0xce, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x16, 0x16, 0x16, 0x3e, 0x16, 0x16, 0x36, 0x5e, + 0x16, 0x16, 0x16, 0x3e, 0x3e, 0x3e, 0x5e, 0x9e, + 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x7e, + 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x7e, 0xbe, + 0x2a, 0x3e, 0x3e, 0x2a, 0x3e, 0x3e, 0x2e, 0x6e, + 0x3e, 0x3e, 0x3e, 0x3e, 0x2a, 0x3e, 0x6e, 0xaa, + 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x7e, + 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x3e, 0x7e, 0xbe, + 0x4e, 0x5e, 0x5e, 0x6e, 0x5e, 0x5e, 0x6e, 0x4e, + 0x5e, 0x5e, 0x5e, 0x7e, 0x6e, 0x7e, 0x4e, 0xce, + 0x8a, 0x9e, 0x9e, 0xaa, 0x9e, 0x9e, 0xae, 0xce, + 0x9e, 0x9e, 0x9e, 0xbe, 0xaa, 0xbe, 0xce, 0x8a, +}; + +u_char fragtbl8[256] = { + 0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, + 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, + 0x02, 0x03, 0x03, 0x02, 0x03, 
0x03, 0x02, 0x06, + 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11, + 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, + 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0a, + 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, + 0x08, 0x09, 0x09, 0x0a, 0x10, 0x11, 0x20, 0x40, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11, + 0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, + 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, + 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21, + 0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, + 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0a, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, + 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0a, 0x12, + 0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, + 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0c, + 0x08, 0x09, 0x09, 0x0a, 0x09, 0x09, 0x0a, 0x0c, + 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80, +}; + +/* + * The actual fragtbl array. + */ +u_char *fragtbl[MAXFRAG + 1] = { + 0, fragtbl124, fragtbl124, 0, fragtbl124, 0, 0, 0, fragtbl8, +}; diff --git a/bsd/ufs/ffs/ffs_vfsops.c b/bsd/ufs/ffs/ffs_vfsops.c new file mode 100644 index 000000000..10c78d5fb --- /dev/null +++ b/bsd/ufs/ffs/ffs_vfsops.c @@ -0,0 +1,1284 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +int ffs_sbupdate __P((struct ufsmount *, int)); + +struct vfsops ufs_vfsops = { + ffs_mount, + ufs_start, + ffs_unmount, + ufs_root, + ufs_quotactl, + ffs_statfs, + ffs_sync, + ffs_vget, + ffs_fhtovp, + ffs_vptofh, + ffs_init, + ffs_sysctl, +}; + +extern u_long nextgennumber; + +/* + * Called by main() when ufs is going to be mounted as root. + */ +ffs_mountroot() +{ + extern struct vnode *rootvp; + struct fs *fs; + struct mount *mp; + struct proc *p = current_proc(); /* XXX */ + struct ufsmount *ump; + u_int size; + int error; + + /* + * Get vnode for rootdev. 
+ */ + if (error = bdevvp(rootdev, &rootvp)) { + printf("ffs_mountroot: can't setup bdevvp"); + return (error); + } + if (error = vfs_rootmountalloc("ufs", "root_device", &mp)) + return (error); + + /* Must set the MNT_ROOTFS flag before doing the actual mount */ + mp->mnt_flag |= MNT_ROOTFS; + + if (error = ffs_mountfs(rootvp, mp, p)) { + mp->mnt_vfc->vfc_refcount--; + vfs_unbusy(mp, p); + _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + return (error); + } + simple_lock(&mountlist_slock); + CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); + simple_unlock(&mountlist_slock); + ump = VFSTOUFS(mp); + fs = ump->um_fs; + (void) copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0); + (void)ffs_statfs(mp, &mp->mnt_stat, p); + vfs_unbusy(mp, p); + inittodr(fs->fs_time); + return (0); +} + +/* + * VFS Operations. + * + * mount system call + */ +int +ffs_mount(mp, path, data, ndp, p) + register struct mount *mp; + char *path; + caddr_t data; + struct nameidata *ndp; + struct proc *p; +{ + struct vnode *devvp; + struct ufs_args args; + struct ufsmount *ump; + register struct fs *fs; + u_int size; + int error, flags; + mode_t accessmode; + int ronly; + int reload = 0; + + if (error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args))) + return (error); + /* + * If updating, check whether changing from read-only to + * read/write; if there is no device name, that's all we do. 
+ */ + if (mp->mnt_flag & MNT_UPDATE) { + ump = VFSTOUFS(mp); + fs = ump->um_fs; + if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) { + flags = WRITECLOSE; + if (mp->mnt_flag & MNT_FORCE) + flags |= FORCECLOSE; + if (error = ffs_flushfiles(mp, flags, p)) + return (error); + fs->fs_clean = 1; + fs->fs_ronly = 1; + if (error = ffs_sbupdate(ump, MNT_WAIT)) { + fs->fs_clean = 0; + fs->fs_ronly = 0; + return (error); + } + } + /* save fs_ronly to later use */ + ronly = fs->fs_ronly; + if ((mp->mnt_flag & MNT_RELOAD) || ronly) + reload = 1; + if ((reload) && + (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p))) + return (error); + /* replace the ronly after load */ + fs->fs_ronly = ronly; + /* + * Do not update the file system if the user was in singleuser + * and then tries to mount -uw without fscking + */ + if (!fs->fs_clean && ronly) { + printf("WARNING: trying to mount a dirty file system\n"); + if (issingleuser() && (mp->mnt_flag & MNT_ROOTFS)) { + printf("WARNING: R/W mount of %s denied. Filesystem is not clean - run fsck\n",fs->fs_fsmnt); + /* + * Reset the readonly bit as reload might have + * modified this bit + */ + fs->fs_ronly = 1; + return(EPERM); + } + } + + if (ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) { + /* + * If upgrade to read-write by non-root, then verify + * that user has necessary permissions on the device. + */ + if (p->p_ucred->cr_uid != 0) { + devvp = ump->um_devvp; + vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); + if (error = VOP_ACCESS(devvp, VREAD | VWRITE, + p->p_ucred, p)) { + VOP_UNLOCK(devvp, 0, p); + return (error); + } + VOP_UNLOCK(devvp, 0, p); + } + fs->fs_ronly = 0; + fs->fs_clean = 0; + (void) ffs_sbupdate(ump, MNT_WAIT); + } + if (args.fspec == 0) { + /* + * Process export requests. + */ + return (vfs_export(mp, &ump->um_export, &args.export)); + } + } + /* + * Not an update, or updating the name: look up the name + * and verify that it refers to a sensible block device. 
+ */ + NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p); + if (error = namei(ndp)) + return (error); + devvp = ndp->ni_vp; + + if (devvp->v_type != VBLK) { + vrele(devvp); + return (ENOTBLK); + } + if (major(devvp->v_rdev) >= nblkdev) { + vrele(devvp); + return (ENXIO); + } + /* + * If mount by non-root, then verify that user has necessary + * permissions on the device. + */ + if (p->p_ucred->cr_uid != 0) { + accessmode = VREAD; + if ((mp->mnt_flag & MNT_RDONLY) == 0) + accessmode |= VWRITE; + vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); + if (error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p)) { + vput(devvp); + return (error); + } + VOP_UNLOCK(devvp, 0, p); + } + if ((mp->mnt_flag & MNT_UPDATE) == 0) + error = ffs_mountfs(devvp, mp, p); + else { + if (devvp != ump->um_devvp) + error = EINVAL; /* needs translation */ + else + vrele(devvp); + } + if (error) { + vrele(devvp); + return (error); + } + ump = VFSTOUFS(mp); + fs = ump->um_fs; + (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size); + bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size); + bcopy((caddr_t)fs->fs_fsmnt, (caddr_t)mp->mnt_stat.f_mntonname, + MNAMELEN); + (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); + (void)ffs_statfs(mp, &mp->mnt_stat, p); + return (0); +} + +/* + * Reload all incore data for a filesystem (used after running fsck on + * the root filesystem and finding things to fix). The filesystem must + * be mounted read-only. + * + * Things to do to update the mount: + * 1) invalidate all cached meta-data. + * 2) re-read superblock from disk. + * 3) re-read summary information from disk. + * 4) invalidate all inactive vnodes. + * 5) invalidate all cached file data. + * 6) re-read inode data for all active vnodes. 
+ */ +ffs_reload(mountp, cred, p) + register struct mount *mountp; + struct ucred *cred; + struct proc *p; +{ + register struct vnode *vp, *nvp, *devvp; + struct inode *ip; + struct csum *space; + struct buf *bp; + struct fs *fs, *newfs; + int i, blks, size, error; + int32_t *lp; +#if REV_ENDIAN_FS + int rev_endian = (mountp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + if ((mountp->mnt_flag & MNT_RDONLY) == 0) + return (EINVAL); + /* + * Step 1: invalidate all cached meta-data. + */ + devvp = VFSTOUFS(mountp)->um_devvp; + if (vinvalbuf(devvp, 0, cred, p, 0, 0)) + panic("ffs_reload: dirty1"); + /* + * Step 2: re-read superblock from disk. + */ + VOP_DEVBLOCKSIZE(devvp,&size); + + if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) { + brelse(bp); + return (error); + } + newfs = (struct fs *)bp->b_data; +#if REV_ENDIAN_FS + if (rev_endian) { + byte_swap_sbin(newfs); + } +#endif /* REV_ENDIAN_FS */ + if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE || + newfs->fs_bsize < sizeof(struct fs)) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_sbout(newfs); +#endif /* REV_ENDIAN_FS */ + + brelse(bp); + return (EIO); /* XXX needs translation */ + } + fs = VFSTOUFS(mountp)->um_fs; + /* + * Copy pointer fields back into superblock before copying in XXX + * new superblock. These should really be in the ufsmount. XXX + * Note that important parameters (eg fs_ncg) are unchanged. + */ + bcopy(&fs->fs_csp[0], &newfs->fs_csp[0], sizeof(fs->fs_csp)); + newfs->fs_maxcluster = fs->fs_maxcluster; + bcopy(newfs, fs, (u_int)fs->fs_sbsize); + if (fs->fs_sbsize < SBSIZE) + bp->b_flags |= B_INVAL; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_sbout(newfs); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; + ffs_oldfscompat(fs); + /* + * Step 3: re-read summary information from disk. 
+ */ + blks = howmany(fs->fs_cssize, fs->fs_fsize); + space = fs->fs_csp[0]; + for (i = 0; i < blks; i += fs->fs_frag) { + size = fs->fs_bsize; + if (i + fs->fs_frag > blks) + size = (blks - i) * fs->fs_fsize; + if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, + NOCRED, &bp)) { + brelse(bp); + return (error); + } +#if REV_ENDIAN_FS + if (rev_endian) { + /* csum swaps */ + byte_swap_ints((int *)bp->b_data, size / sizeof(int)); + } +#endif /* REV_ENDIAN_FS */ + bcopy(bp->b_data, fs->fs_csp[fragstoblks(fs, i)], (u_int)size); +#if REV_ENDIAN_FS + if (rev_endian) { + /* csum swaps */ + byte_swap_ints((int *)bp->b_data, size / sizeof(int)); + } +#endif /* REV_ENDIAN_FS */ + brelse(bp); + } + /* + * We no longer know anything about clusters per cylinder group. + */ + if (fs->fs_contigsumsize > 0) { + lp = fs->fs_maxcluster; + for (i = 0; i < fs->fs_ncg; i++) + *lp++ = fs->fs_contigsumsize; + } + +loop: + simple_lock(&mntvnode_slock); + for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { + if (vp->v_mount != mountp) { + simple_unlock(&mntvnode_slock); + goto loop; + } + nvp = vp->v_mntvnodes.le_next; + /* + * Step 4: invalidate all inactive vnodes. + */ + if (vrecycle(vp, &mntvnode_slock, p)) + goto loop; + /* + * Step 5: invalidate all cached file data. + */ + simple_lock(&vp->v_interlock); + simple_unlock(&mntvnode_slock); + if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) { + goto loop; + } + if (vinvalbuf(vp, 0, cred, p, 0, 0)) + panic("ffs_reload: dirty2"); + /* + * Step 6: re-read inode data for all active vnodes. 
+ */ + ip = VTOI(vp); + if (error = + bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), + (int)fs->fs_bsize, NOCRED, &bp)) { + brelse(bp); + vput(vp); + return (error); + } +#if REV_ENDIAN_FS + if (rev_endian) { + byte_swap_inode_in(((struct dinode *)bp->b_data + + ino_to_fsbo(fs, ip->i_number)), ip); + } else { +#endif /* REV_ENDIAN_FS */ + ip->i_din = *((struct dinode *)bp->b_data + + ino_to_fsbo(fs, ip->i_number)); +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + brelse(bp); + vput(vp); + simple_lock(&mntvnode_slock); + } + simple_unlock(&mntvnode_slock); + return (0); +} + +/* + * Common code for mount and mountroot + */ +int +ffs_mountfs(devvp, mp, p) + register struct vnode *devvp; + struct mount *mp; + struct proc *p; +{ + register struct ufsmount *ump; + struct buf *bp; + register struct fs *fs; + dev_t dev; + struct buf *cgbp; + struct cg *cgp; + int32_t clustersumoff; + caddr_t base, space; + int error, i, blks, size, ronly; + int32_t *lp; + struct ucred *cred; + extern struct vnode *rootvp; + u_int64_t maxfilesize; /* XXX */ + u_int dbsize = DEV_BSIZE; +#if REV_ENDIAN_FS + int rev_endian=0; +#endif /* REV_ENDIAN_FS */ + dev = devvp->v_rdev; + cred = p ? p->p_ucred : NOCRED; + /* + * Disallow multiple mounts of the same device. + * Disallow mounting of a device that is currently in use + * (except for root, which might share swap device for miniroot). + * Flush out any old buffers remaining from a previous use. + */ + if (error = vfs_mountedon(devvp)) + return (error); + if (vcount(devvp) > 1 && devvp != rootvp) + return (EBUSY); + if (error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) + return (error); + + ronly = (mp->mnt_flag & MNT_RDONLY) != 0; + if (error = VOP_OPEN(devvp, ronly ? 
FREAD : FREAD|FWRITE, FSCRED, p)) + return (error); + + VOP_DEVBLOCKSIZE(devvp,&size); + + bp = NULL; + ump = NULL; + if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, cred, &bp)) + goto out; + fs = (struct fs *)bp->b_data; +#if REV_ENDIAN_FS + if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE || + fs->fs_bsize < sizeof(struct fs)) { + int magic = fs->fs_magic; + + byte_swap_ints(&magic, 1); + if (magic != FS_MAGIC) { + error = EINVAL; + goto out; + } + byte_swap_sbin(fs); + if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE || + fs->fs_bsize < sizeof(struct fs)) { + byte_swap_sbout(fs); + error = EINVAL; /* XXX needs translation */ + goto out; + } + rev_endian=1; + } +#endif /* REV_ENDIAN_FS */ + if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE || + fs->fs_bsize < sizeof(struct fs)) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_sbout(fs); +#endif /* REV_ENDIAN_FS */ + error = EINVAL; /* XXX needs translation */ + goto out; + } + + + /* + * Buffer cache does not handle multiple pages in a buf when + * invalidating incore buffer in pageout. There are no locks + * in the pageout path. So there is a danger of loosing data when + * block allocation happens at the same time a pageout of buddy + * page occurs. incore() returns buf with both + * pages, this leads vnode-pageout to incorrectly flush of entire. + * buf. Till the low level ffs code is modified to deal with these + * do not mount any FS more than 4K size. 
+ */ + /* + * Can't mount filesystems with a fragment size less than DIRBLKSIZ + */ + /* + * Don't mount dirty filesystems, except for the root filesystem + */ + if ((fs->fs_bsize > PAGE_SIZE) || (fs->fs_fsize < DIRBLKSIZ) || + ((!(mp->mnt_flag & MNT_ROOTFS)) && (!fs->fs_clean))) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_sbout(fs); +#endif /* REV_ENDIAN_FS */ + error = ENOTSUP; + goto out; + } + + /* Let's figure out the devblock size the file system is with */ + /* the device block size = fragment size / number of sectors per frag */ + + dbsize = fs->fs_fsize / NSPF(fs); + if(dbsize <= 0 ) { + kprintf("device blocksize computaion failed\n"); + } else { + if (VOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, &dbsize, FWRITE, NOCRED, + p) != 0) { + kprintf("failed to set device blocksize\n"); + } + /* force the specfs to reread blocksize from size() */ + set_fsblocksize(devvp); + } + + + /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */ + if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_sbout(fs); +#endif /* REV_ENDIAN_FS */ + error = EROFS; /* needs translation */ + goto out; + } + + /* If we are not mounting read only, then check for overlap + * condition in cylinder group's free block map. + * If overlap exists, then force this into a read only mount + * to avoid further corruption. 
PR#2216969 + */ + if (ronly == 0){ + if (error = bread (devvp, fsbtodb(fs, cgtod(fs, 0)), + (int)fs->fs_cgsize, NOCRED, &cgbp)) { + brelse(cgbp); + goto out; + } + cgp = (struct cg *)cgbp->b_data; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgin(cgp,fs); +#endif /* REV_ENDIAN_FS */ + if (!cg_chkmagic(cgp)){ +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgout(cgp,fs); +#endif /* REV_ENDIAN_FS */ + brelse(cgbp); + goto out; + } + if (cgp->cg_clustersumoff != 0) { + /* Check for overlap */ + clustersumoff = cgp->cg_freeoff + + howmany(fs->fs_cpg * fs->fs_spc / NSPF(fs), NBBY); + clustersumoff = roundup(clustersumoff, sizeof(long)); + if (cgp->cg_clustersumoff < clustersumoff) { + /* Overlap exists */ + mp->mnt_flag |= MNT_RDONLY; + ronly = 1; + } + } +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_cgout(cgp,fs); +#endif /* REV_ENDIAN_FS */ + brelse(cgbp); + } + + ump = _MALLOC(sizeof *ump, M_UFSMNT, M_WAITOK); + bzero((caddr_t)ump, sizeof *ump); + ump->um_fs = _MALLOC((u_long)fs->fs_sbsize, M_UFSMNT, + M_WAITOK); + bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize); + if (fs->fs_sbsize < SBSIZE) + bp->b_flags |= B_INVAL; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_sbout(fs); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + bp = NULL; + fs = ump->um_fs; + fs->fs_ronly = ronly; + size = fs->fs_cssize; + blks = howmany(size, fs->fs_fsize); + if (fs->fs_contigsumsize > 0) + size += fs->fs_ncg * sizeof(int32_t); + base = space = _MALLOC((u_long)size, M_UFSMNT, M_WAITOK); + base = space; + for (i = 0; i < blks; i += fs->fs_frag) { + size = fs->fs_bsize; + if (i + fs->fs_frag > blks) + size = (blks - i) * fs->fs_fsize; + if (error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size, + cred, &bp)) { + _FREE(base, M_UFSMNT); + goto out; + } + bcopy(bp->b_data, space, (u_int)size); +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_ints((int *) space, size / sizeof(int)); +#endif /* REV_ENDIAN_FS */ + fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space; + space += 
size; + brelse(bp); + bp = NULL; + } + if (fs->fs_contigsumsize > 0) { + fs->fs_maxcluster = lp = (int32_t *)space; + for (i = 0; i < fs->fs_ncg; i++) + *lp++ = fs->fs_contigsumsize; + } + mp->mnt_data = (qaddr_t)ump; + mp->mnt_stat.f_fsid.val[0] = (long)dev; + mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum; +#warning hardcoded max symlen and not "mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;" + mp->mnt_maxsymlinklen = 60; +#if REV_ENDIAN_FS + if (rev_endian) + mp->mnt_flag |= MNT_REVEND; +#endif /* REV_ENDIAN_FS */ + ump->um_mountp = mp; + ump->um_dev = dev; + ump->um_devvp = devvp; + ump->um_nindir = fs->fs_nindir; + ump->um_bptrtodb = fs->fs_fsbtodb; + ump->um_seqinc = fs->fs_frag; + for (i = 0; i < MAXQUOTAS; i++) + ump->um_quotas[i] = NULLVP; + devvp->v_specflags |= SI_MOUNTEDON; + ffs_oldfscompat(fs); + ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */ + maxfilesize = (u_int64_t)0x100000000; /* 4GB */ +#if 0 + maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */ +#endif /* 0 */ + if (fs->fs_maxfilesize > maxfilesize) /* XXX */ + fs->fs_maxfilesize = maxfilesize; /* XXX */ + if (ronly == 0) { + fs->fs_clean = 0; + (void) ffs_sbupdate(ump, MNT_WAIT); + } + return (0); +out: + if (bp) + brelse(bp); + (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p); + if (ump) { + _FREE(ump->um_fs, M_UFSMNT); + _FREE(ump, M_UFSMNT); + mp->mnt_data = (qaddr_t)0; + } + return (error); +} + +/* + * Sanity checks for old file systems. + * + * XXX - goes away some day. 
+ */ +ffs_oldfscompat(fs) + struct fs *fs; +{ + int i; + + fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */ + fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */ + if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */ + fs->fs_nrpos = 8; /* XXX */ + if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */ + u_int64_t sizepb = fs->fs_bsize; /* XXX */ + /* XXX */ + fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */ + for (i = 0; i < NIADDR; i++) { /* XXX */ + sizepb *= NINDIR(fs); /* XXX */ + fs->fs_maxfilesize += sizepb; /* XXX */ + } /* XXX */ + fs->fs_qbmask = ~fs->fs_bmask; /* XXX */ + fs->fs_qfmask = ~fs->fs_fmask; /* XXX */ + } /* XXX */ + return (0); +} + +/* + * unmount system call + */ +int +ffs_unmount(mp, mntflags, p) + struct mount *mp; + int mntflags; + struct proc *p; +{ + register struct ufsmount *ump; + register struct fs *fs; + int error, flags; + flags = 0; + if (mntflags & MNT_FORCE) + flags |= FORCECLOSE; + if (error = ffs_flushfiles(mp, flags, p)) + return (error); + ump = VFSTOUFS(mp); + fs = ump->um_fs; + if (fs->fs_ronly == 0) { + fs->fs_clean = 1; + if (error = ffs_sbupdate(ump, MNT_WAIT)) { + fs->fs_clean = 0; +#ifdef notyet + /* we can atleast cleanup ; as the media could be WP */ + /* & during mount, we do not check for write failures */ + /* FIXME LATER : the Correct fix would be to have */ + /* mount detect the WP media and downgrade to readonly mount */ + /* For now, here it is */ + return (error); +#endif /* notyet */ + } + } + ump->um_devvp->v_specflags &= ~SI_MOUNTEDON; + error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE, + NOCRED, p); + vrele(ump->um_devvp); + + _FREE(fs->fs_csp[0], M_UFSMNT); + _FREE(fs, M_UFSMNT); + _FREE(ump, M_UFSMNT); + mp->mnt_data = (qaddr_t)0; +#if REV_ENDIAN_FS + mp->mnt_flag &= ~MNT_REVEND; +#endif /* REV_ENDIAN_FS */ + return (error); +} + +/* + * Flush out all the files in a filesystem. 
+ */ +ffs_flushfiles(mp, flags, p) + register struct mount *mp; + int flags; + struct proc *p; +{ + register struct ufsmount *ump; + int i, error; + + ump = VFSTOUFS(mp); +#if QUOTA + if (mp->mnt_flag & MNT_QUOTA) { + if (error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) + return (error); + for (i = 0; i < MAXQUOTAS; i++) { + if (ump->um_quotas[i] == NULLVP) + continue; + quotaoff(p, mp, i); + } + /* + * Here we fall through to vflush again to ensure + * that we have gotten rid of all the system vnodes. + */ + } +#endif + error = vflush(mp, NULLVP, SKIPSWAP|flags); + error = vflush(mp, NULLVP, flags); + return (error); +} + +/* + * Get file system statistics. + */ +int +ffs_statfs(mp, sbp, p) + struct mount *mp; + register struct statfs *sbp; + struct proc *p; +{ + register struct ufsmount *ump; + register struct fs *fs; + + ump = VFSTOUFS(mp); + fs = ump->um_fs; + if (fs->fs_magic != FS_MAGIC) + panic("ffs_statfs"); + sbp->f_bsize = fs->fs_fsize; + sbp->f_iosize = fs->fs_bsize; + sbp->f_blocks = fs->fs_dsize; + sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag + + fs->fs_cstotal.cs_nffree; + sbp->f_bavail = freespace(fs, fs->fs_minfree); + sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO; + sbp->f_ffree = fs->fs_cstotal.cs_nifree; + if (sbp != &mp->mnt_stat) { + sbp->f_type = mp->mnt_vfc->vfc_typenum; + bcopy((caddr_t)mp->mnt_stat.f_mntonname, + (caddr_t)&sbp->f_mntonname[0], MNAMELEN); + bcopy((caddr_t)mp->mnt_stat.f_mntfromname, + (caddr_t)&sbp->f_mntfromname[0], MNAMELEN); + } + return (0); +} + +/* + * Go through the disk queues to initiate sandbagged IO; + * go through the inodes to write those that have been modified; + * initiate the writing of the super block if it has been modified. + * + * Note: we are always called with the filesystem marked `MPBUSY'. 
+ */
+int
+ffs_sync(mp, waitfor, cred, p)
+	struct mount *mp;
+	int waitfor;
+	struct ucred *cred;
+	struct proc *p;
+{
+	struct vnode *nvp, *vp;
+	struct inode *ip;
+	struct ufsmount *ump = VFSTOUFS(mp);
+	struct fs *fs;
+	int error, allerror = 0;
+
+	fs = ump->um_fs;
+	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {	/* XXX */
+		printf("fs = %s\n", fs->fs_fsmnt);
+		panic("update: rofs mod");
+	}
+	/*
+	 * Write back each (modified) inode.
+	 *
+	 * The mount's vnode list is walked under mntvnode_slock; the lock is
+	 * dropped around the vget/VOP_FSYNC of each vnode and the walk is
+	 * restarted from the top whenever the list may have changed.
+	 */
+	simple_lock(&mntvnode_slock);
+loop:
+	for (vp = mp->mnt_vnodelist.lh_first;
+	     vp != NULL;
+	     vp = nvp) {
+		/*
+		 * If the vnode that we are about to sync is no longer
+		 * associated with this mount point, start over.
+		 */
+		if (vp->v_mount != mp)
+			goto loop;
+		simple_lock(&vp->v_interlock);
+		nvp = vp->v_mntvnodes.le_next;
+		ip = VTOI(vp);
+		/* Skip vnodes with no dirty state to flush. */
+		if ((vp->v_type == VNON) ||
+		    ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
+		     vp->v_dirtyblkhd.lh_first == NULL && !(vp->v_flag & VHASDIRTY))) {
+			simple_unlock(&vp->v_interlock);
+			continue;
+		}
+		simple_unlock(&mntvnode_slock);
+		/* LK_INTERLOCK: vget consumes the held v_interlock. */
+		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
+		if (error) {
+			simple_lock(&mntvnode_slock);
+			if (error == ENOENT)
+				goto loop;
+			continue;
+		}
+		if (error = VOP_FSYNC(vp, cred, waitfor, p))
+			allerror = error;
+		VOP_UNLOCK(vp, 0, p);
+		vrele(vp);
+		simple_lock(&mntvnode_slock);
+	}
+	simple_unlock(&mntvnode_slock);
+	/*
+	 * Force stale file system control information to be flushed.
+	 */
+	if (error = VOP_FSYNC(ump->um_devvp, cred, waitfor, p))
+		allerror = error;
+#if QUOTA
+	qsync(mp);
+#endif
+	/*
+	 * Write back modified superblock.
+	 */
+	if (fs->fs_fmod != 0) {
+		fs->fs_fmod = 0;
+		fs->fs_time = time.tv_sec;
+		if (error = ffs_sbupdate(ump, waitfor))
+			allerror = error;
+	}
+	return (allerror);
+}
+
+/*
+ * Look up a FFS dinode number to find its incore vnode, otherwise read it
+ * in from disk.  If it is in core, wait for the lock bit to clear, then
+ * return the inode locked.
Detection and handling of mount points must be
+ * done by the calling routine.
+ */
+int
+ffs_vget(mp, ino, vpp)
+	struct mount *mp;
+	ino_t ino;
+	struct vnode **vpp;
+{
+	struct proc *p = current_proc();	/* XXX */
+	struct fs *fs;
+	struct inode *ip;
+	struct ufsmount *ump;
+	struct buf *bp;
+	struct vnode *vp;
+	dev_t dev;
+	int i, type, error;
+
+	ump = VFSTOUFS(mp);
+	dev = ump->um_dev;
+
+	/* Check for unmount in progress */
+	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
+		*vpp = NULL;
+		return (EPERM);
+	}
+
+	if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
+		vp = *vpp;
+		UBCINFOCHECK("ffs_vget", vp);
+		return (0);
+	}
+	/* Allocate a new vnode/inode. */
+	/* NOTE(review): MALLOC_ZONE/getnewvnode may sleep between the
+	 * ufs_ihashget() above and ufs_ihashins() below; verify that a
+	 * concurrent lookup of the same inode cannot insert a duplicate
+	 * in that window (classic 4.4BSD race). */
+	type = ump->um_devvp->v_tag == VT_MFS ? M_MFSNODE : M_FFSNODE; /* XXX */
+	MALLOC_ZONE(ip, struct inode *, sizeof(struct inode), type, M_WAITOK);
+	if (error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) {
+		FREE_ZONE(ip, sizeof(struct inode), type);
+		*vpp = NULL;
+		return (error);
+	}
+	bzero((caddr_t)ip, sizeof(struct inode));
+	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
+	vp->v_data = ip;
+	ip->i_vnode = vp;
+	ip->i_fs = fs = ump->um_fs;
+	ip->i_dev = dev;
+	ip->i_number = ino;
+#if QUOTA
+	for (i = 0; i < MAXQUOTAS; i++)
+		ip->i_dquot[i] = NODQUOT;
+#endif
+	/*
+	 * Put it onto its hash chain and lock it so that other requests for
+	 * this inode will block if they arrive while we are sleeping waiting
+	 * for old data structures to be purged or for the contents of the
+	 * disk portion of this inode to be read.
+	 */
+	ufs_ihashins(ip);
+
+	/* Read in the disk contents for the inode, copy into the inode. */
+	if (error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
+	    (int)fs->fs_bsize, NOCRED, &bp)) {
+		/*
+		 * The inode does not contain anything useful, so it would
+		 * be misleading to leave it on its hash chain. With mode
+		 * still zero, it will be unlinked and returned to the free
+		 * list by vput().
+		 */
+		vput(vp);
+		brelse(bp);
+		*vpp = NULL;
+		return (error);
+	}
+#if REV_ENDIAN_FS
+	/* Byte-swapped volumes: swap the dinode while copying it in. */
+	if (mp->mnt_flag & MNT_REVEND) {
+		byte_swap_inode_in(((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino)),ip);
+	} else {
+#endif /* REV_ENDIAN_FS */
+	ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
+#if REV_ENDIAN_FS
+	}
+#endif /* REV_ENDIAN_FS */
+	brelse(bp);
+
+	/*
+	 * Initialize the vnode from the inode, check for aliases.
+	 * Note that the underlying vnode may have changed.
+	 */
+	if (error = ufs_vinit(mp, ffs_specop_p, FFS_FIFOOPS, &vp)) {
+		vput(vp);
+		*vpp = NULL;
+		return (error);
+	}
+	/*
+	 * Finish inode initialization now that aliasing has been resolved.
+	 */
+	ip->i_devvp = ump->um_devvp;
+	VREF(ip->i_devvp);
+	/*
+	 * Set up a generation number for this inode if it does not
+	 * already have one. This should only happen on old filesystems.
+	 */
+	if (ip->i_gen == 0) {
+		if (++nextgennumber < (u_long)time.tv_sec)
+			nextgennumber = time.tv_sec;
+		ip->i_gen = nextgennumber;
+		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
+			ip->i_flag |= IN_MODIFIED;
+	}
+	/*
+	 * Ensure that uid and gid are correct. This is a temporary
+	 * fix until fsck has been changed to do the update.
+	 */
+	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
+		ip->i_uid = ip->i_din.di_ouid;		/* XXX */
+		ip->i_gid = ip->i_din.di_ogid;		/* XXX */
+	}						/* XXX */
+
+	*vpp = vp;
+	if (UBCINFOMISSING(vp) || UBCINFORECLAIMED(vp))
+		ubc_info_init(vp);
+	return (0);
+}
+
+/*
+ * File handle to vnode
+ *
+ * Have to be really careful about stale file handles:
+ * - check that the inode number is valid
+ * - call ffs_vget() to get the locked inode
+ * - check for an unallocated inode (i_mode == 0)
+ * - check that the given client host has export rights and return
+ *   those rights via.
exflagsp and credanonp
+ */
+int
+ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
+	register struct mount *mp;
+	struct fid *fhp;
+	struct mbuf *nam;
+	struct vnode **vpp;
+	int *exflagsp;
+	struct ucred **credanonp;
+{
+	register struct ufid *ufhp;
+	struct fs *fs;
+
+	ufhp = (struct ufid *)fhp;
+	fs = VFSTOUFS(mp)->um_fs;
+	/* Reject inode numbers outside [ROOTINO, ncg*ipg) as stale handles. */
+	if (ufhp->ufid_ino < ROOTINO ||
+	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
+		return (ESTALE);
+	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
+}
+
+/*
+ * Vnode pointer to File handle
+ */
+/* ARGSUSED */
+/* NOTE(review): K&R definition with implicit int return type. */
+ffs_vptofh(vp, fhp)
+	struct vnode *vp;
+	struct fid *fhp;
+{
+	register struct inode *ip;
+	register struct ufid *ufhp;
+
+	ip = VTOI(vp);
+	ufhp = (struct ufid *)fhp;
+	ufhp->ufid_len = sizeof(struct ufid);
+	ufhp->ufid_ino = ip->i_number;
+	ufhp->ufid_gen = ip->i_gen;
+	return (0);
+}
+
+/*
+ * Initialize the filesystem; just use ufs_init.
+ */
+int
+ffs_init(vfsp)
+	struct vfsconf *vfsp;
+{
+
+	return (ufs_init(vfsp));
+}
+
+/*
+ * fast filesystem related variables.
+ */
+/* NOTE(review): K&R definition with implicit int return type. */
+ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
+	int *name;
+	u_int namelen;
+	void *oldp;
+	size_t *oldlenp;
+	void *newp;
+	size_t newlen;
+	struct proc *p;
+{
+	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
+
+	/* all sysctl names at this level are terminal */
+	if (namelen != 1)
+		return (ENOTDIR);		/* overloaded */
+
+	switch (name[0]) {
+	case FFS_CLUSTERREAD:
+		return (sysctl_int(oldp, oldlenp, newp, newlen,
+		    &doclusterread));
+	case FFS_CLUSTERWRITE:
+		return (sysctl_int(oldp, oldlenp, newp, newlen,
+		    &doclusterwrite));
+	case FFS_REALLOCBLKS:
+		return (sysctl_int(oldp, oldlenp, newp, newlen,
+		    &doreallocblks));
+	case FFS_ASYNCFREE:
+		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
+	default:
+		return (EOPNOTSUPP);
+	}
+	/* NOTREACHED */
+}
+
+/*
+ * Write a superblock and associated information back to disk.
+ */
+int
+ffs_sbupdate(mp, waitfor)
+	struct ufsmount *mp;
+	int waitfor;
+{
+	register struct fs *dfs, *fs = mp->um_fs;
+	register struct buf *bp;
+	int blks;
+	caddr_t space;
+	int i, size, error, allerror = 0;
+	int devBlockSize=0;
+#if REV_ENDIAN_FS
+	int rev_endian=(mp->um_mountp->mnt_flag & MNT_REVEND);
+#endif /* REV_ENDIAN_FS */
+
+	/*
+	 * First write back the summary information.
+	 * The summary area is contiguous starting at fs_csp[0] and is
+	 * written out a filesystem block at a time.
+	 */
+	blks = howmany(fs->fs_cssize, fs->fs_fsize);
+	space = (caddr_t)fs->fs_csp[0];
+	for (i = 0; i < blks; i += fs->fs_frag) {
+		size = fs->fs_bsize;
+		if (i + fs->fs_frag > blks)
+			size = (blks - i) * fs->fs_fsize;
+		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
+			size, 0, 0, BLK_META);
+		bcopy(space, bp->b_data, (u_int)size);
+#if REV_ENDIAN_FS
+		if (rev_endian) {
+			byte_swap_ints((int *)bp->b_data, size / sizeof(int));
+		}
+#endif /* REV_ENDIAN_FS */
+		space += size;
+		if (waitfor != MNT_WAIT)
+			bawrite(bp);
+		else if (error = bwrite(bp))
+			allerror = error;
+	}
+	/*
+	 * Now write back the superblock itself. If any errors occurred
+	 * up to this point, then fail so that the superblock avoids
+	 * being written out as clean.
+	 */
+	if (allerror)
+		return (allerror);
+	VOP_DEVBLOCKSIZE(mp->um_devvp,&devBlockSize);
+	bp = getblk(mp->um_devvp, (SBOFF/devBlockSize), (int)fs->fs_sbsize, 0, 0, BLK_META);
+	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
+	/* Restore compatibility to old file systems.		XXX */
+	dfs = (struct fs *)bp->b_data;				/* XXX */
+	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
+		dfs->fs_nrpos = -1;				/* XXX */
+#if REV_ENDIAN_FS
+	/*
+	 * Swapping bytes here ; so that in case
+	 * of inode format < FS_44INODEFMT appropriate
+	 * fields get moved
+	 */
+	if (rev_endian) {
+		byte_swap_sbout((struct fs *)bp->b_data);
+	}
+#endif /* REV_ENDIAN_FS */
+	/* Rotate the five words starting at fs_qbmask so an old-format
+	 * superblock on disk keeps its historical field layout. XXX */
+	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
+		int32_t *lp, tmp;				/* XXX */
+								/* XXX */
+		lp = (int32_t *)&dfs->fs_qbmask;		/* XXX */
+		tmp = lp[4];					/* XXX */
+		for (i = 4; i > 0; i--)				/* XXX */
+			lp[i] = lp[i-1];			/* XXX */
+		lp[0] = tmp;					/* XXX */
+	}							/* XXX */
+#if REV_ENDIAN_FS
+	/* Note that dfs is already swapped so swap the filesize
+	 * before writing
+	 */
+	if (rev_endian) {
+		dfs->fs_maxfilesize = NXSwapLongLong(mp->um_savedmaxfilesize);	/* XXX */
+	} else {
+#endif /* REV_ENDIAN_FS */
+		dfs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */
+#if REV_ENDIAN_FS
+	}
+#endif /* REV_ENDIAN_FS */
+	if (waitfor != MNT_WAIT)
+		bawrite(bp);
+	else if (error = bwrite(bp))
+		allerror = error;
+
+	return (allerror);
+}
diff --git a/bsd/ufs/ffs/ffs_vnops.c b/bsd/ufs/ffs/ffs_vnops.c
new file mode 100644
index 000000000..8d05efe44
--- /dev/null
+++ b/bsd/ufs/ffs/ffs_vnops.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
+ */
+
+/* NOTE(review): the <header> names of every #include below were stripped by
+ * the patch extraction; restore them from the upstream xnu-123.5 tree. */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#if REV_ENDIAN_FS
+#include
+#include
+#endif /* REV_ENDIAN_FS */
+
+#define VOPFUNC int (*)(void *)
+
+/* Global vfs data structures for ufs. */
+int (**ffs_vnodeop_p)(void *);
+struct vnodeopv_entry_desc ffs_vnodeop_entries[] = {
+	{ &vop_default_desc, (VOPFUNC)vn_default_error },
+	{ &vop_lookup_desc, (VOPFUNC)ufs_lookup },		/* lookup */
+	{ &vop_create_desc, (VOPFUNC)ufs_create },		/* create */
+	{ &vop_whiteout_desc, (VOPFUNC)ufs_whiteout },		/* whiteout */
+	{ &vop_mknod_desc, (VOPFUNC)ufs_mknod },		/* mknod */
+	{ &vop_open_desc, (VOPFUNC)ufs_open },			/* open */
+	{ &vop_close_desc, (VOPFUNC)ufs_close },		/* close */
+	{ &vop_access_desc, (VOPFUNC)ufs_access },		/* access */
+	{ &vop_getattr_desc, (VOPFUNC)ufs_getattr },		/* getattr */
+	{ &vop_setattr_desc, (VOPFUNC)ufs_setattr },		/* setattr */
+	{ &vop_read_desc, (VOPFUNC)ffs_read },			/* read */
+	{ &vop_write_desc, (VOPFUNC)ffs_write },		/* write */
+	{ &vop_lease_desc, (VOPFUNC)ufs_lease_check },		/* lease */
+	{ &vop_ioctl_desc, (VOPFUNC)ufs_ioctl },		/* ioctl */
+	{ &vop_select_desc, (VOPFUNC)ufs_select },		/* select */
+	{ &vop_revoke_desc, (VOPFUNC)ufs_revoke },		/* revoke */
+	{ &vop_mmap_desc, (VOPFUNC)ufs_mmap },			/* mmap */
+	{ &vop_fsync_desc, (VOPFUNC)ffs_fsync },		/* fsync */
+	{ &vop_seek_desc, (VOPFUNC)ufs_seek },			/* seek */
+	{ &vop_remove_desc, (VOPFUNC)ufs_remove },		/* remove */
+	{ &vop_link_desc, (VOPFUNC)ufs_link },			/* link */
+	{ &vop_rename_desc, (VOPFUNC)ufs_rename },		/* rename */
+	{ &vop_mkdir_desc, (VOPFUNC)ufs_mkdir },		/* mkdir */
+	{ &vop_rmdir_desc, (VOPFUNC)ufs_rmdir },		/* rmdir */
+	{ &vop_symlink_desc, (VOPFUNC)ufs_symlink },		/* symlink */
+	{ &vop_readdir_desc, (VOPFUNC)ufs_readdir },		/* readdir */
+	{ &vop_readlink_desc, (VOPFUNC)ufs_readlink },		/* readlink */
+	{ &vop_abortop_desc, (VOPFUNC)ufs_abortop },		/* abortop */
+	{ &vop_inactive_desc, (VOPFUNC)ufs_inactive },		/* inactive */
+	{ &vop_reclaim_desc, (VOPFUNC)ffs_reclaim },		/* reclaim */
+	{ &vop_lock_desc, (VOPFUNC)ufs_lock },			/* lock */
+	{ &vop_unlock_desc, (VOPFUNC)ufs_unlock },		/* unlock */
+	{ &vop_bmap_desc, (VOPFUNC)ufs_bmap },			/* bmap */
+	{ &vop_strategy_desc, (VOPFUNC)ufs_strategy },		/* strategy */
+	{ &vop_print_desc, (VOPFUNC)ufs_print },		/* print */
+	{ &vop_islocked_desc, (VOPFUNC)ufs_islocked },		/* islocked */
+	{ &vop_pathconf_desc, (VOPFUNC)ufs_pathconf },		/* pathconf */
+	{ &vop_advlock_desc, (VOPFUNC)ufs_advlock },		/* advlock */
+	{ &vop_blkatoff_desc, (VOPFUNC)ffs_blkatoff },		/* blkatoff */
+	{ &vop_valloc_desc, (VOPFUNC)ffs_valloc },		/* valloc */
+	{ &vop_reallocblks_desc, (VOPFUNC)ffs_reallocblks },	/* reallocblks */
+	{ &vop_vfree_desc, (VOPFUNC)ffs_vfree },		/* vfree */
+	{ &vop_truncate_desc, (VOPFUNC)ffs_truncate },		/* truncate */
+	{ &vop_update_desc, (VOPFUNC)ffs_update },		/* update */
+	{ &vop_bwrite_desc, (VOPFUNC)vn_bwrite },
+	{ &vop_pagein_desc, (VOPFUNC)ffs_pagein },		/* Pagein */
+	{ &vop_pageout_desc, (VOPFUNC)ffs_pageout },		/* Pageout */
+	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copy File */
+	{ &vop_blktooff_desc, (VOPFUNC)ffs_blktooff },		/* blktooff */
+	{ &vop_offtoblk_desc, (VOPFUNC)ffs_offtoblk },		/* offtoblk */
+	{ &vop_cmap_desc, (VOPFUNC)ufs_cmap },			/* cmap */
+	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
+};
+struct vnodeopv_desc ffs_vnodeop_opv_desc =
+	{ &ffs_vnodeop_p, ffs_vnodeop_entries };
+
+int (**ffs_specop_p)(void *);
+struct vnodeopv_entry_desc ffs_specop_entries[] = {
+	{ &vop_default_desc, (VOPFUNC)vn_default_error },
+	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
+	{ &vop_create_desc, (VOPFUNC)spec_create },		/* create */
+	{ &vop_mknod_desc, (VOPFUNC)spec_mknod },		/* mknod */
+	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
+	{ &vop_close_desc, (VOPFUNC)ufsspec_close },		/* close */
+	{ &vop_access_desc, (VOPFUNC)ufs_access },		/* access */
+	{ &vop_getattr_desc, (VOPFUNC)ufs_getattr },		/* getattr */
+	{ &vop_setattr_desc, (VOPFUNC)ufs_setattr },		/* setattr */
+	{ &vop_read_desc, (VOPFUNC)ufsspec_read },		/* read */
+	{ &vop_write_desc, (VOPFUNC)ufsspec_write },		/* write */
+	{ &vop_lease_desc, (VOPFUNC)spec_lease_check },		/* lease */
+	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
+	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
+	{ &vop_revoke_desc, (VOPFUNC)spec_revoke },		/* revoke */
+	{ &vop_mmap_desc, (VOPFUNC)spec_mmap },			/* mmap */
+	{ &vop_fsync_desc, (VOPFUNC)ffs_fsync },		/* fsync */
+	{ &vop_seek_desc, (VOPFUNC)spec_seek },			/* seek */
+	{ &vop_remove_desc, (VOPFUNC)spec_remove },		/* remove */
+	{ &vop_link_desc, (VOPFUNC)spec_link },			/* link */
+	{ &vop_rename_desc, (VOPFUNC)spec_rename },		/* rename */
+	{ &vop_mkdir_desc, (VOPFUNC)spec_mkdir },		/* mkdir */
+	{ &vop_rmdir_desc, (VOPFUNC)spec_rmdir },		/* rmdir */
+	{ &vop_symlink_desc, (VOPFUNC)spec_symlink },		/* symlink */
+	{ &vop_readdir_desc, (VOPFUNC)spec_readdir },		/* readdir */
+	{ &vop_readlink_desc, (VOPFUNC)spec_readlink },		/* readlink */
+	{ &vop_abortop_desc, (VOPFUNC)spec_abortop },		/* abortop */
+	{ &vop_inactive_desc, (VOPFUNC)ufs_inactive },		/* inactive */
+	{ &vop_reclaim_desc, (VOPFUNC)ffs_reclaim },		/* reclaim */
+	{ &vop_lock_desc, (VOPFUNC)ufs_lock },			/* lock */
+	{ &vop_unlock_desc, (VOPFUNC)ufs_unlock },		/* unlock */
+	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
+	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
+	{ &vop_print_desc, (VOPFUNC)ufs_print },		/* print */
+	{ &vop_islocked_desc, (VOPFUNC)ufs_islocked },		/* islocked */
+	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
+	{ &vop_advlock_desc, (VOPFUNC)spec_advlock },		/* advlock */
+	{ &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff },		/* blkatoff */
+	{ &vop_valloc_desc, (VOPFUNC)spec_valloc },		/* valloc */
+	{ &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks },	/* reallocblks */
+	{ &vop_vfree_desc, (VOPFUNC)ffs_vfree },		/* vfree */
+	{ &vop_truncate_desc, (VOPFUNC)spec_truncate },		/* truncate */
+	{ &vop_update_desc, (VOPFUNC)ffs_update },		/* update */
+	{ &vop_bwrite_desc, (VOPFUNC)vn_bwrite },
+	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
+	{ &vop_pagein_desc, (VOPFUNC)ffs_pagein },		/* Pagein */
+	{ &vop_pageout_desc, (VOPFUNC)ffs_pageout },		/* Pageout */
+	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copy File */
+	{ &vop_blktooff_desc, (VOPFUNC)ffs_blktooff },		/* blktooff */
+	{ &vop_offtoblk_desc, (VOPFUNC)ffs_offtoblk },		/* offtoblk */
+	{ &vop_cmap_desc, (VOPFUNC)spec_cmap },			/* cmap */
+	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
+};
+struct vnodeopv_desc ffs_specop_opv_desc =
+	{ &ffs_specop_p, ffs_specop_entries };
+
+#if FIFO
+int (**ffs_fifoop_p)(void *);
+struct vnodeopv_entry_desc ffs_fifoop_entries[] = {
+	{ &vop_default_desc, (VOPFUNC)vn_default_error },
+	{ &vop_lookup_desc, (VOPFUNC)fifo_lookup },		/* lookup */
+	{ &vop_create_desc, (VOPFUNC)fifo_create },		/* create */
+	{ &vop_mknod_desc, (VOPFUNC)fifo_mknod },		/* mknod */
+	{ &vop_open_desc, (VOPFUNC)fifo_open },			/* open */
+	{ &vop_close_desc, (VOPFUNC)ufsfifo_close },		/* close */
+	{ &vop_access_desc, (VOPFUNC)ufs_access },		/* access */
+	{ &vop_getattr_desc, (VOPFUNC)ufs_getattr },		/* getattr */
+	{ &vop_setattr_desc, (VOPFUNC)ufs_setattr },		/* setattr */
+	{ &vop_read_desc, (VOPFUNC)ufsfifo_read },		/* read */
+	{ &vop_write_desc, (VOPFUNC)ufsfifo_write },		/* write */
+	{ &vop_lease_desc, (VOPFUNC)fifo_lease_check },		/* lease */
+	{ &vop_ioctl_desc, (VOPFUNC)fifo_ioctl },		/* ioctl */
+	{ &vop_select_desc, (VOPFUNC)fifo_select },		/* select */
+	{ &vop_revoke_desc, (VOPFUNC)fifo_revoke },		/* revoke */
+	{ &vop_mmap_desc, (VOPFUNC)fifo_mmap },			/* mmap */
+	{ &vop_fsync_desc, (VOPFUNC)ffs_fsync },		/* fsync */
+	{ &vop_seek_desc, (VOPFUNC)fifo_seek },			/* seek */
+	{ &vop_remove_desc, (VOPFUNC)fifo_remove },		/* remove */
+	{ &vop_link_desc, (VOPFUNC)fifo_link },			/* link */
+	{ &vop_rename_desc, (VOPFUNC)fifo_rename },		/* rename */
+	{ &vop_mkdir_desc, (VOPFUNC)fifo_mkdir },		/* mkdir */
+	{ &vop_rmdir_desc, (VOPFUNC)fifo_rmdir },		/* rmdir */
+	{ &vop_symlink_desc, (VOPFUNC)fifo_symlink },		/* symlink */
+	{ &vop_readdir_desc, (VOPFUNC)fifo_readdir },		/* readdir */
+	{ &vop_readlink_desc, (VOPFUNC)fifo_readlink },		/* readlink */
+	{ &vop_abortop_desc, (VOPFUNC)fifo_abortop },		/* abortop */
+	{ &vop_inactive_desc, (VOPFUNC)ufs_inactive },		/* inactive */
+	{ &vop_reclaim_desc, (VOPFUNC)ffs_reclaim },		/* reclaim */
+	{ &vop_lock_desc, (VOPFUNC)ufs_lock },			/* lock */
+	{ &vop_unlock_desc, (VOPFUNC)ufs_unlock },		/* unlock */
+	{ &vop_bmap_desc, (VOPFUNC)fifo_bmap },			/* bmap */
+	{ &vop_strategy_desc, (VOPFUNC)fifo_strategy },		/* strategy */
+	{ &vop_print_desc, (VOPFUNC)ufs_print },		/* print */
+	{ &vop_islocked_desc, (VOPFUNC)ufs_islocked },		/* islocked */
+	{ &vop_pathconf_desc, (VOPFUNC)fifo_pathconf },		/* pathconf */
+	{ &vop_advlock_desc, (VOPFUNC)fifo_advlock },		/* advlock */
+	{ &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff },		/* blkatoff */
+	{ &vop_valloc_desc, (VOPFUNC)fifo_valloc },		/* valloc */
+	{ &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks },	/* reallocblks */
+	{ &vop_vfree_desc, (VOPFUNC)ffs_vfree },		/* vfree */
+	{ &vop_truncate_desc, (VOPFUNC)fifo_truncate },		/* truncate */
+	{ &vop_update_desc, (VOPFUNC)ffs_update },		/* update */
+	{ &vop_bwrite_desc, (VOPFUNC)vn_bwrite },
+	{ &vop_pagein_desc, (VOPFUNC)ffs_pagein },		/* Pagein */
+	{ &vop_pageout_desc, (VOPFUNC)ffs_pageout },		/* Pageout */
+	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* Copy File */
+	{ &vop_blktooff_desc, (VOPFUNC)ffs_blktooff },		/* blktooff */
+	{ &vop_offtoblk_desc, (VOPFUNC)ffs_offtoblk },		/* offtoblk */
+	{ &vop_cmap_desc, (VOPFUNC)ufs_cmap },			/* cmap */
+	{ (struct vnodeop_desc*)NULL, (int(*)())NULL }
+};
+struct vnodeopv_desc ffs_fifoop_opv_desc =
+	{ &ffs_fifoop_p, ffs_fifoop_entries };
+#endif /* FIFO */
+
+/*
+ * Enabling cluster read/write operations.
+ */
+int doclusterread = 0;
+int doclusterwrite = 0;
+
+#include
+
+/*
+ * Synch an open file.
+ */
+/* ARGSUSED */
+int
+ffs_fsync(ap)
+	struct vop_fsync_args /* {
+		struct vnode *a_vp;
+		struct ucred *a_cred;
+		int a_waitfor;
+		struct proc *a_p;
+	} */ *ap;
+{
+	register struct vnode *vp = ap->a_vp;
+	register struct buf *bp;
+	struct timeval tv;
+	struct buf *nbp;
+	int s;
+	struct inode *ip = VTOI(vp);
+
+	/*
+	 * Write out any clusters.
+	 */
+	cluster_push(vp);
+
+
+	/*
+	 * Flush all dirty buffers associated with a vnode.
+	 * The list is rescanned from the top after every write since
+	 * writing a buffer can change the list.
+	 */
+loop:
+	s = splbio();
+	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
+		nbp = bp->b_vnbufs.le_next;
+		if ((bp->b_flags & B_BUSY))
+			continue;
+		if ((bp->b_flags & B_DELWRI) == 0)
+			panic("ffs_fsync: not dirty");
+		bremfree(bp);
+		bp->b_flags |= B_BUSY;
+		splx(s);
+		/*
+		 * Wait for I/O associated with indirect blocks to complete,
+		 * since there is no way to quickly wait for them below.
+		 */
+		if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
+			(void) bawrite(bp);
+		else
+			(void) bwrite(bp);
+		goto loop;
+	}
+	if (vp->v_flag & VHASDIRTY)
+		ubc_pushdirty(vp);
+
+	if (ap->a_waitfor == MNT_WAIT) {
+		while (vp->v_numoutput) {
+			vp->v_flag |= VBWAIT;
+			tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "ffs_fsync", 0);
+		}
+
+		/* I have seen this happen for swapfile. So it is safer to
+		 * check for dirty buffers again. --Umesh
+		 */
+		if (vp->v_dirtyblkhd.lh_first || (vp->v_flag & VHASDIRTY)) {
+			vprint("ffs_fsync: dirty", vp);
+			splx(s);
+			goto loop;
+		}
+	}
+	splx(s);
+	tv = time;
+	return (VOP_UPDATE(ap->a_vp, &tv, &tv, ap->a_waitfor == MNT_WAIT));
+}
+
+/*
+ * Reclaim an inode so that it can be used for other purposes.
+ * Frees the in-core inode back to the zone it was allocated from
+ * (MFS vs. FFS) after the generic UFS reclaim.
+ */
+int
+ffs_reclaim(ap)
+	struct vop_reclaim_args /* {
+		struct vnode *a_vp;
+		struct proc *a_p;
+	} */ *ap;
+{
+	register struct vnode *vp = ap->a_vp;
+	int error;
+
+	if (error = ufs_reclaim(vp, ap->a_p))
+		return (error);
+	FREE_ZONE(vp->v_data, sizeof (struct inode),
+	    VFSTOUFS(vp->v_mount)->um_devvp->v_tag == VT_MFS ?
+	    M_MFSNODE : M_FFSNODE);
+	vp->v_data = NULL;
+	return (0);
+}
+
+/* Blktooff converts a logical block number to a file offset */
+int
+ffs_blktooff(ap)
+	struct vop_blktooff_args /* {
+		struct vnode *a_vp;
+		daddr_t a_lblkno;
+		off_t *a_offset;
+	} */ *ap;
+{
+	register struct inode *ip;
+	register FS *fs;
+	ufs_daddr_t bn;
+
+
+	if (ap->a_vp == NULL)
+		return (EINVAL);
+
+	ip = VTOI(ap->a_vp);
+	fs = ip->I_FS;
+	bn = ap->a_lblkno;
+
+	if ((long)bn < 0) {
+		panic("-ve blkno in ffs_blktooff");
+		/* NOTE(review): the negation below is unreachable — panic()
+		 * does not return. Dead code, or the panic should be
+		 * conditional. */
+		bn = -(long)bn;
+	}
+
+	*ap->a_offset = (off_t)lblktosize(fs, bn);
+	return (0);
+}
+
+/* Offtoblk converts a file offset to a logical block number */
+int
+ffs_offtoblk(ap)
+	struct vop_offtoblk_args /* {
+		struct vnode *a_vp;
+		off_t a_offset;
+		daddr_t *a_lblkno;
+	} */ *ap;
+{
+	register struct inode *ip;
+	register FS *fs;
+
+	if (ap->a_vp == NULL)
+		return (EINVAL);
+
+	ip = VTOI(ap->a_vp);
+	fs = ip->I_FS;
+
+	*ap->a_lblkno = (daddr_t)lblkno(fs, ap->a_offset);
+	return (0);
+}
diff --git a/bsd/ufs/ffs/fs.h b/bsd/ufs/ffs/fs.h
new file mode 100644
index 000000000..db4436dac
--- /dev/null
+++ b/bsd/ufs/ffs/fs.h
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *	@(#)fs.h	8.13 (Berkeley) 3/21/95
+ */
+#ifndef _FFS_FS_H_
+#define _FFS_FS_H_
+
+/*
+ * Each disk drive contains some number of file systems.
+ * A file system consists of a number of cylinder groups.
+ * Each cylinder group has inodes and data.
+ *
+ * A file system is described by its super-block, which in turn
+ * describes the cylinder groups.  The super-block is critical
+ * data and is replicated in each cylinder group to protect against
+ * catastrophic loss.  This is done at `newfs' time and the critical
+ * super-block data does not change, so the copies need not be
+ * referenced further unless disaster strikes.
+ *
+ * For file system fs, the offsets of the various blocks of interest
+ * are given in the super block as:
+ *	[fs->fs_sblkno]		Super-block
+ *	[fs->fs_cblkno]		Cylinder group block
+ *	[fs->fs_iblkno]		Inode blocks
+ *	[fs->fs_dblkno]		Data blocks
+ * The beginning of cylinder group cg in fs, is given by
+ * the ``cgbase(fs, cg)'' macro.
+ *
+ * The first boot and super blocks are given in absolute disk addresses.
+ * The byte-offset forms are preferred, as they don't imply a sector size.
+ */
+#define BBSIZE		8192
+#define SBSIZE		8192
+#define	BBOFF		((off_t)(0))
+#define	SBOFF		((off_t)(BBOFF + BBSIZE))
+#define	BBLOCK		((ufs_daddr_t)(0))
+#define	SBLOCK		((ufs_daddr_t)(BBLOCK + BBSIZE / DEV_BSIZE))
+
+/*
+ * Addresses stored in inodes are capable of addressing fragments
+ * of `blocks'. File system blocks of at most size MAXBSIZE can
+ * be optionally broken into 2, 4, or 8 pieces, each of which is
+ * addressible; these pieces may be DEV_BSIZE, or some multiple of
+ * a DEV_BSIZE unit.
+ *
+ * Large files consist of exclusively large data blocks.  To avoid
+ * undue wasted disk space, the last data block of a small file may be
+ * allocated as only as many fragments of a large block as are
+ * necessary.  The file system format retains only a single pointer
+ * to such a fragment, which is a piece of a single large block that
+ * has been divided.  The size of such a fragment is determinable from
+ * information in the inode, using the ``blksize(fs, ip, lbn)'' macro.
+ *
+ * The file system records space availability at the fragment level;
+ * to determine block availability, aligned fragments are examined.
+ */
+
+/*
+ * MINBSIZE is the smallest allowable block size.
+ * In order to insure that it is possible to create files of size
+ * 2^32 with only two levels of indirection, MINBSIZE is set to 4096.
+ * MINBSIZE must be big enough to hold a cylinder group block,
+ * thus changes to (struct cg) must keep its size within MINBSIZE.
+ * Note that super blocks are always of size SBSIZE,
+ * and that both SBSIZE and MAXBSIZE must be >= MINBSIZE.
+ */
+#define MINBSIZE	4096
+
+/*
+ * The path name on which the file system is mounted is maintained
+ * in fs_fsmnt. MAXMNTLEN defines the amount of space allocated in
+ * the super block for this name.
+ */
+#define MAXMNTLEN	512
+
+/*
+ * The limit on the amount of summary information per file system
+ * is defined by MAXCSBUFS. It is currently parameterized for a
+ * size of 128 bytes (2 million cylinder groups on machines with
+ * 32-bit pointers, and 1 million on 64-bit machines). One pointer
+ * is taken away to point to an array of cluster sizes that is
+ * computed as cylinder groups are inspected.
+ */
+#define	MAXCSBUFS	((128 / sizeof(void *)) - 1)
+
+/*
+ * A summary of contiguous blocks of various sizes is maintained
+ * in each cylinder group. Normally this is set by the initial
+ * value of fs_maxcontig. To conserve space, a maximum summary size
+ * is set by FS_MAXCONTIG.
+ */
+#define FS_MAXCONTIG	16
+
+/*
+ * MINFREE gives the minimum acceptable percentage of file system
+ * blocks which may be free. If the freelist drops below this level
+ * only the superuser may continue to allocate blocks. This may
+ * be set to 0 if no reserve of free blocks is deemed necessary,
+ * however throughput drops by fifty percent if the file system
+ * is run at between 95% and 100% full; thus the minimum default
+ * value of fs_minfree is 5%. However, to get good clustering
+ * performance, 10% is a better choice. hence we use 10% as our
+ * default value. With 10% free space, fragmentation is not a
+ * problem, so we choose to optimize for time.
+ */
+/* NOTE(review): the comment above argues for a 10% default, but MINFREE
+ * is defined as 5 — the comment and the value disagree; confirm which
+ * is intended. */
+#define MINFREE		5
+#define DEFAULTOPT	FS_OPTTIME
+
+/*
+ * Per cylinder group information; summarized in blocks allocated
+ * from first cylinder group data blocks.  These blocks have to be
+ * read in from fs_csaddr (size fs_cssize) in addition to the
+ * super block.
+ *
+ * N.B. sizeof(struct csum) must be a power of two in order for
+ * the ``fs_cs'' macro to work (see below).
+ */
+struct csum {
+	int32_t	cs_ndir;	/* number of directories */
+	int32_t	cs_nbfree;	/* number of free blocks */
+	int32_t	cs_nifree;	/* number of free inodes */
+	int32_t	cs_nffree;	/* number of free frags */
+};
+
+/*
+ * Super block for an FFS file system.
 */
/*
 * On-disk superblock layout.  Field order, widths, and the interleaved
 * section comments describe the persistent format; do not reorder or
 * resize fields.
 */
struct fs {
	int32_t	 fs_firstfield;		/* historic file system linked list, */
	int32_t	 fs_unused_1;		/*     used for incore super blocks */
	ufs_daddr_t fs_sblkno;		/* addr of super-block in filesys */
	ufs_daddr_t fs_cblkno;		/* offset of cyl-block in filesys */
	ufs_daddr_t fs_iblkno;		/* offset of inode-blocks in filesys */
	ufs_daddr_t fs_dblkno;		/* offset of first data after cg */
	int32_t	 fs_cgoffset;		/* cylinder group offset in cylinder */
	int32_t	 fs_cgmask;		/* used to calc mod fs_ntrak */
	time_t	 fs_time;		/* last time written */
	int32_t	 fs_size;		/* number of blocks in fs */
	int32_t	 fs_dsize;		/* number of data blocks in fs */
	int32_t	 fs_ncg;		/* number of cylinder groups */
	int32_t	 fs_bsize;		/* size of basic blocks in fs */
	int32_t	 fs_fsize;		/* size of frag blocks in fs */
	int32_t	 fs_frag;		/* number of frags in a block in fs */
/* these are configuration parameters */
	int32_t	 fs_minfree;		/* minimum percentage of free blocks */
	int32_t	 fs_rotdelay;		/* num of ms for optimal next block */
	int32_t	 fs_rps;		/* disk revolutions per second */
/* these fields can be computed from the others */
	int32_t	 fs_bmask;		/* ``blkoff'' calc of blk offsets */
	int32_t	 fs_fmask;		/* ``fragoff'' calc of frag offsets */
	int32_t	 fs_bshift;		/* ``lblkno'' calc of logical blkno */
	int32_t	 fs_fshift;		/* ``numfrags'' calc number of frags */
/* these are configuration parameters */
	int32_t	 fs_maxcontig;		/* max number of contiguous blks */
	int32_t	 fs_maxbpg;		/* max number of blks per cyl group */
/* these fields can be computed from the others */
	int32_t	 fs_fragshift;		/* block to frag shift */
	int32_t	 fs_fsbtodb;		/* fsbtodb and dbtofsb shift constant */
	int32_t	 fs_sbsize;		/* actual size of super block */
	int32_t	 fs_csmask;		/* csum block offset */
	int32_t	 fs_csshift;		/* csum block number */
	int32_t	 fs_nindir;		/* value of NINDIR */
	int32_t	 fs_inopb;		/* value of INOPB */
	int32_t	 fs_nspf;		/* value of NSPF */
/* yet another configuration parameter */
	int32_t	 fs_optim;		/* optimization preference, see below */
/* these fields are derived from the hardware */
	int32_t	 fs_npsect;		/* # sectors/track including spares */
	int32_t	 fs_interleave;		/* hardware sector interleave */
	int32_t	 fs_trackskew;		/* sector 0 skew, per track */
	int32_t	 fs_headswitch;		/* head switch time, usec */
	int32_t	 fs_trkseek;		/* track-to-track seek, usec */
/* sizes determined by number of cylinder groups and their sizes */
	ufs_daddr_t fs_csaddr;		/* blk addr of cyl grp summary area */
	int32_t	 fs_cssize;		/* size of cyl grp summary area */
	int32_t	 fs_cgsize;		/* cylinder group size */
/* these fields are derived from the hardware */
	int32_t	 fs_ntrak;		/* tracks per cylinder */
	int32_t	 fs_nsect;		/* sectors per track */
	int32_t	 fs_spc;		/* sectors per cylinder */
/* this comes from the disk driver partitioning */
	int32_t	 fs_ncyl;		/* cylinders in file system */
/* these fields can be computed from the others */
	int32_t	 fs_cpg;		/* cylinders per group */
	int32_t	 fs_ipg;		/* inodes per group */
	int32_t	 fs_fpg;		/* blocks per group * fs_frag */
/* this data must be re-computed after crashes */
	struct	csum fs_cstotal;	/* cylinder summary information */
/* these fields are cleared at mount time */
	int8_t	 fs_fmod;		/* super block modified flag */
	int8_t	 fs_clean;		/* file system is clean flag */
	int8_t	 fs_ronly;		/* mounted read-only flag */
	int8_t	 fs_flags;		/* currently unused flag */
	u_char	 fs_fsmnt[MAXMNTLEN];	/* name mounted on */
/* these fields retain the current block allocation info */
	int32_t	 fs_cgrotor;		/* last cg searched */
	struct	csum *fs_csp[MAXCSBUFS];/* list of fs_cs info buffers */
	int32_t	*fs_maxcluster;		/* max cluster in each cyl group */
	int32_t	 fs_cpc;		/* cyl per cycle in postbl */
	int16_t	 fs_opostbl[16][8];	/* old rotation block list head */
	int32_t	 fs_sparecon[50];	/* reserved for future constants */
	int32_t	 fs_contigsumsize;	/* size of cluster summary array */
	int32_t	 fs_maxsymlinklen;	/* max length of an internal symlink */
	int32_t	 fs_inodefmt;		/* format of on-disk inodes */
	u_int64_t fs_maxfilesize;	/* maximum representable file size */
	int64_t	 fs_qbmask;		/* ~fs_bmask for use with 64-bit size */
	int64_t	 fs_qfmask;		/* ~fs_fmask for use with 64-bit size */
	int32_t	 fs_state;		/* validate fs_clean field */
	int32_t	 fs_postblformat;	/* format of positional layout tables */
	int32_t	 fs_nrpos;		/* number of rotational positions */
	int32_t	 fs_postbloff;		/* (u_int16) rotation block list head */
	int32_t	 fs_rotbloff;		/* (u_int8) blocks for each rotation */
	int32_t	 fs_magic;		/* magic number */
	u_int8_t fs_space[1];		/* list of blocks for each rotation */
/* actually longer */
};

/*
 * Filesystem identification
 */
#define FS_MAGIC	0x011954	/* the fast filesystem magic number */
#define FS_OKAY		0x7c269d38	/* superblock checksum */
#define FS_42INODEFMT	-1		/* 4.2BSD inode format */
#define FS_44INODEFMT	2		/* 4.4BSD inode format */
/*
 * Preference for optimization.
 */
#define FS_OPTTIME	0	/* minimize allocation time */
#define FS_OPTSPACE	1	/* minimize disk fragmentation */

/*
 * Rotational layout table format types
 */
#define FS_42POSTBLFMT		-1	/* 4.2BSD rotational table format */
#define FS_DYNAMICPOSTBLFMT	1	/* dynamic rotational table format */
/*
 * Macros for access to superblock array structures.
 * Old-format (4.2BSD) superblocks keep the tables inline (fs_opostbl /
 * fs_space); dynamic-format superblocks store byte offsets from the
 * start of the superblock (fs_postbloff / fs_rotbloff).
 */
#define fs_postbl(fs, cylno) \
    (((fs)->fs_postblformat == FS_42POSTBLFMT) \
    ? ((fs)->fs_opostbl[cylno]) \
    : ((int16_t *)((u_int8_t *)(fs) + \
	(fs)->fs_postbloff) + (cylno) * (fs)->fs_nrpos))
#define fs_rotbl(fs) \
    (((fs)->fs_postblformat == FS_42POSTBLFMT) \
    ? ((fs)->fs_space) \
    : ((u_int8_t *)((u_int8_t *)(fs) + (fs)->fs_rotbloff)))

/*
 * The size of a cylinder group is calculated by CGSIZE. The maximum size
 * is limited by the fact that cylinder groups are at most one block.
 * Its size is derived from the size of the maps maintained in the
 * cylinder group and the (struct cg) size.
 */
#define CGSIZE(fs) \
    /* base cg */	(sizeof(struct cg) + sizeof(int32_t) + \
    /* blktot size */	(fs)->fs_cpg * sizeof(int32_t) + \
    /* blks size */	(fs)->fs_cpg * (fs)->fs_nrpos * sizeof(int16_t) + \
    /* inode map */	howmany((fs)->fs_ipg, NBBY) + \
    /* block map */	howmany((fs)->fs_cpg * (fs)->fs_spc / NSPF(fs), NBBY) +\
    /* if present */	((fs)->fs_contigsumsize <= 0 ? 0 : \
    /* cluster sum */	(fs)->fs_contigsumsize * sizeof(int32_t) + \
    /* cluster map */	howmany((fs)->fs_cpg * (fs)->fs_spc / NSPB(fs), NBBY)))

/*
 * Convert cylinder group to base address of its global summary info.
 * Used as (fs)->fs_cs(fs, indx): fs_csshift/fs_csmask split the index
 * into a buffer number and an offset within that buffer.
 *
 * N.B. This macro assumes that sizeof(struct csum) is a power of two.
 */
#define fs_cs(fs, indx) \
	fs_csp[(indx) >> (fs)->fs_csshift][(indx) & ~(fs)->fs_csmask]

/*
 * Cylinder group block for a file system.
 * On-disk layout: the variable-length maps live in cg_space and are
 * located via the *off byte offsets below.
 */
#define CG_MAGIC	0x090255
struct cg {
	int32_t	 cg_firstfield;		/* historic cyl groups linked list */
	int32_t	 cg_magic;		/* magic number */
	time_t	 cg_time;		/* time last written */
	int32_t	 cg_cgx;		/* we are the cgx'th cylinder group */
	int16_t	 cg_ncyl;		/* number of cyl's this cg */
	int16_t	 cg_niblk;		/* number of inode blocks this cg */
	int32_t	 cg_ndblk;		/* number of data blocks this cg */
	struct	csum cg_cs;		/* cylinder summary information */
	int32_t	 cg_rotor;		/* position of last used block */
	int32_t	 cg_frotor;		/* position of last used frag */
	int32_t	 cg_irotor;		/* position of last used inode */
	int32_t	 cg_frsum[MAXFRAG];	/* counts of available frags */
	int32_t	 cg_btotoff;		/* (int32) block totals per cylinder */
	int32_t	 cg_boff;		/* (u_int16) free block positions */
	int32_t	 cg_iusedoff;		/* (u_int8) used inode map */
	int32_t	 cg_freeoff;		/* (u_int8) free block map */
	int32_t	 cg_nextfreeoff;	/* (u_int8) next available space */
	int32_t	 cg_clustersumoff;	/* (u_int32) counts of avail clusters */
	int32_t	 cg_clusteroff;		/* (u_int8) free cluster map */
	int32_t	 cg_nclusterblks;	/* number of clusters this cg */
	int32_t	 cg_sparecon[13];	/* reserved for future use */
	u_int8_t cg_space[1];		/* space for cylinder group maps */
/* actually longer */
};

/*
 * Macros for access to cylinder group array structures.
 * Each one falls back to the old (struct ocg) inline layout when the
 * new-format magic number is absent.
 */
#define cg_blktot(cgp) \
    (((cgp)->cg_magic != CG_MAGIC) \
    ? (((struct ocg *)(cgp))->cg_btot) \
    : ((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_btotoff)))
#define cg_blks(fs, cgp, cylno) \
    (((cgp)->cg_magic != CG_MAGIC) \
    ? (((struct ocg *)(cgp))->cg_b[cylno]) \
    : ((int16_t *)((u_int8_t *)(cgp) + \
	(cgp)->cg_boff) + (cylno) * (fs)->fs_nrpos))
#define cg_inosused(cgp) \
    (((cgp)->cg_magic != CG_MAGIC) \
    ? (((struct ocg *)(cgp))->cg_iused) \
    : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_iusedoff)))
#define cg_blksfree(cgp) \
    (((cgp)->cg_magic != CG_MAGIC) \
    ? (((struct ocg *)(cgp))->cg_free) \
    : ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_freeoff)))
#define cg_chkmagic(cgp) \
    ((cgp)->cg_magic == CG_MAGIC || ((struct ocg *)(cgp))->cg_magic == CG_MAGIC)
#define cg_clustersfree(cgp) \
    ((u_int8_t *)((u_int8_t *)(cgp) + (cgp)->cg_clusteroff))
#define cg_clustersum(cgp) \
    ((int32_t *)((u_int8_t *)(cgp) + (cgp)->cg_clustersumoff))

/*
 * The following structure is defined
 * for compatibility with old file systems.
+ */ +struct ocg { + int32_t cg_firstfield; /* historic linked list of cyl groups */ + int32_t cg_unused_1; /* used for incore cyl groups */ + time_t cg_time; /* time last written */ + int32_t cg_cgx; /* we are the cgx'th cylinder group */ + int16_t cg_ncyl; /* number of cyl's this cg */ + int16_t cg_niblk; /* number of inode blocks this cg */ + int32_t cg_ndblk; /* number of data blocks this cg */ + struct csum cg_cs; /* cylinder summary information */ + int32_t cg_rotor; /* position of last used block */ + int32_t cg_frotor; /* position of last used frag */ + int32_t cg_irotor; /* position of last used inode */ + int32_t cg_frsum[8]; /* counts of available frags */ + int32_t cg_btot[32]; /* block totals per cylinder */ + int16_t cg_b[32][8]; /* positions of free blocks */ + u_int8_t cg_iused[256]; /* used inode map */ + int32_t cg_magic; /* magic number */ + u_int8_t cg_free[1]; /* free block map */ +/* actually longer */ +}; + +/* + * Turn file system block numbers into disk block addresses. + * This maps file system blocks to device size blocks. + */ +#define fsbtodb(fs, b) ((b) << (fs)->fs_fsbtodb) +#define dbtofsb(fs, b) ((b) >> (fs)->fs_fsbtodb) + +/* + * Cylinder group macros to locate things in cylinder groups. + * They calc file system addresses of cylinder group data structures. + */ +#define cgbase(fs, c) ((ufs_daddr_t)((fs)->fs_fpg * (c))) +#define cgdmin(fs, c) (cgstart(fs, c) + (fs)->fs_dblkno) /* 1st data */ +#define cgimin(fs, c) (cgstart(fs, c) + (fs)->fs_iblkno) /* inode blk */ +#define cgsblock(fs, c) (cgstart(fs, c) + (fs)->fs_sblkno) /* super blk */ +#define cgtod(fs, c) (cgstart(fs, c) + (fs)->fs_cblkno) /* cg block */ +#define cgstart(fs, c) \ + (cgbase(fs, c) + (fs)->fs_cgoffset * ((c) & ~((fs)->fs_cgmask))) + +/* + * Macros for handling inode numbers: + * inode number to file system block offset. + * inode number to cylinder group number. + * inode number to file system block address. 
+ */ +#define ino_to_cg(fs, x) ((x) / (fs)->fs_ipg) +#define ino_to_fsba(fs, x) \ + ((ufs_daddr_t)(cgimin(fs, ino_to_cg(fs, x)) + \ + (blkstofrags((fs), (((x) % (fs)->fs_ipg) / INOPB(fs)))))) +#define ino_to_fsbo(fs, x) ((x) % INOPB(fs)) + +/* + * Give cylinder group number for a file system block. + * Give cylinder group block number for a file system block. + */ +#define dtog(fs, d) ((d) / (fs)->fs_fpg) +#define dtogd(fs, d) ((d) % (fs)->fs_fpg) + +/* + * Extract the bits for a block from a map. + * Compute the cylinder and rotational position of a cyl block addr. + */ +#define blkmap(fs, map, loc) \ + (((map)[(loc) / NBBY] >> ((loc) % NBBY)) & (0xff >> (NBBY - (fs)->fs_frag))) +#define cbtocylno(fs, bno) \ + ((bno) * NSPF(fs) / (fs)->fs_spc) +#define cbtorpos(fs, bno) \ + (((bno) * NSPF(fs) % (fs)->fs_spc / (fs)->fs_nsect * (fs)->fs_trackskew + \ + (bno) * NSPF(fs) % (fs)->fs_spc % (fs)->fs_nsect * (fs)->fs_interleave) % \ + (fs)->fs_nsect * (fs)->fs_nrpos / (fs)->fs_npsect) + +/* + * The following macros optimize certain frequently calculated + * quantities by using shifts and masks in place of divisions + * modulos and multiplications. 
+ */ +#define blkoff(fs, loc) /* calculates (loc % fs->fs_bsize) */ \ + ((loc) & (fs)->fs_qbmask) +#define fragoff(fs, loc) /* calculates (loc % fs->fs_fsize) */ \ + ((loc) & (fs)->fs_qfmask) +#define lblktosize(fs, blk) /* calculates (blk * fs->fs_bsize) */ \ + ((blk) << (fs)->fs_bshift) +#define lblkno(fs, loc) /* calculates (loc / fs->fs_bsize) */ \ + ((loc) >> (fs)->fs_bshift) +#define numfrags(fs, loc) /* calculates (loc / fs->fs_fsize) */ \ + ((loc) >> (fs)->fs_fshift) +#define blkroundup(fs, size) /* calculates roundup(size, fs->fs_bsize) */ \ + (((size) + (fs)->fs_qbmask) & (fs)->fs_bmask) +#define fragroundup(fs, size) /* calculates roundup(size, fs->fs_fsize) */ \ + (((size) + (fs)->fs_qfmask) & (fs)->fs_fmask) +#define fragstoblks(fs, frags) /* calculates (frags / fs->fs_frag) */ \ + ((frags) >> (fs)->fs_fragshift) +#define blkstofrags(fs, blks) /* calculates (blks * fs->fs_frag) */ \ + ((blks) << (fs)->fs_fragshift) +#define fragnum(fs, fsb) /* calculates (fsb % fs->fs_frag) */ \ + ((fsb) & ((fs)->fs_frag - 1)) +#define blknum(fs, fsb) /* calculates rounddown(fsb, fs->fs_frag) */ \ + ((fsb) &~ ((fs)->fs_frag - 1)) + +/* + * Determine the number of available frags given a + * percentage to hold in reserve. + */ +#define freespace(fs, percentreserved) \ + (blkstofrags((fs), (fs)->fs_cstotal.cs_nbfree) + \ + (fs)->fs_cstotal.cs_nffree - ((fs)->fs_dsize * (percentreserved) / 100)) + +/* + * Determining the size of a file block in the file system. + */ +#define blksize(fs, ip, lbn) \ + (((lbn) >= NDADDR || (ip)->i_size >= ((lbn) + 1) << (fs)->fs_bshift) \ + ? (fs)->fs_bsize \ + : (fragroundup(fs, blkoff(fs, (ip)->i_size)))) +#define dblksize(fs, dip, lbn) \ + (((lbn) >= NDADDR || (dip)->di_size >= ((lbn) + 1) << (fs)->fs_bshift) \ + ? (fs)->fs_bsize \ + : (fragroundup(fs, blkoff(fs, (dip)->di_size)))) + +/* + * Number of disk sectors per block/fragment; assumes DEV_BSIZE byte + * sector size. 
+ */ +#define NSPB(fs) ((fs)->fs_nspf << (fs)->fs_fragshift) +#define NSPF(fs) ((fs)->fs_nspf) + +/* + * Number of inodes in a secondary storage block/fragment. + */ +#define INOPB(fs) ((fs)->fs_inopb) +#define INOPF(fs) ((fs)->fs_inopb >> (fs)->fs_fragshift) + +/* + * Number of indirects in a file system block. + */ +#define NINDIR(fs) ((fs)->fs_nindir) + +/* + * This macro controls whether the file system format is byte swapped or not. + * At NeXT, all little endian machines read and write big endian file systems. + */ +#define BIG_ENDIAN_FS (__LITTLE_ENDIAN__) + +extern int inside[], around[]; +extern u_char *fragtbl[]; + + +/* + * UFS Label: + * The purpose of this label is to name a UFS/FFS filesystem. The label + * is located at offset 7K (BBSIZE=8K - UFS_LABEL_SIZE=1K = 7K) of the + * partition. The first 7K is still available for boot blocks. + */ + +#define UFS_LABEL_MAGIC { 'L', 'A', 'B', 'L' } +#define UFS_LABEL_SIZE 1024 +#define UFS_LABEL_OFFSET (BBSIZE - UFS_LABEL_SIZE) /* top 1K */ +#define UFS_LABEL_VERSION 1 +#define UFS_MAX_LABEL_NAME 512 + +struct ufslabel { + u_int32_t ul_magic; + u_int16_t ul_checksum; /* checksum over entire label*/ + u_int32_t ul_version; /* label version */ + u_int32_t ul_time; /* creation time */ + u_int16_t ul_namelen; /* filesystem name length */ + u_char ul_name[UFS_MAX_LABEL_NAME]; /* filesystem name */ + u_char ul_reserved[32];/* reserved for future use */ + u_char ul_unused[460]; /* pad out to 1K */ +}; + +#endif /* ! _FFS_FS_H_ */ diff --git a/bsd/ufs/mfs/mfs_extern.h b/bsd/ufs/mfs/mfs_extern.h new file mode 100644 index 000000000..b8d270fb8 --- /dev/null +++ b/bsd/ufs/mfs/mfs_extern.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mfs_extern.h 8.2 (Berkeley) 6/16/94 + */ + +struct buf; +struct mount; +struct nameidata; +struct proc; +struct statfs; +struct ucred; +struct vnode; + +__BEGIN_DECLS +int mfs_badop __P((void)); +int mfs_bmap __P((struct vop_bmap_args *)); +int mfs_close __P((struct vop_close_args *)); +void mfs_doio __P((struct buf *bp, caddr_t base)); +int mfs_inactive __P((struct vop_inactive_args *)); /* XXX */ +int mfs_reclaim __P((struct vop_reclaim_args *)); +int mfs_init __P((void)); +int mfs_ioctl __P((struct vop_ioctl_args *)); +int mfs_mount __P((struct mount *mp, + char *path, caddr_t data, struct nameidata *ndp, struct proc *p)); +int mfs_open __P((struct vop_open_args *)); +int mfs_print __P((struct vop_print_args *)); /* XXX */ +int mfs_start __P((struct mount *mp, int flags, struct proc *p)); +int mfs_statfs __P((struct mount *mp, struct statfs *sbp, struct proc *p)); +int mfs_strategy __P((struct vop_strategy_args *)); /* XXX */ +__END_DECLS diff --git a/bsd/ufs/mfs/mfs_vfsops.c b/bsd/ufs/mfs/mfs_vfsops.c new file mode 100644 index 000000000..1acebe008 --- /dev/null +++ b/bsd/ufs/mfs/mfs_vfsops.c @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1990, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mfs_vfsops.c 8.4 (Berkeley) 4/16/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include + +caddr_t mfs_rootbase; /* address of mini-root in kernel virtual memory */ +u_long mfs_rootsize; /* size of mini-root in bytes */ + +static int mfs_minor; /* used for building internal dev_t */ + +extern int (**mfs_vnodeop_p)(void *); + +/* + * mfs vfs operations. + */ +struct vfsops mfs_vfsops = { + MOUNT_MFS, + mfs_mount, + mfs_start, + ffs_unmount, + ufs_root, + ufs_quotactl, + mfs_statfs, + ffs_sync, + ffs_vget, + ffs_fhtovp, + ffs_vptofh, + mfs_init, +}; + +/* + * Called by main() when mfs is going to be mounted as root. + * + * Name is updated by mount(8) after booting. 
+ */ +#define ROOTNAME "mfs_root" + +mfs_mountroot() +{ + extern struct vnode *rootvp; + register struct fs *fs; + register struct mount *mp; + struct proc *p = kernel_proc; /* XXX - WMG*/ + struct ufsmount *ump; + struct mfsnode *mfsp; + size_t size; + int error; + + /* + * Get vnodes for swapdev and rootdev. + */ +#if 0 + if (bdevvp(swapdev, &swapdev_vp) || bdevvp(rootdev, &rootvp)) + panic("mfs_mountroot: can't setup bdevvp's"); +#else + if ( bdevvp(rootdev, &rootvp)) + panic("mfs_mountroot: can't setup bdevvp's"); + +#endif + MALLOC_ZONE(mp, struct mount *, + sizeof(struct mount), M_MOUNT, M_WAITOK); + bzero((char *)mp, (u_long)sizeof(struct mount)); + mp->mnt_op = &mfs_vfsops; + mp->mnt_flag = MNT_RDONLY; + MALLOC(mfsp, struct mfsnode *, sizeof(struct mfsnode), M_MFSNODE, M_WAITOK); + rootvp->v_data = mfsp; + rootvp->v_op = mfs_vnodeop_p; + rootvp->v_tag = VT_MFS; + mfsp->mfs_baseoff = mfs_rootbase; + mfsp->mfs_size = mfs_rootsize; + mfsp->mfs_vnode = rootvp; + mfsp->mfs_pid = p->p_pid; + mfsp->mfs_buflist = (struct buf *)0; + if (error = ffs_mountfs(rootvp, mp, p)) { + _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + _FREE(mfsp, M_MFSNODE); + return (error); + } + if (error = vfs_lock(mp)) { + (void)ffs_unmount(mp, 0, p); + _FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + _FREE(mfsp, M_MFSNODE); + return (error); + } + CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list); + mp->mnt_vnodecovered = NULLVP; + ump = VFSTOUFS(mp); + fs = ump->um_fs; + bzero(fs->fs_fsmnt, sizeof(fs->fs_fsmnt)); + fs->fs_fsmnt[0] = '/'; + bcopy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MNAMELEN); + (void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, + &size); + bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size); + (void)ffs_statfs(mp, &mp->mnt_stat, p); + vfs_unlock(mp); + inittodr((time_t)0); + return (0); +} + +/* + * This is called early in boot to set the base address and size + * of the mini-root. 
 */
/*
 * Probe the candidate mini-root image at `base' for a valid FFS
 * superblock.  On success, record its location/size, point mountroot
 * at mfs_mountroot, fabricate a rootdev (major 255 = internal MFS),
 * and return the image size; returns 0 if no valid superblock found.
 */
mfs_initminiroot(base)
	caddr_t base;
{
	struct fs *fs = (struct fs *)(base + SBOFF);
	extern int (*mountroot)();

	/* check for valid super block */
	if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
	    fs->fs_bsize < sizeof(struct fs))
		return (0);
	mountroot = mfs_mountroot;
	mfs_rootbase = base;
	mfs_rootsize = fs->fs_fsize * fs->fs_size;
	rootdev = makedev(255, mfs_minor++);
	return (mfs_rootsize);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
/* ARGSUSED */
/*
 * Mount an MFS: copy in the user's mfs_args (base address and size of
 * the backing memory in the *calling process's* address space), create
 * an anonymous block-device vnode aliased to an internal dev_t, attach
 * an mfsnode to it, and let ffs_mountfs() do the FFS-level work.
 * MNT_UPDATE handles only ro/rw transitions (and export under
 * EXPORTMFS); the backing store cannot be changed.
 */
int
mfs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	char *path;
	caddr_t data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct mfs_args args;
	struct ufsmount *ump;
	register struct fs *fs;
	register struct mfsnode *mfsp;
	size_t size;
	int flags, error;

	if (error = copyin(data, (caddr_t)&args, sizeof (struct mfs_args)))
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/* rw -> ro: flush (possibly forcibly) before the flip */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (vfs_busy(mp))
				return (EBUSY);
			error = ffs_flushfiles(mp, flags, p);
			vfs_unbusy(mp);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR))
			fs->fs_ronly = 0;
#ifdef EXPORTMFS
		if (args.fspec == 0)
			return (vfs_export(mp, &ump->um_export, &args.export));
#endif
		return (0);
	}
	MALLOC(mfsp, struct mfsnode *, sizeof(struct mfsnode), M_MFSNODE, M_WAITOK);
	error = getnewvnode(VT_MFS, (struct mount *)0, mfs_vnodeop_p, &devvp);
	if (error) {
		FREE(mfsp, M_MFSNODE);
		return (error);
	}
	devvp->v_type = VBLK;
	/* major 255 is the internal MFS pseudo-device; minors are unique */
	if (checkalias(devvp, makedev(255, mfs_minor++), (struct mount *)0))
		panic("mfs_mount: dup dev");
	devvp->v_data = mfsp;
	mfsp->mfs_baseoff = args.base;
	mfsp->mfs_size = args.size;
	mfsp->mfs_vnode = devvp;
	mfsp->mfs_pid = p->p_pid;	/* I/O is served from this process */
	mfsp->mfs_buflist = (struct buf *)0;
	if (error = ffs_mountfs(devvp, mp, p)) {
		mfsp->mfs_buflist = (struct buf *)-1;	/* mark dead for mfs_start */
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	bzero(fs->fs_fsmnt + size, sizeof(fs->fs_fsmnt) - size);
	bcopy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
		&size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	return (0);
}

int	mfs_pri = PWAIT | PCATCH;		/* XXX prob. temp */

/*
 * Used to grab the process and keep it in the kernel to service
 * memory filesystem I/O requests.
 *
 * Loop servicing I/O requests.
 * Copy the requested data into or out of the memory filesystem
 * address space.
+ */ +/* ARGSUSED */ +int +mfs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + register struct vnode *vp = VFSTOUFS(mp)->um_devvp; + register struct mfsnode *mfsp = VTOMFS(vp); + register struct buf *bp; + register caddr_t base; + int error = 0; + + base = mfsp->mfs_baseoff; + while (mfsp->mfs_buflist != (struct buf *)(-1)) { + while (bp = mfsp->mfs_buflist) { + mfsp->mfs_buflist = bp->b_actf; + mfs_doio(bp, base); + wakeup((caddr_t)bp); + } + /* + * If a non-ignored signal is received, try to unmount. + * If that fails, clear the signal (it has been "processed"), + * otherwise we will loop here, as tsleep will always return + * EINTR/ERESTART. + */ + if (error = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0)) + if (dounmount(mp, 0, p) != 0) + CLRSIG(p, CURSIG(p)); + } + return (error); +} + +/* + * Get file system statistics. + */ +mfs_statfs(mp, sbp, p) + struct mount *mp; + struct statfs *sbp; + struct proc *p; +{ + int error; + + error = ffs_statfs(mp, sbp, p); +#ifdef COMPAT_09 + sbp->f_type = 3; +#else + sbp->f_type = 0; +#endif + strncpy(&sbp->f_fstypename[0], mp->mnt_op->vfs_name, MFSNAMELEN); + sbp->f_fstypename[MFSNAMELEN] = '\0'; + return (error); +} diff --git a/bsd/ufs/mfs/mfs_vnops.c b/bsd/ufs/mfs/mfs_vnops.c new file mode 100644 index 000000000..e1e11248a --- /dev/null +++ b/bsd/ufs/mfs/mfs_vnops.c @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* $NetBSD: mfs_vnops.c,v 1.5 1994/12/14 13:03:52 mycroft Exp $ */ + +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mfs_vnops.c 8.5 (Berkeley) 7/28/94 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +/* + * mfs vnode operations. + */ + +#define VOPFUNC int (*)(void *) + +int (**mfs_vnodeop_p)(void *); +struct vnodeopv_entry_desc mfs_vnodeop_entries[] = { + { &vop_default_desc, (VOPFUNC)vn_default_error }, + { &vop_lookup_desc, (VOPFUNC)mfs_lookup }, /* lookup */ + { &vop_create_desc, (VOPFUNC)mfs_create }, /* create */ + { &vop_mknod_desc, (VOPFUNC)mfs_mknod }, /* mknod */ + { &vop_open_desc, (VOPFUNC)mfs_open }, /* open */ + { &vop_close_desc, (VOPFUNC)mfs_close }, /* close */ + { &vop_access_desc, (VOPFUNC)mfs_access }, /* access */ + { &vop_getattr_desc, (VOPFUNC)mfs_getattr }, /* getattr */ + { &vop_setattr_desc, (VOPFUNC)mfs_setattr }, /* setattr */ + { &vop_read_desc, (VOPFUNC)mfs_read }, /* read */ + { &vop_write_desc, (VOPFUNC)mfs_write }, /* write */ + { &vop_ioctl_desc, (VOPFUNC)mfs_ioctl }, /* ioctl */ + { &vop_select_desc, (VOPFUNC)mfs_select }, /* select */ + { &vop_mmap_desc, (VOPFUNC)mfs_mmap }, /* mmap */ + { &vop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ + { &vop_seek_desc, (VOPFUNC)mfs_seek }, /* seek */ + { &vop_remove_desc, (VOPFUNC)mfs_remove }, /* remove */ + { &vop_link_desc, (VOPFUNC)mfs_link }, /* link */ + { &vop_rename_desc, (VOPFUNC)mfs_rename }, /* rename */ + { &vop_mkdir_desc, (VOPFUNC)mfs_mkdir }, /* 
mkdir */ + { &vop_rmdir_desc, (VOPFUNC)mfs_rmdir }, /* rmdir */ + { &vop_symlink_desc, (VOPFUNC)mfs_symlink }, /* symlink */ + { &vop_readdir_desc, (VOPFUNC)mfs_readdir }, /* readdir */ + { &vop_readlink_desc, (VOPFUNC)mfs_readlink }, /* readlink */ + { &vop_abortop_desc, (VOPFUNC)mfs_abortop }, /* abortop */ + { &vop_inactive_desc, (VOPFUNC)mfs_inactive }, /* inactive */ + { &vop_reclaim_desc, (VOPFUNC)mfs_reclaim }, /* reclaim */ + { &vop_lock_desc, (VOPFUNC)mfs_lock }, /* lock */ + { &vop_unlock_desc, (VOPFUNC)mfs_unlock }, /* unlock */ + { &vop_bmap_desc, (VOPFUNC)mfs_bmap }, /* bmap */ + { &vop_strategy_desc, (VOPFUNC)mfs_strategy }, /* strategy */ + { &vop_print_desc, (VOPFUNC)mfs_print }, /* print */ + { &vop_islocked_desc, (VOPFUNC)mfs_islocked }, /* islocked */ + { &vop_pathconf_desc, (VOPFUNC)mfs_pathconf }, /* pathconf */ + { &vop_advlock_desc, (VOPFUNC)mfs_advlock }, /* advlock */ + { &vop_blkatoff_desc, (VOPFUNC)mfs_blkatoff }, /* blkatoff */ + { &vop_valloc_desc, (VOPFUNC)mfs_valloc }, /* valloc */ + { &vop_vfree_desc, (VOPFUNC)mfs_vfree }, /* vfree */ + { &vop_truncate_desc, (VOPFUNC)mfs_truncate }, /* truncate */ + { &vop_update_desc, (VOPFUNC)mfs_update }, /* update */ + { &vop_bwrite_desc, (VOPFUNC)mfs_bwrite }, /* bwrite */ + { &vop_pgrd_desc, (VOPFUNC)mfs_pgrg }, /* pager read */ + { &vop_pgwr_desc, (VOPFUNC)mfs_pgwr }, /* pager write */ + { (struct vnodeop_desc*)NULL, (int(*)())NULL } +}; +struct vnodeopv_desc mfs_vnodeop_opv_desc = + { &mfs_vnodeop_p, mfs_vnodeop_entries }; + +/* + * Vnode Operations. + * + * Open called to allow memory filesystem to initialize and + * validate before actual IO. Record our process identifier + * so we can tell when we are doing I/O to ourself. 
+ */ +/* ARGSUSED */ +int +mfs_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + if (ap->a_vp->v_type != VBLK) { + panic("mfs_ioctl not VBLK"); + /* NOTREACHED */ + } + return (0); +} + +/* + * Ioctl operation. + */ +/* ARGSUSED */ +int +mfs_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + u_long a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + return (ENOTTY); +} + +/* + * Pass I/O requests to the memory filesystem process. + */ +int +mfs_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + register struct buf *bp = ap->a_bp; + register struct mfsnode *mfsp; + struct vnode *vp; + struct proc *p = curproc; /* XXX */ + + if (!vfinddev(bp->b_dev, VBLK, &vp) || vp->v_usecount == 0) + panic("mfs_strategy: bad dev"); + mfsp = VTOMFS(vp); + /* check for mini-root access */ + if (mfsp->mfs_pid == 0) { + caddr_t base; + + base = mfsp->mfs_baseoff + (bp->b_blkno << DEV_BSHIFT); + if (bp->b_flags & B_READ) + bcopy(base, bp->b_data, bp->b_bcount); + else + bcopy(bp->b_data, base, bp->b_bcount); + biodone(bp); + } else if (mfsp->mfs_pid == p->p_pid) { + mfs_doio(bp, mfsp->mfs_baseoff); + } else { + bp->b_actf = mfsp->mfs_buflist; + mfsp->mfs_buflist = bp; + wakeup((caddr_t)vp); + } + return (0); +} + +/* + * Memory file system I/O. + * + * Trivial on the HP since buffer has already been mapping into KVA space. + */ +void +mfs_doio(bp, base) + register struct buf *bp; + caddr_t base; +{ + + base += (bp->b_blkno << DEV_BSHIFT); + if (bp->b_flags & B_READ) + bp->b_error = copyin(base, bp->b_data, bp->b_bcount); + else + bp->b_error = copyout(bp->b_data, base, bp->b_bcount); + if (bp->b_error) + bp->b_flags |= B_ERROR; + biodone(bp); +} + +/* + * This is a noop, simply returning what one has been given. 
+ */ +int +mfs_bmap(ap) + struct vop_bmap_args /* { + struct vnode *a_vp; + daddr_t a_bn; + struct vnode **a_vpp; + daddr_t *a_bnp; + int *a_runp; + } */ *ap; +{ + + if (ap->a_vpp != NULL) + *ap->a_vpp = ap->a_vp; + if (ap->a_bnp != NULL) + *ap->a_bnp = ap->a_bn; + return (0); +} + +/* + * Memory filesystem close routine + */ +/* ARGSUSED */ +int +mfs_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct mfsnode *mfsp = VTOMFS(vp); + register struct buf *bp; + int error; + + /* + * Finish any pending I/O requests. + */ + while (bp = mfsp->mfs_buflist) { + mfsp->mfs_buflist = bp->b_actf; + mfs_doio(bp, mfsp->mfs_baseoff); + wakeup((caddr_t)bp); + } + /* + * On last close of a memory filesystem + * we must invalidate any in core blocks, so that + * we can, free up its vnode. + */ + if (error = vinvalbuf(vp, 1, ap->a_cred, ap->a_p, 0, 0)) + return (error); + /* + * There should be no way to have any more uses of this + * vnode, so if we find any other uses, it is a panic. + */ + if (vp->v_usecount > 1) + printf("mfs_close: ref count %d > 1\n", vp->v_usecount); + if (vp->v_usecount > 1 || mfsp->mfs_buflist) + panic("mfs_close"); + /* + * Send a request to the filesystem server to exit. + */ + mfsp->mfs_buflist = (struct buf *)(-1); + wakeup((caddr_t)vp); + return (0); +} + +/* + * Memory filesystem inactive routine + */ +/* ARGSUSED */ +int +mfs_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct mfsnode *mfsp = VTOMFS(ap->a_vp); + + if (mfsp->mfs_buflist && mfsp->mfs_buflist != (struct buf *)(-1)) + panic("mfs_inactive: not inactive (mfs_buflist %x)", + mfsp->mfs_buflist); + return (0); +} + +/* + * Reclaim a memory filesystem devvp so that it can be reused. 
+ */ +int +mfs_reclaim(ap) + struct vop_reclaim_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + + FREE(vp->v_data, M_MFSNODE); + vp->v_data = NULL; + return (0); +} + +/* + * Print out the contents of an mfsnode. + */ +int +mfs_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct mfsnode *mfsp = VTOMFS(ap->a_vp); + + printf("tag VT_MFS, pid %d, base %d, size %d\n", mfsp->mfs_pid, + mfsp->mfs_baseoff, mfsp->mfs_size); + return (0); +} + +/* + * Block device bad operation + */ +int +mfs_badop() +{ + + panic("mfs_badop called\n"); + /* NOTREACHED */ +} + +/* + * Memory based filesystem initialization. + */ +mfs_init() +{ + +} diff --git a/bsd/ufs/mfs/mfsiom.h b/bsd/ufs/mfs/mfsiom.h new file mode 100644 index 000000000..aaac06b5c --- /dev/null +++ b/bsd/ufs/mfs/mfsiom.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)mfsiom.h 8.1 (Berkeley) 6/11/93 + */ + +#define MFS_MAPREG (MAXPHYS/NBPG + 2) /* Kernel mapping pte's */ +#define MFS_MAPSIZE 10 /* Size of alloc map for pte's */ diff --git a/bsd/ufs/mfs/mfsnode.h b/bsd/ufs/mfs/mfsnode.h new file mode 100644 index 000000000..309c9bf15 --- /dev/null +++ b/bsd/ufs/mfs/mfsnode.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)mfsnode.h 8.2 (Berkeley) 8/11/93 + */ + +/* + * This structure defines the control data for the memory based file system. + */ + +struct mfsnode { + struct vnode *mfs_vnode; /* vnode associated with this mfsnode */ + caddr_t mfs_baseoff; /* base of file system in memory */ + long mfs_size; /* size of memory file system */ + pid_t mfs_pid; /* supporting process pid */ + struct buf *mfs_buflist; /* list of I/O requests */ + long mfs_spare[4]; +}; + +/* + * Convert between mfsnode pointers and vnode pointers + */ +#define VTOMFS(vp) ((struct mfsnode *)(vp)->v_data) +#define MFSTOV(mfsp) ((mfsp)->mfs_vnode) + +/* Prototypes for MFS operations on vnodes. 
*/ +#define mfs_lookup ((int (*) __P((struct vop_lookup_args *)))mfs_badop) +#define mfs_create ((int (*) __P((struct vop_create_args *)))mfs_badop) +#define mfs_mknod ((int (*) __P((struct vop_mknod_args *)))mfs_badop) +#define mfs_access ((int (*) __P((struct vop_access_args *)))mfs_badop) +#define mfs_getattr ((int (*) __P((struct vop_getattr_args *)))mfs_badop) +#define mfs_setattr ((int (*) __P((struct vop_setattr_args *)))mfs_badop) +#define mfs_read ((int (*) __P((struct vop_read_args *)))mfs_badop) +#define mfs_write ((int (*) __P((struct vop_write_args *)))mfs_badop) +#define mfs_select ((int (*) __P((struct vop_select_args *)))mfs_badop) +#define mfs_mmap ((int (*) __P((struct vop_mmap_args *)))mfs_badop) +#define mfs_seek ((int (*) __P((struct vop_seek_args *)))mfs_badop) +#define mfs_remove ((int (*) __P((struct vop_remove_args *)))mfs_badop) +#define mfs_link ((int (*) __P((struct vop_link_args *)))mfs_badop) +#define mfs_rename ((int (*) __P((struct vop_rename_args *)))mfs_badop) +#define mfs_mkdir ((int (*) __P((struct vop_mkdir_args *)))mfs_badop) +#define mfs_rmdir ((int (*) __P((struct vop_rmdir_args *)))mfs_badop) +#define mfs_symlink ((int (*) __P((struct vop_symlink_args *)))mfs_badop) +#define mfs_readdir ((int (*) __P((struct vop_readdir_args *)))mfs_badop) +#define mfs_readlink ((int (*) __P((struct vop_readlink_args *)))mfs_badop) +#define mfs_abortop ((int (*) __P((struct vop_abortop_args *)))mfs_badop) +#define mfs_lock ((int (*) __P((struct vop_lock_args *)))nullop) +#define mfs_unlock ((int (*) __P((struct vop_unlock_args *)))nullop) +#define mfs_islocked ((int (*) __P((struct vop_islocked_args *)))nullop) +#define mfs_pathconf ((int (*) __P((struct vop_pathconf_args *)))mfs_badop) +#define mfs_advlock ((int (*) __P((struct vop_advlock_args *)))mfs_badop) +#define mfs_blkatoff ((int (*) __P((struct vop_blkatoff_args *)))mfs_badop) +#define mfs_valloc ((int (*) __P((struct vop_valloc_args *)))mfs_badop) +#define mfs_vfree ((int (*) 
__P((struct vop_vfree_args *)))mfs_badop) +#define mfs_truncate ((int (*) __P((struct vop_truncate_args *)))mfs_badop) +#define mfs_update ((int (*) __P((struct vop_update_args *)))mfs_badop) +#define mfs_bwrite ((int (*) __P((struct vop_bwrite_args *)))vn_bwrite) diff --git a/bsd/ufs/ufs/Makefile b/bsd/ufs/ufs/Makefile new file mode 100644 index 000000000..5717ecbea --- /dev/null +++ b/bsd/ufs/ufs/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + dinode.h dir.h inode.h lockf.h quota.h ufs_extern.h ufsmount.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = ufs/ufs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = ufs/ufs + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/ufs/ufs/dinode.h b/bsd/ufs/ufs/dinode.h new file mode 100644 index 000000000..c28034c48 --- /dev/null +++ b/bsd/ufs/ufs/dinode.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dinode.h 8.9 (Berkeley) 3/29/95 + */ +#ifndef _UFS_DINODE_H_ +#define _UFS_DINODE_H_ + + +/* + * The root inode is the root of the file system. Inode 0 can't be used for + * normal purposes and historically bad blocks were linked to inode 1, thus + * the root inode is 2. (Inode 1 is no longer used for this purpose, however + * numerous dump tapes make this assumption, so we are stuck with it). + */ +#define ROOTINO ((ino_t)2) + +/* + * The Whiteout inode# is a dummy non-zero inode number which will + * never be allocated to a real file. It is used as a place holder + * in the directory entry which has been tagged as a DT_W entry. + * See the comments about ROOTINO above. + */ +#define WINO ((ino_t)1) + +/* + * A dinode contains all the meta-data associated with a UFS file. + * This structure defines the on-disk format of a dinode. Since + * this structure describes an on-disk structure, all its fields + * are defined by types with precise widths. + */ + +#define NDADDR 12 /* Direct addresses in inode. */ +#define NIADDR 3 /* Indirect addresses in inode. */ + +typedef int32_t ufs_daddr_t; + +struct dinode { + u_int16_t di_mode; /* 0: IFMT, permissions; see below. 
*/ + int16_t di_nlink; /* 2: File link count. */ + union { + u_int16_t oldids[2]; /* 4: Ffs: old user and group ids. */ + int32_t inumber; /* 4: Lfs: inode number. */ + } di_u; + u_int64_t di_size; /* 8: File byte count. */ + int32_t di_atime; /* 16: Last access time. */ + int32_t di_atimensec; /* 20: Last access time. */ + int32_t di_mtime; /* 24: Last modified time. */ + int32_t di_mtimensec; /* 28: Last modified time. */ + int32_t di_ctime; /* 32: Last inode change time. */ + int32_t di_ctimensec; /* 36: Last inode change time. */ + ufs_daddr_t di_db[NDADDR]; /* 40: Direct disk blocks. */ + ufs_daddr_t di_ib[NIADDR]; /* 88: Indirect disk blocks. */ + u_int32_t di_flags; /* 100: Status flags (chflags). */ + u_int32_t di_blocks; /* 104: Blocks actually held. */ + int32_t di_gen; /* 108: Generation number. */ + u_int32_t di_uid; /* 112: File owner. */ + u_int32_t di_gid; /* 116: File group. */ + int32_t di_spare[2]; /* 120: Reserved; currently unused */ +}; + +/* + * The di_db fields may be overlaid with other information for + * file types that do not have associated disk storage. Block + * and character devices overlay the first data block with their + * dev_t value. Short symbolic links place their path in the + * di_db area. + */ +#define di_inumber di_u.inumber +#define di_ogid di_u.oldids[1] +#define di_ouid di_u.oldids[0] +#define di_rdev di_db[0] +#define di_shortlink di_db +#define MAXSYMLINKLEN ((NDADDR + NIADDR) * sizeof(ufs_daddr_t)) + +/* File permissions. */ +#define IEXEC 0000100 /* Executable. */ +#define IWRITE 0000200 /* Writeable. */ +#define IREAD 0000400 /* Readable. */ +#define ISVTX 0001000 /* Sticky bit. */ +#define ISGID 0002000 /* Set-gid. */ +#define ISUID 0004000 /* Set-uid. */ + +/* File types. */ +#define IFMT 0170000 /* Mask of file type. */ +#define IFIFO 0010000 /* Named pipe (fifo). */ +#define IFCHR 0020000 /* Character device. */ +#define IFDIR 0040000 /* Directory file. */ +#define IFBLK 0060000 /* Block device. 
*/ +#define IFREG 0100000 /* Regular file. */ +#define IFLNK 0120000 /* Symbolic link. */ +#define IFSOCK 0140000 /* UNIX domain socket. */ +#define IFWHT 0160000 /* Whiteout. */ + +#endif /* ! _UFS_DINODE_H_ */ diff --git a/bsd/ufs/ufs/dir.h b/bsd/ufs/ufs/dir.h new file mode 100644 index 000000000..2b8527ddc --- /dev/null +++ b/bsd/ufs/ufs/dir.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)dir.h 8.5 (Berkeley) 4/27/95 + */ + +#ifndef _DIR_H_ +#define _DIR_H_ + +/* + * Theoretically, directories can be more than 2Gb in length, however, in + * practice this seems unlikely. So, we define the type doff_t as a 32-bit + * quantity to keep down the cost of doing lookup on a 32-bit machine. 
+ */ +#define doff_t int32_t +#define MAXDIRSIZE (0x7fffffff) + +/* + * A directory consists of some number of blocks of DIRBLKSIZ + * bytes, where DIRBLKSIZ is chosen such that it can be transferred + * to disk in a single atomic operation (e.g. 512 bytes on most machines). + * + * Each DIRBLKSIZ byte block contains some number of directory entry + * structures, which are of variable length. Each directory entry has + * a struct direct at the front of it, containing its inode number, + * the length of the entry, and the length of the name contained in + * the entry. These are followed by the name padded to a 4 byte boundary + * with null bytes. All names are guaranteed null terminated. + * The maximum length of a name in a directory is MAXNAMLEN. + * + * The macro DIRSIZ(fmt, dp) gives the amount of space required to represent + * a directory entry. Free space in a directory is represented by + * entries which have dp->d_reclen > DIRSIZ(fmt, dp). All DIRBLKSIZ bytes + * in a directory block are claimed by the directory entries. This + * usually results in the last entry in a directory having a large + * dp->d_reclen. When entries are deleted from a directory, the + * space is returned to the previous entry in the same directory + * block by increasing its dp->d_reclen. If the first entry of + * a directory block is free, then its dp->d_ino is set to 0. + * Entries other than the first in a directory do not normally have + * dp->d_ino set to 0. 
+ */ +#ifdef __APPLE__ +#define DIRBLKSIZ 1024 +#else +#define DIRBLKSIZ DEV_BSIZE +#endif +#define MAXNAMLEN 255 + +struct direct { + u_int32_t d_ino; /* inode number of entry */ + u_int16_t d_reclen; /* length of this record */ + u_int8_t d_type; /* file type, see below */ + u_int8_t d_namlen; /* length of string in d_name */ + char d_name[MAXNAMLEN + 1];/* name with length <= MAXNAMLEN */ +}; + +/* + * File types + */ +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 + +/* + * Convert between stat structure types and directory types. + */ +#define IFTODT(mode) (((mode) & 0170000) >> 12) +#define DTTOIF(dirtype) ((dirtype) << 12) + +/* + * The DIRSIZ macro gives the minimum record length which will hold + * the directory entry. This requires the amount of space in struct direct + * without the d_name field, plus enough space for the name with a terminating + * null byte (dp->d_namlen+1), rounded up to a 4 byte boundary. + */ +#if (BYTE_ORDER == LITTLE_ENDIAN) +#define DIRSIZ(oldfmt, dp) \ + ((oldfmt) ? \ + ((sizeof(struct direct) - (MAXNAMLEN+1)) + (((dp)->d_type+1 + 3) &~ 3)) : \ + ((sizeof(struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3))) +#else +#define DIRSIZ(oldfmt, dp) \ + ((sizeof(struct direct) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3)) +#endif +#define OLDDIRFMT 1 +#define NEWDIRFMT 0 + +/* + * Template for manipulating directories. Should use struct direct's, + * but the name field is MAXNAMLEN - 1, and this just won't do. + */ +struct dirtemplate { + u_int32_t dot_ino; + int16_t dot_reclen; + u_int8_t dot_type; + u_int8_t dot_namlen; + char dot_name[4]; /* must be multiple of 4 */ + u_int32_t dotdot_ino; + int16_t dotdot_reclen; + u_int8_t dotdot_type; + u_int8_t dotdot_namlen; + char dotdot_name[4]; /* ditto */ +}; + +/* + * This is the old format of directories, sanz type element. 
+ */ +struct odirtemplate { + u_int32_t dot_ino; + int16_t dot_reclen; + u_int16_t dot_namlen; + char dot_name[4]; /* must be multiple of 4 */ + u_int32_t dotdot_ino; + int16_t dotdot_reclen; + u_int16_t dotdot_namlen; + char dotdot_name[4]; /* ditto */ +}; +#endif /* !_DIR_H_ */ diff --git a/bsd/ufs/ufs/inode.h b/bsd/ufs/ufs/inode.h new file mode 100644 index 000000000..77a8efde8 --- /dev/null +++ b/bsd/ufs/ufs/inode.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)inode.h 8.9 (Berkeley) 5/14/95 + */ +#ifndef _UFS_INDOE_H_ +#define _UFS_INDOE_H_ + +#include +#include +#include + +/* + * The inode is used to describe each active (or recently active) file in the + * UFS filesystem. 
 It is composed of two types of information. The first part + * is the information that is needed only while the file is active (such as + * the identity of the file and linkage to speed its lookup). The second part + * is the permanent meta-data associated with the file which is read in + * from the permanent dinode from long term storage when the file becomes + * active, and is put back when the file is no longer being used. + */ +struct inode { + LIST_ENTRY(inode) i_hash;/* Hash chain. */ + struct vnode *i_vnode;/* Vnode associated with this inode. */ + struct vnode *i_devvp;/* Vnode for block I/O. */ + u_int32_t i_flag; /* flags, see below */ + dev_t i_dev; /* Device associated with the inode. */ + ino_t i_number; /* The identity of the inode. */ + + union { /* Associated filesystem. */ + struct fs *fs; /* FFS */ +#if LFS + struct lfs *lfs; /* LFS */ +#endif + } inode_u; +#define i_fs inode_u.fs +#if LFS +#define i_lfs inode_u.lfs +#endif + + struct dquot *i_dquot[MAXQUOTAS]; /* Dquot structures. */ + u_quad_t i_modrev; /* Revision level for NFS lease. */ + struct lockf *i_lockf;/* Head of byte-level lock list. */ + struct lock__bsd__ i_lock; /* Inode lock. */ + /* + * Side effects; used during directory lookup. + */ + int32_t i_count; /* Size of free slot in directory. */ + doff_t i_endoff; /* End of useful stuff in directory. */ + doff_t i_diroff; /* Offset in dir, where we found last entry. */ + doff_t i_offset; /* Offset of free space in directory. */ + ino_t i_ino; /* Inode number of found directory. */ + u_int32_t i_reclen; /* Size of found directory entry. */ + /* + * The on-disk dinode itself. + */ + struct dinode i_din; /* 128 bytes of the on-disk dinode. 
*/ +}; + +#define i_atime i_din.di_atime +#define i_atimensec i_din.di_atimensec +#define i_blocks i_din.di_blocks +#define i_ctime i_din.di_ctime +#define i_ctimensec i_din.di_ctimensec +#define i_db i_din.di_db +#define i_flags i_din.di_flags +#define i_gen i_din.di_gen +#define i_gid i_din.di_gid +#define i_ib i_din.di_ib +#define i_mode i_din.di_mode +#define i_mtime i_din.di_mtime +#define i_mtimensec i_din.di_mtimensec +#define i_nlink i_din.di_nlink +#define i_rdev i_din.di_rdev +#define i_shortlink i_din.di_shortlink +#define i_size i_din.di_size +#define i_uid i_din.di_uid +#define i_spare i_din.di_spare +#define i_oldids i_din.di_u.oldids +#define i_inumber i_din.di_u.inumber + +/* These flags are kept in i_flag. */ +#define IN_ACCESS 0x0001 /* Access time update request. */ +#define IN_CHANGE 0x0002 /* Inode change time update request. */ +#define IN_UPDATE 0x0004 /* Modification time update request. */ +#define IN_MODIFIED 0x0008 /* Inode has been modified. */ +#define IN_RENAME 0x0010 /* Inode is being renamed. */ +#define IN_SHLOCK 0x0020 /* File has shared lock. */ +#define IN_EXLOCK 0x0040 /* File has exclusive lock. */ + +#ifdef KERNEL +/* + * Structure used to pass around logical block paths generated by + * ufs_getlbns and used by truncate and bmap code. + */ +struct indir { + ufs_daddr_t in_lbn; /* Logical block number. */ + int in_off; /* Offset in buffer. */ + int in_exists; /* Flag if the block exists. */ +}; + +/* Convert between inode pointers and vnode pointers. 
*/ +#define VTOI(vp) ((struct inode *)(vp)->v_data) +#define ITOV(ip) ((ip)->i_vnode) + +#define ITIMES(ip, t1, t2) { \ + if ((ip)->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) { \ + (ip)->i_flag |= IN_MODIFIED; \ + if ((ip)->i_flag & IN_ACCESS) \ + (ip)->i_atime = (t1)->tv_sec; \ + if ((ip)->i_flag & IN_UPDATE) { \ + (ip)->i_mtime = (t2)->tv_sec; \ + (ip)->i_modrev++; \ + } \ + if ((ip)->i_flag & IN_CHANGE) \ + (ip)->i_ctime = time.tv_sec; \ + (ip)->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE); \ + } \ +} + +/* This overlays the fid structure (see mount.h). */ +struct ufid { + u_int16_t ufid_len; /* Length of structure. */ + u_int16_t ufid_pad; /* Force 32-bit alignment. */ + ino_t ufid_ino; /* File number (ino). */ + int32_t ufid_gen; /* Generation number. */ +}; +#endif /* KERNEL */ + +#endif /* ! _UFS_INDOE_H_ */ diff --git a/bsd/ufs/ufs/lockf.h b/bsd/ufs/ufs/lockf.h new file mode 100644 index 000000000..a780d93c5 --- /dev/null +++ b/bsd/ufs/ufs/lockf.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Scooter Morris at Genentech Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)lockf.h 8.2 (Berkeley) 10/26/94 + */ +#ifndef _UFS_LOCKF_H_ +#define _UFS_LOCKF_H_ + +/* + * The lockf structure is a kernel structure which contains the information + * associated with a byte range lock. The lockf structures are linked into + * the inode structure. Locks are sorted by the starting byte of the lock for + * efficiency. + */ +TAILQ_HEAD(locklist, lockf); + +struct lockf { + short lf_flags; /* Semantics: F_POSIX, F_FLOCK, F_WAIT */ + short lf_type; /* Lock type: F_RDLCK, F_WRLCK */ + off_t lf_start; /* Byte # of the start of the lock */ + off_t lf_end; /* Byte # of the end of the lock (-1=EOF) */ + caddr_t lf_id; /* Id of the resource holding the lock */ + struct inode *lf_inode; /* Back pointer to the inode */ + struct lockf *lf_next; /* Pointer to the next lock on this inode */ + struct locklist lf_blkhd; /* List of requests blocked on this lock */ + TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */ +}; + +/* Maximum length of sleep chains to traverse to try and detect deadlock. */ +#define MAXDEPTH 50 + +__BEGIN_DECLS +void lf_addblock __P((struct lockf *, struct lockf *)); +int lf_clearlock __P((struct lockf *)); +int lf_findoverlap __P((struct lockf *, + struct lockf *, int, struct lockf ***, struct lockf **)); +struct lockf * + lf_getblock __P((struct lockf *)); +int lf_getlock __P((struct lockf *, struct flock *)); +int lf_setlock __P((struct lockf *)); +void lf_split __P((struct lockf *, struct lockf *)); +void lf_wakelock __P((struct lockf *)); +__END_DECLS + +#ifdef LOCKF_DEBUG +extern int lockf_debug; + +__BEGIN_DECLS +void lf_print __P((char *, struct lockf *)); +void lf_printlist __P((char *, struct lockf *)); +__END_DECLS +#endif + +#endif /* ! _UFS_LOCKF_H_ */ + diff --git a/bsd/ufs/ufs/quota.h b/bsd/ufs/ufs/quota.h new file mode 100644 index 000000000..3a718b1c9 --- /dev/null +++ b/bsd/ufs/ufs/quota.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Robert Elz at The University of Melbourne. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)quota.h 8.3 (Berkeley) 8/19/94 + */ + +#ifndef _QUOTA_ +#define _QUOTA_ + +/* + * Definitions for disk quotas imposed on the average user + * (big brother finally hits UNIX). + * + * The following constants define the amount of time given a user before the + * soft limits are treated as hard limits (usually resulting in an allocation + * failure). The timer is started when the user crosses their soft limit, it + * is reset when they go below their soft limit. + */ +#define MAX_IQ_TIME (7*24*60*60) /* seconds in 1 week */ +#define MAX_DQ_TIME (7*24*60*60) /* seconds in 1 week */ + +/* + * The following constants define the usage of the quota file array in the + * ufsmount structure and dquot array in the inode structure. The semantics + * of the elements of these arrays are defined in the routine getinoquota; + * the remainder of the quota code treats them generically and need not be + * inspected when changing the size of the array. 
+ */ +#define MAXQUOTAS 2 +#define USRQUOTA 0 /* element used for user quotas */ +#define GRPQUOTA 1 /* element used for group quotas */ + +/* + * Definitions for the default names of the quotas files. + */ +#define INITQFNAMES { \ + "user", /* USRQUOTA */ \ + "group", /* GRPQUOTA */ \ + "undefined", \ +}; +#define QUOTAFILENAME "quota" +#define QUOTAGROUP "operator" + +/* + * Command definitions for the 'quotactl' system call. The commands are + * broken into a main command defined below and a subcommand that is used + * to convey the type of quota that is being manipulated (see above). + */ +#define SUBCMDMASK 0x00ff +#define SUBCMDSHIFT 8 +#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK)) + +#define Q_QUOTAON 0x0100 /* enable quotas */ +#define Q_QUOTAOFF 0x0200 /* disable quotas */ +#define Q_GETQUOTA 0x0300 /* get limits and usage */ +#define Q_SETQUOTA 0x0400 /* set limits and usage */ +#define Q_SETUSE 0x0500 /* set usage */ +#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ + +/* + * The following structure defines the format of the disk quota file + * (as it appears on disk) - the file is an array of these structures + * indexed by user or group number. The setquota system call establishes + * the vnode for each quota file (a pointer is retained in the ufsmount + * structure). 
+ */ +struct dqblk { + u_int32_t dqb_bhardlimit; /* absolute limit on disk blks alloc */ + u_int32_t dqb_bsoftlimit; /* preferred limit on disk blks */ + u_int32_t dqb_curblocks; /* current block count */ + u_int32_t dqb_ihardlimit; /* maximum # allocated inodes + 1 */ + u_int32_t dqb_isoftlimit; /* preferred inode limit */ + u_int32_t dqb_curinodes; /* current # allocated inodes */ + time_t dqb_btime; /* time limit for excessive disk use */ + time_t dqb_itime; /* time limit for excessive files */ +}; + +#ifndef KERNEL + +#include + +__BEGIN_DECLS +int quotactl __P((char *, int, int, caddr_t)); +__END_DECLS +#endif /* !KERNEL */ + +#ifdef KERNEL +#include + +/* + * The following structure records disk usage for a user or group on a + * filesystem. There is one allocated for each quota that exists on any + * filesystem for the current user or group. A cache is kept of recently + * used entries. + */ +struct dquot { + LIST_ENTRY(dquot) dq_hash; /* hash list */ + TAILQ_ENTRY(dquot) dq_freelist; /* free list */ + u_int16_t dq_flags; /* flags, see below */ + u_int16_t dq_cnt; /* count of active references */ + u_int16_t dq_spare; /* unused spare padding */ + u_int16_t dq_type; /* quota type of this dquot */ + u_int32_t dq_id; /* identifier this applies to */ + struct ufsmount *dq_ump; /* filesystem that this is taken from */ + struct dqblk dq_dqb; /* actual usage & quotas */ +}; +/* + * Flag values. + */ +#define DQ_LOCK 0x01 /* this quota locked (no MODS) */ +#define DQ_WANT 0x02 /* wakeup on unlock */ +#define DQ_MOD 0x04 /* this quota modified since read */ +#define DQ_FAKE 0x08 /* no limits here, just usage */ +#define DQ_BLKS 0x10 /* has been warned about blk limit */ +#define DQ_INODS 0x20 /* has been warned about inode limit */ +/* + * Shorthand notation. 
+ */ +#define dq_bhardlimit dq_dqb.dqb_bhardlimit +#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit +#define dq_curblocks dq_dqb.dqb_curblocks +#define dq_ihardlimit dq_dqb.dqb_ihardlimit +#define dq_isoftlimit dq_dqb.dqb_isoftlimit +#define dq_curinodes dq_dqb.dqb_curinodes +#define dq_btime dq_dqb.dqb_btime +#define dq_itime dq_dqb.dqb_itime + +/* + * If the system has never checked for a quota for this file, then it is + * set to NODQUOT. Once a write attempt is made the inode pointer is set + * to reference a dquot structure. + */ +#define NODQUOT NULL + +/* + * Flags to chkdq() and chkiq() + */ +#define FORCE 0x01 /* force usage changes independent of limits */ +#define CHOWN 0x02 /* (advisory) change initiated by chown */ + +/* + * Macros to avoid subroutine calls to trivial functions. + */ +#if DIAGNOSTIC +#define DQREF(dq) dqref(dq) +#else +#define DQREF(dq) (dq)->dq_cnt++ +#endif + +#include + +struct dquot; +struct inode; +struct mount; +struct proc; +struct ucred; +struct ufsmount; +struct vnode; +__BEGIN_DECLS +int chkdq __P((struct inode *, long, struct ucred *, int)); +int chkdqchg __P((struct inode *, long, struct ucred *, int)); +int chkiq __P((struct inode *, long, struct ucred *, int)); +int chkiqchg __P((struct inode *, long, struct ucred *, int)); +void dqflush __P((struct vnode *)); +int dqget __P((struct vnode *, + u_long, struct ufsmount *, int, struct dquot **)); +void dqinit __P((void)); +void dqref __P((struct dquot *)); +void dqrele __P((struct vnode *, struct dquot *)); +int dqsync __P((struct vnode *, struct dquot *)); +int getinoquota __P((struct inode *)); +int getquota __P((struct mount *, u_long, int, caddr_t)); +int qsync __P((struct mount *mp)); +int quotaoff __P((struct proc *, struct mount *, int)); +int quotaon __P((struct proc *, struct mount *, int, caddr_t)); +int setquota __P((struct mount *, u_long, int, caddr_t)); +int setuse __P((struct mount *, u_long, int, caddr_t)); +int ufs_quotactl __P((struct mount *, int, uid_t, 
caddr_t, struct proc *)); +__END_DECLS + +#if DIAGNOSTIC +__BEGIN_DECLS +void chkdquot __P((struct inode *)); +__END_DECLS +#endif +#endif /* KERNEL */ + +#endif /* _QUOTA_ */ diff --git a/bsd/ufs/ufs/ufs_bmap.c b/bsd/ufs/ufs/ufs_bmap.c new file mode 100644 index 000000000..4acc2dd91 --- /dev/null +++ b/bsd/ufs/ufs/ufs_bmap.c @@ -0,0 +1,682 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_bmap.c 8.7 (Berkeley) 3/21/95 + */ +/* + * HISTORY + * 11-July-97 Umesh Vaishampayan (umeshv@apple.com) + * Cleanup. Fixed compilation error when tracing is turned on. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +/* + * Bmap converts the logical block number of a file to its physical block + * number on the disk. The conversion is done by using the logical block + * number to index into the array of block pointers described by the dinode. + */ +int +ufs_bmap(ap) + struct vop_bmap_args /* { + struct vnode *a_vp; + ufs_daddr_t a_bn; + struct vnode **a_vpp; + ufs_daddr_t *a_bnp; + int *a_runp; + } */ *ap; +{ + /* + * Check for underlying vnode requests and ensure that logical + * to physical mapping is requested. + */ + if (ap->a_vpp != NULL) + *ap->a_vpp = VTOI(ap->a_vp)->i_devvp; + if (ap->a_bnp == NULL) + return (0); + + return (ufs_bmaparray(ap->a_vp, ap->a_bn, ap->a_bnp, NULL, NULL, + ap->a_runp)); +} + +/* + * Indirect blocks are now on the vnode for the file. They are given negative + * logical block numbers. Indirect blocks are addressed by the negative + * address of the first data block to which they point. Double indirect blocks + * are addressed by one less than the address of the first indirect block to + * which they point. Triple indirect blocks are addressed by one less than + * the address of the first double indirect block to which they point. + * + * ufs_bmaparray does the bmap conversion, and if requested returns the + * array of logical blocks which must be traversed to get to a block. + * Each entry contains the offset into that block that gets you to the + * next block and the disk address of the block (if it is assigned). 
+ */ + +int +ufs_bmaparray(vp, bn, bnp, ap, nump, runp) + struct vnode *vp; + ufs_daddr_t bn; + ufs_daddr_t *bnp; + struct indir *ap; + int *nump; + int *runp; +{ + register struct inode *ip; + struct buf *bp; + struct ufsmount *ump; + struct mount *mp; + struct vnode *devvp; + struct indir a[NIADDR], *xap; + ufs_daddr_t daddr; + long metalbn; + int error, maxrun, num; +#if REV_ENDIAN_FS + int rev_endian=0; +#endif /* REV_ENDIAN_FS */ + + ip = VTOI(vp); + mp = vp->v_mount; + ump = VFSTOUFS(mp); + +#if REV_ENDIAN_FS + rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + +#if DIAGNOSTIC + if (ap != NULL && nump == NULL || ap == NULL && nump != NULL) + panic("ufs_bmaparray: invalid arguments"); +#endif + + if (runp) { + /* + * XXX + * If MAXPHYSIO is the largest transfer the disks can handle, + * we probably want maxrun to be 1 block less so that we + * don't create a block larger than the device can handle. + */ + *runp = 0; + maxrun = MAXPHYSIO / mp->mnt_stat.f_iosize - 1; + } + + xap = ap == NULL ? a : ap; + if (!nump) + nump = # + if (error = ufs_getlbns(vp, bn, xap, nump)) + return (error); + + num = *nump; + if (num == 0) { + *bnp = blkptrtodb(ump, ip->i_db[bn]); + if (*bnp == 0) + *bnp = -1; + else if (runp) + for (++bn; bn < NDADDR && *runp < maxrun && + is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]); + ++bn, ++*runp); + return (0); + } + + + /* Get disk address out of indirect block array */ + daddr = ip->i_ib[xap->in_off]; + + devvp = VFSTOUFS(vp->v_mount)->um_devvp; + for (bp = NULL, ++xap; --num; ++xap) { + /* + * Exit the loop if there is no disk address assigned yet and + * the indirect block isn't in the cache, or if we were + * looking for an indirect block and we've found it. + */ + + metalbn = xap->in_lbn; + if (daddr == 0 && !incore(vp, metalbn) || metalbn == bn) + break; + /* + * If we get here, we've either got the block in the cache + * or we have a disk address for it, go fetch it. 
+ */ + if (bp) + brelse(bp); + + xap->in_exists = 1; + bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0, BLK_META); + if (bp->b_flags & (B_DONE | B_DELWRI)) { + trace(TR_BREADHIT, pack(vp, mp->mnt_stat.f_iosize), metalbn); + } +#if DIAGNOSTIC + else if (!daddr) + panic("ufs_bmaparry: indirect block not in cache"); +#endif + else { + trace(TR_BREADMISS, pack(vp, mp->mnt_stat.f_iosize), metalbn); + bp->b_blkno = blkptrtodb(ump, daddr); + bp->b_flags |= B_READ; + VOP_STRATEGY(bp); + current_proc()->p_stats->p_ru.ru_inblock++; /* XXX */ + if (error = biowait(bp)) { + brelse(bp); + return (error); + } + } + + daddr = ((ufs_daddr_t *)bp->b_data)[xap->in_off]; +#if REV_ENDIAN_FS + if (rev_endian) + daddr = NXSwapLong(daddr); +#endif /* REV_ENDIAN_FS */ + if (num == 1 && daddr && runp) { +#if REV_ENDIAN_FS + if (rev_endian) { + for (bn = xap->in_off + 1; + bn < MNINDIR(ump) && *runp < maxrun && + is_sequential(ump, + NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn - 1]), + NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn])); + ++bn, ++*runp); + } else { +#endif /* REV_ENDIAN_FS */ + for (bn = xap->in_off + 1; + bn < MNINDIR(ump) && *runp < maxrun && + is_sequential(ump, + ((ufs_daddr_t *)bp->b_data)[bn - 1], + ((ufs_daddr_t *)bp->b_data)[bn]); + ++bn, ++*runp); +#if REV_ENDIAN_FS + } +#endif /* REV_ENDIAN_FS */ + } + } + if (bp) + brelse(bp); + + daddr = blkptrtodb(ump, daddr); + *bnp = daddr == 0 ? -1 : daddr; + return (0); +} + +/* + * Create an array of logical block number/offset pairs which represent the + * path of indirect blocks required to access a data block. The first "pair" + * contains the logical block number of the appropriate single, double or + * triple indirect block and the offset into the inode indirect block array. + * Note, the logical block number of the inode single/double/triple indirect + * block appears twice in the array, once with the offset into the i_ib and + * once with the offset into the page itself. 
+ */ +int +ufs_getlbns(vp, bn, ap, nump) + struct vnode *vp; + ufs_daddr_t bn; + struct indir *ap; + int *nump; +{ + long metalbn, realbn; + struct ufsmount *ump; + int blockcnt, i, numlevels, off; + + ump = VFSTOUFS(vp->v_mount); + if (nump) + *nump = 0; + numlevels = 0; + realbn = bn; + if ((long)bn < 0) + bn = -(long)bn; + + /* The first NDADDR blocks are direct blocks. */ + if (bn < NDADDR) + return (0); + + /* + * Determine the number of levels of indirection. After this loop + * is done, blockcnt indicates the number of data blocks possible + * at the given level of indirection, and NIADDR - i is the number + * of levels of indirection needed to locate the requested block. + */ + for (blockcnt = 1, i = NIADDR, bn -= NDADDR;; i--, bn -= blockcnt) { + if (i == 0) + return (EFBIG); + blockcnt *= MNINDIR(ump); + if (bn < blockcnt) + break; + } + + /* Calculate the address of the first meta-block. */ + if (realbn >= 0) + metalbn = -(realbn - bn + NIADDR - i); + else + metalbn = -(-realbn - bn + NIADDR - i); + + /* + * At each iteration, off is the offset into the bap array which is + * an array of disk addresses at the current level of indirection. + * The logical block number and the offset in that block are stored + * into the argument array. + */ + ap->in_lbn = metalbn; + ap->in_off = off = NIADDR - i; + ap->in_exists = 0; + ap++; + for (++numlevels; i <= NIADDR; i++) { + /* If searching for a meta-data block, quit when found. */ + if (metalbn == realbn) + break; + + blockcnt /= MNINDIR(ump); + off = (bn / blockcnt) % MNINDIR(ump); + + ++numlevels; + ap->in_lbn = metalbn; + ap->in_off = off; + ap->in_exists = 0; + ++ap; + + metalbn -= -1 + off * blockcnt; + } + if (nump) + *nump = numlevels; + return (0); +} +/* + * Cmap converts the file offset of a file to its physical block + * number on the disk and returns contiguous size for transfer. 
+ */ +int +ufs_cmap(ap) + struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_foffset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; + } */ *ap; +{ + struct vnode * vp = ap->a_vp; + ufs_daddr_t *bnp = ap->a_bpn; + size_t *runp = ap->a_run; + int size = ap->a_size; + daddr_t bn; + int nblks; + register struct inode *ip; + ufs_daddr_t daddr = 0; + int devBlockSize=0; + struct fs *fs; + int retsize=0; + int error=0; + + ip = VTOI(vp); + fs = ip->i_fs; + + + if (blkoff(fs, ap->a_foffset)) { + panic("ufs_cmap; allocation requested inside a block"); + } + + bn = (daddr_t)lblkno(fs, ap->a_foffset); + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + if (size % devBlockSize) { + panic("ufs_cmap: size is not multiple of device block size\n"); + } + + if (error = VOP_BMAP(vp, bn, (struct vnode **) 0, &daddr, &nblks)) { + return(error); + } + + retsize = nblks * fs->fs_bsize; + + if (bnp) + *bnp = daddr; + + if (ap->a_poff) + *(int *)ap->a_poff = 0; + + if (daddr == -1) { + if (size < fs->fs_bsize) { + retsize = fragroundup(fs, size); + if(size >= retsize) + *runp = retsize; + else + *runp = size; + } else { + *runp = fs->fs_bsize; + } + return(0); + } + + if (runp) { + if ((size < fs->fs_bsize)) { + *runp = size; + return(0); + } + if (retsize) { + retsize += fs->fs_bsize; + if(size >= retsize) + *runp = retsize; + else + *runp = size; + } else { + if (size < fs->fs_bsize) { + retsize = fragroundup(fs, size); + if(size >= retsize) + *runp = retsize; + else + *runp = size; + } else { + *runp = fs->fs_bsize; + } + } + } + return (0); +} + + +#if NOTTOBEUSED +/* + * Cmap converts a the file offset of a file to its physical block + * number on the disk And returns contiguous size for transfer. 
 */
/*
 * Dead code: compiled only under NOTTOBEUSED.  Kept for reference; it walks
 * the indirect-block chain itself instead of delegating to VOP_BMAP, and
 * cross-checks its answer against VOP_BMAP under the "#if 1" blocks.
 *
 * NOTE(review): `maxrun` is read in the run-length loops below but is never
 * initialized anywhere in this function -- another reason this path is
 * disabled.
 */
int
ufs_cmap(ap)
	struct vop_cmap_args /* {
		struct vnode *a_vp;
		off_t a_foffset;
		size_t a_size;
		daddr_t *a_bpn;
		size_t *a_run;
		void *a_poff;
	} */ *ap;
{
	struct vnode * vp = ap->a_vp;
	ufs_daddr_t *bnp = ap->a_bpn;
	size_t *runp = ap->a_run;
	daddr_t bn;
	int nblks, blks;
	int *nump;
	register struct inode *ip;
	struct buf *bp;
	struct ufsmount *ump;
	struct mount *mp;
	struct vnode *devvp;
	struct indir a[NIADDR], *xap;
	ufs_daddr_t daddr;
	long metalbn;
	int error, maxrun, num;
	int devBlockSize=0;
	struct fs *fs;
	int size = ap->a_size;
	int block_offset=0;
	int retsize=0;
#if 1
	daddr_t orig_blkno;
	daddr_t orig_bblkno;
#endif /* 1 */
#if REV_ENDIAN_FS
	int rev_endian=0;
#endif /* REV_ENDIAN_FS */

	ip = VTOI(vp);
	fs = ip->i_fs;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);

	VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize);
	bn = (daddr_t)lblkno(fs, ap->a_foffset);

	if (size % devBlockSize) {
		panic("ufs_cmap: size is not multiple of device block size\n");
	}

	block_offset = blkoff(fs, ap->a_foffset);
	if (block_offset) {
		panic("ufs_cmap; allocation requested inside a block");
	}

#if 1
	VOP_OFFTOBLK(vp, ap->a_foffset, & orig_blkno);
#endif /* 1 */
	/* less than block size and not block offset aligned */
	if ( (size < fs->fs_bsize) && fragoff(fs, size) && block_offset ) {
		panic("ffs_cmap: size not a mult of fragment\n");
	}
#if 0
	if (size > fs->fs_bsize && fragoff(fs, size)) {
		panic("ffs_cmap: more than bsize & not a multiple of fragment\n");
	}
#endif /* 0 */
#if REV_ENDIAN_FS
	rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	if(runp)
		*runp = 0;

	/* Cap a single transfer at MAXPHYSIO. */
	if ( size > MAXPHYSIO)
		size = MAXPHYSIO;
	nblks = (blkroundup(fs, size))/fs->fs_bsize;

	xap = a;
	num = 0;
	if (error = ufs_getlbns(vp, bn, xap, &num))
		return (error);

	blks = 0;
	if (num == 0) {
		/* Direct block: translate and count sequential neighbors. */
		daddr = blkptrtodb(ump, ip->i_db[bn]);
		*bnp = ((daddr == 0) ?
			-1 : daddr);
		if (daddr && runp) {
			for (++bn; bn < NDADDR && blks < nblks &&
			    ip->i_db[bn] &&
			    is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
			    ++bn, ++blks);

			if (blks) {
				retsize = lblktosize(fs, blks);
				if(size >= retsize)
					*runp = retsize;
				else
					*runp = size;
			} else {
				if (size < fs->fs_bsize) {
					retsize = fragroundup(fs, size);
					if(size >= retsize)
						*runp = retsize;
					else
						*runp = size;
				} else {
					*runp = fs->fs_bsize;
				}
			}
			if (ap->a_poff)
				*(int *)ap->a_poff = 0;
		}
#if 1
		/* Sanity check against the VOP_BMAP answer. */
		if (VOP_BMAP(vp, orig_blkno, NULL, &orig_bblkno, NULL)) {
			panic("vop_bmap failed\n");
		}
		if(daddr != orig_bblkno) {
			panic("vop_bmap and vop_cmap differ\n");
		}
#endif /* 1 */
		return (0);
	}


	/* Get disk address out of indirect block array */
	daddr = ip->i_ib[xap->in_off];

	devvp = VFSTOUFS(vp->v_mount)->um_devvp;
	for (bp = NULL, ++xap; --num; ++xap) {
		/*
		 * Exit the loop if there is no disk address assigned yet
		 * or if we were looking for an indirect block and we've
		 * found it.
		 */

		metalbn = xap->in_lbn;
		if (daddr == 0 || metalbn == bn)
			break;
		/*
		 * We have a disk address for it, go fetch it.
		 */
		if (bp)
			brelse(bp);

		xap->in_exists = 1;
		bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0, BLK_META);
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			trace(TR_BREADHIT, pack(vp, mp->mnt_stat.f_iosize), metalbn);
		}
		else {
			/* Not cached: issue a synchronous read. */
			trace(TR_BREADMISS, pack(vp, mp->mnt_stat.f_iosize), metalbn);
			bp->b_blkno = blkptrtodb(ump, daddr);
			bp->b_flags |= B_READ;
			VOP_STRATEGY(bp);
			current_proc()->p_stats->p_ru.ru_inblock++;	/* XXX */
			if (error = biowait(bp)) {
				brelse(bp);
				return (error);
			}
		}

		daddr = ((ufs_daddr_t *)bp->b_data)[xap->in_off];
#if REV_ENDIAN_FS
		if (rev_endian)
			daddr = NXSwapLong(daddr);
#endif /* REV_ENDIAN_FS */
		if (num == 1 && daddr && runp) {
			blks = 0;
#if REV_ENDIAN_FS
			if (rev_endian) {
				/* NOTE(review): maxrun is uninitialized here. */
				for (bn = xap->in_off + 1;
				    bn < MNINDIR(ump) && blks < maxrun &&
				    is_sequential(ump,
				    NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn - 1]),
				    NXSwapLong(((ufs_daddr_t *)bp->b_data)[bn]));
				    ++bn, ++blks);
			} else {
#endif /* REV_ENDIAN_FS */
			/* NOTE(review): maxrun is uninitialized here. */
			for (bn = xap->in_off + 1;
			    bn < MNINDIR(ump) && blks < maxrun &&
			    is_sequential(ump,
			    ((ufs_daddr_t *)bp->b_data)[bn - 1],
			    ((ufs_daddr_t *)bp->b_data)[bn]);
			    ++bn, ++blks);
#if REV_ENDIAN_FS
			}
#endif /* REV_ENDIAN_FS */
		}
	}
	if (bp)
		brelse(bp);

	daddr = blkptrtodb(ump, daddr);
	*bnp = ((daddr == 0) ?
		-1 : daddr);
	if (daddr && runp) {
		if (blks) {
			retsize = lblktosize(fs, blks);
			if(size >= retsize)
				*runp = retsize;
			else
				*runp = size;
		} else {
			if (size < fs->fs_bsize) {
				retsize = fragroundup(fs, size);
				if(size >= retsize)
					*runp = retsize;
				else
					*runp = size;
			} else {
				*runp = fs->fs_bsize;
			}
		}

	}
	if (daddr && ap->a_poff)
		*(int *)ap->a_poff = 0;
#if 1
	if (VOP_BMAP(vp, orig_blkno, (struct vnode **) 0, &orig_bblkno, 0)) {
		panic("vop_bmap failed\n");
	}
	if(daddr != orig_bblkno) {
		panic("vop_bmap and vop_cmap differ\n");
	}
#endif /* 1 */
	return (0);
}
#endif /* NOTTOBEUSED */
diff --git a/bsd/ufs/ufs/ufs_byte_order.c b/bsd/ufs/ufs/ufs_byte_order.c
new file mode 100644
index 000000000..28edd44e2
--- /dev/null
+++ b/bsd/ufs/ufs/ufs_byte_order.c
@@ -0,0 +1,401 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright 1998 Apple Computer, Inc.
 *
 * UFS byte swapping routines to make a big endian file system useful on a
 * little endian machine.
 *
 * HISTORY
 *
 * 16 Feb 1998 A.
Ramesh at Apple
 *	MacOSX version created.
 */

/* NOTE(review): the header names on these #include lines were lost in
 * extraction -- restore from the original file. */
#include
#include
#include
#include



/*
 * Swap primitives: each macro swaps one field in place.  The disabled
 * branch used explicit big-to-host conversions; the live branch always
 * swaps unconditionally (the callers decide when swapping is needed).
 */
#if 0
#define	byte_swap_longlong(thing) ((thing) = NXSwapBigLongLongToHost(thing))
#define	byte_swap_int(thing) ((thing) = NXSwapBigLongToHost(thing))
#define	byte_swap_short(thing) ((thing) = NXSwapBigShortToHost(thing))
#else
#define	byte_swap_longlong(thing) ((thing) = NXSwapLongLong(thing))
#define	byte_swap_int(thing) ((thing) = NXSwapLong(thing))
#define	byte_swap_short(thing) ((thing) = NXSwapShort(thing))
#endif

/* Swap an array of 64-bit values in place. */
void
byte_swap_longlongs(unsigned long long *array, int count)
{
	/* NOTE(review): loop counter is unsigned long long while count is
	 * int -- works, but an int index would be the natural choice. */
	register unsigned long long i;

	for (i = 0; i < count; i++)
		byte_swap_longlong(array[i]);
}

/* Swap an array of 32-bit values in place. */
void
byte_swap_ints(int *array, int count)
{
	register int i;

	for (i = 0; i < count; i++)
		byte_swap_int(array[i]);
}


/* Swap an array of 16-bit values in place. */
void
byte_swap_shorts(short *array, int count)
{
	register int i;

	for (i = 0; i < count; i++)
		byte_swap_short(array[i]);
}


/*
 * Swap a superblock that was just read in (disk -> host order).
 * Fixed fields first, then the rotational position table, whose
 * location depends on the (now host-order) fs_postbloff.
 */
void
byte_swap_sbin(struct fs *sb)
{
	u_int16_t *usptr;
	unsigned long size;

	byte_swap_ints(((int32_t *)&sb->fs_firstfield), 52);
	byte_swap_int(sb->fs_cgrotor);
	byte_swap_int(sb->fs_cpc);
	byte_swap_shorts((int16_t *)sb->fs_opostbl, 16 * 8);
	byte_swap_ints((int32_t *)sb->fs_sparecon, 50);
	byte_swap_ints((int32_t *)&sb->fs_contigsumsize, 3);
	byte_swap_longlongs((u_int64_t *)&sb->fs_maxfilesize,3);
	byte_swap_ints((int32_t *)&sb->fs_state, 6);

	/* Got these magic numbers from mkfs.c in newfs */
	if (sb->fs_nrpos != 8 || sb->fs_cpc > 16) {
		usptr = (u_int16_t *)((u_int8_t *)(sb) + (sb)->fs_postbloff);
		size = sb->fs_cpc * sb->fs_nrpos;
		byte_swap_shorts(usptr,size);	/* fs_postbloff */
	}
}

/*
 * Swap a superblock about to be written out (host -> disk order).
 * Mirror image of byte_swap_sbin: the position table is swapped first,
 * while fs_postbloff is still in host order.
 */
void
byte_swap_sbout(struct fs *sb)
{
	u_int16_t *usptr;
	unsigned long size;
	/* Got these magic numbers from mkfs.c in newfs */
	if (sb->fs_nrpos != 8 || sb->fs_cpc > 16) {
		usptr = (u_int16_t *)((u_int8_t *)(sb) + (sb)->fs_postbloff);
		size = sb->fs_cpc * sb->fs_nrpos;
		byte_swap_shorts(usptr,size);	/* fs_postbloff */
	}

	byte_swap_ints(((int32_t *)&sb->fs_firstfield), 52);
	byte_swap_int(sb->fs_cgrotor);
	byte_swap_int(sb->fs_cpc);
	byte_swap_shorts((int16_t *)sb->fs_opostbl, 16 * 8);
	byte_swap_ints((int32_t *)sb->fs_sparecon, 50);
	byte_swap_ints((int32_t *)&sb->fs_contigsumsize, 3);
	byte_swap_longlongs((u_int64_t *)&sb->fs_maxfilesize,3);
	byte_swap_ints((int32_t *)&sb->fs_state, 6);
}

/* Swap a cylinder-group summary structure (all int32 fields). */
void
byte_swap_csum(struct csum *cs)
{
	byte_swap_ints((int *) cs, sizeof(struct csum) / sizeof(int32_t));
}

/* This is for the new 4.4 cylinder group block */
/*
 * Swap a cylinder group just read in (disk -> host order).  The offset
 * fields (cg_btotoff, cg_boff, cg_clustersumoff) are swapped BEFORE being
 * used to locate the variable-length tables they point at.
 */
void
byte_swap_cgin(struct cg *cg, struct fs * fs)
{
	int32_t * ulptr;
	int16_t * usptr;
	int size;

	byte_swap_int(cg->cg_firstfield);
	byte_swap_int(cg->cg_magic);
	byte_swap_int(cg->cg_time);
	byte_swap_int(cg->cg_cgx);
	byte_swap_short(cg->cg_ncyl);
	byte_swap_short(cg->cg_niblk);
	byte_swap_int(cg->cg_ndblk);
	byte_swap_csum(&cg->cg_cs);
	byte_swap_int(cg->cg_rotor);
	byte_swap_int(cg->cg_frotor);
	byte_swap_int(cg->cg_irotor);
	byte_swap_ints(cg->cg_frsum, MAXFRAG);
	byte_swap_int(cg->cg_iusedoff);
	byte_swap_int(cg->cg_freeoff);
	byte_swap_int(cg->cg_nextfreeoff);
	byte_swap_int(cg->cg_clusteroff);
	byte_swap_int(cg->cg_nclusterblks);
	byte_swap_ints(&cg->cg_sparecon, 13);

	byte_swap_int(cg->cg_btotoff);
	ulptr = ((int32_t *)((u_int8_t *)(cg) + (cg)->cg_btotoff));
	size = fs->fs_cpg;
	byte_swap_ints(ulptr, size);	/*cg_btotoff*/

	byte_swap_int(cg->cg_boff);
	usptr = ((int16_t *)((u_int8_t *)(cg) + (cg)->cg_boff));
	size = fs->fs_cpg * fs->fs_nrpos;
	byte_swap_shorts(usptr,size);	/*cg_boff*/

	byte_swap_int(cg->cg_clustersumoff);

	if ((unsigned int)fs->fs_contigsumsize > 0) {

		ulptr = ((int32_t *)((u_int8_t *)(cg) + (cg)->cg_clustersumoff));
		size = (fs->fs_contigsumsize + 1);
		byte_swap_ints(ulptr, size);	/*cg_clustersumoff*/
	}

}

// This is for the new 4.4 cylinder group block
/*
 * Swap a cylinder group about to be written out (host -> disk order).
 * Mirror of byte_swap_cgin: the offset fields are used (while still in
 * host order) to find the tables, and only swapped afterwards.
 */
void
byte_swap_cgout(struct cg *cg, struct fs * fs)
{
	int32_t * ulptr;
	int16_t * usptr;
	int size;

	byte_swap_int(cg->cg_firstfield);
	byte_swap_int(cg->cg_magic);
	byte_swap_int(cg->cg_time);
	byte_swap_int(cg->cg_cgx);
	byte_swap_short(cg->cg_ncyl);
	byte_swap_short(cg->cg_niblk);
	byte_swap_int(cg->cg_ndblk);
	byte_swap_csum(&cg->cg_cs);
	byte_swap_int(cg->cg_rotor);
	byte_swap_int(cg->cg_frotor);
	byte_swap_int(cg->cg_irotor);
	byte_swap_ints(cg->cg_frsum, MAXFRAG);
	byte_swap_int(cg->cg_freeoff);
	byte_swap_int(cg->cg_nextfreeoff);
	byte_swap_int(cg->cg_nclusterblks);
	byte_swap_ints(&cg->cg_sparecon, 13);

	byte_swap_int(cg->cg_iusedoff);
	byte_swap_int(cg->cg_clusteroff);
	ulptr = ((int32_t *)((u_int8_t *)(cg) + (cg)->cg_btotoff));
	size = fs->fs_cpg;
	byte_swap_ints(ulptr, size);	/*cg_btotoff*/
	byte_swap_int(cg->cg_btotoff);

	usptr = ((int16_t *)((u_int8_t *)(cg) + (cg)->cg_boff));
	size = fs->fs_cpg * fs->fs_nrpos;
	byte_swap_shorts(usptr,size);	/*cg_boff*/
	byte_swap_int(cg->cg_boff);

	if ((unsigned int)fs->fs_contigsumsize > 0) {
		ulptr = ((int32_t *)((u_int8_t *)(cg) + (cg)->cg_clustersumoff));
		size = (fs->fs_contigsumsize + 1);
		byte_swap_ints(ulptr, size);	/*cg_clustersumoff*/

	}
	byte_swap_int(cg->cg_clustersumoff);

}

/* This value should correspond to the value set in the ffs_mounts */

#define RESYMLNKLEN 60

/*
 * Copy a disk inode into an in-core inode, swapping every field
 * (disk -> host order).  A short symlink stores its target text inline
 * in the block-pointer area, so that area is byte-copied, not swapped.
 */
void
byte_swap_inode_in(struct dinode *di, struct inode *ip)
{
	int i;

	ip->i_mode = NXSwapShort(di->di_mode);
	ip->i_nlink = NXSwapShort(di->di_nlink);
#ifdef LFS
	ip->i_inumber = NXSwapLong(di->di_u.inumber);
#else
	ip->i_oldids[0] = NXSwapShort(di->di_u.oldids[0]);
	ip->i_oldids[1] = NXSwapShort(di->di_u.oldids[1]);
#endif
	ip->i_size = NXSwapLongLong(di->di_size);
	ip->i_atime = NXSwapLong(di->di_atime);
	ip->i_atimensec = NXSwapLong(di->di_atimensec);
	ip->i_mtime = NXSwapLong(di->di_mtime);
	ip->i_mtimensec = NXSwapLong(di->di_mtimensec);
	ip->i_ctime = NXSwapLong(di->di_ctime);
	ip->i_ctimensec = NXSwapLong(di->di_ctimensec);
	if (((ip->i_mode & IFMT) == IFLNK ) && (ip->i_size <= RESYMLNKLEN)) {
		/* Inline symlink target: raw bytes, no swapping. */
		bcopy(&di->di_shortlink, &ip->i_shortlink, RESYMLNKLEN);
	} else {
		for (i=0; i < NDADDR; i++)	/* direct blocks */
			ip->i_db[i] = NXSwapLong(di->di_db[i]);
		for (i=0; i < NIADDR; i++)	/* indirect blocks */
			ip->i_ib[i] = NXSwapLong(di->di_ib[i]);
	}
	ip->i_flags = NXSwapLong(di->di_flags);
	ip->i_blocks = NXSwapLong(di->di_blocks);
	ip->i_gen = NXSwapLong(di->di_gen);
	ip->i_uid = NXSwapLong(di->di_uid);
	ip->i_gid = NXSwapLong(di->di_gid);
	ip->i_spare[0] = NXSwapLong(di->di_spare[0]);
	ip->i_spare[1] = NXSwapLong(di->di_spare[1]);

}


/*
 * Copy an in-core inode into a disk inode, swapping every field
 * (host -> disk order).  Mode and size are latched up front since the
 * short-symlink test must use the host-order values.
 */
void
byte_swap_inode_out(struct inode *ip, struct dinode *di)
{
	int i;
	int mode, inosize;

	mode = (ip->i_mode & IFMT);
	inosize = ip->i_size;

	di->di_mode = NXSwapShort(ip->i_mode);
	di->di_nlink = NXSwapShort(ip->i_nlink);
#ifdef LFS
	di->di_u.inumber = NXSwapLong(ip->i_inumber);
#else
	di->di_u.oldids[0] = NXSwapShort(ip->i_oldids[0]);
	di->di_u.oldids[1] = NXSwapShort(ip->i_oldids[1]);
#endif /* LFS */
	di->di_size = NXSwapLongLong(ip->i_size);
	di->di_atime = NXSwapLong(ip->i_atime);
	di->di_atimensec = NXSwapLong(ip->i_atimensec);
	di->di_mtime = NXSwapLong(ip->i_mtime);
	di->di_mtimensec = NXSwapLong(ip->i_mtimensec);
	di->di_ctime = NXSwapLong(ip->i_ctime);
	di->di_ctimensec = NXSwapLong(ip->i_ctimensec);
	if ((mode == IFLNK) && (inosize <= RESYMLNKLEN)) {
		/* Inline symlink target: raw bytes, no swapping. */
		bcopy( &ip->i_shortlink, &di->di_shortlink, RESYMLNKLEN);
	} else {
		for (i=0; i < NDADDR; i++)	/* direct blocks */
			di->di_db[i] = NXSwapLong(ip->i_db[i]);
		for (i=0; i < NIADDR; i++)	/* indirect blocks */
			di->di_ib[i] = NXSwapLong(ip->i_ib[i]);
	}
	di->di_flags = NXSwapLong(ip->i_flags);
	di->di_blocks = NXSwapLong(ip->i_blocks);
	di->di_gen = NXSwapLong(ip->i_gen);
	di->di_uid = NXSwapLong(ip->i_uid);
	di->di_gid = NXSwapLong(ip->i_gid);
	di->di_spare[0] = NXSwapLong(ip->i_spare[0]);
	di->di_spare[1] = NXSwapLong(ip->i_spare[1]);

}


/* Swap the fixed-size fields of a single directory entry. */
void
byte_swap_direct(struct direct *dirp)
{
	byte_swap_int(dirp->d_ino);
	byte_swap_short(dirp->d_reclen);
}

/*
 * Swap every entry in a directory block just read in.  Entries are
 * chained by d_reclen, which must be swapped before it can be used to
 * advance; a reclen below the minimum (12) aborts the walk.
 */
void
byte_swap_dir_block_in(char *addr, int count)
{
	struct direct *ep = (struct direct *) addr;
	int entryoffsetinblk = 0;

	while (entryoffsetinblk < count) {
		ep = (struct direct *) (entryoffsetinblk + addr);
		byte_swap_int(ep->d_ino);
		byte_swap_short(ep->d_reclen);
		entryoffsetinblk += ep->d_reclen;
		if (ep->d_reclen < 12)		/* handle garbage in dirs */
			break;
	}
}
/*
 * Swap every entry in a directory block about to be written.  The
 * host-order reclen is latched before the swap so the walk can continue.
 */
void
byte_swap_dir_out(char *addr, int count)
{
	struct direct *ep = (struct direct *) addr;
	int entryoffsetinblk = 0;
	int reclen;

	while (entryoffsetinblk < count) {
		ep = (struct direct *) (entryoffsetinblk + addr);
		reclen = ep->d_reclen;
		entryoffsetinblk += reclen;
		byte_swap_int(ep->d_ino);
		byte_swap_short(ep->d_reclen);
		if (reclen < 12)
			break;
	}
}

/* Same as byte_swap_dir_out, but driven directly from a buf. */
void
byte_swap_dir_block_out(struct buf *bp)
{
	struct direct *ep = (struct direct *) bp->b_data;
	int reclen, entryoffsetinblk = 0;

	while (entryoffsetinblk < bp->b_bcount) {
		ep = (struct direct *) (entryoffsetinblk + bp->b_data);
		reclen = ep->d_reclen;
		entryoffsetinblk += reclen;
		byte_swap_int(ep->d_ino);
		byte_swap_short(ep->d_reclen);
		if (reclen < 12)
			break;
	}
}

/* Swap the "." and ".." entries of a freshly read directory template. */
void
byte_swap_dirtemplate_in(struct dirtemplate *dirt)
{
	byte_swap_int(dirt->dot_ino);
	byte_swap_short(dirt->dot_reclen);
	byte_swap_int(dirt->dotdot_ino);
	byte_swap_short(dirt->dotdot_reclen);
}
/* Swap only the fixed fields of one entry (same as byte_swap_direct). */
void
byte_swap_minidir_in(struct direct *dirp)
{
	byte_swap_int(dirp->d_ino);
	byte_swap_short(dirp->d_reclen);
}

#if 0
// This is for the compatability (old) cylinder group block
void
byte_swap_ocylgroup(struct cg *cg)
{
	byte_swap_int(cg->cg_time);
	byte_swap_int(cg->cg_cgx);
	byte_swap_short(cg->cg_ncyl);
	byte_swap_short(cg->cg_niblk);
	byte_swap_int(cg->cg_ndblk);
	byte_swap_csum(&cg->cg_cs);
	byte_swap_int(cg->cg_rotor);
	byte_swap_int(cg->cg_frotor);
	byte_swap_int(cg->cg_irotor);
	byte_swap_ints(&cg->cg_frsum, 8);
	byte_swap_ints(&cg->cg_btot, 32);
	byte_swap_shorts((short *)&cg->cg_b, 32 * 8);
	byte_swap_int(cg->cg_magic);
}

#endif /* 0 */
diff --git a/bsd/ufs/ufs/ufs_byte_order.h b/bsd/ufs/ufs/ufs_byte_order.h
new file mode 100644
index 000000000..f52e4475c
--- /dev/null
+++ b/bsd/ufs/ufs/ufs_byte_order.h
@@ -0,0 +1,67 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright 1998 Apple Computer, Inc.
 *
 * UFS byte swapping routines to make a big endian file system useful on a
 * little endian machine.
 *
 * HISTORY
 *
 * 16 Feb 1998 A. Ramesh at Apple
 *	MacOSX version created.
+ */ + +#ifdef KERNEL_PRIVATE + +#ifndef _UFS_BYTE_ORDER_H_ +#define _UFS_BYTE_ORDER_H_ + +#include +#include +#include +#include +#include +#include + +void byte_swap_longlongs __P((unsigned long long *, int)); +void byte_swap_ints __P((int *, int)); +void byte_swap_shorts __P((short *, int)); + +/* void byte_swap_superblock __P((struct fs *)); */ +void byte_swap_sbin __P((struct fs *)); +void byte_swap_sbout __P((struct fs *)); +void byte_swap_csum __P((struct csum *)); +void byte_swap_ocylgroup __P((struct cg *)); +void byte_swap_cgin __P((struct cg *, struct fs *)); +void byte_swap_cgout __P((struct cg *, struct fs *)); + +void byte_swap_inode_in __P((struct dinode *, struct inode *)); +void byte_swap_inode_out __P((struct inode *, struct dinode *)); + +void byte_swap_dir_block_in __P((char *, int)); +void byte_swap_dir_block_out __P((struct buf *)); +void byte_swap_direct __P((struct direct *)); +void byte_swap_dirtemplate_in __P((struct dirtemplate *)); +void byte_swap_minidir_in __P((struct direct *)); + +#endif /* _UFS_BYTE_ORDER_H_ */ +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/ufs/ufs/ufs_extern.h b/bsd/ufs/ufs/ufs_extern.h new file mode 100644 index 000000000..469d646ed --- /dev/null +++ b/bsd/ufs/ufs/ufs_extern.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_extern.h 8.10 (Berkeley) 5/14/95 + */ +#ifndef _UFS_EXTERN_H_ +#define _UFS_EXTERN_H_ + +struct buf; +struct direct; +struct disklabel; +struct fid; +struct flock; +struct inode; +struct mbuf; +struct mount; +struct nameidata; +struct proc; +struct ucred; +struct ufs_args; +struct uio; +struct vattr; +struct vfsconf; +struct vnode; + +__BEGIN_DECLS +void diskerr + __P((struct buf *, char *, char *, int, int, struct disklabel *)); +void disksort __P((struct buf *, struct buf *)); +u_int dkcksum __P((struct disklabel *)); +char *readdisklabel __P((dev_t, int (*)(), struct disklabel *)); +int setdisklabel __P((struct disklabel *, struct disklabel *, u_long)); +int writedisklabel __P((dev_t, int (*)(), struct disklabel *)); + +int ufs_abortop __P((struct vop_abortop_args *)); +int ufs_access __P((struct vop_access_args *)); +int ufs_advlock __P((struct vop_advlock_args *)); +int ufs_bmap __P((struct vop_bmap_args *)); +int ufs_check_export __P((struct mount *, struct ufid *, struct mbuf *, + struct vnode **, int *exflagsp, struct ucred **)); +int ufs_checkpath __P((struct inode *, struct inode *, struct ucred *)); +int ufs_close __P((struct vop_close_args *)); +int ufs_create __P((struct vop_create_args *)); +void ufs_dirbad __P((struct inode *, doff_t, char *)); +int ufs_dirbadentry __P((struct vnode *, struct direct *, int)); +int ufs_dirempty __P((struct inode *, ino_t, struct ucred *)); +int ufs_direnter __P((struct inode *, 
struct vnode *,struct componentname *)); +int ufs_dirremove __P((struct vnode *, struct componentname*)); +int ufs_dirrewrite + __P((struct inode *, struct inode *, struct componentname *)); +int ufs_getattr __P((struct vop_getattr_args *)); +int ufs_getlbns __P((struct vnode *, ufs_daddr_t, struct indir *, int *)); +struct vnode * + ufs_ihashget __P((dev_t, ino_t)); +void ufs_ihashinit __P((void)); +void ufs_ihashins __P((struct inode *)); +struct vnode * + ufs_ihashlookup __P((dev_t, ino_t)); +void ufs_ihashrem __P((struct inode *)); +int ufs_inactive __P((struct vop_inactive_args *)); +int ufs_init __P((struct vfsconf *)); +int ufs_ioctl __P((struct vop_ioctl_args *)); +int ufs_islocked __P((struct vop_islocked_args *)); +#if NFSSERVER +int lease_check __P((struct vop_lease_args *)); +#define ufs_lease_check lease_check +#else +#define ufs_lease_check ((int (*) __P((struct vop_lease_args *)))nullop) +#endif +int ufs_link __P((struct vop_link_args *)); +int ufs_lock __P((struct vop_lock_args *)); +int ufs_lookup __P((struct vop_lookup_args *)); +int ufs_makeinode __P((int mode, struct vnode *, struct vnode **, struct componentname *)); +int ufs_mkdir __P((struct vop_mkdir_args *)); +int ufs_mknod __P((struct vop_mknod_args *)); +int ufs_mmap __P((struct vop_mmap_args *)); +int ufs_open __P((struct vop_open_args *)); +int ufs_pathconf __P((struct vop_pathconf_args *)); +int ufs_print __P((struct vop_print_args *)); +int ufs_readdir __P((struct vop_readdir_args *)); +int ufs_readlink __P((struct vop_readlink_args *)); +int ufs_reclaim __P((struct vnode *, struct proc *)); +int ufs_remove __P((struct vop_remove_args *)); +int ufs_rename __P((struct vop_rename_args *)); +#define ufs_revoke vop_revoke +int ufs_rmdir __P((struct vop_rmdir_args *)); +int ufs_root __P((struct mount *, struct vnode **)); +int ufs_seek __P((struct vop_seek_args *)); +int ufs_select __P((struct vop_select_args *)); +int ufs_setattr __P((struct vop_setattr_args *)); +int ufs_start 
__P((struct mount *, int, struct proc *)); +int ufs_strategy __P((struct vop_strategy_args *)); +int ufs_symlink __P((struct vop_symlink_args *)); +int ufs_unlock __P((struct vop_unlock_args *)); +int ufs_whiteout __P((struct vop_whiteout_args *)); +int ufs_vinit __P((struct mount *, + int (**)(), int (**)(), struct vnode **)); +int ufsspec_close __P((struct vop_close_args *)); +int ufsspec_read __P((struct vop_read_args *)); +int ufsspec_write __P((struct vop_write_args *)); + +#if FIFO +int ufsfifo_read __P((struct vop_read_args *)); +int ufsfifo_write __P((struct vop_write_args *)); +int ufsfifo_close __P((struct vop_close_args *)); +#endif +int ufs_blktooff __P((struct vop_blktooff_args *)); +int ufs_cmap __P((struct vop_cmap_args *)); + +__END_DECLS + +#endif /* ! _UFS_EXTERN_H_ */ diff --git a/bsd/ufs/ufs/ufs_ihash.c b/bsd/ufs/ufs/ufs_ihash.c new file mode 100644 index 000000000..e2a6fb112 --- /dev/null +++ b/bsd/ufs/ufs/ufs_ihash.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1991, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
 *
 * @(#)ufs_ihash.c	8.7 (Berkeley) 5/17/95
 */

/* NOTE(review): the header names on these #include lines were lost in
 * extraction -- restore from the original file. */
#include
#include
#include
#include
#include

#include
#include
#include

/*
 * Structures associated with inode caching.
 */
LIST_HEAD(ihashhead, inode) *ihashtbl;
u_long	ihash;		/* size of hash table - 1 */
#define	INOHASH(device, inum)	(&ihashtbl[((device) + (inum)) & ihash])
struct slock ufs_ihash_slock;	/* guards ihashtbl and the i_hash links */

/*
 * Initialize inode hash table.
 */
void
ufs_ihashinit()
{

	ihashtbl = hashinit(desiredvnodes, M_UFSMNT, &ihash);
	simple_lock_init(&ufs_ihash_slock);
}

/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, return it, even if it is locked.
 */
struct vnode *
ufs_ihashlookup(dev, inum)
	dev_t dev;
	ino_t inum;
{
	struct inode *ip;

	simple_lock(&ufs_ihash_slock);
	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	simple_unlock(&ufs_ihash_slock);

	if (ip)
		return (ITOV(ip));
	return (NULLVP);
}

/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, but locked, wait for it.
 */
struct vnode *
ufs_ihashget(dev, inum)
	dev_t dev;
	ino_t inum;
{
	struct proc *p = current_proc();	/* XXX */
	struct inode *ip;
	struct vnode *vp;

loop:
	simple_lock(&ufs_ihash_slock);
	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
		if (inum == ip->i_number && dev == ip->i_dev) {
			vp = ITOV(ip);
			simple_lock(&vp->v_interlock);
			simple_unlock(&ufs_ihash_slock);
			/* vget can sleep; if it fails the vnode was
			 * reclaimed meanwhile -- rescan from scratch. */
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
				goto loop;
			return (vp);
		}
	}
	simple_unlock(&ufs_ihash_slock);
	return (NULL);
}

/*
* Insert the inode into the hash table, and return it locked.
 */
void
ufs_ihashins(ip)
	struct inode *ip;
{
	struct proc *p = current_proc();	/* XXX */
	struct ihashhead *ipp;

	/* lock the inode, then put it on the appropriate hash list */
	lockmgr(&ip->i_lock, LK_EXCLUSIVE, (struct slock *)0, p);

	simple_lock(&ufs_ihash_slock);
	ipp = INOHASH(ip->i_dev, ip->i_number);
	LIST_INSERT_HEAD(ipp, ip, i_hash);
	simple_unlock(&ufs_ihash_slock);
}

/*
 * Remove the inode from the hash table.
 */
void
ufs_ihashrem(ip)
	struct inode *ip;
{
	struct inode *iq;	/* NOTE(review): unused variable */

	simple_lock(&ufs_ihash_slock);
	LIST_REMOVE(ip, i_hash);
#if DIAGNOSTIC
	/* Poison the links so a stale reuse trips an assertion. */
	ip->i_hash.le_next = NULL;
	ip->i_hash.le_prev = NULL;
#endif
	simple_unlock(&ufs_ihash_slock);
}
diff --git a/bsd/ufs/ufs/ufs_inode.c b/bsd/ufs/ufs/ufs_inode.c
new file mode 100644
index 000000000..0a1a1814c
--- /dev/null
+++ b/bsd/ufs/ufs/ufs_inode.c
@@ -0,0 +1,167 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
+ * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_inode.c 8.9 (Berkeley) 5/14/95 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +u_long nextgennumber; /* Next generation number to assign. */ +extern int prtactive; + +/* + * Last reference to an inode. If necessary, write or delete it. + */ +int +ufs_inactive(ap) + struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct inode *ip = VTOI(vp); + struct proc *p = ap->a_p; + struct timeval tv; + int mode, error = 0; + extern int prtactive; + + if (prtactive && vp->v_usecount != 0) + vprint("ffs_inactive: pushing active", vp); + + /* + * Ignore inodes related to stale file handles. + */ + if (ip->i_mode == 0) + goto out; + if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) { +#if QUOTA + if (!getinoquota(ip)) + (void)chkiq(ip, -1, NOCRED, 0); +#endif + error = VOP_TRUNCATE(vp, (off_t)0, 0, NOCRED, p); + ip->i_rdev = 0; + mode = ip->i_mode; + ip->i_mode = 0; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + VOP_VFREE(vp, ip->i_number, mode); + } + if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) { + tv = time; + VOP_UPDATE(vp, &tv, &tv, 0); + } +out: + VOP_UNLOCK(vp, 0, p); + /* + * If we are done with the inode, reclaim it + * so that it can be reused immediately. 
+ */ + if (ip->i_mode == 0) + vrecycle(vp, (struct slock *)0, p); + return (error); +} + +/* + * Reclaim an inode so that it can be used for other purposes. + */ +int +ufs_reclaim(vp, p) + struct vnode *vp; + struct proc *p; +{ + register struct inode *ip; + int i; + extern int prtactive; + + if (prtactive && vp->v_usecount != 0) + vprint("ufs_reclaim: pushing active", vp); + /* + * Remove the inode from its hash chain. + */ + ip = VTOI(vp); + ufs_ihashrem(ip); + /* + * Purge old data structures associated with the inode. + */ + cache_purge(vp); + if (ip->i_devvp) { + vrele(ip->i_devvp); + ip->i_devvp = 0; + } +#if QUOTA + for (i = 0; i < MAXQUOTAS; i++) { + if (ip->i_dquot[i] != NODQUOT) { + dqrele(vp, ip->i_dquot[i]); + ip->i_dquot[i] = NODQUOT; + } + } +#endif + return (0); +} diff --git a/bsd/ufs/ufs/ufs_lockf.c b/bsd/ufs/ufs/ufs_lockf.c new file mode 100644 index 000000000..ec0a50957 --- /dev/null +++ b/bsd/ufs/ufs/ufs_lockf.c @@ -0,0 +1,707 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Scooter Morris at Genentech Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ufs_lockf.c 8.4 (Berkeley) 10/26/94 + */ + +#include <sys/param.h> +#include <sys/systm.h> +#include <sys/kernel.h> +#include <sys/file.h> +#include <sys/proc.h> +#include <sys/vnode.h> +#include <sys/malloc.h> +#include <sys/fcntl.h> + +#include <ufs/ufs/quota.h> +#include <ufs/ufs/inode.h> +#include <ufs/ufs/lockf.h> +#include <ufs/ufs/ufs_extern.h> + +/* + * This variable controls the maximum number of processes that will + * be checked in doing deadlock detection. + */ +int maxlockdepth = MAXDEPTH; + +#ifdef LOCKF_DEBUG +#include <sys/sysctl.h> +#include <ufs/ufs/dir.h> +int lockf_debug = 0; +struct ctldebug debug4 = { "lockf_debug", &lockf_debug }; +#endif + +#define NOLOCKF (struct lockf *)0 +#define SELF 0x1 +#define OTHERS 0x2 + +/* + * Set a byte-range lock. + */ +int +lf_setlock(lock) + register struct lockf *lock; +{ + register struct lockf *block; + struct inode *ip = lock->lf_inode; + struct lockf **prev, *overlap, *ltmp; + static char lockstr[] = "lockf"; + int ovcase, priority, needtolink, error; + +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) + lf_print("lf_setlock", lock); +#endif /* LOCKF_DEBUG */ + + /* + * Set the priority + */ + priority = PLOCK; + if (lock->lf_type == F_WRLCK) + priority += 4; + priority |= PCATCH; + /* + * Scan lock list for this file looking for locks that would block us. + */ + while (block = lf_getblock(lock)) { + /* + * Free the structure and return if nonblocking. + */ + if ((lock->lf_flags & F_WAIT) == 0) { + FREE(lock, M_LOCKF); + return (EAGAIN); + } + /* + * We are blocked. Since flock style locks cover + * the whole file, there is no chance for deadlock. + * For byte-range locks we must check for deadlock. + * + * Deadlock detection is done by looking through the + * wait channels to see if there are any cycles that + * involve us. MAXDEPTH is set just to make sure we + * do not go off into neverland.
+ */ + if ((lock->lf_flags & F_POSIX) && + (block->lf_flags & F_POSIX)) { + register struct proc *wproc; + register struct lockf *waitblock; + int i = 0; + + /* The block is waiting on something */ + wproc = (struct proc *)block->lf_id; + while (wproc->p_wchan && + (wproc->p_wmesg == lockstr) && + (i++ < maxlockdepth)) { + waitblock = (struct lockf *)wproc->p_wchan; + /* Get the owner of the blocking lock */ + waitblock = waitblock->lf_next; + if ((waitblock->lf_flags & F_POSIX) == 0) + break; + wproc = (struct proc *)waitblock->lf_id; + if (wproc == (struct proc *)lock->lf_id) { + _FREE(lock, M_LOCKF); + return (EDEADLK); + } + } + } + /* + * For flock type locks, we must first remove + * any shared locks that we hold before we sleep + * waiting for an exclusive lock. + */ + if ((lock->lf_flags & F_FLOCK) && + lock->lf_type == F_WRLCK) { + lock->lf_type = F_UNLCK; + (void) lf_clearlock(lock); + lock->lf_type = F_WRLCK; + } + /* + * Add our lock to the blocked list and sleep until we're free. + * Remember who blocked us (for deadlock detection). + */ + lock->lf_next = block; + TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block); +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) { + lf_print("lf_setlock: blocking on", block); + lf_printlist("lf_setlock", block); + } +#endif /* LOCKF_DEBUG */ + if (error = tsleep((caddr_t)lock, priority, lockstr, 0)) { + /* + * We may have been awakened by a signal (in + * which case we must remove ourselves from the + * blocked list) and/or by another process + * releasing a lock (in which case we have already + * been removed from the blocked list and our + * lf_next field set to NOLOCKF). + */ + if (lock->lf_next) + TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, + lf_block); + _FREE(lock, M_LOCKF); + return (error); + } + } + /* + * No blocks!! Add the lock. Note that we will + * downgrade or upgrade any overlapping locks this + * process already owns. + * + * Skip over locks owned by other processes. 
+ * Handle any locks that overlap and are owned by ourselves. + */ + prev = &ip->i_lockf; + block = ip->i_lockf; + needtolink = 1; + for (;;) { + if (ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap)) + block = overlap->lf_next; + /* + * Six cases: + * 0) no overlap + * 1) overlap == lock + * 2) overlap contains lock + * 3) lock contains overlap + * 4) overlap starts before lock + * 5) overlap ends after lock + */ + switch (ovcase) { + case 0: /* no overlap */ + if (needtolink) { + *prev = lock; + lock->lf_next = overlap; + } + break; + + case 1: /* overlap == lock */ + /* + * If downgrading lock, others may be + * able to acquire it. + */ + if (lock->lf_type == F_RDLCK && + overlap->lf_type == F_WRLCK) + lf_wakelock(overlap); + overlap->lf_type = lock->lf_type; + FREE(lock, M_LOCKF); + lock = overlap; /* for debug output below */ + break; + + case 2: /* overlap contains lock */ + /* + * Check for common starting point and different types. + */ + if (overlap->lf_type == lock->lf_type) { + _FREE(lock, M_LOCKF); + lock = overlap; /* for debug output below */ + break; + } + if (overlap->lf_start == lock->lf_start) { + *prev = lock; + lock->lf_next = overlap; + overlap->lf_start = lock->lf_end + 1; + } else + lf_split(overlap, lock); + lf_wakelock(overlap); + break; + + case 3: /* lock contains overlap */ + /* + * If downgrading lock, others may be able to + * acquire it, otherwise take the list. + */ + if (lock->lf_type == F_RDLCK && + overlap->lf_type == F_WRLCK) { + lf_wakelock(overlap); + } else { + while (ltmp = overlap->lf_blkhd.tqh_first) { + TAILQ_REMOVE(&overlap->lf_blkhd, ltmp, + lf_block); + TAILQ_INSERT_TAIL(&lock->lf_blkhd, + ltmp, lf_block); + } + } + /* + * Add the new lock if necessary and delete the overlap. 
+ */ + if (needtolink) { + *prev = lock; + lock->lf_next = overlap->lf_next; + prev = &lock->lf_next; + needtolink = 0; + } else + *prev = overlap->lf_next; + _FREE(overlap, M_LOCKF); + continue; + + case 4: /* overlap starts before lock */ + /* + * Add lock after overlap on the list. + */ + lock->lf_next = overlap->lf_next; + overlap->lf_next = lock; + overlap->lf_end = lock->lf_start - 1; + prev = &lock->lf_next; + lf_wakelock(overlap); + needtolink = 0; + continue; + + case 5: /* overlap ends after lock */ + /* + * Add the new lock before overlap. + */ + if (needtolink) { + *prev = lock; + lock->lf_next = overlap; + } + overlap->lf_start = lock->lf_end + 1; + lf_wakelock(overlap); + break; + } + break; + } +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) { + lf_print("lf_setlock: got the lock", lock); + lf_printlist("lf_setlock", lock); + } +#endif /* LOCKF_DEBUG */ + return (0); +} + +/* + * Remove a byte-range lock on an inode. + * + * Generally, find the lock (or an overlap to that lock) + * and remove it (or shrink it), then wakeup anyone we can. + */ +int +lf_clearlock(unlock) + register struct lockf *unlock; +{ + struct inode *ip = unlock->lf_inode; + register struct lockf *lf = ip->i_lockf; + struct lockf *overlap, **prev; + int ovcase; + + if (lf == NOLOCKF) + return (0); +#ifdef LOCKF_DEBUG + if (unlock->lf_type != F_UNLCK) + panic("lf_clearlock: bad type"); + if (lockf_debug & 1) + lf_print("lf_clearlock", unlock); +#endif /* LOCKF_DEBUG */ + prev = &ip->i_lockf; + while (ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) { + /* + * Wakeup the list of locks to be retried. 
+ */ + lf_wakelock(overlap); + + switch (ovcase) { + + case 1: /* overlap == lock */ + *prev = overlap->lf_next; + FREE(overlap, M_LOCKF); + break; + + case 2: /* overlap contains lock: split it */ + if (overlap->lf_start == unlock->lf_start) { + overlap->lf_start = unlock->lf_end + 1; + break; + } + lf_split(overlap, unlock); + overlap->lf_next = unlock->lf_next; + break; + + case 3: /* lock contains overlap */ + *prev = overlap->lf_next; + lf = overlap->lf_next; + _FREE(overlap, M_LOCKF); + continue; + + case 4: /* overlap starts before lock */ + overlap->lf_end = unlock->lf_start - 1; + prev = &overlap->lf_next; + lf = overlap->lf_next; + continue; + + case 5: /* overlap ends after lock */ + overlap->lf_start = unlock->lf_end + 1; + break; + } + break; + } +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) + lf_printlist("lf_clearlock", unlock); +#endif /* LOCKF_DEBUG */ + return (0); +} + +/* + * Check whether there is a blocking lock, + * and if so return its process identifier. + */ +int +lf_getlock(lock, fl) + register struct lockf *lock; + register struct flock *fl; +{ + register struct lockf *block; + +#ifdef LOCKF_DEBUG + if (lockf_debug & 1) + lf_print("lf_getlock", lock); +#endif /* LOCKF_DEBUG */ + + if (block = lf_getblock(lock)) { + fl->l_type = block->lf_type; + fl->l_whence = SEEK_SET; + fl->l_start = block->lf_start; + if (block->lf_end == -1) + fl->l_len = 0; + else + fl->l_len = block->lf_end - block->lf_start + 1; + if (block->lf_flags & F_POSIX) + fl->l_pid = ((struct proc *)(block->lf_id))->p_pid; + else + fl->l_pid = -1; + } else { + fl->l_type = F_UNLCK; + } + return (0); +} + +/* + * Walk the list of locks for an inode and + * return the first blocking lock. 
+ */ +struct lockf * +lf_getblock(lock) + register struct lockf *lock; +{ + struct lockf **prev, *overlap, *lf = lock->lf_inode->i_lockf; + int ovcase; + + prev = &lock->lf_inode->i_lockf; + while (ovcase = lf_findoverlap(lf, lock, OTHERS, &prev, &overlap)) { + /* + * We've found an overlap, see if it blocks us + */ + if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) + return (overlap); + /* + * Nope, point to the next one on the list and + * see if it blocks us + */ + lf = overlap->lf_next; + } + return (NOLOCKF); +} + +/* + * Walk the list of locks for an inode to + * find an overlapping lock (if any). + * + * NOTE: this returns only the FIRST overlapping lock. There + * may be more than one. + */ +int +lf_findoverlap(lf, lock, type, prev, overlap) + register struct lockf *lf; + struct lockf *lock; + int type; + struct lockf ***prev; + struct lockf **overlap; +{ + off_t start, end; + + *overlap = lf; + if (lf == NOLOCKF) + return (0); +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + lf_print("lf_findoverlap: looking for overlap in", lock); +#endif /* LOCKF_DEBUG */ + start = lock->lf_start; + end = lock->lf_end; + while (lf != NOLOCKF) { + if (((type & SELF) && lf->lf_id != lock->lf_id) || + ((type & OTHERS) && lf->lf_id == lock->lf_id)) { + *prev = &lf->lf_next; + *overlap = lf = lf->lf_next; + continue; + } +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + lf_print("\tchecking", lf); +#endif /* LOCKF_DEBUG */ + /* + * OK, check for overlap + * + * Six cases: + * 0) no overlap + * 1) overlap == lock + * 2) overlap contains lock + * 3) lock contains overlap + * 4) overlap starts before lock + * 5) overlap ends after lock + */ + if ((lf->lf_end != -1 && start > lf->lf_end) || + (end != -1 && lf->lf_start > end)) { + /* Case 0 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("no overlap\n"); +#endif /* LOCKF_DEBUG */ + if ((type & SELF) && end != -1 && lf->lf_start > end) + return (0); + *prev = &lf->lf_next; + *overlap = lf = lf->lf_next; + continue; + } 
+ if ((lf->lf_start == start) && (lf->lf_end == end)) { + /* Case 1 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap == lock\n"); +#endif /* LOCKF_DEBUG */ + return (1); + } + if ((lf->lf_start <= start) && + (end != -1) && + ((lf->lf_end >= end) || (lf->lf_end == -1))) { + /* Case 2 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap contains lock\n"); +#endif /* LOCKF_DEBUG */ + return (2); + } + if (start <= lf->lf_start && + (end == -1 || + (lf->lf_end != -1 && end >= lf->lf_end))) { + /* Case 3 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("lock contains overlap\n"); +#endif /* LOCKF_DEBUG */ + return (3); + } + if ((lf->lf_start < start) && + ((lf->lf_end >= start) || (lf->lf_end == -1))) { + /* Case 4 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap starts before lock\n"); +#endif /* LOCKF_DEBUG */ + return (4); + } + if ((lf->lf_start > start) && + (end != -1) && + ((lf->lf_end > end) || (lf->lf_end == -1))) { + /* Case 5 */ +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + printf("overlap ends after lock\n"); +#endif /* LOCKF_DEBUG */ + return (5); + } + panic("lf_findoverlap: default"); + } + return (0); +} + +/* + * Split a lock and a contained region into + * two or three locks as necessary. + */ +void +lf_split(lock1, lock2) + register struct lockf *lock1; + register struct lockf *lock2; +{ + register struct lockf *splitlock; + +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) { + lf_print("lf_split", lock1); + lf_print("splitting from", lock2); + } +#endif /* LOCKF_DEBUG */ + /* + * Check to see if spliting into only two pieces. 
+ */ + if (lock1->lf_start == lock2->lf_start) { + lock1->lf_start = lock2->lf_end + 1; + lock2->lf_next = lock1; + return; + } + if (lock1->lf_end == lock2->lf_end) { + lock1->lf_end = lock2->lf_start - 1; + lock2->lf_next = lock1->lf_next; + lock1->lf_next = lock2; + return; + } + /* + * Make a new lock consisting of the last part of + * the encompassing lock + */ + MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK); + bcopy((caddr_t)lock1, (caddr_t)splitlock, sizeof *splitlock); + splitlock->lf_start = lock2->lf_end + 1; + TAILQ_INIT(&splitlock->lf_blkhd); + lock1->lf_end = lock2->lf_start - 1; + /* + * OK, now link it in + */ + splitlock->lf_next = lock1->lf_next; + lock2->lf_next = splitlock; + lock1->lf_next = lock2; +} + +/* + * Wakeup a blocklist + */ +void +lf_wakelock(listhead) + struct lockf *listhead; +{ + register struct lockf *wakelock; + + while (wakelock = listhead->lf_blkhd.tqh_first) { + TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block); + wakelock->lf_next = NOLOCKF; +#ifdef LOCKF_DEBUG + if (lockf_debug & 2) + lf_print("lf_wakelock: awakening", wakelock); +#endif /* LOCKF_DEBUG */ + wakeup((caddr_t)wakelock); + } +} + +#ifdef LOCKF_DEBUG +/* + * Print out a lock. + */ +lf_print(tag, lock) + char *tag; + register struct lockf *lock; +{ + + printf("%s: lock 0x%lx for ", tag, lock); + if (lock->lf_flags & F_POSIX) + printf("proc %d", ((struct proc *)(lock->lf_id))->p_pid); + else + printf("id 0x%x", lock->lf_id); + printf(" in ino %d on dev <%d, %d>, %s, start %d, end %d", + lock->lf_inode->i_number, + major(lock->lf_inode->i_dev), + minor(lock->lf_inode->i_dev), + lock->lf_type == F_RDLCK ? "shared" : + lock->lf_type == F_WRLCK ? "exclusive" : + lock->lf_type == F_UNLCK ? 
"unlock" : + "unknown", lock->lf_start, lock->lf_end); + if (lock->lf_blkhd.tqh_first) + printf(" block 0x%x\n", lock->lf_blkhd.tqh_first); + else + printf("\n"); +} + +lf_printlist(tag, lock) + char *tag; + struct lockf *lock; +{ + register struct lockf *lf, *blk; + + printf("%s: Lock list for ino %d on dev <%d, %d>:\n", + tag, lock->lf_inode->i_number, + major(lock->lf_inode->i_dev), + minor(lock->lf_inode->i_dev)); + for (lf = lock->lf_inode->i_lockf; lf; lf = lf->lf_next) { + printf("\tlock 0x%lx for ", lf); + if (lf->lf_flags & F_POSIX) + printf("proc %d", ((struct proc *)(lf->lf_id))->p_pid); + else + printf("id 0x%x", lf->lf_id); + printf(", %s, start %d, end %d", + lf->lf_type == F_RDLCK ? "shared" : + lf->lf_type == F_WRLCK ? "exclusive" : + lf->lf_type == F_UNLCK ? "unlock" : + "unknown", lf->lf_start, lf->lf_end); + for (blk = lf->lf_blkhd.tqh_first; blk; + blk = blk->lf_block.tqe_next) { + printf("\n\t\tlock request 0x%lx for ", blk); + if (blk->lf_flags & F_POSIX) + printf("proc %d", + ((struct proc *)(blk->lf_id))->p_pid); + else + printf("id 0x%x", blk->lf_id); + printf(", %s, start %d, end %d", + blk->lf_type == F_RDLCK ? "shared" : + blk->lf_type == F_WRLCK ? "exclusive" : + blk->lf_type == F_UNLCK ? "unlock" : + "unknown", blk->lf_start, blk->lf_end); + if (blk->lf_blkhd.tqh_first) + panic("lf_printlist: bad list"); + } + printf("\n"); + } +} +#endif /* LOCKF_DEBUG */ diff --git a/bsd/ufs/ufs/ufs_lookup.c b/bsd/ufs/ufs/ufs_lookup.c new file mode 100644 index 000000000..a2583678b --- /dev/null +++ b/bsd/ufs/ufs/ufs_lookup.c @@ -0,0 +1,1115 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_lookup.c 8.15 (Berkeley) 6/16/95 + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +extern struct nchstats nchstats; +#if DIAGNOSTIC +int dirchk = 1; +#else +int dirchk = 0; +#endif + +#define FSFMT(vp) ((vp)->v_mount->mnt_maxsymlinklen <= 0) + +/* + * Convert a component of a pathname into a pointer to a locked inode. + * This is a very central and rather complicated routine. + * If the file system is not maintained in a strict tree hierarchy, + * this can result in a deadlock situation (see comments in code below). + * + * The cnp->cn_nameiop argument is LOOKUP, CREATE, RENAME, or DELETE depending + * on whether the name is to be looked up, created, renamed, or deleted. + * When CREATE, RENAME, or DELETE is specified, information usable in + * creating, renaming, or deleting a directory entry may be calculated. 
+ * If flag has LOCKPARENT or'ed into it and the target of the pathname + * exists, lookup returns both the target and its parent directory locked. + * When creating or renaming and LOCKPARENT is specified, the target may + * not be ".". When deleting and LOCKPARENT is specified, the target may + * be "."., but the caller must check to ensure it does an vrele and vput + * instead of two vputs. + * + * Overall outline of ufs_lookup: + * + * check accessibility of directory + * look for name in cache, if found, then if at end of path + * and deleting or creating, drop it, else return name + * search for name in directory, to found or notfound + * notfound: + * if creating, return locked directory, leaving info on available slots + * else return error + * found: + * if at end of path and deleting, return information to allow delete + * if at end of path and rewriting (RENAME and LOCKPARENT), lock target + * inode and return info to allow rewrite + * if not at end, add name to cache; if at end and neither creating + * nor deleting, add name to cache + */ +int +ufs_lookup(ap) + struct vop_lookup_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + } */ *ap; +{ + register struct vnode *vdp; /* vnode for directory being searched */ + register struct inode *dp; /* inode for directory being searched */ + struct buf *bp; /* a buffer of directory entries */ + register struct direct *ep; /* the current directory entry */ + int entryoffsetinblock; /* offset of ep in bp's buffer */ + enum {NONE, COMPACT, FOUND} slotstatus; + doff_t slotoffset; /* offset of area with free space */ + int slotsize; /* size of area at slotoffset */ + int slotfreespace; /* amount of space free in slot */ + int slotneeded; /* size of the entry we're seeking */ + int numdirpasses; /* strategy for directory search */ + doff_t endsearch; /* offset to end directory search */ + doff_t prevoff; /* prev entry dp->i_offset */ + struct vnode *pdp; /* saved dp during symlink 
work */ + struct vnode *tdp; /* returned by VFS_VGET */ + doff_t enduseful; /* pointer past last used dir slot */ + u_long bmask; /* block offset mask */ + int lockparent; /* 1 => lockparent flag is set */ + int wantparent; /* 1 => wantparent or lockparent flag */ + int namlen, error; + struct vnode **vpp = ap->a_vpp; + struct componentname *cnp = ap->a_cnp; + struct ucred *cred = cnp->cn_cred; + int flags = cnp->cn_flags; + int nameiop = cnp->cn_nameiop; + struct proc *p = cnp->cn_proc; +#if REV_ENDIAN_FS + int rev_endian=0; +#endif /* REV_ENDIAN_FS */ + + + bp = NULL; + slotoffset = -1; + *vpp = NULL; + vdp = ap->a_dvp; + dp = VTOI(vdp); + lockparent = flags & LOCKPARENT; + wantparent = flags & (LOCKPARENT|WANTPARENT); +#if REV_ENDIAN_FS + rev_endian=(vdp->v_mount->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + /* + * Check accessiblity of directory. + */ + if ((dp->i_mode & IFMT) != IFDIR) + return (ENOTDIR); + if (error = VOP_ACCESS(vdp, VEXEC, cred, cnp->cn_proc)) + return (error); + if ((flags & ISLASTCN) && (vdp->v_mount->mnt_flag & MNT_RDONLY) && + (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) + return (EROFS); + + /* + * We now have a segment name to search for, and a directory to search. + * + * Before tediously performing a linear scan of the directory, + * check the name cache to see if the directory/name pair + * we are looking for is known already. + */ + if (error = cache_lookup(vdp, vpp, cnp)) { + int vpid; /* capability number of vnode */ + + if (error == ENOENT) + return (error); + /* + * Get the next vnode in the path. + * See comment below starting `Step through' for + * an explaination of the locking protocol. + */ + pdp = vdp; + dp = VTOI(*vpp); + vdp = *vpp; + vpid = vdp->v_id; + if (pdp == vdp) { /* lookup on "." 
*/ + VREF(vdp); + error = 0; + } else if (flags & ISDOTDOT) { + VOP_UNLOCK(pdp, 0, p); + error = vget(vdp, LK_EXCLUSIVE, p); + if (!error && lockparent && (flags & ISLASTCN)) + error = vn_lock(pdp, LK_EXCLUSIVE, p); + } else { + error = vget(vdp, LK_EXCLUSIVE, p); + if (!lockparent || error || !(flags & ISLASTCN)) + VOP_UNLOCK(pdp, 0, p); + } + /* + * Check that the capability number did not change + * while we were waiting for the lock. + */ + if (!error) { + if (vpid == vdp->v_id) + return (0); + vput(vdp); + if (lockparent && pdp != vdp && (flags & ISLASTCN)) + VOP_UNLOCK(pdp, 0, p); + } + if (error = vn_lock(pdp, LK_EXCLUSIVE, p)) + return (error); + vdp = pdp; + dp = VTOI(pdp); + *vpp = NULL; + } + + /* + * Suppress search for slots unless creating + * file and at end of pathname, in which case + * we watch for a place to put the new file in + * case it doesn't already exist. + */ + slotstatus = FOUND; + slotfreespace = slotsize = slotneeded = 0; + if ((nameiop == CREATE || nameiop == RENAME) && + (flags & ISLASTCN)) { + slotstatus = NONE; + slotneeded = (sizeof(struct direct) - MAXNAMLEN + + cnp->cn_namelen + 3) &~ 3; + } + + /* + * If there is cached information on a previous search of + * this directory, pick up where we last left off. + * We cache only lookups as these are the most common + * and have the greatest payoff. Caching CREATE has little + * benefit as it usually must search the entire directory + * to determine that the entry does not exist. Caching the + * location of the last DELETE or RENAME has not reduced + * profiling time and hence has been removed in the interest + * of simplicity. 
+ */ + bmask = VFSTOUFS(vdp->v_mount)->um_mountp->mnt_stat.f_iosize - 1; + if (nameiop != LOOKUP || dp->i_diroff == 0 || + dp->i_diroff > dp->i_size) { + entryoffsetinblock = 0; + dp->i_offset = 0; + numdirpasses = 1; + } else { + dp->i_offset = dp->i_diroff; + if ((entryoffsetinblock = dp->i_offset & bmask) && + (error = VOP_BLKATOFF(vdp, (off_t)dp->i_offset, NULL, &bp))) + return (error); + numdirpasses = 2; + nchstats.ncs_2passes++; + } + prevoff = dp->i_offset; + endsearch = roundup(dp->i_size, DIRBLKSIZ); + enduseful = 0; + +searchloop: + while (dp->i_offset < endsearch) { + /* + * If necessary, get the next directory block. + */ + if ((dp->i_offset & bmask) == 0) { + if (bp != NULL) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_dir_block_out(bp); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + } + if (error = + VOP_BLKATOFF(vdp, (off_t)dp->i_offset, NULL, &bp)) + return (error); + entryoffsetinblock = 0; + } + /* + * If still looking for a slot, and at a DIRBLKSIZE + * boundary, have to start looking for free space again. + */ + if (slotstatus == NONE && + (entryoffsetinblock & (DIRBLKSIZ - 1)) == 0) { + slotoffset = -1; + slotfreespace = 0; + } + /* + * Get pointer to next entry. + * Full validation checks are slow, so we only check + * enough to insure forward progress through the + * directory. Complete checks can be run by patching + * "dirchk" to be true. + */ + ep = (struct direct *)((char *)bp->b_data + entryoffsetinblock); + if (ep->d_reclen == 0 || + dirchk && ufs_dirbadentry(vdp, ep, entryoffsetinblock)) { + int i; + + ufs_dirbad(dp, dp->i_offset, "mangled entry"); + i = DIRBLKSIZ - (entryoffsetinblock & (DIRBLKSIZ - 1)); + dp->i_offset += i; + entryoffsetinblock += i; + continue; + } + + /* + * If an appropriate sized slot has not yet been found, + * check to see if one is available. Also accumulate space + * in the current block so that we can determine if + * compaction is viable. 
+ */ + if (slotstatus != FOUND) { + int size = ep->d_reclen; + + if (ep->d_ino != 0) + size -= DIRSIZ(FSFMT(vdp), ep); + if (size > 0) { + if (size >= slotneeded) { + slotstatus = FOUND; + slotoffset = dp->i_offset; + slotsize = ep->d_reclen; + } else if (slotstatus == NONE) { + slotfreespace += size; + if (slotoffset == -1) + slotoffset = dp->i_offset; + if (slotfreespace >= slotneeded) { + slotstatus = COMPACT; + slotsize = dp->i_offset + + ep->d_reclen - slotoffset; + } + } + } + } + + /* + * Check for a name match. + */ + if (ep->d_ino) { +# if (BYTE_ORDER == LITTLE_ENDIAN) + if (vdp->v_mount->mnt_maxsymlinklen > 0) + namlen = ep->d_namlen; + else + namlen = ep->d_type; +# else + namlen = ep->d_namlen; +# endif + if (namlen == cnp->cn_namelen && + !bcmp(cnp->cn_nameptr, ep->d_name, + (unsigned)namlen)) { + /* + * Save directory entry's inode number and + * reclen in ndp->ni_ufs area, and release + * directory buffer. + */ + if (vdp->v_mount->mnt_maxsymlinklen > 0 && + ep->d_type == DT_WHT) { + slotstatus = FOUND; + slotoffset = dp->i_offset; + slotsize = ep->d_reclen; + dp->i_reclen = slotsize; + enduseful = dp->i_size; + ap->a_cnp->cn_flags |= ISWHITEOUT; + numdirpasses--; + goto notfound; + } + dp->i_ino = ep->d_ino; + dp->i_reclen = ep->d_reclen; +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_dir_block_out(bp); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + goto found; + } + } + prevoff = dp->i_offset; + dp->i_offset += ep->d_reclen; + entryoffsetinblock += ep->d_reclen; + if (ep->d_ino) + enduseful = dp->i_offset; + } +notfound: + /* + * If we started in the middle of the directory and failed + * to find our target, we must check the beginning as well. 
+ */ + if (numdirpasses == 2) { + numdirpasses--; + dp->i_offset = 0; + endsearch = dp->i_diroff; + goto searchloop; + } + if (bp != NULL) { +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_dir_block_out(bp); +#endif /* REV_ENDIAN_FS */ + brelse(bp); + } + /* + * If creating, and at end of pathname and current + * directory has not been removed, then can consider + * allowing file to be created. + */ + if ((nameiop == CREATE || nameiop == RENAME || + (nameiop == DELETE && + (ap->a_cnp->cn_flags & DOWHITEOUT) && + (ap->a_cnp->cn_flags & ISWHITEOUT))) && + (flags & ISLASTCN) && dp->i_nlink != 0) { + /* + * Access for write is interpreted as allowing + * creation of files in the directory. + */ + if (error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_proc)) + return (error); + /* + * Return an indication of where the new directory + * entry should be put. If we didn't find a slot, + * then set dp->i_count to 0 indicating + * that the new slot belongs at the end of the + * directory. If we found a slot, then the new entry + * can be put in the range from dp->i_offset to + * dp->i_offset + dp->i_count. + */ + if (slotstatus == NONE) { + dp->i_offset = roundup(dp->i_size, DIRBLKSIZ); + dp->i_count = 0; + enduseful = dp->i_offset; + } else if (nameiop == DELETE) { + dp->i_offset = slotoffset; + if ((dp->i_offset & (DIRBLKSIZ - 1)) == 0) + dp->i_count = 0; + else + dp->i_count = dp->i_offset - prevoff; + } else { + dp->i_offset = slotoffset; + dp->i_count = slotsize; + if (enduseful < slotoffset + slotsize) + enduseful = slotoffset + slotsize; + } + dp->i_endoff = roundup(enduseful, DIRBLKSIZ); + dp->i_flag |= IN_CHANGE | IN_UPDATE; + /* + * We return with the directory locked, so that + * the parameters we set up above will still be + * valid if we actually decide to do a direnter(). + * We return ni_vp == NULL to indicate that the entry + * does not currently exist; we leave a pointer to + * the (locked) directory inode in ndp->ni_dvp. 
+ * The pathname buffer is saved so that the name + * can be obtained later. + * + * NB - if the directory is unlocked, then this + * information cannot be used. + */ + cnp->cn_flags |= SAVENAME; + if (!lockparent) + VOP_UNLOCK(vdp, 0, p); + return (EJUSTRETURN); + } + /* + * Insert name into cache (as non-existent) if appropriate. + */ + if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) + cache_enter(vdp, *vpp, cnp); + return (ENOENT); + +found: + if (numdirpasses == 2) + nchstats.ncs_pass2++; + /* + * Check that directory length properly reflects presence + * of this entry. + */ + if (entryoffsetinblock + DIRSIZ(FSFMT(vdp), ep) > dp->i_size) { + ufs_dirbad(dp, dp->i_offset, "i_size too small"); + dp->i_size = entryoffsetinblock + DIRSIZ(FSFMT(vdp), ep); + dp->i_flag |= IN_CHANGE | IN_UPDATE; + } + + /* + * Found component in pathname. + * If the final component of path name, save information + * in the cache as to where the entry was found. + */ + if ((flags & ISLASTCN) && nameiop == LOOKUP) + dp->i_diroff = dp->i_offset &~ (DIRBLKSIZ - 1); + + /* + * If deleting, and at end of pathname, return + * parameters which can be used to remove file. + * If the wantparent flag isn't set, we return only + * the directory (in ndp->ni_dvp), otherwise we go + * on and lock the inode, being careful with ".". + */ + if (nameiop == DELETE && (flags & ISLASTCN)) { + /* + * Write access to directory required to delete files. + */ + if (error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_proc)) + return (error); + /* + * Return pointer to current entry in dp->i_offset, + * and distance past previous entry (if there + * is a previous entry in this block) in dp->i_count. + * Save directory inode pointer in ndp->ni_dvp for dirremove(). 
+ */ + if ((dp->i_offset & (DIRBLKSIZ - 1)) == 0) + dp->i_count = 0; + else + dp->i_count = dp->i_offset - prevoff; + if (dp->i_number == dp->i_ino) { + VREF(vdp); + *vpp = vdp; + return (0); + } + if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) + return (error); + /* + * If directory is "sticky", then user must own + * the directory, or the file in it, else she + * may not delete it (unless she's root). This + * implements append-only directories. + */ + if ((dp->i_mode & ISVTX) && + cred->cr_uid != 0 && + cred->cr_uid != dp->i_uid && + tdp->v_type != VLNK && + VTOI(tdp)->i_uid != cred->cr_uid) { + vput(tdp); + return (EPERM); + } + *vpp = tdp; + if (!lockparent) + VOP_UNLOCK(vdp, 0, p); + return (0); + } + + /* + * If rewriting (RENAME), return the inode and the + * information required to rewrite the present directory + * Must get inode of directory entry to verify it's a + * regular file, or empty directory. + */ + if (nameiop == RENAME && wantparent && (flags & ISLASTCN)) { + if (error = VOP_ACCESS(vdp, VWRITE, cred, cnp->cn_proc)) + return (error); + /* + * Careful about locking second inode. + * This can only occur if the target is ".". + */ + if (dp->i_number == dp->i_ino) + return (EISDIR); + if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) + return (error); + *vpp = tdp; + cnp->cn_flags |= SAVENAME; + if (!lockparent) + VOP_UNLOCK(vdp, 0, p); + return (0); + } + + /* + * Step through the translation in the name. We do not `vput' the + * directory because we may need it again if a symbolic link + * is relative to the current directory. Instead we save it + * unlocked as "pdp". We must get the target inode before unlocking + * the directory to insure that the inode will not be removed + * before we get it. We prevent deadlock by always fetching + * inodes from the root, moving down the directory tree. Thus + * when following backward pointers ".." we must unlock the + * parent directory before getting the requested directory. 
+ * There is a potential race condition here if both the current + * and parent directories are removed before the VFS_VGET for the + * inode associated with ".." returns. We hope that this occurs + * infrequently since we cannot avoid this race condition without + * implementing a sophisticated deadlock detection algorithm. + * Note also that this simple deadlock detection scheme will not + * work if the file system has any hard links other than ".." + * that point backwards in the directory structure. + */ + pdp = vdp; + if (flags & ISDOTDOT) { + VOP_UNLOCK(pdp, 0, p); /* race to get the inode */ + if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) { + vn_lock(pdp, LK_EXCLUSIVE | LK_RETRY, p); + return (error); + } + if (lockparent && (flags & ISLASTCN) && + (error = vn_lock(pdp, LK_EXCLUSIVE, p))) { + vput(tdp); + return (error); + } + *vpp = tdp; + } else if (dp->i_number == dp->i_ino) { + VREF(vdp); /* we want ourself, ie "." */ + *vpp = vdp; + } else { + if (error = VFS_VGET(vdp->v_mount, dp->i_ino, &tdp)) + return (error); + if (!lockparent || !(flags & ISLASTCN)) + VOP_UNLOCK(pdp, 0, p); + *vpp = tdp; + } + + /* + * Insert name into cache if appropriate. 
+ */
+	if (cnp->cn_flags & MAKEENTRY)
+		cache_enter(vdp, *vpp, cnp);
+	return (0);
+}
+
+/*
+ * Report a corrupted directory entry.  Logs the mount point, inode
+ * number and offset of the damage, then panics -- unless the volume
+ * is mounted read-only, in which case the damage cannot be repaired
+ * anyway and we merely complain to the console.
+ */
+void
+ufs_dirbad(ip, offset, how)
+	struct inode *ip;
+	doff_t offset;
+	char *how;
+{
+	struct mount *mp;
+
+	mp = ITOV(ip)->v_mount;
+	(void)printf("%s: bad dir ino %d at offset %d: %s\n",
+	    mp->mnt_stat.f_mntonname, ip->i_number, offset, how);
+	if ((mp->mnt_stat.f_flags & MNT_RDONLY) == 0)
+		panic("bad dir");
+}
+
+/*
+ * Do consistency checking on a directory entry:
+ *	record length must be multiple of 4
+ *	entry must fit in rest of its DIRBLKSIZ block
+ *	record must be large enough to contain entry
+ *	name is not longer than MAXNAMLEN
+ *	name must be as long as advertised, and null terminated
+ *
+ * Returns 0 if the entry looks sane, 1 if it is mangled.
+ */
+int
+ufs_dirbadentry(dp, ep, entryoffsetinblock)
+	struct vnode *dp;
+	register struct direct *ep;
+	int entryoffsetinblock;
+{
+	register int i;
+	int namlen;
+
+	/*
+	 * Old-format (mnt_maxsymlinklen == 0) filesystems store d_type
+	 * and d_namlen swapped on little-endian hosts; same dance as in
+	 * the name-match path of ufs_lookup() above.
+	 */
+# if (BYTE_ORDER == LITTLE_ENDIAN)
+		if (dp->v_mount->mnt_maxsymlinklen > 0)
+			namlen = ep->d_namlen;
+		else
+			namlen = ep->d_type;
+# else
+		namlen = ep->d_namlen;
+# endif
+	if ((ep->d_reclen & 0x3) != 0 ||
+	    ep->d_reclen > DIRBLKSIZ - (entryoffsetinblock & (DIRBLKSIZ - 1)) ||
+	    ep->d_reclen < DIRSIZ(FSFMT(dp), ep) || namlen > MAXNAMLEN) {
+		/*return (1); */
+		/* NOTE(review): leftover debug printf -- candidate for removal */
+		printf("First bad\n");
+		goto bad;
+	}
+	if (ep->d_ino == 0)
+		return (0);
+	/*
+	 * The name may not contain an embedded NUL, and must be
+	 * NUL-terminated at exactly d_name[namlen].
+	 */
+	for (i = 0; i < namlen; i++)
+		if (ep->d_name[i] == '\0') {
+			/*return (1); */
+			/* NOTE(review): leftover debug printf -- candidate for removal */
+			printf("Second bad\n");
+			goto bad;
+		}
+	if (ep->d_name[i])
+		goto bad;
+	return (0);
+bad:
+	return (1);
+}
+
+/*
+ * Write a directory entry after a call to namei, using the parameters
+ * that it left in nameidata. The argument ip is the inode which the new
+ * directory entry will refer to. Dvp is a pointer to the directory to
+ * be written, which was left locked by namei. Remaining parameters
+ * (dp->i_offset, dp->i_count) indicate how the space for the new
+ * entry is to be obtained.
+ */ +int +ufs_direnter(ip, dvp, cnp) + struct inode *ip; + struct vnode *dvp; + register struct componentname *cnp; +{ + register struct inode *dp; + struct direct newdir; + +#if DIAGNOSTIC + if ((cnp->cn_flags & SAVENAME) == 0) + panic("direnter: missing name"); +#endif + dp = VTOI(dvp); + newdir.d_ino = ip->i_number; + newdir.d_namlen = cnp->cn_namelen; + bcopy(cnp->cn_nameptr, newdir.d_name, (unsigned)cnp->cn_namelen + 1); + if (dvp->v_mount->mnt_maxsymlinklen > 0) + newdir.d_type = IFTODT(ip->i_mode); + else { + newdir.d_type = 0; +# if (BYTE_ORDER == LITTLE_ENDIAN) + { u_char tmp = newdir.d_namlen; + newdir.d_namlen = newdir.d_type; + newdir.d_type = tmp; } +# endif + } + return (ufs_direnter2(dvp, &newdir, cnp->cn_cred, cnp->cn_proc)); +} + +/* + * Common entry point for directory entry removal used by ufs_direnter + * and ufs_whiteout + */ +ufs_direnter2(dvp, dirp, cr, p) + struct vnode *dvp; + struct direct *dirp; + struct ucred *cr; + struct proc *p; +{ + int newentrysize; + struct inode *dp; + struct buf *bp; + struct iovec aiov; + struct uio auio; + u_int dsize; + struct direct *ep, *nep; + int error, loc, spacefree; + char *dirbuf; +#if REV_ENDIAN_FS + struct mount *mp=dvp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + + dp = VTOI(dvp); + newentrysize = DIRSIZ(FSFMT(dvp), dirp); + + if (dp->i_count == 0) { + /* + * If dp->i_count is 0, then namei could find no + * space in the directory. Here, dp->i_offset will + * be on a directory block boundary and we will write the + * new entry into a fresh block. 
+ */ + if (dp->i_offset & (DIRBLKSIZ - 1)) + panic("ufs_direnter2: newblk"); + auio.uio_offset = dp->i_offset; + dirp->d_reclen = DIRBLKSIZ; + auio.uio_resid = newentrysize; + aiov.iov_len = newentrysize; + aiov.iov_base = (caddr_t)dirp; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_WRITE; + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_procp = (struct proc *)0; + error = VOP_WRITE(dvp, &auio, IO_SYNC, cr); + if (DIRBLKSIZ > + VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_bsize) + /* XXX should grow with balloc() */ + panic("ufs_direnter2: frag size"); + else if (!error) { + dp->i_size = roundup(dp->i_size, DIRBLKSIZ); + dp->i_flag |= IN_CHANGE; + } + return (error); + } + + /* + * If dp->i_count is non-zero, then namei found space + * for the new entry in the range dp->i_offset to + * dp->i_offset + dp->i_count in the directory. + * To use this space, we may have to compact the entries located + * there, by copying them together towards the beginning of the + * block, leaving the free space in one usable chunk at the end. + */ + + /* + * Increase size of directory if entry eats into new space. + * This should never push the size past a new multiple of + * DIRBLKSIZE. + * + * N.B. - THIS IS AN ARTIFACT OF 4.2 AND SHOULD NEVER HAPPEN. + */ + if (dp->i_offset + dp->i_count > dp->i_size) + dp->i_size = dp->i_offset + dp->i_count; + /* + * Get the block containing the space for the new directory entry. + */ + if (error = VOP_BLKATOFF(dvp, (off_t)dp->i_offset, &dirbuf, &bp)) + return (error); + /* + * Find space for the new entry. In the simple case, the entry at + * offset base will have the space. If it does not, then namei + * arranged that compacting the region dp->i_offset to + * dp->i_offset + dp->i_count would yield the + * space. 
+ */ + ep = (struct direct *)dirbuf; + dsize = DIRSIZ(FSFMT(dvp), ep); + spacefree = ep->d_reclen - dsize; + for (loc = ep->d_reclen; loc < dp->i_count; ) { + nep = (struct direct *)(dirbuf + loc); + if (ep->d_ino) { + /* trim the existing slot */ + ep->d_reclen = dsize; + ep = (struct direct *)((char *)ep + dsize); + } else { + /* overwrite; nothing there; header is ours */ + spacefree += dsize; + } + dsize = DIRSIZ(FSFMT(dvp), nep); + spacefree += nep->d_reclen - dsize; + loc += nep->d_reclen; + bcopy((caddr_t)nep, (caddr_t)ep, dsize); + } + /* + * Update the pointer fields in the previous entry (if any), + * copy in the new entry, and write out the block. + */ + if (ep->d_ino == 0 || + (ep->d_ino == WINO && + bcmp(ep->d_name, dirp->d_name, dirp->d_namlen) == 0)) { + if (spacefree + dsize < newentrysize) + panic("ufs_direnter2: compact1"); + dirp->d_reclen = spacefree + dsize; + } else { + if (spacefree < newentrysize) + panic("ufs_direnter2: compact2"); + dirp->d_reclen = spacefree; + ep->d_reclen = dsize; + ep = (struct direct *)((char *)ep + dsize); + } + bcopy((caddr_t)dirp, (caddr_t)ep, (u_int)newentrysize); +#if REV_ENDIAN_FS + if (rev_endian) + byte_swap_dir_block_out(bp); +#endif /* REV_ENDIAN_FS */ + error = VOP_BWRITE(bp); + dp->i_flag |= IN_CHANGE | IN_UPDATE; + if (!error && dp->i_endoff && dp->i_endoff < dp->i_size) + error = VOP_TRUNCATE(dvp, (off_t)dp->i_endoff, IO_SYNC, cr, p); + return (error); +} + +/* + * Remove a directory entry after a call to namei, using + * the parameters which it left in nameidata. The entry + * dp->i_offset contains the offset into the directory of the + * entry to be eliminated. The dp->i_count field contains the + * size of the previous record in the directory. If this + * is 0, the first entry is being deleted, so we need only + * zero the inode number to mark the entry as free. 
If the
+ * entry is not the first in the directory, we must reclaim
+ * the space of the now empty record by adding the record size
+ * to the size of the previous entry.
+ */
+int
+ufs_dirremove(dvp, cnp)
+	struct vnode *dvp;
+	struct componentname *cnp;
+{
+	register struct inode *dp;
+	struct direct *ep;
+	struct buf *bp;
+	int error;
+#if REV_ENDIAN_FS
+	struct mount *mp=dvp->v_mount;
+	int rev_endian=(mp->mnt_flag & MNT_REVEND);
+#endif /* REV_ENDIAN_FS */
+
+	dp = VTOI(dvp);
+
+	if (cnp->cn_flags & DOWHITEOUT) {
+		/*
+		 * Whiteout entry: set d_ino to WINO.
+		 * (The record keeps its space; only the inode field and
+		 * type change, so no compaction is needed.)
+		 */
+		if (error =
+		    VOP_BLKATOFF(dvp, (off_t)dp->i_offset, (char **)&ep, &bp))
+			return (error);
+		ep->d_ino = WINO;
+		ep->d_type = DT_WHT;
+#if REV_ENDIAN_FS
+	if (rev_endian)
+		byte_swap_dir_block_out(bp);
+#endif /* REV_ENDIAN_FS */
+		error = VOP_BWRITE(bp);
+		dp->i_flag |= IN_CHANGE | IN_UPDATE;
+		return (error);
+	}
+
+	if (dp->i_count == 0) {
+		/*
+		 * First entry in block: set d_ino to zero.
+		 * There is no previous record in this DIRBLKSIZ block to
+		 * absorb the space.
+		 */
+		if (error =
+		    VOP_BLKATOFF(dvp, (off_t)dp->i_offset, (char **)&ep, &bp))
+			return (error);
+		ep->d_ino = 0;
+#if REV_ENDIAN_FS
+	if (rev_endian)
+		byte_swap_dir_block_out(bp);
+#endif /* REV_ENDIAN_FS */
+		error = VOP_BWRITE(bp);
+		dp->i_flag |= IN_CHANGE | IN_UPDATE;
+		return (error);
+	}
+	/*
+	 * Collapse new free space into previous entry.
+	 * dp->i_reclen (saved by ufs_lookup) is the size of the record
+	 * being deleted; dp->i_count is the distance back to the
+	 * previous entry in the same block.
+	 */
+	if (error = VOP_BLKATOFF(dvp, (off_t)(dp->i_offset - dp->i_count),
+	    (char **)&ep, &bp))
+		return (error);
+	ep->d_reclen += dp->i_reclen;
+#if REV_ENDIAN_FS
+	if (rev_endian)
+		byte_swap_dir_block_out(bp);
+#endif /* REV_ENDIAN_FS */
+	error = VOP_BWRITE(bp);
+	dp->i_flag |= IN_CHANGE | IN_UPDATE;
+	return (error);
+}
+
+/*
+ * Rewrite an existing directory entry to point at the inode
+ * supplied. The parameters describing the directory entry are
+ * set up by a call to namei.
+ */ +int +ufs_dirrewrite(dp, ip, cnp) + struct inode *dp, *ip; + struct componentname *cnp; +{ + struct buf *bp; + struct direct *ep; + struct vnode *vdp = ITOV(dp); + int error; + + if (error = VOP_BLKATOFF(vdp, (off_t)dp->i_offset, (char **)&ep, &bp)) + return (error); + ep->d_ino = ip->i_number; + if (vdp->v_mount->mnt_maxsymlinklen > 0) + ep->d_type = IFTODT(ip->i_mode); +#if REV_ENDIAN_FS + if (vdp->v_mount->mnt_flag & MNT_REVEND) + byte_swap_dir_block_out(bp); +#endif /* REV_ENDIAN_FS */ + error = VOP_BWRITE(bp); + dp->i_flag |= IN_CHANGE | IN_UPDATE; + return (error); +} + +/* + * Check if a directory is empty or not. + * Inode supplied must be locked. + * + * Using a struct dirtemplate here is not precisely + * what we want, but better than using a struct direct. + * + * NB: does not handle corrupted directories. + */ +int +ufs_dirempty(ip, parentino, cred) + register struct inode *ip; + ino_t parentino; + struct ucred *cred; +{ + register off_t off; + struct dirtemplate dbuf; + register struct direct *dp = (struct direct *)&dbuf; + int error, count, namlen; +#if REV_ENDIAN_FS + struct vnode *vp=ITOV(ip); + struct mount *mp=vp->v_mount; + int rev_endian=(mp->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + +#define MINDIRSIZ (sizeof (struct dirtemplate) / 2) + + for (off = 0; off < ip->i_size; off += dp->d_reclen) { + error = vn_rdwr(UIO_READ, ITOV(ip), (caddr_t)dp, MINDIRSIZ, off, + UIO_SYSSPACE, IO_NODELOCKED, cred, &count, (struct proc *)0); + /* + * Since we read MINDIRSIZ, residual must + * be 0 unless we're at end of file. + */ + if (error || count != 0) + return (0); +#if 0 /*REV_ENDIAN_FS */ + if (rev_endian) + byte_swap_minidir_in(dp); +#endif /* REV_ENDIAN_FS */ + /* avoid infinite loops */ + if (dp->d_reclen == 0) + return (0); + /* skip empty entries */ + if (dp->d_ino == 0 || dp->d_ino == WINO) + continue; + /* accept only "." and ".." 
*/ +# if (BYTE_ORDER == LITTLE_ENDIAN) + if (ITOV(ip)->v_mount->mnt_maxsymlinklen > 0) + namlen = dp->d_namlen; + else + namlen = dp->d_type; +# else + namlen = dp->d_namlen; +# endif + if (namlen > 2) + return (0); + if (dp->d_name[0] != '.') + return (0); + /* + * At this point namlen must be 1 or 2. + * 1 implies ".", 2 implies ".." if second + * char is also "." + */ + if (namlen == 1) + continue; + if (dp->d_name[1] == '.' && dp->d_ino == parentino) + continue; + return (0); + } + return (1); +} + +/* + * Check if source directory is in the path of the target directory. + * Target is supplied locked, source is unlocked. + * The target is always vput before returning. + */ +int +ufs_checkpath(source, target, cred) + struct inode *source, *target; + struct ucred *cred; +{ + struct vnode *vp; + int error, rootino, namlen; + struct dirtemplate dirbuf; + + vp = ITOV(target); + if (target->i_number == source->i_number) { + error = EEXIST; + goto out; + } + rootino = ROOTINO; + error = 0; + if (target->i_number == rootino) + goto out; + + for (;;) { + if (vp->v_type != VDIR) { + error = ENOTDIR; + break; + } + error = vn_rdwr(UIO_READ, vp, (caddr_t)&dirbuf, + sizeof (struct dirtemplate), (off_t)0, UIO_SYSSPACE, + IO_NODELOCKED, cred, (int *)0, (struct proc *)0); + if (error != 0) + break; +# if (BYTE_ORDER == LITTLE_ENDIAN) + if (vp->v_mount->mnt_maxsymlinklen > 0) + namlen = dirbuf.dotdot_namlen; + else + namlen = dirbuf.dotdot_type; +# else + namlen = dirbuf.dotdot_namlen; +# endif + if (namlen != 2 || + dirbuf.dotdot_name[0] != '.' || + dirbuf.dotdot_name[1] != '.') { + error = ENOTDIR; + break; + } + if (dirbuf.dotdot_ino == source->i_number) { + error = EINVAL; + break; + } + if (dirbuf.dotdot_ino == rootino) + break; + vput(vp); + if (error = VFS_VGET(vp->v_mount, dirbuf.dotdot_ino, &vp)) { + vp = NULL; + break; + } + } + +out: + if (error == ENOTDIR) + printf("checkpath: .. 
not a directory\n"); + if (vp != NULL) + vput(vp); + return (error); +} diff --git a/bsd/ufs/ufs/ufs_quota.c b/bsd/ufs/ufs/ufs_quota.c new file mode 100644 index 000000000..eff293ea1 --- /dev/null +++ b/bsd/ufs/ufs/ufs_quota.c @@ -0,0 +1,943 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1990, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Robert Elz at The University of Melbourne. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_quota.c 8.5 (Berkeley) 5/20/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Quota name to error message mapping. + */ +static char *quotatypes[] = INITQFNAMES; + +/* + * Set up the quotas for an inode. + * + * This routine completely defines the semantics of quotas. + * If other criterion want to be used to establish quotas, the + * MAXQUOTAS value in quotas.h should be increased, and the + * additional dquots set up here. 
+ */
+int
+getinoquota(ip)
+	register struct inode *ip;
+{
+	struct ufsmount *ump;
+	struct vnode *vp = ITOV(ip);
+	int error;
+
+	ump = VFSTOUFS(vp->v_mount);
+	/*
+	 * Set up the user quota based on file uid.
+	 * EINVAL means that quotas are not enabled.
+	 */
+	if (ip->i_dquot[USRQUOTA] == NODQUOT &&
+	    (error =
+		dqget(vp, ip->i_uid, ump, USRQUOTA, &ip->i_dquot[USRQUOTA])) &&
+	    error != EINVAL)
+		return (error);
+	/*
+	 * Set up the group quota based on file gid.
+	 * EINVAL means that quotas are not enabled.
+	 */
+	if (ip->i_dquot[GRPQUOTA] == NODQUOT &&
+	    (error =
+		dqget(vp, ip->i_gid, ump, GRPQUOTA, &ip->i_dquot[GRPQUOTA])) &&
+	    error != EINVAL)
+		return (error);
+	return (0);
+}
+
+/*
+ * Update disk usage, and take corrective action.
+ *
+ * "change" is a block-count delta: negative releases blocks (never
+ * fails, usage is clamped at zero), positive charges blocks against
+ * each enabled quota type unless FORCE is set or the caller is root.
+ */
+int
+chkdq(ip, change, cred, flags)
+	register struct inode *ip;
+	long change;
+	struct ucred *cred;
+	int flags;
+{
+	register struct dquot *dq;
+	register int i;
+	int ncurblocks, error;
+
+#if DIAGNOSTIC
+	if ((flags & CHOWN) == 0)
+		chkdquot(ip);
+#endif
+	if (change == 0)
+		return (0);
+	if (change < 0) {
+		/* Releasing space: always succeeds, clamp usage at zero. */
+		for (i = 0; i < MAXQUOTAS; i++) {
+			if ((dq = ip->i_dquot[i]) == NODQUOT)
+				continue;
+			/*
+			 * Wait for any in-progress dquot I/O: set DQ_WANT so
+			 * the holder wakes us when it clears DQ_LOCK.
+			 */
+			while (dq->dq_flags & DQ_LOCK) {
+				dq->dq_flags |= DQ_WANT;
+				sleep((caddr_t)dq, PINOD+1);
+			}
+			ncurblocks = dq->dq_curblocks + change;
+			if (ncurblocks >= 0)
+				dq->dq_curblocks = ncurblocks;
+			else
+				dq->dq_curblocks = 0;
+			/* Usage dropped: clear the "limit message sent" flag. */
+			dq->dq_flags &= ~DQ_BLKS;
+			dq->dq_flags |= DQ_MOD;
+		}
+		return (0);
+	}
+	if ((flags & FORCE) == 0 && cred->cr_uid != 0) {
+		/* Non-root, non-forced allocation: enforce the limits. */
+		for (i = 0; i < MAXQUOTAS; i++) {
+			if ((dq = ip->i_dquot[i]) == NODQUOT)
+				continue;
+			if (error = chkdqchg(ip, change, cred, i))
+				return (error);
+		}
+	}
+	for (i = 0; i < MAXQUOTAS; i++) {
+		if ((dq = ip->i_dquot[i]) == NODQUOT)
+			continue;
+		while (dq->dq_flags & DQ_LOCK) {
+			dq->dq_flags |= DQ_WANT;
+			sleep((caddr_t)dq, PINOD+1);
+		}
+		dq->dq_curblocks += change;
+		dq->dq_flags |= DQ_MOD;
+	}
+	return (0);
+}
+
+/*
+ * Check for a valid change to a user's allocation.
+ * Issue an error message if appropriate.
+ *
+ * Returns 0 if the block allocation may proceed for quota type "type",
+ * or EDQUOT if it would exceed the hard limit or the soft-limit grace
+ * period has expired.
+ */
+int
+chkdqchg(ip, change, cred, type)
+	struct inode *ip;
+	long change;
+	struct ucred *cred;
+	int type;
+{
+	register struct dquot *dq = ip->i_dquot[type];
+	long ncurblocks = dq->dq_curblocks + change;
+
+	/*
+	 * If user would exceed their hard limit, disallow space allocation.
+	 * (A hard limit of 0 means "no hard limit".)
+	 */
+	if (ncurblocks >= dq->dq_bhardlimit && dq->dq_bhardlimit) {
+		/* Only nag once (DQ_BLKS), and only the file's owner. */
+		if ((dq->dq_flags & DQ_BLKS) == 0 &&
+		    ip->i_uid == cred->cr_uid) {
+			uprintf("\n%s: write failed, %s disk limit reached\n",
+			    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
+			    quotatypes[type]);
+			dq->dq_flags |= DQ_BLKS;
+		}
+		return (EDQUOT);
+	}
+	/*
+	 * If user is over their soft limit for too long, disallow space
+	 * allocation. Reset time limit as they cross their soft limit.
+	 */
+	if (ncurblocks >= dq->dq_bsoftlimit && dq->dq_bsoftlimit) {
+		if (dq->dq_curblocks < dq->dq_bsoftlimit) {
+			/* First crossing of the soft limit: start grace timer. */
+			dq->dq_btime = time.tv_sec +
+			    VFSTOUFS(ITOV(ip)->v_mount)->um_btime[type];
+			if (ip->i_uid == cred->cr_uid)
+				uprintf("\n%s: warning, %s %s\n",
+				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
+				    quotatypes[type], "disk quota exceeded");
+			return (0);
+		}
+		if (time.tv_sec > dq->dq_btime) {
+			/* Grace period expired: refuse further allocations. */
+			if ((dq->dq_flags & DQ_BLKS) == 0 &&
+			    ip->i_uid == cred->cr_uid) {
+				uprintf("\n%s: write failed, %s %s\n",
+				    ITOV(ip)->v_mount->mnt_stat.f_mntonname,
+				    quotatypes[type],
+				    "disk quota exceeded for too long");
+				dq->dq_flags |= DQ_BLKS;
+			}
+			return (EDQUOT);
+		}
+	}
+	return (0);
+}
+
+/*
+ * Check the inode limit, applying corrective action.
+ */ +int +chkiq(ip, change, cred, flags) + register struct inode *ip; + long change; + struct ucred *cred; + int flags; +{ + register struct dquot *dq; + register int i; + int ncurinodes, error; + +#if DIAGNOSTIC + if ((flags & CHOWN) == 0) + chkdquot(ip); +#endif + if (change == 0) + return (0); + if (change < 0) { + for (i = 0; i < MAXQUOTAS; i++) { + if ((dq = ip->i_dquot[i]) == NODQUOT) + continue; + while (dq->dq_flags & DQ_LOCK) { + dq->dq_flags |= DQ_WANT; + sleep((caddr_t)dq, PINOD+1); + } + ncurinodes = dq->dq_curinodes + change; + if (ncurinodes >= 0) + dq->dq_curinodes = ncurinodes; + else + dq->dq_curinodes = 0; + dq->dq_flags &= ~DQ_INODS; + dq->dq_flags |= DQ_MOD; + } + return (0); + } + if ((flags & FORCE) == 0 && cred->cr_uid != 0) { + for (i = 0; i < MAXQUOTAS; i++) { + if ((dq = ip->i_dquot[i]) == NODQUOT) + continue; + if (error = chkiqchg(ip, change, cred, i)) + return (error); + } + } + for (i = 0; i < MAXQUOTAS; i++) { + if ((dq = ip->i_dquot[i]) == NODQUOT) + continue; + while (dq->dq_flags & DQ_LOCK) { + dq->dq_flags |= DQ_WANT; + sleep((caddr_t)dq, PINOD+1); + } + dq->dq_curinodes += change; + dq->dq_flags |= DQ_MOD; + } + return (0); +} + +/* + * Check for a valid change to a users allocation. + * Issue an error message if appropriate. + */ +int +chkiqchg(ip, change, cred, type) + struct inode *ip; + long change; + struct ucred *cred; + int type; +{ + register struct dquot *dq = ip->i_dquot[type]; + long ncurinodes = dq->dq_curinodes + change; + + /* + * If user would exceed their hard limit, disallow inode allocation. + */ + if (ncurinodes >= dq->dq_ihardlimit && dq->dq_ihardlimit) { + if ((dq->dq_flags & DQ_INODS) == 0 && + ip->i_uid == cred->cr_uid) { + uprintf("\n%s: write failed, %s inode limit reached\n", + ITOV(ip)->v_mount->mnt_stat.f_mntonname, + quotatypes[type]); + dq->dq_flags |= DQ_INODS; + } + return (EDQUOT); + } + /* + * If user is over their soft limit for too long, disallow inode + * allocation. 
Reset time limit as they cross their soft limit. + */ + if (ncurinodes >= dq->dq_isoftlimit && dq->dq_isoftlimit) { + if (dq->dq_curinodes < dq->dq_isoftlimit) { + dq->dq_itime = time.tv_sec + + VFSTOUFS(ITOV(ip)->v_mount)->um_itime[type]; + if (ip->i_uid == cred->cr_uid) + uprintf("\n%s: warning, %s %s\n", + ITOV(ip)->v_mount->mnt_stat.f_mntonname, + quotatypes[type], "inode quota exceeded"); + return (0); + } + if (time.tv_sec > dq->dq_itime) { + if ((dq->dq_flags & DQ_INODS) == 0 && + ip->i_uid == cred->cr_uid) { + uprintf("\n%s: write failed, %s %s\n", + ITOV(ip)->v_mount->mnt_stat.f_mntonname, + quotatypes[type], + "inode quota exceeded for too long"); + dq->dq_flags |= DQ_INODS; + } + return (EDQUOT); + } + } + return (0); +} + +#if DIAGNOSTIC +/* + * On filesystems with quotas enabled, it is an error for a file to change + * size and not to have a dquot structure associated with it. + */ +void +chkdquot(ip) + register struct inode *ip; +{ + struct ufsmount *ump = VFSTOUFS(ITOV(ip)->v_mount); + register int i; + + for (i = 0; i < MAXQUOTAS; i++) { + if (ump->um_quotas[i] == NULLVP || + (ump->um_qflags[i] & (QTF_OPENING|QTF_CLOSING))) + continue; + if (ip->i_dquot[i] == NODQUOT) { + vprint("chkdquot: missing dquot", ITOV(ip)); + panic("missing dquot"); + } + } +} +#endif + +/* + * Code to process quotactl commands. + */ + +/* + * Q_QUOTAON - set up a quota file for a particular file system. 
+ */ +int +quotaon(p, mp, type, fname) + struct proc *p; + struct mount *mp; + register int type; + caddr_t fname; +{ + struct ufsmount *ump = VFSTOUFS(mp); + struct vnode *vp, **vpp; + struct vnode *nextvp; + struct dquot *dq; + int error; + struct nameidata nd; + + vpp = &ump->um_quotas[type]; + NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname, p); + if (error = vn_open(&nd, FREAD|FWRITE, 0)) + return (error); + vp = nd.ni_vp; + VOP_UNLOCK(vp, 0, p); + if (vp->v_type != VREG) { + (void) vn_close(vp, FREAD|FWRITE, p->p_ucred, p); + return (EACCES); + } + if (*vpp != vp) + quotaoff(p, mp, type); + ump->um_qflags[type] |= QTF_OPENING; + mp->mnt_flag |= MNT_QUOTA; + vp->v_flag |= VSYSTEM; + *vpp = vp; + /* + * Save the credential of the process that turned on quotas. + * Set up the time limits for this quota. + */ + crhold(p->p_ucred); + ump->um_cred[type] = p->p_ucred; + ump->um_btime[type] = MAX_DQ_TIME; + ump->um_itime[type] = MAX_IQ_TIME; + if (dqget(NULLVP, 0, ump, type, &dq) == 0) { + if (dq->dq_btime > 0) + ump->um_btime[type] = dq->dq_btime; + if (dq->dq_itime > 0) + ump->um_itime[type] = dq->dq_itime; + dqrele(NULLVP, dq); + } + /* + * Search vnodes associated with this mount point, + * adding references to quota file being opened. + * NB: only need to add dquot's for inodes being modified. + */ +again: + for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) { + nextvp = vp->v_mntvnodes.le_next; + if (vp->v_writecount == 0) + continue; + if (vget(vp, LK_EXCLUSIVE, p)) + goto again; + if (error = getinoquota(VTOI(vp))) { + vput(vp); + break; + } + vput(vp); + if (vp->v_mntvnodes.le_next != nextvp || vp->v_mount != mp) + goto again; + } + ump->um_qflags[type] &= ~QTF_OPENING; + if (error) + quotaoff(p, mp, type); + return (error); +} + +/* + * Q_QUOTAOFF - turn off disk quotas for a filesystem. 
+ */ +int +quotaoff(p, mp, type) + struct proc *p; + struct mount *mp; + register int type; +{ + struct vnode *vp; + struct vnode *qvp, *nextvp; + struct ufsmount *ump = VFSTOUFS(mp); + struct dquot *dq; + struct inode *ip; + int error; + struct ucred *cred; + + if ((qvp = ump->um_quotas[type]) == NULLVP) + return (0); + ump->um_qflags[type] |= QTF_CLOSING; + /* + * Search vnodes associated with this mount point, + * deleting any references to quota file being closed. + */ +again: + for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) { + nextvp = vp->v_mntvnodes.le_next; + if (vget(vp, LK_EXCLUSIVE, p)) + goto again; + ip = VTOI(vp); + dq = ip->i_dquot[type]; + ip->i_dquot[type] = NODQUOT; + dqrele(vp, dq); + vput(vp); + if (vp->v_mntvnodes.le_next != nextvp || vp->v_mount != mp) + goto again; + } + dqflush(qvp); + qvp->v_flag &= ~VSYSTEM; + error = vn_close(qvp, FREAD|FWRITE, p->p_ucred, p); + ump->um_quotas[type] = NULLVP; + cred = ump->um_cred[type]; + if (cred != NOCRED) { + ump->um_cred[type] = NOCRED; + crfree(cred); + } + ump->um_qflags[type] &= ~QTF_CLOSING; + for (type = 0; type < MAXQUOTAS; type++) + if (ump->um_quotas[type] != NULLVP) + break; + if (type == MAXQUOTAS) + mp->mnt_flag &= ~MNT_QUOTA; + return (error); +} + +/* + * Q_GETQUOTA - return current values in a dqblk structure. + */ +int +getquota(mp, id, type, addr) + struct mount *mp; + u_long id; + int type; + caddr_t addr; +{ + struct dquot *dq; + int error; + + if (error = dqget(NULLVP, id, VFSTOUFS(mp), type, &dq)) + return (error); + error = copyout((caddr_t)&dq->dq_dqb, addr, sizeof (struct dqblk)); + dqrele(NULLVP, dq); + return (error); +} + +/* + * Q_SETQUOTA - assign an entire dqblk structure. 
+ */ +int +setquota(mp, id, type, addr) + struct mount *mp; + u_long id; + int type; + caddr_t addr; +{ + register struct dquot *dq; + struct dquot *ndq; + struct ufsmount *ump = VFSTOUFS(mp); + struct dqblk newlim; + int error; + + if (error = copyin(addr, (caddr_t)&newlim, sizeof (struct dqblk))) + return (error); + if (error = dqget(NULLVP, id, ump, type, &ndq)) + return (error); + dq = ndq; + while (dq->dq_flags & DQ_LOCK) { + dq->dq_flags |= DQ_WANT; + sleep((caddr_t)dq, PINOD+1); + } + /* + * Copy all but the current values. + * Reset time limit if previously had no soft limit or were + * under it, but now have a soft limit and are over it. + */ + newlim.dqb_curblocks = dq->dq_curblocks; + newlim.dqb_curinodes = dq->dq_curinodes; + if (dq->dq_id != 0) { + newlim.dqb_btime = dq->dq_btime; + newlim.dqb_itime = dq->dq_itime; + } + if (newlim.dqb_bsoftlimit && + dq->dq_curblocks >= newlim.dqb_bsoftlimit && + (dq->dq_bsoftlimit == 0 || dq->dq_curblocks < dq->dq_bsoftlimit)) + newlim.dqb_btime = time.tv_sec + ump->um_btime[type]; + if (newlim.dqb_isoftlimit && + dq->dq_curinodes >= newlim.dqb_isoftlimit && + (dq->dq_isoftlimit == 0 || dq->dq_curinodes < dq->dq_isoftlimit)) + newlim.dqb_itime = time.tv_sec + ump->um_itime[type]; + dq->dq_dqb = newlim; + if (dq->dq_curblocks < dq->dq_bsoftlimit) + dq->dq_flags &= ~DQ_BLKS; + if (dq->dq_curinodes < dq->dq_isoftlimit) + dq->dq_flags &= ~DQ_INODS; + if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 && + dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) + dq->dq_flags |= DQ_FAKE; + else + dq->dq_flags &= ~DQ_FAKE; + dq->dq_flags |= DQ_MOD; + dqrele(NULLVP, dq); + return (0); +} + +/* + * Q_SETUSE - set current inode and block usage. 
+ */ +int +setuse(mp, id, type, addr) + struct mount *mp; + u_long id; + int type; + caddr_t addr; +{ + register struct dquot *dq; + struct ufsmount *ump = VFSTOUFS(mp); + struct dquot *ndq; + struct dqblk usage; + int error; + + if (error = copyin(addr, (caddr_t)&usage, sizeof (struct dqblk))) + return (error); + if (error = dqget(NULLVP, id, ump, type, &ndq)) + return (error); + dq = ndq; + while (dq->dq_flags & DQ_LOCK) { + dq->dq_flags |= DQ_WANT; + sleep((caddr_t)dq, PINOD+1); + } + /* + * Reset time limit if have a soft limit and were + * previously under it, but are now over it. + */ + if (dq->dq_bsoftlimit && dq->dq_curblocks < dq->dq_bsoftlimit && + usage.dqb_curblocks >= dq->dq_bsoftlimit) + dq->dq_btime = time.tv_sec + ump->um_btime[type]; + if (dq->dq_isoftlimit && dq->dq_curinodes < dq->dq_isoftlimit && + usage.dqb_curinodes >= dq->dq_isoftlimit) + dq->dq_itime = time.tv_sec + ump->um_itime[type]; + dq->dq_curblocks = usage.dqb_curblocks; + dq->dq_curinodes = usage.dqb_curinodes; + if (dq->dq_curblocks < dq->dq_bsoftlimit) + dq->dq_flags &= ~DQ_BLKS; + if (dq->dq_curinodes < dq->dq_isoftlimit) + dq->dq_flags &= ~DQ_INODS; + dq->dq_flags |= DQ_MOD; + dqrele(NULLVP, dq); + return (0); +} + +/* + * Q_SYNC - sync quota files to disk. + */ +int +qsync(mp) + struct mount *mp; +{ + struct ufsmount *ump = VFSTOUFS(mp); + struct proc *p = current_proc(); /* XXX */ + struct vnode *vp, *nextvp; + struct dquot *dq; + int i, error; + + /* + * Check if the mount point has any quotas. + * If not, simply return. + */ + for (i = 0; i < MAXQUOTAS; i++) + if (ump->um_quotas[i] != NULLVP) + break; + if (i == MAXQUOTAS) + return (0); + /* + * Search vnodes associated with this mount point, + * synchronizing any modified dquot structures. 
+ */ + simple_lock(&mntvnode_slock); +again: + for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) { + if (vp->v_mount != mp) + goto again; + nextvp = vp->v_mntvnodes.le_next; + simple_lock(&vp->v_interlock); + simple_unlock(&mntvnode_slock); + error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); + if (error) { + simple_lock(&mntvnode_slock); + if (error == ENOENT) + goto again; + continue; + } + for (i = 0; i < MAXQUOTAS; i++) { + dq = VTOI(vp)->i_dquot[i]; + if (dq != NODQUOT && (dq->dq_flags & DQ_MOD)) + dqsync(vp, dq); + } + vput(vp); + simple_lock(&mntvnode_slock); + if (vp->v_mntvnodes.le_next != nextvp) + goto again; + } + simple_unlock(&mntvnode_slock); + return (0); +} + +/* + * Code pertaining to management of the in-core dquot data structures. + */ +#define DQHASH(dqvp, id) \ + (&dqhashtbl[((((int)(dqvp)) >> 8) + id) & dqhash]) +LIST_HEAD(dqhash, dquot) *dqhashtbl; +u_long dqhash; + +/* + * Dquot free list. + */ +#define DQUOTINC 5 /* minimum free dquots desired */ +TAILQ_HEAD(dqfreelist, dquot) dqfreelist; +long numdquot, desireddquot = DQUOTINC; + +/* + * Initialize the quota system. + */ +void +dqinit() +{ + + dqhashtbl = hashinit(desiredvnodes, M_DQUOT, &dqhash); + TAILQ_INIT(&dqfreelist); +} + +/* + * Obtain a dquot structure for the specified identifier and quota file + * reading the information from the file if necessary. + */ +int +dqget(vp, id, ump, type, dqp) + struct vnode *vp; + u_long id; + register struct ufsmount *ump; + register int type; + struct dquot **dqp; +{ + struct proc *p = current_proc(); /* XXX */ + struct dquot *dq; + struct dqhash *dqh; + struct vnode *dqvp; + struct iovec aiov; + struct uio auio; + int error; + + dqvp = ump->um_quotas[type]; + if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) { + *dqp = NODQUOT; + return (EINVAL); + } + /* + * Check the cache first. 
+ */ + dqh = DQHASH(dqvp, id); + for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) { + if (dq->dq_id != id || + dq->dq_ump->um_quotas[dq->dq_type] != dqvp) + continue; + /* + * Cache hit with no references. Take + * the structure off the free list. + */ + if (dq->dq_cnt == 0) + TAILQ_REMOVE(&dqfreelist, dq, dq_freelist); + DQREF(dq); + *dqp = dq; + return (0); + } + /* + * Not in cache, allocate a new one. + */ + if (dqfreelist.tqh_first == NODQUOT && + numdquot < MAXQUOTAS * desiredvnodes) + desireddquot += DQUOTINC; + if (numdquot < desireddquot) { + dq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK); + bzero((char *)dq, sizeof *dq); + numdquot++; + } else { + if ((dq = dqfreelist.tqh_first) == NULL) { + tablefull("dquot"); + *dqp = NODQUOT; + return (EUSERS); + } + if (dq->dq_cnt || (dq->dq_flags & DQ_MOD)) + panic("free dquot isn't"); + TAILQ_REMOVE(&dqfreelist, dq, dq_freelist); + LIST_REMOVE(dq, dq_hash); + } + /* + * Initialize the contents of the dquot structure. + */ + if (vp != dqvp) + vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, p); + LIST_INSERT_HEAD(dqh, dq, dq_hash); + DQREF(dq); + dq->dq_flags = DQ_LOCK; + dq->dq_id = id; + dq->dq_ump = ump; + dq->dq_type = type; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + aiov.iov_base = (caddr_t)&dq->dq_dqb; + aiov.iov_len = sizeof (struct dqblk); + auio.uio_resid = sizeof (struct dqblk); + auio.uio_offset = (off_t)(id * sizeof (struct dqblk)); + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_READ; + auio.uio_procp = (struct proc *)0; + error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]); + + if (auio.uio_resid == sizeof(struct dqblk) && error == 0) + bzero((caddr_t)&dq->dq_dqb, sizeof(struct dqblk)); + if (vp != dqvp) + VOP_UNLOCK(dqvp, 0, p); + if (dq->dq_flags & DQ_WANT) + wakeup((caddr_t)dq); + dq->dq_flags = 0; + /* + * I/O error in reading quota file, release + * quota structure and reflect problem to caller. 
+ */ + if (error) { + LIST_REMOVE(dq, dq_hash); + dqrele(vp, dq); + *dqp = NODQUOT; + return (error); + } + /* + * Check for no limit to enforce. + * Initialize time values if necessary. + */ + if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 && + dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) + dq->dq_flags |= DQ_FAKE; + if (dq->dq_id != 0) { + if (dq->dq_btime == 0) + dq->dq_btime = time.tv_sec + ump->um_btime[type]; + if (dq->dq_itime == 0) + dq->dq_itime = time.tv_sec + ump->um_itime[type]; + } + *dqp = dq; + return (0); +} + +/* + * Obtain a reference to a dquot. + */ +void +dqref(dq) + struct dquot *dq; +{ + + dq->dq_cnt++; +} + +/* + * Release a reference to a dquot. + */ +void +dqrele(vp, dq) + struct vnode *vp; + register struct dquot *dq; +{ + + if (dq == NODQUOT) + return; + if (dq->dq_cnt > 1) { + dq->dq_cnt--; + return; + } + if (dq->dq_flags & DQ_MOD) + (void) dqsync(vp, dq); + if (--dq->dq_cnt > 0) + return; + TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist); +} + +/* + * Update the disk quota in the quota file. 
+ */ +int +dqsync(vp, dq) + struct vnode *vp; + struct dquot *dq; +{ + struct proc *p = current_proc(); /* XXX */ + struct vnode *dqvp; + struct iovec aiov; + struct uio auio; + int error; + + if (dq == NODQUOT) + panic("dqsync: dquot"); + if ((dq->dq_flags & DQ_MOD) == 0) + return (0); + if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP) + panic("dqsync: file"); + if (vp != dqvp) + vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY, p); + while (dq->dq_flags & DQ_LOCK) { + dq->dq_flags |= DQ_WANT; + sleep((caddr_t)dq, PINOD+2); + if ((dq->dq_flags & DQ_MOD) == 0) { + if (vp != dqvp) + VOP_UNLOCK(dqvp, 0, p); + return (0); + } + } + dq->dq_flags |= DQ_LOCK; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + aiov.iov_base = (caddr_t)&dq->dq_dqb; + aiov.iov_len = sizeof (struct dqblk); + auio.uio_resid = sizeof (struct dqblk); + auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk)); + auio.uio_segflg = UIO_SYSSPACE; + auio.uio_rw = UIO_WRITE; + auio.uio_procp = (struct proc *)0; + error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]); + if (auio.uio_resid && error == 0) + error = EIO; + if (dq->dq_flags & DQ_WANT) + wakeup((caddr_t)dq); + dq->dq_flags &= ~(DQ_MOD|DQ_LOCK|DQ_WANT); + if (vp != dqvp) + VOP_UNLOCK(dqvp, 0, p); + return (error); +} + +/* + * Flush all entries from the cache for a particular vnode. + */ +void +dqflush(vp) + register struct vnode *vp; +{ + register struct dquot *dq, *nextdq; + struct dqhash *dqh; + + /* + * Move all dquot's that used to refer to this quota + * file off their hash chains (they will eventually + * fall off the head of the free list and be re-used). 
+ */ + for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) { + for (dq = dqh->lh_first; dq; dq = nextdq) { + nextdq = dq->dq_hash.le_next; + if (dq->dq_ump->um_quotas[dq->dq_type] != vp) + continue; + if (dq->dq_cnt) + panic("dqflush: stray dquot"); + LIST_REMOVE(dq, dq_hash); + dq->dq_ump = (struct ufsmount *)0; + } + } +} diff --git a/bsd/ufs/ufs/ufs_readwrite.c b/bsd/ufs/ufs/ufs_readwrite.c new file mode 100644 index 000000000..5b7cb2ac2 --- /dev/null +++ b/bsd/ufs/ufs/ufs_readwrite.c @@ -0,0 +1,680 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95 + */ + +#define BLKSIZE(a, b, c) blksize(a, b, c) +#define FS struct fs +#define I_FS i_fs +#define PGRD ffs_pgrd +#define PGRD_S "ffs_pgrd" +#define PGWR ffs_pgwr +#define PGWR_S "ffs_pgwr" + +/* + * Vnode op for reading. 
+ */ +/* ARGSUSED */ +ffs_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register struct vnode *vp; + register struct inode *ip; + register struct uio *uio; + register FS *fs; + struct buf *bp = (struct buf *)0; + ufs_daddr_t lbn, nextlbn; + off_t bytesinfile; + long size, xfersize, blkoffset; + int devBlockSize=0; + int error; + u_short mode; +#if REV_ENDIAN_FS + int rev_endian=0; +#endif /* REV_ENDIAN_FS */ + + vp = ap->a_vp; + ip = VTOI(vp); + mode = ip->i_mode; + uio = ap->a_uio; + +#if REV_ENDIAN_FS + rev_endian=(vp->v_mount->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_READ) + panic("ffs_read: invalid uio_rw = %x", uio->uio_rw); + + if (vp->v_type == VLNK) { + if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) + panic("ffs_read: short symlink = %d", ip->i_size); + } else if (vp->v_type != VREG && vp->v_type != VDIR) + panic("ffs_read: invalid v_type = %x", vp->v_type); +#endif + fs = ip->I_FS; + if (uio->uio_offset < 0) + return (EINVAL); + if (uio->uio_offset > fs->fs_maxfilesize) + return (EFBIG); + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + if (UBCISVALID(vp)) { + error = cluster_read(vp, uio, (off_t)ip->i_size, + devBlockSize, 0); + } else { + for (error = 0, bp = NULL; uio->uio_resid > 0; + bp = NULL) { + if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) + break; + lbn = lblkno(fs, uio->uio_offset); + nextlbn = lbn + 1; + size = BLKSIZE(fs, ip, lbn); + blkoffset = blkoff(fs, uio->uio_offset); + xfersize = fs->fs_bsize - blkoffset; + if (uio->uio_resid < xfersize) + xfersize = uio->uio_resid; + if (bytesinfile < xfersize) + xfersize = bytesinfile; + + if (lblktosize(fs, nextlbn) >= ip->i_size) + error = bread(vp, lbn, size, NOCRED, &bp); + else if (lbn - 1 == vp->v_lastr && !(vp->v_flag & VRAOFF)) { + int nextsize = BLKSIZE(fs, ip, nextlbn); + error = breadn(vp, lbn, + size, &nextlbn, &nextsize, 1, 
NOCRED, &bp); + } else + error = bread(vp, lbn, size, NOCRED, &bp); + if (error) + break; + vp->v_lastr = lbn; + + /* + * We should only get non-zero b_resid when an I/O error + * has occurred, which should cause us to break above. + * However, if the short read did not cause an error, + * then we want to ensure that we do not uiomove bad + * or uninitialized data. + */ + size -= bp->b_resid; + if (size < xfersize) { + if (size == 0) + break; + xfersize = size; + } +#if REV_ENDIAN_FS + if (rev_endian && S_ISDIR(mode)) { + byte_swap_dir_block_in((char *)bp->b_data + blkoffset, xfersize); + } +#endif /* REV_ENDIAN_FS */ + if (error = + uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio)) { +#if REV_ENDIAN_FS + if (rev_endian && S_ISDIR(mode)) { + byte_swap_dir_block_in((char *)bp->b_data + blkoffset, xfersize); + } +#endif /* REV_ENDIAN_FS */ + break; + } + +#if REV_ENDIAN_FS + if (rev_endian && S_ISDIR(mode)) { + byte_swap_dir_out((char *)bp->b_data + blkoffset, xfersize); + } +#endif /* REV_ENDIAN_FS */ + if (S_ISREG(mode) && (xfersize + blkoffset == fs->fs_bsize || + uio->uio_offset == ip->i_size)) + bp->b_flags |= B_AGE; + brelse(bp); + } + } + if (bp != NULL) + brelse(bp); + ip->i_flag |= IN_ACCESS; + return (error); +} + +/* + * Vnode op for writing. 
+ */ +ffs_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + register struct vnode *vp; + register struct uio *uio; + register struct inode *ip; + register FS *fs; + struct buf *bp; + struct proc *p; + ufs_daddr_t lbn; + off_t osize; + int blkoffset, flags, ioflag, resid, rsd, size, xfersize; + int devBlockSize=0; + int save_error=0, save_size=0; + int blkalloc = 0; + int error = 0; + +#if REV_ENDIAN_FS + int rev_endian=0; +#endif /* REV_ENDIAN_FS */ + + ioflag = ap->a_ioflag; + uio = ap->a_uio; + vp = ap->a_vp; + ip = VTOI(vp); +#if REV_ENDIAN_FS + rev_endian=(vp->v_mount->mnt_flag & MNT_REVEND); +#endif /* REV_ENDIAN_FS */ + +#if DIAGNOSTIC + if (uio->uio_rw != UIO_WRITE) + panic("ffs_write: uio_rw = %x\n", uio->uio_rw); +#endif + + switch (vp->v_type) { + case VREG: + if (ioflag & IO_APPEND) + uio->uio_offset = ip->i_size; + if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) + return (EPERM); + /* FALLTHROUGH */ + case VLNK: + break; + case VDIR: + if ((ioflag & IO_SYNC) == 0) + panic("ffs_write: nonsync dir write"); + break; + default: + panic("ffs_write: invalid v_type=%x", vp->v_type); + } + + fs = ip->I_FS; + if (uio->uio_offset < 0 || + (u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) + return (EFBIG); + if (uio->uio_resid == 0) + return (0); + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + /* + * Maybe this should be above the vnode op call, but so long as + * file servers have no limits, I don't think it matters. + */ + p = uio->uio_procp; + if (vp->v_type == VREG && p && + uio->uio_offset + uio->uio_resid > + p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { + psignal(p, SIGXFSZ); + return (EFBIG); + } + + resid = uio->uio_resid; + osize = ip->i_size; + flags = ioflag & IO_SYNC ? 
B_SYNC : 0; + + if (UBCISVALID(vp)) { + off_t filesize; + off_t endofwrite; + off_t local_offset; + off_t head_offset; + int local_flags; + int first_block; + int fboff; + int fblk; + int loopcount; + int file_extended = 0; + + endofwrite = uio->uio_offset + uio->uio_resid; + + if (endofwrite > ip->i_size) { + filesize = endofwrite; + file_extended = 1; + } else + filesize = ip->i_size; + + head_offset = ip->i_size; + + /* Go ahead and allocate the block that are going to be written */ + rsd = uio->uio_resid; + local_offset = uio->uio_offset; + local_flags = ioflag & IO_SYNC ? B_SYNC : 0; + local_flags |= B_NOBUFF; + + first_block = 1; + fboff = 0; + fblk = 0; + loopcount = 0; + + for (error = 0; rsd > 0;) { + blkalloc = 0; + lbn = lblkno(fs, local_offset); + blkoffset = blkoff(fs, local_offset); + xfersize = fs->fs_bsize - blkoffset; + if (first_block) + fboff = blkoffset; + if (rsd < xfersize) + xfersize = rsd; + if (fs->fs_bsize > xfersize) + local_flags |= B_CLRBUF; + else + local_flags &= ~B_CLRBUF; + + /* Allocate block without reading into a buf */ + error = ffs_balloc(ip, + lbn, blkoffset + xfersize, ap->a_cred, + &bp, local_flags, &blkalloc); + if (error) + break; + if (first_block) { + fblk = blkalloc; + first_block = 0; + } + loopcount++; + + rsd -= xfersize; + local_offset += (off_t)xfersize; + if (local_offset > ip->i_size) + ip->i_size = local_offset; + } + + if(error) { + save_error = error; + save_size = rsd; + uio->uio_resid -= rsd; + if (file_extended) + filesize -= rsd; + } + + flags = ioflag & IO_SYNC ? 
IO_SYNC : 0; + /* flags |= IO_NOZEROVALID; */ + + if((error == 0) && fblk && fboff) { + if( fblk > fs->fs_bsize) + panic("ffs_balloc : allocated more than bsize(head)"); + /* We need to zero out the head */ + head_offset = uio->uio_offset - (off_t)fboff ; + flags |= IO_HEADZEROFILL; + /* flags &= ~IO_NOZEROVALID; */ + } + + if((error == 0) && blkalloc && ((blkalloc - xfersize) > 0)) { + /* We need to zero out the tail */ + if( blkalloc > fs->fs_bsize) + panic("ffs_balloc : allocated more than bsize(tail)"); + local_offset += (blkalloc - xfersize); + if (loopcount == 1) { + /* blkalloc is same as fblk; so no need to check again*/ + local_offset -= fboff; + } + flags |= IO_TAILZEROFILL; + /* Freshly allocated block; bzero even if + * find a page + */ + /* flags &= ~IO_NOZEROVALID; */ + } + /* + * if the write starts beyond the current EOF then + * we we'll zero fill from the current EOF to where the write begins + */ + + error = cluster_write(vp, uio, osize, filesize, head_offset, local_offset, devBlockSize, flags); + + if (uio->uio_offset > osize) { + if (error && ((ioflag & IO_UNIT)==0)) + (void)VOP_TRUNCATE(vp, uio->uio_offset, + ioflag & IO_SYNC, ap->a_cred, uio->uio_procp); + ip->i_size = uio->uio_offset; + ubc_setsize(vp, (off_t)ip->i_size); + } + if(save_error) { + uio->uio_resid += save_size; + if(!error) + error = save_error; + } + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } else { + flags = ioflag & IO_SYNC ? 
B_SYNC : 0; + + for (error = 0; uio->uio_resid > 0;) { + lbn = lblkno(fs, uio->uio_offset); + blkoffset = blkoff(fs, uio->uio_offset); + xfersize = fs->fs_bsize - blkoffset; + if (uio->uio_resid < xfersize) + xfersize = uio->uio_resid; + + if (fs->fs_bsize > xfersize) + flags |= B_CLRBUF; + else + flags &= ~B_CLRBUF; + + error = ffs_balloc(ip, + lbn, blkoffset + xfersize, ap->a_cred, &bp, flags, 0); + if (error) + break; + if (uio->uio_offset + xfersize > ip->i_size) { + ip->i_size = uio->uio_offset + xfersize; + + if (UBCISVALID(vp)) + ubc_setsize(vp, (u_long)ip->i_size); /* XXX check errors */ + } + + size = BLKSIZE(fs, ip, lbn) - bp->b_resid; + if (size < xfersize) + xfersize = size; + + error = + uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); +#if REV_ENDIAN_FS + if (rev_endian && S_ISDIR(ip->i_mode)) { + byte_swap_dir_out((char *)bp->b_data + blkoffset, xfersize); + } +#endif /* REV_ENDIAN_FS */ + if (ioflag & IO_SYNC) + (void)bwrite(bp); + else if (xfersize + blkoffset == fs->fs_bsize) { + bp->b_flags |= B_AGE; + bawrite(bp); + } + else + bdwrite(bp); + if (error || xfersize == 0) + break; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } + } + /* + * If we successfully wrote any data, and we are not the superuser + * we clear the setuid and setgid bits as a precaution against + * tampering. + */ + if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0) + ip->i_mode &= ~(ISUID | ISGID); + if (error) { + if (ioflag & IO_UNIT) { + (void)VOP_TRUNCATE(vp, osize, + ioflag & IO_SYNC, ap->a_cred, uio->uio_procp); + uio->uio_offset -= resid - uio->uio_resid; + uio->uio_resid = resid; + } + } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) + error = VOP_UPDATE(vp, &time, &time, 1); + return (error); +} + +/* + * Vnode op for page read. 
+ */ +/* ARGSUSED */ +PGRD(ap) + struct vop_pgrd_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + } */ *ap; +{ + +#warning ufs_readwrite PGRD need to implement +return (EOPNOTSUPP); + +} + +/* + * Vnode op for page read. + */ +/* ARGSUSED */ +PGWR(ap) + struct vop_pgwr_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + memory_object_t a_pager; + vm_offset_t a_offset; + } */ *ap; +{ + +#warning ufs_readwrite PGWR need to implement +return (EOPNOTSUPP); + +} + +/* + * Vnode op for pagein. + * Similar to ffs_read() + */ +/* ARGSUSED */ +ffs_pagein(ap) + struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + upl_t pl = ap->a_pl; + size_t size= ap->a_size; + off_t f_offset = ap->a_f_offset; + vm_offset_t pl_offset = ap->a_pl_offset; + int flags = ap->a_flags; + register struct inode *ip; + int devBlockSize=0; + int error; + + ip = VTOI(vp); + + /* check pageins for reg file only and ubc info is present*/ + if (UBCINVALID(vp)) + panic("ffs_pagein: Not a VREG: vp=%x", vp); + if (UBCINFOMISSING(vp)) + panic("ffs_pagein: No mapping: vp=%x", vp); + +#if DIAGNOSTIC + if (vp->v_type == VLNK) { + if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) + panic("%s: short symlink", "ffs_pagein"); + } else if (vp->v_type != VREG && vp->v_type != VDIR) + panic("%s: type %d", "ffs_pagein", vp->v_type); +#endif + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + error = cluster_pagein(vp, pl, pl_offset, f_offset, size, + (off_t)ip->i_size, devBlockSize, flags); + /* ip->i_flag |= IN_ACCESS; */ + return (error); +} + +/* + * Vnode op for pageout. 
+ * Similar to ffs_write() + * make sure the buf is not in hash queue when you return + */ +ffs_pageout(ap) + struct vop_pageout_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_f_offset, + size_t a_size, + struct ucred *a_cred, + int a_flags + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + upl_t pl = ap->a_pl; + size_t size= ap->a_size; + off_t f_offset = ap->a_f_offset; + vm_offset_t pl_offset = ap->a_pl_offset; + int flags = ap->a_flags; + register struct inode *ip; + register FS *fs; + int error ; + int devBlockSize=0; + size_t xfer_size = 0; + int local_flags=0; + off_t local_offset; + int resid, blkoffset; + size_t xsize, lsize; + daddr_t lbn; + int save_error =0, save_size=0; + vm_offset_t lupl_offset; + int nocommit = flags & UPL_NOCOMMIT; + struct buf *bp; + + ip = VTOI(vp); + + /* check pageouts for reg file only and ubc info is present*/ + if (UBCINVALID(vp)) + panic("ffs_pageout: Not a VREG: vp=%x", vp); + if (UBCINFOMISSING(vp)) + panic("ffs_pageout: No mapping: vp=%x", vp); + + if (vp->v_mount->mnt_flag & MNT_RDONLY) { + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + return (EROFS); + } + fs = ip->I_FS; + + if (f_offset < 0 || f_offset >= ip->i_size) { + if (!nocommit) + kernel_upl_abort_range(pl, pl_offset, size, + UPL_ABORT_FREE_ON_EMPTY); + return (EINVAL); + } + + /* + * once we enable multi-page pageouts we will + * need to make sure we abort any pages in the upl + * that we don't issue an I/O for + */ + if (f_offset + size > ip->i_size) + xfer_size = ip->i_size - f_offset; + else + xfer_size = size; + + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + if (xfer_size & (PAGE_SIZE - 1)) { + /* if not a multiple of page size + * then round up to be a multiple + * the physical disk block size + */ + xfer_size = (xfer_size + (devBlockSize - 1)) & ~(devBlockSize - 1); + } + + /* + * once the block allocation is moved to ufs_cmap + * we can remove all the size and 
offset checks above + * cluster_pageout does all of this now + * we need to continue to do it here so as not to + * allocate blocks that aren't going to be used because + * of a bogus parameter being passed in + */ + local_flags = 0; + resid = xfer_size; + local_offset = f_offset; + for (error = 0; resid > 0;) { + lbn = lblkno(fs, local_offset); + blkoffset = blkoff(fs, local_offset); + xsize = fs->fs_bsize - blkoffset; + if (resid < xsize) + xsize = resid; + /* Allocate block without reading into a buf */ + error = ffs_blkalloc(ip, + lbn, blkoffset + xsize, ap->a_cred, + local_flags); + if (error) + break; + resid -= xsize; + local_offset += (off_t)xsize; + } + + if (error) { + save_size = resid; + save_error = error; + xfer_size -= save_size; + } + + + error = cluster_pageout(vp, pl, pl_offset, f_offset, round_page(xfer_size), ip->i_size, devBlockSize, flags); + + if(save_error) { + lupl_offset = size - save_size; + resid = round_page(save_size); + if (!nocommit) + kernel_upl_abort_range(pl, lupl_offset, + resid, UPL_ABORT_FREE_ON_EMPTY); + if(!error) + error= save_error; + } + return (error); +} diff --git a/bsd/ufs/ufs/ufs_vfsops.c b/bsd/ufs/ufs/ufs_vfsops.c new file mode 100644 index 000000000..82ca680e8 --- /dev/null +++ b/bsd/ufs/ufs/ufs_vfsops.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1991, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_vfsops.c 8.8 (Berkeley) 5/20/95 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/* + * Make a filesystem operational. + * Nothing to do at the moment. + */ +/* ARGSUSED */ +int +ufs_start(mp, flags, p) + struct mount *mp; + int flags; + struct proc *p; +{ + + return (0); +} + +/* + * Return the root of a filesystem. 
+ */ +int +ufs_root(mp, vpp) + struct mount *mp; + struct vnode **vpp; +{ + struct vnode *nvp; + int error; + + if (error = VFS_VGET(mp, (ino_t)ROOTINO, &nvp)) + return (error); + *vpp = nvp; + return (0); +} + +/* + * Do operations associated with quotas + */ +int +ufs_quotactl(mp, cmds, uid, arg, p) + struct mount *mp; + int cmds; + uid_t uid; + caddr_t arg; + struct proc *p; +{ + int cmd, type, error; + +#if !QUOTA + return (EOPNOTSUPP); +#else + if (uid == -1) + uid = p->p_cred->p_ruid; + cmd = cmds >> SUBCMDSHIFT; + + switch (cmd) { + case Q_SYNC: + break; + case Q_GETQUOTA: + if (uid == p->p_cred->p_ruid) + break; + /* fall through */ + default: + if (error = suser(p->p_ucred, &p->p_acflag)) + return (error); + } + + type = cmds & SUBCMDMASK; + if ((u_int)type >= MAXQUOTAS) + return (EINVAL); + if (vfs_busy(mp, LK_NOWAIT, 0, p)) + return (0); + + switch (cmd) { + + case Q_QUOTAON: + error = quotaon(p, mp, type, arg); + break; + + case Q_QUOTAOFF: + error = quotaoff(p, mp, type); + break; + + case Q_SETQUOTA: + error = setquota(mp, uid, type, arg); + break; + + case Q_SETUSE: + error = setuse(mp, uid, type, arg); + break; + + case Q_GETQUOTA: + error = getquota(mp, uid, type, arg); + break; + + case Q_SYNC: + error = qsync(mp); + break; + + default: + error = EINVAL; + break; + } + vfs_unbusy(mp, p); + return (error); +#endif +} + +/* + * Initial UFS filesystems, done only once. + */ +int +ufs_init(vfsp) + struct vfsconf *vfsp; +{ + static int done; + + if (done) + return (0); + done = 1; + ufs_ihashinit(); +#if QUOTA + dqinit(); +#endif + return (0); +} + +/* + * This is the generic part of fhtovp called after the underlying + * filesystem has validated the file handle. + * + * Verify that a host should have access to a filesystem, and if so + * return a vnode for the presented file handle. 
+ */ +int +ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp) + register struct mount *mp; + struct ufid *ufhp; + struct mbuf *nam; + struct vnode **vpp; + int *exflagsp; + struct ucred **credanonp; +{ + register struct inode *ip; + register struct netcred *np; + register struct ufsmount *ump = VFSTOUFS(mp); + struct vnode *nvp; + int error; + + /* + * Get the export permission structure for this tuple. + */ + np = vfs_export_lookup(mp, &ump->um_export, nam); + if (np == NULL) + return (EACCES); + + if (error = VFS_VGET(mp, ufhp->ufid_ino, &nvp)) { + *vpp = NULLVP; + return (error); + } + ip = VTOI(nvp); + if (ip->i_mode == 0 || ip->i_gen != ufhp->ufid_gen) { + vput(nvp); + *vpp = NULLVP; + return (ESTALE); + } + *vpp = nvp; + *exflagsp = np->netc_exflags; + *credanonp = &np->netc_anon; + return (0); +} diff --git a/bsd/ufs/ufs/ufs_vnops.c b/bsd/ufs/ufs/ufs_vnops.c new file mode 100644 index 000000000..b6fae6bc6 --- /dev/null +++ b/bsd/ufs/ufs/ufs_vnops.c @@ -0,0 +1,2241 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)ufs_vnops.c 8.27 (Berkeley) 5/27/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#if REV_ENDIAN_FS +#include +#include +#endif /* REV_ENDIAN_FS */ + +static int ufs_chmod __P((struct vnode *, int, struct ucred *, struct proc *)); +static int ufs_chown + __P((struct vnode *, uid_t, gid_t, struct ucred *, struct proc *)); + +union _qcvt { + int64_t qcvt; + int32_t val[2]; +}; +#define SETHIGH(q, h) { \ + union _qcvt tmp; \ + tmp.qcvt = (q); \ + tmp.val[_QUAD_HIGHWORD] = (h); \ + (q) = tmp.qcvt; \ +} +#define SETLOW(q, l) { \ + union _qcvt tmp; \ + tmp.qcvt = (q); \ + tmp.val[_QUAD_LOWWORD] = (l); \ + (q) = tmp.qcvt; \ +} + +/* + * Create a regular file + */ +int +ufs_create(ap) + struct vop_create_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + int error; + + if (error = + ufs_makeinode(MAKEIMODE(ap->a_vap->va_type, ap->a_vap->va_mode), + ap->a_dvp, ap->a_vpp, ap->a_cnp)) + return (error); + return (0); +} + +/* + * Mknod vnode call + */ +/* ARGSUSED */ +int +ufs_mknod(ap) + struct vop_mknod_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + struct 
vattr *vap = ap->a_vap; + struct vnode **vpp = ap->a_vpp; + struct inode *ip; + int error; + + if (error = + ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode), + ap->a_dvp, vpp, ap->a_cnp)) + return (error); + ip = VTOI(*vpp); + ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; + if (vap->va_rdev != VNOVAL) { + /* + * Want to be able to use this to make badblock + * inodes, so don't truncate the dev number. + */ + ip->i_rdev = vap->va_rdev; + } + /* + * Remove inode so that it will be reloaded by VFS_VGET and + * checked to see if it is an alias of an existing entry in + * the inode cache. + */ + vput(*vpp); + (*vpp)->v_type = VNON; + vgone(*vpp); + *vpp = 0; + return (0); +} + +/* + * Open called. + * + * Nothing to do. + */ +/* ARGSUSED */ +int +ufs_open(ap) + struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + /* + * Files marked append-only must be opened for appending. + */ + if ((VTOI(ap->a_vp)->i_flags & APPEND) && + (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE) + return (EPERM); + return (0); +} + +/* + * Close called. + * + * Update the times on the inode. + */ +/* ARGSUSED */ +int +ufs_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct inode *ip = VTOI(vp); + + simple_lock(&vp->v_interlock); + if (vp->v_usecount > (UBCINFOEXISTS(vp) ? 
2 : 1)) + ITIMES(ip, &time, &time); + simple_unlock(&vp->v_interlock); + + if (!VOP_ISLOCKED(vp)) { + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p); + + cluster_push(vp); + + VOP_UNLOCK(vp, 0, ap->a_p); + } + return (0); +} + +int +ufs_access(ap) + struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct inode *ip = VTOI(vp); + struct ucred *cred = ap->a_cred; + mode_t mask, mode = ap->a_mode; + register gid_t *gp; + int i, error; + + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket, fifo, or a block or + * character device resident on the file system. + */ + if (mode & VWRITE) { + switch (vp->v_type) { + case VDIR: + case VLNK: + case VREG: + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); +#if QUOTA + if (error = getinoquota(ip)) + return (error); +#endif + break; + } + } + + /* If immutable bit set, nobody gets to write it. */ + if ((mode & VWRITE) && (ip->i_flags & IMMUTABLE)) + return (EPERM); + + /* Otherwise, user id 0 always gets access. */ + if (cred->cr_uid == 0) + return (0); + + mask = 0; + + /* Otherwise, check the owner. */ + if (cred->cr_uid == ip->i_uid) { + if (mode & VEXEC) + mask |= S_IXUSR; + if (mode & VREAD) + mask |= S_IRUSR; + if (mode & VWRITE) + mask |= S_IWUSR; + return ((ip->i_mode & mask) == mask ? 0 : EACCES); + } + + /* Otherwise, check the groups. */ + for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) + if (ip->i_gid == *gp) { + if (mode & VEXEC) + mask |= S_IXGRP; + if (mode & VREAD) + mask |= S_IRGRP; + if (mode & VWRITE) + mask |= S_IWGRP; + return ((ip->i_mode & mask) == mask ? 0 : EACCES); + } + + /* Otherwise, check everyone else. */ + if (mode & VEXEC) + mask |= S_IXOTH; + if (mode & VREAD) + mask |= S_IROTH; + if (mode & VWRITE) + mask |= S_IWOTH; + return ((ip->i_mode & mask) == mask ? 
0 : EACCES); +} + +/* ARGSUSED */ +int +ufs_getattr(ap) + struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct inode *ip = VTOI(vp); + register struct vattr *vap = ap->a_vap; + int devBlockSize=0; + + ITIMES(ip, &time, &time); + /* + * Copy from inode table + */ + vap->va_fsid = ip->i_dev; + vap->va_fileid = ip->i_number; + vap->va_mode = ip->i_mode & ~IFMT; + vap->va_nlink = ip->i_nlink; + vap->va_uid = ip->i_uid; + vap->va_gid = ip->i_gid; + vap->va_rdev = (dev_t)ip->i_rdev; + vap->va_size = ip->i_din.di_size; + vap->va_atime.tv_sec = ip->i_atime; + vap->va_atime.tv_nsec = ip->i_atimensec; + vap->va_mtime.tv_sec = ip->i_mtime; + vap->va_mtime.tv_nsec = ip->i_mtimensec; + vap->va_ctime.tv_sec = ip->i_ctime; + vap->va_ctime.tv_nsec = ip->i_ctimensec; + vap->va_flags = ip->i_flags; + vap->va_gen = ip->i_gen; + /* this doesn't belong here */ + if (vp->v_type == VBLK) + vap->va_blocksize = BLKDEV_IOSIZE; + else if (vp->v_type == VCHR) + vap->va_blocksize = MAXPHYSIO; + else + vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize; + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + vap->va_bytes = dbtob((u_quad_t)ip->i_blocks, devBlockSize); + vap->va_type = vp->v_type; + vap->va_filerev = ip->i_modrev; + return (0); +} + +/* + * Set attribute vnode op. called from several syscalls + */ +int +ufs_setattr(ap) + struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vattr *vap = ap->a_vap; + struct vnode *vp = ap->a_vp; + struct inode *ip = VTOI(vp); + struct ucred *cred = ap->a_cred; + struct proc *p = ap->a_p; + struct timeval atimeval, mtimeval; + int error; + + /* + * Check for unsettable attributes. 
+ */ + if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) || + (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) || + (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) || + ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) { + return (EINVAL); + } + if (vap->va_flags != VNOVAL) { + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + if (cred->cr_uid != ip->i_uid && + (error = suser(cred, &p->p_acflag))) + return (error); + if (cred->cr_uid == 0) { + if ((ip->i_flags & (SF_IMMUTABLE | SF_APPEND)) && + securelevel > 0) + return (EPERM); + ip->i_flags = vap->va_flags; + } else { + if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND) || + (vap->va_flags & UF_SETTABLE) != vap->va_flags) + return (EPERM); + ip->i_flags &= SF_SETTABLE; + ip->i_flags |= (vap->va_flags & UF_SETTABLE); + } + ip->i_flag |= IN_CHANGE; + if (vap->va_flags & (IMMUTABLE | APPEND)) + return (0); + } + if (ip->i_flags & (IMMUTABLE | APPEND)) + return (EPERM); + /* + * Go through the fields and update iff not VNOVAL. + */ + if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) { + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + if (error = ufs_chown(vp, vap->va_uid, vap->va_gid, cred, p)) + return (error); + } + if (vap->va_size != VNOVAL) { + /* + * Disallow write attempts on read-only file systems; + * unless the file is a socket, fifo, or a block or + * character device resident on the file system. 
+ */ + switch (vp->v_type) { + case VDIR: + return (EISDIR); + case VLNK: + case VREG: + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + break; + } + if (error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)) + return (error); + } + ip = VTOI(vp); + if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + if (cred->cr_uid != ip->i_uid && + (error = suser(cred, &p->p_acflag)) && + ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || + (error = VOP_ACCESS(vp, VWRITE, cred, p)))) + return (error); + if (vap->va_atime.tv_sec != VNOVAL) + ip->i_flag |= IN_ACCESS; + if (vap->va_mtime.tv_sec != VNOVAL) + ip->i_flag |= IN_CHANGE | IN_UPDATE; + atimeval.tv_sec = vap->va_atime.tv_sec; + atimeval.tv_usec = vap->va_atime.tv_nsec / 1000; + mtimeval.tv_sec = vap->va_mtime.tv_sec; + mtimeval.tv_usec = vap->va_mtime.tv_nsec / 1000; + if (error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)) + return (error); + } + error = 0; + if (vap->va_mode != (mode_t)VNOVAL) { + if (vp->v_mount->mnt_flag & MNT_RDONLY) + return (EROFS); + error = ufs_chmod(vp, (int)vap->va_mode, cred, p); + } + return (error); +} + +/* + * Change the mode on a file. + * Inode must be locked before calling. + */ +static int +ufs_chmod(vp, mode, cred, p) + register struct vnode *vp; + register int mode; + register struct ucred *cred; + struct proc *p; +{ + register struct inode *ip = VTOI(vp); + int error; + + if (cred->cr_uid != ip->i_uid && + (error = suser(cred, &p->p_acflag))) + return (error); + if (cred->cr_uid) { + if (vp->v_type != VDIR && (mode & S_ISTXT)) + return (EFTYPE); + if (!groupmember(ip->i_gid, cred) && (mode & ISGID)) + return (EPERM); + } + ip->i_mode &= ~ALLPERMS; + ip->i_mode |= (mode & ALLPERMS); + ip->i_flag |= IN_CHANGE; + return (0); +} + +/* + * Perform chown operation on inode ip; + * inode must be locked prior to call. 
+ */ +static int +ufs_chown(vp, uid, gid, cred, p) + register struct vnode *vp; + uid_t uid; + gid_t gid; + struct ucred *cred; + struct proc *p; +{ + register struct inode *ip = VTOI(vp); + uid_t ouid; + gid_t ogid; + int error = 0; +#if QUOTA + register int i; + long change; +#endif + + if (uid == (uid_t)VNOVAL) + uid = ip->i_uid; + if (gid == (gid_t)VNOVAL) + gid = ip->i_gid; + /* + * If we don't own the file, are trying to change the owner + * of the file, or are not a member of the target group, + * the caller must be superuser or the call fails. + */ + if ((cred->cr_uid != ip->i_uid || uid != ip->i_uid || + (gid != ip->i_gid && !groupmember((gid_t)gid, cred))) && + (error = suser(cred, &p->p_acflag))) + return (error); + ogid = ip->i_gid; + ouid = ip->i_uid; +#if QUOTA + if (error = getinoquota(ip)) + return (error); + if (ouid == uid) { + dqrele(vp, ip->i_dquot[USRQUOTA]); + ip->i_dquot[USRQUOTA] = NODQUOT; + } + if (ogid == gid) { + dqrele(vp, ip->i_dquot[GRPQUOTA]); + ip->i_dquot[GRPQUOTA] = NODQUOT; + } + change = ip->i_blocks; + (void) chkdq(ip, -change, cred, CHOWN); + (void) chkiq(ip, -1, cred, CHOWN); + for (i = 0; i < MAXQUOTAS; i++) { + dqrele(vp, ip->i_dquot[i]); + ip->i_dquot[i] = NODQUOT; + } +#endif + ip->i_gid = gid; + ip->i_uid = uid; +#if QUOTA + if ((error = getinoquota(ip)) == 0) { + if (ouid == uid) { + dqrele(vp, ip->i_dquot[USRQUOTA]); + ip->i_dquot[USRQUOTA] = NODQUOT; + } + if (ogid == gid) { + dqrele(vp, ip->i_dquot[GRPQUOTA]); + ip->i_dquot[GRPQUOTA] = NODQUOT; + } + if ((error = chkdq(ip, change, cred, CHOWN)) == 0) { + if ((error = chkiq(ip, 1, cred, CHOWN)) == 0) + goto good; + else + (void) chkdq(ip, -change, cred, CHOWN|FORCE); + } + for (i = 0; i < MAXQUOTAS; i++) { + dqrele(vp, ip->i_dquot[i]); + ip->i_dquot[i] = NODQUOT; + } + } + ip->i_gid = ogid; + ip->i_uid = ouid; + if (getinoquota(ip) == 0) { + if (ouid == uid) { + dqrele(vp, ip->i_dquot[USRQUOTA]); + ip->i_dquot[USRQUOTA] = NODQUOT; + } + if (ogid == gid) { + dqrele(vp, 
ip->i_dquot[GRPQUOTA]); + ip->i_dquot[GRPQUOTA] = NODQUOT; + } + (void) chkdq(ip, change, cred, FORCE|CHOWN); + (void) chkiq(ip, 1, cred, FORCE|CHOWN); + (void) getinoquota(ip); + } + return (error); +good: + if (getinoquota(ip)) + panic("chown: lost quota"); +#endif /* QUOTA */ + if (ouid != uid || ogid != gid) + ip->i_flag |= IN_CHANGE; + if (ouid != uid && cred->cr_uid != 0) + ip->i_mode &= ~ISUID; + if (ogid != gid && cred->cr_uid != 0) + ip->i_mode &= ~ISGID; + return (0); +} + +/* ARGSUSED */ +int +ufs_ioctl(ap) + struct vop_ioctl_args /* { + struct vnode *a_vp; + int a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + switch (ap->a_command) { + + case 1: + { register struct inode *ip; + register struct vnode *vp; + register struct fs *fs; + register struct radvisory *ra; + int devBlockSize = 0; + int error; + + vp = ap->a_vp; + + VOP_LEASE(vp, ap->a_p, ap->a_cred, LEASE_READ); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, ap->a_p); + + ra = (struct radvisory *)(ap->a_data); + ip = VTOI(vp); + fs = ip->i_fs; + + if ((u_int64_t)ra->ra_offset >= ip->i_size) { + VOP_UNLOCK(vp, 0, ap->a_p); + return (EFBIG); + } + VOP_DEVBLOCKSIZE(ip->i_devvp, &devBlockSize); + + error = advisory_read(vp, ip->i_size, ra->ra_offset, ra->ra_count, devBlockSize); + VOP_UNLOCK(vp, 0, ap->a_p); + return (error); + } + default: + return (ENOTTY); + } +} + +/* ARGSUSED */ +int +ufs_select(ap) + struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + /* + * We should really check to see if I/O is possible. + */ + return (1); +} + +/* + * Mmap a file + * + * NB Currently unsupported. + */ +/* ARGSUSED */ +int +ufs_mmap(ap) + struct vop_mmap_args /* { + struct vnode *a_vp; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + + return (EINVAL); +} + +/* + * Seek on a file + * + * Nothing to do, so just return. 
+ */ +/* ARGSUSED */ +int +ufs_seek(ap) + struct vop_seek_args /* { + struct vnode *a_vp; + off_t a_oldoff; + off_t a_newoff; + struct ucred *a_cred; + } */ *ap; +{ + + return (0); +} + +int +ufs_remove(ap) + struct vop_remove_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; + } */ *ap; +{ + struct inode *ip; + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + int error; + + ip = VTOI(vp); + if ((ip->i_flags & (IMMUTABLE | APPEND)) || + (VTOI(dvp)->i_flags & APPEND)) { + error = EPERM; + goto out; + } + if ((vp->v_usecount > (UBCINFOEXISTS(vp) ? 2 : 1)) && + (ap->a_cnp->cn_flags & NODELETEBUSY)) { + /* Carbon and Classic clients can't delete busy files */ + error = EBUSY; + goto out; + } + if ((error = ufs_dirremove(dvp, ap->a_cnp)) == 0) { + ip->i_nlink--; + ip->i_flag |= IN_CHANGE; + } + + if (dvp == vp) + vrele(vp); + else + vput(vp); + vput(dvp); + + if (UBCINFOEXISTS(vp)) { + (void) ubc_uncache(vp); + ubc_release(vp); + /* WARNING vp may not be valid after this */ + } + + return (error); + +out: + if (dvp == vp) + vrele(vp); + else + vput(vp); + vput(dvp); + return (error); +} + +/* + * link vnode call + */ +int +ufs_link(ap) + struct vop_link_args /* { + struct vnode *a_vp; + struct vnode *a_tdvp; + struct componentname *a_cnp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vnode *tdvp = ap->a_tdvp; + struct componentname *cnp = ap->a_cnp; + struct proc *p = cnp->cn_proc; + struct inode *ip; + struct timeval tv; + int error; + +#if DIAGNOSTIC + if ((cnp->cn_flags & HASBUF) == 0) + panic("ufs_link: no name"); +#endif + if (tdvp->v_mount != vp->v_mount) { + VOP_ABORTOP(tdvp, cnp); + error = EXDEV; + goto out2; + } + if (tdvp != vp && (error = vn_lock(vp, LK_EXCLUSIVE, p))) { + VOP_ABORTOP(tdvp, cnp); + goto out2; + } + ip = VTOI(vp); + if ((nlink_t)ip->i_nlink >= LINK_MAX) { + VOP_ABORTOP(tdvp, cnp); + error = EMLINK; + goto out1; + } + if (ip->i_flags & (IMMUTABLE | APPEND)) { + VOP_ABORTOP(tdvp, cnp); 
+ error = EPERM; + goto out1; + } + ip->i_nlink++; + ip->i_flag |= IN_CHANGE; + tv = time; + error = VOP_UPDATE(vp, &tv, &tv, 1); + if (!error) + error = ufs_direnter(ip, tdvp, cnp); + if (error) { + ip->i_nlink--; + ip->i_flag |= IN_CHANGE; + } + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); +out1: + if (tdvp != vp) + VOP_UNLOCK(vp, 0, p); +out2: + vput(tdvp); + return (error); +} + +/* + * whiteout vnode call + */ +int +ufs_whiteout(ap) + struct vop_whiteout_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + int a_flags; + } */ *ap; +{ + struct vnode *dvp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + struct direct newdir; + int error; + + switch (ap->a_flags) { + case LOOKUP: + /* 4.4 format directories support whiteout operations */ + if (dvp->v_mount->mnt_maxsymlinklen > 0) + return (0); + return (EOPNOTSUPP); + + case CREATE: + /* create a new directory whiteout */ +#if DIAGNOSTIC + if ((cnp->cn_flags & SAVENAME) == 0) + panic("ufs_whiteout: missing name"); + if (dvp->v_mount->mnt_maxsymlinklen <= 0) + panic("ufs_whiteout: old format filesystem"); +#endif + + newdir.d_ino = WINO; + newdir.d_namlen = cnp->cn_namelen; + bcopy(cnp->cn_nameptr, newdir.d_name, (unsigned)cnp->cn_namelen + 1); + newdir.d_type = DT_WHT; + error = ufs_direnter2(dvp, &newdir, cnp->cn_cred, cnp->cn_proc); + break; + + case DELETE: + /* remove an existing directory whiteout */ +#if DIAGNOSTIC + if (dvp->v_mount->mnt_maxsymlinklen <= 0) + panic("ufs_whiteout: old format filesystem"); +#endif + + cnp->cn_flags &= ~DOWHITEOUT; + error = ufs_dirremove(dvp, cnp); + break; + } + if (cnp->cn_flags & HASBUF) { + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + cnp->cn_flags &= ~HASBUF; + } + return (error); +} + + +/* + * Rename system call. + * rename("foo", "bar"); + * is essentially + * unlink("bar"); + * link("foo", "bar"); + * unlink("foo"); + * but ``atomically''. 
Can't do full commit without saving state in the + * inode on disk which isn't feasible at this time. Best we can do is + * always guarantee the target exists. + * + * Basic algorithm is: + * + * 1) Bump link count on source while we're linking it to the + * target. This also ensure the inode won't be deleted out + * from underneath us while we work (it may be truncated by + * a concurrent `trunc' or `open' for creation). + * 2) Link source to destination. If destination already exists, + * delete it first. + * 3) Unlink source reference to inode if still around. If a + * directory was moved and the parent of the destination + * is different from the source, patch the ".." entry in the + * directory. + */ +int +ufs_rename(ap) + struct vop_rename_args /* { + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; + } */ *ap; +{ + struct vnode *tvp = ap->a_tvp; + register struct vnode *tdvp = ap->a_tdvp; + struct vnode *fvp = ap->a_fvp; + struct vnode *fdvp = ap->a_fdvp; + struct componentname *tcnp = ap->a_tcnp; + struct componentname *fcnp = ap->a_fcnp; + struct proc *p = fcnp->cn_proc; + struct inode *ip, *xp, *dp; + struct dirtemplate dirbuf; + struct timeval tv; + int doingdirectory = 0, oldparent = 0, newparent = 0; + int error = 0; + u_char namlen; + +#if DIAGNOSTIC + if ((tcnp->cn_flags & HASBUF) == 0 || + (fcnp->cn_flags & HASBUF) == 0) + panic("ufs_rename: no name"); +#endif + /* + * Check for cross-device rename. + */ + if ((fvp->v_mount != tdvp->v_mount) || + (tvp && (fvp->v_mount != tvp->v_mount))) { + error = EXDEV; +abortit: + VOP_ABORTOP(tdvp, tcnp); /* XXX, why not in NFS? */ + if (tdvp == tvp) + vrele(tdvp); + else + vput(tdvp); + if (tvp) + vput(tvp); + VOP_ABORTOP(fdvp, fcnp); /* XXX, why not in NFS? */ + vrele(fdvp); + vrele(fvp); + return (error); + } + + /* + * Check if just deleting a link name. 
+ */ + if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) || + (VTOI(tdvp)->i_flags & APPEND))) { + error = EPERM; + goto abortit; + } + if (fvp == tvp) { + if (fvp->v_type == VDIR) { + error = EINVAL; + goto abortit; + } + + /* Release destination completely. */ + VOP_ABORTOP(tdvp, tcnp); + vput(tdvp); + vput(tvp); + + /* Delete source. */ + vrele(fdvp); + vrele(fvp); + fcnp->cn_flags &= ~MODMASK; + fcnp->cn_flags |= LOCKPARENT | LOCKLEAF; + if ((fcnp->cn_flags & SAVESTART) == 0) + panic("ufs_rename: lost from startdir"); + fcnp->cn_nameiop = DELETE; + (void) relookup(fdvp, &fvp, fcnp); + return (VOP_REMOVE(fdvp, fvp, fcnp)); + } + if (error = vn_lock(fvp, LK_EXCLUSIVE, p)) + goto abortit; + dp = VTOI(fdvp); + ip = VTOI(fvp); + if ((ip->i_flags & (IMMUTABLE | APPEND)) || (dp->i_flags & APPEND)) { + VOP_UNLOCK(fvp, 0, p); + error = EPERM; + goto abortit; + } + if ((ip->i_mode & IFMT) == IFDIR) { + /* + * Avoid ".", "..", and aliases of "." for obvious reasons. + */ + if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || + dp == ip || (fcnp->cn_flags&ISDOTDOT) || + (ip->i_flag & IN_RENAME)) { + VOP_UNLOCK(fvp, 0, p); + error = EINVAL; + goto abortit; + } + ip->i_flag |= IN_RENAME; + oldparent = dp->i_number; + doingdirectory++; + } + vrele(fdvp); + + /* + * When the target exists, both the directory + * and target vnodes are returned locked. + */ + dp = VTOI(tdvp); + xp = NULL; + if (tvp) + xp = VTOI(tvp); + + /* + * 1) Bump link count while we're moving stuff + * around. If we crash somewhere before + * completing our work, the link count + * may be wrong, but correctable. + */ + ip->i_nlink++; + ip->i_flag |= IN_CHANGE; + tv = time; + if (error = VOP_UPDATE(fvp, &tv, &tv, 1)) { + VOP_UNLOCK(fvp, 0, p); + goto bad; + } + + /* + * If ".." must be changed (ie the directory gets a new + * parent) then the source directory must not be in the + * directory heirarchy above the target, as this would + * orphan everything below the source directory. 
Also + * the user must have write permission in the source so + * as to be able to change "..". We must repeat the call + * to namei, as the parent directory is unlocked by the + * call to checkpath(). + */ + error = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_proc); + VOP_UNLOCK(fvp, 0, p); + if (oldparent != dp->i_number) + newparent = dp->i_number; + if (doingdirectory && newparent) { + if (error) /* write access check above */ + goto bad; + if (xp != NULL) + vput(tvp); + if (error = ufs_checkpath(ip, dp, tcnp->cn_cred)) + goto out; + if ((tcnp->cn_flags & SAVESTART) == 0) + panic("ufs_rename: lost to startdir"); + if (error = relookup(tdvp, &tvp, tcnp)) + goto out; + dp = VTOI(tdvp); + xp = NULL; + if (tvp) + xp = VTOI(tvp); + } + /* + * 2) If target doesn't exist, link the target + * to the source and unlink the source. + * Otherwise, rewrite the target directory + * entry to reference the source inode and + * expunge the original entry's existence. + */ + if (xp == NULL) { + if (dp->i_dev != ip->i_dev) + panic("rename: EXDEV"); + /* + * Account for ".." in new directory. + * When source and destination have the same + * parent we don't fool with the link count. + */ + if (doingdirectory && newparent) { + if ((nlink_t)dp->i_nlink >= LINK_MAX) { + error = EMLINK; + goto bad; + } + dp->i_nlink++; + dp->i_flag |= IN_CHANGE; + if (error = VOP_UPDATE(tdvp, &tv, &tv, 1)) + goto bad; + } + if (error = ufs_direnter(ip, tdvp, tcnp)) { + if (doingdirectory && newparent) { + dp->i_nlink--; + dp->i_flag |= IN_CHANGE; + (void)VOP_UPDATE(tdvp, &tv, &tv, 1); + } + goto bad; + } + vput(tdvp); + } else { + if (xp->i_dev != dp->i_dev || xp->i_dev != ip->i_dev) + panic("rename: EXDEV"); + /* + * Short circuit rename(foo, foo). 
+ */ + if (xp->i_number == ip->i_number) + panic("rename: same file"); + /* + * If the parent directory is "sticky", then the user must + * own the parent directory, or the destination of the rename, + * otherwise the destination may not be changed (except by + * root). This implements append-only directories. + */ + if ((dp->i_mode & S_ISTXT) && tcnp->cn_cred->cr_uid != 0 && + tcnp->cn_cred->cr_uid != dp->i_uid && + xp->i_uid != tcnp->cn_cred->cr_uid) { + error = EPERM; + goto bad; + } + /* + * Target must be empty if a directory and have no links + * to it. Also, ensure source and target are compatible + * (both directories, or both not directories). + */ + if ((xp->i_mode&IFMT) == IFDIR) { + if (!ufs_dirempty(xp, dp->i_number, tcnp->cn_cred) || + xp->i_nlink > 2) { + error = ENOTEMPTY; + goto bad; + } + if (!doingdirectory) { + error = ENOTDIR; + goto bad; + } + cache_purge(tdvp); + } else if (doingdirectory) { + error = EISDIR; + goto bad; + } + if (error = ufs_dirrewrite(dp, ip, tcnp)) + goto bad; + /* + * If the target directory is in the same + * directory as the source directory, + * decrement the link count on the parent + * of the target directory. + */ + if (doingdirectory && !newparent) { + dp->i_nlink--; + dp->i_flag |= IN_CHANGE; + } + vput(tdvp); + /* + * Adjust the link count of the target to + * reflect the dirrewrite above. If this is + * a directory it is empty and there are + * no links to it, so we can squash the inode and + * any space associated with it. We disallowed + * renaming over top of a directory with links to + * it above, as the remaining link would point to + * a directory without "." or ".." entries. + */ + xp->i_nlink--; + if (doingdirectory) { + if (--xp->i_nlink != 0) + panic("rename: linked directory"); + error = VOP_TRUNCATE(tvp, (off_t)0, IO_SYNC, + tcnp->cn_cred, tcnp->cn_proc); + } + xp->i_flag |= IN_CHANGE; + vput(tvp); + xp = NULL; + } + + /* + * 3) Unlink the source. 
+ */ + fcnp->cn_flags &= ~MODMASK; + fcnp->cn_flags |= LOCKPARENT | LOCKLEAF; + if ((fcnp->cn_flags & SAVESTART) == 0) + panic("ufs_rename: lost from startdir"); + (void) relookup(fdvp, &fvp, fcnp); + if (fvp != NULL) { + xp = VTOI(fvp); + dp = VTOI(fdvp); + } else { + /* + * From name has disappeared. + */ + if (doingdirectory) + panic("rename: lost dir entry"); + vrele(ap->a_fvp); + return (0); + } + /* + * Ensure that the directory entry still exists and has not + * changed while the new name has been entered. If the source is + * a file then the entry may have been unlinked or renamed. In + * either case there is no further work to be done. If the source + * is a directory then it cannot have been rmdir'ed; its link + * count of three would cause a rmdir to fail with ENOTEMPTY. + * The IRENAME flag ensures that it cannot be moved by another + * rename. + */ + if (xp != ip) { + if (doingdirectory) + panic("rename: lost dir entry"); + } else { + /* + * If the source is a directory with a + * new parent, the link count of the old + * parent directory must be decremented + * and ".." set to point to the new parent. + */ + if (doingdirectory && newparent) { + dp->i_nlink--; + dp->i_flag |= IN_CHANGE; + error = vn_rdwr(UIO_READ, fvp, (caddr_t)&dirbuf, + sizeof (struct dirtemplate), (off_t)0, + UIO_SYSSPACE, IO_NODELOCKED, + tcnp->cn_cred, (int *)0, (struct proc *)0); + if (error == 0) { +# if (BYTE_ORDER == LITTLE_ENDIAN) + if (fvp->v_mount->mnt_maxsymlinklen <= 0) + namlen = dirbuf.dotdot_type; + else + namlen = dirbuf.dotdot_namlen; +# else + namlen = dirbuf.dotdot_namlen; +# endif + if (namlen != 2 || + dirbuf.dotdot_name[0] != '.' 
|| + dirbuf.dotdot_name[1] != '.') { + ufs_dirbad(xp, (doff_t)12, + "rename: mangled dir"); + } else { + dirbuf.dotdot_ino = newparent; + (void) vn_rdwr(UIO_WRITE, fvp, + (caddr_t)&dirbuf, + sizeof (struct dirtemplate), + (off_t)0, UIO_SYSSPACE, + IO_NODELOCKED|IO_SYNC, + tcnp->cn_cred, (int *)0, + (struct proc *)0); + cache_purge(fdvp); + } + } + } + error = ufs_dirremove(fdvp, fcnp); + if (!error) { + xp->i_nlink--; + xp->i_flag |= IN_CHANGE; + } + xp->i_flag &= ~IN_RENAME; + } + if (dp) + vput(fdvp); + if (xp) + vput(fvp); + vrele(ap->a_fvp); + return (error); + +bad: + if (xp) + vput(ITOV(xp)); + vput(ITOV(dp)); +out: + if (doingdirectory) + ip->i_flag &= ~IN_RENAME; + if (vn_lock(fvp, LK_EXCLUSIVE, p) == 0) { + ip->i_nlink--; + ip->i_flag |= IN_CHANGE; + vput(fvp); + } else + vrele(fvp); + return (error); +} + +/* + * A virgin directory (no blushing please). + */ +static struct dirtemplate mastertemplate = { + 0, 12, DT_DIR, 1, ".", + 0, DIRBLKSIZ - 12, DT_DIR, 2, ".." +}; +static struct odirtemplate omastertemplate = { + 0, 12, 1, ".", + 0, DIRBLKSIZ - 12, 2, ".." +}; + +/* + * Mkdir system call + */ +int +ufs_mkdir(ap) + struct vop_mkdir_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + } */ *ap; +{ + register struct vnode *dvp = ap->a_dvp; + register struct vattr *vap = ap->a_vap; + register struct componentname *cnp = ap->a_cnp; + register struct inode *ip, *dp; + struct vnode *tvp; + struct dirtemplate dirtemplate, *dtp; + struct timeval tv; + int error, dmode; + +#if DIAGNOSTIC + if ((cnp->cn_flags & HASBUF) == 0) + panic("ufs_mkdir: no name"); +#endif + dp = VTOI(dvp); + if ((nlink_t)dp->i_nlink >= LINK_MAX) { + error = EMLINK; + goto out; + } + dmode = vap->va_mode & 0777; + dmode |= IFDIR; + /* + * Must simulate part of ufs_makeinode here to acquire the inode, + * but not have it entered in the parent directory. The entry is + * made later after writing "." and ".." entries. 
+ */ + if (error = VOP_VALLOC(dvp, dmode, cnp->cn_cred, &tvp)) + goto out; + ip = VTOI(tvp); + ip->i_uid = cnp->cn_cred->cr_uid; + ip->i_gid = dp->i_gid; +#if QUOTA + if ((error = getinoquota(ip)) || + (error = chkiq(ip, 1, cnp->cn_cred, 0))) { + _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + VOP_VFREE(tvp, ip->i_number, dmode); + vput(tvp); + vput(dvp); + return (error); + } +#endif + ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; + ip->i_mode = dmode; + tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */ + ip->i_nlink = 2; + if (cnp->cn_flags & ISWHITEOUT) + ip->i_flags |= UF_OPAQUE; + tv = time; + error = VOP_UPDATE(tvp, &tv, &tv, 1); + + /* + * Bump link count in parent directory + * to reflect work done below. Should + * be done before reference is created + * so reparation is possible if we crash. + */ + dp->i_nlink++; + dp->i_flag |= IN_CHANGE; + if (error = VOP_UPDATE(dvp, &tv, &tv, 1)) + goto bad; + + /* Initialize directory with "." and ".." from static template. */ + if (dvp->v_mount->mnt_maxsymlinklen > 0) + dtp = &mastertemplate; + else + dtp = (struct dirtemplate *)&omastertemplate; + dirtemplate = *dtp; + dirtemplate.dot_ino = ip->i_number; + dirtemplate.dotdot_ino = dp->i_number; + error = vn_rdwr(UIO_WRITE, tvp, (caddr_t)&dirtemplate, + sizeof (dirtemplate), (off_t)0, UIO_SYSSPACE, + IO_NODELOCKED|IO_SYNC, cnp->cn_cred, (int *)0, (struct proc *)0); + if (error) { + dp->i_nlink--; + dp->i_flag |= IN_CHANGE; + goto bad; + } + if (DIRBLKSIZ > VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_bsize) + panic("ufs_mkdir: blksize"); /* XXX should grow with balloc() */ + else { + ip->i_size = DIRBLKSIZ; + ip->i_flag |= IN_CHANGE; + } + + /* Directory set up, now install it's entry in the parent directory. */ + if (error = ufs_direnter(ip, dvp, cnp)) { + dp->i_nlink--; + dp->i_flag |= IN_CHANGE; + } +bad: + /* + * No need to do an explicit VOP_TRUNCATE here, vrele will do this + * for us because we set the link count to 0. 
+ */ + if (error) { + ip->i_nlink = 0; + ip->i_flag |= IN_CHANGE; + vput(tvp); + } else + *ap->a_vpp = tvp; +out: + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + vput(dvp); + return (error); +} + +/* + * Rmdir system call. + */ +int +ufs_rmdir(ap) + struct vop_rmdir_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct vnode *dvp = ap->a_dvp; + struct componentname *cnp = ap->a_cnp; + struct inode *ip, *dp; + int error; + + ip = VTOI(vp); + dp = VTOI(dvp); + /* + * No rmdir "." please. + */ + if (dp == ip) { + vrele(dvp); + vput(vp); + return (EINVAL); + } + /* + * Verify the directory is empty (and valid). + * (Rmdir ".." won't be valid since + * ".." will contain a reference to + * the current directory and thus be + * non-empty.) + */ + error = 0; + if (ip->i_nlink != 2 || + !ufs_dirempty(ip, dp->i_number, cnp->cn_cred)) { + error = ENOTEMPTY; + goto out; + } + if ((dp->i_flags & APPEND) || (ip->i_flags & (IMMUTABLE | APPEND))) { + error = EPERM; + goto out; + } + /* + * Delete reference to directory before purging + * inode. If we crash in between, the directory + * will be reattached to lost+found, + */ + if (error = ufs_dirremove(dvp, cnp)) + goto out; + dp->i_nlink--; + dp->i_flag |= IN_CHANGE; + cache_purge(dvp); + vput(dvp); + dvp = NULL; + /* + * Truncate inode. The only stuff left + * in the directory is "." and "..". The + * "." reference is inconsequential since + * we're quashing it. The ".." reference + * has already been adjusted above. We've + * removed the "." reference and the reference + * in the parent directory, but there may be + * other hard links so decrement by 2 and + * worry about them later. 
+ */ + ip->i_nlink -= 2; + error = VOP_TRUNCATE(vp, (off_t)0, IO_SYNC, cnp->cn_cred, + cnp->cn_proc); + cache_purge(ITOV(ip)); +out: + if (dvp) + vput(dvp); + vput(vp); + return (error); +} + +/* + * symlink -- make a symbolic link + */ +int +ufs_symlink(ap) + struct vop_symlink_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + char *a_target; + } */ *ap; +{ + register struct vnode *vp, **vpp = ap->a_vpp; + register struct inode *ip; + int len, error; + + if (error = ufs_makeinode(IFLNK | ap->a_vap->va_mode, ap->a_dvp, + vpp, ap->a_cnp)) + return (error); + vp = *vpp; + len = strlen(ap->a_target); + if (len < vp->v_mount->mnt_maxsymlinklen) { + ip = VTOI(vp); + bcopy(ap->a_target, (char *)ip->i_shortlink, len); + ip->i_size = len; + ip->i_flag |= IN_CHANGE | IN_UPDATE; + } else + error = vn_rdwr(UIO_WRITE, vp, ap->a_target, len, (off_t)0, + UIO_SYSSPACE, IO_NODELOCKED, ap->a_cnp->cn_cred, (int *)0, + (struct proc *)0); + vput(vp); + return (error); +} + +/* + * Vnode op for reading directories. + * + * The routine below assumes that the on-disk format of a directory + * is the same as that defined by . If the on-disk + * format changes, then it will be necessary to do a conversion + * from the on-disk format that read returns to the format defined + * by . + */ +int +ufs_readdir(ap) + struct vop_readdir_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + int *ncookies; + u_long **a_cookies; + } */ *ap; +{ + register struct uio *uio = ap->a_uio; + int error; + size_t count, lost; + off_t off = uio->uio_offset; + + count = uio->uio_resid; + /* Make sure we don't return partial entries. 
*/ + count -= (uio->uio_offset + count) & (DIRBLKSIZ -1); + if (count <= 0) + return (EINVAL); + lost = uio->uio_resid - count; + uio->uio_resid = count; + uio->uio_iov->iov_len = count; +# if (BYTE_ORDER == LITTLE_ENDIAN) + if (ap->a_vp->v_mount->mnt_maxsymlinklen > 0) { + error = VOP_READ(ap->a_vp, uio, 0, ap->a_cred); + } else { + struct dirent *dp, *edp; + struct uio auio; + struct iovec aiov; + caddr_t dirbuf; + int readcnt; + u_char tmp; + + auio = *uio; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_segflg = UIO_SYSSPACE; + aiov.iov_len = count; + MALLOC(dirbuf, caddr_t, count, M_TEMP, M_WAITOK); + aiov.iov_base = dirbuf; + error = VOP_READ(ap->a_vp, &auio, 0, ap->a_cred); + if (error == 0) { + readcnt = count - auio.uio_resid; + edp = (struct dirent *)&dirbuf[readcnt]; + for (dp = (struct dirent *)dirbuf; dp < edp; ) { + tmp = dp->d_namlen; + dp->d_namlen = dp->d_type; + dp->d_type = tmp; + if (dp->d_reclen > 0) { + dp = (struct dirent *) + ((char *)dp + dp->d_reclen); + } else { + error = EIO; + break; + } + } + if (dp >= edp) + error = uiomove(dirbuf, readcnt, uio); + } + FREE(dirbuf, M_TEMP); + } +# else + error = VOP_READ(ap->a_vp, uio, 0, ap->a_cred); +# endif + if (!error && ap->a_ncookies != NULL) { + struct dirent* dpStart; + struct dirent* dpEnd; + struct dirent* dp; + int ncookies; + u_long *cookies; + u_long *cookiep; + + /* + * Only the NFS server uses cookies, and it loads the + * directory block into system space, so we can just look at + * it directly. 
+ */ + if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) + panic("ufs_readdir: unexpected uio from NFS server"); + dpStart = (struct dirent *) + (uio->uio_iov->iov_base - (uio->uio_offset - off)); + dpEnd = (struct dirent *) uio->uio_iov->iov_base; + for (dp = dpStart, ncookies = 0; + dp < dpEnd && dp->d_reclen != 0; + dp = (struct dirent *)((caddr_t)dp + dp->d_reclen)) + ncookies++; + MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, + M_WAITOK); + for (dp = dpStart, cookiep = cookies; + dp < dpEnd; + dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) { + off += dp->d_reclen; + *cookiep++ = (u_long) off; + } + *ap->a_ncookies = ncookies; + *ap->a_cookies = cookies; + } + uio->uio_resid += lost; + if (ap->a_eofflag) + *ap->a_eofflag = VTOI(ap->a_vp)->i_size <= uio->uio_offset; + return (error); +} + +/* + * Return target name of a symbolic link + */ +int +ufs_readlink(ap) + struct vop_readlink_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct inode *ip = VTOI(vp); + int isize; + + isize = ip->i_size; + if (isize < vp->v_mount->mnt_maxsymlinklen) { + uiomove((char *)ip->i_shortlink, isize, ap->a_uio); + return (0); + } + return (VOP_READ(vp, ap->a_uio, 0, ap->a_cred)); +} + +/* + * Ufs abort op, called after namei() when a CREATE/DELETE isn't actually + * done. If a buffer has been saved in anticipation of a CREATE, delete it. + */ +/* ARGSUSED */ +int +ufs_abortop(ap) + struct vop_abortop_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + } */ *ap; +{ + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + return (0); +} + +/* + * Lock an inode. If its already locked, set the WANT bit and sleep. 
+ */ +int +ufs_lock(ap) + struct vop_lock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + if (VTOI(vp) == (struct inode *)NULL) + panic("inode in vnode is null\n"); + return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags, &vp->v_interlock, + ap->a_p)); +} + +/* + * Unlock an inode. + */ +int +ufs_unlock(ap) + struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + return (lockmgr(&VTOI(vp)->i_lock, ap->a_flags | LK_RELEASE, + &vp->v_interlock, ap->a_p)); +} + +/* + * Check for a locked inode. + */ +int +ufs_islocked(ap) + struct vop_islocked_args /* { + struct vnode *a_vp; + } */ *ap; +{ + + return (lockstatus(&VTOI(ap->a_vp)->i_lock)); +} + +/* + * Calculate the logical to physical mapping if not done already, + * then call the device strategy routine. + */ +int +ufs_strategy(ap) + struct vop_strategy_args /* { + struct buf *a_bp; + } */ *ap; +{ + register struct buf *bp = ap->a_bp; + register struct vnode *vp = bp->b_vp; + register struct inode *ip; + int error; + + ip = VTOI(vp); + if ( !(bp->b_flags & B_VECTORLIST)) { + if (vp->v_type == VBLK || vp->v_type == VCHR) + panic("ufs_strategy: spec"); + + + if (bp->b_flags & B_PAGELIST) { + /* + * if we have a page list associated with this bp, + * then go through cluste_bp since it knows how to + * deal with a page request that might span non-contiguous + * physical blocks on the disk... 
+ */ +#if 1 + if (bp->b_blkno == bp->b_lblkno) { + if (error = VOP_BMAP(vp, bp->b_lblkno, NULL, + &bp->b_blkno, NULL)) { + bp->b_error = error; + bp->b_flags |= B_ERROR; + biodone(bp); + return (error); + } + } +#endif /* 1 */ + error = cluster_bp(bp); + vp = ip->i_devvp; + bp->b_dev = vp->v_rdev; + + return (error); + } + + if (bp->b_blkno == bp->b_lblkno) { + if (error = + VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL)) { + bp->b_error = error; + bp->b_flags |= B_ERROR; + biodone(bp); + return (error); + } + if ((long)bp->b_blkno == -1) + clrbuf(bp); + } + if ((long)bp->b_blkno == -1) { + biodone(bp); + return (0); + } + + } + + vp = ip->i_devvp; + bp->b_dev = vp->v_rdev; + VOCALL (vp->v_op, VOFFSET(vop_strategy), ap); + return (0); +} + +/* + * Print out the contents of an inode. + */ +int +ufs_print(ap) + struct vop_print_args /* { + struct vnode *a_vp; + } */ *ap; +{ + register struct vnode *vp = ap->a_vp; + register struct inode *ip = VTOI(vp); + + printf("tag VT_UFS, ino %d, on dev %d, %d", ip->i_number, + major(ip->i_dev), minor(ip->i_dev)); +#if FIFO + if (vp->v_type == VFIFO) + fifo_printinfo(vp); +#endif /* FIFO */ + lockmgr_printinfo(&ip->i_lock); + printf("\n"); + return (0); +} + +/* + * Read wrapper for special devices. + */ +int +ufsspec_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + + /* + * Set access flag. + */ + VTOI(ap->a_vp)->i_flag |= IN_ACCESS; + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap)); +} + +/* + * Write wrapper for special devices. + */ +int +ufsspec_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + + /* + * Set update and change flags. + */ + VTOI(ap->a_vp)->i_flag |= IN_CHANGE | IN_UPDATE; + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Close wrapper for special devices. 
+ * + * Update the times on the inode then do device close. + */ +int +ufsspec_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + struct inode *ip = VTOI(vp); + + simple_lock(&vp->v_interlock); + if (ap->a_vp->v_usecount > 1) + ITIMES(ip, &time, &time); + simple_unlock(&vp->v_interlock); + return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap)); +} + +#if FIFO +/* + * Read wrapper for fifo's + */ +int +ufsfifo_read(ap) + struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + + /* + * Set access flag. + */ + VTOI(ap->a_vp)->i_flag |= IN_ACCESS; + return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap)); +} + +/* + * Write wrapper for fifo's. + */ +int +ufsfifo_write(ap) + struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; + } */ *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + + /* + * Set update and change flags. + */ + VTOI(ap->a_vp)->i_flag |= IN_CHANGE | IN_UPDATE; + return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap)); +} + +/* + * Close wrapper for fifo's. + * + * Update the times on the inode then do device close. + */ +ufsfifo_close(ap) + struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; + } */ *ap; +{ + extern int (**fifo_vnodeop_p)(void *); + struct vnode *vp = ap->a_vp; + struct inode *ip = VTOI(vp); + + simple_lock(&vp->v_interlock); + if (ap->a_vp->v_usecount > 1) + ITIMES(ip, &time, &time); + simple_unlock(&vp->v_interlock); + return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap)); +} +#endif /* FIFO */ + +/* + * Return POSIX pathconf information applicable to ufs filesystems. 
+ */ +ufs_pathconf(ap) + struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + int *a_retval; + } */ *ap; +{ + + switch (ap->a_name) { + case _PC_LINK_MAX: + *ap->a_retval = LINK_MAX; + return (0); + case _PC_NAME_MAX: + *ap->a_retval = NAME_MAX; + return (0); + case _PC_PATH_MAX: + *ap->a_retval = PATH_MAX; + return (0); + case _PC_PIPE_BUF: + *ap->a_retval = PIPE_BUF; + return (0); + case _PC_CHOWN_RESTRICTED: + *ap->a_retval = 1; + return (0); + case _PC_NO_TRUNC: + *ap->a_retval = 1; + return (0); + default: + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Advisory record locking support + */ +int +ufs_advlock(ap) + struct vop_advlock_args /* { + struct vnode *a_vp; + caddr_t a_id; + int a_op; + struct flock *a_fl; + int a_flags; + } */ *ap; +{ + register struct inode *ip = VTOI(ap->a_vp); + register struct flock *fl = ap->a_fl; + register struct lockf *lock; + off_t start, end; + int error; + + /* + * Avoid the common case of unlocking when inode has no locks. + */ + if (ip->i_lockf == (struct lockf *)0) { + if (ap->a_op != F_SETLK) { + fl->l_type = F_UNLCK; + return (0); + } + } + /* + * Convert the flock structure into a start and end. + */ + switch (fl->l_whence) { + + case SEEK_SET: + case SEEK_CUR: + /* + * Caller is responsible for adding any necessary offset + * when SEEK_CUR is used. + */ + start = fl->l_start; + break; + + case SEEK_END: + start = ip->i_size + fl->l_start; + break; + + default: + return (EINVAL); + } + if (start < 0) + return (EINVAL); + if (fl->l_len == 0) + end = -1; + else + end = start + fl->l_len - 1; + /* + * Create the lockf structure + */ + MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK); + lock->lf_start = start; + lock->lf_end = end; + lock->lf_id = ap->a_id; + lock->lf_inode = ip; + lock->lf_type = fl->l_type; + lock->lf_next = (struct lockf *)0; + TAILQ_INIT(&lock->lf_blkhd); + lock->lf_flags = ap->a_flags; + /* + * Do the requested operation. 
+ */ + switch(ap->a_op) { + case F_SETLK: + return (lf_setlock(lock)); + + case F_UNLCK: + error = lf_clearlock(lock); + FREE(lock, M_LOCKF); + return (error); + + case F_GETLK: + error = lf_getlock(lock, fl); + FREE(lock, M_LOCKF); + return (error); + + default: + _FREE(lock, M_LOCKF); + return (EINVAL); + } + /* NOTREACHED */ +} + +/* + * Initialize the vnode associated with a new inode, handle aliased + * vnodes. + */ +int +ufs_vinit(mntp, specops, fifoops, vpp) + struct mount *mntp; + int (**specops)(); + int (**fifoops)(); + struct vnode **vpp; +{ + struct proc *p = current_proc(); /* XXX */ + struct inode *ip; + struct vnode *vp, *nvp; + + vp = *vpp; + ip = VTOI(vp); + switch(vp->v_type = IFTOVT(ip->i_mode)) { + case VCHR: + case VBLK: + vp->v_op = specops; + if (nvp = checkalias(vp, ip->i_rdev, mntp)) { + /* + * Discard unneeded vnode, but save its inode. + * Note that the lock is carried over in the inode + * to the replacement vnode. + */ + nvp->v_data = vp->v_data; + vp->v_data = NULL; + vp->v_op = spec_vnodeop_p; + vrele(vp); + vgone(vp); + /* + * Reinitialize aliased inode. + */ + vp = nvp; + ip->i_vnode = vp; + } + break; + case VFIFO: +#if FIFO + vp->v_op = fifoops; + break; +#else + return (EOPNOTSUPP); +#endif + case VREG: +#if 0 + ubc_info_init(vp); +#endif /* 0 */ + break; + default: + break; + } + if (ip->i_number == ROOTINO) + vp->v_flag |= VROOT; + /* + * Initialize modrev times + */ + SETHIGH(ip->i_modrev, time.tv_sec); + SETLOW(ip->i_modrev, time.tv_usec * 4294); + *vpp = vp; + return (0); +} + +/* + * Allocate a new inode. 
+ */ +int +ufs_makeinode(mode, dvp, vpp, cnp) + int mode; + struct vnode *dvp; + struct vnode **vpp; + struct componentname *cnp; +{ + register struct inode *ip, *pdir; + struct timeval tv; + struct vnode *tvp; + int error; + + pdir = VTOI(dvp); +#if DIAGNOSTIC + if ((cnp->cn_flags & HASBUF) == 0) + panic("ufs_makeinode: no name"); +#endif + *vpp = NULL; + if ((mode & IFMT) == 0) + mode |= IFREG; + + if (error = VOP_VALLOC(dvp, mode, cnp->cn_cred, &tvp)) { + _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + vput(dvp); + return (error); + } + ip = VTOI(tvp); + ip->i_gid = pdir->i_gid; + if ((mode & IFMT) == IFLNK) + ip->i_uid = pdir->i_uid; + else + ip->i_uid = cnp->cn_cred->cr_uid; +#if QUOTA + if ((error = getinoquota(ip)) || + (error = chkiq(ip, 1, cnp->cn_cred, 0))) { + _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + VOP_VFREE(tvp, ip->i_number, mode); + vput(tvp); + vput(dvp); + return (error); + } +#endif + ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; + ip->i_mode = mode; + tvp->v_type = IFTOVT(mode); /* Rest init'd in getnewvnode(). */ + ip->i_nlink = 1; + if ((ip->i_mode & ISGID) && !groupmember(ip->i_gid, cnp->cn_cred) && + suser(cnp->cn_cred, NULL)) + ip->i_mode &= ~ISGID; + + if (cnp->cn_flags & ISWHITEOUT) + ip->i_flags |= UF_OPAQUE; + + /* + * initialize UBC before calling VOP_UPDATE and ufs_direnter + * Not doing so introduces probelms in handling error from + * those calls. + * It results in a "vget: stolen ubc_info" panic due to attempt + * to shutdown uninitialized UBC. + */ + if (UBCINFOMISSING(tvp) || UBCINFORECLAIMED(tvp)) + ubc_info_init(tvp); + + /* + * Make sure inode goes to disk before directory entry. 
+ */ + tv = time; + if (error = VOP_UPDATE(tvp, &tv, &tv, 1)) + goto bad; + if (error = ufs_direnter(ip, dvp, cnp)) + goto bad; + if ((cnp->cn_flags & SAVESTART) == 0) + FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + vput(dvp); + + *vpp = tvp; + return (0); + +bad: + /* + * Write error occurred trying to update the inode + * or the directory so must deallocate the inode. + */ + _FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); + vput(dvp); + ip->i_nlink = 0; + ip->i_flag |= IN_CHANGE; + vput(tvp); + return (error); +} + diff --git a/bsd/ufs/ufs/ufsmount.h b/bsd/ufs/ufs/ufsmount.h new file mode 100644 index 000000000..ce88c2502 --- /dev/null +++ b/bsd/ufs/ufs/ufsmount.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)ufsmount.h 8.6 (Berkeley) 3/30/95 + */ +#ifndef _UFS_UFSMOUNT_H_ +#define _UFS_UFSMOUNT_H_ + +#include + +/* + * Arguments to mount UFS-based filesystems + */ +struct ufs_args { + char *fspec; /* block special device to mount */ + struct export_args export; /* network export information */ +}; + +#if MFS +/* + * Arguments to mount MFS + */ +struct mfs_args { + char *fspec; /* name to export for statfs */ + struct export_args export; /* if exported MFSes are supported */ + caddr_t base; /* base of file system in memory */ + u_long size; /* size of file system */ +}; +#endif /* MFS */ + +#ifdef KERNEL +struct buf; +struct inode; +struct nameidata; +struct timeval; +struct ucred; +struct uio; +struct vnode; +struct netexport; + +/* This structure describes the UFS specific mount structure data. */ +struct ufsmount { + struct mount *um_mountp; /* filesystem vfs structure */ + dev_t um_dev; /* device mounted */ + struct vnode *um_devvp; /* block device mounted vnode */ + + union { /* pointer to superblock */ + struct lfs *lfs; /* LFS */ + struct fs *fs; /* FFS */ + } ufsmount_u; +#define um_fs ufsmount_u.fs +#define um_lfs ufsmount_u.lfs + + struct vnode *um_quotas[MAXQUOTAS]; /* pointer to quota files */ + struct ucred *um_cred[MAXQUOTAS]; /* quota file access cred */ + u_long um_nindir; /* indirect ptrs per block */ + u_long um_bptrtodb; /* indir ptr to disk block */ + u_long um_seqinc; /* inc between seq blocks */ + time_t um_btime[MAXQUOTAS]; /* block quota time limit */ + time_t um_itime[MAXQUOTAS]; /* inode quota time limit */ + char um_qflags[MAXQUOTAS]; /* quota specific flags */ + struct netexport um_export; /* export information */ + int64_t um_savedmaxfilesize; /* XXX - limit maxfilesize */ +}; + +/* + * Flags describing the state of quotas. + */ +#define QTF_OPENING 0x01 /* Q_QUOTAON in progress */ +#define QTF_CLOSING 0x02 /* Q_QUOTAOFF in progress */ + +/* Convert mount ptr to ufsmount ptr. 
*/ +#define VFSTOUFS(mp) ((struct ufsmount *)((mp)->mnt_data)) + +/* + * Macros to access file system parameters in the ufsmount structure. + * Used by ufs_bmap. + */ +#define MNINDIR(ump) ((ump)->um_nindir) +#define blkptrtodb(ump, b) ((b) << (ump)->um_bptrtodb) +#define is_sequential(ump, a, b) ((b) == (a) + ump->um_seqinc) +#endif /* KERNEL */ + +#endif /* ! _UFS_UFSMOUNT_H_ */ diff --git a/bsd/uxkern/ux_exception.c b/bsd/uxkern/ux_exception.c new file mode 100644 index 000000000..7a4e53e45 --- /dev/null +++ b/bsd/uxkern/ux_exception.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. 
+ */ + +/* + ********************************************************************* + * HISTORY + ********************************************************************** + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Unix exception handler. + */ + +static void ux_exception(); + +decl_simple_lock_data(static, ux_handler_init_lock) +mach_port_name_t ux_exception_port; +static task_t ux_handler_self; + +static +void +ux_handler(void) +{ + task_t self = current_task(); + mach_port_name_t exc_port_name; + mach_port_name_t exc_set_name; + + (void) thread_funnel_set(kernel_flock, TRUE); + + /* self->kernel_vm_space = TRUE; */ + ux_handler_self = self; + + + /* + * Allocate a port set that we will receive on. + */ + if (mach_port_allocate(get_task_ipcspace(ux_handler_self), MACH_PORT_RIGHT_PORT_SET, &exc_set_name) != MACH_MSG_SUCCESS) + panic("ux_handler: port_set_allocate failed"); + + /* + * Allocate an exception port and use object_copyin to + * translate it to the global name. Put it into the set. + */ + if (mach_port_allocate(get_task_ipcspace(ux_handler_self), MACH_PORT_RIGHT_RECEIVE, &exc_port_name) != MACH_MSG_SUCCESS) + panic("ux_handler: port_allocate failed"); + if (mach_port_move_member(get_task_ipcspace(ux_handler_self), + exc_port_name, exc_set_name) != MACH_MSG_SUCCESS) + panic("ux_handler: port_set_add failed"); + + if (ipc_object_copyin(get_task_ipcspace(self), exc_port_name, + MACH_MSG_TYPE_MAKE_SEND, + (void *) &ux_exception_port) != MACH_MSG_SUCCESS) + panic("ux_handler: object_copyin(ux_exception_port) failed"); + + thread_wakeup(&ux_exception_port); + + /* Message handling loop. 
*/ + + for (;;) { + struct rep_msg { + mach_msg_header_t Head; + NDR_record_t NDR; + kern_return_t RetCode; + } rep_msg; + struct exc_msg { + mach_msg_header_t Head; + /* start of the kernel processed data */ + mach_msg_body_t msgh_body; + mach_msg_port_descriptor_t thread; + mach_msg_port_descriptor_t task; + /* end of the kernel processed data */ + NDR_record_t NDR; + exception_type_t exception; + mach_msg_type_number_t codeCnt; + exception_data_t code; + /* some times RCV_TO_LARGE probs */ + char pad[512]; + } exc_msg; + mach_port_name_t reply_port; + kern_return_t result; + + exc_msg.Head.msgh_local_port = (mach_port_t)exc_set_name; + exc_msg.Head.msgh_size = sizeof (exc_msg); +#if 0 + result = mach_msg_receive(&exc_msg.Head); +#else + result = mach_msg_receive(&exc_msg.Head, MACH_RCV_MSG, + sizeof (exc_msg), exc_set_name, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, + 0); +#endif + if (result == MACH_MSG_SUCCESS) { + reply_port = (mach_port_name_t)exc_msg.Head.msgh_remote_port; + + if (exc_server(&exc_msg.Head, &rep_msg.Head)) + (void) mach_msg_send(&rep_msg.Head, MACH_SEND_MSG, + sizeof (rep_msg),MACH_MSG_TIMEOUT_NONE,MACH_PORT_NULL); + + if (reply_port != MACH_PORT_NULL) + (void) mach_port_deallocate(get_task_ipcspace(ux_handler_self), reply_port); + } + else if (result == MACH_RCV_TOO_LARGE) + /* ignore oversized messages */; + else + panic("exception_handler"); + } + thread_funnel_set(kernel_flock, FALSE); +} + +void +ux_handler_init(void) +{ + task_t handler_task; + + simple_lock_init(&ux_handler_init_lock); + ux_exception_port = MACH_PORT_NULL; + if (kernel_task_create(kernel_task, + 0, 0, &handler_task) != MACH_MSG_SUCCESS) { + panic("Failed to created ux handler task\n"); + } + (void) kernel_thread(handler_task, ux_handler); + simple_lock(&ux_handler_init_lock); + if (ux_exception_port == MACH_PORT_NULL) { + simple_unlock(&ux_handler_init_lock); + assert_wait(&ux_exception_port, THREAD_UNINT); + thread_block((void (*)(void)) 0); + } + else + 
simple_unlock(&ux_handler_init_lock); +} + +kern_return_t +catch_exception_raise( + mach_port_name_t exception_port, + mach_port_name_t thread_name, + mach_port_name_t task_name, + int exception, + exception_data_t code, + mach_msg_type_number_t codecnt +) +{ + task_t self = current_task(); + thread_act_t th_act; + ipc_port_t thread_port; + ipc_port_t task_port; + kern_return_t result = MACH_MSG_SUCCESS; + int signal = 0; + u_long ucode = 0; + struct uthread *ut; + + /* + * Convert local thread name to global port. + */ + if (MACH_PORT_VALID(thread_name) && + (ipc_object_copyin(get_task_ipcspace(self), thread_name, + MACH_MSG_TYPE_PORT_SEND, + (void *) &thread_port) == MACH_MSG_SUCCESS)) { + if (IPC_OBJECT_VALID(thread_port)) { + th_act = (thread_act_t)convert_port_to_act(thread_port); + ipc_port_release(thread_port); + } else { + th_act = THR_ACT_NULL; + } + + /* + * Catch bogus ports + */ + if (th_act != THR_ACT_NULL) { + + /* + * Convert exception to unix signal and code. + */ + ut = get_bsdthread_info(th_act); + ux_exception(exception, code[0], code[1], + &signal, &ucode); + + /* + * Send signal. + */ + if (signal != 0) + threadsignal(th_act, signal, ucode); + + act_deallocate(th_act); + } + else + result = KERN_INVALID_ARGUMENT; + } + else + result = KERN_INVALID_ARGUMENT; + + /* + * Delete our send rights to the task and thread ports. 
+ */ + (void)mach_port_deallocate(get_task_ipcspace(ux_handler_self), task_name); + (void)mach_port_deallocate(get_task_ipcspace(ux_handler_self),thread_name); + + return (result); +} +kern_return_t +catch_exception_raise_state(mach_port_name_t exception_port, int exception, exception_data_t code, mach_msg_type_number_t codeCnt, int flavor, thread_state_t old_state, int old_stateCnt, thread_state_t new_state, int new_stateCnt) +{ + return(KERN_INVALID_ARGUMENT); +} +kern_return_t +catch_exception_raise_state_identity(mach_port_name_t exception_port, mach_port_t thread, mach_port_t task, int exception, exception_data_t code, mach_msg_type_number_t codeCnt, int flavor, thread_state_t old_state, int old_stateCnt, thread_state_t new_state, int new_stateCnt) +{ + return(KERN_INVALID_ARGUMENT); +} + +boolean_t machine_exception(); + +/* + * ux_exception translates a mach exception, code and subcode to + * a signal and u.u_code. Calls machine_exception (machine dependent) + * to attempt translation first. + */ + +static +void ux_exception( + int exception, + int code, + int subcode, + int *ux_signal, + int *ux_code +) +{ + /* + * Try machine-dependent translation first. 
+ */ + if (machine_exception(exception, code, subcode, ux_signal, ux_code)) + return; + + switch(exception) { + + case EXC_BAD_ACCESS: + if (code == KERN_INVALID_ADDRESS) + *ux_signal = SIGSEGV; + else + *ux_signal = SIGBUS; + break; + + case EXC_BAD_INSTRUCTION: + *ux_signal = SIGILL; + break; + + case EXC_ARITHMETIC: + *ux_signal = SIGFPE; + break; + + case EXC_EMULATION: + *ux_signal = SIGEMT; + break; + + case EXC_SOFTWARE: + switch (code) { + + case EXC_UNIX_BAD_SYSCALL: + *ux_signal = SIGSYS; + break; + case EXC_UNIX_BAD_PIPE: + *ux_signal = SIGPIPE; + break; + case EXC_UNIX_ABORT: + *ux_signal = SIGABRT; + break; + } + break; + + case EXC_BREAKPOINT: + *ux_signal = SIGTRAP; + break; + } +} diff --git a/bsd/vfs/Makefile b/bsd/vfs/Makefile new file mode 100644 index 000000000..ce2bd8753 --- /dev/null +++ b/bsd/vfs/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = \ + +EXPINC_SUBDIRS_PPC = \ + +EXPINC_SUBDIRS_I386 = \ + +DATAFILES = \ + vfs_support.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = vfs + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = vfs + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c new file mode 100644 index 000000000..e11f6cb1d --- /dev/null +++ b/bsd/vfs/vfs_bio.c @@ -0,0 +1,2111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/*- + * Copyright (c) 1994 Christopher G. Demetriou + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. 
Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The NEXTSTEP Software License Agreement specifies the terms + * and conditions for redistribution. 
+ * + * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94 + */ + +/* + * Some references: + * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986) + * Leffler, et al.: The Design and Implementation of the 4.3BSD + * UNIX Operating System (Addison Welley, 1989) + */ +#define ZALLOC_METADATA 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if DIAGNOSTIC +#include +#endif /* DIAGNOSTIC */ +#include +#include + +#include + +extern void bufqinc(int q); +extern void bufqdec(int q); +extern void bufq_balance_thread_init(); + +extern void reassignbuf(struct buf *, struct vnode *); +static struct buf *getnewbuf(int slpflag, int slptimeo, int *queue); + +extern int niobuf; /* The number of IO buffer headers for cluster IO */ + +#if TRACE +struct proc *traceproc; +int tracewhich, tracebuf[TRCSIZ]; +u_int tracex; +char traceflags[TR_NFLAGS]; +#endif /* TRACE */ + +/* + * Definitions for the buffer hash lists. + */ +#define BUFHASH(dvp, lbn) \ + (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash]) +LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; +u_long bufhash; + +/* Definitions for the buffer stats. */ +struct bufstats bufstats; + +/* + * Insq/Remq for the buffer hash lists. + */ +#if 0 +#define binshash(bp, dp) LIST_INSERT_HEAD(dp, bp, b_hash) +#define bremhash(bp) LIST_REMOVE(bp, b_hash) +#endif /* 0 */ + + +TAILQ_HEAD(ioqueue, buf) iobufqueue; +TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; +int needbuffer; +int need_iobuffer; + +/* + * Insq/Remq for the buffer free lists. 
+ */ +#define binsheadfree(bp, dp, whichq) do { \ + TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ + bufqinc((whichq)); \ + (bp)->b_whichq = whichq; \ + (bp)->b_timestamp = time.tv_sec; \ + } while (0) + +#define binstailfree(bp, dp, whichq) do { \ + TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ + bufqinc((whichq)); \ + (bp)->b_whichq = whichq; \ + (bp)->b_timestamp = time.tv_sec; \ + } while (0) + +#define BHASHENTCHECK(bp) \ + if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ + panic("%x: b_hash.le_prev is deadb", (bp)); + +#define BLISTNONE(bp) \ + (bp)->b_hash.le_next = (struct buf *)0; \ + (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef; + +simple_lock_data_t bufhashlist_slock; /* lock on buffer hash list */ + +/* + * Time in seconds before a buffer on a list is + * considered as a stale buffer + */ +#define LRU_IS_STALE 120 /* default value for the LRU */ +#define AGE_IS_STALE 60 /* default value for the AGE */ +#define META_IS_STALE 180 /* default value for the BQ_META */ + +int lru_is_stale = LRU_IS_STALE; +int age_is_stale = AGE_IS_STALE; +int meta_is_stale = META_IS_STALE; + +#if 1 +void +blistenterhead(struct bufhashhdr * head, struct buf * bp) +{ + if ((bp->b_hash.le_next = (head)->lh_first) != NULL) + (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next; + (head)->lh_first = bp; + bp->b_hash.le_prev = &(head)->lh_first; + if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) + panic("blistenterhead: le_prev is deadbeef"); + +} +#endif + +#if 1 +void +binshash(struct buf *bp, struct bufhashhdr *dp) +{ +int s; + +struct buf *nbp; + + simple_lock(&bufhashlist_slock); +#if 0 + if(incore(bp->b_vp, bp->b_lblkno)) { + panic("adding to queue already existing element"); + } +#endif /* 0 */ + BHASHENTCHECK(bp); + + nbp = dp->lh_first; + for(; nbp != NULL; nbp = nbp->b_hash.le_next) { + if(nbp == bp) + panic("buf already in hashlist"); + } + +#if 0 + LIST_INSERT_HEAD(dp, bp, b_hash); +#else + blistenterhead(dp, bp); +#endif + simple_unlock(&bufhashlist_slock); +} 
+ +void +bremhash(struct buf *bp) +{ + int s; + + simple_lock(&bufhashlist_slock); + if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) + panic("bremhash le_prev is deadbeef"); + if (bp->b_hash.le_next == bp) + panic("bremhash: next points to self"); + + if (bp->b_hash.le_next != NULL) + bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev; + *bp->b_hash.le_prev = (bp)->b_hash.le_next; + simple_unlock(&bufhashlist_slock); +} + +#endif /* 1 */ + + +/* + * Remove a buffer from the free list it's on + */ +void +bremfree(bp) + struct buf *bp; +{ + struct bqueues *dp = NULL; + int whichq = -1; + + /* + * We only calculate the head of the freelist when removing + * the last element of the list as that is the only time that + * it is needed (e.g. to reset the tail pointer). + * + * NB: This makes an assumption about how tailq's are implemented. + */ + if (bp->b_freelist.tqe_next == NULL) { + for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) + if (dp->tqh_last == &bp->b_freelist.tqe_next) + break; + if (dp == &bufqueues[BQUEUES]) + panic("bremfree: lost tail"); + } + TAILQ_REMOVE(dp, bp, b_freelist); + whichq = bp->b_whichq; + bufqdec(whichq); + bp->b_whichq = -1; + bp->b_timestamp = 0; +} + +/* + * Initialize buffers and hash links for buffers. 
+ */ +void +bufinit() +{ + register struct buf *bp; + register struct bqueues *dp; + register int i; + int metabuf; + long whichq; +#if ZALLOC_METADATA + static void bufzoneinit(); +#endif /* ZALLOC_METADATA */ + + /* Initialize the buffer queues ('freelists') and the hash table */ + for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) + TAILQ_INIT(dp); + bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash); + + simple_lock_init(&bufhashlist_slock ); + + metabuf = nbuf/8; /* reserved for meta buf */ + + /* Initialize the buffer headers */ + for (i = 0; i < nbuf; i++) { + bp = &buf[i]; + bzero((char *)bp, sizeof *bp); + bp->b_dev = NODEV; + bp->b_rcred = NOCRED; + bp->b_wcred = NOCRED; + bp->b_vnbufs.le_next = NOLIST; + bp->b_flags = B_INVAL; + /* + * metabuf buffer headers on the meta-data list and + * rest of the buffer headers on the empty list + */ + if (--metabuf ) + whichq = BQ_META; + else + whichq = BQ_EMPTY; + + BLISTNONE(bp); + dp = &bufqueues[whichq]; + binsheadfree(bp, dp, whichq); + binshash(bp, &invalhash); + } + + for (; i < nbuf + niobuf; i++) { + bp = &buf[i]; + bzero((char *)bp, sizeof *bp); + bp->b_dev = NODEV; + bp->b_rcred = NOCRED; + bp->b_wcred = NOCRED; + bp->b_vnbufs.le_next = NOLIST; + bp->b_flags = B_INVAL; + binsheadfree(bp, &iobufqueue, -1); + } + + printf("using %d buffer headers and %d cluster IO buffer headers\n", + nbuf, niobuf); + +#if ZALLOC_METADATA + /* Set up zones for meta-data */ + bufzoneinit(); +#endif + +#if XXX + /* create a thread to do dynamic buffer queue balancing */ + bufq_balance_thread_init(); +#endif /* XXX */ +} + +/* __inline */ +struct buf * +bio_doread(vp, blkno, size, cred, async, queuetype) + struct vnode *vp; + daddr_t blkno; + int size; + struct ucred *cred; + int async; + int queuetype; +{ + register struct buf *bp; + struct proc *p = current_proc(); + + bp = getblk(vp, blkno, size, 0, 0, queuetype); + + /* + * If buffer does not have data valid, start a read. 
+ * Note that if buffer is B_INVAL, getblk() won't return it. + * Therefore, it's valid if it's I/O has completed or been delayed. + */ + if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) { + /* Start I/O for the buffer (keeping credentials). */ + SET(bp->b_flags, B_READ | async); + if (cred != NOCRED && bp->b_rcred == NOCRED) { + crhold(cred); + bp->b_rcred = cred; + } + VOP_STRATEGY(bp); + + trace(TR_BREADMISS, pack(vp, size), blkno); + + /* Pay for the read. */ + if (p && p->p_stats) + p->p_stats->p_ru.ru_inblock++; /* XXX */ + } else if (async) { + brelse(bp); + } + + trace(TR_BREADHIT, pack(vp, size), blkno); + + return (bp); +} +/* + * Read a disk block. + * This algorithm described in Bach (p.54). + */ +int +bread(vp, blkno, size, cred, bpp) + struct vnode *vp; + daddr_t blkno; + int size; + struct ucred *cred; + struct buf **bpp; +{ + register struct buf *bp; + + /* Get buffer for block. */ + bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ); + + /* Wait for the read to complete, and return result. */ + return (biowait(bp)); +} + +/* + * Read a disk block. [bread() for meta-data] + * This algorithm described in Bach (p.54). + */ +int +meta_bread(vp, blkno, size, cred, bpp) + struct vnode *vp; + daddr_t blkno; + int size; + struct ucred *cred; + struct buf **bpp; +{ + register struct buf *bp; + + /* Get buffer for block. */ + bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META); + + /* Wait for the read to complete, and return result. */ + return (biowait(bp)); +} + +/* + * Read-ahead multiple disk blocks. The first is sync, the rest async. + * Trivial modification to the breada algorithm presented in Bach (p.55). 
+ */ +int +breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp) + struct vnode *vp; + daddr_t blkno; int size; + daddr_t rablks[]; int rasizes[]; + int nrablks; + struct ucred *cred; + struct buf **bpp; +{ + register struct buf *bp; + int i; + + bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ); + + /* + * For each of the read-ahead blocks, start a read, if necessary. + */ + for (i = 0; i < nrablks; i++) { + /* If it's in the cache, just go on to next one. */ + if (incore(vp, rablks[i])) + continue; + + /* Get a buffer for the read-ahead block */ + (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, BLK_READ); + } + + /* Otherwise, we had to start a read for it; wait until it's valid. */ + return (biowait(bp)); +} + +/* + * Read with single-block read-ahead. Defined in Bach (p.55), but + * implemented as a call to breadn(). + * XXX for compatibility with old file systems. + */ +int +breada(vp, blkno, size, rablkno, rabsize, cred, bpp) + struct vnode *vp; + daddr_t blkno; int size; + daddr_t rablkno; int rabsize; + struct ucred *cred; + struct buf **bpp; +{ + + return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp)); +} + +/* + * Block write. Described in Bach (p.56) + */ +int +bwrite(bp) + struct buf *bp; +{ + int rv, sync, wasdelayed; + struct proc *p = current_proc(); + upl_t upl; + upl_page_info_t *pl; + void * object; + kern_return_t kret; + struct vnode *vp = bp->b_vp; + + /* Remember buffer type, to switch on it later. */ + sync = !ISSET(bp->b_flags, B_ASYNC); + wasdelayed = ISSET(bp->b_flags, B_DELWRI); + CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); + + if (!sync) { + /* + * If not synchronous, pay for the I/O operation and make + * sure the buf is on the correct vnode queue. We have + * to do this now, because if we don't, the vnode may not + * be properly notified that its I/O has completed. 
+ */ + if (wasdelayed) + reassignbuf(bp, vp); + else + if (p && p->p_stats) + p->p_stats->p_ru.ru_oublock++; /* XXX */ + } + + trace(TR_BWRITE, pack(vp, bp->b_bcount), bp->b_lblkno); + + /* Initiate disk write. Make sure the appropriate party is charged. */ + SET(bp->b_flags, B_WRITEINPROG); + vp->v_numoutput++; + + VOP_STRATEGY(bp); + + if (sync) { + /* + * If I/O was synchronous, wait for it to complete. + */ + rv = biowait(bp); + + /* + * Pay for the I/O operation, if it's not been paid for, and + * make sure it's on the correct vnode queue. (async operatings + * were payed for above.) + */ + if (wasdelayed) + reassignbuf(bp, vp); + else + if (p && p->p_stats) + p->p_stats->p_ru.ru_oublock++; /* XXX */ + + /* Release the buffer. */ + brelse(bp); + + return (rv); + } else { + return (0); + } +} + +int +vn_bwrite(ap) + struct vop_bwrite_args *ap; +{ + return (bwrite(ap->a_bp)); +} + +/* + * Delayed write. + * + * The buffer is marked dirty, but is not queued for I/O. + * This routine should be used when the buffer is expected + * to be modified again soon, typically a small write that + * partially fills a buffer. + * + * NB: magnetic tapes cannot be delayed; they must be + * written in the order that the writes are requested. + * + * Described in Leffler, et al. (pp. 208-213). + */ +void +bdwrite(bp) + struct buf *bp; +{ + struct proc *p = current_proc(); + kern_return_t kret; + upl_t upl; + upl_page_info_t *pl; + + /* + * If the block hasn't been seen before: + * (1) Mark it as having been seen, + * (2) Charge for the write. + * (3) Make sure it's on its vnode's correct block list, + */ + if (!ISSET(bp->b_flags, B_DELWRI)) { + SET(bp->b_flags, B_DELWRI); + if (p && p->p_stats) + p->p_stats->p_ru.ru_oublock++; /* XXX */ + + reassignbuf(bp, bp->b_vp); + } + + + /* If this is a tape block, write it the block now. 
*/ + if (ISSET(bp->b_flags, B_TAPE)) { + /* bwrite(bp); */ + VOP_BWRITE(bp); + return; + } + + /* Otherwise, the "write" is done, so mark and release the buffer. */ + SET(bp->b_flags, B_DONE); + brelse(bp); +} + +/* + * Asynchronous block write; just an asynchronous bwrite(). + */ +void +bawrite(bp) + struct buf *bp; +{ + + SET(bp->b_flags, B_ASYNC); + VOP_BWRITE(bp); +} + +/* + * Release a buffer on to the free lists. + * Described in Bach (p. 46). + */ +void +brelse(bp) + struct buf *bp; +{ + struct bqueues *bufq; + int s; + long whichq; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START, + bp->b_lblkno * PAGE_SIZE, bp, bp->b_data, bp->b_flags, 0); + + trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); + + /* IO is done. Cleanup the UPL state */ + if (!ISSET(bp->b_flags, B_META) + && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { + kern_return_t kret; + upl_t upl; + upl_page_info_t *pl; + int upl_flags; + + if ( !ISSET(bp->b_flags, B_PAGELIST)) { + if ( !ISSET(bp->b_flags, B_INVAL)) { + void *object; + off_t file_offset; + + object = ubc_getobject(bp->b_vp, UBC_NOREACTIVATE); + if (object == (void *)NULL) + panic("vmobject for vp is null"); + if (bp->b_bufsize & 0xfff) + panic("list request is with less than 4k"); + + file_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno); + + kret = vm_fault_list_request(object, + (vm_object_offset_t)file_offset, bp->b_bufsize, + &upl, NULL, 0, + (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS + | UPL_SET_INTERNAL)); + if (kret != KERN_SUCCESS) + panic("brelse: Failed to get pagelists"); +#ifdef UBC_DEBUG + upl_ubc_alias_set(upl, bp, 5); +#endif /* UBC_DEBUG */ + } else + upl = (upl_t) 0; + } else { + upl = bp->b_pagelist; + kret = kernel_upl_unmap(kernel_map, upl); + + if (kret != KERN_SUCCESS) + panic("kernel_upl_unmap failed"); + bp->b_data = 0; + } + if (upl) { + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + + if (bp->b_flags & (B_ERROR | B_INVAL)) { + if (bp->b_flags & (B_READ | B_INVAL)) + upl_flags = 
UPL_ABORT_DUMP_PAGES; + else + upl_flags = 0; + kernel_upl_abort(upl, upl_flags); + } else { + if (ISSET(bp->b_flags, (B_DELWRI | B_WASDIRTY))) + upl_flags = UPL_COMMIT_SET_DIRTY | UPL_COMMIT_FREE_ON_EMPTY; + else + upl_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY; + kernel_upl_commit_range(upl, 0, bp->b_bufsize, + upl_flags + | UPL_COMMIT_INACTIVATE, + pl, MAX_UPL_TRANSFER); + } + s = splbio(); + CLR(bp->b_flags, B_PAGELIST); + bp->b_pagelist = 0; + splx(s); + } + } else { + if(ISSET(bp->b_flags, B_PAGELIST)) + panic("brelse: pagelist set for non VREG; vp=%x", bp->b_vp); + } + + /* Wake up any processes waiting for any buffer to become free. */ + if (needbuffer) { + needbuffer = 0; + wakeup(&needbuffer); + } + + /* Wake up any proceeses waiting for _this_ buffer to become free. */ + if (ISSET(bp->b_flags, B_WANTED)) { + CLR(bp->b_flags, B_WANTED); + wakeup(bp); + } + + /* Block disk interrupts. */ + s = splbio(); + + /* + * Determine which queue the buffer should be on, then put it there. + */ + + /* If it's locked, don't report an error; try again later. */ + if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR)) + CLR(bp->b_flags, B_ERROR); + + /* If it's not cacheable, or an error, mark it invalid. */ + if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))) + SET(bp->b_flags, B_INVAL); + + if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) { + /* + * If it's invalid or empty, dissociate it from its vnode + * and put on the head of the appropriate queue. + */ + if (bp->b_vp) + brelvp(bp); + CLR(bp->b_flags, B_DELWRI); + if (bp->b_bufsize <= 0) + whichq = BQ_EMPTY; /* no data */ + else + whichq = BQ_AGE; /* invalid data */ + + bufq = &bufqueues[whichq]; + binsheadfree(bp, bufq, whichq); + } else { + /* + * It has valid data. Put it on the end of the appropriate + * queue, so that it'll stick around for as long as possible. 
+ */ + if (ISSET(bp->b_flags, B_LOCKED)) + whichq = BQ_LOCKED; /* locked in core */ + else if (ISSET(bp->b_flags, B_META)) + whichq = BQ_META; /* meta-data */ + else if (ISSET(bp->b_flags, B_AGE)) + whichq = BQ_AGE; /* stale but valid data */ + else + whichq = BQ_LRU; /* valid data */ + + bufq = &bufqueues[whichq]; + binstailfree(bp, bufq, whichq); + } + + /* Unlock the buffer. */ + CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE)); + + /* Allow disk interrupts. */ + splx(s); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END, + bp, bp->b_data, bp->b_flags, 0, 0); +} + +/* + * Determine if a block is in the cache. + * Just look on what would be its hash chain. If it's there, return + * a pointer to it, unless it's marked invalid. If it's marked invalid, + * we normally don't return the buffer, unless the caller explicitly + * wants us to. + */ +struct buf * +incore(vp, blkno) + struct vnode *vp; + daddr_t blkno; +{ + struct buf *bp; + int bufseen = 0; + + bp = BUFHASH(vp, blkno)->lh_first; + + /* Search hash chain */ + for (; bp != NULL; bp = bp->b_hash.le_next, bufseen++) { + if (bp->b_lblkno == blkno && bp->b_vp == vp && + !ISSET(bp->b_flags, B_INVAL)) + return (bp); + if(bufseen >= nbuf) + panic("walked more than nbuf in incore"); + + } + + return (0); +} + +/* XXX FIXME -- Update the comment to reflect the UBC changes -- */ +/* + * Get a block of requested size that is associated with + * a given vnode and block offset. If it is found in the + * block cache, mark it as having been found, make it busy + * and return it. Otherwise, return an empty block of the + * correct size. It is up to the caller to insure that the + * cached blocks be of the correct size. 
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo, operation)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo, operation;
{
	struct buf *bp;
	int s, err;
	upl_t upl;
	upl_page_info_t *pl;
	void * object;
	kern_return_t kret;
	void *pager;		/* NOTE(review): assigned below but never read */
	off_t file_offset;
	int error=0;
	int pagedirty = 0;

start:
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
		blkno * PAGE_SIZE, size, operation, 0, 0);

	s = splbio();
	if (bp = incore(vp, blkno)) {
		/* Found in the Buffer Cache */
		if (ISSET(bp->b_flags, B_BUSY)) {
			/* but is busy */
			switch (operation) {
			case BLK_READ:
			case BLK_WRITE:
			case BLK_META:
				/* sleep until the current holder releases it, then retry */
				SET(bp->b_flags, B_WANTED);
				bufstats.bufs_busyincore++;
				err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
					slptimeo);
				splx(s);
				/*
				 * Callers who call with PCATCH or timeout are
				 * willing to deal with the NULL pointer
				 */
				if (err && ((slpflag & PCATCH) ||
					((err == EWOULDBLOCK) && slptimeo)))
					return (NULL);
				goto start;
				/*NOTREACHED*/
				break;

			case BLK_PAGEIN:
				/* pagein operation must not use getblk */
				panic("getblk: pagein for incore busy buffer");
				splx(s);
				/*NOTREACHED*/
				break;

			case BLK_PAGEOUT:
				/* pageout operation must not use getblk */
				panic("getblk: pageout for incore busy buffer");
				splx(s);
				/*NOTREACHED*/
				break;

			default:
				panic("getblk: %d unknown operation 1", operation);
				/*NOTREACHED*/
				break;
			}
		} else {
			/* not busy: claim it and take it off its free queue */
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			bufstats.bufs_incore++;
			splx(s);

			allocbuf(bp, size);
			if (ISSET(bp->b_flags, B_PAGELIST))
				panic("pagelist buffer is not busy");

			switch (operation) {
			case BLK_READ:
			case BLK_WRITE:
				if (UBCISVALID(bp->b_vp) && bp->b_bufsize) {

					/* file data buffers must be page multiples */
					if (bp->b_bufsize & 0xfff)
						panic("list request is with less than 4k");

					object = ubc_getobject(vp, UBC_NOREACTIVATE);
					if (object == (void *)NULL)
						panic("vmobject for vp is null");

					file_offset = ubc_blktooff(vp, bp->b_lblkno);

					/* wire the VM pages backing this buffer */
					kret = vm_fault_list_request(object,
						(vm_object_offset_t)file_offset, bp->b_bufsize,
						&upl, NULL, 0,
						(UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_SET_INTERNAL));

					if (kret != KERN_SUCCESS)
						panic("Failed to get pagelists");

					SET(bp->b_flags, B_PAGELIST);
					bp->b_pagelist = upl;

					pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

					if ( !upl_valid_page(pl, 0))
						panic("getblk: incore buffer without valid page");

					/* remember VM's dirty state so brelse() can commit it correctly */
					if (upl_dirty_page(pl, 0))
						SET(bp->b_flags, B_WASDIRTY);
					else
						CLR(bp->b_flags, B_WASDIRTY);

					kret = kernel_upl_map(kernel_map, upl, (vm_address_t *)&(bp->b_data));
					if (kret != KERN_SUCCESS) {
						panic("getblk: kernel_upl_map() "
							"failed with (%d)", kret);
					}
					if (bp->b_data == 0) panic("kernel_upl_map mapped 0");
				}
				break;

			case BLK_META:
				/*
				 * VM is not involved in IO for the meta data
				 * buffer already has valid data
				 */
				if(bp->b_data == 0)
					panic("bp->b_data null incore buf=%x", bp);
				break;

			case BLK_PAGEIN:
			case BLK_PAGEOUT:
				panic("getblk: paging operation 1");
				break;

			default:
				panic("getblk: %d unknown operation 2", operation);
				/*NOTREACHED*/
				break;
			}
		}
	} else { /* not incore() */
		int queue = BQ_EMPTY; /* Start with no preference */
		splx(s);

		/* meta data requests and non-UBC vnodes always take the META path */
		if ((operation == BLK_META) || (UBCINVALID(vp)) ||
			!(UBCINFOEXISTS(vp))) {
			operation = BLK_META;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL)
			goto start;
		/*
		 * if it is meta, the queue may be set to other
		 * type so reset as well as mark it to be B_META
		 * so that when buffer is released it will goto META queue
		 * Also, if the vnode is not VREG, then it is META
		 */
		if (operation == BLK_META) {
			SET(bp->b_flags, B_META);
			queue = BQ_META;
		}
		allocbuf(bp, size);

		switch (operation) {
		case BLK_META:
			/* buffer data is invalid */

			/*
			 * Insert in the hash so that incore() can find it
			 */
			binshash(bp, BUFHASH(vp, blkno));
#if !ZALLOC_METADATA
			if (bp->b_data)
				panic("bp->b_data is not nul; %x",bp);
			kret = kmem_alloc(kernel_map,
				&bp->b_data, bp->b_bufsize);
			if (kret != KERN_SUCCESS)
				panic("getblk: kmem_alloc() returned %d", kret);
#endif /* ZALLOC_METADATA */

			if(bp->b_data == 0)
				panic("bp->b_data is null %x",bp);

			bp->b_blkno = bp->b_lblkno = blkno;
			s = splbio();
			bgetvp(vp, bp);
			bufstats.bufs_miss++;
			splx(s);
			if (bp->b_data == 0)
				panic("b_data is 0: 2");

			/* wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			break;

		case BLK_READ:
		case BLK_WRITE:
			/*
			 * Insert in the hash so that incore() can find it
			 */
			binshash(bp, BUFHASH(vp, blkno));
			pager = ubc_getpager(vp);
			file_offset = ubc_blktooff(vp, blkno);

			object = ubc_getobject(vp, UBC_NOREACTIVATE);
			if (object == (void *)NULL)
				panic("vmobject for vp is null");
			if (bp->b_bufsize & 0xfff)
				panic("list request is with less than 4k");

			if (ISSET(bp->b_flags, B_PAGELIST))
				panic("B_PAGELIST in bp=%x",bp);

			kret = vm_fault_list_request(object,
				(vm_object_offset_t)file_offset, bp->b_bufsize,
				&upl, NULL, 0,
				(UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS | UPL_SET_INTERNAL));

			if (kret != KERN_SUCCESS)
				panic("Failed to get pagelists");

#ifdef UBC_DEBUG
			upl_ubc_alias_set(upl, bp, 4);
#endif /* UBC_DEBUG */
			bp->b_blkno = bp->b_lblkno = blkno;
			bp->b_pagelist = upl;

			SET(bp->b_flags, B_PAGELIST);
			pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

			if (upl_valid_page(pl, 0)) {
				/* the page is already resident in VM: a cache hit */
				SET(bp->b_flags, B_CACHE | B_DONE);
				bufstats.bufs_vmhits++;

				pagedirty = upl_dirty_page(pl, 0);

				if (pagedirty)
					SET(bp->b_flags, B_WASDIRTY);

				if (vp->v_tag == VT_NFS) {
					/* NFS: clip the valid/dirty range to the file size */
					off_t f_offset;
					int valid_size;

					bp->b_validoff = 0;
					bp->b_dirtyoff = 0;

					f_offset = ubc_blktooff(vp, blkno);

					if (f_offset > vp->v_ubcinfo->ui_size) {
						CLR(bp->b_flags, (B_CACHE|B_DONE|B_WASDIRTY));
						bp->b_validend = 0;
						bp->b_dirtyend = 0;
					} else {
						valid_size = min(((unsigned int)(vp->v_ubcinfo->ui_size - f_offset)), PAGE_SIZE);
						bp->b_validend = valid_size;

						if (pagedirty)
							bp->b_dirtyend = valid_size;
						else
							bp->b_dirtyend = 0;

						KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_NONE,
							bp->b_validend, bp->b_dirtyend,
							(int)vp->v_ubcinfo->ui_size, 0, 0);
					}
				} else {
					bp->b_validoff = 0;
					bp->b_dirtyoff = 0;

					if (pagedirty) {
						/* page is dirty */
						bp->b_validend = bp->b_bcount;
						bp->b_dirtyend = bp->b_bcount;
					} else {
						/* page is clean */
						bp->b_validend = bp->b_bcount;
						bp->b_dirtyend = 0;
					}
				}
				if (error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL)) {
					/*
					 * NOTE(review): this panic makes the error
					 * recovery below unreachable dead code --
					 * decide whether to panic or recover.
					 */
					panic("VOP_BMAP failed in getblk");
					/*NOTREACHED*/
					/*
					 * XXX:  We probably should invalidate the VM Page
					 */
					bp->b_error = error;
					SET(bp->b_flags, (B_ERROR | B_INVAL));
					/* undo B_DONE that was set before upl_commit() */
					CLR(bp->b_flags, B_DONE);
					brelse(bp);
					return (0);
				}
			} else {
				bufstats.bufs_miss++;
			}
			kret = kernel_upl_map(kernel_map, upl, (vm_address_t *)&(bp->b_data));
			if (kret != KERN_SUCCESS) {
				panic("getblk: kernel_upl_map() "
					"failed with (%d)", kret);
			}
			if (bp->b_data == 0) panic("kernel_upl_map mapped 0");

			s = splbio();
			bgetvp(vp, bp);
			splx(s);

			break;

		case BLK_PAGEIN:
		case BLK_PAGEOUT:
			panic("getblk: paging operation 2");
			break;
		default:
			panic("getblk: %d unknown operation 3", operation);
			/*NOTREACHED*/
			break;
		}
	}

	if (bp->b_data == NULL)
		panic("getblk: bp->b_addr is null");

	/* file data buffers must be page multiples; meta buffers 512-byte multiples */
	if (bp->b_bufsize & 0xfff) {
#if ZALLOC_METADATA
		if (ISSET(bp->b_flags, B_META) && (bp->b_bufsize & 0x1ff))
#endif /* ZALLOC_METADATA */
			panic("getblk: bp->b_bufsize = %d", bp->b_bufsize);
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
		bp, bp->b_data, bp->b_flags, 3, 0);

	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;
	int queue = BQ_EMPTY;
#if !ZALLOC_METADATA
	kern_return_t kret;
	vm_size_t desired_size = roundup(size, CLBYTES);

	if (desired_size > MAXBSIZE)
		panic("geteblk: buffer larger than MAXBSIZE requested");
#endif /* ZALLOC_METADATA */

	/* spin until a buffer header is available; prefer the EMPTY queue */
	while ((bp = getnewbuf(0, 0, &queue)) == 0)
		;
#if ZALLOC_METADATA
	SET(bp->b_flags, (B_META|B_INVAL));
#else
	SET(bp->b_flags, B_INVAL);
#endif /* ZALLOC_METADATA */

#if DIAGNOSTIC
	assert(queue == BQ_EMPTY);
#endif /* DIAGNOSTIC */
	/* XXX need to implement logic to deal with other queues */

#if !ZALLOC_METADATA
	/* Empty buffer - allocate pages */
	kret = kmem_alloc_aligned(kernel_map, &bp->b_data, desired_size);
	if (kret != KERN_SUCCESS)
		panic("geteblk: kmem_alloc_aligned returned %d", kret);
#endif /* ZALLOC_METADATA */

	/* keep it findable (and reclaimable) via the invalid-buffer hash */
	binshash(bp, &invalhash);
	allocbuf(bp, size);
	bufstats.bufs_eblk++;

	return (bp);
}

#if ZALLOC_METADATA
/*
 * Zones for the meta data buffers
 */

#define MINMETA 512
#define MAXMETA 4096

struct meta_zone_entry {
	zone_t mz_zone;
	vm_size_t mz_size;	/* element size of this zone */
	vm_size_t mz_max;	/* maximum memory the zone may use */
	char *mz_name;
};

/* one zone per 512-byte size class from 512 up to 4096 bytes */
struct meta_zone_entry meta_zones[] = {
	{NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" },
	{NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" },
	{NULL, (MINMETA * 3), 16 * (MINMETA * 3), "buf.1536" },
	{NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" },
	{NULL, (MINMETA * 5), 16 * (MINMETA * 5), "buf.2560" },
	{NULL, (MINMETA * 6), 16 * (MINMETA * 6), "buf.3072" },
	{NULL, (MINMETA * 7), 16 * (MINMETA * 7), "buf.3584" },
	{NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
	{NULL, 0, 0, "" } /* End */
};

/*
 * Initialize the meta data zones
 */
static void
bufzoneinit(void)
{
	int i;

	for (i = 0; meta_zones[i].mz_size != 0; i++) {
		meta_zones[i].mz_zone =
			zinit(meta_zones[i].mz_size,
				meta_zones[i].mz_max,
				PAGE_SIZE,
				meta_zones[i].mz_name);
	}
}

+static zone_t +getbufzone(size_t size) +{ + int i; + + if (size % 512) + panic("getbufzone: incorect size = %d", size); + + i = (size / 512) - 1; + return (meta_zones[i].mz_zone); +} +#endif /* ZALLOC_METADATA */ + +/* + * With UBC, there is no need to expand / shrink the file data + * buffer. The VM uses the same pages, hence no waste. + * All the file data buffers can have one size. + * In fact expand / shrink would be an expensive operation. + * + * Only exception to this is meta-data buffers. Most of the + * meta data operations are smaller than PAGE_SIZE. Having the + * meta-data buffers grow and shrink as needed, optimizes use + * of the kernel wired memory. + */ + +int +allocbuf(bp, size) + struct buf *bp; + int size; +{ + vm_size_t desired_size; + + desired_size = roundup(size, CLBYTES); + + if(desired_size < PAGE_SIZE) + desired_size = PAGE_SIZE; + if (desired_size > MAXBSIZE) + panic("allocbuf: buffer larger than MAXBSIZE requested"); + +#if ZALLOC_METADATA + if (ISSET(bp->b_flags, B_META)) { + kern_return_t kret; + zone_t zprev, z; + size_t nsize = roundup(size, MINMETA); + + if (bp->b_data) { + vm_offset_t elem = (vm_offset_t)bp->b_data; + + if (ISSET(bp->b_flags, B_ZALLOC)) + if (bp->b_bufsize <= MAXMETA) { + if (bp->b_bufsize < nsize) { + /* reallocate to a bigger size */ + desired_size = nsize; + + zprev = getbufzone(bp->b_bufsize); + z = getbufzone(nsize); + bp->b_data = (caddr_t)zalloc(z); + if(bp->b_data == 0) + panic("allocbuf: zalloc() returned NULL"); + bcopy(elem, bp->b_data, bp->b_bufsize); + zfree(zprev, elem); + } else { + desired_size = bp->b_bufsize; + } + } else + panic("allocbuf: B_ZALLOC set incorrectly"); + else + if (bp->b_bufsize < desired_size) { + /* reallocate to a bigger size */ + kret = kmem_alloc(kernel_map, &bp->b_data, desired_size); + if (kret != KERN_SUCCESS) + panic("allocbuf: kmem_alloc() returned %d", kret); + if(bp->b_data == 0) + panic("allocbuf: null b_data"); + bcopy(elem, bp->b_data, bp->b_bufsize); + 
kmem_free(kernel_map, elem, bp->b_bufsize); + } else { + desired_size = bp->b_bufsize; + } + } else { + /* new allocation */ + if (nsize <= MAXMETA) { + desired_size = nsize; + z = getbufzone(nsize); + bp->b_data = (caddr_t)zalloc(z); + if(bp->b_data == 0) + panic("allocbuf: zalloc() returned NULL 2"); + SET(bp->b_flags, B_ZALLOC); + } else { + kret = kmem_alloc(kernel_map, &bp->b_data, desired_size); + if (kret != KERN_SUCCESS) + panic("allocbuf: kmem_alloc() 2 returned %d", kret); + if(bp->b_data == 0) + panic("allocbuf: null b_data 2"); + } + } + } + + if (ISSET(bp->b_flags, B_META) && (bp->b_data == 0)) + panic("allocbuf: bp->b_data is NULL"); +#endif /* ZALLOC_METADATA */ + + bp->b_bufsize = desired_size; + bp->b_bcount = size; +} + +/* + * Get a new buffer from one of the free lists. + * + * Request for a queue is passes in. The queue from which the buffer was taken + * from is returned. Out of range queue requests get BQ_EMPTY. Request for + * BQUEUE means no preference. Use heuristics in that case. + * Heuristics is as follows: + * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order. + * If none available block till one is made available. + * If buffers available on both BQ_AGE and BQ_LRU, check the timestamps. + * Pick the most stale buffer. + * If found buffer was marked delayed write, start the async. write + * and restart the search. + * Initialize the fields and disassociate the buffer from the vnode. + * Remove the buffer from the hash. Return the buffer and the queue + * on which it was found. 
 */

static struct buf *
getnewbuf(slpflag, slptimeo, queue)
	int slpflag, slptimeo;
	int *queue;
{
	register struct buf *bp;
	register struct buf *lru_bp;
	register struct buf *age_bp;
	register struct buf *meta_bp;
	register int age_time, lru_time, bp_time, meta_time;
	int s;
	struct ucred *cred;	/* NOTE(review): unused in this function */
	int req = *queue;	/* save it for restarts */

start:
	s = splbio();

	/* invalid request gets empty queue */
	if ((*queue > BQUEUES) || (*queue < 0))
		*queue = BQ_EMPTY;

	/* (*queue == BQUEUES) means no preference */
	if (*queue != BQUEUES) {
		/* Try for the requested queue first */
		bp = bufqueues[*queue].tqh_first;
		if (bp)
			goto found;
	}

	/* Unable to use requested queue */
	age_bp = bufqueues[BQ_AGE].tqh_first;
	lru_bp = bufqueues[BQ_LRU].tqh_first;
	meta_bp = bufqueues[BQ_META].tqh_first;

	if (!age_bp && !lru_bp && !meta_bp) { /* Unavailable on AGE or LRU */
		/* Try the empty list first */
		bp = bufqueues[BQ_EMPTY].tqh_first;
		if (bp) {
			*queue = BQ_EMPTY;
			goto found;
		}
#if DIAGNOSTIC
		/* with UBC this is a fatal condition */
		panic("getnewbuf: No useful buffers");
#else
		/* Log this error condition */
		printf("getnewbuf: No useful buffers");
#endif /* DIAGNOSTIC */

		/* wait for a free buffer of any kind */
		needbuffer = 1;
		bufstats.bufs_sleeps++;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* Buffer available either on AGE or LRU or META */
	bp = NULL;
	*queue = -1;

	/* Buffer available either on AGE or LRU */
	if (!age_bp) {
		bp = lru_bp;
		*queue = BQ_LRU;
	} else if (!lru_bp) {
		bp = age_bp;
		*queue = BQ_AGE;
	} else { /* buffer available on both AGE and LRU */
		age_time = time.tv_sec - age_bp->b_timestamp;
		lru_time = time.tv_sec - lru_bp->b_timestamp;
		if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
			bp = age_bp;
			*queue = BQ_AGE;
			/*
			 * we should probably re-timestamp everything in the
			 * queues at this point with the current time
			 */
		} else {
			/* prefer a stale LRU buffer over a still-fresh AGE buffer */
			if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
				bp = lru_bp;
				*queue = BQ_LRU;
			} else {
				bp = age_bp;
				*queue = BQ_AGE;
			}
		}
	}

	if (!bp) { /* Neither on AGE nor on LRU */
		bp = meta_bp;
		*queue = BQ_META;
	} else if (meta_bp) {
		/* see if a stale META buffer beats the pick made above */
		bp_time = time.tv_sec - bp->b_timestamp;
		meta_time = time.tv_sec - meta_bp->b_timestamp;

		if (!(bp_time < 0) && !(meta_time < 0)) {
			/* time not set backwards */
			int bp_is_stale;
			bp_is_stale = (*queue == BQ_LRU) ?
					lru_is_stale : age_is_stale;

			if ((meta_time >= meta_is_stale) &&
					(bp_time < bp_is_stale)) {
				bp = meta_bp;
				*queue = BQ_META;
			}
		}
	}

	if (bp == NULL)
		panic("getnewbuf: null bp");

found:
	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
		panic("getnewbuf: le_prev is deadbeef");

	if(ISSET(bp->b_flags, B_BUSY))
		panic("getnewbuf reusing BUSY buf");

	/* Clean it */
	if (bcleanbuf(bp)) {
		/* bawrite() issued, buffer not ready */
		splx(s);
		*queue = req;	/* restart with the original request */
		goto start;
	}
	splx(s);
	return (bp);
}
/*
 * NOTE(review): the two #include directives below lost their header
 * names during extraction ("#include" with no target is not valid C);
 * restore the header names from the upstream sources.
 */
#include
#include

/*
 * Clean a buffer.
 * Returns 0 if the buffer is ready to use,
 * Returns 1 if issued a bawrite() to indicate
 * that the buffer is not ready.
 */
int
bcleanbuf(struct buf *bp)
{
	int s;
	struct ucred *cred;

	s = splbio();

	/* Remove from the queue */
	bremfree(bp);

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
		panic("bcleanbuf: le_prev is deadbeef");

	/* If buffer was a delayed write, start it, and return 1 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		bawrite (bp);
		return (1);
	}

	if (bp->b_vp)
		brelvp(bp);
	bremhash(bp);
	BLISTNONE(bp);

	splx(s);

	/* release the meta-data backing store */
	if (ISSET(bp->b_flags, B_META)) {
#if ZALLOC_METADATA
		vm_offset_t elem = (vm_offset_t)bp->b_data;
		if (elem == 0)
			panic("bcleanbuf: NULL bp->b_data B_META buffer");

		if (ISSET(bp->b_flags, B_ZALLOC)) {
			if (bp->b_bufsize <= MAXMETA) {
				zone_t z;

				z = getbufzone(bp->b_bufsize);
				bp->b_data = (caddr_t)0xdeadbeef;	/* poison against use-after-free */
				zfree(z, elem);
				CLR(bp->b_flags, B_ZALLOC);
			} else
				panic("bcleanbuf: B_ZALLOC set incorrectly");
		} else {
			bp->b_data = (caddr_t)0xdeadbeef;	/* poison against use-after-free */
			kmem_free(kernel_map, elem, bp->b_bufsize);
		}
#else
		if (bp->b_data == 0)
			panic("bcleanbuf: bp->b_data == NULL for B_META buffer");

		kmem_free(kernel_map, bp->b_data, bp->b_bufsize);
#endif /* ZALLOC_METADATA */
	}

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);

	/* disassociate us from our vnode, if we had one... */
	s = splbio();

	/* clear out various other fields */
	bp->b_data = 0;
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	cred = bp->b_rcred;
	if (cred != NOCRED) {
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	cred = bp->b_wcred;
	if (cred != NOCRED) {
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	splx(s);
	return (0);
}


/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
+ */ +int +biowait(bp) + struct buf *bp; +{ + upl_t upl; + upl_page_info_t *pl; + int s; + kern_return_t kret; + + s = splbio(); + while (!ISSET(bp->b_flags, B_DONE)) + tsleep(bp, PRIBIO + 1, "biowait", 0); + splx(s); + + /* check for interruption of I/O (e.g. via NFS), then errors. */ + if (ISSET(bp->b_flags, B_EINTR)) { + CLR(bp->b_flags, B_EINTR); + return (EINTR); + } else if (ISSET(bp->b_flags, B_ERROR)) + return (bp->b_error ? bp->b_error : EIO); + else + return (0); +} + +/* + * Mark I/O complete on a buffer. + * + * If a callback has been requested, e.g. the pageout + * daemon, do so. Otherwise, awaken waiting processes. + * + * [ Leffler, et al., says on p.247: + * "This routine wakes up the blocked process, frees the buffer + * for an asynchronous write, or, for a request by the pagedaemon + * process, invokes a procedure specified in the buffer structure" ] + * + * In real life, the pagedaemon (or other system processes) wants + * to do async stuff to, and doesn't want the buffer brelse()'d. + * (for swap pager, that puts swap buffers on the free lists (!!!), + * for the vn device, that puts malloc'd buffers on the free lists!) 
 */
void
biodone(bp)
	struct buf *bp;
{
	boolean_t funnel_state;
	int s;	/* NOTE(review): unused */

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
		bp, bp->b_data, bp->b_flags, 0, 0);

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	/*
	 * I/O was done, so don't believe
	 * the DIRTY state from VM anymore
	 */
	CLR(bp->b_flags, B_WASDIRTY);

	if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW))
		vwakeup(bp);	/* wake up reader */

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
		bp, bp->b_data, bp->b_flags, 0, 0);

	thread_funnel_set(kernel_flock, funnel_state);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

/*
 * Return a count of 'busy' buffers. Used at the time of shutdown.
 */
int
count_busy_buffers()
{
	register struct buf *bp;
	register int nbusy = 0;

	/* walk the static buffer header array backwards */
	for (bp = &buf[nbuf]; --bp >= buf; )
		if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
			nbusy++;
	return (nbusy);
}

#if 1	/*DIAGNOSTIC */
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];	/* histogram of buffer sizes, per queue */
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY", "META" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */


/*
 * Take a dedicated I/O buffer header off the iobufqueue for vp,
 * sleeping until one is available.  Returned B_BUSY.
 */
struct buf *
alloc_io_buf(vp)
	struct vnode *vp;
{
	register struct buf *bp;
	int s;

	s = splbio();

	while ((bp = iobufqueue.tqh_first) == NULL) {
		need_iobuffer = 1;
		bufstats.bufs_iobufsleeps++;
		tsleep(&need_iobuffer, (PRIBIO+1), "alloc_io_buf", 0);
	}
	TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
	bp->b_timestamp = 0;

	/* clear out various fields */
	bp->b_flags = B_BUSY;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_vp = vp;

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	bufstats.bufs_iobufinuse++;
	if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax)
		bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
	splx(s);

	return (bp);
}

/*
 * Return an I/O buffer header to the iobufqueue and wake any waiters.
 */
void
free_io_buf(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	/* put buffer back on the head of the iobufqueue */
	bp->b_vp = NULL;
	bp->b_flags = B_INVAL;

	binsheadfree(bp, &iobufqueue, -1);

	/* Wake up any processes waiting for any buffer to become free. */
	if (need_iobuffer) {
		need_iobuffer = 0;
		wakeup(&need_iobuffer);
	}
	bufstats.bufs_iobufinuse--;
	splx(s);
}


/* not hooked up yet */

/* XXX move this to a separate file */
/*
 * Dynamic Scaling of the Buffer Queues
 */

typedef long long blsize_t;

blsize_t MAXNBUF; /* initialize to (mem_size / PAGE_SIZE) */
/* Global tunable limits */
blsize_t nbufh;			/* number of buffer headers */
blsize_t nbuflow;		/* minimum number of buffer headers required */
blsize_t nbufhigh;		/* maximum number of buffer headers allowed */
blsize_t nbuftarget;	/* preferred number of buffer headers */

/*
 * assertions:
 *
 * 1. 0 < nbuflow <= nbufh <= nbufhigh
 * 2. nbufhigh <= MAXNBUF
 * 3. 0 < nbuflow <= nbuftarget <= nbufhigh
 * 4. nbufh can not be set by sysctl().
 */

/* Per queue tunable limits */

struct bufqlim {
	blsize_t bl_nlow;	/* minimum number of buffer headers required */
	blsize_t bl_num;	/* number of buffer headers on the queue */
	blsize_t bl_nlhigh;	/* maximum number of buffer headers allowed */
	blsize_t bl_target;	/* preferred number of buffer headers */
	long bl_stale;		/* Seconds after which a buffer is considered stale */
} bufqlim[BQUEUES];

/*
 * assertions:
 *
 * 1. 0 <= bl_nlow <= bl_num <= bl_nlhigh
 * 2. bl_nlhigh <= MAXNBUF
 * 3. bufqlim[BQ_META].bl_nlow != 0
 * 4. bufqlim[BQ_META].bl_nlow > (number of possible concurrent
 *    file system IO operations)
 * 5. bl_num can not be set by sysctl().
 * 6. bl_nhigh <= nbufhigh
 */

/*
 * Rationale:
 * ----------
 * Defining blsize_t as long permits 2^31 buffer headers per queue.
 * Which can describe (2^31 * PAGE_SIZE) memory per queue.
 *
 * These limits are exported by means of sysctl().
 * It was decided to define blsize_t as a 64 bit quantity.
 * This will make sure that we will not be required to change it
 * as long as we do not exceed 64 bit address space for the kernel.
 *
 * low and high numbers parameters initialized at compile time
 * and boot arguments can be used to override them. sysctl()
 * would not change the value. sysctl() can get all the values
 * but can set only target. num is the current level.
 *
 * Advantages of having a "bufqscan" thread doing the balancing are,
 * Keep enough bufs on BQ_EMPTY.
 * getnewbuf() by default will always select a buffer from the BQ_EMPTY.
 * getnewbuf() performs best if a buffer was found there.
 * Also this minimizes the possibility of starting IO
 * from getnewbuf(). That's a performance win, too.
 *
 * Localize complex logic [balancing as well as time aging]
 * to balancebufq().
 *
 * Simplify getnewbuf() logic by elimination of time aging code.
 */

/*
 * Algorithm:
 * -----------
 * The goal of the dynamic scaling of the buffer queues is to keep
 * the size of the LRU close to bl_target. Buffers on a queue would
 * be time aged.
 *
 * There would be a thread which will be responsible for "balancing"
 * the buffer cache queues.
 *
 * The scan order would be: AGE, LRU, META, EMPTY.
+ */ + +long bufqscanwait = 0; + +extern void bufqscan_thread(); +extern int balancebufq(int q); +extern int btrimempty(int n); +extern int initbufqscan(void); +extern int nextbufq(int q); +extern void buqlimprt(int all); + +void +bufq_balance_thread_init() +{ + + if (bufqscanwait++ == 0) { + int i; + + /* Initalize globals */ + MAXNBUF = (mem_size / PAGE_SIZE); + nbufh = nbuf; + nbuflow = min(nbufh, 100); + nbufhigh = min(MAXNBUF, max(nbufh, 2048)); + nbuftarget = (mem_size >> 5) / PAGE_SIZE; + nbuftarget = max(nbuflow, nbuftarget); + nbuftarget = min(nbufhigh, nbuftarget); + + /* + * Initialize the bufqlim + */ + + /* LOCKED queue */ + bufqlim[BQ_LOCKED].bl_nlow = 0; + bufqlim[BQ_LOCKED].bl_nlhigh = 32; + bufqlim[BQ_LOCKED].bl_target = 0; + bufqlim[BQ_LOCKED].bl_stale = 30; + + /* LRU queue */ + bufqlim[BQ_LRU].bl_nlow = 0; + bufqlim[BQ_LRU].bl_nlhigh = nbufhigh/4; + bufqlim[BQ_LRU].bl_target = nbuftarget/4; + bufqlim[BQ_LRU].bl_stale = LRU_IS_STALE; + + /* AGE queue */ + bufqlim[BQ_AGE].bl_nlow = 0; + bufqlim[BQ_AGE].bl_nlhigh = nbufhigh/4; + bufqlim[BQ_AGE].bl_target = nbuftarget/4; + bufqlim[BQ_AGE].bl_stale = AGE_IS_STALE; + + /* EMPTY queue */ + bufqlim[BQ_EMPTY].bl_nlow = 0; + bufqlim[BQ_EMPTY].bl_nlhigh = nbufhigh/4; + bufqlim[BQ_EMPTY].bl_target = nbuftarget/4; + bufqlim[BQ_EMPTY].bl_stale = 600000; + + /* META queue */ + bufqlim[BQ_META].bl_nlow = 0; + bufqlim[BQ_META].bl_nlhigh = nbufhigh/4; + bufqlim[BQ_META].bl_target = nbuftarget/4; + bufqlim[BQ_META].bl_stale = META_IS_STALE; + + buqlimprt(1); + } + + /* create worker thread */ + kernel_thread(kernel_task, bufqscan_thread); +} + +/* The workloop for the buffer balancing thread */ +void +bufqscan_thread() +{ + boolean_t funnel_state; + int moretodo = 0; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + for(;;) { + do { + int q; /* buffer queue to process */ + + for (q = initbufqscan(); q; ) { + moretodo |= balancebufq(q); + q = nextbufq(q); + } + } while (moretodo); + +#if 1 || DIAGNOSTIC 
+ vfs_bufstats(); + buqlimprt(0); +#endif + (void)tsleep((void *)&bufqscanwait, PRIBIO, "bufqscanwait", 60 * hz); + moretodo = 0; + } + + (void) thread_funnel_set(kernel_flock, FALSE); +} + +/* Seed for the buffer queue balancing */ +int +initbufqscan() +{ + /* Start with AGE queue */ + return (BQ_AGE); +} + +/* Pick next buffer queue to balance */ +int +nextbufq(int q) +{ + int order[] = { BQ_AGE, BQ_LRU, BQ_META, BQ_EMPTY, 0 }; + + q++; + q %= sizeof(order); + return (order[q]); +} + +/* function to balance the buffer queues */ +int +balancebufq(int q) +{ + int moretodo = 0; + int s = splbio(); + int n; + + /* reject invalid q */ + if ((q < 0) || (q >= BQUEUES)) + goto out; + + /* LOCKED queue MUST not be balanced */ + if (q == BQ_LOCKED) + goto out; + + n = (bufqlim[q].bl_num - bufqlim[q].bl_target); + + /* If queue has less than target nothing more to do */ + if (n < 0) + goto out; + + if ( n > 8 ) { + /* Balance only a small amount (12.5%) at a time */ + n >>= 3; + } + + /* EMPTY queue needs special handling */ + if (q == BQ_EMPTY) { + moretodo |= btrimempty(n); + goto out; + } + + for (; n > 0; n--) { + struct buf *bp = bufqueues[q].tqh_first; + if (!bp) + break; + + /* check if it's stale */ + if ((time.tv_sec - bp->b_timestamp) > bufqlim[q].bl_stale) { + if (bcleanbuf(bp)) { + /* bawrite() issued, bp not ready */ + moretodo = 1; + } else { + /* release the cleaned buffer to BQ_EMPTY */ + SET(bp->b_flags, B_INVAL); + brelse(bp); + } + } else + break; + } + +out: + splx(s); + return (moretodo); +} + +int +btrimempty(int n) +{ + /* + * When struct buf are allocated dynamically, this would + * reclaim upto 'n' struct buf from the empty queue. 
+ */ + + return (0); +} + +void +bufqinc(int q) +{ + if ((q < 0) || (q >= BQUEUES)) + return; + + bufqlim[q].bl_num++; + return; +} + +void +bufqdec(int q) +{ + if ((q < 0) || (q >= BQUEUES)) + return; + + bufqlim[q].bl_num--; + return; +} + +void +buqlimprt(int all) +{ + int i; + static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY", "META" }; + + if (all) + for (i = 0; i < BQUEUES; i++) { + printf("%s : ", bname[i]); + printf("min = %d, ", (long)bufqlim[i].bl_nlow); + printf("cur = %d, ", (long)bufqlim[i].bl_num); + printf("max = %d, ", (long)bufqlim[i].bl_nlhigh); + printf("target = %d, ", (long)bufqlim[i].bl_target); + printf("stale after %d seconds\n", bufqlim[i].bl_stale); + } + else + for (i = 0; i < BQUEUES; i++) { + printf("%s : ", bname[i]); + printf("cur = %d, ", (long)bufqlim[i].bl_num); + } +} diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c new file mode 100644 index 000000000..f0d8f2618 --- /dev/null +++ b/bsd/vfs/vfs_cache.c @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Poul-Henning Kamp of the FreeBSD Project. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * + * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Name caching works as follows: + * + * Names found by directory scans are retained in a cache + * for future reference. It is managed LRU, so frequently + * used names will hang around. Cache is indexed by hash value + * obtained from (vp, name) where vp refers to the directory + * containing name. + * + * If it is a "negative" entry, (i.e. for a name that is known NOT to + * exist) the vnode pointer will be NULL. + * + * For simplicity (and economy of storage), names longer than + * a maximum length of NCHNAMLEN are not cached; they occur + * infrequently in any case, and are almost never of interest. + * + * Upon reaching the last segment of a path, if the reference + * is for DELETE, or NOCACHE is set (rewrite), and the + * name is located in the cache, it will be dropped. + */ + +/* + * Structures associated with name cacheing. + */ +#define NCHHASH(dvp, cnp) \ + (&nchashtbl[((dvp)->v_id + (cnp)->cn_hash) & nchash]) +LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ +u_long nchash; /* size of hash table - 1 */ +long numcache; /* number of cache entries allocated */ +TAILQ_HEAD(, namecache) nclruhead; /* LRU chain */ +struct nchstats nchstats; /* cache effectiveness statistics */ +u_long nextvnodeid = 0; +int doingcache = 1; /* 1 => enable the cache */ + +/* + * Delete an entry from its hash list and move it to the front + * of the LRU list for immediate reuse. 
+ */ +#if DIAGNOSTIC +#define PURGE(ncp) { \ + if (ncp->nc_hash.le_prev == 0) \ + panic("namecache purge le_prev"); \ + if (ncp->nc_hash.le_next == ncp) \ + panic("namecache purge le_next"); \ + LIST_REMOVE(ncp, nc_hash); \ + ncp->nc_hash.le_prev = 0; \ + TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \ + TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru); \ +} +#else +#define PURGE(ncp) { \ + LIST_REMOVE(ncp, nc_hash); \ + ncp->nc_hash.le_prev = 0; \ + TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \ + TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru); \ +} +#endif /* DIAGNOSTIC */ + +/* + * Move an entry that has been used to the tail of the LRU list + * so that it will be preserved for future use. + */ +#define TOUCH(ncp) { \ + if (ncp->nc_lru.tqe_next != 0) { \ + TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \ + TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru); \ + } \ +} + +/* + * Lookup an entry in the cache + * + * We don't do this if the segment name is long, simply so the cache + * can avoid holding long names (which would either waste space, or + * add greatly to the complexity). + * + * Lookup is called with dvp pointing to the directory to search, + * cnp pointing to the name of the entry being sought. If the lookup + * succeeds, the vnode is returned in *vpp, and a status of -1 is + * returned. If the lookup determines that the name does not exist + * (negative cacheing), a status of ENOENT is returned. If the lookup + * fails, a status of zero is returned. + */ + +int +cache_lookup(dvp, vpp, cnp) + struct vnode *dvp; + struct vnode **vpp; + struct componentname *cnp; +{ + register struct namecache *ncp, *nnp; + register struct nchashhead *ncpp; + + if (!doingcache) { + cnp->cn_flags &= ~MAKEENTRY; + return (0); + } + if (cnp->cn_namelen > NCHNAMLEN) { + nchstats.ncs_long++; + cnp->cn_flags &= ~MAKEENTRY; + return (0); + } + + ncpp = NCHHASH(dvp, cnp); + for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) { + nnp = ncp->nc_hash.le_next; + /* If one of the vp's went stale, don't bother anymore. 
*/ + if ((ncp->nc_dvpid != ncp->nc_dvp->v_id) || + (ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id)) { + nchstats.ncs_falsehits++; + PURGE(ncp); + continue; + } + /* Now that we know the vp's to be valid, is it ours ? */ + if (ncp->nc_dvp == dvp && + ncp->nc_nlen == cnp->cn_namelen && + !bcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen)) + break; + } + + /* We failed to find an entry */ + if (ncp == 0) { + nchstats.ncs_miss++; + return (0); + } + + /* We don't want to have an entry, so dump it */ + if ((cnp->cn_flags & MAKEENTRY) == 0) { + nchstats.ncs_badhits++; + PURGE(ncp); + return (0); + } + + /* We found a "positive" match, return the vnode */ + if (ncp->nc_vp) { + nchstats.ncs_goodhits++; + TOUCH(ncp); + *vpp = ncp->nc_vp; + return (-1); + } + + /* We found a negative match, and want to create it, so purge */ + if (cnp->cn_nameiop == CREATE) { + nchstats.ncs_badhits++; + PURGE(ncp); + return (0); + } + + /* + * We found a "negative" match, ENOENT notifies client of this match. + * The nc_vpid field records whether this is a whiteout. + */ + nchstats.ncs_neghits++; + TOUCH(ncp); + cnp->cn_flags |= ncp->nc_vpid; + return (ENOENT); +} + +/* + * Add an entry to the cache. + */ +void +cache_enter(dvp, vp, cnp) + struct vnode *dvp; + struct vnode *vp; + struct componentname *cnp; +{ + register struct namecache *ncp; + register struct nchashhead *ncpp; + + if (!doingcache) + return; + + /* + * If an entry that is too long, is entered, bad things happen. + * cache_lookup acts as the sentinel to make sure longer names + * are not stored. This here will prevent outsiders from doing + * something that is unexpected. + */ + if (cnp->cn_namelen > NCHNAMLEN) + panic("cache_enter: name too long"); + + /* + * We allocate a new entry if we are less than the maximum + * allowed and the one at the front of the LRU list is in use. + * Otherwise we use the one at the front of the LRU list. 
+ */ + if (numcache < desiredvnodes && + ((ncp = nclruhead.tqh_first) == NULL || + ncp->nc_hash.le_prev != 0)) { + /* Add one more entry */ + ncp = (struct namecache *) + _MALLOC_ZONE((u_long)sizeof *ncp, M_CACHE, M_WAITOK); + numcache++; + } else if (ncp = nclruhead.tqh_first) { + /* reuse an old entry */ + TAILQ_REMOVE(&nclruhead, ncp, nc_lru); + if (ncp->nc_hash.le_prev != 0) { +#if DIAGNOSTIC + if (ncp->nc_hash.le_next == ncp) + panic("cache_enter: le_next"); +#endif + LIST_REMOVE(ncp, nc_hash); + ncp->nc_hash.le_prev = 0; + } + } else { + /* give up */ + return; + } + + /* + * Fill in cache info, if vp is NULL this is a "negative" cache entry. + * For negative entries, we have to record whether it is a whiteout. + * the whiteout flag is stored in the nc_vpid field which is + * otherwise unused. + */ + ncp->nc_vp = vp; + if (vp) + ncp->nc_vpid = vp->v_id; + else + ncp->nc_vpid = cnp->cn_flags & ISWHITEOUT; + ncp->nc_dvp = dvp; + ncp->nc_dvpid = dvp->v_id; + ncp->nc_nlen = cnp->cn_namelen; + bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen); + TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru); + ncpp = NCHHASH(dvp, cnp); +#if DIAGNOSTIC + { + register struct namecache *p; + + for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) + if (p == ncp) + panic("cache_enter: duplicate"); + } +#endif + LIST_INSERT_HEAD(ncpp, ncp, nc_hash); +} + +/* + * Name cache initialization, from vfs_init() when we are booting + */ +void +nchinit() +{ + + TAILQ_INIT(&nclruhead); + nchashtbl = hashinit(desiredvnodes, M_CACHE, &nchash); +} + +/* + * Invalidate a all entries to particular vnode. + * + * We actually just increment the v_id, that will do it. The entries will + * be purged by lookup as they get found. If the v_id wraps around, we + * need to ditch the entire cache, to avoid confusion. No valid vnode will + * ever have (v_id == 0). 
+ */ +void +cache_purge(vp) + struct vnode *vp; +{ + struct namecache *ncp; + struct nchashhead *ncpp; + + vp->v_id = ++nextvnodeid; + if (nextvnodeid != 0) + return; + for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { + while (ncp = ncpp->lh_first) + PURGE(ncp); + } + vp->v_id = ++nextvnodeid; +} + +/* + * Flush all entries referencing a particular filesystem. + * + * Since we need to check it anyway, we will flush all the invalid + * entriess at the same time. + */ +void +cache_purgevfs(mp) + struct mount *mp; +{ + struct nchashhead *ncpp; + struct namecache *ncp, *nnp; + + /* Scan hash tables for applicable entries */ + for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { + for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) { + nnp = ncp->nc_hash.le_next; + if (ncp->nc_dvpid != ncp->nc_dvp->v_id || + (ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id) || + ncp->nc_dvp->v_mount == mp) { + PURGE(ncp); + } + } + } +} diff --git a/bsd/vfs/vfs_cluster.c b/bsd/vfs/vfs_cluster.c new file mode 100644 index 000000000..c877d4e7e --- /dev/null +++ b/bsd/vfs/vfs_cluster.c @@ -0,0 +1,2643 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vfs_cluster.c 8.10 (Berkeley) 3/28/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + + +#define CL_READ 0x01 +#define CL_ASYNC 0x02 +#define CL_COMMIT 0x04 +#define CL_NOMAP 0x08 +#define CL_PAGEOUT 0x10 +#define CL_AGE 0x20 +#define CL_DUMP 0x40 +#define CL_NOZERO 0x80 +#define CL_PAGEIN 0x100 + +/* + * throttle the number of async writes that + * can be outstanding on a single vnode + * before we issue a synchronous write + */ +#define ASYNC_THROTTLE 3 + +static int +cluster_iodone(bp) + struct buf *bp; +{ + int b_flags; + int error; + int total_size; + int total_resid; + int upl_offset; + upl_t upl; + struct buf *cbp; + struct buf *cbp_head; + struct buf *cbp_next; + struct buf *real_bp; + int commit_size; + int pg_offset; + + + cbp_head = (struct buf *)(bp->b_trans_head); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START, + cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0); + + for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) { + /* + * all I/O requests that are part of this transaction + * have to complete before we can process it + */ + if ( !(cbp->b_flags & B_DONE)) { + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, + cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); + + return 0; + } + } + error = 0; + total_size = 0; + total_resid = 0; + + cbp = cbp_head; + upl_offset = cbp->b_uploffset; + upl = 
cbp->b_pagelist; + b_flags = cbp->b_flags; + real_bp = cbp->b_real_bp; + + while (cbp) { + if (cbp->b_vectorcount > 1) + _FREE(cbp->b_vectorlist, M_SEGMENT); + + if ((cbp->b_flags & B_ERROR) && error == 0) + error = cbp->b_error; + + total_resid += cbp->b_resid; + total_size += cbp->b_bcount; + + cbp_next = cbp->b_trans_next; + + free_io_buf(cbp); + + cbp = cbp_next; + } + if ((b_flags & B_NEED_IODONE) && real_bp) { + if (error) { + real_bp->b_flags |= B_ERROR; + real_bp->b_error = error; + } + real_bp->b_resid = total_resid; + + biodone(real_bp); + } + if (error == 0 && total_resid) + error = EIO; + + if (b_flags & B_COMMIT_UPL) { + pg_offset = upl_offset & PAGE_MASK; + commit_size = (((pg_offset + total_size) + (PAGE_SIZE - 1)) / PAGE_SIZE) * PAGE_SIZE; + + if (error || (b_flags & B_NOCACHE)) { + int upl_abort_code; + + if (b_flags & B_PAGEOUT) + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; + else + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + + kernel_upl_abort_range(upl, upl_offset - pg_offset, commit_size, upl_abort_code); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, + upl, upl_offset - pg_offset, commit_size, + 0x80000000|upl_abort_code, 0); + + } else { + int upl_commit_flags = UPL_COMMIT_FREE_ON_EMPTY; + + if ( !(b_flags & B_PAGEOUT)) + upl_commit_flags |= UPL_COMMIT_CLEAR_DIRTY; + if (b_flags & B_AGE) + upl_commit_flags |= UPL_COMMIT_INACTIVATE; + + kernel_upl_commit_range(upl, upl_offset - pg_offset, + commit_size, upl_commit_flags, + UPL_GET_INTERNAL_PAGE_LIST(upl), + MAX_UPL_TRANSFER); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, + upl, upl_offset - pg_offset, commit_size, + upl_commit_flags, 0); + } + } else + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, + upl, upl_offset, 0, error, 0); + + return (error); +} + + +static void +cluster_zero(upl, upl_offset, size, flags, bp) + upl_t upl; + vm_offset_t upl_offset; + int size; + int flags; + struct buf *bp; +{ + vm_offset_t io_addr = 0; + 
kern_return_t kret; + + if ( !(flags & CL_NOMAP)) { + kret = kernel_upl_map(kernel_map, upl, &io_addr); + + if (kret != KERN_SUCCESS) + panic("cluster_zero: kernel_upl_map() failed with (%d)", kret); + if (io_addr == 0) + panic("cluster_zero: kernel_upl_map mapped 0"); + } else + io_addr = (vm_offset_t)bp->b_data; + bzero((caddr_t)(io_addr + upl_offset), size); + + if ( !(flags & CL_NOMAP)) { + kret = kernel_upl_unmap(kernel_map, upl); + + if (kret != KERN_SUCCESS) + panic("cluster_zero: kernel_upl_unmap failed"); + } +} + + +static int +cluster_io(vp, upl, upl_offset, f_offset, size, flags, real_bp) + struct vnode *vp; + upl_t upl; + vm_offset_t upl_offset; + off_t f_offset; + int size; + int flags; + struct buf *real_bp; +{ + struct buf *cbp; + struct iovec *iovp; + int io_flags; + int error = 0; + int retval = 0; + struct buf *cbp_head = 0; + struct buf *cbp_tail = 0; + upl_page_info_t *pl; + int pg_count; + int pg_offset; + + if (flags & CL_READ) + io_flags = (B_VECTORLIST | B_READ); + else + io_flags = (B_VECTORLIST | B_WRITEINPROG); + + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + + if (flags & CL_ASYNC) + io_flags |= (B_CALL | B_ASYNC); + if (flags & CL_AGE) + io_flags |= B_AGE; + if (flags & CL_DUMP) + io_flags |= B_NOCACHE; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_START, + (int)f_offset, size, upl_offset, flags, 0); + + if ((flags & CL_READ) && ((upl_offset + size) & PAGE_MASK) && (!(flags & CL_NOZERO))) { + /* + * then we are going to end up + * with a page that we can't complete (the file size wasn't a multiple + * of PAGE_SIZE and we're trying to read to the end of the file + * so we'll go ahead and zero out the portion of the page we can't + * read in from the file + */ + cluster_zero(upl, upl_offset + size, PAGE_SIZE - ((upl_offset + size) & PAGE_MASK), flags, real_bp); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE, + upl_offset + size, PAGE_SIZE - ((upl_offset + size) & PAGE_MASK), + flags, real_bp, 0); + } + while (size) { + 
size_t io_size; + int vsize; + int i; + int pl_index; + int pg_resid; + int num_contig; + daddr_t lblkno; + daddr_t blkno; + + if (size > MAXPHYSIO) + io_size = MAXPHYSIO; + else + io_size = size; + + if (error = VOP_CMAP(vp, f_offset, io_size, &blkno, &io_size, NULL)) { + if (error == EOPNOTSUPP) + panic("VOP_CMAP Unimplemented"); + break; + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE, + (int)f_offset, (int)blkno, io_size, 0, 0); + + if ( (!(flags & CL_READ) && (long)blkno == -1) || io_size == 0) { + error = EINVAL; + break; + } + lblkno = (daddr_t)(f_offset / PAGE_SIZE_64); + /* + * we have now figured out how much I/O we can do - this is in 'io_size' + * pl_index represents the first page in the 'upl' that the I/O will occur for + * pg_offset is the starting point in the first page for the I/O + * pg_count is the number of full and partial pages that 'io_size' encompasses + */ + pl_index = upl_offset / PAGE_SIZE; + pg_offset = upl_offset & PAGE_MASK; + pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE; + + if ((flags & CL_READ) && (long)blkno == -1) { + /* + * if we're reading and blkno == -1, then we've got a + * 'hole' in the file that we need to deal with by zeroing + * out the affected area in the upl + */ + cluster_zero(upl, upl_offset, io_size, flags, real_bp); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_NONE, + upl_offset, io_size, flags, real_bp, 0); + + pg_count = (io_size - pg_offset) / PAGE_SIZE; + + if (io_size == size && ((upl_offset + io_size) & PAGE_MASK)) + pg_count++; + + if (pg_count) { + if (pg_offset) + pg_resid = PAGE_SIZE - pg_offset; + else + pg_resid = 0; + if (flags & CL_COMMIT) + kernel_upl_commit_range(upl, + upl_offset + pg_resid, + pg_count * PAGE_SIZE, + UPL_COMMIT_CLEAR_DIRTY + | UPL_COMMIT_FREE_ON_EMPTY, + pl, MAX_UPL_TRANSFER); + } + upl_offset += io_size; + f_offset += io_size; + size -= io_size; + + if (cbp_head && pg_count) + goto start_io; + continue; + } else if (real_bp && 
(real_bp->b_blkno == real_bp->b_lblkno)) { + real_bp->b_blkno = blkno; + } + if (pg_count > 1) { + /* + * we need to allocate space for the vector list + */ + iovp = (struct iovec *)_MALLOC(sizeof(struct iovec) * pg_count, + M_SEGMENT, M_NOWAIT); + if (iovp == (struct iovec *) 0) { + /* + * if the allocation fails, then throttle down to a single page + */ + io_size = PAGE_SIZE - pg_offset; + pg_count = 1; + } + } + cbp = alloc_io_buf(vp); + + + if (pg_count == 1) + /* + * we use the io vector that's reserved in the buffer header + * this insures we can always issue an I/O even in a low memory + * condition that prevents the _MALLOC from succeeding... this + * is necessary to prevent deadlocks with the pager + */ + iovp = (struct iovec *)(&cbp->b_vects[0]); + + cbp->b_vectorlist = (void *)iovp; + cbp->b_vectorcount = pg_count; + + for (i = 0, vsize = io_size; i < pg_count; i++, iovp++) { + int psize; + + psize = PAGE_SIZE - pg_offset; + + if (psize > vsize) + psize = vsize; + + iovp->iov_len = psize; + iovp->iov_base = (caddr_t)upl_phys_page(pl, pl_index + i); + + if (iovp->iov_base == (caddr_t) 0) { + if (pg_count > 1) + _FREE(cbp->b_vectorlist, M_SEGMENT); + free_io_buf(cbp); + + error = EINVAL; + break; + } + iovp->iov_base += pg_offset; + pg_offset = 0; + + if (flags & CL_PAGEOUT) { + int s; + struct buf *bp; + + s = splbio(); + if (bp = incore(vp, lblkno + i)) { + if (!ISSET(bp->b_flags, B_BUSY)) { + bremfree(bp); + SET(bp->b_flags, (B_BUSY | B_INVAL)); + splx(s); + brelse(bp); + } else + panic("BUSY bp found in cluster_io"); + } + splx(s); + } + vsize -= psize; + } + if (error) + break; + + if (flags & CL_ASYNC) + cbp->b_iodone = (void *)cluster_iodone; + cbp->b_flags |= io_flags; + + cbp->b_lblkno = lblkno; + cbp->b_blkno = blkno; + cbp->b_bcount = io_size; + cbp->b_pagelist = upl; + cbp->b_uploffset = upl_offset; + cbp->b_trans_next = (struct buf *)0; + + if (flags & CL_READ) + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE, + cbp->b_lblkno, 
cbp->b_blkno, upl_offset, io_size, 0); + else + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE, + cbp->b_lblkno, cbp->b_blkno, upl_offset, io_size, 0); + + if (cbp_head) { + cbp_tail->b_trans_next = cbp; + cbp_tail = cbp; + } else { + cbp_head = cbp; + cbp_tail = cbp; + } + (struct buf *)(cbp->b_trans_head) = cbp_head; + + upl_offset += io_size; + f_offset += io_size; + size -= io_size; + + if ( !(upl_offset & PAGE_MASK) || size == 0) { + /* + * if we have no more I/O to issue or + * the current I/O we've prepared fully + * completes the last page in this request + * or it's been completed via a zero-fill + * due to a 'hole' in the file + * then go ahead and issue the I/O + */ +start_io: + if (flags & CL_COMMIT) + cbp_head->b_flags |= B_COMMIT_UPL; + if (flags & CL_PAGEOUT) + cbp_head->b_flags |= B_PAGEOUT; + + if (real_bp) { + cbp_head->b_flags |= B_NEED_IODONE; + cbp_head->b_real_bp = real_bp; + } + + for (cbp = cbp_head; cbp;) { + struct buf * cbp_next; + + if (io_flags & B_WRITEINPROG) + cbp->b_vp->v_numoutput++; + + cbp_next = cbp->b_trans_next; + + (void) VOP_STRATEGY(cbp); + cbp = cbp_next; + } + if ( !(flags & CL_ASYNC)) { + for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) + biowait(cbp); + + if (error = cluster_iodone(cbp_head)) { + retval = error; + error = 0; + } + } + cbp_head = (struct buf *)0; + cbp_tail = (struct buf *)0; + } + } + if (error) { + for (cbp = cbp_head; cbp;) { + struct buf * cbp_next; + + if (cbp->b_vectorcount > 1) + _FREE(cbp->b_vectorlist, M_SEGMENT); + cbp_next = cbp->b_trans_next; + free_io_buf(cbp); + cbp = cbp_next; + + } + pg_offset = upl_offset & PAGE_MASK; + pg_count = (size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE; + + if (flags & CL_COMMIT) { + int upl_abort_code; + + if (flags & CL_PAGEOUT) + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; + else if (flags & CL_PAGEIN) + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR; + else + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + + 
kernel_upl_abort_range(upl, upl_offset - pg_offset, pg_count * PAGE_SIZE, upl_abort_code); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE, + upl, upl_offset - pg_offset, pg_count * PAGE_SIZE, error, 0); + } + if (real_bp) { + real_bp->b_flags |= B_ERROR; + real_bp->b_error = error; + + biodone(real_bp); + } + if (retval == 0) + retval = error; + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, + (int)f_offset, size, upl_offset, retval, 0); + + return (retval); +} + + +static int +cluster_rd_prefetch(vp, object, f_offset, size, filesize, devblocksize) + struct vnode *vp; + void *object; + off_t f_offset; + u_int size; + off_t filesize; + int devblocksize; +{ + upl_t upl; + upl_page_info_t *pl; + int pages_in_upl; + int start_pg; + int last_pg; + int last_valid; + int io_size; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START, + (int)f_offset, size, (int)filesize, 0, 0); + + if (f_offset >= filesize) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, + (int)f_offset, 0, 0, 0, 0); + return(0); + } + if (memory_object_page_op(object, (vm_offset_t)f_offset, 0, 0, 0) == KERN_SUCCESS) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, + (int)f_offset, 0, 0, 0, 0); + return(0); + } + if (size > MAXPHYSIO) + size = MAXPHYSIO; + else + size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + + if ((off_t)size > (filesize - f_offset)) + size = ((filesize - f_offset) + (devblocksize - 1)) & ~(devblocksize - 1); + + pages_in_upl = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; + + + vm_fault_list_request(object, (vm_object_offset_t)f_offset, pages_in_upl * PAGE_SIZE, &upl, NULL, 0, + UPL_CLEAN_IN_PLACE | UPL_NO_SYNC | UPL_SET_INTERNAL); + if (upl == (upl_t) 0) + return(0); + + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + + /* + * scan from the beginning of the upl looking for the first + * non-valid page.... this will become the first page in + * the request we're going to make to 'cluster_io'... 
if all + * of the pages are valid, we won't call through to 'cluster_io' + */ + for (start_pg = 0; start_pg < pages_in_upl; start_pg++) { + if (!upl_valid_page(pl, start_pg)) + break; + } + + /* + * scan from the starting invalid page looking for a valid + * page before the end of the upl is reached, if we + * find one, then it will be the last page of the request to + * 'cluster_io' + */ + for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { + if (upl_valid_page(pl, last_pg)) + break; + } + + /* + * if we find any more free valid pages at the tail of the upl + * than update maxra accordingly.... + */ + for (last_valid = last_pg; last_valid < pages_in_upl; last_valid++) { + if (!upl_valid_page(pl, last_valid)) + break; + } + if (start_pg < last_pg) { + vm_offset_t upl_offset; + + /* + * we found a range of 'invalid' pages that must be filled + * 'size' has already been clipped to the LEOF + * make sure it's at least a multiple of the device block size + */ + upl_offset = start_pg * PAGE_SIZE; + io_size = (last_pg - start_pg) * PAGE_SIZE; + + if ((upl_offset + io_size) > size) { + io_size = size - upl_offset; + + KERNEL_DEBUG(0xd001000, upl_offset, size, io_size, 0, 0); + } + cluster_io(vp, upl, upl_offset, f_offset + upl_offset, io_size, + CL_READ | CL_COMMIT | CL_ASYNC | CL_AGE, (struct buf *)0); + } + if (start_pg) { + /* + * start_pg of non-zero indicates we found some already valid pages + * at the beginning of the upl.... we need to release these without + * modifying there state + */ + kernel_upl_abort_range(upl, 0, start_pg * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 50)) | DBG_FUNC_NONE, + upl, 0, start_pg * PAGE_SIZE, 0, 0); + } + if (last_pg < pages_in_upl) { + /* + * the set of pages that we issued an I/O for did not extend all the + * way to the end of the upl... 
so just release them without modifying + * there state + */ + kernel_upl_abort_range(upl, last_pg * PAGE_SIZE, (pages_in_upl - last_pg) * PAGE_SIZE, + UPL_ABORT_FREE_ON_EMPTY); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 50)) | DBG_FUNC_NONE, + upl, last_pg * PAGE_SIZE, (pages_in_upl - last_pg) * PAGE_SIZE, 0, 0); + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, + (int)f_offset + (last_valid * PAGE_SIZE), 0, 0, 0, 0); + + return(last_valid); +} + + + +static void +cluster_rd_ahead(vp, object, b_lblkno, e_lblkno, filesize, devblocksize) + struct vnode *vp; + void *object; + daddr_t b_lblkno; + daddr_t e_lblkno; + off_t filesize; + int devblocksize; +{ + daddr_t r_lblkno; + off_t f_offset; + int size_of_prefetch; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START, + b_lblkno, e_lblkno, vp->v_lastr, 0, 0); + + if (b_lblkno == vp->v_lastr && b_lblkno == e_lblkno) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + vp->v_ralen, vp->v_maxra, vp->v_lastr, 0, 0); + return; + } + + if (vp->v_lastr == -1 || (b_lblkno != vp->v_lastr && b_lblkno != (vp->v_lastr + 1) && b_lblkno != (vp->v_maxra + 1))) { + vp->v_ralen = 0; + vp->v_maxra = 0; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + vp->v_ralen, vp->v_maxra, vp->v_lastr, 1, 0); + + return; + } + vp->v_ralen = vp->v_ralen ? 
min(MAXPHYSIO/PAGE_SIZE, vp->v_ralen << 1) : 1; + + if (((e_lblkno + 1) - b_lblkno) > vp->v_ralen) + vp->v_ralen = min(MAXPHYSIO/PAGE_SIZE, (e_lblkno + 1) - b_lblkno); + + if (e_lblkno < vp->v_maxra) { + if ((vp->v_maxra - e_lblkno) > ((MAXPHYSIO/PAGE_SIZE) / 4)) { + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + vp->v_ralen, vp->v_maxra, vp->v_lastr, 2, 0); + return; + } + } + r_lblkno = max(e_lblkno, vp->v_maxra) + 1; + f_offset = (off_t)r_lblkno * PAGE_SIZE_64; + + size_of_prefetch = cluster_rd_prefetch(vp, object, f_offset, vp->v_ralen * PAGE_SIZE, filesize, devblocksize); + + if (size_of_prefetch) + vp->v_maxra = r_lblkno + (size_of_prefetch - 1); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + vp->v_ralen, vp->v_maxra, vp->v_lastr, 3, 0); +} + + +cluster_pageout(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags) + struct vnode *vp; + upl_t upl; + vm_offset_t upl_offset; + off_t f_offset; + int size; + off_t filesize; + int devblocksize; + int flags; +{ + int io_size; + int pg_size; + off_t max_size; + int local_flags = CL_PAGEOUT; + + if ((flags & UPL_IOSYNC) == 0) + local_flags |= CL_ASYNC; + if ((flags & UPL_NOCOMMIT) == 0) + local_flags |= CL_COMMIT; + + if (upl == (upl_t) 0) + panic("cluster_pageout: can't handle NULL upl yet\n"); + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE, + (int)f_offset, size, (int)filesize, local_flags, 0); + + /* + * If they didn't specify any I/O, then we are done... 
+ * we can't issue an abort because we don't know how + * big the upl really is + */ + if (size <= 0) + return (EINVAL); + + if (vp->v_mount->mnt_flag & MNT_RDONLY) { + if (local_flags & CL_COMMIT) + kernel_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + return (EROFS); + } + /* + * can't page-in from a negative offset + * or if we're starting beyond the EOF + * or if the file offset isn't page aligned + * or the size requested isn't a multiple of PAGE_SIZE + */ + if (f_offset < 0 || f_offset >= filesize || + (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) { + if (local_flags & CL_COMMIT) + kernel_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + return (EINVAL); + } + max_size = filesize - f_offset; + + if (size < max_size) + io_size = size; + else + io_size = (max_size + (devblocksize - 1)) & ~(devblocksize - 1); + + pg_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; + + if (size > pg_size) { + if (local_flags & CL_COMMIT) + kernel_upl_abort_range(upl, upl_offset + pg_size, size - pg_size, + UPL_ABORT_FREE_ON_EMPTY); + } + + return (cluster_io(vp, upl, upl_offset, f_offset, io_size, + local_flags, (struct buf *)0)); +} + + +cluster_pagein(vp, upl, upl_offset, f_offset, size, filesize, devblocksize, flags) + struct vnode *vp; + upl_t upl; + vm_offset_t upl_offset; + off_t f_offset; + int size; + off_t filesize; + int devblocksize; + int flags; +{ + u_int io_size; + int pg_size; + off_t max_size; + int retval; + int local_flags = 0; + void *object = 0; + + + /* + * If they didn't ask for any data, then we are done... 
	 * we can't issue an abort because we don't know how
	 * big the upl really is
	 */
	if (size <= 0)
	        return (EINVAL);

	if ((flags & UPL_NOCOMMIT) == 0)
	        local_flags = CL_COMMIT;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE,
	        (int)f_offset, size, (int)filesize, local_flags, 0);

	/*
	 * can't page-in from a negative offset
	 * or if we're starting beyond the EOF
	 * or if the file offset isn't page aligned
	 * or the size requested isn't a multiple of PAGE_SIZE
	 */
	if (f_offset < 0 || f_offset >= filesize ||
	   (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) {
	        if (local_flags & CL_COMMIT)
	                kernel_upl_abort_range(upl, upl_offset, size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	        return (EINVAL);
	}
	max_size = filesize - f_offset;

	/* clip the I/O at EOF, rounded up to a device block boundary */
	if (size < max_size)
	        io_size = size;
	else
	        io_size = (max_size + (devblocksize - 1)) & ~(devblocksize - 1);

	pg_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	if (upl == (upl_t) 0) {
	        /* no upl supplied... build one covering the pages we'll read */
	        object = ubc_getobject(vp, UBC_PAGINGOP|UBC_NOREACTIVATE);
	        if (object == (void *)NULL)
	                panic("cluster_pagein: ubc_getobject failed");

	        vm_fault_list_request(object, (vm_offset_t)f_offset, pg_size, &upl, NULL, 0,
	                UPL_CLEAN_IN_PLACE | UPL_NO_SYNC | UPL_SET_INTERNAL);
	        if (upl == (upl_t) 0)
	                return (EINVAL);

	        upl_offset = (vm_offset_t)0;
	        size = pg_size;
	}
	/* release any whole pages the caller handed us beyond what we'll read */
	if (size > pg_size) {
	        if (local_flags & CL_COMMIT)
	                kernel_upl_abort_range(upl, upl_offset + pg_size, size - pg_size,
	                        UPL_ABORT_FREE_ON_EMPTY);
	}

	retval = cluster_io(vp, upl, upl_offset, f_offset, io_size,
	                local_flags | CL_READ | CL_PAGEIN, (struct buf *)0);

	if (retval == 0) {
	        int b_lblkno;	/* first page-sized logical block of this read */
	        int e_lblkno;	/* last page-sized logical block of this read */

	        b_lblkno = (int)(f_offset / PAGE_SIZE_64);
	        e_lblkno = (int)
	                ((f_offset + ((off_t)io_size - 1)) / PAGE_SIZE_64);

	        if (!(flags & UPL_NORDAHEAD) && !(vp->v_flag & VRAOFF)) {
	                if (object == (void *)0) {
	                        object = ubc_getobject(vp, UBC_PAGINGOP|UBC_NOREACTIVATE);
	                        if (object == (void *)NULL)
	                                panic("cluster_pagein: ubc_getobject failed");
	                }
	                /*
	                 * we haven't read the last page in of the file yet
	                 * so let's try to read ahead if we're in
	                 * a sequential access pattern
	                 */
	                cluster_rd_ahead(vp, object, b_lblkno, e_lblkno, filesize, devblocksize);
	        }
	        vp->v_lastr = e_lblkno;		/* remember last block read for sequential detection */
	}
	return (retval);
}


/*
 * cluster_bp - submit the I/O described by a struct buf (which must
 * carry a page list in b_pagelist) through cluster_io.  The transfer
 * direction comes from B_READ in b_flags; the file offset is derived
 * from the buffer's logical block number via ubc_blktooff.  The I/O is
 * issued asynchronously (CL_ASYNC) and without mapping (CL_NOMAP).
 * Returns the result of cluster_io.  (K&R - implicit int return.)
 */
cluster_bp(bp)
	struct buf *bp;
{
	off_t f_offset;
	int   flags;

	if (bp->b_pagelist == (upl_t) 0)
	        panic("cluster_bp: can't handle NULL upl yet\n");
	if (bp->b_flags & B_READ)
	        flags = CL_ASYNC | CL_NOMAP | CL_READ;
	else
	        flags = CL_ASYNC | CL_NOMAP;

	f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno);

	return (cluster_io(bp->b_vp, bp->b_pagelist, 0, f_offset, bp->b_bcount, flags, bp));
}


/*
 * cluster_write - top-level clustered write entry point.
 *
 * Decides, per iovec segment, between the buffered path
 * (cluster_write_x: copies through the unified buffer cache) and the
 * direct "nocopy" path (cluster_nocopy_write: wires the user's pages
 * and writes from them in place).  The nocopy path is only used for
 * page-aligned, >= 4-page, user-space transfers on vnodes marked
 * VNOCACHE_DATA, with no head/tail zero-fill required.
 *
 * oldEOF/newEOF    - file size before/after this write
 * headOff/tailOff  - zero-fill boundaries for IO_HEADZEROFILL /
 *                    IO_TAILZEROFILL
 *
 * Returns 0 or an errno from the underlying path.
 * (K&R definition - implicit int return.)
 */
cluster_write(vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
	struct vnode *vp;
	struct uio   *uio;
	off_t         oldEOF;
	off_t         newEOF;
	off_t         headOff;
	off_t         tailOff;
	int           devblocksize;
	int           flags;
{
	void  *object;
	int    prev_resid;
	int    clip_size;
	off_t  max_io_size;
	struct iovec *iov;
	int    retval = 0;


	object = ubc_getobject(vp, UBC_NOREACTIVATE);
	if (object == (void *)NULL)
	        panic("cluster_write: ubc_getobject failed");

	/*
	 * We set a threshhold of 4 pages to decide if the nocopy
	 * write loop is worth the trouble...
 */

	/* small, zero-filling, kernel-space, or cached writes all take the buffered path */
	if ((!uio) || (uio->uio_resid < 4 * PAGE_SIZE) ||
	    (flags & IO_TAILZEROFILL) || (flags & IO_HEADZEROFILL) ||
	    (uio->uio_segflg != UIO_USERSPACE) || (!(vp->v_flag & VNOCACHE_DATA)))
	  {
	    retval = cluster_write_x(object, vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
	    return(retval);
	  }

	while (uio->uio_resid && uio->uio_offset < newEOF && retval == 0)
	  {
	    /* we know we have a resid, so this is safe */
	    iov = uio->uio_iov;
	    while (iov->iov_len == 0) {
	      uio->uio_iov++;
	      uio->uio_iovcnt--;
	      iov = uio->uio_iov;
	    }

	    if (uio->uio_offset & PAGE_MASK_64)
	      {
		/* Bring the file offset write up to a pagesize boundary */
		clip_size = (PAGE_SIZE - (uio->uio_offset & PAGE_MASK_64));
		if (uio->uio_resid < clip_size)
		  clip_size = uio->uio_resid;
		/*
		 * Fake the resid going into the cluster_write_x call
		 * and restore it on the way out.
		 */
		prev_resid = uio->uio_resid;
		uio->uio_resid = clip_size;
		retval = cluster_write_x(object, vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
		uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
	      }
	    else if ((int)iov->iov_base & PAGE_MASK_64)
	      {
		/* user buffer not page aligned... this segment goes the buffered route */
		clip_size = iov->iov_len;
		prev_resid = uio->uio_resid;
		uio->uio_resid = clip_size;
		retval = cluster_write_x(object, vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
		uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
	      }
	    else
	      {
		/*
		 * If we come in here, we know the offset into
		 * the file is on a pagesize boundary
		 */

		max_io_size = newEOF - uio->uio_offset;
		clip_size = uio->uio_resid;
		if (iov->iov_len < clip_size)
		  clip_size = iov->iov_len;
		if (max_io_size < clip_size)
		  clip_size = max_io_size;

		if (clip_size < PAGE_SIZE)
		  {
		    /*
		     * Take care of tail end of write in this vector
		     */
		    prev_resid = uio->uio_resid;
		    uio->uio_resid = clip_size;
		    retval = cluster_write_x(object, vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
		    uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
		  }
		else
		  {
		    /* round clip_size down to a multiple of pagesize */
		    clip_size = clip_size & ~(PAGE_MASK);
		    prev_resid = uio->uio_resid;
		    uio->uio_resid = clip_size;
		    retval = cluster_nocopy_write(object, vp, uio, newEOF, devblocksize, flags);
		    /* nocopy may stop early (e.g. couldn't wire pages)... finish via the buffered path */
		    if ((retval == 0) && uio->uio_resid)
		      retval = cluster_write_x(object, vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags);
		    uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
		  }
	  } /* end else */
	} /* end while */
	return(retval);
}

/*
 * cluster_nocopy_write - direct (uncached) write path.
 *
 * Wires the user's buffer pages with vm_map_get_upl (retrying with
 * increasing force_data_sync up to 3 times until all pages are valid),
 * evicts any overlapping pages already in the cache (UPL_POP_DUMP),
 * then issues a synchronous cluster_io directly from the user's pages.
 *
 * Precondition (guaranteed by cluster_write): uio_offset is page
 * aligned, resid is a page multiple, and resid <= iov_len.
 *
 * Returns 0 when it stops (possibly with resid remaining for the
 * caller's buffered fallback), or an error from cluster_io.
 * (K&R definition - implicit int return.)
 */
static
cluster_nocopy_write(object, vp, uio, newEOF, devblocksize, flags)
	void         *object;
	struct vnode *vp;
	struct uio   *uio;
	off_t         newEOF;
	int           devblocksize;
	int           flags;
{
	upl_t            upl;
	upl_page_info_t *pl;
	off_t            upl_f_offset;
	vm_offset_t      upl_offset;
	off_t            max_io_size;
	int              io_size;
	int              upl_size;
	int              upl_needed_size;
	int              pages_in_pl;
	int              upl_flags;
	kern_return_t    kret;
	struct iovec    *iov;
	int              i;
	int              force_data_sync;
	int              error = 0;

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START,
	        (int)uio->uio_offset, (int)uio->uio_resid,
	        (int)newEOF, devblocksize, 0);

	/*
	 * When we enter this routine, we know
	 *  -- the offset into the file is on a pagesize boundary
	 *  -- the resid is a page multiple
	 *  -- the resid will not exceed iov_len
	 */

	iov = uio->uio_iov;

	while (uio->uio_resid && uio->uio_offset < newEOF && error == 0) {

	  io_size = uio->uio_resid;
	  if (io_size > MAXPHYSIO)
	    io_size = MAXPHYSIO;

	  upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64;
	  upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;

	  KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START,
	        (int)upl_offset, upl_needed_size, iov->iov_base, io_size, 0);

	  /* try up to 3 times to get a pagelist whose pages are all valid */
	  for (force_data_sync = 0; force_data_sync < 3; force_data_sync++)
	    {
	      pages_in_pl = 0;
	      upl_size = upl_needed_size;
	      upl_flags = UPL_COPYOUT_FROM | UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
UPL_SET_INTERNAL;

	      kret = vm_map_get_upl(current_map(),
	                            (vm_offset_t)iov->iov_base & ~PAGE_MASK,
	                            &upl_size, &upl, &pl, &pages_in_pl, &upl_flags, force_data_sync);

	      pages_in_pl = upl_size / PAGE_SIZE;

	      if (kret != KERN_SUCCESS)
	        {
	          KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
	                       0, 0, 0, kret, 0);

	          KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
	                       (int)uio->uio_offset, (int)uio->uio_resid, kret, 1, 0);

	          /* cluster_nocopy_write: failed to get pagelist */
	          /* do not return kret here */
	          return(0);
	        }

	      /* verify every page in the list came back valid */
	      for(i=0; i < pages_in_pl; i++)
	        {
	          if (!upl_valid_page(pl, i))
	            break;
	        }

	      if (i == pages_in_pl)
	        break;

	      /* some page was invalid... drop this upl and retry with more force */
	      kernel_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
	                             UPL_ABORT_FREE_ON_EMPTY);
	    }

	  if (force_data_sync >= 3)
	    {
	      /* couldn't get a fully valid pagelist... let the caller fall back */
	      KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
	                   0, 0, 0, kret, 0);

	      KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
	                   (int)uio->uio_offset, (int)uio->uio_resid, kret, 2, 0);
	      return(0);
	    }

	  /*
	   * Consider the possibility that upl_size wasn't satisfied.
	   */
	  if (upl_size != upl_needed_size)
	    io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK;

	  KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END,
	               (int)upl_offset, upl_size, iov->iov_base, io_size, 0);

	  if (io_size == 0)
	    {
	      kernel_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
	                             UPL_ABORT_FREE_ON_EMPTY);
	      KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
	                   (int)uio->uio_offset, uio->uio_resid, 0, 3, 0);

	      return(0);
	    }

	  /*
	   * Now look for pages already in the cache
	   * and throw them away.
	   */

	  upl_f_offset = uio->uio_offset;   /* this is page aligned in the file */
	  max_io_size = io_size;

	  while (max_io_size) {

	    /*
	     * Flag UPL_POP_DUMP says if the page is found
	     * in the page cache it must be thrown away.
	     */
	    memory_object_page_op(object, (vm_offset_t)upl_f_offset,
	                          UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DUMP,
	                          0, 0);
	    max_io_size -= PAGE_SIZE;
	    upl_f_offset += PAGE_SIZE;
	  }

	  /*
	   * issue a synchronous write to cluster_io
	   */

	  KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START,
	               (int)upl_offset, (int)uio->uio_offset, io_size, 0, 0);

	  error = cluster_io(vp, upl, upl_offset, uio->uio_offset,
	                       io_size, 0, (struct buf *)0);

	  if (error == 0) {
	    /*
	     * The cluster_io write completed successfully,
	     * update the uio structure and commit.
	     */

	    kernel_upl_commit_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
	                            UPL_COMMIT_SET_DIRTY | UPL_COMMIT_FREE_ON_EMPTY,
	                            pl, MAX_UPL_TRANSFER);

	    iov->iov_base += io_size;
	    iov->iov_len -= io_size;
	    uio->uio_resid -= io_size;
	    uio->uio_offset += io_size;
	  }
	  else {
	    kernel_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size,
	                           UPL_ABORT_FREE_ON_EMPTY);
	  }

	  KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END,
	               (int)upl_offset, (int)uio->uio_offset, (int)uio->uio_resid, error, 0);

	} /* end while */


	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END,
	             (int)uio->uio_offset, (int)uio->uio_resid, error, 4, 0);

	return (error);
}

/*
 * cluster_write_x - buffered write path.
 *
 * Copies the user data (and/or zero-fills the IO_HEADZEROFILL /
 * IO_TAILZEROFILL ranges) through upls built on the vnode's pager
 * object.  Partial first/last pages that aren't valid in the cache are
 * pre-read so the copy doesn't destroy their contents.  Completed dirty
 * pages are either committed dirty and tracked in the vnode's delayed
 * write cluster (v_cstart/v_lastw/v_clen/v_ciosiz) or pushed out
 * immediately via cluster_io, depending on IO_SYNC and how the range
 * relates to the existing cluster.
 *
 * May be called with uio == NULL (pure zero-fill, e.g. from ftruncate
 * extending a file).  Returns 0 or an errno.
 * (K&R definition - implicit int return.)
 */
static
cluster_write_x(object, vp, uio, oldEOF, newEOF, headOff, tailOff, devblocksize, flags)
	void         *object;
	struct vnode *vp;
	struct uio   *uio;
	off_t         oldEOF;
	off_t         newEOF;
	off_t         headOff;
	off_t         tailOff;
	int           devblocksize;
	int           flags;
{
	upl_page_info_t *pl;
	upl_t            upl;
	vm_offset_t      upl_offset;
	int              upl_size;
	off_t            upl_f_offset;
	int              pages_in_upl;
	int              start_offset;
	int              xfer_resid;
	int              io_size;
	int              io_size_before_rounding;
	int              io_flags;
	vm_offset_t      io_address;
	int              io_offset;
	int              bytes_to_zero;
	int              bytes_to_move;
	kern_return_t    kret;
	int              retval = 0;
	int              uio_resid;
	long long        total_size;
	long long        zero_cnt;	/* bytes of head zero-fill remaining */
	off_t            zero_off;
	long long        zero_cnt1;	/* bytes of tail zero-fill remaining */
	off_t            zero_off1;
	daddr_t          start_blkno;
	daddr_t
last_blkno;

	if (uio) {
	        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
	                (int)uio->uio_offset, uio->uio_resid, (int)oldEOF, (int)newEOF, 0);

	        uio_resid = uio->uio_resid;
	} else {
	        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
	                0, 0, (int)oldEOF, (int)newEOF, 0);

	        uio_resid = 0;
	}
	zero_cnt = 0;
	zero_cnt1 = 0;

	if (flags & IO_HEADZEROFILL) {
	        /*
	         * some filesystems (HFS is one) don't support unallocated holes within a file...
	         * so we zero fill the intervening space between the old EOF and the offset
	         * where the next chunk of real data begins.... ftruncate will also use this
	         * routine to zero fill to the new EOF when growing a file... in this case, the
	         * uio structure will not be provided
	         */
	        if (uio) {
	                if (headOff < uio->uio_offset) {
	                        zero_cnt = uio->uio_offset - headOff;
	                        zero_off = headOff;
	                }
	        } else if (headOff < newEOF) {
	                zero_cnt = newEOF - headOff;
	                zero_off = headOff;
	        }
	}
	if (flags & IO_TAILZEROFILL) {
	        if (uio) {
	                zero_off1 = uio->uio_offset + uio->uio_resid;

	                if (zero_off1 < tailOff)
	                        zero_cnt1 = tailOff - zero_off1;
	        }
	}
	if (zero_cnt == 0 && uio == (struct uio *) 0)
	  {
	        /* nothing to zero-fill and no data to copy... done */
	        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
	                retval, 0, 0, 0, 0);
	        return (0);
	  }

	while ((total_size = (uio_resid + zero_cnt + zero_cnt1)) && retval == 0) {
	        /*
	         * for this iteration of the loop, figure out where our starting point is
	         */
	        if (zero_cnt) {
	                start_offset = (int)(zero_off & PAGE_MASK_64);
	                upl_f_offset = zero_off - start_offset;
	        } else if (uio_resid) {
	                start_offset = (int)(uio->uio_offset & PAGE_MASK_64);
	                upl_f_offset = uio->uio_offset - start_offset;
	        } else {
	                start_offset = (int)(zero_off1 & PAGE_MASK_64);
	                upl_f_offset = zero_off1 - start_offset;
	        }
	        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
	                (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);

	        if (total_size > (long long)MAXPHYSIO)
	                total_size = MAXPHYSIO;

	        /*
	         * compute the size of the upl needed to encompass
	         * the requested write... limit each call to cluster_io
	         * to at most MAXPHYSIO, make sure to account for
	         * a starting offset that's not page aligned
	         */
	        upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	        if (upl_size > MAXPHYSIO)
	                upl_size = MAXPHYSIO;

	        pages_in_upl = upl_size / PAGE_SIZE;
	        io_size = upl_size - start_offset;

	        if ((long long)io_size > total_size)
	                io_size = total_size;

	        start_blkno = (daddr_t)(upl_f_offset / PAGE_SIZE_64);
	        last_blkno = start_blkno + pages_in_upl;

	        kret = vm_fault_list_request(object,
	                (vm_object_offset_t)upl_f_offset, upl_size, &upl, NULL, 0,
	                (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL));

	        if (kret != KERN_SUCCESS)
	                panic("cluster_write: failed to get pagelist");

	        pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_NONE,
	                upl, (int)upl_f_offset, upl_size, start_offset, 0);


	        if (start_offset && !upl_valid_page(pl, 0)) {
	                int read_size;

	                /*
	                 * we're starting in the middle of the first page of the upl
	                 * and the page isn't currently valid, so we're going to have
	                 * to read it in first... this is a synchronous operation
	                 */
	                read_size = PAGE_SIZE;

	                if ((upl_f_offset + read_size) > newEOF) {
	                        read_size = newEOF - upl_f_offset;
	                        read_size = (read_size + (devblocksize - 1)) & ~(devblocksize - 1);
	                }
	                retval = cluster_io(vp, upl, 0, upl_f_offset, read_size,
	                                CL_READ, (struct buf *)0);
	                if (retval) {
	                        /*
	                         * we had an error during the read which causes us to abort
	                         * the current cluster_write request... before we do, we need
	                         * to release the rest of the pages in the upl without modifying
	                         * their state and mark the failed page in error
	                         */
	                        kernel_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
	                        kernel_upl_abort(upl, 0);

	                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
	                                upl, 0, 0, retval, 0);
	                        break;
	                }
	        }
	        if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) {
	                /*
	                 * the last offset we're writing to in this upl does not end on a page
	                 * boundary... if it's not beyond the old EOF, then we'll also need to
	                 * pre-read this page in if it isn't already valid
	                 */
	                upl_offset = upl_size - PAGE_SIZE;

	                if ((upl_f_offset + start_offset + io_size) < oldEOF &&
	                    !upl_valid_page(pl, upl_offset / PAGE_SIZE)) {
	                        int read_size;

	                        read_size = PAGE_SIZE;

	                        if ((upl_f_offset + upl_offset + read_size) > newEOF) {
	                                read_size = newEOF - (upl_f_offset + upl_offset);
	                                read_size = (read_size + (devblocksize - 1)) & ~(devblocksize - 1);
	                        }
	                        retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size,
	                                        CL_READ, (struct buf *)0);
	                        if (retval) {
	                                /*
	                                 * we had an error during the read which causes us to abort
	                                 * the current cluster_write request... before we do, we need
	                                 * to release the rest of the pages in the upl without modifying
	                                 * their state and mark the failed page in error
	                                 */
	                                kernel_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES);
	                                kernel_upl_abort(upl, 0);

	                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
	                                        upl, 0, 0, retval, 0);
	                                break;
	                        }
	                }
	        }
	        if ((kret = kernel_upl_map(kernel_map, upl, &io_address)) != KERN_SUCCESS)
	                panic("cluster_write: kernel_upl_map failed\n");
	        xfer_resid = io_size;
	        io_offset = start_offset;

	        /* zero-fill the head range before copying any user data */
	        while (zero_cnt && xfer_resid) {

	                if (zero_cnt < (long long)xfer_resid)
	                        bytes_to_zero = zero_cnt;
	                else
	                        bytes_to_zero = xfer_resid;

	                if ( !(flags & IO_NOZEROVALID)) {
	                        bzero((caddr_t)(io_address + io_offset), bytes_to_zero);

	                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
	                                (int)upl_f_offset + io_offset, bytes_to_zero,
	                                (int)zero_cnt, xfer_resid, 0);
	                } else {
	                        /* IO_NOZEROVALID: only zero pages that aren't already valid in the cache */
	                        bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64));

	                        if ( !upl_valid_page(pl, (int)(zero_off / PAGE_SIZE_64))) {
	                                bzero((caddr_t)(io_address + io_offset), bytes_to_zero);

	                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
	                                        (int)upl_f_offset + io_offset, bytes_to_zero,
	                                        (int)zero_cnt, xfer_resid, 0);
	                        }
	                }
	                xfer_resid -= bytes_to_zero;
	                zero_cnt -= bytes_to_zero;
	                zero_off += bytes_to_zero;
	                io_offset += bytes_to_zero;
	        }
	        if (xfer_resid && uio_resid) {
	                /* copy the user's data into the mapped upl */
	                bytes_to_move = min(uio_resid, xfer_resid);

	                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 42)) | DBG_FUNC_NONE,
	                        (int)uio->uio_offset, bytes_to_move, uio_resid, xfer_resid, 0);

	                retval = uiomove((caddr_t)(io_address + io_offset), bytes_to_move, uio);

	                if (retval) {
	                        if ((kret = kernel_upl_unmap(kernel_map, upl)) != KERN_SUCCESS)
	                                panic("cluster_write: kernel_upl_unmap failed\n");
	                        kernel_upl_abort(upl, UPL_ABORT_DUMP_PAGES);

	                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE,
	                                upl, 0, 0, retval, 0);
	                } else {
	                        uio_resid -= bytes_to_move;
	                        xfer_resid -= bytes_to_move;
	                        io_offset += bytes_to_move;
	                }
	        }
	        /* zero-fill the tail range after the user data */
	        while (xfer_resid && zero_cnt1 && retval == 0) {

	                if (zero_cnt1 < (long long)xfer_resid)
	                        bytes_to_zero = zero_cnt1;
	                else
	                        bytes_to_zero = xfer_resid;

	                if ( !(flags & IO_NOZEROVALID)) {
	                        bzero((caddr_t)(io_address + io_offset), bytes_to_zero);

	                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
	                                (int)upl_f_offset + io_offset,
	                                bytes_to_zero, (int)zero_cnt1, xfer_resid, 0);
	                } else {
	                        bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off1 & PAGE_MASK_64));
	                        if ( !upl_valid_page(pl, (int)(zero_off1 / PAGE_SIZE_64))) {
	                                bzero((caddr_t)(io_address + io_offset), bytes_to_zero);

	                                KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
	                                        (int)upl_f_offset + io_offset,
	                                        bytes_to_zero, (int)zero_cnt1, xfer_resid, 0);
	                        }
	                }
	                xfer_resid -= bytes_to_zero;
	                zero_cnt1 -= bytes_to_zero;
	                zero_off1 += bytes_to_zero;
	                io_offset += bytes_to_zero;
	        }

	        if (retval == 0) {
	                int must_push;	/* push the existing delayed-write cluster now */
	                int can_delay;	/* this range can sit dirty in the cache */

	                io_size += start_offset;

	                if ((upl_f_offset + io_size) == newEOF && io_size < upl_size) {
	                        /*
	                         * if we're extending the file with this write
	                         * we'll zero fill the rest of the page so that
	                         * if the file gets extended again in such a way as to leave a
	                         * hole starting at this EOF, we'll have zero's in the correct spot
	                         */
	                        bzero((caddr_t)(io_address + io_size), upl_size - io_size);

	                        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 43)) | DBG_FUNC_NONE,
	                                (int)upl_f_offset + io_size,
	                                upl_size - io_size, 0, 0, 0);
	                }
	                if ((kret = kernel_upl_unmap(kernel_map, upl)) != KERN_SUCCESS)
	                        panic("cluster_write: kernel_upl_unmap failed\n");

	                io_size_before_rounding = io_size;

	                if (io_size & (devblocksize - 1))
	                        io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1);

	                must_push = 0;
	                can_delay = 0;

	                if (vp->v_clen) {
	                        int newsize;

	                        /*
	                         * we have an existing cluster... see if this write will extend it nicely
	                         */
	                        if (start_blkno >= vp->v_cstart) {
	                                if (last_blkno <= (vp->v_cstart + vp->v_clen)) {
	                                        /*
	                                         * we have a write that fits entirely
	                                         * within the existing cluster limits
	                                         */
	                                        if (last_blkno >= vp->v_lastw) {
	                                                /*
	                                                 * if we're extending the dirty region within the cluster
	                                                 * we need to update the cluster info... we check for blkno
	                                                 * equality because we may be extending the file with a
	                                                 * partial write.... this in turn changes our idea of how
	                                                 * much data to write out (v_ciosiz) for the last page
	                                                 */
	                                                vp->v_lastw = last_blkno;
	                                                newsize = io_size + ((start_blkno - vp->v_cstart) * PAGE_SIZE);

	                                                if (newsize > vp->v_ciosiz)
	                                                        vp->v_ciosiz = newsize;
	                                        }
	                                        can_delay = 1;
	                                        goto finish_io;
	                                }
	                                if (start_blkno < (vp->v_cstart + vp->v_clen)) {
	                                        /*
	                                         * we have a write that starts in the middle of the current cluster
	                                         * but extends beyond the cluster's limit
	                                         * we'll clip the current cluster if we actually
	                                         * overlap with the new write and then push it out
	                                         * and start a new cluster with the current write
	                                         */
	                                        if (vp->v_lastw > start_blkno) {
	                                                vp->v_lastw = start_blkno;
	                                                vp->v_ciosiz = (vp->v_lastw - vp->v_cstart) * PAGE_SIZE;
	                                        }
	                                }
	                                /*
	                                 * we also get here for the case where the current write starts
	                                 * beyond the limit of the existing cluster
	                                 */
	                                must_push = 1;
	                                goto check_delay;
	                        }
	                        /*
	                         * the current write starts in front of the current cluster
	                         */
	                        if (last_blkno > vp->v_cstart) {
	                                /*
	                                 * the current write extends into the existing cluster
	                                 */
	                                if ((vp->v_lastw - start_blkno) > vp->v_clen) {
	                                        /*
	                                         * if we were to combine this write with the current cluster
	                                         * we would exceed the cluster size limit....
	                                         * clip the current cluster by moving the start position
	                                         * to where the current write ends, and then push it
	                                         */
	                                        vp->v_ciosiz -= (last_blkno - vp->v_cstart) * PAGE_SIZE;
	                                        vp->v_cstart = last_blkno;

	                                        /*
	                                         * round up the io_size to the nearest page size
	                                         * since we've coalesced with at least 1 pre-existing
	                                         * page in the current cluster... this write may have ended in the
	                                         * middle of the page which would cause io_size to give us an
	                                         * inaccurate view of how much I/O we actually need to do
	                                         */
	                                        io_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;

	                                        must_push = 1;
	                                        goto check_delay;
	                                }
	                                /*
	                                 * we can coalesce the current write with the existing cluster
	                                 * adjust the cluster info to reflect this
	                                 */
	                                if (last_blkno > vp->v_lastw) {
	                                        /*
	                                         * the current write completely overlaps
	                                         * the existing cluster
	                                         */
	                                        vp->v_lastw = last_blkno;
	                                        vp->v_ciosiz = io_size;
	                                } else {
	                                        vp->v_ciosiz += (vp->v_cstart - start_blkno) * PAGE_SIZE;

	                                        if (io_size > vp->v_ciosiz)
	                                                vp->v_ciosiz = io_size;
	                                }
	                                vp->v_cstart = start_blkno;
	                                can_delay = 1;
	                                goto finish_io;
	                        }
	                        /*
	                         * this I/O range is entirely in front of the current cluster
	                         * so we need to push the current cluster out before beginning
	                         * a new one
	                         */
	                        must_push = 1;
	                }
check_delay:
	                if (must_push)
	                        cluster_push(vp);

	                /* start a fresh delayed-write cluster unless the write is large or IO_SYNC */
	                if (io_size_before_rounding < MAXPHYSIO && !(flags & IO_SYNC)) {
	                        vp->v_clen = MAXPHYSIO / PAGE_SIZE;
	                        vp->v_cstart = start_blkno;
	                        vp->v_lastw = last_blkno;
	                        vp->v_ciosiz = io_size;

	                        can_delay = 1;
	                }
finish_io:
	                if (can_delay) {
	                        /* commit the pages dirty... they'll be pushed later by cluster_push */
	                        kernel_upl_commit_range(upl, 0, upl_size,
	                                UPL_COMMIT_SET_DIRTY
	                                | UPL_COMMIT_FREE_ON_EMPTY,
	                                pl, MAX_UPL_TRANSFER);
	                        continue;
	                }

	                if ((flags & IO_SYNC) || (vp->v_numoutput > ASYNC_THROTTLE))
	                        io_flags = CL_COMMIT | CL_AGE;
	                else
	                        io_flags = CL_COMMIT | CL_AGE | CL_ASYNC;

	                if (vp->v_flag & VNOCACHE_DATA)
	                        io_flags |= CL_DUMP;

	                retval = cluster_io(vp, upl, 0, upl_f_offset, io_size,
	                                io_flags, (struct buf *)0);
	        }
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END,
	        retval, 0, 0, 0, 0);

	return (retval);
}

/*
 * cluster_read - top-level clustered read entry point.
 *
 * Mirrors cluster_write's dispatch: per iovec segment, small reads,
 * unaligned offsets/buffers, kernel-space requests, or vnodes without
 * VNOCACHE_DATA go through the cached path (cluster_read_x); large
 * page-aligned user-space reads on VNOCACHE_DATA vnodes use the direct
 * path (cluster_nocopy_read), falling back to cluster_read_x for any
 * residual.  The uio resid is temporarily clipped around each inner
 * call and restored on the way out.
 *
 * Returns 0 or an errno.  (K&R definition - implicit int return.)
 */
cluster_read(vp, uio, filesize, devblocksize, flags)
	struct vnode *vp;
	struct uio   *uio;
	off_t         filesize;
	int           devblocksize;
	int           flags;
{
	void  *object;
	int    prev_resid;
	int    clip_size;
	off_t  max_io_size;
	struct iovec *iov;
	int    retval = 0;

	object = ubc_getobject(vp, UBC_NOREACTIVATE);
	if (object == (void *)NULL)
	        panic("cluster_read: ubc_getobject failed");

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
	        (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0);

	/*
	 * We set a threshhold of 4 pages to decide if the nocopy
	 * read loop is worth the trouble...
	 */

	if ((!((vp->v_flag & VNOCACHE_DATA) && (uio->uio_segflg == UIO_USERSPACE)))
	    || (uio->uio_resid < 4 * PAGE_SIZE))
	  {
	    retval = cluster_read_x(object, vp, uio, filesize, devblocksize, flags);
	    KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
	        (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0);
	    return(retval);

	  }

	while (uio->uio_resid && uio->uio_offset < filesize && retval == 0)
	  {
	    /* we know we have a resid, so this is safe */
	    iov = uio->uio_iov;
	    while (iov->iov_len == 0) {
	      uio->uio_iov++;
	      uio->uio_iovcnt--;
	      iov = uio->uio_iov;
	    }

	    if (uio->uio_offset & PAGE_MASK_64)
	      {
		/* Bring the file offset read up to a pagesize boundary */
		clip_size = (PAGE_SIZE - (int)(uio->uio_offset & PAGE_MASK_64));
		if (uio->uio_resid < clip_size)
		  clip_size = uio->uio_resid;
		/*
		 * Fake the resid going into the cluster_read_x call
		 * and restore it on the way out.
		 */
		prev_resid = uio->uio_resid;
		uio->uio_resid = clip_size;
		retval = cluster_read_x(object, vp, uio, filesize, devblocksize, flags);
		uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
	      }
	    else if ((int)iov->iov_base & PAGE_MASK_64)
	      {
		/* user buffer not page aligned... this segment goes the cached route */
		clip_size = iov->iov_len;
		prev_resid = uio->uio_resid;
		uio->uio_resid = clip_size;
		retval = cluster_read_x(object, vp, uio, filesize, devblocksize, flags);
		uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
	      }
	    else
	      {
		/*
		 * If we come in here, we know the offset into
		 * the file is on a pagesize boundary
		 */

		max_io_size = filesize - uio->uio_offset;
		clip_size = uio->uio_resid;
		if (iov->iov_len < clip_size)
		  clip_size = iov->iov_len;
		if (max_io_size < clip_size)
		  clip_size = (int)max_io_size;

		if (clip_size < PAGE_SIZE)
		  {
		    /*
		     * Take care of the tail end of the read in this vector.
		     */
		    prev_resid = uio->uio_resid;
		    uio->uio_resid = clip_size;
		    retval = cluster_read_x(object,vp, uio, filesize, devblocksize, flags);
		    uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
		  }
		else
		  {
		    /* round clip_size down to a multiple of pagesize */
		    clip_size = clip_size & ~(PAGE_MASK);
		    prev_resid = uio->uio_resid;
		    uio->uio_resid = clip_size;
		    retval = cluster_nocopy_read(object, vp, uio, filesize, devblocksize, flags);
		    /* nocopy may stop early... finish via the cached path */
		    if ((retval==0) && uio->uio_resid)
		      retval = cluster_read_x(object,vp, uio, filesize, devblocksize, flags);
		    uio->uio_resid = prev_resid - (clip_size - uio->uio_resid);
		  }
	      } /* end else */
	  } /* end while */

	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END,
	        (int)uio->uio_offset, uio->uio_resid, vp->v_lastr, retval, 0);

	return(retval);
}

/*
 * cluster_read_x - cached read path: fill any invalid pages of the
 * covering upl via cluster_io, then copy the data out to the caller
 * with uiomove, issuing read-ahead/prefetch for sequential access.
 * (K&R definition - implicit int return.)
 */
static
cluster_read_x(object, vp, uio, filesize, devblocksize, flags)
	void         *object;
	struct vnode *vp;
	struct uio   *uio;
	off_t         filesize;
	int           devblocksize;
	int           flags;
{
	upl_page_info_t *pl;
	upl_t            upl;
	vm_offset_t      upl_offset;
	int              upl_size;
	off_t            upl_f_offset;
	int
start_offset; + int start_pg; + int last_pg; + int uio_last; + int pages_in_upl; + off_t max_size; + int io_size; + vm_offset_t io_address; + kern_return_t kret; + int segflg; + int error = 0; + int retval = 0; + int b_lblkno; + int e_lblkno; + + b_lblkno = (int)(uio->uio_offset / PAGE_SIZE_64); + + while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) { + /* + * compute the size of the upl needed to encompass + * the requested read... limit each call to cluster_io + * to at most MAXPHYSIO, make sure to account for + * a starting offset that's not page aligned + */ + start_offset = (int)(uio->uio_offset & PAGE_MASK_64); + upl_f_offset = uio->uio_offset - (off_t)start_offset; + max_size = filesize - uio->uio_offset; + + if (uio->uio_resid < max_size) + io_size = uio->uio_resid; + else + io_size = max_size; +#ifdef ppc + if (uio->uio_segflg == UIO_USERSPACE && !(vp->v_flag & VNOCACHE_DATA)) { + segflg = uio->uio_segflg; + + uio->uio_segflg = UIO_PHYS_USERSPACE; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, + (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0); + + while (io_size && retval == 0) { + int xsize; + vm_offset_t paddr; + + if (memory_object_page_op(object, (vm_offset_t)upl_f_offset, UPL_POP_SET | UPL_POP_BUSY, + &paddr, 0) != KERN_SUCCESS) + break; + + xsize = PAGE_SIZE - start_offset; + + if (xsize > io_size) + xsize = io_size; + + retval = uiomove((caddr_t)(paddr + start_offset), xsize, uio); + + memory_object_page_op(object, (vm_offset_t)upl_f_offset, UPL_POP_CLR | UPL_POP_BUSY, 0, 0); + + io_size -= xsize; + start_offset = (int) + (uio->uio_offset & PAGE_MASK_64); + upl_f_offset = uio->uio_offset - start_offset; + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, + (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0); + + uio->uio_segflg = segflg; + + if (retval) + break; + + if (io_size == 0) { + /* + * we're already finished with this read request + * let's see if we should do a read-ahead + */ + e_lblkno = 
(int) + ((uio->uio_offset - 1) / PAGE_SIZE_64); + + if (!(vp->v_flag & VRAOFF)) + /* + * let's try to read ahead if we're in + * a sequential access pattern + */ + cluster_rd_ahead(vp, object, b_lblkno, e_lblkno, filesize, devblocksize); + vp->v_lastr = e_lblkno; + + break; + } + max_size = filesize - uio->uio_offset; + } +#endif + upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; + if (upl_size > MAXPHYSIO) + upl_size = MAXPHYSIO; + pages_in_upl = upl_size / PAGE_SIZE; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START, + upl, (int)upl_f_offset, upl_size, start_offset, 0); + + kret = vm_fault_list_request(object, + (vm_object_offset_t)upl_f_offset, upl_size, &upl, NULL, 0, + (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL)); + + if (kret != KERN_SUCCESS) + panic("cluster_read: failed to get pagelist"); + + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END, + upl, (int)upl_f_offset, upl_size, start_offset, 0); + + /* + * scan from the beginning of the upl looking for the first + * non-valid page.... this will become the first page in + * the request we're going to make to 'cluster_io'... 
if all + * of the pages are valid, we won't call through to 'cluster_io' + */ + for (start_pg = 0; start_pg < pages_in_upl; start_pg++) { + if (!upl_valid_page(pl, start_pg)) + break; + } + + /* + * scan from the starting invalid page looking for a valid + * page before the end of the upl is reached, if we + * find one, then it will be the last page of the request to + * 'cluster_io' + */ + for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { + if (upl_valid_page(pl, last_pg)) + break; + } + + if (start_pg < last_pg) { + /* + * we found a range of 'invalid' pages that must be filled + * if the last page in this range is the last page of the file + * we may have to clip the size of it to keep from reading past + * the end of the last physical block associated with the file + */ + upl_offset = start_pg * PAGE_SIZE; + io_size = (last_pg - start_pg) * PAGE_SIZE; + + if ((upl_f_offset + upl_offset + io_size) > filesize) { + io_size = filesize - (upl_f_offset + upl_offset); + io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1); + } + /* + * issue a synchronous read to cluster_io + */ + + error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, + io_size, CL_READ, (struct buf *)0); + } + if (error == 0) { + /* + * if the read completed successfully, or there was no I/O request + * issued, than map the upl into kernel address space and + * move the data into user land.... we'll first add on any 'valid' + * pages that were present in the upl when we acquired it. + */ + u_int val_size; + u_int size_of_prefetch; + + for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { + if (!upl_valid_page(pl, uio_last)) + break; + } + /* + * compute size to transfer this round, if uio->uio_resid is + * still non-zero after this uiomove, we'll loop around and + * set up for another I/O. 
+ */ + val_size = (uio_last * PAGE_SIZE) - start_offset; + + if (max_size < val_size) + val_size = max_size; + + if (uio->uio_resid < val_size) + val_size = uio->uio_resid; + + e_lblkno = (int)((uio->uio_offset + ((off_t)val_size - 1)) / PAGE_SIZE_64); + + if (size_of_prefetch = (uio->uio_resid - val_size)) { + /* + * if there's still I/O left to do for this request, then issue a + * pre-fetch I/O... the I/O wait time will overlap + * with the copying of the data + */ + cluster_rd_prefetch(vp, object, uio->uio_offset + val_size, size_of_prefetch, filesize, devblocksize); + } else { + if (!(vp->v_flag & VRAOFF) && !(vp->v_flag & VNOCACHE_DATA)) + /* + * let's try to read ahead if we're in + * a sequential access pattern + */ + cluster_rd_ahead(vp, object, b_lblkno, e_lblkno, filesize, devblocksize); + vp->v_lastr = e_lblkno; + } +#ifdef ppc + if (uio->uio_segflg == UIO_USERSPACE) { + int offset; + + segflg = uio->uio_segflg; + + uio->uio_segflg = UIO_PHYS_USERSPACE; + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, + (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0); + + offset = start_offset; + + while (val_size && retval == 0) { + int csize; + int i; + caddr_t paddr; + + i = offset / PAGE_SIZE; + csize = min(PAGE_SIZE - start_offset, val_size); + + paddr = (caddr_t)upl_phys_page(pl, i) + start_offset; + + retval = uiomove(paddr, csize, uio); + + val_size -= csize; + offset += csize; + start_offset = offset & PAGE_MASK; + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, + (int)uio->uio_offset, val_size, uio->uio_resid, 0, 0); + + uio->uio_segflg = segflg; + } else +#endif + { + if ((kret = kernel_upl_map(kernel_map, upl, &io_address)) != KERN_SUCCESS) + panic("cluster_read: kernel_upl_map failed\n"); + + retval = uiomove((caddr_t)(io_address + start_offset), val_size, uio); + + if ((kret = kernel_upl_unmap(kernel_map, upl)) != KERN_SUCCESS) + panic("cluster_read: kernel_upl_unmap failed\n"); + } + } + if (start_pg < last_pg) { + /* + * 
compute the range of pages that we actually issued an I/O for + * and either commit them as valid if the I/O succeeded + * or abort them if the I/O failed + */ + io_size = (last_pg - start_pg) * PAGE_SIZE; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, + upl, start_pg * PAGE_SIZE, io_size, error, 0); + + if (error || (vp->v_flag & VNOCACHE_DATA)) + kernel_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size, + UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(upl, + start_pg * PAGE_SIZE, io_size, + UPL_COMMIT_CLEAR_DIRTY + | UPL_COMMIT_FREE_ON_EMPTY + | UPL_COMMIT_INACTIVATE, + pl, MAX_UPL_TRANSFER); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, + upl, start_pg * PAGE_SIZE, io_size, error, 0); + } + if ((last_pg - start_pg) < pages_in_upl) { + int cur_pg; + int commit_flags; + + /* + * the set of pages that we issued an I/O for did not encompass + * the entire upl... so just release these without modifying + * there state + */ + if (error) + kernel_upl_abort(upl, 0); + else { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, + upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0); + + if (start_pg) { + /* + * we found some already valid pages at the beginning of the upl + * commit these back to the inactive list with reference cleared + */ + for (cur_pg = 0; cur_pg < start_pg; cur_pg++) { + commit_flags = UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE; + + if (upl_dirty_page(pl, cur_pg)) + commit_flags |= UPL_COMMIT_SET_DIRTY; + + if ( !(commit_flags & UPL_COMMIT_SET_DIRTY) && (vp->v_flag & VNOCACHE_DATA)) + kernel_upl_abort_range(upl, cur_pg * PAGE_SIZE, PAGE_SIZE, + UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(upl, cur_pg * PAGE_SIZE, + PAGE_SIZE, commit_flags, pl, MAX_UPL_TRANSFER); + } + } + if (last_pg < uio_last) { + /* + * we found some already valid pages immediately after the pages we issued + * I/O for, commit these back to the inactive list with 
reference cleared + */ + for (cur_pg = last_pg; cur_pg < uio_last; cur_pg++) { + commit_flags = UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE; + + if (upl_dirty_page(pl, cur_pg)) + commit_flags |= UPL_COMMIT_SET_DIRTY; + + if ( !(commit_flags & UPL_COMMIT_SET_DIRTY) && (vp->v_flag & VNOCACHE_DATA)) + kernel_upl_abort_range(upl, cur_pg * PAGE_SIZE, PAGE_SIZE, + UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + else + kernel_upl_commit_range(upl, cur_pg * PAGE_SIZE, + PAGE_SIZE, commit_flags, pl, MAX_UPL_TRANSFER); + } + } + if (uio_last < pages_in_upl) { + /* + * there were some invalid pages beyond the valid pages that we didn't + * issue an I/O for, just release them unchanged + */ + kernel_upl_abort(upl, 0); + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, + upl, -1, -1, 0, 0); + } + } + if (retval == 0) + retval = error; + } + + return (retval); +} + +static +cluster_nocopy_read(object, vp, uio, filesize, devblocksize, flags) + void *object; + struct vnode *vp; + struct uio *uio; + off_t filesize; + int devblocksize; + int flags; +{ + upl_t upl; + upl_page_info_t *pl; + off_t upl_f_offset; + vm_offset_t upl_offset; + off_t start_upl_f_offset; + off_t max_io_size; + int io_size; + int upl_size; + int upl_needed_size; + int pages_in_pl; + vm_offset_t paddr; + int upl_flags; + kern_return_t kret; + int segflg; + struct iovec *iov; + int i; + int force_data_sync; + int error = 0; + int retval = 0; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START, + (int)uio->uio_offset, uio->uio_resid, (int)filesize, devblocksize, 0); + + /* + * When we enter this routine, we know + * -- the offset into the file is on a pagesize boundary + * -- the resid is a page multiple + * -- the resid will not exceed iov_len + */ + + iov = uio->uio_iov; + while (uio->uio_resid && uio->uio_offset < filesize && retval == 0) { + + io_size = uio->uio_resid; + + /* + * We don't come into this routine unless + * UIO_USERSPACE is set. 
+ */ + segflg = uio->uio_segflg; + + uio->uio_segflg = UIO_PHYS_USERSPACE; + + /* + * First look for pages already in the cache + * and move them to user space. + */ + while (io_size && retval == 0) { + + upl_f_offset = uio->uio_offset; + + /* + * If this call fails, it means the page is not + * in the page cache. + */ + if (memory_object_page_op(object, (vm_offset_t)upl_f_offset, + UPL_POP_SET | UPL_POP_BUSY, + &paddr, 0) != KERN_SUCCESS) + break; + + retval = uiomove((caddr_t)(paddr), PAGE_SIZE, uio); + + memory_object_page_op(object, (vm_offset_t)upl_f_offset, + UPL_POP_CLR | UPL_POP_BUSY, 0, 0); + + io_size -= PAGE_SIZE; + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 71)) | DBG_FUNC_NONE, + (int)uio->uio_offset, io_size, uio->uio_resid, 0, 0); + } + + uio->uio_segflg = segflg; + + if (retval) + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, 2, retval, 0); + return(retval); + } + + /* If we are already finished with this read, then return */ + if (io_size == 0) + { + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, 3, io_size, 0); + return(0); + } + + max_io_size = io_size; + if (max_io_size > MAXPHYSIO) + max_io_size = MAXPHYSIO; + + start_upl_f_offset = uio->uio_offset; /* this is page aligned in the file */ + upl_f_offset = start_upl_f_offset; + io_size = 0; + + while(io_size < max_io_size) + { + + if(memory_object_page_op(object, (vm_offset_t)upl_f_offset, + UPL_POP_SET | UPL_POP_BUSY, &paddr, 0) == KERN_SUCCESS) + { + memory_object_page_op(object, (vm_offset_t)upl_f_offset, + UPL_POP_CLR | UPL_POP_BUSY, 0, 0); + break; + } + + /* + * Build up the io request parameters. 
+ */ + + io_size += PAGE_SIZE; + upl_f_offset += PAGE_SIZE; + } + + if (io_size == 0) + return(retval); + + upl_offset = (vm_offset_t)iov->iov_base & PAGE_MASK_64; + upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK; + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START, + (int)upl_offset, upl_needed_size, iov->iov_base, io_size, 0); + + for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) + { + pages_in_pl = 0; + upl_size = upl_needed_size; + upl_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL; + + kret = vm_map_get_upl(current_map(), + (vm_offset_t)iov->iov_base & ~PAGE_MASK, + &upl_size, &upl, &pl, &pages_in_pl, &upl_flags, force_data_sync); + + pages_in_pl = upl_size / PAGE_SIZE; + + if (kret != KERN_SUCCESS) + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, + (int)upl_offset, upl_size, io_size, kret, 0); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, 4, retval, 0); + + /* cluster_nocopy_read: failed to get pagelist */ + /* do not return kret here */ + return(retval); + } + + for(i=0; i < pages_in_pl; i++) + { + if (!upl_valid_page(pl, i)) + break; + } + if (i == pages_in_pl) + break; + + kernel_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, + UPL_ABORT_FREE_ON_EMPTY); + } + + if (force_data_sync >= 3) + { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, + (int)upl_offset, upl_size, io_size, kret, 0); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END, + (int)uio->uio_offset, uio->uio_resid, 5, retval, 0); + return(retval); + } + /* + * Consider the possibility that upl_size wasn't satisfied. 
+ */ + if (upl_size != upl_needed_size) + io_size = (upl_size - (int)upl_offset) & ~PAGE_MASK; + + if (io_size == 0) + { + kernel_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, + UPL_ABORT_FREE_ON_EMPTY); + return(retval); + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, + (int)upl_offset, upl_size, io_size, kret, 0); + + /* + * issue a synchronous read to cluster_io + */ + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START, + upl, (int)upl_offset, (int)start_upl_f_offset, io_size, 0); + + error = cluster_io(vp, upl, upl_offset, start_upl_f_offset, + io_size, CL_READ| CL_NOZERO, (struct buf *)0); + + if (error == 0) { + /* + * The cluster_io read completed successfully, + * update the uio structure and commit. + */ + + kernel_upl_commit_range(upl, (upl_offset & ~PAGE_MASK), upl_size, + UPL_COMMIT_SET_DIRTY + | UPL_COMMIT_FREE_ON_EMPTY, + pl, MAX_UPL_TRANSFER); + + iov->iov_base += io_size; + iov->iov_len -= io_size; + uio->uio_resid -= io_size; + uio->uio_offset += io_size; + } + else { + kernel_upl_abort_range(upl, (upl_offset & ~PAGE_MASK), upl_size, + UPL_ABORT_FREE_ON_EMPTY); + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END, + upl, (int)uio->uio_offset, (int)uio->uio_resid, error, 0); + + if (retval == 0) + retval = error; + + } /* end while */ + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END, + (int)uio->uio_offset, (int)uio->uio_resid, 6, retval, 0); + + return (retval); +} + + + +/* + * generate advisory I/O's in the largest chunks possible + * the completed pages will be released into the VM cache + */ +advisory_read(vp, filesize, f_offset, resid, devblocksize) + struct vnode *vp; + off_t filesize; + off_t f_offset; + int resid; + int devblocksize; +{ + void *object; + upl_page_info_t *pl; + upl_t upl; + vm_offset_t upl_offset; + int upl_size; + off_t upl_f_offset; + int start_offset; + int start_pg; + int last_pg; + int pages_in_upl; + off_t max_size; + int io_size; + kern_return_t kret; + int 
retval = 0; + + + if (!UBCINFOEXISTS(vp)) + return(EINVAL); + + object = ubc_getobject(vp, UBC_NOREACTIVATE); + if (object == (void *)NULL) + panic("advisory_read: ubc_getobject failed"); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START, + (int)f_offset, resid, (int)filesize, devblocksize, 0); + + while (resid && f_offset < filesize && retval == 0) { + /* + * compute the size of the upl needed to encompass + * the requested read... limit each call to cluster_io + * to at most MAXPHYSIO, make sure to account for + * a starting offset that's not page aligned + */ + start_offset = (int)(f_offset & PAGE_MASK_64); + upl_f_offset = f_offset - (off_t)start_offset; + max_size = filesize - f_offset; + + if (resid < max_size) + io_size = resid; + else + io_size = max_size; + + upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; + if (upl_size > MAXPHYSIO) + upl_size = MAXPHYSIO; + pages_in_upl = upl_size / PAGE_SIZE; + + kret = vm_fault_list_request(object, + (vm_object_offset_t)upl_f_offset, upl_size, &upl, NULL, 0, + (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL)); + + if (kret != KERN_SUCCESS) + panic("advisory_read: failed to get pagelist"); + + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_NONE, + upl, (int)upl_f_offset, upl_size, start_offset, 0); + + /* + * scan from the beginning of the upl looking for the first + * non-valid page.... this will become the first page in + * the request we're going to make to 'cluster_io'... 
if all + * of the pages are valid, we won't call through to 'cluster_io' + */ + for (start_pg = 0; start_pg < pages_in_upl; start_pg++) { + if (!upl_valid_page(pl, start_pg)) + break; + } + + /* + * scan from the starting invalid page looking for a valid + * page before the end of the upl is reached, if we + * find one, then it will be the last page of the request to + * 'cluster_io' + */ + for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { + if (upl_valid_page(pl, last_pg)) + break; + } + + if (start_pg < last_pg) { + /* + * we found a range of 'invalid' pages that must be filled + * if the last page in this range is the last page of the file + * we may have to clip the size of it to keep from reading past + * the end of the last physical block associated with the file + */ + upl_offset = start_pg * PAGE_SIZE; + io_size = (last_pg - start_pg) * PAGE_SIZE; + + if ((upl_f_offset + upl_offset + io_size) > filesize) { + io_size = filesize - (upl_f_offset + upl_offset); + io_size = (io_size + (devblocksize - 1)) & ~(devblocksize - 1); + } + /* + * issue an asynchronous read to cluster_io + */ + retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, + CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE, (struct buf *)0); + } + if (start_pg) { + /* + * start_pg of non-zero indicates we found some already valid pages + * at the beginning of the upl.... we need to release these without + * modifying there state + */ + kernel_upl_abort_range(upl, 0, start_pg * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 62)) | DBG_FUNC_NONE, + upl, 0, start_pg * PAGE_SIZE, 0, 0); + } + if (last_pg < pages_in_upl) { + /* + * the set of pages that we issued an I/O for did not extend all the + * way to the end of the upl... 
so just release them without modifying + * there state + */ + kernel_upl_abort_range(upl, last_pg * PAGE_SIZE, (pages_in_upl - last_pg) * PAGE_SIZE, + UPL_ABORT_FREE_ON_EMPTY); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 63)) | DBG_FUNC_NONE, + upl, last_pg * PAGE_SIZE, + (pages_in_upl - last_pg) * PAGE_SIZE, 0, 0); + } + io_size = (last_pg * PAGE_SIZE) - start_offset; + + if (io_size > resid) + io_size = resid; + f_offset += io_size; + resid -= io_size; + } + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END, + (int)f_offset, resid, retval, 0, 0); + + return(retval); +} + + +cluster_push(vp) + struct vnode *vp; +{ + void *object; + upl_page_info_t *pl; + upl_t upl; + vm_offset_t upl_offset; + int upl_size; + off_t upl_f_offset; + int pages_in_upl; + int start_pg; + int last_pg; + int io_size; + int io_flags; + int size; + kern_return_t kret; + + + if (!UBCINFOEXISTS(vp)) + return(0); + + if (vp->v_clen == 0 || (pages_in_upl = vp->v_lastw - vp->v_cstart) == 0) + return (0); + upl_size = pages_in_upl * PAGE_SIZE; + upl_f_offset = ((off_t)vp->v_cstart) * PAGE_SIZE_64; + size = vp->v_ciosiz; + vp->v_clen = 0; + + if (size > upl_size || (upl_size - size) > PAGE_SIZE) + panic("cluster_push: v_ciosiz doesn't match size of cluster\n"); + + object = ubc_getobject(vp, UBC_NOREACTIVATE); + if (object == (void *)NULL) + panic("cluster_push: ubc_getobject failed"); + + kret = vm_fault_list_request(object, + (vm_object_offset_t)upl_f_offset, upl_size, &upl, NULL, 0, + (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL)); + if (kret != KERN_SUCCESS) + panic("cluster_push: failed to get pagelist"); + + pl = UPL_GET_INTERNAL_PAGE_LIST(upl); + + last_pg = 0; + + while (size) { + + for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) { + if (upl_valid_page(pl, start_pg) && upl_dirty_page(pl, start_pg)) + break; + } + if (start_pg > last_pg) { + io_size = (start_pg - last_pg) * PAGE_SIZE; + + kernel_upl_abort_range(upl, last_pg * PAGE_SIZE, io_size, 
UPL_ABORT_FREE_ON_EMPTY); + + if (io_size < size) + size -= io_size; + else + break; + } + for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { + if (!upl_valid_page(pl, last_pg) || !upl_dirty_page(pl, last_pg)) + break; + } + upl_offset = start_pg * PAGE_SIZE; + + io_size = min(size, (last_pg - start_pg) * PAGE_SIZE); + + if (vp->v_numoutput > ASYNC_THROTTLE) + io_flags = CL_COMMIT | CL_AGE; + else + io_flags = CL_COMMIT | CL_AGE | CL_ASYNC; + + if (vp->v_flag & VNOCACHE_DATA) + io_flags |= CL_DUMP; + + cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, io_flags, (struct buf *)0); + + size -= io_size; + } + return(1); +} diff --git a/bsd/vfs/vfs_conf.c b/bsd/vfs/vfs_conf.c new file mode 100644 index 000000000..0442e9b64 --- /dev/null +++ b/bsd/vfs/vfs_conf.c @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993, 1995 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vfs_conf.c 8.11 (Berkeley) 5/10/95 + */ + +#include +#include +#include +#include + +/* + * These define the root filesystem, device, and root filesystem type. 
+ */ +struct mount *rootfs; +struct vnode *rootvnode; +int (*mountroot)() = NULL; + +/* + * Set up the initial array of known filesystem types. + */ +extern struct vfsops ufs_vfsops; +extern int ffs_mountroot(); +extern struct vfsops lfs_vfsops; +extern int lfs_mountroot(); +extern struct vfsops mfs_vfsops; +extern int mfs_mountroot(); +extern struct vfsops hfs_vfsops; +extern int hfs_mountroot(); +extern struct vfsops volfs_vfsops; +extern struct vfsops cd9660_vfsops; +extern int cd9660_mountroot(); +extern struct vfsops nfs_vfsops; +extern int nfs_mountroot(); +extern struct vfsops afs_vfsops; +extern struct vfsops procfs_vfsops; +extern struct vfsops null_vfsops; +extern struct vfsops union_vfsops; +extern struct vfsops umap_vfsops; +extern struct vfsops portal_vfsops; +extern struct vfsops fdesc_vfsops; +extern struct vfsops kernfs_vfsops; +extern struct vfsops devfs_vfsops; + +/* + * Set up the filesystem operations for vnodes. + */ +static struct vfsconf vfsconflist[] = { + /* HFS/HFS+ Filesystem */ +#if HFS + { &hfs_vfsops, "hfs", 17, 0, MNT_LOCAL | MNT_DOVOLFS, hfs_mountroot, NULL }, +#endif + + /* Fast Filesystem */ +#if FFS + { &ufs_vfsops, "ufs", 1, 0, MNT_LOCAL, ffs_mountroot, NULL }, +#endif + + /* ISO9660 (aka CDROM) Filesystem */ +#if CD9660 + { &cd9660_vfsops, "cd9660", 14, 0, MNT_LOCAL | MNT_DOVOLFS, cd9660_mountroot, NULL }, +#endif + + /* Log-based Filesystem */ +#if LFS + { &lfs_vfsops, "lfs", 5, 0, MNT_LOCAL, lfs_mountroot, NULL }, +#endif + + /* Memory-based Filesystem */ +#if MFS + { &mfs_vfsops, "mfs", 3, 0, MNT_LOCAL, mfs_mountroot, NULL }, +#endif + + /* Sun-compatible Network Filesystem */ +#if NFSCLIENT + { &nfs_vfsops, "nfs", 2, 0, 0, nfs_mountroot, NULL }, +#endif + + /* Andrew Filesystem */ +#if AFS + { &afs_vfsops, "andrewfs", 13, 0, 0, afs_mountroot, NULL }, +#endif + + /* /proc Filesystem */ +#if PROCFS + { &procfs_vfsops, "procfs", 12, 0, 0, NULL, NULL }, +#endif + + /* Loopback (Minimal) Filesystem Layer */ +#if NULLFS + { 
&null_vfsops, "loopback", 9, 0, 0, NULL, NULL }, +#endif + + /* Union (translucent) Filesystem */ +#if UNION + { &union_vfsops, "union", 15, 0, 0, NULL, NULL }, +#endif + + /* User/Group Identifer Remapping Filesystem */ +#if UMAPFS + { &umap_vfsops, "umap", 10, 0, 0, NULL, NULL }, +#endif + + /* Portal Filesystem */ +#if PORTAL + { &portal_vfsops, "portal", 8, 0, 0, NULL, NULL }, +#endif + + /* File Descriptor Filesystem */ +#if FDESC + { &fdesc_vfsops, "fdesc", 7, 0, 0, NULL, NULL }, +#endif + + /* Kernel Information Filesystem */ +#if KERNFS + { &kernfs_vfsops, "kernfs", 11, 0, 0, NULL, NULL }, +#endif + + /* Volume ID Filesystem */ +#if VOLFS + { &volfs_vfsops, "volfs", 18, 0, 0, NULL, NULL }, +#endif + /* Device Filesystem */ +#if DEVFS + { &devfs_vfsops, "devfs", 19, 0, 0, NULL, NULL }, +#endif + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0} +}; + +/* + * Initially the size of the list, vfs_init will set maxvfsconf + * to the highest defined type number. + */ +int maxvfsslots = sizeof(vfsconflist) / sizeof (struct vfsconf); +int numused_vfsslots = 0; +int maxvfsconf = sizeof(vfsconflist) / sizeof (struct vfsconf); +struct vfsconf *vfsconf = vfsconflist; + +/* + * + * vfs_opv_descs enumerates the list of vnode classes, each with it's own + * vnode operation vector. It is consulted at system boot to build operation + * vectors. It is NULL terminated. 
+ * + */ +extern struct vnodeopv_desc ffs_vnodeop_opv_desc; +extern struct vnodeopv_desc ffs_specop_opv_desc; +extern struct vnodeopv_desc ffs_fifoop_opv_desc; +extern struct vnodeopv_desc lfs_vnodeop_opv_desc; +extern struct vnodeopv_desc lfs_specop_opv_desc; +extern struct vnodeopv_desc lfs_fifoop_opv_desc; +extern struct vnodeopv_desc mfs_vnodeop_opv_desc; +extern struct vnodeopv_desc dead_vnodeop_opv_desc; +extern struct vnodeopv_desc fifo_vnodeop_opv_desc; +extern struct vnodeopv_desc spec_vnodeop_opv_desc; +extern struct vnodeopv_desc nfsv2_vnodeop_opv_desc; +extern struct vnodeopv_desc spec_nfsv2nodeop_opv_desc; +extern struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc; +extern struct vnodeopv_desc fdesc_vnodeop_opv_desc; +extern struct vnodeopv_desc portal_vnodeop_opv_desc; +extern struct vnodeopv_desc null_vnodeop_opv_desc; +extern struct vnodeopv_desc umap_vnodeop_opv_desc; +extern struct vnodeopv_desc kernfs_vnodeop_opv_desc; +extern struct vnodeopv_desc procfs_vnodeop_opv_desc; +extern struct vnodeopv_desc hfs_vnodeop_opv_desc; +extern struct vnodeopv_desc hfs_specop_opv_desc; +extern struct vnodeopv_desc hfs_fifoop_opv_desc; +extern struct vnodeopv_desc volfs_vnodeop_opv_desc; +extern struct vnodeopv_desc cd9660_vnodeop_opv_desc; +extern struct vnodeopv_desc cd9660_specop_opv_desc; +extern struct vnodeopv_desc cd9660_fifoop_opv_desc; +extern struct vnodeopv_desc union_vnodeop_opv_desc; +extern struct vnodeopv_desc procfs_vnodeop_opv_desc; +extern struct vnodeopv_desc devfs_vnodeop_opv_desc; +extern struct vnodeopv_desc devfs_spec_vnodeop_opv_desc; + +struct vnodeopv_desc *vfs_opv_descs[] = { + &ffs_vnodeop_opv_desc, + &ffs_specop_opv_desc, +#if FIFO + &ffs_fifoop_opv_desc, +#endif + &dead_vnodeop_opv_desc, +#if FIFO + &fifo_vnodeop_opv_desc, +#endif + &spec_vnodeop_opv_desc, +#if LFS + &lfs_vnodeop_opv_desc, + &lfs_specop_opv_desc, +#if FIFO + &lfs_fifoop_opv_desc, +#endif +#endif +#if MFS + &mfs_vnodeop_opv_desc, +#endif +#if NFSCLIENT + 
&nfsv2_vnodeop_opv_desc, + &spec_nfsv2nodeop_opv_desc, +#if FIFO + &fifo_nfsv2nodeop_opv_desc, +#endif +#endif +#if FDESC + &fdesc_vnodeop_opv_desc, +#endif +#if PORTAL + &portal_vnodeop_opv_desc, +#endif +#if NULLFS + &null_vnodeop_opv_desc, +#endif +#if UMAPFS + &umap_vnodeop_opv_desc, +#endif +#if KERNFS + &kernfs_vnodeop_opv_desc, +#endif +#if PROCFS + &procfs_vnodeop_opv_desc, +#endif +#if HFS + &hfs_vnodeop_opv_desc, + &hfs_specop_opv_desc, +#if FIFO + &hfs_fifoop_opv_desc, +#endif +#endif +#if CD9660 + &cd9660_vnodeop_opv_desc, + &cd9660_specop_opv_desc, +#if FIFO + &cd9660_fifoop_opv_desc, +#endif +#endif +#if UNION + &union_vnodeop_opv_desc, +#endif +#if VOLFS + &volfs_vnodeop_opv_desc, +#endif +#if DEVFS + &devfs_vnodeop_opv_desc, + &devfs_spec_vnodeop_opv_desc, +#endif + NULL +}; diff --git a/bsd/vfs/vfs_init.c b/bsd/vfs/vfs_init.c new file mode 100644 index 000000000..536623adc --- /dev/null +++ b/bsd/vfs/vfs_init.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed + * to Berkeley by John Heidemann of the UCLA Ficus project. + * + * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vfs_init.c 8.5 (Berkeley) 5/11/95 + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * Sigh, such primitive tools are these... + */ +#if 0 +#define DODEBUG(A) A +#else +#define DODEBUG(A) +#endif + +extern uid_t console_user; +extern struct vnodeopv_desc *vfs_opv_descs[]; + /* a list of lists of vnodeops defns */ +extern struct vnodeop_desc *vfs_op_descs[]; + /* and the operations they perform */ +/* + * This code doesn't work if the defn is **vnodop_defns with cc. + * The problem is because of the compiler sometimes putting in an + * extra level of indirection for arrays. It's an interesting + * "feature" of C. + */ +int vfs_opv_numops; + +typedef (*PFI)(); /* the standard Pointer to a Function returning an Int */ + +/* + * A miscellaneous routine. + * A generic "default" routine that just returns an error. + */ +int +vn_default_error() +{ + + return (EOPNOTSUPP); +} + +/* + * vfs_init.c + * + * Allocate and fill in operations vectors. + * + * An undocumented feature of this approach to defining operations is that + * there can be multiple entries in vfs_opv_descs for the same operations + * vector. This allows third parties to extend the set of operations + * supported by another layer in a binary compatibile way. For example, + * assume that NFS needed to be modified to support Ficus. 
NFS has an entry
+ * (probably nfs_vnopdeop_decls) declaring all the operations NFS supports by
+ * default. Ficus could add another entry (ficus_nfs_vnodeop_decl_entensions)
+ * listing those new operations Ficus adds to NFS, all without modifying the
+ * NFS code. (Of course, the OTW NFS protocol still needs to be munged, but
+ * that is a(whole)nother story.) This is a feature.
+ */
+/*
+ * Build every filesystem's operations vector: for each vnodeopv_desc,
+ * allocate the vector on first sight (indexed by vdesc_offset), then
+ * install each listed implementation at its operation's offset.
+ * Requires vfs_op_init() to have assigned offsets first.
+ */
+void
+vfs_opv_init()
+{
+	int i, j, k;
+	int (***opv_desc_vector_p)(void *);
+	int (**opv_desc_vector)(void *);
+	struct vnodeopv_entry_desc *opve_descp;
+
+	/*
+	 * Allocate the dynamic vectors and fill them in.
+	 */
+	for (i=0; vfs_opv_descs[i]; i++) {
+		opv_desc_vector_p = vfs_opv_descs[i]->opv_desc_vector_p;
+		/*
+		 * Allocate and init the vector, if it needs it.
+		 * Also handle backwards compatibility.
+		 */
+		if (*opv_desc_vector_p == NULL) {
+			/* XXX - shouldn't be M_VNODE */
+			MALLOC(*opv_desc_vector_p, PFI*,
+			       vfs_opv_numops*sizeof(PFI), M_VNODE, M_WAITOK);
+			bzero (*opv_desc_vector_p, vfs_opv_numops*sizeof(PFI));
+			DODEBUG(printf("vector at %x allocated\n",
+			    opv_desc_vector_p));
+		}
+		opv_desc_vector = *opv_desc_vector_p;
+		for (j=0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
+			opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]);
+
+			/*
+			 * Sanity check:  is this operation listed
+			 * in the list of operations?  We check this
+			 * by seeing if its offset is zero.  Since
+			 * the default routine should always be listed
+			 * first, it should be the only one with a zero
+			 * offset.  Any other operation with a zero
+			 * offset is probably not listed in
+			 * vfs_op_descs, and so is probably an error.
+			 *
+			 * A panic here means the layer programmer
+			 * has committed the all-too common bug
+			 * of adding a new operation to the layer's
+			 * list of vnode operations but
+			 * not adding the operation to the system-wide
+			 * list of supported operations.
+			 */
+			if (opve_descp->opve_op->vdesc_offset == 0 &&
+			    opve_descp->opve_op->vdesc_offset !=
+			    	VOFFSET(vop_default)) {
+				printf("operation %s not listed in %s.\n",
+				    opve_descp->opve_op->vdesc_name,
+				    "vfs_op_descs");
+				panic ("vfs_opv_init: bad operation");
+			}
+			/*
+			 * Fill in this entry.
+			 */
+			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
+			    opve_descp->opve_impl;
+		}
+	}
+	/*
+	 * Finally, go back and replace unfilled routines
+	 * with their default.  (Sigh, an O(n^3) algorithm.  I
+	 * could make it better, but that'd be work, and n is small.)
+	 */
+	for (i = 0; vfs_opv_descs[i]; i++) {
+		opv_desc_vector = *(vfs_opv_descs[i]->opv_desc_vector_p);
+		/*
+		 * Force every operations vector to have a default routine.
+		 */
+		if (opv_desc_vector[VOFFSET(vop_default)]==NULL) {
+			panic("vfs_opv_init: operation vector without default routine.");
+		}
+		/* NOTE(review): the source is truncated here by extraction.
+		 * The line below fuses the head of this inner default-fill
+		 * loop ("for (k = 0; k...") with the tail of what appears
+		 * to be vfs_op_init()'s vector-clearing loop
+		 * ("...opv_desc_vector_p) = NULL;").  The loop body, the
+		 * end of vfs_opv_init(), and the head of vfs_op_init() are
+		 * lost; recover them from upstream before use. */
+		for (k = 0; kopv_desc_vector_p) = NULL;
+	/*
+	 * Figure out how many ops there are by counting the table,
+	 * and assign each its offset.
+	 */
+	for (vfs_opv_numops = 0, i = 0; vfs_op_descs[i]; i++) {
+		vfs_op_descs[i]->vdesc_offset = vfs_opv_numops;
+		vfs_opv_numops++;
+	}
+	DODEBUG(printf ("vfs_opv_numops=%d\n", vfs_opv_numops));
+}
+
+/*
+ * Routines having to do with the management of the vnode table.
+ */
+extern struct vnodeops dead_vnodeops;
+extern struct vnodeops spec_vnodeops;
+struct vattr va_null;	/* zeroed vattr template, initialized in vfsinit() */
+
+/*
+ * Initialize the vnode structures and initialize each file system type.
+ * (K&R definition; implicit int return, value unused.)
+ */
+vfsinit()
+{
+	struct vfsconf *vfsp;
+	int i, maxtypenum;
+
+	/*
+	 * Initialize the "console user" for access purposes:
+	 */
+	console_user = (uid_t)0;
+
+	/*
+	 * Initialize the vnode table
+	 */
+	vntblinit();
+	/*
+	 * Initialize the vnode name cache
+	 */
+	nchinit();
+	/*
+	 * Build vnode operation vectors.
+	 */
+	vfs_op_init();
+	vfs_opv_init();   /* finish the job */
+	/*
+	 * Initialize each file system type.
+ */ + vattr_null(&va_null); + numused_vfsslots = maxtypenum = 0; + for (vfsp = vfsconf, i = 0; i < maxvfsconf; i++, vfsp++) { + if (vfsp->vfc_vfsops == (struct vfsops *)0) + break; + if (i) vfsconf[i-1].vfc_next = vfsp; + if (maxtypenum <= vfsp->vfc_typenum) + maxtypenum = vfsp->vfc_typenum + 1; + (*vfsp->vfc_vfsops->vfs_init)(vfsp); + numused_vfsslots++; + } + /* next vfc_typenum to be used */ + maxvfsconf = maxtypenum; +} + +int +vfsconf_add(struct vfsconf *nvfsp) +{ + struct vfsconf *vfsp; + + if ((numused_vfsslots >= maxvfsslots) || (nvfsp == (struct vfsconf *)0)) + return (-1); + bcopy(nvfsp, &vfsconf[numused_vfsslots], sizeof(struct vfsconf)); + vfsconf[numused_vfsslots-1].vfc_next = &vfsconf[numused_vfsslots]; + + if (nvfsp->vfc_typenum <= maxvfsconf ) + maxvfsconf = nvfsp->vfc_typenum + 1; + numused_vfsslots++; + if (nvfsp->vfc_vfsops->vfs_init) + (*nvfsp->vfc_vfsops->vfs_init)(nvfsp); + return(0); +} + +int +vfsconf_del(char * fs_name) +{ + int entriesRemaining; + struct vfsconf *vfsconflistentry; + struct vfsconf *prevconf = NULL; + struct vfsconf *targetconf = NULL; + + prevconf = vfsconflistentry = vfsconf; + for (entriesRemaining = maxvfsslots; + (entriesRemaining > 0) && (vfsconflistentry != NULL); + --entriesRemaining) { + if ((vfsconflistentry->vfc_vfsops != NULL) && (strcmp(vfsconflistentry->vfc_name, fs_name) == 0)) { + targetconf = vfsconflistentry; + break; + }; + prevconf = vfsconflistentry; + vfsconflistentry = vfsconflistentry->vfc_next; + }; + + if (targetconf != NULL) { + if (prevconf != NULL) { + /* Unlink the target entry from the list: + and decrement our count */ + prevconf->vfc_next = targetconf->vfc_next; + numused_vfsslots--; + } else { + /* XXX need real error code for no previous entry in list */ + return(-1); + } + } else { + /* XXX need real error code for entry not found */ + return(-1); + }; + return(0); +} diff --git a/bsd/vfs/vfs_lookup.c b/bsd/vfs/vfs_lookup.c new file mode 100644 index 000000000..f7eb1ca2a --- /dev/null 
+++ b/bsd/vfs/vfs_lookup.c @@ -0,0 +1,751 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vfs_lookup.c 8.10 (Berkeley) 5/27/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For _PC_NAME_MAX */ + +#if KTRACE +#include +#endif + +/* + * Convert a pathname into a pointer to a locked inode. + * + * The FOLLOW flag is set when symbolic links are to be followed + * when they occur at the end of the name translation process. + * Symbolic links are always followed for all other pathname + * components other than the last. + * + * The segflg defines whether the name is to be copied from user + * space or kernel space. + * + * Overall outline of namei: + * + * copy in name + * get starting directory + * while (!done && !error) { + * call lookup to search path. 
+ *	if symbolic link, massage name in buffer and continue
+ *	}
+ */
+/*
+ * namei: translate the pathname described by ndp into a vnode.
+ * On success ni_vp holds the result (locking per LOCKLEAF/LOCKPARENT);
+ * on failure the pathname buffer has been freed and ni_vp is NULL.
+ */
+int
+namei(ndp)
+	register struct nameidata *ndp;
+{
+	register struct filedesc *fdp;	/* pointer to file descriptor state */
+	register char *cp;		/* pointer into pathname argument */
+	register struct vnode *dp;	/* the directory we are searching */
+	struct iovec aiov;		/* uio for reading symbolic links */
+	struct uio auio;
+	int error, linklen;
+	struct componentname *cnp = &ndp->ni_cnd;
+	struct proc *p = cnp->cn_proc;
+
+	ndp->ni_cnd.cn_cred = ndp->ni_cnd.cn_proc->p_ucred;
+#if DIAGNOSTIC
+	if (!cnp->cn_cred || !cnp->cn_proc)
+		panic ("namei: bad cred/proc");
+	if (cnp->cn_nameiop & (~OPMASK))
+		panic ("namei: nameiop contaminated with flags");
+	if (cnp->cn_flags & OPMASK)
+		panic ("namei: flags contaminated with nameiops");
+#endif
+	fdp = cnp->cn_proc->p_fd;
+
+	/*
+	 * Get a buffer for the name to be translated, and copy the
+	 * name into the buffer.
+	 */
+	if ((cnp->cn_flags & HASBUF) == 0) {
+		MALLOC_ZONE(cnp->cn_pnbuf, caddr_t,
+			    MAXPATHLEN, M_NAMEI, M_WAITOK);
+		cnp->cn_pnlen = MAXPATHLEN;
+	}
+	if (ndp->ni_segflg == UIO_SYSSPACE)
+		error = copystr(ndp->ni_dirp, cnp->cn_pnbuf,
+			    MAXPATHLEN, &ndp->ni_pathlen);
+	else
+		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
+			    MAXPATHLEN, &ndp->ni_pathlen);
+	/*
+	 * Do not allow empty pathnames
+	 */
+	if (!error && *cnp->cn_pnbuf == '\0')
+		error = ENOENT;
+
+	if (error) {
+		_FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+		ndp->ni_vp = NULL;
+		return (error);
+	}
+	ndp->ni_loopcnt = 0;	/* symlink expansions so far (ELOOP guard) */
+#if KTRACE
+	if (KTRPOINT(cnp->cn_proc, KTR_NAMEI))
+		ktrnamei(cnp->cn_proc->p_tracep, cnp->cn_pnbuf);
+#endif
+
+	/*
+	 * Get starting point for the translation.
+	 */
+	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULL)
+		ndp->ni_rootdir = rootvnode;
+	dp = fdp->fd_cdir;
+	VREF(dp);
+	for (;;) {
+		/*
+		 * Check if root directory should replace current directory.
+		 * Done at start of translation and after symbolic link.
+		 */
+		cnp->cn_nameptr = cnp->cn_pnbuf;
+		if (*(cnp->cn_nameptr) == '/') {
+			vrele(dp);
+			while (*(cnp->cn_nameptr) == '/') {
+				cnp->cn_nameptr++;
+				ndp->ni_pathlen--;
+			}
+			dp = ndp->ni_rootdir;
+			VREF(dp);
+		}
+		ndp->ni_startdir = dp;
+		if (error = lookup(ndp)) {
+			FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+			return (error);
+		}
+		/*
+		 * Check for symbolic link
+		 */
+		if ((cnp->cn_flags & ISSYMLINK) == 0) {
+			/* Done: free the buffer unless the caller asked to
+			 * keep it (SAVENAME/SAVESTART), in which case it now
+			 * owns it (HASBUF). */
+			if ((cnp->cn_flags & (SAVENAME | SAVESTART)) == 0) {
+				FREE_ZONE(cnp->cn_pnbuf,
+					cnp->cn_pnlen, M_NAMEI);
+			} else {
+				cnp->cn_flags |= HASBUF;
+			}
+			return (0);
+		}
+		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
+			VOP_UNLOCK(ndp->ni_dvp, 0, p);
+		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
+			error = ELOOP;
+			break;
+		}
+		/* Read the link text into a fresh buffer unless this is the
+		 * last component, in which case the pathname buffer itself
+		 * may be overwritten. */
+		if (ndp->ni_pathlen > 1) {
+			MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
+		} else {
+			cp = cnp->cn_pnbuf;
+		}
+		aiov.iov_base = cp;
+		aiov.iov_len = MAXPATHLEN;
+		auio.uio_iov = &aiov;
+		auio.uio_iovcnt = 1;
+		auio.uio_offset = 0;
+		auio.uio_rw = UIO_READ;
+		auio.uio_segflg = UIO_SYSSPACE;
+		auio.uio_procp = (struct proc *)0;
+		auio.uio_resid = MAXPATHLEN;
+		if (error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred)) {
+			if (ndp->ni_pathlen > 1)
+				_FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
+			break;
+		}
+		linklen = MAXPATHLEN - auio.uio_resid;
+		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
+			if (ndp->ni_pathlen > 1)
+				_FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
+			error = ENAMETOOLONG;
+			break;
+		}
+		/* Splice the link text ahead of the unconsumed remainder
+		 * of the path and restart translation from it. */
+		if (ndp->ni_pathlen > 1) {
+			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
+			FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+			cnp->cn_pnbuf = cp;
+			cnp->cn_pnlen = MAXPATHLEN;
+		} else
+			cnp->cn_pnbuf[linklen] = '\0';
+		ndp->ni_pathlen += linklen;
+		vput(ndp->ni_vp);
+		dp = ndp->ni_dvp;
+	}
+	/* Error exit from the symlink loop: drop buffer and both vnodes. */
+	FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
+	vrele(ndp->ni_dvp);
+	vput(ndp->ni_vp);
+	ndp->ni_vp = NULL;
+	return (error);
+}
+
+/*
+ * Search a pathname.
+ * This is a very central and rather complicated routine.
+ * + * The pathname is pointed to by ni_ptr and is of length ni_pathlen. + * The starting directory is taken from ni_startdir. The pathname is + * descended until done, or a symbolic link is encountered. The variable + * ni_more is clear if the path is completed; it is set to one if a + * symbolic link needing interpretation is encountered. + * + * The flag argument is LOOKUP, CREATE, RENAME, or DELETE depending on + * whether the name is to be looked up, created, renamed, or deleted. + * When CREATE, RENAME, or DELETE is specified, information usable in + * creating, renaming, or deleting a directory entry may be calculated. + * If flag has LOCKPARENT or'ed into it, the parent directory is returned + * locked. If flag has WANTPARENT or'ed into it, the parent directory is + * returned unlocked. Otherwise the parent directory is not returned. If + * the target of the pathname exists and LOCKLEAF is or'ed into the flag + * the target is returned locked, otherwise it is returned unlocked. + * When creating or renaming and LOCKPARENT is specified, the target may not + * be ".". When deleting and LOCKPARENT is specified, the target may be ".". + * + * Overall outline of lookup: + * + * dirloop: + * identify next component of name at ndp->ni_ptr + * handle degenerate case where name is null string + * if .. and crossing mount points and on mounted filesys, find parent + * call VOP_LOOKUP routine for next component name + * directory vnode returned in ni_dvp, unlocked unless LOCKPARENT set + * component vnode returned in ni_vp (if it exists), locked. 
+ *	   if result vnode is mounted on and crossing mount points,
+ *	     find mounted on vnode
+ *	if more components of name, do next level at dirloop
+ *	return the answer in ni_vp, locked if LOCKLEAF set
+ *	    if LOCKPARENT set, return locked parent in ni_dvp
+ *	    if WANTPARENT set, return unlocked parent in ni_dvp
+ */
+int
+lookup(ndp)
+	register struct nameidata *ndp;
+{
+	register char *cp;		/* pointer into pathname argument */
+	register struct vnode *dp = 0;	/* the directory we are searching */
+	struct vnode *tdp;		/* saved dp */
+	struct mount *mp;		/* mount table entry */
+	int namemax = 0;		/* maximum number of bytes for filename returned by pathconf() */
+	int docache;			/* == 0 do not cache last component */
+	int wantparent;			/* 1 => wantparent or lockparent flag */
+	int rdonly;			/* lookup read-only flag bit */
+	int error = 0;
+	struct componentname *cnp = &ndp->ni_cnd;
+	struct proc *p = cnp->cn_proc;
+	int i;
+
+	/*
+	 * Setup: break out flag bits into variables.
+	 */
+	wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT);
+	docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
+	if (cnp->cn_nameiop == DELETE ||
+	    (wantparent && cnp->cn_nameiop != CREATE &&
+	     cnp->cn_nameiop != LOOKUP))
+		docache = 0;
+	rdonly = cnp->cn_flags & RDONLY;
+	ndp->ni_dvp = NULL;
+	cnp->cn_flags &= ~ISSYMLINK;
+	dp = ndp->ni_startdir;
+	ndp->ni_startdir = NULLVP;
+	vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+
+dirloop:
+	/*
+	 * Search a new directory.
+	 *
+	 * The cn_hash value is for use by vfs_cache.
+	 * Check pathconf for maximum length of name
+	 * The last component of the filename is left accessible via
+	 * cnp->cn_nameptr for callers that need the name. Callers needing
+	 * the name set the SAVENAME flag. When done, they assume
+	 * responsibility for freeing the pathname buffer.
+	 */
+	cnp->cn_consume = 0;
+	cnp->cn_hash = 0;
+	/* Position-weighted hash over the component, consumed by vfs_cache. */
+	for (cp = cnp->cn_nameptr, i=1; *cp != 0 && *cp != '/'; i++, cp++)
+		cnp->cn_hash += (unsigned char)*cp * i;
+	cnp->cn_namelen = cp - cnp->cn_nameptr;
+	if (VOP_PATHCONF(dp, _PC_NAME_MAX, &namemax))
+		namemax = NAME_MAX;	/* pathconf failed: fall back to system limit */
+	if (cnp->cn_namelen > namemax) {
+		error = ENAMETOOLONG;
+		goto bad;
+	}
+#ifdef NAMEI_DIAGNOSTIC
+	{ char c = *cp;
+	*cp = '\0';
+	printf("{%s}: ", cnp->cn_nameptr);
+	*cp = c; }
+#endif
+	ndp->ni_pathlen -= cnp->cn_namelen;
+	ndp->ni_next = cp;
+	cnp->cn_flags |= MAKEENTRY;
+	if (*cp == '\0' && docache == 0)
+		cnp->cn_flags &= ~MAKEENTRY;
+	if (cnp->cn_namelen == 2 &&
+	    cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.')
+		cnp->cn_flags |= ISDOTDOT;
+	else
+		cnp->cn_flags &= ~ISDOTDOT;
+	if (*ndp->ni_next == 0)
+		cnp->cn_flags |= ISLASTCN;
+	else
+		cnp->cn_flags &= ~ISLASTCN;
+
+
+	/*
+	 * Check for degenerate name (e.g. / or "")
+	 * which is a way of talking about a directory,
+	 * e.g. like "/." or ".".
+	 */
+	if (cnp->cn_nameptr[0] == '\0') {
+		if (dp->v_type != VDIR) {
+			error = ENOTDIR;
+			goto bad;
+		}
+		if (cnp->cn_nameiop != LOOKUP) {
+			error = EISDIR;
+			goto bad;
+		}
+		if (wantparent) {
+			ndp->ni_dvp = dp;
+			VREF(dp);
+		}
+		ndp->ni_vp = dp;
+		if (!(cnp->cn_flags & (LOCKPARENT | LOCKLEAF)))
+			VOP_UNLOCK(dp, 0, p);
+		if (cnp->cn_flags & SAVESTART)
+			panic("lookup: SAVESTART");
+		return (0);
+	}
+
+	/*
+	 * Handle "..": two special cases.
+	 * 1. If at root directory (e.g. after chroot)
+	 *    or at absolute root directory
+	 *    then ignore it so can't get out.
+	 * 2. If this vnode is the root of a mounted
+	 *    filesystem, then replace it with the
+	 *    vnode which was mounted on so we take the
+	 *    .. in the other file system.
+	 */
+	if (cnp->cn_flags & ISDOTDOT) {
+		for (;;) {
+			if (dp == ndp->ni_rootdir || dp == rootvnode) {
+				ndp->ni_dvp = dp;
+				ndp->ni_vp = dp;
+				VREF(dp);
+				goto nextname;
+			}
+			if ((dp->v_flag & VROOT) == 0 ||
+			    (cnp->cn_flags & NOCROSSMOUNT))
+				break;
+			tdp = dp;
+			dp = dp->v_mount->mnt_vnodecovered;
+			vput(tdp);
+			VREF(dp);
+			vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+		}
+	}
+
+	/*
+	 * We now have a segment name to search for, and a directory to search.
+	 */
+unionlookup:
+	ndp->ni_dvp = dp;
+	ndp->ni_vp = NULL;
+	if (error = VOP_LOOKUP(dp, &ndp->ni_vp, cnp)) {
+#if DIAGNOSTIC
+		if (ndp->ni_vp != NULL)
+			panic("leaf should be empty");
+#endif
+#ifdef NAMEI_DIAGNOSTIC
+		printf("not found\n");
+#endif
+		/* Miss at a union-mount root: retry in the covered layer. */
+		if ((error == ENOENT) &&
+		    (dp->v_flag & VROOT) &&
+		    (dp->v_mount->mnt_flag & MNT_UNION)) {
+			tdp = dp;
+			dp = dp->v_mount->mnt_vnodecovered;
+			vput(tdp);
+			VREF(dp);
+			vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+			goto unionlookup;
+		}
+
+		if (error != EJUSTRETURN)
+			goto bad;
+		/*
+		 * If creating and at end of pathname, then can consider
+		 * allowing file to be created.
+		 */
+		if (rdonly) {
+			error = EROFS;
+			goto bad;
+		}
+		/*
+		 * We return with ni_vp NULL to indicate that the entry
+		 * doesn't currently exist, leaving a pointer to the
+		 * (possibly locked) directory inode in ndp->ni_dvp.
+		 */
+		if (cnp->cn_flags & SAVESTART) {
+			ndp->ni_startdir = ndp->ni_dvp;
+			VREF(ndp->ni_startdir);
+		}
+		if (kdebug_enable)
+		        kdebug_lookup(ndp->ni_dvp, cnp);
+		return (0);
+	}
+#ifdef NAMEI_DIAGNOSTIC
+	printf("found\n");
+#endif
+
+	/*
+	 * Take into account any additional components consumed by
+	 * the underlying filesystem.
+	 */
+	if (cnp->cn_consume > 0) {
+		cnp->cn_nameptr += cnp->cn_consume;
+		ndp->ni_next += cnp->cn_consume;
+		ndp->ni_pathlen -= cnp->cn_consume;
+		cnp->cn_consume = 0;
+	}
+
+	dp = ndp->ni_vp;
+	/*
+	 * Check to see if the vnode has been mounted on;
+	 * if so find the root of the mounted file system.
+	 */
+	while (dp->v_type == VDIR && (mp = dp->v_mountedhere) &&
+	       (cnp->cn_flags & NOCROSSMOUNT) == 0) {
+		if (vfs_busy(mp, 0, 0, p))
+			continue;	/* raced with unmount; re-test */
+		error = VFS_ROOT(mp, &tdp);
+		vfs_unbusy(mp, p);
+		if (error)
+			goto bad2;
+		vput(dp);
+		ndp->ni_vp = dp = tdp;
+	}
+
+	/*
+	 * Check for symbolic link
+	 */
+	if ((dp->v_type == VLNK) &&
+	    ((cnp->cn_flags & FOLLOW) || *ndp->ni_next == '/')) {
+		cnp->cn_flags |= ISSYMLINK;
+		return (0);
+	}
+
+nextname:
+	/*
+	 * Not a symbolic link.  If more pathname,
+	 * continue at next component, else return.
+	 */
+	if (*ndp->ni_next == '/') {
+		cnp->cn_nameptr = ndp->ni_next;
+		while (*cnp->cn_nameptr == '/') {
+			cnp->cn_nameptr++;
+			ndp->ni_pathlen--;
+		}
+		vrele(ndp->ni_dvp);
+		goto dirloop;
+	}
+
+	/*
+	 * Disallow directory write attempts on read-only file systems.
+	 */
+	if (rdonly &&
+	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
+		error = EROFS;
+		goto bad2;
+	}
+	if (cnp->cn_flags & SAVESTART) {
+		ndp->ni_startdir = ndp->ni_dvp;
+		VREF(ndp->ni_startdir);
+	}
+	if (!wantparent)
+		vrele(ndp->ni_dvp);
+	if ((cnp->cn_flags & LOCKLEAF) == 0)
+		VOP_UNLOCK(dp, 0, p);
+	if (kdebug_enable)
+	        kdebug_lookup(dp, cnp);
+	return (0);
+
+bad2:
+	if ((cnp->cn_flags & LOCKPARENT) && *ndp->ni_next == '\0')
+		VOP_UNLOCK(ndp->ni_dvp, 0, p);
+	vrele(ndp->ni_dvp);
+bad:
+	/* NOTE(review): dp is vput() before being passed to kdebug_lookup()
+	 * below; the trace only logs the pointer value, but confirm the
+	 * ordering is intentional. */
+	vput(dp);
+	ndp->ni_vp = NULL;
+	if (kdebug_enable)
+	        kdebug_lookup(dp, cnp);
+	return (error);
+}
+
+/*
+ * relookup - lookup a path name component
+ *	Used by lookup to re-acquire things.
+ */
+int
+relookup(dvp, vpp, cnp)
+	struct vnode *dvp, **vpp;
+	struct componentname *cnp;
+{
+	struct proc *p = cnp->cn_proc;
+	struct vnode *dp = 0;		/* the directory we are searching */
+	int docache;			/* == 0 do not cache last component */
+	int wantparent;			/* 1 => wantparent or lockparent flag */
+	int rdonly;			/* lookup read-only flag bit */
+	int error = 0;
+#ifdef NAMEI_DIAGNOSTIC
+	int newhash;			/* DEBUG: check name hash */
+	char *cp;			/* DEBUG: check name ptr/len */
+#endif
+
+	/*
+	 * Setup: break out flag bits into variables.
+	 */
+	wantparent = cnp->cn_flags & (LOCKPARENT|WANTPARENT);
+	docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
+	if (cnp->cn_nameiop == DELETE ||
+	    (wantparent && cnp->cn_nameiop != CREATE))
+		docache = 0;
+	rdonly = cnp->cn_flags & RDONLY;
+	cnp->cn_flags &= ~ISSYMLINK;
+	dp = dvp;
+	vn_lock(dp, LK_EXCLUSIVE | LK_RETRY, p);
+
+/* dirloop: */
+	/*
+	 * Search a new directory.
+	 *
+	 * The cn_hash value is for use by vfs_cache.
+	 * The last component of the filename is left accessible via
+	 * cnp->cn_nameptr for callers that need the name. Callers needing
+	 * the name set the SAVENAME flag. When done, they assume
+	 * responsibility for freeing the pathname buffer.
+	 */
+#ifdef NAMEI_DIAGNOSTIC
+	for (newhash = 0, cp = cnp->cn_nameptr; *cp != 0 && *cp != '/'; cp++)
+		newhash += (unsigned char)*cp;
+	if (newhash != cnp->cn_hash)
+		panic("relookup: bad hash");
+	if (cnp->cn_namelen != cp - cnp->cn_nameptr)
+		panic ("relookup: bad len");
+	if (*cp != 0)
+		panic("relookup: not last component");
+	printf("{%s}: ", cnp->cn_nameptr);
+#endif
+
+	/*
+	 * Check for degenerate name (e.g. / or "")
+	 * which is a way of talking about a directory,
+	 * e.g. like "/." or ".".
+	 */
+	if (cnp->cn_nameptr[0] == '\0') {
+		if (cnp->cn_nameiop != LOOKUP || wantparent) {
+			error = EISDIR;
+			goto bad;
+		}
+		if (dp->v_type != VDIR) {
+			error = ENOTDIR;
+			goto bad;
+		}
+		if (!(cnp->cn_flags & LOCKLEAF))
+			VOP_UNLOCK(dp, 0, p);
+		*vpp = dp;
+		if (cnp->cn_flags & SAVESTART)
+			panic("lookup: SAVESTART");
+		return (0);
+	}
+
+	if (cnp->cn_flags & ISDOTDOT)
+		panic ("relookup: lookup on dot-dot");
+
+	/*
+	 * We now have a segment name to search for, and a directory to search.
+	 */
+	if (error = VOP_LOOKUP(dp, vpp, cnp)) {
+#if DIAGNOSTIC
+		if (*vpp != NULL)
+			panic("leaf should be empty");
+#endif
+		if (error != EJUSTRETURN)
+			goto bad;
+		/*
+		 * If creating and at end of pathname, then can consider
+		 * allowing file to be created.
+		 */
+		if (rdonly) {
+			error = EROFS;
+			goto bad;
+		}
+		/* ASSERT(dvp == ndp->ni_startdir) */
+		if (cnp->cn_flags & SAVESTART)
+			VREF(dvp);
+		/*
+		 * We return with ni_vp NULL to indicate that the entry
+		 * doesn't currently exist, leaving a pointer to the
+		 * (possibly locked) directory inode in ndp->ni_dvp.
+		 */
+		return (0);
+	}
+	dp = *vpp;
+
+#if DIAGNOSTIC
+	/*
+	 * Check for symbolic link
+	 */
+	if (dp->v_type == VLNK && (cnp->cn_flags & FOLLOW))
+		panic ("relookup: symlink found.\n");
+#endif
+
+	/*
+	 * Disallow directory write attempts on read-only file systems.
+	 */
+	if (rdonly &&
+	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
+		error = EROFS;
+		goto bad2;
+	}
+	/* ASSERT(dvp == ndp->ni_startdir) */
+	if (cnp->cn_flags & SAVESTART)
+		VREF(dvp);
+
+	if (!wantparent)
+		vrele(dvp);
+	if ((cnp->cn_flags & LOCKLEAF) == 0)
+		VOP_UNLOCK(dp, 0, p);
+	return (0);
+
+bad2:
+	if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN))
+		VOP_UNLOCK(dvp, 0, p);
+	vrele(dvp);
+bad:
+	vput(dp);
+	*vpp = NULL;
+	return (error);
+}
+
+
+#define NUMPARMS 7
+
+/*
+ * Pack the pathname leading up to (and including) the current component
+ * into NUMPARMS longs and emit it on the kdebug trace stream, tagged with
+ * the vnode pointer.  Names longer than sizeof(dbg_parms) keep their tail
+ * (the most specific components); short tails are '>'/NUL padded.
+ * (K&R definition; implicit int return, value unused.)
+ */
+kdebug_lookup(dp, cnp)
+	struct vnode *dp;
+	struct componentname *cnp;
+{
+	register int i, n;
+	register int dbg_namelen;
+	register char *dbg_nameptr;
+	long dbg_parms[NUMPARMS];
+	char dbg_buf[4];
+	static char *dbg_filler = ">>>>";
+
+	/* Collect the pathname for tracing */
+	dbg_namelen = (cnp->cn_nameptr - cnp->cn_pnbuf) + cnp->cn_namelen;
+	dbg_nameptr = cnp->cn_nameptr + cnp->cn_namelen;
+
+	if (dbg_namelen > sizeof(dbg_parms))
+		dbg_namelen = sizeof(dbg_parms);
+	dbg_nameptr -= dbg_namelen;
+
+	i = 0;
+
+	while (dbg_namelen > 0) {
+		if (dbg_namelen >= 4) {
+			/* NOTE(review): word-at-a-time copy via a long cast;
+			 * presumably the platform tolerates the unaligned
+			 * access — confirm for new ports. */
+			dbg_parms[i++] = *(long *)dbg_nameptr;
+			dbg_nameptr += sizeof(long);
+			dbg_namelen -= sizeof(long);
+		} else {
+			for (n = 0; n < dbg_namelen; n++)
+				dbg_buf[n] = *dbg_nameptr++;
+			while (n <= 3) {
+				if (*dbg_nameptr)
+					dbg_buf[n++] = '>';
+				else
+					dbg_buf[n++] = 0;
+			}
+			dbg_parms[i++] = *(long *)&dbg_buf[0];
+
+			break;
+		}
+	}
+	while (i < NUMPARMS) {
+		if (*dbg_nameptr)
+			dbg_parms[i++] = *(long *)dbg_filler;
+		else
+			dbg_parms[i++] = 0;
+	}
+	KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW,36)) | DBG_FUNC_NONE,
+		dp, dbg_parms[0], dbg_parms[1], dbg_parms[2], 0);
+	KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW,36)) | DBG_FUNC_NONE,
+		dbg_parms[3], dbg_parms[4], dbg_parms[5], dbg_parms[6], 0);
+}
diff --git a/bsd/vfs/vfs_subr.c b/bsd/vfs/vfs_subr.c
new file mode 100644
index 000000000..216c1a0e4
--- /dev/null
+++ b/bsd/vfs/vfs_subr.c
@@ -0,0 +1,2699 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc.
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ *
+ * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
+ */
+
+/*
+ * External virtual filesystem routines
+ */
+
+#define DIAGNOSTIC 1
+
+/* NOTE(review): the #include targets below lost their contents during
+ * extraction; restore the header list from upstream bsd/vfs/vfs_subr.c. */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+/* Translation tables between file-mode type bits and vnode types
+ * (indices/values per the S_IF* constants used below). */
+enum vtype iftovt_tab[16] = {
+	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
+	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
+};
+int	vttoif_tab[9] = {
+	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
+	S_IFSOCK, S_IFIFO, S_IFMT,
+};
+
+static void vfree(struct vnode *vp);
+static void vinactive(struct vnode *vp);
+extern int vnreclaim(int count);
+extern kern_return_t
+	adjust_vm_object_cache(vm_size_t oval, vm_size_t nval);
+
+/*
+ * Insq/Remq for the vnode usage lists.
+ */
+#define bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
+#define bufremvn(bp) {  \
+	LIST_REMOVE(bp, b_vnbufs); \
+	(bp)->b_vnbufs.le_next = NOLIST; \
+}
+
+TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
+TAILQ_HEAD(inactivelst, vnode) vnode_inactive_list;	/* vnode inactive list */
+struct mntlist mountlist;	/* mounted filesystem list */
+
+#if DIAGNOSTIC
+#define VLISTCHECK(fun, vp, list)	\
+	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
+		panic("%s: %s vnode not on %slist", (fun), (list), (list));
+
+#define VINACTIVECHECK(fun, vp, expected)	\
+	do {	\
+		int __is_inactive = ISSET((vp)->v_flag, VUINACTIVE);	\
+		if (__is_inactive ^ expected)	\
+			panic("%s: %sinactive vnode, expected %s", (fun),	\
+				__is_inactive? "" : "not ",	\
+				expected? "inactive": "not inactive"); \
+	} while(0)
+#else
+#define VLISTCHECK(fun, vp, list)
+#define VINACTIVECHECK(fun, vp, expected)
+#endif /* DIAGNOSTIC */
+
+/* Mark vp as on no list; the 0xdeadb poison in tqe_prev is the
+ * "not on any list" sentinel tested by VONLIST/VLISTCHECK. */
+#define VLISTNONE(vp)	\
+	do {	\
+		(vp)->v_freelist.tqe_next = (struct vnode *)0;	\
+		(vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;	\
+	} while(0)
+
+#define VONLIST(vp)	\
+	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)
+
+/* remove a vnode from free vnode list */
+#define VREMFREE(fun, vp)	\
+	do {	\
+		VLISTCHECK((fun), (vp), "free");	\
+		TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);	\
+		VLISTNONE((vp));	\
+		freevnodes--;	\
+	} while(0)
+
+/* remove a vnode from inactive vnode list */
+#define VREMINACTIVE(fun, vp)	\
+	do {	\
+		VLISTCHECK((fun), (vp), "inactive"); \
+		VINACTIVECHECK((fun), (vp), VUINACTIVE); \
+		TAILQ_REMOVE(&vnode_inactive_list, (vp), v_freelist); \
+		CLR((vp)->v_flag, VUINACTIVE); \
+		VLISTNONE((vp));	\
+		inactivevnodes--;	\
+	} while(0)
+
+/* NOTE(review): "raclaim" (sic) below is a typo in a runtime panic
+ * string; left untouched here because it is program output, not a
+ * comment — fix separately if desired. */
+#define VORECLAIM_ENABLE(vp)   \
+	do {	\
+		if (ISSET((vp)->v_flag, VORECLAIM))	\
+			panic("vm object raclaim already");	\
+		SET((vp)->v_flag, VORECLAIM);	\
+	} while(0)
+
+#define VORECLAIM_DISABLE(vp)	\
+	do {	\
+		CLR((vp)->v_flag, VORECLAIM);	\
+		if (ISSET((vp)->v_flag, VXWANT)) {	\
+			CLR((vp)->v_flag, VXWANT);	\
+			wakeup((caddr_t)(vp));	\
+		}	\
+	} while(0)
+
+/*
+ * Have to declare first two locks as actual data even if !MACH_SLOCKS, since
+ * a pointers to them get passed around.
+ */
+simple_lock_data_t mountlist_slock;
+simple_lock_data_t mntvnode_slock;
+decl_simple_lock_data(,mntid_slock);
+decl_simple_lock_data(,vnode_free_list_slock);
+decl_simple_lock_data(,spechash_slock);
+
+/*
+ * vnodetarget is the amount of vnodes we expect to get back
+ * from the inactive vnode list and VM object cache.
+ * As vnreclaim() is a mainly cpu bound operation for faster
+ * processors this number could be higher.
+ * Having this number too high introduces longer delays in
+ * the execution of getnewvnode().
+ */ +unsigned long vnodetarget; /* target for vnreclaim() */ +#define VNODE_FREE_TARGET 20 /* Default value for vnodetarget */ + +/* + * We need quite a few vnodes on the free list to sustain the + * rapid stat() the compilation process does, and still benefit from the name + * cache. Having too few vnodes on the free list causes serious disk + * thrashing as we cycle through them. + */ +#define VNODE_FREE_MIN 100 /* freelist should have at least these many */ + +/* + * We need to get vnodes back from the VM object cache when a certain # + * of vnodes are reused from the freelist. This is essential for the + * caching to be effective in the namecache and the buffer cache [for the + * metadata]. + */ +#define VNODE_TOOMANY_REUSED (VNODE_FREE_MIN/4) + +/* + * If we have enough vnodes on the freelist we do not want to reclaim + * the vnodes from the VM object cache. + */ +#define VNODE_FREE_ENOUGH (VNODE_FREE_MIN + (VNODE_FREE_MIN/2)) + +/* + * Initialize the vnode management data structures. + */ +void +vntblinit() +{ + extern struct lock__bsd__ exchangelock; + + simple_lock_init(&mountlist_slock); + simple_lock_init(&mntvnode_slock); + simple_lock_init(&mntid_slock); + simple_lock_init(&spechash_slock); + TAILQ_INIT(&vnode_free_list); + simple_lock_init(&vnode_free_list_slock); + TAILQ_INIT(&vnode_inactive_list); + CIRCLEQ_INIT(&mountlist); + lockinit(&exchangelock, PVFS, "exchange", 0, 0); + + if (!vnodetarget) + vnodetarget = VNODE_FREE_TARGET; + + /* + * Scale the vm_object_cache to accomodate the vnodes + * we want to cache + */ + (void) adjust_vm_object_cache(0, desiredvnodes - VNODE_FREE_MIN); +} + +/* Reset the VM Object Cache with the values passed in */ +kern_return_t +reset_vmobjectcache(unsigned int val1, unsigned int val2) +{ + vm_size_t oval = val1 - VNODE_FREE_MIN; + vm_size_t nval = val2 - VNODE_FREE_MIN; + + return(adjust_vm_object_cache(oval, nval)); +} + +/* + * Mark a mount point as busy. Used to synchronize access and to delay + * unmounting. 
Interlock is not released on failure. + */ +int +vfs_busy(mp, flags, interlkp, p) + struct mount *mp; + int flags; + struct slock *interlkp; + struct proc *p; +{ + int lkflags; + + if (mp->mnt_kern_flag & MNTK_UNMOUNT) { + if (flags & LK_NOWAIT) + return (ENOENT); + mp->mnt_kern_flag |= MNTK_MWAIT; + if (interlkp) + simple_unlock(interlkp); + /* + * Since all busy locks are shared except the exclusive + * lock granted when unmounting, the only place that a + * wakeup needs to be done is at the release of the + * exclusive lock at the end of dounmount. + */ + sleep((caddr_t)mp, PVFS); + if (interlkp) + simple_lock(interlkp); + return (ENOENT); + } + lkflags = LK_SHARED; + if (interlkp) + lkflags |= LK_INTERLOCK; + if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p)) + panic("vfs_busy: unexpected lock failure"); + return (0); +} + +/* + * Free a busy filesystem. + */ +void +vfs_unbusy(mp, p) + struct mount *mp; + struct proc *p; +{ + + lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p); +} + +/* + * Lookup a filesystem type, and if found allocate and initialize + * a mount structure for it. + * + * Devname is usually updated by mount(8) after booting. 
+ */ +int +vfs_rootmountalloc(fstypename, devname, mpp) + char *fstypename; + char *devname; + struct mount **mpp; +{ + struct proc *p = current_proc(); /* XXX */ + struct vfsconf *vfsp; + struct mount *mp; + + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (!strcmp(vfsp->vfc_name, fstypename)) + break; + if (vfsp == NULL) + return (ENODEV); + mp = _MALLOC_ZONE((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK); + bzero((char *)mp, (u_long)sizeof(struct mount)); + lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0); + (void)vfs_busy(mp, LK_NOWAIT, 0, p); + LIST_INIT(&mp->mnt_vnodelist); + mp->mnt_vfc = vfsp; + mp->mnt_op = vfsp->vfc_vfsops; + mp->mnt_flag = MNT_RDONLY; + mp->mnt_vnodecovered = NULLVP; + vfsp->vfc_refcount++; + mp->mnt_stat.f_type = vfsp->vfc_typenum; + mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; + strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); + mp->mnt_stat.f_mntonname[0] = '/'; + (void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0); + *mpp = mp; + return (0); +} + +/* + * Find an appropriate filesystem to use for the root. If a filesystem + * has not been preselected, walk through the list of known filesystems + * trying those that have mountroot routines, and try them until one + * works or we have tried them all. + */ +int +vfs_mountroot() +{ + struct vfsconf *vfsp; + extern int (*mountroot)(void); + int error; + + if (mountroot != NULL) { + error = (*mountroot)(); + return (error); + } + + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { + if (vfsp->vfc_mountroot == NULL) + continue; + if ((error = (*vfsp->vfc_mountroot)()) == 0) + return (0); + if (error != EINVAL) + printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error); + } + return (ENODEV); +} + +/* + * Lookup a mount point by filesystem identifier. 
+ */ +struct mount * +vfs_getvfs(fsid) + fsid_t *fsid; +{ + register struct mount *mp; + + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; + mp = mp->mnt_list.cqe_next) { + if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && + mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { + simple_unlock(&mountlist_slock); + return (mp); + } + } + simple_unlock(&mountlist_slock); + return ((struct mount *)0); +} + +/* + * Get a new unique fsid + */ +void +vfs_getnewfsid(mp) + struct mount *mp; +{ +static u_short xxxfs_mntid; + + fsid_t tfsid; + int mtype; + + simple_lock(&mntid_slock); + mtype = mp->mnt_vfc->vfc_typenum; + mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0); + mp->mnt_stat.f_fsid.val[1] = mtype; + if (xxxfs_mntid == 0) + ++xxxfs_mntid; + tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid); + tfsid.val[1] = mtype; + if (mountlist.cqh_first != (void *)&mountlist) { + while (vfs_getvfs(&tfsid)) { + tfsid.val[0]++; + xxxfs_mntid++; + } + } + mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; + simple_unlock(&mntid_slock); +} + +/* + * Set vnode attributes to VNOVAL + */ +void +vattr_null(vap) + register struct vattr *vap; +{ + + vap->va_type = VNON; + vap->va_size = vap->va_bytes = VNOVAL; + vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid = + vap->va_fsid = vap->va_fileid = + vap->va_blocksize = vap->va_rdev = + vap->va_atime.tv_sec = vap->va_atime.tv_nsec = + vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec = + vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec = + vap->va_flags = vap->va_gen = VNOVAL; + vap->va_vaflags = 0; +} + +/* + * Routines having to do with the management of the vnode table. 
 */
extern int (**dead_vnodeop_p)(void *);
static void vclean __P((struct vnode *vp, int flag, struct proc *p));
extern void vgonel __P((struct vnode *vp, struct proc *p));
long numvnodes, freevnodes;
long inactivevnodes;
long vnode_reclaim_tried;
long vnode_objects_reclaimed;


extern struct vattr va_null;

/*
 * Return the next vnode from the free list.
 *
 * Allocates a fresh vnode while numvnodes is below desiredvnodes and
 * the freelist is not already ample; otherwise reuses one off the free
 * list, reclaiming from the inactive list / VM object cache first when
 * the freelist runs low.  On success *vpp holds a vnode with
 * v_usecount == 1, tagged and inserted on mp's vnode list.
 * Returns ENFILE when no vnode can be found or reclaimed.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	int (**vops)(void *);
	struct vnode **vpp;
{
	struct proc *p = current_proc();	/* XXX */
	struct vnode *vp;
	int cnt, didretry = 0;
	static int reused = 0;			/* track the reuse rate */
	int reclaimhits = 0;

retry:
	simple_lock(&vnode_free_list_slock);
	/*
	 * MALLOC a vnode if the number of vnodes has not reached the desired
	 * value and the number on the free list is still reasonable...
	 * reuse from the freelist even though we may evict a name cache entry
	 * to reduce the number of vnodes that accumulate.... vnodes tie up
	 * wired memory and are never garbage collected
	 */
	if (numvnodes < desiredvnodes && (freevnodes < (2 * VNODE_FREE_MIN))) {
		numvnodes++;
		simple_unlock(&vnode_free_list_slock);
		MALLOC_ZONE(vp, struct vnode *, sizeof *vp, M_VNODE, M_WAITOK);
		bzero((char *)vp, sizeof *vp);
		VLISTNONE(vp);		/* avoid double queue removal */
		simple_lock_init(&vp->v_interlock);
		goto done;
	}

	/*
	 * Once the desired number of vnodes are allocated,
	 * we start reusing the vnodes.
	 */
	if (freevnodes < VNODE_FREE_MIN) {
		/*
		 * if we are low on vnodes on the freelist attempt to get
		 * some back from the inactive list and VM object cache
		 */
		simple_unlock(&vnode_free_list_slock);
		(void)vnreclaim(vnodetarget);
		simple_lock(&vnode_free_list_slock);
	}
	if (numvnodes >= desiredvnodes && reused > VNODE_TOOMANY_REUSED) {
		reused = 0;
		if (freevnodes < VNODE_FREE_ENOUGH) {
			simple_unlock(&vnode_free_list_slock);
			(void)vnreclaim(vnodetarget);
			simple_lock(&vnode_free_list_slock);
		}
	}

	/* Walk the freelist for a vnode whose interlock we can take. */
	for (cnt = 0, vp = vnode_free_list.tqh_first;
	    vp != NULLVP; cnt++, vp = vp->v_freelist.tqe_next) {
		if (simple_lock_try(&vp->v_interlock)) {
			/* got the interlock */
			if (ISSET(vp->v_flag, VORECLAIM)) {
				/* skip over the vnodes that are being reclaimed */
				simple_unlock(&vp->v_interlock);
				reclaimhits++;
			} else
				break;
		}
	}

	/*
	 * Unless this is a bad time of the month, at most
	 * the first NCPUS items on the free list are
	 * locked, so this is close enough to being empty.
	 */
	if (vp == NULLVP) {
		simple_unlock(&vnode_free_list_slock);
		/* one reclaim-and-retry before declaring the table full */
		if (!(didretry++) && (vnreclaim(vnodetarget) > 0))
			goto retry;
		tablefull("vnode");
		log(LOG_EMERG, "%d vnodes locked, %d desired, %d numvnodes, "
			"%d free, %d inactive, %d being reclaimed\n",
			cnt, desiredvnodes, numvnodes, freevnodes, inactivevnodes,
			reclaimhits);
		*vpp = 0;
		return (ENFILE);
	}

	if (vp->v_usecount)
		panic("free vnode isn't: v_type = %d, v_usecount = %d?",
			vp->v_type, vp->v_usecount);

	VREMFREE("getnewvnode", vp);
	reused++;
	simple_unlock(&vnode_free_list_slock);
	vp->v_lease = NULL;
	cache_purge(vp);
	if (vp->v_type != VBAD)
		vgonel(vp, p);	/* clean and reclaim the vnode */
	else
		simple_unlock(&vp->v_interlock);
#if DIAGNOSTIC
	if (vp->v_data)
		panic("cleaned vnode isn't");
	{
	int s = splbio();
	if (vp->v_numoutput)
		panic("Clean vnode has pending I/O's");
	splx(s);
	}
#endif
	if (UBCINFOEXISTS(vp))
		panic("getnewvnode: ubcinfo not cleaned");
	else
		vp->v_ubcinfo = 0;

	/* reset per-vnode read-ahead / cluster-I/O state */
	vp->v_lastr = -1;
	vp->v_ralen = 0;
	vp->v_maxra = 0;
	vp->v_lastw = 0;
	vp->v_ciosiz = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;

done:
	vp->v_flag = VSTANDARD;
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}

/*
 * Move a vnode from one mount queue to another.
 * mp may be NULL, in which case the vnode ends up on no mount list.
 */
void
insmntque(vp, mp)
	struct vnode *vp;
	struct mount *mp;
{

	simple_lock(&mntvnode_slock);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) != NULL)
		LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
+ */ +void +vwakeup(bp) + register struct buf *bp; +{ + register struct vnode *vp; + + CLR(bp->b_flags, B_WRITEINPROG); + if (vp = bp->b_vp) { + if (--vp->v_numoutput < 0) + panic("vwakeup: neg numoutput"); + if ((vp->v_flag & VBWAIT) && vp->v_numoutput <= 0) { + if (vp->v_numoutput < 0) + panic("vwakeup: neg numoutput 2"); + vp->v_flag &= ~VBWAIT; + wakeup((caddr_t)&vp->v_numoutput); + } + } +} + +/* + * Flush out and invalidate all buffers associated with a vnode. + * Called with the underlying object locked. + */ +int +vinvalbuf(vp, flags, cred, p, slpflag, slptimeo) + register struct vnode *vp; + int flags; + struct ucred *cred; + struct proc *p; + int slpflag, slptimeo; +{ + register struct buf *bp; + struct buf *nbp, *blist; + int s, error = 0; + + if (flags & V_SAVE) { + if (error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) { + return (error); + } + if (vp->v_dirtyblkhd.lh_first != NULL || (vp->v_flag & VHASDIRTY)) + panic("vinvalbuf: dirty bufs"); + } + + for (;;) { + if ((blist = vp->v_cleanblkhd.lh_first) && flags & V_SAVEMETA) + while (blist && blist->b_lblkno < 0) + blist = blist->b_vnbufs.le_next; + if (!blist && (blist = vp->v_dirtyblkhd.lh_first) && + (flags & V_SAVEMETA)) + while (blist && blist->b_lblkno < 0) + blist = blist->b_vnbufs.le_next; + if (!blist) + break; + + for (bp = blist; bp; bp = nbp) { + nbp = bp->b_vnbufs.le_next; + if (flags & V_SAVEMETA && bp->b_lblkno < 0) + continue; + s = splbio(); + if (ISSET(bp->b_flags, B_BUSY)) { + SET(bp->b_flags, B_WANTED); + error = tsleep((caddr_t)bp, + slpflag | (PRIBIO + 1), "vinvalbuf", + slptimeo); + splx(s); + if (error) { + return (error); + } + break; + } + bremfree(bp); + SET(bp->b_flags, B_BUSY); + splx(s); + /* + * XXX Since there are no node locks for NFS, I believe + * there is a slight chance that a delayed write will + * occur while sleeping just above, so check for it. 
+ */ + if (ISSET(bp->b_flags, B_DELWRI) && (flags & V_SAVE)) { + (void) VOP_BWRITE(bp); + break; + } + SET(bp->b_flags, B_INVAL); + brelse(bp); + } + } + if (!(flags & V_SAVEMETA) && + (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first)) + panic("vinvalbuf: flush failed"); + return (0); +} + +/* + * Associate a buffer with a vnode. + */ +void +bgetvp(vp, bp) + register struct vnode *vp; + register struct buf *bp; +{ + + if (bp->b_vp) + panic("bgetvp: not free"); + VHOLD(vp); + bp->b_vp = vp; + if (vp->v_type == VBLK || vp->v_type == VCHR) + bp->b_dev = vp->v_rdev; + else + bp->b_dev = NODEV; + /* + * Insert onto list for new vnode. + */ + bufinsvn(bp, &vp->v_cleanblkhd); +} + +/* + * Disassociate a buffer from a vnode. + */ +void +brelvp(bp) + register struct buf *bp; +{ + struct vnode *vp; + + if (bp->b_vp == (struct vnode *) 0) + panic("brelvp: NULL"); + /* + * Delete from old vnode list, if on one. + */ + if (bp->b_vnbufs.le_next != NOLIST) + bufremvn(bp); + vp = bp->b_vp; + bp->b_vp = (struct vnode *) 0; + HOLDRELE(vp); +} + +/* + * Reassign a buffer from one vnode to another. + * Used to assign file specific control information + * (indirect blocks) to the vnode to which they belong. + */ +void +reassignbuf(bp, newvp) + register struct buf *bp; + register struct vnode *newvp; +{ + register struct buflists *listheadp; + + if (newvp == NULL) { + printf("reassignbuf: NULL"); + return; + } + /* + * Delete from old vnode list, if on one. + */ + if (bp->b_vnbufs.le_next != NOLIST) + bufremvn(bp); + /* + * If dirty, put on list of dirty buffers; + * otherwise insert onto list of clean buffers. + */ + if (ISSET(bp->b_flags, B_DELWRI)) + listheadp = &newvp->v_dirtyblkhd; + else + listheadp = &newvp->v_cleanblkhd; + bufinsvn(bp, listheadp); +} + +/* + * Create a vnode for a block device. + * Used for root filesystem, argdev, and swap areas. + * Also used for memory file system special devices. 
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV) {
		*vpp = NULLVP;
		return (ENODEV);
	}
	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	/* if an alias for this device already exists, use that vnode instead */
	if (nvp = checkalias(vp, dev, (struct mount *)0)) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	struct proc *p = current_proc();	/* XXX */
	struct vnode *vp;
	struct vnode **vpp;
	struct specinfo * bufhold;
	int buffree = 1;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	/* preallocate so we never sleep while holding spechash_slock */
	bufhold = (struct specinfo *)_MALLOC_ZONE(sizeof(struct specinfo),
			M_VNODE, M_WAITOK);
	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	simple_lock(&spechash_slock);
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		simple_lock(&vp->v_interlock);
		if (vp->v_usecount == 0) {
			simple_unlock(&spechash_slock);
			vgonel(vp, p);
			goto loop;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
			simple_unlock(&spechash_slock);
			goto loop;
		}
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/* no live alias: attach specinfo to nvp and hash it */
		nvp->v_specinfo = bufhold;
		buffree = 0;	/* buffer used */
		bzero(nvp->v_specinfo, sizeof(struct specinfo));
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		simple_unlock(&spechash_slock);
		*vpp = nvp;
		if (vp != NULLVP) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		/* Since buffer is used just return */
		return (NULLVP);
	}
	/* found a VT_NON alias: gut it and hand it back to the caller */
	simple_unlock(&spechash_slock);
	VOP_UNLOCK(vp, 0, p);
	simple_lock(&vp->v_interlock);
	vclean(vp, 0, p);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	if (buffree)
		_FREE_ZONE((void *)bufhold, sizeof (struct specinfo), M_VNODE);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int error = 0;

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure. Cleaning is determined by checking that
	 * the VXLOCK flag is set.
	 */
	if ((flags & LK_INTERLOCK) == 0)
		simple_lock(&vp->v_interlock);
	if ((vp->v_flag & VXLOCK) || (vp->v_flag & VORECLAIM)) {
		vp->v_flag |= VXWANT;
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)vp, PINOD, "vget", 0);
		return (ENOENT);
	}

	/*
	 * vnode is being terminated.
	 * wait for vnode_pager_no_senders() to clear VTERMINATE
	 */
	if (ISSET(vp->v_flag, VTERMINATE)) {
		SET(vp->v_flag, VTERMWANT);
		simple_unlock(&vp->v_interlock);
		tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vclean", 0);
		return (ENOENT);
	}

	simple_lock(&vnode_free_list_slock);
	/* If on the free list, remove it from there */
	if (vp->v_usecount == 0) {
		if (VONLIST(vp))
			VREMFREE("vget", vp);
	} else {
		/* If on the inactive list, remove it from there */
		if ((vp->v_usecount == 1) && UBCINFOEXISTS(vp)) {
			if (VONLIST(vp))
				VREMINACTIVE("vget", vp);
		}
	}

	/* The vnode should not be on the inactive list here */
	VINACTIVECHECK("vget", vp, 0);

	simple_unlock(&vnode_free_list_slock);
	if (++vp->v_usecount <= 0)
		panic("vget: v_usecount");

	if (flags & LK_TYPE_MASK) {
		if (error = vn_lock(vp, flags | LK_INTERLOCK, p)) {
			/*
			 * If the vnode was not active in the first place
			 * must not call vrele() as VOP_INACTIVE() is not
			 * required.
			 * So inlined part of vrele() here.
			 */
			simple_lock(&vp->v_interlock);
			if (--vp->v_usecount == 1) {
				if (UBCINFOEXISTS(vp)) {
					vinactive(vp);
					simple_unlock(&vp->v_interlock);
					return (error);
				}
			}
			if (vp->v_usecount > 0) {
				simple_unlock(&vp->v_interlock);
				return (error);
			}
			if (vp->v_usecount < 0)
				panic("vget: negative usecount (%d)", vp->v_usecount);
			vfree(vp);
			simple_unlock(&vp->v_interlock);
		}
		return (error);
	}

	/*
	 * If this is a valid UBC vnode, if usecount is 1 and if
	 * this vnode was mapped in the past, it is likely
	 * that ubc_info freed due to the memory object getting recycled.
	 * Just re-initialize the ubc_info.
	 */
	if ((vp->v_usecount == 1) && UBCISVALID(vp)) {
		if (UBCINFOMISSING(vp))
			panic("vget: lost ubc_info");

		if (ISSET(vp->v_flag, VTERMINATE)) {
			/*
			 * vnode is being terminated.
			 * wait for vnode_pager_no_senders() to clear
			 * VTERMINATE
			 */
			SET(vp->v_flag, VTERMWANT);
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vclean", 0);
			/* return error */
			return (ENOENT);
		}

		if ((!UBCINFOEXISTS(vp)) && ISSET(vp->v_flag, VWASMAPPED)) {
			simple_unlock(&vp->v_interlock);
			ubc_info_init(vp);
			simple_lock(&vp->v_interlock);
		} else
			panic("vget: stolen ubc_info");

		if (!ubc_issetflags(vp, UI_HASOBJREF))
			if (ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT)))
				panic("vget: null object");
	}
out:	/* NOTE(review): label is never targeted by a goto in this function */
	if ((flags & LK_INTERLOCK) == 0)
		simple_unlock(&vp->v_interlock);
	return (0);
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress. So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
#ifdef notyet
	/*
	 * This code cannot be used until all the non-locking filesystems
	 * (notably NFS) are converted to properly lock and release nodes.
	 * Also, certain vnode operations change the locking state within
	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
	 * and symlink). Ideally these operations should not change the
	 * lock state, but should be changed to let the caller of the
	 * function unlock them. Otherwise all intermediate vnode layers
	 * (such as union, umapfs, etc) must catch these functions to do
	 * the necessary locking at their layer. Note that the inactive
	 * and lookup operations also change their lock state, but this
	 * cannot be avoided, so these two operations will always need
	 * to be handled in intermediate layers.
	 */
	struct vnode *vp = ap->a_vp;
	int vnflags, flags = ap->a_flags;

	if (vp->v_vnlock == NULL) {
		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
			return (0);
		MALLOC_ZONE(vp->v_vnlock, struct lock__bsd__ *,
			sizeof(struct lock__bsd__), M_VNODE, M_WAITOK);
		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
	}
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:
	case LK_SHARED:
		vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);
	case LK_RELEASE:
	default:
		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
	}
	if (flags & LK_INTERLOCK)
		vnflags |= LK_INTERLOCK;
	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
#endif
}

/*
 * Decrement the active use count.
 */
int
vop_nounlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockmgr(vp->v_vnlock, LK_RELEASE, NULL, ap->a_p));
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	if (vp->v_vnlock == NULL)
		return (0);
	return (lockstatus(vp->v_vnlock));
}

/*
 * Vnode reference.
 */
void
vref(vp)
	struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_usecount <= 0)
		panic("vref used where vget required");

	/* If on the inactive list, remove it from there */
	if ((vp->v_usecount == 1) && UBCINFOEXISTS(vp)) {
		if (VONLIST(vp)) {
			simple_lock(&vnode_free_list_slock);
			VREMINACTIVE("vref", vp);
			simple_unlock(&vnode_free_list_slock);
		}
	}
	/* The vnode should not be on the inactive list here */
	VINACTIVECHECK("vref", vp, 0);

	if (++vp->v_usecount <= 0)
		panic("vref v_usecount");
	simple_unlock(&vp->v_interlock);
}

/*
 * put the vnode on appropriate free list.
 * called with v_interlock held.
 */
static void
vfree(vp)
	struct vnode *vp;
{
	/*
	 * if the vnode is not obtained by calling getnewvnode() we
	 * are not responsible for the cleanup. Just return.
	 */
	if (!(vp->v_flag & VSTANDARD)) {
		return;
	}

	if (vp->v_usecount != 0)
		panic("vfree: v_usecount");

	/* insert at tail of LRU list or at head if VAGE is set */
	simple_lock(&vnode_free_list_slock);

	if (VONLIST(vp))
		panic("vfree: vnode still on list");

	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		vp->v_flag &= ~VAGE;
	} else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	simple_unlock(&vnode_free_list_slock);
	return;
}

/*
 * put the vnode on the inactive list.
 * called with v_interlock held
 * Only valid for UBC vnodes with exactly one remaining reference.
 */
static void
vinactive(vp)
	struct vnode *vp;
{
	if (!UBCINFOEXISTS(vp))
		panic("vinactive: not a UBC vnode");

	if (vp->v_usecount != 1)
		panic("vinactive: v_usecount");

	simple_lock(&vnode_free_list_slock);

	if (VONLIST(vp))
		panic("vinactive: vnode still on list");
	VINACTIVECHECK("vinactive", vp, 0);

	TAILQ_INSERT_TAIL(&vnode_inactive_list, vp, v_freelist);
	SET(vp->v_flag, VUINACTIVE);
	CLR(vp->v_flag, (VNOCACHE_DATA | VRAOFF));

	inactivevnodes++;
	simple_unlock(&vnode_free_list_slock);
	return;
}


/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	struct vnode *vp;
{
	struct proc *p = current_proc();	/* XXX */

#if DIAGNOSTIC
	if (vp == NULL)
		panic("vput: null vp");
#endif
	simple_lock(&vp->v_interlock);
	if (--vp->v_usecount == 1) {
		/* last user reference: UBC vnodes park on the inactive list */
		if (UBCINFOEXISTS(vp)) {
			vinactive(vp);
			simple_unlock(&vp->v_interlock);
			VOP_UNLOCK(vp, 0, p);
			return;
		}
	}
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		VOP_UNLOCK(vp, 0, p);
		return;
	}
#if DIAGNOSTIC
	if (vp->v_usecount < 0 || vp->v_writecount != 0) {
		vprint("vput: bad ref count", vp);
		panic("vput: v_usecount = %d, v_writecount = %d",
			vp->v_usecount, vp->v_writecount);
	}
#endif
	if (ISSET((vp)->v_flag, VUINACTIVE) && VONLIST(vp))
		VREMINACTIVE("vrele", vp);

	simple_unlock(&vp->v_interlock);
	VOP_INACTIVE(vp, p);
	/*
	 * The interlock is not held and
	 * VOP_INACTIVE releases the vnode lock.
	 * We could block and the vnode might get reactivated
	 * Can not just call vfree without checking the state
	 */
	simple_lock(&vp->v_interlock);
	if (!VONLIST(vp)) {
		if (vp->v_usecount == 0)
			vfree(vp);
		else if ((vp->v_usecount == 1) && UBCINFOEXISTS(vp))
			vinactive(vp);
	}
	simple_unlock(&vp->v_interlock);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
+ */ +void +vrele(vp) + struct vnode *vp; +{ + struct proc *p = current_proc(); /* XXX */ + +#if DIAGNOSTIC + if (vp == NULL) + panic("vrele: null vp"); +#endif + simple_lock(&vp->v_interlock); + if (--vp->v_usecount == 1) { + if (UBCINFOEXISTS(vp)) { + vinactive(vp); + simple_unlock(&vp->v_interlock); + return; + } + } + if (vp->v_usecount > 0) { + simple_unlock(&vp->v_interlock); + return; + } +#if DIAGNOSTIC + if (vp->v_usecount < 0 || vp->v_writecount != 0) { + vprint("vrele: bad ref count", vp); + panic("vrele: ref cnt"); + } +#endif + if (ISSET((vp)->v_flag, VUINACTIVE) && VONLIST(vp)) + VREMINACTIVE("vrele", vp); + + + if ((vp->v_flag & VXLOCK) || (vp->v_flag & VORECLAIM)) { + /* vnode is being cleaned, just return */ + vfree(vp); + simple_unlock(&vp->v_interlock); + return; + } + + if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) { + VOP_INACTIVE(vp, p); + /* + * vn_lock releases the interlock and + * VOP_INCATIVE releases the vnode lock. + * We could block and the vnode might get reactivated + * Can not just call vfree without checking the state + */ + simple_lock(&vp->v_interlock); + if (!VONLIST(vp)) { + if (vp->v_usecount == 0) + vfree(vp); + else if ((vp->v_usecount == 1) && UBCINFOEXISTS(vp)) + vinactive(vp); + } + simple_unlock(&vp->v_interlock); + } +#if 0 + else { + vfree(vp); + simple_unlock(&vp->v_interlock); + kprintf("vrele: vn_lock() failed for vp = 0x%08x\n", vp); + } +#endif +} + +void +vagevp(vp) + struct vnode *vp; +{ + assert(vp); + simple_lock(&vp->v_interlock); + vp->v_flag |= VAGE; + simple_unlock(&vp->v_interlock); + return; +} + +/* + * Page or buffer structure gets a reference. + */ +void +vhold(vp) + register struct vnode *vp; +{ + + simple_lock(&vp->v_interlock); + vp->v_holdcnt++; + simple_unlock(&vp->v_interlock); +} + +/* + * Page or buffer structure frees a reference. 
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	simple_lock(&vp->v_interlock);
	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
	simple_unlock(&vp->v_interlock);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#if DIAGNOSTIC
int busyprt = 0;	/* print out busy vnodes */
#if 0
struct ctldebug debug1 = { "busyprt", &busyprt };
#endif /* 0 */
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	struct proc *p = current_proc();	/* XXX */
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/* list changed under us (insmntque moved vp): restart */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;

		simple_lock(&vp->v_interlock);
		/*
		 * Skip over a vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * Skip over a vnodes marked VSWAP.
		 */
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * If WRITECLOSE is set, only flush out regular file
		 * vnodes open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		/*
		 * With v_usecount == 0, all we need to do is clear
		 * out the vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode.
		 * For block or character devices, revert to an
		 * anonymous device. For all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
				vclean(vp, 0, p);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *)0);
			}
			simple_lock(&mntvnode_slock);
			continue;
		}
#if DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		simple_unlock(&vp->v_interlock);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	if (busy)
		return (EBUSY);
	return (0);
}

/*
 * Disassociate the underlying file system from a vnode.
 * The vnode interlock is held on entry.
 */
static void
vclean(vp, flags, p)
	struct vnode *vp;
	int flags;
	struct proc *p;
{
	int active;
	void *obj;
	int removed = 0;	/* NOTE(review): set below but never read */

	/*
	 * if the vnode is not obtained by calling getnewvnode() we
	 * are not responsible for the cleanup. Just return.
	 */
	if (!(vp->v_flag & VSTANDARD)) {
		simple_unlock(&vp->v_interlock);
		return;
	}

	/*
	 * Check to see if the vnode is in use.
	 * If so we have to reference it before we clean it out
	 * so that its count cannot fall to zero and generate a
	 * race against ourselves to recycle it.
	 */
	if (active = vp->v_usecount)
		if (++vp->v_usecount <= 0)
			panic("vclean: v_usecount");
	/*
	 * Prevent the vnode from being recycled or
	 * brought into use while we clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;

	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still
	 * have the object locked while it cleans it out. The VOP_LOCK
	 * ensures that the VOP_INACTIVE routine is done with its work.
	 * For active vnodes, it ensures that no other activity can
	 * occur while the underlying object is being cleaned out.
	 */
	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);

	/*
	 * if this vnode is on the inactive list
	 * take it off the list.
	 */
	if ((active == 1) &&
	    (ISSET((vp)->v_flag, VUINACTIVE) && VONLIST(vp))) {
		simple_lock(&vnode_free_list_slock);
		VREMINACTIVE("vclean", vp);
		simple_unlock(&vnode_free_list_slock);
		removed++;
	}

	/* Clean the pages in VM. */
	if ((active) && UBCINFOEXISTS(vp)) {
		(void)ubc_clean(vp, 0);	/* do not invalidate */
	}

	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE) {
		if (vp->v_tag == VT_NFS)
			nfs_vinvalbuf(vp, V_SAVE, NOCRED, p, 0);
		else
			vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
	}
	/*
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed. Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, IO_NDELAY, NOCRED, p);
		VOP_INACTIVE(vp, p);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VXLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp, 0, p);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp, p))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);
	cache_purge(vp);
	if (vp->v_vnlock) {
		if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
			vprint("vclean: lock not drained", vp);
		FREE_ZONE(vp->v_vnlock, sizeof (struct lock__bsd__), M_VNODE);
		vp->v_vnlock = NULL;
	}

	/* It's dead, Jim!  Route all further operations to deadfs. */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;

	/*
	 * v_data is reclaimed by VOP_RECLAIM, all the vnode
	 * operation generated by the code below would be directed
	 * to the deadfs
	 */
	if (UBCINFOEXISTS(vp)) {
		/* vnode is dying, destroy the object */
		if (ubc_issetflags(vp, UI_HASOBJREF)) {
			obj = ubc_getobject(vp, UBC_NOREACTIVATE);
			if (obj == NULL)
				panic("vclean: null object");
			if (ISSET(vp->v_flag, VTERMINATE))
				panic("vclean: already teminating");
			SET(vp->v_flag, VTERMINATE);

			ubc_clearflags(vp, UI_HASOBJREF);
			memory_object_destroy(obj, 0);

			/*
			 * memory_object_destroy() is asynchronous with respect
			 * to vnode_pager_no_senders().
			 * wait for vnode_pager_no_senders() to clear
			 * VTERMINATE
			 */
			while (ISSET(vp->v_flag, VTERMINATE)) {
				SET(vp->v_flag, VTERMWANT);
				tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vclean", 0);
			}
			if (UBCINFOEXISTS(vp)) {
				ubc_info_free(vp);
				vp->v_ubcinfo = UBC_NOINFO; /* catch bad accesses */
			}
		}
	}

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t)vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
vop_revoke(ap)
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp, *vq;
	struct proc *p = current_proc();	/* XXX */

#if DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("vop_revoke");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
+ */ + if (vp->v_flag & VXLOCK) { + while (vp->v_flag & VXLOCK) { + vp->v_flag |= VXWANT; + simple_unlock(&vp->v_interlock); + tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0); + } + return (0); + } + /* + * Ensure that vp will not be vgone'd while we + * are eliminating its aliases. + */ + vp->v_flag |= VXLOCK; + simple_unlock(&vp->v_interlock); + while (vp->v_flag & VALIASED) { + simple_lock(&spechash_slock); + for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { + if (vq->v_rdev != vp->v_rdev || + vq->v_type != vp->v_type || vp == vq) + continue; + simple_unlock(&spechash_slock); + vgone(vq); + break; + } + if (vq == NULLVP) + simple_unlock(&spechash_slock); + } + /* + * Remove the lock so that vgone below will + * really eliminate the vnode after which time + * vgone will awaken any sleepers. + */ + simple_lock(&vp->v_interlock); + vp->v_flag &= ~VXLOCK; + } + vgonel(vp, p); + return (0); +} + +/* + * Recycle an unused vnode to the front of the free list. + * Release the passed interlock if the vnode will be recycled. + */ +int +vrecycle(vp, inter_lkp, p) + struct vnode *vp; + struct slock *inter_lkp; + struct proc *p; +{ + + simple_lock(&vp->v_interlock); + if (vp->v_usecount == 0) { + if (inter_lkp) + simple_unlock(inter_lkp); + vgonel(vp, p); + return (1); + } + simple_unlock(&vp->v_interlock); + return (0); +} + +/* + * Eliminate all activity associated with a vnode + * in preparation for reuse. + */ +void +vgone(vp) + struct vnode *vp; +{ + struct proc *p = current_proc(); /* XXX */ + + simple_lock(&vp->v_interlock); + vgonel(vp, p); +} + +/* + * vgone, with the vp interlock held. + */ +void +vgonel(vp, p) + struct vnode *vp; + struct proc *p; +{ + struct vnode *vq; + struct vnode *vx; + + /* + * if the vnode is not obtained by calling getnewvnode() we + * are not responsible for the cleanup. Just return. 
+ */ + if (!(vp->v_flag & VSTANDARD)) { + simple_unlock(&vp->v_interlock); + return; + } + + /* + * If a vgone (or vclean) is already in progress, + * wait until it is done and return. + */ + if (vp->v_flag & VXLOCK) { + while (vp->v_flag & VXLOCK) { + vp->v_flag |= VXWANT; + simple_unlock(&vp->v_interlock); + tsleep((caddr_t)vp, PINOD, "vgone", 0); + } + return; + } + /* + * Clean out the filesystem specific data. + */ + vclean(vp, DOCLOSE, p); + /* + * Delete from old mount point vnode list, if on one. + */ + if (vp->v_mount != NULL) + insmntque(vp, (struct mount *)0); + /* + * If special device, remove it from special device alias list + * if it is on one. + */ + if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) { + simple_lock(&spechash_slock); + if (*vp->v_hashchain == vp) { + *vp->v_hashchain = vp->v_specnext; + } else { + for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { + if (vq->v_specnext != vp) + continue; + vq->v_specnext = vp->v_specnext; + break; + } + if (vq == NULL) + panic("missing bdev"); + } + if (vp->v_flag & VALIASED) { + vx = NULL; + for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { + if (vq->v_rdev != vp->v_rdev || + vq->v_type != vp->v_type) + continue; + if (vx) + break; + vx = vq; + } + if (vx == NULL) + panic("missing alias"); + if (vq == NULL) + vx->v_flag &= ~VALIASED; + vp->v_flag &= ~VALIASED; + } + simple_unlock(&spechash_slock); + FREE_ZONE(vp->v_specinfo, sizeof (struct specinfo), M_VNODE); + vp->v_specinfo = NULL; + } + /* + * If it is on the freelist and not already at the head, + * move it to the head of the list. The test of the back + * pointer and the reference count of zero is because + * it will be removed from the free list by getnewvnode, + * but will not have its reference count incremented until + * after calling vgone. If the reference count were + * incremented first, vgone would (incorrectly) try to + * close the previous instance of the underlying object. 
+ * So, the back pointer is explicitly set to `0xdeadb' in + * getnewvnode after removing it from the freelist to ensure + * that we do not try to move it here. + */ + if (vp->v_usecount == 0) { + simple_lock(&vnode_free_list_slock); + if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) && + vnode_free_list.tqh_first != vp) { + TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); + TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); + } + simple_unlock(&vnode_free_list_slock); + } + vp->v_type = VBAD; +} + +/* + * Lookup a vnode by device number. + */ +int +vfinddev(dev, type, vpp) + dev_t dev; + enum vtype type; + struct vnode **vpp; +{ + struct vnode *vp; + int rc = 0; + + simple_lock(&spechash_slock); + for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) { + if (dev != vp->v_rdev || type != vp->v_type) + continue; + *vpp = vp; + rc = 1; + break; + } + simple_unlock(&spechash_slock); + return (rc); +} + +/* + * Calculate the total number of references to a special device. + */ +int +vcount(vp) + struct vnode *vp; +{ + struct vnode *vq, *vnext; + int count; + +loop: + if ((vp->v_flag & VALIASED) == 0) + return (vp->v_usecount); + simple_lock(&spechash_slock); + for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) { + vnext = vq->v_specnext; + if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type) + continue; + /* + * Alias, but not in use, so flush it out. + */ + if (vq->v_usecount == 0 && vq != vp) { + simple_unlock(&spechash_slock); + vgone(vq); + goto loop; + } + count += vq->v_usecount; + } + simple_unlock(&spechash_slock); + return (count); +} + +int prtactive = 0; /* 1 => print out reclaim of active vnodes */ + +/* + * Print out a description of a vnode. 
+ */ +static char *typename[] = + { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" }; + +void +vprint(label, vp) + char *label; + register struct vnode *vp; +{ + char buf[64]; + + if (label != NULL) + printf("%s: ", label); + printf("type %s, usecount %d, writecount %d, refcount %d,", + typename[vp->v_type], vp->v_usecount, vp->v_writecount, + vp->v_holdcnt); + buf[0] = '\0'; + if (vp->v_flag & VROOT) + strcat(buf, "|VROOT"); + if (vp->v_flag & VTEXT) + strcat(buf, "|VTEXT"); + if (vp->v_flag & VSYSTEM) + strcat(buf, "|VSYSTEM"); + if (vp->v_flag & VXLOCK) + strcat(buf, "|VXLOCK"); + if (vp->v_flag & VXWANT) + strcat(buf, "|VXWANT"); + if (vp->v_flag & VBWAIT) + strcat(buf, "|VBWAIT"); + if (vp->v_flag & VALIASED) + strcat(buf, "|VALIASED"); + if (buf[0] != '\0') + printf(" flags (%s)", &buf[1]); + if (vp->v_data == NULL) { + printf("\n"); + } else { + printf("\n\t"); + VOP_PRINT(vp); + } +} + +#ifdef DEBUG +/* + * List all of the locked vnodes in the system. + * Called when debugging the kernel. + */ +void +printlockedvnodes() +{ + struct proc *p = current_proc(); /* XXX */ + struct mount *mp, *nmp; + struct vnode *vp; + + printf("Locked vnodes\n"); + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { + nmp = mp->mnt_list.cqe_next; + continue; + } + for (vp = mp->mnt_vnodelist.lh_first; + vp != NULL; + vp = vp->v_mntvnodes.le_next) { + if (VOP_ISLOCKED(vp)) + vprint((char *)0, vp); + } + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + } + simple_unlock(&mountlist_slock); +} +#endif + +/* + * Top level filesystem related information gathering. 
+ */ +int +vfs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p) + int *name; + u_int namelen; + void *oldp; + size_t *oldlenp; + void *newp; + size_t newlen; + struct proc *p; +{ + struct ctldebug *cdp; + struct vfsconf *vfsp; + + if (name[0] == VFS_NUMMNTOPS) { + extern unsigned int vfs_nummntops; + return (sysctl_rdint(oldp, oldlenp, newp, vfs_nummntops)); + } + + /* all sysctl names at this level are at least name and field */ + if (namelen < 2) + return (ENOTDIR); /* overloaded */ + if (name[0] != VFS_GENERIC) { + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (vfsp->vfc_typenum == name[0]) + break; + if (vfsp == NULL) + return (EOPNOTSUPP); + return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1, + oldp, oldlenp, newp, newlen, p)); + } + switch (name[1]) { + case VFS_MAXTYPENUM: + return (sysctl_rdint(oldp, oldlenp, newp, maxvfsconf)); + case VFS_CONF: + if (namelen < 3) + return (ENOTDIR); /* overloaded */ + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (vfsp->vfc_typenum == name[2]) + break; + if (vfsp == NULL) + return (EOPNOTSUPP); + return (sysctl_rdstruct(oldp, oldlenp, newp, vfsp, + sizeof(struct vfsconf))); + } + return (EOPNOTSUPP); +} + +int kinfo_vdebug = 1; +#define KINFO_VNODESLOP 10 +/* + * Dump vnode list (via sysctl). + * Copyout address of vnode followed by vnode. 
+ */ +/* ARGSUSED */ +int +sysctl_vnode(where, sizep, p) + char *where; + size_t *sizep; + struct proc *p; +{ + struct mount *mp, *nmp; + struct vnode *nvp, *vp; + char *bp = where, *savebp; + char *ewhere; + int error; + +#define VPTRSZ sizeof (struct vnode *) +#define VNODESZ sizeof (struct vnode) + if (where == NULL) { + *sizep = (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ); + return (0); + } + ewhere = where + *sizep; + + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { + nmp = mp->mnt_list.cqe_next; + continue; + } + savebp = bp; +again: + simple_lock(&mntvnode_slock); + for (vp = mp->mnt_vnodelist.lh_first; + vp != NULL; + vp = nvp) { + /* + * Check that the vp is still associated with + * this filesystem. RACE: could have been + * recycled onto the same filesystem. + */ + if (vp->v_mount != mp) { + simple_unlock(&mntvnode_slock); + if (kinfo_vdebug) + printf("kinfo: vp changed\n"); + bp = savebp; + goto again; + } + nvp = vp->v_mntvnodes.le_next; + if (bp + VPTRSZ + VNODESZ > ewhere) { + simple_unlock(&mntvnode_slock); + *sizep = bp - where; + return (ENOMEM); + } + simple_unlock(&mntvnode_slock); + if ((error = copyout((caddr_t)&vp, bp, VPTRSZ)) || + (error = copyout((caddr_t)vp, bp + VPTRSZ, VNODESZ))) + return (error); + bp += VPTRSZ + VNODESZ; + simple_lock(&mntvnode_slock); + } + simple_unlock(&mntvnode_slock); + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + } + simple_unlock(&mountlist_slock); + + *sizep = bp - where; + return (0); +} + +/* + * Check to see if a filesystem is mounted on a block device. 
+ */ +int +vfs_mountedon(vp) + struct vnode *vp; +{ + struct vnode *vq; + int error = 0; + + if (vp->v_specflags & SI_MOUNTEDON) + return (EBUSY); + if (vp->v_flag & VALIASED) { + simple_lock(&spechash_slock); + for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { + if (vq->v_rdev != vp->v_rdev || + vq->v_type != vp->v_type) + continue; + if (vq->v_specflags & SI_MOUNTEDON) { + error = EBUSY; + break; + } + } + simple_unlock(&spechash_slock); + } + return (error); +} + +/* + * Unmount all filesystems. The list is traversed in reverse order + * of mounting to avoid dependencies. + */ +void +vfs_unmountall() +{ + struct mount *mp, *nmp; + struct proc *p = current_proc(); /* XXX */ + + /* + * Since this only runs when rebooting, it is not interlocked. + */ + for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) { + nmp = mp->mnt_list.cqe_prev; + (void) dounmount(mp, MNT_FORCE, p); + } +} + +/* + * Build hash lists of net addresses and hang them off the mount point. + * Called by ufs_mount() to set up the lists of export addresses. 
+ */ +static int +vfs_hang_addrlist(mp, nep, argp) + struct mount *mp; + struct netexport *nep; + struct export_args *argp; +{ + register struct netcred *np; + register struct radix_node_head *rnh; + register int i; + struct radix_node *rn; + struct sockaddr *saddr, *smask = 0; + struct domain *dom; + int error; + + if (argp->ex_addrlen == 0) { + if (mp->mnt_flag & MNT_DEFEXPORTED) + return (EPERM); + np = &nep->ne_defexported; + np->netc_exflags = argp->ex_flags; + np->netc_anon = argp->ex_anon; + np->netc_anon.cr_ref = 1; + mp->mnt_flag |= MNT_DEFEXPORTED; + return (0); + } + i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen; + MALLOC(np, struct netcred *, i, M_NETADDR, M_WAITOK); + bzero((caddr_t)np, i); + saddr = (struct sockaddr *)(np + 1); + if (error = copyin(argp->ex_addr, (caddr_t)saddr, argp->ex_addrlen)) + goto out; + if (saddr->sa_len > argp->ex_addrlen) + saddr->sa_len = argp->ex_addrlen; + if (argp->ex_masklen) { + smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen); + error = copyin(argp->ex_addr, (caddr_t)smask, argp->ex_masklen); + if (error) + goto out; + if (smask->sa_len > argp->ex_masklen) + smask->sa_len = argp->ex_masklen; + } + i = saddr->sa_family; + if ((rnh = nep->ne_rtable[i]) == 0) { + /* + * Seems silly to initialize every AF when most are not + * used, do so on demand here + */ + for (dom = domains; dom; dom = dom->dom_next) + if (dom->dom_family == i && dom->dom_rtattach) { + dom->dom_rtattach((void **)&nep->ne_rtable[i], + dom->dom_rtoffset); + break; + } + if ((rnh = nep->ne_rtable[i]) == 0) { + error = ENOBUFS; + goto out; + } + } + rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, + np->netc_rnodes); + if (rn == 0) { + /* + * One of the reasons that rnh_addaddr may fail is that + * the entry already exists. To check for this case, we + * look up the entry to see if it is there. If so, we + * do not need to make a new entry but do return success. 
+ */ + _FREE(np, M_NETADDR); + rn = (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh); + if (rn != 0 && (rn->rn_flags & RNF_ROOT) == 0 && + ((struct netcred *)rn)->netc_exflags == argp->ex_flags && + !bcmp((caddr_t)&((struct netcred *)rn)->netc_anon, + (caddr_t)&argp->ex_anon, sizeof(struct ucred))) + return (0); + return (EPERM); + } + np->netc_exflags = argp->ex_flags; + np->netc_anon = argp->ex_anon; + np->netc_anon.cr_ref = 1; + return (0); +out: + _FREE(np, M_NETADDR); + return (error); +} + +/* ARGSUSED */ +static int +vfs_free_netcred(rn, w) + struct radix_node *rn; + caddr_t w; +{ + register struct radix_node_head *rnh = (struct radix_node_head *)w; + + (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh); + _FREE((caddr_t)rn, M_NETADDR); + return (0); +} + +/* + * Free the net address hash lists that are hanging off the mount points. + */ +static void +vfs_free_addrlist(nep) + struct netexport *nep; +{ + register int i; + register struct radix_node_head *rnh; + + for (i = 0; i <= AF_MAX; i++) + if (rnh = nep->ne_rtable[i]) { + (*rnh->rnh_walktree)(rnh, vfs_free_netcred, + (caddr_t)rnh); + _FREE((caddr_t)rnh, M_RTABLE); + nep->ne_rtable[i] = 0; + } +} + +int +vfs_export(mp, nep, argp) + struct mount *mp; + struct netexport *nep; + struct export_args *argp; +{ + int error; + + if (argp->ex_flags & MNT_DELEXPORT) { + vfs_free_addrlist(nep); + mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED); + } + if (argp->ex_flags & MNT_EXPORTED) { + if (error = vfs_hang_addrlist(mp, nep, argp)) + return (error); + mp->mnt_flag |= MNT_EXPORTED; + } + return (0); +} + +struct netcred * +vfs_export_lookup(mp, nep, nam) + register struct mount *mp; + struct netexport *nep; + struct mbuf *nam; +{ + register struct netcred *np; + register struct radix_node_head *rnh; + struct sockaddr *saddr; + + np = NULL; + if (mp->mnt_flag & MNT_EXPORTED) { + /* + * Lookup in the export list first. 
+ */ + if (nam != NULL) { + saddr = mtod(nam, struct sockaddr *); + rnh = nep->ne_rtable[saddr->sa_family]; + if (rnh != NULL) { + np = (struct netcred *) + (*rnh->rnh_matchaddr)((caddr_t)saddr, + rnh); + if (np && np->netc_rnodes->rn_flags & RNF_ROOT) + np = NULL; + } + } + /* + * If no address match, use the default if it exists. + */ + if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED) + np = &nep->ne_defexported; + } + return (np); +} + +/* + * try to reclaim vnodes from the memory + * object cache + */ +int +vm_object_cache_reclaim(int count) +{ + int cnt; + void vnode_pager_release_from_cache(int *); + + /* attempt to reclaim vnodes from VM object cache */ + cnt = count; + vnode_pager_release_from_cache(&cnt); + return(cnt); +} + +/* + * Release memory object reference held by inactive vnodes + * and then try to reclaim some vnodes from the memory + * object cache + */ +int +vnreclaim(int count) +{ + int cnt, i, loopcnt; + void *obj; + struct vnode *vp; + int err; + struct proc *p; + + i = 0; + loopcnt = 0; + + /* Try to release "count" vnodes from the inactive list */ +restart: + if (++loopcnt > inactivevnodes) { + /* + * I did my best trying to reclaim the vnodes. + * Do not try any more as that would only lead to + * long latencies. Also in the worst case + * this can get totally CPU bound. 
Just fall through and attempt a reclaim of VM
+ */ + if (UBCINFOEXISTS(vp)) { + simple_lock(&vp->v_interlock); + if ((vp->v_usecount == 1) && !VONLIST(vp)) + vinactive(vp); + simple_unlock(&vp->v_interlock); + } + } else { + vinactive(vp); + simple_unlock(&vp->v_interlock); + } + } else { + VORECLAIM_ENABLE(vp); + + /* + * scrub the dirty pages and invalidate the buffers + */ + p = current_proc(); + err = vn_lock(vp, LK_EXCLUSIVE|LK_INTERLOCK, p); + if (err) { + /* cannot reclaim */ + simple_lock(&vp->v_interlock); + vinactive(vp); + VORECLAIM_DISABLE(vp); + simple_unlock(&vp->v_interlock); + goto restart; + } + simple_lock(&vp->v_interlock); + if(vp->v_usecount != 1) + panic("VOCR: usecount race"); + simple_unlock(&vp->v_interlock); + + /* + * If the UBC reference on the memory object + * was already lost, regain it. This will + * keep the memory object alive for rest of the + * reclaim and finally this reference would + * be lost by memory_object_destroy() + */ + obj = ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT)); + if (obj == (void *)NULL) + panic("vnreclaim: null object"); + + /* clean up the state in VM without invalidating */ + (void)ubc_clean(vp, 0); + + /* flush and invalidate buffers associated with the vnode */ + if (vp->v_tag == VT_NFS) + nfs_vinvalbuf(vp, V_SAVE, NOCRED, p, 0); + else + vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0); + + /* + * It is not necessary to call ubc_uncache() + * here because memory_object_destroy() marks + * the memory object non cachable already + * + * Need to release the vnode lock before calling + * vm_object_deallocate() to avoid deadlock + * when the vnode goes through vop_inactive + * + * Note: for the v_usecount == 1 case, VOP_INACTIVE + * has not yet been called. Call it now while vp is + * still locked, it will also release the lock. + */ + if (vp->v_usecount == 1) + VOP_INACTIVE(vp, p); + else + VOP_UNLOCK(vp, 0, p); + + /* + * This vnode is ready to be reclaimed. + * Terminate the memory object. 
+ * memory_object_destroy() will result in + * vnode_pager_no_senders(). + * That will release the pager reference + * and the vnode will move to the free list. + */ + if (ISSET(vp->v_flag, VTERMINATE)) + panic("vnreclaim: already teminating"); + SET(vp->v_flag, VTERMINATE); + + memory_object_destroy(obj, 0); + + /* + * memory_object_destroy() is asynchronous with respect + * to vnode_pager_no_senders(). + * wait for vnode_pager_no_senders() to clear + * VTERMINATE + */ + while (ISSET(vp->v_flag, VTERMINATE)) { + SET(vp->v_flag, VTERMWANT); + tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vnreclaim", 0); + } + simple_lock(&vp->v_interlock); + VORECLAIM_DISABLE(vp); + i++; + simple_unlock(&vp->v_interlock); + } + /* inactive list lock was released, must restart */ + goto restart; + } + } + simple_unlock(&vnode_free_list_slock); + + vnode_reclaim_tried += i; +out: + i = vm_object_cache_reclaim(count); + vnode_objects_reclaimed += i; + + return(i); +} + +/* + * This routine is called from vnode_pager_no_senders() + * which in turn can be called with vnode locked by vnode_uncache() + * But it could also get called as a result of vm_object_cache_trim(). + * In that case lock state is unknown. + * AGE the vnode so that it gets recycled quickly. + * Check lock status to decide whether to call vput() or vrele(). + */ +void +vnode_pager_vrele(struct vnode *vp) +{ + + boolean_t funnel_state; + int isvnreclaim = 1; + + if (vp == (struct vnode *) NULL) + panic("vnode_pager_vrele: null vp"); + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + /* Mark the vnode to be recycled */ + vagevp(vp); + + simple_lock(&vp->v_interlock); + /* + * If a vgone (or vclean) is already in progress, + * Do not bother with the ubc_info cleanup. + * Let the vclean deal with it. 
+ */ + if (vp->v_flag & VXLOCK) { + CLR(vp->v_flag, VTERMINATE); + if (ISSET(vp->v_flag, VTERMWANT)) { + CLR(vp->v_flag, VTERMWANT); + wakeup((caddr_t)&vp->v_ubcinfo); + } + simple_unlock(&vp->v_interlock); + vrele(vp); + (void) thread_funnel_set(kernel_flock, funnel_state); + return; + } + + /* It's dead, Jim! */ + if (!ISSET(vp->v_flag, VORECLAIM)) { + /* + * called as a result of eviction of the memory + * object from the memory object cache + */ + isvnreclaim = 0; + + /* So serialize vnode operations */ + VORECLAIM_ENABLE(vp); + } + if (!ISSET(vp->v_flag, VTERMINATE)) + SET(vp->v_flag, VTERMINATE); + if (UBCINFOEXISTS(vp)) { + if (ubc_issetflags(vp, UI_WASMAPPED)) + SET(vp->v_flag, VWASMAPPED); + + if ((vp->v_ubcinfo->ui_holdcnt) /* XXX */ + && !(vp->v_flag & VXLOCK)) + panic("vnode_pager_vrele: freeing held ubc_info"); + + simple_unlock(&vp->v_interlock); + ubc_info_free(vp); + vp->v_ubcinfo = UBC_NOINFO; /* catch bad accesses */ + } else { + if ((vp->v_type == VBAD) && ((vp)->v_ubcinfo != UBC_INFO_NULL) + && ((vp)->v_ubcinfo != UBC_NOINFO)) { + simple_unlock(&vp->v_interlock); + ubc_info_free(vp); + vp->v_ubcinfo = UBC_NOINFO; /* catch bad accesses */ + } else { + simple_unlock(&vp->v_interlock); + } + } + + CLR(vp->v_flag, VTERMINATE); + + if (vp->v_type != VBAD){ + vgone(vp); /* revoke the vnode */ + vrele(vp); /* and drop the reference */ + } else + vrele(vp); + + if (ISSET(vp->v_flag, VTERMWANT)) { + CLR(vp->v_flag, VTERMWANT); + wakeup((caddr_t)&vp->v_ubcinfo); + } + if (!isvnreclaim) + VORECLAIM_DISABLE(vp); + (void) thread_funnel_set(kernel_flock, funnel_state); + return; +} + + +#if DIAGNOSTIC +int walk_vnodes_debug=0; + +void +walk_allvnodes() +{ + struct proc *p = current_proc(); /* XXX */ + struct mount *mp, *nmp; + struct vnode *vp; + int cnt = 0; + + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + for (vp = mp->mnt_vnodelist.lh_first; + vp != NULL; + vp = vp->v_mntvnodes.le_next) { + if (vp->v_usecount < 0){ + 
if(walk_vnodes_debug) { + printf("vp is %x\n",vp); + } + } + } + nmp = mp->mnt_list.cqe_next; + } + for (cnt = 0, vp = vnode_free_list.tqh_first; + vp != NULLVP; cnt++, vp = vp->v_freelist.tqe_next) { + if ((vp->v_usecount < 0) && walk_vnodes_debug) { + if(walk_vnodes_debug) { + printf("vp is %x\n",vp); + } + } + } + printf("%d - free\n", cnt); + + for (cnt = 0, vp = vnode_inactive_list.tqh_first; + vp != NULLVP; cnt++, vp = vp->v_freelist.tqe_next) { + if ((vp->v_usecount < 0) && walk_vnodes_debug) { + if(walk_vnodes_debug) { + printf("vp is %x\n",vp); + } + } + } + printf("%d - inactive\n", cnt); +} +#endif /* DIAGNOSTIC */ diff --git a/bsd/vfs/vfs_support.c b/bsd/vfs/vfs_support.c new file mode 100644 index 000000000..24a1fa77a --- /dev/null +++ b/bsd/vfs/vfs_support.c @@ -0,0 +1,1332 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer, Inc. All rights reserved. + * + * File: vfs/vfs_support.c + * + * The default VFS routines. A VFS plugin can use these + * functions in case it does not want to implement all. 
These functions + * take care of releasing locks and free up memory that they are + * supposed to. + * + * nop_* routines always return 0 [success] + * err_* routines always return EOPNOTSUPP + * + * This file could be auto-generated from vnode_if.src. but that needs + * support for freeing cnp. + * + * HISTORY + * 15-Jul-1998 Earsh Nandkeshwar (earsh@apple.com) + * Fixed up readdirattr for its existance now. + * 18-Aug-1998 Umesh Vaishampayan (umeshv@apple.com) + * Created. + */ + +#include + + +struct vop_create_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */; + +int +nop_create(struct vop_create_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_cnp->cn_flags & HASBUF) == 0) + panic("nop_create: no name"); +#endif + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + return (0); +} + +int +err_create(struct vop_create_args *ap) +{ + (void)nop_create(ap); + return (EOPNOTSUPP); +} + + +struct vop_whiteout_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; + int a_flags; +} */; + +int +nop_whiteout(struct vop_whiteout_args *ap) +{ + return (0); +} + +int +err_whiteout(struct vop_whiteout_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_mknod_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */; + +int +nop_mknod(struct vop_mknod_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_cnp->cn_flags & HASBUF) == 0) + panic("nop_mknod: no name"); +#endif + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + return (0); +} + +int +err_mknod(struct vop_mknod_args *ap) +{ + (void)nop_mknod(ap); + return (EOPNOTSUPP); +} + + +struct vop_mkcomplex_args /* { + struct vnode *a_dvp, + struct vnode **a_vpp, + struct componentname *a_cnp, + struct vattr *a_vap, + u_long a_type) +} */; + +int +nop_mkcomplex(struct vop_mkcomplex_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_cnp->cn_flags & HASBUF) == 0) + panic("nop_mkcomplex: no name"); +#endif + 
VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + return (0); +} + +int +err_mkcomplex(struct vop_mkcomplex_args *ap) +{ + (void)nop_mkcomplex(ap); + return (EOPNOTSUPP); +} + + +struct vop_open_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_open(struct vop_open_args *ap) +{ + return (0); +} + +int +err_open(struct vop_open_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_close_args /* { + struct vnode *a_vp; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_close(struct vop_close_args *ap) +{ + return (0); +} + +int +err_close(struct vop_close_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_access_args /* { + struct vnode *a_vp; + int a_mode; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_access(struct vop_access_args *ap) +{ + return (0); +} + +int +err_access(struct vop_access_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_getattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_getattr(struct vop_getattr_args *ap) +{ + return (0); +} + +int +err_getattr(struct vop_getattr_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_setattr_args /* { + struct vnode *a_vp; + struct vattr *a_vap; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_setattr(struct vop_setattr_args *ap) +{ + return (0); +} + +int +err_setattr(struct vop_setattr_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_getattrlist_args /* { + struct vnode *a_vp; + struct attrlist *a_alist; + struct uio *a_uio; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_getattrlist(struct vop_getattrlist_args *ap) +{ + return (0); +} + +int +err_getattrlist(struct vop_getattrlist_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_setattrlist_args /* { + struct vnode *a_vp; + struct attrlist *a_alist; + struct uio *a_uio; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int 
+nop_setattrlist(struct vop_setattrlist_args *ap) +{ + return (0); +} + +int +err_setattrlist(struct vop_setattrlist_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_read_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; +} */; + +int +nop_read(struct vop_read_args *ap) +{ + return (0); +} + +int +err_read(struct vop_read_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_write_args /* { + struct vnode *a_vp; + struct uio *a_uio; + int a_ioflag; + struct ucred *a_cred; +} */; + +int +nop_write(struct vop_write_args *ap) +{ + return (0); +} + +int +err_write(struct vop_write_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_lease_args /* { + struct vnode *a_vp; + struct proc *a_p; + struct ucred *a_cred; + int a_flag; +} */; + +int +nop_lease(struct vop_lease_args *ap) +{ + return (0); +} + +int +err_lease(struct vop_lease_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_ioctl_args /* { + struct vnode *a_vp; + u_long a_command; + caddr_t a_data; + int a_fflag; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_ioctl(struct vop_ioctl_args *ap) +{ + return (0); +} + +int +err_ioctl(struct vop_ioctl_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_select_args /* { + struct vnode *a_vp; + int a_which; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_select(struct vop_select_args *ap) +{ + return (0); +} + +int +err_select(struct vop_select_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_exchange_args /* { + struct vnode *a_fvp; + struct vnode *a_tvp; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_exchange(struct vop_exchange_args *ap) +{ + return (0); +} + +int +err_exchange(struct vop_exchange_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_revoke_args /* { + struct vnode *a_vp; + int a_flags; +} */; + +int +nop_revoke(struct vop_revoke_args *ap) +{ + return (vop_revoke(ap)); +} + +int +err_revoke(struct vop_revoke_args *ap) +{ + 
(void)nop_revoke(ap); + return (EOPNOTSUPP); +} + + +struct vop_mmap_args /* { + struct vnode *a_vp; + int a_fflags; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_mmap(struct vop_mmap_args *ap) +{ + return (0); +} + +int +err_mmap(struct vop_mmap_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_fsync_args /* { + struct vnode *a_vp; + struct ucred *a_cred; + int a_waitfor; + struct proc *a_p; +} */; + +int +nop_fsync(struct vop_fsync_args *ap) +{ + return (0); +} + +int +err_fsync(struct vop_fsync_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_seek_args /* { + struct vnode *a_vp; + off_t a_oldoff; + off_t a_newoff; + struct ucred *a_cred; +} */; + +int +nop_seek(struct vop_seek_args *ap) +{ + return (0); +} + +int +err_seek(struct vop_seek_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_remove_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; +} */; + +int +nop_remove(struct vop_remove_args *ap) +{ + if (ap->a_dvp == ap->a_vp) + vrele(ap->a_vp); + else + vput(ap->a_vp); + vput(ap->a_dvp); + return (0); +} + +int +err_remove(struct vop_remove_args *ap) +{ + (void)nop_remove(ap); + return (EOPNOTSUPP); +} + + +struct vop_link_args /* { + struct vnode *a_vp; + struct vnode *a_tdvp; + struct componentname *a_cnp; +} */; + +int +nop_link(struct vop_link_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_cnp->cn_flags & HASBUF) == 0) + panic("nop_link: no name"); +#endif + VOP_ABORTOP(ap->a_tdvp, ap->a_cnp); + vput(ap->a_tdvp); + return (0); +} + +int +err_link(struct vop_link_args *ap) +{ + (void)nop_link(ap); + return (EOPNOTSUPP); +} + + +struct vop_rename_args /* { + struct vnode *a_fdvp; + struct vnode *a_fvp; + struct componentname *a_fcnp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; +} */; + +int +nop_rename(struct vop_rename_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_tcnp->cn_flags & HASBUF) == 0 || + (ap->a_fcnp->cn_flags & HASBUF) == 0) + panic("nop_rename: no 
name"); +#endif + VOP_ABORTOP(ap->a_tdvp, ap->a_tcnp); + if (ap->a_tdvp == ap->a_tvp) + vrele(ap->a_tdvp); + else + vput(ap->a_tdvp); + if (ap->a_tvp) + vput(ap->a_tvp); + VOP_ABORTOP(ap->a_fdvp, ap->a_fcnp); + vrele(ap->a_fdvp); + vrele(ap->a_fvp); + return (0); +} + +int +err_rename(struct vop_rename_args *ap) +{ + (void)nop_rename(ap); + return (EOPNOTSUPP); +} + + +struct vop_mkdir_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; +} */; + +int +nop_mkdir(struct vop_mkdir_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_cnp->cn_flags & HASBUF) == 0) + panic("nop_mkdir: no name"); +#endif + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + return (0); +} + +int +err_mkdir(struct vop_mkdir_args *ap) +{ + (void)nop_mkdir(ap); + return (EOPNOTSUPP); +} + + +struct vop_rmdir_args /* { + struct vnode *a_dvp; + struct vnode *a_vp; + struct componentname *a_cnp; +} */; + +int +nop_rmdir(struct vop_rmdir_args *ap) +{ + vput(ap->a_dvp); + vput(ap->a_vp); + return (0); +} + +int +err_rmdir(struct vop_rmdir_args *ap) +{ + (void)nop_rmdir(ap); + return (EOPNOTSUPP); +} + + +struct vop_symlink_args /* { + struct vnode *a_dvp; + struct vnode **a_vpp; + struct componentname *a_cnp; + struct vattr *a_vap; + char *a_target; +} */; + +int +nop_symlink(struct vop_symlink_args *ap) +{ +#if DIAGNOSTIC + if ((ap->a_cnp->cn_flags & HASBUF) == 0) + panic("nop_symlink: no name"); +#endif + VOP_ABORTOP(ap->a_dvp, ap->a_cnp); + vput(ap->a_dvp); + return (0); +} + +int +err_symlink(struct vop_symlink_args *ap) +{ + (void)nop_symlink(ap); + return (EOPNOTSUPP); +} + + +struct vop_readdir_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; + int *a_eofflag; + int *a_ncookies; + u_long **a_cookies; +} */; + +int +nop_readdir(struct vop_readdir_args *ap) +{ + return (0); +} + +int +err_readdir(struct vop_readdir_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_readdirattr_args /* { + struct vnode *a_vp; 
+ struct attrlist *a_alist; + struct uio *a_uio; + u_long a_maxcount; + u_long a_options; + int *a_newstate; + int *a_eofflag; + u_long *a_actualcount; + u_long **a_cookies; + struct ucred *a_cred; +} */; + +int +nop_readdirattr(struct vop_readdirattr_args *ap) +{ + *(ap->a_actualcount) = 0; + *(ap->a_eofflag) = 0; + return (0); +} + +int +err_readdirattr(struct vop_readdirattr_args *ap) +{ + (void)nop_readdirattr(ap); + return (EOPNOTSUPP); +} + + +struct vop_readlink_args /* { + struct vnode *vp; + struct uio *uio; + struct ucred *cred; +} */; + +int +nop_readlink(struct vop_readlink_args *ap) +{ + return (0); +} + +int +err_readlink(struct vop_readlink_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_abortop_args /* { + struct vnode *a_dvp; + struct componentname *a_cnp; +} */; + +int +nop_abortop(struct vop_abortop_args *ap) +{ + if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) + FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI); + return (0); +} + +int +err_abortop(struct vop_abortop_args *ap) +{ + (void)nop_abortop(ap); + return (EOPNOTSUPP); +} + + +struct vop_inactive_args /* { + struct vnode *a_vp; + struct proc *a_p; +} */; + +int +nop_inactive(struct vop_inactive_args *ap) +{ + VOP_UNLOCK(ap->a_vp, 0, ap->a_p); + return (0); +} + +int +err_inactive(struct vop_inactive_args *ap) +{ + (void)nop_inactive(ap); + return (EOPNOTSUPP); +} + + +struct vop_reclaim_args /* { + struct vnode *a_vp; + struct proc *a_p; +} */; + +int +nop_reclaim(struct vop_reclaim_args *ap) +{ + return (0); +} + +int +err_reclaim(struct vop_reclaim_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_lock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +} */; + +int +nop_lock(struct vop_lock_args *ap) +{ + return (vop_nolock(ap)); +} + +int +err_lock(struct vop_lock_args *ap) +{ + (void)nop_lock(ap); + return (EOPNOTSUPP); +} + + +struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + struct proc *a_p; +} */; + +int 
+nop_unlock(struct vop_unlock_args *ap) +{ + return (vop_nounlock(ap)); +} + +int +err_unlock(struct vop_unlock_args *ap) +{ + (void)nop_unlock(ap); + return (EOPNOTSUPP); +} + + +struct vop_bmap_args /* { + struct vnode *vp; + daddr_t bn; + struct vnode **vpp; + daddr_t *bnp; + int *runp; +} */; + +int +nop_bmap(struct vop_bmap_args *ap) +{ + return (0); +} + +int +err_bmap(struct vop_bmap_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_strategy_args /* { + struct buf *a_bp; +} */; + +int +nop_strategy(struct vop_strategy_args *ap) +{ + return (0); +} + +int +err_strategy(struct vop_strategy_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_print_args /* { + struct vnode *a_vp; +} */; + +int +nop_print(struct vop_print_args *ap) +{ + return (0); +} + +int +err_print(struct vop_print_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_islocked_args /* { + struct vnode *a_vp; +} */; + +int +nop_islocked(struct vop_islocked_args *ap) +{ + return (vop_noislocked(ap)); +} + +int +err_islocked(struct vop_islocked_args *ap) +{ + (void)nop_islocked(ap); + return (EOPNOTSUPP); +} + + +struct vop_pathconf_args /* { + struct vnode *a_vp; + int a_name; + register_t *a_retval; +} */; + +int +nop_pathconf(struct vop_pathconf_args *ap) +{ + return (0); +} + +int +err_pathconf(struct vop_pathconf_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_advlock_args /* { + struct vnode *a_vp; + caddr_t a_id; + int a_op; + struct flock *a_fl; + int a_flags; +} */; + +int +nop_advlock(struct vop_advlock_args *ap) +{ + return (0); +} + +int +err_advlock(struct vop_advlock_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_blkatoff_args /* { + struct vnode *a_vp; + off_t a_offset; + char **a_res; + struct buf **a_bpp; +} */; + +int +nop_blkatoff(struct vop_blkatoff_args *ap) +{ + *ap->a_bpp = NULL; + return (0); +} + +int +err_blkatoff(struct vop_blkatoff_args *ap) +{ + (void)nop_blkatoff(ap); + return (EOPNOTSUPP); +} + + +struct vop_valloc_args /* { + struct vnode 
*a_pvp; + int a_mode; + struct ucred *a_cred; + struct vnode **a_vpp; +} */; + +int +nop_valloc(struct vop_valloc_args *ap) +{ + *ap->a_vpp = NULL; + return (0); +} + +int +err_valloc(struct vop_valloc_args *ap) +{ + (void)nop_valloc(ap); + return (EOPNOTSUPP); +} + + +struct vop_reallocblks_args /* { + struct vnode *a_vp; + struct cluster_save *a_buflist; +} */; + +int +nop_reallocblks(struct vop_reallocblks_args *ap) +{ + return (0); +} + +int +err_reallocblks(struct vop_reallocblks_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_vfree_args /* { + struct vnode *a_pvp; + ino_t a_ino; + int a_mode; +} */; + +int +nop_vfree(struct vop_vfree_args *ap) +{ + return (0); +} + +int +err_vfree(struct vop_vfree_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_truncate_args /* { + struct vnode *a_vp; + off_t a_length; + int a_flags; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_truncate(struct vop_truncate_args *ap) +{ + return (0); +} + +int +err_truncate(struct vop_truncate_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_allocate_args /* { + struct vnode *a_vp; + off_t a_length; + u_int32_t a_flags; + off_t *a_bytesallocated; + struct ucred *a_cred; + struct proc *a_p; +} */; + +int +nop_allocate(struct vop_allocate_args *ap) +{ + *(ap->a_bytesallocated) = 0; + return (0); +} + +int +err_allocate(struct vop_allocate_args *ap) +{ + (void)nop_allocate(ap); + return (EOPNOTSUPP); +} + + +struct vop_update_args /* { + struct vnode *a_vp; + struct timeval *a_access; + struct timeval *a_modify; + int a_waitfor; +} */; + +int +nop_update(struct vop_update_args *ap) +{ + return (0); +} + +int +err_update(struct vop_update_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_pgrd_args /* { + struct vnode *a_vp; + struct uio *a_uio; + struct ucred *a_cred; +} */; + +int +nop_pgrd(struct vop_pgrd_args *ap) +{ + return (0); +} + +int +err_pgrd(struct vop_pgrd_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_pgwr_args /* { + struct vnode 
*a_vp; + struct uio *a_uio; + struct ucred *a_cred; + vm_offset_t a_offset; +} */; + +int +nop_pgwr(struct vop_pgwr_args *ap) +{ + return (0); +} + +int +err_pgwr(struct vop_pgwr_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_bwrite_args /* { + struct buf *a_bp; +} */; + +int +nop_bwrite(struct vop_bwrite_args *ap) +{ + return (bwrite(ap->a_bp)); +} + +int +err_bwrite(struct vop_bwrite_args *ap) +{ + return (EOPNOTSUPP); +} + + +struct vop_pagein_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_foffset, + size_t a_size, + struct ucred *a_cred, + int a_flags +} */; + +int +nop_pagein(struct vop_pagein_args *ap) +{ + kernel_upl_abort(ap->a_pl, UPL_ABORT_ERROR); + return (0); +} + +int +err_pagein(struct vop_pagein_args *ap) +{ + kernel_upl_abort(ap->a_pl, UPL_ABORT_ERROR); + return (EOPNOTSUPP); +} + + +struct vop_pageout_args /* { + struct vnode *a_vp, + upl_t a_pl, + vm_offset_t a_pl_offset, + off_t a_foffset, + size_t a_size, + struct ucred *a_cred, + int a_flags +} */; + +int +nop_pageout(struct vop_pageout_args *ap) +{ + kernel_upl_abort(ap->a_pl, UPL_ABORT_ERROR); + return (0); +} + +int +err_pageout(struct vop_pageout_args *ap) +{ + kernel_upl_abort(ap->a_pl, UPL_ABORT_ERROR); + return (EOPNOTSUPP); +} + + +struct vop_devblocksize_args /* { + struct vnode *a_vp; + register_t *a_retval; +} */; + +int +nop_devblocksize(struct vop_devblocksize_args *ap) +{ + /* XXX default value because the call sites do not check error */ + *ap->a_retval = 512; + return (0); +} + +int +err_devblocksize(struct vop_devblocksize_args *ap) +{ + (void)nop_devblocksize(ap); + return (EOPNOTSUPP); +} + + +struct vop_searchfs /* { + struct vnode *a_vp; + void *a_searchparams1; + void *a_searchparams2; + struct attrlist *a_searchattrs; + u_long a_maxmatches; + struct timeval *a_timelimit; + struct attrlist *a_returnattrs; + u_long *a_nummatches; + u_long a_scriptcode; + u_long a_options; + struct uio *a_uio; + struct searchstate *a_searchstate; 
+} */; + +int +nop_searchfs(struct vop_searchfs_args *ap) +{ + *(ap->a_nummatches) = 0; + return (0); +} + +int +err_searchfs(struct vop_searchfs_args *ap) +{ + (void)nop_searchfs(ap); + return (EOPNOTSUPP); +} + +struct vop_copyfile_args /*{ + struct vnodeop_desc *a_desc; + struct vnode *a_fvp; + struct vnode *a_tdvp; + struct vnode *a_tvp; + struct componentname *a_tcnp; + int a_flags; +}*/; + +int +nop_copyfile(struct vop_copyfile_args *ap) +{ + if (ap->a_tdvp == ap->a_tvp) + vrele(ap->a_tdvp); + else + vput(ap->a_tdvp); + if (ap->a_tvp) + vput(ap->a_tvp); + vrele(ap->a_fvp); + return (0); +} + + +int +err_copyfile(struct vop_copyfile_args *ap) +{ + (void)nop_copyfile(ap); + return (EOPNOTSUPP); +} + + +struct vop_blktooff_args /* { + struct vnode *a_vp; + daddr_t a_lblkno; + off_t *a_offset; +} */; + +int +nop_blktooff(struct vop_blktooff_args *ap) +{ + *ap->a_offset = (off_t)-1; /* failure */ + return (0); +} + +int +err_blktooff(struct vop_blktooff_args *ap) +{ + (void)nop_blktooff(ap); + return (EOPNOTSUPP); +} + +struct vop_offtoblk_args /* { + struct vnode *a_vp; + off_t a_offset; + daddr_t *a_lblkno; +} */; + +int +nop_offtoblk(struct vop_offtoblk_args *ap) +{ + *ap->a_lblkno = (daddr_t)-1; /* failure */ + return (0); +} + +int +err_offtoblk(struct vop_offtoblk_args *ap) +{ + (void)nop_offtoblk(ap); + return (EOPNOTSUPP); +} + +struct vop_cmap_args /* { + struct vnode *a_vp; + off_t a_foffset; + size_t a_size; + daddr_t *a_bpn; + size_t *a_run; + void *a_poff; +} */; + +int nop_cmap(struct vop_cmap_args *ap) +{ + return (0); +} + +int err_cmap(struct vop_cmap_args *ap) +{ + return (EOPNOTSUPP); +} + diff --git a/bsd/vfs/vfs_support.h b/bsd/vfs/vfs_support.h new file mode 100644 index 000000000..7eac9f21e --- /dev/null +++ b/bsd/vfs/vfs_support.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * File: vfs/vfs_support.h + * + * Prototypes for the default vfs routines. A VFS plugin can use these + * functions in case it does not want to implement all. These functions + * take care of releasing locks and free up memory that they are + * supposed to. + * + * HISTORY + * 18-Aug-1998 Umesh Vaishampayan (umeshv@apple.com) + * Created. 
+ */ + +#ifndef _VFS_VFS_SUPPORT_H_ +#define _VFS_VFS_SUPPORT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int nop_create(struct vop_create_args *ap); +extern int err_create(struct vop_create_args *ap); + +extern int nop_whiteout(struct vop_whiteout_args *ap); +extern int err_whiteout(struct vop_whiteout_args *ap); + +extern int nop_mknod(struct vop_mknod_args *ap); +extern int err_mknod(struct vop_mknod_args *ap); + +extern int nop_mkcomplex(struct vop_mkcomplex_args *ap); +extern int err_mkcomplex(struct vop_mkcomplex_args *ap); + +extern int nop_open(struct vop_open_args *ap); +extern int err_open(struct vop_open_args *ap); + +extern int nop_close(struct vop_close_args *ap); +extern int err_close(struct vop_close_args *ap); + +extern int nop_access(struct vop_access_args *ap); +extern int err_access(struct vop_access_args *ap); + +extern int nop_getattr(struct vop_getattr_args *ap); +extern int err_getattr(struct vop_getattr_args *ap); + +extern int nop_setattr(struct vop_setattr_args *ap); +extern int err_setattr(struct vop_setattr_args *ap); + +extern int nop_getattrlist(struct vop_getattrlist_args *ap); +extern int err_getattrlist(struct vop_getattrlist_args *ap); + +extern int nop_setattrlist(struct vop_setattrlist_args *ap); +extern int err_setattrlist(struct vop_setattrlist_args *ap); + +extern int nop_read(struct vop_read_args *ap); +extern int err_read(struct vop_read_args *ap); + +extern int nop_write(struct vop_write_args *ap); +extern int err_write(struct vop_write_args *ap); + +extern int nop_lease(struct vop_lease_args *ap); +extern int err_lease(struct vop_lease_args *ap); + +extern int nop_ioctl(struct vop_ioctl_args *ap); +extern int err_ioctl(struct vop_ioctl_args *ap); + +extern int nop_select(struct vop_select_args *ap); +extern int err_select(struct vop_select_args *ap); + +extern int nop_exchange(struct vop_exchange_args 
*ap); +extern int err_exchange(struct vop_exchange_args *ap); + +extern int nop_revoke(struct vop_revoke_args *ap); +extern int err_revoke(struct vop_revoke_args *ap); + +extern int nop_mmap(struct vop_mmap_args *ap); +extern int err_mmap(struct vop_mmap_args *ap); + +extern int nop_fsync(struct vop_fsync_args *ap); +extern int err_fsync(struct vop_fsync_args *ap); + +extern int nop_seek(struct vop_seek_args *ap); +extern int err_seek(struct vop_seek_args *ap); + +extern int nop_remove(struct vop_remove_args *ap); +extern int err_remove(struct vop_remove_args *ap); + +extern int nop_link(struct vop_link_args *ap); +extern int err_link(struct vop_link_args *ap); + +extern int nop_rename(struct vop_rename_args *ap); +extern int err_rename(struct vop_rename_args *ap); + +extern int nop_mkdir(struct vop_mkdir_args *ap); +extern int err_mkdir(struct vop_mkdir_args *ap); + +extern int nop_rmdir(struct vop_rmdir_args *ap); +extern int err_rmdir(struct vop_rmdir_args *ap); + +extern int nop_symlink(struct vop_symlink_args *ap); +extern int err_symlink(struct vop_symlink_args *ap); + +extern int nop_readdir(struct vop_readdir_args *ap); +extern int err_readdir(struct vop_readdir_args *ap); + +extern int nop_readdirattr(struct vop_readdirattr_args *ap); +extern int err_readdirattr(struct vop_readdirattr_args *ap); + +extern int nop_readlink(struct vop_readlink_args *ap); +extern int err_readlink(struct vop_readlink_args *ap); + +extern int nop_abortop(struct vop_abortop_args *ap); +extern int err_abortop(struct vop_abortop_args *ap); + +extern int nop_inactive(struct vop_inactive_args *ap); +extern int err_inactive(struct vop_inactive_args *ap); + +extern int nop_reclaim(struct vop_reclaim_args *ap); +extern int err_reclaim(struct vop_reclaim_args *ap); + +extern int nop_lock(struct vop_lock_args *ap); +extern int err_lock(struct vop_lock_args *ap); + +extern int nop_unlock(struct vop_unlock_args *ap); +extern int err_unlock(struct vop_unlock_args *ap); + +extern int 
nop_bmap(struct vop_bmap_args *ap); +extern int err_bmap(struct vop_bmap_args *ap); + +extern int nop_strategy(struct vop_strategy_args *ap); +extern int err_strategy(struct vop_strategy_args *ap); + +extern int nop_print(struct vop_print_args *ap); +extern int err_print(struct vop_print_args *ap); + +extern int nop_islocked(struct vop_islocked_args *ap); +extern int err_islocked(struct vop_islocked_args *ap); + +extern int nop_pathconf(struct vop_pathconf_args *ap); +extern int err_pathconf(struct vop_pathconf_args *ap); + +extern int nop_advlock(struct vop_advlock_args *ap); +extern int err_advlock(struct vop_advlock_args *ap); + +extern int nop_blkatoff(struct vop_blkatoff_args *ap); +extern int err_blkatoff(struct vop_blkatoff_args *ap); + +extern int nop_valloc(struct vop_valloc_args *ap); +extern int err_valloc(struct vop_valloc_args *ap); + +extern int nop_reallocblks(struct vop_reallocblks_args *ap); +extern int err_reallocblks(struct vop_reallocblks_args *ap); + +extern int nop_vfree(struct vop_vfree_args *ap); +extern int err_vfree(struct vop_vfree_args *ap); + +extern int nop_truncate(struct vop_truncate_args *ap); +extern int err_truncate(struct vop_truncate_args *ap); + +extern int nop_allocate(struct vop_allocate_args *ap); +extern int err_allocate(struct vop_allocate_args *ap); + +extern int nop_update(struct vop_update_args *ap); +extern int err_update(struct vop_update_args *ap); + +extern int nop_pgrd(struct vop_pgrd_args *ap); +extern int err_pgrd(struct vop_pgrd_args *ap); + +extern int nop_pgwr(struct vop_pgwr_args *ap); +extern int err_pgwr(struct vop_pgwr_args *ap); + +extern int nop_bwrite(struct vop_bwrite_args *ap); +extern int err_bwrite(struct vop_bwrite_args *ap); + +extern int nop_pagein(struct vop_pagein_args *ap); +extern int err_pagein(struct vop_pagein_args *ap); + +extern int nop_pageout(struct vop_pageout_args *ap); +extern int err_pageout(struct vop_pageout_args *ap); + +extern int nop_devblocksize(struct vop_devblocksize_args 
*ap); +extern int err_devblocksize(struct vop_devblocksize_args *ap); + +extern int nop_searchfs(struct vop_searchfs_args *ap); +extern int err_searchfs(struct vop_searchfs_args *ap); + +extern int nop_copyfile(struct vop_copyfile_args *ap); +extern int err_copyfile(struct vop_copyfile_args *ap); + +extern int nop_blktooff(struct vop_blktooff_args *ap); +extern int err_blktooff(struct vop_blktooff_args *ap); + +extern int nop_offtoblk(struct vop_offtoblk_args *ap); +extern int err_offtoblk(struct vop_offtoblk_args *ap); + +extern int nop_cmap(struct vop_cmap_args *ap); +extern int err_cmap(struct vop_cmap_args *ap); +#endif /* _VFS_VFS_SUPPORT_H_ */ diff --git a/bsd/vfs/vfs_syscalls.c b/bsd/vfs/vfs_syscalls.c new file mode 100644 index 000000000..a7e2dd53e --- /dev/null +++ b/bsd/vfs/vfs_syscalls.c @@ -0,0 +1,3575 @@ +/* + * Copyright (c) 1995-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. 
+ * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vfs_syscalls.c 8.41 (Berkeley) 6/15/95 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct lock__bsd__ exchangelock; + +/* + * The currently logged-in user, for ownership of files/directories whose on-disk + * permissions are ignored: + */ +uid_t console_user; + +static int change_dir __P((struct nameidata *ndp, struct proc *p)); +static void checkdirs __P((struct vnode *olddp)); + +/* counts number of mount and unmount operations */ +unsigned int vfs_nummntops=0; + +/* + * Virtual File System System Calls + */ + +/* + * Mount a file system. 
+ */ +struct mount_args { + char *type; + char *path; + int flags; + caddr_t data; +}; +/* ARGSUSED */ +int +mount(p, uap, retval) + struct proc *p; + register struct mount_args *uap; + register_t *retval; +{ + struct vnode *vp; + struct mount *mp; + struct vfsconf *vfsp; + int error, flag; + struct vattr va; + u_long fstypenum; + struct nameidata nd; + char fstypename[MFSNAMELEN]; + size_t dummy=0; + /* + * Get vnode to be covered + */ + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + uap->path, p); + if (error = namei(&nd)) + return (error); + vp = nd.ni_vp; + + if ((vp->v_flag & VROOT) && + (vp->v_mount->mnt_flag & MNT_ROOTFS)) + uap->flags |= MNT_UPDATE; + + if (uap->flags & MNT_UPDATE) { + if ((vp->v_flag & VROOT) == 0) { + vput(vp); + return (EINVAL); + } + mp = vp->v_mount; + flag = mp->mnt_flag; + /* + * We only allow the filesystem to be reloaded if it + * is currently mounted read-only. + */ + if ((uap->flags & MNT_RELOAD) && + ((mp->mnt_flag & MNT_RDONLY) == 0)) { + vput(vp); + return (EOPNOTSUPP); /* Needs translation */ + } + mp->mnt_flag |= + uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE); + /* + * Only root, or the user that did the original mount is + * permitted to update it. + */ + if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid && + (error = suser(p->p_ucred, &p->p_acflag))) { + vput(vp); + return (error); + } + /* + * Do not allow NFS export by non-root users. FOr non-root + * users, silently enforce MNT_NOSUID and MNT_NODEV, and + * MNT_NOEXEC if mount point is already MNT_NOEXEC. + */ + if (p->p_ucred->cr_uid != 0) { + if (uap->flags & MNT_EXPORTED) { + vput(vp); + return (EPERM); + } + uap->flags |= MNT_NOSUID | MNT_NODEV; + if (flag & MNT_NOEXEC) + uap->flags |= MNT_NOEXEC; + } + if (vfs_busy(mp, LK_NOWAIT, 0, p)) { + vput(vp); + return (EBUSY); + } + VOP_UNLOCK(vp, 0, p); + goto update; + } + /* + * If the user is not root, ensure that they own the directory + * onto which we are attempting to mount. 
+ */ + if ((error = VOP_GETATTR(vp, &va, p->p_ucred, p)) || + (va.va_uid != p->p_ucred->cr_uid && + (error = suser(p->p_ucred, &p->p_acflag)))) { + vput(vp); + return (error); + } + /* + * Do not allow NFS export by non-root users. FOr non-root + * users, silently enforce MNT_NOSUID and MNT_NODEV, and + * MNT_NOEXEC if mount point is already MNT_NOEXEC. + */ + if (p->p_ucred->cr_uid != 0) { + if (uap->flags & MNT_EXPORTED) { + vput(vp); + return (EPERM); + } + uap->flags |= MNT_NOSUID | MNT_NODEV; + if (vp->v_mount->mnt_flag & MNT_NOEXEC) + uap->flags |= MNT_NOEXEC; + } + if (error = vinvalbuf(vp, V_SAVE, p->p_ucred, p, 0, 0)) { + vput(vp); + return (error); + } + if (vp->v_type != VDIR) { + vput(vp); + return (ENOTDIR); + } +#if COMPAT_43 + /* + * Historically filesystem types were identified by number. If we + * get an integer for the filesystem type instead of a string, we + * check to see if it matches one of the historic filesystem types. + */ + fstypenum = (u_long)uap->type; + if (fstypenum < maxvfsconf) { + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (vfsp->vfc_typenum == fstypenum) + break; + if (vfsp == NULL) { + vput(vp); + return (ENODEV); + } + strncpy(fstypename, vfsp->vfc_name, MFSNAMELEN); + } else +#endif /* COMPAT_43 */ + if (error = copyinstr(uap->type, fstypename, MFSNAMELEN, &dummy)) { + vput(vp); + return (error); + } + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + if (!strcmp(vfsp->vfc_name, fstypename)) + break; + if (vfsp == NULL) { + vput(vp); + return (ENODEV); + } + simple_lock(&vp->v_interlock); + if (ISSET(vp->v_flag, VMOUNT) && (vp->v_mountedhere != NULL)) { + simple_unlock(&vp->v_interlock); + vput(vp); + return (EBUSY); + } + SET(vp->v_flag, VMOUNT); + simple_unlock(&vp->v_interlock); + + /* + * Allocate and initialize the filesystem. 
+ */ + mp = (struct mount *)_MALLOC_ZONE((u_long)sizeof(struct mount), + M_MOUNT, M_WAITOK); + bzero((char *)mp, (u_long)sizeof(struct mount)); + lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0); + (void)vfs_busy(mp, LK_NOWAIT, 0, p); + mp->mnt_op = vfsp->vfc_vfsops; + mp->mnt_vfc = vfsp; + vfsp->vfc_refcount++; + mp->mnt_stat.f_type = vfsp->vfc_typenum; + mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; + strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN); + mp->mnt_vnodecovered = vp; + mp->mnt_stat.f_owner = p->p_ucred->cr_uid; + VOP_UNLOCK(vp, 0, p); + +update: + /* + * Set the mount level flags. + */ + if (uap->flags & MNT_RDONLY) + mp->mnt_flag |= MNT_RDONLY; + else if (mp->mnt_flag & MNT_RDONLY) + mp->mnt_kern_flag |= MNTK_WANTRDWR; + mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | + MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_UNKNOWNPERMISSIONS); + mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC | + MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_UNKNOWNPERMISSIONS); + /* + * Mount the filesystem. + */ + error = VFS_MOUNT(mp, uap->path, uap->data, &nd, p); + if (mp->mnt_flag & MNT_UPDATE) { + vrele(vp); + if (mp->mnt_kern_flag & MNTK_WANTRDWR) + mp->mnt_flag &= ~MNT_RDONLY; + mp->mnt_flag &=~ + (MNT_UPDATE | MNT_RELOAD | MNT_FORCE); + mp->mnt_kern_flag &=~ MNTK_WANTRDWR; + if (error) + mp->mnt_flag = flag; + vfs_unbusy(mp, p); + return (error); + } + /* + * Put the new filesystem on the mount list after root. 
 */
	/* (tail of mount(): commit the new mount or back it out) */
	cache_purge(vp);
	if (!error) {
		/*
		 * Success: clear the in-progress marker, hang the mount
		 * off the covered vnode, add it to the global mount list,
		 * repoint any cwd/root references, then start it up.
		 */
		simple_lock(&vp->v_interlock);
		CLR(vp->v_flag, VMOUNT);
		vp->v_mountedhere =mp;
		simple_unlock(&vp->v_interlock);
		simple_lock(&mountlist_slock);
		CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
		simple_unlock(&mountlist_slock);
		checkdirs(vp);
		VOP_UNLOCK(vp, 0, p);
		vfs_unbusy(mp, p);
		/* NOTE(review): on VFS_START failure only the covered
		 * vnode ref is dropped; mp stays on the list — confirm
		 * this is the intended recovery. */
		if (error = VFS_START(mp, 0, p))
			vrele(vp);

		/* increment the operations count */
		if (!error)
			vfs_nummntops++;
	} else {
		/*
		 * Failure: undo the VMOUNT marker, drop the vfsconf
		 * reference and free the half-built mount structure.
		 */
		simple_lock(&vp->v_interlock);
		CLR(vp->v_flag, VMOUNT);
		simple_unlock(&vp->v_interlock);
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp, p);
		_FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT);
		vput(vp);
	}
	return (error);
}

/*
 * Scan all active processes to see if any of them have a current
 * or root directory onto which the new filesystem has just been
 * mounted. If so, replace them with the new mount point.
 */
static void
checkdirs(olddp)
	struct vnode *olddp;
{
	struct filedesc *fdp;
	struct vnode *newdp;
	struct proc *p;

	/* Only our caller holds a reference: nothing to migrate. */
	if (olddp->v_usecount == 1)
		return;
	/* Get the root of the filesystem just mounted over olddp. */
	if (VFS_ROOT(olddp->v_mountedhere, &newdp))
		panic("mount: lost mount");
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		fdp = p->p_fd;
		if (fdp->fd_cdir == olddp) {
			vrele(fdp->fd_cdir);
			VREF(newdp);
			fdp->fd_cdir = newdp;
		}
		if (fdp->fd_rdir == olddp) {
			vrele(fdp->fd_rdir);
			VREF(newdp);
			fdp->fd_rdir = newdp;
		}
	}
	/* The system root itself may have been mounted over. */
	if (rootvnode == olddp) {
		vrele(rootvnode);
		VREF(newdp);
		rootvnode = newdp;
	}
	/* VFS_ROOT returned newdp locked and referenced; release both. */
	vput(newdp);
}

/*
 * Unmount a file system.
 *
 * Note: unmount takes a path to the vnode mounted on as argument,
 * not special file (as before).
 */
struct unmount_args {
	char *path;
	int flags;
};
/* ARGSUSED */
int
unmount(p, uap, retval)
	struct proc *p;
	register struct unmount_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct mount *mp;
	int error;
	struct nameidata nd;

	/* Look up the mount point; LOCKLEAF returns vp locked. */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	mp = vp->v_mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
	    (error = suser(p->p_ucred, &p->p_acflag))) {
		vput(vp);
		return (error);
	}

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {
		vput(vp);
		return (EBUSY); /* the root is always busy */
	}

	/*
	 * Must be the root of the filesystem
	 */
	if ((vp->v_flag & VROOT) == 0) {
		vput(vp);
		return (EINVAL);
	}
	/* NOTE(review): vp is released before dounmount() runs; mp is
	 * only pinned by the mount list from here on — confirm a racing
	 * unmount is excluded by MNTK_UNMOUNT handling in dounmount. */
	vput(vp);
	return (dounmount(mp, uap->flags, p));
}

/*
 * Do the actual file system unmount.
 */
int
dounmount(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	struct vnode *coveredvp;
	int error;

	simple_lock(&mountlist_slock);
	/* Mark the mount dying and drain out all other lock holders;
	 * lockmgr with LK_INTERLOCK releases mountlist_slock for us. */
	mp->mnt_kern_flag |= MNTK_UNMOUNT;
	lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_slock, p);
	mp->mnt_flag &=~ MNT_ASYNC;
	ubc_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */
	/* Flush (unless read-only), then unmount; MNT_FORCE proceeds
	 * even if the sync failed. */
	if (((mp->mnt_flag & MNT_RDONLY) ||
	    (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, p)) == 0) ||
	    (flags & MNT_FORCE))
		error = VFS_UNMOUNT(mp, flags, p);
	simple_lock(&mountlist_slock);
	if (error) {
		/* Unmount failed: revive the mount and re-enable its
		 * drained lock (LK_REENABLE). */
		mp->mnt_kern_flag &= ~MNTK_UNMOUNT;
		lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
		    &mountlist_slock, p);
		goto out;
	}

	/* increment the operations count */
	/* NOTE(review): error is known zero here, so this test is
	 * redundant — kept for fidelity. */
	if (!error)
		vfs_nummntops++;
	CIRCLEQ_REMOVE(&mountlist, mp, mnt_list);
	if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
		/* Detach from the covered vnode; drop the interlock
		 * around vrele since it may sleep. */
		coveredvp->v_mountedhere = (struct mount *)0;
		simple_unlock(&mountlist_slock);
		vrele(coveredvp);
		simple_lock(&mountlist_slock);
	}
	mp->mnt_vfc->vfc_refcount--;
	if (mp->mnt_vnodelist.lh_first != NULL) {
		panic("unmount: dangling vnode");
	}
	lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, p);
out:
	/* Wake anyone sleeping on this mount, then free it on success. */
	if (mp->mnt_kern_flag & MNTK_MWAIT)
		wakeup((caddr_t)mp);
	if (!error)
		_FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT);
	return (error);
}

/*
 * Sync each mounted filesystem.
 */
#if DIAGNOSTIC
int syncprt = 0;
struct ctldebug debug0 = { "syncprt", &syncprt };
#endif

struct sync_args {
	int dummy;
};
/* Debug knob: when non-zero, sync() also dumps VM page counters. */
int print_vmpage_stat=0;

/* ARGSUSED */
int
sync(p, uap, retval)
	struct proc *p;
	struct sync_args *uap;
	register_t *retval;
{
	register struct mount *mp, *nmp;
	int asyncflag;

	/* Walk the mount list, skipping mounts that are busy. */
	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
			/* Temporarily force synchronous writes for the
			 * flush, restoring MNT_ASYNC afterwards. */
			asyncflag = mp->mnt_flag & MNT_ASYNC;
			mp->mnt_flag &= ~MNT_ASYNC;
			VFS_SYNC(mp, MNT_NOWAIT, p->p_ucred, p);
			if (asyncflag)
				mp->mnt_flag |= MNT_ASYNC;
		}
		/* Re-take the list lock and pick up the next entry
		 * before releasing our busy reference. */
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp, p);
	}
	simple_unlock(&mountlist_slock);

	{
		extern void vm_countdirtypages(void);
		extern unsigned int vp_pagein, vp_pgodirty, vp_pgoclean;
		extern unsigned int dp_pgins, dp_pgouts;
		if(print_vmpage_stat) {
			vm_countdirtypages();
			printf("VP: %d: %d: %d: %d: %d\n", vp_pgodirty, vp_pgoclean, vp_pagein, dp_pgins, dp_pgouts);
		}
	}
#if DIAGNOSTIC
	if (syncprt)
		vfs_bufstats();
#endif /* DIAGNOSTIC */
	return (0);
}

/*
 * Change filesystem quotas.
 */
struct quotactl_args {
	char *path;
	int cmd;
	int uid;
	caddr_t arg;
};
/* ARGSUSED */
int
quotactl(p, uap, retval)
	struct proc *p;
	register struct quotactl_args *uap;
	register_t *retval;
{
	register struct mount *mp;
	int error;
	struct nameidata nd;

	/* Resolve the path only to find its mount, then hand the raw
	 * command through to the filesystem. */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	mp = nd.ni_vp->v_mount;
	vrele(nd.ni_vp);
	return (VFS_QUOTACTL(mp, uap->cmd, uap->uid,
	    uap->arg, p));
}

/*
 * Get filesystem statistics.
 */
struct statfs_args {
	char *path;
	struct statfs *buf;
};
/* ARGSUSED */
int
statfs(p, uap, retval)
	struct proc *p;
	register struct statfs_args *uap;
	register_t *retval;
{
	register struct mount *mp;
	register struct statfs *sp;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	mp = nd.ni_vp->v_mount;
	sp = &mp->mnt_stat;
	vrele(nd.ni_vp);
	/* Refresh the per-mount statfs cache before copying out. */
	if (error = VFS_STATFS(mp, sp, p))
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/* return (copyout((caddr_t)sp, (caddr_t)uap->buf, sizeof(*sp))); */
	/* Copy out everything except the trailing reserved fields —
	 * presumably the user-visible struct ends before them; confirm
	 * against the exported <sys/mount.h> layout. */
	return (copyout((caddr_t)sp, (caddr_t)uap->buf, sizeof(*sp)-sizeof(sp->f_reserved3)-sizeof(sp->f_reserved4)));
}

/*
 * Get filesystem statistics.
 */
struct fstatfs_args {
	int fd;
	struct statfs *buf;
};
/* ARGSUSED */
int
fstatfs(p, uap, retval)
	struct proc *p;
	register struct fstatfs_args *uap;
	register_t *retval;
{
	struct file *fp;
	struct mount *mp;
	register struct statfs *sp;
	int error;

	/* Same as statfs(), but starting from an open descriptor. */
	if (error = getvnode(p, uap->fd, &fp))
		return (error);
	mp = ((struct vnode *)fp->f_data)->v_mount;
	if (!mp)
		return (EBADF);
	sp = &mp->mnt_stat;
	if (error = VFS_STATFS(mp, sp, p))
		return (error);
	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/* return (copyout((caddr_t)sp, (caddr_t)uap->buf, sizeof(*sp))); */
	return (copyout((caddr_t)sp, (caddr_t)uap->buf, sizeof(*sp)-sizeof(sp->f_reserved3)-sizeof(sp->f_reserved4)));
}

/*
 * Get statistics on all filesystems.
+ */ +struct getfsstat_args { + struct statfs *buf; + long bufsize; + int flags; +}; +int +getfsstat(p, uap, retval) + struct proc *p; + register struct getfsstat_args *uap; + register_t *retval; +{ + register struct mount *mp, *nmp; + register struct statfs *sp; + caddr_t sfsp; + long count, maxcount, error; + + maxcount = uap->bufsize / sizeof(struct statfs); + sfsp = (caddr_t)uap->buf; + count = 0; + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { + nmp = mp->mnt_list.cqe_next; + continue; + } + if (sfsp && count < maxcount) { + sp = &mp->mnt_stat; + /* + * If MNT_NOWAIT is specified, do not refresh the + * fsstat cache. MNT_WAIT overrides MNT_NOWAIT. + */ + if (((uap->flags & MNT_NOWAIT) == 0 || + (uap->flags & MNT_WAIT)) && + (error = VFS_STATFS(mp, sp, p))) { + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + continue; + } + sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; + if (error = copyout((caddr_t)sp, sfsp, sizeof(*sp))) + return (error); + sfsp += sizeof(*sp); + } + count++; + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + } + simple_unlock(&mountlist_slock); + if (sfsp && count > maxcount) + *retval = maxcount; + else + *retval = count; + return (0); +} + +#if COMPAT_GETFSSTAT +ogetfsstat(p, uap, retval) + struct proc *p; + register struct getfsstat_args *uap; + register_t *retval; +{ + register struct mount *mp, *nmp; + register struct statfs *sp; + caddr_t sfsp; + long count, maxcount, error; + + maxcount = uap->bufsize / (sizeof(struct statfs) - sizeof(sp->f_reserved4)); + sfsp = (caddr_t)uap->buf; + count = 0; + simple_lock(&mountlist_slock); + for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) { + if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) { + nmp = mp->mnt_list.cqe_next; + continue; + } + if (sfsp && count < maxcount) { + sp = &mp->mnt_stat; + /* 
+ * If MNT_NOWAIT is specified, do not refresh the + * fsstat cache. MNT_WAIT overrides MNT_NOWAIT. + */ + if (((uap->flags & MNT_NOWAIT) == 0 || + (uap->flags & MNT_WAIT)) && + (error = VFS_STATFS(mp, sp, p))) { + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + continue; + } + sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK; + if (error = copyout((caddr_t)sp, sfsp, sizeof(*sp) - sizeof(sp->f_reserved3) - sizeof(sp->f_reserved4))) + return (error); + sfsp += sizeof(*sp) - sizeof(sp->f_reserved4); + } + count++; + simple_lock(&mountlist_slock); + nmp = mp->mnt_list.cqe_next; + vfs_unbusy(mp, p); + } + simple_unlock(&mountlist_slock); + if (sfsp && count > maxcount) + *retval = maxcount; + else + *retval = count; + return (0); +} +#endif + +/* + * Change current working directory to a given file descriptor. + */ +struct fchdir_args { + int fd; +}; +/* ARGSUSED */ +int +fchdir(p, uap, retval) + struct proc *p; + struct fchdir_args *uap; + register_t *retval; +{ + register struct filedesc *fdp = p->p_fd; + struct vnode *vp, *tdp; + struct mount *mp; + struct file *fp; + int error; + + if (error = getvnode(p, uap->fd, &fp)) + return (error); + vp = (struct vnode *)fp->f_data; + VREF(vp); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (vp->v_type != VDIR) + error = ENOTDIR; + else + error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p); + while (!error && (mp = vp->v_mountedhere) != NULL) { + if (vfs_busy(mp, 0, 0, p)) + continue; + error = VFS_ROOT(mp, &tdp); + vfs_unbusy(mp, p); + if (error) + break; + vput(vp); + vp = tdp; + } + if (error) { + vput(vp); + return (error); + } + VOP_UNLOCK(vp, 0, p); + vrele(fdp->fd_cdir); + fdp->fd_cdir = vp; + return (0); +} + +/* + * Change current working directory (``.''). 
 */
struct chdir_args {
	char *path;
};
/* ARGSUSED */
int
chdir(p, uap, retval)
	struct proc *p;
	struct chdir_args *uap;
	register_t *retval;
{
	register struct filedesc *fdp = p->p_fd;
	int error;
	struct nameidata nd;

	/* change_dir() validates the path is a searchable directory
	 * and leaves a held, unlocked vnode in nd.ni_vp. */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    uap->path, p);
	if (error = change_dir(&nd, p))
		return (error);
	vrele(fdp->fd_cdir);
	fdp->fd_cdir = nd.ni_vp;
	return (0);
}

/*
 * Change notion of root (``/'') directory.
 */
struct chroot_args {
	char *path;
};
/* ARGSUSED */
int
chroot(p, uap, retval)
	struct proc *p;
	struct chroot_args *uap;
	register_t *retval;
{
	register struct filedesc *fdp = p->p_fd;
	int error;
	struct nameidata nd;

	/* Privileged operation. */
	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    uap->path, p);
	if (error = change_dir(&nd, p))
		return (error);

	/* Detach from the global shared regions so a chrooted process
	 * cannot be influenced by them — NOTE(review): semantics of
	 * clone_system_shared_regions() not visible here; confirm. */
	if(error = clone_system_shared_regions()) {
		vrele(nd.ni_vp);
		return (error);
	}

	if (fdp->fd_rdir != NULL)
		vrele(fdp->fd_rdir);
	fdp->fd_rdir = nd.ni_vp;
	return (0);
}

/*
 * Common routine for chroot and chdir.
 */
static int
change_dir(ndp, p)
	register struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *vp;
	int error;

	if (error = namei(ndp))
		return (error);
	vp = ndp->ni_vp;
	/* Must be a directory the caller may search. */
	if (vp->v_type != VDIR)
		error = ENOTDIR;
	else
		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p);
	/* On success the vnode stays referenced but unlocked. */
	if (error)
		vput(vp);
	else
		VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * Check permissions, allocate an open file structure,
 * and call the device open routine if any.
 */
struct open_args {
	char *path;
	int flags;
	int mode;
};
int
open(p, uap, retval)
	struct proc *p;
	register struct open_args *uap;
	register_t *retval;
{
	register struct filedesc *fdp = p->p_fd;
	register struct file *fp;
	register struct vnode *vp;
	int flags, cmode;
	struct file *nfp;
	int type, indx, error;
	struct flock lf;
	struct nameidata nd;
	extern struct fileops vnops;

	/* CERT advisory patch applied from FreeBSD */
	/* Refer to Radar#2262895 A. Ramesh */
	/* Reject opens that request neither read nor write access. */
	flags = FFLAGS(uap->flags);
	if ((flags & (FREAD | FWRITE))==0)
		return(EINVAL);
	/* Reserve a descriptor slot and file structure up front. */
	if (error = falloc(p, &nfp, &indx))
		return (error);
	fp = nfp;
	cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	p->p_dupfd = -indx - 1;			/* XXX check for fdopen */
	if (error = vn_open(&nd, flags, cmode)) {
		ffree(fp);
		/* fdopen(3) support: a device open may redirect us to
		 * duplicate an existing descriptor instead. */
		if ((error == ENODEV || error == ENXIO) &&
		    p->p_dupfd >= 0 && 			/* XXX from fdopen */
		    (error =
			dupfdopen(fdp, indx, p->p_dupfd, flags, error)) == 0) {
			*retval = indx;
			return (0);
		}
		if (error == ERESTART)
			error = EINTR;
		fdrelse(p, indx);
		return (error);
	}
	p->p_dupfd = 0;
	vp = nd.ni_vp;
	fp->f_flag = flags & FMASK;
	fp->f_type = DTYPE_VNODE;
	fp->f_ops = &vnops;
	fp->f_data = (caddr_t)vp;
	if (flags & (O_EXLOCK | O_SHLOCK)) {
		/* Apply the requested flock-style lock; drop the vnode
		 * lock first since VOP_ADVLOCK may sleep. */
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (flags & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((flags & FNONBLOCK) == 0)
			type |= F_WAIT;
		VOP_UNLOCK(vp, 0, p);
		if (error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) {
			(void) vn_close(vp, fp->f_flag, fp->f_cred, p);
			ffree(fp);
			fdrelse(p, indx);
			return (error);
		}
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		fp->f_flag |= FHASLOCK;
	}
	VOP_UNLOCK(vp, 0, p);
	/* Publish the descriptor only now that the open succeeded. */
	*fdflags(p, indx) &= ~UF_RESERVED;
	*retval = indx;
	return (0);
}

#if COMPAT_43
/*
 * Create a file.
 */
struct ocreat_args {
	char *path;
	int mode;
};
int
ocreat(p, uap, retval)
	struct proc *p;
	register struct ocreat_args *uap;
	register_t *retval;
{
	struct open_args nuap;

	/* Old creat(2) is exactly open(path, O_WRONLY|O_CREAT|O_TRUNC). */
	nuap.path = uap->path;
	nuap.mode = uap->mode;
	nuap.flags = O_WRONLY | O_CREAT | O_TRUNC;
	return (open(p, &nuap, retval));
}
#endif /* COMPAT_43 */

/*
 * Create a special file.
 */
struct mknod_args {
	char *path;
	int mode;
	int dev;
};
/* ARGSUSED */
int
mknod(p, uap, retval)
	struct proc *p;
	register struct mknod_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct vattr vattr;
	int error;
	int whiteout;
	struct nameidata nd;

	/* Privileged: only root may create device nodes. */
	if (error = suser(p->p_ucred, &p->p_acflag))
		return (error);
	NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	if (vp != NULL)
		error = EEXIST;
	else {
		VATTR_NULL(&vattr);
		vattr.va_mode = (uap->mode & ALLPERMS) &~ p->p_fd->fd_cmask;
		vattr.va_rdev = uap->dev;
		whiteout = 0;

		/* Only character/block devices, whiteouts, and the
		 * badsect VBAD marker are accepted; anything else
		 * (including S_IFREG) is EINVAL. */
		switch (uap->mode & S_IFMT) {
		case S_IFMT:	/* used by badsect to flag bad sectors */
			vattr.va_type = VBAD;
			break;
		case S_IFCHR:
			vattr.va_type = VCHR;
			break;
		case S_IFBLK:
			vattr.va_type = VBLK;
			break;
		case S_IFWHT:
			whiteout = 1;
			break;
		default:
			error = EINVAL;
			break;
		}
	}
	if (!error) {
		VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
		if (whiteout) {
			error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
			if (error)
				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
			vput(nd.ni_dvp);
		} else {
			/* VOP_MKNOD consumes the dvp reference. */
			error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
			    &nd.ni_cnd, &vattr);
		}
	} else {
		/* Clean up namei state on failure. */
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (vp)
			vrele(vp);
	}
	return (error);
}

/*
 * Create a named pipe.
 */
struct mkfifo_args {
	char *path;
	int mode;
};
/* ARGSUSED */
int
mkfifo(p, uap, retval)
	struct proc *p;
	register struct mkfifo_args *uap;
	register_t *retval;
{
	struct vattr vattr;
	int error;
	struct nameidata nd;

#if !FIFO
	/* FIFOs not configured into this kernel. */
	return (EOPNOTSUPP);
#else
	NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	if (nd.ni_vp != NULL) {
		/* Target already exists: unwind namei state. */
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == nd.ni_vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(nd.ni_vp);
		return (EEXIST);
	}
	VATTR_NULL(&vattr);
	vattr.va_type = VFIFO;
	vattr.va_mode = (uap->mode & ALLPERMS) &~ p->p_fd->fd_cmask;
	VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	return (VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr));
#endif /* FIFO */
}

/*
 * Make a hard file link.
 */
struct link_args {
	char *path;
	char *link;
};
/* ARGSUSED */
int
link(p, uap, retval)
	struct proc *p;
	register struct link_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct nameidata nd;
	int error;

	/* Look up the existing file first. */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	if (vp->v_type == VDIR)
		error = EPERM;	/* POSIX */
	else {
		/* Reuse nd for a CREATE lookup of the new link name. */
		nd.ni_cnd.cn_nameiop = CREATE;
		nd.ni_cnd.cn_flags = LOCKPARENT;
		nd.ni_dirp = uap->link;
		if ((error = namei(&nd)) == 0) {
			if (nd.ni_vp != NULL)
				error = EEXIST;
			if (!error) {
				VOP_LEASE(nd.ni_dvp, p, p->p_ucred,
				    LEASE_WRITE);
				VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
				error = VOP_LINK(vp, nd.ni_dvp, &nd.ni_cnd);
			} else {
				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
				if (nd.ni_dvp == nd.ni_vp)
					vrele(nd.ni_dvp);
				else
					vput(nd.ni_dvp);
				if (nd.ni_vp)
					vrele(nd.ni_vp);
			}
		}
	}
	vrele(vp);
	return (error);
}

/*
 * Make a symbolic link.
 */
struct symlink_args {
	char *path;
	char *link;
};
/* ARGSUSED */
int
symlink(p, uap, retval)
	struct proc *p;
	register struct symlink_args *uap;
	register_t *retval;
{
	struct vattr vattr;
	char *path;
	int error;
	struct nameidata nd;
	size_t dummy=0;
	/* Copy the link target into a kernel buffer first. */
	MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
	if (error = copyinstr(uap->path, path, MAXPATHLEN, &dummy))
		goto out;
	NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, uap->link, p);
	if (error = namei(&nd))
		goto out;
	if (nd.ni_vp) {
		/* Link name already exists: unwind and fail. */
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == nd.ni_vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		vrele(nd.ni_vp);
		error = EEXIST;
		goto out;
	}
	VATTR_NULL(&vattr);
	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
	VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
out:
	FREE_ZONE(path, MAXPATHLEN, M_NAMEI);
	return (error);
}

/*
 * Delete a whiteout from the filesystem.
 */
struct undelete_args {
	char *path;
};
/* ARGSUSED */
int
undelete(p, uap, retval)
	struct proc *p;
	register struct undelete_args *uap;
	register_t *retval;
{
	int error;
	struct nameidata nd;

	NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
	    uap->path, p);
	error = namei(&nd);
	if (error)
		return (error);

	/* The name must resolve to a whiteout entry, not a real file. */
	if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == nd.ni_vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (nd.ni_vp)
			vrele(nd.ni_vp);
		return (EEXIST);
	}

	VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
	if (error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE))
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
	vput(nd.ni_dvp);
	return (error);
}

/*
 * Delete a name from the filesystem.
 */
struct unlink_args {
	char *path;
};
/* ARGSUSED */
/*
 * Common unlink worker.  nodelbusy selects HFS semantics, where a
 * busy (open) file may not be deleted.
 */
static int
_unlink(p, uap, retval, nodelbusy)
	struct proc *p;
	struct unlink_args *uap;
	register_t *retval;
	int nodelbusy;
{
	register struct vnode *vp;
	int error;
	struct nameidata nd;

	NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, uap->path, p);
	/* with hfs semantics, busy files cannot be deleted */
	if (nodelbusy)
		nd.ni_cnd.cn_flags |= NODELETEBUSY;
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);

	if (vp->v_type == VDIR)
		error = EPERM;	/* POSIX */
	else {
		/*
		 * The root of a mounted filesystem cannot be deleted.
		 *
		 * XXX: can this only be a VDIR case?
		 */
		if (vp->v_flag & VROOT)
			error = EBUSY;
	}

	if (!error) {
		VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE);
		/* VOP_REMOVE consumes both vnode references. */
		error = VOP_REMOVE(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
	} else {
		VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
		if (nd.ni_dvp == vp)
			vrele(nd.ni_dvp);
		else
			vput(nd.ni_dvp);
		if (vp != NULLVP)
			vput(vp);
	}
	return (error);
}

/*
 * Delete a name from the filesystem using POSIX semantics.
 */
int
unlink(p, uap, retval)
	struct proc *p;
	struct unlink_args *uap;
	register_t *retval;
{
	return _unlink(p, uap, retval, 0);
}

/*
 * Delete a name from the filesystem using HFS semantics.
 */
int
delete(p, uap, retval)
	struct proc *p;
	struct unlink_args *uap;
	register_t *retval;
{
	return _unlink(p, uap, retval, 1);
}

/*
 * Reposition read/write file offset.
 */
struct lseek_args {
	int fd;
#ifdef DOUBLE_ALIGN_PARAMS
	int pad;
#endif
	off_t offset;
	int whence;
};
int
lseek(p, uap, retval)
	struct proc *p;
	register struct lseek_args *uap;
	register_t *retval;
{
	struct ucred *cred = p->p_ucred;
	struct file *fp;
	struct vattr vattr;
	int error;

	if (error = fdgetf(p, uap->fd, &fp))
		return (error);
	/* Seeking only makes sense on vnodes (not pipes/sockets). */
	if (fp->f_type != DTYPE_VNODE)
		return (ESPIPE);
	switch (uap->whence) {
	case L_INCR:
		/* NOTE(review): no overflow or negative-offset check on
		 * any of these paths — historical behavior. */
		fp->f_offset += uap->offset;
		break;
	case L_XTND:
		/* Seek relative to end of file: need current size. */
		if (error =
		    VOP_GETATTR((struct vnode *)fp->f_data, &vattr, cred, p))
			return (error);
		fp->f_offset = uap->offset + vattr.va_size;
		break;
	case L_SET:
		fp->f_offset = uap->offset;
		break;
	default:
		return (EINVAL);
	}
	/* Return the 64-bit offset through the register_t pair. */
	*(off_t *)retval = fp->f_offset;
	return (0);
}

#if COMPAT_43
/*
 * Reposition read/write file offset.
 */
struct olseek_args {
	int fd;
	long offset;
	int whence;
};
int
olseek(p, uap, retval)
	struct proc *p;
	register struct olseek_args *uap;
	register_t *retval;
{
	struct lseek_args /* {
		syscallarg(int) fd;
#ifdef DOUBLE_ALIGN_PARAMS
		syscallarg(int) pad;
#endif
		syscallarg(off_t) offset;
		syscallarg(int) whence;
	} */ nuap;
	off_t qret;
	int error;

	/* Widen the old long offset and forward to lseek(); the result
	 * is truncated back to long for the old ABI.
	 * NOTE(review): &qret (off_t *) is passed where lseek takes
	 * register_t * — relies on lseek storing via *(off_t *). */
	nuap.fd = uap->fd;
	nuap.offset = uap->offset;
	nuap.whence = uap->whence;
	error = lseek(p, &nuap, &qret);
	*(long *)retval = qret;
	return (error);
}
#endif /* COMPAT_43 */

/*
 * Check access permissions.
 */
struct access_args {
	char *path;
	int flags;
};
int
access(p, uap, retval)
	struct proc *p;
	register struct access_args *uap;
	register_t *retval;
{
	register struct ucred *cred = p->p_ucred;
	register struct vnode *vp;
	int error, flags, t_gid, t_uid;
	struct nameidata nd;

	/*
	 * access(2) checks with the REAL uid/gid: temporarily swap
	 * them into the credential, restoring on the way out.
	 * NOTE(review): this mutates p_ucred in place — if the cred is
	 * shared this is racy; confirm creds are per-process here.
	 */
	t_uid = cred->cr_uid;
	t_gid = cred->cr_groups[0];
	cred->cr_uid = p->p_cred->p_ruid;
	cred->cr_groups[0] = p->p_cred->p_rgid;
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    uap->path, p);
	if (error = namei(&nd))
		goto out1;
	vp = nd.ni_vp;

	/* Flags == 0 means only check for existence. */
	if (uap->flags) {
		flags = 0;
		if (uap->flags & R_OK)
			flags |= VREAD;
		if (uap->flags & W_OK)
			flags |= VWRITE;
		if (uap->flags & X_OK)
			flags |= VEXEC;
		/* Writes additionally require a writable filesystem. */
		if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
			error = VOP_ACCESS(vp, flags, cred, p);
	}
	vput(vp);
out1:
	cred->cr_uid = t_uid;
	cred->cr_groups[0] = t_gid;
	return (error);
}

#if COMPAT_43
/*
 * Get file status; this version follows links.
 */
struct ostat_args {
	char *path;
	struct ostat *ub;
};
/* ARGSUSED */
int
ostat(p, uap, retval)
	struct proc *p;
	register struct ostat_args *uap;
	register_t *retval;
{
	struct stat sb;
	struct ostat osb;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    uap->path, p);
	if (error = namei(&nd))
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	/* Convert to the 4.3-era stat layout before copying out. */
	cvtstat(&sb, &osb);
	error = copyout((caddr_t)&osb, (caddr_t)uap->ub, sizeof (osb));
	return (error);
}

/*
 * Get file status; this version does not follow links.
 */
struct olstat_args {
	char *path;
	struct ostat *ub;
};
/* ARGSUSED */
int
olstat(p, uap, retval)
	struct proc *p;
	register struct olstat_args *uap;
	register_t *retval;
{
	struct vnode *vp, *dvp;
	struct stat sb, sb1;
	struct ostat osb;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKPARENT, UIO_USERSPACE,
	    uap->path, p);
	if (error = namei(&nd))
		return (error);
	/*
	 * For symbolic links, always return the attributes of its
	 * containing directory, except for mode, size, and links.
	 */
	vp = nd.ni_vp;
	dvp = nd.ni_dvp;
	if (vp->v_type != VLNK) {
		/* Not a symlink: plain stat of the leaf vnode. */
		if (dvp == vp)
			vrele(dvp);
		else
			vput(dvp);
		error = vn_stat(vp, &sb, p);
		vput(vp);
		if (error)
			return (error);
	} else {
		/* Symlink: merge directory attributes with the link's
		 * own mode/nlink/size/blocks. */
		error = vn_stat(dvp, &sb, p);
		vput(dvp);
		if (error) {
			vput(vp);
			return (error);
		}
		error = vn_stat(vp, &sb1, p);
		vput(vp);
		if (error)
			return (error);
		sb.st_mode &= ~S_IFDIR;
		sb.st_mode |= S_IFLNK;
		sb.st_nlink = sb1.st_nlink;
		sb.st_size = sb1.st_size;
		sb.st_blocks = sb1.st_blocks;
	}
	cvtstat(&sb, &osb);
	error = copyout((caddr_t)&osb, (caddr_t)uap->ub, sizeof (osb));
	return (error);
}

/*
 * Convert from an old to a new stat structure.
 */
void
cvtstat(st, ost)
	struct stat *st;
	struct ostat *ost;
{

	ost->st_dev = st->st_dev;
	ost->st_ino = st->st_ino;
	ost->st_mode = st->st_mode;
	ost->st_nlink = st->st_nlink;
	ost->st_uid = st->st_uid;
	ost->st_gid = st->st_gid;
	ost->st_rdev = st->st_rdev;
	/* Sizes that do not fit the old 32-bit field become -2. */
	if (st->st_size < (quad_t)1 << 32)
		ost->st_size = st->st_size;
	else
		ost->st_size = -2;
	ost->st_atime = st->st_atime;
	ost->st_mtime = st->st_mtime;
	ost->st_ctime = st->st_ctime;
	ost->st_blksize = st->st_blksize;
	ost->st_blocks = st->st_blocks;
	ost->st_flags = st->st_flags;
	ost->st_gen = st->st_gen;
}
#endif /* COMPAT_43 */

/*
 * Get file status; this version follows links.
 */
struct stat_args {
	char *path;
	struct stat *ub;
};
/* ARGSUSED */
int
stat(p, uap, retval)
	struct proc *p;
	register struct stat_args *uap;
	register_t *retval;
{
	struct stat sb;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
	    uap->path, p);
	if (error = namei(&nd))
		return (error);
	error = vn_stat(nd.ni_vp, &sb, p);
	vput(nd.ni_vp);
	if (error)
		return (error);
	error = copyout((caddr_t)&sb, (caddr_t)uap->ub, sizeof (sb));
	return (error);
}

/*
 * Get file status; this version does not follow links.
 */
struct lstat_args {
	char *path;
	struct stat *ub;
};
/* ARGSUSED */
int
lstat(p, uap, retval)
	struct proc *p;
	register struct lstat_args *uap;
	register_t *retval;
{
	int error;
	struct vnode *vp, *dvp;
	struct stat sb, sb1;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKPARENT, UIO_USERSPACE,
	    uap->path, p);
	if (error = namei(&nd))
		return (error);
	/*
	 * For symbolic links, always return the attributes of its containing
	 * directory, except for mode, size, inode number, and links.
	 */
	vp = nd.ni_vp;
	dvp = nd.ni_dvp;
	/* NOTE(review): NFS symlinks deliberately take the plain-stat
	 * path (vn_stat on the link itself) — confirm intent. */
	if ((vp->v_type != VLNK) || ((vp->v_type == VLNK) && (vp->v_tag == VT_NFS))) {
		if (dvp == vp)
			vrele(dvp);
		else
			vput(dvp);
		error = vn_stat(vp, &sb, p);
		vput(vp);
		if (error)
			return (error);
		if (vp->v_type == VLNK)
			sb.st_mode |= S_IFLNK;
	} else {
		/* Merge directory attrs with the link's own fields. */
		error = vn_stat(dvp, &sb, p);
		vput(dvp);
		if (error) {
			vput(vp);
			return (error);
		}
		error = vn_stat(vp, &sb1, p);
		vput(vp);
		if (error)
			return (error);
		sb.st_mode &= ~S_IFDIR;
		sb.st_mode |= S_IFLNK;
		sb.st_nlink = sb1.st_nlink;
		sb.st_size = sb1.st_size;
		sb.st_blocks = sb1.st_blocks;
		sb.st_ino = sb1.st_ino;
	}
	error = copyout((caddr_t)&sb, (caddr_t)uap->ub, sizeof (sb));
	return (error);
}

/*
 * Get configurable pathname variables.
+ */ +struct pathconf_args { + char *path; + int name; +}; +/* ARGSUSED */ +int +pathconf(p, uap, retval) + struct proc *p; + register struct pathconf_args *uap; + register_t *retval; +{ + int error; + struct nameidata nd; + + NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + uap->path, p); + if (error = namei(&nd)) + return (error); + error = VOP_PATHCONF(nd.ni_vp, uap->name, retval); + vput(nd.ni_vp); + return (error); +} + +/* + * Return target name of a symbolic link. + */ +struct readlink_args { + char *path; + char *buf; + int count; +}; +/* ARGSUSED */ +int +readlink(p, uap, retval) + struct proc *p; + register struct readlink_args *uap; + register_t *retval; +{ + register struct vnode *vp; + struct iovec aiov; + struct uio auio; + int error; + struct nameidata nd; + + NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF, UIO_USERSPACE, + uap->path, p); + if (error = namei(&nd)) + return (error); + vp = nd.ni_vp; + if (vp->v_type != VLNK) + error = EINVAL; + else { + aiov.iov_base = uap->buf; + aiov.iov_len = uap->count; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = uap->count; + error = VOP_READLINK(vp, &auio, p->p_ucred); + } + vput(vp); + *retval = uap->count - auio.uio_resid; + return (error); +} + +/* + * Change flags of a file given a path name. 
 */
struct chflags_args {
	char *path;
	int flags;
};
/* ARGSUSED */
int
chflags(p, uap, retval)
	struct proc *p;
	register struct chflags_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	/* Set only va_flags; VOP_SETATTR enforces permissions. */
	VATTR_NULL(&vattr);
	vattr.va_flags = uap->flags;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	vput(vp);
	return (error);
}

/*
 * Change flags of a file given a file descriptor.
 */
struct fchflags_args {
	int fd;
	int flags;
};
/* ARGSUSED */
int
fchflags(p, uap, retval)
	struct proc *p;
	register struct fchflags_args *uap;
	register_t *retval;
{
	struct vattr vattr;
	struct vnode *vp;
	struct file *fp;
	int error;

	if (error = getvnode(p, uap->fd, &fp))
		return (error);
	vp = (struct vnode *)fp->f_data;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	VATTR_NULL(&vattr);
	vattr.va_flags = uap->flags;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	/* The file keeps its reference; just drop the vnode lock. */
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * Change mode of a file given path name.
 */
struct chmod_args {
	char *path;
	int mode;
};
/* ARGSUSED */
int
chmod(p, uap, retval)
	struct proc *p;
	register struct chmod_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	/* Mask to the permission bits; filesystem checks ownership. */
	VATTR_NULL(&vattr);
	vattr.va_mode = uap->mode & ALLPERMS;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	vput(vp);
	return (error);
}

/*
 * Change mode of a file given a file descriptor.
 */
struct fchmod_args {
	int	fd;
	int	mode;
};
/* ARGSUSED */
/*
 * fchmod: like chmod(2) but on an already-open descriptor.
 */
int
fchmod(p, uap, retval)
	struct proc *p;
	register struct fchmod_args *uap;
	register_t *retval;
{
	struct vattr vattr;
	struct vnode *vp;
	struct file *fp;
	int error;

	if (error = getvnode(p, uap->fd, &fp))
		return (error);
	vp = (struct vnode *)fp->f_data;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	VATTR_NULL(&vattr);
	vattr.va_mode = uap->mode & ALLPERMS;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * Set ownership given a path name.
 */
struct chown_args {
	char	*path;
	int	uid;
	int	gid;
};
/* ARGSUSED */
int
chown(p, uap, retval)
	struct proc *p;
	register struct chown_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;

	/*
	XXX A TEMPORARY HACK FOR NOW: Try to track console_user
	by looking for chown() calls on /dev/console from a console process:
	*/
	/* NOTE(review): the global console_user is updated before any
	 * permission check has run, so a chown() that ultimately fails
	 * in VOP_SETATTR still changes console_user — confirm intended. */
	if ((vp) && (vp->v_specinfo) &&
	    (major(vp->v_specinfo->si_rdev) == CONSMAJOR) &&
	    (minor(vp->v_specinfo->si_rdev) == 0)) {
		console_user = uap->uid;
	};

	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	VATTR_NULL(&vattr);
	vattr.va_uid = uap->uid;
	vattr.va_gid = uap->gid;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	vput(vp);
	return (error);
}

/*
 * Set ownership given a file descriptor.
 */
struct fchown_args {
	int	fd;
	int	uid;
	int	gid;
};
/* ARGSUSED */
/*
 * fchown: like chown(2) but on an already-open descriptor.
 */
int
fchown(p, uap, retval)
	struct proc *p;
	register struct fchown_args *uap;
	register_t *retval;
{
	struct vattr vattr;
	struct vnode *vp;
	struct file *fp;
	int error;

	if (error = getvnode(p, uap->fd, &fp))
		return (error);
	vp = (struct vnode *)fp->f_data;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	VATTR_NULL(&vattr);
	vattr.va_uid = uap->uid;
	vattr.va_gid = uap->gid;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * Set the access and modification times of a file.
 */
struct utimes_args {
	char	*path;
	struct	timeval *tptr;
};
/* ARGSUSED */
int
utimes(p, uap, retval)
	struct proc *p;
	register struct utimes_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct timeval tv[2];
	struct vattr vattr;
	int error;
	struct nameidata nd;

	VATTR_NULL(&vattr);
	if (uap->tptr == NULL) {
		/* NULL tptr: stamp both times with "now" and mark the
		 * request so the filesystem can apply the relaxed
		 * owner-or-writable permission rule for utimes(path, NULL). */
		microtime(&tv[0]);
		tv[1] = tv[0];
		vattr.va_vaflags |= VA_UTIMES_NULL;
	} else if (error = copyin((caddr_t)uap->tptr, (caddr_t)tv,
	    sizeof (tv)))
		return (error);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	/* Convert struct timeval (usec) to the vattr timespec (nsec). */
	vattr.va_atime.tv_sec = tv[0].tv_sec;
	vattr.va_atime.tv_nsec = tv[0].tv_usec * 1000;
	vattr.va_mtime.tv_sec = tv[1].tv_sec;
	vattr.va_mtime.tv_nsec = tv[1].tv_usec * 1000;
	error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	vput(vp);
	return (error);
}

/*
 * Truncate a file given its path name.
 */
struct truncate_args {
	char	*path;
#ifdef DOUBLE_ALIGN_PARAMS
	int	pad;
#endif
	off_t	length;
};
/* ARGSUSED */
/*
 * truncate: set the size of the file named by uap->path to uap->length.
 * Requires write access to the file (checked here via VOP_ACCESS, unlike
 * ftruncate which relies on the descriptor having been opened FWRITE).
 */
int
truncate(p, uap, retval)
	struct proc *p;
	register struct truncate_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct vattr vattr;
	int error;
	struct nameidata nd;

	if (uap->length < 0)
		return(EINVAL);
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p);
	if (error = namei(&nd))
		return (error);
	vp = nd.ni_vp;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (vp->v_type == VDIR)
		error = EISDIR;
	else if ((error = vn_writechk(vp)) == 0 &&
	    (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, p)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = uap->length;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
	}
	vput(vp);
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 */
struct ftruncate_args {
	int	fd;
#ifdef DOUBLE_ALIGN_PARAMS
	int	pad;
#endif
	off_t	length;
};
/* ARGSUSED */
int
ftruncate(p, uap, retval)
	struct proc *p;
	register struct ftruncate_args *uap;
	register_t *retval;
{
	struct vattr vattr;
	struct vnode *vp;
	struct file *fp;
	int error;

	if (uap->length < 0)
		return(EINVAL);

	if (error = fdgetf(p, uap->fd, &fp))
		return (error);

	/* POSIX shared-memory objects are truncated by their own path. */
	if (fp->f_type == DTYPE_PSXSHM) {
		return(pshm_truncate(p, fp, uap->fd, uap->length, retval));
	}
	if (fp->f_type != DTYPE_VNODE)
		return (EINVAL);


	/* Descriptor must have been opened for writing. */
	if ((fp->f_flag & FWRITE) == 0)
		return (EINVAL);
	vp = (struct vnode *)fp->f_data;
	VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (vp->v_type == VDIR)
		error = EISDIR;
	else if ((error = vn_writechk(vp)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = uap->length;
		/* Note: uses fp->f_cred (open-time credentials). */
		error = VOP_SETATTR(vp, &vattr, fp->f_cred, p);
	}
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

#if COMPAT_43
/*
 * Truncate a file given its path name.
 */
struct otruncate_args {
	char	*path;
	long	length;
};
/* ARGSUSED */
/*
 * otruncate: 4.3BSD-compatible truncate taking a long length;
 * widens the argument and forwards to truncate().
 */
int
otruncate(p, uap, retval)
	struct proc *p;
	register struct otruncate_args *uap;
	register_t *retval;
{
	struct truncate_args /* {
		syscallarg(char *) path;
#ifdef DOUBLE_ALIGN_PARAMS
		syscallarg(int) pad;
#endif
		syscallarg(off_t) length;
	} */ nuap;

	nuap.path = uap->path;
	nuap.length = uap->length;
	return (truncate(p, &nuap, retval));
}

/*
 * Truncate a file given a file descriptor.
 */
struct oftruncate_args {
	int	fd;
	long	length;
};
/* ARGSUSED */
/*
 * oftruncate: 4.3BSD-compatible ftruncate; widens length and forwards.
 */
int
oftruncate(p, uap, retval)
	struct proc *p;
	register struct oftruncate_args *uap;
	register_t *retval;
{
	struct ftruncate_args /* {
		syscallarg(int) fd;
#ifdef DOUBLE_ALIGN_PARAMS
		syscallarg(int) pad;
#endif
		syscallarg(off_t) length;
	} */ nuap;

	nuap.fd = uap->fd;
	nuap.length = uap->length;
	return (ftruncate(p, &nuap, retval));
}
#endif /* COMPAT_43 */

/*
 * Sync an open file.
 */
struct fsync_args {
	int	fd;
};
/* ARGSUSED */
/*
 * fsync: flush the descriptor's vnode synchronously (MNT_WAIT).
 */
int
fsync(p, uap, retval)
	struct proc *p;
	struct fsync_args *uap;
	register_t *retval;
{
	register struct vnode *vp;
	struct file *fp;
	int error;

	if (error = getvnode(p, uap->fd, &fp))
		return (error);
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, p);
	VOP_UNLOCK(vp, 0, p);
	return (error);
}

/*
 * Duplicate files. Source must be a file, target must be a file or
 * must not exist.
 */

struct copyfile_args {
	char	*from;
	char	*to;
	int	mode;
	int	flags;
};
/* ARGSUSED */
/*
 * copyfile: duplicate `from' to `to' via the filesystem's VOP_COPYFILE.
 * Error handling uses a sentinel: error == -1 means "source and target
 * are the same object", which is reported as success (POSIX semantics).
 */
int
copyfile(p, uap, retval)
	struct proc *p;
	register struct copyfile_args *uap;
	register_t *retval;
{
	register struct vnode *tvp, *fvp, *tdvp;
	register struct ucred *cred = p->p_ucred;
	struct nameidata fromnd, tond;
	int error;

	/* Check that the flags are valid.
	 */

	if (uap->flags & ~CPF_MASK) {
		return(EINVAL);
	}

	NDINIT(&fromnd, LOOKUP, SAVESTART, UIO_USERSPACE,
		uap->from, p);
	if (error = namei(&fromnd))
		return (error);
	fvp = fromnd.ni_vp;

	NDINIT(&tond, CREATE, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART,
	    UIO_USERSPACE, uap->to, p);
	if (error = namei(&tond)) {
		vrele(fvp);
		goto out1;
	}
	tdvp = tond.ni_dvp;
	tvp = tond.ni_vp;
	/* Existing target is only acceptable with CPF_OVERWRITE. */
	if (tvp != NULL) {
		if (!(uap->flags & CPF_OVERWRITE)) {
			error = EEXIST;
			goto out;
		}
	}

	if (fvp->v_type == VDIR || (tvp && tvp->v_type == VDIR)) {
		error = EISDIR;
		goto out;
	}

	if (error = VOP_ACCESS(tdvp, VWRITE, cred, p))
		goto out;

	if (fvp == tdvp)
		error = EINVAL;
	/*
	 * If source is the same as the destination (that is the
	 * same inode number) then there is nothing to do.
	 * (fixed to have POSIX semantics - CSM 3/2/98)
	 */
	if (fvp == tvp)
		error = -1;
out:
	if (!error) {
		/* NOTE(review): on success VOP_COPYFILE is assumed to
		 * consume the fvp/tdvp/tvp references — confirm against
		 * the filesystem's implementation. */
		error = VOP_COPYFILE(fvp,tdvp,tvp,&tond.ni_cnd,uap->mode,uap->flags);
	} else {
		VOP_ABORTOP(tdvp, &tond.ni_cnd);
		if (tdvp == tvp)
			vrele(tdvp);
		else
			vput(tdvp);
		if (tvp)
			vput(tvp);
		vrele(fvp);
	}
	/* SAVESTART was requested, so the start directories and the
	 * pathname buffers must be released on every exit path. */
	vrele(tond.ni_startdir);
	FREE_ZONE(tond.ni_cnd.cn_pnbuf, tond.ni_cnd.cn_pnlen, M_NAMEI);
out1:
	if (fromnd.ni_startdir)
		vrele(fromnd.ni_startdir);
	FREE_ZONE(fromnd.ni_cnd.cn_pnbuf, fromnd.ni_cnd.cn_pnlen, M_NAMEI);
	if (error == -1)
		return (0);
	return (error);
}

/*
 * Rename files. Source and destination must either both be directories,
 * or both not be directories. If target is a directory, it must be empty.
+ */ +struct rename_args { + char *from; + char *to; +}; +/* ARGSUSED */ +int +rename(p, uap, retval) + struct proc *p; + register struct rename_args *uap; + register_t *retval; +{ + register struct vnode *tvp, *fvp, *tdvp; + struct nameidata fromnd, tond; + int error; + int mntrename; + + mntrename = FALSE; + + NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE, + uap->from, p); + if (error = namei(&fromnd)) + return (error); + fvp = fromnd.ni_vp; + + NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART, + UIO_USERSPACE, uap->to, p); + if (error = namei(&tond)) { + VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd); + vrele(fromnd.ni_dvp); + vrele(fvp); + goto out2; + } + tdvp = tond.ni_dvp; + tvp = tond.ni_vp; + + if (tvp != NULL) { + if (fvp->v_type == VDIR && tvp->v_type != VDIR) { + error = ENOTDIR; + goto out; + } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) { + error = EISDIR; + goto out; + } + } + if (fvp == tdvp) + error = EINVAL; + /* + * If source is the same as the destination (that is the + * same inode number) then there is nothing to do. + */ + if (fvp == tvp) + error = -1; + + /* + * Allow the renaming of mount points. 
+ * - target must not exist + * - target must reside in the same directory as source + * - union mounts cannot be renamed + * - "/" cannot be renamed + */ + if ((fvp->v_flag & VROOT) && + (fvp->v_type == VDIR) && + (tvp == NULL) && + (fvp->v_mountedhere == NULL) && + (fromnd.ni_dvp == tond.ni_dvp) && + ((fvp->v_mount->mnt_flag & (MNT_UNION | MNT_ROOTFS)) == 0) && + (fvp->v_mount->mnt_vnodecovered != NULLVP)) { + + /* switch fvp to the covered vnode */ + fromnd.ni_vp = fvp->v_mount->mnt_vnodecovered; + vrele(fvp); + fvp = fromnd.ni_vp; + VREF(fvp); + mntrename = TRUE; + } +out: + if (!error) { + VOP_LEASE(tdvp, p, p->p_ucred, LEASE_WRITE); + if (fromnd.ni_dvp != tdvp) + VOP_LEASE(fromnd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + if (tvp) + VOP_LEASE(tvp, p, p->p_ucred, LEASE_WRITE); + error = VOP_RENAME(fromnd.ni_dvp, fvp, &fromnd.ni_cnd, + tond.ni_dvp, tvp, &tond.ni_cnd); + if (error) + goto out1; + + /* + * update filesystem's mount point data + */ + if (mntrename) { + char *cp, *pathend, *mpname; + char * tobuf; + struct mount *mp; + int maxlen; + size_t len = 0; + + VREF(fvp); + vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p); + mp = fvp->v_mountedhere; + + if (vfs_busy(mp, LK_NOWAIT, 0, p)) { + vput(fvp); + error = EBUSY; + goto out1; + } + VOP_UNLOCK(fvp, 0, p); + + MALLOC_ZONE(tobuf, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + error = copyinstr(uap->to, tobuf, MAXPATHLEN, &len); + if (!error) { + /* find current mount point prefix */ + pathend = &mp->mnt_stat.f_mntonname[0]; + for (cp = pathend; *cp != '\0'; ++cp) { + if (*cp == '/') + pathend = cp + 1; + } + /* find last component of target name */ + for (mpname = cp = tobuf; *cp != '\0'; ++cp) { + if (*cp == '/') + mpname = cp + 1; + } + /* append name to prefix */ + maxlen = MNAMELEN - (pathend - mp->mnt_stat.f_mntonname); + bzero(pathend, maxlen); + strncpy(pathend, mpname, maxlen - 1); + } + FREE_ZONE(tobuf, MAXPATHLEN, M_NAMEI); + + vrele(fvp); + vfs_unbusy(mp, p); + } + } else { + VOP_ABORTOP(tond.ni_dvp, 
&tond.ni_cnd); + if (tdvp == tvp) + vrele(tdvp); + else + vput(tdvp); + if (tvp) + vput(tvp); + VOP_ABORTOP(fromnd.ni_dvp, &fromnd.ni_cnd); + vrele(fromnd.ni_dvp); + vrele(fvp); + } +out1: + vrele(tond.ni_startdir); + FREE_ZONE(tond.ni_cnd.cn_pnbuf, tond.ni_cnd.cn_pnlen, M_NAMEI); +out2: + if (fromnd.ni_startdir) + vrele(fromnd.ni_startdir); + FREE_ZONE(fromnd.ni_cnd.cn_pnbuf, fromnd.ni_cnd.cn_pnlen, M_NAMEI); + if (error == -1) + return (0); + return (error); +} + +/* + * Make a directory file. + */ +struct mkdir_args { + char *path; + int mode; +}; +/* ARGSUSED */ +int +mkdir(p, uap, retval) + struct proc *p; + register struct mkdir_args *uap; + register_t *retval; +{ + register struct vnode *vp; + struct vattr vattr; + int error; + struct nameidata nd; + + NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, uap->path, p); + if (error = namei(&nd)) + return (error); + vp = nd.ni_vp; + if (vp != NULL) { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vrele(vp); + return (EEXIST); + } + VATTR_NULL(&vattr); + vattr.va_type = VDIR; + vattr.va_mode = (uap->mode & ACCESSPERMS) &~ p->p_fd->fd_cmask; + VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr); + if (!error) + vput(nd.ni_vp); + return (error); +} + +/* + * Remove a directory file. + */ +struct rmdir_args { + char *path; +}; +/* ARGSUSED */ +int +rmdir(p, uap, retval) + struct proc *p; + struct rmdir_args *uap; + register_t *retval; +{ + register struct vnode *vp; + int error; + struct nameidata nd; + + NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE, + uap->path, p); + if (error = namei(&nd)) + return (error); + vp = nd.ni_vp; + if (vp->v_type != VDIR) { + error = ENOTDIR; + goto out; + } + /* + * No rmdir "." please. + */ + if (nd.ni_dvp == vp) { + error = EINVAL; + goto out; + } + /* + * The root of a mounted filesystem cannot be deleted. 
+ */ + if (vp->v_flag & VROOT) + error = EBUSY; +out: + if (!error) { + VOP_LEASE(nd.ni_dvp, p, p->p_ucred, LEASE_WRITE); + VOP_LEASE(vp, p, p->p_ucred, LEASE_WRITE); + error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd); + } else { + VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd); + if (nd.ni_dvp == vp) + vrele(nd.ni_dvp); + else + vput(nd.ni_dvp); + vput(vp); + } + return (error); +} + +#if COMPAT_43 +/* + * Read a block of directory entries in a file system independent format. + */ +struct ogetdirentries_args { + int fd; + char *buf; + u_int count; + long *basep; +}; +int +ogetdirentries(p, uap, retval) + struct proc *p; + register struct ogetdirentries_args *uap; + register_t *retval; +{ + register struct vnode *vp; + struct file *fp; + struct uio auio, kuio; + struct iovec aiov, kiov; + struct dirent *dp, *edp; + caddr_t dirbuf; + int error, eofflag, readcnt; + long loff; + + if (error = getvnode(p, uap->fd, &fp)) + return (error); + if ((fp->f_flag & FREAD) == 0) + return (EBADF); + vp = (struct vnode *)fp->f_data; +unionread: + if (vp->v_type != VDIR) + return (EINVAL); + aiov.iov_base = uap->buf; + aiov.iov_len = uap->count; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = uap->count; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + loff = auio.uio_offset = fp->f_offset; +# if (BYTE_ORDER != LITTLE_ENDIAN) + if (vp->v_mount->mnt_maxsymlinklen <= 0) { + error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, + (int *)0, (u_long *)0); + fp->f_offset = auio.uio_offset; + } else +# endif + { + kuio = auio; + kuio.uio_iov = &kiov; + kuio.uio_segflg = UIO_SYSSPACE; + kiov.iov_len = uap->count; + MALLOC(dirbuf, caddr_t, uap->count, M_TEMP, M_WAITOK); + kiov.iov_base = dirbuf; + error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag, + (int *)0, (u_long *)0); + fp->f_offset = kuio.uio_offset; + if (error == 0) { + readcnt = uap->count - kuio.uio_resid; + edp = (struct dirent 
*)&dirbuf[readcnt]; + for (dp = (struct dirent *)dirbuf; dp < edp; ) { +# if (BYTE_ORDER == LITTLE_ENDIAN) + /* + * The expected low byte of + * dp->d_namlen is our dp->d_type. + * The high MBZ byte of dp->d_namlen + * is our dp->d_namlen. + */ + dp->d_type = dp->d_namlen; + dp->d_namlen = 0; +# else + /* + * The dp->d_type is the high byte + * of the expected dp->d_namlen, + * so must be zero'ed. + */ + dp->d_type = 0; +# endif + if (dp->d_reclen > 0) { + dp = (struct dirent *) + ((char *)dp + dp->d_reclen); + } else { + error = EIO; + break; + } + } + if (dp >= edp) + error = uiomove(dirbuf, readcnt, &auio); + } + FREE(dirbuf, M_TEMP); + } + VOP_UNLOCK(vp, 0, p); + if (error) + return (error); + +#if UNION +{ + extern int (**union_vnodeop_p)(void *); + extern struct vnode *union_dircache __P((struct vnode*, struct proc*)); + + if ((uap->count == auio.uio_resid) && + (vp->v_op == union_vnodeop_p)) { + struct vnode *lvp; + + lvp = union_dircache(vp, p); + if (lvp != NULLVP) { + struct vattr va; + + /* + * If the directory is opaque, + * then don't show lower entries + */ + error = VOP_GETATTR(vp, &va, fp->f_cred, p); + if (va.va_flags & OPAQUE) { + vput(lvp); + lvp = NULL; + } + } + + if (lvp != NULLVP) { + error = VOP_OPEN(lvp, FREAD, fp->f_cred, p); + if (error) { + vput(lvp); + return (error); + } + VOP_UNLOCK(lvp, 0, p); + fp->f_data = (caddr_t) lvp; + fp->f_offset = 0; + error = vn_close(vp, FREAD, fp->f_cred, p); + if (error) + return (error); + vp = lvp; + goto unionread; + } + } +} +#endif /* UNION */ + + if ((uap->count == auio.uio_resid) && + (vp->v_flag & VROOT) && + (vp->v_mount->mnt_flag & MNT_UNION)) { + struct vnode *tvp = vp; + vp = vp->v_mount->mnt_vnodecovered; + VREF(vp); + fp->f_data = (caddr_t) vp; + fp->f_offset = 0; + vrele(tvp); + goto unionread; + } + error = copyout((caddr_t)&loff, (caddr_t)uap->basep, + sizeof(long)); + *retval = uap->count - auio.uio_resid; + return (error); +} +#endif /* COMPAT_43 */ + +/* + * Read a block of directory 
entries in a file system independent format. + */ +struct getdirentries_args { + int fd; + char *buf; + u_int count; + long *basep; +}; +int +getdirentries(p, uap, retval) + struct proc *p; + register struct getdirentries_args *uap; + register_t *retval; +{ + register struct vnode *vp; + struct file *fp; + struct uio auio; + struct iovec aiov; + long loff; + int error, eofflag; + + if (error = getvnode(p, uap->fd, &fp)) + return (error); + if ((fp->f_flag & FREAD) == 0) + return (EBADF); + vp = (struct vnode *)fp->f_data; +unionread: + if (vp->v_type != VDIR) + return (EINVAL); + aiov.iov_base = uap->buf; + aiov.iov_len = uap->count; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = uap->count; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + loff = auio.uio_offset = fp->f_offset; + error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, + (int *)0, (u_long *)0); + fp->f_offset = auio.uio_offset; + VOP_UNLOCK(vp, 0, p); + if (error) + return (error); + +#if UNION +{ + extern int (**union_vnodeop_p)(void *); + extern struct vnode *union_dircache __P((struct vnode*, struct proc*)); + + if ((uap->count == auio.uio_resid) && + (vp->v_op == union_vnodeop_p)) { + struct vnode *lvp; + + lvp = union_dircache(vp, p); + if (lvp != NULLVP) { + struct vattr va; + + /* + * If the directory is opaque, + * then don't show lower entries + */ + error = VOP_GETATTR(vp, &va, fp->f_cred, p); + if (va.va_flags & OPAQUE) { + vput(lvp); + lvp = NULL; + } + } + + if (lvp != NULLVP) { + error = VOP_OPEN(lvp, FREAD, fp->f_cred, p); + if (error) { + vput(lvp); + return (error); + } + VOP_UNLOCK(lvp, 0, p); + fp->f_data = (caddr_t) lvp; + fp->f_offset = 0; + error = vn_close(vp, FREAD, fp->f_cred, p); + if (error) + return (error); + vp = lvp; + goto unionread; + } + } +} +#endif /* UNION */ + + if ((uap->count == auio.uio_resid) && + (vp->v_flag & VROOT) && + (vp->v_mount->mnt_flag & MNT_UNION)) { + 
struct vnode *tvp = vp; + vp = vp->v_mount->mnt_vnodecovered; + VREF(vp); + fp->f_data = (caddr_t) vp; + fp->f_offset = 0; + vrele(tvp); + goto unionread; + } + error = copyout((caddr_t)&loff, (caddr_t)uap->basep, + sizeof(long)); + *retval = uap->count - auio.uio_resid; + return (error); +} + +/* + * Set the mode mask for creation of filesystem nodes. + */ +struct umask_args { + int newmask; +}; +int +umask(p, uap, retval) + struct proc *p; + struct umask_args *uap; + register_t *retval; +{ + register struct filedesc *fdp; + + fdp = p->p_fd; + *retval = fdp->fd_cmask; + fdp->fd_cmask = uap->newmask & ALLPERMS; + return (0); +} + +/* + * Void all references to file by ripping underlying filesystem + * away from vnode. + */ +struct revoke_args { + char *path; +}; +/* ARGSUSED */ +int +revoke(p, uap, retval) + struct proc *p; + register struct revoke_args *uap; + register_t *retval; +{ + register struct vnode *vp; + struct vattr vattr; + int error; + struct nameidata nd; + + NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->path, p); + if (error = namei(&nd)) + return (error); + vp = nd.ni_vp; + if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) + goto out; + if (p->p_ucred->cr_uid != vattr.va_uid && + (error = suser(p->p_ucred, &p->p_acflag))) + goto out; + if (vp->v_usecount > 1 || (vp->v_flag & VALIASED)) + VOP_REVOKE(vp, REVOKEALL); +out: + vrele(vp); + return (error); +} + +/* + * Convert a user file descriptor to a kernel file entry. + */ +int +getvnode(p, fd, fpp) + struct proc *p; + int fd; + struct file **fpp; +{ + struct file *fp; + int error; + + if (error = fdgetf(p, fd, &fp)) + return (error); + if (fp->f_type != DTYPE_VNODE) + return (EINVAL); + *fpp = fp; + return (0); +} +/* + * HFS/HFS PlUS SPECIFIC SYSTEM CALLS + * The following 10 system calls are designed to support features + * which are specific to the HFS & HFS Plus volume formats + */ + + +/* + * Make a complex file. 
A complex file is one with multiple forks (data streams) + */ +struct mkcomplex_args { + const char *path; /* pathname of the file to be created */ + mode_t mode; /* access mode for the newly created file */ + u_long type; /* format of the complex file */ +}; +/* ARGSUSED */ +int +mkcomplex(p,uap,retval) + struct proc *p; + register struct mkcomplex_args *uap; + register_t *retval; + +{ + struct vnode *vp; + struct vattr vattr; + int error; + struct nameidata nd; + + /* mkcomplex wants the directory vnode locked so do that here */ + + NDINIT(&nd, CREATE, FOLLOW | LOCKPARENT, UIO_USERSPACE, (char *)uap->path, p); + if (error = namei(&nd)) + return (error); + + /* Set the attributes as specified by the user */ + + VATTR_NULL(&vattr); + vattr.va_mode = (uap->mode & ACCESSPERMS); + error = VOP_MKCOMPLEX(nd.ni_dvp, &vp, &nd.ni_cnd, &vattr, uap->type); + + /* The mkcomplex call promises to release the parent vnode pointer + * even an an error case so don't do it here unless the operation + * is not supported. In that case, there isn't anyone to unlock the parent + * The vnode pointer to the file will also be released. 
+ */ + + if (error) + { + if (error == EOPNOTSUPP) + vput(nd.ni_dvp); + return (error); + } + + return (0); + +} /* end of mkcomplex system call */ + + + +/* + * Extended stat call which returns volumeid and vnodeid as well as other info + */ +struct statv_args { + const char *path; /* pathname of the target file */ + struct vstat *vsb; /* vstat structure for returned info */ +}; +/* ARGSUSED */ +int +statv(p,uap,retval) + struct proc *p; + register struct statv_args *uap; + register_t *retval; + +{ + return (EOPNOTSUPP); /* We'll just return an error for now */ + +} /* end of statv system call */ + + + +/* +* Extended lstat call which returns volumeid and vnodeid as well as other info +*/ +struct lstatv_args { + const char *path; /* pathname of the target file */ + struct vstat *vsb; /* vstat structure for returned info */ +}; +/* ARGSUSED */ +int +lstatv(p,uap,retval) + struct proc *p; + register struct lstatv_args *uap; + register_t *retval; + +{ + return (EOPNOTSUPP); /* We'll just return an error for now */ +} /* end of lstatv system call */ + + + +/* +* Extended fstat call which returns volumeid and vnodeid as well as other info +*/ +struct fstatv_args { + int fd; /* file descriptor of the target file */ + struct vstat *vsb; /* vstat structure for returned info */ +}; +/* ARGSUSED */ +int +fstatv(p,uap,retval) + struct proc *p; + register struct fstatv_args *uap; + register_t *retval; + +{ + return (EOPNOTSUPP); /* We'll just return an error for now */ +} /* end of fstatv system call */ + + + +/* +* Obtain attribute information about a file system object +*/ + +struct getattrlist_args { + const char *path; /* pathname of the target object */ + struct attrlist * alist; /* Attributes desired by the user */ + void * attributeBuffer; /* buffer to hold returned attributes */ + size_t bufferSize; /* size of the return buffer */ + unsigned long options; /* options (follow/don't follow) */ +}; +/* ARGSUSED */ +int +getattrlist (p,uap,retval) + struct proc *p; + 
register struct getattrlist_args *uap; + register_t *retval; + +{ + int error; + struct nameidata nd; + struct iovec aiov; + struct uio auio; + struct attrlist attributelist; + u_long nameiflags; + + /* Get the attributes desire and do our parameter checking */ + + if (error = copyin((caddr_t)uap->alist, (caddr_t) &attributelist, + sizeof (attributelist))) + { + return(error); + } + + if (attributelist.bitmapcount != ATTR_BIT_MAP_COUNT +#if 0 + || attributelist.commonattr & ~ATTR_CMN_VALIDMASK || + attributelist.volattr & ~ATTR_VOL_VALIDMASK || + attributelist.dirattr & ~ATTR_DIR_VALIDMASK || + attributelist.fileattr & ~ATTR_FILE_VALIDMASK || + attributelist.forkattr & ~ATTR_FORK_VALIDMASK +#endif + ) + { + return (EINVAL); + } + + /* Get the vnode for the file we are getting info on. */ + nameiflags = LOCKLEAF; + if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + + if (error = namei(&nd)) + return (error); + + /* Set up the UIO structure for use by the vfs routine */ + + + aiov.iov_base = uap->attributeBuffer; + aiov.iov_len = uap->bufferSize; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = uap->bufferSize; + + + error = VOP_GETATTRLIST(nd.ni_vp, &attributelist, &auio, p->p_ucred, p); + + /* Unlock and release the vnode which will have been locked by namei */ + + vput(nd.ni_vp); + + /* return the effort if we got one, otherwise return success */ + + if (error) + { + return (error); + } + + return(0); + +} /* end of getattrlist system call */ + + + +/* + * Set attribute information about a file system object + */ + +struct setattrlist_args { + const char *path; /* pathname of the target object */ + struct attrlist * alist; /* Attributes being set by the user */ + void * attributeBuffer; /* buffer with attribute values to be set */ + size_t bufferSize; /* 
size of the return buffer */ + unsigned long options; /* options (follow/don't follow) */ +}; +/* ARGSUSED */ +int +setattrlist (p,uap,retval) + struct proc *p; + register struct setattrlist_args *uap; + register_t *retval; + +{ + int error; + struct nameidata nd; + struct iovec aiov; + struct uio auio; + struct attrlist alist; + u_long nameiflags; + + /* Get the attributes desired and do our parameter checking */ + + if ((error = copyin((caddr_t)uap->alist, (caddr_t) &alist, + sizeof (alist)))) { + return (error); + } + + if (alist.bitmapcount != ATTR_BIT_MAP_COUNT) + return (EINVAL); + + /* Get the vnode for the file whose attributes are being set. */ + nameiflags = LOCKLEAF; + if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + if (error = namei(&nd)) + return (error); + + /* Set up the UIO structure for use by the vfs routine */ + aiov.iov_base = uap->attributeBuffer; + aiov.iov_len = uap->bufferSize; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_offset = 0; + auio.uio_rw = UIO_WRITE; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = uap->bufferSize; + + error = VOP_SETATTRLIST(nd.ni_vp, &alist, &auio, p->p_ucred, p); + + vput(nd.ni_vp); + + return (error); + +} /* end of setattrlist system call */ + + +/* +* Obtain attribute information on objects in a directory while enumerating +* the directory. This call does not yet support union mounted directories. +* TO DO +* 1.union mounted directories. 
+*/ + +struct getdirentriesattr_args { + int fd; /* file descriptor */ + struct attrlist *alist; /* bit map of requested attributes */ + void *buffer; /* buffer to hold returned attribute info */ + size_t buffersize; /* size of the return buffer */ + u_long *count; /* the count of entries requested/returned */ + u_long *basep; /* the offset of where we are leaving off in buffer */ + u_long *newstate; /* a flag to inform of changes in directory */ + u_long options; /* maybe unused for now */ +}; +/* ARGSUSED */ +int +getdirentriesattr (p,uap,retval) + struct proc *p; + register struct getdirentriesattr_args *uap; + register_t *retval; + +{ + register struct vnode *vp; + struct file *fp; + struct uio auio; + struct iovec aiov; + u_long actualcount; + u_long newstate; + int error, eofflag; + long loff; + struct attrlist attributelist; + + /* Get the attributes into kernel space */ + if (error = copyin((caddr_t)uap->alist, (caddr_t) &attributelist, sizeof (attributelist))) + return(error); + if (error = copyin((caddr_t)uap->count, (caddr_t) &actualcount, sizeof (u_long))) + return(error); + + if (error = getvnode(p, uap->fd, &fp)) + return (error); + if ((fp->f_flag & FREAD) == 0) + return(EBADF); + vp = (struct vnode *)fp->f_data; + + if (vp->v_type != VDIR) + return(EINVAL); + + /* set up the uio structure which will contain the users return buffer */ + aiov.iov_base = uap->buffer; + aiov.iov_len = uap->buffersize; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = uap->buffersize; + + loff = auio.uio_offset = fp->f_offset; + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + error = VOP_READDIRATTR (vp, &attributelist, &auio, + actualcount, uap->options, &newstate, &eofflag, + &actualcount, ((u_long **)0), p->p_cred); + + VOP_UNLOCK(vp, 0, p); + if (error) return (error); + fp->f_offset = auio.uio_offset; /* should be multiple of dirent, not variable */ + + if (error = 
copyout((caddr_t) &actualcount, (caddr_t) uap->count, sizeof(u_long))) + return (error); + if (error = copyout((caddr_t) &newstate, (caddr_t) uap->newstate, sizeof(u_long))) + return (error); + if (error = copyout((caddr_t)&loff, (caddr_t)uap->basep, sizeof(long))) + return (error); + + *retval = eofflag; /* similar to getdirentries */ + return (0); /* return error earlier, an retval of 0 or 1 now */ + +} /* end of getdirentryattr system call */ + +/* +* Exchange data between two files +*/ + +struct exchangedata_args { + const char *path1; /* pathname of the first swapee */ + const char *path2; /* pathname of the second swapee */ + unsigned long options; /* options */ +}; +/* ARGSUSED */ +int +exchangedata (p,uap,retval) + struct proc *p; + register struct exchangedata_args *uap; + register_t *retval; + +{ + + struct nameidata fnd, snd; + struct vnode *fvp, *svp; + int error; + u_long nameiflags; + + nameiflags = 0; + if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + + /* Global lock, to prevent race condition, only one exchange at a time */ + lockmgr(&exchangelock, LK_EXCLUSIVE , (struct slock *)0, p); + + NDINIT(&fnd, LOOKUP, nameiflags, UIO_USERSPACE, (char *) uap->path1, p); + + if (error = namei(&fnd)) + goto out2; + + fvp = fnd.ni_vp; + + NDINIT(&snd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path2, p); + + if (error = namei(&snd)) { + vrele(fvp); + goto out2; + } + + svp = snd.ni_vp; + + /* if the files are the same, return an inval error */ + if (svp == fvp) { + vrele(fvp); + vrele(svp); + error = EINVAL; + goto out2; + } + + vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p); + vn_lock(svp, LK_EXCLUSIVE | LK_RETRY, p); + + error = VOP_ACCESS(fvp, VWRITE, p->p_ucred, p); + if (error) goto out; + + error = VOP_ACCESS(svp, VWRITE, p->p_ucred, p); + if (error) goto out; + + /* Ok, make the call */ + error = VOP_EXCHANGE (fvp, svp, p->p_ucred, p); + +out: + vput (svp); + vput (fvp); + +out2: + lockmgr(&exchangelock, LK_RELEASE, (struct slock *)0, 
p); + + if (error) { + return (error); + } + + return (0); + +} /* end of exchangedata system call */ + +/* +* Check users access to a file +*/ + +struct checkuseraccess_args { + const char *path; /* pathname of the target file */ + uid_t userid; /* user for whom we are checking access */ + gid_t *groups; /* Group that we are checking for */ + int ngroups; /* Number of groups being checked */ + int accessrequired; /* needed access to the file */ + unsigned long options; /* options */ +}; + +/* ARGSUSED */ +int +checkuseraccess (p,uap,retval) + struct proc *p; + register struct checkuseraccess_args *uap; + register_t *retval; + +{ + register struct vnode *vp; + int error; + struct nameidata nd; + struct ucred cred; + int flags; /*what will actually get passed to access*/ + u_long nameiflags; + + /* Make sure that the number of groups is correct before we do anything */ + + if ((uap->ngroups <= 0) || (uap->ngroups > NGROUPS)) + return (EINVAL); + + /* Verify that the caller is root */ + + if (error = suser(p->p_ucred, &p->p_acflag)) + return(error); + + /* Fill in the credential structure */ + + cred.cr_ref = 0; + cred.cr_uid = uap->userid; + cred.cr_ngroups = uap->ngroups; + if (error = copyin((caddr_t) uap->groups, (caddr_t) &(cred.cr_groups), (sizeof(gid_t))*uap->ngroups)) + return (error); + + /* Get our hands on the file */ + + nameiflags = LOCKLEAF; + if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + + if (error = namei(&nd)) + return (error); + vp = nd.ni_vp; + + /* Flags == 0 means only check for existence. 
*/ + + flags = 0; + + if (uap->accessrequired) { + if (uap->accessrequired & R_OK) + flags |= VREAD; + if (uap->accessrequired & W_OK) + flags |= VWRITE; + if (uap->accessrequired & X_OK) + flags |= VEXEC; + } + error = VOP_ACCESS(vp, flags, &cred, p); + + vput(vp); + + if (error) + return (error); + + return (0); + +} /* end of checkuseraccess system call */ + + +struct searchfs_args { + const char *path; + struct fssearchblock *searchblock; + u_long *nummatches; + u_long scriptcode; + u_long options; + struct searchstate *state; + }; +/* ARGSUSED */ + +int +searchfs (p,uap,retval) + struct proc *p; + register struct searchfs_args *uap; + register_t *retval; + +{ + register struct vnode *vp; + int error=0; + int fserror = 0; + struct nameidata nd; + struct fssearchblock searchblock; + struct searchstate *state; + struct attrlist *returnattrs; + void *searchparams1,*searchparams2; + struct iovec aiov; + struct uio auio; + u_long nummatches; + int mallocsize; + u_long nameiflags; + + + /* Start by copying in fsearchblock paramater list */ + + if (error = copyin((caddr_t) uap->searchblock, (caddr_t) &searchblock,sizeof(struct fssearchblock))) + return(error); + + /* Now malloc a big bunch of space to hold the search parameters, the attrlists and the search state. */ + /* It all has to do into local memory and it's not that big so we might as well put it all together. */ + /* Searchparams1 shall be first so we might as well use that to hold the base address of the allocated*/ + /* block. 
*/ + + mallocsize = searchblock.sizeofsearchparams1+searchblock.sizeofsearchparams2 + + sizeof(struct attrlist) + sizeof(struct searchstate); + + MALLOC(searchparams1, void *, mallocsize, M_TEMP, M_WAITOK); + + /* Now set up the various pointers to the correct place in our newly allocated memory */ + + searchparams2 = (void *) (((caddr_t) searchparams1) + searchblock.sizeofsearchparams1); + returnattrs = (struct attrlist *) (((caddr_t) searchparams2) + searchblock.sizeofsearchparams2); + state = (struct searchstate *) (((caddr_t) returnattrs) + sizeof (struct attrlist)); + + /* Now copy in the stuff given our local variables. */ + + if (error = copyin((caddr_t) searchblock.searchparams1, searchparams1,searchblock.sizeofsearchparams1)) + goto freeandexit; + + if (error = copyin((caddr_t) searchblock.searchparams2, searchparams2,searchblock.sizeofsearchparams2)) + goto freeandexit; + + if (error = copyin((caddr_t) searchblock.returnattrs, (caddr_t) returnattrs, sizeof(struct attrlist))) + goto freeandexit; + + if (error = copyin((caddr_t) uap->state, (caddr_t) state, sizeof(struct searchstate))) + goto freeandexit; + + /* set up the uio structure which will contain the users return buffer */ + + aiov.iov_base = searchblock.returnbuffer; + aiov.iov_len = searchblock.returnbuffersize; + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + auio.uio_rw = UIO_READ; + auio.uio_segflg = UIO_USERSPACE; + auio.uio_procp = p; + auio.uio_resid = searchblock.returnbuffersize; + + nameiflags = LOCKLEAF; + if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + + if (error = namei(&nd)) + goto freeandexit; + + vp = nd.ni_vp; + + + /* + * If searchblock.maxmatches == 0, then skip the search. This has happened + * before and sometimes the underlyning code doesnt deal with it well. 
+ */ + if (searchblock.maxmatches == 0) { + nummatches = 0; + goto saveandexit; + } + + /* + Allright, we have everything we need, so lets make that call. + + We keep special track of the return value from the file system: + EAGAIN is an acceptable error condition that shouldn't keep us + from copying out any results... + */ + + fserror = VOP_SEARCHFS(vp, + searchparams1, + searchparams2, + &searchblock.searchattrs, + searchblock.maxmatches, + &searchblock.timelimit, + returnattrs, + &nummatches, + uap->scriptcode, + uap->options, + &auio, + state); + +saveandexit: + + vput(vp); + + /* Now copy out the stuff that needs copying out. That means the number of matches, the + search state. Everything was already put into he return buffer by the vop call. */ + + if (error = copyout((caddr_t) state, (caddr_t) uap->state, sizeof(struct searchstate))) + goto freeandexit; + + if (error = copyout((caddr_t) &nummatches, (caddr_t) uap->nummatches, sizeof(u_long))) + goto freeandexit; + + error = fserror; + +freeandexit: + + FREE(searchparams1,M_TEMP); + + return(error); + + +} /* end of searchfs system call */ + + +/* + * Make a filesystem-specific control call: + */ +struct fsctl_args { + const char *path; /* pathname of the target object */ + u_long cmd; /* cmd (also encodes size/direction of arguments a la ioctl) */ + caddr_t data; /* pointer to argument buffer */ + u_long options; /* options for fsctl processing */ +}; +/* ARGSUSED */ +int +fsctl (p,uap,retval) + struct proc *p; + struct fsctl_args *uap; + register_t *retval; + +{ + int error; + struct nameidata nd; + u_long nameiflags; + u_long cmd = uap->cmd; + register u_int size; +#define STK_PARAMS 128 + char stkbuf[STK_PARAMS]; + caddr_t data, memp; + + size = IOCPARM_LEN(cmd); + if (size > IOCPARM_MAX) return (EINVAL); + + memp = NULL; + if (size > sizeof (stkbuf)) { + if ((memp = (caddr_t)kalloc(size)) == 0) return ENOMEM; + data = memp; + } else { + data = stkbuf; + }; + + if (cmd & IOC_IN) { + if (size) { + error 
= copyin(uap->data, data, (u_int)size); + if (error) goto FSCtl_Exit; + } else { + *(caddr_t *)data = uap->data; + }; + } else if ((cmd & IOC_OUT) && size) { + /* + * Zero the buffer so the user always + * gets back something deterministic. + */ + bzero(data, size); + } else if (cmd & IOC_VOID) + *(caddr_t *)data = uap->data; + + /* Get the vnode for the file we are getting info on: */ + nameiflags = LOCKLEAF; + if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + NDINIT(&nd, LOOKUP, nameiflags, UIO_USERSPACE, (char *)uap->path, p); + if (error = namei(&nd)) goto FSCtl_Exit; + + /* Invoke the filesystem-specific code */ + error = VOP_IOCTL(nd.ni_vp, IOCBASECMD(cmd), data, uap->options, p->p_ucred, p); + + vput(nd.ni_vp); + + /* + * Copy any data to user, size was + * already set and checked above. + */ + if (error == 0 && (cmd & IOC_OUT) && size) error = copyout(data, uap->data, (u_int)size); + +FSCtl_Exit: + if (memp) kfree(memp, size); + + return error; +} +/* end of fsctl system call */ diff --git a/bsd/vfs/vfs_utfconv.c b/bsd/vfs/vfs_utfconv.c new file mode 100644 index 000000000..44f726355 --- /dev/null +++ b/bsd/vfs/vfs_utfconv.c @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + + +/* + * UTF-8 (UCS Transformation Format) + * + * The following subset of UTF-8 is used to encode UCS-2 filenames. It + * requires a maximum of three 3 bytes per UCS-2 character. Only the + * shortest encoding required to represent the significant UCS-2 bits + * is legal. + * + * UTF-8 Multibyte Codes + * + * Bytes Bits UCS-2 Min UCS-2 Max UTF-8 Byte Sequence (binary) + * ------------------------------------------------------------------- + * 1 7 0x0000 0x007F 0xxxxxxx + * 2 11 0x0080 0x07FF 110xxxxx 10xxxxxx + * 3 16 0x0800 0xFFFF 1110xxxx 10xxxxxx 10xxxxxx + * ------------------------------------------------------------------- + */ + + +#define UCS_TO_UTF_LEN(c) ((c) < 0x0080 ? 1 : ((c) < 0x0800 ? 2 : 3)) + + +static u_int16_t ucs_decompose __P((u_int16_t, u_int16_t *)); + + +/* + * utf8_encodelen - Calculates the UTF-8 encoding length for a UCS-2 filename + * + * NOTES: + * If '/' chars are allowed on disk then an alternate + * (replacement) char must be provided in altslash. + * + * input flags: + * UTF_REVERSE_ENDIAN: UCS-2 byteorder is opposite current runtime + */ +size_t +utf8_encodelen(ucsp, ucslen, altslash, flags) + const u_int16_t * ucsp; + size_t ucslen; + u_int16_t altslash; + int flags; +{ + u_int16_t ucs_ch; + int charcnt; + int swapbytes = (flags & UTF_REVERSE_ENDIAN); + size_t len; + + charcnt = ucslen / 2; + len = 0; + + while (charcnt-- > 0) { + ucs_ch = *ucsp++; + + if (swapbytes) + ucs_ch = NXSwapShort(ucs_ch); + if (altslash && ucs_ch == '/') + ucs_ch = altslash; + if (ucs_ch == '\0') + ucs_ch = 0xc080; + + len += UCS_TO_UTF_LEN(ucs_ch); + } + + return (len); +} + + +/* + * utf8_encodestr - Encodes a UCS-2 (Unicode) string to UTF-8 + * + * NOTES: + * The resulting UTF-8 string is not null terminated. 
+ * + * If '/' chars are allowed on disk then an alternate + * (replacement) char must be provided in altslash. + * + * input flags: + * UTF_REVERSE_ENDIAN: UCS-2 byteorder is opposite current runtime + * UTF_NO_NULL_TERM: don't add NULL termination to UTF-8 output + */ +int utf8_encodestr(ucsp, ucslen, utf8p, utf8len, buflen, altslash, flags) + const u_int16_t * ucsp; + size_t ucslen; + u_int8_t * utf8p; + size_t * utf8len; + size_t buflen; + u_int16_t altslash; + int flags; +{ + u_int8_t * bufstart; + u_int8_t * bufend; + u_int16_t ucs_ch; + int charcnt; + int swapbytes = (flags & UTF_REVERSE_ENDIAN); + int nullterm = ((flags & UTF_NO_NULL_TERM) == 0); + int result = 0; + + bufstart = utf8p; + bufend = bufstart + buflen; + if (nullterm) + --bufend; + charcnt = ucslen / 2; + + while (charcnt-- > 0) { + ucs_ch = *ucsp++; + + if (swapbytes) + ucs_ch = NXSwapShort(ucs_ch); + if (altslash && ucs_ch == '/') + ucs_ch = altslash; + + if ((ucs_ch < 0x0080) && (ucs_ch != '\0')) { + if (utf8p >= bufend) { + result = ENAMETOOLONG; + break; + } + *utf8p++ = ucs_ch; + + } else if (ucs_ch < 0x800) { + if ((utf8p + 1) >= bufend) { + result = ENAMETOOLONG; + break; + } + /* NOTE: NULL maps to 0xC080 */ + *utf8p++ = (ucs_ch >> 6) | 0xc0; + *utf8p++ = (ucs_ch & 0x3f) | 0x80; + + } else { + if ((utf8p + 2) >= bufend) { + result = ENAMETOOLONG; + break; + } + *utf8p++ = (ucs_ch >> 12) | 0xe0; + *utf8p++ = ((ucs_ch >> 6) & 0x3f) | 0x80; + *utf8p++ = ((ucs_ch) & 0x3f) | 0x80; + } + } + + *utf8len = utf8p - bufstart; + if (nullterm) + *utf8p++ = '\0'; + + return (result); +} + + +/* + * utf8_decodestr - Decodes a UTF-8 string back to UCS-2 (Unicode) + * + * NOTES: + * The input UTF-8 string does not need to be null terminated + * if utf8len is set. + * + * If '/' chars are allowed on disk then an alternate + * (replacement) char must be provided in altslash. 
+ * + * input flags: + * UTF_REV_ENDIAN: UCS-2 byteorder is oposite current runtime + * UTF_DECOMPOSED: UCS-2 output string must be fully decompsed + */ +int +utf8_decodestr(utf8p, utf8len, ucsp, ucslen, buflen, altslash, flags) + const u_int8_t* utf8p; + size_t utf8len; + u_int16_t* ucsp; + size_t *ucslen; + size_t buflen; + u_int16_t altslash; + int flags; +{ + u_int16_t* bufstart; + u_int16_t* bufend; + u_int16_t ucs_ch; + u_int8_t byte; + int result = 0; + int decompose, swapbytes; + + decompose = (flags & UTF_DECOMPOSED); + swapbytes = (flags & UTF_REVERSE_ENDIAN); + + bufstart = ucsp; + bufend = (u_int16_t *)((u_int8_t *)ucsp + buflen); + + while (utf8len-- > 0 && (byte = *utf8p++) != '\0') { + if (ucsp >= bufend) { + result = ENAMETOOLONG; + goto stop; + } + + /* check for ascii */ + if (byte < 0x80) { + ucs_ch = byte; + } else { + switch (byte & 0xf0) { + /* 2 byte sequence*/ + case 0xc0: + case 0xd0: + /* extract bits 6 - 10 from first byte */ + ucs_ch = (byte & 0x1F) << 6; + if ((ucs_ch < 0x0080) && (*utf8p != 0x80)) { + result = EINVAL; /* seq not minimal */ + goto stop; + } + break; + /* 3 byte sequence*/ + case 0xe0: + /* extract bits 12 - 15 from first byte */ + ucs_ch = (byte & 0x0F) << 6; + + /* extract bits 6 - 11 from second byte */ + if (((byte = *utf8p++) & 0xc0) != 0x80) { + result = EINVAL; + goto stop; + } + utf8len--; + + ucs_ch += (byte & 0x3F); + ucs_ch <<= 6; + + if (ucs_ch < 0x0800) { + result = EINVAL; /* seq not minimal */ + goto stop; + } + break; + default: + result = EINVAL; + goto stop; + } + + /* extract bits 0 - 5 from final byte */ + if (((byte = *utf8p++) & 0xc0) != 0x80) { + result = EINVAL; + goto stop; + } + utf8len--; + ucs_ch += (byte & 0x3F); + + if (decompose) { + u_int16_t comb_ch; + + ucs_ch = ucs_decompose(ucs_ch, &comb_ch); + + if (comb_ch) { + if (swapbytes) + *ucsp++ = NXSwapShort(ucs_ch); + else + *ucsp++ = ucs_ch; + + if (ucsp >= bufend) { + result = ENAMETOOLONG; + goto stop; + } + + ucs_ch = comb_ch; + } + } + 
} + + if (ucs_ch == altslash) + ucs_ch = '/'; + if (swapbytes) + ucs_ch = NXSwapShort(ucs_ch); + + *ucsp++ = ucs_ch; + } +stop: + *ucslen = (u_int8_t*)ucsp - (u_int8_t*)bufstart; + + return (result); +} + + +/* + * Lookup tables for Unicode chars 0x00C0 thru 0x00FF + * primary_char yields first decomposed char. If this + * char is an alpha char then get the combining char + * from the combining_char table and add 0x0300 to it. + */ + +static unsigned char primary_char[64] = { + 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0xC6, 0x43, + + 0x45, 0x45, 0x45, 0x45, 0x49, 0x49, 0x49, 0x49, + + 0xD0, 0x4E, 0x4F, 0x4F, 0x4F, 0x4F, 0x4F, 0xD7, + + 0xD8, 0x55, 0x55, 0x55, 0x55, 0x59, 0xDE, 0xDF, + + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0xE6, 0x63, + + 0x65, 0x65, 0x65, 0x65, 0x69, 0x69, 0x69, 0x69, + + 0xF0, 0x6E, 0x6F, 0x6F, 0x6F, 0x6F, 0x6F, 0xF7, + + 0xF8, 0x75, 0x75, 0x75, 0x75, 0x79, 0xFE, 0x79, +}; + +static unsigned char combining_char[64] = { + 0x00, 0x01, 0x02, 0x03, 0x08, 0x0A, 0xFF, 0x27, + + 0x00, 0x01, 0x02, 0x08, 0x00, 0x01, 0x02, 0x08, + + 0xFF, 0x03, 0x00, 0x01, 0x02, 0x03, 0x08, 0xFF, + + 0xFF, 0x00, 0x01, 0x02, 0x08, 0x01, 0xFF, 0xFF, + + 0x00, 0x01, 0x02, 0x03, 0x08, 0x0A, 0xFF, 0x27, + + 0x00, 0x01, 0x02, 0x08, 0x00, 0x01, 0x02, 0x08, + + 0xFF, 0x03, 0x00, 0x01, 0x02, 0x03, 0x08, 0xFF, + + 0xFF, 0x00, 0x01, 0x02, 0x08, 0x01, 0xFF, 0x08 +}; + + +/* CJK codepoints 0x3000 ~ 0x30FF */ +static const unsigned long __CJKDecompBitmap[] = { + 0x00000000, 0x00000000, 0x000AAAAA, 0xA540DB6C, /* 0x3000 */ + 0x00000802, 0x000AAAAA, 0xA540DB6C, 0x000009E2, /* 0x3080 */ +}; +#define IS_DECOMPOSABLE(table,unicodeVal) \ + (table[(unicodeVal) / 32] & (1 << (31 - ((unicodeVal) % 32)))) + +/* + * ucs_decompose - decompose a composed UCS-2 char + * + * Composed Unicode characters are forbidden on + * HFS Plus volumes. ucs_decompose will convert a + * composed character into its correct decomposed + * sequence. + * + * Currently only MacRoman and MacJapanese chars + * are handled. 
Other composed characters are + * passed unchanged. + */ +static u_int16_t +ucs_decompose(register u_int16_t ch, u_int16_t *cmb) +{ + u_int16_t base; + + *cmb = 0; + + if ((ch <= 0x00FF) && (ch >= 0x00C0)) { + ch -= 0x00C0; + + base = (u_int16_t) primary_char[ch]; + + if (base <= 'z') { + *cmb = (u_int16_t)0x0300 + (u_int16_t)combining_char[ch]; + } + } else if ((ch > 0x3000) && (ch < 0x3100) && + IS_DECOMPOSABLE(__CJKDecompBitmap, ch - 0x3000)) { + + /* Handle HIRAGANA LETTERs */ + switch(ch) { + case 0x3071: base = 0x306F; *cmb = 0x309A; break; /* PA */ + case 0x3074: base = 0x3072; *cmb = 0x309A; break; /* PI */ + case 0x3077: base = 0x3075; *cmb = 0x309A; break; /* PU */ + case 0x307A: base = 0x3078; *cmb = 0x309A; break; /* PE */ + + case 0x307D: base = 0x307B; *cmb = 0x309A; break; /* PO */ + case 0x3094: base = 0x3046; *cmb = 0x3099; break; /* VU */ + case 0x30D1: base = 0x30CF; *cmb = 0x309A; break; /* PA */ + case 0x30D4: base = 0x30D2; *cmb = 0x309A; break; /* PI */ + + case 0x30D7: base = 0x30D5; *cmb = 0x309A; break; /* PU */ + case 0x30DA: base = 0x30D8; *cmb = 0x309A; break; /* PE */ + case 0x30DD: base = 0x30DB; *cmb = 0x309A; break; /* PO */ + case 0x30F4: base = 0x30A6; *cmb = 0x3099; break; /* VU */ + + case 0x30F7: base = 0x30EF; *cmb = 0x3099; break; /* VA */ + case 0x30F8: base = 0x30F0; *cmb = 0x3099; break; /* VI */ + case 0x30F9: base = 0x30F1; *cmb = 0x3099; break; /* VE */ + case 0x30FA: base = 0x30F2; *cmb = 0x3099; break; /* VO */ + + default: + /* the rest (41 of them) have a simple conversion */ + base = ch - 1; + *cmb = 0x3099; + } + } else { + base = ch; + } + + return (base); +} + diff --git a/bsd/vfs/vfs_vnops.c b/bsd/vfs/vfs_vnops.c new file mode 100644 index 000000000..9c7b9d919 --- /dev/null +++ b/bsd/vfs/vfs_vnops.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1982, 1986, 1989, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)vfs_vnops.c 8.14 (Berkeley) 6/15/95 + * + * History + * 10-20-1997 Umesh Vaishampayan + * Fixed the count to be off_t rather than int. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct fileops vnops = + { vn_read, vn_write, vn_ioctl, vn_select, vn_closefile }; + +/* + * Common code for vnode open operations. + * Check permissions, and call the VOP_OPEN or VOP_CREATE routine. 
+ */ +vn_open(ndp, fmode, cmode) + register struct nameidata *ndp; + int fmode, cmode; +{ + register struct vnode *vp; + register struct proc *p = ndp->ni_cnd.cn_proc; + register struct ucred *cred = p->p_ucred; + struct vattr vat; + struct vattr *vap = &vat; + int error; + + if (fmode & O_CREAT) { + ndp->ni_cnd.cn_nameiop = CREATE; + ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF; + if ((fmode & O_EXCL) == 0) + ndp->ni_cnd.cn_flags |= FOLLOW; + if (error = namei(ndp)) + return (error); + if (ndp->ni_vp == NULL) { + VATTR_NULL(vap); + vap->va_type = VREG; + vap->va_mode = cmode; + if (fmode & O_EXCL) + vap->va_vaflags |= VA_EXCLUSIVE; + VOP_LEASE(ndp->ni_dvp, p, cred, LEASE_WRITE); + if (error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, + &ndp->ni_cnd, vap)) + return (error); + fmode &= ~O_TRUNC; + vp = ndp->ni_vp; + } else { + VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd); + if (ndp->ni_dvp == ndp->ni_vp) + vrele(ndp->ni_dvp); + else + vput(ndp->ni_dvp); + ndp->ni_dvp = NULL; + vp = ndp->ni_vp; + if (fmode & O_EXCL) { + error = EEXIST; + goto bad; + } + fmode &= ~O_CREAT; + } + } else { + ndp->ni_cnd.cn_nameiop = LOOKUP; + ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF; + if (error = namei(ndp)) + return (error); + vp = ndp->ni_vp; + } + if (vp->v_type == VSOCK) { + error = EOPNOTSUPP; + goto bad; + } + if ((fmode & O_CREAT) == 0) { + if (fmode & FREAD && fmode & (FWRITE | O_TRUNC)) { + int err = 0; + if (vp->v_type == VDIR) + err = EISDIR; + else + err = vn_writechk(vp); + if (err && !(error = VOP_ACCESS(vp, VREAD, cred, p))) + error = err; + if (error || (error = VOP_ACCESS(vp, VREAD|VWRITE, + cred, p))) + goto bad; + } else if (fmode & FREAD) { + if ((error = VOP_ACCESS(vp, VREAD, cred, p))) + goto bad; + } else if (fmode & (FWRITE | O_TRUNC)) { + if (vp->v_type == VDIR) { + error = EISDIR; + goto bad; + } + if ((error = vn_writechk(vp)) || + (error = VOP_ACCESS(vp, VWRITE, cred, p))) + goto bad; + } + } + if (fmode & O_TRUNC) { + VOP_UNLOCK(vp, 0, p); /* XXX */ + VOP_LEASE(vp, p, 
cred, LEASE_WRITE); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* XXX */ + VATTR_NULL(vap); + vap->va_size = 0; + if (error = VOP_SETATTR(vp, vap, cred, p)) + goto bad; + } + if (error = VOP_OPEN(vp, fmode, cred, p)) + goto bad; + if (UBCINFOMISSING(vp)) + panic("vn_open: ubc_info_init"); + if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) + panic("vn_open: hold"); + if (fmode & FWRITE) + if (++vp->v_writecount <= 0) + panic("vn_open: v_writecount"); + return (0); +bad: + vput(vp); + return (error); +} + +/* + * Check for write permissions on the specified vnode. + * Prototype text segments cannot be written. + */ +vn_writechk(vp) + register struct vnode *vp; +{ + + /* + * If there's shared text associated with + * the vnode, try to free it up once. If + * we fail, we can't allow writing. + */ +#if 0 + /* XXXXX Not sure we need this */ + if (vp->v_flag & VTEXT) + return (ETXTBSY); +#endif /* 0 */ + return (0); +} + +/* + * Vnode close call + */ +vn_close(vp, flags, cred, p) + register struct vnode *vp; + int flags; + struct ucred *cred; + struct proc *p; +{ + int error; + vm_map_t user_map; + vm_offset_t addr, addr1; + vm_size_t size, pageoff; + + if (flags & FWRITE) + vp->v_writecount--; + error = VOP_CLOSE(vp, flags, cred, p); + ubc_rele(vp); + vrele(vp); + return (error); +} + +/* + * Package up an I/O request on a vnode into a uio and do it. 
+ */ +vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, p) + enum uio_rw rw; + struct vnode *vp; + caddr_t base; + int len; + off_t offset; + enum uio_seg segflg; + int ioflg; + struct ucred *cred; + int *aresid; + struct proc *p; +{ + struct uio auio; + struct iovec aiov; + int error=0; + + /* FIXME XXX */ + if ((ioflg & IO_NODELOCKED) == 0) + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + auio.uio_iov = &aiov; + auio.uio_iovcnt = 1; + aiov.iov_base = base; + aiov.iov_len = len; + auio.uio_resid = len; + auio.uio_offset = offset; + auio.uio_segflg = segflg; + auio.uio_rw = rw; + auio.uio_procp = p; + + if (rw == UIO_READ) + error = VOP_READ(vp, &auio, ioflg, cred); + else + error = VOP_WRITE(vp, &auio, ioflg, cred); + + if (aresid) + *aresid = auio.uio_resid; + else + if (auio.uio_resid && error == 0) + error = EIO; + if ((ioflg & IO_NODELOCKED) == 0) + VOP_UNLOCK(vp, 0, p); + return (error); +} + +/* + * File table vnode read routine. + */ +vn_read(fp, uio, cred) + struct file *fp; + struct uio *uio; + struct ucred *cred; +{ + struct vnode *vp = (struct vnode *)fp->f_data; + struct proc *p = uio->uio_procp; + int error; + off_t count; + + VOP_LEASE(vp, p, cred, LEASE_READ); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + uio->uio_offset = fp->f_offset; + count = uio->uio_resid; + + error = VOP_READ(vp, uio, (fp->f_flag & FNONBLOCK) ? IO_NDELAY : 0, cred); + + fp->f_offset += count - uio->uio_resid; + VOP_UNLOCK(vp, 0, p); + return (error); +} + + +/* + * File table vnode write routine. 
+ */ +vn_write(fp, uio, cred) + struct file *fp; + struct uio *uio; + struct ucred *cred; +{ + struct vnode *vp = (struct vnode *)fp->f_data; + struct proc *p = uio->uio_procp; + int error, ioflag = IO_UNIT; + off_t count; + + if (vp->v_type == VREG && (fp->f_flag & O_APPEND)) + ioflag |= IO_APPEND; + if (fp->f_flag & FNONBLOCK) + ioflag |= IO_NDELAY; + if ((fp->f_flag & O_FSYNC) || + (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) + ioflag |= IO_SYNC; + VOP_LEASE(vp, p, cred, LEASE_WRITE); + vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + uio->uio_offset = fp->f_offset; + count = uio->uio_resid; + + error = VOP_WRITE(vp, uio, ioflag, cred); + + if (ioflag & IO_APPEND) + fp->f_offset = uio->uio_offset; + else + fp->f_offset += count - uio->uio_resid; + /* + * Set the credentials on successful writes + */ + if ((error == 0) && (vp->v_tag == VT_NFS) && (UBCINFOEXISTS(vp))) { + ubc_setcred(vp, p); + } + + VOP_UNLOCK(vp, 0, p); + return (error); +} + +/* + * File table vnode stat routine. 
+ */ +vn_stat(vp, sb, p) + struct vnode *vp; + register struct stat *sb; + struct proc *p; +{ + struct vattr vattr; + register struct vattr *vap; + int error; + u_short mode; + + vap = &vattr; + error = VOP_GETATTR(vp, vap, p->p_ucred, p); + if (error) + return (error); + /* + * Copy from vattr table + */ + sb->st_dev = vap->va_fsid; + sb->st_ino = vap->va_fileid; + mode = vap->va_mode; + switch (vp->v_type) { + case VREG: + mode |= S_IFREG; + break; + case VDIR: + mode |= S_IFDIR; + break; + case VBLK: + mode |= S_IFBLK; + break; + case VCHR: + mode |= S_IFCHR; + break; + case VLNK: + mode |= S_IFLNK; + break; + case VSOCK: + mode |= S_IFSOCK; + break; + case VFIFO: + mode |= S_IFIFO; + break; + default: + return (EBADF); + }; + sb->st_mode = mode; + sb->st_nlink = vap->va_nlink; + sb->st_uid = vap->va_uid; + sb->st_gid = vap->va_gid; + sb->st_rdev = vap->va_rdev; + sb->st_size = vap->va_size; + sb->st_atimespec = vap->va_atime; + sb->st_mtimespec = vap->va_mtime; + sb->st_ctimespec = vap->va_ctime; + sb->st_blksize = vap->va_blocksize; + sb->st_flags = vap->va_flags; + /* Do not give the generation number out to unpriviledged users */ + if (suser(p->p_ucred, &p->p_acflag)) + sb->st_gen = 0; + else + sb->st_gen = vap->va_gen; + sb->st_blocks = vap->va_bytes / S_BLKSIZE; + return (0); +} + +/* + * File table vnode ioctl routine. + */ +vn_ioctl(fp, com, data, p) + struct file *fp; + u_long com; + caddr_t data; + struct proc *p; +{ + register struct vnode *vp = ((struct vnode *)fp->f_data); + struct vattr vattr; + int error; + + switch (vp->v_type) { + + case VREG: + case VDIR: + if (com == FIONREAD) { + if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) + return (error); + *(int *)data = vattr.va_size - fp->f_offset; + return (0); + } + if (com == FIONBIO || com == FIOASYNC) /* XXX */ + return (0); /* XXX */ + /* fall into ... 
*/ + + default: + return (ENOTTY); + + case VFIFO: + case VCHR: + case VBLK: + error = VOP_IOCTL(vp, com, data, fp->f_flag, p->p_ucred, p); + if (error == 0 && com == TIOCSCTTY) { + if (p->p_session->s_ttyvp) + vrele(p->p_session->s_ttyvp); + p->p_session->s_ttyvp = vp; + VREF(vp); + } + return (error); + } +} + +/* + * File table vnode select routine. + */ +vn_select(fp, which, p) + struct file *fp; + int which; + struct proc *p; +{ + + return (VOP_SELECT(((struct vnode *)fp->f_data), which, fp->f_flag, + fp->f_cred, p)); +} + +/* + * Check that the vnode is still valid, and if so + * acquire requested lock. + */ +int +vn_lock(vp, flags, p) + struct vnode *vp; + int flags; + struct proc *p; +{ + int error; + + do { + if ((flags & LK_INTERLOCK) == 0) + simple_lock(&vp->v_interlock); + if (vp->v_flag & VXLOCK) { + while (vp->v_flag & VXLOCK) { + vp->v_flag |= VXWANT; + simple_unlock(&vp->v_interlock); + tsleep((caddr_t)vp, PINOD, "vn_lock", 0); + } + error = ENOENT; + } else { + error = VOP_LOCK(vp, flags | LK_INTERLOCK, p); + if (error == 0) + return (error); + } + flags &= ~LK_INTERLOCK; + } while (flags & LK_RETRY); + return (error); +} + +/* + * File table vnode close routine. + */ +vn_closefile(fp, p) + struct file *fp; + struct proc *p; +{ + + return (vn_close(((struct vnode *)fp->f_data), fp->f_flag, + fp->f_cred, p)); +} diff --git a/bsd/vfs/vnode_if.c b/bsd/vfs/vnode_if.c new file mode 100644 index 000000000..4d72b5069 --- /dev/null +++ b/bsd/vfs/vnode_if.c @@ -0,0 +1,1138 @@ + +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved + * Copyright (c) 1992, 1993, 1994, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + + +/* + * Warning: This file is generated automatically. + * (Modifications made here may easily be lost!) + * + * Created by the script: + * @(#)vnode_if.sh 8.7 (Berkeley) 5/11/95 + */ + + +#include +#include +#include +#include + +struct vnodeop_desc vop_default_desc = { + 0, + "default", + 0, + NULL, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + + +int vop_lookup_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_lookup_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_lookup_desc = { + 0, + "vop_lookup", + 0, + vop_lookup_vp_offsets, + VOPARG_OFFSETOF(struct vop_lookup_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_lookup_args, a_cnp), + NULL, +}; + +int vop_cachedlookup_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_cachedlookup_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_cachedlookup_desc = { + 0, + "vop_cachedlookup", + 0, + vop_cachedlookup_vp_offsets, + VOPARG_OFFSETOF(struct vop_cachedlookup_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_cachedlookup_args, a_cnp), + NULL, +}; + +int vop_create_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_create_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_create_desc = { + 0, + "vop_create", + 0 | VDESC_VP0_WILLRELE, + vop_create_vp_offsets, + VOPARG_OFFSETOF(struct vop_create_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + 
VOPARG_OFFSETOF(struct vop_create_args, a_cnp), + NULL, +}; + +int vop_whiteout_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_whiteout_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_whiteout_desc = { + 0, + "vop_whiteout", + 0 | VDESC_VP0_WILLRELE, + vop_whiteout_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_whiteout_args, a_cnp), + NULL, +}; + +int vop_mknod_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_mknod_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_mknod_desc = { + 0, + "vop_mknod", + 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, + vop_mknod_vp_offsets, + VOPARG_OFFSETOF(struct vop_mknod_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_mknod_args, a_cnp), + NULL, +}; + +int vop_mkcomplex_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_mkcomplex_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_mkcomplex_desc = { + 0, + "vop_mkcomplex", + 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, + vop_mkcomplex_vp_offsets, + VOPARG_OFFSETOF(struct vop_mkcomplex_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_mkcomplex_args, a_cnp), + NULL, +}; + +int vop_open_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_open_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_open_desc = { + 0, + "vop_open", + 0, + vop_open_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_open_args, a_cred), + VOPARG_OFFSETOF(struct vop_open_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_close_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_close_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_close_desc = { + 0, + "vop_close", + 0, + vop_close_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_close_args, a_cred), + VOPARG_OFFSETOF(struct vop_close_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_access_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_access_args,a_vp), + VDESC_NO_OFFSET +}; +struct 
vnodeop_desc vop_access_desc = { + 0, + "vop_access", + 0, + vop_access_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_access_args, a_cred), + VOPARG_OFFSETOF(struct vop_access_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_getattr_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_getattr_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_getattr_desc = { + 0, + "vop_getattr", + 0, + vop_getattr_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_getattr_args, a_cred), + VOPARG_OFFSETOF(struct vop_getattr_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_setattr_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_setattr_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_setattr_desc = { + 0, + "vop_setattr", + 0, + vop_setattr_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_setattr_args, a_cred), + VOPARG_OFFSETOF(struct vop_setattr_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_getattrlist_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_getattrlist_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_getattrlist_desc = { + 0, + "vop_getattrlist", + 0, + vop_getattrlist_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_getattrlist_args, a_cred), + VOPARG_OFFSETOF(struct vop_getattrlist_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_setattrlist_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_setattrlist_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_setattrlist_desc = { + 0, + "vop_setattrlist", + 0, + vop_setattrlist_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_setattrlist_args, a_cred), + VOPARG_OFFSETOF(struct vop_setattrlist_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_read_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_read_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_read_desc = { + 0, + "vop_read", + 0, + vop_read_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_read_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + 
NULL, +}; + +int vop_write_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_write_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_write_desc = { + 0, + "vop_write", + 0, + vop_write_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_write_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_lease_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_lease_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_lease_desc = { + 0, + "vop_lease", + 0, + vop_lease_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_lease_args, a_cred), + VOPARG_OFFSETOF(struct vop_lease_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_ioctl_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_ioctl_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_ioctl_desc = { + 0, + "vop_ioctl", + 0, + vop_ioctl_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_ioctl_args, a_cred), + VOPARG_OFFSETOF(struct vop_ioctl_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_select_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_select_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_select_desc = { + 0, + "vop_select", + 0, + vop_select_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_select_args, a_cred), + VOPARG_OFFSETOF(struct vop_select_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_exchange_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_exchange_args,a_fvp), + VOPARG_OFFSETOF(struct vop_exchange_args,a_tvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_exchange_desc = { + 0, + "vop_exchange", + 0, + vop_exchange_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_exchange_args, a_cred), + VOPARG_OFFSETOF(struct vop_exchange_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_revoke_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_revoke_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_revoke_desc = { + 0, + "vop_revoke", + 0, + vop_revoke_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + 
VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_mmap_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_mmap_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_mmap_desc = { + 0, + "vop_mmap", + 0, + vop_mmap_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_mmap_args, a_cred), + VOPARG_OFFSETOF(struct vop_mmap_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_fsync_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_fsync_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_fsync_desc = { + 0, + "vop_fsync", + 0, + vop_fsync_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_fsync_args, a_cred), + VOPARG_OFFSETOF(struct vop_fsync_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_seek_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_seek_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_seek_desc = { + 0, + "vop_seek", + 0, + vop_seek_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_seek_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_remove_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_remove_args,a_dvp), + VOPARG_OFFSETOF(struct vop_remove_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_remove_desc = { + 0, + "vop_remove", + 0 | VDESC_VP0_WILLRELE | VDESC_VP1_WILLRELE, + vop_remove_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_remove_args, a_cnp), + NULL, +}; + +int vop_link_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_link_args,a_vp), + VOPARG_OFFSETOF(struct vop_link_args,a_tdvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_link_desc = { + 0, + "vop_link", + 0 | VDESC_VP0_WILLRELE, + vop_link_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_link_args, a_cnp), + NULL, +}; + +int vop_rename_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_rename_args,a_fdvp), + VOPARG_OFFSETOF(struct vop_rename_args,a_fvp), + VOPARG_OFFSETOF(struct vop_rename_args,a_tdvp), + 
VOPARG_OFFSETOF(struct vop_rename_args,a_tvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_rename_desc = { + 0, + "vop_rename", + 0 | VDESC_VP0_WILLRELE | VDESC_VP1_WILLRELE | VDESC_VP2_WILLRELE | VDESC_VP3_WILLRELE, + vop_rename_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_rename_args, a_fcnp), + NULL, +}; + +int vop_mkdir_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_mkdir_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_mkdir_desc = { + 0, + "vop_mkdir", + 0 | VDESC_VP0_WILLRELE, + vop_mkdir_vp_offsets, + VOPARG_OFFSETOF(struct vop_mkdir_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_mkdir_args, a_cnp), + NULL, +}; + +int vop_rmdir_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_rmdir_args,a_dvp), + VOPARG_OFFSETOF(struct vop_rmdir_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_rmdir_desc = { + 0, + "vop_rmdir", + 0 | VDESC_VP0_WILLRELE | VDESC_VP1_WILLRELE, + vop_rmdir_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_rmdir_args, a_cnp), + NULL, +}; + +int vop_symlink_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_symlink_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_symlink_desc = { + 0, + "vop_symlink", + 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, + vop_symlink_vp_offsets, + VOPARG_OFFSETOF(struct vop_symlink_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_symlink_args, a_cnp), + NULL, +}; + +int vop_readdir_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_readdir_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_readdir_desc = { + 0, + "vop_readdir", + 0, + vop_readdir_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_readdir_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_readdirattr_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_readdirattr_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc 
vop_readdirattr_desc = { + 0, + "vop_readdirattr", + 0, + vop_readdirattr_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_readdirattr_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_readlink_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_readlink_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_readlink_desc = { + 0, + "vop_readlink", + 0, + vop_readlink_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_readlink_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_abortop_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_abortop_args,a_dvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_abortop_desc = { + 0, + "vop_abortop", + 0, + vop_abortop_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_abortop_args, a_cnp), + NULL, +}; + +int vop_inactive_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_inactive_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_inactive_desc = { + 0, + "vop_inactive", + 0, + vop_inactive_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_inactive_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_reclaim_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_reclaim_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_reclaim_desc = { + 0, + "vop_reclaim", + 0, + vop_reclaim_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_reclaim_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_lock_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_lock_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_lock_desc = { + 0, + "vop_lock", + 0, + vop_lock_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_lock_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_unlock_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_unlock_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_unlock_desc = { + 0, + "vop_unlock", + 0, + 
vop_unlock_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_unlock_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_bmap_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_bmap_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_bmap_desc = { + 0, + "vop_bmap", + 0, + vop_bmap_vp_offsets, + VOPARG_OFFSETOF(struct vop_bmap_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_print_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_print_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_print_desc = { + 0, + "vop_print", + 0, + vop_print_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_islocked_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_islocked_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_islocked_desc = { + 0, + "vop_islocked", + 0, + vop_islocked_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_pathconf_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_pathconf_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_pathconf_desc = { + 0, + "vop_pathconf", + 0, + vop_pathconf_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_advlock_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_advlock_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_advlock_desc = { + 0, + "vop_advlock", + 0, + vop_advlock_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_blkatoff_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_blkatoff_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_blkatoff_desc = { + 0, + "vop_blkatoff", + 0, + vop_blkatoff_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_valloc_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_valloc_args,a_pvp), + 
VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_valloc_desc = { + 0, + "vop_valloc", + 0, + vop_valloc_vp_offsets, + VOPARG_OFFSETOF(struct vop_valloc_args, a_vpp), + VOPARG_OFFSETOF(struct vop_valloc_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_reallocblks_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_reallocblks_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_reallocblks_desc = { + 0, + "vop_reallocblks", + 0, + vop_reallocblks_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_vfree_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_vfree_args,a_pvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_vfree_desc = { + 0, + "vop_vfree", + 0, + vop_vfree_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_truncate_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_truncate_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_truncate_desc = { + 0, + "vop_truncate", + 0, + vop_truncate_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_truncate_args, a_cred), + VOPARG_OFFSETOF(struct vop_truncate_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_allocate_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_allocate_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_allocate_desc = { + 0, + "vop_allocate", + 0, + vop_allocate_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_allocate_args, a_cred), + VOPARG_OFFSETOF(struct vop_allocate_args, a_p), + VDESC_NO_OFFSET, + NULL, +}; + +int vop_update_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_update_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_update_desc = { + 0, + "vop_update", + 0, + vop_update_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_pgrd_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_pgrd_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_pgrd_desc = { 
+ 0, + "vop_pgrd", + 0, + vop_pgrd_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_pgrd_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_pgwr_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_pgwr_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_pgwr_desc = { + 0, + "vop_pgwr", + 0, + vop_pgwr_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_pgwr_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_pagein_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_pagein_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_pagein_desc = { + 0, + "vop_pagein", + 0, + vop_pagein_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_pagein_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_pageout_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_pageout_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_pageout_desc = { + 0, + "vop_pageout", + 0, + vop_pageout_vp_offsets, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_pageout_args, a_cred), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_devblocksize_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_devblocksize_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_devblocksize_desc = { + 0, + "vop_devblocksize", + 0, + vop_devblocksize_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_searchfs_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_searchfs_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_searchfs_desc = { + 0, + "vop_searchfs", + 0, + vop_searchfs_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_copyfile_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_copyfile_args,a_fvp), + VOPARG_OFFSETOF(struct vop_copyfile_args,a_tdvp), + VOPARG_OFFSETOF(struct vop_copyfile_args,a_tvp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_copyfile_desc = { + 0, + "vop_copyfile", 
+ 0 | VDESC_VP0_WILLRELE | VDESC_VP1_WILLRELE | VDESC_VP2_WILLRELE, + vop_copyfile_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vop_copyfile_args, a_tcnp), + NULL, +}; + +int vop_blktooff_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_blktooff_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_blktooff_desc = { + 0, + "vop_blktooff", + 0, + vop_blktooff_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_offtoblk_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_offtoblk_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_offtoblk_desc = { + 0, + "vop_offtoblk", + 0, + vop_offtoblk_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_cmap_vp_offsets[] = { + VOPARG_OFFSETOF(struct vop_cmap_args,a_vp), + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_cmap_desc = { + 0, + "vop_cmap", + 0, + vop_cmap_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +/* Special cases: */ + +int vop_strategy_vp_offsets[] = { + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_strategy_desc = { + 0, + "vop_strategy", + 0, + vop_strategy_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +int vop_bwrite_vp_offsets[] = { + VDESC_NO_OFFSET +}; +struct vnodeop_desc vop_bwrite_desc = { + 0, + "vop_bwrite", + 0, + vop_bwrite_vp_offsets, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; + +/* End of special cases. 
*/ + +struct vnodeop_desc *vfs_op_descs[] = { + &vop_default_desc, /* MUST BE FIRST */ + &vop_strategy_desc, /* XXX: SPECIAL CASE */ + &vop_bwrite_desc, /* XXX: SPECIAL CASE */ + + &vop_lookup_desc, + &vop_cachedlookup_desc, + &vop_create_desc, + &vop_whiteout_desc, + &vop_mknod_desc, + &vop_mkcomplex_desc, + &vop_open_desc, + &vop_close_desc, + &vop_access_desc, + &vop_getattr_desc, + &vop_setattr_desc, + &vop_getattrlist_desc, + &vop_setattrlist_desc, + &vop_read_desc, + &vop_write_desc, + &vop_lease_desc, + &vop_ioctl_desc, + &vop_select_desc, + &vop_exchange_desc, + &vop_revoke_desc, + &vop_mmap_desc, + &vop_fsync_desc, + &vop_seek_desc, + &vop_remove_desc, + &vop_link_desc, + &vop_rename_desc, + &vop_mkdir_desc, + &vop_rmdir_desc, + &vop_symlink_desc, + &vop_readdir_desc, + &vop_readdirattr_desc, + &vop_readlink_desc, + &vop_abortop_desc, + &vop_inactive_desc, + &vop_reclaim_desc, + &vop_lock_desc, + &vop_unlock_desc, + &vop_bmap_desc, + &vop_print_desc, + &vop_islocked_desc, + &vop_pathconf_desc, + &vop_advlock_desc, + &vop_blkatoff_desc, + &vop_valloc_desc, + &vop_reallocblks_desc, + &vop_vfree_desc, + &vop_truncate_desc, + &vop_allocate_desc, + &vop_update_desc, + &vop_pgrd_desc, + &vop_pgwr_desc, + &vop_pagein_desc, + &vop_pageout_desc, + &vop_devblocksize_desc, + &vop_searchfs_desc, + &vop_copyfile_desc, + &vop_blktooff_desc, + &vop_offtoblk_desc, + &vop_cmap_desc, + NULL +}; + diff --git a/bsd/vfs/vnode_if.sh b/bsd/vfs/vnode_if.sh new file mode 100644 index 000000000..5a721a17d --- /dev/null +++ b/bsd/vfs/vnode_if.sh @@ -0,0 +1,379 @@ +#!/bin/sh - +copyright=' +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved + * Copyright (c) 1992, 1993, 1994, 1995 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +' +SCRIPT_ID='@(#)vnode_if.sh 8.7 (Berkeley) 5/11/95' + +# Script to produce VFS front-end sugar. +# +# usage: vnode_if.sh srcfile +# (where srcfile is currently bsd/vfs/vnode_if.src) +# + +if [ $# -ne 1 ] ; then + echo 'usage: vnode_if.sh srcfile' + exit 1 +fi + +# Name of the source file. +src=$1 + +# Names of the created files. +out_c=vnode_if.c +out_h=vnode_if.h + +# Awk program (must support nawk extensions) +# Use "awk" at Berkeley, "nawk" or "gawk" elsewhere. +awk=${AWK:-awk} +#awk=${AWK:-gawk} + +# Does this awk have a "toupper" function? (i.e. is it GNU awk) +isgawk=`$awk 'BEGIN { print toupper("true"); exit; }' 2>/dev/null` + +# If this awk does not define "toupper" then define our own. +if [ "$isgawk" = TRUE ] ; then + # GNU awk provides it. + toupper= +else + # Provide our own toupper() + toupper=' +function toupper(str) { + _toupper_cmd = "echo "str" |tr a-z A-Z" + _toupper_cmd | getline _toupper_str; + close(_toupper_cmd); + return _toupper_str; +}' +fi + +# +# This is the common part of all awk programs that read $src +# This parses the input for one function into the arrays: +# argdir, argtype, argname, willrele +# and calls "doit()" to generate output for the function. +# +# Input to this parser is pre-processed slightly by sed +# so this awk parser doesn't have to work so hard. 
The +# changes done by the sed pre-processing step are: +# insert a space beween * and pointer name +# replace semicolons with spaces +# +sed_prep='s:\*\([^\*/]\):\* \1:g +s/;/ /' +awk_parser=' +# Comment line +/^#/ { next; } +# First line of description +/^vop_/ { + name=$1; + argc=0; + ubc=$3; + next; +} +# Last line of description +/^}/ { + doit(); + next; +} +# Middle lines of description +{ + argdir[argc] = $1; i=2; + if ($2 == "WILLRELE") { + willrele[argc] = 1; + i++; + } else + willrele[argc] = 0; + argtype[argc] = $i; i++; + while (i < NF) { + argtype[argc] = argtype[argc]" "$i; + i++; + } + argname[argc] = $i; + argc++; + next; +} +' + +# This is put after the copyright on each generated file. +warning=" +/* + * Warning: This file is generated automatically. + * (Modifications made here may easily be lost!) + * + * Created by the script: + * ${SCRIPT_ID} + */ +" + +# Get rid of ugly spaces +space_elim='s:\([^/]\*\) :\1:g' + +# +# Redirect stdout to the H file. +# +echo "$0: Creating $out_h" 1>&2 +exec > $out_h + +# Begin stuff +echo "$copyright" +echo "$warning" +echo ' +#ifndef _VNODE_IF_H_ +#define _VNODE_IF_H_ + +extern struct vnodeop_desc vop_default_desc; +' + +# Body stuff +# This awk program needs toupper() so define it if necessary. +sed -e "$sed_prep" $src | $awk "$toupper"' +function doit() { + # Declare arg struct, descriptor. + printf("\nstruct %s_args {\n", name); + printf("\tstruct vnodeop_desc * a_desc;\n"); + for (i=0; i\n#include \n"); + argc=1; + argtype[0]="struct buf *"; + argname[0]="bp"; + arg0special="->b_vp"; + name="vop_strategy"; + doit(); + name="vop_bwrite"; + doit(); +} +'"$awk_parser" | sed -e "$space_elim" + +# End stuff +echo ' +/* End of special cases. */ + +#endif /* !_VNODE_IF_H_ */' + +# +# Redirect stdout to the C file. 
+# +echo "$0: Creating $out_c" 1>&2 +exec > $out_c + +# Begin stuff +echo "$copyright" +echo "$warning" +echo ' +#include +#include +#include +#include + +struct vnodeop_desc vop_default_desc = { + 0, + "default", + 0, + NULL, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + NULL, +}; +' + +# Body stuff +sed -e "$sed_prep" $src | $awk ' +function do_offset(typematch) { + for (i=0; i +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * temporary support for delayed instantiation + * of default_pager + */ +int default_pager_init_flag = 0; + +struct bs_map bs_port_table[MAX_BACKING_STORE] = { + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}}; + +/* ###################################################### */ + + +#include + +/* + * Routine: macx_swapon + * Function: + * Syscall interface to add a file to backing store + */ +int +macx_swapon( + char *filename, + int flags, + long size, + long priority) +{ + struct vnode *vp = 0; + struct nameidata nd, *ndp; + struct proc *p = current_proc(); + pager_file_t pf; + register int error; + kern_return_t kr; + mach_port_t backing_store; + mach_port_t default_pager_port = MACH_PORT_NULL; + int i; + boolean_t funnel_state; + + struct vattr vattr; + + funnel_state = 
thread_funnel_set(kernel_flock, TRUE); + ndp = &nd; + + if ((error = suser(p->p_ucred, &p->p_acflag))) + goto swapon_bailout; + + unix_master(); + + if(default_pager_init_flag == 0) { + start_def_pager(NULL); + default_pager_init_flag = 1; + } + + /* + * Get a vnode for the paging area. + */ + NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + filename, p); + + if ((error = namei(ndp))) + goto swapon_bailout; + vp = ndp->ni_vp; + + if (vp->v_type != VREG) { + error = EINVAL; + VOP_UNLOCK(vp, 0, p); + goto swapon_bailout; + } + UBCINFOCHECK("macx_swapon", vp); + + if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) { + VOP_UNLOCK(vp, 0, p); + goto swapon_bailout; + } + + if (vattr.va_size < (u_quad_t)size) { + vattr_null(&vattr); + vattr.va_size = (u_quad_t)size; + error = VOP_SETATTR(vp, &vattr, p->p_ucred, p); + if (error) { + VOP_UNLOCK(vp, 0, p); + goto swapon_bailout; + } + } + + /* add new backing store to list */ + i = 0; + while(bs_port_table[i].vp != 0) { + if(i == MAX_BACKING_STORE) + break; + i++; + } + if(i == MAX_BACKING_STORE) { + error = ENOMEM; + VOP_UNLOCK(vp, 0, p); + goto swapon_bailout; + } + + /* remember the vnode. This vnode has namei() reference */ + bs_port_table[i].vp = vp; + + /* + * Look to see if we are already paging to this file. 
+ */ + /* make certain the copy send of kernel call will work */ + kr = host_default_memory_manager(host_priv_self(), &default_pager_port, 0); + if(kr != KERN_SUCCESS) { + error = EAGAIN; + VOP_UNLOCK(vp, 0, p); + bs_port_table[i].vp = 0; + goto swapon_bailout; + } + + kr = default_pager_backing_store_create(default_pager_port, + -1, /* default priority */ + 0, /* default cluster size */ + &backing_store); + if(kr != KERN_SUCCESS) { + error = ENOMEM; + VOP_UNLOCK(vp, 0, p); + bs_port_table[i].vp = 0; + goto swapon_bailout; + } + + /* + * NOTE: we are able to supply PAGE_SIZE here instead of + * an actual record size or block number because: + * a: we do not support offsets from the beginning of the + * file (allowing for non page size/record modulo offsets. + * b: because allow paging will be done modulo page size + */ + + VOP_UNLOCK(vp, 0, p); + kr = default_pager_add_file(backing_store, vp, PAGE_SIZE, + ((int)vattr.va_size)/PAGE_SIZE); + if(kr != KERN_SUCCESS) { + bs_port_table[i].vp = 0; + if(kr == KERN_INVALID_ARGUMENT) + error = EINVAL; + else + error = ENOMEM; + goto swapon_bailout; + } + bs_port_table[i].bs = (void *)backing_store; + error = 0; + if (!ubc_hold(vp)) + panic("macx_swapon: hold"); + + /* Mark this vnode as being used for swapfile */ + SET(vp->v_flag, VSWAP); + + /* + * take an extra reference on the vnode to keep + * vnreclaim() away from this vnode. 
+ */ + VREF(vp); + + /* Hold on to the namei reference to the paging file vnode */ + vp = 0; + +swapon_bailout: + if (vp) { + vrele(vp); + } + unix_release(); + (void) thread_funnel_set(kernel_flock, FALSE); + return(error); +} + +/* + * Routine: macx_swapoff + * Function: + * Syscall interface to remove a file from backing store + */ +int +macx_swapoff( + char *filename, + int flags) +{ + kern_return_t kr; + mach_port_t backing_store; + + struct vnode *vp = 0; + struct nameidata nd, *ndp; + struct proc *p = current_proc(); + int i; + int error; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + backing_store = NULL; + ndp = &nd; + + if ((error = suser(p->p_ucred, &p->p_acflag))) + goto swapoff_bailout; + + unix_master(); + + /* + * Get the vnode for the paging area. + */ + NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, + filename, p); + + if ((error = namei(ndp))) + goto swapoff_bailout; + vp = ndp->ni_vp; + + if (vp->v_type != VREG) { + error = EINVAL; + VOP_UNLOCK(vp, 0, p); + goto swapoff_bailout; + } + + for(i = 0; i < MAX_BACKING_STORE; i++) { + if(bs_port_table[i].vp == vp) { + backing_store; + break; + } + } + if (i == MAX_BACKING_STORE) { + error = EINVAL; + VOP_UNLOCK(vp, 0, p); + goto swapoff_bailout; + } + backing_store = (mach_port_t)bs_port_table[i].bs; + + VOP_UNLOCK(vp, 0, p); + kr = default_pager_backing_store_delete(backing_store); + switch (kr) { + case KERN_SUCCESS: + error = 0; + bs_port_table[i].vp = 0; + ubc_rele(vp); + /* This vnode is no longer used for swapfile */ + CLR(vp->v_flag, VSWAP); + + /* get rid of macx_swapon() namei() reference */ + vrele(vp); + + /* get rid of macx_swapon() "extra" reference */ + vrele(vp); + break; + case KERN_FAILURE: + error = EAGAIN; + break; + default: + error = EAGAIN; + break; + } + +swapoff_bailout: + /* get rid of macx_swapoff() namei() reference */ + if (vp) + vrele(vp); + + unix_release(); + (void) thread_funnel_set(kernel_flock, FALSE); + return(error); +} 
diff --git a/bsd/vm/vm_pageout.h b/bsd/vm/vm_pageout.h new file mode 100644 index 000000000..4a473fd46 --- /dev/null +++ b/bsd/vm/vm_pageout.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_VM_VM_PAGEOUT_H_ +#define _BSD_VM_VM_PAGEOUT_H_ + +#include +#include +#include + +extern vm_map_t kernel_map; + + +#endif /* _BSD_VM_VM_PAGEOUT_H_ */ diff --git a/bsd/vm/vm_pager.h b/bsd/vm/vm_pager.h new file mode 100644 index 000000000..6a12dde49 --- /dev/null +++ b/bsd/vm/vm_pager.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * File: vm_pager.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young + * Copyright (C) 1985, Avadis Tevanian, Jr., Michael Wayne Young + * + * Pager routine interface definition + */ + +#ifndef _VM_PAGER_ +#define _VM_PAGER_ + +#include + +struct pager_struct { + boolean_t is_device; +}; +typedef struct pager_struct *vm_pager_t; +#define vm_pager_null ((vm_pager_t) 0) + +#define PAGER_SUCCESS 0 /* page read or written */ +#define PAGER_ABSENT 1 /* pager does not have page */ +#define PAGER_ERROR 2 /* pager unable to read or write page */ + +#ifdef KERNEL +typedef int pager_return_t; + +vm_pager_t vm_pager_allocate(); +void vm_pager_deallocate(); +pager_return_t vm_pager_get(); +pager_return_t vm_pager_put(); +boolean_t vm_pager_has_page(); +#endif /* KERNEL */ + +#endif /* _VM_PAGER_ */ diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c new file mode 100644 index 000000000..41d4714bc --- /dev/null +++ b/bsd/vm/vm_unix.c @@ -0,0 +1,757 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +/* + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +useracc(addr, len, prot) + caddr_t addr; + u_int len; + int prot; +{ + return (vm_map_check_protection( + current_map(), + trunc_page(addr), round_page(addr+len), + prot == B_READ ? 
VM_PROT_READ : VM_PROT_WRITE)); +} + +vslock(addr, len) + caddr_t addr; + int len; +{ + vm_map_wire(current_map(), trunc_page(addr), + round_page(addr+len), + VM_PROT_READ | VM_PROT_WRITE ,FALSE); +} + +vsunlock(addr, len, dirtied) + caddr_t addr; + int len; + int dirtied; +{ + pmap_t pmap; +#if FIXME /* [ */ + vm_page_t pg; +#endif /* FIXME ] */ + vm_offset_t vaddr, paddr; + +#if FIXME /* [ */ + if (dirtied) { + pmap = get_task_pmap(current_task()); + for (vaddr = trunc_page(addr); vaddr < round_page(addr+len); + vaddr += PAGE_SIZE) { + paddr = pmap_extract(pmap, vaddr); + pg = PHYS_TO_VM_PAGE(paddr); + vm_page_set_modified(pg); + } + } +#endif /* FIXME ] */ +#ifdef lint + dirtied++; +#endif /* lint */ + vm_map_unwire(current_map(), trunc_page(addr), + round_page(addr+len), FALSE); +} + +#if defined(sun) || BALANCE || defined(m88k) +#else /*defined(sun) || BALANCE || defined(m88k)*/ +subyte(addr, byte) + void * addr; + int byte; +{ + char character; + + character = (char)byte; + return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1); +} + +suibyte(addr, byte) + void * addr; + int byte; +{ + char character; + + character = (char)byte; + return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1); +} + +int fubyte(addr) + void * addr; +{ + unsigned char byte; + + if (copyin(addr, (void *) &byte, sizeof(char))) + return(-1); + return(byte); +} + +int fuibyte(addr) + void * addr; +{ + unsigned char byte; + + if (copyin(addr, (void *) &(byte), sizeof(char))) + return(-1); + return(byte); +} + +suword(addr, word) + void * addr; + long word; +{ + return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1); +} + +long fuword(addr) + void * addr; +{ + long word; + + if (copyin(addr, (void *) &word, sizeof(int))) + return(-1); + return(word); +} + +/* suiword and fuiword are the same as suword and fuword, respectively */ + +suiword(addr, word) + void * addr; + long word; +{ + return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 
0 : -1); +} + +long fuiword(addr) + void * addr; +{ + long word; + + if (copyin(addr, (void *) &word, sizeof(int))) + return(-1); + return(word); +} +#endif /* defined(sun) || BALANCE || defined(m88k) || defined(i386) */ + +int +swapon() +{ + return(EOPNOTSUPP); +} + +thread_t +procdup( + struct proc *child, + struct proc *parent) +{ + thread_t thread; + task_t task; + kern_return_t result; + + if (parent->task == kernel_task) + result = task_create_local(TASK_NULL, FALSE, FALSE, &task); + else + result = task_create_local(parent->task, TRUE, FALSE, &task); + if (result != KERN_SUCCESS) + printf("fork/procdup: task_create failed. Code: 0x%x\n", result); + child->task = task; + /* task->proc = child; */ + set_bsdtask_info(task, child); + result = thread_create(task, &thread); + if (result != KERN_SUCCESS) + printf("fork/procdup: thread_create failed. Code: 0x%x\n", result); + +#if FIXME /* [ */ + thread_deallocate(thread); // extra ref + + /* + * Don't need to lock thread here because it can't + * possibly execute and no one else knows about it. + */ + /* compute_priority(thread, FALSE); */ +#endif /* ] */ + return(thread); +} + +kern_return_t +pid_for_task(t, x) + mach_port_t t; + int *x; +{ + struct proc * p; + task_t t1; + extern task_t port_name_to_task(mach_port_t t); + int pid = -1; + kern_return_t err; + boolean_t funnel_state; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + t1 = port_name_to_task(t); + + if (t1 == TASK_NULL) { + err = KERN_FAILURE; + } else { + p = get_bsdtask_info(t1); + if (p) { + pid = p->p_pid; + err = KERN_SUCCESS; + } else { + err = KERN_FAILURE; + } + } + task_deallocate(t1); + (void) copyout((char *) &pid, (char *) x, sizeof(*x)); +pftout: + thread_funnel_set(kernel_flock, funnel_state); + return(err); +} + +/* + * Routine: task_for_pid + * Purpose: + * Get the task port for another "process", named by its + * process ID on the same host as "target_task". 
+ * + * Only permitted to privileged processes, or processes + * with the same user ID. + */ +kern_return_t +task_for_pid(target_tport, pid, t) + mach_port_t target_tport; + int pid; + mach_port_t *t; +{ + struct proc *p; + struct proc *p1; + task_t t1; + mach_port_t tret; + extern task_t port_name_to_task(mach_port_t tp); + void * sright; + int error = 0; + boolean_t funnel_state; + + t1 = port_name_to_task(target_tport); + if (t1 == TASK_NULL) { + (void ) copyout((char *)&t1, (char *)t, sizeof(mach_port_t)); + error = KERN_FAILURE; + goto tfpout; + } + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + restart: + p1 = get_bsdtask_info(t1); + if ( + ((p = pfind(pid)) != (struct proc *) 0) + && (p1 != (struct proc *) 0) + && ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) + || !(suser(p1->p_ucred, &p1->p_acflag))) + && (p->p_stat != SZOMB) + ) { + if (p->task != TASK_NULL) { + if (!task_reference_try(p->task)) { + mutex_pause(); /* temp loss of funnel */ + goto restart; + } + sright = convert_task_to_port(p->task); + tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task())); + } else + tret = MACH_PORT_NULL; + (void ) copyout((char *)&tret, (char *) t, sizeof(mach_port_t)); + task_deallocate(t1); + error = KERN_SUCCESS; + goto tfpout; + } + task_deallocate(t1); + tret = MACH_PORT_NULL; + (void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t)); + error = KERN_FAILURE; +tfpout: + thread_funnel_set(kernel_flock, funnel_state); + return(error); +} + + +struct load_shared_file_args { + char *filename; + caddr_t mfa; + u_long mfs; + caddr_t *ba; + int map_cnt; + sf_mapping_t *mappings; + int *flags; +}; + + +int +load_shared_file( + struct proc *p, + struct load_shared_file_args *uap, + register *retval) +{ + caddr_t mapped_file_addr=uap->mfa; + u_long mapped_file_size=uap->mfs; + caddr_t *base_address=uap->ba; + int map_cnt=uap->map_cnt; + sf_mapping_t *mappings=uap->mappings; + char *filename=uap->filename; + int *flags=uap->flags; + struct 
vnode *vp = 0; + struct nameidata nd, *ndp; + char *filename_str; + register int error; + kern_return_t kr; + + struct vattr vattr; + void *object; + void *file_object; + sf_mapping_t *map_list; + caddr_t local_base; + int local_flags; + int caller_flags; + int i; + vm_size_t dummy; + kern_return_t kret; + + shared_region_mapping_t shared_region; + struct shared_region_task_mappings task_mapping_info; + shared_region_mapping_t next; + + ndp = &nd; + + unix_master(); + + /* Retrieve the base address */ + if (error = copyin(base_address, &local_base, sizeof (caddr_t))) { + goto lsf_bailout; + } + if (error = copyin(flags, &local_flags, sizeof (int))) { + goto lsf_bailout; + } + caller_flags = local_flags; + kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str, + (vm_size_t)(MAXPATHLEN)); + if (kret != KERN_SUCCESS) { + error = ENOMEM; + goto lsf_bailout; + } + kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list, + (vm_size_t)(map_cnt*sizeof(sf_mapping_t))); + if (kret != KERN_SUCCESS) { + kmem_free(kernel_map, (vm_offset_t)filename_str, + (vm_size_t)(MAXPATHLEN)); + error = ENOMEM; + goto lsf_bailout; + } + + if (error = + copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) { + goto lsf_bailout_free; + } + + if (error = copyinstr(filename, + filename_str, MAXPATHLEN, (size_t *)&dummy)) { + goto lsf_bailout_free; + } + + /* + * Get a vnode for the target file + */ + NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, + filename_str, p); + + if ((error = namei(ndp))) { + goto lsf_bailout_free; + } + + vp = ndp->ni_vp; + + if (vp->v_type != VREG) { + error = EINVAL; + goto lsf_bailout_free_vput; + } + + UBCINFOCHECK("load_shared_file", vp); + + if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) { + goto lsf_bailout_free_vput; + } + + + file_object = ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT)); + if (file_object == (void *)NULL) { + error = EINVAL; + goto lsf_bailout_free_vput; + } + +#ifdef notdef + if(vattr.va_size != mapped_file_size) { + 
error = EINVAL; + goto lsf_bailout_free_vput; + } +#endif + + vm_get_shared_region(current_task(), &shared_region); + task_mapping_info.self = (vm_offset_t)shared_region; + + shared_region_mapping_info(shared_region, + &(task_mapping_info.text_region), + &(task_mapping_info.text_size), + &(task_mapping_info.data_region), + &(task_mapping_info.data_size), + &(task_mapping_info.region_mappings), + &(task_mapping_info.client_base), + &(task_mapping_info.alternate_base), + &(task_mapping_info.alternate_next), + &(task_mapping_info.flags), &next); + + /* This is a work-around to allow executables which have been */ + /* built without knowledge of the proper shared segment to */ + /* load. This code has been architected as a shared region */ + /* handler, the knowledge of where the regions are loaded is */ + /* problematic for the extension of shared regions as it will */ + /* not be easy to know what region an item should go into. */ + /* The code below however will get around a short term problem */ + /* with executables which believe they are loading at zero. */ + + { + if (((unsigned int)local_base & + (~(task_mapping_info.text_size - 1))) != + task_mapping_info.client_base) { + if(local_flags & ALTERNATE_LOAD_SITE) { + local_base = (caddr_t)( + (unsigned int)local_base & + (task_mapping_info.text_size - 1)); + local_base = (caddr_t)((unsigned int)local_base + | task_mapping_info.client_base); + } else { + error = EINVAL; + goto lsf_bailout_free_vput; + } + } + } + + /* load alternate regions if the caller has requested. 
*/ + /* Note: the new regions are "clean slates" */ + + if (local_flags & NEW_LOCAL_SHARED_REGIONS) { + + shared_region_mapping_t new_shared_region; + shared_region_mapping_t old_shared_region; + struct shared_region_task_mappings old_info; + struct shared_region_task_mappings new_info; + + if(shared_file_create_system_region(&new_shared_region)) { + error = ENOMEM; + goto lsf_bailout_free_vput; + } + vm_get_shared_region(current_task(), &old_shared_region); + + old_info.self = (vm_offset_t)old_shared_region; + shared_region_mapping_info(old_shared_region, + &(old_info.text_region), + &(old_info.text_size), + &(old_info.data_region), + &(old_info.data_size), + &(old_info.region_mappings), + &(old_info.client_base), + &(old_info.alternate_base), + &(old_info.alternate_next), + &(old_info.flags), &next); + new_info.self = (vm_offset_t)new_shared_region; + shared_region_mapping_info(new_shared_region, + &(new_info.text_region), + &(new_info.text_size), + &(new_info.data_region), + &(new_info.data_size), + &(new_info.region_mappings), + &(new_info.client_base), + &(new_info.alternate_base), + &(new_info.alternate_next), + &(new_info.flags), &next); + if (vm_map_region_replace(current_map(), old_info.text_region, + new_info.text_region, old_info.client_base, + old_info.client_base+old_info.text_size)) { + panic("load_shared_file: shared region mis-alignment"); + shared_region_mapping_dealloc(new_shared_region); + error = EINVAL; + goto lsf_bailout_free_vput; + } + if(vm_map_region_replace(current_map(), old_info.data_region, + new_info.data_region, + old_info.client_base + old_info.text_size, + old_info.client_base + + old_info.text_size + old_info.data_size)) { + panic("load_shared_file: shared region mis-alignment 1"); + shared_region_mapping_dealloc(new_shared_region); + error = EINVAL; + goto lsf_bailout_free_vput; + } + vm_set_shared_region(current_task(), new_shared_region); + task_mapping_info = new_info; + shared_region_mapping_dealloc(old_shared_region); + } + 
+ if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr, + mapped_file_size, + (vm_offset_t *)&local_base, + map_cnt, map_list, file_object, + &task_mapping_info, &local_flags))) { + switch (kr) { + case KERN_FAILURE: + error = EINVAL; + break; + case KERN_INVALID_ARGUMENT: + error = EINVAL; + break; + case KERN_INVALID_ADDRESS: + error = EACCES; + break; + case KERN_PROTECTION_FAILURE: + /* save EAUTH for authentication in this */ + /* routine */ + error = EPERM; + break; + case KERN_NO_SPACE: + error = ENOMEM; + break; + default: + error = EINVAL; + }; + if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) { + printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_object 0x%x\n", error, local_base, map_cnt, file_object); + for(i=0; iba; + int map_cnt=uap->map_cnt; + sf_mapping_t *mappings=uap->mappings; + register int error; + kern_return_t kr; + + sf_mapping_t *map_list; + caddr_t local_base; + vm_offset_t map_address; + int i; + kern_return_t kret; + + + + + unix_master(); + + /* Retrieve the base address */ + if (error = copyin(base_address, &local_base, sizeof (caddr_t))) { + goto rsf_bailout; + } + + if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK) + != GLOBAL_SHARED_TEXT_SEGMENT) { + error = EINVAL; + goto rsf_bailout; + } + + kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list, + (vm_size_t)(map_cnt*sizeof(sf_mapping_t))); + if (kret != KERN_SUCCESS) { + error = ENOMEM; + goto rsf_bailout; + } + + if (error = + copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) { + + kmem_free(kernel_map, (vm_offset_t)map_list, + (vm_size_t)(map_cnt*sizeof(sf_mapping_t))); + goto rsf_bailout; + } + for (i = 0; i +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +unsigned int vp_pagein=0; +unsigned int 
vp_pgodirty=0; +unsigned int vp_pgoclean=0; +unsigned int dp_pgouts=0; /* Default pager pageouts */ +unsigned int dp_pgins=0; /* Default pager pageins */ + +pager_return_t +vnode_pageout(struct vnode *vp, + upl_t upl, + vm_offset_t upl_offset, + vm_object_offset_t f_offset, + vm_size_t size, + int flags, + int *errorp) +{ + int result = PAGER_SUCCESS; + struct proc *p = current_proc(); + int error = 0; + int vp_size = 0; + int blkno=0, s; + int cnt, isize; + int pg_index; + int offset; + struct buf *bp; + boolean_t funnel_state; + int haveupl=0; + void * object; + upl_page_info_t *pl; + upl_t vpupl; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + if (upl != (upl_t)NULL) { + haveupl = 1; + } + isize = (int)size; + + if (isize < 0) + panic("-ve count in vnode_pageout"); + if (isize == 0) + panic("vnode_pageout: size == 0\n"); + + UBCINFOCHECK("vnode_pageout", vp); + + if (UBCINVALID(vp)) { + result = PAGER_ERROR; + error = PAGER_ERROR; + goto out; + } + if (haveupl) { + /* + * This is a pageout form the Default pager, + * just go ahead and call VOP_PAGEOUT + */ + dp_pgouts++; + if (error = VOP_PAGEOUT(vp, upl, upl_offset, + (off_t)f_offset,(size_t)size, p->p_ucred, flags)) { + result = PAGER_ERROR; + error = PAGER_ERROR; + } + goto out; + } + + object = ubc_getobject(vp, UBC_PAGINGOP|UBC_NOREACTIVATE); + if (object == NULL) + panic("vnode_pageout: null object"); + vp_size = ubc_getsize(vp); + + vm_fault_list_request(object, + f_offset, isize, &vpupl, NULL, 0, + UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_COPYOUT_FROM | UPL_SET_INTERNAL); + if (vpupl == (upl_t) 0) + panic("vnode_pageout: null upl returned"); + + pl = UPL_GET_INTERNAL_PAGE_LIST(vpupl); + + if (vp_size == 0) { + + while (isize) { + blkno = ubc_offtoblk(vp, (off_t)f_offset); +start0: + if (bp = incore(vp, blkno)) { + if (ISSET(bp->b_flags, B_BUSY)) { + SET(bp->b_flags, B_WANTED); + error = tsleep(bp, (PRIBIO + 1), "vnpgout", 0); + goto start0; + } else { + bremfree(bp); + SET(bp->b_flags, 
(B_BUSY|B_INVAL)); + } + } + if (bp) + brelse(bp); + f_offset += PAGE_SIZE; + isize -= PAGE_SIZE; + } + kernel_upl_commit_range(vpupl, 0, size, UPL_COMMIT_FREE_ON_EMPTY, + pl, MAX_UPL_TRANSFER); + + error = 0; + goto out; + } + pg_index = 0; + offset = 0; + + while (isize) { + int xsize; + int num_of_pages; + + if ( !upl_valid_page(pl, pg_index)) { + kernel_upl_abort_range(vpupl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + + offset += PAGE_SIZE; + isize -= PAGE_SIZE; + pg_index++; + + continue; + } + if ( !upl_dirty_page(pl, pg_index)) { + /* + * if the page is not dirty and reached here it is + * marked precious or it is due to invalidation in + * memory_object_lock request as part of truncation + * We also get here from vm_object_terminate() + * So all you need to do in these + * cases is to invalidate incore buffer if it is there + */ + blkno = ubc_offtoblk(vp, (off_t)(f_offset + offset)); + s = splbio(); + vp_pgoclean++; +start: + if (bp = incore(vp, blkno)) { + if (ISSET(bp->b_flags, B_BUSY)) { + SET(bp->b_flags, B_WANTED); + error = tsleep(bp, (PRIBIO + 1), "vnpgout", 0); + goto start; + } else { + bremfree(bp); + SET(bp->b_flags, (B_BUSY|B_INVAL)); + } + } + splx(s); + if (bp) + brelse(bp); + + kernel_upl_commit_range(vpupl, offset, PAGE_SIZE, + UPL_COMMIT_FREE_ON_EMPTY, pl, MAX_UPL_TRANSFER); + + offset += PAGE_SIZE; + isize -= PAGE_SIZE; + pg_index++; + + continue; + } + vp_pgodirty++; + + num_of_pages = 1; + xsize = isize - PAGE_SIZE; + + while (xsize) { + if ( !upl_valid_page(pl, pg_index + num_of_pages)) + break; + if ( !upl_dirty_page(pl, pg_index + num_of_pages)) + break; + num_of_pages++; + xsize -= PAGE_SIZE; + } + xsize = num_of_pages * PAGE_SIZE; + + /* By defn callee will commit or abort upls */ + if (error = VOP_PAGEOUT(vp, vpupl, (vm_offset_t) offset, + (off_t)(f_offset + offset), + xsize, p->p_ucred, flags & ~UPL_NOCOMMIT)) { + result = PAGER_ERROR; + error = PAGER_ERROR; + } + offset += xsize; + isize -= xsize; + pg_index += num_of_pages; 
+ } +out: + if (errorp) + *errorp = result; + + thread_funnel_set(kernel_flock, funnel_state); + + return (error); +} + + +pager_return_t +vnode_pagein( + struct vnode *vp, + upl_t pl, + vm_offset_t pl_offset, + vm_object_offset_t f_offset, + vm_size_t size, + int flags, + int *errorp) +{ + int result = PAGER_SUCCESS; + struct proc *p = current_proc(); + int error = 0; + boolean_t funnel_state; + int haveupl=0; + void * object; + upl_t vpupl; + unsigned int ioaddr; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + +#if 0 + if(pl->page_list.npages >1 ) + panic("vnode_pageout: Can't handle more than one page"); +#endif /* 0 */ + + if (pl != (upl_t)NULL) { + haveupl = 1; + } + UBCINFOCHECK("vnode_pagein", vp); + + if (UBCINVALID(vp)) { + result = PAGER_ERROR; + error = PAGER_ERROR; + goto out; + } + + if (haveupl) { + dp_pgins++; + if (error = VOP_PAGEIN(vp, pl, pl_offset, (off_t)f_offset, + size,p->p_ucred, flags)) { + result = PAGER_ERROR; + } + } else { + + object = ubc_getobject(vp, UBC_PAGINGOP|UBC_NOREACTIVATE); + if (object == (void *)NULL) + panic("vnode_pagein: null object"); + vm_fault_list_request(object, f_offset, size, &vpupl, NULL, 0, + UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL); + + if (vpupl == (upl_t) 0) + panic("vnode_pagein: null upl returned"); + + vp_pagein++; + + /* By defn callee will commit or abort upls */ + if (error = VOP_PAGEIN(vp, vpupl, (vm_offset_t) 0, + (off_t)f_offset, size,p->p_ucred, flags & ~UPL_NOCOMMIT)) { + result = PAGER_ERROR; + error = PAGER_ERROR; + } + } +out: + if (errorp) + *errorp = result; + thread_funnel_set(kernel_flock, funnel_state); + + return (error); +} + +void +vnode_pager_shutdown() +{ + int i; + extern struct bs_map bs_port_table[]; + struct vnode *vp; + + for(i = 0; i < MAX_BACKING_STORE; i++) { + vp = (struct vnode *)(bs_port_table[i]).vp; + if (vp) { + (bs_port_table[i]).vp = 0; + ubc_rele(vp); + /* get rid of macx_swapon() namei() reference */ + vrele(vp); + + /* get rid of macx_swapon() 
"extra" reference */ + vrele(vp); + } + } +} + +void * +upl_get_internal_page_list(upl_t upl) +{ + return(UPL_GET_INTERNAL_PAGE_LIST(upl)); + +} + diff --git a/bsd/vm/vnode_pager.h b/bsd/vm/vnode_pager.h new file mode 100644 index 000000000..aaa8146a2 --- /dev/null +++ b/bsd/vm/vnode_pager.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +#ifndef _VNODE_PAGER_ +#define _VNODE_PAGER_ 1 + +#include +#include +#include + +#ifdef KERNEL + +#include +#include + +void vnode_pager_init(); + +vm_pager_t vnode_pager_setup(); +boolean_t vnode_has_page(); +boolean_t vnode_pager_active(); + +/* + * Vstructs are the internal (to us) description of a unit of backing store. + * The are the link between memory objects and the backing store they represent. + * For the vnode pager, backing store comes in two flavors: normal files and + * swap files. 
+ * + * For objects that page to and from normal files (e.g. objects that represent + * program text segments), we maintain some simple parameters that allow us to + * access the file's contents directly through the vnode interface. + * + * Data for objects without associated vnodes is maintained in the swap files. + * Each object that uses one of these as backing store has a vstruct indicating + * the swap file of preference (vs_pf) and a mapping between contiguous object + * offsets and swap file offsets (vs_pmap). Each entry in this mapping specifies + * the pager file to use, and the offset of the page in that pager file. These + * mapping entries are of type pfMapEntry. + */ + +/* + * Pager file structure. One per swap file. + */ +typedef struct pager_file { + queue_chain_t pf_chain; /* link to other paging files */ + struct vnode *pf_vp; /* vnode of paging file */ + u_int pf_count; /* Number of vstruct using this file */ + u_char *pf_bmap; /* Map of used blocks */ + long pf_npgs; /* Size of file in pages */ + long pf_pfree; /* Number of unused pages */ + long pf_lowat; /* Low water page */ + long pf_hipage; /* Highest page allocated */ + long pf_hint; /* Lowest page unallocated */ + char *pf_name; /* Filename of this file */ + boolean_t pf_prefer; + int pf_index; /* index into the pager_file array */ + void * pf_lock; /* Lock for alloc and dealloc */ +} *pager_file_t; + +#define PAGER_FILE_NULL (pager_file_t) 0 + +#define MAXPAGERFILES 16 + +#define MAX_BACKING_STORE 100 + +struct bs_map { + struct vnode *vp; + void *bs; +}; + + + +/* + * Pager file data structures. + */ +#define INDEX_NULL 0 +typedef struct { + unsigned int index:8; /* paging file this block is in */ + unsigned int offset:24; /* page number where block resides */ +} pf_entry; + +typedef enum { + IS_INODE, /* Local disk */ + IS_RNODE /* NFS */ + } vpager_fstype; + +/* + * Basic vnode pager structure. One per object, backing-store pair. 
+ */ +typedef struct vstruct { + boolean_t is_device; /* Must be first - see vm_pager.h */ + pager_file_t vs_pf; /* Pager file this uses */ + pf_entry **vs_pmap; /* Map of pages into paging file */ + unsigned int + /* boolean_t */ vs_swapfile:1; /* vnode is a swapfile */ + short vs_count; /* use count */ + int vs_size; /* size of this chunk in pages*/ + struct vnode *vs_vp; /* vnode to page to */ +} *vnode_pager_t; + +#define VNODE_PAGER_NULL ((vnode_pager_t) 0) + + + +pager_return_t pager_vnode_pagein(); +pager_return_t pager_vnode_pageout(); +pager_return_t vnode_pagein(); +pager_return_t vnode_pageout(); + +#endif /* KERNEL */ + +#endif /* _VNODE_PAGER_ */ diff --git a/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.cpp b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.cpp new file mode 100644 index 000000000..fab734b9c --- /dev/null +++ b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.cpp @@ -0,0 +1,1037 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * AppleATAPIIX.cpp - ATA controller driver for Intel PIIX/PIIX3/PIIX4. + * + * HISTORY + * + */ + +#include +#include +#include +#include "AppleATAPIIX.h" +#include "AppleATAPIIXTiming.h" + +extern pmap_t kernel_pmap; // for pmap_extract() + +// Resources shared between the two IDE channels are protected +// by this mutex. +// +static IOLock * gPIIXLock = 0; +#define PIIX_LOCK IOLockLock(gPIIXLock) +#define PIIX_UNLOCK IOLockUnlock(gPIIXLock) + +#define IOREG(x) (ioBMRange + PIIX_IO_ ## x) + +#define CHECK_UNIT(drv) assert(drv < 2) + +#ifdef DEBUG_XXX +#define DLOG(fmt, args...) IOLog(fmt, ## args) +#else +#define DLOG(fmt, args...) +#endif + +//-------------------------------------------------------------------------- +// Metaclass macro. +// +#undef super +#define super IOATAStandardDriver + +OSDefineMetaClassAndStructorsWithInit( AppleATAPIIX, IOATAStandardDriver, + AppleATAPIIX::initialize() ) + +//-------------------------------------------------------------------------- +// PIIX class initializer. +// +void AppleATAPIIX::initialize() +{ + gPIIXLock = IOLockAlloc(); + assert(gPIIXLock); +} + +//-------------------------------------------------------------------------- +// Defines a table of supported PIIX device types, listing their +// PCI ID, and a name string. Also supply some utility functions +// to locate a table entry based on an arbitrary PCI ID. +// +static struct { + UInt32 CFID; + const char * name; +} piixDeviceTable[] = {{ PCI_ID_PIIX, "PIIX" }, + { PCI_ID_PIIX3, "PIIX3" }, + { PCI_ID_PIIX4, "PIIX4" }, + { PCI_ID_ICH, "ICH" }, + { PCI_ID_ICH0, "ICH0" }, + { PCI_ID_ICH2_M, "ICH2-M" }, + { PCI_ID_ICH2, "ICH2" }, + { PCI_ID_NONE, NULL }}; + +static const char * +PIIXGetName(UInt32 pciID) +{ + for (int i = 0; piixDeviceTable[i].name; i++) { + if (piixDeviceTable[i].CFID == pciID) + return piixDeviceTable[i].name; + } + return 0; +} + +static bool +PIIXVerifyID(UInt32 pciID) +{ + return (PIIXGetName(pciID) == 0) ? 
false : true; +} + +//-------------------------------------------------------------------------- +// A hack to modify our PCI nub to have two interrupts. +// This code was borrowed from the setupIntelPIC() function +// in iokit/Families/IOPCIBus/IOPCIBridge.cpp. +// +static void setupProviderInterrupts(IOPCIDevice * nub, long irq_p, long irq_s) +{ + OSArray * controller; + OSArray * specifier; + OSData * tmpData; + extern OSSymbol * gIntelPICName; + + do { + // Create the interrupt specifer array. + specifier = OSArray::withCapacity(2); + if (!specifier) + break; + + tmpData = OSData::withBytes(&irq_p, sizeof(irq_p)); + if (tmpData) { + specifier->setObject(tmpData); + tmpData->release(); + } + tmpData = OSData::withBytes(&irq_s, sizeof(irq_s)); + if (tmpData) { + specifier->setObject(tmpData); + tmpData->release(); + } + + controller = OSArray::withCapacity(2); + if (controller) { + controller->setObject(gIntelPICName); + controller->setObject(gIntelPICName); + + // Put the two arrays into the property table. + nub->setProperty(gIOInterruptControllersKey, controller); + controller->release(); + } + nub->setProperty(gIOInterruptSpecifiersKey, specifier); + specifier->release(); + + } while( false ); +} + +//-------------------------------------------------------------------------- +// A static member function that returns the IDE channel for the +// current driver instance, and also registers the interrupts in +// the IOPCIDevice nub. +// +int AppleATAPIIX::PIIXGetChannel(IOPCIDevice * provider) +{ + static bool primaryRegistered = false; + int rc; + extern OSSymbol * gIntelPICName; + + PIIX_LOCK; + + if (primaryRegistered == false) { + rc = PIIX_CHANNEL_PRIMARY; + primaryRegistered = true; + + // Is this necessary? 
+ waitForService(resourceMatching(gIntelPICName)); + + setupProviderInterrupts(provider, PIIX_P_IRQ, PIIX_S_IRQ); + } + else { + rc = PIIX_CHANNEL_SECONDARY; + } + + PIIX_UNLOCK; + + if (rc == PIIX_CHANNEL_SECONDARY) IOSleep(20); + + return rc; +} + +//-------------------------------------------------------------------------- +// Private function: _getIDERanges +// +// Setup the variables that stores the start of the Command and Control +// block in I/O space. The variable 'channel' must have been previously +// set. These ISA I/O ranges are implicit and does not show up in PCI +// config space. +// +bool AppleATAPIIX::_getIDERanges(IOPCIDevice * provider) +{ + ioCmdRange = (channel == PIIX_CHANNEL_PRIMARY) ? + PIIX_P_CMD_ADDR : PIIX_S_CMD_ADDR; + + ioCtlRange = (channel == PIIX_CHANNEL_PRIMARY) ? + PIIX_P_CTL_ADDR : PIIX_S_CTL_ADDR; + + DLOG("%s: ioCmdRange - %04x\n", getName(), ioCmdRange); + DLOG("%s: ioCtlRange - %04x\n", getName(), ioCtlRange); + + return true; +} + +//-------------------------------------------------------------------------- +// Private function: _getBMRange +// +// Determine the start of the I/O mapped Bus-Master registers. +// This range is defined by PCI config space register PIIX_PCI_BMIBA. +// +bool AppleATAPIIX::_getBMRange(IOPCIDevice * provider) +{ + UInt32 bmiba; + + bmiba = provider->configRead32(PIIX_PCI_BMIBA); + if ((bmiba & PIIX_PCI_BMIBA_RTE) == 0) { + IOLog("%s: PCI memory range 0x%02x (0x%08lx) is not an I/O range\n", + getName(), PIIX_PCI_BMIBA, bmiba); + return false; + } + + bmiba &= PIIX_PCI_BMIBA_MASK; // get the address portion + + // If bmiba is zero, it is likely that the user has elected to + // turn off PCI IDE support in the BIOS. 
+ // + if (bmiba == 0) + return false; + + if (channel == PIIX_CHANNEL_SECONDARY) + bmiba += PIIX_IO_BM_OFFSET; + + ioBMRange = (UInt16) bmiba; + + DLOG("%s: ioBMRange - %04x\n", getName(), ioBMRange); + + return true; +} + +//-------------------------------------------------------------------------- +// Private function: _resetTimings() +// +// Reset all timing registers to the slowest (most compatible) timing. +// UDMA modes are disabled. We take a lock to prevent the other IDE +// channel from modifying the shared PCI config space. +// +bool AppleATAPIIX::_resetTimings() +{ + union { + UInt32 b32; + struct { + UInt16 pri; + UInt16 sec; + } b16; + } timing; + + UInt32 udmaControl; + + PIIX_LOCK; + + timing.b32 = provider->configRead32(PIIX_PCI_IDETIM); + udmaControl = provider->configRead32(PIIX_PCI_UDMACTL); + + // Set slowest timing, and disable UDMA. Only modify the flags + // associated with the local channel. + // + switch (channel) { + case PIIX_CHANNEL_PRIMARY: + timing.b16.pri &= PIIX_PCI_IDETIM_IDE; + udmaControl &= ~(PIIX_PCI_UDMACTL_PSDE0 | PIIX_PCI_UDMACTL_PSDE1); + break; + + case PIIX_CHANNEL_SECONDARY: + timing.b16.sec &= PIIX_PCI_IDETIM_IDE; + udmaControl &= ~(PIIX_PCI_UDMACTL_SSDE0 | PIIX_PCI_UDMACTL_SSDE1); + break; + } + + provider->configWrite32(PIIX_PCI_UDMACTL, udmaControl); + provider->configWrite32(PIIX_PCI_IDETIM, timing.b32); + + // FIXME + // No support for ATA/66 or ATA/100 modes. Set this register + // to 0 (new in ICH2) to disable those faster timings. + // + provider->configWrite32(PIIX_PCI_IDECONFIG, 0); + + PIIX_UNLOCK; + + return true; +} + +//-------------------------------------------------------------------------- +// Private function: _allocatePRDTable() +// +// Allocate the physical region descriptor (PRD) table. The physical +// address of this table is stored in 'prdTablePhys'. Look at Intel +// documentation for the alignment requirements. 
+// +bool AppleATAPIIX::_allocatePRDTable() +{ + prdTable = (prdEntry_t *) IOMallocAligned(PRD_TABLE_SIZE, PAGE_SIZE); + if (!prdTable) + return false; + + prdTablePhys = (UInt32) pmap_extract(kernel_pmap, (vm_offset_t) prdTable); + + bzero(prdTable, PRD_TABLE_SIZE); + + return true; +} + +//-------------------------------------------------------------------------- +// Private function: _deallocatePRDTable() +// +void AppleATAPIIX::_deallocatePRDTable() +{ + IOFreeAligned(prdTable, PRD_TABLE_SIZE); + prdTable = NULL; + prdTablePhys = 0; +} + +//-------------------------------------------------------------------------- +// Function inherited from IOATAController. +// +// Configure the driver/controller. This is the first function called by +// our superclass, in its start() function, to initialize the controller +// hardware. +// +bool +AppleATAPIIX::configure(IOService * forProvider, + ATAControllerInfo * controllerInfo) +{ + UInt32 reg; + +// IOSleep(1000); + + provider = OSDynamicCast(IOPCIDevice, forProvider); + if (!provider) + return false; + + // Superclass performs an exclusive open on the provider, we close + // it to allow more than one instance of this driver to attach to + // the same PCI nub. We should maintain an non-exclusive open on + // the provider. + // + provider->close(this); + + // Determine the type of PIIX controller. Save the controller's + // PCI ID in pciCFID. + // + pciCFID = provider->configRead32(PIIX_PCI_CFID); + if (PIIXVerifyID(pciCFID) == false) { + IOLog("%s: Unknown PCI IDE controller (0x%08lx)\n", + getName(), + pciCFID); + return false; + } + + // Determine our IDE channel, primary or secondary. + // + channel = PIIXGetChannel(provider); + + _getIDERanges(provider); + + IOLog("%s: %s %s IDE controller, 0x%x, irq %d\n", + getName(), + (channel == PIIX_CHANNEL_PRIMARY) ? "Primary" : "Secondary", + PIIXGetName(pciCFID), + ioCmdRange, + (channel == PIIX_CHANNEL_PRIMARY) ? 
PIIX_P_IRQ : PIIX_S_IRQ); + + // Check the I/O Space Enable bit in the PCI command register. + // This is the master enable bit for the PIIX controller. + // Each IDE channel also has its own enable bit, which is + // checked later. + // + reg = provider->configRead32(PIIX_PCI_PCICMD); + if ((reg & PIIX_PCI_PCICMD_IOSE) == 0) { + IOLog("%s: PCI IDE controller is not enabled\n", getName()); + return false; + } + + // Set BME bit to enable bus-master. + // + if ((reg & PIIX_PCI_PCICMD_BME) == 0) { + reg |= PIIX_PCI_PCICMD_BME; + PIIX_LOCK; + provider->configWrite32(PIIX_PCI_PCICMD, reg); + PIIX_UNLOCK; + } + + // Fetch the corresponding primary/secondary IDETIM register and + // check the individual channel enable bit. + // + reg = provider->configRead32(PIIX_PCI_IDETIM); + if (channel == PIIX_CHANNEL_SECONDARY) + reg >>= 16; // PIIX_PCI_IDETIM + 2 for secondary channel + + if ((reg & PIIX_PCI_IDETIM_IDE) == 0) { + IOLog("%s: %s PCI IDE channel is not enabled\n", + getName(), + (channel == PIIX_CHANNEL_PRIMARY) ? "Primary" : "Secondary"); + return false; + } + + // Locate and add the I/O mapped bus-master registers to + // ioRange[] array. + // + if (_getBMRange(provider) == false) { + IOLog("%s: Bus master I/O range is invalid\n", getName()); + return false; + } + + // Allocate page-aligned memory for the PRD table. + // + if (_allocatePRDTable() == false) { + IOLog("%s: unable to allocate descriptor table\n", getName()); + return false; + } + + // Allocate a cursor object to generate the scatter-gather list + // for each transfer request. Maximum segment size is set to 64K. + // However, there is no way to indicate our requirement that each + // memory segment cannot cross a 64K boundary. We have to do this + // manually. + // + prdCursor = IOLittleMemoryCursor::withSpecification(64 * 1024, 0xffffffff); + if (prdCursor == 0) + return false; + + // Attach an interruptEventSource to handle HW interrupts. 
+ // Must do this after PIIXGetChannel(), since thats where the + // provider's interrupt property is set by setupProviderInterrupts(). + + interruptEventSource = IOInterruptEventSource::interruptEventSource( + (OSObject *) this, + (IOInterruptEventAction) &AppleATAPIIX::interruptOccurred, + (IOService *) provider, + (channel == PIIX_CHANNEL_PRIMARY) ? 0 : 1); + if (interruptEventSource == 0) { + IOLog("%s: unable to create an IOInterruptEventSource object\n", + getName()); + return false; + } + + disableControllerInterrupts(); + + getWorkLoop()->addEventSource(interruptEventSource); + + // Revert to default (compatible) timing. + // + _resetTimings(); + + controllerInfo->maxDevicesSupported = 2; + controllerInfo->devicePrivateDataSize = 0; + controllerInfo->commandPrivateDataSize = 0; + controllerInfo->disableCancelCommands = false; + + + DLOG("AppleATAPIIX::%s() completed successfully\n", __FUNCTION__); + + return true; +} + +//-------------------------------------------------------------------------- +// +// +bool AppleATAPIIX::provideProtocols(enum ATAProtocol * protocolsSupported) +{ + return false; +} + +//-------------------------------------------------------------------------- +// +// +bool AppleATAPIIX::provideTimings(UInt32 * numTimings, + ATATiming * timingsSupported) +{ + return false; +} + +//-------------------------------------------------------------------------- +// Determine the timing selection based on the ATATiming structure given. 
+// +bool AppleATAPIIX::calculateTiming(UInt32 unit, ATATiming * pTiming) +{ + int i; + PIIXProtocol protocol = ataToPIIXProtocol(pTiming->timingProtocol); + + DLOG("AppleATAPIIX::%s() - unit:%ld protocol:%d minCycles:%ld\n", + __FUNCTION__, unit, protocol, pTiming->minDataCycle); + + CHECK_UNIT(unit); + + timings[unit].validTimings[protocol] = 0; + + switch (protocol) { + + case kPIIXProtocolPIO: + + for (i = 0; i < PIIXPIOTimingTableSize; i++) + { + if (PIIXPIOTimingTable[i].pioMode == _NVM_) + continue; + + if (PIIXPIOTimingTable[i].cycle < pTiming->minDataCycle) + break; + + timings[unit].validTimings[protocol] = i; + } + break; + + case kPIIXProtocolDMA: + + for (i = 0; i < PIIXPIOTimingTableSize; i++) + { + if (PIIXPIOTimingTable[i].mwDMAMode == _NVM_) + continue; + + if (PIIXPIOTimingTable[i].cycle < pTiming->minDataCycle) + break; + + timings[unit].validTimings[protocol] = i; + } + break; + + case kPIIXProtocolUDMA33: + + for (i = 0; i < PIIXUDMATimingTableSize; i++) + { + if (PIIXUDMATimingTable[i].strobe < pTiming->minDataCycle) + break; + + timings[unit].validTimings[protocol] = i; + } + break; + + default: + return false; + } + + timings[unit].validFlag |= (1 << protocol); + + return true; +} + +//-------------------------------------------------------------------------- +// Setup the timing register for the given timing protocol. +// +bool AppleATAPIIX::selectTiming(UInt32 unit, + ATATimingProtocol timingProtocol) +{ + bool ret = false; + UInt8 pciConfig[256]; + PIIXProtocol protocol = ataToPIIXProtocol(timingProtocol); + + DLOG("AppleATAPIIX::%s() - unit:%ld protocol:%d\n", + __FUNCTION__, unit, protocol); + + CHECK_UNIT(unit); + + PIIX_LOCK; + + do { + if (protocol >= kPIIXProtocolLast) + break; + + if (PIIX_PROTOCOL_IS_VALID(protocol) == 0) { + + // superclass error, calculateTiming() was not called + // before calling selectTiming(). 
+ + IOLog("%s: timing protocol selected is invalid\n", getName()); + break; + } + + if (!_readPCIConfigSpace(pciConfig) || + !_selectTiming(unit, protocol, pciConfig) || + !_writePCIConfigSpace(pciConfig)) + break; + + ret = true; + } + while (0); + + PIIX_UNLOCK; + + return ret; +} + +//-------------------------------------------------------------------------- +// Setup the timing registers. +// +bool AppleATAPIIX::_selectTiming(UInt32 unit, + PIIXProtocol protocol, + UInt8 * pciConfig) +{ + UInt8 isp, rtc; + UInt8 index, dma, pio; + bool dmaActive; + bool pioActive; + bool useCompatiblePIOTiming = false; + bool ret = true; + UInt16 * idetim; + UInt8 * sidetim = (UInt8 *) &pciConfig[PIIX_PCI_SIDETIM]; + UInt8 * udmactl = (UInt8 *) &pciConfig[PIIX_PCI_UDMACTL]; + UInt16 * udmatim = (UInt16 *) &pciConfig[PIIX_PCI_UDMATIM]; + + idetim = (channel == PIIX_CHANNEL_PRIMARY) ? + (UInt16 *) &pciConfig[PIIX_PCI_IDETIM] : + (UInt16 *) &pciConfig[PIIX_PCI_IDETIM_S]; + + switch (protocol) { + case kPIIXProtocolUDMA66: + // Not yet! + return false; + + case kPIIXProtocolUDMA33: + if ((pciCFID == PCI_ID_PIIX) || (pciCFID == PCI_ID_PIIX3)) { + // Only PIIX4 (and newer devices) supports UDMA. 
+ return false; + } + PIIX_DEACTIVATE_PROTOCOL(kPIIXProtocolDMA); + break; + + case kPIIXProtocolDMA: + PIIX_DEACTIVATE_PROTOCOL(kPIIXProtocolUDMA33); + break; + + case kPIIXProtocolPIO: + break; + + default: + IOLog("%s: PIIX protocol not handled (%d)\n", getName(), + protocol); + return false; + } + PIIX_ACTIVATE_PROTOCOL(protocol); + + + if (PIIX_PROTOCOL_IS_ACTIVE(kPIIXProtocolUDMA33)) { + + index = PIIX_GET_ACTIVE_TIMING(kPIIXProtocolUDMA33); + + if (unit == 0) { + if (channel == PIIX_CHANNEL_PRIMARY) { + *udmactl |= PIIX_PCI_UDMACTL_PSDE0; + SET_REG_FIELD(*udmatim, PIIX_PCI_UDMATIM_PCT0, + PIIXUDMATimingTable[index].bits); + } + else { + *udmactl |= PIIX_PCI_UDMACTL_SSDE0; + SET_REG_FIELD(*udmatim, PIIX_PCI_UDMATIM_SCT0, + PIIXUDMATimingTable[index].bits); + } + } + else { + if (channel == PIIX_CHANNEL_PRIMARY) { + *udmactl |= PIIX_PCI_UDMACTL_PSDE1; + SET_REG_FIELD(*udmatim, PIIX_PCI_UDMATIM_PCT1, + PIIXUDMATimingTable[index].bits); + } + else { + *udmactl |= PIIX_PCI_UDMACTL_SSDE1; + SET_REG_FIELD(*udmatim, PIIX_PCI_UDMATIM_SCT1, + PIIXUDMATimingTable[index].bits); + } + } + } + else { + if (unit == 0) { + if (channel == PIIX_CHANNEL_PRIMARY) { + *udmactl &= ~PIIX_PCI_UDMACTL_PSDE0; + } + else { + *udmactl &= ~PIIX_PCI_UDMACTL_SSDE0; + } + } + else { + if (channel == PIIX_CHANNEL_PRIMARY) { + *udmactl &= ~PIIX_PCI_UDMACTL_PSDE1; + } + else { + *udmactl &= ~PIIX_PCI_UDMACTL_SSDE1; + } + } + } + + dmaActive = PIIX_PROTOCOL_IS_ACTIVE(kPIIXProtocolDMA); + pioActive = PIIX_PROTOCOL_IS_ACTIVE(kPIIXProtocolPIO); + + if (dmaActive || pioActive) { + + dma = PIIX_GET_ACTIVE_TIMING(kPIIXProtocolDMA); + pio = PIIX_GET_ACTIVE_TIMING(kPIIXProtocolPIO); + + // Early PIIX devices does not have a slave timing register. + // Rather than switching timing registers whenever a new + // drive was selected, We program in a (slower) timing that + // is acceptable for both drive0 and drive1. 
+ + if (pciCFID == PCI_ID_PIIX) { + + unit = (unit ^ 1) & 1; // unit <- other drive unit + + if (PIIX_PROTOCOL_IS_ACTIVE(kPIIXProtocolPIO)) { + if (!pioActive || + (PIIX_GET_ACTIVE_TIMING(kPIIXProtocolPIO) < pio)) { + pio = PIIX_GET_ACTIVE_TIMING(kPIIXProtocolPIO); + } + pioActive = true; + } + + if (PIIX_PROTOCOL_IS_ACTIVE(kPIIXProtocolDMA)) { + if (!dmaActive || + (PIIX_GET_ACTIVE_TIMING(kPIIXProtocolDMA) < dma)) { + dma = PIIX_GET_ACTIVE_TIMING(kPIIXProtocolDMA); + } + dmaActive = true; + } + + *idetim &= ~PIIX_PCI_IDETIM_SITRE; // disable slave timing + unit = 0; + } + else { + *idetim |= PIIX_PCI_IDETIM_SITRE; // enable slave timing + } + + // Pick an index to the PIIXPIOTimingTable[] for the new + // timing selection. + // + if (dmaActive && pioActive) { + + // Both PIO and DMA are active, select DMA timing to + // optimize DMA transfer. + + index = dma; // pick DMA timing + + if (pio < dma) + useCompatiblePIOTiming = true; + } + else if (dmaActive) { + index = dma; + } + else { + index = pio; + } + + isp = PIIX_CLK_TO_ISP(PIIXPIOTimingTable[index].isp); + rtc = PIIX_CLK_TO_RTC(PIIXPIOTimingTable[index].rtc); + + if (unit == 0) { + SET_REG_FIELD(*idetim, PIIX_PCI_IDETIM_ISP, isp); + SET_REG_FIELD(*idetim, PIIX_PCI_IDETIM_RTC, rtc); + if (useCompatiblePIOTiming) + *idetim |= PIIX_PCI_IDETIM_DTE0; + else + *idetim &= ~PIIX_PCI_IDETIM_DTE0; + + if (pciCFID == PCI_ID_PIIX) { + if (useCompatiblePIOTiming) + *idetim |= PIIX_PCI_IDETIM_DTE1; + else + *idetim &= ~PIIX_PCI_IDETIM_DTE1; + } + } + else { + if (channel == PIIX_CHANNEL_PRIMARY) { + SET_REG_FIELD(*sidetim, PIIX_PCI_SIDETIM_PISP1, isp); + SET_REG_FIELD(*sidetim, PIIX_PCI_SIDETIM_PRTC1, rtc); + } + else { + SET_REG_FIELD(*sidetim, PIIX_PCI_SIDETIM_SISP1, isp); + SET_REG_FIELD(*sidetim, PIIX_PCI_SIDETIM_SRTC1, rtc); + } + if (useCompatiblePIOTiming) + *idetim |= PIIX_PCI_IDETIM_DTE1; + else + *idetim &= ~PIIX_PCI_IDETIM_DTE1; + } + + *idetim |= (PIIX_PCI_IDETIM_TIME0 | + PIIX_PCI_IDETIM_PPE0 | + 
PIIX_PCI_IDETIM_IE0 | + PIIX_PCI_IDETIM_TIME1 | + PIIX_PCI_IDETIM_PPE1 | + PIIX_PCI_IDETIM_IE1); + } + +#ifdef DEBUG_XXX + IOLog("\n%s: %s channel\n", getName(), + (channel == PIIX_CHANNEL_PRIMARY) ? "Primary" : "Secondary"); + IOLog("%s: IDETIM : %04x\n", getName(), *idetim); + IOLog("%s: SIDETIM: %02x\n", getName(), *sidetim); + IOLog("%s: UDMACTL: %02x\n", getName(), *udmactl); + IOLog("%s: UDMATIM: %04x\n", getName(), *udmatim); + IOLog("%s: Active : %04lx\n", getName(), timings[unit].activeFlag); + IOLog("%s: Valid : %04lx\n", getName(), timings[unit].validFlag); + IOLog("%s: PIO:%d DMA:%d UDMA:%d\n\n", getName(), + timings[unit].activeTimings[kPIIXProtocolPIO], + timings[unit].activeTimings[kPIIXProtocolDMA], + timings[unit].activeTimings[kPIIXProtocolUDMA33]); +#endif /* DEBUG */ + + return ret; +} + +//-------------------------------------------------------------------------- +// Setup the descriptor table to perform the transfer indicated by the +// IOMemoryDescriptor in the IOATACommand object provided. +// +bool AppleATAPIIX::programDma(IOATAStandardCommand * cmd) +{ + IOPhysicalSegment physSeg; + IOByteCount offset = 0; + IOMemoryDescriptor * memDesc; + prdEntry_t * prd = prdTable; + UInt32 startSeg; + UInt32 endSeg; + UInt32 partialCount; + UInt32 bytesLeft; + + cmd->getPointers(&memDesc, &dmaReqLength, &dmaIsWrite); + + if (dmaReqLength == 0) + return true; + + bytesLeft = dmaReqLength; + + // Setup the PRD entries in the descriptor table in memory. 
+ // + for (UInt32 i = 0; i < (PRD_ENTRIES - 1); i++, prd++) + { + if (prdCursor->getPhysicalSegments(memDesc, offset, &physSeg, 1) != 1) + break; + + startSeg = (physSeg.location & ~0xffff); + endSeg = (physSeg.location + physSeg.length - 1) & ~0xffff; + + prd->base = physSeg.location; + prd->flags = 0; + + if (startSeg == endSeg) { + prd->count = PRD_COUNT(physSeg.length); + } + else { + partialCount = (-physSeg.location & 0xffff); + prd->count = PRD_COUNT(partialCount); + prd++; + i++; + prd->base = physSeg.location + partialCount; + prd->count = physSeg.length - partialCount; + prd->flags = 0; + } + + bytesLeft -= physSeg.length; + offset += physSeg.length; + } + if (bytesLeft != 0) + return false; + + // Set the 'end-of-table' bit on the last PRD entry. + // + prd--; + prd->flags = PRD_FLAG_EOT; + + /* + * Provide the starting address of the PRD table by loading the + * PRD Table Pointer Register. + */ + outl(IOREG(BMIDTPX), prdTablePhys); + + return true; +} + +//-------------------------------------------------------------------------- +// Start the DMA engine. +// +bool AppleATAPIIX::startDma(IOATAStandardCommand * cmd) +{ + /* + * Clear interrupt and error bits in the Status Register. + */ + outb(IOREG(BMISX), PIIX_IO_BMISX_ERROR | + PIIX_IO_BMISX_IDEINTS | + PIIX_IO_BMISX_DMA0CAP | + PIIX_IO_BMISX_DMA1CAP); + + /* + * Engage the bus master by writing 1 to the start bit in the + * Command Register. Also set the RWCON bit for the direction + * of the data transfer. + */ + outb(IOREG(BMICX), (dmaIsWrite ? 0 : PIIX_IO_BMICX_RWCON) | + PIIX_IO_BMICX_SSBM); + + return true; +} + +//-------------------------------------------------------------------------- +// Stop the DMA engine. 
+// +bool AppleATAPIIX::stopDma(IOATAStandardCommand * cmd, UInt32 * transferCount) +{ + UInt8 bmisx; + + *transferCount = 0; + + if (dmaReqLength == 0) + return true; + + outb(IOREG(BMICX), 0); // stop the bus-master + + bmisx = inb(IOREG(BMISX)); + + if ((bmisx & PIIX_IO_BMISX_STATUS) != PIIX_IO_BMISX_IDEINTS) { + IOLog("AppleATAPIIX::%s() DMA error (0x%02x)\n", __FUNCTION__, bmisx); + return false; + } + + *transferCount = dmaReqLength; + + return true; +} + +//-------------------------------------------------------------------------- +// Perform a write to the ATA block registers. +// +void AppleATAPIIX::writeATAReg(UInt32 regIndex, UInt32 regValue) +{ + if (regIndex == 0) { + outw(ioCmdRange, (UInt16) regValue); + } + else if (regIndex < kATARegDeviceControl) { + outb(ioCmdRange + regIndex, (UInt8) regValue); + } + else { + outb(ioCtlRange + regIndex - kATARegDeviceControl + 2, + (UInt8) regValue); + } +} + +//-------------------------------------------------------------------------- +// Perform a read from the ATA block registers. +// +UInt32 AppleATAPIIX::readATAReg( UInt32 regIndex ) +{ + if (regIndex == 0) { + return inw(ioCmdRange); + } + else if (regIndex < kATARegDeviceControl) { + return inb(ioCmdRange + regIndex); + } + return inb(ioCtlRange + regIndex - kATARegDeviceControl + 2); +} + +//-------------------------------------------------------------------------- +// Frees the drivers instance. Make sure all objects allocated during +// our initialization are freed. +// +void AppleATAPIIX::free() +{ + if (interruptEventSource) { + interruptEventSource->disable(); + interruptEventSource->release(); + } + + if (prdCursor) + prdCursor->release(); + + if (prdTable != 0) + _deallocatePRDTable(); + + return super::free(); +} + +//-------------------------------------------------------------------------- +// This function is called when our interruptEventSource receives an +// interrupt. 
Simply pass the action to our superclass to advance its +// state machine. +// +void AppleATAPIIX::interruptOccurred() +{ + super::interruptOccurred(); +} + +//-------------------------------------------------------------------------- +// This function is called by our superclass to disable controller +// interrupts. +// +void AppleATAPIIX::disableControllerInterrupts() +{ + interruptEventSource->disable(); +} + +//-------------------------------------------------------------------------- +// This function is called by our superclass to enable controller +// interrupts. +// +void AppleATAPIIX::enableControllerInterrupts() +{ + interruptEventSource->enable(); +} + +//-------------------------------------------------------------------------- +// Private function: _readPCIConfigSpace +// +// Read the entire PCI config space and stores it to the buffer +// pointed by 'configSpace'. +// +bool AppleATAPIIX::_readPCIConfigSpace(UInt8 * configSpace) +{ + UInt32 * dwordPtr = (UInt32 *) configSpace; + + for (int i = 0; i < 64; i++, dwordPtr++) + *dwordPtr = provider->configRead32(i * 4); + + return true; +} + +//-------------------------------------------------------------------------- +// Private function: _writePCIConfigSpace +// +// Write the entire PCI config space from the buffer pointed +// by 'configSpace'. +// +bool AppleATAPIIX::_writePCIConfigSpace(UInt8 * configSpace) +{ + UInt32 * dwordPtr = (UInt32 *) configSpace; + + for (int i = 0; i < 64; i++, dwordPtr++) + provider->configWrite32(i * 4, *dwordPtr); + + return true; +} diff --git a/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.h b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.h new file mode 100644 index 000000000..c5b0241e4 --- /dev/null +++ b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * AppleATAPIIX.h - ATA controller driver for Intel PIIX/PIIX3/PIIX4. 
+ * + * HISTORY + * + */ + +#ifndef _APPLEATAPIIX_H +#define _APPLEATAPIIX_H + +#include +#include +#include +#include + +#include +#include "AppleATAPIIXRegs.h" +#include "AppleATAPIIXTiming.h" + +class AppleATAPIIX : public IOATAStandardDriver +{ + OSDeclareDefaultStructors( AppleATAPIIX ) + +protected: + IOPCIDevice * provider; // our provider + UInt32 pciCFID; // our PCI vendor/device ID + UInt32 channel; // IDE channel + UInt16 ioCmdRange; // command block + UInt16 ioCtlRange; // control block + UInt16 ioBMRange; // bus-master register block + UInt32 dmaReqLength; // transaction state + bool dmaIsWrite; // transaction state + prdEntry_t * prdTable; // physical region descriptor table + IOPhysicalAddress prdTablePhys; // physical address of prdTable + PIIXSelectedTimings timings[2]; // drive0 and drive1 timings + IOLittleMemoryCursor * prdCursor; // request -> scatter-gather list + IOTimerEventSource * timerEventSource; + IOInterruptEventSource * interruptEventSource; + + /* + * Internal (private) functions. + */ + bool _getIDERanges(IOPCIDevice * provider); + bool _getBMRange(IOPCIDevice * provider); + bool _allocatePRDTable(); + void _deallocatePRDTable(); + bool _resetTimings(); + bool _readPCIConfigSpace(UInt8 * configSpace); + bool _writePCIConfigSpace(UInt8 * configSpace); + bool _selectTiming(UInt32 unit, + PIIXProtocol timingProtocol, + UInt8 * pciConfig); + +public: + /* + * Class initializer. + */ + static void initialize(); + + /* + * Returns the IDE channel for the current driver instance. + */ + static int PIIXGetChannel(IOPCIDevice * provider); + + /* + * Functions defined by our superclass that we must override. 
+ */ + void writeATAReg(UInt32 regIndex, UInt32 regValue); + + UInt32 readATAReg(UInt32 regIndex); + + void free(); + + bool configure(IOService * forProvider, + ATAControllerInfo * controllerInfo); + + bool createWorkLoop(IOWorkLoop ** workLoop); + + bool provideProtocols(enum ATAProtocol * protocolsSupported); + + bool provideTimings(UInt32 * numTimings, + ATATiming * timingsSupported); + + bool calculateTiming(UInt32 deviceNum, ATATiming * pTiming); + + bool selectTiming(UInt32 unitNum, + ATATimingProtocol timingProtocol); + + void disableControllerInterrupts(); + void enableControllerInterrupts(); + + void ataTimer(IOTimerEventSource * sender); + + /* + * Functions that must be implemented by a bus-master controller. + */ + bool programDma(IOATAStandardCommand * cmd); + bool startDma(IOATAStandardCommand * cmd); + bool stopDma(IOATAStandardCommand * cmd, UInt32 * transferCount); + + /* + * Miscellaneous functions. + */ + void interruptOccurred(); +}; + +#endif /* !_APPLEATAPIIX_H */ diff --git a/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXRegs.h b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXRegs.h new file mode 100644 index 000000000..6f0ca85c5 --- /dev/null +++ b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXRegs.h @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * Intel PIIX/PIIX3/PIIX4 PCI IDE controller. + * PIIX = PCI-ISA-IDE-Xelerator. (also USB on newer controllers) + * + * Notes: + * + * PIIX introduced in the "Triton" chipset. + * PIIX3 supports different timings for Master/Slave devices on both channels. + * PIIX4 adds support for Ultra DMA/33. + * + * Be sure to download and read the PIIX errata from Intel's web site at + * developer.intel.com. + * + * HISTORY: + * + */ + +#ifndef _APPLEATAPIIXREGS_H +#define _APPLEATAPIIXREGS_H + +/* + * PCI ID for supported PIIX variants. + */ +#define PCI_ID_PIIX 0x12308086 +#define PCI_ID_PIIX3 0x70108086 +#define PCI_ID_PIIX4 0x71118086 +#define PCI_ID_ICH 0x24118086 +#define PCI_ID_ICH0 0x24218086 +#define PCI_ID_ICH2_M 0x244a8086 +#define PCI_ID_ICH2 0x244b8086 +#define PCI_ID_NONE 0xffffffff + +/* + * Decoded port addresses. Seems to be hardcoded and it does not + * show up in the PCI configuration space memory ranges. + */ +#define PIIX_P_CMD_ADDR 0x1f0 +#define PIIX_P_CTL_ADDR 0x3f4 +#define PIIX_S_CMD_ADDR 0x170 +#define PIIX_S_CTL_ADDR 0x374 +#define PIIX_CMD_SIZE 8 +#define PIIX_CTL_SIZE 4 + +/* + * IRQ assignment. + */ +#define PIIX_P_IRQ 14 +#define PIIX_S_IRQ 15 + +/* + * PIIX has two IDE channels. + */ +#define PIIX_CHANNEL_PRIMARY 0 +#define PIIX_CHANNEL_SECONDARY 1 + +/* + * PIIX PCI config space registers. + * Register size (bits) in parenthesis. 
+ */ +#define PIIX_PCI_CFID 0x00 + +#define PIIX_PCI_PCICMD 0x04 // (16) PCI command register +#define PIIX_PCI_PCICMD_IOSE 0x01 // I/O space enable +#define PIIX_PCI_PCICMD_BME 0x04 // bus-master enable + +#define PIIX_PCI_PCISTS 0x06 // (16) PCI device status register +#define PIIX_PCI_RID 0x08 // (8) Revision ID register +#define PIIX_PCI_CLASSC 0x09 // (24) Class code register +#define PIIX_PCI_MLT 0x0d // (8) Master latency timer register +#define PIIX_PCI_HEDT 0x0e // (8) Header type register + +#define PIIX_PCI_BMIBA 0x20 // (32) Bus-Master base address +#define PIIX_PCI_BMIBA_RTE 0x01 // resource type indicator (I/O) +#define PIIX_PCI_BMIBA_MASK 0xfff0 // base address mask + +#define PIIX_PCI_IDETIM 0x40 // (16) IDE timing registers (pri) +#define PIIX_PCI_IDETIM_S 0x42 // (16) IDE timing registers (sec) +#define PIIX_PCI_SIDETIM 0x44 // (8) Slave IDE timing register +#define PIIX_PCI_UDMACTL 0x48 // (8) Ultra DMA/33 control register +#define PIIX_PCI_UDMATIM 0x4a // (16) Ultra DMA/33 timing register + +#define PIIX_PCI_IDECONFIG 0x54 // (32) IDE I/O Config register + +/* + * PIIX PCI configuration space register definition. + * + * PIIX_IDETIM - IDE timing register. 
+ * + * Address: + * 0x40:0x41 - Primary channel + * 0x42:0x43 - Secondary channel + */ +#define PIIX_PCI_IDETIM_IDE 0x8000 // IDE decode enable +#define PIIX_PCI_IDETIM_SITRE 0x4000 // slave timing register enable + +#define PIIX_PCI_IDETIM_ISP_MASK 0x3000 +#define PIIX_PCI_IDETIM_ISP_SHIFT 12 +#define PIIX_PCI_IDETIM_ISP_5 0x0000 // IORDY sample point +#define PIIX_PCI_IDETIM_ISP_4 0x1000 // (PCI clocks) +#define PIIX_PCI_IDETIM_ISP_3 0x2000 +#define PIIX_PCI_IDETIM_ISP_2 0x3000 + +#define PIIX_PCI_IDETIM_RTC_MASK 0x0300 +#define PIIX_PCI_IDETIM_RTC_SHIFT 8 +#define PIIX_PCI_IDETIM_RTC_4 0x0000 // receovery time (PCI clocks) +#define PIIX_PCI_IDETIM_RTC_3 0x0100 +#define PIIX_PCI_IDETIM_RTC_2 0x0200 +#define PIIX_PCI_IDETIM_RTC_1 0x0300 + +#define PIIX_PCI_IDETIM_DTE1 0x0080 // DMA timing enable only +#define PIIX_PCI_IDETIM_PPE1 0x0040 // prefetch and posting enabled +#define PIIX_PCI_IDETIM_IE1 0x0020 // IORDY sample point enable +#define PIIX_PCI_IDETIM_TIME1 0x0010 // fast timing enable +#define PIIX_PCI_IDETIM_DTE0 0x0008 // same as above for drive 0 +#define PIIX_PCI_IDETIM_PPE0 0x0004 +#define PIIX_PCI_IDETIM_IE0 0x0002 +#define PIIX_PCI_IDETIM_TIME0 0x0001 + +/* + * PIIX PCI configuration space register definition. + * + * PIIX_SIDETIM - Slave IDE timing register. + * + * Address: 0x44 + */ +#define PIIX_PCI_SIDETIM_SISP1_MASK 0xc0 +#define PIIX_PCI_SIDETIM_SISP1_SHIFT 6 +#define PIIX_PCI_SIDETIM_SRTC1_MASK 0x30 +#define PIIX_PCI_SIDETIM_SRTC1_SHIFT 4 +#define PIIX_PCI_SIDETIM_PISP1_MASK 0x0c +#define PIIX_PCI_SIDETIM_PISP1_SHIFT 2 +#define PIIX_PCI_SIDETIM_PRTC1_MASK 0x03 +#define PIIX_PCI_SIDETIM_PRTC1_SHIFT 0 + +/* + * PIIX PCI configuration space register definition. 
+ * + * PIIX_UDMACTL - Ultra DMA/33 control register + * + * Address: 0x48 + */ +#define PIIX_PCI_UDMACTL_SSDE1 0x08 // Enable UDMA/33 Sec/Drive1 +#define PIIX_PCI_UDMACTL_SSDE0 0x04 // Enable UDMA/33 Sec/Drive0 +#define PIIX_PCI_UDMACTL_PSDE1 0x02 // Enable UDMA/33 Pri/Drive1 +#define PIIX_PCI_UDMACTL_PSDE0 0x01 // Enable UDMA/33 Pri/Drive0 + +/* + * PIIX PCI configuration space register definition. + * + * PIIX_UDMATIM - Ultra DMA/33 timing register + * + * Address: 0x4a-0x4b + */ +#define PIIX_PCI_UDMATIM_PCT0_MASK 0x0003 +#define PIIX_PCI_UDMATIM_PCT0_SHIFT 0 +#define PIIX_PCI_UDMATIM_PCT1_MASK 0x0030 +#define PIIX_PCI_UDMATIM_PCT1_SHIFT 4 +#define PIIX_PCI_UDMATIM_SCT0_MASK 0x0300 +#define PIIX_PCI_UDMATIM_SCT0_SHIFT 8 +#define PIIX_PCI_UDMATIM_SCT1_MASK 0x3000 +#define PIIX_PCI_UDMATIM_SCT1_SHIFT 12 + + +/* + * PIIX IO space register offsets. Base address is set in PIIX_PCI_BMIBA. + * Register size (bits) in parenthesis. + * + * Note: + * For the primary channel, the base address is stored in PIIX_PCI_BMIBA. + * For the secondary channel, an offset (PIIX_IO_BM_OFFSET) is added to + * the value stored in PIIX_PCI_BMIBA. + */ +#define PIIX_IO_BMICX 0x00 // (8) Bus master command register +#define PIIX_IO_BMISX 0x02 // (8) Bus master status register +#define PIIX_IO_BMIDTPX 0x04 // (32) Descriptor table register + +#define PIIX_IO_BM_OFFSET 0x08 // offset to sec channel registers +#define PIIX_IO_BM_SIZE 0x08 // BM registers size for each channel +#define PIIX_IO_BM_MASK 0xfff0 // BMIBA mask to get I/O base address + +/* + * PIIX IO space register definition. + * + * BMICX - Bus master IDE command register + */ +#define PIIX_IO_BMICX_SSBM 0x01 // 1=Start, 0=Stop +#define PIIX_IO_BMICX_RWCON 0x08 // 0=Read, 1=Write + +/* + * PIIX IO space register definition. 
+ * + * PIIX_BMISX - Bus master IDE status register + */ +#define PIIX_IO_BMISX_DMA1CAP 0x40 // drive 1 is capable of DMA transfers +#define PIIX_IO_BMISX_DMA0CAP 0x20 // drive 0 is capable of DMA transfers +#define PIIX_IO_BMISX_IDEINTS 0x04 // IDE device asserted its interrupt +#define PIIX_IO_BMISX_ERROR 0x02 // DMA error (cleared by writing a 1) +#define PIIX_IO_BMISX_BMIDEA 0x01 // bus master active bit + +#define PIIX_IO_BMISX_STATUS (PIIX_IO_BMISX_IDEINTS | \ + PIIX_IO_BMISX_ERROR | \ + PIIX_IO_BMISX_BMIDEA) + +/* + * PIIX Bus Master alignment/boundary requirements. + * + * Intel nomemclature: + * WORD - 16-bit + * DWord - 32-bit + */ +#define PIIX_DT_ALIGN 4 // descriptor table must be DWord aligned. +#define PIIX_DT_BOUND (4 * 1024) // cannot cross 4K boundary. (or 64K ?) + +#define PIIX_BUF_ALIGN 2 // memory buffer must be word aligned. +#define PIIX_BUF_BOUND (64 * 1024) // cannot cross 64K boundary. +#define PIIX_BUF_LIMIT (64 * 1024) // limited to 64K in size + +/* + * PIIX Bus Master Physical Region Descriptor (PRD). + */ +typedef struct { + UInt32 base; // base address + UInt16 count; // byte count + UInt16 flags; // flag bits +} prdEntry_t; + +#define PRD_FLAG_EOT 0x8000 + +#define PRD_COUNT(x) (((x) == PIIX_BUF_LIMIT) ? 0 : (x)) +#define PRD_TABLE_SIZE PAGE_SIZE +#define PRD_ENTRIES (PRD_TABLE_SIZE / sizeof(prdEntry_t)) + +/* + * PIIX Register setting macro. + */ +#define SET_REG_FIELD(reg, field, val) \ +{ \ + reg &= ~(field ## _MASK); \ + reg |= (((val) << field ## _SHIFT) & \ + field ## _MASK); \ +} + +/* + * Convert the "isp" and "rtc" fields in PIIX_IDETIM register from + * PCI clocks to their respective values, and vice-versa. 
+ */ +#define PIIX_CLK_TO_ISP(x) (5 - (x)) +#define PIIX_ISP_TO_CLK(x) PIIX_CLK_TO_ISP(x) +#define PIIX_CLK_TO_RTC(x) (4 - (x)) +#define PIIX_RTC_TO_CLK(x) PIIX_CLK_TO_RTC(x) + +#endif /* !_APPLEATAPIIXREGS_H */ diff --git a/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXTiming.h b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXTiming.h new file mode 100644 index 000000000..b2ed7dd4d --- /dev/null +++ b/iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIXTiming.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * AppleATAPIIXTiming.h - Timing tables. + * + * HISTORY + * + */ + +#ifndef _APPLEATAPIIXTIMING_H +#define _APPLEATAPIIXTIMING_H + +/* + * Supported transfer protocols. Entries in this table must map to the + * entries in ATATimingProtocol table. + */ +typedef enum { + kPIIXProtocolPIO = 0, + kPIIXProtocolDMA, + kPIIXProtocolUDMA33, + kPIIXProtocolUDMA66, + kPIIXProtocolLast +} PIIXProtocol; + +/* + * PIIX PIO/DMA timing table. 
+ */ +typedef struct { + UInt8 pioMode; // PIO mode + UInt8 swDMAMode; // single-word DMA mode (obsolete) + UInt8 mwDMAMode; // multiword DMA mode + UInt8 isp; // IORDY sample point in PCI clocks + UInt8 rtc; // Recovery time in PCI clocks + UInt16 cycle; // cycle time in ns +} PIIXTiming; + +#define _NVM_ 0xff // not a valid mode + +static const +PIIXTiming PIIXPIOTimingTable[] = { +/* PIO SW MW ISP RTC CYCLE (ns) */ + {0, 0, 0, 5, 4, 600}, + {1, 1, _NVM_, 5, 4, 600}, + {2, 2, _NVM_, 4, 4, 240}, + {3, _NVM_, 1, 3, 3, 180}, + {4, _NVM_, 2, 3, 1, 120}, + {5, _NVM_, 2, 3, 1, 120}, +}; + +static const UInt8 PIIXPIOTimingTableSize = sizeof(PIIXPIOTimingTable) / + sizeof(PIIXPIOTimingTable[0]); + +/* + * PIIX Ultra-DMA/33 timing table. + */ +typedef struct { + UInt8 mode; // mode number + UInt8 ct; // cycle time in PCI clocks + UInt8 rp; // Ready to Pause time in PCI clocks + UInt8 bits; // register bit setting + UInt16 strobe; // strobe period (cycle) in ns +} PIIXUDMATiming; + +static const +PIIXUDMATiming PIIXUDMATimingTable[] = { +/* MODE CT RP BITS STROBE/CYCLE (ns) */ + {0, 4, 6, 0, 120}, + {1, 3, 5, 1, 90}, + {2, 2, 4, 2, 60}, +}; + +static const UInt8 +PIIXUDMATimingTableSize = sizeof(PIIXUDMATimingTable) / + sizeof(PIIXUDMATimingTable[0]); + +/* + * For each drive, the following table will store the chosen timings + * for each supported protocol. + */ +typedef struct { + UInt8 activeTimings[kPIIXProtocolLast]; // selected timings + UInt8 validTimings[kPIIXProtocolLast]; // calculated timings + UInt32 validFlag; + UInt32 activeFlag; +} PIIXSelectedTimings; + +/* + * Convert from ATATimingProtocol to PIIXProtocol. + */ +inline PIIXProtocol ataToPIIXProtocol(ATATimingProtocol timingProtocol) +{ + int piixProtocol = kPIIXProtocolPIO; + int ataProtocol = timingProtocol; + + while (ataProtocol != 1) { + ataProtocol >>= 1; piixProtocol++; + } + return ((PIIXProtocol) piixProtocol); +} + +/* + * Misc macros to get information from the PIIXSelectedTimings table. 
+ */ +#define PIIX_ACTIVATE_PROTOCOL(p) { \ + timings[unit].activeTimings[p] = timings[unit].validTimings[p]; \ + timings[unit].activeFlag |= (1 << (p)); \ +} + +#define PIIX_DEACTIVATE_PROTOCOL(p) { \ + timings[unit].activeFlag &= ~(1 << (p)); \ +} + +#define PIIX_GET_ACTIVE_TIMING(p) (timings[unit].activeTimings[p]) + +#define PIIX_PROTOCOL_IS_ACTIVE(p) ((bool) \ + (timings[unit].activeFlag & (1 << (p)))) + +#define PIIX_PROTOCOL_IS_VALID(p) ((bool) \ + (timings[unit].validFlag & (1 << (p)))) + +#endif /* !_APPLEATAPIIXTIMING_H */ diff --git a/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.cpp b/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.cpp new file mode 100644 index 000000000..c1ebfce56 --- /dev/null +++ b/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.cpp @@ -0,0 +1,747 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * AppleUltra33ATA.cpp + * + */ +#include "AppleUltra33ATA.h" + +#include + +#undef super +#define super IOATAStandardDriver + +extern pmap_t kernel_pmap; + +OSDefineMetaClassAndStructors( AppleUltra33ATA, IOATAStandardDriver ) + + +static struct +{ + UInt32 minDataAccess; + UInt32 minDataCycle; +} pioModes[] = +{ + { 165, 600 }, /* Mode 0 */ + { 125, 383 }, /* 1 */ + { 100, 240 }, /* 2 */ + { 80, 180 }, /* 3 */ + { 70, 120 } /* 4 */ +}; + + +/* + * + * + */ +bool AppleUltra33ATA::configure( IOService *forProvider, ATAControllerInfo *controllerInfo ) +{ + + provider = (IOPCIDevice *)forProvider; + + busNum = 0; + + ioMapATA[0] = provider->mapDeviceMemoryWithRegister( 0x10 + busNum * 8 + 0 ); + if ( ioMapATA[0] == NULL ) return false; + ioBaseATA[0] = (volatile UInt32 *)ioMapATA[0]->getVirtualAddress(); + + ioMapATA[1] = provider->mapDeviceMemoryWithRegister( 0x10 + busNum * 8 + 4 ); + if ( ioMapATA[1] == NULL ) return false; + ioBaseATA[1] = (volatile UInt32 *)ioMapATA[1]->getVirtualAddress(); + + pciWriteLong( 0x04, 0x05 ); + + dmaDescriptors = (Ultra646Descriptor *)kalloc(page_size); + if ( dmaDescriptors == 0 ) + { + return false; + } + + dmaDescriptorsPhys = (UInt32) pmap_extract(kernel_pmap, (vm_offset_t) dmaDescriptors); + + if ( (UInt32)dmaDescriptors & (page_size - 1) ) + { + IOLog("AppleUltra33ATA::%s() - DMA Descriptor memory not page aligned!!", __FUNCTION__); + return false; + } + + bzero( dmaDescriptors, page_size ); + + numDescriptors = page_size/sizeof(Ultra646Descriptor); + + dmaMemoryCursor = IOBigMemoryCursor::withSpecification( 64*1024-2, 0xffffffff ); + if ( dmaMemoryCursor == NULL ) + { + return false; + } + + bitBucketAddr = IOMalloc(32); + if ( bitBucketAddr == 0 ) + { + return false; + } + bitBucketAddrPhys = (UInt32) pmap_extract(kernel_pmap, (vm_offset_t) (((UInt32)bitBucketAddr + 0xf) & ~0x0f)); + + interruptEventSource = IOInterruptEventSource::interruptEventSource( (OSObject *) this, + 
(IOInterruptEventAction) &AppleUltra33ATA::interruptOccurred, + (IOService *) provider, + (int) 0 ); + + if ( interruptEventSource == NULL ) + { + return false; + } + + disableControllerInterrupts(); + + getWorkLoop()->addEventSource( interruptEventSource ); + + controllerInfo->maxDevicesSupported = 2; + controllerInfo->devicePrivateDataSize = 0; + controllerInfo->commandPrivateDataSize = 0; + controllerInfo->disableCancelCommands = false; + + return true; +} + +/* + * + * + */ +bool AppleUltra33ATA::calculateTiming( UInt32 unit, ATATiming *pTiming ) +{ + bool rc = false; + + ideTimingRegs[unit].arttimReg = 0x40; + ideTimingRegs[unit].cmdtimReg = 0xA9; + + switch ( pTiming->timingProtocol ) + { + case kATATimingPIO: + rc = calculatePIOTiming( unit, pTiming ); + break; + + case kATATimingDMA: + rc = calculateDMATiming( unit, pTiming ); + break; + + case kATATimingUltraDMA33: + rc = calculateUltraDMATiming( unit, pTiming ); + break; + + + default: + ; + } + + return rc; +} + +/* + * + * + */ +bool AppleUltra33ATA::calculatePIOTiming( UInt32 unit, ATATiming *pTiming ) +{ + UInt32 accessTime; + UInt32 drwActClks, drwRecClks; + UInt32 drwActTime, drwRecTime; + + accessTime = pioModes[pTiming->mode].minDataAccess; + + drwActClks = accessTime / IDE_SYSCLK_NS; + drwActClks += (accessTime % IDE_SYSCLK_NS) ? 1 : 0; + drwActTime = drwActClks * IDE_SYSCLK_NS; + + drwRecTime = pioModes[pTiming->mode].minDataCycle - drwActTime; + drwRecClks = drwRecTime / IDE_SYSCLK_NS; + drwRecClks += (drwRecTime % IDE_SYSCLK_NS) ? 
1 : 0; + + if ( drwRecClks >= 16 ) + drwRecClks = 1; + else if ( drwRecClks <= 1 ) + drwRecClks = 16; + + ideTimingRegs[unit].drwtimRegPIO = ((drwActClks & 0x0f) << 4) | ((drwRecClks-1) & 0x0f); + + return true; +} + + +/* + * + * + */ +bool AppleUltra33ATA::calculateDMATiming( UInt32 unit, ATATiming *pTiming ) +{ + UInt32 accessTime; + UInt32 drwActClks, drwRecClks; + UInt32 drwActTime, drwRecTime; + + ideTimingRegs[unit].udidetcrReg = 0; + + accessTime = pTiming->minDataAccess; + + drwActClks = accessTime / IDE_SYSCLK_NS; + drwActClks += (accessTime % IDE_SYSCLK_NS) ? 1 : 0; + drwActTime = drwActClks * IDE_SYSCLK_NS; + + drwRecTime = pTiming->minDataCycle - drwActTime; + drwRecClks = drwRecTime / IDE_SYSCLK_NS; + drwRecClks += (drwRecTime % IDE_SYSCLK_NS) ? 1 : 0; + + if ( drwRecClks >= 16 ) + drwRecClks = 1; + else if ( drwRecClks <= 1 ) + drwRecClks = 16; + + ideTimingRegs[unit].drwtimRegDMA = ((drwActClks & 0x0f) << 4) | ((drwRecClks-1) & 0x0f); + + return true; +} + +/* + * + * + */ +bool AppleUltra33ATA::calculateUltraDMATiming( UInt32 unit, ATATiming *pTiming ) +{ + UInt32 cycleClks; + UInt32 cycleTime; + + cycleTime = pTiming->minDataCycle; + + cycleClks = cycleTime / IDE_SYSCLK_NS; + cycleClks += (cycleTime % IDE_SYSCLK_NS) ? 1 : 0; + + ideTimingRegs[unit].udidetcrReg = (0x01 << unit) | ((cycleClks-1) << ((!unit) ? 
4 : 6)) ; + + return true; +} + +/* + * + * + */ +void AppleUltra33ATA::newDeviceSelected( IOATADevice *newDevice ) +{ +} + + +/* + * + * + */ +bool AppleUltra33ATA::selectTiming( UInt32 unit, ATATimingProtocol timingProtocol ) +{ + Ultra646Regs *cfgRegs; + UInt32 cfgByte; + + cfgRegs = &ideTimingRegs[unit]; + + if ( busNum == 0 ) + { + pciWriteByte( kUltra646CMDTIM, cfgRegs->cmdtimReg ); + + if ( unit == 0 ) + { + pciWriteByte( kUltra646ARTTIM0, cfgRegs->arttimReg ); + + if ( timingProtocol == kATATimingPIO ) + { + cfgByte = pciReadByte( kUltra646CNTRL ); + cfgByte &= ~kUltra646CNTRL_Drive0ReadAhead; + cfgByte |= cfgRegs->cntrlReg; + pciWriteByte( kUltra646CNTRL, cfgByte ); + + pciWriteByte( kUltra646DRWTIM0, cfgRegs->drwtimRegPIO ); + } + else if ( timingProtocol == kATATimingDMA ) + { + pciWriteByte( kUltra646DRWTIM0, cfgRegs->drwtimRegDMA ); + } + else if ( timingProtocol == kATATimingUltraDMA33 ) + { + cfgByte = pciReadByte( kUltra646UDIDETCR0 ); + cfgByte &= ~(kUltra646UDIDETCR0_Drive0UDMACycleTime | kUltra646UDIDETCR0_Drive0UDMAEnable); + cfgByte |= cfgRegs->udidetcrReg; + pciWriteByte( kUltra646UDIDETCR0, cfgByte ); + } + } + else + { + pciWriteByte( kUltra646ARTTIM1, cfgRegs->arttimReg ); + + if ( timingProtocol == kATATimingPIO ) + { + cfgByte = pciReadByte( kUltra646CNTRL ); + cfgByte &= ~kUltra646CNTRL_Drive1ReadAhead; + cfgByte |= cfgRegs->cntrlReg; + pciWriteByte( kUltra646CNTRL, cfgByte ); + + pciWriteByte( kUltra646DRWTIM1, cfgRegs->drwtimRegPIO ); + } + else if ( timingProtocol == kATATimingDMA ) + { + pciWriteByte( kUltra646DRWTIM1, cfgRegs->drwtimRegDMA ); + } + else if ( timingProtocol == kATATimingUltraDMA33 ) + { + cfgByte = pciReadByte( kUltra646UDIDETCR0 ); + cfgByte &= ~(kUltra646UDIDETCR0_Drive1UDMACycleTime | kUltra646UDIDETCR0_Drive1UDMAEnable); + cfgByte |= cfgRegs->udidetcrReg; + pciWriteByte( kUltra646UDIDETCR0, cfgByte ); + } + } + } + else + { + pciWriteByte( kUltra646CMDTIM, cfgRegs->cmdtimReg ); + + if ( unit == 0 ) + { + cfgByte 
= pciReadByte( kUltra646ARTTIM23 ); + cfgByte &= ~(kUltra646ARTTIM23_Drive2ReadAhead | kUltra646ARTTIM23_AddrSetup); + cfgByte |= (cfgRegs->cntrlReg >> 4) | cfgRegs->arttimReg; + pciWriteByte( kUltra646ARTTIM23, cfgByte ); + + if ( timingProtocol == kATATimingPIO ) + { + pciWriteByte( kUltra646DRWTIM2, cfgRegs->drwtimRegPIO ); + } + else if ( timingProtocol == kATATimingDMA ) + { + pciWriteByte( kUltra646DRWTIM1, cfgRegs->drwtimRegDMA ); + } + else if ( timingProtocol == kATATimingUltraDMA33 ) + { + cfgByte = pciReadByte( kUltra646UDIDETCR1 ); + cfgByte &= ~(kUltra646UDIDETCR1_Drive2UDMACycleTime | kUltra646UDIDETCR1_Drive2UDMAEnable); + cfgByte |= cfgRegs->udidetcrReg; + pciWriteByte( kUltra646UDIDETCR1, cfgByte ); + } + } + else + { + cfgByte = pciReadByte( kUltra646ARTTIM23 ); + cfgByte &= ~(kUltra646ARTTIM23_Drive3ReadAhead | kUltra646ARTTIM23_AddrSetup); + cfgByte |= (cfgRegs->cntrlReg >> 4) | cfgRegs->arttimReg; + pciWriteByte( kUltra646ARTTIM23, cfgByte ); + + if ( timingProtocol == kATATimingPIO ) + { + pciWriteByte( kUltra646DRWTIM3, cfgRegs->drwtimRegPIO ); + } + else if ( timingProtocol == kATATimingDMA ) + { + pciWriteByte( kUltra646DRWTIM3, cfgRegs->drwtimRegDMA ); + } + else if ( timingProtocol == kATATimingUltraDMA33 ) + { + cfgByte = pciReadByte( kUltra646UDIDETCR1 ); + cfgByte &= ~(kUltra646UDIDETCR1_Drive3UDMACycleTime | kUltra646UDIDETCR1_Drive3UDMAEnable); + cfgByte |= cfgRegs->udidetcrReg; + pciWriteByte( kUltra646UDIDETCR1, cfgByte ); + } + } + } + + return true; +} + + +/* + * + * + */ +void AppleUltra33ATA::interruptOccurred() +{ + UInt32 intReg; + UInt32 cfgReg; + + intReg = (busNum == 0) ? kUltra646CFR : kUltra646ARTTIM23; + cfgReg = pciReadByte( intReg ); + pciWriteByte( intReg, cfgReg ); + + intReg = (busNum == 0) ? 
kUltra646BMIDESR0 : kUltra646BMIDESR1; + cfgReg = pciReadByte( intReg ); + pciWriteByte( intReg, cfgReg ); + + super::interruptOccurred(); + + enableControllerInterrupts(); +} + +/* + * + * + */ +bool AppleUltra33ATA::programDma( IOATAStandardCommand *cmd ) +{ + IOMemoryDescriptor *memoryDesc; + IOPhysicalSegment physSeg; + IOByteCount offset; + UInt32 i; + UInt32 bytesLeft; + UInt32 len; + Ultra646Descriptor *dmaDesc; + UInt32 startSeg, endSeg; + + cmd->getPointers( &memoryDesc, &dmaReqLength, &dmaIsWrite ); + + if ( dmaReqLength == 0 ) + { + return true; + } + + offset = 0; + + dmaDesc = dmaDescriptors; + + bytesLeft = dmaReqLength; + + for (i = 0; i < numDescriptors-1; i++, dmaDesc++ ) + { + if ( dmaMemoryCursor->getPhysicalSegments( memoryDesc, offset, &physSeg, 1 ) != 1 ) + { + break; + } + + startSeg = (physSeg.location & ~0xffff); + endSeg = (physSeg.location + physSeg.length - 1) & ~0xffff; + + OSWriteSwapInt32( &dmaDesc->start, 0, physSeg.location); + + if ( startSeg == endSeg ) + { + OSWriteSwapInt32( &dmaDesc->length, 0, physSeg.length ); + } + else + { + len = (-physSeg.location & 0xffff); + OSWriteSwapInt32( &dmaDesc->length, 0, len ); + dmaDesc++; + i++; + OSWriteSwapInt32( &dmaDesc->start, 0, physSeg.location + len ); + OSWriteSwapInt32( &dmaDesc->length, 0, physSeg.length - len ); + } + + bytesLeft -= physSeg.length; + offset += physSeg.length; + } + + if ( bytesLeft != 0 ) + { + return false; + } + + /* + * Note: ATAPI always transfers even byte-counts. Send the extra byte to/from the bit-bucket + * if the requested transfer length is odd. + */ + if ( dmaReqLength & 1 ) + { + if ( i == numDescriptors ) return false; + + dmaDesc++; + OSWriteSwapInt32( &dmaDesc->start, 0, bitBucketAddrPhys ); + OSWriteSwapInt32( &dmaDesc->length, 0, 1 ); + } + + + dmaDesc--; + dmaDesc->length |= 0x80; + + pciWriteLong( ((busNum == 0) ? 
kUltra646DTPR0 : kUltra646DTPR1), dmaDescriptorsPhys ); + + return true; +} + +/* + * + * + */ +bool AppleUltra33ATA::startDma( IOATAStandardCommand * ) +{ + UInt32 reg; + UInt32 cfgReg; + UInt32 startMask; + UInt32 writeMask; + + if ( dmaReqLength != 0 ) + { + reg = (busNum == 0) ? kUltra646BMIDECR0 : kUltra646BMIDECR1; + startMask = (busNum == 0) ? kUltra646BMIDECR0_StartDMAPRI : kUltra646BMIDECR1_StartDMASDY; + writeMask = (busNum == 0) ? kUltra646BMIDECR0_PCIWritePRI : kUltra646BMIDECR1_PCIWriteSDY; + cfgReg = pciReadByte( reg ); + cfgReg &= ~writeMask; + cfgReg |= startMask | ((dmaIsWrite == false) ? writeMask : 0); + pciWriteByte( reg, cfgReg ); + } + return true; +} + +/* + * + * + */ +bool AppleUltra33ATA::stopDma( IOATAStandardCommand *, UInt32 *transferCount ) +{ + UInt32 reg; + UInt32 cfgReg; + UInt32 startMask; + + *transferCount = 0; + + if ( dmaReqLength == 0 ) + { + return true; + } + + reg = (busNum == 0) ? kUltra646BMIDECR0 : kUltra646BMIDECR1; + startMask = (busNum == 0) ? kUltra646BMIDECR0_StartDMAPRI : kUltra646BMIDECR1_StartDMASDY; + cfgReg = pciReadByte( reg ); + cfgReg &= ~startMask; + pciWriteByte( reg, cfgReg ); + + *transferCount = dmaReqLength; + + return true; +} + +/* + * + * + */ +bool AppleUltra33ATA::checkDmaActive() +{ + UInt32 reg; + UInt32 cfgReg; + UInt32 activeMask; + + reg = (busNum == 0) ? kUltra646BMIDESR0 : kUltra646BMIDESR1; + activeMask = (busNum == 0) ? kUltra646BMIDESR0_DMAActivePRI : kUltra646BMIDESR1_DMAActiveSDY; + + cfgReg = pciReadByte( reg ); + + return ((cfgReg & activeMask) != 0); +} + +/* + * + * + */ +bool AppleUltra33ATA::resetDma() +{ + UInt32 reg; + UInt32 cfgReg; + UInt32 startMask; + + reg = (busNum == 0) ? kUltra646BMIDECR0 : kUltra646BMIDECR1; + startMask = (busNum == 0) ? 
kUltra646BMIDECR0_StartDMAPRI : kUltra646BMIDECR1_StartDMASDY; + + cfgReg = pciReadByte( reg ); + cfgReg &= ~startMask; + pciWriteByte( reg, cfgReg ); + + return true; +} + +/* + * + * + */ +void AppleUltra33ATA::disableControllerInterrupts() +{ + interruptEventSource->disable(); +} + +/* + * + * + */ +void AppleUltra33ATA::enableControllerInterrupts() +{ + interruptEventSource->enable(); +} + +/* + * + * + */ +void AppleUltra33ATA::free() +{ + UInt32 i; + + if ( interruptEventSource != 0 ) + { + interruptEventSource->disable(); + interruptEventSource->release(); + } + + for (i = 0; i < 2; i++ ) + { + if ( ioMapATA[i] != 0 ) ioMapATA[i]->release(); + } + + if ( dmaDescriptors != 0 ) + { + kfree( (vm_offset_t)dmaDescriptors, page_size ); + } +} + +/* + * + * + */ +void AppleUltra33ATA::writeATAReg( UInt32 regIndex, UInt32 regValue ) +{ + if ( regIndex == 0 ) + { + *(volatile UInt16 *)ioBaseATA[0] = regValue; + } + else if ( regIndex < kATARegDeviceControl ) + { + *((volatile UInt8 *)ioBaseATA[0] + regIndex) = regValue; + } + else + { + *((volatile UInt8 *)ioBaseATA[1] + regIndex - kATARegDeviceControl + 2) = regValue; + } + eieio(); +} + +UInt32 AppleUltra33ATA::readATAReg( UInt32 regIndex ) +{ + if ( regIndex == 0 ) + { + return *(volatile UInt16 *)ioBaseATA[0]; + } + else if ( regIndex < kATARegDeviceControl ) + { + return *((volatile UInt8 *)ioBaseATA[0] + regIndex); + } + + return *((volatile UInt8 *)ioBaseATA[1] + regIndex - kATARegDeviceControl + 2); +} + +/* + * + * + */ +UInt32 AppleUltra33ATA::pciReadByte( UInt32 reg ) +{ + volatile union + { + unsigned long word; + unsigned char byte[4]; + } data; + + data.word = provider->configRead32( reg ); + return data.byte[3 - (reg & 0x03)]; +} + +void AppleUltra33ATA::pciWriteByte( UInt32 reg, UInt32 value ) +{ + volatile union + { + unsigned long word; + unsigned char byte[4]; + } data; + + UInt32 regWord; + + regWord = reg & ~0x03; + + data.word = provider->configRead32( regWord ); + data.word = OSReadSwapInt32( 
&data.word, 0 ); + + switch (regWord) + { + case kUltra646CFR: + data.byte[kUltra646CFR & 0x03] &= ~kUltra646CFR_IDEIntPRI; + break; + case kUltra646DRWTIM0: + data.byte[kUltra646ARTTIM23 & 0x03] &= ~kUltra646ARTTIM23_IDEIntSDY; + break; + case kUltra646BMIDECR0: + data.byte[kUltra646MRDMODE & 0x03 ] &= ~(kUltra646MRDMODE_IDEIntPRI | kUltra646MRDMODE_IDEIntSDY); + data.byte[kUltra646BMIDESR0 & 0x03] &= ~(kUltra646BMIDESR0_DMAIntPRI | kUltra646BMIDESR0_DMAErrorPRI); + break; + case kUltra646BMIDECR1: + data.byte[kUltra646BMIDESR1 & 0x03] &= ~(kUltra646BMIDESR1_DMAIntSDY | kUltra646BMIDESR1_DMAErrorSDY); + break; + } + data.byte[reg & 0x03] = value; + + data.word = OSReadSwapInt32(&data.word, 0); + + provider->configWrite32( regWord, data.word ); +} + +UInt32 AppleUltra33ATA::pciReadLong( UInt32 reg ) +{ + return provider->configRead32( reg ); +} + +void AppleUltra33ATA::pciWriteLong( UInt32 reg, UInt32 value ) +{ + provider->configWrite32( reg, value ); +} + +/* These overrides take care of OpenFirmware referring to the controller + * as a child of the PCI device, "ata-4" */ + +bool AppleUltra33ATA::attach( IOService * provider ) +{ + if ( super::attach(provider) ) + { + // assumes the first child determines the path OF uses to reference the controller + pathProvider = OSDynamicCast(IOService, provider->getChildEntry(gIODTPlane)); + + if ( pathProvider ) + { + setLocation(pathProvider->getLocation(gIODTPlane), gIODTPlane); + setName(pathProvider->getName(gIODTPlane), gIODTPlane); + attachToParent(provider, gIODTPlane); + pathProvider->retain(); + pathProvider->detachFromParent(provider, gIODTPlane); + } + + return true; + } + + return false; +} + +void AppleUltra33ATA::detach( IOService * provider ) +{ + if ( pathProvider ) + { + detachFromParent(provider, gIODTPlane); + pathProvider->attachToParent(provider, gIODTPlane); + pathProvider->release(); + } + + super::detach(provider); +} diff --git a/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.h 
b/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.h new file mode 100644 index 000000000..71b6d39c9 --- /dev/null +++ b/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * AppleUltra33ATA.h + * + */ + +#include +#include +#include +#include + +#include +#include "AppleUltra33ATARegs.h" + +class AppleUltra33ATA : public IOATAStandardDriver +{ + OSDeclareDefaultStructors( AppleUltra33ATA ) + +public: + virtual bool attach( IOService * provider ); + virtual void detach( IOService * provider ); + void free(); + +protected: + bool configure( IOService *provider, ATAControllerInfo *controllerDataSize ); + + void interruptOccurred(); + + void enableControllerInterrupts(); + void disableControllerInterrupts(); + + bool calculateTiming( UInt32 deviceNum, ATATiming *timing ); + bool selectTiming( UInt32 deviceNum, ATATimingProtocol timingProtocol ); + + void newDeviceSelected( IOATADevice *newDevice ); + + void writeATAReg( UInt32 regIndex, UInt32 regValue ); + UInt32 readATAReg( UInt32 regIndex ); + + bool programDma( IOATAStandardCommand *cmd ); + bool startDma( IOATAStandardCommand *cmd ); + bool stopDma( IOATAStandardCommand *cmd, UInt32 *transferCount ); + bool checkDmaActive(); + bool resetDma(); + +private: + bool calculatePIOTiming( UInt32 deviceNum, ATATiming *timing ); + bool calculateDMATiming( UInt32 deviceNum, ATATiming *timing ); + bool calculateUltraDMATiming( UInt32 deviceNum, ATATiming *timing ); + + UInt32 pciReadByte( UInt32 reg ); + void pciWriteByte( UInt32 reg, UInt32 value ); + UInt32 pciReadLong( UInt32 reg ); + void pciWriteLong( UInt32 reg, UInt32 value ); + +private: + IOPCIDevice *provider; + IOService *pathProvider; + + UInt32 busNum; + + IOMemoryMap *ioMapATA[2]; + volatile UInt32 *ioBaseATA[2]; + + IOInterruptEventSource *interruptEventSource; + + Ultra646Regs ideTimingRegs[2]; + + IOBigMemoryCursor *dmaMemoryCursor; + Ultra646Descriptor *dmaDescriptors; + UInt32 dmaDescriptorsPhys; + UInt32 numDescriptors; + + void *bitBucketAddr; + UInt32 bitBucketAddrPhys; + + UInt32 dmaReqLength; + bool dmaIsWrite; +}; + diff --git 
a/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATARegs.h b/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATARegs.h new file mode 100644 index 000000000..7ba0e18a8 --- /dev/null +++ b/iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATARegs.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * PCI Control registers for Cmd646X chipset + * + */ +enum Ultra646RegsValues +{ + kUltra646CFR = 0x50, /* Configuration */ + kUltra646CFR_DSA1 = 0x40, + kUltra646CFR_IDEIntPRI = 0x04, + + kUltra646CNTRL = 0x51, /* Drive 0/1 Control Register */ + kUltra646CNTRL_Drive1ReadAhead = 0x80, + kUltra646CNTRL_Drive0ReadAhead = 0x40, + kUltra646CNTRL_EnableSDY = 0x08, + kUltra646CNTRL_EnablePRI = 0x04, + + kUltra646CMDTIM = 0x52, /* Task file timing (all drives) */ + kUltra646CMDTIM_Drive01CmdActive = 0xF0, + kUltra646CMDTIM_Drive01CmdRecovery = 0x0F, + + kUltra646ARTTIM0 = 0x53, /* Drive 0 Address Setup */ + kUltra646ARTTIM0_Drive0AddrSetup = 0xC0, + + kUltra646DRWTIM0 = 0x54, /* Drive 0 Data Read/Write - DACK Time */ + kUltra646DRWTIM0_Drive0DataActive = 0xF0, + kUltra646DRWTIM0_Drive0DataRecovery = 0x0F, + + kUltra646ARTTIM1 = 0x55, /* Drive 1 Address Setup */ + kUltra646ARTTIM1_Drive1AddrSetup = 0xC0, + + kUltra646DRWTIM1 = 0x56, /* Drive 1 Data Read/Write - DACK Time */ + kUltra646DRWTIM1_Drive1DataActive = 0xF0, + kUltra646DRWTIM1_Drive1DataRecover = 0x0F, + + kUltra646ARTTIM23 = 0x57, /* Drive 2/3 Control/Status */ + kUltra646ARTTIM23_AddrSetup = 0xC0, + kUltra646ARTTIM23_IDEIntSDY = 0x10, + kUltra646ARTTIM23_Drive3ReadAhead = 0x08, + kUltra646ARTTIM23_Drive2ReadAhead = 0x04, + + kUltra646DRWTIM2 = 0x58, /* Drive 2 Read/Write - DACK Time */ + kUltra646DRWTIM2_Drive2DataActive = 0xF0, + kUltra646DRWTIM2_Drive2DataRecovery = 0x0F, + + kUltra646BRST = 0x59, /* Read Ahead Count */ + + kUltra646DRWTIM3 = 0x5B, /* Drive 3 Read/Write - DACK Time */ + kUltra646DRWTIM3_Drive3DataActive = 0xF0, + kUltra646DRWTIM3_Drive3DataRecover = 0x0F, + + kUltra646BMIDECR0 = 0x70, /* BusMaster Command Register - Primary */ + kUltra646BMIDECR0_PCIWritePRI = 0x08, + kUltra646BMIDECR0_StartDMAPRI = 0x01, + + kUltra646MRDMODE = 0x71, /* DMA Master Read Mode Select */ + kUltra646MRDMODE_PCIReadMask = 0x03, + kUltra646MRDMODE_PCIRead = 0x00, + 
kUltra646MRDMODE_PCIReadMultiple = 0x01, + kUltra646MRDMODE_IDEIntPRI = 0x04, + kUltra646MRDMODE_IDEIntSDY = 0x08, + kUltra646MRDMODE_IntEnablePRI = 0x10, + kUltra646MRDMODE_IntEnableSDY = 0x20, + kUltra646MRDMODE_ResetAll = 0x40, + + kUltra646BMIDESR0 = 0x72, /* BusMaster Status Register - Primary */ + kUltra646BMIDESR0_Simplex = 0x80, + kUltra646BMIDESR0_Drive1DMACap = 0x40, + kUltra646BMIDESR0_Drive0DMACap = 0x20, + kUltra646BMIDESR0_DMAIntPRI = 0x04, + kUltra646BMIDESR0_DMAErrorPRI = 0x02, + kUltra646BMIDESR0_DMAActivePRI = 0x01, + + kUltra646UDIDETCR0 = 0x73, /* Ultra DMA Timing Control Register - Primary */ + kUltra646UDIDETCR0_Drive1UDMACycleTime = 0xC0, + kUltra646UDIDETCR0_Drive0UDMACycleTime = 0x30, + kUltra646UDIDETCR0_Drive1UDMAEnable = 0x02, + kUltra646UDIDETCR0_Drive0UDMAEnable = 0x01, + + kUltra646DTPR0 = 0x74, /* Descriptor Table Pointer - Primary */ + + kUltra646BMIDECR1 = 0x78, /* BusMaster Command Register - Secondary */ + kUltra646BMIDECR1_PCIWriteSDY = 0x08, + kUltra646BMIDECR1_StartDMASDY = 0x01, + + kUltra646BMIDESR1 = 0x7A, /* BusMaster Status Register - Secondary */ + kUltra646BMIDESR1_Simplex = 0x80, + kUltra646BMIDESR1_Drive3DMACap = 0x40, + kUltra646BMIDESR1_Drive2DMACap = 0x20, + kUltra646BMIDESR1_DMAIntSDY = 0x04, + kUltra646BMIDESR1_DMAErrorSDY = 0x02, + kUltra646BMIDESR1_DMAActiveSDY = 0x01, + + kUltra646UDIDETCR1 = 0x7B, /* Ultra DMA Timing Control Register - Secondary */ + kUltra646UDIDETCR1_Drive3UDMACycleTime = 0xC0, + kUltra646UDIDETCR1_Drive2UDMACycleTime = 0x30, + kUltra646UDIDETCR1_Drive3UDMAEnable = 0x02, + kUltra646UDIDETCR1_Drive2UDMAEnable = 0x01, + + kUltra646DTPR1 = 0x7C, /* Descriptor Table Pointer - Secondary */ +}; + +typedef struct +{ + UInt32 cntrlReg; + UInt32 arttimReg; + UInt32 cmdtimReg; + UInt32 drwtimRegPIO; + UInt32 drwtimRegDMA; + UInt32 udidetcrReg; +} Ultra646Regs; + + +typedef struct +{ + UInt32 start; + UInt32 length; +} Ultra646Descriptor; + + +#define IDE_SYSCLK_NS 30 diff --git 
a/iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.cpp b/iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.cpp new file mode 100644 index 000000000..403787906 --- /dev/null +++ b/iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.cpp @@ -0,0 +1,749 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * AppleUltra66ATA.cpp + * + */ +#include "AppleUltra66ATA.h" + +#undef super +#define super IOATAStandardDriver + +extern pmap_t kernel_pmap; + +OSDefineMetaClassAndStructors( AppleUltra66ATA, IOATAStandardDriver ) + +static inline int rnddiv( int x, int y ) +{ + if ( x < 0 ) + return 0; + else + return ( (x / y) + (( x % y ) ? 
1 : 0) ); +} + + +/* + * + * + */ +bool AppleUltra66ATA::configure( IOService *forProvider, ATAControllerInfo *controllerInfo ) +{ + provider = forProvider; + + if ( identifyController() == false ) + { + return false; + } + + ioMapATA = provider->mapDeviceMemoryWithIndex(0); + if ( ioMapATA == NULL ) return false; + ioBaseATA = (volatile UInt32 *)ioMapATA->getVirtualAddress(); + + ioMapDMA = provider->mapDeviceMemoryWithIndex(1); + if ( ioMapDMA == NULL ) return false; + ioBaseDMA = (volatile IODBDMAChannelRegisters *)ioMapDMA->getVirtualAddress(); + + dmaDescriptors = (IODBDMADescriptor *)kalloc(page_size); + if ( dmaDescriptors == 0 ) + { + return false; + } + + dmaDescriptorsPhys = (UInt32) pmap_extract(kernel_pmap, (vm_offset_t) dmaDescriptors); + + if ( (UInt32)dmaDescriptors & (page_size - 1) ) + { + IOLog("AppleUltra66ATA::%s() - DMA Descriptor memory not page aligned!!", __FUNCTION__); + return false; + } + + bzero( dmaDescriptors, page_size ); + + numDescriptors = page_size/sizeof(IODBDMADescriptor); + + dmaMemoryCursor = IOBigMemoryCursor::withSpecification( 64*1024-2, 0xffffffff ); + if ( dmaMemoryCursor == NULL ) + { + return false; + } + + bitBucketAddr = IOMalloc(32); + if ( bitBucketAddr == 0 ) + { + return false; + } + bitBucketAddrPhys = (UInt32) pmap_extract(kernel_pmap, (vm_offset_t) (((UInt32)bitBucketAddr + 0xf) & ~0x0f)); + + interruptEventSource = IOInterruptEventSource::interruptEventSource( (OSObject *) this, + (IOInterruptEventAction) &AppleUltra66ATA::interruptOccurred, + (IOService *) provider, + (int) 0 ); + + if ( interruptEventSource == NULL ) + { + return false; + } + + disableControllerInterrupts(); + + getWorkLoop()->addEventSource( interruptEventSource ); + + controllerInfo->maxDevicesSupported = 2; + controllerInfo->devicePrivateDataSize = 0; + controllerInfo->commandPrivateDataSize = 0; + controllerInfo->disableCancelCommands = false; + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::identifyController() +{ + 
OSData *compatibleEntry, *modelEntry; + + do + { + controllerType = kControllerTypeDBDMAVersion1; + + compatibleEntry = OSDynamicCast( OSData, provider->getProperty( "compatible" ) ); + if ( compatibleEntry == 0 ) break; + + if ( compatibleEntry->isEqualTo( "keylargo-ata", sizeof("keylargo-ata")-1 ) == true ) + { + controllerType = kControllerTypeDBDMAVersion2; + + modelEntry = OSDynamicCast( OSData, provider->getProperty("model") ); + if ( modelEntry == 0 ) break; + + if ( modelEntry->isEqualTo( "ata-4", sizeof("ata-4")-1 ) == true ) + { + controllerType = kControllerTypeUltra66DBDMA; + } + } + } while ( 0 ); + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::calculateTiming( UInt32 deviceNum, ATATiming *pTiming ) +{ + bool rc = false; + + switch ( controllerType ) + { + case kControllerTypeDBDMAVersion1: + case kControllerTypeDBDMAVersion2: + switch ( pTiming->timingProtocol ) + { + case kATATimingPIO: + rc = calculatePIOTiming( deviceNum, pTiming ); + break; + + case kATATimingDMA: + rc = calculateDMATiming( deviceNum, pTiming ); + break; + + default: + ; + } + break; + + case kControllerTypeUltra66DBDMA: + switch ( pTiming->timingProtocol ) + { + case kATATimingPIO: + rc = calculateUltra66PIOTiming( deviceNum, pTiming ); + break; + + case kATATimingDMA: + rc = calculateUltra66DMATiming( deviceNum, pTiming ); + break; + + case kATATimingUltraDMA66: + rc = calculateUltra66UDMATiming( deviceNum, pTiming ); + break; + + default: + ; + } + break; + + default: + ; + } + + return rc; +} + + +/* + * + * + */ +bool AppleUltra66ATA::calculatePIOTiming( UInt32 unitNum, ATATiming *pTiming ) +{ + int accessTime; + int accessTicks; + int recTime; + int recTicks; + int cycleTime; + + /* + * Calc PIO access time >= minDataAccess in SYSCLK increments + */ + accessTicks = rnddiv(pTiming->minDataAccess, kATASysClkNS); + /* + * Hardware limits access times to >= 120 ns + */ + accessTicks -= kATAPioAccessBase; + if (accessTicks < kATAPioAccessMin ) + { + accessTicks = 
kATAPioAccessMin; + } + accessTime = (accessTicks + kATAPioAccessBase) * kATASysClkNS; + + /* + * Calc recovery time in SYSCLK increments based on time remaining in cycle + */ + recTime = pTiming->minDataCycle - accessTime; + recTicks = rnddiv( recTime, kATASysClkNS ); + /* + * Hardware limits recovery time to >= 150ns + */ + recTicks -= kATAPioRecoveryBase; + if ( recTicks < kATAPioRecoveryMin ) + { + recTicks = kATAPioRecoveryMin; + } + + cycleTime = (recTicks + kATAPioRecoveryBase + accessTicks + kATAPioAccessBase) * kATASysClkNS; + + ideTimingWord[unitNum] &= ~0x7ff; + ideTimingWord[unitNum] |= accessTicks | (recTicks << 5); + +#if 0 + IOLog("AppleUltra66ATA::%s() Unit %1d PIO Requested Timings: Access: %3dns Cycle: %3dns \n\r", + __FUNCTION__, (int)unitNum, (int)pTiming->minDataAccess, (int)pTiming->minDataCycle); + IOLog("AppleUltra66ATA::%s() PIO Actual Timings: Access: %3dns Cycle: %3dns\n\r", + __FUNCTION__, accessTime, cycleTime ); +#endif + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::calculateDMATiming( UInt32 unitNum, ATATiming *pTiming ) +{ + int accessTime; + int accessTicks; + int recTime; + int recTicks; + int cycleTime; + int cycleTimeOrig; + int halfTick = 0; + + /* + * Calc DMA access time >= minDataAccess in SYSCLK increments + */ + + /* + * OHare II erata - Cant handle write cycle times below 150ns + */ + cycleTimeOrig = pTiming->minDataCycle; +#if 0 + if ( IsPowerStar() ) + { + if ( cycleTimeOrig < 150 ) pTiming->minDataCycle = 150; + } +#endif + + accessTicks = rnddiv(pTiming->minDataAccess, kATASysClkNS); + + accessTicks -= kATADmaAccessBase; + if ( accessTicks < kATADmaAccessMin ) + { + accessTicks = kATADmaAccessMin; + } + accessTime = (accessTicks + kATADmaAccessBase) * kATASysClkNS; + + /* + * Calc recovery time in SYSCLK increments based on time remaining in cycle + */ + recTime = pTiming->minDataCycle - accessTime; + recTicks = rnddiv( recTime, kATASysClkNS ); + + recTicks -= kATADmaRecoveryBase; + if ( recTicks < 
kATADmaRecoveryMin ) + { + recTicks = kATADmaRecoveryMin; + } + cycleTime = (recTicks + kATADmaRecoveryBase + accessTicks + kATADmaAccessBase) * kATASysClkNS; + + /* + * If our calculated access time is at least SYSCLK/2 > than what the disk requires, + * see if selecting the 1/2 Clock option will help. This adds SYSCLK/2 to + * the access time and subtracts SYSCLK/2 from the recovery time. + * + * By setting the H-bit and subtracting one from the current access tick count, + * we are reducing the current access time by SYSCLK/2 and the current recovery + * time by SYSCLK/2. Now, check if the new cycle time still meets the disk's requirements. + */ + if ( controllerType == kControllerTypeDBDMAVersion1 ) + { + if ( (accessTicks > kATADmaAccessMin) && ((UInt32)(accessTime - kATASysClkNS/2) >= pTiming->minDataAccess) ) + { + if ( (UInt32)(cycleTime - kATASysClkNS) >= pTiming->minDataCycle ) + { + halfTick = 1; + accessTicks--; + accessTime -= kATASysClkNS/2; + cycleTime -= kATASysClkNS; + } + } + } + + ideTimingWord[unitNum] &= ~0xffff800; + ideTimingWord[unitNum] |= (accessTicks | (recTicks << 5) | (halfTick << 10)) << 11; + +#if 0 + IOLog("AppleUltra66ATA::%s() Unit %1d DMA Requested Timings: Access: %3dns Cycle: %3dns \n\r", + __FUNCTION__, (int)unitNum, (int)pTiming->minDataAccess, (int)cycleTimeOrig); + IOLog("AppleUltra66ATA::%s() DMA Actual Timings: Access: %3dns Cycle: %3dns\n\r", + __FUNCTION__, accessTime, cycleTime ); + IOLog("AppleUltra66ATA::%s() Ide DMA Timings = %08lx\n\r", __FUNCTION__, ideTimingWord[unitNum] ); +#endif + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::calculateUltra66PIOTiming( UInt32 unitNum, ATATiming *pTiming ) +{ + int accessTime; + int accessTicks; + int recTime; + int recTicks; + int cycleTime; + + /* + * Calc PIO access time >= pioAccessTime in SYSCLK increments + */ + accessTicks = rnddiv(pTiming->minDataAccess * 1000, kATAUltra66ClockPS ); + accessTime = accessTicks * kATAUltra66ClockPS; + + /* + * Calc recovery 
time in SYSCLK increments based on time remaining in cycle + */ + recTime = pTiming->minDataCycle * 1000 - accessTime; + recTicks = rnddiv( recTime, kATAUltra66ClockPS ); + + cycleTime = (recTicks + accessTicks ) * kATAUltra66ClockPS; + + ideTimingWord[unitNum] &= ~0xe00003ff; + ideTimingWord[unitNum] |= accessTicks | (recTicks << 5); + +#if 0 + IOLog("AppleUltra66ATA::%s() Unit %1d PIO Requested Timings: Access: %3dns Cycle: %3dns \n\r", + __FUNCTION__, (int)unitNum, (int)pTiming->minDataAccess, (int)pTiming->minDataCycle); + IOLog("AppleUltra66ATA::%s() PIO Actual Timings: Access: %3dns Cycle: %3dns\n\r", + __FUNCTION__, accessTime / 1000, cycleTime / 1000 ); + IOLog("AppleUltra66ATA::%s() Ide PIO Timings = %08lx\n\r", __FUNCTION__, ideTimingWord[unitNum] ); +#endif + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::calculateUltra66DMATiming( UInt32 unitNum, ATATiming *pTiming ) +{ + int accessTime; + int accessTicks; + int recTime; + int recTicks; + int cycleTime; + + /* + * Calc DMA access time >= dmaAccessTime in SYSCLK increments + */ + accessTicks = rnddiv(pTiming->minDataAccess * 1000, kATAUltra66ClockPS); + accessTime = accessTicks * kATAUltra66ClockPS; + + /* + * Calc recovery time in SYSCLK increments based on time remaining in cycle + */ + recTime = pTiming->minDataCycle * 1000 - accessTime; + recTicks = rnddiv( recTime, kATAUltra66ClockPS ); + + cycleTime = (recTicks + accessTicks) * kATAUltra66ClockPS; + + ideTimingWord[unitNum] &= ~0x001ffc00; + ideTimingWord[unitNum] |= (accessTicks | (recTicks << 5)) << 10; + +#if 0 + IOLog("AppleUltra66ATA::%s() Unit %1d DMA Requested Timings: Access: %3dns Cycle: %3dns \n\r", + __FUNCTION__, (int)unitNum, (int)pTiming->minDataAccess, (int)pTiming->minDataCycle); + IOLog("AppleUltra66ATA::%s() DMA Actual Timings: Access: %3dns Cycle: %3dns\n\r", + __FUNCTION__, accessTime / 1000, cycleTime / 1000 ); + IOLog("AppleUltra66ATA::%s() Ide DMA Timings = %08lx\n\r", __FUNCTION__, ideTimingWord[unitNum] ); 
+#endif + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::calculateUltra66UDMATiming( UInt32 unitNum, ATATiming *pTiming ) +{ + int rdyToPauseTicks; + int rdyToPauseTime; + int cycleTime; + int cycleTicks; + + /* + * Ready to Pause delay in PCI_66_CLOCK / 2 increments + */ + rdyToPauseTicks = rnddiv(pTiming->minDataAccess * 1000, kATAUltra66ClockPS); + rdyToPauseTime = rdyToPauseTicks * kATAUltra66ClockPS; + + /* + * Calculate cycle time in PCI_66_CLOCK / 2 increments + */ + cycleTicks = rnddiv(pTiming->minDataCycle * 1000, kATAUltra66ClockPS); + cycleTime = cycleTicks * kATAUltra66ClockPS; + + ideTimingWord[unitNum] &= ~0x1ff00000; + ideTimingWord[unitNum] |= ((rdyToPauseTicks << 5) | (cycleTicks << 1) | 1) << 20; + +#if 0 + IOLog("AppleUltra66ATA::%s() Unit %1d UDMA66 Requested Timings: ReadyToPause: %3dns Cycle: %3dns \n\r", + __FUNCTION__, (int)unitNum, (int)pTiming->minDataAccess, (int)pTiming->minDataCycle); + IOLog("AppleUltra66ATA::%s() UDMA66 Actual Timings: ReadyToPause: %3dns Cycle: %3dns\n\r", + __FUNCTION__, rdyToPauseTime / 1000, cycleTime / 1000 ); + IOLog("AppleUltra66ATA::%s() Ide DMA Timings = %08lx\n\r", __FUNCTION__, ideTimingWord[unitNum] ); +#endif + + return true; +} + + +/* + * + * + */ +void AppleUltra66ATA::newDeviceSelected( IOATAStandardDevice *newDevice ) +{ + OSWriteSwapInt32( ioBaseATA, 0x200, ideTimingWord[newDevice->getUnit()] ); + eieio(); +} + + +/* + * + * + */ +bool AppleUltra66ATA::selectTiming( UInt32 unitNum, ATATimingProtocol timingProtocol ) +{ + if ( controllerType == kControllerTypeUltra66DBDMA ) + { + switch ( timingProtocol ) + { + case kATATimingUltraDMA66: + ideTimingWord[unitNum] |= 0x00100000; + break; + case kATATimingDMA: + ideTimingWord[unitNum] &= ~0x00100000; + break; + default: + ; + } + } + return true; +} + +/* + * + * + */ +bool AppleUltra66ATA::programDma( IOATAStandardCommand *cmd ) +{ + IOMemoryDescriptor *memoryDesc; + IODBDMADescriptor *dmaDesc; + UInt32 dmaCmd; + bool isWrite; + 
IOPhysicalSegment physSeg; + IOByteCount offset; + UInt32 i; + + IODBDMAReset( ioBaseDMA ); + + cmd->getPointers( &memoryDesc, &dmaReqLength, &isWrite ); + + if ( dmaReqLength == 0 ) + { + return true; + } + + offset = 0; + + dmaCmd = (isWrite == true) ? kdbdmaOutputMore : kdbdmaInputMore; + dmaDesc = dmaDescriptors; + + for ( i = 0; i < numDescriptors; i++, dmaDesc++ ) + { + if ( dmaMemoryCursor->getPhysicalSegments( memoryDesc, offset, &physSeg, 1 ) != 1 ) + { + break; + } + + IOMakeDBDMADescriptor( dmaDesc, + dmaCmd, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + physSeg.length, + physSeg.location ); + offset += physSeg.length; + } + + if ( i == numDescriptors ) + { + return false; + } + + /* + * Note: ATAPI always transfers even byte-counts. Send the extra byte to/from the bit-bucket + * if the requested transfer length is odd. + */ + if ( dmaReqLength & 1 ) + { + i++; + IOMakeDBDMADescriptor( dmaDesc++, + dmaCmd, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 1, + bitBucketAddrPhys ); + } + + + if ( i == numDescriptors ) + { + return false; + } + + + IOMakeDBDMADescriptor( dmaDesc, + kdbdmaStop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0 ); + + IOSetDBDMACommandPtr( ioBaseDMA, dmaDescriptorsPhys ); + + + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::startDma( IOATAStandardCommand * ) +{ + if ( dmaReqLength != 0 ) + { + IODBDMAContinue( ioBaseDMA ); + } + return true; +} + + +/* + * + * + */ +bool AppleUltra66ATA::stopDma( IOATAStandardCommand *, UInt32 *transferCount ) +{ + UInt32 i; + UInt32 ccResult; + UInt32 byteCount = 0; + + *transferCount = 0; + + if ( dmaReqLength == 0 ) + { + return true; + } + + IODBDMAStop( ioBaseDMA ); + + for ( i=0; i < numDescriptors; i++ ) + { + ccResult = IOGetCCResult( &dmaDescriptors[i] ); + + if ( (ccResult & (kdbdmaStatusActive | kdbdmaStatusDead)) == 0 ) + { + break; + } + byteCount += 
(IOGetCCOperation( &dmaDescriptors[i] ) & kdbdmaReqCountMask) - (ccResult & kdbdmaResCountMask); + } + + *transferCount = byteCount; + + return true; +} + +/* + * + * + */ +bool AppleUltra66ATA::resetDma() +{ + IODBDMAReset( ioBaseDMA ); + return true; +} + +/* + * + * + */ +bool AppleUltra66ATA::checkDmaActive() +{ + return ((IOGetDBDMAChannelStatus( ioBaseDMA ) & kdbdmaActive) != 0); +} + + +/* + * + * + */ +void AppleUltra66ATA::disableControllerInterrupts() +{ + interruptEventSource->disable(); +} + +/* + * + * + */ +void AppleUltra66ATA::enableControllerInterrupts() +{ + interruptEventSource->enable(); +} + +/* + * + * + */ +void AppleUltra66ATA::free() +{ + if ( interruptEventSource != 0 ) + { + interruptEventSource->disable(); + interruptEventSource->release(); + } + + if ( ioMapATA != 0 ) + { + ioMapATA->release(); + } + + if ( ioMapDMA != 0 ) + { + ioMapDMA->release(); + } + + if ( bitBucketAddr != 0 ) + { + IOFree( bitBucketAddr, 32 ); + } + + if ( dmaDescriptors != 0 ) + { + kfree( (vm_offset_t)dmaDescriptors, page_size ); + } +} + +/* + * + * + */ +void AppleUltra66ATA::writeATAReg( UInt32 regIndex, UInt32 regValue ) +{ + regIndex += (regIndex >= kATARegDeviceControl ) ? (kATACS3RegBase - kATARegDeviceControl + 6) : 0; + + if ( regIndex ) + { + *((volatile UInt8 *)ioBaseATA + (regIndex<<4)) = regValue; + } + else + { + *(volatile UInt16 *)ioBaseATA = regValue; + } + eieio(); +} + +UInt32 AppleUltra66ATA::readATAReg( UInt32 regIndex ) +{ + regIndex += (regIndex >= kATARegAltStatus ) ? 
(kATACS3RegBase - kATARegAltStatus + 6) : 0; + + if ( regIndex ) + { + return *((volatile UInt8 *)ioBaseATA + (regIndex<<4)); + } + else + { + return *(volatile UInt16 *)ioBaseATA; + } +} diff --git a/iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.h b/iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.h new file mode 100644 index 000000000..355ef13cd --- /dev/null +++ b/iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * AppleUltra66ATA.h + * + */ + +#include +#include + +#include + +class AppleUltra66ATA : public IOATAStandardDriver +{ + OSDeclareDefaultStructors( AppleUltra66ATA ) + +public: + void free(); + +protected: + bool configure( IOService *provider, ATAControllerInfo *controllerDataSize ); + + void enableControllerInterrupts(); + void disableControllerInterrupts(); + + bool calculateTiming( UInt32 deviceNum, ATATiming *timing ); + bool selectTiming( UInt32 deviceNum, ATATimingProtocol timingProtocol ); + + void newDeviceSelected( IOATAStandardDevice *newDevice ); + + void writeATAReg( UInt32 regIndex, UInt32 regValue ); + UInt32 readATAReg( UInt32 regIndex ); + + bool programDma( IOATAStandardCommand *cmd ); + bool startDma( IOATAStandardCommand *cmd ); + bool stopDma( IOATAStandardCommand *cmd, UInt32 *transferCount ); + bool resetDma(); + bool checkDmaActive(); + +private: + bool identifyController(); + + bool calculatePIOTiming( UInt32 deviceNum, ATATiming *timing ); + bool calculateDMATiming( UInt32 deviceNum, ATATiming *timing ); + + bool calculateUltra66PIOTiming( UInt32 deviceNum, ATATiming *timing ); + bool calculateUltra66DMATiming( UInt32 deviceNum, ATATiming *timing ); + bool calculateUltra66UDMATiming( UInt32 deviceNum, ATATiming *timing ); + +private: + IOService *provider; + + IOMemoryMap *ioMapATA; + volatile UInt32 *ioBaseATA; + + IOMemoryMap *ioMapDMA; + volatile IODBDMAChannelRegisters *ioBaseDMA; + + UInt32 controllerType; + + IOInterruptEventSource *interruptEventSource; + + UInt32 ideTimingWord[2]; + + IOBigMemoryCursor *dmaMemoryCursor; + IODBDMADescriptor *dmaDescriptors; + UInt32 dmaDescriptorsPhys; + UInt32 numDescriptors; + + void *bitBucketAddr; + UInt32 bitBucketAddrPhys; + + UInt32 dmaReqLength; +}; + +/* + * + * + */ +#define kATACS3RegBase (16) + +enum +{ + kATASysClkNS = 30, + kATAUltra66ClockPS = (15 * 1000 / 2), // PCI 66 period / 2 (pS) + + kATAPioAccessBase = 0, + kATAPioAccessMin = 
4, + kATAPioRecoveryBase = 4, + kATAPioRecoveryMin = 1, + + kATADmaAccessBase = 0, + kATADmaAccessMin = 1, + kATADmaRecoveryBase = 1, + kATADmaRecoveryMin = 1, +}; + +enum +{ + kControllerTypeDBDMAVersion1 = 1, + kControllerTypeDBDMAVersion2 = 2, + kControllerTypeUltra66DBDMA = 3, +}; diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.cpp b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.cpp new file mode 100644 index 000000000..1810d1854 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.cpp @@ -0,0 +1,244 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include "AppleADBButtons.h" +#include +#include +#include +#include + +#define super IOHIKeyboard +OSDefineMetaClassAndStructors(AppleADBButtons,IOHIKeyboard) + +bool displayWranglerFound( OSObject *, void *, IOService * ); +void button_data ( IOService * us, UInt8 adbCommand, IOByteCount length, UInt8 * data ); +void asyncFunc ( void * ); + +// ********************************************************************************** +// start +// +// ********************************************************************************** +bool AppleADBButtons::start ( IOService * theNub ) +{ + int i; + + for ( i = 0; i < kMax_registrations; i++ ) { + keycodes[i] = kNullKey; + downHandlers[i] = NULL; + } + + adbDevice = (IOADBDevice *)theNub; + + if( !super::start(theNub)) + return false; + + if( !adbDevice->seizeForClient(this, button_data) ) { + IOLog("%s: Seize failed\n", getName()); + return false; + } + + addNotification( gIOPublishNotification,serviceMatching("IODisplayWrangler"), // look for the display wrangler + (IOServiceNotificationHandler)displayWranglerFound, this, 0 ); + _initial_handler_id = adbDevice->handlerID(); + +return true; +} + +UInt64 AppleADBButtons::getGUID() +{ + return(kAppleOnboardGUID); +} + +// ********************************************************************************** +// displayWranglerFound +// +// The Display Wrangler has appeared. We will be calling its +// ActivityTickle method when there is user activity. 
+// ********************************************************************************** +bool displayWranglerFound( OSObject * us, void * ref, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((AppleADBButtons *)us)->displayManager = yourDevice; + } + return true; +} + +UInt32 AppleADBButtons::interfaceID() +{ + return NX_EVS_DEVICE_INTERFACE_ADB; +} + +UInt32 AppleADBButtons::deviceType() +{ + return adbDevice->handlerID(); +} + +// ********************************************************************************** +// registerForButton +// +// Clients call here, specifying a button and a routine to call when that +// button is pressed or released. +// ********************************************************************************** +IOReturn AppleADBButtons::registerForButton ( unsigned int keycode, IOService * registrant, button_handler handler, bool down ) +{ + int i; + + for ( i = 0; i < kMax_registrations; i++ ) { + if ( keycodes[i] == kNullKey ) { + if ( down ) { + registrants[i] = registrant; + downHandlers[i] = handler; + keycodes[i] = keycode; + break; + } + } + } + return kIOReturnSuccess; +} + +// ********************************************************************************** +// button_data +// +// ********************************************************************************** +void button_data ( IOService * us, UInt8 adbCommand, IOByteCount length, UInt8 * data ) +{ +((AppleADBButtons *)us)->packet(data,length,adbCommand); +} + + +// ********************************************************************************** +// packet +// +// ********************************************************************************** +IOReturn AppleADBButtons::packet (UInt8 * data, IOByteCount, UInt8 adbCommand ) +{ + unsigned int keycode; + bool down; + + keycode = *data; + down = ((keycode & 0x80) == 0); + keycode &= 0x7f; + dispatchButtonEvent(keycode,down); + + keycode = *(data + 1); + if( keycode != 0xff ) { + down = ((keycode & 0x80) == 0); + 
keycode &= 0x7f; + dispatchButtonEvent(keycode,down); + } + + if ( displayManager != NULL ) { // if there is a display manager, tell + displayManager->activityTickle(kIOPMSuperclassPolicy1); // it there is user activity + } + + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// dispatchButtonEvent +// +// Look for any registered handlers for this button and notify them. +// ********************************************************************************** +void AppleADBButtons::dispatchButtonEvent (unsigned int keycode, bool down ) +{ + int i; + AbsoluteTime now; + + if (_initial_handler_id == 0xc0) //For Apple ADB AV and ColorSync monitors + { + switch (keycode) + { + case kVolume_up_AV: + keycode = kVolume_up; + break; + case kVolume_down_AV: + keycode = kVolume_down; + break; + case kMute_AV: + keycode = kMute; + break; + default: + //No other volume codes are available for OS X + break; + } + } + + clock_get_uptime(&now); + + for ( i = 0; i < kMax_registrations; i++ ) { + if ( keycodes[i] == keycode ) { + if ( down ) { + if (downHandlers[i] != NULL ) { + thread_call_func((thread_call_func_t)downHandlers[i], + (thread_call_param_t)registrants[i], + true); + } + } + } + } + + //Only dispatch keycodes that this driver understands. + // See appleADBButtonsKeyMap[] for the list. 
+ switch (keycode) + { + case kVolume_up: + case kVolume_down: + case kMute: + case kEject: + case kBrightness_up: + case kBrightness_down: + case kNum_lock_on_laptops: + dispatchKeyboardEvent(keycode, down, now); + break; + default: //Don't dispatch anything else + break; + } +} + +const unsigned char *AppleADBButtons::defaultKeymapOfLength(UInt32 *length) +{ + static const unsigned char appleADBButtonsKeyMap[] = { + 0x00, 0x00, // chars + 0x00, // no modifier keys + 0x00, // no defs + 0x00, // no seqs + 0x07, // 6 special keys + NX_KEYTYPE_SOUND_UP, kVolume_up, + NX_KEYTYPE_SOUND_DOWN, kVolume_down, + NX_KEYTYPE_MUTE, kMute, + NX_KEYTYPE_BRIGHTNESS_UP, kBrightness_up, + NX_KEYTYPE_BRIGHTNESS_DOWN, kBrightness_down, + NX_KEYTYPE_NUM_LOCK, kNum_lock_on_laptops, + NX_KEYTYPE_EJECT, kEject + }; + + *length = sizeof(appleADBButtonsKeyMap); + + return appleADBButtonsKeyMap; +} + +IOReturn AppleADBButtons::setParamProperties(OSDictionary *dict) +{ + dict->removeObject(kIOHIDKeyMappingKey); + + return super::setParamProperties(dict); +} diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.h b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.h new file mode 100644 index 000000000..467ea3a25 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.h @@ -0,0 +1,78 @@ +#ifndef _APPLEADBBUTTONS_H +#define _APPLEADBBUTTONS_H + +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#define kVolume_up 0x06 +#define kVolume_down 0x07 +#define kMute 0x08 +#define kVolume_up_AV 0x03 //Apple ADB AV monitors have different button codes +#define kVolume_down_AV 0x02 +#define kMute_AV 0x01 +#define kBrightness_up 0x09 +#define kBrightness_down 0x0a +#define kEject 0x0b +#define kNum_lock_on_laptops 0x7f + +#define kMax_registrations 10 +#define kMax_keycode 0x0a +#define kNullKey 0xFF + +typedef void (*button_handler)(void * ); + +class AppleADBButtons : public IOHIKeyboard +{ + OSDeclareDefaultStructors(AppleADBButtons) + +private: + + unsigned int keycodes[kMax_registrations]; + void * registrants[kMax_registrations]; + button_handler downHandlers[kMax_registrations]; + + void dispatchButtonEvent (unsigned int, bool ); + UInt32 _initial_handler_id; + +public: + + const unsigned char * defaultKeymapOfLength (UInt32 * length ); + UInt32 interfaceID(); + UInt32 deviceType(); + UInt64 getGUID(); + +public: + + IOService * displayManager; // points to display manager + IOADBDevice * adbDevice; + + bool start ( IOService * theNub ); + IOReturn packet (UInt8 * data, IOByteCount length, UInt8 adbCommand ); + IOReturn registerForButton ( unsigned int, IOService *, button_handler, bool ); + + IOReturn setParamProperties(OSDictionary *dict); +}; + +#endif /* _APPLEADBBUTTONS_H */ diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.cpp b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.cpp new file mode 100644 index 
000000000..1c064379f --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.cpp @@ -0,0 +1,368 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. 
+ * sdouglas 23 Jul 98 - start IOKit + * suurballe 17 Nov 98 - ported to C++ + */ + +#include +#include + +#include "AppleADBDisplay.h" + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IODisplay +OSDefineMetaClassAndStructors( AppleADBDisplay, IODisplay ) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn AppleADBDisplay::findADBDisplayInfoForType( UInt16 deviceType ) +{ + char stringBuf[ 32 ]; + OSNumber * off; + OSString * key; + OSData * data; + + sprintf( stringBuf, "adb%dWiggle", deviceType); + if( (off = OSDynamicCast( OSNumber, getProperty( stringBuf )))) + wiggleLADAddr = off->unsigned32BitValue(); + else + wiggleLADAddr = kWiggleLADAddr; + + sprintf( stringBuf, "adb%dModes", deviceType); + key = OSDynamicCast( OSString, getProperty( stringBuf )); + + if( key && (data = OSDynamicCast( OSData, getProperty( key )))) { + modeList = (UInt32 *) data->getBytesNoCopy(); + numModes = data->getLength() / sizeof( UInt32); + } + if( modeList ) + return(kIOReturnSuccess); + else + return( -49 ); +} + + +IOReturn AppleADBDisplay::getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ) +{ + IOReturn err; + IODisplayConnect * connect; + IOFramebuffer * framebuffer; + int timingNum; + IOTimingInformation info; + + *flags = 0; + + connect = getConnection(); + assert( connect ); + framebuffer = connect->getFramebuffer(); + assert( framebuffer ); + + err = framebuffer->getTimingInfoForDisplayMode( mode, &info ); + + if( kIOReturnSuccess == err) { + for( timingNum = 0; timingNum < numModes; timingNum++ ) { + if( info.appleTimingID == modeList[ timingNum ] ) { + *flags = timingNum + ? 
( kDisplayModeValidFlag | kDisplayModeSafeFlag ) + : ( kDisplayModeValidFlag | kDisplayModeSafeFlag + | kDisplayModeDefaultFlag ); + break; + } + } + } + + return( err ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn AppleADBDisplay::doConnect( void ) +{ + IOReturn err; + UInt16 value; + UInt32 retries = 9; + IOByteCount length; + + while ( retries-- ) { + + value = 0x6000 | (adbDevice->address() << 8); + length = sizeof( value); + err = adbDevice->writeRegister( 3, (UInt8 *) &value, &length ); + if( err) + continue; + + /* IOSleep(10); */ + /* IODelay(1000); */ + length = sizeof( value); + err = adbDevice->readRegister( 3, (UInt8 *) &value, &length ); + if( err) + continue; + + if( (value & 0xf000) == 0x6000 ) + break; + else + err = kIOReturnNotAttached; + } + + if( err) + kprintf("%s: %d\n", __FUNCTION__, err); + + return(err); +} + +static void autoPollHandler( IOService * self, UInt8 adbCommand, + IOByteCount length, UInt8 * data ) +{ + ((AppleADBDisplay *)self)->packet( adbCommand, length, data ); +} + +void AppleADBDisplay::packet( UInt8 adbCommand, + IOByteCount length, UInt8 * data ) +{ + if( length && (*data == waitAckValue) ) + waitAckValue = 0; +} + +IOReturn AppleADBDisplay::writeWithAcknowledge( UInt8 regNum, + UInt16 data, UInt8 ackValue ) +{ + IOReturn err; + enum { kTimeoutMS = 400 }; + UInt32 timeout; + IOByteCount length; + + waitAckValue = ackValue; + + length = sizeof(data); + err = adbDevice->writeRegister( regNum, (UInt8 *) &data, &length ); + + if( !err) { + timeout = kTimeoutMS / 50; + while( waitAckValue && timeout-- ) + IOSleep( 50 ); + + if( waitAckValue ) + err = -3; + } + waitAckValue = 0; + + return( err ); +} + +IOReturn AppleADBDisplay::setLogicalRegister( UInt16 address, UInt16 data ) +{ + IOReturn err = -1; + UInt32 reconnects = 3; + + while( err && reconnects-- ) { + err = writeWithAcknowledge( kADBReg1, address, kReg2DataRdy); + if( err == kIOReturnSuccess ) + err = 
writeWithAcknowledge( kADBReg2, data, kReg2DataAck); + + if( err ) { + if( doConnect() ) + break; + } + } + if( err) + kprintf( "%s: %x, %d\n", __FUNCTION__, address, err); + + return( err); +} + +IOReturn AppleADBDisplay::getLogicalRegister( UInt16 address, UInt16 * data ) +{ + IOReturn err = -1; + UInt32 reconnects = 3; + UInt16 value; + IOByteCount length; + + while ( err && reconnects--) { + err = writeWithAcknowledge( kADBReg1, address, kReg2DataRdy); + if( err == kIOReturnSuccess ) { + length = sizeof( value); + err = adbDevice->readRegister( 2, (UInt8 *)&value, &length); + *data = value & 0xff; // actually only 8 bits + } + if( err) { + if ( doConnect()) + break; + } + } + if( err) + kprintf( "%s: %x=%x, %d\n", __FUNCTION__, address, *data, err); + + return err; +} + +void AppleADBDisplay::setWiggle( bool active ) +{ + setLogicalRegister( wiggleLADAddr, (active ? 1 : 0)); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleADBDisplay::start( IOService * nub ) +{ + IOReturn err; + UInt16 data, deviceType; + + if( !super::start( nub)) + return false; + + if( OSDynamicCast( IODisplayConnect, nub )) + return( true ); + + assert( OSDynamicCast( IOADBDevice, nub )); + adbDevice = (IOADBDevice *) nub; + + if( !adbDevice->seizeForClient( this, &autoPollHandler) ) { + IOLog("%s: sieze failed\n", getName()); + return( false); + } + + do { + err = doConnect(); + if( err ) + continue; + + err = setLogicalRegister( 0xff, 0xff); + if( err) + continue; + + err = getLogicalRegister( 0xff, &data); + if( err) + continue; + + err = getLogicalRegister( 0xff, &deviceType); + if( err) + continue; + + kprintf("%s: found AVType %d\n", + adbDevice->getName(), deviceType ); + + err = findADBDisplayInfoForType( deviceType ); + if( err) + continue; + + avDisplayID = deviceType; + setWiggle( false); + + registerService(); + return( true ); + + } while( false ); + + adbDevice->releaseFromClient( this ); + return( false ); +} + + +bool 
AppleADBDisplay::tryAttach( IODisplayConnect * connect ) +{ + IOReturn err; + bool attached = false; + UInt32 sense, extSense; + IOFramebuffer * framebuffer; + IOIndex fbConnect; + UInt32 senseType; + enum { + kRSCFour = 4, + kRSCSix = 6, + kESCFourNTSC = 0x0A, + kESCSixStandard = 0x2B, + }; + + do { + + framebuffer = connect->getFramebuffer(); + fbConnect = connect->getConnection(); + assert( framebuffer ); + + if( kIOReturnSuccess != framebuffer->getAttributeForConnection( + fbConnect, + kConnectionSupportsAppleSense, NULL )) + continue; + + err = framebuffer->getAppleSense( fbConnect, + &senseType, &sense, &extSense, 0 ); + if( err) + continue; + if( (sense != kRSCSix) || (extSense != kESCSixStandard) ) // straight-6 + continue; + + setWiggle( true ); + err = framebuffer->getAppleSense( fbConnect, + &senseType, &sense, &extSense, 0 ); + setWiggle( false ); + if( err) + continue; + if( (sense != kRSCFour) || (extSense != kESCFourNTSC) ) // straight-4 + continue; + + kprintf( "%s: attached to %s\n", + adbDevice->getName(), framebuffer->getName() ); + + attached = true; + + } while( false); + + return( attached); +} + + +IOService * AppleADBDisplay::probe( IOService * nub, SInt32 * score ) +{ + IODisplayConnect * connect; + + // both ADB device & display connections come here! + + do { + if( OSDynamicCast( IOADBDevice, nub)) + continue; + + if( (connect = OSDynamicCast( IODisplayConnect, nub)) + && tryAttach( connect)) + continue; + + nub = 0; + + } while( false ); + + if( nub) + return( super::probe( nub, score )); + else + return( 0 ); +} + + + diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.h b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.h new file mode 100644 index 000000000..2c909182e --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 23 Jul 98 - start IOKit + * suurballe 17 Nov 98 - ported to C++ + */ + +#include +#include + + +#define kOrgDisplayAddr 0x7 // Original display ADB address + +#define kTelecasterADBHandlerID 0x03 +#define kSmartDisplayADBHandlerID 0xc0 + +#define kADBReg0 0x0 // Device register zero +#define kADBReg1 0x1 // Device register one +#define kADBReg2 0x2 // Device register two +#define kADBReg3 0x3 // Device register three + +#define kReg2DataRdy 0xFD // data (to be read) ready +#define kReg2DataAck 0xFE // data (just written) OK +#define kWiggleLADAddr 0x04 // 0x0f on Telecaster & Sousa? 
+ +#if 0 + +#define kNoDevice -1 +#define kTelecaster 0 +#define kSousaSoundUnit 1 +#define kHammerhead 2 +#define kOrca 3 +#define kWhaler 4 +#define kWarriorEZ 5 +#define kManta 6 +#define kLastDeviceType kManta + +#define kDisplayLocalRemoteLADAddr 0x02 // lad address used in SetDisplayRemoteMode +#define kAudioKeypadEnableLADAddr 0x7D + +#define kUnknown -1 +#define kLocal 0 +#define kRemote 1 + +#endif + +class AppleADBDisplay: public IODisplay +{ + OSDeclareDefaultStructors(AppleADBDisplay) + +private: + + IOADBDevice * adbDevice; + volatile UInt8 waitAckValue; + UInt8 wiggleLADAddr; + SInt16 avDisplayID; + int numModes; + UInt32 * modeList; + + virtual IOReturn findADBDisplayInfoForType( UInt16 deviceType ); + virtual IOReturn getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ); + virtual IOReturn doConnect( void ); + virtual IOReturn writeWithAcknowledge( UInt8 regNum, UInt16 data, + UInt8 ackValue ); + virtual IOReturn setLogicalRegister( UInt16 address, UInt16 data ); + virtual IOReturn getLogicalRegister( UInt16 address, UInt16 * data ); + virtual void setWiggle( bool active ); + virtual bool tryAttach( IODisplayConnect * connect ); + +public: + + virtual bool start( IOService * nub ); + virtual IOService * probe( IOService * nub, SInt32 * score ); + + virtual void packet( UInt8 adbCommand, + IOByteCount length, UInt8 * data ); + +}; diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.cpp b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.cpp new file mode 100644 index 000000000..42657151c --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.cpp @@ -0,0 +1,415 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 18 June 1998 + * Start IOKit version. + */ + +#include "AppleADBKeyboard.h" +#include +#include +#include +#define super IOHIKeyboard +OSDefineMetaClassAndStructors(AppleADBKeyboard,IOHIKeyboard) + + +static void new_kbd_data ( IOService * us, UInt8 adbCommand, IOByteCount length, UInt8 * data ); +static void asyncSetLEDFunc ( thread_call_param_t, thread_call_param_t ); + +//Convert raw ADB codes to MacOS 9 KMAP virtual key codes in dispatchKeyboardEvent() +static unsigned char kmapConvert[] = + { + //00,00,00,00, These 4 are in System resource, but are unused + 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E,0x0F, + 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F, + 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F, + 0x30,0x31,0x32,0x33,0x34,0x35,0x3B,0x37,0x38,0x39,0x3A,0x7B,0x7C,0x7D,0x7E,0x3F, + 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4A,0x4B,0x4C,0x4D,0x4E,0x4F, + 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5A,0x5B,0x5C,0x5D,0x5E,0x5F, + 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F, + 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0x3C,0x3D,0x3E,0x36,0x7F, + 00,00 + }; + + +// 
********************************************************************************** +// start +// +// ********************************************************************************** +bool AppleADBKeyboard::start ( IOService * theNub ) +{ + +adbDevice = (IOADBDevice *)theNub; +if( !adbDevice->seizeForClient(this, new_kbd_data) ) { + IOLog("%s: Seize failed\n", getName()); + return false; +} + +turnLEDon = ADBKS_LED_CAPSLOCK | ADBKS_LED_NUMLOCK | ADBKS_LED_SCROLLLOCK; //negative logic +setAlphaLockFeedback(false); +setNumLockFeedback(false); + + +clock_interval_to_absolutetime_interval( 4, kSecondScale, &rebootTime); +clock_interval_to_absolutetime_interval( 1, kSecondScale, &debuggerTime); + +return super::start(theNub); +} + + +// ********************************************************************************** +// interfaceID +// +// ********************************************************************************** +UInt32 AppleADBKeyboard::interfaceID ( void ) +{ +return NX_EVS_DEVICE_INTERFACE_ADB; +} + + +// ********************************************************************************** +// deviceType +// +// ********************************************************************************** +UInt32 AppleADBKeyboard::deviceType ( void ) +{ + UInt32 id; //We need handler ID to remap adjustable JIS keyboard + + id = adbDevice->handlerID(); + if (id == 18) //Adjustable JIS + { + kmapConvert[0x32] = 0x35; //tilde to ESC + } + IORegistryEntry *regEntry; + OSData * data = 0; + UInt32 *dataptr; + + id = adbDevice->handlerID(); + if (id == 18) //Adjustable JIS + { + kmapConvert[0x32] = 0x35; //tilde to ESC + } + + if ((id == kgestaltPwrBkEKDomKbd) || (id == kgestaltPwrBkEKISOKbd) || (id == kgestaltPwrBkEKJISKbd)) + { + if( (regEntry = IORegistryEntry::fromPath( "/pci@f2000000/mac-io/via-pmu/adb/keyboard", gIODTPlane ))) + { + data = OSDynamicCast(OSData, regEntry->getProperty( "keyboard-id", gIODTPlane, kIORegistryIterateRecursively )); + if (data) + { + dataptr = 
(UInt32 *)data->getBytesNoCopy(); + id = *dataptr; //make sure no byte swapping + } + regEntry->release(); + } + } + + return id; +} + + +// ********************************************************************************** +// setAlphaLockFeedback +// This is usually called on a call-out thread after the caps-lock key is pressed. +// ADB operations to PMU are synchronous, and this is must not be done +// on the call-out thread since that is the PMU driver workloop thread, and +// it will block itself. +// +// Therefore, we schedule the ADB write to disconnect the call-out thread +// and the one that initiates the ADB write. +// +// ********************************************************************************** +void AppleADBKeyboard::setAlphaLockFeedback ( bool to ) +{ + if (to) + turnLEDon &= ~ADBKS_LED_CAPSLOCK; //Inverse logic applies here + else + turnLEDon |= ADBKS_LED_CAPSLOCK; + + thread_call_func(asyncSetLEDFunc, (thread_call_param_t)this, true); +} + +void AppleADBKeyboard::setNumLockFeedback ( bool to ) +{ + if (to) //LED on means clear that bit + turnLEDon &= ~ ADBKS_LED_NUMLOCK; + else + turnLEDon |= ADBKS_LED_NUMLOCK; + + thread_call_func(asyncSetLEDFunc, (thread_call_param_t)this, true); +} + + + +// ********************************************************************************** +// asyncSetLEDFunc +// +// Called asynchronously to turn on/off the capslock and numlock LED +// +// ********************************************************************************** +static void asyncSetLEDFunc ( thread_call_param_t self, thread_call_param_t ) +{ + +UInt16 value; +IOByteCount length = sizeof( UInt16); + + + value = ((AppleADBKeyboard*)self)->turnLEDon; + ((AppleADBKeyboard*)self)->adbDevice->writeRegister(2, (UInt8 *)&value, &length); +} + +/********************************************************************** +Get LED status by reading hardware. Register 2 has 16 bits. 
+**********************************************************************/
+// Hardware reports the LEDs active-low in register 2; this routine
+// inverts them so the returned bits follow the ADBKS_LED_* convention
+// (bit set == LED lit).
+unsigned AppleADBKeyboard::getLEDStatus (void )
+{
+    UInt8 data[8];  //8 bytes max for ADB read (talk) operation
+    IOByteCount length = 8;
+
+    bzero(data, 8);
+    LEDStatus = 0;
+    adbDevice->readRegister(2, data, &length);
+
+    if ((data[1] & ADBKS_LED_NUMLOCK) == 0)
+        LEDStatus |= ADBKS_LED_NUMLOCK;
+    if ((data[1] & ADBKS_LED_CAPSLOCK) == 0)
+        LEDStatus |= ADBKS_LED_CAPSLOCK;
+    if ((data[1] & ADBKS_LED_SCROLLLOCK) == 0)
+        LEDStatus |= ADBKS_LED_SCROLLLOCK;
+
+    return LEDStatus;
+}
+
+// **********************************************************************************
+// new_kbd_data
+//
+// Autopoll callback registered with seizeForClient(); trampolines into
+// the C++ packet() method.
+// **********************************************************************************
+static void new_kbd_data ( IOService * us, UInt8 adbCommand, IOByteCount length, UInt8 * data )
+{
+((AppleADBKeyboard *)us)->packet(data,length,adbCommand);
+}
+
+// **********************************************************************************
+// dispatchKeyboardEvent
+//
+// **********************************************************************************
+extern "C" {
+void Debugger( const char * );
+void boot(int paniced, int howto, char * command);
+#define RB_HALT 0x08 /* don't reboot, just halt */
+}
+
+// thread_call trampoline: halts the machine (RB_HALT passed via arg).
+static void AppleADBKeyboardReboot( thread_call_param_t arg, thread_call_param_t )
+{
+    boot( 0, (int) arg, 0 );
+}
+
+// Implements the control+power "programmer key": pressing power while
+// control is down only arms the timer; on release, a hold of at least
+// rebootTime halts the machine, a hold of at least debuggerTime drops
+// into the kernel debugger (thresholds are set up in start()).  All other
+// events are translated through kmapConvert[] and passed to IOHIKeyboard.
+void AppleADBKeyboard::dispatchKeyboardEvent(unsigned int keyCode,
+			 /* direction */ bool goingDown,
+			 /* timeStamp */ AbsoluteTime time)
+{
+    if( !goingDown && programmerKey) {
+        programmerKey = false;
+        EVK_KEYUP( ADBK_CONTROL, _keyState);
+        SUB_ABSOLUTETIME( &time, &programmerKeyTime );
+        if( CMP_ABSOLUTETIME( &time, &rebootTime) >= 0) {
+
+            thread_call_func( AppleADBKeyboardReboot,
+                              (void *) RB_HALT, true );
+        } else if( CMP_ABSOLUTETIME( &time, &debuggerTime) >= 0) {
+            Debugger("Programmer Key");
+        }
+
+    } else if( (keyCode == ADBK_POWER)
+            && (EVK_IS_KEYDOWN( ADBK_CONTROL, _keyState))) {
+
+        if( !programmerKey) {
+            programmerKey = true;
+            programmerKeyTime = time;
+        }
+        return;
+    }
+
+    super::dispatchKeyboardEvent( kmapConvert[keyCode], goingDown, time );
+
+}
+
+// **********************************************************************************
+// packet
+//
+// An ADB keyboard register-0 packet carries up to two key events, one per
+// byte (high bit clear == key down, low 7 bits == keycode).  0x7e is
+// folded onto ADBK_POWER; a second byte of 0xff means "no second event",
+// and a power-key pair (the power key reports 0x7f 0x7f) is dispatched
+// only once.
+// **********************************************************************************
+IOReturn AppleADBKeyboard::packet (UInt8 * data, IOByteCount, UInt8 adbCommand )
+{
+unsigned int keycode1, keycode2;
+bool down;
+AbsoluteTime now;
+
+keycode1 = *data;
+down = ((keycode1 & 0x80) == 0);
+keycode1 &= 0x7f;
+if(keycode1 == 0x7e) keycode1 = ADBK_POWER;
+clock_get_uptime(&now);
+
+dispatchKeyboardEvent(keycode1,down,now);
+
+keycode2 = *(data + 1);
+if( keycode2 != 0xff ) {
+    down = ((keycode2 & 0x80) == 0);
+    keycode2 &= 0x7f;
+    if( keycode2 == 0x7e) keycode2 = ADBK_POWER;
+    if( (keycode1 != ADBK_POWER) || (keycode2 != ADBK_POWER))
+        dispatchKeyboardEvent(keycode2,down,now);
+}
+
+return kIOReturnSuccess;
+}
+
+
+// **********************************************************************************
+// maxKeyCodes
+//
+// **********************************************************************************
+UInt32 AppleADBKeyboard::maxKeyCodes ( void )
+{
+return 0x80;
+}
+
+//Get key values from ev_keymap.h
+// Caps lock is the only locking key; num lock is deliberately reported
+// as non-locking here.
+bool AppleADBKeyboard:: doesKeyLock ( unsigned key)
+{
+    switch (key) {
+        case NX_KEYTYPE_CAPS_LOCK:
+            return true;
+        case NX_KEYTYPE_NUM_LOCK:
+            return false;
+        default:
+            return false;
+    }
+}
+
+
+// **********************************************************************************
+// defaultKeymapOfLength
+//
+// **********************************************************************************
+const unsigned char * AppleADBKeyboard::defaultKeymapOfLength (UInt32 * length )
+{
+static const unsigned char appleUSAKeyMap[] = {
+    0x00,0x00,
+    0x08,  //8 modifier keys
+    0x00,0x01,0x39,  //NX_MODIFIERKEY_ALPHALOCK
+    0x01,0x01,0x38,  //NX_MODIFIERKEY_SHIFT virtual from KMAP
+    0x02,0x01,0x3b,
//NX_MODIFIERKEY_CONTROL + 0x03,0x01,0x3a, //NX_MODIFIERKEY_ALTERNATE + 0x04,0x01,0x37, //NX_MODIFIERKEY_COMMAND + 0x05,0x15,0x52,0x41,0x4c,0x53,0x54,0x55,0x45,0x58,0x57,0x56,0x5b,0x5c, + 0x43,0x4b,0x51,0x7b,0x7d,0x7e,0x7c,0x4e,0x59, //NX_MODIFIERKEY_NUMERICPAD + 0x06,0x01,0x72, //NX_MODIFIERKEY_HELP 7th modifier here + 0x07,0x01,0x3f, //NX_MODIFIERKEY_SECONDARYFN 8th modifier + 0x7f,0x0d,0x00,0x61, + 0x00,0x41,0x00,0x01,0x00,0x01,0x00,0xca,0x00,0xc7,0x00,0x01,0x00,0x01,0x0d,0x00, + 0x73,0x00,0x53,0x00,0x13,0x00,0x13,0x00,0xfb,0x00,0xa7,0x00,0x13,0x00,0x13,0x0d, + 0x00,0x64,0x00,0x44,0x00,0x04,0x00,0x04,0x01,0x44,0x01,0xb6,0x00,0x04,0x00,0x04, + 0x0d,0x00,0x66,0x00,0x46,0x00,0x06,0x00,0x06,0x00,0xa6,0x01,0xac,0x00,0x06,0x00, + 0x06,0x0d,0x00,0x68,0x00,0x48,0x00,0x08,0x00,0x08,0x00,0xe3,0x00,0xeb,0x00,0x00, + 0x18,0x00,0x0d,0x00,0x67,0x00,0x47,0x00,0x07,0x00,0x07,0x00,0xf1,0x00,0xe1,0x00, + 0x07,0x00,0x07,0x0d,0x00,0x7a,0x00,0x5a,0x00,0x1a,0x00,0x1a,0x00,0xcf,0x01,0x57, + 0x00,0x1a,0x00,0x1a,0x0d,0x00,0x78,0x00,0x58,0x00,0x18,0x00,0x18,0x01,0xb4,0x01, + 0xce,0x00,0x18,0x00,0x18,0x0d,0x00,0x63,0x00,0x43,0x00,0x03,0x00,0x03,0x01,0xe3, + 0x01,0xd3,0x00,0x03,0x00,0x03,0x0d,0x00,0x76,0x00,0x56,0x00,0x16,0x00,0x16,0x01, + 0xd6,0x01,0xe0,0x00,0x16,0x00,0x16,0x02,0x00,0x3c,0x00,0x3e,0x0d,0x00,0x62,0x00, + 0x42,0x00,0x02,0x00,0x02,0x01,0xe5,0x01,0xf2,0x00,0x02,0x00,0x02,0x0d,0x00,0x71, + 0x00,0x51,0x00,0x11,0x00,0x11,0x00,0xfa,0x00,0xea,0x00,0x11,0x00,0x11,0x0d,0x00, + 0x77,0x00,0x57,0x00,0x17,0x00,0x17,0x01,0xc8,0x01,0xc7,0x00,0x17,0x00,0x17,0x0d, + 0x00,0x65,0x00,0x45,0x00,0x05,0x00,0x05,0x00,0xc2,0x00,0xc5,0x00,0x05,0x00,0x05, + 0x0d,0x00,0x72,0x00,0x52,0x00,0x12,0x00,0x12,0x01,0xe2,0x01,0xd2,0x00,0x12,0x00, + 0x12,0x0d,0x00,0x79,0x00,0x59,0x00,0x19,0x00,0x19,0x00,0xa5,0x01,0xdb,0x00,0x19, + 0x00,0x19,0x0d,0x00,0x74,0x00,0x54,0x00,0x14,0x00,0x14,0x01,0xe4,0x01,0xd4,0x00, + 0x14,0x00,0x14,0x0a,0x00,0x31,0x00,0x21,0x01,0xad,0x00,0xa1,0x0e,0x00,0x32,0x00, + 
0x40,0x00,0x32,0x00,0x00,0x00,0xb2,0x00,0xb3,0x00,0x00,0x00,0x00,0x0a,0x00,0x33, + 0x00,0x23,0x00,0xa3,0x01,0xba,0x0a,0x00,0x34,0x00,0x24,0x00,0xa2,0x00,0xa8,0x0e, + 0x00,0x36,0x00,0x5e,0x00,0x36,0x00,0x1e,0x00,0xb6,0x00,0xc3,0x00,0x1e,0x00,0x1e, + 0x0a,0x00,0x35,0x00,0x25,0x01,0xa5,0x00,0xbd,0x0a,0x00,0x3d,0x00,0x2b,0x01,0xb9, + 0x01,0xb1,0x0a,0x00,0x39,0x00,0x28,0x00,0xac,0x00,0xab,0x0a,0x00,0x37,0x00,0x26, + 0x01,0xb0,0x01,0xab,0x0e,0x00,0x2d,0x00,0x5f,0x00,0x1f,0x00,0x1f,0x00,0xb1,0x00, + 0xd0,0x00,0x1f,0x00,0x1f,0x0a,0x00,0x38,0x00,0x2a,0x00,0xb7,0x00,0xb4,0x0a,0x00, + 0x30,0x00,0x29,0x00,0xad,0x00,0xbb,0x0e,0x00,0x5d,0x00,0x7d,0x00,0x1d,0x00,0x1d, + 0x00,0x27,0x00,0xba,0x00,0x1d,0x00,0x1d,0x0d,0x00,0x6f,0x00,0x4f,0x00,0x0f,0x00, + 0x0f,0x00,0xf9,0x00,0xe9,0x00,0x0f,0x00,0x0f,0x0d,0x00,0x75,0x00,0x55,0x00,0x15, + 0x00,0x15,0x00,0xc8,0x00,0xcd,0x00,0x15,0x00,0x15,0x0e,0x00,0x5b,0x00,0x7b,0x00, + 0x1b,0x00,0x1b,0x00,0x60,0x00,0xaa,0x00,0x1b,0x00,0x1b,0x0d,0x00,0x69,0x00,0x49, + 0x00,0x09,0x00,0x09,0x00,0xc1,0x00,0xf5,0x00,0x09,0x00,0x09,0x0d,0x00,0x70,0x00, + 0x50,0x00,0x10,0x00,0x10,0x01,0x70,0x01,0x50,0x00,0x10,0x00,0x10,0x10,0x00,0x0d, + 0x00,0x03,0x0d,0x00,0x6c,0x00,0x4c,0x00,0x0c,0x00,0x0c,0x00,0xf8,0x00,0xe8,0x00, + 0x0c,0x00,0x0c,0x0d,0x00,0x6a,0x00,0x4a,0x00,0x0a,0x00,0x0a,0x00,0xc6,0x00,0xae, + 0x00,0x0a,0x00,0x0a,0x0a,0x00,0x27,0x00,0x22,0x00,0xa9,0x01,0xae,0x0d,0x00,0x6b, + 0x00,0x4b,0x00,0x0b,0x00,0x0b,0x00,0xce,0x00,0xaf,0x00,0x0b,0x00,0x0b,0x0a,0x00, + 0x3b,0x00,0x3a,0x01,0xb2,0x01,0xa2,0x0e,0x00,0x5c,0x00,0x7c,0x00,0x1c,0x00,0x1c, + 0x00,0xe3,0x00,0xeb,0x00,0x1c,0x00,0x1c,0x0a,0x00,0x2c,0x00,0x3c,0x00,0xcb,0x01, + 0xa3,0x0a,0x00,0x2f,0x00,0x3f,0x01,0xb8,0x00,0xbf,0x0d,0x00,0x6e,0x00,0x4e,0x00, + 0x0e,0x00,0x0e,0x00,0xc4,0x01,0xaf,0x00,0x0e,0x00,0x0e,0x0d,0x00,0x6d,0x00,0x4d, + 0x00,0x0d,0x00,0x0d,0x01,0x6d,0x01,0xd8,0x00,0x0d,0x00,0x0d,0x0a,0x00,0x2e,0x00, + 0x3e,0x00,0xbc,0x01,0xb3,0x02,0x00,0x09,0x00,0x19,0x0c,0x00,0x20,0x00,0x00,0x00, + 
0x80,0x00,0x00,0x0a,0x00,0x60,0x00,0x7e,0x00,0x60,0x01,0xbb,0x02,0x00,0x7f,0x00, + 0x08,0xff,0x02,0x00,0x1b,0x00,0x7e,0xff,0xff,0xff,0xff,0xff, + /* + 0x00,0x01,0xac,0x00, + 0x01,0xae,0x00,0x01,0xaf,0x00,0x01,0xad, + */ + 0xff, 0xff, 0xff, 0xff, + 0xff,0xff,0x00,0x00,0x2e,0xff,0x00,0x00, + 0x2a,0xff,0x00,0x00,0x2b,0xff,0x00,0x00,0x1b,0xff,0xff,0xff,0x0e,0x00,0x2f,0x00, + 0x5c,0x00,0x2f,0x00,0x1c,0x00,0x2f,0x00,0x5c,0x00,0x00,0x0a,0x00,0x00,0x00,0x0d, //XX03 + 0xff,0x00,0x00,0x2d,0xff,0xff,0x0e,0x00,0x3d,0x00,0x7c,0x00,0x3d,0x00,0x1c,0x00, + 0x3d,0x00,0x7c,0x00,0x00,0x18,0x46,0x00,0x00,0x30,0x00,0x00,0x31,0x00,0x00,0x32, + 0x00,0x00,0x33,0x00,0x00,0x34,0x00,0x00,0x35,0x00,0x00,0x36,0x00,0x00,0x37,0xff, + 0x00,0x00,0x38,0x00,0x00,0x39,0xff,0xff,0xff,0x00,0xfe,0x24,0x00,0xfe,0x25,0x00, + 0xfe,0x26,0x00,0xfe,0x22,0x00,0xfe,0x27,0x00,0xfe,0x28,0xff,0x00,0xfe,0x2a,0xff, + 0x00,0xfe,0x32,0xff,0x00,0xfe,0x33,0xff,0x00,0xfe,0x29,0xff,0x00,0xfe,0x2b,0xff, + 0x00,0xfe,0x34,0xff,0x00,0xfe,0x2e,0x00,0xfe,0x30,0x00,0xfe,0x2d,0x00,0xfe,0x23, + 0x00,0xfe,0x2f,0x00,0xfe,0x21,0x00,0xfe,0x31,0x00,0xfe,0x20, + //A.W. Added following 4 lines to fix wakeup on PowerBooks. + 0x00,0x01,0xac, //ADB=0x7b is left arrow + 0x00,0x01,0xae, //ADB = 0x7c is right arrow + 0x00,0x01,0xaf, //ADB=0x7d is down arrow. 
+ 0x00,0x01,0xad, //ADB=0x7e is up arrow + 0x0f,0x02,0xff,0x04, + 0x00,0x31,0x02,0xff,0x04,0x00,0x32,0x02,0xff,0x04,0x00,0x33,0x02,0xff,0x04,0x00, + 0x34,0x02,0xff,0x04,0x00,0x35,0x02,0xff,0x04,0x00,0x36,0x02,0xff,0x04,0x00,0x37, + 0x02,0xff,0x04,0x00,0x38,0x02,0xff,0x04,0x00,0x39,0x02,0xff,0x04,0x00,0x30,0x02, + 0xff,0x04,0x00,0x2d,0x02,0xff,0x04,0x00,0x3d,0x02,0xff,0x04,0x00,0x70,0x02,0xff, + 0x04,0x00,0x5d,0x02,0xff,0x04,0x00,0x5b, +0x06, // following are 6 special keys +0x05,0x72, //NX_KEYTYPE_HELP is 5, ADB code is 0x72 +0x06,0x7f, //NX_POWER_KEY is 6, ADB code is 0x7f +0x07,0x4a, //NX_KEYTYPE_MUTE is 7, ADB code is 0x4a +0x08,0x3e, //NX_UP_ARROW_KEY is 8, ADB is 3e raw, 7e virtual (KMAP) +0x09,0x3d, //NX_DOWN_ARROW_KEY is 9, ADB is 0x3d raw, 7d virtual +0x0a,0x47 //NX_KEYTYPE_NUM_LOCK is 10, ADB combines with CLEAR key for numlock + }; + +*length = sizeof(appleUSAKeyMap); +return appleUSAKeyMap; +} + + + diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.h b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.h new file mode 100644 index 000000000..6ab4ee976 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +enum { + kgestaltPwrBkEKDomKbd = 0xc3, + kgestaltPwrBkEKISOKbd = 0xc4, + kgestaltPwrBkEKJISKbd = 0xc5 +}; + +class AppleADBKeyboard : public IOHIKeyboard +{ +OSDeclareDefaultStructors(AppleADBKeyboard) + +private: + +void setAlphaLockFeedback ( bool to ); +void setNumLockFeedback ( bool to ); +UInt32 maxKeyCodes ( void ); +const unsigned char * defaultKeymapOfLength (UInt32 * length ); +UInt32 interfaceID ( void ); +UInt32 deviceType ( void ); +bool doesKeyLock ( unsigned key); +unsigned getLEDStatus (void ); +bool programmerKey; +AbsoluteTime programmerKeyTime; +AbsoluteTime rebootTime; +AbsoluteTime debuggerTime; + +public: + +IOADBDevice * adbDevice; +UInt16 turnLEDon; // used by setAlphaLockFeedback mechanism +UInt16 LEDStatus; //For ADB device TALK commands + +bool start ( IOService * theNub ); +IOReturn packet (UInt8 * data, IOByteCount length, UInt8 adbCommand ); +void dispatchKeyboardEvent(unsigned int keyCode, + /* direction */ bool goingDown, + /* timeStamp */ AbsoluteTime time); +}; + +/* + * Special key values + */ + +#define ADBK_LEFT 0x3B +#define ADBK_RIGHT 0x3C +#define ADBK_UP 0x3E +#define ADBK_DOWN 0x3D +#define ADBK_PGUP 0x74 +#define ADBK_PGDN 0x79 +#define ADBK_HOME 0x73 +#define ADBK_END 0x77 +#define ADBK_CONTROL 0x36 +#define ADBK_CONTROL_R 0x7D +#define ADBK_FLOWER 0x37 +#define ADBK_SHIFT 0x38 +#define ADBK_SHIFT_R 0x7B +#define ADBK_CAPSLOCK 0x39 +#define ADBK_OPTION 0x3A +#define ADBK_OPTION_R 0x7C +#define ADBK_NUMLOCK 
0x47 +#define ADBK_SPACE 0x31 +#define ADBK_F 0x03 +#define ADBK_O 0x1F +#define ADBK_P 0x23 +#define ADBK_Q 0x0C +#define ADBK_V 0x09 +#define ADBK_1 0x12 +#define ADBK_2 0x13 +#define ADBK_3 0x14 +#define ADBK_4 0x15 +#define ADBK_5 0x17 +#define ADBK_6 0x16 +#define ADBK_7 0x1A +#define ADBK_8 0x1C +#define ADBK_9 0x19 +#define ADBK_0 0x1D +#define ADBK_POWER 0x7f /* actual 0x7f 0x7f */ + +#define ADBK_KEYVAL(key) ((key) & 0x7f) +#define ADBK_PRESS(key) (((key) & 0x80) == 0) +#define ADBK_KEYDOWN(key) (key) +#define ADBK_KEYUP(key) ((key) | 0x80) +#define ADBK_MODIFIER(key) ((((key) & 0x7f) == ADBK_SHIFT) || \ + (((key) & 0x7f) == ADBK_SHIFT_R) || \ + (((key) & 0x7f) == ADBK_CONTROL) || \ + (((key) & 0x7f) == ADBK_CONTROL_R) || \ + (((key) & 0x7f) == ADBK_FLOWER) || \ + (((key) & 0x7f) == ADBK_OPTION) || \ + (((key) & 0x7f) == ADBK_OPTION_R) || \ + (((key) & 0x7f) == ADBK_NUMLOCK) || \ + (((key) & 0x7f) == ADBK_CAPSLOCK)) + +/* ADB Keyboard Status - ADB Register 2 */ + +#define ADBKS_LED_NUMLOCK 0x0001 +#define ADBKS_LED_CAPSLOCK 0x0002 +#define ADBKS_LED_SCROLLLOCK 0x0004 +#define ADBKS_SCROLL_LOCK 0x0040 +#define ADBKS_NUMLOCK 0x0080 +/* Bits 3 to 5 are reserved */ +#define ADBKS_APPLE_CMD 0x0100 +#define ADBKS_OPTION 0x0200 +#define ADBKS_SHIFT 0x0400 +#define ADBKS_CONTROL 0x0800 +#define ADBKS_CAPSLOCK 0x1000 +#define ADBKS_RESET 0x2000 +#define ADBKS_DELETE 0x4000 +/* bit 16 is reserved */ diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.cpp b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.cpp new file mode 100644 index 000000000..208e52007 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.cpp @@ -0,0 +1,454 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 18 June 1998 Start IOKit version. + * 18 Nov 1998 suurballe port to C++ + * 4 Oct 1999 decesare Revised for Type 4 support and sub-classed drivers. + * 1 Feb 2000 tsherman Added extended mouse functionality (implemented in setParamProperties) + */ + +#include "AppleADBMouse.h" +#include +#include +#include + +// Globals to remember enhanced trackpad values @ sleep and restore @ wake. 
+UInt8 glob_clicking = 0x19; +UInt8 glob_dragging = 0x14; +UInt8 glob_draglock = 0xB2; + +// **************************************************************************** +// NewMouseData +// +// **************************************************************************** +static void NewMouseData(IOService * target, UInt8 adbCommand, IOByteCount length, UInt8 * data) +{ + ((AppleADBMouse *)target)->packet(adbCommand, length, data); +} + + +// **************************************************************************** + +#undef super +#define super IOHIPointing + +OSDefineMetaClassAndStructors(AppleADBMouse, IOHIPointing); + + +// **************************************************************************** +// probe +// +// **************************************************************************** +IOService * AppleADBMouse::probe(IOService * provider, SInt32 * score) +{ + adbDevice = (IOADBDevice *)provider; + + return this; +} + + +// **************************************************************************** +// start +// +// **************************************************************************** +bool AppleADBMouse::start(IOService * provider) +{ + if(!super::start(provider)) return false; + + if(!adbDevice->seizeForClient(this, NewMouseData)) { + IOLog("%s: Seize failed\n", getName()); + return false; + } + + return true; +} + + +// **************************************************************************** +// interfaceID +// +// **************************************************************************** +UInt32 AppleADBMouse::interfaceID(void) +{ + return NX_EVS_DEVICE_INTERFACE_ADB; +} + + +// **************************************************************************** +// deviceType +// +// **************************************************************************** +UInt32 AppleADBMouse::deviceType ( void ) +{ + return adbDevice->handlerID(); +} + + +// **************************************************************************** +// resolution 
+// +// **************************************************************************** +IOFixed AppleADBMouse::resolution(void) +{ + return _resolution; +} + + +// **************************************************************************** +// buttonCount +// +// **************************************************************************** +IOItemCount AppleADBMouse::buttonCount(void) +{ + return _buttonCount; +} + + +// **************************************************************************** +// packet +// +// **************************************************************************** +void AppleADBMouse::packet(UInt8 /*adbCommand*/, + IOByteCount /*length*/, UInt8 * data) +{ + int dx, dy; + UInt32 buttonState = 0; + AbsoluteTime now; + + dy = data[0] & 0x7f; + dx = data[1] & 0x7f; + + if (dy & 0x40) dy |= 0xffffffc0; + if (dx & 0x40) dx |= 0xffffffc0; + + if ((data[0] & 0x80) == 0) buttonState |= 1; + + clock_get_uptime(&now); + dispatchRelativePointerEvent(dx, dy, buttonState, now); +} + + +// **************************************************************************** + +#undef super +#define super AppleADBMouse + +OSDefineMetaClassAndStructors(AppleADBMouseType1, AppleADBMouse); + +IOService * AppleADBMouseType1::probe(IOService * provider, SInt32 * score) +{ + if (!super::probe(provider, score)) return 0; + + return this; +} + +bool AppleADBMouseType1::start(IOService * provider) +{ + if (adbDevice->setHandlerID(1) != kIOReturnSuccess) return false; + + _resolution = 100 << 16; + _buttonCount = 1; + + return super::start(provider); +} + + +// **************************************************************************** + +#undef super +#define super AppleADBMouse + +OSDefineMetaClassAndStructors(AppleADBMouseType2, AppleADBMouse); + +IOService * AppleADBMouseType2::probe(IOService * provider, SInt32 * score) +{ + if (!super::probe(provider, score)) return 0; + + if (adbDevice->setHandlerID(2) != kIOReturnSuccess) return 0; + + return this; +} + +bool 
AppleADBMouseType2::start(IOService * provider) +{ + if (adbDevice->setHandlerID(2) != kIOReturnSuccess) return false; + + _resolution = 200 << 16; + _buttonCount = 1; + + return super::start(provider); +} + + +// **************************************************************************** + +#undef super +#define super AppleADBMouse + +OSDefineMetaClassAndStructors(AppleADBMouseType4, AppleADBMouse); + +IOService * AppleADBMouseType4::probe(IOService * provider, SInt32 * score) +{ + UInt8 data[8]; + IOByteCount length = 8; + + if (!super::probe(provider, score)) return 0; + + if (adbDevice->setHandlerID(4) != kIOReturnSuccess) { + adbDevice->setHandlerID(adbDevice->defaultHandlerID()); + return 0; + } + + // To be a Type 4 Extended Mouse, register 1 must return 8 bytes. + if (adbDevice->readRegister(1, data, &length) != kIOReturnSuccess) return 0; + if (length != 8) return 0; + + // Save the device's Extended Mouse Info. + deviceSignature = ((UInt32 *)data)[0]; + deviceResolution = ((UInt16 *)data)[2]; + deviceClass = data[6]; + deviceNumButtons = data[7]; + + return this; +} + +bool AppleADBMouseType4::start(IOService * provider) +{ + UInt8 adbdata[8]; + IOByteCount adblength = 8; + typeTrackpad = FALSE; + + if (adbDevice->setHandlerID(4) != kIOReturnSuccess) return false; + + _resolution = deviceResolution << 16; + _buttonCount = deviceNumButtons; + + adbDevice->readRegister(1, adbdata, &adblength); + if( (adbdata[0] == 't') && (adbdata[1] == 'p') && (adbdata[2] == 'a') && (adbdata[3] == 'd') ) + { + IOLog("Trackpad detected, "); + typeTrackpad = TRUE; + enableEnhancedMode(); + } + + return super::start(provider); +} + +void AppleADBMouseType4::packet(UInt8 /*adbCommand*/, IOByteCount length, UInt8 * data) +{ + int dx, dy, cnt, numExtraBytes; + UInt32 buttonState = 0; + AbsoluteTime now; + + numExtraBytes = length - 2; + + dy = data[0] & 0x7f; + dx = data[1] & 0x7f; + + if ((data[0] & 0x80) == 0) buttonState |= 1; + if ((deviceNumButtons > 1) && ((data[1] & 0x80) 
== 0)) + { + if(typeTrackpad == TRUE) + buttonState |= 1; + else + buttonState |= 2; + } + + for (cnt = 0; cnt < numExtraBytes; cnt++) { + dy |= ((data[2 + cnt] >> 4) & 7) << (7 + (cnt * 3)); + dx |= ((data[2 + cnt]) & 7) << (7 + (cnt * 3)); + + if ((deviceNumButtons > (cnt + 2)) && ((data[2 + cnt] & 0x80) == 0)) + buttonState |= 4 << (cnt * 2); + if ((deviceNumButtons > (cnt + 2 + 1)) && ((data[2 + cnt] & 0x08) == 0)) + buttonState |= 4 << (cnt * 2 + 1); + } + + if (dy & (0x40 << (numExtraBytes * 3))) + dy |= (0xffffffc0 << (numExtraBytes * 3)); + if (dx & (0x40 << (numExtraBytes * 3))) + dx |= (0xffffffc0 << (numExtraBytes * 3)); + + clock_get_uptime(&now); + dispatchRelativePointerEvent(dx, dy, buttonState, now); +} + +OSData * AppleADBMouseType4::copyAccelerationTable() +{ + char keyName[10]; + + strcpy( keyName, "accl" ); + keyName[4] = (deviceSignature >> 24); + keyName[5] = (deviceSignature >> 16); + keyName[6] = (deviceSignature >> 8); + keyName[7] = (deviceSignature >> 0); + keyName[8] = 0; + + OSData * data = OSDynamicCast( OSData, + getProperty( keyName )); + if( data) + data->retain(); + else + data = super::copyAccelerationTable(); + + return( data ); +} + +// **************************************************************************** +// enableEnhancedMode +// +// **************************************************************************** + +bool AppleADBMouseType4::enableEnhancedMode() +{ + UInt8 adbdata[8]; + IOByteCount adblength = 8; + + IOLog("enableEnhancedMode called.\n"); + adbDevice->readRegister(1, adbdata, &adblength); + + if((adbdata[6] != 0x0D)) + { + adbdata[6] = 0xD; + if (adbDevice->writeRegister(1, adbdata, &adblength) != 0) + return FALSE; + if (adbDevice->readRegister(1, adbdata, &adblength) != 0) + return FALSE; + if (adbdata[6] != 0x0D) + { + IOLog("AppleADBMouseType4 deviceClass = %d (non-Extended Mode)\n", adbdata[6]); + return FALSE; + } + IOLog("AppleADBMouseType4 deviceClass = %d (Extended Mode)\n", adbdata[6]); + + // Set 
ADB Extended Features to default values. + //adbdata[0] = 0x19; + adbdata[0] = glob_clicking; + //adbdata[1] = 0x14; + adbdata[1] = glob_dragging; + adbdata[2] = 0x19; + //adbdata[3] = 0xB2; + adbdata[3] = glob_draglock; + adbdata[4] = 0xB2; + adbdata[5] = 0x8A; + adbdata[6] = 0x1B; + adbdata[7] = 0x50; + adblength = 8; + + adbDevice->writeRegister(2, adbdata, &adblength); + + /* Add IORegistry entries for Enhanced mode */ + Clicking = FALSE; + Dragging = FALSE; + DragLock = FALSE; + + setProperty("Clicking", (unsigned long long)Clicking, sizeof(Clicking)*8); + setProperty("Dragging", (unsigned long long)Dragging, sizeof(Dragging)*8); + setProperty("DragLock", (unsigned long long)DragLock, sizeof(DragLock)*8); + + return TRUE; + } + + return FALSE; +} + +// **************************************************************************** +// setParamProperties +// +// **************************************************************************** +IOReturn AppleADBMouseType4::setParamProperties( OSDictionary * dict ) +{ + OSData * data; + IOReturn err = kIOReturnSuccess; + UInt8 adbdata[8]; + IOByteCount adblength; + + //IOLog("AppleADBMouseType4::setParamProperties starting here\n"); + + if( (data = OSDynamicCast(OSData, dict->getObject("Clicking"))) && (typeTrackpad == TRUE) ) + { + adblength = sizeof(adbdata); + adbDevice->readRegister(2, adbdata, &adblength); + glob_clicking = (adbdata[0] & 0x7F) | (*( (UInt8 *) data->getBytesNoCopy() ))<<7; + adbdata[0] = glob_clicking; + setProperty("Clicking", (unsigned long long)((adbdata[0]&0x80)>>7), sizeof(adbdata[0])*8); + adbDevice->writeRegister(2, adbdata, &adblength); + } + + if( (data = OSDynamicCast(OSData, dict->getObject("Dragging"))) && (typeTrackpad == TRUE) ) + { + adblength = sizeof(adbdata); + adbDevice->readRegister(2, adbdata, &adblength); + glob_dragging = (adbdata[1] & 0x7F) | (*( (UInt8 *) data->getBytesNoCopy() ))<<7; + adbdata[1] = glob_dragging; + setProperty("Dragging", (unsigned long 
long)((adbdata[1]&0x80)>>7), sizeof(adbdata[1])*8); + adbDevice->writeRegister(2, adbdata, &adblength); + } + + if( (data = OSDynamicCast(OSData, dict->getObject("DragLock"))) && (typeTrackpad == TRUE) ) + { + adblength = sizeof(adbdata); + adbDevice->readRegister(2, adbdata, &adblength); + adbdata[3] = *((UInt8 *) data->getBytesNoCopy()); + + if(adbdata[3]) + { + setProperty("DragLock", (unsigned long long)adbdata[3], sizeof(adbdata[3])*8); + glob_draglock = 0xFF; + adbdata[3] = glob_draglock; + adblength = sizeof(adbdata); + adbDevice->writeRegister(2, adbdata, &adblength); + } + else + { + setProperty("DragLock", (unsigned long long)adbdata[3], sizeof(adbdata[3])*8); + glob_draglock = 0xB2; + adbdata[3] = glob_draglock; + adblength = sizeof(adbdata); + adbDevice->writeRegister(2, adbdata, &adblength); + } + } + +#if 0 + // For debugging purposes + adblength = 8; + adbDevice->readRegister(2, adbdata, &adblength); + IOLog("adbdata[0] = 0x%x\n", adbdata[0]); + IOLog("adbdata[1] = 0x%x\n", adbdata[1]); + IOLog("adbdata[2] = 0x%x\n", adbdata[2]); + IOLog("adbdata[3] = 0x%x\n", adbdata[3]); + IOLog("adbdata[4] = 0x%x\n", adbdata[4]); + IOLog("adbdata[5] = 0x%x\n", adbdata[5]); + IOLog("adbdata[6] = 0x%x\n", adbdata[6]); + IOLog("adbdata[7] = 0x%x\n", adbdata[7]); +#endif + + if (err == kIOReturnSuccess) + { + //IOLog("AppleADBMouseType4::setParamProperties ending here\n"); + return super::setParamProperties(dict); + } + + IOLog("AppleADBMouseType4::setParamProperties failing here\n"); + return( err ); +} + diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.h b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.h new file mode 100644 index 000000000..668aadf6d --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 18 June 1998 sdouglas Start IOKit version. + * 18 Nov 1998 suurballe port to C++ + * 4 Oct 1999 decesare Revised for Type 4 support and sub-classed drivers. 
+ */ + +#include +#include + +#define TRUE 1 +#define FALSE 0 + +class AppleADBMouse: public IOHIPointing +{ + OSDeclareDefaultStructors(AppleADBMouse); + +protected: + IOADBDevice * adbDevice; + IOFixed _resolution; + IOItemCount _buttonCount; + +public: + virtual IOService * probe(IOService * provider, SInt32 * score); + virtual bool start(IOService * provider); + virtual UInt32 interfaceID(void); + virtual UInt32 deviceType(void); + virtual IOFixed resolution(void); + virtual IOItemCount buttonCount(void); + virtual void packet(UInt8 adbCommand, IOByteCount length, UInt8 * data); +}; + + +class AppleADBMouseType1 : public AppleADBMouse +{ + OSDeclareDefaultStructors(AppleADBMouseType1); + +public: + virtual IOService * probe(IOService * provider, SInt32 * score); + virtual bool start(IOService * provider); +}; + + +class AppleADBMouseType2 : public AppleADBMouse +{ + OSDeclareDefaultStructors(AppleADBMouseType2); + +public: + virtual IOService * probe(IOService * provider, SInt32 * score); + virtual bool start(IOService * provider); +}; + + +class AppleADBMouseType4 : public AppleADBMouse +{ + OSDeclareDefaultStructors(AppleADBMouseType4); + +private: + bool Clicking, Dragging, DragLock, typeTrackpad; + virtual IOReturn setParamProperties( OSDictionary * dict ); + bool enableEnhancedMode(); + +protected: + UInt32 deviceSignature; + UInt16 deviceResolution; + UInt8 deviceClass; + UInt8 deviceNumButtons; + +public: + virtual IOService * probe(IOService * provider, SInt32 * score); + virtual bool start(IOService * provider); + virtual void packet(UInt8 adbCommand, IOByteCount length, UInt8 * data); + virtual OSData * copyAccelerationTable(); +}; diff --git a/iokit/Drivers/hidsystem/drvAppleADBDevices/IOADBDevice.cpp b/iokit/Drivers/hidsystem/drvAppleADBDevices/IOADBDevice.cpp new file mode 100644 index 000000000..b416c86e6 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvAppleADBDevices/IOADBDevice.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, 
Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 18 June 1998 sdouglas Start IOKit version. + * 17 Nov 1998 suurballe Port objc to c++ + */ + +#include + +#define super IOService +OSDefineMetaClassAndStructors(IOADBDevice,IOService) + +// ********************************************************************************** +// init +// +// ********************************************************************************** +bool IOADBDevice::init ( OSDictionary * regEntry, ADBDeviceControl * us ) +{ +if( !super::init(regEntry)) + return false; + +fBusRef = us; +return true; +} + + +// ********************************************************************************** +// attach +// +// ********************************************************************************** +bool IOADBDevice::attach ( IOADBBus * controller ) +{ +if( !super::attach(controller)) + return false; + +bus = controller; +return true; +} + +// ********************************************************************************** +// matchPropertyTable +// +// ********************************************************************************** + +bool 
IOADBDevice::matchPropertyTable( OSDictionary * table ) +{ + return( bus->matchNubWithPropertyTable( this, table )); +} + +// ********************************************************************************** +// seizeForClient +// +// ********************************************************************************** +bool IOADBDevice::seizeForClient ( IOService * client, ADB_callback_func handler ) +{ +bus->setOwner(fBusRef,client,handler); + +return true; +} + + +// ********************************************************************************** +// releaseFromClient +// +// ********************************************************************************** +void IOADBDevice::releaseFromClient ( IORegistryEntry * ) +{ + kprintf("IOADBDevice::releaseFromClient\n"); + bus->clearOwner(fBusRef); +} + + +// ********************************************************************************** +// flush +// +// ********************************************************************************** +IOReturn IOADBDevice::flush ( void ) +{ +return( bus->flush(fBusRef) ); +} + + +// ********************************************************************************** +// readRegister +// +// ********************************************************************************** +IOReturn IOADBDevice::readRegister ( IOADBRegister adbRegister, UInt8 * data, + IOByteCount * length ) +{ +return( bus->readRegister(fBusRef,adbRegister,data,length) ); +} + + +// ********************************************************************************** +// writeRegister +// +// ********************************************************************************** +IOReturn IOADBDevice::writeRegister ( IOADBRegister adbRegister, UInt8 * data, + IOByteCount * length ) +{ +return( bus->writeRegister(fBusRef,adbRegister,data,length) ); +} + + +// ********************************************************************************** +// address +// +// 
********************************************************************************** +IOADBAddress IOADBDevice::address ( void ) +{ +return( bus->address(fBusRef) ); +} + + +// ********************************************************************************** +// defaultAddress +// +// ********************************************************************************** +IOADBAddress IOADBDevice::defaultAddress ( void ) +{ +return( bus->defaultAddress(fBusRef) ); +} + + +// ********************************************************************************** +// handlerID +// +// ********************************************************************************** +UInt8 IOADBDevice::handlerID ( void ) +{ +return( bus->handlerID(fBusRef) ); +} + + +// ********************************************************************************** +// defaultHandlerID +// +// ********************************************************************************** +UInt8 IOADBDevice::defaultHandlerID ( void ) +{ +return( bus->defaultHandlerID(fBusRef) ); +} + + +// ********************************************************************************** +// setHandlerID +// +// ********************************************************************************** +IOReturn IOADBDevice::setHandlerID ( UInt8 handlerID ) +{ +return( bus->setHandlerID(fBusRef,handlerID) ); +} + + +// ********************************************************************************** +// busRef +// +// ********************************************************************************** +void * IOADBDevice::busRef ( void ) +{ +return fBusRef; +} diff --git a/iokit/Drivers/hidsystem/drvApplePS2Keyboard/ApplePS2Keyboard.cpp b/iokit/Drivers/hidsystem/drvApplePS2Keyboard/ApplePS2Keyboard.cpp new file mode 100644 index 000000000..10b69712a --- /dev/null +++ b/iokit/Drivers/hidsystem/drvApplePS2Keyboard/ApplePS2Keyboard.cpp @@ -0,0 +1,1091 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include "ApplePS2Keyboard.h" + +// ============================================================================= +// ApplePS2Keyboard Class Implementation +// + +#define super IOHIKeyboard +OSDefineMetaClassAndStructors(ApplePS2Keyboard, IOHIKeyboard); + +UInt32 ApplePS2Keyboard::deviceType() { return NX_EVS_DEVICE_TYPE_KEYBOARD; }; +UInt32 ApplePS2Keyboard::interfaceID() { return NX_EVS_DEVICE_INTERFACE_ACE; }; + +UInt32 ApplePS2Keyboard::maxKeyCodes() { return KBV_NUM_KEYCODES; }; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Keyboard::init(OSDictionary * properties) +{ + // + // Initialize this object's minimal state. This is invoked right after this + // object is instantiated. 
+ // + + if (!super::init(properties)) return false; + + _device = 0; + _extendCount = 0; + _interruptHandlerInstalled = false; + _ledState = 0; + + for (int index = 0; index < KBV_NUNITS; index++) _keyBitVector[index] = 0; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +ApplePS2Keyboard * ApplePS2Keyboard::probe(IOService * provider, SInt32 * score) +{ + // + // The driver has been instructed to verify the presence of the actual + // hardware we represent. We are guaranteed by the controller that the + // keyboard clock is enabled and the keyboard itself is disabled (thus + // it won't send any asynchronous scan codes that may mess up the + // responses expected by the commands we send it). This is invoked + // after the init. + // + + ApplePS2KeyboardDevice * device = (ApplePS2KeyboardDevice *)provider; + PS2Request * request = device->allocateRequest(); + bool success; + + if (!super::probe(provider, score)) return 0; + + // + // Check to see if the keyboard responds to a basic diagnostic echo. + // + + // (diagnostic echo command) + request->commands[0].command = kPS2C_WriteDataPort; + request->commands[0].inOrOut = kDP_TestKeyboardEcho; + request->commands[1].command = kPS2C_ReadDataPortAndCompare; + request->commands[1].inOrOut = 0xEE; + request->commandsCount = 2; + device->submitRequestAndBlock(request); + + // (free the request) + success = (request->commandsCount == 2); + device->freeRequest(request); + + return (success) ? this : 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Keyboard::start(IOService * provider) +{ + // + // The driver has been instructed to start. This is called after a + // successful attach. + // + + if (!super::start(provider)) return false; + + // + // Maintain a pointer to and retain the provider object. 
+ // + + _device = (ApplePS2KeyboardDevice *)provider; + _device->retain(); + + // + // Install our driver's interrupt handler, for asynchronous data delivery. + // + + _device->installInterruptAction(this, + (PS2InterruptAction)&ApplePS2Keyboard::interruptOccurred); + _interruptHandlerInstalled = true; + + // + // Initialize the keyboard LED state. + // + + setLEDs(_ledState); + + // + // Enable the keyboard clock (should already be so), the keyboard IRQ line, + // and the keyboard Kscan -> scan code translation mode. + // + + setCommandByte(kCB_EnableKeyboardIRQ | kCB_TranslateMode, + kCB_DisableKeyboardClock); + + // + // Finally, we enable the keyboard itself, so that it may start reporting + // key events. + // + + setKeyboardEnable(true); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Keyboard::stop(IOService * provider) +{ + // + // The driver has been instructed to stop. Note that we must break all + // connections to other service objects now (ie. no registered actions, + // no pointers and retains to objects, etc), if any. + // + + assert(_device == provider); + + // + // Disable the keyboard itself, so that it may stop reporting key events. + // + + setKeyboardEnable(false); + + // + // Disable the keyboard clock and the keyboard IRQ line. + // + + setCommandByte(kCB_DisableKeyboardClock, kCB_EnableKeyboardIRQ); + + // + // Uninstall the interrupt handler. + // + + if ( _interruptHandlerInstalled ) _device->uninstallInterruptAction(); + _interruptHandlerInstalled = false; + + // + // Release the pointer to the provider object. 
+ // + + _device->release(); + _device = 0; + + super::stop(provider); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Keyboard::interruptOccurred(UInt8 scanCode) // PS2InterruptAction +{ + // + // This will be invoked automatically from our device when asynchronous + // keyboard data needs to be delivered. Process the keyboard data. Do + // NOT send any BLOCKING commands to our device in this context. + // + + if (scanCode == kSC_Acknowledge) + IOLog("%s: Unexpected acknowledge from PS/2 controller.\n", getName()); + else if (scanCode == kSC_Resend) + IOLog("%s: Unexpected resend request from PS/2 controller.\n", getName()); + else + dispatchKeyboardEventWithScancode(scanCode); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Keyboard::dispatchKeyboardEventWithScancode(UInt8 scanCode) +{ + // + // Parses the given scan code, updating all necessary internal state, and + // should a new key be detected, the key event is dispatched. + // + // Returns true if a key event was indeed dispatched. + // + + unsigned int keyCode; + bool goingDown; + AbsoluteTime now; + + // + // See if this scan code introduces an extended key sequence. If so, note + // it and then return. Next time we get a key we'll finish the sequence. + // + + if (scanCode == kSC_Extend) + { + _extendCount = 1; + return false; + } + + // + // See if this scan code introduces an extended key sequence for the Pause + // Key. If so, note it and then return. The next time we get a key, drop + // it. The next key we get after that finishes the Pause Key sequence. + // + // The sequence actually sent to us by the keyboard for the Pause Key is: + // + // 1. E1 Extended Sequence for Pause Key + // 2. 1D Useless Data, with Up Bit Cleared + // 3. 45 Pause Key, with Up Bit Cleared + // 4. E1 Extended Sequence for Pause Key + // 5. 9D Useless Data, with Up Bit Set + // 6. 
C5 Pause Key, with Up Bit Set + // + // The reason items 4 through 6 are sent with the Pause Key is because the + // keyboard hardware never generates a release code for the Pause Key and + // the designers are being smart about it. The sequence above translates + // to this parser as two separate events, as it should be -- one down key + // event and one up key event (for the Pause Key). + // + + if (scanCode == kSC_Pause) + { + _extendCount = 2; + return false; + } + + // + // Convert the scan code into a key code. + // + + if (_extendCount == 0) + keyCode = scanCode & ~kSC_UpBit; + else + { + _extendCount--; + if (_extendCount) return false; + + // + // Convert certain extended codes on the PC keyboard into single scancodes. + // Refer to the conversion table in defaultKeymapOfLength. + // + + switch (scanCode & ~kSC_UpBit) + { + case 0x1D: keyCode = 0x60; break; // ctrl + case 0x38: keyCode = 0x61; break; // alt + case 0x1C: keyCode = 0x62; break; // enter + case 0x35: keyCode = 0x63; break; // / + case 0x48: keyCode = 0x64; break; // up arrow + case 0x50: keyCode = 0x65; break; // down arrow + case 0x4B: keyCode = 0x66; break; // left arrow + case 0x4D: keyCode = 0x67; break; // right arrow + case 0x52: keyCode = 0x68; break; // insert + case 0x53: keyCode = 0x69; break; // delete + case 0x49: keyCode = 0x6A; break; // page up + case 0x51: keyCode = 0x6B; break; // page down + case 0x47: keyCode = 0x6C; break; // home + case 0x4F: keyCode = 0x6D; break; // end + case 0x37: keyCode = 0x6E; break; // PrintScreen + case 0x45: keyCode = 0x6F; break; // Pause + case 0x5B: keyCode = 0x70; break; // Left Windows + case 0x5C: keyCode = 0x71; break; // Right Windows + case 0x5D: keyCode = 0x72; break; // Application + case 0x2A: // header or trailer for PrintScreen + default: return false; + } + } + + if (keyCode == 0) return false; + + // + // Update our key bit vector, which maintains the up/down status of all keys. 
+ // + + goingDown = !(scanCode & kSC_UpBit); + + if (goingDown) + { + // + // Verify that this is not an autorepeated key -- discard it if it is. + // + + if (KBV_IS_KEYDOWN(keyCode, _keyBitVector)) return false; + + KBV_KEYDOWN(keyCode, _keyBitVector); + } + else + { + KBV_KEYUP(keyCode, _keyBitVector); + } + + // + // We have a valid key event -- dispatch it to our superclass. + // + + clock_get_uptime(&now); + + dispatchKeyboardEvent(keyCode, /*direction*/ goingDown, /*timeStamp*/ now); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Keyboard::setAlphaLockFeedback(bool locked) +{ + // + // Set the keyboard LEDs to reflect the state of alpha (caps) lock. + // + // It is safe to issue this request from the interrupt/completion context. + // + + _ledState = locked ? (_ledState | kLED_CapsLock):(_ledState & ~kLED_CapsLock); + setLEDs(_ledState); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Keyboard::setLEDs(UInt8 ledState) +{ + // + // Asynchronously instructs the controller to set the keyboard LED state. + // + // It is safe to issue this request from the interrupt/completion context. 
+ // + + PS2Request * request = _device->allocateRequest(); + + // (set LEDs command) + request->commands[0].command = kPS2C_WriteDataPort; + request->commands[0].inOrOut = kDP_SetKeyboardLEDs; + request->commands[1].command = kPS2C_ReadDataPortAndCompare; + request->commands[1].inOrOut = kSC_Acknowledge; + request->commands[2].command = kPS2C_WriteDataPort; + request->commands[2].inOrOut = ledState; + request->commands[3].command = kPS2C_ReadDataPortAndCompare; + request->commands[3].inOrOut = kSC_Acknowledge; + request->commandsCount = 4; + _device->submitRequest(request); // asynchronous, auto-free'd +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Keyboard::setKeyboardEnable(bool enable) +{ + // + // Instructs the keyboard to start or stop the reporting of key events. + // Be aware that while the keyboard is enabled, asynchronous key events + // may arrive in the middle of command sequences sent to the controller, + // and may get confused for expected command responses. + // + // It is safe to issue this request from the interrupt/completion context. + // + + PS2Request * request = _device->allocateRequest(); + + // (keyboard enable/disable command) + request->commands[0].command = kPS2C_WriteDataPort; + request->commands[0].inOrOut = (enable)?kDP_Enable:kDP_SetDefaultsAndDisable; + request->commands[1].command = kPS2C_ReadDataPortAndCompare; + request->commands[1].inOrOut = kSC_Acknowledge; + request->commandsCount = 2; + _device->submitRequest(request); // asynchronous, auto-free'd +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Keyboard::setCommandByte(UInt8 setBits, UInt8 clearBits) +{ + // + // Sets the bits setBits and clears the bits clearBits "atomically" in the + // controller's Command Byte. Since the controller does not provide such + // a read-modify-write primitive, we resort to a test-and-set try loop. 
+ // + // Do NOT issue this request from the interrupt/completion context. + // + + UInt8 commandByte; + UInt8 commandByteNew; + PS2Request * request = _device->allocateRequest(); + + do + { + // (read command byte) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_GetCommandByte; + request->commands[1].command = kPS2C_ReadDataPort; + request->commands[1].inOrOut = 0; + request->commandsCount = 2; + _device->submitRequestAndBlock(request); + + // + // Modify the command byte as requested by caller. + // + + commandByte = request->commands[1].inOrOut; + commandByteNew = (commandByte | setBits) & (~clearBits); + + // ("test-and-set" command byte) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_GetCommandByte; + request->commands[1].command = kPS2C_ReadDataPortAndCompare; + request->commands[1].inOrOut = commandByte; + request->commands[2].command = kPS2C_WriteCommandPort; + request->commands[2].inOrOut = kCP_SetCommandByte; + request->commands[3].command = kPS2C_WriteDataPort; + request->commands[3].inOrOut = commandByteNew; + request->commandsCount = 4; + _device->submitRequestAndBlock(request); + + // + // Repeat this loop if last command failed, that is, if the old command byte + // was modified since we first read it. + // + + } while (request->commandsCount != 4); + + _device->freeRequest(request); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +const unsigned char * ApplePS2Keyboard::defaultKeymapOfLength(UInt32 * length) +{ + // + // Returns the default x86 keymap string. + // + // The following keys are multi-byte sequences on the x86 keyboard. They get + // mapped into a single scan code for our purposes. 
Here is the mapping: + // PC Key PC Code NeXT Code + // Right-Ctrl E0-1D 0x60 + // Right-Alt E0-38 0x61 + // Keypad-Enter E0-1C 0x62 + // Keypad-/ E0-35 0x63 + // Up-Arrow E0-48 0x64 + // Down-Arrow E0-50 0x65 + // Left-Arrow E0-4B 0x66 + // Right-Arrow E0-4D 0x67 + // Insert E0-52 0x68 + // Delete E0-53 0x69 + // Page Up E0-49 0x6A + // Page Down E0-51 0x6B + // Home E0-47 0x6C + // End E0-4F 0x6D + // + // Because there is no Command key on the x86 keyboard, we've split the ALT + // keys up. We'll use Left-Alt as Command, and Right-Alt as ALT. + // + + #define CTRL(c) ((c)&037) + #define NX_MODIFIERKEY_ALPHALOCK 0 + #define NX_MODIFIERKEY_SHIFT 1 + #define NX_MODIFIERKEY_CONTROL 2 + #define NX_MODIFIERKEY_ALTERNATE 3 + #define NX_MODIFIERKEY_COMMAND 4 + #define NX_MODIFIERKEY_NUMERICPAD 5 + #define NX_MODIFIERKEY_HELP 6 + + static const unsigned char defaultKeymapForPC[] = + { + 0x00, 0x00, // char file format + + 6, // MODIFIER KEY DEFINITIONS (6) + 0x01, 0x02, 0x2A, 0x36, // Shift, 2 keys + 0x02, 0x02, 0x1D, 0x60, // Ctrl, 2 keys + 0x03, 0x01, 0x61, // Alt, 1 key + 0x04, 0x01, 0x38, // Cmd, 1 key + 0x05, 0x15, 0x52, 0x53, 0x62, 0x4F, 0x50, 0x51, 0x4B, 0x4C, 0x4D, + 0x4E, 0x47, 0x48, 0x49, 0x45, 0x63, 0x37, 0x4A, + 0x64, 0x65, 0x66, 0x67, // NumPad, 21 keys + 0x06, 0x01, 0x3B, // Help, 1 key + + 104, // KEY DEFINITIONS + 0xff, // Key 0x00 unassigned + // Key 0x01 modifier key mask bits (0x02) + (1<', // Shift + NX_ASCIISET, 0xbc, // Alt + NX_SYMBOLSET, 0xb3, // Shift Alt + // Key 0x35 modifier key mask bits (0x0a) + (1< +#include + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Definitions used to keep track of key state. Key up/down state is tracked +// in a bit list. Bits are set for key-down, and cleared for key-up. The bit +// vector and macros for it's manipulation are defined here. 
+// + +#define KBV_NUM_KEYCODES 128 +#define KBV_BITS_PER_UNIT 32 // for UInt32 +#define KBV_BITS_MASK 31 +#define KBV_BITS_SHIFT 5 // 1<<5 == 32, for cheap divide +#define KBV_NUNITS ((KBV_NUM_KEYCODES + \ + (KBV_BITS_PER_UNIT-1))/KBV_BITS_PER_UNIT) + +#define KBV_KEYDOWN(n, bits) \ + (bits)[((n)>>KBV_BITS_SHIFT)] |= (1 << ((n) & KBV_BITS_MASK)) + +#define KBV_KEYUP(n, bits) \ + (bits)[((n)>>KBV_BITS_SHIFT)] &= ~(1 << ((n) & KBV_BITS_MASK)) + +#define KBV_IS_KEYDOWN(n, bits) \ + (((bits)[((n)>>KBV_BITS_SHIFT)] & (1 << ((n) & KBV_BITS_MASK))) != 0) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// ApplePS2Keyboard Class Declaration +// + +class ApplePS2Keyboard : public IOHIKeyboard +{ + OSDeclareDefaultStructors(ApplePS2Keyboard); + +private: + ApplePS2KeyboardDevice * _device; + UInt32 _keyBitVector[KBV_NUNITS]; + UInt8 _extendCount; + UInt8 _interruptHandlerInstalled:1; + UInt8 _ledState; + + virtual bool dispatchKeyboardEventWithScancode(UInt8 scanCode); + virtual void setCommandByte(UInt8 setBits, UInt8 clearBits); + virtual void setLEDs(UInt8 ledState); + virtual void setKeyboardEnable(bool enable); + +protected: + virtual const unsigned char * defaultKeymapOfLength(UInt32 * length); + virtual void setAlphaLockFeedback(bool locked); + virtual UInt32 maxKeyCodes(); + +public: + virtual bool init(OSDictionary * properties); + virtual ApplePS2Keyboard * probe(IOService * provider, SInt32 * score); + + virtual bool start(IOService * provider); + virtual void stop(IOService * provider); + + virtual void interruptOccurred(UInt8 scanCode); + + virtual UInt32 deviceType(); + virtual UInt32 interfaceID(); +}; + +#endif /* _APPLEPS2KEYBOARD_H */ diff --git a/iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.cpp b/iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.cpp new file mode 100644 index 000000000..3afd11552 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.cpp @@ -0,0 +1,518 @@ +/* + * 
Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include "ApplePS2Mouse.h" + +// ============================================================================= +// ApplePS2Mouse Class Implementation +// + +#define super IOHIPointing +OSDefineMetaClassAndStructors(ApplePS2Mouse, IOHIPointing); + +UInt32 ApplePS2Mouse::deviceType() { return NX_EVS_DEVICE_TYPE_MOUSE; }; +UInt32 ApplePS2Mouse::interfaceID() { return NX_EVS_DEVICE_INTERFACE_BUS_ACE; }; + +IOItemCount ApplePS2Mouse::buttonCount() { return 3; }; +IOFixed ApplePS2Mouse::resolution() { return _resolution; }; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Mouse::init(OSDictionary * properties) +{ + // + // Initialize this object's minimal state. This is invoked right after this + // object is instantiated. 
+ // + + if (!super::init(properties)) return false; + + _device = 0; + _interruptHandlerInstalled = false; + _packetByteCount = 0; + _packetLength = kPacketLengthStandard; + _resolution = (150) << 16; // (default is 150 dpi; 6 counts/mm) + _type = kMouseTypeStandard; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +ApplePS2Mouse * ApplePS2Mouse::probe(IOService * provider, SInt32 * score) +{ + // + // The driver has been instructed to verify the presence of the actual + // hardware we represent. We are guaranteed by the controller that the + // mouse clock is enabled and the mouse itself is disabled (thus it + // won't send any asynchronous mouse data that may mess up the + // responses expected by the commands we send it). + // + + ApplePS2MouseDevice * device = (ApplePS2MouseDevice *)provider; + PS2Request * request = device->allocateRequest(); + bool success; + + if (!super::probe(provider, score)) return 0; + + // + // Check to see if acknowledges are being received for commands to the mouse. + // + + // (get information command) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_TransmitToMouse; + request->commands[1].command = kPS2C_WriteDataPort; + request->commands[1].inOrOut = kDP_GetMouseInformation; + request->commands[2].command = kPS2C_ReadDataPortAndCompare; + request->commands[2].inOrOut = kSC_Acknowledge; + request->commands[3].command = kPS2C_ReadDataPort; + request->commands[3].inOrOut = 0; + request->commands[4].command = kPS2C_ReadDataPort; + request->commands[4].inOrOut = 0; + request->commands[5].command = kPS2C_ReadDataPort; + request->commands[5].inOrOut = 0; + request->commandsCount = 6; + device->submitRequestAndBlock(request); + + // (free the request) + success = (request->commandsCount == 6); + device->freeRequest(request); + + return (success) ? 
this : 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Mouse::start(IOService * provider) +{ + // + // The driver has been instructed to start. This is called after a + // successful probe and match. + // + + if (!super::start(provider)) return false; + + // + // Maintain a pointer to and retain the provider object. + // + + _device = (ApplePS2MouseDevice *)provider; + _device->retain(); + + // + // Install our driver's interrupt handler, for asynchronous data delivery. + // + + _device->installInterruptAction(this, + (PS2InterruptAction)&ApplePS2Mouse::interruptOccurred); + _interruptHandlerInstalled = true; + + // + // Obtain our mouse's resolution and sampling rate. + // + + switch (getMouseInformation() & 0x00FF00) + { + case 0x0000: _resolution = (25) << 16; break; // 25 dpi + case 0x0100: _resolution = (50) << 16; break; // 50 dpi + case 0x0200: _resolution = (100) << 16; break; // 100 dpi + case 0x0300: _resolution = (200) << 16; break; // 200 dpi + default: _resolution = (150) << 16; break; // 150 dpi + } + + // + // Enable the Intellimouse mode, should this be an Intellimouse. + // + + if ( setIntellimouseMode() == true ) + { + _packetLength = kPacketLengthIntellimouse; + _type = kMouseTypeIntellimouse; + } + + // + // Enable the mouse clock (should already be so) and the mouse IRQ line. + // + + setCommandByte(kCB_EnableMouseIRQ, kCB_DisableMouseClock); + + // + // Finally, we enable the mouse itself, so that it may start reporting + // mouse events. + // + + setMouseEnable(true); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Mouse::stop(IOService * provider) +{ + // + // The driver has been instructed to stop. Note that we must break all + // connections to other service objects now (ie. no registered actions, + // no pointers and retains to objects, etc), if any. 
+ // + + assert(_device == provider); + + // + // Disable the mouse itself, so that it may stop reporting mouse events. + // + + setMouseEnable(false); + + // + // Disable the mouse clock and the mouse IRQ line. + // + + setCommandByte(kCB_DisableMouseClock, kCB_EnableMouseIRQ); + + // + // Uninstall the interrupt handler. + // + + if ( _interruptHandlerInstalled ) _device->uninstallInterruptAction(); + _interruptHandlerInstalled = false; + + // + // Release the pointer to the provider object. + // + + _device->release(); + _device = 0; + + super::stop(provider); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Mouse::interruptOccurred(UInt8 data) // PS2InterruptAction +{ + // + // This will be invoked automatically from our device when asynchronous mouse + // needs to be delivered. Process the mouse data. Do NOT send any BLOCKING + // commands to our device in this context. + // + // We ignore all bytes until we see the start of a packet, otherwise the mouse + // packets may get out of sequence and things will get very confusing. + // + + if (_packetByteCount == 0 && ((data == kSC_Acknowledge) || !(data & 0x08))) + { + IOLog("%s: Unexpected data from PS/2 controller.\n", getName()); + return; + } + + // + // Add this byte to the packet buffer. If the packet is complete, that is, + // we have the three bytes, dispatch this packet for processing. + // + + _packetBuffer[_packetByteCount++] = data; + + if (_packetByteCount == _packetLength) + { + dispatchRelativePointerEventWithPacket(_packetBuffer); + _packetByteCount = 0; + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Mouse::dispatchRelativePointerEventWithPacket(UInt8 * packet) +{ + // + // Process the three byte mouse packet that was retreived from the mouse. 
+ // The format of the bytes is as follows: + // + // 7 6 5 4 3 2 1 0 + // YO XO YS XS 1 M R L + // X7 X6 X5 X4 X3 X3 X1 X0 + // Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 + // Z7 Z6 Z5 Z4 Z3 Z2 Z1 Z0 <- fourth byte returned only for Intellimouse type + // + + UInt32 buttons = 0; + SInt32 dx; + SInt32 dy; + SInt32 dz; + AbsoluteTime now; + + if ( !(packet[0] & 0x1) ) buttons |= 0x1; // left button (bit 0 in packet) + if ( !(packet[0] & 0x2) ) buttons |= 0x2; // right button (bit 1 in packet) + if ( !(packet[0] & 0x4) ) buttons |= 0x4; // middle button (bit 2 in packet) + + dx = ((packet[0] & 0x10) ? 0xffffff00 : 0 ) | packet[1]; + dy = -(((packet[0] & 0x20) ? 0xffffff00 : 0 ) | packet[2]); + dz = (SInt32)((SInt8)packet[3]); + + clock_get_uptime(&now); + + dispatchRelativePointerEvent(dx, dy, buttons, now); + + return; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Mouse::setMouseEnable(bool enable) +{ + // + // Instructs the mouse to start or stop the reporting of mouse events. + // Be aware that while the mouse is enabled, asynchronous mouse events + // may arrive in the middle of command sequences sent to the controller, + // and may get confused for expected command responses. + // + // It is safe to issue this request from the interrupt/completion context. 
+ // + + PS2Request * request = _device->allocateRequest(); + + // (mouse enable/disable command) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_TransmitToMouse; + request->commands[1].command = kPS2C_WriteDataPort; + request->commands[1].inOrOut = (enable)?kDP_Enable:kDP_SetDefaultsAndDisable; + request->commands[2].command = kPS2C_ReadDataPortAndCompare; + request->commands[2].inOrOut = kSC_Acknowledge; + request->commandsCount = 3; + _device->submitRequest(request); // asynchronous, auto-free'd +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Mouse::setMouseSampleRate(UInt8 sampleRate) +{ + // + // Instructs the mouse to change its sampling rate to the given value, in + // reports per second. + // + // It is safe to issue this request from the interrupt/completion context. + // + + PS2Request * request = _device->allocateRequest(); + + // (set mouse sample rate command) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_TransmitToMouse; + request->commands[1].command = kPS2C_WriteDataPort; + request->commands[1].inOrOut = kDP_SetMouseSampleRate; + request->commands[2].command = kPS2C_ReadDataPortAndCompare; + request->commands[2].inOrOut = kSC_Acknowledge; + request->commands[3].command = kPS2C_WriteCommandPort; + request->commands[3].inOrOut = kCP_TransmitToMouse; + request->commands[4].command = kPS2C_WriteDataPort; + request->commands[4].inOrOut = sampleRate; + request->commands[5].command = kPS2C_ReadDataPortAndCompare; + request->commands[5].inOrOut = kSC_Acknowledge; + request->commandsCount = 6; + _device->submitRequest(request); // asynchronous, auto-free'd +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Mouse::setIntellimouseMode() +{ + // + // Determines whether this mouse is a Microsoft Intellimouse, and if it is, + // it enables it (the mouse will send 4 
byte packets for mouse events from + // then on). Returns true if the Intellimouse mode was succesfully enabled. + // + // Do NOT issue this request from the interrupt/completion context. + // + + UInt32 mouseInfo; + bool isIntellimouse; + + // + // Obtain the current sample rate, in order that we may restore it after + // the Intellimouse command sequence completes. + // + + mouseInfo = getMouseInformation(); + + if (mouseInfo == (UInt32)(-1)) return false; + + // + // Generate the special command sequence to enable the 'Intellimouse' mode. + // The sequence is to set the sampling rate to 200, 100, then 80, at which + // point the mouse will start sending 4 byte packets for mouse events and + // return a mouse ID of 3. + // + + setMouseSampleRate(200); + setMouseSampleRate(100); + setMouseSampleRate(80 ); + + // + // Determine whether we have an Intellimouse by asking for the mouse's ID. + // + + isIntellimouse = ( getMouseID() == kMouseTypeIntellimouse ); + + // + // Restore the original sampling rate, before we obliterated it. + // + + setMouseSampleRate(mouseInfo & 0x0000FF); + + return isIntellimouse; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 ApplePS2Mouse::getMouseInformation() +{ + // + // Asks the mouse to transmit its three information bytes. Should the + // mouse not respond, a value of (UInt32)(-1) is returned. + // + // Do NOT issue this request from the interrupt/completion context. 
+ // + + PS2Request * request = _device->allocateRequest(); + UInt32 returnValue = (UInt32)(-1); + + // (get information command) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_TransmitToMouse; + request->commands[1].command = kPS2C_WriteDataPort; + request->commands[1].inOrOut = kDP_GetMouseInformation; + request->commands[2].command = kPS2C_ReadDataPortAndCompare; + request->commands[2].inOrOut = kSC_Acknowledge; + request->commands[3].command = kPS2C_ReadDataPort; + request->commands[3].inOrOut = 0; + request->commands[4].command = kPS2C_ReadDataPort; + request->commands[4].inOrOut = 0; + request->commands[5].command = kPS2C_ReadDataPort; + request->commands[5].inOrOut = 0; + request->commandsCount = 6; + _device->submitRequestAndBlock(request); + + if (request->commandsCount == 6) // success? + { + returnValue = ((UInt32)request->commands[3].inOrOut << 16) | + ((UInt32)request->commands[4].inOrOut << 8 ) | + ((UInt32)request->commands[5].inOrOut); + } + _device->freeRequest(request); + + return returnValue; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt8 ApplePS2Mouse::getMouseID() +{ + // + // Asks the mouse to transmit its identification byte. Should the mouse + // not respond, a value of (UInt8)(-1) is returned. + // + // Note that some documentation on PS/2 mice implies that two identification + // bytes are returned and not one. This was proven to be false in my tests. + // + // Do NOT issue this request from the interrupt/completion context. 
+ // + + PS2Request * request = _device->allocateRequest(); + UInt8 returnValue = (UInt8)(-1); + + // (get information command) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_TransmitToMouse; + request->commands[1].command = kPS2C_WriteDataPort; + request->commands[1].inOrOut = kDP_GetId; + request->commands[2].command = kPS2C_ReadDataPortAndCompare; + request->commands[2].inOrOut = kSC_Acknowledge; + request->commands[3].command = kPS2C_ReadDataPort; + request->commands[3].inOrOut = 0; + request->commandsCount = 4; + _device->submitRequestAndBlock(request); + + if (request->commandsCount == 4) // success? + returnValue = request->commands[3].inOrOut; + + _device->freeRequest(request); + + return returnValue; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Mouse::setCommandByte(UInt8 setBits, UInt8 clearBits) +{ + // + // Sets the bits setBits and clears the bits clearBits "atomically" in the + // controller's Command Byte. Since the controller does not provide such + // a read-modify-write primitive, we resort to a test-and-set try loop. + // + // Do NOT issue this request from the interrupt/completion context. + // + + UInt8 commandByte; + UInt8 commandByteNew; + PS2Request * request = _device->allocateRequest(); + + do + { + // (read command byte) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_GetCommandByte; + request->commands[1].command = kPS2C_ReadDataPort; + request->commands[1].inOrOut = 0; + request->commandsCount = 2; + _device->submitRequestAndBlock(request); + + // + // Modify the command byte as requested by caller. 
+ // + + commandByte = request->commands[1].inOrOut; + commandByteNew = (commandByte | setBits) & (~clearBits); + + // ("test-and-set" command byte) + request->commands[0].command = kPS2C_WriteCommandPort; + request->commands[0].inOrOut = kCP_GetCommandByte; + request->commands[1].command = kPS2C_ReadDataPortAndCompare; + request->commands[1].inOrOut = commandByte; + request->commands[2].command = kPS2C_WriteCommandPort; + request->commands[2].inOrOut = kCP_SetCommandByte; + request->commands[3].command = kPS2C_WriteDataPort; + request->commands[3].inOrOut = commandByteNew; + request->commandsCount = 4; + _device->submitRequestAndBlock(request); + + // + // Repeat this loop if last command failed, that is, if the old command byte + // was modified since we first read it. + // + + } while (request->commandsCount != 4); + + _device->freeRequest(request); +} diff --git a/iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.h b/iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.h new file mode 100644 index 000000000..ae211bcf4 --- /dev/null +++ b/iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _APPLEPS2MOUSE_H +#define _APPLEPS2MOUSE_H + +#include +#include + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Local Declarations +// + +#define kPacketLengthMax 4 +#define kPacketLengthStandard 3 +#define kPacketLengthIntellimouse 4 + +typedef enum +{ + kMouseTypeStandard = 0x00, + kMouseTypeIntellimouse = 0x03 +} PS2MouseId; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// ApplePS2Mouse Class Declaration +// + +class ApplePS2Mouse : public IOHIPointing +{ + OSDeclareDefaultStructors(ApplePS2Mouse); + +private: + ApplePS2MouseDevice * _device; + unsigned _interruptHandlerInstalled:1; + UInt8 _packetBuffer[kPacketLengthMax]; + UInt32 _packetByteCount; + UInt32 _packetLength; + IOFixed _resolution; // (dots per inch) + PS2MouseId _type; + + virtual void dispatchRelativePointerEventWithPacket(UInt8 * packet); + virtual UInt8 getMouseID(); + virtual UInt32 getMouseInformation(); + virtual void setCommandByte(UInt8 setBits, UInt8 clearBits); + virtual bool setIntellimouseMode(); + virtual void setMouseEnable(bool enable); + virtual void setMouseSampleRate(UInt8 sampleRate); + +protected: + virtual IOItemCount buttonCount(); + virtual IOFixed resolution(); + +public: + virtual bool init(OSDictionary * properties); + virtual ApplePS2Mouse * probe(IOService * provider, SInt32 * score); + + virtual bool start(IOService * provider); + virtual void stop(IOService * provider); + + virtual void interruptOccurred(UInt8 data); + + virtual UInt32 deviceType(); + virtual UInt32 interfaceID(); +}; + +#endif /* _APPLEPS2MOUSE_H */ diff --git a/iokit/Drivers/network/AppleBPF/AppleBPF.cpp b/iokit/Drivers/network/AppleBPF/AppleBPF.cpp new file mode 100644 index 000000000..b31f0fffb --- /dev/null +++ b/iokit/Drivers/network/AppleBPF/AppleBPF.cpp @@ 
-0,0 +1,120 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * AppleBPF.cpp - BPF driver class implementation. + * + */ + +// Need to check with Simon on User/Client interface and how to do +// PostLoad, and check on IOBSD (IONeededResource) + +#include +#include +#include "AppleBPF.h" + +extern "C" { +#include +#include +#include +#include +} + +//------------------------------------------------------------------------ + +#define super IOService +OSDefineMetaClassAndStructors(AppleBPF, IOService); + +//------------------------------------------------------------------------ + + +// ----------------------------------------------------------------------- +// +// This is the first method to be called when an object of this class is +// instantiated. +// +bool AppleBPF::init(OSDictionary * properties) +{ + if (!super::init(properties)) + { IOLog("BPF: super init failed\n"); + return false; + } + + // Do class specific initialization here. Probably not necessary for + // this driver. 
+ +// IOLog("BPF: super init succeeded\n"); + return true; // return 'true' for success, 'false' for failure. +} + +// ----------------------------------------------------------------------- +// +// The driver has been matched, start it up. Do most initialization and +// resource allocation here. +// +bool AppleBPF::start(IOService * provider) +{ int i; + OSNumber *val; + extern struct bpf_d *bpf_dtab; + extern int nbpfilter; + + if (!super::start(provider)) + { IOLog("BPF: super start failed\n"); + return false; + } + + val = OSDynamicCast(OSNumber, getObject("IODevCount")); + if (val == 0) + nbpfilter = DEFAULT_BPF_DEV_COUNT; + else + nbpfilter = val->unsigned32BitValue(); + +// bpfops.bpf_tap = bpf_tap; +// bpfops.bpf_mtap = bpf_mtap; + + bpf_dtab = (struct bpf_d *)IOMalloc(sizeof (struct bpf_d) * nbpfilter); + if (bpf_dtab == NULL) + { IOLog("%s: couldn't get memory for descriptor table\n", + getName()); + return false; + } + + /* + * Mark all the descriptors free + */ + for (i = 0; i < nbpfilter; ++i) + D_MARKFREE(&bpf_dtab[i]); + +// IOLog("AppleBPF::start() called\n"); + + return true; // return 'true' for success, 'false' for failure. +} + +// ----------------------------------------------------------------------- +// +// Release all resources before the driver goes away. +// +void AppleBPF::stop(IOService * provider) +{ extern struct bpf_d *bpf_dtab; + extern int nbpfilter; + + IOFree((void *)bpf_dtab, sizeof (struct bpf_d) * nbpfilter); +} diff --git a/iokit/Drivers/network/AppleBPF/AppleBPF.h b/iokit/Drivers/network/AppleBPF/AppleBPF.h new file mode 100644 index 000000000..410cfb77a --- /dev/null +++ b/iokit/Drivers/network/AppleBPF/AppleBPF.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * AppleBPF.h - BPF driver header file. + * + */ + +#ifndef _APPLEBPF_H +#define _APPLEBPF_H + +#include + +class AppleBPF : public IOService +{ + OSDeclareDefaultStructors(AppleBPF) + +private: + +public: + virtual bool init(OSDictionary * properties); + virtual bool start(IOService * provider); + virtual void stop(IOService * provider); +}; + +/* What you get if you don't ask */ +#define DEFAULT_BPF_DEV_COUNT 4 + +/* define this if a (loadable) kernel extension */ +/* #define BPFDRV */ +#endif /* !_APPLEBPF_H */ diff --git a/iokit/Drivers/network/AppleBPF/bpf.c b/iokit/Drivers/network/AppleBPF/bpf.c new file mode 100644 index 000000000..d808edb53 --- /dev/null +++ b/iokit/Drivers/network/AppleBPF/bpf.c @@ -0,0 +1,1290 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1990, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)bpf.c 8.4 (Berkeley) 1/9/95 + */ + +#define BPFDRV + +#ifndef BPFDRV +#include +#endif /* not BPFDRV */ + +#include +#include +#include +#if 0 +#ifdef BPFDRV +#include +#endif /* BPFDRV */ +#endif +#include +#include +#include +#include +#include +#include + +#include +#if defined(sparc) && BSD < 199103 +#include +#endif +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#include +#include +#include + +/* + * Older BSDs don't have kernel malloc. + */ +#if BSD < 199103 +extern bcopy(); +static caddr_t bpf_alloc(); +#include +#define BPF_BUFSIZE (MCLBYTES-8) +#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) +#else +#include +#define BPF_BUFSIZE 4096 +#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) +#endif + +#define PRINET 26 /* interruptible */ + +/* + * The default read buffer size is patchable. + */ +#ifndef BPFDRV +int bpf_bufsize = BPF_BUFSIZE; +#else +extern int bpf_bufsize; +#endif /* BPFDRV */ + +/* + * bpf_iflist is the list of interfaces; each corresponds to an ifnet + * bpf_dtab holds the descriptors, indexed by minor device # + */ +#ifdef BPFDRV +extern struct bpf_if *bpf_iflist; +extern struct bpf_d *bpf_dtab; +extern int nbpfilter; +#else +struct bpf_if *bpf_iflist; +struct bpf_d bpf_dtab[NBPFILTER]; +int nbpfilter = NBPFILTER; +#endif + +#ifdef BPFDRV +#if BSD >= 199207 +/* + * bpfilterattach() is called at boot time in new systems. 
We do + * nothing here since old systems will not call this. + */ +/* ARGSUSED */ +void +bpfilterattach(n) + int n; +{ +} +#endif +#endif /* BPFDRV */ + +static int bpf_allocbufs __P((struct bpf_d *)); +static int bpf_allocbufs __P((struct bpf_d *)); +static void bpf_freed __P((struct bpf_d *)); +static void bpf_freed __P((struct bpf_d *)); +static void bpf_ifname __P((struct ifnet *, struct ifreq *)); +static void bpf_ifname __P((struct ifnet *, struct ifreq *)); +static void bpf_mcopy __P((const void *, void *, u_int)); +static int bpf_movein __P((struct uio *, int, + struct mbuf **, struct sockaddr *, int *)); +static int bpf_setif __P((struct bpf_d *, struct ifreq *)); +static int bpf_setif __P((struct bpf_d *, struct ifreq *)); +static __inline void + bpf_wakeup __P((struct bpf_d *)); +static void catchpacket __P((struct bpf_d *, u_char *, u_int, + u_int, void (*)(const void *, void *, u_int))); +static void reset_d __P((struct bpf_d *)); + +#ifdef BPFDRV +static int +bpf_movein(uio, linktype, mp, sockp, datlen) + register struct uio *uio; + int linktype, *datlen; + register struct mbuf **mp; + register struct sockaddr *sockp; +{ + struct mbuf *m; + int error; + int len; + int hlen; + + /* + * Build a sockaddr based on the data link layer type. + * We do this at this level because the ethernet header + * is copied directly into the data field of the sockaddr. + * In the case of SLIP, there is no header and the packet + * is forwarded as is. + * Also, we are careful to leave room at the front of the mbuf + * for the link level header. + */ + switch (linktype) { + + case DLT_SLIP: + sockp->sa_family = AF_INET; + hlen = 0; + break; + + case DLT_PPP: + sockp->sa_family = AF_UNSPEC; + hlen = 0; + break; + + case DLT_EN10MB: + sockp->sa_family = AF_UNSPEC; + /* XXX Would MAXLINKHDR be better? 
*/ + hlen = sizeof(struct ether_header); + break; + + case DLT_FDDI: + sockp->sa_family = AF_UNSPEC; + /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ + hlen = 24; + break; + + case DLT_NULL: + sockp->sa_family = AF_UNSPEC; + hlen = 0; + break; + + default: + return (EIO); + } + + len = uio->uio_resid; + *datlen = len - hlen; + if ((unsigned)len > MCLBYTES) + return (EIO); + + MGETHDR(m, M_WAIT, MT_DATA); + if (m == 0) + return (ENOBUFS); + if (len > MHLEN) { +#if BSD >= 199103 + MCLGET(m, M_WAIT); + if ((m->m_flags & M_EXT) == 0) { +#else + MCLGET(m); + if (m->m_len != MCLBYTES) { +#endif + error = ENOBUFS; + goto bad; + } + } + m->m_len = len; + m->m_pkthdr.len = len; + *mp = m; + /* + * Make room for link header. + */ + if (hlen != 0) { + m->m_len -= hlen; + m->m_pkthdr.len -= hlen; +#if BSD >= 199103 + m->m_data += hlen; /* XXX */ +#else + m->m_off += hlen; +#endif + error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); + if (error) + goto bad; + } + error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); + if (!error) + return (0); + bad: + m_freem(m); + return (error); +} + +/* + * Attach file to the bpf interface, i.e. make d listen on bp. + * Must be called at splimp. + */ +static void +bpf_attachd(d, bp) + struct bpf_d *d; + struct bpf_if *bp; +{ + /* + * Point d at bp, and add d to the interface's list of listeners. + * Finally, point the driver's bpf cookie at the interface so + * it will divert packets to bpf. + */ + d->bd_bif = bp; + d->bd_next = bp->bif_dlist; + bp->bif_dlist = d; + +/* *bp->bif_driverp = bp; */ +} + +/* + * Detach a file from its interface. + */ +static void +bpf_detachd(d) + struct bpf_d *d; +{ + struct bpf_d **p; + struct bpf_if *bp; + + bp = d->bd_bif; + /* + * Check if this descriptor had requested promiscuous mode. + * If so, turn it off. 
+ */ + if (d->bd_promisc) { + d->bd_promisc = 0; + if (ifpromisc(bp->bif_ifp, 0)) + /* + * Something is really wrong if we were able to put + * the driver into promiscuous mode, but can't + * take it out. + */ + panic("bpf: ifpromisc failed"); + } + /* Remove d from the interface's descriptor list. */ + p = &bp->bif_dlist; + while (*p != d) { + p = &(*p)->bd_next; + if (*p == 0) + panic("bpf_detachd: descriptor not in list"); + } + *p = (*p)->bd_next; + if (bp->bif_dlist == 0) + /* + * Let the driver know that there are no more listeners. + */ + /* *d->bd_bif->bif_driverp = 0 */; + d->bd_bif = 0; +} +#endif /* BPFDRV */ + +#ifdef BPFDRV +/* + * Open ethernet device. Returns ENXIO for illegal minor device number, + * EBUSY if file is open by another process. + */ +/* ARGSUSED */ +int +bpfopen(dev, flag) + dev_t dev; + int flag; +{ + register struct bpf_d *d; + + if (minor(dev) >= nbpfilter) + return (ENXIO); + /* + * Each minor can be opened by only one process. If the requested + * minor is in use, return EBUSY. + */ + d = &bpf_dtab[minor(dev)]; + if (!D_ISFREE(d)) + return (EBUSY); + + /* Mark "free" and do most initialization. */ + bzero((char *)d, sizeof(*d)); + d->bd_bufsize = bpf_bufsize; + + return (0); +} + +/* + * Close the descriptor by detaching it from its interface, + * deallocating its buffers, and marking it free. + */ +/* ARGSUSED */ +int +bpfclose(dev, flag) + dev_t dev; + int flag; +{ + register struct bpf_d *d = &bpf_dtab[minor(dev)]; + register int s; + + s = splimp(); + if (d->bd_bif) + bpf_detachd(d); + splx(s); + bpf_freed(d); + + return (0); +} + +/* + * Support for SunOS, which does not have tsleep. 
+ */ +#if BSD < 199103 +static +bpf_timeout(arg) + caddr_t arg; +{ + struct bpf_d *d = (struct bpf_d *)arg; + d->bd_timedout = 1; + wakeup(arg); +} + +#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) + +int +bpf_sleep(d) + register struct bpf_d *d; +{ + register int rto = d->bd_rtout; + register int st; + + if (rto != 0) { + d->bd_timedout = 0; + timeout(bpf_timeout, (caddr_t)d, rto); + } + st = sleep((caddr_t)d, PRINET|PCATCH); + if (rto != 0) { + if (d->bd_timedout == 0) + untimeout(bpf_timeout, (caddr_t)d); + else if (st == 0) + return EWOULDBLOCK; + } + return (st != 0) ? EINTR : 0; +} +#else +#define BPF_SLEEP tsleep +#endif + +/* + * Rotate the packet buffers in descriptor d. Move the store buffer + * into the hold slot, and the free buffer into the store slot. + * Zero the length of the new store buffer. + */ +#define ROTATE_BUFFERS(d) \ + (d)->bd_hbuf = (d)->bd_sbuf; \ + (d)->bd_hlen = (d)->bd_slen; \ + (d)->bd_sbuf = (d)->bd_fbuf; \ + (d)->bd_slen = 0; \ + (d)->bd_fbuf = 0; +/* + * bpfread - read next chunk of packets from buffers + */ +int +bpfread(dev, uio) + dev_t dev; + register struct uio *uio; +{ + register struct bpf_d *d = &bpf_dtab[minor(dev)]; + int error; + int s; + + /* + * Restrict application to use a buffer the same size as + * as kernel buffers. + */ + if (uio->uio_resid != d->bd_bufsize) + return (EINVAL); + + s = splimp(); + /* + * If the hold buffer is empty, then do a timed sleep, which + * ends when the timeout expires or when enough packets + * have arrived to fill the store buffer. + */ + while (d->bd_hbuf == 0) { + if (d->bd_immediate && d->bd_slen != 0) { + /* + * A packet(s) either arrived since the previous + * read or arrived while we were asleep. + * Rotate the buffers and return what's here. 
+ */ + ROTATE_BUFFERS(d); + break; + } + error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", + d->bd_rtout); + if (error == EINTR || error == ERESTART) { + splx(s); + return (error); + } + if (error == EWOULDBLOCK) { + /* + * On a timeout, return what's in the buffer, + * which may be nothing. If there is something + * in the store buffer, we can rotate the buffers. + */ + if (d->bd_hbuf) + /* + * We filled up the buffer in between + * getting the timeout and arriving + * here, so we don't need to rotate. + */ + break; + + if (d->bd_slen == 0) { + splx(s); + return (0); + } + ROTATE_BUFFERS(d); + break; + } + } + /* + * At this point, we know we have something in the hold slot. + */ + splx(s); + + /* + * Move data from hold buffer into user space. + * We know the entire buffer is transferred since + * we checked above that the read buffer is bpf_bufsize bytes. + */ + error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); + + s = splimp(); + d->bd_fbuf = d->bd_hbuf; + d->bd_hbuf = 0; + d->bd_hlen = 0; + splx(s); + + return (error); +} + + +/* + * If there are processes sleeping on this descriptor, wake them up. + */ +static __inline void +bpf_wakeup(d) + register struct bpf_d *d; +{ + wakeup((caddr_t)d); + selwakeup(&d->bd_sel); + d->bd_sel.si_thread = 0; +} + +int +bpfwrite(dev, uio) + dev_t dev; + struct uio *uio; +{ + register struct bpf_d *d = &bpf_dtab[minor(dev)]; + struct ifnet *ifp; + struct mbuf *m; + int error, s; + static struct sockaddr dst; + int datlen; + + if (d->bd_bif == 0) + return (ENXIO); + + ifp = d->bd_bif->bif_ifp; + + if (uio->uio_resid == 0) + return (0); + + error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); + if (error) + return (error); + + if (datlen > ifp->if_mtu) + return (EMSGSIZE); + + s = splnet(); +#if BSD >= 199103 + error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); +#else + error = (*ifp->if_output)(ifp, m, &dst); +#endif + splx(s); + /* + * The driver frees the mbuf. 
+ */ + return (error); +} + +/* + * Reset a descriptor by flushing its packet buffer and clearing the + * receive and drop counts. Should be called at splimp. + */ +static void +reset_d(d) + struct bpf_d *d; +{ + if (d->bd_hbuf) { + /* Free the hold buffer. */ + d->bd_fbuf = d->bd_hbuf; + d->bd_hbuf = 0; + } + d->bd_slen = 0; + d->bd_hlen = 0; + d->bd_rcount = 0; + d->bd_dcount = 0; +} + +/* + * FIONREAD Check for read packet available. + * SIOCGIFADDR Get interface address - convenient hook to driver. + * BIOCGBLEN Get buffer len [for read()]. + * BIOCSETF Set ethernet read filter. + * BIOCFLUSH Flush read packet buffer. + * BIOCPROMISC Put interface into promiscuous mode. + * BIOCGDLT Get link layer type. + * BIOCGETIF Get interface name. + * BIOCSETIF Set interface. + * BIOCSRTIMEOUT Set read timeout. + * BIOCGRTIMEOUT Get read timeout. + * BIOCGSTATS Get packet stats. + * BIOCIMMEDIATE Set immediate mode. + * BIOCVERSION Get filter language version. + */ +/* ARGSUSED */ +int +bpfioctl(dev, cmd, addr, flag) + dev_t dev; + u_long cmd; + caddr_t addr; + int flag; +{ + register struct bpf_d *d = &bpf_dtab[minor(dev)]; + int s, error = 0; + + switch (cmd) { + + default: + error = EINVAL; + break; + + /* + * Check for read packet available. + */ + case FIONREAD: + { + int n; + + s = splimp(); + n = d->bd_slen; + if (d->bd_hbuf) + n += d->bd_hlen; + splx(s); + + *(int *)addr = n; + break; + } + + case SIOCGIFADDR: + { + struct ifnet *ifp; + + if (d->bd_bif == 0) + error = EINVAL; + else { + ifp = d->bd_bif->bif_ifp; + error = (*ifp->if_ioctl)(ifp, cmd, addr); + } + break; + } + + /* + * Get buffer len [for read()]. + */ + case BIOCGBLEN: + *(u_int *)addr = d->bd_bufsize; + break; + + /* + * Set buffer length. 
+ */ + case BIOCSBLEN: +#if BSD < 199103 + error = EINVAL; +#else + if (d->bd_bif != 0) + error = EINVAL; + else { + register u_int size = *(u_int *)addr; + + if (size > BPF_MAXBUFSIZE) + *(u_int *)addr = size = BPF_MAXBUFSIZE; + else if (size < BPF_MINBUFSIZE) + *(u_int *)addr = size = BPF_MINBUFSIZE; + d->bd_bufsize = size; + } +#endif + break; + + /* + * Set link layer read filter. + */ + case BIOCSETF: + error = bpf_setf(d, (struct bpf_program *)addr); + break; + + /* + * Flush read packet buffer. + */ + case BIOCFLUSH: + s = splimp(); + reset_d(d); + splx(s); + break; + + /* + * Put interface into promiscuous mode. + */ + case BIOCPROMISC: + if (d->bd_bif == 0) { + /* + * No interface attached yet. + */ + error = EINVAL; + break; + } + s = splimp(); + if (d->bd_promisc == 0) { + error = ifpromisc(d->bd_bif->bif_ifp, 1); + if (error == 0) + d->bd_promisc = 1; + } + splx(s); + break; + + /* + * Get device parameters. + */ + case BIOCGDLT: + if (d->bd_bif == 0) + error = EINVAL; + else + *(u_int *)addr = d->bd_bif->bif_dlt; + break; + + /* + * Set interface name. + */ + case BIOCGETIF: + if (d->bd_bif == 0) + error = EINVAL; + else + bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr); + break; + + /* + * Set interface. + */ + case BIOCSETIF: + error = bpf_setif(d, (struct ifreq *)addr); + break; + + /* + * Set read timeout. + */ + case BIOCSRTIMEOUT: + { + struct timeval *tv = (struct timeval *)addr; + u_long msec; + + /* Compute number of milliseconds. */ + msec = tv->tv_sec * 1000 + tv->tv_usec / 1000; + /* Scale milliseconds to ticks. Assume hard + clock has millisecond or greater resolution + (i.e. tick >= 1000). For 10ms hardclock, + tick/1000 = 10, so rtout<-msec/10. */ + d->bd_rtout = msec / (tick / 1000); + break; + } + + /* + * Get read timeout. 
+ */ + case BIOCGRTIMEOUT: + { + struct timeval *tv = (struct timeval *)addr; + u_long msec = d->bd_rtout; + + msec *= tick / 1000; + tv->tv_sec = msec / 1000; + tv->tv_usec = msec % 1000; + break; + } + + /* + * Get packet stats. + */ + case BIOCGSTATS: + { + struct bpf_stat *bs = (struct bpf_stat *)addr; + + bs->bs_recv = d->bd_rcount; + bs->bs_drop = d->bd_dcount; + break; + } + + /* + * Set immediate mode. + */ + case BIOCIMMEDIATE: + d->bd_immediate = *(u_int *)addr; + break; + + case BIOCVERSION: + { + struct bpf_version *bv = (struct bpf_version *)addr; + + bv->bv_major = BPF_MAJOR_VERSION; + bv->bv_minor = BPF_MINOR_VERSION; + break; + } + } + return (error); +} + +/* + * Set d's packet filter program to fp. If this file already has a filter, + * free it and replace it. Returns EINVAL for bogus requests. + */ +int +bpf_setf(d, fp) + struct bpf_d *d; + struct bpf_program *fp; +{ + struct bpf_insn *fcode, *old; + u_int flen, size; + int s; + + old = d->bd_filter; + if (fp->bf_insns == 0) { + if (fp->bf_len != 0) + return (EINVAL); + s = splimp(); + d->bd_filter = 0; + reset_d(d); + splx(s); + if (old != 0) + FREE((caddr_t)old, M_DEVBUF); + return (0); + } + flen = fp->bf_len; + if (flen > BPF_MAXINSNS) + return (EINVAL); + + size = flen * sizeof(*fp->bf_insns); + MALLOC(fcode, struct bpf_insn *, size, M_DEVBUF, M_WAITOK); + if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && + bpf_validate(fcode, (int)flen)) { + s = splimp(); + d->bd_filter = fcode; + reset_d(d); + splx(s); + if (old != 0) + FREE((caddr_t)old, M_DEVBUF); + + return (0); + } + FREE((caddr_t)fcode, M_DEVBUF); + return (EINVAL); +} + +/* + * Detach a file from its current interface (if attached at all) and attach + * to the interface indicated by the name stored in ifr. + * Return an errno or 0. 
+ */ +static int +bpf_setif(d, ifr) + struct bpf_d *d; + struct ifreq *ifr; +{ + struct bpf_if *bp; + char *cp; + int unit, s, error; + + /* + * Separate string into name part and unit number. Put a null + * byte at the end of the name part, and compute the number. + * If the a unit number is unspecified, the default is 0, + * as initialized above. XXX This should be common code. + */ + unit = 0; + cp = ifr->ifr_name; + cp[sizeof(ifr->ifr_name) - 1] = '\0'; + while (*cp++) { + if (*cp >= '0' && *cp <= '9') { + unit = *cp - '0'; + *cp++ = '\0'; + while (*cp) + unit = 10 * unit + *cp++ - '0'; + break; + } + } + /* + * Look through attached interfaces for the named one. + */ + for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { + struct ifnet *ifp = bp->bif_ifp; + + if (ifp == 0 || unit != ifp->if_unit + || strcmp(ifp->if_name, ifr->ifr_name) != 0) + continue; + /* + * We found the requested interface. + * If it's not up, return an error. + * Allocate the packet buffers if we need to. + * If we're already attached to requested interface, + * just flush the buffer. + */ + if ((ifp->if_flags & IFF_UP) == 0) + return (ENETDOWN); + + if (d->bd_sbuf == 0) { + error = bpf_allocbufs(d); + if (error != 0) + return (error); + } + s = splimp(); + if (bp != d->bd_bif) { + if (d->bd_bif) + /* + * Detach if attached to something else. + */ + bpf_detachd(d); + + bpf_attachd(d, bp); + } + reset_d(d); + splx(s); + return (0); + } + /* Not found. */ + return (ENXIO); +} + +/* + * Convert an interface name plus unit number of an ifp to a single + * name which is returned in the ifr. + */ +static void +bpf_ifname(ifp, ifr) + struct ifnet *ifp; + struct ifreq *ifr; +{ + char *s = ifp->if_name; + char *d = ifr->ifr_name; + + while ((*d++ = *s++) != 0) + continue; + /* XXX Assume that unit number is less than 10. */ + *d++ = ifp->if_unit + '0'; + *d = '\0'; +} + +/* + * The new select interface passes down the proc pointer; the old select + * stubs had to grab it out of the user struct. 
This glue allows either case. + */ +#if BSD < 199103 +int +bpfselect(dev, rw) + register dev_t dev; + int rw; +{ + return (bpf_select(dev, rw, u.u_procp)); +} +#endif + +/* + * Support for select() system call + * + * Return true iff the specific operation will not block indefinitely. + * Otherwise, return false but make a note that a selwakeup() must be done. + */ +int +bpf_select(dev, rw, p) + register dev_t dev; + int rw; + struct proc *p; +{ + register struct bpf_d *d; + register int s; + + if (rw != FREAD) + return (0); + /* + * An imitation of the FIONREAD ioctl code. + */ + d = &bpf_dtab[minor(dev)]; + + s = splimp(); + if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) { + /* + * There is data waiting. + */ + splx(s); + return (1); + } +#if BSD >= 199103 + selrecord(p, &d->bd_sel); +#else + /* + * No data ready. If there's already a select() waiting on this + * minor device then this is a collision. This shouldn't happen + * because minors really should not be shared, but if a process + * forks while one of these is open, it is possible that both + * processes could select on the same descriptor. + */ + if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait) + d->bd_selcoll = 1; + else + d->bd_selproc = p; +#endif + splx(s); + return (0); +} +#endif /* BPFDRV */ + +/* + * Incoming linkage from device drivers. Process the packet pkt, of length + * pktlen, which is stored in a contiguous buffer. The packet is parsed + * by each process' filter, and if accepted, stashed into the corresponding + * buffer. + */ +#ifdef BPFDRV +void +bpf_tap(arg, pkt, pktlen) + struct ifnet *ifp; + register u_char *pkt; + register u_int pktlen; +{ + struct bpf_if *bp; + register struct bpf_d *d; + register u_int slen; + /* + * Note that the ipl does not have to be raised at this point. + * The only problem that could arise here is that if two different + * interfaces shared any data. This is not the case. 
+ */ + bp = (struct bpf_if *)arg; + for (d = bp->bif_dlist; d != 0; d = d->bd_next) { + ++d->bd_rcount; + slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); + if (slen != 0) + catchpacket(d, pkt, pktlen, slen, bcopy); + } +} + +/* + * Copy data from an mbuf chain into a buffer. This code is derived + * from m_copydata in sys/uipc_mbuf.c. + */ +static void +bpf_mcopy(src_arg, dst_arg, len) + const void *src_arg; + void *dst_arg; + register u_int len; +{ + register const struct mbuf *m; + register u_int count; + u_char *dst; + + m = src_arg; + dst = dst_arg; + while (len > 0) { + if (m == 0) + panic("bpf_mcopy"); + count = min(m->m_len, len); + bcopy(mtod(m, caddr_t), (caddr_t)dst, count); + m = m->m_next; + dst += count; + len -= count; + } +} +#endif /* BPFDRV */ + +#ifdef BPFDRV +/* + * Incoming linkage from device drivers, when packet is in an mbuf chain. + */ +void +bpf_mtap(arg, m) + caddr_t arg; + struct mbuf *m; +{ + struct bpf_if *bp = (struct bpf_if *)arg; + struct bpf_d *d; + u_int pktlen, slen; + struct mbuf *m0; + + pktlen = 0; + for (m0 = m; m0 != 0; m0 = m0->m_next) + pktlen += m0->m_len; + + for (d = bp->bif_dlist; d != 0; d = d->bd_next) { + ++d->bd_rcount; + slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); + if (slen != 0) + catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); + } +} +#endif /* BPFDRV */ + +#ifdef BPFDRV +/* + * Move the packet data from interface memory (pkt) into the + * store buffer. Return 1 if it's time to wakeup a listener (buffer full), + * otherwise 0. "copy" is the routine called to do the actual data + * transfer. bcopy is passed in to copy contiguous chunks, while + * bpf_mcopy is passed in to copy mbuf chains. In the latter case, + * pkt is really an mbuf. 
+ */ +static void +catchpacket(d, pkt, pktlen, snaplen, cpfn) + register struct bpf_d *d; + register u_char *pkt; + register u_int pktlen, snaplen; + register void (*cpfn) __P((const void *, void *, u_int)); +{ + register struct bpf_hdr *hp; + register int totlen, curlen; + register int hdrlen = d->bd_bif->bif_hdrlen; + /* + * Figure out how many bytes to move. If the packet is + * greater or equal to the snapshot length, transfer that + * much. Otherwise, transfer the whole packet (unless + * we hit the buffer size limit). + */ + totlen = hdrlen + min(snaplen, pktlen); + if (totlen > d->bd_bufsize) + totlen = d->bd_bufsize; + + /* + * Round up the end of the previous packet to the next longword. + */ + curlen = BPF_WORDALIGN(d->bd_slen); + if (curlen + totlen > d->bd_bufsize) { + /* + * This packet will overflow the storage buffer. + * Rotate the buffers if we can, then wakeup any + * pending reads. + */ + if (d->bd_fbuf == 0) { + /* + * We haven't completed the previous read yet, + * so drop the packet. + */ + ++d->bd_dcount; + return; + } + ROTATE_BUFFERS(d); + bpf_wakeup(d); + curlen = 0; + } + else if (d->bd_immediate) + /* + * Immediate mode is set. A packet arrived so any + * reads should be woken up. + */ + bpf_wakeup(d); + + /* + * Append the bpf header. + */ + hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); +#if BSD >= 199103 + microtime(&hp->bh_tstamp); +#elif defined(sun) + uniqtime(&hp->bh_tstamp); +#else + hp->bh_tstamp = time; +#endif + hp->bh_datalen = pktlen; + hp->bh_hdrlen = hdrlen; + /* + * Copy the packet data into the store buffer and update its length. + */ + (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); + d->bd_slen = curlen + totlen; +} + +/* + * Initialize all nonzero fields of a descriptor. 
+ */ +static int +bpf_allocbufs(d) + register struct bpf_d *d; +{ + MALLOC(d->bd_fbuf, caddr_t, d->bd_bufsize, M_DEVBUF, M_WAITOK); + if (d->bd_fbuf == 0) + return (ENOBUFS); + + MALLOC(d->bd_sbuf, caddr_t, d->bd_bufsize, M_DEVBUF, M_WAITOK); + if (d->bd_sbuf == 0) { + FREE(d->bd_fbuf, M_DEVBUF); + return (ENOBUFS); + } + d->bd_slen = 0; + d->bd_hlen = 0; + return (0); +} + +/* + * Free buffers currently in use by a descriptor. + * Called on close. + */ +static void +bpf_freed(d) + register struct bpf_d *d; +{ + /* + * We don't need to lock out interrupts since this descriptor has + * been detached from its interface and it yet hasn't been marked + * free. + */ + if (d->bd_sbuf != 0) { + FREE(d->bd_sbuf, M_DEVBUF); + if (d->bd_hbuf != 0) + FREE(d->bd_hbuf, M_DEVBUF); + if (d->bd_fbuf != 0) + FREE(d->bd_fbuf, M_DEVBUF); + } + if (d->bd_filter) + FREE((caddr_t)d->bd_filter, M_DEVBUF); + + D_MARKFREE(d); +} +#endif /* BPFDRV */ + +#ifdef BPFDRV +#if BSD >= 199103 +/* XXX This routine belongs in net/if.c. */ +/* + * Set/clear promiscuous mode on interface ifp based on the truth value + * of pswitch. The calls are reference counted so that only the first + * "on" request actually has an effect, as does the final "off" request. + * Results are undefined if the "off" and "on" requests are not matched. + */ +int +ifpromisc(ifp, pswitch) + struct ifnet *ifp; + int pswitch; +{ + struct ifreq ifr; + /* + * If the device is not configured up, we cannot put it in + * promiscuous mode. + */ + if ((ifp->if_flags & IFF_UP) == 0) + return (ENETDOWN); + + if (pswitch) { + if (ifp->if_pcount++ != 0) + return (0); + ifp->if_flags |= IFF_PROMISC; + } else { + if (--ifp->if_pcount > 0) + return (0); + ifp->if_flags &= ~IFF_PROMISC; + } + ifr.ifr_flags = ifp->if_flags; + return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr)); +} +#endif + +#if BSD < 199103 +/* + * Allocate some memory for bpf. This is temporary SunOS support, and + * is admittedly a hack. 
+ * If resources unavaiable, return 0. + */ +static caddr_t +bpf_alloc(size, canwait) + register int size; + register int canwait; +{ + register struct mbuf *m; + + if ((unsigned)size > (MCLBYTES-8)) + return 0; + + MGET(m, canwait, MT_DATA); + if (m == 0) + return 0; + if ((unsigned)size > (MLEN-8)) { + MCLGET(m); + if (m->m_len != MCLBYTES) { + m_freem(m); + return 0; + } + } + *mtod(m, struct mbuf **) = m; + return mtod(m, caddr_t) + 8; +} +#endif +#endif /* BPFDRV */ diff --git a/iokit/Drivers/network/AppleBPF/bpf_filter.c b/iokit/Drivers/network/AppleBPF/bpf_filter.c new file mode 100644 index 000000000..09f025c25 --- /dev/null +++ b/iokit/Drivers/network/AppleBPF/bpf_filter.c @@ -0,0 +1,563 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ +/* + * Copyright (c) 1990, 1991, 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)bpf_filter.c 8.1 (Berkeley) 6/10/93 + */ + +#define BPFDRV + +#include +#include +#include + +#if defined(sparc) || defined(hppa) +#define BPF_ALIGN +#endif + +#ifndef BPF_ALIGN +#define EXTRACT_SHORT(p) ((u_short)ntohs(*(u_short *)p)) +#define EXTRACT_LONG(p) (ntohl(*(u_int32_t *)p)) +#else +#define EXTRACT_SHORT(p)\ + ((u_short)\ + ((u_short)*((u_char *)p+0)<<8|\ + (u_short)*((u_char *)p+1)<<0)) +#define EXTRACT_LONG(p)\ + ((u_int32_t)*((u_char *)p+0)<<24|\ + (u_int32_t)*((u_char *)p+1)<<16|\ + (u_int32_t)*((u_char *)p+2)<<8|\ + (u_int32_t)*((u_char *)p+3)<<0) +#endif + +#ifdef BPFDRV +#ifdef KERNEL +#include +#define MINDEX(m, k) \ +{ \ + register int len = m->m_len; \ + \ + while (k >= len) { \ + k -= len; \ + m = m->m_next; \ + if (m == 0) \ + return 0; \ + len = m->m_len; \ + } \ +} + +static int +m_xword(m, k, err) + register struct mbuf *m; + register int k, *err; +{ + register int len; + register u_char *cp, *np; + register struct mbuf *m0; + + len = m->m_len; + while (k >= len) { + k -= len; + m = m->m_next; + if (m == 0) + goto bad; + len = m->m_len; + } + cp = mtod(m, u_char *) + k; + if (len - k >= 4) { + *err = 0; + return EXTRACT_LONG(cp); + } + m0 = m->m_next; + if (m0 == 0 || m0->m_len + len - k < 4) + goto bad; + *err = 0; + np = mtod(m0, u_char *); + switch (len - k) { + + case 1: + return (cp[k] << 24) | (np[0] << 16) | (np[1] << 8) | np[2]; + + case 2: + return (cp[k] << 24) | (cp[k + 1] << 16) | (np[0] << 8) | + np[1]; + + default: + return (cp[k] << 24) | (cp[k + 1] << 16) | (cp[k + 2] << 8) | + np[0]; + } + bad: + *err = 1; + return 0; +} + +static int +m_xhalf(m, k, err) + register struct mbuf *m; + register int k, *err; +{ + register int len; + register u_char *cp; + register struct mbuf *m0; + + len = m->m_len; + while (k >= len) { + k -= len; + m = m->m_next; + if (m == 0) + goto bad; + len = m->m_len; + } + cp = mtod(m, u_char *) + k; + if (len - k >= 2) { + *err = 0; + return EXTRACT_SHORT(cp); + } + m0 = m->m_next; + if 
(m0 == 0) + goto bad; + *err = 0; + return (cp[k] << 8) | mtod(m0, u_char *)[0]; + bad: + *err = 1; + return 0; +} +#endif + +#include + +/* + * Execute the filter program starting at pc on the packet p + * wirelen is the length of the original packet + * buflen is the amount of data present + */ +u_int +bpf_filter(pc, p, wirelen, buflen) + register struct bpf_insn *pc; + register u_char *p; + u_int wirelen; + register u_int buflen; +{ + register u_int32_t A, X; + register int k; + int32_t mem[BPF_MEMWORDS]; + + if (pc == 0) + /* + * No filter means accept all. + */ + return (u_int)-1; +#ifdef lint + A = 0; + X = 0; +#endif + --pc; + while (1) { + ++pc; + switch (pc->code) { + + default: +#ifdef KERNEL + return 0; +#else + abort(); +#endif + case BPF_RET|BPF_K: + return (u_int)pc->k; + + case BPF_RET|BPF_A: + return (u_int)A; + + case BPF_LD|BPF_W|BPF_ABS: + k = pc->k; + if (k + sizeof(int32_t) > buflen) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xword((struct mbuf *)p, k, &merr); + if (merr != 0) + return 0; + continue; +#else + return 0; +#endif + } +#ifdef BPF_ALIGN + if (((int)(p + k) & 3) != 0) + A = EXTRACT_LONG(&p[k]); + else +#endif + A = ntohl(*(long *)(p + k)); + continue; + + case BPF_LD|BPF_H|BPF_ABS: + k = pc->k; + if (k + sizeof(short) > buflen) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xhalf((struct mbuf *)p, k, &merr); + continue; +#else + return 0; +#endif + } + A = EXTRACT_SHORT(&p[k]); + continue; + + case BPF_LD|BPF_B|BPF_ABS: + k = pc->k; + if (k >= buflen) { +#ifdef KERNEL + register struct mbuf *m; + + if (buflen != 0) + return 0; + m = (struct mbuf *)p; + MINDEX(m, k); + A = mtod(m, u_char *)[k]; + continue; +#else + return 0; +#endif + } + A = p[k]; + continue; + + case BPF_LD|BPF_W|BPF_LEN: + A = wirelen; + continue; + + case BPF_LDX|BPF_W|BPF_LEN: + X = wirelen; + continue; + + case BPF_LD|BPF_W|BPF_IND: + k = X + pc->k; + if (k + sizeof(int32_t) > buflen) { +#ifdef KERNEL + int merr; 
+ + if (buflen != 0) + return 0; + A = m_xword((struct mbuf *)p, k, &merr); + if (merr != 0) + return 0; + continue; +#else + return 0; +#endif + } +#ifdef BPF_ALIGN + if (((int)(p + k) & 3) != 0) + A = EXTRACT_LONG(&p[k]); + else +#endif + A = ntohl(*(long *)(p + k)); + continue; + + case BPF_LD|BPF_H|BPF_IND: + k = X + pc->k; + if (k + sizeof(short) > buflen) { +#ifdef KERNEL + int merr; + + if (buflen != 0) + return 0; + A = m_xhalf((struct mbuf *)p, k, &merr); + if (merr != 0) + return 0; + continue; +#else + return 0; +#endif + } + A = EXTRACT_SHORT(&p[k]); + continue; + + case BPF_LD|BPF_B|BPF_IND: + k = X + pc->k; + if (k >= buflen) { +#ifdef KERNEL + register struct mbuf *m; + + if (buflen != 0) + return 0; + m = (struct mbuf *)p; + MINDEX(m, k); + A = mtod(m, char *)[k]; + continue; +#else + return 0; +#endif + } + A = p[k]; + continue; + + case BPF_LDX|BPF_MSH|BPF_B: + k = pc->k; + if (k >= buflen) { +#ifdef KERNEL + register struct mbuf *m; + + if (buflen != 0) + return 0; + m = (struct mbuf *)p; + MINDEX(m, k); + X = (mtod(m, char *)[k] & 0xf) << 2; + continue; +#else + return 0; +#endif + } + X = (p[pc->k] & 0xf) << 2; + continue; + + case BPF_LD|BPF_IMM: + A = pc->k; + continue; + + case BPF_LDX|BPF_IMM: + X = pc->k; + continue; + + case BPF_LD|BPF_MEM: + A = mem[pc->k]; + continue; + + case BPF_LDX|BPF_MEM: + X = mem[pc->k]; + continue; + + case BPF_ST: + mem[pc->k] = A; + continue; + + case BPF_STX: + mem[pc->k] = X; + continue; + + case BPF_JMP|BPF_JA: + pc += pc->k; + continue; + + case BPF_JMP|BPF_JGT|BPF_K: + pc += (A > pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_K: + pc += (A >= pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_K: + pc += (A == pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_K: + pc += (A & pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGT|BPF_X: + pc += (A > X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_X: + pc += (A >= X) ? 
pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_X: + pc += (A == X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_X: + pc += (A & X) ? pc->jt : pc->jf; + continue; + + case BPF_ALU|BPF_ADD|BPF_X: + A += X; + continue; + + case BPF_ALU|BPF_SUB|BPF_X: + A -= X; + continue; + + case BPF_ALU|BPF_MUL|BPF_X: + A *= X; + continue; + + case BPF_ALU|BPF_DIV|BPF_X: + if (X == 0) + return 0; + A /= X; + continue; + + case BPF_ALU|BPF_AND|BPF_X: + A &= X; + continue; + + case BPF_ALU|BPF_OR|BPF_X: + A |= X; + continue; + + case BPF_ALU|BPF_LSH|BPF_X: + A <<= X; + continue; + + case BPF_ALU|BPF_RSH|BPF_X: + A >>= X; + continue; + + case BPF_ALU|BPF_ADD|BPF_K: + A += pc->k; + continue; + + case BPF_ALU|BPF_SUB|BPF_K: + A -= pc->k; + continue; + + case BPF_ALU|BPF_MUL|BPF_K: + A *= pc->k; + continue; + + case BPF_ALU|BPF_DIV|BPF_K: + A /= pc->k; + continue; + + case BPF_ALU|BPF_AND|BPF_K: + A &= pc->k; + continue; + + case BPF_ALU|BPF_OR|BPF_K: + A |= pc->k; + continue; + + case BPF_ALU|BPF_LSH|BPF_K: + A <<= pc->k; + continue; + + case BPF_ALU|BPF_RSH|BPF_K: + A >>= pc->k; + continue; + + case BPF_ALU|BPF_NEG: + A = -A; + continue; + + case BPF_MISC|BPF_TAX: + X = A; + continue; + + case BPF_MISC|BPF_TXA: + A = X; + continue; + } + } +} + +#ifdef KERNEL +/* + * Return true if the 'fcode' is a valid filter program. + * The constraints are that each jump be forward and to a valid + * code. The code must terminate with either an accept or reject. + * 'valid' is an array for use by the routine (it must be at least + * 'len' bytes long). + * + * The kernel needs to be able to verify an application's filter code. + * Otherwise, a bogus program could easily crash the system. + */ +int +bpf_validate(f, len) + struct bpf_insn *f; + int len; +{ + register int i; + register struct bpf_insn *p; + + for (i = 0; i < len; ++i) { + /* + * Check that that jumps are forward, and within + * the code block. 
+ */ + p = &f[i]; + if (BPF_CLASS(p->code) == BPF_JMP) { + register int from = i + 1; + + if (BPF_OP(p->code) == BPF_JA) { + if (from + p->k >= len) + return 0; + } + else if (from + p->jt >= len || from + p->jf >= len) + return 0; + } + /* + * Check that memory operations use valid addresses. + */ + if ((BPF_CLASS(p->code) == BPF_ST || + (BPF_CLASS(p->code) == BPF_LD && + (p->code & 0xe0) == BPF_MEM)) && + (p->k >= BPF_MEMWORDS || p->k < 0)) + return 0; + /* + * Check for constant division by 0. + */ + if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0) + return 0; + } + return BPF_CLASS(f[len - 1].code) == BPF_RET; +} +#endif +#endif /* BPFDRV */ diff --git a/iokit/Drivers/network/drvIntel82557/i82557.cpp b/iokit/Drivers/network/drvIntel82557/i82557.cpp new file mode 100644 index 000000000..152f1a884 --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557.cpp @@ -0,0 +1,820 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. All rights reserved. 
+ * + * i82557.cpp + * + * HISTORY + * + * 22-Jan-96 Dieter Siegmund (dieter) at NeXT + * Created. + * + * 03-May-96 Dieter Siegmund (dieter) at NeXT + * Added a real ISR to improve performance. + * + * 10-June-96 Dieter Siegmund (dieter) at NeXT + * Added support for Splash 3 (10 Base-T only card). + * + * 18-June-96 Dieter Siegmund (dieter) at NeXT + * Keep the transmit queue draining by interrupting every + * N / 2 transmits (where N is the size of the hardware queue). + * + * 15-Dec-97 Joe Liu (jliu) at Apple + * Updated PHY programming to be 82558 aware. + * Misc changes to conform to new 82558 register flags. + * Changed RNR interrupt handler to restart RU instead of a reset. + * Interrupt handler now does a thread_call_func() to do most of its work. + * Interrupts are disabled until the thread callout finishes its work. + * Increased the size of TX/RX rings. + * buffer object removed, we use cluster mbufs to back up the receive ring. + * + * 29-May-98 Joe Liu (jliu) at Apple + * Updated _setupPhy method to take advantage of parallel detection whenever + * possible in order to detect the proper link speed. + * + * 17-Aug-98 Joe Liu (jliu) at Apple + * Re-enabled the setting of txready_sel PHY (PCS) bit for DP83840. + * Simplified interrupt handling, resulting in RCV performance improvements. + * Receive packets are sent upstream via a cached function pointer. + */ + +#include "i82557.h" + +#define ONE_SECOND_TICKS 1000 +#define LOAD_STATISTICS_INTERVAL (4 * ONE_SECOND_TICKS) + +#define super IOEthernetController +OSDefineMetaClassAndStructors( Intel82557, IOEthernetController ) + +//--------------------------------------------------------------------------- +// Function: pciConfigInit +// +// Update PCI command register to enable the memory-mapped range, +// and bus-master interface. 
bool Intel82557::pciConfigInit(IOPCIDevice * provider)
{
    UInt32 reg;

    reg = provider->configRead32( kIOPCIConfigCommand );

    // Enable bus-master, memory-mapped register access, and
    // memory-write-and-invalidate transactions.
    reg |= ( kIOPCICommandBusMaster      |
             kIOPCICommandMemorySpace    |
             kIOPCICommandMemWrInvalidate );

    reg &= ~kIOPCICommandIOSpace;	// disable I/O space

    provider->configWrite32( kIOPCIConfigCommand, reg );

    return true;
}

//---------------------------------------------------------------------------
// Function: initDriver
//
// Create and initialize driver objects before the hardware is
// enabled.
//
// Returns true on success, and false if initialization failed.
// NOTE(review): objects allocated here are not released on the failure
// paths; they are cleaned up later in free() — see that method.

bool Intel82557::initDriver(IOService * provider)
{
    currentMediumType = MEDIUM_TYPE_INVALID;

    // This driver will allocate and use an IOGatedOutputQueue.
    //
    transmitQueue = getOutputQueue();
    if ( transmitQueue == 0 ) return false;

    // Allocate two IOMbufLittleMemoryCursor instances. One for transmit and
    // the other for receive.
    //
    rxMbufCursor = IOMbufLittleMemoryCursor::withSpecification(MAX_BUF_SIZE,1);
    txMbufCursor = IOMbufLittleMemoryCursor::withSpecification(MAX_BUF_SIZE,
                                                               TBDS_PER_TCB);
    if (!rxMbufCursor || !txMbufCursor)
        return false;

    // Get a handle to our superclass' workloop.
    //
    IOWorkLoop * myWorkLoop = (IOWorkLoop *) getWorkLoop();
    if (!myWorkLoop)
        return false;

    // Create and register an interrupt event source. The provider will
    // take care of the low-level interrupt registration stuff.
    //
    interruptSrc =
        IOInterruptEventSource::interruptEventSource(this,
                    (IOInterruptEventAction) &Intel82557::interruptOccurred,
                    provider);

    if (!interruptSrc ||
        (myWorkLoop->addEventSource(interruptSrc) != kIOReturnSuccess))
        return false;

    // Register a timer event source. This is used as a watchdog timer.
    //
    timerSrc = IOTimerEventSource::timerEventSource( this,
               (IOTimerEventSource::Action) &Intel82557::timeoutOccurred );
    if (!timerSrc ||
        (myWorkLoop->addEventSource(timerSrc) != kIOReturnSuccess))
        return false;

    // Create a dictionary to hold IONetworkMedium objects.
    //
    mediumDict = OSDictionary::withCapacity(5);
    if (!mediumDict)
        return false;

    return true;
}

//---------------------------------------------------------------------------
// Function: getDefaultSettings
//
// Get the default driver settings chosen by the user. The properties
// are all stored in our property table (an OSDictionary).

bool Intel82557::getDefaultSettings()
{
    OSNumber *  numObj;
    OSBoolean * boolObj;

    // Check for PHY address override.
    //
    phyAddr = PHY_ADDRESS_DEFAULT;
    numObj  = OSDynamicCast( OSNumber, getProperty("PHY Address") );
    if ( numObj )
    {
        phyAddr = numObj->unsigned32BitValue();
    }

    // Check for Verbose flag.
    //
    verbose = false;
    boolObj = OSDynamicCast( OSBoolean, getProperty("Verbose") );
    if ( boolObj && boolObj->isTrue() )
    {
        IOLog("%s: verbose mode enabled\n", getName());
        verbose = true;
    }

    // Check for Flow-Control enable flag.
    //
    flowControl = false;
    boolObj = OSDynamicCast( OSBoolean, getProperty("Flow Control") );
    if ( boolObj && boolObj->isTrue() )
    {
        IOLog("%s: 802.3x flow control enabled\n", getName());
        flowControl = true;
    }

    return true;
}

//---------------------------------------------------------------------------
// Function: start
//
// Hardware was detected and initialized, start the driver.
// The provider is opened only for the duration of hardware setup here,
// then closed; enable() re-opens it on demand.

bool Intel82557::start( IOService * provider )
{
    bool ret = false;

    do {
        // Start our superclass first.

        if ( super::start(provider) == false )
            break;

        // Cache our provider to an instance variable.

        pciNub = OSDynamicCast(IOPCIDevice, provider);
        if ( pciNub == 0 ) break;

        // Retain provider, released in free().

        pciNub->retain();

        // Open our provider.

        if ( pciNub->open(this) == false ) break;

        // Initialize the driver's event sources and other support objects.

        if ( initDriver(provider) == false ) break;

        // Get the virtual address mapping of CSR registers located at
        // Base Address Range 0 (0x10). The size of this range is 4K.

        csrMap = pciNub->mapDeviceMemoryWithRegister( kIOPCIConfigBaseAddress0 );
        if ( csrMap == 0 ) break;

        CSR_p = (CSR_t *) csrMap->getVirtualAddress();

        // Setup our PCI config space.

        if ( pciConfigInit(pciNub) == false ) break;

        // Create the EEPROM object.

        eeprom = i82557eeprom::withAddress(&CSR_p->eepromControl);
        if ( eeprom == 0 )
        {
            IOLog("%s: couldn't allocate eeprom object", getName());
            break;
        }

        // Get default driver settings (stored in property table).

        if ( getDefaultSettings() == false ) break;

        if ( verbose ) eeprom->dumpContents();

        // Execute one-time initialization code.

        if ( coldInit() == false )
        {
            IOLog("%s: coldInit failed\n", getName());
            break;
        }

        if ( hwInit() == false )
        {
            IOLog("%s: hwInit failed\n", getName());
            break;
        }

        // Publish our media capabilities.

        _phyPublishMedia();
        if ( publishMediumDictionary(mediumDict) == false )
        {
            IOLog("%s: publishMediumDictionary failed\n", getName());
            break;
        }

#if 0
        // Announce the basic hardware configuration info.
        IOLog("%s: Memory 0x%lx irq %d\n", getName(),
              csrMap->getPhysicalAddress(), 0);
#endif

        ret = true;
    }
    while ( false );

    // Close our provider, it will be re-opened on demand when
    // our enable() is called.

    if ( pciNub ) pciNub->close(this);

    do {
        if ( ret == false ) break;

        ret = false;

        // Allocate and attach an IOEthernetInterface instance to this driver
        // object.

        if ( attachInterface((IONetworkInterface **) &netif, false) == false )
            break;

        // Attach a kernel debugger client. This is not an essential service,
        // and the return is not checked.

        attachDebuggerClient(&debugger);

        // Start matching for clients of IONetworkInterface.

        netif->registerService();

        ret = true;
    }
    while ( false );

    return ret;
}

//---------------------------------------------------------------------------
// Function: stop
//
// Stop all activities and prepare for termination.

void Intel82557::stop(IOService * provider)
{
    super::stop(provider);
}

//---------------------------------------------------------------------------
// Function: createWorkLoop
//
// Override IONetworkController::createWorkLoop() method to create a workloop.

bool Intel82557::createWorkLoop()
{
    workLoop = IOWorkLoop::workLoop();

    return ( workLoop != 0 );
}

//---------------------------------------------------------------------------
// Function: getWorkLoop
//
// Override IOService::getWorkLoop() method to return our workloop.

IOWorkLoop * Intel82557::getWorkLoop() const
{
    return workLoop;
}

//---------------------------------------------------------------------------
// Function: configureInterface
//
// Configure a newly instantiated IONetworkInterface object.
// Caches pointers to the interface's generic network and Ethernet
// statistics buffers for later updating by the driver.
// NOTE(review): the parameter shadows the 'netif' instance variable;
// both refer to the same interface in practice — confirm callers.

bool Intel82557::configureInterface(IONetworkInterface * netif)
{
    IONetworkData * data;

    if ( super::configureInterface(netif) == false )
        return false;

    // Get the generic network statistics structure.

    data = netif->getParameter(kIONetworkStatsKey);
    if (!data || !(netStats = (IONetworkStats *) data->getBuffer())) {
        return false;
    }

    // Get the Ethernet statistics structure.

    data = netif->getParameter(kIOEthernetStatsKey);
    if (!data || !(etherStats = (IOEthernetStats *) data->getBuffer())) {
        return false;
    }

    return true;
}

//---------------------------------------------------------------------------
// Function: free
//
// Deallocate all resources and destroy the instance.
void Intel82557::free()
{
    // Release every object allocated in initDriver()/start(); each pointer
    // is cleared so free() is safe against partially-initialized instances.
    if (debugger)     { debugger->release();     debugger = 0;     }
    if (netif)        { netif->release();        netif = 0;        }
    if (interruptSrc) { interruptSrc->release(); interruptSrc = 0; }
    if (timerSrc)     { timerSrc->release();     timerSrc = 0;     }
    if (rxMbufCursor) { rxMbufCursor->release(); rxMbufCursor = 0; }
    if (txMbufCursor) { txMbufCursor->release(); txMbufCursor = 0; }
    if (csrMap)       { csrMap->release();       csrMap = 0;       }
    if (eeprom)       { eeprom->release();       eeprom = 0;       }
    if (mediumDict)   { mediumDict->release();   mediumDict = 0;   }
    if ( pciNub )     { pciNub->release();       pciNub = 0;       }
    if ( workLoop )   { workLoop->release();     workLoop = 0;     }

    // Release the shared/tx/rx descriptor memory blocks.
    _freeMemPage( &shared );
    _freeMemPage( &txRing );
    _freeMemPage( &rxRing );

    super::free();	// pass it to our superclass
}

//---------------------------------------------------------------------------
// Function: enableAdapter
//
// Enables the adapter & driver to the given level of support.
// Level 1 brings up just enough for kernel debugging (rings, receive,
// medium, interrupts); level 2 additionally starts the output queue.

bool Intel82557::enableAdapter(UInt32 level)
{
    bool ret = false;

//  IOLog("%s::%s enabling level %ld\n", getName(), __FUNCTION__, level);

    switch (level) {
        case kActivationLevel1:

            // Open provider.
            //
            if ( ( pciNub == 0 ) || ( pciNub->open(this) == false ) )
            {
                break;
            }

            if (!_initRingBuffers())
                break;

            if (!_startReceive()) {
                _clearRingBuffers();
                break;
            }

            // Set current medium.
            //
            if (setMedium(getCurrentMedium()) != kIOReturnSuccess)
                IOLog("%s: setMedium error\n", getName());

            // Start the watchdog timer.
            //
            timerSrc->setTimeoutMS(LOAD_STATISTICS_INTERVAL);

            // Enable interrupt event sources and hardware interrupts.
            //
            if (getWorkLoop())
                getWorkLoop()->enableAllInterrupts();
            enableAdapterInterrupts();

            // Force PHY to report link status.
            //
            _phyReportLinkStatus(true);

            ret = true;
            break;

        case kActivationLevel2:
            // Issue a dump statistics command.
            //
            _dumpStatistics();

            // Start our IOOutputQueue object.
            //
            transmitQueue->setCapacity(TRANSMIT_QUEUE_SIZE);
            transmitQueue->start();

            ret = true;
            break;
    }

    if (!ret)
        IOLog("%s::%s error in level %ld\n", getName(), __FUNCTION__, level);

    return ret;
}

//---------------------------------------------------------------------------
// Function: disableAdapter
//
// Disables the adapter & driver to the given level of support.
// Mirrors enableAdapter(): level 2 stops the output queue; level 1
// tears down interrupts, timer, rings, and closes the provider.

bool Intel82557::disableAdapter(UInt32 level)
{
    bool ret = false;

//  IOLog("%s::%s disabling level %ld\n", getName(), __FUNCTION__, level);

    switch (level) {
        case kActivationLevel1:
            // Disable interrupt handling and hardware interrupt sources.
            //
            disableAdapterInterrupts();
            if (getWorkLoop())
                getWorkLoop()->disableAllInterrupts();

            // Stop the timer event source, and initialize the watchdog state.
            //
            timerSrc->cancelTimeout();
            packetsReceived    = true;	// assume we're getting packets
            packetsTransmitted = false;
            txCount = 0;

            // Reset the hardware engine.
            //
            ret = hwInit();

            // Clear the descriptor rings after the hardware is idle.
            //
            _clearRingBuffers();

            // Report link status: unknown.
            //
            setLinkStatus(0, 0);

            // Flush all packets held in the queue and prevent it
            // from accumulating any additional packets.
            //
            transmitQueue->setCapacity(0);
            transmitQueue->flush();

            // Close provider.
            //
            if ( pciNub )
            {
                pciNub->close(this);
            }

            break;

        case kActivationLevel2:
            // Stop the transmit queue. outputPacket() will not get called
            // after this.
            //
            transmitQueue->stop();

            ret = true;
            break;
    }

    if (!ret)
        IOLog("%s::%s error in level %ld\n", getName(), __FUNCTION__, level);

    return ret;
}

//---------------------------------------------------------------------------
// Function: setActivationLevel
//
// Sets the adapter's activation level.
//
// kActivationLevel0 - Adapter is disabled.
// kActivationLevel1 - Adapter is brought up just enough to support debugging.
// kActivationLevel2 - Adapter is completely up.

bool Intel82557::setActivationLevel(UInt32 level)
{
    bool    ret = false;
    UInt32  nextLevel;

    // IOLog("---> DESIRED LEVEL : %d\n", level);

    if (currentLevel == level) return true;

    // Step down through levels, disabling one level at a time.
    for ( ; currentLevel > level; currentLevel--)
    {
        if ( (ret = disableAdapter(currentLevel)) == false )
            break;
    }

    // Step up through levels, enabling one level at a time.
    for ( nextLevel = currentLevel + 1;
          currentLevel < level;
          currentLevel++, nextLevel++ )
    {
        if ( (ret = enableAdapter(nextLevel)) == false )
            break;
    }

    // IOLog("---> PRESENT LEVEL : %d\n\n", currentLevel);

    return ret;
}

//---------------------------------------------------------------------------
// Function: enable
//
// A request from our interface client to enable the adapter.

IOReturn Intel82557::enable(IONetworkInterface * /*netif*/)
{
    if ( enabledForNetif ) return kIOReturnSuccess;

    enabledForNetif = setActivationLevel( kActivationLevel2 );

    return ( enabledForNetif ? kIOReturnSuccess : kIOReturnIOError );
}

//---------------------------------------------------------------------------
// Function: disable
//
// A request from our interface client to disable the adapter.
// Drops to level 1 (not 0) if the debugger client is still active.

IOReturn Intel82557::disable(IONetworkInterface * /*netif*/)
{
    enabledForNetif = false;

    setActivationLevel( enabledForDebugger ?
                        kActivationLevel1 : kActivationLevel0 );

    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// Function: enable
//
// A request from our debugger client to enable the adapter.
// If the interface client already enabled the hardware, only the
// bookkeeping flag needs updating.

IOReturn Intel82557::enable(IOKernelDebugger * /*debugger*/)
{
    if ( enabledForDebugger || enabledForNetif )
    {
        enabledForDebugger = true;
        return kIOReturnSuccess;
    }

    enabledForDebugger = setActivationLevel( kActivationLevel1 );

    return ( enabledForDebugger ? kIOReturnSuccess : kIOReturnIOError );
}

//---------------------------------------------------------------------------
// Function: disable
//
// A request from our debugger client to disable the adapter.

IOReturn Intel82557::disable(IOKernelDebugger * /*debugger*/)
{
    enabledForDebugger = false;

    if ( enabledForNetif == false )
        setActivationLevel( kActivationLevel0 );

    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// Function: timeoutOccurred
//
// Periodic timer that monitors the receiver status, updates error
// and collision statistics, and update the current link status.

void Intel82557::timeoutOccurred(IOTimerEventSource * /*timer*/)
{
    if ( (packetsReceived == false) && (packetsTransmitted == true) )
    {
        /*
         * The B-step of the i82557 requires that an mcsetup command be
         * issued if the receiver stops receiving. This is a documented
         * errata.
         */
        mcSetup(0, 0, true);
    }
    packetsReceived = packetsTransmitted = false;

    _updateStatistics();

    _phyReportLinkStatus();

    // Re-arm for the next interval.
    timerSrc->setTimeoutMS(LOAD_STATISTICS_INTERVAL);
}

//---------------------------------------------------------------------------
// Function: setPromiscuousMode
//
// Reprograms the configuration block under the debugger lock, since
// config() touches hardware state shared with the polled debugger path.

IOReturn Intel82557::setPromiscuousMode(IOEnetPromiscuousMode mode)
{
    bool rv;
    promiscuousEnabled = (mode == kIOEnetPromiscuousModeOff) ? false : true;
    reserveDebuggerLock();
    rv = config();
    releaseDebuggerLock();
    return (rv ? kIOReturnSuccess : kIOReturnIOError);
}

//---------------------------------------------------------------------------
// Function: setMulticastMode
//
// Only records the mode flag; the filter itself is updated by
// setMulticastList().

IOReturn Intel82557::setMulticastMode(IOEnetMulticastMode mode)
{
    multicastEnabled = (mode == kIOEnetMulticastModeOff) ? false : true;
    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// Function: setMulticastList

IOReturn Intel82557::setMulticastList(IOEthernetAddress * addrs, UInt32 count)
{
    IOReturn ret = kIOReturnSuccess;

    if ( mcSetup(addrs, count) == false )
    {
        IOLog("%s: set multicast list failed\n", getName());
        ret = kIOReturnIOError;
    }
    return ret;
}

//---------------------------------------------------------------------------
// Function: getPacketBufferConstraints
//
// Return our driver's packet alignment requirements.

void
Intel82557::getPacketBufferConstraints(IOPacketBufferConstraints * constraints) const
{
    constraints->alignStart  = kIOPacketBufferAlign2;	// even word aligned.
    constraints->alignLength = kIOPacketBufferAlign1;	// no restriction.
}

//---------------------------------------------------------------------------
// Function: getHardwareAddress
//
// Return the adapter's hardware/Ethernet address.

IOReturn Intel82557::getHardwareAddress(IOEthernetAddress * addrs)
{
    bcopy(&myAddress, addrs, sizeof(*addrs));
    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// Function: createOutputQueue
//
// Allocate an IOGatedOutputQueue instance.

IOOutputQueue * Intel82557::createOutputQueue()
{
    return IOGatedOutputQueue::withTarget(this, getWorkLoop());
}

//---------------------------------------------------------------------------
// Function: setMedium
//
// Transition the controller/PHY to use a new medium. Note that
// this function can be called by the driver, or by our client.

IOReturn Intel82557::setMedium(const IONetworkMedium * medium)
{
    bool  r;

    if ( OSDynamicCast(IONetworkMedium, medium) == 0 )
    {
        // Defaults to Auto.
        medium = _phyGetMediumWithType( MEDIUM_TYPE_AUTO );
        if ( medium == 0 ) return kIOReturnError;
    }

#if 0
    IOLog("%s: setMedium -> %s\n", getName(),
          medium->getName()->getCStringNoCopy());
#endif

    // Program PHY to select the desired medium.
    //
    r = _phySetMedium( (mediumType_t) medium->getIndex() );

    // Update the current medium property.
    //
    if ( r && !setCurrentMedium(medium) )
        IOLog("%s: setCurrentMedium error\n", getName());

    return ( r ? kIOReturnSuccess : kIOReturnIOError );
}

//---------------------------------------------------------------------------
// Function: newVendorString(), newModelString()
//
// Report human readable hardware information strings.

const OSString * Intel82557::newVendorString() const
{
    return OSString::withCString("Intel");
}

const OSString * Intel82557::newModelString() const
{
    const char * model = 0;

    assert( eeprom && eeprom->getContents() );

    // Distinguish 82557 vs 82558 from the controller type recorded
    // in the EEPROM contents.
    switch ( eeprom->getContents()->controllerType )
    {
        case I82558_CONTROLLER_TYPE:
            model = "82558";
            break;
        case I82557_CONTROLLER_TYPE:
        default:
            model = "82557";
            break;
    }
    return OSString::withCString(model);
}

//---------------------------------------------------------------------------
// Kernel debugger entry points.
//
// KDP driven polling routines to send and transmit a frame.
// Remember, no memory allocation! Not even mbufs are safe.

void Intel82557::sendPacket(void * pkt, UInt32 pkt_len)
{
    _sendPacket(pkt, pkt_len);
}

void Intel82557::receivePacket(void * pkt, UInt32 * pkt_len, UInt32 timeout)
{
    _receivePacket(pkt, (UInt *) pkt_len, timeout);
}
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. + * + * i82557.h + * + * HISTORY + * + * 4-Mar-96 Dieter Siegmund (dieter) at NeXT + * Created. + */ + +#ifndef _I82557_H +#define _I82557_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "i82557Inline.h" +#include "i82557eeprom.h" +#include "i82557PHY.h" +#include + +/* + * Macro: VPRINT + * + * Purpose: + * Dump stuff to console log if the "Verbose" key is present in + * the device description. + */ +#define VPRINT(fmt, args...) \ + if (verbose) \ + IOLog(fmt, ## args); + +#define NUM_RECEIVE_FRAMES 32 +#define NUM_TRANSMIT_FRAMES 32 + +#define TRANSMIT_INT_DELAY 8 +#define TRANSMIT_QUEUE_SIZE 256 + +#define MAX_BUF_SIZE (ETHERMAXPACKET + ETHERCRC) + +#define SPIN_TIMEOUT 50000 +#define SPIN_COUNT 1 + +/* + * Type: tcbQ_t + * + * Purpose: + * Hold transmit hardware queue variables. 
+ */ +typedef struct { + int numTcbs; + tcb_t * activeHead_p; + tcb_t * activeTail_p; + tcb_t * freeHead_p; + int numFree; +} tcbQ_t; + +/* + * Type: overlay_t + * + * Purpose: + * Overlayable memory used during start-up. + */ +typedef union { + cbHeader_t nop; + cb_configure_t configure; + cb_iasetup_t iasetup; + port_selftest_t selftest; +} overlay_t; + +/* + * Type: pageBlock_t + * + * Purpose: + * Track a page sized memory block. + */ +typedef struct { + void * memPtr; + UInt memSize; + void * memAllocPtr; + UInt memAvail; +} pageBlock_t; + +/* + * Adapter activation levels. + */ +enum { + kActivationLevel0 = 0, + kActivationLevel1, + kActivationLevel2 +}; + +class Intel82557 : public IOEthernetController +{ + OSDeclareDefaultStructors(Intel82557) + +public: + IOPhysicalAddress memBasePhysical; + int irq; + IOEthernetAddress myAddress; + IOEthernetInterface * netif; + IOKernelDebugger * debugger; + IOPCIDevice * pciNub; + IOWorkLoop * workLoop; + + IOInterruptEventSource * interruptSrc; + IOOutputQueue * transmitQueue; + IOTimerEventSource * timerSrc; + IONetworkStats * netStats; + IOEthernetStats * etherStats; + IOMemoryMap * csrMap; + OSDictionary * mediumDict; + IONetworkMedium * mediumTable[MEDIUM_TYPE_INVALID]; + + IOMbufLittleMemoryCursor * rxMbufCursor; + IOMbufLittleMemoryCursor * txMbufCursor; + + int txCount; + UInt32 currentLevel; + bool enabledForNetif; + bool enabledForDebugger; + bool promiscuousEnabled; + bool multicastEnabled; + bool allMulticastEnabled; + bool interruptEnabled; + bool packetsReceived; + bool packetsTransmitted; + bool verbose; + bool flowControl; + mediumType_t currentMediumType; + UInt8 phyAddr; + UInt32 phyID; + UInt16 phyStatusPrev; + + /* descriptor and control block data structures */ + pageBlock_t shared; + pageBlock_t rxRing; + pageBlock_t txRing; + + CSR_t * CSR_p; + overlay_t * overlay_p; + IOPhysicalAddress overlay_paddr; + errorCounters_t * errorCounters_p; + IOPhysicalAddress errorCounters_paddr; + i82557eeprom * 
eeprom; + + /* transmit-related */ + tcbQ_t tcbQ; + tcb_t * tcbList_p; + int prevCUCommand; + + /* kernel debugger */ + tcb_t * KDB_tcb_p; + void * KDB_buf_p; + IOPhysicalAddress KDB_buf_paddr; + + /* receive-related */ + rfd_t * rfdList_p; + rfd_t * headRfd; + rfd_t * tailRfd; + + // -------------------------------------------------- + // IOService (or its superclass) methods. + // -------------------------------------------------- + + virtual bool start(IOService * provider); + virtual void stop(IOService * provider); + virtual void free(); + + // -------------------------------------------------- + // IONetworkController methods. + // -------------------------------------------------- + + virtual IOReturn enable(IONetworkInterface * netif); + virtual IOReturn disable(IONetworkInterface * netif); + + virtual IOReturn enable(IOKernelDebugger * debugger); + virtual IOReturn disable(IOKernelDebugger * debugger); + + virtual void sendPacket(void * pkt, UInt32 pkt_len); + virtual void receivePacket(void * pkt, UInt32 * pkt_len, UInt32 timeout); + + virtual UInt32 outputPacket(struct mbuf * m, void * param); + + virtual void getPacketBufferConstraints( + IOPacketBufferConstraints * constraints) const; + + virtual IOOutputQueue * createOutputQueue(); + + virtual const OSString * newVendorString() const; + virtual const OSString * newModelString() const; + + virtual IOReturn setMedium(const IONetworkMedium * medium); + + virtual bool configureInterface(IONetworkInterface * interface); + + virtual bool createWorkLoop(); + virtual IOWorkLoop * getWorkLoop() const; + + // -------------------------------------------------- + // IOEthernetController methods. 
+ // -------------------------------------------------- + + virtual IOReturn getHardwareAddress(IOEthernetAddress * addr); + virtual IOReturn setPromiscuousMode(IOEnetPromiscuousMode mode); + virtual IOReturn setMulticastMode(IOEnetMulticastMode mode); + virtual IOReturn setMulticastList(IOEthernetAddress *addrs, UInt32 count); + + // -------------------------------------------------- + // Intel82557 driver specific methods. + // -------------------------------------------------- + + bool pciConfigInit(IOPCIDevice * provider); + bool initDriver(IOService * provider); + bool coldInit(); + bool enableAdapter(UInt32 level); + bool disableAdapter(UInt32 level); + bool setActivationLevel(UInt32 newLevel); + bool config(); + void disableAdapterInterrupts(); + void enableAdapterInterrupts(); + bool hwInit(); + bool iaSetup(); + bool mcSetup(IOEthernetAddress * addrs, UInt count, bool fromData = false); + bool nop(); + void sendPortCommand(port_command_t command, UInt arg); + bool getDefaultSettings(); + void issueReset(); + + bool _selfTest(); + bool _allocateMemPage(pageBlock_t * p); + void _freeMemPage(pageBlock_t * p); + void _updateStatistics(); + bool _dumpStatistics(); + bool _mdiWritePHY(UInt8 phyAddress, UInt8 regAddress, UInt16 data); + bool _mdiReadPHY(UInt8 phyAddress, UInt8 regAddress, UInt16 * data_p); + void * _memAllocFrom(pageBlock_t * p, UInt allocSize, UInt align); + bool _polledCommand(cbHeader_t * hdr_p, IOPhysicalAddress paddr); + bool _abortReceive(); + bool _startReceive(); + void _resetChip(); + + bool _initTcbQ(bool enable = false); + bool _initRfdList(bool enable = false); + bool _resetRfdList(); + bool _initRingBuffers() { + return (_initTcbQ(true) && _initRfdList(true)); } + bool _clearRingBuffers() { + return (_initTcbQ(false) && _initRfdList(false)); } + + bool _sendPacket(void * pkt, UInt len); + bool _receivePacket(void * pkt, UInt * len, UInt timeout); + + bool updateRFDFromMbuf(rfd_t * rfd_p, struct mbuf * m); + struct mbuf * 
updateTCBForMbuf(tcb_t * tcb_p, struct mbuf * m); + + void interruptOccurred(IOInterruptEventSource * src, int count); + bool receiveInterruptOccurred(); + void transmitInterruptOccurred(); + void timeoutOccurred(IOTimerEventSource * timer); + + // -------------------------------------------------- + // PHY methods. + // -------------------------------------------------- + + bool _phyReset(); + bool _phyWaitAutoNegotiation(); + UInt32 _phyGetID(); + bool _phySetMedium(mediumType_t medium); + bool _phyProbe(); + void _phyPublishMedia(); + bool _phyAddMediumType(UInt32 type, UInt32 speed, UInt32 code); + void _phyReportLinkStatus(bool firstPoll = false); + mediumType_t _phyGetActiveMedium(); + IONetworkMedium * _phyGetMediumWithType(UInt32 type); + mediumType_t _phyGetMediumTypeFromBits(bool rate100, + bool fullDuplex, + bool t4); +}; + +#endif /* !_I82557_H */ diff --git a/iokit/Drivers/network/drvIntel82557/i82557HW.h b/iokit/Drivers/network/drvIntel82557/i82557HW.h new file mode 100644 index 000000000..5242c223e --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557HW.h @@ -0,0 +1,608 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. All rights reserved. + * + * i82557HW.h - Intel 82557/82558 chip-related definitions + * + * HISTORY + * Jan 22, 1996 Dieter Siegmund at NeXT (dieter@next.com) + * Created. + */ + +#ifndef _I82557HW_H +#define _I82557HW_H + +#include + +//------------------------------------------------------------------------- +// Misc definitions. +//------------------------------------------------------------------------- + +#define DWORD_ALIGNMENT 4 +#define WORD_ALIGNMENT 2 +#define PARAGRAPH_ALIGNMENT 16 +#define CACHE_ALIGNMENT 32 + +#define C_NULL (~0) + +#define PCI_CFID_INTEL82557 0x12298086 + +typedef enum { + MEDIUM_TYPE_10_HD = 0, + MEDIUM_TYPE_10_FD, + MEDIUM_TYPE_TX_HD, + MEDIUM_TYPE_TX_FD, + MEDIUM_TYPE_T4, + MEDIUM_TYPE_AUTO, + MEDIUM_TYPE_INVALID, +} mediumType_t; + +//------------------------------------------------------------------------- +// SCB status word. +// Offset 0, 16-bit, RW. +//------------------------------------------------------------------------- +typedef UInt16 scb_status_t; +#define SCB_STATUS_CX BIT(15) // command block with 'I' bit set. +#define SCB_STATUS_FR BIT(14) // RU finished receiving a frame. +#define SCB_STATUS_CNA BIT(13) // CU active to suspended/idle. +#define SCB_STATUS_RNR BIT(12) // RU no longer in ready state. +#define SCB_STATUS_MDI BIT(11) // MDI read/write cycle is done. +#define SCB_STATUS_SWI BIT(10) // software interrupt. +#define SCB_STATUS_ER BIT(9) // early receive interrupt. +#define SCB_STATUS_FCP BIT(8) // flow control pause interrupt. +#define SCB_STATUS_INT_MASK 0xff00 // mask for all interrupt bits. 
+ +#define SCB_STATUS_CUS_SHIFT 6 +#define SCB_STATUS_CUS_MASK CSR_MASK(SCB_STATUS_CUS, 0x3) +#define SCB_CUS_IDLE 0 +#define SCB_CUS_SUSPEND 1 +#define SCB_CUS_ACTIVE 2 + +#define SCB_STATUS_RUS_SHIFT 2 +#define SCB_STATUS_RUS_MASK CSR_MASK(SCB_STATUS_RUS, 0xf) +#define SCB_RUS_IDLE 0 +#define SCB_RUS_SUSPEND 1 +#define SCB_RUS_NO_RESOURCES 2 +#define SCB_RUS_READY 4 +#define SCB_RUS_SUSPEND_NO_RBDS 9 +#define SCB_RUS_NO_RBDS 10 +#define SCB_RUS_READY_NO_RBDS 12 + +//------------------------------------------------------------------------- +// SCB interrupt control byte. +// Offset 3, 8-bit, RW. +//------------------------------------------------------------------------- +typedef UInt8 scb_interrupt_t; +#define SCB_INTERRUPT_CX BIT(7) // interrupt masks +#define SCB_INTERRUPT_FR BIT(6) +#define SCB_INTERRUPT_CNA BIT(5) +#define SCB_INTERRUPT_RNR BIT(4) +#define SCB_INTERRUPT_ER BIT(3) +#define SCB_INTERRUPT_FCP BIT(2) +#define SCB_INTERRUPT_SI BIT(1) +#define SCB_INTERRUPT_M BIT(0) + +//------------------------------------------------------------------------- +// SCB command byte. +// Offset 2, 8-bit, RW. +//------------------------------------------------------------------------- +typedef UInt8 scb_command_t; +#define SCB_COMMAND_CUC_SHIFT 4 +#define SCB_COMMAND_CUC_MASK CSR_MASK(SCB_COMMAND_CUC, 0xf) +#define SCB_CUC_NOP 0 +#define SCB_CUC_START 1 +#define SCB_CUC_RESUME 2 +#define SCB_CUC_LOAD_DUMP_ADDR 4 +#define SCB_CUC_DUMP_STAT 5 +#define SCB_CUC_LOAD_BASE 6 +#define SCB_CUC_DUMP_RESET_STAT 7 +#define SCB_CUC_STATIC_RESUME 10 + +#define SCB_COMMAND_RUC_SHIFT 0 +#define SCB_COMMAND_RUC_MASK CSR_MASK(SCB_COMMAND_RUC, 0x7) +#define SCB_RUC_NOP 0 +#define SCB_RUC_START 1 +#define SCB_RUC_RESUME 2 +#define SCB_RUC_DMA_REDIRECT 3 +#define SCB_RUC_ABORT 4 +#define SCB_RUC_LOAD_HDS 5 +#define SCB_RUC_LOAD_BASE 6 +#define SCB_RUC_RBD_RESUME 7 + +//------------------------------------------------------------------------- +// MDI control register. 
+// Offset 0x10, 32-bit, RW. +//------------------------------------------------------------------------- +typedef UInt32 mdi_control_t; +#define MDI_CONTROL_INT_ENABLE BIT(29) // interrupt enable. +#define MDI_CONTROL_READY BIT(28) // ready bit. +#define MDI_CONTROL_OPCODE_SHIFT 26 +#define MDI_CONTROL_OPCODE_MASK CSR_MASK(MDI_CONTROL_OPCODE, 0x3) +#define MDI_CONTROL_OP_WRITE 1 +#define MDI_CONTROL_OP_READ 2 +#define MDI_CONTROL_PHYADDR_SHIFT 21 +#define MDI_CONTROL_PHYADDR_MASK CSR_MASK(MDI_CONTROL_PHYADDR, 0x1f) +#define MDI_CONTROL_REGADDR_SHIFT 16 +#define MDI_CONTROL_REGADDR_MASK CSR_MASK(MDI_CONTROL_REGADDR, 0x1f) +#define MDI_CONTROL_DATA_SHIFT 0 +#define MDI_CONTROL_DATA_MASK CSR_MASK(MDI_CONTROL_DATA, 0xffff) + +//------------------------------------------------------------------------- +// EEPROM control register. +// Offset 0xE, 16-bit, RW. +//------------------------------------------------------------------------- +typedef UInt16 eeprom_control_t; +#define EEPROM_CONTROL_EEDO BIT(3) +#define EEPROM_CONTROL_EEDI BIT(2) +#define EEPROM_CONTROL_EECS BIT(1) +#define EEPROM_CONTROL_EESK BIT(0) + +//------------------------------------------------------------------------- +// Flow control threshold register. +// Offset 0x19, 8-bit, RW. +//------------------------------------------------------------------------- +#define FC_THRESHOLD_SHIFT 0 +#define FC_THRESHOLD_MASK CSR_MASK(FC_THRESHOLD, 0x7) +#define FC_THRESHOLD_512 0 +#define FC_THRESHOLD_1024 1 +#define FC_THRESHOLD_1280 2 +#define FC_THRESHOLD_1536 3 + +//------------------------------------------------------------------------- +// Flow control command register. +// Offset 0x20, 8-bit, RW. 
+//------------------------------------------------------------------------- +#define FC_XON BIT(0) +#define FC_XOFF BIT(1) +#define FC_FULL BIT(2) +#define FC_PAUSED BIT(3) +#define FC_PAUSED_LOW BIT(4) + +//------------------------------------------------------------------------- +// Generic command block definition. +//------------------------------------------------------------------------- +#define CB_NOP 0 +#define CB_IA_ADDRESS 1 +#define CB_CONFIGURE 2 +#define CB_MULTICAST 3 +#define CB_TRANSMIT 4 +#define CB_LOAD_MICROCODE 5 +#define CB_DUMP 6 +#define CB_DIAGNOSE 7 + +typedef UInt16 cb_status_t; +#define CB_STATUS_C BIT(15) // command complete. +#define CB_STATUS_OK BIT(13) // DMA OK. + +typedef UInt16 cb_command_t; +#define CB_EL BIT(15) // end of list. +#define CB_S BIT(14) // suspend bit. +#define CB_I BIT(13) // interrupt bit. +#define CB_CMD_SHIFT 0 +#define CB_CMD_MASK CSR_MASK(CB_CMD, 0x7) + +#define CB_CMD_NOP 0x0 +#define CB_CMD_IASETUP 0x1 +#define CB_CMD_CONFIGURE 0x2 +#define CB_CMD_MCSETUP 0x3 +#define CB_CMD_TRANSMIT 0x4 + +static __inline__ char * +CUCommandString(int cmd) +{ + char * s[] = { + "nop", + "iasetup", + "configure", + "mcsetup", + "transmit" + }; + return (s[cmd]); +} + +typedef struct { + volatile cb_status_t status; + volatile cb_command_t command; + IOPhysicalAddress link; +} cbHeader_t; + +//------------------------------------------------------------------------- +// Configure command. 
+//------------------------------------------------------------------------- +#define CB_CONFIG_BYTE_COUNT 22 + +#define CB_CB0_BYTE_COUNT_SHIFT 0 +#define CB_CB0_BYTE_COUNT_MASK CSR_MASK(CB_CB0_BYTE_COUNT, 0x3f) + +#define CB_CB1_TX_FIFO_LIMIT_SHIFT 4 +#define CB_CB1_TX_FIFO_LIMIT_MASK CSR_MASK(CB_CB1_TX_FIFO_LIMIT, 0xf) +#define CB_CB1_RX_FIFO_LIMIT_SHIFT 0 +#define CB_CB1_RX_FIFO_LIMIT_MASK CSR_MASK(CB_CB1_RX_FIFO_LIMIT, 0xf) +#define CB_CB1_TX_FIFO_0 8 // 0 bytes +#define CB_CB1_RX_FIFO_64 8 // 64 bytes + +#define CB_CB2_ADAPTIVE_IFS_SHIFT 0 +#define CB_CB2_ADAPTIVE_IFS_MASK CSR_MASK(CB_CB2_ADAPTIVE_IFS, 0xff) + +#define CB_CB3_TERM_ON_CL BIT(3) +#define CB_CB3_READ_AL_ENABLE BIT(2) +#define CB_CB3_TYPE_ENABLE BIT(1) +#define CB_CB3_MWI_ENABLE BIT(0) + +#define CB_CB4_RX_MIN_SHIFT 0 +#define CB_CB4_RX_MIN_MASK CSR_MASK(CB_CB4_RX_MIN, 0x7f) + +#define CB_CB5_DMBC_EN BIT(7) +#define CB_CB5_TX_MAX_SHIFT 0 +#define CB_CB5_TX_MAX_MASK CSR_MASK(CB_CB4_TX_MAX, 0x7f) + +#define CB_CB6_SAVE_BF BIT(7) +#define CB_CB6_DISC_OVER BIT(6) +#define CB_CB6_STD_STATS BIT(5) +#define CB_CB6_STD_TCB BIT(4) +#define CB_CB6_CI_INT BIT(3) +#define CB_CB6_TNO_INT BIT(2) +#define CB_CB6_NON_DIRECT_DMA BIT(1) +#define CB_CB6_LATE_SCB BIT(0) + +#define CB_CB7_DYNAMIC_TBD BIT(7) +#define CB_CB7_UNDERRUN_RETRY_SHIFT 1 +#define CB_CB7_UNDERRUN_RETRY_MASK CSR_MASK(CB_CB7_UNDERRUN_RETRY, 0x3) +#define CB_CB7_UNDERRUN_RETRY_1 1 +#define CB_CB7_UNDERRUN_RETRY_2 2 +#define CB_CB7_UNDERRUN_RETRY_3 3 +#define CB_CB7_DISC_SHORT_FRAMES BIT(0) + +#define CB_CB8_CSMA_EN BIT(0) + +#define CB_CB10_LOOPBACK_SHIFT 6 +#define CB_CB10_LOOPBACK_MASK CSR_MASK(CB_CB10_LOOPBACK, 0x3) +#define CB_CB10_PREAMBLE_SHIFT 4 +#define CB_CB10_PREAMBLE_MASK CSR_MASK(CB_CB10_PREAMBLE, 0x3) +#define CB_CB10_PREAMBLE_1_BYTE 0 +#define CB_CB10_PREAMBLE_3_BYTES 1 +#define CB_CB10_PREAMBLE_7_BYTES 2 +#define CB_CB10_PREAMBLE_15_BYTES 3 +#define CB_CB10_NSAI BIT(3) + +#define CB_CB11_LIN_PRIORITY_SHIFT 0 +#define 
CB_CB11_LIN_PRIORITY_MASK CSR_MASK(CB_CB11_PRIORITY, 0x7) + +#define CB_CB12_IFS_SHIFT 4 +#define CB_CB12_IFS_MASK CSR_MASK(CB_CB12_IFS, 0xf) +#define CB_CB12_IFS_96_BIT_TIMES 0x6 +#define CB_CB12_LIN_PRIORITY BIT(0) + +#define CB_CB13_FC_TYPE_LSB_SHIFT 0 +#define CB_CB13_FC_TYPE_LSB_MASK CSR_MASK(CB_CB13_FC_TYPE_LSB, 0xff) +#define CB_CB13_FC_TYPE_LSB_DEF 0 // 82558 compatible + +#define CB_CB14_FC_TYPE_MSB_SHIFT 0 +#define CB_CB14_FC_TYPE_MSB_MASK CSR_MASK(CB_CB14_FC_TYPE_MSB, 0xff) +#define CB_CB14_FC_TYPE_MSB_DEF 0xf2 // 82558 compatible + +#define CB_CB15_CRS_CDT BIT(7) +#define CB_CB15_BROADCAST_DISABLE BIT(1) +#define CB_CB15_PROMISCUOUS BIT(0) + +#define CB_CB16_FC_DELAY_LSB_SHIFT 0 +#define CB_CB16_FC_DELAY_LSB_MASK CSR_MASK(CB_CB16_FC_DELAY_LSB, 0xff) +#define CB_CB16_FC_DELAY_LSB_DEF 0 + +#define CB_CB17_FC_DELAY_MSB_SHIFT 0 +#define CB_CB17_FC_DELAY_MSB_MASK CSR_MASK(CB_CB17_FC_DELAY_MSB, 0xff) +#define CB_CB17_FC_DELAY_MSB_DEF 0x40 + +#define CB_CB18_LONG_RX_OK BIT(3) +#define CB_CB18_CRC_XFER BIT(2) +#define CB_CB18_PADDING BIT(1) +#define CB_CB18_STRIPPING BIT(0) + +#define CB_CB19_AUTO_FDX BIT(7) +#define CB_CB19_FORCE_FDX BIT(6) +#define CB_CB19_REJECT_FC BIT(5) +#define CB_CB19_RX_FC_RESTART BIT(4) +#define CB_CB19_RX_FC_RESTOP BIT(3) +#define CB_CB19_TX_FC BIT(2) +#define CB_CB19_MAGIC_PKT_WAKEUP BIT(1) +#define CB_CB19_ADDRESS_WAKEUP BIT(0) + +#define CB_CB20_MULTI_IA BIT(6) +#define CB_CB20_FC_ADDR_LSB_SHIFT 0 +#define CB_CB20_FC_ADDR_LSB_MASK CSR_MASK(CB_CB20_FC_ADDR_LSB, 0x1f) +#define CB_CB20_FC_ADDR_LSB_DEF 0x0f + +#define CB_CB21_MULTICAST_ALL BIT(3) + +typedef struct cb_configure { + cbHeader_t header; + UInt8 byte[24]; +} cb_configure_t; + +//------------------------------------------------------------------------- +// MC-Setup command. 
+//------------------------------------------------------------------------- +typedef struct cb_mcsetup { + cbHeader_t header; + UInt16 count; + IOEthernetAddress addrs[0]; +} cb_mcsetup_t; + +//------------------------------------------------------------------------- +// IA-Setup command. +//------------------------------------------------------------------------- +typedef struct cb_iasetup { + cbHeader_t header; + IOEthernetAddress addr; +} cb_iasetup_t; + +//------------------------------------------------------------------------- +// Port Commands. +// Enumerated port command values. +//------------------------------------------------------------------------- +typedef enum { + portReset_e = 0, + portSelfTest_e = 1, + portSelectiveReset_e = 2, + portDump_e = 3, +} port_command_t; + +#define PORT_ADDRESS_SHIFT 4 +#define PORT_ADDRESS_MASK CSR_MASK(PORT_FUNCTION, 0xfffffff) + +#define PORT_FUNCTION_SHIFT 0 +#define PORT_FUNCTION_MASK CSR_MASK(PORT_FUNCTION, 0xf) + +//------------------------------------------------------------------------- +// Port Self-Test +// Definition for self test area. +//------------------------------------------------------------------------- +#define PORT_SELFTEST_GENERAL BIT(12) +#define PORT_SELFTEST_DIAGNOSE BIT(5) +#define PORT_SELFTEST_REGISTER BIT(3) +#define PORT_SELFTEST_ROM BIT(2) + +typedef struct port_selftest_t { + UInt32 signature; + UInt32 results; +} port_selftest_t; + +/* + * Typedef: CSR_t + * + * Purpose: Control Status Registers block + * Communication to the chip occurs via this set of + * memory-mapped (also io-mapped, which we don't use) + * registers. 
+ */ +typedef struct csr { + volatile scb_status_t status; + volatile scb_command_t command; + volatile scb_interrupt_t interrupt; + volatile IOPhysicalAddress pointer; + volatile UInt32 port; + volatile UInt16 flashControl; + volatile eeprom_control_t eepromControl; + volatile mdi_control_t mdiControl; + volatile UInt32 rxDMAByteCount; + volatile UInt8 earlyRxInterrupt; + volatile UInt8 flowControlThreshold; + volatile UInt8 flowControlCommand; + volatile UInt8 powerManagement; +} CSR_t; + +//------------------------------------------------------------------------- +// Structure containing error counters retrieved via: +// Dump Statistics Counters command, or +// Dump and Reset Statistics Counters command. +// +// NOTE: 82558 can return an extended set of statistics counters. +//------------------------------------------------------------------------- +typedef struct { + UInt32 tx_good_frames; + UInt32 tx_maxcol_errors; + UInt32 tx_late_collision_errors; + UInt32 tx_underrun_errors; + UInt32 tx_lost_carrier_sense_errors; + UInt32 tx_deferred; + UInt32 tx_single_collisions; + UInt32 tx_multiple_collisions; + UInt32 tx_total_collisions; + UInt32 rx_good_frames; + UInt32 rx_crc_errors; + UInt32 rx_alignment_errors; + UInt32 rx_resource_errors; + UInt32 rx_overrun_errors; + UInt32 rx_collision_detect_errors; + UInt32 rx_short_frame_errors; + UInt32 _status; +#define DUMP_STATUS 0x0 +#define DUMP_COMPLETE 0xa005 +#define DUMP_AND_RESET_COMPLETE 0xa007 +} errorCounters_t; + +//------------------------------------------------------------------------- +// RBD count dword. +// Offset 0, 32-bit, RW. +//------------------------------------------------------------------------- +typedef UInt32 rbd_count_t; +#define RBD_COUNT_EOF BIT(15) // end-of-frame bit. +#define RBD_COUNT_F BIT(14) // buffer fetch bit. 
+#define RBD_COUNT_SHIFT 0 +#define RBD_COUNT_MASK CSR_MASK(RBD_COUNT, 0x3fff) + +//------------------------------------------------------------------------- +// RBD size dword. +// Offset 0xC, 32-bit, RW. +//------------------------------------------------------------------------- +typedef UInt32 rbd_size_t; +#define RBD_SIZE_EL BIT(15) // EL bit. +#define RBD_SIZE_SHIFT 0 +#define RBD_SIZE_MASK CSR_MASK(RBD_SIZE, 0x3fff) + +//------------------------------------------------------------------------- +// RBD - receive buffer descriptor definition. +//------------------------------------------------------------------------- +typedef struct rbd { + volatile rbd_count_t count; + volatile IOPhysicalAddress link; + volatile IOPhysicalAddress buffer; + volatile rbd_size_t size; + + /* driver private */ + + struct rbd * _next; + IOPhysicalAddress _paddr; + struct mbuf * _mbuf; + UInt32 _pad; +} rbd_t; + +//------------------------------------------------------------------------- +// RFD status word. +// Offset 0, 16-bit, RW. +//------------------------------------------------------------------------- +typedef UInt16 rfd_status_t; +#define RFD_STATUS_C BIT(15) // complete bit. +#define RFD_STATUS_OK BIT(13) // OK bit. +#define RFD_STATUS_CRC_ERROR BIT(11) // CRC error bit. +#define RFD_STATUS_ALIGNMENT_ERROR BIT(10) // alignment error. +#define RFD_STATUS_NO_RESOURCES BIT(9) // no buffer space. +#define RFD_STATUS_DMA_OVERRUN BIT(8) // receive DMA overrun. +#define RFD_STATUS_FRAME_TOO_SHORT BIT(7) // frame too short. +#define RFD_STATUS_TYPE_FRAME BIT(5) // type/length bit. +#define RFD_STATUS_RX_ERROR BIT(4) // RX_ERR pin on PHY was set. +#define RFD_STATUS_NO_ADDR_MATCH BIT(2) // no address match. +#define RFD_STATUS_IA_MATCH BIT(1) // IA address match. +#define RFD_STATUS_COLLISION BIT(0) // receive collision. + +//------------------------------------------------------------------------- +// RFD command word. +// Offset 2, 16-bit, RW. 
+//------------------------------------------------------------------------- +typedef UInt16 rfd_command_t; +#define RFD_COMMAND_EL BIT(15) // EL bit. +#define RFD_COMMAND_S BIT(14) // suspend bit. +#define RFD_COMMAND_H BIT(4) // header RFD bit. +#define RFD_COMMAND_SF BIT(3) // flexible mode bit. + +//------------------------------------------------------------------------- +// RFD misc dword. +// Offset 0xC, 32-bit, RW. +//------------------------------------------------------------------------- +typedef UInt32 rfd_misc_t; +#define RFD_MISC_EOF BIT(15) // end-of-frame bit. +#define RFD_MISC_F BIT(14) // buffer fetch bit. +#define RFD_MISC_ACT_COUNT_SHIFT 0 +#define RFD_MISC_ACT_COUNT_MASK CSR_MASK(RFD_MISC_ACT_COUNT, 0x3fff) +#define RFD_MISC_SIZE_SHIFT 16 +#define RFD_MISC_SIZE_MASK CSR_MASK(RFD_MISC_SIZE, 0x3fff) + +//------------------------------------------------------------------------- +// RFD - receive frame descriptor definition. +//------------------------------------------------------------------------- +typedef struct rfd { + volatile rfd_status_t status; + volatile rfd_command_t command; + volatile IOPhysicalAddress link; + volatile IOPhysicalAddress rbdAddr; + volatile rfd_misc_t misc; // 16 bytes + + UInt32 _pad[2]; // pad it to 64 bytes + + /* driver private */ + + struct rfd * _next; + IOPhysicalAddress _paddr; + rbd_t _rbd; // 32 bytes +} rfd_t; + +//------------------------------------------------------------------------- +// TBD - Transmit Buffer Descriptor. +//------------------------------------------------------------------------- +typedef UInt16 tbd_size_t; +#define TBD_SIZE_EL BIT(15) // end of list +#define TBD_SIZE_SHIFT 0 +#define TBD_SIZE_MASK CSR_MASK(TBD_SIZE, 0x3fff) + +typedef struct tbd { + volatile IOPhysicalAddress addr; + volatile tbd_size_t size; +} tbd_t; + +//------------------------------------------------------------------------- +// TxCB Status Word. +// Offset 0, 16-bit, RW. 
+//------------------------------------------------------------------------- +typedef UInt16 tcb_status_t; +#define TCB_STATUS_C BIT(15) // complete bit +#define TCB_STATUS_OK BIT(13) // error free completion +#define TCB_STATUS_U BIT(12) // underrun bit + +//------------------------------------------------------------------------- +// TxCB Command Word. +// Offset 2, 16-bit, RW. +//------------------------------------------------------------------------- +typedef UInt16 tcb_command_t; +#define TCB_COMMAND_EL BIT(15) // end of list +#define TCB_COMMAND_S BIT(14) // suspend bit +#define TCB_COMMAND_I BIT(13) // interrupt bit +#define TCB_COMMAND_NC BIT(4) // CRC/Source Address control +#define TCB_COMMAND_SF BIT(3) // flexible mode bit +#define TCB_COMMAND_SHIFT 0 +#define TCB_COMMAND_MASK CSR_MASK(TCB_COMMAND, 0x7) + +//------------------------------------------------------------------------- +// TxCB Count Word. +// Offset 0xC, 16-bit, RW. +//------------------------------------------------------------------------- +typedef UInt16 tcb_count_t; +#define TCB_COUNT_EOF BIT(15) // whole frame in TCB +#define TCB_COUNT_SHIFT 0 +#define TCB_COUNT_MASK CSR_MASK(TCB_COUNT, 0x3fff) + +//------------------------------------------------------------------------- +// TxCB - Transmit Command Block. 
+//------------------------------------------------------------------------- +#define TBDS_PER_TCB 12 +#define TCB_TX_THRESHOLD 0xe0 + +typedef struct tcb { + volatile tcb_status_t status; + volatile tcb_command_t command; + volatile IOPhysicalAddress link; + volatile IOPhysicalAddress tbdAddr; + volatile tcb_count_t count; + volatile UInt8 threshold; + volatile UInt8 number; + + /* driver private */ + + tbd_t _tbds[TBDS_PER_TCB]; + struct tcb * _next; + IOPhysicalAddress _paddr; + struct mbuf * _mbuf; + unsigned _pad; +} tcb_t; + +#endif /* !_I82557HW_H */ + diff --git a/iokit/Drivers/network/drvIntel82557/i82557Inline.h b/iokit/Drivers/network/drvIntel82557/i82557Inline.h new file mode 100644 index 000000000..7fec51241 --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557Inline.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _I82557INLINE_H +#define _I82557INLINE_H + +#include +#include + +//--------------------------------------------------------------------------- +// CSR macros. 
+ +#define CSR_VALUE(name, x) (((x) & name ## _MASK) >> name ## _SHIFT) +#define CSR_FIELD(name, x) (((x) << name ## _SHIFT) & name ## _MASK) +#define CSR_MASK(name, x) ((x) << name ## _SHIFT) +#define BIT(x) (1 << (x)) + +#define IOSync() OSSynchronizeIO() + +//--------------------------------------------------------------------------- +// CSR read & write. + +static inline +UInt8 +OSReadLE8(volatile void * base) +{ + return *(volatile UInt8 *)base; +} + +static inline +UInt16 +OSReadLE16(volatile void * base) +{ + return OSReadLittleInt16(base, 0); +} + +static inline +UInt32 +OSReadLE32(volatile void * base) +{ + return OSReadLittleInt32(base, 0); +} + +static inline +void +OSWriteLE8(volatile void * base, UInt8 data) +{ + *(volatile UInt8 *)base = data; + IOSync(); +} + +static inline +void +OSWriteLE16(volatile void * base, UInt16 data) +{ + OSWriteLittleInt16(base, 0, data); + IOSync(); +} + +static inline +void +OSWriteLE32(volatile void * base, UInt32 data) +{ + OSWriteLittleInt32(base, 0, data); + IOSync(); +} + +//--------------------------------------------------------------------------- +// Set/clear bit(s) macros. + +#define __SET(n) \ +static inline void \ +OSSetLE##n(volatile void * base, UInt##n bit) \ +{ \ + OSWriteLE##n(base, (OSReadLE##n(base) | (bit))); \ +} + +#define __CLR(n) \ +static inline void \ +OSClearLE##n(volatile void * base, UInt##n bit) \ +{ \ + OSWriteLE##n(base, (OSReadLE##n(base) & ~(bit))); \ +} + +__SET(8) +__SET(16) +__SET(32) + +__CLR(8) +__CLR(16) +__CLR(32) + +#endif /* !_I82557INLINE_H */ diff --git a/iokit/Drivers/network/drvIntel82557/i82557PHY.cpp b/iokit/Drivers/network/drvIntel82557/i82557PHY.cpp new file mode 100644 index 000000000..3c36d550a --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557PHY.cpp @@ -0,0 +1,628 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. All rights reserved. + * + * i82557PHY.cpp + * + */ + +#include "i82557.h" +#include "i82557PHY.h" + +//--------------------------------------------------------------------------- +// Function: _logMDIStatus +// +// Purpose: +// Dump the contents of the MDI status register. + +static inline void +_logMDIStatus(mdi_reg_t reg) +{ + if (reg & MDI_STATUS_T4) + IOLog("PHY: T4 capable\n"); + if (reg & MDI_STATUS_TX_FD) + IOLog("PHY: 100Base-TX full duplex capable\n"); + if (reg & MDI_STATUS_TX_HD) + IOLog("PHY: 100Base-TX half duplex capable\n"); + if (reg & MDI_STATUS_10_FD) + IOLog("PHY: 10Base-T full duplex capable\n"); + if (reg & MDI_STATUS_10_HD) + IOLog("PHY: 10Base-T half duplex capable\n"); + if (reg & MDI_STATUS_EXTENDED_CAPABILITY) + IOLog("PHY: has extended capability registers\n"); + if (reg & MDI_STATUS_JABBER_DETECTED) + IOLog("PHY: jabberDetect set\n"); + if (reg & MDI_STATUS_AUTONEG_CAPABLE) + IOLog("PHY: auto negotiation capable\n"); + IOLog("PHY: link is %s\n", (reg & MDI_STATUS_LINK_STATUS) ? 
"UP" : "DOWN"); + return; +} + +//--------------------------------------------------------------------------- +// Function: _getModelId +// +// Purpose: +// Read the MDI ID registers and form a single 32-bit id. + +UInt32 Intel82557::_phyGetID() +{ + UInt16 id1, id2; + _mdiReadPHY(phyAddr, MDI_REG_PHYID_WORD_1, &id1); + _mdiReadPHY(phyAddr, MDI_REG_PHYID_WORD_2, &id2); + return ((id2 << 16) | id1); +} + +//--------------------------------------------------------------------------- +// Function: _phySetMedium +// +// Purpose: +// Setup the PHY to the medium type given. +// Returns true on success. + +bool Intel82557::_phySetMedium(mediumType_t medium) +{ + mdi_reg_t status; + mdi_reg_t control; + mediumType_t phyMedium = medium; + UInt32 mediumCapableMask; + + // Reset PHY before changing medium selection. + // + _phyReset(); + + // Get local capability. + // + _mdiReadPHY(phyAddr, MDI_REG_STATUS, &status); + + // Create a medium capable mask. + // + mediumCapableMask = (status >> 11) & 0x1f; + + // Force the PHY's data rate and duplex settings if the medium type + // chosen is not AUTO. + // + if (phyMedium != MEDIUM_TYPE_AUTO) { + if ((MEDIUM_TYPE_TO_MASK(phyMedium) & mediumCapableMask) == 0) { + // Hardware is not capable of selecting the user-selected + // medium. + // + return false; + } + else { + // Medium chosen is valid, go ahead and set PHY. + // + bool speed100 = false; + bool fullDuplex = false; + + if ((medium == MEDIUM_TYPE_TX_HD) || + (medium == MEDIUM_TYPE_TX_FD) || + (medium == MEDIUM_TYPE_T4)) + speed100 = true; + + if ((medium == MEDIUM_TYPE_10_FD) || (medium == MEDIUM_TYPE_TX_FD)) + fullDuplex = true; + + // Disable auto-negotiation function and force speed + duplex. + // + IOSleep(300); + + control = ((speed100 ? MDI_CONTROL_100 : 0) | + (fullDuplex ? MDI_CONTROL_FULL_DUPLEX : 0)); + + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control); + + VPRINT("%s: user forced %s Mbit/s%s mode\n", getName(), + speed100 ? "100" : "10", + fullDuplex ? 
" full duplex" : ""); + + IOSleep(50); + } + } + else { + // For MEDIUM_TYPE_AUTO, enable and restart auto-negotiation. + // + control = MDI_CONTROL_AUTONEG_ENABLE; + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control); + IOSleep(1); + control |= MDI_CONTROL_RESTART_AUTONEG; + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control); + } + + // Some special bit twiddling for NSC83840. + // + if (phyID == PHY_MODEL_NSC83840) { + /* set-up National Semiconductor 83840 specific registers */ + + mdi_reg_t reg; + + VPRINT("%s: setting NSC83840-specific registers\n", getName()); + _mdiReadPHY(phyAddr, NSC83840_REG_PCR, ®); + + /* + * This bit MUST be set, otherwise the card may not transmit at + * all in 100Mb/s mode. This is specially true for 82557 cards + * with the DP83840 PHY. + * + * In the NSC documentation, bit 10 of PCS register is labeled + * as a reserved bit. What is the real function of this bit? + */ + reg |= (NSC83840_PCR_TXREADY | NSC83840_PCR_CIM_DIS); + + _mdiWritePHY(phyAddr, NSC83840_REG_PCR, reg); + } + + currentMediumType = medium; + + return true; +} + +//--------------------------------------------------------------------------- +// Function: _phyAddMediumType +// +// Purpose: +// Add a single medium object to the medium dictionary. +// Also add the medium object to an array for fast lookup. + +bool Intel82557::_phyAddMediumType(UInt32 type, UInt32 speed, UInt32 code) +{ + IONetworkMedium * medium; + bool ret = false; + + medium = IONetworkMedium::medium(type, speed, 0, code); + if (medium) { + ret = IONetworkMedium::addMedium(mediumDict, medium); + if (ret) + mediumTable[code] = medium; + medium->release(); + } + return ret; +} + +//--------------------------------------------------------------------------- +// Function: _phyPublishMedia +// +// Purpose: +// Examine the PHY capabilities and advertise all supported medium types. +// +// FIXME: Non PHY medium types are not probed. 
+ +#define MBPS 1000000 + +void Intel82557::_phyPublishMedia() +{ + mdi_reg_t status; + + // Read the PHY's media capability. + // + _mdiReadPHY(phyAddr, MDI_REG_STATUS, &status); + + _phyAddMediumType(kIOMediumEthernetAuto, + 0, + MEDIUM_TYPE_AUTO); + + if (status & MDI_STATUS_10_HD) + _phyAddMediumType(kIOMediumEthernet10BaseT | kIOMediumOptionHalfDuplex, + 10 * MBPS, + MEDIUM_TYPE_10_HD); + + if (status & MDI_STATUS_10_FD) + _phyAddMediumType(kIOMediumEthernet10BaseT | kIOMediumOptionFullDuplex, + 10 * MBPS, + MEDIUM_TYPE_10_FD); + + if (status & MDI_STATUS_TX_HD) + _phyAddMediumType( + kIOMediumEthernet100BaseTX | kIOMediumOptionHalfDuplex, + 100 * MBPS, + MEDIUM_TYPE_TX_HD); + + if (status & MDI_STATUS_TX_FD) + _phyAddMediumType( + kIOMediumEthernet100BaseTX | kIOMediumOptionFullDuplex, + 100 * MBPS, + MEDIUM_TYPE_TX_FD); + + if (status & MDI_STATUS_T4) + _phyAddMediumType(kIOMediumEthernet100BaseT4, + 100 * MBPS, + MEDIUM_TYPE_T4); +} + +//--------------------------------------------------------------------------- +// Function: _phyReset +// +// Purpose: +// Reset the PHY. 
+ +#define PHY_RESET_TIMEOUT 100 // ms +#define PHY_RESET_DELAY 10 // ms +#define PHY_POST_RESET_DELAY 300 // us + +bool Intel82557::_phyReset() +{ + int i = PHY_RESET_TIMEOUT; + mdi_reg_t control; + + if (!_mdiReadPHY(phyAddr, MDI_REG_CONTROL, &control)) + return false; + + // Set the reset bit in the PHY Control register + // + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control | MDI_CONTROL_RESET); + + // Wait till reset process is complete (MDI_CONTROL_RESET returns to zero) + // + while (i > 0) { + if (!_mdiReadPHY(phyAddr, MDI_REG_CONTROL, &control)) + return false; + if ((control & MDI_CONTROL_RESET) == 0) { + IODelay(PHY_POST_RESET_DELAY); + return true; + } + IOSleep(PHY_RESET_DELAY); + i -= PHY_RESET_DELAY; + } + return false; +} + +//--------------------------------------------------------------------------- +// Function: _phyWaitAutoNegotiation +// +// Purpose: +// Wait until auto-negotiation is complete. + +#define PHY_NWAY_TIMEOUT 5000 // ms +#define PHY_NWAY_DELAY 20 // ms + +bool Intel82557::_phyWaitAutoNegotiation() +{ + int i = PHY_NWAY_TIMEOUT; + mdi_reg_t status; + + while (i > 0) { + if (!_mdiReadPHY(phyAddr, MDI_REG_STATUS, &status)) + return false; + + if (status & MDI_STATUS_AUTONEG_COMPLETE) + return true; + + IOSleep(PHY_NWAY_DELAY); + i -= PHY_NWAY_DELAY; + } + return false; +} + +//--------------------------------------------------------------------------- +// Function: _phyProbe +// +// Purpose: +// Find out which PHY is active. 
+// +#define AUTONEGOTIATE_TIMEOUT 35 + +bool Intel82557::_phyProbe() +{ + bool foundPhy1 = false; + mdi_reg_t control; + mdi_reg_t status; + + if (phyAddr == PHY_ADDRESS_I82503) { + VPRINT("%s: overriding to use Intel 82503", getName()); + return true; + } + + if (phyAddr > 0 && phyAddr < PHY_ADDRESS_MAX) { + VPRINT("%s: looking for Phy 1 at address %d\n", getName(), phyAddr); + _mdiReadPHY(phyAddr, MDI_REG_CONTROL, &control); + _mdiReadPHY(phyAddr, MDI_REG_STATUS, &status); // do it twice + _mdiReadPHY(phyAddr, MDI_REG_STATUS, &status); + if (control == 0xffff || (status == 0 && control == 0)) + { + VPRINT("%s: Phy 1 at address %d does not exist\n", getName(), + phyAddr); + } + else { + VPRINT("%s: Phy 1 at address %d exists\n", getName(), phyAddr); + foundPhy1 = true; + if (status & MDI_STATUS_LINK_STATUS) { + VPRINT("%s: found Phy 1 at address %d with link\n", + getName(), phyAddr); + return true; // use PHY1 + } + } + } + + // PHY1 does not exist, or it does not have valid link. + // Try PHY0 at address 0. + // + _mdiReadPHY(PHY_ADDRESS_0, MDI_REG_CONTROL, &control); + _mdiReadPHY(PHY_ADDRESS_0, MDI_REG_STATUS, &status); + + if (control == 0xffff || (status == 0 && control == 0)) { + if (phyAddr == 0) { /* if address forced to 0, then fail */ + IOLog("%s: phy0 not detected\n", getName()); + return false; + } + if (foundPhy1 == true) { + VPRINT("%s: no Phy at address 0, using Phy 1 without link\n", + getName()); + return true; // use PHY1 without a valid link + } + VPRINT("%s: no Phy at address 0, defaulting to 82503\n", getName()); + phyAddr = PHY_ADDRESS_I82503; + return true; + } + + // must isolate PHY1 electrically before using PHY0. + // + if (foundPhy1 == true) { + control = MDI_CONTROL_ISOLATE; + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control); + IOSleep(1); + } + + // Enable and restart auto-negotiation on PHY0. 
+ // + VPRINT("%s: starting auto-negotiation on Phy 0", getName()); + control = MDI_CONTROL_AUTONEG_ENABLE; + _mdiWritePHY(PHY_ADDRESS_0, MDI_REG_CONTROL, control); + IOSleep(1); + control |= MDI_CONTROL_RESTART_AUTONEG; + _mdiWritePHY(PHY_ADDRESS_0, MDI_REG_CONTROL, control); + + for (int i = 0; i < AUTONEGOTIATE_TIMEOUT; i++) { + _mdiReadPHY(PHY_ADDRESS_0, MDI_REG_STATUS, &status); + if (status & MDI_STATUS_AUTONEG_COMPLETE) + break; + IOSleep(100); + } + _mdiReadPHY(PHY_ADDRESS_0, MDI_REG_STATUS, &status); + _mdiReadPHY(PHY_ADDRESS_0, MDI_REG_STATUS, &status); + _mdiReadPHY(PHY_ADDRESS_0, MDI_REG_STATUS, &status); + if ((status & MDI_STATUS_LINK_STATUS) || foundPhy1 == false) { + VPRINT("%s: using Phy 0 at address 0\n", getName()); + phyAddr = 0; + return true; + } + + // Isolate PHY0. + // + VPRINT("%s: using Phy 1 without link\n", getName()); + control = MDI_CONTROL_ISOLATE; + _mdiWritePHY(PHY_ADDRESS_0, MDI_REG_CONTROL, control); + IOSleep(1); + + // Enable and restart auto-negotiation on PHY1. + // + control = MDI_CONTROL_AUTONEG_ENABLE; + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control); + IOSleep(1); + control |= MDI_CONTROL_RESTART_AUTONEG; + _mdiWritePHY(phyAddr, MDI_REG_CONTROL, control); + + phyID = _phyGetID(); + VPRINT("%s: PHY model id is 0x%08lx\n", getName(), phyID); + phyID &= PHY_MODEL_MASK; + + return true; +} + +//--------------------------------------------------------------------------- +// Function: _phyGetMediumTypeFromBits +// +// Purpose: +// Return the medium type that correspond to the given specifiers. 
+ +mediumType_t Intel82557::_phyGetMediumTypeFromBits(bool rate100, + bool fullDuplex, + bool t4) +{ + mediumType_t mediumType; + + if (t4) { + mediumType = MEDIUM_TYPE_T4; + } + else if (rate100) { + if (fullDuplex) + mediumType = MEDIUM_TYPE_TX_FD; + else + mediumType = MEDIUM_TYPE_TX_HD; + } + else { + if (fullDuplex) + mediumType = MEDIUM_TYPE_10_FD; + else + mediumType = MEDIUM_TYPE_10_HD; + } + + return mediumType; +} + +//--------------------------------------------------------------------------- +// Function: _phyGetMediumWithCode +// +// Purpose: +// Returns the IONetworkMedium object associated with the given type. + +IONetworkMedium * Intel82557::_phyGetMediumWithType(UInt32 type) +{ + if (type < MEDIUM_TYPE_INVALID) + return mediumTable[type]; + else + return 0; +} + +//--------------------------------------------------------------------------- +// Function: _phyReportLinkStatus +// +// Purpose: +// Called periodically to monitor for link changes. When a change +// is detected, determine the current link and report it to the +// upper layers by calling IONetworkController::setLinkStatus(). + +void Intel82557::_phyReportLinkStatus( bool firstPoll = false ) +{ + UInt16 phyStatus; + UInt16 phyStatusChange; + + // Read PHY status register. + + _mdiReadPHY( phyAddr, MDI_REG_STATUS, &phyStatus ); + + // Detect a change in the two link related bits. + // Remember that the link status bit will latch a link fail + // condition (should not miss a link down event). + + phyStatusChange = ( phyStatusPrev ^ phyStatus ) & + ( MDI_STATUS_LINK_STATUS | + MDI_STATUS_AUTONEG_COMPLETE ); + + if ( phyStatusChange || firstPoll ) + { + if ( firstPoll ) + { + // For the initial link status poll, wait a bit, then + // re-read the status register to clear any latched bits. 
+ + _phyWaitAutoNegotiation(); + _mdiReadPHY( phyAddr, MDI_REG_STATUS, &phyStatus ); + _mdiReadPHY( phyAddr, MDI_REG_STATUS, &phyStatus ); + } + + // IOLog("PhyStatus: %04x\n", phyStatus); + + // Determine the link status. + + if ( ( phyStatus & MDI_STATUS_LINK_STATUS ) && + ( phyStatus & MDI_STATUS_AUTONEG_COMPLETE ) ) + { + // Excellent, link is up. + + IONetworkMedium * activeMedium; + + activeMedium = _phyGetMediumWithType( _phyGetActiveMedium() ); + + setLinkStatus( kIONetworkLinkValid | kIONetworkLinkActive, + activeMedium ); + + // IOLog("link is up %lx\n", + // activeMedium ? activeMedium->getType() : 0); + } + else + { + // Link is down. + + setLinkStatus( kIONetworkLinkValid, 0 ); + + // IOLog("link is down\n"); + } + + // Save phyStatus for the next run. + + phyStatusPrev = phyStatus; + } +} + +//--------------------------------------------------------------------------- +// Function: _phyGetActiveMedium +// +// Purpose: +// Once the PHY reports that the link is up, this method can be called +// to return the type of link that was established. + +mediumType_t Intel82557::_phyGetActiveMedium() +{ + mdi_reg_t reg; + mediumType_t medium; + + do { + // For the simple case where the media selection is not + // automatic (e.g. forced to 100BaseTX). + + if ( currentMediumType != MEDIUM_TYPE_AUTO ) + { + medium = currentMediumType; + break; + } + + // i82553 has a special register for determining the speed and + // duplex mode settings. + + if ( ( phyID == PHY_MODEL_I82553_A_B ) || + ( phyID == PHY_MODEL_I82553_C ) ) + { + _mdiReadPHY( phyAddr, I82553_REG_SCR, ® ); + + medium = _phyGetMediumTypeFromBits( reg & I82553_SCR_100, + reg & I82553_SCR_FULL_DUPLEX, + reg & I82553_SCR_T4 ); + break; + } + else if ( phyID == PHY_MODEL_NSC83840 ) + { + // For NSC83840, we use the 83840 specific register to determine + // the link speed and duplex mode setting. 
Early 83840 devices + // did not seem to report the remote capabilities when the link + // partner does not support NWay. + + mdi_reg_t exp; + + _mdiReadPHY( phyAddr, MDI_REG_ANEX, &exp ); + + if ( ( exp & MDI_ANEX_LP_AUTONEGOTIABLE ) == 0 ) + { + _mdiReadPHY( phyAddr, NSC83840_REG_PAR, ® ); + + medium = _phyGetMediumTypeFromBits( + !(reg & NSC83840_PAR_SPEED_10), + (reg & NSC83840_PAR_DUPLEX_STAT), + 0 ); + break; + } + } + + // For generic PHY, use the standard PHY registers. + // + // Use the local and remote capability words to determine the + // current active medium. + + mdi_reg_t lpa; + mdi_reg_t mya; + + _mdiReadPHY( phyAddr, MDI_REG_ANLP, &lpa ); + _mdiReadPHY( phyAddr, MDI_REG_ANAR, &mya ); + + mya &= lpa; // obtain common capabilities mask. + + // Observe PHY medium precedence. + + if ( mya & MDI_ANAR_TX_FD ) medium = MEDIUM_TYPE_TX_FD; + else if ( mya & MDI_ANAR_T4 ) medium = MEDIUM_TYPE_T4; + else if ( mya & MDI_ANAR_TX_HD ) medium = MEDIUM_TYPE_TX_HD; + else if ( mya & MDI_ANAR_10_FD ) medium = MEDIUM_TYPE_10_FD; + else medium = MEDIUM_TYPE_10_HD; + } + while ( false ); + + return medium; +} diff --git a/iokit/Drivers/network/drvIntel82557/i82557PHY.h b/iokit/Drivers/network/drvIntel82557/i82557PHY.h new file mode 100644 index 000000000..2391fa3a4 --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557PHY.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 NeXT Software, Inc. + * All rights reserved. + * + * i82557PHY.h + * - contains PHY specific MDI register offsets and definitions + * + * Modification History + * + * 4-Mar-96 Dieter Siegmund (dieter@NeXT.com) + * Created. + */ + +#ifndef _I82557PHY_H +#define _I82557PHY_H + +#define PHY_ADDRESS_0 0 +#define PHY_ADDRESS_DEFAULT 1 +#define PHY_ADDRESS_I82503 32 +#define PHY_ADDRESS_MAX 32 + +#define PHY_MODEL_MASK 0xfff0ffff +#define PHY_MODEL_NSC83840 0x5c002000 +#define PHY_MODEL_I82553_A_B 0x000003e0 +#define PHY_MODEL_I82553_C 0x035002a8 +#define PHY_MODEL_I82555 0x015002a8 // also for internal PHY in i82558 + +#define MEDIUM_TYPE_TO_MASK(m) (1 << (m)) + +//------------------------------------------------------------------------- +// Generic MDI registers: +//------------------------------------------------------------------------- +#define MDI_REG_CONTROL 0x00 +#define MDI_REG_STATUS 0x01 +#define MDI_REG_PHYID_WORD_1 0x02 +#define MDI_REG_PHYID_WORD_2 0x03 +#define MDI_REG_ANAR 0x04 +#define MDI_REG_ANLP 0x05 +#define MDI_REG_ANEX 0x06 +#define MDI_REG_RESERVED_TOP 0x0f + +typedef UInt16 mdi_reg_t; + +//------------------------------------------------------------------------- +// MDI Control Register. +// Address 0, 16-bit, RW. 
+//------------------------------------------------------------------------- +#define MDI_CONTROL_RESET BIT(15) +#define MDI_CONTROL_LOOPBACK BIT(14) +#define MDI_CONTROL_100 BIT(13) +#define MDI_CONTROL_AUTONEG_ENABLE BIT(12) +#define MDI_CONTROL_POWER_DOWN BIT(11) +#define MDI_CONTROL_ISOLATE BIT(10) +#define MDI_CONTROL_RESTART_AUTONEG BIT(9) +#define MDI_CONTROL_FULL_DUPLEX BIT(8) +#define MDI_CONTROL_CDT_ENABLE BIT(7) + +//------------------------------------------------------------------------- +// MDI Status Register. +// Address 1, 16-bit, RO. +//------------------------------------------------------------------------- +#define MDI_STATUS_T4 BIT(15) +#define MDI_STATUS_TX_FD BIT(14) +#define MDI_STATUS_TX_HD BIT(13) +#define MDI_STATUS_10_FD BIT(12) +#define MDI_STATUS_10_HD BIT(11) +#define MDI_STATUS_AUTONEG_COMPLETE BIT(5) +#define MDI_STATUS_REMOTE_FAULT_DETECT BIT(4) +#define MDI_STATUS_AUTONEG_CAPABLE BIT(3) +#define MDI_STATUS_LINK_STATUS BIT(2) +#define MDI_STATUS_JABBER_DETECTED BIT(1) +#define MDI_STATUS_EXTENDED_CAPABILITY BIT(0) + +//------------------------------------------------------------------------- +// MDI Auto-Negotiation Advertisement Register. +// Address 4, 16-bit, RW. +//------------------------------------------------------------------------- +#define MDI_ANAR_NEXT_PAGE BIT(15) +#define MDI_ANAR_ACKNOWLEDGE BIT(14) +#define MDI_ANAR_REMOTE_FAULT BIT(13) +#define MDI_ANAR_T4 BIT(9) +#define MDI_ANAR_TX_FD BIT(8) +#define MDI_ANAR_TX_HD BIT(7) +#define MDI_ANAR_10_FD BIT(6) +#define MDI_ANAR_10_HD BIT(5) +#define MDI_ANAR_SELECTOR_SHIFT 0 +#define MDI_ANAR_SELECTOR_MASK CSR_MASK(MDI_ANAR_SELECTOR, 0x1f) + +//------------------------------------------------------------------------- +// MDI Auto-Negotiation Link Partner Ability Register. +// Address 5, 16-bit, RO. 
+//------------------------------------------------------------------------- +#define MDI_ANLP_NEXT_PAGE BIT(15) +#define MDI_ANLP_ACKNOWLEDGE BIT(14) +#define MDI_ANLP_REMOTE_FAULT BIT(13) +#define MDI_ANLP_T4 BIT(9) +#define MDI_ANLP_TX_FD BIT(8) +#define MDI_ANLP_TX_HD BIT(7) +#define MDI_ANLP_10_FD BIT(6) +#define MDI_ANLP_10_HD BIT(5) +#define MDI_ANLP_SELECTOR_SHIFT 0 +#define MDI_ANLP_SELECTOR_MASK CSR_MASK(MDI_ANLP_SELECTOR, 0x1f) + +//------------------------------------------------------------------------- +// MDI Auto-Negotiation Expansion Register. +// Address 6, 16-bit, RO. +//------------------------------------------------------------------------- +#define MDI_ANEX_PARALLEL_DETECT_FAULT BIT(4) +#define MDI_ANEX_LP_NEXT_PAGEABLE BIT(3) +#define MDI_ANEX_NEXT_PAGEABLE BIT(2) +#define MDI_ANEX_PAGE_RECEIVED BIT(1) +#define MDI_ANEX_LP_AUTONEGOTIABLE BIT(0) + +//------------------------------------------------------------------------- +// NSC DP83840-specific MDI registers +//------------------------------------------------------------------------- +#define NSC83840_REG_DCR 0x12 // disconnect counter +#define NSC83840_REG_FCSCR 0x13 // false carrier sense counter +#define NSC83840_REG_RECR 0x15 // receive error counter +#define NSC83840_REG_SRR 0x16 // silicon revision register +#define NSC83840_REG_PCR 0x17 // PCS configuration register +#define NSC83840_REG_LBREMR 0x18 // loopback,bypass,rx err mask +#define NSC83840_REG_PAR 0x19 // PHY address register +#define NSC83840_REG_10BTSR 0x1b // 10Base-T status register +#define NSC83840_REG_10BTCR 0x1c // 10Base-T config register + +//------------------------------------------------------------------------- +// NSC PCS Configuration Register (PCR). +// Address 0x17, 16-bit, RW. 
+//------------------------------------------------------------------------- +#define NSC83840_PCR_NRZI_EN BIT(15) +#define NSC83840_PCR_DESCR_TO_SEL BIT(14) +#define NSC83840_PCR_DESCR_TO_DIS BIT(13) +#define NSC83840_PCR_REPEATER BIT(12) +#define NSC83840_PCR_ENCSEL BIT(11) +#define NSC83840_PCR_TXREADY BIT(10) +#define NSC83840_PCR_CLK25MDIS BIT(7) +#define NSC83840_PCR_F_LINK_100 BIT(6) +#define NSC83840_PCR_CIM_DIS BIT(5) +#define NSC83840_PCR_TX_OFF BIT(4) +#define NSC83840_PCR_LED1_MODE BIT(2) +#define NSC83840_PCR_LED4_MODE BIT(1) + +//------------------------------------------------------------------------- +// NSC PHY Address Register (PAR). +// Address 0x19, 16-bit, RW. +//------------------------------------------------------------------------- +#define NSC83840_PAR_DIS_CRS_JAB BIT(11) +#define NSC83840_PAR_AN_EN_STAT BIT(10) +#define NSC83840_PAR_FEFI_EN BIT(8) +#define NSC83840_PAR_DUPLEX_STAT BIT(7) +#define NSC83840_PAR_SPEED_10 BIT(6) +#define NSC83840_PAR_CIM_STATUS BIT(5) +#define NSC83840_PAR_PHYADDR_SHIFT 0 +#define NSC83840_PAR_PHYADDR_MASK CSR_MASK(NSC83840_PAR_PHYADDR, 0x1f) + +//------------------------------------------------------------------------- +// Intel 82553-specific MDI registers +//------------------------------------------------------------------------- +#define I82553_REG_SCR 0x10 +#define I82553_REG_100RDCR 0x14 + +//------------------------------------------------------------------------- +// Intel 82553 Status and Control Register (SCR). +// Address 0x10, 16-bit, RW. 
+//------------------------------------------------------------------------- +#define I82553_SCR_FLOW_CONTROL BIT(15) +#define I82553_SCR_CARRIER_SENSE_DIS BIT(13) +#define I82553_SCR_TX_FLOW_CONTROL BIT(12) +#define I82553_SCR_RX_DESERIAL_IN_SYNC BIT(11) +#define I82553_SCR_100_POWERDOWN BIT(10) +#define I82553_SCR_10_POWERDOWN BIT(9) +#define I82553_SCR_POLARITY BIT(8) +#define I82553_SCR_T4 BIT(2) +#define I82553_SCR_100 BIT(1) +#define I82553_SCR_FULL_DUPLEX BIT(0) + +#endif /* !_I82557PHY_H */ diff --git a/iokit/Drivers/network/drvIntel82557/i82557Private.cpp b/iokit/Drivers/network/drvIntel82557/i82557Private.cpp new file mode 100644 index 000000000..31ac8842f --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557Private.cpp @@ -0,0 +1,1692 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. All rights reserved. 
+ * + * i82557Private.cpp + * + */ + +#include "i82557.h" + +extern "C" { +#include +#include +#include +} + +//--------------------------------------------------------------------------- +// Function: IOPhysicalFromVirtual +// +// Hack, remove ASAP. + +static inline IOReturn +IOPhysicalFromVirtual(vm_address_t vaddr, IOPhysicalAddress * paddr) +{ + *paddr = pmap_extract(kernel_pmap, vaddr); + return (*paddr == 0) ? kIOReturnBadArgument : kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Function: _intrACK +// +// Purpose: +// Acknowledge all of the pending interrupt sources. +// +// Returns: +// Return the interrupt status. +// +// CSR usage: +// Read/Write: SCB status + +static inline scb_status_t _intrACK(CSR_t * CSR_p) +{ + scb_status_t stat_irq = OSReadLE16(&CSR_p->status) & SCB_STATUS_INT_MASK; + if (stat_irq) + OSWriteLE16(&CSR_p->status, stat_irq); // ack pending interrupts. + return (stat_irq); +} + +//--------------------------------------------------------------------------- +// Function: _waitSCBCommandClear +// +// Purpose: +// Wait for the SCB Command field to clear. Ensures that we don't +// overrun the NIC's command unit. +// +// Returns: +// true if the SCB command field was cleared. +// false if the SCB command field was not cleared. +// +// CSR usage: +// Read: SCB command + +static inline bool +_waitSCBCommandClear(CSR_t * CSR_p) +{ + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (!OSReadLE8(&CSR_p->command)) + return true; + IODelay(SPIN_COUNT); + } + return false; // hardware is not responding. +} + +//--------------------------------------------------------------------------- +// Function: _waitCUNonActive +// +// Purpose: +// Waits for the Command Unit to become inactive. +// +// Returns: +// true if the CU has become inactive. +// false if the CU remains active. 
+// +// CSR usage: +// Read: SCB status + +static inline bool +_waitCUNonActive(CSR_t * CSR_p) +{ + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (CSR_VALUE(SCB_STATUS_CUS, OSReadLE16(&CSR_p->status)) != + SCB_CUS_ACTIVE) + return true; + IODelay(SPIN_COUNT); + } + return false; +} + +//--------------------------------------------------------------------------- +// Function: _polledCommand:WithAddress +// +// Purpose: +// Issue a polled command to the NIC. + +bool Intel82557::_polledCommand(cbHeader_t * hdr_p, IOPhysicalAddress paddr) +{ + if (!_waitSCBCommandClear(CSR_p)) { + IOLog("%s: _polledCommand:(%s): _waitSCBCommandClear failed\n", + CUCommandString(CSR_VALUE(CB_CMD, OSReadLE16(&hdr_p->command))), + getName()); + return false; + } + + if (!_waitCUNonActive(CSR_p)) { + IOLog("%s: _polledCommand:(%s): _waitCUNonActive failed\n", + CUCommandString(CSR_VALUE(CB_CMD, OSReadLE16(&hdr_p->command))), + getName()); + return false; + } + + // Set the physical address of the command block, and issue a + // command unit start. + // + OSWriteLE32(&CSR_p->pointer, paddr); + OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_START)); + + prevCUCommand = SCB_CUC_START; + + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (OSReadLE16(&hdr_p->status) & CB_STATUS_C) + return true; + IODelay(SPIN_COUNT); + } + return false; +} + +//--------------------------------------------------------------------------- +// Function: _abortReceive +// +// Purpose: +// Abort the receive unit. 
+ +bool Intel82557::_abortReceive() +{ + if (!_waitSCBCommandClear(CSR_p)) { + IOLog("%s: _abortReceive: _waitSCBCommandClear failed\n", getName()); + return false; + } + + OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_ABORT)); + + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)) == + SCB_RUS_IDLE) + return true; + IODelay(SPIN_COUNT); + } + + IOLog("%s: _abortReceive: timeout\n", getName()); + return false; +} + +//--------------------------------------------------------------------------- +// Function: _startReceive +// +// Purpose: +// Start the receive unit + +bool Intel82557::_startReceive() +{ + if (!_waitSCBCommandClear(CSR_p)) { + IOLog("%s: _startReceive: _waitSCBCommandClear failed\n", getName()); + return false; + } + + // Make sure the initial RFD has a link to its RBD + OSWriteLE32(&headRfd->rbdAddr, headRfd->_rbd._paddr); + + OSWriteLE32(&CSR_p->pointer, headRfd->_paddr); + OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_START)); + + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)) == + SCB_RUS_READY) + return true; + IODelay(SPIN_COUNT); + } + + IOLog("%s: _startReceive: timeout\n", getName()); + return false; +} + +//--------------------------------------------------------------------------- +// Function: _resetChip +// +// Purpose: +// Issue a selective reset then a full reset. +// This is done to avoid a PCI bus hang if the 82557 is in the midst of +// a PCI bus cycle. The selective reset pauses the transmit and receive +// engines. 
+// +void Intel82557::_resetChip() +{ + int i = 0; + + sendPortCommand(portSelectiveReset_e, 0); + do { + IOSleep(1); + } while (OSReadLE32(&CSR_p->port) && ++i < 100); + + sendPortCommand(portReset_e, 0); + IOSleep(1); + return; +} + +//--------------------------------------------------------------------------- +// Function: issueReset +// +// Purpose: +// Shut down the chip, and issue a reset. + +void Intel82557::issueReset() +{ + IOLog("%s: resetting adapter\n", getName()); + + etherStats->dot3RxExtraEntry.resets++; + + setActivationLevel(kActivationLevel0); + if (!setActivationLevel(currentLevel)) { + IOLog("%s: Reset attempt unsuccessful\n", getName()); + } +} + +//--------------------------------------------------------------------------- +// Function: updateRFDFromMbuf +// +// Purpose: +// Updated a RFD/RBD in order to attach it to a cluster mbuf. +// XXX - assume cluster will never cross page boundary. + +bool Intel82557::updateRFDFromMbuf(rfd_t * rfd_p, struct mbuf * m) +{ + struct IOPhysicalSegment vector; + UInt count; + + count = rxMbufCursor->getPhysicalSegments(m, &vector, 1); + if (!count) + return false; + + // Start modifying RFD + // + rfd_p->_rbd.buffer = vector.location; // cursor is little-endian +// OSWriteLE32(&rfd_p->_rbd.size, CSR_FIELD(RBD_SIZE, vector.length)); + + rfd_p->_rbd._mbuf = m; + + return true; +} + +//--------------------------------------------------------------------------- +// Function: _initTcbQ +// +// Purpose: +// Initialize the transmit control block queue. Create a circularly +// linked list of tcbs. 

// Initialize (or re-initialize) the TX control block ring. When 'enable'
// is false this only frees any attached mbufs and zeroes the ring; when
// true it also resolves physical addresses and builds the circular links.
bool Intel82557::_initTcbQ(bool enable = false)
{
    int i;

    tcbQ.numFree = tcbQ.numTcbs = NUM_TRANSMIT_FRAMES;
    tcbQ.activeHead_p = tcbQ.activeTail_p = tcbQ.freeHead_p = tcbList_p;

    for (i = 0; i < tcbQ.numTcbs; i++) {    /* free up buffers */
        if (tcbList_p[i]._mbuf) {
            freePacket(tcbList_p[i]._mbuf);
            tcbList_p[i]._mbuf = 0;
        }
    }
    bzero(tcbList_p, sizeof(tcb_t) * tcbQ.numTcbs);

    if (!enable)
        return true;

    for (i = 0; i < tcbQ.numTcbs; i++) {
        IOPhysicalAddress paddr;

        // Cache each TCB's own physical address; the hardware chases these.
        IOReturn result = IOPhysicalFromVirtual((vm_address_t) &tcbList_p[i],
                                                &tcbList_p[i]._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("i82557(tcbQ): Invalid TCB address\n");
            return false;
        }

        // Point the TCB at its embedded TBD array (physical address).
        result = IOPhysicalFromVirtual((vm_address_t) &tcbList_p[i]._tbds,
                                       &paddr);
        if (result != kIOReturnSuccess) {
            IOLog("i82557(tcbQ): Invalid TBD address\n");
            return false;
        }
        OSWriteLE32(&tcbList_p[i].tbdAddr, paddr);

        // Virtual circular link: last entry wraps to the first.
        if (i == (tcbQ.numTcbs - 1))
            tcbList_p[i]._next = &tcbList_p[0];
        else
            tcbList_p[i]._next = &tcbList_p[i + 1];
    }
    // Second pass: _paddr of every node is known now, so the hardware
    // 'link' fields can be filled in.
    for (i = 0; i < tcbQ.numTcbs; i++)  /* make physical links */
        OSWriteLE32(&tcbList_p[i].link, tcbList_p[i]._next->_paddr);

    return true;
}

//---------------------------------------------------------------------------
// Function: _setupRfd
//
// Purpose:
//   Build the circular RFD and RBD links (both virtual and physical) for
//   the receive ring. Assumes every rfdList_p[i]._paddr and ._rbd._paddr
//   has already been filled in by the caller.

static void _setupRfd(rfd_t * rfdList_p)
{
    for (int i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        if (i == (NUM_RECEIVE_FRAMES - 1)) {
            /* mark tails and link the lists circularly */
            OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_EL);
            rfdList_p[i]._next = &rfdList_p[0];
            OSSetLE32(&rfdList_p[i]._rbd.size, RBD_SIZE_EL);
            rfdList_p[i]._rbd._next = &rfdList_p[0]._rbd;
        }
        else {
            rfdList_p[i]._next = &rfdList_p[i + 1];
            rfdList_p[i]._rbd._next = &rfdList_p[i + 1]._rbd;
        }

        OSWriteLE32(&rfdList_p[i].link, rfdList_p[i]._next->_paddr);
        // Only the first RFD carries a real RBD pointer; the rest inherit
        // the chain implicitly (C_NULL marker).
        OSWriteLE32(&rfdList_p[i].rbdAddr,
                    (i == 0) ? rfdList_p[0]._rbd._paddr : C_NULL);

        OSWriteLE32(&rfdList_p[i]._rbd.link, rfdList_p[i]._rbd._next->_paddr);
        OSSetLE32(&rfdList_p[i]._rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
    }
}

//---------------------------------------------------------------------------
// Function: _initRfdList
//
// Purpose:
//   Create a circularly linked list of receive frame descriptors, and
//   populate them with receive buffers allocated from our special pool.

bool Intel82557::_initRfdList(bool enable = false)
{
    int      i;
    IOReturn result;

    /* free allocated packet buffers */
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        if (rfdList_p[i]._rbd._mbuf) {
            freePacket(rfdList_p[i]._rbd._mbuf);
//          rfdList_p[i]._rbd._mbuf = 0;    // bzero below clears it anyway
        }
    }

    /* zero out the entire structure, and re-create it */
    bzero(rfdList_p, sizeof(rfd_t) * NUM_RECEIVE_FRAMES);

    if (!enable)
        return true;

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_SF);

        // Resolve physical addresses for the RFD and its embedded RBD;
        // _setupRfd() depends on these.
        result = IOPhysicalFromVirtual((vm_address_t) &rfdList_p[i],
                                       &rfdList_p[i]._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid RFD address\n", getName());
            return false;
        }
        result = IOPhysicalFromVirtual((vm_address_t) &rfdList_p[i]._rbd,
                                       &rfdList_p[i]._rbd._paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid RBD address\n", getName());
            return false;
        }
    }

    _setupRfd(rfdList_p);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        // Pre-load the receive ring with max size mbuf packets.
        //
        struct mbuf * m = allocatePacket(MAX_BUF_SIZE);
        if (!m)
            return false;

        if (updateRFDFromMbuf(&rfdList_p[i], m) == false) {
            IOLog("%s: updateRFDFromMbuf() error\n", getName());
            freePacket(m);
            return false;
        }
    }

    headRfd = rfdList_p;
    tailRfd = rfdList_p + NUM_RECEIVE_FRAMES - 1;

    return true;
}

//---------------------------------------------------------------------------
// Function: _resetRfdList
//
// Purpose:
//   Reset the RFD list before the receiver engine is restarted after
//   a resource shortage. The attached mbufs and cached physical addresses
//   survive the reset; they are stashed in the KDB scratch buffer while
//   the descriptor memory is rebuilt.

bool Intel82557::_resetRfdList()
{
    int i;

    // Per-RFD state preserved across the bzero/rebuild below.
    struct _cache {
        IOPhysicalAddress rbd_buffer;
        struct mbuf *     rbd_mbuf;
        IOPhysicalAddress rfd_paddr;
        IOPhysicalAddress rbd_paddr;
    } * cache_p = (struct _cache *) KDB_buf_p;

    // KDB_buf_p is only ETHERMAXPACKET bytes; refuse rather than overflow.
    if ((sizeof(struct _cache) * NUM_RECEIVE_FRAMES) > ETHERMAXPACKET) {
        IOLog("%s: no space for cache data\n", getName());
        return false;
    }

    /* cache allocated packet buffers */
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        cache_p[i].rbd_mbuf   = rfdList_p[i]._rbd._mbuf;
        cache_p[i].rbd_buffer = rfdList_p[i]._rbd.buffer;
        cache_p[i].rfd_paddr  = rfdList_p[i]._paddr;
        cache_p[i].rbd_paddr  = rfdList_p[i]._rbd._paddr;
    }

    /* zero out the entire structure, and re-create it */
    bzero(rfdList_p, sizeof(rfd_t) * NUM_RECEIVE_FRAMES);

    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        OSSetLE16(&rfdList_p[i].command, RFD_COMMAND_SF);
        rfdList_p[i]._paddr      = cache_p[i].rfd_paddr;
        rfdList_p[i]._rbd._paddr = cache_p[i].rbd_paddr;
    }

    _setupRfd(rfdList_p);

    // Re-attach the preserved receive buffers.
    for (i = 0; i < NUM_RECEIVE_FRAMES; i++) {
        rfdList_p[i]._rbd.buffer = cache_p[i].rbd_buffer;
        rfdList_p[i]._rbd._mbuf  = cache_p[i].rbd_mbuf;
    }

    headRfd = rfdList_p;
    tailRfd = rfdList_p + NUM_RECEIVE_FRAMES - 1;

    return true;
}

//---------------------------------------------------------------------------
// Function: _mdiReadPHY:Register:Data
//
// Purpose:
//   Read the specified MDI register and return the results.
+ +bool +Intel82557::_mdiReadPHY(UInt8 phyAddress, UInt8 regAddress, UInt16 * data_p) +{ + mdi_control_t mdi; + + mdi = CSR_FIELD(MDI_CONTROL_PHYADDR, phyAddress) | + CSR_FIELD(MDI_CONTROL_REGADDR, regAddress) | + CSR_FIELD(MDI_CONTROL_OPCODE, MDI_CONTROL_OP_READ); + + OSWriteLE32(&CSR_p->mdiControl, mdi); + IODelay(20); + + bool ready = false; + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (OSReadLE32(&CSR_p->mdiControl) & MDI_CONTROL_READY) { + ready = true; + break; + } + IODelay(20); + } + if (ready == false) { + IOLog("%s: _mdiReadPHYRegisterSuccess timeout\n", getName()); + return false; + } + + *data_p = CSR_VALUE(MDI_CONTROL_DATA, OSReadLE32(&CSR_p->mdiControl)); + return true; +} + +//--------------------------------------------------------------------------- +// Function: _mdiWritePHY:Register:Data +// +// Purpose: +// Write the specified MDI register with the given data. + +bool Intel82557::_mdiWritePHY(UInt8 phyAddress, UInt8 regAddress, UInt16 data) +{ + mdi_control_t mdi; + + mdi = CSR_FIELD(MDI_CONTROL_PHYADDR, phyAddress) | + CSR_FIELD(MDI_CONTROL_REGADDR, regAddress) | + CSR_FIELD(MDI_CONTROL_OPCODE, MDI_CONTROL_OP_WRITE) | + CSR_FIELD(MDI_CONTROL_DATA, data); + + OSWriteLE32(&CSR_p->mdiControl, mdi); + IODelay(20); + + bool ready = false; + for (int i = 0; i < SPIN_TIMEOUT; i++) { + if (OSReadLE32(&CSR_p->mdiControl) & MDI_CONTROL_READY) { + ready = true; + break; + } + IODelay(20); + } + if (ready == false) { + IOLog("%s: _mdiWritePHYRegisterData timeout\n", getName()); + return false; + } + return true; +} + +//--------------------------------------------------------------------------- +// Function: nop +// +// Purpose: +// Issue a polled NOP command to the NIC. 

// Issue a polled NOP command block; used to verify basic CU operation.
bool Intel82557::nop()
{
    cbHeader_t * nop_p = &overlay_p->nop;

    bzero(nop_p, sizeof(*nop_p));
    OSWriteLE16(&nop_p->command, CSR_FIELD(CB_CMD, CB_CMD_NOP) | CB_EL);
    OSWriteLE32(&nop_p->link, C_NULL);

    return _polledCommand(nop_p, overlay_paddr);
}

//---------------------------------------------------------------------------
// Function: config
//
// Purpose:
//   Issue a polled CONFIGURE command to the NIC. Each cb_p[n] below is
//   one byte of the 82557/82558 configure block; unlisted bytes stay zero
//   from the bzero().

bool Intel82557::config()
{
    UInt8 *          cb_p;
    cb_configure_t * cfg_p = &overlay_p->configure;

    /*
     * Fill the configure command block
     */
    bzero(cfg_p, sizeof(*cfg_p));

    OSWriteLE16(&cfg_p->header.command,
                CSR_FIELD(CB_CMD, CB_CMD_CONFIGURE) | CB_EL);
    OSWriteLE32(&cfg_p->header.link, C_NULL);

    cb_p = cfg_p->byte;
    cb_p[0] = CSR_FIELD(CB_CB0_BYTE_COUNT, CB_CONFIG_BYTE_COUNT);

    cb_p[1] = CSR_FIELD(CB_CB1_TX_FIFO_LIMIT, CB_CB1_TX_FIFO_0) |
              CSR_FIELD(CB_CB1_RX_FIFO_LIMIT, CB_CB1_RX_FIFO_64);

    cb_p[3] = CB_CB3_MWI_ENABLE;    // enable PCI-MWI on 82558 devices

    cb_p[4] = 0;                    // disable PCI transfer limits
    cb_p[5] = 0;

    cb_p[6] = CB_CB6_NON_DIRECT_DMA | CB_CB6_STD_TCB | CB_CB6_STD_STATS;

    cb_p[7] = CSR_FIELD(CB_CB7_UNDERRUN_RETRY, CB_CB7_UNDERRUN_RETRY_1) |
              CB_CB7_DISC_SHORT_FRAMES;

    // CSMA is skipped for 82558 parts and for the external 82503 serial PHY.
    if ((eeprom->getContents()->controllerType != I82558_CONTROLLER_TYPE) &&
        (phyAddr != PHY_ADDRESS_I82503))
        cb_p[8] = CB_CB8_CSMA_EN;

    cb_p[10] = CSR_FIELD(CB_CB10_PREAMBLE, CB_CB10_PREAMBLE_7_BYTES) |
               CB_CB10_NSAI;

    cb_p[12] = CSR_FIELD(CB_CB12_IFS, CB_CB12_IFS_96_BIT_TIMES);

    cb_p[13] = CSR_FIELD(CB_CB13_FC_TYPE_LSB, CB_CB13_FC_TYPE_LSB_DEF);
    cb_p[14] = CSR_FIELD(CB_CB14_FC_TYPE_MSB, CB_CB14_FC_TYPE_MSB_DEF);

    // CRS/CDT only when CSMA is off; promiscuous mode tracks the driver flag.
    cb_p[15] = ((cb_p[8] & CB_CB8_CSMA_EN) ? 0 : CB_CB15_CRS_CDT) |
               (promiscuousEnabled ? CB_CB15_PROMISCUOUS : 0);

    cb_p[16] = CSR_FIELD(CB_CB16_FC_DELAY_LSB, CB_CB16_FC_DELAY_LSB_DEF);
    cb_p[17] = CSR_FIELD(CB_CB17_FC_DELAY_MSB, CB_CB17_FC_DELAY_MSB_DEF);

    cb_p[18] = CB_CB18_PADDING | CB_CB18_STRIPPING;

#if 0 // XXX - need to fix this
    /*
     * Force full duplex if there is a user override, or we are using Phy 0
     * and full duplex mode is enabled. The FDX# pin is wired to Phy 1,
     * which means that the 82557 can't autodetect the setting correctly.
     */
    if (forceFullDuplex || (phyAddr == PHY_ADDRESS_0 && fullDuplexMode))
        cb_p[19] = CB_CB19_FORCE_FDX;
#endif

    // Note: this overwrites anything the disabled block above would set.
    cb_p[19] = CB_CB19_AUTO_FDX;
    if (flowControl) {
        cb_p[19] |= ( CB_CB19_TX_FC |
                      CB_CB19_RX_FC_RESTOP |
                      CB_CB19_RX_FC_RESTART |
                      CB_CB19_REJECT_FC );
    }

    cb_p[20] = CSR_FIELD(CB_CB20_FC_ADDR_LSB, CB_CB20_FC_ADDR_LSB_DEF);

    IOSync();   // make sure the block is in memory before the chip reads it

    return _polledCommand((cbHeader_t *) cfg_p, overlay_paddr);
}

//---------------------------------------------------------------------------
// Function: iaSetup
//
// Purpose:
//   Issue a polled IndividualAddressSETUP command to the NIC. Programs
//   the station (MAC) address held in 'myAddress'.
//
bool Intel82557::iaSetup()
{
    cb_iasetup_t * iaSetup_p = &overlay_p->iasetup;

    /*
     * Fill the IA-setup command block
     */
    bzero(iaSetup_p, sizeof(*iaSetup_p));

    OSWriteLE16(&iaSetup_p->header.command, CSR_FIELD(CB_CMD, CB_CMD_IASETUP) |
                CB_EL);
    OSWriteLE32(&iaSetup_p->header.link, C_NULL);
    iaSetup_p->addr = myAddress;

    return _polledCommand((cbHeader_t *) iaSetup_p, overlay_paddr);
}

//---------------------------------------------------------------------------
// Function: mcSetup
//
// Purpose:
//   Issue a polled MultiCastSETUP command to the NIC. If 'fromData' is
//   true, then we ignore the addrs/count arguments and instead use the
//   multicast address list property in our interface client object.

// Program the multicast address filter. A count of zero disables
// multicast filtering. Returns true only when the chip reports the
// command completed OK.
bool Intel82557::mcSetup(IOEthernetAddress * addrs,
                         UInt                count,
                         bool                fromData = false)
{
    cb_mcsetup_t *    mcSetup_p;
    bool              cmdResult;
    IOReturn          result;
    IOPhysicalAddress mcSetup_paddr;

    if (fromData) {
        // mcSetup() was not called by the setMulticastList() function.
        // We should get the multicast list stored in the interface
        // object's property table.
        //
        // mcSetup() is always executed by the default workloop thread,
        // thus we don't have to worry about the address list being
        // changed while we go through it.
        //
        addrs = 0;
        count = 0;

        if (netif) {
            OSData * mcData = OSDynamicCast(OSData,
                netif->getProperty(kIOMulticastFilterData));
            if (mcData) {
                addrs = (IOEthernetAddress *) mcData->getBytesNoCopy();
                count = mcData->getLength() / sizeof(IOEthernetAddress);
                assert(addrs && count);
            }
        }
    }

    // The MC-setup block can exceed the shared overlay area, so it gets
    // its own page-sized, page-aligned (thus physically contiguous) buffer.
    // NOTE(review): 'count' is not bounded against the PAGE_SIZE allocation
    // here — presumably callers never exceed the chip's multicast list
    // limit; verify against setMulticastList().
    mcSetup_p = (cb_mcsetup_t *) IOMallocAligned(PAGE_SIZE, PAGE_SIZE);
    if (!mcSetup_p) {
        IOLog("%s: mcSetup:IOMallocAligned return NULL\n", getName());
        return false;
    }

    reserveDebuggerLock();

    do {
        cmdResult = false;

        OSWriteLE16(&mcSetup_p->header.status, 0);
        OSWriteLE16(&mcSetup_p->header.command,
                    CSR_FIELD(CB_CMD, CB_CMD_MCSETUP) | CB_EL);
        OSWriteLE32(&mcSetup_p->header.link, C_NULL);

        /* fill in the addresses (count may be zero) */
        for (UInt i = 0; i < count; i++)
            mcSetup_p->addrs[i] = addrs[i];

        /* Set the number of bytes in the MC list, if the count is zero,
         * it is equivalent to disabling the multicast filtering mechanism.
         */
        OSWriteLE16(&mcSetup_p->count, count * sizeof(IOEthernetAddress));

        result = IOPhysicalFromVirtual((vm_address_t) mcSetup_p,
                                       &mcSetup_paddr);
        if (result != kIOReturnSuccess) {
            IOLog("%s: Invalid MC-setup command block address\n", getName());
            break;
        }

        if (!_polledCommand((cbHeader_t *) mcSetup_p, mcSetup_paddr)) {
            IOLog("%s: MC-setup command failed 0x%x\n", getName(),
                  OSReadLE16(&mcSetup_p->header.status));
            break;
        }

        cmdResult = (OSReadLE16(&mcSetup_p->header.status) & CB_STATUS_OK) ?
                    true : false;
    } while (0);

    releaseDebuggerLock();

    IOFreeAligned(mcSetup_p, PAGE_SIZE);

    return cmdResult;
}

//---------------------------------------------------------------------------
// Function: _selfTest
//
// Purpose:
//   Issue a PORT self test command to the NIC and verify the results.
//   The chip DMAs a signature and a result bitmask into the overlay area.

bool Intel82557::_selfTest()
{
    port_selftest_t * test_p = (port_selftest_t *) overlay_p;
    UInt32            results;

    // Seed signature=0 / results=all-ones so we can detect whether the
    // chip ever wrote back.
    OSWriteLE32(&test_p->signature, 0);
    OSWriteLE32(&test_p->results, ~0);
    sendPortCommand(portSelfTest_e, overlay_paddr);
    IOSleep(20);
    if (OSReadLE32(&test_p->signature) == 0) {
        IOLog("%s: Self test timed out\n", getName());
        return false;
    }

    results = OSReadLE32(&test_p->results);
    if (results) {  /* report errors from self test */
        if (results & PORT_SELFTEST_ROM)
            IOLog("%s: Self test reports invalid ROM contents\n",
                  getName());
        if (results & PORT_SELFTEST_REGISTER)
            IOLog("%s: Self test reports internal register failure\n",
                  getName());
        if (results & PORT_SELFTEST_DIAGNOSE)
            IOLog("%s: Self test reports serial subsystem failure\n",
                  getName());
        if (results & PORT_SELFTEST_GENERAL)
            IOLog("%s: Self test failed\n", getName());
        return false;
    }
    return true;
}

//---------------------------------------------------------------------------
// Function: sendPortCommand
//
// Purpose:
//   Issue an 82557 PORT command.
+// +void Intel82557::sendPortCommand(port_command_t command, UInt arg) +{ + OSWriteLE32(&CSR_p->port, (arg & PORT_ADDRESS_MASK) | + CSR_FIELD(PORT_FUNCTION, command)); + return; +} + +//--------------------------------------------------------------------------- +// Function: enableAdapterInterrupts, disableAdapterInterrupts +// +// Purpose: +// Turn on/off interrupts at the adapter. + +void Intel82557::enableAdapterInterrupts() +{ + /* + * For 82558, mask (disable) the ER and FCP interrupts. + */ + UInt8 interruptByte; + interruptByte = SCB_INTERRUPT_ER | SCB_INTERRUPT_FCP; + OSWriteLE8(&CSR_p->interrupt, interruptByte); + interruptEnabled = true; + return; +} + +void Intel82557::disableAdapterInterrupts() +{ + UInt8 interruptByte; + interruptByte = SCB_INTERRUPT_M; + OSWriteLE8(&CSR_p->interrupt, interruptByte); + interruptEnabled = false; + return; +} + +//--------------------------------------------------------------------------- +// Function: _logCounters +// +// Purpose: +// If Verbose is defined as yes, log extra information about errors that +// have occurred. 
+ +static inline void +_logCounters(errorCounters_t * errorCounters_p) +{ + if (errorCounters_p->tx_good_frames) + IOLog("tx_good_frames %ld\n", + OSReadLE32(&errorCounters_p->tx_good_frames)); + if (errorCounters_p->tx_maxcol_errors) + IOLog("tx_maxcol_errors %ld\n", + OSReadLE32(&errorCounters_p->tx_maxcol_errors)); + if (errorCounters_p->tx_late_collision_errors) + IOLog("tx_late_collision_errors %ld\n", + OSReadLE32(&errorCounters_p->tx_late_collision_errors)); + if (errorCounters_p->tx_underrun_errors) + IOLog("tx_underrun_errors %ld\n", + OSReadLE32(&errorCounters_p->tx_underrun_errors)); + if (errorCounters_p->tx_lost_carrier_sense_errors) + IOLog("tx_lost_carrier_sense_errors %ld\n", + OSReadLE32(&errorCounters_p->tx_lost_carrier_sense_errors)); + if (errorCounters_p->tx_deferred) + IOLog("tx_deferred %ld\n", OSReadLE32(&errorCounters_p->tx_deferred)); + if (errorCounters_p->tx_single_collisions) + IOLog("tx_single_collisions %ld\n", + OSReadLE32(&errorCounters_p->tx_single_collisions)); + if (errorCounters_p->tx_multiple_collisions) + IOLog("tx_multiple_collisions %ld\n", + OSReadLE32(&errorCounters_p->tx_multiple_collisions)); + if (errorCounters_p->tx_total_collisions) + IOLog("tx_total_collisions %ld\n", + OSReadLE32(&errorCounters_p->tx_total_collisions)); + if (errorCounters_p->rx_good_frames) + IOLog("rx_good_frames %ld\n", + OSReadLE32(&errorCounters_p->rx_good_frames)); + if (errorCounters_p->rx_crc_errors) + IOLog("rx_crc_errors %ld\n", + OSReadLE32(&errorCounters_p->rx_crc_errors)); + if (errorCounters_p->rx_alignment_errors) + IOLog("rx_alignment_errors %ld\n", + OSReadLE32(&errorCounters_p->rx_alignment_errors)); + if (errorCounters_p->rx_resource_errors) + IOLog("rx_resource_errors %ld\n", + OSReadLE32(&errorCounters_p->rx_resource_errors)); + if (errorCounters_p->rx_overrun_errors) + IOLog("rx_overrun_errors %ld\n", + OSReadLE32(&errorCounters_p->rx_overrun_errors)); + if (errorCounters_p->rx_collision_detect_errors) + 
IOLog("rx_collision_detect_errors %ld\n", + OSReadLE32(&errorCounters_p->rx_collision_detect_errors)); + if (errorCounters_p->rx_short_frame_errors) + IOLog("rx_short_frame_errors %ld\n", + OSReadLE32(&errorCounters_p->rx_short_frame_errors)); + return; +} + +//--------------------------------------------------------------------------- +// Function: _dumpStatistics +// +// Purpose: +// _dumpStatistics issues a new statistics dump command. Every few seconds, +// _updateStatistics is called from timeoutOccurred to check for updated +// statistics. If complete, update our counters, and issue a new dump +// command. + +bool Intel82557::_dumpStatistics() +{ + reserveDebuggerLock(); + + if (!_waitSCBCommandClear(CSR_p)) { + IOLog("%s: _dumpStatistics: _waitSCBCommandClear failed\n", getName()); + return false; + } + + OSWriteLE8(&CSR_p->command, + CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_DUMP_RESET_STAT)); + + prevCUCommand = SCB_CUC_DUMP_RESET_STAT; + + releaseDebuggerLock(); + + return true; +} + +//--------------------------------------------------------------------------- +// Function: _updateStatistics +// +// Purpose: +// Gather statistics information from the adapter at regular intervals. + +void Intel82557::_updateStatistics() +{ + if (OSReadLE32(&errorCounters_p->_status) != DUMP_STATUS) { + if (verbose) + _logCounters(errorCounters_p); + + // Ethernet transmitter stats. 
+ // + etherStats->dot3StatsEntry.singleCollisionFrames += + OSReadLE32(&errorCounters_p->tx_single_collisions); + + etherStats->dot3StatsEntry.multipleCollisionFrames += + OSReadLE32(&errorCounters_p->tx_multiple_collisions); + + etherStats->dot3StatsEntry.lateCollisions += + OSReadLE32(&errorCounters_p->tx_late_collision_errors); + + etherStats->dot3StatsEntry.excessiveCollisions += + OSReadLE32(&errorCounters_p->tx_maxcol_errors); + + etherStats->dot3StatsEntry.deferredTransmissions += + OSReadLE32(&errorCounters_p->tx_deferred); + + etherStats->dot3StatsEntry.carrierSenseErrors += + OSReadLE32(&errorCounters_p->tx_lost_carrier_sense_errors); + + etherStats->dot3TxExtraEntry.underruns += + OSReadLE32(&errorCounters_p->tx_underrun_errors); + + // Ethernet receiver stats. + // + etherStats->dot3StatsEntry.alignmentErrors += + OSReadLE32(&errorCounters_p->rx_alignment_errors); + + etherStats->dot3StatsEntry.fcsErrors += + OSReadLE32(&errorCounters_p->rx_crc_errors); + + etherStats->dot3RxExtraEntry.resourceErrors += + OSReadLE32(&errorCounters_p->rx_resource_errors); + + etherStats->dot3RxExtraEntry.overruns += + OSReadLE32(&errorCounters_p->rx_overrun_errors); + + etherStats->dot3RxExtraEntry.collisionErrors += + OSReadLE32(&errorCounters_p->rx_collision_detect_errors); + + etherStats->dot3RxExtraEntry.frameTooShorts += + OSReadLE32(&errorCounters_p->rx_short_frame_errors); + + // Generic network stats. For the error counters, we assume + // the Ethernet stats will never be cleared. Thus we derive the + // error counters by summing the appropriate Ethernet error fields. 
+ // + netStats->outputErrors = + ( etherStats->dot3StatsEntry.lateCollisions + + etherStats->dot3StatsEntry.excessiveCollisions + + etherStats->dot3StatsEntry.carrierSenseErrors + + etherStats->dot3TxExtraEntry.underruns + + etherStats->dot3TxExtraEntry.resourceErrors); + + netStats->inputErrors = + ( etherStats->dot3StatsEntry.fcsErrors + + etherStats->dot3StatsEntry.alignmentErrors + + etherStats->dot3RxExtraEntry.resourceErrors + + etherStats->dot3RxExtraEntry.overruns + + etherStats->dot3RxExtraEntry.collisionErrors + + etherStats->dot3RxExtraEntry.frameTooShorts); + + netStats->collisions += + OSReadLE32(&errorCounters_p->tx_total_collisions); + + OSWriteLE32(&errorCounters_p->_status, DUMP_STATUS); + _dumpStatistics(); + } +} + +//--------------------------------------------------------------------------- +// Function: _allocateMemPage +// +// Purpose: +// Allocate a page of memory. + +bool Intel82557::_allocateMemPage(pageBlock_t * p) +{ + p->memSize = PAGE_SIZE; + p->memPtr = IOMallocAligned(p->memSize, PAGE_SIZE); + + if (!p->memPtr) + return false; + + bzero(p->memPtr, p->memSize); + p->memAllocPtr = p->memPtr; /* initialize for allocation routine */ + p->memAvail = p->memSize; + + return true; +} + +//--------------------------------------------------------------------------- +// Function: _freeMemPage +// +// Purpose: +// Deallocate a page of memory. +// +void Intel82557::_freeMemPage(pageBlock_t * p) +{ + IOFreeAligned(p->memPtr, p->memSize); +} + +//--------------------------------------------------------------------------- +// Function: hwInit +// +// Purpose: +// Reset/configure the chip, detect the PHY. 

// Bring the chip from reset to a configured, addressed state: load the CU
// and RU base addresses, register the statistics dump area, probe the PHY,
// then run CONFIGURE and IA-SETUP. Each SCB command must wait for the
// previous one to clear before being issued.
bool Intel82557::hwInit()
{
    disableAdapterInterrupts();
    _resetChip();
    // Reset clears the interrupt mask, so mask again afterwards.
    // NOTE(review): the pre-reset disable looks redundant — confirm.
    disableAdapterInterrupts();

    /* disable early RX interrupt */
    OSWriteLE8(&CSR_p->earlyRxInterrupt, 0);

    /* load command unit base address (zero: TCB links are absolute) */
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: CU _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&CSR_p->pointer, 0);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_LOAD_BASE));
    prevCUCommand = SCB_CUC_LOAD_BASE;

    /* load receive unit base address */
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: RU _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&CSR_p->pointer, 0);
    OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_RUC, SCB_RUC_LOAD_BASE));

    // Tell the chip where to DMA statistics dumps.
    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: before LOAD_DUMP_COUNTERS_ADDRESS:"
              " _waitSCBCommandClear failed\n", getName());
        return false;
    }
    OSWriteLE32(&errorCounters_p->_status, DUMP_STATUS);
    OSWriteLE32(&CSR_p->pointer, errorCounters_paddr);
    OSWriteLE8(&CSR_p->command,
               CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_LOAD_DUMP_ADDR));
    prevCUCommand = SCB_CUC_LOAD_DUMP_ADDR;

    if (!_waitSCBCommandClear(CSR_p)) {
        IOLog("%s: hwInit: before intrACK _waitSCBCommandClear failed\n",
              getName());
        return false;
    }

    /* Setup flow-control threshold */
    OSWriteLE8(&CSR_p->flowControlThreshold,
               CSR_FIELD(FC_THRESHOLD, FC_THRESHOLD_512));

    _intrACK(CSR_p);    /* ack any pending interrupts */

    _phyProbe();

    phyID = _phyGetID();
    VPRINT("%s: PHY model id is 0x%08lx\n", getName(), phyID);
    phyID &= PHY_MODEL_MASK;

    if (!config())
        return false;
    IOSleep(500);       // allow the configure command to take effect

    if (!iaSetup())
        return false;

    _intrACK(CSR_p);    /* ack any pending interrupts */

    return true;
}

//---------------------------------------------------------------------------
// Function: _memAllocFrom
//
// Purpose:
//   Return the next aligned chunk of memory in our shared memory page.

// Bump allocator over a pageBlock. Returns an 'align'-aligned pointer to
// 'allocSize' bytes, or 0 if the page cannot satisfy the request (or
// align == 0). Memory is never returned to the page.
void * Intel82557::_memAllocFrom(pageBlock_t * p, UInt allocSize, UInt align)
{
    void * allocPtr;
    UInt   sizeReal;

    if (align == 0)
        return 0;

    // Advance allocPtr to next aligned boundary (align must be a power of 2).
    allocPtr =
        (void *)((UInt)((UInt) p->memAllocPtr + (align - 1)) & (~(align - 1)));

    // Actual size of required storage. We need to take the alignment padding
    // into account.
    sizeReal = allocSize + ((UInt) allocPtr - (UInt) p->memAllocPtr);

    if (sizeReal > p->memAvail)
        return 0;

    p->memAllocPtr = (void *)((UInt) p->memAllocPtr + sizeReal);
    p->memAvail    = p->memSize - ((UInt) p->memAllocPtr - (UInt) p->memPtr);
    return allocPtr;
}

//---------------------------------------------------------------------------
// Function: coldInit
//
// Purpose:
//   One-time initialization code. This is called by start(), before we
//   attach any client objects. Carves the shared/TX/RX pages into the
//   command overlay, TCB ring, kernel-debugger TCB and buffer, statistics
//   area and RFD ring, resolving physical addresses as it goes.

bool Intel82557::coldInit()
{
    IOReturn          result;
    IOPhysicalAddress paddr;

    disableAdapterInterrupts();

    /* allocate and initialize shared memory pointers */
    if (!_allocateMemPage(&shared)) {
        IOLog("%s: Can't allocate shared memory page\n", getName());
        return false;
    }
    if (!_allocateMemPage(&txRing)) {
        IOLog("%s: Can't allocate memory page for TX ring\n", getName());
        return false;
    }
    if (!_allocateMemPage(&rxRing)) {
        IOLog("%s: Can't allocate memory page for RX ring\n", getName());
        return false;
    }

    /* allocate memory for shared data structures + self test needs to be
     * 16 byte aligned
     */
    overlay_p = (overlay_t *) _memAllocFrom(&shared, sizeof(overlay_t),
                                            PARAGRAPH_ALIGNMENT);
    if (!overlay_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) overlay_p, &overlay_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid command block address\n", getName());
        return false;
    }

    tcbList_p = (tcb_t *) _memAllocFrom(&txRing,
                                        sizeof(tcb_t) * NUM_TRANSMIT_FRAMES,
                                        CACHE_ALIGNMENT);
    if (!tcbList_p)
        return false;

    // Dedicated TCB for the kernel debugger transmit path.
    KDB_tcb_p = (tcb_t *) _memAllocFrom(&shared,
                                        sizeof(tcb_t),
                                        CACHE_ALIGNMENT);
    if (!KDB_tcb_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) KDB_tcb_p,
                                   &KDB_tcb_p->_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid TCB address\n", getName());
        return false;
    }

    result = IOPhysicalFromVirtual((vm_address_t) &KDB_tcb_p->_tbds, &paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid TCB->_TBD address\n", getName());
        return false;
    }
    OSWriteLE32(&KDB_tcb_p->tbdAddr, paddr);

    // Scratch buffer for the debugger (also reused by _resetRfdList).
    KDB_buf_p = _memAllocFrom(&shared, ETHERMAXPACKET, DWORD_ALIGNMENT);
    if (!KDB_buf_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) KDB_buf_p, &KDB_buf_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid address\n", getName());
        return false;
    }

    errorCounters_p = (errorCounters_t *) _memAllocFrom(&shared,
                                              sizeof(errorCounters_t),
                                              DWORD_ALIGNMENT);
    if (!errorCounters_p)
        return false;
    result = IOPhysicalFromVirtual((vm_address_t) errorCounters_p,
                                   &errorCounters_paddr);
    if (result != kIOReturnSuccess) {
        IOLog("%s: Invalid errorCounters address\n", getName());
        return false;
    }

    rfdList_p = (rfd_t *) _memAllocFrom(&rxRing,
                                        sizeof(rfd_t) * NUM_RECEIVE_FRAMES,
                                        CACHE_ALIGNMENT);
    if (!rfdList_p)
        return false;

    if (!_selfTest())
        return false;

    myAddress = eeprom->getContents()->addr;

    return true;
}

//---------------------------------------------------------------------------
// Function: receiveInterruptOccurred
//
// Purpose:
//   Hand up received frames. Walks the RFD ring from headRfd, passing good
//   frames to the network stack and recycling each descriptor onto the
//   tail. Returns true if at least one packet was queued for the stack.

bool Intel82557::receiveInterruptOccurred()
{
    bool packetsQueued = false;

    // Process every completed RFD (RFD_STATUS_C set by the chip).
    while (OSReadLE16(&headRfd->status) & RFD_STATUS_C) {
        rbd_count_t rbd_count = OSReadLE32(&headRfd->_rbd.count);

        // rxCount does NOT include the Ethernet CRC (FCS).
        //
        UInt rxCount = CSR_VALUE(RBD_COUNT, rbd_count);

#if 0
        // When the receive unit runs out of resources, it will
        // skip over RFD/RBD, making them as complete, but the RBD will
        // have zero bytes and the EOF bit will not be set.
        // We just skip over those and allow them to be recycled.
        //
        // In those cases, the RFD->status word will be 0x8220.

        /* should have exactly 1 rbd per rfd */
        if (!(rbd_count & RBD_COUNT_EOF)) {
            IOLog("%s: more than 1 rbd, frame size %d\n", getName(), rxCount);

            IOLog("%s: RFD status: %04x\n", getName(),
                  OSReadLE16(&headRfd->status));

            issueReset();
            return;
        }
#endif

        if ((!(OSReadLE16(&headRfd->status) & RFD_STATUS_OK)) ||
            (rxCount < (ETHERMINPACKET - ETHERCRC)) ||
            !enabledForNetif) {
            ;   /* bad or unwanted packet */
        }
        else {
            struct mbuf * m    = headRfd->_rbd._mbuf;
            struct mbuf * m_in = 0; // packet to pass up to inputPacket()
            bool          replaced;

            packetsReceived = true;

            // Either replace the ring mbuf with a fresh one, or copy the
            // frame out; 'replaced' tells us which happened.
            m_in = replaceOrCopyPacket(&m, rxCount, &replaced);
            if (!m_in) {
                etherStats->dot3RxExtraEntry.resourceErrors++;
                goto RX_INTR_ABORT;
            }

            if (replaced && (updateRFDFromMbuf(headRfd, m) == false)) {
                freePacket(m);  // free the new replacement mbuf.
                m_in = 0;       // pass up nothing.
                etherStats->dot3RxExtraEntry.resourceErrors++;
                IOLog("%s: updateRFDFromMbuf() error\n", getName());
                goto RX_INTR_ABORT;
            }

            netif->inputPacket(m_in, rxCount, true);
            packetsQueued = true;
            netStats->inputPackets++;
        }

RX_INTR_ABORT:
        // Recycle the descriptor: the old head becomes the new end-of-list
        // tail, and the previous tail's EL/size markers are cleared so the
        // chip can advance into it.

        /* clear fields in rfd */
        OSWriteLE16(&headRfd->status, 0);
        OSWriteLE16(&headRfd->command, (RFD_COMMAND_SF | RFD_COMMAND_EL));
        OSWriteLE32(&headRfd->rbdAddr, C_NULL);
        OSWriteLE32(&headRfd->misc, 0);

        /* clear fields in rbd */
        OSWriteLE32(&headRfd->_rbd.count, 0);
        OSWriteLE32(&headRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE) |
                    RBD_SIZE_EL);

        /* adjust tail markers */
        OSWriteLE32(&tailRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE));
        OSWriteLE16(&tailRfd->command, RFD_COMMAND_SF);

        tailRfd = headRfd;          // new tail
        headRfd = headRfd->_next;   // new head
    } /* while */

    return packetsQueued;
}

//---------------------------------------------------------------------------
// Function: transmitInterruptOccurred
//
// Purpose:
//   Free up packets associated with any completed TCB's.

void Intel82557::transmitInterruptOccurred()
{
    tcbQ_t * tcbQ_p = &tcbQ;
    tcb_t *  head;

    // Reclaim TCBs from the active list head while they are marked
    // complete and the queue is not already fully free.
    head = tcbQ_p->activeHead_p;
    while (tcbQ_p->numFree < tcbQ_p->numTcbs &&
           (OSReadLE16(&head->status) & TCB_STATUS_C))
    {
        OSWriteLE16(&head->status, 0);
        if (head->_mbuf) {
            freePacket(head->_mbuf);
            head->_mbuf = 0;
        }
        head = tcbQ_p->activeHead_p = head->_next;
        tcbQ_p->numFree++;
    }

    return;
}

//---------------------------------------------------------------------------
// Function: interruptOccurred
//
// Purpose:
//   Field an interrupt.

// Primary interrupt handler, run on the workloop. Takes the debugger lock,
// acks and dispatches RX/TX conditions until the interrupt line deasserts,
// then performs stack callouts (input flush, queue service) WITHOUT the
// lock held to avoid deadlock with outputPacket().
void Intel82557::interruptOccurred(IOInterruptEventSource * src, int /*count*/)
{
    scb_status_t status;
    bool         flushInputQ = false;
    bool         doService   = false;

    reserveDebuggerLock();

    if (interruptEnabled == false) {
        _intrACK(CSR_p);    // ack so the line drops, but process nothing
        releaseDebuggerLock();
        IOLog("%s: unexpected interrupt\n", getName());
        return;
    }

    /*
     * Loop until the interrupt line becomes deasserted.
     */
    while (1) {
        if ((status = _intrACK(CSR_p)) == 0)
            break;

        /*
         * RX interrupt.
         */
        if (status & (SCB_STATUS_FR | SCB_STATUS_RNR)) {

            flushInputQ = receiveInterruptOccurred() || flushInputQ;

            etherStats->dot3RxExtraEntry.interrupts++;

            // RNR: receiver ran out of resources; rebuild the ring and
            // restart the receive unit.
            if (status & SCB_STATUS_RNR) {
                etherStats->dot3RxExtraEntry.resets++;

                _abortReceive();
                _resetRfdList();

                if (!_startReceive()) {
                    IOLog("%s: Unable to restart receiver\n", getName());
                    // issueReset(); /* shouldn't need to do this. */
                }
            }
        }

        /*
         * TX interrupt.
         */
        if (status & (SCB_STATUS_CX | SCB_STATUS_CNA)) {
            transmitInterruptOccurred();
            etherStats->dot3TxExtraEntry.interrupts++;
            doService = true;   // freed TCBs: wake a stalled output queue
        }
    }

    releaseDebuggerLock();

    if (enabledForNetif) {
        // Flush all packets received and pass them to the network stack.
        //
        if (flushInputQ)
            netif->flushInputQueue();

        // Call service() without holding the debugger lock to prevent a
        // deadlock when service() calls our outputPacket() function.
        //
        if (doService)
            transmitQueue->service();
    }
}

//---------------------------------------------------------------------------
// Function: updateTCBForMbuf
//
// Purpose:
//   Update the TxCB pointed by tcb_p to point to the mbuf chain 'm'.
//   Returns the mbuf encoded onto the TxCB, or 0 if the mbuf could not
//   be mapped into TBD entries.

struct mbuf *
Intel82557::updateTCBForMbuf(tcb_t * tcb_p, struct mbuf * m)
{
    // Set the invariant TCB fields.
    //
    OSWriteLE16(&tcb_p->status, 0);

    // Request a TX interrupt only every TRANSMIT_INT_DELAY packets to
    // reduce interrupt load; every TCB carries the suspend (S) bit.
    if (++txCount == TRANSMIT_INT_DELAY) {
        OSWriteLE16(&tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
                    TCB_COMMAND_S |
                    TCB_COMMAND_SF |
                    TCB_COMMAND_I);
        txCount = 0;
    }
    else
        OSWriteLE16(&tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) |
                    TCB_COMMAND_S |
                    TCB_COMMAND_SF);

    OSWriteLE8(&tcb_p->threshold, TCB_TX_THRESHOLD);
    OSWriteLE16(&tcb_p->count, 0);  // all data are in the TBD's, none in TxCB

    // Since the format of a TBD closely matches the structure of an
    // 'struct IOPhysicalSegment', we shall have the cursor update the TBD list
    // directly.
    //
    UInt segments = txMbufCursor->getPhysicalSegmentsWithCoalesce(m,
                        (struct IOPhysicalSegment *) &tcb_p->_tbds[0],
                        TBDS_PER_TCB);

    if (!segments) {
        IOLog("%s: getPhysicalSegments error, pkt len = %d\n",
              getName(), m->m_pkthdr.len);
        return 0;
    }

    // Update the TBD array size count.
    //
    OSWriteLE8(&tcb_p->number, segments);

    return m;
}

//---------------------------------------------------------------------------
// Function: outputPacket
//
// Purpose:
//   Transmit the packet handed by our IOOutputQueue.
//   TCBs have the suspend bit set, so that the CU goes into the suspend
//   state when done. We use the CU_RESUME optimization that allows us to
//   issue CU_RESUMES without waiting for SCB command to clear.
//
UInt32 Intel82557::outputPacket(struct mbuf * m, void * param)
{
    tcb_t * tcb_p;

    if (!enabledForNetif) { // drop the packet.
        freePacket(m);
        return kIOReturnOutputDropped;
    }

    reserveDebuggerLock();

    if (tcbQ.numFree == 0) {    // retry when more space is available.
        releaseDebuggerLock();
        return kIOReturnOutputStall;
    }

    packetsTransmitted = true;
    netStats->outputPackets++;

    tcb_p = tcbQ.freeHead_p;

    tcb_p->_mbuf = updateTCBForMbuf(tcb_p, m);
    if (tcb_p->_mbuf == 0) {
        etherStats->dot3TxExtraEntry.resourceErrors++;
        goto fail;
    }

    /* update the queue */
    tcbQ.numFree--;
    tcbQ.freeHead_p = tcbQ.freeHead_p->_next;

    /* The TCB is already setup and the suspend bit set. Now clear the
     * suspend bit of the previous TCB.
     */
    if (tcbQ.activeTail_p != tcb_p)
        OSClearLE16(&tcbQ.activeTail_p->command, TCB_COMMAND_S);
    tcbQ.activeTail_p = tcb_p;

    /*
     * CUC_RESUME is optimized such that it is unnecessary to wait
     * for the CU to clear the SCB command word if the previous command
     * was a resume and the CU state is not idle.
     */
    if (CSR_VALUE(SCB_STATUS_CUS, OSReadLE16(&CSR_p->status)) == SCB_CUS_IDLE)
    {
        // CU is idle: must START it at this TCB's physical address.
        if (!_waitSCBCommandClear(CSR_p)) {
            IOLog("%s: outputPacket: _waitSCBCommandClear error\n", getName());
            etherStats->dot3TxExtraEntry.timeouts++;
            goto fail;
        }
        OSWriteLE32(&CSR_p->pointer, tcb_p->_paddr);
        OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC, SCB_CUC_START));
        prevCUCommand = SCB_CUC_START;
    }
    else {
        // CU active/suspended: a RESUME is enough. Only wait for the SCB
        // if the previous command was not itself a resume.
        if (prevCUCommand != SCB_CUC_RESUME) {
            if (!_waitSCBCommandClear(CSR_p)) {
                IOLog("%s: outputPacket: _waitSCBCommandClear error\n",
                      getName());
                etherStats->dot3TxExtraEntry.timeouts++;
                goto fail;
            }
        }
        OSWriteLE8(&CSR_p->command, CSR_FIELD(SCB_COMMAND_CUC,SCB_CUC_RESUME));
        prevCUCommand = SCB_CUC_RESUME;
    }
    releaseDebuggerLock();
    return kIOReturnOutputSuccess;

fail:
    freePacket(m);
    tcb_p->_mbuf = 0;
    releaseDebuggerLock();
    return kIOReturnOutputDropped;
}

//---------------------------------------------------------------------------
// Function: _receivePacket
//
// Purpose:
//   Part of kerneldebugger protocol.
//   Returns true if a packet was received successfully.
+// +bool Intel82557::_receivePacket(void * pkt, UInt * len, UInt timeout) +{ + bool processPacket = true; + bool ret = false; + scb_status_t status; + + timeout *= 1000; + + while ((OSReadLE16(&headRfd->status) & RFD_STATUS_C) == 0) { + if ((int) timeout <= 0) { + processPacket = false; + break; + } + IODelay(50); + timeout -= 50; + } + + if (processPacket) { + if ((OSReadLE16(&headRfd->status) & RFD_STATUS_OK) && + (OSReadLE32(&headRfd->_rbd.count) & RBD_COUNT_EOF)) + { + // Pass up good frames. + // + *len = CSR_VALUE(RBD_COUNT, OSReadLE32(&headRfd->_rbd.count)); + *len = MIN(*len, ETHERMAXPACKET); + bcopy(mtod(headRfd->_rbd._mbuf, void *), pkt, *len); + ret = true; + } + + /* the head becomes the new tail */ + /* clear fields in rfd */ + OSWriteLE16(&headRfd->status, 0); + OSWriteLE16(&headRfd->command, (RFD_COMMAND_SF | RFD_COMMAND_EL)); + OSWriteLE32(&headRfd->rbdAddr, C_NULL); + OSWriteLE32(&headRfd->misc, 0); + + /* clear fields in rbd */ + OSWriteLE32(&headRfd->_rbd.count, 0); + OSWriteLE32(&headRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE) | + RBD_SIZE_EL); + + /* adjust tail markers */ + OSWriteLE32(&tailRfd->_rbd.size, CSR_FIELD(RBD_SIZE, MAX_BUF_SIZE)); + OSWriteLE16(&tailRfd->command, RFD_COMMAND_SF); + + tailRfd = headRfd; // new tail + headRfd = headRfd->_next; // new head + } + + status = OSReadLE16(&CSR_p->status) & SCB_STATUS_RNR; + if (status) { + OSWriteLE16(&CSR_p->status, status); // ack RNR interrupt + + IOLog("Intel82557::%s restarting receiver\n", __FUNCTION__); + + IOLog("%s::%s RUS:0x%x Index:%d\n", getName(), __FUNCTION__, + CSR_VALUE(SCB_STATUS_RUS, OSReadLE16(&CSR_p->status)), + tailRfd - rfdList_p); + + _abortReceive(); + +#if 0 // Display RFD/RBD fields + for (int i = 0; i < NUM_RECEIVE_FRAMES; i++) { + IOLog(" %02d: %04x %04x - %08x %08x\n", i, + OSReadLE16(&rfdList_p[i].command), + OSReadLE16(&rfdList_p[i].status), + OSReadLE32(&rfdList_p[i]._rbd.size), + OSReadLE32(&rfdList_p[i].misc)); + } +#endif + + _resetRfdList(); + 
_startReceive(); + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Function: _sendPacket +// +// Purpose: +// Part of kerneldebugger protocol. +// Returns true if the packet was sent successfully. + +bool Intel82557::_sendPacket(void * pkt, UInt len) +{ + tbd_t * tbd_p; + + // Set up the TCB and issue the command + // + OSWriteLE16(&KDB_tcb_p->status, 0); + OSWriteLE32(&KDB_tcb_p->link, C_NULL); + OSWriteLE8(&KDB_tcb_p->threshold, TCB_TX_THRESHOLD); + OSWriteLE16(&KDB_tcb_p->command, CSR_FIELD(TCB_COMMAND, CB_CMD_TRANSMIT) | + TCB_COMMAND_EL | + TCB_COMMAND_SF ); + OSWriteLE16(&KDB_tcb_p->count, 0); // all data are in the TBD's. + OSWriteLE8(&KDB_tcb_p->number, 1); // 1 TBD only. + + // Copy the debugger packet to the pre-allocated buffer area. + // + len = MIN(len, ETHERMAXPACKET); + len = MAX(len, ETHERMINPACKET); + bcopy(pkt, KDB_buf_p, len); + + // Update the TBD. + // + tbd_p = &KDB_tcb_p->_tbds[0]; + OSWriteLE32(&tbd_p->addr, KDB_buf_paddr); + OSWriteLE32(&tbd_p->size, CSR_FIELD(TBD_SIZE, len)); + + // Start up the command unit to send the packet. + // + return _polledCommand((cbHeader_t *) KDB_tcb_p, KDB_tcb_p->_paddr); +} diff --git a/iokit/Drivers/network/drvIntel82557/i82557eeprom.cpp b/iokit/Drivers/network/drvIntel82557/i82557eeprom.cpp new file mode 100644 index 000000000..a0a479cee --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557eeprom.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. + * + * i82557eeprom.m + * - Intel 82557 eeprom access object + * + * HISTORY + * + * 6-Mar-96 Dieter Siegmund (dieter) at NeXT + * Created. + */ + +#include +#include "i82557eeprom.h" + +#define super OSObject +OSDefineMetaClassAndStructors( i82557eeprom, OSObject ) + +static __inline__ void +_logAddr(unsigned char * addr) +{ + int i; + for (i = 0; i < NUM_EN_ADDR_BYTES; i++) { + IOLog("%s%02x", i > 0 ? ":" : "", addr[i]); + } + return; +} + +void i82557eeprom::dumpContents() +{ + EEPROM_t * eeprom_p = &image.fields; + + IOLog("The EEPROM contains the following information:\n"); + + IOLog("ethernet address: "); + _logAddr((unsigned char *) &eeprom_p->addr); + IOLog("\n"); + + if (eeprom_p->compatibility_0 & EEPROM_C0_MC_10) + IOLog("compatibility: MCSETUP workaround required for 10 Mbits\n"); + if (eeprom_p->compatibility_0 & EEPROM_C0_MC_100) + IOLog("compatibility: MCSETUP workaround required for 100 Mbits\n"); + + IOLog("connectors: %s %s %s %s\n", + eeprom_p->connectors & EEPROM_CON_RJ45 ? "RJ-45" : "", + eeprom_p->connectors & EEPROM_CON_BNC ? "BNC" : "", + eeprom_p->connectors & EEPROM_CON_AUI ? "AUI" : "", + eeprom_p->connectors & EEPROM_CON_MII ? "MII" : ""); + + IOLog("controller type: %d\n", eeprom_p->controllerType); + + for (int i = 0; i < NUM_PHYS; i++) { + char * s = (i == PRIMARY_PHY) ? 
"primary" : "secondary"; + UInt16 phy = OSReadLE16(&eeprom_p->phys[i]); + + IOLog("%s PHY: %s\n", s, + PHYDeviceNames(CSR_VALUE(EEPROM_PHY_DEVICE, phy))); + if (CSR_VALUE(EEPROM_PHY_DEVICE, phy) != PHYDevice_None_e) { + if (phy & EEPROM_PHY_VSCR) + IOLog("%s PHY: vendor specific code required\n", s); + if (phy & EEPROM_PHY_10) + IOLog("%s PHY: 10 Mbits only, requires 503 interface\n", s); + IOLog("%s PHY address: 0x%x\n", s, + CSR_VALUE(EEPROM_PHY_ADDRESS, phy)); + } + } + + IOLog("PWA Number: %d %d %d-0%d\n", eeprom_p->PWANumber[1], + eeprom_p->PWANumber[0], eeprom_p->PWANumber[3], + eeprom_p->PWANumber[2]); + + IOLog("Checksum: 0x%x\n", OSReadLE16(&eeprom_p->checkSum)); +#if 0 + if (eeprom_p->checkSum != image.words[NUM_EEPROM_WORDS - 1]) + IOLog("the checksum in the struct doesn't match that in the array\n"); +#endif + return; +} + +i82557eeprom * i82557eeprom::withAddress(volatile eeprom_control_t * p) +{ + i82557eeprom * eeprom = new i82557eeprom; + + if (eeprom && !eeprom->initWithAddress(p)) { + eeprom->release(); + return 0; + } + return eeprom; +} + +bool i82557eeprom::initWithAddress(volatile eeprom_control_t * p) +{ + int i; + UInt16 sum; + + if (!super::init()) + return false; + + ee_p = p; + + /* + * Find out the number of bits in the address by issuing a read to address + * 0 ie. keep feeding eeprom address bits with value 0, until the eeprom + * says that the address is complete. It tells us by setting EEDO to 0 + * after a write cycle. 
+ */ + EEPROMEnable(ee_p); + EEPROMWriteBit(ee_p, 1); /* read */ + EEPROMWriteBit(ee_p, 1); + EEPROMWriteBit(ee_p, 0); + nbits = 1; + + do { + EEPROMWriteBit(ee_p, 0); + if ((OSReadLE16(ee_p) & EEPROM_CONTROL_EEDO) == 0) + break; + nbits++; + } while (nbits <= 32); + + // IOLog("nbits: %d\n", nbits); + + EEPROMDisable(ee_p); + for (sum = 0, i = 0; i < NUM_EEPROM_WORDS; i++) { + UInt16 w = readWord(i); + sum += w; + OSWriteLE16(&image.words[i], w); + } + if (sum != EEPROM_CHECKSUM_VALUE) { + IOLog("i82557eeprom: checksum %x incorrect\n", sum); + return false; + } + + return true; +} + +/* READ command bit sequence: 1 1 0 a5a4a3a2a1a0 */ +UInt16 i82557eeprom::readWord(int offset) +{ + int i; + UInt16 value; + + EEPROMEnable(ee_p); + EEPROMWriteBit(ee_p, 1); + EEPROMWriteBit(ee_p, 1); + EEPROMWriteBit(ee_p, 0); + for (i = (nbits - 1); i >= 0; i--) { + EEPROMWriteBit(ee_p, (offset >> i) & 1); + } + value = 0; + for (i = BITS_IN_SHORT - 1; i >= 0; i--) { + value |= (EEPROMReadBit(ee_p) << i); + } + EEPROMDisable(ee_p); + return (value); +} + +EEPROM_t * i82557eeprom::getContents() +{ + return (&image.fields); +} + diff --git a/iokit/Drivers/network/drvIntel82557/i82557eeprom.h b/iokit/Drivers/network/drvIntel82557/i82557eeprom.h new file mode 100644 index 000000000..a1e7c2539 --- /dev/null +++ b/iokit/Drivers/network/drvIntel82557/i82557eeprom.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1996 NeXT Software, Inc. + * + * i82557eeprom.h + * - Intel 82557 eeprom access object + * + * HISTORY + * + * 6-Mar-96 Dieter Siegmund (dieter) at NeXT + * Created. + */ + +#ifndef _I82557EEPROM_H +#define _I82557EEPROM_H + +#include "i82557Inline.h" +#include "i82557HW.h" + +#define BITS_IN_SHORT 16 +#define MIN_SK_HIGH 20 +#define MIN_SK_LOW 20 +#define NUM_EEPROM_WORDS 0x40 +#define EEPROM_CHECKSUM_VALUE 0xbaba + +//------------------------------------------------------------------------- +// Compatibility Byte 0 +// 8-bit, word 0x3, low byte +//------------------------------------------------------------------------- +#define EEPROM_C0_MC_100 BIT(1) +#define EEPROM_C0_MC_10 BIT(0) + +//------------------------------------------------------------------------- +// Compatibility Byte 1 +// 8-bit, word 0x3, high byte +//------------------------------------------------------------------------- +#define EEPROM_C1_OEM BIT(0) + +//------------------------------------------------------------------------- +// Controller Type +// 8-bit, word 0x5, high byte +//------------------------------------------------------------------------- +#define EEPROM_TYPE_82558 2 +#define EEPROM_TYPE_82557 1 + +//------------------------------------------------------------------------- +// Connectors +// 8-bit, word 0x5, low byte +//------------------------------------------------------------------------- +#define EEPROM_CON_MII BIT(3) +#define EEPROM_CON_AUI BIT(2) +#define 
EEPROM_CON_BNC BIT(1) +#define EEPROM_CON_RJ45 BIT(0) + +//------------------------------------------------------------------------- +// PHY Device Record. +// 16-bit, Primary word 0x6, Secondary word 0x7. +//------------------------------------------------------------------------- +#define EEPROM_PHY_10 BIT(15) +#define EEPROM_PHY_VSCR BIT(14) +#define EEPROM_PHY_DEVICE_SHIFT 8 +#define EEPROM_PHY_DEVICE_MASK CSR_MASK(EEPROM_PHY_DEVICE, 0x3f) +#define EEPROM_PHY_ADDRESS_SHIFT 0 +#define EEPROM_PHY_ADDRESS_MASK CSR_MASK(EEPROM_PHY_ADDRESS, 0xff) + +typedef enum { + PHYDevice_None_e = 0, + PHYDevice_Intel82553_A_B_step_e, + PHYDevice_Intel82553_C_step_e, + PHYDevice_Intel82503_e, + PHYDevice_NationalDP83840_TX_C_step_e, + PHYDevice_Seeq80C240_T4_e, + PHYDevice_Seeq80C24_e, + PHYDevice_Intel82555_e, + PHYDevice_MicroLinear_e, + PHYDevice_Level_One_e, + PHYDevice_NationalDP82840A_e, + PHYDevice_ICS1890_e, + PHYDevice_Last_e +} PHYDevice_t; + +static inline char * +PHYDeviceNames(unsigned int i) +{ + char * devices[] = { + "No PHY device installed", + "Intel 82553 (PHY 100) A or B step", + "Intel 82553 (PHY 100) C step", + "Intel 82503 10Mps", + "National DP83840 C step 100Base-TX", + "Seeq 80C240 100Base-T4", + "Seeq 80C24 10 Mps", + "Intel 82555 10/100Base-TX PHY", + "MicroLinear 10Mbps", + "Level One 10Mbps", + "National DP83840A", + "ICS 1890", + "PHY device unknown" + }; + if (i > PHYDevice_Last_e) + i = PHYDevice_Last_e; + return (devices[i]); +}; + +#define NUM_PHYS 2 +#define PRIMARY_PHY 0 +#define SECONDARY_PHY 1 +#define NPWA_BYTES 4 + +typedef struct { + IOEthernetAddress addr; + UInt8 compatibility_0; + UInt8 compatibility_1; + UInt16 zero0; + UInt8 connectors; + UInt8 controllerType; +#define I82557_CONTROLLER_TYPE 1 +#define I82558_CONTROLLER_TYPE 2 + UInt16 phys[NUM_PHYS]; + UInt8 PWANumber[NPWA_BYTES]; + UInt16 zero1[38]; + UInt16 rplConfig[2]; + UInt16 zero5[13]; + UInt16 checkSum; +} EEPROM_t; + +static inline +void EEPROMWriteBit(volatile 
eeprom_control_t * ee_p, bool bit) +{ + if (bit) + OSSetLE16(ee_p, EEPROM_CONTROL_EEDI); + else + OSClearLE16(ee_p, EEPROM_CONTROL_EEDI); + + OSSetLE16(ee_p, EEPROM_CONTROL_EESK); + IODelay(MIN_SK_HIGH); + OSClearLE16(ee_p, EEPROM_CONTROL_EESK); + IODelay(MIN_SK_LOW); +} + +static inline +bool EEPROMReadBit(volatile eeprom_control_t * ee_p) +{ + bool bit; + + OSSetLE16(ee_p, EEPROM_CONTROL_EESK); + IODelay(MIN_SK_HIGH); + bit = (OSReadLE16(ee_p) & EEPROM_CONTROL_EEDO) ? 1 : 0; + OSClearLE16(ee_p, EEPROM_CONTROL_EESK); + IODelay(MIN_SK_LOW); + return (bit); +} + +static inline +void EEPROMEnable(volatile eeprom_control_t * ee_p) +{ + OSSetLE16(ee_p, EEPROM_CONTROL_EECS); + return; +} + +static inline +void EEPROMDisable(volatile eeprom_control_t * ee_p) +{ + OSClearLE16(ee_p, EEPROM_CONTROL_EECS); + return; +} + +class i82557eeprom : public OSObject +{ + OSDeclareDefaultStructors(i82557eeprom) + +public: + volatile eeprom_control_t * ee_p; + int nbits; + union { + UInt16 words[NUM_EEPROM_WORDS]; + EEPROM_t fields; + } image; + + static i82557eeprom * withAddress(volatile eeprom_control_t * p); + + bool initWithAddress(volatile eeprom_control_t * p); + + UInt16 readWord(int offset); + + EEPROM_t * getContents(); + + void dumpContents(); +}; + +#endif /* !_I82557EEPROM_H */ diff --git a/iokit/Drivers/network/drvMaceEnet/MaceEnet.cpp b/iokit/Drivers/network/drvMaceEnet/MaceEnet.cpp new file mode 100644 index 000000000..778daebe2 --- /dev/null +++ b/iokit/Drivers/network/drvMaceEnet/MaceEnet.cpp @@ -0,0 +1,918 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1996 NeXT Software, Inc. + * + * Hardware independent (relatively) code for the Mace Ethernet Controller + * + * HISTORY + * + * dd-mmm-yy + * Created. + * + */ + +#include +#include +#include "MaceEnetPrivate.h" + +//------------------------------------------------------------------------ + +#define super IOEthernetController + +OSDefineMetaClassAndStructors( MaceEnet, IOEthernetController ) + +//------------------------------------------------------------------------ + +#define PROVIDER_DEV 0 +#define PROVIDER_DMA_TX 1 +#define PROVIDER_DMA_RX 2 + +/* + * Public Instance Methods + */ + +bool MaceEnet::init(OSDictionary * properties) +{ + if (!super::init(properties)) + return false; + + isPromiscuous = false; + multicastEnabled = false; + ready = false; + debugClient = false; + debugTxPoll = false; + netifClient = false; + + return true; +} + +MaceEnet * MaceEnet::probe(IOService * /*provider*/, + unsigned int * /*score*/, + unsigned int * /*specificity*/) +{ +#ifdef OLD_CODE + extern int kdp_flag; + + /* + * If bootargs: kdp bit 0 using in-kernel mace driver for early debugging, + * Don't probe this driver. 
+ */ + if( kdp_flag & 1) + { + return 0; + } +#endif + + return this; +} + +bool MaceEnet::start(IOService * provider) +{ + AppleMacIODevice *nub = OSDynamicCast(AppleMacIODevice, provider); + + if (!nub || !super::start(provider)) + return false; + + transmitQueue = OSDynamicCast(IOGatedOutputQueue, getOutputQueue()); + if (!transmitQueue) + { + IOLog("Mace: output queue initialization failed\n"); + return false; + } + transmitQueue->retain(); + + // Allocate debug queue. This stores packets retired from the TX ring + // by the polling routine. We cannot call freePacket() or m_free() within + // the debugger context. + // + // The capacity of the queue is set at maximum to prevent the queue from + // calling m_free() due to over-capacity. But we don't expect the size + // of the queue to grow too large. + // + debugQueue = IOPacketQueue::withCapacity((UInt) -1); + if (!debugQueue) + return false; + + // Allocate a IOMbufBigMemoryCursor instance. Currently, the maximum + // number of segments is set to 2. The maximum length for each segment + // is set to the maximum ethernet frame size (plus padding). + + mbufCursor = IOMbufBigMemoryCursor::withSpecification(NETWORK_BUFSIZE, 2); + if (!mbufCursor) + { + IOLog("Mace: IOMbufMemoryCursor allocation failed\n"); + return false; + } + + // + // Our provider is the nub representing the MaceEnet hardware + // controller. We will query it for our resource information. 
+ // + + for (int i = 0; i < MEMORY_MAP_COUNT; i++) { + IOMemoryMap * map; + + map = provider->mapDeviceMemoryWithIndex(i); + if (!map) + return false; + +#ifdef DEBUG_XXX + IOLog("map %d: Phys:%08x Virt:%08x len:%d\n", + i, + (UInt) map->getPhysicalAddress(), + (UInt) map->getVirtualAddress(), + (UInt) map->getLength()); +#endif + + switch (i) { + case MEMORY_MAP_ENET_INDEX: + ioBaseEnet = (IOPPCAddress) map->getVirtualAddress(); + ioBaseEnetROM = (IOPPCAddress) ((map->getPhysicalAddress() & + ~0xffff) | kControllerROMOffset); + break; + + case MEMORY_MAP_TXDMA_INDEX: + ioBaseEnetTxDMA = (IODBDMAChannelRegisters *) + map->getVirtualAddress(); + break; + + case MEMORY_MAP_RXDMA_INDEX: + ioBaseEnetRxDMA = (IODBDMAChannelRegisters *) + map->getVirtualAddress(); + break; + } + + maps[i] = map; + } + + // Manually create an IODeviceMemory for the ROM memory + // range. + // + IODeviceMemory * romMemory = IODeviceMemory::withRange( + (UInt) ioBaseEnetROM, 0x1000); + if (!romMemory) { + IOLog("Mace: can't create ROM memory object\n"); + return false; + } + + romMap = romMemory->map(); + romMemory->release(); + + if (!romMap) + return false; + + ioBaseEnetROM = (IOPPCAddress) romMap->getVirtualAddress(); + +#ifdef DEBUG_XXX + IOLog("Mace: ioBaseEnet : %08x\n", (UInt) ioBaseEnet); + IOLog("Mace: ioBaseEnetTxDMA : %08x\n", (UInt) ioBaseEnetTxDMA); + IOLog("Mace: ioBaseEnetRxDMA : %08x\n", (UInt) ioBaseEnetRxDMA); + IOLog("Mace: ioBaseEnetROM : %08x\n", (UInt) ioBaseEnetROM); +#endif + + // + // Get a reference to the IOWorkLoop in our superclass. + // + IOWorkLoop * myWorkLoop = (IOWorkLoop *) getWorkLoop(); + assert(myWorkLoop); + + // + // Allocate two IOInterruptEventSources. 
+ // + txIntSrc = IOInterruptEventSource::interruptEventSource + (this, + (IOInterruptEventAction) &MaceEnet::interruptOccurredForSource, + provider, PROVIDER_DMA_TX); + if (!txIntSrc + || (myWorkLoop->addEventSource(txIntSrc) != kIOReturnSuccess)) { + IOLog("Mace: txIntSrc init failure\n"); + return false; + } + + rxIntSrc = IOInterruptEventSource::interruptEventSource + (this, + (IOInterruptEventAction) &MaceEnet::interruptOccurredForSource, + provider, PROVIDER_DMA_RX); + if (!rxIntSrc + || (myWorkLoop->addEventSource(rxIntSrc) != kIOReturnSuccess)) { + IOLog("Mace: rxIntSrc init failure\n"); + return false; + } + + timerSrc = IOTimerEventSource::timerEventSource + (this, (IOTimerEventSource::Action) &MaceEnet::timeoutOccurred); + if (!timerSrc + || (myWorkLoop->addEventSource(timerSrc) != kIOReturnSuccess)) { + IOLog("Mace: timerSrc init failure\n"); + return false; + } + + MGETHDR(txDebuggerPkt, M_DONTWAIT, MT_DATA); + if (!txDebuggerPkt) + { + IOLog("Mace: Can't allocate KDB buffer\n"); + return false; + } + +#if 0 + // Do not enable interrupt sources until the hardware + // is enabled. + + // Enable the interrupt event sources. + myWorkLoop->enableAllInterrupts(); +#endif + +#if 0 + // Do not reset the hardware until we are ready to use it. + // Otherwise, we would have messed up kdp_mace driver's + // state. And we won't be able to break into the debugger + // until we attach our debugger client. + + // + // Perform a hardware reset. + // + if ( !resetAndEnable(false) ) + { + return false; + } +#endif + + // Cache my MAC address. + // + getHardwareAddress(&myAddress); + + // + // Allocate memory for ring buffers. + // + if (_allocateMemory() == false) + { + return false; + } + + // + // Attach a kernel debugger client. + // + attachDebuggerClient(&debugger); + + // + // Allocate and initialize an IONetworkInterface object. 
+ // + if (!attachInterface((IONetworkInterface **) &networkInterface)) + return false; + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::free() +{ + UInt i; + + timerSrc->cancelTimeout(); + + _resetChip(); + + if (debugger) + debugger->release(); + + if (timerSrc) + timerSrc->release(); + + if (rxIntSrc) + rxIntSrc->release(); + + if (txIntSrc) + txIntSrc->release(); + + if (transmitQueue) + transmitQueue->release(); + + if (debugQueue) + debugQueue->release(); + + if (networkInterface) + networkInterface->release(); + + if (mbufCursor) + mbufCursor->release(); + + if (txDebuggerPkt) + freePacket(txDebuggerPkt); + + for (i = 0; i < rxMaxCommand; i++) + if (rxMbuf[i]) freePacket(rxMbuf[i]); + + for (i = 0; i < txMaxCommand; i++) + if (txMbuf[i]) freePacket(txMbuf[i]); + + if (romMap) romMap->release(); + + for (i = 0; i < MEMORY_MAP_COUNT; i++) + if (maps[i]) maps[i]->release(); + + if (dmaMemory.ptr) + { + IOFree(dmaMemory.ptrReal, dmaMemory.sizeReal); + dmaMemory.ptr = 0; + } + + if ( workLoop ) + { + workLoop->release(); + workLoop = 0; + } + + super::free(); +} + +/*------------------------------------------------------------------------- + * Override IONetworkController::createWorkLoop() method and create + * a workloop. + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::createWorkLoop() +{ + workLoop = IOWorkLoop::workLoop(); + + return ( workLoop != 0 ); +} + +/*------------------------------------------------------------------------- + * Override IOService::getWorkLoop() method to return our workloop. 
+ * + * + *-------------------------------------------------------------------------*/ + +IOWorkLoop * MaceEnet::getWorkLoop() const +{ + return workLoop; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::interruptOccurredForSource(IOInterruptEventSource *src, + int /*count*/) +{ + bool doFlushQueue = false; + bool doService = false; + + // IOLog("Mace: interrupt %08x %d\n", (UInt) src, count); + + if (!ready) { + // IOLog("Mace: unexpected interrupt\n"); + return; + } + + reserveDebuggerLock(); + + if (src == txIntSrc) { + txWDInterrupts++; + KERNEL_DEBUG(DBG_MACE_TXIRQ | DBG_FUNC_START, 0, 0, 0, 0, 0 ); + doService = _transmitInterruptOccurred(); + KERNEL_DEBUG(DBG_MACE_TXIRQ | DBG_FUNC_END, 0, 0, 0, 0, 0 ); + } + else { + KERNEL_DEBUG(DBG_MACE_RXIRQ | DBG_FUNC_START, 0, 0, 0, 0, 0 ); + doFlushQueue = _receiveInterruptOccurred(); + KERNEL_DEBUG(DBG_MACE_RXIRQ | DBG_FUNC_END, 0, 0, 0, 0, 0 ); + } + + releaseDebuggerLock(); + + /* + * Submit all received packets queued up by _receiveInterruptOccurred() + * to the network stack. The up call is performed without holding the + * debugger lock. + */ + if (doFlushQueue) + networkInterface->flushInputQueue(); + + /* + * Make sure the output queue is not stalled. 
+ */ + if (doService && netifClient) + transmitQueue->service(); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +UInt32 MaceEnet::outputPacket(struct mbuf *pkt, void *param) +{ + u_int32_t i; + u_int8_t regValue; + UInt32 ret = kIOReturnOutputSuccess; + + // IOLog("Mace: outputPacket %d\n", pkt->m_pkthdr.len); + + KERNEL_DEBUG(DBG_MACE_TXQUEUE | DBG_FUNC_NONE, (int) pkt, + (int) pkt->m_pkthdr.len, 0, 0, 0 ); + + /* + * Hold the debugger lock so the debugger can't interrupt us + */ + reserveDebuggerLock(); + + do + { + /* + * Someone is turning off the receiver before the first transmit. + * Dont know who yet! + */ + regValue = ReadMaceRegister( ioBaseEnet, kMacCC ); + regValue |= kMacCCEnRcv; + WriteMaceRegister( ioBaseEnet, kMacCC, regValue ); + + /* + * Preliminary sanity checks + */ + assert(pkt && netifClient); + + /* + * Remove any completed packets from the Tx ring + */ + _transmitInterruptOccurred(); + + i = txCommandTail + 1; + if ( i >= txMaxCommand ) i = 0; + if ( i == txCommandHead ) + { + ret = kIOReturnOutputStall; + continue; + } + + /* + * If there is space on the Tx ring, add the packet directly to the + * ring + */ + _transmitPacket(pkt); + } + while ( 0 ); + + releaseDebuggerLock(); + + return ret; +} + +/*------------------------------------------------------------------------- + * Called by IOEthernetInterface client to enable the controller. + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn MaceEnet::enable(IONetworkInterface * netif) +{ + IONetworkParameter * param; + + // If an interface client has previously enabled us, + // and we know there can only be one interface client + // for this driver, then simply return true. 
+ // + if (netifClient) { + IOLog("Mace: already enabled\n"); + return kIOReturnSuccess; + } + + param = netif->getParameter(kIONetworkStatsKey); + if (!param || !(netStats = (IONetworkStats *) param->getBuffer())) + { + IOLog("Mace: invalid network statistics\n"); + return kIOReturnError; + } + + if ((ready == false) && !resetAndEnable(true)) + return kIOReturnIOError; + + // Record the interface as an active client. + // + netifClient = true; + + // Start our IOOutputQueue object. + // + transmitQueue->setCapacity(TRANSMIT_QUEUE_SIZE); + transmitQueue->start(); + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * Called by IOEthernetInterface client to disable the controller. + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn MaceEnet::disable(IONetworkInterface * /*netif*/) +{ + // If we have no active clients, then disable the controller. + // + if (debugClient == false) + resetAndEnable(false); + + // Disable our IOOutputQueue object. + // + transmitQueue->stop(); + + // Flush all packets currently in the output queue. + // + transmitQueue->setCapacity(0); + transmitQueue->flush(); + + netifClient = false; + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * This method is called by our debugger client to bring up the controller + * just before the controller is registered as the debugger device. The + * debugger client is attached in response to the attachDebuggerClient() + * call. + * + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn MaceEnet::enable(IOKernelDebugger * /*debugger*/) +{ + // Enable hardware and make it ready to support the debugger client. 
+ // + if ((ready == false) && !resetAndEnable(true)) + return kIOReturnIOError; + + // Record the debugger as an active client of ours. + // + debugClient = true; + + // Returning true will allow the kdp registration to continue. + // If we return false, then we will not be registered as the + // debugger device, and the attachDebuggerClient() call will + // return NULL. + // + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * This method is called by our debugger client to stop the controller. + * The debugger will call this method when we issue a detachDebuggerClient(). + * + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn MaceEnet::disable(IOKernelDebugger * /*debugger*/) +{ + debugClient = false; + + // If we have no active clients, then disable the controller. + // + if (netifClient == false) + resetAndEnable(false); + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::resetAndEnable(bool enable) +{ + bool ret = true; + + if (timerSrc) + timerSrc->cancelTimeout(); + + _disableAdapterInterrupts(); + if (getWorkLoop()) getWorkLoop()->disableAllInterrupts(); + + reserveDebuggerLock(); + + ready = false; + + _resetChip(); + + do { + if (!enable) break; + + if ( !_initRxRing() || !_initTxRing() || !_initChip() ) + { + ret = false; + break; + } + + _startChip(); + + ready = true; + + releaseDebuggerLock(); + + timerSrc->setTimeoutMS(WATCHDOG_TIMER_MS); + + if (getWorkLoop()) getWorkLoop()->enableAllInterrupts(); + _enableAdapterInterrupts(); + + return true; + } + while (0); + + releaseDebuggerLock(); + + return ret; +} + +/*------------------------------------------------------------------------- + * + * + * + 
*-------------------------------------------------------------------------*/ + +void MaceEnet::_sendTestPacket() +{ +// IOOutputPacketStatus ret; + unsigned char * buf; + const unsigned int size = 64; + + struct mbuf * m = allocatePacket(size); + if (!m) { + IOLog("Mace: _sendTestpacket: allocatePacket() failed\n"); + return; + } + + buf = mtod(m, unsigned char *); + + bcopy(&myAddress, buf, NUM_EN_ADDR_BYTES); + buf += NUM_EN_ADDR_BYTES; + bcopy(&myAddress, buf, NUM_EN_ADDR_BYTES); + buf += NUM_EN_ADDR_BYTES; + *buf++ = 0; + *buf++ = 0; + + outputPacket(m, 0); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::timeoutOccurred(IOTimerEventSource * /*timer*/) +{ + u_int32_t dmaStatus; + bool doFlushQueue = false; + bool doService = false; + + reserveDebuggerLock(); + + /* + * Check for DMA shutdown on receive channel + */ + dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); + if ( !(dmaStatus & kdbdmaActive) ) + { +#if 0 + IOLog("Mace: Timeout check - RxHead = %d RxTail = %d\n", + rxCommandHead, rxCommandTail); +#endif + +#if 0 + IOLog( "Mace: Rx Commands = %08x(p) Rx DMA Ptr = %08x(p)\n\r", rxDMACommandsPhys, IOGetDBDMACommandPtr(ioBaseEnetRxDMA) ); + [self _dumpDesc:(void *)rxDMACommands Size:rxMaxCommand * sizeof(enet_dma_cmd_t)]; +#endif + + doFlushQueue = _receiveInterruptOccurred(); + } + + /* + * If there are pending entries on the Tx ring + */ + if ( txCommandHead != txCommandTail ) + { + /* + * If we did not service the Tx ring during the last timeout interval, + * then force servicing of the Tx ring. + * If we have more than one timeout interval without any transmit + * interrupts, then force the transmitter to reset. 
+ */ + if ( txWDInterrupts == 0 ) + { + if ( ++txWDTimeouts > 1 ) txWDForceReset = true; + +#if 0 + IOLog( "Mace: Checking for timeout - TxHead = %d TxTail = %d\n", + txCommandHead, txCommandTail); +#endif + doService = _transmitInterruptOccurred(); + } + else + { + txWDTimeouts = 0; + txWDInterrupts = 0; + } + } + else + { + txWDTimeouts = 0; + txWDInterrupts = 0; + } + + // Clean-up after the debugger if the debugger was active. + // + if (debugTxPoll) + { + debugQueue->flush(); + debugTxPoll = false; + releaseDebuggerLock(); + doService = true; + } + else + { + releaseDebuggerLock(); + } + + /* + * Submit all received packets queued up by _receiveInterruptOccurred() + * to the network stack. The up call is performed without holding the + * debugger lock. + */ + if (doFlushQueue) + { + networkInterface->flushInputQueue(); + } + + /* + * Make sure the output queue is not stalled. + */ + if (doService && netifClient) + { + transmitQueue->service(); + } + + /* + * Restart the watchdog timer + */ + timerSrc->setTimeoutMS(WATCHDOG_TIMER_MS); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +const OSString * MaceEnet::newVendorString() const +{ + return OSString::withCString("Apple"); +} + +const OSString * MaceEnet::newModelString() const +{ + return OSString::withCString("Mace"); +} + +const OSString * MaceEnet::newRevisionString() const +{ + return OSString::withCString(""); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +IOReturn MaceEnet::_setPromiscuousMode(IOEnetPromiscuousMode mode) +{ + u_int8_t regVal; + + regVal = ReadMaceRegister( ioBaseEnet, kMacCC ); + WriteMaceRegister( ioBaseEnet, kMacCC, regVal & ~kMacCCEnRcv ); + if (mode == kIOEnetPromiscuousModeOff) { + regVal &= ~kMacCCProm; + isPromiscuous = false; 
+ } + else { + regVal |= kMacCCProm; + isPromiscuous = true; + } + WriteMaceRegister( ioBaseEnet, kMacCC, regVal ); + + return kIOReturnSuccess; + +} + +IOReturn MaceEnet::setPromiscuousMode(IOEnetPromiscuousMode mode) +{ + IOReturn ret; + + reserveDebuggerLock(); + ret = _setPromiscuousMode(mode); + releaseDebuggerLock(); + + return ret; +} + +IOReturn MaceEnet::setMulticastMode(IOEnetMulticastMode mode) +{ + multicastEnabled = (mode == kIOEnetMulticastModeOff) ? false : true; + return kIOReturnSuccess; +} + +IOReturn MaceEnet::setMulticastList(IOEthernetAddress *addrs, UInt32 count) +{ + reserveDebuggerLock(); + _resetHashTableMask(); + for (UInt32 i = 0; i < count; i++) { + _addToHashTableMask(addrs->bytes); + addrs++; + } + _updateHashTableMask(); + releaseDebuggerLock(); + return kIOReturnSuccess; +} + +/* + * Allocate an IOOutputQueue object. + */ +IOOutputQueue * MaceEnet::createOutputQueue() +{ + return IOGatedOutputQueue::withTarget( this, getWorkLoop() ); +} + +/* + * Kernel Debugger Support + */ +void MaceEnet::sendPacket(void *pkt, UInt32 pkt_len) +{ + _sendPacket(pkt, pkt_len); +} + +void MaceEnet::receivePacket(void *pkt, UInt32 *pkt_len, UInt32 timeout) +{ + _receivePacket(pkt, (UInt *) pkt_len, timeout); +} + +#if 0 // no power management stuff in IOKit yet. +/* + * Power management methods. 
+ */ +- (IOReturn)getPowerState:(PMPowerState *)state_p +{ + return kIOReturnUnsupported; +} + +- (IOReturn)setPowerState:(PMPowerState)state +{ + if (state == PM_OFF) { + resetAndEnabled = NO; + [self _resetChip]; + return kIOReturnSuccess; + } + return kIOReturnUnsupported; +} + +- (IOReturn)getPowerManagement:(PMPowerManagementState *)state_p +{ + return kIOReturnUnsupported; +} + +- (IOReturn)setPowerManagement:(PMPowerManagementState)state +{ + return kIOReturnUnsupported; +} +#endif /* 0 */ diff --git a/iokit/Drivers/network/drvMaceEnet/MaceEnet.h b/iokit/Drivers/network/drvMaceEnet/MaceEnet.h new file mode 100644 index 000000000..c01aef76d --- /dev/null +++ b/iokit/Drivers/network/drvMaceEnet/MaceEnet.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1996 NeXT Software, Inc. + * + * Interface definition for the Mace Ethernet chip + * + * HISTORY + * + * 16-Sept-97 + * Created. 
+ */ + +#ifndef _MACEENET_H +#define _MACEENET_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* bcopy */ + +#if 0 +#include /* Performance tracepoints */ +#else +#define KERNEL_DEBUG(x,a,b,c,d,e) +#endif + +#include "MaceEnetRegisters.h" + +extern "C" { +#include +#include +} + +#if 0 +#define IOLog kprintf +#endif + +typedef void * IOPPCAddress; + +typedef struct enet_dma_cmd_t +{ + IODBDMADescriptor desc_seg[2]; +} enet_dma_cmd_t; + +typedef struct enet_txdma_cmd_t +{ + IODBDMADescriptor desc_seg[4]; +} enet_txdma_cmd_t; + +class MaceEnet : public IOEthernetController +{ + OSDeclareDefaultStructors(MaceEnet) + +private: + volatile IOPPCAddress ioBaseEnet; + volatile IOPPCAddress ioBaseEnetROM; + volatile IODBDMAChannelRegisters *ioBaseEnetRxDMA; + volatile IODBDMAChannelRegisters *ioBaseEnetTxDMA; + + u_int16_t chipId; + + IOEthernetAddress myAddress; + IOEthernetInterface * networkInterface; + IOGatedOutputQueue * transmitQueue; + IOPacketQueue * debugQueue; + IOKernelDebugger * debugger; + bool isPromiscuous; + bool multicastEnabled; + bool ready; + bool netifClient; + bool debugClient; + bool debugTxPoll; + + IOWorkLoop * workLoop; + IOInterruptEventSource *rxIntSrc; + IOInterruptEventSource *txIntSrc; + IOMemoryMap * maps[MEMORY_MAP_COUNT]; + IOMemoryMap * romMap; + IONetworkStats * netStats; + IOTimerEventSource * timerSrc; + IOMbufBigMemoryCursor * mbufCursor; + + struct mbuf * txMbuf[TX_RING_LENGTH]; + struct mbuf * rxMbuf[RX_RING_LENGTH]; + struct mbuf * txDebuggerPkt; + + unsigned int txCommandHead; /* Transmit ring descriptor index */ + unsigned int txCommandTail; + unsigned int txMaxCommand; + unsigned int rxCommandHead; /* Receive ring descriptor index */ + unsigned int rxCommandTail; + unsigned int rxMaxCommand; + + struct { + void *ptr; + u_int size; + void *ptrReal; + u_int sizeReal; + } dmaMemory; + + unsigned char * dmaCommands; + enet_txdma_cmd_t * txDMACommands; /* TX descriptor ring ptr */ + 
unsigned int txDMACommandsPhys; + + enet_dma_cmd_t * rxDMACommands; /* RX descriptor ring ptr */ + unsigned int rxDMACommandsPhys; + + u_int32_t txWDInterrupts; + u_int32_t txWDTimeouts; + bool txWDForceReset; + + void * debuggerPkt; + u_int32_t debuggerPktSize; + + u_int16_t hashTableUseCount[64]; + u_int8_t hashTableMask[8]; + + bool _allocateMemory(); + bool _initTxRing(); + bool _initRxRing(); + bool _initChip(); + void _resetChip(); + void _disableAdapterInterrupts(); + void _enableAdapterInterrupts(); + void _startChip(); + void _restartChip(); + void _stopReceiveDMA(); + void _stopTransmitDMA(); + bool _transmitPacket(struct mbuf * packet); + bool _transmitInterruptOccurred(bool fDebugger = false); + bool _receiveInterruptOccurred(); + bool _receivePackets(bool fDebugger); + void _packetToDebugger(struct mbuf * packet, u_int size); + bool _updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t *desc, + bool isReceive); + void _resetHashTableMask(); + void _addToHashTableMask(u_int8_t *addr); + void _removeFromHashTableMask(u_int8_t *addr); + void _updateHashTableMask(); +#ifdef DEBUG + void _dumpRegisters(); + void _dumpDesc(void * addr, u_int32_t size); +#endif + IOReturn _setPromiscuousMode(IOEnetPromiscuousMode mode); + void MaceEnet::_sendTestPacket(); + + /* + * Kernel Debugger + */ + void _sendPacket(void *pkt, unsigned int pkt_len); + void _receivePacket(void *pkt, unsigned int *pkt_len, unsigned int + timeout); + + bool resetAndEnable(bool enable); + void interruptOccurredForSource(IOInterruptEventSource *src, int count); + void timeoutOccurred(IOTimerEventSource *timer); + +public: + virtual MaceEnet * MaceEnet::probe(IOService * provider, + unsigned int * score, + unsigned int * specificity); + virtual bool init(OSDictionary * properties = 0); + virtual bool start(IOService * provider); + virtual void free(); + + virtual bool createWorkLoop(); + virtual IOWorkLoop * getWorkLoop() const; + + virtual IOReturn enable(IONetworkInterface * netif); + 
virtual IOReturn disable(IONetworkInterface * netif); + + virtual IOReturn enable(IOKernelDebugger * debugger); + virtual IOReturn disable(IOKernelDebugger * debugger); + + virtual IOReturn getHardwareAddress(IOEthernetAddress *addr); + + virtual IOReturn setMulticastMode(IOEnetMulticastMode mode); + virtual IOReturn setMulticastList(IOEthernetAddress *addrs, UInt32 count); + + virtual IOReturn setPromiscuousMode(IOEnetPromiscuousMode mode); + + virtual IOOutputQueue * createOutputQueue(); + + virtual UInt32 outputPacket(struct mbuf * m, void * param); + + virtual void sendPacket(void *pkt, UInt32 pkt_len); + virtual void receivePacket(void *pkt, UInt32 *pkt_len, UInt32 timeout); + + virtual const OSString * newVendorString() const; + virtual const OSString * newModelString() const; + virtual const OSString * newRevisionString() const; +}; + +#if 0 // no power management stuff in IOKit yet. +/* + * Power management methods. + */ +- (IOReturn)getPowerState:(PMPowerState *)state_p; +- (IOReturn)setPowerState:(PMPowerState)state; +- (IOReturn)getPowerManagement:(PMPowerManagementState *)state_p; +- (IOReturn)setPowerManagement:(PMPowerManagementState)state; +#endif + +/* + * Performance tracepoints + * + * DBG_MACE_RXIRQ - Receive ISR run time + * DBG_MACE_TXIRQ - Transmit ISR run time + * DBG_MACE_TXQUEUE - Transmit packet passed from network stack + * DBG_MACE_TXCOMPLETE - Transmit packet sent + * DBG_MACE_RXCOMPLETE - Receive packet passed to network stack + */ +#define DBG_MACE_ENET 0x0800 +#define DBG_MACE_RXIRQ DRVDBG_CODE(DBG_DRVNETWORK,(DBG_MACE_ENET+1)) +#define DBG_MACE_TXIRQ DRVDBG_CODE(DBG_DRVNETWORK,(DBG_MACE_ENET+2)) +#define DBG_MACE_TXQUEUE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_MACE_ENET+3)) +#define DBG_MACE_TXCOMPLETE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_MACE_ENET+4)) +#define DBG_MACE_RXCOMPLETE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_MACE_ENET+5)) + +#endif /* !_MACEENET_H */ diff --git a/iokit/Drivers/network/drvMaceEnet/MaceEnetHW.cpp 
b/iokit/Drivers/network/drvMaceEnet/MaceEnetHW.cpp new file mode 100644 index 000000000..68f3c05a1 --- /dev/null +++ b/iokit/Drivers/network/drvMaceEnet/MaceEnetHW.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1996 NeXT Software, Inc. + * + * Inline definitions for the Apple Mace Ethernet Controller. + * + * HISTORY + * + * 11-Sept-97 + * Created. 
+ */ + +#include "MaceEnetRegisters.h" +#include "MaceEnetPrivate.h" + +void WriteMaceRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset, u_int8_t data ) +{ + *((volatile u_int8_t *)ioEnetBase + reg_offset) = data; + eieio(); +} + + +volatile u_int8_t ReadMaceRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset ) +{ + return ((volatile u_int8_t *)ioEnetBase)[reg_offset] ; +} diff --git a/iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.cpp b/iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.cpp new file mode 100644 index 000000000..c1af0e36a --- /dev/null +++ b/iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.cpp @@ -0,0 +1,1793 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1996 NeXT Software, Inc. + * + * Implementation for hardware dependent (relatively) code + * for the Mace Ethernet controller. + * + * HISTORY + * + * 10-Sept-97 + * Created. + * + */ + +#include +#include +#include +#include "MaceEnetPrivate.h" + + +/***************************************************************************** + * + * Hacks. 
+ */ + +typedef unsigned long long ns_time_t; /* nanoseconds! */ + +#define NSEC_PER_SEC 1000000000 + +static void +_IOGetTimestamp(ns_time_t *nsp) +{ + mach_timespec_t now; + + IOGetTime(&now); + *nsp = ((ns_time_t)now.tv_sec * NSEC_PER_SEC) + now.tv_nsec; +} + +/* + * Find a physical address (if any) for the specified virtual address. + * + * Note: what about vm_offset_t kvtophys(vm_offset_t va) + */ +static IOReturn _IOPhysicalFromVirtual( + vm_address_t virtualAddress, + unsigned *physicalAddress) +{ + *physicalAddress = pmap_extract(kernel_pmap, virtualAddress); + if(*physicalAddress == 0) { + return kIOReturnBadArgument; + } + else { + return kIOReturnSuccess; + } +} + +// From osfmk/ppc/pmap.h +// +extern "C" { +extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys); +extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys); +} + +static inline void +invalidate_cache_v(vm_offset_t va, unsigned length) +{ + invalidate_dcache(va, length, 0); +} + +static inline void +flush_cache_v(vm_offset_t va, unsigned length) +{ + flush_dcache(va, length, 0); +} + +/****************************************************************************/ + +static IODBDMADescriptor dbdmaCmd_Nop; +static IODBDMADescriptor dbdmaCmd_NopWInt; +static IODBDMADescriptor dbdmaCmd_LoadXFS; +static IODBDMADescriptor dbdmaCmd_LoadIntwInt; +static IODBDMADescriptor dbdmaCmd_Stop; +static IODBDMADescriptor dbdmaCmd_Branch; + + +static u_int8_t reverseBitOrder(u_int8_t data ) +{ + u_int8_t val = 0; + int i; + + for ( i=0; i < 8; i++ ) + { + val <<= 1; + if (data & 1) val |= 1; + data >>= 1; + } + return( val ); +} + +/* + * Function: IOMallocPage + * + * Purpose: + * Returns a pointer to a page-aligned memory block of size >= PAGE_SIZE + * + * Return: + * Actual pointer and size of block returned in actual_ptr and actual_size. 
+ * Use these as arguments to kfree: kfree(*actual_ptr, *actual_size); + */ +static void * +IOMallocPage(int request_size, void ** actual_ptr, u_int * actual_size) +{ + void * mem_ptr; + + *actual_size = round_page(request_size) + PAGE_SIZE; + mem_ptr = IOMalloc(*actual_size); + if (mem_ptr == NULL) + return NULL; + *actual_ptr = mem_ptr; + return ((void *)round_page(mem_ptr)); +} + +/* + * Private functions + */ +bool MaceEnet::_allocateMemory() +{ + u_int32_t i, n; + unsigned char * virtAddr; + u_int32_t physBase; + u_int32_t physAddr; + u_int32_t dbdmaSize; + + /* + * Calculate total space for DMA channel commands + */ + dbdmaSize = round_page( + RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + + TX_RING_LENGTH * sizeof(enet_txdma_cmd_t) + + 2 * sizeof(IODBDMADescriptor) ); + + /* + * Allocate required memory + */ + dmaMemory.size = dbdmaSize; + dmaMemory.ptr = (void *)IOMallocPage( + dmaMemory.size, + &dmaMemory.ptrReal, + &dmaMemory.sizeReal + ); + + dmaCommands = (unsigned char *) dmaMemory.ptr; + if (!dmaCommands) { + IOLog( "Mace: Cant allocate channel DBDMA commands\n\r" ); + return false; + } + + /* + * If we needed more than one page, then make sure we received + * contiguous memory. 
+ */ + n = (dbdmaSize - PAGE_SIZE) / PAGE_SIZE; + _IOPhysicalFromVirtual((vm_address_t) dmaCommands, &physBase ); + + virtAddr = (unsigned char *) dmaCommands; + for( i=0; i < n; i++, virtAddr += PAGE_SIZE ) + { + _IOPhysicalFromVirtual( (vm_address_t) virtAddr, &physAddr ); + if (physAddr != (physBase + i * PAGE_SIZE) ) + { + IOLog("Mace: Cannot allocate contiguous memory for DBDMA " + "commands\n"); + return false; + } + } + + /* + * Setup the receive ring pointers + */ + rxDMACommands = (enet_dma_cmd_t *)dmaCommands; + rxMaxCommand = RX_RING_LENGTH; + + /* + * Setup the transmit ring pointers + */ + txDMACommands = (enet_txdma_cmd_t *)( + dmaCommands + + RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + + sizeof(IODBDMADescriptor)); + + txMaxCommand = TX_RING_LENGTH; + + /* + * Setup pre-initialized DBDMA commands + */ + IOMakeDBDMADescriptor( (&dbdmaCmd_Nop), + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0 ); + + IOMakeDBDMADescriptor( (&dbdmaCmd_NopWInt), + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntAlways, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0 ); + + UInt32 ioBaseEnetPhys = maps[MEMORY_MAP_ENET_INDEX]->getPhysicalAddress(); + + IOMakeDBDMADescriptor( (&dbdmaCmd_LoadXFS), + kdbdmaLoadQuad, + kdbdmaKeySystem, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 1, + ((int)ioBaseEnetPhys + kXmtFS) ); + + IOMakeDBDMADescriptor( (&dbdmaCmd_LoadIntwInt), + kdbdmaLoadQuad, + kdbdmaKeySystem, + kdbdmaIntAlways, + kdbdmaBranchNever, + kdbdmaWaitNever, + 1, + ((int)ioBaseEnetPhys + kIntReg) ); + + IOMakeDBDMADescriptor( (&dbdmaCmd_Stop), + kdbdmaStop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0 ); + + IOMakeDBDMADescriptor( (&dbdmaCmd_Branch), + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchAlways, + kdbdmaWaitNever, + 0, + 0 ); + + return true; +} + +/*------------------------------------------------------------------------- + * + * Setup the 
Transmit Ring + * ----------------------- + * Each transmit ring entry consists of two words to transmit data from buffer + * segments (possibly) spanning a page boundary. This is followed by two DMA + * commands which read transmit frame status and interrupt status from the Mace + * chip. The last DMA command in each transmit ring entry generates a host + * interrupt. The last entry in the ring is followed by a DMA branch to the + * first entry. + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_initTxRing() +{ + bool kr; + u_int32_t i; + + /* + * Clear the transmit DMA command memory + */ + bzero( (void *)txDMACommands, sizeof(enet_txdma_cmd_t) * txMaxCommand); + txCommandHead = 0; + txCommandTail = 0; + + /* + * DMA Channel commands 2,3 are the same for all DBDMA entries on transmit. + * Initialize them now. + */ + for( i=0; i < txMaxCommand; i++ ) + { + txDMACommands[i].desc_seg[2] = dbdmaCmd_LoadXFS; + txDMACommands[i].desc_seg[3] = dbdmaCmd_LoadIntwInt; + } + + /* + * Put a DMA Branch command after the last entry in the transmit ring. + * Set the branch address to the physical address of the start of the + * transmit ring. + */ + txDMACommands[txMaxCommand].desc_seg[0] = dbdmaCmd_Branch; + + kr = _IOPhysicalFromVirtual( (vm_address_t) txDMACommands, + (u_int32_t *)&txDMACommandsPhys ); + if ( kr != kIOReturnSuccess ) + { + IOLog("Mace: Bad Tx DBDMA command buf - %08x\n\r", + (u_int32_t)txDMACommands ); + } + IOSetCCCmdDep( &txDMACommands[txMaxCommand].desc_seg[0], + txDMACommandsPhys ); + + /* + * Set the Transmit DMA Channel pointer to the first entry in the + * transmit ring. + */ + IOSetDBDMACommandPtr( ioBaseEnetTxDMA, txDMACommandsPhys ); + + /* + * Push the DMA channel words into physical memory. 
+ */ + flush_cache_v( (vm_offset_t)txDMACommands, + txMaxCommand*sizeof(enet_txdma_cmd_t) + sizeof(IODBDMADescriptor)); + + return true; +} + +/*------------------------------------------------------------------------- + * + * Setup the Receive ring + * ---------------------- + * Each receive ring entry consists of two DMA commands to receive data + * into a network buffer (possibly) spanning a page boundary. The second + * DMA command in each entry generates a host interrupt. + * The last entry in the ring is followed by a DMA branch to the first + * entry. + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_initRxRing() +{ + u_int32_t i; + bool status; + IOReturn kr; + + /* + * Clear the receive DMA command memory + */ + bzero( (void *)rxDMACommands, sizeof(enet_dma_cmd_t) * rxMaxCommand); + + kr = _IOPhysicalFromVirtual( (vm_address_t) rxDMACommands, + (u_int32_t *)&rxDMACommandsPhys ); + if ( kr != kIOReturnSuccess ) + { + IOLog("Mace: Bad Rx DBDMA command buf - %08x\n\r", + (u_int32_t)rxDMACommands ); + return false; + } + + /* + * Allocate a receive buffer for each entry in the Receive ring + */ + for (i = 0; i < rxMaxCommand-1; i++) + { + if (rxMbuf[i] == 0) + { + rxMbuf[i] = allocatePacket(NETWORK_BUFSIZE); + + if (!rxMbuf[i]) + { + IOLog("Mace: allocatePacket failed in _initRxRing()\n\r"); + return false; + } + } + + /* + * Set the DMA commands for the ring entry to transfer data to the + * mbuf. + */ + status = _updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true); + if (status == false) + { + IOLog("Mace: Cant map mbuf to physical memory in _initRxRing\n\r"); + return false; + } + } + + /* + * Set the receive queue head to point to the first entry in the ring. 
+ * Set the receive queue tail to point to a DMA Stop command after the + * last ring entry + */ + rxCommandHead = 0; + rxCommandTail = i; + + rxDMACommands[i].desc_seg[0] = dbdmaCmd_Stop; + rxDMACommands[i].desc_seg[1] = dbdmaCmd_Nop; + + /* + * Setup a DMA branch command after the stop command + */ + i++; + rxDMACommands[i].desc_seg[0] = dbdmaCmd_Branch; + + IOSetCCCmdDep( &rxDMACommands[i].desc_seg[0], rxDMACommandsPhys ); + + /* + * Set DMA command pointer to first receive entry + */ + IOSetDBDMACommandPtr( ioBaseEnetRxDMA, rxDMACommandsPhys ); + + /* + * Push DMA commands to physical memory + */ + flush_cache_v( (vm_offset_t)&rxDMACommands[rxCommandTail], + 2 * sizeof(enet_dma_cmd_t) ); + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_startChip() +{ + WriteMaceRegister( ioBaseEnet, kMacCC, kMacCCEnXmt | kMacCCEnRcv ); + + // enable rx dma channel + IODBDMAContinue( ioBaseEnetRxDMA ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_resetChip() +{ + u_int8_t regValue; + + /* + * Mace errata - chip reset does not clear pending interrupts + */ + ReadMaceRegister( ioBaseEnet, kIntReg ); + + IODBDMAReset( ioBaseEnetRxDMA ); + IODBDMAReset( ioBaseEnetTxDMA ); + + IOSetDBDMAWaitSelect( ioBaseEnetTxDMA, + IOSetDBDMAChannelControlBits( kdbdmaS5 ) ); + + IOSetDBDMABranchSelect( ioBaseEnetRxDMA, + IOSetDBDMAChannelControlBits( kdbdmaS6 ) ); + + IOSetDBDMAInterruptSelect( ioBaseEnetRxDMA, + IOSetDBDMAChannelControlBits( kdbdmaS6 ) ); + + WriteMaceRegister( ioBaseEnet, kBIUCC, kBIUCCSWRst ); + do + { + regValue = ReadMaceRegister( ioBaseEnet, kBIUCC ); + } + while( regValue & kBIUCCSWRst ); +} + +/*------------------------------------------------------------------------- + * 
+ * + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_initChip() +{ + volatile u_int16_t regValue; + u_int32_t i; + + _disableAdapterInterrupts(); + + chipId = ReadMaceRegister( ioBaseEnet, kMaceChipId0 ); + chipId |= ReadMaceRegister( ioBaseEnet, kMaceChipId1 ) << 8; + + /* + * Turn off ethernet header stripping + */ + regValue = ReadMaceRegister( ioBaseEnet, kRcvFC ); + regValue &= ~kRcvFCAStrpRcv; + WriteMaceRegister( ioBaseEnet, kRcvFC, regValue ); + + /* + * Set Mace destination address. + */ + if ( chipId != kMaceRevisionA2 ) + { + WriteMaceRegister( ioBaseEnet, kIAC, kIACAddrChg | kIACPhyAddr ); + do + { + regValue = ReadMaceRegister( ioBaseEnet, kIAC ); + } + while( regValue & kIACAddrChg ); + } + else + { + WriteMaceRegister( ioBaseEnet, kIAC, kIACPhyAddr ); + } + + for (i=0; i < sizeof(IOEthernetAddress); i++ ) + { + WriteMaceRegister( ioBaseEnet, kPADR, + reverseBitOrder(((unsigned char *)ioBaseEnetROM)[i<<4]) ); + } + + /* + * Clear logical address (multicast) filter + */ + if ( chipId != kMaceRevisionA2 ) + { + WriteMaceRegister( ioBaseEnet, kIAC, kIACAddrChg | kIACLogAddr ); + do + { + regValue = ReadMaceRegister( ioBaseEnet, kIAC ); + } + while( regValue & kIACAddrChg ); + } + else + { + WriteMaceRegister( ioBaseEnet, kIAC, kIACLogAddr ); + } + + for (i = 0; i < 8; i++ ) + { + WriteMaceRegister( ioBaseEnet, kLADRF, 0 ); + } + + /* + * Enable ethernet transceiver + */ + WriteMaceRegister( ioBaseEnet, kPLSCC, kPLSCCPortSelGPSI | kPLSCCEnSts ); + + return true; +} + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_restartChip() +{ + /* + * Shutdown DMA channels + */ + _stopReceiveDMA(); + _stopTransmitDMA(); + + /* + * Get the silicon's attention + */ + _resetChip(); + _initChip(); + + /* + * Restore multicast settings + */ + _updateHashTableMask(); + + if ( 
isPromiscuous ) + { + _setPromiscuousMode(kIOEnetPromiscuousModeOn); + } + + /* + * Enable receiver and transmitter + */ + _startChip(); + _enableAdapterInterrupts(); + + /* + * Restart transmit DMA + */ + IODBDMAContinue( ioBaseEnetTxDMA ); +} + +/*------------------------------------------------------------------------- + * + * Orderly stop of receive DMA. + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_stopReceiveDMA() +{ + u_int32_t dmaStatus; + u_int32_t dmaCmdPtr; + u_int32_t dmaIndex; + u_int8_t tmpBuf[16]; + u_int8_t *p = 0; + u_int8_t MacCCReg; + + /* + * Stop the receiver and allow any frame receive in progress to complete + */ + MacCCReg = ReadMaceRegister( ioBaseEnet, kMacCC ); + WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg & ~kMacCCEnRcv ); + IODelay( RECEIVE_QUIESCE_uS ); + + /* + * Capture channel status and pause the dma channel. + */ + dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); + IODBDMAPause( ioBaseEnetRxDMA ); + + /* + * Read the command pointer and convert it to a byte offset into the + * DMA program. + */ + dmaCmdPtr = IOGetDBDMACommandPtr( ioBaseEnetRxDMA ); + dmaIndex = (dmaCmdPtr - rxDMACommandsPhys); + + /* + * If the channel status is DEAD, the DMA pointer is pointing to the + * next command + */ + if ( dmaStatus & kdbdmaDead ) + { + dmaIndex -= sizeof(IODBDMADescriptor); + } + + /* + * Convert channel program offset to command index + */ + dmaIndex = dmaIndex / sizeof(enet_dma_cmd_t); + if ( dmaIndex >= rxMaxCommand ) dmaIndex = 0; + + /* + * The DMA controller doesnt like being stopped before transferring any + * data. + * + * When we do so it pollutes up to 16-bytes aligned to the nearest (lower) + * 16-byte boundary. This corruption can be outside the data transfer area + * of the mbuf, so we capture and then restore these bytes after stopping + * the channel. 
+ * + */ + if ( rxMbuf[dmaIndex] ) + { + p = mtod(rxMbuf[dmaIndex], u_int8_t *); + } + + (u_int32_t)p &= ~0x0f; + + if ( p ) + { + bcopy( p, tmpBuf, 16 ); + } + + IODBDMAReset( ioBaseEnetRxDMA ); + + if ( p ) + { + bcopy( tmpBuf, p, 16 ); + } + + /* + * Reset the dma channel pointer to the nearest command index + */ + dmaCmdPtr = rxDMACommandsPhys + sizeof(enet_dma_cmd_t) * dmaIndex; + IOSetDBDMACommandPtr( ioBaseEnetRxDMA, dmaCmdPtr); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_stopTransmitDMA() +{ + u_int32_t dmaStatus; + u_int32_t dmaCmdPtr; + u_int32_t dmaIndex; + u_int8_t MacCCReg; + + /* + * Stop the transmitter and allow any frame transmit in progress to abort + */ + MacCCReg = ReadMaceRegister( ioBaseEnet, kMacCC ); + WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg & ~kMacCCEnXmt ); + IODelay( TRANSMIT_QUIESCE_uS ); + + /* + * Capture channel status and pause the dma channel. + */ + dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetTxDMA ); + IODBDMAPause( ioBaseEnetTxDMA ); + + /* + * Read the command pointer and convert it to a byte offset into the + * DMA program. 
+ */
+ dmaCmdPtr = IOGetDBDMACommandPtr( ioBaseEnetTxDMA );
+ dmaIndex = (dmaCmdPtr - txDMACommandsPhys);
+
+ /*
+ * If the channel status is DEAD, the DMA pointer is pointing to the
+ * next command
+ */
+ if ( dmaStatus & kdbdmaDead )
+ {
+ dmaIndex -= sizeof(IODBDMADescriptor);
+ }
+
+ /*
+ * Convert channel program offset to command index
+ */
+ dmaIndex = dmaIndex / sizeof(enet_txdma_cmd_t);
+ if ( dmaIndex >= txMaxCommand ) dmaIndex = 0;
+
+ IODBDMAReset( ioBaseEnetTxDMA );
+
+ /*
+ * Reset the dma channel pointer to the nearest command index
+ */
+ dmaCmdPtr = txDMACommandsPhys + sizeof(enet_txdma_cmd_t) * dmaIndex;
+ IOSetDBDMACommandPtr( ioBaseEnetTxDMA, dmaCmdPtr );
+}
+
+/*-------------------------------------------------------------------------
+ *
+ *
+ *
+ *-------------------------------------------------------------------------*/
+
+void MaceEnet::_disableAdapterInterrupts()
+{
+ WriteMaceRegister( ioBaseEnet, kIntMask, 0xFF );
+}
+
+/*-------------------------------------------------------------------------
+ *
+ * _enableAdapterInterrupts
+ *
+ * It appears that, to make the Mace chip work properly with the DBDMA
+ * channel, we need to leave the transmit interrupt unmasked at the chip.
+ * This is weird, but that's what happens when you try to glue a chip that
+ * wasn't intended to work with a DMA engine onto a DMA channel.
+ * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_enableAdapterInterrupts() +{ + u_int8_t regValue; + + regValue = ReadMaceRegister( ioBaseEnet, kIntMask ); + regValue &= ~kIntMaskXmtInt; + WriteMaceRegister( ioBaseEnet, kIntMask, regValue ); + IODelay(500); + ReadMaceRegister( ioBaseEnet, kXmtFS ); + ReadMaceRegister( ioBaseEnet, kIntReg ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_transmitPacket(struct mbuf * packet) +{ + enet_dma_cmd_t tmpCommand; + u_int32_t i; + + /* + * Check for room on the transmit ring. There should always be space + * since it is the responsibility of the caller to verify this before + * calling _transmitPacket. + * + * Get a copy of the DMA transfer commands in a temporary buffer. + * The new DMA command is written into the channel program so that the + * command word for the old Stop command is overwritten last. This prevents + * the DMA engine from executing a partially written channel command. + */ + i = txCommandTail + 1; + if ( i >= txMaxCommand ) i = 0; + + if ( (i == txCommandHead) || + !_updateDescriptorFromMbuf(packet, &tmpCommand, false)) + { + IOLog("Mace: Freeing transmit packet eh?\n\r"); + if (packet != txDebuggerPkt) + freePacket(packet); + return false; + } + + /* + * txCommandTail points to the current DMA Stop command for the channel. + * We are now creating a new DMA Stop command in the next slot in the + * transmit ring. The previous DMA Stop command will be overwritten with + * the DMA commands to transfer the new mbuf. 
+ */ + txDMACommands[i].desc_seg[0] = dbdmaCmd_Stop; + txDMACommands[i].desc_seg[1] = dbdmaCmd_Nop; + + flush_cache_v( (vm_offset_t)&txDMACommands[i], sizeof(enet_dma_cmd_t) ); + + bcopy( ((u_int32_t *)&tmpCommand)+1, + ((u_int32_t *)&txDMACommands[txCommandTail])+1, + sizeof(enet_dma_cmd_t)-sizeof(u_int32_t) ); + + flush_cache_v( (vm_offset_t)&txDMACommands[txCommandTail], + sizeof(enet_dma_cmd_t) ); + + txMbuf[txCommandTail] = packet; + txDMACommands[txCommandTail].desc_seg[0].operation = + tmpCommand.desc_seg[0].operation; + + flush_cache_v( (vm_offset_t)&txDMACommands[txCommandTail], + sizeof(enet_dma_cmd_t) ); + + /* + * Set the transmit tail to the new stop command. + */ + txCommandTail = i; + + /* + * Tap the DMA channel to wake it up + */ + IODBDMAContinue( ioBaseEnetTxDMA ); + + return true; +} + +/*------------------------------------------------------------------------- + * _receivePacket + * -------------- + * This routine runs the receiver in polled-mode (yuk!) for the kernel + * debugger. + * + * The _receivePackets allocate mbufs and pass them up the stack. The kernel + * debugger interface passes a buffer into us. To reconcile the two interfaces, + * we allow the receive routine to continue to allocate its own buffers and + * transfer any received data to the passed-in buffer. This is handled by + * _receivePacket calling _packetToDebugger. 
+ *-------------------------------------------------------------------------*/ + +void MaceEnet::_receivePacket(void *pkt, unsigned int *pkt_len, + unsigned int timeout) +{ + ns_time_t startTime; + ns_time_t currentTime; + u_int32_t elapsedTimeMS; + + if (!ready || !pkt || !pkt_len) + return; + + *pkt_len = 0; + + debuggerPkt = pkt; + debuggerPktSize = 0; + + _IOGetTimestamp(&startTime); + do + { + _receivePackets(true); + _IOGetTimestamp(¤tTime); + elapsedTimeMS = (currentTime - startTime) / (1000*1000); + } + while ( (debuggerPktSize == 0) && (elapsedTimeMS < timeout) ); + + *pkt_len = debuggerPktSize; + + return; +} + +/*------------------------------------------------------------------------- + * _packetToDebugger + * ----------------- + * This is called by _receivePackets when we are polling for kernel debugger + * packets. It copies the mbuf contents to the buffer passed by the debugger. + * It also sets the var debuggerPktSize which will break the polling loop. + *-------------------------------------------------------------------------*/ + +void MaceEnet::_packetToDebugger(struct mbuf * packet, u_int size) +{ + debuggerPktSize = size; + bcopy( mtod(packet, char *), debuggerPkt, size ); +} + +/*------------------------------------------------------------------------- + * _sendPacket + * ----------- + * + * This routine runs the transmitter in polled-mode (yuk!) for the + * kernel debugger. 
+ * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_sendPacket(void *pkt, unsigned int pkt_len) +{ + ns_time_t startTime; + ns_time_t currentTime; + u_int32_t elapsedTimeMS; + + if ( !ready || !pkt || (pkt_len > ETHERMAXPACKET)) + return; + + /* + * Wait for the transmit ring to empty + */ + _IOGetTimestamp(&startTime); + do + { + _transmitInterruptOccurred(true); + _IOGetTimestamp(¤tTime); + elapsedTimeMS = (currentTime - startTime) / (1000*1000); + } + while ( (txCommandHead != txCommandTail) && + (elapsedTimeMS < TX_KDB_TIMEOUT) ); + + if ( txCommandHead != txCommandTail ) + { + IOLog( "Mace: Polled tranmit timeout - 1\n\r"); + return; + } + + txDebuggerPkt->m_next = 0; + txDebuggerPkt->m_data = (caddr_t) pkt; + txDebuggerPkt->m_pkthdr.len = txDebuggerPkt->m_len = pkt_len; + + /* + * Send the debugger packet. txDebuggerPkt must not be freed by + * the transmit routine. + */ + _transmitPacket(txDebuggerPkt); + + /* + * Poll waiting for the transmit ring to empty again + */ + do + { + _transmitInterruptOccurred(true); + _IOGetTimestamp(¤tTime); + elapsedTimeMS = (currentTime - startTime) / (1000*1000); + } + while ( (txCommandHead != txCommandTail) && + (elapsedTimeMS < TX_KDB_TIMEOUT) ); + + if ( txCommandHead != txCommandTail ) + { + IOLog("Mace: Polled transmit timeout - 2\n\r"); + } + + return; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_receiveInterruptOccurred() +{ + return _receivePackets(false); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_receivePackets(bool fDebugger) +{ + enet_dma_cmd_t tmpCommand; + struct mbuf * packet; + u_int32_t i,j,last; + u_int32_t dmaChnlStatus; + int receivedFrameSize = 0; + u_int32_t 
dmaCount[2], dmaResid[2], dmaStatus[2]; + bool reusePkt; + bool status; + bool useNetif = !fDebugger && netifClient; + bool packetsQueued = false; + u_int8_t *rxFS = NULL; + u_int32_t nextDesc; + static const u_int32_t lastResetValue = (u_int32_t)(-1); + + last = lastResetValue; + i = rxCommandHead; + + while ( 1 ) + { + reusePkt = false; + + /* + * Purge cache references for the DBDMA entry we are about to look at. + */ + invalidate_cache_v((vm_offset_t)&rxDMACommands[i], + sizeof(enet_dma_cmd_t)); + + /* + * Collect the DMA residual counts/status for the two buffer segments. + */ + for ( j = 0; j < 2; j++ ) + { + dmaResid[j] = IOGetCCResult( &rxDMACommands[i].desc_seg[j] ); + dmaStatus[j] = dmaResid[j] >> 16; + dmaResid[j] &= 0x0000ffff; + dmaCount[j] = IOGetCCOperation( &rxDMACommands[i].desc_seg[j] ) & + kdbdmaReqCountMask; + } + +#if 0 + IOLog("Ethernet(Mace): Rx NetBuf[%2d] = %08x Resid[0] = %04x Status[0] = %04x Resid[1] = %04x Status[1] = %04x\n\r", + i, (int)nb_map(rxNetbuf[i]), dmaResid[0], dmaStatus[0], dmaResid[1], dmaStatus[1] ); +#endif + + /* + * If the current entry has not been written, then stop at this entry + */ + if ( !((dmaStatus[0] & kdbdmaBt) || (dmaStatus[1] & kdbdmaActive)) ) + { + break; + } + + /* + * The Mace Ethernet controller appends four bytes to each receive + * buffer containing the buffer size and receive frame status. + * We locate these bytes by using the DMA residual counts. + */ + receivedFrameSize = dmaCount[0] - dmaResid[0] + dmaCount[1] - + ((dmaStatus[0] & kdbdmaBt) ? dmaCount[1] : dmaResid[1]); + + if ( ( receivedFrameSize >= 4 ) && + ( receivedFrameSize <= NETWORK_BUFSIZE ) ) + { + /* + * Get the receive frame size as reported by the Mace controller + */ + + rxFS = mtod(rxMbuf[i], u_int8_t *) + receivedFrameSize - 4; + + receivedFrameSize = (u_int16_t) rxFS[0] | + (rxFS[1] & kRcvFS1RcvCnt) << 8; + } + + /* + * Reject packets that are runts or that have other mutations. 
+ */ + if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) || + receivedFrameSize > (ETHERMAXPACKET + ETHERCRC) || + (rxFS[1] & (kRcvFS1OFlo | kRcvFS1Clsn | kRcvFS1Fram | kRcvFS1FCS)) + ) + { + if (useNetif) netStats->inputErrors++; + reusePkt = true; + } + else if ( useNetif == false ) + { + /* + * Always reuse packets in debugger mode. + */ + reusePkt = true; + if (fDebugger) + _packetToDebugger(rxMbuf[i], receivedFrameSize); + } + + /* + * Before we pass this packet up the networking stack. Make sure we + * can get a replacement. Otherwise, hold on to the current packet and + * increment the input error count. + * Thanks Justin! + */ + + packet = 0; + + if ( reusePkt == false ) + { + bool replaced; + + packet = replaceOrCopyPacket(&rxMbuf[i], receivedFrameSize, + &replaced); + + reusePkt = true; + + if (packet && replaced) + { + status = _updateDescriptorFromMbuf(rxMbuf[i], + &rxDMACommands[i], true); + + if (status) + { + reusePkt = false; + } + else + { + // Assume descriptor has not been corrupted. + freePacket(rxMbuf[i]); // release new packet. + rxMbuf[i] = packet; // get the old packet back. + packet = 0; // pass up nothing. + IOLog("Mace: _updateDescriptorFromMbuf error\n"); + } + } + + if (packet == 0) + netStats->inputErrors++; + } + + /* + * If we are reusing the existing mbuf, then refurbish the existing + * DMA command \ descriptors by clearing the status/residual count + * fields. + */ + + if ( reusePkt == true ) + { + for ( j=0; j < sizeof(enet_dma_cmd_t)/sizeof(IODBDMADescriptor); + j++ ) + { + IOSetCCResult( &rxDMACommands[i].desc_seg[j], 0 ); + } + flush_cache_v( (vm_offset_t)&rxDMACommands[i], + sizeof(enet_dma_cmd_t) ); + } + + /* + * Keep track of the last receive descriptor processed + */ + last = i; + + /* + * Implement ring wrap-around + */ + if (++i >= rxMaxCommand) i = 0; + + /* + * Early exit in debugger mode. + */ + if (fDebugger) + { + break; + } + + /* + * Transfer received to network stack. 
+ */ + if (packet) + { + KERNEL_DEBUG(DBG_MACE_RXCOMPLETE | DBG_FUNC_NONE, (int) packet, + (int)receivedFrameSize, 0, 0, 0 ); + + /* + * The KDB lock must be held before calling this function. + */ + networkInterface->inputPacket(packet, receivedFrameSize, true); + netStats->inputPackets++; + packetsQueued = true; + } + } + + /* + * OK...this is a little messy + * + * We just processed a bunch of DMA receive descriptors. We are going to + * exchange the current DMA stop command (rxCommandTail) with the last + * receive descriptor we processed (last). This will make these list of + * descriptors we just processed available. If we processed no receive + * descriptors on this call then skip this exchange. + */ + +#if 0 + IOLog("Mace: Prev - Rx Head = %2d Rx Tail = %2d Rx Last = %2d\n\r", + rxCommandHead, rxCommandTail, last ); +#endif + + if ( last != lastResetValue ) + { + /* + * Save the contents of the last receive descriptor processed. + */ + packet = rxMbuf[last]; + tmpCommand = rxDMACommands[last]; + + /* + * Write a DMA stop command into this descriptor slot + */ + rxDMACommands[last].desc_seg[0] = dbdmaCmd_Stop; + rxDMACommands[last].desc_seg[1] = dbdmaCmd_Nop; + rxMbuf[last] = 0; + + flush_cache_v( (vm_offset_t)&rxDMACommands[last], + sizeof(enet_dma_cmd_t) ); + + /* + * Replace the previous DMA stop command with the last receive + * descriptor processed. + * + * The new DMA command is written into the channel program so that the + * command word for the old Stop command is overwritten last. This + * prevents the DMA engine from executing a partially written channel + * command. + * + * Note: When relocating the descriptor, we must update its branch + * field to reflect its new location. 
+ */ + nextDesc = rxDMACommandsPhys + (int)&rxDMACommands[rxCommandTail+1] - + (int)rxDMACommands; + IOSetCCCmdDep( &tmpCommand.desc_seg[0], nextDesc ); + + bcopy( (u_int32_t *)&tmpCommand+1, + (u_int32_t *)&rxDMACommands[rxCommandTail]+1, + sizeof(enet_dma_cmd_t)-sizeof(u_int32_t) ); + + flush_cache_v( (vm_offset_t)&rxDMACommands[rxCommandTail], + sizeof(enet_dma_cmd_t) ); + + rxMbuf[rxCommandTail] = packet; + + rxDMACommands[rxCommandTail].desc_seg[0].operation = + tmpCommand.desc_seg[0].operation; + + flush_cache_v( (vm_offset_t)&rxDMACommands[rxCommandTail], + sizeof(IODBDMADescriptor) ); + + /* + * Update rxCommmandTail to point to the new Stop command. Update + * rxCommandHead to point to the next slot in the ring past the Stop + * command + */ + rxCommandTail = last; + rxCommandHead = i; + } + + /* + * The DMA channel has a nasty habit of shutting down when there is a + * non-recoverable error on receive. We get no interrupt for this since + * the channel shuts down before the descriptor that causes the host + * interrupt is executed. + * + * We check if the channel is DEAD by checking the channel status reg. + * Also, the watchdog timer can force receiver interrupt servicing based + * on detecting that the receive DMA is DEAD. + */ + dmaChnlStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); + if ( dmaChnlStatus & kdbdmaDead ) + { + /* + * Read log error + */ + if (useNetif) netStats->inputErrors++; + IOLog( "Mace: Rx DMA Error - Status = %04x\n", dmaChnlStatus ); + + /* + * Reset and reinitialize chip + */ + _restartChip(); // This must not block in debugger mode. 
+ } + else + { + /* + * Tap the DMA to wake it up + */ + IODBDMAContinue( ioBaseEnetRxDMA ); + } + +#if 0 + IOLog( "Mace: New - Rx Head = %2d Rx Tail = %2d\n\r", + rxCommandHead, rxCommandTail ); +#endif + + return packetsQueued; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool MaceEnet::_transmitInterruptOccurred(bool fDebugger = false) +{ + u_int32_t dmaStatus; + u_int32_t xmtFS; + bool fServiced = false; + bool useNetif = !fDebugger && netifClient; + + // Set the debugTxPoll flag to indicate the debugger was active + // and some cleanup may be needed when the driver returns to + // normal operation. + // + if (fDebugger) + debugTxPoll = true; + + while ( 1 ) + { + /* + * Purge cache references for the DBDMA entry we are about to look at. + */ + invalidate_cache_v((vm_offset_t)&txDMACommands[txCommandHead], + sizeof(enet_txdma_cmd_t)); + + /* + * Check the status of the last descriptor in this entry to see if + * the DMA engine completed this entry. + */ + dmaStatus = IOGetCCResult( + &txDMACommands[txCommandHead].desc_seg[3] ) >> 16; + + if ( !(dmaStatus & kdbdmaActive) ) + { + break; + } + + fServiced = true; + + /* + * Reset the status word for the entry we are about to process + */ + IOSetCCResult( &txDMACommands[txCommandHead].desc_seg[3], 0 ); + + flush_cache_v( (vm_offset_t) &txDMACommands[txCommandHead].desc_seg[3], + sizeof(IODBDMADescriptor) ); + + /* + * This DMA descriptor read the transmit frame status. See what it has + * to tell us. 
+ */ + xmtFS = IOGetCCCmdDep( &txDMACommands[txCommandHead].desc_seg[2] ); + if ( useNetif && (xmtFS & kXmtFSXmtSV) ) + { + if (xmtFS & (kXmtFSUFlo | kXmtFSLCol | kXmtFSRtry | kXmtFSLCar) ) + { + netStats->outputErrors++; + } + else + { + netStats->outputPackets++; + } + + if (xmtFS & (kXmtFSOne | kXmtFSMore) ) + { + netStats->collisions++; + } + } + + /* + * Free the mbuf we just transmitted. + */ + KERNEL_DEBUG(DBG_MACE_TXCOMPLETE | DBG_FUNC_NONE, + (int) txMbuf[txCommandHead], + (int) txMbuf[txCommandHead]->m_pkthdr.len, 0, 0, 0 ); + + if (txMbuf[txCommandHead] != txDebuggerPkt) + { + if ( fDebugger ) + { + // + // While in debugger mode, do not touch the mbuf pool. + // Queue any used mbufs to a local queue. This queue + // will get flushed after we exit from debugger mode. + // + // During continuous debugger transmission and + // interrupt polling, we expect only the txDebuggerPkt + // to show up on the transmit mbuf ring. + // + debugQueue->enqueue( txMbuf[txCommandHead] ); + } + else + { + freePacket( txMbuf[txCommandHead] ); + } + } + + txMbuf[txCommandHead] = 0; + + if ( ++txCommandHead >= txMaxCommand ) txCommandHead = 0; + } + + /* + * The DMA channel has a nasty habit of shutting down when there is + * non-recoverable error on transmit. We get no interrupt for this since + * the channel shuts down before the descriptor that causes the host + * interrupt is executed. + * + * We check if the channel is DEAD by checking the channel status reg. + * Also, the watchdog timer can force a transmitter reset if it sees no + * interrupt activity for to consecutive timeout intervals. 
+ */ + + dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetTxDMA ); + if ( (dmaStatus & kdbdmaDead) || (txWDForceReset == true) ) + { + /* + * Read the transmit frame status and log error + */ + xmtFS = ReadMaceRegister( ioBaseEnet, kXmtFS ); + if (useNetif) netStats->outputErrors++; + IOLog( "Mace: Tx DMA Error - Status = %04x FS = %02x\n\r", + dmaStatus, xmtFS); + + /* + * Reset and reinitialize chip + */ + _restartChip(); + + txWDForceReset = false; + fServiced = true; + } + + return fServiced; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +/* + * Breaks up an ethernet data buffer into two physical chunks. We know that + * the buffer can't straddle more than two pages. If the content of paddr2 is + * zero this means that all of the buffer lies in one physical page. Note + * that we use the fact that tx and rx descriptors have the same size and + * same layout of relevent fields (data address and count). + */ +bool +MaceEnet::_updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t *desc, + bool isReceive) +{ + u_int32_t nextDesc = 0; + int segments; + struct IOPhysicalSegment segVector[2]; + + /* + * Although coalescing is always enabled, it cannot occur + * while the driver is in debugger mode. + */ + segments = mbufCursor->getPhysicalSegmentsWithCoalesce(m, segVector); + + if ((!segments) || (segments > 2)) { + IOLog("Mace: _updateDescriptorFromMbuf error, %d segments\n", + segments); + return false; + } + + if ( segments == 1 ) + { + IOMakeDBDMADescriptor( (&desc->desc_seg[0]), + ((isReceive) ? kdbdmaInputLast : kdbdmaOutputLast), + (kdbdmaKeyStream0), + (kdbdmaIntNever), + (kdbdmaBranchNever), + ((isReceive) ? kdbdmaWaitNever : + kdbdmaWaitIfFalse), + (segVector[0].length), + (segVector[0].location) ); + + desc->desc_seg[1] = (isReceive) ? 
dbdmaCmd_NopWInt : dbdmaCmd_Nop; + } + else + { + if ( isReceive ) + { + nextDesc = rxDMACommandsPhys + (int)desc - (int)rxDMACommands + + sizeof(enet_dma_cmd_t); + } + + IOMakeDBDMADescriptorDep( (&desc->desc_seg[0]), + ((isReceive) ? kdbdmaInputMore : kdbdmaOutputMore), + (kdbdmaKeyStream0), + ((isReceive) ? kdbdmaIntIfTrue : kdbdmaIntNever), + ((isReceive) ? kdbdmaBranchIfTrue : + kdbdmaBranchNever), + (kdbdmaWaitNever), + (segVector[0].length), + (segVector[0].location), + nextDesc ); + + IOMakeDBDMADescriptor( (&desc->desc_seg[1]), + ((isReceive) ? kdbdmaInputLast : kdbdmaOutputLast), + (kdbdmaKeyStream0), + ((isReceive) ? kdbdmaIntAlways : kdbdmaIntNever), + (kdbdmaBranchNever), + ((isReceive) ? kdbdmaWaitNever : + kdbdmaWaitIfFalse), + (segVector[1].length), + (segVector[1].location) ); + } + + flush_cache_v( (vm_offset_t)desc, sizeof(enet_dma_cmd_t) ); + + return true; +} + + +#ifdef DEBUG +/* + * Useful for testing. + */ + +void MaceEnet::_dumpDesc(void * addr, u_int32_t size) +{ + u_int32_t i; + unsigned long *p; + vm_offset_t paddr; + + _IOPhysicalFromVirtual( (vm_offset_t) addr, (vm_offset_t *)&paddr ); + + p = (unsigned long *)addr; + + for ( i=0; i < size/sizeof(IODBDMADescriptor); i++, p+=4, + paddr+=sizeof(IODBDMADescriptor) ) + { + IOLog("Ethernet(Mace): %08x(v) %08x(p): %08x %08x %08x %08x\n", + (int)p, + (int)paddr, + (int)OSReadSwapInt32(p, 0), (int)OSReadSwapInt32(p, 4), + (int)OSReadSwapInt32(p, 8), (int)OSReadSwapInt32(p, 12) ); + } + IOLog("\n"); +} + +void MaceEnet::_dumpRegisters() +{ + u_int8_t dataValue; + + IOLog("\nEthernet(Mace): IO Address = %08x", (int)ioBaseEnet ); + + dataValue = ReadMaceRegister(ioBaseEnet, kXmtFC); + IOLog("\nEthernet(Mace): Read Register %04x Transmit Frame Control = %02x", kXmtFC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kXmtFS); + IOLog("\nEthernet(Mace): Read Register %04x Transmit Frame Status = %02x", kXmtFS, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kXmtRC); + 
IOLog("\nEthernet(Mace): Read Register %04x Transmit Retry Count = %02x", kXmtRC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kRcvFC); + IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Control = %02x", kRcvFC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS0); + IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 0 = %02x", kRcvFS0, dataValue ); + dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS1); + IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 1 = %02x", kRcvFS1, dataValue ); + dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS2); + IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 2 = %02x", kRcvFS2, dataValue ); + dataValue = ReadMaceRegister(ioBaseEnet, kRcvFS3); + IOLog("\nEthernet(Mace): Read Register %04x Receive Frame Status 3 = %02x", kRcvFS3, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kFifoFC); + IOLog("\nEthernet(Mace): Read Register %04x FIFO Frame Count = %02x", kFifoFC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kIntReg); + IOLog("\nEthernet(Mace): Read Register %04x Interrupt Register = %02x", kIntReg, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kIntMask); + IOLog("\nEthernet(Mace): Read Register %04x Interrupt Mask Register = %02x", kIntMask, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kPollReg); + IOLog("\nEthernet(Mace): Read Register %04x Poll Register = %02x", kPollReg, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kBIUCC); + IOLog("\nEthernet(Mace): Read Register %04x BUI Configuration Control = %02x", kBIUCC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kFifoCC); + IOLog("\nEthernet(Mace): Read Register %04x FIFO Configuration Control = %02x", kFifoCC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kMacCC); + IOLog("\nEthernet(Mace): Read Register %04x MAC Configuration Control = %02x", kMacCC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kPLSCC); + 
IOLog("\nEthernet(Mace): Read Register %04x PLS Configuration Contro = %02x", kPLSCC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kPHYCC); + IOLog("\nEthernet(Mace): Read Register %04x PHY Configuration Control = %02x", kPHYCC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kMaceChipId0); + IOLog("\nEthernet(Mace): Read Register %04x MACE ChipID Register 7:0 = %02x", kMaceChipId0, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kMaceChipId1); + IOLog("\nEthernet(Mace): Read Register %04x MACE ChipID Register 15:8 = %02x", kMaceChipId1, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kMPC); + IOLog("\nEthernet(Mace): Read Register %04x Missed Packet Count = %02x", kMPC, dataValue ); + + dataValue = ReadMaceRegister(ioBaseEnet, kUTR); + IOLog("\nEthernet(Mace): Read Register %04x User Test Register = %02x", kUTR, dataValue ); + IOLog("\nEthernet(Mace): -------------------------------------------------------\n" ); +} +#endif DEBUG + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +IOReturn MaceEnet::getHardwareAddress(IOEthernetAddress *ea) +{ + unsigned char data; + + for (UInt i = 0; i < sizeof(*ea); i++) + { + data = ((unsigned char *)ioBaseEnetROM)[i << 4]; + ea->bytes[i] = reverseBitOrder(data); + } + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +#define ENET_CRCPOLY 0x04c11db7 + +/* Real fast bit-reversal algorithm, 6-bit values */ +static int reverse6[] = +{ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, + 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, + 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, + 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, + 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, + 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, + 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, + 
0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f +}; + +static u_int32_t crc416(unsigned int current, unsigned short nxtval ) +{ + register unsigned int counter; + register int highCRCBitSet, lowDataBitSet; + + /* Swap bytes */ + nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8); + + /* Compute bit-by-bit */ + for (counter = 0; counter != 16; ++counter) + { /* is high CRC bit set? */ + if ((current & 0x80000000) == 0) + highCRCBitSet = 0; + else + highCRCBitSet = 1; + + current = current << 1; + + if ((nxtval & 0x0001) == 0) + lowDataBitSet = 0; + else + lowDataBitSet = 1; + + nxtval = nxtval >> 1; + + /* do the XOR */ + if (highCRCBitSet ^ lowDataBitSet) + current = current ^ ENET_CRCPOLY; + } + return current; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +static u_int32_t mace_crc(unsigned short *address) +{ + register u_int32_t newcrc; + + newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ + newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ + newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ + + return (newcrc); +} + +/* + * Clear the hash table filter. + * + */ +void MaceEnet::_resetHashTableMask() +{ + bzero(hashTableUseCount, sizeof(hashTableUseCount)); + bzero(hashTableMask, sizeof(hashTableMask)); +} + +/* + * Add requested mcast addr to Mace's hash table filter. + * + */ +void MaceEnet::_addToHashTableMask(u_int8_t *addr) +{ + u_int32_t crc; + u_int8_t mask; + + crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! 
*/ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (hashTableUseCount[crc]++) + return; /* This bit is already set */ + mask = crc % 8; + mask = (unsigned char) 1 << mask; + hashTableMask[crc/8] |= mask; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void MaceEnet::_removeFromHashTableMask(u_int8_t *addr) +{ + unsigned int crc; + unsigned char mask; + + /* Now, delete the address from the filter copy, as indicated */ + crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (hashTableUseCount[crc] == 0) + return; /* That bit wasn't in use! */ + + if (--hashTableUseCount[crc]) + return; /* That bit is still in use */ + + mask = crc % 8; + mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ + hashTableMask[crc/8] &= mask; +} + +/* + * Sync the adapter with the software copy of the multicast mask + * (logical address filter). + */ +void MaceEnet::_updateHashTableMask() +{ + u_int8_t status; + u_int32_t i; + u_int8_t *p; + u_int8_t MacCCReg; + + // Stop the receiver before changing the filter. + // + MacCCReg = ReadMaceRegister( ioBaseEnet, kMacCC ); + WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg & ~kMacCCEnRcv ); + IODelay( RECEIVE_QUIESCE_uS ); + + if ( chipId != kMaceRevisionA2 ) + { + WriteMaceRegister( ioBaseEnet, kIAC, kIACAddrChg | kIACLogAddr ); + do + { + status = ReadMaceRegister( ioBaseEnet, kIAC ); + } + while( status & kIACAddrChg ); + } + else + { + WriteMaceRegister( ioBaseEnet, kIAC, kIACLogAddr ); + } + + p = (u_int8_t *) hashTableMask; + for (i = 0; i < 8; i++, p++ ) + { + WriteMaceRegister( ioBaseEnet, kLADRF, *p ); + } + + // Restore the engine's state. 
+ // + WriteMaceRegister( ioBaseEnet, kMacCC, MacCCReg ); +} diff --git a/iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.h b/iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.h new file mode 100644 index 000000000..849e3a96e --- /dev/null +++ b/iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1996 NeXT Software, Inc. + * + * Interface for hardware dependent (relatively) code + * for the Mace Ethernet chip + * + * HISTORY + * + * 11/22/97 R. Berkoff + * Created. 
+ */ + +#ifndef _MACEENETPRIVATE_H +#define _MACEENETPRIVATE_H + +#include "MaceEnet.h" + +void WriteMaceRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset, u_int8_t data); + +volatile u_int8_t ReadMaceRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset); + +#endif /* !_MACEENETPRIVATE_H */ diff --git a/iokit/Drivers/network/drvMaceEnet/MaceEnetRegisters.h b/iokit/Drivers/network/drvMaceEnet/MaceEnetRegisters.h new file mode 100644 index 000000000..64e05bc24 --- /dev/null +++ b/iokit/Drivers/network/drvMaceEnet/MaceEnetRegisters.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1995-1996 NeXT Software, Inc. + * + * Interface definition for the Mace Ethernet controller. + * + * HISTORY + * + * 04-Nov-97 + * Created. 
+ */ + +#ifndef _MACEENETREGISTERS_H +#define _MACEENETREGISTERS_H + +// --------------------------------------------------------------------------------------------- +// Mace and GC I/O Addresses +// --------------------------------------------------------------------------------------------- +#define kTxDMABaseOffset 0x08200 // offset from I/O Space Base address +#define kRxDMABaseOffset 0x08300 +#define kControllerBaseOffset 0x11000 +#define kControllerROMOffset 0x19000 + + +// --------------------------------------------------------------------------------------------- +// Mace Register Numbers & Bit Assignments +// --------------------------------------------------------------------------------------------- +/* + * Chip Revisions.. + */ + +#define kMaceRevisionB0 0x0940 +#define kMaceRevisionA2 0x0941 + +/* xmtfc */ +#define kXmtFC 0x0020 /* Transmit Frame Control */ +#define kXmtFCDRtry 0X80 /* - Disable Retry */ +#define kXmtFCDXmtFCS 0x08 /* - Disable Transmit FCS */ +#define kXmtFCAPadXmt 0x01 /* - Auto PAD Transmit */ + +/* xmtfs */ +#define kXmtFS 0x0030 /* Transmit Frame Status */ +#define kXmtFSXmtSV 0x80 /* - Transmit Status Valid */ +#define kXmtFSUFlo 0x40 /* - Transmit Underflow */ +#define kXmtFSLCol 0x20 /* - Transmit late collision */ +#define kXmtFSMore 0x10 /* - Transmit < 1 retry */ +#define kXmtFSOne 0x08 /* - Transmit single retry */ +#define kXmtFSDefer 0x04 /* - Transmit deferred */ +#define kXmtFSLCar 0x02 /* - Transmit lost carrier */ +#define kXmtFSRtry 0x01 /* - Transmit Unsuccessful */ + +/* xmtrc */ +#define kXmtRC 0x0040 /* Transmit Retry Count */ +#define kXmtRCExDef 0x80 /* - ? */ +#define kXmtRCXmrRC 0x0F /* - Transmit retry count */ + +/* rcvfc */ +#define kRcvFC 0x0050 /* Receive Frame Control */ +#define kRcvFCLLRcv 0x08 /* - ? 
*/ +#define kRcvFCMR 0x04 /* - Match/Reject (not implemented) */ +#define kRcvFCAStrpRcv 0x01 /* - Auto Strip Receive Enable */ + +/* rcvfs */ +#define kRcvFS0 0x0060 /* Receive Frame Status - Byte 0 */ +#define kRcvFS0RcvCnt 0xFF /* - Receive Msg Byte Count (7:0) */ + +#define kRcvFS1 0x0060 /* Receive Frame Status - Byte 1 */ +#define kRcvFS1OFlo 0x80 /* - Receive Overflow */ +#define kRcvFS1Clsn 0x40 /* - Receive Collision */ +#define kRcvFS1Fram 0x20 /* - Receive Framming Error */ +#define kRcvFS1FCS 0x10 /* - Receive Frame Check Error */ +#define kRcvFS1RcvCnt 0x0f /* - Receive Msg Byte Count (11:8) */ + + +#define kRcvFS2 0x0060 /* Receive Frame Status - Byte 2 */ +#define kRcvFS2RntPC 0xFF /* - Runt Packet Count */ + +#define kRcvFS3 0x0060 /* Receive Frame Status - Byte 3 */ +#define kRcvFS3RcvCC 0xFF /* Receive Collision Count */ + +/* fifofc */ +#define kFifoFC 0x0070 /* FIFO Frame Count */ +#define kFifoFCXFW 0xc0 /* - ? */ +#define kFifoFCXFW8 0x00 /* - ? */ +#define kFifoFCXFW16 0x40 /* - ? */ +#define kFifoFCXFW32 0x80 /* - ? */ + +#define kFifoFCRFW 0x30 /* - ? */ +#define kFifoFCRFW16 0x00 /* - ? */ +#define kFifoFCRFW32 0x10 /* - ? */ +#define kFifoFCRFW64 0x20 /* - ? */ +#define kFifoFCXFWU 0x08 /* - ? */ +#define kFifoFCRFWU 0x04 /* - ? */ +#define kFifoFCXBRst 0x02 /* - ? */ +#define kFifoFCRBRst 0x01 /* - ? 
*/ + + +/* ir */ +#define kIntReg 0x0080 /* Interrupt Register */ +#define kIntRegJab 0x80 /* - Jabber Error */ +#define kIntRegBabl 0x40 /* - Babble Error */ +#define kIntRegCErr 0x20 /* - Collision Error */ +#define kIntRegRcvCCO 0x10 /* - Receive Collision Count Overflow */ +#define kIntRegRntPCO 0x08 /* - Runt Packet Count Overflow */ +#define kIntRegMPCO 0x04 /* - Missed Packet Count Overflow */ +#define kIntRegRcvInt 0x02 /* - Receive Interrupt */ +#define kIntRegXmtInt 0x01 /* - Transmit Interrupt */ + +/* imr */ +#define kIntMask 0x0090 /* Interrupt Mask Register */ +#define kIntMaskJab 0x80 /* - Mask Jabber Error Int */ +#define kIntMaskBabl 0x40 /* - Mask Babble Error Int */ +#define kIntMaskCErr 0x20 /* - Mask Collision Error Int */ +#define kIntMaskRcvCCO 0x10 /* - Mask Rcv Coll Ctr Overflow Int */ +#define kIntMaskRntPCO 0x08 /* - Mask Runt Packet Ctr Overflow Int */ +#define kIntMaskMPCO 0x04 /* - Mask Missed Pkt Ctr Overflow Int */ +#define kIntMaskRcvInt 0x02 /* - Mask Receive Int */ +#define kIntMaskXmtInt 0x01 /* - Mask Transmit Int */ + +/* pr */ +#define kPollReg 0x00A0 /* Poll Register */ +#define kPollRegXmtSV 0x80 /* - Transmit Status Valid */ +#define kPollRegTDTReq 0x40 /* - Transmit Data Transfer Request */ +#define kPollRegRDTReq 0x20 /* - Receive Data Transfer Request */ + +/* biucc */ +#define kBIUCC 0x00B0 /* BUI Configuration Control */ +#define kBIUCCBSwp 0x40 /* - Byte Swap Enable */ +#define kBIUCCXmtSP 0x30 /* - Transmit Start Point: */ +#define kBIUCCXmtSP04 0x00 /* - 00b = 4 Bytes */ +#define kBIUCCXmtSP16 0x10 /* - 01b = 16 Bytes */ +#define kBIUCCXmtSP64 0x20 /* - 10b = 64 Bytes */ +#define kBIUCCXmtSP112 0x30 /* - 11b = 112 Bytes */ +#define kBIUCCSWRst 0x01 /* Software Reset */ + +/* fifocc */ +#define kFifoCC 0x00C0 /* FIFO Configuration Control */ +#define kFifoCCXmtFW 0xC0 /* - Transmit FIFO Watermark: */ +#define kFifoCCXmtFW08 0x00 /* - 00b = 8 Write Cycles */ +#define kFifoCCXmtFW16 0x40 /* - 01b = 16 Write Cycles */ 
+#define kFifoCCXmtFW32 0x80 /* - 10b = 32 Write Cycles */ + +#define kFifoCCRcvFW 0x30 /* - Receive FIFO Watermark: */ +#define kFifoCCRcvFW16 0x00 /* - 00b = 16 Bytes */ +#define kFifoCCRcvFW32 0x10 /* - 01b = 32 Bytes */ +#define kFifoCCRcvFW64 0x20 /* - 10b = 64 Bytes */ + +#define kFifoCCXmtFWRst 0x08 /* - Transmit FIFO Watermark Reset */ +#define kFifoCCRcvFWRst 0x04 /* - Receive FIFO Watermark Reset */ +#define kFifoCCXmtBRst 0x02 /* - Transmit Burst Enable */ +#define kFifoCCRcvBRst 0x01 /* - Receive Burst Enable */ + +/* maccc */ +#define kMacCC 0x00D0 /* MAC Configuration Control */ +#define kMacCCProm 0x80 /* - Promiscuous Mode Enable */ +#define kMacCCDXmt2PD 0x40 /* - Disable Transmit Two Part Deferral */ +#define kMacCCEMBA 0x20 /* - Enable Modified Backoff Algorithm */ +#define kMacCCDRcvPA 0x08 /* - ? */ +#define kMacCCDRcvBC 0x04 /* - ? */ +#define kMacCCEnXmt 0x02 /* - Transmit Enable */ +#define kMacCCEnRcv 0x01 /* - Receive Enable */ + +/* plscc */ +#define kPLSCC 0x00E0 /* PLS Configuration Control */ +#define kPLSCCXmtSel 0x08 /* - Transmit Mode Select */ +#define kPLSCCPortSel 0x06 /* - Port Select: */ +#define kPLSCCPortSelAUI 0x00 /* - 00b = AUI */ +#define kPLSCCPortSelTenBase 0x02 /* - 01b = 10BaseT */ +#define kPLSCCPortSelDAI 0x04 /* - 10b = DAI */ +#define kPLSCCPortSelGPSI 0x06 /* - 11b = GPSI */ +#define kPLSCCEnSts 0x01 /* - Enable Status */ + +/* phycc */ +#define kPHYCC 0x00F0 /* PHY Configuration Control */ +#define kPHYCCLnkFL 0x80 /* - ? */ +#define kPHYCCDLnkTst 0x40 /* - ? */ +#define kPHYCCRcvPol 0x20 /* - ? */ +#define kPHYCCDAPC 0x10 /* - ? */ +#define kPHYCCLRT 0x08 /* - ? */ +#define kPHYCCASel 0x04 /* - ? */ +#define kPHYCCRWake 0x02 /* - ? */ +#define kPHYCCAWake 0x01 /* - ? */ + +#define kMaceChipId0 0x0100 /* MACE Chip ID Register (7:0) */ +#define kMaceChipId1 0x0110 /* MACE Chip ID Register (15:8) */ + +/* iac */ +#define kIAC 0x0120 /* Internal Address Configuration */ +#define kIACAddrChg 0x80 /* - ? 
*/ +#define kIACPhyAddr 0x04 /* - Physical Address Reset */ +#define kIACLogAddr 0x02 /* - Logical Address Reset */ + + +/* ladrf */ +#define kLADRF 0x0140 /* Logical Address Filter - 8 Bytes */ + +/* padr */ +#define kPADR 0x0150 /* Physical Address Filter - 6 Bytes */ + +/* kMPC */ +#define kMPC 0x0180 /* Missed Packet Count */ + +/* utr */ +#define kUTR 0x01D0 /* User Test Register */ +#define kUTRRTRE 0x80 /* - Reserved Test Register Enable */ +#define kUTRRTRD 0x40 /* - Reserved Test Register Disable */ +#define kUTRRPA 0x20 /* - Runt Packet Accept */ +#define kUTRFColl 0x10 /* - Force Collision */ +#define kUTRRcvFCS 0x08 /* - Receive FCS Enable */ + +#define kUTRLoop 0x06 /* - Loopback Control: */ +#define kUTRLoopNone 0x00 /* - 00b = None */ +#define kUTRLoopExt 0x02 /* - 01b = External */ +#define kUTRLoopInt 0x04 /* - 10b = Internal (excludes MENDEC) */ +#define kUTRLoopIntM 0x06 /* - 11b = Internal (includes MENDEC) */ + + +#define TX_RING_LENGTH (32+1) +#define RX_RING_LENGTH (32+1) + +#define NETWORK_BUFSIZE (ETHERMAXPACKET + ETHERCRC + 8) +#define TRANSMIT_QUEUE_SIZE 128 + +#define WATCHDOG_TIMER_MS 500 +#define TX_KDB_TIMEOUT 1000 + +#define TRANSMIT_QUIESCE_uS 200 +#define RECEIVE_QUIESCE_uS 1500 + +enum +{ + kIRQEnetDev = 0, + kIRQEnetTxDMA = 1, + kIRQEnetRxDMA = 2 +}; + +enum +{ + MEMORY_MAP_ENET_INDEX = 0, + MEMORY_MAP_TXDMA_INDEX = 1, + MEMORY_MAP_RXDMA_INDEX = 2, + MEMORY_MAP_COUNT = 3 +}; + +#endif /* !_MACEENETREGISTERS_H */ diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnet.cpp b/iokit/Drivers/network/drvPPCBMac/BMacEnet.cpp new file mode 100644 index 000000000..c164a2ebd --- /dev/null +++ b/iokit/Drivers/network/drvPPCBMac/BMacEnet.cpp @@ -0,0 +1,1199 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * Hardware independent (relatively) code for the BMac Ethernet Controller + * + * HISTORY + * + * dd-mmm-yy + * Created. + * + * Dec 10, 1998 jliu + * Converted to IOKit/C++. + */ + +#include "BMacEnet.h" +#include "BMacEnetPrivate.h" + +#include +#include +#include +#include + +// #define DEBUG_JOE 1 + +//------------------------------------------------------------------------ + +#define super IOEthernetController + +OSDefineMetaClassAndStructors( BMacEnet, IOEthernetController ) + +//------------------------------------------------------------------------ + +#define PROVIDER_DEV 0 +#define PROVIDER_DMA_TX 1 +#define PROVIDER_DMA_RX 2 + +/* + * Public Instance Methods + */ + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::init(OSDictionary * properties = 0) +{ + if ( super::init(properties) == false ) + return false; + + /* + * Initialize my ivars. 
+ */ + phyId = 0xff; + phyMIIDelay = MII_DEFAULT_DELAY; + sromAddressBits = 6; + enetAddressOffset = 20; + phyStatusPrev = 0; + + return true; +} + +bool BMacEnet::start(IOService * provider) +{ + AppleMacIODevice *nub = OSDynamicCast(AppleMacIODevice, provider); + IOInterruptEventSource *intES; + + if (!nub || !super::start(provider)) + return false; + + transmitQueue = OSDynamicCast(IOGatedOutputQueue, getOutputQueue()); + if (!transmitQueue) { + IOLog("BMac: output queue initialization failed\n"); + return false; + } + transmitQueue->retain(); + + // Allocate debug queue. This stores packets retired from the TX ring + // by the polling routine. We cannot call freePacket() or m_free() within + // the debugger context. + // + // The capacity of the queue is set at maximum to prevent the queue from + // calling m_free() due to over-capacity. But we don't expect the size + // of the queue to grow too large. + // + debugQueue = IOPacketQueue::withCapacity((UInt) -1); + if (!debugQueue) + return false; + + // Allocate a IOMbufBigMemoryCursor instance. Currently, the maximum + // number of segments is set to 2. The maximum length for each segment + // is set to the maximum ethernet frame size (plus padding). + + mbufCursor = IOMbufBigMemoryCursor::withSpecification(NETWORK_BUFSIZE, 2); + if (!mbufCursor) { + IOLog("Ethernet(BMac): IOMbufBigMemoryCursor allocation failure\n"); + return false; + } + + // + // Our provider is the nub representing the BMacEnet hardware + // controller. We will query it for our resource information. 
+ // + + for (int i = 0; i < MEMORY_MAP_HEATHROW_INDEX; i++) { + IOMemoryMap * map; + + map = provider->mapDeviceMemoryWithIndex(i); + if (!map) + return false; +#ifdef DEBUG_JOE + IOLog("map %d: Phys:%08x Virt:%08x len:%d\n", + i, + (unsigned) map->getPhysicalAddress(), + (unsigned) map->getVirtualAddress(), + (unsigned) map->getLength()); +#endif DEBUG_JOE + + switch (i) { + case MEMORY_MAP_ENET_INDEX: + ioBaseEnet = (IOPPCAddress) map->getVirtualAddress(); + break; + + case MEMORY_MAP_TXDMA_INDEX: + ioBaseEnetTxDMA = (IODBDMAChannelRegisters *) + map->getVirtualAddress(); + break; + + case MEMORY_MAP_RXDMA_INDEX: + ioBaseEnetRxDMA = (IODBDMAChannelRegisters *) + map->getVirtualAddress(); + break; + } + + maps[i] = map; + } + +#ifdef DEBUG_JOE + IOLog("ioBaseEnet: 0x%08x\n", ioBaseEnet); + IOLog("ioBaseEnetTxDMA: 0x%08x\n", ioBaseEnetTxDMA); + IOLog("ioBaseEnetRxDMA: 0x%08x\n", ioBaseEnetRxDMA); +#endif DEBUG_JOE + + // + // We need to get the I/O address for the Heathrow controller. + // We ask the provider (bmac) for its device tree parent. + // + IOService *heathrow; + if (!(heathrow = OSDynamicCast(IOService, + provider->getParentEntry( gIODTPlane )))) + return false; + + // Check whether the hardware is susceptible to the broken unicast + // filter problem. 
+ // + OSData * devIDData; + devIDData = OSDynamicCast(OSData, heathrow->getProperty("device-id")); + + if (devIDData) { + useUnicastFilter = ( *((UInt32 *) devIDData->getBytesNoCopy()) == + 0x10 ); + if (useUnicastFilter) + IOLog("%s: Enabling workaround for broken unicast filter\n", + getName()); + } + + IOMemoryMap * map = heathrow->mapDeviceMemoryWithIndex(0); + if (map) + { +#ifdef DEBUG_JOE + IOLog("Heathrow: Phys:%08x Virt:%08x len:%d\n", + (unsigned) map->getPhysicalAddress(), + (unsigned) map->getVirtualAddress(), + (unsigned) map->getLength()); +#endif DEBUG_JOE + ioBaseHeathrow = (IOPPCAddress) map->getVirtualAddress(); + + maps[MEMORY_MAP_HEATHROW_INDEX] = map; + } + else { + return false; + } + + // + // Get a reference to the IOWorkLoop in our superclass. + // + IOWorkLoop * myWorkLoop = getWorkLoop(); + assert(myWorkLoop); + + // + // Allocate three IOInterruptEventSources. + // + rxIntSrc = IOInterruptEventSource::interruptEventSource + (this, + (IOInterruptEventAction) &BMacEnet::interruptOccurredForSource, + provider, PROVIDER_DMA_RX); + if (!rxIntSrc + || (myWorkLoop->addEventSource(rxIntSrc) != kIOReturnSuccess)) { + IOLog("Ethernet(BMac): rxIntSrc init failure\n"); + return false; + } + + intES = IOInterruptEventSource::interruptEventSource + (this, + (IOInterruptEventAction) &BMacEnet::interruptOccurredForSource, + provider, PROVIDER_DMA_TX); + if (intES) { + bool res = (myWorkLoop->addEventSource(intES) != kIOReturnSuccess); + intES->release(); + if (res) { + IOLog("Ethernet(BMac): PROVIDER_DMA_TX add failure\n"); + return false; + } + } + else { + IOLog("Mace: PROVIDER_DMA_TX init failure\n"); + return false; + } + + intES = IOInterruptEventSource::interruptEventSource + (this, + (IOInterruptEventAction) &BMacEnet::interruptOccurredForSource, + provider, PROVIDER_DEV); + if (intES) { + bool res = (myWorkLoop->addEventSource(intES) != kIOReturnSuccess); + intES->release(); + if (res) { + IOLog("Ethernet(BMac): PROVIDER_DEV add failure\n"); + 
return false; + } + } + else { + IOLog("Ethernet(BMac): PROVIDER_DEV init failure\n"); + return false; + } + + timerSrc = IOTimerEventSource::timerEventSource + (this, (IOTimerEventSource::Action) &BMacEnet::timeoutOccurred); + if (!timerSrc + || (myWorkLoop->addEventSource(timerSrc) != kIOReturnSuccess)) { + IOLog("Ethernet(BMac): timerSrc init failure\n"); + return false; + } + + MGETHDR(txDebuggerPkt, M_DONTWAIT, MT_DATA); + if (!txDebuggerPkt) { + IOLog("Ethernet(BMac): Couldn't allocate KDB buffer\n"); + return false; + } + +#if 0 + // Enable the interrupt event sources. The hardware interrupts + // sources remain disabled until _resetAndEnable(true) is called. + // + // myWorkLoop->enableAllInterrupts(); +#endif + + // Perform a hardware reset. + // + if ( !_resetAndEnable(false) ) + { + return false; + } + + // Cache my MAC address. + // + getHardwareAddress(&myAddress); + + // Allocate memory for ring buffers. + // + if (_allocateMemory() == false) + { + return false; + } + + // Create a table of supported media types. + // + if ( !createMediumTables() ) + return false; + + // Attach an IOEthernetInterface client. + // + if ( !attachInterface((IONetworkInterface **) &networkInterface, false) ) + return false; + + // Attach a kernel debugger client. + // + attachDebuggerClient(&debugger); + + // Ready to service interface requests. 
+ // + networkInterface->registerService(); + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::free() +{ + UInt i; + + _resetAndEnable(false); + + if (debugger) + debugger->release(); + + if (getWorkLoop()) + getWorkLoop()->disableAllEventSources(); + + if (timerSrc) + timerSrc->release(); + + if (rxIntSrc) + rxIntSrc->release(); + + if (txDebuggerPkt) + freePacket(txDebuggerPkt); + + if (transmitQueue) + transmitQueue->release(); + + if (debugQueue) + debugQueue->release(); + + if (networkInterface) + networkInterface->release(); + + if (mbufCursor) + mbufCursor->release(); + + if ( mediumDict ) + mediumDict->release(); + + for (i = 0; i < rxMaxCommand; i++) + if (rxMbuf[i]) freePacket(rxMbuf[i]); + + for (i = 0; i < txMaxCommand; i++) + if (txMbuf[i]) freePacket(txMbuf[i]); + + for (i = 0; i < MEMORY_MAP_COUNT; i++) + if (maps[i]) maps[i]->release(); + + if (dmaMemory.ptr) + { + IOFree(dmaMemory.ptrReal, dmaMemory.sizeReal); + dmaMemory.ptr = 0; + } + + if ( workLoop ) + { + workLoop->release(); + workLoop = 0; + } + + super::free(); +} + +/*------------------------------------------------------------------------- + * Override IONetworkController::createWorkLoop() method and create + * a workloop. + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::createWorkLoop() +{ + workLoop = IOWorkLoop::workLoop(); + + return ( workLoop != 0 ); +} + +/*------------------------------------------------------------------------- + * Override IOService::getWorkLoop() method to return our workloop. 
+ * + * + *-------------------------------------------------------------------------*/ + +IOWorkLoop * BMacEnet::getWorkLoop() const +{ + return workLoop; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::interruptOccurredForSource(IOInterruptEventSource *src, + int /*count*/) +{ + bool doFlushQueue = false; + bool doService = false; + + reserveDebuggerLock(); + + statReg = ReadBigMacRegister( ioBaseEnet, kSTAT ); + + if (src == rxIntSrc) { + KERNEL_DEBUG(DBG_BMAC_RXIRQ | DBG_FUNC_START, 0, 0, 0, 0, 0 ); + doFlushQueue = _receiveInterruptOccurred(); + KERNEL_DEBUG(DBG_BMAC_RXIRQ | DBG_FUNC_END, 0, 0, 0, 0, 0 ); + } + else { + /* + * On the transmit side, we use the chipset interrupt. Using the + * transmit DMA interrupt (or having multiple transmit DMA entries) + * would allows us to send the next frame to the chipset prior the + * transmit fifo going empty. + * However, this aggrevates a BMac chipset bug where the next frame going + * out gets corrupted (first two bytes lost) if the chipset had to retry + * the previous frame. + */ + txWDInterrupts++; + KERNEL_DEBUG(DBG_BMAC_TXIRQ | DBG_FUNC_START, 0, 0, 0, 0, 0 ); + doService = _transmitInterruptOccurred(); + KERNEL_DEBUG(DBG_BMAC_TXIRQ | DBG_FUNC_END, 0, 0, 0, 0, 0 ); + } + + releaseDebuggerLock(); + + /* + * Submit all received packets queued up by _receiveInterruptOccurred() + * to the network stack. The up call is performed without holding the + * debugger lock. + */ + if ( doFlushQueue ) + networkInterface->flushInputQueue(); + + /* + * Unstall the output queue if some space was made available. 
+ */ + if ( doService && netifEnabled ) + transmitQueue->service(); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +UInt32 BMacEnet::outputPacket(struct mbuf * pkt, void * param) +{ + u_int32_t i; + UInt32 ret = kIOReturnOutputSuccess; + + KERNEL_DEBUG(DBG_BMAC_TXQUEUE | DBG_FUNC_NONE, (int) pkt, + (int) pkt->m_pkthdr.len, 0, 0, 0 ); + + /* + * Hold the debugger lock so the debugger can't interrupt us + */ + reserveDebuggerLock(); + + do + { + /* + * Preliminary sanity checks + */ + assert( pkt && netifEnabled ); + +#if 0 + /* + * Remove any completed packets from the Tx ring + */ + if ( chipId >= kCHIPID_PaddingtonXmitStreaming ) + { + _transmitInterruptOccurred(); + } +#endif + + i = txCommandTail + 1; + if ( i >= txMaxCommand ) i = 0; + if ( i == txCommandHead ) + { + /* + * Ring buffer is full. Disable the dequeueing process. + * We reenable it when an entry is made available by the + * transmit interrupt handler, or if a timeout occurs. + */ + ret = kIOReturnOutputStall; + continue; + } + + /* + * If there is space on the Tx ring, add the packet directly to the + * ring. + */ + _transmitPacket(pkt); + } + while ( 0 ); + + releaseDebuggerLock(); + + return ret; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_resetAndEnable(bool enable) +{ + bool ret = true; + +// reserveDebuggerLock(); + + ready = false; + + if (timerSrc) timerSrc->cancelTimeout(); + + _disableAdapterInterrupts(); + if (getWorkLoop()) getWorkLoop()->disableAllInterrupts(); + + if (enable) + { + phyId = 0xff; + } + + _resetChip(); + + // Initialize the link status. 
+ + phyStatusPrev = 0; + setLinkStatus( 0, 0 ); + + while (enable) + { + if (!_initRxRing() || !_initTxRing()) + { + ret = false; + break; + } + + if ( phyId != 0xff ) + { + miiInitializePHY(phyId); + } + + if (_initChip() == false) + { + ret = false; + break; + } + + _startChip(); + + timerSrc->setTimeoutMS(WATCHDOG_TIMER_MS); + + if (getWorkLoop()) getWorkLoop()->enableAllInterrupts(); + _enableAdapterInterrupts(); + + ready = true; + + _sendDummyPacket(); + + monitorLinkStatus( true ); + + break; + } + +// releaseDebuggerLock(); + + return ret; +} + +/*------------------------------------------------------------------------- + * Grab a pointer to the statistics counters. + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::configureInterface( IONetworkInterface * netif ) +{ + IONetworkData * nd; + + if ( super::configureInterface( netif ) == false ) + return false; + + /* + * Grab a pointer to the statistics structure in the interface. + */ + nd = netif->getNetworkData( kIONetworkStatsKey ); + + if ( !nd || !(netStats = (IONetworkStats *) nd->getBuffer()) ) + { + IOLog("EtherNet(BMac): invalid network statistics\n"); + return false; + } + + return true; +} + +/*------------------------------------------------------------------------- + * Called by IOEthernetInterface client to enable the controller. + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn BMacEnet::enable(IONetworkInterface * netif) +{ + // If an interface client has previously enabled us, + // and we know there can only be one interface client + // for this driver, then simply return true. + // + if ( netifEnabled ) + { + IOLog("EtherNet(BMac): already enabled\n"); + return kIOReturnSuccess; + } + + if ( (ready == false) && !_resetAndEnable(true) ) + return kIOReturnIOError; + + // Record the interface as an active client. 
+ // + netifEnabled = true; + + // Start our IOOutputQueue object. + // + transmitQueue->setCapacity(TRANSMIT_QUEUE_SIZE); + transmitQueue->start(); + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * Called by IOEthernetInterface client to disable the controller. + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn BMacEnet::disable(IONetworkInterface * /*netif*/) +{ + // Disable our IOOutputQueue object. This will prevent the + // outputPacket() method from being called. + // + transmitQueue->stop(); + + // Flush all packets currently in the output queue. + // + transmitQueue->setCapacity(0); + transmitQueue->flush(); + + // If we have no active clients, then disable the controller. + // + if ( debugEnabled == false ) + { + _resetAndEnable(false); + } + + netifEnabled = false; + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * This method is called by our debugger client to bring up the controller + * just before the controller is registered as the debugger device. The + * debugger client is attached in response to the attachDebuggerClient() + * call. + * + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn BMacEnet::enable(IOKernelDebugger *debugger) +{ + // Enable hardware and make it ready to support the debugger client. + // + if ( (ready == false) && !_resetAndEnable(true) ) + { + return kIOReturnIOError; + } + + // Record the debugger as an active client of ours. + // + debugEnabled = true; + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * This method is called by our debugger client to stop the controller. 
+ * The debugger will call this method when we issue a detachDebuggerClient(). + * + * This method is always called while running on the default workloop + * thread. + *-------------------------------------------------------------------------*/ + +IOReturn BMacEnet::disable(IOKernelDebugger * /*debugger*/) +{ + debugEnabled = false; + + // If we have no active clients, then disable the controller. + // + if ( netifEnabled == false ) + { + _resetAndEnable(false); + } + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::timeoutOccurred(IOTimerEventSource * /*timer*/) +{ + u_int32_t dmaStatus; + bool doFlushQueue = false; + bool doService = false; + + if ( ready == false ) + { + return; + } + + // IOLog("Ethernet(BMac): watchdog timer\n"); + + reserveDebuggerLock(); + + /* + * Check for DMA shutdown on receive channel + */ + dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); + if ( !(dmaStatus & kdbdmaActive) ) + { +#if 0 + IOLog( "Ethernet(BMac): Timeout check - RxHead = %d RxTail = %d\n", + rxCommandHead, rxCommandTail); + + IOLog( "Ethernet(BMac): Rx Commands = %08x(p) Rx DMA Ptr = %08x(p)\n\r", rxDMACommandsPhys, IOGetDBDMACommandPtr(ioBaseEnetRxDMA) ); + [self _dumpDesc:(void *)rxDMACommands Size:rxMaxCommand * sizeof(enet_dma_cmd_t)]; +#endif + + doFlushQueue = _receiveInterruptOccurred(); + } + + /* + * If there are pending entries on the Tx ring + */ + if ( txCommandHead != txCommandTail ) + { + /* + * If we did not service the Tx ring during the last timeout interval, + * then force servicing of the Tx ring + */ + if ( txWDInterrupts == 0 ) + { + if ( txWDCount++ > 0 ) + { + if (_transmitInterruptOccurred() == false) + { +#if 0 + IOLog( "Ethernet(BMac): Timeout check - TxHead = %d TxTail = %d\n", + txCommandHead, txCommandTail); +#endif + _restartTransmitter(); + } + doService = true; + } + 
} + else + { + txWDInterrupts = 0; + txWDCount = 0; + } + } + else + { + txWDInterrupts = 0; + txWDCount = 0; + } + + // Poll link status periodically. + + monitorLinkStatus(); + + // Clean-up after the debugger if the debugger was active. + // + if ( debugTxPoll ) + { + debugQueue->flush(); + debugTxPoll = false; + doService = true; + } + + releaseDebuggerLock(); + + /* + * Submit all received packets queued up by _receiveInterruptOccurred() + * to the network stack. This call is performed without holding the + * debugger lock. + */ + if ( doFlushQueue ) + { + networkInterface->flushInputQueue(); + } + + /* + * Make sure the output queue is not stalled. + */ + if ( doService && netifEnabled ) + { + transmitQueue->service(); + } + + /* + * Restart the watchdog timer + */ + timerSrc->setTimeoutMS(WATCHDOG_TIMER_MS); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::monitorLinkStatus( bool firstPoll ) +{ + u_int16_t phyStatus; + u_int16_t phyReg; + u_int16_t phyStatusChange; + bool fullDuplex; + bool reportLinkStatus = false; + UInt32 linkSpeed; + UInt32 linkStatus; + + if ( phyId == 0xff ) + { + // For implementations without a PHY, query the link status bit from + // the transceiver control register (kXCVRIF). 
+ + phyStatus = ReadBigMacRegister(ioBaseEnet, kXCVRIF); + + if ( ( ( phyStatus ^ phyStatusPrev ) & kLinkStatus ) || firstPoll ) + { + linkStatus = kIONetworkLinkValid; + linkSpeed = 0; + fullDuplex = false; + reportLinkStatus = true; + + if ( ( phyStatus & kLinkStatus ) == 0 ) + { + linkSpeed = 10; + linkStatus |= kIONetworkLinkActive; + } + + phyStatusPrev = phyStatus; + } + } + else if ( miiReadWord(&phyStatus, MII_STATUS, phyId) == true ) + { + phyStatusChange = ( phyStatusPrev ^ phyStatus ) & + ( MII_STATUS_LINK_STATUS | + MII_STATUS_NEGOTIATION_COMPLETE); + + if ( phyStatusChange || firstPoll ) + { + if ( firstPoll ) + { + // For the initial link status poll, wait a bit, then + // re-read the status register to clear any latched bits. + // Why wait? Well, the debugger can kick in shortly after + // this function returns, and we want the duplex setting + // on the MAC to match the PHY. + + miiWaitForAutoNegotiation( phyId ); + miiReadWord(&phyStatus, MII_STATUS, phyId); + miiReadWord(&phyStatus, MII_STATUS, phyId); + } + + if ( (phyStatus & MII_STATUS_LINK_STATUS) && + (firstPoll || (phyStatus & MII_STATUS_NEGOTIATION_COMPLETE)) ) + { + if ( (phyType & MII_ST10040_MASK) == MII_ST10040_ID ) + { + miiReadWord(&phyReg, MII_ST10040_CHIPST, phyId); + linkSpeed = (phyReg & MII_ST10040_CHIPST_SPEED) ? + 100 : 10; + fullDuplex = (phyReg & MII_ST10040_CHIPST_DUPLEX) ? + true : false; + } + else if ( (phyType & MII_DP83843_MASK) == MII_DP83843_ID ) + { + miiReadWord(&phyReg, MII_DP83843_PHYSTS, phyId); + linkSpeed = (phyReg & MII_DP83843_PHYSTS_SPEED10) ? + 10 : 100; + fullDuplex = (phyReg & MII_DP83843_PHYSTS_DUPLEX) ? 
+ true : false; + } + else + { + linkSpeed = 0; + fullDuplex = false; + } + + if ( fullDuplex != isFullDuplex ) + { + _setDuplexMode(fullDuplex); + } + + linkStatus = kIONetworkLinkActive | kIONetworkLinkValid; + } + else + { + linkStatus = kIONetworkLinkValid; + linkSpeed = 0; + fullDuplex = false; + } + + reportLinkStatus = true; + phyStatusPrev = phyStatus; + } + } + + if ( reportLinkStatus ) + { + IONetworkMedium * medium; + IOMediumType mediumType; + + switch ( linkSpeed ) + { + case 10: + mediumType = kIOMediumEthernet10BaseT; + mediumType |= (fullDuplex == true) ? + kIOMediumOptionFullDuplex : + kIOMediumOptionHalfDuplex; + break; + case 100: + mediumType = kIOMediumEthernet100BaseTX; + mediumType |= (fullDuplex == true) ? + kIOMediumOptionFullDuplex : + kIOMediumOptionHalfDuplex; + break; + default: + mediumType = kIOMediumEthernetNone; + break; + } + + medium = IONetworkMedium::getMediumWithType(mediumDict, mediumType); + + setLinkStatus( linkStatus, medium, linkSpeed * 1000000 ); + + if ( linkStatus & kIONetworkLinkActive ) + IOLog( "Ethernet(BMac): Link up at %ld Mbps - %s Duplex\n", + linkSpeed, (fullDuplex) ? 
"Full" : "Half" ); + else + IOLog( "Ethernet(BMac): Link down\n" ); + } +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +const OSString * BMacEnet::newVendorString() const +{ + return OSString::withCString("Apple"); +} + +const OSString * BMacEnet::newModelString() const +{ + return OSString::withCString("BMac"); +} + +const OSString * BMacEnet::newRevisionString() const +{ + return OSString::withCString(""); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +IOReturn BMacEnet::setPromiscuousMode(IOEnetPromiscuousMode mode) +{ + u_int16_t rxCFGVal; + + reserveDebuggerLock(); + + /* + * Turn off the receiver and wait for the chipset to acknowledge + */ + rxCFGVal = ReadBigMacRegister(ioBaseEnet, kRXCFG); + WriteBigMacRegister(ioBaseEnet, kRXCFG, rxCFGVal & ~kRxMACEnable ); + while( ReadBigMacRegister(ioBaseEnet, kRXCFG) & kRxMACEnable ) + ; + + /* + * Set or reset promiscuous mode and restore receiver state + */ + if (mode == kIOEnetPromiscuousModeOff) { + rxCFGVal &= ~kRxPromiscEnable; + isPromiscuous = false; + } + else { + rxCFGVal |= kRxPromiscEnable; + isPromiscuous = true; + } + + WriteBigMacRegister( ioBaseEnet, kRXCFG, rxCFGVal ); + + releaseDebuggerLock(); + + return kIOReturnSuccess; +} + +IOReturn BMacEnet::setMulticastMode(IOEnetMulticastMode mode) +{ + multicastEnabled = (mode == kIOEnetMulticastModeOff) ? 
false : true; + + return kIOReturnSuccess; +} + +IOReturn BMacEnet::setMulticastList(IOEthernetAddress *addrs, UInt32 count) +{ + reserveDebuggerLock(); + _resetHashTableMask(); + for (UInt32 i = 0; i < count; i++) { + _addToHashTableMask(addrs->bytes); + addrs++; + } + _updateBMacHashTableMask(); + releaseDebuggerLock(); + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +static struct MediumTable +{ + UInt32 type; + UInt32 speed; +} +mediumTable[] = +{ + { kIOMediumEthernetNone , 0 }, + { kIOMediumEthernetAuto , 0 }, + { kIOMediumEthernet10BaseT | kIOMediumOptionHalfDuplex, 10 }, + { kIOMediumEthernet10BaseT | kIOMediumOptionFullDuplex, 10 }, + { kIOMediumEthernet100BaseTX | kIOMediumOptionHalfDuplex, 100 }, + { kIOMediumEthernet100BaseTX | kIOMediumOptionFullDuplex, 100 }, +}; + + +bool BMacEnet::createMediumTables() +{ + IONetworkMedium *medium; + UInt32 i; + + mediumDict = OSDictionary::withCapacity( sizeof(mediumTable)/sizeof(mediumTable[0]) ); + if ( mediumDict == 0 ) return false; + + for ( i=0; i < sizeof(mediumTable)/sizeof(mediumTable[0]); i++ ) + { + medium = IONetworkMedium::medium( mediumTable[i].type, mediumTable[i].speed ); + if ( medium != 0 ) + { + IONetworkMedium::addMedium( mediumDict, medium ); + medium->release(); + } + } + + if ( publishMediumDictionary( mediumDict ) != true ) + { + return false; + } + + medium = IONetworkMedium::getMediumWithType( mediumDict, + kIOMediumEthernetAuto ); + + setCurrentMedium( medium ); + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +/* + * Kernel Debugger Support + */ +void BMacEnet::sendPacket( void * pkt, UInt32 pkt_len ) +{ + _sendPacket(pkt, pkt_len); +} + +void BMacEnet::receivePacket( void * pkt, + UInt32 * 
pkt_len, + UInt32 timeout ) +{ + _receivePacket(pkt, (UInt *) pkt_len, timeout); +} + +/* + * Create a WorkLoop serialized output queue object. + */ +IOOutputQueue * BMacEnet::createOutputQueue() +{ + return IOGatedOutputQueue::withTarget( this, + getWorkLoop(), + TRANSMIT_QUEUE_SIZE ); +} + +/* + * Power management methods. + */ + +IOReturn BMacEnet::registerWithPolicyMaker(IOService * policyMaker) +{ +#define number_of_power_states 2 + + static IOPMPowerState ourPowerStates[number_of_power_states] = { + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable,IOPMPowerOn,IOPMPowerOn,0,0,0,0,0,0,0,0} + }; + + currentPowerState = 1; + + return policyMaker->registerPowerDriver( this, + ourPowerStates, + number_of_power_states ); +} + +IOReturn BMacEnet::setPowerState( unsigned long powerStateOrdinal, + IOService * whatDevice ) +{ + IOReturn ret = IOPMAckImplied; + + // kprintf("Ethernet(BMac): setPowerState %d\n", powerStateOrdinal); + + if ( currentPowerState == powerStateOrdinal ) + return IOPMAckImplied; + + switch ( powerStateOrdinal ) + { + case 0: + kprintf("Ethernet(BMac): powering off\n"); + currentPowerState = powerStateOrdinal; + break; + + case 1: + kprintf("Ethernet(BMac): powering on\n"); + currentPowerState = powerStateOrdinal; + break; + + default: + ret = IOPMNoSuchState; + break; + } + + return ret; +} diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnet.h b/iokit/Drivers/network/drvPPCBMac/BMacEnet.h new file mode 100644 index 000000000..d1cda0c95 --- /dev/null +++ b/iokit/Drivers/network/drvPPCBMac/BMacEnet.h @@ -0,0 +1,272 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * Interface definition for the BMac Ethernet Controller + * + * HISTORY + * + * Dec 10, 1998 jliu + * Converted to IOKit/C++. + */ + +#ifndef _BMACENET_H +#define _BMACENET_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* bcopy */ +#include "BMacEnetRegisters.h" + +extern "C" { +#include +#include +} + +// No kernel tracing support at this time. 
+// +#define KERNEL_DEBUG(x,a,b,c,d,e) + +// #define IOLog kprintf + +typedef void * IOPPCAddress; + +typedef struct enet_dma_cmd_t +{ + IODBDMADescriptor desc_seg[2]; +} enet_dma_cmd_t; + +typedef struct enet_txdma_cmd_t +{ + IODBDMADescriptor desc_seg[3]; +} enet_txdma_cmd_t; + +class BMacEnet : public IOEthernetController +{ + OSDeclareDefaultStructors( BMacEnet ) + +private: + volatile IOPPCAddress ioBaseEnet; + volatile IOPPCAddress ioBaseHeathrow; + volatile IODBDMAChannelRegisters * ioBaseEnetRxDMA; + volatile IODBDMAChannelRegisters * ioBaseEnetTxDMA; + + IOEthernetAddress myAddress; + IOEthernetInterface * networkInterface; + IOGatedOutputQueue * transmitQueue; + IOPacketQueue * debugQueue; + IOKernelDebugger * debugger; + bool isPromiscuous; + bool multicastEnabled; + bool isFullDuplex; + + IOWorkLoop * workLoop; + IOInterruptEventSource * rxIntSrc; + IOMemoryMap * maps[MEMORY_MAP_COUNT]; + IONetworkStats * netStats; + IOTimerEventSource * timerSrc; + IOMbufBigMemoryCursor * mbufCursor; + + bool ready; + bool netifEnabled; + bool debugEnabled; + bool debugTxPoll; + bool useUnicastFilter; + unsigned int enetAddressOffset; + + unsigned long chipId; + + unsigned long phyType; + unsigned long phyMIIDelay; + + unsigned char phyId; + unsigned char sromAddressBits; + + unsigned short phyStatusPrev; + + OSDictionary * mediumDict; + + struct mbuf * txMbuf[TX_RING_LENGTH]; + struct mbuf * rxMbuf[RX_RING_LENGTH]; + struct mbuf * txDebuggerPkt; + + unsigned int txCommandHead; // TX ring descriptor index + unsigned int txCommandTail; + unsigned int txMaxCommand; + unsigned int rxCommandHead; // RX ring descriptor index + unsigned int rxCommandTail; + unsigned int rxMaxCommand; + + struct { + void * ptr; + u_int size; + void * ptrReal; + u_int sizeReal; + } dmaMemory; + + unsigned char * dmaCommands; + enet_txdma_cmd_t * txDMACommands; // TX descriptor ring ptr + unsigned int txDMACommandsPhys; + + enet_dma_cmd_t * rxDMACommands; // RX descriptor ring ptr + unsigned 
int rxDMACommandsPhys; + + u_int32_t txWDInterrupts; + u_int32_t txWDCount; + + void * debuggerPkt; + u_int32_t debuggerPktSize; + + u_int16_t statReg; // Current STAT register contents + + u_int16_t hashTableUseCount[64]; + u_int16_t hashTableMask[4]; + + unsigned long currentPowerState; + + bool _allocateMemory(); + bool _initTxRing(); + bool _initRxRing(); + bool _initChip(); + void _resetChip(); + void _disableAdapterInterrupts(); + void _enableAdapterInterrupts(); + void _setDuplexMode(bool duplexMode); + void _startChip(); + bool _updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t * desc, + bool isReceive); + void _restartTransmitter(); + void _stopTransmitDMA(); + bool _transmitPacket(struct mbuf * packet); + bool _transmitInterruptOccurred(); + bool _debugTransmitInterruptOccurred(); + bool _receiveInterruptOccurred(); + bool _rejectBadUnicastPacket(ether_header_t * etherHeader); + bool _receivePackets(bool fDebugger); + void _packetToDebugger(struct mbuf * packet, u_int size); + void _restartReceiver(); + void _stopReceiveDMA(); + bool _resetAndEnable(bool enable); + void _sendDummyPacket(); + void _resetHashTableMask(); + void _addToHashTableMask(u_int8_t * addr); + void _removeFromHashTableMask(u_int8_t * addr); + void _updateBMacHashTableMask(); + bool createMediumTables(); + +#ifdef DEBUG + void _dumpRegisters(); + void _dumpDesc(void * addr, u_int32_t size); + void _dump_srom(); +#endif DEBUG + + void _sendPacket(void * pkt, unsigned int pkt_len); + void _receivePacket(void * pkt, unsigned int * pkt_len, + unsigned int timeout); + + void sendPacket(void * pkt, UInt32 pkt_len); + void receivePacket(void * pkt, UInt32 * pkt_len, + UInt32 timeout); + + bool miiReadWord(unsigned short * dataPtr, unsigned short reg, + unsigned char phy); + bool miiWriteWord(unsigned short data, unsigned short reg, + unsigned char phy); + void miiWrite(unsigned int miiData, unsigned int dataSize); + int miiReadBit(); + bool miiCheckZeroBit(); + void 
miiOutThreeState(); + bool miiResetPHY(unsigned char phy); + bool miiWaitForLink(unsigned char phy); + bool miiWaitForAutoNegotiation(unsigned char phy); + void miiRestartAutoNegotiation(unsigned char phy); + bool miiFindPHY(unsigned char * phy_num); + bool miiInitializePHY(unsigned char phy); + + UInt32 outputPacket(struct mbuf * m, void * param); + + void interruptOccurredForSource(IOInterruptEventSource * src, int count); + + void timeoutOccurred(IOTimerEventSource * timer); + + void monitorLinkStatus( bool firstPoll = false ); + +public: + virtual bool init(OSDictionary * properties = 0); + virtual bool start(IOService * provider); + virtual void free(); + + virtual bool createWorkLoop(); + virtual IOWorkLoop * getWorkLoop() const; + + virtual IOReturn enable(IONetworkInterface * netif); + virtual IOReturn disable(IONetworkInterface * netif); + + virtual IOReturn getHardwareAddress(IOEthernetAddress * addr); + + virtual IOReturn setMulticastMode(IOEnetMulticastMode mode); + virtual IOReturn setMulticastList(IOEthernetAddress * addrs, UInt32 count); + + virtual IOReturn setPromiscuousMode(IOEnetPromiscuousMode mode); + + virtual IOOutputQueue * createOutputQueue(); + + virtual const OSString * newVendorString() const; + virtual const OSString * newModelString() const; + virtual const OSString * newRevisionString() const; + + virtual IOReturn enable(IOKernelDebugger * debugger); + virtual IOReturn disable(IOKernelDebugger * debugger); + + virtual bool configureInterface(IONetworkInterface * netif); + + // Simple power management support: + virtual IOReturn setPowerState( UInt32 powerStateOrdinal, + IOService * whatDevice ); + + virtual IOReturn registerWithPolicyMaker(IOService * policyMaker); +}; + +/* + * Performance tracepoints + * + * DBG_BMAC_RXIRQ - Receive ISR run time + * DBG_BMAC_TXIRQ - Transmit ISR run time + * DBG_BMAC_TXQUEUE - Transmit packet passed from network stack + * DBG_BMAC_TXCOMPLETE - Transmit packet sent + * DBG_BMAC_RXCOMPLETE - Receive 
packet passed to network stack + */ +#define DBG_BMAC_ENET 0x0900 +#define DBG_BMAC_RXIRQ DRVDBG_CODE(DBG_DRVNETWORK,(DBG_BMAC_ENET+1)) +#define DBG_BMAC_TXIRQ DRVDBG_CODE(DBG_DRVNETWORK,(DBG_BMAC_ENET+2)) +#define DBG_BMAC_TXQUEUE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_BMAC_ENET+3)) +#define DBG_BMAC_TXCOMPLETE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_BMAC_ENET+4)) +#define DBG_BMAC_RXCOMPLETE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_BMAC_ENET+5)) + +#endif /* !_BMACENET_H */ diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnetHW.cpp b/iokit/Drivers/network/drvPPCBMac/BMacEnetHW.cpp new file mode 100644 index 000000000..a64bd544f --- /dev/null +++ b/iokit/Drivers/network/drvPPCBMac/BMacEnetHW.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * Miscellaneous definitions for the BMac Ethernet controller. 
+ * + * HISTORY + * + */ + +#include "BMacEnetRegisters.h" +#include "BMacEnetPrivate.h" +#include + +void WriteBigMacRegister( IOPPCAddress ioBaseEnet, u_int32_t reg_offset, u_int16_t data ) +{ + OSWriteSwapInt16( ioBaseEnet, reg_offset, data ); + eieio(); +} + + +volatile u_int16_t ReadBigMacRegister( IOPPCAddress ioBaseEnet, u_int32_t reg_offset ) +{ + return OSReadSwapInt16( ioBaseEnet, reg_offset ); +} + +/* + * Procedure for reading EEPROM + */ +#define kSROMAddressLength 5 +#define kDataInOn 0x0008 +#define kDataInOff 0x0000 +#define kClk 0x0002 +#define kChipSelect 0x0001 +#define kSDIShiftCount 3 +#define kSD0ShiftCount 2 +#define kDelayValue 1000 // number of microseconds + +#define kSROMStartOffset 10 // this is in words +#define kSROMReadCount 3 // number of words to read from SROM + +static unsigned char clock_out_bit(IOPPCAddress base) +{ + u_int16_t data; + u_int16_t val; + + WriteBigMacRegister(base, kSROMCSR, kChipSelect | kClk); + IODelay(kDelayValue); + + data = ReadBigMacRegister(base, kSROMCSR); + IODelay(kDelayValue); + val = (data >> kSD0ShiftCount) & 1; + + WriteBigMacRegister(base, kSROMCSR, kChipSelect); + IODelay(kDelayValue); + + return val; +} + +static void clock_in_bit(IOPPCAddress base, unsigned int val) +{ + u_int16_t data; + + if (val != 0 && val != 1) + { + IOLog("bogus data in clock_in_bit\n"); + return; + } + + data = (val << kSDIShiftCount); + WriteBigMacRegister(base, kSROMCSR, data | kChipSelect ); + IODelay(kDelayValue); + + WriteBigMacRegister(base, kSROMCSR, data | kChipSelect | kClk ); + IODelay(kDelayValue); + + WriteBigMacRegister(base, kSROMCSR, data | kChipSelect); + IODelay(kDelayValue); +} + +void reset_and_select_srom(IOPPCAddress base) +{ + /* first reset */ + WriteBigMacRegister(base, kSROMCSR, 0); + IODelay(kDelayValue); + + /* send it the read command (110) */ + clock_in_bit(base, 1); + clock_in_bit(base, 1); + clock_in_bit(base, 0); +} + +unsigned short read_srom(IOPPCAddress base, unsigned int addr, + 
unsigned int addr_len) +{ + unsigned short data, val; + unsigned int i; + + /* send out the address we want to read from */ + for (i = 0; i < addr_len; i++) { + val = addr >> (addr_len-i-1); + clock_in_bit(base, val & 1); + } + + /* Now read in the 16-bit data */ + data = 0; + for (i = 0; i < 16; i++) { + val = clock_out_bit(base); + data <<= 1; + data |= val; + } + WriteBigMacRegister(base, kSROMCSR, 0); + + return data; +} diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnetMII.cpp b/iokit/Drivers/network/drvPPCBMac/BMacEnetMII.cpp new file mode 100644 index 000000000..4f9816368 --- /dev/null +++ b/iokit/Drivers/network/drvPPCBMac/BMacEnetMII.cpp @@ -0,0 +1,386 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * MII/PHY (National Semiconductor DP83840/DP83840A) support methods. + * It is general enough to work with most MII/PHYs. + * + * HISTORY + * + */ + +#include "BMacEnet.h" +#include "BMacEnetPrivate.h" + +/* + * Read from MII/PHY registers. 
+ */ +bool BMacEnet::miiReadWord(unsigned short *dataPtr, unsigned short reg, + unsigned char phy) +{ + int i; + miiFrameUnion frame; + unsigned short phyreg; + bool ret = true; + + do + { + // Write preamble + // + miiWrite(MII_FRAME_PREAMBLE, MII_FRAME_SIZE); + + if ( miiCheckZeroBit() == true ) + { +// IOLog("Ethernet(BMac): MII not floating before read\n\r"); + ret = false; + break; + } + + // Prepare command frame + // + frame.data = MII_FRAME_READ; + frame.bit.regad = reg; + frame.bit.phyad = phy; + + // write ST, OP, PHYAD, REGAD in the MII command frame + // + miiWrite(frame.data, 14); + + // Hi-Z state + // Make sure the PHY generated a zero bit after the 2nd Hi-Z bit + // + + miiOutThreeState(); + + if (miiCheckZeroBit() == false) + { +// IOLog("Ethernet(BMac): MII not driven after turnaround\n\r"); + ret = false; + break; + } + + // read 16-bit data + // + phyreg = 0; + for (i = 0; i < 16; i++) + { + phyreg = miiReadBit() | (phyreg << 1); + } + if (dataPtr) + *dataPtr = phyreg; + + // Hi-Z state + miiOutThreeState(); + + if (miiCheckZeroBit() == true) + { +// IOLog("Ethernet(BMac): MII not floating after read\n\r"); + ret = false; + break; + } + } + while ( 0 ); + + return ret; +} + +/* + * Write to MII/PHY registers. 
+ */ +bool BMacEnet::miiWriteWord(unsigned short data, unsigned short reg, + unsigned char phy) +{ + miiFrameUnion frame; + bool ret = true; + + do + { + // Write preamble + // + miiWrite(MII_FRAME_PREAMBLE, MII_FRAME_SIZE); + + if (miiCheckZeroBit() == true) + { + ret = false; + break; + } + + // Prepare command frame + // + frame.data = MII_FRAME_WRITE; + frame.bit.regad = reg; + frame.bit.phyad = phy; + frame.bit.data = data; + + // Write command frame + // + miiWrite(frame.data, MII_FRAME_SIZE); + + // Hi-Z state + miiOutThreeState(); + + if (miiCheckZeroBit() == true) + { + ret = false; + break; + } + } + while ( 0 ); + + return ret; +} + +/* + * Write 'dataSize' number of bits to the MII management interface, + * starting with the most significant bit of 'miiData'. + * + */ +void BMacEnet::miiWrite(unsigned int miiData, unsigned int dataSize) +{ + int i; + u_int16_t regValue; + + regValue = kMIFCSR_DataOutEnable; + + for (i = dataSize; i > 0; i--) + { + int bit = ((miiData & 0x80000000) ? kMIFCSR_DataOut : 0); + + regValue &= ~(kMIFCSR_Clock | kMIFCSR_DataOut) ; + regValue |= bit; + WriteBigMacRegister(ioBaseEnet, kMIFCSR, regValue); + IODelay(phyMIIDelay); + + regValue |= kMIFCSR_Clock; + WriteBigMacRegister(ioBaseEnet, kMIFCSR, regValue ); + IODelay(phyMIIDelay); + + miiData = miiData << 1; + } +} + +/* + * Read one bit from the MII management interface. + */ +int BMacEnet::miiReadBit() +{ + u_int16_t regValue; + u_int16_t regValueRead; + + regValue = 0; + + WriteBigMacRegister(ioBaseEnet, kMIFCSR, regValue); + IODelay(phyMIIDelay); + + regValue |= kMIFCSR_Clock; + WriteBigMacRegister(ioBaseEnet, kMIFCSR, regValue); + IODelay(phyMIIDelay); + + regValueRead = ReadBigMacRegister(ioBaseEnet, kMIFCSR); + IODelay(phyMIIDelay); // delay next invocation of this routine + + return ( (regValueRead & kMIFCSR_DataIn) ? 1 : 0 ); +} + +/* + * Read the zero bit on the second clock of the turn-around (TA) + * when reading a PHY register. 
+ */ +bool BMacEnet::miiCheckZeroBit() +{ + u_int16_t regValue; + + regValue = ReadBigMacRegister(ioBaseEnet, kMIFCSR); + + return (((regValue & kMIFCSR_DataIn) == 0) ? true : false ); +} + +/* + * Tri-state the STA's MDIO pin. + */ +void BMacEnet::miiOutThreeState() +{ + u_int16_t regValue; + + regValue = 0; + WriteBigMacRegister(ioBaseEnet, kMIFCSR, regValue); + IODelay(phyMIIDelay); + + regValue |= kMIFCSR_Clock; + WriteBigMacRegister(ioBaseEnet, kMIFCSR, regValue); + IODelay(phyMIIDelay); +} + +bool BMacEnet::miiResetPHY(unsigned char phy) +{ + int i = MII_RESET_TIMEOUT; + unsigned short mii_control; + + // Set the reset bit + // + miiWriteWord(MII_CONTROL_RESET, MII_CONTROL, phy); + + IOSleep(MII_RESET_DELAY); + + // Wait till reset process is complete (MII_CONTROL_RESET returns to zero) + // + while (i > 0) + { + if (miiReadWord(&mii_control, MII_CONTROL, phy) == false) + return false; + + if (!(mii_control & MII_CONTROL_RESET)) + { + miiReadWord(&mii_control, MII_CONTROL, phy); + mii_control &= ~MII_CONTROL_ISOLATE; + miiWriteWord(mii_control, MII_CONTROL, phy); + return true; + } + + IOSleep(MII_RESET_DELAY); + i -= MII_RESET_DELAY; + } + return false; +} + +bool BMacEnet::miiWaitForLink(unsigned char phy) +{ + int i = MII_LINK_TIMEOUT; + unsigned short mii_status; + + while (i > 0) + { + if (miiReadWord(&mii_status, MII_STATUS, phy) == false) + return false; + + if (mii_status & MII_STATUS_LINK_STATUS) + return true; + + IOSleep(MII_LINK_DELAY); + i -= MII_LINK_DELAY; + } + return false; +} + +bool BMacEnet::miiWaitForAutoNegotiation(unsigned char phy) +{ + int i = MII_LINK_TIMEOUT; + unsigned short mii_status; + + while (i > 0) + { + if (miiReadWord(&mii_status, MII_STATUS, phy) == false) + return false; + + if (mii_status & MII_STATUS_NEGOTIATION_COMPLETE) + return true; + + IOSleep(MII_LINK_DELAY); + i -= MII_LINK_DELAY; + } + return false; +} + +void BMacEnet::miiRestartAutoNegotiation(unsigned char phy) +{ + unsigned short mii_control; + + 
miiReadWord(&mii_control, MII_CONTROL, phy); + mii_control |= MII_CONTROL_RESTART_NEGOTIATION; + miiWriteWord(mii_control, MII_CONTROL, phy); + + /* + * If the system is not connected to the network, then auto-negotiation + * never completes and we hang in this loop! + */ +#if 0 + while (1) + { + miiReadWord(&mii_control, MII_CONTROL, phy); + if ((mii_control & MII_CONTROL_RESTART_NEGOTIATION) == 0) + break; + } +#endif +} + +/* + * Find the first PHY device on the MII interface. + * + * Return + * true PHY found + * false PHY not found + */ +bool BMacEnet::miiFindPHY(unsigned char *phy) +{ + int i; + + *phy = 0xff; + + // The first two PHY registers are required. + // + for (i = 0; i < MII_MAX_PHY; i++) + { + if (miiReadWord(NULL, MII_STATUS, i) && + miiReadWord(NULL, MII_CONTROL, i)) + break; + } + + if (i >= MII_MAX_PHY) + return false; + + *phy = i; + + return true; +} + +/* + * + * + */ +bool BMacEnet::miiInitializePHY(unsigned char phy) +{ + u_int16_t phyWord; + + // Clear then set the enable auto-negotiation bit + // + miiReadWord(&phyWord, MII_CONTROL, phy); + phyWord &= ~MII_CONTROL_AUTONEGOTIATION; + miiWriteWord(phyWord, MII_CONTROL, phy); + + // Advertise 10/100 Half/Full duplex capable to link partner + // + miiReadWord(&phyWord, MII_ADVERTISEMENT, phy); + phyWord |= (MII_ANAR_100BASETX_FD | MII_ANAR_100BASETX | + MII_ANAR_10BASET_FD | MII_ANAR_10BASET ); + miiWriteWord(phyWord, MII_ADVERTISEMENT, phy); + + // Set enable auto-negotiation bit + // + miiReadWord(&phyWord, MII_CONTROL, phy); + phyWord |= MII_CONTROL_AUTONEGOTIATION; + miiWriteWord(phyWord, MII_CONTROL, phy); + + miiRestartAutoNegotiation(phy); + + return true; +} diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnetMII.h b/iokit/Drivers/network/drvPPCBMac/BMacEnetMII.h new file mode 100644 index 000000000..411876fc2 --- /dev/null +++ b/iokit/Drivers/network/drvPPCBMac/BMacEnetMII.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * MII protocol and PHY register definitions. 
+ * + * HISTORY + * + */ + +#ifndef _BMACENETMII_H +#define _BMACENETMII_H + +/* + * MII command frame (32-bits) as documented in IEEE 802.3u + */ + +// _BIG_ENDIAN is already defined for PPC +// +#if 0 +#ifdef __PPC__ +#define _BIG_ENDIAN +#endif +#endif /* 0 */ + +typedef union { + unsigned int data; +#ifdef _BIG_ENDIAN + struct { + unsigned int + st:2, // start of frame +#define MII_ST 0x01 + op:2, // operation code +#define MII_OP_READ 0x02 +#define MII_OP_WRITE 0x01 + phyad:5, // PHY address + regad:5, // register address + ta:2, // turnaround + data:16; // 16-bit data field + } bit; +#else _BIG_ENDIAN + struct { + unsigned int + data:16, // 16-bit data field + ta:2, // turnaround + regad:5, // register address + phyad:5, // PHY address + op:2, // operation code + st:2; // start of frame + } bit; +#endif _BIG_ENDIAN +} miiFrameUnion; + +#define MII_FRAME_PREAMBLE 0xFFFFFFFF +#define MII_FRAME_SIZE 32 +#define MII_FRAME_READ 0x60000000 +#define MII_FRAME_WRITE 0x50020000 + +#define MII_MAX_PHY 32 + +/* MII Registers */ +#define MII_CONTROL 0 +#define MII_STATUS 1 +#define MII_ID0 2 +#define MII_ID1 3 +#define MII_ADVERTISEMENT 4 +#define MII_LINKPARTNER 5 +#define MII_EXPANSION 6 +#define MII_NEXTPAGE 7 + +/* MII Control register bits */ +#define MII_CONTROL_RESET 0x8000 +#define MII_CONTROL_LOOPBACK 0x4000 +#define MII_CONTROL_SPEED_SELECTION 0x2000 +#define MII_CONTROL_AUTONEGOTIATION 0x1000 +#define MII_CONTROL_POWERDOWN 0x800 +#define MII_CONTROL_ISOLATE 0x400 +#define MII_CONTROL_RESTART_NEGOTIATION 0x200 +#define MII_CONTROL_FULLDUPLEX 0x100 +#define MII_CONTROL_COLLISION_TEST 0x80 + +/* MII Status register bits */ +#define MII_STATUS_100BASET4 0x8000 +#define MII_STATUS_100BASETX_FD 0x4000 +#define MII_STATUS_100BASETX 0x2000 +#define MII_STATUS_10BASET_FD 0x1000 +#define MII_STATUS_10BASET 0x800 +#define MII_STATUS_NEGOTIATION_COMPLETE 0x20 +#define MII_STATUS_REMOTE_FAULT 0x10 +#define MII_STATUS_NEGOTIATION_ABILITY 0x8 +#define MII_STATUS_LINK_STATUS 
0x4 +#define MII_STATUS_JABBER_DETECT 0x2 +#define MII_STATUS_EXTENDED_CAPABILITY 0x1 + +/* MII ANAR register bits */ +#define MII_ANAR_100BASET4 0x200 +#define MII_ANAR_100BASETX_FD 0x100 +#define MII_ANAR_100BASETX 0x80 +#define MII_ANAR_10BASET_FD 0x40 +#define MII_ANAR_10BASET 0x20 + +/* MII ST10040 Specific */ + +/* MII ST10040 ID */ +#define MII_ST10040_OUI 0x1e0400 +#define MII_ST10040_MODEL 0x00 +#define MII_ST10040_REV 0x01 +#define MII_ST10040_ID ((MII_ST10040_OUI << 10) | \ + (MII_ST10040_MODEL << 4)) +#define MII_ST10040_MASK 0xfffffff0 + +#define MII_ST10040_DELAY 1 + +/* MII ST10040 Regs */ +#define MII_ST10040_CHIPST 0x14 + +/* MII ST10040 CHIPST register bits */ +#define MII_ST10040_CHIPST_LINK 0x2000 +#define MII_ST10040_CHIPST_DUPLEX 0x1000 +#define MII_ST10040_CHIPST_SPEED 0x0800 +#define MII_ST10040_CHIPST_NEGOTIATION 0x0020 + +/* MII DP83843 Specific */ + +/* MII DP83843 ID */ +#define MII_DP83843_OUI 0x080017 +#define MII_DP83843_MODEL 0x01 +#define MII_DP83843_REV 0x00 +#define MII_DP83843_ID ((MII_DP83843_OUI << 10) | \ + (MII_DP83843_MODEL << 4)) +#define MII_DP83843_MASK 0xfffffff0 + +#define MII_DP83843_DELAY 20 + +/* MII DP83843 PHYSTS register bits */ +#define MII_DP83843_PHYSTS 0x10 +#define MII_DP83843_PHYSTS_LINK 0x0001 +#define MII_DP83843_PHYSTS_SPEED10 0x0002 +#define MII_DP83843_PHYSTS_DUPLEX 0x0004 +#define MII_DP83843_PHYSTS_NEGOTIATION 0x0020 + + +/* MII timeout */ +#define MII_DEFAULT_DELAY 20 +#define MII_RESET_TIMEOUT 100 +#define MII_RESET_DELAY 10 + +#define MII_LINK_TIMEOUT 2500 +#define MII_LINK_DELAY 20 + +#endif /* _BMACENETMII_H */ diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.cpp b/iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.cpp new file mode 100644 index 000000000..ba5a2eabc --- /dev/null +++ b/iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.cpp @@ -0,0 +1,1958 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * Implementation for hardware dependent (relatively) code + * for the BMac Ethernet controller. + * + * HISTORY + * + */ + +#include +#include + +#include "BMacEnetPrivate.h" +#include + +/***************************************************************************** + * + * Hacks. + */ + +typedef unsigned long long ns_time_t; /* nanoseconds! */ + +#define NSEC_PER_SEC 1000000000 + +static void +_IOGetTimestamp(ns_time_t *nsp) +{ + mach_timespec_t now; + + IOGetTime(&now); + *nsp = ((ns_time_t)now.tv_sec * NSEC_PER_SEC) + now.tv_nsec; +} + +/* + * Find a physical address (if any) for the specified virtual address. 
+ * + * Note: what about vm_offset_t kvtophys(vm_offset_t va) + */ +static IOReturn _IOPhysicalFromVirtual( + vm_address_t virtualAddress, + unsigned *physicalAddress) +{ + *physicalAddress = pmap_extract(kernel_pmap, virtualAddress); + if(*physicalAddress == 0) { + return kIOReturnBadArgument; + } + else { + return kIOReturnSuccess; + } +} + +/****************************************************************************/ + + +extern kern_return_t kmem_alloc_wired(); + +static IODBDMADescriptor dbdmaCmd_Nop; +static IODBDMADescriptor dbdmaCmd_NopWInt; +static IODBDMADescriptor dbdmaCmd_LoadInt; +static IODBDMADescriptor dbdmaCmd_LoadIntWInt; +static IODBDMADescriptor dbdmaCmd_Stop; +static IODBDMADescriptor dbdmaCmd_Branch; + +static u_int8_t reverseBitOrder(u_int8_t data ) +{ + u_int8_t val = 0; + int i; + + for ( i=0; i < 8; i++ ) + { + val <<= 1; + if (data & 1) val |= 1; + data >>= 1; + } + return( val ); +} + +/* + * Function: IOMallocPage + * + * Purpose: + * Returns a pointer to a page-aligned memory block of size >= PAGE_SIZE + * + * Return: + * Actual pointer and size of block returned in actual_ptr and actual_size. 
+ * Use these as arguments to kfree: kfree(*actual_ptr, *actual_size); + */ +static void * +IOMallocPage(int request_size, void ** actual_ptr, u_int * actual_size) +{ + void * mem_ptr; + + *actual_size = round_page(request_size) + PAGE_SIZE; + mem_ptr = IOMalloc(*actual_size); + if (mem_ptr == NULL) + return NULL; + *actual_ptr = mem_ptr; + return ((void *)round_page(mem_ptr)); +} + +/* + * Private functions + */ +bool BMacEnet::_allocateMemory() +{ + u_int32_t i, n; + unsigned char * virtAddr; + u_int32_t physBase; + u_int32_t physAddr; + u_int32_t dbdmaSize; + + /* + * Calculate total space for DMA channel commands + */ + dbdmaSize = round_page( + RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + + TX_RING_LENGTH * sizeof(enet_txdma_cmd_t) + + 2 * sizeof(IODBDMADescriptor) ); + + /* + * Allocate required memory + */ + dmaMemory.size = dbdmaSize; + dmaMemory.ptr = (void *)IOMallocPage( + dmaMemory.size, + &dmaMemory.ptrReal, + &dmaMemory.sizeReal + ); + + dmaCommands = (unsigned char *) dmaMemory.ptr; + if (dmaCommands == NULL) { + IOLog( "Ethernet(BMac): Cant allocate channel DBDMA commands\n\r" ); + return false; + } + + /* + * If we needed more than one page, then make sure we received + * contiguous memory. 
+ */ + n = (dbdmaSize - PAGE_SIZE) / PAGE_SIZE; + _IOPhysicalFromVirtual( (vm_address_t) dmaCommands, &physBase ); + + virtAddr = (unsigned char *) dmaCommands; + for( i=0; i < n; i++, virtAddr += PAGE_SIZE ) + { + _IOPhysicalFromVirtual( (vm_address_t) virtAddr, &physAddr ); + if (physAddr != (physBase + i * PAGE_SIZE) ) + { + IOLog( "Ethernet(BMac): Cannot allocate contiguous memory" + " for DBDMA commands\n\r" ); + return false; + } + } + + /* + * Setup the receive ring pointers + */ + rxDMACommands = (enet_dma_cmd_t *)dmaCommands; + rxMaxCommand = RX_RING_LENGTH; + + /* + * Setup the transmit ring pointers + */ + txDMACommands = (enet_txdma_cmd_t *)(dmaCommands + + RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + sizeof(IODBDMADescriptor)); + txMaxCommand = TX_RING_LENGTH; + + /* + * Setup pre-initialized DBDMA commands + */ + IOMakeDBDMADescriptor( (&dbdmaCmd_Nop), + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0); + + IOMakeDBDMADescriptor( (&dbdmaCmd_NopWInt), + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntAlways, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0); + + UInt32 ioBaseEnetPhys = maps[MEMORY_MAP_ENET_INDEX]->getPhysicalAddress(); + + IOMakeDBDMADescriptor( (&dbdmaCmd_LoadInt), + kdbdmaLoadQuad, + kdbdmaKeySystem, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 2, + ((int)ioBaseEnetPhys + kSTAT) ); + + IOMakeDBDMADescriptor( (&dbdmaCmd_LoadIntWInt), + kdbdmaLoadQuad, + kdbdmaKeySystem, + kdbdmaIntAlways, + kdbdmaBranchNever, + kdbdmaWaitNever, + 2, + ((int)ioBaseEnetPhys + kSTAT) ); + + IOMakeDBDMADescriptor( (&dbdmaCmd_Stop), + kdbdmaStop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0); + + IOMakeDBDMADescriptor( (&dbdmaCmd_Branch), + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchAlways, + kdbdmaWaitNever, + 0, + 0); + + return true; +} + +/*------------------------------------------------------------------------- + * + * Setup the 
Transmit Ring + * ----------------------- + * Each transmit ring entry consists of two words to transmit data from buffer + * segments (possibly) spanning a page boundary. This is followed by two DMA + * commands which read transmit frame status and interrupt status from the Bmac + * chip. The last DMA command in each transmit ring entry generates a host + * interrupt. The last entry in the ring is followed by a DMA branch to the + * first entry. + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_initTxRing() +{ + bool kr; + u_int32_t i; + IODBDMADescriptor dbdmaCmd, dbdmaCmdInt; + + /* + * Clear mbufs from TX ring. + */ + for ( i = 0; i < txMaxCommand; i++ ) + { + if ( txMbuf[i] ) + { + freePacket( txMbuf[i] ); + txMbuf[i] = 0; + } + } + + /* + * Clear the transmit DMA command memory + */ + bzero( (void *)txDMACommands, sizeof(enet_txdma_cmd_t) * txMaxCommand); + txCommandHead = 0; + txCommandTail = 0; + + /* + * DMA Channel commands 2 are the same for all DBDMA entries on transmit. + * Initialize them now. + */ + + dbdmaCmd = ( chipId >= kCHIPID_PaddingtonXmitStreaming ) ? dbdmaCmd_Nop : dbdmaCmd_LoadInt; + dbdmaCmdInt = ( chipId >= kCHIPID_PaddingtonXmitStreaming ) ? dbdmaCmd_NopWInt : dbdmaCmd_LoadIntWInt; + + for( i=0; i < txMaxCommand; i++ ) + { + txDMACommands[i].desc_seg[2] = ( (i+1) % TX_PKTS_PER_INT ) ? dbdmaCmd : dbdmaCmdInt; + } + + /* + * Put a DMA Branch command after the last entry in the transmit ring. + * Set the branch address to the physical address of the start of the + * transmit ring. 
+ */ + txDMACommands[txMaxCommand].desc_seg[0] = dbdmaCmd_Branch; + + kr = _IOPhysicalFromVirtual( (vm_address_t) txDMACommands, + (u_int32_t *)&txDMACommandsPhys ); + if ( kr != kIOReturnSuccess ) + { + IOLog( "Ethernet(BMac): Bad DBDMA command buf - %08x\n\r", + (u_int32_t)txDMACommands ); + } + IOSetCCCmdDep( &txDMACommands[txMaxCommand].desc_seg[0], + txDMACommandsPhys ); + + /* + * Set the Transmit DMA Channel pointer to the first entry in the + * transmit ring. + */ + IOSetDBDMACommandPtr( ioBaseEnetTxDMA, txDMACommandsPhys ); + + return true; +} + +/*------------------------------------------------------------------------- + * + * Setup the Receive ring + * ---------------------- + * Each receive ring entry consists of two DMA commands to receive data + * into a network buffer (possibly) spanning a page boundary. The second + * DMA command in each entry generates a host interrupt. + * The last entry in the ring is followed by a DMA branch to the first + * entry. + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_initRxRing() +{ + u_int32_t i; + bool status; + IOReturn kr; + + /* + * Clear the receive DMA command memory + */ + bzero((void *)rxDMACommands, sizeof(enet_dma_cmd_t) * rxMaxCommand); + + kr = _IOPhysicalFromVirtual( (vm_address_t) rxDMACommands, + (u_int32_t *)&rxDMACommandsPhys ); + if ( kr != kIOReturnSuccess ) + { + IOLog( "Ethernet(BMac): Bad DBDMA command buf - %08x\n\r", + (u_int32_t)rxDMACommands ); + return false; + } + + /* + * Allocate a receive buffer for each entry in the Receive ring + */ + for (i = 0; i < rxMaxCommand-1; i++) + { + if (rxMbuf[i] == NULL) + { + rxMbuf[i] = allocatePacket(NETWORK_BUFSIZE); + if (!rxMbuf[i]) + { + IOLog("Ethernet(BMac): allocatePacket failed\n"); + return false; + } + } + + /* + * Set the DMA commands for the ring entry to transfer data to the + * mbuf. 
+ */ + status = _updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true); + if (status == false) + { + IOLog("Ethernet(BMac): cannot map mbuf to physical memory in" + " _initRxRing\n\r"); + return false; + } + } + + /* + * Set the receive queue head to point to the first entry in the ring. + * Set the receive queue tail to point to a DMA Stop command after the + * last ring entry + */ + rxCommandHead = 0; + rxCommandTail = i; + + rxDMACommands[i].desc_seg[0] = dbdmaCmd_Stop; + rxDMACommands[i].desc_seg[1] = dbdmaCmd_Nop; + + /* + * Setup a DMA branch command after the stop command + */ + i++; + rxDMACommands[i].desc_seg[0] = dbdmaCmd_Branch; + + IOSetCCCmdDep( &rxDMACommands[i].desc_seg[0], rxDMACommandsPhys ); + + /* + * Set DMA command pointer to first receive entry + */ + IOSetDBDMACommandPtr (ioBaseEnetRxDMA, rxDMACommandsPhys); + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_startChip() +{ + u_int16_t oldConfig; + + IODBDMAContinue( ioBaseEnetRxDMA ); + + // turn on rx plus any other bits already on (promiscuous possibly) + oldConfig = ReadBigMacRegister(ioBaseEnet, kRXCFG); + WriteBigMacRegister(ioBaseEnet, kRXCFG, oldConfig | kRxMACEnable ); + + oldConfig = ReadBigMacRegister(ioBaseEnet, kTXCFG); + WriteBigMacRegister(ioBaseEnet, kTXCFG, oldConfig | kTxMACEnable ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_resetChip() +{ + volatile u_int32_t *heathrowFCR; + u_int32_t fcrValue; + u_int16_t *pPhyType; + + IODBDMAReset( ioBaseEnetRxDMA ); + IODBDMAReset( ioBaseEnetTxDMA ); + + IOSetDBDMAWaitSelect( ioBaseEnetTxDMA, + IOSetDBDMAChannelControlBits( kdbdmaS5 ) ); + + IOSetDBDMABranchSelect( ioBaseEnetRxDMA, + IOSetDBDMAChannelControlBits( kdbdmaS6 ) ); + 
+ IOSetDBDMAInterruptSelect( ioBaseEnetRxDMA, + IOSetDBDMAChannelControlBits( kdbdmaS6 ) ); + + heathrowFCR = (u_int32_t *)((u_int8_t *)ioBaseHeathrow + kHeathrowFCR); + + fcrValue = *heathrowFCR; + eieio(); + + fcrValue = OSReadSwapInt32( &fcrValue, 0 ); + + /* + * Enable the ethernet transceiver/clocks + */ + fcrValue |= kEnetEnabledBits; + fcrValue &= ~kResetEnetCell; + + *heathrowFCR = OSReadSwapInt32( &fcrValue, 0 ); + eieio(); + IOSleep( 100 ); + + /* + * Determine if PHY chip is configured. Reset and enable it (if present). + */ + if ( phyId == 0xff ) + { + phyMIIDelay = 20; + if ( miiFindPHY(&phyId) == true ) + { + miiResetPHY(phyId); + + pPhyType = (u_int16_t *)&phyType; + miiReadWord(pPhyType, MII_ID0, phyId); + miiReadWord(pPhyType+1, MII_ID1, phyId); + + if ( (phyType & MII_ST10040_MASK) == MII_ST10040_ID ) + { + phyMIIDelay = MII_ST10040_DELAY; + } + else if ( (phyType & MII_DP83843_MASK) == MII_DP83843_ID ) + { + phyMIIDelay = MII_DP83843_DELAY; + } + + kprintf("Ethernet(BMac): PHY id = %d\n", phyId); + } + } + + /* + * Reset the reset the ethernet cell + */ + fcrValue |= kResetEnetCell; + *heathrowFCR = OSReadSwapInt32( &fcrValue, 0 ); + eieio(); + IOSleep( 10 ); + + fcrValue &= ~kResetEnetCell; + *heathrowFCR = OSReadSwapInt32( &fcrValue, 0 ); + eieio(); + IOSleep( 10 ); + + chipId = ReadBigMacRegister(ioBaseEnet, kCHIPID) & 0xFF; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_initChip() +{ + volatile u_int16_t regValue; + ns_time_t timeStamp; + u_int16_t *pWord16; + + WriteBigMacRegister(ioBaseEnet, kTXRST, kTxResetBit); + + do + { + // wait for reset to clear..acknowledge + regValue = ReadBigMacRegister(ioBaseEnet, kTXRST); + } + while( regValue & kTxResetBit ); + + WriteBigMacRegister(ioBaseEnet, kRXRST, kRxResetValue); + + if ( phyId == 0xff ) + { + WriteBigMacRegister(ioBaseEnet, kXCVRIF, + kClkBit | 
kSerialMode | kCOLActiveLow); + } + + _IOGetTimestamp(&timeStamp); + WriteBigMacRegister(ioBaseEnet, kRSEED, (u_int16_t) timeStamp ); + + regValue = ReadBigMacRegister(ioBaseEnet, kXIFC); + regValue |= kTxOutputEnable; + WriteBigMacRegister(ioBaseEnet, kXIFC, regValue); + + ReadBigMacRegister(ioBaseEnet, kPAREG); + + // set collision counters to 0 + WriteBigMacRegister(ioBaseEnet, kNCCNT, 0); + WriteBigMacRegister(ioBaseEnet, kNTCNT, 0); + WriteBigMacRegister(ioBaseEnet, kEXCNT, 0); + WriteBigMacRegister(ioBaseEnet, kLTCNT, 0); + + // set rx counters to 0 + WriteBigMacRegister(ioBaseEnet, kFRCNT, 0); + WriteBigMacRegister(ioBaseEnet, kLECNT, 0); + WriteBigMacRegister(ioBaseEnet, kAECNT, 0); + WriteBigMacRegister(ioBaseEnet, kFECNT, 0); + WriteBigMacRegister(ioBaseEnet, kRXCV, 0); + + // set tx fifo information + // 255 octets before tx starts + WriteBigMacRegister(ioBaseEnet, kTXTH, 0xff); + + // first disable txFIFO + WriteBigMacRegister(ioBaseEnet, kTXFIFOCSR, 0); + WriteBigMacRegister(ioBaseEnet, kTXFIFOCSR, kTxFIFOEnable ); + + // set rx fifo information + // first disable rxFIFO + WriteBigMacRegister(ioBaseEnet, kRXFIFOCSR, 0); + WriteBigMacRegister(ioBaseEnet, kRXFIFOCSR, kRxFIFOEnable ); + + // kTxNeverGiveUp maybe later + //WriteBigMacRegister(ioBaseEnet, kTXCFG, kTxMACEnable); + ReadBigMacRegister(ioBaseEnet, kSTAT); // read it just to clear it + + // zero out the chip Hash Filter registers + WriteBigMacRegister(ioBaseEnet, kHASH3, hashTableMask[0]); // bits 15 - 0 + WriteBigMacRegister(ioBaseEnet, kHASH2, hashTableMask[1]); // bits 31 - 16 + WriteBigMacRegister(ioBaseEnet, kHASH1, hashTableMask[2]); // bits 47 - 32 + WriteBigMacRegister(ioBaseEnet, kHASH0, hashTableMask[3]); // bits 63 - 48 + + pWord16 = (u_int16_t *)&myAddress.bytes[0]; + WriteBigMacRegister(ioBaseEnet, kMADD0, *pWord16++); + WriteBigMacRegister(ioBaseEnet, kMADD1, *pWord16++); + WriteBigMacRegister(ioBaseEnet, kMADD2, *pWord16); + + WriteBigMacRegister(ioBaseEnet, kRXCFG, + kRxCRCEnable 
| kRxHashFilterEnable | kRxRejectOwnPackets); + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_disableAdapterInterrupts() +{ + WriteBigMacRegister( ioBaseEnet, kINTDISABLE, kNoEventsMask ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_enableAdapterInterrupts() +{ + WriteBigMacRegister( ioBaseEnet, + kINTDISABLE, + ( chipId >= kCHIPID_PaddingtonXmitStreaming ) ? + kNoEventsMask: kNormalIntEvents ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_setDuplexMode(bool duplexMode) +{ + u_int16_t txCFGVal; + + isFullDuplex = duplexMode; + + txCFGVal = ReadBigMacRegister( ioBaseEnet, kTXCFG); + + WriteBigMacRegister( ioBaseEnet, kTXCFG, txCFGVal & ~kTxMACEnable ); + while( ReadBigMacRegister(ioBaseEnet, kTXCFG) & kTxMACEnable ) + ; + + if ( isFullDuplex ) + { + txCFGVal |= (kTxIgnoreCollision | kTxFullDuplex); + } + else + { + txCFGVal &= ~(kTxIgnoreCollision | kTxFullDuplex); + } + + WriteBigMacRegister( ioBaseEnet, kTXCFG, txCFGVal ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_restartTransmitter() +{ + u_int16_t regValue; + + /* + * Shutdown DMA channel + */ + _stopTransmitDMA(); + + /* + * Get the silicon's attention + */ + WriteBigMacRegister( ioBaseEnet, kTXFIFOCSR, 0 ); + WriteBigMacRegister( ioBaseEnet, kTXFIFOCSR, kTxFIFOEnable); + + ReadBigMacRegister( ioBaseEnet, kSTAT ); + + regValue = ReadBigMacRegister(ioBaseEnet, kTXCFG); + WriteBigMacRegister(ioBaseEnet, kTXCFG, 
regValue | kTxMACEnable ); + + /* + * Restart transmit DMA + */ + IODBDMAContinue( ioBaseEnetTxDMA ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_restartReceiver() +{ + u_int16_t oldConfig; + + /* + * Shutdown DMA channel + */ + _stopReceiveDMA(); + + /* + * Get the silicon's attention + */ + WriteBigMacRegister( ioBaseEnet, kRXFIFOCSR, 0 ); + WriteBigMacRegister( ioBaseEnet, kRXFIFOCSR, kRxFIFOEnable); + + oldConfig = ReadBigMacRegister(ioBaseEnet, kRXCFG); + WriteBigMacRegister(ioBaseEnet, kRXCFG, oldConfig | kRxMACEnable ); + + /* + * Restart receive DMA + */ + IODBDMAContinue( ioBaseEnetRxDMA ); +} + +/*------------------------------------------------------------------------- + * + * Orderly stop of receive DMA. + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_stopReceiveDMA() +{ + u_int32_t dmaCmdPtr; + u_int8_t rxCFGVal; + + /* + * Stop the receiver and allow any frame receive in progress to complete. 
+ */ + rxCFGVal = ReadBigMacRegister(ioBaseEnet, kRXCFG); + WriteBigMacRegister(ioBaseEnet, kRXCFG, rxCFGVal & ~kRxMACEnable ); + IODelay( RECEIVE_QUIESCE_uS ); + + IODBDMAReset( ioBaseEnetRxDMA ); + + dmaCmdPtr = rxDMACommandsPhys + rxCommandHead * sizeof(enet_dma_cmd_t); + IOSetDBDMACommandPtr( ioBaseEnetRxDMA, dmaCmdPtr ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_stopTransmitDMA() +{ + u_int32_t dmaCmdPtr; + u_int8_t txCFGVal; + + /* + * Stop the transmitter and allow any frame transmit in progress to abort + */ + txCFGVal = ReadBigMacRegister(ioBaseEnet, kTXCFG); + WriteBigMacRegister(ioBaseEnet, kTXCFG, txCFGVal & ~kTxMACEnable ); + + IODelay( TRANSMIT_QUIESCE_uS ); + + IODBDMAReset( ioBaseEnetTxDMA ); + + dmaCmdPtr = txDMACommandsPhys + txCommandHead * sizeof(enet_txdma_cmd_t); + IOSetDBDMACommandPtr( ioBaseEnetTxDMA, dmaCmdPtr ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_transmitPacket(struct mbuf *packet) +{ + enet_dma_cmd_t tmpCommand; + u_int32_t i; + + /* + * Check for room on the transmit ring. There should always be space + * since it is the responsibility of the caller to verify this before + * calling _transmitPacket. + * + * Get a copy of the DMA transfer commands in a temporary buffer. + * The new DMA command is written into the channel program so that the + * command word for the old Stop command is overwritten last. This prevents + * the DMA engine from executing a partially written channel command. 
+ */ + i = txCommandTail + 1; + if ( i >= txMaxCommand ) i = 0; + + if ( (i == txCommandHead) || + !_updateDescriptorFromMbuf(packet, &tmpCommand, false) ) + { + IOLog("Ethernet(BMac): Freeing transmit packet eh?\n\r"); + if (packet != txDebuggerPkt) + freePacket(packet); + return false; + } + + /* + * txCommandTail points to the current DMA Stop command for the channel. + * We are now creating a new DMA Stop command in the next slot in the + * transmit ring. The previous DMA Stop command will be overwritten with + * the DMA commands to transfer the new mbuf. + */ + txDMACommands[i].desc_seg[0] = dbdmaCmd_Stop; + txDMACommands[i].desc_seg[1] = dbdmaCmd_Nop; + + bcopy( ((u_int32_t *)&tmpCommand)+1, + ((u_int32_t *)&txDMACommands[txCommandTail])+1, + sizeof(enet_dma_cmd_t)-sizeof(u_int32_t) ); + + txMbuf[txCommandTail] = packet; + txDMACommands[txCommandTail].desc_seg[0].operation = + tmpCommand.desc_seg[0].operation; + + /* + * Set the transmit tail to the new stop command. + */ + txCommandTail = i; + + /* + * Tap the DMA channel to wake it up + */ + IODBDMAContinue( ioBaseEnetTxDMA ); + + return true; +} + +/*------------------------------------------------------------------------- + * _receivePacket + * -------------- + * This routine runs the receiver in polled-mode (yuk!) for the kernel debugger. + * + * The _receivePackets allocate NetBufs and pass them up the stack. The kernel + * debugger interface passes a buffer into us. To reconsile the two interfaces, + * we allow the receive routine to continue to allocate its own buffers and + * transfer any received data to the passed-in buffer. This is handled by + * _receivePacket calling _packetToDebugger. 
+ *-------------------------------------------------------------------------*/ + +void BMacEnet::_receivePacket(void *pkt, unsigned int *pkt_len, + unsigned int timeout) +{ + ns_time_t startTime; + ns_time_t currentTime; + u_int32_t elapsedTimeMS; + + if (!ready || !pkt || !pkt_len) + return; + + *pkt_len = 0; + + debuggerPkt = pkt; + debuggerPktSize = 0; + + _IOGetTimestamp(&startTime); + do + { + _receivePackets(true); + _IOGetTimestamp(¤tTime); + elapsedTimeMS = (currentTime - startTime) / (1000*1000); + } + while ( (debuggerPktSize == 0) && (elapsedTimeMS < timeout) ); + + *pkt_len = debuggerPktSize; +} + +/*------------------------------------------------------------------------- + * _packetToDebugger + * ----------------- + * This is called by _receivePackets when we are polling for kernel debugger + * packets. It copies the NetBuf contents to the buffer passed by the debugger. + * It also sets the var debuggerPktSize which will break the polling loop. + *-------------------------------------------------------------------------*/ + +void BMacEnet::_packetToDebugger(struct mbuf * packet, u_int size) +{ + debuggerPktSize = size; + bcopy( mtod(packet, char *), debuggerPkt, size ); +} + +/*------------------------------------------------------------------------- + * _sendPacket + * ----------- + * + * This routine runs the transmitter in polled-mode (yuk!) for the kernel debugger. 
+ * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_sendPacket(void *pkt, unsigned int pkt_len) +{ + ns_time_t startTime; + ns_time_t currentTime; + u_int32_t elapsedTimeMS; + + if (!ready || !pkt || (pkt_len > ETHERMAXPACKET)) + return; + + /* + * Wait for the transmit ring to empty + */ + _IOGetTimestamp(&startTime); + do + { + _debugTransmitInterruptOccurred(); + _IOGetTimestamp(¤tTime); + elapsedTimeMS = (currentTime - startTime) / (1000*1000); + } + while ( (txCommandHead != txCommandTail) && + (elapsedTimeMS < TX_KDB_TIMEOUT) ); + + if ( txCommandHead != txCommandTail ) + { + IOLog( "Ethernet(BMac): Polled tranmit timeout - 1\n\r"); + return; + } + + /* + * Allocate a NetBuf and copy the debugger transmit data into it. + * + * jliu - no allocation, just recycle the same buffer dedicated to + * KDB transmit. + */ + txDebuggerPkt->m_next = 0; + txDebuggerPkt->m_data = (caddr_t) pkt; + txDebuggerPkt->m_pkthdr.len = txDebuggerPkt->m_len = pkt_len; + + /* + * Send the debugger packet. txDebuggerPkt must not be freed by + * the transmit routine. + */ + _transmitPacket(txDebuggerPkt); + + /* + * Poll waiting for the transmit ring to empty again + */ + do + { + _debugTransmitInterruptOccurred(); + _IOGetTimestamp(¤tTime); + elapsedTimeMS = (currentTime - startTime) / (1000*1000); + } + while ( (txCommandHead != txCommandTail) && + (elapsedTimeMS < TX_KDB_TIMEOUT) ); + + if ( txCommandHead != txCommandTail ) + { + IOLog( "Ethernet(BMac): Polled tranmit timeout - 2\n\r"); + } + + return; +} + +/*------------------------------------------------------------------------- + * _sendDummyPacket + * ---------------- + * The BMac receiver seems to be locked until we send our first packet. 
+ * + *-------------------------------------------------------------------------*/ +void BMacEnet::_sendDummyPacket() +{ + union + { + u_int8_t bytes[64]; + IOEthernetAddress enet_addr[2]; + } dummyPacket; + + bzero( &dummyPacket, sizeof(dummyPacket) ); + dummyPacket.enet_addr[0] = myAddress; + dummyPacket.enet_addr[1] = myAddress; + _sendPacket((void *)dummyPacket.bytes, sizeof(dummyPacket)); + IOSleep(50); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_receiveInterruptOccurred() +{ + return _receivePackets(false); +} + +/*------------------------------------------------------------------------- + * Work around a hardware bug where the controller will receive + * unicast packets not directed to the station. The hardware is + * erroneously using the hash table to qualify the unicast address. + * This routine will check that the packet is unicast, and if so, + * makes sure that the unicast address matches the station's address. + * Thus function returns true if the packet should be rejected. + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_rejectBadUnicastPacket(ether_header_t * etherHeader) +{ + bool rejectPacket = false; + + if ( useUnicastFilter && (isPromiscuous == false) && + (etherHeader->ether_dhost[EA_GROUP_BYTE] & EA_GROUP_BIT) == 0) { + // + // Destination Ethernet address is not multicast nor broadcast. + // Then it must be addresses to the station MAC address, + // otherwise reject the packet. 
+ // + if (bcmp(etherHeader->ether_dhost, &myAddress, NUM_EN_ADDR_BYTES) != 0) + rejectPacket = true; + } + + return rejectPacket; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool BMacEnet::_receivePackets(bool fDebugger) +{ + enet_dma_cmd_t tmpCommand; + struct mbuf * packet; + u_int32_t i, j, last; + int receivedFrameSize = 0; + u_int32_t dmaCount[2], dmaResid[2], dmaStatus[2]; + u_int32_t dmaChnlStatus; + u_int16_t rxPktStatus = 0; + u_int32_t badFrameCount; + bool reusePkt; + bool status; + bool useNetif = !fDebugger && netifEnabled; + bool doFlushQueue = false; + u_int32_t nextDesc; + static const u_int32_t lastResetValue = (u_int32_t)(-1); + + last = lastResetValue; + i = rxCommandHead; + + while ( 1 ) + { + reusePkt = false; + + /* + * Collect the DMA residual counts/status for the two + * buffer segments. + */ + for ( j = 0; j < 2; j++ ) + { + dmaResid[j] = IOGetCCResult( &rxDMACommands[i].desc_seg[j] ); + dmaStatus[j] = dmaResid[j] >> 16; + dmaResid[j] &= 0x0000ffff; + dmaCount[j] = IOGetCCOperation( &rxDMACommands[i].desc_seg[j] ) + & kdbdmaReqCountMask; + } + +#if 0 + IOLog("Ethernet(BMac): Rx NetBuf[%2d] = %08x Resid[0] = %04x Status[0] = %04x Resid[1] = %04x Status[1] = %04x\n\r", + i, (int)nb_map(rxNetbuf[i]), dmaResid[0], dmaStatus[0], dmaResid[1], dmaStatus[1] ); +#endif + + /* + * If the current entry has not been written, then stop at this entry + */ + if ( !((dmaStatus[0] & kdbdmaBt) || (dmaStatus[1] & kdbdmaActive)) ) + { + break; + } + + /* + * The BMac Ethernet controller appends two bytes to each receive + * buffer containing the buffer + * size and receive frame status. + * We locate these bytes by using the DMA residual counts. + */ + receivedFrameSize = dmaCount[0] - dmaResid[0] + dmaCount[1] - + ((dmaStatus[0] & kdbdmaBt) ? 
dmaCount[1] : dmaResid[1]); + + if ( ( receivedFrameSize >= 2 ) && + ( receivedFrameSize <= NETWORK_BUFSIZE ) ) + { + /* + * Get the receive frame size as reported by the BMac controller + */ + rxPktStatus = *(u_int16_t *)(mtod(rxMbuf[i], u_int32_t) + + receivedFrameSize - 2); + receivedFrameSize = rxPktStatus & kRxLengthMask; + } + + /* + * Reject packets that are runts or that have other mutations. + */ + if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) || + receivedFrameSize > (ETHERMAXPACKET + ETHERCRC) || + rxPktStatus & kRxAbortBit || + _rejectBadUnicastPacket(mtod(rxMbuf[i], ether_header_t *)) + ) + { + if (useNetif) netStats->inputErrors++; + reusePkt = true; + } + else if ( useNetif == false ) + { + /* + * Always reuse packets in debugger mode. We also refuse to + * pass anything up the stack unless the driver is open. The + * hardware is enabled before the stack has opened us, to + * allow earlier debug interface registration. But we must + * not pass any packets up. + */ + reusePkt = true; + if (fDebugger) + _packetToDebugger(rxMbuf[i], receivedFrameSize); + } + + /* + * Before we pass this packet up the networking stack. Make sure we + * can get a replacement. Otherwise, hold on to the current packet and + * increment the input error count. + * Thanks Justin! + */ + + packet = 0; + + if ( reusePkt == false ) + { + bool replaced; + + packet = replaceOrCopyPacket(&rxMbuf[i], receivedFrameSize, + &replaced); + + reusePkt = true; + + if (packet && replaced) + { + status = _updateDescriptorFromMbuf(rxMbuf[i], + &rxDMACommands[i], true); + + if (status) + { + reusePkt = false; + } + else + { + // Assume descriptor has not been corrupted. + freePacket(rxMbuf[i]); // release new packet. + rxMbuf[i] = packet; // get the old packet back. + packet = 0; // pass up nothing. 
+ IOLog("Ethernet(BMac): _updateDescriptorFromMbuf error\n"); + } + } + + if (packet == 0) + netStats->inputErrors++; + } + + /* + * If we are reusing the existing mbuf, then refurbish the existing + * DMA command \ descriptors by clearing the status/residual count + * fields. + */ + if ( reusePkt ) + { + for ( j=0; j < sizeof(enet_dma_cmd_t)/sizeof(IODBDMADescriptor); + j++ ) + { + IOSetCCResult( &rxDMACommands[i].desc_seg[j], 0 ); + } + } + + /* + * Keep track of the last receive descriptor processed + */ + last = i; + + /* + * Implement ring wrap-around + */ + if (++i >= rxMaxCommand) i = 0; + + if (fDebugger) + { + break; + } + + /* + * Transfer received packet to network + */ + if (packet) + { + KERNEL_DEBUG(DBG_BMAC_RXCOMPLETE | DBG_FUNC_NONE, (int) packet, + (int)receivedFrameSize, 0, 0, 0 ); + + networkInterface->inputPacket(packet, receivedFrameSize, true); + doFlushQueue = true; + netStats->inputPackets++; + } + } + + /* + * OK...this is a little messy + * + * We just processed a bunch of DMA receive descriptors. We are going to + * exchange the current DMA stop command (rxCommandTail) with the last + * receive descriptor we processed (last). This will make these list of + * descriptors we just processed available. If we processed no receive + * descriptors on this call then skip this exchange. + */ + +#if 0 + IOLog( "Ethernet(BMac): Prev - Rx Head = %2d Rx Tail = %2d Rx Last = %2d\n\r", rxCommandHead, rxCommandTail, last ); +#endif + + if ( last != lastResetValue ) + { + /* + * Save the contents of the last receive descriptor processed. + */ + packet = rxMbuf[last]; + tmpCommand = rxDMACommands[last]; + + /* + * Write a DMA stop command into this descriptor slot + */ + rxDMACommands[last].desc_seg[0] = dbdmaCmd_Stop; + rxDMACommands[last].desc_seg[1] = dbdmaCmd_Nop; + rxMbuf[last] = 0; + + /* + * Replace the previous DMA stop command with the last receive + * descriptor processed. 
+ * + * The new DMA command is written into the channel program so that the + * command word for the old Stop command is overwritten last. This + * prevents the DMA engine from executing a partially written channel + * command. + * + * Note: When relocating the descriptor, we must update its branch + * field to reflect its new location. + */ + nextDesc = rxDMACommandsPhys + + (int) &rxDMACommands[rxCommandTail + 1] - (int)rxDMACommands; + IOSetCCCmdDep( &tmpCommand.desc_seg[0], nextDesc ); + + bcopy( (u_int32_t *) &tmpCommand + 1, + (u_int32_t *) &rxDMACommands[rxCommandTail] + 1, + sizeof(enet_dma_cmd_t) - sizeof(u_int32_t) ); + + rxMbuf[rxCommandTail] = packet; + + rxDMACommands[rxCommandTail].desc_seg[0].operation = + tmpCommand.desc_seg[0].operation; + + /* + * Update rxCommmandTail to point to the new Stop command. Update + * rxCommandHead to point to the next slot in the ring past the Stop + * command + */ + rxCommandTail = last; + rxCommandHead = i; + } + + /* + * Update receive error statistics + */ + badFrameCount = ReadBigMacRegister(ioBaseEnet, kFECNT) + + ReadBigMacRegister(ioBaseEnet, kAECNT) + + ReadBigMacRegister(ioBaseEnet, kLECNT); + + /* + * Clear Hardware counters + */ + WriteBigMacRegister(ioBaseEnet, kFECNT, 0); + WriteBigMacRegister(ioBaseEnet, kAECNT, 0); + WriteBigMacRegister(ioBaseEnet, kLECNT, 0); + + if (badFrameCount && useNetif) + netStats->inputErrors += badFrameCount; + + /* + * Check for error conditions that may cause the receiver to stall + */ + dmaChnlStatus = IOGetDBDMAChannelStatus( ioBaseEnetRxDMA ); + + if ( dmaChnlStatus & kdbdmaDead ) + { + if (useNetif) netStats->inputErrors++; + IOLog( "Ethernet(BMac): Rx DMA Error - Status = %04x\n\r", + dmaChnlStatus ); + _restartReceiver(); + } + else + { + /* + * Tap the DMA to wake it up + */ + IODBDMAContinue( ioBaseEnetRxDMA ); + } + +#if 0 + IOLog( "Ethernet(BMac): New - Rx Head = %2d Rx Tail = %2d\n\r", rxCommandHead, rxCommandTail ); +#endif + + return doFlushQueue; +} + 
/*-------------------------------------------------------------------------
 *
 * Reap completed entries from the transmit DBDMA ring, recycle their
 * mbufs, fold the BigMac collision/error counters into the network
 * statistics, and restart the transmitter if the DMA channel went dead.
 *
 * Returns true if at least one ring entry was serviced or the
 * transmitter had to be restarted.
 *
 *-------------------------------------------------------------------------*/

bool BMacEnet::_transmitInterruptOccurred()
{
    u_int32_t dmaStatus;
    u_int32_t collisionCount;
    u_int32_t badFrameCount;
    bool fServiced = false;

    while ( 1 )
    {
        /*
         * Check the status of the last descriptor in this entry to see if
         * the DMA engine completed this entry. (The transfer-status field
         * is the upper 16 bits of the channel-command result word.)
         */
        dmaStatus = IOGetCCResult(
                        &(txDMACommands[txCommandHead].desc_seg[1])) >> 16;

        if ( !(dmaStatus & kdbdmaActive) )
        {
            break;
        }

        if (netifEnabled) netStats->outputPackets++;

        fServiced = true;

        KERNEL_DEBUG(DBG_BMAC_TXCOMPLETE | DBG_FUNC_NONE,
                     (int)txMbuf[txCommandHead],
                     (int)txMbuf[txCommandHead]->m_pkthdr.len, 0, 0, 0 );

        /*
         * Free the mbuf we just transmitted.
         *
         * If it is the debugger packet, just remove it from the ring,
         * and reuse the same packet for the next sendPacket() request.
         */
        if (txMbuf[txCommandHead] != txDebuggerPkt)
        {
            freePacket( txMbuf[txCommandHead] );
        }
        txMbuf[txCommandHead] = NULL;

        /* Advance the head index, wrapping around the ring. */
        if ( ++(txCommandHead) >= txMaxCommand )
            txCommandHead = 0;
    }

    /*
     * Increment transmit error statistics. The BigMac counters are
     * read-then-cleared by software: read each register, write zero back.
     */
    collisionCount = ReadBigMacRegister(ioBaseEnet, kNCCNT );

    WriteBigMacRegister( ioBaseEnet, kNCCNT, 0 );

    badFrameCount = ReadBigMacRegister(ioBaseEnet, kEXCNT ) +
                    ReadBigMacRegister(ioBaseEnet, kLTCNT );

    WriteBigMacRegister( ioBaseEnet, kEXCNT, 0 );
    WriteBigMacRegister( ioBaseEnet, kLTCNT, 0 );

    if (netifEnabled) {
        netStats->collisions += collisionCount;
        netStats->outputErrors += badFrameCount;
    }

    /*
     * Check for error conditions that may cause the transmitter to stall
     */
    dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetTxDMA );

    if ( dmaStatus & kdbdmaDead )
    {
        if (netifEnabled) netStats->outputErrors++;
        IOLog( "Ethernet(BMac): Tx DMA Error - Status = %04x\n\r", dmaStatus );
        _restartTransmitter();

        fServiced = true;
    }

    return fServiced;
}

/*-------------------------------------------------------------------------
 *
 * Debugger-context variant of _transmitInterruptOccurred(). It must not
 * touch the mbuf pool (no freePacket()/m_free() from the debugger), so
 * reaped mbufs are parked on debugQueue and flushed after the driver
 * returns to normal operation.
 *
 *-------------------------------------------------------------------------*/

bool BMacEnet::_debugTransmitInterruptOccurred()
{
    u_int32_t dmaStatus;
    u_int32_t badFrameCount;
    bool fServiced = false;

    // Set the debugTxPoll flag to indicate the debugger was active
    // and some cleanup may be needed when the driver returns to
    // normal operation.
    //
    debugTxPoll = true;

    while ( 1 )
    {
        /*
         * Check the status of the last descriptor in this entry to see if
         * the DMA engine completed this entry.
         */
        dmaStatus = IOGetCCResult(
                        &(txDMACommands[txCommandHead].desc_seg[1])) >> 16;

        if ( !(dmaStatus & kdbdmaActive) )
        {
            break;
        }

        fServiced = true;

        KERNEL_DEBUG(DBG_BMAC_TXCOMPLETE | DBG_FUNC_NONE,
                     (int)txMbuf[txCommandHead],
                     (int)txMbuf[txCommandHead]->m_pkthdr.len, 0, 0, 0 );

        /*
         * Free the mbuf we just transmitted.
         *
         * If it is the debugger packet, just remove it from the ring,
         * and reuse the same packet for the next sendPacket() request.
         */
        if (txMbuf[txCommandHead] != txDebuggerPkt) {
            //
            // While in debugger mode, do not touch the mbuf pool.
            // Queue any used mbufs to a local queue. This queue
            // will get flushed after we exit from debugger mode.
            //
            // During continuous debugger transmission and
            // interrupt polling, we expect only the txDebuggerPkt
            // to show up on the transmit mbuf ring.
+ // + debugQueue->enqueue( txMbuf[txCommandHead] ); + } + txMbuf[txCommandHead] = NULL; + + if ( ++(txCommandHead) >= txMaxCommand ) + txCommandHead = 0; + } + + /* + * Clear transmit error statistics + */ + badFrameCount = ReadBigMacRegister(ioBaseEnet, kNCCNT ); + WriteBigMacRegister( ioBaseEnet, kNCCNT, 0 ); + + badFrameCount = ReadBigMacRegister(ioBaseEnet, kEXCNT ) + + ReadBigMacRegister(ioBaseEnet, kLTCNT ); + WriteBigMacRegister( ioBaseEnet, kEXCNT, 0 ); + WriteBigMacRegister( ioBaseEnet, kLTCNT, 0 ); + + /* + * Check for error conditions that may cause the transmitter to stall + */ + dmaStatus = IOGetDBDMAChannelStatus( ioBaseEnetTxDMA ); + + if ( dmaStatus & kdbdmaDead ) + { + IOLog( "Ethernet(BMac): Tx DMA Error - Status = %04x\n\r", dmaStatus ); + _restartTransmitter(); + fServiced = true; + } + + return fServiced; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool +BMacEnet::_updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t *desc, + bool isReceive) +{ + u_int32_t nextDesc = 0; + u_int32_t waitMask = 0; + int segments; + struct IOPhysicalSegment segVector[2]; + + segments = mbufCursor->getPhysicalSegmentsWithCoalesce(m, segVector); + + if ((!segments) || (segments > 2)) { + IOLog("BMac: _updateDescriptorFromMbuf error, %d segments\n", + segments); + return false; + } + + // IOLog("segments: %d\n", segments); + + if ( isReceive || chipId >= kCHIPID_PaddingtonXmitStreaming ) + { + waitMask = kdbdmaWaitNever; + } + else + { + waitMask = kdbdmaWaitIfFalse; + } + + if ( segments == 1 ) + { + IOMakeDBDMADescriptor( (&desc->desc_seg[0]), + ((isReceive) ? kdbdmaInputLast : kdbdmaOutputLast), + (kdbdmaKeyStream0), + (kdbdmaIntNever), + (kdbdmaBranchNever), + (waitMask), + (segVector[0].length), + (segVector[0].location) ); + + desc->desc_seg[1] = (isReceive) ? 
dbdmaCmd_NopWInt : dbdmaCmd_Nop; + } + else + { + if ( isReceive ) + { + nextDesc = rxDMACommandsPhys + (int)desc - (int)rxDMACommands + + sizeof(enet_dma_cmd_t); + } + + IOMakeDBDMADescriptorDep( (&desc->desc_seg[0]), + ((isReceive) ? kdbdmaInputMore : kdbdmaOutputMore), + (kdbdmaKeyStream0), + ((isReceive) ? kdbdmaIntIfTrue : kdbdmaIntNever), + ((isReceive) ? kdbdmaBranchIfTrue : + kdbdmaBranchNever), + (kdbdmaWaitNever), + (segVector[0].length), + (segVector[0].location), + nextDesc ); + + IOMakeDBDMADescriptor( (&desc->desc_seg[1]), + ((isReceive) ? kdbdmaInputLast : kdbdmaOutputLast), + (kdbdmaKeyStream0), + ((isReceive) ? kdbdmaIntAlways : kdbdmaIntNever), + (kdbdmaBranchNever), + (waitMask), + (segVector[1].length), + (segVector[1].location) ); + } + + return true; +} + +#ifdef DEBUG +/* + * Useful for testing. + */ +void BMacEnet::_dump_srom() +{ + unsigned short data; + int i; + + for (i = 0; i < 128; i++) + { + reset_and_select_srom(ioBaseEnet); + data = read_srom(ioBaseEnet, i, sromAddressBits); + IOLog("Ethernet(BMac): %x = %x ", i, data); + if (i % 10 == 0) IOLog("\n"); + } +} + +void BMacEnet::_dumpDesc(void * addr, u_int32_t size) +{ + u_int32_t i; + unsigned long *p; + vm_offset_t paddr; + + _IOPhysicalFromVirtual( (vm_offset_t) addr, (vm_offset_t *)&paddr ); + + p = (unsigned long *)addr; + + for ( i=0; i < size/sizeof(IODBDMADescriptor); i++, p+=4, + paddr+=sizeof(IODBDMADescriptor) ) + { + IOLog("Ethernet(BMac): %08x(v) %08x(p): %08x %08x %08x %08x\n\r", + (int)p, + (int)paddr, + (int)OSReadSwapInt32(p, 0), (int)OSReadSwapInt32(p, 4), + (int)OSReadSwapInt32(p, 8), (int)OSReadSwapInt32(p, 12) ); + } +} + +void BMacEnet::_dumpRegisters() +{ + u_int16_t dataValue; + + IOLog("\nEthernet(BMac): IO Address = %08x", (int)ioBaseEnet ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kXIFC); + IOLog("\nEthernet(BMac): Read Register %04x Transceiver I/F = %04x", kXIFC, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kSTAT); + 
IOLog("\nEthernet(BMac): Read Register %04x Int Events = %04x", kSTAT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kINTDISABLE); + IOLog("\nEthernet(BMac): Read Register %04x Int Disable = %04x", kINTDISABLE, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXRST); + IOLog("\nEthernet(BMac): Read Register %04x Tx Reset = %04x", kTXRST, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXCFG); + IOLog("\nEthernet(BMac): Read Register %04x Tx Config = %04x", kTXCFG, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kIPG1); + IOLog("\nEthernet(BMac): Read Register %04x IPG1 = %04x", kIPG1, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kIPG2); + IOLog("\nEthernet(BMac): Read Register %04x IPG2 = %04x", kIPG2, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kALIMIT); + IOLog("\nEthernet(BMac): Read Register %04x Attempt Limit = %04x", kALIMIT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kSLOT); + IOLog("\nEthernet(BMac): Read Register %04x Slot Time = %04x", kSLOT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kPALEN); + IOLog("\nEthernet(BMac): Read Register %04x Preamble Length = %04x", kPALEN, dataValue ); + + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + dataValue = ReadBigMacRegister(ioBaseEnet, kPAPAT); + IOLog("\nEthernet(BMac): Read Register %04x Preamble Pattern = %04x", kPAPAT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXSFD); + IOLog("\nEthernet(BMac): Read Register %04x Tx Start Frame Delimeter = %04x", kTXSFD, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kJAM); + IOLog("\nEthernet(BMac): Read Register %04x Jam Size = %04x", kJAM, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXMAX); + IOLog("\nEthernet(BMac): Read Register %04x Tx Max Size = %04x", kTXMAX, dataValue ); + + 
dataValue = ReadBigMacRegister(ioBaseEnet, kTXMIN); + IOLog("\nEthernet(BMac): Read Register %04x Tx Min Size = %04x", kTXMIN, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kPAREG); + IOLog("\nEthernet(BMac): Read Register %04x Peak Attempts = %04x", kPAREG, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kDCNT); + IOLog("\nEthernet(BMac): Read Register %04x Defer Timer = %04x", kDCNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kNCCNT); + IOLog("\nEthernet(BMac): Read Register %04x Normal Collision Count = %04x", kNCCNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kNTCNT); + IOLog("\nEthernet(BMac): Read Register %04x Network Collision Count = %04x", kNTCNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kEXCNT); + IOLog("\nEthernet(BMac): Read Register %04x Excessive Coll Count = %04x", kEXCNT, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kLTCNT); + IOLog("\nEthernet(BMac): Read Register %04x Late Collision Count = %04x", kLTCNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRSEED); + IOLog("\nEthernet(BMac): Read Register %04x Random Seed = %04x", kRSEED, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXSM); + IOLog("\nEthernet(BMac): Read Register %04x Tx State Machine = %04x", kTXSM, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXRST); + IOLog("\nEthernet(BMac): Read Register %04x Rx Reset = %04x", kRXRST, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXCFG); + IOLog("\nEthernet(BMac): Read Register %04x Rx Config = %04x", kRXCFG, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXMAX); + IOLog("\nEthernet(BMac): Read Register %04x Rx Max Size = 
%04x", kRXMAX, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXMIN); + IOLog("\nEthernet(BMac): Read Register %04x Rx Min Size = %04x", kRXMIN, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMADD2); + IOLog("\nEthernet(BMac): Read Register %04x Mac Address 2 = %04x", kMADD2, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMADD1); + IOLog("\nEthernet(BMac): Read Register %04x Mac Address 1 = %04x", kMADD1, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMADD0); + IOLog("\nEthernet(BMac): Read Register %04x Mac Address 0 = %04x", kMADD0, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kFRCNT); + IOLog("\nEthernet(BMac): Read Register %04x Rx Frame Counter = %04x", kFRCNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kLECNT); + IOLog("\nEthernet(BMac): Read Register %04x Rx Length Error Cnt = %04x", kLECNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kAECNT); + IOLog("\nEthernet(BMac): Read Register %04x Alignment Error Cnt = %04x", kAECNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kFECNT); + IOLog("\nEthernet(BMac): Read Register %04x FCS Error Cnt = %04x", kFECNT, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXSM); + IOLog("\nEthernet(BMac): Read Register %04x Rx State Machine = %04x", kRXSM, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXCV); + IOLog("\nEthernet(BMac): Read Register %04x Rx Code Violation = %04x", kRXCV, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kHASH3); + IOLog("\nEthernet(BMac): Read Register %04x Hash 3 = %04x", kHASH3, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kHASH2); + IOLog("\nEthernet(BMac): Read Register %04x Hash 2 = %04x", kHASH2, dataValue ); + + dataValue = 
ReadBigMacRegister(ioBaseEnet, kHASH1); + IOLog("\nEthernet(BMac): Read Register %04x Hash 1 = %04x", kHASH1, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kHASH0); + IOLog("\nEthernet(BMac): Read Register %04x Hash 0 = %04x", kHASH0, dataValue ); + IOLog("\n-------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kAFR2); + IOLog("\nEthernet(BMac): Read Register %04x Address Filter 2 = %04x", kAFR2, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kAFR1); + IOLog("\nEthernet(BMac): Read Register %04x Address Filter 1 = %04x", kAFR1, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kAFR0); + IOLog("\nEthernet(BMac): Read Register %04x Address Filter 0 = %04x", kAFR0, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kAFCR); + IOLog("\nEthernet(BMac): Read Register %04x Adress Filter Mask = %04x", kAFCR, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXFIFOCSR); + IOLog("\nEthernet(BMac): Read Register %04x Tx FIFO CSR = %04x", kTXFIFOCSR, dataValue ); + IOLog("\n-------------------------------------------------------" ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXTH); + IOLog("\nEthernet(BMac): Read Register %04x Tx Threshold = %04x", kTXTH, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXFIFOCSR); + IOLog("\nEthernet(BMac): Read Register %04x Rx FIFO CSR = %04x", kRXFIFOCSR, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMEMADD); + IOLog("\nEthernet(BMac): Read Register %04x Mem Addr = %04x", kMEMADD, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMEMDATAHI); + IOLog("\nEthernet(BMac): Read Register %04x Mem Data High = %04x", kMEMDATAHI, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMEMDATALO); + IOLog("\nEthernet(BMac): Read Register %04x Mem Data Low = %04x", kMEMDATALO, dataValue ); + IOLog("\n-------------------------------------------------------" ); + + dataValue = 
ReadBigMacRegister(ioBaseEnet, kXCVRIF); + IOLog("\nEthernet(BMac): Read Register %04x Transceiver IF Control = %04x", kXCVRIF, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kCHIPID); + IOLog("\nEthernet(BMac): Read Register %04x Chip ID = %04x", kCHIPID, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kMIFCSR); + IOLog("\nEthernet(BMac): Read Register %04x MII CSR = %04x", kMIFCSR, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kSROMCSR); + IOLog("\nEthernet(BMac): Read Register %04x SROM CSR = %04x", kSROMCSR, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kTXPNTR); + IOLog("\nEthernet(BMac): Read Register %04x Tx Pointer = %04x", kTXPNTR, dataValue ); + + dataValue = ReadBigMacRegister(ioBaseEnet, kRXPNTR); + IOLog("\nEthernet(BMac): Read Register %04x Rx Pointer = %04x", kRXPNTR, dataValue ); + IOLog("\nEthernet(BMac): -------------------------------------------------------\n" ); +} +#endif DEBUG + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +IOReturn BMacEnet::getHardwareAddress(IOEthernetAddress *ea) +{ + int i; + unsigned short data; + + for (i = 0; i < (unsigned short)sizeof(*ea)/2; i++) + { + reset_and_select_srom(ioBaseEnet); + data = read_srom(ioBaseEnet, i + enetAddressOffset/2, sromAddressBits); + ea->bytes[2*i] = reverseBitOrder(data & 0x0ff); + ea->bytes[2*i+1] = reverseBitOrder((data >> 8) & 0x0ff); + } + + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +#define ENET_CRCPOLY 0x04c11db7 + +/* Real fast bit-reversal algorithm, 6-bit values */ +static int reverse6[] = +{ 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, + 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, + 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, + 
0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, + 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, + 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, + 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, + 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f +}; + +static u_int32_t crc416(unsigned int current, unsigned short nxtval ) +{ + register unsigned int counter; + register int highCRCBitSet, lowDataBitSet; + + /* Swap bytes */ + nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8); + + /* Compute bit-by-bit */ + for (counter = 0; counter != 16; ++counter) + { /* is high CRC bit set? */ + if ((current & 0x80000000) == 0) + highCRCBitSet = 0; + else + highCRCBitSet = 1; + + current = current << 1; + + if ((nxtval & 0x0001) == 0) + lowDataBitSet = 0; + else + lowDataBitSet = 1; + + nxtval = nxtval >> 1; + + /* do the XOR */ + if (highCRCBitSet ^ lowDataBitSet) + current = current ^ ENET_CRCPOLY; + } + + return current; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +static u_int32_t mace_crc(unsigned short *address) +{ + register u_int32_t newcrc; + + newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ + newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ + newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ + + return(newcrc); +} + +/* + * Clear the hash table filter. + * + */ +void BMacEnet::_resetHashTableMask() +{ + bzero(hashTableUseCount, sizeof(hashTableUseCount)); + bzero(hashTableMask, sizeof(hashTableMask)); +} + +/* + * Add requested mcast addr to BMac's hash table filter. + * + */ +void BMacEnet::_addToHashTableMask(u_int8_t *addr) +{ + u_int32_t crc; + u_int16_t mask; + + crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! 
*/ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (hashTableUseCount[crc]++) + return; /* This bit is already set */ + mask = crc % 16; + mask = (unsigned short)1 << mask; + hashTableMask[crc/16] |= mask; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void BMacEnet::_removeFromHashTableMask(u_int8_t *addr) +{ + unsigned int crc; + u_int16_t mask; + + /* Now, delete the address from the filter copy, as indicated */ + crc = mace_crc((unsigned short *)addr)&0x3f; /* Big-endian alert! */ + crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ + if (hashTableUseCount[crc] == 0) + return; /* That bit wasn't in use! */ + + if (--hashTableUseCount[crc]) + return; /* That bit is still in use */ + + mask = crc % 16; + mask = (u_int16_t)1 << mask; /* To turn off bit */ + hashTableMask[crc/16] &= ~mask; +} + +/* + * Sync the adapter with the software copy of the multicast mask + * (logical address filter). 
 */
void BMacEnet::_updateBMacHashTableMask()
{
    u_int16_t rxCFGReg;

    // Snapshot the current receive configuration, then disable both the
    // receive MAC and the hash filter before rewriting the hash registers.
    rxCFGReg = ReadBigMacRegister(ioBaseEnet, kRXCFG);
    WriteBigMacRegister(ioBaseEnet, kRXCFG,
        rxCFGReg & ~(kRxMACEnable | kRxHashFilterEnable) );

    // Spin until the chip reports both bits clear.
    // NOTE(review): no timeout here; a wedged chip would hang this loop --
    // confirm this is acceptable in the interrupt/workloop context.
    while ( ReadBigMacRegister(ioBaseEnet, kRXCFG) &
            (kRxMACEnable | kRxHashFilterEnable) )
        ;

    // Load the 64-bit logical address filter, 16 bits per register.
    WriteBigMacRegister(ioBaseEnet, kHASH0, hashTableMask[0]); // bits 15 - 0
    WriteBigMacRegister(ioBaseEnet, kHASH1, hashTableMask[1]); // bits 31 - 16
    WriteBigMacRegister(ioBaseEnet, kHASH2, hashTableMask[2]); // bits 47 - 32
    WriteBigMacRegister(ioBaseEnet, kHASH3, hashTableMask[3]); // bits 63 - 48

    // Restore the saved configuration with the hash filter enabled; the
    // saved value still carries kRxMACEnable if it was set on entry.
    rxCFGReg |= kRxHashFilterEnable;
    WriteBigMacRegister(ioBaseEnet, kRXCFG, rxCFGReg );
}
diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.h b/iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.h
new file mode 100644
index 000000000..2250fe75a
--- /dev/null
+++ b/iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.h
@@ -0,0 +1,49 @@
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved.
 *
 * Interface for hardware dependent (relatively) code
 * for the BMac Ethernet chip
 *
 * HISTORY
 *
 */

#ifndef _BMACENETPRIVATE_H
#define _BMACENETPRIVATE_H

#include "BMacEnet.h"
#include "BMacEnetMII.h"

/* Write a 16-bit value to the BigMac register at the given byte offset. */
void WriteBigMacRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset,
	u_int16_t data);

/* Read the 16-bit BigMac register at the given byte offset. */
volatile u_int16_t ReadBigMacRegister( IOPPCAddress ioEnetBase,
	u_int32_t reg_offset);

/* Reset the serial ROM interface and assert its chip select. */
void reset_and_select_srom(IOPPCAddress base);

/*
 * Read one 16-bit word from the serial ROM at the given word address;
 * addr_len is the number of address bits the part expects.
 */
u_int16_t read_srom(IOPPCAddress base, unsigned int addr,
	unsigned int addr_len);

#endif /* !_BMACENETPRIVATE_H */
diff --git a/iokit/Drivers/network/drvPPCBMac/BMacEnetRegisters.h b/iokit/Drivers/network/drvPPCBMac/BMacEnetRegisters.h
new file mode 100644
index 000000000..ca433076d
--- /dev/null
+++ b/iokit/Drivers/network/drvPPCBMac/BMacEnetRegisters.h
@@ -0,0 +1,212 @@
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved.
+ * + * Interface definition for the BigMac Ethernet controller. + * + * HISTORY + * + */ + +#ifndef _BMACENETREGISTERS_H +#define _BMACENETREGISTERS_H + +// --------------------------------------------------------------------------------------------- +// Heathrow (F)eature (C)ontrol (R)egister Addresses +// --------------------------------------------------------------------------------------------- +#define kHeathrowFCR 0x0038 // FCR offset from Heathrow Base Address +#define kEnetEnabledBits 0x60000000 // mask to enable Enet Xcvr/Controller +#define kResetEnetCell 0x80000000 // mask used to reset Enet cell +#define kClearResetEnetCell 0x7fffffff // mask used to clear reset Enet cell +#define kDisableEnet 0x1fffffff // mask to disable Enet Xcvr/Controller + + +// --------------------------------------------------------------------------------------------- +// BMAC & Heathrow I/O Addresses +// --------------------------------------------------------------------------------------------- +#define kTxDMABaseOffset 0x08200 // offset from Heathrow Base address +#define kRxDMABaseOffset 0x08300 +#define kControllerBaseOffset 0x11000 + + +// --------------------------------------------------------------------------------------------- +// BigMac Register Numbers & Bit Assignments +// --------------------------------------------------------------------------------------------- +#define kXIFC 0x0000 +#define kTxOutputEnable 0x0001 +#define kMIILoopbackBits 0x0006 +#define kMIIBufferEnable 0x0008 +#define kSQETestEnable 0x0010 +#define kSTAT 0x0200 +#define kINTDISABLE 0x0210 +#define kIntFrameReceived 0x0001 +#define kIntRxFrameCntExp 0x0002 +#define kIntRxAlignCntExp 0x0004 +#define kIntRxCRCCntExp 0x0008 +#define kIntRxLenCntExp 0x0010 +#define kIntRxOverFlow 0x0020 +#define kIntRxCodeViolation 0x0040 +#define kIntSQETestError 0x0080 +#define kIntFrameSent 0x0100 +#define kIntTxUnderrun 0x0200 +#define kIntTxMaxSizeError 0x0400 +#define kIntTxNormalCollExp 0x0800 +#define 
kIntTxExcessCollExp 0x1000 +#define kIntTxLateCollExp 0x2000 +#define kIntTxNetworkCollExp 0x4000 +#define kIntTxDeferTimerExp 0x8000 +#define kNormalIntEvents ~(0xFFFF & ( kIntFrameSent | kIntTxUnderrun) ) +#if EXTRA_INTERRUPTS + #define kXtraInterrupts ~(0xFFFF & (kIntFrameReceived | kIntRxFrameCntExp \ + | kIntFrameSent | kIntTxUnderrun | kIntFrameSent) ) +#endif +#define kNoEventsMask 0xFFFF +#define kTXRST 0x0420 +#define kTxResetBit 0x0001 +#define kTXCFG 0x0430 +#define kTxMACEnable 0x0001 +#define kTxThreshold 0x0004 +#define kTxSlowDown 0x0020 +#define kTxIgnoreCollision 0x0040 +#define kTxDisableFCS 0x0080 +#define kTxNoBackoff 0x0100 +#define kTxFullDuplex 0x0200 +#define kTxNeverGiveUp 0x0400 +#define kIPG1 0x0440 +#define kIPG2 0x0450 +#define kALIMIT 0x0460 +#define kSLOT 0x0470 +#define kPALEN 0x0480 +#define kPAPAT 0x0490 +#define kTXSFD 0x04A0 +#define kJAM 0x04B0 +#define kTXMAX 0x04C0 +#define kTXMIN 0x04D0 +#define kPAREG 0x04E0 +#define kDCNT 0x04F0 +#define kNCCNT 0x0500 +#define kNTCNT 0x0510 +#define kEXCNT 0x0520 +#define kLTCNT 0x0530 +#define kRSEED 0x0540 +#define kTXSM 0x0550 +#define kRXRST 0x0620 +#define kRxResetValue 0x0000 +#define kRXCFG 0x0630 +#define kRxMACEnable 0x0001 +#define kReservedValue 0x0004 +#define kRxPromiscEnable 0x0040 +#define kRxCRCEnable 0x0100 +#define kRxRejectOwnPackets 0x0200 +#define kRxHashFilterEnable 0x0800 +#define kRxAddrFilterEnable 0x1000 +#define kRXMAX 0x0640 +#define kRXMIN 0x0650 +#define kMADD2 0x0660 +#define kMADD1 0x0670 +#define kMADD0 0x0680 +#define kFRCNT 0x0690 +#define kLECNT 0x06A0 +#define kAECNT 0x06B0 +#define kFECNT 0x06C0 +#define kRXSM 0x06D0 +#define kRXCV 0x06E0 +#define kHASH3 0x0700 +#define kHASH2 0x0710 +#define kHASH1 0x0720 +#define kHASH0 0x0730 +#define kAFR2 0x0740 +#define kAFR1 0x0750 +#define kAFR0 0x0760 +#define kAFCR 0x0770 +#define kEnableAllCompares 0x0fff +#define kTXFIFOCSR 0x0100 +#define kTxFIFOEnable 0x0001 +#define kTxFIFO128 0x0000 +#define kTxFIFO2048 
0x001e +#define kTXTH 0x0110 +#define kRXFIFOCSR 0x0120 +#define kRxFIFOEnable kTxFIFOEnable +#define kRxFIFO128 kTxFIFO128 +#define kRxFIFO2048 kTxFIFO2048 +#define kMEMADD 0x0130 +#define kMEMDATAHI 0x0140 +#define kMEMDATALO 0x0150 +#define kXCVRIF 0x0160 +#define kCOLActiveLow 0x0002 +#define kSerialMode 0x0004 +#define kClkBit 0x0008 +#define kLinkStatus 0x0100 +#define kCHIPID 0x0170 +#define kCHIPID_Heathrow 0xB0 +#define kCHIPID_Paddington 0xC0 +#define kCHIPID_PaddingtonXmitStreaming 0xC4 + +#define kMIFCSR 0x0180 +#define kMIFCSR_Clock 0x0001 +#define kMIFCSR_DataOut 0x0002 +#define kMIFCSR_DataOutEnable 0x0004 +#define kMIFCSR_DataIn 0x0008 +#define kSROMCSR 0x0190 +#define kTXPNTR 0x01A0 +#define kRXPNTR 0x01B0 + +// --------------------------------------------------------------------------------------------- +// Misc. Bit definitions for BMac Status word +// --------------------------------------------------------------------------------------------- +#define kRxAbortBit 0x8000 // status bit in BMac status for rx packets +#define kRxLengthMask 0x3FFF // bits that determine length of rx packets + + +#define TX_RING_LENGTH 33 +#define RX_RING_LENGTH 33 + +#define TX_PKTS_PER_INT 10 + +#define NETWORK_BUFSIZE (ETHERMAXPACKET + ETHERCRC + 2) +#define TRANSMIT_QUEUE_SIZE 1024 + +#define WATCHDOG_TIMER_MS 500 +#define TX_KDB_TIMEOUT 1000 + +#define TRANSMIT_QUIESCE_uS 200 +#define RECEIVE_QUIESCE_uS 1500 + + +enum +{ + kIRQEnetDev = 0, + kIRQEnetTxDMA = 1, + kIRQEnetRxDMA = 2 +}; + +enum +{ + MEMORY_MAP_ENET_INDEX = 0, + MEMORY_MAP_TXDMA_INDEX = 1, + MEMORY_MAP_RXDMA_INDEX = 2, + MEMORY_MAP_HEATHROW_INDEX = 3, + MEMORY_MAP_COUNT = 4 +}; + +#endif /* !_BMACENETREGISTERS_H */ diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnet.cpp b/iokit/Drivers/network/drvPPCUniN/UniNEnet.cpp new file mode 100644 index 000000000..958fbb6b4 --- /dev/null +++ b/iokit/Drivers/network/drvPPCUniN/UniNEnet.cpp @@ -0,0 +1,1071 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, 
Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer + * + * Hardware independent (relatively) code for the Sun GEM Ethernet Controller + * + * HISTORY + * + * dd-mmm-yy + * Created. + * + */ + +//void call_kdp(void); + +#include "UniNEnetPrivate.h" + +#define super IOEthernetController + +OSDefineMetaClassAndStructors( UniNEnet, IOEthernetController ) + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::init(OSDictionary * properties) +{ + if ( super::init(properties) == false ) + return false; + + /* + * Initialize my ivars. 
 */
    phyId          = 0xff;                      // no PHY probed yet
    linkStatusPrev = kLinkStatusUnknown;

    return true;
}

/*-------------------------------------------------------------------------
 * start - bring the driver up on its PCI nub: wait for KeyLargo, map the
 * GMAC register bank, allocate the work-loop event sources and queues,
 * reset the chip, and attach the network/debugger clients.
 * Returns false on any allocation or hardware failure (IOKit will then
 * tear the driver down via free()).
 *-------------------------------------------------------------------------*/

bool UniNEnet::start(IOService * provider)
{
    OSString *matchEntry;

    keyLargo_resetUniNEthernetPhy = OSSymbol::withCString("keyLargo_resetUniNEthernetPhy");

    // Wait for KeyLargo to show up.
    keyLargo = waitForService(serviceMatching("KeyLargo"));
    if (keyLargo == 0) return false;

    nub = OSDynamicCast(IOPCIDevice, provider);

    if (!nub || !super::start(provider))
    {
        return false;
    }

    // Create spinlock to protect TxElementQueue.

    txQueueLock = IOSimpleLockAlloc();
    if ( txQueueLock == 0 ) return false;
    IOSimpleLockInit(txQueueLock);

    // NOTE(review): cast is to IOGatedOutputQueue* but the ivar is declared
    // IOBasicOutputQueue* (its base class) - harmless but inconsistent.
    transmitQueue = (IOGatedOutputQueue *) getOutputQueue();
    if (!transmitQueue)
    {
        IOLog("Ethernet(UniN): Output queue initialization failed\n");
        return false;
    }
    transmitQueue->retain();

    /*
     * Allocate debug queue. This stores packets retired from the TX ring
     * by the polling routine. We cannot call freePacket() or m_free() within
     * the debugger context.
     *
     * The capacity of the queue is set at maximum to prevent the queue from
     * calling m_free() due to over-capacity. But we don't expect the size
     * of the queue to grow too large.
     */
    debugQueue = IOPacketQueue::withCapacity((UInt) -1);
    if (!debugQueue)
    {
        return false;
    }

    /*
     * Allocate an IOMbufBigMemoryCursor instance. Currently, the maximum
     * number of segments is set to 1. The maximum length for each segment
     * is set to the maximum ethernet frame size (plus padding).
     */
    mbufCursor = IOMbufBigMemoryCursor::withSpecification(NETWORK_BUFSIZE, 1);
    if (!mbufCursor)
    {
        IOLog("Ethernet(UniN): IOMbufBigMemoryCursor allocation failure\n");
        return false;
    }

    matchEntry = OSDynamicCast( OSString, getProperty( gIONameMatchedKey ) );
    if ( matchEntry == 0 )
    {
        IOLog("Ethernet(UniN): Cannot obtain matching property.\n");
        return false;
    }

    // "gmac" parts need their clock gated on before register access.
    if ( matchEntry->isEqualTo( "gmac" ) == true )
    {
        callPlatformFunction("EnableUniNEthernetClock", true,
                             (void *)true, 0, 0, 0);
    }

    /*
     * BUS MASTER, MEM I/O Space, MEM WR & INV
     * (PCI command register at config offset 0x04)
     */
    nub->configWrite32( 0x04, 0x16 );

    /*
     * set Latency to Max , cache 32
     * (PCI cache-line-size / latency-timer register at config offset 0x0C)
     */
    nub->configWrite32( 0x0C, ((2 + (kGEMBurstSize * (0+1)))<< 8) | (CACHE_LINE_SIZE >> 2) );

    ioMapEnet = nub->mapDeviceMemoryWithRegister( 0x10 );   // BAR0
    if ( ioMapEnet == NULL )
    {
        return false;
    }
    ioBaseEnet = (volatile IOPPCAddress)ioMapEnet->getVirtualAddress();
    fpRegs     = (GMAC_Registers*) ioBaseEnet;
    phyId      = (UInt8) -1;        // PHY address unknown until probed

    /*
     * Get a reference to the IOWorkLoop in our superclass.
     */
    IOWorkLoop * myWorkLoop = getWorkLoop();

    /*
     * Allocate the interrupt and timer event sources.
     */
    interruptSource = IOInterruptEventSource::interruptEventSource(
                        (OSObject *) this,
                        (IOInterruptEventAction) &UniNEnet::interruptOccurred,
                        (IOService *) provider,
                        (int) 0 );

    if ( interruptSource == NULL )
    {
        IOLog("Ethernet(UniN): Couldn't allocate Interrupt event source\n");
        return false;
    }

    if ( myWorkLoop->addEventSource( interruptSource ) != kIOReturnSuccess )
    {
        IOLog("Ethernet(UniN): Couldn't add Interrupt event source\n");
        return false;
    }


    timerSource = IOTimerEventSource::timerEventSource
                    (this, (IOTimerEventSource::Action) &UniNEnet::timeoutOccurred);
    if ( timerSource == NULL )
    {
        IOLog("Ethernet(UniN): Couldn't allocate timer event source\n");
        return false;
    }

    if ( myWorkLoop->addEventSource( timerSource ) != kIOReturnSuccess )
    {
        IOLog("Ethernet(UniN): Couldn't add timer event source\n");
        return false;
    }

    // Pre-allocate the mbuf used to transmit from the kernel debugger,
    // since allocation is not possible in the debugger context.
    MGETHDR(txDebuggerPkt, M_DONTWAIT, MT_DATA);

    if (!txDebuggerPkt)
    {
        IOLog("Ethernet(UniN): Couldn't allocate KDB buffer\n");
        return false;
    }

    /*
     * Perform a hardware reset.
     */
    if ( resetAndEnable(false) == false )
    {
        IOLog("Ethernet(UniN): resetAndEnable() failed\n");
        return false;
    }

    /*
     * Cache my MAC address.
     */
    if ( getHardwareAddress(&myAddress) != kIOReturnSuccess )
    {
        IOLog("Ethernet(UniN): getHardwareAddress() failed\n");
        return false;
    }

    /*
     * Allocate memory for ring buffers.
     */
    if ( allocateMemory() == false)
    {
        IOLog("Ethernet(UniN): allocateMemory() failed\n");
        return false;
    }

    if ( createMediumTables() == false )
    {
        IOLog("Ethernet(UniN): createMediumTables() failed\n");
        return false;
    }

    /*
     * Attach an IOEthernetInterface client. But don't register it just yet.
     */
    if ( !attachInterface((IONetworkInterface **) &networkInterface, false) )
    {
        IOLog("Ethernet(UniN): attachInterface() failed\n");
        return false;
    }

    /*
     * Attach a kernel debugger client.
     */
    attachDebuggerClient(&debugger);

    /*
     * Ready to service interface requests.
     */
    networkInterface->registerService();

    return true;
}

/*-------------------------------------------------------------------------
 * configureInterface - grab pointers to the generic and Ethernet-specific
 * statistics buffers published by the interface object, so the interrupt
 * and watchdog paths can update counters without lookups.
 *-------------------------------------------------------------------------*/

bool UniNEnet::configureInterface(IONetworkInterface * netif)
{
    IONetworkData * nd;

    if ( super::configureInterface( netif ) == false )
        return false;

    /*
     * Grab a pointer to the statistics structure in the interface.
     */
    nd = netif->getNetworkData( kIONetworkStatsKey );
    if (!nd || !(fpNetStats = (IONetworkStats *) nd->getBuffer()))
    {
        IOLog("EtherNet(UniN): invalid network statistics\n");
        return false;
    }

    // Get the Ethernet statistics structure:
    nd = netif->getParameter( kIOEthernetStatsKey );
    if ( !nd || !(fpEtherStats = (IOEthernetStats*)nd->getBuffer()) )
    {
        IOLog("EtherNet(UniN): invalid ethernet statistics\n");
        return false;
    }

    /*
     * Set the driver/stack reentrancy flag. This is meant to reduce
     * context switches. May become irrelevant in the future.
     */
    return true;
}/* end configureInterface */


/*-------------------------------------------------------------------------
 * free - release every resource start() may have acquired. Safe to call
 * with any subset allocated since each release is individually guarded.
 * NOTE(review): keyLargo_resetUniNEthernetPhy (an OSSymbol) is not
 * released here - possible small leak; confirm ownership convention.
 *-------------------------------------------------------------------------*/

void UniNEnet::free()
{
    TxQueueElement * txElement;

    resetAndEnable(false);      // quiesce the hardware first

    if (debugger)
        debugger->release();

    if (getWorkLoop())
    {
        getWorkLoop()->disableAllEventSources();
    }

    if (timerSource)
    {
        timerSource->release();
        timerSource = 0;
    }

    if (interruptSource)
    {
        interruptSource->release();
    }

    if (txDebuggerPkt)
    {
        freePacket(txDebuggerPkt);
    }

    if (transmitQueue)
    {
        transmitQueue->release();
    }

    if (debugQueue)
    {
        debugQueue->release();
    }

    if (networkInterface)
    {
        networkInterface->release();
    }

    if (mbufCursor)
    {
        mbufCursor->release();
    }

    if ( mediumDict )
    {
        mediumDict->release();
    }

    // Drain and free every TxQueueElement still on the free/active lists.
    while ( ( txElement = getTxElement() ) )
    {
        IOFree( txElement, sizeof(TxQueueElement) );
    }

    if ( ioMapEnet )
    {
        ioMapEnet->release();
    }

    if ( dmaCommands != 0 )
    {
        IOFreeContiguous( (void *)dmaCommands, dmaCommandsSize );
    }

    if ( workLoop )
    {
        workLoop->release();
        workLoop = 0;
    }

    if ( txQueueLock )
    {
        IOSimpleLockFree( txQueueLock );
        txQueueLock = 0;
    }

    super::free();
}

/*-------------------------------------------------------------------------
 * Override IONetworkController::createWorkLoop() method and create
 * a workloop.
 *
 *-------------------------------------------------------------------------*/

bool UniNEnet::createWorkLoop()
{
    workLoop = IOWorkLoop::workLoop();

    return ( workLoop != 0 );
}

/*-------------------------------------------------------------------------
 * Override IOService::getWorkLoop() method to return our workloop.
 *
 *
 *-------------------------------------------------------------------------*/

IOWorkLoop * UniNEnet::getWorkLoop() const
{
    return workLoop;
}

/*-------------------------------------------------------------------------
 * interruptOccurred - primary interrupt handler, run on the work loop.
 * Loops until the TX/RX cause bits read back clear; ring servicing is done
 * under the debugger lock, while the input-queue flush and output-queue
 * service calls are deliberately made after dropping it.
 *-------------------------------------------------------------------------*/

void UniNEnet::interruptOccurred(IOInterruptEventSource * src, int /*count*/)
{
    IODebuggerLockState lockState;
    UInt32              interruptStatus;
    bool                doFlushQueue;
    bool                doService;


    if ( ready == false ) return;   // ignore interrupts while disabled

    do
    {
        // Exclude the kernel debugger while touching the rings.
        lockState = IODebuggerLock( this );

        interruptStatus = READ_REGISTER( Status )
                        & ( kStatus_TX_INT_ME | kStatus_RX_DONE );

        doService = false;

        if ( interruptStatus & kStatus_TX_INT_ME )
        {
            txWDInterrupts++;       // feed the TX watchdog
            KERNEL_DEBUG(DBG_GEM_TXIRQ | DBG_FUNC_START, 0, 0, 0, 0, 0 );
            doService = transmitInterruptOccurred();
            KERNEL_DEBUG(DBG_GEM_TXIRQ | DBG_FUNC_END, 0, 0, 0, 0, 0 );
            ETHERNET_STAT_ADD( dot3TxExtraEntry.interrupts );
        }

        doFlushQueue = false;

        if ( interruptStatus & kStatus_RX_DONE )
        {
            rxWDInterrupts++;       // feed the RX watchdog
            KERNEL_DEBUG(DBG_GEM_RXIRQ | DBG_FUNC_START, 0, 0, 0, 0, 0 );
            doFlushQueue = receiveInterruptOccurred();
            KERNEL_DEBUG(DBG_GEM_RXIRQ | DBG_FUNC_END, 0, 0, 0, 0, 0 );
            ETHERNET_STAT_ADD( dot3RxExtraEntry.interrupts );
        }

        IODebuggerUnlock( lockState );

        /*
         * Submit all received packets queued up by _receiveInterruptOccurred()
         * to the network stack. The up call is performed without holding the
         * debugger lock.
         */
        if (doFlushQueue)
        {
            networkInterface->flushInputQueue();
        }

        /*
         * Make sure the output queue is not stalled.
         */
        if (doService && netifEnabled)
        {
            transmitQueue->service();
        }
    }
    while ( interruptStatus );

//  interruptSource->enable();
    return;
}/* end interruptOccurred */


/*-------------------------------------------------------------------------
 * outputPacket - IOOutputQueue callout. Drops the packet when the link is
 * down (still reported as success so the queue keeps draining); asks the
 * queue to stall when the TX ring is full.
 *-------------------------------------------------------------------------*/

UInt32 UniNEnet::outputPacket(struct mbuf * pkt, void * param)
{
    UInt32 ret = kIOReturnOutputSuccess;

    KERNEL_DEBUG( DBG_GEM_TXQUEUE | DBG_FUNC_NONE,
                  (int) pkt, (int) pkt->m_pkthdr.len, 0, 0, 0 );

    /*
     * Hold the debugger lock so the debugger can't interrupt us
     */
    reserveDebuggerLock();

    if ( linkStatusPrev != kLinkStatusUp )
    {
        freePacket( pkt );      // link down: silently drop
    }
    else if ( transmitPacket(pkt) == false )
    {
        ret = kIOReturnOutputStall;     // ring full: retry later
    }

    releaseDebuggerLock();

    return ret;
}

/*-------------------------------------------------------------------------
 * resetAndEnable - quiesce, reset, and (optionally) re-initialize the
 * chip. With enable==false the controller is left stopped; with true the
 * rings, PHY, and interrupts are brought back up.
 *-------------------------------------------------------------------------*/

bool UniNEnet::resetAndEnable(bool enable)
{
    bool ret = true;

    reserveDebuggerLock();

    ready = false;

    if (timerSource)
    {
        timerSource->cancelTimeout();
    }

    disableAdapterInterrupts();
    if (getWorkLoop())
    {
        getWorkLoop()->disableAllInterrupts();
    }

    if (enable)
    {
        phyId = 0xff;   // force a fresh PHY probe on re-enable
    }

    if ( resetChip() == false )
    {
        ret = false;
        goto resetAndEnable_exit;
    }

    // Initialize the link status.

    setLinkStatus( 0, 0 );

    // Flush all mbufs from RX and TX rings.

    flushRings();

    // One-shot "while" used as a breakable block for the enable path.
    while (enable)
    {
        if (!initRxRing() || !initTxRing())
        {
            ret = false;
            break;
        }

        if ( phyId != 0xff )
        {
            miiInitializePHY(phyId);
        }

        if (initChip() == false)
        {
            ret = false;
            break;
        }

//      startChip();

        timerSource->setTimeoutMS(WATCHDOG_TIMER_MS);

        if (getWorkLoop())
        {
            getWorkLoop()->enableAllInterrupts();
        }
        enableAdapterInterrupts();

        ready = true;

        monitorLinkStatus( true );      // firstPoll: publish link state now

        break;
    }

resetAndEnable_exit: ;

    releaseDebuggerLock();

    return ret;
}

/*-------------------------------------------------------------------------
 * Called by IOEthernetInterface client to enable the controller.
 * This method is always called while running on the default workloop
 * thread.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::enable(IONetworkInterface * netif)
{
    /*
     * If an interface client has previously enabled us,
     * and we know there can only be one interface client
     * for this driver, then simply return true.
     */
    if ( netifEnabled )
    {
        IOLog("EtherNet(UniN): already enabled\n");
        return kIOReturnSuccess;
    }

    // Hardware may already be up if the debugger client enabled it.
    if ( (ready == false) && !resetAndEnable(true) )
        return kIOReturnIOError;

    /*
     * Mark the controller as enabled by the interface.
     */
    netifEnabled = true;

    /*
     * Start our IOOutputQueue object.
     */
    transmitQueue->setCapacity( TRANSMIT_QUEUE_SIZE );
    transmitQueue->start();

    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * Called by IOEthernetInterface client to disable the controller.
 * This method is always called while running on the default workloop
 * thread.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::disable(IONetworkInterface * /*netif*/)
{
    /*
     * Disable our IOOutputQueue object. This will prevent the
     * outputPacket() method from being called.
     */
    transmitQueue->stop();

    /*
     * Flush all packets currently in the output queue.
     */
    transmitQueue->setCapacity(0);
    transmitQueue->flush();

    /*
     * If we have no active clients, then disable the controller.
     * (The debugger client may still need the hardware running.)
     */
    if ( debugEnabled == false )
    {
        resetAndEnable(false);
    }

    netifEnabled = false;

    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * This method is called by our debugger client to bring up the controller
 * just before the controller is registered as the debugger device. The
 * debugger client is attached in response to the attachDebuggerClient()
 * call.
 *
 * This method is always called while running on the default workloop
 * thread.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::enable(IOKernelDebugger * /*debugger*/)
{
    /*
     * Enable hardware and make it ready to support the debugger client.
     */
    if ( (ready == false) && !resetAndEnable(true) )
    {
        return kIOReturnIOError;
    }

    /*
     * Mark the controller as enabled by the debugger.
     */
    debugEnabled = true;

    /*
     * Returning true will allow the kdp registration to continue.
     * If we return false, then we will not be registered as the
     * debugger device, and the attachDebuggerClient() call will
     * return NULL.
     */
    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * This method is called by our debugger client to stop the controller.
 * The debugger will call this method when we issue a detachDebuggerClient().
 *
 * This method is always called while running on the default workloop
 * thread.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::disable(IOKernelDebugger * /*debugger*/)
{
    debugEnabled = false;

    /*
     * If we have no active clients, then disable the controller.
     */
    if ( netifEnabled == false )
    {
        resetAndEnable(false);
    }

    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * timeoutOccurred - periodic (WATCHDOG_TIMER_MS) housekeeping: harvest the
 * GMAC hardware statistics counters, poll link state, and watchdog both
 * the transmitter and the receiver, restarting them when they appear hung.
 *-------------------------------------------------------------------------*/

void UniNEnet::timeoutOccurred(IOTimerEventSource * /*timer*/)
{
    IODebuggerLockState lockState;
    bool                doService = false;
    UInt32              txRingIndex;
    UInt32              x;


    if ( ready == false )
    {
        // IOLog("EtherNet(UniN): Spurious timeout event!!\n");
        return;
    }


    /* Update statistics from the GMAC statistics registers:
     * (each counter is read then cleared back to zero) */

    x = READ_REGISTER( LengthErrorCounter );
    writeRegister( &fpRegs->LengthErrorCounter, 0 );
    fpEtherStats->dot3StatsEntry.frameTooLongs += x;

    x = READ_REGISTER( AlignmentErrorCounter );
    writeRegister( &fpRegs->AlignmentErrorCounter, 0 );
    fpEtherStats->dot3StatsEntry.alignmentErrors += x;

    x = READ_REGISTER( FCSErrorCounter );
    writeRegister( &fpRegs->FCSErrorCounter, 0 );
    fpEtherStats->dot3StatsEntry.fcsErrors += x;

    x = READ_REGISTER( RxCodeViolationErrorCounter );
    writeRegister( &fpRegs->RxCodeViolationErrorCounter, 0 );
    fpEtherStats->dot3StatsEntry.internalMacTransmitErrors += x;

    x = READ_REGISTER( FirstAttemptSuccessfulCollisionCounter );
    writeRegister( &fpRegs->FirstAttemptSuccessfulCollisionCounter, 0 );
    fpEtherStats->dot3StatsEntry.singleCollisionFrames += x;

    x = READ_REGISTER( ExcessiveCollisionCounter );
    writeRegister( &fpRegs->ExcessiveCollisionCounter, 0 );
    fpEtherStats->dot3StatsEntry.excessiveCollisions += x;

    x = READ_REGISTER( LateCollisionCounter );
    writeRegister( &fpRegs->LateCollisionCounter, 0 );
    fpEtherStats->dot3StatsEntry.lateCollisions += x;

    lockState = IODebuggerLock( this );

    monitorLinkStatus();

    /*
     * If there are pending entries on the Tx ring
     */
    if ( txCommandHead != txCommandTail )
    {
        /*
         * If the hardware tx pointer did not move since the last
         * check, increment the
txWDCount.
         */
        txRingIndex = READ_REGISTER( TxCompletion );
        if ( txRingIndex == txRingIndexLast )
        {
            txWDCount++;
        }
        else
        {
            txWDCount       = 0;
            txRingIndexLast = txRingIndex;
        }

        if ( txWDCount > 2 )
        {
            /*
             * We only take interrupts every 64 tx completions, so we may be here just
             * to do normal clean-up of tx packets. We check if the hardware tx pointer
             * points to the next available tx slot. This indicates that we transmitted all
             * packets that were scheduled rather than the hardware tx being stalled.
             */
            if ( txRingIndex != txCommandTail )
            {
                UInt32        interruptStatus, compReg, kickReg;

                interruptStatus = READ_REGISTER( Status );
                compReg         = READ_REGISTER( TxCompletion );
                kickReg         = READ_REGISTER( TxKick );

                IOLog( "Tx Int Timeout - Comp = %04x Kick = %04x Int = %08x\n\r", (int)compReg, (int)kickReg, (int)interruptStatus );
            }

//          dumpRegisters();

            transmitInterruptOccurred();    // reclaim completed descriptors

            doService = true;

            txRingIndexLast = txRingIndex;
            txWDCount = 0;
        }
    }
    else
    {
        txWDCount = 0;
    }

    // Monitor receiver's health.

    if ( rxWDInterrupts == 0 )
    {
        UInt32 rxMACStatus;

        switch ( rxWDCount )
        {
            case 0:
            case 1:
                rxWDCount++;    // Extend timeout
                break;

            default:
                // We could be less conservative here and restart the
                // receiver unconditionally.

                rxMACStatus = READ_REGISTER( RxMACStatus );

                if ( rxMACStatus & kRX_MAC_Status_Rx_Overflow )
                {
                    // Bad news, the receiver may be deaf as a result of this
                    // condition, and if so, a RX MAC reset is needed. Note
                    // that reading this register will clear all bits.

                    restartReceiver();

                    NETWORK_STAT_ADD( inputErrors );
                    ETHERNET_STAT_ADD( dot3RxExtraEntry.watchdogTimeouts );
                }
                rxWDCount = 0;
                break;
        }
    }
    else
    {
        // Reset watchdog

        rxWDCount      = 0;
        rxWDInterrupts = 0;
    }

    /* Clean-up after the debugger if the debugger was active: */

    if ( debugTxPoll )
    {
        debugQueue->flush();
        debugTxPoll = false;
        doService   = true;
    }
    IODebuggerUnlock( lockState );

    /*
     * Make sure the queue is not stalled.
     */
    if (doService && netifEnabled)
    {
        transmitQueue->service();
    }

    /*
     * Restart the watchdog timer
     */
    timerSource->setTimeoutMS(WATCHDOG_TIMER_MS);
    return;
}/* end timeoutOccurred */


/*-------------------------------------------------------------------------
 * Identification strings published to the networking stack.
 *-------------------------------------------------------------------------*/

const OSString * UniNEnet::newVendorString() const
{
    return OSString::withCString("Apple");
}

const OSString * UniNEnet::newModelString() const
{
    return OSString::withCString("gmac+");
}

const OSString * UniNEnet::newRevisionString() const
{
    return OSString::withCString("");
}


/*-------------------------------------------------------------------------
 * setPromiscuousMode - toggle the RX MAC promiscuous bit under the
 * debugger lock; the cached rxMacConfigReg mirrors the hardware value.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::setPromiscuousMode(IOEnetPromiscuousMode mode)
{
    reserveDebuggerLock();

    rxMacConfigReg = READ_REGISTER( RxMACConfiguration );
    if (mode == kIOEnetPromiscuousModeOff)
    {
        rxMacConfigReg &= ~(kRxMACConfiguration_Promiscuous);
        isPromiscuous   = false;

    }
    else
    {
        rxMacConfigReg |= kRxMACConfiguration_Promiscuous;
        isPromiscuous   = true;

    }
    WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg );

    releaseDebuggerLock();

    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * setMulticastMode - record whether multicast filtering is in effect.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::setMulticastMode(IOEnetMulticastMode mode)
{
    multicastEnabled = (mode == kIOEnetMulticastModeOff) ? false : true;

    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * setMulticastList - rebuild the hardware hash-table filter from the
 * supplied address list, under the debugger lock.
 *-------------------------------------------------------------------------*/

IOReturn UniNEnet::setMulticastList(IOEthernetAddress *addrs, UInt32 count)
{
    reserveDebuggerLock();

    resetHashTableMask();
    for (UInt32 i = 0; i < count; i++)
    {
        addToHashTableMask(addrs->bytes);
        addrs++;
    }
    updateHashTableMask();

    releaseDebuggerLock();
    return kIOReturnSuccess;
}

/*-------------------------------------------------------------------------
 * createOutputQueue - supply the output queue used by the family.
 *-------------------------------------------------------------------------*/

IOOutputQueue* UniNEnet::createOutputQueue()
{
    return IOBasicOutputQueue::withTarget( this, TRANSMIT_QUEUE_SIZE );
}/* end createOutputQueue */


/*-------------------------------------------------------------------------
 * Static table of the media this controller can publish.
 *-------------------------------------------------------------------------*/

static struct MediumTable
{
    UInt32  type;
    UInt32  speed;
}
mediumTable[] =
{
    { kIOMediumEthernetNone                                  ,   0    },
    { kIOMediumEthernetAuto                                  ,   0    },
    { kIOMediumEthernet10BaseT    | kIOMediumOptionHalfDuplex,   10   },
    { kIOMediumEthernet10BaseT    | kIOMediumOptionFullDuplex,   10   },
    { kIOMediumEthernet100BaseTX  | kIOMediumOptionHalfDuplex,   100  },
    { kIOMediumEthernet100BaseTX  | kIOMediumOptionFullDuplex,   100  },
    { kIOMediumEthernet1000BaseSX | kIOMediumOptionFullDuplex,   1000 },
    { kIOMediumEthernet1000BaseTX | kIOMediumOptionFullDuplex,   1000 }
};


/*-------------------------------------------------------------------------
 * createMediumTables - build and publish the medium dictionary from
 * mediumTable[], and select auto-negotiation as the current medium.
 * (mediumDict is released in free() on the failure paths.)
 *-------------------------------------------------------------------------*/

bool UniNEnet::createMediumTables()
{
    IONetworkMedium     *medium;
    UInt32              i;

    mediumDict = OSDictionary::withCapacity( sizeof(mediumTable)/sizeof(mediumTable[0]) );
    if ( mediumDict == 0 ) return false;

    for ( i=0; i < sizeof(mediumTable)/sizeof(mediumTable[0]); i++ )
    {
        medium = IONetworkMedium::medium( mediumTable[i].type, mediumTable[i].speed );
        if ( medium != 0 )
        {
            IONetworkMedium::addMedium( mediumDict, medium );
            medium->release();      // dictionary holds its own reference
        }
    }

    if ( publishMediumDictionary( mediumDict ) != true )
    {
        return false;
    }

    medium = IONetworkMedium::getMediumWithType( mediumDict,
                                                 kIOMediumEthernetAuto );

    setCurrentMedium( medium );

    return true;
}


/*-------------------------------------------------------------------------
 * writeRegister - debug-instrumentable little-endian register store.
 *-------------------------------------------------------------------------*/

void UniNEnet::writeRegister( UInt32 *pReg, UInt32 data )
{
///	ELG( data, (UInt32)pReg - (UInt32)fpRegs, 'wReg', "writeRegister" );

    OSWriteLittleInt32( pReg, 0, data );
    return;
}/* end writeRegister */
diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnet.h b/iokit/Drivers/network/drvPPCUniN/UniNEnet.h
new file mode 100644
index 000000000..235ff2b14
--- /dev/null
+++ b/iokit/Drivers/network/drvPPCUniN/UniNEnet.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1998-1999 Apple Software, Inc.
+ * + * Interface definition for the UniN Ethernet Controller + * + * HISTORY + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* bcopy */ + +extern "C" +{ + #include + #include +} + +//#define IOLog kprintf + +// No kernel tracing support at this time. +// +#define KERNEL_DEBUG(x,a,b,c,d,e) + +#include "UniNEnetRegisters.h" + + + +#define NETWORK_STAT_ADD( x ) (fpNetStats->x++) +#define ETHERNET_STAT_ADD( x ) (fpEtherStats->x++) + +#define READ_REGISTER( REG ) OSReadLittleInt32( (void*)&fpRegs->REG, 0 ) +#define DBG_WRITE 0 +#if DBG_WRITE +#define WRITE_REGISTER( REG, VAL ) writeRegister( &fpRegs->REG, VAL ) +#else +#define WRITE_REGISTER( REG, VAL ) OSWriteLittleInt32( (void*)&fpRegs->REG, 0, VAL ) +#endif // DBG_WRITE + + +typedef void * IOPPCAddress; + +#define NUM_RX_DESC 1 +typedef struct enet_dma_cmd_t +{ + GEMRxDescriptor desc_seg[NUM_RX_DESC]; +} enet_dma_cmd_t; + +#define NUM_TX_DESC 1 +typedef struct enet_txdma_cmd_t +{ + GEMTxDescriptor desc_seg[NUM_TX_DESC]; +} enet_txdma_cmd_t; + + +typedef struct TxQueueElement +{ + queue_chain_t next; + queue_head_t * list; + + struct mbuf * mbuf; + UInt32 slot; + UInt32 count; +} TxQueueElement; + +enum LinkStatus +{ + kLinkStatusUnknown, + kLinkStatusDown, + kLinkStatusUp, +}; + +#define kMaxUniNEnetPowerState 1 +#define kUniNsettle_time 500 //guess 500 microseconds for settling + + +class UniNEnet: public IOEthernetController +{ + OSDeclareDefaultStructors( UniNEnet ) + +private: + volatile GMAC_Registers *fpRegs; + IOPCIDevice * nub; + IOMemoryMap * ioMapEnet; + volatile IOPPCAddress ioBaseEnet; + + IOEthernetInterface * networkInterface; + IOBasicOutputQueue * transmitQueue; + IOPacketQueue * debugQueue; + IOKernelDebugger * debugger; + + IOWorkLoop * workLoop; + IOInterruptEventSource * interruptSource; + IONetworkStats * fpNetStats; + IOEthernetStats * fpEtherStats; + IOTimerEventSource * timerSource; + IOMbufBigMemoryCursor * mbufCursor; + IOSimpleLock * 
txQueueLock; + + bool ready; + bool netifEnabled; + bool debugEnabled; + bool debugTxPoll; + + IOEthernetAddress myAddress; + bool isPromiscuous; + bool multicastEnabled; + bool isFullDuplex; + + UInt32 phyType; + UInt8 phyId; + + UInt16 phyStatusPrev; + UInt32 linkStatusPrev; + + UInt16 phyBCMType; // 5400 or 5201 for PM + + OSDictionary * mediumDict; + + queue_head_t txActiveQueue; + queue_head_t txFreeQueue; + + TxQueueElement * txElementPtrs[TX_RING_LENGTH]; + struct mbuf * rxMbuf[RX_RING_LENGTH]; + struct mbuf * txDebuggerPkt; + + void * debuggerPkt; + u_int32_t debuggerPktSize; + + UInt32 txCommandHead; // TX ring descriptor index + UInt32 txCommandTail; + UInt32 rxCommandHead; // RX ring descriptor index + UInt32 rxCommandTail; + + UInt32 dmaCommandsSize; + UInt8 * dmaCommands; + enet_txdma_cmd_t * txDMACommands; // TX descriptor ring ptr + UInt32 txDMACommandsPhys; + UInt32 txCommandsAvail; + + enet_dma_cmd_t * rxDMACommands; // RX descriptor ring ptr + UInt32 rxDMACommandsPhys; + + UInt32 txIntCnt; + UInt32 txRingIndexLast; + UInt32 txWDInterrupts; + UInt32 txWDCount; + + UInt32 rxWDInterrupts; + UInt32 rxWDCount; + UInt32 rxMacConfigReg; + + UInt16 hashTableUseCount[256]; + UInt16 hashTableMask[16]; + + unsigned long currentPowerState; /* must be 0 or 1 */ + + bool allocateMemory(); + bool initTxRing(); + bool initRxRing(); + void flushRings(); + bool initChip(); + bool resetChip(); + void disableAdapterInterrupts(); + void enableAdapterInterrupts(); + void setDuplexMode(bool duplexMode); + void startChip(); + void stopChip(); + bool updateDescriptorFromMbuf(struct mbuf * m, + enet_dma_cmd_t * desc, + bool isReceive); + void monitorLinkStatus( bool firstPoll = false ); + void restartTransmitter(); + void stopTransmitDMA(); + bool transmitPacket(struct mbuf * packet); + bool transmitInterruptOccurred(); + bool debugTransmitInterruptOccurred(); + void debugTransmitCleanup(); + bool receiveInterruptOccurred(); + bool receivePackets(bool fDebugger); + void 
packetToDebugger(struct mbuf * packet, u_int size); + void restartReceiver(); + void stopReceiveDMA(); + bool resetAndEnable(bool enable); + void sendDummyPacket(); + void resetHashTableMask(); + void addToHashTableMask(u_int8_t *addr); + void removeFromHashTableMask(u_int8_t *addr); + void updateHashTableMask(); + + + TxQueueElement * getTxElement(); + void releaseTxElement(TxQueueElement * txElement); + +#ifdef DEBUG + void dumpRegisters(); +#endif DEBUG + + void sendPacket(void * pkt, UInt32 pkt_len); + void receivePacket(void * pkt, UInt32 * pkt_len, + UInt32 timeout); + + bool miiReadWord(unsigned short * dataPtr, + unsigned short reg, UInt8 phy); + bool miiWriteWord(unsigned short data, + unsigned short reg, UInt8 phy); + void miiWrite(UInt32 miiData, UInt32 dataSize); + bool miiResetPHY(UInt8 phy); + bool miiWaitForLink(UInt8 phy); + bool miiWaitForAutoNegotiation(UInt8 phy); + void miiRestartAutoNegotiation(UInt8 phy); + bool miiFindPHY(UInt8 * phy_num); + bool miiInitializePHY(UInt8 phy); + + UInt32 outputPacket(struct mbuf * m, void * param); + + void interruptOccurred(IOInterruptEventSource * src, + int count); + void timeoutOccurred(IOTimerEventSource * timer); + bool createMediumTables(); + + void writeRegister( UInt32 *pReg, UInt32 data ); + + void stopPHYChip(bool setupWOL); + void startPHYChip(); + bool resetPHYChip(); + + // callPlatformFunction symbols + const OSSymbol *keyLargo_resetUniNEthernetPhy; + + IOService *keyLargo; + +public: + virtual bool init(OSDictionary * properties = 0); + virtual bool start(IOService * provider); + virtual void free(); + + virtual bool createWorkLoop(); + virtual IOWorkLoop * getWorkLoop() const; + + virtual IOReturn enable(IONetworkInterface * netif); + virtual IOReturn disable(IONetworkInterface * netif); + + virtual IOReturn getHardwareAddress(IOEthernetAddress *addr); + + virtual IOReturn setMulticastMode(IOEnetMulticastMode mode); + virtual IOReturn setMulticastList(IOEthernetAddress *addrs, UInt32 count); + 
+ virtual IOReturn setPromiscuousMode(IOEnetPromiscuousMode mode); + + virtual IOOutputQueue * createOutputQueue(); + + virtual const OSString * newVendorString() const; + virtual const OSString * newModelString() const; + virtual const OSString * newRevisionString() const; + + virtual IOReturn enable(IOKernelDebugger * debugger); + virtual IOReturn disable(IOKernelDebugger * debugger); + + virtual bool configureInterface(IONetworkInterface * netif); + + // Power management methods: + virtual IOReturn registerWithPolicyMaker(IOService * policyMaker); + virtual UInt32 maxCapabilityForDomainState(IOPMPowerFlags state); + virtual UInt32 initialPowerStateForDomainState(IOPMPowerFlags state); + virtual UInt32 powerStateForDomainState(IOPMPowerFlags state); + virtual IOReturn setPowerState(UInt32 powerStateOrdinal, + IOService * whatDevice); +}; + + +/* + * Performance tracepoints + * + * DBG_UniN_RXIRQ - Receive ISR run time + * DBG_UniN_TXIRQ - Transmit ISR run time + * DBG_UniN_TXQUEUE - Transmit packet passed from network stack + * DBG_UniN_TXCOMPLETE - Transmit packet sent + * DBG_UniN_RXCOMPLETE - Receive packet passed to network stack + */ +#define DBG_UniN_ENET 0x0900 +#define DBG_UniN_RXIRQ DRVDBG_CODE(DBG_DRVNETWORK,(DBG_UniN_ENET+1)) +#define DBG_UniN_TXIRQ DRVDBG_CODE(DBG_DRVNETWORK,(DBG_UniN_ENET+2)) +#define DBG_UniN_TXQUEUE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_UniN_ENET+3)) +#define DBG_UniN_TXCOMPLETE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_UniN_ENET+4)) +#define DBG_UniN_RXCOMPLETE DRVDBG_CODE(DBG_DRVNETWORK,(DBG_UniN_ENET+5)) diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnetHW.cpp b/iokit/Drivers/network/drvPPCUniN/UniNEnetHW.cpp new file mode 100644 index 000000000..c6c2b8db2 --- /dev/null +++ b/iokit/Drivers/network/drvPPCUniN/UniNEnetHW.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Software, Inc. + * + * Miscellaneous definitions for the Sun GEM Ethernet controller. 
 *
 * HISTORY
 *
 */

#include "UniNEnetPrivate.h"


/* xWriteUniNRegister - byte-swapped MMIO store. The access width is
 * encoded in the upper halfword of reg_offset (1, 2 or 4 bytes); the
 * lower halfword is the byte offset into the register bank. 16/32-bit
 * data is swapped to the device's little-endian layout, and eieio()
 * orders the store on PPC. Unknown widths are silently ignored. */
void xWriteUniNRegister( IOPPCAddress ioBaseEnet, u_int32_t reg_offset, u_int32_t data )
{
    switch ( reg_offset >> 16 )
    {
        case 1:
            ((u_int8_t *) ioBaseEnet)[reg_offset & 0xffff] = data;
            break;
        case 2:
            ((u_int16_t *)ioBaseEnet)[(reg_offset & 0xFFFF) >> 1] = OSSwapInt16( data );
            break;
        case 4:
            ((u_int32_t *)ioBaseEnet)[(reg_offset & 0xFFFF) >> 2] = OSSwapInt32( data );
            break;
    }
    eieio();
}


/* xReadUniNRegister - byte-swapped MMIO load; same width encoding as
 * xWriteUniNRegister. Returns 0 for an unrecognized width code. */
volatile u_int32_t xReadUniNRegister( IOPPCAddress ioBaseEnet, u_int32_t reg_offset )
{
    switch ( reg_offset >> 16 )
    {
        case 1:
            return ((u_int8_t *) ioBaseEnet)[reg_offset & 0xffff];

        case 2:
            return OSSwapInt16( ((u_int16_t *)ioBaseEnet)[(reg_offset & 0xFFFF) >> 1] );

        case 4:
            return OSSwapInt32( ((u_int32_t *)ioBaseEnet)[(reg_offset & 0xFFFF) >> 2] );
    }

    return 0;
}


/*
 * Procedure for reading EEPROM
 */
diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnetMII.cpp b/iokit/Drivers/network/drvPPCUniN/UniNEnetMII.cpp
new file mode 100644
index 000000000..af9004a62
--- /dev/null
+++ b/iokit/Drivers/network/drvPPCUniN/UniNEnetMII.cpp
@@ -0,0 +1,259 @@
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved.
 *
 * MII/PHY (National Semiconductor DP83840/DP83840A) support methods.
 * It is general enough to work with most MII/PHYs.
 *
 * HISTORY
 *
 */
#include "UniNEnetPrivate.h"


/*
 * Read from MII/PHY registers.
 * Builds an MIF bit-bang frame and polls (up to 20 x 10us) for the
 * turnaround LSB that signals frame completion; the low 16 bits of the
 * completed frame hold the register data.
 */
bool UniNEnet::miiReadWord( UInt16 *dataPtr, UInt16 reg, UInt8 phy )
{
    UInt32      i;
    UInt32      miiReg;

    WRITE_REGISTER( MIFBitBangFrame_Output,
                      kMIFBitBangFrame_Output_ST_default
                    | kMIFBitBangFrame_Output_OP_read
                    | phy << kMIFBitBangFrame_Output_PHYAD_shift
                    | reg << kMIFBitBangFrame_Output_REGAD_shift
                    | kMIFBitBangFrame_Output_TA_MSB );

    for (i=0; i < 20; i++ )
    {
        miiReg = READ_REGISTER( MIFBitBangFrame_Output );

        if ( miiReg & kMIFBitBangFrame_Output_TA_LSB )
        {
//          IOLog("Phy = %d Reg = %d miiReg = %08x\n\r", phy, reg, miiReg );
            *dataPtr = (UInt16) miiReg;
            return true;
        }
        IODelay(10);
    }

    return false;       // timed out waiting for frame completion
}

/*
 * Write to MII/PHY registers.
 * Same frame/poll protocol as miiReadWord, with the data in the low bits.
 */
bool UniNEnet::miiWriteWord( UInt16 data, UInt16 reg, UInt8 phy )
{
    UInt32      i;
    UInt32      miiReg;


    WRITE_REGISTER( MIFBitBangFrame_Output,
                      kMIFBitBangFrame_Output_ST_default
                    | kMIFBitBangFrame_Output_OP_write
                    | phy << kMIFBitBangFrame_Output_PHYAD_shift
                    | reg << kMIFBitBangFrame_Output_REGAD_shift
                    | kMIFBitBangFrame_Output_TA_MSB
                    | data );

    for ( i=0; i < 20; i++ )
    {
        miiReg = READ_REGISTER( MIFBitBangFrame_Output );

        if ( miiReg & kMIFBitBangFrame_Output_TA_LSB )
        {
            return true;
        }
        IODelay(10);
    }

    return false;       // timed out waiting for frame completion
}


/* miiResetPHY - set the PHY reset bit and poll (up to MII_RESET_TIMEOUT)
 * for it to self-clear, then de-isolate the PHY. Returns false on MII
 * access failure or timeout. */
bool UniNEnet::miiResetPHY( UInt8 phy )
{
    int         i = MII_RESET_TIMEOUT;
    UInt16      mii_control;

    // Set the reset bit
    //
    miiWriteWord( MII_CONTROL_RESET, MII_CONTROL, phy );

    IOSleep(MII_RESET_DELAY);

    // Wait till reset process is complete (MII_CONTROL_RESET returns to zero)
    //
    while ( i > 0 )
    {
        if ( miiReadWord( &mii_control, MII_CONTROL, phy) == false )
            return false;

        if (!(mii_control & MII_CONTROL_RESET))
        {
            miiReadWord( &mii_control, MII_CONTROL, phy);
            mii_control &= ~MII_CONTROL_ISOLATE;
            miiWriteWord( mii_control, MII_CONTROL, phy );
            return true;
        }

        IOSleep(MII_RESET_DELAY);
        i -= MII_RESET_DELAY;
    }
    return false;
}

/* miiWaitForLink - poll the MII status register until the link-up bit is
 * set or MII_LINK_TIMEOUT elapses. */
bool UniNEnet::miiWaitForLink( UInt8 phy )
{
    int                 i = MII_LINK_TIMEOUT;
    unsigned short      mii_status;

    while (i > 0)
    {
        if ( miiReadWord( &mii_status, MII_STATUS, phy ) == false)
            return false;

        if (mii_status & MII_STATUS_LINK_STATUS)
            return true;

        IOSleep(MII_LINK_DELAY);
        i -= MII_LINK_DELAY;
    }
    return false;
}

/* miiWaitForAutoNegotiation - poll the MII status register until
 * auto-negotiation completes or MII_LINK_TIMEOUT elapses. */
bool UniNEnet::miiWaitForAutoNegotiation( UInt8 phy )
{
    int                 i = MII_LINK_TIMEOUT;
    unsigned short      mii_status;

    while (i > 0)
    {
        if ( miiReadWord( &mii_status, MII_STATUS, phy ) == false)
            return false;

        if (mii_status & MII_STATUS_NEGOTIATION_COMPLETE)
            return true;

        IOSleep(MII_LINK_DELAY);
        i -= MII_LINK_DELAY;
    }
    return false;
}

void UniNEnet::miiRestartAutoNegotiation( UInt8 phy )
+{ + unsigned short mii_control; + + miiReadWord( &mii_control, MII_CONTROL, phy); + mii_control |= MII_CONTROL_RESTART_NEGOTIATION; + miiWriteWord( mii_control, MII_CONTROL, phy); + + /* + * If the system is not connected to the network, then auto-negotiation + * never completes and we hang in this loop! + */ +#if 0 + while (1) + { + miiReadWord( &mii_control, MII_CONTROL, phy ); + if ((mii_control & MII_CONTROL_RESTART_NEGOTIATION) == 0) + break; + } +#endif +} + +/* + * Find the first PHY device on the MII interface. + * + * Return + * true PHY found + * false PHY not found + */ +bool UniNEnet::miiFindPHY( UInt8 *phy ) +{ + int i; + UInt16 phyWord[2]; + + *phy = 0xff; + + // The first two PHY registers are required. + // + for (i = 0; i < MII_MAX_PHY; i++) + { + if ( miiReadWord( &phyWord[0], MII_STATUS, i ) == false ) + { + continue; + } + if ( miiReadWord( &phyWord[1], MII_CONTROL, i ) == false ) + { + continue; + } + if ( phyWord[0] == 0xffff && phyWord[1] == 0xffff ) + { + continue; + } + + if ( *phy == 0xff ) *phy = i; + } + + return (*phy == 0xff) ? 
false : true; +} + +/* + * + * + */ +bool UniNEnet::miiInitializePHY( UInt8 phy ) +{ + UInt16 phyWord; + + // Clear enable auto-negotiation bit + // + miiReadWord( &phyWord, MII_CONTROL, phy ); + phyWord &= ~MII_CONTROL_AUTONEGOTIATION; + miiWriteWord( phyWord, MII_CONTROL, phy ); + + // Advertise 10/100 Half/Full duplex capable to link partner + // + miiReadWord( &phyWord, MII_ADVERTISEMENT, phy ); + phyWord |= (MII_ANAR_100BASETX_FD | MII_ANAR_100BASETX | + MII_ANAR_10BASET_FD | MII_ANAR_10BASET ); + miiWriteWord( phyWord, MII_ADVERTISEMENT, phy ); + + // Set enable auto-negotiation bit + // + miiReadWord( &phyWord, MII_CONTROL, phy ); + phyWord |= MII_CONTROL_AUTONEGOTIATION; + miiWriteWord( phyWord, MII_CONTROL, phy ); + + miiRestartAutoNegotiation( phy ); + + return true; +} + + diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnetMII.h b/iokit/Drivers/network/drvPPCUniN/UniNEnetMII.h new file mode 100644 index 000000000..9588e2b85 --- /dev/null +++ b/iokit/Drivers/network/drvPPCUniN/UniNEnetMII.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1998-1999 by Apple Computer, Inc., All rights reserved. + * + * MII protocol and PHY register definitions. + * + * HISTORY + * + */ + +/* + * MII command frame (32-bits) as documented in IEEE 802.3u + */ +#define MII_OP_READ 0x02 +#define MII_OP_WRITE 0x01 + +#define MII_MAX_PHY 32 + +/* MII Registers */ +#define MII_CONTROL 0 +#define MII_STATUS 1 +#define MII_ID0 2 +#define MII_ID1 3 +#define MII_ADVERTISEMENT 4 +#define MII_LINKPARTNER 5 +#define MII_EXPANSION 6 +#define MII_NEXTPAGE 7 + +/* MII Control register bits */ +#define MII_CONTROL_RESET 0x8000 +#define MII_CONTROL_LOOPBACK 0x4000 +#define MII_CONTROL_SPEED_SELECTION 0x2000 +#define MII_CONTROL_AUTONEGOTIATION 0x1000 +#define MII_CONTROL_POWERDOWN 0x800 +#define MII_CONTROL_ISOLATE 0x400 +#define MII_CONTROL_RESTART_NEGOTIATION 0x200 +#define MII_CONTROL_FULLDUPLEX 0x100 +#define MII_CONTROL_COLLISION_TEST 0x80 + +/* MII Status register bits */ +#define MII_STATUS_100BASET4 0x8000 +#define MII_STATUS_100BASETX_FD 0x4000 +#define MII_STATUS_100BASETX 0x2000 +#define MII_STATUS_10BASET_FD 0x1000 +#define MII_STATUS_10BASET 0x800 +#define MII_STATUS_NEGOTIATION_COMPLETE 0x20 +#define MII_STATUS_REMOTE_FAULT 0x10 +#define MII_STATUS_NEGOTIATION_ABILITY 0x8 +#define MII_STATUS_LINK_STATUS 0x4 +#define MII_STATUS_JABBER_DETECT 0x2 +#define MII_STATUS_EXTENDED_CAPABILITY 0x1 + +/* MII ANAR register bits */ +#define MII_ANAR_ASYM_PAUSE 0x800 +#define MII_ANAR_PAUSE 0x400 +#define MII_ANAR_100BASET4 0x200 +#define MII_ANAR_100BASETX_FD 0x100 +#define MII_ANAR_100BASETX 0x80 +#define MII_ANAR_10BASET_FD 0x40 +#define MII_ANAR_10BASET 0x20 + +/* MII ANLPAR register bits */ +#define MII_LPAR_NEXT_PAGE 0x8000 +#define MII_LPAR_ACKNOWLEDGE 0x4000 +#define MII_LPAR_REMOTE_FAULT 0x2000 +#define MII_LPAR_ASYM_PAUSE 0x0800 +#define MII_LPAR_PAUSE 0x0400 +#define MII_LPAR_100BASET4 0x200 +#define MII_LPAR_100BASETX_FD 0x100 +#define MII_LPAR_100BASETX 0x80 
+#define MII_LPAR_10BASET_FD 0x40 +#define MII_LPAR_10BASET 0x20 + + +/* MII BCM5201 Specific */ + +/* MII BCM5201 ID */ +#define MII_BCM5201_OUI 0x001018 +#define MII_BCM5201_MODEL 0x21 +#define MII_BCM5201_REV 0x01 +#define MII_BCM5201_ID ((MII_BCM5201_OUI << 10) | (MII_BCM5201_MODEL << 4)) +#define MII_BCM5201_MASK 0xfffffff0 + +#define MII_BCM5201_DELAY 1 + +/* MII BCM5201 Regs */ +#define MII_BCM5201_AUXSTATUS 0x18 + +/* MII BCM5201 AUXSTATUS register bits */ +#define MII_BCM5201_AUXSTATUS_DUPLEX 0x0001 +#define MII_BCM5201_AUXSTATUS_SPEED 0x0002 + +/* MII BCM5201 MULTIPHY interrupt register. + * Added 4/20/2000 by A.W. for power management */ +#define MII_BCM5201_INTERRUPT 0x1A +#define MII_BCM5201_INTERRUPT_INTENABLE 0x4000 + +#define MII_BCM5201_AUXMODE2 0x1B +#define MII_BCM5201_AUXMODE2_LOWPOWER 0x0008 + +#define MII_BCM5201_MULTIPHY 0x1E + +/* MII BCM5201 MULTIPHY register bits */ +#define MII_BCM5201_MULTIPHY_SERIALMODE 0x0002 +#define MII_BCM5201_MULTIPHY_SUPERISOLATE 0x0008 + + +/* MII LXT971 (Level One) Specific */ + +/* MII LXT971 ID */ +#define MII_LXT971_OUI 0x0004de +#define MII_LXT971_MODEL 0x0e +#define MII_LXT971_REV 0x01 +#define MII_LXT971_ID ((MII_LXT971_OUI << 10) | (MII_LXT971_MODEL << 4)) +#define MII_LXT971_MASK 0xfffffff0 + +#define MII_LXT971_DELAY 1 + +/* MII LXT971 Regs */ +#define MII_LXT971_STATUS_2 0x11 + +/* MII LXT971 Status #2 register bits */ +#define MII_LXT971_STATUS_2_DUPLEX 0x0200 +#define MII_LXT971_STATUS_2_SPEED 0x4000 + +/* MII BCM5400 Specific */ + +/* MII BCM5400 ID */ +#define MII_BCM5400_OUI 0x000818 +#define MII_BCM5400_MODEL 0x04 +#define MII_BCM5401_MODEL 0x05 +#define MII_BCM5400_REV 0x01 +#define MII_BCM5400_ID ((MII_BCM5400_OUI << 10) | (MII_BCM5400_MODEL << 4)) +#define MII_BCM5401_ID ((MII_BCM5400_OUI << 10) | (MII_BCM5401_MODEL << 4)) +#define MII_BCM5400_MASK 0xfffffff0 + +#define MII_BCM5400_DELAY 1 + +/* MII BCM5400 Regs */ + +#define MII_BCM5400_1000BASETCONTROL 0x09 +/* MII BCM5400 1000-BASET Control 
register bits */ + +#define MII_BCM5400_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 + +#define MII_BCM5400_AUXCONTROL 0x18 + +/* MII BCM5400 AUXCONTROL register bits */ +#define MII_BCM5400_AUXCONTROL_PWR10BASET 0x0004 + +#define MII_BCM5400_AUXSTATUS 0x19 + +/* MII BCM5400 AUXSTATUS register bits */ +#define MII_BCM5400_AUXSTATUS_LINKMODE_MASK 0x0700 +#define MII_BCM5400_AUXSTATUS_LINKMODE_BIT 0x0100 + + +/* MII ST10040 Specific */ + +/* MII ST10040 ID */ +#define MII_ST10040_OUI 0x1e0400 +#define MII_ST10040_MODEL 0x00 +#define MII_ST10040_REV 0x01 +#define MII_ST10040_ID ((MII_ST10040_OUI << 10) | (MII_ST10040_MODEL << 4)) +#define MII_ST10040_MASK 0xfffffff0 + +#define MII_ST10040_DELAY 1 + +/* MII ST10040 Regs */ +#define MII_ST10040_CHIPST 0x14 + +/* MII ST10040 CHIPST register bits */ +#define MII_ST10040_CHIPST_LINK 0x2000 +#define MII_ST10040_CHIPST_DUPLEX 0x1000 +#define MII_ST10040_CHIPST_SPEED 0x0800 +#define MII_ST10040_CHIPST_NEGOTIATION 0x0020 + + +/* MII DP83843 Specific */ + +/* MII DP83843 ID */ +#define MII_DP83843_OUI 0x080017 +#define MII_DP83843_MODEL 0x01 +#define MII_DP83843_REV 0x00 +#define MII_DP83843_ID ((MII_DP83843_OUI << 10) | (MII_DP83843_MODEL << 4)) +#define MII_DP83843_MASK 0xfffffff0 + +#define MII_DP83843_DELAY 20 + +/* MII DP83843 PHYSTS register bits */ +#define MII_DP83843_PHYSTS 0x10 +#define MII_DP83843_PHYSTS_LINK 0x0001 +#define MII_DP83843_PHYSTS_SPEED10 0x0002 +#define MII_DP83843_PHYSTS_DUPLEX 0x0004 +#define MII_DP83843_PHYSTS_NEGOTIATION 0x0020 + + +/* MII timeout */ +#define MII_DEFAULT_DELAY 20 +#define MII_RESET_TIMEOUT 100 +#define MII_RESET_DELAY 10 + +#define MII_LINK_TIMEOUT 2500 +#define MII_LINK_DELAY 20 + +/* A few constants needed for miiWriteWord() */ +enum { + kPHYAddr0 = 0x00000000, //PHY addr is 0 + kPHYAddr1F = 0x0000001F +}; diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.cpp b/iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.cpp new file mode 100644 index 000000000..11f30da29 --- /dev/null 
+++ b/iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.cpp @@ -0,0 +1,1701 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer + * + * Implementation for hardware dependent (relatively) code + * for the Sun GEM Ethernet controller. + * + * HISTORY + * + * 10-Sept-97 + * Created. 
+ * + */ +#include "UniNEnetPrivate.h" + +extern void *kernel_pmap; + +/* + * Private functions + */ +bool UniNEnet::allocateMemory() +{ + UInt32 rxRingSize, txRingSize; + UInt32 i, n; + UInt8 *virtAddr; + UInt32 physBase; + UInt32 physAddr; + TxQueueElement *txElement; + + /* + * Calculate total space for DMA channel commands + */ + txRingSize = (TX_RING_LENGTH * sizeof(enet_txdma_cmd_t) + 2048 - 1) & ~(2048-1); + rxRingSize = (RX_RING_LENGTH * sizeof(enet_dma_cmd_t) + 2048 - 1) & ~(2048-1); + + dmaCommandsSize = round_page( txRingSize + rxRingSize ); + /* + * Allocate required memory + */ + if ( !dmaCommands ) + { + dmaCommands = (UInt8 *)IOMallocContiguous( dmaCommandsSize, PAGE_SIZE, 0 ); + + if ( dmaCommands == 0 ) + { + IOLog( "Ethernet(UniN): Cant allocate channel dma commands\n\r" ); + return false; + } + } + + /* + * If we needed more than one page, then make sure we received contiguous memory. + */ + n = (dmaCommandsSize - PAGE_SIZE) / PAGE_SIZE; + physBase = pmap_extract(kernel_pmap, (vm_address_t) dmaCommands); + + virtAddr = (UInt8 *) dmaCommands; + for( i=0; i < n; i++, virtAddr += PAGE_SIZE ) + { + physAddr = pmap_extract(kernel_pmap, (vm_address_t) virtAddr); + if (physAddr != (physBase + i * PAGE_SIZE) ) + { + IOLog( "Ethernet(UniN): Cant allocate contiguous memory for dma commands\n\r" ); + return false; + } + } + + /* Setup the receive ring pointer */ + rxDMACommands = (enet_dma_cmd_t*)dmaCommands; + + /* Setup the transmit ring pointer */ + txDMACommands = (enet_txdma_cmd_t*)(dmaCommands + rxRingSize); + + + queue_init( &txActiveQueue ); + queue_init( &txFreeQueue ); + + for ( i = 0; i < TX_MAX_MBUFS; i++ ) + { + txElement = (TxQueueElement *)IOMalloc( sizeof(TxQueueElement) ); + if ( txElement == 0 ) + { + return false; + } + + bzero( txElement, sizeof(TxQueueElement) ); + + releaseTxElement( txElement ); + } + + return true; +} + +/*------------------------------------------------------------------------- + * + * Setup the Transmit Ring + * 
----------------------- + * Each transmit ring entry consists of two words to transmit data from buffer + * segments (possibly) spanning a page boundary. This is followed by two DMA commands + * which read transmit frame status and interrupt status from the UniN chip. The last + * DMA command in each transmit ring entry generates a host interrupt. + * The last entry in the ring is followed by a DMA branch to the first + * entry. + *-------------------------------------------------------------------------*/ + +bool UniNEnet::initTxRing() +{ + TxQueueElement * txElement; + UInt32 i; + + /* + * Clear the transmit DMA command memory + */ + bzero( (void *)txDMACommands, sizeof(enet_txdma_cmd_t) * TX_RING_LENGTH); + txCommandHead = 0; + txCommandTail = 0; + + txDMACommandsPhys = pmap_extract(kernel_pmap, (vm_address_t) txDMACommands); + + if ( txDMACommandsPhys == 0 ) + { + IOLog( "Ethernet(UniN): Bad dma command buf - %08x\n\r", + (int)txDMACommands ); + } + + for ( i=0; i < TX_RING_LENGTH; i++ ) + { + txElement = txElementPtrs[i]; + + if ( txElement && ( --txElement->count == 0 ) ) + { + freePacket( txElement->mbuf ); + releaseTxElement( txElement ); + } + + txElementPtrs[i] = 0; + } + + txCommandsAvail = TX_RING_LENGTH - 1; + + txIntCnt = 0; + txWDCount = 0; + + return true; +} + +/*------------------------------------------------------------------------- + * + * Setup the Receive ring + * ---------------------- + * Each receive ring entry consists of two DMA commands to receive data + * into a network buffer (possibly) spanning a page boundary. The second + * DMA command in each entry generates a host interrupt. + * The last entry in the ring is followed by a DMA branch to the first + * entry. 
+ * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::initRxRing() +{ + UInt32 i; + bool status; + + /* Clear the receive DMA command memory */ + bzero( (void*)rxDMACommands, sizeof( enet_dma_cmd_t ) * RX_RING_LENGTH ); + + rxDMACommandsPhys = pmap_extract(kernel_pmap, (vm_address_t) rxDMACommands); + if ( rxDMACommandsPhys == 0 ) + { + IOLog( "Ethernet(UniN): Bad dma command buf - %08x\n\r", + (int) rxDMACommands ); + return false; + } + + /* Allocate a receive buffer for each entry in the Receive ring */ + for ( i = 0; i < RX_RING_LENGTH; i++ ) + { + if (rxMbuf[i] == NULL) + { + rxMbuf[i] = allocatePacket(NETWORK_BUFSIZE); + if (rxMbuf[i] == NULL) + { + IOLog("Ethernet(UniN): NULL packet in initRxRing\n"); + return false; + } + } + + /* + * Set the DMA commands for the ring entry to transfer data to the Mbuf. + */ + status = updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true); + if (status == false) + { + IOLog("Ethernet(UniN): updateDescriptorFromMbuf error in " + "initRxRing\n"); + return false; + } + } + + /* + * Set the receive queue head to point to the first entry in the ring. + * Set the receive queue tail to point to a DMA Stop command after the + * last ring entry + */ + i-=4; + rxCommandHead = 0; + rxCommandTail = i; + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::flushRings() +{ + // Free all mbufs from the receive ring: + + for ( UInt32 i = 0; i < RX_RING_LENGTH; i++ ) + { + if (rxMbuf[i]) + { + freePacket( rxMbuf[i] ); + rxMbuf[i] = 0; + } + } + + // Free all mbufs from the transmit ring. + // The TxElement is moved back to the free list. 
+ + for ( UInt32 i = 0; i < TX_RING_LENGTH; i++ ) + { + TxQueueElement * txElement = txElementPtrs[i]; + txElementPtrs[i] = 0; + + if ( txElement && ( --txElement->count == 0 ) ) + { + freePacket( txElement->mbuf ); + releaseTxElement( txElement ); + } + } +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::startChip() +{ + UInt32 gemReg; + +// dumpRegisters(); + + gemReg = READ_REGISTER( TxConfiguration ); + gemReg |= kTxConfiguration_Tx_DMA_Enable; + WRITE_REGISTER( TxConfiguration, gemReg ); + + IOSleep( 20 ); + + gemReg = READ_REGISTER( RxConfiguration ); +/// gemReg |= kRxConfiguration_Rx_DMA_Enable | kRxConfiguration_Batch_Disable; + gemReg |= kRxConfiguration_Rx_DMA_Enable; + WRITE_REGISTER( RxConfiguration, gemReg ); + + IOSleep( 20 ); + + gemReg = READ_REGISTER( TxMACConfiguration ); + gemReg |= kTxMACConfiguration_TxMac_Enable; + WRITE_REGISTER( TxMACConfiguration, gemReg ); + + IOSleep( 20 ); + + rxMacConfigReg = READ_REGISTER( RxMACConfiguration ); + rxMacConfigReg |= kRxMACConfiguration_Rx_Mac_Enable; + WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg ); + + return; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::stopChip() +{ + UInt32 gemReg; + + gemReg = READ_REGISTER( TxConfiguration ); + gemReg &= ~kTxConfiguration_Tx_DMA_Enable; + WRITE_REGISTER( TxConfiguration, gemReg ); + + IOSleep( 20 ); + + gemReg = READ_REGISTER( RxConfiguration ); + gemReg &= ~kRxConfiguration_Rx_DMA_Enable; + WRITE_REGISTER( RxConfiguration, gemReg ); + + IOSleep( 20 ); + + gemReg = READ_REGISTER( TxMACConfiguration ); + gemReg &= ~kTxMACConfiguration_TxMac_Enable; + WRITE_REGISTER( TxMACConfiguration, gemReg ); + + IOSleep( 20 ); + + rxMacConfigReg = READ_REGISTER( RxMACConfiguration ); + 
rxMacConfigReg &= ~kRxMACConfiguration_Rx_Mac_Enable; + WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg ); + + return; +} + + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::resetChip() +{ + UInt32 resetReg; + UInt16 * pPhyType; + UInt16 phyWord; + + WRITE_REGISTER( SoftwareReset, kSoftwareReset_TX | kSoftwareReset_RX ); + do + { + resetReg = READ_REGISTER( SoftwareReset ); + } + while( resetReg & (kSoftwareReset_TX | kSoftwareReset_RX) ); + + /* + * Determine if PHY chip is configured. Reset and enable it (if present). + */ + if ( phyId == 0xff ) + { + /* + * Generate a hardware PHY reset. + */ + resetPHYChip(); + + if ( miiFindPHY(&phyId) == true ) + { + miiResetPHY( phyId ); + + pPhyType = (UInt16 *)&phyType; + miiReadWord( pPhyType, MII_ID0, phyId ); + miiReadWord( pPhyType+1, MII_ID1, phyId ); + if ( ((phyType & MII_BCM5400_MASK) == MII_BCM5400_ID) + || (((phyType & MII_BCM5400_MASK) == MII_BCM5401_ID)) ) /// mlj temporary quick fix + { + phyBCMType = 5400; + + miiReadWord( &phyWord, MII_BCM5400_AUXCONTROL, phyId ); + phyWord |= MII_BCM5400_AUXCONTROL_PWR10BASET; + miiWriteWord( phyWord, MII_BCM5400_AUXCONTROL, phyId ); + + miiReadWord( &phyWord, MII_BCM5400_1000BASETCONTROL, phyId ); + phyWord |= MII_BCM5400_1000BASETCONTROL_FULLDUPLEXCAP; + miiWriteWord( phyWord, MII_BCM5400_1000BASETCONTROL, phyId ); + + IODelay(100); + + miiResetPHY( 0x1F ); + + miiReadWord( &phyWord, MII_BCM5201_MULTIPHY, 0x1F ); + phyWord |= MII_BCM5201_MULTIPHY_SERIALMODE; + miiWriteWord( phyWord, MII_BCM5201_MULTIPHY, 0x1F ); + + miiReadWord( &phyWord, MII_BCM5400_AUXCONTROL, phyId ); + phyWord &= ~MII_BCM5400_AUXCONTROL_PWR10BASET; + miiWriteWord( phyWord, MII_BCM5400_AUXCONTROL, phyId ); + + } + else if ( (phyType & MII_BCM5201_MASK) == MII_BCM5201_ID ) + { + phyBCMType = 5201; + } + else + { + phyBCMType = 0; + } + // IOLog("DEBUG:UniNEnet: 
phy type = %d\n", phyBCMType); + } + } + + return true; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::initChip() +{ + UInt32 i, temp; + mach_timespec_t timeStamp; + UInt32 rxFifoSize; + UInt32 rxOff; + UInt32 rxOn; + u_int16_t *p16; + + if ( phyId == 0xff ) + { + WRITE_REGISTER( DatapathMode, kDatapathMode_ExtSERDESMode ); + WRITE_REGISTER( SerialinkControl, kSerialinkControl_DisableLoopback + | kSerialinkControl_EnableSyncDet ); + WRITE_REGISTER( Advertisement, kAdvertisement_Full_Duplex + | kAdvertisement_PAUSE ); + WRITE_REGISTER( PCSMIIControl, kPCSMIIControl_Auto_Negotiation_Enable + | kPCSMIIControl_Restart_Auto_Negotiation ); + WRITE_REGISTER( PCSConfiguration, kPCSConfiguration_Enable ); + WRITE_REGISTER( XIFConfiguration, kXIFConfiguration_Tx_MII_OE + | kXIFConfiguration_GMIIMODE + | kXIFConfiguration_FDPLXLED ); + } + else + { + WRITE_REGISTER( DatapathMode, kDatapathMode_GMIIMode ); + WRITE_REGISTER( XIFConfiguration, kXIFConfiguration_Tx_MII_OE + | kXIFConfiguration_FDPLXLED ); + } + + WRITE_REGISTER( SendPauseCommand, kSendPauseCommand_default ); + WRITE_REGISTER( MACControlConfiguration,kMACControlConfiguration_Receive_Pause_Enable ); + WRITE_REGISTER( InterruptMask, kInterruptMask_None ); + WRITE_REGISTER( TxMACMask, kTxMACMask_default ); + WRITE_REGISTER( RxMACMask, kRxMACMask_default ); + WRITE_REGISTER( MACControlMask, kMACControlMask_default ); + WRITE_REGISTER( Configuration, kConfiguration_TX_DMA_Limit + | kConfiguration_RX_DMA_Limit + | kConfiguration_Infinite_Burst ); + + WRITE_REGISTER( InterPacketGap0, kInterPacketGap0_default ); + WRITE_REGISTER( InterPacketGap1, kInterPacketGap1_default ); + WRITE_REGISTER( InterPacketGap2, kInterPacketGap2_default ); + WRITE_REGISTER( SlotTime, kSlotTime_default ); + WRITE_REGISTER( MinFrameSize, kMinFrameSize_default ); + WRITE_REGISTER( MaxFrameSize, 
kMaxFrameSize_default ); + WRITE_REGISTER( PASize, kPASize_default ); + WRITE_REGISTER( JamSize, kJamSize_default ); + WRITE_REGISTER( AttemptLimit, kAttemptLimit_default ); + WRITE_REGISTER( MACControlType, kMACControlType_default ); + + p16 = (u_int16_t *) myAddress.bytes; + for ( i=0; i < sizeof(IOEthernetAddress) / 2; i++ ) + WRITE_REGISTER( MACAddress[ i ], p16[ 2 - i ] ); + + for ( i=0; i < 3; i ++ ) + { + WRITE_REGISTER( MACAddress[ i + 3 ], 0 ); + WRITE_REGISTER( AddressFilter[ i ], 0 ); + } + + WRITE_REGISTER( MACAddress[ 6 ], kMACAddress_default_6 ); + WRITE_REGISTER( MACAddress[ 7 ], kMACAddress_default_7 ); + WRITE_REGISTER( MACAddress[ 8 ], kMACAddress_default_8 ); + + WRITE_REGISTER( AddressFilter2_1Mask, 0 ); + WRITE_REGISTER( AddressFilter0Mask, 0 ); + + for ( i=0; i < 16; i++ ) + WRITE_REGISTER( HashTable[ i ], 0 ); + + WRITE_REGISTER( NormalCollisionCounter, 0 ); + WRITE_REGISTER( FirstAttemptSuccessfulCollisionCounter, 0 ); + WRITE_REGISTER( ExcessiveCollisionCounter, 0 ); + WRITE_REGISTER( LateCollisionCounter, 0 ); + WRITE_REGISTER( DeferTimer, 0 ); + WRITE_REGISTER( PeakAttempts, 0 ); + WRITE_REGISTER( ReceiveFrameCounter, 0 ); + WRITE_REGISTER( LengthErrorCounter, 0 ); + WRITE_REGISTER( AlignmentErrorCounter, 0 ); + WRITE_REGISTER( FCSErrorCounter, 0 ); + WRITE_REGISTER( RxCodeViolationErrorCounter, 0 ); + + IOGetTime(&timeStamp); + WRITE_REGISTER( RandomNumberSeed, timeStamp.tv_nsec & 0xFFFF ); + + WRITE_REGISTER( TxDescriptorBaseLow, txDMACommandsPhys ); + WRITE_REGISTER( TxDescriptorBaseHigh, 0 ); + + temp = kTxConfiguration_TxFIFO_Threshold + | TX_RING_LENGTH_FACTOR << kTxConfiguration_Tx_Desc_Ring_Size_Shift; + WRITE_REGISTER( TxConfiguration, temp ); + + WRITE_REGISTER( TxMACConfiguration, 0 ); + + setDuplexMode( (phyId == 0xff) ? 
true : false ); + + WRITE_REGISTER( RxDescriptorBaseLow, rxDMACommandsPhys ); + WRITE_REGISTER( RxDescriptorBaseHigh, 0 ); + + WRITE_REGISTER( RxKick, RX_RING_LENGTH - 4 ); + + temp = kRxConfiguration_RX_DMA_Threshold + /// | kRxConfiguration_Batch_Disable may cause 4x primary interrupts + | RX_RING_LENGTH_FACTOR << kRxConfiguration_Rx_Desc_Ring_Size_Shift; + WRITE_REGISTER( RxConfiguration, temp ); + + rxMacConfigReg = 0; + WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg ); + + rxFifoSize = READ_REGISTER( RxFIFOSize ); + + rxOff = rxFifoSize - ((kGEMMacMaxFrameSize_Aligned + 8) * 2 / kPauseThresholds_Factor); + rxOn = rxFifoSize - ((kGEMMacMaxFrameSize_Aligned + 8) * 3 / kPauseThresholds_Factor); + + WRITE_REGISTER( PauseThresholds, + (rxOff << kPauseThresholds_OFF_Threshold_Shift) + | (rxOn << kPauseThresholds_ON_Threshold_Shift) ); + + temp = READ_REGISTER( BIFConfiguration ); + if ( temp & kBIFConfiguration_M66EN ) + temp = kRxBlanking_default_66; + else temp = kRxBlanking_default_33; + WRITE_REGISTER( RxBlanking, temp ); + + return true; +}/* end initChip */ + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::disableAdapterInterrupts() +{ + + WRITE_REGISTER( InterruptMask, kInterruptMask_None ); + return; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::enableAdapterInterrupts() +{ + UInt32 gemReg; + + + gemReg = READ_REGISTER( InterruptMask ); + gemReg &= ~( kStatus_TX_INT_ME | kStatus_RX_DONE ); + WRITE_REGISTER( InterruptMask, gemReg ); + return; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::setDuplexMode( bool duplexMode ) +{ + UInt32 
txMacConfig; + UInt32 xifConfig; + + + isFullDuplex = duplexMode; + txMacConfig = READ_REGISTER( TxMACConfiguration ); + + WRITE_REGISTER( TxMACConfiguration, txMacConfig & ~kTxMACConfiguration_TxMac_Enable ); + while( READ_REGISTER( TxMACConfiguration ) & kTxMACConfiguration_TxMac_Enable ) + ; + + xifConfig = READ_REGISTER( XIFConfiguration ); + + if ( isFullDuplex ) + { + txMacConfig |= (kTxMACConfiguration_Ignore_Collisions | kTxMACConfiguration_Ignore_Carrier_Sense); + xifConfig &= ~kXIFConfiguration_Disable_Echo; + } + else + { + txMacConfig &= ~(kTxMACConfiguration_Ignore_Collisions | kTxMACConfiguration_Ignore_Carrier_Sense); + xifConfig |= kXIFConfiguration_Disable_Echo; + } + + WRITE_REGISTER( TxMACConfiguration, txMacConfig ); + WRITE_REGISTER( XIFConfiguration, xifConfig ); + return; +} + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::restartTransmitter() +{ +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::restartReceiver() +{ + // Perform a software reset to the logic in the RX MAC. + // The MAC config register should be re-programmed following + // the reset. Everything else *should* be unaffected. + + WRITE_REGISTER( RxMACSoftwareResetCommand, kRxMACSoftwareResetCommand_Reset ); + + // Poll until the reset bit is cleared by the hardware. + + for ( int i = 0; i < 5000; i++ ) + { + if ( ( READ_REGISTER( RxMACSoftwareResetCommand ) + & kRxMACSoftwareResetCommand_Reset ) == 0 ) + { + break; // 'i' is always 0 or 1 + } + IODelay(1); + } + + // Update the MAC Config register. Watch out for the programming + // restrictions documented in the GEM specification!!! + // + // Disable MAC before setting any other bits in the MAC config + // register. 
+ + WRITE_REGISTER( RxMACConfiguration, 0 ); + + for ( int i = 0; i < 5000; i++ ) + { + if ( ( READ_REGISTER( RxMACConfiguration ) + & kRxMACConfiguration_Rx_Mac_Enable ) == 0 ) + { + break; // 'i' is always 0 + } + IODelay(1); + } + + // Update MAC config register. + + WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg ); + return; +}/* end restartReceiver */ + + +/*------------------------------------------------------------------------- + * + * Orderly stop of receive DMA. + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::stopReceiveDMA() +{ +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::stopTransmitDMA() +{ +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::transmitPacket(struct mbuf *packet) +{ + GEMTxDescriptor *dp; // descriptor pointer + UInt32 i,j,k; + struct mbuf *m; + TxQueueElement *txElement; + UInt32 dataPhys; + + + for ( m = packet, i=1; m->m_next; m=m->m_next, i++ ) + ; + + + if ( i > txCommandsAvail ) + { + return false; + } + + if ( (txElement=getTxElement()) == 0 ) + { + return false; + } + + j = txCommandTail; + + txElement->mbuf = packet; + txElement->slot = j; + txElement->count = i; + + OSAddAtomic( -i, (SInt32*)&txCommandsAvail ); + + m = packet; + + do + { + k = j; + + txElementPtrs[j] = txElement; + + dataPhys = (UInt32)mcl_to_paddr( mtod(m, char *) ); + if ( dataPhys == 0 ) + dataPhys = pmap_extract( kernel_pmap, mtod(m, vm_offset_t) ); + + dp = &txDMACommands[ j ].desc_seg[ 0 ]; + OSWriteLittleInt32( &dp->bufferAddrLo, 0, dataPhys ); + OSWriteLittleInt32( &dp->flags0, 0, m->m_len ); + dp->flags1 = 0; + txIntCnt++; + j = (j + 1) & TX_RING_WRAP_MASK; + } + while ( (m=m->m_next) != 0 ); + + txDMACommands[ k 
].desc_seg[ 0 ].flags0 |= OSSwapHostToLittleConstInt32( kGEMTxDescFlags0_EndOfFrame ); + txDMACommands[ txCommandTail ].desc_seg[ 0 ].flags0 |= OSSwapHostToLittleConstInt32( kGEMTxDescFlags0_StartOfFrame ); + if ( txIntCnt >= TX_DESC_PER_INT ) + { + txDMACommands[ txCommandTail ].desc_seg[ 0 ].flags1 |= OSSwapHostToLittleConstInt32( kGEMTxDescFlags1_Int ); + txIntCnt = txIntCnt % TX_DESC_PER_INT; + } + txCommandTail = j; + + WRITE_REGISTER( TxKick, j ); + + return true; +}/* end transmitPacket */ + + +/*------------------------------------------------------------------------- + * _receivePacket + * -------------- + * This routine runs the receiver in polled-mode (yuk!) for the kernel debugger. + * Don't mess with the interrupt source here that can deadlock in the debugger + * + * The _receivePackets allocate MBufs and pass them up the stack. The kernel + * debugger interface passes a buffer into us. To reconsile the two interfaces, + * we allow the receive routine to continue to allocate its own buffers and + * transfer any received data to the passed-in buffer. This is handled by + * _receivePacket calling _packetToDebugger. + *-------------------------------------------------------------------------*/ + +void UniNEnet::receivePacket( void * pkt, + UInt32 * pkt_len, + UInt32 timeout ) +{ + mach_timespec_t startTime; + mach_timespec_t currentTime; + UInt32 elapsedTimeMS; + + *pkt_len = 0; + + if (ready == false) + { + return; + } + + debuggerPkt = pkt; + debuggerPktSize = 0; + + IOGetTime(&startTime); + do + { + receivePackets( true ); + IOGetTime( ¤tTime ); + elapsedTimeMS = (currentTime.tv_nsec - startTime.tv_nsec) / (1000*1000); + } + while ( (debuggerPktSize == 0) && (elapsedTimeMS < timeout) ); + + *pkt_len = debuggerPktSize; + + return; +} + +/*------------------------------------------------------------------------- + * _packetToDebugger + * ----------------- + * This is called by _receivePackets when we are polling for kernel debugger + * packets. 
It copies the MBuf contents to the buffer passed by the debugger. + * It also sets the var debuggerPktSize which will break the polling loop. + *-------------------------------------------------------------------------*/ + +void UniNEnet::packetToDebugger( struct mbuf * packet, u_int size ) +{ + debuggerPktSize = size; + bcopy( mtod(packet, char *), debuggerPkt, size ); +} + +/*------------------------------------------------------------------------- + * _sendPacket + * ----------- + * + * This routine runs the transmitter in polled-mode (yuk!) for the kernel debugger. + * Don't mess with the interrupt source here that can deadlock in the debugger + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::sendPacket( void *pkt, UInt32 pkt_len ) +{ + mach_timespec_t startTime; + mach_timespec_t currentTime; + UInt32 elapsedTimeMS; + + if (!ready || !pkt || (pkt_len > ETHERMAXPACKET)) + { + return; + } + + /* + * Wait for the transmit ring to empty + */ + IOGetTime(&startTime); + do + { + debugTransmitInterruptOccurred(); + IOGetTime(¤tTime); + elapsedTimeMS = (currentTime.tv_nsec - startTime.tv_nsec) / (1000*1000); + } + while ( (txCommandHead != txCommandTail) && (elapsedTimeMS < TX_KDB_TIMEOUT) ); + + if ( txCommandHead != txCommandTail ) + { + IOLog( "Ethernet(UniN): Polled tranmit timeout - 1\n\r"); + return; + } + + /* + * Allocate a MBuf and copy the debugger transmit data into it. + * + * jliu - no allocation, just recycle the same buffer dedicated to + * KDB transmit. + */ + txDebuggerPkt->m_next = 0; + txDebuggerPkt->m_data = (caddr_t) pkt; + txDebuggerPkt->m_pkthdr.len = txDebuggerPkt->m_len = pkt_len; + + /* + * Send the debugger packet. txDebuggerPkt must not be freed by + * the transmit routine. 
+ */ + transmitPacket(txDebuggerPkt); + + /* + * Poll waiting for the transmit ring to empty again + */ + do + { + debugTransmitInterruptOccurred(); + IOGetTime(¤tTime); + elapsedTimeMS = (currentTime.tv_nsec - startTime.tv_nsec) / (1000*1000); + } + while ( (txCommandHead != txCommandTail) && + (elapsedTimeMS < TX_KDB_TIMEOUT) ); + + if ( txCommandHead != txCommandTail ) + { + IOLog( "Ethernet(UniN): Polled tranmit timeout - 2\n\r"); + } + + return; +} + +/*------------------------------------------------------------------------- + * _sendDummyPacket + * ---------------- + * The UniN receiver seems to be locked until we send our first packet. + * + *-------------------------------------------------------------------------*/ +void UniNEnet::sendDummyPacket() +{ + union + { + UInt8 bytes[64]; + IOEthernetAddress enet_addr[2]; + } dummyPacket; + + bzero( &dummyPacket, sizeof(dummyPacket) ); + + + dummyPacket.enet_addr[0] = myAddress; + dummyPacket.enet_addr[1] = myAddress; + + sendPacket((void *)dummyPacket.bytes, (unsigned int)sizeof(dummyPacket)); +} + + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::receiveInterruptOccurred() +{ + return receivePackets(false); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::receivePackets( bool fDebugger ) +{ + struct mbuf * packet; + UInt32 i,last; + int receivedFrameSize = 0; + UInt16 dmaFlags; + UInt32 rxPktStatus = 0; + bool passPacketUp; + bool reusePkt; + bool status; + bool useNetif = !fDebugger && netifEnabled; + bool packetsQueued = false; + + + last = (UInt32)-1; + i = rxCommandHead; + + while ( 1 ) + { + passPacketUp = false; + reusePkt = false; + + dmaFlags = OSReadLittleInt16( &rxDMACommands[ i ].desc_seg[ 0 ].frameDataSize, 0 ); + + /* + 
* If the current entry has not been written, then stop at this entry + */ + if ( dmaFlags & kGEMRxDescFrameSize_Own ) + { + break; + } + + + receivedFrameSize = dmaFlags & kGEMRxDescFrameSize_Mask; + rxPktStatus = OSReadLittleInt32( &rxDMACommands[ i ].desc_seg[ 0 ].flags, 0 ); + + + /* + * Reject packets that are runts or that have other mutations. + */ + if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) || + receivedFrameSize > (ETHERMAXPACKET + ETHERCRC) || + rxPktStatus & kGEMRxDescFlags_BadCRC ) + { + reusePkt = true; + NETWORK_STAT_ADD( inputErrors ); + if ( receivedFrameSize < (ETHERMINPACKET - ETHERCRC) ) + ETHERNET_STAT_ADD( dot3RxExtraEntry.frameTooShorts ); + else ETHERNET_STAT_ADD( dot3StatsEntry.frameTooLongs ); + } + else if ( useNetif == false ) + { + /* + * Always reuse packets in debugger mode. We also refuse to + * pass anything up the stack unless the driver is open. The + * hardware is enabled before the stack has opened us, to + * allow earlier debug interface registration. But we must + * not pass any packets up. + */ + reusePkt = true; + if (fDebugger) + { + packetToDebugger(rxMbuf[i], receivedFrameSize); + } + } + + + /* + * Before we pass this packet up the networking stack. Make sure we + * can get a replacement. Otherwise, hold on to the current packet and + * increment the input error count. + * Thanks Justin! + */ + + packet = 0; + + if ( reusePkt == false ) + { + bool replaced; + + packet = replaceOrCopyPacket(&rxMbuf[i], receivedFrameSize, &replaced); + + reusePkt = true; + + if (packet && replaced) + { + status = updateDescriptorFromMbuf(rxMbuf[i], &rxDMACommands[i], true); + + if (status) + { + reusePkt = false; + } + else + { + // Assume descriptor has not been corrupted. + freePacket(rxMbuf[i]); // release new packet. + rxMbuf[i] = packet; // get the old packet back. + packet = 0; // pass up nothing. 
+ IOLog("Ethernet(UniN): updateDescriptorFromMbuf error\n"); + } + } + + if ( packet == 0 ) + NETWORK_STAT_ADD( inputErrors ); + } + + /* + * Install the new MBuf for the one we're about to pass to the network stack + */ + + if ( reusePkt == true ) + { + rxDMACommands[i].desc_seg[0].flags = 0; + rxDMACommands[i].desc_seg[0].frameDataSize = OSSwapHostToLittleConstInt16( NETWORK_BUFSIZE | kGEMRxDescFrameSize_Own ); + } + + last = i; /* Keep track of the last receive descriptor processed */ + i = (i + 1) & RX_RING_WRAP_MASK; + + if ( (i & 3) == 0 ) // only kick modulo 4 + { + WRITE_REGISTER( RxKick, (i - 4) & RX_RING_WRAP_MASK ); + } + + if (fDebugger) + { + break; + } + + /* + * Transfer received packet to network + */ + if (packet) + { + KERNEL_DEBUG(DBG_UniN_RXCOMPLETE | DBG_FUNC_NONE, (int) packet, + (int)receivedFrameSize, 0, 0, 0 ); + + networkInterface->inputPacket(packet, receivedFrameSize, true); + NETWORK_STAT_ADD( inputPackets ); + packetsQueued = true; + } + }/* end WHILE */ + + if ( last != (UInt32)-1 ) + { + rxCommandTail = last; + rxCommandHead = i; + } + + return packetsQueued; +}/* end receivePackets */ + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::transmitInterruptOccurred() +{ + UInt32 i; + bool serviced = false; + TxQueueElement *txElement; + + + i = READ_REGISTER( TxCompletion ); + + while ( i != txCommandHead ) // i and txCommandHead race each other + { + do // This DO reduces READ_REGISTER calls which access the PCI bus + { /* Free the MBuf we just transmitted */ + + txElement = txElementPtrs[ txCommandHead ]; + + KERNEL_DEBUG( DBG_UniN_TXCOMPLETE | DBG_FUNC_NONE, + (int)txElement->mbuf, 0, 0, 0, 0 ); + + txElementPtrs[ txCommandHead ] = 0; + OSIncrementAtomic( (SInt32*)&txCommandsAvail ); + + if ( --txElement->count == 0 ) + { + freePacket( txElement->mbuf, kDelayFree ); + releaseTxElement( txElement ); 
+ NETWORK_STAT_ADD( outputPackets ); + } + + txCommandHead = (txCommandHead + 1) & TX_RING_WRAP_MASK; + + } while ( i != txCommandHead ); // loop til txCommandHead catches i + + serviced = true; + i = READ_REGISTER( TxCompletion ); // see if i advanced during last batch + }/* end WHILE */ + + // Release all packets in the free queue. + releaseFreePackets(); + return serviced; +}/* end transmitInterruptOccurred */ + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::debugTransmitInterruptOccurred() +{ + bool fServiced = false; + UInt32 i; + TxQueueElement * txElement; + + // Set the debugTxPoll flag to indicate the debugger was active + // and some cleanup may be needed when the driver returns to + // normal operation. + // + debugTxPoll = true; + + i = READ_REGISTER( TxCompletion ); + + while ( i != txCommandHead ) + { + fServiced = true; + + /* + * Free the mbuf we just transmitted. + * + * If it is the debugger packet, just remove it from the ring. + * and reuse the same packet for the next sendPacket() request. + */ + + /* + * While in debugger mode, do not touch the mbuf pool. + * Queue any used mbufs to a local queue. This queue + * will get flushed after we exit from debugger mode. + * + * During continuous debugger transmission and + * interrupt polling, we expect only the txDebuggerPkt + * to show up on the transmit mbuf ring. 
+ */ + txElement = txElementPtrs[txCommandHead]; + txElementPtrs[txCommandHead] = 0; + OSIncrementAtomic( (SInt32*)&txCommandsAvail ); + + KERNEL_DEBUG( DBG_UniN_TXCOMPLETE | DBG_FUNC_NONE, + (int) txElement->mbuf, + (int) txElement->mbuf->m_pkthdr.len, 0, 0, 0 ); + + if ( --txElement->count == 0 ) + { + if (txElement->mbuf != txDebuggerPkt) + { + debugQueue->enqueue( txElement->mbuf ); + } + releaseTxElement( txElement ); + } + + txCommandHead = (txCommandHead + 1) & TX_RING_WRAP_MASK; + } + + return fServiced; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::debugTransmitCleanup() +{ + // Debugger was active, clear all packets in the debugQueue, and + // issue a start(), just in case the debugger became active while the + // ring was full and the output queue stopped. Since the debugger + // does not restart the output queue, to avoid calling + // semaphore_signal() which may reenable interrupts, we need to + // make sure the output queue is not stalled after the debugger has + // flushed the ring. 
+ + debugQueue->flush(); + + transmitQueue->start(); +} + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::updateDescriptorFromMbuf(struct mbuf * m, enet_dma_cmd_t *desc, bool isReceive) +{ + struct IOPhysicalSegment segVector[1]; + UInt32 segments; + + segments = mbufCursor->getPhysicalSegmentsWithCoalesce(m, segVector); + + if ( segments == 0 || segments > 1 ) + { + IOLog("Ethernet(UniN): updateDescriptorFromMbuf error, %d segments\n", (int)segments); + return false; + } + + if ( isReceive ) + { + enet_dma_cmd_t *rxCmd = (enet_dma_cmd_t *)desc; + + OSWriteLittleInt32( &rxCmd->desc_seg[0].bufferAddrLo, 0, segVector[0].location ); + OSWriteLittleInt16( &rxCmd->desc_seg[0].frameDataSize, 0, segVector[0].length | kGEMRxDescFrameSize_Own ); + rxCmd->desc_seg[0].flags = 0; + } + else + { + enet_txdma_cmd_t *txCmd = (enet_txdma_cmd_t *)desc; + + OSWriteLittleInt32( &txCmd->desc_seg[0].bufferAddrLo, 0, segVector[0].location ); + OSWriteLittleInt32( &txCmd->desc_seg[0].flags0, 0, segVector[0].length + | kGEMTxDescFlags0_StartOfFrame + | kGEMTxDescFlags0_EndOfFrame ); + + txCmd->desc_seg[0].flags1 = 0; + txIntCnt += 1; + if ( (txIntCnt % TX_DESC_PER_INT) == 0 ) /// Divide??? 
+ txCmd->desc_seg[0].flags1 = OSSwapHostToLittleConstInt32( kGEMTxDescFlags1_Int ); + } + + return true; +}/* end updateDescriptorFromMbuf */ + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +TxQueueElement * UniNEnet::getTxElement() +{ + TxQueueElement * txElement = 0; + + IOSimpleLockLock( txQueueLock ); + + if ( queue_empty( &txFreeQueue ) == false ) + { + queue_remove_first( &txFreeQueue, txElement, TxQueueElement *, next ); + + txElement->list = &txActiveQueue; + + queue_enter( txElement->list, txElement, TxQueueElement *, next ); + } + + IOSimpleLockUnlock( txQueueLock ); + + return txElement; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::releaseTxElement(TxQueueElement * txElement) +{ + IOSimpleLockLock( txQueueLock ); + + if ( txElement->list != 0 ) + { + queue_remove( txElement->list, txElement, TxQueueElement *, next ); + } + + txElement->list = &txFreeQueue; + + queue_enter( txElement->list, txElement, TxQueueElement *, next); + + IOSimpleLockUnlock( txQueueLock ); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::monitorLinkStatus( bool firstPoll ) +{ + UInt32 gemReg; + UInt16 phyStatus; + UInt16 linkStatus; + UInt16 linkMode; + UInt16 lpAbility; + UInt16 phyStatusChange; + bool fullDuplex = false; + UInt32 linkSpeed = 0; + IOMediumType mediumType = kIOMediumEthernetNone; + IONetworkMedium *medium; + + + if ( firstPoll ) + { + phyStatusPrev = 0; + linkStatusPrev = kLinkStatusUnknown; + } + + if ( phyId == 0xff ) + { + phyStatus = READ_REGISTER( PCSMIIStatus ) & 0x0000FFFF; + lpAbility = READ_REGISTER( PCSMIILinkPartnerAbility ) & 0x0000FFFF; + } + else 
+ { + if ( miiReadWord( &phyStatus, MII_STATUS, phyId) != true ) + { + return; + } + miiReadWord( &lpAbility, MII_STATUS, phyId); + } + + phyStatusChange = (phyStatusPrev ^ phyStatus) & + ( MII_STATUS_LINK_STATUS | + MII_STATUS_NEGOTIATION_COMPLETE ); + + if ( phyStatusChange || firstPoll ) + { + if ( firstPoll ) + { + // For the initial link status poll, wait a bit, then + // re-read the status register to clear any latched bits. + // Why wait? Well, the debugger can kick in shortly after + // this function returns, and we want the duplex setting + // on the MAC to match the PHY. + + miiWaitForAutoNegotiation( phyId ); + miiReadWord(&phyStatus, MII_STATUS, phyId); + miiReadWord(&phyStatus, MII_STATUS, phyId); + } + + gemReg = READ_REGISTER( MACControlConfiguration ); + if ( lpAbility & MII_LPAR_PAUSE ) + gemReg |= kMACControlConfiguration_Send_Pause_Enable; + else gemReg &= ~kMACControlConfiguration_Send_Pause_Enable; + WRITE_REGISTER( MACControlConfiguration, gemReg ); + + if ( (phyStatus & MII_STATUS_LINK_STATUS) && + ( firstPoll || (phyStatus & MII_STATUS_NEGOTIATION_COMPLETE) ) ) + { + if ( phyId == 0xff ) + { + linkSpeed = 1000; + fullDuplex = true; + mediumType = kIOMediumEthernet1000BaseSX; + } + else if ( (phyType & MII_LXT971_MASK) == MII_LXT971_ID ) + { + miiReadWord( &linkStatus, MII_LXT971_STATUS_2, phyId ); + linkSpeed = (linkStatus & MII_LXT971_STATUS_2_SPEED) ? + 100 : 10; + fullDuplex = (linkStatus & MII_LXT971_STATUS_2_DUPLEX) ? + true : false; + mediumType = (linkSpeed == 10) ? kIOMediumEthernet10BaseT : + kIOMediumEthernet100BaseTX; + } + else if ( (phyType & MII_BCM5201_MASK) == MII_BCM5201_ID ) + { + miiReadWord( &linkStatus, MII_BCM5201_AUXSTATUS, phyId ); + linkSpeed = (linkStatus & MII_BCM5201_AUXSTATUS_SPEED) ? + 100 : 10; + fullDuplex = (linkStatus & MII_BCM5201_AUXSTATUS_DUPLEX) ? + true : false; + mediumType = (linkSpeed == 10) ? 
kIOMediumEthernet10BaseT : + kIOMediumEthernet100BaseTX; + } + else if ( ((phyType & MII_BCM5400_MASK) == MII_BCM5400_ID) + || ((phyType & MII_BCM5400_MASK) == MII_BCM5401_ID) ) /// mlj temporary quick fix + { + miiReadWord( &linkStatus, MII_BCM5400_AUXSTATUS, phyId ); + + linkMode = (linkStatus & MII_BCM5400_AUXSTATUS_LINKMODE_MASK) / + MII_BCM5400_AUXSTATUS_LINKMODE_BIT; + + gemReg = READ_REGISTER( XIFConfiguration ); + if ( linkMode < 6 ) + gemReg &= ~kXIFConfiguration_GMIIMODE; + else gemReg |= kXIFConfiguration_GMIIMODE; + WRITE_REGISTER( XIFConfiguration, gemReg ); + + if ( linkMode == 0 ) + { + linkSpeed = 0; + } + else if ( linkMode < 3 ) + { + linkSpeed = 10; + fullDuplex = ( linkMode < 2 ) ? false : true; + mediumType = kIOMediumEthernet10BaseT; + } + else if ( linkMode < 6 ) + { + linkSpeed = 100; + fullDuplex = ( linkMode < 5 ) ? false : true; + mediumType = kIOMediumEthernet100BaseTX; + } + else + { + linkSpeed = 1000; + fullDuplex = true; + mediumType = kIOMediumEthernet1000BaseTX; + } + } + + if ( fullDuplex != isFullDuplex ) + { + setDuplexMode( fullDuplex ); + } + + if ( ready == true ) + { + startChip(); + } + + if ( linkSpeed != 0 ) + { + mediumType |= (fullDuplex == true) ? + kIOMediumOptionFullDuplex : + kIOMediumOptionHalfDuplex; + } + + medium = IONetworkMedium::getMediumWithType( mediumDict, + mediumType ); + + setLinkStatus( kIONetworkLinkActive | kIONetworkLinkValid, + medium, + linkSpeed * 1000000 ); + + IOLog( "Ethernet(UniN): Link is up at %ld Mbps - %s Duplex\n\r", + linkSpeed, + (fullDuplex) ? 
"Full" : "Half" ); + + linkStatusPrev = kLinkStatusUp; + } + else + { + if ( (linkStatusPrev == kLinkStatusUp) || + (linkStatusPrev == kLinkStatusUnknown) ) + { + stopChip(); + + medium = IONetworkMedium::getMediumWithType( mediumDict, + mediumType ); + + setLinkStatus( kIONetworkLinkValid, + medium, + 0 ); + + if ( linkStatusPrev != kLinkStatusUnknown ) + { + IOLog( "Ethernet(UniN): Link is down.\n\r" ); + } + + txIntCnt = 0; + + if ( txCommandHead != txCommandTail ) + { + initTxRing(); + + txCommandHead = READ_REGISTER( TxCompletion ); + txCommandTail = txCommandHead; + } + } + + linkStatusPrev = kLinkStatusDown; + } + + phyStatusPrev = phyStatus; + } + return; +} + + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +IOReturn UniNEnet::getHardwareAddress(IOEthernetAddress *ea) +{ + UInt32 i; + OSData *macEntry; + UInt8 *macAddress; + UInt32 len; + + macEntry = OSDynamicCast( OSData, nub->getProperty( "local-mac-address" ) ); + if ( macEntry == 0 ) + { + return kIOReturnError; + } + + macAddress = (UInt8 *)macEntry->getBytesNoCopy(); + if ( macAddress == 0 ) + { + return kIOReturnError; + } + + len = macEntry->getLength(); + if ( len != 6 ) + { + return kIOReturnError; + } + + for (i = 0; i < sizeof(*ea); i++) + { + ea->bytes[i] = macAddress[i]; + } + return kIOReturnSuccess; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +#define ENET_CRCPOLY 0x04c11db7 + +static UInt32 crc416(UInt32 current, UInt16 nxtval ) +{ + register UInt32 counter; + register int highCRCBitSet, lowDataBitSet; + + /* Swap bytes */ + nxtval = ((nxtval & 0x00FF) << 8) | (nxtval >> 8); + + /* Compute bit-by-bit */ + for (counter = 0; counter != 16; ++counter) + { /* is high CRC bit set? 
*/ + if ((current & 0x80000000) == 0) + highCRCBitSet = 0; + else + highCRCBitSet = 1; + + current = current << 1; + + if ((nxtval & 0x0001) == 0) + lowDataBitSet = 0; + else + lowDataBitSet = 1; + + nxtval = nxtval >> 1; + + /* do the XOR */ + if (highCRCBitSet ^ lowDataBitSet) + current = current ^ ENET_CRCPOLY; + } + return current; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +static UInt32 mace_crc(UInt16 *address) +{ + register UInt32 newcrc; + + newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ + newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ + newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ + + return(newcrc); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +/* + * Add requested mcast addr to UniN's hash table filter. + * + */ +void UniNEnet::addToHashTableMask(UInt8 *addr) +{ + UInt32 i,j; + UInt32 crcBitIndex; + UInt16 mask; + + j = mace_crc((UInt16 *)addr) & 0xFF; /* Big-endian alert! 
*/ + + for ( crcBitIndex = i = 0; i < 8; i++ ) + { + crcBitIndex >>= 1; + crcBitIndex |= (j & 0x80); + j <<= 1; + } + + crcBitIndex ^= 0xFF; + + if (hashTableUseCount[crcBitIndex]++) + return; /* This bit is already set */ + mask = crcBitIndex % 16; + mask = 1 << mask; + hashTableMask[crcBitIndex/16] |= mask; +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +void UniNEnet::resetHashTableMask() +{ + bzero(hashTableUseCount, sizeof(hashTableUseCount)); + bzero(hashTableMask, sizeof(hashTableMask)); +} + +/*------------------------------------------------------------------------- + * + * + * + *-------------------------------------------------------------------------*/ + +/* + * Sync the adapter with the software copy of the multicast mask + * (logical address filter). + */ +void UniNEnet::updateHashTableMask() +{ + UInt32 i; + + rxMacConfigReg = READ_REGISTER( RxMACConfiguration ); + WRITE_REGISTER( RxMACConfiguration, + rxMacConfigReg & ~(kRxMACConfiguration_Rx_Mac_Enable + | kRxMACConfiguration_Hash_Filter_Enable) ); + + while ( READ_REGISTER( RxMACConfiguration ) & (kRxMACConfiguration_Rx_Mac_Enable + | kRxMACConfiguration_Hash_Filter_Enable) ) + ; + + for ( i= 0; i < 16; i++ ) + WRITE_REGISTER( HashTable[ i ], hashTableMask[ 15 - i ] ); + + rxMacConfigReg |= kRxMACConfiguration_Hash_Filter_Enable; + WRITE_REGISTER( RxMACConfiguration, rxMacConfigReg ); +} diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.h b/iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.h new file mode 100644 index 000000000..939895243 --- /dev/null +++ b/iokit/Drivers/network/drvPPCUniN/UniNEnetPrivate.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer + * + * Interface for hardware dependent (relatively) code + * for the UniN Ethernet chip + * + * HISTORY + * + */ + + +#include "UniNEnet.h" +#include "UniNEnetMII.h" +#include + + +///void WriteUniNRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset, u_int32_t data); +///volatile u_int32_t ReadUniNRegister( IOPPCAddress ioEnetBase, u_int32_t reg_offset); diff --git a/iokit/Drivers/network/drvPPCUniN/UniNEnetRegisters.h b/iokit/Drivers/network/drvPPCUniN/UniNEnetRegisters.h new file mode 100644 index 000000000..7b4b533c0 --- /dev/null +++ b/iokit/Drivers/network/drvPPCUniN/UniNEnetRegisters.h @@ -0,0 +1,529 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer + * + * Interface definition for the Sun GEM (UniN) Ethernet controller. + * + * + */ + +/* + * Miscellaneous defines... + */ +#define CACHE_LINE_SIZE 32 /* Bytes */ + +#define RX_RING_LENGTH_FACTOR 1 // valid from 0 to 8 +#define RX_RING_LENGTH (32 * (1 << RX_RING_LENGTH_FACTOR)) // 128 pkt descs /* Packet descriptors */ +#define RX_RING_WRAP_MASK (RX_RING_LENGTH -1) + +#define TX_RING_LENGTH_FACTOR 2 // valid from 0 to 8 +#define TX_RING_LENGTH (32 * (1 << TX_RING_LENGTH_FACTOR)) // 128 pkt descs +#define TX_RING_WRAP_MASK (TX_RING_LENGTH -1) + +#define TX_MAX_MBUFS (TX_RING_LENGTH / 2) + +#define TX_DESC_PER_INT 32 + +#define NETWORK_BUFSIZE (((ETHERMAXPACKET + ETHERCRC) + 7) & ~7) + +#define TRANSMIT_QUEUE_SIZE 256 + +#define WATCHDOG_TIMER_MS 300 +#define TX_KDB_TIMEOUT 1000 + +#define PCI_PERIOD_33MHz 30 +#define PCI_PERIOD_66MHz 15 +#define RX_INT_LATENCY_uS 250 + + + struct GMAC_Registers + { + /* Global Resources: */ // 0x0000 + + UInt32 SEB_State; // 3 bits for diagnostics + UInt32 Configuration; // + UInt32 filler1; + UInt32 Status; + + UInt32 InterruptMask; // 0x0010 + UInt32 InterruptAck; + UInt32 filler2; + UInt32 StatusAlias; + + UInt8 filler3[ 0x1000 - 0x20 ]; + + UInt32 PCIErrorStatus; // 0x1000 + UInt32 PCIErrorMask; + UInt32 BIFConfiguration; + UInt32 BIFDiagnostic; + + UInt32 SoftwareReset; // 
0x1010 + + UInt8 filler4[ 0x2000 - 0x1014 ]; + + /* Transmit DMA registers: */ + + UInt32 TxKick; // 0x2000 + UInt32 TxConfiguration; + UInt32 TxDescriptorBaseLow; + UInt32 TxDescriptorBaseHigh; + + UInt32 filler5; // 0x2010 + UInt32 TxFIFOWritePointer; + UInt32 TxFIFOShadowWritePointer; + UInt32 TxFIFOReadPointer; + + UInt32 TxFIFOShadowReadPointer; // 0x2020 + UInt32 TxFIFOPacketCounter; + UInt32 TxStateMachine; + UInt32 filler6; + + UInt32 TxDataPointerLow; // 0x2030 + UInt32 TxDataPointerHigh; + + UInt8 filler7[ 0x2100 - 0x2038 ]; + + UInt32 TxCompletion; // 0x2100 + UInt32 TxFIFOAddress; + UInt32 TxFIFOTag; + UInt32 TxFIFODataLow; + + UInt32 TxFIFODataHighT1; // 0x2110 + UInt32 TxFIFODataHighT0; + UInt32 TxFIFOSize; + + UInt8 filler8[ 0x4000 - 0x211C ]; + + /* Receive DMA registers: */ + + UInt32 RxConfiguration; // 0x4000 + UInt32 RxDescriptorBaseLow; + UInt32 RxDescriptorBaseHigh; + UInt32 RxFIFOWritePointer; + + UInt32 RxFIFOShadowWritePointer; // 0x4010 + UInt32 RxFIFOReadPointer; + UInt32 RxFIFOPacketCounter; + UInt32 RxStateMachine; + + UInt32 PauseThresholds; // 0x4020 + UInt32 RxDataPointerLow; + UInt32 RxDataPointerHigh; + + UInt8 filler9[ 0x4100 - 0x402C ]; + + UInt32 RxKick; // 0x4100 + UInt32 RxCompletion; + UInt32 RxBlanking; + UInt32 RxFIFOAddress; + + UInt32 RxFIFOTag; // 0x4110 + UInt32 RxFIFODataLow; + UInt32 RxFIFODataHighT0; + UInt32 RxFIFODataHighT1; + + UInt32 RxFIFOSize; // 0x4120 + + UInt8 filler10[ 0x6000 - 0x4124 ]; + + /* MAC registers: */ + + UInt32 TxMACSoftwareResetCommand; // 0x6000 + UInt32 RxMACSoftwareResetCommand; + UInt32 SendPauseCommand; + UInt32 filler11; + + UInt32 TxMACStatus; // 0x6010 + UInt32 RxMACStatus; + UInt32 MACControlStatus; + UInt32 filler12; + + UInt32 TxMACMask; // 0x6020 + UInt32 RxMACMask; + UInt32 MACControlMask; + UInt32 filler13; + + UInt32 TxMACConfiguration; // 0x6030 + UInt32 RxMACConfiguration; + UInt32 MACControlConfiguration; + UInt32 XIFConfiguration; + + UInt32 InterPacketGap0; // 0x6040 + 
UInt32 InterPacketGap1; + UInt32 InterPacketGap2; + UInt32 SlotTime; + + UInt32 MinFrameSize; // 0x6050 + UInt32 MaxFrameSize; + UInt32 PASize; + UInt32 JamSize; + + UInt32 AttemptLimit; // 0x6060 + UInt32 MACControlType; + UInt8 filler14[ 0x6080 - 0x6068 ]; + + UInt32 MACAddress[ 9 ]; // 0x6080 + + UInt32 AddressFilter[ 3 ]; // 0x60A4 + + UInt32 AddressFilter2_1Mask; // 0x60B0 + UInt32 AddressFilter0Mask; + UInt32 filler15[ 2 ]; + + UInt32 HashTable[ 16 ]; // 0x60C0 + + /* Statistics registers: */ + + UInt32 NormalCollisionCounter; // 0x6100 + UInt32 FirstAttemptSuccessfulCollisionCounter; + UInt32 ExcessiveCollisionCounter; + UInt32 LateCollisionCounter; + + UInt32 DeferTimer; // 0x6110 + UInt32 PeakAttempts; + UInt32 ReceiveFrameCounter; + UInt32 LengthErrorCounter; + + UInt32 AlignmentErrorCounter; // 0x6120 + UInt32 FCSErrorCounter; + UInt32 RxCodeViolationErrorCounter; + UInt32 filler16; + + /* Miscellaneous registers: */ + + UInt32 RandomNumberSeed; // 0x6130 + UInt32 StateMachine; + + UInt8 filler17[ 0x6200 - 0x6138 ]; + + /* MIF registers: */ + + UInt32 MIFBitBangClock; // 0x6200 + UInt32 MIFBitBangData; + UInt32 MIFBitBangOutputEnable; + UInt32 MIFBitBangFrame_Output; + + UInt32 MIFConfiguration; // 0x6210 + UInt32 MIFMask; + UInt32 MIFStatus; + UInt32 MIFStateMachine; + + UInt8 filler18[ 0x9000 - 0x6220 ]; + + /* PCS/Serialink registers: */ + + UInt32 PCSMIIControl; // 0x9000 + UInt32 PCSMIIStatus; + UInt32 Advertisement; + UInt32 PCSMIILinkPartnerAbility; + + UInt32 PCSConfiguration; // 0x9010 + UInt32 PCSStateMachine; + UInt32 PCSInterruptStatus; + + UInt8 filler19[ 0x9050 - 0x901C ]; + + UInt32 DatapathMode; // 0x9050 + UInt32 SerialinkControl; + UInt32 SharedOutputSelect; + UInt32 SerialinkState; + }; /* end GMAC_Registers */ + + +#define kConfiguration_Infinite_Burst 0x00000001 +#define kConfiguration_TX_DMA_Limit (0x1F << 1) +#define kConfiguration_RX_DMA_Limit (0x1F << 6) + + /* The following bits are used in the */ + /* Status, InterruptMask, 
InterruptAck, and StatusAlias registers: */ + +#define kStatus_TX_INT_ME 0x00000001 +#define kStatus_TX_ALL 0x00000002 +#define kStatus_TX_DONE 0x00000004 +#define kStatus_RX_DONE 0x00000010 +#define kStatus_Rx_Buffer_Not_Available 0x00000020 +#define kStatus_RX_TAG_ERROR 0x00000040 +#define kStatus_PCS_INT 0x00002000 +#define kStatus_TX_MAC_INT 0x00004000 +#define kStatus_RX_MAC_INT 0x00008000 +#define kStatus_MAC_CTRL_INT 0x00010000 +#define kStatus_MIF_Interrupt 0x00020000 +#define kStatus_PCI_ERROR_INT 0x00040000 +#define kStatus_TxCompletion_Shift 19 + +#define kInterruptMask_None 0xFFFFFFFF + +#define kBIFConfiguration_SLOWCLK 0x1 +#define kBIFConfiguration_B64D_DIS 0x2 +#define kBIFConfiguration_M66EN 0x8 + +#define kSoftwareReset_TX 0x1 +#define kSoftwareReset_RX 0x2 +#define kSoftwareReset_RSTOUT 0x4 + + // register TxConfiguration 2004: +#define kTxConfiguration_Tx_DMA_Enable 0x00000001 +#define kTxConfiguration_Tx_Desc_Ring_Size_Shift 1 // bits 1:4 +#define kTxConfiguration_TxFIFO_Threshold 0x001FFC00 // obsolete + + // register RxConfiguration 4000: +#define kRxConfiguration_Rx_DMA_Enable 0x00000001 +#define kRxConfiguration_Rx_Desc_Ring_Size_Shift 1 // bits 1:4 +#define kRxConfiguration_Batch_Disable 0x00000020 +#define kRxConfiguration_First_Byte_Offset_Mask 0x00001C00 +#define kRxConfiguration_Checksum_Start_Offset_Mask 0x000FE000 +#define kRxConfiguration_RX_DMA_Threshold 0x01000000 // 128 bytes + +#define kPauseThresholds_Factor 64 +#define kPauseThresholds_OFF_Threshold_Shift 0 // 9 bit field +#define kPauseThresholds_ON_Threshold_Shift 12 + +#define FACTOR33 ((RX_INT_LATENCY_uS * 1000) / (2048 * PCI_PERIOD_33MHz)) +#define FACTOR66 ((RX_INT_LATENCY_uS * 1000) / (2048 * PCI_PERIOD_66MHz)) + +#define F33 (FACTOR33 << kPauseThresholds_ON_Threshold_Shift ) +#define F66 (FACTOR66 << kPauseThresholds_ON_Threshold_Shift ) + +#define kRxBlanking_default_33 (F33 | 5) +#define kRxBlanking_default_66 (F66 | 5) + +#define kTxMACSoftwareResetCommand_Reset 1 
// 1 bit register +#define kRxMACSoftwareResetCommand_Reset 1 + +#define kSendPauseCommand_default 0x1BF0 + // 0x6010: +#define kTX_MAC_Status_Frame_Transmitted 0x001 +#define kTX_MAC_Status_Tx_Underrun 0x002 +#define kTX_MAC_Status_Max_Pkt_Err 0x004 +#define kTX_MAC_Status_Normal_Coll_Cnt_Exp 0x008 +#define kTX_MAC_Status_Excess_Coll_Cnt_Exp 0x010 +#define kTX_MAC_Status_Late_Coll_Cnt_Exp 0x020 +#define kTX_MAC_Status_First_Coll_Cnt_Exp 0x040 +#define kTX_MAC_Status_Defer_Timer_Exp 0x080 +#define kTX_MAC_Status_Peak_Attempts_Cnt_Exp 0x100 + // 0x6014: +#define kRX_MAC_Status_Frame_Received 0x01 +#define kRX_MAC_Status_Rx_Overflow 0x02 // Rx FIFO overflow +#define kRX_MAC_Status_Frame_Cnt_Exp 0x04 +#define kRX_MAC_Status_Align_Err_Cnt_Exp 0x08 +#define kRX_MAC_Status_CRC_Err_Cnt_Exp 0x10 +#define kRX_MAC_Status_Length_Err_Cnt_Exp 0x20 +#define kRX_MAC_Status_Viol_Err_Cnt_Exp 0x40 + + +#ifdef CRAP +#define kTxMACMask_default 0x1FF // was 0xFFFF +#define kRxMACMask_default 0x7F // was 0xFFFF +#define kMACControlMask_default 0X00000007 // was 0xFFFF +#else +#define kTxMACMask_default 1 // enable all but Frame_Transmitted +#define kRxMACMask_default 1 // enable all but Frame_Received +#define kMACControlMask_default 0xFFFFFFF8 // enable Paused stuff +#endif // CRAP + +#define kTxMACConfiguration_TxMac_Enable 0x001 +#define kTxMACConfiguration_Ignore_Carrier_Sense 0x002 +#define kTxMACConfiguration_Ignore_Collisions 0x004 +#define kTxMACConfiguration_Enable_IPG0 0x008 +#define kTxMACConfiguration_Never_Give_Up 0x010 +#define kTxMACConfiguration_Never_Give_Up_Limit 0x020 +#define kTxMACConfiguration_No_Backoff 0x040 +#define kTxMACConfiguration_Slow_Down 0x080 +#define kTxMACConfiguration_No_FCS 0x100 +#define kTxMACConfiguration_TX_Carrier_Extension 0x200 + +#define kRxMACConfiguration_Rx_Mac_Enable 0x001 +#define kRxMACConfiguration_Strip_Pad 0x002 +#define kRxMACConfiguration_Strip_FCS 0x004 +#define kRxMACConfiguration_Promiscuous 0x008 +#define 
kRxMACConfiguration_Promiscuous_Group 0x010 +#define kRxMACConfiguration_Hash_Filter_Enable 0x020 +#define kRxMACConfiguration_Address_Filter_Enable 0x040 +#define kRxMACConfiguration_Disable_Discard_On_Err 0x080 +#define kRxMACConfiguration_Rx_Carrier_Extension 0x100 + +#define kMACControlConfiguration_Send_Pause_Enable 0x1 +#define kMACControlConfiguration_Receive_Pause_Enable 0x2 +#define kMACControlConfiguration_Pass_MAC_Control 0x4 + +#define kXIFConfiguration_Tx_MII_OE 0x01 // output enable on the MII bus +#define kXIFConfiguration_MII_Int_Loopback 0x02 +#define kXIFConfiguration_Disable_Echo 0x04 +#define kXIFConfiguration_GMIIMODE 0x08 +#define kXIFConfiguration_MII_Buffer_OE 0x10 +#define kXIFConfiguration_LINKLED 0x20 +#define kXIFConfiguration_FDPLXLED 0x40 + +#define kInterPacketGap0_default 0 +#define kInterPacketGap1_default 8 +#define kInterPacketGap2_default 4 + +#define kSlotTime_default 0x0040 +#define kMinFrameSize_default 0x0040 +#define kMaxFrameSize_default 0x05EE + +#define kGEMMacMaxFrameSize_Aligned ((kMaxFrameSize_default + 7) & ~7) + + +#define kPASize_default 0x07 +#define kJamSize_default 0x04 +#define kAttemptLimit_default 0x10 +#define kMACControlType_default 0x8808 + +#define kMACAddress_default_6 0x0001 +#define kMACAddress_default_7 0xC200 +#define kMACAddress_default_8 0x0180 + +#define kMIFBitBangFrame_Output_ST_default 0x40000000 // 2 bits: ST of frame +#define kMIFBitBangFrame_Output_OP_read 0x20000000 // OP code - 2 bits: +#define kMIFBitBangFrame_Output_OP_write 0x10000000 // Read=10; Write=01 +#define kMIFBitBangFrame_Output_PHYAD_shift 23 // 5 bit PHY ADdress +#define kMIFBitBangFrame_Output_REGAD_shift 18 // 5 bit REGister ADdress +#define kMIFBitBangFrame_Output_TA_MSB 0x00020000 // Turn Around MSB +#define kMIFBitBangFrame_Output_TA_LSB 0x00010000 // Turn Around LSB + +#define kMIFConfiguration_PHY_Select 0x01 +#define kMIFConfiguration_Poll_Enable 0x02 +#define kMIFConfiguration_BB_Mode 0x04 +#define 
kMIFConfiguration_MDI_0 0x10 +#define kMIFConfiguration_MDI_1 0x20 + +#define kPCSMIIControl_1000_Mbs_Speed_Select 0x0040 +#define kPCSMIIControl_Collision_Test 0x0080 +#define kPCSMIIControl_Duplex_Mode 0x0100 +#define kPCSMIIControl_Restart_Auto_Negotiation 0x0200 +#define kPCSMIIControl_Isolate 0x0400 +#define kPCSMIIControl_Power_Down 0x0800 +#define kPCSMIIControl_Auto_Negotiation_Enable 0x1000 +#define kPCSMIIControl_Wrapback 0x4000 +#define kPCSMIIControl_Reset 0x8000 + +#define kAdvertisement_Full_Duplex 0x0020 +#define kAdvertisement_Half_Duplex 0x0040 +#define kAdvertisement_PAUSE 0x0080 // symmetrical to link partner +#define kAdvertisement_ASM_DIR 0x0100 // pause asymmetrical to link partner +#define kAdvertisement_Ack 0x4000 + +#define kPCSConfiguration_Enable 0x01 +#define kPCSConfiguration_Signal_Detect_Override 0x02 +#define kPCSConfiguration_Signal_Detect_Active_Low 0x04 +#define kPCSConfiguration_Jitter_Study // 2 bit field +#define kPCSConfiguration_10ms_Timer_Override 0x20 + +#define kDatapathMode_XMode 0x01 +#define kDatapathMode_ExtSERDESMode 0x02 +#define kDatapathMode_GMIIMode 0x04 +#define kDatapathMode_GMIIOutputEnable 0x08 + +#define kSerialinkControl_DisableLoopback 0x01 +#define kSerialinkControl_EnableSyncDet 0x02 +#define kSerialinkControl_LockRefClk 0x04 + + + + /* Descriptor definitions: */ + /* Note: Own is in the high bit of frameDataSize field: */ + +#define kGEMRxDescFrameSize_Mask 0x7FFF +#define kGEMRxDescFrameSize_Own 0x8000 + + + /* Rx flags field: */ + +#define kGEMRxDescFlags_HashValueBit 0x00001000 +#define kGEMRxDescFlags_HashValueMask 0x0FFFF000 +#define kGEMRxDescFlags_HashPass 0x10000000 +#define kGEMRxDescFlags_AlternateAddr 0x20000000 +#define kGEMRxDescFlags_BadCRC 0x40000000 + + +#define kGEMTxDescFlags0_BufferSizeMask 0x00007FFF +//#define kGEMTxDescFlags0_BufferSizeBit 0x00000001 +#define kGEMTxDescFlags0_ChecksumStartMask 0x00FF8000 +#define kGEMTxDescFlags0_ChecksumStartBit 0x00008000 +#define 
kGEMTxDescFlags0_ChecksumStuffMask 0x1F000000 +#define kGEMTxDescFlags0_ChecksupStuffBit 0x01000000 +#define kGEMTxDescFlags0_ChecksumEnable 0x20000000 +#define kGEMTxDescFlags0_EndOfFrame 0x40000000 +#define kGEMTxDescFlags0_StartOfFrame 0x80000000 + +#define kGEMTxDescFlags1_Int 0x00000001 +#define kGEMTxDescFlags1_NoCRC 0x00000002 + + +/* + * Receive/Transmit descriptor + * + */ +typedef struct _GEMRxDescriptor +{ + u_int16_t tcpPseudoChecksum; + u_int16_t frameDataSize; + u_int32_t flags; + u_int32_t bufferAddrLo; + u_int32_t bufferAddrHi; +} GEMRxDescriptor; + +/* + * Note: Own is in the high bit of frameDataSize field + */ +#define kGEMRxDescFrameSize_Mask 0x7FFF +#define kGEMRxDescFrameSize_Own 0x8000 + +/* + * Rx flags field + */ +#define kGEMRxDescFlags_HashValueBit 0x00001000 +#define kGEMRxDescFlags_HashValueMask 0x0FFFF000 +#define kGEMRxDescFlags_HashPass 0x10000000 +#define kGEMRxDescFlags_AlternateAddr 0x20000000 +#define kGEMRxDescFlags_BadCRC 0x40000000 + + +typedef struct _GEMTxDescriptor +{ + u_int32_t flags0; + u_int32_t flags1; + u_int32_t bufferAddrLo; + u_int32_t bufferAddrHi; +} GEMTxDescriptor; + +/* + * + */ +#define kGEMTxDescFlags0_BufferSizeMask 0x00007FFF +#define kGEMTxDescFlags0_BufferSizeBit 0x00000001 +#define kGEMTxDescFlags0_ChecksumStartMask 0x00FF8000 +#define kGEMTxDescFlags0_ChecksumStartBit 0x00008000 +#define kGEMTxDescFlags0_ChecksumStuffMask 0x1F000000 +#define kGEMTxDescFlags0_ChecksupStuffBit 0x01000000 +#define kGEMTxDescFlags0_ChecksumEnable 0x20000000 +#define kGEMTxDescFlags0_EndOfFrame 0x40000000 +#define kGEMTxDescFlags0_StartOfFrame 0x80000000 + +#define kGEMTxDescFlags1_Int 0x00000001 +#define kGEMTxDescFlags1_NoCRC 0x00000002 + + + +#define kGEMBurstSize (CACHE_LINE_SIZE / 8) diff --git a/iokit/Drivers/network/drvPPCUniN/UniNPowerSaver.cpp b/iokit/Drivers/network/drvPPCUniN/UniNPowerSaver.cpp new file mode 100644 index 000000000..e1c2edac1 --- /dev/null +++ b/iokit/Drivers/network/drvPPCUniN/UniNPowerSaver.cpp 
@@ -0,0 +1,378 @@ + +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#include "UniNEnetPrivate.h" + +#define super IOEthernetController + +// Set EXTRANEOUS_PM_DELAYS to 1 to enable absurdly long delays. 
+// +#define EXTRANEOUS_PM_DELAYS 0 + +// -------------------------------------------------------------------------- +// Method: registerWithPolicyMaker +// +// Purpose: +// initialize the driver for power managment and register ourselves with +// policy-maker +IOReturn +UniNEnet::registerWithPolicyMaker(IOService * policyMaker) +{ + +/****** +From iokit/IOKit/pwr_mgt/IOPMpowerState.h +struct IOPMPowerState +{ +unsigned long version; // version number of this struct +IOPMPowerFlags capabilityFlags; // bits that describe the capability +IOPMPowerFlags outputPowerCharacter; // description (to power domain children) +IOPMPowerFlags inputPowerRequirement; // description (to power domain parent) +unsigned long staticPower; // average consumption in milliwatts +unsigned long unbudgetedPower; // additional consumption from separate power supply (mw) +unsigned long powerToAttain; // additional power to attain this state from next lower state (in mw) +unsigned long timeToAttain; // (in microseconds) +unsigned long settleUpTime; // (microseconds) +unsigned long timeToLower; // (in microseconds) +unsigned long settleDownTime; // (microseconds) +unsigned long powerDomainBudget; // power in mw a domain in this state can deliver to its children +}; + +*******/ + +#define num_of_power_states 2 + +static IOPMPowerState ourPowerStates[num_of_power_states] = { + {1, 0,0,0,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable | IOPMMaxPerformance, IOPMPowerOn, IOPMPowerOn, 50,0,0, + kUniNsettle_time, kUniNsettle_time, kUniNsettle_time, kUniNsettle_time,0} + // 50 milliwatts above is just a guess right now, since the ethernet is part of Uni-N +}; + + currentPowerState = kMaxUniNEnetPowerState; + return policyMaker->registerPowerDriver(this, ourPowerStates, num_of_power_states); +} + +// Method: maxCapabilityForDomainState +// +// Purpose: +// returns the maximun state of card power, which would be +// power on without any attempt to power manager. 
+unsigned long +UniNEnet::maxCapabilityForDomainState(IOPMPowerFlags domainState) +{ + if( domainState & IOPMPowerOn ) + return kMaxUniNEnetPowerState; //In reality, it's just array element 1 for Uni-N + else + return 0; +} + +// Method: initialPowerStateForDomainState +// +// Purpose: +// The power domain may be changing state. If power is on in the new +// state, that will not affect our state at all. If domain power is off, +// we can attain only our lowest state, which is off. + +unsigned long +UniNEnet::initialPowerStateForDomainState( IOPMPowerFlags domainState ) +{ + if( domainState & IOPMPowerOn ) + return currentPowerState; + else + return 0; +} + + +// Method: powerStateForDomainState +// +// Purpose: +// The power domain may be changing state. If power is on in the new +// state, that will not affect our state at all. If domain power is off, +// we can attain only our lowest state, which is off. +unsigned long +UniNEnet::powerStateForDomainState(IOPMPowerFlags domainState ) +{ + if( domainState & IOPMPowerOn ) + return currentPowerState; + else + return 0; +} + +// Method: setPowerState +// +IOReturn UniNEnet::setPowerState(unsigned long powerStateOrdinal, + IOService * whatDevice) +{ + volatile UInt32 clockReg; + + // Do not do anything if the state is invalid. + if (powerStateOrdinal >= num_of_power_states) + return IOPMNoSuchState; + + if (powerStateOrdinal == currentPowerState) + return IOPMAckImplied; //no change required + + // otherwise remember the new state: + currentPowerState = powerStateOrdinal; + + IOLog("UniNEthernet::setPowerState(%d, 0x%08lx)\n", + (int) powerStateOrdinal, + (UInt32) whatDevice); + + switch ( currentPowerState ) + { + case 0: // Ethernet is off + + // Shutdown the hardware unconditionally. + // doDisable(this); + + // Turn off PHY before turning off MAC + // MII_CONTROL_POWERDOWN ? 
no, it is read-only for Broadcom 5201 + // PHY, but 5400 is R/W + stopPHYChip(false); //In this file + + // Now turn off ethernet clock in Uni-N + callPlatformFunction("EnableUniNEthernetClock", true, + (void *)false, 0, 0, 0); + break; + + case kMaxUniNEnetPowerState: // 1 = max power state, Ethernet is on + + // Now turn on ethernet clock in Uni-N + callPlatformFunction("EnableUniNEthernetClock", true, + (void *)true, 0, 0, 0); + +#if EXTRANEOUS_PM_DELAYS + IODelay(MII_DEFAULT_DELAY * 1000); // 20 milliseconds +#endif + + // Bring up PHY then MAC. + startPHYChip(); + // doEnable(this); + + break; + + default: + // This is illegal, only 0 and 1 are allowed for + // UniN ethernet for now + break; + } + + return IOPMAckImplied; +} + +// This method sets up the PHY registers for low power. +// Copied from stopEthernetController() in OS9. +// The setupWOL value is not really implemented systemwide yet +void +UniNEnet::stopPHYChip(bool setupWOL) +{ + UInt32 val32; + UInt16 i, val16; + + if (phyBCMType == 0) return; + + //IOLog("UniN on stop phy = %d\n", phyBCMType); + + if (setupWOL == false) + { + //disabling MIF interrupts on the 5201 is explicit + if (phyBCMType == 5201) + { + miiWriteWord(0x0000, MII_BCM5201_INTERRUPT, kPHYAddr0); + // 0 or 0x1f or phyId? miiFindPHY returns any integer + } + } + + //Drive the MDIO line high to prevent immediate wakeup + val32 = READ_REGISTER( MIFConfiguration ); + WRITE_REGISTER( MIFConfiguration, val32 & kMIFConfiguration_Poll_Enable ); + + // 5th ADDR in Broadcom PHY docs + miiReadWord( &val16, MII_LINKPARTNER, kPHYAddr0 ); + + // don't know why OS9 writes it back unchanged + miiWriteWord( val16, MII_LINKPARTNER, kPHYAddr0 ); + + /* Put the MDIO pins into a benign state. Note that the management regs + in the PHY will be inaccessible. This is to guarantee max power savings + on Powerbooks and to eliminate damage to Broadcom PHYs. 
+ */ + //bit bang mode + WRITE_REGISTER( MIFConfiguration, kMIFConfiguration_BB_Mode ); + + WRITE_REGISTER( MIFBitBangClock, 0x0000 ); + WRITE_REGISTER( MIFBitBangData, 0x0000 ); + WRITE_REGISTER( MIFBitBangOutputEnable, 0x0000 ); + WRITE_REGISTER( XIFConfiguration, kXIFConfiguration_GMIIMODE + | kXIFConfiguration_MII_Int_Loopback ); + + if (setupWOL) + { + //For multicast filtering these bits must be enabled + WRITE_REGISTER( RxMACConfiguration, + kRxMACConfiguration_Hash_Filter_Enable + | kRxMACConfiguration_Rx_Mac_Enable ); + // set kpfRxMACEnabled in OS9, but I don't see matching OS X flag + } + else + { + WRITE_REGISTER( RxMACConfiguration, 0 ); + // un-set kpfRxMACEnabled in OS9, but I don't see matching OS X flag + } + + WRITE_REGISTER( TxMACConfiguration, 0 ); + WRITE_REGISTER( XIFConfiguration, 0 ); + +#if 0 + // Disable interrupt source on the controller. + // Already disabled from earlier resetAndEnable(false) call. + WRITE_REGISTER( InterruptMask, kInterruptMask_None ); // all FF +#endif + + WRITE_REGISTER( TxConfiguration, 0 ); + WRITE_REGISTER( RxConfiguration, 0 ); + + if (!setupWOL) + { + // this doesn't power down stuff, but if we don't hit it then we can't + // superisolate the transceiver + WRITE_REGISTER( SoftwareReset, kSoftwareReset_TX | kSoftwareReset_RX ); + + // kSoftwareReset_RSTOUT too??? + i = 0; + do { +// IODelay(MII_RESET_DELAY * 1000); // 10 milliseconds + IODelay(10); + if (i++ >= 100) + { + IOLog("UniNEnet timeout on SW reset\n"); + break; + } + val32 = READ_REGISTER( SoftwareReset ); + } while ( (val32 & (kSoftwareReset_TX | kSoftwareReset_RX)) != 0 ); + + WRITE_REGISTER( TxMACSoftwareResetCommand, kTxMACSoftwareResetCommand_Reset ); + WRITE_REGISTER( RxMACSoftwareResetCommand, kRxMACSoftwareResetCommand_Reset ); + + //This is what actually turns off the LINK LED + if (phyBCMType == 5400) + { +#if 0 + // The 5400 has read/write privilege on this bit, + // but 5201 is read-only. 
+ miiWriteWord( MII_CONTROL_POWERDOWN, MII_CONTROL, kPHYAddr0); +#endif + } + else // Only other possibility is Broadcom 5201 (or 5202?) + { +#if 0 + miiReadWord( &val16, MII_BCM5201_AUXMODE2, kPHYAddr0 ); + miiWriteWord( val16 & ~MII_BCM5201_AUXMODE2_LOWPOWER, + MII_BCM5201_AUXMODE2, kPHYAddr0 ); +#endif + + miiWriteWord( MII_BCM5201_MULTIPHY_SUPERISOLATE, + MII_BCM5201_MULTIPHY, + kPHYAddr0 ); + } + } // end of none-WOL case +} + +//start the PHY +void +UniNEnet::startPHYChip() +{ + UInt32 val32; + UInt16 val16; + + // if (netifClient) //MacOS 9 uses numClients == 1? + { + //IOLog("UniN on restart phy = %d\n", phyBCMType); + + val32 = READ_REGISTER( TxConfiguration ); + WRITE_REGISTER( TxConfiguration, val32 | kTxConfiguration_Tx_DMA_Enable ); + + val32 = READ_REGISTER( RxConfiguration ); + WRITE_REGISTER( RxConfiguration, val32 | kRxConfiguration_Rx_DMA_Enable ); + + val32 = READ_REGISTER( TxMACConfiguration ); + WRITE_REGISTER( TxMACConfiguration, val32 | kTxMACConfiguration_TxMac_Enable ); + + val32 = READ_REGISTER( RxMACConfiguration ); + WRITE_REGISTER( RxMACConfiguration, + val32 | kRxMACConfiguration_Rx_Mac_Enable + | kRxMACConfiguration_Hash_Filter_Enable ); + + // Set flag to RxMACEnabled somewhere?? + + /* These registers are only for the Broadcom 5201. + We write the auto low power mode bit here because if we do it earlier + and there is no link then the xcvr registers become unclocked and + unable to be written + */ + if (phyBCMType == 5201) + { + // Ask Enrique why the following 2 lines are not necessary in OS 9. + // These 2 lines should take the PHY out of superisolate mode. 
All + // MII inputs are ignored until the PHY is out of isolate mode + miiReadWord( &val16, MII_BCM5201_MULTIPHY, kPHYAddr0 ); + miiWriteWord( val16 & ~MII_BCM5201_MULTIPHY_SUPERISOLATE, + MII_BCM5201_MULTIPHY, kPHYAddr0 ); + +#if 0 + // Automatically go into low power mode if no link + miiReadWord( &val16, MII_BCM5201_AUXMODE2, kPHYAddr0 ); + miiWriteWord( val16 | MII_BCM5201_AUXMODE2_LOWPOWER, + MII_BCM5201_AUXMODE2, kPHYAddr0 ); +#endif + +#if EXTRANEOUS_PM_DELAYS + IODelay(MII_DEFAULT_DELAY * 1000); // 20 milliseconds +#endif + } + + // WARNING... this code is untested on gigabit ethernet (5400), there + // should be a case to handle it for MII_CONTROL_POWERDOWN bit here, + // unless it is unnecessary after a hardware reset + + WRITE_REGISTER( RxKick, RX_RING_LENGTH - 4 ); + } +} + +/*------------------------------------------------------------------------- + * Assert the reset pin on the PHY momentarily to initialize it, and also + * to bring the PHY out of low-power mode. + * + *-------------------------------------------------------------------------*/ + +bool UniNEnet::resetPHYChip() +{ + IOReturn result; + + result = keyLargo->callPlatformFunction(keyLargo_resetUniNEthernetPhy, false, 0, 0, 0, 0); + if (result != kIOReturnSuccess) return false; + + return true; +} diff --git a/iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.cpp b/iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.cpp new file mode 100644 index 000000000..5572c05ec --- /dev/null +++ b/iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.cpp @@ -0,0 +1,356 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created from objc version. + */ + +#include + +#include +#include +#include +#include +#include + +#include "AppleGracklePCI.h" + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOPCIBridge + +OSDefineMetaClassAndStructors(AppleGracklePCI, IOPCIBridge) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleGracklePCI::start( IOService * provider ) +{ + IOPCIPhysicalAddress ioAddrCell; + IOPhysicalAddress ioPhys; + IOPhysicalAddress ioPhysLen; + OSArray * array; + IODeviceMemory::InitElement rangeList[ 3 ]; + IORegistryEntry * bridge; + OSData * busProp; + IOPCIAddressSpace grackleSpace; + UInt32 picr1; + + if( 0 == (lock = IOSimpleLockAlloc())) + return( false ); + + ioAddrCell.physHi.bits = 0; + ioAddrCell.physHi.s.space = kIOPCIIOSpace; + ioAddrCell.physMid = 0; + ioAddrCell.physLo = 0; + ioAddrCell.lengthHi = 0; + ioAddrCell.lengthLo = 0x10000; + + bridge = provider; + + if( ! 
IODTResolveAddressCell( bridge, (UInt32 *) &ioAddrCell, + &ioPhys, &ioPhysLen) ) { + + IOLog("%s: couldn't find my base\n", getName()); + return( false); + } + + /* define more explicit ranges */ + + rangeList[0].start = ioPhys; + rangeList[0].length = ioPhysLen; + rangeList[1].start = ioPhys + 0x00c00000; + rangeList[1].length = 4; + rangeList[2].start = ioPhys + 0x00e00000; + rangeList[2].length = 4; + + array = IODeviceMemory::arrayFromList( rangeList, 3 ); + if( !array) + return( false); + + provider->setDeviceMemory( array ); + array->release(); + ioMemory = (IODeviceMemory *) array->getObject( 0 ); + + if( (configAddrMap = provider->mapDeviceMemoryWithIndex( 1 ))) + configAddr = (volatile UInt32 *) configAddrMap->getVirtualAddress(); + if( (configDataMap = provider->mapDeviceMemoryWithIndex( 2 ))) + configData = (volatile UInt8 *) configDataMap->getVirtualAddress(); + + if( !configAddr || !configData) + return( false); + + busProp = (OSData *) bridge->getProperty("bus-range"); + if( busProp) + primaryBus = *((UInt32 *) busProp->getBytesNoCopy()); + + // Check to see if there is a set loop snoop property. + if( provider->getProperty("set-loop-snoop")) { + // Turn on the Loop Snoop bit in PICR1. + // See: MPC106 User's Manual p. 3-55. + grackleSpace.bits = 0x80000000; + picr1 = configRead32(grackleSpace, 0xA8); + picr1 |= (1 << 4); + configWrite32(grackleSpace, 0xA8, picr1); + } + + // register iteself so we can find it: + registerService(); + + // Publish the AccessMPC106PerformanceRegister platform function. 
+ publishResource("AccessMPC106PerformanceRegister", this); + + return( super::start( provider)); +} + +bool AppleGracklePCI::configure( IOService * provider ) +{ + bool ok; + + ok = addBridgeMemoryRange( 0x80000000, 0x7f000000, true ); + ok = addBridgeIORange( 0, 0x10000 ); + + return( super::configure( provider )); +} + +void AppleGracklePCI::free() +{ + if( configAddrMap) + configAddrMap->release(); + if( configDataMap) + configDataMap->release(); + if( lock) + IOSimpleLockFree( lock); + + super::free(); +} + +IODeviceMemory * AppleGracklePCI::ioDeviceMemory( void ) +{ + return( ioMemory); +} + +UInt8 AppleGracklePCI::firstBusNum( void ) +{ + return( primaryBus ); +} + +UInt8 AppleGracklePCI::lastBusNum( void ) +{ + return( firstBusNum() ); +} + +IOPCIAddressSpace AppleGracklePCI::getBridgeSpace( void ) +{ + IOPCIAddressSpace space; + + space.bits = 0; + space.s.deviceNum = kBridgeSelfDevice; + + return( space ); +} + +inline void AppleGracklePCI::setConfigSpace( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOPCIAddressSpace addrCycle; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset & 0xfc; + + OSWriteSwapInt32( configAddr, 0, addrCycle.bits); + eieio(); + OSReadSwapInt32( configAddr, 0 ); + eieio(); +} + + +UInt32 AppleGracklePCI::configRead32( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt32 data; + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + setConfigSpace( space, offset ); + + data = OSReadSwapInt32( configData, 0 ); + eieio(); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + return( data ); +} + +void AppleGracklePCI::configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + setConfigSpace( space, offset ); + + OSWriteSwapInt32( configData, 0, data ); + eieio(); + /* read to sync */ + (void) OSReadSwapInt32( configData, 0 ); + eieio(); + + IOSimpleLockUnlockEnableInterrupt( lock, 
ints ); +} + +UInt16 AppleGracklePCI::configRead16( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt16 data; + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + setConfigSpace( space, offset ); + + offset = (offset & 2); + + data = OSReadSwapInt16( configData, offset ); + eieio(); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + return( data ); +} + +void AppleGracklePCI::configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + setConfigSpace( space, offset ); + + offset = (offset & 2); + + OSWriteSwapInt16( configData, offset, data ); + eieio(); + /* read to sync */ + (void) OSReadSwapInt16( configData, offset ); + eieio(); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +UInt8 AppleGracklePCI::configRead8( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt8 data; + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + setConfigSpace( space, offset ); + + offset = (offset & 3); + + data = configData[ offset ]; + eieio(); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + return( data ); +} + +void AppleGracklePCI::configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + setConfigSpace( space, offset ); + + offset = (offset & 3); + + configData[ offset ] = data; + eieio(); + /* read to sync */ + data = configData[ offset ]; + eieio(); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +IOReturn AppleGracklePCI::callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4) +{ + if (functionName->isEqualTo("AccessMPC106PerformanceRegister")) { + return accessMPC106PerformanceRegister((bool)param1, (long)param2, + (unsigned long *)param3); + } + + return super::callPlatformFunction(functionName, waitForFunction, + 
param1, param2, param3, param4); +} + + +enum { + kMCMonitorModeControl = 0, + kMCCommand, + kMCPerformanceMonitor0, + kMCPerformanceMonitor1, + kMCPerformanceMonitor2, + kMCPerformanceMonitor3 +}; + +IOReturn AppleGracklePCI::accessMPC106PerformanceRegister(bool write, + long regNumber, + unsigned long *data) +{ + IOPCIAddressSpace grackleSpace; + unsigned long offset; + + switch (regNumber) { + case kMCMonitorModeControl : offset = kMPC106MMCR0; break; + case kMCCommand : offset = kMPC106CMDR0; break; + case kMCPerformanceMonitor0 : offset = kMPC106PMC0; break; + case kMCPerformanceMonitor1 : offset = kMPC106PMC1; break; + case kMCPerformanceMonitor2 : offset = kMPC106PMC2; break; + case kMCPerformanceMonitor3 : offset = kMPC106PMC3; break; + default : return kIOReturnBadArgument; + } + + if (data == 0) return kIOReturnBadArgument; + + grackleSpace.bits = 0x80000000; + + if (write) { + configWrite32(grackleSpace, offset, *data); + } else { + *data = configRead32(grackleSpace, offset); + } + + return kIOReturnSuccess; +} diff --git a/iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.h b/iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.h new file mode 100644 index 000000000..766189c00 --- /dev/null +++ b/iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_APPLEGRACKLEPCI_H +#define _IOKIT_APPLEGRACKLEPCI_H + +#include + +enum { + kBridgeSelfDevice = 0 +}; + +class AppleGracklePCI : public IOPCIBridge +{ + OSDeclareDefaultStructors(AppleGracklePCI) + +protected: + IOSimpleLock * lock; + IODeviceMemory * ioMemory; + IOMemoryMap * configAddrMap; + IOMemoryMap * configDataMap; + + volatile UInt32 * configAddr; + volatile UInt8 * configData; + + UInt8 primaryBus; + + inline void setConfigSpace( IOPCIAddressSpace space, UInt8 offset ); + virtual UInt8 firstBusNum( void ); + virtual UInt8 lastBusNum( void ); + + IOReturn accessMPC106PerformanceRegister(bool write, long regNumber, + unsigned long *data); + +public: + virtual bool start( IOService * provider ); + virtual bool configure( IOService * provider ); + + virtual void free(); + virtual IODeviceMemory * ioDeviceMemory( void ); + + virtual UInt32 configRead32( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); + + virtual IOPCIAddressSpace getBridgeSpace( void ); + + virtual IOReturn 
callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4); +}; + +// MPC106 Performance Monitoring Registers +#define kMPC106CMDR0 (0x048) +#define kMPC106MMCR0 (0x04C) +#define kMPC106PMC0 (0x050) +#define kMPC106PMC1 (0x054) +#define kMPC106PMC2 (0x058) +#define kMPC106PMC3 (0x05C) + +#endif /* ! _IOKIT_APPLEGRACKLEPCI_H */ diff --git a/iokit/Drivers/pci/drvApplePCI/AppleI386PCI.cpp b/iokit/Drivers/pci/drvApplePCI/AppleI386PCI.cpp new file mode 100644 index 000000000..40423ac11 --- /dev/null +++ b/iokit/Drivers/pci/drvApplePCI/AppleI386PCI.cpp @@ -0,0 +1,563 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created from objc version. 
+ */ + +#include + +#include +#include +#include +#include + +#include +#include + +#include "AppleI386PCI.h" + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOPCIBridge + +OSDefineMetaClassAndStructors(AppleI386PCI, IOPCIBridge) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleI386PCI::start( IOService * provider ) +{ + OSData * prop; + PCI_bus_info_t * info; + + if( 0 == (lock = IOSimpleLockAlloc())) + return( false ); + + prop = (OSData *) provider->getProperty("pci-bus-info"); + if( 0 == prop) + return( false); + + info = (PCI_bus_info_t *) prop->getBytesNoCopy(); + + maxBusNum = info->maxBusNum; + maxDevNum = 0; + majorVersion = info->majorVersion; + minorVersion = info->minorVersion; + BIOS16Present = info->BIOSPresent; + BIOS32Present = false; + BIOS32Entry = 0x00000000; + configMethod1 = info->u_bus.s.configMethod1; + configMethod2 = info->u_bus.s.configMethod2; + specialCycle1 = info->u_bus.s.specialCycle1; + specialCycle2 = info->u_bus.s.specialCycle2; + + /* + if ((BIOS16Present) & !(configMethod1 | configMethod2)) { + // This is a PCI system, but neither method is supported + // Lets try them both just in case (ala NEC ExpressII P60) + if (!(configMethod1 = [self test_M1])) + configMethod2 = [self test_M2]; + } + */ + +#define IFYES(b, s) ((b) ? 
s : "") + IOLog("PCI Ver=%x.%02x BusCount=%d Features=[ %s%s%s%s%s%s]\n", + majorVersion, minorVersion, maxBusNum+1, + IFYES(BIOS16Present, "BIOS16 "), IFYES(BIOS32Present, "BIOS32 "), + IFYES(configMethod1, "CM1 "), IFYES(configMethod2, "CM2 "), + IFYES(specialCycle1, "SC1 "), IFYES(specialCycle2, "SC2 ") ); + + if (configMethod1) + maxDevNum = 31; + else if (configMethod2) + maxDevNum = 15; + else + return( false ); + + ioMemory = IODeviceMemory::withRange( 0, 65536 ); + if( !ioMemory) + return( false); + ioMemory->setMapping( kernel_task, 0 ); /* mapped to zero in IO space */ + + return( super::start( provider)); +} + +bool AppleI386PCI::configure( IOService * provider ) +{ + bool ok; + + ok = addBridgeMemoryRange( 0x80000000, 0x7f000000, true ); + ok = addBridgeIORange( 0, 0x10000 ); + + return( super::configure( provider )); +} + +void AppleI386PCI::free() +{ + if( ioMemory) + ioMemory->release(); + if( lock) + IOSimpleLockFree( lock); + + super::free(); +} + +IODeviceMemory * AppleI386PCI::ioDeviceMemory( void ) +{ + return( ioMemory); +} + + +UInt8 AppleI386PCI::firstBusNum( void ) +{ + return( 0 ); +} + +UInt8 AppleI386PCI::lastBusNum( void ) +{ + return( firstBusNum() ); +} + +IOPCIAddressSpace AppleI386PCI::getBridgeSpace( void ) +{ + IOPCIAddressSpace space; + + space.bits = 0; + + return( space ); +} + +/* defines for Configuration Method #1 (PCI 2.0 Spec, sec 3.6.4.1.1) */ +#define PCI_CONFIG_ADDRESS 0x0cf8 +#define PCI_CONFIG_DATA 0x0cfc + +/* defines for Configuration Method #2 (PCI 2.0 Spec, sec 3.6.4.1.3) */ +#define PCI_CSE_REGISTER 0x0cf8 +#define PCI_BUS_FORWARD 0x0cfa + +#define PCI_DEFAULT_DATA 0xffffffff + +#if 0 + +- (BOOL) test_M1 +{ + unsigned long address, data; + + for (address = 0x80000000; address < 0x80010000; address += 0x800) { + outl (PCI_CONFIG_ADDRESS, address); + if (inl (PCI_CONFIG_ADDRESS) != address) { + return NO; + } + data = inl(PCI_CONFIG_DATA); + if ((data != PCI_DEFAULT_DATA) && (data != 0x00)) { + outl 
(PCI_CONFIG_ADDRESS, 0); + return YES; + } + } + + outl (PCI_CONFIG_ADDRESS, 0); + return NO; +} + +- (BOOL) test_M2 +{ + unsigned long address, data; + + /* Enable configuration space at I/O ports Cxxx. */ + + outb (PCI_CSE_REGISTER, 0xF0); + if (inb (PCI_CSE_REGISTER) != 0xF0) { + return NO; + } + + outb (PCI_BUS_FORWARD, 0x00); + if (inb (PCI_BUS_FORWARD) != 0x00) { + return NO; + } + /* Search all devices on the bus. */ + for (address = 0xc000; address <= 0xcfff; address += 0x100) { + data = inl(address); + if ((data != PCI_DEFAULT_DATA) && (data != 0x00)) { + outb (PCI_CSE_REGISTER, 0); + return YES; + } + } + + outb (PCI_CSE_REGISTER, 0); + return NO; +} +#endif + +UInt32 AppleI386PCI::configRead32Method1( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOPCIAddressSpace addrCycle; + UInt32 data = PCI_DEFAULT_DATA; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset; + + outl( PCI_CONFIG_ADDRESS, addrCycle.bits); + if (inl( PCI_CONFIG_ADDRESS) == addrCycle.bits) + data = inl( PCI_CONFIG_DATA); + + outl( PCI_CONFIG_ADDRESS, 0); + + return( data ); +} + + +void AppleI386PCI::configWrite32Method1( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) +{ + IOPCIAddressSpace addrCycle; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset; + + outl( PCI_CONFIG_ADDRESS, addrCycle.bits); + if (inl( PCI_CONFIG_ADDRESS) == addrCycle.bits) + outl(PCI_CONFIG_DATA, data); + + outl( PCI_CONFIG_ADDRESS, 0); +} + +UInt16 AppleI386PCI::configRead16Method1( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOPCIAddressSpace addrCycle; + UInt16 data = 0xffff; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset; + + outl( PCI_CONFIG_ADDRESS, addrCycle.bits); + if (inl( PCI_CONFIG_ADDRESS) == addrCycle.bits) + data = inw( PCI_CONFIG_DATA); + + outl( PCI_CONFIG_ADDRESS, 0); + + return( data ); +} + + +void AppleI386PCI::configWrite16Method1( IOPCIAddressSpace space, + UInt8 offset, UInt16 data 
) +{ + IOPCIAddressSpace addrCycle; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset; + + outl( PCI_CONFIG_ADDRESS, addrCycle.bits); + if (inl( PCI_CONFIG_ADDRESS) == addrCycle.bits) + outw(PCI_CONFIG_DATA, data); + + outl( PCI_CONFIG_ADDRESS, 0); +} + +UInt8 AppleI386PCI::configRead8Method1( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOPCIAddressSpace addrCycle; + UInt8 data = 0xff; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset; + + outl( PCI_CONFIG_ADDRESS, addrCycle.bits); + if (inl( PCI_CONFIG_ADDRESS) == addrCycle.bits) + data = inb( PCI_CONFIG_DATA); + + outl( PCI_CONFIG_ADDRESS, 0); + + return( data ); +} + + +void AppleI386PCI::configWrite8Method1( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) +{ + IOPCIAddressSpace addrCycle; + + addrCycle = space; + addrCycle.s.reloc = 1; + addrCycle.s.registerNum = offset; + + outl( PCI_CONFIG_ADDRESS, addrCycle.bits); + if (inl( PCI_CONFIG_ADDRESS) == addrCycle.bits) + outb(PCI_CONFIG_DATA, data); + + outl( PCI_CONFIG_ADDRESS, 0); +} + +UInt32 AppleI386PCI::configRead32Method2( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt32 data = PCI_DEFAULT_DATA; + UInt8 cse; + + if( space.s.deviceNum > 15) + return( data); + + cse = 0xf0 | (space.s.functionNum << 1); + outb( PCI_CSE_REGISTER, cse); + if (inb( PCI_CSE_REGISTER) == cse) { + outb( PCI_BUS_FORWARD, space.s.busNum); + if (inb( PCI_BUS_FORWARD) == space.s.busNum) { + data = inl( 0xc000 + | (offset & 0xfc) + | (space.s.deviceNum << 8)); + } + outb( PCI_BUS_FORWARD, 0x00); + } + outb( PCI_CSE_REGISTER, 0x00); + + return( data ); +} + + +void AppleI386PCI::configWrite32Method2( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) +{ + UInt8 cse; + + if( space.s.deviceNum > 15) + return; + + cse = 0xf0 | (space.s.functionNum << 1); + outb( PCI_CSE_REGISTER, cse); + if (inb( PCI_CSE_REGISTER) == cse) { + outb( PCI_BUS_FORWARD, space.s.busNum); + if (inb( PCI_BUS_FORWARD) == 
space.s.busNum) { + outl( 0xc000 + | (offset & 0xfc) + | (space.s.deviceNum << 8), data); + } + outb( PCI_BUS_FORWARD, 0x00); + } + outb( PCI_CSE_REGISTER, 0x00); +} + +UInt16 AppleI386PCI::configRead16Method2( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt16 data = 0xffff; + UInt8 cse; + + if( space.s.deviceNum > 15) + return( data); + + cse = 0xf0 | (space.s.functionNum << 1); + outb( PCI_CSE_REGISTER, cse); + if (inb( PCI_CSE_REGISTER) == cse) { + outb( PCI_BUS_FORWARD, space.s.busNum); + if (inb( PCI_BUS_FORWARD) == space.s.busNum) { + data = inw( 0xc000 + | (offset & 0xfe) + | (space.s.deviceNum << 8)); + } + outb( PCI_BUS_FORWARD, 0x00); + } + outb( PCI_CSE_REGISTER, 0x00); + + return( data ); +} + + +void AppleI386PCI::configWrite16Method2( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ) +{ + UInt8 cse; + + if( space.s.deviceNum > 15) + return; + + cse = 0xf0 | (space.s.functionNum << 1); + outb( PCI_CSE_REGISTER, cse); + if (inb( PCI_CSE_REGISTER) == cse) { + outb( PCI_BUS_FORWARD, space.s.busNum); + if (inb( PCI_BUS_FORWARD) == space.s.busNum) { + outw( 0xc000 + | (offset & 0xfe) + | (space.s.deviceNum << 8), data); + } + outb( PCI_BUS_FORWARD, 0x00); + } + outb( PCI_CSE_REGISTER, 0x00); +} + + +UInt8 AppleI386PCI::configRead8Method2( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt16 data = 0xffff; + UInt8 cse; + + if( space.s.deviceNum > 15) + return( data); + + cse = 0xf0 | (space.s.functionNum << 1); + outb( PCI_CSE_REGISTER, cse); + if (inb( PCI_CSE_REGISTER) == cse) { + outb( PCI_BUS_FORWARD, space.s.busNum); + if (inb( PCI_BUS_FORWARD) == space.s.busNum) { + data = inb( 0xc000 + | (offset) + | (space.s.deviceNum << 8)); + } + outb( PCI_BUS_FORWARD, 0x00); + } + outb( PCI_CSE_REGISTER, 0x00); + + return( data ); +} + + +void AppleI386PCI::configWrite8Method2( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) +{ + UInt8 cse; + + if( space.s.deviceNum > 15) + return; + + cse = 0xf0 | (space.s.functionNum << 1); + outb( 
PCI_CSE_REGISTER, cse); + if (inb( PCI_CSE_REGISTER) == cse) { + outb( PCI_BUS_FORWARD, space.s.busNum); + if (inb( PCI_BUS_FORWARD) == space.s.busNum) { + outb( 0xc000 + | (offset) + | (space.s.deviceNum << 8), data); + } + outb( PCI_BUS_FORWARD, 0x00); + } + outb( PCI_CSE_REGISTER, 0x00); +} + + +UInt32 AppleI386PCI::configRead32( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOInterruptState ints; + UInt32 retval; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( configMethod1) + retval = configRead32Method1( space, offset ); + else + retval = configRead32Method2( space, offset ); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + return(retval); +} + +void AppleI386PCI::configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( configMethod1) + configWrite32Method1( space, offset, data ); + else + configWrite32Method2( space, offset, data ); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +UInt16 AppleI386PCI::configRead16( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOInterruptState ints; + UInt16 retval; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( configMethod1) + retval = configRead16Method1( space, offset ); + else + retval = configRead16Method2( space, offset ); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + return(retval); +} + +void AppleI386PCI::configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( configMethod1) + configWrite16Method1( space, offset, data ); + else + configWrite16Method2( space, offset, data ); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +UInt8 AppleI386PCI::configRead8( IOPCIAddressSpace space, + UInt8 offset ) +{ + IOInterruptState ints; + UInt8 retval; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( configMethod1) + retval = configRead8Method1( space, 
offset ); + else + retval = configRead8Method2( space, offset ); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + return(retval); +} + +void AppleI386PCI::configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( configMethod1) + configWrite8Method1( space, offset, data ); + else + configWrite8Method2( space, offset, data ); + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + diff --git a/iokit/Drivers/pci/drvApplePCI/AppleI386PCI.h b/iokit/Drivers/pci/drvApplePCI/AppleI386PCI.h new file mode 100644 index 000000000..a22320e9b --- /dev/null +++ b/iokit/Drivers/pci/drvApplePCI/AppleI386PCI.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + + +#ifndef _IOKIT_APPLEI386PCI_H +#define _IOKIT_APPLEI386PCI_H + +#include + +class AppleI386PCI : public IOPCIBridge +{ + OSDeclareDefaultStructors(AppleI386PCI) + + IOSimpleLock * lock; + IODeviceMemory * ioMemory; + + UInt8 maxBusNum; /* Highest valid Bus Number */ + UInt8 maxDevNum; /* Highest valid Device Number */ + bool BIOS16Present; /* booter found PCI BIOS 16 */ + bool configMethod1; /* host bridge supports CM1 */ + bool configMethod2; /* host bridge supports CM2 */ + bool specialCycle1; /* host bridge supports SC1 */ + bool specialCycle2; /* host bridge supports SC2 */ + bool BIOS32Present; /* init found PCI BIOS 32 */ + void *BIOS32Entry; /* Points to 32 bit PCI entry pt */ + int majorVersion; /* Packed BCD Major Rev#: 0x02 */ + int minorVersion; /* Packed BCD Minor Rev#: 0x00 */ + +private: + virtual UInt32 configRead32Method1( IOPCIAddressSpace space, + UInt8 offset ); + virtual void configWrite32Method1( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16Method1( IOPCIAddressSpace space, + UInt8 offset ); + virtual void configWrite16Method1( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8Method1( IOPCIAddressSpace space, + UInt8 offset ); + virtual void configWrite8Method1( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); + + virtual UInt32 configRead32Method2( IOPCIAddressSpace space, + UInt8 offset ); + virtual void configWrite32Method2( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16Method2( IOPCIAddressSpace space, + UInt8 offset ); + virtual void configWrite16Method2( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8Method2( IOPCIAddressSpace space, + UInt8 offset ); + virtual void configWrite8Method2( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); + + virtual IOPCIAddressSpace getBridgeSpace( void ); + +protected: + virtual UInt8 firstBusNum( void ); + virtual 
UInt8 lastBusNum( void ); + +public: + virtual bool start( IOService * provider ); + virtual bool configure( IOService * provider ); + + virtual void free(); + virtual IODeviceMemory * ioDeviceMemory( void ); + + virtual UInt32 configRead32( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); +}; + +#endif /* ! _IOKIT_APPLEI386PCI_H */ + diff --git a/iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.cpp b/iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.cpp new file mode 100644 index 000000000..01a1a65ba --- /dev/null +++ b/iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.cpp @@ -0,0 +1,800 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. 
All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created from objc version. + * 05 Nov 99 sdouglas added UniNorth AGP based on UniNorthAGPDriver.c + * by Fernando Urbina, Kent Miller. + * + */ + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "AppleMacRiscPCI.h" + +#define ALLOC_AGP_RANGE 0 + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOPCIBridge + +OSDefineMetaClassAndStructors(AppleMacRiscPCI, IOPCIBridge) + +OSDefineMetaClassAndStructors(AppleMacRiscVCI, AppleMacRiscPCI) + +OSDefineMetaClassAndStructors(AppleMacRiscAGP, AppleMacRiscPCI) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleMacRiscPCI::start( IOService * provider ) +{ + IOPCIPhysicalAddress ioAddrCell; + IOPhysicalAddress ioPhys; + IOPhysicalAddress ioPhysLen; + OSArray * array; + IODeviceMemory::InitElement rangeList[ 3 ]; + IORegistryEntry * bridge; + OSData * busProp; + + if( !IODTMatchNubWithKeys(provider, "('pci', 'vci')")) + return( false); + + if( IODTMatchNubWithKeys(provider, "'uni-north'")) + configDataOffsetMask = 0x7; + else + configDataOffsetMask = 0x3; + + if( 0 == (lock = IOSimpleLockAlloc())) + return( false ); + + ioAddrCell.physHi.bits = 0; + ioAddrCell.physHi.s.space = kIOPCIIOSpace; + ioAddrCell.physMid = 0; + ioAddrCell.physLo = 0; + ioAddrCell.lengthHi = 0; + ioAddrCell.lengthLo = 0x10000; + + bridge = provider; + + if( ! 
IODTResolveAddressCell( bridge, (UInt32 *) &ioAddrCell, + &ioPhys, &ioPhysLen) ) { + + IOLog("%s: couldn't find my base\n", getName()); + return( false); + } + + /* define more explicit ranges */ + + rangeList[0].start = ioPhys; + rangeList[0].length = ioPhysLen; + rangeList[1].start = ioPhys + 0x00800000; + rangeList[1].length = 4; + rangeList[2].start = ioPhys + 0x00c00000; + rangeList[2].length = 4; + + IORangeAllocator * platformRanges; + platformRanges = IOService::getPlatform()->getPhysicalRangeAllocator(); + assert( platformRanges ); + platformRanges->allocateRange( ioPhys, 0x01000000 ); + + array = IODeviceMemory::arrayFromList( rangeList, 3 ); + if( !array) + return( false); + + provider->setDeviceMemory( array ); + array->release(); + ioMemory = (IODeviceMemory *) array->getObject( 0 ); + + /* map registers */ + + if( (configAddrMap = provider->mapDeviceMemoryWithIndex( 1 ))) + configAddr = (volatile UInt32 *) configAddrMap->getVirtualAddress(); + if( (configDataMap = provider->mapDeviceMemoryWithIndex( 2 ))) + configData = (volatile UInt8 *) configDataMap->getVirtualAddress(); + + if( !configAddr || !configData) + return( false); + + busProp = (OSData *) bridge->getProperty("bus-range"); + if( busProp) + primaryBus = *((UInt32 *) busProp->getBytesNoCopy()); + + return( super::start( provider)); +} + +bool AppleMacRiscPCI::configure( IOService * provider ) +{ + UInt32 addressSelects; + UInt32 index; + bool ok; + + addressSelects = configRead32( getBridgeSpace(), kMacRISCAddressSelect ); + + coarseAddressMask = addressSelects >> 16; + fineAddressMask = addressSelects & 0xffff; + + for( index = 0; index < 15; index++ ) { + if( coarseAddressMask & (1 << index)) { + ok = addBridgeMemoryRange( index << 28, 0x10000000, true ); + } + } + +// if( coarseAddressMask & (1 << 15)) // F segment + for( index = 0; index < 15; index++ ) { + if( fineAddressMask & (1 << index)) { + ok = addBridgeMemoryRange( (0xf0 | index) << 24, + 0x01000000, true ); + } + } + + ok = 
addBridgeIORange( 0, 0x10000 ); + + return( super::configure( provider)); +} + +void AppleMacRiscPCI::free() +{ + if( configAddrMap) + configAddrMap->release(); + if( configDataMap) + configDataMap->release(); + if( lock) + IOSimpleLockFree( lock); + + super::free(); +} + +IODeviceMemory * AppleMacRiscPCI::ioDeviceMemory( void ) +{ + return( ioMemory); +} + +IODeviceMemory * AppleMacRiscVCI::ioDeviceMemory( void ) +{ + return( 0 ); +} + +bool AppleMacRiscVCI::configure( IOService * provider ) +{ + addBridgeMemoryRange( 0x90000000, 0x10000000, true ); + + return( AppleMacRiscPCI::configure( provider)); +} + +UInt8 AppleMacRiscPCI::firstBusNum( void ) +{ + return( primaryBus ); +} + +UInt8 AppleMacRiscPCI::lastBusNum( void ) +{ + return( firstBusNum() ); +} + +IOPCIAddressSpace AppleMacRiscPCI::getBridgeSpace( void ) +{ + IOPCIAddressSpace space; + + space.bits = 0; + space.s.busNum = primaryBus; + space.s.deviceNum = kBridgeSelfDevice; + + return( space ); +} + +inline bool AppleMacRiscPCI::setConfigSpace( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt32 addrCycle; + + offset &= 0xfc; + if( space.s.busNum == primaryBus) { + + if( space.s.deviceNum < kBridgeSelfDevice) + return( false); + + // primary config cycle + addrCycle = ( (1 << space.s.deviceNum) + | (space.s.functionNum << 8) + | offset ); + + } else { + // pass thru config cycle + addrCycle = ( (space.bits) + | offset + | 1 ); + } + + do { + OSWriteSwapInt32( configAddr, 0, addrCycle); + eieio(); + } while( addrCycle != OSReadSwapInt32( configAddr, 0 )); + eieio(); + + return( true ); +} + + +UInt32 AppleMacRiscPCI::configRead32( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt32 data; + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( setConfigSpace( space, offset )) { + + offset = offset & configDataOffsetMask & 4; + + data = OSReadSwapInt32( configData, offset ); + eieio(); + + } else + data = 0xffffffff; + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + + 
return( data ); +} + +void AppleMacRiscPCI::configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( setConfigSpace( space, offset )) { + + offset = offset & configDataOffsetMask & 4; + + OSWriteSwapInt32( configData, offset, data ); + eieio(); + /* read to sync */ + (void) OSReadSwapInt32( configData, offset ); + eieio(); + sync(); + isync(); + } + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +UInt16 AppleMacRiscPCI::configRead16( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt16 data; + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( setConfigSpace( space, offset )) { + + offset = offset & configDataOffsetMask & 6; + + data = OSReadSwapInt16( configData, offset ); + eieio(); + + } else + data = 0xffff; + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + + return( data ); +} + +void AppleMacRiscPCI::configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ) +{ + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( setConfigSpace( space, offset )) { + + offset = offset & configDataOffsetMask & 6; + + OSWriteSwapInt16( configData, offset, data ); + eieio(); + /* read to sync */ + (void) OSReadSwapInt16( configData, offset ); + eieio(); + sync(); + isync(); + } + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +UInt8 AppleMacRiscPCI::configRead8( IOPCIAddressSpace space, + UInt8 offset ) +{ + UInt16 data; + IOInterruptState ints; + + ints = IOSimpleLockLockDisableInterrupt( lock ); + + if( setConfigSpace( space, offset )) { + + offset = offset & configDataOffsetMask; + + data = configData[ offset ]; + eieio(); + + } else + data = 0xff; + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); + + return( data ); +} + +void AppleMacRiscPCI::configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) +{ + IOInterruptState ints; + + ints = 
IOSimpleLockLockDisableInterrupt( lock ); + + if( setConfigSpace( space, offset )) { + + offset = offset & configDataOffsetMask; + + configData[ offset ] = data; + eieio(); + /* read to sync */ + data = configData[ offset ]; + eieio(); + sync(); + isync(); + } + + IOSimpleLockUnlockEnableInterrupt( lock, ints ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super AppleMacRiscPCI + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleMacRiscAGP::configure( IOService * provider ) +{ + if( !findPCICapability( getBridgeSpace(), kIOPCIAGPCapability, &targetAGPRegisters )) + return( false ); + + return( super::configure( provider)); +} + +IOPCIDevice * AppleMacRiscAGP::createNub( OSDictionary * from ) +{ + IOPCIDevice * nub; + IOPCIAddressSpace space; + bool isAGP; + UInt8 masterAGPRegisters; + + spaceFromProperties( from, &space); + + isAGP = ( (space.s.deviceNum != getBridgeSpace().s.deviceNum) + && findPCICapability( space, kIOPCIAGPCapability, &masterAGPRegisters )); + + if( isAGP) { + nub = new IOAGPDevice; + if( nub) + ((IOAGPDevice *)nub)->masterAGPRegisters = masterAGPRegisters; + from->setObject( kIOAGPBusFlagsKey, getProperty(kIOAGPBusFlagsKey)); + } else + nub = super::createNub( from ); + + return( nub ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn AppleMacRiscAGP::createAGPSpace( IOAGPDevice * master, + IOOptionBits options, + IOPhysicalAddress * address, + IOPhysicalLength * length ) +{ + IOReturn err; + IOPCIAddressSpace target = getBridgeSpace(); + IOPhysicalLength agpLength; + IOPhysicalAddress gartPhys; + + enum { agpSpacePerPage = 4 * 1024 * 1024 }; + enum { agpBytesPerGartByte = 1024 }; + enum { alignLen = 4 * 1024 * 1024 - 1 }; + + destroyAGPSpace( master ); + + agpLength = *length; + if( !agpLength) + agpLength = 32 * 1024 * 1024; + + agpLength = (agpLength + alignLen) & ~alignLen; + + err = 
kIOReturnVMError; + do { + + gartLength = agpLength / agpBytesPerGartByte; + gartArray = (volatile UInt32 *) IOMallocContiguous( + gartLength, 4096, &gartPhys ); + if( !gartArray) + continue; + // IOMapPages( kernel_map, gartArray, gartPhys, gartLength, kIOMapInhibitCache ); + bzero( (void *) gartArray, gartLength); + +#if ALLOC_AGP_RANGE + IORangeAllocator * platformRanges + = getPlatform()->getPhysicalRangeAllocator(); + for( agpBaseIndex = 0xf; agpBaseIndex > 0; agpBaseIndex--) { + systemBase = agpBaseIndex * 0x10000000; + if( platformRanges->allocateRange( systemBase, agpLength )) { + systemLength = agpLength; + break; + } + } +#else + agpBaseIndex = 0; + systemBase = 0; + systemLength = agpLength; +#endif + if( !systemLength) + continue; + + agpRange = IORangeAllocator::withRange( agpLength, 4096 ); + if( !agpRange) + continue; + + *address = systemBase; + *length = systemLength; +#if 0 + coarseAddressMask |= (1 << agpBaseIndex); + configWrite32( target, kMacRISCAddressSelect, + (coarseAddressMask << 16) | fineAddressMask ); +#endif + configWrite32( target, kUniNAGP_BASE, agpBaseIndex << 28 ); + + assert( 0 == (gartPhys & 0xfff)); + configWrite32( target, kUniNGART_BASE, + gartPhys | (agpLength / agpSpacePerPage)); + + err = kIOReturnSuccess; + + } while( false ); + + if( kIOReturnSuccess == err) + setAGPEnable( master, true, 0 ); + else + destroyAGPSpace( master ); + + return( err ); +} + +IOReturn AppleMacRiscAGP::getAGPSpace( IOAGPDevice * master, + IOPhysicalAddress * address, + IOPhysicalLength * length ) +{ + if( systemLength) { + + if( address) + *address = systemBase; + if( length) + *length = systemLength; + return( kIOReturnSuccess ); + + } else + return( kIOReturnNotReady ); +} + +IOReturn AppleMacRiscAGP::destroyAGPSpace( IOAGPDevice * master ) +{ + + setAGPEnable( master, false, 0 ); + + if( gartArray) { + IOFreeContiguous( (void *) gartArray, gartLength); + gartArray = 0; + } + if( agpRange) { + agpRange->release(); + agpRange = 0; + } + if( 
systemLength) { +#if ALLOC_AGP_RANGE + IORangeAllocator * platformRanges + = getPlatform()->getPhysicalRangeAllocator(); + platformRanges->deallocate( systemBase, systemLength); +#endif + systemLength = 0; + } + + return( kIOReturnSuccess ); +} + +IORangeAllocator * AppleMacRiscAGP::getAGPRangeAllocator( + IOAGPDevice * master ) +{ +// if( agpRange) agpRange->retain(); + return( agpRange ); +} + +IOOptionBits AppleMacRiscAGP::getAGPStatus( IOAGPDevice * master, + IOOptionBits options = 0 ) +{ + IOPCIAddressSpace target = getBridgeSpace(); + + return( configRead32( target, kUniNINTERNAL_STATUS ) ); +} + +IOReturn AppleMacRiscAGP::resetAGPDevice( IOAGPDevice * master, + IOOptionBits options = 0 ) +{ + IOReturn ret; + + if( master->masterState & kIOAGPStateEnablePending) { + ret = setAGPEnable( master, true, 0 ); + master->masterState &= ~kIOAGPStateEnablePending; + } else + ret = kIOReturnSuccess; + + return( ret ); +} + +IOReturn AppleMacRiscAGP::commitAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ) +{ + IOPCIAddressSpace target = getBridgeSpace(); + IOReturn err = kIOReturnSuccess; + UInt32 offset = 0; + IOPhysicalAddress physAddr; + IOByteCount len; + +// ok = agpRange->allocate( memory->getLength(), &agpOffset ); + + assert( agpOffset < systemLength ); + agpOffset /= (page_size / 4); + while( (physAddr = memory->getPhysicalSegment( offset, &len ))) { + + offset += len; + len = (len + 0xfff) & ~0xfff; + while( len > 0) { + OSWriteLittleInt32( gartArray, agpOffset, + ((physAddr & ~0xfff) | 1)); + agpOffset += 4; + physAddr += page_size; + len -= page_size; + } + } +#if 1 + flush_dcache( (vm_offset_t) gartArray, gartLength, false); + len = OSReadLittleInt32( gartArray, agpOffset - 4 ); + sync(); + isync(); +#endif + + if( kIOAGPGartInvalidate & options) { + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_INV ); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + configWrite32( target, 
kUniNGART_CTRL, kGART_EN | kGART_2xRESET); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + } + + return( err ); +} + +IOReturn AppleMacRiscAGP::releaseAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options ) +{ + IOPCIAddressSpace target = getBridgeSpace(); + IOReturn err = kIOReturnSuccess; + IOByteCount length; + + if( !memory) + return( kIOReturnBadArgument ); + + length = memory->getLength(); + + if( (agpOffset + length) > systemLength) + return( kIOReturnBadArgument ); + +// agpRange->deallocate( agpOffset, length ); + + length = (length + 0xfff) & ~0xfff; + agpOffset /= page_size; + while( length > 0) { + gartArray[ agpOffset++ ] = 0; + length -= page_size; + } +#if 1 + flush_dcache( (vm_offset_t) gartArray, gartLength, false); + length = OSReadLittleInt32( gartArray, 4 * (agpOffset - 1) ); + sync(); + isync(); +#endif + + if( kIOAGPGartInvalidate & options) { + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_INV ); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_2xRESET); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + } + + return( err ); +} + +IOReturn AppleMacRiscAGP::setAGPEnable( IOAGPDevice * _master, + bool enable, IOOptionBits options ) +{ + IOReturn err = kIOReturnSuccess; + IOPCIAddressSpace target = getBridgeSpace(); + IOPCIAddressSpace master = _master->space; + UInt32 command; + UInt32 targetStatus, masterStatus; + UInt8 masterAGPRegisters = _master->masterAGPRegisters; + + if( enable) { + + targetStatus = configRead32( target, + targetAGPRegisters + kIOPCIConfigAGPStatusOffset ); + masterStatus = configRead32( master, + masterAGPRegisters + kIOPCIConfigAGPStatusOffset ); + + command = kIOAGPSideBandAddresssing + | kIOAGP4xDataRate | kIOAGP2xDataRate | kIOAGP1xDataRate; + command &= targetStatus; + command &= masterStatus; + + if( command & kIOAGP4xDataRate) + command &= ~(kIOAGP2xDataRate | 
kIOAGP1xDataRate); + else if( command & kIOAGP2xDataRate) + command &= ~(kIOAGP1xDataRate); + else if( 0 == (command & kIOAGP1xDataRate)) + return( kIOReturnUnsupported ); + + command |= kIOAGPEnable; + + if( targetStatus > masterStatus) + targetStatus = masterStatus; + command |= (targetStatus & kIOAGPRequestQueueMask); + +#if 1 + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_INV ); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_2xRESET); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); +#endif + do { + configWrite32( target, targetAGPRegisters + kIOPCIConfigAGPCommandOffset, command ); + } while( (command & kIOAGPEnable) != + (kIOAGPEnable & configRead32( target, targetAGPRegisters + kIOPCIConfigAGPCommandOffset))); + + do { + configWrite32( master, + masterAGPRegisters + kIOPCIConfigAGPCommandOffset, command ); + } while( (command & kIOAGPEnable) != + (kIOAGPEnable & configRead32( master, + masterAGPRegisters + kIOPCIConfigAGPCommandOffset))); + + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_INV ); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_2xRESET); + configWrite32( target, kUniNGART_CTRL, kGART_EN ); + + _master->masterState |= kIOAGPStateEnabled; + + } else { + + while( 0 == (kIOAGPIdle & configRead32( getBridgeSpace(), + kUniNINTERNAL_STATUS ))) + {} + + configWrite32( master, masterAGPRegisters + kIOPCIConfigAGPCommandOffset, 0 ); + configWrite32( target, targetAGPRegisters + kIOPCIConfigAGPCommandOffset, 0 ); +#if 0 + configWrite32( target, kUniNGART_CTRL, kGART_EN | kGART_INV ); + configWrite32( target, kUniNGART_CTRL, 0 ); + configWrite32( target, kUniNGART_CTRL, kGART_2xRESET); + configWrite32( target, kUniNGART_CTRL, 0 ); +#endif + _master->masterState &= ~kIOAGPStateEnabled; + } + + return( err ); +} + +IOReturn AppleMacRiscAGP::saveDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ) 
+{ + IOReturn ret; + IOAGPDevice * agpDev; + UInt32 agpSave[3]; + IOPCIAddressSpace target = getBridgeSpace(); + + if( (agpDev = OSDynamicCast( IOAGPDevice, device))) { + agpSave[0] = configRead32( target, kUniNAGP_BASE ); + agpSave[1] = configRead32( target, kUniNGART_BASE ); + agpSave[2] = configRead32( target, targetAGPRegisters + kIOPCIConfigAGPCommandOffset ); + setAGPEnable( agpDev, false, 0 ); + } + + ret = super::saveDeviceState( device, options); + + if( agpDev && (ret == kIOReturnSuccess)) { + agpDev->savedConfig[ kUniNAGP_BASE / 4 ] = agpSave[0]; + agpDev->savedConfig[ kUniNGART_BASE / 4 ] = agpSave[1]; + agpDev->savedConfig[ (targetAGPRegisters + kIOPCIConfigAGPCommandOffset) / 4 ] = agpSave[2]; + } + + return( ret ); +} + +IOReturn AppleMacRiscAGP::restoreDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ) +{ + IOReturn ret; + IOAGPDevice * agpDev; + UInt32 agpSave[3]; + IOPCIAddressSpace target = getBridgeSpace(); + + agpDev = OSDynamicCast( IOAGPDevice, device); + if( agpDev && device->savedConfig) { + agpSave[0] = agpDev->savedConfig[ kUniNAGP_BASE / 4 ]; + agpSave[1] = agpDev->savedConfig[ kUniNGART_BASE / 4 ]; + agpSave[2] = agpDev->savedConfig[ (targetAGPRegisters + kIOPCIConfigAGPCommandOffset) / 4 ]; + } + + ret = super::restoreDeviceState( device, options); + + if( agpDev && (kIOReturnSuccess == ret)) { + configWrite32( target, kUniNAGP_BASE, agpSave[0] ); + configWrite32( target, kUniNGART_BASE, agpSave[1] ); + // soon, grasshopper + if( kIOAGPEnable & agpSave[2]) + agpDev->masterState |= kIOAGPStateEnablePending; + else + agpDev->masterState &= ~kIOAGPStateEnablePending; + } + + return( ret ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.h b/iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.h new file mode 100644 index 000000000..8e044d9a3 --- /dev/null +++ b/iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.h @@ -0,0 +1,180 @@ +/* + * 
Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + + +#ifndef _IOKIT_APPLEMACRISCPCI_H +#define _IOKIT_APPLEMACRISCPCI_H + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +enum { + kBridgeSelfDevice = 11 +}; + +enum { + kMacRISCAddressSelect = 0x48 +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleMacRiscPCI : public IOPCIBridge +{ + OSDeclareDefaultStructors(AppleMacRiscPCI) + +protected: + IOSimpleLock * lock; + IODeviceMemory * ioMemory; + IOMemoryMap * configAddrMap; + IOMemoryMap * configDataMap; + + volatile UInt32 * configAddr; + volatile UInt8 * configData; + + UInt16 coarseAddressMask; + UInt16 fineAddressMask; + UInt8 primaryBus; + UInt8 configDataOffsetMask; + + inline bool setConfigSpace( IOPCIAddressSpace space, UInt8 offset ); + virtual UInt8 firstBusNum( void ); + virtual UInt8 lastBusNum( void ); + +public: + virtual bool start( IOService * provider ); + virtual bool configure( IOService * provider ); + + virtual void free(); + + virtual IODeviceMemory * ioDeviceMemory( void ); + + virtual UInt32 configRead32( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); + + virtual IOPCIAddressSpace getBridgeSpace( void ); +}; + +class AppleMacRiscVCI : public AppleMacRiscPCI +{ + OSDeclareDefaultStructors(AppleMacRiscVCI) + +public: + virtual bool configure( IOService * provider ); + + virtual IODeviceMemory * ioDeviceMemory( void ); + +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Definitions of UniNorth Target config registers */ +enum { + kUniNGART_BASE = 0x8c, + 
kUniNAGP_BASE = 0x90, + kUniNGART_CTRL = 0x94, + kUniNINTERNAL_STATUS = 0x98 +}; +enum { + kGART_INV = 0x00000001, + kGART_EN = 0x00000100, + kGART_2xRESET = 0x00010000 +}; + +class IORangeAllocator; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleMacRiscAGP : public AppleMacRiscPCI +{ + OSDeclareDefaultStructors(AppleMacRiscAGP) + +protected: + IORangeAllocator * agpRange; + UInt32 agpBaseIndex; + IOPhysicalAddress systemBase; + IOPhysicalLength systemLength; + volatile UInt32 * gartArray; + IOByteCount gartLength; + UInt8 targetAGPRegisters; + +private: + virtual IOReturn setAGPEnable( IOAGPDevice * master, bool enable, + IOOptionBits options = 0 ); + +public: + + virtual bool configure( IOService * provider ); + + virtual IOPCIDevice * createNub( OSDictionary * from ); + + virtual IOReturn saveDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ); + virtual IOReturn restoreDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ); + + virtual IOReturn createAGPSpace( IOAGPDevice * master, + IOOptionBits options, + IOPhysicalAddress * address, + IOPhysicalLength * length ); + + virtual IOReturn destroyAGPSpace( IOAGPDevice * master ); + + virtual IORangeAllocator * getAGPRangeAllocator( IOAGPDevice * master ); + + virtual IOOptionBits getAGPStatus( IOAGPDevice * master, + IOOptionBits options = 0 ); + virtual IOReturn resetAGPDevice( IOAGPDevice * master, + IOOptionBits options = 0 ); + + virtual IOReturn getAGPSpace( IOAGPDevice * master, + IOPhysicalAddress * address, + IOPhysicalLength * length ); + + virtual IOReturn commitAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ); + + virtual IOReturn releaseAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#endif /* ! 
_IOKIT_APPLEMACRISCPCI_H */ + diff --git a/iokit/Drivers/platform/drvAppleCuda/AppleCuda.cpp b/iokit/Drivers/platform/drvAppleCuda/AppleCuda.cpp new file mode 100644 index 000000000..f88334968 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/AppleCuda.cpp @@ -0,0 +1,1381 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. 
+ * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ + +/* 1 April 1997 Simon Douglas: + * Stolen wholesale from MkLinux. + * Added nonblocking adb poll from interrupt level for the debugger. + * Acknowledge before response so polled mode can work from inside the adb handler. + * + * 18 June 1998 sdouglas + * Start IOKit version. Fix errors from kCudaSRQAssertMask. Use ool cmd & reply buffers, + * not fixed len in packet. Does queueing here. 
+ * + * 20 Nov 1998 suurballe + * Port to C++ + */ + + +#include "AppleCuda.h" +#include "IOCudaADBController.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#define super IOService +OSDefineMetaClassAndStructors(AppleCuda,IOService) + +static void cuda_interrupt ( AppleCuda * self ); + +static void cuda_process_response(AppleCuda * self); +static void cuda_transmit_data(AppleCuda * self); +static void cuda_expected_attention(AppleCuda * self); +static void cuda_unexpected_attention(AppleCuda * self); +static void cuda_receive_data(AppleCuda * self); +static void cuda_receive_last_byte(AppleCuda * self); +static void cuda_collision(AppleCuda * self); +static void cuda_idle(AppleCuda * self); + +static void cuda_poll(AppleCuda * self); +static void cuda_error(AppleCuda * self); +static void cuda_send_request(AppleCuda * self); +static IOReturn cuda_do_sync_request( AppleCuda * self, + cuda_request_t * request, bool polled); +static void cuda_do_state_transition_delay(AppleCuda * self); + +static int Cuda_PE_poll_input(unsigned int options, char * c); +static int Cuda_PE_read_write_time_of_day(unsigned int options, long * secs); +static int Cuda_PE_halt_restart(unsigned int type); +static int Cuda_PE_write_IIC(unsigned char addr, unsigned char reg, + unsigned char data); + +static void +autopollArrived ( OSObject *inCuda, IOInterruptEventSource *, int ); + +static int set_cuda_power_message ( int command ); +static int set_cuda_file_server_mode ( int command ); +static void cuda_async_set_power_message_enable( thread_call_param_t param, thread_call_param_t ); +static void cuda_async_set_file_server_mode( thread_call_param_t param, thread_call_param_t ) ; + +bool CudahasRoot( OSObject * us, void *, IOService * yourDevice ); + + +// +// inline functions +// + +static __inline__ unsigned char cuda_read_data(AppleCuda * self) +{ + volatile unsigned char val; + + val = *self->cuda_via_regs.shift; eieio(); + return val; +} + +static 
__inline__ int cuda_get_result(cuda_request_t *request) +{ + int status = ADB_RET_OK; + int theStatus = request->a_reply.a_header[1]; + + if ( theStatus & kCudaTimeOutMask ) { + status = ADB_RET_TIMEOUT; +#if 0 + // these are expected before autopoll mask is set + } else if ( theStatus & kCudaSRQAssertMask ) { + status = ADB_RET_UNEXPECTED_RESULT; +#endif + } else if ( theStatus & kCudaSRQErrorMask ) { + status = ADB_RET_REQUEST_ERROR; + } else if ( theStatus & kCudaBusErrorMask ) { + status = ADB_RET_BUS_ERROR; + } + + return status; +} + +static __inline__ void cuda_lock(AppleCuda * self) +{ + if( !self->cuda_polled_mode) + IOSimpleLockLock(self->cuda_request_lock); +} + +static __inline__ void cuda_unlock(AppleCuda * self) +{ + if( !self->cuda_polled_mode) + IOSimpleLockUnlock(self->cuda_request_lock); +} + +// +// +// + + +static AppleCuda * gCuda; +// ********************************************************************************** +// init +// +// ********************************************************************************** +bool AppleCuda::init ( OSDictionary * properties = 0 ) +{ +return super::init(properties); +} + + +// ********************************************************************************** +// start +// +// ********************************************************************************** +bool AppleCuda::start ( IOService * nub ) +{ +int i; +IOMemoryMap * viaMap; +unsigned char * cuda_base; + +if( !super::start(nub)) + return false; + +gCuda = this; + // callPlatformFunction symbols + cuda_check_any_interrupt = OSSymbol::withCString("cuda_check_any_interrupt"); + +workLoop = NULL; +eventSrc = NULL; +ourADBinterface = NULL; +_rootDomain = 0; + +workLoop = IOWorkLoop::workLoop(); +if ( !workLoop ) { + kprintf("Start is bailing\n"); + return false; +} + +eventSrc = IOInterruptEventSource::interruptEventSource(this, autopollArrived); +if (!eventSrc || + kIOReturnSuccess != workLoop->addEventSource(eventSrc) ) { + kprintf("Start is 
bailing\n"); + return false; +} + +if( 0 == (viaMap = nub->mapDeviceMemoryWithIndex( 0 )) ) { + IOLog("%s: no via memory\n", getName()); + kprintf("Start is bailing\n"); + return false; +} +cuda_base = (unsigned char *)viaMap->getVirtualAddress(); + +kprintf("VIA base = %08x\n", (UInt32)cuda_base); +ourADBinterface = new IOCudaADBController; +if ( !ourADBinterface ) { + kprintf("Start is bailing\n"); + return false; +} +if ( !ourADBinterface->init(0,this) ) { + kprintf("Start is bailing\n"); + return false; +} + +if ( !ourADBinterface->attach( this) ) { + kprintf("Start is bailing\n"); + return false; +} + +cuda_request_lock = IOSimpleLockAlloc(); +IOSimpleLockInit(cuda_request_lock); + +cuda_via_regs.dataB = cuda_base; +cuda_via_regs.handshakeDataA = cuda_base+0x0200; +cuda_via_regs.dataDirectionB = cuda_base+0x0400; +cuda_via_regs.dataDirectionA = cuda_base+0x0600; +cuda_via_regs.timer1CounterLow = cuda_base+0x0800; +cuda_via_regs.timer1CounterHigh = cuda_base+0x0A00; +cuda_via_regs.timer1LatchLow = cuda_base+0x0C00; +cuda_via_regs.timer1LatchHigh = cuda_base+0x0E00; +cuda_via_regs.timer2CounterLow = cuda_base+0x1000; +cuda_via_regs.timer2CounterHigh = cuda_base+0x1200; +cuda_via_regs.shift = cuda_base+0x1400; +cuda_via_regs.auxillaryControl = cuda_base+0x1600; +cuda_via_regs.peripheralControl = cuda_base+0x1800; +cuda_via_regs.interruptFlag = cuda_base+0x1A00; +cuda_via_regs.interruptEnable = cuda_base+0x1C00; +cuda_via_regs.dataA = cuda_base+0x1E00; + +// we require delays of this duration between certain state transitions +clock_interval_to_absolutetime_interval(200, 1, &cuda_state_transition_delay); + +// Set the direction of the cuda signals. ByteACk and TIP are output and +// TREQ is an input + +*cuda_via_regs.dataDirectionB |= (kCudaByteAcknowledgeMask | kCudaTransferInProgressMask); +*cuda_via_regs.dataDirectionB &= ~kCudaTransferRequestMask; + +// Set the clock control. Set to shift data in by external clock CB1. 
+ +*cuda_via_regs.auxillaryControl = (*cuda_via_regs.auxillaryControl | kCudaTransferMode) & + kCudaSystemRecieve; + +// Clear any possible cuda interrupt. + +if ( *cuda_via_regs.shift ); + +// Initialize the internal data. + +cuda_interrupt_state = CUDA_STATE_IDLE; +cuda_transaction_state = CUDA_TS_NO_REQUEST; +cuda_is_header_transfer = false; +cuda_is_packet_type = false; +cuda_transfer_count = 0; +cuda_current_response = NULL; +for( i = 0; i < NUM_AP_BUFFERS; i++ ) { + cuda_unsolicited[ i ].a_buffer = cuda_autopoll_buffers[ i ]; +} + +// Terminate transaction and set idle state + +cuda_neg_tip_and_byteack(this); + +// we want to delay 4 mS for ADB reset to complete + +IOSleep( 4 ); + +// Clear pending interrupt if any... + +(void)cuda_read_data(this); + +// Issue a Sync Transaction, ByteAck asserted while TIP is negated. + +cuda_assert_byte_ack(this); + +// Wait for the Sync acknowledgement, cuda to assert TREQ + +cuda_wait_for_transfer_request_assert(this); + +// Wait for the Sync acknowledgement interrupt. + +cuda_wait_for_interrupt(this); + +// Clear pending interrupt + +(void)cuda_read_data(this); + +// Terminate the sync cycle by Negating ByteAck + +cuda_neg_byte_ack(this); + +// Wait for the Sync termination acknowledgement, cuda negates TREQ. + +cuda_wait_for_transfer_request_neg(this); + +// Wait for the Sync termination acknowledgement interrupt. + +cuda_wait_for_interrupt(this); + +// Terminate transaction and set idle state, TIP negate and ByteAck negate. +cuda_neg_transfer_in_progress(this); + +// Clear pending interrupt, if there is one... 
+(void)cuda_read_data(this); + +#if 0 + cuda_polled_mode = true; +#else +#define VIA_DEV_CUDA 2 +nub->registerInterrupt(VIA_DEV_CUDA, + this, (IOInterruptAction) cuda_interrupt); +nub->enableInterrupt(VIA_DEV_CUDA); +#endif + +PE_poll_input = Cuda_PE_poll_input; +PE_read_write_time_of_day = Cuda_PE_read_write_time_of_day; +PE_halt_restart = Cuda_PE_halt_restart; +PE_write_IIC = Cuda_PE_write_IIC; +publishResource( "IOiic0", this ); +publishResource( "IORTC", this ); + + +//set_cuda_power_message(kADB_powermsg_enable); //won't work on beige G3 +thread_call_func(cuda_async_set_power_message_enable, (thread_call_param_t)this, true); +thread_call_func(cuda_async_set_file_server_mode, (thread_call_param_t)this, true); + + registerService(); //Gossamer needs to find this driver for waking up G3 + + _cuda_power_state = 1; //default is wake state + //We want to know when sleep is about to occur + addNotification( gIOPublishNotification,serviceMatching("IOPMrootDomain"), + (IOServiceNotificationHandler)CudahasRoot, this, 0 ); + +ourADBinterface->start( this ); + +return true; +} + +/* Here are some power management functions so we can tell when system is + going to sleep. */ +bool CudahasRoot( OSObject * us, void *, IOService * yourDevice ) +{ + if (( yourDevice != NULL ) && ((AppleCuda *)us)->_rootDomain == 0) + { + ((AppleCuda *)us)->_rootDomain = (IOPMrootDomain *) yourDevice; + ((IOPMrootDomain *)yourDevice)->registerInterestedDriver((IOService *) us); + } + return true; +} + +IOReturn AppleCuda::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned long unused1, + IOService* unused2) +{ +//kprintf("will change to %x", theFlags); + if ( ! 
(theFlags & IOPMPowerOn) ) + { + _cuda_power_state = 0; //0 means sleeping + } + return IOPMAckImplied; +} + +IOReturn AppleCuda::powerStateDidChangeTo ( IOPMPowerFlags theFlags, unsigned long unused1, + IOService* unused2) +{ +//kprintf("did change to %x", theFlags); + if (theFlags & IOPMPowerOn) + { + _cuda_power_state = 1; //1 means awake + } + return IOPMAckImplied; +} + + + +// ***************************************************************************** +// getWorkLoop +// +// Return the cuda's workloop. +// +// ***************************************************************************** +IOWorkLoop *AppleCuda::getWorkLoop() const +{ + return workLoop; +} + +// ***************************************************************************** +// free +// +// Release everything we may have allocated. +// +// ***************************************************************************** +void AppleCuda::free ( void ) +{ +if ( workLoop ) { + workLoop->release(); +} +if ( eventSrc ) { + eventSrc->release(); +} +if ( ourADBinterface ) { + ourADBinterface->release(); +} + if (_rootDomain) + { + _rootDomain->deRegisterInterestedDriver((IOService *) this); + _rootDomain = 0; + } +super::free(); +} + + +// ********************************************************************************** +// registerForADBInterrupts +// +// Some driver is calling to say it is prepared to receive "unsolicited" adb +// interrupts (e.g. autopoll keyboard and trackpad data). The parameters identify +// who to call when we get one. 
+// ********************************************************************************** +void AppleCuda::registerForADBInterrupts ( ADB_callback_func handler, IOService * caller ) +{ +autopoll_handler = handler; +ADBid = caller; +} + + +// ********************************************************************************** +// autopollArrived +// +// ********************************************************************************** +static void autopollArrived ( OSObject * CudaDriver, IOInterruptEventSource *, int ) +{ +((AppleCuda *)CudaDriver)->serviceAutopolls(); +} + +#define RB_BOOT 1 /* Causes reboot, not halt. Is in xnu/bsd/sys/reboot.h */ +extern "C" { + void boot(int paniced, int howto, char * command); +} + + +static void cuda_async_set_power_message_enable( thread_call_param_t param, thread_call_param_t ) +{ + //AppleCuda * me = (AppleCuda *) param; + + set_cuda_power_message(kADB_powermsg_enable); +} + +static void cuda_async_set_file_server_mode( thread_call_param_t param, thread_call_param_t ) +{ + set_cuda_file_server_mode(1); +} + +// ********************************************************************************** +// serviceAutopolls +// We get here just before calling autopollHandler() in IOADBController.cpp +// ********************************************************************************** +void AppleCuda::serviceAutopolls ( void ) +{ +cuda_packet_t * response; + + while( inIndex != outIndex ) { + + response = &cuda_unsolicited[ outIndex ]; + + //Check for power messages, which are handled differently from regular + // autopoll data coming from mouse or keyboard. 
+ if (response->a_header[0] == ADB_PACKET_POWER) + { + unsigned char flag, cmd; + + flag = response->a_header[1]; + cmd = response->a_header[2]; + + if ((flag == kADB_powermsg_flag_chassis) + && (cmd == kADB_powermsg_cmd_chassis_off)) + { + thread_call_func(cuda_async_set_power_message_enable, + (thread_call_param_t)this, true); + + if (_rootDomain) + { + if (_cuda_power_state) + { + //Put system to sleep now + _rootDomain->receivePowerNotification (kIOPMSleepNow); + } + else //If asleep, wake up the system + { + //Tickle activity timer in root domain. This will not + // wake up machine that is in demand-sleep, but it will + // wake up an inactive system that dozed + _rootDomain->activityTickle(0,0); + } + } + } + else if ((flag == kADB_powermsg_flag_keyboardpwr) + && (cmd == kADB_powermsg_cmd_keyboardoff)) + { + //set_cuda_power_message(kADB_powermsg_continue); + //This needs to by async so Beige G3 ADB won't lock up + thread_call_func(cuda_async_set_power_message_enable, + (thread_call_param_t)this, true); + } + + } + + if ( ADBid != NULL ) { + (*autopoll_handler)(ADBid,response->a_header[2],response->a_bcount,response->a_buffer); + } + + outIndex = (outIndex + 1) & (NUM_AP_BUFFERS - 1); + + } //end of while loop + +} + + +// ********************************************************************************** +// doSyncRequest +// +// ********************************************************************************** +IOReturn AppleCuda::doSyncRequest ( cuda_request_t * request ) +{ +return(cuda_do_sync_request(this, request, false)); +} + + +IOReturn AppleCuda::callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4) +{ + if (functionName == cuda_check_any_interrupt) + { + bool *hasint; + + hasint = (bool *)param1; + *hasint = false; + + if (inIndex != outIndex) + { + *hasint = true; + } + return kIOReturnSuccess; + } + + return kIOReturnBadArgument; +} + + +// 
********************************************************************************** +// cuda_do_sync_request +// +// ********************************************************************************** +IOReturn cuda_do_sync_request ( AppleCuda * self, cuda_request_t * request, bool polled ) +{ + bool wasPolled = false; + IOInterruptState ints; + + if( !polled ) { + request->sync = IOSyncer::create(); + request->needWake = true; + } + + ints = IOSimpleLockLockDisableInterrupt(self->cuda_request_lock); + + if( polled ) { + wasPolled = self->cuda_polled_mode; + self->cuda_polled_mode = polled; + } + + if( self->cuda_last_request ) + self->cuda_last_request->a_next = request; + else + self->cuda_request = request; + + self->cuda_last_request = request; + + if( self->cuda_interrupt_state == CUDA_STATE_IDLE ) + cuda_send_request(self); + + if( polled ) { + cuda_poll(self); + self->cuda_polled_mode = wasPolled; + assert( 0 == self->cuda_request ); + assert( 0 == self->cuda_last_request ); + } + + IOSimpleLockUnlockEnableInterrupt(self->cuda_request_lock, ints); + + if( !polled) + request->sync->wait(); + + return cuda_get_result(request); +} + + +// ********************************************************************************** +// Cuda_PE_read_write_time_of_day +// +// ********************************************************************************** +static int Cuda_PE_read_write_time_of_day ( unsigned int options, long * secs ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); + +cmd.a_cmd.a_hcount = 2; +cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; + +switch( options ) { + + case kPEReadTOD: + cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_REAL_TIME; + cmd.a_reply.a_buffer = (UInt8 *)secs; + cmd.a_reply.a_bcount = sizeof(*secs); + break; + + case kPEWriteTOD: + cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_SET_REAL_TIME; + cmd.a_cmd.a_buffer = (UInt8 *)secs; + cmd.a_cmd.a_bcount = sizeof(*secs); + break; + + default: + return 1; +} + +return cuda_do_sync_request(gCuda, &cmd, true); 
+} + + +// ********************************************************************************** +// Cuda_PE_halt_restart +// +// ********************************************************************************** +static int Cuda_PE_halt_restart ( unsigned int type ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); + +cmd.a_cmd.a_hcount = 2; +cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; + +switch( type ) { + + case kPERestartCPU: + cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_RESTART_SYSTEM; + break; + + case kPEHaltCPU: + cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_POWER_DOWN; + break; + + default: + return 1; + } + +return cuda_do_sync_request(gCuda, &cmd, true); +} + + +// ********************************************************************************** +// In case this machine loses power, it will automatically reboot when power is +// restored. Only desktop machines have Cuda, so this feature will not affect +// PowerBooks. +// ********************************************************************************** +static int set_cuda_file_server_mode ( int command ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); + +cmd.a_cmd.a_hcount = 3; +cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; +cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_FILE_SERVER_FLAG; +cmd.a_cmd.a_header[2] = command; + +return cuda_do_sync_request(gCuda, &cmd, true); +} + +// ********************************************************************************** +// Fix front panel power key (mostly on Yosemites) so that one press won't power +// down the entire machine +// +// ********************************************************************************** +static int set_cuda_power_message ( int command ) +{ +cuda_request_t cmd; + +if (command >= kADB_powermsg_invalid) + return 0; //invalid Cuda power request + +adb_init_request(&cmd); + +cmd.a_cmd.a_hcount = 3; +cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; +cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_SET_POWER_MESSAGES; +cmd.a_cmd.a_header[2] = command; + +return 
cuda_do_sync_request(gCuda, &cmd, true); +} + + +// ********************************************************************************** +// Cuda_PE_write_IIC +// +// ********************************************************************************** +static int Cuda_PE_write_IIC ( unsigned char addr, unsigned char reg, unsigned char data ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); + +cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; +cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_SET_IIC; +cmd.a_cmd.a_header[2] = addr; +cmd.a_cmd.a_header[3] = reg; +cmd.a_cmd.a_header[4] = data; +cmd.a_cmd.a_hcount = 5; + +return cuda_do_sync_request(gCuda, &cmd, true); +} + +IOReturn +AppleCudaWriteIIC( UInt8 address, const UInt8 * buffer, IOByteCount * count ) +{ + IOReturn ret; + cuda_request_t cmd; + + if( !gCuda) + return( kIOReturnUnsupported ); + + adb_init_request(&cmd); + + cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; + cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_SET_IIC; + cmd.a_cmd.a_header[2] = address; + cmd.a_cmd.a_hcount = 3; + cmd.a_cmd.a_buffer = (UInt8 *) buffer; + cmd.a_cmd.a_bcount = *count; + + ret = cuda_do_sync_request(gCuda, &cmd, true); + + *count = cmd.a_cmd.a_bcount; + + return( ret ); +} + +IOReturn +AppleCudaReadIIC( UInt8 address, UInt8 * buffer, IOByteCount * count ) +{ + IOReturn ret; + cuda_request_t cmd; + + if( !gCuda) + return( kIOReturnUnsupported ); + + adb_init_request(&cmd); + + cmd.a_cmd.a_header[0] = ADB_PACKET_PSEUDO; + cmd.a_cmd.a_header[1] = ADB_PSEUDOCMD_GET_SET_IIC; + cmd.a_cmd.a_header[2] = address; + cmd.a_cmd.a_hcount = 3; + cmd.a_reply.a_buffer = buffer; + cmd.a_reply.a_bcount = *count; + + ret = cuda_do_sync_request(gCuda, &cmd, true); + *count = cmd.a_reply.a_bcount; + + return( ret ); +} + + +// ********************************************************************************** +// Cuda_PE_poll_input +// +// ********************************************************************************** +static int Cuda_PE_poll_input ( unsigned int, 
char * c ) +{ +AppleCuda * self = gCuda; +int interruptflag; +UInt8 code; +cuda_packet_t * response; //0123456789abcdef +static char keycodes2ascii[] = "asdfhgzxcv_bqwer" //00 + "yt123465=97-80]o" //10 + "u[ip\nlj'k;_,/nm." //20 + "\t_"; //30 + +*c = 0xff; + +if( !self ) { + return 1; +} + +self->cuda_polled_mode = true; +interruptflag = *self->cuda_via_regs.interruptFlag & kCudaInterruptMask; +eieio(); +if( interruptflag ) { + cuda_interrupt(self); +} + +if( self->inIndex != self->outIndex ) { + response = &self->cuda_unsolicited[ self->outIndex ]; + if( ((response->a_header[2] >> 4) == 2) + && (response->a_bcount > 1) ) { + code = response->a_buffer[0]; + if( code < sizeof(keycodes2ascii) ) { + *c = keycodes2ascii[ code ]; + } + } + self->outIndex = self->inIndex; +} + +self->cuda_polled_mode = false; +return 0; +} + + +// +// internal +// + + +// ********************************************************************************** +// cuda_send_request +// +// ********************************************************************************** +static void cuda_send_request ( AppleCuda * self ) +{ + + // The data register must written with the data byte 25uS + // after examining TREQ or we run the risk of getting out of sync + // with Cuda. So call with disabled interrupts and spinlock held. + + // Check if we can commence with the packet transmission. First, check if + // Cuda can service our request now. Second, check if Cuda wants to send + // a response packet now. + +if( !cuda_is_transfer_in_progress(self) ) { + // Set the shift register direction to output to Cuda by setting + // the direction bit. + + cuda_set_data_direction_to_output(self); + + // Write the first byte to the shift register + cuda_write_data(self, self->cuda_request->a_cmd.a_header[0]); + + // Set up the transfer state info here. 
+ + self->cuda_is_header_transfer = true; + self->cuda_transfer_count = 1; + + // Make sure we're in idle state before transaction, and then + // assert TIP to tell Cuda we're starting command + cuda_neg_byte_ack(self); + cuda_assert_transfer_in_progress(self); + + // The next state is going to be a transmit state, if there is + // no collision. This is a requested response but call it sync. + + self->cuda_interrupt_state = CUDA_STATE_TRANSMIT_EXPECTED; + self->cuda_transaction_state = CUDA_TS_SYNC_RESPONSE; +} + +#if 0 +else { + IOLog("Req = %x, state = %x, TIP = %x\n", self->cuda_request, + self->cuda_interrupt_state, cuda_is_transfer_in_progress(self)); +} +#endif +} + + +// ********************************************************************************** +// cuda_poll +// +// ********************************************************************************** +static void cuda_poll( AppleCuda * self ) +{ + do { + cuda_wait_for_interrupt(self); + cuda_interrupt(self); + } while( self->cuda_interrupt_state != CUDA_STATE_IDLE ); +} + +// +// cuda_process_response +// Execute at secondary interrupt. +// + + +// ********************************************************************************** +// cuda_process_response +// +// ********************************************************************************** +static void cuda_process_response ( AppleCuda * self ) +{ +volatile cuda_request_t * request; +unsigned int newIndex; + + // Almost ready for the next state, which should be a Idle state. + // Just need to notifiy the client. 
+ +if ( self->cuda_transaction_state == CUDA_TS_SYNC_RESPONSE ) { + + // dequeue reqeuest + cuda_lock(self); + request = self->cuda_request; + if( NULL == (self->cuda_request = request->a_next) ) { + self->cuda_last_request = NULL; + } + cuda_unlock(self); + + // wake the sync request thread + if ( ((cuda_request_t *)request)->needWake ) { + ((cuda_request_t *)request)->sync->signal(); + } + +} +else { + if ( self->cuda_transaction_state == CUDA_TS_ASYNC_RESPONSE ) { + newIndex = (self->inIndex + 1) & (NUM_AP_BUFFERS - 1); + if( newIndex != self->outIndex ) { + self->inIndex = newIndex; + } + else { + // drop this packet, and reuse the buffer + } + if ( !self->cuda_polled_mode ) { + // wake thread to service autopolls + self->eventSrc->interruptOccurred(0, 0, 0); + } + } +} +return; +} + + +// ********************************************************************************** +// cuda_interrupt +// +// ********************************************************************************** +static void cuda_interrupt ( AppleCuda * self ) +{ +unsigned char interruptState; + + // Get the relevant signal in determining the cause of the interrupt: + // the shift direction, the transfer request line and the transfer + // request line. + +interruptState = cuda_get_interrupt_state(self); + +//kprintf("%02x",interruptState); + +switch ( interruptState ) { + case kCudaReceiveByte: + cuda_receive_data(self); + break; + + case kCudaReceiveLastByte: + cuda_receive_last_byte(self); + break; + + case kCudaTransmitByte: + cuda_transmit_data(self); + break; + + case kCudaUnexpectedAttention: + cuda_unexpected_attention(self); + break; + + case kCudaExpectedAttention: + cuda_expected_attention(self); + break; + + case kCudaIdleState: + cuda_idle(self); + break; + + case kCudaCollision: + cuda_collision(self); + break; + + // Unknown interrupt, clear it and leave. + default: + cuda_error(self); + break; +} +} + +// +// TransmitCudaData +// Executes at hardware interrupt level. 
+// + +// ********************************************************************************** +// cuda_transmit_data +// +// ********************************************************************************** +static void cuda_transmit_data ( AppleCuda * self ) +{ + // Clear the pending interrupt by reading the shift register. + +if ( self->cuda_is_header_transfer ) { + // There are more header bytes, write one out. + cuda_write_data(self, self->cuda_request->a_cmd.a_header[self->cuda_transfer_count++]); + + // Toggle the handshake line. + if ( self->cuda_transfer_count >= self->cuda_request->a_cmd.a_hcount ) { + self->cuda_is_header_transfer = FALSE; + self->cuda_transfer_count = 0; + } + + cuda_toggle_byte_ack( self); +} +else { + if ( self->cuda_transfer_count < self->cuda_request->a_cmd.a_bcount ) { + // There are more command bytes, write one out and update the pointer + cuda_write_data( self, + *(self->cuda_request->a_cmd.a_buffer + self->cuda_transfer_count++)); + // Toggle the handshake line. + cuda_toggle_byte_ack(self); + } + else { + (void)cuda_read_data(self); + // There is no more command bytes, terminate the send transaction. + // Cuda should send a expected attention interrupt soon. + + cuda_neg_tip_and_byteack(self); + + // The next interrupt should be a expected attention interrupt. + + self->cuda_interrupt_state = CUDA_STATE_ATTN_EXPECTED; + } +} +} + +// +// cuda_expected_attention +// Executes at hardware interrupt level. +// + + +// ********************************************************************************** +// cuda_expected_attention +// +// ********************************************************************************** +static void cuda_expected_attention ( AppleCuda * self ) +{ + // Clear the pending interrupt by reading the shift register. + +(void)cuda_read_data(self); + + // Allow the VIA to settle directions.. else the possibility of + // data corruption. 
+cuda_do_state_transition_delay(self); + +if ( self->cuda_transaction_state == CUDA_TS_SYNC_RESPONSE ) { + self->cuda_current_response = (cuda_packet_t*)&self->cuda_request->a_reply; +} +else { + self->cuda_current_response = &self->cuda_unsolicited[ self->inIndex ]; + self->cuda_current_response->a_hcount = 0; + self->cuda_current_response->a_bcount = MAX_AP_RESPONSE; +} + +self->cuda_is_header_transfer = true; +self->cuda_is_packet_type = true; +self->cuda_transfer_count = 0; + + // Set the shift register direction to input. +cuda_set_data_direction_to_input(self); + + // Start the response packet transaction. +cuda_assert_transfer_in_progress(self); + + // The next interrupt should be a receive data interrupt. +self->cuda_interrupt_state = CUDA_STATE_RECEIVE_EXPECTED; +} + +// +// cuda_unexpected_attention +// Executes at hardware interrupt level. +// + + +// ********************************************************************************** +// cuda_expected_attention +// +// ********************************************************************************** +static void cuda_unexpected_attention ( AppleCuda * self ) +{ + // Clear the pending interrupt by reading the shift register. +(void)cuda_read_data(self); + + // Get ready for a unsolicited response. +self->cuda_current_response = &self->cuda_unsolicited[ self->inIndex ]; +self->cuda_current_response->a_hcount = 0; +self->cuda_current_response->a_bcount = MAX_AP_RESPONSE; + +self->cuda_is_header_transfer = TRUE; +self->cuda_is_packet_type = TRUE; +self->cuda_transfer_count = 0; + + // Start the response packet transaction, Transaction In Progress +cuda_assert_transfer_in_progress(self); + + // The next interrupt should be a receive data interrupt and the next + // response should be an async response. + +self->cuda_interrupt_state = CUDA_STATE_RECEIVE_EXPECTED; + +self->cuda_transaction_state = CUDA_TS_ASYNC_RESPONSE; +} + +// +// cuda_receive_data +// Executes at hardware interrupt level. 
+// + + +// ********************************************************************************** +// cuda_receive_data +// +// ********************************************************************************** +static void cuda_receive_data ( AppleCuda * self ) +{ +if ( self->cuda_is_packet_type ) { + unsigned char packetType; + + packetType = cuda_read_data( self); + self->cuda_current_response->a_header[self->cuda_transfer_count++] = packetType; + + if ( packetType == ADB_PACKET_ERROR) { + self->cuda_current_response->a_hcount = 4; + } + else { + self->cuda_current_response->a_hcount = 3; + } + + self->cuda_is_packet_type = false; + + cuda_toggle_byte_ack(self); + +} +else { + + + if ( self->cuda_is_header_transfer ) { + + self->cuda_current_response->a_header[self->cuda_transfer_count++] = + cuda_read_data(self); + + if (self->cuda_transfer_count >= self->cuda_current_response->a_hcount) { + self->cuda_is_header_transfer = FALSE; + self->cuda_transfer_count = 0; + } + + cuda_toggle_byte_ack(self); + } + else { + if ( self->cuda_transfer_count < self->cuda_current_response->a_bcount ) { + // Still room for more bytes. Get the byte and tell Cuda to continue. + // Toggle the handshake line, ByteAck, to acknowledge receive. + + *(self->cuda_current_response->a_buffer + self->cuda_transfer_count++) = + cuda_read_data(self); + cuda_toggle_byte_ack(self); + + } + else { + // Cuda is still sending data but the buffer is full. + // Normally should not get here. The only exceptions are open ended + // request such as PRAM read... In any event time to exit. + + self->cuda_current_response->a_bcount = self->cuda_transfer_count; + + cuda_read_data(self); + + cuda_process_response(self); + cuda_neg_tip_and_byteack(self); + } + } +} +} + + +// +// cuda_receive_last_byte +// Executes at hardware interrupt level. 
+// + + +// ********************************************************************************** +// cuda_receive_last_byte +// +// ********************************************************************************** +static void cuda_receive_last_byte ( AppleCuda * self ) +{ + +if ( self->cuda_is_header_transfer ) { + self->cuda_current_response->a_header[self->cuda_transfer_count++] = + cuda_read_data(self); + + self->cuda_transfer_count = 0; + } +else { + if ( self->cuda_transfer_count < self->cuda_current_response->a_bcount ) { + *(self->cuda_current_response->a_buffer + self->cuda_transfer_count++) = + cuda_read_data(self); + } + else { + /* Overrun -- ignore data */ + (void) cuda_read_data(self); + } +} +self->cuda_current_response->a_bcount = self->cuda_transfer_count; + // acknowledge before response so polled mode can work + // from inside the handler +cuda_neg_tip_and_byteack(self); +cuda_process_response(self); +} + + +// +// cuda_collision +// Executes at hardware interrupt level. +// + + +// ********************************************************************************** +// cuda_collision +// +// ********************************************************************************** +static void cuda_collision ( AppleCuda * self ) +{ +// Clear the pending interrupt by reading the shift register. +(void)cuda_read_data(self); + +// Negate TIP to abort the send. Cuda should send a second attention +// interrupt to acknowledge the abort cycle. +cuda_neg_transfer_in_progress(self); + +// The next interrupt should be an expected attention and the next +// response packet should be an async response. + +self->cuda_interrupt_state = CUDA_STATE_ATTN_EXPECTED; +self->cuda_transaction_state = CUDA_TS_ASYNC_RESPONSE; + +/* queue the request */ +self->cuda_is_header_transfer = false; +self->cuda_transfer_count = 0; +} + + +// +// +// Executes at hardware interrupt level. 
+// + + +// ********************************************************************************** +// cuda_idle +// +// ********************************************************************************** +static void cuda_idle ( AppleCuda * self ) +{ + +// Clear the pending interrupt by reading the shift register. +(void)cuda_read_data(self); + +cuda_lock(self); + // Set to the idle state. +self->cuda_interrupt_state = CUDA_STATE_IDLE; + // See if there are any pending requests. +if( self->cuda_request ) { + cuda_send_request(self); +} +cuda_unlock(self); +} + + +// ********************************************************************************** +// cuda_error +// +// ********************************************************************************** +static void cuda_error ( AppleCuda * self ) +{ +//printf("{Error %d}", self->cuda_transaction_state); + +// Was looking at cuda_transaction_state - doesn't seem right + +switch ( self->cuda_interrupt_state ) { + case CUDA_STATE_IDLE: + cuda_neg_tip_and_byteack(self); + break; + + case CUDA_STATE_TRANSMIT_EXPECTED: + if ( self->cuda_is_header_transfer && self->cuda_transfer_count <= 1 ) { + cuda_do_state_transition_delay(self); + cuda_neg_transfer_in_progress(self); + cuda_set_data_direction_to_input(self); + panic ("CUDA - TODO FORCE COMMAND BACK UP!\n"); + } + else { + self->cuda_interrupt_state = CUDA_STATE_ATTN_EXPECTED; + cuda_neg_tip_and_byteack(self); + } + break; + + case CUDA_STATE_ATTN_EXPECTED: + cuda_assert_transfer_in_progress(self); + + cuda_do_state_transition_delay(self); + cuda_set_data_direction_to_input(self); + cuda_neg_transfer_in_progress(self); + panic("CUDA - TODO CHECK FOR TRANSACTION TYPE AND ERROR"); + break; + + case CUDA_STATE_RECEIVE_EXPECTED: + cuda_neg_tip_and_byteack(self); + panic("Cuda - todo check for transaction type and error"); + break; + + default: + cuda_set_data_direction_to_input(self); + cuda_neg_tip_and_byteack(self); + break; +} +} + +static void 
cuda_do_state_transition_delay( AppleCuda * self ) +{ + AbsoluteTime deadline; + + clock_absolutetime_interval_to_deadline( + self->cuda_state_transition_delay, &deadline); + clock_delay_until(deadline); +} diff --git a/iokit/Drivers/platform/drvAppleCuda/AppleCuda.h b/iokit/Drivers/platform/drvAppleCuda/AppleCuda.h new file mode 100644 index 000000000..f5d19bfc8 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/AppleCuda.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. 
+ * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ + +/* 1 April 1997 Simon Douglas: + * Stolen wholesale from MkLinux. + * Added nonblocking adb poll from interrupt level for the debugger. + * Acknowledge before response so polled mode can work from inside the adb handler. + * + * 18 June 1998 sdouglas + * Start IOKit version. Fix errors from kCudaSRQAssertMask. Use ool cmd & reply buffers, + * not fixed len in packet. Does queueing here. 
+ * + * 20 Nov 1998 suurballe + * Port to C++ + */ + + +#include + +#include + +extern "C" { +#include +} +#include +#include "AppleCudaCommands.h" +#include "AppleCudaHW.h" +#include +#include + +// +// CudaInterruptState - internal to CudaCore.c +// + +enum CudaInterruptState +{ + CUDA_STATE_INTERRUPT_LIMBO = -1, // + CUDA_STATE_IDLE = 0, // + CUDA_STATE_ATTN_EXPECTED = 1, // + CUDA_STATE_TRANSMIT_EXPECTED = 2, // + CUDA_STATE_RECEIVE_EXPECTED = 3 // +}; + +typedef enum CudaInterruptState CudaInterruptState; + +// +// CudaTransactionFlag - internal to CudaCore.c +// + +enum CudaTransactionFlag +{ + CUDA_TS_NO_REQUEST = 0x0000, + CUDA_TS_SYNC_RESPONSE = 0x0001, + CUDA_TS_ASYNC_RESPONSE = 0x0002 +}; + +typedef enum CudaTransactionFlag CudaTransactionFlag; + +//typedef void (* ADB_input_func)(IOService * obj_id, UInt8 * buffer, UInt32 length, UInt8 command); + +class IOCudaADBController; +class IOInterruptEventSource; +class IOWorkLoop; + + +class AppleCuda: public IOService +{ +OSDeclareDefaultStructors(AppleCuda) + +private: + +IOService * cudaDevice; +IOWorkLoop * workLoop; +IOService * ADBid; +IOCudaADBController * ourADBinterface; +ADB_callback_func autopoll_handler; +UInt8 _cuda_power_state; +// callPlatformFunction symbols +const OSSymbol *cuda_check_any_interrupt; + + // number of autopoll buffers between interrupt and thread +#define NUM_AP_BUFFERS (1<<3) + // max adb register size for autopoll +#define MAX_AP_RESPONSE (8) + +unsigned char cuda_autopoll_buffers[ NUM_AP_BUFFERS ] + [ MAX_AP_RESPONSE ]; + +protected: + +virtual void free( void ); + +public: + +VIARegisterAddress cuda_via_regs; +bool cuda_polled_mode; +IOSimpleLock * cuda_request_lock; +volatile cuda_request_t * cuda_request; // head of todo queue +volatile cuda_request_t * cuda_last_request; // tail of todo queue +volatile CudaInterruptState cuda_interrupt_state; +volatile unsigned int inIndex; +volatile unsigned int outIndex; +volatile CudaTransactionFlag cuda_transaction_state; 
+cuda_packet_t cuda_unsolicited[ NUM_AP_BUFFERS ]; +bool cuda_is_header_transfer; +int cuda_transfer_count; +IOInterruptEventSource * eventSrc; +cuda_packet_t * cuda_current_response; +bool cuda_is_packet_type; +AbsoluteTime cuda_state_transition_delay; +IOPMrootDomain * _rootDomain; + +bool init ( OSDictionary * properties = 0 ); +bool start ( IOService * ); +virtual IOWorkLoop *getWorkLoop() const; +void serviceAutopolls ( void ); +void registerForADBInterrupts ( ADB_callback_func handler, IOService * caller ); +IOReturn doSyncRequest ( cuda_request_t * request ); +IOReturn powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService*); +IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService*); +virtual IOReturn callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4); + +}; + diff --git a/iokit/Drivers/platform/drvAppleCuda/AppleCudaCommands.h b/iokit/Drivers/platform/drvAppleCuda/AppleCudaCommands.h new file mode 100644 index 000000000..e9a76845f --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/AppleCudaCommands.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. 
+ * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ + +/* + * 18 June 1998 sdouglas + * Start IOKit version. + */ + +struct cuda_packet { + int a_hcount; + unsigned char a_header[8]; + int a_bcount; /* on entry size, on exit, actual */ + unsigned char * a_buffer; /* ool data */ +}; + +typedef struct cuda_packet cuda_packet_t; + +#ifdef __cplusplus +class IOSyncer; + +struct cuda_request { + cuda_packet_t a_cmd; /* Command packet */ + cuda_packet_t a_reply; /* Reply packet */ + volatile struct cuda_request* a_next; + IOSyncer * sync; + bool needWake; +}; + +typedef struct cuda_request cuda_request_t; + +#else + +struct cuda_request { + cuda_packet_t a_cmd; /* Command packet */ + cuda_packet_t a_reply; /* Reply packet */ + volatile struct cuda_request* a_next; + void * sync; +}; + +typedef struct cuda_request cuda_request_t; + +#endif + + +/* + * ADB Packet Types + */ + +#define ADB_PACKET_ADB 0 +#define ADB_PACKET_PSEUDO 1 +#define ADB_PACKET_ERROR 2 +#define ADB_PACKET_TIMER 3 +#define ADB_PACKET_POWER 4 +#define ADB_PACKET_MACIIC 5 + +/* + * ADB Device Commands + */ + +#define ADB_ADBCMD_RESET_BUS 0x00 +#define ADB_ADBCMD_FLUSH_ADB 0x01 +#define ADB_ADBCMD_WRITE_ADB 0x08 +#define ADB_ADBCMD_READ_ADB 0x0c + +/* + * ADB Pseudo Commands + */ + +#define ADB_PSEUDOCMD_WARM_START 0x00 +#define ADB_PSEUDOCMD_START_STOP_AUTO_POLL 0x01 +#define ADB_PSEUDOCMD_GET_6805_ADDRESS 0x02 +#define ADB_PSEUDOCMD_GET_REAL_TIME 0x03 +#define ADB_PSEUDOCMD_GET_PRAM 0x07 +#define ADB_PSEUDOCMD_SET_6805_ADDRESS 0x08 +#define ADB_PSEUDOCMD_SET_REAL_TIME 0x09 +#define ADB_PSEUDOCMD_POWER_DOWN 0x0a +#define ADB_PSEUDOCMD_SET_POWER_UPTIME 0x0b +#define ADB_PSEUDOCMD_SET_PRAM 0x0c 
+#define ADB_PSEUDOCMD_MONO_STABLE_RESET 0x0d +#define ADB_PSEUDOCMD_SEND_DFAC 0x0e +#define ADB_PSEUDOCMD_BATTERY_SWAP_SENSE 0x10 +#define ADB_PSEUDOCMD_RESTART_SYSTEM 0x11 +#define ADB_PSEUDOCMD_SET_IPL_LEVEL 0x12 +#define ADB_PSEUDOCMD_FILE_SERVER_FLAG 0x13 +#define ADB_PSEUDOCMD_SET_AUTO_RATE 0x14 +#define ADB_PSEUDOCMD_GET_AUTO_RATE 0x16 +#define ADB_PSEUDOCMD_SET_DEVICE_LIST 0x19 +#define ADB_PSEUDOCMD_GET_DEVICE_LIST 0x1a +#define ADB_PSEUDOCMD_SET_ONE_SECOND_MODE 0x1b +#define ADB_PSEUDOCMD_SET_POWER_MESSAGES 0x21 +#define ADB_PSEUDOCMD_GET_SET_IIC 0x22 +#define ADB_PSEUDOCMD_ENABLE_DISABLE_WAKEUP 0x23 +#define ADB_PSEUDOCMD_TIMER_TICKLE 0x24 +#define ADB_PSEUDOCMD_COMBINED_FORMAT_IIC 0X25 + +/* + * Following values to be used with ADB_PSEUDOCMD_SET_POWER_MESSAGES + */ +enum { + kADB_powermsg_disable = 0, + kADB_powermsg_enable, + kADB_powermsg_suspend, + kADB_powermsg_continue, + kADB_powermsg_debugger, + kADB_powermsg_timed_ADB, + kADB_powermsg_timed_power, + kADB_powermsg_invalid +}; + +//These constants are used to parse Cuda power message response +// packets, to see which selector transitioned +enum { + kADB_powermsg_flag_rotary = 0x20, + kADB_powermsg_flag_chassis = 0x02, + kADB_powermsg_flag_keyboardpwr = 0x04, + kADB_powermsg_cmd_chassis_off = 0x00, + kADB_powermsg_cmd_keyboardoff = 0x04, + kADB_powermsg_cmd_keyboardtimed = 0x00, + kADB_powermsg_cmd_rotary_lock = 0x01, + kADB_powermsg_cmd_rotary_unlock = 0x02 +}; + + +/* + * Macros to help build commands up + */ + +#define ADB_BUILD_CMD1(c, p1) {(c)->a_cmd.a_header[0] = p1; (c)->a_cmd.a_hcount = 1; } +#define ADB_BUILD_CMD2(c, p1, p2) {(c)->a_cmd.a_header[0] = p1; (c)->a_cmd.a_header[1] = p2; (c)->a_cmd.a_hcount = 2; } +#define ADB_BUILD_CMD3(c, p1, p2, p3) {(c)->a_cmd.a_header[0] = p1; (c)->a_cmd.a_header[1] = p2; (c)->a_cmd.a_header[2] = p3; (c)->a_cmd.a_hcount = 3; } + +#define ADB_BUILD_CMD4(c, p1, p2, p3, p4) {(c)->a_cmd.a_header[0] = p1; (c)->a_cmd.a_header[1] = p2; \ + (c)->a_cmd.a_header[2] 
= p3; (c)->a_cmd.a_header[3] = p4; (c)->a_cmd.a_hcount = 4; } +#if 0 +#define ADB_BUILD_CMD2_BUFFER(c, p1, p2, len, buf) {(c)->a_cmd.a_header[0] = p1; (c)->a_cmd.a_header[1] = p2; (c)->a_cmd.a_hcount = 2;\ + (c)->a_cmd.a_bcount = len;\ + memcpy(&(c)->a_cmd.a_buffer, buf, len); } + +#endif + +#define adb_init_request(a) { bzero((char *) a, sizeof(*a)); } + + + diff --git a/iokit/Drivers/platform/drvAppleCuda/AppleCudaHW.h b/iokit/Drivers/platform/drvAppleCuda/AppleCudaHW.h new file mode 100644 index 000000000..5c4ceb50a --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/AppleCudaHW.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 
1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * 18 June 1998 sdouglas + * Start IOKit version. 
+ */ + +#include "AppleVIA6522.h" + +typedef struct VIARegisterAddress VIARegisterAddress; + +struct VIARegisterAddress +{ + volatile unsigned char* dataB; + volatile unsigned char* handshakeDataA; + volatile unsigned char* dataDirectionB; + volatile unsigned char* dataDirectionA; + volatile unsigned char* timer1CounterLow; + volatile unsigned char* timer1CounterHigh; + volatile unsigned char* timer1LatchLow; + volatile unsigned char* timer1LatchHigh; + volatile unsigned char* timer2CounterLow; + volatile unsigned char* timer2CounterHigh; + volatile unsigned char* shift; + volatile unsigned char* auxillaryControl; + volatile unsigned char* peripheralControl; + volatile unsigned char* interruptFlag; + volatile unsigned char* interruptEnable; + volatile unsigned char* dataA; +}; + + + +// Cuda to VIA signal definition. They are all active low. + +enum +{ + kCudaTransferRequestMask = EVRB_XCVR, // TREQ (input) + kCudaNegateTransferRequest = EVRB_XCVR, // TREQ + kCudaAssertTransferRequest = ~EVRB_XCVR, // /TREQ + + kCudaByteAcknowledgeMask = EVRB_FULL, // ByteAck (output) + kCudaNegateByteAcknowledge = EVRB_FULL, // ByteAck + kCudaAssertByteAcknowledge = ~EVRB_FULL, // /ByteAck + + kCudaTransferInProgressMask = EVRB_SYSES, // TIP (output) + kCudaNegateTransferInProgress = EVRB_SYSES, // TIP + kCudaAssertTransferInProgress = ~EVRB_SYSES, // /TIP + + kCudaTransferMode = VAC_SRMD3, // + + kCudaDirectionMask = VAC_SRMD4, // + kCudaSystemSend = VAC_SRMD4, // + kCudaSystemRecieve = ~VAC_SRMD4, // + + kCudaInterruptMask = VIE_SR, + kCudaInterruptDisable = VIE_CLEAR | VIE_SR, + kCudaInterruptEnable = VIE_SET | VIE_SR +}; + +// The bits from Cuda that determines the cause of an interrupt + +enum +{ + kCudaInterruptStateMask = kCudaTransferInProgressMask | + kCudaTransferRequestMask +}; + +// Interrupt states. Determined by kTransferRequest, kTransferInProgress and +// kCudaDirection. The names are from the view of the system. 
+ +enum +{ + kCudaReceiveByte = 0, // 0x00 + kCudaReceiveLastByte = kCudaNegateTransferRequest, // 0x08 + kCudaCollision = kCudaSystemSend, // 0x10 + kCudaTransmitByte = kCudaSystemSend | + kCudaNegateTransferRequest, // 0x18 + kCudaUnexpectedAttention = kCudaNegateTransferInProgress,// 0x20 + kCudaIdleState = kCudaNegateTransferInProgress | + kCudaNegateTransferRequest, // 0x28 + kCudaExpectedAttention = kCudaSystemSend | + kCudaNegateTransferInProgress,// 0x30 + kCudaIllegalState = kCudaSystemSend | + kCudaNegateTransferInProgress | + kCudaNegateTransferRequest // 0x38 +}; + +enum +{ + kCudaSRQAssertMask = 0x01, // inactive device asserted SRQ + kCudaTimeOutMask = 0x02, // active device did not have data available + kCudaSRQErrorMask = 0x04, // device asserted excessive SRQ period + kCudaBusErrorMask = 0x08, // timing error in bit cell was detected + kCudaAutoPollMask = 0x40, // data is from an AutoPoll + kCudaResponseMask = 0x80 // response Packet in progress +}; + +#define cuda_write_data(self,theByte) {*self->cuda_via_regs.shift = theByte; eieio(); } +#define cuda_set_data_direction_to_input(self) {*self->cuda_via_regs.auxillaryControl &= kCudaSystemRecieve; eieio(); } +#define cuda_set_data_direction_to_output(self) {*self->cuda_via_regs.auxillaryControl |= kCudaSystemSend; eieio(); } +#define cuda_assert_transfer_in_progress(self) {*self->cuda_via_regs.dataB &= kCudaAssertTransferInProgress; eieio(); } +#define cuda_neg_transfer_in_progress(self) {*self->cuda_via_regs.dataB |= kCudaNegateTransferInProgress; eieio(); } +#define cuda_neg_tip_and_byteack(self) {*self->cuda_via_regs.dataB |= kCudaNegateByteAcknowledge | kCudaNegateTransferInProgress; eieio(); } +#define cuda_toggle_byte_ack(self) {*self->cuda_via_regs.dataB ^= kCudaByteAcknowledgeMask; eieio(); } +#define cuda_assert_byte_ack(self) {*self->cuda_via_regs.dataB &= kCudaAssertByteAcknowledge; eieio(); } +#define cuda_neg_byte_ack(self) {*self->cuda_via_regs.dataB |= kCudaNegateByteAcknowledge; 
eieio(); } +#define cuda_is_transfer_in_progress(self) ((*self->cuda_via_regs.dataB & kCudaTransferRequestMask) == 0 ) +#define cuda_disable_interrupt(self) {*self->cuda_via_regs.interruptEnable = kCudaInterruptDisable; eieio(); } +#define cuda_enable_interrupt(self) {*self->cuda_via_regs.interruptEnable = kCudaInterruptEnable; eieio(); } +#define cuda_get_interrupt_state(self) (*self->cuda_via_regs.dataB & kCudaInterruptStateMask) | \ + (*self->cuda_via_regs.auxillaryControl & kCudaDirectionMask) +#define cuda_wait_for_transfer_request_assert(self) while ( (*self->cuda_via_regs.dataB & kCudaTransferRequestMask) != 0 ) { eieio(); } ; eieio() +#define cuda_wait_for_transfer_request_neg(self) while ( (*self->cuda_via_regs.dataB & kCudaTransferRequestMask) == 0 ) { eieio(); } ; eieio() +#define cuda_wait_for_interrupt(self) while ( (*self->cuda_via_regs.interruptFlag & kCudaInterruptMask) == 0 ) { eieio(); } ; eieio() diff --git a/iokit/Drivers/platform/drvAppleCuda/AppleVIA6522.h b/iokit/Drivers/platform/drvAppleCuda/AppleVIA6522.h new file mode 100644 index 000000000..a6cd3947c --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/AppleVIA6522.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. 
+ * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * 18 June 1998 sdouglas + * Start IOKit version. + */ + + +/* + File: via6522.h + + Contains: xxx put contents here xxx + + Written by: xxx put writers here xxx + + Copyright: © 1993, 1995 by Apple Computer, Inc., all rights reserved. + + Change History (most recent first): + + <1> 2/22/95 AM First checked in. + <1> 04/04/94 MRN First checked in. + +*/ + +/* + * Copyright 1987-91 Apple Computer, Inc. + * All Rights Reserved. + */ + +#ifndef __VIA6522_H__ +#define __VIA6522_H__ + +/* + * Synertek SY6522 VIA Versatile Interface Adapter + */ + +/* + * This has been modified to address BOTH the via and RBV registers, + * because we know that both chips ignore part of the address, thus + * only responding correctly. It's ugly, but the ROM does it... + */ + +#if defined(powerc) || defined (__powerc) +#pragma options align=mac68k +#endif + +typedef struct via6522Regs /* VIA / RBV address */ + { + volatile unsigned char vBufB; /* 0000/0000 register b */ + volatile unsigned char RvExp; /* 0001 RBV future expansion */ + volatile unsigned char RvSlotIFR; /* 0002 RBV Slot interrupts reg. */ + volatile unsigned char RvIFR; /* 0003 RBV interrupt flag reg. */ + unsigned char jnk0[ 12 ]; + + volatile unsigned char RvMonP; /* xxxx/0010 RBV video monitor type */ + volatile unsigned char RvChpT; /* xxxx/0011 RBV test mode register */ + volatile unsigned char RvSlotIER; /* xxxx/0012 RBV slot interrupt enables */ + volatile unsigned char RvIER; /* xxxx/0013 RBV interrupt flag enable reg */ + unsigned char jnk1[ 0x1FF - 0x13 ]; + + volatile unsigned char vBufAH; /* 0200 buffer a (with handshake). 
*/ + unsigned char jnk2[ 0x1FF ]; /* Dont use! Here only for completeness */ + + volatile unsigned char vDIRB; /* 0400 data direction register B */ + unsigned char jnk25[ 0x1FF ]; + + volatile unsigned char vDIRA; /* 0600 data direction register A */ + unsigned char jnk3[ 0x1FF ]; + + volatile unsigned char vT1C; /* 0800 timer one low */ + unsigned char jnk4[ 0x1FF ]; + + volatile unsigned char vT1CH; /* 0A00 timer one high */ + unsigned char jnk5[ 0x1FF ]; + + volatile unsigned char vT1L; /* 0C00 timer one latches low */ + unsigned char jnk6[ 0x1FF ]; + + volatile unsigned char vT1LH; /* 0E00 timer one latches high */ + unsigned char jnk7[ 0x1FF ]; + + volatile unsigned char vT2C; /* 1000 timer 2 low */ + unsigned char jnk8[ 0x1FF ]; + + volatile unsigned char vT2CH; /* 1200 timer two counter high */ + unsigned char jnk9[ 0x1FF ]; + + volatile unsigned char vSR; /* 1400 shift register */ + unsigned char jnka[ 0x1FF ]; + + volatile unsigned char vACR; /* 1600 auxilary control register */ + unsigned char jnkb[ 0x1FF ]; + + volatile unsigned char vPCR; /* 1800 peripheral control register */ + unsigned char jnkc[ 0x1FF ]; + + volatile unsigned char vIFR; /* 1A00 interrupt flag register */ + unsigned char jnkd[ 0x1FF ]; + + volatile unsigned char vIER; /* 1C00 interrupt enable register */ + unsigned char jnkf[ 0x1FF ]; + + volatile unsigned char vBufA; /* 1E00 register A, read and write */ + } via6522Regs; + +#if defined(powerc) || defined(__powerc) +#pragma options align=reset +#endif + + +/* Register B contents */ + +#define VRB_POWEROFF 0x04 /* disk head select */ +#define RBV_POWEROFF VRB_POWEROFF +#define VRB_BUSLOCK 0x02 /* NuBus Transactions are locked */ + + +/* Register A contents */ + +#define VRA_DRIVE 0x10 /* drive select */ +#define VRA_HEAD 0x20 /* disk head select */ + + +/* Auxillary control register contents */ + +#define VAC_PAENL 0x01 /* Enable latch for PA */ +#define VAC_PADISL 0x00 /* Disable latch for PA */ +#define VAC_PBENL 0x02 /* Enable latch 
for PA */ +#define VAC_PBDISL 0x00 /* Disable latch for PA */ +#define VAC_SRDIS 0x00 /* Shift Reg Disabled */ +#define VAC_SRMD1 0x04 /* Shift In under control of T2 */ +#define VAC_SRMD2 0x08 /* Shift In under control of Phase 2 */ +#define VAC_SRMD3 0x0C /* Shift in under control of Ext Clk */ +#define VAC_SRMD4 0x10 /* Shift Out free running at T2 rate */ +#define VAC_SRMD5 0x14 /* Shift Out under control of T2 */ +#define VAC_SRMD6 0x18 /* Shift Out under control of theta2 */ +#define VAC_SRMD7 0x1C /* Shift Out under control of Ext Clk */ +#define VAC_T2CTL 0x20 /* Timer two, control */ +#define VAC_T2TI 0x00 /* Timer Two, Timed Interrupt */ +#define VAC_T2CD 0x20 /* Timer Two, count down with pulses on PB6 */ +#define VAC_T1CONT 0x40 /* Timer one, continous counting */ +#define VAC_T11SHOT 0x00 /* Timer One, one shot output */ +#define VAC_T1PB7 0x80 /* Timer one, drives PB7 */ +#define VAC_T1PB7DIS 0x00 /* Timer one, drives PB7 disabled */ + + +/* Interrupt enable register contents */ + +#define VIE_CA2 0x01 /* interrupt on CA2 */ +#define VIE_CA1 0x02 /* interrupt on CA1 */ +#define VIE_SR 0x04 /* Shift Register */ +#define VIE_CB2 0x08 /* interrupt on CB2 */ +#define VIE_CB1 0x10 /* interrupt on CB1 */ +#define VIE_TIM2 0x20 /* timer 2 interrupt */ +#define VIE_TIM1 0x40 /* timer 1 interrupt */ +#define VIE_SET 0x80 /* Set interrupt bits if this is on */ +#define VIE_CLEAR 0x00 /* Clear bits if used */ + +#define VIE_ALL ( VIE_TIM1 | VIE_TIM2 | VIE_CB1 | VIE_CB2 | VIE_SR | VIE_CA1 | VIE_CA2 ) + + +/* VIA Data Direction Register Contents */ + +#define VDR_P7_O 0x80 /* P7 is output */ +#define VDR_P7_I 0x00 /* P7 is input */ +#define VDR_P6_O 0x40 /* P6 is output */ +#define VDR_P6_I 0x00 /* P6 is input */ +#define VDR_P5_O 0x20 /* P5 is output */ +#define VDR_P5_I 0x00 /* P5 is input */ +#define VDR_P4_O 0x10 /* P4 is output */ +#define VDR_P4_I 0x00 /* P4 is input */ +#define VDR_P3_O 0x08 /* P3 is output */ +#define VDR_P3_I 0x00 /* P3 is input */ 
+#define VDR_P2_O 0x04 /* P2 is output */ +#define VDR_P2_I 0x00 /* P2 is input */ +#define VDR_P1_O 0x02 /* P1 is output */ +#define VDR_P1_I 0x00 /* P1 is input */ +#define VDR_P0_O 0x01 /* P0 is output */ +#define VDR_P0_I 0x00 /* P0 is input */ + + +/* VIA1 Register A contents where they differ from standard VIA1 */ + +#define RBV_BURNIN 0x01 /* burnin flag */ +#define RBV_CPUID0 0x02 /* CPU id bit 0 */ +#define RBV_CPUID1 0x04 /* CPU id bit 1 */ +#define RBV_CPUID2 0x10 /* CPU id bit 2 */ +#define RBV_CPUID3 0x40 /* CPU id bit 3 */ + + +/* VIA1 Register B contents where they differ from standard VIA1 */ + +#define RBV_PARDIS 0x40 /* disable parity */ +#define RBV_PAROK 0x80 /* parity OK */ + +#define EVRB_XCVR 0x08 /* XCVR_SESSION* */ +#define EVRB_FULL 0x10 /* VIA_FULL */ +#define EVRB_SYSES 0x20 /* SYS_SESSION */ +#define EVRB_AUXIE 0x00 /* Enable A/UX Interrupt Scheme */ +#define EVRB_AUXID 0x40 /* Disable A/UX Interrupt Scheme */ +#define EVRB_SFTWRIE 0x00 /* Software Interrupt ReQuest */ +#define EVRB_SFTWRID 0x80 /* Software Interrupt ReQuest */ + + +/* VIA2 Register A contents where they differ from standard VIA2 */ + +#define RBV_SZEROIRQ 0x40 /* slot 0 irq */ +#define EVRA_ENETIRQ 0x01 /* Ethernet irq */ +#define EVRA_VIDIRQ 0x40 /* Video irq */ + + +/* VIA2 Register B contents where they differ from standard VIA2 */ + +#define RBV_CDIS 0x01 /* disable external cache */ +#define RBV_CFLUSH 0x08 /* flush external cache */ +#define EVRB_LED 0x10 /* LED */ +#define RBV_PARODD 0x80 /* 1 for odd, 0 for even */ + + +/* Video monitor parameters: */ +#define RBV_DEPTH 0x07 /* bits per pixel: 000=1,001=2,010=4,011=8 */ +#define RBV_MONID 0x38 /* monitor type as below */ +#define RBV_VIDOFF 0x40 /* 1 turns off onboard video */ + + +/* Supported video monitor types: */ + +#define MON_15BW ( 1 << 3 ) /* 15" BW portrait */ +#define MON_IIGS ( 2 << 3 ) /* modified IIGS monitor */ +#define MON_15RGB ( 5 << 3 ) /* 15" RGB portrait */ +#define MON_12OR13 ( 6 << 3 ) /* 
12" BW or 13" RGB */ +#define MON_NONE ( 7 << 3 ) /* No monitor attached */ + +#endif /* __VIA6522_H__ */ diff --git a/iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.cpp b/iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.cpp new file mode 100644 index 000000000..677bdfc80 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.cpp @@ -0,0 +1,243 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 1 Dec 1998 suurballe Created. 
+ */ + +#include "IOCudaADBController.h" +#include "AppleCuda.h" + +#define super IOADBController +OSDefineMetaClassAndStructors(IOCudaADBController, IOADBController) + +// ********************************************************************************** +// init +// +// ********************************************************************************** +bool IOCudaADBController::init ( OSDictionary * properties, AppleCuda * driver ) +{ + +CudaDriver = driver; +pollList = 0; +autopollOn = false; + +return super::init(properties); +} + + +// ********************************************************************************** +// start +// +// ********************************************************************************** +bool IOCudaADBController::start ( IOService *nub ) +{ +if( !super::start(nub)) + return false; + +CudaDriver->registerForADBInterrupts ( autopollHandler, this ); +return true; +} + + +// ********************************************************************************** +// setAutoPollPeriod +// +// ********************************************************************************** +IOReturn IOCudaADBController::setAutoPollPeriod ( int microsecs ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD3(&cmd, ADB_PACKET_PSEUDO, ADB_PSEUDOCMD_SET_AUTO_RATE, + ((microsecs + 999) / 1000)); + +return CudaDriver->doSyncRequest(&cmd); +} + + +// ********************************************************************************** +// getAutoPollPeriod +// +// ********************************************************************************** +IOReturn IOCudaADBController::getAutoPollPeriod ( int * microsecs ) +{ +IOReturn err; +cuda_request_t cmd; +UInt8 data; + +adb_init_request(&cmd); +ADB_BUILD_CMD2(&cmd, ADB_PACKET_PSEUDO, ADB_PSEUDOCMD_GET_AUTO_RATE); +cmd.a_reply.a_buffer = &data; +cmd.a_reply.a_bcount = sizeof(UInt8); + +err = CudaDriver->doSyncRequest(&cmd); + +if ( err == kIOReturnSuccess ) { + *microsecs = data * 1000; +} +return err; +} + 
+ +// ********************************************************************************** +// getAutoPollPeriod +// +// ********************************************************************************** +IOReturn IOCudaADBController::setAutoPollList ( UInt16 activeAddressMask ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD2(&cmd, ADB_PACKET_PSEUDO, ADB_PSEUDOCMD_SET_DEVICE_LIST) + +cmd.a_cmd.a_buffer = (UInt8 *) &activeAddressMask; +cmd.a_cmd.a_bcount = sizeof(UInt16); + +return CudaDriver->doSyncRequest(&cmd); +} + + +// ********************************************************************************** +// getAutoPollList +// +// ********************************************************************************** +IOReturn IOCudaADBController::getAutoPollList ( UInt16 * activeAddressMask ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD2(&cmd, ADB_PACKET_PSEUDO, ADB_PSEUDOCMD_GET_DEVICE_LIST); +cmd.a_reply.a_buffer = (UInt8 *) activeAddressMask; +cmd.a_reply.a_bcount = sizeof(UInt16); + +return CudaDriver->doSyncRequest(&cmd); +} + + +// ********************************************************************************** +// setAutoPollEnable +// +// ********************************************************************************** +IOReturn IOCudaADBController::setAutoPollEnable ( bool enable ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD3(&cmd, ADB_PACKET_PSEUDO, ADB_PSEUDOCMD_START_STOP_AUTO_POLL, (enable ? 
1 : 0)); + +return CudaDriver->doSyncRequest(&cmd); +} + + +// ********************************************************************************** +// resetBus +// +// ********************************************************************************** +IOReturn IOCudaADBController::resetBus ( void ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD2(&cmd, ADB_PACKET_ADB, ADB_ADBCMD_RESET_BUS ); + +return CudaDriver->doSyncRequest(&cmd); +} + + +// ********************************************************************************** +// flushDevice +// +// ********************************************************************************** +IOReturn IOCudaADBController::flushDevice ( IOADBAddress address ) +{ +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD2(&cmd, ADB_PACKET_ADB, (ADB_ADBCMD_FLUSH_ADB | (address << 4))); + +return CudaDriver->doSyncRequest(&cmd); +} + + + +// ********************************************************************************** +// readFromDevice +// +// ********************************************************************************** +IOReturn IOCudaADBController::readFromDevice (IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) +{ +IOReturn err; +cuda_request_t cmd; + +adb_init_request(&cmd); +ADB_BUILD_CMD2(&cmd, ADB_PACKET_ADB, + (ADB_ADBCMD_READ_ADB | (address << 4) | (adbRegister & 3))); + +cmd.a_reply.a_buffer = data; +cmd.a_reply.a_bcount = *length; + +err = CudaDriver->doSyncRequest(&cmd); + +//IOLog("Read %d, Addr %x Reg %x = %04x\n", err, address, adbRegister, *((UInt16 *)data)); + +if( err == ADB_RET_OK ) { + *length = cmd.a_reply.a_bcount; +} +else { + *length = 0; +} + +return err; +} + + +// ********************************************************************************** +// writeToDevice +// +// ********************************************************************************** +IOReturn IOCudaADBController::writeToDevice ( IOADBAddress address, IOADBRegister 
adbRegister, + UInt8 * data, IOByteCount * length ) +{ +IOReturn err; +cuda_request_t cmd; + +adb_init_request(&cmd); + +ADB_BUILD_CMD2(&cmd, ADB_PACKET_ADB, + (ADB_ADBCMD_WRITE_ADB | (address << 4) | (adbRegister & 3))); +cmd.a_cmd.a_buffer = data; +cmd.a_cmd.a_bcount = *length; + +err = CudaDriver->doSyncRequest(&cmd); + +//IOLog("Write %d, Addr %x Reg %x = %04x\n", err, address, adbRegister, *((UInt16 *)data)); + +if( err == ADB_RET_OK ) { + *length = cmd.a_reply.a_bcount; +} +else { + *length = 0; +} +return err; +} + diff --git a/iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.h b/iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.h new file mode 100644 index 000000000..8f2d00967 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 1 Dec 1998 suurballe Created. 
+ */ + +#include + +class AppleCuda; + + +class IOCudaADBController : public IOADBController +{ +OSDeclareDefaultStructors(IOCudaADBController) + +private: + +AppleCuda * CudaDriver; +UInt32 pollList; // ADB autopoll device bitmap +bool autopollOn; // TRUE: PMU is autopolling + +public: + +bool init ( OSDictionary * properties, AppleCuda * driver ); +bool start ( IOService * ); +IOReturn setAutoPollPeriod ( int microseconds ); +IOReturn getAutoPollPeriod ( int * microseconds ); +IOReturn setAutoPollList ( UInt16 activeAddressMask ); +IOReturn getAutoPollList ( UInt16 * activeAddressMask ); +IOReturn setAutoPollEnable ( bool enable ); +IOReturn resetBus ( void ); +IOReturn flushDevice ( IOADBAddress address ); +IOReturn readFromDevice ( IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ); +IOReturn writeToDevice ( IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ); +}; diff --git a/iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.cpp b/iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.cpp new file mode 100644 index 000000000..45166a392 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.cpp @@ -0,0 +1,266 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +extern "C" { +#include +} + +#include + +#include +//#include + +#include "Gossamer.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super ApplePlatformExpert + +OSDefineMetaClassAndStructors(GossamerPE, ApplePlatformExpert); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool GossamerPE::start(IOService *provider) +{ + unsigned int tmpVal; + long machineType; + long allInOne; + + setChipSetType(kChipSetTypeGossamer); + + // Set the machine type. + if (IODTMatchNubWithKeys(provider, "'AAPL,Gossamer'")) + machineType = kGossamerTypeGossamer; + else if (IODTMatchNubWithKeys(provider, "'AAPL,PowerMac G3'")) + machineType = kGossamerTypeSilk; + else if (IODTMatchNubWithKeys(provider, "'AAPL,PowerBook1998'")) + machineType = kGossamerTypeWallstreet; + else if (IODTMatchNubWithKeys(provider, "'iMac,1'")) + machineType = kGossamerTypeiMac; + else if (IODTMatchNubWithKeys(provider, "('PowerMac1,1', 'PowerMac1,2')")) + machineType = kGossamerTypeYosemite; + else if (IODTMatchNubWithKeys(provider, "'PowerBook1,1'")) + machineType = kGossamerType101; + else return false; + + setMachineType(machineType); + + // Find out if this an all in one. 
+ allInOne = 0; + if (ml_probe_read(kGossamerMachineIDReg, &tmpVal)) { + switch (getMachineType()) { + case kGossamerTypeGossamer : + case kGossamerTypeSilk : + if (!(tmpVal & kGossamerAllInOneMask)) allInOne = 1; + break; + + case kGossamerTypeiMac : + allInOne = 1; + break; + } + } + if (allInOne) setProperty("AllInOne", this); + + // setup default power mgr features per machine + // NOTE: on Core99 and later hardware, this information + // is available from the "prim-info" property in the power-mgt + // node of the device tree. Prior to that, this information + // was just another hard-coded part of the ROM. + + switch (getMachineType()) { + case kGossamerTypeGossamer: + case kGossamerTypeSilk: + case kGossamerTypeiMac: + case kGossamerTypeYosemite: + _pePMFeatures = kStdDesktopPMFeatures; + _pePrivPMFeatures = kStdDesktopPrivPMFeatures; + _peNumBatteriesSupported = kStdDesktopNumBatteries; + break; + + case kGossamerTypeWallstreet: + _pePMFeatures = kWallstreetPMFeatures; + _pePrivPMFeatures = kWallstreetPrivPMFeatures; + _peNumBatteriesSupported = kStdPowerBookNumBatteries; + break; + + case kGossamerType101: + _pePMFeatures = k101PMFeatures; + _pePrivPMFeatures = k101PrivPMFeatures; + _peNumBatteriesSupported = kStdPowerBookNumBatteries; + break; + } + + return super::start(provider); +} + + +bool GossamerPE::platformAdjustService(IOService *service) +{ + long tmpNum; + OSData *tmpData; + + // Add the extra sound properties for Gossamer AIO + if (getProperty("AllInOne") && + ((getMachineType() == kGossamerTypeGossamer) || + (getMachineType() == kGossamerTypeSilk))) { + if (!strcmp(service->getName(), "sound")) { + tmpNum = 3; + tmpData = OSData::withBytes(&tmpNum, sizeof(tmpNum)); + if (tmpData) { + service->setProperty("#-detects", tmpData); + service->setProperty("#-outputs", tmpData); + tmpData->release(); + } + return true; + } + } + + // Set the loop snoop property for Wallstreet or Mainstreet. 
+ if (getMachineType() == kGossamerTypeWallstreet) { + if (IODTMatchNubWithKeys(service, "('grackle', 'MOT,PPC106')")) { + // Add the property for set loop snoop. + service->setProperty("set-loop-snoop", service); + return true; + } + } + + // Publish out the dual display heads on 101. + if (getMachineType() == kGossamerType101) { + if (!strcmp(service->getName(), "ATY,LTProParent")) { + if (kIOReturnSuccess == IONDRVLibrariesInitialize(service)) { + createNubs(this, service->getChildIterator( gIODTPlane )); + } + return true; + } + } + + return true; +} + +IOReturn GossamerPE::callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4) +{ + if (functionName == gGetDefaultBusSpeedsKey) { + getDefaultBusSpeeds((long *)param1, (unsigned long **)param2); + return kIOReturnSuccess; + } + + return super::callPlatformFunction(functionName, waitForFunction, + param1, param2, param3, param4); +} + +static unsigned long gossamerSpeed[] = { 66820000, 1 }; +static unsigned long yosemiteSpeed[] = { 99730000, 1 }; + +void GossamerPE::getDefaultBusSpeeds(long *numSpeeds, + unsigned long **speedList) +{ + if ((numSpeeds == 0) || (speedList == 0)) return; + + switch (getMachineType()) { + case kGossamerTypeGossamer : + case kGossamerTypeSilk : + *numSpeeds = 1; + *speedList = gossamerSpeed; + break; + + case kGossamerTypeYosemite : + *numSpeeds = 1; + *speedList = yosemiteSpeed; + break; + + default : + *numSpeeds = 0; + *speedList = 0; + break; + } +} + + +//********************************************************************************* +// PMInstantiatePowerDomains +// +// This overrides the vanilla implementation in IOPlatformExpert. 
It instantiates +// a root domain with two children, one for the USB bus (to handle the USB idle +// power budget), and one for the expansions slots on the PCI bus (to handle +// the idle PCI power budget) +//********************************************************************************* + +void GossamerPE::PMInstantiatePowerDomains ( void ) +{ + root = new IOPMrootDomain; + root->init(); + root->attach(this); + root->start(this); + root->youAreRoot(); + +/* All G3s support sleep (or pseudo-sleep) now + if ((getMachineType() == kGossamerType101) || + (getMachineType() == kGossamerTypeWallstreet)) +*/ + root->setSleepSupported(kRootDomainSleepSupported); +} + + +//********************************************************************************* +// PMRegisterDevice +// +// This overrides the vanilla implementation in IOPlatformExpert. +//********************************************************************************* + +//#define DONOTREGISTERATACONTROLLER 1 + +void GossamerPE::PMRegisterDevice(IOService * theNub, IOService * theDevice) +{ +//#ifdef DONOTREGISTERATACONTROLLER + // do not add IOATAStandardDriver to the tree since on this platform they do not need resets +// if (OSDynamicCast(IOATAStandardDriver, theDevice) != NULL) +// return; +//#endif + + // Checks if the nub handles power states, if it does not gets its parent and so + // up until we reach the root, or we do not find anything: + while ((theNub != NULL) && ( theNub->addPowerChild(theDevice) != IOPMNoErr )) { + theNub = theNub->getProvider(); + +//#ifdef DONOTREGISTERATACONTROLLER + // IOATAStandardDriver are detached, and so would be evrething I attach to them so + // their childs go directly on the tree. 
+// if (OSDynamicCast(IOATAStandardDriver, theNub) != NULL) { +// theNub = theNub->getProvider(); +// } +//#endif + } + + if ( theNub == NULL ) { + root->addPowerChild ( theDevice ); + return; + } +} diff --git a/iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.h b/iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.h new file mode 100644 index 000000000..32517503b --- /dev/null +++ b/iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + + +#ifndef _IOKIT_GOSSAMER_H +#define _IOKIT_GOSSAMER_H + +#include + + +enum { + kGossamerTypeGossamer = 1, + kGossamerTypeSilk, + kGossamerTypeWallstreet, + kGossamerTypeiMac, + kGossamerTypeYosemite, + kGossamerType101 +}; + +#define kGossamerMachineIDReg (0xFF000004) +#define kGossamerAllInOneMask (0x00100000) + +class GossamerPE : public ApplePlatformExpert +{ + OSDeclareDefaultStructors(GossamerPE); + +private: + void getDefaultBusSpeeds(long *numSpeeds, + unsigned long **speedList); + + void PMInstantiatePowerDomains ( void ); + void PMRegisterDevice(IOService * theNub, IOService * theDevice); + +public: + virtual bool start(IOService *provider); + virtual bool platformAdjustService(IOService *service); + virtual IOReturn callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4); +}; + + +#endif /* ! _IOKIT_GOSSAMER_H */ diff --git a/iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.cpp b/iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.cpp new file mode 100644 index 000000000..61e6ab5eb --- /dev/null +++ b/iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.cpp @@ -0,0 +1,496 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. + * + */ + +extern "C" { +#include +} + +#include +#include +#include + +#include "GossamerCPU.h" +#include "Gossamer.h" + +extern "C" { +unsigned int ml_throttle(unsigned int step); +int kdp_getc(void); +void machine_idle(void); +} + +// Uncomment the following define to get verbose logs on the sleep/wake cycles +//#define VERBOSE_LOGS_ON + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOCPU + +OSDefineMetaClassAndStructors(GossamerCPU, IOCPU); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +UInt32 GossamerCPU::restartAddress = 0x100; + +IOService *GossamerCPU::findIOInterface(char *name) +{ + OSDictionary *dict; + IOService *service; + + heathrow = NULL; + + // find the dictionary of the Heathrow matches. 
+ dict = serviceMatching(name); + if (dict == NULL) { +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::findIOInterface faild to get a matching dictionary for %s\n", name); +#endif // VERBOSE_LOGS_ON + return NULL; + } + + service = waitForService(dict, NULL); + if (service == NULL) { +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::findIOInterface failed to get a matching service for %s\n", name); +#endif// VERBOSE_LOGS_ON + return NULL; + } + + return (service); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool GossamerCPU::start(IOService *provider) +{ + kern_return_t result; + ml_processor_info_t processor_info; + bool success = super::start(provider); + GossamerPE *gossamerBoard; + + if (!success) + return false; + + // callPlatformFunction symbols + heathrow_sleepState = OSSymbol::withCString("heathrow_sleepState"); + heathrow_set_light = OSSymbol::withCString("heathrow_set_light"); + cuda_check_any_interrupt = OSSymbol::withCString("cuda_check_any_interrupt"); + usb_remote_wakeup = OSSymbol::withCString("usb_remote_wakeup"); + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::start start\n"); +#endif // VERBOSE_LOGS_ON + + // Checks the board: + gossamerBoard = OSDynamicCast(GossamerPE, getPlatform()); + if (gossamerBoard == 0) { +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::start this is not a GossamerPE\n"); +#endif // VERBOSE_LOGS_ON + return false; + } + + cpuIC = new IOCPUInterruptController; + if (cpuIC == 0) + return false; + + if (cpuIC->initCPUInterruptController(1) != kIOReturnSuccess) return false; + cpuIC->attach(this); + + cpuIC->registerCPUInterruptController(); + + processor_info.cpu_id = (cpu_id_t)this; + processor_info.boot_cpu = true; + processor_info.start_paddr = restartAddress; + processor_info.l2cr_value = mfl2cr() & 0x7FFFFFFF; // cache-disabled value + processor_info.supports_nap = false; // doze, do not nap + processor_info.time_base_enable = 0; + + // Register this CPU with mach. 
+ result = ml_processor_register(&processor_info, &machProcessor, + &ipi_handler); + if (result == KERN_FAILURE) + return false; + + setCPUState(kIOCPUStateUninitalized); + + processor_start(machProcessor); + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::start end %d \n", success); +#endif // VERBOSE_LOGS_ON + + registerService(); + + return success; +} + +void GossamerCPU::ipiHandler(void *refCon, void *nub, int source) +{ + // Call mach IPI handler for this CPU. + if (ipi_handler) + ipi_handler(); +} + +void GossamerCPU::initCPU(bool boot) +{ +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::initCPU start\n"); +#endif // VERBOSE_LOGS_ON + + if (grackle != NULL) { + IOPCIAddressSpace grackleSpace; + UInt32 grackleMemConfiguration; + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::initCPU AppleGracklePCI sets the ram in autorefresh off\n"); +#endif // VERBOSE_LOGS_ON + + grackleSpace.bits = 0x80000000; + grackleMemConfiguration = grackle->configRead32(grackleSpace, 0x70); + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::initCPU AppleGracklePCI current power managment mode :0x%08lx\n", grackleMemConfiguration); +#endif // VERBOSE_LOGS_ON + + // Disables NAP and PM + grackleMemConfiguration &= ~(0x90); +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::initCPU AppleGracklePCI new power managment mode :0x%08lx\n", grackleMemConfiguration); +#endif // VERBOSE_LOGS_ON + + grackle->configWrite32(grackleSpace, 0x70, grackleMemConfiguration); + + grackle = NULL; + } + else + kprintf("GossamerCPU::initCPU not found AppleGracklePCI\n"); + + if (heathrow != NULL) { + // we are waking up from sleep so: + heathrow->callPlatformFunction(heathrow_sleepState, false, (void *)false, 0, 0, 0); + heathrow = NULL; + } + else + kprintf("GossamerCPU::initCPU not found Heathrow\n"); + + /* + The following code is commented because the only Gossamer machine with a pci 2 pci Bridge + is the BWG3 and in that machine we do not remove power from the bridge. 
I am however leaving + this code here as reference (and to make clear that it is not running for a reason) + // Restore the PCI-PCI Bridge. + if (pci2pciBridge != NULL) + pci2pciBridge->restoreBridgeState(); */ + + // Restore time base after wake (since CPU's TBR was set to zero during sleep) + if(!boot) + saveTimeBase(false); + + // Init the interrupts. + if (boot) + cpuIC->enableCPUInterrupt(this); + + setCPUState(kIOCPUStateRunning); + + gossamerPE = OSDynamicCast(GossamerPE, getPlatform()); + if (gossamerPE ) { + //Initially Gossamers with Cuda are not in sleep mode + gossamerPE->setProperty("GossamerCudaSleeping", false); + } + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::initCPU end\n"); +#endif VERBOSE_LOGS_ON +} + +//extern "C" void _gossamer_cpu_wake(void); +extern UInt32 ResetHandler; + +#ifdef VERBOSE_LOGS_ON +// The following function exist only to check that the wake vector is placed correctly. +static void +cpu_foo_wake() +{ + __asm__ volatile("_gossamer_cpu_wake:"); + //kprintf("_gossamer_cpu_wake going to 0x100\n"); + __asm__ volatile(" ba 0x100"); +} +#endif // VERBOSE_LOGS_ON + +// flushes the cash for a word at the given address. 
+#define cFlush(addr) __asm__ volatile("dcbf 0, %0" : : "r" (addr)) + +extern "C" { + void gossamer_cpu_wake(void); + extern void cacheInit(void); + extern void cacheDisable(void); +} + +void GossamerCPU::quiesceCPU(void) +{ + UInt32 larsCode = (((UInt32)'L') << 24) | (((UInt32)'a') << 16) | (((UInt32)'r') << 8) | (((UInt32)'s') << 0); + UInt32 restartReferencePhi = pmap_extract(kernel_pmap,(vm_address_t)&restartAddress); + + // disables the interrupts (they should be already disabled, but one more tiem won't hurt): + ml_set_interrupts_enabled(FALSE); + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::quiesceCPU BEFORE 0x%08lx 0x%08lx start\n", 0x00000000, ml_phys_read(0x00000000)); + kprintf("GossamerCPU::quiesceCPU BEFORE 0x%08lx 0x%08lx start\n", 0x00000004, ml_phys_read(0x00000004)); + + // Set the wake vector to point to the my checkpoint vector + ml_phys_write(restartReferencePhi, gossamer_cpu_wake); //restartAddress = gossamer_cpu_wake; + eieio(); +#else + // Set the wake vector to point to the reset vector + ml_phys_write(restartReferencePhi, 0x100); //restartAddress = 0x100; + eieio(); +#endif // VERBOSE_LOGS_ON + + ml_phys_write(0x00000000, restartReferencePhi); + eieio(); + + // Set the wake vector to point to the reset vector + ml_phys_write(0x00000004, larsCode); + eieio(); + + // and flushes the data cache: + flush_dcache(restartReferencePhi, 4, true); + flush_dcache(0x00000000, 8, true); + + // Also makes sure that the reset hander is correctly flushed: + flush_dcache(&ResetHandler, 12, true); + + __asm__ volatile("sync"); + __asm__ volatile("isync"); + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::quiesceCPU AFTER 0x%08lx 0x%08lx start\n", 0x00000000, ml_phys_read(0x00000000)); + kprintf("GossamerCPU::quiesceCPU AFTER 0x%08lx 0x%08lx start\n", ml_phys_read(0x00000000), ml_phys_read(ml_phys_read(0x00000000))); + kprintf("GossamerCPU::quiesceCPU AFTER 0x%08lx 0x%08lx start\n", 0x00000004, ml_phys_read(0x00000004)); +#endif + + // Send PMU command to 
shutdown system before io is turned off + if (pmu != 0) + pmu->callPlatformFunction("sleepNow", false, 0, 0, 0, 0); + else + kprintf("GossamerCPU::quiesceCPU can't find ApplePMU\n"); + + if (heathrow != NULL) { + heathrow->callPlatformFunction(heathrow_sleepState, false, (void *)true, 0, 0, 0); + } + else + kprintf("GossamerCPU::quiesceCPU not found Heathrow\n"); + + if (grackle != NULL) { + IOPCIAddressSpace grackleSpace; + UInt32 grackleProcConfiguration, grackleMemConfiguration; + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::quiesceCPU AppleGracklePCI sets the ram in autorefresh\n"); + + grackleSpace.bits = 0x80000000; + grackleProcConfiguration = grackle->configRead32(grackleSpace, 0xA8); + kprintf("GossamerCPU::quiesceCPU AppleGracklePCI current processorinterface conf :0x%08lx\n", grackleProcConfiguration); +#endif // VERBOSE_LOGS_ON + + grackleSpace.bits = 0x80000000; + grackleMemConfiguration = grackle->configRead32(grackleSpace, 0x70); +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::quiesceCPU AppleGracklePCI current power managment mode :0x%08lx\n", grackleMemConfiguration); +#endif // VERBOSE_LOGS_ON + + // Enables NAP and PM + grackleMemConfiguration |= 0x90; +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::quiesceCPU AppleGracklePCI new power managment mode :0x%08lx\n", grackleMemConfiguration); +#endif // VERBOSE_LOGS_ON + + grackle->configWrite32(grackleSpace, 0x70, grackleMemConfiguration); + } + else + kprintf("GossamerCPU::quiesceCPU not found AppleGracklePCI\n"); + + // Save time base before sleep since CPU's TBR will be set to zero at wake. + saveTimeBase(true); + + // These make all the difference between a succesful wake and a crash, + // however it is still unclear why this happens. I'll leave to B.A. to + // figure it out. 
+ cacheInit(); + cacheDisable(); + +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::quiesceCPU calling ml_ppc_sleep\n"); +#endif // VERBOSE_LOGS_ON + + // Now we loop here waiting for the PMU to kick in and sleep the machine. + // We do NOT call ml_ppc_sleep because while ml_ppc_sleep works greate for Core99 + // it has some problems with Gossamer CPUS. Also the code in ml_ppc_sleep to + // clear the interrupts (and so keep the processor in its sleep state) is needed + // by the Core99 platform (otherwise the machine does not sleep), but it is totally + // useless for Gossamer CPUs since whatever is the state of the CPU the pmu + // will put the whole system to sleep. + + while(true) { + } + + //ml_ppc_sleep(); +} + +const OSSymbol *GossamerCPU::getCPUName(void) +{ + return OSSymbol::withCStringNoCopy("Primary0"); +} + +kern_return_t GossamerCPU::startCPU(vm_offset_t /*start_paddr*/, + vm_offset_t /*arg_paddr*/) +{ + return KERN_FAILURE; +} + +void GossamerCPU::haltCPU(void) +{ + long machine_type; + grackle = NULL; + + grackle = OSDynamicCast(AppleGracklePCI, findIOInterface("AppleGracklePCI")); + if (grackle == NULL) + kprintf("GossamerCPU::haltCPU missing grackle\n"); + + pci2pciBridge = NULL; + + // Finds heathrow and pmu because we need them in quienceCPU. We can + // not put the "findIOInterface" code there because it may block and + // quienceCPU runs in interrupt context. + heathrow = OSDynamicCast(IOService, findIOInterface("Heathrow")); + //Actually, pmu find is moved below because it hangs when beige G3 go to sleep + + /* + The following code is commented because the only Gossamer machine with a pci 2 pci Bridge + is the BWG3 and in that machine we do not remove power from the bridge. 
I am however leaving + this code here as reference (and to make clear that it is not running for a reason) + IORegistryEntry *pci2pciBridgeEntry = fromPath("/pci@80000000/@d", gIODTPlane); + IOService *pci2pciBridgeNub = OSDynamicCast(IOService, pci2pciBridgeEntry); + if (pci2pciBridgeNub != NULL) { + pci2pciBridge = OSDynamicCast(IOPCI2PCIBridge, pci2pciBridgeNub->getClient()); + } + + if (pci2pciBridge != NULL) + pci2pciBridge->saveBridgeState(); + */ +#ifdef VERBOSE_LOGS_ON + kprintf("GossamerCPU::haltCPU Here!\n"); +#endif // VERBOSE_LOGS_ON + + gossamerPE = OSDynamicCast(GossamerPE, getPlatform()); + if (gossamerPE == 0 ) + { + processor_exit(machProcessor); + return; + } + machine_type = gossamerPE->getMachineType(); + + //Isolate only those Gossamers that have a Cuda, not PG&E + if ((machine_type != kGossamerType101) && (machine_type != kGossamerTypeWallstreet)) + { + mach_timespec_t t; + IOService *cudaDriver; + IOService *usbOHCIDriver; + bool anyint = false; + + t.tv_sec = 1; + t.tv_nsec = 0; + cudaDriver = waitForService(serviceMatching("AppleCuda"), &t); + usbOHCIDriver = waitForService(serviceMatching("AppleUSBOHCI"), &t); + + if ((heathrow != NULL) && (machine_type == kGossamerTypeYosemite)) + { + heathrow->callPlatformFunction(heathrow_set_light, false, (void *)false, 0, 0, 0); + } + + gossamerPE->setProperty("GossamerCudaSleeping", true); + ml_throttle(254); //throttle cpu speed as much as possible + + while (true) //sit here in a loop, pretending to be asleep + { + machine_idle(); //Max power savings for G3 CPU, needs interrupts enabled. 
+ // It will return when any interrupt occurs + if (cudaDriver != NULL) + { + anyint = false; + cudaDriver->callPlatformFunction(cuda_check_any_interrupt, false, (void *)&anyint, 0, 0, 0); + if (anyint) + { + break; + } + } + + if (usbOHCIDriver != NULL) + { + anyint = false; + usbOHCIDriver->callPlatformFunction(usb_remote_wakeup, false, (void *)&anyint, 0, 0, 0); + if (anyint) + { + break; + } + } + IOSleep(5); //allows USB thread to run since no more thread scheduling. 1 ms + // is enough for slow Yosemite, 5 is needed for iMacs. + } + + ml_throttle(0); //remove throttle from CPU speed + + gossamerPE->setProperty("GossamerCudaSleeping", false); + + if ((heathrow != NULL) && (machine_type == kGossamerTypeYosemite)) + { + heathrow->callPlatformFunction(heathrow_set_light, false, (void *)true, 0, 0, 0); + } + + } + else + { + pmu = OSDynamicCast(IOService, findIOInterface("ApplePMU")); + processor_exit(machProcessor); + } +} + +void GossamerCPU::saveTimeBase(bool save) +{ + if(save) { // Save time base. + do { + tbHigh = mftbu(); + tbLow = mftb(); + tbHigh2 = mftbu(); + } while (tbHigh != tbHigh2); + } else { // Restore time base + mttb(0); + mttbu(tbHigh); + mttb(tbLow); + } +} + diff --git a/iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.h b/iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.h new file mode 100644 index 000000000..ca8a88df0 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + */ + +#ifndef _IOKIT_GOSSAMERCPU_H +#define _IOKIT_GOSSAMERCPU_H + +#include + +#include "../../pci/drvApplePCI/AppleGracklePCI.h" +#include "Gossamer.h" + +class GossamerCPU : public IOCPU +{ + OSDeclareDefaultStructors(GossamerCPU); + +private: + IOService *pmu; + IOService *heathrow; + IOCPUInterruptController *cpuIC; + static UInt32 restartAddress; + AppleGracklePCI *grackle; + IOPCI2PCIBridge *pci2pciBridge; + unsigned long tbLow, tbHigh, tbHigh2; + GossamerPE *gossamerPE; + + // callPlatformFunction symbols + const OSSymbol *heathrow_sleepState; + const OSSymbol *heathrow_set_light; + const OSSymbol *cuda_check_any_interrupt; + const OSSymbol *usb_remote_wakeup; + +protected: + virtual IOService *findIOInterface(char*); + virtual void ipiHandler(void *refCon, void *nub, int source); + +public: + virtual bool start(IOService *provider); + virtual void initCPU(bool boot); + virtual void quiesceCPU(void); + virtual const OSSymbol* getCPUName(void); + virtual kern_return_t startCPU(vm_offset_t start_paddr, + vm_offset_t arg_paddr); + virtual void haltCPU(void); + virtual void saveTimeBase(bool); +}; + +#endif /* ! 
_IOKIT_GOSSAMERCPU_H */ diff --git a/iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.cpp b/iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.cpp new file mode 100644 index 000000000..90b1a5c8c --- /dev/null +++ b/iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.cpp @@ -0,0 +1,283 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + + +#include + +#include +#include +#include +#include + +#include + +#include "GrandCentral.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super AppleMacIO + +OSDefineMetaClassAndStructors(GrandCentral, AppleMacIO); + +bool GrandCentral::start(IOService *provider) +{ + IOInterruptAction handler; + IOPhysicalAddress base; + OSData * data; + AppleNMI *appleNMI; + IOService *sixty6; + long nmiSource; + OSData *nmiData; + IOReturn error; + + // Call MacIO's start. + if (!super::start(provider)) + return false; + + // Necessary for Control NDRV. 
+ base = fMemory->getPhysicalAddress(); + data = OSData::withBytes(&base, sizeof(base)); + if (data != 0) provider->setProperty("AAPL,address", data); + + // Make sure the sixty6 node exists. + if (provider->childFromPath("sixty6", gIODTPlane) == 0) { + sixty6 = new IOService; + if(sixty6->init()) { + sixty6->setName("sixty6"); + sixty6->attachToParent(provider, gIODTPlane); + sixty6->registerService(); + } + } + + // Make nubs for the children. + publishBelow( provider ); + + // get the base address of the this GrandCentral. + grandCentralBaseAddress = fMemory->getVirtualAddress(); + + getPlatform()->setCPUInterruptProperties(provider); + + // Allocate the interruptController instance. + interruptController = new GrandCentralInterruptController; + if (interruptController == NULL) return false; + + // call the interruptController's init method. + error = interruptController->initInterruptController(provider, grandCentralBaseAddress); + if (error != kIOReturnSuccess) return false; + + handler = interruptController->getInterruptHandlerAddress(); + provider->registerInterrupt(0, interruptController, handler, 0); + + provider->enableInterrupt(0); + + // Register the interrupt controller so client can find it. + getPlatform()->registerInterruptController(gIODTDefaultInterruptController, + interruptController); + + // Create the NMI Driver. 
+ nmiSource = 20; + nmiData = OSData::withBytes(&nmiSource, sizeof(long)); + appleNMI = new AppleNMI; + if ((nmiData != 0) && (appleNMI != 0)) { + appleNMI->initNMI(interruptController, nmiData); + } + + return true; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOInterruptController + +OSDefineMetaClassAndStructors(GrandCentralInterruptController, IOInterruptController); + +IOReturn GrandCentralInterruptController::initInterruptController(IOService *provider, IOLogicalAddress interruptControllerBase) +{ + int cnt; + + parentNub = provider; + + // Allocate the task lock. + taskLock = IOLockAlloc(); + if (taskLock == 0) return kIOReturnNoResources; + + // Allocate the memory for the vectors + vectors = (IOInterruptVector *)IOMalloc(kNumVectors * sizeof(IOInterruptVector)); + if (vectors == NULL) { + IOLockFree(taskLock); + return kIOReturnNoMemory; + } + bzero(vectors, kNumVectors * sizeof(IOInterruptVector)); + + // Allocate locks for the + for (cnt = 0; cnt < kNumVectors; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < kNumVectors; cnt++) { + IOLockFree(taskLock); + if (vectors[cnt].interruptLock != NULL) + IOLockFree(vectors[cnt].interruptLock); + } + return kIOReturnNoResources; + } + } + + // Setup the registers accessors + eventsReg = (unsigned long)(interruptControllerBase + kEventsOffset); + maskReg = (unsigned long)(interruptControllerBase + kMaskOffset); + clearReg = (unsigned long)(interruptControllerBase + kClearOffset); + levelsReg = (unsigned long)(interruptControllerBase + kLevelsOffset); + + // Initialize the registers. + + // Disable all interrupts. + stwbrx(0x00000000, maskReg); + eieio(); + + // Clear all pending interrupts. + stwbrx(0xFFFFFFFF, clearReg); + eieio(); + + // Disable all interrupts. (again?) 
+ stwbrx(0x00000000, maskReg); + eieio(); + + return kIOReturnSuccess; +} + +IOInterruptAction GrandCentralInterruptController::getInterruptHandlerAddress(void) +{ + return (IOInterruptAction)&GrandCentralInterruptController::handleInterrupt; +} + +IOReturn GrandCentralInterruptController::handleInterrupt(void * /*refCon*/, + IOService * /*nub*/, + int /*source*/) +{ + int done; + long events, vectorNumber; + IOInterruptVector *vector; + unsigned long maskTmp; + + do { + done = 1; + + // Do all the sources for events, plus any pending interrupts. + // Also add in the "level" sensitive sources + maskTmp = lwbrx(maskReg); + events = lwbrx(eventsReg) & ~kTypeLevelMask; + events |= lwbrx(levelsReg) & maskTmp & kTypeLevelMask; + events |= pendingEvents & maskTmp; + pendingEvents = 0; + eieio(); + + // Since we have to clear the level'd one clear the current edge's too. + stwbrx(kTypeLevelMask | events, clearReg); + eieio(); + + if (events) done = 0; + + while (events) { + vectorNumber = 31 - cntlzw(events); + events ^= (1 << vectorNumber); + vector = &vectors[vectorNumber]; + + vector->interruptActive = 1; + sync(); + isync(); + if (!vector->interruptDisabledSoft) { + isync(); + + // Call the handler if it exists. + if (vector->interruptRegistered) { + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + } + } else { + // Hard disable the source. 
+ vector->interruptDisabledHard = 1; + disableVectorHard(vectorNumber, vector); + } + + vector->interruptActive = 0; + } + } while (!done); + + return kIOReturnSuccess; +} + +bool GrandCentralInterruptController::vectorCanBeShared(long /*vectorNumber*/, IOInterruptVector */*vector*/) +{ + return true; +} + +int GrandCentralInterruptController::getVectorType(long vectorNumber, IOInterruptVector */*vector*/) +{ + int interruptType; + + if (kTypeLevelMask & (1 << vectorNumber)) { + interruptType = kIOInterruptTypeLevel; + } else { + interruptType = kIOInterruptTypeEdge; + } + + return interruptType; +} + +void GrandCentralInterruptController::disableVectorHard(long vectorNumber, IOInterruptVector */*vector*/) +{ + unsigned long maskTmp; + + // Turn the source off at hardware. + maskTmp = lwbrx(maskReg); + maskTmp &= ~(1 << vectorNumber); + stwbrx(maskTmp, maskReg); + eieio(); +} + +void GrandCentralInterruptController::enableVector(long vectorNumber, + IOInterruptVector *vector) +{ + unsigned long maskTmp; + + maskTmp = lwbrx(maskReg); + maskTmp |= (1 << vectorNumber); + stwbrx(maskTmp, maskReg); + eieio(); + if (lwbrx(levelsReg) & (1 << vectorNumber)) { + // lost the interrupt + causeVector(vectorNumber, vector); + } +} + +void GrandCentralInterruptController::causeVector(long vectorNumber, IOInterruptVector */*vector*/) +{ + pendingEvents |= 1 << vectorNumber; + parentNub->causeInterrupt(0); +} diff --git a/iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.h b/iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.h new file mode 100644 index 000000000..77b94974c --- /dev/null +++ b/iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_GRANDCENTRAL_H +#define _IOKIT_GRANDCENTRAL_H + +#include + +#include +#include + +#define kNumVectors (32) + +#define kTypeLevelMask (0x3FF00000) + +#define kEventsOffset (0x00020) +#define kMaskOffset (0x00024) +#define kClearOffset (0x00028) +#define kLevelsOffset (0x0002C) + + +class GrandCentralInterruptController; + +class GrandCentral : public AppleMacIO +{ + OSDeclareDefaultStructors(GrandCentral); + +private: + IOLogicalAddress grandCentralBaseAddress; + GrandCentralInterruptController *interruptController; + +public: + virtual bool start(IOService *provider); +}; + + +class GrandCentralInterruptController : public IOInterruptController +{ + OSDeclareDefaultStructors(GrandCentralInterruptController); + +private: + IOService *parentNub; + IOLock *taskLock; + unsigned long pendingEvents; + unsigned long eventsReg; + unsigned long maskReg; + unsigned long clearReg; + unsigned long levelsReg; + +public: + virtual IOReturn initInterruptController(IOService *provider, IOLogicalAddress interruptControllerBase); + + virtual IOInterruptAction getInterruptHandlerAddress(void); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source); + + virtual bool 
vectorCanBeShared(long vectorNumber, IOInterruptVector *vector); + virtual int getVectorType(long vectorNumber, IOInterruptVector *vector); + virtual void disableVectorHard(long vectorNumber, IOInterruptVector *vector); + virtual void enableVector(long vectorNumber, IOInterruptVector *vector); + virtual void causeVector(long vectorNumber, IOInterruptVector *vector); +}; + + +#endif /* ! _IOKIT_GRANDCENTRAL_H */ diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp new file mode 100644 index 000000000..5c68f58cb --- /dev/null +++ b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * AppleI386CPU.cpp + * + * March 6, 2000 jliu + * Created based on AppleCPU. 
+ */ + +#include "AppleI386CPU.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOCPU + +OSDefineMetaClassAndStructors(AppleI386CPU, IOCPU); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleI386CPU::start(IOService * provider) +{ +// kern_return_t result; + + if (!super::start(provider)) return false; + + cpuIC = new AppleI386CPUInterruptController; + if (cpuIC == 0) return false; + + if (cpuIC->initCPUInterruptController(1) != kIOReturnSuccess) + return false; + + cpuIC->attach(this); + + cpuIC->registerCPUInterruptController(); + +#ifdef NOTYET + // Register this CPU with mach. + result = ml_processor_register((cpu_id_t)this, 0, + &machProcessor, &ipi_handler, true); + if (result == KERN_FAILURE) return false; +#endif + + setCPUState(kIOCPUStateUninitalized); + +#ifdef NOTYET + processor_start(machProcessor); +#endif + + // Hack. Call initCPU() ourself since no one else will. + initCPU(true); + + registerService(); + + return true; +} + +void AppleI386CPU::initCPU(bool /*boot*/) +{ + cpuIC->enableCPUInterrupt(this); + + setCPUState(kIOCPUStateRunning); +} + +void AppleI386CPU::quiesceCPU(void) +{ +} + +kern_return_t AppleI386CPU::startCPU(vm_offset_t /*start_paddr*/, + vm_offset_t /*arg_paddr*/) +{ + return KERN_FAILURE; +} + +void AppleI386CPU::haltCPU(void) +{ +} + +const OSSymbol * AppleI386CPU::getCPUName(void) +{ + return OSSymbol::withCStringNoCopy("Primary0"); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOCPUInterruptController + +OSDefineMetaClassAndStructors(AppleI386CPUInterruptController, + IOCPUInterruptController); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn AppleI386CPUInterruptController::handleInterrupt(void * /*refCon*/, + IOService * /*nub*/, + int 
source) +{ + IOInterruptVector * vector; + + // Override the implementation in IOCPUInterruptController to + // dispatch interrupts the old way. + // + // source argument is ignored, use the first IOCPUInterruptController + // in the vector array. + // + vector = &vectors[0]; + + if (!vector->interruptRegistered) + return kIOReturnInvalid; + + vector->handler(vector->target, + vector->refCon, + vector->nub, + source); + + return kIOReturnSuccess; +} diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h new file mode 100644 index 000000000..9595ec031 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * AppleI386CPU.h + * + * March 6, 2000 jliu + * Created based on AppleCPU. 
+ */ + +#ifndef _IOKIT_APPLEI386CPU_H +#define _IOKIT_APPLEI386CPU_H + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleI386CPU : public IOCPU +{ + OSDeclareDefaultStructors(AppleI386CPU); + +private: + IOCPUInterruptController * cpuIC; + +public: + virtual bool start(IOService * provider); + virtual void initCPU(bool boot); + virtual void quiesceCPU(void); + virtual kern_return_t startCPU(vm_offset_t start_paddr, + vm_offset_t arg_paddr); + virtual void haltCPU(void); + virtual const OSSymbol * getCPUName(void); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleI386CPUInterruptController : public IOCPUInterruptController +{ + OSDeclareDefaultStructors(AppleI386CPUInterruptController); + +public: + virtual IOReturn handleInterrupt(void * refCon, + IOService * nub, + int source); +}; + +#endif /* ! _IOKIT_APPLEI386CPU_H */ diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp new file mode 100644 index 000000000..ddcdbd46b --- /dev/null +++ b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + */ + +#include + +#include + +#include +#include +#include +#include + +#include "AppleI386PlatformExpert.h" + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOPlatformExpert + +OSSymbol * gIntelPICName; + +OSDefineMetaClassAndStructors(AppleI386PlatformExpert, IOPlatformExpert) + +IOService * AppleI386PlatformExpert::probe(IOService * /* provider */, + SInt32 * score ) +{ + *score = 2000; + + return (this); +} + +bool AppleI386PlatformExpert::start(IOService * provider) +{ + gIntelPICName = (OSSymbol *) OSSymbol::withCStringNoCopy("intel-pic"); + +// setupPIC(provider); + + if (!super::start(provider)) + return false; + + return true; +} + +IOService * AppleI386PlatformExpert::createNub(OSDictionary * from) +{ + IOService * nub; + OSData * prop; + KERNBOOTSTRUCT * bootStruct; + + nub = super::createNub(from); + + if (nub) + { + if (0 == strcmp( "pci", nub->getName())) + { + bootStruct = (KERNBOOTSTRUCT *) PE_state.bootArgs; + prop = OSData::withBytesNoCopy(&bootStruct->pciInfo, + sizeof(bootStruct->pciInfo)); + assert(prop); + if (prop) + from->setObject( "pci-bus-info", prop); + } + else if (0 != strcmp("intel-pic", nub->getName())) + { + setupPIC(nub); + } + } + + return (nub); +} + +#define kNumVectors 16 + +void +AppleI386PlatformExpert::setupPIC(IOService *nub) +{ + int i; + OSDictionary * propTable; + OSArray * controller; + 
OSArray * specifier; + OSData * tmpData; + long tmpLong; + + propTable = nub->getPropertyTable(); + + // + // For the moment.. assume a classic 8259 interrupt controller + // with 16 interrupts. + // + // Later, this will be changed to detect a APIC and/or MP-Table + // and then will set the nubs appropriately. + + // Create the interrupt specifer array. + specifier = OSArray::withCapacity(kNumVectors); + assert(specifier); + for (i = 0; i < kNumVectors; i++) { + tmpLong = i; + tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); + specifier->setObject(tmpData); + } + + // Create the interrupt controller array. + controller = OSArray::withCapacity(kNumVectors); + assert(controller); + for (i = 0; i < kNumVectors; i++) + controller->setObject(gIntelPICName); + + // Put the two arrays into the property table. + propTable->setObject(gIOInterruptControllersKey, controller); + propTable->setObject(gIOInterruptSpecifiersKey, specifier); + + // Release the arrays after being added to the property table. 
+ specifier->release(); + controller->release(); +} + +bool +AppleI386PlatformExpert::matchNubWithPropertyTable(IOService * nub, + OSDictionary * propTable ) +{ + OSString * nameProp; + OSString * match; + + if (0 == (nameProp = (OSString *) nub->getProperty(gIONameKey))) + return (false); + + if ( 0 == (match = (OSString *) propTable->getObject(gIONameMatchKey))) + return (false); + + return (match->isEqualTo( nameProp )); +} + +bool AppleI386PlatformExpert::getMachineName( char * name, int maxLength ) +{ + strncpy( name, "x86", maxLength ); + + return (true); +} + +bool AppleI386PlatformExpert::getModelName( char * name, int maxLength ) +{ + strncpy( name, "x86", maxLength ); + + return (true); +} diff --git a/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h new file mode 100644 index 000000000..2334e8796 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _IOKIT_APPLEI386PLATFORM_H +#define _IOKIT_APPLEI386PLATFORM_H + +#include + +class AppleI386PlatformExpert : public IOPlatformExpert +{ + OSDeclareDefaultStructors(AppleI386PlatformExpert) + +private: + void setupPIC(IOService * nub); + +public: + virtual IOService * probe(IOService * provider, + SInt32 * score); + + virtual bool start(IOService * provider); + + virtual bool matchNubWithPropertyTable(IOService * nub, + OSDictionary * table); + + virtual IOService * createNub(OSDictionary * from); + + virtual bool getModelName(char * name, int maxLength); + virtual bool getMachineName(char * name, int maxLength); +}; + +#endif /* ! _IOKIT_APPLEI386PLATFORM_H */ + diff --git a/iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h b/iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h new file mode 100644 index 000000000..b032e4dfe --- /dev/null +++ b/iokit/Drivers/platform/drvAppleIntelClassicPIC/AppleIntelClassicPIC.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_APPLEINTELCLASSICPIC_H +#define _IOKIT_APPLEINTELCLASSICPIC_H + +#include +#include + +#define kClockIRQ 0 // FIXME for SMP systems. + +#define kPIC1BasePort 0x20 +#define kPIC2BasePort 0xa0 + +#define kPIC1TriggerTypePort 0x4d0 +#define kPIC2TriggerTypePort 0x4d1 + +#define kPICCmdPortOffset 0 +#define kPICDataPortOffset 1 + +#define kEOICommand 0x20 + +#define kPICSlaveID 2 // Slave ID for second PIC + +#define kNumVectors 16 + +#define IS_SLAVE_VECTOR(x) ((x) & 8) + +// ICW1 +// +#define kPIC_ICW1(x) ((x) + kPICCmdPortOffset) +#define kPIC_ICW1_MBO 0x10 // must be one +#define kPIC_ICW1_LTIM 0x08 // level/edge triggered mode +#define kPIC_ICW1_ADI 0x04 // 4/8 byte call address interval +#define kPIC_ICW1_SNGL 0x02 // single/cascade mode +#define kPIC_ICW1_IC4 0x01 // ICW4 needed/not needed + +// ICW2 - Interrupt vector address (bits 7 - 3). +// +#define kPIC_ICW2(x) ((x) + kPICDataPortOffset) + +// ICW3 - Slave device. +// +#define kPIC_ICW3(x) ((x) + kPICDataPortOffset) + +// ICW4 +// +#define kPIC_ICW4(x) ((x) + kPICDataPortOffset) +#define kPIC_ICW4_SFNM 0x10 // special fully nested mode +#define kPIC_ICW4_BUF 0x08 // buffered mode +#define kPIC_ICW4_MS 0x04 // master/slave +#define kPIC_ICW4_AEOI 0x02 // automatic end of interrupt mode +#define kPIC_ICW4_uPM 0x01 // 8088 (vs. 8085) operation + +// OCW1 - Interrupt mask. +// +#define kPIC_OCW1(x) ((x) + kPICDataPortOffset) + +// OCW2 - Bit 4 must be zero. +// +#define kPIC_OCW2(x) ((x) + kPICCmdPortOffset) +#define kPIC_OCW2_R 0x80 // rotation +#define kPIC_OCW2_SL 0x40 // specific +#define kPIC_OCW2_EOI 0x20 +#define kPIC_OCW2_LEVEL(x) ((x) & 0x07) + +// OCW3 - Bit 4 must be zero. 
+// +#define kPIC_OCW3(x) ((x) + kPICCmdPortOffset) +#define kPIC_OCW3_ESMM 0x40 // special mask mode +#define kPIC_OCW3_SMM 0x20 +#define kPIC_OCW3_MBO 0x08 // must be one +#define kPIC_OCW3_P 0x04 // poll +#define kPIC_OCW3_RR 0x02 +#define kPIC_OCW3_RIS 0x01 + + +class AppleIntelClassicPIC : public IOInterruptController +{ + OSDeclareDefaultStructors( AppleIntelClassicPIC ); + +protected: + volatile UInt16 maskInterrupts; /* Which interrupts are masked out */ + UInt16 triggerTypes; /* Interrupt trigger type mask */ + + inline int getTriggerType(long irq) + { + return ( triggerTypes & (1 << irq) ) ? + kIOInterruptTypeLevel : kIOInterruptTypeEdge; + } + + inline void updateMask(long irq) + { + if ( IS_SLAVE_VECTOR(irq) ) + outb( kPIC_OCW1(kPIC2BasePort), maskInterrupts >> 8 ); + else + outb( kPIC_OCW1(kPIC1BasePort), maskInterrupts & 0xff ); + } + + inline void disableInterrupt(long irq) + { + maskInterrupts |= (1 << irq); + updateMask(irq); + } + + inline void enableInterrupt(long irq) + { + maskInterrupts &= ~(1 << irq); + updateMask(irq); + } + + inline void ackInterrupt(long irq) + { + if ( IS_SLAVE_VECTOR(irq) ) + outb( kPIC_OCW2(kPIC2BasePort), kEOICommand ); + outb( kPIC_OCW2(kPIC1BasePort), kEOICommand ); + } + + virtual void initializePIC(UInt16 port, + UInt8 icw1, UInt8 icw2, + UInt8 icw3, UInt8 icw4); + +public: + virtual bool start(IOService * provider); + virtual void free(void); + + // Methods that must be implemented by simplifed interrupt controllers. 
+ + virtual int getVectorType(long vectorNumber, IOInterruptVector * vector); + virtual IOInterruptAction getInterruptHandlerAddress(void); + virtual IOReturn handleInterrupt(void * refCon, IOService * nub, int source); + virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector * vector); + virtual void initVector(long vectorNumber, IOInterruptVector * vector); + virtual void disableVectorHard(long vectorNumber, IOInterruptVector * vector); + virtual void enableVector(long vectorNumber, IOInterruptVector * vector); +}; + +#endif /* ! _IOKIT_APPLEINTELCLASSICPIC_H */ diff --git a/iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp b/iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp new file mode 100644 index 000000000..c6eaaea34 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp @@ -0,0 +1,319 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Michael Burg + */ + +#include +#include +#include +#include "AppleIntelClassicPIC.h" + +// This must agree with the trap number reported by the low-level +// interrupt handler (osfmk/i386/locore.s). + +#define kIntelReservedIntVectors 0x40 + +extern OSSymbol * gIntelPICName; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOInterruptController + +OSDefineMetaClassAndStructors(AppleIntelClassicPIC, IOInterruptController); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleIntelClassicPIC::start(IOService * provider) +{ + IOInterruptAction handler; + + if ( super::start(provider) == false ) return false; + + // Allocate the memory for the vectors. + + vectors = (IOInterruptVector *) IOMalloc( kNumVectors * + sizeof(IOInterruptVector) ); + if ( vectors == NULL ) return false; + + bzero(vectors, kNumVectors * sizeof(IOInterruptVector)); + + // Allocate locks for the vectors. + + for ( int cnt = 0; cnt < kNumVectors; cnt++ ) + { + vectors[cnt].interruptLock = IOLockAlloc(); + + if ( vectors[cnt].interruptLock == NULL ) + { + return false; + } + } + + // Mask out the interrupts except for the casacde line. + + maskInterrupts = 0xffff & ~(1 << kPICSlaveID); + + // Initialize master PIC. + + initializePIC( kPIC1BasePort, + /* ICW1 */ kPIC_ICW1_IC4, + /* ICW2 */ kIntelReservedIntVectors, + /* ICW3 */ (1 << kPICSlaveID), + /* ICW4 */ kPIC_ICW4_uPM ); + + // Write to OCW1, OCW3, OCW2. + // The priority order is changed to (highest to lowest) + // 3 4 5 6 7 0 1 2 + // The default priority after initialization is (highest to lowest) + // 0 1 2 3 4 5 6 7 + + outb( kPIC_OCW1(kPIC1BasePort), maskInterrupts & 0xff ); + outb( kPIC_OCW3(kPIC1BasePort), kPIC_OCW3_MBO | kPIC_OCW3_RR ); + outb( kPIC_OCW2(kPIC1BasePort), kPIC_OCW2_R | + kPIC_OCW2_SL | + kPIC_OCW2_LEVEL(2) ); + + // Initialize slave PIC. 
+ + initializePIC( kPIC2BasePort, + /* ICW1 */ kPIC_ICW1_IC4, + /* ICW2 */ kIntelReservedIntVectors + 8, + /* ICW3 */ kPICSlaveID, + /* ICW4 */ kPIC_ICW4_uPM ); + + // Write to OCW1, and OCW3. + + outb( kPIC_OCW1(kPIC2BasePort), maskInterrupts >> 8 ); + outb( kPIC_OCW3(kPIC2BasePort), kPIC_OCW3_MBO | kPIC_OCW3_RR ); + + // Record trigger type. + + triggerTypes = inb( kPIC1TriggerTypePort ) | + ( inb( kPIC2TriggerTypePort ) << 8 ); + + // Primary interrupt controller + + getPlatform()->setCPUInterruptProperties(provider); + + // Register the interrupt handler function so it can service interrupts. + + handler = getInterruptHandlerAddress(); + if ( provider->registerInterrupt(0, this, handler, 0) != kIOReturnSuccess ) + panic("AppleIntelClassicPIC: Failed to install platform interrupt handler"); + + provider->enableInterrupt(0); + + // Register this interrupt controller so clients can find it. + + getPlatform()->registerInterruptController(gIntelPICName, this); + + return true; +} + +//--------------------------------------------------------------------------- +// Free the interrupt controller object. Deallocate all resources. + +void AppleIntelClassicPIC::free(void) +{ + if ( vectors ) + { + for ( int cnt = 0; cnt < kNumVectors; cnt++ ) + { + if (vectors[cnt].interruptLock) + IOLockFree(vectors[cnt].interruptLock); + } + + IOFree( vectors, kNumVectors * sizeof(IOInterruptVector) ); + vectors = 0; + } + + super::free(); +} + +//--------------------------------------------------------------------------- +// Initialize the PIC by sending the Initialization Command Words (ICW). + +void AppleIntelClassicPIC::initializePIC( UInt16 port, + UInt8 icw1, UInt8 icw2, + UInt8 icw3, UInt8 icw4 ) +{ + // Initialize 8259's. Start the initialization sequence by + // issuing ICW1 (Initialization Command Word 1). + // Bit 4 must be set. + + outb( kPIC_ICW1(port), kPIC_ICW1_MBO | icw1 ); + + // ICW2 + // Upper 5 bits of the interrupt vector address. 
The lower three + // bits are set according to the interrupt level serviced. + + outb( kPIC_ICW2(port), icw2 ); + + // ICW3 (Master Device) + // Set a 1 bit for each IR line that has a slave. + + outb( kPIC_ICW3(port), icw3 ); + + // ICW4 + + outb( kPIC_ICW4(port), icw4 ); +} + +//--------------------------------------------------------------------------- +// Report whether the interrupt line is edge or level triggered. + +int AppleIntelClassicPIC::getVectorType(long vectorNumber, + IOInterruptVector * vector) +{ + return getTriggerType(vectorNumber); +} + +//--------------------------------------------------------------------------- +// + +IOInterruptAction AppleIntelClassicPIC::getInterruptHandlerAddress(void) +{ + return (IOInterruptAction) &AppleIntelClassicPIC::handleInterrupt; +} + +//--------------------------------------------------------------------------- +// Handle an interrupt by servicing the 8259, and dispatch the +// handler associated with the interrupt vector. + +IOReturn AppleIntelClassicPIC::handleInterrupt(void * savedState, + IOService * nub, + int source) +{ + IOInterruptVector * vector; + long vectorNumber; + + typedef void (*IntelClockFuncType)(void *); + IntelClockFuncType clockFunc; + + vectorNumber = source - kIntelReservedIntVectors; + + if (vectorNumber >= kNumVectors) + return kIOReturnSuccess; + + // Disable and ack interrupt. + + disableInterrupt(vectorNumber); + ackInterrupt( vectorNumber); + + // Process the interrupt. + + vector = &vectors[vectorNumber]; + + vector->interruptActive = 1; + + if ( !vector->interruptDisabledSoft ) + { + if ( vector->interruptRegistered ) + { + // Call registered interrupt handler. 
+ + if (vectorNumber == kClockIRQ) // FIXME + { + clockFunc = (IntelClockFuncType) vector->handler; + clockFunc(savedState); + } + else + { + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + } + + // interruptDisabledSoft flag may be set by the + // handler to indicate that the interrupt should + // be disabled. + + if ( vector->interruptDisabledSoft ) + { + // Already "hard" disabled, set interruptDisabledHard + // to indicate this. + + vector->interruptDisabledHard = 1; + } + else + { + // Re-enable the interrupt line. + + enableInterrupt(vectorNumber); + } + } + } + else + { + vector->interruptDisabledHard = 1; + } + + vector->interruptActive = 0; + + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// + +bool AppleIntelClassicPIC::vectorCanBeShared(long vectorNumber, + IOInterruptVector * vector) +{ + if ( getVectorType(vectorNumber, vector) == kIOInterruptTypeLevel ) + return true; + else + return false; +} + +//--------------------------------------------------------------------------- +// + +void AppleIntelClassicPIC::initVector(long vectorNumber, + IOInterruptVector * vector) +{ + super::initVector(vectorNumber, vector); +} + +//--------------------------------------------------------------------------- +// + +void AppleIntelClassicPIC::disableVectorHard(long vectorNumber, + IOInterruptVector * vector) +{ + // Sorry, cacade/slave interrupt line cannot be disable. 
+ + if (vectorNumber == kPICSlaveID) return; + + disableInterrupt(vectorNumber); +} + +//--------------------------------------------------------------------------- +// + +void AppleIntelClassicPIC::enableVector(long vectorNumber, + IOInterruptVector * vector) +{ + enableInterrupt(vectorNumber); +} diff --git a/iokit/Drivers/platform/drvAppleIntelClock/AppleIntelClock.h b/iokit/Drivers/platform/drvAppleIntelClock/AppleIntelClock.h new file mode 100644 index 000000000..30a0ba5f6 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleIntelClock/AppleIntelClock.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _APPLEINTELCLOCK_H +#define _APPLEINTELCLOCK_H + +#include + +#define kIRQ_Clock 0 + +class AppleIntelClock : public IOService +{ + OSDeclareDefaultStructors(AppleIntelClock); + +public: + virtual bool start(IOService * provider); +}; + +#endif /* _APPLEINTELCLOCK_H */ diff --git a/iokit/Drivers/platform/drvAppleIntelClock/IntelClock.cpp b/iokit/Drivers/platform/drvAppleIntelClock/IntelClock.cpp new file mode 100644 index 000000000..3de76178e --- /dev/null +++ b/iokit/Drivers/platform/drvAppleIntelClock/IntelClock.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +// +// Backdoor hack for Intel Clock. 
+// +// + +#include "AppleIntelClock.h" + +#define super IOService +OSDefineMetaClassAndStructors(AppleIntelClock, IOService); + +extern "C" { +extern void hardclock(void); +}; + +bool +AppleIntelClock::start(IOService *provider) +{ + if (!super::start(provider)) + return false; + + provider->registerInterrupt(kIRQ_Clock, 0, (IOInterruptAction) hardclock); + provider->enableInterrupt(kIRQ_Clock); + + publishResource("IORTC", this); + return true; +} diff --git a/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp b/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp new file mode 100644 index 000000000..43fa1520d --- /dev/null +++ b/iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp @@ -0,0 +1,282 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created. 
+ */ + +#include +extern "C" { +#include +} + +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndAbstractStructors(AppleMacIO, IOService); +OSMetaClassDefineReservedUnused(AppleMacIO, 0); +OSMetaClassDefineReservedUnused(AppleMacIO, 1); +OSMetaClassDefineReservedUnused(AppleMacIO, 2); +OSMetaClassDefineReservedUnused(AppleMacIO, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleMacIO::start( IOService * provider ) +{ + IOPCIDevice *pciNub = (IOPCIDevice *)provider; + + if( !super::start( provider)) + return( false); + + // Make sure memory space is on. + pciNub->setMemoryEnable(true); + + fNub = provider; + fMemory = provider->mapDeviceMemoryWithIndex( 0 ); + if( 0 == fMemory) + IOLog("%s: unexpected ranges\n", getName()); + else if( !selfTest()) + IOLog("Warning: AppleMacIO self test fails\n"); + PMinit(); // initialize for power management + temporaryPowerClampOn(); // hold power on till we get children + return( true); +} + + +IOService * AppleMacIO::createNub( IORegistryEntry * from ) +{ + IOService * nub; + + nub = new AppleMacIODevice; + + if( nub && !nub->init( from, gIODTPlane )) { + nub->free(); + nub = 0; + } + + return( nub); +} + +void AppleMacIO::processNub(IOService * /*nub*/) +{ +} + +const char * AppleMacIO::deleteList ( void ) +{ + return( "('sd', 'st', 'disk', 'tape', 'pram', 'rtc', 'mouse')" ); +} + +const char * AppleMacIO::excludeList( void ) +{ + return( 0 ); +} + +void AppleMacIO::publishBelow( IORegistryEntry * root ) +{ + OSCollectionIterator * kids; + IORegistryEntry * next; + IOService * nub; + + // infanticide + kids = IODTFindMatchingEntries( root, kIODTRecursive, deleteList() ); + if( kids) { + while( (next = (IORegistryEntry *)kids->getNextObject())) { + next->detachAll( gIODTPlane); + } + kids->release(); + } + + // 
publish everything below, minus excludeList + kids = IODTFindMatchingEntries( root, kIODTRecursive | kIODTExclusive, + excludeList()); + if( kids) { + while( (next = (IORegistryEntry *)kids->getNextObject())) { + + if( 0 == (nub = createNub( next ))) + continue; + + nub->attach( this ); + + processNub(nub); + + nub->registerService(); + } + kids->release(); + } +} + +bool AppleMacIO::compareNubName( const IOService * nub, + OSString * name, OSString ** matched ) const +{ + return( IODTCompareNubName( nub, name, matched ) + || nub->IORegistryEntry::compareName( name, matched ) ); +} + +IOReturn AppleMacIO::getNubResources( IOService * nub ) +{ + if( nub->getDeviceMemory()) + return( kIOReturnSuccess ); + + IODTResolveAddressing( nub, "reg", fNub->getDeviceMemoryWithIndex(0) ); + + return( kIOReturnSuccess); +} + +bool AppleMacIO::selfTest( void ) +{ + IODBDMADescriptor *dmaDescriptors; + UInt32 dmaDescriptorsPhys; + UInt32 i; + UInt32 status; + IODBDMADescriptor *dmaDesc; + volatile IODBDMAChannelRegisters *ioBaseDMA; + bool ok = false; + enum { kTestChannel = 0x8000 }; + + ioBaseDMA = (volatile IODBDMAChannelRegisters *) + (((UInt32)fMemory->getVirtualAddress()) + + kTestChannel ); + + do { + dmaDescriptors = (IODBDMADescriptor *)IOMallocContiguous(page_size, 1, & dmaDescriptorsPhys); + if (!dmaDescriptors) + continue; + + if ( (UInt32)dmaDescriptors & (page_size - 1) ) { + IOLog("AppleMacIO::%s() - DMA Descriptor memory not page aligned!!", __FUNCTION__); + continue; + } + + bzero( dmaDescriptors, page_size ); + + IODBDMAReset( ioBaseDMA ); + + dmaDesc = dmaDescriptors; + + IOMakeDBDMADescriptor( dmaDesc, + kdbdmaNop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0 ); + + dmaDesc++; + + IOMakeDBDMADescriptorDep( dmaDesc, + kdbdmaStoreQuad, + kdbdmaKeySystem, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 4, + dmaDescriptorsPhys+16*sizeof(IODBDMADescriptor), + 0x12345678 ); + + dmaDesc++; + + 
IOMakeDBDMADescriptor( dmaDesc, + kdbdmaStop, + kdbdmaKeyStream0, + kdbdmaIntNever, + kdbdmaBranchNever, + kdbdmaWaitNever, + 0, + 0 ); + + + for ( i = 0; (!ok) && (i < 3); i++ ) + { + dmaDescriptors[16].operation = 0; + + IOSetDBDMACommandPtr( ioBaseDMA, dmaDescriptorsPhys ); + IODBDMAContinue( ioBaseDMA ); + + IODelay( 200 ); + + status = IOGetDBDMAChannelStatus( ioBaseDMA ); + + if ( ((status & kdbdmaActive) == 0) + && ((status & kdbdmaDead) == 0) + && (OSReadSwapInt32( &dmaDescriptors[16].operation, 0 ) == 0x12345678 )) + ok = true; + } + + IODBDMAReset( ioBaseDMA ); + + } while (false); + + if (dmaDescriptors) + IOFreeContiguous(dmaDescriptors, page_size); + + + return ok; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOService + +OSDefineMetaClassAndStructors(AppleMacIODevice, IOService); +OSMetaClassDefineReservedUnused(AppleMacIODevice, 0); +OSMetaClassDefineReservedUnused(AppleMacIODevice, 1); +OSMetaClassDefineReservedUnused(AppleMacIODevice, 2); +OSMetaClassDefineReservedUnused(AppleMacIODevice, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleMacIODevice::compareName( OSString * name, + OSString ** matched = 0 ) const +{ + return( ((AppleMacIO *)getProvider())-> + compareNubName( this, name, matched )); +} + +IOService * AppleMacIODevice::matchLocation( IOService * /* client */ ) +{ + return( this ); +} + +IOReturn AppleMacIODevice::getResources( void ) +{ + return( ((AppleMacIO *)getProvider())->getNubResources( this )); +} + diff --git a/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp b/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp new file mode 100644 index 000000000..00deddd4f --- /dev/null +++ b/iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp @@ -0,0 +1,161 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-9 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#include +#include +#include + +#include + +extern "C" { +#include +} + +bool RootRegistered( OSObject * us, void *, IOService * yourDevice ); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndStructors(AppleNMI, IOService); +OSMetaClassDefineReservedUnused(AppleNMI, 0); +OSMetaClassDefineReservedUnused(AppleNMI, 1); +OSMetaClassDefineReservedUnused(AppleNMI, 2); +OSMetaClassDefineReservedUnused(AppleNMI, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleNMI::start(IOService *provider) +{ + if (!super::init()) return false; + + enable_debugger = FALSE; + mask_NMI = FALSE; + + if (provider->getProperty("enable_debugger")) + enable_debugger = TRUE; // Flag to automatically jump to debugger at NMI press + + if (provider->getProperty("mask_NMI")) + mask_NMI = TRUE; // Flag to mask/unmask NMI @ sleep/wake + + // Get notified when Root Domain registers + addNotification( 
gIOPublishNotification, serviceMatching("IOPMrootDomain"), (IOServiceNotificationHandler)RootRegistered, this, 0 ); + + // Register the interrupt. + provider->registerInterrupt(0, this, (IOInterruptAction) &AppleNMI::handleInterrupt, 0); + provider->enableInterrupt(0); + + return true; +} + +// ********************************************************************************** +// The Root Power Domain has registered, so now we register as an interested driver +// so we know when the system is going to sleep or wake +// ********************************************************************************** +bool RootRegistered( OSObject * us, void *, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((AppleNMI *)us)->rootDomain = yourDevice; + ((IOPMrootDomain *)yourDevice)->registerInterestedDriver((IOService *) us); + } + + return true; +} + +IOReturn AppleNMI::initNMI(IOInterruptController *parentController, OSData *parentSource) +{ + // Allocate the IOInterruptSource so this can act like a nub. + _interruptSources = (IOInterruptSource *)IOMalloc(sizeof(IOInterruptSource)); + if (_interruptSources == 0) return kIOReturnNoMemory; + _numInterruptSources = 1; + + // Set up the IOInterruptSource to point at this. + _interruptSources[0].interruptController = parentController; + _interruptSources[0].vectorData = parentSource; + + // call start using itself as its provider. + if (!start(this)) return kIOReturnError; + + return kIOReturnSuccess; +} + +IOReturn AppleNMI::handleInterrupt(void * /*refCon*/, IOService * /*nub*/, int /*source*/) +{ + if(enable_debugger == TRUE) + Debugger("NMI"); // This is a direct call to the Debugger + else + PE_enter_debugger("NMI"); // This is a indirect call the Debugger that is dependent on the debug flag + + return kIOReturnSuccess; +} + +//********************************************************************************* +// powerStateWillChangeTo +// +// We are notified here of power changes in the root domain. 
The root domain +// cannot actually turn itself on and off, but it notifies us anyway. +//********************************************************************************* +IOReturn AppleNMI::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned long, IOService*) +{ + volatile unsigned long *nmiIntSourceAddr; + unsigned long nmiIntSource; + + if (mask_NMI == TRUE) + { + if ( ! (theFlags & IOPMPowerOn) ) + { + IOLog("AppleNMI mask NMI\n"); + + // Mask NMI and change from edge to level whilst sleeping (copied directly from OS9 code) + nmiIntSourceAddr = (volatile unsigned long *)kExtInt9_NMIIntSource; + nmiIntSource = *nmiIntSourceAddr; + nmiIntSource |= kNMIIntLevelMask; + *nmiIntSourceAddr = nmiIntSource; + eieio(); + nmiIntSource |= kNMIIntMask; + *nmiIntSourceAddr = nmiIntSource; + eieio(); + } + else + { + IOLog("AppleNMI unmask NMI\n"); + + // Unmask NMI and change back to edge (copied directly from OS9 code) + nmiIntSourceAddr = (volatile unsigned long *)kExtInt9_NMIIntSource; + nmiIntSource = *nmiIntSourceAddr; + nmiIntSource &= ~kNMIIntLevelMask; + *nmiIntSourceAddr = nmiIntSource; + eieio(); + nmiIntSource &= ~kNMIIntMask; + *nmiIntSourceAddr = nmiIntSource; + eieio(); + } + } + + return IOPMAckImplied; +} diff --git a/iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.cpp b/iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.cpp new file mode 100644 index 000000000..27b69da0e --- /dev/null +++ b/iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.cpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include "AppleNVRAM.h" + + +#define super IONVRAMController +OSDefineMetaClassAndStructors(AppleNVRAM, IONVRAMController); + + +// **************************************************************************** +// start +// +// **************************************************************************** +bool AppleNVRAM::start(IOService *provider) +{ + IOItemCount numRanges; + IOMemoryMap *map; + + numRanges = provider->getDeviceMemoryCount(); + + if (numRanges == 1) { + _nvramType = kNVRAMTypeIOMem; + + // Get the address of the data register. + map = provider->mapDeviceMemoryWithIndex(0); + if (map == 0) return false; + _nvramData = (UInt8 *)map->getVirtualAddress(); + + } else if (numRanges == 2) { + _nvramType = kNVRAMTypePort; + + // Get the address of the port register. + map = provider->mapDeviceMemoryWithIndex(0); + if (map == 0) return false; + _nvramPort = (UInt8 *)map->getVirtualAddress(); + + // Get the address of the data register. + map = provider->mapDeviceMemoryWithIndex(1); + if (map == 0) return false; + _nvramData = (UInt8 *)map->getVirtualAddress(); + + } else { + return false; + } + + return super::start(provider); +} + +// **************************************************************************** +// read +// +// Read data from the NVRAM and return it in buffer. 
+// +// **************************************************************************** +IOReturn AppleNVRAM::read(IOByteCount offset, UInt8 *buffer, + IOByteCount length) +{ + UInt32 cnt; + + if ((buffer == 0) || (length <= 0) || (offset < 0) || + (offset + length > kNVRAMImageSize)) + return kIOReturnBadArgument; + + switch (_nvramType) { + case kNVRAMTypeIOMem : + for (cnt = 0; cnt < length; cnt++) { + buffer[cnt] = _nvramData[(offset + cnt) << 4]; + } + break; + + case kNVRAMTypePort: + for (cnt = 0; cnt < length; cnt++) { + *_nvramPort = (offset + length) >> 5; + eieio(); + buffer[cnt] = _nvramData[((offset + length) & 0x1F) << 4]; + } + break; + + default : + return kIOReturnNotReady; + } + + return kIOReturnSuccess; +} + + +// **************************************************************************** +// write +// +// Write data from buffer into NVRAM. +// +// **************************************************************************** +IOReturn AppleNVRAM::write(IOByteCount offset, UInt8 *buffer, + IOByteCount length) +{ + UInt32 cnt; + + if ((buffer == 0) || (length <= 0) || (offset < 0) || + (offset + length > kNVRAMImageSize)) + return kIOReturnBadArgument; + + switch (_nvramType) { + case kNVRAMTypeIOMem : + for (cnt = 0; cnt < length; cnt++) { + _nvramData[(offset + cnt) << 4] = buffer[cnt]; + eieio(); + } + break; + + case kNVRAMTypePort: + for (cnt = 0; cnt < length; cnt++) { + *_nvramPort = (offset + length) >> 5; + eieio(); + _nvramData[((offset + length) & 0x1F) << 4] = buffer[cnt]; + eieio(); + } + break; + + default : + return kIOReturnNotReady; + } + + return kIOReturnSuccess; +} diff --git a/iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.h b/iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.h new file mode 100644 index 000000000..63ee60261 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +enum { + kNVRAMTypeNone = 0, + kNVRAMTypeIOMem, + kNVRAMTypePort, + + kNVRAMImageSize = 0x2000 +}; + +class AppleNVRAM : public IONVRAMController +{ + OSDeclareDefaultStructors(AppleNVRAM); + +private: + UInt32 _nvramType; + volatile UInt8 *_nvramData; + volatile UInt8 *_nvramPort; + +public: + bool start(IOService *provider); + + virtual IOReturn read(IOByteCount offset, UInt8 *buffer, + IOByteCount length); + virtual IOReturn write(IOByteCount offset, UInt8 *buffer, + IOByteCount length); +}; diff --git a/iokit/Drivers/platform/drvAppleOHare/OHare.cpp b/iokit/Drivers/platform/drvAppleOHare/OHare.cpp new file mode 100644 index 000000000..bd3ac85d4 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleOHare/OHare.cpp @@ -0,0 +1,303 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + + +#include + +#include +#include +#include +#include + +#include + +#include "OHare.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super AppleMacIO + +OSDefineMetaClassAndStructors(OHare, AppleMacIO); + +bool OHare::start(IOService *provider) +{ + IOInterruptAction handler; + OSSymbol *interruptControllerName; + AppleNMI *appleNMI; + long nmiSource; + OSData *nmiData; + IOReturn error; + + // Call MacIO's start. + if (!super::start(provider)) + return false; + + // Figure out which ohare this is. + if (IODTMatchNubWithKeys(provider, "ohare")) + ohareNum = kPrimaryOHare; + else if (IODTMatchNubWithKeys(provider, "'pci106b,7'")) + ohareNum = kSecondaryOHare; + else return false; // This should not happen. + + if (ohareNum == kPrimaryOHare) { + getPlatform()->setCPUInterruptProperties(provider); + } + + // Make nubs for the children. + publishBelow( provider ); + + // get the base address of the this OHare. + ohareBaseAddress = fMemory->getVirtualAddress(); + + // get the name of the interrupt controller + interruptControllerName = getInterruptControllerName(); + + // Allocate the interruptController instance. 
+ interruptController = new OHareInterruptController; + if (interruptController == NULL) return false; + + // call the interruptController's init method. + error = interruptController->initInterruptController(provider, ohareBaseAddress); + if (error != kIOReturnSuccess) return false; + + handler = interruptController->getInterruptHandlerAddress(); + provider->registerInterrupt(0, interruptController, handler, 0); + + provider->enableInterrupt(0); + + // Register the interrupt controller so clients can find it. + getPlatform()->registerInterruptController(interruptControllerName, + interruptController); + + if (ohareNum != kPrimaryOHare) return true; + + // Create the NMI Driver. + nmiSource = 20; + nmiData = OSData::withBytes(&nmiSource, sizeof(long)); + appleNMI = new AppleNMI; + if ((nmiData != 0) && (appleNMI != 0)) { + appleNMI->initNMI(interruptController, nmiData); + } + + return true; +} + +OSSymbol *OHare::getInterruptControllerName(void) +{ + OSSymbol *interruptControllerName; + + switch (ohareNum) { + case kPrimaryOHare : + interruptControllerName = gIODTDefaultInterruptController; + break; + + case kSecondaryOHare : + interruptControllerName = OSSymbol::withCStringNoCopy("SecondaryInterruptController"); + break; + + default: + interruptControllerName = OSSymbol::withCStringNoCopy("UnknownInterruptController"); + break; + } + + return interruptControllerName; +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOInterruptController + +OSDefineMetaClassAndStructors(OHareInterruptController, IOInterruptController); + +IOReturn OHareInterruptController::initInterruptController(IOService *provider, IOLogicalAddress interruptControllerBase) +{ + int cnt; + + parentNub = provider; + + // Allocate the task lock. 
+ taskLock = IOLockAlloc(); + if (taskLock == 0) return kIOReturnNoResources; + + // Allocate the memory for the vectors + vectors = (IOInterruptVector *)IOMalloc(kNumVectors * sizeof(IOInterruptVector)); + if (vectors == NULL) { + IOLockFree(taskLock); + return kIOReturnNoMemory; + } + bzero(vectors, kNumVectors * sizeof(IOInterruptVector)); + + // Allocate locks for the + for (cnt = 0; cnt < kNumVectors; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < kNumVectors; cnt++) { + IOLockFree(taskLock); + if (vectors[cnt].interruptLock != NULL) + IOLockFree(vectors[cnt].interruptLock); + } + return kIOReturnNoResources; + } + } + + // Setup the registers accessors + eventsReg = (unsigned long)(interruptControllerBase + kEventsOffset); + maskReg = (unsigned long)(interruptControllerBase + kMaskOffset); + clearReg = (unsigned long)(interruptControllerBase + kClearOffset); + levelsReg = (unsigned long)(interruptControllerBase + kLevelsOffset); + + // Initialize the registers. + + // Disable all interrupts. + stwbrx(0x00000000, maskReg); + eieio(); + + // Clear all pending interrupts. + stwbrx(0xFFFFFFFF, clearReg); + eieio(); + + // Disable all interrupts. (again?) + stwbrx(0x00000000, maskReg); + eieio(); + + return kIOReturnSuccess; +} + +IOInterruptAction OHareInterruptController::getInterruptHandlerAddress(void) +{ + return (IOInterruptAction)&OHareInterruptController::handleInterrupt; +} + +IOReturn OHareInterruptController::handleInterrupt(void * /*refCon*/, + IOService * /*nub*/, + int /*source*/) +{ + int done; + long events, vectorNumber; + IOInterruptVector *vector; + unsigned long maskTmp; + + do { + done = 1; + + // Do all the sources for events, plus any pending interrupts. 
+ // Also add in the "level" sensitive sources + maskTmp = lwbrx(maskReg); + events = lwbrx(eventsReg) & ~kTypeLevelMask; + events |= lwbrx(levelsReg) & maskTmp & kTypeLevelMask; + events |= pendingEvents & maskTmp; + pendingEvents = 0; + eieio(); + + // Since we have to clear the level'd one clear the current edge's too. + stwbrx(kTypeLevelMask | events, clearReg); + eieio(); + + if (events) done = 0; + + while (events) { + vectorNumber = 31 - cntlzw(events); + events ^= (1 << vectorNumber); + vector = &vectors[vectorNumber]; + + vector->interruptActive = 1; + sync(); + isync(); + if (!vector->interruptDisabledSoft) { + isync(); + + // Call the handler if it exists. + if (vector->interruptRegistered) { + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + } + } else { + // Hard disable the source. + vector->interruptDisabledHard = 1; + disableVectorHard(vectorNumber, vector); + } + + vector->interruptActive = 0; + } + } while (!done); + + return kIOReturnSuccess; +} + +bool OHareInterruptController::vectorCanBeShared(long /*vectorNumber*/, IOInterruptVector */*vector*/) +{ + return true; +} + +int OHareInterruptController::getVectorType(long vectorNumber, IOInterruptVector */*vector*/) +{ + int interruptType; + + if (kTypeLevelMask & (1 << vectorNumber)) { + interruptType = kIOInterruptTypeLevel; + } else { + interruptType = kIOInterruptTypeEdge; + } + + return interruptType; +} + +void OHareInterruptController::disableVectorHard(long vectorNumber, IOInterruptVector */*vector*/) +{ + unsigned long maskTmp; + + // Turn the source off at hardware. 
+ maskTmp = lwbrx(maskReg); + maskTmp &= ~(1 << vectorNumber); + stwbrx(maskTmp, maskReg); + eieio(); +} + +void OHareInterruptController::enableVector(long vectorNumber, + IOInterruptVector *vector) +{ + unsigned long maskTmp; + + maskTmp = lwbrx(maskReg); + maskTmp |= (1 << vectorNumber); + stwbrx(maskTmp, maskReg); + eieio(); + if (lwbrx(levelsReg) & (1 << vectorNumber)) { + // lost the interrupt + causeVector(vectorNumber, vector); + } +} + +void OHareInterruptController::causeVector(long vectorNumber, + IOInterruptVector */*vector*/) +{ + pendingEvents |= 1 << vectorNumber; + parentNub->causeInterrupt(0); +} diff --git a/iokit/Drivers/platform/drvAppleOHare/OHare.h b/iokit/Drivers/platform/drvAppleOHare/OHare.h new file mode 100644 index 000000000..42d672ce6 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleOHare/OHare.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_OHARE_H +#define _IOKIT_OHARE_H + +#include + +#include +#include + +#define kPrimaryOHare (0) +#define kSecondaryOHare (1) + +#define kNumVectors (32) + +#define kTypeLevelMask (0x1FF00000) + +#define kEventsOffset (0x00020) +#define kMaskOffset (0x00024) +#define kClearOffset (0x00028) +#define kLevelsOffset (0x0002C) + + +class OHareInterruptController; + +class OHare : public AppleMacIO +{ + OSDeclareDefaultStructors(OHare); + +private: + IOLogicalAddress ohareBaseAddress; + long ohareNum; + OHareInterruptController *interruptController; + + virtual OSSymbol *getInterruptControllerName(void); + +public: + virtual bool start(IOService *provider); +}; + + +class OHareInterruptController : public IOInterruptController +{ + OSDeclareDefaultStructors(OHareInterruptController); + +private: + IOService *parentNub; + IOLock *taskLock; + unsigned long pendingEvents; + unsigned long eventsReg; + unsigned long maskReg; + unsigned long clearReg; + unsigned long levelsReg; + +public: + virtual IOReturn initInterruptController(IOService *provider, + IOLogicalAddress interruptControllerBase); + + virtual IOInterruptAction getInterruptHandlerAddress(void); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source); + + virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector *vector); + virtual int getVectorType(long vectorNumber, IOInterruptVector *vector); + virtual void disableVectorHard(long vectorNumber, IOInterruptVector *vector); + virtual void enableVector(long vectorNumber, IOInterruptVector *vector); + virtual void causeVector(long vectorNumber, IOInterruptVector *vector); +}; + + +#endif /* ! 
_IOKIT_OHARE_H */ diff --git a/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp b/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp new file mode 100644 index 000000000..5f8896351 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp @@ -0,0 +1,383 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 12 Nov 1998 suurballe Created. 
+ */ + +#include +#include "IOPMUADBController.h" + +#define super IOADBController +OSDefineMetaClassAndStructors(IOPMUADBController, IOADBController) + +// ********************************************************************************** +// start +// +// ********************************************************************************** +IOService * IOPMUADBController::probe( IOService * provider, SInt32 * score ) +{ + if (super::probe(provider, score) == NULL) + return NULL; + + // this adb controller must interface with the pmu, so let's check if it is of the right type: + // so in any case if this is a powerbook G3 1998 or 1999 it has a pmu so: + if (IODTMatchNubWithKeys(getPlatform()->getProvider(), "'AAPL,PowerBook1998'") || + IODTMatchNubWithKeys(getPlatform()->getProvider(), "'PowerBook1,1'")) + return this; + + // If it is a different machine the compatible property will tell us if it is a pmu-driven + // adb device: + OSData *kl = OSDynamicCast(OSData, provider->getProperty("compatible")); + if ((kl != NULL) && kl->isEqualTo("pmu", 3)) + return this; + + // In all the other cases we do not handle it: + return NULL; +} + +// ********************************************************************************** +// start +// +// ********************************************************************************** +bool IOPMUADBController::start ( IOService * nub ) +{ + // Wait for the PMU to show up: + PMUdriver = waitForService(serviceMatching("ApplePMU")); + + // All the commands in this file will generate an interrupt. 
+ // since the interrupt is the logical conclusion of those commands + // we need a syncer to sincronize the begin/end of these functions: + waitingForData = NULL; + + // Registers for the two interrupts that needs to handle: + if (PMUdriver->callPlatformFunction("registerForPMUInterrupts", true, (void*)kPMUADBint, (void*)handleADBInterrupt, (void*)this, NULL) != kIOReturnSuccess) { +#ifdef VERBOSE_LOGS_ON + IOLog("IOPMUADBController::start registerForPMUInterrupts kPMUADBint fails\n"); +#endif // VERBOSE_LOGS_ON + + return false; + } + + // Creates the mutex lock to protect the clients list: + requestMutexLock = NULL; + requestMutexLock = IOLockAlloc(); + if (!requestMutexLock) + return false; + + // This happens last (while the most common place is the begin) because + // trhe superclass may need the services of the functions above. + if( !super::start(nub)) + return false; + + return true; +} + +// ********************************************************************************** +// free +// +// ********************************************************************************** +void IOPMUADBController::free ( ) +{ + // Releases the mutex lock used to protect the clients lists: + if (requestMutexLock != NULL) { + IOLockFree (requestMutexLock); + requestMutexLock = NULL; + } + + // And removes the interrupt handler: + if (PMUdriver != NULL) + PMUdriver->callPlatformFunction("deRegisterClient", true, (void*)this, (void*)kPMUADBint, NULL, NULL); +} + +// ********************************************************************************** +// localSendMiscCommand +// +// ********************************************************************************** +IOReturn IOPMUADBController::localSendMiscCommand(int command, IOByteCount sLength, UInt8 *sBuffer, IOByteCount *rLength, UInt8 *rBuffer) +{ + IOReturn returnValue = kIOReturnError; + + // The poupose of this method is to free us from the pain to create a parameter block each time + // we wish to talk to the pmu: + 
SendMiscCommandParameterBlock prmBlock = {command, sLength, sBuffer, rLength, rBuffer}; + +#ifdef VERBOSE_LOGS_ON + IOLog("ApplePMUInterface::localSendMiscCommand 0x%02x %d 0x%08lx 0x%08lx 0x%08lx\n", + command, sLength, sBuffer, rLength, rBuffer); +#endif + + if (PMUdriver != NULL) { +#ifdef VERBOSE_LOGS_ON + IOLog("IOPMUADBController::localSendMiscCommand calling PMUdriver->callPlatformFunction\n"); +#endif + returnValue = PMUdriver->callPlatformFunction("sendMiscCommand", true, (void*)&prmBlock, NULL, NULL, NULL); + } + + // If we are here we do not have a dreive to talk to: +#ifdef VERBOSE_LOGS_ON + IOLog("IOPMUADBController::localSendMiscCommand end 0x%08lx\n", returnValue); +#endif + + return returnValue; +} + +// ********************************************************************************** +// this is the interrupt handler for all ADB interrupts: +// +// ********************************************************************************** + +/* static */ void +IOPMUADBController::handleADBInterrupt(IOService *client, UInt8 interruptMask, UInt32 length, UInt8 *buffer) +{ + if (interruptMask & kPMUautopoll) + autopollHandler(client, buffer[0], length - 1, buffer + 1); // yes, call adb input handler + else { + IOPMUADBController *myThis = OSDynamicCast(IOPMUADBController, client); + + if ((myThis != NULL) && (myThis->waitingForData != NULL)) { + // Complets the adb transaction + myThis->dataLen = length - 1; + bcopy(buffer + 1, myThis->dataBuffer, myThis->dataLen); + myThis->waitingForData->signal(); + } + } +} + +// ********************************************************************************** +// setAutoPollPeriod +// +// ********************************************************************************** +IOReturn IOPMUADBController::setAutoPollPeriod ( int ) +{ + return kPMUNotSupported; +} + + +// ********************************************************************************** +// getAutoPollPeriod +// +// 
********************************************************************************** +IOReturn IOPMUADBController::getAutoPollPeriod ( int * ) +{ + return kPMUNotSupported; +} + + +// ********************************************************************************** +// setAutoPollList +// +// ********************************************************************************** +IOReturn IOPMUADBController::setAutoPollList ( UInt16 PollBitField ) +{ + pollList = PollBitField; // remember the new poll list + + if ( autopollOn ) { + UInt8 oBuffer[4]; + + oBuffer[0] = 0; // Byte count in the resto of the command + oBuffer[1] = 0x86; // adb Command op. + oBuffer[2] = (UInt8)(PollBitField >> 8); // ?? + oBuffer[3] = (UInt8)(PollBitField & 0xff); // ?? + + localSendMiscCommand (kPMUpMgrADB, 4, oBuffer, NULL, NULL); + } + return kPMUNoError; +} + + +// ********************************************************************************** +// getAutoPollList +// +// ********************************************************************************** +IOReturn IOPMUADBController::getAutoPollList ( UInt16 * activeAddressMask ) +{ + *activeAddressMask = pollList; + return kPMUNoError; +} + + +// ********************************************************************************** +// setAutoPollEnable +// +// ********************************************************************************** +IOReturn IOPMUADBController::setAutoPollEnable ( bool enable ) +{ + UInt8 oBuffer[4]; + + if ( enable ) { // enabling autopoll + oBuffer[0] = 0; + oBuffer[1] = 0x86; + oBuffer[2] = (UInt8)(pollList >> 8); + oBuffer[3] = (UInt8)(pollList & 0xff); + + localSendMiscCommand (kPMUpMgrADB, 4, oBuffer, NULL,NULL); + autopollOn = true; + } + else { // disabling autopoll; + localSendMiscCommand (kPMUpMgrADBoff, 0, NULL, NULL, NULL); + } + + return kPMUNoError; +} + + +// ********************************************************************************** +// resetBus +// +// 
********************************************************************************** +IOReturn IOPMUADBController::resetBus ( void ) +{ + if (requestMutexLock != NULL) + IOLockLock(requestMutexLock); + + UInt8 oBuffer[4]; + + oBuffer[0] = kPMUResetADBBus; + oBuffer[1] = 0; + oBuffer[2] = 0; + + // Reset bus needs to wait for the interrupt to terminate the transaction: + waitingForData = IOSyncer::create(); + localSendMiscCommand (kPMUpMgrADB, 3, oBuffer, NULL, NULL); + waitingForData->wait(); // wait till done + waitingForData = 0; + + if (requestMutexLock != NULL) + IOLockUnlock(requestMutexLock); + + return kPMUNoError; +} + + +// ********************************************************************************** +// flushDevice +// +// ********************************************************************************** +IOReturn IOPMUADBController::flushDevice ( IOADBAddress address ) +{ + if (requestMutexLock != NULL) + IOLockLock(requestMutexLock); + + UInt8 oBuffer[4]; + + oBuffer[0] = kPMUFlushADB | (address << kPMUADBAddressField); + oBuffer[1] = ( autopollOn ? 2 : 0 ); + oBuffer[2] = 0; + + // flush device needs to wait for the interrupt to terminate the transaction + waitingForData = IOSyncer::create(); + localSendMiscCommand (kPMUpMgrADB, 3, oBuffer, NULL, NULL); + waitingForData->wait(); // wait till done + waitingForData = 0; + + if (requestMutexLock != NULL) + IOLockUnlock(requestMutexLock); + + return kPMUNoError; +} + + +// ********************************************************************************** +// readFromDevice +// +// The length parameter is ignored on entry. It is set on exit to reflect +// the number of bytes read from the device. 
+// ********************************************************************************** +IOReturn IOPMUADBController::readFromDevice ( IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) +{ + if ( (length == NULL) || (data == NULL) ) { + return kPMUParameterError; + } + + if (requestMutexLock != NULL) + IOLockLock(requestMutexLock); + + UInt8 oBuffer[4]; + + oBuffer[0] = kPMUReadADB | (address << kPMUADBAddressField) | (adbRegister); + oBuffer[1] = ( autopollOn ? 2 : 0 ); + oBuffer[2] = 0; + + // read from device needs to wait for the interrupt to terminate the transaction + // and to obtain the data from the device. + waitingForData = IOSyncer::create(); + localSendMiscCommand (kPMUpMgrADB, 3, oBuffer, NULL, NULL); + waitingForData->wait(); // wait till done + waitingForData = 0; + + // set caller's length + *length = (dataLen < *length ? dataLen : *length); + bcopy(dataBuffer, data, *length); + + if (requestMutexLock != NULL) + IOLockUnlock(requestMutexLock); + + if (dataLen == 0 ) { // nothing read; device isn't there + return ADB_RET_NOTPRESENT; + } + + return ADB_RET_OK; +} + + +// ********************************************************************************** +// writeToDevice +// +// ********************************************************************************** +IOReturn IOPMUADBController::writeToDevice ( IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) +{ + // Last check on * length > (252): since the pmu registers are 8 bit + // and the buffer has the first 3 bytes used for the standard parameters + // the max length cannot be more than 252 bytes. + if ( (* length == 0) || (data == NULL) || (* length > 252) ) + { + return kPMUParameterError; + } + + if (address == 0) + return kPMUNoError; // for now let's ignore these ... 
+ + if (requestMutexLock != NULL) + IOLockLock(requestMutexLock); + + UInt8 oBuffer[256]; + + oBuffer[0] = kPMUWriteADB | (address << kPMUADBAddressField) | (adbRegister); + oBuffer[1] = ( autopollOn ? 2 : 0 ); + oBuffer[2] = *length; + bcopy(data, &oBuffer[3], *length); + + // write to the device needs to wait for the interrupt to terminate the transaction + waitingForData = IOSyncer::create(); + localSendMiscCommand (kPMUpMgrADB, 3 + *length, oBuffer, NULL, NULL); + waitingForData->wait(); + waitingForData = 0; + + if (requestMutexLock != NULL) + IOLockUnlock(requestMutexLock); + + return kPMUNoError; +} + + diff --git a/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h b/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h new file mode 100644 index 000000000..21ee01081 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePMU/IOPMUADBController.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 12 Nov 1998 suurballe Created. 
+ */ + +#include +#include +#include +#include + +class IOPMUADBController : public IOADBController +{ + OSDeclareDefaultStructors(IOPMUADBController) + +private: + enum { + kPMUNoError = 0, + kPMUInitError = 1, // PMU failed to initialize + kPMUParameterError = 2, // Bad parameters + kPMUNotSupported = 3, // PMU don't do that (Cuda does, though) + kPMUIOError = 4 // Nonspecific I/O failure + }; + + enum { + kPMUpMgrADB = 0x20, // send ADB command + kPMUpMgrADBoff = 0x21, // turn ADB auto-poll off + kPMUreadADB = 0x28, // Apple Desktop Bus + kPMUpMgrADBInt = 0x2F, // get ADB interrupt data (Portable only) + }; + + enum { + kPMUADBAddressField = 4 + }; + + enum { + kPMUResetADBBus = 0x00, + kPMUFlushADB = 0x01, + kPMUWriteADB = 0x08, + kPMUReadADB = 0x0C, + kPMURWMaskADB = 0x0C + }; + + enum { // when kPMUADBint is set + kPMUADBint = 0x10, + kPMUwaitinglsc = 0x01, // waiting to listen to charger + kPMUautoSRQpolling = 0x02, // auto/SRQ polling is enabled + kPMUautopoll = 0x04 // input is autopoll data + }; + + // We need this to callPlatformFunction when sending to sendMiscCommand + typedef struct SendMiscCommandParameterBlock { + int command; + IOByteCount sLength; + UInt8 *sBuffer; + IOByteCount *rLength; + UInt8 *rBuffer; + } SendMiscCommandParameterBlock; + typedef SendMiscCommandParameterBlock *SendMiscCommandParameterBlockPtr; + + // Local data: + IOService *PMUdriver; + UInt32 pollList; // ADB autopoll device bitmap + bool autopollOn; // TRUE: PMU is autopolling + + UInt32 dataLen; // data len as result of an interrupt + UInt8 dataBuffer[256]; // data as result of an interrupt + IOSyncer *waitingForData; // syncronizer for reads and writes. 
 + + // Local interrupt handlers: + static void handleADBInterrupt(IOService *client, UInt8 matchingMask, UInt32 length, UInt8 *buffer); + + // This lock protects the access to the common variables of this object: + IOLock *requestMutexLock; + + // A simpler way to interface with the pmu SendMiscCommand + IOReturn localSendMiscCommand(int command, IOByteCount sLength, UInt8 *sBuffer, IOByteCount *rLength, UInt8 *rBuffer); + +public: + IOService *probe( IOService * nub, SInt32 * score ); + bool start ( IOService * ); + void free (); + IOReturn setAutoPollPeriod ( int microseconds ); + IOReturn getAutoPollPeriod ( int * microseconds ); + IOReturn setAutoPollList ( UInt16 activeAddressMask ); + IOReturn getAutoPollList ( UInt16 * activeAddressMask ); + IOReturn setAutoPollEnable ( bool enable ); + IOReturn resetBus ( void ); + IOReturn flushDevice ( IOADBAddress address ); + IOReturn readFromDevice ( IOADBAddress address, IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); + IOReturn writeToDevice ( IOADBAddress address, IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); +}; diff --git a/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.cpp b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.cpp new file mode 100644 index 000000000..f73ced1ee --- /dev/null +++ b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.cpp @@ -0,0 +1,1080 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include "ApplePS2Controller.h" + +extern "C" +{ + #include + #include +} + +static ApplePS2Controller * gApplePS2Controller = 0; // global variable to self + +// ============================================================================= +// Interrupt-Time Support Functions +// + +static void interruptHandlerMouse(OSObject *, void *, IOService *, int) +{ + // + // Wake our workloop to service the interrupt. This is an edge-triggered + // interrupt, so returning from this routine without clearing the interrupt + // condition is perfectly normal. + // + + gApplePS2Controller->_interruptSourceMouse->interruptOccurred(0, 0, 0); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +static void interruptHandlerKeyboard(OSObject *, void *, IOService *, int) +{ +#if DEBUGGER_SUPPORT + // + // The keyboard interrupt handler reads in the pending scan code and stores + // it on our internal queue; should it completes a debugger escape sequence, + // we jump to the debugger function immediately. + // + + UInt8 key; + UInt8 status; + + // Lock out the keyboard interrupt handler [redundant here] and claim + // exclusive access to the internal keyboard queue. + + gApplePS2Controller->lockController(); + + // Verify that data is available on the controller's input port. 
+ + if ( ((status = inb(kCommandPort)) & kOutputReady) ) + { + // Verify that the data is keyboard data, otherwise call mouse handler. + // This case should never really happen, but if it does, we handle it. + + if ( (status & kMouseData) ) + { + interruptHandlerMouse(0, 0, 0, 0); + } + else + { + // Retrieve the keyboard data on the controller's input port. + + key = inb(kDataPort); + + // Call the debugger-key-sequence checking code (if a debugger sequence + // completes, the debugger function will be invoked immediately within + // doEscape). The doEscape call may insist that we drop the scan code + // we just received in some cases (a true return) -- we don't question + // it's judgement and comply. + + if (gApplePS2Controller->doEscape(key) == false) + gApplePS2Controller->enqueueKeyboardData(key); + + // In all cases, we wake up our workloop to service the interrupt data. + gApplePS2Controller->_interruptSourceKeyboard->interruptOccurred(0, 0, 0); + } + } + + // Remove the lockout on the keyboard interrupt handler [ineffective here] + // and release our exclusive access to the internal keyboard queue. + + gApplePS2Controller->unlockController(); +#else + // + // Wake our workloop to service the interrupt. This is an edge-triggered + // interrupt, so returning from this routine without clearing the interrupt + // condition is perfectly normal. + // + + gApplePS2Controller->_interruptSourceKeyboard->interruptOccurred(0, 0, 0); + +#endif DEBUGGER_SUPPORT +} + +// ============================================================================= +// ApplePS2Controller Class Implementation +// + +#define super IOService +OSDefineMetaClassAndStructors(ApplePS2Controller, IOService); + +bool ApplePS2Controller::init(OSDictionary * properties) +{ + if (!super::init(properties)) return false; + + // + // Initialize minimal state. 
+ // + + _commandQueue = 0; + _workLoop = 0; + + _interruptSourceKeyboard = 0; + _interruptSourceMouse = 0; + + _interruptTargetKeyboard = 0; + _interruptTargetMouse = 0; + + _interruptActionKeyboard = NULL; + _interruptActionMouse = NULL; + + _interruptInstalledKeyboard = false; + _interruptInstalledMouse = false; + + _mouseDevice = 0; + _keyboardDevice = 0; + +#if DEBUGGER_SUPPORT + _extendedState = false; + _modifierState = 0x00; + + _keyboardQueueAlloc = NULL; + queue_init(&_keyboardQueue); + queue_init(&_keyboardQueueUnused); + + _controllerLockOldSpl = 0; + usimple_lock_init(&_controllerLock, ETAP_NO_TRACE); +#endif DEBUGGER_SUPPORT + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Controller::start(IOService * provider) +{ + // + // The driver has been instructed to start. Allocate all our resources. + // + + if (!super::start(provider)) return false; + +#if DEBUGGER_SUPPORT + _keyboardQueueAlloc = (KeyboardQueueElement *) + IOMalloc(kKeyboardQueueSize*sizeof(KeyboardQueueElement)); + if (!_keyboardQueueAlloc) return false; + + // Add the allocated keyboard queue entries to "unused" queue. + for (int index = 0; index < kKeyboardQueueSize; index++) + queue_enter(&_keyboardQueueUnused, &_keyboardQueueAlloc[index], + KeyboardQueueElement *, chain); +#endif DEBUGGER_SUPPORT + + // + // Initialize the mouse and keyboard hardware to a known state -- the IRQs + // are disabled (don't want interrupts), the clock line is enabled (want to + // be able to send commands), and the device itself is disabled (don't want + // asynchronous data arrival for key/mouse events). We call the read/write + // port routines directly, since no other thread will conflict with us. 
+ // + + UInt8 commandByte; + writeCommandPort(kCP_GetCommandByte); + commandByte = readDataPort(kDT_Keyboard); + commandByte &= ~(kCB_EnableMouseIRQ | kCB_DisableMouseClock); + writeCommandPort(kCP_SetCommandByte); + writeDataPort(commandByte); + + writeDataPort(kDP_SetDefaultsAndDisable); + readDataPort(kDT_Keyboard); // (discard acknowledge; success irrelevant) + + writeCommandPort(kCP_TransmitToMouse); + writeDataPort(kDP_SetDefaultsAndDisable); + readDataPort(kDT_Mouse); // (discard acknowledge; success irrelevant) + + // + // Clear out garbage in the controller's input streams, before starting up + // the work loop. + // + + while ( inb(kCommandPort) & kOutputReady ) + { + inb(kDataPort); + IODelay(kDataDelay); + } + + // + // Initialize our work loop, our command queue, and our interrupt event + // sources. The work loop can accept requests after this step. + // + + _workLoop = IOWorkLoop::workLoop(); + _commandQueue = IOCommandQueue::commandQueue( + this, (IOCommandQueueAction) &ApplePS2Controller::processRequest); + _interruptSourceMouse = IOInterruptEventSource::interruptEventSource( + this, (IOInterruptEventAction) &ApplePS2Controller::interruptOccurred); + _interruptSourceKeyboard = IOInterruptEventSource::interruptEventSource( + this, (IOInterruptEventAction) &ApplePS2Controller::interruptOccurred); + + if ( !_workLoop || + !_commandQueue || + !_interruptSourceMouse || + !_interruptSourceKeyboard ) return false; + + if ( _workLoop->addEventSource(_commandQueue) != kIOReturnSuccess ) + return false; + + // + // Create the keyboard nub and the mouse nub. The keyboard and mouse drivers + // will query these nubs to determine the existence of the keyboard or mouse, + // and should they exist, will attach themselves to the nub as clients. 
+ // + + _keyboardDevice = new ApplePS2KeyboardDevice; + + if ( !_keyboardDevice || + !_keyboardDevice->init() || + !_keyboardDevice->attach(this) ) return false; + + _mouseDevice = new ApplePS2MouseDevice; + + if ( !_mouseDevice || + !_mouseDevice->init() || + !_mouseDevice->attach(this) ) return false; + + gApplePS2Controller = this; + + _keyboardDevice->registerService(); + _mouseDevice->registerService(); + + return true; // success +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::stop(IOService * provider) +{ + // + // The driver has been instructed to stop. Note that we must break all + // connections to other service objects now (ie. no registered actions, + // no pointers and retains to objects, etc), if any. + // + + // Ensure that the interrupt handlers have been uninstalled (ie. no clients). + assert(_interruptInstalledKeyboard == false); + assert(_interruptInstalledMouse == false); + + // Free the nubs we created. + if (_keyboardDevice) _keyboardDevice->release(); + if (_mouseDevice) _mouseDevice->release(); + + // Free the work loop. + if (_workLoop) _workLoop->release(); + + // Free the interrupt source and command queue. + if (_commandQueue) _commandQueue->release(); + if (_interruptSourceKeyboard) _interruptSourceKeyboard->release(); + if (_interruptSourceMouse) _interruptSourceMouse->release(); + +#if DEBUGGER_SUPPORT + // Free the keyboard queue allocation space (after disabling interrupt). 
+ if (_keyboardQueueAlloc) + IOFree(_keyboardQueueAlloc,kKeyboardQueueSize*sizeof(KeyboardQueueElement)); +#endif DEBUGGER_SUPPORT + + gApplePS2Controller = 0; + + super::stop(provider); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOWorkLoop * ApplePS2Controller::getWorkLoop() const +{ + return _workLoop; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::installInterruptAction(PS2DeviceType deviceType, + OSObject * target, + PS2InterruptAction action) +{ + // + // Install the keyboard or mouse interrupt handler. + // + // This method assumes only one possible mouse and only one possible + // keyboard client (ie. callers), and assumes two distinct interrupt + // handlers for each, hence needs no protection against races. + // + + // Is it the keyboard or the mouse interrupt handler that was requested? + // We only install it if it is currently uninstalled. + + if (deviceType == kDT_Keyboard && _interruptInstalledKeyboard == false) + { + target->retain(); + _interruptTargetKeyboard = target; + _interruptActionKeyboard = action; + _workLoop->addEventSource(_interruptSourceKeyboard); + getProvider()->registerInterrupt(kIRQ_Keyboard,0, interruptHandlerKeyboard); + getProvider()->enableInterrupt(kIRQ_Keyboard); + _interruptInstalledKeyboard = true; + } + + else if (deviceType == kDT_Mouse && _interruptInstalledMouse == false) + { + target->retain(); + _interruptTargetMouse = target; + _interruptActionMouse = action; + _workLoop->addEventSource(_interruptSourceMouse); + getProvider()->registerInterrupt(kIRQ_Mouse, 0, interruptHandlerMouse); + getProvider()->enableInterrupt(kIRQ_Mouse); + _interruptInstalledMouse = true; + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::uninstallInterruptAction(PS2DeviceType deviceType) +{ + // + // Uninstall the keyboard or mouse interrupt handler. 
+ // + // This method assumes only one possible mouse and only one possible + // keyboard client (ie. callers), and assumes two distinct interrupt + // handlers for each, hence needs no protection against races. + // + + // Is it the keyboard or the mouse interrupt handler that was requested? + // We only install it if it is currently uninstalled. + + if (deviceType == kDT_Keyboard && _interruptInstalledKeyboard == true) + { + getProvider()->disableInterrupt(kIRQ_Keyboard); + getProvider()->unregisterInterrupt(kIRQ_Keyboard); + _workLoop->removeEventSource(_interruptSourceMouse); + _interruptInstalledKeyboard = false; + _interruptActionKeyboard = NULL; + _interruptTargetKeyboard->release(); + _interruptTargetKeyboard = 0; + } + + else if (deviceType == kDT_Mouse && _interruptInstalledMouse == true) + { + getProvider()->disableInterrupt(kIRQ_Mouse); + getProvider()->unregisterInterrupt(kIRQ_Mouse); + _workLoop->removeEventSource(_interruptSourceMouse); + _interruptInstalledMouse = false; + _interruptActionMouse = NULL; + _interruptTargetMouse->release(); + _interruptTargetMouse = 0; + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +PS2Request * ApplePS2Controller::allocateRequest() +{ + // + // Allocate a request structure. Blocks until successful. Request structure + // is guaranteed to be zeroed. + // + + PS2Request * request = (PS2Request *) IOMalloc(sizeof(PS2Request)); + bzero(request, sizeof(PS2Request)); + return request; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::freeRequest(PS2Request * request) +{ + // + // Deallocate a request structure. + // + + IOFree(request, sizeof(PS2Request)); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2Controller::submitRequest(PS2Request * request) +{ + // + // Submit the request to the controller for processing, asynchronously. 
+ // + + return (_commandQueue->enqueueCommand(false, request) == KERN_SUCCESS); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::submitRequestAndBlock(PS2Request * request) +{ + // + // Submit the request to the controller for processing, synchronously. + // + + IOSyncer * completionSyncer = IOSyncer::create(); + + assert(completionSyncer); + request->completionTarget = this; + request->completionAction = submitRequestAndBlockCompletion; + request->completionParam = completionSyncer; + + _commandQueue->enqueueCommand(true, request); + + completionSyncer->wait(); // wait 'till done +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::submitRequestAndBlockCompletion(void *, void * param) +{ // PS2CompletionAction + IOSyncer * completionSyncer = (IOSyncer *) param; + completionSyncer->signal(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::interruptOccurred(IOInterruptEventSource *, int) +{ // IOInterruptEventAction + // + // Our work loop has informed us of an interrupt, that is, asynchronous + // data has arrived on our input stream. Read the data and dispatch it + // to the appropriate driver. + // + // This method should only be called from our single-threaded work loop. + // + + UInt8 status; + +#if DEBUGGER_SUPPORT + lockController(); // (lock out interrupt + access to queue) + while (1) + { + // See if data is available on the keyboard input stream (off queue); + // we do not read keyboard data from the real data port if it should + // be available. + + if (dequeueKeyboardData(&status)) + { + unlockController(); + dispatchDriverInterrupt(kDT_Keyboard, status); + lockController(); + } + + // See if data is available on the mouse input stream (off real port). 
+ + else if ( (inb(kCommandPort) & (kOutputReady | kMouseData)) == + (kOutputReady | kMouseData)) + { + unlockController(); + dispatchDriverInterrupt(kDT_Mouse, inb(kDataPort)); + lockController(); + } + else break; // out of loop + } + unlockController(); // (release interrupt lockout + access to queue) +#else + // Loop only while there is data currently on the input stream. + + while ( ((status = inb(kCommandPort)) & kOutputReady) ) + { + // Read in and dispatch the data, but only if it isn't what is required + // by the active command. + + dispatchDriverInterrupt((status&kMouseData)?kDT_Mouse:kDT_Keyboard, + inb(kDataPort)); + } +#endif DEBUGGER_SUPPORT +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::dispatchDriverInterrupt(PS2DeviceType deviceType, + UInt8 data) +{ + // + // The supplied data is passed onto the interrupt handler in the appropriate + // driver, if one is registered, otherwise the data byte is thrown away. + // + // This method should only be called from our single-threaded work loop. + // + + if ( deviceType == kDT_Mouse ) + { + // Dispatch the data to the mouse driver. + if (_interruptInstalledMouse) + (*_interruptActionMouse)(_interruptTargetMouse, data); + } + else if ( deviceType == kDT_Keyboard ) + { + // Dispatch the data to the keyboard driver. + if (_interruptInstalledKeyboard) + (*_interruptActionKeyboard)(_interruptTargetKeyboard, data); + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::processRequest(PS2Request * request, + void * /* field1 */, + void * /* field2 */, + void * /* field3 */) + // IOCommandQueueAction +{ + // + // Our work loop has informed us of a request submission. Process + // the request. Note that this code "figures out" when the mouse + // input stream should be read over the keyboard input stream. + // + // This method should only be called from our single-threaded work loop. 
+ // + + UInt8 byte; + PS2DeviceType deviceMode = kDT_Keyboard; + bool failed = false; + bool transmitToMouse = false; + unsigned index; + + // Process each of the commands in the list. + + for (index = 0; index < request->commandsCount; index++) + { + switch (request->commands[index].command) + { + case kPS2C_ReadDataPort: + request->commands[index].inOrOut = readDataPort(deviceMode); + break; + + case kPS2C_ReadDataPortAndCompare: +#if OUT_OF_ORDER_DATA_CORRECTION_FEATURE + byte = readDataPort(deviceMode, request->commands[index].inOrOut); +#else + byte = readDataPort(deviceMode); +#endif + failed = (byte != request->commands[index].inOrOut); + break; + + case kPS2C_WriteDataPort: + writeDataPort(request->commands[index].inOrOut); + if (transmitToMouse) // next reads from mouse input stream + { + deviceMode = kDT_Mouse; + transmitToMouse = false; + } + else + { + deviceMode = kDT_Keyboard; + } + break; + + case kPS2C_WriteCommandPort: + writeCommandPort(request->commands[index].inOrOut); + if (request->commands[index].inOrOut == kCP_TransmitToMouse) + transmitToMouse = true; // preparing to transmit data to mouse + break; + } + + if (failed) break; + } + + // If a command failed and stopped the request processing, store its + // index into the commandsCount field. + + if (failed) request->commandsCount = index; + + // Invoke the completion routine, if one was supplied. + + if (request->completionTarget && request->completionAction) + { + (*request->completionAction)(request->completionTarget, + request->completionParam); + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt8 ApplePS2Controller::readDataPort(PS2DeviceType deviceType) +{ + // + // Blocks until keyboard or mouse data is available from the controller + // and returns that data. 
Note, if mouse data is requested but keyboard + // data is what is available, the data is delivered to the appropriate + // driver interrupt routine immediately (effectively, the request is + // "preempted" temporarily). + // + // There is a built-in timeout for this command of (timeoutCounter X + // kDataDelay) microseconds, approximately. + // + // This method should only be called from our single-threaded work loop. + // + + UInt8 readByte; + UInt8 status; + UInt32 timeoutCounter = 10000; // (timeoutCounter * kDataDelay = 70 ms) + + while (1) + { +#if DEBUGGER_SUPPORT + lockController(); // (lock out interrupt + access to queue) + if (deviceType == kDT_Keyboard && dequeueKeyboardData(&readByte)) + { + unlockController(); + return readByte; + } +#endif DEBUGGER_SUPPORT + + // + // Wait for the controller's output buffer to become ready. + // + + while (timeoutCounter && !((status = inb(kCommandPort)) & kOutputReady)) + { + timeoutCounter--; + IODelay(kDataDelay); + } + + // + // If we timed out, something went awfully wrong; return a fake value. + // + + if (timeoutCounter == 0) + { +#if DEBUGGER_SUPPORT + unlockController(); // (release interrupt lockout + access to queue) +#endif DEBUGGER_SUPPORT + + IOLog("%s: Timed out on %s input stream.\n", getName(), + (deviceType == kDT_Keyboard) ? "keyboard" : "mouse"); + return 0; + } + + // + // Read in the data. We return the data, however, only if it arrived on + // the requested input stream. + // + + readByte = inb(kDataPort); + +#if DEBUGGER_SUPPORT + unlockController(); // (release interrupt lockout + access to queue) +#endif DEBUGGER_SUPPORT + + if ( (status & kMouseData) ) + { + if (deviceType == kDT_Mouse) return readByte; + } + else + { + if (deviceType == kDT_Keyboard) return readByte; + } + + // + // The data we just received is for the other input stream, not the one + // that was requested, so dispatch other device's interrupt handler. 
+ // + + dispatchDriverInterrupt((deviceType==kDT_Keyboard)?kDT_Mouse:kDT_Keyboard, + readByte); + } // while (forever) +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#if OUT_OF_ORDER_DATA_CORRECTION_FEATURE + +UInt8 ApplePS2Controller::readDataPort(PS2DeviceType deviceType, + UInt8 expectedByte) +{ + // + // Blocks until keyboard or mouse data is available from the controller + // and returns that data. Note, if mouse data is requested but keyboard + // data is what is available, the data is delivered to the appropriate + // driver interrupt routine immediately (effectively, the request is + // "preempted" temporarily). + // + // There is a built-in timeout for this command of (timeoutCounter X + // kDataDelay) microseconds, approximately. + // + // This method should only be called from our single-threaded work loop. + // + // This version of readDataPort does exactly the same as the original, + // except that if the value that should be read from the (appropriate) + // input stream is not what is expected, we make these assumptions: + // + // (a) the data byte we did get was "asynchronous" data being sent by + // the device, which has not figured out that it has to respond to + // the command we just sent to it. + // (b) that the real "expected" response will be the next byte in the + // stream; so what we do is put aside the first byte we read and + // wait for the next byte; if it's the expected value, we dispatch + // the first byte we read to the driver's interrupt handler, then + // return the expected byte. The caller will have never known that + // asynchronous data arrived at a very bad time. + // (c) that the real "expected" response will arrive within (kDataDelay + // X timeoutCounter) microseconds from the time the call is made. 
+ // + + UInt8 firstByte = 0; + bool firstByteHeld = false; + UInt8 readByte; + bool requestedStream; + UInt8 status; + UInt32 timeoutCounter = 10000; // (timeoutCounter * kDataDelay = 70 ms) + + while (1) + { +#if DEBUGGER_SUPPORT + lockController(); // (lock out interrupt + access to queue) + if (deviceType == kDT_Keyboard && dequeueKeyboardData(&readByte)) + { + requestedStream = true; + goto skipForwardToY; + } +#endif DEBUGGER_SUPPORT + + // + // Wait for the controller's output buffer to become ready. + // + + while (timeoutCounter && !((status = inb(kCommandPort)) & kOutputReady)) + { + timeoutCounter--; + IODelay(kDataDelay); + } + + // + // If we timed out, we return the first byte we read, unless THIS IS the + // first byte we are trying to read, then something went awfully wrong + // and we return a fake value rather than lock up the controller longer. + // + + if (timeoutCounter == 0) + { +#if DEBUGGER_SUPPORT + unlockController(); // release interrupt lockout + access to queue +#endif DEBUGGER_SUPPORT + + if (firstByteHeld) return firstByte; + + IOLog("%s: Timed out on %s input stream.\n", getName(), + (deviceType == kDT_Keyboard) ? "keyboard" : "mouse"); + return 0; + } + + // + // Read in the data. We process the data, however, only if it arrived on + // the requested input stream. + // + + readByte = inb(kDataPort); + requestedStream = false; + + if ( (status & kMouseData) ) + { + if (deviceType == kDT_Mouse) requestedStream = true; + } + else + { + if (deviceType == kDT_Keyboard) requestedStream = true; + } + +#if DEBUGGER_SUPPORT +skipForwardToY: + unlockController(); // (release interrupt lockout + access to queue) +#endif DEBUGGER_SUPPORT + + if (requestedStream) + { + if (readByte == expectedByte) + { + if (firstByteHeld == false) + { + // + // Normal case. Return first byte received. + // + + return readByte; + } + else + { + // + // Our assumption was correct. The second byte matched. 
Dispatch + // the first byte to the interrupt handler, and return the second. + // + + dispatchDriverInterrupt(deviceType, firstByte); + return readByte; + } + } + else // (readByte does not match expectedByte) + { + if (firstByteHeld == false) + { + // + // The first byte was received, and does not match the byte we are + // expecting. Put it aside for the moment. + // + + firstByteHeld = true; + firstByte = readByte; + } + else if (readByte != expectedByte) + { + // + // The second byte mismatched as well. I have yet to see this case + // occur [Dan], however I do think it's plausible. No error logged. + // + + dispatchDriverInterrupt(deviceType, readByte); + return firstByte; + } + } + } + else + { + // + // The data we just received is for the other input stream, not ours, + // so dispatch appropriate interrupt handler. + // + + dispatchDriverInterrupt((deviceType==kDT_Keyboard)?kDT_Mouse:kDT_Keyboard, + readByte); + } + } // while (forever) +} + +#endif + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::writeDataPort(UInt8 byte) +{ + // + // Block until room in the controller's input buffer is available, then + // write the given byte to the Data Port. + // + // This method should only be dispatched from our single-threaded work loop. + // + + while (inb(kCommandPort) & kInputBusy) IODelay(kDataDelay); + outb(kDataPort, byte); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2Controller::writeCommandPort(UInt8 byte) +{ + // + // Block until room in the controller's input buffer is available, then + // write the given byte to the Command Port. + // + // This method should only be dispatched from our single-threaded work loop. 
+  //
+
+  while (inb(kCommandPort) & kInputBusy) IODelay(kDataDelay);
+  outb(kCommandPort, byte);
+}
+
+// =============================================================================
+// Escape-Key Processing Stuff Localized Here (e.g. Mini-Monitor)
+//
+
+#if DEBUGGER_SUPPORT
+
+#define kModifierShiftLeft    0x01
+#define kModifierShiftRight   0x02
+#define kModifierCtrlLeft     0x04
+#define kModifierCtrlRight    0x08
+#define kModifierAltLeft      0x10
+#define kModifierAltRight     0x20
+#define kModifierWindowsLeft  0x40
+#define kModifierWindowsRight 0x80
+
+#define kModifierShiftMask   (kModifierShiftLeft   | kModifierShiftRight  )
+#define kModifierCtrlMask    (kModifierCtrlLeft    | kModifierCtrlRight   )
+#define kModifierAltMask     (kModifierAltLeft     | kModifierAltRight    )
+#define kModifierWindowsMask (kModifierWindowsLeft | kModifierWindowsRight)
+
+bool ApplePS2Controller::doEscape(UInt8 scancode)
+{
+  static struct
+  {
+    UInt8  scancode;
+    UInt8  extended;
+    UInt16 modifier;
+  } modifierTable[] = { { kSC_Alt,          false, kModifierAltLeft      },
+                        { kSC_Alt,          true,  kModifierAltRight     },
+                        { kSC_Ctrl,         false, kModifierCtrlLeft     },
+                        { kSC_Ctrl,         true,  kModifierCtrlRight    },
+                        { kSC_ShiftLeft,    false, kModifierShiftLeft    },
+                        { kSC_ShiftRight,   false, kModifierShiftRight   },
+                        { kSC_WindowsLeft,  true,  kModifierWindowsLeft  },
+                        { kSC_WindowsRight, true,  kModifierWindowsRight },
+                        { 0,                0,     0                     } };
+
+  UInt32 index;
+  bool   releaseModifiers = false;
+  bool   upBit            = (scancode & kSC_UpBit) ? true : false;
+
+  //
+  // See if this is an extended scancode sequence.
+  //
+
+  if (scancode == kSC_Extend)
+  {
+    _extendedState = true;
+    return false;
+  }
+
+  //
+  // Update the modifier state, if applicable.
+ // + + scancode &= ~kSC_UpBit; + + for (index = 0; modifierTable[index].scancode; index++) + { + if ( modifierTable[index].scancode == scancode && + modifierTable[index].extended == _extendedState ) + { + if (upBit) _modifierState &= ~modifierTable[index].modifier; + else _modifierState |= modifierTable[index].modifier; + + _extendedState = false; + return false; + } + } + + // + // Call the debugger function, if applicable. + // + + if (scancode == kSC_Delete) // (both extended and non-extended scancodes) + { + if ( _modifierState == kModifierAltLeft || + _modifierState == kModifierAltRight ) + { + // Disable the mouse by forcing the clock line low. + + while (inb(kCommandPort) & kInputBusy) IODelay(kDataDelay); + outb(kCommandPort, kCP_DisableMouseClock); + + // Call the debugger function. + + Debugger("Programmer Key"); + + // Re-enable the mouse by making the clock line active. + + while (inb(kCommandPort) & kInputBusy) IODelay(kDataDelay); + outb(kCommandPort, kCP_EnableMouseClock); + + releaseModifiers = true; + } + } + + // + // Release all the modifier keys that were down before the debugger + // function was called (assumption is that they are no longer held + // down after the debugger function returns). + // + + if (releaseModifiers) + { + for (index = 0; modifierTable[index].scancode; index++) + { + if ( _modifierState & modifierTable[index].modifier ) + { + if (modifierTable[index].extended) enqueueKeyboardData(kSC_Extend); + enqueueKeyboardData(modifierTable[index].scancode | kSC_UpBit); + } + } + _modifierState = 0x00; + } + + // + // Update all other state and return status. + // + + _extendedState = false; + return (releaseModifiers); +} + +void ApplePS2Controller::enqueueKeyboardData(UInt8 key) +{ + // + // Enqueue the supplied keyboard data onto our internal queues. The + // controller must already be locked. + // + + KeyboardQueueElement * element; + + // Obtain an unused keyboard data element. 
+ if (!queue_empty(&_keyboardQueueUnused)) + { + queue_remove_first(&_keyboardQueueUnused, + element, KeyboardQueueElement *, chain); + + // Store the new keyboard data element on the queue. + element->data = key; + queue_enter(&_keyboardQueue, element, KeyboardQueueElement *, chain); + } +} + +bool ApplePS2Controller::dequeueKeyboardData(UInt8 * key) +{ + // + // Dequeue keyboard data from our internal queues, if the queue is not + // empty. Should the queue be empty, false is returned. The controller + // must already be locked. + // + + KeyboardQueueElement * element; + + // Obtain an unused keyboard data element. + if (!queue_empty(&_keyboardQueue)) + { + queue_remove_first(&_keyboardQueue, element, KeyboardQueueElement *, chain); + *key = element->data; + + // Place the unused keyboard data element onto the unused queue. + queue_enter(&_keyboardQueueUnused, element, KeyboardQueueElement *, chain); + + return true; + } + return false; +} + +void ApplePS2Controller::unlockController(void) +{ + usimple_unlock(&_controllerLock); + ml_set_interrupts_enabled(_controllerLockOldSpl); +} + +void ApplePS2Controller::lockController(void) +{ + int oldSpl = ml_set_interrupts_enabled(FALSE); + usimple_lock(&_controllerLock); + _controllerLockOldSpl = oldSpl; +} + +#endif DEBUGGER_SUPPORT diff --git a/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.h b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.h new file mode 100644 index 000000000..d275c9520 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.h @@ -0,0 +1,233 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _APPLEPS2CONTROLLER_H +#define _APPLEPS2CONTROLLER_H + +#include +#include +#include +#include +#include + +class ApplePS2KeyboardDevice; +class ApplePS2MouseDevice; + +// +// This section describes the problem with the PS/2 controller design and what +// we are doing about it (OUT_OF_ORDER_DATA_CORRECTION_FEATURE). +// +// While the controller processes requests sent by the client drivers, at some +// point in most requests, a read needs to be made from the data port to check +// an acknowledge or receive some sort of data. We illustrate this issue with +// an example -- a write LEDs request to the keyboard: +// +// 1. Write Write LED command. +// 2. Read 0xFA Verify the acknowledge (0xFA). +// 3. Write Write LED state. +// 4. Read 0xFA Verify the acknowledge (0xFA). +// +// The problem is that the keyboard (when it is enabled) can send key events +// to the controller at any time, including when the controller is expecting +// to read an acknowledge next. What ends up happening is this sequence: +// +// a. Write Write LED command. +// b. Read 0x21 Keyboard reports [F] key was depressed, not realizing that +// we're still expecting a response to the command we JUST +// sent the keyboard. We receive 0x21 as a response to our +// command, and figure the command failed. +// c. 
Get  0xFA   Keyboard NOW decides to respond to the command with an
+//               acknowledge.  We're not waiting to read anything, so
+//               this byte gets dispatched to the driver's interrupt
+//               handler, which spews out an error message saying it
+//               wasn't expecting an acknowledge.
+//
+// What can we do about this?  In the above case, we can take note of the fact
+// that we are specifically looking for the 0xFA acknowledgement byte (through
+// the information passed in the kPS2C_ReadAndCompare primitive).  If we don't
+// receive this byte next on the input data stream, we put the byte we did get
+// aside for a moment, and give the keyboard (or mouse) a second chance to
+// respond correctly.
+//
+// If we receive the 0xFA acknowledgement byte on the second read, then we
+// assume that the situation described above just happened.  We transparently
+// dispatch the first byte to the driver's interrupt handler, where it was
+// meant to go, and return the second correct byte to the read-and-compare
+// logic, where it was meant to go.  Everyone wins.
+//
+// The only situation this feature cannot help is where a kPS2C_ReadDataPort
+// primitive is issued in place of a kPS2C_ReadDataPortAndCompare primitive.
+// This is necessary in some requests because the driver does not know what
+// it is going to receive.  This can be illustrated in the mouse get info
+// command.
+//
+// 1. Write       Prepare to write to mouse.
+// 2. Write       Write information command.
+// 3. Read  0xFA  Verify the acknowledge (0xFA).  __-> mouse can report mouse
+// 4. Read        Get first information byte.     __-> packet bytes in between
+// 5. Read        Get second information byte.    __-> these reads
+// 6. Read        Get third information byte.
+//
+// Controller cannot build any defenses against this.  It is suggested that the
+// driver writer disable the mouse first, then send any dangerous commands, and
+// re-enable the mouse when the command completes.
+// +// Note that the OUT_OF_ORDER_DATA_CORRECTION_FEATURE can be turned off at +// compile time. Please see the readDataPort:expecting: method for more +// information about the assumptions necessary for this feature. +// + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Definitions +// + +// Enable debugger support (eg. mini-monitor). + +#define DEBUGGER_SUPPORT 1 + +// Enable dynamic "second chance" re-ordering of input stream data if a +// command response fails to match the expected byte. + +#define OUT_OF_ORDER_DATA_CORRECTION_FEATURE 1 + +// PS/2 device types. + +typedef enum { kDT_Keyboard, kDT_Mouse } PS2DeviceType; + +// Interrupt definitions. + +#define kIRQ_Keyboard 1 +#define kIRQ_Mouse 12 +#define kIPL_Keyboard 6 +#define kIPL_Mouse 3 + +// Port timings. + +#define kDataDelay 7 // usec to delay before data is valid + +// Ports used to control the PS/2 keyboard/mouse and read data from it. + +#define kDataPort 0x60 // keyboard data & cmds (read/write) +#define kCommandPort 0x64 // keybd status (read), command (write) + +// Bit definitions for kCommandPort read values (status). + +#define kOutputReady 0x01 // output (from keybd) buffer full +#define kInputBusy 0x02 // input (to keybd) buffer full +#define kSystemFlag 0x04 // "System Flag" +#define kCommandLastSent 0x08 // 1 = cmd, 0 = data last sent +#define kKeyboardInhibited 0x10 // 0 if keyboard inhibited +#define kMouseData 0x20 // mouse data available + +#if DEBUGGER_SUPPORT +// Definitions for our internal keyboard queue (holds keys processed by the +// interrupt-time mini-monitor-key-sequence detection code). 
+ +#define kKeyboardQueueSize 32 // number of KeyboardQueueElements + +typedef struct KeyboardQueueElement KeyboardQueueElement; +struct KeyboardQueueElement +{ + queue_chain_t chain; + UInt8 data; +}; +#endif DEBUGGER_SUPPORT + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// ApplePS2Controller Class Declaration +// + +class ApplePS2Controller : public IOService +{ + OSDeclareDefaultStructors(ApplePS2Controller); + +public: // interrupt-time variables and functions + IOInterruptEventSource * _interruptSourceKeyboard; + IOInterruptEventSource * _interruptSourceMouse; + +#if DEBUGGER_SUPPORT + void lockController(void); + void unlockController(void); + + bool doEscape(UInt8 key); + bool dequeueKeyboardData(UInt8 * key); + void enqueueKeyboardData(UInt8 key); +#endif DEBUGGER_SUPPORT + +private: + IOCommandQueue * _commandQueue; + IOWorkLoop * _workLoop; + + OSObject * _interruptTargetKeyboard; + OSObject * _interruptTargetMouse; + PS2InterruptAction _interruptActionKeyboard; + PS2InterruptAction _interruptActionMouse; + bool _interruptInstalledKeyboard; + bool _interruptInstalledMouse; + + ApplePS2MouseDevice * _mouseDevice; // mouse nub + ApplePS2KeyboardDevice * _keyboardDevice; // keyboard nub + +#if DEBUGGER_SUPPORT + usimple_lock_data_t _controllerLock; // mach simple spin lock + int _controllerLockOldSpl; // spl before lock taken + + KeyboardQueueElement * _keyboardQueueAlloc; // queues' allocation space + queue_head_t _keyboardQueue; // queue of available keys + queue_head_t _keyboardQueueUnused; // queue of unused entries + + bool _extendedState; + UInt16 _modifierState; +#endif DEBUGGER_SUPPORT + + virtual void dispatchDriverInterrupt(PS2DeviceType deviceType, UInt8 data); + virtual void interruptOccurred(IOInterruptEventSource *, int); + virtual void processRequest(PS2Request * request, void *, void *, void *); + static void submitRequestAndBlockCompletion(void *, void * param); + + virtual UInt8 
readDataPort(PS2DeviceType deviceType); + virtual void writeCommandPort(UInt8 byte); + virtual void writeDataPort(UInt8 byte); + +#if OUT_OF_ORDER_DATA_CORRECTION_FEATURE + virtual UInt8 readDataPort(PS2DeviceType deviceType, UInt8 expectedByte); +#endif + +public: + virtual bool init(OSDictionary * properties); + virtual bool start(IOService * provider); + virtual void stop(IOService * provider); + + virtual IOWorkLoop * getWorkLoop() const; + + virtual void installInterruptAction(PS2DeviceType deviceType, + OSObject * target, + PS2InterruptAction action); + virtual void uninstallInterruptAction(PS2DeviceType deviceType); + + virtual PS2Request * allocateRequest(); + virtual void freeRequest(PS2Request * request); + virtual bool submitRequest(PS2Request * request); + virtual void submitRequestAndBlock(PS2Request * request); +}; + +#endif /* _APPLEPS2CONTROLLER_H */ diff --git a/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2KeyboardDevice.cpp b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2KeyboardDevice.cpp new file mode 100644 index 000000000..3418e2b62 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2KeyboardDevice.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include "ApplePS2Controller.h" + +// ============================================================================= +// ApplePS2KeyboardDevice Class Implementation +// + +#define super IOService +OSDefineMetaClassAndStructors(ApplePS2KeyboardDevice, IOService); + +bool ApplePS2KeyboardDevice::attach( IOService * provider ) +{ + if( !super::attach(provider) ) return false; + + assert(_controller == 0); + _controller = (ApplePS2Controller *)provider; + _controller->retain(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2KeyboardDevice::detach( IOService * provider ) +{ + assert(_controller == provider); + _controller->release(); + _controller = 0; + + super::detach(provider); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2KeyboardDevice::installInterruptAction(OSObject * target, + PS2InterruptAction action) +{ + _controller->installInterruptAction(kDT_Keyboard, target, action); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2KeyboardDevice::uninstallInterruptAction() +{ + _controller->uninstallInterruptAction(kDT_Keyboard); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +PS2Request * ApplePS2KeyboardDevice::allocateRequest() +{ + return _controller->allocateRequest(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2KeyboardDevice::freeRequest(PS2Request * request) +{ + _controller->freeRequest(request); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2KeyboardDevice::submitRequest(PS2Request * request) +{ + return _controller->submitRequest(request); +} + +// - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2KeyboardDevice::submitRequestAndBlock(PS2Request * request) +{ + _controller->submitRequestAndBlock(request); +} diff --git a/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2MouseDevice.cpp b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2MouseDevice.cpp new file mode 100644 index 000000000..7294baba0 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePS2Controller/ApplePS2MouseDevice.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include "ApplePS2Controller.h" + +// ============================================================================= +// ApplePS2MouseDevice Class Implementation +// + +#define super IOService +OSDefineMetaClassAndStructors(ApplePS2MouseDevice, IOService); + +bool ApplePS2MouseDevice::attach(IOService * provider) +{ + if( !super::attach(provider) ) return false; + + assert(_controller == 0); + _controller = (ApplePS2Controller *)provider; + _controller->retain(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2MouseDevice::detach( IOService * provider ) +{ + assert(_controller == provider); + _controller->release(); + _controller = 0; + + super::detach(provider); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2MouseDevice::installInterruptAction(OSObject * target, + PS2InterruptAction action) +{ + _controller->installInterruptAction(kDT_Mouse, target, action); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2MouseDevice::uninstallInterruptAction() +{ + _controller->uninstallInterruptAction(kDT_Mouse); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +PS2Request * ApplePS2MouseDevice::allocateRequest() +{ + return _controller->allocateRequest(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2MouseDevice::freeRequest(PS2Request * request) +{ + _controller->freeRequest(request); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool ApplePS2MouseDevice::submitRequest(PS2Request * request) +{ + return _controller->submitRequest(request); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void ApplePS2MouseDevice::submitRequestAndBlock(PS2Request * request) +{ 
+ _controller->submitRequestAndBlock(request); +} diff --git a/iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.cpp b/iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.cpp new file mode 100644 index 000000000..274cafce8 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + +#include "AppleCPU.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOCPU + +OSDefineMetaClassAndStructors(AppleCPU, IOCPU); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool AppleCPU::start(IOService *provider) +{ + kern_return_t result; + ml_processor_info_t processor_info; + + if (!super::start(provider)) return false; + + cpuIC = new IOCPUInterruptController; + if (cpuIC == 0) return false; + + if (cpuIC->initCPUInterruptController(1) != kIOReturnSuccess) return false; + cpuIC->attach(this); + + cpuIC->registerCPUInterruptController(); + + processor_info.cpu_id = (cpu_id_t)this; + processor_info.boot_cpu = true; + processor_info.start_paddr = 0; + processor_info.supports_nap = false; + processor_info.l2cr_value = 0; + processor_info.time_base_enable = 0; + + // Register this CPU with mach. + result = ml_processor_register(&processor_info, &machProcessor, + &ipi_handler); + if (result == KERN_FAILURE) return false; + + setCPUState(kIOCPUStateUninitalized); + + processor_start(machProcessor); + + registerService(); + + return true; +} + +void AppleCPU::initCPU(bool boot) +{ + if (boot) { + cpuIC->enableCPUInterrupt(this); + } + + setCPUState(kIOCPUStateRunning); +} + +void AppleCPU::quiesceCPU(void) +{ + // Unsupported. +} + +kern_return_t AppleCPU::startCPU(vm_offset_t /*start_paddr*/, + vm_offset_t /*arg_paddr*/) +{ + return KERN_FAILURE; +} + +void AppleCPU::haltCPU(void) +{ + // Unsupported. 
+} + +const OSSymbol *AppleCPU::getCPUName(void) +{ + return OSSymbol::withCStringNoCopy("Primary0"); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.h b/iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.h new file mode 100644 index 000000000..7338ade5b --- /dev/null +++ b/iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_APPLECPU_H +#define _IOKIT_APPLECPU_H + +#include + +class AppleCPU : public IOCPU +{ + OSDeclareDefaultStructors(AppleCPU); + +private: + IOCPUInterruptController *cpuIC; + +public: + virtual bool start(IOService *provider); + virtual void initCPU(bool boot); + virtual void quiesceCPU(void); + virtual kern_return_t startCPU(vm_offset_t start_paddr, + vm_offset_t arg_paddr); + virtual void haltCPU(void); + virtual const OSSymbol *getCPUName(void); +}; + +#endif /* ! 
_IOKIT_APPLECPU_H */ diff --git a/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp b/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp new file mode 100644 index 000000000..f14b821bb --- /dev/null +++ b/iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp @@ -0,0 +1,157 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY + * + */ + +#include +#include +#include + +#include + + +const OSSymbol *gGetDefaultBusSpeedsKey; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IODTPlatformExpert + +OSDefineMetaClassAndAbstractStructors(ApplePlatformExpert, IODTPlatformExpert); + +OSMetaClassDefineReservedUnused(ApplePlatformExpert, 0); +OSMetaClassDefineReservedUnused(ApplePlatformExpert, 1); +OSMetaClassDefineReservedUnused(ApplePlatformExpert, 2); +OSMetaClassDefineReservedUnused(ApplePlatformExpert, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool ApplePlatformExpert::start( IOService * provider ) +{ + UInt16 romVersion; + + gGetDefaultBusSpeedsKey = OSSymbol::withCString("GetDefaultBusSpeeds"); + + if (provider->getProperty(gIODTNWInterruptMappingKey)) { + // new world interrupt mapping => new world, for now + setBootROMType(kBootROMTypeNewWorld); + } else { + setBootROMType(kBootROMTypeOldWorld); + + // Get the Rom Minor Version from the 68k ROM. 
+    romVersion = ml_phys_read(0xffc00010) & 0x0000ffff;
+    provider->setProperty("rom-version", &romVersion, sizeof(romVersion));
+  }
+
+  return super::start(provider);
+}
+
+bool ApplePlatformExpert::configure( IOService * provider )
+{
+  IORangeAllocator * physicalRanges;
+
+  if((physicalRanges = getPhysicalRangeAllocator())) {
+    physicalRanges->allocateRange(0,0x80000000);          // RAM
+    physicalRanges->allocateRange(0xff000000,0x01000000); // ROM
+  }
+  return(super::configure(provider));
+}
+
+const char * ApplePlatformExpert::deleteList ( void )
+{
+    return( "('packages', 'psuedo-usb', 'psuedo-hid', 'multiboot', 'rtas')" );
+}
+
+const char * ApplePlatformExpert::excludeList( void )
+{
+    return( "('chosen', 'memory', 'openprom', 'AAPL,ROM', 'rom', 'options', 'aliases')");
+}
+
+void ApplePlatformExpert::registerNVRAMController( IONVRAMController * nvram )
+{
+  IOReturn err;
+  enum { kXPRAMTimeToGMTOffset = 0xEC };
+
+  super::registerNVRAMController(nvram);
+
+  // Here we are saving off the time zone info that's in PRAM.
+  // This probably should be a separate call that the
+  // ApplePlatformExpert does in its initialization. -ECH
+
+  err = readXPRAM(kXPRAMTimeToGMTOffset, (UInt8 *)&_timeToGMT,
+                  sizeof(_timeToGMT));
+  if (err == kIOReturnSuccess) {
+    // Convert from a SInt24 - sign extend from bit 23.
+    if (_timeToGMT & (1 << 23))
+      _timeToGMT |= 0xFF000000;
+    else
+      _timeToGMT &= 0x00FFFFFF;
+  }
+}
+
+#define SECS_BETWEEN_1904_1970 2082844800
+
+long ApplePlatformExpert::getGMTTimeOfDay(void)
+{
+    long localtime;
+
+    // To avoid hanging the kernel at boot,
+    // set a limit of 30 seconds waiting
+    // for the real time clock.
+    mach_timespec_t t;
+    t.tv_sec = 30;
+    t.tv_nsec = 0;
+    if (waitForService(resourceMatching("IORTC"), &t ) != NULL) {
+        if (PE_read_write_time_of_day(kPEReadTOD, &localtime) == 0)
+            return (localtime - _timeToGMT - SECS_BETWEEN_1904_1970);
+    }
+    else
+        IOLog("ApplePlatformExpert::getGMTTimeOfDay can not provide time of day RTC did not show up\n");
+
+    return(0);
+}
+
+void ApplePlatformExpert::setGMTTimeOfDay(long secs)
+{
+    // To avoid hanging the kernel at boot,
+    // set a limit of 30 seconds waiting
+    // for the real time clock.
+    mach_timespec_t t;
+    t.tv_sec = 30;
+    t.tv_nsec = 0;
+    if (waitForService(resourceMatching("IORTC"), &t ) != NULL) {
+        secs += SECS_BETWEEN_1904_1970;
+        secs += _timeToGMT;
+        PE_read_write_time_of_day(kPEWriteTOD, &secs);
+    }
+    else
+        IOLog("ApplePlatformExpert::setGMTTimeOfDay can not set time of day RTC did not show up\n");
+
+}
+
+bool ApplePlatformExpert::getMachineName(char *name, int maxLength)
+{
+    strncpy(name, "Power Macintosh", maxLength);
+
+    return true;
+}
diff --git a/iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.cpp b/iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.cpp
new file mode 100644
index 000000000..8559ba80c
--- /dev/null
+++ b/iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.cpp
+/*
+ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#include + +#include "PowerExpress.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super ApplePlatformExpert + +OSDefineMetaClassAndStructors(PowerExpressPE, ApplePlatformExpert); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool PowerExpressPE::start(IOService *provider) +{ + OSData *tmpData; + + setChipSetType(kChipSetTypePowerExpress); + + tmpData = OSDynamicCast(OSData, getProperty("senses")); + if (tmpData) senseArray = (long *)tmpData->getBytesNoCopy(); + + return super::start(provider); +} + +bool PowerExpressPE::platformAdjustService(IOService *service) +{ + long cnt, numInterrupts, sourceNumbers[2]; + OSData *tmpData; + OSArray *controllers, *specifiers; + OSSymbol *controller; + + // Fix up the interrupt data. + controllers = OSDynamicCast(OSArray, service->getProperty(gIOInterruptControllersKey)); + specifiers = OSDynamicCast(OSArray, service->getProperty(gIOInterruptSpecifiersKey)); + if (controllers && specifiers) { + numInterrupts = specifiers->getCount(); + for (cnt = 0; cnt < numInterrupts; cnt++) { + // Only change interrupts for MPIC. 
+ controller = OSDynamicCast(OSSymbol, controllers->getObject(cnt)); + if (controller == gIODTDefaultInterruptController) { + tmpData = OSDynamicCast(OSData, specifiers->getObject(cnt)); + if (tmpData && (tmpData->getLength() == 4)) { + sourceNumbers[0] = *(long *)tmpData->getBytesNoCopy(); + sourceNumbers[1] = senseArray[sourceNumbers[0]]; + tmpData = OSData::withBytes(sourceNumbers, 2 * sizeof(long)); + if (tmpData) { + specifiers->setObject(cnt, tmpData); + tmpData->release(); + } + } + } + } + } + + if (IODTMatchNubWithKeys(service, "open-pic")) { + service->setProperty("InterruptControllerName", + gIODTDefaultInterruptController); + return true; + } + + return true; +} diff --git a/iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.h b/iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.h new file mode 100644 index 000000000..c6e372383 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + + +#ifndef _IOKIT_POWEREXPRESS_H +#define _IOKIT_POWEREXPRESS_H + +#include + +class PowerExpressPE : public ApplePlatformExpert +{ + OSDeclareDefaultStructors(PowerExpressPE); + +private: + long *senseArray; + +public: + virtual bool start(IOService *provider); + + virtual bool platformAdjustService(IOService *service); +}; + + +#endif /* ! _IOKIT_POWEREXPRESS_H */ diff --git a/iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.cpp b/iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.cpp new file mode 100644 index 000000000..a8b65211b --- /dev/null +++ b/iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + +#include + +#include "PowerStar.h" +#include "../drvAppleOHare/OHare.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super ApplePlatformExpert + +OSDefineMetaClassAndStructors(PowerStarPE, ApplePlatformExpert); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool PowerStarPE::start(IOService *provider) +{ + setChipSetType(kChipSetTypePowerStar); + + // See if it is a Hooper or Kanga. + if (IODTMatchNubWithKeys(provider, "('AAPL,3400/2400', 'AAPL,3500')")) { + configureEthernet(provider); + } + + _pePMFeatures = kStdPowerBookPMFeatures; + _pePrivPMFeatures = kStdPowerBookPrivPMFeatures; + _peNumBatteriesSupported = kStdPowerBookNumBatteries; + + return super::start(provider); +} + +bool PowerStarPE::platformAdjustService(IOService *service) +{ + if (!strcmp(service->getName(), "chips65550")) { + service->setProperty("Ignore VBL", "", 0); + return true; + } + + return true; +} + +void PowerStarPE::configureEthernet(IOService *provider) +{ + OSCollectionIterator *nodeList; + IORegistryEntry *node, *enet, *ohare; + OSArray *interruptNames, *interruptSources; + OSSymbol *interruptControllerName; + OSData *tempData; + long tempSource; + + enet = 0; + ohare = 0; + + // Find the node for DEC21041. + nodeList = IODTFindMatchingEntries(provider, kIODTRecursive, + "'pci1011,14'"); + if (nodeList) { + while ((node = (IORegistryEntry *)nodeList->getNextObject())) { + enet = node; + } + nodeList->release(); + } + + if (enet == 0) return; + + // Set the 'Network Connection' property to '10BaseT'. + enet->setProperty("Network Connection", "10BaseT"); + + // Add a 'built-in' property so IONetworkStack will treat it as built in. + enet->setProperty("built-in", "", 0); + + // If it is there, find the node for the second ohare. 
+ nodeList = IODTFindMatchingEntries(provider, kIODTRecursive, + "'pci106b,7'"); + if (nodeList) { + while ((node = (IORegistryEntry *)nodeList->getNextObject())) { + ohare = node; + } + nodeList->release(); + } + + if (ohare == 0) return; + + interruptNames = OSDynamicCast(OSArray, + enet->getProperty(gIOInterruptControllersKey)); + interruptControllerName = (OSSymbol *)OSSymbol::withCStringNoCopy("SecondaryInterruptController"); + interruptNames->setObject(0, interruptControllerName); + interruptControllerName->release(); + + interruptSources = OSDynamicCast(OSArray, + enet->getProperty(gIOInterruptSpecifiersKey)); + tempSource = 28; + tempData = OSData::withBytes(&tempSource, sizeof(tempSource)); + interruptSources->setObject(0, tempData); + tempData->release(); +} diff --git a/iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.h b/iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.h new file mode 100644 index 000000000..4be509ebf --- /dev/null +++ b/iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + + +#ifndef _IOKIT_POWERSTAR_H +#define _IOKIT_POWERSTAR_H + +#include + +class PowerStarPE : public ApplePlatformExpert +{ + OSDeclareDefaultStructors(PowerStarPE); + +private: + virtual void configureEthernet(IOService *provider); + +public: + virtual bool start(IOService *provider); + virtual bool platformAdjustService(IOService *service); +}; + + +#endif /* ! _IOKIT_POWERSTAR_H */ diff --git a/iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.cpp b/iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.cpp new file mode 100644 index 000000000..82478bcc5 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.cpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + +#include + +#include "PowerSurge.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super ApplePlatformExpert + +OSDefineMetaClassAndStructors(PowerSurgePE, ApplePlatformExpert); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool PowerSurgePE::start(IOService *provider) +{ + setChipSetType(kChipSetTypePowerSurge); + + _pePMFeatures = kStdDesktopPMFeatures; + _pePrivPMFeatures = kStdDesktopPrivPMFeatures; + _peNumBatteriesSupported = kStdDesktopNumBatteries; + + return super::start(provider); +} diff --git a/iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.h b/iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.h new file mode 100644 index 000000000..9e31449d5 --- /dev/null +++ b/iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + + +#ifndef _IOKIT_POWERSURGE_H +#define _IOKIT_POWERSURGE_H + +#include + +class PowerSurgePE : public ApplePlatformExpert +{ + OSDeclareDefaultStructors(PowerSurgePE); + +public: + virtual bool start(IOService *provider); +}; + + +#endif /* ! _IOKIT_POWERSURGE_H */ diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp b/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp new file mode 100644 index 000000000..680e40b22 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp @@ -0,0 +1,450 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/* NOTE(review): the angle-bracket #include targets below were stripped
 * during patch transcription — restore them from the original sources. */
#include
#include
#include
#include
#include
#include
#include
#include "RootDomainUserClient.h"

extern "C" {
extern void kprintf(const char *, ...);
}

extern const IORegistryPlane * gIOPowerPlane;

// Forward declarations: command-queue callback and PMU-presence probe.
void PMreceiveCmd ( OSObject *, void *, void *, void *, void * );
bool rootHasPMU( OSObject * us, void *, IOService * yourDevice );


// Power-state ordinals for the root domain's three states.
#define number_of_power_states 3
#define OFF_STATE 0
#define SLEEP_STATE 1
#define ON_STATE 2

#define ON_POWER IOPMPowerOn
#define SLEEP_POWER IOPMAuxPowerOn

// State table indexed by the ordinals above: off, sleep (aux power
// only), and fully on.
static IOPMPowerState ourPowerStates[number_of_power_states] = {
    {1,0,0,0,0,0,0,0,0,0,0,0},
    {1,0,0,SLEEP_POWER,0,0,0,0,0,0,0,0},
    {1,IOPMPowerOn,IOPMPowerOn,ON_POWER,0,0,0,0,0,0,0,0},
};

// Singleton set by IOPMrootDomain::start(); used by the C shims below.
static IOPMrootDomain * gRootDomain;

#define super IOService
OSDefineMetaClassAndStructors(IOPMrootDomain,IOService)

// C-linkage shims exported to the rest of the kernel.
// NOTE(review): both dereference gRootDomain without a NULL check, so
// they presumably must not be called before the root domain starts —
// confirm against callers.
extern "C"
{
    IONotifier * registerSleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref = 0)
    {
        return gRootDomain->registerInterest( gIOGeneralInterest, handler, self, ref );
    }

    IOReturn acknowledgeSleepWakeNotification(void * PMrefcon)
    {
        return gRootDomain->allowPowerChange ( (unsigned long)PMrefcon );
    }

}


// **********************************************************************************
// start
//
// We don't do much here. The real initialization occurs when the platform
// expert informs us we are the root.
+// ********************************************************************************** +bool IOPMrootDomain::start ( IOService * nub ) +{ + super::start(nub); + + gRootDomain = this; + + PMinit(); + allowSleep = true; + sleepIsSupported = false; + idlePeriod = 0; + systemBooting = true; +// systemBooting = false; // temporary work-around for 2589847 + ignoringClamshell = false; + + pm_vars->PMworkloop = IOWorkLoop::workLoop(); // make the workloop + pm_vars->commandQueue = IOCommandQueue::commandQueue(this, PMreceiveCmd); // make a command queue + if (! pm_vars->commandQueue || + ( pm_vars->PMworkloop->addEventSource( pm_vars->commandQueue) != kIOReturnSuccess) ) { + return IOPMNoErr; + } + + patriarch = new IORootParent; // create our parent + patriarch->init(); + patriarch->attach(this); + patriarch->start(this); + patriarch->youAreRoot(); + patriarch->wakeSystem(); + patriarch->addPowerChild(this); + + registerPowerDriver(this,ourPowerStates,number_of_power_states); + + // Clamp power on. We will revisit this decision when the login window is displayed + // and we receive preferences via SetAggressiveness. + changePowerStateToPriv(ON_STATE); // clamp power on + powerOverrideOnPriv(); + + registerService(); // let clients find us + + return true; +} + + +//********************************************************************************* +// youAreRoot +// +// Power Managment is informing us that we are the root power domain. +// We know we are not the root however, since we have just instantiated a parent +// for ourselves and made it the root. We override this method so it will have +// no effect +//********************************************************************************* +IOReturn IOPMrootDomain::youAreRoot ( void ) +{ + return IOPMNoErr; +} + + +// ********************************************************************************** +// command_received +// +// We have received a command from ourselves on the command queue. 
+// If it is to send a recently-received aggressiveness factor, do so. +// Otherwise, it's something the superclass enqueued. +// ********************************************************************************** +void IOPMrootDomain::command_received ( void * command, void * x, void * y, void * z ) +{ + switch ( (int)command ) { + case kPMbroadcastAggressiveness: + if ( (int)x == kPMMinutesToSleep ) { + idlePeriod = (int)y*60; + if ( allowSleep && sleepIsSupported ) { + setIdleTimerPeriod(idlePeriod); // set new timeout + } + } + break; + default: + super::command_received(command,x,y,z); + break; + } +} + + +//********************************************************************************* +// setAggressiveness +// +// Some aggressiveness factor has changed. We put this change on our +// command queue so that we can broadcast it to the hierarchy while on +// the Power Mangement workloop thread. This enables objects in the +// hierarchy to successfully alter their idle timers, which are all on the +// same thread. +//********************************************************************************* + +IOReturn IOPMrootDomain::setAggressiveness ( unsigned long type, unsigned long newLevel ) +{ + systemBooting = false; // when the finder launches, this method gets called -- system booting is done. 
+ + pm_vars->commandQueue->enqueueCommand(true, (void *)kPMbroadcastAggressiveness, (void *) type, (void *) newLevel ); + super::setAggressiveness(type,newLevel); + + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// sleepSystem +// +// ********************************************************************************** +IOReturn IOPMrootDomain::sleepSystem ( void ) +{ + kprintf("sleep demand received\n"); + if ( !systemBooting && allowSleep && sleepIsSupported ) { + patriarch->sleepSystem(); + } + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// powerChangeDone +// +// This overrides powerChangeDone in IOService. +// If we just finished switching to state zero, call the platform expert to +// sleep the kernel. +// Then later, when we awake, the kernel returns here and we wake the system. +// ********************************************************************************** +void IOPMrootDomain::powerChangeDone ( unsigned long powerStateOrdinal ) +{ + if ( powerStateOrdinal == SLEEP_STATE ) { + pm_vars->thePlatform->sleepKernel(); + activityTickle(kIOPMSubclassPolicy); // reset idle sleep + systemWake(); // tell the tree we're waking + patriarch->wakeSystem(); // make sure we have power + changePowerStateToPriv(ON_STATE); // and wake + } +} + + +// ********************************************************************************** +// newUserClient +// +// ********************************************************************************** +IOReturn IOPMrootDomain::newUserClient( task_t owningTask, void * /* security_id */, UInt32 type, IOUserClient ** handler ) +{ + IOReturn err = kIOReturnSuccess; + RootDomainUserClient * client; + + client = RootDomainUserClient::withTask(owningTask); + + if( !client || (false == client->attach( this )) || + (false == client->start( this )) ) { + if(client) { + client->detach( this ); + 
client->release(); + client = NULL; + } + err = kIOReturnNoMemory; + } + *handler = client; + return err; +} + +//********************************************************************************* +// receivePowerNotification +// +// The power controller is notifying us of a hardware-related power management +// event that we must handle. This is a result of an 'environment' interrupt from +// the power mgt micro. +//********************************************************************************* + +IOReturn IOPMrootDomain::receivePowerNotification (UInt32 msg) +{ + if (msg & kIOPMSleepNow) { + (void) sleepSystem (); + } + + if (msg & kIOPMPowerButton) { + (void) sleepSystem (); + } + + if (msg & kIOPMPowerEmergency) { + (void) sleepSystem (); + } + + if (msg & kIOPMClamshellClosed) { + if ( ! ignoringClamshell ) { + (void) sleepSystem (); + } + } + + if (msg & kIOPMIgnoreClamshell) { + ignoringClamshell = true; + } + + if (msg & kIOPMAllowSleep) { + if ( sleepIsSupported ) { + setIdleTimerPeriod(idlePeriod); + } + allowSleep = true; + changePowerStateTo (0); + } + + // if the case is open on some machines, we must now + // allow the machine to be put to sleep or to idle sleep + + if (msg & kIOPMPreventSleep) { + if ( sleepIsSupported ) { + setIdleTimerPeriod(0); + } + allowSleep = false; + changePowerStateTo (number_of_power_states-1); + } + + return 0; +} + + +//********************************************************************************* +// sleepSupported +// +//********************************************************************************* + +void IOPMrootDomain::setSleepSupported( IOOptionBits flags ) +{ + platformSleepSupport = flags; + if ( flags & kRootDomainSleepSupported ) { + sleepIsSupported = true; + setProperty("IOSleepSupported",""); + } + else + { + sleepIsSupported = false; + removeProperty("IOSleepSupported"); + } + +} + +//********************************************************************************* +// getSleepSupported +// 
+//********************************************************************************* + +IOOptionBits IOPMrootDomain::getSleepSupported( void ) +{ + return( platformSleepSupport ); +} + + +//********************************************************************************* +// tellChangeDown +// +// We override the superclass implementation so we can send a different message +// type to the client or application being notified. +//********************************************************************************* + +bool IOPMrootDomain::tellChangeDown ( unsigned long stateNum ) +{ + if ( stateNum == SLEEP_STATE ) { + return super::tellClientsWithResponse(kIOMessageSystemWillSleep); + } + return super::tellChangeDown(stateNum); +} + + +//********************************************************************************* +// askChangeDown +// +// We override the superclass implementation so we can send a different message +// type to the client or application being notified. +//********************************************************************************* + +bool IOPMrootDomain::askChangeDown (unsigned long stateNum) +{ + if ( stateNum == SLEEP_STATE ) { + return super::tellClientsWithResponse(kIOMessageCanSystemSleep); + } + return super::askChangeDown(stateNum); +} + + +//********************************************************************************* +// tellNoChangeDown +// +// Notify registered applications and kernel clients that we are not +// dropping power. +// +// We override the superclass implementation so we can send a different message +// type to the client or application being notified. 
+//********************************************************************************* + +void IOPMrootDomain::tellNoChangeDown ( unsigned long ) +{ + return tellClients(kIOMessageSystemWillNotSleep); +} + + +//********************************************************************************* +// tellChangeUp +// +// Notify registered applications and kernel clients that we are raising power. +// +// We override the superclass implementation so we can send a different message +// type to the client or application being notified. +//********************************************************************************* + +void IOPMrootDomain::tellChangeUp ( unsigned long ) +{ + return tellClients(kIOMessageSystemHasPoweredOn); +} + + +// ********************************************************************************** +// activityTickle +// +// This is called by the HID system and calls the superclass in turn. +// ********************************************************************************** + +bool IOPMrootDomain::activityTickle ( unsigned long, unsigned long x=0 ) +{ + return super::activityTickle (kIOPMSuperclassPolicy1,ON_STATE); +} + + + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOService + +OSDefineMetaClassAndStructors(IORootParent, IOService) + +#define number_of_patriarch_power_states 3 + +static IOPMPowerState patriarchPowerStates[number_of_patriarch_power_states] = { + {1,0,0,0,0,0,0,0,0,0,0,0}, // off + {1,0,SLEEP_POWER,0,0,0,0,0,0,0,0,0}, // sleep + {1,0,ON_POWER,0,0,0,0,0,0,0,0,0} // running +}; + +#define PATRIARCH_OFF 0 +#define PATRIARCH_SLEEP 1 +#define PATRIARCH_ON 2 + + +bool IORootParent::start ( IOService * nub ) +{ + super::start(nub); + PMinit(); + registerPowerDriver(this,patriarchPowerStates,number_of_patriarch_power_states); + powerOverrideOnPriv(); + return true; +} + + +void IORootParent::shutDownSystem ( void ) +{ + changePowerStateToPriv(PATRIARCH_OFF); +} + + +void 
IORootParent::sleepSystem ( void ) +{ + changePowerStateToPriv(PATRIARCH_SLEEP); +} + + +void IORootParent::wakeSystem ( void ) +{ + changePowerStateToPriv(PATRIARCH_ON); +} + diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.h b/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.h new file mode 100644 index 000000000..10dccbd06 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleRootDomain/RootDomain.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOKIT_ROOTDOMAIN_H +#define _IOKIT_ROOTDOMAIN_H + +#include +#include + +class RootDomainUserClient; + +#define number_of_power_states 2 +#define powerOn 1 + + +class IOPMrootDomain: public IOService +{ +OSDeclareDefaultStructors(IOPMrootDomain) + +public: + + virtual bool start( IOService * provider ); + virtual IOReturn newUserClient ( task_t, void *, UInt32, IOUserClient ** ); + virtual IOReturn setAggressiveness ( unsigned long, unsigned long ); + virtual IOReturn getAggressiveness ( unsigned long, unsigned long * ); + + +private: + + unsigned long current_values[kMaxType+1]; // current values of aggressiveness factors + UInt16 serialNumber; // used to identify sleep/wake notification cycle + OSArray* responseFlags; // points to array of responses from apps + bool doNotSleep; // true if an application vetos sleep notification + + virtual IOReturn setPowerState ( long, IOService* ); + virtual unsigned long maxCapabilityForDomainState ( IOPMPowerFlags ); + virtual unsigned long powerStateForDomainState ( IOPMPowerFlags ); + virtual void notifyApps ( void ); + virtual bool responseValid ( unsigned long refcon ); + virtual void checkForDone ( void ); + unsigned long initialPowerStateForDomainState ( IOPMPowerFlags); + void command_received ( void *, void * , void * , void *); + +}; + +#endif /* _IOKIT_ROOTDOMAIN_H */ diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp b/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp new file mode 100644 index 000000000..598cbaf9a --- /dev/null +++ b/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + */ + +#include +#include +#include +#include "RootDomainUserClient.h" +#include + +#define super IOUserClient + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClassAndStructors(RootDomainUserClient, IOUserClient) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +RootDomainUserClient *RootDomainUserClient::withTask(task_t owningTask) +{ + RootDomainUserClient *me; + + me = new RootDomainUserClient; + if(me) { + if(!me->init()) { + me->release(); + return NULL; + } + me->fTask = owningTask; + } + return me; +} + +bool RootDomainUserClient::start( IOService * provider ) +{ + assert(OSDynamicCast(IOPMrootDomain, provider)); + if(!super::start(provider)) + return false; + fOwner = (IOPMrootDomain *)provider; + + // Got the owner, so initialize the call structures + fMethods[kPMSetAggressiveness].object = provider; // 0 + fMethods[kPMSetAggressiveness].func = (IOMethod)&IOPMrootDomain::setAggressiveness; + fMethods[kPMSetAggressiveness].count0 = 2; + fMethods[kPMSetAggressiveness].count1 = 0; + fMethods[kPMSetAggressiveness].flags = kIOUCScalarIScalarO; + + fMethods[kPMGetAggressiveness].object = provider; // 1 + fMethods[kPMGetAggressiveness].func = 
(IOMethod)&IOPMrootDomain::getAggressiveness; + fMethods[kPMGetAggressiveness].count0 = 1; + fMethods[kPMGetAggressiveness].count1 = 1; + fMethods[kPMGetAggressiveness].flags = kIOUCScalarIScalarO; + + fMethods[kPMSleepSystem].object = provider; // 2 + fMethods[kPMSleepSystem].func = (IOMethod)&IOPMrootDomain::sleepSystem; + fMethods[kPMSleepSystem].count0 = 0; + fMethods[kPMSleepSystem].count1 = 0; + fMethods[kPMSleepSystem].flags = kIOUCScalarIScalarO; + + fMethods[kPMAllowPowerChange].object = provider; // 3 + fMethods[kPMAllowPowerChange].func = (IOMethod)&IOPMrootDomain::allowPowerChange; + fMethods[kPMAllowPowerChange].count0 = 1; + fMethods[kPMAllowPowerChange].count1 = 0; + fMethods[kPMAllowPowerChange].flags = kIOUCScalarIScalarO; + + fMethods[kPMCancelPowerChange].object = provider; // 4 + fMethods[kPMCancelPowerChange].func = (IOMethod)&IOPMrootDomain::cancelPowerChange; + fMethods[kPMCancelPowerChange].count0 = 1; + fMethods[kPMCancelPowerChange].count1 = 0; + fMethods[kPMCancelPowerChange].flags = kIOUCScalarIScalarO; + + return true; +} + + +IOReturn RootDomainUserClient::clientClose( void ) +{ + detach( fOwner); + + return kIOReturnSuccess; +} + +IOReturn RootDomainUserClient::clientDied( void ) +{ + return( clientClose()); +} + +IOExternalMethod * +RootDomainUserClient::getExternalMethodForIndex( UInt32 index ) +{ + if(index >= kNumPMMethods) + return NULL; + else + return &fMethods[index]; +} + +IOReturn +RootDomainUserClient::registerNotificationPort( + mach_port_t port, UInt32 type ) +{ + return kIOReturnUnsupported; +} + diff --git a/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h b/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h new file mode 100644 index 000000000..28bf6c707 --- /dev/null +++ b/iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_ROOTDOMAINUSERCLIENT_H +#define _IOKIT_ROOTDOMAINUSERCLIENT_H + +#include +#include +#include +#include + + +class RootDomainUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(RootDomainUserClient) + +private: + IOPMrootDomain * fOwner; + task_t fTask; + IOExternalMethod fMethods[ kNumPMMethods ]; + +public: + + static RootDomainUserClient *withTask(task_t owningTask); + + virtual IOReturn clientClose( void ); + + virtual IOReturn clientDied( void ); + + virtual IOReturn registerNotificationPort ( mach_port_t port, UInt32 type ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + virtual bool start( IOService * provider ); + +}; + +#endif /* ! 
_IOKIT_ROOTDOMAINUSERCLIENT_H */ + diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxClient.cpp b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxClient.cpp new file mode 100644 index 000000000..72a6ebe1f --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxClient.cpp @@ -0,0 +1,450 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxClient.m created by russb2 on Sat 30-May-1998 */ + +#include "Sym8xxController.h" + +extern pmap_t kernel_pmap; + + +/*-----------------------------------------------------------------------------* + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::executeCommand( IOSCSIParallelCommand *scsiCommand ) +{ + SRB *srb = NULL; + SCSICDBInfo scsiCDB; + SCSITargetLun targetLun; + Nexus *nexus; + Nexus *nexusPhys; + UInt32 len; + bool isWrite; + + srb = (SRB *) scsiCommand->getCommandData(); + bzero( srb, sizeof(SRB) ); + + srb->srbPhys = (SRB *) pmap_extract( kernel_pmap, (vm_offset_t) srb ); + srb->scsiCommand = scsiCommand; + + scsiCommand->getCDB( &scsiCDB ); + scsiCommand->getTargetLun( &targetLun ); + + nexus = &srb->nexus; + nexusPhys = &srb->srbPhys->nexus; + + srb->target = targetLun.target; + srb->lun = targetLun.lun; + srb->srbCDBFlags = scsiCDB.cdbFlags; + + /* + * Setup the Nexus struct. This part of the SRB is read/written both by the + * script and the driver. + */ + nexus->targetParms.target = srb->target; + +// printf( "SCSI(Symbios8xx): executeCommand: T/L = %d:%d Cmd = %08x CmdType = %d\n\r", +// targetLun.target, targetLun.lun, (int)scsiCommand, scsiCommand->getCmdType() ); + + switch ( scsiCommand->getCmdType() ) + { + case kSCSICommandAbort: + case kSCSICommandAbortAll: + case kSCSICommandDeviceReset: + Sym8xxAbortCommand( scsiCommand ); + return; + + default: + ; + } + + /* + * Set client data buffer pointers in the SRB + */ + scsiCommand->getPointers( &srb->xferDesc, &srb->xferCount, &isWrite ); + + srb->directionMask = (isWrite) ? 
0x00000000 :0x01000000; + + nexus->cdb.ppData = OSSwapHostToLittleInt32((UInt32)&nexusPhys->cdbData); + + len = scsiCDB.cdbLength; + + nexus->cdb.length = OSSwapHostToLittleInt32( len ); + nexus->cdbData = scsiCDB.cdb; + + Sym8xxCalcMsgs( scsiCommand ); + + /* + * Setup initial data transfer list (SGList) + */ + nexus->ppSGList = (SGEntry *)OSSwapHostToLittleInt32((UInt32)&nexusPhys->sgListData[2]); + Sym8xxUpdateSGList( srb ); + + Sym8xxStartSRB( srb ); +} + + +/*-----------------------------------------------------------------------------* + * This routine queues an SRB to reset the SCSI Bus + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::resetCommand( IOSCSIParallelCommand *scsiCommand ) +{ + SRB *srb; + +// printf( "SCSI(Symbios8xx): resetCommand\n\r" ); + + srb = (SRB *) scsiCommand->getCommandData(); + bzero( srb, sizeof(SRB) ); + + srb->srbPhys = (SRB *) pmap_extract( kernel_pmap, (vm_offset_t) srb ); + srb->scsiCommand = scsiCommand; + + Sym8xxSCSIBusReset( srb ); +} + +/*-----------------------------------------------------------------------------* + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::cancelCommand( IOSCSIParallelCommand *scsiCommand ) +{ + IOSCSIParallelCommand *origCommand; + SRB *srb; + SCSITargetLun targetLun; + SCSIResults scsiResults; + + origCommand = scsiCommand->getOriginalCmd(); + srb = (SRB *)origCommand->getCommandData(); + + switch ( origCommand->getCmdType() ) + { + case kSCSICommandAbort: + case kSCSICommandAbortAll: + case kSCSICommandDeviceReset: + if ( abortSRB == srb ) + { + SCRIPT_VAR(R_ld_AbortBdr_mailbox) = 0; + abortSRB = 0; + + origCommand->complete(); + } + break; + + default: + + if ( adapter->nexusPtrsVirt[srb->nexus.tag] == &srb->nexus ) + { + adapter->nexusPtrsVirt[srb->nexus.tag] = (Nexus *) -1; + adapter->nexusPtrsPhys[srb->nexus.tag] = (Nexus *) -1; + + origCommand->complete(); + } 
+ else + { + origCommand->getTargetLun( &targetLun ); + origCommand->complete(); + + IOLog( "SCSI(Symbios8xx): Aborted SRB not found - T/L = %d:%d\n\r", targetLun.target, targetLun.lun ); + } + } + + bzero( &scsiResults, sizeof(scsiResults) ); + scsiCommand->setResults( &scsiResults ); + scsiCommand->complete(); +} + +/*-----------------------------------------------------------------------------* + * + * + * + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxAbortCommand( IOSCSIParallelCommand *scsiCommand ) +{ + SRB *srb; + SCSICDBInfo scsiCDB; + SCSITargetLun targetLun; + + + scsiCommand->getTargetLun( &targetLun ); + + switch ( scsiCommand->getCmdType() ) + { + case kSCSICommandAbort: + srb = (SRB *)scsiCommand->getOriginalCmd()->getCommandData(); + Sym8xxCancelMailBox( &srb->srbPhys->nexus ); + break; + + case kSCSICommandAbortAll: + Sym8xxCancelMailBox( targetLun.target, targetLun.lun, false ); + break; + + case kSCSICommandDeviceReset: + Sym8xxCancelMailBox( targetLun.target, (UInt32) -1, false ); + break; + + default: + ; + } + + if ( abortSRB ) + { + abortReqPending = true; + + rescheduleCommand( scsiCommand ); + disableCommands(); + return; + } + + scsiCommand->getCDB( &scsiCDB ); + + srb = (SRB *) scsiCommand->getCommandData(); + + srb->nexus.msgData[0] = srb->lun | ((srb->srbCDBFlags & kCDBFlagsNoDisconnect ) ? 0x80 : 0xC0); + + if ( scsiCDB.cdbTagMsg != 0 ) + { + srb->nexus.tag = scsiCDB.cdbTag + 128; + srb->nexus.msgData[1] = srb->nexus.tag; + } + else + { + srb->nexus.tag = ((UInt32)srb->target << 3) | srb->lun; + srb->nexus.msgData[1] = 0; + } + srb->tag = srb->nexus.tag; + + srb->nexus.msgData[2] = scsiCDB.cdbAbortMsg; + + Sym8xxAbortBdr( srb ); +} + + +/*-----------------------------------------------------------------------------* + * This routine creates SCSI messages to send during the initial connection + * to the target. 
It is called during client request processing and also by + * the I/O thread when a request sense operation is required. + * + * Outbound messages are setup in the MsgOut buffer in the Nexus structure of + * the SRB. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxCalcMsgs( IOSCSIParallelCommand *scsiCommand ) +{ + SRB *srb; + Nexus *nexus; + Nexus *nexusPhys; + UInt32 msgIndex; + SCSICDBInfo scsiCDB; + SCSITargetParms targetParms; + UInt32 i; + UInt32 tw; + + + srb = (SRB *)scsiCommand->getCommandData(); + nexus = &srb->nexus; + nexusPhys = &srb->srbPhys->nexus; + + scsiCommand->getCDB( &scsiCDB ); + + /* + * Setup Identify message + */ + msgIndex = 0; + nexus->msg.ppData = OSSwapHostToLittleInt32((UInt32)&nexusPhys->msgData); + nexus->msgData[msgIndex++] = srb->lun | (( scsiCDB.cdbFlags & kCDBFlagsNoDisconnect ) ? 0x80 : 0xC0); + + /* + * Allocate tag for request. + * + * For non-tagged requests a pseudo-tag is created consisting of target*16+lun. For tagged + * requests a tag in the range 128-255 is allocated. + * + * If a pseudo-tag is inuse for a non-tagged command or there are no tags available for + * a tagged request, then the command is blocked until a tag becomes available. + * + * Note: If we are being called during request sense processing (srbState != ksrbStateCDBDone) + * then a tag has already been allocated to the request. + */ + if ( scsiCDB.cdbTagMsg != 0 ) + { + nexus->msgData[msgIndex++] = scsiCDB.cdbTagMsg; + nexus->msgData[msgIndex++] = srb->tag = srb->nexus.tag = scsiCDB.cdbTag + 128; + } + else + { + srb->tag = srb->nexus.tag = ((UInt32)srb->target << 3) | srb->lun; + } + /* + * Setup to negotiate for Wide (16-bit) data transfers + * + * Note: There is no provision to negotiate back to narrow transfers although + * SCSI does support this. 
+ */ + + scsiCommand->getDevice(kIOSCSIParallelDevice)->getTargetParms( &targetParms ); + + if ( scsiCDB.cdbFlags & (kCDBFlagsNegotiateWDTR | kCDBFlagsNegotiateSDTR) ) + { + negotiateWDTRComplete = negotiateSDTRComplete = false; + } + + if ( scsiCDB.cdbFlags & kCDBFlagsNegotiateWDTR ) + { + nexus->msgData[msgIndex++] = kSCSIMsgExtended; + nexus->msgData[msgIndex++] = 2; + nexus->msgData[msgIndex++] = kSCSIMsgWideDataXferReq; + + for ( tw = targetParms.transferWidth, i = (UInt32)-1; + tw; + tw >>= 1, i++ ) + ; + + nexus->msgData[msgIndex++] = i; + } + + /* + * Setup to negotiate for Synchronous data transfers. + * + * Note: We can negotiate back to async based on the flags in the command. + */ + + if ( scsiCDB.cdbFlags & kCDBFlagsNegotiateSDTR ) + { + nexus->msgData[msgIndex++] = kSCSIMsgExtended; + nexus->msgData[msgIndex++] = 3; + nexus->msgData[msgIndex++] = kSCSIMsgSyncXferReq; + if ( targetParms.transferOffset != 0 ) + { + nexus->msgData[msgIndex++] = targetParms.transferPeriodpS / 4000; + nexus->msgData[msgIndex++] = targetParms.transferOffset; + } + else + { + nexus->msgData[msgIndex++] = 0; + nexus->msgData[msgIndex++] = 0; + } + + } + + /* + * If we are negotiating for both Sync and Wide data transfers, we setup both messages + * in the Nexus msgOut buffer. However, after each message the script needs to wait for + * a reply message from the target. In this case, we set the msgOut length to include + * bytes upto the end of the Wide message. When we get the reply from the target, the + * routine handling the WDTR will setup the Nexus pointers/counts to send the remaining + * message bytes. See Sym8xxExecute.m(Sym8xxNegotiateWDTR). 
+ */ + srb->srbMsgLength = msgIndex; + + if ((scsiCDB.cdbFlags & (kCDBFlagsNegotiateWDTR | kCDBFlagsNegotiateSDTR)) + == (kCDBFlagsNegotiateWDTR | kCDBFlagsNegotiateSDTR)) + { + msgIndex -= 5; + } + + nexus->msg.length = OSSwapHostToLittleInt32( msgIndex ); + + srb->srbCDBFlags = scsiCDB.cdbFlags; +} + +/*-----------------------------------------------------------------------------* + * This routine sets up the data transfer SG list for the client's buffer in the + * Nexus structure. + * + * The SGList actually consists of script instructions. The script will branch + * to the SGList when the target enters data transfer phase. When the SGList completes + * it will either execute a script INT instruction if there are more segments of the + * user buffer that need to be transferred or will execute a script RETURN instruction + * to return to the script. + * + * The first two slots in the SGList are reserved for partial data transfers. See + * Sym8xxExecute.m(Sym8xxAdjustDataPtrs). + * + *-----------------------------------------------------------------------------*/ + + +/*-----------------------------------------------------------------------------* + * Build SG list based on an IOMemoryDescriptor object. 
+ * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxUpdateSGList( SRB *srb ) +{ + IOPhysicalSegment range; + UInt32 actRanges; + UInt32 offset; + UInt32 bytesLeft; + UInt32 i; + IOReturn rc = true; + + offset = srb->xferOffset; + bytesLeft = srb->xferCount - srb->xferOffset; + + if ( bytesLeft == 0 ) return rc; + + i = 2; + + while ( (bytesLeft > 0) && (i < MAX_SGLIST_ENTRIES-1)) + { + actRanges = memoryCursor->getPhysicalSegments( srb->xferDesc, + offset, + &range, + 1 ); + + if ( actRanges != 1 ) + { + rc = false; + break; + } + + /* + * Note: The script instruction(s) to transfer data to/from the scsi bus + * have the same format as a typical SGList with the transfer length + * as the first word and the physical transfer address as the second. + * The data transfer direction is specified by a bit or'd into the + * high byte of the SG entry's length field. + */ + srb->nexus.sgListData[i].physAddr = OSSwapHostToLittleInt32( (UInt32)range.location ); + srb->nexus.sgListData[i].length = OSSwapHostToLittleInt32( range.length | srb->directionMask ); + + bytesLeft -= range.length; + offset += range.length; + i++; + } + + if ( !bytesLeft ) + { + srb->nexus.sgListData[i].length = OSSwapHostToLittleInt32( 0x90080000 ); + srb->nexus.sgListData[i].physAddr = OSSwapHostToLittleInt32( 0x00000000 ); + } + else + { + srb->nexus.sgListData[i].length = OSSwapHostToLittleInt32( 0x98080000 ); + srb->nexus.sgListData[i].physAddr = OSSwapHostToLittleInt32( A_sglist_complete ); + } + + srb->xferOffsetPrev = srb->xferOffset; + srb->xferOffset = offset; + + return rc; +} + diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxController.h b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxController.h new file mode 100644 index 000000000..1b1d08551 --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxController.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxController.h created by russb2 on Sat 30-May-1998 */ + +#include +#include +#include + +#include + +#include "Sym8xxRegs.h" +#include "Sym8xxInterface.h" +#include "Sym8xxSRB.h" + +#include "Sym8xxScript.h" + +#define offsetof(type, field) ((int)&((type *)0)->field) + +class Sym8xxSCSIController : public IOSCSIParallelController +{ + OSDeclareDefaultStructors( Sym8xxSCSIController ) + +private: + + AdapterInterface *adapter; + AdapterInterface *adapterPhys; + + UInt32 nexusArrayVirt[MAX_SCSI_TAG]; + + IOBigMemoryCursor *memoryCursor; + + IOPCIDevice *provider; + + IOInterruptEventSource *interruptEvent; + + IOMemoryMap *ioMapRegs; + IOMemoryMap *ioMapRam; + + UInt8 mailBoxIndex; + + UInt32 initiatorID; + + UInt8 istatReg; + UInt8 dstatReg; + u_int16_t sistReg; + + UInt32 scriptRestartAddr; + + UInt32 srbSeqNum; + UInt32 resetSeqNum; + + SRB *resetSRB; + SRB *abortSRB; + SRB *abortCurrentSRB; + bool abortReqPending; + bool initialReset; + + bool negotiateWDTRComplete; + bool negotiateSDTRComplete; + + UInt32 transferPeriod; + UInt32 transferOffset; + UInt32 transferWidth; + + UInt32 chipId; 
+ UInt32 chipClockRate; + + volatile UInt8 *chipBaseAddr; + UInt8 *chipBaseAddrPhys; + + volatile UInt8 *chipRamAddr; + UInt8 *chipRamAddrPhys; + +public: + bool configure( IOService *forProvider, SCSIControllerInfo *controllerInfo ); + void executeCommand( IOSCSIParallelCommand *scsiCommand ); + void cancelCommand( IOSCSIParallelCommand *scsiCommand ); + void resetCommand( IOSCSIParallelCommand *scsiCommand ); + +private: + bool Sym8xxInit(); + bool Sym8xxInitPCI(); + bool Sym8xxInitVars(); + bool Sym8xxInitScript(); + void Sym8xxLoadScript( UInt32 *scriptPgm, UInt32 scriptWords ); + bool Sym8xxInitChip(); + + void Sym8xxCalcMsgs( IOSCSIParallelCommand *scsiCommand ); + void Sym8xxAbortCommand( IOSCSIParallelCommand *scsiCommand ); + + bool Sym8xxUpdateSGList( SRB *srb ); + bool Sym8xxUpdateSGListVirt( SRB *srb ); + bool Sym8xxUpdateSGListDesc( SRB *srb ); + + void Sym8xxStartSRB( SRB *srb ); + void Sym8xxSignalScript( SRB *srb ); + void interruptOccurred( IOInterruptEventSource *ies, int intCount ); + void Sym8xxProcessIODone(); + void Sym8xxCompleteSRB( SRB *srb ); + void Sym8xxProcessInterrupt(); + void Sym8xxAdjustDataPtrs( SRB *srb, Nexus *nexus ); + UInt32 Sym8xxCheckFifo( SRB *srb, UInt32 *pfifoCnt ); + void Sym8xxUpdateXferOffset( SRB *srb ); + void Sym8xxProcessNoNexus(); + void Sym8xxAbortCurrent( SRB *srb ); + void Sym8xxClearFifo(); + void Sym8xxNegotiateSDTR( SRB *srb, Nexus *nexus ); + void Sym8xxNegotiateWDTR( SRB *srb, Nexus *nexus ); + void Sym8xxSendMsgReject( SRB *srb ); + void Sym8xxSCSIBusReset(SRB *srb ); + void Sym8xxProcessSCSIBusReset(); + void Sym8xxCheckRequestSense( SRB *srb ); + void Sym8xxAbortBdr( SRB *srb ); + bool Sym8xxCancelMailBox( Nexus *nexusCancel ); + void Sym8xxCancelMailBox( UInt32 target, UInt32 lun, bool fReschedule ); + + void Sym8xxAbortScript(); + + UInt32 Sym8xxReadRegs( volatile UInt8 *chipRegs, UInt32 regOffset, UInt32 regSize ); + void Sym8xxWriteRegs( volatile UInt8 *chipRegs, UInt32 regOffset, UInt32 regSize, 
UInt32 regValue ); + +}; + diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxExecute.cpp b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxExecute.cpp new file mode 100644 index 000000000..a90da6559 --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxExecute.cpp @@ -0,0 +1,1372 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxExecute.m created by russb2 on Sat 30-May-1998 */ + +#include "Sym8xxController.h" + +extern "C" +{ + unsigned int ml_phys_read( vm_offset_t paddr ); +}; + +#if 0 +static UInt32 dropInt = 0; +#endif + +void Sym8xxSCSIController::Sym8xxStartSRB( SRB *srb ) +{ + + srb->nexus.targetParms.scntl3Reg = adapter->targetClocks[srb->target].scntl3Reg; + srb->nexus.targetParms.sxferReg = adapter->targetClocks[srb->target].sxferReg; + + adapter->nexusPtrsVirt[srb->nexus.tag] = &srb->nexus; + adapter->nexusPtrsPhys[srb->nexus.tag] = (Nexus *)OSSwapHostToLittleInt32( (UInt32)&srb->srbPhys->nexus ); + adapter->schedMailBox[mailBoxIndex++] = (Nexus *)OSSwapHostToLittleInt32 ( (UInt32)&srb->srbPhys->nexus ); + + Sym8xxSignalScript( srb ); +} + + +/*-----------------------------------------------------------------------------* + * Interrupts from the Symbios chipset are dispatched here at task time under the + * IOThread's context. + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::interruptOccurred( IOInterruptEventSource *ies, int intCount ) +{ + do + { + /* + * The chipset's ISTAT reg gives us the general interrupting condiditions, + * with DSTAT and SIST providing more detailed information. + */ + istatReg = Sym8xxReadRegs( chipBaseAddr, ISTAT, ISTAT_SIZE ); + + /* The INTF bit in ISTAT indicates that the script is signalling the driver + * that its IODone mailbox is full and that we should process a completed + * request. The script continues to run after posting this interrupt unlike + * other chipset interrupts which require the driver to restart the script + * engine. 
+ */ + if ( istatReg & INTF ) + { + Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, istatReg ); +#if 0 + if ( dropInt++ > 100 ) + { + dropInt = 0; + SCRIPT_VAR(R_ld_IOdone_mailbox) = 0; + continue; + } +#endif + Sym8xxProcessIODone(); + } + + /* + * Handle remaining interrupting conditions + */ + if ( istatReg & (SIP | DIP) ) + { + Sym8xxProcessInterrupt(); + } + } + while ( istatReg & (SIP | DIP | INTF) ); + + getWorkLoop()->enableAllInterrupts(); + +} + +/*-----------------------------------------------------------------------------* + * Process a request posted in the script's IODone mailbox. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxProcessIODone() +{ + SRB *srb; + Nexus *nexus; + IODoneMailBox *pMailBox; + + + /* + * The IODone mailbox contains an index into our Nexus pointer tables. + * + * The Nexus struct is part of the SRB so we can get our SRB address + * by subtracting the offset of the Nexus struct in the SRB. + */ + pMailBox = (IODoneMailBox *)&SCRIPT_VAR(R_ld_IOdone_mailbox); + nexus = adapter->nexusPtrsVirt[pMailBox->nexus]; + srb = (SRB *)((UInt32)nexus - offsetof(SRB, nexus)); + + srb->srbSCSIStatus = pMailBox->status; + + if ( srb->srbSCSIStatus == kSCSIStatusCheckCondition ) + { + Sym8xxCheckRequestSense( srb ); + } + + Sym8xxUpdateXferOffset( srb ); + + /* + * Clear the completed Nexus pointer from our tables and clear the + * IODone mailbox. 
+ */ + adapter->nexusPtrsVirt[pMailBox->nexus] = (Nexus *) -1; + adapter->nexusPtrsPhys[pMailBox->nexus] = (Nexus *) -1; + SCRIPT_VAR(R_ld_IOdone_mailbox) = 0; + + /* + * Wake up the client's thread to do post-processing + */ + Sym8xxCompleteSRB( srb ); + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_select_phase]; +} +/*-----------------------------------------------------------------------------* + * + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxCompleteSRB( SRB *srb ) +{ + IOSCSIParallelCommand *scsiCommand; + SCSIResults scsiResults; + SCSINegotiationResults negotiationResult, *negResult; + + + scsiCommand = srb->scsiCommand; + + bzero( &scsiResults, sizeof(scsiResults) ); + + scsiResults.adapterStatus = srb->srbAdapterStatus; + scsiResults.returnCode = srb->srbReturnCode; + + + if ( srb == abortSRB ) + { + abortSRB = 0; + if ( abortReqPending == true ) + { + abortReqPending = false; + enableCommands(); + } + } + else + { + scsiResults.bytesTransferred = srb->xferDone; + scsiResults.scsiStatus = srb->srbSCSIStatus; + } + + negResult = 0; + + if ( (srb->srbCDBFlags & kCDBFlagsNegotiateSDTR) || (srb->srbCDBFlags & kCDBFlagsNegotiateWDTR) ) + { + bzero( &negotiationResult, sizeof(struct SCSINegotiationResults) ); + + if ( ((srb->srbCDBFlags & kCDBFlagsNegotiateSDTR) && srb->negotiateSDTRComplete == false) || + ((srb->srbCDBFlags & kCDBFlagsNegotiateWDTR) && srb->negotiateWDTRComplete == false) ) + { + negotiationResult.returnCode = kIOReturnIOError; + } + + negotiationResult.transferPeriodpS = transferPeriod; + negotiationResult.transferOffset = transferOffset; + negotiationResult.transferWidth = transferWidth; + negotiationResult.transferOptions = 0; + + negResult = &negotiationResult; + } + + scsiCommand->setResults( &scsiResults, negResult ); + scsiCommand->complete(); +} + +/*-----------------------------------------------------------------------------* + * General script 
interrupt processing + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxProcessInterrupt() +{ + SRB *srb = NULL; + Nexus *nexus = NULL; + UInt32 nexusIndex; + UInt32 scriptPhase; + UInt32 fifoCnt = 0; + UInt32 dspsReg = 0; + UInt32 dspReg = 0; + + + /* + * Read DSTAT/SIST regs to determine why the script stopped. + */ + dstatReg = Sym8xxReadRegs( chipBaseAddr, DSTAT, DSTAT_SIZE ); + IODelay(5); + sistReg = Sym8xxReadRegs( chipBaseAddr, SIST, SIST_SIZE ); + +// printf( "SCSI(Symbios8xx): SIST = %04x DSTAT = %02x\n\r", sistReg, dstatReg ); + + /* + * This Script var tells us what the script thinks it was doing when the interrupt occurred. + */ + scriptPhase = OSSwapHostToLittleInt32( SCRIPT_VAR(R_ld_phase_flag) ); + + /* + * SCSI Bus reset detected + * + * Clean up the carnage. + * Note: This may be either an adapter or target initiated reset. + */ + if ( sistReg & RSTI ) + { + Sym8xxProcessSCSIBusReset(); + return; + } + + /* + * Calculate our current SRB/Nexus. + * + * Read a script var to determine the index of the nexus it was processing + * when the interrupt occurred. The script will invalidate the index if there + * is no target currently connected or the script cannot determine which target + * has reconnected. + */ + nexusIndex = OSSwapHostToLittleInt32(SCRIPT_VAR(R_ld_nexus_index)); + if ( nexusIndex >= MAX_SCSI_TAG ) + { + Sym8xxProcessNoNexus(); + return; + } + nexus = adapter->nexusPtrsVirt[nexusIndex]; + if ( nexus == (Nexus *) -1 ) + { + Sym8xxProcessNoNexus(); + return; + } + srb = (SRB *)((UInt32)nexus - offsetof(SRB, nexus)); + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_phase_handler]; + + /* + * Parity and SCSI Gross Errors. + * + * Abort the current connection. The abort completion will trigger + * clean-up of the current SRB/Nexus. 
+ */ + if ( sistReg & PAR ) + { + srb->srbAdapterStatus = kSCSIAdapterStatusParityError; + Sym8xxAbortCurrent( srb ); + } + + else if ( sistReg & SGE ) + { + srb->srbAdapterStatus = kSCSIAdapterStatusProtocolError; + Sym8xxAbortCurrent( srb ); + } + + /* + * Unexpected disconnect. + * + * If we were currently trying to abort this connection then mark the abort + * as completed. For all cases clean-up and wake-up the client thread. + */ + else if ( sistReg & UDC ) + { + if ( srb->srbAdapterStatus == kSCSIAdapterStatusSuccess ) + { + srb->srbAdapterStatus = kSCSIAdapterStatusProtocolError; + } + adapter->nexusPtrsVirt[nexusIndex] = (Nexus *) -1; + adapter->nexusPtrsPhys[nexusIndex] = (Nexus *) -1; + + if ( scriptPhase == A_kphase_ABORT_CURRENT ) + { + abortCurrentSRB = NULL; + } + + Sym8xxCompleteSRB( srb ); + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_select_phase]; + } + + /* + * Phase Mis-match + * + * If we are in MsgOut phase then calculate how much of the message we sent. For + * now, however, we dont handle the target rejecting messages, so the request is aborted. + * + * If we are in DataIn/DataOut phase. We update the SRB/Nexus with our current data + * pointers. + */ + else if ( sistReg & MA ) + { + if ( scriptPhase == A_kphase_MSG_OUT ) + { + srb->srbMsgResid = Sym8xxCheckFifo( srb, &fifoCnt ); + nexus->msg.ppData = OSSwapHostToLittleInt32( OSSwapHostToLittleInt32(nexus->msg.ppData) + + OSSwapHostToLittleInt32(nexus->msg.length) + - srb->srbMsgResid ); + nexus->msg.length = OSSwapHostToLittleInt32( srb->srbMsgResid ); + + Sym8xxAbortCurrent( srb ); + } + else if ( (scriptPhase == A_kphase_DATA_OUT) || (scriptPhase == A_kphase_DATA_IN) ) + { + Sym8xxAdjustDataPtrs( srb, nexus ); + } + else + { + IOLog("SCSI(Symbios8xx): Unexpected phase mismatch - scriptPhase = %08x\n\r", (int)scriptPhase); + Sym8xxAbortCurrent( srb ); + } + + Sym8xxClearFifo(); + } + + /* + * Selection Timeout. + * + * Clean-up the current request. 
+ */ + else if ( sistReg & STO ) + { + srb->srbAdapterStatus = kSCSIAdapterStatusSelectionTimeout; + + adapter->nexusPtrsVirt[nexusIndex] = (Nexus *) -1; + adapter->nexusPtrsPhys[nexusIndex] = (Nexus *) -1; + SCRIPT_VAR(R_ld_IOdone_mailbox) = 0; + + Sym8xxCompleteSRB( srb ); + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_select_phase]; + } + + /* + * Handle script initiated interrupts + */ + else if ( dstatReg & SIR ) + { + dspsReg = Sym8xxReadRegs( chipBaseAddr, DSPS, DSPS_SIZE ); + +// printf( "SCSI(Symbios8xx): DSPS = %08x\n\r", dspsReg ); + + switch ( dspsReg ) + { + /* + * Non-zero SCSI status + * + * Send request sense CDB or complete request depending on SCSI status value + */ + case A_status_error: + Sym8xxProcessIODone(); + break; + + /* + * Received SDTR/WDTR message from target. + * + * Prepare reply message if we requested negotiation. Otherwise reject + * target initiated negotiation. + */ + case A_negotiateSDTR: + Sym8xxNegotiateSDTR( srb, nexus ); + break; + + case A_negotiateWDTR: + Sym8xxNegotiateWDTR( srb, nexus ); + break; + + /* + * Partial SG List completed. + * + * Refresh the list from the remaining addresses to be transfered and set the + * script engine to branch into the list. + */ + case A_sglist_complete: + Sym8xxUpdateSGList( srb ); + scriptRestartAddr = (UInt32)&srb->srbPhys->nexus.sgListData[2]; + break; + + /* + * Completed abort request + * + * Clean-up the aborted request. + */ + case A_abort_current: + adapter->nexusPtrsVirt[nexusIndex] = (Nexus *) -1; + adapter->nexusPtrsPhys[nexusIndex] = (Nexus *) -1; + + abortCurrentSRB = NULL; + + Sym8xxCompleteSRB( srb ); + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_select_phase]; + break; + + /* + * Script detected protocol errors + * + * Abort the current request. 
+ */ + case A_unknown_phase: + srb->srbAdapterStatus = kSCSIAdapterStatusProtocolError; + Sym8xxAbortCurrent( srb ); + break; + + case A_unknown_msg_reject: + case A_unexpected_msg: + case A_unexpected_ext_msg: + srb->srbAdapterStatus = kSCSIAdapterStatusMsgReject; + Sym8xxAbortCurrent( srb ); + break; + + default: + IOLog( "SCSI(Symbios8xx): Unknown Script Int = %08x\n\r", (int)dspsReg ); + Sym8xxAbortCurrent( srb ); + } + } + + /* + * Illegal script instruction. + * + * We're toast! Abort the current request and hope for the best! + */ + else if ( dstatReg & IID ) + { + dspReg = Sym8xxReadRegs( chipBaseAddr, DSP, DSP_SIZE ); + + IOLog("SCSI(Symbios8xx): Illegal script instruction - dsp = %08x srb=%08x\n\r", (int)dspReg, (int)srb ); + + Sym8xxAbortCurrent( srb ); + } + + if ( scriptRestartAddr ) + { + Sym8xxWriteRegs( chipBaseAddr, DSP, DSP_SIZE, scriptRestartAddr ); + } +} + + +/*-----------------------------------------------------------------------------* + * Current Data Pointer calculations + * + * To do data transfers the driver generates a list of script instructions + * in system storage to deliver data to the requested physical addresses. The + * script branches to the list when the target enters data transfer phase. + * + * When the target changes phase during a data transfer, data is left trapped + * inside the various script engine registers. This routine determines how much + * data was not actually transfered to/from the target and generates a new + * S/G List entry for the partial transfer and a branch back into the original + * S/G list. These script instructions are stored in two reserved slots at the + * top of the original S/G List. 
+ * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxAdjustDataPtrs( SRB *srb, Nexus *nexus ) +{ + UInt32 i; + UInt32 sgResid; + UInt32 fifoCnt; + UInt32 dspReg; + UInt32 sgDone; + UInt8 scntl2Reg; + Nexus *nexusPhys; + + /* + * Determine SG element residual + * + * This routine returns how much of the current S/G List element the + * script was processing remains to be sent/received. All the information + * required to do this is stored in the script engine's registers. + */ + sgResid = Sym8xxCheckFifo( srb, &fifoCnt ); + + /* + * Determine which script instruction in our SGList we were executing when + * the target changed phase. + * + * The script engine's dspReg tells us where the script thinks it was. Based + * on the physical address of our current SRB/Nexus we can calculate + * an index into our S/G List. + */ + dspReg = Sym8xxReadRegs( chipBaseAddr, DSP, DSP_SIZE ); + + i = ((dspReg - (UInt32)srb->srbPhys->nexus.sgListData) / sizeof(SGEntry)) - 1; + + if ( i > MAX_SGLIST_ENTRIES-1 ) + { + IOLog("SCSI(Symbios8xx): Bad sgListIndex\n\r"); + Sym8xxAbortCurrent( srb ); + return; + } + + /* + * Wide/odd-byte transfers. + * + * When dealing with Wide data transfers, if a S/G List ends with an odd-transfer count, then a + * valid received data byte is left in the script engine's SWIDE register. The least painful way + * to recover this byte is to construct a small script thunk to transfer one additional byte. The + * script will automatically draw this byte from the SWIDE register rather than the SCSI bus. + * The script thunk then branches back to script's PhaseHandler entrypoint. 
+ * + */ + nexusPhys = &srb->srbPhys->nexus; + + scntl2Reg = Sym8xxReadRegs( chipBaseAddr, SCNTL2, SCNTL2_SIZE ); + if ( scntl2Reg & WSR ) + { + adapter->xferSWideInst[0] = OSSwapHostToLittleInt32( srb->directionMask | 1 ); + adapter->xferSWideInst[1] = nexus->sgListData[i].physAddr; + adapter->xferSWideInst[2] = OSSwapHostToLittleInt32( 0x80080000 ); + adapter->xferSWideInst[3] = OSSwapHostToLittleInt32( (UInt32)&chipRamAddrPhys[Ent_phase_handler] ); + + scriptRestartAddr = (UInt32) adapterPhys->xferSWideInst; + + /* + * Note: There is an assumption here that the sgResid count will be > 1. It appears + * that the script engine does not generate a phase-mismatch interrupt until + * we attempt to move > 1 byte from the SCSI bus and the only byte available is + * in SWIDE. + */ + sgResid--; + } + + /* + * Calculate partial S/G List instruction and branch + * + * Fill in slots 0/1 of the SGList based on the SGList index (i) and SGList residual count + * (sgResid) calculated above. + * + */ + sgDone = (OSSwapHostToLittleInt32( nexus->sgListData[i].length ) & 0x00ffffff) - sgResid; + + nexus->sgListData[0].length = OSSwapHostToLittleInt32( sgResid | srb->directionMask ); + nexus->sgListData[0].physAddr = OSSwapHostToLittleInt32( OSSwapHostToLittleInt32(nexus->sgListData[i].physAddr) + sgDone ); + /* + * If a previously calculated SGList 0 entry was interrupted again, we dont need to calculate + * a new branch address since the previous one is still valid. + */ + if ( i != 0 ) + { + nexus->sgListData[1].length = OSSwapHostToLittleInt32( 0x80080000 ); + nexus->sgListData[1].physAddr = OSSwapHostToLittleInt32( (UInt32)&nexusPhys->sgListData[i+1] ); + nexus->sgNextIndex = i + 1; + } + nexus->ppSGList = (SGEntry *)OSSwapHostToLittleInt32( (UInt32) &nexusPhys->sgListData[0] ); + + /* + * The script sets this Nexus variable to non-zero each time it calls the driver generated + * S/G list. 
This allows the driver's completion routines to differentiate between a successful + * transfer vs no data transfer at all. + */ + nexus->dataXferCalled = 0; + + return; +} + +/*-----------------------------------------------------------------------------* + * Determine SG element residual + * + * This routine returns how much of the current S/G List element the + * script was processing remains to be sent/received. All the information + * required to do this is stored in the script engine's registers. + * + *-----------------------------------------------------------------------------*/ +UInt32 Sym8xxSCSIController::Sym8xxCheckFifo( SRB *srb, UInt32 *pfifoCnt ) +{ + bool fSCSISend; + bool fXferSync; + UInt32 scriptPhase = 0; + UInt32 dbcReg = 0; + UInt32 dfifoReg = 0; + UInt32 ctest5Reg = 0; + UInt8 sstat0Reg = 0; + UInt8 sstat1Reg = 0; + UInt8 sstat2Reg = 0; + UInt32 fifoCnt = 0; + UInt32 sgResid = 0; + + scriptPhase = OSSwapHostToLittleInt32( SCRIPT_VAR(R_ld_phase_flag) ); + + fSCSISend = (scriptPhase == A_kphase_DATA_OUT) || (scriptPhase == A_kphase_MSG_OUT); + + fXferSync = ((scriptPhase == A_kphase_DATA_OUT) || (scriptPhase == A_kphase_DATA_IN)) + && (srb->nexus.targetParms.sxferReg & 0x1F); + + dbcReg = Sym8xxReadRegs( chipBaseAddr, DBC, DBC_SIZE ) & 0x00ffffff; + + if ( !(dstatReg & DFE) ) + { + ctest5Reg = Sym8xxReadRegs( chipBaseAddr, CTEST5, CTEST5_SIZE ); + dfifoReg = Sym8xxReadRegs( chipBaseAddr, DFIFO, DFIFO_SIZE ); + + if ( ctest5Reg & DFS ) + { + fifoCnt = ((((ctest5Reg & 0x03) << 8) | dfifoReg) - dbcReg) & 0x3ff; + } + else + { + fifoCnt = (dfifoReg - dbcReg) & 0x7f; + } + } + + sstat0Reg = Sym8xxReadRegs( chipBaseAddr, SSTAT0, SSTAT0_SIZE ); + sstat2Reg = Sym8xxReadRegs( chipBaseAddr, SSTAT2, SSTAT2_SIZE ); + + if ( fSCSISend ) + { + fifoCnt += (sstat0Reg & OLF ) ? 1 : 0; + fifoCnt += (sstat2Reg & OLF1) ? 1 : 0; + + if ( fXferSync ) + { + fifoCnt += (sstat0Reg & ORF ) ? 1 : 0; + fifoCnt += (sstat2Reg & ORF1) ? 
1 : 0; + } + } + else + { + if ( fXferSync ) + { + sstat1Reg = Sym8xxReadRegs( chipBaseAddr, SSTAT0, SSTAT0_SIZE ); + fifoCnt += (sstat1Reg >> 4) | (sstat2Reg & FF4); + } + else + { + fifoCnt += (sstat0Reg & ILF ) ? 1 : 0; + fifoCnt += (sstat2Reg & ILF1) ? 1 : 0; + } + } + + sgResid = dbcReg + fifoCnt; + *pfifoCnt = fifoCnt; + + return sgResid; +} + +/*-----------------------------------------------------------------------------* + * Calculate transfer counts. + * + * This routine updates srb->xferDone with the amount of data transferred + * by the last S/G List executed. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxUpdateXferOffset( SRB *srb ) +{ + UInt32 i; + UInt32 xferOffset; + + /* + * srb->xferOffset contains the client buffer offset INCLUDING the range + * covered by the current SGList. + */ + xferOffset = srb->xferOffset; + + /* + * If script did not complete the current transfer list then we need to determine + * how much of the list was completed. + */ + if ( srb->nexus.dataXferCalled == 0 ) + { + /* + * srb->xferOffsetPrev contains the client buffer offset EXCLUDING the + * range covered by the current SGList. + */ + xferOffset = srb->xferOffsetPrev; + + /* + * Calculate bytes transferred for partially completed list. + * + * To calculate the amount of this list completed, we sum the residual amount + * in SGList Slot 0 and the completed list elements 2 to sgNextIndex-1. + */ + if ( srb->nexus.sgNextIndex != 0 ) + { + xferOffset += OSSwapHostToLittleInt32( srb->nexus.sgListData[srb->nexus.sgNextIndex-1].length ) + - OSSwapHostToLittleInt32( srb->nexus.sgListData[0].length ); + + for ( i=2; i < srb->nexus.sgNextIndex-1; i++ ) + { + xferOffset += OSSwapHostToLittleInt32( srb->nexus.sgListData[i].length ) & 0x00ffffff; + } + } + } + + /* + * The script leaves the result of any Ignore Wide Residual message received from the target + * during the transfer. 
+ */ + xferOffset -= srb->nexus.wideResidCount; + + +#if 0 + { + UInt32 resid = srb->xferOffset - xferOffset; + if ( resid ) + { + IOLog( "SCSI(Symbios8xx): Incomplete transfer - Req Count = %08x Act Count = %08x - srb = %08x\n\r", + srb->xferCount, xferOffset, (UInt32)srb ); + } + } +#endif + + srb->xferDone = xferOffset; +} + +/*-----------------------------------------------------------------------------* + * No SRB/Nexus Processing. + * + * In some cases (mainly Aborts) not having a SRB/Nexus is normal. In other + * cases it indicates a problem such a reconnection from a target that we + * have no record of. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxProcessNoNexus() +{ + UInt32 dspsReg; + UInt32 dspReg = 0; + UInt32 scriptPhase = (UInt32)-1 ; + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_select_phase]; + + dspsReg = Sym8xxReadRegs( chipBaseAddr, DSPS, DSPS_SIZE ); + + scriptPhase = OSSwapHostToLittleInt32( SCRIPT_VAR(R_ld_phase_flag) ); + + /* + * If we were trying to abort or disconnect a target and the bus + * is now free we consider the abort to have completed. + */ + if ( sistReg & UDC ) + { + if ( (scriptPhase == A_kphase_ABORT_MAILBOX) && abortSRB ) + { + Sym8xxCompleteSRB( abortSRB ); + SCRIPT_VAR(R_ld_AbortBdr_mailbox) = 0; + } + else if ( scriptPhase == A_kphase_ABORT_CURRENT ) + { + abortCurrentSRB = NULL; + } + } + /* + * If we were trying to connect to a target to send it an abort message, and + * we timed out, we consider the abort as completed. + * + * Note: In this case the target may be hung, but at least its not on the bus. + */ + else if ( sistReg & STO ) + { + if ( (scriptPhase == A_kphase_ABORT_MAILBOX) && abortSRB ) + { + Sym8xxCompleteSRB( abortSRB ); + SCRIPT_VAR(R_ld_AbortBdr_mailbox) = 0; + } + } + + /* + * If the script died, without a vaild nexusIndex, we abort anything that is currently + * connected and hope for the best! 
+ */ + else if ( dstatReg & IID ) + { + dspReg = Sym8xxReadRegs( chipBaseAddr, DSP, DSP_SIZE ); + IOLog("SCSI(Symbios8xx): Illegal script instruction - dsp = %08x srb=0\n\r", (int)dspReg ); + Sym8xxAbortCurrent( (SRB *)-1 ); + } + + /* + * Script signaled conditions + */ + else if ( dstatReg & SIR ) + { + switch ( dspsReg ) + { + case A_abort_current: + abortCurrentSRB = NULL; + break; + + case A_abort_mailbox: + Sym8xxCompleteSRB( abortSRB ); + SCRIPT_VAR(R_ld_AbortBdr_mailbox) = 0; + break; + + default: + Sym8xxAbortCurrent( (SRB *)-1 ); + } + } + else + { + Sym8xxAbortCurrent( (SRB *)-1 ); + } + + if ( scriptRestartAddr ) + { + Sym8xxWriteRegs( chipBaseAddr, DSP, DSP_SIZE, scriptRestartAddr ); + } +} + + +/*-----------------------------------------------------------------------------* + * Abort currently connected target. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxAbortCurrent( SRB *srb ) +{ + if ( abortCurrentSRB ) + { + if ( abortCurrentSRB != srb ) + { + IOLog("SCSI(Symbios8xx): Multiple abort immediate SRBs - resetting\n\r"); + Sym8xxSCSIBusReset( (SRB *)0 ); + } + return; + } + + abortCurrentSRB = srb; + + if ( srb != (SRB *)-1 ) + { + if ( srb->srbAdapterStatus == kSCSIAdapterStatusSuccess ) + { + srb->srbAdapterStatus = kSCSIAdapterStatusProtocolError; + } + } + + /* + * Issue abort or abort tag depending on whether the is a tagged request + */ + SCRIPT_VAR(R_ld_AbortCode) = OSSwapHostToLittleInt32( ((srb != (SRB *)-1) && (srb->nexus.tag >= MIN_SCSI_TAG)) ? 0x0d : 0x06 ); + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_issueAbort_BDR]; + + Sym8xxClearFifo(); +} + +/*-----------------------------------------------------------------------------* + * This routine clears the script engine's SCSI and DMA fifos. 
+ *
+ *-----------------------------------------------------------------------------*/
+void Sym8xxSCSIController::Sym8xxClearFifo()
+{
+    UInt8       ctest3Reg;
+    UInt8       stest2Reg;
+    UInt8       stest3Reg;
+
+    /* A latched SCSI receive-FIFO overflow (ROF) is cleared by writing it back. */
+    stest2Reg = Sym8xxReadRegs( chipBaseAddr, STEST2, STEST2_SIZE );
+    if ( stest2Reg & ROF )
+    {
+        Sym8xxWriteRegs( chipBaseAddr, STEST2, STEST2_SIZE, stest2Reg );
+    }
+
+    /* CLF: clear the DMA FIFO. */
+    ctest3Reg = Sym8xxReadRegs( chipBaseAddr, CTEST3, CTEST3_SIZE );
+    ctest3Reg |= CLF;
+    Sym8xxWriteRegs( chipBaseAddr, CTEST3, CTEST3_SIZE, ctest3Reg );
+
+    /* CSF: clear the SCSI FIFO. */
+    stest3Reg = Sym8xxReadRegs( chipBaseAddr, STEST3, STEST3_SIZE );
+    stest3Reg |= CSF;
+    Sym8xxWriteRegs( chipBaseAddr,STEST3, STEST3_SIZE, stest3Reg );
+
+    /*
+     * Poll until the chip deasserts all three conditions.  The original loop
+     * mistakenly re-read STEST3 into stest2Reg, so the ROF condition tested
+     * below was never refreshed; read STEST2 as intended.
+     */
+    do
+    {
+        ctest3Reg = Sym8xxReadRegs( chipBaseAddr, CTEST3, CTEST3_SIZE );
+        stest2Reg = Sym8xxReadRegs( chipBaseAddr, STEST2, STEST2_SIZE );
+        stest3Reg = Sym8xxReadRegs( chipBaseAddr, STEST3, STEST3_SIZE );
+    }
+    while( (ctest3Reg & CLF) || (stest3Reg & CSF) || (stest2Reg & ROF) );
+}
+
+/*-----------------------------------------------------------------------------*
+ * This routine processes the target's response to our SDTR message.
+ *
+ * We calculate the values for the script engine's timing registers
+ * for synchronous registers, and update our tables indicating that
+ * requested data transfer mode is in-effect.
+ *
+ *-----------------------------------------------------------------------------*/
+void Sym8xxSCSIController::Sym8xxNegotiateSDTR( SRB *srb, Nexus *nexus )
+{
+    UInt32      x;
+    UInt8       *pMsg;
+    UInt32      syncPeriod;
+
+    /*
+     * If we were not negotiating, the send MsgReject to targets negotiation
+     * attempt.
+     */
+    if ( !(srb->srbCDBFlags & kCDBFlagsNegotiateSDTR) )
+    {
+        Sym8xxSendMsgReject( srb );
+        return;
+    }
+
+    /*
+     * Get pointer to negotiation message received from target.
+     */
+    pMsg = (UInt8 *) &SCRIPT_VAR(R_ld_message);
+
+    /*
+     * The target's SDTR response contains the (transfer period / 4).
+ * + * We set our sync clock divisor to 1, 2, or 4 giving us a clock rates + * of: + * 80Mhz (Period = 12.5ns), + * 40Mhz (Period = 25.0ns) + * 20Mhz (Period = 50.0ns) + * + * This is further divided by the value in the sxfer reg to give us the final sync clock rate. + * + * The requested sync period is scaled up by 1000 and the clock periods are scaled up by 10 + * giving a result scaled up by 100. This is rounded-up and converted to sxfer reg values. + */ + if ( pMsg[4] == 0 ) + { + nexus->targetParms.scntl3Reg &= 0x0f; + nexus->targetParms.sxferReg = 0x00; + } + else + { + syncPeriod = (UInt32)pMsg[3] << 2; + if ( syncPeriod < 100 ) + { + nexus->targetParms.scntl3Reg |= SCNTL3_INIT_875_ULTRA; + x = (syncPeriod * 1000) / 125; + } + else if ( syncPeriod < 200 ) + { + nexus->targetParms.scntl3Reg |= SCNTL3_INIT_875_FAST; + x = (syncPeriod * 1000) / 250; + } + else + { + nexus->targetParms.scntl3Reg |= SCNTL3_INIT_875_SLOW; + x = (syncPeriod * 1000) / 500; + } + + if ( x % 100 ) x += 100; + + /* + * sxferReg Bits: 5-0 - Transfer offset + * 7-6 - Sync Clock Divisor (0 = sync clock / 4) + */ + nexus->targetParms.sxferReg = ((x/100 - 4) << 5) | pMsg[4]; + + transferPeriod = syncPeriod * 1000; + transferOffset = pMsg[4]; + + srb->negotiateSDTRComplete = true; + } + + /* + * Update our per-target tables and set-up the hardware regs for this request. + * + * On reconnection attempts, the script will use our per-target tables to set-up + * the scntl3 and sxfer registers in the script engine. 
+ */ + adapter->targetClocks[srb->target].sxferReg = nexus->targetParms.sxferReg; + adapter->targetClocks[srb->target].scntl3Reg = nexus->targetParms.scntl3Reg; + + Sym8xxWriteRegs( chipBaseAddr, SCNTL3, SCNTL3_SIZE, nexus->targetParms.scntl3Reg ); + Sym8xxWriteRegs( chipBaseAddr, SXFER, SXFER_SIZE, nexus->targetParms.sxferReg ); + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_clearACK]; +} + +/*-----------------------------------------------------------------------------* + * This routine processes the target's response to our WDTR message. + * + * In addition, if there is a pending SDTR message, this routine sends it + * to the target. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxNegotiateWDTR( SRB *srb, Nexus *nexus ) +{ + UInt8 *pMsg; + UInt32 msgBytesSent; + UInt32 msgBytesLeft; + + /* + * If we were not negotiating, the send MsgReject to targets negotiation + * attempt. + */ + if ( !(srb->srbCDBFlags & kCDBFlagsNegotiateWDTR) ) + { + Sym8xxSendMsgReject( srb ); + return; + } + + /* + * Set Wide (16-bit) vs Narrow (8-bit) data transfer mode based on target's response. + */ + pMsg = (UInt8 *) &SCRIPT_VAR(R_ld_message); + + if ( pMsg[3] == 1 ) + { + nexus->targetParms.scntl3Reg |= EWS; + transferWidth = 2; + } + else + { + nexus->targetParms.scntl3Reg &= ~EWS; + transferWidth = 1; + } + + /* + * Update our per-target tables and set-up the hardware regs for this request. + * + * On reconnection attempts, the script will use our per-target tables to set-up + * the scntl3 and sxfer registers in the script engine. 
+ */
+
+    adapter->targetClocks[srb->target].scntl3Reg = nexus->targetParms.scntl3Reg;
+    Sym8xxWriteRegs( chipBaseAddr, SCNTL3, SCNTL3_SIZE, nexus->targetParms.scntl3Reg );
+
+    srb->negotiateWDTRComplete = true;
+
+    /*
+     * If there any pending messages left for the target, send them now,
+     */
+    msgBytesSent = OSSwapHostToLittleInt32( nexus->msg.length );
+    msgBytesLeft = srb->srbMsgLength - msgBytesSent;
+    if ( msgBytesLeft )
+    {
+        /* Advance the message-out descriptor past the bytes already sent. */
+        nexus->msg.length = OSSwapHostToLittleInt32( msgBytesLeft );
+        nexus->msg.ppData = OSSwapHostToLittleInt32( OSSwapHostToLittleInt32( nexus->msg.ppData ) + msgBytesSent );
+        scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_issueMessageOut];
+    }
+
+    /*
+     * Otherwise, tell the script we're done with MsgOut phase.
+     */
+    else
+    {
+        scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_clearACK];
+    }
+}
+
+/*-----------------------------------------------------------------------------*
+ * Reject message received from target.
+ *
+ * Queues a single-byte MESSAGE REJECT for the next message-out phase and
+ * restarts the script at its message-out entry point.
+ *
+ *-----------------------------------------------------------------------------*/
+void Sym8xxSCSIController::Sym8xxSendMsgReject( SRB *srb )
+{
+    /* Point the message-out descriptor at the SRB's physical msgData buffer. */
+    srb->nexus.msg.ppData = OSSwapHostToLittleInt32((UInt32)&srb->srbPhys->nexus.msgData);
+    srb->nexus.msg.length = OSSwapHostToLittleInt32(0x01);
+    /* 0x07 is the SCSI MESSAGE REJECT message code. */
+    srb->nexus.msgData[0] = 0x07;
+
+    scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_issueMessageOut];
+}
+
+
+/*-----------------------------------------------------------------------------*
+ * This routine initiates a SCSI Bus Reset.
+ *
+ * This may be an internally generated request as part of error recovery or
+ * a client's bus reset request.
+ * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxSCSIBusReset( SRB *srb ) +{ + if ( srb ) + { + if ( resetSRB ) + { + srb->srbReturnCode = kIOReturnBusy; + Sym8xxCompleteSRB( srb ); + return; + } + resetSRB = srb; + } + + Sym8xxAbortScript(); + + Sym8xxWriteRegs( chipBaseAddr, SCNTL1, SCNTL1_SIZE, SCNTL1_SCSI_RST ); + IODelay( 100 ); + Sym8xxWriteRegs( chipBaseAddr, SCNTL1, SCNTL1_SIZE, SCNTL1_INIT ); +} + +/*-----------------------------------------------------------------------------* + * This routine handles a SCSI Bus Reset interrupt. + * + * The SCSI Bus reset may be generated by a target on the bus, internally from + * the driver's error recovery or from a client request. + * + * Once the reset is detected we establish a settle period where new client requests + * are blocked in the client thread. In addition we flush all currently executing + * scsi requests back to the client. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxProcessSCSIBusReset() +{ + UInt32 i; + + Sym8xxClearFifo(); + + /* + * We clear the script's request mailboxes. Any work in the script mailboxes is + * already in the NexusPtr tables so we have already have handled the SRB/Nexus + * cleanup. 
+ */ + for ( i=0; i < MAX_SCHED_MAILBOXES; i++ ) + { + adapter->schedMailBox[i] = 0; + } + + SCRIPT_VAR(R_ld_AbortBdr_mailbox) = 0; + SCRIPT_VAR(R_ld_IOdone_mailbox) = 0; + SCRIPT_VAR(R_ld_counter) = 0; + mailBoxIndex = 0; + + + /* + * Reset the data transfer mode/clocks in our per-target tables back to Async/Narrow 8-bit + */ + for ( i=0; i < MAX_SCSI_TARGETS; i++ ) + { + adapter->targetClocks[i].scntl3Reg = SCNTL3_INIT_875; + adapter->targetClocks[i].sxferReg = 0; + } + + scriptRestartAddr = (UInt32) &chipRamAddrPhys[Ent_select_phase]; + Sym8xxWriteRegs( chipBaseAddr, DSP, DSP_SIZE, scriptRestartAddr ); + + if ( resetSRB ) + { + resetSRB->srbReturnCode = kIOReturnBusy; + Sym8xxCompleteSRB( resetSRB ); + resetSRB = 0; + } + else if ( initialReset == true ) + { + initialReset = false; + } + else + { + resetOccurred(); + } +} + +/*-----------------------------------------------------------------------------* + * This routine sets the SIGP bit in the script engine's ISTAT + * register. This signals the script to wake-up for a WAIT for + * reselection instruction. The script will then check the mailboxes + * for work to do. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxSignalScript( SRB *srb ) +{ + Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, SIGP ); +} + +/*-----------------------------------------------------------------------------* + * + * + * + * + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxCheckRequestSense( SRB *srb ) +{ + IOSCSIParallelCommand *scsiCommand; + IOMemoryDescriptor *reqSenseDesc; + + scsiCommand = srb->scsiCommand; + + scsiCommand->getPointers( &reqSenseDesc, 0, 0, true ); + + if ( reqSenseDesc != 0 ) + { + Sym8xxCancelMailBox( srb->target, srb->lun, true ); + } +} + +/*-----------------------------------------------------------------------------* + * This routine does a mailbox abort. 
+ * + * This type of abort is used for targets not currently connected to the SCSI Bus. + * + * The script will select the target and send a tag (if required) followed by the + * appropriate abort message (abort/abort-tag) + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxAbortBdr( SRB *srb ) +{ + IOAbortBdrMailBox abortMailBox; + + abortSRB = srb; + + /* + * Setup a script variable containing the abort information. + */ + abortMailBox.identify = srb->nexus.msgData[0]; + abortMailBox.tag = srb->nexus.msgData[1]; + abortMailBox.message = srb->nexus.msgData[2]; + abortMailBox.scsi_id = srb->target; + + SCRIPT_VAR(R_ld_AbortBdr_mailbox) = *(UInt32 *) &abortMailBox; + + Sym8xxSignalScript( srb ); +} + +/*-----------------------------------------------------------------------------* + * + * + * + * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxCancelMailBox( Nexus *nexusCancel ) +{ + Nexus *nexusPhys; + UInt32 i; + + nexusPhys = (Nexus *)OSSwapHostToLittleInt32( (UInt32)nexusCancel ); + for ( i=0; i < MAX_SCHED_MAILBOXES; i++ ) + { + if ( nexusPhys == adapter->schedMailBox[i] ) + { + adapter->schedMailBox[i] = (Nexus *)OSSwapHostToLittleInt32( kMailBoxCancel ); + return true; + } + } + return false; +} + + +/*-----------------------------------------------------------------------------* + * + * + * + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxCancelMailBox( UInt32 target, UInt32 lun, bool fReschedule ) +{ + UInt32 tag; + UInt32 tagPos; + UInt32 tagShift; + + UInt32 i; + + SRB *srb; + Nexus *nexus; + Nexus *nexusPhys; + + tagPos = offsetof(Nexus, tag) & 0x03; + tagShift = 24 - (tagPos << 3); + + for ( i=0; i < MAX_SCHED_MAILBOXES; i++ ) + { + nexusPhys = (Nexus *)OSSwapHostToLittleInt32( (UInt32)adapter->schedMailBox[i] ); + if ( (nexusPhys != (Nexus 
*)kMailBoxEmpty) && (nexusPhys != (Nexus *)kMailBoxCancel) ) + { + /* + * Read the 'tag' byte given Nexus physical address from the mailBox. + * Look-up the virtual address of the corresponding Nexus struct. + */ + tag = ml_phys_read((UInt32)&nexusPhys->tag - tagPos); + tag = (tag >> tagShift) & 0xff; + + nexus = adapter->nexusPtrsVirt[tag]; + if ( nexus == (Nexus *)-1 ) + { + continue; + } + + /* + * If the SCSI target of the mailbox entry matches the abort SRB target, + * then we may have a winner. + */ + srb = (SRB *)((UInt32)nexus - offsetof(SRB, nexus)); + + if ( srb->target == target ) + { + /* + * For a device reset, we cancel all requests for that target regardless of lun. + * For an abort all, we must match on both target and lun + */ + if ( (lun == (UInt32)-1) || (srb->lun == lun) ) + { + adapter->schedMailBox[i] = (Nexus *)OSSwapHostToLittleInt32( kMailBoxCancel ); + + if ( fReschedule == true ) + { + rescheduleCommand( srb->scsiCommand ); + } + } + } + } + } +} + +/*-----------------------------------------------------------------------------* + * This routine is used to shutdown the script engine in an orderly fashion. + * + * Normally the script engine automatically stops when an interrupt is generated. However, + * in the case of timeouts we need to change the script engine's dsp reg (instruction pointer). + * to issue an abort. + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxAbortScript() +{ + mach_timespec_t currentTime; + mach_timespec_t startTime; + + getWorkLoop()->disableAllInterrupts(); + + /* + * We set the ABRT bit in ISTAT and spin until the script engine acknowledges the + * abort or we timeout. 
+ */
+    Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, ABRT );
+
+    IOGetTime( &startTime );
+
+    do
+    {
+        IOGetTime( &currentTime );
+        SUB_MACH_TIMESPEC( &currentTime, &startTime );
+
+        istatReg = Sym8xxReadRegs( chipBaseAddr, ISTAT, ISTAT_SIZE );
+
+        if ( istatReg & SIP )
+        {
+            Sym8xxReadRegs( chipBaseAddr, SIST, SIST_SIZE );
+            continue;
+        }
+
+        if ( istatReg & DIP )
+        {
+            Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, 0x00 );
+            Sym8xxReadRegs( chipBaseAddr, DSTAT, DSTAT_SIZE );
+            break;
+        }
+    }
+    while ( currentTime.tv_nsec < (kAbortScriptTimeoutMS * 1000 * 1000) );
+
+    istatReg = SIGP;
+    Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, istatReg );
+
+    getWorkLoop()->enableAllInterrupts();
+
+    if ( currentTime.tv_nsec >= (kAbortScriptTimeoutMS * 1000 * 1000) )
+    {
+        IOLog( "SCSI(Symbios8xx): Abort script failed - resetting bus\n\r" );
+    }
+
+}
+
+
diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInit.cpp b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInit.cpp
new file mode 100644
index 000000000..64bb689de
--- /dev/null
+++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInit.cpp
@@ -0,0 +1,523 @@
+/*
+ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxInit.m created by russb2 on Sat 30-May-1998 */ + +/*-----------------------------------------------------------------------------* + * This module contains initialization routines for the driver. + * + * Driver initialization consists of: + * + * - Doing PCI bus initialization for the script engine PCI device. + * - Setting up shared communication areas in system memory between the script + * and the driver. + * - Copying the script program into the script engine on-board ram, applying + * script relocation fixups as required. + * - Setting the initial register values for the script engine. + * - Setting up driver related storage and interfacing with driverKit. + * + *-----------------------------------------------------------------------------*/ + +/* + * This define causes Sym8xxScript.h to include the script instructions and + * relocation tables. Normally without this define we only will get #define + * values for interfacing with the script. + */ +#define INCL_SCRIPT_TEXT + +#include "Sym8xxController.h" + +#define super IOSCSIParallelController + +OSDefineMetaClassAndStructors( Sym8xxSCSIController, IOSCSIParallelController ) ; + +/*-----------------------------------------------------------------------------* + * This structure contains most of the inital register settings for + * the script engine. See Sym8xxRegs.h for the actual initialization + * values. 
+ * + *-----------------------------------------------------------------------------*/ +typedef struct ChipInitRegs +{ + UInt32 regNum; + UInt32 regSize; + UInt32 regValue; + +} ChipInitRegs; + +static ChipInitRegs Sym8xxInitRegs[] = +{ + { SCNTL0, SCNTL0_SIZE, SCNTL0_INIT }, + { SCNTL1, SCNTL1_SIZE, SCNTL1_INIT }, + { SCNTL2, SCNTL2_SIZE, SCNTL2_INIT }, + { SCNTL3, SCNTL3_SIZE, SCNTL3_INIT_875 }, + { SXFER, SXFER_SIZE, SXFER_INIT }, + { SDID, SDID_SIZE, SDID_INIT }, + { GPREG, GPREG_SIZE, GPREG_INIT }, + { SFBR, SFBR_SIZE, SFBR_INIT }, + { SOCL, SOCL_SIZE, SOCL_INIT }, + { DSA, DSA_SIZE, DSA_INIT }, + { ISTAT, ISTAT_SIZE, ISTAT_INIT }, + { TEMP, TEMP_SIZE, TEMP_INIT }, + { CTEST0, CTEST0_SIZE, CTEST0_INIT }, + { CTEST3, CTEST3_SIZE, CTEST3_INIT_A }, + { CTEST4, CTEST4_SIZE, CTEST4_INIT }, + { CTEST5, CTEST5_SIZE, CTEST5_INIT_A_revB}, + { DBC, DBC_SIZE, DBC_INIT }, + { DCMD, DCMD_SIZE, DCMD_INIT }, + { DNAD, DNAD_SIZE, DNAD_INIT }, + { DSPS, DSPS_SIZE, DSPS_INIT }, + { SCRATCHA, SCRATCHA_SIZE, SCRATCHA_INIT }, + { DMODE, DMODE_SIZE, DMODE_INIT_A }, + { DIEN, DIEN_SIZE, DIEN_INIT }, + { DWT, DWT_SIZE, DWT_INIT }, + { DCNTL, DCNTL_SIZE, DCNTL_INIT_A }, + { SIEN, SIEN_SIZE, SIEN_INIT }, + { SLPAR, SLPAR_SIZE, SLPAR_INIT }, + { MACNTL, MACNTL_SIZE, MACNTL_INIT }, + { GPCNTL, GPCNTL_SIZE, GPCNTL_INIT }, + { STIME0, STIME0_SIZE, STIME0_INIT }, + { STIME1, STIME1_SIZE, STIME1_INIT }, + { RESPID0, RESPID0_SIZE, RESPID0_INIT }, + { RESPID1, RESPID1_SIZE, RESPID1_INIT }, + { STEST2, STEST2_SIZE, STEST2_INIT }, + { STEST3, STEST3_SIZE, STEST3_INIT }, + { SODL, SODL_SIZE, SODL_INIT }, + { SCRATCHB, SCRATCHB_SIZE, SCRATCHB_INIT } +}; + +/*-----------------------------------------------------------------------------* + * + * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::configure( IOService *forProvider, SCSIControllerInfo *controllerInfo ) +{ + provider = (IOPCIDevice *)forProvider; + + if ( Sym8xxInit() == false 
) + { + return false; + } + + initialReset = true; + + Sym8xxSCSIBusReset( 0 ); + IOSleep(3000); + + controllerInfo->initiatorId = 7; + + controllerInfo->maxTargetsSupported = 16; + controllerInfo->maxLunsSupported = 8; + + controllerInfo->minTransferPeriodpS = (chipId == kChipIdSym875) ? 50000 : 0; + controllerInfo->maxTransferOffset = (chipId == kChipIdSym875) ? 16 : 0; + controllerInfo->maxTransferWidth = 2; + + controllerInfo->maxCommandsPerController = 0; + controllerInfo->maxCommandsPerTarget = 0; + controllerInfo->maxCommandsPerLun = 0; + + controllerInfo->tagAllocationMethod = kTagAllocationPerController; + controllerInfo->maxTags = 128; + + controllerInfo->commandPrivateDataSize = sizeof( SRB ); + controllerInfo->targetPrivateDataSize = 0; + controllerInfo->lunPrivateDataSize = 0; + + controllerInfo->disableCancelCommands = false; + + return true; +} + + +/*-----------------------------------------------------------------------------* + * Script Initialization + * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxInit() +{ + /* + * Perform PCI related initialization + */ + if ( Sym8xxInitPCI() == false ) + { + return false; + } + + /* + * Allocate/initialize driver resources + */ + if ( Sym8xxInitVars() == false ) + { + return false; + } + + /* + * Initialize the script engine registers + */ + if ( Sym8xxInitChip() == false ) + { + return false; + } + + /* + * Apply fixups to script and copy script to script engine's on-board ram + */ + if ( Sym8xxInitScript() == false ) + { + return false; + } + + getWorkLoop()->enableAllInterrupts(); + + /* + * Start script execution + */ + Sym8xxWriteRegs( chipBaseAddr, DSP, DSP_SIZE, (UInt32) &chipRamAddrPhys[Ent_select_phase] ); + + return true; +} + +/*-----------------------------------------------------------------------------* + * Script engine PCI initialization + * + * This routine determines the chip version/revision, enables the chip address + 
* ranges and allocates a virtual mapping to the script engine's registers and + * on-board ram. + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxInitPCI() +{ + unsigned long pciReg0, pciReg8; + UInt32 chipRev; + UInt32 n; + UInt32 ramReg; + OSString *matchEntry; + + + /* + * Determine the number of memory ranges for the PCI device. + * + * The hardware implementation may or may not have a ROM present + * accounting for the difference in the number of ranges. + */ + n = provider->getDeviceMemoryCount(); + if ( !( n == 3 || n == 4 ) ) + { + return false; + } + + /* + * Determine the hardware version. Check the deviceID and + * RevID in the PCI config regs. + */ + pciReg0 = provider->configRead32( 0x00 ); + pciReg8 = provider->configRead32( 0x08 ); + + chipId = pciReg0 >> 16; + chipRev = pciReg8 & 0xff; + +// IOLog( "SCSI(Symbios8xx): Chip Id = %04x Chip rev = %02x\n\r", chipId, chipRev ); + + + ioMapRegs = provider->mapDeviceMemoryWithRegister( 0x14 ); + if ( ioMapRegs == 0 ) + { + return false; + } + + switch ( chipId ) + { + case kChipIdSym875: + ramReg = 0x18; + break; + + case kChipIdSym895: + case kChipIdSym896: + case kChipIdSym1010: + ramReg = 0x1C; + break; + + default: + ramReg = 0x1C; + } + + ioMapRam = provider->mapDeviceMemoryWithRegister( ramReg ); + if ( ioMapRam == 0 ) + { + return false; + } + + /* + * Assume 80Mhz external clock rate for motherboard 875 implementations + * and 40Mhz for others. 
+ */ + matchEntry = OSDynamicCast( OSString, getProperty( gIONameMatchedKey ) ); + if ( matchEntry == 0 ) + { + IOLog("SCSI(Sym8xx): Cannot obtain matching property.\n"); + return false; + } + + if ( matchEntry->isEqualTo( "apple53C8xx" ) == true ) + { + chipClockRate = CLK_80MHz; + } + else + { + chipClockRate = CLK_40MHz; + } + + /* + * BUS MASTER, MEM I/O Space, MEM WR & INV + */ + provider->configWrite32( 0x04, 0x16 ); + + /* + * set Latency to Max , cache 32 + */ + provider->configWrite32( 0x0C, 0x2008 ); + + /* + * get chip register block mapped into pci memory + */ + chipBaseAddr = (UInt8 *)ioMapRegs->getVirtualAddress(); + chipBaseAddrPhys = (UInt8 *)ioMapRegs->getPhysicalAddress(); + +// kprintf( "SCSI(Symbios8xx): Chip Base addr = %08x(p) %08x(v)\n\r", +// (UInt32)chipBaseAddrPhys, (UInt32)chipBaseAddr ); + + chipRamAddr = (UInt8 *)ioMapRam->getVirtualAddress(); + chipRamAddrPhys = (UInt8 *)ioMapRam->getPhysicalAddress(); + +// kprintf( "SCSI(Symbios8xx): Chip Ram addr = %08x(p) %08x(v)\n\r", +// (UInt32)chipRamAddrPhys, (UInt32)chipRamAddr ); + + /* + * Attach interrupt + */ + interruptEvent = IOInterruptEventSource::interruptEventSource( + (OSObject *) this, + (IOInterruptEventAction) &Sym8xxSCSIController::interruptOccurred, + (IOService *) provider, + (int) 0 ); + + if ( interruptEvent == NULL ) + { + return false; + } + + getWorkLoop()->addEventSource( interruptEvent ); + + interruptEvent->enable(); + + /* + * + */ + memoryCursor = IOBigMemoryCursor::withSpecification( 16*1024*1024, 0xffffffff ); + if ( memoryCursor == NULL ) + { + return false; + } + + + + return true; +} + +/*-----------------------------------------------------------------------------* + * This routine allocates/initializes shared memory for communication between + * the script and the driver. In addition other driver resources semaphores, + * queues are initialized here. 
+ * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxInitVars() +{ + UInt32 i; + + adapter = (AdapterInterface *)IOMallocContiguous( page_size, page_size, (IOPhysicalAddress *)&adapterPhys ); + if ( adapter == 0 ) + { + return false; + } + bzero( adapter, page_size ); + + /* + * We keep two copies of the Nexus pointer array. One contains physical addresses and + * is located in the script/driver shared storage. The other copy holds the corresponding + * virtual addresses to the active Nexus structures and is located in the drivers instance + * data. + * Both tables can be accessed through indirect pointers in the script/driver communication + * area. This is the preferred method to access these arrays. + */ + adapter->nexusPtrsVirt = (Nexus **)nexusArrayVirt; + adapter->nexusPtrsPhys = (Nexus **)adapter->nexusArrayPhys; + + for (i=0; i < MAX_SCSI_TAG; i ++ ) + { + adapter->nexusPtrsVirt[i] = (Nexus *) -1; + adapter->nexusPtrsPhys[i] = (Nexus *) -1; + } + + /* + * The script/driver communication area also contains a 16-entry table clock + * settings for each target. + */ + for (i=0; i < MAX_SCSI_TARGETS; i++ ) + { + adapter->targetClocks[i].scntl3Reg = SCNTL3_INIT_875; + } + + + return true; +} + + +/*-----------------------------------------------------------------------------* + * This routine makes a temporary copy of the script program, applies script fixups, + * initializes the script local data table at the top of the script image, and + * copies the modified script image to the script engine's on-board ram. 
+ * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxInitScript() +{ + UInt32 i; + UInt32 scriptPgm[sizeof(BSC_SCRIPT)/sizeof(UInt32)]; + + /* + * Make a copy of the script + */ + bcopy( BSC_SCRIPT, scriptPgm, sizeof(scriptPgm) ); + bzero( scriptPgm, R_ld_size ); + + /* + * Apply fixups to the script copy + */ + for ( i=0; i < sizeof(Rel_Patches)/sizeof(UInt32); i++ ) + { + scriptPgm[Rel_Patches[i]] += (UInt32)chipRamAddrPhys; + } + for ( i=0; i < sizeof(LABELPATCHES)/sizeof(UInt32); i++ ) + { + scriptPgm[LABELPATCHES[i]] += (UInt32)chipRamAddrPhys; + } + + /* + * Initialize the script working variables with pointers to the script/driver + * communications area. + */ + scriptPgm[R_ld_sched_mlbx_base_adr >> 2] = (UInt32)&adapterPhys->schedMailBox; + scriptPgm[R_ld_nexus_array_base >> 2] = (UInt32)&adapterPhys->nexusArrayPhys; + scriptPgm[R_ld_device_table_base_adr >> 2] = (UInt32)&adapterPhys->targetClocks; + + /* + * Load the script image into the script engine's on-board ram. + */ + Sym8xxLoadScript( (UInt32 *)scriptPgm, sizeof(scriptPgm)/sizeof(UInt32) ); + + return true; +} + + +/*-----------------------------------------------------------------------------* + * This routine transfers the script program image into the script engine's + * on-board ram + * + *-----------------------------------------------------------------------------*/ +void Sym8xxSCSIController::Sym8xxLoadScript( UInt32 *scriptPgm, UInt32 scriptWords ) +{ + UInt32 i; + volatile UInt32 *ramPtr = (volatile UInt32 *)chipRamAddr; + + for ( i = 0; i < scriptWords; i++ ) + { + ramPtr[i] = OSSwapHostToLittleInt32(scriptPgm[i]); + } +} + +/*-----------------------------------------------------------------------------* + * This routine initializes the script engine's register block. 
+ * + *-----------------------------------------------------------------------------*/ +bool Sym8xxSCSIController::Sym8xxInitChip() +{ + UInt32 i; + + /* + * Reset the script engine + */ + Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, RST ); + IODelay( 25 ); + Sym8xxWriteRegs( chipBaseAddr, ISTAT, ISTAT_SIZE, ISTAT_INIT ); + + /* + * Load our canned register values into the script engine + */ + for ( i = 0; i < sizeof(Sym8xxInitRegs)/sizeof(ChipInitRegs); i++ ) + { + Sym8xxWriteRegs( chipBaseAddr, Sym8xxInitRegs[i].regNum, Sym8xxInitRegs[i].regSize, Sym8xxInitRegs[i].regValue ); + IODelay( 10 ); + } + + /* + * For hardware implementations that have a 40Mhz SCLK input, we enable the chip's on-board + * clock doubler to bring the clock rate upto 80Mhz which is required for Ultra-SCSI timings. + */ + if ( chipClockRate == CLK_40MHz ) + { + /* + * Clock doubler setup for 875 (rev 3 and above). + */ + /* set clock doubler enabler bit */ + Sym8xxWriteRegs( chipBaseAddr, STEST1, STEST1_SIZE, STEST1_INIT | DBLEN); + IODelay(30); + /* halt scsi clock */ + Sym8xxWriteRegs( chipBaseAddr, STEST3, STEST3_SIZE, STEST3_INIT | HSC ); + IODelay(10); + Sym8xxWriteRegs( chipBaseAddr, SCNTL3, SCNTL3_SIZE, SCNTL3_INIT_875); + IODelay(10); + /* set clock doubler select bit */ + Sym8xxWriteRegs( chipBaseAddr, STEST1, STEST1_SIZE, STEST1_INIT | DBLEN | DBLSEL); + IODelay(10); + /* clear hold on scsi clock */ + Sym8xxWriteRegs( chipBaseAddr, STEST3, STEST3_SIZE, STEST3_INIT); + } + + /* + * Set our host-adapter ID in the script engine's registers + */ + initiatorID = kHostAdapterSCSIId; + + if ( initiatorID > 7 ) + { + Sym8xxWriteRegs( chipBaseAddr, RESPID1, RESPID1_SIZE, 1 << (initiatorID-8)); + } + else + { + Sym8xxWriteRegs( chipBaseAddr, RESPID0, RESPID0_SIZE, 1 << initiatorID); + } + + Sym8xxWriteRegs( chipBaseAddr, SCID, SCID_SIZE, SCID_INIT | initiatorID ); + + return true; +} + + diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInterface.h 
b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInterface.h new file mode 100644 index 000000000..cc031da5f --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInterface.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxInterface.h created by russb2 on Sat 30-May-1998 */ + +/* + * This file contains shared data structures between the script and the driver + * + */ + +#define MAX_SCSI_TARGETS 16 +#define MAX_SCSI_LUNS 8 +#define MAX_SCSI_TAG 256 +#define MIN_SCSI_TAG 128 + +#define kHostAdapterSCSIId 7 + +#define MAX_SCHED_MAILBOXES 256 +#define kMailBoxEmpty 0 +#define kMailBoxCancel 1 + +#define kSCSITimerIntervalMS 250 +#define kReqSenseTimeoutMS 3000 +#define kAbortTimeoutMS 3000 +#define kResetQuiesceDelayMS 3000 +#define kAbortScriptTimeoutMS 50 + +/* + * NEXUS DATA Structure + * + * The Nexus structure contains per-request information for the script/driver + * to execute a SCSI request. 
+ */ + +typedef struct SGEntry +{ + UInt32 length; + UInt32 physAddr; +} SGEntry; + +/* + * Note: There are (3) SG List entries reserved for use by the driver, + * i.e SG Entries 0-1 and the last entry in the list. + */ +#define MAX_SGLIST_ENTRIES (64+3) + +/* + * This part of the Nexus structure contains the script engine clock registers to + * be used for this request. This information is also contained in the per-target + * table (AdapterInterface->targetClocks). + */ +typedef struct NexusParms +{ + UInt8 reserved_1; + UInt8 sxferReg; + UInt8 target; + UInt8 scntl3Reg; +} NexusParms; + +/* + * Pointers in the Nexus to our CDB/MsgOut data are in this format. + */ +typedef struct NexusData +{ + UInt32 length; + UInt32 ppData; +} NexusData; + +typedef struct Nexus Nexus; +struct Nexus +{ + NexusParms targetParms; + + SGEntry *ppSGList; + + NexusData msg; + NexusData cdb; + + UInt32 currentDataPtr; + UInt32 savedDataPtr; + + UInt8 tag; + UInt8 dataXferCalled; + UInt8 wideResidCount; + UInt8 reserved_1[1]; + + /* + * Data buffers for nexus + */ + UInt8 cdbData[16]; + UInt8 msgData[16]; + UInt32 sgNextIndex; + SGEntry sgListData[MAX_SGLIST_ENTRIES]; + +}; + +/* + * Abort Bdr Mailbox + * + * The mailbox is used to send an Abort or Bus Device Reset to a device + * This mailbox is 4 bytes long, and all the necessary information are + * contained in this mailbox (No nexus Data associated) + */ +typedef struct IOAbortBdrMailBox +{ + UInt8 identify; /* Identify msg (0xC0) if Abort A0 */ + UInt8 tag; /* Tag Message or Zero A1 */ + UInt8 scsi_id; /* SCSI id of the target of the request A2 */ + UInt8 message; /* Abort(0x06) or Bdr(0x0C) or AbortTag (0x0D) A3 */ +} IOAbortBdrMailBox; + +/* + * IODone mailbox + * + * This mailbox is used to signal the completion of an I/O to the driver. 
+ */ + +typedef struct IODoneMailBox +{ + UInt8 nexus; /* Nexus of the completed I/O */ + UInt8 status; /* Status of the completed I/O */ + UInt8 zero; + UInt8 semaphore; /* If set, these contents are valid */ +} IODoneMailBox; + +/* + * Adapter Interface + * + * This structure contains the shared data between the script and + * the driver. The script's local variable table is updated to point to the + * physical addresses of the data in this control block. + */ + +typedef struct TargetClocks +{ + UInt8 reserved_1; + UInt8 sxferReg; + UInt8 reserved_2; + UInt8 scntl3Reg; +} TargetClocks; + +typedef struct AdapterInterface +{ + Nexus **nexusPtrsVirt; + Nexus **nexusPtrsPhys; + + Nexus *nexusArrayPhys[MAX_SCSI_TAG]; /* Active SRBs or -1 */ + Nexus *schedMailBox[MAX_SCHED_MAILBOXES]; /* New SRBs */ + TargetClocks targetClocks[MAX_SCSI_TARGETS]; + + UInt32 xferSWideInst[4]; + +} AdapterInterface; + +#define SCRIPT_VAR(x) ( *(UInt32 *)(chipRamAddr+(x)) ) diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxMisc.cpp b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxMisc.cpp new file mode 100644 index 000000000..17d179caf --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxMisc.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxMisc.m created by russb2 on Sat 30-May-1998 */ + +#include "Sym8xxController.h" + +/* + * Miscellaneous IO worker routines + */ + +UInt32 Sym8xxSCSIController::Sym8xxReadRegs( volatile UInt8 *chipRegs, UInt32 regOffset, UInt32 regSize ) +{ + if ( regSize == 1 ) + { + return chipRegs[regOffset]; + } + if ( regSize == 2 ) + { + return OSSwapHostToLittleInt16( *(volatile u_int16_t *)&chipRegs[regOffset] ); + } + else if (regSize == 4 ) + { + return OSSwapHostToLittleInt32( *(volatile UInt32 *)&chipRegs[regOffset] ); + } + else + { + kprintf("SCSI(SymBios875): Sym8xxReadRegs incorrect regSize\n\r" ); + return 0; + } +} + +void Sym8xxSCSIController::Sym8xxWriteRegs( volatile UInt8 *chipRegs, UInt32 regOffset, UInt32 regSize, UInt32 regValue ) +{ + if ( regSize == 1 ) + { + chipRegs[regOffset] = regValue; + } + else if ( regSize == 2 ) + { + volatile u_int16_t *p = (volatile u_int16_t *)&chipRegs[regOffset]; + *p = OSSwapHostToLittleInt16( regValue ); + } + else if ( regSize == 4 ) + { + volatile UInt32 *p = (volatile UInt32 *)&chipRegs[regOffset]; + *p = OSSwapHostToLittleInt32( regValue ); + } + else + { + kprintf("SCSI(SymBios875): Sym8xxWriteRegs incorrect regSize\n\r" ); + } + eieio(); +} diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxRegs.h b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxRegs.h new file mode 100644 index 000000000..fd8102606 --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxRegs.h @@ -0,0 +1,556 @@ +/* + * Copyright (c) 1999 Apple 
Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Copyright (c) 1998 Apple Computer Inc. + * + * Symbios8xx Controller. + * + */ + +/* SymBios 8xx register addressing definitions */ + +#define SCNTL0_SIZE 0x01 +#define SCNTL0 0x00000000 +#define SCNTL0_INIT 0xCA /* Scsi control 0 */ + /* ARB1 1 */ + /* ARB0 1 : Full arbitration */ + /* START 0 : Start sequence */ + /* WATN 0 : Select with ATN */ + /* EPC 1 : enable SCSI bus parity checking */ + /* RES */ + /* AAP 1 : Assert ATN on SCSI parity error */ + /* TRG 0 : Target mode. 
720 is Initiator */ + +#define SCNTL1_SIZE 0x01 +#define SCNTL1 0x00000001 +#define SCNTL1_INIT 0x00 /* Scsi control 1 */ + /* EXC 0 : Extra clock cycle data setup (Sdtr) */ + /* ADB 0 : Assert SCSI data bus */ + /* DHP 0 : Target only Disable halt parity error */ + /* CON 0 : 820 connected to SCSI bus */ + /* RST 0 : Assert SCSI RST signal */ + /* AESP 0 : Force bad parity */ + /* IARB 0 : Immediate arbitration */ + /* SST 0 : Start SCSI transfer */ + +#define SCNTL1_SCSI_RST 0x08 /* force scsi reset in scntl1 reg */ + +#define SCNTL2_SIZE 0x01 +#define SCNTL2 0x00000002 +#define SCNTL2_INIT 0x00 /* Scsi control 2 */ + #define SDU 0x80 /* SDU 0 : SCSI Disconnect Unexpected */ + #define CHM 0x40 /* CHM 0 : Chained mode */ + #define SLPMD 0x40 /* SLPMD 0 : SLPAR Mode Bit */ + #define SLPHBEN 0x40 /* SLPHBEN : SLPAR High Byte Enable */ + #define WSS 0x08 /* WSS 0 : Wide Scsi Send */ + #define VUE0 0x40 /* VUE0 : Vendor Uniq Enhancements Bit 0 */ + #define VUE1 0x40 /* VUE1 : Vendor Uniq Enhancements Bit 1 */ + #define WSR 0x01 /* WSR 0 : Wide Scsi Receive */ + +#define SCNTL3_SIZE 0x01 +#define SCNTL3 0x00000003 +#define SCNTL3_INIT 0x03 /* Scsi control 3 for 40Mhz sys clock */ +#define SCNTL3_INIT_875 0x05 /* Scsi control 3 for 80Mhz sys clock */ +#define SCNTL3_INIT_875_ULTRA 0x95 /* Scsi control 3 for 80Mhz sys clock */ +#define SCNTL3_INIT_875_FAST 0x35 /* Scsi control 3 for 80Mhz sys clock */ +#define SCNTL3_INIT_875_SLOW 0x55 /* Scsi control 3 for 80Mhz sys clock */ + /* RES */ + #define SCF 0x70 /* SCF 0 : Sync clock conversion factor 0-2 */ + #define EWS 0x08 /* EWS 0 : Enable Wide SCSI (wdtr) */ + #define CCF 0x07 /* CCF 0 : Async clock conversion factor 0-2 */ + +#define SCID_SIZE 0x01 +#define SCID 0x00000004 +#define SCID_INIT 0x40 /* Scsi chip Id */ + /* RES */ + /* RRE 1 : Enable response to reselection */ + /* SRE 0 : Disable response to selection */ + /* RES */ + /* ID3 0 */ + /* ID2 0 */ + /* ID1 0 */ + /* ID0 0 : Encoded 53825 chip SCSI Id */ + 
+#define SXFER_SIZE 0x01 +#define SXFER 0x00000005 +#define SXFER_INIT 0x00 /* Scsi Transfer */ + /* TP2 0 */ + /* TP1 0 */ + /* TP0 0 : Scsi sync Transfer Period (4)(Sdtr) */ + /* RES */ + /* MO3 0 */ + /* MO2 0 */ + /* MO1 0 */ + /* MO0 0 : Max Scsi Sync ReqAck offset (async) (Sdtr) */ + +#define SDID_SIZE 0x01 +#define SDID 0x00000006 +#define SDID_INIT 0x00 /* Scsi destination Id */ + /* RES */ + /* RES */ + /* RES */ + /* RES */ + /* ID3 0 */ + /* ID2 0 */ + /* ID1 0 */ + /* ID0 0 : Encoded destination Scsi Id */ + +#define GPREG_SIZE 0x01 +#define GPREG 0x00000007 +#define GPREG_INIT 0x00 /* Read/write general purpose */ +#define GPIO3 0x08 /* GPIO bit 3 */ + /* RES */ + /* RES */ + /* RES */ + /* GPO 0 : General purpose output */ + /* GPI3 0 */ + /* GPI2 0 */ + /* GPI1 0 */ + /* GPI0 0 : General purpose inputs */ + +#define SFBR_SIZE 0x01 +#define SFBR 0x00000008 +#define SFBR_INIT 0x00 + /* SCSI First Byte Received */ + +#define SOCL_SIZE 0x01 +#define SOCL 0x00000009 +#define SOCL_INIT 0x00 + #define SREQ 0x80 /* REQ 0 : Assert SCSI REQ signal */ + #define SACK 0x40 /* ACK 0 : ACK */ + #define SBSY 0x20 /* BSY 0 : BSY */ + #define SSEL 0x10 /* SEL 0 : SEL */ + #define SATN 0x08 /* ATN 0 : ATN */ + #define SMSG 0x04 /* MSG 0 : MSG */ + #define SC_D 0x02 /* C/D 0 : C/D */ + #define SI_O 0x01 /* I/O 0 : I/O */ + +#define SSID_SIZE 0x01 +#define SSID 0x0000000A /* Read Only */ + /* VAL Scsi Valid Bit */ + /* RES */ + /* RES */ + /* RES */ + /* ID3 */ + /* ID2 */ + /* ID1 */ + /* ID0 Encoded Destination Scsi Id */ + +#define SBCL_SIZE 0x01 +#define SBCL 0x0000000B /* Scsi Bus Control Lines Read only */ + /* REQ */ + /* ACK */ + /* BSY */ + /* SEL */ + /* ATN */ + /* MSG */ + /* C/D */ + /* I/O */ + +#define DSTAT_SIZE 0x01 +#define DSTAT 0x0000000C /* DMA status Read only */ + #define DFE 0x80 /* DSTAT DMA FIFO Empty */ + #define MDPE 0x40 /* Master Data Parity Error */ + #define BF 0x20 /* Bus Fault */ + #define DABRT 0x10 /* Abort occurred */ + #define SSI 
0x08 /* Script Step Interrupt */ + #define SIR 0x04 /* Script Interrupt Inst Received */ +#ifdef notdef + #define WTD 0x02 /* was watchdog timer, now reserved */ +#endif /* notdef */ + #define IID 0x01 /* Illegal Instruction Detected */ + +#define SSTAT0_SIZE 0x01 +#define SSTAT0 0x0000000D /* SCSI status zero Read only */ + #define ILF 0x80 /* ILF SIDL least significant byte full */ + #define ORF 0x40 /* ORF SODR least significant byte full */ + #define OLF 0x20 /* OLF SODL least significant byte full */ + #define AIP 0x10 /* AIP Arbitration in progress */ + #define LOA 0x08 /* LOA Lost arbitration */ + #define WOA 0x04 /* WOA Won arbitration */ + #define RSTB 0x02 /* RST Scsi reset signal */ + #define SDP0 0x01 /* SDP0 Scsi SDP0 parity signal */ + +#define SSTAT1_SIZE 0x01 +#define SSTAT1 0x0000000E /* SCSI status one Read only */ + /* FF3-0 Bytes or word in the Scsi FIFO */ + /* SDP Latched Scsi parity */ + /* MSG Scsi phase status */ + /* C/D */ + /* I/O */ + +#define SSTAT2_SIZE 0x01 +#define SSTAT2 0x0000000F /* Scsi status two Read only */ + #define ILF1 0x80 /* ILF1 SIDL most significant byte full */ + #define ORF1 0x40 /* ORF1 SODR " " " " */ + #define OLF1 0x20 /* OLF1 SODL " " " " */ + /* RES */ + #define SPL1 0x08 /* SPL1 Latched Scsi parity for SIDL15-8 */ + #define FF4 0x04 /* FIFO Flags Bit 4 */ + #define LDSC 0x02 /* LDSC Last disconnect */ + #define SDP1 0x01 /* SDP1 Scsi SDP1 Signal */ + +#define DSA_SIZE 0x04 +#define DSA 0x00000010 +#define DSA_INIT 0x00000000 /* Data structure address */ + +#define ISTAT_SIZE 0x01 +#define ISTAT 0x00000014 +#define ISTAT_INIT 0x00 /* Interrupt Status */ + #define ABRT 0x80 /* Abort Operation */ + #define RST 0x40 /* Software reset */ + #define SIGP 0x20 /* Signal process */ + #define SEM 0x10 /* Semaphore */ + #define ISTAT_CON 0X08 /* Connected to target. 
*/ + #define INTF 0x04 /* Interrupt on the fly */ + #define SIP 0x02 /* SCSI Interrupt Pending */ + #define DIP 0x01 /* DMA Interrupt Pending */ + +#define CTEST0_SIZE 0x01 +#define CTEST0 0x00000018 +#define CTEST0_INIT 0x00 /* Chip test zero (now general purpose, rph) */ + +#define CTEST1_SIZE 0x01 +#define CTEST1 0x00000019 /* Chip test one Read only */ + /* FMT3-0 0 : Byte empty in DMA FIFO */ + /* FFL3-0 0 : Byte full in DMA FIFO */ + +#define CTEST2_SIZE 0x01 +#define CTEST2 0x0000001A /* Chip test two Read only */ + /* DDIR Data transfer direction (1 : Scsi bus -> host) */ + /* SIGP Signal process (Clear SIGP ISTAT when read) */ + /* CIO (read-only, indicates chip configured as I/O space) */ + /* CM (read-only, indicates configured as memory space) */ + /* RES */ + /* TEOP Scsi true end of process */ + /* DREQ Data request status */ + /* DACK Data acknowledge status */ + +#define CTEST3_SIZE 0x01 +#define CTEST3 0x0000001B +#define CTEST3_INIT 0x04 /* Chip test three */ +#define CTEST3_INIT_A 0x05 /* Chip test three for 'A' part */ + + #define VERS 0xf0 /* V3-0 0 : Chip revision level */ + #define FLF 0x08 /* FLF 0 : Flush DMA Fifo */ + #define CLF 0x04 /* CLF 1 : Clear DMA FIFO */ + #define FM 0x02 /* FM 0 : Fetch pin mode */ + #define WRIE 0x01 /* WRIE 1 : Write and Invalidate Enable, for 825A only!!! */ + +#define TEMP_SIZE 0x04 +#define TEMP 0x0000001C +#define TEMP_INIT 0x00000000 /* Tempory stack */ + +#define DFIFO_SIZE 0x01 +#define DFIFO 0x00000020 +#define DFIFO_INIT 0x00 /* DMA FIFO */ + /* upper bit used for 825 'A' part when using large fifo */ + /* BO6-0 0: Byte offset counter */ + +#define CTEST4_SIZE 0x01 +#define CTEST4 0x00000021 /* Chip test four */ +// #define CTEST4_INIT 0x80 /* Chip test four DISABLE BURST!! 
*/ +#define CTEST4_INIT 0x00 /* Chip test four */ + /* BDIS 0 : set for Burst Disable, reset allows burst on data moves */ + /* ZMOD High impedance mode */ + /* ZSD Scsi high impedance mode */ + /* SRTM Shadow register test mode */ +/* NOT for bandit!!!! yes for NEW rev of Dumbarton LATER on, not initial!!! */ + /* MPEE 0 : Master Parity Error Enable Do we want this set???? rph */ + /* FBL2-0 Fifo byte control */ + +#define CTEST5_SIZE 0x01 +#define CTEST5 0x00000022 +#define CTEST5_INIT 0x00 /* Chip test five */ +#define CTEST5_INIT_A 0x00 /* Chip test five 'A' part, upper burst OFF */ +#define CTEST5_INIT_A_revB 0x24 /* Chip test five 'A' part, upper burst OFF + * also Enable 536 byte fifo */ + #define ADCK 0x80 /* ADCK 0 : Clock address incrementor */ + #define BBCK 0x40 /* BBCK 0 : Clock byte counter */ + #define DFS 0x20 /* DFS 0 : fifo size - 0=88 1=536 bytes */ + #define MASR 0x10 /* MASR 0 : Master control for set reset pulses */ + #define DDIR 0x08 /* DDIR 0 : DMA direction */ + #define BL2 0x04 /* BL2 0 : see DMODE bits 6,7 */ + #define BO89 0x03 /* BO89 0 : upper bits of DFIFO count */ + +#define CTEST6_SIZE 0x01 +#define CTEST6 0x00000023 +#define CTEST6_INIT 0x00 /* chip test six */ + /* 7-0 0 : DMA Fifo */ + +#define DBC_SIZE 0x04 +#define DBC 0x00000024 +#define DBC_INIT 0x000000 /* DMA Byte Counter */ + +#define DCMD_SIZE 0x01 +#define DCMD 0x00000027 +#define DCMD_INIT 0x00 /* DMA command */ + +#define DNAD_SIZE 0x04 +#define DNAD 0x00000028 +#define DNAD_INIT 0x00000000 /* DMA Next Data Address */ + +#define DSP_SIZE 0x04 +#define DSP 0x0000002C +#define DSP_INIT 0x00000000 /* DMA script pointer */ + +#define DSPS_SIZE 0x04 +#define DSPS 0x00000030 +#define DSPS_INIT 0x00000000 /* DMA SCRIPTS Pointer Save */ + +#define SCRATCHA_SIZE 0x04 +#define SCRATCHA 0x00000034 +#define SCRATCHA0 0x00000034 +#define SCRATCHA1 0x00000035 +#define SCRATCHA2 0x00000036 +#define SCRATCHA3 0x00000037 +#define SCRATCHA_INIT 0x04030201 /* general purpose register */ 
+ +#define DMODE_SIZE 0x01 +#define DMODE 0x00000038 +/* 825 bug!!!!! 8 is max!!!!!!! rph 8-23-94 + */ +#define DMODE_INIT 0x82 /* DMA mode 8 burst xfers + instruc fetch */ +#define DMODE_INIT_A 0x0A /* DMA mode 32 burst xfers + instruc fetch */ + /* BL1 1 : Burst length, burst size is '8' transfers (4 bytes per) */ + /* BL0 0 : Burst length */ + /* SIOM 0 : Source I/O-Memory Enable (Memory space is default) */ + /* DIOM 0 : Destination I/O-Memory Enable (Memory space is default) */ + /* ER 1 : Enable Read Line Command, set for 825'A' part */ + /* ERM 0 : */ + /* BOF 1 : Burst Op Code Fetch Enable, only for 825!!! rph */ + /* MAN 0 : Manual start mode (leave 0 for auto-start with DSP write */ + +#define DIEN_SIZE 0x01 +#define DIEN 0x00000039 +#define DIEN_INIT 0x7D /* No mask on DMA interrupt */ + /* RES */ + /* MDPE 1 : Master Data Parity Error */ + /* BF 1 : Bus fault */ + /* ABRT 1 : Aborted */ + /* SSI 1 : Script step interrupt */ + /* SIR 1 : Script interrupt instruction received */ + /* RES */ + /* IID 1 : Illegal instruction detected */ + +#define DWT_SIZE 0x01 +#define DWT 0x0000003A +#define DWT_INIT 0xD0 /* DMA watchdog timer to 0xD0*32*BCLK ns*/ + +#define DCNTL_SIZE 0x01 +#define DCNTL 0x0000003B +#define DCNTL_INIT 0x01 /* DMA Control register */ +#define DCNTL_INIT_A 0xA1 /* DMA Control register, 'A' part */ + /* CLE 7: Cache Line Size Enable for 'A' part */ + /* PFF 6: pre-fetch flush bit for 'A' part */ + /* PFEN 5: pre-fetch Enable bit for 'A' part */ + /* RES */ + /* RES */ +#define SSM 0x10 /* 0 : Single step mode */ + /* IRQM 0 : HW driver type for IRQ pin, default is open drain, ask HW rph*/ +#define STD 0x04 /* 0 : start DMA operation */ + /* IRQ 1: IRQ disable for 'A' part */ + /* COM 1 : No Compatibility 700 */ + +#define ADDER_SIZE 0x04 +#define ADDER 0x0000003C /* Adder sum output Read only */ + +#define SIEN_SIZE 0x02 +#define SIEN 0x00000040 +#define SIEN_INIT_RST_OFF 0x048D /* SCSI Interrupt enable SIEN0-1 rph */ +#define SIEN_INIT 
0x048F /* SCSI Interrupt enable SIEN0-1 rph */ +/* SIEN0 */ + /* M/A 1 : Scsi phase mismatch */ + /* CMP 0 : Function complete */ + /* SEL 0 : Selected */ + /* RSL 0 : Reselected */ + /* SGE 1 : Scsi Gross error */ + /* UDC 1 : Unexpected disconnect */ + /* RST 1 : Scsi Reset condition */ + /* PAR 1 : Scsi Parity error */ + + /* RES */ + /* RES */ + /* RES */ + /* RES */ + /* RES */ + /* STO 1 : (Re)Selection timeout */ + /* GEM 0 : General purpose timeout*/ + /* HTH 0 : Handshake timeout */ + +#define SIST_SIZE 0x02 +#define SIST 0x00000042 /* Scsi interrupt status Read only */ + /* idem SIEN reg */ + #define STO 0x0400 + #define GEN 0x0200 + #define HTH 0x0100 + + #define MA 0x0080 + #define CMP 0x0040 + #define SEL 0x0020 + #define RSL 0x0010 + #define SGE 0x0008 + #define UDC 0x0004 + #define RSTI 0x0002 + #define PAR 0x0001 + +#define SLPAR_SIZE 0x01 +#define SLPAR 0x00000044 +#define SLPAR_INIT 0x00 /* SCSI longitudinal parity */ + +#define SWIDE_SIZE 0x01 +#define SWIDE 0x00000045 /* Scsi wide residue data Read only */ + +#define MACNTL_SIZE 0x01 +#define MACNTL 0x00000046 +#define MACNTL_INIT 0x00 /* memory access control */ + /* TYP3-0 : Chip Type (read-only) */ + /* DataWr 0 : Data write Far memory */ + /* DataRd 0 : Data read far memory */ + /* Pointer to script fetch 0 : far memory */ + /* Script fetch 0 : far memory */ + +#define GPCNTL_SIZE 0x01 +#define GPCNTL 0x00000047 +#define GPCNTL_INIT 0x0F /* General purpose control Cf appendum ?? */ +// #define GPCNTL_INIT 0xCF /* General purpose control Cf appendum ?? 
*/ + /* ME : 0 Master Enable */ + /* FE : 0 Fetch Enable */ + /* RES */ + /* GPI/O_en4 : 0 GPREG input or output */ + /* GPI/O_en3 : 1 */ + /* GPI/O_en2 : 1 */ + /* GPI/O_en1 : 1 */ + /* GPI/O_en0 : 1 */ + +#define STIME0_SIZE 0x01 +#define STIME0 0x00000048 +#define STIME0_INIT 0x0C /* Scsi timer register 0 */ + /* HTH3 */ + /* HTH2 */ + /* HTH1 0 */ + /* HTH0 0 : Handshake timer period (disabled) */ + /* SEL3 1 */ + /* SEL2 1 */ + /* SEL1 0 */ + /* SEL0 0 : Selection timeout period (204.8ms) */ + +#define STIME1_SIZE 0x01 +#define STIME1 0x00000049 +#define STIME1_INIT 0x00 /* Scsi timer register one */ + /* RES */ + /* RES */ + /* RES */ + /* RES */ + /* GEN3 0 */ + /* GEN2 0 */ + /* GEN1 0 */ + /* GEN0 0 : General purpose timer period (disabled) */ + +#define RESPID0_SIZE 0x01 +#define RESPID0 0x0000004A +#define RESPID0_INIT 0x00 /* Response Id zero */ + /* ID7 - ID0 */ + +#define RESPID1_SIZE 0x01 +#define RESPID1 0x0000004B +#define RESPID1_INIT 0x00 /* Response ID one */ + /* ID15 - ID8 */ + +#define STEST0_SIZE 0x01 +#define STEST0 0x0000004C /* Scsi test register zero Read only */ + /* RES */ + /* RES */ + /* RES */ + /* RES */ + /* SLT Selection response Logic test */ + /* ART Arbitration priority encoder test */ + /* SOZ Scsi synchronous offset zero */ + /* SOM Scsi synchronous offset maximum */ + +#define STEST1_SIZE 0x01 +#define STEST1 0x0000004D /* Scsi test register one Read/Write */ +#define STEST1_INIT 0x00 + #define SCLK 0x80 /* SCLK 0 : 1 = Use PCI Clock 0 = Use SCLK input */ + #define SISIO 0x40 /* SISIO 0 : SCSI Isolation Mode */ + /* 0 : */ + /* 0 : */ + #define DBLEN 0x08 /* DBLEN 0 : SCLK Doubler Enable */ + #define DBLSEL 0x04 /* DBLSEL 0 : SCLK Doubler Select */ + /* 0 : */ + /* 0 : */ + +#define STEST2_SIZE 0x01 +#define STEST2 0x0000004E +#define STEST2_INIT 0x00 /* Scsi Test register two */ +#define STEST2_DIFF_INIT 0x20 /* Scsi Test register two */ + #define SCE 0x80 /* SCE 0 : Scsi control enable */ + #define ROF 0x40 /* ROF 0 : 
Reset Scsi offset */ + #define DIF 0x20 /* DIF 0/1 : SCSI differential mode, set if we detect differential card */ + #define SLB 0x10 /* SLB 0 : Scsi loopback mode */ + #define SZM 0x08 /* SZM 0 : SCSI high impedance mode */ + #define AWS 0x04 /* AWS 0 : Always wide SCSI */ + #define EXT 0x02 /* EXT 0 : Extend REQ/ACK filtering NEVER want SET for 'fast'!!! */ + #define LOW 0x01 /* LOW 0 : Scsi low level mode */ + +#define STEST3_SIZE 0x01 +#define STEST3 0x0000004F +#define STEST3_INIT 0x92 /* Scsi test register 3 */ + #define EAN 0x80 /* EAN 1 : Enable active negation */ + #define STR 0x40 /* STR 0 : Scsi FIFO test read */ + #define HSC 0x20 /* HSC 0 : Halt Scsi Clock */ + #define DSI 0x10 /* DSI 1 : Disable single initiator response */ + /* RES */ + #define TTM 0x04 /* TTM 0 : Timer test mode */ + #define CSF 0x02 /* CSF 1 : Clear SCSI FIFO */ + #define STW 0x01 /* STW 0 : SCSI FIFO test write */ + +#define SSIDL_SIZE 0x02 +#define SSIDL 0x00000050 /* SCSI input data latch Read only */ + +#define SODL_SIZE 0x02 +#define SODL 0x00000054 +#define SODL_INIT 0x0000 /* SCSI Output Data Latch */ + +#define SBDL_SIZE 0x02 +#define SBDL 0x00000058 /* SCSI bus data line Read only */ + +#define SCRATCHB_SIZE 0x04 +#define SCRATCHB 0x0000005C +#define SCRATCHB0 0x0000005C +#define SCRATCHB1 0x0000005D +#define SCRATCHB2 0x0000005E +#define SCRATCHB3 0x0000005F +#define SCRATCHB_INIT 0x00000000 /* general purpose register */ + +/* ************************* */ + +/* Miscellaneous defines */ +#define CLK_40MHz 40 +#define CLK_80MHz 80 +#define kResetRecoveryTimeMS 5000 + +#define kChipIdSym875 0x000f +#define kChipIdSym895 0x000c +#define kChipIdSym896 0x000b +#define kChipIdSym1010 0x0020 diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxSRB.h b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxSRB.h new file mode 100644 index 000000000..b8cd76291 --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxSRB.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* Sym8xxSRB.h created by russb2 on Sat 30-May-1998 */ + +/* + * The SRB is the main per-request data structure used by the driver. + * + * It contains an embedded Nexus structure which is used as a per-request + * communication area between the script and the driver. 
+ */ + +typedef struct SRB SRB; + +struct SRB +{ + SRB *srbPhys; + + IOSCSIParallelCommand *scsiCommand; + + UInt32 srbCDBFlags; + + IOReturn srbReturnCode; + SCSIAdapterStatus srbAdapterStatus; + UInt8 srbSCSIStatus; + + UInt8 srbMsgResid; + UInt8 srbMsgLength; + + UInt8 target; + UInt8 lun; + UInt8 tag; + + UInt8 negotiateSDTRComplete; + UInt8 negotiateWDTRComplete; + + UInt32 directionMask; + + IOMemoryDescriptor *xferDesc; + UInt32 xferOffset; + UInt32 xferOffsetPrev; + UInt32 xferCount; + UInt32 xferDone; + + Nexus nexus; + +}; + diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.h b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.h new file mode 100644 index 000000000..5e2fc0bed --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.h @@ -0,0 +1,474 @@ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +typedef unsigned long ULONG; + +#ifdef INCL_SCRIPT_TEXT + +ULONG BSC_SCRIPT[] = { + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x00000000L, + 0x80000000L, 0x0000004CL, + 0x80000000L, 0x0000000FL, + 0xE1340004L, 0x00000028L, + 0x60000400L, 0x00000000L, + 0x79340000L, 0x00000000L, + 0x79350000L, 0x00000000L, + 0x79340000L, 0x00000000L, + 0x79350000L, 0x00000000L, + 0xE0340004L, 0x0000009CL, + 0xE1100004L, 0x00000024L, + 0xF1100004L, 0x00000000L, + 0x72100000L, 0x00000000L, + 0x980C00FFL, 0x00000008L, + 0xE0100004L, 0x0000002CL, + 0x90080000L, 0x00000000L, + 0xE15C0004L, 0x00000020L, + 0x60000400L, 0x00000000L, + 0x795C0000L, 0x00000000L, + 0x795C0000L, 0x00000000L, + 0xE05C0004L, 0x000000F4L, + 0xE1100004L, 0x00000034L, + 0xF15C0004L, 0x00000000L, + 0x725D0000L, 0x00000000L, + 0x6A050000L, 0x00000000L, + 0x725F0000L, 0x00000000L, + 0x6A030000L, 0x00000000L, + 0x90080000L, 0x00000000L, + 0x7A1A0000L, 0x00000000L, + 0xE1340004L, 0x00000010L, + 0x72340000L, 0x00000000L, + 0x80840000L, 0x000005F0L, + 0xE1340004L, 0x0000000CL, + 0x60000400L, 0x00000000L, + 0x79340000L, 0x00000000L, + 0x79350000L, 0x00000000L, + 0x79340000L, 0x00000000L, + 0x79350000L, 0x00000000L, + 0xE0340004L, 0x0000018CL, + 0xE0340004L, 0x0000025CL, + 0xE1100004L, 0x00000018L, + 0xF1100004L, 0x00000000L, + 0xE0100004L, 0x0000002CL, + 0xE1340004L, 0x0000002CL, + 0x72340000L, 0x00000000L, + 0x808C0001L, 0x00000098L, + 0x72B50000L, 0x00000000L, + 0x72B60000L, 0x00000000L, + 0x72B70000L, 0x00000000L, + 0x808C0000L, 0x00000458L, + 0x785C0800L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0xE15C0004L, 0x00000004L, + 0xF15C0001L, 0x00000020L, + 0xE05C0004L, 0x00000028L, + 0xE1100004L, 0x0000002CL, + 0xF15E0001L, 0x00000002L, + 0x725E0000L, 0x00000000L, + 0x6A5C0000L, 
0x00000000L, + 0xE05C0001L, 0x00000020L, + 0x88880000L, 0xFFFFFE98L, + 0xE1100004L, 0x0000002CL, + 0xF05D0001L, 0x00000001L, + 0xF05F0001L, 0x00000003L, + 0x47000000L, 0x000003D0L, + 0xE1340004L, 0x00000004L, + 0xE1100004L, 0x00000018L, + 0xF0340004L, 0x00000000L, + 0xE15C0001L, 0x0000000CL, + 0x7E5C0100L, 0x00000000L, + 0xE05C0001L, 0x0000000CL, + 0xE15C0001L, 0x0000002CL, + 0x725C0000L, 0x00000000L, + 0x808C0001L, 0xFFFFFE90L, + 0xE1100004L, 0x0000002CL, + 0x828B0000L, 0x00000088L, + 0x808A0000L, 0x000000A8L, + 0x868A0000L, 0x00000020L, + 0x818A0000L, 0x000000E0L, + 0x838A0000L, 0x00000108L, + 0x878A0000L, 0x00000120L, + 0x98080000L, 0x00000000L, + 0x785C0600L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x1E000000L, 0x00000008L, + 0x80880000L, 0xFFFFFFA0L, + 0x58000008L, 0x00000000L, + 0x60000040L, 0x00000000L, + 0x868B0000L, 0xFFFFFFC8L, + 0x87820000L, 0xFFFFFF80L, + 0x0F000001L, 0x00000039L, + 0x60000040L, 0x00000000L, + 0x80880000L, 0xFFFFFFC8L, + 0x785C0200L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x60000008L, 0x00000000L, + 0x1A000000L, 0x00000010L, + 0x80880000L, 0xFFFFFF40L, + 0x785C0000L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x88880000L, 0x00000008L, + 0x80880000L, 0xFFFFFF20L, + 0xF1340004L, 0x00000004L, + 0xE0340004L, 0x00000394L, + 0x7835FF00L, 0x00000000L, + 0xF0350001L, 0x00000021L, + 0x80080000L, 0x00000333L, + 0x785C0100L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x88880000L, 0xFFFFFFC0L, + 0x81830000L, 0xFFFFFED8L, + 0x01000001L, 0x00000008L, + 0x80880000L, 0xFFFFFFE8L, + 0x785C0300L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x0B000001L, 0x00000008L, + 0x80880000L, 0xFFFFFEA8L, + 0x785C0700L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x0F000001L, 0x00000040L, + 0x808C0000L, 0x000000C0L, + 0x808C0002L, 0x000001A8L, + 0x808C0004L, 0x00000148L, + 0x808C0023L, 0x00000038L, + 0x808C0003L, 0x000001B0L, + 0x808C0001L, 0x00000058L, + 0x808C0007L, 0x00000008L, + 0x98080000L, 0x00000002L, + 0x98080000L, 0x0000000CL, + 0x60000040L, 
0x00000000L, + 0x80880000L, 0xFFFFFE38L, + 0x60000040L, 0x00000000L, + 0x0F000001L, 0x00000041L, + 0x6A5E0000L, 0x00000000L, + 0xF05E0001L, 0x00000022L, + 0x60000040L, 0x00000000L, + 0x80880000L, 0xFFFFFE08L, + 0x60000040L, 0x00000000L, + 0x0F000001L, 0x00000041L, + 0x60000040L, 0x00000000L, + 0x0F000001L, 0x00000042L, + 0x60000040L, 0x00000000L, + 0x808C0001L, 0x00000140L, + 0x808C0003L, 0x00000148L, + 0x98080000L, 0x00000003L, + 0x785C0C00L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x7C027F00L, 0x00000000L, + 0x60000040L, 0x00000000L, + 0x48000000L, 0x00000000L, + 0xE1340004L, 0x00000014L, + 0x72370000L, 0x00000000L, + 0x80840000L, 0xFFFFFFE8L, + 0xE1340001L, 0x00000028L, + 0xE15C0001L, 0x00000008L, + 0x725C0000L, 0x00000000L, + 0x6A350000L, 0x00000000L, + 0x78360000L, 0x00000000L, + 0x7837FF00L, 0x00000000L, + 0xE0340004L, 0x00000014L, + 0x72350000L, 0x00000000L, + 0x981CC100L, 0x000000FFL, + 0x808CC100L, 0xFFFFFBC8L, + 0x98080000L, 0x00000001L, + 0xE15C0001L, 0x00000030L, + 0x725C0000L, 0x00000000L, + 0x808C0009L, 0x00000010L, + 0x785C0D00L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x785FFF00L, 0x00000000L, + 0xE05F0001L, 0x0000002BL, + 0x7C027F00L, 0x00000000L, + 0x60000040L, 0x00000000L, + 0x48000000L, 0x00000000L, + 0x80880000L, 0xFFFFFB68L, + 0x785C0E00L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x60000040L, 0x00000000L, + 0x80880000L, 0xFFFFFCB8L, + 0x785C0F00L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x60000040L, 0x00000000L, + 0x80880000L, 0xFFFFFC98L, + 0x0F000002L, 0x00000043L, + 0x98080000L, 0x0000000DL, + 0x0F000001L, 0x00000043L, + 0x98080000L, 0x0000000EL, + 0x7C00DF00L, 0x00000000L, + 0x7A1A0000L, 0x00000000L, + 0x785C0900L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x785FFF00L, 0x00000000L, + 0xE05F0001L, 0x0000002BL, + 0x54000000L, 0xFFFFFAD0L, + 0x720A0000L, 0x00000000L, + 0x980C7F00L, 0x00000007L, + 0x6C5C0F00L, 0x00000000L, + 0xE05C0001L, 0x00000020L, + 0x88880000L, 0xFFFFFA48L, + 0x9F030000L, 0x00000005L, + 0x0F000001L, 
0x00000040L, + 0x60000040L, 0x00000000L, + 0x878B0000L, 0x00000048L, + 0x6C340700L, 0x00000000L, + 0xE15C0001L, 0x00000020L, + 0x60000400L, 0x00000000L, + 0x715C0000L, 0x00000000L, + 0x695C0000L, 0x00000000L, + 0x715C0000L, 0x00000000L, + 0x7AB40000L, 0x00000000L, + 0xE0340001L, 0x00000028L, + 0x80880000L, 0x00000030L, + 0x0F000001L, 0x00000040L, + 0x808C0004L, 0xFFFFFE70L, + 0x60000040L, 0x00000000L, + 0x80840020L, 0xFFFFFB90L, + 0x0F000001L, 0x00000028L, + 0x60000040L, 0x00000000L, + 0x785F0000L, 0x00000000L, + 0xE05F0001L, 0x0000002BL, + 0x88880000L, 0xFFFFF930L, + 0x80880000L, 0xFFFFFB60L, + 0x785C0B00L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x785FFF00L, 0x00000000L, + 0xE05F0001L, 0x0000002BL, + 0xE15E0001L, 0x00000012L, + 0xE05E0001L, 0x00000762L, + 0x45000000L, 0xFFFFFEB0L, + 0x72350000L, 0x00000000L, + 0x80840000L, 0x00000038L, + 0x72370000L, 0x00000000L, + 0x6A350000L, 0x00000000L, + 0xE0340002L, 0x00000038L, + 0x78020000L, 0x00000000L, + 0x0E000002L, 0x00000038L, + 0x48000000L, 0x00000000L, + 0x98080000L, 0x00000009L, + 0x72350000L, 0x00000000L, + 0x6A360000L, 0x00000000L, + 0x78352000L, 0x00000000L, + 0xE0340004L, 0x00000038L, + 0x78020000L, 0x00000000L, + 0x0E000004L, 0x00000038L, + 0x48000000L, 0x00000000L, + 0x98080000L, 0x00000009L, + 0x785C0A00L, 0x00000000L, + 0xE05C0001L, 0x00000030L, + 0x74140800L, 0x00000000L, + 0x980C0000L, 0x0000000AL, + 0x58000008L, 0x00000000L, + 0xE1100004L, 0x0000002CL, + 0x60000040L, 0x00000000L, + 0x868B0000L, 0x000000A8L, + 0x838A0000L, 0x00000030L, + 0x878A0000L, 0x00000038L, + 0x818A0000L, 0x00000040L, + 0x7834AD00L, 0x00000000L, + 0x808A0000L, 0x00000040L, + 0x828A0000L, 0x00000058L, + 0x98080000L, 0x00000000L, + 0x0B000001L, 0x00000038L, + 0x80880000L, 0xFFFFFFA8L, + 0x0F000001L, 0x00000038L, + 0x80880000L, 0xFFFFFF98L, + 0x09000001L, 0x00000038L, + 0x80880000L, 0xFFFFFF88L, + 0x7B347300L, 0x00000000L, + 0xE0340001L, 0x00000038L, + 0x08000001L, 0x00000038L, + 0x80880000L, 0xFFFFFF68L, + 0x78340000L, 
0x00000000L, + 0xE0340001L, 0x00000038L, + 0x0A000001L, 0x00000038L, + 0x80880000L, 0xFFFFFF48L, + 0x78020000L, 0x00000000L, + 0x0E000001L, 0x00000000L, + 0xE1340004L, 0x00000004L, + 0xE0340004L, 0x00000000L, + 0x48000000L, 0x00000000L, + 0x98080000L, 0x0000000AL + +}; + +#endif /* INCL_SCRIPT_TEXT */ + +#define Rel_Count 76 + +#ifdef INCL_SCRIPT_TEXT + +ULONG Rel_Patches[Rel_Count] = { + 0x0000013DL, + 0x0000014FL, + 0x0000004BL, + 0x000001D5L, + 0x00000051L, + 0x00000099L, + 0x0000009DL, + 0x0000003BL, + 0x000000FFL, + 0x00000119L, + 0x00000125L, + 0x00000129L, + 0x0000017FL, + 0x00000183L, + 0x000001A1L, + 0x000001B9L, + 0x0000002DL, + 0x00000065L, + 0x00000067L, + 0x0000007FL, + 0x0000008BL, + 0x0000009FL, + 0x000000A5L, + 0x00000207L, + 0x00000025L, + 0x00000017L, + 0x0000007DL, + 0x00000143L, + 0x00000165L, + 0x00000191L, + 0x000001B5L, + 0x000001C1L, + 0x000001C7L, + 0x000001D3L, + 0x00000077L, + 0x000000B7L, + 0x000000CDL, + 0x000000D7L, + 0x000000E9L, + 0x000000F5L, + 0x000000FDL, + 0x00000135L, + 0x00000159L, + 0x00000161L, + 0x00000171L, + 0x00000179L, + 0x0000018DL, + 0x000001CFL, + 0x000001FFL, + 0x00000061L, + 0x00000095L, + 0x000000C5L, + 0x000001E3L, + 0x000001E7L, + 0x000001F3L, + 0x000001F7L, + 0x0000021BL, + 0x0000021FL, + 0x00000223L, + 0x00000229L, + 0x0000022BL, + 0x00000231L, + 0x00000233L, + 0x00000031L, + 0x00000087L, + 0x0000019BL, + 0x000001A9L, + 0x00000013L, + 0x000000EFL, + 0x000000F7L, + 0x00000145L, + 0x00000079L, + 0x00000093L, + 0x0000023BL, + 0x00000239L, + 0x0000023DL +}; + +#endif /* INCL_SCRIPT_TEXT */ + +#define R_ld_AbortCode 0x00000000L +#define R_ld_zeroes 0x00000004L +#define R_ld_status 0x00000008L +#define R_ld_counter 0x0000000CL +#define R_ld_AbortBdr_mailbox 0x00000010L +#define R_ld_IOdone_mailbox 0x00000014L +#define R_ld_sched_mlbx_base_adr 0x00000018L +#define R_ld_scsi_id 0x00000020L +#define R_ld_nexus_array_base 0x00000024L +#define R_ld_nexus_index 0x00000028L +#define R_ld_nexus 0x0000002CL +#define 
R_ld_phase_flag 0x00000030L +#define R_ld_device_table_base_adr 0x00000034L +#define R_ld_scratch 0x00000038L +#define R_ld_message 0x00000040L +#define R_ld_size 0x0000004CL +#define A_kphase_DATA_OUT 0x00000000L +#define A_unknown_phase 0x00000000L +#define A_TLQ_SCSI_ID 0x00000000L +#define A_kphase_DATA_IN 0x00000001L +#define A_status_error 0x00000001L +#define A_unexpected_msg 0x00000002L +#define A_kphase_COMMAND 0x00000002L +#define A_kphase_STATUS 0x00000003L +#define A_unexpected_ext_msg 0x00000003L +#define A_TLQ_xferAdr 0x00000004L +#define A_no_msgin_after_reselect 0x00000005L +#define A_kphase_MSG_OUT 0x00000006L +#define A_kphase_MSG_IN 0x00000007L +#define A_unknown_reselect 0x00000007L +#define A_kphase_SELECT 0x00000008L +#define A_unallocated_nexus 0x00000008L +#define A_TLQ_MSGOp 0x00000008L +#define A_kphase_RESELECT 0x00000009L +#define A_abort_mailbox 0x00000009L +#define A_abort_current 0x0000000AL +#define A_kphase_ABORT_CURRENT 0x0000000AL +#define A_kphase_ABORT_MAILBOX 0x0000000BL +#define A_kphase_CMD_COMPLETE 0x0000000CL +#define A_unknown_msg_reject 0x0000000CL +#define A_kphase_DISCONNECT 0x0000000DL +#define A_negotiateSDTR 0x0000000DL +#define A_negotiateWDTR 0x0000000EL +#define A_kphase_saveDataPointer 0x0000000EL +#define A_sglist_complete 0x0000000FL +#define A_kphase_restoreDataPointer 0x0000000FL +#define A_TLQ_CDBp 0x00000010L +#define A_TLQ_index 0x00000020L +#define A_TLQ_xferStarted 0x00000021L +#define A_TLQ_IWR 0x00000022L +#define Ent_clearACK 0x00000448L +#define Ent_issueAbort_BDR 0x000007F0L +#define Ent_issueMessageOut 0x000002F0L +#define Ent_phase_handler 0x00000290L +#define Ent_select_phase 0x00000120L + +#ifdef INCL_SCRIPT_TEXT + +ULONG LABELPATCHES[] = { + 0x00000023L, + 0x00000039L, + 0x0000005DL, + 0x0000005FL, + 0x000000DFL, + 0x000001D7L +}; + +#endif /* INCL_SCRIPT_TEXT */ diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.lis b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.lis new file mode 
100644 index 000000000..6b690920f --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.lis @@ -0,0 +1,993 @@ + 1 + 2 ; Copyright (c) 1997-1999 Apple Computer, Inc. All rights reserved. + 3 ; + 4 ; @APPLE_LICENSE_HEADER_START@ + 5 ; + 6 ; The contents of this file constitute Original Code as defined in and + 7 ; are subject to the Apple Public Source License Version 1.1 (the + 8 ; "License"). You may not use this file except in compliance with the + 9 ; License. Please obtain a copy of the License at + 10 ; http://www.apple.com/publicsource and read it before using this file. + 11 ; + 12 ; This Original Code and all software distributed under the License are + 13 ; distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + 14 ; EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + 15 ; INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + 16 ; FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + 17 ; License for the specific language governing rights and limitations + 18 ; under the License. + 19 ; + 20 ; @APPLE_LICENSE_HEADER_END@ + 21 ; + 22 ; File Ownership: + 23 ; + 24 ; DRI: Mike Johnson + 25 ; + 26 ; Other Contact: Russ Berkoff + 27 ; + 28 ; Technology: SCSI + 29 ; + 30 ; Writers: + 31 ; + 32 ; (MLJ) Mike Johnson + 33 ; (RRA) Rick Auricchio + 34 + 35 + 36 ; NCR Errata Listing 125 Item 1 : Clear the SCNTL0 start bit + 37 ; when jump to reselect during select (try_reselect) + 38 ; + 39 ; NCR Errata Listing 117 Item 4 : Bad parity if odd bytes during + 40 ; wide transfer. Only for DATA OUT in Initiator mode. 
+ 41 ; (Confirm by Manfred Eierle 3rd June 93 not during DATA IN) + 42 + 43 ARCH 825A ;specifically for 825a and 875 (new instructions) + 44 + 45 + 46 ;***************************************************************** + 47 ; + 48 ; Phase codes - These values represent which action is being handled + 49 ; + 50 ;***************************************************************** + 51 + 52 ABSOLUTE kphase_DATA_OUT = 0x00 + 53 ABSOLUTE kphase_DATA_IN = 0x01 + 54 ABSOLUTE kphase_COMMAND = 0x02 + 55 ABSOLUTE kphase_STATUS = 0x03 + 56 ABSOLUTE kphase_MSG_OUT = 0x06 + 57 ABSOLUTE kphase_MSG_IN = 0x07 + 58 ABSOLUTE kphase_SELECT = 0x08 + 59 ABSOLUTE kphase_RESELECT = 0x09 + 60 ABSOLUTE kphase_ABORT_CURRENT = 0x0A + 61 ABSOLUTE kphase_ABORT_MAILBOX = 0x0B + 62 ABSOLUTE kphase_CMD_COMPLETE = 0x0C + 63 ABSOLUTE kphase_DISCONNECT = 0x0D + 64 ABSOLUTE kphase_saveDataPointer = 0x0E ; ??? driver work to be done + 65 ABSOLUTE kphase_restoreDataPointer = 0x0F ; ??? driver work to be done + 66 + 67 + 68 ;***************************************************************** + 69 ; interrupt codes + 70 ;***************************************************************** + 71 + 72 ABSOLUTE unknown_phase = 0x00 ; A spurious phase on SCSI Bus + 73 ABSOLUTE status_error = 0x01 ; IO completes, but with status error + 74 ABSOLUTE unexpected_msg = 0x02 ; An 'unknown' message is in ld_message var + 75 ABSOLUTE unexpected_ext_msg = 0x03 ; An 'unknown' extended message in ld_message + 76 ABSOLUTE wide_32_not_supported = 0x04 ; The device wants 32 bits data phase + 77 ABSOLUTE no_msgin_after_reselect = 0x05 ; No message-in after reselection + 78 ABSOLUTE reqack_too_large = 0x06 ; The device answer ReqAck offset is greater than 8 + 79 ABSOLUTE unknown_reselect = 0x07 ; The valid bit in SFBR reg not set + 80 ABSOLUTE unallocated_nexus = 0x08 ; nexus index -> 0xFFFFFFFF + 81 ABSOLUTE abort_mailbox = 0x09 ; Abort/BDR mailbox completed + 82 ABSOLUTE abort_current = 0x0A ; Abort/BDR current op completed + 83 
ABSOLUTE unknown_message_out = 0x0B ; Unknown phase before message out + 84 ABSOLUTE unknown_msg_reject = 0x0C ; Unknown message reject + 85 ABSOLUTE negotiateSDTR = 0x0D ; Sync negotiation rx'd + 86 ABSOLUTE negotiateWDTR = 0x0E ; Wide negotiation rx'd + 87 ABSOLUTE sglist_complete = 0x0F ; SGList complete + 88 + 89 + 90 ;***************************************************************** + 91 ; + 92 ; Data structure for T/L/Q Nexus: + 93 ; + 94 ;***************************************************************** + 95 + 96 ABSOLUTE TLQ_SCSI_ID = 0 ; 4 SCSI ID et al for SELECT instruction + 97 ABSOLUTE TLQ_xferAdr = 4 ; 4 Physical address of CHMOV instructions + 98 ABSOLUTE TLQ_MSGOp = 8 ; 8 Byte count, data adr -> TLQ_MSGO + 99 ABSOLUTE TLQ_CDBp = 16 ; 8 Byte count, data adr -> TLQ_CDB + 100 ABSOLUTE TLQ_CDP = 24 ; 4 Current Data Pointer + 101 ABSOLUTE TLQ_SDP = 28 ; 4 Saved Data Pointer + 102 ABSOLUTE TLQ_index = 32 ; 1 index into nexus array + 103 ABSOLUTE TLQ_xferStarted= 33 ; 1 transfer started flag + 104 ABSOLUTE TLQ_IWR = 34 ; 1 flag to Ignore Wide Residue + 105 ABSOLUTE TLQ_pad = 35 ; 1 pad byte + 106 + 107 + 108 ;***************************************************************** + 109 ; + 110 ; ENTRY declarations - Declare entry points for driver + 111 ; + 112 ;***************************************************************** + 113 + 114 ENTRY select_phase + 115 ENTRY phase_handler + 116 ENTRY issueMessageOut ; for negotiation and Reject messages + 117 ENTRY issueAbort_BDR ; to immediately Abort or Bus-Device-Reset + 118 ENTRY clearACK ; MsgIn done - clr ACK, jump to phase handler + 119 + 120 + 121 ;***************************************************************** + 122 ; + 123 ; Define local data structure at start of SCRIPTS. + 124 ; This structure is allocated by the following nops. 
+ 125 ; + 126 ;***************************************************************** + 127 ; + 128 + 129 RELATIVE local_data \ + 130 00000000: ld_AbortCode = 4{??}\ ; 1 byte code to Abort or BDR + 131 00000004: ld_zeroes = 4{??}\ ; 4 bytes of 0 to clear registers + 132 00000008: ld_status = 4{??}\ ; Status byte from target + 133 0000000C: ld_counter = 4{??}\ ; index into mailbox array + 134 00000010: ld_AbortBdr_mailbox = 4{??}\ ; Abort/BusDeviceReset mailbox + 135 00000014: ld_IOdone_mailbox = 4{??}\ ; [ nexus 0 0 semaphore ] + 136 00000018: ld_sched_mlbx_base_adr = 4{??}\ ; base addr of mailbox array + 137 0000001C: ld_mailboxp = 4{??}\ ; address of current mailbox + 138 00000020: ld_scsi_id = 4{??}\ ; ptr to current mailbox + 139 00000024: ld_nexus_array_base = 4{??}\ ; base address of Nexus pointers + 140 00000028: ld_nexus_index = 4{??}\ ; index to Nexus pointer + 141 0000002C: ld_nexus = 4{??}\ ; address of Nexus + 142 00000030: ld_phase_flag = 4{??}\ ; for debugging + 143 00000034: ld_device_table_base_adr = 4{??}\ ; device configuration table + 144 00000038: ld_scratch = 4{??}\ ; scratch memory + 145 0000003C: ld_unused = 4{??}\ ; unused + 146 00000040: ld_message = 4{??}\ ; buffer for MsgIn bytes + 147 00000044: ld_message4 = 4{??}\ ; buffer continuation + 148 00000048: ld_pad = 4{??}\ ; padding + 149 0000004C: ld_size = 4{??} ; size of this structure + 150 + 151 + 152 00000000: PROC BSC_SCRIPT: + 153 + 154 ; *** These NOPs must be at address 0. *** + 155 ; *** This is reserved space for the structure "local_data". *** + 156 ; *** The driver inits this area to zero. 
*** + 157 + 158 00000000: 80000000 00000000 nop 0 ; ld_AbortCode, ld_zeroes + 159 00000008: 80000000 00000000 nop 0 ; ld_status, ld_counter + 160 + 161 00000010: 80000000 00000000 nop 0 ; ld_AbortBdr_mailbox, ld_IOdone_mailbox + 162 00000018: 80000000 00000000 nop 0 ; ld_sched_mlbx_base_adr, ld_mailboxp + 163 + 164 00000020: 80000000 00000000 nop 0 ; ld_scsi_id, ld_nexus_array_base + 165 00000028: 80000000 00000000 nop 0 ; ld_nexus_index, ld_nexus + 166 + 167 00000030: 80000000 00000000 nop 0 ; ld_phase_flag, ld_device_table_base_adr + 168 00000038: 80000000 00000000 nop 0 ; ld_scratch, ld_unused + 169 + 170 00000040: 80000000 00000000 nop 0 ; ld_message, ld_message4 + 171 00000048: 80000000 0000004C nop ld_size ; ld_pad, ld_size (Use ld_size or lose it) + 172 + 173 00000050: 80000000 0000000F nop sglist_complete ; use sglist_complete or lose it from gen'd output file + 174 + 175 ;**************************************************************************** + 176 ; + 177 ; findNexusFromIndex - load DSA with pointer to Nexus given a Nexus index: + 178 ; + 179 ;**************************************************************************** + 180 + 181 00000058: findNexusFromIndex: + 182 + 183 00000058: E1340004 00000028 load SCRATCHA0, 4, ld_nexus_index ; load index and leading zeroes + 184 00000060: 60000400 00000000 clear CARRY + 185 00000068: 79340000 00000000 move SCRATCHA0 SHL 0 to SCRATCHA0 ; double the index + 186 00000070: 79350000 00000000 move SCRATCHA1 SHL 0 to SCRATCHA1 + 187 00000078: 79340000 00000000 move SCRATCHA0 SHL 0 to SCRATCHA0 ; double again + 188 00000080: 79350000 00000000 move SCRATCHA1 SHL 0 to SCRATCHA1 ; A0 now has index to 4-byte address + 189 00000088: E0340004 0000009C store SCRATCHA0, 4, patchArrayOffset+4 ; *** patch the code + 190 + 191 00000090: E1100004 00000024 load DSA0, 4, ld_nexus_array_base ; load base address of array of Nexus pointers + 192 00000098: patchArrayOffset: + 193 00000098: F1100004 00000000 load DSA0, 4, DSAREL( 0 ) ; 
*** patched offset. Load pointer. + 194 + 195 000000A0: 72100000 00000000 move DSA0 to SFBR ; Ensure pointer is not 0xFFFFFFFF + 196 000000A8: 980C00FF 00000008 int unallocated_nexus, if 0xFF ; Interrupt if NFG + 197 + 198 000000B0: E0100004 0000002C store DSA0, 4, ld_nexus ; Store the Nexus pointer + 199 000000B8: 90080000 00000000 return ; end findNexusFromIndex + 200 + 201 + 202 ;**************************************************************************** + 203 ; + 204 ; initContext - Initialize the registers for Sync and Wide using + 205 ; values stored in the device configuration table. + 206 ; Return with values in SCRATCHB for Select code. + 207 ; + 208 ;**************************************************************************** + 209 + 210 000000C0: initContext: + 211 + 212 000000C0: E15C0004 00000020 load SCRATCHB0, 4, ld_scsi_id ; load 4-bit SCSI ID and zeroes + 213 000000C8: 60000400 00000000 clear CARRY + 214 000000D0: 795C0000 00000000 move SCRATCHB0 SHL SCRATCHB0 ; * 2 + 215 000000D8: 795C0000 00000000 move SCRATCHB0 SHL SCRATCHB0 ; * 2 -> UInt32 index + 216 000000E0: E05C0004 000000F4 store SCRATCHB0, 4, patchGetDevConfigOffset+4 ; *** Patch load code + 217 + 218 000000E8: E1100004 00000034 load DSA0, 4, ld_device_table_base_adr ; load base physical addr of tables + 219 + 220 000000F0: patchGetDevConfigOffset: + 221 000000F0: F15C0004 00000000 load SCRATCHB0, 4, DSAREL( 0 ) ; *** Patched table offset *** + 222 + 223 ; SCRATCHB0 = 0 + 224 ; SCRATCHB1 = TP,MO (SXFER bits7-5 bits3-0) + 225 ; SCRATCHB2 = 0 (position for SCSI ID) + 226 ; SCRATCHB3 = SCCF,EWS (SCNTL3 bits6-4 bit 3) + 227 + 228 000000F8: 725D0000 00000000 move SCRATCHB1 to SFBR ; init SXFER from B1 + 229 00000100: 6A050000 00000000 move SFBR to SXFER + 230 ; Init SCNTL3 from B3 + 231 00000108: 725F0000 00000000 move SCRATCHB3 to SFBR + 232 00000110: 6A030000 00000000 move SFBR to SCNTL3 + 233 00000118: 90080000 00000000 return ; return with SCRATCHB intact. 
+ 234 + 235 + 236 ;***************************************************************** + 237 ; + 238 ; Select_phase: + 239 ; Clear the SIGP bit. + 240 ; Check if any Abort/BusDeviceReset request waiting. + 241 ; Nexus is found in the list of 256 mailboxes. + 242 ; If current mailbox is empty, jump to reselect_phase. + 243 ; SCRIPTS tries to select device. + 244 ; If select fails due to reselect, jump to reselect_phase + 245 ; Select Timeout handled by driver. + 246 ; If select succeeds, clear the mailbox entry + 247 ; and increment the mailbox counter. + 248 ; Jump to the phase_handler (hopefully for MSG_OUT) + 249 ; + 250 ;***************************************************************** + 251 + 252 00000120: select_phase: + 253 + 254 00000120: 7A1A0000 00000000 move CTEST2 | 0x00 to CTEST2 ; Clear SIGP bit from ISTAT reg + 255 + 256 ; Check abort mailbox: + 257 + 258 00000128: E1340004 00000010 load SCRATCHA0, 4, ld_AbortBdr_mailbox ; Get AbortBdr mailbox + 259 ; The Identify byte in byte 0 is also the semaphore + 260 ; A0 = Identify byte (0xC0 + LUN N.B. Disconnect allowed) + 261 ; A1 = Tag, if any + 262 ; A2 = SCSI ID + 263 ; A3 = Abort code Abort=0x06; Abort Tag=0D; Bus Device Reset=0x0C + 264 00000130: 72340000 00000000 move SCRATCHA0 to SFBR ; test the semaphore/Identify + 265 00000138: 80840000 000005F0 jump rel( AbortMailbox ), if not 0 ; jump if aborting + 266 + 267 + 268 ; Get the next IO nexus in the mailboxes circular list. 
+ 269 ; Calculate current mailbox address as so: + 270 ; counter byte index * 4 to get mailbox index + 271 ; add base physical address of mailboxes giving current mailbox address + 272 + 273 00000140: E1340004 0000000C load SCRATCHA0, 4, ld_counter ; get 1-byte mailbox counter & 0s + 274 00000148: 60000400 00000000 clear CARRY + 275 00000150: 79340000 00000000 move SCRATCHA0 SHL 0 to SCRATCHA0 ; double it + 276 00000158: 79350000 00000000 move SCRATCHA1 SHL 0 to SCRATCHA1 + 277 00000160: 79340000 00000000 move SCRATCHA0 SHL 0 to SCRATCHA0 ; double it again + 278 00000168: 79350000 00000000 move SCRATCHA1 SHL 0 to SCRATCHA1 ; now have a UInt32 index + 279 00000170: E0340004 0000018C store SCRATCHA0, 4, fetchMailbox+4 ; *** patch the load DSA instruction + 280 00000178: E0340004 0000025C store SCRATCHA0, 4, clear_mailbox+4 ; *** patch the store DSA instruction + 281 + 282 00000180: E1100004 00000018 load DSA0, 4, ld_sched_mlbx_base_adr ; load base physical address of mailboxes + 283 + 284 00000188: fetchMailbox: + 285 00000188: F1100004 00000000 load DSA0, 4, DSAREL( 0 ) ; *** Patched offset. 
Load Nexus address + 286 00000190: E0100004 0000002C store DSA0, 4, ld_nexus ; save pointer to current Nexus + 287 00000198: E1340004 0000002C load SCRATCHA0, 4, ld_nexus ; copy to A0 + 288 + 289 000001A0: 72340000 00000000 move SCRATCHA0 to SFBR ; + 290 000001A8: 808C0001 00000098 jump rel( next_mailbox ), if 1 ; if low-byte == 0x01 then cancelled mailbox + 291 + 292 000001B0: 72B50000 00000000 move SCRATCHA1 | SFBR to SFBR ; if non-zero, have implicit semaphore + 293 000001B8: 72B60000 00000000 move SCRATCHA2 | SFBR to SFBR + 294 000001C0: 72B70000 00000000 move SCRATCHA3 | SFBR to SFBR + 295 000001C8: 808C0000 00000458 jump rel( reselect_phase ), if 0 ; go to reselect_phase if empty + 296 + 297 ;***************************************************************** + 298 ; + 299 ; Something in mailbox: we have work to do + 300 ; + 301 ;***************************************************************** + 302 + 303 000001D0: 785C0800 00000000 move kphase_SELECT to SCRATCHB0 ; set phase indicator + 304 000001D8: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 305 + 306 000001E0: E15C0004 00000004 load SCRATCHB0, 4, ld_zeroes ; clr the invalid-nexus-index flag + 307 000001E8: F15C0001 00000020 load SCRATCHB0, 1, DSAREL( TLQ_index ) ; get index byte from nexus + 308 000001F0: E05C0004 00000028 store SCRATCHB0, 4, ld_nexus_index ; save it in local data + 309 + 310 000001F8: E1100004 0000002C load DSA0, 4, ld_nexus ; restore DSA register + 311 00000200: F15E0001 00000002 load SCRATCHB2, 1, DSAREL( TLQ_SCSI_ID+2 ) ; get Target's SCSI ID + 312 00000208: 725E0000 00000000 move SCRATCHB2 to SFBR + 313 00000210: 6A5C0000 00000000 move SFBR to SCRATCHB0 ; position it + 314 00000218: E05C0001 00000020 store SCRATCHB0, 1, ld_scsi_id ; save it + 315 00000220: 88880000 FFFFFE98 call rel( initContext ) ; setup Sync/Wide regs in SCRATCHB + 316 00000228: E1100004 0000002C load DSA0, 4, ld_nexus ; restore DSA register + 317 00000230: F05D0001 00000001 store SCRATCHB1, 1, DSAREL( 
TLQ_SCSI_ID+1 ) ; SXFER + 318 00000238: F05F0001 00000003 store SCRATCHB3, 1, DSAREL( TLQ_SCSI_ID+3 ) ; SCNTL3 + 319 + 320 ;********************** select the device ******************************** + 321 00000240: 47000000 000003D0 SELECT ATN from TLQ_SCSI_ID, rel( try_reselect ) ; ************************ + 322 ;************************************************************************* + 323 + 324 ; looking good - clear the mailbox: + 325 + 326 00000248: next_mailbox: + 327 00000248: E1340004 00000004 load SCRATCHA0, 4, ld_zeroes ; zero out scratch register A + 328 00000250: E1100004 00000018 load DSA0, 4, ld_sched_mlbx_base_adr ; load base physical address of mailboxes + 329 00000258: clear_mailbox: + 330 00000258: F0340004 00000000 store SCRATCHA0, 4, DSAREL( 0 ) ; *** Patched offset. Zero the mailbox + 331 + 332 ; Update the index to the mailbox circular list: + 333 00000260: E15C0001 0000000C load SCRATCHB0, 1, ld_counter ; get counter (mailbox index) + 334 00000268: 7E5C0100 00000000 move SCRATCHB0 + 1 to SCRATCHB0 ; add 1 + 335 00000270: E05C0001 0000000C store SCRATCHB0, 1, ld_counter ; put it back + 336 + 337 00000278: E15C0001 0000002C load SCRATCHB0, 1, ld_nexus ; if low-byte == 0x01 then cancelled mailbox + 338 00000280: 725C0000 00000000 move SCRATCHB0 to SFBR + 339 00000288: 808C0001 FFFFFE90 jump rel( select_phase ), if 1 + 340 + 341 ; *** FALL THROUGH TO phase_handler *** + 342 + 343 + 344 ;***************************************************************** + 345 ; + 346 ; Phase_handler + 347 ; The phase handler script is a dispatcher function of SCSI phase + 348 ; + 349 ;***************************************************************** + 350 + 351 00000290: phase_handler: + 352 00000290: E1100004 0000002C load DSA0, 4, ld_nexus ; reload DSA + 353 00000298: 828B0000 00000088 jump rel( command_phase ), when CMD ; wait for REQ + 354 000002A0: 808A0000 000000A8 jump rel( data_out_phase ), if DATA_OUT ; already latched REQ signal + 355 000002A8: 868A0000 
00000020 jump rel( message_out_phase ), if MSG_OUT + 356 000002B0: 818A0000 000000E0 jump rel( data_in_phase ), if DATA_IN + 357 000002B8: 838A0000 00000108 jump rel( status_phase ), if STATUS + 358 000002C0: 878A0000 00000120 jump rel( message_in_phase ), if MSG_IN + 359 000002C8: 98080000 00000000 int unknown_phase + 360 + 361 + 362 ;***************************************************************** + 363 ; + 364 ; Message-Out phase + 365 ; + 366 ;***************************************************************** + 367 + 368 000002D0: message_out_phase: + 369 000002D0: 785C0600 00000000 move kphase_MSG_OUT to SCRATCHB0 ; Set phase indicator + 370 000002D8: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 371 + 372 000002E0: 1E000000 00000008 move from TLQ_MSGOp, when MSG_OUT ; put out the message(s) + 373 000002E8: 80880000 FFFFFFA0 jump rel( phase_handler ) + 374 + 375 + 376 ; issueMessageOut - Driver entry point for Sync/Wide negotiation and + 377 ; to issue message Reject: + 378 + 379 000002F0: issueMessageOut: + 380 000002F0: 58000008 00000000 set ATN ; tell Target we have something to say + 381 000002F8: 60000040 00000000 clear ACK + 382 00000300: 868B0000 FFFFFFC8 jump rel( message_out_phase ), when MSG_OUT ; wait for REQ. Jump if msg-out phase. 
+ 383 00000308: 87820000 FFFFFF80 jump rel( phase_handler ), if not MSG_IN ; jump if weird phase + 384 00000310: 0F000001 00000039 move 1, ld_scratch+1, when MSG_IN ; dump the msg byte + 385 00000318: 60000040 00000000 clear ACK ; accept Target's last msg-in byte + 386 00000320: 80880000 FFFFFFC8 jump rel( issueMessageOut ) + 387 + 388 + 389 ;***************************************************************** + 390 ; + 391 ; Command phase + 392 ; + 393 ;***************************************************************** + 394 + 395 00000328: command_phase: + 396 00000328: 785C0200 00000000 move kphase_COMMAND to SCRATCHB0 ; Set phase indicator + 397 00000330: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 398 + 399 00000338: 60000008 00000000 clear ATN ; In case we missed the sending nego + 400 00000340: 1A000000 00000010 move FROM TLQ_CDBp, when CMD ; issue the CDB + 401 00000348: 80880000 FFFFFF40 jump rel( phase_handler ) + 402 + 403 + 404 ;***************************************************************** + 405 ; + 406 ; Data_out_phase + 407 ; + 408 ;***************************************************************** + 409 + 410 00000350: data_out_phase: + 411 00000350: 785C0000 00000000 move kphase_DATA_OUT to SCRATCHB0 ; Set phase indicator + 412 00000358: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 413 + 414 00000360: 88880000 00000008 call rel( driverXfer ) ; call driver-built CHMOV instructions + 415 00000368: 80880000 FFFFFF20 jump rel( phase_handler ) ; if all data xfer'd, get next phase + 416 + 417 00000370: driverXfer: ; get here from data-in code also + 418 00000370: F1340004 00000004 load SCRATCHA0, 4, DSAREL( TLQ_xferAdr ) + 419 00000378: E0340004 00000394 store SCRATCHA0, 4, doItPatch+4 ; *** patch the JUMP address + 420 00000380: 7835FF00 00000000 move 0xFF to SCRATCHA1 + 421 00000388: F0350001 00000021 store SCRATCHA1, 1, DSAREL( TLQ_xferStarted ) + 422 + 423 00000390: doItPatch: + 424 00000390: 80080000 00000333 jump 0x0333 ; *** 
patched address + 425 + 426 + 427 + 428 ;***************************************************************** + 429 ; + 430 ; Data_in_phase + 431 ; 875 sets ATN if bad parity detected. + 432 ; Use of CHMOV instructions assures that we properly handle + 433 ; a leftover wide byte in the SWIDE or SODL register, depending + 434 ; on the data direction. This can happen in either of two conditions: + 435 ; 1. The Target disconnects at an odd boundary. This is + 436 ; extremely unlikely with disk devices. + 437 ; 2. The client passes either an odd buffer address or + 438 ; an odd transfer count. When the Target disconnects (at + 439 ; an even boundary, we end up with the extra wide + 440 ; byte in SWIDE or SODL. MacOS does this with VM on. + 441 ; + 442 ;***************************************************************** + 443 + 444 00000398: data_in_phase: + 445 00000398: 785C0100 00000000 move kphase_DATA_IN to SCRATCHB0 ; Set phase indicator + 446 000003A0: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 447 + 448 000003A8: 88880000 FFFFFFC0 call rel( driverXfer ) ; call driver-built CHMOV instructions + 449 + 450 ; The driver gets interrupted if a phase mismatch occurs as when + 451 ; the Target goes MSG-IN with a Disconnect. + 452 ; The driver codes either a RETURN if the Scatter/Gather list is complete or + 453 ; an INT if more Scatter/Gather elements need to be generated. + 454 ; On the Macintosh, client programs expect extra incoming data to be dumped. + 455 ; For example, during boot the ROM reads 512 bytes from a 2K-byte-sector CD. 
+ 456 + 457 000003B0: bucket_loop: + 458 000003B0: 81830000 FFFFFED8 jump rel( phase_handler ), when not DATA_IN ; wait for phase, exit if changed + 459 000003B8: 01000001 00000008 CHMOV 1, ld_status, when DATA_IN ; eat a byte + 460 000003C0: 80880000 FFFFFFE8 jump rel( bucket_loop ); ; keep dumping bytes + 461 + 462 + 463 ;***************************************************************** + 464 ; + 465 ; Status phase + 466 ; + 467 ;***************************************************************** + 468 + 469 000003C8: status_phase: + 470 000003C8: 785C0300 00000000 move kphase_STATUS to SCRATCHB0 ; Set phase indicator + 471 000003D0: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 472 + 473 000003D8: 0B000001 00000008 move 1, ld_status, when STATUS ; Read Status byte from bus + 474 000003E0: 80880000 FFFFFEA8 jump rel( phase_handler ) + 475 + 476 + 477 ;***************************************************************** + 478 ; + 479 ; Message-In phase + 480 ; + 481 ;***************************************************************** + 482 + 483 000003E8: message_in_phase: + 484 000003E8: 785C0700 00000000 move kphase_MSG_IN to SCRATCHB0 ; Set phase indicator + 485 000003F0: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 486 + 487 000003F8: 0F000001 00000040 move 1, ld_message, when MSG_IN ; Read byte from bus + 488 + 489 00000400: 808C0000 000000C0 jump rel( cmdComplete ), if 0x00 ; Command Complete + 490 00000408: 808C0002 000001A8 jump rel( saveDataPointer ), if 0x02 ; Save Data Pointer + 491 00000410: 808C0004 00000148 jump rel( disconnect_msg ), if 0x04 ; Disconnect + 492 00000418: 808C0023 00000038 jump rel( ignoreWideResidue ), if 0x23 ; Ignore Wide Residue + 493 00000420: 808C0003 000001B0 jump rel( restoreDataPointer ), if 0x03 ; Restore Data Pointer + 494 00000428: 808C0001 00000058 jump rel( extended_msg ), if 0x01 ; Extended message + 495 00000430: 808C0007 00000008 jump rel( msg_reject ), if 0x07 ; Message Reject + 496 ; Identify, if 0x80-FF ; 
Identify + LUN + 497 ; simple_queue_tag, if 0x20 ; Simple Queue Tag + 498 ; initiate_recovery, if 0x0F ; Initiate Recovery + 499 ; linked_cde_complete, if 0x0A/0x0B + 500 00000438: 98080000 00000002 int unexpected_msg ; unknown + 501 + 502 00000440: msg_reject: + 503 00000440: 98080000 0000000C int unknown_msg_reject + 504 + 505 00000448: clearACK: ; ENTRY point to end negotiation + 506 00000448: 60000040 00000000 clear ACK + 507 00000450: 80880000 FFFFFE38 jump rel( phase_handler ) + 508 + 509 + 510 + 511 ;***************************************************************** + 512 ; + 513 ; Ignore Wide Residue + 514 ; + 515 ;***************************************************************** + 516 + 517 00000458: ignoreWideResidue: ; this is a two byte message so snag the 2nd byte here + 518 00000458: 60000040 00000000 clear ACK + 519 00000460: 0F000001 00000041 move 1, ld_message+1, when MSG_IN ; save residue count + 520 00000468: 6A5E0000 00000000 move SFBR to SCRATCHB2 ; byte is still in SFBR. Position it. + 521 00000470: F05E0001 00000022 store SCRATCHB2, 1, DSAREL( TLQ_IWR ) ; Store residue count in Nexus for driver. 
+ 522 00000478: 60000040 00000000 clear ACK + 523 00000480: 80880000 FFFFFE08 jump rel( phase_handler ) + 524 + 525 + 526 ;***************************************************************** + 527 ; + 528 ; Extended message + 529 ; Accept Wide and Synchronous Data Transfer messages + 530 ; + 531 ;***************************************************************** + 532 + 533 00000488: extended_msg: + 534 00000488: 60000040 00000000 clear ACK + 535 00000490: 0F000001 00000041 move 1, ld_message+1, when MSG_IN ; read msg length byte from bus + 536 00000498: 60000040 00000000 clear ACK + 537 000004A0: 0F000001 00000042 move 1, ld_message+2, when MSG_IN ; read ext msg code from bus + 538 000004A8: 60000040 00000000 clear ACK + 539 ; extended_identify, IF 0x02 + 540 ; modify_data_pointer, if 0x00 + 541 000004B0: 808C0001 00000140 jump rel( sdtr ), if 0x01 ; jump if SDTR, sync negotiation msg + 542 000004B8: 808C0003 00000148 jump rel( wdtr ), if 0x03 ; jump if WDTR, wide negotiation msg + 543 000004C0: 98080000 00000003 int unexpected_ext_msg ; let driver deal with unknown + 544 + 545 + 546 ;***************************************************************** + 547 ; + 548 ; Command complete + 549 ; The Command-Complete message is sent to indicate that the + 550 ; IO operation has completed and valid status has been sent. + 551 ; The Target should then disconnect. + 552 ; SCRIPTS must spin until the IOdone mailbox is empty. + 553 ; Then it sets the IOdone mailbox with the current Nexus. + 554 ; The status message is analyzed. + 555 ; If status is good, INTF the driver and jump to select_phase. + 556 ; If status is NG, save it in the NEXUS and INT the driver. 
+ 557 ; + 558 ;***************************************************************** + 559 + 560 000004C8: cmdComplete: + 561 000004C8: 785C0C00 00000000 move kphase_CMD_COMPLETE to SCRATCHB0 ; Set phase indicator + 562 000004D0: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 563 + 564 000004D8: 7C027F00 00000000 move SCNTL2 & 0X7F to SCNTL2 ; Clr SDU: SCSI Disconnect Unexpected + 565 000004E0: 60000040 00000000 clear ACK + 566 000004E8: 48000000 00000000 WAIT DISCONNECT + 567 + 568 000004F0: testMbxLp: ; loop until IOdone mailbox empty + 569 000004F0: E1340004 00000014 load SCRATCHA0, 4, ld_IOdone_mailbox + 570 000004F8: 72370000 00000000 move SCRATCHA3 to SFBR ; A3 = semaphore + 571 00000500: 80840000 FFFFFFE8 jump rel( testMbxLp ), if not 0 + 572 + 573 ; Fill in the IOdone mailbox with the following: + 574 ; A0 = index to Nexus + 575 ; A1 = Status + 576 ; A2 = 0 + 577 ; A3 = semaphore (FF = set) + 578 00000508: E1340001 00000028 load SCRATCHA0, 1, ld_nexus_index ; A0 = index to Nexus + 579 00000510: E15C0001 00000008 load SCRATCHB0, 1, ld_status + 580 00000518: 725C0000 00000000 move SCRATCHB0 to SFBR + 581 00000520: 6A350000 00000000 move SFBR to SCRATCHA1 ; A1 = Status + 582 00000528: 78360000 00000000 move 0x00 to SCRATCHA2 ; A2 = 0 + 583 00000530: 7837FF00 00000000 move 0xFF to SCRATCHA3 ; A3 = semaphore IOdone mailbox + 584 00000538: E0340004 00000014 store SCRATCHA0, 4, ld_IOdone_mailbox + 585 + 586 00000540: 72350000 00000000 move SCRATCHA1 to SFBR ; Test the Status of this IO + 587 ; SFBR = status msg + 588 ; Test status - If good, Interrupt on the fly and jump to select phase + 589 00000548: 981CC100 000000FF intfly 0xFF, if 0 and mask 0xC1 ; mask off reserved bits + 590 00000550: 808CC100 FFFFFBC8 jump rel( select_phase ), if 0 and mask 0xC1 + 591 00000558: 98080000 00000001 int status_error ; Status err. 
Interrupt driver & stop + 592 + 593 + 594 ;***************************************************************** + 595 ; + 596 ; Disconnect + 597 ; The 8xx Accepts the disconnection and jumps to the select_phase + 598 ; to check for another IO + 599 ; + 600 ;***************************************************************** + 601 + 602 00000560: disconnect_msg: + 603 00000560: E15C0001 00000030 load SCRATCHB0, 1, ld_phase_flag + 604 00000568: 725C0000 00000000 move SCRATCHB0 to SFBR + 605 ; If we got here from reselect just bailout since ld_nexus is + 606 ; not setup and the code using it is not needed anyway (no data xfer) + 607 00000570: 808C0009 00000010 jump rel( bailout ), if kphase_RESELECT + 608 + 609 00000578: 785C0D00 00000000 move kphase_DISCONNECT to SCRATCHB0 + 610 00000580: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 611 + 612 00000588: bailout: + 613 00000588: 785FFF00 00000000 move 0xFF to SCRATCHB3 ; invalidate nexus index for driver + 614 00000590: E05F0001 0000002B store SCRATCHB3, 1, ld_nexus_index+3 + 615 00000598: 7C027F00 00000000 move SCNTL2 & 0x7F to SCNTL2 ; Clr SDU: SCSI Disconnect Unexpected + 616 000005A0: 60000040 00000000 clear ACK + 617 000005A8: 48000000 00000000 WAIT DISCONNECT ; wait for bus-free + 618 000005B0: 80880000 FFFFFB68 jump rel( select_phase ) ; go see if more to do + 619 + 620 + 621 ;****************************************************************** + 622 ; + 623 ; ??? mlj - saveDataPointer and restoreDataPointer are incorrect. + 624 ; ??? They basically do nothing. 
+ 625 ; Save Data Pointer + 626 ; + 627 ;***************************************************************** + 628 + 629 000005B8: saveDataPointer: + 630 000005B8: 785C0E00 00000000 move kphase_saveDataPointer to SCRATCHB0 + 631 000005C0: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 632 000005C8: 60000040 00000000 clear ACK + 633 000005D0: 80880000 FFFFFCB8 jump rel( phase_handler ) + 634 + 635 + 636 ;****************************************************************** + 637 ; + 638 ; ??? mlj - saveDataPointer and restoreDataPointer are incorrect. + 639 ; ??? They basically do nothing. + 640 ; Restore Data Pointer + 641 ; The local values still blocks, still bytes and data address + 642 ; must be loaded from the corresponding NEXUS data set. + 643 ; This message should followed an IDE (parity error) + 644 ; + 645 ;***************************************************************** + 646 + 647 000005D8: restoreDataPointer: + 648 000005D8: 785C0F00 00000000 move kphase_restoreDataPointer to SCRATCHB0 + 649 000005E0: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 650 000005E8: 60000040 00000000 clear ACK + 651 000005F0: 80880000 FFFFFC98 jump rel( phase_handler ) + 652 + 653 + 654 ;***************************************************************** + 655 ; + 656 ; Synchronous data transfer request or response + 657 ; + 658 ;***************************************************************** + 659 000005F8: sdtr: + 660 000005F8: 0F000002 00000043 move 2, ld_message+3, when MSG_IN ; Read period & offset from bus + 661 00000600: 98080000 0000000D int negotiateSDTR + 662 + 663 + 664 ;*************************************************************************** + 665 ; + 666 ; Wide Data Transfer request or response + 667 ; + 668 ;*************************************************************************** + 669 00000608: wdtr: + 670 00000608: 0F000001 00000043 move 1, ld_message+3, when MSG_IN ; get Transfer Width Exponent fm bus + 671 00000610: 98080000 0000000E int 
negotiateWDTR + 672 + 673 + 674 ;***************************************************************** + 675 ; + 676 ; Reselect phase + 677 ; The chip waits here either for a Reselection from a Target or + 678 ; a SIGP from the driver indicating something in the mailbox. + 679 ; If reselected, the script uses the Nexus value which is either + 680 ; a Tag or a SCSI ID/LUN combo to lookup the Nexus. + 681 ; Then init the SXFER and SCNTL3 registers from the device config table. + 682 ; + 683 ;***************************************************************** + 684 + 685 00000618: try_reselect: ; Select failed - probably reselecting + 686 ; Cf NCR Errata Listing 117 Item 1: + 687 00000618: 7C00DF00 00000000 move SCNTL0 & 0xDF to SCNTL0 ; clr Start bit + 688 00000620: 7A1A0000 00000000 move CTEST2 | 0x00 to CTEST2 ; Clear SIGP bit from ISTAT reg + 689 + 690 00000628: reselect_phase: + 691 00000628: 785C0900 00000000 move kphase_RESELECT to SCRATCHB0 ; Set phase indicator + 692 00000630: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 693 + 694 00000638: 785FFF00 00000000 move 0xFF to SCRATCHB3 ; invalidate nexus index for driver + 695 00000640: E05F0001 0000002B store SCRATCHB3, 1, ld_nexus_index+3 + 696 + 697 ; wait here for reselect from a Target + 698 ; or SIGP from the driver + 699 + 700 00000648: 54000000 FFFFFAD0 WAIT RESELECT REL( select_phase ) ; jump if SIGP + 701 + 702 ; Reselected: + 703 + 704 00000650: 720A0000 00000000 move SSID to SFBR ; SSID = [ Valxxx Scsi_id ] + 705 00000658: 980C7F00 00000007 int unknown_reselect, if 0 and mask 0x7F; Interrupt if VAL bit not set + 706 00000660: 6C5C0F00 00000000 move SFBR & 0x0F to SCRATCHB0 ; B0 = Target ID + 707 00000668: E05C0001 00000020 store SCRATCHB0, 1, ld_scsi_id ; save it + 708 + 709 00000670: 88880000 FFFFFA48 call rel( initContext ) ; setup sync regs here + 710 + 711 00000678: 9F030000 00000005 int no_msgin_after_reselect, when not MSG_IN + 712 + 713 00000680: 0F000001 00000040 move 1, ld_message, when 
MSG_IN ; Read Identify byte from bus + 714 + 715 ; if another REQ is asserted, a SimpleQueueTag message should be next + 716 + 717 00000688: 60000040 00000000 clear ACK ; notify Target: msg byte rx'd + 718 00000690: 878B0000 00000048 jump rel( getNextMsg ), when MSG_IN ; jump if SimpleQueueTag coming + 719 + 720 ; untagged operation: + 721 + 722 00000698: 6C340700 00000000 move SFBR & 0x07 to SCRATCHA0 ; isolate LUN from Identify byte + 723 + 724 000006A0: E15C0001 00000020 load SCRATCHB0, 1, ld_scsi_id ; B0 = Target ID + 725 000006A8: 60000400 00000000 clear CARRY + 726 000006B0: 715C0000 00000000 move SCRATCHB0 SHL SFBR ; shift left #1 + 727 000006B8: 695C0000 00000000 move SFBR SHL SCRATCHB0 ; shift left #2 + 728 000006C0: 715C0000 00000000 move SCRATCHB0 SHL SFBR ; shift left #3 + 729 000006C8: 7AB40000 00000000 move SCRATCHA0 | SFBR to SCRATCHA0 ; form Nexus index = 0b0TTTTLLL + 730 + 731 000006D0: E0340001 00000028 store SCRATCHA0, 1, ld_nexus_index ; store as index to Nexus + 732 000006D8: 80880000 00000030 jump rel( haveNexusIndex ) + 733 + 734 ; should be tagged operation: + 735 + 736 000006E0: getNextMsg: + 737 000006E0: 0F000001 00000040 move 1, ld_message, when MSG_IN ; read message byte from bus + 738 000006E8: 808C0004 FFFFFE70 jump rel( disconnect_msg ), if 0x04 ; if Disconnect, oh well. 
+ 739 000006F0: 60000040 00000000 clear ACK + 740 000006F8: 80840020 FFFFFB90 jump rel( phase_handler ), if not 0x20; Branch if not Queue tag code + 741 ; get the Queue Tag and save as the nexus index + 742 00000700: 0F000001 00000028 move 1, ld_nexus_index, when MSG_IN ; Nexus index <- Tag from bus + 743 00000708: 60000040 00000000 clear ACK ; acknowledge it + 744 + 745 00000710: haveNexusIndex: + 746 00000710: 785F0000 00000000 move 0x00 to SCRATCHB3 ; clear invalid-nexus-index flag + 747 00000718: E05F0001 0000002B store SCRATCHB3, 1, ld_nexus_index+3 + 748 00000720: 88880000 FFFFF930 call rel( findNexusFromIndex ) ; set DSA <- Nexus pointer + 749 00000728: 80880000 FFFFFB60 jump rel( phase_handler ) ; start handling phases. + 750 + 751 + 752 ;***************************************************************** + 753 ; + 754 ; AbortMailbox - Abort (or BusDeviceReset) the mailbox entry. + 755 ; This is a queued operation - not an immediate + 756 ; operation as is issueAbort_BDR. + 757 ; The Abort message clears all IO processes for the + 758 ; selecting Initiator on the specified LUN. + 759 ; + 760 ; The Bus Device Reset message clears all IO processes for + 761 ; all Initiators on all LUNs of selected Target. + 762 ; It forces a hard reset condition to the selected SCSI device. + 763 ; + 764 ; A0 = Identify byte (0xC0 + LUN N.B. Disconnect allowed) + 765 ; A1 = Tag, if any + 766 ; A2 = SCSI ID + 767 ; A3 = Abort code Abort=0x06; Abort Tag=0D; Bus Device Reset=0x0C + 768 ; + 769 ; Mailbox not cleared by SCRIPTS so that driver can find SCSI ID when done + 770 ; N.B.: Device is Async and Narrow after BDR!!! + 771 ; Driver must set the device config table values accordingly. 
+ 772 ;***************************************************************** + 773 + 774 00000730: AbortMailbox: + 775 00000730: 785C0B00 00000000 move kphase_ABORT_MAILBOX to SCRATCHB0 ; Set phase code + 776 00000738: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 777 + 778 00000740: 785FFF00 00000000 move 0xFF to SCRATCHB3 ; invalidate nexus index for driver + 779 00000748: E05F0001 0000002B store SCRATCHB3, 1, ld_nexus_index+3 + 780 + 781 00000750: E15E0001 00000012 load SCRATCHB2, 1, ld_AbortBdr_mailbox+2 ; get SCSI ID + 782 00000758: E05E0001 00000762 store SCRATCHB2, 1, AbortSelect+2 ; *** Patch the Select/ATN instruction + 783 + 784 00000760: AbortSelect: + 785 00000760: 45000000 FFFFFEB0 SELECT ATN 0, REL( try_reselect ) ; *** Patched SCSI ID + 786 + 787 00000768: 72350000 00000000 move SCRATCHA1 to SFBR ; check for Tag + 788 00000770: 80840000 00000038 jump rel( taggedAbort ) if not 0x00 ; jump if tagged abort + 789 + 790 ; untagged Abort or BusDeviceReset: + 791 + 792 00000778: 72370000 00000000 move SCRATCHA3 to SFBR ; position the abort code + 793 00000780: 6A350000 00000000 move SFBR to SCRATCHA1 + 794 00000788: E0340002 00000038 store SCRATCHA0, 2, ld_scratch ; Store Identify and Abort msgs + 795 00000790: 78020000 00000000 move 0x00 to SCNTL2 ; Clr SDU SCSI Disconnect Unexpected + 796 00000798: 0E000002 00000038 move 2, ld_scratch , when MSG_OUT ; emit Identify and Abort messages + 797 000007A0: 48000000 00000000 WAIT DISCONNECT + 798 000007A8: 98080000 00000009 int abort_mailbox + 799 + 800 ; AbortTag: + 801 + 802 000007B0: taggedAbort: + 803 000007B0: 72350000 00000000 move SCRATCHA1 to SFBR ; position the Tag + 804 000007B8: 6A360000 00000000 move SFBR to SCRATCHA2 + 805 000007C0: 78352000 00000000 move 0x20 to SCRATCHA1 ; gen SimpleQueueTag code + 806 000007C8: E0340004 00000038 store SCRATCHA0, 4, ld_scratch ; store Identify, SQT, Tag, AbortTag + 807 000007D0: 78020000 00000000 move 0x00 to SCNTL2 ; Clr SDU SCSI Disconnect Unexpected + 808 
000007D8: 0E000004 00000038 move 4, ld_scratch, when MSG_OUT ; emit all 4 bytes + 809 000007E0: 48000000 00000000 WAIT DISCONNECT + 810 000007E8: 98080000 00000009 int abort_mailbox + 811 + 812 + 813 ;***************************************************************** + 814 ; + 815 ; issueAbort_BDR - Abort (or BusDeviceReset) the current operation. + 816 ; This is an immediate operation - not a queued operation + 817 ; as is AbortMailbox. + 818 ; The Abort message clears all IO processes for the + 819 ; selecting Initiator on the specified LUN. + 820 ; + 821 ; The Bus Device Reset message clears all IO processes for + 822 ; all Initiators on all LUNs of selected Target. + 823 ; It forces a hard reset condition to the selected SCSI device. + 824 ; + 825 ;***************************************************************** + 826 + 827 000007F0: issueAbort_BDR: + 828 000007F0: 785C0A00 00000000 move kphase_ABORT_CURRENT to SCRATCHB0 ; Set phase code + 829 000007F8: E05C0001 00000030 store SCRATCHB0, 1, ld_phase_flag + 830 + 831 00000800: 74140800 00000000 move ISTAT & 0x08 to SFBR ; see if Target connected to bus + 832 00000808: 980C0000 0000000A int abort_current, if 0 ; interrupt driver if not connected + 833 + 834 00000810: 58000008 00000000 SET ATN ; get Target's attention + 835 00000818: E1100004 0000002C load DSA0, 4, ld_nexus ; load pointer to Nexus + 836 + 837 00000820: bucketLoop: + 838 00000820: 60000040 00000000 clear ACK + 839 00000828: 868B0000 000000A8 jump rel( sendAbortBDR ), when MSG_OUT ; wait for REQ. Jump if OK. 
+ 840 + 841 00000830: 838A0000 00000030 jump rel( BucketInStatus ), if STATUS ; bit bucket in + 842 00000838: 878A0000 00000038 jump rel( BucketInMsg ), if MSG_IN ; bit bucket in + 843 00000840: 818A0000 00000040 jump rel( BucketInData ), if DATA_IN ; bit bucket in + 844 + 845 00000848: 7834AD00 00000000 move 0xAD to SCRATCHA0 + 846 00000850: 808A0000 00000040 jump rel( BucketOutData ), if DATA_OUT ; bit bucket out + 847 00000858: 828A0000 00000058 jump rel( BucketOutCmd ), if CMD ; bit bucket out + 848 00000860: 98080000 00000000 int unknown_phase ; back to driver for harsher measures + 849 + 850 + 851 00000868: BucketInStatus: + 852 00000868: 0B000001 00000038 move 1, ld_scratch, when STATUS ; eat the Status byte + 853 00000870: 80880000 FFFFFFA8 jump rel( bucketLoop ); ; keep bit-bucketing bytes + 854 + 855 00000878: BucketInMsg: + 856 00000878: 0F000001 00000038 move 1, ld_scratch, when MSG_IN ; eat a message byte + 857 00000880: 80880000 FFFFFF98 jump rel( bucketLoop ); ; keep bit-bucketing bytes + 858 + 859 00000888: BucketInData: + 860 00000888: 09000001 00000038 move 1, ld_scratch, when DATA_IN ; eat a data byte + 861 00000890: 80880000 FFFFFF88 jump rel( bucketLoop ); ; keep bit-bucketing bytes + 862 + 863 00000898: BucketOutData: + 864 00000898: 7B347300 00000000 move SCRATCHA0 xor 0x73 to SCRATCHA0 ; gen 0xDEAD ... 
+ 865 000008A0: E0340001 00000038 store SCRATCHA0, 1, ld_scratch + 866 000008A8: 08000001 00000038 move 1, ld_scratch, when DATA_OUT ; pad a byte out + 867 000008B0: 80880000 FFFFFF68 jump rel( bucketLoop ); ; keep bit-bucketing bytes + 868 + 869 000008B8: BucketOutCmd: + 870 000008B8: 78340000 00000000 move 0x00 to SCRATCHA0 ; load Null, TestUnitReady, whatever + 871 000008C0: E0340001 00000038 store SCRATCHA0, 1, ld_scratch + 872 000008C8: 0A000001 00000038 move 1, ld_scratch, when CMD ; pad a byte out + 873 000008D0: 80880000 FFFFFF48 jump rel( bucketLoop ); ; keep bit-bucketing bytes + 874 + 875 + 876 000008D8: sendAbortBDR: + 877 000008D8: 78020000 00000000 move 0x00 to SCNTL2 ; Clr SDU SCSI Disconnect Unexpected + 878 000008E0: 0E000001 00000000 move 1, ld_AbortCode, when MSG_OUT ; Send Abort(06) or BDR(0C) message + 879 000008E8: E1340004 00000004 load SCRATCHA0, 4, ld_zeroes ; load 0's + 880 000008F0: E0340004 00000000 store SCRATCHA0, 4, ld_AbortCode ; clear the Abort code + 881 000008F8: 48000000 00000000 WAIT DISCONNECT + 882 00000900: 98080000 0000000A int abort_current ; went BusFree - tell Driver + +--SYMBOL---------------------------VALUE------TYPE------- +abort_current 0000000A ABSOLUTE +abort_mailbox 00000009 ABSOLUTE +kphase_ABORT_MAILBOX 0000000B ABSOLUTE +kphase_ABORT_CURRENT 0000000A ABSOLUTE +kphase_CMD_COMPLETE 0000000C ABSOLUTE +kphase_COMMAND 00000002 ABSOLUTE +kphase_DATA_IN 00000001 ABSOLUTE +kphase_DATA_OUT 00000000 ABSOLUTE +kphase_DISCONNECT 0000000D ABSOLUTE +kphase_MSG_IN 00000007 ABSOLUTE +kphase_MSG_OUT 00000006 ABSOLUTE +kphase_RESELECT 00000009 ABSOLUTE +kphase_SELECT 00000008 ABSOLUTE +kphase_STATUS 00000003 ABSOLUTE +kphase_restoreDataPointer 0000000F ABSOLUTE +kphase_saveDataPointer 0000000E ABSOLUTE +negotiateWDTR 0000000E ABSOLUTE +negotiateSDTR 0000000D ABSOLUTE +no_msgin_after_reselect 00000005 ABSOLUTE +reqack_too_large 00000006 ABSOLUTE +sglist_complete 0000000F ABSOLUTE +status_error 00000001 ABSOLUTE +TLQ_CDP 00000018 
ABSOLUTE +TLQ_CDBp 00000010 ABSOLUTE +TLQ_IWR 00000022 ABSOLUTE +TLQ_MSGOp 00000008 ABSOLUTE +TLQ_SDP 0000001C ABSOLUTE +TLQ_index 00000020 ABSOLUTE +TLQ_pad 00000023 ABSOLUTE +TLQ_xferAdr 00000004 ABSOLUTE +TLQ_SCSI_ID 00000000 ABSOLUTE +TLQ_xferStarted 00000021 ABSOLUTE +unallocated_nexus 00000008 ABSOLUTE +unexpected_ext_msg 00000003 ABSOLUTE +unexpected_msg 00000002 ABSOLUTE +unknown_message_out 0000000B ABSOLUTE +unknown_msg_reject 0000000C ABSOLUTE +unknown_phase 00000000 ABSOLUTE +unknown_reselect 00000007 ABSOLUTE +wide_32_not_supported 00000004 ABSOLUTE +BSC_SCRIPT 00000000 CODE SEGMENT +SCRIPT 00000000 CODE SEGMENT +local_data 00000000 DATA SEGMENT +clearACK 00000448 ENTRY +issueAbort_BDR 000007F0 ENTRY +issueMessageOut 000002F0 ENTRY +phase_handler 00000290 ENTRY +select_phase 00000120 ENTRY +AbortSelect 00000760 LABEL +AbortMailbox 00000730 LABEL +BucketInData 00000888 LABEL +BucketInMsg 00000878 LABEL +BucketInStatus 00000868 LABEL +BucketOutCmd 000008B8 LABEL +BucketOutData 00000898 LABEL +bucketLoop 00000820 LABEL +bailout 00000588 LABEL +bucket_loop 000003B0 LABEL +clear_mailbox 00000258 LABEL +cmdComplete 000004C8 LABEL +command_phase 00000328 LABEL +data_in_phase 00000398 LABEL +data_out_phase 00000350 LABEL +disconnect_msg 00000560 LABEL +doItPatch 00000390 LABEL +driverXfer 00000370 LABEL +extended_msg 00000488 LABEL +fetchMailbox 00000188 LABEL +findNexusFromIndex 00000058 LABEL +getNextMsg 000006E0 LABEL +haveNexusIndex 00000710 LABEL +ignoreWideResidue 00000458 LABEL +initContext 000000C0 LABEL +message_in_phase 000003E8 LABEL +message_out_phase 000002D0 LABEL +msg_reject 00000440 LABEL +next_mailbox 00000248 LABEL +patchGetDevConfigOffset 000000F0 LABEL +patchArrayOffset 00000098 LABEL +reselect_phase 00000628 LABEL +restoreDataPointer 000005D8 LABEL +sdtr 000005F8 LABEL +saveDataPointer 000005B8 LABEL +sendAbortBDR 000008D8 LABEL +status_phase 000003C8 LABEL +taggedAbort 000007B0 LABEL +testMbxLp 000004F0 LABEL +try_reselect 00000618 LABEL 
+wdtr 00000608 LABEL +ld_IOdone_mailbox 00000014 RELATIVE. +ld_AbortBdr_mailbox 00000010 RELATIVE. +ld_counter 0000000C RELATIVE. +ld_device_table_base_adr 00000034 RELATIVE. +ld_mailboxp 0000001C RELATIVE. +ld_message 00000040 RELATIVE. +ld_message4 00000044 RELATIVE. +ld_nexus 0000002C RELATIVE. +ld_nexus_array_base 00000024 RELATIVE. +ld_nexus_index 00000028 RELATIVE. +ld_pad 00000048 RELATIVE. +ld_phase_flag 00000030 RELATIVE. +ld_sched_mlbx_base_adr 00000018 RELATIVE. +ld_scratch 00000038 RELATIVE. +ld_scsi_id 00000020 RELATIVE. +ld_size 0000004C RELATIVE. +ld_status 00000008 RELATIVE. +ld_unused 0000003C RELATIVE. +ld_zeroes 00000004 RELATIVE. +ld_AbortCode 00000000 RELATIVE. \ No newline at end of file diff --git a/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.ss b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.ss new file mode 100644 index 000000000..0d7381b3b --- /dev/null +++ b/iokit/Drivers/scsi/drvSymbios8xx/Sym8xxScript.ss @@ -0,0 +1,882 @@ + +; Copyright (c) 1997-1999 Apple Computer, Inc. All rights reserved. +; +; @APPLE_LICENSE_HEADER_START@ +; +; The contents of this file constitute Original Code as defined in and +; are subject to the Apple Public Source License Version 1.1 (the +; "License"). You may not use this file except in compliance with the +; License. Please obtain a copy of the License at +; http://www.apple.com/publicsource and read it before using this file. +; +; This Original Code and all software distributed under the License are +; distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER +; EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +; INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +; FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the +; License for the specific language governing rights and limitations +; under the License. 
+; +; @APPLE_LICENSE_HEADER_END@ +; +; File Ownership: +; +; DRI: Mike Johnson +; +; Other Contact: Russ Berkoff +; +; Technology: SCSI +; +; Writers: +; +; (MLJ) Mike Johnson +; (RRA) Rick Auricchio + + +; NCR Errata Listing 125 Item 1 : Clear the SCNTL0 start bit +; when jump to reselect during select (try_reselect) +; +; NCR Errata Listing 117 Item 4 : Bad parity if odd bytes during +; wide transfer. Only for DATA OUT in Initiator mode. +; (Confirm by Manfred Eierle 3rd June 93 not during DATA IN) + + ARCH 825A ;specifically for 825a and 875 (new instructions) + + + ;***************************************************************** + ; + ; Phase codes - These values represent which action is being handled + ; + ;***************************************************************** + + ABSOLUTE kphase_DATA_OUT = 0x00 + ABSOLUTE kphase_DATA_IN = 0x01 + ABSOLUTE kphase_COMMAND = 0x02 + ABSOLUTE kphase_STATUS = 0x03 + ABSOLUTE kphase_MSG_OUT = 0x06 + ABSOLUTE kphase_MSG_IN = 0x07 + ABSOLUTE kphase_SELECT = 0x08 + ABSOLUTE kphase_RESELECT = 0x09 + ABSOLUTE kphase_ABORT_CURRENT = 0x0A + ABSOLUTE kphase_ABORT_MAILBOX = 0x0B + ABSOLUTE kphase_CMD_COMPLETE = 0x0C + ABSOLUTE kphase_DISCONNECT = 0x0D + ABSOLUTE kphase_saveDataPointer = 0x0E ; ??? driver work to be done + ABSOLUTE kphase_restoreDataPointer = 0x0F ; ??? 
driver work to be done + + + ;***************************************************************** + ; interrupt codes + ;***************************************************************** + + ABSOLUTE unknown_phase = 0x00 ; A spurious phase on SCSI Bus + ABSOLUTE status_error = 0x01 ; IO completes, but with status error + ABSOLUTE unexpected_msg = 0x02 ; An 'unknown' message is in ld_message var + ABSOLUTE unexpected_ext_msg = 0x03 ; An 'unknown' extended message in ld_message + ABSOLUTE wide_32_not_supported = 0x04 ; The device wants 32 bits data phase + ABSOLUTE no_msgin_after_reselect = 0x05 ; No message-in after reselection + ABSOLUTE reqack_too_large = 0x06 ; The device answer ReqAck offset is greater than 8 + ABSOLUTE unknown_reselect = 0x07 ; The valid bit in SFBR reg not set + ABSOLUTE unallocated_nexus = 0x08 ; nexus index -> 0xFFFFFFFF + ABSOLUTE abort_mailbox = 0x09 ; Abort/BDR mailbox completed + ABSOLUTE abort_current = 0x0A ; Abort/BDR current op completed + ABSOLUTE unknown_message_out = 0x0B ; Unknown phase before message out + ABSOLUTE unknown_msg_reject = 0x0C ; Unknown message reject + ABSOLUTE negotiateSDTR = 0x0D ; Sync negotiation rx'd + ABSOLUTE negotiateWDTR = 0x0E ; Wide negotiation rx'd + ABSOLUTE sglist_complete = 0x0F ; SGList complete + + + ;***************************************************************** + ; + ; Data structure for T/L/Q Nexus: + ; + ;***************************************************************** + + ABSOLUTE TLQ_SCSI_ID = 0 ; 4 SCSI ID et al for SELECT instruction + ABSOLUTE TLQ_xferAdr = 4 ; 4 Physical address of CHMOV instructions + ABSOLUTE TLQ_MSGOp = 8 ; 8 Byte count, data adr -> TLQ_MSGO + ABSOLUTE TLQ_CDBp = 16 ; 8 Byte count, data adr -> TLQ_CDB + ABSOLUTE TLQ_CDP = 24 ; 4 Current Data Pointer + ABSOLUTE TLQ_SDP = 28 ; 4 Saved Data Pointer + ABSOLUTE TLQ_index = 32 ; 1 index into nexus array + ABSOLUTE TLQ_xferStarted= 33 ; 1 transfer started flag + ABSOLUTE TLQ_IWR = 34 ; 1 flag to Ignore Wide Residue + 
ABSOLUTE TLQ_pad = 35 ; 1 pad byte + + + ;***************************************************************** + ; + ; ENTRY declarations - Declare entry points for driver + ; + ;***************************************************************** + + ENTRY select_phase + ENTRY phase_handler + ENTRY issueMessageOut ; for negotiation and Reject messages + ENTRY issueAbort_BDR ; to immediately Abort or Bus-Device-Reset + ENTRY clearACK ; MsgIn done - clr ACK, jump to phase handler + + + ;***************************************************************** + ; + ; Define local data structure at start of SCRIPTS. + ; This structure is allocated by the following nops. + ; + ;***************************************************************** + ; + + RELATIVE local_data \ + ld_AbortCode = 4{??}\ ; 1 byte code to Abort or BDR + ld_zeroes = 4{??}\ ; 4 bytes of 0 to clear registers + ld_status = 4{??}\ ; Status byte from target + ld_counter = 4{??}\ ; index into mailbox array + ld_AbortBdr_mailbox = 4{??}\ ; Abort/BusDeviceReset mailbox + ld_IOdone_mailbox = 4{??}\ ; [ nexus 0 0 semaphore ] + ld_sched_mlbx_base_adr = 4{??}\ ; base addr of mailbox array + ld_mailboxp = 4{??}\ ; address of current mailbox + ld_scsi_id = 4{??}\ ; ptr to current mailbox + ld_nexus_array_base = 4{??}\ ; base address of Nexus pointers + ld_nexus_index = 4{??}\ ; index to Nexus pointer + ld_nexus = 4{??}\ ; address of Nexus + ld_phase_flag = 4{??}\ ; for debugging + ld_device_table_base_adr = 4{??}\ ; device configuration table + ld_scratch = 4{??}\ ; scratch memory + ld_unused = 4{??}\ ; unused + ld_message = 4{??}\ ; buffer for MsgIn bytes + ld_message4 = 4{??}\ ; buffer continuation + ld_pad = 4{??}\ ; padding + ld_size = 4{??} ; size of this structure + + +PROC BSC_SCRIPT: + + ; *** These NOPs must be at address 0. *** + ; *** This is reserved space for the structure "local_data". *** + ; *** The driver inits this area to zero. 
*** + + nop 0 ; ld_AbortCode, ld_zeroes + nop 0 ; ld_status, ld_counter + + nop 0 ; ld_AbortBdr_mailbox, ld_IOdone_mailbox + nop 0 ; ld_sched_mlbx_base_adr, ld_mailboxp + + nop 0 ; ld_scsi_id, ld_nexus_array_base + nop 0 ; ld_nexus_index, ld_nexus + + nop 0 ; ld_phase_flag, ld_device_table_base_adr + nop 0 ; ld_scratch, ld_unused + + nop 0 ; ld_message, ld_message4 + nop ld_size ; ld_pad, ld_size (Use ld_size or lose it) + + nop sglist_complete ; use sglist_complete or lose it from gen'd output file + + ;**************************************************************************** + ; + ; findNexusFromIndex - load DSA with pointer to Nexus given a Nexus index: + ; + ;**************************************************************************** + +findNexusFromIndex: + + load SCRATCHA0, 4, ld_nexus_index ; load index and leading zeroes + clear CARRY + move SCRATCHA0 SHL 0 to SCRATCHA0 ; double the index + move SCRATCHA1 SHL 0 to SCRATCHA1 + move SCRATCHA0 SHL 0 to SCRATCHA0 ; double again + move SCRATCHA1 SHL 0 to SCRATCHA1 ; A0 now has index to 4-byte address + store SCRATCHA0, 4, patchArrayOffset+4 ; *** patch the code + + load DSA0, 4, ld_nexus_array_base ; load base address of array of Nexus pointers +patchArrayOffset: + load DSA0, 4, DSAREL( 0 ) ; *** patched offset. Load pointer. + + move DSA0 to SFBR ; Ensure pointer is not 0xFFFFFFFF + int unallocated_nexus, if 0xFF ; Interrupt if NFG + + store DSA0, 4, ld_nexus ; Store the Nexus pointer + return ; end findNexusFromIndex + + + ;**************************************************************************** + ; + ; initContext - Initialize the registers for Sync and Wide using + ; values stored in the device configuration table. + ; Return with values in SCRATCHB for Select code. 
+ ; + ;**************************************************************************** + +initContext: + + load SCRATCHB0, 4, ld_scsi_id ; load 4-bit SCSI ID and zeroes + clear CARRY + move SCRATCHB0 SHL SCRATCHB0 ; * 2 + move SCRATCHB0 SHL SCRATCHB0 ; * 2 -> UInt32 index + store SCRATCHB0, 4, patchGetDevConfigOffset+4 ; *** Patch load code + + load DSA0, 4, ld_device_table_base_adr ; load base physical addr of tables + +patchGetDevConfigOffset: + load SCRATCHB0, 4, DSAREL( 0 ) ; *** Patched table offset *** + + ; SCRATCHB0 = 0 + ; SCRATCHB1 = TP,MO (SXFER bits7-5 bits3-0) + ; SCRATCHB2 = 0 (position for SCSI ID) + ; SCRATCHB3 = SCCF,EWS (SCNTL3 bits6-4 bit 3) + + move SCRATCHB1 to SFBR ; init SXFER from B1 + move SFBR to SXFER + ; Init SCNTL3 from B3 + move SCRATCHB3 to SFBR + move SFBR to SCNTL3 + return ; return with SCRATCHB intact. + + + ;***************************************************************** + ; + ; Select_phase: + ; Clear the SIGP bit. + ; Check if any Abort/BusDeviceReset request waiting. + ; Nexus is found in the list of 256 mailboxes. + ; If current mailbox is empty, jump to reselect_phase. + ; SCRIPTS tries to select device. + ; If select fails due to reselect, jump to reselect_phase + ; Select Timeout handled by driver. + ; If select succeeds, clear the mailbox entry + ; and increment the mailbox counter. + ; Jump to the phase_handler (hopefully for MSG_OUT) + ; + ;***************************************************************** + +select_phase: + + move CTEST2 | 0x00 to CTEST2 ; Clear SIGP bit from ISTAT reg + + ; Check abort mailbox: + + load SCRATCHA0, 4, ld_AbortBdr_mailbox ; Get AbortBdr mailbox + ; The Identify byte in byte 0 is also the semaphore + ; A0 = Identify byte (0xC0 + LUN N.B. 
Disconnect allowed) + ; A1 = Tag, if any + ; A2 = SCSI ID + ; A3 = Abort code Abort=0x06; Abort Tag=0D; Bus Device Reset=0x0C + move SCRATCHA0 to SFBR ; test the semaphore/Identify + jump rel( AbortMailbox ), if not 0 ; jump if aborting + + + ; Get the next IO nexus in the mailboxes circular list. + ; Calculate current mailbox address as so: + ; counter byte index * 4 to get mailbox index + ; add base physical address of mailboxes giving current mailbox address + + load SCRATCHA0, 4, ld_counter ; get 1-byte mailbox counter & 0s + clear CARRY + move SCRATCHA0 SHL 0 to SCRATCHA0 ; double it + move SCRATCHA1 SHL 0 to SCRATCHA1 + move SCRATCHA0 SHL 0 to SCRATCHA0 ; double it again + move SCRATCHA1 SHL 0 to SCRATCHA1 ; now have a UInt32 index + store SCRATCHA0, 4, fetchMailbox+4 ; *** patch the load DSA instruction + store SCRATCHA0, 4, clear_mailbox+4 ; *** patch the store DSA instruction + + load DSA0, 4, ld_sched_mlbx_base_adr ; load base physical address of mailboxes + +fetchMailbox: + load DSA0, 4, DSAREL( 0 ) ; *** Patched offset. 
Load Nexus address + store DSA0, 4, ld_nexus ; save pointer to current Nexus + load SCRATCHA0, 4, ld_nexus ; copy to A0 + + move SCRATCHA0 to SFBR ; + jump rel( next_mailbox ), if 1 ; if low-byte == 0x01 then cancelled mailbox + + move SCRATCHA1 | SFBR to SFBR ; if non-zero, have implicit semaphore + move SCRATCHA2 | SFBR to SFBR + move SCRATCHA3 | SFBR to SFBR + jump rel( reselect_phase ), if 0 ; go to reselect_phase if empty + + ;***************************************************************** + ; + ; Something in mailbox: we have work to do + ; + ;***************************************************************** + + move kphase_SELECT to SCRATCHB0 ; set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + load SCRATCHB0, 4, ld_zeroes ; clr the invalid-nexus-index flag + load SCRATCHB0, 1, DSAREL( TLQ_index ) ; get index byte from nexus + store SCRATCHB0, 4, ld_nexus_index ; save it in local data + + load DSA0, 4, ld_nexus ; restore DSA register + load SCRATCHB2, 1, DSAREL( TLQ_SCSI_ID+2 ) ; get Target's SCSI ID + move SCRATCHB2 to SFBR + move SFBR to SCRATCHB0 ; position it + store SCRATCHB0, 1, ld_scsi_id ; save it + call rel( initContext ) ; setup Sync/Wide regs in SCRATCHB + load DSA0, 4, ld_nexus ; restore DSA register + store SCRATCHB1, 1, DSAREL( TLQ_SCSI_ID+1 ) ; SXFER + store SCRATCHB3, 1, DSAREL( TLQ_SCSI_ID+3 ) ; SCNTL3 + + ;********************** select the device ******************************** + SELECT ATN from TLQ_SCSI_ID, rel( try_reselect ) ; ************************ + ;************************************************************************* + + ; looking good - clear the mailbox: + +next_mailbox: + load SCRATCHA0, 4, ld_zeroes ; zero out scratch register A + load DSA0, 4, ld_sched_mlbx_base_adr ; load base physical address of mailboxes +clear_mailbox: + store SCRATCHA0, 4, DSAREL( 0 ) ; *** Patched offset. 
Zero the mailbox + + ; Update the index to the mailbox circular list: + load SCRATCHB0, 1, ld_counter ; get counter (mailbox index) + move SCRATCHB0 + 1 to SCRATCHB0 ; add 1 + store SCRATCHB0, 1, ld_counter ; put it back + + load SCRATCHB0, 1, ld_nexus ; if low-byte == 0x01 then cancelled mailbox + move SCRATCHB0 to SFBR + jump rel( select_phase ), if 1 + +; *** FALL THROUGH TO phase_handler *** + + + ;***************************************************************** + ; + ; Phase_handler + ; The phase handler script is a dispatcher function of SCSI phase + ; + ;***************************************************************** + +phase_handler: + load DSA0, 4, ld_nexus ; reload DSA + jump rel( command_phase ), when CMD ; wait for REQ + jump rel( data_out_phase ), if DATA_OUT ; already latched REQ signal + jump rel( message_out_phase ), if MSG_OUT + jump rel( data_in_phase ), if DATA_IN + jump rel( status_phase ), if STATUS + jump rel( message_in_phase ), if MSG_IN + int unknown_phase + + + ;***************************************************************** + ; + ; Message-Out phase + ; + ;***************************************************************** + +message_out_phase: + move kphase_MSG_OUT to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + move from TLQ_MSGOp, when MSG_OUT ; put out the message(s) + jump rel( phase_handler ) + + + ; issueMessageOut - Driver entry point for Sync/Wide negotiation and + ; to issue message Reject: + +issueMessageOut: + set ATN ; tell Target we have something to say + clear ACK + jump rel( message_out_phase ), when MSG_OUT ; wait for REQ. Jump if msg-out phase. 
+ jump rel( phase_handler ), if not MSG_IN ; jump if weird phase + move 1, ld_scratch+1, when MSG_IN ; dump the msg byte + clear ACK ; accept Target's last msg-in byte + jump rel( issueMessageOut ) + + + ;***************************************************************** + ; + ; Command phase + ; + ;***************************************************************** + +command_phase: + move kphase_COMMAND to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + clear ATN ; In case we missed the sending nego + move FROM TLQ_CDBp, when CMD ; issue the CDB + jump rel( phase_handler ) + + + ;***************************************************************** + ; + ; Data_out_phase + ; + ;***************************************************************** + +data_out_phase: + move kphase_DATA_OUT to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + call rel( driverXfer ) ; call driver-built CHMOV instructions + jump rel( phase_handler ) ; if all data xfer'd, get next phase + +driverXfer: ; get here from data-in code also + load SCRATCHA0, 4, DSAREL( TLQ_xferAdr ) + store SCRATCHA0, 4, doItPatch+4 ; *** patch the JUMP address + move 0xFF to SCRATCHA1 + store SCRATCHA1, 1, DSAREL( TLQ_xferStarted ) + +doItPatch: + jump 0x0333 ; *** patched address + + + + ;***************************************************************** + ; + ; Data_in_phase + ; 875 sets ATN if bad parity detected. + ; Use of CHMOV instructions assures that we properly handle + ; a leftover wide byte in the SWIDE or SODL register, depending + ; on the data direction. This can happen in either of two conditions: + ; 1. The Target disconnects at an odd boundary. This is + ; extremely unlikely with disk devices. + ; 2. The client passes either an odd buffer address or + ; an odd transfer count. When the Target disconnects (at + ; an even boundary, we end up with the extra wide + ; byte in SWIDE or SODL. MacOS does this with VM on. 
+ ; + ;***************************************************************** + +data_in_phase: + move kphase_DATA_IN to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + call rel( driverXfer ) ; call driver-built CHMOV instructions + + ; The driver gets interrupted if a phase mismatch occurs as when + ; the Target goes MSG-IN with a Disconnect. + ; The driver codes either a RETURN if the Scatter/Gather list is complete or + ; an INT if more Scatter/Gather elements need to be generated. + ; On the Macintosh, client programs expect extra incoming data to be dumped. + ; For example, during boot the ROM reads 512 bytes from a 2K-byte-sector CD. + +bucket_loop: + jump rel( phase_handler ), when not DATA_IN ; wait for phase, exit if changed + CHMOV 1, ld_status, when DATA_IN ; eat a byte + jump rel( bucket_loop ); ; keep dumping bytes + + + ;***************************************************************** + ; + ; Status phase + ; + ;***************************************************************** + +status_phase: + move kphase_STATUS to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + move 1, ld_status, when STATUS ; Read Status byte from bus + jump rel( phase_handler ) + + + ;***************************************************************** + ; + ; Message-In phase + ; + ;***************************************************************** + +message_in_phase: + move kphase_MSG_IN to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + move 1, ld_message, when MSG_IN ; Read byte from bus + + jump rel( cmdComplete ), if 0x00 ; Command Complete + jump rel( saveDataPointer ), if 0x02 ; Save Data Pointer + jump rel( disconnect_msg ), if 0x04 ; Disconnect + jump rel( ignoreWideResidue ), if 0x23 ; Ignore Wide Residue + jump rel( restoreDataPointer ), if 0x03 ; Restore Data Pointer + jump rel( extended_msg ), if 0x01 ; Extended message + jump rel( msg_reject ), if 0x07 ; Message Reject + ; Identify, if 0x80-FF ; 
Identify + LUN + ; simple_queue_tag, if 0x20 ; Simple Queue Tag + ; initiate_recovery, if 0x0F ; Initiate Recovery + ; linked_cde_complete, if 0x0A/0x0B + int unexpected_msg ; unknown + +msg_reject: + int unknown_msg_reject + +clearACK: ; ENTRY point to end negotiation + clear ACK + jump rel( phase_handler ) + + + + ;***************************************************************** + ; + ; Ignore Wide Residue + ; + ;***************************************************************** + +ignoreWideResidue: ; this is a two byte message so snag the 2nd byte here + clear ACK + move 1, ld_message+1, when MSG_IN ; save residue count + move SFBR to SCRATCHB2 ; byte is still in SFBR. Position it. + store SCRATCHB2, 1, DSAREL( TLQ_IWR ) ; Store residue count in Nexus for driver. + clear ACK + jump rel( phase_handler ) + + + ;***************************************************************** + ; + ; Extended message + ; Accept Wide and Synchronous Data Transfer messages + ; + ;***************************************************************** + +extended_msg: + clear ACK + move 1, ld_message+1, when MSG_IN ; read msg length byte from bus + clear ACK + move 1, ld_message+2, when MSG_IN ; read ext msg code from bus + clear ACK + ; extended_identify, IF 0x02 + ; modify_data_pointer, if 0x00 + jump rel( sdtr ), if 0x01 ; jump if SDTR, sync negotiation msg + jump rel( wdtr ), if 0x03 ; jump if WDTR, wide negotiation msg + int unexpected_ext_msg ; let driver deal with unknown + + + ;***************************************************************** + ; + ; Command complete + ; The Command-Complete message is sent to indicate that the + ; IO operation has completed and valid status has been sent. + ; The Target should then disconnect. + ; SCRIPTS must spin until the IOdone mailbox is empty. + ; Then it sets the IOdone mailbox with the current Nexus. + ; The status message is analyzed. + ; If status is good, INTF the driver and jump to select_phase. 
+ ; If status is NG, save it in the NEXUS and INT the driver. + ; + ;***************************************************************** + +cmdComplete: + move kphase_CMD_COMPLETE to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + move SCNTL2 & 0X7F to SCNTL2 ; Clr SDU: SCSI Disconnect Unexpected + clear ACK + WAIT DISCONNECT + +testMbxLp: ; loop until IOdone mailbox empty + load SCRATCHA0, 4, ld_IOdone_mailbox + move SCRATCHA3 to SFBR ; A3 = semaphore + jump rel( testMbxLp ), if not 0 + + ; Fill in the IOdone mailbox with the following: + ; A0 = index to Nexus + ; A1 = Status + ; A2 = 0 + ; A3 = semaphore (FF = set) + load SCRATCHA0, 1, ld_nexus_index ; A0 = index to Nexus + load SCRATCHB0, 1, ld_status + move SCRATCHB0 to SFBR + move SFBR to SCRATCHA1 ; A1 = Status + move 0x00 to SCRATCHA2 ; A2 = 0 + move 0xFF to SCRATCHA3 ; A3 = semaphore IOdone mailbox + store SCRATCHA0, 4, ld_IOdone_mailbox + + move SCRATCHA1 to SFBR ; Test the Status of this IO + ; SFBR = status msg + ; Test status - If good, Interrupt on the fly and jump to select phase + intfly 0xFF, if 0 and mask 0xC1 ; mask off reserved bits + jump rel( select_phase ), if 0 and mask 0xC1 + int status_error ; Status err. 
Interrupt driver & stop + + + ;***************************************************************** + ; + ; Disconnect + ; The 8xx Accepts the disconnection and jumps to the select_phase + ; to check for another IO + ; + ;***************************************************************** + +disconnect_msg: + load SCRATCHB0, 1, ld_phase_flag + move SCRATCHB0 to SFBR + ; If we got here from reselect just bailout since ld_nexus is + ; not setup and the code using it is not needed anyway (no data xfer) + jump rel( bailout ), if kphase_RESELECT + + move kphase_DISCONNECT to SCRATCHB0 + store SCRATCHB0, 1, ld_phase_flag + +bailout: + move 0xFF to SCRATCHB3 ; invalidate nexus index for driver + store SCRATCHB3, 1, ld_nexus_index+3 + move SCNTL2 & 0x7F to SCNTL2 ; Clr SDU: SCSI Disconnect Unexpected + clear ACK + WAIT DISCONNECT ; wait for bus-free + jump rel( select_phase ) ; go see if more to do + + + ;****************************************************************** + ; + ; ??? mlj - saveDataPointer and restoreDataPointer are incorrect. + ; ??? They basically do nothing. + ; Save Data Pointer + ; + ;***************************************************************** + +saveDataPointer: + move kphase_saveDataPointer to SCRATCHB0 + store SCRATCHB0, 1, ld_phase_flag + clear ACK + jump rel( phase_handler ) + + + ;****************************************************************** + ; + ; ??? mlj - saveDataPointer and restoreDataPointer are incorrect. + ; ??? They basically do nothing. + ; Restore Data Pointer + ; The local values still blocks, still bytes and data address + ; must be loaded from the corresponding NEXUS data set. 
+ ; This message should followed an IDE (parity error) + ; + ;***************************************************************** + +restoreDataPointer: + move kphase_restoreDataPointer to SCRATCHB0 + store SCRATCHB0, 1, ld_phase_flag + clear ACK + jump rel( phase_handler ) + + + ;***************************************************************** + ; + ; Synchronous data transfer request or response + ; + ;***************************************************************** +sdtr: + move 2, ld_message+3, when MSG_IN ; Read period & offset from bus + int negotiateSDTR + + + ;*************************************************************************** + ; + ; Wide Data Transfer request or response + ; + ;*************************************************************************** +wdtr: + move 1, ld_message+3, when MSG_IN ; get Transfer Width Exponent fm bus + int negotiateWDTR + + + ;***************************************************************** + ; + ; Reselect phase + ; The chip waits here either for a Reselection from a Target or + ; a SIGP from the driver indicating something in the mailbox. + ; If reselected, the script uses the Nexus value which is either + ; a Tag or a SCSI ID/LUN combo to lookup the Nexus. + ; Then init the SXFER and SCNTL3 registers from the device config table. 
+ ; + ;***************************************************************** + +try_reselect: ; Select failed - probably reselecting + ; Cf NCR Errata Listing 117 Item 1: + move SCNTL0 & 0xDF to SCNTL0 ; clr Start bit + move CTEST2 | 0x00 to CTEST2 ; Clear SIGP bit from ISTAT reg + +reselect_phase: + move kphase_RESELECT to SCRATCHB0 ; Set phase indicator + store SCRATCHB0, 1, ld_phase_flag + + move 0xFF to SCRATCHB3 ; invalidate nexus index for driver + store SCRATCHB3, 1, ld_nexus_index+3 + + ; wait here for reselect from a Target + ; or SIGP from the driver + + WAIT RESELECT REL( select_phase ) ; jump if SIGP + + ; Reselected: + + move SSID to SFBR ; SSID = [ Valxxx Scsi_id ] + int unknown_reselect, if 0 and mask 0x7F; Interrupt if VAL bit not set + move SFBR & 0x0F to SCRATCHB0 ; B0 = Target ID + store SCRATCHB0, 1, ld_scsi_id ; save it + + call rel( initContext ) ; setup sync regs here + + int no_msgin_after_reselect, when not MSG_IN + + move 1, ld_message, when MSG_IN ; Read Identify byte from bus + + ; if another REQ is asserted, a SimpleQueueTag message should be next + + clear ACK ; notify Target: msg byte rx'd + jump rel( getNextMsg ), when MSG_IN ; jump if SimpleQueueTag coming + + ; untagged operation: + + move SFBR & 0x07 to SCRATCHA0 ; isolate LUN from Identify byte + + load SCRATCHB0, 1, ld_scsi_id ; B0 = Target ID + clear CARRY + move SCRATCHB0 SHL SFBR ; shift left #1 + move SFBR SHL SCRATCHB0 ; shift left #2 + move SCRATCHB0 SHL SFBR ; shift left #3 + move SCRATCHA0 | SFBR to SCRATCHA0 ; form Nexus index = 0b0TTTTLLL + + store SCRATCHA0, 1, ld_nexus_index ; store as index to Nexus + jump rel( haveNexusIndex ) + + ; should be tagged operation: + +getNextMsg: + move 1, ld_message, when MSG_IN ; read message byte from bus + jump rel( disconnect_msg ), if 0x04 ; if Disconnect, oh well. 
+ clear ACK + jump rel( phase_handler ), if not 0x20; Branch if not Queue tag code + ; get the Queue Tag and save as the nexus index + move 1, ld_nexus_index, when MSG_IN ; Nexus index <- Tag from bus + clear ACK ; acknowledge it + +haveNexusIndex: + move 0x00 to SCRATCHB3 ; clear invalid-nexus-index flag + store SCRATCHB3, 1, ld_nexus_index+3 + call rel( findNexusFromIndex ) ; set DSA <- Nexus pointer + jump rel( phase_handler ) ; start handling phases. + + + ;***************************************************************** + ; + ; AbortMailbox - Abort (or BusDeviceReset) the mailbox entry. + ; This is a queued operation - not an immediate + ; operation as is issueAbort_BDR. + ; The Abort message clears all IO processes for the + ; selecting Initiator on the specified LUN. + ; + ; The Bus Device Reset message clears all IO processes for + ; all Initiators on all LUNs of selected Target. + ; It forces a hard reset condition to the selected SCSI device. + ; + ; A0 = Identify byte (0xC0 + LUN N.B. Disconnect allowed) + ; A1 = Tag, if any + ; A2 = SCSI ID + ; A3 = Abort code Abort=0x06; Abort Tag=0D; Bus Device Reset=0x0C + ; + ; Mailbox not cleared by SCRIPTS so that driver can find SCSI ID when done + ; N.B.: Device is Async and Narrow after BDR!!! + ; Driver must set the device config table values accordingly. 
+ ;***************************************************************** + +AbortMailbox: + move kphase_ABORT_MAILBOX to SCRATCHB0 ; Set phase code + store SCRATCHB0, 1, ld_phase_flag + + move 0xFF to SCRATCHB3 ; invalidate nexus index for driver + store SCRATCHB3, 1, ld_nexus_index+3 + + load SCRATCHB2, 1, ld_AbortBdr_mailbox+2 ; get SCSI ID + store SCRATCHB2, 1, AbortSelect+2 ; *** Patch the Select/ATN instruction + +AbortSelect: + SELECT ATN 0, REL( try_reselect ) ; *** Patched SCSI ID + + move SCRATCHA1 to SFBR ; check for Tag + jump rel( taggedAbort ) if not 0x00 ; jump if tagged abort + + ; untagged Abort or BusDeviceReset: + + move SCRATCHA3 to SFBR ; position the abort code + move SFBR to SCRATCHA1 + store SCRATCHA0, 2, ld_scratch ; Store Identify and Abort msgs + move 0x00 to SCNTL2 ; Clr SDU SCSI Disconnect Unexpected + move 2, ld_scratch , when MSG_OUT ; emit Identify and Abort messages + WAIT DISCONNECT + int abort_mailbox + + ; AbortTag: + +taggedAbort: + move SCRATCHA1 to SFBR ; position the Tag + move SFBR to SCRATCHA2 + move 0x20 to SCRATCHA1 ; gen SimpleQueueTag code + store SCRATCHA0, 4, ld_scratch ; store Identify, SQT, Tag, AbortTag + move 0x00 to SCNTL2 ; Clr SDU SCSI Disconnect Unexpected + move 4, ld_scratch, when MSG_OUT ; emit all 4 bytes + WAIT DISCONNECT + int abort_mailbox + + + ;***************************************************************** + ; + ; issueAbort_BDR - Abort (or BusDeviceReset) the current operation. + ; This is an immediate operation - not a queued operation + ; as is AbortMailbox. + ; The Abort message clears all IO processes for the + ; selecting Initiator on the specified LUN. + ; + ; The Bus Device Reset message clears all IO processes for + ; all Initiators on all LUNs of selected Target. + ; It forces a hard reset condition to the selected SCSI device. 
+ ; + ;***************************************************************** + +issueAbort_BDR: + move kphase_ABORT_CURRENT to SCRATCHB0 ; Set phase code + store SCRATCHB0, 1, ld_phase_flag + + move ISTAT & 0x08 to SFBR ; see if Target connected to bus + int abort_current, if 0 ; interrupt driver if not connected + + SET ATN ; get Target's attention + load DSA0, 4, ld_nexus ; load pointer to Nexus + +bucketLoop: + clear ACK + jump rel( sendAbortBDR ), when MSG_OUT ; wait for REQ. Jump if OK. + + jump rel( BucketInStatus ), if STATUS ; bit bucket in + jump rel( BucketInMsg ), if MSG_IN ; bit bucket in + jump rel( BucketInData ), if DATA_IN ; bit bucket in + + move 0xAD to SCRATCHA0 + jump rel( BucketOutData ), if DATA_OUT ; bit bucket out + jump rel( BucketOutCmd ), if CMD ; bit bucket out + int unknown_phase ; back to driver for harsher measures + + +BucketInStatus: + move 1, ld_scratch, when STATUS ; eat the Status byte + jump rel( bucketLoop ); ; keep bit-bucketing bytes + +BucketInMsg: + move 1, ld_scratch, when MSG_IN ; eat a message byte + jump rel( bucketLoop ); ; keep bit-bucketing bytes + +BucketInData: + move 1, ld_scratch, when DATA_IN ; eat a data byte + jump rel( bucketLoop ); ; keep bit-bucketing bytes + +BucketOutData: + move SCRATCHA0 xor 0x73 to SCRATCHA0 ; gen 0xDEAD ... 
+ store SCRATCHA0, 1, ld_scratch + move 1, ld_scratch, when DATA_OUT ; pad a byte out + jump rel( bucketLoop ); ; keep bit-bucketing bytes + +BucketOutCmd: + move 0x00 to SCRATCHA0 ; load Null, TestUnitReady, whatever + store SCRATCHA0, 1, ld_scratch + move 1, ld_scratch, when CMD ; pad a byte out + jump rel( bucketLoop ); ; keep bit-bucketing bytes + + +sendAbortBDR: + move 0x00 to SCNTL2 ; Clr SDU SCSI Disconnect Unexpected + move 1, ld_AbortCode, when MSG_OUT ; Send Abort(06) or BDR(0C) message + load SCRATCHA0, 4, ld_zeroes ; load 0's + store SCRATCHA0, 4, ld_AbortCode ; clear the Abort code + WAIT DISCONNECT + int abort_current ; went BusFree - tell Driver diff --git a/iokit/Drivers/scsi/drvSymbios8xx/nasmpb b/iokit/Drivers/scsi/drvSymbios8xx/nasmpb new file mode 100755 index 0000000000000000000000000000000000000000..3398f9a8507d7ef5d1837a1273fb7c76d37b644f GIT binary patch literal 344008 zcmeFa4}4U`xi>z$Ss)?71`Qe@kf1>m5+vAQHy9*nY!4=Cw4p{DOQ^9;Bxuyo2Hlw5 z3wYr+wxL3c6|dnIz0jf!w|HM{u|kV&P;9ZHMa32?-U}5i_J;R|8r|RbnVGZmoRhPG z^-sOL4WCcg-RI2A_xV51%$c+M!JF^?RtRw_{^*|{e`!Jt-Hq2F`19c}ICpN5b6{`y$-_%!l6E5?%b9eT3hIy`q#hz z`5nUdR+SK^cc2CFD*uI5clmzq-0N>^xwY}8uj_05#_;Fv6XLQ$;Sb=|jKSZ}o!i`a z{cVdDeD&t0xwo|}{A&L(ZcPTuhlT$tyqYn<2Ysf0x!~jGn{G40^dI9Jy+Wj<3ICVy zYQ~Vg&419NKV$CP@>$ih3{m*QP;VJ7)!{Wq;2-HRNxwpm_48Wh5n287?w`J1+ajh% z&c@uiP4nhz9W=MxiZNY0Bz3OG=p7=dnO@l#l9#gxdG6espg^TZ?=e;sqgks+x`kf( z7(?;-^Y|lK7B$t=JN2(WA1gNEN(RQDS3ZXBL1VaW;g)5^HZ%@m40??-##e8<4HxZy z9()`V;>w2d`oEs@hRo;w3P5Zo>`p>TkFe@8v)5{-Q|rX8~n*!0Wu`X7-8n+K-1KAu`i3 z2420#0G)V3PN|%IdBr8=v#a%xN&9i73;g5H_dpM*w&e$wEx|;-cGHZdfp-vhl08&bxKdl+WKfZ@~@Uy!qDo zw@sPV++5Mva_-k`%9JnNaO-W2h}bC?>uVSS|7hGg{AEu2`lXgH8|;9y0ml9O%?ocr z@L?qHvCVZ1ihqXhul;znI2{x~3^2Zu$ZPD2bT2uqw0Wk~r8uKU1fED2BD@8cQt?-% znw$X|{fY13^#Q!*E4s)3fycw3){DU7b$I1|Nx8cle`KdTA&{R3{|#zjPy>S+7}UU^ z1_m`SsDVKZ3~FFd1A`hE)WDzy1~o9KfdMoSnx5JkoDhvxPY}gHwClvK!paHJZQyZVmw>7jKmWmwoeZQn1FL&^S!o`^(`plveRc7r~oy~qDhsBLU(=z+6Z zLt6`4E#YgNaiQ>+ebGIKV(*LZS+3qk@@JPtuoF^<&$I3l>1}yQ54A1x 
zKXiFmOrUQc^x_8@^HW&v%9S^2{s7i5Wn}l8tv2kZVSy! zZS8CqHJuNL+AvivqJC#{ig-%qEYzLdVW1p0Kd5t?dR9@w%z#U*2s-eR({PRb8?1Wh<;<>Lv^}+ zZ(B}9YwW!qduopGx#^w2bYryDU7E4HAR47{m0Z3Y@RFsmkF_;^xs~%lJ{Xw^ddT;zYcx5x7rh(|u2j@WB}KbGPzsQmEi zfbmK2!Tl@dlW61cN#5s6#~gheKB?_P{^dWP@-pI8(QH3Z{D_UvpN`ir^%>x7%MOui18-R+roI zA>aCfT??m#{{)-(i(i3n2Qdc(3n>PdicF~+fALz)Z!dU0NY@pAY8(%rBtKrK##wkS z#?iUVw2uG>JPi+}T=Pi0@o;_2>Qp&T!DiK#$$fYaUm*W>=0ruOJYn?S4|5D*9`8Ej z^`VVZ*Lv<6Xg;9%Md{|MeZ-^fBbs)=gC^$=SG&Eu9nDp+lfeAwxBTb5O`q}lB>5Z1 zGWk(qlYR=79WftCog8ChAs)_qQ1{<2^_h*i5q3~+3jPsE1% zUGYNx?GLBS45G%6a~3Y>OYC^cSgnP71?49{KVIpElM$VCwOAI9a^NX$@q=T6w5*? z4<=h;NHabYy<<)R@#fE1ya6>{*Bg4S9a7r9^$q2#7~9=e`X=O9J_B=4&ajr{?c)1w zjl){x_;-m+n+|@aAnt(|I6(8nGRzO5vBO&0>t1f9&zfJlz74#r)}IU8uYb9f{JU;R z7`htL#%!BT*DNt~ZRn1%t&8W1&gIh;57ZY}S@!34%@3Ok(=NDHq+_nyjW(#8hPC8{ zMPSy_KpEy7f64r59S&PTKC)p+KIn+s_MB@o+VfQ%Y0F2x9t;PF$K^JUz^e?85rN2b z{N0Yf3dF^5;R~fEwUS@pTAJoHm)b0zF4FFqDayjQwivJNY|ViBXPpPuI2#M{R?%Ez z`KPQU8jVN(l?VpelICqoh&?uzwnGPO614Lvd@Kk*kUmtlMD+6?;vamB=6uBSn?(2n zU$X8Cr(Ch8s7~fm*yOciZMm??4KKI0KlDax_+9vz|J+5uE2Wx>Vsr)Gqvom~K!z65 z7tK3p(=q2p#KDdJ49agV9TvIe7|3Sswsx#Q4$2itPB{)_+H!ivBJcd|lwr z+YYFmNCsMeV*E}wb_6zAUf>y{g#)#P{dL@m9D96fA54ixwQhsJlh;0J{S6>P)d|Q@ z7{Dvn$MP4BMqih0M4%%2ULkamrfR--9;fk{4+blu?=ArkrO>G;TPjw1 z_&^O|gr5*=EPuhH?wp2vg8V3gc3z|UI8t!Mo?xRL3kPbce`pBiCFw&2x9B{#)dQ=Z z+5+b?J^$03*uSp992Bb|+5QNhk3u1=k?%jN_3E%Vmt;dODr{ejy#as0b!r|zh&BHQ z&27Tp8cG&bAt82Gi_d%jSt#E^hS`;rSE+7j|JG}*GX5rAno-gyiY2Yf^6c-mF8?`r zjPYysBOG%$@e~T5)jFpD{C-Q$lckc63Tt)AjiQ+LQ|4I60S(t?P|gmeRt4L>{y}r- zjcZ%mzFt$+Hec+XGhgzbOnWVD*MFI6m-zE|oV6nDEnVFe(^-bf-;;(=-o$u$N;g(U zOgBG%gXtzvO!>)E2fXe#HtXUxLoM2h8X|?Kv8hu;m3=LOS|gO=&%iuX-i1D)lwrk@Ht3&t z`U-fubYHNo9D80rw$8Quc?+lw=B@zMpo9Uw)@+rVhWnzoA3G+*aL z7)|C9%YV)SMr+o6uVJ)hnsy1Jt=F`vjJ8$N#xa^LM~dZpZZ`Qd^P$S{I2=L@`^w7h z@)hHKR$-WYz?TR6Wg(ZR!GG!cMYx_}+kbHz+T9oPxotu6mkUH1`FWV;Hvc*I>bBF@ z&J@$hS4!I9GpJj!-xFx78`eTNcCL)qM#hc*ocykAG2O0f;2YCn^NobLmd1qtMeYEf zWH;$oFCUYBh5XufHuCfv#Lw9yFh*=nCjVM|hpa72jh^;pDBVqb6?zEJyg4T)nO~Q4 
zfB13u^-%0J`X@dHea;|#!vDH0-?MMJVlfM`NV=N^IRj`rQpekVhhM@MW4Ulv^q}_3 zW|v>CM;_Q1#L-v8Q?v5V^O`8==w4hy zO>^-Yl5Z{Q4cMYs+k*BA?7rH!a-SzV0<#YCjd00VuH~Bn`3e`;ioC)f#B56Qb{&h| zy2aOu4B+p#e7{f8df39-%*n|N5)OE%v;1c*rTnUEmj>E5r+lV)UxvD}Tg%v^Y?eFO z@VWa27oRPR&srCsmBd3hTQtlm$dvwxwz8h=M;{|=^#JF21^Q~a$o^BcT+7^jC2uM6 zK7a!&D56o+u9OoSu$NsI?84qSY!^;BuT}Y2ZVGZg?5=E}U)daxRAl*nx6mbT4P>zA zfHKIoF@!lo=5=G=!%A;w9%VM~mHx7o{N>D7)Eu$PyI48Y26| z<_TXK@!5?STRSuEJh9QfhCE}>6ZII&p`UQJsK@=60L?G3e_6llB+s4j_4zLa+Ul{V zy+QImf!cB3gDiPn;D^mY>s79JDb3756An=u*Bn&F^b=A#o3I_@pTNBI!QOa!C~1uL zg~Bzo?@qPiC#Z`FqduH2a$fP!Md?~?&yUGwnZ{;03liWn%h)VuT7UcGkp27Fry|<7 zi?!AMpM8|-X~bT$B}P0>zOG}eoYho$7_Xf9Q@QDL%0wL>d%~!5D32n}4(l=Gej9S_ zYL8soV|aNfPsZI7qTKRl-@xc)%HOiDGtt3T4PwOQEeAO~udB0aQ zP7nC6RqcKIfLR;Fo{!}pPy5z|T@L8l! ztuITzRxw+-=j|+vYq4=HyS`vHC4Z}8ZK@dF0&Tx8*uc{MChwib(f={cQ>Bsd}zI_hA2$yy0@ zS%cyKC2hMgFKU@toF?DvlH&y9JNbRB?>O;2=Kc8b{j$u1D!yN~RD2&^)t|Rv5`*dZ zBgA*|Wt!gs;(N>j&BG@Z-&dLV9(@_Jj~(Cgy5%1|)#XbG#CPV=!+I8g!U(BO;k$tyen3#S*%uDz9;1v@XZ~F(P|Z=qIZBCx!36EWm&5s zHmfav=14|gsbcet_cd=d;B}hIr$da#dd6c7Vzh?wSOdO@Rz2`sd&UarIL=&q#zNq~ zqkS!G__Sm2j5)TSTfSf2?c#GFT{`A1V64&@q)U5#-3>fFzn;-HK>W%1M#tX{6@Q0D z8NPmb)WugN(;v?dCp5=G&O@5lZtxdpZaw{NjqeeA;uvhwZ*DcZ#r^>0ZPYr5FU*~z z-X6fGR-1nN((Qie^Z194xlxhh$K0sv;>X;mStK9xLz+|jD5nR*Ww@{4ciW?w$)IY@ znCuy{{#E^wuF-mDwdp>+XO;1cZEuBBM>uYOtaRxl4|N8``b5;k{c1lg-ySyJi!r+! 
zA7yptK1;=J#w^IA{Wjyt_PK4a&iQ+Mh(S?RH!k>{4;)J{P&*1j$oLr;h@%YW)3 zM&G9CwV=;)=F5HiSf5KAduP}$v-i%}IYD`L27b|iJu|sxfXuXqTe#b_VI`M6}fHib}T(lzv@J;osMX8E4Dg!ZSXubiJ0-EnV$ zpFR8#yuEIZpEz&7kmsFFgg4>~=V;}97w9VpeUZ-vagVSR@&|BVhxT6R9sr%^^pS4x zM={X9%@S%uXD0if-LQ3W96SL$aU49;=ELGaPQoD>>AsPab5cJ!aW7iR7{Bca;LCGH zan6TOES2P^%X3h)hs!^QQ`P54V|tT0S9e|qosnC9Blc_?eUHRx-@w}69k2a8z>|B% zOL6~7t~t}2rshPWZFk@tBhF&#v8!o+S)Kv95B|SY#zyT0cSNIhAIbxEpTe*hj`#`c zzUAgDQMx*-2AS+V{8$@3K4Peg?xU5=Pp>rmLpS^!VT!2I{p5W}dCp1ov1FgG;2!0T z&{rMK<>teW=-eS6)4q=TfT2K=-YfjfgSrhLqv1GXG|;_7HpaXQ>V&wb9rsBuB%7YQ zjAUQvHlS}EIhuWi@B-+~SbGzx)Qnumpd==?U-baM|RYa$A#X+2#qcbRT>M%x| zpU%MNV9zqCa3v`Le6y+(cfQ5b8>t5wbl~&rCtNq1jpKE z3E?N&cW7*k(LiHR`_S0gkEr|FBOat2O4sc;qVZ)L=%?+D*>r}Ga(|iR2R5+8%g<1s zLY40+=b_HoiM>IQjQzuH^oqZ|@$}i10@+3nD{e3Ob4Gj4(-`Oc7Z5i^s{Qx3NWJDi z?a)8zH5A?*n|sJ+dLFV=zy9_h9I`dyhUCM&o=L;qW7};mxWm?2f4?MO>Bd+%kGEI* zN;0*F*n;BR){Tz+C5oT^E=d-k5i&x^swrg9X;CZWz`pbKf-y-|HG#Y0<0*KFY z^wZc`>nUecr&4?>T(U!T%FphcQyRGNf;G`7`AH%9!4lC`DU$xl{>#b|S8F*}N}N=A zkK59|(Zclx-qRKX7uRZ!r@O0XAN&Awl67_2usd~}*HE0d;cPk1V?5H9FM|3x1FA(x zC-BK=hZOhDHS$lK_WkCOGPTJUGNY82c2xb|JsbuA$8`D@_M9T@_zC;k};ezc|YZd zF#7O$DAI^F@C7kqDF>)C8&9@a;#a%y*Mq+uYMk_X8b_`pRG$gxvy=5H zL)+D^wmv;RwY{IUMPC1vuAO7!`dQmsOxqm?(N_9B@}12CZ!2q3#rKY_Xsh=#V{Mbz zxbobeZi{C*etFQvuRNCseSqJcZ!#Y0(N>>Rij6zOjJtC?Yl~X>mz}P0hqAV-SX;~~ zzpQt)9md)=nzp~%kG4T-i~6aG><(M_sU845M~yLgpR$pEYd35Jda{fDYenCF#6{n& z>3f~@B)_I_bI~Vh`g#|=K-0DSHa|%=-S$b%XVMhizRopHv8Kzp06uKDFVS>4w=nv2 zP0w_VW7CC;UaQ(~)Ao;zKVQ>#yV@_*^erxWyQX)!=wVG?245O(iQlX>cKl_gCAKw# zj#~RS@;t8c$6vx;+iE}`0Xp;l?ePC?Wtx7C8E5-$(DOh)8|`N?`WDdBIX|Y}@o&W( zOLdEWhVD0PkNG!xkNJhE81r09{F?dycKH7jTS3QK=-1CPtV+<=f$j(WaYn~_=?Uz& z`;tJH^@x&jdlu*|pd+{a`fk=f0J@wjY@2ePk@vIEX9)T*`L{uSQOIOj!fLP%Rf9c$ z1NtGxwym?oFZ$x?M34U&OFa3G;b|MyKYaWhZ2Vn%{2_Y$a*V$Q@6$2Hqil>q&}IGL zB^PW_Wccq%>p%XU2i5pb8~`twcz+7UpJT>Uqm+N|^=O;FUo(4Md?2{8& z`!$w$T<;|-pZC!7Xv{lrvpx+@J@jN4K6|p19-ez&^9i4Re5JRZlM0|`M}9HbNz-vn z=T|phNwv7lZ_+|sYly>3JFe;b)cx?nxW?^L%jB)s@{UmQ{sR6l?J^eg 
z4)gzC!2h?B|BnZq`TsBA|69rbb3kYQ{|os4)>_cd1bv~={o{v0F9-cB(63|kJ)pxk zeYv2s`S0|2$dsT*LHSr}X^0Cm}m@@N@XTj3b&Gng9Rn zuq7TV|9{NixE?ktJ^c1@rH9A*v>tF@`-ug{A09hki7lmQGX`{){~y~0dak#A{JRnF zj{1h>O+T*f^6r;ZjBDL-u9e_gy!=i%+)>{#Jj&B?=69K=%i`cso{l4566I;s5i+L@ z#e6rz=xfVn(2*bQI)~4#JC7-yKQmkDd<*j5fA06jCms4~yssrehrZG{*CydwPMm9q zUtb{3HF};Up3TTE@oYwRiDxsiOFWy=-1=$RY!v1yR{#DV)W6(jPs#gU&=cmNr>d0A zo?fMF_Me#l9xe3N;h@dD_42XA+3A>(<{4K1{1o-iCaQl<1D(}B zKSlksiRvHB=UZ6)^HbD6o2dTD1D(}BKZRdxqWWhf=&b(v57a-K;?zHbzVZqB%Bh$O zj~ZXu1p7O3@)71MdLI3?5B4KFmRaH_tMNV`V{Bq$z~(;*gMJR^_cA)x%0FoUJq7fL zp?4nxy$bYV(AoZ1_kPfWpnn$hD_Q#<&~rdP5A+g7?*=^?^huyI|L=zXKXL^0VW6k5 z_F>TXYTfR2?R8E^{$l>$4gY^+qpPjG*GX-e{N0fMk+8ST27TFEFCTfXDstJz-Ylh^WO%{f9tDsTY2BZtfR?4n-uat?~VJaF4A2{k@NU&P<7uW9;(+ zh?@;;{qsYtfBr3b&}Q!YU`e0kUbMT;V7B>pmjC|^`Jb<=cgbg)U@O$CyY{Kr`_Bz3 z_SPc*b=h;3T|Xt8nErQm|8FhwU)Mi;{xkdj&&X$2lwP07RC;|B&lS^pu8Z>D|AyM3 zDo$L>cvpG*!FeWcKd{#l|7!c$KWu%d^O_wGQin=c>!ul9{p&_&oIO)*=D25Sl|TG+ zmhy*x#s1Hlz47zqKStf{((9Eb&-@$P|GFRhUp#+3vytg_v(oFPLrSmr=UL*u!Tm3N zhRv+y{;u=TaAU`{Z2#-N4yPS|n{4d(+jM2eZoU6?ANIfQjgv12_f5R>#z(FJ?L2Si zxz8IrKEU?B?se^d{kELhu~OOb5xxI)FZRE_f5e;LK|6Zu+e{t_&Z%IZWyKXx)SxzckTOs|FAW%V(Y|NXC6 zXVsA1!Aqtq&RA{q{dY|M`fjr&I(r6ll6;29&Ph^-hym<1eeW5gs}<$0*y%I=usfjq z;lUSG?5xPN#CMtp_3F^;H6|ymT4?n8ox@JO?yg~aty6kk7g2is&NfSY+wI$_Y}|2d z+?AHN`>-nx(oEZxZ?m=yXuHkTb_8q7&i{O;6m9LZhPKa~!rI=;#!W}t2G_WsVQty{ zzi%Hv+frBCbk=s38TZ>;&^F!Gb|h=d)<56I`lsW7tL>?#?LCLsxLE&m*y|r#w$oVK z$64EAwC&L2ddv@l`{jR!{qjt*jYq!EFgE)xoBun)miU*wgEsrS?w7moqmGVavr)!o zopVf_{mU*(ERS=qc5p50?aLo|E$g<~m^d~&-Pr8gYmLp8WB&i=0p|a~-1v9Mjbr23 z>1(J8K(6+fG;8ajfks);0}o>s@Wfv$l<y zWRE2l7Y^F&Q=O~NjAJu7hbx;c+hAeGh`ZX0NN+8g0o9W}G|sx%}h++YdRc z_CxNuMfu5{-N1}<{>c-QtuYp}{f|4b|G|CpS$3x6S$(GDyZZYN?!^8_$lm{O+rUds zRwFNNGjg`|x-d(PoX?gkIlsGH$=TKe%$x*t%WAyEZ2jAY^{+!tj@h6w_5OETwI%M@ z=#BZI&ST5DMET7>Ei`%v9d_~5!St|F@w7tke}%Szr_4A!{o2FFWG>X0K8D%q!erlv z@T~qmgcT!{?n9N9xP6_=cXLgh8DaBZ2=m|V_WWo2`9#*1+5ZmM|Mp7PxM#DrR~mlr zfc~RZNTW0^;VgE(tu5k-l+u4SP+hPAjw*BpKC$qNWSlgABXg%y2_gvGq 
zoyGs{i2qji{^u0dmhFGuj`(l2%-IhD0tVEA2h0BuWM{GP|! z&SGu1pl!OV?PporQLJr;CBAt;w{_(!zgNr@n{n=B_Fn}1J95Wi7N@-`PVd(H|BGP% zZ`$@h5zJ`}^BTi@>ux9J?=ubN?{hTf4JuAs*ID8leF?~UzQJsJonh9xFxfea-|KT0 z9dB#QB492{0P}MU^A>~o&4Vt?W+rE=lCwjffB)uIU}nU@9ITB#ncB!*yRaC!gXRBk zApi3i`TZJZ$92k%|MIBvorTE%Eq4BQ$H<@^|2}qPd1uk9CLUXATz1^Y?07)gaoL;7 zjxB|jxOK^3JpNtRyzY2pxx96>vE!|sPCGuAW$gG|uCn7l>*rr?t+zz;)fZ(K z`u9T9wuRNdH>3Vt;I4mXvbL=Ly&3iI0=xbloVWfyd+F|cS7vN;TfWJ63%Xo!!s?Ib zbp7#9(^Q-+XtBinU4!}VQ_Xj?j2&;8XY4qCugi{mS-#t^@?BV;|D3jK@zk9?Oj#n^KG&zZQ1ck?l3_(R6}nRa>HdV|+XQ zLq02biP7tgn~Yv>$aCqnhsDQk6(5TpSMgEbXNl{pnIhm0Ms zuXEb*`CMbi=L?k`zj;*I@%my*%;`uKWA1L39oc&BdA*)%(f2>*;{L~V@$P>N?kE3!_LJFjXVhQyY+q*kJs(9L~Ze4JbtS2SZVU=yq(65wPj8_zQERVFX;7L^YhA% zwHcPUwrS9gpQ;@%Gj^==89QFv>artS&%L16b2sbrAJ4>*s=B&W5+KYblH)uA79Yx$N6o_j$hhp zi8&eGcJw^+ca_2X^7{-E```SY(jNk*M*c&ZTI{!I?7*uv_)ZhP9js2gi_ImR>Xh(0oeC&3-&e*YroqxLu``->b zvNKSB&}X2&zDUL6Ra-1^Wx6-NgLd@R%g1iVuNXUi=|K~ZS7QI$VaIM3k6Tnc-mpp8 zvFeB=uCToM9kipjUOsj^&NX(de#O}F3haM7?0AIP@tCsX_4@liuE759W#Ksd(sx#` zjC1YN`aLQ4+)`(B@WmI64lb{9>R>OMTlVU?<*TnNU%ouo5|yDia-Ix2m}hiw)$>LN zmtpfpsJqk|W7l@8|W^WPPjmYCgq66>Je=->*r z|8uEp|L4Ufrh^uxgRkiQpG&d-W5wD3IVob{2BU+^+4-;8r7l0%%5<

EOC&RX&)V zZiz40&p(`u`QU3t2bXO(InfF_b?{P}(ZNd@N(Z&iC>>a7miT=ANgNAbH#(?%-00v7 z#V#GxG9A<_9ejDG(!m#!EpbWRNvwkgql1daj1E3umGVlUN7yjSj4bjUQZ$`q!a@HB1NVln$h@$1E|c>Lk`dlhMH! zHW(dTg!Do$^uq7@mKZ*U|W}}1h`;87} zqW*R0U?0=L0i}a4ZcsY7@Sr7Tl%2#nXf`^y_+F!f3sL`a9lTO#bnr^C(!o^^Djm!? zV2RSwlUN707#&=+%IIJQ>R*QrLQDrsln$<3t8`Gh-x3!DPhuV1YIHD*o&P9B{p--d zE~bM$N(WW?{Kp0REHSP#E;Tv`qW*R0U@OzX z4yA+3Rwx|=cU$6fc_* z)99eI&FJ8K)V~fL)G{5^D;-oUS2{Sq#}dR*QrHZmP-Ryw$J zsnWr;9hNB0I*E0#$mn4DLZgFX)V~fL^cfxO4=5eXjwl@zZ?nW_?dM-l#(Cj(qk|H5 z{^hf%f4L6!S27(`D;-$+{L5#zTH?Hnlh_aLFgggb`~T;m{&nbJ4b#CorGqc%`~T-{ zvBcE$lUN5Kql3>inpl{M`q!a@!%PRgN(Y}`sA6I2W=j;Moy0n5Gdei`2BU)_)W2K@ zua+7eyjrewaLFx72SweMn38-F>)=kKgK6`O4yK^~b?Bgj>0qVOLHPougDD#=ac6ygNqxL4$fU~iOC|44!pjTFKjSBdx&AyIWhlSXfXd= ztS~P+sxgZ#QP`0HW`tp~`ERmo{`+$+!>resdj6Y?`L8hE{O2X-KQT<7k#lmZOHSrr zf7bqW(Fi5y_`{}saj3*xQ+yyRTMFh?6X z3p-tMvV8hyolj>?RB{&9TVm4I1ms+5FsHKp&%!Jx=4)9-&ew94oMn3dvjFwqqUUJ^eFozg9^Wgsu zIoB}EbsBS|k~6Q$5@)YZK+bO)%*ky3FAx6jz}&?!_bAL#z5jO(>YuaY?f-en`5lHi z&d7NV{NI7uXE0w6Xv};i=Q$0Qn7AzgIXewz;Rc2Y|L2&m7c$IZg?YiF3iIqJg#UBQKX({Rd;XmMjK)j@W_<#f z_b|-M4CX}mKgWEXVZOdn$vOQRg*kDxCC)mM0Om@AIq5})3IBIsZZ&egzC&S_ysj~G zff-5wa}~p6^?xq>-+_6UVfJcFUH|8H12bOz?-eWGGnn~r8#!~~{~YtbvJB>b3IBIsZe*C773SyGD$FzY12dQa<_{R=1cP}d{NI7O zk6|9rn3FYTlO=L?B!GFJ!93?-h6(@YnE#z-F#kJ4VV?h(!klmnm{kd2u3?xZ26F=Z z-+@`mFv~UOB^q;yC9?M=fO)^cJo`z83IBIsHZjZ=g*ok6jhPP2h6FJGm0@0KFmvGl z4$L(SbDhS#PGRP(vBdb^1TY^km=j-NnDBoG<}QZ0M`0Ep(wKR`3=e>rg?69Ae8=KH z+ZF%+-Dk!>7|`SE_|Jy?;|Gd=8Xt4gL^l6tuXOVNn=&?jo*rM%|Je-2nQr*LwDMC4R}+B$w6ylfSE}B-^DOl{XcH63v)Na+^aEl{XcFa zFcYc&*BZ<-S^Yn*-i68h@sRe%sk;6j2VWbTNd5nB43pLWSx20hhw_Y^hl-S(Q+55H zwF8)m)c-$Zn5_QK3b`=ZZvY?CzX3c|*Z)~nmKc*r{lCs&PGI$a7W|**l0)n_zYi@_ zau(_Of9yVBCQ|=D$S_&_k9EcWp^Z$pn>D76|FMYw(F4W5J3b#Wm^rNe9}E9?$jQFv z;*kEHiz&MPAJYrWMC$*4XPB)19|QmAn15l<@%}|W$2&#W|6{_I$V{aEUvDt8S^Yl- z{_ntK&jtNOKNobauK&lR0W*>Me*?p0_5W!2zXP+z*z+%S%AV)y`hWClOPp~efqeTT zgE^kn|D)mm4ov2^f6;zBS=axgbAg#i{r_Wz$?E@1_`d^lJ(IIr$vIiq|C!yuOr-wb 
zXfVgI`acu?@4#gF_Aff$7V7#xv(yr&CsO}E%rM#fe+K;DfqBfx`G!z(7V7!`j2>VP zH2=Hf^AU#0>i;v~{~Ys;9EPcDfF|FCW;4TV)tI{eKfTEk8Hv>Yn+)byR{x(4|94=r8ublbqfXNG z|EOcYtV$q0|AS$&`hOJs-+{@#H{lKay$O?a{Xc4nB~DAE{{N}L9K-7WQSg5UW-rri zpVDo1~m^?wHZpJN_oetTH^ZN9GmGuBw*)I{q4M-AraS4@4I0snVk zvikP0u5a@Wsroh}518Qu?D?Mzv(aFl2LE?pvikP0u5a@eD$LV112ZiF%>QFBGmkP% z_`d^_)whRreVgagm}Qn2xjF&NEetbaFi(a5J1|+kJ*@NXId?0}Q+EL~Hv!DY80H9u z3IBIsvikN-UEiKFT4UB&BE35S%%2&|Gwx-W@PCf^CaZ7X)b;Jz4=T)&2Y^|c0Orpb z=0t-z68`VNWcBTvy1qSos>W=##AkXEz}#jqPv68a;r|XyRwulv>x7AqE6nshU{)u9 zxt(FoFqrA^e+MS3Z{O7Q?Znv{bD1Sh*`EOBFAe6XoeUHH@4#gB?VGy3&3#^DW&pD( z0nA@9%sB@0Gw^?o`4+2h-_rGM?mUJ0nJ!C=IF%S%moJX6!^abv)0TjZ`G@L<*Y>t^OP;XOiuvw*9P;ncNix8-+{S|VRmZF zAsVyX5@~A^!2AuvTxu{!!2cbXn;GU-g?Z)*g*jq3F!K_?>|vNA87BPSfw`Yy9@LoQ zG-j6aMeOT+i&;t?ZfoqQ=YwW^)3Vzh#)$7|h}De+MS3 z6aK2}gzOs>=J0ivNbXAj^LGaGGp{pD_`d^_t;>$+b=mm0HD(bomnDF?n_=E!FjL_F z9PJl~w=)&y*e5k+0GPE2VE%z&UScqZ!v7tZL55kTF|SmZLpv=IIG6zDUW1wX zEW?EVJ22}QW`n{U^NPmI0%l7Bm@hKS>kQ@)_`d^l8N=+Wn3NvXhFpCnv{1d|* zZ7`GI{|-!6Q~yoZ)R_}CX1yhRTNP${T_`1~H7I;DXk4t<*qofq;y(%T@6+>Ntj~q2 zPwIBVM>4B_{e9@8*MG4-Gj$)PujDIDe}519w^HcuoLz`r@xxw^J zPBHKOVYJzcHp9_|)lJ(0#q2SAt#uI^);35A!(L?*As^K88D#uui`L z?E^$B$xpZ13MyK|sY2$0NMm|aTjQ5oXWWTv^Uv#OyYb7d%Wo8-tLoc0Z@{oV1} zFP<+%MYLGP{wImKxLuTovqf-fj_|eRRJ3;DePpI64Z(_KOGH{lv;?*m$raIHQJqjS zf0F!goskX-?-QbIsnF}Q^ropf(P-Np)+6Kxx@|StqZ9UkzP5%Z2od&) zsgG)V_zBuUN3r+X467OR2u3F$M7-DNyYr z6&L50+z4CbUz9cD9QeHC9=@jZql(Q%vL{eNj_?3-gg=@b zGZzlTqf3r@<6p-1cAnPbNS~0oM&=UOiDEEYe0eo|R<(i5j0Qihar$vtEyJm`ZR*Tv zXoE4dAJ?3qAMbbhv5u=5%Or*!S0^49j=YVyXdwTN6wGKK{_VI8r(C;-aw5qdPMOgV z3Sj>B&6tfjElBl4Tivi0$X|pwuGKW3rnTa`Gc6ab2DD-q?W>^WxM&xFmKMZ32|gOa z_ydL?80mOVbE}{7VHmOoAHeGqq5(WjpgM*4j5J=mmwc4!Gd-@&r@*-7x=ulCXZM&m z&)!XO9+OknDI;kA|ALC>2a8w@M71O0bw;di0dJ=we-vvz)4^vR!#K<^dQ~nNf%?Zs z`vmTBhGU#6Jr3nYT4O`+*QzsSs3l<|U&;KO^tO6gBhmXznybim;Z)cMe1{KF{X4=3 zzv}V8NM#tw9}OezeqhvlU`zl;rIv&2R`%iLNTc;XY|wYq*x-cnzQey2{`4@_y;O@7 
zhJ7>1PKx*G4R)N;=Sbu9WhDjCD9<0^lA18Tp%sYV80cVs5UND;~4<-%Ea`O@>~b*adtG2y2e+czAqw2r|09HN!w(d$x? zj%%~<`53Nc6^6^`+A_RPr!kc;K-N^qioAt5K+Wk(Q#!c%2}(DMx=F+LVGGjI@i=w)~n$+H&MPlMh+R zwq+jI@^G!+SZG(u#xAXhMmH+FXw=4|u~?fR`9ewl`Mq{dK&+)=?A=Y_h+NN8HKJQd?F zVB=?C{7kR0R6eQh@W{36(7qBnBD<~D?Uu=0Wa63jnvgH66&-#PMy{xczPEy~=~_wt zoHDzHq0FrEg^J}^$Mg0F^q4os}{92k{ zHqVXCGddbuq;wRxTg`JvXB!=fBaxZHQghPhviO{&Wn3v`2L|Oc@Y)4C_;F3;K&-{} z`zDQ%ukzCi&nM1b(wmCE-VKbuszmrxJVOquBN|DEy|NxqwShR1oav}b>fn27%qyyG zAK>{BJ|gEbDYLI=6L_VXAwX>qFZ)hl{p(ma5LQY4RJqO}zqjW@&}0qdqBW6^xoC2o z>Y~Z@j7{_6>7G}keb_6KyilCM&Qe#4L65E3Bhe##Pbt(kwl%cotky`WNRH(+yqELl z5|JCu63MuhNqb7OmWW_`mz*Og&Piu^xF#sCcV-FE-t|W7YWP>Ru=hJf4%%e3|L~30 z_9d@jO*8c}<`Ygkg0C2lX5VWYpZk;zFKfZ3UJc&EcQe*dd|19I6Kq*y`)DVyPoc(} z8nk_)Ff4O)rh^XuvwY{y(X?V$+fp~3K1Uia%f))UM(Qk9e@#_=)p$d6zarOoHnyV6 zwv%7fZ42YHRdm(%Vcj+_PFqD+Z6msEZk)D?uG-G=@;B!mumR-HuE)gKH+Qb z{FASxGwN$yk?jkunCT0znCn}ju&3;>u_@Mq*=U#T)9aYZWV&YWpTjofb6P*@8pQ(n zTM+XM<%aMBzUJ@~zB0>Kbc-6lGGLFd>x(QmF6|ktpm@*r+3VkZy3Jn7qx$_e8i#Pg z^N~yB`tYo7$+#tsj9ZM1zRg<3B*@qo<8fd+uJh($(lfFCAI*;O#$WJZWJG;3Rr4kF zVXrae+~}J;2YIkt>mUFftc%Gounx9*>mYw;tp6vXgZ#$h=M^=dK`wRg1F;lNDX5k4 zi1;VpRy-Eec+lj0PkDm1tMX`9=|QtRXyqQXG7nm*2Tjg1E?(rE;G)Ug@1n^Z9~Azr zOpZDajCv1RlcxDPjjrT8Kp2dcYxiZe91ohT%UpdkJ=$e>(3W`6!bDT~?+eYrR8hn5 zLmus0J!mZ+v}QN0B!5yNueIP)&Kl$dY6h%LQY`VpB1=4dCBB#WF?>ID3H%SW$OzGP z7HSdk^jfNM$R{I>d7btisq)dhPMTBbz8UsVWG@_S@ri$NV7gfCufcrZJ5CfZfj6CSMRu*{%GD({6cv{oCgq^Pu&5(2j803FHTl z8u6TZ8iUpjmM`~SrQ>sCJQEF{VXsuVhiLiEdEeOU9C_c#$f(DGzEFSATC$jrg&I3a z&DE(*lt1EPcRni`mYBj*4nmK_!C>{{zQg+#MpyfOBS=#5Yd}r%AeWKK_J=fBBv+hdTrN@(bj_N!`7aNIQlAoJn_O_(& z$a|LIl!+O(e%Za9$?%Dm*myceu$*Ev`Yp5B z4#M}anY3j%1?~IGL^5~8`0jt5Y^LQ>c*Lu+*@Oi~uCwGBsuPfFL>#$j?c1RFkiN}u zh8UbP-*)>3_{-|Ar<`XAhx$47cjnz3gYqcd7euUF$?{gM*;h(AV)9nXG1^xO{)zTe z-qv-siYaHEGf~dtYEGF|Z_g>nQO*zCFK6B5->S!y^$qhE=@%aUV#j!99DlK6-0M1zaaB)&XXP*1GmIT4 znq0J`^ETi{M|E#6YzJ1M?5S3aYh~QRgTKB_rH0LVsRYfe;n-2 
z;@GB-p9*>$e;+@J;rQHoa{9YH-ndr@hx)7G39obPj_j4(p`D~cs`we9G~{O&V5?q5zopevKr~V@`>?t+_rN0bd#F5#-sMO zbNHw2(_=Rn{>QEI;Gg@n=6~F5@->rlhim%S>p+j=(_?2b99hrsxx(es_IPrxpnkA5 z`J<+1>Giy+nUqgw-A(*!|KdK~VsiNKkjdePQ?w6B|8n`XW8DS0xKB45pB^gjvuI51 z(?e|?6nXfqor}{F^xL%K_^sDq>ncvFi6I3ofbfq3g`j^XZo5-)?0Vu*>jpZ;?>|9&$9-%*B6o(l1KUb>D!GR zGavTw7jCbE%HK2R!QU;#OTSusR~*?k$J+Oojp9+)R&i@_yS9oayOz&gi;sHM$alsw zCnnz+k@$W;Y6Tmw%{`vW@3p>0QU3SP;~As1Jzai(y7V&-zh^b~a8`5w*EM<^>4Pr6 zZy3Pu8;swJSNr?D*uuwk)*1GBG*4g4B^_D54Czla2I3{lju+^nW`J=^)wTZPLbo}3 zTv<=3adQvaP$l&JXvDYAT`!@cb{;a=tu0P5?TQwv96mJ2UaFJTsxk zS)|4h`)M4=4V-r}R4k4fM||O)b}j9_*lQT@&iH#L)8xzdGH9OH8;!iBYE9owb zzZRy0Z(^K!gZW;r!pxBO{y!Y%Yk2-qVrFPeonys^vYX-Uoo2<`X=|AuXDMGgP3~pd z`|Eh_ri%5GHIU^yP1f0kVY!#%?)U3%=obBGjh>wfDQURd|rK0v=ox?eNvw?y|_K0v<$-7m=cb?AP4{qNFClHE_Rek*mq(F5R3 zQFudoSijY(-$xc()9|=XBjKhxOC7%*c}C z_X`|k{bbEz`9}H%=vS-o0^O|NX2oav&H?((*Zo>qzb(2SufyH?T&Vk%v3^^1ziS4- zYuEjfSwEQrEnoVC0s4hizogx)-wuWMnM3jWsrzDh26Q#+w@dfiG(bNcw@G!Z-yYp> z(E$B)+$QC*e*1L4S@HYXaqB|239k=2b>sP4zjU1q#j$5&f^($8W(q0^(pN?DH2Z+r_ z@0RL*YX|73;}*|Hn11EDU*iD%bll4GIaK3R(tW1z1ER+Ar5-}f;5m2e^J=^4o(^g( z3pG|{=M$o;^G~80_b;kZKUA8tqryit?0wK#Q2VSTok818?IP`p9?2F}l_Ke%?7z0M zi@(>8cIm7aLc>K>XgHojoPEtBc7JyEHwEYa^K5!#F2)CLXn5HpQ_DnATiF+J4_cld z&W+RNb9S3BctslrPa#q9J;*`y%-ESbyvJF?Jhac0bJP-jzD~=AnjLFa$*a51ZrG~Q z*0JNPAMR)Cvwmg#zFAO2qrfl2c_z9K344vKq;obUI7>`+(=|Wp^WiCI6QUZaB>(Jg z-6rI0lk&Q%p@s+guW!|}AK{eYa@}F>z5Y$d%&k3(cw1;|PpXC&x*q zdt0i8_hrU-gRf9O_*pP~Bkm!81$w*mZ=2i7zo^cDJ&m*IzM=YFvzFHzqnzns2gO$S z$EY2BL$4b^kDZd|6p;scO2NB*CY}6WuX}+>>*ksKjN0Gqdz#>F$oukIipbz&V9it% zlM(wLj5j$C;u`$pv0d_9uy4p?@X;mEZHcX0Jh!zDe|3r8r>8ZQ!W|~}z|Gn*sTj}c z;CVN^E*157_2Y9uzAhDog<=1%!SfFJDxcBuZ^h>|xSxspum0V5zYR9-Mf(E0?~|{x zUD70c-h*pQ_SJpluguwYZjp_bc(f9q()9 zt7^9ppNnw~`>07_ysyFgCX+{x)Tlf<y;t1KrdzVkZzw3dM=|c z)AW&`r!o3UMGxTqr|4yLUEc;?1$`f*Z`AE~g1&>%b-fta1o}osmp*Ly0{4QxlF@hT zaU!6H7=53nQ|z;~;}N|V61WodO4hztw=V&`*wEh=nmz$^x{s;XU2p5UA&>$(Js;vZ z_c+h4z9*dd5$B?iGmvxTGcw9g10D7rU1xUSprQ@Qku=;3wS0bkE^mm;gC>p+>0CEt 
z6~zqoi!=_&vG*h#<6sS0sMeRonl8t}I#{1G!S|rY&Y8TCYtEVMS>!bi{Fd@N_5%%{ z6(2nF4x7i>Ig>->q))U*ydSEhc&~`=nSlJio6Z(ie6aIDyC25Q2tx;&SN&YfusWTW ze7Ihyano%5sBzy469%3oEwy~U1+YzCoOaTebvxvLtdmki8s#sYmygBz$()Dl^O2M9 z1n=|T>NMv#eEL2`K;G{+Ifl+}_ymm`X%qp-SVsNzyyp*Y6WJBf|L+F57n~i~uU`*9 z=KYX48_!)*-VM>`>Xc!>(9f6^(PxZ#0Q1rkdZxUsz)#P;Ege+IsjAX)d@&-(30{*}1yzQiN7kLiW>va`h!*w`1*SUTcRQ z3Pd0Rn?@QCFR}UUtuDv>CZdl4_aO9`jOUMwp^G%yM~aO_wwQ}CCSeR?k8W)b=q#%J zG|+)LI~o2GjO5F?whZ6fkwas{&gm3iu(jiPF?^5Fj$pQbPsvQ>k1F=X#N{*nd-Q%> zRoPv>R(yV5f4&Nz8}RuB{rL)fuEFO&=+779vjrY{z{`*oGer-*N8IlM_Ab&nyQyAUmB0E26f`8}a=fb@I80x$ylg7cCd<>}SXDog~q9YW&zUS-S{( zV9eAf?btg}Oz=TIN_gw#*wR0uQRK-UOFXob#)F>s!sjp_%I6+}4m-}oHDkjq+J*tN zL4Mf0vF&U+>-hc#OC%LC8#KFYfIU)&4Q_VX;Q9||gW-=Rs*jIugW)r8?>k$tWBLn#6y2&1TDNWor-=llrCO0{1+qYzG67yxub(VM+ z=VcOV|8Ros+hQ`P+`QZ5=0`hCZhm(TY~gyI_E@MQ`nRdTJAqmz+Qn-b)Y+|ASJN5} zar<9s$`|My*6#Z&qED?tKA?5FUgNzdzsC~u-{jza%vs3K6(97(_9tfDHB6te%9LlU z*qV#i5$i}6g@@l3k#)q=FDls`{d|~5Gl`DBjz+{1C&ruT2#mq9eHBgCD}hnE}&C@W^@I6z&38(f2X-!LhMt)HdeeS60+gI)4wbwy! 
zMB5L3#r#%m*4GQQo`l+G&c>Q($mojbvzuJkrQcX0`$h2lj=J`qJi|;ei=4L{wUvD4 z4Cfig>Avzz0oOPDHK?y)pF-ASrRr>BW*MIIs!{dop&q+lRp0UOf9VVai>Li^9|Yqd ze)oq}JoPNs<4S+xn6Pn^z9waLU8~a9cz z`DnWy%R_FzuIu}xU%GHeHvR1JU(!vk_)Dge?Kk=v#y;tHuD;DDz`yk4{`~)?*zn)0 z>)$?$?4sJfw@vyQ>?@z!{@`i7AERR4$%nl!)aRwg^@_)S`KD6SdkB+{M>QSq&{X37?M8l82@9-KTwV+CqBvFD?WJIwzZ^r>+82RUq@uWl-PNAopat(bQwQK zY3;B1?Nd1Y+pB!7+LtJqJ@s9uuQ~jXXpzRgV#+B}*Nwx9iSI~7Ur>IB*Lb|Q5QjG+ z4!?rfx)i@#guOgkA5m=^euLutZ#yfZzuJyx?ohw~+tP~Y_WQlC6A?2=xm+Kc8GEzjH@qvd{D#9yCuE5{>i){;_8Nb3OUDe&1yFHGZ|0^({l3 z=-5h6_K2Yl-8PGKh_-}d5l!ydri!f26!jfg^f^+2J)_}RYo@60u!%I{do$)!pCjt| ze|*OK2eE$ID0vm~dvlW2d(1~EBA30VH6QI45gqH;nEW##Z!P;w-zQYbKF`4CQudkj zqiYRM8PlmPo*kp_@By#)Z?bKGJ=hTBb=ng9{~piOJ8X&iuz~D=Gc5R?jU)0N59t4CThh7wDX~zTniB|;3J^*ZJ*OPqIi_LNLTT4N{1^aG}v_Pxd+Hc!`X5y zXM05pC?7wi18eeRyC025^l<7a9T+1S_7uV4_mQ?mvldbVtSQTK}1rc*vjpK(sc+e) zD;s00{jwn5e*^DNq4s!xJKk#@r^y(C-wQlvLE|-rmdm)wQ94cwU|*z?*Z3qayOztY zk-lx;oCy9XP8x7MuMn{apQc!809Ii*8+J~W-%HaV-=~N|^riT*Kj*Rcg~!dQ<{sC6 z*Sbg7Ua6a%zUSMaWJuk}==4myt`Sqa<$V7Tfs$FDd z61KCSuZ5lx&WR=;zHyC5Jks|L>hCapd9$fsUzWeoQW4$yCNP&Neo~rjUqUR{^OE*U zc1_8U)vu4DV;-{C5vGrvuT(5)`R!V z4&CQL+skPmp4Xv$G|pP&QoS!ux>~Q~7+Or%bgoP?^ZU>s(RKcnd`B9G25lb)FAa8o z#6c#@wYolV^iQL*@=N_|o^9Q6T9HTJVh@^*TL*5bN4qi)TDb?U(t}pzL96zlSst_+ z4_d7UtC3IPP6-Rn%y@;_oZjwZFy}PtqttHoQB^@NIXwVe@2c~_V8^bT*@VL z>W=s+C%5G!$+Iu~y3QeVe|B457+*lKM4mOm`HG~X!mvPY3arO|nD$lQR?0hS%!9Jt zbdQPaS?c=!pKaSe;MwVzsq3CHkPfTW_3-#O z{cF_qs!4IK*Q)DQVVvuA>Uz<6ajw^^>zP51>v44K(|K3QpRD@Fl`l89GZ>V-CHuIwd?l3?B%kRLC_cy{}6VVOpOeZgf7E zeCj?!D8{F{6uH@@zqOvUPY_oxJri*XePt7pd#3S5x6IpMEB$>qJNE@!$DtmpWu0Re&>08ey4jLe72R1&-u}HfaLRdxIW;@`?4g9 z3wMcG1KGGgImB$pF^{JMvUxmSe9(apIsh+kt)+F@3GzbE*4a9s@i{-X4hF&{-P^eQ zUN_ghhYlXm=VE=7PrNXR9#2+=`6fT>a6HUgwBEa1m<^2Xz|{L@0ov!+=bs2WpnUC3 ze%9qU*bnP_bG|iku!-)4o$QT$`0*3B@x%IDk*b4qY@wdq>sEF>PVM`*o;$!W-^I&TpgG z^g3tGzxNpTGi04h=l96RSIji~^!(X~Uu=)=RnJCz(9HLZm5=n6QD41hAT+JigNC}4 z+TnQ$%<~rjo7&>p+upX4iq;Ftd{16*hw$TjZR!`EiCFMo~I=*U+~ 
z>}U6TrF=R+(iskXFaNJ6P)?vRB8|PV{VMt#PU+V^lhbW|k{#PuzozG8)dxKKY40pF z4(o+ddFHbCf;GY)x}W%w`IXKx9$Z@*{Q&1Ni;0e$+!e~&R@D~tpWBvW9h174j&mtW ze?@Zd*w&?at_6Dh!&E0<%6^(A?QF}8+%503L<)LK<#cS7Ta6|c=#a?_jvUGHk|ItZD#&0hSjZ8Ofl&8xD{j>yMh zpRO#;leUGN$8bj1jjQcopKncr%>uB~4%pBy?F4&LAN=0ea4GkOH)4J%Xy~acJkRA^rJbgufK1sebo}T=(Pv%^u0N)!{MZZficxRZH!^t-!(>=)WeC5(Y)WA z@1648nHt0PxwZM0XzteI#xUhssOYQudk`T{OsB{vm}kPNy$yD~P5Q(bP3|$Y>}5>$ z*!XIUqs?**_$%5_o}zhG@3$w@9y0v6Im1a$mvlu#fAFQw8TOXb?`Wz%H}BWiYw5Z@ zXJCGl^#Cv&@VXF6r_ zU8MPDzw@@U&g?bpl)ta7WzxCgfmX2V=}mU| zw1v`NI@KV?#xZ}e`@oa)Y@q%5Ycnv0j#IRW z;h|%UWT?Co*TKmMt(K5<=NX)MdFEL%?Y*qpN=ArLm@M>dw*!^g& zZFhQ|Q?_0ds!UF-$Jj5u32b_`F$~aYnj6-(WXMR2jLOt(Um67pAsg{pGr1yzDA#K zHgS$I_Y5#5{noZV*B5Wh`*wQpxf}OqqNv?;{bTSjp8ie7r&um}O6Q{J?uzJriz=dj zv}=0l2X>91aE|Lo4X}gEN8yx#+jgZ!f8Q-_c4KFM8R*QT-0z|FthN<&5PKd1dX>*Z z`~c@lUy11c-uyD3g)Hp3h~x1mhARFVj{w8Aoj1n7F2 zLH+H~jkAlE_&U~{FC`lPJO>XH`*rqQfOxRy44Lx?`)bSzSi8|?y+3V{n8^p zd~ZWV^u>8D9rdg6R6QS?U!^>1jz;d(b2R2>5xFtiz}L|sam@=9+iHAIKmL$1-w@I~ zC+HKa9@G6|^C|LZGUft{)7d;Azt;luG0u)Zwa3g=tMvVkUfVv&Bxgl*?7#5$Ej()tM7lAx&>nf60B+LXCIKy zzWbo&QRWrBo~xzxoUW&1`3v`l@O$8j9;xW9rRQ5{?P%Xuv&649*v|penAo?iv+Gk` zuas-7Sp4yJlxNyf29#fz4hSEn&PMxC#) zV~CIW5p=%7opZ${Qmje!z?aZnO)>_gvuWFo=4|2wBM?26lV`e2;imE4m&OWmFf?{%MX^fBC!E^7>Z}>g(0! zzO-)!jmvbihUsRV^4ag;x%#hcRDF-i`ygv64&?V<__sRy|D=ZX?@|4~{kq1jCmqQe z-mL#EqIs-~X&n;K{o=Ku=RfQ^Bwm}9XoGWw*B$-{{7+1j|42o&9?w+qy+lv`bUT`x zk0(dHmc#ae8e5m<+-YxucD)<S~tV<#_%#m)@m@ zqKO2}Wy(elb-3(L;4N}$-{ua+QLZ0zb~)AIrz_(*laww;p^PuZ`O-7KvX!6hx#2GP z+;F;aX%1+1wL#MtG!kcVqZnM>`XU)3xOT7O$qtq`=nu`&y zHTSU6v}D=(s$GH7Tm@cijP%ym{WiG=NUkZ*>g#`*m&;Dyt#k<3Mp4%`kxp&UYz9qR zZFnbWl2kg`f&0A{eMT@Edw&s(CW&O_va*A1=>4CZw|Fye-_%%rA@Z!&atHql%W`&t z%5N#<*Ea^kp@#SSX1=)ro=wtT_6{w_=Us<w!>eb{af#CTk*$)!5TvR4W&bnI@h922cxmK>0mUfP0v#Eyl;-69=uX! zv-1M89$Y*={CW_Gf8Ak+^}t>?to1z29~{(;b?W;c|Cx1z_MC<{Pn2}w)Dapd(eD@k zsLJ9f*O7}(KC(J;;afj{9l5Yd$@E*h{IB*OMF*VK4ISTj4QCu_#{QQ;gTT;&3Q+sZs^w>`Pd@Ua5%OYyV2g;nw#zgO=M6tPb>qKf-jRQov$_#(zp>_zf$K)N{l=P$fBw3$M!iEg(s{>! 
zOTRHqJ^%5p#b_R4t;Ou~JZlYLg%O?g{3qPGpYr{EuG)Wgq-F5m(mwN4AMdyfXl-WA zzmGz>G^zJL|C#0T)6L1JF7fN9!fhi*xqMd6Y9H6+dlj{w&QZ_*j&-|PVa8GowB*)vEnHVd*tG2j~u;Q!=Accw5M(Z z-dn7l9^EMS;>8{GWZ=;C+NDJgtgYq`NV;L5^L`iKl(`qmg~MT{qg-Y}<3w z!d`ezezfcv_cPnmV}1ks#wyyY2Vacepe{vaUd(B)BlZ@5xQT3P`6R%8PI|V3Jwpf& zcy8D%ACu;lkLX<^%4imz^Qq^{E#f~{@z{4~ebcB#yy^Fh-OwHJRy~#FuhRd~mu-9m@dET#%mEDVUJJ885gtuT`#{gL2+m= zxqkI8SRZ1qf5B+f9D-Th7zJ=6ml&*#9uvvTvp8GwD3E zHNUg%sWmnxQXlI*=k6UdB0+E2NT#9oB6#-*)1Cv+ps!*dDf`;emEXn+lx|ep>B=*N zl&*RP>(wbbTwZ^DTIn7u&t^(rpe@f5y}H$A&uykXE5FHQscTf4mOaDJ#j;PTHU3+? zH0NzE3+w!>qq3;L{=by3{K|srYP&sM*>fTGKP0Q?9|tRa`>0#=3Hi49Mn3IDqG!tR zv%I^sL(~M?bL3YZ)W)%YJ2}@1Kbq-UY1(wEgiiAN>kB$C4t=b;V9!sqUptp$IMSki z^K-MEmv_o#NxuE>zH_#;?<`PXb}aiy8hhOKk&;*0(c5PS=p&OZ7>06g&XwQ!_l;kH z`pEgO=dzJy{%K;*o*Z9bd)rz6*LwN-*w+S_wx53${0)8{fM4034>05W$3yZV%v71R zii5vj>-&JM;@}&Y%?*+!{QX+*_FM76H?TFQY8BUvI6AlBzX!NgJyB-{)EIA$Z>{X_ z8((ev312I+*{;mA-I=qnT-n;%cGejDA8or<^M&wiXISmSZadrg``6U@`+4a9c%#4V zT9pO%p4!@ituF75_m#)1YQ8WR@Bg_cNZtBRsIRT&Htub{+vBs(?gX>G&bcF(4NFw^ z3ACT@U=Z6;dscI)R&9^kbFe;w+N`}@rrO31?|fE$FH=+J@e;`=2Yo|_!e>p7ewyxQ zO^+F;{A1w!Jj+F&sQjJ1aKM(&^=3ZLQ18DE29&4f&y-Grs()?J4LKB=PVSvzfo&&V zz#KnN|4iRzn22X4<5b$-@j&+Xa;>~u`iDcCz`A+&=jNGQI-7s`yU^Ww7VN)H*$sJC zKj=*p;{nN=ALLsQ>KtZJdCPKH!a?6S^txQfLT`-WuXiJxtGuh{N~dH0Z^90huQqI( zuqia%&u!Z>`V7m@Xxp;ma@kti`$p;YEJoa_x!mbiZ)=rWe}f zVAWT8*Lk$|r*Q8)POV|(JU^EW`pPdIHQyYx-!ELQ;X(hbtXreknZHFP!pA;Yohs@>vJM|8Z*EKa86Shb#?lk* zDBZ8#tFz^M?(?N%wWAq<`al195#@OjWW?BHBm|LRZ>y^9&&aX(Z1p)Im-XIeg+u2_ zw)CvIKlNGOytF(gxo5jCFMD>FZMfiFTYACtvpr|qqn@)B++pMM%2R!$vfUfDx&0e* zZa>{9SZ49r@&!BfI$(uE{~&t2VXHH5$U1}cdfWJx*z93@X7GlsuYN=I)%n)^$3E}% z+R&!k66ofw|Ip1Vqor-%u=U?>s4~pIz@}SG5Z!EL@`fste0Bb(MLQms%X)5C>7|l> zt)6eR+#?i>#y(dIMx)+|^~|@+!*?0SqfT4(IT)|%qkDF>^ZM(m_A`g0i%PG@v!T+% zI^6!QF_>=Q$WwXmVa+{cTF&y`!|MM6>ft=^CtLl$+E=l97VRrA_fHOBl8?YU^`eJuA}(0-CNCt?qZxgSK8$tvj+2;ZXrcFlFUlhpU}tueX9UJc?4 zb8pL$?!~a$c2N0#ds|dqn%)Ys7h_wHy%^iV-;1&GPk3Hp)pgrmh*v(RaSPw*zZ>ye 
zlx^+tYA?ph4T!hwYLEX>@mC|hvNgZ87h`1};9B;pyN2f+yXko|Jzs`TpF_6yssVXH z@8BHl3D)=@B&`->U@#i{7#NI3je#fF_W!kI&#Y^6*$KT=+1Z{Iqdj<-Q9i^l@D7rE ztXT78FO8hD%jeR2vwhz<-kYz!8;Ni2Wm(U^N`uXNN;?SqJM3U|_Pht9QF%Y!8vg^* zsWChv9Jv9;`+#YxcL`YU9cTOg6>Q?&8?;x>U0c@U$-P0pMcKUw^V1k}o+;>gg{O6R z428$w-PR73POE4BnV0agz;X1Rma93k|1(EzDB_g7R&m1+r~1WKaU-G6r$OYUB5qfZ zxD3Q?4H8H1n63#DHy3eMU*MN_?WcU7oM7r>&gs>B7<~k@{L+_V>_`koL+|b`Sc-my zl~Mf!t#)PL{E6M|sv5gGB5VN_mksA;Bw_3g($0yf#YOb zTN5OX+S-C3an#nbgT$e&aa9jn*#~WncMQTuZB4BuTJcd^+Z%+B+S;}tan#l}1&O1w zY7P=d?}00W#L?c3Tr=*Vy`11zxaGSCdpWgkTkv0G&fjGkw(;p2+xWD}_g&+0H{`O8 zs_(OZtIGR*SyhfL;_&@U*+&mhvb^IV`ftQp{j~M29K4t_(YH0Rham|%sc-J@eb}bc zcAHL-=ub)mq#f$}tLokDUfa9kz3N>t`HdR;JIgM?$Ke)x$%=x|*~j5vG-@1<+Zjrx zZyau0Zh&z(m~8dTANB_g~O+ z7{C35!RYLDAQ+9R12H>7$qZ5l0;UTLfe@SCE&5tZmnDvM!8>q!p4TqOy6%E$ zeql>z{#orOf-Tz8F%A1)A`d#&LzaD+^m7CLhu!f@UsKRAve6y?8RV;q_*32S%egD~ zBV6WxF7*e*A2R>c2PwMCLg`6#YZtKH=JysO)O;#%dm>ttv0TqXe#rb={SoO9N`8R8 zC`g|n&mdr3sBI4;y*8hNyO5h5O0McFTjUjUDjGwv&eUQ8@V4;7W{>yriQV>a|Mv$< zW>P4caPw&|FYLiB3zg;i9cO_t@T9E_D1!G_ElQ*zff(1=r;St={J*n z^J3pY_lw^XN5_RgMN~YevK{tbpg`f0Q#<=hhE}g{N6$7 zWeoZ9M}Hry*7tI0uHf_i`}b7Kk1YA&h0j)T+gO}4CBM3D_}?A7$mmE^{Zyf1DYa+7$*wuzUHp5WQU=GSS^&V4TO%@xB=Sto1TxmOg!?!zc#_z4vZsD7ElI`5>Bubysz&hIIlMOU2e6qFnI}H7> zNA%4trf=qpt$CG&9ed7;)z_ ze=rW2KDmtpcAwnTXCwVS>dZDh!{QfIH+@8QkpB73f?waE)4I|7B#gBz33Us4?hfQD zT*~3b0VQX%T@HMQH;vuQ*hRPE>cKjAB5ShFgsc2@G~1lkxWQ}#jj$ph;0lJBVCBe66#VgmW=hJMf5(>Kp`bF}9UoZjP5nyBvs{Amy`&wP(!#xsx?gX+S_ z9?h|<}8HeZI7Yu7!Bm0He=iz(V^nrX4?38$ux6CG?j&-DZx2E}1D#wIo z*rFNn@KbQMr4Pcpo?r1c`5JE|*%z&5Nyx``*o5Of6x#h6L)NVB*>wJbx267|Pehys z{aD73iPl~X{61Zsff)7V$OfaHPyW|Bqi_o}hbZ-dm&md1p;RKG~+sr?zl)|zR^ z@eS>_X}M~b=5L(SrZL9WF1_L8BL{6Nuzm6vBcJSRPR2fn0QN=Me3W>j*M3RTUbxR? 
zb27d5x)}C4{f(xJ&Y(S3vTabC+Z9ZvS#~nb0l{dlM!6xaxxwOPKh?Zbmf_W7@k}a6 z{!}{Mu3p8GC?CjA%++&P()pYJPJZagpZ9u|?<6V@YaB^Ly+pazSagbaQgaWaW5!WC z=ty}+UO&J3+z>hpk$EBipaIK(bTsQ5Ytd&siT3O5Gct`&Ra@Tb*Or&awtPNYf;O}y za64u{Za_W5xYsy#wO>0{;bFF8Z~DG=9BtbtA=_~a8mwvmMD=9UB-M^5S#_~-t8A-z zZ}{5k2DF7u-nupk{keZzUDAwr_&xZex9(9}z34-4)}lU$As#`!vh4thtxB%_Ib)mwAYl?PNeO8cK)e63BDBl_qS|QHJ+qNzH30uH-GV zK9szU{*s>C!KX0mc>YZvHmSPN%j`!()QuqHv)NCaE$3a9zADU`>&dycJ&f#NmdV-j zyCqhc*!n<^KYgM(2cW(QYjimeL%(shH4noWcy^$97|I2VeP{wpRH*Wcm1%`I-*<;(I@(ahNNebw)SR ziEKn?oKYq;za^W~XF^F!#AVCqz} z%d6A9C?~H@F{!XM3n*LJis5hm{K{1|mGhUCFRhwiSyfZMstPxCl`Cqh=2zENH&&O| zRG(W_Kfj{9rlx`DOP5r6mR8l*R@HbKRxDp$SKsJaR9Ek5L~!M*+VW-96`q>vT2OeZ z8$7jjjh^N8RSi|Ojh@CjPyLEo5F&heecj^v@?|697>k=+Ug@b@-dJ5%>ls+#8Q72r z^i(gdt*fu9guJ-S`o$}jQ3AD9Rh3nhQrf_Z7F2PwYM0j5t*rH>lju!hAheE)l_|40 z{81hXi}R-D&F~CNO0OxSfYehx-kZ3b8QDY=*SKmq zOzi^1S1c*7_sq)5%`6^E`D$paUr~We{Npb0JULB`^RvkKs0NitOqS$Ogrc68 z*UX<$zO1TYj+aOYptSo9Z0HAic}1E+Aawq$hN?<$J}nZ+w~>3( zDXywfd6QXF=}>49NkzuI6yA)H{xmMNp?`5ser9RjX*mJ)&s^BhLau33gx1xpXsl8; z^%8kSOWS3%Ejx2^Nn!r1(jc;1noi|8)8BMn8|BnC&PP=qW4AX|Dzi@IXHL$^FUl;< z%FQXUtjF7~eW0I0Q<@FUysvAMKqP}VbSdm83=ww{8%Ii_B>(IlLQ@?HY^I@bw z{3)b^6jWVnYT-N~N@ujvXB4igo-RXnohN3clZkY43JQx)_Y@SOPR}UxOv#^Bl6xvy zBRx4OGg1CX8LfV#C7A&UW7edv$rv))yvs;yVYLQWvu2goc4U9FHW&?xsvea(0@=xh zQ2>3yX7(Qes)TC=Qv{m=x3GW5y`crq+Hgr>wki-Gk6FCQ zs+iM4ra^6s(nt!da`3t$n{|-Nf;7pN)Ynkj$v;8bR2Qcf73WM1oOX~}W$GpCW8RV3 zjp}9z(r{}x`ygQzE7^?dE~Wbu*ldif+eqA@$*3&HP~9%bq=w;)n|P^iPc3rCOLe?B zr=&<}oo>}?@5mCQdXro^Zb)9Yr4+DBllGvxZR&M&ON#WC{p{qn>StRPkac~Gtm{bY z=vu%`TebnD?brgL$C)wI2C_A8md-;RZ z!#4O+y!;M_d|94pnWMb?4u^f>&qzW2qH*6v-ZN5AkBQeszBA1EMu}=J;m+n{yU&pI z293>)Tv?B>EOa)jGv&&9H9IfcoFli=`AFs}8M4nU%`3=pSP)c(vX3QRhXx^D*@x!j z=ViN)E9VNt>sW4blAH@rAM03da*~|q60ZxnvfdG|!^Vbnc#^Dl#Otu3WhP}%cCo$! 
zFCJP37=CQyLwed|P^&&^vP1KrO@ki>viKdE$mEBSEPjV(GWlUBi{GKCB!8AK|5+;k z-nFB(j`yw^Ls*k+k>#^Rma>Jjw5-W7%1@@}vrU$=&5@<&vsIR|)e)ta?X#WLq>n5; zpZ%<6eq`zS?3aBs?3eAcU-r?kUyjdyR#$vP`OWd!&+49!EIpt7a*l@ma(wp7IU4qx z;^IG4ziCIqe$#yRn|3tpH_d0iX-C6;(|z`v zZrU&C;}JO@o9?sEbkja<(EIW~-OPU*^uGK}H}e;oKHsOmdgj#zy-$Dj9IFj_pZ@9< zK^ycw{qw`ozrd$|K{)m=@abO=j{OUK`WJ*_{{o->1>xvF!>7M`rV-jcGkp51=NN6! z`}Cg?j{Y-z`p*bQ|3aVsh2hx0(5HW4IQB2}=~EbvK1Du#io(&S$fr+HIQkU%>{%3! zJ&Sz$7lot$OrQQU!_j}HPyd9yCfWaGo-$=!_hY-iJnPM39T;5Ati~PKTip*s-%OSpbM#{q=Sy23u#iK zONM^gZ4|$xgMOe3S@e<)`hl)Z{gR;{=-SjzKGUCE5ZZvK?<8A3!v}3hB`sRABWOb^ zYSEG{K^sz8MLUY@8K%CY$fjZHJBsWYroN-dwqfd6q=SB-3#p)_gMOe3sUYY^(Rh>>T0P3gC>oFQLaS%eLBG7v>X~%VFE6xul8(ls zlD6yLsAT8|y0+{$DjE8Lt}XkGqH!jxEx~3VKZ?c~q75nBqJ?~-4XJ=d3mXw_$U-z} z<=*JL+H-I;qS8bAn#(J&Q@p`5cwoa|x!1F3MZ=Qe3(G5(2HuC8B=_}M{MbL+SWf37 zaNa;AL3Z*Tg_CDlkw?NG0-#KDVd9A2nbdsvg@p66m5 zjHswVzAb6_d_x*!2$EmDkAh+t_d3LL0ObDPvY`Rnv0p zQN{s{oWdz1s2(R%{))=$p*!}s!*`ah^n2cK~#Tee}yU0s-jT0x^_95fh09|xF<9WL5Y>rt*Ec4 zGQ(wkrw!GMaG;}d1RH@P7nDCL7pfPEw!9YgMdnndwxk?8vKz{mp&8*I6OJ)?bs2$U z85Te3YvZQ_8)QUZ3dAf^v8TY;h`NPmS!u|9Bc{HjL*c@+Pz%f?0&Nt9jYIR~u+Q{l4T9i4e-%6aP!+`@f2E7Y*!!YddX4rC1h>-yw3rS8YH?N~v zI^xq)=sMaAle1FLV8?U%7=)(-GYHcd_D4{%@=6^S-Gg0TX%i{!pHSgagn6H7h9lE- zujomnc@MEN9Ft+X(nf2#LbSX>yu3mM^9sYn$(lAi({`Q$tpex3>i4ji>-{ zuBv=RO```XLUWJBK?g9>N)?&eVdO8KrceomrY`zejNK=?J$D8Bxb( z{U&;OFa=lYzUbaM%tLh6Zwj9^1G_^D^H0mk_E<^U!?1~IMrq!Zyqsdt%CjZ7#>EXE zPF+x8{_x;`r)B2P$}yu$PcKrxeB^~wrj+EAqN~ORK!hGin&|h2k4kA!XZ)Zaehp7a zX<+7|zf&nbEe)+4I-7dA!=!^@aH0>v%oEoswj<7u!csCIE_Rme&2dyx4nv-jiu&s1jj~&qW1in^m=_mI z_YD}2PxYK8r$^Zsg{bXQhlrz{)wLBhD=MwpzOuTevZB1c5>uz4gE0zMdInD%92Z+C zPlGmiPW2Sl;y_Qm2dC)Judb|?=hf&yX@xuqx^R_e@W{bLFnBoFvQ^tDw>2|pJ9<#L29xT8vj}{Gj^i_jg)V{ldD2Dp?MG zQ8=tfCam%l$V0lGQtEDN%{fJJ6=oHcjLoquK8Je$dGkC|t7_@2Ru!fp^>n1yljUhJ z#{n;?Iy+llGp_|dh8R2lh{-rNYffTlMvBom(GxI<5IT5ehfKhm>Kqz@?K+gTHenDY zY-wN?fHX>In8#15Q|9ahb<#^^Hv#iEyCVs8bdYHaaPU7U?^o@5nH zo@P}7nrW$X;7XP)z1FqGoUV{)O0R`%OMSCev>=QtU4ka#SxNr}s(BmD+AHZ!O=TP8 
z!EiHESU;uGe$-jDXy0`;I3w*bS18DKo_9%O%{JxyC5h%`ZLB{kG10~Br&1n<#GHq^ z-?n(FeMgqnDNstqoHNM;+K}0bpJZZfgQMR#Ax=xo7H2$BK#2FtBPJTw8j03N=K2rS z*|tEmQ^-Baa{XhiaT`|EHkLQZo*0vp>g8&t-avJW(xWlSqQyD~$K~aLeJTthVK`rl z%I;gfc`S8Q5l|Xr%vS44^RTKn9ipT9XPzIGb8NqqOns>A^XnE@SD>e>LM_317~dqo zlz#5umZCvNY0`Q;+qRqvbXnx3m(}Oz&=>DTT6RXWAH9qg)~OM-eJ;00_jC-pS)~u zzksvBIGIXP@C-wKz(JnL#ge(cx{3;xbfPwqUA1tvAmp z&Bp+gclqH3>p(8@Zq~27@XjaG?4reS9#bEBR&Kqo44xiIPfH!Xuo_=EFu$-s{cVd* z6=swZE>({7T;2PQ^C%@uwxM=4JFhgCTKVkUoEhFKuH;giD$30)$?-<``cLAgXTx-c z*i$lPa`A`}Bk1`G2FASn{G6%y#DT3J>&uc;Mp4yLXO7J;FUV&!l<}&j3Vi6plj^HJ zvi{ODWtskz;zB$tXD`d$)^NQc{ z81v(VqujuF;#|zb&X5p+LRt_$hw_EeMup8AJmGnGK+C$9+UGW zp3KThbV~F9vffj&VxfHUgd*~f!&C;OTDB-AY*V*Wx; zh(ohDypvOG9w&3TJ5$;~m610^&$znf6TEV$vK_e$JYSZwa#VgS?OV#SLXSuurc0jT z6>7yq&k|^MY96K@Gg{7@C@cd%ZA&G!Us^qkupUIn(FR>VJ|M~&QD-Y8(|pHouR(B{`gQ@Zwfgg0Gp^)ROaG%gWcX4Z7?P*RDiqvj=z)tK0m ze4?W{g;_nVL8mn((NW)*SvsT5^r#Ok%`BdZ=Mupaq_mJ1k_{abJxDmwqqtxLQy;&~ zD<0A#Z%V5k*xUC|x}<|;`$qIrG?P%R&OT2{q5dp$Mm8l@SRA;|9!1YHkme}r1L#Wa z+U(+(iNpBmU zt#nOu`K3#?m1BKyHD!8aPuqB3kXbTac2rnmz-*@CN?l1$wv|Mo(@h?YwruSk@_x@_ z+KOe%gY@DU3ae{ssut604zi1VtU;eib47a@r&76~ozSenY*n`EV!p*`F~>v^Ti+^b zUC@x1D(8OceUn^5V$Ns152E@{`grqUKCtl4Kk4m}`DV#n-%)y0{+QkcRgY*?ol2E+ z$m(-2`KPLgFN@K92457bGwVpIEc3c$%gTMKx3Gm=7s#Q4>Y4RM$7T?<#vqwg-uYFF z=xs}2(h-AMiMf`M4*@{l*80u+Vu!x_jBl<*u3aMTToa?+Iesa=2YP9rjcYe(cV;Z@ zvvEdqg!~3|0GcL6(-eZHOwp8uph;IW=^<$16-|6_n%;*@n%;*((Ck+<`$N!dQ#9K` z&}>vR8$-}EDVnAbG-ZmWECfxuqDc=y6R&9EgVXdnWYY9H6oO{IqS+sUW}Bkf7J_D@ zqS+XNrb*E>g`g=@G-V-Z(iKg52%30B6Ca$W=OL4(=b;cZ`xVXp5H#Bq&9)FU8x_sQ z5HwASrYQtXnW8BRL6fd%(nHY1E1LM=G(8TPG(8T5pxLiz_J^R^rf9Z>pxLNsHin>S zQZ!8=Xv!2#8POPrcSQh~8;4)nY8-y;9pmt;jmF`BePkRym;_7(rUNU1R|2;KKQ#`2 z(+xNYSOeS$+yMmdHwTTw-*yBJ0FDEe0zvcbI^ZVY!@ym@kAOcKhrdHQ-z5UGfpdY4 zz{`MJfZKt4fu9?P4?)(UG9dIkv>v$GIQ(x0TmWnW?gxHl9R5BAxWqX81L%GL-4CGq zVH!2~71@1R=-pkO%ZbOe)XXsJsz#`xhU^8$d zaI2xmR07ukuL0f%+yQ(Cc+k*eI|2s)!50gf4%vp@31OX|HS{i}K%~k~1lXn?TW|% 
zTW9F$>A+overhuidW}W8W8X3KaYew741IhV5Om|W0ueqT1_*s7EC3??3EP06o7fcy zy(WTxBJ`RFy(WHc=#%1spvz>yM&PH0o`tk>nhbsFBty?bdU;4M59#HBE)VJDA-!ox zZyM5@hV=52487oxp%)^}nd=R`q{z@q?=tk$mK%Cmx}l$ewCC(J^m$hr`us9OFZUSw z!cPsoVy>ZA4KVaY&|}HNhJMx>Ltol#=rzdCGNe-rd3E4f4t>wwXz2Bu4838qp*L^Jla-!b%y z4jB5nj|}}{$iD<~fAN)}uRmz$8x9%zFMl-jOP2!?e_0t2{FhGxBK<28fRO*IuE2Fb z&|g^tM0!`10wM3$0i492>I6)0e2Ys_36O-41J>qc#Wao z5CdET1kH^|=f+KjeiLNf1X(vh)+Wf>1X(vj*3FQ03utbEtXm=LR>;~6S(_niGi2Qc zS+^mb-$2%HAnSI>x*f8%K-Lz>x&yNASOfgZ(C@4S?l<(`<^!KK^t)1lcNzNKy?|F5 z`aKcACgA6We(wU{UPHex7r5Qf?@t15G4u!GftMNjgACXR{M695&IRr^^xtIzw;B3F ziNMW<{`+pg^}rtu{o&=nj|}~hGT<&le{>QM{EtHZqwg5{V@U6@0YJ!mtmY`-Mx?P7 zxD#$h#sTqEgipdJhA|uM3XA}HfP6eYyN|1&-*{8Q;RS=^X$WJ6u`w4|1S~a-L5aW` z!x;RzVGQ4C7$d(jj8t4luQQCaLxz!e4P34uY|%EuSdQ!2;A=wIs&2r`@Hz8MhOu^| z0Us@faap=yT!HJAOMrI)KQ)Z2Qi0DJ#;+@ZTYw)yZoFY!kGSiR=Ji(sL4O_6zOD%f z{jOaPgq&+3;~J!Y%>cu=`Vepz5NTXp2}IbHlYm_f<5$qt65fJpCb=L02lg_Ia-=^G`poNS7<1PHL32j4VU&Gp7_*BEW7amqDD@adG2#kwU$DzCrf)Tj zsZEBFGuJS((hVas$uK5OGK`6kJpptR%7Cdrq(1@q9e)7069~TXkUf40Fy1i6;rF;0 z!x)P+$KC~82L#`#HHMLKA8@&0q(lGoO+bW=E=3)LuBk{fZpdSdi1NH*906m7$e-rGr8HhCI9)JxQ&;y(VT!6aN6$shq6# zr)6ZFv8e0Yd`X%pjtX~N22aEy6T4AgbcU8be-jF=&SFkI)&x~RB+9^uohUZLZfkohMY7+9@#uIoXx@SGT`aJ zGgR1Y^Gg5O-N1W*X9DK~7XW7ge+#@zQcz#e0@(G4+X%b?cw@`BV0Y|W(0&Iz1pGJf zpTK_s4+6gd{sZ_W@GIcgz`p_i4*VSW1@N!HPk^5SKLdUM{1EsN@MGY6!1sX%fPVr0 zKj7QIcYyB#{|wv*+z)&UxCi(Ka4+yp;H$vbfV+XO178BZ4BQ2L1^5E+Mc_{0pMcK+ zp9k&${t@^z@EPE~{!z2>5&8!@x&W`2G0(faDyfLa4q3f;5DV0j2{p zEDClQ{JQt90e6kS6~J?VYk+Hk=K;?LUTjjbLUo-$SN4Ot%a(Tu2#GR7IUo%JP|R! 
z0ImmaFe%t-b=_=Ut^aJA_t&pLeI@WJ;IAo^T@AbjcrEZc8M0LVufzRv;Mu@>8A6sG z%Ldbb`71*H*AON!8gn^Xnxp1K{FcCa@rkTAuqUtw@MK_j;EBMKSUfup*KWY$*$G~r zuDI_4>FiWCj!nR{e=^HvQ{XIT8b-$%tcVq}Qg#}f&C1y6>|qZn9mlla<&jq z!75o5TgU=On2vEQ>t*kkN*_5|C`o@URmKd|T64)#a(0(+7DiM_;LX0Nc<*z0T$dxPy| ze`fpH|6}j4ciDUF0Q-P_#Qw@YVV|?`(9c94C`4zYi;@7WLRN2X(B z;~3F2jCYYd8vS+$)bP%{3r2(EQ8T-vMxKP)+k^MS%%L}C4X0pE(HHZH0T`_kFtbU- z=$wRkKnli-RE!m4&}L6X9U6z(kU1;L#7rv-GpwnYRZYiiY6fOd#h5+K;-~R4eg>bz z=kYW70$$D+@(NzX7xBe>39sf$`7&P1>-chhHgDjK{2acLH}O?`HDAlm=NIq``8s|H z{{>&qH}FgOW&Cn}1^*SlieJsI<=64+`9^*t-^6d>xAM*WHhw$b!tdaB^562i_}%;- zelNd|-_IZ55Av=2cl;s#Fn@$U%D3^y`4jv}zMVhKpXJZ-9sC8plfT4w@mKk7zK6fT z_wqOSpZR|N|M)xnJ^nsFz(3$0@xSs<`QP~8`RDu}{44%Xevp5|zvYMczxnt42mT}1 z1rtI^F<2ys zA!4W)CKAPPF+z+KNg_q0iZqcfP8H+C1TjfW7TIEo$Q9E>zL+73M6oCpr-?FghL|hP z6br;cQ7IOQCE_emBWlHRQ7;H?%jkecD^v+uFO@``TZ$53~=pkF`&QY9dCesTjL*Ssq5J$!t1CD|`AG7`qDDOg0AN*Qu-oV^}JiB}XtC zw~8>{wT4sWcxH}fi@@m|7PBQ71+n^KXSv`W15W3#l+|F=CB9l0+|K0@hdIL2ulk=& zW`mqhkj!&_Hkm8gTFhHW=0!i7%ysNy%x*~L)jyj|nvcm>^UvGZvzV#=0khHP*u$8GzRY%E-ujB7+QPQUx$2Yb zDb@^l9<$V)a-RA}_A2J7yD?MUA?K=ZVy^lY=BjV2+3Ne4ul@xy)_v?l%vwLjT=uV+ z%hKHSGc|iPXRiOiyj5raV&7m6`yFPnUo!)%bu3c2zzSW%(}M^eg;{$m;L&Uz)&Z@; z!wcat;nDryrNX?vBk$x|zYAS+y0rKa$s4QIHixm6F;_CjVI|W|u4Uq}mN_0Pntpih zHULkktaZ&mtY!wgX(?C(4Z#X%Bv$MxZul8JM@`1s$s<=!S$Gex z5@FFLShvcV4gc9@HdrIq@H&~#hMz5;L-B0Bk*~m$E|)VKyjjV^dVdvG|Fq(-#F~E% zpN^+0zrquftN7`7ra&wGa(=E{>(fgABCPd?yU8K{5UcZ#rG@_ChTjbj{~FH${wZaC z;fCK05C0O+9sVt4e(#3g4G$KLTtpD3h;+m6h6hhkG!Z3bM!Vs6!-Howcv2%}p5%t# z4G*3Noh*7unZ4ccyWznTsGg#al$qd$-wluXY)i^abi?n4M?OcBqqP|2hTjbjp4g2R zX~Zeqt(#qpn|MN)Ax@PtbKL0ch6m3er;B_kv%n3%8y-CKEEMykOgZMeNy90Rn1g4l z^QFv%PI=wZi^X_`E9X$+XPecCrFdp6XBc9+TfLp~iL>#9TFz%gqf=hD^kM~`h|8xY zVx8MGobus$`=#PC;u4oT<#kJs=lfTQUrU)k+w&T6J>DhUCT0G{t=>-g#I1O5al4ec z)hVxAdb}n1op?yfyu)o8PWkXw=J(=ZDU+V|{49XCKL9CH{{K0ow#)qKtlVzfpU$@Y zCcJ~bncspw-ep_CzQPQ=QBUU+M6KKtb2i?HXY%vK!+bn2lAnh?7HMJ`Z^nD}alBr= z?{?MGw%3H8t|qkGw%xWzYJv7Tbfgvx&fjj^;I=u^n0DJ9sRe`cx7#+jZH_di-L^++ 
z!QlMuwheBZBaLad?U7nAIDflsgWKjvV~&Mw`NifgaNoHk-B|8eSgT#8o01BTr`@*U*~tx6yKUWAEj*rf+lFT+ zH&pGmbz`;gc-n0no}JuKS?B89@DdNo=C^KmZDo3=8-6!DHkmHp@i>=yEM)TEo2Mk5 zYin{q=X>4o+VXmj8-6!DHktq3W%`(T=GBdjh==UthWCHPzQ-b;^8E5s{AoAk(q5)* zTkV*X=`%Qw{s;bC+u8-DXm6*1?P7%9xH~LtE^alTfHeuD25es&}}yWu?+ zdHtV}slFfSdiT=buxIHT{x7l@KLq?czFqm9{AT50u5*Kb8{mtSQ^Zt!Z8ERDE(Ev0 z|4jXBA0vLUGq=a24prb=u~lM`+x|p!0B0AGEUuN>cf>VGti<hGQjnH0`!^XSZ;; z+HD(-mE6&^+t!`k!r}U#vF*8HHGCDVbvu^9PM3&Zz{Al7vKXb}3eF3sO#Ep~NfatB z_K*9i?rkz%%^lq6)h^SG)xzUxw{3WKazoW_TQ^nrp!l}J z7Qu(n68M3OhtIaQXamK!_8`}G$XW1_T7x#34xeppQ@5sY^Gt58^w%~CQqOML8-#f` z3BLB6LOncyHA*jir#a>&Src8rIq*QXR{929?SgwEIGqD|vprXOBwXc!djdF}!^Q9r zcQrgSj)#9n=iKArDe-uC@;wf|-j2g{Bkqn@UUWOluzdK^y$&ARj)Mo?*4K9NpN(%47?5jKDib^ zCOn<Zp?f{4cc!sE#Mt?1&ESJK}FUWXt~g1@%Tk!1n z@Ygn$!B;tawmIaL^zVb$A&ASdfAClAJ@!6)wmIaL^zVV!A&Bb$H?w!ydt#GQUP=Eh zcpZYc1#lBQy1y$PbjmB~4}sSqVBh5f;(qcYFK%?oE9t)juS39Y&-)C6yw{5xobpQg zZ^7#j4Ee{tVc$y04!I%i8|R>89%kP_rVD;{*f}VfKeEG+>4KmA=p2;HAJ~tO>4KmA z;2f09@7WKK>4Kkq?;K2-oU`vC(*-|w@uy@m&JDu_JCt<+H*ymgWV+#xV9^$c@$`$2 znh`vMLdAIVir5X&kLP{k~xDH zLZ%CTKEpXEnFV|XWV+zz14Kk^ItL}QgqK353w~bW9F)vrUILje_<6B& zP%>xpV#svC&u2RaCG#{s8!}z+^V6Jzl6fXS4KiKu^D~`;k~xo`37IbV`8?;KWX|RD zAkzgupX(fy%ql(?GF|ZVD(9eNR`M#ybivOnor98D!7Cxt1wXHF4oc=iUICdd`1wNT zpk&tYg^=ljpVv4CC37jSflL?te5rF#GFR}Wkm-V-uW$}ZW+Pt#nJ)NwqjOL)n|LE+ zy5Q$c&Oyms$(tb41wUWu9F)w}d?jSM;ODEIgOYhJUk#Zq`1!dmU>!H%T*!38zmZEY z3^9@4!mn3w9hV>`B2`O5sOv4@bqLB@@C6SU4tXX0&F~}d5R^=KpodI{ypnzsyw^Jf zB@=${A=4qRq`!$f1|{=1{3ghB%ngp;I0q&3HvSvPbivPWa}G-8W_}xFy5Q%Vor98j zE8h&6F8KMa&Oyn%liv!NF8KMK&Oyn%gWn06F8KK!&OyoC!ta1g7yNvSb5JsG=UX7t z1wZ^m&S8cl`Vj=b&Uh%pZY$UGT%Nonufk;S&!s9dm=@ z_b%XS_>Cs`J%7Rl_wPw(a5@H+*T?x2?)drR&OyoC#vg}F7yR&R=NObs_{4)u$K2p} z%mw@!-oObS<4?QbevEVmr(;lgg`YTg{QN2Bpk!|6PeG;&e!kr~D49?4?U3n$AKvtw z!&UI{PVgju&IR|Aq%$}jgUT!X#JS^#hdIZfWWqZgWIE;s$Ft5s$%Icl$aKNapK$?K zauc56FSy`-hI9s}V^Dd8pE!5?@G$2XluUT1gG|TV;Mn0DluY=l~C!_{4)u7ySGU7jP*z;SK)(TyVcZI)l?OsJy~Y 
zoI8Gam~#wDCcM)@rekh!>~{`ICVb*SrVD<)&jtLFo3M|+=Yo45=?qTCpz;boaqjrx zVa_oqnea{rnU1-^@s4v)GT{>sGF|ZVw_U&nZo=FA0~g$Hlg{9D3@Wei6X%W}9_Ac_ zk_qp0km;Bk90#0(k_n%9km-V-zwaEB%=P?z$aKNa*EDcw!<(LC5XYOq{|cFoxxtanzv5|p4C&6(fa!b;?!Wg!n&{ICa)bnM zItFE@WU6OV=!e3jsF0dj=A{{LURsErVtw5oVwr_ZvGYu z69i8>V-I+O}J5gu?q>K}VM2+i-&OzCzyEqXkxZoGv zor99uO>~D$7yP1|b5Jtjn-el!@QV|igOb@(oB)|F_(f0Wpk($CJt5Ntzv$r{l+3{b zCl;M^gJZCBP%;OJ!I0^KUkq{%O6EW@2r^ypi-FET$s8aCLZ%CTF~B(}nf=88$aKLk z`a1_Dv!Cb>nJ)N6Kj)xi_7(jg(*?ii>l~C!kLU}TF8GDVIVhQ@2oGes;D^6q=Ww1d z;S|Vp&V7m)!6FI76&Uv-DF^0ny3)NEVg3>u#YU5Yen}W19D}mZNJ`Z?H#kN*2PHF4 zjD$=V{36deD4FmS4Vf2s3!Ih>^at;65(34UVHSmx ztKvL^q&ngA(w+hTaSlPrgy%WPbjT~|Plwk!hoEG_-yLK+Q z7?jLK0^hiB&JB)5&Oyn9H+IN$!4E&~jzP(U4|vFQ%ngo8=b&W5^E_m_;1?CnLCLHZ z6_DwIUsO8>C3A_WhD;ayVu^E5G8c;_km-V7EOr5_gb9lw(*-}cR+>QCiwn&>IQ=z92g!udt?5>w&Z3^Ir(97!Q*GQyE;rwNjpN+(EcSQcsLZMQ(X1^(+T5WzI~ zwCoWIWX}c)>)L{rrR_`GH*{Fq?BS(DUhA|nqPO_BI3QjWkBBEkZ+?jX3-6Ko!E@m} z_-#yqcfNbsTvkjxp#gp(vn>i%%T}{W^A7&au20*?PhlHGG|o(14R20G@b);GJVU1K zV`acnRstU_z@ZksSF# zL_x%s$jc&&BjY39i|QR68TE41RZ+9k_6bg$!UOCLJPB_;2jMzEB$&ApL-FS8`RFgA ze;>UndLn#J3=%alJG|K8@(v{tlRJLa@xhKOI*t`zY8jp8 zb?UFh;QDZV4T9>X7iz3d7I3sP};JXLkI(Sp3)sY`` zS{s=cIW6*o$af>(kElR=O~f*!uqvV&|5rr37Ev8>f7d&^)^ttjawu)zaiz!g=z1XH zq2qQQcSXdT5f?>V8JTtbImeGXUVwUM)JZ3tdBT9>|91S=w0)5sBlh6`V<+r6VPj-L zOP{|!N`MY`=TPFxy+j`*Y&=y*KEs< zm!8!3#7|DV|HOv)*pps6>DrU##D5+CSp3@ftdrk5`PP%GP9E0d`5qg36!z%e^ZuR< zJu`Y3y{_*yuUG$`pY^_=_tf5b&sOwRMQ71=jPdH`p$5E*L1X2HpdNk_ssMS$x2Y)yub4YZ; zD+yO8oF4UDbkdMVhOCZy0Tf?F4^G<`Gc)G&AumS18~s4^+0n_-8POSow+(%A=mkTk z4($}ZAi6sGy2FS?zDZe(L=i>pOu`H^lj4ik+CDQM&6ioVNz~V=aGA3C&wI$)hNx_2V&EP z97riniBEnndCMr4@|7P@((Ieu}@5G%IciZs7xXa_N#_y|43wF>( z=)k!>xq8V=f@6nB}J#tEk@4~a3n@^0q8IMR(a#Eb|M*rW$`8@c zstEkk9^Z}o?Q~yb-tWSF6W!OD_d9W)PWKJw{eC7E9Hjf@^8Seg+&@hBE9js0__Mfg zrl04~KkW$*?knl1OjLWK5AFxhy&RL|5A{!b{2=HPkjB%v#sj+ny8=4`V}KaPkU#D5 z{lLAz-N0SIoxtsg^Wgd{a2xOm;G@8Y5#9&b3%|Dl9|ArAybodhfCKP*2d>cP@vnfN 
z13v|R1Uvx5DFNdL<7?v+<3ro<-%a2)Rn91Z`fiR?HP))ir05!Mx9T~*j2b;Z6r$%j3E^7?Oe{a#)f%iziIsGP_$ z)O~l{pM-nt(PA9ebX;rs1ctql;J28elEKhwX_KR3SDzt>0Wh5Dz)Vg2v=zx3}6UH`ZKgZ`Di zP5)Z|LjR|J$oN73QvZkkt$tAdn?6DRQ7_lW>kHvkZvy!zgnxhxxyLsZ{w7oS2>7lX z%7?&*o!NoM@mLK8B5e3(jU;b>JRF_!&lfNQG4O1!7FEv5v#u^<%H6dtm5IF1L(QO=wNhokb>q* zL%+`GYIHF=>(}s8`2c)qy?VZ8baNo3wLM=mPBKmmJB5=2=Jhq>*s_M@xOpCuXt zfCGVpPzTa@2Yn3CY9;9{T8OuG*xN>ncHnCT_7>2=s{Q&{-oaZ|_WJY}s`W8ejhFSv zs_Xg*`d?8S5{!|?2qW1TWuzEMMxrs?7-kIRiF_D*b)U&b%62i74PpbW^o>vTXY^-{ z(MB3d;}2n3vM=n|n|G77-&;5At;9-Of5k|5+8lm zk5%ribqmUE<;-gzFRxE_eo$`8S1{Q={z6CmB{&08%9Hg?`px<+`i=Un#>d8A^_z^- z;8o)!ej@LV(pw|z>>#hMuNkw$E<@P2m7RRLp6*E3xq2~Do2!=^AId&qJ{zy+@d^4k zv{ke@{mnL8-4J`b)Axp*3tt`<>QCz@>fPb7mRgCoUR!M#DTd#I{o1c(Un?!6(ot%y z`U5LH-PdLohqSj}+x7BaiXLP|*kgBp^i4i{S#yZ>NaI|YMmTeb7HQ~L@ksp!8fE0T z>K#Lkw;Yc>UcK}Y7&}UhU&wm2&seX|qMC*B{3RPN>(cS|x}<+%_2Hnh_u^)K8tJ9A zdU~suALut0Sn(~#0Tl|`WQ-UV%}6$s4b3o+FpQzdZiqdGDq8>dmj{Jgi6&DRk9R3ANGMWq_--UE1!5O978Z zJQeY7#23Ir5>UAO)5voo9*Jk)1pZFc3sLt(-4u0Qvex_ue^Oe@6Y@DdH*iI@8x&#I*wiU*z-JJkJsb%I*;?(IqjVD z<6ic^=l>^g`+;6O$I8E@y>-lIaW8vSnuNW%V!t4dz3i5RUW=>b@<80V_GjMMYvPuZ z)0O8@Wv4=`9=?5XmrGB9X|Ek!WbI z$QzN@A}?~amp8oc-s1aUUqu=+wl9H|2#r-KI8Ofv&;NwzBC{6 zdBiXAS-sEkna@?`VLnxR4Y@2cx0xIGH2Ui}Ue7W`kwYVSkuj0OLb2w3^Ijx9a)3E6 zx{>FiUWh&)t&6@IeJ%Pr&s)6_-5Gr|x{LqY%lBN~=Ii^>`NaI_EXCiCp1^ZjTcTT| zA4WflZi{{#{Uq{E&i9C@}EZGEb7Yw!2Aw_=YiK9_Ainpc4)vmf%zVJ4SXk4AA0 z@^WNu^u=gxWI^P-$oY|rB9}%kk3JZ=AaY5xB64N)=E%ax!ss#4g6O#D;gLG?HGB4R zJ{|Z0vy#_>uAsxr=GCHU<|uF80+VmXcr%YSS&^$F8zNWn8q(E~^2nmd4Uroo*G9vU z>!SPe`qmmgx&B0Rf_+{udS>*{Xl`_Lw0*Q~DBkQe-6qWMH&M^sDIc(c^d??w#o7Xg#l2t%!adeagFBWyYFp zGa@o6`V7x4K5Vx-(l?S8=@aQ4UClF$i=x*?FN>ZXEs4&I7DbPUj*K1@O^J4iCPh1V z&rCJvc=C@X|2SHuqvx#XlhMb$XDQc4*F;xE?}{#uE{iUW-W0tdS{}VFS{A(~dPQ_W z^z!J%(RtAeqI09?N9RP(iJldm9X%s@di1pDsnHqHlcUE*Cq$2oj*T7`9TUxt9&COv z-LPJjth}KV}{^kC;mHka^IoH|xy3yaIQ(Sz>N6*PBJ= zT62|IU@kQmn+td?uY}hEPc>7`6f-WmFnW<$8Lf;y5WO?HB6?f&R&yEWE*J7-c5&pS 
z$c)ITky(-Hk&`2*L}o@#i<}-gBXTCML(Vj(n3K(P9>I?@6U=yXq&XsbN%TDPkLdd7 zKcY*bw?r35Z#37KE6qZ4nwb%OE?OOZB04cTDmos$+7KNU-P`;Uc_@+` zsf={>y2fphMf$h#cEZ_9k{ zW=xBi1TOY3dB(FBcZVj$OpS>*WqV$D4)goawfuhAd(Zfu-^IjpnaU-}e!`so^2+;n z^4{k;f5fM3>u)>wdQ8k%E_?h15$E#V0)J`KHs(*4PSlUOR}*Kp7O59PQ!lFy+W z#3e11N-MCJ!+9RL2bUBsNsX6|JbU0@I?zd--RFb$3iV>10bGW1ai3KBhWFkU?fKhd z{?-VKA>_w%03-p_sdsDCW*|5z?ILv}8g zSYDaf+mron&x;)yyzKeY;CHN^i$69P_Z`Q)_UbPFn%#Fc6MJ!A z=d16gpY17s4fg+bTvOiu%yz6*OP;9Q+j~96z9wT|nXzB3Z2Zraurv8<-t+%%&9TgB z{~cZ|<80o34t{+(1@+eGkG1*O3jKN8?H=I860WWaZf-Z)zh%?Uoow9R+OFMhvyHpk z+PHO7%y{FP_5!m%!Ru>Jyv=K38C>%*ud`Jba!tX!KK2B!jXl9@WYtA{UCebF<92hc z#pzsmZSDzPlY63$7`J=8z&plbt9d=GdJETLV=_zs@44DzzuJ3E)63KT&wm|}*CBcR zFeaSB*R^a1^{_SYUTb7bC3SgtiT_&cIW_)kvGz4a@Ab+g#@W{(?dy$I{%f_~YnTcA zoIS&9oqx5&6qlKp;w4;HajoLY>#4=OCR@zwuEpE9Zs)qo#2m+waa+EAV#({;t5^75KXXe^=n|3jF`O0*AIZ^MrHGC^@nJ2_+?B3)s@d zgE3o)apQR1+{E#_c+=8sa^q6a)%}TLG`hIkn#QI4zjTG0r%QRa6>gpj@wcm&-o*Da ztsn7!;Q77DlM9V$mTlvA<50mBUVO1PQ8Ule!;=eouDQY+W8*Pv{4w?|bAFH)`}W2J zT5N@bGmcPfc?f%xD+-R-6XO9#9Ew;D(0W~}t%?_BS`x@ii zU!5cNzS}`ZZXpgVt$0#2mhVF*ga6rQDV^=Jo!sT^g;zTLIDVSw$Y#Z138# zA#6;a1mnJ%HS7N0x)_!<>n2Zrl}{b|PcN^2xOCZ?b$0$&U;Xwwd->4&|G)9|qi??Z z@T0BYZT)ELN3YuJ>sz?ieN^{)-Rsr()^!P;?EKN_NB->p=GPC&=)><`V+~wcgT2&! 
z^dxKG${NmF-ma@Z|G$xQ<2L%alfQRyxs$(lsebyE*Uw+nbEH2%mH8j%ZmZ~L&j0_E zyRA;Om-`d1S3iB;qMid>{(k-WtBL=>WzD)*nEi>@Pg&GM`Mdg?HaWA0Ol`X6&bF5v z@BhBt_f2Dh<9$b?cJ{S<VW?i{{S1a*TmCl_Wt?r=Rag>)A2+8>uy~h%zw}0 z-yT2zUjBO?|ErnZ=HK=&`j2-6{hRsw$A7T?OaGL=JO2ItGwEN>zvunp&;4ihx~0pP zl7E8sZ?OKhm;CGc+pYBW)gF%*uK(Z4>{l(H95{ZO>hI_Buj=nV7Wl`#zs>&b?0$$w z$({@KSN_4{-_PM+&2Mw3@4G9r`nR)RqtUCF-R93b4S44#w)#A3e`MO&T;O|Dks-X4=@iY##rcuhHm| zwg*}|3#&&p>TGnjbDFv9@$iXSIhsCuQvbEMWu_zOl{2H zKM(s)=D%oeApfl&`R9@LT=ze**zB)dlsh#+_2KOLJMRpe^Iv-o%sHfg9?5x(Js0!N z2RP&KA6M7dM?dipfFL%#C5eEWk)BYL39{GED+w))l+@16HJ#xR#ovu54-i_Tl*uIJ$y^WF8_`7`F9+<2Wl`{eme*U7kWws)Oc zIJ=N*ZsT>*)ZD3lea>J0&-c&&Sj7H{_fLyGj%Lg!7jh_^I(Ver!>iegSF&A`?R|Uv z;E_E?_8ib-K+ge#FyD{Q8SnjW`%iQIbHL8NX5A^XXUzAnlj%t_=2O$bL-RLhe*4_WKP_uGXZ+tD`MpN+e9C_@zqdmBFX#UEm%q&apEl?} z5%^cnVDSu9ryYO!43>T7$$tjRK07tG@fj>`p=Yp&w=+%8V0GHn^bA&~hQ?>G>@#8i zOw0}cm#d%%wc{7S$d2K;lA+w=-}M8JWZ+gucgUx_n} zx3swd50sbVLGl_rSbh`_k)Oo}%CF<0^80w0`~@B^|Af2vPB56)2JMotPuTEIY~GTP zi1(Gd;cjvo=D9$-BxHn*>FLoVWZ_=2J!Vqn$+)*X9ruxE<1~3L?kn5lihplomxQvg z;XS{+CE+HVF5iIq?6wY0r4;Ku{D`DkgkZ@@d?Lq*0#4FgmrARFTx{?x5Pid{kn|%?{4m!*4HvD(U|u0 zTeoCD?DZ~w9dY)JH>Mm@$My^SF*n31em(K&GWiUzVa$~pLkIS5dlT_p#IMu*4T|4l z%t6WW63l+}#xKRW@~t>uz8x1Z&#juDWWDjriI>O~c!9hEuax8ivzslz4Wy@mwY82)w_I z5-$ulo~tAshtqA8_>6Glxk^$A?r)>SFA6uFt0XPJs%O%*SoKV@^-(>OY<*PEr2Dbz zne+(ezF?Q6XTpu=DoL+kj&Zvry%TOcS4rB2&#+PAf&JdOQ@HV5rL*1dhuRUwbhdTS z()rkcFALb_qowm_P43b$;KKs8$A*?Js{^+Cak2k{E_R+0`6sNUtKCP=_I`AM|4rAU zu#f7>^mgj zT)ff$!M^tXqGjKO0Y8bq^nbAL%T4iaCg248t3Re&_dt9yJ-?aarv~B`0dHz@_dq}C z-kIAT;NP};-@uqcoUQmAJVsuFwRC^4$tmpv9ux4@0l$r>8E;8EBsau+_ZY8#TK|A| z&`YgAG@wPn%R~X(y#e2N7x8)T8GpfKJ&np0C)B-sr;8a|u zcpuCZ-fjEhYPlcYB6D9JwN0j0qqfWajTz0Vz1wDB#(5ru^T~6B|2)HC?zY@#N6#Ui z=f@9E()gjo!!kW^w9A>}cJzA1d5j)iDIbihWRB<2yP0!>KWARK;-ieoar5V3*6roS ze$2_1^YAozES@fp!!C#Mc!}Z@@Je|Su9XXMoje(Dl#6hKe6%sSDe|#6Q$7yo%2V-l z`9wTZCYI~AYX+`RoS)~eluySs@)_9eyEC!dU$Z%k6E&s;yZvY>nfbAG1uTU`8w?KxdB%yz8Jfj 
z--0(P&b~Tyr@YkgCQvflcUYXf3^$jT<9M0-!eNQ>O6+pD2MV}4FH`3pi*RSgu>C!j$>h-E zA2Qq7qbl3jK48xedOVG(v3-uC$BUSlJ)h`Nhr40c-Q%sWvHw=co#`ip%dCq3e7Mv2D zFY~Q8=jGg%x_f3@52p^E7vk`q?m3S+HNNMS%ySu&kXz5P>E5;{I0S^hdAm9@NJ~!ZN1AaVU`}n10zd+8t+cm{| zcf&d-?45?W-`S=2;Be#lN$+gTV}xCL9}#XmSLt1ZhuA3blfsSX$-PT3j}dn1eNnja zJh^u%9&V$=Zw@z}C-+`~?J>{Ndu_P!Jh}J7*dFsNy`K&@o+tNy3EN|yrS}`*#`EOf zTX8QNCH_UY@jSWr&$zdZa*Et6+<2bcCjrwl?9!)mxbZx>PbyYT`W%4!8E@$`6sOB0 z@d2_u(0FXMOP|7U<9TwQ0|f%p&m`2-?5go{R5s5@VtO41OBYZeLDs` zD&V;R--j0)Z|Q62(YE(}3Tu1&+Wn~Gq3?Higz=XB1M}}6$Zh}28KY%?JI@OLoBivs zmVPY)w#RLa|AT&Zzi8=qeZcnE+vxwG-=l%}r%g`pg1_{~r1uHL?f%u0eg^*4|0X?9 z!}JH~$;}jhfiYUr|7h|70|P!e;Pbexj%yxVrd^>SytQ|9q-Z1bk^W8-Cd$k-&AzA-ji?t!PvJ@MSY zm}PP*@%8e4xJI^fy4?EU&5F}E#%_`66=Pj)a>i878B~Xi(<=O9gY{-yB+tVf=>F?}+$56;HiFG6fL?|5l4Uh~fZY@IV+_s;l`}$-%2Rz37O1c|4vb7vMSa1Y9al#LMJK*wwHQ*D5|4Zeo_r!Ml25|3L z`~mFtsg2JI+;)Nd2=Q|HQM^)q47>cR@fO9O#4hKjF*Wycc*dCVF6Za4+wZkFU1Ofd zxq>TSIls)9nTo%Hsi`;rtC;$GehXL1n{buR-L`DS8H(?~S@O5o<-Ze`DgFblkXcWG%kwAfa`+j$dj5(V zn&zC4B1epwFhq{xJl`?oJE2H%@|jSqIQwKmiM$tHBGV@)td;4F6YAuaSapcm$C!!S z-qtOK$C`=FWnvT4<@PvRrcM(J<&Joc%;V0)WpWaB>*c;UalPW*@FuxCR!w5q?un{N zOi!$u#PFChiEXyI#qN(;x95IXxyAO!nTiveq*}!e!m3Z~5Ij?3hGNw!mfKA#Q+zmH zDQDp-`4GH8rUsLgf9xn+uXqmLCFdGb7%$U*3KQfJI88nRvkq@t3NTmCJVq22$vj3B zx}13|D4e7C6daZ*PGPxxGNy4kqPlioh1UgK}aJLScO zH`tc%#4cy{&rvCg--EN{dvUJ33Kz@w;SzZ*rWRiQ^!cM)Js-r&6n_Xa&KvVEu9hFc zTja;^HkoauRQ`A#Uy59vpTfzC*WwJB-d^PD`~uEX{3To{zl^8JFJhPfYj}a;8*#b( zCa#b-VVD2AxK8oSn4fvu^&YO5*`}hM@`t!Vrsp4>Ais}O&Xwug$Ig>G;l*+vTp=HTD`oojv8rd>0PJ!egg0u;5Ukq9Wnh>8a6XQQ?YA|L zI~XU(SvW-=fzxGrb#cBt8W+krxL7_EyPD_Wu;Pbfw=KDNrQ+l8dU*n_lP6)9GwUgC zP<#qcO(e-hm~Hd&KNh>($mO^K#iwGI+X>j^b|Nm-7<%tf0Y+Qm%6sMNQ^D}R}9piGo z5HD8z3cO0b5?9LCV3+^3c(dY*aJ|eNQ{&{DaJ+moPL`Kqw=K8fOvP`laE`K}T z<$nuio4x$+!Y+UM_EeYiYV7i~@tqn^51;08`v*=}oL)UGTc%e}E0!O{Ze3NlT=B=S zTURw+tN7Enc25uN&+kpt-2R?2aCn6Y%{PY6(=Y=MmAUP}WB5JjQsUHM;PLn}oP|%! 
zHlfS$zIZlXfRph&%s%pdw<^O6aTnsZhfRq6WwP--xD->*f$Q*9I1g809*gZSt^=RN z>{C;K>u?$7_!;;%X8)QZybWK6i}9D3eQl=UA2IddlMc+FSbRO6i4*Y+cot5@H{uek z-|5EHVEscyE(rx=EMZe|1D0Hzr$JbPV8#?J)WlckC=6N`IGO7)L~EsxoOTp z>&a(_a(GI1bF%&u!@BJsc(EM8Yvm}e^4*MSPTVNR;4N}2re1?KuwKnM=v~(JyVm=$ zto1gFXFS_ub8gm(Pp6KPTVri|vo<(G@dR8Tx5LG9dpw=>ew`X}a}JKFFwOSy*E={~ zX1#+u`mfiVRKio|D*|3a0kjOM3;4q(AJ{D5ZUJWn91gfP;7^)7G%nyC0Ur|Z_<-%s z(K7VHfbR{M9^#gvft-g%o8rUv>D`Ri#A3qKb69F?yBmEEAf9hxN|T8n=Epr35HBF! zjrdH(t*PDh9#s}Ca z@k7FmuQv_P!vk!T_=Ip{J%=BIRnOtmvFbV8)<^Xmej!#phhL75G~P1&Iy^~Uf{Wz4 z@Nx3}_zbxUYZ+c2ZhY-&_;>hF{|Ccu9kd*j7V!9hZ9ZBKdNANmn|yF$!1mbCa`1Tp z+x*&#QQ@;;-Ry`E|TX-iT}DH!%Cc z%kv$)UH-tBQxfDY*yX$xrzrj*&XBj^ESYn{Q(R5Hz@mA}I4<*zaM zdHL+XF1Hu)F2#3pdYCMKk5lEJaJu|6&X#|{)8yT_O6LAMvs(Tg*T@Zc1NE=){=Hl% z#~lNI5a;$bw-zDHIL~p04_cGCkDQt*x4^}6OFUiP2hWk2`_#E|YdlYGgO|w(xK3`1 zRi75^u*Snb@CB-n|vg8bsLX&D?Sl7$dh;oW`B6|Ou>mVJ@AY)nO=8B zx_m5lH9Q`>ZJCOTH0DHHET4qkcFn+UyH3HS8gm*hlTXKP8_&cm6`zf(l9GUI5@ew>naUMHnyKQ_NmnqKU#ca2YPvMn{KZD&iK8M}5 z@O8G^mKU+xmJPT;w|#}DF5=|ZaC3PhPL+8qD9MyJ;Vk)W?6!sVluTFr13Xh^oh9?+ zk8oK27%!7~TqvoQKgHzj-H$%Uy8|Fhy{etuSh{4-9M zf5F-EZ#Y-}9lLFb;8}jWCB6Nu5}AH}_7Hh5Tqy62i{+MB`?qB)JV$YIIeV_$7MID1 zxLl@hpS@B}!mH%YShZ@IjB6C%7jKeNF#E&Ss$~znLr%pz<=)1e6EF9{+{PQfKW3cg zemGa|kMre$xIjM8m^tL_jUS4K$ir}!d=So-55a{pu{lNZC_GDMopVa$F?fNThfC$d z@iLjb=2XbzaHUL+bE;(4dG0)UGIqC}f>$bjGk*#&O9=&KjFJj-yM#?V(U*d)_eFW4+EHRhr``BqF_ynTH;o+i^jFAB>mFzfTi zP@9Wbhv)l@nb#n*{)-djbvRL`W)}~UAHrGkBRE@r6c@^m<0AP9JWHlGUR)wSjTgwz zVwW3z^5SKRzkn-b`sBrx^2@kNe#4mgY4V%ct*`e6}2+`?TQU@mN5(Ch??Tp-iy7P@+}-3yBq zr`IiXxlx0KrHa$*7P@?xV_~J@g~nXzOs~6glj8KcE4Ro;<83nQDdjd^KF8si@-#e4 zJ`smydR^&anX#p=R*b#M`3$^PW9WNV)yQXIS2z0JRrQLWi+9L#@lJW3F;_cZY|J%r zieG}0PJ?ufVx7HM=HHzRsAkDw)1lRwvW<$~MYuUzyA8X1rT*`e9jvyxf@U z*k-S871-5h1$OmWiI-^1-Po;n6?QeDH(pn(G1TljR|k6Ib#B|~hl`vmjVVvi_=jc7Ld3br!56fA%=VvfK^UV6oH_G(D@=Y?ex!(Czyj}6vu&V>Tk<$6| z&>L?^k>AE?@@AYaZ!zXZ=dCzf@egsK{4p+)Kf!b5&u~~Kmm3$z-|{r$F8MoSZfY)5 zvzwCTAF<1gu{XK({(|!~<~N-0J3eI0&8km)3|4*OV{yI4aPD_AdDt4p@6DeRX}$5B 
z``wbDIKA$cM47KklyiIsyhQPic&*$Sua~=Ex88j*b@bN7b}XTep6PE(3*=NhUEU9? zZt-b&p5pYqrORaY!_t-V0K7`3@7?M=1iLyMh+RIJc$3Bt$GhY#+#ruM<~Aee7{hA) zoO3a?@bVvn$HB4pMX>4={Qq91+#5lo~IkL+<6vu^{4MG=QiG$5;P(z6CFniLI!W+0iRD$ozc8PMJQrVwYUO)5yv4T{uNviSy)palX6?&yneq zcg~d`Fy^l9@`JcRe%KhM@^gL^yKR3Arzu{I)8%K4xtmj3>xADeAG2^UhX=Y7|9E(%s z7RKDCZD~ahyl<7_+~B^oavNMDx5b;}M7&w1@7=F$Y1I`ME8Y!HlUe`$bL5`5RHpCU zUnci6<{xZ>mvg!?>k{MvnDu$_fjCVbjI-pSI9txd^JIG6x^kIwzjZ6+L$KT45qN{* zqwq$VeYMVQ?^t6V(7tPR1TNB;BeC1w0$if_M7%(zzdhi#_bBXgV2%f>H0Efm{n(26 z9&kAvhj(fWwR~W=d=hSuPd4VkBzXp=c3!RMfe*TxoQ7Tgr{n1wb0(f8)At^9`JaQ! z6lcFZSRtQ_E9JS^<$nR*tT?rQaGN|I*UR+4hm3p~cKh^l?DpwG?DD5KK9sNVSK~tY zT3jU44Perhba_6ET@4>G=HYmae+(zckK;`FNt`7= zh26e>2G3OdSxmjWF_%j6Gnt^6_GAb)~)%JkkxcFCU`^JudCB~FpQ#;zvh{%C>X z-(XiC=6lrD=X+eLG0gpFxlA8^bg}#^UN8TKU7qyhM>i`T!CPc{@}mvD?K@Cb#mUWZ zb9rx^Dif>HK5b2}uF6+D9xMOWt#FCr33#sD9?z3IVAZyDN4!??PFVZ5br-DsThqg< z>NTbtre1cxwC;g-%6*J^e43nw=g57r>ef0PmnnV#cI(Z+m5L9>Rq}9Ss+r4M*FjkO ztM$RSK=Bc{P#%fhdPifoy<>1#W9ZdSx}5WkdCJv%98S_0=6)(!o`BtYSA78<4z4XrwWg7DargmNq?;68Rd~e2y@_RT<-hy2X>6tIP^|BvcF4UM$agqFm zF|Rmp$Fmgw5{Km-*yZ*OUa9y_yjK1Zua|!{=G7FL9{B1InI8CRo*cpXGClCsS-#uE z;5l+LJXhYwnAgb9)}c*2Zjf6U^Lm2J*Vk2(Hf^!i+lJWdSsFusdp%p;mwz&(oZEE6 zwTgGg)Yn^APfSfc_rh+yz4Y;z!X>&?V< z8bg13bE7=Um`yI{(YR3Y96VD#6uTN8hTVF{;>8*>4lj`>8S{?InfcyHR(vuhZ*O~x zFu8g@8oTutW3FDD{`Sr^`DA0>bve(#^@^W@cgv??SHshd+3eO!PurZTF=yd4`FvyE zb2-n&YZbo$*U0m*t08^tJ-6O4-k~v<XiyY+IexYcbheQfJ4jd{YDk6g~xn3{U?KZ(oa8eA?vgWY;r=SNkFzlgcLciUHu z`Pk)5@A^1f@z=3CW;plz*wyeY?AH4Z4r}~-c!5lzKFN@`;#~Pd?B@9x7c2e=o+p2X z-8|IelTyXM<}DQ5{@sD|6#oXhd8pZ^(-i+6yWFVXr*58Ku&WjQtv*xZ>2LMVa#V)a!Xtw?}Obut?^97+hFCE&Y#B{gTiHyLtA-vowZ&__@oi2X^zM;w2i>$Cxj2~hP% zZl1w-nZ^t?W_zAI3~Rq6WMVfDy>a^-jiFB4U2Y?>n}iMJXd3mz%I8Vv72WCUa2vK#(Y&E(=)%CE>FR3o}=+R#gD-*x8tyzhg`l|r7l<=Q6!&(8RyNZ%dp!omt!{%ef67CjkyxL+^)uMo@;TX#*`cLEw%7+ zqpyBTex7f@Zl0TPnc_ENmm59yTQ?6i{Rc) z>~bUD@7z4}+wZD1<{!rFoGGuvZohE5oo=3o@M4W2ubnQpN3omdaa^M@^xp4h$xmUo zU!KNpo@enAjd>2c++M(Lo|kZ~#?%?}Ly7zCZp7+}_4+p3QiJ 
z#(ZGRk8|WL*zFhk^N((xZMZ^X=+8g8-0HEL=L<}J-g>_>X4hPq{=CcWmmS#6^BrEP zF*~u#?MLk9`5AB2m|ukkZ`twh2o(Nv0G4$u3lv~>vta;it!<+muZJQhO zb6BQ7|E&Gewk6g)ZCl~B8q*ppx3+Dunx?$UliMlZRsM zm$t*Oo97^0r7`s9UtMk^u$yNTuGg4cV}2`_>CeAuzqF-4|K{d799L@${rNYS+c@mz zDZtw`W|A?#FP00j+b@%`o2LlZXbk=Ncb8i+cJoZdJ2d7*V;Yvo^ydb*U#4R>&ndW8 zV`gHP+v(WNb0*%YF=rX`$1?eB?Dh-&`42bGd3b}yoR3{@7h*Th#dw#-gpG+*$n@uk z+b{Izh@0mMT&FSg=ZMSgD(vPd!@D(RkulMgayfSU<$CPqS&TPo%uU$kM$Mvbp4)JP z#@t~-W|h1gSIZUH&2tytr1(nga=RD1dDfUv$Y{*{c&+>o?Doq#?B;n8Z`PQHu*>Zc z6XGuK-*+Cvaq<&5MXts}lj1+(o$@XdigR`T2`4GO8@v2}#aW6s;9NP1^L)1p;puV=R-N0$ z;xfhe!pgth-gv#@Epe6H3RlZ*@MbvytIqA(`hWgodu+Qzto++`z^NLagwy4&I78kS z=gZx&>fEk7o}+kA?DFr0mnhyFSIB+wO1U4dk<+p2-0lFpUGWU;@*ikId&Mh01SiVF zaFRS650MYTuFePJV#P;bm;Xo{R(v!rmB-*R`7pdn&cm+GhvSWk=VO=uIJ`^o0^A@^ z^54eItN&!2toRh{>UBfC!dX7ozKDPil2vF{^#QY#V^D~^2NAVo{#6s{8?FZSLaKyJ5Cm0 zm;V*GR^v-?oqP@6C|`?r$k+LAi|5sO5l&S62JG^`5oaoXGtQQm;#~PQ?2eOV*wy)V zT&gkj{1z_%6?m=U^!yf;^1Zl9UX3@&^#2yF&iCO4#p(Z?l6n2S-8!5i)Bjth$q(an z`B9uFS7BG@$8d?_^#7JF|0nTc#p(Yom&woJ3i)|lEx&+WonOTDiqrpFy8K_^EeD$` zPXFI0L4E@#%5UKec@uVZej67lPXFJ><^LX@r#St8p9S(qxK#cauarN*uFjw0I>qV# z`?&nSz&jPE|L?O~{suS5-|?1-N%BtY>ij*N|Hmu;_OW=g;`IOcZE_1-FYjZ#e*mdTPsLU8H0xWUpWyO8 z2hUZU{+|$*FTe}ri*SWJ54$?g$1Z>Rf5K*sxg5Lv7h;z`J)d7cy?)sKDx4^n;UxK5 zoGF*%Z23l<`Ewkzd3agBTz-XzoW+isWX`E7T| zt4ye!t2uq1b1#2e==1Gd{_F7&#UH?VGCjYYJI)`*F8@bxSYsZ;r80fKU77q8UL`+` zt7UqAJ9nHvhh6@)c$db|=i4>N^!fHiuEWXlt2jfZ=eKutegnJw-^4RD=51Ue)92gI zk>AJV@(0+}nV#RiO7V}d%YPf*qWGt{UZ&5t-!5-Ap~N`(OYG`Q&rfv6`8U|*|1B=i z`0sI%OrK9ImVd@`G>U$KeY*USgUwfTq)D%J5=7f zFOv_(74jjtTBgr;bafty>lGi3UH&;H)Tz1RhvEd8-rgxuJ_2XR^!ZM%&f{>A;sw~{ zKLO8Eybv#t>Fu3L<)iURnLgji)%jRlr}**MnYLHJcp`;|4KA+_3 zd@9aXd=_^3pMj?-I^!B6@`CMEk)8~_1ozKUWieHFb{uklRieG}a$(Q1K`EnED z=|QgtCN99P&R5_x#p(H-UH(_$e8uVcoeSkfxJbSp&yjDyuFmxT&dU_1=XZAb--2rt zzYTAY@4$63J-_pIc?EWLzSD%dBq)A2cKP3fhbX=pXUS`EwoK3OQY^2-uFmW60>vM~ zF8@lrO7TbWdiil&Dbw@2Y?PnGuFg;4-HJbhUH;GVks`^8KaW%84LD7v=XcGOU%{@< zb$FKIuVa`0MqIA=TX>254qhhH^Sf5b?_pPG`hVALif_d({|`+lIZpA9alBlQ6Xefv 
zy8H!pb*BF(7b^ZWcKPqXa~1y%hvgse0(lp%kblCi&h-D}4T}GYUH-q}9g6>fcloB5 zhmv>8F??K0q8y7==MMD$eX|tb8!P_~&G9tF_rWve)_9hjfJ^1JSat3||KE4L;vKN^ z@6Zu%QoJ+XA}8Z*ayQ%{cjseeROb%#|8A*@_rfm!RGg=HA6y{Q^Sc$w>9|Cu|94ZJ zJM_m(6sPBRbNLU#)r!;eyVc6{{B9fMgK)h}|L^ANoW;lA#4AqE@9y%?#uHpnboyX#^;`IFPF8?F(O2z5<-Pg+W{O;@JDY#Ci|95wFF2cJMr{{Nf z`4^i|N|NIA{FD@#o}ZE`PsiCZ{XfOkc?O=TI6Xhb<$oG3Q=FckvRJ0)r!0}r#+5St zKgHE~4&I_TJwIieJlBMJ7@3~mqq%$uj+etYO{V|%aCN>67bs58@8R-ai03Fy&+jo$ zF2iB@I=oD#|MzfpF2}Ws)AM_{{1@ZxiqrFZ?38cCyX0ji)H6Z89lJW;fio4S|MztH z--(MAr~mhyF0aBf?joUW?Z%PXF)e@?VcPDo+3JxmkV$Z;`9;Zuv12>gDSE zI8ITV{@=^x{}j$uoc`Y{U#`Uk@(XyD{33RB-hdY?PXF)a@~^{Hiqrpl)yQw+T6q)R zCclkco!>E`)aHuQ|5IK5@8fjE>Hn!iHn!N|DR20zeL69|NAA&4LC)P;4C?cRcFqNL;Fql;~nY$ z`zim9akx}*`u~3Aa!b5ej>qfeR#%ego#kHN0ahvHR=)BpRp{Kw)t z#p(ZjHpvBevpf;+k|&u^nyYgmPF9@$pXTx}!r6+`|I_m1<8i(`4bPNMz^=|G;&R35 z|7kA&8Msn$`hQxrJPX&zXW}jLZ0zb>VnTi66sQ09b@|W1X^PYT`)0@&;vw=pTp(YJ zU7at%a}}rm_jUPShLw&$oGCBGuFkjOX^PYT_jmcHqy){*U8Xiqrr5&5>*HT=`kNSbh$>I@jWA#p(b3 zT>cyIHpS`x{kF@m;T`fu6H0F`zkywy-^3Y;)Bn?5{_o&I#p(a)#qt(BP5uzilRv_) z&fD-x#p(a)F8_MGL2>$j`bPOHyh+}Hcgo-JHioXw-{K_2>Hi0~{C~h%iqroO$dz~F zJemG~fU9i-b~*on*ZN~R8N6N&Vb!x!9IjWqnF;k*K0FT*>aTisYJt-Hqyp+dymJ_yL`{6A^y`gFMdh_{yA(~G-D9Q(rDfbx5Zw(hvYUsYc|0j ze=6(BY3q+amAd7$^W3yu6WB(#zb0bF*w=tM@mSO7$=KC%iU|$O&=?+f2D~9VGgD>ye5RXo8!lD+W4uBB1n-dRO=!5w^D~^KxSVqY^-&#ij;k=89%rR9soRXO z>BM$>$J^=t@n-9jGs{2T8s~{HXMyI7;#t0vVgioE3lwjLSIK+hI+=bxTsbGTG@*mE z?Md|TgS1bRT4Ogq{rn*9yQH>Q^-OAqH)_sAyh-kWx67TdTUU|^9UP~4XPhK=#mRCq z*8WZEhS@RpI85q}we3kMxLENXc$&N)o-6mpWipR12d~}JIcKmhRL`7?sDD)TU*Ol@ z);8w~zy6K4&0(Gz%`*nq%k=Yu-8LR!LRrlfABWTA0-Pb!8?*9d&dIY_m$!}d!>l&y{b+rScNIM7|ZTmv6&W^6l8|qdRbe;`Ff*Y`$0XJF(kF`q&6}9NvRd zH0ECH_Ty^o@?3*mp7&wu?9F*Ub~&%d^AvvoFOVO?)p8}?BtL>(-5$j|6|cg(Jc#F(=^~l}w2PTvqCx3*KWqNFOo=lI;cKOg(vpH_P+J1@Me*YR*D*g@LB!7pu z%R5cT-UYnd{(xP5e!@wL|BN%`-MCQx4bPDqaH;$Uu8^a+(syTrH|^=1?c5)A-^t

4CoScoDWH29DL-1Bml)j7>hy^zXm+ zKA1b)zyCJoK<^!;I&@Alq0w>5txLf4+tKNP_z<}padNPA>n!IUlp5-+IdhK)8|L)4 zXW4fS&Po3q zy+dOP@osstu^(4za?fJC#^heKz@F(C{R7YNJ*Guk%jT!D zuM>p4oI4j`E0iP4_qXGZk z_3;FW=)rI8{!-z2)w>kGwzT*tE;I!C~V*k7L{jyuXbSKP+sF zebRZ`}D<^&A(*s^>UcAJuc5t&i$Ct^%u` z<5uG%jkk=e#FONwF!y`AjC(2E_<0=T-oj_tDDl94KeA<*H+uEL=Ex5CP}$Z&%aKP0 zd~U!tA1z02Zu0mR0Us2wJvOwAUly?4kBj{ujJNZY$lI}&0=ti#?fuC1bC!a!;l{cZ z6k^q?U@F$JRB#0zVZ5baE!I--MUy9V4%p7K!XGr@#6Wy`!0+H1<1G`u!dfQw33wRZ z=>K4%y}xLgctODH@t6J&CO#R6|JdY7wx9g!kD1gi5YG$qkKq+B4#aN^_{D$&{iHCS z+aBQGwlKMAOyQ(}uMD_0;J`eS?KWzed~v{!2K-Hvrv#3xDdg`yzhg?Ek4?FjxVCFb z;FzEC5b>Lhw@i5&Ynfu}pY4Bhls%ru$k|xSQL_VH9q+cW1j zxp}^0LU@53ih=N zc6H7s3^A-7+OzN#=W{f!CDWZ(TF&*F1fW91T@7&b8(tSA$A#rCm= zZ8>@%=CLNkdXLWX&nNBvJ^B#;e6n%9=a56Ca+rfR%I9LYf6v3a6+hpE#>UH>hmTE> z`5kub5cxvP@#fY1B3vxb!{qG6FUAYxOE9^4@%ebI9L6>BrP$T{GQ3^!%W;Fub8$x` z$XDQ0c_F4g-uzeMe7O`)ldr;au&;ylUq#&2kl%p9Q$E|mX)i{$m#)%HQ`w&h{$jDKiXu2P)ebC29C z@5FA~f56)m--UO{{BAqm?bBbe+xFjZuEx;+#}~;Bc$WMJcKJl`V#Rq}7{AJQEO*)Q z)Zysk+4jKp#wHS{CiWPP?SPeMY?pwOO{gGVWA??Fau1v>_rf}6V$*P);(c+wyuS%e zP(5S&;UvY=ak4xJyPOAOm)j7$O=E`Q-7=3O6J2gRj!aBYoX3laVRzC0e6$n^h7+841CvD>amxK?8d@dlax zKWT?d&o68)AA?inVw^2c#p~s1*v)eS-mdsbxIvz7LX+LLoPu4RGjXZLoQhY;r{fxV z7S??vb~fIucnRJqpJhT*5@h<%lr)*1KP5}1&rg{q)7z&kkT1k;AJN;VtW|s-cH4L{ zb~(`7r)<%fFy1ZG!;eao>D5QM`RUb1EmNFceN?qfe?DrXd^L9S)02yG6{jZ`70dMG zqIoiXxM;C_19o-03A_An#x4Ojs>G z3Em<;8TMs4HHef;R~gyxP=%onDjy-vgMA+8gUU-Krw=Nx6w~jNFAxuemx~9%tHkt2 z;Qqfys=#MH$%f@plJWqTX z+%7JIcZw$jk;-f_{Z!>-@kF>yTmf$o(`Q9{h@-Htmnt|R`6PISn7%B!T})qA)lWPH zo*<@Qt6C(cU#nUtt_>oSGR1Z9P;ounET+Gkv`kEYH))Hwf$3D1n7(iFXmK1~AZ~)! 
zh^GgU>OedLE)vrZR>#Cwz;ng3;d#{QX4;ABam;(P2il`)CszOTDb=mw@53v^ts(va z?ArrX)r!0#o%ZMNt%RJQbNO(VS34Dl{GLJnX5oCOy@V()qUHdoX9ea#x^U1X6N2csl zy3YlXsXfKa9g_S4oF(~Mc$j!STq%A9PKaNH7mHtmSBvQjruwvA4-SIaQpvZ#zHPh%ua}&Dp=Ps~exbIz_&u1i+j@T=?klE`s6|&+PJU|9mF17%N-_OH z?c|O;HjBEDuEyq~+vm`25&Fa*oAEqMdl-8TOj(WZZ0vdP3-BT(e262|0*zuO+EZ51C9M2wr;$`6GXiN6Fd6ZeFrpDtg9H%WdNES-1h6-4TL zNZuRnCq5h=Dn0@pEymu~t1i0qf#*qn6x=308eSnj23{woKdRp%ravMYmw)=BSO#UE z$={k+dB@Ic3(ViI_s?k;`h{3;g&z#(i_e15r`5wbu+QfRxJL4?!#+Rf!#)oe1d(Yz z4;R8&3V#tiRE*u2=HtQsPivMO`#-HsjGdoG9osx$&!=q=W49Z8{MhP-K9X0!gT>hA zhVf!-a)VC`o7~`aiydxQ6uP%vj2&)RFUAfxY!_p18+(hJ;1V(Rv$0a#3@;F4KO2{d zFNar%v89b`#0hw<_)2(__}U;6&l1mr`-`uGsiU#CxWAd3@MXCk`9g)c0bU}WA4HmH z16GGO!ac+{!9LEL;R%v2fPGoM3on*@A-qg{3+&5sYY>_4b@Dyf$GHghdBd(w_j#i) znC|1b9p0=sTj4EYZ2Jr!=bdmr$?t-F{nAIw7$*5WaGQ8Byi`noF=LJR{vgsEh?l@V zpVbV>Ha3bhJ znXG+#Ow9GzmGBVcL*bu`=fe2>yO{Suq&4t63ZIUgw8jN6_oc0ec`w%Gj3CI^ z03QRN32%f?6mNpRCf*F63ZF%oE$|>`KaEWoH`v)vQAM*?X$dougX=9f-4CpWO4%GI#+S@|nC--?6E z4e$lxx8Tv@ZSbYyop7z=XsF(!jKPBGlAC%TE*=K!6KxFf_e1>C5Pz29s#8K-7UJte z{A`GKz_P1VCLWc2Qa4!Ton-29U|>I!ir~}4q4+0-@-}HLVf2}7;?Xlc`OC0AlP?Of zX}9g}hsma1^qKs-5SzBQ&izmwq~z88LtFvBqcE|M+|;i=)la~mxQptL4y!*+B-bfT zIhZi|Oc@^HnIV2S#6RKQlidwdOxw_BYS$1Khj>PaL-9;CWz?s}JQtp)gt#rlzYa0^ z_v@5u4@k*thr*`?_H$YJut>1f*-y)lz@zDf^47LG zoh0L7PBd3#i5vqjR{Y0>_;^_Pk9-AQt1u_P8^kBVs-s9hSmla*HHci3sWAQFp5l{X z=`)fIH%p!i&lBgti^Tb`&l_{AYnDh}1TPmex4NcXJQ!Znk!$Z@y_w2e`%qgDIb6DW z3O4;C`Kf&#J`!$#e+~D6vG297!$-k0;jIY{PJxYXKX$q`daK>(bZgSB{lx9BxwUr? 
z)Mbi^%e0e7DS6u{z8KymX8x6A=3n3Py1ESRuP|kBiFg9+>#iL3x|#^DP#ETG-`XIq zf_=Ko*RIWwd@|fmTn$eUPk|F+#+KK%i5X8`yIvfFw}__&k$I@amazf$@r&z@LjNjz zT`u|@Mc^Xzb7sH&BNwyoy*FF}Ujn16x~S8w(MesE(`_=&>xrN6HZ6Q;^YX>a>5|Nx zZeFS6-+`OO3*m)g=5+Ixif@Hii*JLeM;kx$yLp?%cfh{Beh@^i^W_!SO`?p-TOEB( zb!6w~D$Y(gM&yh<%X#p&qq+v6!SJ~6j z;e{^#w65?faW{Cqct3cT_&};JOMDQVFFqI^A?^W>7as!0#D~JtNgD4XZjesWdctaN zW*zJe?FxSw?DNnI-XeK#*yrc)ATqy)hy+kVp~$uA2cH+vnH z!}*e9!*BNaset2>SHcU$QFyr++kW#pF*f|>^B0i@yaoi|4@$#W%uB#ovZ~xv=5i z_H~Wj{bH(&c-&-So1KuKj%elF1{{IB~d$+*z6y_axk@#JBxp*t=^Y$L>GW~8H!Dv1vfEq5$HGg*$HP7k^k28Hm7M093euZs1NGVN{hj3j$VVc72KjlC8~z6J zvLJo>U*Rezw_JNdmyF&)`Zd|euXb|FHy|Gnq+dG$`JGN~`CjDBLHhiD$X7bKM;i2S!sZuxc1YqDIqy6{}I_7wjGCSCI!b)`;|Or5q4lbkwjMW;3lb=q1ZJ`#?J z`@plr)NgA-%=n~rftdPjZ4*~6;wMB2qzX|sj)5d<_^>7K?EcvCdFYjgWJju)8 z`C`TZKUgKMf;WgK^9rU&Tn)#>i~;WS@zdAeIal&#c(J$z_PV0)zO!BO%i;CnS@3r8 zRlI`97GDkfc&>q$N`5Wu3bH(%p_q2)khgXXahNb6j z*!+7oOMWP??DK#PUpz~4p5?_pKPSSilJ|q##V5h* z#izm>#5wR*aUQP)eSV5ypPzwnUxg`#$BPHS&EmoEQZcsuUZ3t+u-C)c@OFjyI;?ip z4O?+vmgL`neOed6BPGXH+*c{aR@}EhjIFqDnHXDfpV#wfc&+4P;mzXl?scbmzPe#g z?(Z)-_T>JUcmnKoSPr*IUIDKVV`uL7aYo@?l4En0^bt>jeg3gOOA?Y#g%^us@DgzY z?DIbZ_W5stw<^qR*z0f(uV}kVekD9ijQ#oH1o2#Wkr?~)!{y>@;Vt5Mu#f*b_o~c8GcVOQJz6<-b7Q$YKx4^!Ruw4(VRXo_P2X>0FT@QF2{ve1v z7)XvCd$6bYE_kRIJNBS2@BOeZ?*p*U1NQAfU*3mdU*1RD>tKMp6vE8)3f?BUWj@hZ4o{9|~7_!-!j_b0F~FZT1H9t!g;>~;P;JX-QKu-7@Z z^r5)qKZh5Je*rHNuZ5S2UxL?&e+6$AuZO)(u)Pm$mHgEp@^B!=4nN#aya6r|zXn%| ze-Af{H^H;SZ@@l3Z^A1i$8JBoO1v3fC&q?9(ntI!c$oOl@M!TnaE+L`!y_^A`|v#R zc6f>SL)h2FNAPyZKW1q`hIl8ODgFfZdH5Ub^FUv)%%{5xPAL56u=Jmq4y(MG8E~r$ zli3AcChi8W6?cbK_Dtpl%eF{<5KA4DpUi{d?vm4YJenmw1RgB@5-j~>_J+qxemJal zl6eH|^LZrf^VtVps_;j_YsB<5k8T$Cg?--WYaZ(<`Pbmy;{I^H_!L;W;*6KbWAh}> zfmexhS+>znoDWYH7s5XLAb7sygW)CO)8I|w)8Sp>p)CK%7M}sPiqC>KiqD39c>18n z1Ib6i{l(vaeLSP!QprcdUe9A-wSi3fm&X??%s6j#5c1{sHd2IVTErq--C-JZ-spx?u5rnz8Ib$ zz8CiS!RD`6B>7UfRm?bc#WL~3@Otqh@J{igELG|wege)CKMCiHSHM0GPr-8~e;W4f 
z5WD@9e|E6jPqiz|bMOZ7YS^cX-F|A9Y#w5p1u8fPZlPedBx510V*v6GU&)CD2URT(|mA+1Q!M;whZ!5RBd-ub>t=uZc zraj$Tyg#h6>~{dH{O{Ktu2dN8+tc&J2f@n2eh0(rBtHb+AwCS=DelFxF&{tnZIzE7 z`?ku*k9}L^<3AGi@nh3g`S_26*DKEBVPCGkLFC8XCI1SXEymXUxKw;1+$?5X{o_{g zNpQRPR9HIPF9+Tvc^_}n1!6JJLo;XaaI0Q>kagcFjNz>CEf!E3~qz-z^0V6V6FL8LuHa_n1sU-1OE zpSS`ZCY}UWiYLQfpV+*0~&`S4`%0(hSI+pw>r@4(9>|1P{* zd<(o$d>gz=e0vaizPA|L`21)ww()sicXz=){=4CJg}DdbEXHQ8&J;fg`*L9?SND?~ z+qim|_))l0{21)ZwH%%+Id*dOVlj4d^$PJ*@LKUoc$0V)yi@!Wj`8v3!e;)|*WGh) zTw#6+H;Z3@ef*4pf9iGhVi5URpfGFUzT%f*U#{Q4!z6zNo*>=;C&aJ8zFgSYpS4N; zI@~VC-u`U8_zifA_)XZ?*XAJdLQl!Jz`k7X!X=Xb1zsS=Ccm&yj7@&Q$NvHBb@gEo zS<^>hcECf$pTNFcpTZL){|ufb{yW_2c>e%y6Q{we#MtmP8^v8=wg3IQ1(6psB+rCZ zU;FP5XG@L^e^KSyzXx0+`62L9aZh-e7<>Mrj~{#fbLnb-?D@}&6b2jq^YP+iV3lkC zEI2OtvG6?caj-8VHvH$yCGQ7IpZlK#`*gn+M1GO2FsH!z;%wN*p93$FJQw!5%7-^e zJ`mn4E{0WK`wt2tYr9K67|s%7&)532PKQTIeimFKJ{$Js!k({PBRMvF?OO49LF6SL zKlc13ALse7*BkcyrG&zl!fM<5e-rlQx)@%nFxd8&+Qp;c_2RMcR`I1lh7GvjsStE|ZKK?3ro#fd6U+xfN^MBP_TnG0N*TcS!VzAoz{?p*m3ey1lwBoQY zV>7&1VP?XKx7(`z7Y26I_GGhN<9;`6f|CfEaZiRgv-3GTR%lJ1h?926N5cy5E0rVZeRUIAB1>U7F^cNcx{{i$D8&sA9_Jijt%mMHcaSwQ@ z_z-xV_)vICzXx zCVd`|9YlVodN~03?*ywe5Hat(^&w-bSN5HGaUx$6(&Vx5gP9L#xi}->d@`ns@37jeZ z2HaOX3hpN^g@=jhJN__2OyBW`xOg2{9=;9lR2ce&O+G*L3!6qtej8jP z#?Ei@ZK(~OEBW29PxpS<>jc}rsa;{P?VC1=AB26|TN*^(=qdR_uy2Qtz+Ml_;1LS* zDD3mI9QOHn9QOI4FL=Y}hrZyA4Z3$F?8}S&e`A;A*#9?sh@XS|i?R7{4i&G4z5ag+ zPnP@z*r&S&UL-mF!kb?I^aXEvJ-h_3SD0VIK0ovkZ}pR$e&H=&-VN|b$$tm?JkS@s zHD7XU{#$M0x8TL%w_%?LZ2nuDCEo)3bl(XgoBK%q7uf538(b>+4tTT}+rBw2#4~%1`$+So-gt4y&%acZC-ye0O-E z_#jy6c0U+iCpotL?e*d>!`sEZgUFv+#fQWG-Xq}El4Ixpv_*_<|I=3SaY5wIWZd{O zyB`nt5Pt>kFYX6#5Puc+_ns6)h|=BrHMp<%6u6%lo4;j*xDXyGE`n!?i(#MtL9p6D zckKB)KKxL4g2JBxtM0m=2``fTY}m&$5?&?wMR2DUO(5sOBD}xcWK-mstVdA%)#Hz^GE@I9ZmAHlxtE8txUvoeTm z>mkPOZR;t17WO*%8QfoT?AW%^V(itn@nUS$wwQQ5>~-}j?CS-)v~9k^FfQHJCVmrM zEM_daZG{;7vu%|a`?GDGcssma{2{zWjLq4$RgBGfKU4fS*p~%6^ZsBbKM;HJ{!lUY zbU=Lg%eeYNC$;SJ)iz|zBkCj^lXRHp}?2>W!gIUh_= 
zcx=lDmEvr;SzG|m5@TmRSR}^Ie4siy5IggM*9o@e1J%)i*p?4`9uoAaSh_k4Jn%DQW`(=Im&e{APH*m~q_>pXXcQ#gb!xcB~NJ2Co%k ze|C61-wAJ){4RK>_&#`-`2HaBQ4jGC;lARB;eO&rVBZdx!CvQ&!9M>_!jlyqd-aiT zhfl!^B!3$A`EQ4pNd6qWO8f%s>*Yn**UK;A4GOak-YR|t-Y))a5cz8#@kZF!%O7E{ z|F>YD|F>bE&v)U3?!|Wfb&+^0?DhN}?DhOUyh&jgQ~lNJ`6GCjM*yjPe_h}&c1+dpSW35kp9xj0= zD9jjmq1ecM{>KH8T|Ffq4-Xb!3Qrba2G19l!#-~n@UE2nGhf%yAoAHP$+3Z-`8u5n z?~uGEh$r+YoTLSb%z*NW%E+r>8p zX+cl%0@&B%EwFD(i(sGD9dL=l-wAu2FNPD6-wXRZ+y^g_{C;?|cu9~J$rL{T`?MZ` zvn5{!mx>>So5eqZTg5BjHt|Z>m+>d?I?3DN9pYz$v@~DF=U`u!=i#`*{1jd!{yFT^ z`USj3^0n}G@h^k4^q%71z`ez(WEK87>mP4G$K- z8>DqzE&dCbl+fPJ1nfxUh{4bt}OtMI#E|IB^{`@DS)FH@Mm!y6nQ z%rRs8?GmR2X}oi?^QnWo!HV#XVrv-NA>#%G<%c;B^Xfc#w8L zKk<=prMM5A5FZ6E7at986dx0$bq~bH!ai>&z&_3s;ZlX^2dnG{p9K57oeD2jm>k&W ztq@)>c@ex*JOuXoIXy@_ut@S@@L=(9*z4px*r$6wyjo!{f>n11kAgQ#{>>ompltCa zu-Ef=*q7x}IHoXVu+L{XyhHLT*ynk2kalps| zzY6y8&kfRg`1r4bef&4T<8?3gr^f{Gci?&A@4@rMx4}N$AHY7{JK;?V(-x#1k}19$ z?k~pv9OCtc{W)Z`An=CeaV;c*Dwv;_%sg2{(Q-o zaXswI_$utn_DesC{`BTMDkpCo|tF$%k#wp;WqJL*yoMs_sg3k9~z_`)>C{2++TbqJXm}-Tq@=n zKg_3f9_-V)2=;05TpzYf;Y;CmG0*s6tHl=wX}tpR7&t>b7S0lngR{lhfnLMJq}6ML zxEl6(n+B_G^k{%%3eyb7#VxSU|K+gHKhJNkg$g6C|1;;vy~7@;p6u=VaA?rwFJzc= zF6uuK_X&bn2D}ydkua0-F7Lrdz`fyZ@Zs=R;P>I)@X7FYn8L^M6G2xnD1y)Fw}012 zkbY|xJl^f0Yt4o)wR`An-eQ;WBNGhsiOtALo^utu5Ph4_IGuL<$HDUNpw@$n%p2=RyzPYCgiAzl*V)ggX6#2>)4 z1@npTN+i#)YT6IZHPRqx>X}H^QPZ(-zL6sDpGdZorh%~Pt7#ZKAh4gNZ@>e^Wiaj6 ze43^tlINi{HNmuZ^J$u$NS=q*G!H)7NRfXhkvtEr>HF|;MvDBtMDjeere$znBSrp` zM6%sB{SrRWNRhvmNS=q*^d|gOBSmf;x@yl&X43yPBSrrAM6x|k&xB7lQjVrMG?8r2 z(~p8rHB#g!B$E1^ekz<}q{xkaq|fQYV72+_7sAr#^wIDIf&EOcfJcdI;nCt|_)_sz zaIH90@6(?`K3($F@NluwgFe$gOYw}pAvXEZXU4T5UJ>GVQrv9XhCa>bh1k^NLia9{x+<#H@CuS8_mzbrv>)Y9E!ii#6Q%@Tk_$v#isq}({fvgf0N>wT|)fz5Z?k% z2<&I(A7Fhh?;YY$_{*)gdGqaS@{ zJrLqI;CI{)v$mz=SM&|B(ZeT!{akTYNZ!JvxQpbW_Hf1OP?()5o^76AeP$2gUbXGn z=ZEB`jVaFAw`9>lxf`ZMheZO$sK#bmz6M?`zBa`3;PoN-4)OKKcXj01_x4T3IeXR6 zAh^b*HTzlHXW95?Kc_y+-^+NlcMrvL51cJt4CjmQgNwvJga?ZsfJ?*=!8~KOEDysm 
zF=N`^twAvRd3%<4+VRUIUyi&T`AbgTcY@@PBi|@ytlN8&_-S~v_{Xr%KlA9rDW*+# zHQYzM2JRBzHR<#|#2pZyLI7#l^pvp*Ob1oyy4!Jos#XZqJU8SuR@3Yv4E_z1Wcd>?We z;+!n_ez-UMRrlPPXKKz#?zv0m;VkmAQTZ7L?-DbHCn?*W`NNlKeKN#fhl$g)^T z)HuB*)4~sMly7e{;E~GDOt?gR1zakg4U-3}w*(v$UkT3>Ukxu8&xJRLzXf|eV3Uvb zb-^6u=q!b~0j6v={`s)a!_9EBsK?kBz%?k~O{&hN-`YN=D{e9oMP1IWVTN4fLmZwr}x1#VN<6|6G7y1xCmbE>etlWoX1`LCev*v4Gf5?Kqz|?D=t}l4HY;C_<#hE>05*wy2uXAFMY@hS_( zC+&FYJnbl0&r2G9&g0{X=NLF4&VuKOkA)YA$=mS@#mB>~9eK_hoL8fA&H0cA^FXzY zPsM|SAdvwdgnTG`=un({FzqeT%e5b)heU7JeoXv{!{^eik=8PoR!|e-T6n$W_!#?+ z?8u2R^aTo^h?3w`V(LSjQ^cfe+7<5~()vyiV`us%#B<;^9XU}?oC=?qNj#?goBYqo zvS)U36(m~WT0`)?Sscb@F-SaCY0J`gV-R#3aPR?0)1RIIpCLxK>32JRY88J=qo?DQ z{o$h}M-LgNiZ{SR#ON*K>*AHfd7k5^(NFqAj#p+Arc~Smo+!=;Sg3Wd$y+cHdE_JU zb`-zAI6<6a6({-WR-t=W!&AgN;Rc1FZFid`Mu!>o3eyw5LYzsM8x&?8^6MQxWq6Tz z6MV12A4h(Ax%`}quF{Tid_L_YtyXIdSKk_zR@@L=^Z9dZyS>*G_k}oBmyA-~JFmF44vW<9h6=o}Zx_AYA zuJ~c{e39c-t*uV}KjwI4AGlq7bPxnRRd(_dG&%Vy?hS5B52%ZPq=G>0e3hxw z4{h6s3~DEyb&kt7(Y7;Pn%-QHa{b zvafdGpMHfr^tbZJK;)4sCttn}R-Hb+2tLBydph~=@`($7E_&$pg5&EceAl}y+qrPp zE>3>_dg3o~oV|kZ8ysIvS-QRJcrJNMf7!B~!$yv`91LkC&c~hHjIp~O=E7Ga58iiS zik1=2PA8w&n|RdbA16%u2q!Ph;@&=vC-j3)bUbAzTMEBe^u7)Rz(Rq5z z<)I=5&v4<-B(3!86o$NIba!#CqFkAM#U{*m6=n@OarI^W>fM|^?YX#_Co6m@_vR~a zTel)UT2 z6h}6sIL+jN|Lo^i9=LinC6An%;;t=J!yMCOKcQcviKl4fdTZ%K% zQ=GOw#oaPP+#|_piJn2P<;*t|J%V2MmC!e?33@Fy|E{+G2ECT@O676BKjMqs?)4B~ z+J3J``98w;hkWnmdl%ma_&&w=r+n$hdadI76TZ*$rOo$xlJC8If5!JYzVwZ~R`BKd z>Gfm2PxF12Fa1@o`}w}Wx1BHJoL=|v{r^&49wa}P@LkNejqja&NAf+5Z-2h0^Bu|; zze2BD`5wwQmv19qY+tYQ_~!7vnr~0OBlw=nw-?`U@EykYOuk>?%N(p1b29VJE(ppl z7=1>=qze5zJt!+Hi#60wsw=OJ!j&yGm1R{;broeb_4QMmVqR2TR~;{_m>jK`ili}K z-VhJUqIJ`&8|v!blD6x=Ce|qcyQ;L!fYF6|tJ8#xfHHH&?_bH$=-T z?PXa*eN();&Lmtrxh&cot!Rozxx2iQyUWVzCr+t2*WOi89gxHY*~DC1P7s^VWURI2)pZnf)ETi@O*L_F>-knRv8ifOG>#G+)CBfME&|G3 zUI|p098^`*)SGmwOpfh8=AtngMGIB&Xbtz(memaqA5U78Ep=X zo-3k_jn#Frrno|soz>LP5Uq<})KnK8Szl3JV{|brT2b*Es_Ulef)ZQxQIqv3+{D%)l|2&(%BD5eo95RTZ>Wfo z!+3pD%(S+$kU;P&TYQUY`0%SMH~?8yZ4U`zsqY 
z7xfJlF1mBW8PJ6dbS@Rx&{ScqLA-pTd2Y_J|L~XoO=Y} zrO6^DTWU(um}`Hxx0RR+d)uh;=~3FOu1Z}7>_7IokuolhHcYJdb#_U4LmkFZ1smNE ztqLWaytL`KE1QtPiKZHoS21e~-E}#2XN*`iR=b&yv}a?nWLzq+Ei~|Is^8c}-}X{$ zntd)NFJ3<}D65Y}z1^~Q#6RdsyQNqgqNxaivgy%=M$9Tk$ywp@3S;8@bLDJ>d9+OI z?g=9=$*R*smr5!XbxP)XqSQ5Q+&=i;+BIS{DrkgdjnPT2aS=b(-aQ1huJI?IhQ^qE zYK;vd_l-W1mKH3gF&lyo8at!1vVl_N@US>SX=h@$rk>BU<{lMW-;pI=9kAaUnS^p(`nk*2dhk@2fVXF{(0Ln`Op~7=bkdJ{+k@VjGL; zrKU#9DoL@d&f2z?mWs*cJjE?7G-B73EE*f!(o%Dy(TkOIfU*?meJ#>Kichjg94#qVAT^fXxM>&tjx!hd+BAopmxUsw;1-jZL)o zol+Z+SaCdONJGw`lXG+O2jmy!73LQWO5%V)foVj=@!TN|xr4F?<>VqU4N5)pxJ{?&e8oHe=XSU*HjXP_LrRO&OU>(2XyvP3O0_8$qcZOg`*mj zozqzql(KO$BM_JOB6HixdAS1$iw2qhCx7i2`#@KS!h(Xl0`7N(ck0W@wo&Bd7C99r zZ`9AW2C@g`7I0$~qf=8Us61y-J~|63>znBO5LtZ}8IdhqQf`%N`nkQBsi$FulII zlH3j)k~1(c?X!4zPEgK#q&Q~(on9Qv%^q@6ZVq2-7#QX3Hc}4{&`oz?#zA{;1^UPldrW=2SLr^Q0CA70FBc&{q1(oS-r~-DRPwx;Yj%m>*24w(*(F*E`A7 zD08RccwRtf7&SC7a5F@U^;K0yRv5(GjX6O>O?9mkG2UyOX)q^XI#e^&VUtgRXS~A7 z3NW}cZPbv*oFHCZYlV41Ri&NHnutKglf|XExkDQBa;qvE>4W2q#kqMy%55h#V34tw zxdnyTNQI5(<_1+U#`!j%OwF()M39exEt|+xv^ck5$VoXevt=gzkewy?@N-9=P545> z7nQ{b?F@2RLo(R-plk*gh55!d+o?cfaZ&$)h5a$fb`rr=PEM}5BzUev(gNZHiXk(eHHUg`YEFMlnPlYt`7mvyA!jv5MMC zGbh^Dk7w^2zcSd}*N@vi-uK@7`tg1J`2XSk_;Aw|#_6$7$}__lGrM&D#^O@?$HtsO zI;`Sc`sboS=5-2KWXBH1$z7c5XTUQiGtuVCjk&5!oOyQ0Wqa~7E;?sa=|$f>tCS&a z_7{#E@^k!<;0s3%`3yJR0Hf29gY)?s-yMa>8`7AYk6YdNI?t_aF3xA9m0vKVxlxWb z>*FhS1Ca6jbu~NUDbOgy7P&yh8eSc(sl3ny@Z-txft8Lj!_?%c(u$0?(uf#XGSD-UpEP>|~bgU7=8B?CScAlBTP-dvppoTf+i|Bx@@4>r;)u}>&`nrVQrEOn zubHUGaLEdoP8DRQN@g6!u1E#hsq~U%DWC=UvJ|MT*|Ko4x3ZWheIW{R1M@mSFUlwe z&8(=#y*4usf~_m7@&ejhL26WMUQ6jNWiK!2&=N~&oQ(y!p@RD8a4a`PsZXE34vlj} zdE;dJva)!fraI=`lJTGy=MS`M8EB?ErVLidEW^xY5ZYEq+az9_>yl_)rM>hM5!Z6u zOvJXDQSP0lT1-JuX5PozH#tm<$$d^?Ev_mlp|-p^Irblq6D!7*t4ovr#^M4V3g1Q< z^qZ(IDzBTwGf_bEFRE)W7mYqPT@}!d3%n+DmwO@1Z8X!ooPt6;bk!Bbh1tn2%vdod zG2?@19WOw5xl~-3qaX8f2IZPunVD)MqjWR7DK2ELR>)k&c`?UK7d7O#{Ci;;_Ju0QEyyL?whintyJ2cI(9FDyUd`l-83&Dz+=X7E8c~In 
zrDj5Vx-<*ToZ9w8Ot0#TV}%1TT0F#2Tp8vGBYNB@jphv&j`4W)^r&%%HW>%MO_?TU z=6(48?Y|Yp4NL`*72+Ez&I!zNi@IpEGFr)nneUk?Tx0P$1AI%#ABfxD%b9Aj8lm`{ zoZ!?^Ex3tlPaRo3v7x-7<<#K~bb&MK8>Tkums6^|8gfuc8LTA<)v&&dS}v?crE(iF;=pwyeO z4@DczM_TRIu~OWnpTv$BflZp#P=GWlnf3Qm916QnF0*%^Tr%(dpPgLtT8#?tIJsoT*>Q4d z=D(rIC6^s1m&pm!prGUAGB>D?nbm4$)fKaBGi%Su4K*UB!!}+QDl|Fft!{mcYd6*8 zs-nE1p#_2Sh9Ic({t-1;rdceNGcRzg*F;_#)`yy9+6~bPrda*;QV^kCMTsQ&LJ(P{ zaaAC4Zvx}YRU4u;M&RB78iCJ6az<%{3?Qm%Ox~mwBb(AxYqGC-q?Z)tBQsM+JGV5_ z@_3yQxz~b5VAh+knpt_#E5gtZF|*#6Tg>~xWGpkv@wp)5@kLh6qR1Lo;DP}*t?GJ8 z!isRFTBy{$9ULE2*Ly9P_k-@nipFY|tPp5`jlP;y+D`8SvNa<%cQr6_C(r<1MB3Yo z7m3S4YdNY}wPOlp=D9Y9rYuFa1t4^7-HLL(%vJKB;6mG0hSOFW8JOh;W5$ds9aC~P zoHwMYAlExkY_xK^7+0ln(c~NNfK5$56k7?p=2c{J>Q#IW{tf(0_W6_{#evaSWI|jZ zm#B@$2;?jD2F%4HuTNpXoQP$?cG}FcS{w_*M-<2Kd6;*SA-9?J*2Ij%BR@ZwQe4Cw zwjemy#8N`m8w!eoQK^gk;M}w9E69RC?*x4wnCUV%V)3+1(0XV|EQmMO5fjnm2PvwQ z%*?w7$m*JOivs4qF${Rht74Y1*`-IOIIM=>qd1)vMa9Vbf?~vsK9glE2uhL{_-u?f zAX(0{eSt6XaDI!Yi;}W4yy%QGM|mts!UdjoSDLBCGa4GoTdXsvpfDK8>{tT&+XDPq zjkH{=+|H)n#lumoYmc#l;CvI9f0Gr)kEU8W&Trk~ihtt9@bukyQFvx+b8wf?nsLRL zt6XyxgeLYKg6$b3Jl)@0P;#8wJmA*vyE%SxX})=(?AGcVue4bbYnSzFSAezvr1k{t zzQ-Wt`DCX?r~461Qd3@5&KPJE%h=r7E*2mUG!M6a2@J7TiV)tbPKX>F!TP}jsH(1G4ecnFizVY= z^}qkYgqX-`ygEYo6=ca&><=b{eH}511!Ku5TsDl{{BG?DF%;|`!2V!DFq||y#t7qm z^2t>6lg)`4Cyip=S27)!RkKL#{~VtW~GB5U^k@uEZ)@wr8bd3r7V|7KAl<6JW_;myq67bUV{u(L&pECf7> z6@PmyN~EssOMX8gVFjRFlxP;h@#cMxMTvMacwfE8qC_(SPc2HMtY5S!k=N}`>w7Lr ze*v}nSzZ}r3D4f-Ph6kW8OrEmloJ16>d^&mKK=Hj^3Yv-p#R_ z_lJ3Q*!#n){C(b^ecm6OH2;M6r!YI^{lR7M1>PUC%!8FfyLo@C#Cd;4+MV)d34odI z7N5hKkdx?a?0nZ;L2khPk0xVPV#zRG9QxqJRg$E_GnlgUKlif9$_@Plcr_yDEpD zs_dE^pq{1pEP^m#GHsSa1QVwh=j8=<8^^`C5|TqE`=g!1yOhkeoy)sRR$%xky%D$# z|IV!NQ^z^J4NzWL-E7uvuv=sxQH89pab=htocLV$p`#0+csQ$WFnR&bFEcCm3$S8D zf@ma)w5B;$SPvlmkyViuAG9|QB`9f&5e&$nJr);EH8GblC77WtP(X7&o2kcRCpol(gpO6IPC zR!)ng;hM^~t;XG5!dEFa6~q>CEj+S|b~MROVeg#Zs)W*@_90`9E zgv)>UR0*#Zo`=-=5%$7s!Cu&;-By~QEU7&EQ^?Ou)KMJ?J*9*p5mE70C~ 
z1+b3WWC&f^Al7v!0U1r6kEjOF|-GMP&qcbxf+x>I~XPw+j33tqZJ9z=OadIyaHp?E=u>08$&V$IQ zCA<5WoutE3G3#IKa8iUnoPyZ>p(qtPAHts(kvu2D-e{gx+cD|+-&>%bjms-nZqc&UmlS5K#xKb$gVd_D!Y}emnx$ZVB`iD7+@8B)D{+3wvNF~% zR+LRI=U6+ueev7@L6|%2%Htive{WZwd9|RIARTw*887@^cIBC+S)un4HUoZF9$wb) zt~~#GpVedZLw09cYFD23I(EJ*5C0$DtIl`jp)z)lrA|@#$Gh?d`qz_t*_D^v74`+Y z@&=e?*PZXmW2w8O)?f7xXA~q{- z?N&NGNqCs6>KpKZT7hwNv)+waKBs>b5Ad(l&A!Q8S|1zZCUH*90Q`Tc*||+3?A5b^ z)bG}@XJujE9s;}E&^Y>x>#2BvUodcSj?FdOf_X>ZD?_kSu$X%zl@e(wr`%2R}}1Vq%;}c?HIcvs;9_QkK4!T z4{S79OBTP#uNUC>yj*j?-5z>zQBtcM@@Eh2Ci$eNwD=qr$cB!eHP^fOca13NKs6U3 z7pl4RPE^C*jcV9CQVl0vsWwpP1ZQ&{^7)#}kaxBC910kAAX`DwM`|0T3F1AiNkP~t zYwz(M*JOy4qn53$`N{}RV>;tb4LjGGW z2iSe*^5@!X>!P(3cH|bHljlQP&#k%M&6#UN|LWV3FW!l3N;t9FogHkd$y}%W@kSDI z*PF|fSJ_;J{mSM#=~4Ig%haQ3CdnG&A=%BjD~G9fhwgDO?VjG@-H)p6#GTri*EMB) zbtJAcCO%<%L~-bdTQ(G!!+y_9u6^}~@R|d7%|X0o{X*y%Ut4+Rc;7QRISSZ>HH+Hi zoA;M4l2Yql-|e(u&hQPN^4sZgyu50(eqsHqvv}RvynFZZ?`8=|XD??{CmsF%q4#ex z?E4aL75v+aq%}w$uWm}2e4i0+^1Gb@zQLu4V|9%yeHQT(>t1XX4*_`J>w}? zzTacLr~ltHl;p}{^K-|S0k*~)_N^_>-sdF$cRR`bUfz8hshPw5r`_dV`3&Foo$tBt ze9!;n`JVeWQk%hYr>At8iQ~SF)c@LvV`ik~m(uRrNL}NW!^QLWZKSr_kN0h){$Jfl z-C@3K-B$RMth3B+h24Gr%l27o3f&pa#`9Jeol%Sn-MAO+HcH!x73M*{AsyMElmgRga#&=&*VO(h!#`u4( z^@|nZV-w|Cl%vJ6_qBvCTqy5iv7x<>&7Lc84kwzMUya+vq}ajnf?pBmHUCRFUqYwk z4K#Umt{Au4oi<|6W3cPwal?>nyEHz9IAye+*!pG6Z9HtF+?y?%xy~2==~>q!&hu#h z*rNBm2khtE^DifLbZ2z30Mb5I$tYbWy!+<@vz64|<5qp{-GP+!GyA`wx3?vrc0;&b zMrog=)bV%5r(;%C@<|?j7hbEVEbd#bxX%;w@Aky(Tdw#Y_rL60u9)KwRob^)vGe7M zX4U$?`7ffDOM-Og`j$9QO^NOF^S zXDdZGG|mk&J6$QlR&Mr0cfL}Dv&_v_?f=F~5q4O+w9G<~&dzV{IHjmBKD_$ZKX)q`(_R=@34{Go>~6Cb4I@1Y~A@8`3xsRvnRjF+aB?k zT*GR2afU+u3uolpv&FyYjC{NHA-Q42<|DQFJ9P9POQCFb+(vf4Py3%b#;~Bn8Tn?( z)j!_I-q{)X?9UEuWH;-xSX|V}Ms{|~Rx%CfSQfKn%Wq^a44;wDh2JRdCbxd4_#iu( z@ZLAJZ;Kbd4IJfe4^Ov?dYJ3l)jhdp*2}V| zsGa_50gz3n + +#include "GenericInterruptController.h" + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOInterruptController + 
+IODefineMetaClassAndStructors(GenericInterruptController, + IOInterruptController); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +bool GenericInterruptController::start(IOService *provider) +{ + IOInterruptAction handler; + IOSymbol *interruptControllerName; + + // If needed call the parents start. + if (!super::start(provider)) + return false; + + // Map the device's memory and initalize its state. + + // For now you must allocate storage for the vectors. + // This will probably changed to something like: initVectors(numVectors). + // In the mean time something like this works well. +#if 0 + // Allocate the memory for the vectors. + vectors = (IOInterruptVector *)IOMalloc(numVectors * + sizeof(IOInterruptVector)); + if (vectors == NULL) return false; + bzero(vectors, numVectors * sizeof(IOInterruptVector)); + + // Allocate locks for the vectors. + for (cnt = 0; cnt < numVectors; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < numVectors; cnt++) { + if (vectors[cnt].interruptLock != NULL) + IOLockFree(vectors[cnt].interruptLock); + } + } + } +#endif + + // If you know that this interrupt controller is the primary + // interrupt controller, use this to set it nub properties properly. + // This may be done by the nub's creator. + getPlatform()->setCPUInterruptProperties(provider); + + // register the interrupt handler so it can receive interrupts. + handler = getInterruptHandlerAddress(); + provider->registerInterrupt(0, this, handler, 0); + + // Just like any interrupt source, you must enable it to receive interrupts. + provider->enableInterrupt(0); + + // Set interruptControllerName to the proper symbol. + //interruptControllerName = xxx; + + // Register this interrupt controller so clients can find it. + getPlatform()->registerInterruptController(interruptControllerName, this); + + // All done, so return true. 
+ return true; +} + +IOReturn GenericInterruptController::getInterruptType(IOService *nub, + int source, + int *interruptType) +{ + if (interruptType == 0) return kIOReturnBadArgument; + + // Given the nub and source, set interruptType to level or edge. + + return kIOReturnSuccess; +} + +// Sadly this just has to be replicated in every interrupt controller. +IOInterruptAction GenericInterruptController::getInterruptHandlerAddress(void) +{ + return (IOInterruptAction)handleInterrupt; +} + +// Handle all current interrupts. +IOReturn GenericInterruptController::handleInterrupt(void * refCon, + IOService * nub, + int source) +{ + IOInterruptVector *vector; + int vectorNumber; + + while (1) { + // Get vectorNumber from hardware some how and clear the event. + + // Break if there are no more vectors to handle. + if (vectorNumber == 0/*kNoVector*/) break; + + // Get the vector's date from the controller's array. + vector = &vectors[vectorNumber]; + + // Set the vector as active. This store must compleat before + // moving on to prevent the disableInterrupt fuction from + // geting out of sync. + vector->interruptActive = 1; + //sync(); + //isync(); + + // If the vector is not disabled soft, handle it. + if (!vector->interruptDisabledSoft) { + // Prevent speculative exacution as needed on your processor. + //isync(); + + // Call the handler if it exists. + if (vector->interruptRegistered) { + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + } + } else { + // Hard disable the vector if is was only soft disabled. + vector->interruptDisabledHard = 1; + disableVectorHard(vectorNumber, vector); + } + + // Done with this vector so, set it back to inactive. + vector->interruptActive = 0; + } + + return kIOReturnSuccess; +} + +bool GenericInterruptController::vectorCanBeShared(long vectorNumber, + IOInterruptVector *vector) +{ + // Given the vector number and the vector data, return if it can be shared. 
+ return true; +} + +void GenericInterruptController::initVector(long vectorNumber, + IOInterruptVector *vector) +{ + // Given the vector number and the vector data, + // get the hardware ready for the vector to generate interrupts. + // Make sure the vector is left disabled. +} + +void GenericInterruptController::disableVectorHard(long vectorNumber, + IOInterruptVector *vector) +{ + // Given the vector number and the vector data, + // disable the vector at the hardware. +} + +void GenericInterruptController::enableVector(long vectorNumber, + IOInterruptVector *vector) +{ + // Given the vector number and the vector data, + // enable the vector at the hardware. +} + +void GenericInterruptController::causeVector(long vectorNumber, + IOInterruptVector *vector) +{ + // Given the vector number and the vector data, + // Set the vector pending and cause an interrupt at the parent controller. + + // cause the interrupt at the parent controller. Source is usually zero, + // but it could be different for your controller. + getPlatform()->causeInterrupt(0); +} diff --git a/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h b/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h new file mode 100644 index 000000000..7b9fce561 --- /dev/null +++ b/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_GENERICINTERRUPTCONTROLLER_H +#define _IOKIT_GENERICINTERRUPTCONTROLLER_H + +#include +#include + +class GenericInterruptController : public IOInterruptController +{ + IODeclareDefaultStructors(GenericInterruptController); + +public: + // There should be a method to start or init the controller. + // Its nature is up to you. + virtual bool start(IOService *provider); + + // Returns the type of a vector: level or edge. This will probably get + // replaced but a default method and a new method getVectorType. + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType); + + // Returns a function pointer for the interrupt handler. + // Sadly, egcs prevents this from being done by the base class. + virtual IOInterruptAction getInterruptHandlerAddress(void); + + // The actual interrupt handler. + virtual IOReturn handleInterrupt(void *refCon, + IOService *nub, int source); + + + // Should return true if this vector can be shared. + // The base class return false, so this method only need to be implemented + // if the controller needs to support shared interrupts. + // No other work is required to support shared interrupts. + virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector *vector); + + // Do any hardware initalization for this vector. Leave the vector + // hard disabled. 
+ virtual void initVector(long vectorNumber, IOInterruptVector *vector); + + // Disable this vector at the hardware. + virtual void disableVectorHard(long vectorNumber, IOInterruptVector *vector); + + // Enable this vector at the hardware. + virtual void enableVector(long vectorNumber, IOInterruptVector *vector); + + // Cause an interrupt on this vector. + virtual void causeVector(long vectorNumber, IOInterruptVector *vector); +}; + +#endif /* ! _IOKIT_GENERICINTERRUPTCONTROLLER_H */ diff --git a/iokit/Families/IOADBBus/IOADBBus.cpp b/iokit/Families/IOADBBus/IOADBBus.cpp new file mode 100644 index 000000000..5baf7ae9f --- /dev/null +++ b/iokit/Families/IOADBBus/IOADBBus.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +#define super IOService + +OSDefineMetaClass(IOADBBus,IOService) +OSDefineAbstractStructors(IOADBBus,IOService) + +// ********************************************************************************** +// init +// +// ********************************************************************************** +bool IOADBBus::init( OSDictionary * properties = 0 ) +{ +return super::init(properties); +} + diff --git a/iokit/Families/IOADBBus/IOADBBusPriv.h b/iokit/Families/IOADBBus/IOADBBusPriv.h new file mode 100644 index 000000000..c3fff4024 --- /dev/null +++ b/iokit/Families/IOADBBus/IOADBBusPriv.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 
1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * 18 June 1998 sdouglas + * Start IOKit version. 
+ */ + +#define ADB_DEVICE_COUNT 16 + +#define ADB_FLAGS_PRESENT 0x00000001 /* Device is present */ +#define ADB_FLAGS_REGISTERED 0x00000002 /* Device has a handler */ +#define ADB_FLAGS_UNRESOLVED 0x00000004 /* Device has not been fully probed */ + +/* + * ADB Commands + */ + +#define ADB_DEVCMD_SELF_TEST 0xff +#define ADB_DEVCMD_CHANGE_ID 0xfe +#define ADB_DEVCMD_CHANGE_ID_AND_ACT 0xfd +#define ADB_DEVCMD_CHANGE_ID_AND_ENABLE 0x00 + +#ifndef __cplusplus + +struct ADBDeviceControl { + IOADBAddress address; + IOADBAddress defaultAddress; + UInt8 handlerID; + UInt8 defaultHandlerID; + UInt32 flags; + id owner; // here for speed +}; + +typedef struct ADBDeviceControl ADBDeviceControl; + + +@class IOADBDevice; + +@interface IOADBBus : IODevice +{ + IODevice * controller; +@public + ADBDeviceControl * adbDevices[ ADB_DEVICE_COUNT ]; +} + +- (IOReturn) probeBus; +- setUpName:(IOADBDevice *)device; + +/////// nub -> bus + +- (IOReturn) setOwner:owner forDevice:(void *)busRef; + +- (IOReturn) flush:(void *)busRef; + +- (IOReturn) readRegister:(void *)busRef + adbRegister:(IOADBRegister)adbRegister + contents:(UInt8 *)data + length:(IOByteCount *)length; + +- (IOReturn) writeRegister:(void *)busRef + adbRegister:(IOADBRegister)adbRegister + contents:(UInt8 *)data + length:(IOByteCount *)length; + +- (IOADBAddress) address:(void *)busRef; + +- (IOADBAddress) defaultAddress:(void *)busRef; + +- (UInt8) handlerID:(void *)busRef; + +- (UInt8) defaultHandlerID:(void *)busRef; + +- (IOReturn) setHandlerID:(void *)busRef + handlerID:(UInt8)handlerID; + +@end + +@interface IOADBDevice : IODevice +{ + IOADBBus * bus; + void * busRef; +} + +- initForBus:(IOADBBus *)bus andBusRef:(void *)busRef; + +- (void *) busRef; + +@end + +#endif diff --git a/iokit/Families/IOADBBus/IOADBController.cpp b/iokit/Families/IOADBBus/IOADBController.cpp new file mode 100644 index 000000000..14cafcc48 --- /dev/null +++ b/iokit/Families/IOADBBus/IOADBController.cpp @@ -0,0 +1,802 @@ +/* + * Copyright 
(c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 
1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * 18 June 1998 sdouglas Start IOKit version. + * 16 Nov 1998 suurballe Port to c++ + */ + + +#include + +#include "IOADBControllerUserClient.h" +#include +#include +#include +#include +#include +#include +#include "IOADBBusPriv.h" + +bool ADBhasRoot( OSObject *, void *, IOService * ); +void doProbe ( thread_call_param_t, thread_call_param_t); + +#define kTenSeconds 10000000 + +#define super IOADBBus + +OSDefineMetaClass(IOADBController,IOADBBus) +OSDefineAbstractStructors(IOADBController,IOADBBus) + + +// ********************************************************************************** +// start +// +// ********************************************************************************** +bool IOADBController::start ( IOService * nub ) +{ + if( !super::start(nub)) { + return false; + } + probeBus(); + + rootDomain = NULL; + busProbed = true; + + // creates the probe thread for when we wake up: + probeThread = thread_call_allocate((thread_call_func_t)doProbe, (thread_call_param_t)this); + if (probeThread == NULL) { + IOLog("IOADBController::start fails to 
call thread_call_allocate \n"); + return false; + } + + addNotification( gIOPublishNotification,serviceMatching("IOPMrootDomain"), // look for the Root Domain + (IOServiceNotificationHandler)ADBhasRoot, this, 0 ); + + return true; +} + + + + + +// ********************************************************************************** +// ADBhasRoot +// +// The Root Power Domain has registered. +// Register as an interested driver so we find out when the system is +// going to sleep and waking up. +// ********************************************************************************** +bool ADBhasRoot( OSObject * us, void *, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((IOADBController *)us)->rootDomain = (IOPMrootDomain *)yourDevice; + ((IOADBController *)us)->rootDomain->registerInterestedDriver((IOService *) us); + } + return true; +} + + +//********************************************************************************* +// powerStateWillChangeTo +// +// We are notified here of power changes in the root domain. +// +// If power is going down in the root domain, then the system is going to +// sleep, and we tear down the ADB stack. +//********************************************************************************* + +IOReturn IOADBController::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned long, IOService*) +{ + int i; + + if ( ! 
(theFlags & IOPMPowerOn) ) { + busProbed = false; + for ( i = 1; i < ADB_DEVICE_COUNT; i++ ) { + if( adbDevices[ i ] != NULL ) { + if ( adbDevices[ i ]->nub ) { + adbDevices[ i ]->nub->terminate(kIOServiceRequired); + adbDevices[ i ]->nub->release(); + } + IOFree( adbDevices[ i ], sizeof (ADBDeviceControl)); + adbDevices[ i ] = NULL; + } + } + } + return IOPMAckImplied; +} + +//********************************************************************************* +// powerStateDidChangeTo +// +// We are notified here of power changes in the root domain +// +// If power is has been brought up, then the system is waking from sleep. +// We re-probe the bus +//********************************************************************************* +IOReturn IOADBController::powerStateDidChangeTo ( IOPMPowerFlags theFlags, unsigned long, IOService*) +{ + if ( theFlags & IOPMPowerOn ) { + if ( ! busProbed ) { + thread_call_enter(probeThread); + busProbed = true; + return kTenSeconds; + } + } + return IOPMAckImplied; +} + + +void doProbe ( thread_call_param_t arg, thread_call_param_t) +{ + ((IOADBController *)arg)->probeBus(); + ((IOADBController *)arg)->rootDomain->acknowledgePowerChange((IOService *)arg); +} + + +// ********************************************************************************** +// probeAddress +// +// ********************************************************************************** +bool IOADBController::probeAddress ( IOADBAddress addr ) +{ + IOReturn err; + ADBDeviceControl * deviceInfo; + UInt16 value; + IOByteCount length; + + length = 2; + err = readFromDevice(addr,3,(UInt8 *)&value,&length); + + if (err == ADB_RET_OK) { + if( NULL == (deviceInfo = adbDevices[ addr ])) { + + deviceInfo = (ADBDeviceControl *)IOMalloc(sizeof(ADBDeviceControl)); + bzero(deviceInfo, sizeof(ADBDeviceControl)); + + adbDevices[ addr ] = deviceInfo; + deviceInfo->defaultAddress = addr; + deviceInfo->handlerID = deviceInfo->defaultHandlerID = (value & 0xff); + } + 
deviceInfo->address = addr; + } + return( (err == ADB_RET_OK)); +} + + +// ********************************************************************************** +// firstBit +// +// ********************************************************************************** +unsigned int IOADBController::firstBit ( unsigned int mask ) +{ + int bit = 15; + + while( 0 == (mask & (1 << bit))) { + bit--; + } + return(bit); +} + + +// ********************************************************************************** +// moveDeviceFrom +// +// ********************************************************************************** +bool IOADBController::moveDeviceFrom ( IOADBAddress from, IOADBAddress to, bool check ) +{ + IOReturn err; + UInt16 value; + IOByteCount length; + bool moved; + + length = 2; + value = ((to << 8) | ADB_DEVCMD_CHANGE_ID); + + err = writeToDevice(from,3,(UInt8 *)&value,&length); + + adbDevices[ to ] = adbDevices[ from ]; + + moved = probeAddress(to); + + if( moved || (!check)) { + adbDevices[ from ] = NULL; + } + else { + adbDevices[ to ] = NULL; + } + + return moved; +} + + +// ********************************************************************************** +// probeBus +// +// ********************************************************************************** +IOReturn IOADBController::probeBus ( void ) +{ + int i; + UInt32 unresolvedAddrs; + UInt32 freeAddrs; + IOADBAddress freeNum, devNum; + IOADBDevice * newDev; + OSDictionary * newProps; + char nameStr[ 10 ]; + const OSNumber * object; + const OSSymbol * key; + + /* Waits one second for the trackpads to be up */ + + IOSleep(1500); + + /* Kill the auto poll until a new dev id's have been setup */ + + setAutoPollEnable(false); + + /* + * Send a ADB bus reset - reply is sent after bus has reset, + * so there is no need to wait for the reset to complete. 
+ */ + + resetBus(); + + /* Waits one second for the trackpads to be up */ + + IOSleep(1500); + + /* + * Okay, now attempt reassign the + * bus + */ + + unresolvedAddrs = 0; + freeAddrs = 0xfffe; + + /* Skip 0 -- it's special! */ + for (i = 1; i < ADB_DEVICE_COUNT; i++) { + if( probeAddress(i) ) { + unresolvedAddrs |= ( 1 << i ); + freeAddrs &= ~( 1 << i ); + } + } + + /* Now attempt to reassign the addresses */ + while( unresolvedAddrs) { + if( !freeAddrs) { + panic("ADB: Cannot find a free ADB slot for reassignment!"); + } + + freeNum = firstBit(freeAddrs); + devNum = firstBit(unresolvedAddrs); + + if( !moveDeviceFrom(devNum, freeNum, true) ) { + + /* It didn't move.. bad! */ + IOLog("WARNING : ADB DEVICE %d having problems " + "probing!\n", devNum); + } + else { + if( probeAddress(devNum) ) { + /* Found another device at the address, leave + * the first device moved to one side and set up + * newly found device for probing + */ + freeAddrs &= ~( 1 << freeNum ); + + devNum = 0; + + } + else { + /* no more at this address, good !*/ + /* Move it back.. 
*/ + moveDeviceFrom(freeNum,devNum,false); + } + } + if(devNum) { + unresolvedAddrs &= ~( 1 << devNum ); + } + } + + IOLog("ADB present:%lx\n", (freeAddrs ^ 0xfffe)); + + setAutoPollList(freeAddrs ^ 0xfffe); + + setAutoPollPeriod(11111); + + setAutoPollEnable(true); + +// publish the nubs + for ( i = 1; i < ADB_DEVICE_COUNT; i++ ) { + if( 0 == adbDevices[ i ] ) { + continue; + } + newDev = new IOADBDevice; // make a nub + if ( newDev == NULL ) { + continue; + } + adbDevices[ i ]->nub = newDev; // keep a pointer to it + + newProps = OSDictionary::withCapacity( 10 ); // create a property table for it + if ( newProps == NULL ) { + newDev->free(); + continue; + } + + key = OSSymbol::withCString(ADBaddressProperty); // make key/object for address + if ( key == NULL ) { + newDev->free(); + newProps->free(); + continue; + } + + object = OSNumber::withNumber((unsigned long long)adbDevices[i]->address,8); + if ( object == NULL ) { + key->release(); + newDev->free(); + newProps->free(); + continue; + } + newProps->setObject(key, (OSObject *)object); // put it in newProps + key->release(); + object->release(); + + key = OSSymbol::withCString(ADBhandlerIDProperty); // make key/object for handlerID + if ( key == NULL ) { + newDev->free(); + newProps->free(); + continue; + } + object = OSNumber::withNumber((unsigned long long)adbDevices[i]->handlerID,8); + if ( object == NULL ) { + key->release(); + newDev->free(); + newProps->free(); + continue; + } + newProps->setObject(key, (OSObject *)object); // put it in newProps + key->release(); + object->release(); + + key = OSSymbol::withCString(ADBdefAddressProperty); // make key/object for default addr + if ( key == NULL ) { + newDev->free(); + newProps->free(); + continue; + } + object = OSNumber::withNumber((unsigned long long)adbDevices[i]->defaultAddress,8); + if ( object == NULL ) { + key->release(); + newDev->free(); + newProps->free(); + continue; + } + newProps->setObject(key, (OSObject *)object); // put it in newProps + 
key->release(); + object->release(); + + key = OSSymbol::withCString(ADBdefHandlerProperty); // make key/object for default h id + if ( key == NULL ) { + newDev->free(); + newProps->free(); + continue; + } + object = OSNumber::withNumber((unsigned long long)adbDevices[i]->defaultHandlerID,8); + if ( object == NULL ) { + key->release(); + newDev->free(); + newProps->free(); + continue; + } + newProps->setObject(key, (OSObject *)object); // put it in newProps + key->release(); + object->release(); + + if ( ! newDev->init(newProps,adbDevices[i]) ) { // give it to our new nub + kprintf("adb nub init failed\n"); + newDev->release(); + continue; + } + + sprintf(nameStr,"%x-%02x",adbDevices[i]->defaultAddress,adbDevices[i]->handlerID); + newDev->setName(nameStr); + sprintf(nameStr, "%x", adbDevices[i]->defaultAddress); + newDev->setLocation(nameStr); + + newProps->release(); // we're done with it + if ( !newDev->attach(this) ) { + kprintf("adb nub attach failed\n"); + newDev->release(); + continue; + } + newDev->registerService(); + newDev->start(this); + } // repeat loop + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// autopollHandler +// +// ********************************************************************************** +void autopollHandler ( IOService * us, UInt8 adbCommand, IOByteCount length, UInt8 * data ) +{ + ((IOADBController *)us)->packet(data,length,adbCommand); +} + + +// ********************************************************************************** +// packet +// +// ********************************************************************************** +void IOADBController::packet ( UInt8 * data, IOByteCount length, UInt8 adbCommand ) +{ + ADBDeviceControl * deviceInfo; + + deviceInfo = adbDevices[ adbCommand >> 4 ]; + if( deviceInfo != NULL ) { + if( deviceInfo->owner != NULL ) { + deviceInfo->handler(deviceInfo->owner, adbCommand, length, data); + } + } + else { + // new device 
arrival? + // IOLog("IOADBBus: new device @%x\n", address); + } +} + + +// ********************************************************************************** +// matchDevice +// +// ********************************************************************************** +bool IOADBController::matchNubWithPropertyTable( IOService * device, OSDictionary * propTable ) +{ + bool matched = false; + const char * keys; + ADBDeviceControl * deviceInfo = (ADBDeviceControl *)(((IOADBDevice *)device)->busRef()); + OSObject * X; + + do { + X = propTable->getObject("ADB Match"); + if( !X ) { + break; + } + keys = ((OSString *)X)->getCStringNoCopy(); + if( *keys == '*' ) { + keys++; + } + else { + if( deviceInfo->defaultAddress != strtol(keys, &keys, 16)) { + break; + } + } + if( *keys++ == '-' ) { + if( deviceInfo->defaultHandlerID != strtol(keys, &keys, 16)) { + break; + } + } + matched = true; + + } while ( false ); + return matched; +} + + +/////// nub -> bus + +// ********************************************************************************** +// setOwner +// +// ********************************************************************************** +IOReturn IOADBController::setOwner ( void * device, IOService * client, ADB_callback_func handler ) +{ + ADBDeviceControl * deviceInfo = (ADBDeviceControl *)device; + + deviceInfo->owner = client; + deviceInfo->handler = handler; + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// clearOwner +// +// ********************************************************************************** +IOReturn IOADBController::clearOwner ( void * device ) +{ + ADBDeviceControl * deviceInfo = (ADBDeviceControl *)device; + kprintf("IOADBController::clearOwner\n"); + + deviceInfo->owner = NULL; + deviceInfo->handler = NULL; + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// claimDevice +// +// Called by the user 
client +// ********************************************************************************** +IOReturn IOADBController::claimDevice (unsigned long ADBaddress, IOService * client, ADB_callback_func handler ) +{ + if ( claimed_devices[ADBaddress] == true ) { // is this address already claimed by the user? + return kIOReturnExclusiveAccess; // yes + } + if ( adbDevices[ADBaddress] == NULL ) { // no, is there a device at that address? + return kIOReturnNoDevice; // no + } + if (adbDevices[ADBaddress]->handler != NULL ) { // yes, is it already owned by the kernel? + return kIOReturnExclusiveAccess; // yes + } + claimed_devices[ADBaddress] = true; // no, user can have it + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// releaseDevice +// +// Called by the user client +// ********************************************************************************** +IOReturn IOADBController::releaseDevice (unsigned long ADBaddress ) +{ + if ( claimed_devices[ADBaddress] == false ) { + return kIOReturnBadArgument; + } + + claimed_devices[ADBaddress] = false; + + return kIOReturnSuccess; +} + + +// ********************************************************************************** +// readDeviceForUser +// +// Called by the user client +// ********************************************************************************** +IOReturn IOADBController::readDeviceForUser (unsigned long address, unsigned long adbRegister, + UInt8 * data, IOByteCount * length) +{ + if ( claimed_devices[address] == false ) { + return kIOReturnBadArgument; + } + + return (readFromDevice((IOADBAddress)address,(IOADBRegister)adbRegister,data,length)); +} + + +// ********************************************************************************** +// writeDeviceForUser +// +// Called by the user client +// ********************************************************************************** +IOReturn IOADBController::writeDeviceForUser (unsigned 
long address, unsigned long adbRegister, + UInt8 * data, IOByteCount * length) +{ + if ( claimed_devices[address] == false ) { + return kIOReturnBadArgument; + } + + return (writeToDevice((IOADBAddress)address,(IOADBRegister)adbRegister,data,length)); +} + + +// ********************************************************************************** +// address +// +// ********************************************************************************** +IOADBAddress IOADBController::address ( ADBDeviceControl * busRef ) +{ + return busRef->address; +} + + +// ********************************************************************************** +// defaultAddress +// +// ********************************************************************************** +IOADBAddress IOADBController::defaultAddress ( ADBDeviceControl * busRef ) +{ + return busRef->defaultAddress; +} + + +// ********************************************************************************** +// handlerID +// +// ********************************************************************************** +UInt8 IOADBController::handlerID ( ADBDeviceControl * busRef ) +{ + return busRef->handlerID; +} + + +// ********************************************************************************** +// defaultHandlerID +// +// ********************************************************************************** +UInt8 IOADBController::defaultHandlerID ( ADBDeviceControl * busRef ) +{ + return busRef->defaultHandlerID; +} + + +// ********************************************************************************** +// flush +// +// ********************************************************************************** +IOReturn IOADBController::flush ( ADBDeviceControl * busRef ) +{ + return(flushDevice(busRef->address)); +} + + +// ********************************************************************************** +// readRegister +// +// ********************************************************************************** +IOReturn 
IOADBController::readRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) +{ + return readFromDevice(busRef->address,adbRegister,data,length); +} + + +// ********************************************************************************** +// writeRegister +// +// ********************************************************************************** +IOReturn IOADBController::writeRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) +{ + return writeToDevice(busRef->address,adbRegister,data,length); +} + + +// ********************************************************************************** +// setHandlerID +// +// ********************************************************************************** +IOReturn IOADBController::setHandlerID ( ADBDeviceControl * deviceInfo, UInt8 handlerID ) +{ + IOReturn err; + UInt16 value; + IOByteCount length; + IOADBAddress addr = deviceInfo->address; + + length = 2; + err = readFromDevice(addr,3,(UInt8 *)&value,&length); + + if ( err ) { + return err; + } + + value = (value & 0xf000) | handlerID | (addr << 8); + length = sizeof(value); + err = writeToDevice(addr,3,(UInt8 *)&value,&length); + + length = sizeof(value); + err = readFromDevice(addr,3,(UInt8 *)&value,&length); + + if ( err == kIOReturnSuccess ) { + deviceInfo->handlerID = value & 0xff; + } + + if ( deviceInfo->handlerID == handlerID ) { + err = kIOReturnSuccess; + } + else { + err = kIOReturnNoResources; + } + + return err; +} + + +// ********************************************************************************** +// getURLComponentUnit +// +// ********************************************************************************** +int IOADBController::getURLComponentUnit ( IOService * device, char * path, int maxLen ) +{ + ADBDeviceControl * deviceInfo = (ADBDeviceControl *)((IOADBDevice *)device)->busRef(); + + if( maxLen > 1 ) { + sprintf( path, "%x", deviceInfo->address ); 
+ return(1); + } + else { + return(0); + } +} + + +// ********************************************************************************** +// newUserClient +// +// ********************************************************************************** +IOReturn IOADBController::newUserClient( task_t owningTask, void * /* security_id */, UInt32 type, IOUserClient ** handler ) +{ + IOReturn err = kIOReturnSuccess; + IOADBControllerUserClient * client; + + client = IOADBControllerUserClient::withTask(owningTask); + + if( !client || (false == client->attach( this )) || + (false == client->start( this )) ) { + if(client) { + client->detach( this ); + client->release(); + client = NULL; + } + err = kIOReturnNoMemory; + } + *handler = client; + return err; +} diff --git a/iokit/Families/IOADBBus/IOADBControllerUserClient.cpp b/iokit/Families/IOADBBus/IOADBControllerUserClient.cpp new file mode 100644 index 000000000..4d1502e91 --- /dev/null +++ b/iokit/Families/IOADBBus/IOADBControllerUserClient.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + */ + +#include +#include +#include +#include "IOADBControllerUserClient.h" + +#define super IOUserClient + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClassAndStructors(IOADBControllerUserClient, IOUserClient) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOADBControllerUserClient *IOADBControllerUserClient::withTask(task_t owningTask) +{ + IOADBControllerUserClient * me; + + me = new IOADBControllerUserClient; + if ( me ) { + if (! me->init() ) { + me->release(); + return NULL; + } + me->fTask = owningTask; + } + return me; +} + +bool IOADBControllerUserClient::start( IOService * provider ) +{ + assert(OSDynamicCast(IOADBController, provider)); + if(!super::start(provider)) + return false; + fOwner = (IOADBController *)provider; + + // Got the owner, so initialize the call structures + fMethods[kADBReadDevice].object = provider; + fMethods[kADBReadDevice].func = (IOMethod)&IOADBController::readDeviceForUser; + fMethods[kADBReadDevice].count0 = 2; + fMethods[kADBReadDevice].count1 = 8; + fMethods[kADBReadDevice].flags = kIOUCScalarIStructO; + + fMethods[kADBWriteDevice].object = provider; + fMethods[kADBWriteDevice].func = (IOMethod)&IOADBController::writeDeviceForUser; + fMethods[kADBWriteDevice].count0 = 4; + fMethods[kADBWriteDevice].count1 = 0; + fMethods[kADBWriteDevice].flags = kIOUCScalarIScalarO; + + fMethods[kADBClaimDevice].object = provider; + fMethods[kADBClaimDevice].func = (IOMethod)&IOADBController::claimDevice; + fMethods[kADBClaimDevice].count0 = 1; + fMethods[kADBClaimDevice].count1 = 0; + fMethods[kADBClaimDevice].flags = kIOUCScalarIScalarO; + + fMethods[kADBReleaseDevice].object = provider; + fMethods[kADBReleaseDevice].func = (IOMethod)&IOADBController::releaseDevice; + fMethods[kADBReleaseDevice].count0 = 1; + fMethods[kADBReleaseDevice].count1 = 
0; + fMethods[kADBReleaseDevice].flags = kIOUCScalarIScalarO; + + return true; +} + +IOReturn IOADBControllerUserClient::clientMemoryForType( UInt32 type, + UInt32 * flags, IOLogicalAddress * address, IOByteCount * size ) +{ + return kIOReturnUnsupported; +} + +IOReturn IOADBControllerUserClient::clientClose( void ) +{ + detach( fOwner); + + return kIOReturnSuccess; +} + +IOReturn IOADBControllerUserClient::clientDied( void ) +{ + return( clientClose()); +} + +IOReturn IOADBControllerUserClient::connectClient( IOUserClient * client ) +{ + return kIOReturnSuccess; +} + +IOExternalMethod * IOADBControllerUserClient::getExternalMethodForIndex( UInt32 index ) +{ + if(index >= kNumADBMethods) + return NULL; + else + return &fMethods[index]; +} + +IOReturn IOADBControllerUserClient::registerNotificationPort ( mach_port_t port, UInt32 type ) +{ + return kIOReturnUnsupported; +} + diff --git a/iokit/Families/IOADBBus/IOADBControllerUserClient.h b/iokit/Families/IOADBBus/IOADBControllerUserClient.h new file mode 100644 index 000000000..9035f3bef --- /dev/null +++ b/iokit/Families/IOADBBus/IOADBControllerUserClient.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_ADBCONTROLLERUSERCLIENT_H +#define _IOKIT_ADBCONTROLLERUSERCLIENT_H + +#include +#include +#include + + +class IOADBControllerUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOADBControllerUserClient) + +private: + IOADBController * fOwner; + task_t fTask; + IOExternalMethod fMethods[ kNumADBMethods ]; + +public: + + static IOADBControllerUserClient *withTask(task_t owningTask); + + virtual IOReturn clientClose( void ); + + virtual IOReturn clientDied( void ); + + virtual IOReturn registerNotificationPort ( mach_port_t port, UInt32 type ); + + virtual IOReturn connectClient( IOUserClient * client ); + + virtual IOReturn clientMemoryForType( UInt32, UInt32 *, IOLogicalAddress *, IOByteCount * ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + virtual bool start( IOService * provider ); + +}; + +#endif /* ! _IOKIT_ADBCONTROLLERUSERCLIENT_H */ + diff --git a/iokit/Families/IOATAHDDrive/IOATAHDCommand.cpp b/iokit/Families/IOATAHDDrive/IOATAHDCommand.cpp new file mode 100644 index 000000000..8cfd30563 --- /dev/null +++ b/iokit/Families/IOATAHDDrive/IOATAHDCommand.cpp @@ -0,0 +1,612 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAHDCommand.cpp - Performs ATA command processing. + * + * HISTORY + * Aug 27, 1999 jliu - Ported from AppleATADrive. + */ + +#include +#include +#include + +// Enable this define to generate debugging messages. +// #define DEBUG_LOG 1 + +//--------------------------------------------------------------------------- +// Select the device timing protocol. + +bool +IOATAHDDrive::selectTimingProtocol() +{ + bool ret; + UInt8 ataReadCmd; + UInt8 ataWriteCmd; + ATATimingProtocol timing; + char * protocolName; + + ret = _ataDevice->getTimingsSupported(&timing); + if (ret == false) + { + IOLog("%s: getTimingsSupported() error\n", getName()); + timing = kATATimingPIO; + } + + // IOLog("%s: device supported timings: %08x\n", getName(), timing); + + if (timing & (kATATimingUltraDMA66 | kATATimingUltraDMA33 | kATATimingDMA)) + { + if (timing & kATATimingUltraDMA66) + { + protocolName = "U-DMA/66"; + timing = kATATimingUltraDMA66; + } + else if (timing & kATATimingUltraDMA33) + { + protocolName = "U-DMA/33"; + timing = kATATimingUltraDMA33; + } + else + { + protocolName = "DMA"; + timing = kATATimingDMA; + } + + selectCommandProtocol(true); + + switch ( _ataProtocol ) + { + case kATAProtocolDMAQueued: + ataReadCmd = kIOATACommandReadDMAQueued; + ataWriteCmd = kIOATACommandWriteDMAQueued; + break; + + case kATAProtocolDMA: + default: + ataReadCmd = kIOATACommandReadDMA; + ataWriteCmd = kIOATACommandWriteDMA; + 
} + } + else + { + protocolName = "PIO"; + timing = kATATimingPIO; + ataReadCmd = kIOATACommandReadPIO; + ataWriteCmd = kIOATACommandWritePIO; + selectCommandProtocol(false); + } + + _timingProtocol = timing; + _ataReadCmd = ataReadCmd; + _ataWriteCmd = ataWriteCmd; + ret = true; + + // Select device timing. + // + ret = _ataDevice->selectTiming( _timingProtocol, false ); + + if (ret == false) + { + IOLog("%s: %s selectTiming() failed\n", getName(), protocolName); + + if (_timingProtocol != kATATimingPIO) + { + // Non PIO mode selection failed, defaulting to PIO mode and + // try one more time. + + protocolName = "PIO"; + _timingProtocol = kATATimingPIO; + _ataReadCmd = kIOATACommandReadPIO; + _ataWriteCmd = kIOATACommandWritePIO; + selectCommandProtocol(false); + + ret = _ataDevice->selectTiming( _timingProtocol, false ); + if (ret == false) + IOLog("%s: %s selectTiming() retry failed\n", + getName(), protocolName); + } + } + + if (ret && _logSelectedTimingProtocol) + IOLog("%s: Using %s transfers\n", getName(), protocolName); + + return ret; +} + +//--------------------------------------------------------------------------- +// Select the command protocol to use (e.g. ataProtocolPIO, ataProtocolDMA). + +bool +IOATAHDDrive::selectCommandProtocol(bool isDMA) +{ + ATAProtocol protocolsSupported; + + if ( _ataDevice->getProtocolsSupported( &protocolsSupported ) == false ) + { + IOLog("%s: getProtocolsSupported() failed\n", getName()); + return false; + } + + if ( (protocolsSupported & kATAProtocolDMAQueued) != 0 ) + { +#if 0 + _ataProtocol = kATAProtocolDMAQueued; +#else + _ataProtocol = kATAProtocolDMA; +#endif + } + else if ( (protocolsSupported & kATAProtocolDMA) != 0 ) + { + _ataProtocol = kATAProtocolDMA; + } + else + { + _ataProtocol = kATAProtocolPIO; + } + + return true; +} + +//--------------------------------------------------------------------------- +// Configure the ATA/ATAPI device when the driver is initialized, and +// after every device reset. 
+ +bool +IOATAHDDrive::configureDevice(IOATADevice * device) +{ + bool ret; + + // Select device timing. + // + ret = device->selectTiming( _timingProtocol, true ); + if (ret == false) { + IOLog("%s: selectTiming() failed\n", getName()); + return false; + } + + return true; +} + +//--------------------------------------------------------------------------- +// Setup an ATATaskFile from the parameters given, and write the taskfile +// to the ATATaskfile structure pointer provided. +// +// taskfile - pointer to a taskfile structure. +// protocol - An ATA transfer protocol (ataProtocolPIO, ataProtocolDMA, etc) +// command - ATA command byte. +// block - Initial transfer block. +// nblks - Number of blocks to transfer. + +void +IOATAHDDrive::setupReadWriteTaskFile(ATATaskfile * taskfile, + ATAProtocol protocol, + UInt8 command, + UInt32 block, + UInt32 nblks) +{ + bzero( taskfile, sizeof(ATATaskfile) ); + + taskfile->protocol = protocol; + + // Mask of all taskfile registers that shall contain valid + // data and should be written to the hardware registers. + // + taskfile->regmask = ATARegtoMask(kATARegSectorNumber) | + ATARegtoMask(kATARegCylinderLow) | + ATARegtoMask(kATARegCylinderHigh) | + ATARegtoMask(kATARegDriveHead) | + ATARegtoMask(kATARegSectorCount) | + ATARegtoMask(kATARegFeatures) | + ATARegtoMask(kATARegCommand); + + taskfile->resultmask = 0; + + taskfile->ataRegs[kATARegSectorNumber] = block & 0x0ff; + taskfile->ataRegs[kATARegCylinderLow] = (block >> 8) & 0xff; + taskfile->ataRegs[kATARegCylinderHigh] = (block >> 16) & 0xff; + taskfile->ataRegs[kATARegDriveHead] = ((block >> 24) & 0x0f) | + kATAModeLBA | (_unit << 4); + + if ( protocol == kATAProtocolDMAQueued ) + { + taskfile->ataRegs[kATARegFeatures] = + (nblks == kIOATAMaxBlocksPerXfer) ? 0 : nblks; + taskfile->ataRegs[kATARegSectorCount] = 0; + } + else + { + taskfile->ataRegs[kATARegFeatures] = 0; + taskfile->ataRegs[kATARegSectorCount] = + (nblks == kIOATAMaxBlocksPerXfer) ? 
0 : nblks; + } + + taskfile->ataRegs[kATARegCommand] = command; +} + +//--------------------------------------------------------------------------- +// Allocate and return an IOATACommand that is initialized to perform +// a read/write operation. +// +// buffer - IOMemoryDescriptor object describing this transfer. +// block - Initial transfer block. +// nblks - Number of blocks to transfer. + +IOATACommand * +IOATAHDDrive::ataCommandReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + ATATaskfile taskfile; + bool isWrite; + IOATACommand * cmd = allocateCommand(); + + assert(buffer); + + if (!cmd) return 0; // error, command allocation failed. + + isWrite = (buffer->getDirection() == kIODirectionOut) ? + true : false; + +#ifdef DEBUG_LOG + IOLog("%s::ataCommandReadWrite %08x (%d) %s %d %d\n", + getName(), + buffer, + buffer->getLength(), + isWrite ? "WR" : "RD", + block, + nblks); +#endif + +#if 0 // used for testing - force PIO mode + setupReadWriteTaskFile(&taskfile, + kATAProtocolPIO, + isWrite ? kIOATACommandWritePIO : + kIOATACommandReadPIO, + block, + nblks); +#else + + // Setup the taskfile structure with the size and direction of the + // transfer. This structure will be written to the actual taskfile + // registers when this command is processed. + // + setupReadWriteTaskFile(&taskfile, + _ataProtocol, + isWrite ? _ataWriteCmd : _ataReadCmd, + block, + nblks); +#endif + + // Get a pointer to the client data buffer, and record parameters + // which shall be later used by the completion routine. + // + ATA_CLIENT_DATA(cmd)->buffer = buffer; + + cmd->setTaskfile(&taskfile); + + cmd->setPointers(buffer, /* (IOMemoryDescriptor *) */ + buffer->getLength(), /* transferCount (bytes) */ + isWrite); /* isWrite */ + + return cmd; +} + +//--------------------------------------------------------------------------- +// Allocate and return a ATA SetFeatures command. 
+ +IOATACommand * +IOATAHDDrive::ataCommandSetFeatures(UInt8 features, + UInt8 SectorCount, + UInt8 SectorNumber, + UInt8 CylinderLow, + UInt8 CyclinderHigh) +{ + ATATaskfile taskfile; + IOATACommand * cmd = allocateCommand(); + + if (!cmd) return 0; // error, command allocation failed. + + taskfile.protocol = kATAProtocolPIO; + + taskfile.regmask = ATARegtoMask(kATARegSectorNumber) | + ATARegtoMask(kATARegCylinderLow) | + ATARegtoMask(kATARegCylinderHigh) | + ATARegtoMask(kATARegDriveHead) | + ATARegtoMask(kATARegSectorCount) | + ATARegtoMask(kATARegCommand); + + taskfile.resultmask = ATARegtoMask(kATARegError) | + ATARegtoMask(kATARegStatus); + + taskfile.ataRegs[kATARegFeatures] = features; + taskfile.ataRegs[kATARegSectorNumber] = SectorNumber; + taskfile.ataRegs[kATARegCylinderLow] = CylinderLow; + taskfile.ataRegs[kATARegCylinderHigh] = CyclinderHigh; + taskfile.ataRegs[kATARegDriveHead] = kATAModeLBA | (_unit << 4); + taskfile.ataRegs[kATARegSectorCount] = SectorCount; + taskfile.ataRegs[kATARegCommand] = kIOATACommandSetFeatures; + + cmd->setTaskfile(&taskfile); + + // This is a way to issue a command which will wait + // for an interrupt, but does no data transfer. + + cmd->setPointers(0, 0, false); + + return cmd; +} + +//--------------------------------------------------------------------------- +// Return a Flush Cache command. + +IOATACommand * +IOATAHDDrive::ataCommandFlushCache() +{ + ATATaskfile taskfile; + IOATACommand * cmd = allocateCommand(); + + if (!cmd) return 0; // error, command allocation failed. + + // kATAProtocolSetRegs does not wait for an interrupt from the drive. 
+ + taskfile.protocol = kATAProtocolPIO; + + taskfile.regmask = ATARegtoMask(kATARegDriveHead) | + ATARegtoMask(kATARegCommand); + + taskfile.resultmask = ATARegtoMask(kATARegError) | + ATARegtoMask(kATARegSectorNumber) | + ATARegtoMask(kATARegCylinderLow) | + ATARegtoMask(kATARegCylinderHigh) | + ATARegtoMask(kATARegDriveHead) | + ATARegtoMask(kATARegStatus); + + taskfile.ataRegs[kATARegDriveHead] = kATAModeLBA | (_unit << 4); + taskfile.ataRegs[kATARegCommand] = kIOATACommandFlushCache; + + cmd->setTaskfile(&taskfile); + + // This is a way to issue a command which will wait + // for an interrupt, but does no data transfer. + + cmd->setPointers(0, 0, false); + + return cmd; +} + +//--------------------------------------------------------------------------- +// Return a STANDBY IMMEDIATE command. + +IOATACommand * +IOATAHDDrive::ataCommandStandby() +{ + ATATaskfile taskfile; + IOATACommand * cmd = allocateCommand(); + + if (!cmd) return 0; // error, command allocation failed. + + // kATAProtocolSetRegs does not wait for an interrupt from the drive. + + taskfile.protocol = kATAProtocolPIO; + + taskfile.regmask = ATARegtoMask(kATARegDriveHead) | + ATARegtoMask(kATARegCommand); + + taskfile.resultmask = ATARegtoMask(kATARegError) | + ATARegtoMask(kATARegStatus); + + taskfile.ataRegs[kATARegDriveHead] = kATAModeLBA | (_unit << 4); + taskfile.ataRegs[kATARegCommand] = kIOATACommandStandbyImmediate; + + cmd->setTaskfile(&taskfile); + + // This is a way to issue a command which will wait + // for an interrupt, but does no data transfer. + + cmd->setPointers(0, 0, false); + + return cmd; +} + +//--------------------------------------------------------------------------- +// This routine is called by our provider when a command processing has +// completed. 
+ +void +IOATAHDDrive::sHandleCommandCompletion(IOATAHDDrive * self, + IOATACommand * cmd) +{ + ATAResults results; + IOATADevice * device; + IOATAClientData * clientData; + + assert(cmd); + device = cmd->getDevice(kIOATADevice); + assert(device); + + clientData = ATA_CLIENT_DATA(cmd); + assert(clientData); + + if ((cmd->getResults(&results) != kIOReturnSuccess) && + (clientData->maxRetries-- > 0)) + { + cmd->execute(); + return; + } + +#if 0 + // Force command retry to test retry logic. + // Controller will reset the IOMemoryDescriptor's position, right? + // + cmd->getResults(&results); + if (clientData->maxRetries-- > 2) { + cmd->execute(); + return; + } +#endif + +#ifdef DEBUG_LOG + IOLog("%s: sHandleCommandCompletion %08x %08x %08x %08x %d\n", + getName(), device, cmd, refcon, results.returnCode, + results.bytesTransferred); +#endif + + // Return IOReturn for sync commands. + // + clientData->returnCode = results.returnCode; + + if (clientData->isSync) { + // For sync commands, unblock the client thread. + // + assert(clientData->completion.syncLock); + clientData->completion.syncLock->signal(); // unblock the client. + } + else { + // Signal the completion routine that the request has been completed. + // + + IOStorage::complete(clientData->completion.async, + results.returnCode, + (UInt64) results.bytesTransferred); + } + + // Release the IOMemoryDescriptor. + // + if (clientData->buffer) + clientData->buffer->release(); + + // Command processing is complete, release the command object. + // + cmd->release(); +} + +//--------------------------------------------------------------------------- +// Issue a synchronous ATA command. 
+ +IOReturn +IOATAHDDrive::syncExecute(IOATACommand * cmd, /* command object */ + UInt32 timeout, /* timeout in ms */ + UInt retries, /* max retries */ + IOMemoryDescriptor * senseData) +{ + IOATAClientData * clientData = ATA_CLIENT_DATA(cmd); + + if ( _pmReady ) + { + activityTickle( kIOPMSuperclassPolicy1, 1 ); + } + + // Bump the retain count on the command. The completion handler + // will decrement the retain count. + // + cmd->retain(); + + // Set timeout and register the completion handler. + // + cmd->setPointers(senseData, + senseData ? senseData->getLength() : 0, + false, /* isWrite */ + true ); /* isSense */ + cmd->setTimeout(timeout); + cmd->setCallback(this, + (CallbackFn) &IOATAHDDrive::sHandleCommandCompletion, + (void *) cmd); + + // Increment the retain count on the IOMemoryDescriptor. + // Release when the completion routine gets called. + // + if (clientData->buffer) + clientData->buffer->retain(); + + // Set the max retry count. If retry count is 0, then the command shall + // not be retried if an error occurs. + // + clientData->maxRetries = retries; + clientData->completion.syncLock = IOSyncer::create(); + clientData->isSync = true; + + cmd->execute(); + + // Block client thread on lock until the completion handler + // receives an indication that the processing is complete. + // + clientData->completion.syncLock->wait(); + + return clientData->returnCode; +} + +//--------------------------------------------------------------------------- +// Issue an asynchronous ATA command. + +IOReturn +IOATAHDDrive::asyncExecute(IOATACommand * cmd, /* command object */ + IOStorageCompletion completion, + UInt32 timeout, /* timeout in ms */ + UInt retries) /* max retries */ +{ + IOATAClientData * clientData = ATA_CLIENT_DATA(cmd); + + if ( _pmReady ) + { + activityTickle( kIOPMSuperclassPolicy1, 1 ); + } + + // Bump the retain count on the command. The completion handler + // will decrement the retain count. 
+ // + cmd->retain(); + + // Set timeout and register the completion handler. + // + cmd->setTimeout(timeout); + cmd->setCallback(this, + (CallbackFn) &IOATAHDDrive::sHandleCommandCompletion, + (void *) cmd); + + // Increment the retain count on the IOMemoryDescriptor. + // Release when the completion routine gets called. + // + if (clientData->buffer) + clientData->buffer->retain(); + + // Set the max retry count. If retry count is 0, then the command shall + // not be retried if an error occurs. + // + clientData->maxRetries = retries; + clientData->isSync = false; + + clientData->completion.async = completion; + + return (cmd->execute() ? kIOReturnSuccess : kIOReturnNoResources); +} + +//--------------------------------------------------------------------------- +// Allocate an IOATACommand object with a fixed client data area. + +IOATACommand * +IOATAHDDrive::allocateCommand() +{ + return _ataDevice->allocCommand(kIOATADevice, sizeof(IOATAClientData)); +} diff --git a/iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp b/iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp new file mode 100644 index 000000000..af28e9a54 --- /dev/null +++ b/iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp @@ -0,0 +1,584 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAHDDrive.cpp - Generic ATA disk driver. + * + * HISTORY + * Aug 27, 1999 jliu - Ported from AppleATADrive. + */ + +#include +#include +#include + +#define super IOService +OSDefineMetaClassAndStructors( IOATAHDDrive, IOService ) + +//--------------------------------------------------------------------------- +// C to C++ glue. + +void +IOATAHDDrive::sHandleConfigureDevice(IOATAHDDrive * self) +{ + self->configureDevice(self->_ataDevice); +} + +//--------------------------------------------------------------------------- +// init() method. + +bool +IOATAHDDrive::init(OSDictionary * properties) +{ + return (super::init(properties)); +} + +//--------------------------------------------------------------------------- +// Override probe() method inherited from IOService. + +IOService * +IOATAHDDrive::probe(IOService * provider, SInt32 * score) +{ + if (!super::probe(provider, score)) + return 0; + + // Our provider must be a IOATADevice nub, most likely created + // by an IOATAController instance. + // + IOATADevice * device = OSDynamicCast(IOATADevice, provider); + if (device == 0) + return 0; // Provider is not an IOATADevice. + + // Do ATA device type matching. Does the nub match my device type? + // + if (device->getDeviceType() != reportATADeviceType()) + return 0; // error, type mismatch (probably ATAPI). + + // Cache the drive unit number (master/slave assignment). 
+ // + _unit = device->getUnit(); + + return this; // probe successful. +} + +//--------------------------------------------------------------------------- +// Starts up the driver and spawn a nub. + +bool +IOATAHDDrive::start(IOService * provider) +{ + // First call start() in our superclass. + // + if (super::start(provider) == false) + return false; + + _configThreadCall = (void *) thread_call_allocate( + (thread_call_func_t) sHandleConfigureDevice, + (thread_call_param_t) this); + if (!_configThreadCall) + return false; + + // Cache our provider. + // + _ataDevice = OSDynamicCast(IOATADevice, provider); + if (_ataDevice == 0) + return false; + + // Open our provider. + // + _ataDevice->retain(); + if (_ataDevice->open(this) == false) + return false; + + // Inspect the provider. + // + if (inspectDevice(_ataDevice) == false) + return false; + + // Select ATA timing. + // + _logSelectedTimingProtocol = true; + + if (selectTimingProtocol() == false) + return false; + + // Create an IOCommandGate (for power management support) and attach + // this event source to the provider's workloop. + // + _cmdGate = IOCommandGate::commandGate(this); + if (_cmdGate == 0) + return false; + + IOWorkLoop * workloop = _ataDevice->getWorkLoop(); + if ((workloop == 0) || + (workloop->addEventSource(_cmdGate) != kIOReturnSuccess)) + return false; + + // Starts up in the active state. + // + _currentATAPowerState = kIOATAPowerStateActive; + + // A policy-maker must make these calls to join the PM tree, + // and to initialize its state. + // + PMinit(); /* initialize power management variables */ + provider->joinPMtree(this); /* join power management tree */ + setIdleTimerPeriod(300); /* 300 sec inactivity timer */ + + if (_supportedFeatures & kIOATAFeaturePowerManagement) + initForPM(); + + return (createNub(provider)); +} + +//--------------------------------------------------------------------------- +// Stop the driver. 
+ +void +IOATAHDDrive::stop(IOService * provider) +{ + PMstop(); + + super::stop(provider); +} + +//--------------------------------------------------------------------------- +// Release allocated resources. + +void +IOATAHDDrive::free() +{ + if (_configThreadCall) { + thread_call_cancel((thread_call_t) _configThreadCall); + thread_call_free((thread_call_t) _configThreadCall); + } + + if (_cmdGate) { + if (_ataDevice && (_ataDevice->getWorkLoop())) + _ataDevice->getWorkLoop()->removeEventSource(_cmdGate); + _cmdGate->release(); + } + + if (_ataDevice) + _ataDevice->release(); + + super::free(); +} + +//--------------------------------------------------------------------------- +// Fetch information about the ATA device nub. + +bool +IOATAHDDrive::inspectDevice(IOATADevice * ataDevice) +{ + OSString * string; + ATAIdentify * identify; + + // Fetch ATA device information from the nub. + // + string = OSDynamicCast(OSString, + ataDevice->getProperty(kATAPropertyModelNumber)); + if (string) { + strncpy(_model, string->getCStringNoCopy(), 40); + _model[40] = '\0'; + } + + string = OSDynamicCast(OSString, + ataDevice->getProperty(kATAPropertyFirmwareRev)); + if (string) { + strncpy(_revision, string->getCStringNoCopy(), 8); + _revision[8] = '\0'; + } + + // Fetch Word 82 (commandSetsSupported1) in Identify data. + // + identify = (ATAIdentify *) IOMalloc(sizeof(*identify)); + if (!identify) + return false; + + ataDevice->getIdentifyData(identify); + + if (identify->commandSetsSupported1 & 0x8) + _supportedFeatures |= kIOATAFeaturePowerManagement; + + if (identify->commandSetsSupported1 & 0x20) + _supportedFeatures |= kIOATAFeatureWriteCache; + + IOFree(identify, sizeof(*identify)); + + // Add an OSNumber property indicating the supported features. 
+ // + setProperty(kIOATASupportedFeaturesKey, + _supportedFeatures, + sizeof(_supportedFeatures) * 8); + + return true; +} + +//--------------------------------------------------------------------------- +// Report the type of ATA device (ATA vs. ATAPI). + +ATADeviceType +IOATAHDDrive::reportATADeviceType() const +{ + return kATADeviceATA; +} + +//--------------------------------------------------------------------------- +// Returns the device type. + +const char * +IOATAHDDrive::getDeviceTypeName() +{ + return kIOBlockStorageDeviceTypeGeneric; +} + +//--------------------------------------------------------------------------- +// Instantiate an ATA specific subclass of IOBlockStorageDevice. + +IOService * IOATAHDDrive::instantiateNub() +{ + IOService * nub = new IOATAHDDriveNub; + return nub; +} + +//--------------------------------------------------------------------------- +// Returns an IOATAHDDriveNub. + +bool IOATAHDDrive::createNub(IOService * provider) +{ + IOService * nub; + + // Instantiate a generic hard disk nub so a generic driver + // can match above us. + // + nub = instantiateNub(); + + if (nub == 0) { + IOLog("%s: instantiateNub() failed\n", getName()); + return false; + } + + nub->init(); + + if (!nub->attach(this)) + IOPanic("IOATAHDDrive::createNub() unable to attach nub"); + + nub->registerService(); + + return true; +} + +//--------------------------------------------------------------------------- +// Handles read/write requests. 
+ +IOReturn IOATAHDDrive::doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion) +{ + IOReturn ret; + IOATACommand * cmd = ataCommandReadWrite(buffer, block, nblks); + + if (cmd == 0) + return kIOReturnNoMemory; + + ret = asyncExecute(cmd, completion); + + cmd->release(); + + return ret; +} + +IOReturn IOATAHDDrive::doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + IOReturn ret; + IOATACommand * cmd = ataCommandReadWrite(buffer, block, nblks); + + if (cmd == 0) + return kIOReturnNoMemory; + + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Eject the media in the drive. + +IOReturn IOATAHDDrive::doEjectMedia() +{ + return kIOReturnUnsupported; // No support for removable ATA devices. +} + +//--------------------------------------------------------------------------- +// Format the media in the drive. +// ATA devices does not support low level formatting. + +IOReturn IOATAHDDrive::doFormatMedia(UInt64 byteCapacity) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Returns disk capacity. + +UInt32 IOATAHDDrive::doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + UInt32 blockCount = 0; + UInt32 blockSize = 0; + + assert(_ataDevice); + + if (_ataDevice->getDeviceCapacity(&blockCount, &blockSize) && + (capacities != NULL) && (capacitiesMaxCount > 0)) + { + UInt64 count = blockCount; + UInt64 size = blockSize; + + *capacities = size * (count + 1); + + return 1; + } + + return 0; +} + +//--------------------------------------------------------------------------- +// Lock the media and prevent a user-initiated eject. + +IOReturn IOATAHDDrive::doLockUnlockMedia(bool doLock) +{ + return kIOReturnUnsupported; // No removable ATA device support. 
+} + +//--------------------------------------------------------------------------- +// Flush the write-cache to the physical media. + +IOReturn IOATAHDDrive::doSynchronizeCache() +{ + IOReturn ret; + IOATACommand * cmd = ataCommandFlushCache(); + + if (cmd == 0) + return kIOReturnNoMemory; + + ret = syncExecute(cmd, 60000); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Handle a Start Unit command. + +IOReturn +IOATAHDDrive::doStart() +{ + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Handle a Stop Unit command. + +IOReturn +IOATAHDDrive::doStop() +{ + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Return device identification strings. + +char * IOATAHDDrive::getAdditionalDeviceInfoString() +{ + return ("[ATA]"); +} + +char * IOATAHDDrive::getProductString() +{ + return _model; +} + +char * IOATAHDDrive::getRevisionString() +{ + return _revision; +} + +char * IOATAHDDrive::getVendorString() +{ + return NULL; +} + +//--------------------------------------------------------------------------- +// Report the device block size in bytes. We ask the device nub for the +// block size. We expect this to be 512-bytes. + +IOReturn IOATAHDDrive::reportBlockSize(UInt64 * blockSize) +{ + UInt32 blkCount = 0; + UInt32 blkSize = 0; + + assert(_ataDevice); + + if (!_ataDevice->getDeviceCapacity(&blkCount, &blkSize)) + return kIOReturnNoDevice; + + *blockSize = blkSize; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report the media in the ATA device as non-ejectable. + +IOReturn IOATAHDDrive::reportEjectability(bool * isEjectable) +{ + *isEjectable = false; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Fixed media, locking is invalid. 
+ +IOReturn IOATAHDDrive::reportLockability(bool * isLockable) +{ + *isLockable = false; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report the polling requirements for a removable media. + +IOReturn IOATAHDDrive::reportPollRequirements(bool * pollRequired, + bool * pollIsExpensive) +{ + *pollIsExpensive = false; + *pollRequired = false; + + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report the max number of bytes transferred for an ATA read command. + +IOReturn IOATAHDDrive::reportMaxReadTransfer(UInt64 blocksize, UInt64 * max) +{ + *max = blocksize * kIOATAMaxBlocksPerXfer; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report the max number of bytes transferred for an ATA write command. + +IOReturn IOATAHDDrive::reportMaxWriteTransfer(UInt64 blocksize, UInt64 * max) +{ + // Same as read transfer limits. + // + return reportMaxReadTransfer(blocksize, max); +} + +//--------------------------------------------------------------------------- +// Returns the maximum addressable sector number. + +IOReturn IOATAHDDrive::reportMaxValidBlock(UInt64 * maxBlock) +{ + UInt32 blockCount = 0; + UInt32 blockSize = 0; + + assert(_ataDevice && maxBlock); + + if (!_ataDevice->getDeviceCapacity(&blockCount, &blockSize)) + return kIOReturnNoDevice; + + *maxBlock = blockCount; + + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report whether the media is currently present, and whether a media +// change has been registered since the last reporting. 
+ +IOReturn IOATAHDDrive::reportMediaState(bool * mediaPresent, bool * changed) +{ + *mediaPresent = true; + *changed = true; + + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report whether the media is removable. + +IOReturn IOATAHDDrive::reportRemovability(bool * isRemovable) +{ + *isRemovable = false; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report if the media is write-protected. + +IOReturn IOATAHDDrive::reportWriteProtection(bool * isWriteProtected) +{ + *isWriteProtected = false; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Handles messages from our provider. + +IOReturn +IOATAHDDrive::message(UInt32 type, IOService * provider, void * argument) +{ + IOReturn ret = kIOReturnSuccess; + +// IOLog("IOATAHDDrive::message %p %lx\n", this, type); + + switch (type) + { + case kATAClientMsgBusReset: + _ataDevice->holdQueue(kATAQTypeNormalQ); + break; + + case kATAClientMsgBusReset | kATAClientMsgDone: + configureDevice( _ataDevice ); + break; + + case kATAClientMsgSelectTiming | kATAClientMsgDone: + _ataDevice->releaseQueue(kATAQTypeNormalQ); + break; + + default: + ret = super::message(type, provider, argument); + break; + } + + return ret; +} diff --git a/iokit/Families/IOATAHDDrive/IOATAHDDriveNub.cpp b/iokit/Families/IOATAHDDrive/IOATAHDDriveNub.cpp new file mode 100644 index 000000000..a917fec7a --- /dev/null +++ b/iokit/Families/IOATAHDDrive/IOATAHDDriveNub.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAHDDriveNub.cpp + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * Aug 27, 1999 jliu - Created. + */ + +#include +#include +#include + +#define super IOBlockStorageDevice +OSDefineMetaClassAndStructors( IOATAHDDriveNub, IOBlockStorageDevice ) + +//--------------------------------------------------------------------------- +// attach to provider. + +bool IOATAHDDriveNub::attach(IOService * provider) +{ + if (!super::attach(provider)) + return false; + + _provider = OSDynamicCast(IOATAHDDrive, provider); + if (_provider == 0) { + IOLog("IOATAHDDriveNub: attach; wrong provider type!\n"); + return false; + } + + return true; +} + +//--------------------------------------------------------------------------- +// detach from provider. 
+ +void IOATAHDDriveNub::detach(IOService * provider) +{ + if (_provider == provider) + _provider = 0; + + super::detach(provider); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion) +{ + return _provider->doAsyncReadWrite(buffer, block, nblks, completion); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::doSyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks) +{ + return _provider->doSyncReadWrite(buffer, block, nblks); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::doEjectMedia() +{ + return _provider->doEjectMedia(); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return _provider->doFormatMedia(byteCapacity); +} + +//--------------------------------------------------------------------------- +// + +UInt32 +IOATAHDDriveNub::doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + return _provider->doGetFormatCapacities(capacities, capacitiesMaxCount); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::doLockUnlockMedia(bool doLock) +{ + return _provider->doLockUnlockMedia(doLock); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::doSynchronizeCache() +{ + return _provider->doSynchronizeCache(); +} + +//--------------------------------------------------------------------------- +// + +char * IOATAHDDriveNub::getVendorString() +{ + return _provider->getVendorString(); +} + +//--------------------------------------------------------------------------- +// + +char * 
IOATAHDDriveNub::getProductString() +{ + return _provider->getProductString(); +} + +//--------------------------------------------------------------------------- +// + +char * IOATAHDDriveNub::getRevisionString() +{ + return _provider->getRevisionString(); +} + +//--------------------------------------------------------------------------- +// + +char * IOATAHDDriveNub::getAdditionalDeviceInfoString() +{ + return _provider->getAdditionalDeviceInfoString(); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportBlockSize(UInt64 * blockSize) +{ + return _provider->reportBlockSize(blockSize); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportEjectability(bool * isEjectable) +{ + return _provider->reportEjectability(isEjectable); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportLockability(bool * isLockable) +{ + return _provider->reportLockability(isLockable); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive) +{ + return _provider->reportPollRequirements(pollIsRequired, pollIsExpensive); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportMaxReadTransfer(UInt64 blockSize, + UInt64 * max) +{ + return _provider->reportMaxReadTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportMaxValidBlock(UInt64 * maxBlock) +{ + return _provider->reportMaxValidBlock(maxBlock); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportMaxWriteTransfer(UInt64 blockSize, + UInt64 * max) +{ + return 
_provider->reportMaxWriteTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportMediaState(bool * mediaPresent, + bool * changed) +{ + return _provider->reportMediaState(mediaPresent, changed); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportRemovability(bool * isRemovable) +{ + return _provider->reportRemovability(isRemovable); +} + +//--------------------------------------------------------------------------- +// + +IOReturn IOATAHDDriveNub::reportWriteProtection(bool * isWriteProtected) +{ + return _provider->reportWriteProtection(isWriteProtected); +} diff --git a/iokit/Families/IOATAHDDrive/IOATAHDPower.cpp b/iokit/Families/IOATAHDDrive/IOATAHDPower.cpp new file mode 100644 index 000000000..bbbbbafcc --- /dev/null +++ b/iokit/Families/IOATAHDDrive/IOATAHDPower.cpp @@ -0,0 +1,590 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOATAHDPower.cpp - Power management support. 
+ * + * HISTORY + * + */ + +#include +#include +#define super IOService + +//--------------------------------------------------------------------------- +// Inform the policy-maker that an ATA hard-drive is capable of two power +// states (a simplification). The ourPowerStates[] array encodes information +// about each state. + +#define kIOATAPowerStates 2 + +static IOPMPowerState ourPowerStates[kIOATAPowerStates] = +{ + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1, IOPMDeviceUsable, IOPMPowerOn, IOPMPowerOn, 0,0,0,0,0,0,0,0} +}; + +static const char * ataPowerStateNames[] = +{ + "Sleep", + "Standby", + "Idle", + "Active" +}; + +//--------------------------------------------------------------------------- +// Maps the power state ordinal, used by our policy maker, +// to an ATA power states. + +IOATAPowerState +IOATAHDDrive::getATAPowerStateForStateOrdinal(UInt32 stateOrdinal) +{ + IOATAPowerState stateOrdinalToATAPowerState[kIOATAPowerStates] = + { + kIOATAPowerStateStandby, /* state 0 */ + kIOATAPowerStateActive, /* state 1 */ + }; + + if (stateOrdinal > (kIOATAPowerStates - 1)) + stateOrdinal = (kIOATAPowerStates - 1); + + return stateOrdinalToATAPowerState[stateOrdinal]; +} + +//--------------------------------------------------------------------------- +// Register the driver with our policy-maker (also in the same class). + +void +IOATAHDDrive::initForPM() +{ + registerPowerDriver(this, ourPowerStates, kIOATAPowerStates); + _pmReady = true; +} + +//--------------------------------------------------------------------------- +// Policy-maker code which intercepts kPMMinutesToSpinDown settings +// and then call setIdleTimerPeriod() to adjust the idle timer. 
+ +IOReturn +IOATAHDDrive::setAggressiveness(UInt32 type, UInt32 minutes) +{ + if (type == kPMMinutesToSpinDown) + { + // IOLog("IOATAHDDrive: setting idle timer to %ld min\n", minutes); + setIdleTimerPeriod(minutes * 60); // argument is in seconds + } + return super::setAggressiveness(type, minutes); +} + +//--------------------------------------------------------------------------- +// Policy-maker calls this function to find find out what power state +// the device is in, given the current power domain state. +// +// We respond to this message in the following fashion: +// If domain power is off, drive must be off. +// If domain power is on, return _currentATAPowerState. + +UInt32 +IOATAHDDrive::initialPowerStateForDomainState(IOPMPowerFlags domainState) +{ + UInt32 ret; + + _cmdGate->runAction((IOCommandGate::Action) + &IOATAHDDrive::sHandleInitialPowerStateForDomainState, + (void *) domainState, + (void *) &ret); + + return ret; +} + +//--------------------------------------------------------------------------- +// Static member function called by the IOCommandGate to translate +// initialPowerStateForDomainState() calls to the synchronized +// handleInitialPowerStateForDomainState() call. + +void +IOATAHDDrive::sHandleInitialPowerStateForDomainState(IOATAHDDrive * self, + IOPMPowerFlags domainState, + UInt32 * state) +{ + *state = self->handleInitialPowerStateForDomainState(domainState); +} + +//--------------------------------------------------------------------------- +// The synchronized form of initialPowerStateForDomainState(). + +UInt32 +IOATAHDDrive::handleInitialPowerStateForDomainState(IOPMPowerFlags domainState) +{ + if (domainState & IOPMPowerOn) + return ((_currentATAPowerState == kIOATAPowerStateActive) ? 1 : 0); + else + return 0; +} + +//--------------------------------------------------------------------------- +// Set/Change the power state of the ATA hard-drive. 
+ +IOReturn +IOATAHDDrive::setPowerState(UInt32 powerStateOrdinal, + IOService * whatDevice) +{ + IOReturn ret; + + // Power state transitions are synchronized by our IOCommandGate object, + // (attached to the ATA controller's workloop). + + _cmdGate->runAction((IOCommandGate::Action) + &IOATAHDDrive::sHandleSetPowerState, + (void *) powerStateOrdinal, + (void *) whatDevice, + (void *) &ret); + + kprintf("%s::%s(0x%08lx, 0x%08lx) returns 0x%08lx\n",getName(), __FUNCTION__,powerStateOrdinal, whatDevice, ret); + return ret; +} + +//--------------------------------------------------------------------------- +// Static member function called by the IOCommandGate to translate +// setPowerState() calls to the synchronized handleSetPowerState() call. + +void +IOATAHDDrive::sHandleSetPowerState(IOATAHDDrive * self, + UInt32 powerStateOrdinal, + IOService * whatDevice, + IOReturn * handlerReturn) +{ + *handlerReturn = self->handleSetPowerState(powerStateOrdinal, whatDevice); +} + +//--------------------------------------------------------------------------- +// A static member function that calls handleStandbyStateTransition(). +// This function can be registered as the completion handler for an +// IOATACommand. + +void +IOATAHDDrive::sHandleStandbyStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred) +{ + self->handleStandbyStateTransition((UInt32) stage, status); +} + +//--------------------------------------------------------------------------- +// A static member function that calls handleActiveStateTransition(). +// This function can be registered as the completion handler for an +// IOATACommand. 
+ +void +IOATAHDDrive::sHandleActiveStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred) +{ + self->handleActiveStateTransition((UInt32) stage, status); +} + +//--------------------------------------------------------------------------- +// A static member function that calls handleIdleStateTransition(). +// This function can be registered as the completion handler for an +// IOATACommand. + +void +IOATAHDDrive::sHandleIdleStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred) +{ + self->handleIdleStateTransition((UInt32) stage, status); +} + +//--------------------------------------------------------------------------- +// A static member function that calls handleSleepStateTransition(). +// This function can be registered as the completion handler for an +// IOATACommand. + +void +IOATAHDDrive::sHandleSleepStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred) +{ + self->handleSleepStateTransition((UInt32) stage, status); +} + +//--------------------------------------------------------------------------- +// IOATAHDDrive provide a default implementation for handleSetPowerState(). +// This (IOCommandGate synchronized) function is called by our policy-maker. + +IOReturn +IOATAHDDrive::handleSetPowerState(UInt32 powerStateOrdinal, + IOService * whatDevice) +{ + IOATAPowerState ataPowerState = + getATAPowerStateForStateOrdinal(powerStateOrdinal); + +#if 1 + kprintf("%s::%s %d (%d) %lx\n", getName(), __FUNCTION__, ataPowerState, + _currentATAPowerState, (UInt32) whatDevice); +#endif + + // We cannot change power state while we are still transitioning + // the power state from a previous state change request. 
+ + if (_powerStateChanging) { + kprintf("%s::%s overlap detected\n",getName(), __FUNCTION__); + IOLog("%s::%s overlap detected\n",getName(), __FUNCTION__); + return IOPMAckImplied; // FIXME - should return something else + } + + // If we are already in the desired power state, return success. + + if (ataPowerState == _currentATAPowerState) { + kprintf("%s::%s already in the given sate\n",getName(), __FUNCTION__); + return IOPMAckImplied; + } + + _powerStateChanging = true; + _setPowerAckPending = true; + + startATAPowerStateTransition(ataPowerState); + + // Return the number of microseconds it may take for the drive to + // complete the power state transition. Report 100 seconds max. + + return (100 * 1000 * 1000); +} + +//--------------------------------------------------------------------------- +// Start transitioning into the specified ATA power state. + +void +IOATAHDDrive::startATAPowerStateTransition(IOATAPowerState ataPowerState) +{ + _proposedATAPowerState = ataPowerState; + + switch (ataPowerState) + { + case kIOATAPowerStateStandby: + + // Register sHandleStandbyStateTransition to be called when the + // IOATADevice becomes idle. Or, if the device is already idle, + // the function will be called immediately. + + _ataDevice->notifyIdle(this, + (CallbackFn) &IOATAHDDrive::sHandleStandbyStateTransition, + (void *) kIOATAStandbyStage0); + break; + + case kIOATAPowerStateActive: + + // Call sHandleActiveStateTransition and begin processing + // at stage 0. + + sHandleActiveStateTransition(this, + (void *) kIOATAActiveStage0, + kIOReturnSuccess, + 0); + break; + + default: + IOPanic("IOATAHDDrive::startATAPowerStateTransition\n"); + } +} + +//--------------------------------------------------------------------------- +// Abort the current state transition and retore the current state. + +void +IOATAHDDrive::abortATAPowerStateTransition() +{ + // Do not ack the setPowerState request if the power state + // transition is aborted. 
+ + _setPowerAckPending = false; + + // Transition to the previous state. However, if we are unable + // to transition to the previous state, then give up. + + if (_proposedATAPowerState != _currentATAPowerState) + { + startATAPowerStateTransition(_currentATAPowerState); + } + else + { + IOLog("%s::%s Unable to revert to previous state\n", + getName(), __FUNCTION__); + + endATAPowerStateTransition(_currentATAPowerState); + } +} + +//--------------------------------------------------------------------------- +// Complete the ATA power state transition. + +void +IOATAHDDrive::endATAPowerStateTransition(IOATAPowerState ataPowerState) +{ + _currentATAPowerState = ataPowerState; + + // In the future, a NACK response may be sent to indicate state change + // failure. + + if (_setPowerAckPending) { + thread_call_func(acknowledgeATAPowerStateTransition, this, 1); + //acknowledgeATAPowerStateTransition(this, NULL); + } + + //kprintf("%s::%s %s \n", getName(), __FUNCTION__, ataPowerStateNames[_currentATAPowerState]); +} + +//--------------------------------------------------------------------------- +// To avoid deadlocks between the PM and the IOATAHDDrive workloop the +// actual acknolegment wuns on a different thread. + +/* static */ void +IOATAHDDrive::acknowledgeATAPowerStateTransition(void *castMeToIOATAHDDrive, void*) +{ + IOATAHDDrive *myThis = OSDynamicCast(IOATAHDDrive, (OSObject*)castMeToIOATAHDDrive); + + if (myThis !=NULL) { + myThis->_powerStateChanging = false; + myThis->acknowledgeSetPowerState(); + } +} + +//--------------------------------------------------------------------------- +// A function called by startATAPowerStateTransition() to transition the +// drive into the STANDBY state. It may also be called by the IOATACommand +// completion handler to advance to the next stage of the state transition. +// +// stage: The current stage in the state transition. +// status: The status from the previous stage. 
+ +void +IOATAHDDrive::handleStandbyStateTransition(UInt32 stage, IOReturn status) +{ + bool doAbort = false; + IOATACommand * cmd = 0; + IOStorageCompletion completion; + +// IOLog("IOATAHDDrive::handleStandbyStateTransition %ld %x\n", stage, status); + + switch (stage) + { + case kIOATAStandbyStage0: + // Device is idle. Hold the normal queue. + _ataDevice->holdQueue(kATAQTypeNormalQ); + status = kIOReturnSuccess; + + case kIOATAStandbyStage1: + + if ( reportATADeviceType() == kATADeviceATA ) + { + // Issue a flush cache command. + + if ((cmd = ataCommandFlushCache()) == 0) + { + doAbort = true; break; + } + cmd->setQueueInfo(kATAQTypeBypassQ); + + // Must issue an async command here, otherwise the thread will + // deadlock. + + completion.target = this; + completion.action = sHandleStandbyStateTransition; + completion.parameter = (void *) kIOATAStandbyStage2; + + asyncExecute(cmd, + completion, + 60000, /* 1 min timeout */ + 0); /* no retry for flush cache command */ + break; + } + + case kIOATAStandbyStage2: + + if ( reportATADeviceType() == kATADeviceATA ) + { + // Issue an ATA STANDBY IMMEDIATE command. We ignore the + // status from the flush cache command since not all drives + // implement this. + + if ((cmd = ataCommandStandby()) == 0) + { + doAbort = true; break; + } + cmd->setQueueInfo(kATAQTypeBypassQ); + + // Must issue an async command here, otherwise the thread will + // deadlock. + + completion.target = this; + completion.action = sHandleStandbyStateTransition; + completion.parameter = (void *) kIOATAStandbyStage3; + + asyncExecute(cmd, + completion, + 30000, /* 30 sec timeout */ + 0); /* no retry for STANDBY command */ + break; + } + + case kIOATAStandbyStage3: + // Final stage in the STANDBY state transition. + + if (status != kIOReturnSuccess) { + // STANDBY command failed, abort the state transition. 
+ doAbort = true; break; + } + else { + endATAPowerStateTransition(kIOATAPowerStateStandby); + } + + break; + + default: + IOLog("%s::%s unknown stage %ld\n", getName(), __FUNCTION__, stage); + } + + if (cmd) cmd->release(); + + if (doAbort) + abortATAPowerStateTransition(); +} + +//--------------------------------------------------------------------------- +// Called by startATAPowerStateTransition() to transition the drive into +// the ACTIVE state. It may also be called by the IOATACommand completion +// handler to advance to the next stage of the state transition. +// +// stage: The current stage in the state transition. +// status: The status from the previous stage. + +void +IOATAHDDrive::handleActiveStateTransition(UInt32 stage, IOReturn status) +{ + IOStorageCompletion completion; + +#if 0 + IOLog("IOATAHDDrive::handleActiveStateTransition %p %ld %x\n", + this, stage, status); +#endif + + switch (stage) + { + case kIOATAActiveStage0: + kprintf("kIOATAActiveStage0 current power state is sleep %d\n", _currentATAPowerState == kIOATAPowerStateSleep); + +#if 0 // This does not work. + // Issue a software reset. Only necessary if the current + // state is kATAPowerStateSleep. + + // if (_currentATAPowerState == kIOATAPowerStateSleep) // Marco: Commenting because it looks like the power state is wrong + { + kprintf("Attempting to reset on kIOATAActiveStage0\n"); + _ataDevice->reset(); + } +#endif + + case kIOATAActiveStage1: + kprintf("kIOATAActiveStage1\n"); + + if ( reportATADeviceType() == kATADeviceATA ) + { + // Spin up the drive before releasing the queue. A media + // access command is issued with an extra long timeout. + + completion.target = this; + completion.action = sHandleActiveStateTransition, + completion.parameter = (void *) kIOATAActiveStage2; + + readSector(completion); + break; + } + + case kIOATAActiveStage2: + kprintf("kIOATAActiveStage2\n"); + // Release the normal queue. 
+ _ataDevice->releaseQueue(kATAQTypeNormalQ); + + case kIOATAActiveStage3: + kprintf("kIOATAActiveStage3\n"); + // Finalize ACTIVE state transition. + endATAPowerStateTransition(kIOATAPowerStateActive); + break; + + default: + IOLog("%s::%s unknown stage %ld\n", getName(), __FUNCTION__, stage); + } +} + +//--------------------------------------------------------------------------- +// Unimplemented state transition handlers. + +void +IOATAHDDrive::handleIdleStateTransition(UInt32 stage, IOReturn status) +{ + IOLog("%s::%s unimplemented!\n", getName(), __FUNCTION__); +} + +void +IOATAHDDrive::handleSleepStateTransition(UInt32 stage, IOReturn status) +{ + IOLog("%s::%s unimplemented!\n", getName(), __FUNCTION__); +} + +//--------------------------------------------------------------------------- +// Read a single sector from the disk. The data read is discarded. + +IOReturn IOATAHDDrive::readSector(IOStorageCompletion completion, + UInt32 sector = 0) +{ + IOBufferMemoryDescriptor * desc; + IOATACommand * cmd; + IOReturn ret; + + desc = IOBufferMemoryDescriptor::withCapacity(kIOATASectorSize, + kIODirectionIn); + if (!desc) + return kIOReturnNoMemory; + + desc->setLength(desc->getCapacity()); + + cmd = ataCommandReadWrite(desc, sector, 1); + if (!cmd) + return kIOReturnNoMemory; + + cmd->setQueueInfo(kATAQTypeBypassQ); + + ret = asyncExecute(cmd, completion, 60000); + + // Don't worry, asyncExecute has retained both the command + // and the memory descriptor object. + + desc->release(); + cmd->release(); + + return kIOReturnSuccess; +} diff --git a/iokit/Families/IOATAPICDDrive/IOATAPICDCommand.cpp b/iokit/Families/IOATAPICDDrive/IOATAPICDCommand.cpp new file mode 100644 index 000000000..a6e6c7cbd --- /dev/null +++ b/iokit/Families/IOATAPICDDrive/IOATAPICDCommand.cpp @@ -0,0 +1,196 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +//--------------------------------------------------------------------------- +// ATAPI Read TOC command (43). + +IOATACommand * +IOATAPICDDrive::atapiCommandReadTOC(IOMemoryDescriptor * buffer, + bool msf, + UInt8 format, + UInt8 startTrackSession) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandReadTOC; + atapiCmd.cdb[1] = msf ? 
0x02 : 0x00; + atapiCmd.cdb[6] = startTrackSession; + atapiCmd.cdb[7] = (UInt8)(buffer->getLength() >> 8); + atapiCmd.cdb[8] = (UInt8)(buffer->getLength()); + + if ((format & 0x04)) + atapiCmd.cdb[2] = (format & 0x07); // new format field + else + atapiCmd.cdb[9] = (format & 0x03) << 6; // old format field + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// atapiCommandPlayAudioMSF + +IOATACommand * +IOATAPICDDrive::atapiCommandPlayAudioMSF(CDMSF timeStart, CDMSF timeStop) +{ + ATACDBInfo atapiCmd; + + bzero(&atapiCmd, sizeof(atapiCmd)); + atapiCmd.cdbLength = 12; + + atapiCmd.cdb[0] = kIOATAPICommandPlayAudioMSF; + + // starting MSF address + atapiCmd.cdb[3] = timeStart.minute; + atapiCmd.cdb[4] = timeStart.second; + atapiCmd.cdb[5] = timeStart.frame; + + // ending MSF address + atapiCmd.cdb[6] = timeStop.minute; + atapiCmd.cdb[7] = timeStop.second; + atapiCmd.cdb[8] = timeStop.frame; + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// atapiCommandPauseResume + +IOATACommand * +IOATAPICDDrive::atapiCommandPauseResume(bool resume) +{ + ATACDBInfo atapiCmd; + + bzero(&atapiCmd, sizeof(atapiCmd)); + atapiCmd.cdbLength = 12; + + atapiCmd.cdb[0] = kIOATAPICommandPauseResume; + + // set resume bit + if (resume) atapiCmd.cdb[8] = 0x01; + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// atapiCommandStopPlay + +IOATACommand * +IOATAPICDDrive::atapiCommandStopPlay() +{ + ATACDBInfo atapiCmd; + + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandStopPlay; + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// atapiCommandReadSubChannel + +IOATACommand * +IOATAPICDDrive::atapiCommandReadSubChannel(IOMemoryDescriptor * buffer, + UInt8 
dataFormat, + UInt8 trackNumber) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandReadSubChannel; + atapiCmd.cdb[1] = 0x02; + atapiCmd.cdb[2] = 0x40; + atapiCmd.cdb[3] = dataFormat; + atapiCmd.cdb[6] = trackNumber; + atapiCmd.cdb[7] = (buffer->getLength() >> 8) & 0xff; + atapiCmd.cdb[8] = buffer->getLength() & 0xff; + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// atapiCommandScan + +IOATACommand * +IOATAPICDDrive::atapiCommandScan(CDMSF timeStart, bool reverse) +{ + ATACDBInfo atapiCmd; + + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandScan; + atapiCmd.cdb[1] = reverse ? 0x10 : 0x00; + atapiCmd.cdb[3] = timeStart.minute; + atapiCmd.cdb[4] = timeStart.second; + atapiCmd.cdb[5] = timeStart.frame; + atapiCmd.cdb[9] = 0x40; // MSF + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// Allocates and return an IOATACommand to perform a read/write operation. 
+ +IOATACommand * +IOATAPICDDrive::atapiCommandReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[ 0] = kIOATAPICommandReadCD; + atapiCmd.cdb[ 1] = (sectorType & 0x7) << 2; + atapiCmd.cdb[ 2] = (block >> 24) & 0xFF; + atapiCmd.cdb[ 3] = (block >> 16) & 0xFF; + atapiCmd.cdb[ 4] = (block >> 8) & 0xFF; + atapiCmd.cdb[ 5] = (block ) & 0xFF; + atapiCmd.cdb[ 6] = (nblks >> 16) & 0xFF; + atapiCmd.cdb[ 7] = (nblks >> 8) & 0xFF; + atapiCmd.cdb[ 8] = (nblks ) & 0xFF; + atapiCmd.cdb[ 9] = (sectorArea & ~kCDSectorAreaSubChannel); + atapiCmd.cdb[10] = (sectorArea & kCDSectorAreaSubChannel); + + return atapiCommand(&atapiCmd, buffer); +} diff --git a/iokit/Families/IOATAPICDDrive/IOATAPICDDrive.cpp b/iokit/Families/IOATAPICDDrive/IOATAPICDDrive.cpp new file mode 100644 index 000000000..a937999ee --- /dev/null +++ b/iokit/Families/IOATAPICDDrive/IOATAPICDDrive.cpp @@ -0,0 +1,513 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPICDDrive.h - Generic ATAPI CD-ROM driver. + * + * HISTORY + * Sep 2, 1999 jliu - Ported from AppleATAPIDrive. + */ + +#include +#include +#include +#include + +#define super IOATAPIHDDrive +OSDefineMetaClassAndStructors( IOATAPICDDrive, IOATAPIHDDrive ) + +//--------------------------------------------------------------------------- +// Looks for an ATAPI device which is a CD-ROM device. + +bool +IOATAPICDDrive::matchATAPIDeviceType(UInt8 type, SInt32 * score) +{ + if (type == kIOATAPIDeviceTypeCDROM) + return true; + + return false; +} + +//--------------------------------------------------------------------------- +// Instantiate an ATAPI specific subclass of IOCDBlockStorageDevice. + +IOService * +IOATAPICDDrive::instantiateNub() +{ + IOService * nub = new IOATAPICDDriveNub; + return nub; +} + +//--------------------------------------------------------------------------- +// Report whether media is write-protected. + +IOReturn +IOATAPICDDrive::reportWriteProtection(bool * isWriteProtected) +{ + *isWriteProtected = true; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Returns the device type. + +const char * +IOATAPICDDrive::getDeviceTypeName() +{ + return kIOBlockStorageDeviceTypeCDROM; +} + +//--------------------------------------------------------------------------- +// Read the Table of Contents. +// +// The LG DVD-ROM DRN8080B LAB8 drive returns a TOC Data Length field which +// describes the number of bytes *returned* in the transfer, not the number +// bytes available to be transferred like it should. There is a workaround +// that addresses this problem here, however the workaround should be moved +// into a separate drive-specific subclass in the future. 
+ +#define LG_DVD_ROM_DRN8080B_SUPPORT + +IOReturn +IOATAPICDDrive::readTOC(IOMemoryDescriptor * buffer) +{ + IOReturn ret; + IOATACommand * cmd; + + assert(buffer); + +#ifdef LG_DVD_ROM_DRN8080B_SUPPORT + IOMemoryDescriptor * bufferOrig = buffer; + bool isLG_DVD_ROM_DRN8080B = + ( getVendorString() && + getProductString() && + !strcmp(getVendorString(), "LG") && + !strcmp(getProductString(), "DVD-ROM DRN8080B") ); + + if (isLG_DVD_ROM_DRN8080B) { + buffer = IOBufferMemoryDescriptor::withCapacity( + max(4096, (bufferOrig->getLength()+1) & (~1)), + kIODirectionIn); + if (!buffer) + return kIOReturnNoMemory; + } +#endif LG_DVD_ROM_DRN8080B_SUPPORT + + cmd = atapiCommandReadTOC(buffer, true, 2, 0); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the Read TOC command. + // + ret = syncExecute(cmd); + +#ifdef LG_DVD_ROM_DRN8080B_SUPPORT + if (isLG_DVD_ROM_DRN8080B) { + void * toc; + UInt16 tocSize; + ATAResults results; + + cmd->getResults(&results); + toc = ((IOBufferMemoryDescriptor *)buffer)->getBytesNoCopy(); + tocSize = min(results.bytesTransferred, bufferOrig->getLength()); + + if (bufferOrig->writeBytes(0, toc, tocSize) < bufferOrig->getLength()) + ret = (ret == kIOReturnSuccess) ? kIOReturnUnderrun : ret; + else + ret = (ret == kIOReturnUnderrun) ? kIOReturnSuccess : ret; + + buffer->release(); + } +#endif LG_DVD_ROM_DRN8080B_SUPPORT + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Start analog audio play + +IOReturn +IOATAPICDDrive::audioPlay(CDMSF timeStart,CDMSF timeStop) +{ + IOATACommand * cmd; + IOReturn ret; + + // IOLog("IOATAPICDDrive::audioPlay %x %x\n",timeStart,timeStop); + cmd = atapiCommandPlayAudioMSF(timeStart, timeStop); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the audio play command. 
+ // + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::audioPause(bool pause) +{ + IOATACommand * cmd; + IOReturn ret; + + // IOLog("IOATAPICDDrive::audioPause\n"); + cmd = atapiCommandPauseResume(!pause); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the audio pause/resume command. + // + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::audioScan(CDMSF timeStart, bool reverse) +{ + IOATACommand * cmd; + IOReturn ret; + + cmd = atapiCommandScan(timeStart, reverse); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the audio scan command. + // + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::audioStop() +{ + IOATACommand * cmd; + IOReturn ret; + + cmd = atapiCommandStopPlay(); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the audio stop play command. + // + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::getAudioVolume(UInt8 * leftVolume, UInt8 * rightVolume) +{ + UInt8 audio_control[24]; + IOReturn status; + + status = readModeSense(audio_control,sizeof(audio_control),(UInt32)0xe); + + if (status == kIOReturnSuccess) { + assert((audio_control[0] ) == 0x00); + assert((audio_control[1] ) == sizeof(audio_control) - 2); + assert((audio_control[8] & 0x3f) == 0x0e); + assert((audio_control[9] ) == 0x0e); + + *leftVolume = audio_control[17]; + *rightVolume = audio_control[19]; + } + + return status; +} + +IOReturn +IOATAPICDDrive::setAudioVolume(UInt8 leftVolume, UInt8 rightVolume) +{ + UInt8 audio_control[24]; + IOReturn status; + + // get current values + status = readModeSense(audio_control,sizeof(audio_control),(UInt32)0xe); + + if (status == kIOReturnSuccess) { + assert((audio_control[0] ) == 0x00); + assert((audio_control[1] ) == sizeof(audio_control) - 2); + assert((audio_control[8] & 0x3f) == 0x0e); + assert((audio_control[9] ) == 0x0e); + + // set new values + 
audio_control[17] = audio_control[21] = leftVolume; + audio_control[19] = audio_control[23] = rightVolume; + + status = writeModeSelect(audio_control,sizeof(audio_control)); + } + + return status; +} + +IOReturn +IOATAPICDDrive::readModeSense(UInt8 * buffer, + UInt32 length, + UInt8 pageCode, + UInt8 pageControl = 0) +{ + IOReturn ret; + IOATACommand * cmd; + IOMemoryDescriptor * senseDesc; + + assert(buffer); + + // IOLog("IOATAPICDDrive::readModeSense len=%d page=%d\n",length,pageCode); + + senseDesc = IOMemoryDescriptor::withAddress(buffer, + length, + kIODirectionIn); + if (!senseDesc) + return kIOReturnNoMemory; + + cmd = atapiCommandModeSense(senseDesc, pageCode, pageControl); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the Mode Sense command. + // + ret = syncExecute(cmd); + + // Release the memory descriptor. + // + senseDesc->release(); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::writeModeSelect(UInt8 * buffer, UInt32 length) +{ + IOReturn ret; + IOATACommand * cmd; + IOMemoryDescriptor * selectDesc; + + // IOLog("IOATAPICDDrive::writeModeSelect %d %d\n",length); + assert(buffer); + + selectDesc = IOMemoryDescriptor::withAddress(buffer, + length, + kIODirectionOut); + if (!selectDesc) + return kIOReturnNoMemory; + + cmd = atapiCommandModeSelect(selectDesc); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the Mode Select command. + // + ret = syncExecute(cmd); + + // Release the memory descriptor. 
+ // + selectDesc->release(); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::getAudioStatus(CDAudioStatus * status) +{ + UInt8 * channel_data; + IOReturn ret; + + // init + channel_data = (UInt8 *)IOMalloc(16); + if (!channel_data) return kIOReturnNoMemory; + + // get audio status + ret = readSubChannel(channel_data,16,0x01,0x00); + + if (ret == kIOReturnSuccess) { + // state our assumptions + assert(channel_data[2] == 0); + assert(channel_data[3] == 12); + assert(channel_data[4] == 1); + + // get current status + status->status = channel_data[ 1]; + + // get current track and track index + status->position.track.number = channel_data[ 6]; + status->position.track.index = channel_data[ 7]; + + // get current absolute address + status->position.time.minute = channel_data[ 9]; + status->position.time.second = channel_data[10]; + status->position.time.frame = channel_data[11]; + + // get current relative address + status->position.track.time.minute = channel_data[13]; + status->position.track.time.second = channel_data[14]; + status->position.track.time.frame = channel_data[15]; + } + + // cleanup + IOFree(channel_data,16); + return ret; +} + +IOReturn +IOATAPICDDrive::readMCN(CDMCN mcn) +{ + UInt8 * channel_data; + IOReturn ret; + + // init + channel_data = (UInt8 *)IOMalloc(24); + if (!channel_data) return kIOReturnNoMemory; + + // get audio status + ret = readSubChannel(channel_data,24,0x02,0x00); + + if (ret == kIOReturnSuccess) { + // state our assumptions + assert(channel_data[2] == 0); + assert(channel_data[3] == 20); + assert(channel_data[4] == 2); + + // check if found + if ((channel_data[8] & 0x80)) { + // copy the data + bcopy(&channel_data[9],mcn,kCDMCNMaxLength); + mcn[kCDMCNMaxLength] = '\0'; + } else { + ret = kIOReturnNotFound; + } + } + + // cleanup + IOFree(channel_data,24); + return ret; +} + +IOReturn +IOATAPICDDrive::readISRC(UInt8 track, CDISRC isrc) +{ + UInt8 * channel_data; + IOReturn ret; + + // init + channel_data = 
(UInt8 *)IOMalloc(24); + if (!channel_data) return kIOReturnNoMemory; + + // get audio status + ret = readSubChannel(channel_data,24,0x03,track); + + if (ret == kIOReturnSuccess) { + // state our assumptions + assert(channel_data[2] == 0); + assert(channel_data[3] == 20); + assert(channel_data[4] == 3); + + // check if found + if ((channel_data[8] & 0x80)) { + // copy the data + bcopy(&channel_data[9],isrc,kCDISRCMaxLength); + isrc[kCDISRCMaxLength] = '\0'; + } else { + ret = kIOReturnNotFound; + } + } + + // cleanup + IOFree(channel_data,24); + return ret; +} + +IOReturn +IOATAPICDDrive::readSubChannel(UInt8 * buffer, + UInt32 length, + UInt8 dataFormat, + UInt8 trackNumber) +{ + IOReturn ret; + IOATACommand * cmd; + IOMemoryDescriptor * readDesc; + + assert(buffer); + + // IOLog("IOATAPICDDrive::readSubChannel len=%d\n",length); + + readDesc = IOMemoryDescriptor::withAddress(buffer, + length, + kIODirectionIn); + if (!readDesc) + return kIOReturnNoMemory; + + cmd = atapiCommandReadSubChannel(readDesc, dataFormat, trackNumber); + if (!cmd) + return kIOReturnNoMemory; + + // Execute the Mode Sense command. + // + ret = syncExecute(cmd); + + // Release the memory descriptor. + // + readDesc->release(); + + cmd->release(); + + return ret; +} + +IOReturn +IOATAPICDDrive::doAsyncReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + IOReturn ret; + IOATACommand * cmd; + + cmd = atapiCommandReadCD(buffer,block,nblks,sectorArea,sectorType); + + if (!cmd) + return kIOReturnNoMemory; + + ret = asyncExecute(cmd, completion); + + cmd->release(); + + return ret; +} diff --git a/iokit/Families/IOATAPICDDrive/IOATAPICDDriveNub.cpp b/iokit/Families/IOATAPICDDrive/IOATAPICDDriveNub.cpp new file mode 100644 index 000000000..a70f6aa3d --- /dev/null +++ b/iokit/Families/IOATAPICDDrive/IOATAPICDDriveNub.cpp @@ -0,0 +1,362 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPICDDriveNub.cpp + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * 2-Sep-1999 Joe Liu (jliu) created. + */ + +#include +#include +#include + +#define super IOCDBlockStorageDevice +OSDefineMetaClassAndStructors( IOATAPICDDriveNub, IOCDBlockStorageDevice ) + +//--------------------------------------------------------------------------- +// attach to provider. + +bool IOATAPICDDriveNub::attach(IOService * provider) +{ + if (!super::attach(provider)) + return false; + + _provider = OSDynamicCast(IOATAPICDDrive, provider); + if (_provider == 0) { + IOLog("IOATAPICDDriveNub: attach; wrong provider type!\n"); + return false; + } + + return true; +} + +//--------------------------------------------------------------------------- +// detach from provider. 
+ +void IOATAPICDDriveNub::detach(IOService * provider) +{ + if (_provider == provider) + _provider = 0; + + super::detach(provider); +} + + +//--------------------------------------------------------------------------- +// doAsyncReadCD + +IOReturn IOATAPICDDriveNub::doAsyncReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + return _provider->doAsyncReadCD(buffer, + block, + nblks, + sectorArea, + sectorType, + completion); +} + +//--------------------------------------------------------------------------- +// doAsyncReadWrite + +IOReturn IOATAPICDDriveNub::doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion) +{ + if (buffer->getDirection() == kIODirectionOut) + return kIOReturnNotWritable; + + return _provider->doAsyncReadWrite(buffer, block, nblks, completion); +} + +//--------------------------------------------------------------------------- +// doSyncReadWrite + +IOReturn +IOATAPICDDriveNub::doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + if (buffer->getDirection() == kIODirectionOut) + return kIOReturnNotWritable; + + return _provider->doSyncReadWrite(buffer, block, nblks); +} + +//--------------------------------------------------------------------------- +// doFormatMedia + +IOReturn +IOATAPICDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// doGetFormatCapacities + +UInt32 +IOATAPICDDriveNub::doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + return _provider->doGetFormatCapacities(capacities, capacitiesMaxCount); +} + +//--------------------------------------------------------------------------- +// doEjectMedia + +IOReturn IOATAPICDDriveNub::doEjectMedia() +{ + return _provider->doEjectMedia(); +} + 
+//--------------------------------------------------------------------------- +// doLockUnlockMedia + +IOReturn IOATAPICDDriveNub::doLockUnlockMedia(bool doLock) +{ + return _provider->doLockUnlockMedia(doLock); +} + +//--------------------------------------------------------------------------- +// getMediaType + +UInt32 IOATAPICDDriveNub::getMediaType() +{ + return kCDMediaTypeROM; +} + +//--------------------------------------------------------------------------- +// getVendorString + +char * IOATAPICDDriveNub::getVendorString() +{ + return _provider->getVendorString(); +} + +//--------------------------------------------------------------------------- +// getProductString + +char * IOATAPICDDriveNub::getProductString() +{ + return _provider->getProductString(); +} + +//--------------------------------------------------------------------------- +// getRevisionString + +char * IOATAPICDDriveNub::getRevisionString() +{ + return _provider->getRevisionString(); +} + +//--------------------------------------------------------------------------- +// getAdditionalDeviceInfoString + +char * IOATAPICDDriveNub::getAdditionalDeviceInfoString() +{ + return _provider->getAdditionalDeviceInfoString(); +} + +//--------------------------------------------------------------------------- +// reportBlockSize + +IOReturn IOATAPICDDriveNub::reportBlockSize(UInt64 * blockSize) +{ + return _provider->reportBlockSize(blockSize); +} + +//--------------------------------------------------------------------------- +// reportEjectability + +IOReturn IOATAPICDDriveNub::reportEjectability(bool * isEjectable) +{ + return _provider->reportEjectability(isEjectable); +} + +//--------------------------------------------------------------------------- +// reportLockability + +IOReturn IOATAPICDDriveNub::reportLockability(bool * isLockable) +{ + return _provider->reportLockability(isLockable); +} + +//--------------------------------------------------------------------------- +// reportMediaState + 
+IOReturn IOATAPICDDriveNub::reportMediaState(bool * mediaPresent, + bool * changed) +{ + return _provider->reportMediaState(mediaPresent, changed); +} + +//--------------------------------------------------------------------------- +// reportPollRequirements + +IOReturn IOATAPICDDriveNub::reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive) +{ + return _provider->reportPollRequirements(pollIsRequired, pollIsExpensive); +} + +//--------------------------------------------------------------------------- +// reportMaxReadTransfer + +IOReturn IOATAPICDDriveNub::reportMaxReadTransfer(UInt64 blockSize, + UInt64 * max) +{ + return _provider->reportMaxReadTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// reportMaxValidBlock + +IOReturn IOATAPICDDriveNub::reportMaxValidBlock(UInt64 * maxBlock) +{ + return _provider->reportMaxValidBlock(maxBlock); +} + +//--------------------------------------------------------------------------- +// reportRemovability + +IOReturn IOATAPICDDriveNub::reportRemovability(bool * isRemovable) +{ + return _provider->reportRemovability(isRemovable); +} + +//--------------------------------------------------------------------------- +// readISRC + +IOReturn IOATAPICDDriveNub::readISRC(UInt8 track, CDISRC isrc) +{ + return _provider->readISRC(track, isrc); +} + +//--------------------------------------------------------------------------- +// readMCN + +IOReturn IOATAPICDDriveNub::readMCN(CDMCN mcn) +{ + return _provider->readMCN(mcn); +} + +//--------------------------------------------------------------------------- +// readTOC + +IOReturn IOATAPICDDriveNub::readTOC(IOMemoryDescriptor * buffer) +{ + return _provider->readTOC(buffer); +} + +//--------------------------------------------------------------------------- +// audioPause + +IOReturn IOATAPICDDriveNub::audioPause(bool pause) +{ + return _provider->audioPause(pause); +} + 
+//--------------------------------------------------------------------------- +// audioPlay + +IOReturn IOATAPICDDriveNub::audioPlay(CDMSF timeStart, CDMSF timeStop) +{ + return _provider->audioPlay(timeStart, timeStop); +} + +//--------------------------------------------------------------------------- +// audioScan + +IOReturn IOATAPICDDriveNub::audioScan(CDMSF timeStart, bool reverse) +{ + return _provider->audioScan(timeStart, reverse); +} + +//--------------------------------------------------------------------------- +// audioStop + +IOReturn IOATAPICDDriveNub::audioStop() +{ + return _provider->audioStop(); +} + +//--------------------------------------------------------------------------- +// getAudioStatus + +IOReturn IOATAPICDDriveNub::getAudioStatus(CDAudioStatus * status) +{ + return _provider->getAudioStatus(status); +} + +//--------------------------------------------------------------------------- +// getAudioVolume + +IOReturn IOATAPICDDriveNub::getAudioVolume(UInt8 * leftVolume, + UInt8 * rightVolume) +{ + return _provider->getAudioVolume(leftVolume, rightVolume); +} + +//--------------------------------------------------------------------------- +// setVolume + +IOReturn IOATAPICDDriveNub::setAudioVolume(UInt8 leftVolume, UInt8 rightVolume) +{ + return _provider->setAudioVolume(leftVolume, rightVolume); +} + +//--------------------------------------------------------------------------- +// doSynchronizeCache + +IOReturn IOATAPICDDriveNub::doSynchronizeCache() +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// reportMaxWriteTransfer + +IOReturn IOATAPICDDriveNub::reportMaxWriteTransfer(UInt64 blockSize, + UInt64 * max) +{ + return _provider->reportMaxWriteTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// reportMaxWriteTransfer + +IOReturn IOATAPICDDriveNub::reportWriteProtection(bool * isWriteProtected) +{ + return 
_provider->reportWriteProtection(isWriteProtected); +} diff --git a/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDCommand.cpp b/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDCommand.cpp new file mode 100644 index 000000000..e93273e33 --- /dev/null +++ b/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDCommand.cpp @@ -0,0 +1,146 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +//--------------------------------------------------------------------------- +// SEND KEY command. + +IOATACommand * +IOATAPIDVDDrive::atapiCommandSendKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt8 agid, + const DVDKeyFormat keyFormat) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + // Create the ATAPI packet. 
+ // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandSendKey; + atapiCmd.cdb[7] = keyClass; + atapiCmd.cdb[8] = (UInt8)(buffer->getLength() >> 8); + atapiCmd.cdb[9] = (UInt8)(buffer->getLength()); + atapiCmd.cdb[10] = agid << 6 | keyFormat; + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// REPORT KEY command. + +IOATACommand * +IOATAPIDVDDrive::atapiCommandReportKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt32 lba, + const UInt8 agid, + const DVDKeyFormat keyFormat) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandReportKey; + + if (keyFormat == kTitleKey) { + atapiCmd.cdb[2] = (UInt8)(lba >> 24); + atapiCmd.cdb[3] = (UInt8)(lba >> 16); + atapiCmd.cdb[4] = (UInt8)(lba >> 8); + atapiCmd.cdb[5] = (UInt8)(lba); + } + atapiCmd.cdb[7] = keyClass; + atapiCmd.cdb[8] = (UInt8)(buffer->getLength() >> 8); + atapiCmd.cdb[9] = (UInt8)(buffer->getLength()); + atapiCmd.cdb[10] = agid << 6 | keyFormat; + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// GET CONFIGURATION command. + +IOATACommand * +IOATAPIDVDDrive::atapiCommandGetConfiguration(IOMemoryDescriptor * buffer, + UInt8 rt, + UInt16 sfn = 0) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + // Create the ATAPI packet. 
+ // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandGetConfiguration; + atapiCmd.cdb[1] = rt & 0x03; + atapiCmd.cdb[2] = (UInt8)(sfn >> 8); // starting feature number MSB + atapiCmd.cdb[3] = (UInt8)(sfn); // starting feature number LSB + atapiCmd.cdb[7] = (UInt8)(buffer->getLength() >> 8); + atapiCmd.cdb[8] = (UInt8)(buffer->getLength()); + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// READ DVD STRUCTURE command. + +IOATACommand * +IOATAPIDVDDrive::atapiCommandReadDVDStructure(IOMemoryDescriptor * buffer, + UInt8 format, + UInt32 address = 0, + UInt8 layer = 0, + UInt8 agid = 0) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandReadDVDStructure; + atapiCmd.cdb[2] = (UInt8)(address >> 24); + atapiCmd.cdb[3] = (UInt8)(address >> 16); + atapiCmd.cdb[4] = (UInt8)(address >> 8); + atapiCmd.cdb[5] = (UInt8)(address); + atapiCmd.cdb[6] = layer; + atapiCmd.cdb[7] = format; + atapiCmd.cdb[8] = (UInt8)(buffer->getLength() >> 8); + atapiCmd.cdb[9] = (UInt8)(buffer->getLength()); + atapiCmd.cdb[10] = (agid & 0x3) << 6; + + return atapiCommand(&atapiCmd, buffer); +} diff --git a/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDrive.cpp b/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDrive.cpp new file mode 100644 index 000000000..2e8b9cdef --- /dev/null +++ b/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDrive.cpp @@ -0,0 +1,529 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +// Define this to log debug messages. +// +// #define LOG_DVD_MESSAGES 1 + +#ifdef LOG_DVD_MESSAGES +#define DEBUG_LOG(fmt, args...) IOLog(fmt, ## args) +#else +#define DEBUG_LOG(fmt, args...) +#endif + +#define super IOATAPICDDrive +OSDefineMetaClassAndStructors( IOATAPIDVDDrive, IOATAPICDDrive ) + +//--------------------------------------------------------------------------- +// Feature header and descriptor definition. + +struct featureHdr { + UInt32 totalLen; + UInt8 reserved1[2]; + UInt16 currentProfile; +}; + +struct featureDescriptor { + UInt16 featureCode; + UInt8 versionPC; + UInt8 additionalLength; +}; + +#define kConfigBufferSize 2048 +#define kConfigFeatureHeaderBytes sizeof(struct featureHdr) +#define kConfigDataLengthBytes sizeof(UInt32) +#define kConfigMinDataLength (sizeof(struct featureHdr) - \ + kConfigDataLengthBytes + \ + sizeof(struct featureDescriptor)) + +//--------------------------------------------------------------------------- +// Classify an ATAPI CD-ROM drive as a CD or a DVD drive. + +IOReturn IOATAPIDVDDrive::classifyDrive(bool * isDVDDrive) +{ + IOATACommand * cmd = 0; + IOBufferMemoryDescriptor * copyrightDesc; + IOBufferMemoryDescriptor * senseDesc; + IOReturn ret = kIOReturnSuccess; + + do { + // Buffer descriptor to hold Copyright Information. 
+ + copyrightDesc = IOBufferMemoryDescriptor::withCapacity(8, + kIODirectionIn); + + // Buffer descriptor to hold sense data. + + senseDesc = IOBufferMemoryDescriptor::withCapacity( + sizeof(ATASenseData), + kIODirectionIn); + + if ( (copyrightDesc == 0) || (senseDesc == 0) ) + { + ret = kIOReturnNoMemory; + break; + } + + bzero(senseDesc->getBytesNoCopy(), senseDesc->getCapacity()); + + // READ DVD STRUCTURE command - DVD Copyright Information + + cmd = atapiCommandReadDVDStructure(copyrightDesc, + kIODVDReadStructureCopyright); + if (cmd == 0) + { + ret = kIOReturnNoMemory; + break; + } + + // Execute the command, and get sense data. + + ret = syncExecute(cmd, + kATADefaultTimeout, + kATADefaultRetries, + senseDesc); + + // By default, consider it a DVD drive, unless the drive + // returns an error, and the sense data contains, + // + // KEY = 0x05 + // ASC = 0x20 + // ASCQ = 0x00 + + *isDVDDrive = true; + + if (ret != kIOReturnSuccess) + { + ATASenseData * senseData; + + senseData = (ATASenseData *) senseDesc->getBytesNoCopy(); + + if ((senseData->errorCode == kATAPISenseCurrentErr) || + (senseData->errorCode == kATAPISenseDeferredErr)) + { + if ((senseData->senseKey == kATAPISenseIllegalReq) && + (senseData->additionalSenseCode == 0x20) && + (senseData->additionalSenseQualifier == 0x0)) + { + *isDVDDrive = false; + } + } + + ret = kIOReturnSuccess; + } + } + while (false); + + if (senseDesc) senseDesc->release(); + if (copyrightDesc) copyrightDesc->release(); + if (cmd) cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Determine the media type (book type) in the DVD drive. + +IOReturn +IOATAPIDVDDrive::determineMediaType(UInt32 * mediaType) +{ + IOATACommand * cmd = 0; + IOBufferMemoryDescriptor * dataDesc; + IODVDStructurePhysical * data; + IOReturn ret = kIOReturnSuccess; + + *mediaType = kDVDMediaTypeUnknown; + + do { + // Buffer descriptor to hold Physical Format Information. 
+ + dataDesc = IOBufferMemoryDescriptor::withCapacity(sizeof(*data), + kIODirectionIn); + + if ( dataDesc == 0 ) + { + ret = kIOReturnNoMemory; + break; + } + + data = (IODVDStructurePhysical *) dataDesc->getBytesNoCopy(); + bzero(data, sizeof(data->length)); + + // READ DVD STRUCTURE command - Physical Format Information + + cmd = atapiCommandReadDVDStructure(dataDesc, + kIODVDReadStructurePhysical); + if ( cmd == 0 ) + { + ret = kIOReturnNoMemory; + break; + } + + // Execute the command. + + if ( syncExecute(cmd) != kIOReturnSuccess ) + { + *mediaType = kCDMediaTypeROM; // Assume its a CD. + } + else if ( IODVDGetDataLength16(data->length) < + (sizeof(*data) - sizeof(data->length)) ) + { + ret = kIOReturnUnderrun; + } + else + { + DEBUG_LOG("%s: DVD Book Type: %x Part Version: %x\n", + getName(), data->bookType, data->partVersion); + + switch (data->bookType) + { + default: + case kIODVDBookTypeDVDROM: + case kIODVDBookTypeDVDR: + case kIODVDBookTypeDVDRW: + case kIODVDBookTypeDVDPlusRW: + *mediaType = kDVDMediaTypeROM; + break; + + case kIODVDBookTypeDVDRAM: + *mediaType = kDVDMediaTypeRAM; + break; + } + } + } + while (false); + + if (dataDesc) dataDesc->release(); + if (cmd) cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Perform active matching with an ATAPI device nub published by the +// ATA controller driver. + +bool +IOATAPIDVDDrive::matchATAPIDeviceType(UInt8 type, SInt32 * score) +{ + bool isDVDDrive; + bool match = false; + + do { + // If the device type reported by INQUIRY data is not a CD-ROM type, + // then give up immediately. + + if ( type != kIOATAPIDeviceTypeCDROM ) + break; + + // Select timing protocol before performing I/O. + + if ( selectTimingProtocol() == false ) + break; + + // Is this unit a DVD drive? 
+ + if ( classifyDrive(&isDVDDrive) != kIOReturnSuccess ) + break; + + if ( isDVDDrive ) + { + // Indicate a strong affinity for the DVD drive by setting + // a higher probe score when a DVD drive is detected. + + DEBUG_LOG("%s::%s DVD drive detected\n", getName(), __FUNCTION__); + *score = 20; + match = true; + } + else + { + // Not a DVD drive. + DEBUG_LOG("%s::%s Not a DVD drive\n", getName(), __FUNCTION__); + } + } + while (false); + + return match; +} + +//--------------------------------------------------------------------------- +// GET CONFIGURATION command. + +IOReturn +IOATAPIDVDDrive::getConfiguration(UInt8 * buf, + UInt32 length, + UInt32 * actualLength, + bool current) +{ + IOMemoryDescriptor * bufDesc = 0; + IOATACommand * cmd = 0; + IOReturn ret = kIOReturnNoMemory; + ATAResults results; + + do { + bufDesc = IOMemoryDescriptor::withAddress(buf, length, kIODirectionIn); + if (bufDesc == 0) + break; + + cmd = atapiCommandGetConfiguration(bufDesc, 0x01); + if (cmd == 0) + break; + + ret = syncExecute(cmd); + + cmd->getResults(&results); + *actualLength = results.bytesTransferred; + } + while (0); + + if (cmd) cmd->release(); + if (bufDesc) bufDesc->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Report disk type. + +const char * +IOATAPIDVDDrive::getDeviceTypeName() +{ + return kIOBlockStorageDeviceTypeDVD; +} + +//--------------------------------------------------------------------------- +// Report the type of media in the DVD drive. + +UInt32 +IOATAPIDVDDrive::getMediaType() +{ + UInt32 mediaType; + + determineMediaType(&mediaType); + + return mediaType; +} + +//--------------------------------------------------------------------------- +// Initialize the IOATAPIDVDDrive object. 
+ +bool +IOATAPIDVDDrive::init(OSDictionary * properties) +{ + return super::init(properties); +} + +//--------------------------------------------------------------------------- +// Instantiate an IOATAPIDVDDriveNub nub. + +IOService * +IOATAPIDVDDrive::instantiateNub(void) +{ + IOService * nub = new IOATAPIDVDDriveNub; + + /* Instantiate a generic DVD nub so a generic driver can match above us. */ + + return nub; +} + +//--------------------------------------------------------------------------- +// Report the media state. + +IOReturn +IOATAPIDVDDrive::reportMediaState(bool * mediaPresent, bool * changed) +{ + IOReturn result; + + // Let superclass check for media in a generic fashion. + + result = super::reportMediaState(mediaPresent, changed); + +#if 0 // For testing only + + if (result != kIOReturnSuccess) + { + return result; + } + + // For a new media, determine its type. + + if (*mediaPresent && *changed) + { + getMediaType(); + } +#endif + + return result; +} + +//--------------------------------------------------------------------------- +// Report random write support. + +IOReturn +IOATAPIDVDDrive::reportWriteProtection(bool * isWriteProtected) +{ + UInt32 len; + struct featureHdr * fh; + struct featureDescriptor * fdp; + IOReturn result; + UInt8 * configBuf; + UInt32 configBufSize; /* not used */ + + *isWriteProtected = true; + + /* Allocate memory for the configuration data. + Theoretically, this can be up to 65534 bytes. */ + + configBuf = (UInt8 *) IOMalloc(kConfigBufferSize); + if ( configBuf == 0 ) + return kIOReturnNoMemory; + + bzero((void *) configBuf, kConfigBufferSize); + + /* Get the *current* configuration information, relating to the media. */ + + do { + result = getConfiguration(configBuf, + kConfigBufferSize, + &configBufSize, + true); /* Get current (active) features */ + + if (result == kIOReturnUnderrun) + { + // getConfiguration() will report an underrun. 
+ result = kIOReturnSuccess; + } + + if (result != kIOReturnSuccess) + { + DEBUG_LOG("%s::%s getConfiguration() error = %s\n", + getName(), __FUNCTION__, stringFromReturn(result)); + result = kIOReturnSuccess; + break; + } + + fh = (struct featureHdr *) configBuf; + len = OSSwapBigToHostInt32(fh->totalLen); + + if (len < kConfigMinDataLength) + { + result = kIOReturnUnderrun; + break; + } + + // total length, including the Data Length field. + // + len += kConfigDataLengthBytes; + len = min(len, kConfigBufferSize); + DEBUG_LOG("%s::%s config length = %ld\n", getName(), __FUNCTION__, len); + + // Points to the first Feature Descriptor after the Feature Header. + // + fdp = (struct featureDescriptor *) + &configBuf[kConfigFeatureHeaderBytes]; + + do { + if (OSSwapBigToHostInt16(fdp->featureCode) == + kIOATAPIFeatureRandomWrite) + { + *isWriteProtected = false; + break; + } + + fdp = (struct featureDescriptor *)((char *)fdp + + sizeof(struct featureDescriptor) + + fdp->additionalLength); + } + while ( ((UInt8 *)fdp + sizeof(*fdp)) <= &configBuf[len] ); + } + while (false); + + IOFree((void *) configBuf, kConfigBufferSize); + + return result; +} + +//--------------------------------------------------------------------------- +// SEND KEY command. + +IOReturn +IOATAPIDVDDrive::sendKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt8 agid, + const DVDKeyFormat keyFormat) +{ + IOATACommand * cmd = 0; + IOReturn ret = kIOReturnNoMemory; + + do { + assert(buffer); + + cmd = atapiCommandSendKey(buffer, keyClass, agid, keyFormat); + if (cmd == 0) + break; + + ret = syncExecute(cmd); + } + while (0); + + if (cmd) + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// REPORT KEY command. 
+ +IOReturn +IOATAPIDVDDrive::reportKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt32 lba, + const UInt8 agid, + const DVDKeyFormat keyFormat) +{ + IOATACommand * cmd = 0; + IOReturn ret = kIOReturnNoMemory; + + do { + assert(buffer); + + cmd = atapiCommandReportKey(buffer, keyClass, lba, agid, keyFormat); + if (cmd == 0) + break; + + ret = syncExecute(cmd); + } + while (0); + + if (cmd) + cmd->release(); + + return ret; +} diff --git a/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp b/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp new file mode 100644 index 000000000..7ad606b1c --- /dev/null +++ b/iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp @@ -0,0 +1,397 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +class IOMemoryDescriptor; + +#define super IODVDBlockStorageDevice +OSDefineMetaClassAndStructors(IOATAPIDVDDriveNub, IODVDBlockStorageDevice) + +//--------------------------------------------------------------------------- +// attach to provider. 
+ +bool +IOATAPIDVDDriveNub::attach(IOService * provider) +{ + if (!super::attach(provider)) + return false; + + _provider = OSDynamicCast(IOATAPIDVDDrive, provider); + if (_provider == 0) { + IOLog("IOATAPIDVDDriveNub: attach; wrong provider type!\n"); + return false; + } + return true; +} + +//--------------------------------------------------------------------------- +// detach from provider. + +void +IOATAPIDVDDriveNub::detach(IOService * provider) +{ + if (_provider == provider) + _provider = 0; + + super::detach(provider); +} + +//--------------------------------------------------------------------------- +// audioPlay + +IOReturn +IOATAPIDVDDriveNub::audioPlay(CDMSF timeStart, CDMSF timeStop) +{ + return _provider->audioPlay(timeStart, timeStop); +} + +//--------------------------------------------------------------------------- +// audioPause + +IOReturn +IOATAPIDVDDriveNub::audioPause(bool pause) +{ + return _provider->audioPause(pause); +} + +//--------------------------------------------------------------------------- +// audioScan + +IOReturn +IOATAPIDVDDriveNub::audioScan(CDMSF timeStart, bool reverse) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// audioStop + +IOReturn IOATAPIDVDDriveNub::audioStop() +{ + return _provider->audioStop(); +} + +//--------------------------------------------------------------------------- +// doAsyncReadCD + +IOReturn IOATAPIDVDDriveNub::doAsyncReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + return _provider->doAsyncReadCD(buffer, + block, + nblks, + sectorArea, + sectorType, + completion); +} + +//--------------------------------------------------------------------------- +// doAsyncReadWrite + +IOReturn +IOATAPIDVDDriveNub::doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion) 
+{ + return _provider->doAsyncReadWrite(buffer, block, nblks, completion); +} + +//--------------------------------------------------------------------------- +// doSyncReadWrite + +IOReturn +IOATAPIDVDDriveNub::doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + return _provider->doSyncReadWrite(buffer, block, nblks); +} + +//--------------------------------------------------------------------------- +// doEjectMedia + +IOReturn +IOATAPIDVDDriveNub::doEjectMedia() +{ + return _provider->doEjectMedia(); +} + +//--------------------------------------------------------------------------- +// doFormatMedia + +IOReturn +IOATAPIDVDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return _provider->doFormatMedia(byteCapacity); +} + +//--------------------------------------------------------------------------- +// doGetFormatCapacities + +UInt32 +IOATAPIDVDDriveNub::doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + return _provider->doGetFormatCapacities(capacities, capacitiesMaxCount); +} + +//--------------------------------------------------------------------------- +// doLockUnlockMedia + +IOReturn +IOATAPIDVDDriveNub::doLockUnlockMedia(bool doLock) +{ + return _provider->doLockUnlockMedia(doLock); +} + +//--------------------------------------------------------------------------- +// doSynchronizeCache + +IOReturn +IOATAPIDVDDriveNub::doSynchronizeCache() +{ + return _provider->doSynchronizeCache(); +} + +//--------------------------------------------------------------------------- +// getAudioStatus + +IOReturn +IOATAPIDVDDriveNub::getAudioStatus(CDAudioStatus * status) +{ + return _provider->getAudioStatus(status); +} + +//--------------------------------------------------------------------------- +// getAudioVolume + +IOReturn +IOATAPIDVDDriveNub::getAudioVolume(UInt8 * leftVolume, UInt8 * rightVolume) +{ + return _provider->getAudioVolume(leftVolume, rightVolume); +} + 
+//--------------------------------------------------------------------------- +// getMediaType + +UInt32 +IOATAPIDVDDriveNub::getMediaType() +{ + return _provider->getMediaType(); +} + +//--------------------------------------------------------------------------- +// getVendorString + +char * +IOATAPIDVDDriveNub::getVendorString() +{ + return _provider->getVendorString(); +} + +//--------------------------------------------------------------------------- +// getProductString + +char * +IOATAPIDVDDriveNub::getProductString() +{ + return _provider->getProductString(); +} + +//--------------------------------------------------------------------------- +// getRevisionString + +char * +IOATAPIDVDDriveNub::getRevisionString() +{ + return _provider->getRevisionString(); +} + +//--------------------------------------------------------------------------- +// getAdditionalDeviceInfoString + +char * +IOATAPIDVDDriveNub::getAdditionalDeviceInfoString() +{ + return _provider->getAdditionalDeviceInfoString(); +} + +//--------------------------------------------------------------------------- +// readISRC + +IOReturn +IOATAPIDVDDriveNub::readISRC(UInt8 track, CDISRC isrc) +{ + return _provider->readISRC(track, isrc); +} + +//--------------------------------------------------------------------------- +// readMCN + +IOReturn +IOATAPIDVDDriveNub::readMCN(CDMCN mcn) +{ + return _provider->readMCN(mcn); +} + +//--------------------------------------------------------------------------- +// readTOC + +IOReturn +IOATAPIDVDDriveNub::readTOC(IOMemoryDescriptor * buffer) +{ + return _provider->readTOC(buffer); +} + +//--------------------------------------------------------------------------- +// reportBlockSize + +IOReturn +IOATAPIDVDDriveNub::reportBlockSize(UInt64 * blockSize) +{ + return _provider->reportBlockSize(blockSize); +} + +//--------------------------------------------------------------------------- +// reportEjectability + +IOReturn 
+IOATAPIDVDDriveNub::reportEjectability(bool * isEjectable) +{ + return _provider->reportEjectability(isEjectable); +} + +//--------------------------------------------------------------------------- +// reportKey + +IOReturn +IOATAPIDVDDriveNub::reportKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt32 lba, + const UInt8 agid, + const DVDKeyFormat keyFormat) +{ + return _provider->reportKey(buffer, keyClass, lba, agid, keyFormat); +} + +//--------------------------------------------------------------------------- +// sendKey + +IOReturn +IOATAPIDVDDriveNub::sendKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt8 agid, + const DVDKeyFormat keyFormat) +{ + return _provider->sendKey(buffer, keyClass, agid, keyFormat); +} + +//--------------------------------------------------------------------------- +// reportLockability + +IOReturn +IOATAPIDVDDriveNub::reportLockability(bool * isLockable) +{ + return _provider->reportLockability(isLockable); +} + +//--------------------------------------------------------------------------- +// reportPollRequirements + +IOReturn +IOATAPIDVDDriveNub::reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive) +{ + return _provider->reportPollRequirements(pollIsRequired, pollIsExpensive); +} + +//--------------------------------------------------------------------------- +// reportMaxReadTransfer + +IOReturn +IOATAPIDVDDriveNub::reportMaxReadTransfer(UInt64 blockSize, UInt64 * max) +{ + return _provider->reportMaxReadTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// reportMaxValidBlock + +IOReturn +IOATAPIDVDDriveNub::reportMaxValidBlock(UInt64 * maxBlock) +{ + return _provider->reportMaxValidBlock(maxBlock); +} + +//--------------------------------------------------------------------------- +// reportMaxWriteTransfer + +IOReturn +IOATAPIDVDDriveNub::reportMaxWriteTransfer(UInt64 blockSize, UInt64 * max) +{ + 
return _provider->reportMaxWriteTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// reportMediaState + +IOReturn +IOATAPIDVDDriveNub::reportMediaState(bool * mediaPresent, bool * changed) +{ + return _provider->reportMediaState(mediaPresent, changed); +} + +//--------------------------------------------------------------------------- +// reportRemovability + +IOReturn +IOATAPIDVDDriveNub::reportRemovability(bool * isRemovable) +{ + return _provider->reportRemovability(isRemovable); +} + +//--------------------------------------------------------------------------- +// reportWriteProtection + +IOReturn +IOATAPIDVDDriveNub::reportWriteProtection(bool * isWriteProtected) +{ + return _provider->reportWriteProtection(isWriteProtected); +} + +//--------------------------------------------------------------------------- +// setAudioVolume + +IOReturn +IOATAPIDVDDriveNub::setAudioVolume(UInt8 leftVolume, UInt8 rightVolume) +{ + return _provider->setAudioVolume(leftVolume, rightVolume); +} diff --git a/iokit/Families/IOATAPIHDDrive/IOATAPIHDCommand.cpp b/iokit/Families/IOATAPIHDDrive/IOATAPIHDCommand.cpp new file mode 100644 index 000000000..959d18250 --- /dev/null +++ b/iokit/Families/IOATAPIHDDrive/IOATAPIHDCommand.cpp @@ -0,0 +1,325 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPIHDCommand.cpp - Performs ATAPI command processing. + * + * HISTORY + * Sep 2, 1999 jliu - Ported from AppleATAPIDrive. + */ + +#include +#include + +#define super IOATAHDDrive + +// Enable this define to generate debugging messages. +// #define DEBUG_LOG 1 + +//--------------------------------------------------------------------------- +// Returns the Command protocol to use (e.g. ataProtocolPIO, ataProtocolDMA). + +bool +IOATAPIHDDrive::selectCommandProtocol(bool isDMA) +{ + super::selectCommandProtocol(isDMA); + + if (isDMA) + _atapiProtocol = kATAProtocolATAPIDMA; + else + _atapiProtocol = kATAProtocolATAPIPIO; + + return true; +} + +//--------------------------------------------------------------------------- +// Setup a ATATaskFile for an ATAPI packet command from the parameters given. 
+ +void +IOATAPIHDDrive::setupPacketTaskFile(ATATaskfile * taskfile, + ATAProtocol protocol, + UInt16 byteCount) +{ + bzero( taskfile, sizeof(ATATaskfile) ); + + taskfile->protocol = protocol; + + taskfile->regmask = ATARegtoMask(kATARegATAPIDeviceSelect) + | ATARegtoMask(kATARegATAPICommand) + | ATARegtoMask(kATARegATAPIByteCountLow) + | ATARegtoMask(kATARegATAPIByteCountHigh) + | ATARegtoMask(kATARegATAPIFeatures); + + taskfile->resultmask = ATARegtoMask(kATARegATAPIError); + + taskfile->ataRegs[kATARegATAPIDeviceSelect] = kATAModeLBA | (_unit << 4); + taskfile->ataRegs[kATARegATAPICommand] = kATACommandATAPIPacket; + taskfile->ataRegs[kATARegATAPIByteCountLow] = byteCount & 0xff; + taskfile->ataRegs[kATARegATAPIByteCountHigh] = (byteCount >> 8) & 0xff; + taskfile->ataRegs[kATARegATAPIFeatures] = (protocol == + kATAProtocolATAPIPIO) ? + 0 : kIOATAPIFeaturesDMA; +} + +//--------------------------------------------------------------------------- +// Create a generic ATAPI command object. + +IOATACommand * +IOATAPIHDDrive::atapiCommand(ATACDBInfo * packetCommand, + IOMemoryDescriptor * transferBuffer = 0) +{ + ATATaskfile taskfile; + bool isWrite; + UInt32 transferLength; + IOATACommand * cmd = allocateCommand(); + + if (!cmd) return 0; // error, command allocation failed. + + // Create ATA packet command. + // + setupPacketTaskFile(&taskfile, _atapiProtocol, kIOATAPIMaxTransfer); + + // Get a pointer to the client data buffer, and record parameters + // which shall be later used by the completion routine. 
+ // + IOATAClientData * clientData = ATA_CLIENT_DATA(cmd); + assert(clientData); + + clientData->buffer = transferBuffer; + + cmd->setTaskfile(&taskfile); + cmd->setCDB(packetCommand); + + if (transferBuffer) { + isWrite = (transferBuffer->getDirection() == kIODirectionOut); + transferLength = transferBuffer->getLength(); + } + else { + isWrite = false; + transferLength = 0; + } + cmd->setPointers(transferBuffer, transferLength, isWrite); + + return cmd; +} + +//--------------------------------------------------------------------------- +// Allocates and return an IOATACommand to perform a read/write operation. + +IOATACommand * +IOATAPIHDDrive::atapiCommandReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + +#ifdef DEBUG_LOG + IOLog("%s: atapiCommandReadWrite %08x (%d) %s %d %d\n", + getName(), + buffer, + buffer->getLength(), + (buffer->getDirection() == kIODirectionOut) ? "WR" : + "RD", + block, + nblks); +#endif + + // Create the ATAPI packet (bytes 1, 10, 11 are reserved). + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = (buffer->getDirection() == kIODirectionOut) ? + kIOATAPICommandWrite : kIOATAPICommandRead; + atapiCmd.cdb[2] = (UInt8)(block >> 24); + atapiCmd.cdb[3] = (UInt8)(block >> 16); + atapiCmd.cdb[4] = (UInt8)(block >> 8); + atapiCmd.cdb[5] = (UInt8)(block); + atapiCmd.cdb[6] = (UInt8)(nblks >> 24); + atapiCmd.cdb[7] = (UInt8)(nblks >> 16); + atapiCmd.cdb[8] = (UInt8)(nblks >> 8); + atapiCmd.cdb[9] = (UInt8)(nblks); + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// ATAPI Start/Stop Unit command (1B). + +IOATACommand * +IOATAPIHDDrive::atapiCommandStartStopUnit(bool doStart, + bool doLoadEject, + bool immediate) +{ + ATACDBInfo atapiCmd; + +#ifdef DEBUG_LOG + IOLog("%s: atapiCommandStartStopUnit: %s\n", getName(), + doStart ? 
"start" : "stop"); +#endif + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandStartStopUnit; + atapiCmd.cdb[1] = immediate ? 0x01 : 0x00; + atapiCmd.cdb[4] = (doStart ? 0x01 : 0) | + (doLoadEject ? 0x02 : 0); + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// ATAPI Format Unit command (04). + +IOATACommand * +IOATAPIHDDrive::atapiCommandFormatUnit(UInt16 interleave, + UInt8 flagBits, + UInt8 vendorBits, + IOMemoryDescriptor * formatData) +{ + ATACDBInfo atapiCmd; + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandFormatUnit; + atapiCmd.cdb[1] = flagBits; + atapiCmd.cdb[3] = (UInt8)(interleave >> 8); + atapiCmd.cdb[4] = (UInt8)(interleave); + atapiCmd.cdb[5] = vendorBits; + + if (formatData) + atapiCmd.cdb[1] |= 0x10; + + return atapiCommand(&atapiCmd, formatData); +} + +//--------------------------------------------------------------------------- +// ATAPI Synchronize Cache command (35). + +IOATACommand * +IOATAPIHDDrive::atapiCommandSynchronizeCache() +{ + ATACDBInfo atapiCmd; + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandSynchronizeCache; + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// ATAPI Prevent/Allow medium removal command (1E). + +IOATACommand * +IOATAPIHDDrive::atapiCommandPreventAllowRemoval(bool doLock) +{ + ATACDBInfo atapiCmd; + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandPreventAllow; + atapiCmd.cdb[4] = doLock ? 
0x01 : 0; + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// ATAPI Test Unit Ready command (00). + +IOATACommand * +IOATAPIHDDrive::atapiCommandTestUnitReady() +{ + ATACDBInfo atapiCmd; + +#ifdef DEBUG_LOG + IOLog("%s: atapiCommandTestUnitReady\n", getName()); +#endif + + // Create the ATAPI packet. + // + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandTestUnitReady; + + return atapiCommand(&atapiCmd); +} + +//--------------------------------------------------------------------------- +// atapiCommandModeSense + +IOATACommand * +IOATAPIHDDrive::atapiCommandModeSense(IOMemoryDescriptor * buffer, + UInt8 pageCode, + UInt8 pageControl) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandModeSense; + atapiCmd.cdb[2] = (pageCode & 0x3f) | ((pageControl & 0x3) << 6); + atapiCmd.cdb[7] = (buffer->getLength() >> 8) & 0xff; + atapiCmd.cdb[8] = buffer->getLength() & 0xff; + + return atapiCommand(&atapiCmd, buffer); +} + +//--------------------------------------------------------------------------- +// atapiCommandModeSelect + +IOATACommand * +IOATAPIHDDrive::atapiCommandModeSelect(IOMemoryDescriptor * buffer) +{ + ATACDBInfo atapiCmd; + + assert(buffer); + + bzero(&atapiCmd, sizeof(atapiCmd)); + + atapiCmd.cdbLength = 12; + atapiCmd.cdb[0] = kIOATAPICommandModeSelect; + atapiCmd.cdb[1] = 0x10; + atapiCmd.cdb[7] = (buffer->getLength() >> 8) & 0xff; + atapiCmd.cdb[8] = buffer->getLength() & 0xff; + + return atapiCommand(&atapiCmd, buffer); +} diff --git a/iokit/Families/IOATAPIHDDrive/IOATAPIHDDrive.cpp b/iokit/Families/IOATAPIHDDrive/IOATAPIHDDrive.cpp new file mode 100644 index 000000000..7016edd1d --- /dev/null +++ b/iokit/Families/IOATAPIHDDrive/IOATAPIHDDrive.cpp @@ -0,0 +1,556 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPIHDDrive.h - Generic ATAPI Direct-Access driver. + * + * HISTORY + * Sep 2, 1999 jliu - Ported from AppleATAPIDrive. + */ + +#include +#include +#include + +#define super IOATAHDDrive +OSDefineMetaClassAndStructors( IOATAPIHDDrive, IOATAHDDrive ) + +//--------------------------------------------------------------------------- +// Override the init() method in IOATAHDDrive. + +bool +IOATAPIHDDrive::init(OSDictionary * properties) +{ + _mediaPresent = false; + _isRemovable = false; + + return super::init(properties); +} + +//--------------------------------------------------------------------------- +// Override probe() method inherited from IOATAHDDrive. We need to +// perform additional matching on ATAPI device type based on the +// Inquiry data revealed by an ATA(PI) device nub. + +IOService * +IOATAPIHDDrive::probe(IOService * provider, SInt32 * score) +{ + UInt8 deviceType; + IOService * ret = 0; + bool wasOpened = false; + + // Let superclass have a go at probe first. 
+ // + if (!super::probe(provider, score)) + return 0; + + // Our provider must be a IOATADevice nub, most likely created + // by an IOATAController instance. + // + _ataDevice = OSDynamicCast(IOATADevice, provider); + if (_ataDevice == 0) + return 0; // IOATADevice nub not found. + + do { + // Since we may issue command to the IOATADevice to perform + // device matching, we express interest in using the device by + // performing an open. + // + if (_ataDevice->open(this) == false) + break; + + wasOpened = true; + + // Perform ATAPI type matching, CD-ROM, Direct-Access, Tape, etc. + // + if (!_ataDevice->getInquiryData(1, (ATAPIInquiry *) &deviceType) || + !matchATAPIDeviceType(deviceType & 0x1f, score)) + break; + + ret = this; + } + while (false); + + if (wasOpened) + _ataDevice->close(this); + + _ataDevice = 0; + + return ret; +} + +//--------------------------------------------------------------------------- +// Report as an ATAPI device. + +ATADeviceType +IOATAPIHDDrive::reportATADeviceType() const +{ + return kATADeviceATAPI; +} + +//--------------------------------------------------------------------------- +// Looks for an ATAPI device which is a direct-access device. + +bool +IOATAPIHDDrive::matchATAPIDeviceType(UInt8 type, SInt32 * score) +{ + if (type == kIOATAPIDeviceTypeDirectAccess) + return true; + + return false; +} + +//--------------------------------------------------------------------------- +// Gather information about the ATAPI device nub. + +bool +IOATAPIHDDrive::inspectDevice(IOATADevice * device) +{ + OSString * string; + + // Fetch ATAPI device information from the nub. 
+ // + string = OSDynamicCast(OSString, + device->getProperty(kATAPropertyVendorName)); + if (string) { + strncpy(_vendor, string->getCStringNoCopy(), 8); + _vendor[8] = '\0'; + } + + string = OSDynamicCast(OSString, + device->getProperty(kATAPropertyProductName)); + if (string) { + strncpy(_product, string->getCStringNoCopy(), 16); + _product[16] = '\0'; + } + + string = OSDynamicCast(OSString, + device->getProperty(kATAPropertyProductRevision)); + if (string) { + strncpy(_revision, string->getCStringNoCopy(), 4); + _revision[4] = '\0'; + } + + // Device wants to be power-managed. + // + _supportedFeatures |= kIOATAFeaturePowerManagement; + + return true; +} + +//--------------------------------------------------------------------------- +// Async read/write requests. + +IOReturn +IOATAPIHDDrive::doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion) +{ + IOReturn ret; + IOATACommand * cmd = atapiCommandReadWrite(buffer, block, nblks); + + if (!cmd) + return kIOReturnNoMemory; + + ret = asyncExecute(cmd, completion); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Sync read/write requests. + +IOReturn +IOATAPIHDDrive::doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + IOReturn ret; + IOATACommand * cmd = atapiCommandReadWrite(buffer, block, nblks); + + if (!cmd) + return kIOReturnNoMemory; + + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Eject the media in the removable drive. 
+ +IOReturn +IOATAPIHDDrive::doEjectMedia() +{ + IOReturn ret; + IOATACommand * cmd = atapiCommandStartStopUnit(false, /* start unit */ + true, /* Load/Eject */ + false); /* Immediate */ + + if (!cmd) + return kIOReturnNoMemory; + + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Format the media in the drive. + +IOReturn +IOATAPIHDDrive::doFormatMedia(UInt64 byteCapacity, + IOMemoryDescriptor * formatData = 0) +{ + IOReturn ret; + IOATACommand * cmd = atapiCommandFormatUnit(0, 0, 0, formatData); + + if (!cmd) + return kIOReturnNoMemory; + + ret = syncExecute(cmd, 15 * 60 * 1000); // 15 min timeout + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Lock/unlock the media in the removable drive. + +IOReturn +IOATAPIHDDrive::doLockUnlockMedia(bool doLock) +{ + IOReturn ret; + IOATACommand * cmd = atapiCommandPreventAllowRemoval(doLock); + + if (!cmd) + return kIOReturnNoMemory; + + ret = syncExecute(cmd); + + cmd->release(); + + // Cache the state on the media lock, and restore it when + // the driver wakes up from sleep. + + _isLocked = doLock; + + return ret; +} + +//--------------------------------------------------------------------------- +// Sync the write cache. + +IOReturn +IOATAPIHDDrive::doSynchronizeCache() +{ + IOReturn ret; + IOATACommand * cmd = atapiCommandSynchronizeCache(); + + if (!cmd) + return kIOReturnNoMemory; + + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Start up the drive. + +IOReturn +IOATAPIHDDrive::doStart() +{ + return doStartStop(true); +} + +//--------------------------------------------------------------------------- +// Stop the drive. 
+ +IOReturn +IOATAPIHDDrive::doStop() +{ + return doStartStop(false); +} + +//--------------------------------------------------------------------------- +// Issue a START/STOP Unit command. + +IOReturn +IOATAPIHDDrive::doStartStop(bool doStart) +{ + IOReturn ret; + IOATACommand * cmd; + + cmd = atapiCommandStartStopUnit(doStart, /* start unit */ + false, /* Load/Eject */ + false); /* Immediate operation */ + + if (!cmd) return kIOReturnNoMemory; + + ret = syncExecute(cmd); + + cmd->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Return device identification strings + +char * IOATAPIHDDrive::getVendorString() +{ + return _vendor; +} + +char * IOATAPIHDDrive::getProductString() +{ + return _product; +} + +char * IOATAPIHDDrive::getRevisionString() +{ + return _revision; +} + +char * IOATAPIHDDrive::getAdditionalDeviceInfoString() +{ + return ("[ATAPI]"); +} + +//--------------------------------------------------------------------------- +// Report whether the media in the drive is ejectable. + +IOReturn +IOATAPIHDDrive::reportEjectability(bool * isEjectable) +{ + *isEjectable = true; /* default: if it's removable, it's ejectable */ + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report whether the drive can prevent user-initiated ejects by locking +// the media in the drive. + +IOReturn +IOATAPIHDDrive::reportLockability(bool * isLockable) +{ + *isLockable = true; /* default: if it's removable, it's lockable */ + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report our polling requirments. 
+ +IOReturn +IOATAPIHDDrive::reportPollRequirements(bool * pollRequired, + bool * pollIsExpensive) +{ + *pollIsExpensive = false; + *pollRequired = _isRemovable; + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report the current state of the media. + +IOReturn +IOATAPIHDDrive::reportMediaState(bool * mediaPresent, + bool * changed) +{ + IOATACommand * cmd = 0; + IOMemoryDescriptor * senseData = 0; + UInt8 senseBuf[18]; + ATAResults results; + IOReturn ret; + + assert(mediaPresent && changed); + + do { + ret = kIOReturnNoMemory; + + bzero((void *) senseBuf, sizeof(senseBuf)); + senseData = IOMemoryDescriptor::withAddress(senseBuf, + sizeof(senseBuf), + kIODirectionIn); + if (!senseData) + break; + + cmd = atapiCommandTestUnitReady(); + if (!cmd) + break; + + // Execute the Test Unit Ready command with no retries. + // + syncExecute(cmd, kATADefaultTimeout, kATAZeroRetry, senseData); + + ret = kIOReturnSuccess; + + if (cmd->getResults(&results) == kIOReturnSuccess) + { + *mediaPresent = true; + *changed = (*mediaPresent != _mediaPresent); + _mediaPresent = true; + } + else + { + UInt8 errorCode = senseBuf[0]; + UInt8 senseKey = senseBuf[2]; + +#ifdef DEBUG_LOG + UInt8 senseCode = senseBuf[12]; + UInt8 senseQualifier = senseBuf[13]; + + IOLog("-- IOATAPIHDDrive::reportMediaState --\n"); + IOLog("Error code: %02x\n", errorCode); + IOLog("Sense Key : %02x\n", senseKey); + IOLog("ASC : %02x\n", senseCode); + IOLog("ASCQ : %02x\n", senseQualifier); +#endif + + *mediaPresent = false; + *changed = (*mediaPresent != _mediaPresent); + _mediaPresent = false; + + // The error code field for ATAPI request sense should always + // be 0x70 or 0x71. Otherwise ignore the sense data. 
+ // + if ((errorCode == 0x70) || (errorCode == 0x71)) + { + switch (senseKey) { + case 5: /* Invalid ATAPI command */ + ret = kIOReturnIOError; + break; + + case 2: /* Not ready */ + break; + + default: + break; + } + } + } + } + while (false); + + if (cmd) + cmd->release(); + + if (senseData) + senseData->release(); + +#if 0 + IOLog("%s: media present %s, changed %s\n", getName(), + *mediaPresent ? "Y" : "N", + *changed ? "Y" : "N" + ); +#endif + + return ret; +} + +//--------------------------------------------------------------------------- +// Report media removability. + +IOReturn +IOATAPIHDDrive::reportRemovability(bool * isRemovable) +{ + UInt8 inqBuf[2]; + + *isRemovable = false; + + if (_ataDevice->getInquiryData(sizeof(inqBuf), (ATAPIInquiry *) inqBuf)) + { + if (inqBuf[1] & 0x80) + *isRemovable = _isRemovable = true; + else + *isRemovable = _isRemovable = false; + } + + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Report whether media is write-protected. + +IOReturn +IOATAPIHDDrive::reportWriteProtection(bool * isWriteProtected) +{ + *isWriteProtected = false; // defaults to read-write + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Instantiate an ATAPI specific subclass of IOBlockStorageDevice. + +IOService * +IOATAPIHDDrive::instantiateNub() +{ + IOService * nub = new IOATAPIHDDriveNub; + return nub; +} + +//--------------------------------------------------------------------------- +// Override the handleActiveStateTransition() method in IOATAHDDrive and +// perform ATAPI specific handling. + +void +IOATAPIHDDrive::handleActiveStateTransition( UInt32 stage, IOReturn status ) +{ + // Restore the lock on the media after the ATAPI device wakes up from + // sleep. Assume that the drive will always power up in the unlocked state. + // Technically, some drives may have a jumper to set the default state + // at power up. 
+ + if ( ( stage == kIOATAActiveStage0 ) && _isLocked ) + { + IOStorageCompletion completion; + IOReturn ret; + IOATACommand * cmd = atapiCommandPreventAllowRemoval( true ); + + completion.target = this; + completion.action = sHandleActiveStateTransition; + completion.parameter = (void *) kIOATAActiveStage1; + + if ( cmd ) + { + cmd->setQueueInfo( kATAQTypeBypassQ ); + ret = asyncExecute( cmd, completion ); + cmd->release(); + } + } + else + { + super::handleActiveStateTransition( stage, status ); + } +} diff --git a/iokit/Families/IOATAPIHDDrive/IOATAPIHDDriveNub.cpp b/iokit/Families/IOATAPIHDDrive/IOATAPIHDDriveNub.cpp new file mode 100644 index 000000000..668d717e9 --- /dev/null +++ b/iokit/Families/IOATAPIHDDrive/IOATAPIHDDriveNub.cpp @@ -0,0 +1,247 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPIHDDriveNub.cpp + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * 2-Sep-1999 Joe Liu (jliu) created. 
+ */ + +#include +#include +#include + +#define super IOBlockStorageDevice +OSDefineMetaClassAndStructors( IOATAPIHDDriveNub, IOBlockStorageDevice ) + +//--------------------------------------------------------------------------- +// attach to provider. + +bool IOATAPIHDDriveNub::attach(IOService * provider) +{ + if (!super::attach(provider)) + return false; + + _provider = OSDynamicCast(IOATAPIHDDrive, provider); + if (_provider == 0) { + IOLog("IOATAPIHDDriveNub: attach; wrong provider type!\n"); + return false; + } + + return true; +} + +//--------------------------------------------------------------------------- +// detach from provider. + +void IOATAPIHDDriveNub::detach(IOService * provider) +{ + if (_provider == provider) + _provider = 0; + + super::detach(provider); +} + +//--------------------------------------------------------------------------- +// doAsyncReadWrite + +IOReturn IOATAPIHDDriveNub::doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion) +{ + return _provider->doAsyncReadWrite(buffer, block, nblks, completion); +} + +//--------------------------------------------------------------------------- +// doSyncReadWrite + +IOReturn IOATAPIHDDriveNub::doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks) +{ + return _provider->doSyncReadWrite(buffer, block, nblks); +} + + +//--------------------------------------------------------------------------- +// doEjectMedia + +IOReturn IOATAPIHDDriveNub::doEjectMedia() +{ + return _provider->doEjectMedia(); +} + +//--------------------------------------------------------------------------- +// doFormatMedia + +IOReturn IOATAPIHDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return _provider->doFormatMedia(byteCapacity); +} + +//--------------------------------------------------------------------------- +// doGetFormatCapacities + +UInt32 +IOATAPIHDDriveNub::doGetFormatCapacities(UInt64 * capacities, + UInt32 
capacitiesMaxCount) const +{ + return _provider->doGetFormatCapacities(capacities, capacitiesMaxCount); +} + +//--------------------------------------------------------------------------- +// doLockUnlockMedia + +IOReturn IOATAPIHDDriveNub::doLockUnlockMedia(bool doLock) +{ + return _provider->doLockUnlockMedia(doLock); +} + +//--------------------------------------------------------------------------- +// doSynchronizeCache + +IOReturn IOATAPIHDDriveNub::doSynchronizeCache() +{ + return _provider->doSynchronizeCache(); +} + +//--------------------------------------------------------------------------- +// getVendorString + +char * IOATAPIHDDriveNub::getVendorString() +{ + return _provider->getVendorString(); +} + +//--------------------------------------------------------------------------- +// getProductString + +char * IOATAPIHDDriveNub::getProductString() +{ + return _provider->getProductString(); +} + +//--------------------------------------------------------------------------- +// getRevisionString + +char * IOATAPIHDDriveNub::getRevisionString() +{ + return _provider->getRevisionString(); +} + +//--------------------------------------------------------------------------- +// getAdditionalDeviceInfoString + +char * IOATAPIHDDriveNub::getAdditionalDeviceInfoString() +{ + return _provider->getAdditionalDeviceInfoString(); +} + +//--------------------------------------------------------------------------- +// reportBlockSize + +IOReturn IOATAPIHDDriveNub::reportBlockSize(UInt64 * blockSize) +{ + return _provider->reportBlockSize(blockSize); +} + +//--------------------------------------------------------------------------- +// reportEjectability + +IOReturn IOATAPIHDDriveNub::reportEjectability(bool * isEjectable) +{ + return _provider->reportEjectability(isEjectable); +} + +//--------------------------------------------------------------------------- +// reportLockability + +IOReturn IOATAPIHDDriveNub::reportLockability(bool * isLockable) +{ + return 
_provider->reportLockability(isLockable); +} + +//--------------------------------------------------------------------------- +// reportPollRequirements + +IOReturn IOATAPIHDDriveNub::reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive) +{ + return _provider->reportPollRequirements(pollIsRequired, pollIsExpensive); +} + +//--------------------------------------------------------------------------- +// reportMaxReadTransfer + +IOReturn IOATAPIHDDriveNub::reportMaxReadTransfer(UInt64 blockSize, + UInt64 * max) +{ + return _provider->reportMaxReadTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// reportMaxValidBlock + +IOReturn IOATAPIHDDriveNub::reportMaxValidBlock(UInt64 * maxBlock) +{ + return _provider->reportMaxValidBlock(maxBlock); +} + +//--------------------------------------------------------------------------- +// reportMaxWriteTransfer + +IOReturn IOATAPIHDDriveNub::reportMaxWriteTransfer(UInt64 blockSize, + UInt64 * max) +{ + return _provider->reportMaxWriteTransfer(blockSize, max); +} + +//--------------------------------------------------------------------------- +// reportMediaState + +IOReturn IOATAPIHDDriveNub::reportMediaState(bool * mediaPresent, + bool * changed) +{ + return _provider->reportMediaState(mediaPresent, changed); +} + +//--------------------------------------------------------------------------- +// reportRemovability + +IOReturn IOATAPIHDDriveNub::reportRemovability(bool * isRemovable) +{ + return _provider->reportRemovability(isRemovable); +} + +//--------------------------------------------------------------------------- +// reportWriteProtection + +IOReturn IOATAPIHDDriveNub::reportWriteProtection(bool * isWriteProtected) +{ + return _provider->reportWriteProtection(isWriteProtected); +} diff --git a/iokit/Families/IOATAStandard/ATAQueueHelpers.cpp b/iokit/Families/IOATAStandard/ATAQueueHelpers.cpp new file mode 100644 index 000000000..1f15dd6b9 --- 
/dev/null +++ b/iokit/Families/IOATAStandard/ATAQueueHelpers.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * queueHelpers.cpp + * + */ +#include + +void IOATAStandardDevice::addCommand( queue_head_t *list, IOATAStandardCommand *ataCmd ) +{ + ataCmd->list = list; + + queue_enter( list, ataCmd, IOATAStandardCommand *, nextCommand ); +} + +void IOATAStandardDevice::deleteCommand( queue_head_t *list, IOATAStandardCommand *ataCmd, IOReturn rc = kIOReturnSuccess ) +{ + ataCmd->list = 0; + + if ( rc != kIOReturnSuccess ) + { + if ( ataCmd->results.returnCode == kIOReturnSuccess ) + { + ataCmd->results.returnCode = (IOReturn) rc; + } + } + + queue_remove( list, ataCmd, IOATAStandardCommand *, nextCommand ); +} + +IOATAStandardCommand *IOATAStandardDevice::checkCommand( queue_head_t *list ) +{ + if ( queue_empty( list ) == true ) + { + return 0; + } + + return (IOATAStandardCommand *)queue_first( list ); +} + + +IOATAStandardCommand *IOATAStandardDevice::getCommand( queue_head_t *list ) +{ + IOATAStandardCommand *ataCmd = 0; + + if ( queue_empty( list 
) == false ) + { + queue_remove_first( list, ataCmd, IOATAStandardCommand *, nextCommand ); + ataCmd->list = 0; + } + + return ataCmd; +} + +void IOATAStandardDevice::stackCommand( queue_head_t *list, IOATAStandardCommand *ataCmd ) +{ + ataCmd->list = list; + + queue_enter_first( list, ataCmd, IOATAStandardCommand *, nextCommand ); +} + +void IOATAStandardDevice::moveCommand( queue_head_t *fromList, queue_head_t *toList, IOATAStandardCommand *ataCmd, IOReturn rc = kIOReturnSuccess ) +{ + if ( rc != kIOReturnSuccess ) + { + if ( ataCmd->results.returnCode == kIOReturnSuccess ) + { + ataCmd->results.returnCode = (IOReturn) rc; + } + } + + ataCmd->list = toList; + + queue_remove( fromList, ataCmd, IOATAStandardCommand *, nextCommand ); + queue_enter( toList, ataCmd, IOATAStandardCommand *, nextCommand ); +} + +void IOATAStandardDevice::moveAllCommands( queue_head_t *fromList, queue_head_t *toList, IOReturn rc = kIOReturnSuccess ) +{ + IOATAStandardCommand *ataCmd; + + if ( queue_empty( fromList ) == true ) return; + + do + { + ataCmd = (IOATAStandardCommand *)queue_first( fromList ); + + if ( rc != kIOReturnSuccess ) + { + if ( ataCmd->results.returnCode == kIOReturnSuccess ) + { + ataCmd->results.returnCode = (IOReturn) rc; + } + } + + ataCmd->list = toList; + + queue_remove( fromList, ataCmd, IOATAStandardCommand *, nextCommand ); + queue_enter( toList, ataCmd, IOATAStandardCommand *, nextCommand ); + + } while( queue_empty( fromList ) == false ); +} + +bool IOATAStandardDevice::findCommand( queue_head_t *list, IOATAStandardCommand *findATACmd ) +{ + IOATAStandardCommand *ataCmd; + + queue_iterate( list, ataCmd, IOATAStandardCommand *, nextCommand ) + { + if ( ataCmd == findATACmd ) + { + return true; + } + } + return false; +} + +void IOATAStandardDevice::purgeAllCommands( queue_head_t *list, IOReturn rc ) +{ + IOATAStandardCommand *ataCmd; + + if ( queue_empty( list ) == true ) return; + + do + { + ataCmd = (IOATAStandardCommand *)queue_first( list ); + + 
deleteCommand( list, ataCmd, rc ); + finishCommand( ataCmd ); + + } while( queue_empty( list ) == false ); +} diff --git a/iokit/Families/IOATAStandard/IOATAStandardCommand.cpp b/iokit/Families/IOATAStandard/IOATAStandardCommand.cpp new file mode 100644 index 000000000..adaa5e598 --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardCommand.cpp @@ -0,0 +1,343 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAStandardCommand.cpp + * + */ + +#include + +#include +#include + +#undef super +#define super IOATACommand + +OSDefineMetaClassAndStructors( IOATAStandardCommand, IOATACommand ) +OSDefineMetaClassAndAbstractStructors( IOATACommand, IOCDBCommand ) + +IOATADevice *IOATAStandardCommand::getDevice(IOATADevice *) +{ + return (IOATADevice *)device; +} + +IOATAStandardDevice *IOATAStandardCommand::getDevice(IOATAStandardDevice *) +{ + return device; +} + +void *IOATAStandardCommand::getClientData() +{ + return clientData; +} + +void *IOATAStandardCommand::getCommandData() +{ + return commandPrivateData; +} + +UInt32 IOATAStandardCommand::getCmdType() +{ + return cmdType; +} + +IOATAStandardCommand *IOATAStandardCommand::getOriginalCmd() +{ + return origCommand; +} + +UInt32 IOATAStandardCommand::getSequenceNumber() +{ + return sequenceNumber; +} + +ATAUnit IOATAStandardCommand::getUnit() +{ + return device->unit; +} + +void IOATAStandardCommand::setTaskfile( ATATaskfile *srcTaskfile ) +{ + taskfile = *srcTaskfile; +} + +void IOATAStandardCommand::getTaskfile( ATATaskfile *dstTaskfile ) +{ + *dstTaskfile = taskfile; +} + +UInt32 IOATAStandardCommand::getFlags() +{ + return taskfile.flags; +} + +ATAProtocol IOATAStandardCommand::getProtocol() +{ + return taskfile.protocol; +} + +UInt32 IOATAStandardCommand::getResultMask() +{ + return taskfile.resultmask; +} + + +void IOATAStandardCommand::setTimeout( UInt32 timeoutMS ) +{ + timeout = timeoutMS; +} + +UInt32 IOATAStandardCommand::getTimeout() +{ + return timeout; +} + + +void IOATAStandardCommand::setResults( ATAResults *srcResults ) +{ + results = *srcResults; + + if ( getCmdType() == kATACommandExecute ) + { + if ( results.bytesTransferred < xferCount ) + { + if ( results.returnCode == kIOReturnSuccess ) + { + results.returnCode = kIOReturnUnderrun; + } + } + } +} + +IOReturn IOATAStandardCommand::getResults( ATAResults *dstResults ) +{ + if ( dstResults != 0 ) + { 
+ *dstResults = results; + } + + return results.returnCode; +} + +void IOATAStandardCommand::setQueueInfo( UInt32 forQueueType, UInt32 forQueuePosition ) +{ + queueType = forQueueType; + queuePosition = forQueuePosition; +} + +void IOATAStandardCommand::getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ) +{ + if ( forQueueType != 0 ) *forQueueType = queueType; + if ( forQueuePosition != 0 ) *forQueuePosition = queuePosition; +} + +void IOATAStandardCommand::setPointers( IOMemoryDescriptor *clientDesc, UInt32 transferCount, bool isWrite, bool isSense = false ) +{ + if ( isSense == false ) + { + xferDesc = clientDesc; + xferCount = transferCount; + xferDirection = isWrite; + } + else + { + senseData = clientDesc; + senseLength = transferCount; + } +} + +void IOATAStandardCommand::getPointers( IOMemoryDescriptor **clientDesc, UInt32 *transferCount, bool *isWrite, bool isSense = false ) +{ + if ( clientDesc != NULL ) + { + *clientDesc = (isSense == false) ? xferDesc : senseData; + } + + if ( transferCount != NULL ) + { + *transferCount = (isSense == false) ? xferCount : senseLength; + } + + if ( isWrite != NULL ) + { + *isWrite = (isSense == false) ? 
xferDirection : false; + } +} + +void IOATAStandardCommand::setCDB( ATACDBInfo *clientATACmd ) +{ + ataCmd = *clientATACmd; +} + +void IOATAStandardCommand::getCDB( ATACDBInfo *clientATACmd ) +{ + *clientATACmd = ataCmd; +} + +void IOATAStandardCommand::setCallback( void *clientTarget, CallbackFn clientATADoneFn, void *clientRefcon ) +{ + completionInfo.async.target = clientTarget; + completionInfo.async.callback = clientATADoneFn; + completionInfo.async.refcon = clientRefcon; +} + +bool IOATAStandardCommand::execute( UInt32 *cmdSequenceNumber ) +{ + bool isSync; + + do + { + sequenceNumber = OSIncrementAtomic( (SInt32 *)&controller->sequenceNumber ); + } + while ( sequenceNumber == 0 ); + + if ( cmdSequenceNumber != 0 ) + { + *cmdSequenceNumber = sequenceNumber; + } + + list = (queue_head_t *)device->deviceGate; + + isSync = (completionInfo.async.callback == 0); + + if ( isSync ) + { + completionInfo.sync.lock = IOSyncer::create(); + } + + device->submitCommand( kATACommandExecute, this ); + + if ( isSync ) + { + completionInfo.sync.lock->wait(); + } + + return true; + +} + +void IOATAStandardCommand::abort( UInt32 sequenceNumber ) +{ + device->submitCommand( kATACommandAbort, this, sequenceNumber ); +} + +void IOATAStandardCommand::complete() +{ + if ( device ) + { + device->completeCommand( this ); + } + else + { + controller->completeCommand( this ); + } +} + +/*------------------- Generic CDB Interface -----------------------------------------------*/ + +void IOATAStandardCommand::getCDB( CDBInfo *cdbInfo ) +{ + ATACDBInfo ataCDBInfo; + + bzero( cdbInfo, sizeof(CDBInfo) ); + + getCDB( &ataCDBInfo ); + cdbInfo->cdb = ataCDBInfo.cdb; + cdbInfo->cdbLength = ataCDBInfo.cdbLength; +} + +void IOATAStandardCommand::setCDB( CDBInfo *cdbInfo ) +{ + IOATAStandardDevice *ataDevice; + ATATimingProtocol ataTimingProtocol; + ATACDBInfo ataCDBInfo; + ATATaskfile ataTaskfile; + + ataDevice = getDevice(kIOATAStandardDevice); + + if ( ataDevice->getDeviceType() != 
kATADeviceATAPI ) + { + return; + } + + bzero( &ataTaskfile, sizeof(ataTaskfile) ); + + ataDevice->getTimingSelected( &ataTimingProtocol ); + + ataTaskfile.regmask = ATARegtoMask(kATARegATAPIDeviceSelect) + | ATARegtoMask(kATARegATAPICommand) + | ATARegtoMask(kATARegATAPIByteCountLow) + | ATARegtoMask(kATARegATAPIByteCountHigh) + | ATARegtoMask(kATARegATAPIFeatures); + + ataTaskfile.ataRegs[kATARegATAPICommand] = kATACommandATAPIPacket; + ataTaskfile.ataRegs[kATARegATAPIDeviceSelect] = kATAModeLBA | (getUnit() << 4); + + if ( ataTimingProtocol & ~kATATimingPIO ) + { + ataTaskfile.protocol = kATAProtocolATAPIDMA; + ataTaskfile.ataRegs[kATARegATAPIFeatures] = 0x01; + } + else + { + ataTaskfile.protocol = kATAProtocolATAPIPIO; + ataTaskfile.ataRegs[kATARegATAPIByteCountLow] = 0xfe; + ataTaskfile.ataRegs[kATARegATAPIByteCountHigh] = 0xff; + } + + setTaskfile( &ataTaskfile ); + + bzero( &ataCDBInfo, sizeof(ATACDBInfo) ); + + ataCDBInfo.cdbLength = cdbInfo->cdbLength; + ataCDBInfo.cdb = cdbInfo->cdb; + setCDB( &ataCDBInfo ); + + setQueueInfo(); +} + +IOReturn IOATAStandardCommand::getResults( CDBResults *cdbResults ) +{ + ATAResults ataResults; + IOReturn rc; + + rc = getResults( &ataResults ); + + if ( cdbResults != 0 ) + { + bzero( cdbResults, sizeof(CDBResults) ); + + cdbResults->returnCode = ataResults.returnCode; + cdbResults->bytesTransferred = ataResults.bytesTransferred; + cdbResults->requestSenseDone = ataResults.returnCode; + cdbResults->requestSenseLength = ataResults.requestSenseLength; + } + + return rc; +} + + +IOCDBDevice *IOATAStandardCommand::getDevice( IOCDBDevice * ) +{ + return (IOCDBDevice *)device; +} diff --git a/iokit/Families/IOATAStandard/IOATAStandardController.cpp b/iokit/Families/IOATAStandard/IOATAStandardController.cpp new file mode 100644 index 000000000..3374fbbdd --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardController.cpp @@ -0,0 +1,956 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOATAStandardController.cpp + * + */ + +#include +#include + +#undef super +#define super IOService + +OSDefineMetaClass( IOATAStandardController, IOService ) +OSDefineAbstractStructors( IOATAStandardController, IOService ); + +#define round(x,y) (((int)(x) + (y) - 1) & ~((y)-1)) + +/* + * + * + */ +bool IOATAStandardController::start( IOService *forProvider ) +{ + provider = forProvider; + +// IOSleep( 15000 ); + + if ( provider->open( this ) != true ) + { + return false; + } + + if ( createWorkLoop() != true ) + { + return false; + } + + if ( configureController() != true ) + { + provider->close( this ); + return false; + } + + if ( scanATABus() != true ) + { + provider->close( this ); + return false; + } + + return true; +} + +/* + * + * + * + */ +bool IOATAStandardController::scanATABus() +{ + if ( createDeviceNubs() != true ) + { + return false; + } + + timer( timerEvent ); + + if ( initTimings() == false ) + { + return false; + } + + disableControllerInterrupts(); + + if ( reset() != kIOReturnSuccess ) + { + return false; + } + + enableControllerInterrupts(); + + if ( probeDeviceNubs() != 
true ) + { + return false; + } + + if ( registerDeviceNubs() != true ) + { + return false; + } + + return true; +} + + +/* + * + * + * + */ +bool IOATAStandardController::createDeviceNubs() +{ + UInt32 i; + IOATAStandardDevice *ataDev; + + for (i = 0; i < controllerInfo.maxDevicesSupported; i++ ) + { + ataDev = targets[i].device = new IOATAStandardDevice; + + if ( ataDev->init( this, i ) != true ) + { + ataDev->release(); + targets[i].device = NULL; + } + } + + resetCmd = allocCommand( 0 ); + resetCmd->cmdType = kATACommandBusReset; + resetCmd->setTimeout( kATAResetTimeoutmS ); + resetCmd->setPointers( 0, 0, false ); + + return true; +} + +/* + * + * + * + */ +bool IOATAStandardController::probeDeviceNubs() +{ + UInt32 i; + IOATAStandardDevice *ataDev; + + for (i = 0; i < controllerInfo.maxDevicesSupported; i++ ) + { + ataDev = targets[i].device; + if ( ataDev->probeDeviceType() == kATADeviceNone ) + { + ataDev->release(); + targets[i].device = NULL; + } + } + + for (i = 0; i < controllerInfo.maxDevicesSupported; i++ ) + { + ataDev = targets[i].device; + if ( ataDev == NULL ) + { + continue; + } + + if ( ataDev->probeDevice() != true ) + { + ataDev->release(); + targets[i].device = NULL; + } + } + + return true; +} + + +/* + * + * + * + */ +bool IOATAStandardController::registerDeviceNubs() +{ + UInt32 i; + IOATAStandardDevice *ataDev; + + for (i = 0; i < controllerInfo.maxDevicesSupported; i++ ) + { + ataDev = targets[i].device; + if ( ataDev != NULL ) + { + ataDev->attach( this ); + ataDev->registerService(); + } + } + + return true; +} + +/* + * + * + * + */ +bool IOATAStandardController::initTimings() +{ + ATATiming initPIOTiming; + + initPIOTiming.timingProtocol = kATATimingPIO; + initPIOTiming.featureSetting = 0; + initPIOTiming.mode = 0; + initPIOTiming.minDataAccess = 165; + initPIOTiming.minDataCycle = 600; + initPIOTiming.minCmdAccess = 290; + initPIOTiming.minCmdCycle = 600; + + if ( calculateTiming( 0, &initPIOTiming ) != true ) + { + return false; + } 
+ + if ( calculateTiming( 1, &initPIOTiming ) != true ) + { + return false; + } + + return true; +} + +/* + * + * + * + */ +bool IOATAStandardController::matchNubWithPropertyTable( IOService *nub, OSDictionary *table ) +{ + bool rc; + + rc = nub->compareProperty( table, kATAPropertyLocation ); + + return rc; +} + + + +/* + * + * + * + */ +void IOATAStandardController::releaseDevice( IOATAStandardDevice *device ) +{ + workLoopRequest( kWorkLoopReleaseDevice, (UInt32) device ); + + device->release(); +} + +/* + * + * + * + */ +bool IOATAStandardController::workLoopRequest( WorkLoopReqType type, UInt32 p1, UInt32 p2, UInt32 p3 ) +{ + WorkLoopRequest workLoopReq; + + bzero( &workLoopReq, sizeof(WorkLoopRequest) ); + workLoopReq.type = type; + workLoopReq.sync = IOSyncer::create(); + + workLoopReqGate->runCommand( &workLoopReq, (void *)p1, (void *)p2, (void *)p3 ); + + workLoopReq.sync->wait(); + + return( workLoopReq.rc ); +} + + +/* + * + * + * + */ +void IOATAStandardController::workLoopProcessRequest( WorkLoopRequest *workLoopReq, void *p1, void *p2, void *p3 ) +{ + bool rc = true; + IOATAStandardDevice *device; + + switch ( workLoopReq->type ) + { + + case kWorkLoopInitDevice: + device = (IOATAStandardDevice *) p1; + addDevice( device ); + rc = allocateDevice( device->unit ); + break; + + case kWorkLoopReleaseDevice: + device = (IOATAStandardDevice *) p1; + deleteDevice( device ); + break; + } + + workLoopReq->rc = rc; + workLoopReq->sync->signal(); +} + +/* + * + * + * + */ +void IOATAStandardController::addDevice( IOATAStandardDevice *forDevice ) +{ + ATAUnit unit; + + unit = forDevice->unit; + + forDevice->target = &targets[unit]; + targets[unit].device = forDevice; +} + +/* + * + * + * + */ +void IOATAStandardController::deleteDevice( IOATAStandardDevice *forDevice ) +{ + ATAUnit unit; + + unit = forDevice->unit; + targets[unit].device = 0; +} + + +/* + * + * + * + */ +bool IOATAStandardController::allocateDevice( ATAUnit unit ) +{ + return true; +} + +/* + * + 
* + * + */ +void IOATAStandardController::deallocateDevice( ATAUnit unit ) +{ +} + + +/* + * + * + * + */ +void *IOATAStandardController::getDeviceData( ATAUnit unit ) +{ + IOATAStandardDevice *device; + + device = targets[unit].device; + + if ( device == 0 ) return 0; + + return device->devicePrivateData; +} + + +/* + * + * + * + */ +IOReturn IOATAStandardController::reset() +{ + if ( busResetState != kStateIdle ) + { + return kIOReturnNoResources; + } + + busResetState = kStateIssue; + dispatchRequest(); + + while ( busResetState != kStateIdle ) + { + IOSleep( 100 ); + } + + return resetCmd->getResults( (ATAResults *)0 ); +} + +/* + * + * + * + */ +void IOATAStandardController::resetATABus() +{ + if ( busResetState != kStateIssue ) + { + return; + } + + busResetState = kStateActive; + + resetStarted(); + + resetCommand( resetCmd ); +} + +/* + * + * + * + */ +void IOATAStandardController::resetStarted() +{ + IOATAStandardDevice *device; + UInt32 i; + + for (i=0; i < controllerInfo.maxDevicesSupported; i++ ) + { + device = targets[i].device; + + if ( (device != 0) && (device->client != 0) && (device->abortCmdPending != kATACommandDeviceReset) ) + { + device->client->message( kATAClientMsgBusReset, device ); + } + } +} + + +/* + * + * + * + */ +bool IOATAStandardController::checkBusReset() +{ + if ( busResetState == kStateIdle ) + { + return false; + } + if ( busResetState == kStateIssue ) + { + resetATABus(); + } + return true; +} + + +/* + * + * + */ +void IOATAStandardController::timer( IOTimerEventSource * /* timer */ ) +{ + UInt32 i; + IOATAStandardDevice *device; + + + if ( disableTimer ) + { + if ( !--disableTimer ) + { + disableTimeoutOccurred(); + } + } + + for (i=0; i < controllerInfo.maxDevicesSupported; i++ ) + { + device = targets[i].device; + if ( device != 0 ) + { + device->timer(); + } + } + + timerEvent->setTimeoutMS(kATATimerIntervalmS); +} + + +/* + * + * + * + */ +void IOATAStandardController::completeCommand( IOATAStandardCommand *ataCmd ) +{ + 
switch ( ataCmd->cmdType ) + { + case kATACommandBusReset: + busResetState = kStateIdle; + resetOccurred(); + break; + default: + ; + } +} + +/* + * + * + * + */ +void IOATAStandardController::resetOccurred() +{ + UInt32 i; + IOATAStandardDevice *device; + + for (i=0; i < controllerInfo.maxDevicesSupported; i++ ) + { + device = targets[i].device; + + if ( device == 0 ) continue; + + if ( device->abortCmdPending != kATACommandDeviceReset ) + { + device->resetOccurred( (ATAClientMessage) (kATAClientMsgBusReset | kATAClientMsgDone) ); + } + } +} + +/* + * + * + * + */ +bool IOATAStandardController::createWorkLoop() +{ + workLoop = getWorkLoop(); + if ( workLoop == 0 ) + { + workLoop = new IOWorkLoop; + if ( workLoop == 0 ) + { + return false; + } + } + + if ( workLoop->init() != true ) + { + return false; + } + + timerEvent = IOTimerEventSource::timerEventSource( this, (IOTimerEventSource::Action) &IOATAStandardController::timer ); + if ( timerEvent == NULL ) + { + return false; + } + + if ( workLoop->addEventSource( timerEvent ) != kIOReturnSuccess ) + { + return false; + } + + timer( timerEvent ); + + + dispatchEvent = IOInterruptEventSource::interruptEventSource( this, + (IOInterruptEventAction) &IOATAStandardController::dispatch, + 0 ); + if ( dispatchEvent == 0 ) + { + return false; + } + + if ( workLoop->addEventSource( dispatchEvent ) != kIOReturnSuccess ) + { + return false; + } + + workLoopReqGate = IOCommandGate::commandGate( this, (IOCommandGate::Action) &IOATAStandardController::workLoopProcessRequest ); + if ( workLoopReqGate == NULL ) + { + return false; + } + + if ( workLoop->addEventSource( workLoopReqGate ) != kIOReturnSuccess ) + { + return false; + } + + + return true; +} + +/* + * + * + * + */ +IOATAStandardCommand *IOATAStandardController::findCommandWithNexus( IOATAStandardDevice *device, UInt32 tagValue = (UInt32)-1 ) +{ + return ((IOATAStandardDevice *)device)->findCommandWithNexus( tagValue ); +} + +/* + * + * + * + */ +bool 
IOATAStandardController::configureController() +{ + UInt32 targetsSize; + + if ( configure( provider, &controllerInfo ) == false ) + { + return false; + } + + controllerInfo.commandPrivateDataSize = round( controllerInfo.commandPrivateDataSize, 16 ); + + targetsSize = controllerInfo.maxDevicesSupported * sizeof(ATATarget); + targets = (ATATarget *)IOMalloc( targetsSize ); + bzero( targets, targetsSize ); + + commandLimit = commandLimitSave = (UInt32)-1; + + return true; +} + +/* + * + * + * + */ +void IOATAStandardController::setCommandLimit( IOATAStandardDevice *device, UInt32 newCommandLimit ) +{ + ((IOATAStandardDevice *)device)->commandLimit = newCommandLimit; +} + + +/* + * + * + * + */ +void IOATAStandardController::disableControllerInterrupts() +{ + workLoop->disableAllInterrupts(); +} + +/* + * + * + * + */ +void IOATAStandardController::enableControllerInterrupts() +{ + workLoop->enableAllInterrupts(); +} + + +/* + * + * + * + */ +IOWorkLoop *IOATAStandardController::getWorkLoop() const +{ + return workLoop; +} + +/* + * + * + * + */ +void IOATAStandardController::disableCommands( UInt32 disableTimeoutmS ) +{ + commandDisable = true; + + disableTimer = ( disableTimeoutmS != 0 ) ? (disableTimeoutmS / kATATimerIntervalmS + 1) : 0; +} + + +/* + * + * + * + */ +void IOATAStandardController::disableCommands() +{ + UInt32 disableTimeout; + + commandDisable = true; + + disableTimeout = kATADisableTimeoutmS; + + if ( noDisconnectCmd != 0 ) + { + disableTimeout = noDisconnectCmd->getTimeout(); + if ( disableTimeout != 0 ) disableTimeout += kATADisableTimeoutmS; + } + + disableTimer = ( disableTimeout != 0 ) ? 
(disableTimeout / kATATimerIntervalmS + 1) : 0; +} + +/* + * + * + * + */ +void IOATAStandardController::disableTimeoutOccurred() +{ + busResetState = kStateIssue; + dispatchRequest(); +} + + +/* + * + * + * + */ +UInt32 IOATAStandardController::getCommandCount() +{ + return commandCount; +} + +/* + * + * + * + */ +void IOATAStandardController::suspendDevice( IOATAStandardDevice *device ) +{ + ((IOATAStandardDevice *)device)->suspend(); +} + +/* + * + * + * + */ +void IOATAStandardController::resumeDevice( IOATAStandardDevice *device ) +{ + ((IOATAStandardDevice *)device)->resume(); +} + +/* + * + * + * + */ +IOATAStandardDevice *IOATAStandardController::selectDevice() +{ + IOATAStandardDevice *ataDev; + IOATAStandardDevice *selectedDevice = 0; + AbsoluteTime maxSuspendTime; + UInt32 i; + + AbsoluteTime_to_scalar(&maxSuspendTime) = 0; + + for (i = 0; i < controllerInfo.maxDevicesSupported; i++ ) + { + ataDev = targets[i].device; + if ( ataDev != NULL ) + { + if ( ataDev->isSuspended == true ) + { + if ( CMP_ABSOLUTETIME(&ataDev->suspendTime, &maxSuspendTime) > 0 ) + { + selectedDevice = ataDev; + AbsoluteTime_to_scalar( &maxSuspendTime ) = AbsoluteTime_to_scalar( &ataDev->suspendTime ); + } + } + } + } + + return (IOATAStandardDevice *) selectedDevice; +} + + +/* + * + * + * + */ +void IOATAStandardController::rescheduleCommand( IOATAStandardCommand *forATACmd ) +{ + forATACmd->getDevice(kIOATAStandardDevice)->rescheduleCommand( forATACmd ); +} + +/* + * + * + * + */ +void IOATAStandardController::enableCommands() +{ + commandDisable = false; + + disableTimer = 0; + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOATAStandardController::dispatchRequest() +{ + dispatchEvent->interruptOccurred(0, 0, 0); +} + + +/* + * + * + * + */ +void IOATAStandardController::dispatch() +{ + ATATarget *target; + IOATAStandardDevice *device; + UInt32 dispatchAction; + UInt32 i; + + if ( checkBusReset() == true ) + { + goto dispatch_Exit; + } + + for ( i = 0; i < 
controllerInfo.maxDevicesSupported; i++ ) + { + target = &targets[i]; + + device = target->device; + if ( device == 0 ) continue; + + if ( target->state == kStateActive ) + { + if ( device->dispatch( &dispatchAction ) == false ) + { + target->state = kStateIdle; + } + + switch ( dispatchAction ) + { + case kDispatchNextDevice: + break; + case kDispatchStop: + goto dispatch_Exit; + } + } + } + +dispatch_Exit: + ; +} + +/* + * + * + * + */ +IOATAStandardCommand *IOATAStandardController::allocCommand(UInt32 clientDataSize ) +{ + IOATAStandardCommand *cmd; + UInt32 size; + + size = controllerInfo.commandPrivateDataSize + round(clientDataSize, 16); + + cmd = new IOATAStandardCommand; + if ( !cmd ) + { + return 0; + } + cmd->init(); + + if ( size ) + { + cmd->dataArea = (void *)IOMallocContiguous( (vm_size_t)size, 16, 0 ); + if ( !cmd->dataArea ) + { + cmd->release(); + return 0; + } + + bzero( cmd->dataArea, size ); + + cmd->dataSize = size; + + if ( controllerInfo.commandPrivateDataSize ) + { + cmd->commandPrivateData = cmd->dataArea; + } + if ( clientDataSize ) + { + cmd->clientData = (void *)((UInt8 *)cmd->dataArea + controllerInfo.commandPrivateDataSize); + } + } + + cmd->controller = this; + + return cmd; +} + +/* + * + * + * + */ +void IOATAStandardController::free() +{ + UInt32 targetsSize; + + if ( timerEvent != 0 ) timerEvent->release(); + + if ( workLoopReqGate != 0 ) workLoopReqGate->release(); + + if ( dispatchEvent != 0 ) dispatchEvent->release(); + + if ( resetCmd != 0 ) resetCmd->release(); + + if ( workLoop != 0 ) workLoop->release(); + + if ( targets != 0 ) + { + targetsSize = controllerInfo.maxDevicesSupported * sizeof(ATATarget); + IOFree( targets, targetsSize ); + } + + super::free(); +} + +/* + * + * + * + */ +void IOATAStandardCommand::free() +{ + if ( dataArea ) + { + IOFreeContiguous( dataArea, dataSize ); + } + + OSObject::free(); +} + diff --git a/iokit/Families/IOATAStandard/IOATAStandardData.cpp 
b/iokit/Families/IOATAStandard/IOATAStandardData.cpp new file mode 100644 index 000000000..7a9074b5e --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardData.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAData.cpp + * + */ + +#include +#include + +static void *x; +#define id ((ATAIdentify *)x) + +EndianTable AppleIdentifyEndianTable[] = +{ + { sizeof(id->generalConfiguration), sizeof(UInt16) }, + { sizeof(id->logicalCylinders), sizeof(UInt16) }, + { sizeof(id->reserved_1), sizeof(UInt16) }, + { sizeof(id->logicalHeads), sizeof(UInt16) }, + { sizeof(id->reserved_2), sizeof(UInt16) }, + { sizeof(id->logicalSectorsPerTrack), sizeof(UInt16) }, + { sizeof(id->reserved_3), sizeof(UInt16) }, + { sizeof(id->serialNumber), sizeof(UInt8) }, + { sizeof(id->reserved_4), sizeof(UInt16) }, + { sizeof(id->firmwareRevision), sizeof(UInt8) }, + { sizeof(id->modelNumber), sizeof(UInt8) }, + { sizeof(id->multipleModeSectors), sizeof(UInt16) }, + { sizeof(id->reserved_5), sizeof(UInt16) }, + { sizeof(id->capabilities1), sizeof(UInt16) }, + { sizeof(id->capabilities2), sizeof(UInt16) }, + { sizeof(id->pioMode), sizeof(UInt16) }, + { sizeof(id->reserved_6), sizeof(UInt16) }, + { sizeof(id->validFields), sizeof(UInt16) }, + { sizeof(id->currentLogicalCylinders), sizeof(UInt16) }, + { sizeof(id->currentLogicalHeads), sizeof(UInt16) }, + { sizeof(id->currentLogicalSectorsPerTrack), sizeof(UInt16) }, + { sizeof(id->currentAddressableSectors), sizeof(UInt32) }, + { sizeof(id->currentMultipleModeSectors), sizeof(UInt16) }, + { sizeof(id->userAddressableSectors), sizeof(UInt32) }, + { sizeof(id->reserved_7), sizeof(UInt16) }, + { sizeof(id->dmaModes), sizeof(UInt16) }, + { sizeof(id->advancedPIOModes), sizeof(UInt16) }, + { sizeof(id->minDMACycleTime), sizeof(UInt16) }, + { sizeof(id->recDMACycleTime), sizeof(UInt16) }, + { sizeof(id->minPIOCycleTimeNoIORDY), sizeof(UInt16) }, + { sizeof(id->minPIOCyclcTimeIORDY), sizeof(UInt16) }, + { sizeof(id->reserved_8), sizeof(UInt16) }, + { sizeof(id->busReleaseLatency), sizeof(UInt16) }, + { sizeof(id->serviceLatency), sizeof(UInt16) }, + { sizeof(id->reserved_9), sizeof(UInt16) }, + { 
sizeof(id->queueDepth), sizeof(UInt16) }, + { sizeof(id->reserved_10), sizeof(UInt16) }, + { sizeof(id->versionMajor), sizeof(UInt16) }, + { sizeof(id->versionMinor), sizeof(UInt16) }, + { sizeof(id->commandSetsSupported1), sizeof(UInt16) }, + { sizeof(id->commandSetsSupported2), sizeof(UInt16) }, + { sizeof(id->commandSetsSupported3), sizeof(UInt16) }, + { sizeof(id->commandSetsEnabled1), sizeof(UInt16) }, + { sizeof(id->commandSetsEnabled2), sizeof(UInt16) }, + { sizeof(id-> commandSetsDefault), sizeof(UInt16) }, + { sizeof(id->ultraDMAModes), sizeof(UInt16) }, + { sizeof(id->securityEraseTime), sizeof(UInt16) }, + { sizeof(id-> securityEnhancedEraseTime), sizeof(UInt16) }, + { sizeof(id-> currentAdvPowerMgtValue), sizeof(UInt16) }, + { sizeof(id->reserved_11), sizeof(UInt16) }, + { sizeof(id->removableMediaSupported), sizeof(UInt16) }, + { sizeof(id->securityStatus), sizeof(UInt16) }, + { sizeof(id->reserved_12), sizeof(UInt16) }, + { 0, 0 } +}; + +ATAModeTable ApplePIOModes[] = +{ + { 165, 600 }, /* Mode 0 */ + { 125, 383 }, /* 1 */ + { 100, 240 }, /* 2 */ + { 80, 180 }, /* 3 */ + { 70, 120 } /* 4 */ +}; +UInt32 AppleNumPIOModes = (sizeof(ApplePIOModes)/sizeof(ATAModeTable)); + +ATAModeTable AppleDMAModes[] = +{ + { 215, 480 }, /* Mode 0 */ + { 80, 150 }, /* 1 */ + { 70, 120 } /* 2 */ +}; +UInt32 AppleNumDMAModes = (sizeof(AppleDMAModes)/sizeof(ATAModeTable)); + +ATAModeTable AppleUltraModes[] = +{ + { 0, 114 }, /* Mode 0 */ + { 0, 75 }, /* 1 */ + { 0, 55 }, /* 2 */ + { 100, 45 }, /* 3 */ + { 100, 25 } /* 4 */ +}; +UInt32 AppleNumUltraModes = (sizeof(AppleUltraModes)/sizeof(ATAModeTable)); + diff --git a/iokit/Families/IOATAStandard/IOATAStandardDevice.cpp b/iokit/Families/IOATAStandard/IOATAStandardDevice.cpp new file mode 100644 index 000000000..2d3b5c0be --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardDevice.cpp @@ -0,0 +1,2370 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOATAStandardDevice.cpp + * + */ + +#include +#include +#include + +#undef super +#define super IOATADevice + +#ifndef MIN +#define MIN(a,b) ((a <= b) ? 
a : b) +#endif + +#define round(x,y) (((int)(x) + (y) - 1) & ~((y)-1)) + +extern EndianTable AppleIdentifyEndianTable[]; + +extern UInt32 AppleNumPIOModes; +extern ATAModeTable ApplePIOModes[]; +extern UInt32 AppleNumDMAModes; +extern ATAModeTable AppleDMAModes[]; +extern UInt32 AppleNumUltraModes; +extern ATAModeTable AppleUltraModes[]; + +OSDefineMetaClassAndAbstractStructors( IOATADevice, IOCDBDevice ) +OSDefineMetaClassAndStructors( IOATAStandardDevice, IOATADevice ) + +/* + * + * + * + */ +bool IOATAStandardDevice::init( IOATAStandardController *forController, ATAUnit forUnit ) +{ + ATATaskfile taskfile; + ATACDBInfo ataCDB; + + controller = forController; + unit = forUnit; + + target = &controller->targets[unit]; + + queue_init( &deviceList ); + queue_init( &bypassList ); + queue_init( &activeList ); + queue_init( &abortList ); + queue_init( &cancelList ); + + clientSem = IORWLockAlloc(); + if ( clientSem == 0 ) + { + return false; + } + + if ( super::init() == false ) + { + return false; + } + + if ( controller->controllerInfo.devicePrivateDataSize != 0 ) + { + devicePrivateData = IOMallocContiguous( controller->controllerInfo.devicePrivateDataSize, 16, 0 ); + if ( devicePrivateData == 0 ) + { + return false; + } + } + + bzero( &ataCDB, sizeof(ataCDB) ); + + probeCmd = allocCommand(kIOATAStandardDevice, 0); + if ( probeCmd == 0 ) + { + return false; + } + + abortCmd = allocCommand(kIOATAStandardDevice, 0); + if ( abortCmd == 0 ) + { + return false; + } + abortCmd->setTimeout( kATAAbortTimeoutmS ); + + cancelCmd = allocCommand(kIOATAStandardDevice, 0); + if ( cancelCmd == 0 ) + { + return false; + } + cancelCmd->setTimeout( 0 ); + cancelCmd->cmdType = kATACommandCancel; + + reqSenseCmd = allocCommand(kIOATAStandardDevice, 0); + if ( reqSenseCmd == 0 ) + { + return false; + } + + bzero( &taskfile, sizeof(taskfile) ); + taskfile.protocol = kATAProtocolATAPIPIO; + taskfile.tagType = kATATagTypeNone; + + taskfile.resultmask = ATARegtoMask( kATARegStatus ); + + 
taskfile.regmask = ATARegtoMask( kATARegATAPIFeatures ) + | ATARegtoMask( kATARegATAPIByteCountLow ) + | ATARegtoMask( kATARegATAPIByteCountHigh ) + | ATARegtoMask( kATARegATAPIDeviceSelect ) + | ATARegtoMask( kATARegATAPICommand ); + + taskfile.ataRegs[kATARegATAPIFeatures] = 0; + taskfile.ataRegs[kATARegATAPIByteCountLow] = 0xfe; + taskfile.ataRegs[kATARegATAPIByteCountHigh] = 0xff; + taskfile.ataRegs[kATARegATAPIDeviceSelect] = kATAModeLBA | (getUnit() << 4); + taskfile.ataRegs[kATARegATAPICommand] = kATACommandATAPIPacket; + + reqSenseCmd->setTaskfile( &taskfile ); + + ataCDB.cdbLength = 12; + ataCDB.cdb[0] = kATAPICmdRequestSense; + + reqSenseCmd->setTimeout( kATAReqSenseTimeoutmS ); + reqSenseCmd->cmdType = kATACommandReqSense; + reqSenseCmd->setCDB( &ataCDB ); + + deviceGate = IOCommandGate::commandGate( this, (IOCommandGate::Action) &IOATAStandardDevice::receiveCommand ); + if ( deviceGate == 0 ) + { + return false; + } + + if ( controller->workLoop->addEventSource( deviceGate ) != kIOReturnSuccess ) + { + return false; + } + + commandLimitSave = commandLimit = 1; + + idleNotifyActive = false; + + normalQHeld = 0; + bypassQHeld = 0; + + currentTiming = kATATimingPIO; + + return true; +} + +IOReturn IOATAStandardDevice::probeDevice() +{ + OSDictionary *propTable = 0; + + if ( doIdentify( (void **)&identifyData ) != kIOReturnSuccess ) + { + goto probeDevice_error; + } + + if ( deviceType == kATADeviceATA ) + { + doSpinUp(); + } + + else if ( deviceType == kATADeviceATAPI ) + { + atapiPktInt = ((identifyData->generalConfiguration & kATAPIPktProtocolIntDRQ) != 0); + + if ( doInquiry( (void **)&inquiryData ) != kIOReturnSuccess ) + { + goto probeDevice_error; + } + } + + if ( getATATimings() != true ) + { + goto probeDevice_error; + } + + if ( maxTags != 0 ) + { + tagArraySize = round( maxTags, 32 ) / 8; + tagArray = (UInt32 *)IOMalloc( tagArraySize ); + if ( tagArray == 0 ) + { + goto probeDevice_error; + } + bzero( tagArray, tagArraySize ); + } + + propTable = 
createProperties(); + if ( !propTable ) + { + goto probeDevice_error; + } + + setPropertyTable( propTable ); + + propTable->release(); + + close( this, 0 ); + + return true; + +probeDevice_error: ; + close( this, 0 ); + return false; +} + +/* + * + * + * + */ +ATADeviceType IOATAStandardDevice::probeDeviceType() +{ + ATATaskfile taskfile; + ATAResults results; + + bzero( (void *)&taskfile, sizeof(taskfile) ); + + taskfile.protocol = kATAProtocolSetRegs; + taskfile.regmask = ATARegtoMask(kATARegDriveHead); + + taskfile.resultmask = ATARegtoMask(kATARegSectorCount) + | ATARegtoMask(kATARegSectorNumber) + | ATARegtoMask(kATARegCylinderLow) + | ATARegtoMask(kATARegCylinderHigh) + | ATARegtoMask(kATARegStatus); + + taskfile.flags = kATACmdFlagTimingChanged; + + taskfile.ataRegs[kATARegDriveHead] = kATAModeLBA | (getUnit() << 4); + + probeCmd->setQueueInfo(); + probeCmd->setTaskfile( &taskfile ); + probeCmd->execute(); + + if ( probeCmd->getResults( &results ) != kIOReturnSuccess ) + { + return (deviceType = kATADeviceNone); + } + + if ( results.ataRegs[kATARegSectorCount] == kATASignatureSectorCount + && results.ataRegs[kATARegSectorNumber] == kATASignatureSectorNumber + && results.ataRegs[kATARegCylinderLow] == kATASignatureCylinderLow + && results.ataRegs[kATARegCylinderHigh] == kATASignatureCylinderHigh ) + { + if ( !(results.ataRegs[kATARegStatus] & kATAStatusBSY) + && (results.ataRegs[kATARegStatus] & kATAStatusDRDY) ) + { + return (deviceType = kATADeviceATA); + } + } + + if ( results.ataRegs[kATARegCylinderLow] == kATAPISignatureCylinderLow + && results.ataRegs[kATARegCylinderHigh] == kATAPISignatureCylinderHigh ) + { + return (deviceType = kATADeviceATAPI); + } + + return (deviceType = kATADeviceNone); +} + + +/* + * + * + * + */ +IOReturn IOATAStandardDevice::doSpinUp() +{ + void *buffer = NULL; + IOReturn rc; + + rc = doSectorCommand( kATACommandReadSector, 0, 1, &buffer ); + + if ( rc != kIOReturnSuccess ) + { + return rc; + } + + IOFree( buffer, 512 ); + + 
return rc ; +} + +/* + * + * + * + */ +IOReturn IOATAStandardDevice::doIdentify( void **dataPtr ) +{ + ATACommand ataCmd; + IOReturn rc; + + ataCmd = (deviceType == kATADeviceATA) ? kATACommandIdentify : kATACommandATAPIIdentify; + + rc = doSectorCommand( ataCmd, 0, 1, dataPtr ); + + if ( rc != kIOReturnSuccess ) + { + return rc; + } + + endianConvertData( *dataPtr, AppleIdentifyEndianTable ); + + return rc; +} + + + +/* + * + * + * + */ +IOReturn IOATAStandardDevice::doSectorCommand( ATACommand ataCmd, UInt32 ataLBA, UInt32 ataCount, void **dataPtr ) +{ + ATATaskfile taskfile; + ATAResults result; + IOMemoryDescriptor *desc; + UInt32 size; + void *data; + UInt32 i; + IOReturn rc; + + *dataPtr = NULL; + + size = ataCount * 512; + + if ( !(data = (void *)IOMalloc(size)) ) + { + return kIOReturnNoMemory; + } + + bzero( &taskfile, sizeof(taskfile) ); + + desc = IOMemoryDescriptor::withAddress( data, size, kIODirectionIn ); + if ( desc == NULL ) + { + rc = kIOReturnNoMemory; + goto doSectorCommand_error; + } + + + taskfile.protocol = kATAProtocolPIO; + taskfile.regmask = ATARegtoMask(kATARegDriveHead) + | ATARegtoMask(kATARegSectorCount) + | ATARegtoMask(kATARegSectorNumber) + | ATARegtoMask(kATARegCylinderLow) + | ATARegtoMask(kATARegCylinderHigh) + | ATARegtoMask(kATARegFeatures) + | ATARegtoMask(kATARegCommand); + + + taskfile.resultmask = ATARegtoMask(kATARegError) + | ATARegtoMask(kATARegStatus); + + taskfile.ataRegs[kATARegSectorCount] = ataCount; + taskfile.ataRegs[kATARegSectorNumber] = ataLBA & 0xff; + taskfile.ataRegs[kATARegCylinderLow] = (ataLBA >> 8) & 0xff; + taskfile.ataRegs[kATARegCylinderHigh] = (ataLBA >> 16) & 0xff; + taskfile.ataRegs[kATARegDriveHead] = (ataLBA >> 24) & 0x0f; + + taskfile.ataRegs[kATARegDriveHead] |= kATAModeLBA | (getUnit() << 4); + taskfile.ataRegs[kATARegCommand] = ataCmd; + + probeCmd->setQueueInfo(); + + for ( i = 0; i < 2; i++ ) + { + probeCmd->setTimeout( 25000 ); + probeCmd->setTaskfile( &taskfile ); + probeCmd->setPointers( 
desc, size, false ); + probeCmd->execute(); + + rc = probeCmd->getResults( &result ); + if ( rc == kIOReturnSuccess ) + { + break; + } + } + + +doSectorCommand_error: ; + + desc->release(); + + if ( rc != kIOReturnSuccess ) + { + IOFree( data, size ); + return result.returnCode; + } + + *dataPtr = data; + + return kIOReturnSuccess; +} + + +/* + * + * + */ +IOReturn IOATAStandardDevice::doInquiry( void **dataPtr ) +{ + ATATaskfile taskfile; + ATACDBInfo atapiCmd; + ATAResults result; + void *data; + IOMemoryDescriptor *desc; + UInt32 size = sizeof(ATAPIInquiry); + + *dataPtr = 0; + + if ( !(data = (void *)IOMalloc(size)) ) + { + return kIOReturnNoMemory; + } + + bzero( data, size ); + bzero( &taskfile, sizeof(taskfile) ); + bzero( &atapiCmd, sizeof(atapiCmd) ); + + desc = IOMemoryDescriptor::withAddress( data, size, kIODirectionIn ); + + taskfile.protocol = kATAProtocolATAPIPIO; + taskfile.regmask = ATARegtoMask(kATARegATAPIDeviceSelect) + | ATARegtoMask(kATARegATAPICommand) + | ATARegtoMask(kATARegATAPIByteCountLow) + | ATARegtoMask(kATARegATAPIByteCountHigh) + | ATARegtoMask(kATARegATAPIFeatures); + taskfile.ataRegs[kATARegATAPIDeviceSelect] = kATAModeLBA | (getUnit() << 4); + taskfile.ataRegs[kATARegATAPICommand] = kATACommandATAPIPacket; + taskfile.ataRegs[kATARegATAPIFeatures] = 0; + taskfile.ataRegs[kATARegATAPIByteCountLow] = 0xfe; + taskfile.ataRegs[kATARegATAPIByteCountHigh] = 0xff; + + atapiCmd.cdbLength = 12; // Fix 16 byte cmdpkts?? 
+ atapiCmd.cdb[0] = 0x12; + atapiCmd.cdb[4] = size; + + probeCmd->setCDB( &atapiCmd ); + probeCmd->setTaskfile( &taskfile ); + probeCmd->setPointers( desc, size, false ); + probeCmd->setTimeout( 5000 ); + probeCmd->setQueueInfo(); + probeCmd->execute(); + + if ( probeCmd->getResults(&result) == kIOReturnSuccess ) + { + *dataPtr = data; + } + else if ( ( result.returnCode == kIOReturnUnderrun ) && + ( result.bytesTransferred >= 36 ) ) + { + // The standard INQUIRY contain 36 required bytes, + // the rest is optional and vendor specific. + + result.returnCode = kIOReturnSuccess; + *dataPtr = data; + } + else + { + IOFree( data, size ); + } + + desc->release(); + + return result.returnCode; +} + +/* + * + * + */ +bool IOATAStandardDevice::getDeviceCapacity( UInt32 *blockMax, UInt32 *blockSize ) +{ + UInt32 i; + UInt32 data[2]; + + if ( deviceType == kATADeviceATA ) + { + if ( identifyData != NULL ) + { + *blockMax = *(UInt32 *)identifyData->userAddressableSectors - 1; + *blockSize = 512; + return true; + } + } + + if ( deviceType == kATADeviceATAPI ) + { + for ( i=0; i < 8; i++ ) + { + if ( doTestUnitReady() == kIOReturnSuccess ) + { + break; + } + } + + if ( doReadCapacity( data ) == kIOReturnSuccess ) + { + *blockMax = OSSwapBigToHostInt32( data[0] ); + *blockSize = OSSwapBigToHostInt32( data[1] ); + return true; + } + } + + return false; +} + + +IOReturn IOATAStandardDevice::doTestUnitReady() +{ + ATATaskfile taskfile; + ATACDBInfo atapiCmd; + ATAResults result; + + bzero( &taskfile, sizeof(taskfile) ); + bzero( &atapiCmd, sizeof(atapiCmd) ); + + taskfile.protocol = kATAProtocolATAPIPIO; + + taskfile.regmask = ATARegtoMask(kATARegATAPIDeviceSelect) + | ATARegtoMask(kATARegATAPICommand) + | ATARegtoMask(kATARegATAPIByteCountLow) + | ATARegtoMask(kATARegATAPIByteCountHigh) + | ATARegtoMask(kATARegATAPIFeatures); + + taskfile.ataRegs[kATARegATAPIDeviceSelect] = kATAModeLBA | (getUnit() << 4); + taskfile.ataRegs[kATARegATAPICommand] = kATACommandATAPIPacket; + 
taskfile.ataRegs[kATARegATAPIFeatures] = 0; + taskfile.ataRegs[kATARegATAPIByteCountLow] = 0xfe; + taskfile.ataRegs[kATARegATAPIByteCountHigh] = 0xff; + + atapiCmd.cdbLength = 12; // Fix 16 byte cmdpkts?? + atapiCmd.cdb[0] = 0x00; + + probeCmd->setCDB( &atapiCmd ); + probeCmd->setTaskfile( &taskfile ); + probeCmd->setPointers( (IOMemoryDescriptor *)NULL, 0, false ); + probeCmd->setTimeout( 5000 ); + probeCmd->setQueueInfo(); + probeCmd->execute(); + probeCmd->getResults(&result); + + return result.returnCode; +} + + +/* + * + * + */ +IOReturn IOATAStandardDevice::doReadCapacity( void *data ) +{ + ATATaskfile taskfile; + ATACDBInfo atapiCmd; + ATAResults result; + IOMemoryDescriptor *dataDesc; + UInt32 size = 8; + + + bzero( &taskfile, sizeof(taskfile) ); + bzero( &atapiCmd, sizeof(atapiCmd) ); + + dataDesc = IOMemoryDescriptor::withAddress( data, size, kIODirectionIn ); + if ( dataDesc == NULL ) + { + return kIOReturnNoMemory; + } + + taskfile.protocol = kATAProtocolATAPIPIO; + taskfile.regmask = ATARegtoMask(kATARegATAPIDeviceSelect) + | ATARegtoMask(kATARegATAPICommand) + | ATARegtoMask(kATARegATAPIByteCountLow) + | ATARegtoMask(kATARegATAPIByteCountHigh) + | ATARegtoMask(kATARegATAPIFeatures); + taskfile.ataRegs[kATARegATAPIDeviceSelect] = kATAModeLBA | (getUnit() << 4); + taskfile.ataRegs[kATARegATAPICommand] = kATACommandATAPIPacket; + taskfile.ataRegs[kATARegATAPIFeatures] = 0; + taskfile.ataRegs[kATARegATAPIByteCountLow] = 0xfe; + taskfile.ataRegs[kATARegATAPIByteCountHigh] = 0xff; + + atapiCmd.cdbLength = 12; // Fix 16 byte cmdpkts?? 
+ atapiCmd.cdb[0] = 0x25; + + probeCmd->setCDB( &atapiCmd ); + probeCmd->setTaskfile( &taskfile ); + probeCmd->setPointers( dataDesc, size, false ); + probeCmd->setTimeout( 5000 ); + probeCmd->setQueueInfo(); + probeCmd->execute(); + + probeCmd->getResults(&result); + + dataDesc->release(); + + return result.returnCode; +} + +/* + * + * + */ +bool IOATAStandardDevice::getTimingsSupported( ATATimingProtocol *timingsSupported ) +{ + UInt32 i; + + *(UInt32 *)timingsSupported = 0; + + for ( i=0; i < numTimings; i++ ) + { + *(UInt32 *) timingsSupported |= (UInt32)ataTimings[i].timingProtocol; + } + + return true; +} + +/* + * + * + */ +bool IOATAStandardDevice::getTimingSelected( ATATimingProtocol *timingSelected ) +{ + *timingSelected = currentTiming; + return true; +} + +/* + * + * + */ +bool IOATAStandardDevice::getProtocolsSupported( ATAProtocol *forProtocolsSupported ) +{ + *(UInt32 *)forProtocolsSupported = protocolsSupported; + return true; +} + +/* + * + * + */ +bool IOATAStandardDevice::getTiming( ATATimingProtocol *timingProtocol, ATATiming *timing ) +{ + UInt32 i; + + for ( i=0; i < numTimings; i++ ) + { + if ( ataTimings[i].timingProtocol == *timingProtocol ) + { + bcopy( &ataTimings[i], timing, sizeof(ATATiming) ); + return true; + } + } + + return false; +} + + +/* + * + * + */ +bool IOATAStandardDevice::selectTiming( ATATimingProtocol timingProtocol, bool fNotifyMsg ) +{ + ATATaskfile taskfile; + bool rc = false; + UInt32 i; + IOATAStandardCommand * ataCmd; + + for ( i=0; i < numTimings; i++ ) + { + if ( ataTimings[i].timingProtocol == timingProtocol ) + { + rc = true; + break; + } + } + + if ( rc == false ) + { + return false; + } + + ataCmd = allocCommand(kIOATAStandardDevice, 0); + if ( ataCmd == 0 ) return false; + + currentTiming = timingProtocol; + + bzero( &taskfile, sizeof(taskfile) ); + + taskfile.protocol = kATAProtocolPIO; + taskfile.regmask = ATARegtoMask(kATARegFeatures) + | ATARegtoMask(kATARegSectorCount) + | ATARegtoMask(kATARegDriveHead) 
+ | ATARegtoMask(kATARegCommand); + + taskfile.ataRegs[kATARegSectorCount] = ataTimings[i].featureSetting; + taskfile.ataRegs[kATARegFeatures] = kATAFeatureTransferMode; + taskfile.ataRegs[kATARegDriveHead] = kATAModeLBA | (getUnit() << 4); + taskfile.ataRegs[kATARegCommand] = kATACommandSetFeatures; + + taskfile.flags = kATACmdFlagTimingChanged; + + ataCmd->setTaskfile( &taskfile ); + ataCmd->setPointers( (IOMemoryDescriptor *)NULL, 0, false ); + ataCmd->setTimeout( 5000 ); + ataCmd->setQueueInfo( kATAQTypeBypassQ ); + + if ( fNotifyMsg == false ) + { + ataCmd->setCallback(); + ataCmd->execute(); + if ( ataCmd->getResults( (ATAResults *) 0 ) != kIOReturnSuccess ) + { + rc = false; + } + ataCmd->release(); + } + else + { + ataCmd->setCallback( this, (CallbackFn)&IOATAStandardDevice::selectTimingDone, ataCmd ); + ataCmd->execute(); + } + return rc; +} + +/* + * + * + */ +void IOATAStandardDevice::selectTimingDone( IOATAStandardCommand *ataCmd ) +{ + bool rc; + + rc = (ataCmd->getResults( (ATAResults *)0 ) == kIOReturnSuccess); + + client->message( kATAClientMsgSelectTiming | kATAClientMsgDone, this, (void *)rc ); + + ataCmd->release(); +} + +/* + * + * + */ +bool IOATAStandardDevice::getATATimings() +{ + int i, n; + UInt32 mode = 0; + UInt32 cycleTime = 0; + + ATATiming *pTimings; + + if ( controller->getProtocolsSupported( (ATAProtocol *)&protocolsSupported ) == false ) + { + return false; + } + + pTimings = ataTimings; + + /* + * PIO Cycle timing...... + * + * 1. Try to match Word 51 (pioCycleTime) with cycle timings + * in our pioModes table to get mode/CycleTime. (Valid for Modes 0-2) + * 2. If Words 64-68 are supported and Mode 3 or 4 supported check, + * update CycleTime with Word 68 (CycleTimeWithIORDY). 
+ */ + + cycleTime = identifyData->pioMode; + + if ( cycleTime > 2 ) + { + for ( i=AppleNumPIOModes-1; i != -1; i-- ) + { + if ( cycleTime <= ApplePIOModes[i].minDataCycle ) + { + mode = i; + break; + } + } + + if ( i == -1 ) + { + cycleTime = ApplePIOModes[mode].minDataCycle; + } + } + else + { + mode = cycleTime; + cycleTime = ApplePIOModes[mode].minDataCycle; + } + + + if ( identifyData->validFields & identifyWords_64to70_Valid ) + { + if (identifyData->advancedPIOModes & advPIOModes_Mode4_Supported) + mode = 4; + else if (identifyData->advancedPIOModes & advPIOModes_Mode3_Supported) + mode = 3; + + if ( (mode >= 3) && identifyData->minPIOCyclcTimeIORDY ) + { + cycleTime = identifyData->minPIOCyclcTimeIORDY; + } + } + + pTimings->timingProtocol = kATATimingPIO; + pTimings->mode = mode; + pTimings->featureSetting = mode | kATATransferModePIOwFC; + pTimings->minDataCycle = cycleTime; + pTimings->minDataAccess = ApplePIOModes[mode].minDataAccess; + + if ( ((protocolsSupported & kATAProtocolPIO) == 0) + || (controller->calculateTiming( getUnit(), pTimings ) == false) ) + { + IOLog("IOATAStandardDevice::%s() - Controller driver must support PIO protocol\n\r", __FUNCTION__); + return false; + } + + pTimings++; + numTimings++; + + /* + * Multiword DMA timing..... + * + * 1. Check Word 63(7:0) (Multiword DMA Modes Supported). Lookup + * CycleTime for highest mode we support. + * 2. If Words 64-68 supported, update CycleTime from Word 66 + * (RecommendedMultiWordCycleTime) if specified. 
+ */ + + n = identifyData->dmaModes & dmaModes_Supported; + if ( n ) + { + for ( i=0; n; i++, n>>=1 ) + ; + + mode = i - 1; + if ( mode > AppleNumDMAModes-1 ) + { + mode = AppleNumDMAModes-1; + } + cycleTime = AppleDMAModes[mode].minDataCycle; + + if (identifyData->validFields & identifyWords_64to70_Valid) + { + if ( identifyData->recDMACycleTime ) + { + cycleTime = identifyData->recDMACycleTime; + } + } + pTimings->timingProtocol = kATATimingDMA; + pTimings->mode = mode; + pTimings->featureSetting = mode | kATATransferModeDMA; + pTimings->minDataCycle = cycleTime; + pTimings->minDataAccess = AppleDMAModes[mode].minDataAccess; + + if ( ((protocolsSupported & kATAProtocolDMA) != 0) + && (controller->calculateTiming( getUnit(), pTimings ) == true) ) + { + pTimings++; + numTimings++; + } + } + + /* + * Ultra DMA timing..... + * + */ + if ( identifyData->validFields & identifyWords_88to88_Valid ) + { + n = identifyData->ultraDMAModes & ultraDMAModes_Supported; + if ( n ) + { + for ( i=0; n; i++, n>>=1 ) + ; + + mode = i - 1; + if ( mode > AppleNumUltraModes-1 ) + { + mode = AppleNumUltraModes-1; + } + + /* + * Build a separate timing entry for Ultra DMA/33 (mode <= 2) and Ultra DMA/66 + */ + while ( 1 ) + { + cycleTime = AppleUltraModes[mode].minDataCycle; + + pTimings->timingProtocol = (mode > 2) ? 
kATATimingUltraDMA66 : kATATimingUltraDMA33; + pTimings->mode = mode; + pTimings->featureSetting = mode | kATATransferModeUltraDMA33; + pTimings->minDataCycle = cycleTime; + pTimings->minDataAccess = AppleUltraModes[mode].minDataAccess; + + if ( ((protocolsSupported & kATAProtocolDMA) != 0) + && (controller->calculateTiming( getUnit(), pTimings ) == true) ) + { + pTimings++; + numTimings++; + } + + if ( mode < 3 ) break; + + mode = 2; + } + } + } + + maxTags = 0; + + if ( deviceType == kATADeviceATA ) + { + if ( ((identifyData->commandSetsSupported2 & commandSetsSupported2_ValidMask) == commandSetsSupported2_Valid) + && ((identifyData->commandSetsSupported3 & commandSetsSupported3_ValidMask) == commandSetsSupported3_Valid) ) + { + if ( ((identifyData->commandSetsSupported2 & commandSetsSupported2_DMAQueued) != 0) + && ((identifyData->commandSetsEnabled2 & commandSetsEnabled2_DMAQueued) != 0) ) + { + maxTags = identifyData->queueDepth + 1; + } + } + } + + if ( maxTags == 0 ) + { + protocolsSupported &= ~(kATAProtocolDMAQueued | kATAProtocolDMAQueuedRelease); + } + + + return true; +} + +/* + * + * + * + */ +ATAUnit IOATAStandardDevice::getUnit() +{ + return unit; +} + +/* + * + * + */ +ATADeviceType IOATAStandardDevice::getDeviceType() +{ + return deviceType; +} + +/* + * + * + */ +bool IOATAStandardDevice::getATAPIPktInt() +{ + return atapiPktInt; +} + +/* + * + * + */ +bool IOATAStandardDevice::getIdentifyData( ATAIdentify *identifyBuffer ) +{ + if ( identifyData == NULL ) + { + bzero( identifyBuffer, sizeof(ATAIdentify) ); + return false; + } + + bcopy( identifyData, identifyBuffer, sizeof(ATAIdentify) ); + return true; +} + +/* + * + * + */ +bool IOATAStandardDevice::getInquiryData( UInt32 inquiryBufLength, ATAPIInquiry *inquiryBuffer ) +{ + bzero( inquiryBuffer, inquiryBufLength ); + + if ( inquiryData == NULL ) + { + return false; + } + + bcopy( inquiryData, inquiryBuffer, inquiryBufLength ); + + return true; +} + + +/* + * + * + * + */ +void 
IOATAStandardDevice::setupTarget() +{ +} + +/* + * + * + * + */ +void IOATAStandardDevice::getInquiryData( void *clientBuf, UInt32 clientBufSize, UInt32 *clientDataSize ) +{ + UInt32 len; + + bzero( clientBuf, clientBufSize ); + + len = MIN( clientBufSize, inquiryDataSize ); + + bcopy( inquiryData, clientBuf, len ); + + *clientDataSize = len; +} + +/* + * + * + * + */ +void IOATAStandardDevice::abort() +{ + submitCommand( kATACommandAbortAll, 0 ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::reset() +{ + submitCommand( kATACommandDeviceReset, 0 ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::holdQueue( UInt32 queueType ) +{ + if ( getWorkLoop()->inGate() == false ) + { + IOPanic( "IOATAStandardDevice::holdQueue() - must be called from workloop!!\n\r"); + } + + if ( queueType == kATAQTypeBypassQ ) + { + bypassQHeld++; + } + else if ( queueType == kATAQTypeNormalQ ) + { + normalQHeld++; + } +} + +/* + * + * + * + */ +void IOATAStandardDevice::releaseQueue( UInt32 queueType ) +{ + bool doDispatchRequest = false; + + if ( getWorkLoop()->inGate() == false ) + { + IOPanic( "IOATAStandardDevice::releaseQueue() - must be called from workloop!!\n\r"); + } + + if ( queueType == kATAQTypeBypassQ ) + { + if ( bypassQHeld && (--bypassQHeld == 0) ) + doDispatchRequest = true; + } + else if ( queueType == kATAQTypeNormalQ ) + { + if ( normalQHeld && (--normalQHeld == 0) ) + doDispatchRequest = true; + } + + if ( doDispatchRequest ) dispatchRequest(); +} + +/* + * + * + * + */ +void IOATAStandardDevice::notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) +{ + if ( getWorkLoop()->inGate() == false ) + { + IOPanic( "IOATAStandardDevice:::notifyIdle() - must be called from workloop!!\n\r"); + } + + if ( callback == 0 ) + { + idleNotifyActive = false; + return; + } + + if ( idleNotifyActive == true ) + { + IOPanic( "IOATAStandardDevice:::notifyIdle() - only one idle notify may be active\n\r"); + } + + idleNotifyActive = true; + idleNotifyTarget = 
target; + idleNotifyCallback = callback; + idleNotifyRefcon = refcon; + + checkIdleNotify(); +} + + +/* + * + * + * + */ +void IOATAStandardDevice::submitCommand( UInt32 cmdType, IOATAStandardCommand *ataCmd, UInt32 cmdSequenceNumber ) +{ + deviceGate->runCommand( (void *)cmdType, (void *)ataCmd, (void *) cmdSequenceNumber, (void *) 0 ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::receiveCommand( UInt32 cmdType, IOATAStandardCommand *ataCmd, UInt32 cmdSequenceNumber, void *p3 ) +{ + queue_head_t *queue; + + switch ( cmdType ) + { + case kATACommandExecute: + ataCmd->cmdType = (ATACommandType) cmdType; + + queue = (ataCmd->queueType == kATAQTypeBypassQ) ? &bypassList : &deviceList; + + if ( ataCmd->queuePosition == kATAQPositionHead ) + { + stackCommand( queue, ataCmd ); + } + else + { + addCommand( queue, ataCmd ); + } + + dispatchRequest(); + break; + + case kATACommandAbortAll: + abortAllCommands( kATACommandAbortAll ); + break; + + case kATACommandAbort: + abortCommand( ataCmd, cmdSequenceNumber ); + break; + + case kATACommandDeviceReset: + abortAllCommands( kATACommandDeviceReset ); + break; + + default: + /* ??? 
*/ + break; + } +} + +/* + * + * + * + */ +void IOATAStandardDevice::abortCommand( IOATAStandardCommand *ataCmd, UInt32 sequenceNumber ) +{ + if ( ataCmd->list == (queue_head_t *)deviceGate ) + { + if ( ataCmd->sequenceNumber != sequenceNumber ) + { + return; + } + ataCmd->results.returnCode = kIOReturnAborted; + } + else if ( ataCmd->list == &deviceList ) + { + if ( ataCmd->sequenceNumber != sequenceNumber ) + { + return; + } + + deleteCommand( &deviceList, ataCmd ); + ataCmd->results.returnCode = kIOReturnAborted; + finishCommand( ataCmd ); + } + else if ( ataCmd->list == &activeList ) + { + if ( ataCmd->sequenceNumber != sequenceNumber ) + { + return; + } + + moveCommand( &activeList, &abortList, ataCmd ); + + dispatchRequest(); + } +} + + +/* + * + * + * + */ +void IOATAStandardDevice::abortAllCommands( ATACommandType cmdType ) +{ + + abortCmdPending = cmdType; + + if ( abortCmdPending == kATACommandAbortAll ) + { + if ( client != 0 ) + { + client->message( kATAClientMsgDeviceAbort, this ); + } + } + else if ( abortCmdPending == kATACommandDeviceReset ) + { + if ( client != 0 ) + { + client->message( kATAClientMsgDeviceReset, this ); + } + } + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOATAStandardDevice::resetOccurred( ATAClientMessage clientMsg ) +{ + moveAllCommands( &activeList, &cancelList, kIOReturnAborted ); + moveAllCommands( &abortList, &cancelList, kIOReturnAborted ); + + abortState = kStateIdle; + reqSenseState = kStateIdle; + commandLimit = 1; + + isSuspended = false; + AbsoluteTime_to_scalar( &suspendTime ) = 0; + + if ( (client != 0) && (clientMsg != kATAClientMsgNone) ) + { + client->message( clientMsg, this ); + } + + dispatchRequest(); +} + +void IOATAStandardDevice::resetComplete() +{ + if ( client != 0 ) + { + client->message( kATAClientMsgBusReset | kATAClientMsgDone, this ); + } +} + + +/* + * + * + * + */ +bool IOATAStandardDevice::checkAbortQueue() +{ + IOATAStandardCommand *origCmd; + + if ( abortState == kStateActive ) + { + 
return true; + } + + if ( abortCmdPending != kATACommandNone ) + { + abortCmd->origCommand = 0; + abortCmd->taskfile.tagType = kATATagTypeNone; + abortCmd->cmdType = abortCmdPending; + + abortCmd->timer = ( abortCmd->timeout != 0 ) ? + abortCmd->timeout / kATATimerIntervalmS + 1 : 0; + + bzero( &abortCmd->results, sizeof(ATAResults) ); + + abortState = kStateActive; + + addCommand( &activeList, abortCmd ); + + if ( (abortCmdPending == kATACommandDeviceReset) || + (abortCmdPending == kATACommandAbortAll) && (queue_empty( &abortList ) == false) ) + { + controller->abortCommand( abortCmd ); + } + else + { + abortCmd->complete(); + } + } + else if ( queue_empty( &abortList ) == false ) + { + origCmd = (IOATAStandardCommand *)queue_first( &abortList ); + abortCmd->origCommand = origCmd; + + abortCmd->cmdType = kATACommandAbort; + abortCmd->taskfile.tagType = origCmd->taskfile.tagType; + abortCmd->taskfile.tag = origCmd->taskfile.tag; + + abortCmd->timer = ( abortCmd->timeout != 0 ) ? + abortCmd->timeout / kATATimerIntervalmS + 1 : 0; + + bzero( &abortCmd->results, sizeof(ATAResults) ); + + abortState = kStateActive; + + addCommand( &activeList, abortCmd ); + controller->abortCommand( abortCmd ); + } + else + { + return false; + } + + return true; +} + +/* + * + * + * + */ +void IOATAStandardDevice::checkCancelQueue() +{ + if ( cancelState != kStateIdle ) + { + return; + } + + if ( queue_empty( &cancelList ) == true ) + { + return; + } + + if ( controller->controllerInfo.disableCancelCommands == true ) + { + return; + } + + cancelCmd->origCommand = (IOATAStandardCommand *)queue_first( &cancelList ); + bzero( &cancelCmd->results, sizeof(ATAResults) ); + + cancelState = kStateActive; + controller->cancelCommand( cancelCmd ); +} + +/* + * + * + * + */ +bool IOATAStandardDevice::checkReqSense() +{ + IOMemoryDescriptor *senseData; + UInt32 senseLength; + + if ( reqSenseState == kStateIssue ) + { + reqSenseCmd->origCommand = reqSenseOrigCmd; + bzero( &reqSenseCmd->results, 
sizeof(ATAResults) ); + + reqSenseOrigCmd->getPointers( &senseData, &senseLength, 0, true ); + reqSenseCmd->setPointers( senseData, senseLength, false ); + + reqSenseCmd->timer = ( reqSenseCmd->timeout != 0 ) ? + reqSenseCmd->timeout / kATATimerIntervalmS + 1 : 0; + + reqSenseCmd->ataCmd.cdb[3] = (senseLength >> 8) & 0xff; + reqSenseCmd->ataCmd.cdb[4] = senseLength & 0xff; + + reqSenseState = kStatePending; + } + + if ( reqSenseState == kStatePending ) + { + reqSenseState = kStateActive; + + addCommand( &activeList, reqSenseCmd ); + + commandCount++; + controller->commandCount++; + + controller->executeCommand( reqSenseCmd ); + } + + return (reqSenseState != kStateIdle); +} + + +/* + * + * + * + */ +bool IOATAStandardDevice::checkDeviceQueue( UInt32 *dispatchAction ) +{ + IOATAStandardCommand *ataCmd = 0; + queue_head_t *queue; + UInt32 i; + bool rc = true; + UInt32 queueHeld; + + do + { + if ( isSuspended == true ) + { + *dispatchAction = kDispatchNextDevice; + break; + } + + if ( controller->commandCount >= controller->commandLimit ) + { + *dispatchAction = kDispatchStop; + break; + } + + *dispatchAction = kDispatchNextDevice; + + if ( commandCount >= commandLimit ) + { + break; + } + + for ( i=0; i < 2; i++ ) + { + queueHeld = (i == 0) ? bypassQHeld : normalQHeld; + queue = (i == 0) ? &bypassList : &deviceList; + + if ( queueHeld > 0 ) + { + continue; + } + + ataCmd = checkCommand( queue ); + if ( ataCmd != 0 ) + { + *dispatchAction = kDispatchNextCommand; + break; + } + } + + if ( i == 2 ) + { + rc = false; + break; + } + + + if ( checkTag( ataCmd ) == false ) + { + *dispatchAction = kDispatchNextDevice; + break; + } + + getCommand( queue ); + + ataCmd->timer = ( ataCmd->timeout != 0 ) ? 
ataCmd->timeout / kATATimerIntervalmS + 1 : 0; + + commandCount++; + controller->commandCount++; + + addCommand( &activeList, ataCmd ); + + controller->executeCommand( ataCmd ); + + } while ( 0 ); + + return rc; +} + +/* + * + * + * + */ +void IOATAStandardDevice::suspend() +{ + if ( AbsoluteTime_to_scalar( &suspendTime ) == 0 ) + { + clock_get_uptime( &suspendTime ); + } + + isSuspended = true; +} + +/* + * + * + * + */ +void IOATAStandardDevice::resume() +{ + AbsoluteTime_to_scalar( &suspendTime ) = 0; + isSuspended = false; + + dispatchRequest(); +} + + +/* + * + * + * + */ +void IOATAStandardDevice::rescheduleCommand( IOATAStandardCommand *ataCmd ) +{ + queue_head_t *queue; + + if ( ataCmd->list != &activeList ) + { + IOLog( "IOATAStandardController::rescheduleCommand() - Command not active. Cmd = %08x\n\r", (int)ataCmd ); + return; + } + + deleteCommand( &activeList, ataCmd ); + + switch ( ataCmd->cmdType ) + { + case kATACommandExecute: + if ( ataCmd->taskfile.tagType != kATATagTypeNone ) + { + freeTag( ataCmd->taskfile.tag ); + ataCmd->taskfile.tag = kATATagTypeNone; + } + + queue = (ataCmd->queueType == kATAQTypeBypassQ) ? 
&bypassList : &deviceList; + + stackCommand( queue, ataCmd ); + + controller->commandCount--; + commandCount--; + break; + + case kATACommandReqSense: + reqSenseState = kStatePending; + commandCount--; + controller->commandCount--; + break; + + case kATACommandAbortAll: + case kATACommandDeviceReset: + abortCmdPending = ataCmd->cmdType; + + case kATACommandAbort: + abortState = kStateIdle; + break; + + default: + ; + } + + dispatchRequest(); + +} + +/* + * + * + * + */ +bool IOATAStandardDevice::checkTag( IOATAStandardCommand *ataCmd ) +{ + ATACDBInfo ataCDB; + bool rc = true; + ATAProtocol protocol; + + ataCmd->getCDB( &ataCDB ); + + ataCmd->taskfile.tagType = kATATagTypeNone; + + protocol = ataCmd->getProtocol(); + + do + { + if ( protocol != kATAProtocolDMAQueued && protocol != kATAProtocolDMAQueuedRelease ) + { + break; + } + if ( allocTag( &ataCmd->taskfile.tag ) == false ) + { + rc = false; + break; + } + + ataCmd->taskfile.tagType = kATATagTypeSimple; + } + while ( 0 ); + + ataCmd->setCDB( &ataCDB ); + + return rc; +} + +/* + * + * + * + */ +bool IOATAStandardDevice::allocTag( UInt32 *tagId ) +{ + UInt32 i; + UInt32 tagIndex; + UInt32 tagMask; + UInt32 *tags = 0; + + tags = tagArray; + + if ( tags == 0 ) return false; + + for ( i = 0; i < maxTags; i++ ) + { + tagIndex = i / 32; + tagMask = 1 << (i % 32); + if ( !(tags[tagIndex] & tagMask) ) + { + tags[tagIndex] |= tagMask; + *tagId = i; + return true; + } + } + return false; +} + +/* + * + * + * + */ +void IOATAStandardDevice::freeTag( UInt32 tagId ) +{ + UInt32 *tags = 0; + + tags = tagArray; + + if ( tags == 0 ) return; + + tags[tagId/32] &= ~(1 << (tagId % 32)); +} + +/* + * + * + * + */ +IOATAStandardCommand *IOATAStandardDevice::findCommandWithNexus( UInt32 tagValue ) +{ + IOATAStandardCommand *ataCmd; + UInt32 tag; + + queue_iterate( &activeList, ataCmd, IOATAStandardCommand *, nextCommand ) + { + switch ( ataCmd->cmdType ) + { + case kATACommandExecute: + case kATACommandReqSense: + tag = 
(ataCmd->taskfile.tagType == kATATagTypeNone) ? (UInt32) -1 : ataCmd->taskfile.tag; + if ( tag == tagValue ) + { + return ataCmd; + } + break; + default: + ; + } + } + + queue_iterate( &abortList, ataCmd, IOATAStandardCommand *, nextCommand ) + { + switch ( ataCmd->cmdType ) + { + case kATACommandExecute: + case kATACommandReqSense: + if ( ataCmd->taskfile.tag == tagValue ) + { + return ataCmd; + } + break; + default: + ; + } + } + + return 0; +} + +/* + * + * + * + */ +void IOATAStandardDevice::timer() +{ + IOATAStandardCommand *ataCmd, *tmp = 0; + + queue_iterate( &activeList, ataCmd, IOATAStandardCommand *, nextCommand ) + { + tmp = (IOATAStandardCommand *)queue_prev( &ataCmd->nextCommand ); + + if ( ataCmd->timer ) + { + if ( !--ataCmd->timer ) + { + IOLog("Timeout: Unit = %d Cmd = %08x Cmd Type = %d\n\r", + unit, (int)ataCmd, ataCmd->cmdType ); + + controller->busResetState = kStateIssue; + dispatchRequest(); + } + } + + if ( queue_end( &activeList, (queue_head_t *)ataCmd ) == true ) + { + break; + } + } +} + +/* + * + * + * + */ +void IOATAStandardDevice::dispatchRequest() +{ + target->state = kStateActive; + controller->dispatchRequest(); +} + +/* + * + * + * + */ +bool IOATAStandardDevice::dispatch( UInt32 *dispatchAction ) +{ + bool rc; + + checkCancelQueue(); + + if ( controller->checkBusReset() == true ) + { + *dispatchAction = kDispatchStop; + return true; + } + + if ( checkAbortQueue() == true ) + { + *dispatchAction = kDispatchNextDevice; + return true; + } + + do + { + if ( (rc = controller->commandDisable) == true ) + { + *dispatchAction = kDispatchStop; + break; + } + + if ( isSuspended == true ) + { + *dispatchAction = kDispatchNextDevice; + break; + } + + if ( (rc = checkReqSense()) == true ) + { + *dispatchAction = kDispatchNextDevice; + break; + } + + rc = checkDeviceQueue( dispatchAction ); + + } while ( *dispatchAction == kDispatchNextCommand ); + + return rc; +} + + +/* + * + * + * + */ +void IOATAStandardDevice::completeCommand( 
IOATAStandardCommand *ataCmd ) +{ + ATACommandType cmdType; + + cmdType = ataCmd->cmdType; + switch ( cmdType ) + { + case kATACommandExecute: + executeCommandDone( ataCmd ); + break; + + case kATACommandReqSense: + executeReqSenseDone( ataCmd ); + break; + + case kATACommandAbort: + case kATACommandAbortAll: + case kATACommandDeviceReset: + abortCommandDone( ataCmd ); + break; + + case kATACommandCancel: + cancelCommandDone( ataCmd ); + break; + + default: + ; + } + + checkIdleNotify(); + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOATAStandardDevice::checkIdleNotify() +{ + if ( idleNotifyActive == false ) + { + return; + } + + if ( (queue_empty( &activeList ) == true) + && (queue_empty( &abortList ) == true) + && (queue_empty( &cancelList ) == true) ) + { + idleNotifyActive = false; + (idleNotifyCallback)( idleNotifyTarget, idleNotifyRefcon ); + } +} + +/* + * + * + * + */ +void IOATAStandardDevice::flushQueue( UInt32 queueType, IOReturn rc ) +{ + queue_head_t *queue; + + queue = (queueType == kATAQTypeBypassQ) ? 
&bypassList : &deviceList; + purgeAllCommands( queue, rc ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::executeCommandDone( IOATAStandardCommand *ataCmd ) +{ + deleteCommand( ataCmd->list, ataCmd ); + + commandCount--; + controller->commandCount--; + + if ( ataCmd->taskfile.tagType != kATATagTypeNone ) + { + freeTag( ataCmd->taskfile.tag ); + ataCmd->taskfile.tagType = kATATagTypeNone; + } + + if ( deviceType == kATADeviceATAPI + && ataCmd->results.adapterStatus == kATAReturnStatusError + && ataCmd->results.requestSenseDone == false + && ataCmd->senseData != 0 ) + { + reqSenseOrigCmd = ataCmd; + reqSenseState = kStateIssue; + return; + } + + finishCommand( ataCmd ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::executeReqSenseDone( IOATAStandardCommand *ataCmd ) +{ + IOATAStandardCommand *origCommand; + + deleteCommand( ataCmd->list, ataCmd ); + + commandCount--; + controller->commandCount--; + + reqSenseState = kStateIdle; + + reqSenseOrigCmd = 0; + + origCommand = ataCmd->origCommand; + + if ( (ataCmd->results.returnCode == kIOReturnSuccess) || (ataCmd->results.returnCode == kIOReturnUnderrun)) + { + origCommand->results.requestSenseDone = true; + origCommand->results.requestSenseLength = ataCmd->results.bytesTransferred; + } + else + { + origCommand->results.requestSenseDone = false; + origCommand->results.requestSenseLength = 0; + } + + finishCommand( ataCmd->origCommand ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::abortCommandDone( IOATAStandardCommand *ataCmd ) +{ + IOATAStandardCommand *origATACmd; + + deleteCommand( ataCmd->list, ataCmd ); + + if ( ataCmd->cmdType == kATACommandAbortAll ) + { + resetOccurred( (ATAClientMessage) (kATAClientMsgDeviceAbort | kATAClientMsgDone) ); + abortCmdPending = kATACommandNone; + } + if ( ataCmd->cmdType == kATACommandDeviceReset ) + { + resetOccurred( (ATAClientMessage) (kATAClientMsgDeviceReset | kATAClientMsgDone) ); + abortCmdPending = kATACommandNone; + } + else if ( ataCmd->cmdType == 
kATACommandAbort ) + { + origATACmd = ataCmd->origCommand; + + if ( findCommand( &abortList, origATACmd ) == true ) + { + moveCommand( &abortList, &cancelList, origATACmd, kIOReturnAborted ); + } + } + + abortState = kStateIdle; + + return; +} + +/* + * + * + * + */ +void IOATAStandardDevice::cancelCommandDone( IOATAStandardCommand *ataCmd ) +{ + IOATAStandardCommand *origATACmd; + + cancelState = kStateIdle; + + origATACmd = ataCmd->origCommand; + + if ( findCommand( &cancelList, origATACmd ) == true ) + { + IOLog( "IOATAStandardDevice::cancelCommandDone - Cancelled command not completed - ataCmd = %08x\n\r", (int)origATACmd ); + deleteCommand( &cancelList, origATACmd ); + } +} + +/* + * + * + * + */ +void IOATAStandardDevice::finishCommand( IOATAStandardCommand *ataCmd ) +{ + if ( ataCmd->completionInfo.async.callback ) + { + (*ataCmd->completionInfo.async.callback)( ataCmd->completionInfo.async.target, + ataCmd->completionInfo.async.refcon ); + } + else + { + ataCmd->completionInfo.sync.lock->signal(); + } +} + + +/* + * + * + */ +OSDictionary *IOATAStandardDevice::createProperties() +{ + OSDictionary *propTable = 0; + OSObject *regObj; + char tmpbuf[81]; + const char *s; + char *d; + + + propTable = OSDictionary::withCapacity(kATAMaxProperties); + if ( propTable == NULL ) + { + return NULL; + } + + s = (deviceType == kATADeviceATA) ? 
kATAPropertyProtocolATA : kATAPropertyProtocolATAPI; + regObj = (OSObject *)OSString::withCString( s ); + if ( addToRegistry( propTable, regObj, kATAPropertyProtocol ) != true ) + { + goto createprop_error; + } + + regObj = (OSObject *)OSNumber::withNumber(unit,32); + if ( addToRegistry( propTable, regObj, kATAPropertyDeviceNumber ) != true ) + { + goto createprop_error; + } + + regObj = (OSObject *)OSNumber::withNumber(unit,32); + if ( addToRegistry( propTable, regObj, kATAPropertyLocation ) != true ) + { + goto createprop_error; + } + + d = tmpbuf; + stripBlanks( d, (char *)identifyData->modelNumber, sizeof(identifyData->modelNumber)); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kATAPropertyModelNumber ) != true ) + { + goto createprop_error; + } + + d = tmpbuf; + stripBlanks( d, (char *)identifyData->firmwareRevision, sizeof(identifyData->firmwareRevision)); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kATAPropertyFirmwareRev ) != true ) + { + goto createprop_error; + } + + if ( inquiryData ) + { + stripBlanks( d, (char *)inquiryData->vendorName, sizeof(inquiryData->vendorName) ); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kATAPropertyVendorName ) != true ) + { + goto createprop_error; + } + + stripBlanks( d, (char *)inquiryData->productName, sizeof(inquiryData->productName) ); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kATAPropertyProductName ) != true ) + { + goto createprop_error; + } + + stripBlanks( d, (char *)inquiryData->productRevision, sizeof(inquiryData->productRevision) ); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kATAPropertyProductRevision ) != true ) + { + goto createprop_error; + } + } + return propTable; + +createprop_error: ; + propTable->release(); + return NULL; +} + + +/* + * + * + */ +bool 
IOATAStandardDevice::addToRegistry( OSDictionary *propTable, OSObject *regObj, char *key, + bool doRelease = true ) +{ + bool rc; + + if ( regObj == NULL ) + { + return false; + } + + rc = propTable->setObject( key, regObj ); + + if ( doRelease ) + { + // If 'doRelease' is true, then a reference count is consumed. + regObj->release(); + } + + return rc; +} + + +/* + * + * + * + */ +bool IOATAStandardDevice::matchPropertyTable(OSDictionary * table) +{ + return( controller->matchNubWithPropertyTable( this, table )); +} + + +/* + * + * + * + */ +IOService *IOATAStandardDevice::matchLocation(IOService * client) +{ + return this; +} + + +/* + * + * + * + */ +void IOATAStandardDevice::stripBlanks( char *d, char *s, UInt32 l ) +{ + char *p, c; + + for ( p = d, c = *s; l && c ; l--) + { + c = (*d++ = *s++); + if ( c != ' ' ) + { + p = d; + } + } + *p = 0; +} + +/* + * + * + */ +void IOATAStandardDevice::endianConvertData( void *data, void *endianTable ) +{ + EndianTable *t; + + union EndianPtr + { + void *voidPtr; + UInt8 *bytePtr; + UInt16 *shortPtr; + UInt32 *longPtr; + UInt64 *longlongPtr; + } p; + + UInt32 i,j; + + p.voidPtr = data; + + t = (EndianTable *)endianTable; + + for ( ; t->type; t++ ) + { + i = t->size/t->type; + + switch ( t->type ) + { + + /* Note: + * + * The ATA standard defines identify strings as arrays of short ints, + * with the left-most character of the string as the most significant + * byte of the short int. Strings are not normally affected by the host + * endianness. However, the way ATA defines strings would cause strings + * to appear byte reversed. We do a mandatory short int byte-swap here, + * although strictly speaking this is not an endian issue. 
+ * + */ + case sizeof(UInt8): + for ( j = 0; j < i/2; j++ ) + { + *p.shortPtr++ = OSSwapInt16(*p.shortPtr); + } + + break; + + case sizeof(UInt16): + for ( j = 0; j < i; j++ ) + { + *p.shortPtr++ = OSSwapLittleToHostInt16(*p.shortPtr); + } + break; + + case sizeof(UInt32): + for ( j = 0; j < i; j++ ) + { + *p.longPtr++ = OSSwapLittleToHostInt32(*p.longPtr); + } + break; + + case sizeof(UInt64): + for ( j = 0; j < i; j++ ) + { + *p.longlongPtr++ = OSSwapLittleToHostInt64(*p.longlongPtr); + } + break; + + default: + ; + } + } +} + +/* + * + * + * + */ +IOATACommand *IOATAStandardDevice::allocCommand( IOATADevice *, UInt32 clientDataSize ) +{ + return (IOATAStandardCommand *) allocCommand( kIOATAStandardDevice, clientDataSize ); +} + +IOCDBCommand *IOATAStandardDevice::allocCommand( IOCDBDevice *, UInt32 clientDataSize ) +{ + return (IOCDBCommand *) allocCommand( kIOATAStandardDevice, clientDataSize ); +} + +IOATAStandardCommand *IOATAStandardDevice::allocCommand( IOATAStandardDevice *, UInt32 clientDataSize ) +{ + IOATAStandardCommand *cmd; + + if ( (cmd = controller->allocCommand( clientDataSize )) ) + { + cmd->device = this; + } + return cmd; +} + + +/* + * + * + */ +IOWorkLoop *IOATAStandardDevice::getWorkLoop() const +{ + return controller->workLoop; +} + + +/* + * + * + * + */ +bool IOATAStandardDevice::open( IOService *forClient, IOOptionBits options, void *arg ) +{ + if ( client != 0 ) return false; + + client = forClient; + + return super::open( forClient, options, arg ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::close( IOService *forClient, IOOptionBits options ) +{ + client = 0; + + return super::close( forClient, options ); +} + +/* + * + * + * + */ +void IOATAStandardDevice::free() +{ + if ( deviceGate != 0 ) + { + controller->workLoop->removeEventSource( deviceGate ); + deviceGate->release(); + } + + if ( reqSenseCmd != 0 ) reqSenseCmd->release(); + if ( abortCmd != 0 ) abortCmd->release(); + if ( cancelCmd != 0 ) cancelCmd->release(); + if ( 
probeCmd != 0 ) probeCmd->release(); + + if ( tagArray != 0 ) IOFree( tagArray, tagArraySize ); + if ( inquiryData != 0 ) IOFree( inquiryData, inquiryDataSize ); + if ( devicePrivateData != 0 ) IOFreeContiguous( devicePrivateData, controller->controllerInfo.devicePrivateDataSize ); + if ( clientSem != 0 ) IORWLockFree( clientSem ); + + super::free(); +} + + diff --git a/iokit/Families/IOATAStandard/IOATAStandardDriver.cpp b/iokit/Families/IOATAStandard/IOATAStandardDriver.cpp new file mode 100644 index 000000000..054405e67 --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardDriver.cpp @@ -0,0 +1,569 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAStandardDriver.cpp + * + */ +#include + +#undef super +#define super IOATAStandardController + +OSDefineMetaClass( IOATAStandardDriver, IOATAStandardController ); +OSDefineAbstractStructors( IOATAStandardDriver, IOATAStandardController ); + +#if 0 +static UInt32 dropInt=0; +#endif + +/* + * + * + */ +void IOATAStandardDriver::executeCommand( IOATAStandardCommand *cmd ) +{ + IOATAStandardDevice *newDevice; + ATAProtocol newProtocol; + ATATimingProtocol timingProtocol; + ATAResults results; + + newDevice = cmd->getDevice(kIOATAStandardDevice); + newProtocol = cmd->getProtocol(); + +#if 0 + IOLog("IOATAStandardDriver::%s() - Cmd = %08x Device = %08x Count = %d\n\r", + __FUNCTION__, (int) cmd, (int) newDevice, getCommandCount() ); +#endif + + if ( getCommandCount() > 1 ) + { + if ( currentDevice != newDevice || currentProtocol != newProtocol ) + { + suspendDevice( newDevice ); + rescheduleCommand( cmd ); + return; + } + } + + currentProtocol = newProtocol; + + if ( currentDevice != newDevice ) + { + newDeviceSelected( newDevice ); + currentDevice = newDevice; + } + + if ( (cmd->getFlags() & kATACmdFlagTimingChanged) != 0 ) + { + currentDevice->getTimingSelected( &timingProtocol ); + selectTiming( currentDevice->getUnit(), timingProtocol ); + newDeviceSelected( newDevice ); + } + + bzero( &results, sizeof(ATAResults) ); + cmd->setResults( &results ); + + switch ( currentProtocol ) + { + case kATAProtocolSetRegs: + doProtocolSetRegs( cmd ); + break; + + case kATAProtocolPIO: + doATAProtocolPio( cmd ); + break; + + case kATAProtocolDMA: + doATAProtocolDma( cmd ); + break; + + case kATAProtocolDMAQueued: + doATAProtocolDmaQueued( cmd ); + break; + + case kATAProtocolATAPIPIO: + doATAPIProtocolPio( cmd ); + break; + + case kATAProtocolATAPIDMA: + doATAPIProtocolDma( cmd ); + break; + + default: + doProtocolNotSupported( cmd ); + break; + } +} + + +/* + * + * + */ +void IOATAStandardDriver::resetCommand( 
IOATAStandardCommand *cmd ) +{ + + resetDma(); + dmaActive = false; + + currentProtocol = kATAProtocolNone; + currentDevice = 0; + doATAReset( cmd ); +} + +/* + * + * + */ +void IOATAStandardDriver::abortCommand( IOATAStandardCommand *ataCmd ) +{ + resetStarted(); + doATAReset( ataCmd ); +} + +/* + * + * + */ +void IOATAStandardDriver::cancelCommand( IOATAStandardCommand *ataCmd ) +{ + ATAResults results; + IOATAStandardCommand *origCmd; + + origCmd = ataCmd->getOriginalCmd(); + if ( origCmd != 0 ) + { + completeCmd( origCmd ); + } + + bzero( &results, sizeof(ATAResults) ); + ataCmd->setResults( &results ); + completeCmd( ataCmd ); +} + + +/* + * + * + */ +void IOATAStandardDriver::interruptOccurred() +{ +#if 0 + if ( dropInt++ > 20 ) + { + UInt32 status; + + IOLog("IOATAStandardDriver::%s() - Dropping interrupt\n\r", __FUNCTION__ ); + status = readATAReg( kATARegStatus ); + dropInt = 0; + return; + } +#endif + + if ( currentDevice == 0 ) + { + IOLog( "IOATAStandardDriver::interruptOccurred - Spurious interrupt - ATA Status = %04lx\n\r", readATAReg( kATARegStatus ) ); + return; + } + + switch ( currentProtocol ) + { + case kATAProtocolPIO: + processATAPioInt(); + break; + + case kATAProtocolDMA: + processATADmaInt(); + break; + + case kATAProtocolDMAQueued: + processATADmaQueuedInt(); + break; + + case kATAProtocolATAPIPIO: + processATAPIPioInt(); + break; + + case kATAProtocolATAPIDMA: + processATAPIDmaInt(); + break; + + default: + IOLog( "IOATAStandardDriver::interruptOccurred - Spurious interrupt - ATA Status = %04lx\n\r", readATAReg( kATARegStatus ) ); + } +} + + +/* + * + * + */ +void IOATAStandardDriver::doProtocolNotSupported( IOATAStandardCommand *cmd ) +{ + completeCmd( cmd, kATAReturnNotSupported ); +} + + +/* + * + * + */ +void IOATAStandardDriver::completeCmd( IOATAStandardCommand *cmd, ATAReturnCode returnCode, UInt32 bytesTransferred ) +{ + updateCmdStatus( cmd, returnCode, bytesTransferred ); + completeCmd( cmd ); +} + +/* + * + * + */ +void 
IOATAStandardDriver::updateCmdStatus( IOATAStandardCommand *cmd, ATAReturnCode returnCode, UInt32 bytesTransferred ) +{ + UInt32 resultmask; + UInt32 i; + ATAResults result; + + bzero( &result, sizeof(result) ); + + resultmask = cmd->getResultMask(); + + if ( cmd->getProtocol() != kATAProtocolSetRegs ) + { + if ( waitForStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + if ( returnCode == kATAReturnSuccess ) + { + kprintf("IOATAStandardDriver::updateCmdStatus is going to return kATAReturnBusyError;\n"); + returnCode = kATAReturnBusyError; + } + } + } + + for ( i=0; resultmask; i++ ) + { + if ( resultmask & 1 ) + { + result.ataRegs[i] = readATAReg( i ); + } + resultmask >>= 1; + } + + result.adapterStatus = returnCode; + result.bytesTransferred = bytesTransferred; + cmd->setResults( &result ); +} + +/* + * + * + */ +void IOATAStandardDriver::completeCmd( IOATAStandardCommand *cmd ) +{ + IOATAStandardDevice *device; + ATAResults ataResult; + + cmd->getResults( &ataResult ); + ataResult.returnCode = getIOReturnCode( ataResult.adapterStatus ); + cmd->setResults( &ataResult ); + + if ( getCommandCount() == 1 ) + { + currentProtocol = kATAProtocolNone; + + device = selectDevice(); + if ( device != 0 ) + { + resumeDevice( device ); + } + } + + cmd->complete(); +} + +/* + * + * + */ +IOReturn IOATAStandardDriver::getIOReturnCode( ATAReturnCode code ) +{ + switch (code) + { + case kATAReturnSuccess: + return kIOReturnSuccess; + + case kATAReturnNotSupported: + return kIOReturnUnsupported; + + case kATAReturnNoResource: + return kIOReturnNoResources; + + case kATAReturnBusyError: + return kIOReturnBusy; + + case kATAReturnInterruptTimeout: + return kIOReturnTimeout; + + case kATAReturnRetryPIO: + case kATAReturnStatusError: + case kATAReturnProtocolError: + default: + ; + } + return kIOReturnIOError; +} + +/* + * + * + */ +void IOATAStandardDriver::newDeviceSelected( IOATAStandardDevice * ) +{ +} + + +/* + * + * + */ +bool IOATAStandardDriver::programDma( 
IOATAStandardCommand * ) +{ + IOLog( "IOATAStandardDriver::%s - Subclass must implement\n\r", __FUNCTION__ ); + return false; +} + + +/* + * + * + */ +bool IOATAStandardDriver::startDma( IOATAStandardCommand * ) +{ + IOLog( "IOATAStandardDriver::%s - Subclass must implement\n\r", __FUNCTION__ ); + return false; +} + + +/* + * + * + */ +bool IOATAStandardDriver::stopDma( IOATAStandardCommand *, UInt32 * ) +{ + IOLog( "IOATAStandardDriver::%s - Subclass must implement\n\r", __FUNCTION__ ); + return false; +} + +/* + * + * + */ +bool IOATAStandardDriver::checkDmaActive() +{ + IOLog( "IOATAStandardDriver::%s - Subclass must implement\n\r", __FUNCTION__ ); + return false; +} + +/* + * + * + */ +bool IOATAStandardDriver::resetDma() +{ + return false; +} + +/* + * + * + */ +bool IOATAStandardDriver::getProtocolsSupported( ATAProtocol *forProtocol ) +{ + *(UInt32 *) forProtocol = ( kATAProtocolSetRegs + | kATAProtocolPIO + | kATAProtocolDMA + | kATAProtocolDMAQueued + | kATAProtocolATAPIPIO + | kATAProtocolATAPIDMA ); + + return true; +} + +/* + * + * + */ +ATAReturnCode IOATAStandardDriver::waitForDRQ( UInt32 timeoutmS ) +{ + AbsoluteTime currentTime, endTime; + UInt32 status; + ATAReturnCode rc = kATAReturnBusyError; + + clock_interval_to_deadline( timeoutmS, 1000000, &endTime ); + do + { + status = readATAReg( kATARegStatus ); + if ( (status & kATAPIStatusBSY) == 0 ) + { + if ( (status & kATAStatusERR) != 0 ) + { + rc = kATAReturnStatusError; + break; + } + if ( (status & kATAStatusDRQ) != 0 ) + { + rc = kATAReturnSuccess; + break; + } + } + clock_get_uptime( &currentTime ); + } + while ( CMP_ABSOLUTETIME( &endTime, &currentTime ) > 0 ); + + if (rc == kATAReturnBusyError) + kprintf("IOATAStandardDriver::waitForDRQ is going to return kATAReturnBusyError;\n"); + + return rc; +} + + +/* + * + * + */ +bool IOATAStandardDriver::waitForStatus( UInt32 statusBitsOn, UInt32 statusBitsOff, UInt32 timeoutmS ) +{ + AbsoluteTime currentTime, endTime; + UInt32 status; + 
clock_interval_to_deadline( timeoutmS, 1000000, &endTime ); + + do + { + status = readATAReg( kATARegStatus ); + + if ( (status & statusBitsOn) == statusBitsOn + && (status & statusBitsOff) == 0 ) + { + return true; + } + + clock_get_uptime( &currentTime ); + + } while ( CMP_ABSOLUTETIME( &endTime, &currentTime ) > 0 ); + + return false; +} + +/* + * + * + */ +bool IOATAStandardDriver::waitForAltStatus( UInt32 statusBitsOn, UInt32 statusBitsOff, UInt32 timeoutmS ) +{ + AbsoluteTime currentTime, endTime; + UInt32 status; + + clock_interval_to_deadline( timeoutmS, 1000000, &endTime ); + + do + { + status = readATAReg( kATARegAltStatus ); + + if ( (status & statusBitsOn) == statusBitsOn + && (status & statusBitsOff) == 0 ) + { + return true; + } + + clock_get_uptime( &currentTime ); + + } while ( CMP_ABSOLUTETIME( &endTime, &currentTime ) > 0 ); + + return false; +} + +/* + * + * + */ +bool IOATAStandardDriver::start (IOService *provider) +{ + + PMinit(); // initialize superclass variables + provider->joinPMtree(this); // attach into the power management hierarchy + + #define number_of_power_states 2 + + static IOPMPowerState ourPowerStates[number_of_power_states] = { + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable,IOPMPowerOn,IOPMPowerOn,0,0,0,0,0,0,0,0} + }; + + + // register ourselves with ourself as policy-maker + if (pm_vars != NULL) + registerPowerDriver(this, ourPowerStates, number_of_power_states); + + + // We are starting up, so not waking up: + wakingUpFromSleep = false; + + if (!super::start (provider)) + return false; + + return true; +} + +/* + * + * + */ + +IOReturn IOATAStandardDriver::setPowerState(unsigned long powerStateOrdinal, IOService* whatDevice) +{ + // Do not do anything if the state is invalid. 
+ if (powerStateOrdinal >= 2) + return IOPMNoSuchState; + + if ( powerStateOrdinal == 0 ) + { + kprintf("IOATAStandardDriver would be powered off here\n"); + wakingUpFromSleep = true; + + // Let's pretend we did something: + return IOPMAckImplied; + } + + if ( ( powerStateOrdinal == 1 ) && ( wakingUpFromSleep ) ) + { + wakingUpFromSleep = false; + disableControllerInterrupts(); + reset(); + enableControllerInterrupts(); + return IOPMAckImplied; + } + + return IOPMCannotRaisePower; +} + diff --git a/iokit/Families/IOATAStandard/IOATAStandardDriverDma.cpp b/iokit/Families/IOATAStandard/IOATAStandardDriverDma.cpp new file mode 100644 index 000000000..2ad22be97 --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardDriverDma.cpp @@ -0,0 +1,381 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAStandardDriverDma.cpp + * + */ +#include + +/*----------------------------------- ATA DMA Protocol ------------------------------*/ + +/* + * + * + */ +void IOATAStandardDriver::doATAProtocolDma( IOATAStandardCommand *cmd ) +{ + ATATaskfile taskfile; + UInt32 regmask; + UInt32 i; + + setCommandLimit( currentDevice, 1 ); + + cmd->getTaskfile( &taskfile ); + + regmask = taskfile.regmask; + + if ( regmask & ATARegtoMask(kATARegDriveHead) ) + { + regmask &= ~ATARegtoMask(kATARegDriveHead); + if ( selectDrive( taskfile.ataRegs[kATARegDriveHead] ) == false ) + { + completeCmd( cmd, kATAReturnBusyError ); + return; + } + } + + programDma( cmd ); + + for ( i = 0; regmask; i++ ) + { + if ( regmask & 1 ) + { + writeATAReg( i, taskfile.ataRegs[i] ); + } + regmask >>= 1; + } + + startDma( cmd ); +} + +/* + * + * + */ +void IOATAStandardDriver::processATADmaInt() +{ + UInt32 status; + UInt32 reqCount; + ATAReturnCode rc = kATAReturnSuccess; + IOATAStandardCommand *ataCmd; + UInt32 xferCount; + + ataCmd = findCommandWithNexus( currentDevice, (UInt32) -1 ); + if ( ataCmd == 0 ) + { + IOLog( "IOATAStandardDriver::processATADmaInt() - ATA Command not found\n\r" ); + return; + } + + if ( waitForStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + stopDma( ataCmd, &xferCount ); + completeCmd( ataCmd, kATAReturnBusyError, xferCount ); + return; + } + + status = readATAReg( kATARegStatus ); + + ataCmd->getPointers( 0, &reqCount, 0 ); + + if ( stopDma( ataCmd, &xferCount ) != true ) + { + rc = kATAReturnDMAError; + } + + else if ( status & kATAStatusDRQ ) + { + rc = kATAReturnDMAError; + } + + else if ( status & kATAStatusERR ) + { + rc = kATAReturnStatusError; + } + + else if ( reqCount != xferCount ) + { + rc = kATAReturnProtocolError; + } + + completeCmd( ataCmd, rc, xferCount ); +} + +/*----------------------------------- ATA DMA Queued Protocol ------------------------------*/ + +/* + * + * + */ +void 
IOATAStandardDriver::doATAProtocolDmaQueued( IOATAStandardCommand *ataCmd ) +{ + ATATaskfile taskfile; + UInt32 regmask; + UInt32 i; + + if ( dmaActive == true ) + { + setCommandLimit( currentDevice, 0 ); + rescheduleCommand( ataCmd ); + return; + } + + setCommandLimit( currentDevice, 31 ); + + ataCmd->getTaskfile( &taskfile ); + + regmask = taskfile.regmask; + + regmask &= ~(ATARegtoMask(kATARegDriveHead) | ATARegtoMask(kATARegCommand)); + + if ( selectDrive( taskfile.ataRegs[kATARegDriveHead] ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError ); + return; + } + + programDma( ataCmd ); + dmaActive = true; + startDma( ataCmd ); + + taskfile.ataRegs[kATARegSectorCount] = taskfile.tag << 3; + + for ( i = 0; regmask; i++ ) + { + if ( regmask & 1 ) + { + writeATAReg( i, taskfile.ataRegs[i] ); + } + regmask >>= 1; + } + + writeATAReg( kATARegCommand, taskfile.ataRegs[kATARegCommand] ); + +#if 1 + IODelay( 1 ); + waitForAltStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ); +#endif +} + +/* + * + * + */ +void IOATAStandardDriver::processATADmaQueuedInt() +{ + UInt32 status; + UInt32 intReason; + UInt32 tag; + UInt32 xferCount; + IOATAStandardCommand *ataCmd; + ATAReturnCode rc = kATAReturnSuccess; + + while ( 1 ) + { + status = readATAReg( kATARegStatus ); + intReason = readATAReg( kATARegSectorCount ); + tag = intReason / kATATagBit; + + ataCmd = findCommandWithNexus( currentDevice, tag ); + + if ( (intReason & kATAPIIntReasonCD) && (intReason & kATAPIIntReasonIO) && (dmaActive == true) ) + { + if ( ataCmd == 0 ) + { + IOLog( "IOATAStandardDriver::processATADmaQueuedInt() - ATA Command not found\n\r" ); + return; + } + + dmaActive = false; + + if ( stopDma( ataCmd, &xferCount ) != true ) + { + rc = kATAReturnDMAError; + } + + else if ( status & kATAStatusERR ) + { + rc = kATAReturnStatusError; + } + + completeCmd( ataCmd, rc, xferCount ); + } + + if ( (status & kATAStatusDRQ) != 0 ) + { + if ( ataCmd == 0 ) + { + IOLog( "IOATAStandardDriver::processATADmaQueuedInt() 
- ATA Command not found\n\r" ); + return; + } + + programDma( ataCmd ); + dmaActive = true; + startDma( ataCmd ); + break; + } + + if ( status & kATAStatusSERV ) + { + resetDma(); + + writeATAReg( kATARegCommand, kATACommandService ); + + if ( waitForAltStatus( 0, kATAStatusBSY, 500 ) == false ) + { + return; + } + continue; + } + + if ( dmaActive == false ) + { + setCommandLimit( currentDevice, 31 ); + } + break; + } + +} + +/*----------------------------------- ATAPI DMA Protocols ------------------------------*/ + +/* + * + * + * + */ +void IOATAStandardDriver::doATAPIProtocolDma( IOATAStandardCommand *ataCmd ) +{ + ATATaskfile taskfile; + ATACDBInfo atapiCmd; + ATAReturnCode rc; + UInt32 regmask; + UInt32 i; + + setCommandLimit( currentDevice, 1 ); + + ataCmd->getTaskfile( &taskfile ); + ataCmd->getCDB( &atapiCmd ); + + regmask = taskfile.regmask; + + if ( regmask & ATARegtoMask(kATARegDriveHead) ) + { + regmask &= ~ATARegtoMask(kATARegDriveHead); + if ( selectDrive( taskfile.ataRegs[kATARegDriveHead] ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError); + return; + } + } + + for ( i = 0; regmask; i++ ) + { + if ( regmask & 1 ) + { + writeATAReg( i, taskfile.ataRegs[i] ); + } + regmask >>= 1; + } + + programDma( ataCmd ); + + if ( ataCmd->getDevice(kIOATAStandardDevice)->getATAPIPktInt() == false ) + { + rc = sendATAPIPacket( ataCmd ); + + if ( rc != kATAReturnSuccess ) + { + completeCmd( ataCmd, rc ); + return; + } + + startDma( ataCmd ); + } +} + + +/* + * + * + */ +void IOATAStandardDriver::processATAPIDmaInt() +{ + IOATAStandardCommand *ataCmd; + ATAReturnCode rc = kATAReturnProtocolError; + UInt32 status; + UInt32 intReason; + UInt32 xferCount; + + ataCmd = findCommandWithNexus( currentDevice, (UInt32) -1 ); + if ( ataCmd == 0 ) + { + IOLog( "IOATAStandardDriver::processATAPIDmaInt() - ATA Command not found\n\r" ); + return; + } + + if ( waitForStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError, 
0 ); + return; + } + + status = readATAReg( kATARegATAPIStatus ); + intReason = readATAReg( kATARegATAPIIntReason ); + + if ( (status & kATAPIStatusDRQ) && (intReason & kATAPIIntReasonCD) && !(intReason & kATAPIIntReasonIO) ) + { + rc = sendATAPIPacket( ataCmd ); + if ( rc != kATAReturnSuccess ) + { + completeCmd( ataCmd, rc ); + } + + else if ( startDma( ataCmd ) != true ) + { + rc = kATAReturnDMAError; + completeCmd( ataCmd, rc ); + } + } + + else if ( !(status & kATAPIStatusDRQ) && (intReason & kATAPIIntReasonCD) && (intReason & kATAPIIntReasonIO) ) + { + if ( stopDma( ataCmd, &xferCount ) != true ) + { + rc = kATAReturnDMAError; + xferCount = 0; + } + else + { + rc = (status & kATAPIStatusCHK) ? kATAReturnStatusError : kATAReturnSuccess; + } + + completeCmd( ataCmd, rc, xferCount ); + } + else + { + stopDma( ataCmd, &xferCount ); + completeCmd( ataCmd, rc, 0 ); + } +} + diff --git a/iokit/Families/IOATAStandard/IOATAStandardDriverPio.cpp b/iokit/Families/IOATAStandard/IOATAStandardDriverPio.cpp new file mode 100644 index 000000000..d1b4c4c30 --- /dev/null +++ b/iokit/Families/IOATAStandard/IOATAStandardDriverPio.cpp @@ -0,0 +1,501 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAStandardDriverPio.cpp + * + */ +#include + +/*----------------------------------- ATA SetRegs Protocol ------------------------------*/ + +/* + * + * + */ +void IOATAStandardDriver::doProtocolSetRegs( IOATAStandardCommand *ataCmd ) +{ + ATATaskfile taskfile; + UInt32 regmask; + UInt32 i; + + setCommandLimit( currentDevice, 1 ); + + ataCmd->getTaskfile( &taskfile ); + + regmask = taskfile.regmask; + + if ( regmask & ATARegtoMask(kATARegDriveHead) ) + { + regmask &= ~ATARegtoMask(kATARegDriveHead); + if ( selectDrive( taskfile.ataRegs[kATARegDriveHead] ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError ); + return; + } + } + + for ( i = 0; regmask; i++ ) + { + if ( regmask & 1 ) + { + writeATAReg( i, taskfile.ataRegs[i] ); + } + regmask >>= 1; + } + + IODelay( 100 ); + + completeCmd( ataCmd, kATAReturnSuccess ); +} + +/*----------------------------------- ATA PIO Protocol ------------------------------*/ + +/* + * + * + */ +void IOATAStandardDriver::doATAProtocolPio( IOATAStandardCommand *ataCmd ) +{ + ATATaskfile taskfile; + ATAReturnCode rc; + UInt32 regmask; + UInt32 i; + + setCommandLimit( currentDevice, 1 ); + + ataCmd->getTaskfile( &taskfile ); + + regmask = taskfile.regmask; + + if ( regmask & ATARegtoMask(kATARegDriveHead) ) + { + regmask &= ~ATARegtoMask(kATARegDriveHead); + if ( selectDrive( taskfile.ataRegs[kATARegDriveHead] ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError ); + return; + } + } + + xferCount = 0; + ataCmd->getPointers( &xferDesc, &xferRemaining, &xferIsWrite ); + + for ( i = 0; regmask; i++ ) + { + if ( regmask & 1 ) + { + writeATAReg( i, taskfile.ataRegs[i] ); + } + regmask >>= 1; + } + + if ( xferIsWrite ) + { + rc = waitForDRQ( kATADRQTimeoutmS ); + if ( rc != kATAReturnSuccess ) + { + completeCmd( ataCmd, rc ); + return; + } + interruptOccurred(); + } +} 
+ + +/* + * + * + */ +void IOATAStandardDriver::processATAPioInt() +{ + IOATAStandardCommand *ataCmd; + UInt16 tmpBuffer[256]; + UInt32 status; + UInt32 i; + ATAReturnCode rc = kATAReturnSuccess; + + ataCmd = findCommandWithNexus( currentDevice, (UInt32) -1 ); + if ( ataCmd == 0 ) + { + IOLog( "IOATAStandardDriver::processATAPioInt() - ATA Command not found\n\r" ); + return; + } + + if ( waitForStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError, xferCount ); + return; + } + + status = readATAReg( kATARegStatus ); + + if ( (status & kATAStatusDRQ) && (xferRemaining != 0) ) + { + if ( xferIsWrite == true ) + { + xferDesc->readBytes( xferCount, tmpBuffer, 512 ); + + for ( i=0; i < 256; i++ ) + { + writeATAReg( kATARegData, tmpBuffer[i] ); + } + } + else + { + for ( i=0; i < 256; i++ ) + { + tmpBuffer[i] = readATAReg( kATARegData ); + } + xferDesc->writeBytes( xferCount, tmpBuffer, 512 ); + } + + xferCount += 512; + xferRemaining -= 512; + } + + if ( status & kATAStatusERR ) + { + completeCmd( ataCmd, kATAReturnStatusError, xferCount ); + } + else if ( !xferRemaining ) + { + completeCmd( ataCmd, rc, xferCount ); + } +} +/*----------------------------------- ATA Reset Protocol ------------------------------*/ + +/* + * + * + * + */ +void IOATAStandardDriver::doATAReset( IOATAStandardCommand *ataCmd ) +{ + + if ( resetCmd != 0 ) + { + completeCmd( ataCmd, kATAReturnNoResource ); + return; + } + + if ( resetPollEvent == 0 ) + { + resetPollEvent = IOTimerEventSource::timerEventSource( this, + (IOTimerEventSource::Action) &IOATAStandardDriver::checkATAResetComplete); + + if ( (resetPollEvent == 0) || (getWorkLoop()->addEventSource( resetPollEvent ) != kIOReturnSuccess) ) + { + completeCmd( ataCmd, kATAReturnNoResource ); + return; + } + } + + resetCmd = ataCmd; + + clock_interval_to_deadline( resetCmd->getTimeout(), 1000000, &resetTimeout ); + + writeATAReg( kATARegDeviceControl, kATADevControlnIEN | kATADevControlSRST ); 
+ IODelay( 25 ); + writeATAReg( kATARegDeviceControl, 0 ); + + IOSleep(5); + + checkATAResetComplete(); + + return; +} + +/* + * + * + * + */ +void IOATAStandardDriver::checkATAResetComplete() +{ + UInt32 status; + IOATAStandardCommand *ataCmd; + AbsoluteTime currentTime; + ATAReturnCode rc = kATAReturnSuccess; + + do + { + status = readATAReg( kATARegStatus ); + + if ( (status & kATAStatusBSY) == 0 ) + { + break; + } + + clock_get_uptime( &currentTime ); + if ( CMP_ABSOLUTETIME( &currentTime, &resetTimeout ) > 0 ) + { + rc = kATAReturnBusyError; + break; + } + + resetPollEvent->setTimeoutMS(kATAResetPollIntervalmS); + return; + + } while ( 0 ); + + ataCmd = resetCmd; + resetCmd = 0; + + if ( ataCmd->getCmdType() != kATACommandBusReset ) + { + resetOccurred(); + } + + completeCmd( ataCmd, rc ); +} + + +/*----------------------------------- ATAPI PIO Protocols ------------------------------*/ + +/* + * + * + * + */ +void IOATAStandardDriver::doATAPIProtocolPio( IOATAStandardCommand *ataCmd ) +{ + ATATaskfile taskfile; + ATACDBInfo atapiCmd; + ATAReturnCode rc; + UInt32 regmask; + UInt32 i; + + setCommandLimit( currentDevice, 1 ); + + xferCount = 0; + + ataCmd->getTaskfile( &taskfile ); + ataCmd->getCDB( &atapiCmd ); + + regmask = taskfile.regmask; + + if ( regmask & ATARegtoMask(kATARegDriveHead) ) + { + regmask &= ~ATARegtoMask(kATARegDriveHead); + if ( selectDrive( taskfile.ataRegs[kATARegDriveHead] ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError ); + return; + } + } + + for ( i = 0; regmask; i++ ) + { + if ( regmask & 1 ) + { + writeATAReg( i, taskfile.ataRegs[i] ); + } + regmask >>= 1; + } + + xferCount = 0; + ataCmd->getPointers( &xferDesc, &xferRemaining, &xferIsWrite ); + + if ( ataCmd->getDevice(kIOATAStandardDevice)->getATAPIPktInt() == false ) + { + rc = sendATAPIPacket( ataCmd ); + + if ( rc != kATAReturnSuccess ) + { + completeCmd( ataCmd, rc ); + return; + } + } +} + +/* + * + * + */ +void IOATAStandardDriver::processATAPIPioInt() +{ + 
IOATAStandardCommand *ataCmd; + ATAReturnCode rc = kATAReturnProtocolError; + UInt32 status; + UInt32 intReason; + UInt32 n; + + ataCmd = findCommandWithNexus( currentDevice, (UInt32) -1 ); + if ( ataCmd == 0 ) + { + IOLog( "IOATAStandardDriver::processATAPIPioInt() - ATA Command not found\n\r" ); + return; + } + + if ( waitForStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + completeCmd( ataCmd, kATAReturnBusyError, xferCount ); + return; + } + + status = readATAReg( kATARegATAPIStatus ); + intReason = readATAReg( kATARegATAPIIntReason ); + + if ( status & kATAPIStatusDRQ ) + { + if ( intReason & kATAPIIntReasonCD ) + { + if ( !(intReason & kATAPIIntReasonIO) ) + { + rc = sendATAPIPacket( ataCmd ); + } + } + else + { + n = readATAReg( kATARegATAPIByteCountLow ) | (readATAReg( kATARegATAPIByteCountHigh ) << 8); + n = (n+1) & ~0x01; + + if ( !(intReason & kATAPIIntReasonIO) && (xferIsWrite == true) ) + { + rc = writeATAPIDevice( n ); + } + else if ( (intReason & kATAPIIntReasonIO) && (xferIsWrite == false) ) + { + rc = readATAPIDevice( n ); + } + } + } + else if ( (intReason & kATAPIIntReasonCD) && (intReason & kATAPIIntReasonIO) ) + { + rc = (status & kATAPIStatusCHK) ? kATAReturnStatusError : kATAReturnSuccess; + completeCmd( ataCmd, rc, xferCount ); + } +} + +/* + * + * + */ +ATAReturnCode IOATAStandardDriver::sendATAPIPacket( IOATAStandardCommand *ataCmd ) +{ + UInt32 i; + ATACDBInfo atapiCmd; + UInt16 *pCDB; + ATAReturnCode rc; + + ataCmd->getCDB( &atapiCmd ); + + rc = waitForDRQ( kATADRQTimeoutmS ); + if ( rc != kATAReturnSuccess ) return rc; + + pCDB = (UInt16 *)atapiCmd.cdb; + for ( i=0; i < atapiCmd.cdbLength >> 1; i++ ) + { + writeATAReg( kATARegData, *pCDB++ ); + } + + return rc; +} + + +/* + * + * + */ +ATAReturnCode IOATAStandardDriver::readATAPIDevice( UInt32 n ) +{ + UInt16 tmpBuffer[256]; + UInt32 i,j,k; + + while ( n ) + { + j = (n < 512) ? 
n : 512; + + j >>= 1; + for ( i=0; i < j; i++ ) + { + tmpBuffer[i] = readATAReg( kATARegData ); + } + j <<= 1; + n -= j; + + k = (j > xferRemaining ) ? xferRemaining : j; + + xferDesc->writeBytes( xferCount, tmpBuffer, k ); + + xferCount += k; + xferRemaining -= k; + } + + return kATAReturnSuccess; +} + +/* + * + * + */ +ATAReturnCode IOATAStandardDriver::writeATAPIDevice( UInt32 n ) +{ + UInt16 tmpBuffer[256]; + UInt32 i,j,k; + + + while ( n ) + { + j = (n < 512) ? n : 512; + + k = (j > xferRemaining ) ? xferRemaining : j; + + xferDesc->readBytes( xferCount, tmpBuffer, k ); + + j >>= 1; + for ( i=0; i < j; i++ ) + { + writeATAReg( kATARegData, tmpBuffer[i] ); + } + j <<= 1; + n -= j; + + xferCount += k; + xferRemaining -= k; + } + + return kATAReturnSuccess; +} + + +/* + * + * + */ +bool IOATAStandardDriver::selectDrive( UInt32 driveHeadReg ) +{ + if ( waitForAltStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + return false; + } + + writeATAReg( kATARegDriveHead, driveHeadReg ); + + if ( waitForAltStatus( 0, kATAStatusBSY, kATABusyTimeoutmS ) == false ) + { + return false; + } + + return true; +} diff --git a/iokit/Families/IOBlockStorage/IOBlockStorageDevice.cpp b/iokit/Families/IOBlockStorage/IOBlockStorageDevice.cpp new file mode 100644 index 000000000..abf724844 --- /dev/null +++ b/iokit/Families/IOBlockStorage/IOBlockStorageDevice.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +#define super IOService +OSDefineMetaClass(IOBlockStorageDevice,IOService) +OSDefineAbstractStructors(IOBlockStorageDevice,IOService) + +bool +IOBlockStorageDevice::init(OSDictionary * properties) +{ + bool result; + + result = super::init(properties); + if (result) { + result = setProperty(kIOBlockStorageDeviceTypeKey, + kIOBlockStorageDeviceTypeGeneric); + } + + return(result); +} + +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 0); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 1); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 2); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 3); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 4); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 5); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 6); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 7); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 8); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 9); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 10); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 11); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 12); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 13); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 14); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 15); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 16); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 17); 
+OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 18); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 19); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 20); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 21); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 22); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 23); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 24); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 25); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 26); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 27); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 28); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 29); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 30); +OSMetaClassDefineReservedUnused(IOBlockStorageDevice, 31); diff --git a/iokit/Families/IOCDBlockStorage/IOCDBlockStorageDevice.cpp b/iokit/Families/IOCDBlockStorage/IOCDBlockStorageDevice.cpp new file mode 100644 index 000000000..e3bd887ed --- /dev/null +++ b/iokit/Families/IOCDBlockStorage/IOCDBlockStorageDevice.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +#define super IOBlockStorageDevice +OSDefineMetaClass(IOCDBlockStorageDevice,IOBlockStorageDevice) +OSDefineAbstractStructors(IOCDBlockStorageDevice,IOBlockStorageDevice) + +bool +IOCDBlockStorageDevice::init(OSDictionary * properties) +{ + bool result; + + result = super::init(properties); + if (result) { + setProperty(kIOBlockStorageDeviceTypeKey, + kIOBlockStorageDeviceTypeCDROM); + } + + return(result); +} + +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 0); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 1); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 2); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 3); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 4); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 5); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 6); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 7); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 8); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 9); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 10); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 11); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 12); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 13); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 14); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDevice, 15); diff --git a/iokit/Families/IOCDStorage/IOCDAudioControl.cpp b/iokit/Families/IOCDStorage/IOCDAudioControl.cpp new file mode 100644 index 000000000..7c1636144 --- /dev/null +++ b/iokit/Families/IOCDStorage/IOCDAudioControl.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#define super IOService +OSDefineMetaClassAndStructors(IOCDAudioControl, IOService) + +IOCDBlockStorageDriver * +IOCDAudioControl::getProvider() const +{ + return (IOCDBlockStorageDriver *) IOService::getProvider(); +} + +IOReturn +IOCDAudioControl::getStatus(CDAudioStatus *status) +{ + return(getProvider()->getAudioStatus(status)); +} + +CDTOC * +IOCDAudioControl::getTOC(void) +{ + return(getProvider()->getTOC()); +} + +IOReturn +IOCDAudioControl::getVolume(UInt8 *left,UInt8 *right) +{ + return(getProvider()->getAudioVolume(left,right)); +} + +IOReturn +IOCDAudioControl::newUserClient(task_t task, + void * /* security */, + UInt32 /* type */, + IOUserClient ** object ) + +{ + IOReturn err = kIOReturnSuccess; + IOCDAudioControlUserClient * client; + + client = IOCDAudioControlUserClient::withTask(task); + + if( !client || (false == client->attach( this )) || + (false == client->start( this )) ) { + if(client) { + client->detach( this ); + client->release(); + } + err = kIOReturnNoMemory; + } + + *object = client; + return( err ); +} + +IOReturn 
+IOCDAudioControl::pause(bool pause) +{ + return(getProvider()->audioPause(pause)); +} + +IOReturn +IOCDAudioControl::play(CDMSF timeStart,CDMSF timeStop) +{ + return(getProvider()->audioPlay(timeStart,timeStop)); +} + +IOReturn +IOCDAudioControl::scan(CDMSF timeStart,bool reverse) +{ + return(getProvider()->audioScan(timeStart,reverse)); +} + +IOReturn +IOCDAudioControl::stop() +{ + return(getProvider()->audioStop()); +} + +IOReturn +IOCDAudioControl::setVolume(UInt8 left,UInt8 right) +{ + return(getProvider()->setAudioVolume(left,right)); +} + +OSMetaClassDefineReservedUnused(IOCDAudioControl, 0); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 1); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 2); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 3); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 4); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 5); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 6); +OSMetaClassDefineReservedUnused(IOCDAudioControl, 7); diff --git a/iokit/Families/IOCDStorage/IOCDAudioControlUserClient.cpp b/iokit/Families/IOCDStorage/IOCDAudioControlUserClient.cpp new file mode 100644 index 000000000..8dfb84946 --- /dev/null +++ b/iokit/Families/IOCDStorage/IOCDAudioControlUserClient.cpp @@ -0,0 +1,324 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#define super IOUserClient +OSDefineMetaClassAndStructors(IOCDAudioControlUserClient, IOUserClient) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOCDAudioControl * IOCDAudioControlUserClient::getProvider() const +{ + // + // Obtain this object's provider. We override the superclass's method + // to return a more specific subclass of IOService -- IOCDAudioControl. + // This method serves simply as a convenience to subclass developers. + // + + return (IOCDAudioControl *) IOService::getProvider(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOCDAudioControlUserClient * IOCDAudioControlUserClient::withTask(task_t) +{ + // + // Create a new IOCDAudioControlUserClient. + // + + IOCDAudioControlUserClient * me = new IOCDAudioControlUserClient; + + if ( me && me->init() == false ) + { + me->release(); + me = 0; + } + + return me; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOCDAudioControlUserClient::start(IOService * provider) +{ + // + // Prepare the user client for usage. + // + + // State our assumptions. + + assert(OSDynamicCast(IOCDAudioControl, provider)); + + // Ask our superclass' opinion. + + if ( super::start(provider) == false ) return false; + + // Open our provider. + + if ( provider->open(this) == false ) return false; + + // Prepare our method dispatch table. 
+ + _methods[kIOCDAudioControlMethodGetStatus].func = (IOMethod) &IOCDAudioControlUserClient::getStatus; + _methods[kIOCDAudioControlMethodGetStatus].flags = kIOUCScalarIStructO; + _methods[kIOCDAudioControlMethodGetStatus].count0 = 0; + _methods[kIOCDAudioControlMethodGetStatus].count1 = sizeof(CDAudioStatus); + _methods[kIOCDAudioControlMethodGetStatus].object = this; + + _methods[kIOCDAudioControlMethodGetTOC ].func = (IOMethod) &IOCDAudioControlUserClient::getTOC; + _methods[kIOCDAudioControlMethodGetTOC ].flags = kIOUCScalarIStructO; + _methods[kIOCDAudioControlMethodGetTOC ].count0 = 0; + _methods[kIOCDAudioControlMethodGetTOC ].count1 = 0xFFFFFFFF; + _methods[kIOCDAudioControlMethodGetTOC ].object = this; + + _methods[kIOCDAudioControlMethodGetVolume].func = (IOMethod) &IOCDAudioControlUserClient::getVolume; + _methods[kIOCDAudioControlMethodGetVolume].flags = kIOUCScalarIScalarO; + _methods[kIOCDAudioControlMethodGetVolume].count0 = 0; + _methods[kIOCDAudioControlMethodGetVolume].count1 = 2; + _methods[kIOCDAudioControlMethodGetVolume].object = this; + + _methods[kIOCDAudioControlMethodSetVolume].func = (IOMethod) &IOCDAudioControlUserClient::setVolume; + _methods[kIOCDAudioControlMethodSetVolume].flags = kIOUCScalarIScalarO; + _methods[kIOCDAudioControlMethodSetVolume].count0 = 2; + _methods[kIOCDAudioControlMethodSetVolume].count1 = 0; + _methods[kIOCDAudioControlMethodSetVolume].object = this; + + _methods[kIOCDAudioControlMethodPause ].func = (IOMethod) &IOCDAudioControlUserClient::pause; + _methods[kIOCDAudioControlMethodPause ].flags = kIOUCScalarIScalarO; + _methods[kIOCDAudioControlMethodPause ].count0 = 1; + _methods[kIOCDAudioControlMethodPause ].count1 = 0; + _methods[kIOCDAudioControlMethodPause ].object = this; + + _methods[kIOCDAudioControlMethodPlay ].func = (IOMethod) &IOCDAudioControlUserClient::play; + _methods[kIOCDAudioControlMethodPlay ].flags = kIOUCScalarIScalarO; + _methods[kIOCDAudioControlMethodPlay ].count0 = 2; + 
_methods[kIOCDAudioControlMethodPlay ].count1 = 0; + _methods[kIOCDAudioControlMethodPlay ].object = this; + + _methods[kIOCDAudioControlMethodScan ].func = (IOMethod) &IOCDAudioControlUserClient::scan; + _methods[kIOCDAudioControlMethodScan ].flags = kIOUCScalarIScalarO; + _methods[kIOCDAudioControlMethodScan ].count0 = 2; + _methods[kIOCDAudioControlMethodScan ].count1 = 0; + _methods[kIOCDAudioControlMethodScan ].object = this; + + _methods[kIOCDAudioControlMethodStop ].func = (IOMethod) &IOCDAudioControlUserClient::stop; + _methods[kIOCDAudioControlMethodStop ].flags = 0; + _methods[kIOCDAudioControlMethodStop ].count0 = 0; + _methods[kIOCDAudioControlMethodStop ].count1 = 0; + _methods[kIOCDAudioControlMethodStop ].object = this; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::clientClose() +{ + // + // Relinquish the user client. + // + + IOCDAudioControl * provider = getProvider(); + + if ( provider && provider->isOpen(this) ) + { + provider->close(this); + detach(provider); + } + + return kIOReturnSuccess; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOExternalMethod * IOCDAudioControlUserClient::getExternalMethodForIndex( + UInt32 index ) +{ + // + // Obtain the method definition given a method index. + // + + if (index >= kIOCDAudioControlMethodCount) return 0; + + return _methods + index; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::getStatus( CDAudioStatus * status, + UInt32 * statusSize ) +{ + // + // Get the current audio play status information. 
+ // + + if ( *statusSize != sizeof(CDAudioStatus) ) return kIOReturnBadArgument; + + return getProvider()->getStatus(status); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::getTOC(CDTOC * toc, UInt32 * tocMaxSize) +{ + // + // Get the full Table Of Contents. + // + + CDTOC * original = getProvider()->getTOC(); + + if (original == 0) return kIOReturnBadMedia; + + *tocMaxSize = min(original->length + sizeof(UInt16), *tocMaxSize); + bcopy(original, toc, *tocMaxSize); + + return kIOReturnSuccess; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::getVolume(UInt32 * left, UInt32 * right) +{ + // + // Get the current audio volume. + // + + IOReturn status; + + if ( ((*left) & ~0xFF) || ((*right) & ~0xFF) ) return kIOReturnBadArgument; + + status = getProvider()->getVolume((UInt8 *) left, (UInt8 *) right); + + *left = *((UInt8 *) left ); + *right = *((UInt8 *) right); + + return status; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::setVolume(UInt32 left, UInt32 right) +{ + // + // Set the current audio volume. + // + + if ( (left & ~0xFF) || (right & ~0xFF) ) return kIOReturnBadArgument; + + return getProvider()->setVolume((UInt8) left, (UInt8) right); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::pause(UInt32 pause) +{ + // + // Pause or resume the audio playback. + // + + return getProvider()->pause((bool) pause); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::play(UInt32 msfStart, UInt32 msfStop) +{ + // + // Play audio. 
+ // + + CDMSF timeStart; + CDMSF timeStop; + + timeStart.minute = (msfStart >> 16) & 0xFF; + timeStart.second = (msfStart >> 8) & 0xFF; + timeStart.frame = (msfStart >> 0) & 0xFF; + + timeStop.minute = (msfStop >> 16) & 0xFF; + timeStop.second = (msfStop >> 8) & 0xFF; + timeStop.frame = (msfStop >> 0) & 0xFF; + + return getProvider()->play(timeStart, timeStop); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::scan(UInt32 msfStart, UInt32 reverse) +{ + // + // Perform a fast-forward or fast-backward operation. + // + + CDMSF timeStart; + + timeStart.minute = (msfStart >> 16) & 0xFF; + timeStart.second = (msfStart >> 8) & 0xFF; + timeStart.frame = (msfStart >> 0) & 0xFF; + + return getProvider()->scan(timeStart, reverse ? true : false); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDAudioControlUserClient::stop() +{ + // + // Stop the audio playback (or audio scan). 
+ // + + return getProvider()->stop(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDAudioControlUserClient, 7); diff --git a/iokit/Families/IOCDStorage/IOCDBlockStorageDriver.cpp b/iokit/Families/IOCDStorage/IOCDBlockStorageDriver.cpp new file mode 100644 index 000000000..1e15e5ff1 --- /dev/null +++ b/iokit/Families/IOCDStorage/IOCDBlockStorageDriver.cpp @@ -0,0 +1,607 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include + + +// Hack for Cheetah to prevent sleep if there's disk activity. +static IOService * gIORootPowerDomain = NULL; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define super IOBlockStorageDriver +OSDefineMetaClassAndStructors(IOCDBlockStorageDriver,IOBlockStorageDriver) + +IOCDBlockStorageDevice * +IOCDBlockStorageDriver::getProvider() const +{ + return (IOCDBlockStorageDevice *) IOService::getProvider(); +} + + +/* Accept a new piece of media, doing whatever's necessary to make it + * show up properly to the system. The arbitration lock is assumed to + * be held during the call. + */ +IOReturn +IOCDBlockStorageDriver::acceptNewMedia(void) +{ + IOReturn result; + bool ok; + int i; + UInt64 nblocks; + int nentries; + int nDataTracks; + int nAudioTracks; + char name[128]; + bool nameSep; + + /* First, we cache information about the tracks on the disc: */ + + result = cacheTocInfo(); + if (result != kIOReturnSuccess) { + assert(_toc == NULL); + } + + /* Scan thru the track list, counting up the number of Data and Audio tracks. 
*/ + + nDataTracks = 0; + nAudioTracks = 0; + nblocks = 0; + + if (_toc) { + nentries = (_toc->length - sizeof(UInt16)) / sizeof(CDTOCDescriptor); + + for (i = 0; i < nentries; i++) { + /* tracks 1-99, not leadout or skip intervals */ + if (_toc->descriptors[i].point <= 99 && _toc->descriptors[i].adr == 1) { + if ((_toc->descriptors[i].control & 0x04)) { + /* it's a data track */ + nDataTracks++; + } else { + nAudioTracks++; + } + /* leadout */ + } else if (_toc->descriptors[i].point == 0xA2 && _toc->descriptors[i].adr == 1) { + if (nblocks < CDConvertMSFToLBA(_toc->descriptors[i].p)) { + nblocks = CDConvertMSFToLBA(_toc->descriptors[i].p); + } + } + } + + if (nblocks < _maxBlockNumber + 1) { + nblocks = _maxBlockNumber + 1; + } + } else if (_maxBlockNumber) { + nblocks = _maxBlockNumber + 1; + } + + /* Instantiate a CD Media nub above ourselves. */ + + name[0] = 0; + nameSep = false; + if (getProvider()->getVendorString()) { + strcat(name, getProvider()->getVendorString()); + nameSep = true; + } + if (getProvider()->getProductString()) { + if (nameSep == true) strcat(name, " "); + strcat(name, getProvider()->getProductString()); + nameSep = true; + } + if (nameSep == true) strcat(name, " "); + strcat(name, "Media"); + + _mediaObject = instantiateMediaObject(0,nblocks*kBlockSizeCD,kBlockSizeCD,name); + result = (_mediaObject) ? kIOReturnSuccess : kIOReturnBadArgument; + + if (result == kIOReturnSuccess) { + ok = _mediaObject->attach(this); + } else { + IOLog("%s[IOCDBlockStorageDriver]::acceptNewMedia; can't instantiate CD media nub.\n",getName()); + return(result); /* give up now */ + } + if (!ok) { + IOLog("%s[IOCDBlockStorageDriver]::acceptNewMedia; can't attach CD media nub.\n",getName()); + _mediaObject->release(); + _mediaObject = NULL; + return(kIOReturnNoMemory); /* give up now */ + } + + /* Instantiate an audio control nub for the audio portion of the media. 
*/ + + if (nAudioTracks) { + _acNub = new IOCDAudioControl; + if (_acNub) { + _acNub->init(); + ok = _acNub->attach(this); + if (!ok) { + IOLog("%s[IOCDBlockStorageDriver]::acceptNewMedia; can't attach audio control nub.\n",getName()); + _acNub->release(); + _acNub = NULL; + } + } else { + IOLog("%s[IOCDBlockStorageDriver]::acceptNewMedia; can't instantiate audio control nub.\n", + getName()); + } + } + + /* Now that the nubs are attached, register them. */ + + _mediaPresent = true; + if (_toc) { + _mediaObject->setProperty(kIOCDMediaTOCKey,(void*)_toc,_tocSize); + } + _mediaObject->registerService(); + + if (_acNub) { + _acNub->registerService(); + } + + return(result); +} + +IOReturn +IOCDBlockStorageDriver::audioPause(bool pause) +{ + return(getProvider()->audioPause(pause)); +} + +IOReturn +IOCDBlockStorageDriver::audioPlay(CDMSF timeStart,CDMSF timeStop) +{ + return(getProvider()->audioPlay(timeStart,timeStop)); +} + +IOReturn +IOCDBlockStorageDriver::audioScan(CDMSF timeStart,bool reverse) +{ + return(getProvider()->audioScan(timeStart,reverse)); +} + +IOReturn +IOCDBlockStorageDriver::audioStop() +{ + return(getProvider()->audioStop()); +} + +IOReturn +IOCDBlockStorageDriver::cacheTocInfo(void) +{ + IOBufferMemoryDescriptor *buffer = NULL; + IOReturn result; + CDTOC *toc; + UInt16 tocSize; + + assert(sizeof(CDTOC) == 4); /* (compiler/platform check) */ + assert(sizeof(CDTOCDescriptor) == 11); /* (compiler/platform check) */ + + assert(_toc == NULL); + + /* Read the TOC header: */ + + buffer = IOBufferMemoryDescriptor::withCapacity(sizeof(CDTOC),kIODirectionIn); + if (buffer == NULL) { + return(kIOReturnNoMemory); + } + + result = getProvider()->readTOC(buffer); + if (result != kIOReturnSuccess) { + buffer->release(); + return(result); + } + + toc = (CDTOC *) buffer->getBytesNoCopy(); + tocSize = OSSwapBigToHostInt16(toc->length) + sizeof(UInt16); + + buffer->release(); + + /* Read the TOC in full: */ + + buffer = 
IOBufferMemoryDescriptor::withCapacity(tocSize,kIODirectionIn); + if (buffer == NULL) { + return(kIOReturnNoMemory); + } + + result = getProvider()->readTOC(buffer); + if (result != kIOReturnSuccess) { + buffer->release(); + return(result); + } + + toc = (CDTOC *) IOMalloc(tocSize); + if (toc == NULL) { + buffer->release(); + return(kIOReturnNoMemory); + } + + if (buffer->readBytes(0,toc,tocSize) != tocSize) { + buffer->release(); + IOFree(toc,tocSize); + return(kIOReturnNoMemory); + } + + _toc = toc; + _tocSize = tocSize; + + buffer->release(); + + /* Convert big-endian values in TOC to host-endianess: */ + + if (_tocSize >= sizeof(UInt16)) { + _toc->length = OSSwapBigToHostInt16(_toc->length); + } + + return(result); +} + +/* Decommission all nubs. The arbitration lock is assumed to + * be held during the call. + */ +IOReturn +IOCDBlockStorageDriver::decommissionMedia(bool forcible) +{ + IOReturn result; + + if (_mediaObject) { + /* If this is a forcible decommission (i.e. media is gone), we don't + * care whether the teardown worked; we forget about the media. + */ + if (_mediaObject->terminate(forcible ? kIOServiceRequired : 0) || forcible) { + _mediaObject->release(); + _mediaObject = 0; + + initMediaState(); /* clear all knowledge of the media */ + result = kIOReturnSuccess; + + } else { + result = kIOReturnBusy; + } + } else { + result = kIOReturnNoMedia; + } + + /* We only attempt to decommission the audio portion of the + * CD if all the data tracks decommissioned successfully. + */ + + if (result == kIOReturnSuccess) { + if (_acNub) { + _acNub->terminate(kIOServiceRequired); + _acNub->release(); + _acNub = 0; + } + if (_toc) { + IOFree(_toc,_tocSize); + _toc = NULL; + _tocSize = 0; + } + } + + return(result); +} + +/* We should check with other clients using the other nubs before we allow + * the client of the IOCDMedia to eject the media. + */ +IOReturn +IOCDBlockStorageDriver::ejectMedia(void) +{ + /* For now, we don't check with the other clients. 
*/ + + return(super::ejectMedia()); +} + +void +IOCDBlockStorageDriver::executeRequest(UInt64 byteStart, + IOMemoryDescriptor *buffer, + IOStorageCompletion completion, + IOBlockStorageDriver::Context *context) +{ + UInt32 block; + UInt32 nblks; + IOReturn result; + + if (!_mediaPresent) { /* no media? you lose */ + complete(completion, kIOReturnNoMedia,0); + return; + } + + /* We know that we are never called with a request too large, + * nor one that is misaligned with a block. + */ + assert((byteStart % context->block.size) == 0); + assert((buffer->getLength() % context->block.size) == 0); + + block = byteStart / context->block.size; + nblks = buffer->getLength() / context->block.size; + +/* Now the protocol-specific provider implements the actual + * start of the data transfer: */ + + // Tickle the root power domain to reset the sleep countdown. + if (gIORootPowerDomain) { + gIORootPowerDomain->activityTickle(kIOPMSubclassPolicy); + } + + if (context->block.type == kBlockTypeCD) { + result = getProvider()->doAsyncReadCD(buffer,block,nblks, + (CDSectorArea)context->block.typeSub[0], + (CDSectorType)context->block.typeSub[1], + completion); + } else { + result = getProvider()->doAsyncReadWrite(buffer,block,nblks,completion); + } + + if (result != kIOReturnSuccess) { /* it failed to start */ + complete(completion,result); + return; + } +} + +IOReturn +IOCDBlockStorageDriver::getAudioStatus(CDAudioStatus *status) +{ + return(getProvider()->getAudioStatus(status)); +} + +IOReturn +IOCDBlockStorageDriver::getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume) +{ + return(getProvider()->getAudioVolume(leftVolume,rightVolume)); +} + +const char * +IOCDBlockStorageDriver::getDeviceTypeName(void) +{ + return(kIOBlockStorageDeviceTypeCDROM); +} + +UInt64 +IOCDBlockStorageDriver::getMediaBlockSize(CDSectorArea area,CDSectorType type) +{ + UInt64 blockSize = 0; + + const SInt16 areaSize[kCDSectorTypeCount][8] = + { /* 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 */ + /* 
Unknown */ { 96, 294, -1, 280, 2048, 4, 8, 12 }, + /* CDDA */ { 96, 294, -1, 0, 2352, 0, 0, 0 }, + /* Mode1 */ { 96, 294, -1, 288, 2048, 4, 0, 12 }, + /* Mode2 */ { 96, 294, -1, 0, 2336, 4, 0, 12 }, + /* Mode2Form1 */ { 96, 294, -1, 280, 2048, 4, 8, 12 }, + /* Mode2Form2 */ { 96, 294, -1, 0, 2328, 4, 8, 12 }, + }; + + if ( type >= kCDSectorTypeCount ) return 0; + + for ( UInt32 index = 0; index < 8; index++ ) + { + if ( ((area >> index) & 0x01) ) + { + if ( areaSize[type][index] == -1 ) return 0; + blockSize += areaSize[type][index]; + } + } + + return blockSize; +} + +UInt32 +IOCDBlockStorageDriver::getMediaType(void) +{ + return(getProvider()->getMediaType()); +} + +CDTOC * +IOCDBlockStorageDriver::getTOC(void) +{ + return(_toc); +} + +bool +IOCDBlockStorageDriver::init(OSDictionary * properties) +{ + _acNub = NULL; + _toc = NULL; + _tocSize = 0; + + // Hack for Cheetah to prevent sleep if there's disk activity. + if (!gIORootPowerDomain) { + // No danger of race here as we're ultimately just setting + // the gIORootPowerDomain variable. 
+ + do { + IOService * root = NULL; + OSIterator * iterator = NULL; + OSDictionary * pmDict = NULL; + + root = IOService::getServiceRoot(); + if (!root) break; + + pmDict = root->serviceMatching("IOPMrootDomain"); + if (!pmDict) break; + + iterator = root->getMatchingServices(pmDict); + pmDict->release(); + if (!iterator) break; + + if (iterator) { + gIORootPowerDomain = OSDynamicCast(IOService, iterator->getNextObject()); + iterator->release(); + } + } while (false); + } + + return(super::init(properties)); +} + +IOMedia * +IOCDBlockStorageDriver::instantiateDesiredMediaObject(void) +{ + return(new IOCDMedia); +} + +IOMedia * +IOCDBlockStorageDriver::instantiateMediaObject(UInt64 base,UInt64 byteSize, + UInt32 blockSize,char *mediaName) +{ + IOMedia *media; + + media = super::instantiateMediaObject(base,byteSize,blockSize,mediaName); + + if (media) { + char *description = NULL; + + switch (getMediaType()) { + case kCDMediaTypeROM: + description = kIOCDMediaTypeROM; + break; + case kCDMediaTypeR: + description = kIOCDMediaTypeR; + break; + case kCDMediaTypeRW: + description = kIOCDMediaTypeRW; + break; + } + + if (description) { + media->setProperty(kIOCDMediaTypeKey, description); + } + } + + return media; +} + +void +IOCDBlockStorageDriver::readCD(IOService *client, + UInt64 byteStart, + IOMemoryDescriptor *buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + assert(buffer->getDirection() == kIODirectionIn); + + prepareRequest(byteStart, buffer, sectorArea, sectorType, completion); +} + +void +IOCDBlockStorageDriver::prepareRequest(UInt64 byteStart, + IOMemoryDescriptor *buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + Context * context; + IOReturn status; + + // Allocate a context structure to hold some of our state. 
+ + context = allocateContext(); + + if (context == 0) + { + complete(completion, kIOReturnNoMemory); + return; + } + + // Prepare the transfer buffer. + + status = buffer->prepare(); + + if (status != kIOReturnSuccess) + { + deleteContext(context); + complete(completion, status); + return; + } + + // Fill in the context structure with some of our state. + + if ( ( sectorArea == kCDSectorAreaUser ) && + ( sectorType == kCDSectorTypeMode1 || + sectorType == kCDSectorTypeMode2Form1 ) ) + { + context->block.size = _mediaBlockSize; + context->block.type = kBlockTypeStandard; + } + else + { + context->block.size = getMediaBlockSize(sectorArea, sectorType); + context->block.type = kBlockTypeCD; + context->block.typeSub[0] = sectorArea; + context->block.typeSub[1] = sectorType; + } + + context->original.byteStart = byteStart; + context->original.buffer = buffer; + context->original.buffer->retain(); + context->original.completion = completion; + + completion.target = this; + completion.action = prepareRequestCompletion; + completion.parameter = context; + + // Deblock the transfer. 
+ + deblockRequest(byteStart, buffer, completion, context); +} + +IOReturn +IOCDBlockStorageDriver::readISRC(UInt8 track,CDISRC isrc) +{ + return(getProvider()->readISRC(track,isrc)); +} + +IOReturn +IOCDBlockStorageDriver::readMCN(CDMCN mcn) +{ + return(getProvider()->readMCN(mcn)); +} + +IOReturn +IOCDBlockStorageDriver::setAudioVolume(UInt8 leftVolume,UInt8 rightVolume) +{ + return(getProvider()->setAudioVolume(leftVolume,rightVolume)); +} + +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 0); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 1); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 2); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 3); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 4); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 5); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 6); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 7); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 8); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 9); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 10); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 11); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 12); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 13); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 14); +OSMetaClassDefineReservedUnused(IOCDBlockStorageDriver, 15); diff --git a/iokit/Families/IOCDStorage/IOCDMedia.cpp b/iokit/Families/IOCDStorage/IOCDMedia.cpp new file mode 100644 index 000000000..5b1061668 --- /dev/null +++ b/iokit/Families/IOCDStorage/IOCDMedia.cpp @@ -0,0 +1,320 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#define super IOMedia +OSDefineMetaClassAndStructors(IOCDMedia, IOMedia) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Local Functions + +static void readCDCompletion(void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount) +{ + // + // Internal completion routine for synchronous version of readCD. + // + + if (parameter) *((UInt64 *)parameter) = actualByteCount; + ((IOSyncer *)target)->signal(status); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOCDBlockStorageDriver * IOCDMedia::getProvider() const +{ + // + // Obtain this object's provider. We override the superclass's method to + // return a more specific subclass of IOService -- IOCDBlockStorageDriver. + // This method serves simply as a convenience to subclass developers. + // + + return (IOCDBlockStorageDriver *) IOService::getProvider(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOCDMedia::read(IOService * /* client */, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // Read data from the storage object at the specified byte offset into the + // specified buffer, asynchronously. 
When the read completes, the caller + // will be notified via the specified completion action. + // + // The buffer will be retained for the duration of the read. + // + // This method will work even when the media is in the terminated state. + // + + if (isInactive()) + { + complete(completion, kIOReturnNoMedia); + return; + } + + if (_openLevel == kIOStorageAccessNone) // (instantaneous value, no lock) + { + complete(completion, kIOReturnNotOpen); + return; + } + + if (_mediaSize == 0 || _preferredBlockSize == 0) + { + complete(completion, kIOReturnUnformattedMedia); + return; + } + + if (_mediaSize < byteStart + buffer->getLength()) + { + complete(completion, kIOReturnBadArgument); + return; + } + + byteStart += _mediaBase; + getProvider()->readCD( /* client */ this, + /* byteStart */ byteStart, + /* buffer */ buffer, + /* sectorArea */ (CDSectorArea) 0xF8, // (2352 bytes) + /* sectorType */ (CDSectorType) 0x00, // ( all types) + /* completion */ completion ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDMedia::readCD(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + UInt64 * actualByteCount = 0) +{ + // + // Read data from the CD media object at the specified byte offset into the + // specified buffer, synchronously. Special areas of the CD sector can be + // read via this method, such as the header and subchannel data. When the + // read completes, this method will return to the caller. The actual byte + // count field is optional. + // + // This method will work even when the media is in the terminated state. + // + + IOStorageCompletion completion; + IOSyncer * completionSyncer; + + // Initialize the lock we will synchronize against. + + completionSyncer = IOSyncer::create(); + + // Fill in the completion information for this request. 
+ + completion.target = completionSyncer; + completion.action = readCDCompletion; + completion.parameter = actualByteCount; + + // Issue the asynchronous read. + + readCD(client, byteStart, buffer, sectorArea, sectorType, completion); + + // Wait for the read to complete. + + return completionSyncer->wait(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOCDMedia::readCD(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + // + // Read data from the CD media object at the specified byte offset into the + // specified buffer, asynchronously. Special areas of the CD sector can be + // read via this method, such as the header and subchannel data. When the + // read completes, the caller will be notified via the specified completion + // action. + // + // The buffer will be retained for the duration of the read. + // + // This method will work even when the media is in the terminated state. + // + + if (isInactive()) + { + complete(completion, kIOReturnNoMedia); + return; + } + + if (_openLevel == kIOStorageAccessNone) // (instantaneous value, no lock) + { + complete(completion, kIOReturnNotOpen); + return; + } + + if (_mediaSize == 0 || _preferredBlockSize == 0) + { + complete(completion, kIOReturnUnformattedMedia); + return; + } + + if (_mediaSize < byteStart + buffer->getLength()) + { + complete(completion, kIOReturnBadArgument); + return; + } + + byteStart += _mediaBase; + getProvider()->readCD( /* client */ this, + /* byteStart */ byteStart, + /* buffer */ buffer, + /* sectorArea */ sectorArea, + /* sectorType */ sectorType, + /* completion */ completion ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDMedia::readISRC(UInt8 track, CDISRC isrc) +{ + // + // Read the International Standard Recording Code for the specified track. 
+ // + // This method will work even when the media is in the terminated state. + // + + if (isInactive()) + { + return kIOReturnNoMedia; + } + + return getProvider()->readISRC(track, isrc); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOCDMedia::readMCN(CDMCN mcn) +{ + // + // Read the Media Catalog Number (also known as the Universal Product Code). + // + // This method will work even when the media is in the terminated state. + // + + if (isInactive()) + { + return kIOReturnNoMedia; + } + + return getProvider()->readMCN(mcn); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +CDTOC * IOCDMedia::getTOC() +{ + // + // Get the full Table Of Contents. + // + // This method will work even when the media is in the terminated state. + // + + if (isInactive()) + { + return 0; + } + + return getProvider()->getTOC(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 7); + +// - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDMedia, 15); diff --git a/iokit/Families/IOCDStorage/IOCDPartitionScheme.cpp b/iokit/Families/IOCDStorage/IOCDPartitionScheme.cpp new file mode 100644 index 000000000..2dbdcb34c --- /dev/null +++ b/iokit/Families/IOCDStorage/IOCDPartitionScheme.cpp @@ -0,0 +1,800 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +#define super IOPartitionScheme +OSDefineMetaClassAndStructors(IOCDPartitionScheme, IOPartitionScheme); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define kIOCDPartitionSchemeContentTable "Content Table" + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOCDMedia * IOCDPartitionScheme::getProvider() const +{ + // + // Obtain this object's provider. We override the superclass's method + // to return a more specific subclass of OSObject -- IOCDMedia. This + // method serves simply as a convenience to subclass developers. + // + + return (IOCDMedia *) IOService::getProvider(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOCDPartitionScheme::init(OSDictionary * properties = 0) +{ + // + // Initialize this object's minimal state. + // + + // State our assumptions. + + assert(sizeof(CDTOC) == 4); // (compiler/platform check) + assert(sizeof(CDTOCDescriptor) == 11); // (compiler/platform check) + + // Ask our superclass' opinion. + + if (super::init(properties) == false) return false; + + // Initialize our state. + + _partitions = 0; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOCDPartitionScheme::free() +{ + // + // Free all of this object's outstanding resources. 
+ // + + if ( _partitions ) _partitions->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOService * IOCDPartitionScheme::probe(IOService * provider, SInt32 * score) +{ + // + // Determine whether the provider media contains CD partitions. + // + + // State our assumptions. + + assert(OSDynamicCast(IOCDMedia, provider)); + + // Ask superclass' opinion. + + if (super::probe(provider, score) == 0) return 0; + + // Scan the provider media for CD partitions. + + _partitions = scan(score); + + return ( _partitions ) ? this : 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOCDPartitionScheme::start(IOService * provider) +{ + // + // Publish the new media objects which represent our partitions. + // + + IOMedia * partition; + OSIterator * partitionIterator; + + // State our assumptions. + + assert(_partitions); + + // Ask our superclass' opinion. + + if ( super::start(provider) == false ) return false; + + // Attach and register the new media objects representing our partitions. + + partitionIterator = OSCollectionIterator::withCollection(_partitions); + if ( partitionIterator == 0 ) return false; + + while ( (partition = (IOMedia *) partitionIterator->getNextObject()) ) + { + if ( partition->attach(this) ) + { + partition->registerService(); + } + } + + partitionIterator->release(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSSet * IOCDPartitionScheme::scan(SInt32 * score) +{ + // + // Scan the provider media for CD partitions (in TOC). Returns the set + // of media objects representing each of the partitions (the retain for + // the set is passed to the caller), or null should no CD partitions be + // found. The default probe score can be adjusted up or down, based on + // the confidence of the scan. 
+ // + + struct CDSession + { + UInt32 isFormX:1; + UInt32 leadOut; + }; + + struct CDTrack + { + UInt32 block; + CDSectorSize blockSize; + CDSectorType blockType; + CDTOCDescriptor * descriptor; + UInt32 session:8; + }; + + #define kCDSessionMaxIndex 0x63 + + #define kCDTrackMinIndex 0x01 + #define kCDTrackMaxIndex 0x63 + + IOBufferMemoryDescriptor * buffer = 0; + IOCDMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + bool mediaIsOpen = false; + OSSet * partitions = 0; + CDSession * sessions = 0; + UInt32 sessionMinIndex = kCDSessionMaxIndex + 1; + UInt32 sessionMaxIndex = 0; + CDTOC * toc = 0; + UInt32 tocCount = 0; + CDTrack * tracks = 0; + UInt32 trackMinIndex = kCDTrackMaxIndex + 1; + UInt32 trackMaxIndex = 0; + CDTrack * trackMaxLinked = 0; + + // State our assumptions. + + assert(mediaBlockSize == kCDSectorSizeWhole); + + // Determine whether this media is formatted. + + if ( media->isFormatted() == false ) goto scanErr; + + // Allocate a buffer large enough to hold a whole 2352-byte sector. + + buffer = IOBufferMemoryDescriptor::withCapacity( + /* capacity */ mediaBlockSize, + /* withDirection */ kIODirectionIn ); + if ( buffer == 0 ) goto scanErr; + + // Allocate a set to hold the set of media objects representing partitions. + + partitions = OSSet::withCapacity(2); + if ( partitions == 0 ) goto scanErr; + + // Open the media with read access. + + mediaIsOpen = media->open(this, 0, kIOStorageAccessReader); + if ( mediaIsOpen == false ) goto scanErr; + + // Obtain the table of contents. + + toc = media->getTOC(); + if ( toc == 0 ) goto scanErr; + + tocCount = (toc->length - sizeof(UInt16)) / sizeof(CDTOCDescriptor); + + // Allocate a list large enough to hold information about each session. 
+ + sessions = IONew(CDSession, kCDSessionMaxIndex + 1); + if ( sessions == 0 ) goto scanErr; + + bzero(sessions, (kCDSessionMaxIndex + 1) * sizeof(CDSession)); + + // Allocate a list large enough to hold information about each track. + + tracks = IONew(CDTrack, kCDTrackMaxIndex + 1); + if ( tracks == 0 ) goto scanErr; + + bzero(tracks, (kCDTrackMaxIndex + 1) * sizeof(CDTrack)); + + // Scan the table of contents, gathering information about the sessions + // and tracks on the CD, but without making assumptions about the order + // of the entries in the table. + + for ( unsigned index = 0; index < tocCount; index++ ) + { + CDTOCDescriptor * descriptor = toc->descriptors + index; + + // Determine whether this is an audio or data track descriptor. + + if ( descriptor->point >= kCDTrackMinIndex && + descriptor->point <= kCDTrackMaxIndex && + descriptor->adr == 0x01 && + descriptor->session <= kCDSessionMaxIndex ) + { + CDTrack * track = tracks + descriptor->point; + + // Record the relevant information about this track. + + track->block = CDConvertMSFToLBA(descriptor->p); + track->descriptor = descriptor; + track->session = descriptor->session; + + if ( (descriptor->control & 0x04) ) // (data track?) + { + track->blockSize = kCDSectorSizeMode1; + track->blockType = kCDSectorTypeMode1; + } + else // (audio track?) + { + track->blockSize = kCDSectorSizeCDDA; + track->blockType = kCDSectorTypeCDDA; + } + + trackMinIndex = min(descriptor->point, trackMinIndex); + trackMaxIndex = max(descriptor->point, trackMaxIndex); + } + + // Determine whether this is a lead-in (A0) descriptor. + + else if ( descriptor->point == 0xA0 && + descriptor->adr == 0x01 && + descriptor->session <= kCDSessionMaxIndex ) + { + CDSession * session = sessions + descriptor->session; + + // Record whether the session has "form 1" or "form 2" tracks. + + session->isFormX = (descriptor->p.second) ? true : false; + } + + // Determine whether this is a lead-out (A2) descriptor. 
+ + else if ( descriptor->point == 0xA2 && + descriptor->adr == 0x01 && + descriptor->session <= kCDSessionMaxIndex ) + { + CDSession * session = sessions + descriptor->session; + + // Record the position of the session lead-out. + + session->leadOut = CDConvertMSFToLBA(descriptor->p); + + sessionMinIndex = min(descriptor->session, sessionMinIndex); + sessionMaxIndex = max(descriptor->session, sessionMaxIndex); + } + } + + if ( sessionMinIndex > kCDSessionMaxIndex ) // (no sessions?) + { + goto scanErr; + } + + if ( trackMinIndex > kCDTrackMaxIndex ) // (no tracks?) + { + goto scanErr; + } + + // Pre-scan the ordered list of tracks. + + for ( unsigned index = trackMinIndex; index <= trackMaxIndex; index++ ) + { + CDTrack * track = tracks + index; + + // Validate the existence of this track (and its session). + + if ( track->descriptor == 0 || sessions[track->session].leadOut == 0 ) + { + goto scanErr; + } + + // Determine the block type, and linkage requirement, for this track. + + if ( track->blockType == kCDSectorTypeMode1 ) // (data track?) + { + IOReturn status; + + // Read a whole sector from the data track into our buffer. + +///m:2333367:workaround:commented:start +// status = media->read( /* client */ this, +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = media->IOStorage::read( /* client */ this, +///m:2333367:workaround:added:stop + /* byteStart */ track->block * mediaBlockSize, + /* buffer */ buffer ); + + if ( status == kIOReturnSuccess ) + { + UInt8 * sector = (UInt8 *) buffer->getBytesNoCopy(); + + // Determine whether this is a "mode 2" data track. + + if ( sector[15] == 0x02 ) + { + // Determine whether this is a "mode 2 formless", + // "mode 2 form 1" or "mode 2 form 2" data track. 
+ + if ( sessions[track->session].isFormX ) + { + if ( (sector[18] & 0x20) ) + { + track->blockSize = kCDSectorSizeMode2Form2; + track->blockType = kCDSectorTypeMode2Form2; + } + else + { + track->blockSize = kCDSectorSizeMode2Form1; + track->blockType = kCDSectorTypeMode2Form1; + + // Determine whether this is a linked data track. + + if ( track->block && memcmp(sector + 24, "ER", 2) ) + { + trackMaxLinked = track; + } + } + } + else + { + track->blockSize = kCDSectorSizeMode2; + track->blockType = kCDSectorTypeMode2; + } + } + + // Determine whether this is a linked "mode 1" data track. + + else if ( track->block && memcmp(sector + 16, "ER", 2) ) + { + trackMaxLinked = track; + } + } + } + } + + // Create a media object to represent the linked data tracks, the hidden + // pre-gap-area data track, or even both, if it is applicable to this CD. + + if ( trackMaxLinked || tracks[trackMinIndex].block ) + { + CDTOCDescriptor descriptor; + UInt32 trackBlockNext; + CDSectorSize trackBlockSize; + CDSectorType trackBlockType; + UInt64 trackSize; + + descriptor.session = sessionMinIndex; + descriptor.control = 0x04; + descriptor.adr = 0x01; + descriptor.tno = 0x00; + descriptor.point = 0x00; + descriptor.address.minute = 0x00; + descriptor.address.second = 0x00; + descriptor.address.frame = 0x00; + descriptor.zero = 0x00; + descriptor.p = CDConvertLBAToMSF(0); + + if ( trackMaxLinked ) + { + descriptor.session = sessionMaxIndex; + descriptor.control = trackMaxLinked->descriptor->control; + + trackBlockNext = sessions[sessionMaxIndex].leadOut; + trackBlockSize = trackMaxLinked->blockSize; + trackBlockType = trackMaxLinked->blockType; + } + else + { + trackBlockNext = tracks[trackMinIndex].block; + trackBlockSize = kCDSectorSizeMode1; + trackBlockType = kCDSectorTypeMode1; + } + + trackSize = trackBlockNext * trackBlockSize; + + // Create a media object to represent this partition. 
+ + IOMedia * newMedia = instantiateMediaObject( + /* partition */ &descriptor, + /* partitionSize */ trackSize, + /* partitionBlockSize */ trackBlockSize, + /* partitionBlockType */ trackBlockType, + /* toc */ toc ); + + if ( newMedia ) + { + partitions->setObject(newMedia); + newMedia->release(); + } + } + + // Scan the ordered list of tracks. + + for ( unsigned index = trackMinIndex; index <= trackMaxIndex; index++ ) + { + CDTrack * track = tracks + index; + UInt32 trackBlockNext; + UInt64 trackSize; + + // Determine whether this is a linked data track (skipped). + + if ( ( trackMaxLinked ) && + ( track->blockType == kCDSectorTypeMode1 || + track->blockType == kCDSectorTypeMode2Form1 ) ) + { + continue; + } + + // Determine where the partition ends. + + if ( index < trackMaxIndex && track->session == (track + 1)->session ) + { + trackBlockNext = (track + 1)->block; + } + else + { + trackBlockNext = sessions[track->session].leadOut; + } + + if ( track->block >= trackBlockNext ) + { + goto scanErr; + } + + trackSize = (trackBlockNext - track->block) * track->blockSize; + + // Determine whether the partition is corrupt (fatal). + + if ( isPartitionCorrupt( /* partition */ track->descriptor, + /* partitionSize */ trackSize, + /* partitionBlockSize */ track->blockSize, + /* partitionBlockType */ track->blockType, + /* toc */ toc ) ) + { + goto scanErr; + } + + // Determine whether the partition is invalid (skipped). + + if ( isPartitionInvalid( /* partition */ track->descriptor, + /* partitionSize */ trackSize, + /* partitionBlockSize */ track->blockSize, + /* partitionBlockType */ track->blockType, + /* toc */ toc ) ) + { + continue; + } + + // Create a media object to represent this partition.
+ + IOMedia * newMedia = instantiateMediaObject( + /* partition */ track->descriptor, + /* partitionSize */ trackSize, + /* partitionBlockSize */ track->blockSize, + /* partitionBlockType */ track->blockType, + /* toc */ toc ); + + if ( newMedia ) + { + partitions->setObject(newMedia); + newMedia->release(); + } + } + + // Release our resources. + + media->close(this); + buffer->release(); + IODelete(tracks, CDTrack, kCDTrackMaxIndex + 1); + IODelete(sessions, CDSession, kCDSessionMaxIndex + 1); + + return partitions; + +scanErr: + + // Release our resources. + + if ( mediaIsOpen ) media->close(this); + if ( partitions ) partitions->release(); + if ( buffer ) buffer->release(); + if ( tracks ) IODelete(tracks, CDTrack, kCDTrackMaxIndex + 1); + if ( sessions ) IODelete(sessions, CDSession, kCDSessionMaxIndex + 1); + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOCDPartitionScheme::isPartitionCorrupt( + CDTOCDescriptor * /* partition */ , + UInt64 /* partitionSize */ , + UInt32 /* partitionBlockSize */ , + CDSectorType /* partitionBlockType */ , + CDTOC * /* toc */ ) +{ + // + // Ask whether the given partition appears to be corrupt. A partition that + // is corrupt will cause the failure of the CD partition scheme altogether. + // + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOCDPartitionScheme::isPartitionInvalid( + CDTOCDescriptor * partition, + UInt64 partitionSize, + UInt32 partitionBlockSize, + CDSectorType partitionBlockType, + CDTOC * toc ) +{ + // + // Ask whether the given partition appears to be invalid. A partition that + // is invalid will cause it to be skipped in the scan, but will not cause a + // failure of the CD partition scheme. 
+ // + + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + UInt64 partitionBase = 0; + + // Compute the relative byte position and size of the new partition, + // relative to the provider media's natural blocking factor of 2352. + + partitionBase = CDConvertMSFToLBA(partition->p) * mediaBlockSize; + partitionSize = (partitionSize / partitionBlockSize) * mediaBlockSize; + + // Determine whether the partition begins before the 00:02:00 mark. + + if ( partition->p.minute == 0 && partition->p.second < 2 ) return true; + + // Determine whether the partition leaves the confines of the container. + + if ( partitionBase + partitionSize > media->getSize() ) return true; + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOCDPartitionScheme::instantiateMediaObject( + CDTOCDescriptor * partition, + UInt64 partitionSize, + UInt32 partitionBlockSize, + CDSectorType partitionBlockType, + CDTOC * toc ) +{ + // + // Instantiate a new media object to represent the given partition. + // + + IOMedia * media = getProvider(); + UInt64 partitionBase = 0; + char * partitionHint = 0; + + // Compute the relative byte position of the new partition and encode it + // into the designated "logical space", given the partition's block type. + // + // 0x0000000000 through 0x00FFFFFFFF is the "don't care" space. + // 0x0100000000 through 0x01FFFFFFFF is the "audio" space. + // 0x0200000000 through 0x02FFFFFFFF is the "mode 1" space. + // 0x0300000000 through 0x03FFFFFFFF is the "mode 2 formless" space. + // 0x0400000000 through 0x04FFFFFFFF is the "mode 2 form 1" space. + // 0x0500000000 through 0x05FFFFFFFF is the "mode 2 form 2" space. + + partitionBase = CDConvertMSFToLBA(partition->p) * partitionBlockSize; + partitionBase += ((UInt64) partitionBlockType) << 32; + + // Look up a type for the new partition. 
+ + OSDictionary * hintTable = OSDynamicCast( + /* type */ OSDictionary, + /* instance */ getProperty(kIOCDPartitionSchemeContentTable) ); + + if ( hintTable ) + { + char hintIndex[5]; + OSString * hintValue; + + sprintf(hintIndex, "0x%02X", partitionBlockType & 0xFF); + + hintValue = OSDynamicCast(OSString, hintTable->getObject(hintIndex)); + + if ( hintValue ) partitionHint = (char *) hintValue->getCStringNoCopy(); + } + + // Create the new media object. + + IOMedia * newMedia = instantiateDesiredMediaObject( + /* partition */ partition, + /* partitionSize */ partitionSize, + /* partitionBlockSize */ partitionBlockSize, + /* partitionBlockType */ partitionBlockType, + /* toc */ toc ); + + if ( newMedia ) + { + if ( newMedia->init( + /* base */ partitionBase, + /* size */ partitionSize, + /* preferredBlockSize */ partitionBlockSize, + /* isEjectable */ media->isEjectable(), + /* isWhole */ false, + /* isWritable */ media->isWritable(), + /* contentHint */ partitionHint ) ) + { + // Set a name for this partition. + + char name[24]; + sprintf(name, "Untitled %d", partition->point); + newMedia->setName(name); + + // Set a location value (the partition number) for this partition. + + char location[12]; + sprintf(location, "%d", partition->point); + newMedia->setLocation(location); + + // Set the "Partition ID" key for this partition. + + newMedia->setProperty(kIOMediaPartitionIDKey, partition->point, 32); + + // Set the "Session ID" key for this partition. 
+ + newMedia->setProperty(kIOMediaSessionIDKey, partition->session, 32); + } + else + { + newMedia->release(); + newMedia = 0; + } + } + + return newMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOCDPartitionScheme::instantiateDesiredMediaObject( + CDTOCDescriptor * /* partition */ , + UInt64 /* partitionSize */ , + UInt32 /* partitionBlockSize */ , + CDSectorType /* partitionBlockType */ , + CDTOC * /* toc */ ) +{ + // + // Allocate a new media object (called from instantiateMediaObject). + // + + return new IOMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOCDPartitionScheme::read( IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion ) +{ + // + // Read data from the storage object at the specified byte offset into the + // specified buffer, asynchronously. When the read completes, the caller + // will be notified via the specified completion action. + // + // The buffer will be retained for the duration of the read. + // + // For the CD partition scheme, we convert the read from a partition + // object into the appropriate readCD command to our provider media. 
+ // + + getProvider()->readCD( /* client */ this, + /* byteStart */ (byteStart & 0xFFFFFFFF), + /* buffer */ buffer, + /* sectorArea */ (CDSectorArea) kCDSectorAreaUser, + /* sectorType */ (CDSectorType) (byteStart >> 32), + /* completion */ completion ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 11); + +// - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOCDPartitionScheme, 15); diff --git a/iokit/Families/IODVDBlockStorage/IODVDBlockStorageDevice.cpp b/iokit/Families/IODVDBlockStorage/IODVDBlockStorageDevice.cpp new file mode 100644 index 000000000..5ed3a43f3 --- /dev/null +++ b/iokit/Families/IODVDBlockStorage/IODVDBlockStorageDevice.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +#define super IOCDBlockStorageDevice +OSDefineMetaClass(IODVDBlockStorageDevice,IOCDBlockStorageDevice) +OSDefineAbstractStructors(IODVDBlockStorageDevice,IOCDBlockStorageDevice) + +bool +IODVDBlockStorageDevice::init(OSDictionary * properties) +{ + bool result; + + result = super::init(properties); + if (result) { + setProperty(kIOBlockStorageDeviceTypeKey,kIOBlockStorageDeviceTypeDVD); + } + + return(result); +} + +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 0); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 1); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 2); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 3); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 4); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 5); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 6); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 7); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 8); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 9); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 10); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 11); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 12); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 13); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 14); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 15); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 16); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 17); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 18); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 19); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 20); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 21); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 22); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 23); 
+OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 24); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 25); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 26); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 27); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 28); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 29); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 30); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDevice, 31); diff --git a/iokit/Families/IODVDStorage/IODVDBlockStorageDriver.cpp b/iokit/Families/IODVDStorage/IODVDBlockStorageDriver.cpp new file mode 100644 index 000000000..a6a56c453 --- /dev/null +++ b/iokit/Families/IODVDStorage/IODVDBlockStorageDriver.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#define super IOCDBlockStorageDriver +OSDefineMetaClassAndStructors(IODVDBlockStorageDriver,IOCDBlockStorageDriver) + +IODVDBlockStorageDevice * +IODVDBlockStorageDriver::getProvider() const +{ + return (IODVDBlockStorageDevice *) IOService::getProvider(); +} + +/* Accept a new piece of media, doing whatever's necessary to make it + * show up properly to the system. + */ +IOReturn +IODVDBlockStorageDriver::acceptNewMedia(void) +{ + UInt32 mediaType = getMediaType(); + + if (mediaType >= kCDMediaTypeMin && mediaType <= kCDMediaTypeMax) { + return IOCDBlockStorageDriver::acceptNewMedia(); + } else { + return IOBlockStorageDriver::acceptNewMedia(); + } +} + +const char * +IODVDBlockStorageDriver::getDeviceTypeName(void) +{ + return(kIOBlockStorageDeviceTypeDVD); +} + +IOMedia * +IODVDBlockStorageDriver::instantiateDesiredMediaObject(void) +{ + UInt32 mediaType = getMediaType(); + + if (mediaType >= kCDMediaTypeMin && mediaType <= kCDMediaTypeMax) { + return IOCDBlockStorageDriver::instantiateDesiredMediaObject(); + } else { + return(new IODVDMedia); + } +} + +IOMedia * +IODVDBlockStorageDriver::instantiateMediaObject(UInt64 base,UInt64 byteSize, + UInt32 blockSize,char *mediaName) +{ + IOMedia *media = NULL; + UInt32 mediaType = getMediaType(); + + if (mediaType >= kCDMediaTypeMin && mediaType <= kCDMediaTypeMax) { + return IOCDBlockStorageDriver::instantiateMediaObject( + base,byteSize,blockSize,mediaName); + } else { + media = IOBlockStorageDriver::instantiateMediaObject( + base,byteSize,blockSize,mediaName); + } + + if (media) { + char *description = NULL; + + switch (mediaType) { + case kDVDMediaTypeROM: + description = kIODVDMediaTypeROM; + break; + case kDVDMediaTypeRAM: + description = kIODVDMediaTypeRAM; + break; + case kDVDMediaTypeR: + description = kIODVDMediaTypeR; + break; + case kDVDMediaTypeRW: + description = kIODVDMediaTypeRW; + break; + case kDVDMediaTypePlusRW: + 
description = kIODVDMediaTypePlusRW; + break; + } + + if (description) { + media->setProperty(kIODVDMediaTypeKey, description); + } + } + + return media; +} + +IOReturn +IODVDBlockStorageDriver::reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat) +{ + return(getProvider()->reportKey(buffer,keyClass,lba,agid,keyFormat)); +} + +IOReturn +IODVDBlockStorageDriver::sendKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt8 agid,const DVDKeyFormat keyFormat) +{ + return(getProvider()->sendKey(buffer,keyClass,agid,keyFormat)); +} + +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 0); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 1); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 2); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 3); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 4); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 5); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 6); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 7); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 8); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 9); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 10); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 11); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 12); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 13); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 14); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 15); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 16); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 17); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 18); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 19); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 20); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 
21); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 22); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 23); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 24); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 25); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 26); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 27); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 28); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 29); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 30); +OSMetaClassDefineReservedUnused(IODVDBlockStorageDriver, 31); diff --git a/iokit/Families/IODVDStorage/IODVDMedia.cpp b/iokit/Families/IODVDStorage/IODVDMedia.cpp new file mode 100644 index 000000000..4586a4b8f --- /dev/null +++ b/iokit/Families/IODVDStorage/IODVDMedia.cpp @@ -0,0 +1,168 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#define super IOMedia +OSDefineMetaClassAndStructors(IODVDMedia, IOMedia) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IODVDBlockStorageDriver * IODVDMedia::getProvider() const +{ + // + // Obtain this object's provider. We override the superclass's method to + // return a more specific subclass of IOService -- IODVDBlockStorageDriver. + // This method serves simply as a convenience to subclass developers. + // + + return (IODVDBlockStorageDriver *) IOService::getProvider(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 15); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 16); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 17); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 18); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 19); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 20); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 21); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 22); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 23); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 24); + +// - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 25); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 26); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 27); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 28); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 29); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 30); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IODVDMedia, 31); diff --git a/iokit/Families/IOGraphics/ADBTest.m b/iokit/Families/IOGraphics/ADBTest.m new file mode 100644 index 000000000..a3b09d671 --- /dev/null +++ b/iokit/Families/IOGraphics/ADBTest.m @@ -0,0 +1,70 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +void main( void ) +{ + IOReturn err; + IOString kind; + int reg[ 2 ]; + int retCount; + IOObjectNumber obj; + int i; + + err = _IOLookupByDeviceName( device_master_self(), "display@Display0", + &obj, &kind); + printf("_IOLookupByDeviceName = %d\n", err ); + + retCount = sizeof(int); + err = _IOCallDeviceMethod( device_master_self(), obj, + "IOSMADBGetAVDeviceID:size:", + nil, 0, + &retCount, + ®[ 1 ], + &retCount); + printf("IOSMADBGetAVDeviceID(%d) = %08x\n", err, reg[ 1 ] ); + + reg[0] = 0xff; + reg[1] = 0xff; + retCount = 0; + err = _IOCallDeviceMethod( device_master_self(), obj, + "IOSMADBSetLogicalRegister:size:", + reg, sizeof( reg), + &retCount, + nil, + &retCount); + printf("IOSMADBSetLogicalRegister(%d)\n", err ); + + for( i = 0; i < 4; i++) { + reg[0] = 0xff; + retCount = sizeof(int); + err = _IOCallDeviceMethod( device_master_self(), obj, + "IOSMADBGetLogicalRegister:size:result:size:", + reg, sizeof( int), + &retCount, + ®[ 1 ], + &retCount); + printf("IOSMADBGetLogicalRegister(%d) = %08x\n", err, reg[ 1 ] ); + } +} diff --git a/iokit/Families/IOGraphics/AppleDDCDisplay.cpp b/iokit/Families/IOGraphics/AppleDDCDisplay.cpp new file mode 100644 index 000000000..8fb6d59d2 --- /dev/null +++ b/iokit/Families/IOGraphics/AppleDDCDisplay.cpp @@ -0,0 +1,311 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 23 Jul 98 - start IOKit + * sdouglas 08 Dec 98 - start cpp + */ + +#include +#include +#include + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct EDID { + UInt8 header[8]; + UInt8 vendorProduct[4]; + UInt8 serialNumber[4]; + UInt8 weekOfManufacture; + UInt8 yearOfManufacture; + UInt8 version; + UInt8 revision; + UInt8 displayParams[5]; + UInt8 colorCharacteristics[10]; + UInt8 establishedTimings[3]; + UInt16 standardTimings[8]; + UInt8 detailedTimings[72]; + UInt8 extension; + UInt8 checksum; +}; + +struct TimingToEDID { + UInt32 timingID; + UInt8 spare; + UInt8 establishedBit; + UInt16 standardTiming; +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleDDCDisplay : public IODisplay +{ + OSDeclareDefaultStructors(AppleDDCDisplay) + +private: + OSData * edidData; + OSData * additions; + OSData * deletions; + TimingToEDID * timingToEDID; + int numEDIDEntries; + +public: + virtual IOService * probe( IOService * provider, + SInt32 * score ); + + virtual bool start( IOService * provider ); + + virtual IOReturn getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ); +}; + +#undef super +#define super IODisplay + +OSDefineMetaClassAndStructors(AppleDDCDisplay, IODisplay) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 
* * * */ + +IOService * AppleDDCDisplay::probe( IOService * provider, + SInt32 * score ) +{ + IODisplayConnect * connect; + IOFramebuffer * framebuffer; + IOService * ret = 0; + + do { + + if( 0 == super::probe( provider, score )) + continue; + + connect = getConnection(); + framebuffer = connect->getFramebuffer(); + assert( framebuffer ); + + if( kIOReturnSuccess != framebuffer->getAttributeForConnection( + connect->getConnection(), + kConnectionSupportsHLDDCSense, NULL )) + continue; + + if( framebuffer->hasDDCConnect( connect->getConnection())) + ret = this; + + } while( false); + + return( ret ); +} + + +bool AppleDDCDisplay::start( IOService * provider ) +{ + IOReturn err; + IODisplayConnect * connect; + IOFramebuffer * framebuffer; + OSData * data; + OSData * overrideData; + OSArray * array; + OSDictionary * dict; + OSNumber * off; + IOByteCount length; + EDID readEDID; + EDID * edid; + UInt32 vendorProd; + UInt32 index; + UInt32 numExts; + + connect = getConnection(); + framebuffer = connect->getFramebuffer(); + assert( framebuffer ); + + do { + length = sizeof( EDID); + err = framebuffer->getDDCBlock( connect->getConnection(), + 1, kIODDCBlockTypeEDID, 0, (UInt8 *) &readEDID, &length ); + if( err || (length != sizeof( EDID))) + continue; + + IOLog("%s EDID Version %d, Revision %d\n", framebuffer->getName(), + readEDID.version, readEDID.revision ); + if( readEDID.version != 1) + continue; + + if( (data = (OSData *) getProperty( "appleDDC" ))) { + timingToEDID = (TimingToEDID *) data->getBytesNoCopy(); + numEDIDEntries = data->getLength() / sizeof(TimingToEDID); + } else + continue; + + vendorProd = (readEDID.vendorProduct[0] << 24) + | (readEDID.vendorProduct[1] << 16) + | (readEDID.vendorProduct[2] << 8) + | (readEDID.vendorProduct[3] << 0); + +#if 1 + IOLog("Vendor/product 0x%08lx, ", vendorProd ); + IOLog("Est: "); + for( index = 0; index < 3; index++) + IOLog(" 0x%02x,", readEDID.establishedTimings[ index ] ); + IOLog("\nStd: " ); + for( index = 0; index 
< 8; index++) + IOLog(" 0x%04x,", readEDID.standardTimings[ index ] ); + IOLog("\n"); +#endif + + data = OSData::withBytes( &readEDID, sizeof( EDID )); + if( !data) + continue; + + numExts = readEDID.extension; + for( index = 2; index < (2 + numExts); index++) { + length = sizeof( EDID); + err = framebuffer->getDDCBlock( connect->getConnection(), + index, kIODDCBlockTypeEDID, 0, (UInt8 *) &readEDID, &length ); + if( err || (length != sizeof( EDID))) + break; + if( !data->appendBytes( &readEDID, sizeof( EDID ) )) + break; + } + + overrideData = 0; + additions = 0; + if( (array = OSDynamicCast(OSArray, getProperty("overrides")))) { + for( index = 0; + (dict = OSDynamicCast(OSDictionary, array->getObject(index))); + index++ ) { + if( 0 == (off = OSDynamicCast(OSNumber, dict->getObject("ID")))) + continue; + if( vendorProd == off->unsigned32BitValue()) { + overrideData = OSDynamicCast(OSData, + dict->getObject( "EDID")); + additions = OSDynamicCast(OSData, + dict->getObject("additions")); + deletions = OSDynamicCast(OSData, + dict->getObject("deletions")); + break; + } + } + } + + if( overrideData) + data = overrideData; + + setProperty( kIODisplayEDIDKey, data ); + data->release(); + edidData = data; + + edid = (EDID *) edidData->getBytesNoCopy(); + // vendor + vendorProd = (edid->vendorProduct[0] << 8) | edid->vendorProduct[1]; + setProperty( kDisplayVendorID, vendorProd, 32); + // product + vendorProd = (edid->vendorProduct[3] << 8) | edid->vendorProduct[2]; + setProperty( kDisplayProductID, vendorProd, 32); + + return( super::start( provider)); + + } while( false); + + return( false); +} + +IOReturn AppleDDCDisplay::getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ) +{ + IOReturn err; + IODisplayConnect * connect; + IOFramebuffer * framebuffer; + IOTimingInformation info; + const TimingToEDID * lookTiming; + UInt32 estBit, i; + EDID * edid; + UInt32 * dataModes; + UInt32 numData; + UInt32 appleTimingID; + bool supported = false; + bool deleted 
= false; + enum { kSetFlags = (kDisplayModeValidFlag + | kDisplayModeSafeFlag) }; + + + connect = getConnection(); + framebuffer = connect->getFramebuffer(); + assert( framebuffer ); + + if( kIOReturnSuccess != framebuffer->connectFlags( connect->getConnection(), + mode, flags )) + *flags = 0; + + err = framebuffer->getTimingInfoForDisplayMode( mode, &info ); + if( err != kIOReturnSuccess) + return( err); + + appleTimingID = info.appleTimingID; + + if( deletions) { + numData = deletions->getLength() / sizeof( UInt32); + dataModes = (UInt32 *) deletions->getBytesNoCopy(); + for( i = 0; (!deleted) && (i < numData); i++) + deleted = (dataModes[ i ] == appleTimingID); + } + + if( !deleted) { + + if( additions) { + numData = additions->getLength() / sizeof( UInt32); + dataModes = (UInt32 *) additions->getBytesNoCopy(); + for( i = 0; (!supported) && (i < numData); i++) + supported = (dataModes[ i ] == appleTimingID); + } + + edid = (EDID *) edidData->getBytesNoCopy(); + assert( edid ); + for( lookTiming = timingToEDID; + (!supported) && ((lookTiming - timingToEDID) < numEDIDEntries); + lookTiming++ ) { + + if( lookTiming->timingID == appleTimingID) { + estBit = lookTiming->establishedBit; + if( estBit != 0xff) + supported = (0 != (edid->establishedTimings[ estBit / 8 ] + & (1 << (estBit % 8)))); + + for( i = 0; (!supported) && (i < 8); i++ ) + supported = (lookTiming->standardTiming + == edid->standardTimings[i] ); + } + } + } + + if( supported) + *flags = ((*flags) & ~kDisplayModeSafetyFlags) | kSetFlags; + + // Pass the existing flags (from framebuffer) thru + return( err); +} + diff --git a/iokit/Families/IOGraphics/AppleG3SeriesDisplay.cpp b/iokit/Families/IOGraphics/AppleG3SeriesDisplay.cpp new file mode 100644 index 000000000..9e9e451c4 --- /dev/null +++ b/iokit/Families/IOGraphics/AppleG3SeriesDisplay.cpp @@ -0,0 +1,453 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +#include + +#define kNumber_of_power_states 4 +#define kNumber_of_power_levels 32 + +#define kScreenBit 0x01 +#define kPowerOn 0x80 +#define kPowerOff 0x00 +#define kDisplayOn kScreenBit | kPowerOn +#define kDisplayOff kScreenBit | kPowerOff + +class AppleG3SeriesDisplay : public AppleSenseDisplay +{ + OSDeclareDefaultStructors(AppleG3SeriesDisplay) + +private: + +int current_user_brightness; // 0-31. The brightness level last selected via the brightness buttons. +int current_level; // 0-31. 
The current brightness level +IOService * PMUdriver; // points to PMU driver +int * rawTable; // points to table of raw brightess levels + +// the constants used to talk with the pmu: +enum { + kPMUpower1Read = 0x19, // more power status (DBLite) + kPMUReadBrightness = 0x49, // read the brightness value + kPMUpower1Cntl = 0x11, // more power control (DBLite) + kPMUSetBrightness = 0x41 // set screen brightness +}; + +// We need this to callPlatformFunction when sending to sendMiscCommand +typedef struct SendMiscCommandParameterBlock { + int command; + IOByteCount sLength; + UInt8 *sBuffer; + IOByteCount *rLength; + UInt8 *rBuffer; +} SendMiscCommandParameterBlock; +typedef SendMiscCommandParameterBlock *SendMiscCommandParameterBlockPtr; + +// A simpler way to interface with the pmu SendMiscCommand +IOReturn localSendMiscCommand(int command, IOByteCount sLength, UInt8 *sBuffer, IOByteCount *rLength, UInt8 *rBuffer); + +public: + IOService * probe ( IOService *, SInt32 * ); + virtual void initForPM ( IOService* ); + virtual IOReturn setPowerState ( unsigned long, IOService* ); + virtual unsigned long maxCapabilityForDomainState ( IOPMPowerFlags ); + virtual unsigned long initialPowerStateForDomainState ( IOPMPowerFlags ); + virtual unsigned long powerStateForDomainState ( IOPMPowerFlags ); + virtual void ourButtonHandler ( unsigned int ); + virtual void setBrightness ( long ); +}; + +void upButtonHandler(AppleG3SeriesDisplay *); +void downButtonHandler(AppleG3SeriesDisplay *); + + +/* + The actual display panel has 128 power levels. Copying the MacOS, we only implement 32 of them. + We further divide the 32 into four IOKit power states which we export to our superclass. + + In the lowest state, the display is off. This state consists of only one of the 32 power levels, the lowest. + In the next state it is in the dimmest usable state. This state also consists of only one of the 32 levels, the second lowest. 
+ The next state is also dim and consists of seven of the 32 levels. + The highest state consists of the highest 23 levels. + + The display has no state or configuration or programming that would be saved/restored over power state changes, + and the driver does not register with the superclass as an interested driver. + + This driver doesn't have much to do. It changes between the four power state brightnesses on command + from the superclass, and it raises and lowers the display brightness by one of the 32 brightness levels + when it receives a brightness-button interrupt from the ADB stack. + + The only smart thing it does is keep track of which of the 32 brightness levels the user has selected by button, and it + never exceeds that on command from the display device object. It only raises above that on an brightness-up-button + interrupt. + + */ + + +static IOPMPowerState ourPowerStates[kNumber_of_power_states] = { + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable+IOPMMaxPerformance,0,IOPMPowerOn,0,0,0,0,0,0,0,0} +}; + +static int max_brightness_level[kNumber_of_power_states] = {0,1,8,31}; + +static int HooperTable[ ] = {127,71,69,67,65,63,61,59, + 58,56,54,52,50,48,46,44, + 42,40,38,37,35,33,31,29, + 27,25,23,21,19,18,16,14 }; + +bool ourNotificationHandler( OSObject *, void *, IOService * ); + +#define super AppleSenseDisplay + +OSDefineMetaClassAndStructors(AppleG3SeriesDisplay, AppleSenseDisplay) + + +// ********************************************************************************** +// probe +// +// ********************************************************************************** +IOService * AppleG3SeriesDisplay::probe ( IOService * provider, SInt32 * score ) +{ + IOFramebuffer * framebuffer; + IOService * ret = 0; + UInt32 displayType; + IOIndex ourIndex; + + do { + if ( 0 == super::probe( provider, score ) ) { + continue; + } + + framebuffer = 
(IOFramebuffer *)getConnection()->getFramebuffer(); // point to our framebuffer + ourIndex = getConnection()->getConnection(); // get our connection index on this framebuffer + + if ( kIOReturnSuccess != framebuffer->getAppleSense(ourIndex,NULL,NULL,NULL,&displayType) ) { + continue; + } + + if ( !(displayType == kPanelTFTConnect) ) { // does it have a panel attached? + continue; // no + } + ret = this; // yes, we will control the panel + + } while ( false ); + + return ( ret ); +} + +// ********************************************************************************** +// localSendMiscCommand +// +// ********************************************************************************** +IOReturn AppleG3SeriesDisplay::localSendMiscCommand(int command, IOByteCount sLength, UInt8 *sBuffer, IOByteCount *rLength, UInt8 *rBuffer) +{ + IOReturn returnValue = kIOReturnError; + + // The poupose of this method is to free us from the pain to create a parameter block each time + // we wish to talk to the pmu: + SendMiscCommandParameterBlock prmBlock = {command, sLength, sBuffer, rLength, rBuffer}; + + IOLog("AppleG3SeriesDisplay::localSendMiscCommand 0x%02x %d 0x%08lx 0x%08lx 0x%08lx\n", + command, sLength, sBuffer, rLength, rBuffer); + + if (PMUdriver != NULL) { + IOLog("AppleG3SeriesDisplay::localSendMiscCommand calling PMUdriver->callPlatformFunction\n"); + returnValue = PMUdriver->callPlatformFunction("sendMiscCommand", true, (void*)&prmBlock, NULL, NULL, NULL); + } + + // If we are here we do not have a dreive to talk to: + IOLog("AppleG3SeriesDisplay::localSendMiscCommand end 0x%08lx\n", returnValue); + + return returnValue; +} + +// ********************************************************************************** +// initForPM +// +// This method overrides the one in IODisplay.h to do PowerBook-only +// power management of the display. 
+// ********************************************************************************** +void AppleG3SeriesDisplay::initForPM ( IOService * provider ) +{ + unsigned long i; + + UInt8 PMUreceiveBuffer[10]; // (I think 1 is enough, but it scares me) + IOByteCount unused = sizeof(PMUreceiveBuffer); + + displayPMVars->powerControllable = true; + + PMinit(); // initialize superclass variables + + PMUdriver = waitForService(serviceMatching("ApplePMU")); + + rawTable = HooperTable; + + localSendMiscCommand(kPMUpower1Read,0, NULL, &unused,PMUreceiveBuffer); + + if ( PMUreceiveBuffer[0] & kScreenBit ) { // is the screen currently on? + unused = sizeof(PMUreceiveBuffer); + localSendMiscCommand(kPMUReadBrightness,0, NULL, &unused,PMUreceiveBuffer); // yes, figure out the brightness + current_user_brightness = kNumber_of_power_levels - 1; // ( in case the for-loop doesn't break) + current_level = kNumber_of_power_levels - 1; + + for ( i = 0; i < kNumber_of_power_levels; i++ ) { + if ( PMUreceiveBuffer[0] >= rawTable[i] ) { + current_user_brightness = i; + current_level = i; + break; + } + } + } + else { // no + current_user_brightness = 0; + current_level = 0; + } + + addNotification( gIOPublishNotification,serviceMatching("AppleADBButtons"), // look for the button driver + (IOServiceNotificationHandler)ourNotificationHandler, this, 0 ); + + provider->joinPMtree(this); // attach into the power management hierarchy + registerPowerDriver(this,ourPowerStates,kNumber_of_power_states); // register with policy-maker (us) +} + + +// ********************************************************************************** +// ourNotificationHandler +// +// The ADB button driver has appeared. Tell it we are interested in the +// brightness-up button and the brightness-down button. 
+// ********************************************************************************** +bool ourNotificationHandler( OSObject * us, void * ref, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((AppleADBButtons *)yourDevice)->registerForButton(kBrightness_up,(IOService *)us,(button_handler)upButtonHandler,true); + ((AppleADBButtons *)yourDevice)->registerForButton(kBrightness_down,(IOService *)us,(button_handler)downButtonHandler,true); + } + return true; +} + + +// ********************************************************************************** +// setPowerState +// +// All power state changes require a call to the PMU driver, which +// blocks the thread till the command completes. +// ********************************************************************************** +IOReturn AppleG3SeriesDisplay::setPowerState ( unsigned long powerStateOrdinal, IOService* whatDevice ) +{ + UInt8 displayOn = kDisplayOn; + UInt8 displayOff = kDisplayOff; + unsigned long i; + + if ( powerStateOrdinal < kNumber_of_power_states ) { + if ( powerStateOrdinal > pm_vars->myCurrentState ) { // raising power + if ( pm_vars->myCurrentState == 0 ) { // is it currently off? + IOByteCount unused = 0; + localSendMiscCommand(kPMUpower1Cntl,1, &displayOn, &unused,NULL); + } + current_level = max_brightness_level[powerStateOrdinal]; + if ( current_user_brightness < current_level ) { + current_level = current_user_brightness; // don't exceed what the user used to have it at + } + setBrightness(current_level); + // If we are still higher than we need to be, request a lower state + for ( i = 0; i < kNumber_of_power_states; i++ ) { // figure out what state we should be in + if ( current_level <= max_brightness_level[i] ) { + break; + } + } + if ( pm_vars->myCurrentState > i ) { + changePowerStateToPriv(i); + } + } + + if ( powerStateOrdinal < pm_vars->myCurrentState ) { // lowering power + if (powerStateOrdinal == 0 ) { // going all the way off? 
+ IOByteCount unused = 0; + localSendMiscCommand(kPMUpower1Cntl,1, &displayOff, &unused,NULL); // yes + current_level = max_brightness_level[powerStateOrdinal]; + } + else { + if ( current_level > max_brightness_level[powerStateOrdinal] ) { // no + current_level = max_brightness_level[powerStateOrdinal]; + setBrightness(current_level); + } + } + } + } + return IOPMAckImplied; +} + + +// ********************************************************************************** +// maxCapabilityForDomainState +// +// This simple device needs only power. If the power domain is supplying +// power, the display can go to its highest state. If there is no power +// it can only be in its lowest state, which is off. +// ********************************************************************************** +unsigned long AppleG3SeriesDisplay::maxCapabilityForDomainState ( IOPMPowerFlags domainState ) +{ + if ( domainState & IOPMPowerOn ) { + return kNumber_of_power_states-1; + } + return 0; +} + + +// ********************************************************************************** +// initialPowerStateForDomainState +// +// The power domain may be changing state. If power is on in the new +// state, that will not affect our state at all. If domain power is off, +// we can attain only our lowest state, which is off. 
+// ********************************************************************************** +unsigned long AppleG3SeriesDisplay::initialPowerStateForDomainState ( IOPMPowerFlags domainState ) +{ + long unsigned i; + + if ( domainState & IOPMPowerOn ) { // domain has power + for ( i = 0; i < kNumber_of_power_states; i++ ) { // find power state that has our current + if ( current_level <= max_brightness_level[i] ) { // brightness level + return i; + break; + } + } + } + return 0; // domain is down, so display is off +} + + +// ********************************************************************************** +// powerStateForDomainState +// +// The power domain may be changing state. If power is on in the new +// state, that will not affect our state at all. If domain power is off, +// we can attain only our lowest state, which is off. +// ********************************************************************************** +unsigned long AppleG3SeriesDisplay::powerStateForDomainState ( IOPMPowerFlags domainState ) +{ + long unsigned i; + + if ( domainState & IOPMPowerOn ) { // domain has power + for ( i = 0; i < kNumber_of_power_states; i++ ) { // find power state that has our current + if ( current_level <= max_brightness_level[i] ) { // brightness level + return i; + } + } + } + return 0; // domain is down, so display is off +} + + +// ********************************************************************************** +// upButtonHandler +// +// The display-brightness-up button just went down. +// We are running on a new thread made by the ADB Button driver +// ********************************************************************************** +void upButtonHandler(AppleG3SeriesDisplay * us ) +{ + ((AppleG3SeriesDisplay *)us)->ourButtonHandler(kBrightness_up); +} + + +// ********************************************************************************** +// downButtonHandler +// +// The display-brightness-down button just went down. 
+// We are running on a new thread made by the ADB Button driver +// ********************************************************************************** +void downButtonHandler(AppleG3SeriesDisplay * us ) +{ + ((AppleG3SeriesDisplay *)us)->ourButtonHandler(kBrightness_down); +} + + +// ********************************************************************************** +// ourButtonHandler +// +// Alter the backlight brightness up or down by one increment. +// This involves a call to the PMU driver, which will block the thread. +// ********************************************************************************** +void AppleG3SeriesDisplay::ourButtonHandler ( unsigned int keycode ) +{ // If we are idle, ignore the button. + // The display will be made usable + if ( ! displayPMVars->displayIdle ) { // by the DisplayWrangler + switch (keycode) { + case kBrightness_up: // The brightness-up button has just been pressed + // We make sure the brightness is not above the maximum + // brightness level of our current power state. If it + // is too high, we ask the device to raise power. + if (current_level < max_brightness_level[pm_vars->myCurrentState] ) { + current_level++; + current_user_brightness = current_level; + setBrightness(current_level); + } + else { + if ( pm_vars->myCurrentState < (kNumber_of_power_states-1) ) { + current_user_brightness++; // increment user's desire + if ( changePowerStateToPriv(pm_vars->myCurrentState + 1) != IOPMNoErr ) { // request higher power + current_user_brightness--; // can't + } + } + } + break; + + case kBrightness_down: // The brightness-down button has just been pressed + // We lower the brightness, and if that takes us into a + // lower power state, we tell our parent about it. 
+ if ( pm_vars->myCurrentState > 0 ) { // don't lower if in lowest (off) state + if ( current_level > 0 ) { + current_level--; + current_user_brightness = current_level; + setBrightness(current_level); + if (current_level <= max_brightness_level[pm_vars->myCurrentState - 1] ) { // if this takes us into the next lower state + changePowerStateToPriv(pm_vars->myCurrentState - 1); // request lower power + } + } + } + break; + } + } +} + + +// ********************************************************************************** +// setBrightness +// +// Instruct PMU to set the display brightness. +// This will block the thread while the command completes. +// ********************************************************************************** +void AppleG3SeriesDisplay::setBrightness ( long brightness ) +{ + IOByteCount unused = 0; + UInt8 setBrightnessBuffer; + + setBrightnessBuffer = (UInt8)rawTable[brightness]; + localSendMiscCommand(kPMUSetBrightness,1, &setBrightnessBuffer, &unused,NULL); +} diff --git a/iokit/Families/IOGraphics/DDCInfo.m b/iokit/Families/IOGraphics/DDCInfo.m new file mode 100644 index 000000000..8ba555c95 --- /dev/null +++ b/iokit/Families/IOGraphics/DDCInfo.m @@ -0,0 +1,80 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include "IOMacOSVideo.h" +#include + +struct TimingToEDID { + UInt32 timingID; + UInt8 spare; + UInt8 establishedBit; + UInt16 standardTiming; +}; +typedef struct TimingToEDID TimingToEDID; + +#define MAKESTD(h,a,r) ( (((h/8)-31)<<8) | (a<<6) | (r-60) ) + +static const TimingToEDID timingToEDID[] = { + { timingApple_512x384_60hz, 0, 0xff, MAKESTD( 512,1,60) }, + { timingApple_640x480_67hz, 0, 0x04, MAKESTD( 640,1,67) }, + { timingVESA_640x480_60hz, 0, 0x05, MAKESTD( 640,1,60) }, + { timingVESA_640x480_72hz , 0, 0x03, MAKESTD( 640,1,72) }, + { timingVESA_640x480_75hz, 0, 0x02, MAKESTD( 640,1,75) }, + { timingVESA_640x480_85hz, 0, 0xff, MAKESTD( 640,1,85) }, + { timingApple_832x624_75hz, 0, 0x0d, MAKESTD( 832,1,75) }, + { timingVESA_800x600_56hz, 0, 0x01, MAKESTD( 800,1,56) }, + { timingVESA_800x600_60hz, 0, 0x00, MAKESTD( 800,1,60) }, + { timingVESA_800x600_72hz, 0, 0x0f, MAKESTD( 800,1,72) }, + { timingVESA_800x600_75hz, 0, 0x0e, MAKESTD( 800,1,75) }, + { timingVESA_800x600_85hz, 0, 0xff, MAKESTD( 800,1,85) }, + { timingVESA_1024x768_60hz, 0, 0x0b, MAKESTD( 1024,1,60) }, + { timingVESA_1024x768_70hz, 0, 0x0a, MAKESTD( 1024,1,70) }, + { timingVESA_1024x768_75hz, 0, 0x09, MAKESTD( 1024,1,75) }, + { timingVESA_1024x768_85hz, 0, 0xff, MAKESTD( 1024,1,85) }, + { timingApple_1024x768_75hz, 0, 0x09, MAKESTD( 1024,1,75) }, + { timingApple_1152x870_75hz, 0, 0x17, MAKESTD( 0000,0,00) }, + { timingVESA_1280x960_75hz, 0, 0xff, MAKESTD( 1280,1,75) }, + { 
timingVESA_1280x1024_60hz, 0, 0xff, MAKESTD( 1280,2,60) }, + { timingVESA_1280x1024_75hz, 0, 0x08, MAKESTD( 1280,2,75) }, + { timingVESA_1280x1024_85hz, 0, 0xff, MAKESTD( 1280,2,85) }, + { timingVESA_1600x1200_60hz, 0, 0xff, MAKESTD( 1600,1,60) }, + { timingVESA_1600x1200_65hz, 0, 0xff, MAKESTD( 1600,1,65) }, + { timingVESA_1600x1200_70hz, 0, 0xff, MAKESTD( 1600,1,70) }, + { timingVESA_1600x1200_75hz, 0, 0xff, MAKESTD( 1600,1,75) }, + { timingVESA_1600x1200_80hz, 0, 0xff, MAKESTD( 1600,1,80) } +}; + + +void main( void ) +{ + const TimingToEDID * lookTiming; + + lookTiming = timingToEDID; + while( lookTiming < (timingToEDID + sizeof( timingToEDID) / sizeof( TimingToEDID))) { + + printf("%d 0x%x ", lookTiming->timingID, + *((unsigned int *)&lookTiming->spare) ); + lookTiming++; + } + printf("\n"); +} + diff --git a/iokit/Families/IOGraphics/IOAccelerator.cpp b/iokit/Families/IOGraphics/IOAccelerator.cpp new file mode 100644 index 000000000..f50442480 --- /dev/null +++ b/iokit/Families/IOGraphics/IOAccelerator.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + + +#define super IOService + +OSDefineMetaClassAndStructors(IOAccelerator, IOService) + diff --git a/iokit/Families/IOGraphics/IOBootFramebuffer.cpp b/iokit/Families/IOGraphics/IOBootFramebuffer.cpp new file mode 100644 index 000000000..c9e4caad1 --- /dev/null +++ b/iokit/Families/IOGraphics/IOBootFramebuffer.cpp @@ -0,0 +1,230 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Boot video dumb frambuffer shim + */ + +#include "IOBootFramebuffer.h" + +enum { kTheDisplayMode = 10 }; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOFramebuffer + +OSDefineMetaClassAndStructors(IOBootFramebuffer, IOFramebuffer) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOService * IOBootFramebuffer::probe( IOService * provider, + SInt32 * score ) +{ + PE_Video bootDisplay; + IOService * ret = 0; + IOReturn err; + + do { + + if( !provider->getProperty("AAPL,boot-display")) + continue; + + err = getPlatform()->getConsoleInfo( &bootDisplay ); + if( err || (bootDisplay.v_baseAddr == 0)) + continue; + + if (false == super::probe( provider, score )) + continue; + + *score = 0; + ret = this; // Success + + } while( false); + + return( ret); +} + + +const char * IOBootFramebuffer::getPixelFormats( void ) +{ + const char * ret; + PE_Video bootDisplay; + + getPlatform()->getConsoleInfo( &bootDisplay); + + switch( bootDisplay.v_depth) { + case 8: + default: + ret = IO8BitIndexedPixels; + break; + case 15: + case 16: + ret = IO16BitDirectPixels; + break; + case 24: + case 32: + ret = IO32BitDirectPixels; + break; + } + + return( ret); +} + +IOItemCount IOBootFramebuffer::getDisplayModeCount( void ) +{ + return( 1); +} + +IOReturn IOBootFramebuffer::getDisplayModes( + IODisplayModeID * allDisplayModes ) +{ + + *allDisplayModes = kTheDisplayMode; + return( kIOReturnSuccess); +} + +IOReturn IOBootFramebuffer::getInformationForDisplayMode( + IODisplayModeID /* displayMode */, + IODisplayModeInformation * info ) +{ + PE_Video bootDisplay; + + getPlatform()->getConsoleInfo( &bootDisplay); + + bzero( info, sizeof( *info)); + + info->maxDepthIndex = 0; + info->nominalWidth = bootDisplay.v_width; + info->nominalHeight = bootDisplay.v_height; + info->refreshRate = 75 << 16; + + return( kIOReturnSuccess); +} + +UInt64 
IOBootFramebuffer::getPixelFormatsForDisplayMode( + IODisplayModeID /* displayMode */, IOIndex /* depth */ ) +{ + return( 1); +} + +IOReturn IOBootFramebuffer::getPixelInformation( + IODisplayModeID displayMode, IOIndex depth, + IOPixelAperture aperture, IOPixelInformation * info ) +{ + PE_Video bootDisplay; + + if( aperture || depth || (displayMode != kTheDisplayMode) ) + return( kIOReturnUnsupportedMode); + + getPlatform()->getConsoleInfo( &bootDisplay); + + bzero( info, sizeof( *info)); + + info->activeWidth = bootDisplay.v_width; + info->activeHeight = bootDisplay.v_height; + info->bytesPerRow = bootDisplay.v_rowBytes & 0x7fff; + info->bytesPerPlane = 0; + + switch( bootDisplay.v_depth ) { + case 8: + default: + strcpy(info->pixelFormat, IO8BitIndexedPixels ); + info->pixelType = kIOCLUTPixels; + info->componentMasks[0] = 0xff; + info->bitsPerPixel = 8; + info->componentCount = 1; + info->bitsPerComponent = 8; + break; + case 15: + case 16: + strcpy(info->pixelFormat, IO16BitDirectPixels ); + info->pixelType = kIORGBDirectPixels; + info->componentMasks[0] = 0x7c00; + info->componentMasks[1] = 0x03e0; + info->componentMasks[2] = 0x001f; + info->bitsPerPixel = 16; + info->componentCount = 3; + info->bitsPerComponent = 5; + break; + case 24: + case 32: + strcpy(info->pixelFormat, IO32BitDirectPixels ); + info->pixelType = kIORGBDirectPixels; + info->componentMasks[0] = 0x00ff0000; + info->componentMasks[1] = 0x0000ff00; + info->componentMasks[2] = 0x000000ff; + info->bitsPerPixel = 32; + info->componentCount = 3; + info->bitsPerComponent = 8; + break; + } + + return( kIOReturnSuccess); +} + +IOReturn IOBootFramebuffer::getCurrentDisplayMode( + IODisplayModeID * displayMode, IOIndex * depth ) +{ + if( displayMode) + *displayMode = kTheDisplayMode; + if( depth) + *depth = 0; + + return( kIOReturnSuccess); +} + +IODeviceMemory * IOBootFramebuffer::getApertureRange( IOPixelAperture aper ) +{ + IOReturn err; + IOPixelInformation info; + IOByteCount bytes; + PE_Video 
bootDisplay; + + getPlatform()->getConsoleInfo( &bootDisplay); + + err = getPixelInformation( kTheDisplayMode, 0, aper, + &info ); + if( err) + return( 0 ); + + bytes = (info.bytesPerRow * info.activeHeight) + 128; + + return( IODeviceMemory::withRange( bootDisplay.v_baseAddr, bytes )); +} + +bool IOBootFramebuffer::isConsoleDevice( void ) +{ + return( (0 != getProvider()->getProperty("AAPL,boot-display")) ); +} + +IOReturn IOBootFramebuffer::setGammaTable( UInt32 channelCount, + UInt32 dataCount, UInt32 dataWidth, void * data ) +{ + return( kIOReturnSuccess ); +} + +IOReturn IOBootFramebuffer::setCLUTWithEntries( + IOColorEntry * colors, UInt32 index, UInt32 numEntries, + IOOptionBits options ) +{ + return( kIOReturnSuccess ); +} diff --git a/iokit/Families/IOGraphics/IOBootFramebuffer.h b/iokit/Families/IOGraphics/IOBootFramebuffer.h new file mode 100644 index 000000000..c2a92c1c9 --- /dev/null +++ b/iokit/Families/IOGraphics/IOBootFramebuffer.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. 
+ * + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOBOOTFRAMEBUFFER_H +#define _IOKIT_IOBOOTFRAMEBUFFER_H + +#include +#include + + +class IOBootFramebuffer : public IOFramebuffer +{ + OSDeclareDefaultStructors(IOBootFramebuffer) + +public: + + virtual IOService * probe( IOService * provider, + SInt32 * score ); + +// virtual bool start( IOService * provider ); + + virtual const char * getPixelFormats( void ); + + virtual IOItemCount getDisplayModeCount( void ); + + virtual IOReturn getDisplayModes( IODisplayModeID * allDisplayModes ); + + virtual IOReturn getInformationForDisplayMode( IODisplayModeID displayMode, + IODisplayModeInformation * info ); + + virtual UInt64 getPixelFormatsForDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + + virtual IOReturn getPixelInformation( + IODisplayModeID displayMode, IOIndex depth, + IOPixelAperture aperture, IOPixelInformation * pixelInfo ); + + virtual IOReturn getCurrentDisplayMode( IODisplayModeID * displayMode, + IOIndex * depth ); + + virtual IODeviceMemory * getApertureRange( IOPixelAperture aperture ); + + virtual bool isConsoleDevice( void ); + + virtual IOReturn setCLUTWithEntries( IOColorEntry * colors, UInt32 index, + UInt32 numEntries, IOOptionBits options ); + + virtual IOReturn setGammaTable( UInt32 channelCount, UInt32 dataCount, + UInt32 dataWidth, void * data ); +}; + +#endif /* ! _IOKIT_IOBOOTFRAMEBUFFER_H */ + diff --git a/iokit/Families/IOGraphics/IOCursorBlits.h b/iokit/Families/IOGraphics/IOCursorBlits.h new file mode 100644 index 000000000..9f31b4f77 --- /dev/null +++ b/iokit/Families/IOGraphics/IOCursorBlits.h @@ -0,0 +1,425 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#define RBMASK 0xF0F0 /* Short, or 16 bit format */ +#define GAMASK 0x0F0F /* Short, or 16 bit format */ +#define AMASK 0x000F /* Short, or 16 bit format */ + +#if 1 +#define short34to35WithGamma(x) \ + ( (((x) & 0xf000) >> 1) \ + | (((x) & 0x0f00) >> 2) \ + | (((x) & 0x00f0) >> 3) \ + | (((x) & 0x8000) >> 5) \ + | (((x) & 0x0800) >> 6) \ + | (((x) & 0x0080) >> 7) ) + +#define short35to34WithGamma(x) \ + ( 0x000F \ + | (((x) & 0x001e) << 3) \ + | (((x) & 0x03c0) << 2) \ + | (((x) & 0x7800) << 1) ) +#else +#define short34to35WithGamma(x) \ + ( (_bm34To35SampleTable[((x) & 0x00F0) >> 4]) \ + | (_bm34To35SampleTable[((x) & 0x0F00) >> 8] << 5) \ + | (_bm34To35SampleTable[(x) >> 12] << 10) ) + +#define short35to34WithGamma(x) \ + ( 0x000F \ + | (_bm35To34SampleTable[x & 0x001F] << 4) \ + | (_bm35To34SampleTable[(x & 0x03E0) >> 5] << 8) \ + | (_bm35To34SampleTable[(x & 0x7C00) >> 10] << 12) ) +#endif + +void IOFramebuffer::StdFBDisplayCursor555( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned short *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ) +{ + int i, j; + volatile unsigned short *cursPtr; + volatile unsigned short *savePtr; + unsigned short s, d, f; + unsigned char *_bm34To35SampleTable; + unsigned char *_bm35To34SampleTable; + + savePtr = (volatile unsigned short *) 
inst->cursorSave; + cursPtr = (volatile unsigned short *) inst->cursorImages[ shmem->frame ]; + cursPtr += cursStart; + + _bm34To35SampleTable = inst->colorConvert.t._bm34To35SampleTable; + _bm35To34SampleTable = inst->colorConvert.t._bm35To34SampleTable; + + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) { + d = *savePtr++ = *vramPtr; + if ( (s = *cursPtr++) == 0 ) + { /* Transparent black area. Leave dst as is. */ + ++vramPtr; + continue; + } + if ( (f = (~s) & (unsigned int)AMASK) == 0 ) + { /* Opaque cursor pixel. Mark it. */ + *vramPtr++ = short34to35WithGamma(s); + continue; + } + if ((f == AMASK)) + { /* Transparent non black cursor pixel. xor it. */ + *vramPtr++ = d ^ short34to35WithGamma(s); + continue; + } + /* Alpha is not 0 or 1.0. Sover the cursor. */ + d = short35to34WithGamma(d); + d = s + (((((d & RBMASK)>>4)*f + GAMASK) & RBMASK) + | ((((d & GAMASK)*f+GAMASK)>>4) & GAMASK)); + *vramPtr++ = short34to35WithGamma(d); + } + cursPtr += cursRow; /* starting point of next cursor line */ + vramPtr += vramRow; /* starting point of next screen line */ + } +} + +void IOFramebuffer::StdFBDisplayCursor444( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned short *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ) +{ + int i, j; + volatile unsigned short *savePtr; /* saved screen data pointer */ + volatile unsigned short *cursPtr; + unsigned short s, d, f; + + savePtr = (volatile unsigned short *) inst->cursorSave; + cursPtr = (volatile unsigned short *) inst->cursorImages[ shmem->frame ]; + cursPtr += cursStart; + + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) { + d = *savePtr++ = *vramPtr; + if ( (s = *cursPtr++) == 0 ) + { /* Transparent black area. Leave dst as is. */ + ++vramPtr; + continue; + } + if ( (f = (~s) & (unsigned int)AMASK) == 0 ) + { /* Opaque cursor pixel. Mark it. 
*/ + *vramPtr++ = s; + continue; + } + if ((f == AMASK)) + { /* Transparent non black cursor pixel. xor it. */ + *vramPtr++ = d ^ s; + continue; + } + /* Alpha is not 0 or 1.0. Sover the cursor. */ + *vramPtr++ = s + (((((d & RBMASK)>>4)*f + GAMASK) & RBMASK) + | ((((d & GAMASK)*f+GAMASK)>>4) & GAMASK)); + } + cursPtr += cursRow; /* starting point of next cursor line */ + vramPtr += vramRow; /* starting point of next screen line */ + } +} + +static inline unsigned int MUL32(unsigned int a, unsigned int b) +{ + unsigned int v, w; + + v = ((a & 0xff00ff00) >> 8) * b; + v += ((v & 0xff00ff00) >> 8) + 0x00010001; + w = (a & 0x00ff00ff) * b; + w += ((w & 0xff00ff00) >> 8) + 0x00010001; + + return (v & 0xff00ff00) | ((w >> 8) & 0x00ff00ff); +} + +static inline unsigned char map32to256( unsigned char *directToLogical, unsigned int s) +{ + unsigned char logicalValue; + + if ((s ^ (s>>8)) & 0x00ffff00) { + logicalValue = directToLogical[(s>>24) + 0] + + directToLogical[((s>>16)&0xff) + 256] + + directToLogical[((s>>8)&0xff) + 512]; + } else { + logicalValue = directToLogical[(s>>24) + 768]; + } + // final conversion to actual palette + return( directToLogical[ logicalValue + 1024 ]); +} + +void IOFramebuffer::StdFBDisplayCursor8P( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned char *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ) +{ + int i, j; + volatile unsigned char *savePtr; /* saved screen data pointer */ + volatile unsigned char *cursPtr; + unsigned char dst, src, alpha, white; + unsigned int rgb32val; + volatile unsigned char *maskPtr; /* cursor mask pointer */ + unsigned int *_bm256To38SampleTable + = inst->colorConvert.t._bm256To38SampleTable; + unsigned char *_bm38To256SampleTable + = inst->colorConvert.t._bm38To256SampleTable; + + savePtr = (volatile unsigned char *) inst->cursorSave; + cursPtr = (volatile unsigned char *) inst->cursorImages[ shmem->frame ]; + maskPtr = (volatile 
unsigned char *) inst->cursorMasks[ shmem->frame ]; + cursPtr += cursStart; + maskPtr += cursStart; + + white = inst->white; + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; savePtr++,maskPtr++,cursPtr++,vramPtr++) { + dst = *savePtr = *vramPtr; + src = *cursPtr; + if ((alpha = *maskPtr)) { + if ((alpha = ~alpha)) { + rgb32val = _bm256To38SampleTable[dst]; + rgb32val = (_bm256To38SampleTable[src] & ~0xff) + + MUL32(rgb32val, alpha); + *vramPtr = map32to256(_bm38To256SampleTable, rgb32val); + } else + *vramPtr = src; + } else if (src == white) + *vramPtr = map32to256(_bm38To256SampleTable, + _bm256To38SampleTable[dst] ^ 0xffffffff); + } + cursPtr += cursRow; /* starting point of next cursor line */ + maskPtr += cursRow; + vramPtr += vramRow; /* starting point of next screen line */ + } +} + + +void IOFramebuffer::StdFBDisplayCursor8G( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned char *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ) +{ + int i, j; + volatile unsigned char *savePtr; /* saved screen data pointer */ + unsigned short s, d, a; + volatile unsigned char *cursPtr; + volatile unsigned char *maskPtr; /* cursor mask pointer */ + + savePtr = (volatile unsigned char *) inst->cursorSave; + cursPtr = (volatile unsigned char *) inst->cursorImages[ shmem->frame ]; + maskPtr = (volatile unsigned char *) inst->cursorMasks[ shmem->frame ]; + cursPtr += cursStart; + maskPtr += cursStart; + + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) { + int t; + d = *savePtr++ = *vramPtr; + s = *cursPtr++; + a = *maskPtr++; + if (a) { + t = d * (255 - *maskPtr++); + *vramPtr = s + ((t + (t >> 8) + 1) >> 8); + } else if (s) + *vramPtr = d ^ s; + vramPtr++; + } + cursPtr += cursRow; /* starting point of next cursor line */ + maskPtr += cursRow; + vramPtr += vramRow; /* starting point of next screen line */ + } +} + +void IOFramebuffer::StdFBDisplayCursor32Axxx( + 
IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned int *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ) +{ + int i, j; + volatile unsigned int *savePtr; /* saved screen data pointer */ + unsigned int s, d, f; + volatile unsigned int *cursPtr; + + savePtr = (volatile unsigned int *) inst->cursorSave; + cursPtr = (volatile unsigned int *) inst->cursorImages[ shmem->frame ]; + cursPtr += cursStart; + + /* Pixel format is Axxx */ + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) { + d = *savePtr++ = *vramPtr; + s = *cursPtr++; + f = s >> 24; + if (f) { + if (f == 0xff) // Opaque pixel + *vramPtr++ = s; + else { // SOVER the cursor pixel + s <<= 8; d <<= 8; /* Now pixels are xxxA */ + f ^= 0xFF; + d = s+(((((d&0xFF00FF00)>>8)*f+0x00FF00FF)&0xFF00FF00) + | ((((d & 0x00FF00FF)*f+0x00FF00FF)>>8) & + 0x00FF00FF)); + *vramPtr++ = (d>>8) | 0xff000000; + } + } else if (s) { + // Transparent non black cursor pixel. xor it. 
+ *vramPtr++ = d ^ s; + continue; + } else // Transparent cursor pixel + vramPtr++; + } + cursPtr += cursRow; /* starting point of next cursor line */ + vramPtr += vramRow; /* starting point of next screen line */ + } +} + +void IOFramebuffer::StdFBDisplayCursor32xxxA( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned int *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ) +{ + int i, j; + volatile unsigned int *savePtr; /* saved screen data pointer */ + unsigned int s, d, f; + volatile unsigned int *cursPtr; + + savePtr = (volatile unsigned int *) inst->cursorSave; + cursPtr = (volatile unsigned int *) inst->cursorImages[ shmem->frame ]; + cursPtr += cursStart; + + /* Pixel format is xxxA */ + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) { + d = *savePtr++ = *vramPtr; + s = *cursPtr++; + f = s & (unsigned int)0xFF; + if (f) { + if (f == 0xff) // Opaque pixel + *vramPtr++ = s; + else { // SOVER the cursor pixel + f ^= 0xFF; + d = s+(((((d&0xFF00FF00)>>8)*f+0x00FF00FF)&0xFF00FF00) + | ((((d & 0x00FF00FF)*f+0x00FF00FF)>>8) & + 0x00FF00FF)); + *vramPtr++ = d; + } + } else if (s) { + // Transparent non black cursor pixel. xor it. 
+ *vramPtr++ = d ^ s; + continue; + } else // Transparent cursor pixel + vramPtr++; + } + cursPtr += cursRow; /* starting point of next cursor line */ + vramPtr += vramRow; /* starting point of next screen line */ + } +} + +void IOFramebuffer::StdFBRemoveCursor16( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned short *vramPtr, + unsigned int vramRow, + int width, + int height ) +{ + int i, j; + volatile unsigned short *savePtr; + + savePtr = (volatile unsigned short *) inst->cursorSave; + + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) + *vramPtr++ = *savePtr++; + vramPtr += vramRow; + } +} + +void IOFramebuffer::StdFBRemoveCursor8( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned char *vramPtr, + unsigned int vramRow, + int width, + int height ) +{ + int i, j; + volatile unsigned char *savePtr; + + savePtr = (volatile unsigned char *) inst->cursorSave; + + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) + *vramPtr++ = *savePtr++; + vramPtr += vramRow; + } +} + +void IOFramebuffer::StdFBRemoveCursor32( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned int *vramPtr, + unsigned int vramRow, + int width, + int height ) +{ + int i, j; + volatile unsigned int *savePtr; + + savePtr = (volatile unsigned int *) inst->cursorSave; + + for (i = height; --i >= 0; ) { + for (j = width; --j >= 0; ) + *vramPtr++ = *savePtr++; + vramPtr += vramRow; + } +} diff --git a/iokit/Families/IOGraphics/IODisplay.cpp b/iokit/Families/IOGraphics/IODisplay.cpp new file mode 100644 index 000000000..8bcf2fdd9 --- /dev/null +++ b/iokit/Families/IOGraphics/IODisplay.cpp @@ -0,0 +1,518 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 18 May 98 - make loadable. + * sdouglas 23 Jul 98 - start IOKit + * sdouglas 08 Dec 98 - start cpp + */ + +#include +#include +#include +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +const OSSymbol * gIODisplayParametersKey; +const OSSymbol * gIODisplayGUIDKey; + +const OSSymbol * gIODisplayValueKey; +const OSSymbol * gIODisplayMinValueKey; +const OSSymbol * gIODisplayMaxValueKey; + +const OSSymbol * gIODisplayContrastKey; +const OSSymbol * gIODisplayBrightnessKey; +const OSSymbol * gIODisplayHorizontalPositionKey; +const OSSymbol * gIODisplayHorizontalSizeKey; +const OSSymbol * gIODisplayVerticalPositionKey; +const OSSymbol * gIODisplayVerticalSizeKey; +const OSSymbol * gIODisplayTrapezoidKey; +const OSSymbol * gIODisplayPincushionKey; +const OSSymbol * gIODisplayParallelogramKey; +const OSSymbol * gIODisplayRotationKey; + +const OSSymbol * gIODisplayParametersCommitKey; +const OSSymbol * gIODisplayParametersDefaultKey; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOService + +OSDefineMetaClass( IODisplay, IOService ) +OSDefineAbstractStructors( IODisplay, 
IOService ) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IODisplay::initialize( void ) +{ + gIODisplayParametersKey = OSSymbol::withCStringNoCopy( + kIODisplayParametersKey ); + gIODisplayGUIDKey = OSSymbol::withCStringNoCopy( + kIODisplayGUIDKey ); + gIODisplayValueKey = OSSymbol::withCStringNoCopy( + kIODisplayValueKey ); + gIODisplayMinValueKey = OSSymbol::withCStringNoCopy( + kIODisplayMinValueKey ); + gIODisplayMaxValueKey = OSSymbol::withCStringNoCopy( + kIODisplayMaxValueKey ); + gIODisplayContrastKey = OSSymbol::withCStringNoCopy( + kIODisplayContrastKey ); + gIODisplayBrightnessKey = OSSymbol::withCStringNoCopy( + kIODisplayBrightnessKey ); + gIODisplayHorizontalPositionKey = OSSymbol::withCStringNoCopy( + kIODisplayHorizontalPositionKey ); + gIODisplayHorizontalSizeKey = OSSymbol::withCStringNoCopy( + kIODisplayHorizontalSizeKey ); + gIODisplayVerticalPositionKey = OSSymbol::withCStringNoCopy( + kIODisplayVerticalPositionKey ); + gIODisplayVerticalSizeKey = OSSymbol::withCStringNoCopy( + kIODisplayVerticalSizeKey ); + gIODisplayTrapezoidKey = OSSymbol::withCStringNoCopy( + kIODisplayTrapezoidKey ); + gIODisplayPincushionKey = OSSymbol::withCStringNoCopy( + kIODisplayPincushionKey ); + gIODisplayParallelogramKey = OSSymbol::withCStringNoCopy( + kIODisplayParallelogramKey ); + gIODisplayRotationKey = OSSymbol::withCStringNoCopy( + kIODisplayRotationKey ); + + gIODisplayParametersCommitKey = OSSymbol::withCStringNoCopy( + kIODisplayParametersCommitKey ); + gIODisplayParametersDefaultKey = OSSymbol::withCStringNoCopy( + kIODisplayParametersDefaultKey ); +} + +IOService * IODisplay::probe( IOService * provider, + SInt32 * score ) +{ + connection = OSDynamicCast(IODisplayConnect, provider); + + return( this ); +} + +IODisplayConnect * IODisplay::getConnection( void ) +{ + return( connection ); +} + + +IOReturn IODisplay::getGammaTableByIndex( + UInt32 * /* channelCount */, UInt32 * /* dataCount */, + UInt32 * /* 
dataWidth */, void ** /* data */ ) +{ + return( kIOReturnUnsupported); +} + + +bool IODisplay::start( IOService * provider ) +{ + if ( super::start(provider) ) { + if ( connection != NULL ) { + displayPMVars = (DisplayPMVars *)IOMalloc(sizeof(DisplayPMVars)); // make space for our variables + assert( displayPMVars ); + displayPMVars->displayIdle = false; // initialize some + initForPM(provider); // initialize power management of the device + registerService(); + } + return true; + } + return false; +} + +IOReturn IODisplay::setProperties( OSObject * properties ) +{ + IOService * handler; + OSDictionary * dict; + OSDictionary * dict2; + + dict = OSDynamicCast( OSDictionary, properties); + if( !dict) + return( kIOReturnUnsupported ); + + dict2 = OSDynamicCast( OSDictionary, dict->getObject(gIODisplayParametersKey)); + if( dict2) + dict = dict2; + + handler = getClientWithCategory(gIODisplayParametersKey); + if( !handler) + return( kIOReturnUnsupported ); + + return( handler->setProperties( dict ) ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* +This is the power-controlling driver for a display. It also acts as an agent of the policy-maker for display power + which is the DisplayWrangler. The Display Wrangler calls here to lower power by one state when it senses + no user activity. It also calls here to make the display usable after it has been idled down, and it also calls + here to make the display barely usable if it senses a power emergency (e.g. low battery). + + This driver assumes a video display, and it calls the framebuffer driver to control the sync signals. Non-video + display drivers (e.g. flat panels) subclass IODisplay and override this and other appropriate methods. 
+ */ + +static IOPMPowerState ourPowerStates[kIODisplayMaxPowerStates] = { + {1,0,0,0,0,0,0,0,0,0,0,0}, +// {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, +// {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable+IOPMMaxPerformance,0,IOPMPowerOn,0,0,0,0,0,0,0,0} +}; + + +void IODisplay::initForPM ( IOService * provider ) +{ + UInt32 capabilities = 0; + unsigned long number_of_power_states; + UInt32 currentSyncs = 0; + IOReturn err; + + displayPMVars->connectIndex = connection->getConnection(); // find out our index in the nub + + // what are the sync-controlling capabilities of the framebuffer? + err = connection->getAttributeForConnection( displayPMVars->connectIndex, + kConnectionSyncEnable, &capabilities ); + + // find out current state of sync lines + err = connection->getAttributeForConnection( displayPMVars->connectIndex, + kConnectionSyncFlags, ¤tSyncs ); + + displayPMVars->currentSyncs = currentSyncs; + displayPMVars->powerControllable = true; + + if ( (capabilities & kIOHSyncDisable) && + (capabilities & kIOVSyncDisable) && + !(capabilities & kIONoSeparateSyncControl ) ) { // four power states + number_of_power_states = 4; + displayPMVars->syncControls[0] = 0 | kIOHSyncDisable | kIOVSyncDisable | kIOCSyncDisable; + displayPMVars->syncControls[1] = 0 | kIOVSyncDisable | kIOCSyncDisable; + displayPMVars->syncControls[2] = 0 | kIOHSyncDisable | kIOCSyncDisable; + displayPMVars->syncControls[3] = 0; + displayPMVars->syncMask = capabilities & (kIOHSyncDisable | kIOVSyncDisable | kIOCSyncDisable); + } + else { + if ( capabilities & kIOCSyncDisable ) { // two power states + number_of_power_states = 2; + ourPowerStates[1].capabilityFlags = ourPowerStates[3].capabilityFlags; + displayPMVars->syncControls[0] = 0 | kIOCSyncDisable; + displayPMVars->syncControls[1] = 0; + displayPMVars->syncMask = 0 | kIOCSyncDisable; + } + else { // two power states and not controllable + number_of_power_states = 2; + 
ourPowerStates[1].capabilityFlags = ourPowerStates[3].capabilityFlags; + ourPowerStates[0].capabilityFlags |= IOPMNotAttainable; + ourPowerStates[1].capabilityFlags |= IOPMNotAttainable; + displayPMVars->syncControls[0] = displayPMVars->currentSyncs; + displayPMVars->syncControls[1] = displayPMVars->currentSyncs; + displayPMVars->syncMask = displayPMVars->currentSyncs; + displayPMVars->powerControllable = false; + } + } + + PMinit(); // initialize superclass variables + provider->joinPMtree(this); // attach into the power management hierarchy + + registerPowerDriver(this,ourPowerStates,number_of_power_states); // register ourselves with policy-maker (us) + +} + + +//********************************************************************************* +// registerPowerDriver +// +// We intercept this call to our superclass just to snoop early on +// the number of power states. +//********************************************************************************* + +IOReturn IODisplay::registerPowerDriver ( IOService* x, IOPMPowerState*y, unsigned long numberOfStates ) +{ + displayPMVars->max_display_state = numberOfStates - 1; + return super::registerPowerDriver(x,y,numberOfStates); +} + + +//********************************************************************************* +// setAggressiveness +// +// We are informed by our power domain parent of a new level of "power management +// aggressiveness". Our only interest is if it implies a power management +// emergency, in which case we keep the display brightness low. 
+//********************************************************************************* + +IOReturn IODisplay::setAggressiveness ( unsigned long type, unsigned long newLevel ) +{ + unsigned long i; + + if ( type == kPMGeneralAggressiveness ) { + if ( newLevel >= kIOPowerEmergencyLevel ) { // emergency level + for ( i = 0; i < pm_vars->theNumberOfPowerStates; i++ ) { // find lowest usable state + if ( pm_vars->thePowerStates[i].capabilityFlags & IOPMDeviceUsable ) { + break; + } + } + displayPMVars->max_display_state = i; + if ( pm_vars->myCurrentState > i ) { // if we are currently above that, + changePowerStateToPriv(i); // drop to emergency level + } + } + else { // not emergency level + if ( pm_vars->aggressiveness >= kIOPowerEmergencyLevel ) { // but it was emergency level + displayPMVars->max_display_state = pm_vars->theNumberOfPowerStates - 1; + if ( ! displayPMVars->displayIdle ) { + changePowerStateToPriv(displayPMVars->max_display_state); // return to normal usable level + } + } + } + } + super::setAggressiveness(type, newLevel); + return IOPMNoErr; +} + + +// ********************************************************************************** +// dropOneLevel +// +// Called by the display wrangler when it decides there hasn't been user +// activity for a while. We drop one power level. This can be called by the +// display wrangler before we have been completely initialized. 
+// ********************************************************************************** +void IODisplay::dropOneLevel ( void ) +{ + if ( initialized && displayPMVars->powerControllable) { + displayPMVars->displayIdle = true; + if ( pm_vars != NULL ) { + if ( pm_vars->myCurrentState > 0 ) { + changePowerStateToPriv(pm_vars->myCurrentState - 1); // drop a level + } + else { + changePowerStateToPriv(0); // this may rescind previous request for domain power + } + } + } +} + + +//********************************************************************************* +// makeDisplayUsable +// +// The DisplayWrangler has sensed user activity after we have idled the +// display and wants us to make it usable again. We are running on its +// workloop thread. This can be called before we are completely +// initialized. +//********************************************************************************* +void IODisplay::makeDisplayUsable ( void ) +{ + if ( initialized && displayPMVars->powerControllable) { + displayPMVars->displayIdle = false; + if ( pm_vars != NULL ) { + changePowerStateToPriv(displayPMVars->max_display_state); + } + } +} + + +// ********************************************************************************** +// setPowerState +// +// Called by the superclass to change the display power state. 
+// ********************************************************************************** +IOReturn IODisplay::setPowerState ( unsigned long powerStateOrdinal, IOService* whatDevice ) +{ + UInt32 flags; + if( initialized) { + flags =(displayPMVars->syncControls[powerStateOrdinal])<<8; + flags |= displayPMVars->syncMask; + displayPMVars->currentSyncs = displayPMVars->syncControls[powerStateOrdinal]; + connection->setAttributeForConnection( displayPMVars->connectIndex, kConnectionSyncEnable, flags ); + } + return IOPMAckImplied; +} + + +// ********************************************************************************** +// maxCapabilityForDomainState +// +// This simple device needs only power. If the power domain is supplying +// power, the display can go to its highest state. If there is no power +// it can only be in its lowest state, which is off. +// ********************************************************************************** +unsigned long IODisplay::maxCapabilityForDomainState ( IOPMPowerFlags domainState ) +{ + if ( domainState & IOPMPowerOn ) { + return pm_vars->theNumberOfPowerStates-1; + } + else { + return 0; + } +} + + +// ********************************************************************************** +// initialPowerStateForDomainState +// +// The power domain may be changing state. If power is on in the new +// state, that will not affect our state at all. In that case ask the ndrv +// what our current state is. If domain power is off, we can attain +// only our lowest state, which is off. 
+// ********************************************************************************** +unsigned long IODisplay::initialPowerStateForDomainState ( IOPMPowerFlags domainState ) +{ + long unsigned i; + + if ( domainState & IOPMPowerOn ) { // domain has power + for ( i = pm_vars->theNumberOfPowerStates-1; i > 0; i-- ) { // compare to our table to find current power state + if ( (displayPMVars->syncControls[i] & displayPMVars->syncMask) + == (displayPMVars->currentSyncs & displayPMVars->syncMask) ) { + break; + } + } + return i; + } + else { + return 0; // domain is down, so display is off + } +} + + +// ********************************************************************************** +// powerStateForDomainState +// +// The power domain may be changing state. If power is on in the new +// state, that will not affect our state at all. In that case ask the ndrv +// what our current state is. If domain power is off, we can attain +// only our lowest state, which is off. +// ********************************************************************************** +unsigned long IODisplay::powerStateForDomainState ( IOPMPowerFlags domainState ) +{ + long unsigned i; + + if ( domainState & IOPMPowerOn ) { // domain has power + for ( i = pm_vars->theNumberOfPowerStates-1; i > 0; i-- ) { // compare to our table to find current power state + if ( (displayPMVars->syncControls[i] & displayPMVars->syncMask) + == (displayPMVars->currentSyncs & displayPMVars->syncMask) ) { + break; + } + } + return i; + } + else { + return 0; // domain is down, so display is off + } +} + + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IODisplay + +OSDefineMetaClassAndStructors(AppleSenseDisplay, IODisplay) + + +IOService * AppleSenseDisplay::probe( IOService * provider, + SInt32 * score ) +{ + IODisplayConnect * connect; + IOFramebuffer * framebuffer; + IOService * ret = 0; + UInt32 sense, extSense; + UInt32 senseType, displayType; + + do { 
+ + if( 0 == super::probe( provider, score )) + continue; + + connect = getConnection(); + if( !connect) + continue; + + framebuffer = connect->getFramebuffer(); + assert( framebuffer ); + + if( kIOReturnSuccess != framebuffer->getAttributeForConnection( + connect->getConnection(), + kConnectionSupportsAppleSense, NULL )) + continue; + + ret = this; + + if( kIOReturnSuccess != framebuffer->getAppleSense( + connect->getConnection(), + &senseType, &sense, &extSense, &displayType )) + continue; + sense = ((sense & 0xff) << 8) | (extSense & 0xff); + setProperty( kDisplayProductID, sense, 32); + setProperty( kDisplayVendorID, kDisplayVendorIDUnknown, 32); + setProperty( "AppleDisplayType", displayType, 32); + + } while( false); + + return( ret ); +} + +IOReturn AppleSenseDisplay::getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ) +{ + IOFramebuffer * framebuffer; + IODisplayConnect * connect; + + connect = getConnection(); + framebuffer = connect->getFramebuffer(); + + return( framebuffer->connectFlags( + connect->getConnection(), + mode, flags )); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +#undef super +#define super IODisplay + +OSDefineMetaClassAndStructors(AppleNoSenseDisplay, IODisplay) + + +IOReturn AppleNoSenseDisplay::getConnectFlagsForDisplayMode( + IODisplayModeID /* mode */, UInt32 * flags) +{ + *flags = kDisplayModeValidFlag | kDisplayModeSafeFlag; + + setProperty( kDisplayProductID, kDisplayProductIDGeneric, 32); + setProperty( kDisplayVendorID, kDisplayVendorIDUnknown, 32); + + return( kIOReturnSuccess ); +} + + diff --git a/iokit/Families/IOGraphics/IODisplayWrangler.cpp b/iokit/Families/IOGraphics/IODisplayWrangler.cpp new file mode 100644 index 000000000..01502cede --- /dev/null +++ b/iokit/Families/IOGraphics/IODisplayWrangler.cpp @@ -0,0 +1,720 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 18 Mar 99 - first checked in. + */ + + +#include +#include +#include + +#include "IODisplayWrangler.h" + +bool wranglerHasRoot( OSObject * us, void *, IOService * yourDevice ); + +#define DODEFAULTMODE 0 + +#if DODEFAULTMODE +enum { + kAquaMinWidth = 800, + kAquaMinHeight = 600 +}; +#endif + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// tiddly nub + +#undef super +#define super IOService + +OSDefineMetaClassAndStructors(IODisplayConnect, IOService) + +bool IODisplayConnect::initWithConnection( IOIndex _connection ) +{ + char name[ 12 ]; + + if( !super::init()) + return( false); + + connection = _connection; + + sprintf( name, "display%ld", connection); + + setName( name); + + return( true); +} + +IOFramebuffer * IODisplayConnect::getFramebuffer( void ) +{ + return( (IOFramebuffer *) getProvider()); +} + +IOIndex IODisplayConnect::getConnection( void ) +{ + return( connection); +} + +IOReturn IODisplayConnect::getAttributeForConnection( IOIndex connectIndex, IOSelect selector, UInt32 
* value ) +{ + return ((IOFramebuffer *) getProvider())->getAttributeForConnection( connectIndex, selector, value ); +} + +IOReturn IODisplayConnect::setAttributeForConnection( IOIndex connectIndex, IOSelect selector, UInt32 info ) +{ + return ((IOFramebuffer *) getProvider())->setAttributeForConnection( connectIndex, selector, info ); +} + + +//********************************************************************************* +// joinPMtree +// +// The policy-maker in the display driver calls here when initializing. +// We attach it into the power management hierarchy as a child of our +// frame buffer. +//********************************************************************************* +void IODisplayConnect::joinPMtree ( IOService * driver ) +{ + getProvider()->addPowerChild(driver); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService +OSDefineMetaClassAndStructors(IODisplayWrangler, IOService); + +IODisplayWrangler * gIODisplayWrangler; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IODisplayWrangler::start( IOService * provider ) +{ + OSObject * notify; + + if( !super::start( provider)) + return( false); + + assert( gIODisplayWrangler == 0 ); + gIODisplayWrangler = this; + + fMatchingLock = IOLockAlloc(); + fFramebuffers = OSSet::withCapacity( 1 ); + fDisplays = OSSet::withCapacity( 1 ); + + assert( fMatchingLock && fFramebuffers && fDisplays ); + + notify = addNotification( gIOPublishNotification, + serviceMatching("IODisplay"), _displayHandler, + this, fDisplays ); + assert( notify ); + + notify = addNotification( gIOPublishNotification, + serviceMatching("IODisplayConnect"), _displayConnectHandler, + this, 0, 50000 ); + assert( notify ); + + rootDomain = NULL; + addNotification( gIOPublishNotification,serviceMatching("IOPMrootDomain"), // look for the Root Domain + (IOServiceNotificationHandler)wranglerHasRoot, this, 0 ); + + // initialize power managment + 
gIODisplayWrangler->initForPM(); + // set default screen-dim timeout + gIODisplayWrangler->setAggressiveness ( kPMMinutesToDim, 30 ); + + return( true ); +} + + +bool wranglerHasRoot( OSObject * us, void *, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((IODisplayWrangler *)us)->rootDomain = yourDevice; + } + return true; +} + + +bool IODisplayWrangler::_displayHandler( void * target, void * ref, + IOService * newService ) +{ + + return( ((IODisplayWrangler *)target)->displayHandler( (OSSet *) ref, + (IODisplay *) newService )); +} + +bool IODisplayWrangler::_displayConnectHandler( void * target, void * ref, + IOService * newService ) +{ + return( ((IODisplayWrangler *)target)->displayConnectHandler( ref, + (IODisplayConnect *) newService )); +} + +bool IODisplayWrangler::displayHandler( OSSet * set, + IODisplay * newDisplay ) +{ + assert( OSDynamicCast( IODisplay, newDisplay )); + + IOTakeLock( fMatchingLock ); + + set->setObject( newDisplay ); + + IOUnlock( fMatchingLock ); + + return( true ); +} + +bool IODisplayWrangler::displayConnectHandler( void * /* ref */, + IODisplayConnect * connect ) +{ + SInt32 score = 50000; + OSIterator * iter; + IODisplay * display; + bool found = false; + + assert( OSDynamicCast( IODisplayConnect, connect )); + + IOTakeLock( fMatchingLock ); + + iter = OSCollectionIterator::withCollection( fDisplays ); + if( iter) { + while( !found && (display = (IODisplay *) iter->getNextObject())) { + if( display->getConnection()) + continue; + + do { + if( !display->attach( connect )) + continue; + found = ((display->probe( connect, &score )) + && (display->start( connect ))); + if( !found) + display->detach( connect ); + } while( false); + } + iter->release(); + } + + IOUnlock( fMatchingLock ); + + return( true); +} + +IOReturn IODisplayWrangler::clientStart( IOFramebuffer * fb ) +{ + IOReturn err = kIOReturnSuccess; + +// IOTakeLock( fFBLock ); + + if( gIODisplayWrangler && + gIODisplayWrangler->fFramebuffers->setObject( fb )) { 
+ + // framebuffer not yet done + + err = fb->open(); + if( kIOReturnSuccess == err) { + gIODisplayWrangler->makeDisplayConnects( fb ); + gIODisplayWrangler->findStartupMode( fb ); + } + // try to open it next time + // else gIODisplayWrangler->fFramebuffers->removeObject( fb ); + } + +// IOUnlock( fFBLock ); + + return( err ); +} + +bool IODisplayWrangler::makeDisplayConnects( IOFramebuffer * fb ) +{ + IODisplayConnect * connect; + IOItemCount i; + + for( i = 0; i < fb->getConnectionCount(); i++) { + + connect = new IODisplayConnect; + if( 0 == connect) + continue; + + if( (connect->initWithConnection( i )) + && (connect->attach( fb ))) { + + connect->registerService( kIOServiceSynchronous ); + } + connect->release(); + } + + return( true ); +} + +IODisplayConnect * IODisplayWrangler::getDisplayConnect( + IOFramebuffer * fb, IOIndex connect ) +{ + OSIterator * iter; + OSObject * next; + IODisplayConnect * connection = 0; + + iter = fb->getClientIterator(); + if( iter) { + while( (next = iter->getNextObject())) { + connection = OSDynamicCast( IODisplayConnect, next); + if( connection && (0 == (connect--))) + break; + } + iter->release(); + } + return( connection ); +} + + +IOReturn IODisplayWrangler::getConnectFlagsForDisplayMode( + IODisplayConnect * connect, + IODisplayModeID mode, UInt32 * flags ) +{ + IOReturn err = kIOReturnUnsupported; + IODisplay * display; + + display = OSDynamicCast( IODisplay, connect->getClient()); + if( display) + err = display->getConnectFlagsForDisplayMode( mode, flags ); + else { + kprintf("%s: no display\n", connect->getFramebuffer()->getName()); + err = connect->getFramebuffer()->connectFlags( + connect->getConnection(), mode, flags ); + } + + return( err ); +} + +IOReturn IODisplayWrangler::getFlagsForDisplayMode( + IOFramebuffer * fb, + IODisplayModeID mode, UInt32 * flags ) +{ + IODisplayConnect * connect; + + // should look at all connections + connect = gIODisplayWrangler->getDisplayConnect( fb, 0 ); + if( !connect) { + 
kprintf("%s: no display connect\n", fb->getName()); + return( kIOReturnUnsupported ); + } + + return( gIODisplayWrangler-> + getConnectFlagsForDisplayMode( connect, mode, flags )); +} + +IOReturn IODisplayWrangler::getDefaultMode( IOFramebuffer * fb, + IODisplayModeID * mode, IOIndex * depth ) +{ +#if DODEFAULTMODE + UInt32 thisFlags, bestFlags = 0; + IODisplayModeID thisMode, bestMode = 0; + IOIndex bestDepth; + UInt32 i; + IOReturn err; + IODisplayModeInformation info; + char arg[ 64 ]; + const char * param; + UInt32 lookWidth, lookHeight, lookRefresh, lookDepth; + static const char bitsToIndex[] = { 0, 0, 1, 1, 2 }; + UInt32 numModes; + IODisplayModeID * allModes; + bool foundForced; + bool killedDefault = false; + bool haveSubst = false; + + numModes = fb->getDisplayModeCount(); + allModes = IONew( IODisplayModeID, numModes ); + + if( NULL == allModes) + return( kIOReturnNoMemory); + err = fb->getDisplayModes( allModes ); + if( err) // leak + return( err ); + + if( PE_parse_boot_arg("dm", arg)) { + + param = arg; + lookWidth = strtol( param, (char **) ¶m, 0); + param++; + lookHeight = strtol( param, (char **) ¶m, 0); + param++; + lookRefresh = strtol( param, (char **) ¶m, 0); + param++; + lookDepth = strtol( param, (char **) ¶m, 0); + if( lookDepth == 15) + lookDepth = 16; + if( lookDepth > 32) + lookDepth = 32; + +kprintf("%s: Looking %dx%d@%d,%d\n", fb->getName(), lookWidth, lookHeight, + lookRefresh, lookDepth ); + + } else { + param = 0; + lookWidth = 1024; + lookHeight = 768; + lookRefresh = 75; + lookDepth = 16; + } + + bestDepth = bitsToIndex[ lookDepth / 8 ]; + + for( i = 0; i < numModes; i++) { + + thisMode = allModes[ i ]; + if( getFlagsForDisplayMode( fb, thisMode, &thisFlags)) + continue; + + // make sure it does 16/32 && requested mode + err = fb->getInformationForDisplayMode( thisMode, &info); + if( err) + continue; + if( 0 == info.maxDepthIndex) + continue; +#if 0 + kprintf("%d x %d @ %d = %x\n", info.nominalWidth, info.nominalHeight, + 
info.refreshRate >> 16, thisFlags); +#endif + + if( 0 == (thisFlags & kDisplayModeValidFlag)) + continue; + + foundForced = (param + && (info.nominalWidth == lookWidth) + && (info.nominalHeight == lookHeight) + && (((info.refreshRate + 0x8000) >> 16) == lookRefresh) ); + + if( (thisFlags & kDisplayModeDefaultFlag) + && ((info.nominalWidth < kAquaMinWidth) + || (info.nominalHeight < kAquaMinHeight)) ) { + + thisFlags &= ~kDisplayModeDefaultFlag; + killedDefault = true; + haveSubst = false; + + } else if( killedDefault + && (info.nominalWidth >= kAquaMinWidth) + && (info.nominalHeight >= kAquaMinHeight) ) { + + if( thisFlags & kDisplayModeSafeFlag) { + thisFlags |= kDisplayModeDefaultFlag; + killedDefault = false; + } else if( !haveSubst) { + thisFlags |= kDisplayModeDefaultFlag; + haveSubst = true; + } + } + + if( foundForced + || (thisFlags & kDisplayModeDefaultFlag) + || (((bestFlags & kDisplayModeDefaultFlag) == 0) + && (thisFlags & kDisplayModeSafeFlag)) ) { + + bestMode = thisMode; + bestFlags = thisFlags; + + bestDepth = bitsToIndex[ lookDepth / 8 ]; + if( bestDepth > info.maxDepthIndex) + bestDepth = info.maxDepthIndex; + + if( foundForced) + break; + } + } + + IODelete( allModes, IODisplayModeID, numModes ); + + if( bestMode) { + *mode = bestMode; + *depth = bestDepth; + return( kIOReturnSuccess); + } else +#endif /* DODEFAULTMODE */ + return( kIOReturnUnsupported); +} + +// Determine a startup mode given the framebuffer & displays + +IOReturn IODisplayWrangler::findStartupMode( IOFramebuffer * fb ) +{ + IODisplayModeID mode; + IOIndex depth; + IODisplayModeID startMode; + IOIndex startDepth; + IOReturn err; + + fb->getCurrentDisplayMode( &mode, &depth); + err = fb->getStartupDisplayMode( &startMode, &startDepth ); + if( err) { + startMode = mode; + startDepth = depth; + } + +#if DODEFAULTMODE + IODisplayModeInformation info; + UInt32 startFlags = 0; + + do { + err = getFlagsForDisplayMode( fb, startMode, &startFlags ); + if( err) + continue; + err = 
fb->getInformationForDisplayMode( startMode, &info); + if( err) + continue; + + if( (info.nominalWidth < kAquaMinWidth) + || (info.nominalHeight < kAquaMinHeight)) { + err = kIOReturnNoResources; + continue; + } + + if( startDepth == 2) + startDepth = 1; + + if( (startDepth == 0) && (info.maxDepthIndex > 0)) + startDepth = 1; + + } while( false ); + + if( err + || (startDepth == 0) + || ((startFlags & kDisplayModeValidFlag) + != kDisplayModeValidFlag) ) { + // look for default + err = getDefaultMode( fb, &startMode, &startDepth ); + } +#endif /* DODEFAULTMODE */ + if( (startMode != mode) || (startDepth != depth)) + fb->setDisplayMode( startMode, startDepth ); + + fb->setupForCurrentConfig(); + + return( kIOReturnSuccess ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define kNumber_of_power_states 5 + +static IOPMPowerState ourPowerStates[kNumber_of_power_states] = { + {1,0,0,0,0,0,0,0,0,0,0,0}, + {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, + {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, + {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, + {1,IOPMDeviceUsable,0,IOPMPowerOn,0,0,0,0,0,0,0,0} + +}; + + +/* + This is the Power Management policy-maker for the displays. It senses when the display is idle + and lowers power accordingly. It raises power back up when the display becomes un-idle. + + It senses idleness with a combination of an idle timer and the "activityTickle" method call. "activityTickle" + is called by objects which sense keyboard activity, mouse activity, or other button activity (display contrast, + display brightness, PCMCIA eject). The method sets a "displayInUse" flag. When the timer expires, + this flag is checked. If it is on, the display is judged "in use". The flag is cleared and the timer is restarted. + + If the flag is off when the timer expires, then there has been no user activity since the last timer + expiration, and the display is judged idle and its power is lowered. 
+ + The period of the timer is a function of the current value of Power Management aggressiveness. As that factor + varies from 1 to 999, the timer period varies from 1004 seconds to 6 seconds. Above 1000, the system is in + a very aggressive power management condition, and the timer period is 5 seconds. (In this case, the display dims + between five and ten seconds after the last user activity). + + This driver calls the drivers for each display and has them move their display between various power states. + When the display is idle, its power is dropped state by state until it is in the lowest state. When it becomes un-idle + it is powered back up to the state where it was last being used. + + In times of very high power management aggressiveness, the display will not be operated above the lowest power + state which is marked "usable". + + When Power Management is turned off (aggressiveness = 0), the display is never judged idle and never dimmed. + + We register with Power Management only so that we can be informed of changes in the Power Management + aggressiveness factor. We don't really have a device with power states so we implement the absolute minimum. + The display drivers themselves are part of the Power Management hierarchy under their respective frame buffers. 
+ + */ + + +// ********************************************************************************** +// initForPM +// +// ********************************************************************************** +void IODisplayWrangler::initForPM (void ) +{ + + PMinit(); // initialize superclass variables + + mins_to_dim = 0; + use_general_aggressiveness = false; + + pm_vars->thePlatform->PMRegisterDevice(0,this); // attach into the power management hierarchy + + registerPowerDriver(this,ourPowerStates,kNumber_of_power_states); // register ourselves with policy-maker (us) + + registerService(); // HID system is waiting for this + +} + + +//********************************************************************************* +// setAggressiveness +// +// We are informed by our power domain parent of a new level of "power management +// aggressiveness" which we use as a factor in our judgement of when we are idle. +// This change implies a change in our idle timer period, so restart that timer. +// timer. +//********************************************************************************* + +IOReturn IODisplayWrangler::setAggressiveness ( unsigned long type, unsigned long newLevel ) +{ + if ( type == kPMMinutesToDim ) { // minutes to dim received + if( newLevel == 0 ) { + if( pm_vars->myCurrentState < kNumber_of_power_states-1 ) { // pm turned off while idle? + makeDisplaysUsable(); // yes, bring displays up again + } + } + mins_to_dim = newLevel; + use_general_aggressiveness = false; + if ( pm_vars->aggressiveness < kIOPowerEmergencyLevel ) { // no, currently in emergency level? + setIdleTimerPeriod(newLevel*60); // no, set new timeout + } + } + if ( type == kPMGeneralAggressiveness ) { // general factor received + if ( newLevel >= kIOPowerEmergencyLevel ) { // emergency level? + setIdleTimerPeriod(5); // yes + } + else { + if ( pm_vars->aggressiveness >= kIOPowerEmergencyLevel ) { // no, coming out of emergency level? 
+ if (use_general_aggressiveness ) { // yes, set new timer period + setIdleTimerPeriod(333-(newLevel/3)); + } + else { + setIdleTimerPeriod(mins_to_dim*60); + } + } + else { + if (use_general_aggressiveness ) { // no, maybe set period + setIdleTimerPeriod(333-(newLevel/3)); + } + } + } + } + super::setAggressiveness(type, newLevel); + return IOPMNoErr; +} + + +// ********************************************************************************** +// activityTickle +// +// This is called by the HID system and calls the superclass in turn. +// ********************************************************************************** + +bool IODisplayWrangler::activityTickle ( unsigned long, unsigned long ) +{ + if ( rootDomain != NULL ) { + rootDomain->activityTickle (kIOPMSubclassPolicy); + } + return super::activityTickle (kIOPMSuperclassPolicy1,kNumber_of_power_states-1 ); +} + + +// ********************************************************************************** +// setPowerState +// +// The vanilla policy-maker in the superclass is changing our power state. +// If it's down, inform the displays to lower one state, too. If it's up, +// the idle displays are made usable. 
+// ********************************************************************************** +IOReturn IODisplayWrangler::setPowerState ( unsigned long powerStateOrdinal, IOService* whatDevice ) +{ + if ( powerStateOrdinal == 0 ) { // system is going to sleep + return IOPMNoErr; + } + if ( powerStateOrdinal < pm_vars->myCurrentState ) { // HI is idle, drop power + idleDisplays(); + return IOPMNoErr; + } + if ( powerStateOrdinal == kNumber_of_power_states-1 ) { // there is activity, raise power + makeDisplaysUsable(); + return IOPMNoErr; + } + return IOPMNoErr; +} + + +// ********************************************************************************** +// makeDisplaysUsable +// +// ********************************************************************************** +void IODisplayWrangler::makeDisplaysUsable ( void ) +{ + OSIterator * iter; + IODisplay * display; + + IOTakeLock( fMatchingLock ); + + iter = OSCollectionIterator::withCollection( fDisplays ); + if( iter ) { + while( (display = (IODisplay *) iter->getNextObject()) ) { + display->makeDisplayUsable(); + } + iter->release(); + } + IOUnlock( fMatchingLock ); +} + + +// ********************************************************************************** +// idleDisplays +// +// ********************************************************************************** +void IODisplayWrangler::idleDisplays ( void ) +{ + OSIterator * iter; + IODisplay * display; + + IOTakeLock( fMatchingLock ); + + iter = OSCollectionIterator::withCollection( fDisplays ); + if( iter ) { + while( (display = (IODisplay *) iter->getNextObject()) ) { + display->dropOneLevel(); + } + iter->release(); + } + IOUnlock( fMatchingLock ); +} + + diff --git a/iokit/Families/IOGraphics/IODisplayWrangler.h b/iokit/Families/IOGraphics/IODisplayWrangler.h new file mode 100644 index 000000000..0237a62ab --- /dev/null +++ b/iokit/Families/IOGraphics/IODisplayWrangler.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. + * + * + * HISTORY + * + */ + +#ifndef _IOKIT_IODISPLAYWRANGLER_H +#define _IOKIT_IODISPLAYWRANGLER_H + +#include +#include +#include + +class IOWorkLoop; +class IOCommandQueue; + +class IODisplayWrangler : public IOService +{ + OSDeclareDefaultStructors( IODisplayWrangler ); + +private: + bool fOpen; + IOLock * fMatchingLock; + OSSet * fFramebuffers; + OSSet * fDisplays; + // true: we have informed displays to assume lowest usable state + bool emergency_informed; + // from control panel: number of idle minutes before dimming + unsigned long mins_to_dim; + // false: use mins_to_dim unless in emergency situation + bool use_general_aggressiveness; + + virtual void initForPM ( void ); + virtual IOReturn setAggressiveness ( unsigned long, unsigned long ); + virtual bool activityTickle ( unsigned long, unsigned long ); + virtual IOReturn setPowerState ( unsigned long powerStateOrdinal, IOService* whatDevice ); + virtual void makeDisplaysUsable ( void ); + virtual void idleDisplays ( void ); + + static bool _displayHandler( void * target, void * ref, + 
IOService * newService ); + static bool _displayConnectHandler( void * target, void * ref, + IOService * newService ); + + virtual bool displayHandler( OSSet * set, IODisplay * newDisplay); + virtual bool displayConnectHandler( void * ref, IODisplayConnect * connect); + + virtual bool makeDisplayConnects( IOFramebuffer * fb ); + + virtual IODisplayConnect * getDisplayConnect( + IOFramebuffer * fb, IOIndex connect ); + + virtual IOReturn getConnectFlagsForDisplayMode( + IODisplayConnect * connect, + IODisplayModeID mode, UInt32 * flags ); + + virtual IOReturn getDefaultMode( IOFramebuffer * fb, + IODisplayModeID * mode, IOIndex * depth ); + + virtual IOReturn findStartupMode( IOFramebuffer * fb ); + +public: + + IOService * rootDomain; // points to Root Power Domain + + virtual bool start(IOService * provider); + + static IOReturn clientStart( IOFramebuffer * fb ); + + static IOReturn getFlagsForDisplayMode( + IOFramebuffer * fb, + IODisplayModeID mode, UInt32 * flags ); + +}; + +#endif /* _IOKIT_IODISPLAYWRANGLER_H */ diff --git a/iokit/Families/IOGraphics/IOFramebuffer.cpp b/iokit/Families/IOGraphics/IOFramebuffer.cpp new file mode 100644 index 000000000..73977c788 --- /dev/null +++ b/iokit/Families/IOGraphics/IOFramebuffer.cpp @@ -0,0 +1,1847 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 01 Sep 92 Portions from Joe Pasqua, Created. + */ + + +#include +#include + +#include +#include +#include +#include + +#define IOFRAMEBUFFER_PRIVATE +#include +#include + +#include "IOFramebufferUserClient.h" +#include "IODisplayWrangler.h" +#include "IOFramebufferReallyPrivate.h" +#include + +#include +#include +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOGraphicsDevice + +OSDefineMetaClass( IOFramebuffer, IOGraphicsDevice ) +OSDefineAbstractStructors( IOFramebuffer, IOGraphicsDevice ) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define GetShmem(instance) ((StdFBShmem_t *)(instance->priv)) + +#define CLEARSEMA(shmem) ev_unlock(&shmem->cursorSema) +#define SETSEMA(shmem) \ + if (!ev_try_lock(&shmem->cursorSema)) return; +#define TOUCHBOUNDS(one, two) \ + (((one.minx < two.maxx) && (two.minx < one.maxx)) && \ + ((one.miny < two.maxy) && (two.miny < one.maxy))) + +/* + * Cursor rendering + */ + +#include "IOCursorBlits.h" + +inline void IOFramebuffer::StdFBDisplayCursor( IOFramebuffer * inst ) +{ + StdFBShmem_t *shmem; + Bounds saveRect; + volatile unsigned char *vramPtr; /* screen data pointer */ + unsigned int cursStart; + unsigned int cursorWidth; + int width; + int height; + + shmem = GetShmem(inst); + saveRect = shmem->cursorRect; + /* Clip saveRect vertical within 
screen bounds */ + if (saveRect.miny < shmem->screenBounds.miny) + saveRect.miny = shmem->screenBounds.miny; + if (saveRect.maxy > shmem->screenBounds.maxy) + saveRect.maxy = shmem->screenBounds.maxy; + if (saveRect.minx < shmem->screenBounds.minx) + saveRect.minx = shmem->screenBounds.minx; + if (saveRect.maxx > shmem->screenBounds.maxx) + saveRect.maxx = shmem->screenBounds.maxx; + shmem->saveRect = saveRect; /* Remember save rect for RemoveCursor */ + + vramPtr = inst->frameBuffer + + (inst->rowBytes * (saveRect.miny - shmem->screenBounds.miny)) + + (inst->bytesPerPixel * (saveRect.minx - shmem->screenBounds.minx)); + + width = saveRect.maxx - saveRect.minx; + height = saveRect.maxy - saveRect.miny; + cursorWidth = shmem->cursorSize[shmem->frame].width; + + cursStart = (saveRect.miny - shmem->cursorRect.miny) * cursorWidth + + (saveRect.minx - shmem->cursorRect.minx); + + if( inst->cursorBlitProc) + inst->cursorBlitProc( inst, + (void *) shmem, + vramPtr, + cursStart, + inst->totalWidth - width, /* vramRow */ + cursorWidth - width, /* cursRow */ + width, + height); +} + +// Description: RemoveCursor erases the cursor by replacing the background +// image that was saved by the previous call to DisplayCursor. +// If the frame buffer is cacheable, flush at the end of the +// drawing operation. 
+ +inline void IOFramebuffer::StdFBRemoveCursor( IOFramebuffer * inst ) +{ + StdFBShmem_t *shmem; + volatile unsigned char *vramPtr; /* screen data pointer */ + unsigned int vramRow; + int width; + int height; + + shmem = GetShmem(inst); + + vramRow = inst->totalWidth; /* Scanline width in pixels */ + + vramPtr = inst->frameBuffer + + (inst->rowBytes * (shmem->saveRect.miny - shmem->screenBounds.miny)) + + (inst->bytesPerPixel * + (shmem->saveRect.minx - shmem->screenBounds.minx)); + + width = shmem->saveRect.maxx - shmem->saveRect.minx; + height = shmem->saveRect.maxy - shmem->saveRect.miny; + vramRow -= width; + + if( inst->cursorRemoveProc) + inst->cursorRemoveProc( inst, (void *)shmem, + vramPtr, vramRow, width, height); +} + +inline void IOFramebuffer::RemoveCursor( IOFramebuffer * inst ) +{ + StdFBShmem_t * shmem = GetShmem(inst); + + if( shmem->hardwareCursorActive ) { + Point * hs; + + hs = &shmem->hotSpot[shmem->frame]; + inst->setCursorState( + shmem->cursorLoc.x - hs->x - shmem->screenBounds.minx, + shmem->cursorLoc.y - hs->y - shmem->screenBounds.miny, false ); + } else + StdFBRemoveCursor(inst); +} + +inline void IOFramebuffer::DisplayCursor( IOFramebuffer * inst ) +{ + Point * hs; + StdFBShmem_t * shmem = GetShmem(inst); + SInt32 x, y; + + hs = &shmem->hotSpot[shmem->frame]; + x = shmem->cursorLoc.x - hs->x; + y = shmem->cursorLoc.y - hs->y; + + if( shmem->hardwareCursorActive ) + inst->setCursorState( x - shmem->screenBounds.minx, + y - shmem->screenBounds.miny, true ); + else { + shmem->cursorRect.maxx = (shmem->cursorRect.minx = x) + + shmem->cursorSize[shmem->frame].width; + shmem->cursorRect.maxy = (shmem->cursorRect.miny = y) + + shmem->cursorSize[shmem->frame].height; + StdFBDisplayCursor(inst); + shmem->oldCursorRect = shmem->cursorRect; + } +} + +inline void IOFramebuffer::SysHideCursor( IOFramebuffer * inst ) +{ + if (!GetShmem(inst)->cursorShow++) + RemoveCursor(inst); +} + +inline void IOFramebuffer::SysShowCursor( IOFramebuffer * inst ) 
+{ + StdFBShmem_t *shmem; + + shmem = GetShmem(inst); + + if (shmem->cursorShow) + if (!--(shmem->cursorShow)) + DisplayCursor(inst); +} + +inline void IOFramebuffer::CheckShield( IOFramebuffer * inst ) +{ + Point * hs; + int intersect; + Bounds tempRect; + StdFBShmem_t * shmem = GetShmem(inst); + + /* Calculate temp cursorRect */ + hs = &shmem->hotSpot[shmem->frame]; + tempRect.maxx = (tempRect.minx = (shmem->cursorLoc).x - hs->x) + + shmem->cursorSize[shmem->frame].width; + tempRect.maxy = (tempRect.miny = (shmem->cursorLoc).y - hs->y) + + shmem->cursorSize[shmem->frame].height; + + intersect = TOUCHBOUNDS(tempRect, shmem->shieldRect); + if (intersect != shmem->shielded) + (shmem->shielded = intersect) ? + SysHideCursor(inst) : SysShowCursor(inst); +} + +/** + ** external methods + **/ + +void IOFramebuffer::setupCursor( IOPixelInformation * info ) +{ + StdFBShmem_t * shmem = GetShmem(this); + volatile unsigned char * bits; + IOByteCount cursorImageBytes; + + rowBytes = info->bytesPerRow; + totalWidth = (rowBytes * 8) / info->bitsPerPixel; + bytesPerPixel = info->bitsPerPixel / 8; + frameBuffer = (volatile unsigned char *) vramMap->getVirtualAddress(); + + if( shmem) { + if( (shmem->screenBounds.maxx == shmem->screenBounds.minx) + || (shmem->screenBounds.maxy == shmem->screenBounds.miny)) { + // a default if no one calls IOFBSetBounds() + shmem->screenBounds.minx = 0; + shmem->screenBounds.miny = 0; + shmem->screenBounds.maxx = info->activeWidth; + shmem->screenBounds.maxy = info->activeHeight; + } + + cursorImageBytes = maxCursorSize.width * maxCursorSize.height + * bytesPerPixel; + bits = shmem->cursor; + for( int i = 0; i < kIOFBNumCursorFrames; i++ ) { + cursorImages[i] = bits; + bits += cursorImageBytes; + shmem->cursorSize[i] = maxCursorSize; + } + if( info->bitsPerPixel <= 8) { + for( int i = 0; i < kIOFBNumCursorFrames; i++ ) { + cursorMasks[i] = bits; + bits += cursorImageBytes; + } + } + cursorSave = bits; + } + + switch( info->bitsPerPixel) { + case 8: 
+ if( colorConvert.t._bm256To38SampleTable + && colorConvert.t._bm38To256SampleTable) { + cursorBlitProc = (CursorBlitProc) StdFBDisplayCursor8P; + cursorRemoveProc = (CursorRemoveProc) StdFBRemoveCursor8; + } + break; + case 16: + if( colorConvert.t._bm34To35SampleTable + && colorConvert.t._bm35To34SampleTable) { + cursorBlitProc = (CursorBlitProc) StdFBDisplayCursor555; + cursorRemoveProc = (CursorRemoveProc) StdFBRemoveCursor16; + } + break; + case 32: + if( colorConvert.t._bm256To38SampleTable + && colorConvert.t._bm38To256SampleTable) { + cursorBlitProc = (CursorBlitProc) StdFBDisplayCursor32Axxx; + cursorRemoveProc = (CursorRemoveProc) StdFBRemoveCursor32; + } + break; + default: + IOLog("%s: can't do cursor at depth %ld\n", + getName(), info->bitsPerPixel); + cursorBlitProc = (CursorBlitProc) NULL; + cursorRemoveProc = (CursorRemoveProc) NULL; + break; + } +} + +void IOFramebuffer::stopCursor( void ) +{ + cursorBlitProc = (CursorBlitProc) NULL; + cursorRemoveProc = (CursorRemoveProc) NULL; +} + +IOReturn IOFramebuffer::createSharedCursor( + int shmemVersion, int maxWidth, int maxHeight ) +{ + StdFBShmem_t * shmem; + IOByteCount size, maxImageSize; + + kprintf("createSharedCursor vers = %d, %d x %d\n", + shmemVersion, maxWidth, maxHeight); + + if( shmemVersion != kIOFBCurrentShmemVersion) + return( kIOReturnUnsupported); + + shmemClientVersion = shmemVersion; + maxImageSize = (maxWidth * maxHeight * kIOFBMaxCursorDepth) / 8; + + size = sizeof( StdFBShmem_t) + + ((kIOFBNumCursorFrames + 1) * maxImageSize); + + if( !sharedCursor || (size != sharedCursor->getLength())) { + IOBufferMemoryDescriptor * newDesc; + + priv = 0; + newDesc = IOBufferMemoryDescriptor::withOptions( + kIODirectionNone | kIOMemoryKernelUserShared, size ); + if( !newDesc) + return( kIOReturnNoMemory ); + + if( sharedCursor) + sharedCursor->release(); + sharedCursor = newDesc; + } + shmem = (StdFBShmem_t *) sharedCursor->getBytesNoCopy(); + priv = shmem; + + // Init shared memory area + 
bzero( shmem, size ); + shmem->version = kIOFBCurrentShmemVersion; + shmem->structSize = size; + shmem->cursorShow = 1; + shmem->hardwareCursorCapable = haveHWCursor; + + maxCursorSize.width = maxWidth; + maxCursorSize.height = maxHeight; + + doSetup( false ); + + return( kIOReturnSuccess); +} + +IOReturn IOFramebuffer::setBoundingRect( Bounds * bounds ) +{ + StdFBShmem_t *shmem; + + shmem = GetShmem(this); + if( NULL == shmem) + return( kIOReturnUnsupported); + + shmem->screenBounds = *bounds; + + return( kIOReturnSuccess); +} + +/** + ** IOUserClient methods + **/ + +IOReturn IOFramebuffer::newUserClient( task_t owningTask, + void * security_id, + UInt32 type, + IOUserClient ** handler ) + +{ +#if 0 + static UInt8 data[] = { 0x00, 0x03, 0x04, 0x07, 0x08, 0x0b, 0x0c, 0x0f, + 0x10, 0x13, 0x14, 0x17, 0x18, 0x1b, 0x1c, 0x1f, + + 0x00, 0x00, 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, + 0x04, 0x04, 0x05, 0x05, 0x06, 0x06, 0x07, 0x07, + 0x08, 0x08, 0x09, 0x09, 0x0a, 0x0a, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0d, 0x0d, 0x0e, 0x0e, 0x0f, 0x0f }; + colorConvert.t._bm34To35SampleTable = data; + colorConvert.t._bm35To34SampleTable = data + 16; +#endif + + IOReturn err = kIOReturnSuccess; + IOUserClient * newConnect = 0; + IOUserClient * theConnect = 0; + + switch( type ) { + + case kIOFBServerConnectType: + if( serverConnect) + err = kIOReturnExclusiveAccess; + else { + + if( isConsoleDevice()) + getPlatform()->setConsoleInfo( 0, kPEReleaseScreen); + + err = IODisplayWrangler::clientStart( this ); + if( kIOReturnSuccess == err) + newConnect = IOFramebufferUserClient::withTask(owningTask); + } + break; + + case kIOFBSharedConnectType: + if( sharedConnect) { + theConnect = sharedConnect; + theConnect->retain(); + } else if( serverConnect) + newConnect = IOFramebufferSharedUserClient::withTask(owningTask); + else + err = kIOReturnNotOpen; + break; + + case kIOFBEngineControllerConnectType: + case kIOFBEngineConnectType: + newConnect = IOGraphicsEngineClient::withTask(owningTask); + break; + + 
default: + err = kIOReturnBadArgument; + } + + if( newConnect) { + if( (false == newConnect->attach( this )) + || (false == newConnect->start( this ))) { + newConnect->detach( this ); + newConnect->release(); + } else + theConnect = newConnect; + } + + *handler = theConnect; + return( err ); +} + +IOReturn IOFramebuffer::extGetDisplayModeCount( IOItemCount * count ) +{ + *count = getDisplayModeCount(); + return( kIOReturnSuccess); +} + +IOReturn IOFramebuffer::extGetDisplayModes( IODisplayModeID * allModes, IOByteCount * size ) +{ + IOReturn err; + IOByteCount outSize; + + outSize = getDisplayModeCount() * sizeof( IODisplayModeID); + + if( *size < outSize) + return( kIOReturnBadArgument); + + *size = outSize; + err = getDisplayModes( allModes ); + + return( err); +} + +IOReturn IOFramebuffer::extGetVRAMMapOffset( IOPixelAperture /* aperture */, + IOByteCount * offset ) +{ + *offset = vramMapOffset; + + return( kIOReturnSuccess ); +} + +IOReturn IOFramebuffer::extSetBounds( Bounds * bounds ) +{ + StdFBShmem_t *shmem; + + shmem = GetShmem(this); + if( shmem) + shmem->screenBounds = *bounds; + + return( kIOReturnSuccess ); +} + +IOReturn IOFramebuffer::extValidateDetailedTiming( + void * description, void * outDescription, + IOByteCount inSize, IOByteCount * outSize ) +{ + IOReturn err; + + if( *outSize != inSize) + return( kIOReturnBadArgument ); + + err = validateDetailedTiming( description, inSize ); + + if( kIOReturnSuccess == err) + bcopy( description, outDescription, inSize ); + + return( err ); +} + + +IOReturn IOFramebuffer::extSetColorConvertTable( UInt32 select, + UInt8 * data, IOByteCount length ) +{ + static const IOByteCount checkLength[] = { + 16 * sizeof( UInt8), + 32 * sizeof( UInt8), + 256 * sizeof( UInt32), + 5 * 256 * sizeof( UInt8) }; + + UInt8 * table; + IODisplayModeID mode; + IOIndex depth; + IOPixelInformation info; + + if( select > 3) + return( kIOReturnBadArgument ); + + if( length != checkLength[select]) + return( kIOReturnBadArgument ); + + 
table = colorConvert.tables[select]; + if( 0 == table) { + table = (UInt8 *) IOMalloc( length ); + colorConvert.tables[select] = table; + } + if( !table) + return( kIOReturnNoMemory ); + + bcopy( data, table, length ); + if( select == 3) + white = data[data[255] + data[511] + data[767] + 1024]; + + if( (NULL == cursorBlitProc) + && colorConvert.tables[0] && colorConvert.tables[1] + && colorConvert.tables[2] && colorConvert.tables[3] + && vramMap + && (kIOReturnSuccess == getCurrentDisplayMode( &mode, &depth )) + && (kIOReturnSuccess == getPixelInformation( mode, depth, kIOFBSystemAperture, &info ))) + setupCursor( &info ); + + return( kIOReturnSuccess ); +} + +IOReturn IOFramebuffer::extSetCLUTWithEntries( UInt32 index, IOOptionBits options, + IOColorEntry * colors, IOByteCount inputCount ) +{ + IOReturn kr; + + kr = setCLUTWithEntries( colors, index, + inputCount / sizeof( IOColorEntry), + options ); + + return( kr ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// +// BEGIN: Implementation of the evScreen protocol +// + +void IOFramebuffer::hideCursor( void ) +{ + StdFBShmem_t *shmem = GetShmem(this); + + SETSEMA(shmem); + SysHideCursor(this); + CLEARSEMA(shmem); +} + +#if 0 +void IOFramebuffer::free() +{ + if( vblSemaphore) + semaphore_destroy(kernel_task, vblSemaphore); + super::free(); +} +#endif + +void IOFramebuffer::deferredMoveCursor( IOFramebuffer * inst ) +{ + StdFBShmem_t * shmem = GetShmem(inst); + IOReturn err = kIOReturnSuccess; + + if( shmem->hardwareCursorActive && (0 == shmem->frame) ) { + + if (shmem->cursorObscured) { + shmem->cursorObscured = 0; + if (shmem->cursorShow) + --shmem->cursorShow; + } + if (!shmem->cursorShow) { + Point * hs; + hs = &shmem->hotSpot[shmem->frame]; + err = inst->setCursorState( + shmem->cursorLoc.x - hs->x - shmem->screenBounds.minx, + shmem->cursorLoc.y - hs->y - shmem->screenBounds.miny, true ); + } + + } else { + + if (!shmem->cursorShow++) + RemoveCursor(inst); + if 
(shmem->cursorObscured) { + shmem->cursorObscured = 0; + if (shmem->cursorShow) + --shmem->cursorShow; + } + if (shmem->shieldFlag) CheckShield(inst); + if (shmem->cursorShow) + if (!--shmem->cursorShow) + DisplayCursor(inst); + + inst->flushCursor(); + } + inst->needCursorService = (kIOReturnBusy == err); +} + +void IOFramebuffer::moveCursor( Point * cursorLoc, int frame ) +{ + nextCursorLoc = *cursorLoc; + nextCursorFrame = frame; + needCursorService = true; + + StdFBShmem_t *shmem = GetShmem(this); + + SETSEMA(shmem); + + if( !haveVBLService) { + shmem->cursorLoc = *cursorLoc; + shmem->frame = frame; + deferredMoveCursor( this ); + } + + CLEARSEMA(shmem); +} + +void IOFramebuffer::handleVBL( IOFramebuffer * inst, void * ref ) +{ + StdFBShmem_t * shmem = GetShmem(inst); + AbsoluteTime now; + + if( !shmem) + return; + + clock_get_uptime( &now ); + shmem->vblDelta = now; + SUB_ABSOLUTETIME( &shmem->vblDelta, &shmem->vblTime ); + shmem->vblTime = now; + + KERNEL_DEBUG(0xc000030 | DBG_FUNC_NONE, + shmem->vblDelta.hi, shmem->vblDelta.lo, 0, 0, 0); + + if( inst->vblSemaphore) + semaphore_signal_all(inst->vblSemaphore); + + SETSEMA(shmem); + + if( inst->needCursorService) { + shmem->cursorLoc = inst->nextCursorLoc; + shmem->frame = inst->nextCursorFrame; + deferredMoveCursor( inst ); + } + + CLEARSEMA(shmem); +} + +void IOFramebuffer::showCursor( Point * cursorLoc, int frame ) +{ + StdFBShmem_t *shmem; + + shmem = GetShmem(this); + SETSEMA(shmem); + shmem->frame = frame; + shmem->hardwareCursorActive = hwCursorLoaded && (frame == 0); + shmem->cursorLoc = *cursorLoc; + if (shmem->shieldFlag) CheckShield(this); + SysShowCursor(this); + CLEARSEMA(shmem); +} + +void IOFramebuffer::resetCursor( void ) +{ + StdFBShmem_t *shmem; + + shmem = GetShmem(this); + hwCursorLoaded = false; + if( !shmem) + return; + hideCursor(); + shmem->hardwareCursorActive = false; + showCursor( &shmem->cursorLoc, shmem->frame ); +} + +void IOFramebuffer::getVBLTime( AbsoluteTime * time, 
AbsoluteTime * delta ) +{ + StdFBShmem_t *shmem; + + shmem = GetShmem(this); + if( shmem) { + *time = shmem->vblTime; + *delta = shmem->vblDelta; + } else + time->hi = time->lo = 0; +} + +void IOFramebuffer::getBoundingRect( Bounds ** bounds ) +{ + StdFBShmem_t *shmem; + + shmem = GetShmem(this); + if( NULL == shmem) + *bounds = NULL; + else + *bounds = &shmem->screenBounds; +} + +// +// END: Implementation of the evScreen protocol +// + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOFramebuffer::getNotificationSemaphore( + IOSelect interruptType, semaphore_t * semaphore ) +{ + kern_return_t kr; + semaphore_t sema; + + if( interruptType != kIOFBVBLInterruptType) + return( kIOReturnUnsupported ); + + if( !haveVBLService) + return( kIOReturnNoResources ); + + if( MACH_PORT_NULL == vblSemaphore) { + kr = semaphore_create(kernel_task, &sema, SYNC_POLICY_FIFO, 0); + if( kr == KERN_SUCCESS) + vblSemaphore = sema; + } else + kr = KERN_SUCCESS; + + if( kr == KERN_SUCCESS) + *semaphore = vblSemaphore; + + return( kr ); +} + +IOReturn IOFramebuffer::extSetCursorVisible( bool visible ) +{ + IOReturn err; + Point * hs; + StdFBShmem_t * shmem = GetShmem(this); + + if( shmem->hardwareCursorActive ) { + hs = &shmem->hotSpot[shmem->frame]; + err = setCursorState( + shmem->cursorLoc.x - hs->x - shmem->screenBounds.minx, + shmem->cursorLoc.y - hs->y - shmem->screenBounds.miny, + visible ); + } else + err = kIOReturnBadArgument; + + return( err ); +} + +IOReturn IOFramebuffer::extSetCursorPosition( SInt32 x, SInt32 y ) +{ + return( kIOReturnUnsupported ); +} + +IOReturn IOFramebuffer::extSetNewCursor( void * cursor, IOIndex frame, + IOOptionBits options ) +{ + StdFBShmem_t * shmem = GetShmem(this); + IOReturn err; + + if( cursor || options || frame) + err = kIOReturnBadArgument; + else { + + if( (shmem->cursorSize[frame].width > maxCursorSize.width) + || (shmem->cursorSize[frame].height > maxCursorSize.height)) + err = kIOReturnBadArgument; 
+ + else if( haveHWCursor) + err = setCursorImage( (void *) frame ); + else + err = kIOReturnUnsupported; + } + + hwCursorLoaded = (kIOReturnSuccess == err); + shmem->hardwareCursorActive = hwCursorLoaded && (shmem->frame == 0); + + return( err ); +} + +bool IOFramebuffer::convertCursorImage( void * cursorImage, + IOHardwareCursorDescriptor * hwDesc, + IOHardwareCursorInfo * hwCursorInfo ) +{ + StdFBShmem_t * shmem = GetShmem(this); + UInt8 * dataOut = hwCursorInfo->hardwareCursorData; + IOColorEntry * clut = hwCursorInfo->colorMap; + UInt32 maxColors = hwDesc->numColors; + int frame = (int) cursorImage; + + volatile unsigned short * cursPtr16; + volatile unsigned int * cursPtr32; + SInt32 x, y; + UInt32 index, numColors = 0; + UInt16 alpha, red, green, blue; + UInt16 s16; + UInt32 s32; + UInt8 data = 0; + UInt8 pixel = 0; + bool ok = true; + + assert( frame < kIOFBNumCursorFrames ); + + if( bytesPerPixel == 4) { + cursPtr32 = (volatile unsigned int *) cursorImages[ frame ]; + cursPtr16 = 0; + } else if( bytesPerPixel == 2) { + cursPtr32 = 0; + cursPtr16 = (volatile unsigned short *) cursorImages[ frame ]; + } else + return( false ); + + x = shmem->cursorSize[frame].width; + y = shmem->cursorSize[frame].height; + + if( (x > (SInt32) hwDesc->width) || (y > (SInt32) hwDesc->height)) + return( false ); +#if 0 + hwCursorInfo->cursorWidth = x; + hwCursorInfo->cursorHeight = y; + while( (--y != -1) ) { + x = shmem->cursorSize[frame].width; + while( (--x != -1) ) { + + if( cursPtr32) { + s32 = *(cursPtr32++); + alpha = (s32 >> 28) & 0xf; + if( alpha && (alpha != 0xf)) + *(cursPtr32 - 1) = 0x00ffffff; + + } else { + s16 = *(cursPtr16++); + alpha = s16 & 0x000F; + if( alpha && (alpha != 0xf)) + *(cursPtr16 - 1) = 0xfff0; + } + } + } +#endif + + hwCursorInfo->cursorWidth = x; + hwCursorInfo->cursorHeight = y; + + while( ok && (--y != -1) ) { + x = shmem->cursorSize[frame].width; + while( ok && (--x != -1) ) { + + if( cursPtr32) { + s32 = *(cursPtr32++); + alpha = (s32 >> 28) 
& 0xf; + red = (s32 >> 16) & 0xff; + red |= (red << 8); + green = (s32 >> 8) & 0xff; + green |= (green << 8); + blue = (s32) & 0xff; + blue |= (blue << 8); + + } else { +#define RMASK16 0xF000 +#define GMASK16 0x0F00 +#define BMASK16 0x00F0 +#define AMASK16 0x000F + s16 = *(cursPtr16++); + alpha = s16 & AMASK16; + red = s16 & RMASK16; + red |= (red >> 4) | (red >> 8) | (red >> 12); + green = s16 & GMASK16; + green |= (green << 4) | (green >> 4) | (green >> 8); + blue = s16 & BMASK16; + blue |= (blue << 8) | (blue << 4) | (blue >> 4); + } + + if( alpha == 0 ) { + + if( 0 == (red | green | blue)) { + /* Transparent black area. Leave dst as is. */ + if( kTransparentEncodedPixel + & hwDesc->supportedSpecialEncodings) + pixel = hwDesc->specialEncodings[kTransparentEncoding]; + else + ok = false; + } else if (0xffff == (red & green & blue)) { + /* Transparent white area. Invert dst. */ + if( kInvertingEncodedPixel + & hwDesc->supportedSpecialEncodings) + pixel = hwDesc->specialEncodings[kInvertingEncoding]; + else + ok = false; + } else + ok = false; + + } else if( alpha == 0xf ) { + + /* Opaque cursor pixel. Mark it. */ + for( index = 0; index < numColors; index++ ) { + if( (red == clut[ index ].red) + && (green == clut[ index ].green) + && (blue == clut[ index ].blue) ) { + + pixel = clut[ index ].index; + break; + } + } + if( index == numColors) { + ok = (numColors < maxColors); + if( ok) { + pixel = hwDesc->colorEncodings[ numColors++ ]; + clut[ index ].red = red; + clut[ index ].green = green; + clut[ index ].blue = blue; + clut[ index ].index = pixel; + } + } + + } else { + /* Alpha is not 0 or 1.0. Sover the cursor. 
*/ + ok = false; + break; + } + + data <<= hwDesc->bitDepth; + data |= pixel; + + if( 0 == (x & ((8 / hwDesc->bitDepth) - 1))) + *dataOut++ = data; + } /* x */ + } /* y */ + +// if( !ok) kprintf("Couldnt do a hw curs\n"); + + return( ok ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IOFramebuffer::initialize() +{ +#if 0 +static IOWorkLoop * gIOFramebufferWorkloop; +static IOLock * gIOFramebufferLock; + + gIOFramebufferLock = IOLockAlloc(); + + gIOFramebufferWorkloop = IOWorkLoop::workLoop(); + + assert( gIOFramebufferLock && gIOFramebufferWorkloop ); +#endif +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if 0 +static bool serializeInfoCB( void * target, void * ref, OSSerialize * s ) +{ + return( ((IOFramebuffer *)target)->serializeInfo(s) ); +} +#endif + +static IOPMrootDomain * gIOPMRootDomain; + +bool IOFramebuffer::start( IOService * provider ) +{ + + if( ! super::start( provider)) + return( false ); + + userAccessRanges = OSArray::withCapacity( 1 ); + engineAccessRanges = OSArray::withCapacity( 1 ); + +#if 0 + OSSerializer * infoSerializer = OSSerializer::forTarget( (void *) this, &serializeInfoCB ); + if( !infoSerializer) + return( false ); + + setProperty( kIOFramebufferInfoKey, infoSerializer ); + infoSerializer->release(); + + + IOInterruptEventSource * eventSrc; + + eventSrc = IOInterruptEventSource::interruptEventSource( + this, autopollArrived); + if (!eventSrc || + kIOReturnSuccess != workLoop->addEventSource(eventSrc) ) { + kprintf("Start is bailing\n"); + return false; + } +#endif + + closed = true; + registerService(); + + // initialize superclass power management variables + PMinit(); + // attach into the power management hierarchy + provider->joinPMtree(this); + // clamp power on (the user client will change that when appropriate) +// makeUsable(); + + if( !gIOPMRootDomain) + gIOPMRootDomain = (IOPMrootDomain *) + 
IORegistryEntry::fromPath("/IOPowerConnection/IOPMrootDomain", gIOPowerPlane); + if( gIOPMRootDomain) + gIOPMRootDomain->registerInterestedDriver(this); + + return( true ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// Apple standard 8-bit CLUT + +UInt8 appleClut8[ 256 * 3 ] = { +// 00 + 0xFF,0xFF,0xFF, 0xFF,0xFF,0xCC, 0xFF,0xFF,0x99, 0xFF,0xFF,0x66, + 0xFF,0xFF,0x33, 0xFF,0xFF,0x00, 0xFF,0xCC,0xFF, 0xFF,0xCC,0xCC, + 0xFF,0xCC,0x99, 0xFF,0xCC,0x66, 0xFF,0xCC,0x33, 0xFF,0xCC,0x00, + 0xFF,0x99,0xFF, 0xFF,0x99,0xCC, 0xFF,0x99,0x99, 0xFF,0x99,0x66, +// 10 + 0xFF,0x99,0x33, 0xFF,0x99,0x00, 0xFF,0x66,0xFF, 0xFF,0x66,0xCC, + 0xFF,0x66,0x99, 0xFF,0x66,0x66, 0xFF,0x66,0x33, 0xFF,0x66,0x00, + 0xFF,0x33,0xFF, 0xFF,0x33,0xCC, 0xFF,0x33,0x99, 0xFF,0x33,0x66, + 0xFF,0x33,0x33, 0xFF,0x33,0x00, 0xFF,0x00,0xFF, 0xFF,0x00,0xCC, +// 20 + 0xFF,0x00,0x99, 0xFF,0x00,0x66, 0xFF,0x00,0x33, 0xFF,0x00,0x00, + 0xCC,0xFF,0xFF, 0xCC,0xFF,0xCC, 0xCC,0xFF,0x99, 0xCC,0xFF,0x66, + 0xCC,0xFF,0x33, 0xCC,0xFF,0x00, 0xCC,0xCC,0xFF, 0xCC,0xCC,0xCC, + 0xCC,0xCC,0x99, 0xCC,0xCC,0x66, 0xCC,0xCC,0x33, 0xCC,0xCC,0x00, +// 30 + 0xCC,0x99,0xFF, 0xCC,0x99,0xCC, 0xCC,0x99,0x99, 0xCC,0x99,0x66, + 0xCC,0x99,0x33, 0xCC,0x99,0x00, 0xCC,0x66,0xFF, 0xCC,0x66,0xCC, + 0xCC,0x66,0x99, 0xCC,0x66,0x66, 0xCC,0x66,0x33, 0xCC,0x66,0x00, + 0xCC,0x33,0xFF, 0xCC,0x33,0xCC, 0xCC,0x33,0x99, 0xCC,0x33,0x66, +// 40 + 0xCC,0x33,0x33, 0xCC,0x33,0x00, 0xCC,0x00,0xFF, 0xCC,0x00,0xCC, + 0xCC,0x00,0x99, 0xCC,0x00,0x66, 0xCC,0x00,0x33, 0xCC,0x00,0x00, + 0x99,0xFF,0xFF, 0x99,0xFF,0xCC, 0x99,0xFF,0x99, 0x99,0xFF,0x66, + 0x99,0xFF,0x33, 0x99,0xFF,0x00, 0x99,0xCC,0xFF, 0x99,0xCC,0xCC, +// 50 + 0x99,0xCC,0x99, 0x99,0xCC,0x66, 0x99,0xCC,0x33, 0x99,0xCC,0x00, + 0x99,0x99,0xFF, 0x99,0x99,0xCC, 0x99,0x99,0x99, 0x99,0x99,0x66, + 0x99,0x99,0x33, 0x99,0x99,0x00, 0x99,0x66,0xFF, 0x99,0x66,0xCC, + 0x99,0x66,0x99, 0x99,0x66,0x66, 0x99,0x66,0x33, 0x99,0x66,0x00, +// 60 + 0x99,0x33,0xFF, 0x99,0x33,0xCC, 
0x99,0x33,0x99, 0x99,0x33,0x66, + 0x99,0x33,0x33, 0x99,0x33,0x00, 0x99,0x00,0xFF, 0x99,0x00,0xCC, + 0x99,0x00,0x99, 0x99,0x00,0x66, 0x99,0x00,0x33, 0x99,0x00,0x00, + 0x66,0xFF,0xFF, 0x66,0xFF,0xCC, 0x66,0xFF,0x99, 0x66,0xFF,0x66, +// 70 + 0x66,0xFF,0x33, 0x66,0xFF,0x00, 0x66,0xCC,0xFF, 0x66,0xCC,0xCC, + 0x66,0xCC,0x99, 0x66,0xCC,0x66, 0x66,0xCC,0x33, 0x66,0xCC,0x00, + 0x66,0x99,0xFF, 0x66,0x99,0xCC, 0x66,0x99,0x99, 0x66,0x99,0x66, + 0x66,0x99,0x33, 0x66,0x99,0x00, 0x66,0x66,0xFF, 0x66,0x66,0xCC, +// 80 + 0x66,0x66,0x99, 0x66,0x66,0x66, 0x66,0x66,0x33, 0x66,0x66,0x00, + 0x66,0x33,0xFF, 0x66,0x33,0xCC, 0x66,0x33,0x99, 0x66,0x33,0x66, + 0x66,0x33,0x33, 0x66,0x33,0x00, 0x66,0x00,0xFF, 0x66,0x00,0xCC, + 0x66,0x00,0x99, 0x66,0x00,0x66, 0x66,0x00,0x33, 0x66,0x00,0x00, +// 90 + 0x33,0xFF,0xFF, 0x33,0xFF,0xCC, 0x33,0xFF,0x99, 0x33,0xFF,0x66, + 0x33,0xFF,0x33, 0x33,0xFF,0x00, 0x33,0xCC,0xFF, 0x33,0xCC,0xCC, + 0x33,0xCC,0x99, 0x33,0xCC,0x66, 0x33,0xCC,0x33, 0x33,0xCC,0x00, + 0x33,0x99,0xFF, 0x33,0x99,0xCC, 0x33,0x99,0x99, 0x33,0x99,0x66, +// a0 + 0x33,0x99,0x33, 0x33,0x99,0x00, 0x33,0x66,0xFF, 0x33,0x66,0xCC, + 0x33,0x66,0x99, 0x33,0x66,0x66, 0x33,0x66,0x33, 0x33,0x66,0x00, + 0x33,0x33,0xFF, 0x33,0x33,0xCC, 0x33,0x33,0x99, 0x33,0x33,0x66, + 0x33,0x33,0x33, 0x33,0x33,0x00, 0x33,0x00,0xFF, 0x33,0x00,0xCC, +// b0 + 0x33,0x00,0x99, 0x33,0x00,0x66, 0x33,0x00,0x33, 0x33,0x00,0x00, + 0x00,0xFF,0xFF, 0x00,0xFF,0xCC, 0x00,0xFF,0x99, 0x00,0xFF,0x66, + 0x00,0xFF,0x33, 0x00,0xFF,0x00, 0x00,0xCC,0xFF, 0x00,0xCC,0xCC, + 0x00,0xCC,0x99, 0x00,0xCC,0x66, 0x00,0xCC,0x33, 0x00,0xCC,0x00, +// c0 + 0x00,0x99,0xFF, 0x00,0x99,0xCC, 0x00,0x99,0x99, 0x00,0x99,0x66, + 0x00,0x99,0x33, 0x00,0x99,0x00, 0x00,0x66,0xFF, 0x00,0x66,0xCC, + 0x00,0x66,0x99, 0x00,0x66,0x66, 0x00,0x66,0x33, 0x00,0x66,0x00, + 0x00,0x33,0xFF, 0x00,0x33,0xCC, 0x00,0x33,0x99, 0x00,0x33,0x66, +// d0 + 0x00,0x33,0x33, 0x00,0x33,0x00, 0x00,0x00,0xFF, 0x00,0x00,0xCC, + 0x00,0x00,0x99, 0x00,0x00,0x66, 0x00,0x00,0x33, 0xEE,0x00,0x00, + 
0xDD,0x00,0x00, 0xBB,0x00,0x00, 0xAA,0x00,0x00, 0x88,0x00,0x00, + 0x77,0x00,0x00, 0x55,0x00,0x00, 0x44,0x00,0x00, 0x22,0x00,0x00, +// e0 + 0x11,0x00,0x00, 0x00,0xEE,0x00, 0x00,0xDD,0x00, 0x00,0xBB,0x00, + 0x00,0xAA,0x00, 0x00,0x88,0x00, 0x00,0x77,0x00, 0x00,0x55,0x00, + 0x00,0x44,0x00, 0x00,0x22,0x00, 0x00,0x11,0x00, 0x00,0x00,0xEE, + 0x00,0x00,0xDD, 0x00,0x00,0xBB, 0x00,0x00,0xAA, 0x00,0x00,0x88, +// f0 + 0x00,0x00,0x77, 0x00,0x00,0x55, 0x00,0x00,0x44, 0x00,0x00,0x22, + 0x00,0x00,0x11, 0xEE,0xEE,0xEE, 0xDD,0xDD,0xDD, 0xBB,0xBB,0xBB, + 0xAA,0xAA,0xAA, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55, + 0x44,0x44,0x44, 0x22,0x22,0x22, 0x11,0x11,0x11, 0x00,0x00,0x00 +}; + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#ifdef __ppc__ +extern +#endif +int killprint; +extern "C" { int kmputc( int c ); } + +IOReturn IOFramebuffer::setPowerState( unsigned long powerStateOrdinal, + IOService * whichDevice ) +{ + if( 0 == powerStateOrdinal ) { + if( isConsoleDevice()) + killprint = 1; + deliverFramebufferNotification( kIOFBNotifyWillSleep ); + + } else { + + if( isConsoleDevice()) { + killprint = 0; + kmputc( 033 ); + kmputc( 'c' ); + } + deliverFramebufferNotification( kIOFBNotifyDidWake ); + } + + return( IOPMAckImplied); +} + +IOReturn IOFramebuffer::beginSystemSleep( void * ackRef ) +{ + pmRef = ackRef; + powerOverrideOnPriv(); + changePowerStateToPriv(0); + + return( kIOReturnSuccess ); +} + +IOReturn IOFramebuffer::powerStateWillChangeTo( IOPMPowerFlags flags, + unsigned long, IOService * whatDevice ) +{ + if( (whatDevice == gIOPMRootDomain) && (IOPMPowerOn & flags)) + // end system sleep + powerOverrideOffPriv(); + + return( IOPMAckImplied ); +} + +IOReturn IOFramebuffer::powerStateDidChangeTo( IOPMPowerFlags flags, + unsigned long, IOService* whatDevice ) +{ + if( (whatDevice == this) && pmRef && (0 == (IOPMDeviceUsable & flags))) { + // root can proceed + acknowledgeSleepWakeNotification(pmRef); + pmRef = 0; + } + + return( 
IOPMAckImplied ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IODeviceMemory * IOFramebuffer::getVRAMRange( void ) +{ + return( getApertureRange( kIOFBSystemAperture )); +} + +void IOFramebuffer::close( void ) // called by the user client when +{ // the window server exits + +} + + + +IOReturn IOFramebuffer::open( void ) +{ + IOReturn err = kIOReturnSuccess; + UInt32 value; + + do { + if( opened) + continue; + + // tell the console if it's on this display, it's going away + if( isConsoleDevice()) + getPlatform()->setConsoleInfo( 0, kPEDisableScreen); + + deliverFramebufferNotification( kIOFBNotifyDisplayModeWillChange ); + + err = enableController(); + if( kIOReturnSuccess != err) { + deliverFramebufferNotification( kIOFBNotifyDisplayModeDidChange ); + continue; + } + err = registerForInterruptType( kIOFBVBLInterruptType, + (IOFBInterruptProc) &handleVBL, + this, priv, &vblInterrupt ); + haveVBLService = (err == kIOReturnSuccess ); + + err = getAttribute( kIOHardwareCursorAttribute, &value ); + haveHWCursor = ((err == kIOReturnSuccess) && value); + + err = kIOReturnSuccess; + opened = true; + + } while( false ); + + return( err ); +} + +IOReturn IOFramebuffer::setUserRanges( void ) +{ +#if 1 /* print ranges */ + + UInt32 i, numRanges; + IODeviceMemory * mem; + + numRanges = userAccessRanges->getCount(); + IOLog("%s: user ranges num:%ld", getName(), numRanges); + for( i = 0; i < numRanges; i++) { + mem = (IODeviceMemory *) userAccessRanges->getObject( i ); + if( 0 == mem) + continue; + IOLog(" start:%lx size:%lx", + mem->getPhysicalAddress(), mem->getLength() ); + } + IOLog("\n"); + +#endif + return( kIOReturnSuccess); +} + +IOReturn IOFramebuffer::setupForCurrentConfig( void ) +{ + return( doSetup( true )); +} + +IOReturn IOFramebuffer::doSetup( bool full ) +{ + IOReturn err; + IODisplayModeID mode; + IOIndex depth; + IOPixelInformation info; + IODisplayModeInformation dmInfo; + IODeviceMemory * mem; + IODeviceMemory * 
fbRange; + IOPhysicalAddress base; + PE_Video newConsole; + + err = getCurrentDisplayMode( &mode, &depth ); + if( err) + IOLog("%s: getCurrentDisplayMode %d\n", getName(), err); + + err = getPixelInformation( mode, depth, kIOFBSystemAperture, &info ); + if( err) + IOLog("%s: getPixelInformation %d\n", getName(), err); + + if( full && (clutValid == false) && (info.pixelType == kIOCLUTPixels)) { + + IOColorEntry * tempTable; + int i; + + tempTable = (IOColorEntry *) IOMalloc( 256 * sizeof( *tempTable)); + if( tempTable) { + + for( i = 0; i < 256; i++) { + if( currentMono) { + UInt32 lum; + + lum = 0x0101 * i; + tempTable[ i ].red = lum; + tempTable[ i ].green = lum; + tempTable[ i ].blue = lum; + } else { + tempTable[ i ].red = (appleClut8[ i * 3 + 0 ] << 8) + | appleClut8[ i * 3 + 0 ]; + tempTable[ i ].green = (appleClut8[ i * 3 + 1 ] << 8) + | appleClut8[ i * 3 + 1 ]; + tempTable[ i ].blue = (appleClut8[ i * 3 + 2 ] << 8) + | appleClut8[ i * 3 + 2 ]; + } + } + setCLUTWithEntries( tempTable, 0, 256, 1 * kSetCLUTImmediately ); + IOFree( tempTable, 256 * sizeof( *tempTable)); + } + clutValid = true; + } + + fbRange = getApertureRange( kIOFBSystemAperture ); + + if( full && fbRange) { + + userAccessRanges->removeObject( kIOFBSystemAperture ); + userAccessRanges->setObject( kIOFBSystemAperture, fbRange ); + err = setUserRanges(); + + base = fbRange->getPhysicalAddress(); + if( (mem = getVRAMRange())) { + vramMapOffset = base - mem->getPhysicalAddress(); + mem->release(); + } + + if( vramMap) + vramMap->release(); + vramMap = fbRange->map(); + assert( vramMap ); + if( vramMap) + base = vramMap->getVirtualAddress(); + + // console now available + if( info.activeWidth >= 128) { + newConsole.v_baseAddr = base; + newConsole.v_rowBytes = info.bytesPerRow; + newConsole.v_width = info.activeWidth; + newConsole.v_height = info.activeHeight; + newConsole.v_depth = info.bitsPerPixel; + // strcpy( consoleInfo->v_pixelFormat, "PPPPPPPP"); + getPlatform()->setConsoleInfo( 
&newConsole, kPEEnableScreen ); + } + + deliverFramebufferNotification( kIOFBNotifyDisplayModeDidChange, 0 ); + + (void) getInformationForDisplayMode( mode, &dmInfo ); + IOLog( "%s: using (%ldx%ld@%ldHz,%ld bpp)\n", getName(), + info.activeWidth, info.activeHeight, + (dmInfo.refreshRate + 0x8000) >> 16, info.bitsPerPixel ); + } + + if( fbRange) + fbRange->release(); + if( vramMap) + setupCursor( &info ); + + return( kIOReturnSuccess ); +} + +IOReturn IOFramebuffer::extSetDisplayMode( IODisplayModeID displayMode, + IOIndex depth ) +{ + IOReturn err; + + stopCursor(); + + if( isConsoleDevice()) + getPlatform()->setConsoleInfo( 0, kPEDisableScreen); + + deliverFramebufferNotification( kIOFBNotifyDisplayModeWillChange ); + + err = setDisplayMode( displayMode, depth ); + + clutValid = false; + + setupForCurrentConfig(); + + return( err ); +} + +IOReturn IOFramebuffer::extGetInformationForDisplayMode( + IODisplayModeID mode, IODisplayModeInformation * info ) +{ + UInt32 flags = 0; + IOReturn err; + IOTimingInformation timingInfo; + + err = getInformationForDisplayMode( mode, info ); + if( kIOReturnSuccess == err) { + err = IODisplayWrangler::getFlagsForDisplayMode( this, mode, &flags); + if( kIOReturnSuccess == err) { + info->flags &= ~kDisplayModeSafetyFlags; + info->flags |= flags; + } + if( kIOReturnSuccess == getTimingInfoForDisplayMode( mode, &timingInfo )) + info->reserved[0] = timingInfo.appleTimingID; + + } + + return( err ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOFramebuffer::setNumber( OSDictionary * dict, const char * key, + UInt32 value ) +{ + OSNumber * num; + bool ok; + + num = OSNumber::withNumber( value, 32 ); + if( !num) + return( false ); + + ok = dict->setObject( key, num ); + num->release(); + + return( ok ); +} + +bool IOFramebuffer::serializeInfo( OSSerialize * s ) +{ + IOReturn err; + IODisplayModeInformation info; + IOPixelInformation pixelInfo; + IODisplayModeID * modeIDs; + IOItemCount 
modeCount, modeNum, aperture; + IOIndex depthNum; + OSDictionary * infoDict; + OSDictionary * modeDict; + OSDictionary * pixelDict; + char keyBuf[12]; + bool ok = true; + + modeCount = getDisplayModeCount(); + modeIDs = IONew( IODisplayModeID, modeCount ); + if( !modeIDs) + return( false ); + + err = getDisplayModes( modeIDs ); + if( err) + return( false ); + + infoDict = OSDictionary::withCapacity( 10 ); + if( !infoDict) + return( false ); + + for( modeNum = 0; modeNum < modeCount; modeNum++ ) { + + err = getInformationForDisplayMode( modeIDs[ modeNum ], &info ); + if( err) + continue; + + modeDict = OSDictionary::withCapacity( 10 ); + if( !modeDict) + break; + + ok = setNumber( modeDict, kIOFBWidthKey, + info.nominalWidth ) + && setNumber( modeDict, kIOFBHeightKey, + info.nominalHeight ) + && setNumber( modeDict, kIOFBRefreshRateKey, + info.refreshRate ) + && setNumber( modeDict, kIOFBFlagsKey, + info.flags ); + if( !ok) + break; + + for( depthNum = 0; depthNum < info.maxDepthIndex; depthNum++ ) { + + for( aperture = 0; ; aperture++ ) { + + err = getPixelInformation( modeIDs[ modeNum ], depthNum, + aperture, &pixelInfo ); + if( err) + break; + + pixelDict = OSDictionary::withCapacity( 10 ); + if( !pixelDict) + continue; + + ok = setNumber( pixelDict, kIOFBBytesPerRowKey, + pixelInfo.bytesPerRow ) + && setNumber( pixelDict, kIOFBBytesPerPlaneKey, + pixelInfo.bytesPerPlane ) + && setNumber( pixelDict, kIOFBBitsPerPixelKey, + pixelInfo.bitsPerPixel ) + && setNumber( pixelDict, kIOFBComponentCountKey, + pixelInfo.componentCount ) + && setNumber( pixelDict, kIOFBBitsPerComponentKey, + pixelInfo.bitsPerComponent ) + && setNumber( pixelDict, kIOFBFlagsKey, + pixelInfo.flags ) + && setNumber( pixelDict, kIOFBWidthKey, + pixelInfo.activeWidth ) + && setNumber( pixelDict, kIOFBHeightKey, + pixelInfo.activeHeight ); + if( !ok) + break; + + sprintf( keyBuf, "%lx", depthNum + (aperture << 16) ); + modeDict->setObject( keyBuf, pixelDict ); + pixelDict->release(); + } + } + + 
sprintf( keyBuf, "%lx", modeIDs[ modeNum ] ); + infoDict->setObject( keyBuf, modeDict ); + modeDict->release(); + } + + IODelete( modeIDs, IODisplayModeID, modeCount ); + + ok &= infoDict->serialize( s ); + infoDict->release(); + + return( ok ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClassAndStructors(_IOFramebufferNotifier, IONotifier) +#define LOCKNOTIFY() +#define UNLOCKNOTIFY() + +void _IOFramebufferNotifier::remove() +{ + LOCKNOTIFY(); + + if( whence) { + whence->removeObject( (OSObject *) this ); + whence = 0; + } + + fEnable = false; + + UNLOCKNOTIFY(); + + release(); +} + +bool _IOFramebufferNotifier::disable() +{ + bool ret; + + LOCKNOTIFY(); + ret = fEnable; + fEnable = false; + UNLOCKNOTIFY(); + + return( ret ); +} + +void _IOFramebufferNotifier::enable( bool was ) +{ + LOCKNOTIFY(); + fEnable = was; + UNLOCKNOTIFY(); +} + +IONotifier * IOFramebuffer::addFramebufferNotification( + IOFramebufferNotificationHandler handler, + OSObject * self, void * ref) +{ + _IOFramebufferNotifier * notify = 0; + + notify = new _IOFramebufferNotifier; + if( notify && !notify->init()) { + notify->release(); + notify = 0; + } + + if( notify) { + notify->handler = handler; + notify->self = self; + notify->ref = ref; + notify->fEnable = true; + + if( 0 == fbNotifications) + fbNotifications = OSSet::withCapacity(1); + + notify->whence = fbNotifications; + if( fbNotifications) + fbNotifications->setObject( notify ); + } + + return( notify ); +} + +IOReturn IOFramebuffer::deliverFramebufferNotification( + IOIndex event, void * info = 0 ) +{ + OSIterator * iter; + _IOFramebufferNotifier * notify; + IOReturn ret = kIOReturnSuccess; + IOReturn r; + + LOCKNOTIFY(); + + iter = OSCollectionIterator::withCollection( fbNotifications ); + + if( iter) { + while( (notify = (_IOFramebufferNotifier *) iter->getNextObject())) { + + if( notify->fEnable) { + r = (*notify->handler)( notify->self, notify->ref, this, + event, info ); + if( 
kIOReturnSuccess != ret) + ret = r; + } + } + iter->release(); + } + + UNLOCKNOTIFY(); + + return( ret ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// Some stubs + +IOReturn IOFramebuffer::enableController ( void ) +{ + return( kIOReturnSuccess ); +} + +bool IOFramebuffer::isConsoleDevice( void ) +{ + return( false ); +} + + // Set display mode and depth +IOReturn IOFramebuffer::setDisplayMode( IODisplayModeID /* displayMode */, + IOIndex /* depth */ ) +{ + return( kIOReturnUnsupported); +} + +// For pages +IOReturn IOFramebuffer::setApertureEnable( + IOPixelAperture /* aperture */, IOOptionBits /* enable */ ) +{ + return( kIOReturnUnsupported); +} + +// Display mode and depth for startup +IOReturn IOFramebuffer::setStartupDisplayMode( + IODisplayModeID /* displayMode */, IOIndex /* depth */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::getStartupDisplayMode( + IODisplayModeID * /* displayMode */, IOIndex * /* depth */ ) +{ + return( kIOReturnUnsupported); +} + +//// CLUTs + +IOReturn IOFramebuffer::setCLUTWithEntries( + IOColorEntry * /* colors */, UInt32 /* index */, + UInt32 /* numEntries */, IOOptionBits /* options */ ) +{ + return( kIOReturnUnsupported); +} + +//// Gamma + +IOReturn IOFramebuffer::setGammaTable( UInt32 /* channelCount */, + UInt32 /* dataCount */, UInt32 /* dataWidth */, void * /* data */ ) +{ + return( kIOReturnUnsupported); +} + +//// Controller attributes + +IOReturn IOFramebuffer::setAttribute( IOSelect /* attribute */, UInt32 /* value */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::getAttribute( IOSelect /* attribute */, + UInt32 * /* value */ ) +{ + return( kIOReturnUnsupported); +} + +//// Display mode timing information + +IOReturn IOFramebuffer::getTimingInfoForDisplayMode( + IODisplayModeID /* displayMode */, + IOTimingInformation * /* info */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::validateDetailedTiming( + void * 
description, IOByteCount descripSize ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::setDetailedTimings( OSArray * array ) +{ + return( kIOReturnUnsupported); +} + +//// Connections + +IOItemCount IOFramebuffer::getConnectionCount( void ) +{ + return( 1); +} + +IOReturn IOFramebuffer::setAttributeForConnection( IOIndex /* connectIndex */, + IOSelect /* attribute */, UInt32 /* value */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::getAttributeForConnection( IOIndex /* connectIndex */, + IOSelect /* attribute */, UInt32 * /* value */ ) +{ + return( kIOReturnUnsupported); +} + +//// HW Cursors + +IOReturn IOFramebuffer::setCursorImage( void * cursorImage ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::setCursorState( SInt32 x, SInt32 y, bool visible ) +{ + return( kIOReturnUnsupported); +} + +void IOFramebuffer::flushCursor( void ) +{ +} + +//// Interrupts + +IOReturn IOFramebuffer::registerForInterruptType( IOSelect interruptType, + IOFBInterruptProc proc, OSObject * target, void * ref, + void ** interruptRef ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::unregisterInterrupt( void * interruptRef ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::setInterruptState( void * interruptRef, UInt32 state ) +{ + return( kIOReturnUnsupported); +} + +// Apple sensing + +IOReturn IOFramebuffer::getAppleSense( + IOIndex /* connectIndex */, + UInt32 * /* senseType */, + UInt32 * /* primary */, + UInt32 * /* extended */, + UInt32 * /* displayType */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::connectFlags( IOIndex /* connectIndex */, + IODisplayModeID /* displayMode */, IOOptionBits * /* flags */ ) +{ + return( kIOReturnUnsupported); +} + +//// IOLowLevelDDCSense + +void IOFramebuffer::setDDCClock( IOIndex /* connectIndex */, UInt32 /* value */ ) +{ +} + +void IOFramebuffer::setDDCData( IOIndex /* connectIndex */, UInt32 /* value */ ) +{ +} + +bool 
IOFramebuffer::readDDCClock( IOIndex /* connectIndex */ ) +{ + return( false); +} + +bool IOFramebuffer::readDDCData( IOIndex /* connectIndex */ ) +{ + return( false); +} + +IOReturn IOFramebuffer::enableDDCRaster( bool /* enable */ ) +{ + return( kIOReturnUnsupported); +} + + +//// IOHighLevelDDCSense + +bool IOFramebuffer::hasDDCConnect( IOIndex /* connectIndex */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOFramebuffer::getDDCBlock( IOIndex /* connectIndex */, UInt32 /* blockNumber */, + IOSelect /* blockType */, IOOptionBits /* options */, + UInt8 * /* data */, IOByteCount * /* length */ ) +{ + return( kIOReturnUnsupported); +} + +OSMetaClassDefineReservedUnused(IOFramebuffer, 0); +OSMetaClassDefineReservedUnused(IOFramebuffer, 1); +OSMetaClassDefineReservedUnused(IOFramebuffer, 2); +OSMetaClassDefineReservedUnused(IOFramebuffer, 3); +OSMetaClassDefineReservedUnused(IOFramebuffer, 4); +OSMetaClassDefineReservedUnused(IOFramebuffer, 5); +OSMetaClassDefineReservedUnused(IOFramebuffer, 6); +OSMetaClassDefineReservedUnused(IOFramebuffer, 7); +OSMetaClassDefineReservedUnused(IOFramebuffer, 8); +OSMetaClassDefineReservedUnused(IOFramebuffer, 9); +OSMetaClassDefineReservedUnused(IOFramebuffer, 10); +OSMetaClassDefineReservedUnused(IOFramebuffer, 11); +OSMetaClassDefineReservedUnused(IOFramebuffer, 12); +OSMetaClassDefineReservedUnused(IOFramebuffer, 13); +OSMetaClassDefineReservedUnused(IOFramebuffer, 14); +OSMetaClassDefineReservedUnused(IOFramebuffer, 15); +OSMetaClassDefineReservedUnused(IOFramebuffer, 16); +OSMetaClassDefineReservedUnused(IOFramebuffer, 17); +OSMetaClassDefineReservedUnused(IOFramebuffer, 18); +OSMetaClassDefineReservedUnused(IOFramebuffer, 19); +OSMetaClassDefineReservedUnused(IOFramebuffer, 20); +OSMetaClassDefineReservedUnused(IOFramebuffer, 21); +OSMetaClassDefineReservedUnused(IOFramebuffer, 22); +OSMetaClassDefineReservedUnused(IOFramebuffer, 23); +OSMetaClassDefineReservedUnused(IOFramebuffer, 24); 
+OSMetaClassDefineReservedUnused(IOFramebuffer, 25); +OSMetaClassDefineReservedUnused(IOFramebuffer, 26); +OSMetaClassDefineReservedUnused(IOFramebuffer, 27); +OSMetaClassDefineReservedUnused(IOFramebuffer, 28); +OSMetaClassDefineReservedUnused(IOFramebuffer, 29); +OSMetaClassDefineReservedUnused(IOFramebuffer, 30); +OSMetaClassDefineReservedUnused(IOFramebuffer, 31); + diff --git a/iokit/Families/IOGraphics/IOFramebufferReallyPrivate.h b/iokit/Families/IOGraphics/IOFramebufferReallyPrivate.h new file mode 100644 index 000000000..5a58658ad --- /dev/null +++ b/iokit/Families/IOGraphics/IOFramebufferReallyPrivate.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +class _IOFramebufferNotifier : public IONotifier +{ + friend IOFramebuffer; + + OSDeclareDefaultStructors(_IOFramebufferNotifier) + +public: + OSSet * whence; + + IOFramebufferNotificationHandler handler; + OSObject * self; + void * ref; + bool fEnable; + + virtual void remove(); + virtual bool disable(); + virtual void enable( bool was ); +}; diff --git a/iokit/Families/IOGraphics/IOFramebufferUserClient.cpp b/iokit/Families/IOGraphics/IOFramebufferUserClient.cpp new file mode 100644 index 000000000..3667cce8c --- /dev/null +++ b/iokit/Families/IOGraphics/IOFramebufferUserClient.cpp @@ -0,0 +1,801 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 14 Aug 98 sdouglas created. + * 08 Dec 98 sdouglas cpp. 
+ */ + +#define IOFRAMEBUFFER_PRIVATE +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "IOFramebufferUserClient.h" + +#include + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOUserClient + +OSDefineMetaClassAndStructors(IOFramebufferUserClient, IOUserClient) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IOReturn myHandler(void *, void * , UInt32, IOService *, void *, unsigned int); +static IOLock * gSleepFramebuffersLock; +static OSOrderedSet * gSleepFramebuffers; +static UInt32 gWakeCount; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOFramebufferUserClient * IOFramebufferUserClient::withTask( task_t owningTask ) +{ + IOFramebufferUserClient * inst; + + if( 0 == gSleepFramebuffersLock) { + gSleepFramebuffersLock = IOLockAlloc(); + gSleepFramebuffers = OSOrderedSet::withCapacity(6); + assert( gSleepFramebuffersLock && gSleepFramebuffers ); + } + + inst = new IOFramebufferUserClient; + + if( inst && !inst->init()) { + inst->release(); + inst = 0; + } + + return( inst ); +} + +bool IOFramebufferUserClient::start( IOService * _owner ) +{ + static const IOExternalMethod methodTemplate[] = { +/* 0 */ { NULL, NULL, kIOUCScalarIScalarO, 3, 0 }, +/* 1 */ { NULL, NULL, kIOUCScalarIStructO, 3, sizeof( IOPixelInformation) }, +/* 2 */ { NULL, NULL, kIOUCScalarIScalarO, 0, 2 }, +/* 3 */ { NULL, NULL, kIOUCScalarIScalarO, 2, 0 }, +/* 4 */ { NULL, NULL, kIOUCScalarIScalarO, 2, 0 }, +/* 5 */ { NULL, NULL, kIOUCScalarIStructO, + 1, sizeof( IODisplayModeInformation) }, +/* 6 */ { NULL, NULL, kIOUCScalarIScalarO, 0, 1 }, +/* 7 */ { NULL, NULL, kIOUCStructIStructO, 0, 0xffffffff }, +/* 8 */ { NULL, NULL, kIOUCScalarIScalarO, 1, 1 }, +/* 9 */ { NULL, NULL, kIOUCStructIStructO, sizeof( Bounds), 0 }, +/* 10 */ { NULL, NULL, kIOUCScalarIScalarO, 3, 0 }, +/* 11 */ { NULL, NULL, kIOUCScalarIStructI, 3, 
0xffffffff }, +/* 12 */ { NULL, NULL, kIOUCScalarIScalarO, 1, 0 }, +/* 13 */ { NULL, NULL, kIOUCScalarIScalarO, 2, 0 }, +/* 14 */ { NULL, NULL, kIOUCScalarIScalarO, 0, 0 }, +/* 15 */ { NULL, NULL, kIOUCScalarIStructI, 1, 0xffffffff }, +/* 16 */ { NULL, NULL, kIOUCScalarIStructI, 2, 0xffffffff }, +/* 17 */ { NULL, NULL, kIOUCStructIStructO, 0xffffffff, 0xffffffff }, + + }; + + if( !super::start( _owner )) + return( false); + + owner = (IOFramebuffer *) _owner; + + assert( sizeof( methodTemplate) == sizeof( externals)); + bcopy( methodTemplate, externals, sizeof( externals )); + + externals[0].object = owner; + externals[0].func = (IOMethod) &IOFramebuffer::createSharedCursor; + + externals[1].object = owner; + externals[1].func = (IOMethod) &IOFramebuffer::getPixelInformation; + + externals[2].object = owner; + externals[2].func = (IOMethod) &IOFramebuffer::getCurrentDisplayMode; + + externals[3].object = owner; + externals[3].func = (IOMethod) &IOFramebuffer::setStartupDisplayMode; + + externals[4].object = owner; + externals[4].func = (IOMethod) &IOFramebuffer::extSetDisplayMode; + + externals[5].object = owner; + externals[5].func = + (IOMethod) &IOFramebuffer::extGetInformationForDisplayMode; + + externals[6].object = owner; + externals[6].func = (IOMethod) &IOFramebuffer::extGetDisplayModeCount; + + externals[7].object = owner; + externals[7].func = (IOMethod) &IOFramebuffer::extGetDisplayModes; + + externals[8].object = owner; + externals[8].func = (IOMethod) &IOFramebuffer::extGetVRAMMapOffset; + + externals[9].object = owner; + externals[9].func = (IOMethod) &IOFramebuffer::extSetBounds; + + externals[10].object = owner; + externals[10].func = (IOMethod) &IOFramebuffer::extSetNewCursor; + + externals[11].object = owner; + externals[11].func = (IOMethod) &IOFramebuffer::setGammaTable; + + externals[12].object = owner; + externals[12].func = (IOMethod) &IOFramebuffer::extSetCursorVisible; + + externals[13].object = owner; + externals[13].func = (IOMethod) 
&IOFramebuffer::extSetCursorPosition; + + externals[14].object = this; + externals[14].func = (IOMethod) &IOFramebufferUserClient::acknowledgeNotification; + + externals[15].object = owner; + externals[15].func = (IOMethod) &IOFramebuffer::extSetColorConvertTable; + + externals[16].object = owner; + externals[16].func = (IOMethod) &IOFramebuffer::extSetCLUTWithEntries; + + externals[17].object = owner; + externals[17].func = (IOMethod) &IOFramebuffer::extValidateDetailedTiming; + + + ackFrameBuffer = false; + ackRoot = false; + + owner->serverConnect = this; + + // register interest in sleep and wake + powerRootNotifier = registerSleepWakeInterest(myHandler, (void *) this); + // register interest in frame buffer + frameBufferNotifier = owner->registerInterest( gIOGeneralInterest, myHandler, this, 0 ); + return( true ); +} + + +IOReturn +IOFramebufferUserClient::acknowledgeNotification( void ) +{ + if( ackFrameBuffer ) { + ackFrameBuffer = false; + owner->allowPowerChange((unsigned long)PMrefcon); + } + if( ackRoot ) { + ackRoot = false; + owner->beginSystemSleep(PMrefcon); + } + + return IOPMNoErr; +} + + +// We have registered for notification of power state changes in the framebuffer and the system in general. +// We are notified here of such a change. "System" power changes refer to sleep/wake and power down/up. +// "Device" changes refer to the framebuffer. 
+ +static IOReturn +myHandler(void * us, void *, UInt32 messageType, IOService *, void * params, unsigned int) +{ + kern_return_t r; + mach_msg_header_t *msgh; + IOFramebufferUserClient * self = (IOFramebufferUserClient *)us; + + switch (messageType) { + case kIOMessageSystemWillSleep: + if ( !(self->WSKnowsWeAreOff) ) { + msgh = (mach_msg_header_t *)(self->notificationMsg); + if( msgh && (self->WSnotificationPort) ) { + msgh->msgh_id = 0; + self->WSKnowsWeAreOff = true; + self->ackRoot = true; + r = mach_msg_send_from_kernel( msgh, msgh->msgh_size); + if( KERN_SUCCESS == r) { + // WS will ack within ten seconds + ((sleepWakeNote *)params)->returnValue = 10000000; + self->PMrefcon = ((sleepWakeNote *)params)->powerRef; + IOLockLock( gSleepFramebuffersLock ); + gSleepFramebuffers->setObject(self); + IOLockUnlock( gSleepFramebuffersLock ); + return kIOReturnSuccess; + } + } + } + self->ackRoot = false; + ((sleepWakeNote *)params)->returnValue = 0; + return kIOReturnSuccess; + + case kIOMessageDeviceWillPowerOff: + if ( !self->WSKnowsWeAreOff ) { + msgh = (mach_msg_header_t *)(self->notificationMsg); + if( msgh && (self->WSnotificationPort) ) { + msgh->msgh_id = 0; + self->WSKnowsWeAreOff = true; + self->ackFrameBuffer = true; + r = mach_msg_send_from_kernel( msgh, msgh->msgh_size); + if( KERN_SUCCESS == r) { + // WS will ack within ten seconds + ((sleepWakeNote *)params)->returnValue = 10000000; + self->PMrefcon = ((sleepWakeNote *)params)->powerRef; + IOLockLock( gSleepFramebuffersLock ); + gSleepFramebuffers->setObject(self); + IOLockUnlock( gSleepFramebuffersLock ); + return kIOReturnSuccess; + } + } + } + ((sleepWakeNote *)params)->returnValue = 0; + self->ackFrameBuffer = false; + return kIOReturnSuccess; + + case kIOMessageDeviceHasPoweredOn: + + IOLockLock( gSleepFramebuffersLock ); + gWakeCount++; + if( gWakeCount == gSleepFramebuffers->getCount()) { + while( (self = (IOFramebufferUserClient *) gSleepFramebuffers->getFirstObject())) { + if ( 
self->WSKnowsWeAreOff ) { + msgh = (mach_msg_header_t *)(self->notificationMsg); + if( msgh && (self->WSnotificationPort)) { + msgh->msgh_id = 1; + self->WSKnowsWeAreOff = false; + r = mach_msg_send_from_kernel( msgh, msgh->msgh_size); + } + } + gSleepFramebuffers->removeObject( self ); + } + gWakeCount = 0; + } + IOLockUnlock( gSleepFramebuffersLock ); + return kIOReturnSuccess; + } + return kIOReturnUnsupported; +} + +IOReturn IOFramebufferUserClient::registerNotificationPort( + mach_port_t port, + UInt32 type, + UInt32 refCon ) +{ + static mach_msg_header_t init_msg = { + // mach_msg_bits_t msgh_bits; + MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,0), + // mach_msg_size_t msgh_size; + sizeof (mach_msg_header_t), + // mach_port_t msgh_remote_port; + MACH_PORT_NULL, + // mach_port_t msgh_local_port; + MACH_PORT_NULL, + // mach_msg_size_t msgh_reserved; + 0, + // mach_msg_id_t msgh_id; + 0 + }; + + if ( notificationMsg == NULL ) + notificationMsg = IOMalloc( sizeof (mach_msg_header_t) ); + // Initialize the power state change notification message. + *((mach_msg_header_t *)notificationMsg) = init_msg; + + ((mach_msg_header_t *)notificationMsg)->msgh_remote_port = port; + + WSnotificationPort = port; + WSKnowsWeAreOff = false; + return( kIOReturnSuccess); +} + +IOReturn IOFramebufferUserClient::getNotificationSemaphore( + UInt32 interruptType, semaphore_t * semaphore ) +{ + return( owner->getNotificationSemaphore(interruptType, semaphore) ); +} + +// The window server is going away. +// We disallow power down to prevent idle sleep while the console is running. 
+IOReturn IOFramebufferUserClient::clientClose( void ) +{ + owner->close(); + if( owner->isConsoleDevice()) + getPlatform()->setConsoleInfo( 0, kPEAcquireScreen); + + if( powerRootNotifier) { + powerRootNotifier->remove(); + powerRootNotifier = 0; + } + if( frameBufferNotifier) { + frameBufferNotifier->remove(); + frameBufferNotifier = 0; + } + if( notificationMsg) { + IOFree( notificationMsg, sizeof (mach_msg_header_t)); + notificationMsg = 0; + } + owner->serverConnect = 0; + WSnotificationPort = NULL; + detach( owner); + + return( kIOReturnSuccess); +} + +IOService * IOFramebufferUserClient::getService( void ) +{ + return( owner ); +} + +IOReturn IOFramebufferUserClient::clientMemoryForType( UInt32 type, + IOOptionBits * flags, IOMemoryDescriptor ** memory ) +{ + IOMemoryDescriptor * mem; + IOReturn err; + + switch( type) { + + case kIOFBCursorMemory: + mem = owner->sharedCursor; + mem->retain(); + break; + + case kIOFBVRAMMemory: + mem = owner->getVRAMRange(); + break; + + default: + mem = (IOMemoryDescriptor *) owner->userAccessRanges->getObject( type ); + mem->retain(); + break; + } + + *memory = mem; + if( mem) + err = kIOReturnSuccess; + else + err = kIOReturnBadArgument; + + return( err ); +} + +IOExternalMethod * IOFramebufferUserClient::getExternalMethodForIndex( UInt32 index ) +{ + if( index < (sizeof( externals) / sizeof( externals[0]))) + return( externals + index); + else + return( NULL); +} + +IOReturn IOFramebufferUserClient::setProperties( OSObject * properties ) +{ + OSDictionary * dict; + OSArray * array; + IOReturn kr = kIOReturnUnsupported; + + if( !(dict = OSDynamicCast( OSDictionary, properties))) + return( kIOReturnBadArgument); + + if( (array = OSDynamicCast(OSArray, + dict->getObject( kIOFBDetailedTimingsKey)))) + kr = owner->setDetailedTimings( array ); + else + kr = kIOReturnBadArgument; + + return( kr ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + 
+OSDefineMetaClassAndStructors(IOGraphicsEngineClient, IOUserClient) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOGraphicsEngineClient * IOGraphicsEngineClient::withTask( task_t owningTask ) +{ + IOGraphicsEngineClient * inst; + + inst = new IOGraphicsEngineClient; + + if( inst && !inst->init()) { + inst->release(); + inst = 0; + } + if( inst) + inst->owningTask = owningTask; + + return( inst ); +} + +bool IOGraphicsEngineClient::start( IOService * _owner ) +{ + + static const IOExternalMethod methodTemplate[] = { +/* 0 */ { NULL, NULL, kIOUCScalarIScalarO, 3, 1 }, +/* 1 */ { NULL, NULL, kIOUCScalarIScalarO, 2, 2 }, +/* 2 */ { NULL, NULL, kIOUCScalarIScalarO, 3, 2 }, +/* 3 */ { NULL, NULL, kIOUCScalarIScalarO, 1, 0 }, + }; + + IOGraphicsEngineContext * mem; + IOByteCount size; + + if( !super::start( _owner )) + return( false); + + owner = (IOFramebuffer *) _owner; + agpDev = OSDynamicCast( IOAGPDevice, owner->getProvider()); + descriptors = OSArray::withCapacity( 1 ); + + bcopy( methodTemplate, externals, sizeof( methodTemplate )); + + externals[0].object = this; + externals[0].func = (IOMethod) &IOGraphicsEngineClient::addUserRange; + externals[1].object = this; + externals[1].func = (IOMethod) &IOGraphicsEngineClient::createAGPSpace; + externals[2].object = this; + externals[2].func = (IOMethod) &IOGraphicsEngineClient::commitAGPMemory; + externals[3].object = this; + externals[3].func = (IOMethod) &IOGraphicsEngineClient::releaseAGPMemory; + + if( 0 == owner->engineContext) { + + size = round_page( sizeof( IOGraphicsEngineContext)); + owner->engineContext = IOBufferMemoryDescriptor::withCapacity( + size, kIODirectionNone, false ); + if( !owner->engineContext) + return( kIOReturnNoMemory ); + owner->engineContext->setLength( size ); + + mem = (IOGraphicsEngineContext *) + owner->engineContext->getBytesNoCopy(); + memset((char *)mem, 0, size); + mem->version = kIOGraphicsEngineContextVersion; + mem->structSize = size; + } + + 
return( true ); +} + +void IOGraphicsEngineClient::free() +{ + if( descriptors) + descriptors->free(); + + if( agpDev && haveAGP) + agpDev->destroyAGPSpace(); + + super::free(); +} + +IOReturn IOGraphicsEngineClient::clientClose( void ) +{ + detach( owner ); + + return( kIOReturnSuccess); +} + +IOService * IOGraphicsEngineClient::getService( void ) +{ + return( owner ); +} + +IOReturn IOGraphicsEngineClient::clientMemoryForType( UInt32 type, + IOOptionBits * options, IOMemoryDescriptor ** memory ) +{ + IOMemoryDescriptor * mem; + + switch( type) { + case kIOGraphicsEngineContext: + mem = owner->engineContext; + break; + default: + mem = (IOMemoryDescriptor *) owner->engineAccessRanges->getObject( type ); + break; + } + + if( mem) { + mem->retain(); + *memory = mem; + return( kIOReturnSuccess); + } else + return( kIOReturnBadArgument); +} + +IOExternalMethod * IOGraphicsEngineClient::getExternalMethodForIndex( UInt32 index ) +{ + if( index < (sizeof( externals) / sizeof( externals[0]))) + return( externals + index); + else + return( NULL); +} + +IOReturn IOGraphicsEngineClient::addUserRange( vm_address_t start, + vm_size_t length, UInt32 apertureIndex, IOPhysicalAddress * phys ) +{ + IODeviceMemory * mem; + IOReturn err = kIOReturnSuccess; + OSArray * ranges; + int i; + IODeviceMemory * aperture + = owner->getProvider()->getDeviceMemoryWithIndex( apertureIndex ); + + if( 0 == aperture) + return( kIOReturnBadArgument ); + + ranges = owner->engineAccessRanges; + i = 0; + while( (mem = (IODeviceMemory *) ranges->getObject( i++ ))) { + if( (mem->getPhysicalAddress() == + (start + aperture->getPhysicalAddress())) + && (length <= mem->getLength()) ) + break; + } + + if( 0 == mem) { + mem = IODeviceMemory::withSubRange( + aperture, start, length ); + if( mem) { + owner->engineAccessRanges->setObject( mem ); + err = kIOReturnSuccess; + } else + err = kIOReturnNoResources; + } + + if( kIOReturnSuccess == err) + *phys = mem->getPhysicalAddress(); + + return( err ); +} + 
+IOReturn IOGraphicsEngineClient::createAGPSpace( IOOptionBits options, + IOPhysicalLength length, + IOPhysicalAddress * address, + IOPhysicalLength * lengthOut ) +{ + IOReturn err; + + if( !agpDev) + return( kIOReturnUnsupported ); + + *lengthOut = length; + err = agpDev->createAGPSpace( options, address, lengthOut ); + haveAGP = (kIOReturnSuccess == err); + + return( err ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class _IOGraphicsClientMemory : public OSObject { + OSDeclareDefaultStructors(_IOGraphicsClientMemory) +public: + IOMemoryDescriptor * memory; + IOAGPDevice * agpDev; + IOByteCount agpOffset; + + virtual bool init(); + virtual void free(); +}; + +OSDefineMetaClassAndStructors(_IOGraphicsClientMemory, OSObject) + +bool _IOGraphicsClientMemory::init() +{ + return( OSObject::init()); +} + +void _IOGraphicsClientMemory::free() +{ + if( memory) { + agpDev->getAGPRangeAllocator()->deallocate( agpOffset, + memory->getLength() ); + memory->complete(); + memory->release(); + } + + OSObject::free(); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOGraphicsEngineClient::commitAGPMemory( vm_address_t start, + vm_size_t length, IOOptionBits options, + void ** ref, IOByteCount * offset ) +{ + _IOGraphicsClientMemory * graphicsMem; + IORangeAllocator * rangeAllocator; + IOByteCount agpOffset; + IOReturn err = kIOReturnNoMemory; + bool ok; + + if( !agpDev) + return( kIOReturnUnsupported ); + if( (!start) || (!length)) + return( kIOReturnBadArgument ); + rangeAllocator = agpDev->getAGPRangeAllocator(); + if( !rangeAllocator) + return( kIOReturnUnsupported ); + + do { + graphicsMem = new _IOGraphicsClientMemory; + if( (!graphicsMem) || (!graphicsMem->init())) + continue; + + ok = rangeAllocator->allocate( length, (IORangeScalar *) &agpOffset ); + if( !ok) { + err = kIOReturnNoSpace; + continue; + } + + graphicsMem->agpDev = agpDev; + graphicsMem->agpOffset = agpOffset; + + 
graphicsMem->memory = IOMemoryDescriptor::withAddress( start, length, + kIODirectionOut, owningTask ); + if( !graphicsMem->memory) + continue; + + err = graphicsMem->memory->prepare(); + if( err != kIOReturnSuccess) + continue; + + err = agpDev->commitAGPMemory( graphicsMem->memory, agpOffset ); + if( err != kIOReturnSuccess) + continue; + + *ref = (void *) descriptors->getCount(); + *offset = agpOffset; + descriptors->setObject( graphicsMem ); + + } while( false ); + + if( graphicsMem) + graphicsMem->release(); + + if( (kIOReturnSuccess != err) && (!graphicsMem)) + rangeAllocator->deallocate( agpOffset, length ); + + return( err ); +} + +IOReturn IOGraphicsEngineClient::releaseAGPMemory( void * ref ) +{ + _IOGraphicsClientMemory * graphicsMem; + UInt32 index = (UInt32) ref; + + if( 0 == (graphicsMem = (_IOGraphicsClientMemory *) + descriptors->getObject( index ))) + return( kIOReturnBadArgument ); + + descriptors->removeObject( index ); + + return( kIOReturnSuccess ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClassAndStructors(IOFramebufferSharedUserClient, IOUserClient) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOFramebufferSharedUserClient * IOFramebufferSharedUserClient::withTask( + task_t owningTask ) +{ + IOFramebufferSharedUserClient * inst; + + inst = new IOFramebufferSharedUserClient; + + if( inst && !inst->init()) { + inst->release(); + inst = 0; + } + + return( inst ); +} + +bool IOFramebufferSharedUserClient::start( IOService * _owner ) +{ + + static const IOExternalMethod methodTemplate[] = { + }; + + if( !super::start( _owner )) + return( false); + + owner = (IOFramebuffer *) _owner; + + bcopy( methodTemplate, externals, sizeof( methodTemplate )); + + return( true ); +} + +void IOFramebufferSharedUserClient::free( void ) +{ + retain(); retain(); + owner->sharedConnect = 0; + detach( owner); + super::free(); +} + +void IOFramebufferSharedUserClient::release() 
const +{ + super::release(2); +} + +IOReturn IOFramebufferSharedUserClient::clientClose( void ) +{ + return( kIOReturnSuccess); +} + +IOService * IOFramebufferSharedUserClient::getService( void ) +{ + return( owner ); +} + +IOReturn IOFramebufferSharedUserClient::clientMemoryForType( UInt32 type, + IOOptionBits * options, IOMemoryDescriptor ** memory ) +{ + IOMemoryDescriptor * mem = 0; + IOReturn err; + + switch( type) { + + case kIOFBCursorMemory: + mem = owner->sharedCursor; + mem->retain(); + *options = kIOMapReadOnly; + break; + + case kIOFBVRAMMemory: + mem = owner->getVRAMRange(); + break; + } + + *memory = mem; + if( mem) + err = kIOReturnSuccess; + else + err = kIOReturnBadArgument; + + return( err ); +} + +IOReturn IOFramebufferSharedUserClient::getNotificationSemaphore( + UInt32 interruptType, semaphore_t * semaphore ) +{ + return( owner->getNotificationSemaphore(interruptType, semaphore) ); +} + +IOExternalMethod * IOFramebufferSharedUserClient::getExternalMethodForIndex( UInt32 index ) +{ + if( index < (sizeof( externals) / sizeof( externals[0]))) + return( externals + index); + else + return( NULL); +} + diff --git a/iokit/Families/IOGraphics/IOFramebufferUserClient.h b/iokit/Families/IOGraphics/IOFramebufferUserClient.h new file mode 100644 index 000000000..78d5be5d5 --- /dev/null +++ b/iokit/Families/IOGraphics/IOFramebufferUserClient.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOFRAMEBUFFERUSERCLIENT_H +#define _IOKIT_IOFRAMEBUFFERUSERCLIENT_H + +#include +#include +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOFramebufferUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOFramebufferUserClient) + +private: + + IONotifier * powerRootNotifier; + IONotifier * frameBufferNotifier; + + IOExternalMethod externals[ 18 ]; + + + +public: + + IOFramebuffer * owner; + mach_port_t WSnotificationPort; // how we tell window server of power changes + void * notificationMsg; // Msg to be sent to Window Server. 
+ + bool WSKnowsWeAreOff; // true after informing WS that framebuffer is off + bool ackRoot; // true if we must ack the root domain + bool ackFrameBuffer; // true if we must ack the framebuffer + void * PMrefcon; // refcon to return to Power Management + + // IOUserClient methods + virtual IOReturn clientClose( void ); + + virtual IOService * getService( void ); + + virtual IOReturn clientMemoryForType( UInt32 type, + IOOptionBits * options, IOMemoryDescriptor ** memory ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + virtual IOReturn registerNotificationPort( mach_port_t, UInt32, UInt32 ); + virtual IOReturn getNotificationSemaphore( UInt32 interruptType, + semaphore_t * semaphore ); + + // others + + static IOFramebufferUserClient * withTask( task_t owningTask ); + + virtual bool start( IOService * provider ); + virtual IOReturn setProperties( OSObject * properties ); + + virtual IOReturn acknowledgeNotification(void); + +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOGraphicsEngineClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOGraphicsEngineClient) + +private: + + IOFramebuffer * owner; + task_t owningTask; + IOAGPDevice * agpDev; + bool haveAGP; + OSArray * descriptors; + + IOExternalMethod externals[ 4 ]; + +public: + // IOUserClient methods + virtual IOReturn clientClose( void ); + virtual void free(); + + virtual IOService * getService( void ); + + virtual IOReturn clientMemoryForType( UInt32 type, + IOOptionBits * options, IOMemoryDescriptor ** memory ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + // others + + static IOGraphicsEngineClient * withTask( task_t owningTask ); + virtual bool start( IOService * provider ); + + virtual IOReturn addUserRange( vm_address_t start, vm_size_t length, + UInt32 aperture, IOPhysicalAddress * phys ); + + virtual IOReturn createAGPSpace( IOOptionBits options, + IOPhysicalLength length, + 
IOPhysicalAddress * address, + IOPhysicalLength * lengthOut ); + + virtual IOReturn commitAGPMemory( vm_address_t start, + vm_size_t length, IOOptionBits options, + void ** ref, IOByteCount * offset ); + + virtual IOReturn releaseAGPMemory( void * ref ); + +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOFramebufferSharedUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOFramebufferSharedUserClient) + +private: + + IOFramebuffer * owner; + + IOExternalMethod externals[ 0 ]; + +public: + virtual void free(); + virtual void release() const; + + // IOUserClient methods + virtual IOReturn clientClose( void ); + + virtual IOService * getService( void ); + + virtual IOReturn clientMemoryForType( UInt32 type, + IOOptionBits * options, IOMemoryDescriptor ** memory ); + + virtual IOReturn getNotificationSemaphore( UInt32 notification_type, + semaphore_t * semaphore ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + // others + static IOFramebufferSharedUserClient * withTask( task_t owningTask ); + virtual bool start( IOService * provider ); +}; + + +#endif /* ! _IOKIT_IOFRAMEBUFFERUSERCLIENT_H */ diff --git a/iokit/Families/IOGraphics/IOGraphicsDevice.cpp b/iokit/Families/IOGraphics/IOGraphicsDevice.cpp new file mode 100644 index 000000000..4718ec669 --- /dev/null +++ b/iokit/Families/IOGraphics/IOGraphicsDevice.cpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 30 Nov 98 sdouglas created. + */ + + +#include +#include + +#include + +#define super IOService + +OSDefineMetaClass( IOGraphicsDevice, IOService ) +OSDefineAbstractStructors( IOGraphicsDevice, IOService ) + diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCheckReport.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCheckReport.c new file mode 100644 index 000000000..ca7be8202 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCheckReport.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDCheckReport.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2001 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (DF) David Ferguson + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 1/2/01 DF Change length checking to check for the minimum size instead of + the "exact" size. + 12/12/00 KH Correcting cast of void * + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDCheckReport - Check the Report ID, Type, and Length + * + * Input: + * reportType - The Specified Report Type + * ptPreparsedData - The Preparsed Data + * ptReportItem - The Report Item + * psReport - The Report + * iReportLength - The Report Length + * Output: + * Returns: + * kHIDSuccess, HidP_IncompatibleReportID, + * kHIDInvalidReportLengthErr, kHIDInvalidReportTypeErr + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDCheckReport(HIDReportType reportType, HIDPreparsedDataRef preparsedDataRef, + HIDReportItem *ptReportItem, void *report, UInt32 iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + int reportID, reportIndex; + int iExpectedLength; + Byte * psReport = (Byte *)report; +/* + * See if this is the correct Report ID +*/ + reportID = psReport[0]&0xFF; + if ((ptPreparsedData->reportCount > 1) + && (reportID != ptReportItem->globals.reportID)) + return kHIDIncompatibleReportErr; +/* + * See if this is the correct ReportType +*/ + if (reportType != ptReportItem->reportType) + return 
kHIDIncompatibleReportErr; +/* + * Check for the correct Length for the Type +*/ + reportIndex = ptReportItem->globals.reportIndex; + switch(reportType) + { + case kHIDInputReport: + iExpectedLength = (ptPreparsedData->reports[reportIndex].inputBitCount + 7)/8; + break; + case kHIDOutputReport: + iExpectedLength = (ptPreparsedData->reports[reportIndex].outputBitCount + 7)/8; + break; + case kHIDFeatureReport: + iExpectedLength = (ptPreparsedData->reports[reportIndex].featureBitCount + 7)/8; + break; + default: + return kHIDInvalidReportTypeErr; + } + if (iExpectedLength > iReportLength) + return kHIDInvalidReportLengthErr; + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCountDescriptorItems.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCountDescriptorItems.c new file mode 100644 index 000000000..f3a6a12c3 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCountDescriptorItems.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDCountDescriptorItems.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +//#include + +/* + *------------------------------------------------------------------------------ + * + * HIDCountDescriptorItems + * + * Input: + * ptDescriptor - Descriptor Pointer Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDCountDescriptorItems(HIDReportDescriptor *ptDescriptor, HIDPreparsedDataPtr ptPreparsedData) +{ + OSStatus iStatus; + ByteCount iSpaceRequired; + HIDItem *ptItem; + Byte *pMem; +/* + * Initialize Counters +*/ + int collectionCount = 1; + int reportItemCount = 0; + int iUsages = 0; + int iUsageRanges = 0; + int iStrings = 0; + int iStringRanges = 0; + int iDesigs = 0; + int iDesigRanges = 0; + int reportCount = 1; + int globalsNesting = 0; + int iMaxGlobalsNesting = 0; + int collectionNesting = 0; + int iMaxCollectionNesting = 0; +/* + * Disallow NULL Pointers +*/ + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Initialize the memory allocation pointer +*/ + ptPreparsedData->rawMemPtr = NULL; +/* + * Initialize the Descriptor Pointer Structure +*/ + ptDescriptor->index = 0; + ptItem = &ptDescriptor->item; +/* + * Count various items in the descriptor +*/ + while ((iStatus = HIDNextItem(ptDescriptor)) == kHIDSuccess) + { + switch 
(ptItem->itemType) + { + case kHIDTypeMain: + switch (ptItem->tag) + { + case kHIDTagCollection: + collectionCount++; + collectionNesting++; + if (collectionNesting > iMaxCollectionNesting) + iMaxCollectionNesting = collectionNesting; + break; + case kHIDTagEndCollection: + if (collectionNesting-- == 0) + return kHIDInvalidPreparsedDataErr; + break; + case kHIDTagInput: + case kHIDTagOutput: + case kHIDTagFeature: + reportItemCount++; + break; + } + break; + case kHIDTypeGlobal: + switch (ptItem->tag) + { + case kHIDTagReportID: + reportCount++; + break; + case kHIDTagPush: + globalsNesting++; + if (globalsNesting > iMaxGlobalsNesting) + iMaxGlobalsNesting = globalsNesting; + break; + case kHIDTagPop: + globalsNesting--; + if (globalsNesting < 0) + return kHIDInvalidPreparsedDataErr; + break; + } + break; + case kHIDTypeLocal: + switch (ptItem->tag) + { + case kHIDTagUsage: + iUsages++; + break; + case kHIDTagUsageMinimum: + case kHIDTagUsageMaximum: + iUsageRanges++; + break; + case kHIDTagStringIndex: + iStrings++; + break; + case kHIDTagStringMinimum: + case kHIDTagStringMaximum: + iStringRanges++; + break; + case kHIDTagDesignatorIndex: + iDesigs++; + break; + case kHIDTagDesignatorMinimum: + case kHIDTagDesignatorMaximum: + iDesigRanges++; + break; + } + } + } +/* + * Disallow malformed descriptors +*/ + if ((collectionNesting != 0) + || (collectionCount == 1) + || (reportItemCount == 0) + || ((iUsageRanges & 1) == 1) + || ((iStringRanges & 1) == 1) + || ((iDesigRanges & 1) == 1)) + return kHIDInvalidPreparsedDataErr; +/* + * Summarize the Indices and Ranges +*/ + iUsages += (iUsageRanges/2); + iStrings += (iStringRanges/2); + iDesigs += (iDesigRanges/2); +/* + * Calculate the space needed for the structures +*/ + iSpaceRequired = (sizeof(HIDCollection) * collectionCount) + + (sizeof(HIDReportItem) * reportItemCount) + + (sizeof(HIDReportSizes) * reportCount) + + (sizeof(HIDP_UsageItem) * iUsages) + + (sizeof(HIDStringItem) * iStrings) + + 
(sizeof(HIDDesignatorItem) * iDesigs) + + (sizeof(int) * iMaxCollectionNesting) + + (sizeof(HIDGlobalItems) * iMaxGlobalsNesting); + pMem = PoolAllocateResident(iSpaceRequired, kShouldClearMem); + + if (pMem == NULL) + return kHIDNotEnoughMemoryErr; + ptPreparsedData->rawMemPtr = pMem; + ptPreparsedData->numBytesAllocated = iSpaceRequired; +/* + * Allocate space to the various structures +*/ + ptPreparsedData->collections = (HIDCollection *) pMem; + ptPreparsedData->collectionCount = 0; + pMem += (sizeof(HIDCollection) * collectionCount); + ptPreparsedData->reportItems = (HIDReportItem *) pMem; + ptPreparsedData->reportItemCount = 0; + pMem += (sizeof(HIDReportItem) * reportItemCount); + ptPreparsedData->reports = (HIDReportSizes *) pMem; + ptPreparsedData->reportCount = 0; + pMem += (sizeof(HIDReportSizes) * reportCount); + ptPreparsedData->usageItems = (HIDP_UsageItem *) pMem; + ptPreparsedData->usageItemCount = 0; + pMem += (sizeof(HIDP_UsageItem) * iUsages); + ptPreparsedData->stringItems = (HIDStringItem *) pMem; + ptPreparsedData->stringItemCount = 0; + pMem += (sizeof(HIDStringItem) * iStrings); + ptPreparsedData->desigItems = (HIDDesignatorItem *) pMem; + ptPreparsedData->desigItemCount = 0; + pMem += (sizeof(HIDDesignatorItem) * iDesigs); + ptDescriptor->collectionStack = (SInt32 *) pMem; + ptDescriptor->collectionNesting = 0; + pMem += (sizeof(SInt32) * iMaxCollectionNesting); + ptDescriptor->globalsStack = (HIDGlobalItems *) pMem; + ptDescriptor->globalsNesting = 0; + if (iStatus == kHIDEndOfDescriptorErr) + return kHIDSuccess; + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonCaps.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonCaps.c new file mode 100644 index 000000000..53b910883 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonCaps.c @@ -0,0 +1,493 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetButtonCaps.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2001 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 4/21/00 KH Added HIDGetButtonCapabilities and + HIDGetSpecificButtonCapabilities that now allow users to find + HID report units and exponents. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 5/3/99 BWS We were not setting isStringRange, isDesignatorRange, and + isAbsolute + 3/7/99 BWS When range/notRange were made a union, we missed this case where + they were both being set indescriminately + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetSpecificButtonCaps - Get the binary values for a report type + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * buttonCaps - ButtonCaps Array + * piButtonCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piButtonCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetSpecificButtonCaps(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + HIDButtonCapsPtr buttonCaps, + UInt32 *piButtonCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDCollection *ptParent; + HIDReportItem *ptReportItem; + HIDP_UsageItem *ptUsageItem; + HIDStringItem *ptStringItem; + HIDDesignatorItem *ptDesignatorItem; + HIDP_UsageItem *ptFirstCollectionUsageItem; + HIDButtonCaps *ptCapability; + int iR, iU; + int parent; + int iReportItem, iUsageItem; + int iMaxCaps; + // There are 3 versions of HID Parser code all based on the same logic: OS 9 HID Library; + // OSX xnu; OSX IOKitUser. They should all be nearly the same logic. This version (xnu) + // is based on older OS 9 code. This version has added logic to maintain this startBit. 
+ // I don't know why it is here, but believe if it is needed here, it would probably be + // needed in the other two implementations. Didn't have time to determine that at this + // time, so i'll leave this comment to remind me that we should reconcile the 3 versions. + UInt32 startBit; // Added esb 9-29-99 + /*If I remember correctly, it was an optimization. Each time you ask for + a specific value capability, it would search through the entire report + descriptor to find it (my recollection is kind of hazy on this part). + The start bit allowed somebody (client maybe) to cache the information + on where in the report a specific value resided and the use that later + when fetching that value. That way, you don't have to keep going + through the parse tree to find where a value exists. I don't remember + if the implementation was completed or if I even used it. -esb */ +/* + * Disallow Null Pointers +*/ + if ((buttonCaps == NULL) + || (piButtonCapsLength == NULL) + || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Save the buffer size +*/ + iMaxCaps = *piButtonCapsLength; + *piButtonCapsLength = 0; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; +/* + * Search only reports of the proper type +*/ + if ((ptReportItem->reportType == reportType) + && HIDIsButton(ptReportItem, preparsedDataRef)) + { + startBit = ptReportItem->startBit; +/* + * Search the usages +*/ + for (iU=0; iUusageItemCount; iU++) + { +/* + * Copy all usages if the usage above is zero + * or copy all that are "match" 
+*/ + iUsageItem = ptReportItem->firstUsageItem + iU; + ptUsageItem = &ptPreparsedData->usageItems[iUsageItem]; + + // ¥¥ we assume there is a 1-1 corresponence between usage items, string items, and designator items + // ¥¥Êthis is not necessarily the case, but its better than nothing + ptStringItem = &ptPreparsedData->stringItems[ptReportItem->firstStringItem + iU]; + ptDesignatorItem = &ptPreparsedData->desigItems[ptReportItem->firstDesigItem + iU]; + + if (HIDUsageInRange(ptUsageItem,usagePage,usage)) + { +/* + * Only copy if there's room +*/ + if (*piButtonCapsLength >= iMaxCaps) + return kHIDBufferTooSmallErr; + ptCapability = &buttonCaps[(*piButtonCapsLength)++]; +/* + * Populate the Capability Structure +*/ + parent = ptReportItem->parent; + ptParent = &ptPreparsedData->collections[parent]; + ptFirstCollectionUsageItem + = &ptPreparsedData->usageItems[ptParent->firstUsageItem]; + ptCapability->collection = parent; + ptCapability->collectionUsagePage = ptParent->usagePage; + ptCapability->collectionUsage = ptFirstCollectionUsageItem->usage; + ptCapability->bitField = ptReportItem->dataModes; + ptCapability->reportID = ptReportItem->globals.reportID; + ptCapability->usagePage = ptUsageItem->usagePage; + + ptCapability->isStringRange = false; // ¥¥ todo: set this and stringMin,stringMax,stringIndex + ptCapability->isDesignatorRange = false; // ¥¥ todo: set this and designatorMin,designatorMax,designatorIndex + ptCapability->isAbsolute = !(ptReportItem->dataModes & kHIDDataRelative); + + ptCapability->isRange = ptUsageItem->isRange; + if (ptUsageItem->isRange) + { + ptCapability->u.range.usageMin = ptUsageItem->usageMinimum; + ptCapability->u.range.usageMax = ptUsageItem->usageMaximum; + } + else + ptCapability->u.notRange.usage = ptUsageItem->usage; + + // if there really are that many items + if (iU < ptReportItem->stringItemCount) + { + ptCapability->isStringRange = ptStringItem->isRange; + + if (ptStringItem->isRange) + { + ptCapability->u.range.stringMin = 
ptStringItem->minimum; + ptCapability->u.range.stringMax = ptStringItem->maximum; + } + else + ptCapability->u.notRange.stringIndex = ptStringItem->index; + } + // default, clear it + else + { + ptCapability->isStringRange = false; + ptCapability->u.notRange.stringIndex = 0; + } + + // if there really are that many items + if (iU < ptReportItem->desigItemCount) + { + ptCapability->isDesignatorRange = ptDesignatorItem->isRange; + + if (ptDesignatorItem->isRange) + { + ptCapability->u.range.designatorMin = ptDesignatorItem->minimum; + ptCapability->u.range.designatorMax = ptDesignatorItem->maximum; + } + else + ptCapability->u.notRange.designatorIndex = ptDesignatorItem->index; + } + // default, clear it + else + { + ptCapability->isDesignatorRange = false; + ptCapability->u.notRange.designatorIndex = 0; + } + ptCapability->startBit = startBit; + } + startBit += (ptReportItem->globals.reportSize * ptReportItem->globals.reportCount); + } + } + } + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDGetButtonCaps - Get the binary values for a report type + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * buttonCaps - ButtonCaps Array + * piButtonCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piButtonCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetButtonCaps(HIDReportType reportType, + HIDButtonCapsPtr buttonCaps, + UInt32 *piButtonCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + return HIDGetSpecificButtonCaps(reportType,0,0,0,buttonCaps, + piButtonCapsLength,preparsedDataRef); +} + + +/* + *------------------------------------------------------------------------------ + * + * HIDGetSpecificButtonCapabilities - Get the binary values for a report type + * This is the same as HIDGetSpecificButtonCaps, + * except that it takes a 
HIDButtonCapabilitiesPtr + * so it can return units and unitExponents. + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * buttonCaps - ButtonCaps Array + * piButtonCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piButtonCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetSpecificButtonCapabilities(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + HIDButtonCapabilitiesPtr buttonCaps, + UInt32 *piButtonCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDCollection *ptParent; + HIDReportItem *ptReportItem; + HIDP_UsageItem *ptUsageItem; + HIDStringItem *ptStringItem; + HIDDesignatorItem *ptDesignatorItem; + HIDP_UsageItem *ptFirstCollectionUsageItem; + HIDButtonCapabilities *ptCapability; + int iR, iU; + int parent; + int iReportItem, iUsageItem; + int iMaxCaps; + // There are 3 versions of HID Parser code all based on the same logic: OS 9 HID Library; + // OSX xnu; OSX IOKitUser. They should all be nearly the same logic. This version (xnu) + // is based on older OS 9 code. This version has added logic to maintain this startBit. + // I don't know why it is here, but believe if it is needed here, it would probably be + // needed in the other two implementations. Didn't have time to determine that at this + // time, so i'll leave this comment to remind me that we should reconcile the 3 versions. 
+ UInt32 startBit; +/* + * Disallow Null Pointers +*/ + if ((buttonCaps == NULL) + || (piButtonCapsLength == NULL) + || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Save the buffer size +*/ + iMaxCaps = *piButtonCapsLength; + *piButtonCapsLength = 0; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; +/* + * Search only reports of the proper type +*/ + if ((ptReportItem->reportType == reportType) + && HIDIsButton(ptReportItem, preparsedDataRef)) + { + startBit = ptReportItem->startBit; +/* + * Search the usages +*/ + for (iU=0; iUusageItemCount; iU++) + { +/* + * Copy all usages if the usage above is zero + * or copy all that are "match" +*/ + iUsageItem = ptReportItem->firstUsageItem + iU; + ptUsageItem = &ptPreparsedData->usageItems[iUsageItem]; + + // ¥¥ we assume there is a 1-1 corresponence between usage items, string items, and designator items + // ¥¥Êthis is not necessarily the case, but its better than nothing + ptStringItem = &ptPreparsedData->stringItems[ptReportItem->firstStringItem + iU]; + ptDesignatorItem = &ptPreparsedData->desigItems[ptReportItem->firstDesigItem + iU]; + + if (HIDUsageInRange(ptUsageItem,usagePage,usage)) + { +/* + * Only copy if there's room +*/ + if (*piButtonCapsLength >= iMaxCaps) + return kHIDBufferTooSmallErr; + ptCapability = &buttonCaps[(*piButtonCapsLength)++]; +/* + * Populate the Capability Structure +*/ + parent = ptReportItem->parent; + ptParent = &ptPreparsedData->collections[parent]; + ptFirstCollectionUsageItem + = 
&ptPreparsedData->usageItems[ptParent->firstUsageItem]; + ptCapability->collection = parent; + ptCapability->collectionUsagePage = ptParent->usagePage; + ptCapability->collectionUsage = ptFirstCollectionUsageItem->usage; + ptCapability->bitField = ptReportItem->dataModes; + ptCapability->reportID = ptReportItem->globals.reportID; + ptCapability->usagePage = ptUsageItem->usagePage; + ptCapability->unitExponent = ptReportItem->globals.unitExponent; + ptCapability->units = ptReportItem->globals.units; +// ptCapability->reserved = 0; // for future OS 9 expansion + ptCapability->startBit = 0; // init esb added field. +// ptCapability->pbVersion = kHIDCurrentCapabilitiesPBVersion; + ptCapability->pbVersion = 2; + + ptCapability->isStringRange = false; // ¥¥ todo: set this and stringMin,stringMax,stringIndex + ptCapability->isDesignatorRange = false; // ¥¥ todo: set this and designatorMin,designatorMax,designatorIndex + ptCapability->isAbsolute = !(ptReportItem->dataModes & kHIDDataRelative); + + ptCapability->isRange = ptUsageItem->isRange; + if (ptUsageItem->isRange) + { + ptCapability->u.range.usageMin = ptUsageItem->usageMinimum; + ptCapability->u.range.usageMax = ptUsageItem->usageMaximum; + } + else + ptCapability->u.notRange.usage = ptUsageItem->usage; + + // if there really are that many items + if (iU < ptReportItem->stringItemCount) + { + ptCapability->isStringRange = ptStringItem->isRange; + + if (ptStringItem->isRange) + { + ptCapability->u.range.stringMin = ptStringItem->minimum; + ptCapability->u.range.stringMax = ptStringItem->maximum; + } + else + ptCapability->u.notRange.stringIndex = ptStringItem->index; + } + // default, clear it + else + { + ptCapability->isStringRange = false; + ptCapability->u.notRange.stringIndex = 0; + } + + // if there really are that many items + if (iU < ptReportItem->desigItemCount) + { + ptCapability->isDesignatorRange = ptDesignatorItem->isRange; + + if (ptDesignatorItem->isRange) + { + ptCapability->u.range.designatorMin = 
ptDesignatorItem->minimum; + ptCapability->u.range.designatorMax = ptDesignatorItem->maximum; + } + else + ptCapability->u.notRange.designatorIndex = ptDesignatorItem->index; + } + // default, clear it + else + { + ptCapability->isDesignatorRange = false; + ptCapability->u.notRange.designatorIndex = 0; + } + ptCapability->startBit = startBit; + } + startBit += (ptReportItem->globals.reportSize * ptReportItem->globals.reportCount); + } + } + } + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDGetButtonCapabilities - Get the binary values for a report type + * This is the same as HIDGetButtonCaps, + * except that it takes a HIDButtonCapabilitiesPtr + * so it can return units and unitExponents. + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * buttonCaps - ButtonCaps Array + * piButtonCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piButtonCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetButtonCapabilities(HIDReportType reportType, + HIDButtonCapabilitiesPtr buttonCaps, + UInt32 *piButtonCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + return HIDGetSpecificButtonCapabilities(reportType,0,0,0,buttonCaps, + piButtonCapsLength,preparsedDataRef); +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtons.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtons.c new file mode 100644 index 000000000..eec929e49 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtons.c @@ -0,0 +1,165 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetButtons.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS [2311349] iteration broken, passing wrong value + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetButtons - Get the state of the buttons for a Page + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * piUsageList - Usages for pressed buttons + * piUsageListLength - Max entries in UsageList + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus +HIDGetButtons (HIDReportType reportType, + UInt32 iCollection, + HIDUsageAndPagePtr ptUsageList, + UInt32 * piUsageListLength, + HIDPreparsedDataRef preparsedDataRef, + void * psReport, + UInt32 iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + int iR, iE; + long iValue; + int iStart; + int iReportItem; + int iMaxUsages; + HIDUsageAndPage tUsageAndPage; + +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (ptUsageList == NULL) + || (piUsageListLength == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Save the UsageList size +*/ + iMaxUsages = *piUsageListLength; + *piUsageListLength = 0; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = 
ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if ((ptReportItem->reportType == reportType) + && HIDIsButton(ptReportItem, preparsedDataRef)) + { +/* + * Save Arrays and Bitmaps +*/ + iStart = ptReportItem->startBit; + for (iE=0; iEglobals.reportCount; iE++) + { + OSStatus status = noErr; + iValue = 0; + + if ((ptReportItem->dataModes & kHIDDataArrayBit) == kHIDDataArray) + { + status = HIDGetData(psReport, iReportLength, iStart, ptReportItem->globals.reportSize, &iValue, false); + if (!status) + status = HIDPostProcessRIValue (ptReportItem, &iValue); + if (status) return status; + + iStart += ptReportItem->globals.reportSize; + HIDUsageAndPageFromIndex(preparsedDataRef,ptReportItem,ptReportItem->globals.logicalMinimum+iE,&tUsageAndPage); + if (*piUsageListLength >= iMaxUsages) + return kHIDBufferTooSmallErr; + ptUsageList[(*piUsageListLength)++] = tUsageAndPage; + } + else + { + status = HIDGetData(psReport, iReportLength, iStart, 1, &iValue, false); + if (!status) + status = HIDPostProcessRIValue (ptReportItem, &iValue); + if (status) return status; + + iStart++; + if (iValue != 0) + { + HIDUsageAndPageFromIndex(preparsedDataRef,ptReportItem,ptReportItem->globals.logicalMinimum+iE,&tUsageAndPage); + if (*piUsageListLength >= iMaxUsages) + return kHIDBufferTooSmallErr; + ptUsageList[(*piUsageListLength)++] = tUsageAndPage; + } + } + } + } + } + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonsOnPage.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonsOnPage.c new file mode 100644 index 000000000..a18361c64 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonsOnPage.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetButtonsOnPage.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. This means we + need to pass an additional parameter to some internal functions + 5/26/99 BWS We are not checking the usage page for bitmapped buttons! This + caused the Wingman Extreme to get the tilt button on the user + page confused with the first button on the button page. 
+ 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetButtonsOnPage - Get the state of the buttons for a Page + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * piUsageList - Usages for pressed buttons + * piUsageListLength - Max entries in UsageList + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetButtonsOnPage(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage *piUsageList, + UInt32 *piUsageListLength, + HIDPreparsedDataRef preparsedDataRef, + void *psReport, + UInt32 iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDUsageAndPage tUsageAndPage; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int iR, iE; + long iValue; + int iStart; + int iMaxUsages; + int iReportItem; + Boolean bIncompatibleReport = false; + Boolean butNotReally = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (piUsageList == NULL) + || (piUsageListLength == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Save the size of the list +*/ + iMaxUsages = *piUsageListLength; + *piUsageListLength = 0; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType 
and usagePage +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if (HIDIsButton(ptReportItem, preparsedDataRef)) + { +/* + * This may be the proper data to get + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem, + psReport,iReportLength); +/* + * The Report ID or Type may not match. + * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { + butNotReally = true; +/* + * Save Array Buttons +*/ + iStart = ptReportItem->startBit; + for (iE=0; iEglobals.reportCount; iE++) + { + if ((ptReportItem->dataModes & kHIDDataArrayBit) == kHIDDataArray) + { + iStatus = HIDGetData(psReport, iReportLength, iStart, + ptReportItem->globals.reportSize, + &iValue, false); + if (!iStatus) + iStatus = HIDPostProcessRIValue (ptReportItem, &iValue); + HIDUsageAndPageFromIndex(preparsedDataRef, + ptReportItem, + iValue-ptReportItem->globals.logicalMinimum, + &tUsageAndPage); + iStart += ptReportItem->globals.reportSize; + if (usagePage == tUsageAndPage.usagePage) + { + if (*piUsageListLength >= iMaxUsages) + return kHIDBufferTooSmallErr; + piUsageList[(*piUsageListLength)++] = iValue; + } + } +/* + * Save Bitmapped Buttons +*/ + else + { + iStatus = HIDGetData(psReport, iReportLength, iStart, 1, &iValue, false); + if (!iStatus) + iStatus = HIDPostProcessRIValue (ptReportItem, &iValue); + iStart++; + if (!iStatus && iValue != 0) + { + HIDUsageAndPageFromIndex(preparsedDataRef,ptReportItem,iE,&tUsageAndPage); + if (usagePage == tUsageAndPage.usagePage) + { + if (*piUsageListLength >= iMaxUsages) + return kHIDBufferTooSmallErr; + piUsageList[(*piUsageListLength)++] = tUsageAndPage.usage; + } + } + } + } + } + } + } +/* + * If 
nothing was returned then change the status +*/ + if (*piUsageListLength == 0) + { + // If any of the report items were not the right type, we have set the bIncompatibleReport flag. + // However, if any of the report items really were the correct type, we have done our job of checking + // and really didn't find a usage. Don't let the bIncompatibleReport flag wipe out our valid test. + if (bIncompatibleReport && !butNotReally) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; + } + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCaps.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCaps.c new file mode 100644 index 000000000..32c2bb1c2 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCaps.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetCaps.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 4/21/00 KH Added HIDGetCapabilities to be consistant with + HIDGetButtonCapabilities, HIDGetSpecificButtonCapabilities, + HIDGetValueCapabilities, and HIDGetSpecificValueCapabilities. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. This means we + need to pass an additional parameter to some internal functions + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDP_GetCaps + * + * Input: + * ptPreparsedData - Pre-Parsed Data + * ptCapabilities - Pointer to caller-provided structure + * Output: + * ptCapabilities - Capabilities data + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetCaps(HIDPreparsedDataRef preparsedDataRef, HIDCapsPtr ptCapabilities) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + HIDReportSizes *ptReport; + int iFirstUsage; + int i; +/* + * Disallow Null Pointers +*/ + + if ((ptPreparsedData == NULL) || (ptCapabilities == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Copy the capabilities to the user + * Collection Capabilities +*/ + + ptCollection = &ptPreparsedData->collections[1]; + ptCapabilities->usagePage = ptCollection->usagePage; + iFirstUsage = ptCollection->firstUsageItem; + ptCapabilities->usage = ptPreparsedData->usageItems[iFirstUsage].usage; + ptCapabilities->numberCollectionNodes = 
ptPreparsedData->collectionCount; +/* + * Report Capabilities Summary +*/ + + ptCapabilities->inputReportByteLength = 0; + ptCapabilities->outputReportByteLength = 0; + ptCapabilities->featureReportByteLength = 0; + for (i=0; ireportCount; i++) + { + ptReport = &ptPreparsedData->reports[i]; + if (ptCapabilities->inputReportByteLength < ptReport->inputBitCount) + ptCapabilities->inputReportByteLength = ptReport->inputBitCount; + if (ptCapabilities->outputReportByteLength < ptReport->outputBitCount) + ptCapabilities->outputReportByteLength = ptReport->outputBitCount; + if (ptCapabilities->featureReportByteLength < ptReport->featureBitCount) + ptCapabilities->featureReportByteLength = ptReport->featureBitCount; + } + ptCapabilities->inputReportByteLength = (ptCapabilities->inputReportByteLength + 7) /8; + ptCapabilities->outputReportByteLength = (ptCapabilities->outputReportByteLength + 7)/8; + ptCapabilities->featureReportByteLength = (ptCapabilities->featureReportByteLength + 7)/8; +/* + * Sum the capabilities types +*/ + + ptCapabilities->numberInputButtonCaps = 0; + ptCapabilities->numberInputValueCaps = 0; + ptCapabilities->numberOutputButtonCaps = 0; + ptCapabilities->numberOutputValueCaps = 0; + ptCapabilities->numberFeatureButtonCaps = 0; + ptCapabilities->numberFeatureValueCaps = 0; + for (i=0; ireportItemCount; i++) + { + ptReportItem = &ptPreparsedData->reportItems[i]; + switch (ptReportItem->reportType) + { + case kHIDInputReport: + if (HIDIsButton(ptReportItem, preparsedDataRef)) + ptCapabilities->numberInputButtonCaps += ptReportItem->usageItemCount; + else if (HIDIsVariable(ptReportItem, preparsedDataRef)) + ptCapabilities->numberInputValueCaps += ptReportItem->usageItemCount; + break; + case kHIDOutputReport: + if (HIDIsButton(ptReportItem, preparsedDataRef)) + ptCapabilities->numberOutputButtonCaps += ptReportItem->usageItemCount; + else if (HIDIsVariable(ptReportItem, preparsedDataRef)) + ptCapabilities->numberOutputValueCaps += 
ptReportItem->usageItemCount; + break; + case kHIDFeatureReport: + if (HIDIsButton(ptReportItem, preparsedDataRef)) + ptCapabilities->numberFeatureButtonCaps += ptReportItem->usageItemCount; + else if (HIDIsVariable(ptReportItem, preparsedDataRef)) + ptCapabilities->numberFeatureValueCaps += ptReportItem->usageItemCount; + break; + } + } + return kHIDSuccess; +} + + +/* + *------------------------------------------------------------------------------ + * + * HIDGetCapabilities This is exactly the same as HIDGetCaps. It does take a + * HIDCapabiitiesPtr instead of a HIDCapsPtr, but the structures + * of each are exactly the same. The only reason this call + * exists seperately is for uniformity of naming with + * HIDGetValueCapabilities, HIDGetSpecificButtonCapabilities, etc. + * + * Input: + * ptPreparsedData - Pre-Parsed Data + * ptCapabilities - Pointer to caller-provided structure + * Output: + * ptCapabilities - Capabilities data + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetCapabilities(HIDPreparsedDataRef preparsedDataRef, HIDCapabilitiesPtr ptCapabilities) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + HIDReportSizes *ptReport; + int iFirstUsage; + int i; +/* + * Disallow Null Pointers +*/ + + if ((ptPreparsedData == NULL) || (ptCapabilities == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Copy the capabilities to the user + * Collection Capabilities +*/ + + ptCollection = &ptPreparsedData->collections[1]; + ptCapabilities->usagePage = ptCollection->usagePage; + iFirstUsage = ptCollection->firstUsageItem; + ptCapabilities->usage = ptPreparsedData->usageItems[iFirstUsage].usage; + ptCapabilities->numberCollectionNodes = ptPreparsedData->collectionCount; +/* + * Report Capabilities Summary +*/ + + 
ptCapabilities->inputReportByteLength = 0; + ptCapabilities->outputReportByteLength = 0; + ptCapabilities->featureReportByteLength = 0; + for (i=0; ireportCount; i++) + { + ptReport = &ptPreparsedData->reports[i]; + if (ptCapabilities->inputReportByteLength < ptReport->inputBitCount) + ptCapabilities->inputReportByteLength = ptReport->inputBitCount; + if (ptCapabilities->outputReportByteLength < ptReport->outputBitCount) + ptCapabilities->outputReportByteLength = ptReport->outputBitCount; + if (ptCapabilities->featureReportByteLength < ptReport->featureBitCount) + ptCapabilities->featureReportByteLength = ptReport->featureBitCount; + } + ptCapabilities->inputReportByteLength = (ptCapabilities->inputReportByteLength + 7) /8; + ptCapabilities->outputReportByteLength = (ptCapabilities->outputReportByteLength + 7)/8; + ptCapabilities->featureReportByteLength = (ptCapabilities->featureReportByteLength + 7)/8; +/* + * Sum the capabilities types +*/ + + ptCapabilities->numberInputButtonCaps = 0; + ptCapabilities->numberInputValueCaps = 0; + ptCapabilities->numberOutputButtonCaps = 0; + ptCapabilities->numberOutputValueCaps = 0; + ptCapabilities->numberFeatureButtonCaps = 0; + ptCapabilities->numberFeatureValueCaps = 0; + for (i=0; ireportItemCount; i++) + { + ptReportItem = &ptPreparsedData->reportItems[i]; + switch (ptReportItem->reportType) + { + case kHIDInputReport: + if (HIDIsButton(ptReportItem, preparsedDataRef)) + ptCapabilities->numberInputButtonCaps += ptReportItem->usageItemCount; + else if (HIDIsVariable(ptReportItem, preparsedDataRef)) + ptCapabilities->numberInputValueCaps += ptReportItem->usageItemCount; + break; + case kHIDOutputReport: + if (HIDIsButton(ptReportItem, preparsedDataRef)) + ptCapabilities->numberOutputButtonCaps += ptReportItem->usageItemCount; + else if (HIDIsVariable(ptReportItem, preparsedDataRef)) + ptCapabilities->numberOutputValueCaps += ptReportItem->usageItemCount; + break; + case kHIDFeatureReport: + if (HIDIsButton(ptReportItem, 
preparsedDataRef)) + ptCapabilities->numberFeatureButtonCaps += ptReportItem->usageItemCount; + else if (HIDIsVariable(ptReportItem, preparsedDataRef)) + ptCapabilities->numberFeatureValueCaps += ptReportItem->usageItemCount; + break; + } + } + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCollectionNodes.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCollectionNodes.c new file mode 100644 index 000000000..f824c7247 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCollectionNodes.c @@ -0,0 +1,121 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetCollectionNodes.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetCollectionNodes - Get the Collections Database + * + * Input: + * ptLinkCollectionNodes - Node Array provided by caller + * piLinkCollectionNodesLength - Maximum Nodes + * Output: + * piLinkCollectionNodesLength - Actual number of Nodes + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * HidP_NotEnoughRoom - More Nodes than space for them + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetCollectionNodes(HIDCollectionNodePtr ptLinkCollectionNodes, + UInt32 *piLinkCollectionNodesLength, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollectionNodePtr ptLink; + HIDCollection *ptCollection; + HIDP_UsageItem *ptFirstUsageItem; + int iMaxNodes; + int collectionCount; + int firstUsageItem; + int i; +/* + * Disallow Null Pointers +*/ + if ((ptLinkCollectionNodes == NULL) + || (piLinkCollectionNodesLength == NULL) + || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Remember the size of the output array +*/ + iMaxNodes = *piLinkCollectionNodesLength; + collectionCount = ptPreparsedData->collectionCount; + *piLinkCollectionNodesLength = collectionCount; +/* + * Report if there's not enough room +*/ + if (collectionCount > iMaxNodes) + return kHIDBufferTooSmallErr; +/* + * Copy the nodes +*/ + for (i=0; icollections[i]; + ptLink = &ptLinkCollectionNodes[i]; + firstUsageItem = ptCollection->firstUsageItem; + 
ptFirstUsageItem = &ptPreparsedData->usageItems[firstUsageItem]; + ptLink->collectionUsage = ptFirstUsageItem->usage; + ptLink->collectionUsagePage = ptCollection->usagePage; + ptLink->parent = ptCollection->parent; + ptLink->numberOfChildren = ptCollection->children; + ptLink->nextSibling = ptCollection->nextSibling; + ptLink->firstChild = ptCollection->firstChild; + } +/* + * Report if there wasn't enough space +*/ + if (iMaxNodes < ptPreparsedData->collectionCount) + return kHIDBufferTooSmallErr; + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetData.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetData.c new file mode 100644 index 000000000..8f373b906 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetData.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetData.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 12/12/00 KH Correct cast of void * + 3/5/99 BWS [2311353] HIDGetData not masking properly, so not work at all + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetData - Get a single data item from a report + * + * Input: + * psReport - The report + * iReportLength - The length of the report + * iStart - Start Bit in report + * iSize - Number of Bits + * piValue - The place to write the data + * bSignExtend - Sign extend? + * Output: + * piValue - The data + * Returns: + * kHidP_Success - Success + * kHidP_NullPointer - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetData(void * report, UInt32 iReportLength, + UInt32 iStart, UInt32 iSize, SInt32 *piValue, + Boolean bSignExtend) +{ + Byte * psReport = (Byte *)report; + unsigned data; + unsigned iSignBit; + unsigned iExtendMask; + unsigned iStartByte = iStart/8; + unsigned startBit = iStart&7; + unsigned iLastBit = iStart + iSize - 1; + unsigned iLastByte = iLastBit/8; + int iCurrentByte; // needs to be signed, we terminate loop on -1 + unsigned iMask; + + // Check the parameters + if ((iSize == 0) || (iLastByte >= iReportLength) || (iLastByte < iStartByte)) + return kHIDBadParameterErr; + + // Pick up the data bytes backwards + data = 0; + for (iCurrentByte = iLastByte; iCurrentByte >= (int) iStartByte; iCurrentByte--) + { + data <<= 8; + + iMask = 0xff; // 1111 1111 initial mask + // if this is the 'last byte', then we need to mask off the top part of the byte + // to find the mask, we: find the position in this byte (lastBit % 8) + // then shift one to the left that many times plus 
one (to get one bit further) + // then subtract 1 to get all ones starting from the lastBit to the least signif bit + // ex: if iLastBit is 9, or iLastBit is 15, then we get: + // 1 7 (x % 8) + // 0000 0100 1 0000 0000 (1 << (x + 1)) + // 0000 0011 0 1111 1111 (x - 1) + if (iCurrentByte == iLastByte) + iMask = ((1 << (((unsigned) iLastBit % 8) + 1)) - 1); + + data |= (unsigned) psReport[iCurrentByte] & iMask; + } + + // Shift to the right to byte align the least significant bit + data >>= startBit; + + // Sign extend the report item + if (bSignExtend) + { + iSignBit = 1; + if (iSize > 1) + iSignBit <<= (iSize-1); + iExtendMask = (iSignBit << 1) - 1; + if ((data & iSignBit)==0) + data &= iExtendMask; + else + data |= ~iExtendMask; + } + + // Return the value + *piValue = (SInt32) data; + + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextButtonInfo.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextButtonInfo.c new file mode 100644 index 000000000..26adbec86 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextButtonInfo.c @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetNextButtonInfo.c + + Contains: HIDGetNextButtonInfo call for HID Library + + Version: 1.0d1 + + Copyright: © 2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: David Ferguson + + Other Contact: Keithen Hayenga + + Technology: technologies, usb + + Writers: + + (KH) Keithen Hayenga + + Change History (most recent first): + + 2/14/00 KH first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetNextButtonInfo - Get report id and collection for a button. In keeping + * with USBGetNextInterface, we find the usage in the + * next collection, so that you can find usages that + * have the same usage and usage page. + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * usage - The usage to get the information for + * collection - Starting Collection Criteria or zero + * preparsedDataRef - Pre-Parsed Data + * Output: + * collection - Final Collection Criteria or no change + * reportID - Report ID or no change + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetNextButtonInfo + (HIDReportType reportType, + HIDUsage usagePage, + HIDUsage usage, + UInt32 * collection, + UInt8 * reportID, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr)preparsedDataRef; + HIDReportItem *ptReportItem; + UInt32 iCollection; + UInt32 newCollection = 0xFFFFFFFF; + int iR; + UInt8 newReportID = 0; + OSStatus iStatus = kHIDUsageNotFoundErr; + + //Disallow Null Pointers + + if ((ptPreparsedData == NULL) || (collection == NULL) || (reportID == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return 
kHIDInvalidPreparsedDataErr; + + // The Collection must be in range + + iCollection = *collection; + // Umm... an unsigned number can never be less than 0! + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; + + // HIDGetNextButtonInfo is different from HIDGetButton in how it treats + // the collection parameter. HIDGetButton will only look at report items that + // are within the collection and can therefore limit it's searches to starting at + // ptPreparsedData->collections[iCollection]->firstReportItem and only check + // ptPreparsedData->collections[iCollection]->reportItemCount. Since we want to + // find the NEXT collection as well, we need to cycle through all of the reports. + + for (iR = 0; iR < ptPreparsedData->reportItemCount; iR++) + { + SInt32 minUsage; + SInt32 maxUsage; + HIDP_UsageItem thisUsage; + + ptReportItem = &ptPreparsedData->reportItems[iR]; + + thisUsage = ptPreparsedData->usageItems[ptReportItem->firstUsageItem]; + + if (thisUsage.isRange) + { + minUsage = thisUsage.usageMinimum; + maxUsage = thisUsage.usageMaximum; + } + else + { + minUsage = thisUsage.usage; + maxUsage = thisUsage.usage; + } + + if (ptReportItem->reportType == reportType && + (usagePage == 0 || ptReportItem->globals.usagePage == usagePage) && + (usage >= minUsage && usage <= maxUsage) && + ptReportItem->parent > iCollection && + HIDIsButton(ptReportItem, preparsedDataRef)) + { + if (ptReportItem->parent < newCollection) + { + newCollection = ptReportItem->parent; + newReportID = iR; + iStatus = noErr; + } + } + } + + if (iStatus == noErr) + { + *reportID = newReportID; + *collection = newCollection; + } + + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextUsageValueInfo.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextUsageValueInfo.c new file mode 100644 index 000000000..7393d967d --- /dev/null +++ 
b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextUsageValueInfo.c @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetNextUsageValueInfo.c + + Contains: HIDGetNextUsageValueInfo call for HID Library + + Version: 1.0d1 + + Copyright: © 2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: David Ferguson + + Other Contact: Keithen Hayenga + + Technology: technologies, usb + + Writers: + + (KH) Keithen Hayenga + + Change History (most recent first): + + 2/14/00 KH first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetNextUsageValueInfo - Get report id and collection for a usage. In keeping + * with USBGetNextInterface, we find the usage in the + * next collection, so that you can find usages that + * have the same usage and usage page. 
+ * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * usage - The usage to get the information for + * collection - Starting Collection Criteria or zero + * preparsedDataRef - Pre-Parsed Data + * Output: + * collection - Final Collection Criteria or no change + * reportID - Report ID or no change + * Returns: + * kHIDBadParameterErr when there are no more collections to search. + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetNextUsageValueInfo + (HIDReportType reportType, + HIDUsage usagePage, + HIDUsage usage, + UInt32 * collection, + UInt8 * reportID, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr)preparsedDataRef; + HIDReportItem *ptReportItem; + UInt32 iCollection; + UInt32 newCollection = 0xFFFFFFFF; + int iR; + UInt8 newReportID = 0; + OSStatus iStatus = kHIDUsageNotFoundErr; + + //Disallow Null Pointers + + if ((ptPreparsedData == NULL) || (collection == NULL) || (reportID == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; + + // The Collection must be in range + + iCollection = *collection; + // Umm... an unsigned number can never be less than 0! + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; + + // HIDGetNextUsageValueInfo is different from HIDGetUsageValue in how it treats + // the collection parameter. HIDGetUsageValue will only look at report items that + // are within the collection and can therefore limit it's searches to starting at + // ptPreparsedData->collections[iCollection]->firstReportItem and only check + // ptPreparsedData->collections[iCollection]->reportItemCount. Since we want to + // find the NEXT collection as well, we need to cycle through all of the reports. 
+ + for (iR = 0; iR < ptPreparsedData->reportItemCount; iR++) + { + SInt32 minUsage; + SInt32 maxUsage; + HIDP_UsageItem thisUsage; + + ptReportItem = &ptPreparsedData->reportItems[iR]; + + thisUsage = ptPreparsedData->usageItems[ptReportItem->firstUsageItem]; + + if (thisUsage.isRange) + { + minUsage = thisUsage.usageMinimum; + maxUsage = thisUsage.usageMaximum; + } + else + { + minUsage = thisUsage.usage; + maxUsage = thisUsage.usage; + } + + if (ptReportItem->reportType == reportType && + (usagePage == 0 || ptReportItem->globals.usagePage == usagePage) && + (usage >= minUsage && usage <= maxUsage) && + ptReportItem->parent > iCollection && + HIDIsVariable(ptReportItem, preparsedDataRef)) + { + if (ptReportItem->parent < newCollection) + { + newCollection = ptReportItem->parent; + newReportID = iR; + iStatus = noErr; + } + } + } + + if (iStatus == noErr) + { + *reportID = newReportID; + *collection = newCollection; + } + + return iStatus; +} + diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetReportLength.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetReportLength.c new file mode 100644 index 000000000..010dcf6df --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetReportLength.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetReportLength.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: David Ferguson + + Other Contact: Keithen Hayenga + + Technology: technologies, usb + + Writers: + + + Change History (most recent first): + +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetReportLength - Get the length of a report + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * reportID - Desired Report + * preparsedDataRef - opaque Pre-Parsed Data + * Output: + * reportLength - The length of the Report + * Returns: + * status kHIDNullPointerErr, kHIDInvalidPreparsedDataErr, + * kHIDUsageNotFoundErr + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetReportLength(HIDReportType reportType, + UInt8 reportID, + ByteCount * reportLength, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr)preparsedDataRef; + ByteCount dataLength = 0; + OSStatus iStatus = kHIDUsageNotFoundErr; + int iR; + + // Disallow Null Pointers. + + if (ptPreparsedData == NULL || reportLength == NULL) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; + + // Go through the Reports. 
+ + for (iR = 0; iR < ptPreparsedData->reportCount; iR++) + { + if (ptPreparsedData->reports[iR].reportID == reportID) + { + switch(reportType) + { + case kHIDInputReport: + dataLength = (ptPreparsedData->reports[iR].inputBitCount + 7)/8; + break; + case kHIDOutputReport: + dataLength = (ptPreparsedData->reports[iR].outputBitCount + 7)/8; + break; + case kHIDFeatureReport: + dataLength = (ptPreparsedData->reports[iR].featureBitCount + 7)/8; + break; + default: + return kHIDInvalidReportTypeErr; + } + break; + } + } + + // If the reportID > 0, there must be 1 byte for reportID, so total report must be > 1. + // (Would come into play if we had input report 3, but searched for ouput report 3 + // that didn't exist.) + + if ((reportID == 0) && (dataLength > 0) || dataLength > 1) + { + iStatus = noErr; + } + else + { + dataLength = 0; // Ignore report that had id, but no data. + } + + *reportLength = dataLength; + + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValue.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValue.c new file mode 100644 index 000000000..888b413d7 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValue.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetUsageValue.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2001 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 1/18/01 KH Fix for complex descriptors only needed for buttons. + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetUsageValue - Get the value for a usage + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - The usage to get the value for + * piUsageValue - User-supplied place to put value + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetUsageValue + (HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + SInt32 * piUsageValue, + HIDPreparsedDataRef preparsedDataRef, + void * psReport, + ByteCount iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int iR; + SInt32 iValue; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + Boolean bIncompatibleReport = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (piUsageValue == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = 
ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if (HIDIsVariable(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,NULL)) + { +/* + * This may be the proper data to get + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem, + psReport,iReportLength); +/* + * The Report ID or Type may not match. + * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { +/* + * Pick up the data +*/ + iStart = ptReportItem->startBit + + (ptReportItem->globals.reportSize * iUsageIndex); + iStatus = HIDGetData(psReport, iReportLength, iStart, + ptReportItem->globals.reportSize, &iValue, + ((ptReportItem->globals.logicalMinimum < 0) + ||(ptReportItem->globals.logicalMaximum < 0))); + if (!iStatus) + iStatus = HIDPostProcessRIValue (ptReportItem, &iValue); + *piUsageValue = iValue; + return iStatus; + } + } + } + if (bIncompatibleReport) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDGetScaledUsageValue - Get the value for a usage + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * piValue - Pointer to usage Value + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetScaledUsageValue(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + SInt32 *piUsageValue, + 
HIDPreparsedDataRef preparsedDataRef, + void *psReport, + ByteCount iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int iR; + long iValue; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + Boolean bIncompatibleReport = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (piUsageValue == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if (HIDIsVariable(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,NULL)) + { +/* + * This may be the proper data to get + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem, + psReport,iReportLength); +/* + * The Report ID or Type may not match. 
+ * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { +/* + * Pick up the data +*/ + iStart = ptReportItem->startBit + + (ptReportItem->globals.reportSize * iUsageIndex); + iStatus = HIDGetData(psReport, iReportLength, iStart, + ptReportItem->globals.reportSize, &iValue, + ((ptReportItem->globals.logicalMinimum < 0) + ||(ptReportItem->globals.logicalMaximum < 0))); + if (!iStatus) + iStatus = HIDPostProcessRIValue (ptReportItem, &iValue); + if (iStatus != kHIDSuccess) + return iStatus; +/* + * Try to scale the data +*/ + iStatus = HIDScaleUsageValueIn(ptReportItem,iValue,&iValue); + *piUsageValue = iValue; + return iStatus; + } + } + } + if (bIncompatibleReport) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValueArray.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValueArray.c new file mode 100644 index 000000000..69d925f06 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValueArray.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetUsageValueArray.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2001 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 1/18/01 KH Fix for complex descriptors only needed for buttons. + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDP_GetUsageValueArray - Get the values for a usage + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria + * iCollection - Collection Criteria or zero + * usage - usage Criteria + * psBuffer - Pointer to usage Buffer + * iByteLength - Length of usage Buffer + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetUsageValueArray(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + UInt8 *psUsageBuffer, + UInt32 iByteLength, + 
HIDPreparsedDataRef preparsedDataRef, + void *psReport, + UInt32 iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int i; + int iR; + long iValue; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + UInt32 iCount; + int byteCount; + Boolean bIncompatibleReport = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (psUsageBuffer == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if (HIDIsVariable(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,&iCount)) + { +/* + * This may be the proper data to get + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem, + psReport,iReportLength); +/* + * The Report ID or Type may not match. 
+ * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { +/* + * Disallow single count variables + * Count is set by HasUsage +*/ + if (iCount <= 1) + return kHIDNotValueArrayErr; +/* + * Get the data +*/ + iStart = ptReportItem->startBit + (ptReportItem->globals.reportSize * iUsageIndex); + byteCount = (ptReportItem->globals.reportSize * iCount + 7)/8; + if (byteCount > iByteLength) + byteCount = iByteLength; + for (i=0; iglobals.logicalMinimum < 0) + ||(ptReportItem->globals.logicalMaximum < 0))); + if (!iStatus) + iStatus = HIDPostProcessRIValue (ptReportItem, &iValue); + if (iStatus != kHIDSuccess) + return iStatus; + *psUsageBuffer++ = (char) iValue; + iStart += 8; + } + return kHIDSuccess; + } + } + } + if (bIncompatibleReport) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetValueCaps.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetValueCaps.c new file mode 100644 index 000000000..1af6f7467 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetValueCaps.c @@ -0,0 +1,543 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDGetValueCaps.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 12/12/00 KH range count off by 1. + 4/21/00 KH Added HIDGetValueCapabilities and + HIDGetSpecificValueCapabilities that now allow users to find HID + report units and exponents. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 5/3/99 BWS Fix typo + 5/3/99 BWS We were not setting isStringRange, isDesignatorRange, and + isAbsolute + 3/7/99 BWS When range/notRange were made a union, we missed this case where + they were both being set indescriminately + 3/7/99 BWS [2311411] Had added missing fields to caps structure, but they + were not being filled in + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDGetSpecificValueCaps - Get the binary values for a report type + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * valueCaps - ValueCaps Array + * piValueCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piValueCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetSpecificValueCaps(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + HIDValueCapsPtr valueCaps, + UInt32 *piValueCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDCollection *ptParent; + HIDReportItem *ptReportItem; + HIDP_UsageItem *ptUsageItem; + HIDStringItem *ptStringItem; + HIDDesignatorItem *ptDesignatorItem; + HIDP_UsageItem *ptFirstCollectionUsageItem; + HIDValueCaps *ptCapability; + int iR, iU; + int parent; + int iReportItem, iUsageItem; + int iMaxCaps; + UInt32 iCount; + // There are 3 versions of HID Parser code all based on the same logic: OS 9 HID Library; + // OSX xnu; OSX IOKitUser. They should all be nearly the same logic. This version (xnu) + // is based on older OS 9 code. 
This version has added logic to maintain this startBit. + // I don't know why it is here, but believe if it is needed here, it would probably be + // needed in the other two implementations. Didn't have time to determine that at this + // time, so i'll leave this comment to remind me that we should reconcile the 3 versions. + UInt32 startBit; // Added esb 9-29-99 + /*If I remember correctly, it was an optimization. Each time you ask for + a specific value capability, it would search through the entire report + descriptor to find it (my recollection is kind of hazy on this part). + The start bit allowed somebody (client maybe) to cache the information + on where in the report a specific value resided and the use that later + when fetching that value. That way, you don't have to keep going + through the parse tree to find where a value exists. I don't remember + if the implementation was completed or if I even used it. -esb */ +/* + * Disallow Null Pointers +*/ + if ((valueCaps == NULL) + || (piValueCapsLength == NULL) + || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Save the buffer size +*/ + iMaxCaps = *piValueCapsLength; + *piValueCapsLength = 0; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; +/* + * Search only reports of the proper type +*/ + if ((ptReportItem->reportType == reportType) + && ((ptReportItem->globals.usagePage == usagePage) + || (usagePage == 0)) + && HIDIsVariable(ptReportItem, preparsedDataRef)) + { + startBit = ptReportItem->startBit; // Added esb 9-28-99 +/* 
+ * Search the usages +*/ + for (iU=0; iUusageItemCount; iU++) + { +/* + * Copy all usages if the usage above is zero + * or copy all that "match" +*/ + iUsageItem = ptReportItem->firstUsageItem + iU; + ptUsageItem = &ptPreparsedData->usageItems[iUsageItem]; + + // ¥¥ we assume there is a 1-1 corresponence between usage items, string items, and designator items + // ¥¥Êthis is not necessarily the case, but its better than nothing + ptStringItem = &ptPreparsedData->stringItems[ptReportItem->firstStringItem + iU]; + ptDesignatorItem = &ptPreparsedData->desigItems[ptReportItem->firstDesigItem + iU]; + + if (HIDUsageInRange(ptUsageItem,usagePage,usage)) + { +/* + * Only copy if there's room +*/ + if (*piValueCapsLength >= iMaxCaps) + return kHIDBufferTooSmallErr; + ptCapability = &valueCaps[(*piValueCapsLength)++]; +/* + * Populate the Capability Structure +*/ + parent = ptReportItem->parent; + ptParent = &ptPreparsedData->collections[parent]; + ptFirstCollectionUsageItem = &ptPreparsedData->usageItems[ptParent->firstUsageItem]; + ptCapability->collection = parent; + ptCapability->collectionUsagePage = ptParent->usagePage; + ptCapability->collectionUsage = ptFirstCollectionUsageItem->usage; + ptCapability->bitField = ptReportItem->dataModes; + ptCapability->reportID = ptReportItem->globals.reportID; + ptCapability->usagePage = ptUsageItem->usagePage; + + ptCapability->isAbsolute = !(ptReportItem->dataModes & kHIDDataRelative); + + ptCapability->isRange = ptUsageItem->isRange; + if (ptUsageItem->isRange) + { + ptCapability->u.range.usageMin = ptUsageItem->usageMinimum; + ptCapability->u.range.usageMax = ptUsageItem->usageMaximum; + } + else + ptCapability->u.notRange.usage = ptUsageItem->usage; + + // if there really are that many items + if (iU < ptReportItem->stringItemCount) + { + ptCapability->isStringRange = ptStringItem->isRange; + + if (ptStringItem->isRange) + { + ptCapability->u.range.stringMin = ptStringItem->minimum; + ptCapability->u.range.stringMax = 
ptStringItem->maximum; + } + else + ptCapability->u.notRange.stringIndex = ptStringItem->index; + } + // default, clear it + else + { + ptCapability->isStringRange = false; + ptCapability->u.notRange.stringIndex = 0; + } + + // if there really are that many items + if (iU < ptReportItem->desigItemCount) + { + ptCapability->isDesignatorRange = ptDesignatorItem->isRange; + + if (ptDesignatorItem->isRange) + { + ptCapability->u.range.designatorMin = ptDesignatorItem->minimum; + ptCapability->u.range.designatorMax = ptDesignatorItem->maximum; + } + else + ptCapability->u.notRange.designatorIndex = ptDesignatorItem->index; + } + // default, clear it + else + { + ptCapability->isDesignatorRange = false; + ptCapability->u.notRange.designatorIndex = 0; + } + + ptCapability->bitSize = ptReportItem->globals.reportSize; + + ptCapability->logicalMin = ptReportItem->globals.logicalMinimum; + ptCapability->logicalMax = ptReportItem->globals.logicalMaximum; + ptCapability->physicalMin = ptReportItem->globals.physicalMinimum; + ptCapability->physicalMax = ptReportItem->globals.physicalMaximum; + + if (ptUsageItem->isRange) + { + iCount = ptUsageItem->usageMaximum - ptUsageItem->usageMinimum; + if (iCount < 0) + iCount = -iCount; + iCount++; // Range count was off by one. + } + else + // If we're not in a range, then there should be just one usage. + // Why do we have to call this function to determine that? Are we checking + // that there is that usage before we decide if usage count is 0 or 1? + // But haven't we already verified that we have this usage by the time we + // got here? 
+ HIDHasUsage(preparsedDataRef,ptReportItem, + ptUsageItem->usagePage,ptUsageItem->usage, + NULL,&iCount); + ptCapability->reportCount = iCount; + ptCapability->startBit = startBit; + startBit += (ptCapability->bitSize * ptCapability->reportCount); + } + } + } + } + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDGetValueCaps - Get the binary values for a report type + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * valueCaps - ValueCaps Array + * piValueCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piValueCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetValueCaps(HIDReportType reportType, + HIDValueCapsPtr valueCaps, + UInt32 *piValueCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + return HIDGetSpecificValueCaps(reportType,0,0,0,valueCaps, + piValueCapsLength,preparsedDataRef); +} + + +/* + *------------------------------------------------------------------------------ + * + * HIDGetSpecificValueCapabilities - Get the binary values for a report type + * This is the same as HIDGetSpecificValueCaps, + * except that it takes a HIDValueCapabilitiesPtr + * so it can return units and unitExponents. 
+ * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * valueCaps - ValueCaps Array + * piValueCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piValueCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetSpecificValueCapabilities(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + HIDValueCapabilitiesPtr valueCaps, + UInt32 *piValueCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDCollection *ptParent; + HIDReportItem *ptReportItem; + HIDP_UsageItem *ptUsageItem; + HIDStringItem *ptStringItem; + HIDDesignatorItem *ptDesignatorItem; + HIDP_UsageItem *ptFirstCollectionUsageItem; + HIDValueCapabilities *ptCapability; + int iR, iU; + int parent; + int iReportItem, iUsageItem; + int iMaxCaps; + UInt32 iCount; + // There are 3 versions of HID Parser code all based on the same logic: OS 9 HID Library; + // OSX xnu; OSX IOKitUser. They should all be nearly the same logic. This version (xnu) + // is based on older OS 9 code. This version has added logic to maintain this startBit. + // I don't know why it is here, but believe if it is needed here, it would probably be + // needed in the other two implementations. Didn't have time to determine that at this + // time, so i'll leave this comment to remind me that we should reconcile the 3 versions. + UInt32 startBit; // Carried esb's logic down here when we added HIDGetSpecificValueCapabilities(). 
+/* + * Disallow Null Pointers +*/ + if ((valueCaps == NULL) + || (piValueCapsLength == NULL) + || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Save the buffer size +*/ + iMaxCaps = *piValueCapsLength; + *piValueCapsLength = 0; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; +/* + * Search only reports of the proper type +*/ + if ((ptReportItem->reportType == reportType) + && ((ptReportItem->globals.usagePage == usagePage) + || (usagePage == 0)) + && HIDIsVariable(ptReportItem, preparsedDataRef)) + { + startBit = ptReportItem->startBit; // Same logic as Added esb 9-28-99 +/* + * Search the usages +*/ + for (iU=0; iUusageItemCount; iU++) + { +/* + * Copy all usages if the usage above is zero + * or copy all that "match" +*/ + iUsageItem = ptReportItem->firstUsageItem + iU; + ptUsageItem = &ptPreparsedData->usageItems[iUsageItem]; + + // ¥¥ we assume there is a 1-1 corresponence between usage items, string items, and designator items + // ¥¥Êthis is not necessarily the case, but its better than nothing + ptStringItem = &ptPreparsedData->stringItems[ptReportItem->firstStringItem + iU]; + ptDesignatorItem = &ptPreparsedData->desigItems[ptReportItem->firstDesigItem + iU]; + + if (HIDUsageInRange(ptUsageItem,usagePage,usage)) + { +/* + * Only copy if there's room +*/ + if (*piValueCapsLength >= iMaxCaps) + return kHIDBufferTooSmallErr; + ptCapability = &valueCaps[(*piValueCapsLength)++]; + +/* + * Populate the Capability Structure +*/ + parent = ptReportItem->parent; + ptParent = 
&ptPreparsedData->collections[parent]; + ptFirstCollectionUsageItem = &ptPreparsedData->usageItems[ptParent->firstUsageItem]; + ptCapability->collection = parent; + ptCapability->collectionUsagePage = ptParent->usagePage; + ptCapability->collectionUsage = ptFirstCollectionUsageItem->usage; + ptCapability->bitField = ptReportItem->dataModes; + ptCapability->reportID = ptReportItem->globals.reportID; + ptCapability->usagePage = ptUsageItem->usagePage; + ptCapability->unitExponent = ptReportItem->globals.unitExponent; + ptCapability->units = ptReportItem->globals.units; +// ptCapability->reserved = 0; // for future OS 9 expansion + ptCapability->startBit = 0; // init esb added field. +// ptCapability->pbVersion = kHIDCurrentCapabilitiesPBVersion; + ptCapability->pbVersion = 2; + + ptCapability->isAbsolute = !(ptReportItem->dataModes & kHIDDataRelative); + + ptCapability->isRange = ptUsageItem->isRange; + if (ptUsageItem->isRange) + { + ptCapability->u.range.usageMin = ptUsageItem->usageMinimum; + ptCapability->u.range.usageMax = ptUsageItem->usageMaximum; + } + else + ptCapability->u.notRange.usage = ptUsageItem->usage; + + // if there really are that many items + if (iU < ptReportItem->stringItemCount) + { + ptCapability->isStringRange = ptStringItem->isRange; + + if (ptStringItem->isRange) + { + ptCapability->u.range.stringMin = ptStringItem->minimum; + ptCapability->u.range.stringMax = ptStringItem->maximum; + } + else + ptCapability->u.notRange.stringIndex = ptStringItem->index; + } + // default, clear it + else + { + ptCapability->isStringRange = false; + ptCapability->u.notRange.stringIndex = 0; + } + + // if there really are that many items + if (iU < ptReportItem->desigItemCount) + { + ptCapability->isDesignatorRange = ptDesignatorItem->isRange; + + if (ptDesignatorItem->isRange) + { + ptCapability->u.range.designatorMin = ptDesignatorItem->minimum; + ptCapability->u.range.designatorMax = ptDesignatorItem->maximum; + } + else + 
ptCapability->u.notRange.designatorIndex = ptDesignatorItem->index; + } + // default, clear it + else + { + ptCapability->isDesignatorRange = false; + ptCapability->u.notRange.designatorIndex = 0; + } + + ptCapability->bitSize = ptReportItem->globals.reportSize; + + ptCapability->logicalMin = ptReportItem->globals.logicalMinimum; + ptCapability->logicalMax = ptReportItem->globals.logicalMaximum; + ptCapability->physicalMin = ptReportItem->globals.physicalMinimum; + ptCapability->physicalMax = ptReportItem->globals.physicalMaximum; + + if (ptUsageItem->isRange) + { + iCount = ptUsageItem->usageMaximum - ptUsageItem->usageMinimum; + if (iCount < 0) + iCount = -iCount; + iCount++; // Range count was off by one. + } + else + HIDHasUsage(preparsedDataRef,ptReportItem, + ptUsageItem->usagePage,ptUsageItem->usage, + NULL,&iCount); + ptCapability->reportCount = iCount; + ptCapability->startBit = startBit; // more of same logic. + startBit += (ptCapability->bitSize * ptCapability->reportCount); + } + } + } + } + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDGetValueCapabilities - Get the binary values for a report type + * This is the same as HIDGetValueCaps, + * except that it takes a HIDValueCapabilitiesPtr + * so it can return units and unitExponents. 
+ * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * valueCaps - ValueCaps Array + * piValueCapsLength - Maximum Entries + * ptPreparsedData - Pre-Parsed Data + * Output: + * piValueCapsLength - Entries Populated + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDGetValueCapabilities(HIDReportType reportType, + HIDValueCapabilitiesPtr valueCaps, + UInt32 *piValueCapsLength, + HIDPreparsedDataRef preparsedDataRef) +{ + return HIDGetSpecificValueCapabilities(reportType,0,0,0,valueCaps, + piValueCapsLength,preparsedDataRef); +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDHasUsage.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDHasUsage.c new file mode 100644 index 000000000..97a48ab14 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDHasUsage.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDHasUsage.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 12/12/00 KH range count off by 1. + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HidP_UsageFromIndex + * + * Input: + * ptPreparsedData - The Preparsed Data + * ptReportItem - The Report Item + * usagePage - The usage Page to find + * usage - The usage to find + * piIndex(optional) - The usage Index pointer (Can be used to tell + * which bits in an array correspond to that usage.) + * piCount(optional) - The usage Count pointer (Can be used to tell + * how many items will be in a report.) 
+ * Output: + * piIndex - The usage Index + * Returns: + * The usage + * + *------------------------------------------------------------------------------ +*/ +Boolean HIDHasUsage (HIDPreparsedDataRef preparsedDataRef, + HIDReportItem *ptReportItem, + HIDUsage usagePage, HIDUsage usage, + UInt32 *piIndex, UInt32 *piCount) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + int iUsageItem; + UInt32 iUsageIndex; + int iUsages; + int i; + UInt32 iCountsLeft; + HIDP_UsageItem *ptUsageItem; + Boolean bOnPage; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (ptReportItem == NULL)) + return 0; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return 0; +/* + * Look through the usage Items for this usage +*/ + iUsageItem = ptReportItem->firstUsageItem; + iUsageIndex = 0; + for (i=0; iusageItemCount; i++) + { +/* + * Each usage Item is either a usage or a usage range +*/ + ptUsageItem = &ptPreparsedData->usageItems[iUsageItem++]; + bOnPage = ((usagePage == 0) || (usagePage == ptUsageItem->usagePage)); + if (ptUsageItem->isRange) + { +/* + * For usage Ranges + * If the index is in the range + * then return the usage + * Otherwise adjust the index by the size of the range +*/ + if ((usage >= ptUsageItem->usageMinimum) + && (usage <= ptUsageItem->usageMaximum)) + { + if (piIndex != NULL) + *piIndex = iUsageIndex + (ptUsageItem->usageMinimum - usage); +/* + * If this usage is the last one for this ReportItem + * then it gets all of the remaining reportCount +*/ + if (piCount != NULL) + { + // piCount is going to be used to find which element in a button array is + // the one that returns the value for that usage. + if (((i+1) == ptReportItem->usageItemCount) + && (usage == ptUsageItem->usageMaximum)) + { + // Hmm, the same logic in the non-range case below was wrong. But things + // seem to be working for finding buttons, so i am not changing it here. 
+ // However, we have made some changes to range calculations that may no + // longer require that -1 here either. Heads up! + iCountsLeft = ptReportItem->globals.reportCount - iUsageIndex - 1; + if (iCountsLeft > 1) + *piCount = iCountsLeft; + else + *piCount = 1; + } + else + *piCount = 1; + } + if (bOnPage) + return true; + } + iUsages = ptUsageItem->usageMaximum - ptUsageItem->usageMinimum; + if (iUsages < 0) + iUsages = -iUsages; + iUsages++; // Add off by one adjustment AFTER sign correction. + iUsageIndex += iUsages; + } + else + { +/* + * For Usages + * If the index is zero + * then return this usage + * Otherwise one less to index through +*/ + if (usage == ptUsageItem->usage) + { + if (piIndex != NULL) + *piIndex = iUsageIndex; + if (piCount != NULL) + { + if ((i+1) == ptReportItem->usageItemCount) + { + // Keithen does not understand the logic of iCountsLeft. + // In Radar #2579612 we come through here for HIDGetUsageValueArray + // and HIDGetSpecificValueCaps. In both cases piCount that is returned + // should be the reportCount without the -1. +// iCountsLeft = ptReportItem->globals.reportCount - iUsageIndex - 1; + iCountsLeft = ptReportItem->globals.reportCount - iUsageIndex; + if (iCountsLeft > 1) + *piCount = iCountsLeft; + else + *piCount = 1; + } + else + *piCount = 1; + } + if (bOnPage) + return true; + } + iUsageIndex++; + } + } + return false; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDInitReport.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDInitReport.c new file mode 100644 index 000000000..54fdda491 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDInitReport.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDInitReport.c + + Contains: HIDInitReport call for HID Library + + Version: 1.0d1 + + Copyright: © 2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: David Ferguson + + Other Contact: Keithen Hayenga + + Technology: technologies, usb + + Writers: + + + Change History (most recent first): + +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDInitReport - Initialize report to have report ID and, if possible, null values + * so that setting any one value will not inadvertantly change + * other items in the same report to 0. 
+ * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * reportID - Report ID + * preparsedDataRef - Pre-Parsed Data + * report - An HID Report + * reportLength - The length of the Report + * Output: + * report - Initialized HID Report + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDInitReport + (HIDReportType reportType, + UInt8 reportID, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDReportItem * ptReportItem; + ByteCount minLength; + UInt8 * iPtr; + int iR; + OSStatus iStatus = kHIDSuccess; + + //Disallow Null Pointers + + if ((ptPreparsedData == NULL) || (report == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; + if (reportLength == 0) + return kHIDReportSizeZeroErr; + + // Report length must also be great enough to hold report. + HIDGetReportLength(reportType, reportID, &minLength, preparsedDataRef); + // I know that HIDGetReportLength repeats the first tests above, but it + // was easier to duplicate that logic than build test cases for the other + // errors that could be returned by HIDGetReportLength that i don't care + // about. + + if (reportLength < minLength) + return kHIDInvalidReportLengthErr; + + // First byte of report must be reportID. Unless it is report ID 0; + // in which case 0 is just the first byte of the following initialization. + + iPtr = (UInt8 *)report; + *iPtr++ = reportID; + + // Default initialization is to zero out all values. + + for (iR = 1; iR < reportLength; iR++) + { + *iPtr++ = 0; + } + + // Search through all report items to see if they belong in this report. 
+ + for (iR = 0; iR < ptPreparsedData->reportItemCount; iR++) + { + ptReportItem = &ptPreparsedData->reportItems[iR]; + + if (ptReportItem->reportType == reportType && + ptReportItem->globals.reportID == reportID) + { + // Is there a null value for this item? + SInt32 nullValue; + SInt32 bitwiseMax; + SInt32 bitwiseMin; + SInt32 bitSize; + Boolean isSigned; + + // The HID spec "highly encourages" 0 to be a null value, so test + // for it first. + + if ( 0 < ptReportItem->globals.logicalMinimum || + 0 > ptReportItem->globals.logicalMaximum) + continue; // Default initialization was good enough. + + nullValue = 0; // We can test if this changes below. + + // Determine the maximum and minimum signed numbers that will fit into this + // item and then see if they are outside the bounds of what the descriptor + // says are the allowed min and max. + // What the possible ranges are depends upon if the device is accepting + // signed or unsigned numbers. I haven't noticed that information in the + // preparsed data, so i'll take an educated guess. If logicalMinimum is + // less than 0 it must be using signed numbers. Conversly, logicalMaximum + // using the high order bit of it's bitfield would indicate unsigned. In + // case of a tie, we'll say signed since that agrees with the SInt32 that + // logicalMinimum and logicalMaximum are stored in. + + // The mininimum 8 bit value would be 0x80 (-128). To be -128 in UInt32 = 0xFFFFFF80. + // This just happens to also set the high order bit that we need to test in the + // maximum value using the high order bit, such as 64, 0x80. + bitSize = ptReportItem->globals.reportSize; + bitwiseMin = -1 << (bitSize - 1); + + // Logical max should not have any bit set higher than the high order bit of our + // size, so anding with 0xFFFFFF80 should only test field's high order bit. + isSigned = (ptReportItem->globals.logicalMinimum < 0) || + !(ptReportItem->globals.logicalMaximum & bitwiseMin); + + // If signed, we test from 0x80 to 0x7F. 
If not, 0x00 to 0xFF. + if (isSigned) + { + --bitSize; // Don't let max value flow into sign bit. + } + else + { + bitwiseMin = 0; + } + + // Our compare uses SInt32, so even for unsigned values, we can't let them + // overflow into real sign bit. (So 0x80000000 is not a legal HID positive number.) + if (bitSize >= 32) bitSize = 31; + + // The theory behind this greatly simplified set of compares. 1. I was worried about + // the case of a 4 bit field with a max = 4 and a min = -2. Then if i chose a value + // of 7 for my bitwise max, it could also be -1 for min, which would make it a null + // positive value, but a legal negative one. But while HID specs say a field can be + // either a signed or unsigned value, i don't see how it can be both, so i haven't + // allowed for such a situation. 2. I originally built logic that tested for signed + // or unsigned fields as above, but had seperate logic based on what would happen + // after that. I have resolved that logic down to the main part below and the only + // exceptions i had are now filtered out into the 2 lines of "if (signed)" etc. above. + + bitwiseMax = (1< ptReportItem->globals.logicalMaximum) + { + nullValue = bitwiseMax; + } + else + { + if (bitwiseMin < ptReportItem->globals.logicalMinimum) + { + nullValue = bitwiseMin; + } + } + + // If we found a null value, store it into the proper place in the report. + + if (nullValue != 0) + { + // Write out the data. + SInt32 iStart; + int lR; + OSStatus tempStatus; + + HIDPreProcessRIValue(ptReportItem, &nullValue); + + // For a reportItem, there can be multiple identical usages. + for (lR = 0; lR < ptReportItem->usageItemCount; lR++) + { + iStart = ptReportItem->startBit + + (ptReportItem->globals.reportSize * lR); + tempStatus = HIDPutData(report, reportLength, iStart, + ptReportItem->globals.reportSize, nullValue); + if (tempStatus) + iStatus = tempStatus; // Pass on any bad news. 
+ } + } + } // == reportID + } // reportItemCount + + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDIsButtonOrValue.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDIsButtonOrValue.c new file mode 100644 index 000000000..5855ef1a6 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDIsButtonOrValue.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDIsButtonOrValue.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
+ 3/5/99 BWS [2311366] HIDIsButton should screen out constants (at least + until other functions, like HIDGetButtons, are fixed to be able + to properly skip constants) + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *----------------------------------------------------------------------------- + * + * HIDIsButton - Is the data button(s)? + * + * Input: + * ptReportItem - Input/Output/Feature + * Output: + * Returns: + * Boolean + * + *----------------------------------------------------------------------------- +*/ +Boolean HIDIsButton(HIDReportItem *ptReportItem, HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + +/* + * Disallow Null Pointers +*/ + if (ptReportItem==NULL) + return false; +/* + * Remove items that are constant and have no usage + */ + if ((ptReportItem->dataModes & kHIDDataConstantBit) == kHIDDataConstant) + { + // if has no usages, then bit filler + if (ptReportItem->usageItemCount == 0) + return false; + + // also check to see if there is a usage, but it is zero + + // if the first usage item is range, then check that one + // (we will not worry about report items with multiple zero usages, + // as I dont think that is a case that makes sense) + if (ptReportItem->firstUsageItem < ptPreparsedData->usageItemCount) + { + HIDP_UsageItem * ptUsageItem = &ptPreparsedData->usageItems[ptReportItem->firstUsageItem]; + + // if it is a range usage, with both zero usages + if ((ptUsageItem->isRange && ptUsageItem->usageMinimum == 0 && ptUsageItem->usageMaximum == 0) && + // or not a range, and zero usage + (!ptUsageItem->isRange && ptUsageItem->usage == 0)) + // then this is bit filler + return false; + } + } + +/* + * Arrays and 1-bit Variables +*/ + return (((ptReportItem->dataModes & kHIDDataArrayBit) == kHIDDataArray) + || (ptReportItem->globals.reportSize == 1)); +} + +/* + *----------------------------------------------------------------------------- + * + * 
HIDIsVariable - Is the data variable(s)? + * + * Input: + * ptReportItem - Input/Output/Feature + * Output: + * Returns: + * Boolean + * + *----------------------------------------------------------------------------- +*/ +Boolean HIDIsVariable(HIDReportItem *ptReportItem, HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + +/* + * Disallow Null Pointers +*/ + if (ptReportItem==NULL) + return false; + +/* + * Remove items that are constant and have no usage + */ + if ((ptReportItem->dataModes & kHIDDataConstantBit) == kHIDDataConstant) + { + // if has no usages, then bit filler + if (ptReportItem->usageItemCount == 0) + return false; + + // also check to see if there is a usage, but it is zero + + // if the first usage item is range, then check that one + // (we will not worry about report items with multiple zero usages, + // as I dont think that is a case that makes sense) + if (ptReportItem->firstUsageItem < ptPreparsedData->usageItemCount) + { + HIDP_UsageItem * ptUsageItem = &ptPreparsedData->usageItems[ptReportItem->firstUsageItem]; + + // if it is a range usage, with both zero usages + if ((ptUsageItem->isRange && ptUsageItem->usageMinimum == 0 && ptUsageItem->usageMaximum == 0) && + // or not a range, and zero usage + (!ptUsageItem->isRange && ptUsageItem->usage == 0)) + // then this is bit filler + return false; + } + } + +/* + * Multi-bit Variables +*/ + return (((ptReportItem->dataModes & kHIDDataArrayBit) != kHIDDataArray) + && (ptReportItem->globals.reportSize != 1)); +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDLib.h b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDLib.h new file mode 100644 index 000000000..b727e76d5 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDLib.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDLib.h + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
#ifndef __HIDLib__
#define __HIDLib__

#include "HIDPriv.h"

/* Passed to PoolAllocateResident: zero newly allocated blocks. */
#define kShouldClearMem true

/*------------------------------------------------------------------------------*/
/*                                                                              */
/*                          HID Library definitions                             */
/*                                                                              */
/*------------------------------------------------------------------------------*/

/*
 * Parser working state: tracks the current position in the raw descriptor
 * plus all global/local item state accumulated while walking it.
 */
struct HIDReportDescriptor
{
	Byte *					descriptor;			/* raw HID report descriptor bytes */
	ByteCount				descriptorLength;	/* length of descriptor in bytes */
	UInt32					index;				/* current read position in descriptor */
	SInt32 *				collectionStack;	/* stack of open collection indices */
	SInt32					collectionNesting;	/* depth of collectionStack */
	HIDGlobalItems *		globalsStack;		/* stack for Push/Pop of global items */
	SInt32					globalsNesting;		/* depth of globalsStack */
	HIDItem					item;				/* the item most recently read by HIDNextItem */
	SInt32					firstUsageItem;		/* index of first pending local usage */
	SInt32					firstStringItem;	/* index of first pending local string */
	SInt32					firstDesigItem;		/* index of first pending local designator */
	SInt32					parent;				/* current parent collection index */
	SInt32					sibling;			/* previous sibling collection index */
	HIDGlobalItems			globals;			/* current global item state */
	Boolean					haveUsageMin;		/* a Usage Minimum is pending */
	Boolean					haveUsageMax;		/* a Usage Maximum is pending */
	SInt32					rangeUsagePage;		/* usage page of the pending usage range */
	SInt32					usageMinimum;		/* pending Usage Minimum value */
	SInt32					usageMaximum;		/* pending Usage Maximum value */
	Boolean					haveStringMin;		/* a String Minimum is pending */
	Boolean					haveStringMax;		/* a String Maximum is pending */
	SInt32					stringMinimum;		/* pending String Minimum value */
	SInt32					stringMaximum;		/* pending String Maximum value */
	Boolean					haveDesigMin;		/* a Designator Minimum is pending */
	Boolean					haveDesigMax;		/* a Designator Maximum is pending */
	SInt32					desigMinimum;		/* pending Designator Minimum value */
	SInt32					desigMaximum;		/* pending Designator Maximum value */
};
typedef struct HIDReportDescriptor HIDReportDescriptor;

/* Internal parser entry points — not exported from the shared library. */

/* First pass: count items and size/initialize the preparsed-data arrays. */
extern OSStatus
HIDCountDescriptorItems	   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Read the next item from the raw descriptor into reportDescriptor->item. */
extern OSStatus
HIDNextItem				   (HIDReportDescriptor *	reportDescriptor);

/* Second pass: walk the descriptor and fill in the preparsed data. */
extern OSStatus
HIDParseDescriptor		   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Handle a Collection main item. */
extern OSStatus
HIDProcessCollection	   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Handle an End Collection main item. */
extern OSStatus
HIDProcessEndCollection	   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Handle a global item (usage page, logical min/max, report size, ...). */
extern OSStatus
HIDProcessGlobalItem	   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Handle a local item (usage, usage min/max, string, designator, ...). */
extern OSStatus
HIDProcessLocalItem		   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Dispatch a main item to the appropriate handler. */
extern OSStatus
HIDProcessMainItem		   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

/* Handle an Input/Output/Feature main item (creates a report item). */
extern OSStatus
HIDProcessReportItem	   (HIDReportDescriptor *	reportDescriptor,
							HIDPreparsedDataPtr		preparsedData);

#endif
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDMaxUsageListLength.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. This means we + need to pass an additional parameter to some internal functions + 3/6/99 BWS Eliminate warning + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDMaxUsageListLength + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * ptPreparsedData - Pre-Parsed Data + * Output: + * Returns: length of list + * + *------------------------------------------------------------------------------ +*/ +UInt32 +HIDMaxUsageListLength (HIDReportType reportType, + HIDUsage usagePage, + HIDPreparsedDataRef preparsedDataRef) +{ +#pragma unused(usagePage) // not used, see comment below + + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDReportItem *ptReportItem; + int iButtons; + int i; + + +/* + * Disallow Null Pointers +*/ + if (ptPreparsedData == NULL) + return 0; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Go through the ReportItems + * Filter on ReportType + * Sum the button counts + * + * NOTE: A more precise value for the maximum list length + * may be obtained by filtering out the usages that + * are not on the specified usage page. 
Most of + * the time the number returned below is the same + * as that returned by filtering usages. It is + * never smaller. The tradeoff is sometimes wasting + * a few words of RAM in exchange for speed. +*/ + iButtons = 0; + for (i=0; ireportItemCount; i++) + { + ptReportItem = &ptPreparsedData->reportItems[i]; + if ((ptReportItem->reportType == reportType) + && HIDIsButton(ptReportItem, preparsedDataRef)) + iButtons += ptReportItem->globals.reportCount; + } + return iButtons; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDNextItem.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDNextItem.c new file mode 100644 index 000000000..59091afe6 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDNextItem.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDNextItem.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (DF) David Ferguson + (JRH) Rhoads Hollowell + (BWS) Brent Schorsch + + Change History (most recent first): + + 11/3/99 DF And now I get to add the code to actually fix the checkin below. + 11/1/99 BWS Fix long item calc error, fix by Dave Ferguson + 6/1/99 JRH Get rid of an uninitialized variable warning. It turns out that + with the code flow it was never being used before being + initialized, but the compiler was complaining. + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *----------------------------------------------------------------------------- + * + * HIDNextItem - Get the Next Item + * + * Input: + * ptDescriptor - Descriptor Structure + * Output: + * ptItem - Caller-provided Item Structure + * Returns: + * kHIDSuccess - Success + * kHIDEndOfDescriptorErr - End of the HID Report Descriptor + * + *----------------------------------------------------------------------------- +*/ +OSStatus HIDNextItem(HIDReportDescriptor *ptDescriptor) +{ + HIDItem *ptItem; + unsigned char iHeader; + unsigned char *psD; + int i; + int iLength; + UInt32 *piX; + int iSize; + int iByte = 0; +/* + * Disallow Null Pointers +*/ + if (ptDescriptor==NULL) + return kHIDNullPointerErr; +/* + * Use local pointers +*/ + ptItem = &ptDescriptor->item; + psD = ptDescriptor->descriptor; + piX = &ptDescriptor->index; + iLength = ptDescriptor->descriptorLength; +/* + * Don't go past the end of the buffer +*/ + if (*piX >= iLength) + return kHIDEndOfDescriptorErr; +/* + * Get the header byte +*/ + iHeader = psD[(*piX)++]; +/* + * Don't go past the end of the buffer +*/ + if (*piX > iLength) + return kHIDEndOfDescriptorErr; + ptItem->itemType = iHeader; + ptItem->itemType &= kHIDItemTypeMask; + ptItem->itemType >>= kHIDItemTypeShift; +/* + * Long Item Header + * Skip Long Items! 
+*/ + if (iHeader==kHIDLongItemHeader) + { + iSize = psD[(*piX)++]; + ptItem->tag = *piX++; + } +/* + * Short Item Header +*/ + else + { + iSize = iHeader; + iSize &= kHIDItemSizeMask; + if (iSize==3) + iSize = 4; + ptItem->byteCount = iSize; + ptItem->tag = iHeader; + ptItem->tag &= kHIDItemTagMask; + ptItem->tag >>= kHIDItemTagShift; + } +/* + * Don't go past the end of the buffer +*/ + if ((*piX + iSize) > iLength) + return kHIDEndOfDescriptorErr; +/* + * Pick up the data +*/ + ptItem->unsignedValue = 0; + if (iSize==0) + { + ptItem->signedValue = 0; + return kHIDSuccess; + } +/* + * Get the data bytes +*/ + for (i = 0; i < iSize; i++) + { + iByte = psD[(*piX)++]; + ptItem->unsignedValue |= (iByte << (i*8)); + } +/* + * Keep one value unsigned +*/ + ptItem->signedValue = ptItem->unsignedValue; +/* + * Sign extend one value +*/ + if ((iByte & 0x80) != 0) + { + while (i < sizeof(int)) + ptItem->signedValue |= (0xFF << ((i++)*8)); + } + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDOpenCloseDescriptor.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDOpenCloseDescriptor.c new file mode 100644 index 000000000..a52667e04 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDOpenCloseDescriptor.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDOpenCloseDescriptor.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/17/99 BWS [2314839] Added flags field to HIDPreparsedData which is set in + new parameter to HIDOpenReportDescriptor + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +//#include + +/* + *------------------------------------------------------------------------------ + * + * HIDCloseReportDescriptor - Close the Descriptor + * + * Input: + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDCloseReportDescriptor(HIDPreparsedDataRef preparsedDataRef) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + OSStatus iStatus; +/* + * Disallow NULL Pointers +*/ + if (ptPreparsedData == NULL) + return kHIDNullPointerErr; +/* + * If it's marked closed then don't do anything +*/ + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Free any memory that 
was allocated +*/ + if (ptPreparsedData->rawMemPtr != NULL) + { + PoolDeallocate (ptPreparsedData->rawMemPtr, ptPreparsedData->numBytesAllocated); + ptPreparsedData->rawMemPtr = NULL; + } +/* + * Mark closed +*/ + ptPreparsedData->hidTypeIfValid = 0; +/* + * Deallocate the preparsed data +*/ + iStatus = PoolDeallocate (ptPreparsedData, sizeof(HIDPreparsedData)); + + return iStatus; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDOpenReportDescriptor - Initialize the HID Parser + * + * Input: + * psHidReportDescriptor - The HID Report Descriptor (String) + * descriptorLength - Length of the Descriptor in bytes + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus +HIDOpenReportDescriptor (void * hidReportDescriptor, + UInt32 descriptorLength, + HIDPreparsedDataRef * preparsedDataRef, + UInt32 flags) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + OSStatus iStatus; + HIDReportDescriptor tDescriptor; + +/* + * Disallow NULL Pointers +*/ + if ((hidReportDescriptor == NULL) || (preparsedDataRef == NULL)) + return kHIDNullPointerErr; + +/* + * Initialize the return result, and allocate space for preparsed data +*/ + *preparsedDataRef = NULL; + + ptPreparsedData = PoolAllocateResident (sizeof (HIDPreparsedData), kShouldClearMem); + +/* + * Make sure we got the memory +*/ + if (ptPreparsedData == NULL) + return kHIDNotEnoughMemoryErr; + +/* + * Copy the flags field +*/ + ptPreparsedData->flags = flags; +/* + * Initialize the memory allocation pointer +*/ + ptPreparsedData->rawMemPtr = NULL; +/* + * Set up the descriptor structure +*/ + tDescriptor.descriptor = hidReportDescriptor; + tDescriptor.descriptorLength = descriptorLength; +/* + * 
Count various items in the descriptor + * allocate space within the PreparsedData structure + * and initialize the counters there +*/ + iStatus = HIDCountDescriptorItems(&tDescriptor,ptPreparsedData); + if (iStatus != kHIDSuccess) + return iStatus; +/* + * Parse the Descriptor + * filling in the structures in the PreparsedData structure +*/ + iStatus = HIDParseDescriptor(&tDescriptor,ptPreparsedData); +/* + * Mark the PreparsedData initialized, maybe +*/ + if (iStatus == kHIDSuccess && ptPreparsedData->rawMemPtr != NULL) + { + ptPreparsedData->hidTypeIfValid = kHIDOSType; + *preparsedDataRef = (HIDPreparsedDataRef) ptPreparsedData; + } + else // something failed, deallocate everything, and make sure we return an error + { + if (ptPreparsedData->rawMemPtr != NULL) + PoolDeallocate (ptPreparsedData->rawMemPtr, ptPreparsedData->numBytesAllocated); + + PoolDeallocate (ptPreparsedData, sizeof(HIDPreparsedData)); + + if (iStatus == kHIDSuccess) + iStatus = kHIDNotEnoughMemoryErr; + } + + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDParseDescriptor.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDParseDescriptor.c new file mode 100644 index 000000000..8de229a6c --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDParseDescriptor.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDParseDescriptor.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +//#include +/* + *------------------------------------------------------------------------------ + * + * HIDParseDescriptor - Fill in the PreparsedData structures + * + * Input: + * ptDescriptor - Descriptor Pointer Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + * NOTE: HIDCountDescriptorItems MUST have been called to set up the + * array pointers in the HIDPreparsedData structure! 
+ * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDParseDescriptor(HIDReportDescriptor *ptDescriptor, HIDPreparsedDataPtr ptPreparsedData) +{ + OSStatus iStatus; + HIDItem *ptItem; + HIDCollection *ptCollection; + HIDReportSizes *ptReport; +/* + * Disallow NULL Pointers +*/ + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Initialize Counters +*/ + ptPreparsedData->collectionCount = 1; + ptPreparsedData->reportItemCount = 0; + ptPreparsedData->reportCount = 1; + ptPreparsedData->usageItemCount = 0; + ptPreparsedData->stringItemCount = 0; + ptPreparsedData->desigItemCount = 0; +/* + * Initialize the Descriptor Data +*/ + ptDescriptor->index = 0; + ptDescriptor->collectionNesting = 0; + ptDescriptor->globalsNesting = 0; + ptDescriptor->firstUsageItem = 0; + ptDescriptor->firstStringItem = 0; + ptDescriptor->firstDesigItem = 0; + ptDescriptor->parent = 0; + ptDescriptor->sibling = 0; + ptDescriptor->globals.usagePage = 0; + ptDescriptor->globals.logicalMinimum = 0; + ptDescriptor->globals.logicalMaximum = 0; + ptDescriptor->globals.physicalMinimum = 0; + ptDescriptor->globals.physicalMaximum = 0; + ptDescriptor->globals.unitExponent = 0; + ptDescriptor->globals.units = 0; + ptDescriptor->globals.reportSize = 0; + ptDescriptor->globals.reportID = 0; + ptDescriptor->globals.reportCount = 0; + ptDescriptor->globals.reportIndex = 0; + ptDescriptor->haveUsageMin = false; + ptDescriptor->haveUsageMax = false; + ptDescriptor->haveStringMin = false; + ptDescriptor->haveStringMax = false; + ptDescriptor->haveDesigMin = false; + ptDescriptor->haveDesigMax = false; + ptItem = &ptDescriptor->item; +/* + * Initialize the virtual collection +*/ + ptCollection = ptPreparsedData->collections; + ptCollection->data = 0; + ptCollection->usagePage = 0; + ptCollection->firstUsageItem = 0; + ptCollection->usageItemCount = 0; + ptCollection->firstReportItem = 0; + ptCollection->reportItemCount 
= 0; + ptCollection->parent = 0; + ptCollection->children = 0; + ptCollection->firstChild = 0; + ptCollection->nextSibling = 0; +/* + * Initialize the default report +*/ + ptReport = ptPreparsedData->reports; + ptReport->reportID = 0; + ptReport->inputBitCount = 0; + ptReport->outputBitCount = 0; + ptReport->featureBitCount = 0; + +/* + * Parse the Descriptor +*/ + while ((iStatus = HIDNextItem(ptDescriptor)) == kHIDSuccess) + { + switch (ptItem->itemType) + { + case kHIDTypeMain: + iStatus = HIDProcessMainItem(ptDescriptor,ptPreparsedData); + break; + case kHIDTypeGlobal: + iStatus = HIDProcessGlobalItem(ptDescriptor,ptPreparsedData); + break; + case kHIDTypeLocal: + iStatus = HIDProcessLocalItem(ptDescriptor,ptPreparsedData); + break; + } + if (iStatus != kHIDSuccess) + return iStatus; + } + if (iStatus == kHIDEndOfDescriptorErr) + iStatus = kHIDSuccess; +/* + * Update the virtual collection +*/ + ptCollection = ptPreparsedData->collections; + ptCollection->reportItemCount = ptPreparsedData->reportItemCount; +/* + * Mark the PreparsedData initialized +*/ + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPostProcessRIValue.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPostProcessRIValue.c new file mode 100644 index 000000000..32c7ca22f --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPostProcessRIValue.c @@ -0,0 +1,112 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDPostProcessRIValue.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 4/7/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDPostProcessRIValue - performs any post-processing necessary for data + * retrieved _from_ a report for the specified report + * item. 
Currently, the only post-processing done + * is reversing when appropriate + * + * Input: + * reportItem - The report item + * value - the value, from HIDGetData + * Output: + * value - The processed value + * Returns: + * kHIDSuccess - Success + * + *------------------------------------------------------------------------------ +*/ + +OSStatus HIDPostProcessRIValue (HIDReportItem * reportItem, + SInt32 * value) +{ + + // if isReversed, returnValue = ((min - returnValue) + max) + if (reportItem->flags & kHIDReportItemFlag_Reversed) + *value = ((reportItem->globals.logicalMinimum - (*value)) + + reportItem->globals.logicalMaximum); + + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDPreProcessRIValue - performs any pre-processing necessary for data + * ouput _to_ a report for the specified report + * item. Currently, the only pre-processing done + * is reversing when appropriate + * + * Input: + * reportItem - The report item + * value - the value, destined for HIDPutData + * Output: + * value - The processed value + * Returns: + * kHIDSuccess - Success + * + *------------------------------------------------------------------------------ +*/ + +OSStatus HIDPreProcessRIValue (HIDReportItem * reportItem, + SInt32 * value) +{ + + // if isReversed, returnValue = ((min - returnValue) + max) + if (reportItem->flags & kHIDReportItemFlag_Reversed) + *value = ((reportItem->globals.logicalMinimum - (*value)) + + reportItem->globals.logicalMaximum); + + return kHIDSuccess; +} + + diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPriv.h b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPriv.h new file mode 100644 index 000000000..3db02ab6b --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPriv.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HIDPriv__ +#define __HIDPriv__ + +/* + File: HIDPriv.i + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + <5> 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + <4> 4/7/99 BWS Add flags to report items (for reverse) + <3> 3/19/99 BWS Build stub library + <2> 3/17/99 BWS [2314839] Add flags field to HIDPreparsedData, is set in + HIDOpenReportDescriptor + <1> 3/5/99 BWS first checked in +*/ + +#include "MacTypes.h" +#include + +/* the following constants are from the USB HID Specification (www.usb.org)*/ + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Header */ +/* */ +/* --------------------------------------------------------- */ +/* | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | */ +/* | Tag | Type | Size | */ +/* --------------------------------------------------------- */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDItemSizeMask = 0x03, + kHIDItemTagMask = 0xF0, + kHIDItemTagShift = 4, + kHIDItemTypeMask = 0x0C, + kHIDItemTypeShift = 2, + kHIDLongItemHeader = 0xFE +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Item Type Definitions */ +/* */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDTypeMain = 0, + kHIDTypeGlobal = 1, + kHIDTypeLocal = 2, + kHIDTypeLong = 3 +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Item Tag Definitions - Main Items */ +/* */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDTagInput = 8, + kHIDTagOutput = 9, + kHIDTagCollection = 0x0A, + kHIDTagFeature = 0x0B, + kHIDTagEndCollection = 0x0C +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Item Tag Definitions - Globals */ +/* */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDTagUsagePage = 0, + kHIDTagLogicalMinimum = 1, + kHIDTagLogicalMaximum = 2, + 
kHIDTagPhysicalMinimum = 3, + kHIDTagPhysicalMaximum = 4, + kHIDTagUnitExponent = 5, + kHIDTagUnit = 6, + kHIDTagReportSize = 7, + kHIDTagReportID = 8, + kHIDTagReportCount = 9, + kHIDTagPush = 0x0A, + kHIDTagPop = 0x0B +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Item Tag Definitions - Locals */ +/* */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDTagUsage = 0, + kHIDTagUsageMinimum = 1, + kHIDTagUsageMaximum = 2, + kHIDTagDesignatorIndex = 3, + kHIDTagDesignatorMinimum = 4, + kHIDTagDesignatorMaximum = 5, + kHIDTagStringIndex = 7, + kHIDTagStringMinimum = 8, + kHIDTagStringMaximum = 9, + kHIDTagSetDelimiter = 0x0A +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Main Item Header Bit Definitions */ +/* */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDDataBufferedBytes = 0x0100, + kHIDDataVolatileBit = 0x80, + kHIDDataVolatile = 0x80, + kHIDDataNullStateBit = 0x40, + kHIDDataNullState = 0x40, + kHIDDataNoPreferredBit = 0x20, + kHIDDataNoPreferred = 0x20, + kHIDDataNonlinearBit = 0x10, + kHIDDataNonlinear = 0x10, + kHIDDataWrapBit = 0x08, + kHIDDataWrap = 0x08, + kHIDDataRelativeBit = 0x04, + kHIDDataRelative = 0x04, + kHIDDataAbsolute = 0x00, + kHIDDataVariableBit = 0x02, + kHIDDataVariable = 0x02, + kHIDDataArrayBit = 0x02, + kHIDDataArray = 0x00, + kHIDDataConstantBit = 0x01, + kHIDDataConstant = 0x01 +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HID Collection Data Definitions */ +/* */ +/*------------------------------------------------------------------------------*/ +enum { + kHIDPhysicalCollection = 0x00, + kHIDApplicationCollection = 0x01 +}; + +/*------------------------------------------------------------------------------*/ +/* */ +/* HIDLibrary private defs */ +/* */ 
+/*------------------------------------------------------------------------------*/ + +enum +{ + kHIDOSType = 'hid ' +}; + +struct HIDItem +{ + ByteCount byteCount; + SInt32 itemType; + SInt32 tag; + SInt32 signedValue; + UInt32 unsignedValue; +}; +typedef struct HIDItem HIDItem; + +struct HIDGlobalItems +{ + HIDUsage usagePage; + SInt32 logicalMinimum; + SInt32 logicalMaximum; + SInt32 physicalMinimum; + SInt32 physicalMaximum; + SInt32 unitExponent; + SInt32 units; + ByteCount reportSize; + SInt32 reportID; + SInt32 reportCount; + SInt32 reportIndex; +}; +typedef struct HIDGlobalItems HIDGlobalItems; + +struct HIDReportSizes +{ + SInt32 reportID; + SInt32 inputBitCount; + SInt32 outputBitCount; + SInt32 featureBitCount; +}; +typedef struct HIDReportSizes HIDReportSizes; + +struct HIDCollection +{ + SInt32 data; + SInt32 usagePage; + SInt32 firstUsageItem; + SInt32 usageItemCount; + SInt32 firstReportItem; + SInt32 reportItemCount; + SInt32 parent; + SInt32 children; + SInt32 firstChild; + SInt32 nextSibling; +}; +typedef struct HIDCollection HIDCollection; + +enum +{ + kHIDReportItemFlag_Reversed = 0x00000001 +}; + +struct HIDReportItem +{ + UInt32 reportType; + HIDGlobalItems globals; + SInt32 startBit; + SInt32 parent; + SInt32 dataModes; + SInt32 firstUsageItem; + SInt32 usageItemCount; + SInt32 firstStringItem; + SInt32 stringItemCount; + SInt32 firstDesigItem; + SInt32 desigItemCount; + UInt32 flags; +}; +typedef struct HIDReportItem HIDReportItem; + +struct HIDP_UsageItem +{ + Boolean isRange; + Boolean reserved; + HIDUsage usagePage; + HIDUsage usage; + SInt32 usageMinimum; + SInt32 usageMaximum; +}; +typedef struct HIDP_UsageItem HIDP_UsageItem; + +struct HIDStringItem +{ + Boolean isRange; + Boolean reserved; + SInt32 index; + SInt32 minimum; + SInt32 maximum; +}; +typedef struct HIDStringItem HIDStringItem; +typedef HIDStringItem HIDDesignatorItem; + +struct HIDPreparsedData +{ + OSType hidTypeIfValid; + HIDCollection * collections; + UInt32 
collectionCount; + HIDReportItem * reportItems; + UInt32 reportItemCount; + HIDReportSizes * reports; + UInt32 reportCount; + HIDP_UsageItem * usageItems; + UInt32 usageItemCount; + HIDStringItem * stringItems; + UInt32 stringItemCount; + HIDDesignatorItem * desigItems; + UInt32 desigItemCount; + UInt8 * rawMemPtr; + UInt32 flags; + ByteCount numBytesAllocated; +}; +typedef struct HIDPreparsedData HIDPreparsedData; +typedef HIDPreparsedData * HIDPreparsedDataPtr; + +extern +OSStatus +HIDCheckReport (HIDReportType reportType, + HIDPreparsedDataRef preparsedDataRef, + HIDReportItem * reportItem, + void * report, + ByteCount reportLength); + + +extern +OSStatus +HIDGetData (void * report, + ByteCount reportLength, + UInt32 start, + UInt32 size, + SInt32 * value, + Boolean signExtend); + +extern +OSStatus +HIDPostProcessRIValue (HIDReportItem * reportItem, + SInt32 * value); + +extern +OSStatus +HIDPreProcessRIValue (HIDReportItem * reportItem, + SInt32 * value); + +extern +Boolean +HIDHasUsage (HIDPreparsedDataRef preparsedDataRef, + HIDReportItem * reportItem, + HIDUsage usagePage, + HIDUsage usage, + UInt32 * usageIndex, + UInt32 * count); + +extern +Boolean +HIDIsButton (HIDReportItem * reportItem, + HIDPreparsedDataRef preparsedDataRef); + +extern +Boolean +HIDIsVariable (HIDReportItem * reportItem, + HIDPreparsedDataRef preparsedDataRef); + +extern +OSStatus +HIDPutData (void * report, + ByteCount reportLength, + UInt32 start, + UInt32 size, + SInt32 value); + +extern +OSStatus +HIDScaleUsageValueIn (HIDReportItem * reportItem, + UInt32 value, + SInt32 * scaledValue); + +extern +OSStatus +HIDScaleUsageValueOut (HIDReportItem * reportItem, + UInt32 value, + SInt32 * scaledValue); + +extern +void +HIDUsageAndPageFromIndex (HIDPreparsedDataRef preparsedDataRef, + HIDReportItem * reportItem, + UInt32 index, + HIDUsageAndPage * usageAndPage); + +extern +Boolean +HIDUsageInRange (HIDP_UsageItem * usageItem, + HIDUsage usagePage, + HIDUsage usage); + +#endif diff --git 
a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessCollection.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessCollection.c new file mode 100644 index 000000000..93e1b18ad --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessCollection.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDProcessCollection.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDProcessCollection - Process a Collection MainItem + * + * Input: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDProcessCollection(HIDReportDescriptor *ptDescriptor, HIDPreparsedDataPtr ptPreparsedData) +{ + HIDCollection *collections; + HIDCollection *ptCollection; + int parent; + int iCollection; +/* + * Disallow NULL Pointers +*/ + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Initialize the new Collection Structure +*/ + iCollection = ptPreparsedData->collectionCount++; + collections = ptPreparsedData->collections; + ptCollection = &collections[iCollection]; + ptCollection->data = ptDescriptor->item.unsignedValue; + ptCollection->firstUsageItem = ptDescriptor->firstUsageItem; + ptCollection->usageItemCount = ptPreparsedData->usageItemCount - ptDescriptor->firstUsageItem; + ptDescriptor->firstUsageItem = ptPreparsedData->usageItemCount; + ptCollection->children = 0; + ptCollection->nextSibling = ptDescriptor->sibling; + ptDescriptor->sibling = 0; + ptCollection->firstChild = 0; + ptCollection->usagePage = ptDescriptor->globals.usagePage; + ptCollection->firstReportItem = ptPreparsedData->reportItemCount; +/* + * Set up the relationship with the parent Collection +*/ + parent = ptDescriptor->parent; + 
ptCollection->parent = parent; + collections[parent].firstChild = iCollection; + collections[parent].children++; + ptDescriptor->parent = iCollection; +/* + * Save the parent Collection Information on the stack +*/ + ptDescriptor->collectionStack[ptDescriptor->collectionNesting++] = parent; + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDProcessEndCollection - Process an EndCollection MainItem + * + * Input: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDProcessEndCollection(HIDReportDescriptor *ptDescriptor, HIDPreparsedDataPtr ptPreparsedData) +{ + HIDCollection *ptCollection; + int iCollection; +/* + * Remember the number of ReportItem MainItems in this Collection +*/ + ptCollection = &ptPreparsedData->collections[ptDescriptor->parent]; + ptCollection->reportItemCount = ptPreparsedData->reportItemCount - ptCollection->firstReportItem; +/* + * Restore the parent Collection Data +*/ + iCollection = ptDescriptor->collectionStack[--ptDescriptor->collectionNesting]; + ptDescriptor->sibling = ptDescriptor->parent; + ptDescriptor->parent = iCollection; + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessGlobalItem.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessGlobalItem.c new file mode 100644 index 000000000..5764b6141 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessGlobalItem.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDProcessGlobalItem.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (DF) David Ferguson + (BWS) Brent Schorsch + + Change History (most recent first): + + 10/18/99 DF Lets try not reporting an error with zero report count + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDProcessGlobalItem - Process a GlobalItem + * + * Input: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDProcessGlobalItem(HIDReportDescriptor *ptDescriptor, HIDPreparsedDataPtr ptPreparsedData) +{ + HIDReportSizes *ptReport; + HIDGlobalItems *ptGlobals; + HIDItem *ptItem; + int reportIndex; +/* + * Disallow NULL Pointers +*/ + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Process by tag +*/ + ptItem = &ptDescriptor->item; + ptGlobals = &ptDescriptor->globals; + switch (ptItem->tag) + { +/* + * usage Page +*/ + case kHIDTagUsagePage: + if (ptItem->unsignedValue == 0) + return kHIDUsagePageZeroErr; + ptGlobals->usagePage = ptItem->unsignedValue; + break; +/* + * Logical Minimum +*/ + case kHIDTagLogicalMinimum: + ptGlobals->logicalMinimum = ptItem->signedValue; + break; +/* + * Logical Maximum +*/ + case kHIDTagLogicalMaximum: + ptGlobals->logicalMaximum = ptItem->signedValue; + break; +/* + * Physical Minimum +*/ + case kHIDTagPhysicalMinimum: + ptGlobals->physicalMinimum = ptItem->signedValue; + break; +/* + * Physical Maximum +*/ + case kHIDTagPhysicalMaximum: + ptGlobals->physicalMaximum = ptItem->signedValue; + 
break; +/* + * Unit Exponent +*/ + case kHIDTagUnitExponent: + ptGlobals->unitExponent = ptItem->signedValue; + break; +/* + * Unit +*/ + case kHIDTagUnit: + ptGlobals->units = ptItem->unsignedValue; + break; +/* + * Report Size in Bits +*/ + case kHIDTagReportSize: + ptGlobals->reportSize = ptItem->unsignedValue; + if (ptGlobals->reportSize == 0) + return kHIDReportSizeZeroErr; + break; +/* + * Report ID +*/ + case kHIDTagReportID: + if (ptItem->unsignedValue == 0) + return kHIDReportIDZeroErr; +/* + * Look for the Report ID in the table +*/ + reportIndex = 0; + while ((reportIndex < ptPreparsedData->reportCount) + && (ptPreparsedData->reports[reportIndex].reportID != ptItem->unsignedValue)) + reportIndex++; +/* + * Initialize the entry if it's new and there's room for it + * Start with 8 bits for the Report ID +*/ + if (reportIndex == ptPreparsedData->reportCount) + { + ptReport = &ptPreparsedData->reports[ptPreparsedData->reportCount++]; + ptReport->reportID = ptItem->unsignedValue; + ptReport->inputBitCount = 8; + ptReport->outputBitCount = 8; + ptReport->featureBitCount = 8; + } +/* + * Remember which report is being processed +*/ + ptGlobals->reportID = ptItem->unsignedValue; + ptGlobals->reportIndex = reportIndex; + break; +/* + * Report Count +*/ + case kHIDTagReportCount: +#if 0 + // some device actually have a report count of zero specified. we must allow it! 
+ if (ptItem->unsignedValue == 0) + return kHIDReportCountZeroErr; +#endif + ptGlobals->reportCount = ptItem->unsignedValue; + break; +/* + * Push Globals +*/ + case kHIDTagPush: + ptDescriptor->globalsStack[ptDescriptor->globalsNesting++] = ptDescriptor->globals; + break; +/* + * Pop Globals +*/ + case kHIDTagPop: + ptDescriptor->globals = ptDescriptor->globalsStack[--ptDescriptor->globalsNesting]; + break; + } + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessLocalItem.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessLocalItem.c new file mode 100644 index 000000000..c67f05a24 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessLocalItem.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDProcessLocalItem.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDProcessLocalItem - Process a LocalItem + * + * Input: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDProcessLocalItem(HIDReportDescriptor *ptDescriptor, + HIDPreparsedDataPtr ptPreparsedData) +{ + HIDDesignatorItem *ptDesignatorItem; + HIDStringItem *ptStringItem; + HIDP_UsageItem *ptUsageItem; + HIDItem *ptItem; +/* + * Disallow NULL Pointers +*/ + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Process the LocalItem by tag +*/ + ptItem = &ptDescriptor->item; + switch (ptItem->tag) + { +/* + * Note that Tag = usage Item may represent either + * a UsagePair with the usagePage implied, or + * a UsagePair defined by an extended usage + * If a Tag = usage Item has 1 or 2 bytes of data + * then the current usagePage is used + * If a Tag = usage Item has 4 bytes of data + * then the high order bytes are the usagePage + * + * Note that the Microsoft HID Parser uses the last + * usagePage defined before the MainItem with which + * the usage is associated rather than the current + * usagePage. The method used here is more generic + * although multiple UsagePages for a MainItem are + * unlikely due to the MS limitation. 
+*/ + case kHIDTagUsage: + ptUsageItem = &ptPreparsedData->usageItems[ptPreparsedData->usageItemCount++]; + ptUsageItem->isRange = false; + if (ptItem->byteCount == 4) + { + ptUsageItem->usagePage = ptItem->unsignedValue>>16; + ptUsageItem->usage = ptItem->unsignedValue&0xFFFFL; + } + else + { + ptUsageItem->usagePage = ptDescriptor->globals.usagePage; + ptUsageItem->usage = ptItem->unsignedValue; + } + break; +/* + * Note that Tag = usage Minimum Item may represent either + * a UsagePair with the usagePage implied, or + * a UsagePair defined by an extended usage + * If a Tag = usage Item has 1 or 2 bytes of data + * then the current usagePage is used + * If a Tag = usage Item has 4 bytes of data + * then the high order bytes are the usagePage +*/ + case kHIDTagUsageMinimum: + if (ptDescriptor->haveUsageMax) + { + ptUsageItem = &ptPreparsedData->usageItems[ptPreparsedData->usageItemCount++]; + ptUsageItem->isRange = true; + if (ptItem->byteCount == 4) + { + ptUsageItem->usagePage = ptItem->unsignedValue>>16; + ptUsageItem->usageMinimum = ptItem->unsignedValue&0xFFFFL; + } + else + { + ptUsageItem->usagePage = ptDescriptor->globals.usagePage; + ptUsageItem->usageMinimum = ptItem->unsignedValue; + } + if (ptUsageItem->usagePage != ptDescriptor->rangeUsagePage) + return kHIDInvalidRangePageErr; + ptUsageItem->usageMaximum = ptDescriptor->usageMaximum; + if (ptUsageItem->usageMaximum < ptUsageItem->usageMinimum) + return kHIDInvertedUsageRangeErr; + ptDescriptor->haveUsageMax = false; + ptDescriptor->haveUsageMin = false; + } + else + { + if (ptItem->byteCount == 4) + { + ptDescriptor->rangeUsagePage = ptItem->unsignedValue>>16; + ptDescriptor->usageMinimum = ptItem->unsignedValue&0xFFFFL; + } + else + { + ptDescriptor->rangeUsagePage = ptDescriptor->globals.usagePage; + ptDescriptor->usageMinimum = ptItem->unsignedValue; + } + ptDescriptor->haveUsageMin = true; + } + break; +/* + * Note that Tag = usage Maximum Item may represent either + * a UsagePair with the 
usagePage implied, or + * a UsagePair defined by an extended usage + * If a Tag = usage Item has 1 or 2 bytes of data + * then the current usagePage is used + * If a Tag = usage Item has 4 bytes of data + * then the high order bytes are the usagePage +*/ + case kHIDTagUsageMaximum: + if (ptDescriptor->haveUsageMin) + { + ptUsageItem = &ptPreparsedData->usageItems[ptPreparsedData->usageItemCount++]; + ptUsageItem->isRange = true; + if (ptItem->byteCount == 4) + { + ptUsageItem->usagePage = ptItem->unsignedValue>>16; + ptUsageItem->usageMaximum = ptItem->unsignedValue&0xFFFFL; + } + else + { + ptUsageItem->usagePage = ptDescriptor->globals.usagePage; + ptUsageItem->usageMaximum = ptItem->unsignedValue; + } + if (ptUsageItem->usagePage != ptDescriptor->rangeUsagePage) + return kHIDInvalidRangePageErr; + ptUsageItem->usageMinimum = ptDescriptor->usageMinimum; + if (ptUsageItem->usageMaximum < ptUsageItem->usageMinimum) + return kHIDInvertedUsageRangeErr; + ptDescriptor->haveUsageMax = false; + ptDescriptor->haveUsageMin = false; + } + else + { + if (ptItem->byteCount == 4) + { + ptDescriptor->rangeUsagePage = ptItem->unsignedValue>>16; + ptDescriptor->usageMaximum = ptItem->unsignedValue&0xFFFFL; + } + else + { + ptDescriptor->rangeUsagePage = ptDescriptor->globals.usagePage; + ptDescriptor->usageMaximum = ptItem->unsignedValue; + } + ptDescriptor->haveUsageMax = true; + } + break; +/* + * Designators +*/ + case kHIDTagDesignatorIndex: + ptDesignatorItem = &ptPreparsedData->desigItems[ptPreparsedData->desigItemCount++]; + ptDesignatorItem->isRange = false; + ptDesignatorItem->index = ptItem->unsignedValue; + break; + case kHIDTagDesignatorMinimum: + if (ptDescriptor->haveDesigMax) + { + ptDesignatorItem = &ptPreparsedData->desigItems[ptPreparsedData->desigItemCount++]; + ptDesignatorItem->isRange = true; + ptDesignatorItem->minimum = ptItem->unsignedValue; + ptDesignatorItem->maximum = ptDescriptor->desigMaximum; + ptDescriptor->haveDesigMin = false; + 
ptDescriptor->haveDesigMax = false; + } + else + { + ptDescriptor->desigMinimum = ptItem->unsignedValue; + ptDescriptor->haveDesigMin = true; + } + break; + case kHIDTagDesignatorMaximum: + if (ptDescriptor->haveDesigMin) + { + ptDesignatorItem = &ptPreparsedData->desigItems[ptPreparsedData->desigItemCount++]; + ptDesignatorItem->isRange = true; + ptDesignatorItem->maximum = ptItem->unsignedValue; + ptDesignatorItem->minimum = ptDescriptor->desigMinimum; + ptDescriptor->haveDesigMin = false; + ptDescriptor->haveDesigMax = false; + } + else + { + ptDescriptor->desigMaximum = ptItem->unsignedValue; + ptDescriptor->haveDesigMax = true; + } + break; +/* + * Strings +*/ + case kHIDTagStringIndex: + ptStringItem = &ptPreparsedData->stringItems[ptPreparsedData->stringItemCount++]; + ptStringItem->isRange = false; + ptStringItem->index = ptItem->unsignedValue; + break; + case kHIDTagStringMinimum: + if (ptDescriptor->haveStringMax) + { + ptStringItem = &ptPreparsedData->stringItems[ptPreparsedData->stringItemCount++]; + ptStringItem->isRange = true; + ptStringItem->minimum = ptItem->unsignedValue; + ptStringItem->maximum = ptDescriptor->stringMaximum; + ptDescriptor->haveStringMin = false; + ptDescriptor->haveStringMax = false; + } + else + { + ptDescriptor->stringMinimum = ptItem->unsignedValue; + ptDescriptor->haveStringMin = true; + } + break; + case kHIDTagStringMaximum: + if (ptDescriptor->haveStringMin) + { + ptStringItem = &ptPreparsedData->stringItems[ptPreparsedData->stringItemCount++]; + ptStringItem->isRange = true; + ptStringItem->maximum = ptItem->unsignedValue; + ptStringItem->minimum = ptDescriptor->stringMinimum; + ptDescriptor->haveStringMin = false; + ptDescriptor->haveStringMax = false; + } + else + { + ptDescriptor->stringMaximum = ptItem->unsignedValue; + ptDescriptor->haveStringMax = true; + } + break; +/* + * Delimiters (are not processed) +*/ + case kHIDTagSetDelimiter: + break; + } + return kHIDSuccess; +} diff --git 
a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessMainItem.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessMainItem.c new file mode 100644 index 000000000..2bc2cd36c --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessMainItem.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDProcessMainItem.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDProcessMainItem - Process a MainItem + * + * Input: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDProcessMainItem(HIDReportDescriptor *ptDescriptor, + HIDPreparsedDataPtr ptPreparsedData) +{ + OSStatus iStatus = kHIDSuccess; + +/* + * Disallow NULL Pointers +*/ + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Process by MainItem Tag +*/ + switch (ptDescriptor->item.tag) + { + case kHIDTagCollection: + iStatus = HIDProcessCollection(ptDescriptor,ptPreparsedData); + break; + case kHIDTagEndCollection: + iStatus = HIDProcessEndCollection(ptDescriptor,ptPreparsedData); + break; + case kHIDTagInput: + case kHIDTagOutput: + case kHIDTagFeature: + iStatus = HIDProcessReportItem(ptDescriptor,ptPreparsedData); + break; + } + return iStatus; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessReportItem.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessReportItem.c new file mode 100644 index 000000000..7d81b9ecc --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessReportItem.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDProcessReportItem.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (DF) David Ferguson + (BWS) Brent Schorsch + + Change History (most recent first): + + 1/11/00 KH Tweaking last fix. For logical maximum, limit shifting into the + sign bit only for report sizes of 32 bits or greater. + 1/10/00 DF re-do last change (better fix). + 1/10/00 DF do proper logical range test for 32-bit report items. + 4/7/99 BWS Add support for reversed report items + 3/20/99 BWS Oops, strict error checking does not work if there is no error. + We should only return error if it is not noErr + 3/17/99 BWS [2314839] Added flags field to HIDPreparsedData which is set in + new parameter to HIDOpenReportDescriptor. 
We check the + StrictErrorCheck bit to determine whether we return errors or + just try to work around problems we find + 3/7/99 BWS [2311413] Do not error check min/max ranges for constants + 3/7/99 BWS [2311412] We need to handle the cases where physical min/max is + either (0/0) which is valid according to the spec, and means to + use the logical min/max, and the invalid case, that some devices + exibit, which has (0/-1) which we will treat the same, + 3/5/99 BWS [2311359] HIDProcessReportItem does not initialize startBit + field of HIDReportItem! + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDProcessReportItem - Process a Report Item MainItem + * + * Input: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Output: + * ptDescriptor - The Descriptor Structure + * ptPreparsedData - The PreParsedData Structure + * Returns: + * kHIDSuccess - Success + * kHIDNullPointerErr - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDProcessReportItem(HIDReportDescriptor *ptDescriptor, HIDPreparsedDataPtr ptPreparsedData) +{ + OSStatus error = noErr; + HIDReportItem *ptReportItem; + HIDReportSizes *ptReport; + int iBits; +/* + * Disallow NULL Pointers +*/ + + if ((ptDescriptor == NULL) || (ptPreparsedData == NULL)) + return kHIDNullPointerErr; +/* + * Begin to initialize the new Report Item structure +*/ + + ptReportItem = &ptPreparsedData->reportItems[ptPreparsedData->reportItemCount++]; + ptReportItem->dataModes = ptDescriptor->item.unsignedValue; + ptReportItem->globals = ptDescriptor->globals; + ptReportItem->flags = 0; + +/* + * Reality Check on the Report Main Item +*/ + // Don't check ranges for constants (MS Sidewinder, for one, does not reset) + //if (!(ptReportItem->dataModes & kHIDDataConstantBit)) // don't think we need this anymore 
+ { + // Determine the maximum signed value for a given report size. + // (Don't allow shifting into sign bit.) + SInt32 posSize = (ptReportItem->globals.reportSize >= 32) ? + 31 : ptReportItem->globals.reportSize; + SInt32 realMax = (1<globals.logicalMinimum > realMax) + { + error = kHIDBadLogicalMinimumErr; + ptReportItem->globals.logicalMinimum = 0; + } + if (ptReportItem->globals.logicalMaximum > realMax) + { + if (error == noErr) + error = kHIDBadLogicalMaximumErr; + ptReportItem->globals.logicalMaximum = realMax; + } + if (ptReportItem->globals.logicalMinimum > ptReportItem->globals.logicalMaximum) + { + SInt32 temp; + if (error == noErr) + error = kHIDInvertedLogicalRangeErr; + + // mark as a 'reversed' item + ptReportItem->flags |= kHIDReportItemFlag_Reversed; + + temp = ptReportItem->globals.logicalMaximum; + ptReportItem->globals.logicalMaximum = ptReportItem->globals.logicalMinimum; + ptReportItem->globals.logicalMinimum = temp; + } + } + + // check to see if we got half a range (we don't need to fix this, since 'isRange' will be false + if ((error == noErr) && (ptDescriptor->haveUsageMin || ptDescriptor->haveUsageMax)) + error = kHIDUnmatchedUsageRangeErr; + if ((error == noErr) && (ptDescriptor->haveStringMin || ptDescriptor->haveStringMax)) + error = kHIDUnmatchedStringRangeErr; + if ((error == noErr) && (ptDescriptor->haveDesigMin || ptDescriptor->haveDesigMax)) + error = kHIDUnmatchedDesignatorRangeErr; + + // if the physical min/max are out of wack, use the logical values + if (ptReportItem->globals.physicalMinimum >= ptReportItem->globals.physicalMaximum) + { + // equal to each other is not an error, just means to use the logical values + if ((error == noErr) && + (ptReportItem->globals.physicalMinimum > ptReportItem->globals.physicalMaximum)) + error = kHIDInvertedPhysicalRangeErr; + + ptReportItem->globals.physicalMinimum = ptReportItem->globals.logicalMinimum; + ptReportItem->globals.physicalMaximum = ptReportItem->globals.logicalMaximum; + } + 
+ // if strict error checking is true, return any errors + if (error != noErr && ptPreparsedData->flags & kHIDFlag_StrictErrorChecking) + return error; + +/* + * Continue to initialize the new Report Item structure +*/ + + ptReportItem->parent = ptDescriptor->parent; + ptReportItem->firstUsageItem = ptDescriptor->firstUsageItem; + ptDescriptor->firstUsageItem = ptPreparsedData->usageItemCount; + ptReportItem->usageItemCount = ptPreparsedData->usageItemCount - ptReportItem->firstUsageItem; + ptReportItem->firstStringItem = ptDescriptor->firstStringItem; + ptDescriptor->firstStringItem = ptPreparsedData->stringItemCount; + ptReportItem->stringItemCount = ptPreparsedData->stringItemCount - ptReportItem->firstStringItem; + ptReportItem->firstDesigItem = ptDescriptor->firstDesigItem; + ptDescriptor->firstDesigItem = ptPreparsedData->desigItemCount; + ptReportItem->desigItemCount = ptPreparsedData->desigItemCount - ptReportItem->firstDesigItem; +/* + * Update the Report by the size of this item +*/ + + ptReport = &ptPreparsedData->reports[ptReportItem->globals.reportIndex]; + iBits = ptReportItem->globals.reportSize * ptReportItem->globals.reportCount; + switch (ptDescriptor->item.tag) + { + case kHIDTagFeature: + ptReportItem->reportType = kHIDFeatureReport; + ptReportItem->startBit = ptReport->featureBitCount; + ptReport->featureBitCount += iBits; + break; + case kHIDTagOutput: + ptReportItem->reportType = kHIDOutputReport; + ptReportItem->startBit = ptReport->outputBitCount; + ptReport->outputBitCount += iBits; + break; + case kHIDTagInput: + ptReportItem->reportType = kHIDInputReport; + ptReportItem->startBit = ptReport->inputBitCount; + ptReport->inputBitCount += iBits; + break; + default: + ptReportItem->reportType = kHIDUnknownReport; + ptReportItem->startBit = 0; + break; + } + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPutData.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPutData.c new file mode 100644 index 
000000000..3eceaac1c --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPutData.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDPutData.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + (KH) Keithen Hayenga + + Change History (most recent first): + + 12/12/00 KH Correcct cast of void * + 11/11/99 KH Use shifted value when HIDSetUsageValue fills data into a report + field that spans multiple bytes. + 11/10/99 KH Data that overflowed byte bounderies was lost because + we shifted initial value instead of value corrected for + starting bit location. 
+ 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +//#include + +/* + *------------------------------------------------------------------------------ + * + * HIDPutData - Put a single data item to a report + * + * Input: + * psReport - The report + * iReportLength - The length of the report + * iStart - Start Bit in report + * iSize - Number of Bits + * iValue - The data + * Output: + * Returns: + * kHidP_Success - Success + * kHidP_NullPointer - Argument, Pointer was Null + * + *------------------------------------------------------------------------------ +*/ +OSStatus +HIDPutData (void * report, + ByteCount reportLength, + UInt32 start, + UInt32 size, + SInt32 value) +{ + Byte * psReport = (Byte *)report; + SInt32 data, iShiftedData; + UInt32 iStartByte, startBit; + UInt32 iLastByte, iLastBit; + UInt32 iStartMask, iLastMask; + UInt32 iDataMask; +/* + * Report + * Bit 28 27 26 25 24 | 23 22 21 20 19 18 17 16 | 15 14 13 12 11 10 09 ... + * Last Byte (3) | | Byte 2 | | Start Byte (1) + * Data x x x d d | d d d d d d d d | d d y y y y y + * Last Bit (1) / | | \ Start Bit (6) + * ... 1 1 1 0 0 | Intermediate | 0 0 1 1 1 1 1 ... 
+ * Last Mask | Byte(s) | StartMask +*/ + iLastByte = (start + size - 1)/8; +/* + * Check the parameters +*/ + if ((start < 0) || (size <= 0) || (iLastByte >= reportLength)) + return kHIDBadParameterErr; + iLastBit = (start + size - 1)&7; + iLastMask = ~((1<<(iLastBit+1)) - 1); + iStartByte = start/8; + startBit = start&7; + iStartMask = (1<>= 8; +/* + * Store out an intermediate bytes +*/ + while (++iStartByte < iLastByte) + { + psReport[iStartByte] = (Byte) iShiftedData; + iShiftedData >>= 8; + } +/* + * Mask off the bits where the new data goes + * Mask off the bits in the new data where the old goes + * Or the two together and store back out +*/ + data = psReport[iLastByte]; + data &= iLastMask; + iShiftedData &= ~iLastMask; + data |= iShiftedData; + } +/* + * Store out the last or only Byte +*/ + psReport[iStartByte] = (Byte) data; + return kHIDSuccess; +} + diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDScaleUsageValue.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDScaleUsageValue.c new file mode 100644 index 000000000..842453e8e --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDScaleUsageValue.c @@ -0,0 +1,198 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDScaleUsageValue.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDScaleUsageValueIn + * + * Input: + * ptReportItem - The ReportItem in which the data resides + * iValue - The unscaled data + * piScaledValue - The scaled value + * Output: + * piScaledValue - The scaled value + * Returns: + * kHIDSuccess + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDScaleUsageValueIn (HIDReportItem *ptReportItem, UInt32 iValue, SInt32 *piScaledValue) +{ + long int lData; + long int lDeltaL; + long int lDeltaP; + long int lL, lP; + long int lScaledData; + long int lLMin, lLMax; +/* + * Disallow Null Pointers +*/ + if ((ptReportItem == NULL) || (piScaledValue == NULL)) + return kHIDNullPointerErr; +/* + * Convert the data to Long Integer +*/ + lData = iValue; +/* + * range check the Logical Value +*/ + lLMax = ptReportItem->globals.logicalMaximum; + lLMin = ptReportItem->globals.logicalMinimum; + if ((lData < lLMin) || (lData > lLMax)) + { + if ((ptReportItem->dataModes & kHIDDataNullStateBit) == kHIDDataNullState) + return kHIDNullStateErr; + return kHIDValueOutOfRangeErr; + } +/* + * (PhysicalValue - PhysicalMinimum)/(PhysicalMaximum - PhysicalMinimum) + * = (LogicalValue - LogicalMinimum)/(LogicalMaximum - LogicalMinimum) + * + * Calculate the ranges + * Zero ranges are invalid! 
+ * lDeltaL = (LogicalMaximum - LogicalMinimum) + * lDeltaP = (PhysicalMaximum - PhysicalMinimum) +*/ + lDeltaL = lLMax - lLMin; + lDeltaP = ptReportItem->globals.physicalMaximum - ptReportItem->globals.physicalMinimum; + if ((lDeltaL == 0) || (lDeltaP == 0)) + return kHIDBadLogPhysValuesErr; +/* + * (PhysicalValue - PhysicalMinimum)/lDeltaP + * = (LogicalValue - LogicalMinimum)/lDeltaL + * lL = (LogicalValue - LogicalMinimum) +*/ + lL = lData - ptReportItem->globals.logicalMinimum; +/* + * (PhysicalValue - PhysicalMinimum)/lDeltaP = lL/lDeltaL + * (PhysicalValue - PhysicalMinimum) = (lDeltaP * lL)/lDeltaL + * lP = (PhysicalValue - PhysicalMinimum) = (lDeltaP * lL)/lDeltaL +*/ + lP = (lL* lDeltaP)/lDeltaL; +/* + * lP = (PhysicalValue - PhysicalMinimum) + * PhysicalValue = lP + PhysicalMinimum; +*/ + lScaledData = lP + ptReportItem->globals.physicalMinimum; + *piScaledValue = (int) lScaledData; + return kHIDSuccess; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDScaleUsageValueOut + * + * Input: + * ptReportItem - The ReportItem in which the data will go + * iValue - The unscaled data + * piScaledValue - The scaled value + * Output: + * piScaledValue - The scaled value + * Returns: + * kHIDSuccess + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDScaleUsageValueOut (HIDReportItem *ptReportItem, UInt32 iValue, SInt32 *piScaledValue) +{ + long int lData; + long int lDeltaL; + long int lDeltaP; + long int lL, lP; + long int lPMax, lPMin; +/* + * Convert the data to Long Integer +*/ + lData = iValue; +/* + * range check the Logical Value +*/ + lPMax = ptReportItem->globals.physicalMaximum; + lPMin = ptReportItem->globals.physicalMinimum; + if ((lData < lPMin) || (lData > lPMax)) + { + if ((ptReportItem->dataModes & kHIDDataNullStateBit) == kHIDDataNullState) + return kHIDNullStateErr; + return kHIDValueOutOfRangeErr; + } +/* + * (PhysicalValue - 
PhysicalMinimum)/(PhysicalMaximum - PhysicalMinimum) + * = (LogicalValue - LogicalMinimum)/(LogicalMaximum - LogicalMinimum) + * + * Calculate the ranges + * Zero ranges are invalid! + * lDeltaL = (LogicalMaximum - LogicalMinimum) + * lDeltaP = (PhysicalMaximum - PhysicalMinimum) +*/ + lDeltaL = ptReportItem->globals.logicalMaximum - ptReportItem->globals.logicalMinimum; + lDeltaP = ptReportItem->globals.physicalMaximum - ptReportItem->globals.physicalMinimum; + if ((lDeltaL == 0) || (lDeltaP == 0)) + return kHIDBadLogPhysValuesErr; +/* + * (PhysicalValue - PhysicalMinimum)/lDeltaP + * = (LogicalValue - LogicalMinimum)/lDeltaL + * lP = (PhysicalValue - PhysicalMinimum) +*/ + lP = lData - ptReportItem->globals.physicalMinimum; +/* + * (LogicalValue - LogicalMinimum)/lDeltaL = lP/lDeltaP + * (LogicalValue - LogicalMinimum)/lDeltaL = (lDeltaL * lP)/lDeltaP + * lL = (LogicalValue - LogicalMinimum) = (lDeltaL * lP)/lDeltaP +*/ + lL = (lP* lDeltaL)/lDeltaP; +/* + * lL = (LogicalValue - LogicalMinimum) + * LogicalValue = lL + LogicalMinimum; +*/ + lData = lL + ptReportItem->globals.logicalMinimum; + *piScaledValue = (int) lData; + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetButtons.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetButtons.c new file mode 100644 index 000000000..b13e73685 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetButtons.c @@ -0,0 +1,244 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDSetButtons.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2000 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDSetButton - Set the state of a button for a Page + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - Usages for pressed button + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDSetButton (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int iR, iX; + SInt32 data; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + Boolean bIncompatibleReport = false; + Boolean butNotReally = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (report == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((collection < 0) || (collection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = &ptPreparsedData->collections[collection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if 
(HIDIsButton(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,NULL)) + { +/* + * This may be the proper data to get + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem,report,reportLength); +/* + * The Report ID or Type may not match. + * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { + butNotReally = true; +/* + * Save Arrays +*/ + if ((ptReportItem->dataModes & kHIDDataArrayBit) == kHIDDataArray) + { + for (iX=0; iXglobals.reportCount; iX++) + { + iStart = ptReportItem->startBit + (ptReportItem->globals.reportSize * iX); + iStatus = HIDGetData(report, reportLength, iStart, + ptReportItem->globals.reportSize, &data, true); + if (!iStatus) + iStatus = HIDPostProcessRIValue (ptReportItem, &data); + if (iStatus != kHIDSuccess) + return iStatus; + // if not already in the list, add it (is this code right??) + if (data == 0) + return HIDPutData(report, reportLength, iStart, + ptReportItem->globals.reportSize, + iUsageIndex + ptReportItem->globals.logicalMinimum); + } + return kHIDBufferTooSmallErr; + } +/* + * Save Bitmaps +*/ + else if (ptReportItem->globals.reportSize == 1) + { + iStart = ptReportItem->startBit + (ptReportItem->globals.reportSize * iUsageIndex); + // should we call HIDPreProcessRIValue here? + // we are passing '-1' as trhe value, is this right? Some hack to set the right bit to 1? + iStatus = HIDPutData(report, reportLength, iStart, ptReportItem->globals.reportSize, -1); + if (iStatus != kHIDSuccess) + return iStatus; + return kHIDSuccess; + } + } + } + } + // If any of the report items were not the right type, we have set the bIncompatibleReport flag. + // However, if any of the report items really were the correct type, we have done our job of checking + // and really didn't find a usage. 
Don't let the bIncompatibleReport flag wipe out our valid test. + if (bIncompatibleReport && !butNotReally) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDSetButtons - Set the state of the buttons for a Page + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * collection - Collection Criteria or zero + * piUsageList - Usages for pressed buttons + * piUsageListLength - Max entries in UsageList + * ptPreparsedData - Pre-Parsed Data + * report - An HID Report + * reportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus +HIDSetButtons (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage * usageList, + UInt32 * usageListSize, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + OSStatus iStatus; + int iUsages; + int usage; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (usageList == NULL) + || (usageListSize == NULL) + || (report == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * Save the usage List Length +*/ + iUsages = *usageListSize; +/* + * Write them out one at a time +*/ + for (usage=0; usage 1/18/01 KH Fix for complex descriptors only needed for buttons. + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDSetScaledUsageValue - Set the value for a usage + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * iValue - The usage Value + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDSetScaledUsageValue(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + SInt32 iUsageValue, + HIDPreparsedDataRef preparsedDataRef, + void *psReport, + ByteCount iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int iR; + SInt32 data; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + Boolean bIncompatibleReport = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = 
&ptPreparsedData->reportItems[iReportItem]; + if ((ptReportItem->reportType == reportType) + && HIDIsVariable(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,NULL)) + { +/* + * This may be the proper place to write data + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem,psReport,iReportLength); +/* + * The Report ID or Type may not match. + * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { + iStatus = HIDScaleUsageValueOut(ptReportItem,iUsageValue,&data); + if (iStatus != kHIDSuccess) + return iStatus; + iStart = ptReportItem->startBit + (ptReportItem->globals.reportSize * iUsageIndex); + iStatus = HIDPreProcessRIValue (ptReportItem, &data); + iStatus = HIDPutData(psReport, iReportLength, iStart, ptReportItem->globals.reportSize, data); + if (iStatus != kHIDSuccess) + return iStatus; + return kHIDSuccess; + } + } + } + if (bIncompatibleReport) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValue.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValue.c new file mode 100644 index 000000000..06a9e9438 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValue.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDSetUsageValue.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2001 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 1/18/01 KH Fix for complex descriptors only needed for buttons. + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDSetUsageValue - Set the value for a usage + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria or zero + * iCollection - Collection Criteria or zero + * usage - usage Criteria or zero + * iValue - The usage Value + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDSetUsageValue(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + SInt32 iUsageValue, + HIDPreparsedDataRef preparsedDataRef, + void *psReport, + ByteCount iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int iR; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + Boolean bIncompatibleReport = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = &ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = 
&ptPreparsedData->reportItems[iReportItem]; + if (HIDIsVariable(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,NULL)) + { +/* + * This may be the proper place + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem, + psReport,iReportLength); +/* + * The Report ID or Type may not match. + * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { +/* + * Write out the data +*/ + iStart = ptReportItem->startBit + + (ptReportItem->globals.reportSize * iUsageIndex); + iStatus = HIDPreProcessRIValue (ptReportItem, &iUsageValue); + iStatus = HIDPutData(psReport, iReportLength, iStart, + ptReportItem->globals.reportSize, iUsageValue); + if (iStatus != kHIDSuccess) + return iStatus; + return kHIDSuccess; + } + } + } + if (bIncompatibleReport) + return kHIDIncompatibleReportErr; + return kHIDUsageNotFoundErr; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValueArray.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValueArray.c new file mode 100644 index 000000000..29c5ab2cc --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValueArray.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDSetUsageValueArray.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999-2001 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (KH) Keithen Hayenga + (BWS) Brent Schorsch + + Change History (most recent first): + + 1/18/01 KH Fix for complex descriptors only needed for buttons. + 3/24/00 KH Complex report descriptors could lead to reporting + kHIDUsageNotFoundErr's as kHIDIncompatibleReportErr's instead. + 11/1/99 BWS [2405720] We need a better check for 'bit padding' items, + rather than just is constant. We will check to make sure the + item is constant, and has no usage, or zero usage. 
This means we + need to pass an additional parameter to some internal functions + 4/7/99 BWS Add support for reversed report items + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDSetUsageValueArray - Set the values for a usage + * + * Input: + * reportType - HIDP_Input, HIDP_Output, HIDP_Feature + * usagePage - Page Criteria + * iCollection - Collection Criteria or zero + * usage - usage Criteria + * psBuffer - Pointer to usage Buffer + * iByteLength - Length of usage Buffer + * ptPreparsedData - Pre-Parsed Data + * psReport - An HID Report + * iReportLength - The length of the Report + * Output: + * piValue - Pointer to usage Value + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDSetUsageValueArray(HIDReportType reportType, + HIDUsage usagePage, + UInt32 iCollection, + HIDUsage usage, + UInt8 *psUsageBuffer, + UInt32 iByteLength, + HIDPreparsedDataRef preparsedDataRef, + void *psReport, + UInt32 iReportLength) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDCollection *ptCollection; + HIDReportItem *ptReportItem; + OSStatus iStatus; + int i; + int iR; + long iValue; + int iStart; + int iReportItem; + UInt32 iUsageIndex; + UInt32 iCount; + int byteCount; + Boolean bIncompatibleReport = false; +/* + * Disallow Null Pointers +*/ + if ((ptPreparsedData == NULL) + || (psUsageBuffer == NULL) + || (psReport == NULL)) + return kHIDNullPointerErr; + if (ptPreparsedData->hidTypeIfValid != kHIDOSType) + return kHIDInvalidPreparsedDataErr; +/* + * The Collection must be in range +*/ + if ((iCollection < 0) || (iCollection >= ptPreparsedData->collectionCount)) + return kHIDBadParameterErr; +/* + * Search only the scope of the Collection specified + * Go through the ReportItems + * Filter on ReportType and usagePage +*/ + ptCollection = 
&ptPreparsedData->collections[iCollection]; + for (iR=0; iRreportItemCount; iR++) + { + iReportItem = ptCollection->firstReportItem + iR; + ptReportItem = &ptPreparsedData->reportItems[iReportItem]; + if (HIDIsVariable(ptReportItem, preparsedDataRef) + && HIDHasUsage(preparsedDataRef,ptReportItem,usagePage,usage,&iUsageIndex,&iCount)) + { +/* + * This may be the proper place + * Let's check for the proper Report ID, Type, and Length +*/ + iStatus = HIDCheckReport(reportType,preparsedDataRef,ptReportItem, + psReport,iReportLength); +/* + * The Report ID or Type may not match. + * This may not be an error (yet) +*/ + if (iStatus == kHIDIncompatibleReportErr) + bIncompatibleReport = true; + else if (iStatus != kHIDSuccess) + return iStatus; + else + { +/* + * Disallow single count variables + * Count is set by HasUsage +*/ + if (iCount <= 1) + return kHIDNotValueArrayErr; +/* + * Write out the data +*/ + iStart = ptReportItem->startBit + (ptReportItem->globals.reportSize * iUsageIndex); + byteCount = (ptReportItem->globals.reportSize * iCount + 7)/8; + if (byteCount > iByteLength) + byteCount = iByteLength; + for (i=0; i 12/12/00 KH range count off by 1. 
+ 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDUsageAndPageFromIndex + * + * Input: + * ptPreparsedData - The Preparsed Data + * ptReportItem - The Report Item + * index - The usage Index + * ptUsageAndPage - The usage And Page + * Output: + * Returns: + * + *------------------------------------------------------------------------------ +*/ +void HIDUsageAndPageFromIndex (HIDPreparsedDataRef preparsedDataRef, + HIDReportItem *ptReportItem, UInt32 index, + HIDUsageAndPage *ptUsageAndPage) +{ + HIDPreparsedDataPtr ptPreparsedData = (HIDPreparsedDataPtr) preparsedDataRef; + HIDP_UsageItem *ptUsageItem = NULL; + int iUsageItem; + int iUsages; + int i; + +/* + * Disallow NULL Pointers +*/ + if ((ptUsageAndPage == NULL) || (ptReportItem == NULL) || (ptPreparsedData == NULL)) + { + ptUsageAndPage->usagePage = 0; + return; // kHIDNullPointerErr; + } + +/* + * Index through the usage Items for this ReportItem +*/ + iUsageItem = ptReportItem->firstUsageItem; + for (i=0; iusageItemCount; i++) + { +/* + * Each usage Item is either a usage or a usage range +*/ + ptUsageItem = &ptPreparsedData->usageItems[iUsageItem++]; + if (ptUsageItem->isRange) + { +/* + * For usage Ranges + * If the index is in the range + * then return the usage + * Otherwise adjust the index by the size of the range +*/ + iUsages = ptUsageItem->usageMaximum - ptUsageItem->usageMinimum; + if (iUsages < 0) + iUsages = -iUsages; + iUsages++; // Add off by one adjustment AFTER sign correction. 
+ if (iUsages > index) + { + ptUsageAndPage->usagePage = ptUsageItem->usagePage; + ptUsageAndPage->usage = ptUsageItem->usageMinimum + index; + return; + } + index -= iUsages; + } + else + { +/* + * For Usages + * If the index is zero + * then return this usage + * Otherwise one less to index through +*/ + if (index-- == 0) + { + ptUsageAndPage->usagePage = ptUsageItem->usagePage; + ptUsageAndPage->usage = ptUsageItem->usage; + return; + } + } + } + if (ptUsageItem != NULL) + { + ptUsageAndPage->usagePage = ptUsageItem->usagePage; + if (ptUsageItem->isRange) + ptUsageAndPage->usage = ptUsageItem->usageMaximum; + else + ptUsageAndPage->usage = ptUsageItem->usage; + } +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageInRange.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageInRange.c new file mode 100644 index 000000000..2d975ecc0 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageInRange.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDUsageInRange.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * HIDUsageInRange + * + * Input: + * ptUsage - The usage/UsageRange Item + * usagePage - The usagePage of the Item - or zero + * usage - The usage of the Item + * Output: + * Returns: + * true - usagePage/usage is in usage/UsageRange + * false - usagePage/usage is not in usage/UsageRange + * + *------------------------------------------------------------------------------ +*/ +Boolean HIDUsageInRange (HIDP_UsageItem *ptUsage, HIDUsage usagePage, HIDUsage usage) +{ +/* + * Disallow Null Pointers +*/ + if (ptUsage == NULL) + return false; +/* + * Check for the proper Page, 0 means don't care +*/ + if ((usagePage != 0) && (ptUsage->usagePage != usagePage)) + return false; +/* + * usage = 0 means don't care +*/ + if (usage == 0) + return true; +/* + * The requested usage must match or be in the range +*/ + if (ptUsage->isRange) + { + if ((ptUsage->usageMinimum > usage) || (ptUsage->usageMaximum < usage)) + return false; + } + else + { + if (ptUsage->usage != usage) + return false; + } + return true; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageListDifference.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageListDifference.c new file mode 100644 index 000000000..87e3816a3 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageListDifference.c @@ -0,0 +1,122 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: HIDUsageListDifference.c + + Contains: xxx put contents here xxx + + Version: xxx put version here xxx + + Copyright: © 1999 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: xxx put dri here xxx + + Other Contact: xxx put other contact here xxx + + Technology: xxx put technology here xxx + + Writers: + + (BWS) Brent Schorsch + + Change History (most recent first): + + 3/5/99 BWS first checked in +*/ + +#include "HIDLib.h" + +/* + *------------------------------------------------------------------------------ + * + * In - Is a usage in a UsageList? 
+ * + * Input: + * piUsageList - usage List + * iUsageListLength - Max entries in usage Lists + * usage - The usage + * Output: + * Returns: true or false + * + *------------------------------------------------------------------------------ +*/ +static Boolean IsUsageInUsageList(HIDUsage *piUsageList, UInt32 iUsageListLength, HIDUsage usage) +{ + unsigned int i; + for (i = 0; i < iUsageListLength; i++) + if (piUsageList[i] == usage) + return true; + return false; +} + +/* + *------------------------------------------------------------------------------ + * + * HIDUsageListDifference - Return adds and drops given present and past + * + * Input: + * piPreviouUL - Previous usage List + * piCurrentUL - Current usage List + * piBreakUL - Break usage List + * piMakeUL - Make usage List + * iUsageListLength - Max entries in usage Lists + * Output: + * piBreakUL - Break usage List + * piMakeUL - Make usage List + * Returns: + * + *------------------------------------------------------------------------------ +*/ +OSStatus HIDUsageListDifference(HIDUsage *piPreviousUL, HIDUsage *piCurrentUL, HIDUsage *piBreakUL, HIDUsage *piMakeUL, UInt32 iUsageListLength) +{ + int i; + HIDUsage usage; + int iBreakLength=0; + int iMakeLength=0; + for (i = 0; i < iUsageListLength; i++) + { +/* + * If in Current but not Previous then it's a Make +*/ + usage = piCurrentUL[i]; + if ((usage != 0) && (!IsUsageInUsageList(piPreviousUL,iUsageListLength,usage)) + && (!IsUsageInUsageList(piMakeUL,iMakeLength,usage))) + piMakeUL[iMakeLength++] = usage; +/* + * If in Previous but not Current then it's a Break +*/ + usage = piPreviousUL[i]; + if ((usage != 0) && (!IsUsageInUsageList(piCurrentUL,iUsageListLength,usage)) + && (!IsUsageInUsageList(piBreakUL,iBreakLength,usage))) + piBreakUL[iBreakLength++] = usage; + } +/* + * Clear the rest of the usage Lists +*/ + while (iMakeLength < iUsageListLength) + piMakeUL[iMakeLength++] = 0; + while (iBreakLength < iUsageListLength) + piBreakUL[iBreakLength++] = 
0; + return kHIDSuccess; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/MacTypes.h b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/MacTypes.h new file mode 100644 index 000000000..915d7fcbb --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/MacTypes.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __HID_MACTYPES__ +#define __HID_MACTYPES__ + +#include +#include + +extern void *PoolAllocateResident(vm_size_t size, unsigned char clear); +extern OSStatus PoolDeallocate(void *ptr, vm_size_t size); + +#endif /* __HID_MACTYPES__ */ diff --git a/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/PoolAlloc.c b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/PoolAlloc.c new file mode 100644 index 000000000..8d1c616f4 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDDescriptorParser/PoolAlloc.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +void *PoolAllocateResident (vm_size_t size, unsigned char clear) +{ + void *mem = IOMalloc(size); + + if (clear) { + bzero(mem, size); + } + + return mem; +} + +OSStatus PoolDeallocate (void *ptr, vm_size_t size) +{ + IOFree(ptr, size); + return noErr; +} diff --git a/iokit/Families/IOHIDSystem/IOHIDIO.cpp b/iokit/Families/IOHIDSystem/IOHIDIO.cpp new file mode 100644 index 000000000..7e59174c7 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDIO.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * EventIO.m - Event System MiG interface for driver control and status. + * + * HISTORY + * 2-April-92 Mike Paquette at NeXT + * Created. + */ + +#include + +#include +#include +#include + +#include /* Per-machine configuration info */ + +/* + * Additional kernel API to drivers using the Event Driver + */ + int +EventCoalesceDisplayCmd( int cmd, int oldcmd ) +{ + static const char coalesce[4][4] = { + /* nop */ {EVNOP, EVHIDE, EVSHOW, EVMOVE}, + /* hide */ {EVHIDE, EVHIDE, EVNOP, EVSHOW}, + /* show */ {EVSHOW, EVNOP, EVSHOW, EVSHOW}, + /* move */ {EVMOVE, EVHIDE, EVSHOW, EVMOVE} + }; + if ( cmd < EVLEVEL ) // coalesce EVNOP thru EVMOVE only + cmd = coalesce[oldcmd & 3][cmd & 3]; + return cmd; +} + diff --git a/iokit/Families/IOHIDSystem/IOHIDSystem.cpp b/iokit/Families/IOHIDSystem/IOHIDSystem.cpp new file mode 100644 index 000000000..12f4599da --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDSystem.cpp @@ -0,0 +1,2643 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * EventDriver.m - Event System module, ObjC implementation. + * + * The EventDriver is a pseudo-device driver. + * + * HISTORY + * 31-Mar-92 Mike Paquette at NeXT + * Created. + * 04-Aug-93 Erik Kay at NeXT + * minor API cleanup + * 12-Dec-00 bubba at Apple. + * Handle eject key cases on Pro Keyboard. +*/ + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* Per-machine configuration info */ +#include "IOHIDUserClient.h" + +#include + +#ifdef __cplusplus + extern "C" + { + #include + } +#endif + +bool displayWranglerUp( OSObject *, void *, IOService * ); + +static IOHIDSystem * evInstance = 0; +MasterAudioFunctions *masterAudioFunctions = 0; + +#define xpr_ev_cursor(x, a, b, c, d, e) +#define PtInRect(ptp,rp) \ + ((ptp)->x >= (rp)->minx && (ptp)->x < (rp)->maxx && \ + (ptp)->y >= (rp)->miny && (ptp)->y < (rp)->maxy) + + + +static inline unsigned AbsoluteTimeToTick( AbsoluteTime * ts ) +{ + UInt64 nano; + absolutetime_to_nanoseconds(*ts, &nano); + return( nano >> 24 ); +} + +static inline void TickToAbsoluteTime( unsigned tick, AbsoluteTime * ts ) +{ + UInt64 nano = ((UInt64) tick) << 24; + nanoseconds_to_absolutetime(nano, ts); +} + +#define EV_NS_TO_TICK(ns) AbsoluteTimeToTick(ns) +#define EV_TICK_TO_NS(tick,ns) TickToAbsoluteTime(tick,ns) + + +#define super IOService 
+OSDefineMetaClassAndStructors(IOHIDSystem, IOService); + +/* Return the current instance of the EventDriver, or 0 if none. */ +IOHIDSystem * IOHIDSystem::instance() +{ + return evInstance; +} + +bool IOHIDSystem::init(OSDictionary * properties) +{ + if (!super::init(properties)) return false; + + /* + * Initialize minimal state. + */ + + driverLock = NULL; + kickConsumerLock = NULL; + evScreen = NULL; + timerES = 0; + cmdQ = 0; + workLoop = 0; + + return true; +} + +IOHIDSystem * IOHIDSystem::probe(IOService * provider, + SInt32 * score) +{ + if (!super::probe(provider,score)) return 0; + + return this; +} + +/* + * Perform reusable initialization actions here. + */ +IOWorkLoop * IOHIDSystem::getWorkLoop() const +{ + return workLoop; +} + +bool IOHIDSystem::start(IOService * provider) +{ + bool iWasStarted = false; + + do { + if (!super::start(provider)) break; + + evInstance = this; + + driverLock = IOLockAlloc(); // Event driver data protection lock + kickConsumerLock = IOLockAlloc(); + + /* A few details to be set up... 
*/ + pointerLoc.x = INIT_CURSOR_X; + pointerLoc.y = INIT_CURSOR_Y; + + pointerDelta.x = 0; + pointerDelta.y = 0; + + evScreenSize = sizeof(EvScreen) * 6; // FIX + evScreen = (void *) IOMalloc(evScreenSize); + + if (!driverLock || + !kickConsumerLock || + !evScreenSize) break; + + IOLockInit(driverLock); + IOLockInit(kickConsumerLock); + bzero(evScreen, evScreenSize); + + /* + * Start up the work loop + */ + workLoop = IOWorkLoop::workLoop(); + cmdQ = IOCommandQueue::commandQueue + (this, (IOCommandQueueAction) &_doPerformInIOThread ); + timerES = IOTimerEventSource::timerEventSource + (this, (IOTimerEventSource::Action) &_periodicEvents ); + + if (!workLoop || !cmdQ || !timerES) + break; + + if ((workLoop->addEventSource(cmdQ) != kIOReturnSuccess) + || (workLoop->addEventSource(timerES) != kIOReturnSuccess)) + break; + + publishNotify = addNotification( + gIOPublishNotification, serviceMatching("IOHIDevice"), + (IOServiceNotificationHandler) &publishNotificationHandler, + this, 0 ); + + if (!publishNotify) break; + + /* + * IOHIDSystem serves both as a service and a nub (we lead a double + * life). Register ourselves as a nub to kick off matching. + */ + + registerService(); + + addNotification( gIOPublishNotification,serviceMatching("IODisplayWrangler"), // look for the display wrangler + (IOServiceNotificationHandler)displayWranglerUp, this, 0 ); + + iWasStarted = true; + } while(false); + + if (!iWasStarted) evInstance = 0; + + return iWasStarted; +} + +// ********************************************************************************** +// displayWranglerUp +// +// The Display Wrangler has appeared. We will be calling its +// activityTickle method when there is user activity. 
+// ********************************************************************************** +bool displayWranglerUp( OSObject * us, void * ref, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((IOHIDSystem *)us)->displayManager = yourDevice; + ((IOHIDSystem *)us)->displayState = yourDevice->registerInterestedDriver((IOService *)us); + } + return true; +} + + +//********************************************************************************* +// powerStateDidChangeTo +// +// The display wrangler has changed state, so the displays have changed +// state, too. We save the new state. +//********************************************************************************* + +IOReturn IOHIDSystem::powerStateDidChangeTo ( IOPMPowerFlags theFlags, unsigned long, IOService*) +{ + displayState = theFlags; + return IOPMNoErr; +} + + + +bool IOHIDSystem::publishNotificationHandler( + IOHIDSystem * self, + void * /* ref */, + IOService * newService ) +{ + self->attach( newService ); + +// IOTakeLock( self->driverLock); + if( self->eventsOpen + && OSDynamicCast(IOHIDevice, newService)) { + self->registerEventSource((IOHIDevice *) newService); + } +// IOUnlock( self->driverLock); + + return true; +} + + +/* + * Free locally allocated resources, and then ourselves. + */ +void IOHIDSystem::free() +{ + /* Initiates a normal close if open & inited */ + if( driverLock) + evClose(); + + if (evScreen) IOFree( (void *)evScreen, evScreenSize ); + evScreen = (void *)0; + evScreenSize = 0; + + if (timerES) timerES->release(); + if (cmdQ) cmdQ->release(); + if (workLoop) workLoop->release(); + if (publishNotify) publishNotify->release(); + + /* Release locally allocated resources */ + if (kickConsumerLock) IOLockFree( kickConsumerLock); + if (driverLock) IOLockFree( driverLock); + + super::free(); +} + + + +/* + * Open the driver for business. This call must be made before + * any other calls to the Event driver. We can only be opened by + * one user at a time. 
+ */ +IOReturn IOHIDSystem::evOpen(void) +{ + IOReturn r = kIOReturnSuccess; + + if ( evOpenCalled == true ) + { + r = kIOReturnBusy; + goto done; + } + evOpenCalled = true; + + if (!evInitialized) + { + evInitialized = true; + curBright = EV_SCREEN_MAX_BRIGHTNESS; // FIXME: Set from NVRAM? + curVolume = EV_AUDIO_MAX_VOLUME / 2; // FIXME: Set from NVRAM? + // Put code here that is to run on the first open ONLY. + } + +done: + return r; +} + +IOReturn IOHIDSystem::evClose(void) +{ + IOTakeLock( driverLock); + if ( evOpenCalled == false ) + { + IOUnlock( driverLock); + return kIOReturnBadArgument; + } + // Early close actions here + forceAutoDimState(false); + if( cursorEnabled) + hideCursor(); + cursorStarted = false; + cursorEnabled = false; + IOUnlock( driverLock); + + // Release the input devices. + detachEventSources(); + + // Tear down the shared memory area if set up +// if ( eventsOpen == true ) +// unmapEventShmem(eventPort); + + IOTakeLock( driverLock); + // Clear screens registry and related data + if ( evScreen != (void *)0 ) + { + screens = 0; + lastShmemPtr = (void *)0; + } + // Remove port notification for the eventPort and clear the port out + setEventPort(MACH_PORT_NULL); +// ipc_port_release_send(event_port); + + // Clear local state to shutdown + evOpenCalled = false; + eventsOpen = false; + + IOUnlock( driverLock); + + return kIOReturnSuccess; +} + +// +// Dispatch state to screens registered with the Event Driver +// Pending state changes for a device may be coalesced. +// +// +// On entry, the driverLock should be set. +// +void IOHIDSystem::evDispatch( + /* command */ EvCmd evcmd) +{ + Point p; + + if( !eventsOpen) + return; + + for( int i = 0; i < screens; i++ ) { + + EvScreen *esp = &((EvScreen*)evScreen)[i]; + + if ( esp->instance ) + { + p.x = evg->cursorLoc.x; // Copy from shmem. 
+ p.y = evg->cursorLoc.y; + + bool onscreen = (0 != (cursorScreens & (1 << i))); + + switch ( evcmd ) + { + case EVMOVE: + if (onscreen) + esp->instance->moveCursor(&p, evg->frame); + break; + + case EVSHOW: + if (onscreen) + esp->instance->showCursor(&p, evg->frame); + break; + + case EVHIDE: + if (onscreen) + esp->instance->hideCursor(); + break; + + case EVLEVEL: + case EVNOP: + /* lets keep that compiler happy */ + break; + } + } + } +} + +// +// Dispatch mechanism for special key press. If a port has been registered, +// a message is built to be sent out to that port notifying that the key has +// changed state. A level in the range 0-64 is provided for convenience. +// +void IOHIDSystem::evSpecialKeyMsg(unsigned key, + /* direction */ unsigned dir, + /* flags */ unsigned f, + /* level */ unsigned l) +{ + mach_port_t dst_port; + struct evioSpecialKeyMsg *msg; + + static const struct evioSpecialKeyMsg init_msg = + { { MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, // mach3xxx, is the right? + MACH_MSG_TYPE_MAKE_SEND), // mach_msg_bits_t msgh_bits; + sizeof (struct evioSpecialKeyMsg), // mach_msg_size_t msgh_size; + MACH_PORT_NULL, // mach_port_t msgh_remote_port; + MACH_PORT_NULL, // mach_port_t msgh_local_port; + 0, // mach_msg_size_t msgh_reserved; + EV_SPECIAL_KEY_MSG_ID // mach_msg_id_t msgh_id; + }, + 0, /* key */ + 0, /* direction */ + 0, /* flags */ + 0 /* level */ + }; + + if ( (dst_port = specialKeyPort(key)) == MACH_PORT_NULL ) + return; + msg = (struct evioSpecialKeyMsg *) IOMalloc( + sizeof (struct evioSpecialKeyMsg) ); + if ( msg == NULL ) + return; + + // Initialize the message. + bcopy( &init_msg, msg, sizeof (struct evioSpecialKeyMsg) ); + msg->Head.msgh_remote_port = dst_port; + msg->key = key; + msg->direction = dir; + msg->flags = f; + msg->level = l; + + // Send the message out from the I/O thread. 
+ sendWorkLoopCommand(this,(IOHIDAction)_performSpecialKeyMsg,(void*)msg); +} + +// +// Reset instance variables to their default state for mice/pointers +// +void IOHIDSystem::_resetMouseParameters() +{ + + IOTakeLock( driverLock); + if ( eventsOpen == false ) + { + IOUnlock( driverLock); + return; + } + nanoseconds_to_absolutetime( EV_DCLICKTIME, &clickTimeThresh); + clickSpaceThresh.x = clickSpaceThresh.y = EV_DCLICKSPACE; + AbsoluteTime_to_scalar( &clickTime) = 0; + clickLoc.x = clickLoc.y = -EV_DCLICKSPACE; + clickState = 1; + nanoseconds_to_absolutetime( DAUTODIMPERIOD, &autoDimPeriod); + clock_get_uptime( &autoDimTime); + ADD_ABSOLUTETIME( &autoDimTime, &autoDimPeriod); + dimmedBrightness = DDIMBRIGHTNESS; + + IOUnlock( driverLock); +} + +void IOHIDSystem::_resetKeyboardParameters() +{ +} + +/* + * Methods exported by the EventDriver. + * + * The screenRegister protocol is used by frame buffer drivers to register + * themselves with the Event Driver. These methods are called in response + * to a registerSelf or unregisterSelf message received from the Event + * Driver. 
+ */ + +int IOHIDSystem::registerScreen(IOGraphicsDevice * instance, + /* bounds */ Bounds * bp) +{ + EvScreen *esp; + + if( (false == eventsOpen) || (0 == bp) ) + { + return -1; + } + + if ( lastShmemPtr == (void *)0 ) + lastShmemPtr = evs; + + /* shmemSize and bounds already set */ + esp = &((EvScreen*)evScreen)[screens]; + esp->instance = instance; + esp->bounds = bp; + // Update our idea of workSpace bounds + if ( bp->minx < workSpace.minx ) + workSpace.minx = bp->minx; + if ( bp->miny < workSpace.miny ) + workSpace.miny = bp->miny; + if ( bp->maxx < workSpace.maxx ) + workSpace.maxx = bp->maxx; + if ( esp->bounds->maxy < workSpace.maxy ) + workSpace.maxy = bp->maxy; + + return(SCREENTOKEN + screens++); +} + + +void IOHIDSystem::unregisterScreen(int index) +{ + index -= SCREENTOKEN; + + IOTakeLock( driverLock); + if ( eventsOpen == false || index < 0 || index >= screens ) + { + IOUnlock( driverLock); + return; + } + hideCursor(); + + // clear the state for the screen + ((EvScreen*)evScreen)[index].instance = 0; + // Put the cursor someplace reasonable if it was on the destroyed screen + cursorScreens &= ~(1 << index); + // This will jump the cursor back on screen + setCursorPosition((Point *)&evg->cursorLoc, true); + + showCursor(); + + IOUnlock( driverLock); +} + +/* Member of EventClient protocol + * + * Absolute position input devices and some specialized output devices + * may need to know the bounding rectangle for all attached displays. + * The following method returns a Bounds* for the workspace. Please note + * that the bounds are kept as signed values, and that on a multi-display + * system the minx and miny values may very well be negative. 
+ */ +Bounds * IOHIDSystem::workspaceBounds() +{ + return &workSpace; +} + +IOReturn IOHIDSystem::createShmem(void* p1, void*, void*, void*, void*, void*) +{ // IOMethod + int shmemVersion = (int)p1; + IOByteCount size; + + if( shmemVersion != kIOHIDCurrentShmemVersion) + return( kIOReturnUnsupported); + + IOTakeLock( driverLock); + + if( 0 == globalMemory) { + + size = sizeof(EvOffsets) + sizeof(EvGlobals); + globalMemory = IOBufferMemoryDescriptor::withOptions( + kIODirectionNone | kIOMemoryKernelUserShared, size ); + + if( !globalMemory) { + IOUnlock( driverLock); + return( kIOReturnNoMemory ); + } + shmem_addr = (vm_offset_t) globalMemory->getBytesNoCopy(); + shmem_size = size; + } + + initShmem(); + IOUnlock( driverLock); + + return kIOReturnSuccess; +} + + +// Initialize the shared memory area. +// +// On entry, the driverLock should be set. +void IOHIDSystem::initShmem() +{ + int i; + EvOffsets *eop; + + /* top of sharedMem is EvOffsets structure */ + eop = (EvOffsets *) shmem_addr; + + bzero( (void*)shmem_addr, shmem_size); + + /* fill in EvOffsets structure */ + eop->evGlobalsOffset = sizeof(EvOffsets); + eop->evShmemOffset = eop->evGlobalsOffset + sizeof(EvGlobals); + + /* find pointers to start of globals and private shmem region */ + evg = (EvGlobals *)((char *)shmem_addr + eop->evGlobalsOffset); + evs = (void *)((char *)shmem_addr + eop->evShmemOffset); + + evg->version = kIOHIDCurrentShmemVersion; + evg->structSize = sizeof( EvGlobals); + + /* Set default wait cursor parameters */ + evg->waitCursorEnabled = TRUE; + evg->globalWaitCursorEnabled = TRUE; + evg->waitThreshold = (12 * EV_TICKS_PER_SEC) / 10; + clock_interval_to_absolutetime_interval(DefaultWCFrameRate, kNanosecondScale, + &waitFrameRate); + clock_interval_to_absolutetime_interval(DefaultWCSustain, kNanosecondScale, + &waitSustain); + AbsoluteTime_to_scalar(&waitSusTime) = 0; + AbsoluteTime_to_scalar(&waitFrameTime) = 0; + + EV_TICK_TO_NS(10,&periodicEventDelta); + + /* Set up low-level 
queues */ + lleqSize = LLEQSIZE; + for (i=lleqSize; --i != -1; ) { + evg->lleq[i].event.type = 0; + AbsoluteTime_to_scalar(&evg->lleq[i].event.time) = 0; + evg->lleq[i].event.flags = 0; + ev_init_lock(&evg->lleq[i].sema); + evg->lleq[i].next = i+1; + } + evg->LLELast = 0; + evg->lleq[lleqSize-1].next = 0; + evg->LLEHead = + evg->lleq[evg->LLELast].next; + evg->LLETail = + evg->lleq[evg->LLELast].next; + evg->buttons = 0; + evg->eNum = INITEVENTNUM; + evg->eventFlags = 0; + + AbsoluteTime ts; + unsigned tick; + clock_get_uptime( &ts); + tick = EV_NS_TO_TICK(&ts); + if ( tick == 0 ) + tick = 1; // No zero values allowed! + evg->VertRetraceClock = tick; + + evg->cursorLoc.x = pointerLoc.x; + evg->cursorLoc.y = pointerLoc.y; + evg->dontCoalesce = 0; + evg->dontWantCoalesce = 0; + evg->wantPressure = 0; + evg->wantPrecision = 0; + evg->mouseRectValid = 0; + evg->movedMask = 0; + ev_init_lock( &evg->cursorSema ); + ev_init_lock( &evg->waitCursorSema ); + // Set eventsOpen last to avoid race conditions. + eventsOpen = true; +} + +// +// Set the event port. The event port is both an ownership token +// and a live port we hold send rights on. The port is owned by our client, +// the WindowServer. We arrange to be notified on a port death so that +// we can tear down any active resources set up during this session. +// An argument of PORT_NULL will cause us to forget any port death +// notification that's set up. +// +// The driverLock should be held on entry. 
+// +void IOHIDSystem::setEventPort(mach_port_t port) +{ + static struct _eventMsg init_msg = { { + // mach_msg_bits_t msgh_bits; + MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,0), + // mach_msg_size_t msgh_size; + sizeof (struct _eventMsg), + // mach_port_t msgh_remote_port; + MACH_PORT_NULL, + // mach_port_t msgh_local_port; + MACH_PORT_NULL, + // mach_msg_size_t msgh_reserved; + 0, + // mach_msg_id_t msgh_id; + 0 + } }; + + if ( eventMsg == NULL ) + eventMsg = IOMalloc( sizeof (struct _eventMsg) ); + eventPort = port; + // Initialize the events available message. + *((struct _eventMsg *)eventMsg) = init_msg; + + ((struct _eventMsg *)eventMsg)->h.msgh_remote_port = port; +} + +// +// Set the port to be used for a special key notification. This could be more +// robust about letting ports be set... +// +IOReturn IOHIDSystem::setSpecialKeyPort( + /* keyFlavor */ int special_key, + /* keyPort */ mach_port_t key_port) +{ + if ( special_key >= 0 && special_key < NX_NUM_SCANNED_SPECIALKEYS ) + _specialKeyPort[special_key] = key_port; + return kIOReturnSuccess; +} + +mach_port_t IOHIDSystem::specialKeyPort(int special_key) +{ + if ( special_key >= 0 && special_key < NX_NUM_SCANNED_SPECIALKEYS ) + return _specialKeyPort[special_key]; + return MACH_PORT_NULL; +} + +// +// Helper functions for postEvent +// +static inline int myAbs(int a) { return(a > 0 ? a : -a); } + +short IOHIDSystem::getUniqueEventNum() +{ + while (++evg->eNum == NULLEVENTNUM) + ; /* sic */ + return(evg->eNum); +} + +// postEvent +// +// This routine actually places events in the event queue which is in +// the EvGlobals structure. It is called from all parts of the ev +// driver. +// +// On entry, the driverLock should be set. 
+// + +void IOHIDSystem::postEvent(int what, + /* at */ Point * location, + /* atTime */ AbsoluteTime ts, + /* withData */ NXEventData * myData) +{ + NXEQElement * theHead = (NXEQElement *) &evg->lleq[evg->LLEHead]; + NXEQElement * theLast = (NXEQElement *) &evg->lleq[evg->LLELast]; + NXEQElement * theTail = (NXEQElement *) &evg->lleq[evg->LLETail]; + int wereEvents; + unsigned theClock = EV_NS_TO_TICK(&ts); + + /* Some events affect screen dimming */ + if (EventCodeMask(what) & NX_UNDIMMASK) { + autoDimTime = ts; + ADD_ABSOLUTETIME( &autoDimTime, &autoDimPeriod); + if (autoDimmed) + undoAutoDim(); + } + // Update the PS VertRetraceClock off of the timestamp if it looks sane + if ( theClock > (unsigned)evg->VertRetraceClock + && theClock < (unsigned)(evg->VertRetraceClock + (20 * EV_TICK_TIME)) ) + evg->VertRetraceClock = theClock; + + wereEvents = EventsInQueue(); + + xpr_ev_post("postEvent: what %d, X %d Y %d Q %d, needKick %d\n", + what,location->x,location->y, + EventsInQueue(), needToKickEventConsumer); + + if ((!evg->dontCoalesce) /* Coalescing enabled */ + && (theHead != theTail) + && (theLast->event.type == what) + && (EventCodeMask(what) & COALESCEEVENTMASK) + && ev_try_lock(&theLast->sema)) { + /* coalesce events */ + theLast->event.location.x = location->x; + theLast->event.location.y = location->y; + absolutetime_to_nanoseconds(ts, &theLast->event.time); + if (myData != NULL) + theLast->event.data = *myData; + ev_unlock(&theLast->sema); + } else if (theTail->next != evg->LLEHead) { + /* store event in tail */ + theTail->event.type = what; + theTail->event.location.x = location->x; + theTail->event.location.y = location->y; + theTail->event.flags = evg->eventFlags; + absolutetime_to_nanoseconds(ts, &theLast->event.time); + theTail->event.window = 0; + if (myData != NULL) + theTail->event.data = *myData; + switch(what) { + case NX_LMOUSEDOWN: + theTail->event.data.mouse.eventNum = + leftENum = getUniqueEventNum(); + break; + case NX_RMOUSEDOWN: + 
theTail->event.data.mouse.eventNum = + rightENum = getUniqueEventNum(); + break; + case NX_LMOUSEUP: + theTail->event.data.mouse.eventNum = leftENum; + leftENum = NULLEVENTNUM; + break; + case NX_RMOUSEUP: + theTail->event.data.mouse.eventNum = rightENum; + rightENum = NULLEVENTNUM; + break; + } + if (EventCodeMask(what) & PRESSUREEVENTMASK) { + theTail->event.data.mouse.pressure = lastPressure; + } + if (EventCodeMask(what) & MOUSEEVENTMASK) { /* Click state */ + AbsoluteTime delta = ts; + SUB_ABSOLUTETIME( &delta, &clickTime); + if ((CMP_ABSOLUTETIME(&delta, &clickTimeThresh) <= 0) + && (myAbs(location->x - clickLoc.x) <= clickSpaceThresh.x) + && (myAbs(location->y - clickLoc.y) <= clickSpaceThresh.y)) { + if ((what == NX_LMOUSEDOWN)||(what == NX_RMOUSEDOWN)) { + clickTime = ts; + theTail->event.data.mouse.click = ++clickState; + } else { + theTail->event.data.mouse.click = clickState; + } + } else if ((what == NX_LMOUSEDOWN)||(what == NX_RMOUSEDOWN)) { + clickLoc = *location; + clickTime = ts; + clickState = 1; + theTail->event.data.mouse.click = clickState; + } else + theTail->event.data.mouse.click = 0; + } +#if PMON + pmon_log_event(PMON_SOURCE_EV, + KP_EV_POST_EVENT, + what, + evg->eventFlags, + theClock); +#endif + evg->LLETail = theTail->next; + evg->LLELast = theLast->next; + if ( ! wereEvents ) // Events available, so wake event consumer + kickEventConsumer(); + } + else + { + /* + * if queue is full, ignore event, too hard to take care of all cases + */ + IOLog("%s: postEvent LLEventQueue overflow.\n", getName()); + kickEventConsumer(); +#if PMON + pmon_log_event( PMON_SOURCE_EV, + KP_EV_QUEUE_FULL, + what, + evg->eventFlags, + theClock); +#endif + } +} + +/* + * - kickEventConsumer + * + * Try to send a message out to let the event consumer know that + * there are now events available for consumption. 
+ */ + +void IOHIDSystem::kickEventConsumer() +{ + IOReturn err; + + IOTakeLock( kickConsumerLock); + xpr_ev_post("kickEventConsumer (need == %d)\n", + needToKickEventConsumer,2,3,4,5); + if ( needToKickEventConsumer == true ) + { + IOUnlock( kickConsumerLock); + return; // Request is already pending + } + needToKickEventConsumer = true; // Posting a request now + IOUnlock( kickConsumerLock); + + err = sendWorkLoopCommand(this, (IOHIDAction)_performKickEventConsumer, + NULL); + + if( err) + IOLog("%s: cmdQ fail %d\n", getName(), err); +} + +/* + * Event sources may need to use an I/O thread from time to time. + * Rather than have each instance running it's own thread, we provide + * a callback mechanism to let all the instances share a common Event I/O + * thread running in the IOTask space, and managed by the Event Driver. + */ + +IOReturn IOHIDSystem::sendWorkLoopCommand(OSObject * target, + IOHIDAction action, + void * data) +{ + kern_return_t err; + + err = cmdQ->enqueueCommand( /* sleep */ true, + /* field0 */ target, + /* field1 */ (void *) action, + /* field2 */ data ); + + return (err == KERN_SUCCESS) ? kIOReturnSuccess : kIOReturnNoMemory; +} + +/* + * The following methods are executed from the I/O thread only. + */ + +/* + * This routine is run within the I/O thread, on demand from the + * sendWorkLoopCommand method above. We attempt to dispatch a message + * to the specified selector and instance. + */ +void IOHIDSystem::_doPerformInIOThread(void* self, + void* target, /* IOCommandQueueAction */ + void* action, + void* data, + void* /* unused */) +{ + (*((IOHIDAction)action))((OSObject *)target, data); +} + +/* + * This is run in the I/O thread, to perform the actual message send operation. 
+ */ + +void IOHIDSystem::_performSpecialKeyMsg(IOHIDSystem * self, + struct evioSpecialKeyMsg *msg) + /* IOHIDAction */ +{ + kern_return_t r; + + xpr_ev_post("_performSpecialKeyMsg 0x%x\n", msg,2,3,4,5); + + + /* FIXME: Don't block */ + r = mach_msg_send_from_kernel( &msg->Head, msg->Head.msgh_size); + + xpr_ev_post("_performSpecialKeyMsg: msg_send() == %d\n",r,2,3,4,5); + if ( r != MACH_MSG_SUCCESS ) + { + IOLog("%s: _performSpecialKeyMsg msg_send returned %d\n", + self->getName(), r); + } + if ( r == MACH_SEND_INVALID_DEST ) /* Invalidate the port */ + { + self->setSpecialKeyPort( + /* keyFlavor */ msg->key, + /* keyPort */ MACH_PORT_NULL); + } + IOFree( (void *)msg, sizeof (struct evioSpecialKeyMsg) ); +} + +/* + * This is run in the I/O thread, to perform the actual message send operation. + * Note that we perform a non-blocking send. The Event port in the event + * consumer has a queue depth of 1 message. Once the consumer picks up that + * message, it runs until the event queue is exhausted before trying to read + * another message. If a message is pending,there is no need to enqueue a + * second one. This also keeps us from blocking the I/O thread in a msg_send + * which could result in a deadlock if the consumer were to make a call into + * the event driver. 
+ */
+void IOHIDSystem::_performKickEventConsumer(IOHIDSystem * self, void *) /* IOHIDAction */
+{
+ kern_return_t r;
+ mach_msg_header_t *msgh; // FIX(review): original was missing the semicolon
+
+ xpr_ev_post("_performKickEventConsumer\n", 1,2,3,4,5);
+ IOTakeLock( self->kickConsumerLock);
+ self->needToKickEventConsumer = false; // Request received and processed
+ IOUnlock( self->kickConsumerLock);
+
+ msgh = (mach_msg_header_t *)self->eventMsg;
+ if( msgh) {
+
+ r = mach_msg_send_from_kernel( msgh, msgh->msgh_size);
+ switch ( r )
+ {
+ case MACH_SEND_TIMED_OUT:/* Already has a message posted */
+ case MACH_MSG_SUCCESS: /* Message is posted */
+ break;
+ default: /* Log the error */
+ IOLog("%s: _performKickEventConsumer msg_send returned %d\n",
+ self->getName(), r);
+ break;
+ }
+ }
+}
+
+//
+// Schedule the next periodic event to be run, based on the current state of
+// the event system. We have to consider things here such as when the last
+// periodic event pass ran, if there is currently any mouse delta accumulated,
+// and how long it has been since the last event was consumed by an app (for
+// driving the wait cursor).
+//
+// This code should only be run from the periodicEvents method or
+// _setCursorPosition.
+//
+void IOHIDSystem::scheduleNextPeriodicEvent()
+{
+ if (CMP_ABSOLUTETIME( &waitFrameTime, &thisPeriodicRun) > 0)
+ {
+ AbsoluteTime time_for_next_run;
+
+ clock_get_uptime(&time_for_next_run);
+ ADD_ABSOLUTETIME( &time_for_next_run, &periodicEventDelta);
+
+ // If the wait-cursor frame is due before the regular periodic tick,
+ // wake at the frame time instead of the standard delta.
+ if (CMP_ABSOLUTETIME( &waitFrameTime, &time_for_next_run) < 0) {
+ timerES->wakeAtTime(waitFrameTime);
+ return;
+ }
+ }
+
+ timerES->setTimeout(periodicEventDelta);
+}
+
+// Periodic events are driven from this method.
+// After taking care of all pending work, the method
+// calls scheduleNextPeriodicEvent to compute and set the
+// next callout.
+// + +void IOHIDSystem::_periodicEvents(IOHIDSystem * self, + IOTimerEventSource *timer) +{ + self->periodicEvents(timer); +} + +void IOHIDSystem::periodicEvents(IOTimerEventSource * /* timer */) + /* IOTimerEventSource::Action, IOHIDAction */ +{ + unsigned int tick; + + // If eventsOpen is false, then the driver shmem is + // no longer valid, and it is in the process of shutting down. + // We should give up without rescheduling. + IOTakeLock( driverLock); + if ( eventsOpen == false ) + { + IOUnlock( driverLock); + return; + } + + // Increment event time stamp last + clock_get_uptime(&thisPeriodicRun); + + // Temporary hack til we wean CGS off of VertRetraceClock + tick = EV_NS_TO_TICK(&thisPeriodicRun); + if ( tick == 0 ) + tick = 1; + evg->VertRetraceClock = tick; + + // Update cursor position if needed + if ( needSetCursorPosition == true ) + _setCursorPosition(&pointerLoc, false); + + // WAITCURSOR ACTION + if ( ev_try_lock(&evg->waitCursorSema) ) + { + if ( ev_try_lock(&evg->cursorSema) ) + { + // See if the current context has timed out + if ( (evg->AALastEventSent != evg->AALastEventConsumed) + && ((evg->VertRetraceClock - evg->AALastEventSent > + evg->waitThreshold))) + evg->ctxtTimedOut = TRUE; + // If wait cursor enabled and context timed out, do waitcursor + if (evg->waitCursorEnabled && evg->globalWaitCursorEnabled && + evg->ctxtTimedOut) + { + /* WAIT CURSOR SHOULD BE ON */ + if (!evg->waitCursorUp) + showWaitCursor(); + } else + { + /* WAIT CURSOR SHOULD BE OFF */ + if (evg->waitCursorUp && + CMP_ABSOLUTETIME(&waitSusTime, &thisPeriodicRun) <= 0) + hideWaitCursor(); + } + /* Animate cursor */ + if (evg->waitCursorUp && + CMP_ABSOLUTETIME(&waitFrameTime, &thisPeriodicRun) <= 0) + animateWaitCursor(); + ev_unlock(&evg->cursorSema); + if ((CMP_ABSOLUTETIME(&thisPeriodicRun, &autoDimTime) > 0) + && (!autoDimmed)) + doAutoDim(); + } + ev_unlock(&evg->waitCursorSema); + } + + scheduleNextPeriodicEvent(); + IOUnlock( driverLock); + + return; +} + +// +// 
Start the cursor system running. +// +// At this point, the WindowServer is up, running, and ready to process events. +// We will attach the keyboard and mouse, if none are available yet. +// + +bool IOHIDSystem::resetCursor() +{ + volatile Point * p; + UInt32 newScreens = 0; + SInt32 pinScreen = -1L; + + p = &evg->cursorLoc; + + /* Get mask of screens on which the cursor is present */ + EvScreen *screen = (EvScreen *)evScreen; + for (int i = 0; i < screens; i++ ) { + if ((screen[i].instance) && PtInRect(p, screen[i].bounds)) { + pinScreen = i; + newScreens |= (1 << i); + } + } + + if (newScreens == 0) + pinScreen = 0; + + // reset pin rect + cursorPin = *(((EvScreen*)evScreen)[pinScreen].bounds); + cursorPin.maxx--; /* Make half-open rectangle */ + cursorPin.maxy--; + cursorPinScreen = pinScreen; + + if (newScreens == 0) { + /* Pin new cursor position to cursorPin rect */ + p->x = (p->x < cursorPin.minx) ? + cursorPin.minx : ((p->x > cursorPin.maxx) ? + cursorPin.maxx : p->x); + p->y = (p->y < cursorPin.miny) ? + cursorPin.miny : ((p->y > cursorPin.maxy) ? + cursorPin.maxy : p->y); + + /* regenerate mask for new position */ + for (int i = 0; i < screens; i++ ) { + if ((screen[i].instance) && PtInRect(p, screen[i].bounds)) + newScreens |= (1 << i); + } + } + + cursorScreens = newScreens; + + pointerDelta.x += (evg->cursorLoc.x - pointerLoc.x); + pointerDelta.y += (evg->cursorLoc.y - pointerLoc.y); + pointerLoc.x = evg->cursorLoc.x; + pointerLoc.y = evg->cursorLoc.y; + + return( true ); +} + +bool IOHIDSystem::startCursor() +{ + bool ok; + + if (0 == screens) // no screens, no cursor + return( false ); + + resetCursor(); + setBrightness(); + showCursor(); + + // Start the cursor control callouts + ok = (kIOReturnSuccess == + sendWorkLoopCommand(this, (IOHIDAction)_periodicEvents, timerES)); + + cursorStarted = ok; + return( ok ); +} + +// +// Wait Cursor machinery. 
The driverLock should be held on entry to +// these methods, and the shared memory area must be set up. +// +void IOHIDSystem::showWaitCursor() +{ + xpr_ev_cursor("showWaitCursor\n",1,2,3,4,5); + evg->waitCursorUp = true; + hideCursor(); + evg->frame = EV_WAITCURSOR; + showCursor(); + // Set animation and sustain absolute times. + + waitSusTime = waitFrameTime = thisPeriodicRun; + ADD_ABSOLUTETIME( &waitFrameTime, &waitFrameRate); + ADD_ABSOLUTETIME( &waitSusTime, &waitSustain); +} + +void IOHIDSystem::hideWaitCursor() +{ + xpr_ev_cursor("hideWaitCursor\n",1,2,3,4,5); + evg->waitCursorUp = false; + hideCursor(); + evg->frame = EV_STD_CURSOR; + showCursor(); + AbsoluteTime_to_scalar(&waitFrameTime) = 0; + AbsoluteTime_to_scalar(&waitSusTime ) = 0; +} + +void IOHIDSystem::animateWaitCursor() +{ + xpr_ev_cursor("animateWaitCursor\n",1,2,3,4,5); + changeCursor(evg->frame + 1); + // Set the next animation time. + waitFrameTime = thisPeriodicRun; + ADD_ABSOLUTETIME( &waitFrameTime, &waitFrameRate); +} + +void IOHIDSystem::changeCursor(int frame) +{ + evg->frame = + (frame > EV_MAXCURSOR) ? EV_WAITCURSOR : frame; + xpr_ev_cursor("changeCursor %d\n",evg->frame,2,3,4,5); + moveCursor(); +} + +// +// Return the screen number in which point p lies. Return -1 if the point +// lies outside of all registered screens. +// +int IOHIDSystem::pointToScreen(Point * p) +{ + int i; + EvScreen *screen = (EvScreen *)evScreen; + for (i=screens; --i != -1; ) { + if (screen[i].instance != 0 + && (p->x >= screen[i].bounds->minx) + && (p->x < screen[i].bounds->maxx) + && (p->y >= screen[i].bounds->miny) + && (p->y < screen[i].bounds->maxy)) + return i; + } + return(-1); /* Cursor outside of known screen boundary */ +} + +// +// API used to manipulate screen brightness +// +// On entry to each of these, the driverLock should be set. 
+// +// Set the current brightness +void IOHIDSystem::setBrightness(int b) +{ + if ( b < EV_SCREEN_MIN_BRIGHTNESS ) + b = EV_SCREEN_MIN_BRIGHTNESS; + else if ( b > EV_SCREEN_MAX_BRIGHTNESS ) + b = EV_SCREEN_MAX_BRIGHTNESS; + if ( b != curBright ) + { + curBright = b; + if ( autoDimmed == false ) + setBrightness(); + } +} + +int IOHIDSystem::brightness() +{ + return curBright; +} + +// Set the current brightness +void IOHIDSystem::setAutoDimBrightness(int b) +{ + if ( b < EV_SCREEN_MIN_BRIGHTNESS ) + b = EV_SCREEN_MIN_BRIGHTNESS; + else if ( b > EV_SCREEN_MAX_BRIGHTNESS ) + b = EV_SCREEN_MAX_BRIGHTNESS; + if ( b != dimmedBrightness ) + { + dimmedBrightness = b; + if ( autoDimmed == true ) + setBrightness(); + } +} + +int IOHIDSystem::autoDimBrightness() +{ + return dimmedBrightness; +} + +int IOHIDSystem::currentBrightness() // Return the current brightness +{ + if ( autoDimmed == true && dimmedBrightness < curBright ) + return dimmedBrightness; + else + return curBright; +} + +void IOHIDSystem::doAutoDim() +{ + autoDimmed = true; + setBrightness(); +} + +// Return display brightness to normal +void IOHIDSystem::undoAutoDim() +{ + autoDimmed = false; + setBrightness(); +} + +void IOHIDSystem::forceAutoDimState(bool dim) +{ + if ( dim == true ) + { + if ( autoDimmed == false ) + { + if ( eventsOpen == true ) + clock_get_uptime( &autoDimTime); + doAutoDim(); + } + } + else + { + if ( autoDimmed == true ) + { + if ( eventsOpen == true ) { + clock_get_uptime( &autoDimTime); + ADD_ABSOLUTETIME( &autoDimTime, &autoDimPeriod); + } + undoAutoDim(); + } + } +} + +// +// API used to manipulate sound volume/attenuation +// +// Set the current brightness. 
+void IOHIDSystem::setAudioVolume(int v) +{ + if ( v < EV_AUDIO_MIN_VOLUME ) + v = EV_AUDIO_MIN_VOLUME; + else if ( v > EV_AUDIO_MAX_VOLUME ) + v = EV_AUDIO_MAX_VOLUME; + curVolume = v; +} + +// +// Volume set programatically, rather than from keyboard +// +void IOHIDSystem::setUserAudioVolume(int v) +{ + setAudioVolume(v); + // Let sound driver know about the change + evSpecialKeyMsg( NX_KEYTYPE_SOUND_UP, + /* direction */ NX_KEYDOWN, + /* flags */ 0, + /* level */ curVolume); +} + +int IOHIDSystem::audioVolume() +{ + return curVolume; +} + +// +// API used to drive event state out to attached screens +// +// On entry to each of these, the driverLock should be set. +// +inline void IOHIDSystem::setBrightness() // Propagate state out to screens +{ + evDispatch(/* command */ EVLEVEL); +} + +inline void IOHIDSystem::showCursor() +{ + evDispatch(/* command */ EVSHOW); +} +inline void IOHIDSystem::hideCursor() +{ + evDispatch(/* command */ EVHIDE); +} + +inline void IOHIDSystem::moveCursor() +{ + evDispatch(/* command */ EVMOVE); +} + +// +// - attachDefaultEventSources +// Attach the default event sources. +// +void IOHIDSystem::attachDefaultEventSources() +{ + OSObject * source; + OSIterator * sources; + + + sources = getProviderIterator(); + + if (!sources) return; + + while( (source = sources->getNextObject())) { + if (OSDynamicCast(IOHIDevice, source)) { + + registerEventSource((IOHIDevice *)source); + } + } + sources->release(); +} + +// +// - detachEventSources +// Detach all event sources +// +void IOHIDSystem::detachEventSources() +{ + OSIterator * iter; + IOHIDevice * srcInstance; + + iter = getOpenProviderIterator(); + if( iter) { + while( (srcInstance = (IOHIDevice *) iter->getNextObject())) { +#ifdef DEBUG + kprintf("detachEventSource:%s\n", srcInstance->getName()); +#endif + srcInstance->close(this); + } + iter->release(); + } +} + +// +// EventSrcClient implementation +// + +// +// A new device instance desires to be added to our list. 
+// Try to get ownership of the device. If we get it, add it to +// the list. +// +bool IOHIDSystem::registerEventSource(IOHIDevice * source) +{ + bool success = false; + +#ifdef DEBUG + kprintf("registerEventSource:%s\n", ((IOHIDevice*)source)->getName()); +#endif + + if ( OSDynamicCast(IOHIKeyboard, source) ) { + success = ((IOHIKeyboard*)source)->open(this, kIOServiceSeize, + (KeyboardEventAction) _keyboardEvent, + (KeyboardSpecialEventAction) _keyboardSpecialEvent, + (UpdateEventFlagsAction) _updateEventFlags); + } else if ( OSDynamicCast(IOHIPointing, source) ) { + success = ((IOHIPointing*)source)->open(this, kIOServiceSeize, + (RelativePointerEventAction) _relativePointerEvent, + (AbsolutePointerEventAction) _absolutePointerEvent, + (ScrollWheelEventAction) _scrollWheelEvent); + } + + if ( success == false ) + IOLog("%s: Seize of %s failed.\n", getName(), source->getName()); + + return success; +} + +IOReturn IOHIDSystem::message(UInt32 type, IOService * provider, + void * argument) +{ + IOReturn status = kIOReturnSuccess; + + switch (type) + { + case kIOMessageServiceIsTerminated: +#ifdef DEBUG + kprintf("detachEventSource:%s\n", provider->getName()); +#endif + provider->close( this ); + case kIOMessageServiceWasClosed: + break; + + default: + status = super::message(type, provider, argument); + break; + } + + return status; +} + +// +// This will scale the point at location in the coordinate system represented by bounds +// to the coordinate system of the current screen. +// This is needed for absolute pointer events that come from devices with different bounds. +// +void IOHIDSystem::_scaleLocationToCurrentScreen(Point *location, Bounds *bounds) +{ + // We probably also need to look at current screen offsets as well + // but that shouldn't matter until we provide tablets with a way to + // switch screens... 
+ location->x = ((location->x - bounds->minx) * (cursorPin.maxx - cursorPin.minx + 1) + / (bounds->maxx - bounds->minx)) + cursorPin.minx; + location->y = ((location->y - bounds->miny) * (cursorPin.maxy - cursorPin.miny + 1) + / (bounds->maxy - bounds->miny)) + cursorPin.miny; + + return; +} + + +// +// Process a mouse status change. The driver should sign extend +// it's deltas and perform any bit flipping needed there. +// +// We take the state as presented and turn it into events. +// +void IOHIDSystem::_relativePointerEvent(IOHIDSystem * self, + int buttons, + /* deltaX */ int dx, + /* deltaY */ int dy, + /* atTime */ AbsoluteTime ts) +{ + self->relativePointerEvent(buttons, dx, dy, ts); +} + +void IOHIDSystem::relativePointerEvent(int buttons, + /* deltaX */ int dx, + /* deltaY */ int dy, + /* atTime */ AbsoluteTime ts) +{ + AbsoluteTime nextVBL, vblDeltaTime, eventDeltaTime, moveDeltaTime; + + if( displayManager != NULL ) // if there is a display manager, tell + displayManager->activityTickle(0,0); // it there is user activity + + IOTakeLock( driverLock); + if( eventsOpen == false ) + { + IOUnlock( driverLock); + return; + } + // Fake up pressure changes from button state changes + if( (buttons & EV_LB) != (evg->buttons & EV_LB) ) + { + if ( buttons & EV_LB ) + lastPressure = MAXPRESSURE; + else + lastPressure = MINPRESSURE; + } + _setButtonState(buttons, /* atTime */ ts); + + // figure cursor movement + if( dx || dy ) + { + eventDeltaTime = ts; + SUB_ABSOLUTETIME( &eventDeltaTime, &lastEventTime ); + lastEventTime = ts; + + IOGraphicsDevice * instance = ((EvScreen*)evScreen)[cursorPinScreen].instance; + if( instance) + instance->getVBLTime( &nextVBL, &vblDeltaTime ); + else + nextVBL.hi = nextVBL.lo = vblDeltaTime.hi = vblDeltaTime.lo = 0; + + if( dx && ((dx ^ accumDX) < 0)) + accumDX = 0; + if( dy && ((dy ^ accumDY) < 0)) + accumDY = 0; + + KERNEL_DEBUG(0x0c000060 | DBG_FUNC_NONE, + nextVBL.hi, nextVBL.lo, postedVBLTime.hi, postedVBLTime.lo, 0); + + if( 
(nextVBL.lo || nextVBL.hi) + && (nextVBL.lo == postedVBLTime.lo) && (nextVBL.hi == postedVBLTime.hi)) { + accumDX += dx; + accumDY += dy; + + } else { + SInt32 num = 0, div = 0; + + dx += accumDX; + dy += accumDY; + + moveDeltaTime = ts; + SUB_ABSOLUTETIME( &moveDeltaTime, &lastMoveTime ); + lastMoveTime = ts; + + if( (eventDeltaTime.lo < vblDeltaTime.lo) && (0 == eventDeltaTime.hi) + && vblDeltaTime.lo && moveDeltaTime.lo) { + num = vblDeltaTime.lo; + div = moveDeltaTime.lo; + dx = (num * dx) / div; + dy = (num * dy) / div; + } + + KERNEL_DEBUG(0x0c000000 | DBG_FUNC_NONE, + dx, dy, num, div, 0); + + postedVBLTime = nextVBL; // we have posted for this vbl + accumDX = accumDY = 0; + + if( dx || dy ) { + pointerLoc.x += dx; + pointerLoc.y += dy; + pointerDelta.x += dx; + pointerDelta.y += dy; + _setCursorPosition(&pointerLoc, false); + } + } + } + IOUnlock( driverLock); +} + +void IOHIDSystem::_absolutePointerEvent(IOHIDSystem * self, + int buttons, + /* at */ Point * newLoc, + /* withBounds */ Bounds * bounds, + /* inProximity */ bool proximity, + /* withPressure */ int pressure, + /* withAngle */ int stylusAngle, + /* atTime */ AbsoluteTime ts) +{ + self->absolutePointerEvent(buttons, newLoc, bounds, proximity, + pressure, stylusAngle, ts); +} + +void IOHIDSystem::absolutePointerEvent(int buttons, + /* at */ Point * newLoc, + /* withBounds */ Bounds * bounds, + /* inProximity */ bool proximity, + /* withPressure */ int pressure, + /* withAngle */ int /* stylusAngle */, + /* atTime */ AbsoluteTime ts) + +{ + /* + * If you don't know what to pass for the following fields, pass the + * default values below: + * pressure = MINPRESSURE or MAXPRESSURE + * stylusAngle = 90 + */ + + NXEventData outData; /* dummy data */ + + if ( displayManager != NULL ) { // if there is a display manager, tell + displayManager->activityTickle(0,0); // it there is user activity + } + + IOTakeLock( driverLock); + if ( eventsOpen == false ) + { + IOUnlock( driverLock); + return; + } + + 
lastPressure = pressure; + + _scaleLocationToCurrentScreen(newLoc, bounds); + if ( newLoc->x != pointerLoc.x || newLoc->y != pointerLoc.y ) + { + pointerDelta.x += (newLoc->x - pointerLoc.x); + pointerDelta.y += (newLoc->y - pointerLoc.y); + pointerLoc = *newLoc; + _setCursorPosition(&pointerLoc, false); + } + if ( lastProximity != proximity && proximity == true ) + { + evg->eventFlags |= NX_STYLUSPROXIMITYMASK; + bzero( (char *)&outData, sizeof outData ); + postEvent( NX_FLAGSCHANGED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + if ( proximity == true ) + _setButtonState(buttons, /* atTime */ ts); + if ( lastProximity != proximity && proximity == false ) + { + evg->eventFlags &= ~NX_STYLUSPROXIMITYMASK; + bzero( (char *)&outData, sizeof outData ); + postEvent( NX_FLAGSCHANGED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + lastProximity = proximity; + IOUnlock( driverLock); +} + +void IOHIDSystem::_scrollWheelEvent(IOHIDSystem * self, + short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + /* atTime */ AbsoluteTime ts) +{ + self->scrollWheelEvent(deltaAxis1, deltaAxis2, deltaAxis3, ts); +} + +void IOHIDSystem::scrollWheelEvent(short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + /* atTime */ AbsoluteTime ts) + +{ + NXEventData wheelData; + + if ((deltaAxis1 == 0) && (deltaAxis2 == 0) && (deltaAxis3 == 0)) { + return; + } + + IOTakeLock( driverLock); + if (!eventsOpen) + { + IOUnlock(driverLock); + return; + } + + bzero((char *)&wheelData, sizeof wheelData); + wheelData.scrollWheel.deltaAxis1 = deltaAxis1; + wheelData.scrollWheel.deltaAxis2 = deltaAxis2; + wheelData.scrollWheel.deltaAxis3 = deltaAxis3; + + postEvent( NX_SCROLLWHEELMOVED, + /* at */ (Point *)&evg->cursorLoc, + /* atTime */ ts, + /* withData */ &wheelData); + + IOUnlock(driverLock); + return; +} + +void IOHIDSystem::_tabletEvent(IOHIDSystem *self, + NXEventData *tabletData, + AbsoluteTime ts) +{ + 
self->tabletEvent(tabletData, ts); +} + +void IOHIDSystem::tabletEvent(NXEventData *tabletData, + AbsoluteTime ts) +{ + IOTakeLock(driverLock); + + if (eventsOpen) { + postEvent(NX_TABLETPOINTER, + (Point *)&evg->cursorLoc, + ts, + tabletData); + } + + IOUnlock(driverLock); + + return; +} + +void IOHIDSystem::_proximityEvent(IOHIDSystem *self, + NXEventData *proximityData, + AbsoluteTime ts) +{ + self->proximityEvent(proximityData, ts); +} + +void IOHIDSystem::proximityEvent(NXEventData *proximityData, + AbsoluteTime ts) +{ + IOTakeLock(driverLock); + + if (eventsOpen) { + postEvent(NX_TABLETPROXIMITY, + (Point *)&evg->cursorLoc, + ts, + proximityData); + } + + IOUnlock(driverLock); + + return; +} + +// +// Process a keyboard state change. +// +void IOHIDSystem::_keyboardEvent(IOHIDSystem * self, + unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet, + /* keyboardType */ unsigned keyboardType, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts) +{ + self->keyboardEvent(eventType, flags, key, charCode, charSet, + origCharCode, origCharSet, keyboardType, repeat, ts); +} + +void IOHIDSystem::keyboardEvent(unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet, + /* keyboardType */ unsigned keyboardType, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts) +{ + NXEventData outData; + + if ( ! 
(displayState & IOPMDeviceUsable) ) { // display is off, consume the keystroke + if ( eventType == NX_KEYDOWN ) { + return; + } + if ( displayManager != NULL ) { // but if there is a display manager, tell + displayManager->activityTickle(0,0); // it there is user activity + } + return; + } + + if ( displayManager != NULL ) { // if there is a display manager, tell + displayManager->activityTickle(0,0); // it there is user activity + } + + outData.key.repeat = repeat; + outData.key.keyCode = key; + outData.key.charSet = charSet; + outData.key.charCode = charCode; + outData.key.origCharSet = origCharSet; + outData.key.origCharCode = origCharCode; + outData.key.keyboardType = keyboardType; + + IOTakeLock( driverLock); + if ( eventsOpen == false ) + { + IOUnlock( driverLock); + return; + } + evg->eventFlags = (evg->eventFlags & ~KEYBOARD_FLAGSMASK) + | (flags & KEYBOARD_FLAGSMASK); + + postEvent( eventType, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + + IOUnlock( driverLock); +} + +void IOHIDSystem::_keyboardSpecialEvent( IOHIDSystem * self, + unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* specialty */ unsigned flavor, + /* guid */ UInt64 guid, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts) +{ + self->keyboardSpecialEvent(eventType, flags, key, flavor, guid, repeat, ts); +} + + +void IOHIDSystem::keyboardSpecialEvent( unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* specialty */ unsigned flavor, + /* guid */ UInt64 guid, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts) +{ + NXEventData outData; + int level = -1; + + bzero( (void *)&outData, sizeof outData ); + + IOTakeLock( driverLock); + if ( eventsOpen == false ) + { + IOUnlock( driverLock); + return; + } + + // Update flags. 
+ evg->eventFlags = (evg->eventFlags & ~KEYBOARD_FLAGSMASK) + | (flags & KEYBOARD_FLAGSMASK); + + if ( eventType == NX_KEYDOWN ) + { + switch ( flavor ) + { + case NX_KEYTYPE_SOUND_UP: + if ( (flags & SPECIALKEYS_MODIFIER_MASK) == 0 ) + { + //level = IOAudioManager::sharedInstance()->incrementMasterVolume(); + if (masterAudioFunctions && masterAudioFunctions->incrementMasterVolume) + { + masterAudioFunctions->incrementMasterVolume(); + } + } + else + { + if( !(evg->eventFlags & NX_COMMANDMASK) && + !(evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + (evg->eventFlags & NX_ALTERNATEMASK) ) + { + // Open the sound preferences control panel. + KUNCExecute( "Sound.preference", kOpenAppAsConsoleUser, kOpenPreferencePanel ); + } + } + break; + case NX_KEYTYPE_SOUND_DOWN: + if ( (flags & SPECIALKEYS_MODIFIER_MASK) == 0 ) + { + //level = IOAudioManager::sharedInstance()->decrementMasterVolume(); + if (masterAudioFunctions && masterAudioFunctions->decrementMasterVolume) + { + masterAudioFunctions->decrementMasterVolume(); + } + } + else + { + if( !(evg->eventFlags & NX_COMMANDMASK) && + !(evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + (evg->eventFlags & NX_ALTERNATEMASK) ) + { + // Open the sound preferences control panel. + KUNCExecute( "Sound.preference", kOpenAppAsConsoleUser, kOpenPreferencePanel ); + } + } + break; + case NX_KEYTYPE_MUTE: + if ( (flags & SPECIALKEYS_MODIFIER_MASK) == 0 ) + { + //level = IOAudioManager::sharedInstance()->toggleMasterMute(); + if (masterAudioFunctions && masterAudioFunctions->toggleMasterMute) + { + masterAudioFunctions->toggleMasterMute(); + } + } + else + { + if( !(evg->eventFlags & NX_COMMANDMASK) && + !(evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + (evg->eventFlags & NX_ALTERNATEMASK) ) + { + // Open the sound preferences control panel. 
+ KUNCExecute( "Sound.preference", kOpenAppAsConsoleUser, kOpenPreferencePanel ); + } + } + break; + case NX_KEYTYPE_EJECT: + + // Special key handlers: + // + // Command = invoke macsbug + // Command+option = sleep now + // Command+option+control = shutdown now + // Control = logout dialog + + if( (evg->eventFlags & NX_COMMANDMASK) && + !(evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + !(evg->eventFlags & NX_ALTERNATEMASK) ) + { + // Post a power key event, Classic should pick this up and + // drop into MacsBug. + // + outData.compound.subType = NX_SUBTYPE_POWER_KEY; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + else if( (evg->eventFlags & NX_COMMANDMASK) && + !(evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + (evg->eventFlags & NX_ALTERNATEMASK) ) + { + //IOLog( "IOHIDSystem -- sleep now!\n" ); + + // Post the sleep now event. Someone else will handle the actual call. + // + outData.compound.subType = NX_SUBTYPE_SLEEP_EVENT; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + else if( (evg->eventFlags & NX_COMMANDMASK) && + (evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + (evg->eventFlags & NX_ALTERNATEMASK) ) + { + //IOLog( "IOHIDSystem -- shutdown now!\n" ); + + // Post the shutdown now event. Someone else will handle the actual call. + // + outData.compound.subType = NX_SUBTYPE_SHUTDOWN_EVENT; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + else if( (evg->eventFlags & NX_COMMANDMASK) && + (evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + !(evg->eventFlags & NX_ALTERNATEMASK) ) + { + // Restart now! + //IOLog( "IOHIDSystem -- Restart now!\n" ); + + // Post the Restart now event. Someone else will handle the actual call. 
+ // + outData.compound.subType = NX_SUBTYPE_RESTART_EVENT; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + else if( !(evg->eventFlags & NX_COMMANDMASK) && + (evg->eventFlags & NX_CONTROLMASK) && + !(evg->eventFlags & NX_SHIFTMASK) && + !(evg->eventFlags & NX_ALTERNATEMASK) ) + { + // Looks like we should put up the normal 'Power Key' dialog. + // + // Set the event flags to zero, because the system will not do the right + // thing if we don't zero this out (it will ignore the power key event + // we post, thinking that some modifiers are down). + // + evg->eventFlags = 0; + + // Post the power keydown event. + // + outData.compound.subType = NX_SUBTYPE_POWER_KEY; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + else + { + // After all that checking, no modifiers are down, so let's pump up a + // system defined eject event. This way we can have anyone who's watching + // for this event (aka LoginWindow) route this event to the right target + // (aka AutoDiskMounter). + + //IOLog( "IOHIDSystem--Normal Eject action!\n" ); + + // Post the eject keydown event. 
+ // + outData.compound.subType = NX_SUBTYPE_EJECT_KEY; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + break; + + case NX_POWER_KEY: + outData.compound.subType = NX_SUBTYPE_POWER_KEY; + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + break; + } + } +#if 0 /* So far, nothing to do on keyup */ + else if ( eventType == NX_KEYUP ) + { + switch ( flavor ) + { + case NX_KEYTYPE_SOUND_UP: + break; + case NX_KEYTYPE_SOUND_DOWN: + break; + case NX_KEYTYPE_MUTE: + break; + case NX_POWER_KEY: + break; + } + } +#endif + if( (0 == (flags & SPECIALKEYS_MODIFIER_MASK)) + && ((1 << flavor) & NX_SPECIALKEY_POST_MASK)) { + outData.compound.subType = NX_SUBTYPE_AUX_CONTROL_BUTTONS; + outData.compound.misc.S[0] = flavor; + outData.compound.misc.C[2] = eventType; + outData.compound.misc.C[3] = repeat; + outData.compound.misc.L[1] = guid & 0xffffffff; + outData.compound.misc.L[2] = guid >> 32; + + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&pointerLoc, + /* atTime */ ts, + /* withData */ &outData); + } + + IOUnlock( driverLock); + if ( level != -1 ) // An interesting special key event occurred + { + evSpecialKeyMsg( flavor, + /* direction */ eventType, + /* flags */ flags, + /* level */ level); + } +} + +/* + * Update current event flags. Restricted to keyboard flags only, this + * method is used to silently update the flags state for keys which both + * generate characters and flag changes. The specs say we don't generate + * a flags-changed event for such keys. This method is also used to clear + * the keyboard flags on a keyboard subsystem reset. 
+ */ +void IOHIDSystem::_updateEventFlags(IOHIDSystem * self, unsigned flags) +{ + self->updateEventFlags(flags); +} + +void IOHIDSystem::updateEventFlags(unsigned flags) +{ + IOTakeLock( driverLock); + if ( eventsOpen ) + evg->eventFlags = (evg->eventFlags & ~KEYBOARD_FLAGSMASK) + | (flags & KEYBOARD_FLAGSMASK); + IOUnlock( driverLock); +} + +// +// - _setButtonState:(int)buttons atTime:(int)t +// Update the button state. Generate button events as needed +// +void IOHIDSystem::_setButtonState(int buttons, + /* atTime */ AbsoluteTime ts) +{ + // Magic uber-mouse buttons changed event so we can get all of the buttons... + if(evg->buttons ^ buttons) + { + NXEventData evData; + unsigned long hwButtons, hwDelta, temp; + + /* I'd like to keep the event button mapping linear, so + I have to "undo" the LB/RB mouse bit numbering funkiness + before I pass the information down to the app. */ + /* Ideally this would all go away if we fixed EV_LB and EV_RB + to be bits 0 and 1 */ + hwButtons = buttons & ~7; /* Keep everything but bottom 3 bits. 
*/ + hwButtons |= (buttons & 3) << 1; /* Map bits 01 to 12 */ + hwButtons |= (buttons & 4) >> 2; /* Map bit 2 back to bit 0 */ + temp = evg->buttons ^ buttons; + hwDelta = temp & ~7; + hwDelta |= (temp & 3) << 1; /* Map bits 01 to 12 */ + hwDelta |= (temp & 4) >> 2; /* Map bit 2 back to bit 0 */ + + evData.compound.reserved = 0; + evData.compound.subType = NX_SUBTYPE_AUX_MOUSE_BUTTONS; + evData.compound.misc.L[0] = hwDelta; + evData.compound.misc.L[1] = hwButtons; + + postEvent( NX_SYSDEFINED, + /* at */ (Point *)&evg->cursorLoc, + /* atTime */ ts, + /* withData */ &evData); + } + + if ((evg->buttons & EV_LB) != (buttons & EV_LB)) + { + if (buttons & EV_LB) + { + postEvent( NX_LMOUSEDOWN, + /* at */ (Point *)&evg->cursorLoc, + /* atTime */ ts, + /* withData */ NULL); + } + else + { + postEvent( NX_LMOUSEUP, + /* at */ (Point *)&evg->cursorLoc, + /* atTime */ ts, + /* withData */ NULL); + } + // After entering initial up/down event, set up + // coalescing state so drags will behave correctly + evg->dontCoalesce = evg->dontWantCoalesce; + if (evg->dontCoalesce) + evg->eventFlags |= NX_NONCOALSESCEDMASK; + else + evg->eventFlags &= ~NX_NONCOALSESCEDMASK; + } + + if ((evg->buttons & EV_RB) != (buttons & EV_RB)) { + if (buttons & EV_RB) { + postEvent( NX_RMOUSEDOWN, + /* at */ (Point *)&evg->cursorLoc, + /* atTime */ ts, + /* withData */ NULL); + } else { + postEvent( NX_RMOUSEUP, + /* at */ (Point *)&evg->cursorLoc, + /* atTime */ ts, + /* withData */ NULL); + } + } + + evg->buttons = buttons; +} +// +// Sets the cursor position (evg->cursorLoc) to the new +// location. The location is clipped against the cursor pin rectangle, +// mouse moved/dragged events are generated using the given event mask, +// and a mouse-exited event may be generated. The cursor image is +// moved. +// On entry, the driverLock should be set. 
+// +void IOHIDSystem::setCursorPosition(Point * newLoc, bool external) +{ + if ( eventsOpen == true ) + { + pointerDelta.x += (newLoc->x - pointerLoc.x); + pointerDelta.y += (newLoc->y - pointerLoc.y); + pointerLoc = *newLoc; + _setCursorPosition(newLoc, external); + } +} + +// +// This mechanism is used to update the cursor position, possibly generating +// messages to registered frame buffer devices and posting drag, tracking, and +// mouse motion events. +// +// On entry, the driverLock should be set. +// This can be called from setCursorPosition:(Point *)newLoc to set the +// position by a _IOSetParameterFromIntArray() call, directly from the absolute or +// relative pointer device routines, or on a timed event callback. +// +void IOHIDSystem::_setCursorPosition(Point * newLoc, bool external) +{ + bool cursorMoved = true; + + if (!screens) + return; + + if( ev_try_lock(&evg->cursorSema) == 0 ) // host using shmem + { + needSetCursorPosition = true; // try again later +// scheduleNextPeriodicEvent(); + return; + } + + // Past here we hold the cursorSema lock. Make sure the lock is + // cleared before returning or the system will be wedged. + + needSetCursorPosition = false; // We WILL succeed + + if (cursorCoupled || external) + { + UInt32 newScreens = 0; + SInt32 pinScreen = -1L; + + /* Get mask of screens on which the cursor is present */ + EvScreen *screen = (EvScreen *)evScreen; + for (int i = 0; i < screens; i++ ) { + if ((screen[i].instance) && PtInRect(newLoc, screen[i].bounds)) { + pinScreen = i; + newScreens |= (1 << i); + } + } + + if (newScreens == 0) { + /* At this point cursor has gone off all screens, + just clip it to one of the previous screens. */ + newLoc->x = (newLoc->x < cursorPin.minx) ? + cursorPin.minx : ((newLoc->x > cursorPin.maxx) ? + cursorPin.maxx : newLoc->x); + newLoc->y = (newLoc->y < cursorPin.miny) ? + cursorPin.miny : ((newLoc->y > cursorPin.maxy) ? 
+ cursorPin.maxy : newLoc->y); + /* regenerate mask for new position */ + for (int i = 0; i < screens; i++ ) { + if ((screen[i].instance) && PtInRect(newLoc, screen[i].bounds)) { + pinScreen = i; + newScreens |= (1 << i); + } + } + } + + pointerLoc = *newLoc; // Sync up pointer with clipped cursor + /* Catch the no-move case */ + if ((evg->cursorLoc.x == newLoc->x) && (evg->cursorLoc.y == newLoc->y)) { + if ((pointerDelta.x == 0) && (pointerDelta.y == 0)) { + ev_unlock(&evg->cursorSema); + return; + } + cursorMoved = false; // mouse moved, but cursor didn't + } else { + evg->cursorLoc.x = newLoc->x; + evg->cursorLoc.y = newLoc->y; + + /* If cursor changed screens */ + if (newScreens != cursorScreens) { + hideCursor(); /* hide cursor on old screens */ + cursorScreens = newScreens; + cursorPin = *(((EvScreen*)evScreen)[pinScreen].bounds); + cursorPin.maxx--; /* Make half-open rectangle */ + cursorPin.maxy--; + cursorPinScreen = pinScreen; + showCursor(); + } else { + /* cursor moved on same screens */ + moveCursor(); + } + } + } else { + /* cursor uncoupled */ + pointerLoc.x = evg->cursorLoc.x; + pointerLoc.y = evg->cursorLoc.y; + } + + AbsoluteTime ts; + clock_get_uptime(&ts); + + /* See if anybody wants the mouse moved or dragged events */ + if (evg->movedMask) { + if ((evg->movedMask&NX_LMOUSEDRAGGEDMASK)&&(evg->buttons& EV_LB)) { + _postMouseMoveEvent(NX_LMOUSEDRAGGED, newLoc, ts); + } else if ((evg->movedMask&NX_RMOUSEDRAGGEDMASK) && (evg->buttons & EV_RB)) { + _postMouseMoveEvent(NX_RMOUSEDRAGGED, newLoc, ts); + } else if (evg->movedMask & NX_MOUSEMOVEDMASK) { + _postMouseMoveEvent(NX_MOUSEMOVED, newLoc, ts); + } + } + + /* check new cursor position for leaving evg->mouseRect */ + if (cursorMoved && evg->mouseRectValid && (!PtInRect(newLoc, &evg->mouseRect))) + { + if (evg->mouseRectValid) + { + postEvent( NX_MOUSEEXITED, + /* at */ newLoc, + /* atTime */ ts, + /* withData */ NULL); + evg->mouseRectValid = 0; + } + } + ev_unlock(&evg->cursorSema); +} + +void 
IOHIDSystem::_postMouseMoveEvent(int what, + Point * location, + AbsoluteTime ts) +{ + NXEventData data; + + data.mouseMove.dx = pointerDelta.x; + data.mouseMove.dy = pointerDelta.y; + + pointerDelta.x = 0; + pointerDelta.y = 0; + + postEvent(what, location, ts, &data); +} + +/** + ** IOUserClient methods + **/ + +IOReturn IOHIDSystem::newUserClient(task_t /* owningTask */, + /* withToken */ void * /* security_id */, + /* ofType */ UInt32 type, + /* client */ IOUserClient ** handler) +{ + IOUserClient * newConnect = 0; + IOReturn err = kIOReturnNoMemory; + + IOTakeLock( driverLock); + + do { + if( type == kIOHIDParamConnectType) { + if( paramConnect) { + newConnect = paramConnect; + newConnect->retain(); + } else if( eventsOpen) { + newConnect = new IOHIDParamUserClient; + } else { + err = kIOReturnNotOpen; + continue; + } + + } else if( type == kIOHIDServerConnectType) { + newConnect = new IOHIDUserClient; + } else + err = kIOReturnUnsupported; + + if( !newConnect) + continue; + + // initialization is getting out of hand + + if( (newConnect != paramConnect) && ( + (false == newConnect->init()) + || (false == newConnect->attach( this )) + || (false == newConnect->start( this )) + || ((type == kIOHIDServerConnectType) + && (err = evOpen())) + )) { + newConnect->detach( this ); + newConnect->release(); + newConnect = 0; + continue; + } + if( type == kIOHIDParamConnectType) + paramConnect = newConnect; + err = kIOReturnSuccess; + + } while( false ); + + IOUnlock( driverLock); + + *handler = newConnect; + return( err ); +} + + +IOReturn IOHIDSystem::setEventsEnable(void*p1,void*,void*,void*,void*,void*) +{ // IOMethod + bool enable = (bool)p1; + + if( enable) { + attachDefaultEventSources(); + _resetMouseParameters(); + _resetKeyboardParameters(); + } + return( kIOReturnSuccess); +} + +IOReturn IOHIDSystem::setCursorEnable(void*p1,void*,void*,void*,void*,void*) +{ // IOMethod + bool enable = (bool)p1; + IOReturn err = kIOReturnSuccess; + + IOTakeLock( driverLock); + if 
( eventsOpen == false ) { + IOUnlock( driverLock); + return( kIOReturnNotOpen ); + } + + if( 0 == screens) { // Should be at least 1! + IOUnlock( driverLock); + return( kIOReturnNoDevice ); + } + + if( enable) { + if( cursorStarted) { + hideCursor(); + cursorEnabled = resetCursor(); + showCursor(); + } else + cursorEnabled = startCursor(); + } else + cursorEnabled = enable; + + cursorCoupled = cursorEnabled; + + IOUnlock( driverLock); + + return( err); +} + +IOReturn IOHIDSystem::extPostEvent(void*p1,void*,void*,void*,void*,void*) +{ // IOMethod + struct evioLLEvent * event = (struct evioLLEvent *)p1; + + IOTakeLock( driverLock); + + if( event->setCursor) + setCursorPosition(&event->location, true); + + if( event->setFlags) + evg->eventFlags = (evg->eventFlags & ~KEYBOARD_FLAGSMASK) + | (event->flags & KEYBOARD_FLAGSMASK); + + AbsoluteTime ts; + clock_get_uptime(&ts); + postEvent( event->type, + /* at */ &event->location, + /* atTime */ ts, + /* withData */ &event->data); + + IOUnlock( driverLock); + return( kIOReturnSuccess); +} + +IOReturn IOHIDSystem::extSetMouseLocation(void*p1,void*,void*,void*,void*,void*) +{ // IOMethod + Point * loc = (Point *)p1; + + IOTakeLock( driverLock); + setCursorPosition(loc, true); + IOUnlock( driverLock); + return( kIOReturnSuccess); +} + +IOReturn IOHIDSystem::extGetButtonEventNum(void*p1,void*p2,void*,void*,void*,void*) +{ // IOMethod + NXMouseButton button = (NXMouseButton)(int)p1; + int * eventNum = (int *)p2; + IOReturn err = kIOReturnSuccess; + + IOTakeLock( driverLock); + switch( button) { + case NX_LeftButton: + *eventNum = leftENum; + break; + case NX_RightButton: + *eventNum = rightENum; + break; + default: + err = kIOReturnBadArgument; + } + + IOUnlock( driverLock); + return( err); +} + +bool IOHIDSystem::updateProperties( void ) +{ + UInt64 clickTimeThreshNano; + UInt64 autoDimThresholdNano; + UInt64 autoDimTimeNano; + UInt64 idleTimeNano; + AbsoluteTime time1, time2; + bool ok; + + absolutetime_to_nanoseconds( 
clickTimeThresh, &clickTimeThreshNano); + absolutetime_to_nanoseconds( autoDimPeriod, &autoDimThresholdNano); + if( eventsOpen) { + clock_get_uptime( &time1); + if( autoDimmed) { + autoDimTimeNano = 0; + // now - (autoDimTime - autoDimPeriod) + SUB_ABSOLUTETIME( &time1, &autoDimTime); + ADD_ABSOLUTETIME( &time1, &autoDimPeriod); + absolutetime_to_nanoseconds( time1, &idleTimeNano); + } else { + // autoDimTime - now + time2 = autoDimTime; + SUB_ABSOLUTETIME( &time2, &time1); + absolutetime_to_nanoseconds( time2, &autoDimTimeNano); + // autoDimPeriod - (autoDimTime - evg->VertRetraceClock) + time1 = autoDimPeriod; + SUB_ABSOLUTETIME( &time1, &time2); + absolutetime_to_nanoseconds( time1, &idleTimeNano); + } + } else { + absolutetime_to_nanoseconds( autoDimPeriod, &autoDimTimeNano); + idleTimeNano = 0; // user is active + } + + ok = setProperty( kIOHIDClickTimeKey, &clickTimeThreshNano, + sizeof( UInt64)) + & setProperty( kIOHIDClickSpaceKey, &clickSpaceThresh, + sizeof( clickSpaceThresh)) + & setProperty( kIOHIDAutoDimThresholdKey, &autoDimThresholdNano, + sizeof( UInt64)) + & setProperty( kIOHIDAutoDimTimeKey, &autoDimTimeNano, + sizeof( UInt64)) + & setProperty( kIOHIDIdleTimeKey, &idleTimeNano, + sizeof( UInt64)) + & setProperty( kIOHIDAutoDimStateKey, &autoDimmed, + sizeof( autoDimmed)) + & setProperty( kIOHIDBrightnessKey, &curBright, + sizeof( curBright)) + & setProperty( kIOHIDAutoDimBrightnessKey, &dimmedBrightness, + sizeof( dimmedBrightness)); + + return( ok ); +} + +bool IOHIDSystem::serializeProperties( OSSerialize * s ) const +{ + ((IOHIDSystem *) this)->updateProperties(); + + return( super::serializeProperties( s )); +} + +IOReturn IOHIDSystem::setParamProperties( OSDictionary * dict ) +{ + OSData * data; + IOReturn err = kIOReturnSuccess; + + IOTakeLock( driverLock); + if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDClickTimeKey)))) + { + UInt64 nano = *((UInt64 *)(data->getBytesNoCopy())); + nanoseconds_to_absolutetime(nano, 
&clickTimeThresh); + } + if( (data = OSDynamicCast( OSData, + dict->getObject(kIOHIDClickSpaceKey)))) { + clickSpaceThresh.x = ((UInt32 *) (data->getBytesNoCopy()))[EVSIOSCS_X]; + clickSpaceThresh.y = ((UInt32 *) (data->getBytesNoCopy()))[EVSIOSCS_Y]; + } + + if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDAutoDimThresholdKey)))) { + AbsoluteTime oldPeriod = autoDimPeriod; + UInt64 nano = *((UInt64 *)(data->getBytesNoCopy())); + nanoseconds_to_absolutetime(nano, &autoDimPeriod); + // autoDimTime = autoDimTime - oldPeriod + autoDimPeriod; + SUB_ABSOLUTETIME( &autoDimTime, &oldPeriod); + ADD_ABSOLUTETIME( &autoDimTime, &autoDimPeriod); + } + + if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDAutoDimStateKey)))) + forceAutoDimState( 0 != *((SInt32 *) (data->getBytesNoCopy()))); + + if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDBrightnessKey)))) + setBrightness( *((SInt32 *) (data->getBytesNoCopy()))); + + if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDAutoDimBrightnessKey)))) + setAutoDimBrightness( *((SInt32 *) (data->getBytesNoCopy()))); + + IOUnlock( driverLock); + + return( err ); +} + diff --git a/iokit/Families/IOHIDSystem/IOHIDUserClient.cpp b/iokit/Families/IOHIDSystem/IOHIDUserClient.cpp new file mode 100644 index 000000000..a4e1906b2 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDUserClient.cpp @@ -0,0 +1,246 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

/* IOHIDUserClient.cpp -- user-space connections to the HID event system.
 *
 * Two user-client classes are implemented here:
 *   IOHIDUserClient      -- the single "server" connection (WindowServer):
 *                           shared-memory event queue plus cursor/event control.
 *   IOHIDParamUserClient -- parameter-only connections: may post events and
 *                           set properties, but owns no shared memory.
 */

/* NOTE(review): the two include targets below were lost when this patch was
 * extracted (the <...> names were stripped); confirm against the original
 * xnu tree before building. */
#include
#include

#include "IOHIDUserClient.h"


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOUserClient

OSDefineMetaClassAndStructors(IOHIDUserClient, IOUserClient)

OSDefineMetaClassAndStructors(IOHIDParamUserClient, IOUserClient)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Attach the server connection to the IOHIDSystem provider and build the
// externally callable method table.  The template supplies the calling
// conventions (scalar/struct in/out counts); the object/func slots are
// filled in below with member-function pointers on the owning IOHIDSystem.
bool IOHIDUserClient::start( IOService * _owner )
{
    static const IOExternalMethod methodTemplate[] = {
/* 0 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 0 },
/* 1 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 0 },
/* 2 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 0 },
/* 3 */  { NULL, NULL, kIOUCStructIStructO, sizeof(struct evioLLEvent), 0 },
/* 4 */  { NULL, NULL, kIOUCStructIStructO, sizeof(Point), 0 },
/* 5 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 1 }
    };

    if( !super::start( _owner ))
        return( false);

    owner = (IOHIDSystem *) _owner;

    bcopy( methodTemplate, externals, sizeof( methodTemplate ));

    // NOTE(review): IOMethod casts of pointer-to-member functions are the
    // era-standard IOKit dispatch idiom; indices must match the template.
    externals[0].object = owner;
    externals[0].func = (IOMethod) &IOHIDSystem::createShmem;

    externals[1].object = owner;
    externals[1].func = (IOMethod) &IOHIDSystem::setEventsEnable;

    externals[2].object = owner;
    externals[2].func = (IOMethod) &IOHIDSystem::setCursorEnable;

    externals[3].object = owner;
    externals[3].func = (IOMethod) &IOHIDSystem::extPostEvent;

    externals[4].object = owner;
    externals[4].func = (IOMethod) &IOHIDSystem::extSetMouseLocation;

    externals[5].object = owner;
    externals[5].func = (IOMethod) &IOHIDSystem::extGetButtonEventNum;

    return( true );
}

// Tear down the server connection: close the event system, clear the
// owner's record of this connection, and detach from the provider.
IOReturn IOHIDUserClient::clientClose( void )
{
    owner->evClose();
#ifdef DEBUG
    kprintf("%s: client token invalidated\n", getName());
#endif

    owner->serverConnect = 0;
    detach( owner);

    return( kIOReturnSuccess);
}

// The service this client represents is the owning IOHIDSystem.
IOService * IOHIDUserClient::getService( void )
{
    return( owner );
}

// Register the Mach port the HID system signals when new events are queued.
// Only kIOHIDEventNotification is supported; refCon is ignored.
IOReturn IOHIDUserClient::registerNotificationPort(
		mach_port_t 	port,
		UInt32		type,
		UInt32		refCon )
{
    if( type != kIOHIDEventNotification)
	return( kIOReturnUnsupported);

    owner->setEventPort(port);
    return( kIOReturnSuccess);
}

// Cross-connect from a graphics device's user client: register that
// device's bounding rect as a screen with the HID system.
IOReturn IOHIDUserClient::connectClient( IOUserClient * client )
{
    Bounds * 		bounds;
    IOGraphicsDevice *	graphicsDevice;

    // yikes -- reaches through the other client to its provider and
    // assumes it is an IOGraphicsDevice; rejected otherwise.
    if( 0 == (graphicsDevice = OSDynamicCast(IOGraphicsDevice,
                client->getProvider())) )
	return( kIOReturnBadArgument );

    graphicsDevice->getBoundingRect(&bounds);

    owner->registerScreen(graphicsDevice, bounds);

    return( kIOReturnSuccess);
}

// Hand out the shared event-queue memory.  The descriptor is retained on
// behalf of the caller (the mapping machinery releases it later).
IOReturn IOHIDUserClient::clientMemoryForType( UInt32 type,
        UInt32 * flags, IOMemoryDescriptor ** memory )
{

    if( type != kIOHIDGlobalMemory)
	return( kIOReturnBadArgument);

    *flags = 0;
    owner->globalMemory->retain();
    *memory = owner->globalMemory;

    return( kIOReturnSuccess);
}

// Bounds-checked lookup into the method table built by start().
IOExternalMethod * IOHIDUserClient::getExternalMethodForIndex( UInt32 index )
{
    if( index < (sizeof( externals) / sizeof( externals[0])))
	return( externals + index);
    else
	return( NULL);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Parameter-connection start: same table layout as the server client so the
// same user-space library indices work, but only extPostEvent (3) and
// extSetMouseLocation (4) are wired up; slots 0-2 stay NULL.
bool IOHIDParamUserClient::start( IOService * _owner )
{
    static const IOExternalMethod methodTemplate[] = {
/* 0 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 0 },
/* 1 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 0 },
/* 2 */  { NULL, NULL, kIOUCScalarIScalarO, 1, 0 },
/* 3 */  { NULL, NULL, kIOUCStructIStructO, sizeof(struct evioLLEvent), 0 },
/* 4 */  { NULL, NULL, kIOUCStructIStructO, sizeof(Point), 0 },
    };

    if( !super::start( _owner ))
        return( false);

    owner = (IOHIDSystem *) _owner;

    bcopy( methodTemplate, externals, sizeof( methodTemplate ));

    externals[3].object = owner;
    externals[3].func = (IOMethod) &IOHIDSystem::extPostEvent;

    externals[4].object = owner;
    externals[4].func = (IOMethod) &IOHIDSystem::extSetMouseLocation;

    return( true );
}

// NOTE(review): free() takes two extra retains before detaching and the
// release() override forwards release(2) to consume them.  This appears to
// guard against re-entrant release while the owner's paramConnect pointer
// is being cleared -- confirm against libkern OSObject reference-counting
// semantics before altering this sequence.
void IOHIDParamUserClient::free( void )
{
    retain(); retain();
    owner->paramConnect = 0;
    detach( owner);
    super::free();
}

void IOHIDParamUserClient::release() const
{
    super::release(2);
}


// Parameter connections hold no state that needs explicit teardown.
IOReturn IOHIDParamUserClient::clientClose( void )
{
    return( kIOReturnSuccess);
}

IOService * IOHIDParamUserClient::getService( void )
{
    return( owner );
}

// Only indices 3 and 4 resolve (the populated slots); lower indices are
// rejected so the same user library works for param & server connects.
IOExternalMethod * IOHIDParamUserClient::getExternalMethodForIndex(
						UInt32 index )
{
    // get the same library function to work for param & server connects
    if( (index >= 3)
     && (index < (sizeof( externals) / sizeof( externals[0]))))
	return( externals + index);
    else
	return( NULL);
}

// Fan a property dictionary out to the HID system and every open HID
// device.  kIOReturnBadArgument from an individual device means "key not
// mine" and is deliberately not treated as an overall failure.
IOReturn IOHIDParamUserClient::setProperties( OSObject * properties )
{
    OSDictionary *	dict;
    OSIterator *	iter;
    IOHIDevice *	eventSrc;
    IOReturn		err = kIOReturnSuccess;
    IOReturn		ret;

    dict = OSDynamicCast( OSDictionary, properties );
    if( dict) {
	ret = owner->setParamProperties( dict );
        if( (ret != kIOReturnSuccess) && (ret != kIOReturnBadArgument))
            err = ret;
        iter = owner->getOpenProviderIterator();

        if( iter) {
            while( (eventSrc = (IOHIDevice *) iter->getNextObject())) {
                ret = eventSrc->setParamProperties( dict );
                if( (ret != kIOReturnSuccess) && (ret != kIOReturnBadArgument))
                    err = ret;
            }
            iter->release();
        }
    } else
	err = kIOReturnBadArgument;

    return( err );
}
a/iokit/Families/IOHIDSystem/IOHIDUserClient.h b/iokit/Families/IOHIDSystem/IOHIDUserClient.h new file mode 100644 index 000000000..84f842cae --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDUserClient.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. 
+ * + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOHIDUSERCLIENT_H +#define _IOKIT_IOHIDUSERCLIENT_H + +#include +#include + + +class IOHIDUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOHIDUserClient) + +private: + + IOHIDSystem * owner; + + IOExternalMethod externals[ 6 ]; + +public: + // IOUserClient methods + virtual IOReturn clientClose( void ); + + virtual IOService * getService( void ); + + virtual IOReturn registerNotificationPort( + mach_port_t port, UInt32 type, UInt32 refCon ); + + virtual IOReturn connectClient( IOUserClient * client ); + + virtual IOReturn clientMemoryForType( UInt32 type, + UInt32 * flags, IOMemoryDescriptor ** memory ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + // others + + bool start( IOService * provider ); + +}; + + +class IOHIDParamUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOHIDParamUserClient) + +private: + + IOHIDSystem * owner; + + IOExternalMethod externals[ 5 ]; + +public: + // OSObject methods + virtual void free( void ); + + virtual void release() const; + + // IOUserClient methods + virtual IOReturn clientClose( void ); + + virtual IOService * getService( void ); + + virtual IOExternalMethod * getExternalMethodForIndex( UInt32 index ); + + // others + + bool start( IOService * provider ); + virtual IOReturn setProperties( OSObject * properties ); + +}; + + +#endif /* ! _IOKIT_IOHIDUSERCLIENT_H */ diff --git a/iokit/Families/IOHIDSystem/IOHIDevice.cpp b/iokit/Families/IOHIDSystem/IOHIDevice.cpp new file mode 100644 index 000000000..ad51bd67c --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIDevice.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#define super IOService +OSDefineMetaClassAndStructors(IOHIDevice, IOService); + +bool IOHIDevice::init(OSDictionary * properties) +{ + if (!super::init(properties)) return false; + + /* + * Initialize minimal state. + */ + + return true; +} + +void IOHIDevice::free() +{ + super::free(); +} + +IOHIDKind IOHIDevice::hidKind() +{ + return kHIUnknownDevice; +} + +UInt32 IOHIDevice::interfaceID() +{ + return 0; +} + +UInt32 IOHIDevice::deviceType() +{ + return 0; +} + +UInt64 IOHIDevice::getGUID() +{ + return(0xffffffffffffffffULL); +} + +bool IOHIDevice::updateProperties( void ) +{ + bool ok; + + ok = setProperty( kIOHIDKindKey, hidKind(), 32 ) + & setProperty( kIOHIDInterfaceIDKey, interfaceID(), 32 ) + & setProperty( kIOHIDSubinterfaceIDKey, deviceType(), 32 ); + + return( ok ); +} + +IOReturn IOHIDevice::setParamProperties( OSDictionary * dict ) +{ + return( kIOReturnSuccess ); +} + + diff --git a/iokit/Families/IOHIDSystem/IOHIKeyboard.cpp b/iokit/Families/IOHIDSystem/IOHIKeyboard.cpp new file mode 100644 index 000000000..f0f9c14b5 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIKeyboard.cpp @@ -0,0 +1,579 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * From EventSrcPCKeyoard.m - PC Keyboard EventSrc subclass implementation + * Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * 20-Dec-00 bubba Save global repeat and delay values when + * devices are unplugged. Restore when device is reset. + * 24-Jan-01 bubba Don't auto-repeat on Power Key. This prevents infinite power key + * events from being generated when this key is hit on ADB keyboards. + */ + +#include +#include +#include +#include +#include + +AbsoluteTime gKeyRepeat = { 0 }; // Delay between key repeats +AbsoluteTime gInitialKeyRepeat = { 0 }; // Delay before initial key repeat + +#define super IOHIDevice +OSDefineMetaClassAndStructors(IOHIKeyboard, IOHIDevice); + +bool IOHIKeyboard::init(OSDictionary * properties) +{ + if (!super::init(properties)) return false; + + /* + * Initialize minimal state. 
+ */ + + _deviceLock = IOLockAlloc(); + _keyMap = 0; + _keyStateSize = 4*((maxKeyCodes()+(EVK_BITS_PER_UNIT-1))/EVK_BITS_PER_UNIT); + _keyState = (UInt32 *) IOMalloc(_keyStateSize); + + if (!_deviceLock || !_keyState) return false; + + IOLockInit(_deviceLock); + bzero(_keyState, _keyStateSize); + + return true; +} + +bool IOHIKeyboard::start(IOService * provider) +{ + if (!super::start(provider)) return false; + + /* + * IOHIKeyboard serves both as a service and a nub (we lead a double + * life). Register ourselves as a nub to kick off matching. + */ + + registerService(); + + return true; +} + +void IOHIKeyboard::free() +// Description: Go Away. Be careful when freeing the lock. +{ + IOLock * lock = NULL; + + // Save repeat rate and delay, so when we are replugged we'll be ready + // with the right values. + // + gKeyRepeat = _keyRepeat; + gInitialKeyRepeat = _initialKeyRepeat; + + if ( _deviceLock ) + { + lock = _deviceLock; + IOTakeLock( lock); + _deviceLock = NULL; + } + if ( _keyMap ) + _keyMap->release(); + if( _keyState ) + IOFree( _keyState, _keyStateSize); + if ( lock ) + { + IOUnlock( lock); + IOLockFree( lock); + } + super::free(); +} + +IOHIDKind IOHIKeyboard::hidKind() +{ + return kHIKeyboardDevice; +} + +bool IOHIKeyboard::updateProperties( void ) +{ + UInt64 keyRepeatNano; + UInt64 initialKeyRepeatNano; + bool ok; + + absolutetime_to_nanoseconds( _keyRepeat, &keyRepeatNano); + absolutetime_to_nanoseconds( _initialKeyRepeat, &initialKeyRepeatNano); + + ok = setProperty( kIOHIDKeyMappingKey, _keyMap ) + & setProperty( kIOHIDKeyRepeatKey, &keyRepeatNano, + sizeof(keyRepeatNano)) + & setProperty( kIOHIDInitialKeyRepeatKey, &initialKeyRepeatNano, + sizeof(initialKeyRepeatNano)); + + return( ok & super::updateProperties() ); +} + +IOReturn IOHIKeyboard::setParamProperties( OSDictionary * dict ) +{ + OSData * data; + IOReturn err = kIOReturnSuccess; + unsigned char * map; + IOHIKeyboardMapper * oldMap; + bool updated = false; + UInt64 nano; + IOTakeLock( 
_deviceLock); + + if( (data = OSDynamicCast( OSData, + dict->getObject(kIOHIDKeyRepeatKey)))) { + + nano = *((UInt64 *)(data->getBytesNoCopy())); + if( nano < EV_MINKEYREPEAT) + nano = EV_MINKEYREPEAT; + nanoseconds_to_absolutetime(nano, &_keyRepeat); + updated = true; + } + + if( (data = OSDynamicCast( OSData, + dict->getObject(kIOHIDInitialKeyRepeatKey)))) { + + nano = *((UInt64 *)(data->getBytesNoCopy())); + if( nano < EV_MINKEYREPEAT) + nano = EV_MINKEYREPEAT; + nanoseconds_to_absolutetime(nano, &_initialKeyRepeat); + updated = true; + } + + if( (data = OSDynamicCast( OSData, dict->getObject(kIOHIDKeyMappingKey)))) { + + map = (unsigned char *)IOMalloc( data->getLength() ); + bcopy( data->getBytesNoCopy(), map, data->getLength() ); + oldMap = _keyMap; + _keyMap = IOHIKeyboardMapper::keyboardMapper(this, map, data->getLength(), true); + if (_keyMap) { + if (oldMap) + oldMap->release(); + updated = true; + } else { + _keyMap = oldMap; + err = kIOReturnBadArgument; + } + } + IOUnlock( _deviceLock); + + if( dict->getObject(kIOHIDResetKeyboardKey)) + resetKeyboard(); + + if( updated ) + updateProperties(); + + return( err ); +} + +bool IOHIKeyboard::resetKeyboard() +// Description: Reset the keymapping to the default value and reconfigure +// the keyboards. +{ + const unsigned char *defaultKeymap; + UInt32 defaultKeymapLength; + + IOTakeLock( _deviceLock); + + if ( _keyMap ) + _keyMap->release(); + + // Set up default keymapping. + defaultKeymap = defaultKeymapOfLength(&defaultKeymapLength); + + _keyMap = IOHIKeyboardMapper::keyboardMapper( this, + defaultKeymap, + defaultKeymapLength, + false ); + if (_keyMap) + { + clock_interval_to_absolutetime_interval( EV_DEFAULTKEYREPEAT, + kNanosecondScale, &_keyRepeat); + clock_interval_to_absolutetime_interval( EV_DEFAULTINITIALREPEAT, + kNanosecondScale, &_initialKeyRepeat); + } + + // Use our globals if valid. 
That way, if we are unplugged and replugged, we'll + // have the proper values, instead of the lame default values. + // + if( gKeyRepeat.lo > 0 ) _keyRepeat = gKeyRepeat; + if( gInitialKeyRepeat.lo > 0 ) _initialKeyRepeat = gInitialKeyRepeat; + + updateProperties(); + + _interfaceType = interfaceID(); + _deviceType = deviceType(); + _guid = getGUID(); + + IOUnlock( _deviceLock); + return (_keyMap) ? true : false; +} + +void IOHIKeyboard::scheduleAutoRepeat() +// Description: Schedule a procedure to be called when a timeout has expired +// so that we can generate a repeated key. +// Preconditions: +// * _deviceLock should be held on entry +{ + if ( _calloutPending == true ) + { + thread_call_func_cancel(_autoRepeat, this, true); + _calloutPending = false; + } + if ( AbsoluteTime_to_scalar(&_downRepeatTime) ) + { + AbsoluteTime deadline; + clock_absolutetime_interval_to_deadline(_downRepeatTime, &deadline); + thread_call_func_delayed(_autoRepeat, this, deadline); + _calloutPending = true; + } +} + +void IOHIKeyboard::_autoRepeat(thread_call_param_t arg, + thread_call_param_t) /* thread_call_func_t */ +{ + IOHIKeyboard *self = (IOHIKeyboard *) arg; + self->autoRepeat(); +} + +void IOHIKeyboard::autoRepeat() +// Description: Repeat the currently pressed key and schedule ourselves +// to be called again after another interval elapses. +// Preconditions: +// * Should only be executed on callout thread +// * _deviceLock should be unlocked on entry. 
+{ + IOTakeLock( _deviceLock); + if ( _calloutPending == false ) + { + IOUnlock( _deviceLock); + return; + } + _calloutPending = false; + _isRepeat = true; + + if ( AbsoluteTime_to_scalar(&_downRepeatTime) ) + { + // Device is due to generate a repeat + if (_keyMap) _keyMap->translateKeyCode(_codeToRepeat, + /* direction */ true, + /* keyBits */ _keyState); + _downRepeatTime = _keyRepeat; + } + + _isRepeat = false; + scheduleAutoRepeat(); + IOUnlock( _deviceLock); +} + +void IOHIKeyboard::setRepeat(unsigned eventType, unsigned keyCode) +// Description: Set up or tear down key repeat operations. The method +// that locks _deviceLock is a bit higher on the call stack. +// This method is invoked as a side effect of our own +// invocation of _keyMap->translateKeyCode(). +// Preconditions: +// * _deviceLock should be held upon entry. +{ + if ( _isRepeat == false ) // make sure we're not already repeating + { + if (eventType == NX_KEYDOWN) // Start repeat + { + // Set this key to repeat (push out last key if present) + _downRepeatTime = _initialKeyRepeat; // + _lastEventTime; + _codeToRepeat = keyCode; + // reschedule key repeat event here + scheduleAutoRepeat(); + } + else if (eventType == NX_KEYUP) // End repeat + { + /* Remove from downKey */ + if (_codeToRepeat == keyCode) + { + AbsoluteTime_to_scalar(&_downRepeatTime) = 0; + _codeToRepeat = (unsigned)-1; + scheduleAutoRepeat(); + } + } + } +} + +// +// BEGIN: Implementation of the methods required by IOHIKeyboardMapper. +// + +void IOHIKeyboard::keyboardEvent(unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned keyCode, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet) +// Description: We use this notification to set up our _keyRepeat timer +// and to pass along the event to our owner. 
This method +// will be called while the KeyMap object is processing +// the key code we've sent it using deliverKey. +{ + + if (_keyboardEventAction) /* upstream call */ + { + (*_keyboardEventAction)(_keyboardEventTarget, + eventType, + /* flags */ flags, + /* keyCode */ keyCode, + /* charCode */ charCode, + /* charSet */ charSet, + /* originalCharCode */ origCharCode, + /* originalCharSet */ origCharSet, + /* keyboardType */ _deviceType, + /* repeat */ _isRepeat, + /* atTime */ _lastEventTime); + } + + + if( keyCode == _keyMap->getParsedSpecialKey(NX_KEYTYPE_CAPS_LOCK) || + keyCode == _keyMap->getParsedSpecialKey(NX_POWER_KEY) ) + { + //Don't repeat caps lock on ADB/USB. 0x39 is default ADB code. + // We are here because KeyCaps needs to see 0x39 as a real key, + // not just another modifier bit. + + if (_interfaceType == NX_EVS_DEVICE_INTERFACE_ADB) + { + return; + } + } + + // Set up key repeat operations here. + setRepeat(eventType, keyCode); +} + +void IOHIKeyboard::keyboardSpecialEvent(unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned keyCode, + /* specialty */ unsigned flavor) +// Description: See the description for keyboardEvent. +{ + + if (_keyboardSpecialEventAction) /* upstream call */ + { + (*_keyboardSpecialEventAction)(_keyboardSpecialEventTarget, + eventType, + /* flags */ flags, + /* keyCode */ keyCode, + /* specialty */ flavor, + /* guid */ _guid, + /* repeat */ _isRepeat, + /* atTime */ _lastEventTime); + } + + // Set up key repeat operations here. + + // Don't repeat caps lock, numlock or power key. + // + if ( (flavor != NX_KEYTYPE_CAPS_LOCK) && (flavor != NX_KEYTYPE_NUM_LOCK) && + (flavor != NX_POWER_KEY) ) + { + setRepeat(eventType, keyCode); + } +} + +void IOHIKeyboard::updateEventFlags(unsigned flags) +// Description: Process non-event-generating flag changes. Simply pass this +// along to our owner. 
+{ + if (_updateEventFlagsAction) /* upstream call */ + { + (*_updateEventFlagsAction)(_updateEventFlagsTarget, flags); + } +} + +unsigned IOHIKeyboard::eventFlags() +// Description: Return global event flags In this world, there is only +// one keyboard device so device flags == global flags. +{ + return _eventFlags; +} + +unsigned IOHIKeyboard::deviceFlags() +// Description: Return per-device event flags. In this world, there is only +// one keyboard device so device flags == global flags. +{ + return _eventFlags; +} + +void IOHIKeyboard::setDeviceFlags(unsigned flags) +// Description: Set device event flags. In this world, there is only +// one keyboard device so device flags == global flags. +{ + _eventFlags = flags; +} + +bool IOHIKeyboard::alphaLock() +// Description: Return current alpha-lock state. This is a state tracking +// callback used by the KeyMap object. +{ + return _alphaLock; +} + +void IOHIKeyboard::setAlphaLock(bool val) +// Description: Set current alpha-lock state This is a state tracking +// callback used by the KeyMap object. +{ + _alphaLock = val; + setAlphaLockFeedback(val); +} + +bool IOHIKeyboard::numLock() +{ + return _numLock; +} + +void IOHIKeyboard::setNumLock(bool val) +{ + _numLock = val; + setNumLockFeedback(val); +} + +bool IOHIKeyboard::charKeyActive() +// Description: Return true If a character generating key down This is a state +// tracking callback used by the KeyMap object. +{ + return _charKeyActive; +} + +void IOHIKeyboard::setCharKeyActive(bool val) +// Description: Note whether a char generating key is down. This is a state +// tracking callback used by the KeyMap object. +{ + _charKeyActive = val; +} +// +// END: Implementation of the methods required by IOHIKeyboardMapper. +// + +void IOHIKeyboard::dispatchKeyboardEvent(unsigned int keyCode, + /* direction */ bool goingDown, + /* timeStamp */ AbsoluteTime time) +// Description: This method is the heart of event dispatching. 
The overlying +// subclass invokes this method with each event. We then +// get the event xlated and dispatched using a _keyMap instance. +// The event structure passed in by reference should not be freed. +{ + _lastEventTime = time; + + IOTakeLock( _deviceLock); + + if (_keyMap) _keyMap->translateKeyCode(keyCode, + /* direction */ goingDown, + /* keyBits */ _keyState); + IOUnlock( _deviceLock); +} + +const unsigned char * IOHIKeyboard::defaultKeymapOfLength(UInt32 * length) +{ + *length = 0; + return NULL; +} + +void IOHIKeyboard::setAlphaLockFeedback(bool /* val */) +{ + return; +} + +void IOHIKeyboard::setNumLockFeedback(bool /* val */) +{ + return; +} + +UInt32 IOHIKeyboard::maxKeyCodes() +{ + return( 0x80); +} + +bool IOHIKeyboard:: doesKeyLock ( unsigned key) +{ + return false; +} + +unsigned IOHIKeyboard:: getLEDStatus () +{ + return 0; +} + + +bool IOHIKeyboard::open(IOService * client, + IOOptionBits options, + KeyboardEventAction keAction, + KeyboardSpecialEventAction kseAction, + UpdateEventFlagsAction uefAction) +{ + if ( (!_keyMap) && (!resetKeyboard())) return false; + +// IOLog("***open -- gKeyRepeat.lo = %08lx\n", gKeyRepeat.lo ); +// IOLog("***open -- gInitialKeyRepeat.lo = %08lx\n", gInitialKeyRepeat.lo ); + + if (super::open(client, options)) + { + // Note: client object is already retained by superclass' open() + _keyboardEventTarget = client; + _keyboardEventAction = keAction; + _keyboardSpecialEventTarget = client; + _keyboardSpecialEventAction = kseAction; + _updateEventFlagsTarget = client; + _updateEventFlagsAction = uefAction; + + return true; + } + + return false; +} + +void IOHIKeyboard::close(IOService * client, IOOptionBits) +{ +// IOLog("***close -- gKeyRepeat.lo = %08lx\n", gKeyRepeat.lo ); +// IOLog("***close -- gInitialKeyRepeat.lo = %08lx\n", gInitialKeyRepeat.lo ); + + // Save repeat rate and delay, so when we are replugged we'll be ready + // with the right values. 
+ // + gKeyRepeat = _keyRepeat; + gInitialKeyRepeat = _initialKeyRepeat; + + // kill autorepeat task + AbsoluteTime_to_scalar(&_downRepeatTime) = 0; + _codeToRepeat = (unsigned)-1; + scheduleAutoRepeat(); + // clear modifiers to avoid stuck keys + setAlphaLock(false); + if (_updateEventFlagsAction) + (*_updateEventFlagsAction)(_updateEventFlagsTarget, 0); _eventFlags = 0; + bzero(_keyState, _keyStateSize); + + _keyboardEventAction = NULL; + _keyboardEventTarget = 0; + _keyboardSpecialEventAction = NULL; + _keyboardSpecialEventTarget = 0; + _updateEventFlagsAction = NULL; + _updateEventFlagsTarget = 0; + + super::close(client); +} + diff --git a/iokit/Families/IOHIDSystem/IOHIKeyboardMapper.cpp b/iokit/Families/IOHIDSystem/IOHIKeyboardMapper.cpp new file mode 100644 index 000000000..aac2e26bf --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIKeyboardMapper.cpp @@ -0,0 +1,768 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * KeyMap.m - Generic keymap string parser and keycode translator. 
 *
 * HISTORY
 * 19 June 1992    Mike Paquette at NeXT
 *      Created.
 * 5  Aug 1993	  Erik Kay at NeXT
 *	minor API cleanup
 * 11 Nov 1993	  Erik Kay at NeXT
 *	fix to allow prevent long sequences from overflowing the event queue
 * 12 Nov 1998    Dan Markarian at Apple
 *      major cleanup of public API's; converted to C++
 */

/* IOHIKeyboardMapper -- parses a binary keymap blob and translates raw key
 * codes into keyboard events, calling back into its IOHIKeyboard delegate. */

/* NOTE(review): include targets lost in patch extraction; confirm against
 * the original xnu tree. */
#include
#include
#include
#include
#include

#define super OSObject
OSDefineMetaClassAndStructors(IOHIKeyboardMapper, OSObject);

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

// Factory: construct and initialize a mapper; returns 0 on failure.
// NOTE(review): on init failure the mapping bytes are NOT freed here even
// when mappingShouldBeFreed is true (init sets that flag only after a
// successful parse) -- callers must free their buffer on a 0 return.
IOHIKeyboardMapper * IOHIKeyboardMapper::keyboardMapper(
                                        IOHIKeyboard * delegate,
                                        const UInt8 *  mapping,
                                        UInt32         mappingLength,
                                        bool           mappingShouldBeFreed )
{
	IOHIKeyboardMapper * me = new IOHIKeyboardMapper;

	if (me && !me->init(delegate, mapping, mappingLength, mappingShouldBeFreed))
	{
		me->free();
		return 0;
	}

	return me;
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

/*
 * Common KeyMap initialization
 */
// Parse the raw keymap; only on success take (optional) ownership of the
// bytes.  (parseKeyMapping also records mapping/mappingLen; the two
// assignments below re-store the same values.)
bool IOHIKeyboardMapper::init( IOHIKeyboard * delegate,
                               const UInt8 *  mapping,
                               UInt32         mappingLength,
                               bool           mappingShouldBeFreed )
{
	if (!super::init())  return false;

	_delegate             = delegate;

	if (!parseKeyMapping(mapping, mappingLength, &_parsedMapping))  return false;

	_mappingShouldBeFreed	= mappingShouldBeFreed;
	_parsedMapping.mapping	= mapping;
	_parsedMapping.mappingLen = mappingLength;

	return true;
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

// Free the keymap bytes only if ownership was transferred at init.
void IOHIKeyboardMapper::free()
{
	if (_mappingShouldBeFreed && _parsedMapping.mapping)
		IOFree((void *)_parsedMapping.mapping, _parsedMapping.mappingLen);

	super::free();
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

// Raw keymap bytes (borrowed pointer; valid for the mapper's lifetime).
const UInt8 * IOHIKeyboardMapper::mapping()
{
	return (const UInt8 *)_parsedMapping.mapping;
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

UInt32 IOHIKeyboardMapper::mappingLength()
{
	return _parsedMapping.mappingLen;
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

// Serialize as an OSData wrapping the raw keymap bytes (no copy).
bool IOHIKeyboardMapper::serialize(OSSerialize *s) const
{
    OSData * data;
    bool ok;

    if (s->previouslySerialized(this)) return true;

    data = OSData::withBytesNoCopy( (void *) _parsedMapping.mapping, _parsedMapping.mappingLen );
    if (data) {
	ok = data->serialize(s);
	data->release();
    } else
	ok = false;

    return( ok );
}

// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

//
// Perform the mapping of 'key' moving in the specified direction
// into events.
//

// Update the key bit vector, run modifier and character generation in the
// correct order for the direction, then apply two legacy workarounds:
// JIS keys absent from the parsed map (ADB only) and, when
// OMITPENDINGKEYCAPS is defined, caps-lock / fn visibility for KeyCaps.
void IOHIKeyboardMapper::translateKeyCode(UInt8        key,
                                          bool         keyDown,
                                          kbdBitVector keyBits)
{
	unsigned char thisBits = _parsedMapping.keyBits[key];
	unsigned char * bp;   // NOTE(review): only used under OMITPENDINGKEYCAPS

	/* do mod bit update and char generation in useful order */
	if (keyDown)
	{
		EVK_KEYDOWN(key, keyBits);

		if (thisBits & NX_MODMASK)     doModCalc(key, keyBits);
		if (thisBits & NX_CHARGENMASK) doCharGen(key, keyDown);
	}
	else
	{
		EVK_KEYUP(key, keyBits);
		if (thisBits & NX_CHARGENMASK) doCharGen(key, keyDown);
		if (thisBits & NX_MODMASK)     doModCalc(key, keyBits);
	}

	//Fix JIS localization.  We are here because the JIS keys Yen, Ro, Eisu,
	//  Kana, and "," are not matched in _parsedMapping.keyBits[] above even
	//  though the keyboard drivers are sending the correct scan codes.
	//  The check for interfaceID() below makes sure both ADB and USB works.
	//  This fix has been tested with AppKit and Carbon for Kodiak 1H
	if( 0 == (thisBits & (NX_MODMASK | NX_CHARGENMASK)))
	    if (_delegate->interfaceID() == NX_EVS_DEVICE_INTERFACE_ADB)
	{
	    unsigned charCode=0;

	    switch (key) {
		case 0x5F:  // numpad ',' using raw ADB scan code
		    charCode = ',';
		    break;
		case 0x5E:  //ro
		    charCode = '_';
		    break;
		case 0x5d:  //Yen
		    charCode = '\\';
		    break;
		case 0x0a:
		    charCode = 0xa7;  // section sign
		    break;
		case 0x66:  // eisu
		case 0x68:  // kana
		default:
		    // do nothing. AppKit has fix in 1H
		    break;
	    }
	    /* Post the keyboard event */
	    _delegate->keyboardEvent(keyDown ? NX_KEYDOWN : NX_KEYUP,
	     /* flags */            _delegate->eventFlags(),
	     /* keyCode */          key,
	     /* charCode */         charCode,
	     /* charSet */          0,  //0 is adequate for JIS
	     /* originalCharCode */ 0,
	     /* originalCharSet */  0);
	}

#ifdef OMITPENDINGKEYCAPS
	//Make KeyCaps.app see the caps lock
	if (key == _parsedMapping.specialKeys[NX_KEYTYPE_CAPS_LOCK])  //ADB caps lock 0x39
	{
	    if (_delegate->alphaLock() == keyDown)
	    //This logic is needed for non-locking USB caps lock
	    {
		_delegate->keyboardEvent(keyDown ? NX_KEYDOWN : NX_KEYUP,
		    _delegate->eventFlags(), key, 0, 0, 0, 0);
	    }
	}

	//Find scan code corresponding to PowerBook fn key (0x3f in ADB)
	bp = _parsedMapping.modDefs[NX_MODIFIERKEY_SECONDARYFN];  //7th array entry
	if (bp)
	{
	    bp++;  //now points to actual ADB scan code
	    if (key == *bp )  //ADB fn key should be 0x3f here
	    {
		_delegate->keyboardEvent(keyDown ? NX_KEYDOWN : NX_KEYUP,
		    _delegate->eventFlags(), key, 0, 0, 0, 0);
	    }
	}
#endif
}
+// +typedef struct { + unsigned const char *bp; + unsigned const char *endPtr; + int shorts; +} NewMappingData; + +static inline unsigned int NextNum(NewMappingData *nmd) +{ + if (nmd->bp >= nmd->endPtr) + return(0); + if (nmd->shorts) + return(*((unsigned short *)nmd->bp)++); + else + return(*((unsigned char *)nmd->bp)++); +} + +// +// Perform the actual parsing operation on a keymap. Returns false on failure. +// + +bool IOHIKeyboardMapper::parseKeyMapping(const UInt8 * mapping, + UInt32 mappingLength, + NXParsedKeyMapping * parsedMapping) const +{ + NewMappingData nmd; + int i, j, k, l, n; + unsigned int m; + int keyMask, numMods; + int maxSeqNum = -1; + unsigned char * bp; + + + /* Initialize the new map. */ + bzero( parsedMapping, sizeof (NXParsedKeyMapping) ); + parsedMapping->maxMod = -1; + parsedMapping->numDefs = -1; + parsedMapping->numSeqs = -1; + + nmd.endPtr = mapping + mappingLength; + nmd.bp = mapping; + nmd.shorts = 1; // First value, the size, is always a short + + /* Start filling it in with the new data */ + parsedMapping->mapping = (unsigned char *)mapping; + parsedMapping->mappingLen = mappingLength; + parsedMapping->shorts = nmd.shorts = NextNum(&nmd); + + /* Walk through the modifier definitions */ + numMods = NextNum(&nmd); + for(i=0; i= NX_NUMMODIFIERS) + return false; + + /* Check maxMod */ + if (j > parsedMapping->maxMod) + parsedMapping->maxMod = j; + + /* record position of this def */ + parsedMapping->modDefs[j] = (unsigned char *)nmd.bp; + + /* Loop through each key assigned to this bit */ + for(k=0,n = NextNum(&nmd);k= NX_NUMKEYCODES) + return false; + /* Make sure the key's not already assigned */ + if (parsedMapping->keyBits[l] & NX_MODMASK) + return false; + /* Set bit for modifier and which one */ + + //The "if" here is to patch the keymapping file. That file has nothing + // for num lock, so no change is required here for num lock. 
+ // Also, laptop Macs have num lock handled by Buttons driver + if ((j != NX_MODIFIERKEY_ALPHALOCK) || (_delegate->doesKeyLock(NX_KEYTYPE_CAPS_LOCK)) ) + { + parsedMapping->keyBits[l] |=NX_MODMASK | (j & NX_WHICHMODMASK); + } + + } + } + + //This is here because keymapping file has an entry for caps lock, but in + // order to trigger special code (line 646-), the entry needs to be zero + if (!_delegate->doesKeyLock(NX_KEYTYPE_CAPS_LOCK)) + parsedMapping->modDefs[NX_MODIFIERKEY_ALPHALOCK] = 0; + + //This section is here to force keymapping to include the PowerBook's secondary + // fn key as a new modifier key. This code can be removed once the keymapping + // file has the fn key (ADB=0x3f) in the modifiers section. + // NX_MODIFIERKEY_SECONDARYFN = 8 in ev_keymap.h + if (_delegate->interfaceID() == NX_EVS_DEVICE_INTERFACE_ADB) + { + parsedMapping->keyBits[0x3f] |=NX_MODMASK | (NX_MODIFIERKEY_SECONDARYFN & NX_WHICHMODMASK); + } + + /* Walk through each key definition */ + parsedMapping->numDefs = NextNum(&nmd); + n = parsedMapping->numDefs; + for( i=0; i < NX_NUMKEYCODES; i++) + { + if (i < n) + { + parsedMapping->keyDefs[i] = (unsigned char *)nmd.bp; + if ((keyMask = NextNum(&nmd)) != (nmd.shorts ? 
0xFFFF: 0x00FF)) + { + /* Set char gen bit for this guy: not a no-op */ + parsedMapping->keyBits[i] |= NX_CHARGENMASK; + /* Check key defs to find max sequence number */ + for(j=0, k=1; j<=parsedMapping->maxMod; j++, keyMask>>=1) + { + if (keyMask & 0x01) + k*= 2; + } + for(j=0; j maxSeqNum) + maxSeqNum = l; /* Update expected # of seqs */ + } + } + else /* unused code within active range */ + parsedMapping->keyDefs[i] = NULL; + } + else /* Unused code past active range */ + { + parsedMapping->keyDefs[i] = NULL; + } + } + /* Walk through sequence defs */ + parsedMapping->numSeqs = NextNum(&nmd); + /* If the map calls more sequences than are declared, bail out */ + if (parsedMapping->numSeqs <= maxSeqNum) + return false; + + /* Walk past all sequences */ + for(i = 0; i < parsedMapping->numSeqs; i++) + { + parsedMapping->seqDefs[i] = (unsigned char *)nmd.bp; + /* Walk thru entries in a seq. */ + for(j=0, l=NextNum(&nmd); j NX_NUMSPECIALKEYS ) + return false; + if ( numMods ) + { + for ( i = 0; i < NX_NUMSPECIALKEYS; ++i ) + parsedMapping->specialKeys[i] = NX_NOSPECIALKEY; + + //This "if" will cover both ADB and USB keyboards. This code does not + // have to be here if the keymaps include these two entries. Keyboard + // drivers already have these entries, but keymapping file does not + if (_delegate->interfaceID() == NX_EVS_DEVICE_INTERFACE_ADB) + { + //ADB capslock: + parsedMapping->specialKeys[NX_KEYTYPE_CAPS_LOCK] = 0x39; + + //ADB numlock for external keyboards, not PowerBook keyboards: + parsedMapping->specialKeys[NX_KEYTYPE_NUM_LOCK] = 0x47; + + //HELP key needs to be visible + parsedMapping->keyDefs[0x72] = parsedMapping->keyDefs[0x47]; + } + + //Keymapping file can override caps and num lock above now: + for ( i = 0; i < numMods; ++i ) + { + j = NextNum(&nmd); /* Which modifier key? 
*/ + l = NextNum(&nmd); /* Scancode for modifier key */ + if ( j >= NX_NUMSPECIALKEYS ) + return false; + parsedMapping->specialKeys[j] = l; + } + } + else /* No special keys defs implies an old style keymap */ + { + return false; /* Old style keymaps are guaranteed to do */ + /* the wrong thing on ADB keyboards */ + } + /* Install bits for Special device keys */ + for(i=0; ispecialKeys[i] != NX_NOSPECIALKEY ) + { + parsedMapping->keyBits[parsedMapping->specialKeys[i]] |= + (NX_CHARGENMASK | NX_SPECIALKEYMASK); + } + } + + //caps lock keys should not generate characters. + if (_delegate->doesKeyLock(NX_KEYTYPE_CAPS_LOCK)) + { + parsedMapping->keyBits[ parsedMapping->specialKeys[NX_KEYTYPE_CAPS_LOCK] ] + &= ~NX_CHARGENMASK; + } + + //Find scan code corresponding to PowerBook fn key (0x3f in ADB) + // and then make sure it does not generate a character + bp = _parsedMapping.modDefs[NX_MODIFIERKEY_SECONDARYFN]; //7th array entry + if (bp) + { + bp++; //now points to actual ADB scan code + parsedMapping->keyBits[ *bp ] &= ~NX_CHARGENMASK; + } + + return true; +} + + +//Retrieve a key from mapping above. Useful for IOHIKeyboard +UInt8 IOHIKeyboardMapper::getParsedSpecialKey(UInt8 logical) +{ + UInt8 retval; + + if ( logical < NX_NUMSPECIALKEYS) + retval = _parsedMapping.specialKeys[logical]; + else + retval = 0xff; //careful, 0 is mapped already + return retval; +} + + +static inline int NEXTNUM(unsigned char ** mapping, short shorts) +{ + int returnValue; + + if (shorts) + { + returnValue = *((unsigned short *)*mapping); + *mapping += sizeof(unsigned short); + } + else + { + returnValue = **((unsigned char **)mapping); + *mapping += sizeof(unsigned char); + } + + return returnValue; +} + +// +// Look up in the keymapping each key associated with the modifier bit. +// Look in the device state to see if that key is down. +// Return 1 if a key for modifier 'bit' is down. 
Return 0 if none is down +// +static inline int IsModifierDown(NXParsedKeyMapping *parsedMapping, + kbdBitVector keyBits, + int bit ) +{ + int i, n; + unsigned char *mapping; + unsigned key; + short shorts = parsedMapping->shorts; + + if ( (mapping = parsedMapping->modDefs[bit]) != 0 ) { + for(i=0, n=NEXTNUM(&mapping, shorts); ideviceFlags() & (~bitMask); + /* Set bit if any associated keys are down */ + if ( IsModifierDown( &_parsedMapping, keyBits, bit ) ) + myFlags |= bitMask; + + if ( bit == NX_MODIFIERKEY_ALPHALOCK ) /* Caps Lock key */ + _delegate->setAlphaLock((myFlags & NX_ALPHASHIFTMASK) ? true : false); + else if ( bit == NX_MODIFIERKEY_NUMLOCK ) /* Num Lock key */ + _delegate->setNumLock((myFlags & NX_NUMERICPADMASK) ? true : false); + + _delegate->setDeviceFlags(myFlags); + +} + + +// +// Perform flag state update and generate flags changed events for this key. +// +void IOHIKeyboardMapper::doModCalc(int key, kbdBitVector keyBits) +{ + int thisBits; + thisBits = _parsedMapping.keyBits[key]; + if (thisBits & NX_MODMASK) + { + calcModBit((thisBits & NX_WHICHMODMASK), keyBits); + /* The driver generates flags-changed events only when there is + no key-down or key-up event generated */ + if (!(thisBits & NX_CHARGENMASK)) + { + /* Post the flags-changed event */ + _delegate->keyboardEvent(NX_FLAGSCHANGED, + /* flags */ _delegate->eventFlags(), + /* keyCode */ key, + /* charCode */ 0, + /* charSet */ 0, + /* originalCharCode */ 0, + /* originalCharSet */ 0); + } + else /* Update, but don't generate an event */ + _delegate->updateEventFlags(_delegate->eventFlags()); + } +} + +// +// Perform character event generation for this key +// +void IOHIKeyboardMapper::doCharGen(int keyCode, bool down) +{ + int i, n, eventType, adjust, thisMask, modifiers, saveModifiers; + short shorts; + unsigned charSet, origCharSet; + unsigned charCode, origCharCode; + unsigned char *mapping; + unsigned eventFlags, origflags; + + _delegate->setCharKeyActive(true); // a character 
generating key is active + + eventType = (down == true) ? NX_KEYDOWN : NX_KEYUP; + eventFlags = _delegate->eventFlags(); + saveModifiers = eventFlags >> 16; // machine independent mod bits + /* Set NX_ALPHASHIFTMASK based on alphaLock OR shift active */ + if( saveModifiers & (NX_SHIFTMASK >> 16)) + saveModifiers |= (NX_ALPHASHIFTMASK >> 16); + + + /* Get this key's key mapping */ + shorts = _parsedMapping.shorts; + mapping = _parsedMapping.keyDefs[keyCode]; + modifiers = saveModifiers; + if ( mapping ) + { + + + /* Build offset for this key */ + thisMask = NEXTNUM(&mapping, shorts); + if (thisMask && modifiers) + { + adjust = (shorts ? sizeof(short) : sizeof(char))*2; + for( i = 0; i <= _parsedMapping.maxMod; ++i) + { + if (thisMask & 0x01) + { + if (modifiers & 0x01) + mapping += adjust; + adjust *= 2; + } + thisMask >>= 1; + modifiers >>= 1; + } + } + charSet = NEXTNUM(&mapping, shorts); + charCode = NEXTNUM(&mapping, shorts); + + /* construct "unmodified" character */ + mapping = _parsedMapping.keyDefs[keyCode]; + modifiers = saveModifiers & ((NX_ALPHASHIFTMASK | NX_SHIFTMASK) >> 16); + + thisMask = NEXTNUM(&mapping, shorts); + if (thisMask && modifiers) + { + adjust = (shorts ? sizeof(short) : sizeof(char)) * 2; + for ( i = 0; i <= _parsedMapping.maxMod; ++i) + { + if (thisMask & 0x01) + { + if (modifiers & 0x01) + mapping += adjust; + adjust *= 2; + } + thisMask >>= 1; + modifiers >>= 1; + } + } + origCharSet = NEXTNUM(&mapping, shorts); + origCharCode = NEXTNUM(&mapping, shorts); + + if (charSet == (unsigned)(shorts ? 
0xFFFF : 0x00FF)) + { + // Process as a character sequence + // charCode holds the sequence number + mapping = _parsedMapping.seqDefs[charCode]; + + origflags = eventFlags; + for(i=0,n=NEXTNUM(&mapping, shorts);ikeyboardEvent(NX_FLAGSCHANGED, + /* flags */ _delegate->deviceFlags(), + /* keyCode */ keyCode, + /* charCode */ 0, + /* charSet */ 0, + /* originalCharCode */ 0, + /* originalCharSet */ 0); + } + else + NEXTNUM(&mapping, shorts); /* Skip over value */ + } + else + { + charCode = NEXTNUM(&mapping, shorts); + _delegate->keyboardEvent(eventType, + /* flags */ eventFlags, + /* keyCode */ keyCode, + /* charCode */ charCode, + /* charSet */ charSet, + /* originalCharCode */ charCode, + /* originalCharSet */ charSet); + } + } + /* Done with macro. Restore the flags if needed. */ + if ( eventFlags != origflags ) + { + _delegate->keyboardEvent(NX_FLAGSCHANGED, + /* flags */ _delegate->deviceFlags(), + /* keyCode */ keyCode, + /* charCode */ 0, + /* charSet */ 0, + /* originalCharCode */ 0, + /* originalCharSet */ 0); + eventFlags = origflags; + } + } + else /* A simple character generating key */ + { + _delegate->keyboardEvent(eventType, + /* flags */ eventFlags, + /* keyCode */ keyCode, + /* charCode */ charCode, + /* charSet */ charSet, + /* originalCharCode */ origCharCode, + /* originalCharSet */ origCharSet); + } + } /* if (mapping) */ + + /* + * Check for a device control key: note that they always have CHARGEN + * bit set + */ + if (_parsedMapping.keyBits[keyCode] & NX_SPECIALKEYMASK) + { + for(i=0; ikeyboardSpecialEvent(eventType, + /* flags */ eventFlags, + /* keyCode */ keyCode, + /* specialty */ i); + /* + * Special keys hack for letting an arbitrary (non-locking) + * key act as a CAPS-LOCK key. If a special CAPS LOCK key + * is designated, and there is no key designated for the + * AlphaLock function, then we'll let the special key toggle + * the AlphaLock state. 
+ */ + if (i == NX_KEYTYPE_CAPS_LOCK + && down == true + && !_parsedMapping.modDefs[NX_MODIFIERKEY_ALPHALOCK] ) + { + unsigned myFlags = _delegate->deviceFlags(); + bool alphaLock = (_delegate->alphaLock() == false); + + // Set delegate's alphaLock state + _delegate->setAlphaLock(alphaLock); + // Update the delegate's flags + if ( alphaLock ) + myFlags |= NX_ALPHASHIFTMASK; + else + myFlags &= ~NX_ALPHASHIFTMASK; + + _delegate->setDeviceFlags(myFlags); + + _delegate->keyboardEvent(NX_FLAGSCHANGED, + /* flags */ myFlags, + /* keyCode */ keyCode, + /* charCode */ 0, + /* charSet */ 0, + /* originalCharCode */ 0, + /* originalCharSet */ 0); + } + else if (i == NX_KEYTYPE_NUM_LOCK + && down == true + && !_parsedMapping.modDefs[NX_MODIFIERKEY_NUMLOCK] ) + { + unsigned myFlags = _delegate->deviceFlags(); + bool numLock = (_delegate->numLock() == false); + + // Set delegate's alphaLock state + _delegate->setNumLock(numLock); + if ( numLock ) + myFlags |= NX_NUMERICPADMASK; + else + myFlags &= ~NX_NUMERICPADMASK; + + _delegate->setDeviceFlags(myFlags); + _delegate->keyboardEvent(NX_FLAGSCHANGED, + /* flags */ myFlags, + /* keyCode */ keyCode, + /* charCode */ 0, + /* charSet */ 0, + /* originalCharCode */ 0, + /* originalCharSet */ 0); + } + + break; + } + } + } +} diff --git a/iokit/Families/IOHIDSystem/IOHIPointing.cpp b/iokit/Families/IOHIDSystem/IOHIPointing.cpp new file mode 100644 index 000000000..9c6719c13 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHIPointing.cpp @@ -0,0 +1,790 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 17 July 1998 sdouglas + * 22 Dec 2000 bubba - save global acceleration state when device is unplugged. + */ + +#include +#include +#include +#include +#include + + +#ifndef abs +#define abs(_a) ((_a >= 0) ? _a : -_a) +#endif + +#define super IOHIDevice + +// Global variable glob_accel won't get clobbered by sleep/wake code, +// which will re-init this driver. The Window Server does not re-send +// the original acceleration value, so we need to restore it on unplugs +// and sleeps. + +IOFixed glob_accel = 0x8000; + +bool HIPointinghasRoot( OSObject * us, void *, IOService * yourDevice ); + +OSDefineMetaClassAndStructors(IOHIPointing, IOHIDevice); + +bool IOHIPointing::init(OSDictionary * properties) +{ + if (!super::init(properties)) return false; + + /* + * Initialize minimal state. + */ + + _fractX = 0; + _fractY = 0; + _acceleration = -1; + _convertAbsoluteToRelative = false; + _contactToMove = false; + _hadContact = false; + _pressureThresholdToClick = 128; + _previousLocation.x = 0; + _previousLocation.y = 0; + _rootDomain = 0; + + _deviceLock = IOLockAlloc(); + + if (!_deviceLock) return false; + + IOLockInit(_deviceLock); + + return true; +} + +bool IOHIPointing::start(IOService * provider) +{ + if (!super::start(provider)) return false; + + /* + * IOHIPointing serves both as a service and a nub (we lead a double + * life). Register ourselves as a nub to kick off matching. 
+ */ + + registerService(); + addNotification( gIOPublishNotification,serviceMatching("IOPMrootDomain"), + (IOServiceNotificationHandler)HIPointinghasRoot, this, 0 ); + + return true; +} + +/* Here are some power management functions so we can tell when system is + going to sleep. We need to remember the acceleration value */ +bool HIPointinghasRoot( OSObject * us, void *, IOService * yourDevice ) +{ + if (( yourDevice != NULL ) && ((IOHIPointing *)us)->_rootDomain == 0) + { + ((IOHIPointing *)us)->_rootDomain = yourDevice; + ((IOPMrootDomain *)yourDevice)->registerInterestedDriver((IOService *) us); + } + return true; +} + +IOReturn IOHIPointing::powerStateWillChangeTo ( IOPMPowerFlags theFlags, unsigned long unused1, + IOService* unused2) +{ + if ( ! (theFlags & IOPMPowerOn) ) + { + glob_accel = _acceleration; //Save old value before driver is torn down + } + return IOPMAckImplied; +} + +IOReturn IOHIPointing::powerStateDidChangeTo ( IOPMPowerFlags theFlags, unsigned long unused1, + IOService* unused2) +{ + if (theFlags & IOPMPowerOn) + { + if (glob_accel > 0x10000) //Just in case saved value is out of bounds + glob_accel = 0x10000; + setupForAcceleration(glob_accel); + updateProperties(); + } + return IOPMAckImplied; +} + + + +void IOHIPointing::free() +// Description: Go Away. Be careful when freeing the lock. 
+{ + glob_accel = _acceleration; +// IOLog("***free -- glob_accel = %08lx\n", glob_accel ); + + if (_deviceLock) + { + IOLock * lock; + + IOTakeLock(_deviceLock); + + lock = _deviceLock; + _deviceLock = NULL; + + IOUnlock(lock); + IOLockFree(lock); + } + if (_rootDomain) + { + _rootDomain->deRegisterInterestedDriver((IOService *) this); + _rootDomain = 0; + } + super::free(); +} + +bool IOHIPointing::open(IOService * client, + IOOptionBits options, + RelativePointerEventAction rpeAction, + AbsolutePointerEventAction apeAction, + ScrollWheelEventAction sweAction) +{ +// IOLog("***open -- glob_accel = %08lx\n", glob_accel ); + if ( (-1 == _acceleration) && (!resetPointer())) return false; +// IOLog("***open -- after reset is called, glob_accel = %08lx\n", glob_accel ); + if (super::open(client, options)) + { + // Note: client object is already retained by superclass' open() + _relativePointerEventTarget = client; + _relativePointerEventAction = rpeAction; + _absolutePointerEventTarget = client; + _absolutePointerEventAction = apeAction; + _scrollWheelEventTarget = client; + _scrollWheelEventAction = sweAction; + return true; + } + + return false; +} + +void IOHIPointing::close(IOService * client, IOOptionBits) +{ + glob_accel = _acceleration; + // IOLog("***close -- glob_accel = %08lx\n", glob_accel ); + + _relativePointerEventAction = NULL; + _relativePointerEventTarget = 0; + _absolutePointerEventAction = NULL; + _absolutePointerEventTarget = 0; + if (_rootDomain) + { + _rootDomain->deRegisterInterestedDriver((IOService *) this); + _rootDomain = 0; + } + super::close(client); +} + +IOHIDKind IOHIPointing::hidKind() +{ + return kHIRelativePointingDevice; +} + +struct CursorDeviceSegment { + SInt32 devUnits; + SInt32 slope; + SInt32 intercept; +}; +typedef struct CursorDeviceSegment CursorDeviceSegment; + +void IOHIPointing::scalePointer(int * dxp, int * dyp) +// Description: Perform pointer acceleration computations here. 
+// Given the resolution, dx, dy, and time, compute the velocity +// of the pointer over a Manhatten distance in inches/second. +// Using this velocity, do a lookup in the pointerScaling table +// to select a scaling factor. Scale dx and dy up as appropriate. +// Preconditions: +// * _deviceLock should be held on entry +{ + + SInt32 dx, dy; + SInt32 absDx, absDy; + SInt32 mag; + IOFixed scale; + CursorDeviceSegment * segment; + + if( !_scaleSegments) + return; + + dx = (*dxp) << 16; + dy = (*dyp) << 16; + absDx = (dx < 0) ? -dx : dx; + absDy = (dy < 0) ? -dy : dy; + + if( absDx > absDy) + mag = (absDx + (absDy / 2)); + else + mag = (absDy + (absDx / 2)); + + if( !mag) + return; + + // scale + for( + segment = (CursorDeviceSegment *) _scaleSegments; + mag > segment->devUnits; + segment++) {} + + scale = IOFixedDivide( + segment->intercept + IOFixedMultiply( mag, segment->slope ), + mag ); + + dx = IOFixedMultiply( dx, scale ); + dy = IOFixedMultiply( dy, scale ); + + // add fract parts + dx += _fractX; + dy += _fractY; + + *dxp = dx / 65536; + *dyp = dy / 65536; + + // get fractional part with sign extend + if( dx >= 0) + _fractX = dx & 0xffff; + else + _fractX = dx | 0xffff0000; + if( dy >= 0) + _fractY = dy & 0xffff; + else + _fractY = dy | 0xffff0000; +} + +/* + Routine: Interpolate + This routine interpolates to find a point on the line [x1,y1] [x2,y2] which + is intersected by the line [x3,y3] [x3,y"]. The resulting y' is calculated + by interpolating between y3 and y", towards the higher acceleration curve. 
+*/ + +static SInt32 Interpolate( SInt32 x1, SInt32 y1, + SInt32 x2, SInt32 y2, + SInt32 x3, SInt32 y3, + SInt32 scale, Boolean lower ) +{ + + SInt32 slope; + SInt32 intercept; + SInt32 resultY; + + slope = IOFixedDivide( y2 - y1, x2 - x1 ); + intercept = y1 - IOFixedMultiply( slope, x1 ); + resultY = intercept + IOFixedMultiply( slope, x3 ); + if( lower) + resultY = y3 - IOFixedMultiply( scale, y3 - resultY ); + else + resultY = resultY + IOFixedMultiply( scale, y3 - resultY ); + + return( resultY ); +} + + +static SInt32 Fetch32( const UInt16 * p ) +{ + SInt32 result; + + result = (*(p++)) << 16; + result |= (*(p++)); + + return( result ); +} + +void IOHIPointing::setupForAcceleration( IOFixed desired ) +{ + OSData * data; + const UInt16 * lowTable = 0; + const UInt16 * highTable; + + SInt32 x1, y1, x2, y2, x3, y3; + SInt32 prevX1, prevY1; + SInt32 upperX, upperY; + SInt32 lowerX, lowerY; + SInt32 lowAccl = 0, lowPoints = 0; + SInt32 highAccl, highPoints; + SInt32 scale; + UInt32 count; + Boolean lower; + + SInt32 pointerResolution = resolution(); + SInt32 frameRate = (67 << 16); + SInt32 screenResolution = (72 << 16); + SInt32 devScale, crsrScale; + SInt32 scaledX1, scaledY1; + SInt32 scaledX2, scaledY2; + + CursorDeviceSegment * segments; + CursorDeviceSegment * segment; + SInt32 segCount; + + assert(pointerResolution); + data = copyAccelerationTable(); + if( !data) + return; + + if( desired < (IOFixed) 0) { + // disabling mouse scaling + if(_scaleSegments && _scaleSegCount) + IODelete( _scaleSegments, + CursorDeviceSegment, _scaleSegCount ); + _scaleSegments = NULL; + _scaleSegCount = 0; + data->release(); + return; + } + + highTable = (const UInt16 *) data->getBytesNoCopy(); + + devScale = IOFixedDivide( pointerResolution, frameRate ); + crsrScale = IOFixedDivide( screenResolution, frameRate ); + + scaledX1 = scaledY1 = 0; + + scale = Fetch32( highTable ); + highTable += 4; + + _acceleration = desired; + + // normalize table's default (scale) to 0.5 + if( 
desired > 0x8000) { + desired = IOFixedMultiply( desired - 0x8000, + 0x10000 - scale ); + desired <<= 1; + desired += scale; + } else { + desired = IOFixedMultiply( desired, scale ); + desired <<= 1; + } + if( desired > (1 << 16)) + desired = (1 << 16); + + count = *(highTable++); + + // find curves bracketing the desired value + do { + highAccl = Fetch32( highTable ); + highTable += 2; + highPoints = *(highTable++); + + if( desired <= highAccl) + break; + + lowTable = highTable; + lowAccl = highAccl; + lowPoints = highPoints; + highTable += lowPoints * 4; + + } while( true ); + + // scale between the two + if( lowTable) + scale = IOFixedDivide( desired - lowAccl, + highAccl - lowAccl ); + // or take all the high one + else { + scale = (1 << 16); + lowTable = highTable; + lowAccl = highAccl; + lowPoints = 0; + } + + if( lowPoints > highPoints) + segCount = lowPoints; + else + segCount = highPoints; + segCount *= 2; +/* IOLog("lowPoints %ld, highPoints %ld, segCount %ld\n", + lowPoints, highPoints, segCount); */ + segments = IONew( CursorDeviceSegment, segCount ); + assert( segments ); + segment = segments; + + x1 = prevX1 = y1 = prevY1 = 0; + + lowerX = Fetch32( lowTable ); + lowTable += 2; + lowerY = Fetch32( lowTable ); + lowTable += 2; + upperX = Fetch32( highTable ); + highTable += 2; + upperY = Fetch32( highTable ); + highTable += 2; + + do { + // consume next point from first X + lower = (lowPoints && (!highPoints || (lowerX <= upperX))); + + if( lower) { + /* highline */ + x2 = upperX; + y2 = upperY; + x3 = lowerX; + y3 = lowerY; + if( lowPoints && (--lowPoints)) { + lowerX = Fetch32( lowTable ); + lowTable += 2; + lowerY = Fetch32( lowTable ); + lowTable += 2; + } + } else { + /* lowline */ + x2 = lowerX; + y2 = lowerY; + x3 = upperX; + y3 = upperY; + if( highPoints && (--highPoints)) { + upperX = Fetch32( highTable ); + highTable += 2; + upperY = Fetch32( highTable ); + highTable += 2; + } + } + { + // convert to line segment + assert( segment < (segments 
+ segCount) ); + + scaledX2 = IOFixedMultiply( devScale, /* newX */ x3 ); + scaledY2 = IOFixedMultiply( crsrScale, + /* newY */ Interpolate( x1, y1, x2, y2, x3, y3, + scale, lower ) ); + if( lowPoints || highPoints) + segment->devUnits = scaledX2; + else + segment->devUnits = 0x7fffffff; + segment->slope = IOFixedDivide( scaledY2 - scaledY1, + scaledX2 - scaledX1 ); + segment->intercept = scaledY2 + - IOFixedMultiply( segment->slope, scaledX2 ); +/* IOLog("devUnits = %08lx, slope = %08lx, intercept = %08lx\n", + segment->devUnits, segment->slope, segment->intercept); */ + + scaledX1 = scaledX2; + scaledY1 = scaledY2; + segment++; + } + + // continue on from last point + if( lowPoints && highPoints) { + if( lowerX > upperX) { + prevX1 = x1; + prevY1 = y1; + } else { + /* swaplines */ + prevX1 = x1; + prevY1 = y1; + x1 = x3; + y1 = y3; + } + } else { + x2 = x1; + y2 = y1; + x1 = prevX1; + y1 = prevY1; + prevX1 = x2; + prevY1 = y2; + } + + } while( lowPoints || highPoints ); + + if( _scaleSegCount && _scaleSegments) + IODelete( _scaleSegments, + CursorDeviceSegment, _scaleSegCount ); + _scaleSegCount = segCount; + _scaleSegments = (void *) segments; + + _fractX = _fractY = 0; + + data->release(); +} + + +bool IOHIPointing::resetPointer() +{ + IOTakeLock( _deviceLock); + + _buttonMode = NX_RightButton; +// IOLog("***resetPointer -- glob_accel = %08lx", glob_accel ); + if( glob_accel > 0 ) + { + // Restore the last acceleration value, since we may have been hot + // unplugged and re-plugged. 
+ setupForAcceleration(glob_accel); + } + else + { + setupForAcceleration(0x8000); + } + updateProperties(); + + IOUnlock( _deviceLock); + return true; +} + +void IOHIPointing::dispatchAbsolutePointerEvent(Point * newLoc, + Bounds * bounds, + UInt32 buttonState, + bool proximity, + int pressure, + int pressureMin, + int pressureMax, + int stylusAngle, + AbsoluteTime ts) +{ + int buttons = 0; + int dx, dy; + + IOTakeLock(_deviceLock); + + if (buttonState & 1) { + buttons |= EV_LB; + } + + if (buttonCount() > 1) { + if (buttonState & -2) { // any other buttons + buttons |= EV_RB; + } + } + + if ((_pressureThresholdToClick < 255) && ((pressure - pressureMin) > ((pressureMax - pressureMin) * _pressureThresholdToClick / 256))) { + buttons |= EV_LB; + } + + if (_buttonMode == NX_OneButton) { + if ((buttons & (EV_LB|EV_RB)) != 0) { + buttons = EV_LB; + } + } + + if (_convertAbsoluteToRelative) { + dx = newLoc->x - _previousLocation.x; + dy = newLoc->y - _previousLocation.y; + + if ((_contactToMove && !_hadContact && (pressure > pressureMin)) || (abs(dx) > ((bounds->maxx - bounds->minx) / 20)) || (abs(dy) > ((bounds->maxy - bounds->miny) / 20))) { + dx = 0; + dy = 0; + } else { + scalePointer(&dx, &dy); + } + + _previousLocation.x = newLoc->x; + _previousLocation.y = newLoc->y; + } + + IOUnlock(_deviceLock); + + _hadContact = (pressure > pressureMin); + + if (!_contactToMove || (pressure > pressureMin)) { + pressure -= pressureMin; + if (pressure > 255) { + pressure = 255; + } + if (_convertAbsoluteToRelative) { + if (_relativePointerEventAction && _relativePointerEventTarget) { + (*_relativePointerEventAction)(_relativePointerEventTarget, + buttons, + dx, + dy, + ts); + } + } else { + if (_absolutePointerEventAction && _absolutePointerEventTarget) { + (*_absolutePointerEventAction)(_absolutePointerEventTarget, + buttons, + newLoc, + bounds, + proximity, + pressure, + stylusAngle, + ts); + } + } + } + + return; +} + +void IOHIPointing::dispatchRelativePointerEvent(int dx, 
+ int dy, + UInt32 buttonState, + AbsoluteTime ts) +{ + int buttons; + + IOTakeLock( _deviceLock); + + buttons = 0; + + if( buttonState & 1) + buttons |= EV_LB; + + if( buttonCount() > 1) { + if( buttonState & 2) // any others down + buttons |= EV_RB; + // Other magic bit reshuffling stuff. It seems there was space + // left over at some point for a "middle" mouse button between EV_LB and EV_RB + if(buttonState & 4) + buttons |= 2; + // Add in the rest of the buttons in a linear fasion... + buttons |= buttonState & ~0x7; + } + + // Perform pointer acceleration computations + scalePointer(&dx, &dy); + + // Perform button tying and mapping. This + // stuff applies to relative posn devices (mice) only. + if ( _buttonMode == NX_OneButton ) + { + // Remap both Left and Right (but no others?) to Left. + if ( (buttons & (EV_LB|EV_RB)) != 0 ) { + buttons |= EV_LB; + buttons &= ~EV_RB; + } + } + else if ( (buttonCount() > 1) && (_buttonMode == NX_LeftButton) ) + // Menus on left button. Swap! + { + int temp = 0; + if ( buttons & EV_LB ) + temp = EV_RB; + if ( buttons & EV_RB ) + temp |= EV_LB; + // Swap Left and Right, preserve everything else + buttons = (buttons & ~(EV_LB|EV_RB)) | temp; + } + IOUnlock( _deviceLock); + + if (_relativePointerEventAction) /* upstream call */ + { + (*_relativePointerEventAction)(_relativePointerEventTarget, + /* buttons */ buttons, + /* deltaX */ dx, + /* deltaY */ dy, + /* atTime */ ts); + } +} + +void IOHIPointing::dispatchScrollWheelEvent(short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + AbsoluteTime ts) +{ + if (_scrollWheelEventAction) { + (*_scrollWheelEventAction)(_scrollWheelEventTarget, + deltaAxis1, + deltaAxis2, + deltaAxis3, + ts); + } +} + +bool IOHIPointing::updateProperties( void ) +{ + bool ok; + UInt32 res = resolution(); + + ok = setProperty( kIOHIDPointerResolutionKey, &res, sizeof( res)) + & setProperty( kIOHIDPointerAccelerationKey, &_acceleration, + sizeof( _acceleration)) + & setProperty( 
kIOHIDPointerConvertAbsoluteKey, &_convertAbsoluteToRelative, + sizeof( _convertAbsoluteToRelative)) + & setProperty( kIOHIDPointerContactToMoveKey, &_contactToMove, + sizeof( _contactToMove)); + + return( ok & super::updateProperties() ); +} + +IOReturn IOHIPointing::setParamProperties( OSDictionary * dict ) +{ + OSData * data; + IOReturn err = kIOReturnSuccess; + bool updated = false; + UInt8 * bytes; + + IOTakeLock( _deviceLock); + if( (data = OSDynamicCast( OSData, + dict->getObject(kIOHIDPointerAccelerationKey)))) { + + setupForAcceleration( *((IOFixed *)data->getBytesNoCopy()) ); + updated = true; + } + IOUnlock( _deviceLock); + + if( dict->getObject(kIOHIDResetPointerKey)) + resetPointer(); + + if ((data = OSDynamicCast(OSData, + dict->getObject(kIOHIDPointerConvertAbsoluteKey)))) { + bytes = (UInt8 *) data->getBytesNoCopy(); + _convertAbsoluteToRelative = (bytes[0] != 0) ? true : false; + updated = true; + } + + if ((data = OSDynamicCast(OSData, + dict->getObject(kIOHIDPointerContactToMoveKey)))) { + bytes = (UInt8 *) data->getBytesNoCopy(); + _contactToMove = (bytes[0] != 0) ? 
true : false; + updated = true; + } + + if( updated ) + updateProperties(); + + return( err ); +} + +// subclasses override + +IOItemCount IOHIPointing::buttonCount() +{ + return (1); +} + +IOFixed IOHIPointing::resolution() +{ + return (100 << 16); +} + +OSData * IOHIPointing::copyAccelerationTable() +{ +static const UInt16 accl[] = { + 0x0000, 0x8000, + 0x4032, 0x3030, 0x0002, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, + 0x0001, 0x0000, 0x0001, 0x0000, 0x0009, 0x0000, 0x713B, 0x0000, + 0x6000, 0x0004, 0x4EC5, 0x0010, 0x8000, 0x000C, 0x0000, 0x005F, + 0x0000, 0x0016, 0xEC4F, 0x008B, 0x0000, 0x001D, 0x3B14, 0x0094, + 0x8000, 0x0022, 0x7627, 0x0096, 0x0000, 0x0024, 0x6276, 0x0096, + 0x0000, 0x0026, 0x0000, 0x0096, 0x0000, 0x0028, 0x0000, 0x0096, + 0x0000 +}; + + OSData * data = OSDynamicCast( OSData, + getProperty( "HIDPointerAccelerationTable" )); + if( data) + data->retain(); + else + data = OSData::withBytesNoCopy( accl, sizeof( accl ) ); + + return( data ); +} diff --git a/iokit/Families/IOHIDSystem/IOHITablet.cpp b/iokit/Families/IOHIDSystem/IOHITablet.cpp new file mode 100644 index 000000000..8ab0e26a6 --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHITablet.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +OSDefineMetaClassAndStructors(IOHITablet, IOHIPointing); + +UInt16 IOHITablet::generateTabletID() +{ + static UInt16 _nextTabletID = 0; + return _nextTabletID++; +} + +bool IOHITablet::init(OSDictionary *propTable) +{ + if (!IOHIPointing::init(propTable)) { + return false; + } + + _systemTabletID = generateTabletID(); + setProperty(kIOHISystemTabletID, (unsigned long long)_systemTabletID, 16); + + return true; +} + +bool IOHITablet::open(IOService *client, + IOOptionBits options, + RelativePointerEventAction rpeAction, + AbsolutePointerEventAction apeAction, + ScrollWheelEventAction sweAction, + TabletEventAction tabletAction, + ProximityEventAction proximityAction) +{ + if (!IOHIPointing::open(client, options, rpeAction, apeAction, sweAction)) { + return false; + } + + _tabletEventTarget = client; + _tabletEventAction = tabletAction; + _proximityEventTarget = client; + _proximityEventAction = proximityAction; + + return true; +} + +void IOHITablet::dispatchTabletEvent(NXEventData *tabletEvent, + AbsoluteTime ts) +{ + if (_tabletEventAction) { + (*_tabletEventAction)(_tabletEventTarget, + tabletEvent, + ts); + } +} + +void IOHITablet::dispatchProximityEvent(NXEventData *proximityEvent, + AbsoluteTime ts) +{ + if (_proximityEventAction) { + (*_proximityEventAction)(_proximityEventTarget, + proximityEvent, + ts); + } +} + +bool IOHITablet::startTabletPointer(IOHITabletPointer *pointer, OSDictionary *properties) +{ + bool result = false; + + do { + if 
(!pointer) + break; + + if (!pointer->init(properties)) + break; + + if (!pointer->attach(this)) + break; + + if (!pointer->start(this)) + break; + + result = true; + } while (false); + + return result; +} + diff --git a/iokit/Families/IOHIDSystem/IOHITabletPointer.cpp b/iokit/Families/IOHIDSystem/IOHITabletPointer.cpp new file mode 100644 index 000000000..91837cb5c --- /dev/null +++ b/iokit/Families/IOHIDSystem/IOHITabletPointer.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +OSDefineMetaClassAndStructors(IOHITabletPointer, IOHIDevice) + +UInt16 IOHITabletPointer::generateDeviceID() +{ + static _nextDeviceID = 0; + return _nextDeviceID++; +} + +bool IOHITabletPointer::init( OSDictionary *propTable ) +{ + if (!IOHIDevice::init(propTable)) { + return false; + } + + _deviceID = generateDeviceID(); + setProperty(kIOHITabletPointerDeviceID, (unsigned long long)_deviceID, 16); + + return true; +} + +bool IOService::attach( IOService * provider ) +{ + if (!IOHIDevice::attach(provider)) { + return false; + } + + _tablet = OSDynamicCast(IOHITablet, provider); + + return true; +} + +void IOHITabletPointer::dispatchTabletEvent(NXEventData *tabletEvent, + AbsoluteTime ts) +{ + if (_tablet) { + _tablet->dispatchTabletEvent(tabletEvent, ts); + } +} + +void IOHITabletPointer::dispatchProximityEvent(NXEventData *proximityEvent, + AbsoluteTime ts) +{ + if (_tablet) { + _tablet->dispatchProximityEvent(proximityEvent, ts); + } +} diff --git a/iokit/Families/IONDRVSupport/IONDRV.cpp b/iokit/Families/IONDRVSupport/IONDRV.cpp new file mode 100644 index 000000000..db912cf14 --- /dev/null +++ b/iokit/Families/IONDRVSupport/IONDRV.cpp @@ -0,0 +1,322 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 21 Jul 98 - start IOKit + * sdouglas 14 Dec 98 - start cpp. + */ + + + +#include +#include + +extern "C" { +#include +}; + +#include "IONDRV.h" +#include "IOPEFLoader.h" + +#define LOG if(1) kprintf + +#define USE_TREE_NDRVS 1 +#define USE_ROM_NDRVS 1 + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super OSObject + +OSDefineMetaClassAndStructors(IONDRV, OSObject) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IONDRV * IONDRV::instantiate( IORegistryEntry * regEntry, + IOLogicalAddress container, + IOByteCount containerSize, + IONDRVUndefinedSymbolHandler undefHandler, + void * self ) +{ + OSStatus err = 1; + IONDRV * inst; + + inst = new IONDRV; + + if( inst) do { + if( false == inst->init()) + continue; + + err = PCodeOpen( (void *)container, containerSize, &inst->pcInst ); + if( err) + continue; + + err = PCodeInstantiate( inst->pcInst, undefHandler, self ); + if( err) + continue; + + inst->getSymbol( "DoDriverIO", + (IOLogicalAddress *) &inst->fDoDriverIO ); + if( kIOReturnSuccess == inst->getSymbol( "TheDriverDescription", + (IOLogicalAddress *) &inst->theDriverDesc )) { + + char * name; + int plen; + + name = (char *) inst->theDriverDesc->driverOSRuntimeInfo.driverName; + plen = name[ 0 ]; + strncpy( name, name + 1, plen); + name[ plen ] = 0; + + kprintf("ndrv 
version %08x\n", + inst->theDriverDesc-> driverType.version); + } + + } while( false); + + if( inst && err) { + inst->release(); + inst = 0; + } + + return( inst ); +} + +void IONDRV::free( void ) +{ + if( pcInst) + PCodeClose( pcInst ); + super::free(); +} + +IOReturn IONDRV::getSymbol( const char * symbolName, + IOLogicalAddress * address ) +{ + OSStatus err; + + err = PCodeFindExport( pcInst, symbolName, + (LogicalAddress *)address, NULL ); + if( err) + *address = 0; + + return( err); +} + +#if 0 + if( (err = NDRVGetShimClass( ioDevice, instance, 0, classNames )) + ) continue; + err = [propTable createProperty:"AAPL,dk_Driver Name" flags:0 + value:classNames length:strlen( classNames) ]; + err = [propTable createProperty:"AAPL,dk_Server Name" flags:0 + value:classNames length:strlen( classNames) ]; + +OSStatus NDRVGetShimClass( id ioDevice, NDRVInstance instance, UInt32 serviceIndex, char * className ) +{ + NDRVInstanceVars * ndrvInst = (NDRVInstanceVars *) instance; + OSStatus err; + static const char * driverDescProperty = "TheDriverDescription"; + static const char * frameBufferShim = "IONDRVFramebuffer"; + DriverDescription * desc; + UInt32 serviceType; + + className[ 0 ] = 0; + do { + err = PCodeFindExport( ndrvInst->pcInst, driverDescProperty, (IOLogicalAddress *)&desc, NULL ); + if( err) continue; + + if( desc->driverDescSignature != kTheDescriptionSignature) { + err = -1; + continue; + } + if( serviceIndex >= desc->driverServices.nServices) { + err = -1; + continue; + } + + serviceType = desc->driverServices.service[ serviceIndex ].serviceType; + switch( desc->driverServices.service[ serviceIndex ].serviceCategory) { + + case kServiceCategoryNdrvDriver: + if( serviceType == kNdrvTypeIsVideo) { + strcpy( className, frameBufferShim); + break; + } + default: + err = -1; + } + } while( false); + + return( err); +} +#endif + + + +IOReturn IONDRV::doDriverIO( UInt32 commandID, void * contents, + UInt32 commandCode, UInt32 commandKind ) +{ + OSStatus err; + + 
if( 0 == fDoDriverIO) + return( kIOReturnUnsupported ); + + err = CallTVector( /*AddressSpaceID*/ 0, (void *)commandID, contents, + (void *)commandCode, (void *)commandKind, /*p6*/ 0, + fDoDriverIO ); + +#if 0 + if( err) { + UInt32 i; + static const char * commands[] = + { "kOpenCommand", "kCloseCommand", + "kReadCommand", "kWriteCommand", + "kControlCommand", "kStatusCommand", "kKillIOCommand", + "kInitializeCommand", "kFinalizeCommand", + "kReplaceCommand", "kSupersededCommand" }; + + LOG("Driver failed (%d) on %s : ", err, commands[ commandCode ] ); + + switch( commandCode) { + case kControlCommand: + case kStatusCommand: + LOG("%d : ", ((UInt16 *)contents)[ 0x1a / 2 ]); + contents = ((void **)contents)[ 0x1c / 4 ]; + for( i = 0; i<5; i++ ) + LOG("%08x, ", ((UInt32 *)contents)[i] ); + break; + } + LOG("\n"); + } +#endif + + return( err); +} + + +IONDRV * IONDRV::fromRegistryEntry( IORegistryEntry * regEntry, + IONDRVUndefinedSymbolHandler handler, + void * self ) +{ + IOLogicalAddress pef = 0; + IOByteCount propSize = 0; + OSData * prop; + IONDRV * inst; + + inst = (IONDRV *) regEntry->getProperty("AAPL,ndrvInst"); + if( inst) { + inst->retain(); + return( inst ); + } + + prop = (OSData *) regEntry->getProperty( "driver,AAPL,MacOS,PowerPC" ); + if( USE_TREE_NDRVS && prop) { + pef = (IOLogicalAddress) prop->getBytesNoCopy(); + propSize = prop->getLength(); + } + + // God awful hack: + // Some onboard devices don't have the ndrv in the tree. The booter + // can load & match PEF's but only from disk, not network boots. 
+ +#if USE_ROM_NDRVS + if( !pef && (0 == strcmp( regEntry->getName(), "ATY,mach64_3DU")) ) { + + int * patch; + + patch = (int *) 0xffe88140; + propSize = 0x10a80; + + // Check ati PEF exists there + if( patch[ 0x1f0 / 4 ] == 'ATIU') { + + pef = (IOLogicalAddress) IOMalloc( propSize ); + bcopy( (void *) patch, (void *) pef, propSize ); + } + } + + if( !pef && (0 == strcmp( regEntry->getName(), "ATY,mach64_3DUPro")) ) { + + int * patch; + + patch = (int *) 0xffe99510; + propSize = 0x12008; + // Check ati PEF exists there + if( patch[ 0x1fc / 4 ] != 'ATIU') { + + // silk version + patch = (int *) 0xffe99550; + propSize = 0x12058; + if( patch[ 0x1fc / 4 ] != 'ATIU') + propSize = 0; + } + + if( propSize) { + pef = (IOLogicalAddress) IOMalloc( propSize ); + bcopy( (void *) patch, (void *) pef, propSize ); + } + } + + if( !pef && (0 == strcmp( regEntry->getName(), "control")) ) { + +#define ins(i,d,a,simm) ((i<<26)+(d<<21)+(a<<16)+simm) + int * patch; + + patch = (int *) 0xffe6bd50; + propSize = 0xac10; + + // Check control PEF exists there + if( patch[ 0x41ac / 4 ] == ins( 32, 3, 0, 0x544)) { // lwz r3,0x544(0) + + pef = (IOLogicalAddress) IOMalloc( propSize ); + bcopy( (void *) patch, (void *) pef, propSize ); + patch = (int *) pef; + // patch out low mem accesses + patch[ 0x8680 / 4 ] = ins( 14, 12, 0, 0); // addi r12,0,0x0 + patch[ 0x41ac / 4 ] = ins( 14, 3, 0, 0x544); // addi r3,0,0x544; + patch[ 0x8fa0 / 4 ] = ins( 14, 3, 0, 0x648); // addi r3,0,0x648; + } + } +#endif + + if( pef) { + kprintf( "pef = %08x, %08x\n", pef, propSize ); + inst = IONDRV::instantiate( regEntry, pef, propSize, handler, self ); + if( inst ) + regEntry->setProperty( "AAPL,ndrvInst", inst); + + } else + inst = 0; + + return( inst ); +} + +const char * IONDRV::driverName( void ) +{ + return( (const char *) theDriverDesc->driverOSRuntimeInfo.driverName); +} + + diff --git a/iokit/Families/IONDRVSupport/IONDRV.h b/iokit/Families/IONDRVSupport/IONDRV.h new file mode 100644 index 
000000000..f4f05282a --- /dev/null +++ b/iokit/Families/IONDRVSupport/IONDRV.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. 
+ * sdouglas 21 July 98 - start IOKit + */ + + +#ifndef __IONDRV__ +#define __IONDRV__ + +#include +#include + +#include +#include + +#pragma options align=mac68k + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void * RegEntryID[4]; + +struct DriverInitInfo { + UInt16 refNum; + RegEntryID deviceEntry; +}; + +#define MAKE_REG_ENTRY(regEntryID,obj) \ + regEntryID[ 0 ] = (void *) obj; \ + regEntryID[ 1 ] = (void *) ~(UInt32)obj; \ + regEntryID[ 2 ] = (void *) 0x53696d65; \ + regEntryID[ 3 ] = (void *) 0x52756c7a; + +#define REG_ENTRY_TO_OBJ(regEntryID,obj) \ + if( (UInt32)((obj = ((IORegistryEntry **)regEntryID)[ 0 ])) \ + != ~((UInt32 *)regEntryID)[ 1 ] ) \ + return( -2538); + +#define REG_ENTRY_TO_OBJ_RET(regEntryID,obj,ret) \ + if( (UInt32)((obj = ((IORegistryEntry **)regEntryID)[ 0 ])) \ + != ~((UInt32 *)regEntryID)[ 1 ] ) \ + return( ret); + +#define REG_ENTRY_TO_PT(regEntryID,obj) \ + IORegistryEntry * obj; \ + if( (UInt32)((obj = ((IORegistryEntry **)regEntryID)[ 0 ])) \ + != ~((UInt32 *)regEntryID)[ 1 ] ) \ + return( -2538); + +#define REG_ENTRY_TO_SERVICE(regEntryID,type,obj) \ + IORegistryEntry * regEntry; \ + type * obj; \ + if( (UInt32)((regEntry = ((IORegistryEntry **)regEntryID)[ 0 ])) \ + != ~((UInt32 *)regEntryID)[ 1 ] ) \ + return( -2538); \ + if( 0 == (obj = OSDynamicCast( type, regEntry))) \ + return( -2542); + +struct CntrlParam { + void * qLink; + short qType; + short ioTrap; + void * ioCmdAddr; + void * ioCompletion; + short ioResult; + char * ioNamePtr; + short ioVRefNum; + short ioCRefNum; + short csCode; + void * csParams; + short csParam[9]; +}; +typedef struct CntrlParam CntrlParam, *CntrlParamPtr; + +#pragma options align=reset + +enum { + kOpenCommand = 0, + kCloseCommand = 1, + kReadCommand = 2, + kWriteCommand = 3, + kControlCommand = 4, + kStatusCommand = 5, + kKillIOCommand = 6, + kInitializeCommand = 7, /* init driver and device*/ + kFinalizeCommand = 8, /* shutdown driver and device*/ + kReplaceCommand = 9, /* replace an old 
driver*/ + kSupersededCommand = 10 /* prepare to be replaced by a new driver*/ +}; +enum { + kSynchronousIOCommandKind = 0x00000001, + kAsynchronousIOCommandKind = 0x00000002, + kImmediateIOCommandKind = 0x00000004 +}; + + +extern OSStatus CallTVector( + void * p1, void * p2, void * p3, void * p4, void * p5, void * p6, + struct IOTVector * entry ); + +#ifdef __cplusplus +} +#endif + +class IONDRV : public OSObject +{ + OSDeclareDefaultStructors(IONDRV) + +private: + void * pcInst; + struct IOTVector * fDoDriverIO; + struct DriverDescription * theDriverDesc; + +public: + static IONDRV * instantiate( IORegistryEntry * regEntry, + IOLogicalAddress container, + IOByteCount containerSize, + IONDRVUndefinedSymbolHandler handler, + void * self ); + + static IONDRV * fromRegistryEntry( IORegistryEntry * regEntry, + IONDRVUndefinedSymbolHandler handler, + void * self); + + virtual void free( void ); + + virtual IOReturn getSymbol( const char * symbolName, + IOLogicalAddress * address ); + + virtual const char * driverName( void ); + + virtual IOReturn doDriverIO( UInt32 commandID, void * contents, + UInt32 commandCode, UInt32 commandKind ); + +}; + +struct IONDRVInterruptSource { + void * refCon; + struct IOTVector * handler; + struct IOTVector * enabler; + struct IOTVector * disabler; + bool registered; + bool enabled; +}; + +class IONDRVInterruptSet : public OSObject { + + OSDeclareDefaultStructors(IONDRVInterruptSet) + +public: + IOService * provider; + IOOptionBits options; + UInt32 count; + IONDRVInterruptSource * sources; + IONDRVInterruptSet * child; + + static IONDRVInterruptSet * with(IOService * provider, + IOOptionBits options, SInt32 count); + void free(); +}; + +#endif /* __IONDRV__ */ + diff --git a/iokit/Families/IONDRVSupport/IONDRVFramebuffer.cpp b/iokit/Families/IONDRVSupport/IONDRVFramebuffer.cpp new file mode 100644 index 000000000..2fe80f342 --- /dev/null +++ b/iokit/Families/IONDRVSupport/IONDRVFramebuffer.cpp @@ -0,0 +1,1965 @@ +/* + * Copyright (c) 
1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 24 Jul 98 - start IOKit. + * sdouglas 15 Dec 98 - cpp. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "IONDRV.h" + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOATINDRV : public IONDRVFramebuffer +{ + OSDeclareDefaultStructors(IOATINDRV) + +public: + virtual IOReturn getStartupDisplayMode( IODisplayModeID * displayMode, + IOIndex * depth ); + virtual IODeviceMemory * findVRAM( void ); + +}; + +class IOATI128NDRV : public IOATINDRV +{ + OSDeclareDefaultStructors(IOATI128NDRV) + +public: + virtual void flushCursor( void ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct _VSLService { + class IONDRVFramebuffer * framebuffer; + IOSelect type; + IOFBInterruptProc handler; + OSObject * target; + void * ref; + _VSLService * next; +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// IONDRVFramebuffer has two power states, off and on + +enum { kIONDRVFramebufferPowerStateCount = 2 }; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOFramebuffer + +OSDefineMetaClassAndStructors(IONDRVFramebuffer, IOFramebuffer) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +//============ +//= External = +//============ + +IOService * IONDRVFramebuffer::probe( IOService * provider, + SInt32 * score ) +{ + IOService * inst = this; + IOService * newInst = 0; + const char * name; + + if( !super::probe( provider, score )) + return( 0 ); + + if( 0 != provider->getProperty(kIONDRVIgnoreKey)) return( 0 ); + + if( /* IONDRV::fromRegistryEntry( provider ) || */ true) { + + provider->setProperty(kIONDRVForXKey, this, sizeof(this) ); + + // temporary for in-kernel acceleration + name = provider->getName(); + if( 0 == strncmp("ATY,Rage128", name, strlen("ATY,Rage128"))) + newInst = new IOATI128NDRV; + else if( 0 == strncmp("ATY,", name, strlen("ATY,"))) + newInst = new 
IOATINDRV; + + if( newInst) { + if( ! newInst->init( inst->getPropertyTable())) { + newInst->release(); + newInst = 0; + } + inst = newInst; + } + } else + inst = 0; + + return( inst ); +} + +IOReturn IONDRVFramebuffer::setProperties( OSObject * properties ) +{ + OSDictionary * dict; + OSData * data; + IOReturn kr = kIOReturnUnsupported; + + if( !(dict = OSDynamicCast( OSDictionary, properties))) + return( kIOReturnBadArgument); + + if( (data = OSDynamicCast(OSData, + dict->getObject("driver,AAPL,MacOS,PowerPC")))) { + + if( ndrvState) + return( kIOReturnStillOpen ); + + if( ndrv) + ndrv->release(); + nub->setProperty("driver,AAPL,MacOS,PowerPC", data); + nub->removeProperty("AAPL,ndrvInst"); + ndrv = IONDRV::fromRegistryEntry( nub, &_undefinedSymbolHandler, (void *) this ); + if( ndrv) + setName( ndrv->driverName()); + + kr = kIOReturnSuccess; + } + + return( kr ); +} + +static IOPMrootDomain * gIOPMRootDomain; + +bool IONDRVFramebuffer::start( IOService * provider ) +{ + bool ok = false; + IOService * parent; + OSData * data; + + do { + cachedVDResolution.csDisplayModeID = kDisplayModeIDInvalid; + + nub = provider; + ndrv = IONDRV::fromRegistryEntry( provider, &_undefinedSymbolHandler, this ); + if( ndrv) + setName( ndrv->driverName()); + startAt8 = 3; + consoleDevice = (0 != provider->getProperty("AAPL,boot-display")); + powerState = 1; + + if( 0 == nub->getDeviceMemoryCount()) { + parent = OSDynamicCast( IOService, nub->getParentEntry(gIODTPlane)); + if( parent) { + parent->getResources(); + OSArray * array = parent->getDeviceMemory(); + array->retain(); + nub->setDeviceMemory( array); + array->release(); + } + } + + if( false == super::start( nub )) + continue; + + if( !gIOPMRootDomain) + gIOPMRootDomain = (IOPMrootDomain *) + IORegistryEntry::fromPath("/IOPowerConnection/IOPMrootDomain", gIOPowerPlane); + if( gIOPMRootDomain) + platformDeepSleep = (gIOPMRootDomain->getSleepSupported() + & kFrameBufferDeepSleepSupported) ? 
1 : 0; + + // default flags can be overriden + accessFlags = 0; + if(0 == strncmp("3Dfx", provider->getName(), strlen("3Dfx"))) + accessFlags |= kFramebufferDisableAltivecAccess; + + if( (data = OSDynamicCast( OSData, provider->getProperty(kIOFBHostAccessFlagsKey)))) + accessFlags = *((UInt32 *) data->getBytesNoCopy()); + + ok = true; // Success + + } while( false); + + return( ok); +} + +bool IONDRVFramebuffer::isConsoleDevice( void ) +{ + return( consoleDevice ); +} + +// osfmk/ppc/mappings.h +extern "C" { extern void ignore_zero_fault(boolean_t); } + +IOReturn IONDRVFramebuffer::enableController( void ) +{ + IOReturn err; + const char * logname; + OSData * data; + + logname = getProvider()->getName(); + + if( 0 == strcmp( "control", logname)) + waitForService( resourceMatching( "IOiic0" )); + + if( 0 == ndrv) + err = kIOReturnNotReady; + else + err = IONDRVLibrariesInitialize( getProvider() ); + + if( kIOReturnSuccess == err) do { + + ignore_zero_fault( true ); + err = checkDriver(); + ignore_zero_fault( false ); + + if( err) { + IOLog("%s: Not usable\n", logname ); + if( err == -999) + IOLog("%s: driver incompatible.\n", logname ); + continue; + } + getCurrentConfiguration(); + vramMemory = findVRAM(); + + // disable clamshell sleep on a single head portable + if( gIOPMRootDomain + && (0 == OSDynamicCast( IOPCIDevice, getProvider())) + && (data = OSDynamicCast( OSData, + getProvider()->getProperty("ATY,PanelActive")))) { + + if( 0 == *((UInt32 *) data->getBytesNoCopy())) + gIOPMRootDomain->receivePowerNotification( kIOPMIgnoreClamshell); + } + + // initialize power management of the device + initForPM(); + + } while( false); + + return( err); +} + +IODeviceMemory * IONDRVFramebuffer::getVRAMRange( void ) +{ + if( vramMemory) + vramMemory->retain(); + + return( vramMemory ); +} + +const IOTVector * IONDRVFramebuffer::_undefinedSymbolHandler( void * self, + const char * libraryName, const char * symbolName ) +{ + return( ((IONDRVFramebuffer 
*)self)->undefinedSymbolHandler( libraryName, symbolName) ); +} + +const IOTVector * IONDRVFramebuffer::undefinedSymbolHandler( const char * libraryName, + const char * symbolName ) +{ + return( 0 ); +} + +void IONDRVFramebuffer::free( void ) +{ + super::free(); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IONDRVFramebuffer::registerForInterruptType( IOSelect interruptType, + IOFBInterruptProc proc, OSObject * target, void * ref, + void ** interruptRef ) + +{ + _VSLService * service; + IOReturn err; + + if( (interruptType == kIOFBVBLInterruptType) + && (getProvider()->getProperty("Ignore VBL"))) + return( kIOReturnUnsupported ); + + for( service = vslServices; + service && (service->type != interruptType); + service = service->next ) {} + + if( service) { + + if( service->handler) + err = kIOReturnBusy; + + else { + service->target = target; + service->ref = ref; + service->handler = proc; + *interruptRef = service; + err = kIOReturnSuccess; + } + + } else + err = kIOReturnNoResources; + + return( err ); +} + +IOReturn IONDRVFramebuffer::unregisterInterrupt( void * interruptRef ) +{ + _VSLService * service = (_VSLService *) interruptRef; + + service->handler = 0; + + return( kIOReturnSuccess ); +} + +IOReturn IONDRVFramebuffer::setInterruptState( void * interruptRef, + UInt32 state ) +{ + return( kIOReturnUnsupported ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +//// VSL calls + +OSStatus IONDRVFramebuffer::VSLNewInterruptService( + void * entryID, + IOSelect serviceType, + _VSLService ** vslService ) +{ + IORegistryEntry * regEntry; + IONDRVFramebuffer * fb; + _VSLService * service; + IOReturn err = kIOReturnSuccess; + + REG_ENTRY_TO_OBJ( (const RegEntryID *) entryID, regEntry) + + fb = OSDynamicCast( IONDRVFramebuffer, + regEntry->getChildEntry( gIOServicePlane )); + assert( fb ); + + if( fb) { + service = IONew( _VSLService, 1 ); + + if( service) { + service->framebuffer = 
fb; + service->type = serviceType; + service->handler = 0; + service->next = fb->vslServices; + fb->vslServices = service; + + *vslService = service; + + } else + err = kIOReturnNoMemory; + + } else + err = kIOReturnBadArgument; + + return( err ); +} + +OSStatus IONDRVFramebuffer::VSLDisposeInterruptService(_VSLService * vslService) +{ + IONDRVFramebuffer * fb; + _VSLService * next; + _VSLService * prev; + + if( vslService) { + + fb = vslService->framebuffer; + + prev = fb->vslServices; + if( prev == vslService) + fb->vslServices = vslService->next; + else { + while( ((next = prev->next) != vslService) && next) + prev = next; + if( next) + prev->next = vslService->next; + } + + IODelete( vslService, _VSLService, 1 ); + } + + return( kIOReturnSuccess ); +} + +OSStatus IONDRVFramebuffer::VSLDoInterruptService( _VSLService * vslService ) +{ + IOFBInterruptProc proc; + + if( vslService) { + if( (proc = vslService->handler)) + (*proc) (vslService->target, vslService->ref); + } + + return( kIOReturnSuccess ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct _VSLCursorRef { + IOFramebuffer * framebuffer; + void * cursorImage; +}; + +Boolean IONDRVFramebuffer::VSLPrepareCursorForHardwareCursor( + void * cursorRef, + IOHardwareCursorDescriptor * hwDesc, + IOHardwareCursorInfo * hwCursorInfo ) +{ + _VSLCursorRef * cursor = (_VSLCursorRef *) cursorRef; + bool ok; + + if( hwCursorInfo->colorMap) + hwCursorInfo->colorMap += 1; + ok = cursor->framebuffer->convertCursorImage( + cursor->cursorImage, hwDesc, hwCursorInfo ); + if( hwCursorInfo->colorMap) + hwCursorInfo->colorMap -= 1; + + return( ok ); +} + +IOReturn IONDRVFramebuffer::setCursorImage( void * cursorImage ) +{ + _VSLCursorRef cursorRef; + VDSetHardwareCursorRec setCursor; + IOReturn err; + + if( 0 == powerState) + return( kIOReturnSuccess ); + + cursorRef.framebuffer = this; + cursorRef.cursorImage = cursorImage; + + setCursor.csCursorRef = (void *) &cursorRef; + 
setCursor.csReserved1 = 0; + setCursor.csReserved2 = 0; + + err = doControl( cscSetHardwareCursor, &setCursor ); + + return( err ); +} + +IOReturn IONDRVFramebuffer::setCursorState( SInt32 x, SInt32 y, bool visible ) +{ + VDDrawHardwareCursorRec drawCursor; + IOReturn err; + + if( 0 == powerState) + return( kIOReturnSuccess ); + + if( 0 == OSIncrementAtomic( &ndrvEnter)) + { + + drawCursor.csCursorX = x; + drawCursor.csCursorY = y; + drawCursor.csCursorVisible = visible; + drawCursor.csReserved1 = 0; + drawCursor.csReserved2 = 0; + + err = doControl( cscDrawHardwareCursor, &drawCursor ); + + } else + err = kIOReturnBusy; + + OSDecrementAtomic( &ndrvEnter ); + + return( err ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +//============ +//= Internal = +//============ + +IOReturn IONDRVFramebuffer::doControl( UInt32 code, void * params ) +{ + IOReturn err; + CntrlParam pb; + + if( ndrvState == 0) + return( kIOReturnNotOpen); + + pb.qLink = 0; + pb.csCode = code; + pb.csParams = params; + + OSIncrementAtomic( &ndrvEnter ); + err = ndrv->doDriverIO( /*ID*/ (UInt32) &pb, &pb, + kControlCommand, kImmediateIOCommandKind ); + OSDecrementAtomic( &ndrvEnter ); + + return( err); +} + +IOReturn IONDRVFramebuffer::doStatus( UInt32 code, void * params ) +{ + IOReturn err; + CntrlParam pb; + + if( ndrvState == 0) + return( kIOReturnNotOpen); + + pb.qLink = 0; + pb.csCode = code; + pb.csParams = params; + + OSIncrementAtomic( &ndrvEnter ); + err = ndrv->doDriverIO( /*ID*/ (UInt32) &pb, &pb, + kStatusCommand, kImmediateIOCommandKind ); + OSDecrementAtomic( &ndrvEnter ); + + return( err); +} + + +IOReturn IONDRVFramebuffer::checkDriver( void ) +{ + OSStatus err = noErr; + struct DriverInitInfo initInfo; + CntrlParam pb; + VDClutBehavior clutSetting; + VDGammaRecord gammaRec; + VDSwitchInfoRec switchInfo; + IOTimingInformation info; + VDPageInfo pageInfo; + + if( ndrvState == 0) { + do { + initInfo.refNum = 0xffcd; // ...sure. 
+ MAKE_REG_ENTRY(initInfo.deviceEntry, nub ) + + err = ndrv->doDriverIO( 0, &initInfo, + kInitializeCommand, kImmediateIOCommandKind ); + if( err) continue; + + err = ndrv->doDriverIO( 0, &pb, + kOpenCommand, kImmediateIOCommandKind ); + + } while( false); + + if( err) + return( err); + + // allow calls to ndrv + ndrvState = 1; + + if( (noErr == doStatus( cscGetCurMode, &switchInfo )) + && (noErr == getTimingInfoForDisplayMode( switchInfo.csData, &info)) + && (timingApple_0x0_0hz_Offline == info.appleTimingID)) { + + IOLog("%s: display offline\n", getName()); + err = kIOReturnOffline; + return( err); + } else + ndrvState = 2; + +#if IONDRVI2CLOG + do { + VDCommunicationInfoRec commInfo; + + bzero( &commInfo, sizeof( commInfo)); + commInfo.csBusID = kVideoDefaultBus; + + err = doStatus( cscGetCommunicationInfo, &commInfo ); + IOLog("%s: cscGetCommunicationInfo: ", getName()); + if( kIOReturnSuccess != err) { + IOLog("fails with %ld\n", err); + continue; + } + if( commInfo.csSupportedTypes & (1< 0) ) { + + if( displayModeIDs) + displayModeIDs[ num ] = info.csDisplayModeID; + + info.csPreviousDisplayModeID = info.csDisplayModeID; + num++; + } + + if( detailedTimings) { + IOItemCount count, i; + + count = detailedTimings->getCount(); + if( displayModeIDs) { + for( i = 0; i < count; i++) + displayModeIDs[ num + i ] = kDisplayModeIDReservedBase + i; + } + num += count; + } + + return( num); +} + +IOReturn IONDRVFramebuffer::getResInfoForArbMode( IODisplayModeID modeID, + IODisplayModeInformation * info ) +{ + VDVideoParametersInfoRec pixelParams; + VPBlock pixelInfo; + VDDetailedTimingRec * detailed; + IOIndex depth; + IOReturn err; + + err = validateDisplayMode( modeID, 0, &detailed ); + + for( depth = -1; err == kIOReturnSuccess; ) { + pixelParams.csDisplayModeID = modeID; + pixelParams.csDepthMode = ++depth + kDepthMode1; + pixelParams.csVPBlockPtr = &pixelInfo; + err = doStatus( cscGetVideoParameters, &pixelParams ); + } + + if( depth) { + info->maxDepthIndex = depth 
- 1; + info->nominalWidth = pixelInfo.vpBounds.right; + info->nominalHeight = pixelInfo.vpBounds.bottom; + if( detailed) + info->refreshRate = detailed->csPixelClock * 65536ULL / + ((detailed->csVerticalActive + detailed->csVerticalBlanking) + * (detailed->csHorizontalActive + detailed->csHorizontalBlanking)); + else + info->refreshRate = 0; + + err = kIOReturnSuccess; + } + + return( err ); +} + +IOReturn IONDRVFramebuffer::getResInfoForMode( IODisplayModeID modeID, + IODisplayModeInformation * info ) +{ + + bzero( info, sizeof( *info)); + + if( (UInt32) modeID >= (UInt32) kDisplayModeIDReservedBase) + return( getResInfoForArbMode( modeID, info )); + + // unfortunately, there is no "kDisplayModeIDFindSpecific" + if( cachedVDResolution.csDisplayModeID != (UInt32) modeID) { + + // try the next after cached mode + cachedVDResolution.csPreviousDisplayModeID = cachedVDResolution.csDisplayModeID; + if( (noErr != doStatus( cscGetNextResolution, &cachedVDResolution)) + || (cachedVDResolution.csDisplayModeID != (UInt32) modeID) ) { + + // else full blown iterate + cachedVDResolution.csPreviousDisplayModeID = kDisplayModeIDFindFirstResolution; + while( + (noErr == doStatus( cscGetNextResolution, &cachedVDResolution)) + && (cachedVDResolution.csDisplayModeID != (UInt32) modeID) + && ((SInt32) cachedVDResolution.csDisplayModeID > 0)) { + + cachedVDResolution.csPreviousDisplayModeID = cachedVDResolution.csDisplayModeID; + } + } + } + + if( cachedVDResolution.csDisplayModeID != (UInt32) modeID) { + cachedVDResolution.csDisplayModeID = kDisplayModeIDInvalid; + return( kIOReturnUnsupportedMode); + + } else { + + info->maxDepthIndex = cachedVDResolution.csMaxDepthMode - kDepthMode1; + info->nominalWidth = cachedVDResolution.csHorizontalPixels; + info->nominalHeight = cachedVDResolution.csVerticalLines; + info->refreshRate = cachedVDResolution.csRefreshRate; + + return( noErr); + } +} + +enum { + kModePreflight = 1, + kDisplayModeIDPreflight = kDisplayModeIDReservedBase + 1000 +}; 
+ +IOReturn IONDRVFramebuffer::setDetailedTiming( + IODisplayModeID mode, IOOptionBits options, + void * _desc, IOByteCount descripSize ) +{ + IOReturn err; + VDResolutionInfoRec info; + VDDetailedTimingRec * desc = (VDDetailedTimingRec *)_desc; + VDDetailedTimingRec look; + IOIndex index; + bool notPreflight = (0 == (options & kModePreflight)); + + // current must be ok + if( mode == currentDisplayMode) + return( kIOReturnSuccess ); + + index = mode - kDisplayModeIDReservedBase; + bzero( &look, sizeof( VDDetailedTimingRec) ); + look.csTimingSize = sizeof( VDDetailedTimingRec); + + // look for a programmable + for( + info.csPreviousDisplayModeID = kDisplayModeIDFindFirstProgrammable; + (noErr == (err = doStatus( cscGetNextResolution, &info))); + info.csPreviousDisplayModeID = info.csDisplayModeID) { + + if( (SInt32) info.csDisplayModeID < 0) { + err = kIOReturnNoResources; + break; + } + + look.csDisplayModeID = info.csDisplayModeID; + err = doStatus( cscGetDetailedTiming, &look ); + if( err != kIOReturnSuccess) + continue; + + // don't toss current + if( look.csDisplayModeAlias == (UInt32) currentDisplayMode) + continue; + + // see if already set to the right timing + if( (look.csDisplayModeAlias == (UInt32) mode) + && (look.csDisplayModeState == kDMSModeReady) + && (notPreflight) + && (detailedTimingsCurrent[index] == detailedTimingsSeed)) + break; + + // set it free + if( look.csDisplayModeState != kDMSModeFree) { + look.csDisplayModeID = info.csDisplayModeID; + look.csDisplayModeAlias = 0; + look.csDisplayModeState = kDMSModeFree; + err = doControl( cscSetDetailedTiming, &look ); + if( err != kIOReturnSuccess) + continue; + } + // set it ready + desc->csDisplayModeID = info.csDisplayModeID; + desc->csDisplayModeAlias = mode; + desc->csDisplayModeSeed = look.csDisplayModeSeed; + desc->csDisplayModeState = kDMSModeReady; + err = doControl( cscSetDetailedTiming, desc ); + + if( kIOReturnSuccess == err) { + if( notPreflight) + // don't stomp orig record + desc = 
&look; + err = doStatus( cscGetDetailedTiming, desc ); + } + if( notPreflight && (kIOReturnSuccess == err)) + detailedTimingsCurrent[index] = detailedTimingsSeed; + + break; + } + + return( err ); +} + +IOReturn IONDRVFramebuffer::validateDisplayMode( + IODisplayModeID _mode, IOOptionBits flags, + VDDetailedTimingRec ** detailed ) +{ + UInt32 mode = _mode; + IOReturn err = kIOReturnSuccess; + OSData * data; + const void * bytes; + + if( detailed) + *detailed = (VDDetailedTimingRec *) 0; + + if( mode >= (UInt32) kDisplayModeIDReservedBase) do { + + if( mode == (UInt32) kDisplayModeIDBootProgrammable) + continue; + + err = kIOReturnBadArgument; + if( !detailedTimings) + continue; + + data = OSDynamicCast( OSData, detailedTimings->getObject( + mode - kDisplayModeIDReservedBase)); + if( !data) + continue; + + bytes = data->getBytesNoCopy(); + err = setDetailedTiming( mode, 0, (void *) bytes, data->getLength() ); + if( err != kIOReturnSuccess) + continue; + + if( detailed) + *detailed = (VDDetailedTimingRec *) bytes; + + } while( false ); + + return( err ); +} + +void IONDRVFramebuffer::getCurrentConfiguration( void ) +{ + IOReturn err; + VDSwitchInfoRec switchInfo; + VDGrayRecord grayRec; + + grayRec.csMode = 0; // turn off luminance map + err = doControl( cscSetGray, &grayRec ); + // driver refused => mono display + grayMode = ((noErr == err) && (0 != grayRec.csMode)); + + err = doStatus( cscGetCurMode, &switchInfo ); + if( err == noErr) { + currentDisplayMode = switchInfo.csData; + currentDepth = switchInfo.csMode - kDepthMode1; + currentPage = switchInfo.csPage; + if( 0 == (physicalFramebuffer = pmap_extract( kernel_pmap, + ((vm_address_t) switchInfo.csBaseAddr) ))) + physicalFramebuffer = (UInt32) switchInfo.csBaseAddr; + } else + IOLog("%s: cscGetCurMode failed\n", nub->getName()); +} + +IODeviceMemory * IONDRVFramebuffer::makeSubRange( + IOPhysicalAddress start, + IOPhysicalLength length ) +{ + IODeviceMemory * mem = 0; + UInt32 numMaps, i; + IOService * device; 
+ + device = nub; + numMaps = device->getDeviceMemoryCount(); + + for( i = 0; (!mem) && (i < numMaps); i++) { + mem = device->getDeviceMemoryWithIndex(i); + if( !mem) + continue; + mem = IODeviceMemory::withSubRange( mem, + start - mem->getPhysicalAddress(), length ); + } + if( !mem) + mem = IODeviceMemory::withRange( start, length ); + + return( mem ); +} + +IODeviceMemory * IONDRVFramebuffer::getApertureRange( IOPixelAperture aper ) +{ + IOReturn err; + IOPixelInformation info; + IOByteCount bytes; + + err = getPixelInformation( currentDisplayMode, currentDepth, aper, + &info ); + if( err) + return( 0 ); + + bytes = (info.bytesPerRow * info.activeHeight) + 128; + + return( makeSubRange( physicalFramebuffer, bytes )); +} + +IODeviceMemory * IONDRVFramebuffer::findVRAM( void ) +{ + VDVideoParametersInfoRec pixelParams; + VPBlock pixelInfo; + VDResolutionInfoRec vdRes; + UInt32 size; + IOPhysicalAddress vramBase = physicalFramebuffer; + IOByteCount vramLength; + IOReturn err; + OSData * prop; + + vramLength = 0; + prop = OSDynamicCast( OSData, nub->getProperty("VRAM,memsize")); + + if( prop) { + vramLength = *((IOByteCount *)prop->getBytesNoCopy()); + if( vramLength) { + vramLength = (vramLength + (vramBase & 0xffff)) & 0xffff0000; + vramBase &= 0xffff0000; + } + } + + if( !vramLength) { + + vdRes.csPreviousDisplayModeID = kDisplayModeIDFindFirstResolution; + while( + (noErr == doStatus( cscGetNextResolution, &vdRes)) + && ((SInt32) vdRes.csDisplayModeID > 0) ) + { + pixelParams.csDisplayModeID = vdRes.csDisplayModeID; + pixelParams.csDepthMode = vdRes.csMaxDepthMode; + pixelParams.csVPBlockPtr = &pixelInfo; + err = doStatus( cscGetVideoParameters, &pixelParams); + if( err) + continue; + + // Control hangs its framebuffer off the end of the aperture to support + // 832 x 624 @ 32bpp. 
The commented out version will correctly calculate + // the vram length, but DPS needs the full extent to be mapped, so we'll + // end up mapping an extra page that will address vram through the + // little endian aperture. No other drivers like this known. +#if 1 + size = 0x40 + pixelInfo.vpBounds.bottom * + (pixelInfo.vpRowBytes & 0x7fff); +#else + size = ( (pixelInfo.vpBounds.right * pixelInfo.vpPixelSize) / 8) // last line + + (pixelInfo.vpBounds.bottom - 1) * + (pixelInfo.vpRowBytes & 0x7fff); +#endif + if( size > vramLength) + vramLength = size; + + vdRes.csPreviousDisplayModeID = vdRes.csDisplayModeID; + } + + vramLength = (vramLength + (vramBase & 0xffff) + 0xffff) & 0xffff0000; + vramBase &= 0xffff0000; + } + + return( makeSubRange( vramBase, vramLength )); +} + + +//============ +//= External = +//============ + +const char * IONDRVFramebuffer::getPixelFormats( void ) +{ + static const char * ndrvPixelFormats = + IO1BitIndexedPixels "\0" + IO2BitIndexedPixels "\0" + IO4BitIndexedPixels "\0" + IO8BitIndexedPixels "\0" + IO16BitDirectPixels "\0" + IO32BitDirectPixels "\0" + "\0"; + + return( ndrvPixelFormats); +} + +IOItemCount IONDRVFramebuffer::getDisplayModeCount( void ) +{ + return( iterateAllModes( 0 )); +} + +IOReturn IONDRVFramebuffer::getDisplayModes( IODisplayModeID * allDisplayModes ) +{ + iterateAllModes( allDisplayModes ); + return( kIOReturnSuccess ); +} + +IOReturn IONDRVFramebuffer::validateDetailedTiming( + void * desc, IOByteCount descripSize ) +{ + IOReturn err; + + err = setDetailedTiming( kDisplayModeIDPreflight, + kModePreflight, desc, descripSize); + + return( err ); +} + +IOReturn IONDRVFramebuffer::setDetailedTimings( OSArray * array ) +{ + IOReturn err; + UInt32 * newCurrent; + IOItemCount newCount; + + newCount = array->getCount(); + newCurrent = IONew(UInt32, newCount); + if( newCurrent) { + if( detailedTimings) + IODelete( detailedTimingsCurrent, UInt32, detailedTimings->getCount()); + detailedTimingsCurrent = newCurrent; + 
bzero( newCurrent, newCount * sizeof( UInt32)); + setProperty( kIOFBDetailedTimingsKey, array ); // retains + detailedTimings = array; + detailedTimingsSeed++; + + if( currentDisplayMode == kDisplayModeIDBootProgrammable) { + VDDetailedTimingRec look; + VDDetailedTimingRec * detailed; + OSData * data; + IODisplayModeID newDisplayMode; + + newDisplayMode = currentDisplayMode; + + bzero( &look, sizeof( VDDetailedTimingRec) ); + look.csTimingSize = sizeof( VDDetailedTimingRec); + look.csDisplayModeID = kDisplayModeIDBootProgrammable; + err = doStatus( cscGetDetailedTiming, &look ); + + if( kIOReturnSuccess == err) + for( int i = 0; + (data = OSDynamicCast( OSData, detailedTimings->getObject(i))); + i++) { + + detailed = (VDDetailedTimingRec *) data->getBytesNoCopy(); + if( (detailed->csHorizontalActive == look.csHorizontalActive) + && (detailed->csVerticalActive == look.csVerticalActive)) { + + newDisplayMode = i + kDisplayModeIDReservedBase; + break; + } + } + if( newDisplayMode != currentDisplayMode) { + err = validateDisplayMode( newDisplayMode, 0, 0 ); + currentDisplayMode = newDisplayMode; + } + } + + err = kIOReturnSuccess; + } else + err = kIOReturnNoMemory; + + return( err ); +} + +IOReturn IONDRVFramebuffer::getInformationForDisplayMode( + IODisplayModeID displayMode, IODisplayModeInformation * info ) +{ + IOReturn err; + + err = getResInfoForMode( displayMode, info ); + if( err) + err = kIOReturnUnsupportedMode; + + return( err ); +} + + +UInt64 IONDRVFramebuffer::getPixelFormatsForDisplayMode( + IODisplayModeID /* displayMode */, IOIndex depthIndex ) +{ + return( 1 << (depthIndex + startAt8)); +} + +IOReturn IONDRVFramebuffer::getPixelInformation( + IODisplayModeID displayMode, IOIndex depth, + IOPixelAperture aperture, IOPixelInformation * info ) +{ + SInt32 err; + VDVideoParametersInfoRec pixelParams; + VPBlock pixelInfo; + const char * formats; + UInt32 mask; + int index; + + bzero( info, sizeof( *info)); + + if( aperture) + return( 
kIOReturnUnsupportedMode); + + err = validateDisplayMode( displayMode, 0, 0 ); + if( err) + return( err ); + + do { + pixelParams.csDisplayModeID = displayMode; + pixelParams.csDepthMode = depth + kDepthMode1; + pixelParams.csVPBlockPtr = &pixelInfo; + err = doStatus( cscGetVideoParameters, &pixelParams ); + if( err) + continue; + + info->flags = accessFlags; + + info->activeWidth = pixelInfo.vpBounds.right; + info->activeHeight = pixelInfo.vpBounds.bottom; + info->bytesPerRow = pixelInfo.vpRowBytes & 0x7fff; + info->bytesPerPlane = pixelInfo.vpPlaneBytes; + info->bitsPerPixel = pixelInfo.vpPixelSize; + + formats = getPixelFormats(); + mask = getPixelFormatsForDisplayMode( displayMode, depth ); + + for( index = 0; index < 32; index++) { + if( (mask & (1 << index)) && ((aperture--) == 0)) { + strcpy( info->pixelFormat, formats); + break; + } + formats += strlen( formats) + 1; + } + + if( 0 == strcmp("PPPPPPPP", info->pixelFormat)) { + info->pixelType = kIOCLUTPixels; + info->componentMasks[0] = 0xff; + info->bitsPerPixel = 8; + info->componentCount = 1; + info->bitsPerComponent = 8; + + } else if( 0 == strcmp("-RRRRRGGGGGBBBBB", info->pixelFormat)) { + info->pixelType = kIORGBDirectPixels; + info->componentMasks[0] = 0x7c00; + info->componentMasks[1] = 0x03e0; + info->componentMasks[2] = 0x001f; + info->bitsPerPixel = 16; + info->componentCount = 3; + info->bitsPerComponent = 5; + + } else if( 0 == strcmp("--------RRRRRRRRGGGGGGGGBBBBBBBB", + info->pixelFormat)) { + info->pixelType = kIORGBDirectPixels; + info->componentMasks[0] = 0x00ff0000; + info->componentMasks[1] = 0x0000ff00; + info->componentMasks[2] = 0x000000ff; + info->bitsPerPixel = 32; + info->componentCount = 3; + info->bitsPerComponent = 8; + } + + } while( false); + + return( err); +} + +IOReturn IONDRVFramebuffer::getTimingInfoForDisplayMode( + IODisplayModeID displayMode, IOTimingInformation * info ) +{ + VDTimingInfoRec timingInfo; + OSStatus err; + + err = validateDisplayMode( displayMode, 0, 0 ); 
+ if( err) + return( err ); + + timingInfo.csTimingMode = displayMode; + // in case the driver doesn't do it: + timingInfo.csTimingFormat = kDeclROMtables; + err = doStatus( cscGetModeTiming, &timingInfo); + if( err == noErr) { + if( timingInfo.csTimingFormat == kDeclROMtables) + info->appleTimingID = timingInfo.csTimingData; + else + info->appleTimingID = timingInvalid; + + return( kIOReturnSuccess); + } + + return( kIOReturnUnsupportedMode); +} + +IOReturn IONDRVFramebuffer::getCurrentDisplayMode( + IODisplayModeID * displayMode, IOIndex * depth ) +{ + if( displayMode) + *displayMode = currentDisplayMode; + if( depth) + *depth = currentDepth; + + return( kIOReturnSuccess); +} + +IOReturn IONDRVFramebuffer::setDisplayMode( IODisplayModeID displayMode, IOIndex depth ) +{ + SInt32 err; + VDSwitchInfoRec switchInfo; + VDPageInfo pageInfo; + + err = validateDisplayMode( displayMode, 0, 0 ); + if( err) + return( err ); + + ignore_zero_fault( true ); + switchInfo.csData = displayMode; + switchInfo.csMode = depth + kDepthMode1; + switchInfo.csPage = 0; + err = doControl( cscSwitchMode, &switchInfo); + if(err) + IOLog("%s: cscSwitchMode:%d\n", nub->getName(), (int)err); + + // duplicate QD InitGDevice + pageInfo.csMode = switchInfo.csMode; + pageInfo.csData = 0; + pageInfo.csPage = 0; + doControl( cscSetMode, &pageInfo); + doControl( cscGrayPage, &pageInfo); + ignore_zero_fault( false ); + + getCurrentConfiguration(); + + return( err); +} + +IOReturn IONDRVFramebuffer::setStartupDisplayMode( + IODisplayModeID displayMode, IOIndex depth ) +{ + SInt32 err; + VDSwitchInfoRec switchInfo; + + err = validateDisplayMode( displayMode, 0, 0 ); + if( err) + return( err ); + + switchInfo.csData = displayMode; + switchInfo.csMode = depth + kDepthMode1; + err = doControl( cscSavePreferredConfiguration, &switchInfo); + return( err); +} + +IOReturn IONDRVFramebuffer::getStartupDisplayMode( + IODisplayModeID * displayMode, IOIndex * depth ) +{ + SInt32 err; + VDSwitchInfoRec switchInfo; 
+ + err = doStatus( cscGetPreferredConfiguration, &switchInfo); + if( err == noErr) { + *displayMode = switchInfo.csData; + *depth = switchInfo.csMode - kDepthMode1; + } + return( err); +} + +IOReturn IONDRVFramebuffer::setApertureEnable( IOPixelAperture /* aperture */, + IOOptionBits /* enable */ ) +{ + return( kIOReturnSuccess); +} + +IOReturn IONDRVFramebuffer::setCLUTWithEntries( + IOColorEntry * colors, UInt32 index, UInt32 numEntries, + IOOptionBits options ) +{ + IOReturn err; + UInt32 code; + VDSetEntryRecord setEntryRec; + VDClutBehavior clutSetting; + VDGrayRecord grayRec; + + if( 0 == powerState) + return( kIOReturnSuccess ); + + if( options & kSetCLUTWithLuminance) + grayRec.csMode = 1; // turn on luminance map + else + grayRec.csMode = 0; // turn off luminance map + + if( grayRec.csMode != lastGrayMode) { + doControl( cscSetGray, &grayRec); + lastGrayMode = grayRec.csMode; + } + + if( options & kSetCLUTImmediately) + clutSetting = kSetClutAtSetEntries; + else + clutSetting = kSetClutAtVBL; + + if( clutSetting != lastClutSetting) { + doControl( cscSetClutBehavior, &clutSetting); + lastClutSetting = clutSetting; + } + + if( options & kSetCLUTByValue) + setEntryRec.csStart = -1; + else + setEntryRec.csStart = index; + + setEntryRec.csTable = (ColorSpec *) colors; + setEntryRec.csCount = numEntries - 1; + if( directMode) + code = cscDirectSetEntries; + else + code = cscSetEntries; + err = doControl( code, &setEntryRec); + + return( err); +} + +IOReturn IONDRVFramebuffer::setGammaTable( UInt32 channelCount, UInt32 dataCount, + UInt32 dataWidth, void * data ) +{ + IOReturn err; + VDGammaRecord gammaRec; + struct GammaTbl { + short gVersion; /*gamma version number*/ + short gType; /*gamma data type*/ + short gFormulaSize; /*Formula data size */ + short gChanCnt; /*number of channels of data */ + short gDataCnt; /*number of values/channel */ + short gDataWidth; /*bits/corrected value */ + /* (data packed to next larger byte size) */ + UInt8 gFormulaData[0]; /* 
data for formulas followed by gamma values */ + }; + GammaTbl * table = NULL; + IOByteCount dataLen = 0; + + if( 0 == powerState) + return( kIOReturnSuccess ); + + if( data) { + dataLen = (dataWidth + 7) / 8; + dataLen *= dataCount * channelCount; + table = (GammaTbl *) IOMalloc( dataLen + sizeof( struct GammaTbl)); + if( NULL == table) + return( kIOReturnNoMemory); + + table->gVersion = 0; + table->gType = 0; + table->gFormulaSize = 0; + table->gChanCnt = channelCount; + table->gDataCnt = dataCount; + table->gDataWidth = dataWidth; + bcopy( data, table->gFormulaData, dataLen); + } + + gammaRec.csGTable = (Ptr) table; + err = doControl( cscSetGamma, &gammaRec); + if( table) + IOFree( table, dataLen + sizeof( struct GammaTbl)); + + return( err); +} + +IOReturn IONDRVFramebuffer::getAttribute( IOSelect attribute, UInt32 * value ) +{ + IOReturn err = kIOReturnSuccess; + VDSupportsHardwareCursorRec hwCrsrSupport; + + switch( attribute ) { + + case kIOHardwareCursorAttribute: + + *value = ((kIOReturnSuccess == + doStatus( cscSupportsHardwareCursor, &hwCrsrSupport)) + && (hwCrsrSupport.csSupportsHardwareCursor)); + break; + + default: + err = super::getAttribute( attribute, value ); + } + + return( err ); +} + +UInt32 IONDRVFramebuffer::getConnectionCount( void ) +{ + VDMultiConnectInfoRec theRecord; + + if( doStatus(cscGetMultiConnect,&theRecord) == 0 ) { + return theRecord.csDisplayCountOrNumber; + } + return 1; +} + +IOReturn IONDRVFramebuffer::setAttributeForConnection( IOIndex connectIndex, + IOSelect attribute, UInt32 info ) +{ + IOReturn err; + VDSyncInfoRec theVDSyncInfoRec; + VDPowerStateRec sleepInfo; + + + switch( attribute ) { + + case kConnectionSyncEnable: + + theVDSyncInfoRec.csMode = (unsigned char)(info>>8); + theVDSyncInfoRec.csFlags = (unsigned char)(info & 0xFF); + doControl( cscSetSync, &theVDSyncInfoRec); + + sleepInfo.powerState = ((info>>8) & 0xff) ? 
kAVPowerSuspend : kAVPowerOn; + sleepInfo.powerFlags = 0; + sleepInfo.powerReserved1 = 0; + sleepInfo.powerReserved2 = 0; + doControl( cscSetPowerState, &sleepInfo); + + err = kIOReturnSuccess; + break; + + default: + err = super::setAttributeForConnection( connectIndex, + attribute, info ); + break; + } + return( err ); +} + + +IOReturn IONDRVFramebuffer::getAttributeForConnection( IOIndex connectIndex, + IOSelect attribute, UInt32 * value ) +{ + IOReturn ret; + VDSyncInfoRec theVDSyncInfoRec; + + switch( attribute ) { + + case kConnectionSyncFlags: + // find out current state of sync lines + theVDSyncInfoRec.csMode = 0x00; + doStatus(cscGetSync,&theVDSyncInfoRec); + * value = theVDSyncInfoRec.csMode; + ret = kIOReturnSuccess; + break; + case kConnectionSyncEnable: + // what are the sync-controlling capabilities of the ndrv? + theVDSyncInfoRec.csMode = 0xFF; + doStatus(cscGetSync,&theVDSyncInfoRec); + * value = (UInt32)theVDSyncInfoRec.csMode; + ret = kIOReturnSuccess; + break; + case kConnectionSupportsHLDDCSense: + case kConnectionSupportsAppleSense: + ret = kIOReturnSuccess; + break; + default: + ret = super::getAttributeForConnection( connectIndex, + attribute, value ); + break; + } + + return( ret ); +} + +IOReturn IONDRVFramebuffer::getAppleSense( IOIndex connectIndex, + UInt32 * senseType, + UInt32 * primary, + UInt32 * extended, + UInt32 * displayType ) +{ + OSStatus err; + VDMultiConnectInfoRec multiConnect; + UInt32 sense, extSense; + + if( connectIndex == 0 ) + err = doStatus( cscGetConnection, &multiConnect.csConnectInfo); + + else { + multiConnect.csDisplayCountOrNumber = connectIndex; + err = doControl( cscSetMultiConnect, &multiConnect); + } + if( err) + return( err); + + if( multiConnect.csConnectInfo.csConnectFlags + & ((1<myCurrentState); + else + return( 0); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// setPowerState +// +// Called by the superclass to turn the frame buffer on and off. 
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IONDRVFramebuffer::setPowerState( unsigned long powerStateOrdinal, + IOService * whichDevice ) +{ + static const unsigned long states[2][2] = + { { kAVPowerOff, kAVPowerOn }, + { kHardwareSleep, kHardwareWake } }; + + VDPowerStateRec sleepInfo; + IOReturn err; + int deepSleep = 0; + UInt32 newState; + IOAGPDevice * agpDev; + + if( powerStateOrdinal == powerState) + return( IOPMAckImplied ); + + if( 0 == powerStateOrdinal) + super::setPowerState( powerStateOrdinal, whichDevice ); + + sleepInfo.powerState = 0; + sleepInfo.powerFlags = 0; + sleepInfo.powerReserved1 = 0; + sleepInfo.powerReserved2 = 0; + + err = doStatus( cscGetPowerState, &sleepInfo); + if( (kIOReturnSuccess == err) + && (kPowerStateSleepCanPowerOffMask & sleepInfo.powerFlags)) + deepSleep = platformDeepSleep; + + newState = states[deepSleep & 1][powerStateOrdinal & 1]; + sleepInfo.powerState = newState; + sleepInfo.powerFlags = 0; + sleepInfo.powerReserved1 = 0; + sleepInfo.powerReserved2 = 0; + + ignore_zero_fault( true ); + boolean_t ints = ml_set_interrupts_enabled( false ); + + err = doControl( cscSetPowerState, &sleepInfo); + + ml_set_interrupts_enabled( ints ); + ignore_zero_fault( false ); + + if( powerStateOrdinal) { + powerState = powerStateOrdinal; + if( kAVPowerOn == newState) { + VDPageInfo pageInfo; + pageInfo.csData = 0; + pageInfo.csPage = 0; + doControl( cscGrayPage, &pageInfo); + resetCursor(); + } else { + IOSleep( 100 ); + setCursorImage( 0 ); + } + + agpDev = OSDynamicCast(IOAGPDevice, nub); + if( !agpDev) + agpDev = OSDynamicCast( IOAGPDevice, nub->getParentEntry(gIODTPlane)); + if( agpDev) + agpDev->resetAGP(); + } + powerState = powerStateOrdinal; + + if( powerStateOrdinal) + super::setPowerState( powerStateOrdinal, whichDevice ); + + return( IOPMAckImplied ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// ATI patches. 
+// Real problem : getStartupMode doesn't. + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IONDRVFramebuffer + +OSDefineMetaClassAndStructors(IOATINDRV, IONDRVFramebuffer) +OSDefineMetaClassAndStructors(IOATI128NDRV, IOATINDRV) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOATINDRV::getStartupDisplayMode( + IODisplayModeID * displayMode, IOIndex * depth ) +{ + UInt16 * nvram; + OSData * prop; + + prop = OSDynamicCast( OSData, nub->getProperty("Sime")); + if( prop) { + nvram = (UInt16 *) prop->getBytesNoCopy(); + *displayMode = nvram[ 0 ]; // 1 is physDisplayMode + *depth = nvram[ 2 ] - kDepthMode1; + return( kIOReturnSuccess); + } else + return(super::getStartupDisplayMode( displayMode, depth)); +} + +IODeviceMemory * IOATINDRV::findVRAM( void ) +{ + OSData * prop; + IOByteCount * lengths; + IOIndex count; + IOPhysicalAddress vramBase; + IOByteCount vramLength; + + prop = OSDynamicCast( OSData, nub->getProperty("ATY,memsize")); + if( !prop) + return( super::findVRAM()); + + lengths = (IOByteCount *) prop->getBytesNoCopy(); + count = prop->getLength() / sizeof(IOByteCount); + + prop = OSDynamicCast( OSData, nub->getProperty("ATY,Base")); + + if( prop && (count > 1)) { + vramBase = *((IOPhysicalAddress *)prop->getBytesNoCopy()); + vramLength = lengths[1]; + vramBase &= ~(vramLength - 1); + } else { + vramBase = physicalFramebuffer; + vramLength = lengths[0]; + } + + if( !vramLength) + return( super::findVRAM()); + + vramLength = (vramLength + (vramBase & 0xffff)) & 0xffff0000; + vramBase &= 0xffff0000; + + return( makeSubRange( vramBase, vramLength )); +} + +static int g128ExtraCurs = 8; +static int g128DeltaCurs = 0x25c0; + +void IOATI128NDRV::flushCursor( void ) +{ + volatile UInt32 * fb; + UInt32 x; + int i; + + fb = (volatile UInt32 *) frameBuffer; + for( i = 0; i < g128ExtraCurs; i++) { + x += *(fb++); + fb += g128DeltaCurs; + } +} + + + 
+OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 0); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 1); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 2); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 3); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 4); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 5); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 6); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 7); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 8); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 9); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 10); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 11); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 12); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 13); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 14); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 15); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 16); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 17); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 18); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 19); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 20); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 21); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 22); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 23); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 24); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 25); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 26); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 27); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 28); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 29); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 30); +OSMetaClassDefineReservedUnused(IONDRVFramebuffer, 31); diff --git a/iokit/Families/IONDRVSupport/IONDRVLibraries.cpp b/iokit/Families/IONDRVSupport/IONDRVLibraries.cpp new file mode 100644 index 000000000..565126fef --- /dev/null +++ 
b/iokit/Families/IONDRVSupport/IONDRVLibraries.cpp @@ -0,0 +1,1776 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 21 Jul 98 - start IOKit + * sdouglas 14 Dec 98 - start cpp. 
+ */ + + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "IOPEFLibraries.h" +#include "IOPEFLoader.h" +#include "IONDRV.h" + +#include + +extern "C" +{ + +extern void *kern_os_malloc(size_t size); +extern void kern_os_free(void * addr); + +#define LOG if(1) kprintf + +#define LOGNAMEREG 0 + +/* NameRegistry error codes */ +enum { + nrLockedErr = -2536, + nrNotEnoughMemoryErr = -2537, + nrInvalidNodeErr = -2538, + nrNotFoundErr = -2539, + nrNotCreatedErr = -2540, + nrNameErr = -2541, + nrNotSlotDeviceErr = -2542, + nrDataTruncatedErr = -2543, + nrPowerErr = -2544, + nrPowerSwitchAbortErr = -2545, + nrTypeMismatchErr = -2546, + nrNotModifiedErr = -2547, + nrOverrunErr = -2548, + nrResultCodeBase = -2549, + nrPathNotFound = -2550, /* a path component lookup failed */ + nrPathBufferTooSmall = -2551, /* buffer for path is too small */ + nrInvalidEntryIterationOp = -2552, /* invalid entry iteration operation */ + nrPropertyAlreadyExists = -2553, /* property already exists */ + nrIterationDone = -2554, /* iteration operation is done */ + nrExitedIteratorScope = -2555, /* outer scope of iterator was exited */ + nrTransactionAborted = -2556 /* transaction was aborted */ +}; + +enum { + kNVRAMProperty = 0x00000020L, // matches NR + kRegMaximumPropertyNameLength = 31 +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +UInt32 _eEndianSwap32Bit( UInt32 data ) +{ + return( OSReadSwapInt32(&data, 0)); +} + +UInt16 _eEndianSwap16Bit( UInt16 data ) +{ + return( OSReadSwapInt16(&data, 0)); +} + +OSStatus _eExpMgrConfigReadLong( RegEntryID entryID, UInt8 offset, UInt32 * value ) +{ + IORegistryEntry * regEntry; + IOPCIDevice * ioDevice; + UInt32 adj; + + REG_ENTRY_TO_OBJ( entryID, regEntry) + + ioDevice = OSDynamicCast( IOPCIDevice, regEntry ); + if( !ioDevice) + ioDevice = OSDynamicCast( IOPCIDevice, regEntry->getParentEntry( gIODTPlane) ); + if( !ioDevice) + return( 
nrNotSlotDeviceErr ); + + adj = ioDevice->configRead32( offset ); +#if 0 + IOMemoryMap * map = 0; + if( (offset >= kIOPCIConfigBaseAddress2) + && (offset <= kIOPCIConfigBaseAddress5)) { + if( (map = ioDevice->mapDeviceMemoryWithRegister( offset, kIOMapReference))) { + adj = (adj & 3) | (map->getVirtualAddress()); + map->release(); + } + } +#endif + *value = adj; + + return( noErr ); +} + +OSStatus _eExpMgrConfigWriteLong( RegEntryID entryID, UInt8 offset, UInt32 value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + ioDevice->configWrite32( offset, value); + + return( noErr ); +} + + +OSStatus _eExpMgrConfigReadWord( RegEntryID entryID, UInt8 offset, UInt16 * value ) +{ + IORegistryEntry * regEntry; + IOPCIDevice * ioDevice; + + REG_ENTRY_TO_OBJ( entryID, regEntry) + + ioDevice = OSDynamicCast( IOPCIDevice, regEntry ); + if( !ioDevice) + ioDevice = OSDynamicCast( IOPCIDevice, regEntry->getParentEntry( gIODTPlane) ); + if( !ioDevice) + return( nrNotSlotDeviceErr ); + + *value = ioDevice->configRead16( offset ); + + return( noErr ); +} + +OSStatus _eExpMgrConfigWriteWord( RegEntryID entryID, UInt8 offset, UInt16 value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + ioDevice->configWrite16( offset, value); + + return( noErr); +} + +OSStatus _eExpMgrConfigReadByte( RegEntryID entryID, UInt8 offset, UInt8 * value ) +{ + IORegistryEntry * regEntry; + IOPCIDevice * ioDevice; + + REG_ENTRY_TO_OBJ( entryID, regEntry) + + ioDevice = OSDynamicCast( IOPCIDevice, regEntry ); + if( !ioDevice) + ioDevice = OSDynamicCast( IOPCIDevice, regEntry->getParentEntry( gIODTPlane) ); + if( !ioDevice) + return( nrNotSlotDeviceErr ); + + *value = ioDevice->configRead8( offset ); + + return( noErr ); +} + +OSStatus _eExpMgrConfigWriteByte( RegEntryID entryID, UInt8 offset, UInt8 value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + ioDevice->configWrite8( offset, value); + + return( noErr); +} + +OSStatus _eExpMgrIOReadLong( RegEntryID 
entryID, UInt16 offset, UInt32 * value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + *value = ioDevice->ioRead32( offset ); + + return( noErr); +} + +OSStatus _eExpMgrIOWriteLong( RegEntryID entryID, UInt16 offset, UInt32 value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + ioDevice->ioWrite32( offset, value ); + + return( noErr); +} + +OSStatus _eExpMgrIOReadWord( RegEntryID entryID, UInt16 offset, UInt16 * value ) +{ + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + *value = ioDevice->ioRead16( offset ); + + return( noErr); +} + +OSStatus _eExpMgrIOWriteWord( RegEntryID entryID, UInt16 offset, UInt16 value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + ioDevice->ioWrite16( offset, value ); + + return( noErr); +} + +OSStatus _eExpMgrIOReadByte( RegEntryID entryID, UInt16 offset, UInt8 * value ) +{ + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + *value = ioDevice->ioRead8( offset ); + + return( noErr); +} + +OSStatus _eExpMgrIOWriteByte( RegEntryID entryID, UInt16 offset, UInt8 value ) +{ + + REG_ENTRY_TO_SERVICE( entryID, IOPCIDevice, ioDevice) + + ioDevice->ioWrite8( offset, value ); + + return( noErr); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSStatus _eRegistryEntryIDCopy( RegEntryID entryID, RegEntryID to ) +{ + bcopy( entryID, to, sizeof( RegEntryID) ); + return( noErr); +} + + +OSStatus _eRegistryEntryIDInit( RegEntryID entryID ) +{ + MAKE_REG_ENTRY( entryID, 0); + return( noErr); +} + +/* + * Compare EntryID's for equality or if invalid + * + * If a NULL value is given for either id1 or id2, the other id + * is compared with an invalid ID. If both are NULL, the id's + * are consided equal (result = true). 
+ * note: invalid != uninitialized + */ +Boolean _eRegistryEntryIDCompare( RegEntryID entryID1, RegEntryID entryID2 ) +{ + IORegistryEntry * regEntry1; + IORegistryEntry * regEntry2; + + if( entryID1) { + REG_ENTRY_TO_OBJ_RET( entryID1, regEntry1, false) + } else + regEntry1 = 0; + + if( entryID2) { + REG_ENTRY_TO_OBJ_RET( entryID2, regEntry2, false) + } else + regEntry2 = 0; + + return( regEntry1 == regEntry2 ); +} + +OSStatus _eRegistryPropertyGetSize( void *entryID, char *propertyName, + UInt32 * propertySize ) +{ + OSStatus err = noErr; + OSData * prop; + + REG_ENTRY_TO_PT( entryID, regEntry) + + prop = (OSData *) regEntry->getProperty( propertyName); + if( prop) + *propertySize = prop->getLength(); + else + err = nrNotFoundErr; + +#if LOGNAMEREG + LOG("RegistryPropertyGetSize: %s : %d\n", propertyName, err); +#endif + return( err); + +} + +OSStatus _eRegistryPropertyGet(void *entryID, char *propertyName, UInt32 *propertyValue, UInt32 *propertySize) +{ + OSStatus err = noErr; + OSData * prop; + UInt32 len; + + REG_ENTRY_TO_PT( entryID, regEntry) + + prop = OSDynamicCast( OSData, regEntry->getProperty( propertyName)); + if( prop) { + + len = *propertySize; + *propertySize = prop->getLength(); + len = (len > prop->getLength()) ? 
prop->getLength() : len; + bcopy( prop->getBytesNoCopy(), propertyValue, len); +#if LOGNAMEREG + LOG("value: %08x ", *propertyValue); +#endif + } else + err = nrNotFoundErr; + +#if LOGNAMEREG + LOG("RegistryPropertyGet: %s : %d\n", propertyName, err); +#endif + return( err); +} + +OSStatus _eRegistryPropertyCreate( void *entryID, char *propertyName, + void * propertyValue, UInt32 propertySize ) +{ + OSStatus err = noErr; + OSData * prop; + + REG_ENTRY_TO_PT( entryID, regEntry) + + prop = OSData::withBytes( propertyValue, propertySize ); + + if( prop) { + + regEntry->setProperty( propertyName, prop); + prop->release(); + + } else + err = nrNotCreatedErr; + +#if LOGNAMEREG + LOG("RegistryPropertyCreate: %s : %d\n", propertyName, err); +#endif + return( err); +} + +OSStatus _eRegistryPropertyDelete( void *entryID, char *propertyName ) +{ + OSStatus err = noErr; + OSObject * old; + + REG_ENTRY_TO_PT( entryID, regEntry) + + old = regEntry->getProperty(propertyName); + if ( old ) + regEntry->removeProperty(propertyName); + else + err = nrNotFoundErr; + +#if LOGNAMEREG + LOG("RegistryPropertyDelete: %s : %d\n", propertyName, err); +#endif + return( err); +} + +void IONDRVSetNVRAMPropertyName( IORegistryEntry * regEntry, + const OSSymbol * sym ) +{ + regEntry->setProperty( "IONVRAMProperty", (OSObject *) sym ); +} + +static IOReturn IONDRVSetNVRAMPropertyValue( IORegistryEntry * regEntry, + const OSSymbol * name, OSData * value ) +{ + IOReturn err; + IODTPlatformExpert * platform = + (IODTPlatformExpert *) IOService::getPlatform(); + + err = platform->writeNVRAMProperty( regEntry, name, value ); + + return( err ); +} + +OSStatus _eRegistryPropertySet( void *entryID, char *propertyName, void * propertyValue, UInt32 propertySize ) +{ + OSStatus err = noErr; + OSData * prop; + const OSSymbol * sym; + + REG_ENTRY_TO_PT( entryID, regEntry) + + sym = OSSymbol::withCString( propertyName ); + if( !sym) + return( kIOReturnNoMemory ); + + prop = OSDynamicCast( OSData, 
regEntry->getProperty( sym )); + if( 0 == prop) + err = nrNotFoundErr; + + else if( (prop = OSData::withBytes( propertyValue, propertySize))) { + regEntry->setProperty( sym, prop); + + if( (sym == (const OSSymbol *) + regEntry->getProperty("IONVRAMProperty"))) + err = IONDRVSetNVRAMPropertyValue( regEntry, sym, prop ); + prop->release(); + + } else + err = nrNotCreatedErr; + + sym->release(); + +#if LOGNAMEREG + LOG("RegistryPropertySet: %s : %d\n", propertyName, err); +#endif + return( err); +} + +OSStatus _eRegistryPropertyGetMod(void * entryID, char * propertyName, + UInt32 * mod) +{ + const OSSymbol * sym; + + REG_ENTRY_TO_PT( entryID, regEntry) + + if( (sym = OSDynamicCast( OSSymbol, + regEntry->getProperty("IONVRAMProperty"))) + && (0 == strcmp( propertyName, sym->getCStringNoCopy()))) + + *mod = kNVRAMProperty; + else + *mod = 0; + + return( noErr); +} + +OSStatus _eRegistryPropertySetMod(void *entryID, char *propertyName, + UInt32 mod ) +{ + OSStatus err = noErr; + OSData * data; + const OSSymbol * sym; + + REG_ENTRY_TO_PT( entryID, regEntry) + + if( (mod & kNVRAMProperty) + && (sym = OSSymbol::withCString( propertyName ))) { + + if( (data = OSDynamicCast( OSData, regEntry->getProperty( sym))) ) { + err = IONDRVSetNVRAMPropertyValue( regEntry, sym, data ); + if( kIOReturnSuccess == err) + IONDRVSetNVRAMPropertyName( regEntry, sym ); + } + sym->release(); + } + + return( err); +} + +OSStatus _eVSLSetDisplayConfiguration(RegEntryID * entryID, + char * propertyName, + void * configData, + long configDataSize) +{ + IOReturn err = nrNotCreatedErr; + IORegistryEntry * options; + const OSSymbol * sym = 0; + OSData * data = 0; + enum { kMaxDisplayConfigDataSize = 64 }; + + if( (configDataSize > kMaxDisplayConfigDataSize) + || (strlen(propertyName) > kRegMaximumPropertyNameLength)) + return( nrNotCreatedErr ); + + do { + options = IORegistryEntry::fromPath( "/options", gIODTPlane); + if( !options) + continue; + data = OSData::withBytes( configData, configDataSize ); 
+ if( !data) + continue; + sym = OSSymbol::withCString( propertyName ); + if( !sym) + continue; + if( !options->setProperty( sym, data )) + continue; + err = kIOReturnSuccess; + + } while( false ); + + if( options) + options->release(); + if( data) + data->release(); + if( sym) + sym->release(); + + return( err ); +} + +OSStatus _eRegistryPropertyIterateCreate( RegEntryID * entryID, + OSCollectionIterator ** cookie) +{ + + REG_ENTRY_TO_PT( entryID, regEntry) + + // NB. unsynchronized. But should only happen on an owned nub! + // Should non OSData be filtered out? + *cookie = OSCollectionIterator::withCollection( + regEntry->getPropertyTable()); + + if( *cookie) + return( noErr); + else + return( nrNotEnoughMemoryErr); +} + +OSStatus _eRegistryPropertyIterateDispose( OSCollectionIterator ** cookie) +{ + if( *cookie) { + (*cookie)->release(); + *cookie = NULL; + return( noErr); + } else + return( nrIterationDone); +} + + +OSStatus _eRegistryPropertyIterate( OSCollectionIterator ** cookie, + char * name, Boolean * done ) +{ + const OSSymbol * key; + + key = (const OSSymbol *) (*cookie)->getNextObject(); + if( key) + strncpy( name, key->getCStringNoCopy(), kRegMaximumPropertyNameLength); + + // Seems to be differences in handling "done". + // ATI assumes done = true when getting the last property. + // The Book says done is true after last property. + // ATI does check err, so this will work. + // Control ignores err and checks done. 
+ + *done = (key == 0); + + if( 0 != key) + return( noErr); + else + return( nrIterationDone ); +} + +OSStatus +_eRegistryEntryIterateCreate( IORegistryIterator ** cookie) +{ + *cookie = IORegistryIterator::iterateOver( gIODTPlane ); + if( *cookie) + return( noErr); + else + return( nrNotEnoughMemoryErr); +} + +OSStatus +_eRegistryEntryIterateDispose( IORegistryIterator ** cookie) +{ + if( *cookie) { + (*cookie)->release(); + *cookie = NULL; + return( noErr); + } else + return( nrIterationDone); +} + +OSStatus +_eRegistryEntryIterate( IORegistryIterator ** cookie, + UInt32 /* relationship */, + RegEntryID foundEntry, + Boolean * done) +{ + IORegistryEntry * regEntry; + + // TODO: check requested type of iteration + regEntry = (*cookie)->getNextObjectRecursive(); + + MAKE_REG_ENTRY( foundEntry, regEntry); + *done = (0 == regEntry); + +#if LOGNAMEREG + if( regEntry) + LOG("RegistryEntryIterate: %s\n", regEntry->getName( gIODTPlane )); +#endif + + if( regEntry) + return( noErr); + else + return( nrNotFoundErr); +} + +OSStatus +_eRegistryCStrEntryToName( const RegEntryID * entryID, + RegEntryID parentEntry, + char * nameComponent, + Boolean * done ) +{ + IORegistryEntry * regEntry; + + REG_ENTRY_TO_OBJ( entryID, regEntry) + + strncpy( nameComponent, regEntry->getName( gIODTPlane ), kRegMaximumPropertyNameLength ); + nameComponent[ kRegMaximumPropertyNameLength ] = 0; + + regEntry = regEntry->getParentEntry( gIODTPlane ); + if( regEntry) { + MAKE_REG_ENTRY( parentEntry, regEntry); + *done = false; + } else + *done = true; + + return( noErr); +} + +OSStatus +_eRegistryCStrEntryLookup( const RegEntryID * parentEntry, + const char * path, + RegEntryID newEntry) +{ + IOReturn err; + IORegistryEntry * regEntry = 0; + char * buf; + char * cvtPath; + char c; +#define kDTRoot "Devices:device-tree:" + + if( parentEntry) { + REG_ENTRY_TO_OBJ( parentEntry, regEntry) + } else + regEntry = 0; + + buf = IONew( char, 512 ); + if( !buf) + return( nrNotEnoughMemoryErr ); + + cvtPath = 
buf; + if( ':' == path[0]) + path++; + else if( 0 == strncmp( path, kDTRoot, strlen( kDTRoot ))) { + path += strlen( kDTRoot ) - 1; + regEntry = 0; + } + + do { + c = *(path++); + if( ':' == c) + c = '/'; + *(cvtPath++) = c; + } while( c != 0 ); + + if( regEntry) + regEntry = regEntry->childFromPath( buf, gIODTPlane ); + else + regEntry = IORegistryEntry::fromPath( buf, gIODTPlane ); + + if( regEntry) { + MAKE_REG_ENTRY( newEntry, regEntry); + regEntry->release(); + err = noErr; + } else + err = nrNotFoundErr; + + IODelete( buf, char, 512 ); + + return( err ); +} + + +OSStatus +_eRegistryCStrEntryCreate( const RegEntryID * parentEntry, + char * name, + RegEntryID newEntry) +{ + IORegistryEntry * newDev; + IORegistryEntry * parent; + + REG_ENTRY_TO_OBJ( parentEntry, parent) + + // NOT published + + newDev = new IORegistryEntry; + if( newDev && (false == newDev->init())) + newDev = 0; + + if( newDev) { + newDev->attachToParent( parent, gIODTPlane ); + if( ':' == name[0]) + name++; + newDev->setName( name ); + } + + MAKE_REG_ENTRY( newEntry, newDev); + + if( newDev) + return( noErr); + else + return( nrNotCreatedErr); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +extern "C" { + +// in NDRVLibrariesAsm.s +extern void _eSynchronizeIO( void ); + +// platform expert +extern vm_offset_t +PEResidentAddress( vm_offset_t address, vm_size_t length ); + +}; + +enum { + kProcessorCacheModeDefault = 0, + kProcessorCacheModeInhibited = 1, + kProcessorCacheModeWriteThrough = 2, + kProcessorCacheModeCopyBack = 3 +}; + +OSStatus _eSetProcessorCacheMode( UInt32 /* space */, void * /* addr */, + UInt32 /* len */, UInt32 /* mode */ ) +{ +#if 0 + struct phys_entry* pp; + vm_offset_t spa; + vm_offset_t epa; + int wimg; + + // This doesn't change any existing kernel mapping eg. BAT changes etc. + // but this is enough to change user level mappings for DPS etc. + // Should use a kernel service when one is available. 
+ + spa = kvtophys( (vm_offset_t)addr); + if( spa == 0) { + spa = PEResidentAddress( (vm_offset_t)addr, len); + if( spa == 0) + return( kIOReturnVMError); + } + epa = (len + spa + 0xfff) & 0xfffff000; + spa &= 0xfffff000; + + switch( mode) { + case kProcessorCacheModeWriteThrough: + wimg = PTE_WIMG_WT_CACHED_COHERENT_GUARDED; + break; + case kProcessorCacheModeCopyBack: + wimg = PTE_WIMG_CB_CACHED_COHERENT_GUARDED; + break; + default: + wimg = PTE_WIMG_UNCACHED_COHERENT_GUARDED; + break; + } + + while( spa < epa) { + pp = pmap_find_physentry(spa); + if (pp != PHYS_NULL) + pp->pte1.bits.wimg = wimg; + spa += PAGE_SIZE; + } +#endif + _eSynchronizeIO(); + return( noErr); +} + +char * _ePStrCopy( char *to, const char *from ) +{ + UInt32 len; + char * copy; + + copy = to; + len = *(from++); + *(copy++) = len; + bcopy( from, copy, len); + return( to); +} + +LogicalAddress _ePoolAllocateResident(ByteCount byteSize, Boolean clear) +{ + LogicalAddress mem; + + mem = (LogicalAddress) kern_os_malloc( (size_t) byteSize ); + if( clear && mem) + memset( mem, 0, byteSize); + + return( mem); +} + +OSStatus _ePoolDeallocate( LogicalAddress address ) +{ + kern_os_free( (void *) address ); + return( noErr); +} + +UInt32 _eCurrentExecutionLevel(void) +{ + return(0); // == kTaskLevel, HWInt == 6 +} + +// don't expect any callers of this +OSErr _eIOCommandIsComplete( UInt32 /* commandID */, OSErr result) +{ + LOG("_eIOCommandIsComplete\n"); + return( result); // !!??!! +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include + + +AbsoluteTime _eUpTime( void ) +{ + AbsoluteTime result; + + clock_get_uptime( &result); + + return( result); +} + +AbsoluteTime _eAddAbsoluteToAbsolute(AbsoluteTime left, AbsoluteTime right) +{ + AbsoluteTime result = left; + + ADD_ABSOLUTETIME( &left, &right); + + return( result); +} + + +AbsoluteTime _eSubAbsoluteFromAbsolute(AbsoluteTime left, AbsoluteTime right) +{ + AbsoluteTime result = left; + + // !! 
ATI bug fix here: + // They expect the 64-bit result to be signed. The spec says < 0 => 0 + // To workaround, make sure this routine takes 10 us to execute. + IODelay( 10); + + if( CMP_ABSOLUTETIME( &result, &right) < 0) { + AbsoluteTime_to_scalar( &result ) = 0; + } else { + result = left; + SUB_ABSOLUTETIME( &result, &right); + } + + return( result); +} + + +AbsoluteTime _eDurationToAbsolute( Duration theDuration) +{ + AbsoluteTime result; + + if( theDuration > 0) { + clock_interval_to_absolutetime_interval( theDuration, kMillisecondScale, + &result ); + + } else { + clock_interval_to_absolutetime_interval( (-theDuration), kMicrosecondScale, + &result ); + } + + return( result); +} + +AbsoluteTime _eAddDurationToAbsolute( Duration duration, AbsoluteTime absolute ) +{ + return( _eAddAbsoluteToAbsolute(_eDurationToAbsolute( duration), absolute)); +} + +#define UnsignedWideToUInt64(x) (*(UInt64 *)(x)) +#define UInt64ToUnsignedWide(x) (*(UnsignedWide *)(x)) + +AbsoluteTime _eNanosecondsToAbsolute ( UnsignedWide theNanoseconds) +{ + AbsoluteTime result; + UInt64 nano = UnsignedWideToUInt64(&theNanoseconds); + + nanoseconds_to_absolutetime( nano, &result); + + return( result); +} + +UnsignedWide _eAbsoluteToNanoseconds( AbsoluteTime absolute ) +{ + UnsignedWide result; + UInt64 nano; + + absolutetime_to_nanoseconds( absolute, &nano); + result = UInt64ToUnsignedWide( &nano ); + + return( result); +} + +Duration _eAbsoluteDeltaToDuration( AbsoluteTime left, AbsoluteTime right ) +{ + Duration dur; + AbsoluteTime result; + UInt64 nano; + + if( CMP_ABSOLUTETIME( &left, &right) < 0) + return( 0); + + result = left; + SUB_ABSOLUTETIME( &result, &right); + absolutetime_to_nanoseconds( result, &nano); + + if( nano >= ((1ULL << 31) * 1000ULL)) { + // +ve milliseconds + if( nano >= ((1ULL << 31) * 1000ULL * 1000ULL)) + dur = 0x7fffffff; + else + dur = nano / 1000000ULL; + } else { + // -ve microseconds + dur = -(nano / 1000ULL); + } + + return( dur); +} + + +OSStatus 
_eDelayForHardware( AbsoluteTime time ) +{ + AbsoluteTime deadline; + + clock_absolutetime_interval_to_deadline( time, &deadline ); + clock_delay_until( deadline ); + + return( noErr); +} + +OSStatus _eDelayFor( Duration theDuration ) +{ +#if 1 + +// In Marconi, DelayFor uses the old toolbox Delay routine +// which is based on the 60 Hz timer. Durations are not +// rounded up when converting to ticks. Yes, really. +// Some ATI drivers call DelayFor(1) 50000 times starting up. +// There is some 64-bit math there so we'd better reproduce +// the overhead of that calculation. + +#define DELAY_FOR_TICK_NANO 16666666 +#define DELAY_FOR_TICK_MILLI 17 +#define NANO32_MILLI 4295 + + UnsignedWide nano; + AbsoluteTime abs; + unsigned int ms; + + abs = _eDurationToAbsolute( theDuration); + nano = _eAbsoluteToNanoseconds( abs); + + ms = (nano.lo / DELAY_FOR_TICK_NANO) * DELAY_FOR_TICK_MILLI; + ms += nano.hi * NANO32_MILLI; + if( ms) + IOSleep( ms); + +#else + // Accurate, but incompatible, version + +#define SLEEP_THRESHOLD 5000 + + if( theDuration < 0) { + + // us duration + theDuration -= theDuration; + if( theDuration > SLEEP_THRESHOLD) + IOSleep( (theDuration + 999) / 1000); + else + IODelay( theDuration); + + } else { + + // ms duration + if( theDuration > (SLEEP_THRESHOLD / 1000)) + IOSleep( theDuration ); // ms + else + IODelay( theDuration * 1000); // us + } +#endif + + return( noErr); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSStatus _eCallOSTrapUniversalProc( UInt32 /* theProc */, + UInt32 procInfo, UInt32 trap, UInt8 * pb ) +{ + OSStatus err = -40; + struct PMgrOpParamBlock { + SInt16 pmCommand; + SInt16 pmLength; + UInt8 * pmSBuffer; + UInt8 * pmRBuffer; + UInt8 pmData[4]; + }; +#define readExtSwitches 0xDC + + if( (procInfo == 0x133822) + && (trap == 0xa085) ) { + + PMgrOpParamBlock * pmOp = (PMgrOpParamBlock *) pb; + + if( (readExtSwitches == pmOp->pmCommand) && pmOp->pmRBuffer) { + OSNumber * num = 
OSDynamicCast(OSNumber, + IOService::getPlatform()->getProperty("AppleExtSwitchBootState")); + *pmOp->pmRBuffer = (num->unsigned32BitValue() & 1); + err = noErr; + } + + } else if( (procInfo == 0x133822) + && (trap == 0xa092) ) { + + UInt8 addr, reg, data; + + addr = pb[ 2 ]; + reg = pb[ 3 ]; + pb = *( (UInt8 **) ((UInt32) pb + 8)); + data = pb[ 1 ]; + (*PE_write_IIC)( addr, reg, data ); + err = noErr; + } + return( err); +} + +const UInt32 * _eGetKeys( void ) +{ + static const UInt32 zeros[] = { 0, 0, 0, 0 }; + + return( zeros); +} + +UInt32 _eGetIndADB( void * adbInfo, UInt32 /* index */) +{ + bzero( adbInfo, 10); + return( 0); // orig address +} + +char * _eLMGetPowerMgrVars( void ) +{ + static char * powerMgrVars = NULL; + + if( powerMgrVars == NULL) { + powerMgrVars = (char *) IOMalloc( 0x3c0); + if( powerMgrVars) + bzero( powerMgrVars, 0x3c0); + } + return( powerMgrVars); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSStatus _eNoErr( void ) +{ + return( noErr); +} + +OSStatus _eFail( void ) +{ + return( -40); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// fix this! 

// Hard-coded physical addresses of Heathrow / Gossamer I/O registers
// (the "fix this!" above refers to these). Only valid on the G3 boards
// this library targets.
#define heathrowID		((volatile UInt32 *)0xf3000034)
#define heathrowTermEna		(1 << 3)
#define heathrowTermDir		(1 << 0)

#define heathrowFeatureControl	((volatile UInt32 *)0xf3000038)
#define heathrowMBRES		(1 << 24)

#define heathrowBrightnessControl ((volatile UInt8 *)0xf3000032)
#define defaultBrightness	144
#define heathrowContrastControl ((volatile UInt8 *)0xf3000033)
#define defaultContrast		183

#define gossamerSystemReg1	((volatile UInt16 *)0xff000004)
#define gossamerAllInOne	(1 << 4)

// Set (state == 1) or clear (state == 0) the MBRES bit in the Heathrow
// feature-control register; any other state leaves the register unchanged.
void _eATISetMBRES( UInt32 state )
{
    UInt32	value;

    value = *heathrowFeatureControl;

    if( state == 0)
        value &= ~heathrowMBRES;
    else if( state == 1)
        value |= heathrowMBRES;

    *heathrowFeatureControl = value;
    eieio();	// order the store before subsequent I/O
}

// Enable monitor termination and set its direction via the Heathrow ID
// register. Termination output is always enabled; "enable" selects the
// direction bit.
void _eATISetMonitorTermination( Boolean enable )
{

    UInt32	value;

    value = *heathrowID;

    value |= heathrowTermEna;
    if( enable)
        value |= heathrowTermDir;
    else
        value &= ~heathrowTermDir;

    *heathrowID = value;
    eieio();
}

// Returns true on Gossamer all-in-one machines (bit clear in system reg 1).
// On the first positive answer, programs default brightness/contrast once.
Boolean _eATIIsAllInOne( void )
{
    Boolean	rtn;
    static bool	didBrightness;

    rtn = (0 == ((*gossamerSystemReg1) & gossamerAllInOne));
    if( rtn && !didBrightness) {
        *heathrowBrightnessControl = defaultBrightness;
        eieio();
        *heathrowContrastControl = defaultContrast;
        eieio();
        didBrightness = true;
    }
    return( rtn);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Dispatcher installed with registerInterrupt(): walks an interrupt set,
// calling each member's handler tvector until one reports completion or
// redirects into a child set.
static void IONDRVInterruptAction( OSObject * target, void * refCon,
					IOService * provider, int index )
{
    IONDRVInterruptSet *	set;
    IONDRVInterruptSource *	source;
    SInt32			result;

    set = (IONDRVInterruptSet *) target;
    index++;	// NDRV set members are 1-based; IOKit indexes are 0-based

    do {

        assert( (UInt32) index <= set->count);
        if( (UInt32) index > set->count)
            break;

        source = set->sources + index;
        result = CallTVector( set, (void *) index, source->refCon, 0, 0, 0,
                                source->handler );

        switch( result ) {

            case kIONDRVIsrIsNotComplete:
                index++;
                /* fallthrough */
            case kIONDRVIsrIsComplete:
                break;

            case 
kIONDRVMemberNumberParent: + assert( false ); + break; + + default: + index = result; + set = set->child; + break; + } + + } while( result != kIONDRVIsrIsComplete ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static SInt32 IONDRVStdInterruptHandler( IONDRVInterruptSetMember setMember, + void *refCon, UInt32 theIntCount ) +{ +// assert( false ); + + return( kIONDRVIsrIsComplete ); +} + +static bool IONDRVStdInterruptDisabler( IONDRVInterruptSetMember setMember, + void *refCon ) +{ + IONDRVInterruptSet * set; + IONDRVInterruptSource * source; + bool was; + + set = (IONDRVInterruptSet *) setMember.setID; + assert( OSDynamicCast( IONDRVInterruptSet, set )); + assert( setMember.member <= set->count ); + source = set->sources + setMember.member; + + was = source->enabled; + source->enabled = false; + + assert( set->provider ); + set->provider->disableInterrupt( setMember.member - 1 ); + + return( was ); +} + +static void IONDRVStdInterruptEnabler( IONDRVInterruptSetMember setMember, + void *refCon ) +{ + IONDRVInterruptSet * set; + IONDRVInterruptSource * source; + + set = (IONDRVInterruptSet *) setMember.setID; + assert( OSDynamicCast( IONDRVInterruptSet, set )); + assert( setMember.member <= set->count ); + source = set->sources + setMember.member; + + source->enabled = true; + + assert( set->provider ); + + if( !source->registered) { + source->registered = true; + set->provider->registerInterrupt( setMember.member - 1, set, + &IONDRVInterruptAction, (void *) 0x53 ); + } + + set->provider->enableInterrupt( setMember.member - 1 ); +} + +static IOTVector tvIONDRVStdInterruptHandler = { IONDRVStdInterruptHandler, 0 }; +static IOTVector tvIONDRVStdInterruptEnabler = { IONDRVStdInterruptEnabler, 0 }; +static IOTVector tvIONDRVStdInterruptDisabler = { IONDRVStdInterruptDisabler, 0 }; + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSStatus +_eGetInterruptFunctions( void * setID, + UInt32 member, + 
void ** refCon, + IOTVector ** handler, + IOTVector ** enabler, + IOTVector ** disabler ) +{ + IONDRVInterruptSet * set; + IONDRVInterruptSource * source; + OSStatus err = noErr; + + set = (IONDRVInterruptSet *) setID; + assert( OSDynamicCast( IONDRVInterruptSet, set )); + assert( member <= set->count ); + source = set->sources + member; + + if( refCon) + *refCon = source->refCon; + if( handler) + *handler = source->handler; + if( enabler) + *enabler = source->enabler; + if( disabler) + *disabler = source->disabler; + + return( err); +} + +IOReturn +IONDRVInstallInterruptFunctions(void * setID, + UInt32 member, + void * refCon, + IOTVector * handler, + IOTVector * enabler, + IOTVector * disabler ) +{ + IONDRVInterruptSet * set; + IONDRVInterruptSource * source; + OSStatus err = noErr; + + set = (IONDRVInterruptSet *) setID; + assert( OSDynamicCast( IONDRVInterruptSet, set )); + if( member > set->count ) + return( paramErr ); + source = set->sources + member; + + source->refCon = refCon; + if( handler) + source->handler = handler; + if( enabler) + source->enabler = enabler; + if( disabler) + source->disabler = disabler; + + return( err); +} + +OSStatus +_eInstallInterruptFunctions(void * setID, + UInt32 member, + void * refCon, + IOTVector * handler, + IOTVector * enabler, + IOTVector * disabler ) +{ + return( IONDRVInstallInterruptFunctions( setID, member, refCon, + handler, enabler, disabler )); +} + +OSStatus +_eCreateInterruptSet( void * parentSet, + UInt32 parentMember, + UInt32 setSize, + void ** setID, + IOOptionBits options ) +{ + IONDRVInterruptSet * set; + IONDRVInterruptSet * newSet; + IONDRVInterruptSource * source; + OSStatus err = noErr; + + set = (IONDRVInterruptSet *) parentSet; + assert( OSDynamicCast( IONDRVInterruptSet, set )); + assert( parentMember <= set->count ); + source = set->sources + parentMember; + + newSet = IONDRVInterruptSet::with( 0, options, setSize ); + assert( newSet ); + + if( newSet) for( UInt32 i = 1; i <= setSize; i++ ) { + + 
source = newSet->sources + i; + source->handler = &tvIONDRVStdInterruptHandler; + source->enabler = &tvIONDRVStdInterruptEnabler; + source->disabler = &tvIONDRVStdInterruptDisabler; + } + + set->child = newSet; + *setID = newSet; + + return( err ); +} + +OSStatus +_eDeleteInterruptSet( void * setID ) +{ + IONDRVInterruptSet * set; + OSStatus err = noErr; + + set = (IONDRVInterruptSet *) setID; + assert( OSDynamicCast( IONDRVInterruptSet, set )); + + set->release(); + + return( err ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define MAKEFUNC(s,e) { s, e, 0 } + +static FunctionEntry PCILibFuncs[] = +{ + MAKEFUNC( "ExpMgrConfigReadLong", _eExpMgrConfigReadLong), + MAKEFUNC( "ExpMgrConfigReadWord", _eExpMgrConfigReadWord), + MAKEFUNC( "ExpMgrConfigReadByte", _eExpMgrConfigReadByte), + MAKEFUNC( "ExpMgrConfigWriteLong", _eExpMgrConfigWriteLong), + MAKEFUNC( "ExpMgrConfigWriteWord", _eExpMgrConfigWriteWord), + MAKEFUNC( "ExpMgrConfigWriteByte", _eExpMgrConfigWriteByte), + + MAKEFUNC( "ExpMgrIOReadLong", _eExpMgrIOReadLong), + MAKEFUNC( "ExpMgrIOReadWord", _eExpMgrIOReadWord), + MAKEFUNC( "ExpMgrIOReadByte", _eExpMgrIOReadByte), + MAKEFUNC( "ExpMgrIOWriteLong", _eExpMgrIOWriteLong), + MAKEFUNC( "ExpMgrIOWriteWord", _eExpMgrIOWriteWord), + MAKEFUNC( "ExpMgrIOWriteByte", _eExpMgrIOWriteByte), + + MAKEFUNC( "EndianSwap16Bit", _eEndianSwap16Bit), + MAKEFUNC( "EndianSwap32Bit", _eEndianSwap32Bit) +}; + +static FunctionEntry VideoServicesLibFuncs[] = +{ + MAKEFUNC( "VSLPrepareCursorForHardwareCursor", + IONDRVFramebuffer::VSLPrepareCursorForHardwareCursor), + MAKEFUNC( "VSLNewInterruptService", IONDRVFramebuffer::VSLNewInterruptService), + MAKEFUNC( "VSLDisposeInterruptService", IONDRVFramebuffer::VSLDisposeInterruptService), + MAKEFUNC( "VSLDoInterruptService", IONDRVFramebuffer::VSLDoInterruptService), + MAKEFUNC( "VSLSetDisplayConfiguration", _eVSLSetDisplayConfiguration) +}; + +static FunctionEntry NameRegistryLibFuncs[] = +{ + 
MAKEFUNC( "RegistryEntryIDCopy", _eRegistryEntryIDCopy), + MAKEFUNC( "RegistryEntryIDInit", _eRegistryEntryIDInit), + MAKEFUNC( "RegistryEntryIDDispose", _eNoErr), + MAKEFUNC( "RegistryEntryIDCompare", _eRegistryEntryIDCompare), + MAKEFUNC( "RegistryPropertyGetSize", _eRegistryPropertyGetSize), + MAKEFUNC( "RegistryPropertyGet", _eRegistryPropertyGet), + MAKEFUNC( "RegistryPropertyGetMod", _eRegistryPropertyGetMod), + MAKEFUNC( "RegistryPropertySetMod", _eRegistryPropertySetMod), + + MAKEFUNC( "RegistryPropertyIterateCreate", _eRegistryPropertyIterateCreate), + MAKEFUNC( "RegistryPropertyIterateDispose", _eRegistryPropertyIterateDispose), + MAKEFUNC( "RegistryPropertyIterate", _eRegistryPropertyIterate), + + MAKEFUNC( "RegistryEntryIterateCreate", _eRegistryEntryIterateCreate), + MAKEFUNC( "RegistryEntryIterateDispose", _eRegistryEntryIterateDispose), + MAKEFUNC( "RegistryEntryIterate", _eRegistryEntryIterate), + MAKEFUNC( "RegistryCStrEntryToName", _eRegistryCStrEntryToName), + MAKEFUNC( "RegistryCStrEntryLookup", _eRegistryCStrEntryLookup), + + MAKEFUNC( "RegistryCStrEntryCreate", _eRegistryCStrEntryCreate), + MAKEFUNC( "RegistryEntryDelete", _eNoErr), + + MAKEFUNC( "RegistryPropertyCreate", _eRegistryPropertyCreate), + MAKEFUNC( "RegistryPropertyDelete", _eRegistryPropertyDelete), + MAKEFUNC( "RegistryPropertySet", _eRegistryPropertySet) +}; + + +static FunctionEntry DriverServicesLibFuncs[] = +{ + MAKEFUNC( "SynchronizeIO", _eSynchronizeIO), + MAKEFUNC( "SetProcessorCacheMode", _eSetProcessorCacheMode), + MAKEFUNC( "BlockCopy", bcopy), + MAKEFUNC( "BlockMove", bcopy), + MAKEFUNC( "BlockMoveData", bcopy), + MAKEFUNC( "CStrCopy", strcpy), + MAKEFUNC( "CStrCmp", strcmp), + MAKEFUNC( "CStrLen", strlen), + MAKEFUNC( "CStrCat", strcat), + MAKEFUNC( "CStrNCopy", strncpy), + MAKEFUNC( "CStrNCmp", strncmp), + MAKEFUNC( "CStrNCat", strncat), + MAKEFUNC( "PStrCopy", _ePStrCopy), + + MAKEFUNC( "PoolAllocateResident", _ePoolAllocateResident), + MAKEFUNC( 
"MemAllocatePhysicallyContiguous", _ePoolAllocateResident), + MAKEFUNC( "PoolDeallocate", _ePoolDeallocate), + + MAKEFUNC( "UpTime", _eUpTime), + MAKEFUNC( "AbsoluteDeltaToDuration", _eAbsoluteDeltaToDuration), + MAKEFUNC( "AddAbsoluteToAbsolute", _eAddAbsoluteToAbsolute), + MAKEFUNC( "SubAbsoluteFromAbsolute", _eSubAbsoluteFromAbsolute), + MAKEFUNC( "AddDurationToAbsolute", _eAddDurationToAbsolute), + MAKEFUNC( "NanosecondsToAbsolute", _eNanosecondsToAbsolute), + MAKEFUNC( "AbsoluteToNanoseconds", _eAbsoluteToNanoseconds), + MAKEFUNC( "DurationToAbsolute", _eDurationToAbsolute), + MAKEFUNC( "DelayForHardware", _eDelayForHardware), + MAKEFUNC( "DelayFor", _eDelayFor), + + MAKEFUNC( "CurrentExecutionLevel", _eCurrentExecutionLevel), + MAKEFUNC( "IOCommandIsComplete", _eIOCommandIsComplete), + + MAKEFUNC( "SysDebugStr", _eNoErr), + MAKEFUNC( "SysDebug", _eNoErr), + + MAKEFUNC( "CompareAndSwap", OSCompareAndSwap), + + MAKEFUNC( "CreateInterruptSet", _eCreateInterruptSet), + MAKEFUNC( "DeleteInterruptSet", _eDeleteInterruptSet), + MAKEFUNC( "GetInterruptFunctions", _eGetInterruptFunctions), + MAKEFUNC( "InstallInterruptFunctions", _eInstallInterruptFunctions) + +}; + +static FunctionEntry ATIUtilsFuncs[] = +{ + // Gossamer onboard ATI + MAKEFUNC( "ATISetMBRES", _eATISetMBRES), + MAKEFUNC( "ATISetMonitorTermination", _eATISetMonitorTermination), + MAKEFUNC( "ATIIsAllInOne", _eATIIsAllInOne) +}; + +// These are all out of spec + +static FunctionEntry InterfaceLibFuncs[] = +{ + // Apple control : XPRam and EgretDispatch + MAKEFUNC( "CallUniversalProc", _eFail), + MAKEFUNC( "CallOSTrapUniversalProc", _eCallOSTrapUniversalProc), + + // Apple chips65550 +// MAKEFUNC( "NewRoutineDescriptor", _eCallOSTrapUniversalProc), +// MAKEFUNC( "DisposeRoutineDescriptor", _eNoErr), +// MAKEFUNC( "InsTime", _eInsTime), +// MAKEFUNC( "PrimeTime", _ePrimeTime), + + // Radius PrecisionColor 16 + MAKEFUNC( "CountADBs", _eNoErr), + MAKEFUNC( "GetIndADB", _eGetIndADB), + MAKEFUNC( "GetKeys", 
_eGetKeys) +}; + +static FunctionEntry PrivateInterfaceLibFuncs[] = +{ + // Apple chips65550 + MAKEFUNC( "LMGetPowerMgrVars", _eLMGetPowerMgrVars ) +}; + +#define NUMLIBRARIES 7 +const ItemCount IONumNDRVLibraries = NUMLIBRARIES; +LibraryEntry IONDRVLibraries[ NUMLIBRARIES ] = +{ + { "PCILib", sizeof(PCILibFuncs) / sizeof(FunctionEntry), PCILibFuncs }, + { "VideoServicesLib", sizeof(VideoServicesLibFuncs) / sizeof(FunctionEntry), VideoServicesLibFuncs }, + { "NameRegistryLib", sizeof(NameRegistryLibFuncs) / sizeof(FunctionEntry), NameRegistryLibFuncs }, + { "DriverServicesLib", sizeof(DriverServicesLibFuncs) / sizeof(FunctionEntry), DriverServicesLibFuncs }, + + // G3 + { "ATIUtils", sizeof(ATIUtilsFuncs) / sizeof(FunctionEntry), ATIUtilsFuncs }, + + // out of spec stuff + { "InterfaceLib", sizeof(InterfaceLibFuncs) / sizeof(FunctionEntry), InterfaceLibFuncs }, + { "PrivateInterfaceLib", sizeof(PrivateInterfaceLibFuncs) / sizeof(FunctionEntry), PrivateInterfaceLibFuncs } +}; + +} /* extern "C" */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super OSObject + +OSDefineMetaClassAndStructors(IONDRVInterruptSet, OSObject) + +IONDRVInterruptSet * IONDRVInterruptSet::with(IOService * provider, + IOOptionBits options, SInt32 count ) +{ + IONDRVInterruptSet * set; + + set = new IONDRVInterruptSet; + if( set && !set->init()) { + set->release(); + set = 0; + } + + if( set) { + + set->provider = provider; + set->options = options; + set->count = count; + + count++; + set->sources = IONew( IONDRVInterruptSource, count ); + assert( set->sources ); + bzero( set->sources, count * sizeof( IONDRVInterruptSource)); + } + + return( set ); +} + +void IONDRVInterruptSet::free() +{ + if( sources) + IODelete( sources, IONDRVInterruptSource, count + 1 ); + + super::free(); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if NDRVLIBTEST + +static void IONDRVLibrariesTest( IOService * provider ) +{ + UInt64 
nano; + UnsignedWide nano2; + AbsoluteTime abs1, abs2; + + nano = 1000ULL; + abs1 = _eNanosecondsToAbsolute(UInt64ToUnsignedWide(&nano)); + IOLog("_eNanosecondsToAbsolute %08lx:%08lx\n", abs1.hi, abs1.lo); + nano2 = _eAbsoluteToNanoseconds(abs1); + IOLog("_eAbsoluteToNanoseconds %08lx:%08lx\n", nano2.hi, nano2.lo); + AbsoluteTime_to_scalar(&abs2) = 0; + IOLog("_eAbsoluteDeltaToDuration %ld\n", _eAbsoluteDeltaToDuration(abs1,abs2)); + + nano = 0x13161b000ULL; + abs1 = _eNanosecondsToAbsolute(UInt64ToUnsignedWide(&nano)); + IOLog("_eNanosecondsToAbsolute %08lx:%08lx\n", abs1.hi, abs1.lo); + nano2 = _eAbsoluteToNanoseconds(abs1); + IOLog("_eAbsoluteToNanoseconds %08lx:%08lx\n", nano2.hi, nano2.lo); + AbsoluteTime_to_scalar(&abs2) = 0; + IOLog("_eAbsoluteDeltaToDuration %ld\n", _eAbsoluteDeltaToDuration(abs1,abs2)); + + nano = 0x6acfc00000000ULL; + abs1 = _eNanosecondsToAbsolute(UInt64ToUnsignedWide(&nano)); + IOLog("_eNanosecondsToAbsolute %08lx:%08lx\n", abs1.hi, abs1.lo); + nano2 = _eAbsoluteToNanoseconds(abs1); + IOLog("_eAbsoluteToNanoseconds %08lx:%08lx\n", nano2.hi, nano2.lo); + AbsoluteTime_to_scalar(&abs2) = 0; + IOLog("_eAbsoluteDeltaToDuration %ld\n", _eAbsoluteDeltaToDuration(abs1,abs2)); + + abs1 = _eUpTime(); + IODelay(10); + abs2 = _eUpTime(); + IOLog("10us duration %ld\n", _eAbsoluteDeltaToDuration(abs2,abs1)); + + abs1 = _eUpTime(); + for( int i =0; i < 50000; i++) + _eDelayFor(1); + abs2 = _eUpTime(); + IOLog("50000 DelayFor(1) %ld\n", _eAbsoluteDeltaToDuration(abs2,abs1)); + + abs1 = _eUpTime(); + _eDelayFor(50); + abs2 = _eUpTime(); + IOLog("DelayFor(50) %ld\n", _eAbsoluteDeltaToDuration(abs2,abs1)); + + abs1 = _eDurationToAbsolute( -10); + IOLog("_eDurationToAbsolute(-10) %08lx:%08lx\n", abs1.hi, abs1.lo); + abs1 = _eDurationToAbsolute( 10); + IOLog("_eDurationToAbsolute(10) %08lx:%08lx\n", abs1.hi, abs1.lo); + +} +#endif + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IONDRVLibrariesInitialize( IOService 
* provider ) +{ + IODTPlatformExpert * platform; + const OSSymbol * sym; + OSData * data; + OSArray * intSpec; + unsigned int len, i; + +#if NDRVLIBTEST + IONDRVLibrariesTest( provider ); +#endif + + // copy nvram property + + if( (platform = OSDynamicCast( IODTPlatformExpert, + IOService::getPlatform()))) { + +// IOService::waitForService( IOService::resourceMatching( "IONVRAM" )); + + if( kIOReturnSuccess == platform->readNVRAMProperty( provider, + &sym, &data )) { + + IONDRVSetNVRAMPropertyName( provider, sym ); + provider->setProperty( sym, data); + data->release(); + sym->release(); + } + } + + // create interrupt properties, if none present + + if( (intSpec = (OSArray *)provider->getProperty( gIOInterruptSpecifiersKey)) + && (0 == provider->getProperty( gIODTAAPLInterruptsKey ))) { + // make AAPL,interrupts property if not present (NW) + for( i = 0, len = 0; i < intSpec->getCount(); i++ ) { + data = (OSData *) intSpec->getObject(i); + assert( data ); + len += data->getLength(); + } + if( len) + data = OSData::withCapacity( len ); + if( data) { + for( i = 0; i < intSpec->getCount(); i++ ) + data->appendBytes( (OSData *) intSpec->getObject(i)); + provider->setProperty( gIODTAAPLInterruptsKey, data ); + data->release(); + } + } + + // make NDRV interrupts + + data = OSData::withCapacity( kIONDRVISTPropertyMemberCount + * sizeof( IONDRVInterruptSetMember)); + + IONDRVInterruptSetMember setMember; + IONDRVInterruptSet * set; + IONDRVInterruptSource * source; + + set = IONDRVInterruptSet::with( provider, 0, + kIONDRVISTPropertyMemberCount ); + + if( set) for( i = 1; i <= kIONDRVISTPropertyMemberCount; i++ ) { + + source = set->sources + i; + source->handler = &tvIONDRVStdInterruptHandler; + source->enabler = &tvIONDRVStdInterruptEnabler; + source->disabler = &tvIONDRVStdInterruptDisabler; + + setMember.setID = (void *) set; + setMember.member = i; + data->appendBytes( &setMember, sizeof( setMember)); + + } else + data = 0; + + if( data) { + provider->setProperty( 
kIONDRVISTPropertyName, data ); + data->release(); + data = 0; + } + + // map memory + + IOItemCount numMaps = provider->getDeviceMemoryCount(); + IOVirtualAddress virtAddress; + + for( i = 0; i < numMaps; i++) { + IODeviceMemory * mem; + IOMemoryMap * map; + bool consoleDevice; + + consoleDevice = (0 != provider->getProperty("AAPL,boot-display")); + + mem = provider->getDeviceMemoryWithIndex( i ); + if( 0 == mem) + continue; + + // set up a 1-1 mapping for the BAT map of the console device + // remove this soon + if( consoleDevice && (0 == mem->map( kIOMapReference))) + mem->setMapping( kernel_task, mem->getPhysicalAddress() ); + + map = mem->map(); + if( 0 == map) { +// IOLog("%s: map[%ld] failed\n", provider->getName(), i); + continue; + } + + virtAddress = map->getVirtualAddress(); + if( !data) + data = OSData::withCapacity( numMaps * sizeof( IOVirtualAddress)); + if( !data) + continue; + data->appendBytes( &virtAddress, sizeof( IOVirtualAddress)); + kprintf("ndrv base = %lx\n", virtAddress); + } + + // NDRV aperture vectors + if( data) { + provider->setProperty( "AAPL,address", data ); + data->release(); + } + + return( kIOReturnSuccess ); +} + diff --git a/iokit/Families/IONDRVSupport/IOPEFInternals.c b/iokit/Families/IONDRVSupport/IOPEFInternals.c new file mode 100644 index 000000000..13c8cc051 --- /dev/null +++ b/iokit/Families/IONDRVSupport/IOPEFInternals.c @@ -0,0 +1,2108 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 21 July 98 - start IOKit + */ + + + +/* + File: PEFLoader.c + + Contains: PEF loader implementation. + + Version: Maxwell + + Copyright: © 1994-1996 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Alan Lillich + + Other Contact: <> + + Technology: Core Runtime + + Writers: + + (AWL) Alan Lillich + (ELE) Erik Eidt + + Change History (most recent first): + + <26> 10/4/96 AWL Disable partial unpacking tests. + <25> 9/26/96 AWL Fix assertions to have the right polarity. + <24> 9/18/96 AWL Simplify UnpackPartialSection. + <23> 8/27/96 AWL Support partial unpacking in PEF_UnpackSection. + <22> 8/23/96 AWL (1379028) Propagate changes from CodeFragmentContainerPriv. + <21> 8/16/96 AWL Isolate memory utilities to work with both CFM and ProtoCFM. + <20> 4/18/96 AWL (1342167) Fix problems with relocations for in-place sections. + <19> 4/2/96 AWL (1336962) Fix checks for missing optional parameters. + <18> 3/7/96 AWL Remove unused variable in PEF_UnpackSection. + <17> 2/28/96 AWL Adapt for new container handler model. + <16> 1/19/96 AWL Changes for D11. + <15> 10/10/95 AWL Minor cleanup for CodeWarrior's strict checking. + <14> 6/14/95 AWL Pick up flags from CFMWhere ASAP. + <13> 5/23/95 AWL Introduce temporary hack to workaround build problem for 68K + ModernOS booting code. *** THIS BREAKS REAL 68K BUILDS! 
*** + <12> 2/8/95 AWL Update debug output calls. + <11> 12/14/94 AWL Changes for Maxwell D4 build. + <10> 12/2/94 AWL Disable reexported import optimization because of problems with + missing weak libraries. It could be put back later with the + addition of a "resolvedImports" bit vector. + <9> 9/9/94 AWL Switch to the "real" API and SPI headers. + <8> 9/2/94 AWL Error codes are now in Errors.h. + <7> 7/28/94 AWL Return cfragSymbolNotFound instead of paramErr from + PLFindExportInfo. (#1177313) + <6> 7/12/94 AWL Fix load-in-place processing in SetRegionAddress. + <5> 6/20/94 AWL Allow the CFL info pointer to be NULL for a "get procs" call to + OpenContainer. + <4> 5/9/94 AWL Change PLGetSpecialSectionInfo to handle some of the wierdness + in nonloaded sections. + <3> 4/28/94 AWL Simplify cross address space use for booting. Fix problem with + load in place, should not require SetRegionAddress. + <2> 2/25/94 AWL Update for Q&D solution to loading across address spaces. + Fix problem in PLGetSpecialSectionInfo switch statement. + <1> 2/15/94 AWL Initial checkin for kernel based CFM. + + ------------------------------------------------------------------------------------ + + <31> 09/15/93 AWL (&ELE) Add CFL prefix to hash functions. + <30> 09/08/93 ELE (&AWL) Fix sneaky little typo that causes load failure. + <29> 08/30/93 AWL Add declaration so that 68K native CFM compiles. + <28> 08/26/93 AWL Move CFTypes.h and CFLoader.h up with other Apple private + headers. + <26> 07/08/93 AWL (&ELE) Fixed version field names in import file IDs. + Remove version < 0 checks as versions are unsigned. + <25> 06/16/93 ELE ELE & AWL Change to New Pool allocation. + <24> 06/09/93 ELE ELE & AWL Fix bug in GetSpecialSection for debugger. + <23> 06/09/93 JRG ELE & AWL Changes: + <22> 06/08/93 ELE (&AWL) Shift to allocation bottleneck. Added support for + packed data sections. Switched to new CFLoader section + attribute bits. 
+ <21> 02/15/93 ELE Changed NewPtr->NewPtrSys + <20> 02/03/93 ELE Added architecture pass thru to GetVersion per CFL Spec. + <19> 12/23/92 ELE Fixed bug where init routine was being returned for the + term routine. + <17> 10/29/92 ELE GetVersion - added dateStamp. + <16> 10/01/92 ELE fix bug in use in place, update of header! + <15> 10/01/92 ELE fix bug in use in place! + <14> 09/28/92 ELE needed to update field expIndex from Find/GetExportInfo. + <13> 09/23/92 ELE updated to new PEF format, updated to new CF Loader SPI. + <12> 09/23/92 ELE Latest version. + +*/ + + +#include "IOPEFInternals.h" + +// =========================================================================================== + +#define PEF_Assert(a) if( !(a)) kprintf("PEF_Assert:") +#define PEF_BlockMove(src,dst,len) memcpy(dst,src,len) +#define PEF_BlockClear(dst,len) memset(dst,0,len) +extern Boolean PCFM_CompareBytes ( const Byte * left, + const Byte * right, + ByteCount count ); +#define PEF_CompareBytes(a,b,c) PCFM_CompareBytes(a,b,c) + +#define EnableCFMDebugging 0 + +// =========================================================================================== + + +enum { + kPEFHandlerProcCount = 18 +}; + +static CFContHandlerProcs PEFHandlerProcs = { + kPEFHandlerProcCount, + kCFContHandlerABIVersion, + + PEF_OpenContainer, // 1 + PEF_CloseContainer, // 2 + PEF_GetContainerInfo, // 3 + + PEF_GetSectionCount, // 4 + PEF_GetSectionInfo, // 5 + PEF_FindSectionInfo, // 6 + PEF_SetSectionAddress, // 7 + + PEF_GetAnonymousSymbolLocations, // 8 + + PEF_GetExportedSymbolCount, // 9 + PEF_GetExportedSymbolInfo, // 10 + PEF_FindExportedSymbolInfo, // 11 + + PEF_GetImportCounts, // 12 + PEF_GetImportedLibraryInfo, // 13 + PEF_GetImportedSymbolInfo, // 14 + PEF_SetImportedSymbolAddress, // 15 + + PEF_UnpackSection, // 16 + PEF_RelocateSection, // 17 + PEF_RelocateImportsOnly, // 18 +}; + + +#if EnableCFMDebugging + static char gDebugMessage [256]; +#endif + +// 
=========================================================================================== + +const unsigned char opcode [128] = { + krDDAT,krDDAT,krDDAT,krDDAT, krDDAT,krDDAT,krDDAT,krDDAT, + krDDAT,krDDAT,krDDAT,krDDAT, krDDAT,krDDAT,krDDAT,krDDAT, + krDDAT,krDDAT,krDDAT,krDDAT, krDDAT,krDDAT,krDDAT,krDDAT, + krDDAT,krDDAT,krDDAT,krDDAT, krDDAT,krDDAT,krDDAT,krDDAT, + + krCODE,krDATA,krDESC,krDSC2, krVTBL,krSYMR,krXXXX,krXXXX, + krXXXX,krXXXX,krXXXX,krXXXX, krXXXX,krXXXX,krXXXX,krXXXX, + krSYMB,krCDIS,krDTIS,krSECN, krXXXX,krXXXX,krXXXX,krXXXX, + krXXXX,krXXXX,krXXXX,krXXXX, krXXXX,krXXXX,krXXXX,krXXXX, + + krDELT,krDELT,krDELT,krDELT, krDELT,krDELT,krDELT,krDELT, + krRPT ,krRPT ,krRPT ,krRPT , krRPT ,krRPT ,krRPT ,krRPT , + krLABS,krLABS,krLSYM,krLSYM, krXXXX,krXXXX,krXXXX,krXXXX, + krLRPT,krLRPT,krLSEC,krLSEC, krXXXX,krXXXX,krXXXX,krXXXX, + + krXXXX,krXXXX,krXXXX,krXXXX, krXXXX,krXXXX,krXXXX,krXXXX, + krXXXX,krXXXX,krXXXX,krXXXX, krXXXX,krXXXX,krXXXX,krXXXX, + krXXXX,krXXXX,krXXXX,krXXXX, krXXXX,krXXXX,krXXXX,krXXXX, + krXXXX,krXXXX,krXXXX,krXXXX, krXXXX,krXXXX,krXXXX,krXXXX, +}; + +// ¤ +// =========================================================================================== +// GetNameLength () +// ================ + + +static ByteCount GetNameLength ( BytePtr nameStart ) +{ + BytePtr nameEnd = nameStart; + + + if ( nameStart != NULL ) { + while ( *nameEnd != 0 ) nameEnd += 1; + } + + return (nameEnd - nameStart); + + +} // GetNameLength () + + +// ¤ +// =========================================================================================== +// FindRelocationInfo () +// ===================== + + +static LoaderRelExpHeader * FindRelocationInfo ( PEFPrivateInfo * pefPrivate, + ItemCount sectionIndex ) +{ + LoaderRelExpHeader * relocInfo = NULL; + const ItemCount loopLimit = pefPrivate->ldrHeader->numSections; + ItemCount relocIndex; + + + for ( relocIndex = 0; relocIndex < loopLimit; relocIndex += 1 ) { + relocInfo = 
&pefPrivate->ldrSections[relocIndex]; + if ( sectionIndex == relocInfo->sectionNumber ) return relocInfo; + } + return NULL; + + +} // FindRelocationInfo () + + +// ¤ +// =========================================================================================== +// GetSectionName () +// ================= + + +static void GetSectionName ( PEFPrivateInfo * pefPrivate, + SectionHeader * sectionHeader, + CFContHashedName * sectionName ) +{ + CFContStringHash nameHash = 0; + BytePtr nameText = NULL; + ByteCount nameLength; + + + if ( sectionHeader->sectionName != -1 ) { + nameText = pefPrivate->stringTable + sectionHeader->sectionName; + nameLength = GetNameLength ( nameText ); + nameHash = CFContHashName ( nameText, nameLength ); + } + + sectionName->nameHash = nameHash; + sectionName->nameText = nameText; + + +} // GetSectionName () + + +// ¤ +// =========================================================================================== +// PEF_OpenContainer () +// ==================== + + +OSStatus PEF_OpenContainer ( LogicalAddress mappedAddress, + LogicalAddress runningAddress, + ByteCount containerLength, + KernelProcessID runningProcessID, + const CFContHashedName * cfragName, + CFContOpenOptions options, + CFContAllocateMem Allocate, + CFContReleaseMem Release, + CFContHandlerRef * containerRef, + CFContHandlerProcsPtr * handlerProcs ) +{ + #pragma unused ( containerLength ) + #pragma unused ( runningProcessID ) + #pragma unused ( cfragName ) + + OSStatus err = -1;//cfragCFMInternalErr; + FileHeader * fileHeader = (FileHeader *) mappedAddress; + PEFPrivateInfo * pefPrivate = NULL; + SectionHeader * loaderSection = NULL; + SInt32 sectionIndex; + + + if ( (sizeof ( PEF_SBits32 ) != 4) | (sizeof ( PEF_UBits32 ) != 4) ) goto InternalError; // ! Is "int" 32 bits? + + if ( (Allocate == NULL) || + (Release == NULL) || + (containerRef == NULL) || + (handlerProcs == NULL) ) goto ParameterError; + + *containerRef = NULL; // Clear for errors, only set on OK path. 
+ *handlerProcs = NULL; + + + // --------------------------------------------------------------------------------- + // Allow the container address to be null as a special case to get the loader procs. + // Otherwise validate the header as acceptable PEF. + + if ( mappedAddress == NULL ) goto OK; + + if ( (fileHeader->magic1 != kPEFMagic1) || + (fileHeader->magic2 != kPEFMagic2) || + (fileHeader->fileTypeID != kPEFTypeID) || + (fileHeader->versionNumber != kPEFVersion) ) goto FragmentFormatError; + + + // ----------------------------------------------- + // Allocate and initialize the private info block. + + pefPrivate = (PEFPrivateInfo *) ((*Allocate) ( sizeof ( PEFPrivateInfo ) )); + if ( pefPrivate == NULL ) goto PrivateMemoryError; + + PEF_BlockClear ( pefPrivate, sizeof ( *pefPrivate ) ); + + pefPrivate->Allocate = Allocate; + pefPrivate->Release = Release; + pefPrivate->mappedContainer = (BytePtr) mappedAddress; + pefPrivate->runningContainer = (BytePtr) runningAddress; + pefPrivate->sectionCount = fileHeader->loadableSections; + pefPrivate->sections = (SectionHeader *) (fileHeader + 1); + pefPrivate->stringTable = (BytePtr) (&pefPrivate->sections[fileHeader->numberSections]); + pefPrivate->loadInPlace = ((options & kCFContPrepareInPlaceMask) != 0); + + // ----------------------------------------------------- + // Find the loader section and extract important fields. 
+ + for ( sectionIndex = 0; sectionIndex < fileHeader->numberSections; sectionIndex += 1 ) { + loaderSection = & pefPrivate->sections[sectionIndex]; + if ( loaderSection->regionKind == kPEFLoaderSection ) break; + } + if ( sectionIndex == fileHeader->numberSections ) goto FragmentCorruptError; + + pefPrivate->ldrSectionNo = sectionIndex; + pefPrivate->ldrHeader = (LoaderHeader *) ((BytePtr)mappedAddress + loaderSection->containerOffset); + pefPrivate->ldrStringTable = (BytePtr)pefPrivate->ldrHeader + pefPrivate->ldrHeader->stringsOffset; + + pefPrivate->ldrImportFiles = (LoaderImportFileID *) (pefPrivate->ldrHeader + 1); + pefPrivate->ldrImportSymbols = (LoaderImport *) (pefPrivate->ldrImportFiles + pefPrivate->ldrHeader->numImportFiles); + pefPrivate->ldrSections = (LoaderRelExpHeader *) (pefPrivate->ldrImportSymbols + pefPrivate->ldrHeader->numImportSyms); + pefPrivate->ldrRelocations = (BytePtr)pefPrivate->ldrHeader + pefPrivate->ldrHeader->relocationsOffset; + + pefPrivate->ldrHashSlot = (HashSlotEntry *) ((BytePtr)pefPrivate->ldrHeader + pefPrivate->ldrHeader->hashSlotTable); + pefPrivate->ldrHashChain = (HashChainEntry *) (pefPrivate->ldrHashSlot + (1 << pefPrivate->ldrHeader->hashSlotTabSize)); + pefPrivate->ldrExportSymbols = (LoaderExport *) (pefPrivate->ldrHashChain + pefPrivate->ldrHeader->numExportSyms); + + // ---------------------------------------------------- + // Set up the array to store resolved import addresses. + + if ( pefPrivate->ldrHeader->numImportSyms > 0 ) { + pefPrivate->imports = (BytePtr *) ((*Allocate) ( pefPrivate->ldrHeader->numImportSyms * sizeof ( BytePtr ) )); + if ( pefPrivate->imports == NULL ) goto PrivateMemoryError; + } + + // ----------------------------------------------------------------- + // Set up the pointers to the arrays of section origins and offsets. 
+ + if (pefPrivate->sectionCount <= kBuiltinSectionArraySize) { + pefPrivate->mappedOrigins = & pefPrivate->originArray[0]; + pefPrivate->runningOffsets = & pefPrivate->offsetArray[0]; + } else { + pefPrivate->mappedOrigins = (BytePtr *) ((*Allocate) ( pefPrivate->sectionCount * sizeof ( BytePtr ) )); + if ( pefPrivate->mappedOrigins == NULL ) goto PrivateMemoryError; + pefPrivate->runningOffsets = (ByteCount *) ((*Allocate) ( pefPrivate->sectionCount * sizeof ( ByteCount ) )); + if ( pefPrivate->runningOffsets == NULL ) goto PrivateMemoryError; + } + + // --------------------------------------------------------------------------------------- + // Fill in the origin and offset arrays. The origin array gives the base address of the + // section instance as visible in the loader's address space. I.e. it tells the loader + // where it can access the loaded section contents. The offset array tells what to add + // for relocations refering to that section. So it must be based on running addresses and + // must "remove" the presumed running address. If the section will be used in place we + // must compute the final values here. Otherwise SetRegionAddress will be called later to + // provide the mapped and running addresses. Validate load in place restrictions too. + + // ??? We really ought to consider getting rid of the preset for in-place usage and make + // ??? that case as close as possible to the normal case. + + // ! Note that although the ByteCount type used in the offset arrays is unsigned, ignoring + // ! overflow lets things work right for a full -4GB to +4GB offset range. + + for ( sectionIndex = 0; sectionIndex < pefPrivate->sectionCount; sectionIndex += 1 ) { + + SectionHeader * section = & pefPrivate->sections[sectionIndex]; + + pefPrivate->mappedOrigins[sectionIndex] = (BytePtr) -1; // ! Just a diagnostic tag. + pefPrivate->runningOffsets[sectionIndex] = - ((ByteCount) section->sectionAddress); // Subtract the presumed address. 
+ + if ( pefPrivate->loadInPlace ) { + if ( (section->regionKind == kPEFPIDataSection) || (section->execSize != section->rawSize) ) goto FragmentUsageError; + section->sectionAddress = pefPrivate->runningContainer + section->containerOffset; + pefPrivate->mappedOrigins[sectionIndex] = pefPrivate->mappedContainer + section->containerOffset; + pefPrivate->runningOffsets[sectionIndex] += (ByteCount) section->sectionAddress; // Add in the new address. + } + + } + + if ( options & kCFContPrepareInPlaceMask ) fileHeader->memoryAddress = runningAddress; + + +OK: + err = noErr; + *handlerProcs = &PEFHandlerProcs; + *containerRef = (CFContHandlerRef) pefPrivate; + +EXIT: + return err; + +ERROR: + (void) PEF_CloseContainer ( (CFContHandlerRef) pefPrivate, kNilOptions ); + goto EXIT; + +InternalError: + err = cfragCFMInternalErr; + goto ERROR; + +ParameterError: + err = paramErr; + goto ERROR; + +FragmentFormatError: + err = cfragFragmentFormatErr; + goto ERROR; + +PrivateMemoryError: + err = cfragNoPrivateMemErr; + goto ERROR; + +FragmentCorruptError: + err = cfragFragmentCorruptErr; + goto ERROR; + +FragmentUsageError: + err = cfragFragmentUsageErr; + goto ERROR; + + +} // PEF_OpenContainer () + + +// ¤ +// =========================================================================================== +// PEF_CloseContainer () +// ===================== + + +OSStatus PEF_CloseContainer ( CFContHandlerRef containerRef, + CFContCloseOptions options ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + CFContReleaseMem Release = NULL; + + + if ( pefPrivate == NULL ) goto OK; // Simplifies error cleanup from PEF_OpenContainer. 
+ + + Release = pefPrivate->Release; + + if ( pefPrivate->sectionCount > kBuiltinSectionArraySize ) { + if ( pefPrivate->mappedOrigins != NULL ) { + (*Release) ( pefPrivate->mappedOrigins ); + pefPrivate->mappedOrigins = NULL; + } + if ( pefPrivate->runningOffsets != NULL ) { + (*Release) ( pefPrivate->runningOffsets ); + pefPrivate->runningOffsets = NULL; + } + } + + if ( pefPrivate->imports != NULL ) { + (*Release) ( pefPrivate->imports ); + pefPrivate->imports = NULL; + } + + pefPrivate->resolved = 0; // ! Disables reexported import optimization. + + if ( ! (options & kCFContPartialCloseMask) ) (*Release) ( pefPrivate ); + + +OK: + err = noErr; + return err; + +} // PEF_CloseContainer () + + +// ¤ +// =========================================================================================== +// PEF_GetContainerInfo () +// ======================= + + +OSStatus PEF_GetContainerInfo ( CFContHandlerRef containerRef, + PBVersion infoVersion, + CFContContainerInfo * containerInfo ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + FileHeader * fileHeader = NULL; + + + if ( (pefPrivate == NULL) || (containerInfo == NULL) ) goto ParameterError; + if ( infoVersion != kCFContContainerInfoVersion ) goto ParameterError; + + + fileHeader = (FileHeader *) pefPrivate->mappedContainer; + + containerInfo->cfragName.nameHash = 0; // PEF does not have an embedded name. 
+ containerInfo->cfragName.nameText = NULL; + + containerInfo->modDate = fileHeader->dateTimeStamp; + containerInfo->architecture = fileHeader->architectureID; + containerInfo->currentVersion = fileHeader->currentVersion; + containerInfo->oldImpVersion = fileHeader->oldImpVersion; + containerInfo->oldDefVersion = fileHeader->oldDefVersion; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetContainerInfo () + + +// ¤ +// =========================================================================================== +// PEF_GetSectionCount () +// ====================== + + +OSStatus PEF_GetSectionCount ( CFContHandlerRef containerRef, + ItemCount * sectionCount ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + + + if ( (pefPrivate == NULL) || (sectionCount == NULL) ) goto ParameterError; + + *sectionCount = pefPrivate->sectionCount; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetSectionCount () + + +// ¤ +// =========================================================================================== +// PEF_GetSectionInfo () +// ===================== + + +OSStatus PEF_GetSectionInfo ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + PBVersion infoVersion, + CFContSectionInfo * sectionInfo ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + SectionHeader * sectionHeader = NULL; + + + if ( (pefPrivate == NULL) || (sectionInfo == NULL) ) goto ParameterError; + if ( infoVersion != kCFContSectionInfoVersion ) goto ParameterError; + if ( sectionIndex >= pefPrivate->sectionCount ) goto ParameterError; + + + sectionHeader = &pefPrivate->sections[sectionIndex]; + + GetSectionName ( pefPrivate, sectionHeader, §ionInfo->sectionName ); + + sectionInfo->sharing = sectionHeader->shareKind; + 
sectionInfo->alignment = sectionHeader->alignment; + sectionInfo->reservedA = 0; + sectionInfo->containerOffset = sectionHeader->containerOffset; + sectionInfo->containerLength = sectionHeader->rawSize; + sectionInfo->unpackedLength = sectionHeader->initSize; + sectionInfo->totalLength = sectionHeader->execSize; + sectionInfo->defaultAddress = sectionHeader->sectionAddress; + + sectionInfo->options = kNilOptions; + if ( FindRelocationInfo ( pefPrivate, sectionIndex ) != NULL ) sectionInfo->options |= kRelocatedCFContSectionMask; + + switch ( pefPrivate->sections[sectionIndex].regionKind ) { + case kPEFCodeSection : + sectionInfo->access = kCFContNormalCode; + break; + case kPEFDataSection : + sectionInfo->access = kCFContWriteableData; + break; + case kPEFPIDataSection : + sectionInfo->access = kCFContWriteableData; + sectionInfo->options |= kPackedCFContSectionMask; + break; + case kPEFConstantSection : + sectionInfo->access = kCFContReadOnlyData; + break; + case kPEFExecDataSection : + sectionInfo->access = kCFContWriteableData | kCFContMemExecuteMask; + break; + default : + sectionInfo->access = kCFContReadOnlyData; // ! Not necessarily right, but safe. + break; + } + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetSectionInfo () + + +// ¤ +// =========================================================================================== +// PEF_FindSectionInfo () +// ====================== + + +OSStatus PEF_FindSectionInfo ( CFContHandlerRef containerRef, + const CFContHashedName * sectionName, + PBVersion infoVersion, + ItemCount * sectionIndex, // May be null. + CFContSectionInfo * sectionInfo ) // May be null. 
+{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + SectionHeader * sectionHeader = NULL; + CFContHashedName hashedName; + + ItemCount tempIndex; + CFContSectionInfo tempInfo; + + + if ( pefPrivate == NULL ) goto ParameterError; + if ( (sectionInfo != NULL) && (infoVersion != kCFContSectionInfoVersion) ) goto ParameterError; + + if ( sectionIndex == NULL ) sectionIndex = &tempIndex; + if ( sectionInfo == NULL ) sectionInfo = &tempInfo; + + + for ( tempIndex = 0; tempIndex < pefPrivate->sectionCount; tempIndex += 1 ) { + sectionHeader = &pefPrivate->sections[tempIndex]; + GetSectionName ( pefPrivate, sectionHeader, &hashedName ); + if ( (hashedName.nameHash == sectionName->nameHash) && + (PEF_CompareBytes ( hashedName.nameText, sectionName->nameText, CFContStringHashLength ( hashedName.nameHash ) )) ) break; + } + if ( tempIndex == pefPrivate->sectionCount ) goto NoSectionError; + *sectionIndex = tempIndex; + + err = PEF_GetSectionInfo ( containerRef, tempIndex, infoVersion, sectionInfo ); + if ( err != noErr ) goto ERROR; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + +NoSectionError: + err = cfragNoSectionErr; + goto ERROR; + + +} // PEF_FindSectionInfo () + + +// ¤ +// =========================================================================================== +// PEF_SetSectionAddress () +// ======================== + + +OSStatus PEF_SetSectionAddress ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + LogicalAddress mappedAddress, + LogicalAddress runningAddress ) +{ + OSErr err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + SectionHeader * section = NULL; + + + if ( (pefPrivate == NULL) || (sectionIndex >= pefPrivate->sectionCount) ) goto ParameterError; + + + // -------------------------------------------------------------------------------------- + // For a load in place usage we've 
already set the addresses, make sure these match. + // Otherwise set both addresses. Note that the "presumed" address is already subtracted. + + section = & pefPrivate->sections[sectionIndex]; + + if ( ! pefPrivate->loadInPlace ) { + + pefPrivate->mappedOrigins[sectionIndex] = (BytePtr) mappedAddress; + pefPrivate->runningOffsets[sectionIndex] += (ByteCount) runningAddress; + + } else { + + if ( (runningAddress != section->sectionAddress) || + (mappedAddress != pefPrivate->mappedOrigins[sectionIndex]) ) goto UsageError; + + } + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + +UsageError: + err = cfragFragmentUsageErr; + goto ERROR; + + +} // PEF_SetSectionAddress () + + +// ¤ +// =========================================================================================== +// PEF_GetAnonymousSymbolLocations () +// ================================== + + +extern OSStatus PEF_GetAnonymousSymbolLocations ( CFContHandlerRef containerRef, + CFContLogicalLocation * mainLocation, // May be null. + CFContLogicalLocation * initLocation, // May be null. + CFContLogicalLocation * termLocation ) // May be null. 
+{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + LoaderHeader * ldrHeader = NULL; + + CFContLogicalLocation tempLocation; + + + if ( (pefPrivate == NULL) ) goto ParameterError; + + if ( mainLocation == NULL ) mainLocation = &tempLocation; + if ( initLocation == NULL ) initLocation = &tempLocation; + if ( termLocation == NULL ) termLocation = &tempLocation; + + + ldrHeader = pefPrivate->ldrHeader; + + mainLocation->section = ldrHeader->entryPointSection; + mainLocation->offset = ldrHeader->entryPointOffset; + + initLocation->section = ldrHeader->initPointSection; + initLocation->offset = ldrHeader->initPointOffset; + + termLocation->section = ldrHeader->termPointSection; + termLocation->offset = ldrHeader->termPointOffset; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetAnonymousSymbolLocations () + + +// ¤ +// =========================================================================================== +// PEF_GetExportedSymbolCount () +// ============================= + + +extern OSStatus PEF_GetExportedSymbolCount ( CFContHandlerRef containerRef, + ItemCount * exportCount ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + + + if ( (pefPrivate == NULL) || (exportCount == NULL) ) goto ParameterError; + + *exportCount = pefPrivate->ldrHeader->numExportSyms; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetExportedSymbolCount () + + +// ¤ +// =========================================================================================== +// PEF_GetExportedSymbolInfo () +// ============================ + + +OSStatus PEF_GetExportedSymbolInfo ( CFContHandlerRef containerRef, + CFContSignedIndex exportIndex, + PBVersion infoVersion, + CFContExportedSymbolInfo * exportInfo ) +{ + OSStatus err = 
cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + LoaderExport * exportedSymbol = NULL; + + + if ( (pefPrivate == NULL) || (exportInfo == NULL) ) goto ParameterError; + if ( exportIndex >= pefPrivate->ldrHeader->numExportSyms ) goto ParameterError; + if ( infoVersion != kCFContExportedSymbolInfoVersion ) goto ParameterError; + + + if ( exportIndex >= 0 ) { + + exportedSymbol = &pefPrivate->ldrExportSymbols[exportIndex]; + + exportInfo->symbolName.nameHash = pefPrivate->ldrHashChain[exportIndex].hashword; + exportInfo->symbolName.nameText = &pefPrivate->ldrStringTable[exportedSymbol->nameOffset]; + + exportInfo->symbolClass = exportedSymbol->symClass; + exportInfo->reservedA = 0; + exportInfo->reservedB = 0; + exportInfo->options = kNilOptions; + + exportInfo->location.section = exportedSymbol->sectionNumber; + + #if 1 // *** Disable the reexported import optimization. + exportInfo->location.offset = exportedSymbol->offset; + #else + // This is the buggy optimization. It has problems with missing weak libraries. + // Addition of a "resolvedImports" bit vector is probably the way to fix it, but it + // may not be much of an optimization then. + if ( (! pefPrivate->resolved) || (exportedSymbol->sectionNumber != kReExportImport) ) { + exportInfo->location.offset = exportedSymbol->address; + } else { + exportInfo->location.section = kPhysicalExport; + exportInfo->location.offset = pefPrivate->imports[exportedSymbol->address]; + } + #endif + + } else { + + CFContLogicalLocation mainLocation; + CFContLogicalLocation initLocation; + CFContLogicalLocation termLocation; + + err = PEF_GetAnonymousSymbolLocations ( containerRef, &mainLocation, &initLocation, &termLocation ); + if ( err != noErr ) goto ERROR; + + switch ( exportIndex ) { + case kMainCFragSymbolIndex : + exportInfo->location = mainLocation; + exportInfo->symbolClass = 0xFF; // !!! Ought to have a kUnknownCFragSymbol constant. 
+ break; + case kInitCFragSymbolIndex : + exportInfo->location = initLocation; + exportInfo->symbolClass = kTVectorCFragSymbol; // ! Very well better be! + break; + case kTermCFragSymbolIndex : + exportInfo->location = termLocation; + exportInfo->symbolClass = kTVectorCFragSymbol; // ! Very well better be! + break; + default : + goto ParameterError; + } + + exportInfo->symbolName.nameHash = 0; + exportInfo->symbolName.nameText = NULL; + + exportInfo->reservedA = 0; + exportInfo->reservedB = 0; + exportInfo->options = kNilOptions; + + } + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetExportedSymbolInfo () + + +// ¤ +// =========================================================================================== +// PEF_FindExportedSymbolInfo () +// ============================= + + +OSStatus PEF_FindExportedSymbolInfo ( CFContHandlerRef containerRef, + const CFContHashedName * exportName, + PBVersion infoVersion, + ItemCount * exportIndex_o, // May be null. + CFContExportedSymbolInfo * exportInfo ) // May be null. 
+{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + LoaderExport * exportedSymbol = NULL; + CFContStringHash * hashwordList = NULL; + CFContStringHash * nextHashword = NULL; + HashSlotEntry * hashSlot = NULL; + ByteCount nameLength = CFContStringHashLength ( exportName->nameHash ); + ItemCount exportIndex; + ItemCount slotIndex; + ItemCount chainLimit; + Boolean nameMatch; + + + if ( pefPrivate == NULL ) goto ParameterError; + if ( infoVersion != kCFContExportedSymbolInfoVersion ) goto ParameterError; + + + hashwordList = &pefPrivate->ldrHashChain[0].hashword; + + slotIndex = GetPEFHashSlot ( exportName->nameHash, pefPrivate->ldrHeader->hashSlotTabSize ); + hashSlot = &pefPrivate->ldrHashSlot[slotIndex]; + + exportIndex = hashSlot->chainIndex; + chainLimit = exportIndex + hashSlot->chainCount; + nextHashword = &hashwordList[exportIndex]; + + while ( exportIndex < chainLimit ) { + + if ( *nextHashword == exportName->nameHash ) { + exportedSymbol = &pefPrivate->ldrExportSymbols[exportIndex]; + nameMatch = PEF_CompareBytes ( exportName->nameText, + &pefPrivate->ldrStringTable[exportedSymbol->nameOffset], + nameLength ); + if ( nameMatch ) goto Found; + } + + exportIndex += 1; + nextHashword += 1; // ! Pointer arithmetic. 
+ } + goto NotFoundError; + +Found: + if ( exportIndex_o != NULL ) *exportIndex_o = exportIndex; + if ( exportInfo != NULL ) { + err = PEF_GetExportedSymbolInfo ( containerRef, exportIndex, infoVersion, exportInfo ); + if ( err != noErr ) goto ERROR; + } + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + +NotFoundError: + err = cfragNoSymbolErr; + goto ERROR; + + +} // PEF_FindExportedSymbolInfo () + + +// ¤ +// =========================================================================================== +// PEF_GetImportCounts () +// ====================== + + +OSStatus PEF_GetImportCounts ( CFContHandlerRef containerRef, + ItemCount * libraryCount, // May be null. + ItemCount * symbolCount ) // May be null. +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + + + if ( pefPrivate == NULL ) goto ParameterError; + + if ( libraryCount != NULL ) *libraryCount = pefPrivate->ldrHeader->numImportFiles; + if ( symbolCount != NULL ) *symbolCount = pefPrivate->ldrHeader->numImportSyms; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetImportCounts () + + +// ¤ +// =========================================================================================== +// PEF_GetImportedLibraryInfo () +// ============================= + + +OSStatus PEF_GetImportedLibraryInfo ( CFContHandlerRef containerRef, + ItemCount libraryIndex, + PBVersion infoVersion, + CFContImportedLibraryInfo * libraryInfo ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + LoaderImportFileID * importedLibrary = NULL; + BytePtr nameText = NULL; + ByteCount nameLength; + + + if ( (pefPrivate == NULL) || (libraryInfo == NULL) ) goto ParameterError; + if ( infoVersion != kCFContImportedLibraryInfoVersion ) goto ParameterError; + if ( libraryIndex >= 
pefPrivate->ldrHeader->numImportFiles ) goto ParameterError; + + + importedLibrary = &pefPrivate->ldrImportFiles[libraryIndex]; + + nameText = &pefPrivate->ldrStringTable[importedLibrary->fileNameOffset]; + nameLength = GetNameLength ( nameText ); + + libraryInfo->libraryName.nameHash = CFContHashName ( nameText, nameLength ); + libraryInfo->libraryName.nameText = nameText; + + libraryInfo->linkedVersion = importedLibrary->linkedVersion; + libraryInfo->oldImpVersion = importedLibrary->oldImpVersion; + libraryInfo->options = kNilOptions; + + if ( importedLibrary->options & kPEFInitBeforeMask ) libraryInfo->options |= kCFContInitBeforeMask; + if ( importedLibrary->options & kPEFWeakLibraryMask ) libraryInfo->options |= kCFContWeakLibraryMask; + if ( importedLibrary->options & kPEFDeferredBindMask ) libraryInfo->options |= kCFContDeferredBindMask; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_GetImportedLibraryInfo () + + +// ¤ +// =========================================================================================== +// PEF_GetImportedSymbolInfo () +// ============================ + + +OSStatus PEF_GetImportedSymbolInfo ( CFContHandlerRef containerRef, + ItemCount symbolIndex, + PBVersion infoVersion, + CFContImportedSymbolInfo * symbolInfo ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + LoaderImport * importedSymbol = NULL; + LoaderImportFileID * importedLibrary = NULL; + BytePtr nameText = NULL; + ByteCount nameLength; + ItemCount libraryCount; + ItemCount libraryIndex; + + + if ( (pefPrivate == NULL) || (symbolInfo == NULL) ) goto ParameterError; + if ( infoVersion != kCFContImportedSymbolInfoVersion ) goto ParameterError; + if ( symbolIndex >= pefPrivate->ldrHeader->numImportSyms ) goto ParameterError; + + + importedSymbol = &pefPrivate->ldrImportSymbols[symbolIndex]; + libraryCount = 
pefPrivate->ldrHeader->numImportFiles; + + nameText = &pefPrivate->ldrStringTable[importedSymbol->nameOffset]; + nameLength = GetNameLength ( nameText ); + + symbolInfo->symbolName.nameHash = CFContHashName ( nameText, nameLength ); + symbolInfo->symbolName.nameText = nameText; + + symbolInfo->symbolClass = importedSymbol->symClass & 0x0F; + symbolInfo->reservedA = 0; + symbolInfo->reservedB = 0; + symbolInfo->options = 0; + + if ( importedSymbol->symClass & kPEFWeakSymbolMask ) symbolInfo->options |= kCFContWeakSymbolMask; + + for ( libraryIndex = 0; libraryIndex < libraryCount; libraryIndex += 1 ) { + importedLibrary = &pefPrivate->ldrImportFiles[libraryIndex]; + if ( (importedLibrary->impFirst <= symbolIndex) && + (symbolIndex < (importedLibrary->impFirst + importedLibrary->numImports)) ) { + break; + } + } + if ( libraryIndex == libraryCount ) goto FragmentCorruptError; + + symbolInfo->libraryIndex = libraryIndex; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + +FragmentCorruptError: + err = cfragFragmentCorruptErr; + goto ERROR; + + +} // PEF_GetImportedSymbolInfo () + + +// ¤ +// =========================================================================================== +// PEF_SetImportedSymbolAddress () +// =============================== + + +OSStatus PEF_SetImportedSymbolAddress ( CFContHandlerRef containerRef, + ItemCount symbolIndex, + LogicalAddress symbolAddress ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + + + if ( pefPrivate == NULL ) goto ParameterError; + if ( symbolIndex >= pefPrivate->ldrHeader->numImportSyms ) goto ParameterError; + + + pefPrivate->imports[symbolIndex] = symbolAddress; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_SetImportedSymbolAddress () + + +// ¤ +// 
=========================================================================================== +// GetPackedDataCount () +// ===================== + + +static UInt32 GetPackedDataCount ( UInt8 * * byteHandle ) +{ + UInt32 count = 0; + UInt8 * bytePtr = *byteHandle; + UInt8 currByte; + + + do { + currByte = *bytePtr++; + count = (count << kPEFPkDataVCountShift) | (currByte & kPEFPkDataVCountMask); + } while ( (currByte & kPEFPkDataVCountEndMask) != 0 ); + + *byteHandle = bytePtr; + + return count; + + +} // GetPackedDataCount () + + +// ¤ +// =========================================================================================== +// UnpackFullSection () +// ==================== + + +// ------------------------------------------------------------------------------------------ +// This is the "normal" case from CFM, unpacking all of the packed portion. Along the way we +// make sure we're not writing beyond the end of the unpacked data. At the end we make sure +// that all we didn't read past the end of the packed data, and that all of the output was +// written. + +// ! Note that the xyzEnd pointers are the actual end of the range, not one byte beyond. This +// ! routine will work if the output end address is 0xFFFFFFFF, but not if the packed end is. + +// ! Don't do range comparisons as "(lowAddr + length) > highAddr", because this might wrap +// ! the end high end of the address space. Always do "(highAddr - lowAddr) > length". + +// ??? We should gather some statistics on actual usage to see whether it is worthwhile to +// ??? have local customized code for common cases. E.g. block fill of 1, 2, or 4 bytes, or +// ??? of interleaved repeats with 1/2/4 byte common or custom portions. + + +static OSStatus UnpackFullSection ( BytePtr packedBase, + BytePtr packedEnd, + BytePtr outputBase, + BytePtr outputEnd ) +{ + OSStatus err = cfragCFMInternalErr; + BytePtr packedPos = packedBase; + BytePtr outputPos = outputBase; + BytePtr outPosLimit = outputEnd + 1; // ! 
Might be zero if outputEnd is 0xFFFFFFFF. + + UInt8 currByte; + UInt8 opcode; + UInt32 count1; + UInt32 count2; + UInt32 count3; + + + if ( (packedEnd + 1) == 0 ) goto FragmentUsageError; + + + while ( packedPos <= packedEnd ) { + + + currByte = *packedPos++; + opcode = currByte >> kPEFPkDataOpcodeShift; + count1 = currByte & kPEFPkDataCount5Mask; + + if ( count1 == 0 ) count1 = GetPackedDataCount ( &packedPos ); + + + switch ( opcode ) { + + + case kPEFPkDataZero : + + if ( (outPosLimit - outputPos) < count1 ) goto FragmentCorruptError; + + PEF_BlockClear ( outputPos, count1 ); + outputPos += count1; + + break; + + + case kPEFPkDataBlock : + + if ( (outPosLimit - outputPos) < count1 ) goto FragmentCorruptError; + + PEF_BlockMove ( packedPos, outputPos, count1 ); + packedPos += count1; + outputPos += count1; + + break; + + + case kPEFPkDataRepeat : // ??? Need a BlockFill routine? + + count2 = GetPackedDataCount ( &packedPos ) + 1; // ! Stored count is 1 less. + + if ( (outPosLimit - outputPos) < (count1 * count2) ) goto FragmentCorruptError; + + if ( count1 == 1 ) { // ??? Is this worth the bother? Other sizes? + + currByte = *packedPos++; + for ( ; count2 != 0; count2 -= 1 ) *outputPos++ = currByte; + + } else { + + for ( ; count2 != 0; count2 -= 1 ) { + PEF_BlockMove ( packedPos, outputPos, count1 ); + outputPos += count1; + } + packedPos += count1; + + } + + break; + + + case kPEFPkDataRepeatBlock : + + count2 = GetPackedDataCount ( &packedPos ); + count3 = GetPackedDataCount ( &packedPos ); + + if ( (outPosLimit - outputPos) < (((count1 + count2) * count3) + count1) ) goto FragmentCorruptError; + + { + BytePtr commonPos = packedPos; + + packedPos += count1; // Skip the common part. 
+ + for ( ; count3 != 0; count3 -= 1 ) { + + PEF_BlockMove ( commonPos, outputPos, count1 ); + outputPos += count1; + + PEF_BlockMove ( packedPos, outputPos, count2 ); + packedPos += count2; + outputPos += count2; + + } + + PEF_BlockMove ( commonPos, outputPos, count1 ); + outputPos += count1; + + } + + break; + + + case kPEFPkDataRepeatZero : + + count2 = GetPackedDataCount ( &packedPos ); + count3 = GetPackedDataCount ( &packedPos ); + + if ( (outPosLimit - outputPos) < (((count1 + count2) * count3) + count1) ) goto FragmentCorruptError; + + PEF_BlockClear ( outputPos, count1 ); + outputPos += count1; + + for ( ; count3 != 0; count3 -= 1 ) { + + PEF_BlockMove ( packedPos, outputPos, count2 ); + packedPos += count2; + outputPos += count2; + + PEF_BlockClear ( outputPos, count1 ); + outputPos += count1; + + } + + break; + + + default : + goto FragmentCorruptError; + + } + + } + + + if ( (packedPos != (packedEnd + 1)) || (outputPos != outPosLimit) ) goto FragmentCorruptError; + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + + +FragmentUsageError: + err = cfragFragmentUsageErr; + goto ERROR; + +FragmentCorruptError: + err = cfragFragmentCorruptErr; + goto ERROR; + + +} // UnpackFullSection () + + +// ¤ +// =========================================================================================== +// UnpackPartialSection () +// ======================= + + +// ------------------------------------------------------------------------------------------- +// This is the case where we want to extract some arbitrary portion of a section as it would +// be when instantiated but not relocated. We have to interpret the packed part up to the +// desired output start, then continue begin unpacking for real. If we run out of packed data +// before filling the output, we fill the rest of the output with zeroes. + +// ! We have to be very careful in the skip logic because the current operation probably spans +// ! the skip/output boundary. 
We have to be similarly careful at the output end because the +// ! current operation probably spans the tail of the output. Don't forget that the partial +// ! output at the start could also fill the output and overflow the tail! + +// ! Note that the xyzEnd pointers are the actual end of the range, not one byte beyond. This +// ! routine might not work if outputEnd is 0xFFFFFFFF. This is because outputPos points to +// ! the next byte to be written. The loops that are controlled by "outputPos < outputBase" +// ! or "outputPos <= outputEnd" would fail in this case if outputPos were "outputEnd + 1", +// ! i.e. outputPos would be zero. + +// ! Don't do range comparisons as "(lowAddr + length) > highAddr", because this might wrap +// ! the end high end of the address space. Always do "(highAddr - lowAddr) > length". + + +// ------------------------------------------------------------------------------------------- + + +static void PartialBlockClear ( BytePtr outputBase, + ByteCount outputStartOffset, + ByteCount outputEndOffset, + ByteCount outputOffset, + ByteCount count ) +{ + + if ( ((outputOffset + count) <= outputStartOffset) || (outputOffset > outputEndOffset) ) return; // Nothing to output. + + if ( outputOffset < outputStartOffset ) { + count -= (outputStartOffset - outputOffset); + outputOffset = outputStartOffset; + } + + if ( count > (outputEndOffset - outputOffset + 1) ) count = outputEndOffset - outputOffset + 1; + + PEF_BlockClear ( outputBase + (outputOffset - outputStartOffset), count ); + +} // PartialBlockClear (); + + +// ------------------------------------------------------------------------------------------- + + +static void PartialBlockMove ( BytePtr source, + BytePtr outputBase, + ByteCount outputStartOffset, + ByteCount outputEndOffset, + ByteCount outputOffset, + ByteCount count ) +{ + + if ( ((outputOffset + count) <= outputStartOffset) || (outputOffset > outputEndOffset) ) return; // Nothing to output. 
+ + if ( outputOffset < outputStartOffset ) { + const ByteCount skipCount = outputStartOffset - outputOffset; + source += skipCount; + count -= skipCount; + outputOffset = outputStartOffset; + } + + if ( count > (outputEndOffset - outputOffset + 1) ) count = outputEndOffset - outputOffset + 1; + + PEF_BlockMove ( source, outputBase + (outputOffset - outputStartOffset), count ); + +} // PartialBlockClear (); + + +// ------------------------------------------------------------------------------------------- + + +static OSStatus UnpackPartialSection ( BytePtr packedBase, + BytePtr packedEnd, + BytePtr outputBase, + BytePtr outputEnd, + ByteCount outputStartOffset ) +{ + OSStatus err = cfragCFMInternalErr; + const ByteCount outputEndOffset = outputStartOffset + (outputEnd - outputBase); + BytePtr packedPos = NULL; + BytePtr packedBoundary = NULL; + ByteCount outputOffset; + ByteCount outputBoundary; + + UInt8 currByte; + UInt8 opcode; + UInt32 count1; + UInt32 count2; + UInt32 count3; + + + if ( ((packedEnd + 1) == 0) || ((outputEnd + 1) == 0) ) goto FragmentUsageError; + + + // -------------------------------------------------------------------------------------- + // Skip the packed data until we get within the output range. We know there is something + // to unpack, otherwise the zero fill of the output would be done by the caller. This + // loop sets outputOffset to the end of what would be unpacked, until the outputOffset is + // beyond the outputStartOffset. I.e. until we hit the first operation that would create + // actual output. + + outputOffset = 0; + packedPos = packedBase; + + do { + + packedBoundary = packedPos; // The start of the current operation. 
+ outputBoundary = outputOffset; + + currByte = *packedPos++; + opcode = currByte >> kPEFPkDataOpcodeShift; + count1 = currByte & kPEFPkDataCount5Mask; + + if ( count1 == 0 ) count1 = GetPackedDataCount ( &packedPos ); + + switch ( opcode ) { + + case kPEFPkDataZero : + outputOffset += count1; + break; + + case kPEFPkDataBlock : + packedPos += count1; + outputOffset += count1; + break; + + case kPEFPkDataRepeat : + count2 = GetPackedDataCount ( &packedPos ) + 1; // ! Stored count is 1 less. + packedPos += count1; + outputOffset += count1 * count2; + break; + + + case kPEFPkDataRepeatBlock : + count2 = GetPackedDataCount ( &packedPos ); + count3 = GetPackedDataCount ( &packedPos ); + packedPos += count1 + (count2 * count3); + outputOffset += count1 + ((count1 + count2) * count3); + break; + + + case kPEFPkDataRepeatZero : + count2 = GetPackedDataCount ( &packedPos ); + count3 = GetPackedDataCount ( &packedPos ); + packedPos += count2 * count3; + outputOffset += count1 + ((count1 + count2) * count3); + break; + + + default : + goto FragmentCorruptError; + + } + + } while ( outputOffset <= outputStartOffset ); + + + //---------------------------------------------------------------------------------------- + // Now do the actual unpacking. This uses a copy of the full unpack logic with special + // block copy/clear routines. These special routines do the bounds checking, only writing + // output where actually allowed. This involves "unnecessary" checks for the "middle" + // operations that are fully within the range, but vastly simplifies the boundary cases. + + packedPos = packedBoundary; // Reset to the operation that spans the output start. 
+ outputOffset = outputBoundary; + + do { + + currByte = *packedPos++; + opcode = currByte >> kPEFPkDataOpcodeShift; + count1 = currByte & kPEFPkDataCount5Mask; + + if ( count1 == 0 ) count1 = GetPackedDataCount ( &packedPos ); + + switch ( opcode ) { + + case kPEFPkDataZero : + PartialBlockClear ( outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + outputOffset += count1; + break; + + case kPEFPkDataBlock : + PartialBlockMove ( packedPos, outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + packedPos += count1; + outputOffset += count1; + break; + + case kPEFPkDataRepeat : // ??? Need a BlockFill routine? + count2 = GetPackedDataCount ( &packedPos ) + 1; // ! Stored count is 1 less. + for ( ; count2 != 0; count2 -= 1 ) { + PartialBlockMove ( packedPos, outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + outputOffset += count1; + } + packedPos += count1; + break; + + case kPEFPkDataRepeatBlock : + + count2 = GetPackedDataCount ( &packedPos ); + count3 = GetPackedDataCount ( &packedPos ); + + { + BytePtr commonPos = packedPos; + + packedPos += count1; // Skip the common part. 
+ + for ( ; count3 != 0; count3 -= 1 ) { + + PartialBlockMove ( commonPos, outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + outputOffset += count1; + + PartialBlockMove ( packedPos, outputBase, outputStartOffset, outputEndOffset, outputOffset, count2 ); + packedPos += count2; + outputOffset += count2; + + } + + PartialBlockMove ( commonPos, outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + outputOffset += count1; + + } + + break; + + case kPEFPkDataRepeatZero : + + count2 = GetPackedDataCount ( &packedPos ); + count3 = GetPackedDataCount ( &packedPos ); + + PartialBlockClear ( outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + outputOffset += count1; + + for ( ; count3 != 0; count3 -= 1 ) { + + PartialBlockMove ( packedPos, outputBase, outputStartOffset, outputEndOffset, outputOffset, count2 ); + packedPos += count2; + outputOffset += count2; + + PartialBlockClear ( outputBase, outputStartOffset, outputEndOffset, outputOffset, count1 ); + outputOffset += count1; + + } + + break; + + default : + goto FragmentCorruptError; + + } + + } while ( (outputOffset <= outputEndOffset) && (packedPos <= packedEnd) ); + + + // ------------------------------------------ + // Finally block clear anything that is left. 
+ + if ( outputOffset <= outputEndOffset ) { + PEF_BlockClear ( outputBase + (outputOffset - outputStartOffset), outputEndOffset - outputOffset + 1 ); + } + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + + +FragmentUsageError: + err = cfragFragmentUsageErr; + goto ERROR; + +FragmentCorruptError: + err = cfragFragmentCorruptErr; + goto ERROR; + + +} // UnpackPartialSection () + + +// ¤ +// =========================================================================================== +// PEF_UnpackSection () +// ==================== + + +OSStatus PEF_UnpackSection ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + ByteCount sectionOffset, + LogicalAddress bufferAddress, + ByteCount bufferLength ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + SectionHeader * section = NULL; + BytePtr packedBase = NULL; + BytePtr packedEnd = NULL; + BytePtr outputBase = bufferAddress; + BytePtr outputEnd = outputBase + bufferLength - 1; + + + if ( pefPrivate == NULL ) goto ParameterError; + if ( sectionIndex >= pefPrivate->sectionCount ) goto ParameterError; + if ( (bufferAddress == NULL) && (bufferLength != 0) ) goto ParameterError; + + section = &pefPrivate->sections[sectionIndex]; + if ( (sectionOffset + bufferLength) > section->execSize ) goto ParameterError; + + packedBase = pefPrivate->mappedContainer + section->containerOffset; + packedEnd = packedBase + section->rawSize - 1; + + + if ( (sectionOffset == 0) && (bufferLength == section->initSize) ) { + + err = UnpackFullSection ( packedBase, packedEnd, outputBase, outputEnd ); + if ( err != noErr ) goto ERROR; + + if ( false && EnableCFMDebugging && (section->execSize > 8) ) { // Force some tests of partial unpacking. 
+ + UInt32 word; + BytePtr partContents = (*pefPrivate->Allocate) ( section->execSize - 2 ); + + PEF_Assert ( partContents != NULL ); + + err = PEF_UnpackSection ( containerRef, sectionIndex, 1, &word, 4 ); + PEF_Assert ( err == noErr ); + + err = PEF_UnpackSection ( containerRef, sectionIndex, section->execSize / 2, &word, 4 ); + PEF_Assert ( err == noErr ); + + err = PEF_UnpackSection ( containerRef, sectionIndex, section->execSize - 5, &word, 4 ); + PEF_Assert ( err == noErr ); + + err = PEF_UnpackSection ( containerRef, sectionIndex, 1, partContents, section->execSize - 2 ); + PEF_Assert ( err == noErr ); + + (*pefPrivate->Release) ( partContents ); + } + + } else { + + if ( section->initSize < sectionOffset ) { + PEF_BlockClear ( bufferAddress, bufferLength ); + } else { + err = UnpackPartialSection ( packedBase, packedEnd, outputBase, outputEnd, sectionOffset ); + if ( err != noErr ) goto ERROR; + } + + if ( EnableCFMDebugging ) { // See if the partial output agrees with full output. + + BytePtr fullContents = (*pefPrivate->Allocate) ( section->execSize ); + + PEF_Assert ( fullContents != NULL ); + PEF_BlockClear ( fullContents, section->execSize ); + + err = UnpackFullSection ( packedBase, packedEnd, fullContents, fullContents + section->initSize - 1 ); + PEF_Assert ( err == noErr ); + + PEF_Assert ( PEF_CompareBytes ( fullContents + sectionOffset, bufferAddress, bufferLength ) ); + + (*pefPrivate->Release) ( fullContents ); + + } + + } + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_UnpackSection () + + +// ¤ +// =========================================================================================== +// PEF_RelocateSection () +// ====================== + + +// *** This needs cleaning up. 
+ + +OSStatus PEF_RelocateSection ( CFContHandlerRef containerRef, + ItemCount sectionIndex ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + + BytePtr * raddr; + ByteCount dataA; + int cnt; // ! Must be signed. + ByteCount codeA; + LoaderRelExpHeader * ldRelHdr; + Relocation *reloc, *rlend; + Relocation r; + long rpt; // ! Must be signed. + long secn; + long rsymi; + BytePtr *imports; + ByteCount *regions; + long i; + long relNum; + BytePtr regStart; + SectionHeader * section; + + + if ( pefPrivate == NULL ) goto ParameterError; + if ( sectionIndex >= pefPrivate->sectionCount ) goto ParameterError; + + regStart = pefPrivate->mappedOrigins[sectionIndex]; + section = & pefPrivate->sections [sectionIndex]; + + pefPrivate->resolved = 1; // !!! Really means relocated, and should be set on exit. + + for (i = 0; ; i++) { + if ( i >= pefPrivate->sectionCount ) return noErr; // No relocations for this section. + ldRelHdr = & pefPrivate->ldrSections [i]; + if ( ldRelHdr->sectionNumber == sectionIndex ) break; + } + + regions = pefPrivate->runningOffsets; + imports = pefPrivate->imports; + + reloc = (Relocation *) (pefPrivate->ldrRelocations + ldRelHdr->relocationsOffset); + rlend = (Relocation *) ((RelocInstr *) reloc + ldRelHdr->numRelocations); + raddr = (BytePtr *) regStart; // ! Change the stride from 1 to 4. + rsymi = 0; + codeA = regions [0]; + dataA = regions [1]; + rpt = 0; + + #if 0 + sprintf ( gDebugMessage, "PLPrepareRegion: start @ %.8X\n", raddr ); + PutSerialMesssage ( gDebugMessage ); + #endif + + relNum = 0; + while (reloc < rlend) { + + r = *reloc; + reloc = (Relocation *) ((RelocInstr *) reloc + 1); + + switch ( opcode [r.opcode.op] ) { + case krDDAT : + raddr = (BytePtr *) ((BytePtr)raddr + (r.deltadata.delta_d4 * 4)); // ! Reduce stride to 1. 
+ cnt = r.deltadata.cnt; + while (--cnt >= 0) { + *raddr++ += dataA; + } + break; + + case krCODE : + cnt = r.run.cnt_m1 + 1; + while (--cnt >= 0) { + *raddr++ += codeA; + } + break; + + case krDATA : + cnt = r.run.cnt_m1 + 1; + while (--cnt >= 0) { + *raddr++ += dataA; + } + break; + + case krDESC : + cnt = r.run.cnt_m1 + 1; + while (--cnt >= 0) { + *raddr++ += codeA; + *raddr++ += dataA; + raddr++; + } + break; + + case krDSC2 : + cnt = r.run.cnt_m1 + 1; + while (--cnt >= 0) { + *raddr++ += codeA; + *raddr++ += dataA; + } + break; + + case krVTBL : + cnt = r.run.cnt_m1 + 1; + while (--cnt >= 0) { + *raddr++ += dataA; + raddr++; + } + break; + + case krSYMR : + cnt = r.run.cnt_m1 + 1; + while (--cnt >= 0) { + *raddr++ += (ByteCount) imports [rsymi++]; + } + break; + + case krSYMB : + rsymi = r.glp.idx; + *raddr++ += (ByteCount) imports [rsymi++]; + break; + + case krCDIS : + codeA = regions [r.glp.idx]; + break; + + case krDTIS : + dataA = regions [r.glp.idx]; + break; + + case krSECN : + *raddr++ += regions [r.glp.idx]; + break; + + case krDELT : + raddr = (BytePtr *) ((BytePtr) raddr + r.delta.delta_m1 + 1); // ! Reduce stride to 1. + #if 0 + sprintf ( gDebugMessage, "PLPrepareRegion: delta to %.8X\n", raddr ); + PutSerialMesssage ( gDebugMessage ); + #endif + break; + + case krRPT : + if (--rpt == 0) break; // count was 1 --> rpt done + if (rpt < 0) // first time rpt encountered? 
+ rpt = r.rpt.rcnt_m1 + 1; // yes- initialize rpt count + cnt = r.rpt.icnt_m1 + 2; // yes or no - back up cnt instrs + reloc = (Relocation *) ((RelocInstr *) reloc - cnt); + break; + + case krLABS : + raddr = (BytePtr *) ((r.large1.idx_top << 16) + reloc->bot + regStart); + reloc = (Relocation *) ((RelocInstr *) reloc + 1); + #if 0 + sprintf ( gDebugMessage, "PLPrepareRegion: abs to %.8X\n", raddr ); + PutSerialMesssage ( gDebugMessage ); + #endif + break; + + case krLSYM : + rsymi = (r.large1.idx_top << 16) + reloc->bot; + reloc = (Relocation *) ((RelocInstr *) reloc + 1); + *raddr++ += (ByteCount) imports [rsymi++]; + break; + + case krLRPT : + if (--rpt == 0) { + reloc = (Relocation *) ((RelocInstr *) reloc + 1); + break; + } + if (rpt < 0) + rpt = (r.large2.idx_top << 16) + reloc->bot; + cnt = r.large2.cnt_m1 + 2; + reloc = (Relocation *) ((RelocInstr *) reloc - cnt); + break; + + case krLSEC : + secn = (r.large2.idx_top << 16) + reloc->bot; + switch (r.large2.cnt_m1) { + case 0 : *raddr++ += regions [secn]; break; + case 1 : codeA = regions [secn]; break; + case 2 : dataA = regions [secn]; break; + } + reloc = (Relocation *) ((RelocInstr *) reloc + 1); + break; + + default : + goto FragmentCorruptError; + } + } + + + #if 0 + sprintf ( gDebugMessage, "PLPrepareRegion: end @ %.8X\n", raddr ); + PutSerialMesssage ( gDebugMessage ); + #endif + + err = noErr; + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + +FragmentCorruptError: + err = cfragFragmentCorruptErr; + goto ERROR; + + +} // PEF_RelocateSection () + + +// ¤ +// =========================================================================================== +// PEF_RelocateImportsOnly () +// ========================== + + +OSStatus PEF_RelocateImportsOnly ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + ItemCount libraryIndex ) +{ + OSStatus err = cfragCFMInternalErr; + PEFPrivateInfo * pefPrivate = (PEFPrivateInfo *) containerRef; + + + if ( 
pefPrivate == NULL ) goto ParameterError; + if ( sectionIndex >= pefPrivate->sectionCount ) goto ParameterError; + if ( libraryIndex >= pefPrivate->ldrHeader->numImportFiles ) goto ParameterError; + + + if ( pefPrivate == NULL ) goto ParameterError; + + + return unimpErr; // !!! Fix this! + +EXIT: + return err; + +ERROR: + goto EXIT; + +ParameterError: + err = paramErr; + goto ERROR; + + +} // PEF_RelocateImportsOnly () + diff --git a/iokit/Families/IONDRVSupport/IOPEFInternals.h b/iokit/Families/IONDRVSupport/IOPEFInternals.h new file mode 100644 index 000000000..083f62c8b --- /dev/null +++ b/iokit/Families/IONDRVSupport/IOPEFInternals.h @@ -0,0 +1,931 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 21 July 98 - start IOKit + */ + + +/* + File: zPEF.h + + Contains: PEF format declarations. + + Version: Maxwell + + Copyright: © 1992-1996 by Apple Computer, Inc., all rights reserved. 
+ + File Ownership: + + DRI: Alan Lillich + + Other Contact: <> + + Technology: Core Runtime + + Writers: + + (AWL) Alan Lillich + (ELE) Erik Eidt + + Change History (most recent first): + + <7> 2/28/96 AWL Adapt for new container handler model. + <6> 4/12/95 AWL Fix bit field problem. + <5> 8/29/94 AWL Remove "never" share mode. + <4> 8/23/94 AWL Update section sharing constants. + <3> 4/28/94 AWL Simplify cross address space use for booting. + <2> 4/11/94 AWL Use 68K alignment for the export symbol structure. + <1> 2/15/94 AWL Initial checkin for kernel based CFM. + + ------------------------------------------------------------------------------------- + + <7> 8/26/93 AWL Move CFTypes.h and CFLoader.h up with other Apple private headers. + <5> 7/8/93 AWL (&ELE) Fixed version field names in import file IDs + <4> 6/9/93 JRG ELE & AWL Changes: + <4> 06/08/93 AWL (&ELE) Added more standard section types and packed data opcodes. + <3> 9/23/92 ELE added precomputed hash table for improved runtime performance. + + Version 1.3 Erik Eidt 9/23/92 updated for new hash table capabilities + Version 1.2 Erik Eidt 7/8/92 updated for new relocations and other loader section size optimizations + Version 1.1 Cheryl Lins 5/27/92 updated for PEF 1.2 definition + Version 1.0 Cheryl Lins 4/7/92 initial version +*/ + + +#ifndef __IOPEFINTERNALS__ +#define __IOPEFINTERNALS__ 1 + +#include "IOPEFLoader.h" + + +typedef signed int PEF_SBits32; // ! Can't use SInt32, it is "signed long". +typedef unsigned int PEF_UBits32; // ! Can't use UInt32, it is "unsigned long". 
+ + +#pragma options align=mac68k + +/*========== File Header ==========*/ + +typedef struct { + UInt16 magic1; /* magic flag describing execution machine and environment */ + UInt16 magic2; /* magic flag describing execution machine and environment */ + OSType fileTypeID; /* OSType identifier = 'pef' */ + OSType architectureID; /* OSType identifier = 'pwpc' */ + UInt32 versionNumber; /* version number of this file format */ + UInt32 dateTimeStamp; /* Macintosh date/time stamp */ + UInt32 oldDefVersion; /* old definition version number */ + UInt32 oldImpVersion; /* old implementation version number */ + UInt32 currentVersion; /* current version number */ + SInt16 numberSections; /* number of sections */ + SInt16 loadableSections; /* number of sections that are loadable for execution, + also the section # of first non-loadable section */ + BytePtr memoryAddress; /* the location this container was last loaded */ +} FileHeader, *FileHeaderPtr; + +#define kPEFVersion 1 /* current version number */ +#define kPEFMagic1 0x4A6F /* value of magic1 for PEF */ +#define kPEFMagic2 0x7921 /* value of magic2 for PEF */ +#define kPEFTypeID 0x70656666 /* value of fileTypeID for 'peff' */ +#define kPowerPCID 0x70777063 /* value of architecture ID 'pwpc' */ + +/*========== Section Header ==========*/ + +typedef struct { + ByteCount sectionName; /* offset into global string table for section name */ + BytePtr sectionAddress; /* preferred base address for the section */ + ByteCount execSize; /* section size in bytes during execution in memory including zero initialization */ + ByteCount initSize; /* section size in bytes during execution in memory before zero initialization */ + ByteCount rawSize; /* section size in bytes in container before loading */ + ByteCount containerOffset;/* container offest to section's raw data */ + UInt8 regionKind; /* section/region classification */ + UInt8 shareKind; /* sharing classification */ + UInt8 alignment; /* execution alignment requirement 
(0=byte,1=half,2=word,3=doubleword,4=quadword..) */ + UInt8 reservedA; +} SectionHeader, *SectionHeaderPtr; + +/* TCFLSectionKind */ +#define kPEFCodeSection 0 +#define kPEFDataSection 1 +#define kPEFPIDataSection 2 +#define kPEFConstantSection 3 +#define kPEFLoaderSection 4 +#define kPEFDebugSection 5 +#define kPEFExecDataSection 6 +#define kPEFExceptionSection 7 +#define kPEFTracebackSection 8 + +/* TCFLShareKind */ +#define kPEFContextShare 1 +#define kPEFGlobalShare 4 +#define kPEFProtectedShare 5 + +/* Defines for PIDataSections */ +#define kPEFZero 0 +#define kPEFBlock 1 +#define kPEFRepeat 2 +#define kPEFRepeatBlock 3 +#define kPEFRepeatZero 4 +#define kPEFNoOpcode 0x0fff +#define kPEFOpcodeShift 5 +#define kPEFFirstOperandMask 31 + + +/*========== Loader Header ==========*/ + +typedef struct { + SInt32 entryPointSection; /* section number containing entry point descriptor */ + ByteCount entryPointOffset; /* offset to entry point descriptor within section */ + + SInt32 initPointSection; /* section number containing entry point descriptor */ + ByteCount initPointOffset; /* offset to entry point descriptor within section */ + + SInt32 termPointSection; /* section number containing entry point descriptor */ + ByteCount termPointOffset; /* offset to entry point descriptor within section */ + + ItemCount numImportFiles; /* number of import file id entries */ + ItemCount numImportSyms; /* number of import symbol table entries */ + ItemCount numSections; /* number of sections with load-time relocations */ + ByteCount relocationsOffset; /* offset to relocation descriptions table */ + + ByteCount stringsOffset; /* offset to loader string table */ + + ByteCount hashSlotTable; /* offset to hash slot table */ + ItemCount hashSlotTabSize; /* number of hash slot entries */ + ItemCount numExportSyms; /* number of export symbol table entries */ +} LoaderHeader, *LoaderHeaderPtr; + +/*========== Loader Section Header ==========*/ + +typedef struct { + SInt16 sectionNumber; 
/* reference to primary section number */ + SInt16 reservedA; /* if TSectNum were 16 bits, which it isn't */ + ItemCount numRelocations; /* number of loader relocations for this section */ + ByteCount relocationsOffset; /* offset to relocation descriptions for this section */ +} LoaderRelExpHeader, *LoaderRelExpHeaderPtr; + +/*========== Loader Import File ID's Entry ==========*/ + +typedef struct { + ByteCount fileNameOffset; /* offset into loader string table for file name */ + UInt32 oldImpVersion; /* oldest compatible implementation library */ + UInt32 linkedVersion; /* current version at link time */ + ItemCount numImports; /* number of imports from this file */ + ItemCount impFirst; /* number of the first imports from this file (relative to all imports) */ + UInt8 options; /* call this libraries initialization routine before mine */ + UInt8 reservedA; + UInt16 reservedB; +} LoaderImportFileID, *LoaderImportFileIDPtr; + +#define kPEFInitBeforeMask 0x80 +#define kPEFWeakLibraryMask 0x40 +#define kPEFDeferredBindMask 0x20 + +/*========== Loader Import Symbol Table Entry ==========*/ + +typedef struct { + PEF_UBits32 symClass : 8; // Actually ot type TCFLSymbolClass. + PEF_UBits32 nameOffset : 24; +} LoaderImport, *LoaderImportPtr; + +#define kPEFWeakSymbolMask 0x80 + +/*========== Loader Export Hash Slot Table Entry ==========*/ + +typedef struct { + PEF_UBits32 chainCount : 14; + PEF_UBits32 chainIndex : 18; +} HashSlotEntry, *HashSlotEntryPtr; + +#define PEFHashHighBits(hashword,bitCount) ((hashword) >> (bitCount)) +#define PEFHashMaskBits(hashword,bitCount) (((UInt32)(1) << (bitCount)) - 1) + +#define GetPEFHashSlot(hashword,bitCount) \ + ( (ItemCount) (((hashword) ^ PEFHashHighBits((hashword),(bitCount))) & PEFHashMaskBits((hashword),(bitCount))) ) + +/*========== Loader Export Hash Chain Table Entry ==========*/ + +typedef struct { + UInt32 hashword; /* (hashword >> 16) == nameLength !! 
*/ +} HashChainEntry, *HashChainEntryPtr; + +/*========== Loader Export Symbol Table Entry ==========*/ + +/* Section number controls how 'address' is interpreted. + >=0: section number exporting the symbol; 'address' is offset from start of the section to + the symbol being exported (ie address of a routine or data item) + -1: value is absolute (non-relocatable) + -2: value is a physical address (non-relocatable) + -3: re-export imported symbol whose number is in 'address' +*/ + +/* this struct is stored in the file, non-aligned: size = 10 */ +typedef struct { + PEF_UBits32 symClass : 8; // Actually ot type TCFLSymbolClass. + PEF_UBits32 nameOffset : 24; + ByteCount offset; /* offset into section to exported symbol */ + SInt16 sectionNumber; +} LoaderExport, *LoaderExportPtr; + +#define SIZEOF_LoaderExport (sizeof (TUnsigned32)*2 + sizeof (SInt16)) + + +#define kPEFAbsoluteExport -1 +#define kPEFPhysicalExport -2 +#define kPEFReExportImport -3 + +/*========== Loader Relocation Entry ==========*/ + +typedef UInt16 RelocInstr; + +typedef union { + struct { unsigned op:7, rest:9; } opcode; + struct { unsigned op:2, delta_d4:8, cnt:6; } deltadata; + struct { unsigned op:7, cnt_m1:9; } run; + struct { unsigned op:7, idx:9; } glp; + struct { unsigned op:4, delta_m1:12; } delta; + struct { unsigned op:4, icnt_m1:4, rcnt_m1:8; } rpt; + struct { unsigned op:6, idx_top:10; } large1; + struct { unsigned op:6, cnt_m1:4, idx_top:6; } large2; + UInt16 instr; + UInt16 bot; +} Relocation; + +// opcode definitions which can be used with +// Relocation.opcode.op:7, if masked properly +// by the up coming table +// (NOTE: a half word of 0 is garunteed to be an unused relocation instruction) + +#define krDDAT 0x00 // type deltadata + +#define krCODE 0x20 // type run +#define krDATA 0x21 // type run +#define krDESC 0x22 // type run +#define krDSC2 0x23 // type run +#define krVTBL 0x24 // type run +#define krSYMR 0x25 // type run +// 0x26 +// 0x2F + +#define krSYMB 0x30 // type glp 
+#define krCDIS 0x31 // type glp +#define krDTIS 0x32 // type glp +#define krSECN 0x33 // type glp +// 0x34 +// 0x3F + +#define krDELT 0x40 // type delta +#define krRPT 0x48 // type rpt + +#define krLABS 0x50 // type large1 +#define krLSYM 0x52 // type large1 +// 0x54 +// 0x56 + +#define krLRPT 0x58 // type large2 +#define krLSEC 0x5A // type large2 +// 0x5C +// 0x5E + + // LSEC usage: + // LSEC 0, n -- Long SECN + // LSEC 1, n -- Long CDIS + // LSEC 2, n -- Long DTIS + // LSEC 3, n -- free + // LSEC 15, n -- free + +// constants that indicate the maximum sizes of fields +// (before packing, ie: subtracting one, in some cases) + +#define ksDELTA 4096 // delta max for DELTA from + +#define ksDDDMAX 1023 // delta max for DELTA-DAT (DDAT) form +#define ksDDRMAX 63 // run max for DELTA-DAT (DDAT) form + +#define ksCODE 512 // count max for CODE form +#define ksDATA 512 // count max for DATA form +#define ksDEMAX 512 // count max for DESC form +#define ksVTMAX 512 // count max for VTBL form +#define ksISMAX 512 // count max for IMPS form +#define ksRPTMAX 256 // count max for RPT form + +#define IsLARG(op) (((op) & 0x70) == 0x50) + +#define RELOPSHFT 9 + +#define ksDVDMAX 0 // (63) delta max for DELTA-VTBL (DVBL) form +#define ksDVRMAX 0 // (256) run max for DELTA-VTBL (DVBL) form + +#define krXXXX 0xff + + +/* + From: PEFBinaryFormat.i + Revision: 9 +*/ + +enum { + /* The packed data opcodes. */ + kPEFPkDataZero = 0, /* Zero fill "count" bytes. */ + kPEFPkDataBlock = 1, /* Block copy "count" bytes. */ + kPEFPkDataRepeat = 2, /* Repeat "count" bytes "count2"+1 times. */ + kPEFPkDataRepeatBlock = 3, /* Interleaved repeated and unique data. */ + kPEFPkDataRepeatZero = 4 /* Interleaved zero and unique data. 
*/ +}; + + +enum { + kPEFPkDataOpcodeShift = 5, + kPEFPkDataCount5Mask = 0x1F, + kPEFPkDataMaxCount5 = 31, + kPEFPkDataVCountShift = 7, + kPEFPkDataVCountMask = 0x7F, + kPEFPkDataVCountEndMask = 0x80 +}; + +#define PEFPkDataOpcode(byte) ( ((UInt8)(byte)) >> kPEFPkDataOpcodeShift ) + +#define PEFPkDataCount5(byte) ( ((UInt8)(byte)) & kPEFPkDataCount5Mask ) + +#define PEFPkDataComposeInstr(opcode,count5) \ + ( (((UInt8)(opcode)) << kPEFPkDataOpcodeShift) | ((UInt8)(count5)) ) + + + + + +/* + File: CodeFragmentContainerPriv.h + + Contains: Physical container routines of the ModernOS version of CFM. + + Version: Maxwell + + DRI: Alan Lillich + + Copyright: © 1984-1996 by Apple Computer, Inc. + All rights reserved. + + BuildInfo: Built by: Simon Douglas + With Interfacer: 2.0d13 (PowerPC native) + From: CodeFragmentContainerPriv.i + Revision: 9 + Dated: 10/9/96 + Last change by: AWL + Last comment: Remove special SMP sharing, using prepare option instead. + + Bugs: Report bugs to Radar component Ã’System InterfacesÓ, Ã’LatestÓ + List the version information (from above) in the Problem Description. + +*/ +/* + ------------------------------------------------------------------------------------------- + This file contains what used to be called the CFLoader interface. The name was changed to + fit the newer convention of having CodeFragment as a common prefix, and to reduce pervasive + confusion between the Code Fragment Manager and the Code Fragment Loaders, promulgated by + the long history of the Segment Loader. This file defines the abstract interface to the + physical representation of code fragments. + !!! This version has minimal comments, the main purpose is to get things compiled. 
+*/ + + +/* + ¤ + =========================================================================================== + General Types and Constants + =========================== +*/ +typedef SInt32 CFContSignedIndex; +typedef UInt32 CFContStringHash; +#define CFContStringHashLength(hashValue) ((hashValue) >> 16) +struct CFContHashedName { + CFContStringHash nameHash; /* ! Includes the name length.*/ + BytePtr nameText; +}; +typedef struct CFContHashedName CFContHashedName; + +/* + ------------------------------------------ + Declarations for code fragment containers. +*/ + +enum { + kCFContContainerInfoVersion = 0x00010001 +}; + +struct CFContContainerInfo { + CFContHashedName cfragName; + UInt32 modDate; /* !!! Abstract type?*/ + OSType architecture; + CFragVersionNumber currentVersion; + CFragVersionNumber oldImpVersion; + CFragVersionNumber oldDefVersion; + UInt32 reservedA; + void * reservedB; +}; +typedef struct CFContContainerInfo CFContContainerInfo; + +/* + ---------------------------------------- + Declarations for code fragment sections. +*/ +struct CFContLogicalLocation { + CFContSignedIndex section; /* "Real" sections use zero based indices, special ones are negative.*/ + ByteCount offset; +}; +typedef struct CFContLogicalLocation CFContLogicalLocation; + + +enum { + kCFContNoSectionIndex = -1, + kCFContAbsoluteSectionIndex = -2, + kCFContReexportSectionIndex = -3 +}; + +typedef UInt8 CFContSectionSharing; + +enum { + kCFContShareSectionInClosure = 0, /* ! Not supported at present!*/ + kCFContShareSectionInProcess = 1, + kCFContShareSectionAcrossSystem = 4, + kCFContShareSectionWithProtection = 5 +}; + +typedef UInt8 CFContMemoryAccess; + +enum { + kCFContMemReadMask = 0x01, /* Readable memory can also be executed.*/ + kCFContMemWriteMask = 0x02, + kCFContMemExecuteMask = 0x04, /* ! 
Affects cache actions, not protection!*/ + kCFContReadOnlyData = kCFContMemReadMask, + kCFContWriteableData = kCFContMemReadMask | kCFContMemWriteMask, + kCFContNormalCode = kCFContMemReadMask | kCFContMemExecuteMask, + kCFContExcludedMemory = 0 +}; + +typedef UInt32 CFContSectionOptions; + +enum { + /* Values for CFContSectionOptions.*/ + kPackedCFContSectionMask = 0x01, /* Stored contents are compressed.*/ + kRelocatedCFContSectionMask = 0x02, /* Section contents have relocations.*/ + kEmptyFillCFContSectionMask = 0x04, /* The extension part may be left untouched.*/ + kResidentCFContSectionMask = 0x08, + kPrefaultCFContSectionMask = 0x10 +}; + + +enum { + kCFContSectionInfoVersion = 0x00010001 +}; + +struct CFContSectionInfo { + CFContHashedName sectionName; + CFContMemoryAccess access; + CFContSectionSharing sharing; + UInt8 alignment; /* ! The power of 2, a.k.a. number of low order zero bits.*/ + UInt8 reservedA; + CFContSectionOptions options; + ByteCount containerOffset; + ByteCount containerLength; + ByteCount unpackedLength; + ByteCount totalLength; + LogicalAddress defaultAddress; + UInt32 reservedB; + void * reservedC; +}; +typedef struct CFContSectionInfo CFContSectionInfo; + +/* + ---------------------------------- + Declarations for exported symbols. +*/ +typedef UInt32 CFContExportedSymbolOptions; +/* + ! enum { // Values for CFContExportedSymbolOptions. + ! // ! No options at present. + ! }; +*/ + +enum { + kCFContExportedSymbolInfoVersion = 0x00010001 +}; + +struct CFContExportedSymbolInfo { + CFContHashedName symbolName; + CFContLogicalLocation location; + CFContExportedSymbolOptions options; + CFragSymbolClass symbolClass; + UInt8 reservedA; + UInt16 reservedB; + UInt32 reservedC; + void * reservedD; +}; +typedef struct CFContExportedSymbolInfo CFContExportedSymbolInfo; + +/* + ------------------------------------------------ + Declarations for imported libraries and symbols. 
+*/ +typedef UInt32 CFContImportedLibraryOptions; + +enum { + /* Values for CFContImportedLibraryOptions.*/ + kCFContWeakLibraryMask = 0x01, /* ! Same as kCFContWeakSymbolMask to reduce errors.*/ + kCFContInitBeforeMask = 0x02, + kCFContDeferredBindMask = 0x04 +}; + + +enum { + kCFContImportedLibraryInfoVersion = 0x00010001 +}; + +struct CFContImportedLibraryInfo { + CFContHashedName libraryName; + CFragVersionNumber linkedVersion; + CFragVersionNumber oldImpVersion; + CFContImportedLibraryOptions options; +}; +typedef struct CFContImportedLibraryInfo CFContImportedLibraryInfo; + +typedef UInt32 CFContImportedSymbolOptions; + +enum { + /* Values for CFContImportedSymbolOptions.*/ + kCFContWeakSymbolMask = 0x01 /* ! Same as kCFContWeakLibraryMask to reduce errors.*/ +}; + + +enum { + kCFContImportedSymbolInfoVersion = 0x00010001 +}; + +struct CFContImportedSymbolInfo { + CFContHashedName symbolName; + ItemCount libraryIndex; + CFContImportedSymbolOptions options; + CFragSymbolClass symbolClass; + UInt8 reservedA; + UInt16 reservedB; + UInt32 reservedC; + void * reservedD; +}; +typedef struct CFContImportedSymbolInfo CFContImportedSymbolInfo; + +/* + ------------------------------------------------- + Declarations for dealing with container handlers. 
+*/ +typedef UInt32 CFContOpenOptions; + +enum { + /* Values for CFContOpenOptions.*/ + kCFContPrepareInPlaceMask = 0x01, + kCFContMinimalOpenMask = 0x02 +}; + +typedef UInt32 CFContCloseOptions; + +enum { + /* Values for CFContCloseOptions.*/ + kCFContPartialCloseMask = 0x01 +}; + +typedef struct OpaqueCFContHandlerRef* CFContHandlerRef; +typedef struct CFContHandlerProcs CFContHandlerProcs; +typedef CFContHandlerProcs *CFContHandlerProcsPtr; +typedef LogicalAddress (*CFContAllocateMem)(ByteCount size); +typedef void (*CFContReleaseMem)(LogicalAddress address); +/* + ¤ + =========================================================================================== + Container Handler Routines + ========================== +*/ +typedef OSStatus (*CFCont_OpenContainer)(LogicalAddress mappedAddress, LogicalAddress runningAddress, ByteCount containerLength, KernelProcessID runningProcessID, const CFContHashedName *cfragName, CFContOpenOptions options, CFContAllocateMem Allocate, CFContReleaseMem Release, CFContHandlerRef *containerRef, CFContHandlerProcsPtr *handlerProcs); +typedef OSStatus (*CFCont_CloseContainer)(CFContHandlerRef containerRef, CFContCloseOptions options); +typedef OSStatus (*CFCont_GetContainerInfo)(CFContHandlerRef containerRef, PBVersion infoVersion, CFContContainerInfo *containerInfo); +/* -------------------------------------------------------------------------------------------*/ +typedef OSStatus (*CFCont_GetSectionCount)(CFContHandlerRef containerRef, ItemCount *sectionCount); +typedef OSStatus (*CFCont_GetSectionInfo)(CFContHandlerRef containerRef, ItemCount sectionIndex, PBVersion infoVersion, CFContSectionInfo *sectionInfo); +typedef OSStatus (*CFCont_FindSectionInfo)(CFContHandlerRef containerRef, const CFContHashedName *sectionName, PBVersion infoVersion, ItemCount *sectionIndex, CFContSectionInfo *sectionInfo); +typedef OSStatus (*CFCont_SetSectionAddress)(CFContHandlerRef containerRef, ItemCount sectionIndex, LogicalAddress mappedAddress, 
LogicalAddress runningAddress); +/* -------------------------------------------------------------------------------------------*/ +typedef OSStatus (*CFCont_GetAnonymousSymbolLocations)(CFContHandlerRef containerRef, CFContLogicalLocation *mainLocation, CFContLogicalLocation *initLocation, CFContLogicalLocation *termLocation); +/* -------------------------------------------------------------------------------------------*/ +typedef OSStatus (*CFCont_GetExportedSymbolCount)(CFContHandlerRef containerRef, ItemCount *exportCount); +typedef OSStatus (*CFCont_GetExportedSymbolInfo)(CFContHandlerRef containerRef, CFContSignedIndex exportedIndex, PBVersion infoVersion, CFContExportedSymbolInfo *exportInfo); +typedef OSStatus (*CFCont_FindExportedSymbolInfo)(CFContHandlerRef containerRef, const CFContHashedName *exportName, PBVersion infoVersion, ItemCount *exportIndex, CFContExportedSymbolInfo *exportInfo); +/* -------------------------------------------------------------------------------------------*/ +typedef OSStatus (*CFCont_GetImportCounts)(CFContHandlerRef containerRef, ItemCount *libraryCount, ItemCount *symbolCount); +typedef OSStatus (*CFCont_GetImportedLibraryInfo)(CFContHandlerRef containerRef, ItemCount libraryIndex, PBVersion infoVersion, CFContImportedLibraryInfo *libraryInfo); +typedef OSStatus (*CFCont_GetImportedSymbolInfo)(CFContHandlerRef containerRef, ItemCount symbolIndex, PBVersion infoVersion, CFContImportedSymbolInfo *symbolInfo); +typedef OSStatus (*CFCont_SetImportedSymbolAddress)(CFContHandlerRef containerRef, ItemCount symbolIndex, LogicalAddress symbolAddress); +/* -------------------------------------------------------------------------------------------*/ +typedef OSStatus (*CFCont_UnpackSection)(CFContHandlerRef containerRef, ItemCount sectionIndex, ByteCount sectionOffset, LogicalAddress bufferAddress, ByteCount bufferLength); +typedef OSStatus (*CFCont_RelocateSection)(CFContHandlerRef containerRef, ItemCount sectionIndex); +typedef 
OSStatus (*CFCont_RelocateImportsOnly)(CFContHandlerRef containerRef, ItemCount sectionIndex, ItemCount libraryIndex); +typedef OSStatus (*CFCont_MakeSectionExecutable)(CFContHandlerRef containerRef, ItemCount sectionIndex); +typedef OSStatus (*CFCont_AllocateSection)(CFContHandlerRef containerRef, ItemCount sectionIndex, LogicalAddress *mappedAddress, LogicalAddress *runningAddress); +typedef OSStatus (*CFCont_ReleaseSection)(CFContHandlerRef containerRef, ItemCount sectionIndex); +/* -------------------------------------------------------------------------------------------*/ + +#if 0 +struct CFContHandlerInfo { + OrderedItemName orderedName; + OrderRequirements orderedReq; + CFCont_OpenContainer OpenHandler; +}; +typedef struct CFContHandlerInfo CFContHandlerInfo; +#endif + +struct CFContHandlerProcs { + ItemCount procCount; + CFragShortVersionPair abiVersion; + + CFCont_OpenContainer OpenContainer; /* 1*/ + CFCont_CloseContainer CloseContainer; /* 2*/ + CFCont_GetContainerInfo GetContainerInfo; /* 3*/ + + CFCont_GetSectionCount GetSectionCount; /* 4*/ + CFCont_GetSectionInfo GetSectionInfo; /* 5*/ + CFCont_FindSectionInfo FindSectionInfo; /* 6*/ + CFCont_SetSectionAddress SetSectionAddress; /* 7*/ + + CFCont_GetAnonymousSymbolLocations GetAnonymousSymbolLocations; /* 8*/ + + CFCont_GetExportedSymbolCount GetExportedSymbolCount; /* 9*/ + CFCont_GetExportedSymbolInfo GetExportedSymbolInfo; /* 10*/ + CFCont_FindExportedSymbolInfo FindExportedSymbolInfo; /* 11*/ + + CFCont_GetImportCounts GetImportCounts; /* 12*/ + CFCont_GetImportedLibraryInfo GetImportedLibraryInfo; /* 13*/ + CFCont_GetImportedSymbolInfo GetImportedSymbolInfo; /* 14*/ + CFCont_SetImportedSymbolAddress SetImportedSymbolAddress; /* 15*/ + + CFCont_UnpackSection UnpackSection; /* 16*/ + CFCont_RelocateSection RelocateSection; /* 17*/ + CFCont_RelocateImportsOnly RelocateImportsOnly; /* 18*/ + CFCont_MakeSectionExecutable MakeSectionExecutable; /* 19 (Opt.)*/ + CFCont_AllocateSection AllocateSection; 
/* 20 (Opt.)*/ + CFCont_ReleaseSection ReleaseSection; /* 21 (Opt.)*/ +}; + + +enum { + kCFContMinimumProcCount = 18, + kCFContCurrentProcCount = 21, + kCFContHandlerABIVersion = 0x00010001 +}; + +/* + ----------------------------------------------------------------------------------------- + The ABI version is a pair of UInt16s used as simple counters. The high order part is the + current version number, the low order part is the oldest compatible definition version. + number. This pair is to be used by the specific container handlers to describe what + version of the container handler ABI they support. + 0x00010001 + ---------- + The initial release of this ABI. (The old CFLoader ABI does not count.) + ¤ + =========================================================================================== + General Routines + ================ +*/ +extern CFContStringHash CFContHashName(BytePtr nameText, ByteCount nameLength); + +#if 0 + +/* -------------------------------------------------------------------------------------------*/ +extern OSStatus CFContOpenContainer(LogicalAddress mappedAddress, LogicalAddress runningAddress, ByteCount containerLength, KernelProcessID runningProcessID, const CFContHashedName *cfragName, CFContOpenOptions options, CFContAllocateMem Allocate, CFContReleaseMem Release, CFContHandlerRef *containerRef, CFContHandlerProcsPtr *handlerProcs); + +/* -------------------------------------------------------------------------------------------*/ +extern OSStatus CFContRegisterContainerHandler(const OrderedItemName *orderedName, const OrderRequirements *orderedReq, CFCont_OpenContainer OpenHandler, OrderedItemName *rejectingHandler); + +extern OSStatus CFContUnregisterContainerHandler(const OrderedItemName *orderedName); + +extern OSStatus CFContGetContainerHandlers(ItemCount requestedCount, ItemCount *totalCount, CFContHandlerInfo *handlers); + +/* -------------------------------------------------------------------------------------------*/ +#endif 
+ + + + + + +/* + File: PEFLoader.h + + Contains: PEF Loader Interface. + + Version: Maxwell + + Copyright: © 1992-1996 by Apple Computer, Inc., all rights reserved. + + File Ownership: + + DRI: Alan Lillich + + Other Contact: <> + + Technology: Core Runtime + + Writers: + + (AWL) Alan Lillich + (ELE) Erik Eidt + + Change History (most recent first): + + <7> 8/23/96 AWL (1379028) Propagate changes from CodeFragmentContainerPriv. + <6> 2/28/96 AWL Adapt for new container handler model. + <5> 6/20/94 AWL Move private PEF loader info struct here to be visible to the + booting "wacky" PEF loader. + <4> 6/8/94 AWL Make all CFL routines visible for direct use in special cases + such as booting. + <3> 5/16/94 AWL Fix typo. + <2> 2/25/94 AWL Update for Q&D solution to loading across address spaces. + <1> 2/15/94 AWL Initial checkin for kernel based CFM. +*/ + +// =========================================================================================== + +enum { + kBuiltinSectionArraySize = 4 +}; + +struct PEFPrivateInfo { // !!! Clean up field names, collapse Booleans, etc. + CFContAllocateMem Allocate; + CFContReleaseMem Release; + BytePtr mappedContainer; + BytePtr runningContainer; + ItemCount sectionCount; // Just the instantiated sections. + SectionHeader * sections; + BytePtr stringTable; + ItemCount ldrSectionNo; + LoaderHeader * ldrHeader; + BytePtr ldrStringTable; + LoaderRelExpHeader * ldrSections; + LoaderImportFileID * ldrImportFiles; + LoaderImport * ldrImportSymbols; + HashSlotEntry * ldrHashSlot; + HashChainEntry * ldrHashChain; + LoaderExport * ldrExportSymbols; + BytePtr ldrRelocations; + BytePtr * mappedOrigins; // Mapped base address for each section. + ByteCount * runningOffsets; // Running offset from presumed address. + BytePtr * imports; + BytePtr originArray [kBuiltinSectionArraySize]; // ! Only used if big enough. + ByteCount offsetArray [kBuiltinSectionArraySize]; // ! Only used if big enough. 
+ Boolean loadInPlace; + Boolean resolved; +}; + +typedef struct PEFPrivateInfo PEFPrivateInfo; + + +// =========================================================================================== + + +extern OSStatus PEF_OpenContainer ( LogicalAddress mappedAddress, + LogicalAddress runningAddress, + ByteCount containerLength, + KernelProcessID runningProcessID, + const CFContHashedName * cfragName, + CFContOpenOptions options, + CFContAllocateMem Allocate, + CFContReleaseMem Release, + CFContHandlerRef * containerRef_o, + CFContHandlerProcs * * handlerProcs_o ); + +extern OSStatus PEF_CloseContainer ( CFContHandlerRef containerRef, + CFContCloseOptions options ); + +extern OSStatus PEF_GetContainerInfo ( CFContHandlerRef containerRef, + PBVersion infoVersion, + CFContContainerInfo * containerInfo ); + +// ------------------------------------------------------------------------------------------- + +extern OSStatus PEF_GetSectionCount ( CFContHandlerRef containerRef, + ItemCount * sectionCount ); + +extern OSStatus PEF_GetSectionInfo ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + PBVersion infoVersion, + CFContSectionInfo * sectionInfo ); + +extern OSStatus PEF_FindSectionInfo ( CFContHandlerRef containerRef, + const CFContHashedName * sectionName, + PBVersion infoVersion, + ItemCount * sectionIndex, // May be null. + CFContSectionInfo * sectionInfo ); // May be null. + +extern OSStatus PEF_SetSectionAddress ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + LogicalAddress mappedAddress, + LogicalAddress runningAddress ); + +// ------------------------------------------------------------------------------------------- + +extern OSStatus PEF_GetAnonymousSymbolLocations ( CFContHandlerRef containerRef, + CFContLogicalLocation * mainLocation, // May be null. + CFContLogicalLocation * initLocation, // May be null. + CFContLogicalLocation * termLocation ); // May be null. 
+ +// ------------------------------------------------------------------------------------------- + +extern OSStatus PEF_GetExportedSymbolCount ( CFContHandlerRef containerRef, + ItemCount * exportCount ); + +extern OSStatus PEF_GetExportedSymbolInfo ( CFContHandlerRef containerRef, + CFContSignedIndex exportIndex, + PBVersion infoVersion, + CFContExportedSymbolInfo * exportInfo ); + +extern OSStatus PEF_FindExportedSymbolInfo ( CFContHandlerRef containerRef, + const CFContHashedName * exportName, + PBVersion infoVersion, + ItemCount * exportIndex, // May be null. + CFContExportedSymbolInfo * exportInfo ); // May be null. + +// ------------------------------------------------------------------------------------------- + +extern OSStatus PEF_GetImportCounts ( CFContHandlerRef containerRef, + ItemCount * libraryCount, // May be null. + ItemCount * symbolCount ); // May be null. + +extern OSStatus PEF_GetImportedLibraryInfo ( CFContHandlerRef containerRef, + ItemCount libraryIndex, + PBVersion infoVersion, + CFContImportedLibraryInfo * libraryInfo ); + +extern OSStatus PEF_GetImportedSymbolInfo ( CFContHandlerRef containerRef, + ItemCount symbolIndex, + PBVersion infoVersion, + CFContImportedSymbolInfo * symbolInfo ); + +extern OSStatus PEF_SetImportedSymbolAddress ( CFContHandlerRef containerRef, + ItemCount symbolIndex, + LogicalAddress symbolAddress ); + +// ------------------------------------------------------------------------------------------- + +extern OSStatus PEF_UnpackSection ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + ByteCount sectionOffset, + LogicalAddress bufferAddress, + ByteCount bufferLength ); + +extern OSStatus PEF_RelocateSection ( CFContHandlerRef containerRef, + ItemCount sectionIndex ); + +extern OSStatus PEF_RelocateImportsOnly ( CFContHandlerRef containerRef, + ItemCount sectionIndex, + ItemCount libraryIndex ); + +struct CFragInitBlock { + void * contextID; + void * closureID; + void * connectionID; + SInt32 where; // 
locator rec + LogicalAddress address; + ByteCount length; + Boolean inPlace; + UInt8 resvA; + UInt16 resvB; + char * libName; + UInt32 resvC; +}; +typedef struct CFragInitBlock CFragInitBlock; + +#pragma options align=reset + +#endif // __IOPEFINTERNALS__ + diff --git a/iokit/Families/IONDRVSupport/IOPEFLibraries.h b/iokit/Families/IONDRVSupport/IOPEFLibraries.h new file mode 100644 index 000000000..897316898 --- /dev/null +++ b/iokit/Families/IONDRVSupport/IOPEFLibraries.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * Simon Douglas 22 Oct 97 + * - first checked in. 
+ */ + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +struct FunctionEntry +{ + char * name; + LogicalAddress address; + LogicalAddress toc; +}; +typedef struct FunctionEntry FunctionEntry; + +struct LibraryEntry +{ + char * name; + ItemCount numSyms; + FunctionEntry * functions; +}; +typedef struct LibraryEntry LibraryEntry; + +extern LibraryEntry IONDRVLibraries[]; +extern const ItemCount IONumNDRVLibraries; + +#ifdef __cplusplus +} +#endif + diff --git a/iokit/Families/IONDRVSupport/IOPEFLoader.c b/iokit/Families/IONDRVSupport/IOPEFLoader.c new file mode 100644 index 000000000..27e50ba4a --- /dev/null +++ b/iokit/Families/IONDRVSupport/IOPEFLoader.c @@ -0,0 +1,600 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * From pieces of ProtoCFM, Alan Lillich. + * + * sdouglas 22 Oct 97 - first checked in. 
+ * sdouglas 21 July 98 - start IOKit + */ + + +#include +#include + +#include "IOPEFLibraries.h" +#include "IOPEFLoader.h" +#include "IOPEFInternals.h" + + + +#define LOG if(0) IOLog +#define INFO if(0) IOLog + +struct SectionVars { + LogicalAddress address; + ByteCount allocSize; + ByteCount unpackedLength; + Boolean isPacked; +}; +typedef struct SectionVars SectionVars; + +struct InstanceVars { + BytePtr pef; // container in memory + CFContHandlerRef cRef; + CFContHandlerProcs * cProcs; + ItemCount numSections; + SectionVars * sections; + IONDRVUndefinedSymbolHandler undefinedHandler; + void * undefHandlerSelf; +}; +typedef struct InstanceVars InstanceVars; + + +static OSStatus LocationToAddress( InstanceVars * inst, + CFContLogicalLocation * location, LogicalAddress * address ); +static OSStatus SatisfyImports( InstanceVars * inst ); +static OSStatus Instantiate( InstanceVars * inst ); + + +#define PCFM_BlockCopy(src,dst,len) memcpy(dst,src,len) +#define PCFM_BlockClear(dst,len) memset(dst,0,len) +#define PCFM_MakeExecutable(addr,len) flush_dcache((vm_offset_t)addr, len, 0); \ + invalidate_icache((vm_offset_t)addr, len, 0) + +extern OSStatus CallTVector( + void * p1, void * p2, void * p3, void * p4, void * p5, void * p6, + LogicalAddress entry ); + +// ¤ +// =========================================================================================== +// CFContHashName () +// ================= + + +CFContStringHash CFContHashName ( BytePtr nameText, + ByteCount nameLength ) +{ + BytePtr currChar = nameText; + SInt32 hashValue = 0; // ! Signed to match old published PEF algorithm. 
+ ByteCount length = 0; + ByteCount limit; + CFContStringHash result; + + #define PseudoRotate(x) ( ( (x) << 1 ) - ( (x) >> (16) ) ) + + + for ( limit = nameLength; limit > 0; limit -= 1 ) { + if ( *currChar == NULL ) break; + hashValue = (PseudoRotate ( hashValue )) ^ *currChar; + currChar += 1; + length += 1; + } + + result = (length << 16) | ((UInt16) ((hashValue ^ (hashValue >> 16)) & 0xFFFF)); + + return result; + + +} // CFContHashName () + + +// ¤ +// =========================================================================================== +// PCFM_CompareBytes () +// ==================== + + +Boolean PCFM_CompareBytes ( const Byte * left, + const Byte * right, + ByteCount count ) +{ + // !!! Blechola! Switch to a standard routine ASAP! + + UInt32 * wLeft; + UInt32 * wRight; + UInt8 * bLeft; + UInt8 * bRight; + + ByteCount leftMiss = (UInt32)left & 0x00000003; + ByteCount rightMiss = (UInt32)right & 0x00000003; + + + bLeft = (UInt8 *) left; + bRight = (UInt8 *) right; + + if ( (leftMiss != 0) && (rightMiss != 0) ) { + ByteCount align = leftMiss; + if ( align > count ) align = count; + while ( align > 0 ) { + if ( *bLeft++ != *bRight++ ) goto NoMatch; + align -= 1; + count -= 1; + } + } + + wLeft = (UInt32 *) bLeft; + wRight = (UInt32 *) bRight; + while ( count >= 4 ) { + if ( *wLeft++ != *wRight++ ) goto NoMatch; + count -= 4; + } + + bLeft = (UInt8 *) wLeft; + bRight = (UInt8 *) wRight; + while ( count > 0 ) { + if ( *bLeft++ != *bRight++ ) goto NoMatch; + count -= 1; + } + + return true; + + +NoMatch: + return false; + + +} // PCFM_CompareBytes () + +// =========================================================================================== + +LogicalAddress PCodeAllocateMem( ByteCount size ); +void PCodeReleaseMem( LogicalAddress address ); +extern void *kern_os_malloc(size_t size); +extern void kern_os_free(void * addr); + +LogicalAddress +PCodeAllocateMem( ByteCount size ) +{ + return( (LogicalAddress) kern_os_malloc( (size_t) size )); +} + +void 
+PCodeReleaseMem( LogicalAddress address ) +{ + kern_os_free( (void *) address ); +} + +// =========================================================================================== + +OSStatus +PCodeOpen( LogicalAddress container, ByteCount containerSize, PCodeInstance * instance ) +{ + OSStatus err; + InstanceVars * inst; + + inst = PCodeAllocateMem( sizeof( InstanceVars)); + *instance = inst; + + inst->pef = (BytePtr) container; + // procID, name, options + err = PEF_OpenContainer( container, container, containerSize, 0, 0, 0, + PCodeAllocateMem, PCodeReleaseMem, + &inst->cRef, &inst->cProcs ); + if( err) LOG( "PEF_OpenContainer = %ld\n", err ); + + return( err); +} + +OSStatus +PCodeInstantiate( PCodeInstance instance, + IONDRVUndefinedSymbolHandler handler, void * self ) +{ + OSStatus err; + InstanceVars * inst = instance; + CFContLogicalLocation initLocation; + LogicalAddress tv; + CFragInitBlock initInfo; + + inst->undefinedHandler = handler; + inst->undefHandlerSelf = self; + + do { + err = Instantiate( inst ); + if( err) + continue; + + // call INIT + err = PEF_GetAnonymousSymbolLocations( inst->cRef, NULL, &initLocation, NULL ); + if( err) + continue; + err = LocationToAddress( inst, &initLocation, &tv ); + if( err || (tv == NULL) ) + continue; + bzero( &initInfo, sizeof( initInfo)); + err = CallTVector( &initInfo, 0, 0, 0, 0, 0, tv ); + + } while( false); + + return( err); +} + + +OSStatus +PCodeClose( PCodeInstance instance ) +{ + OSStatus err; + InstanceVars * inst = instance; + SectionVars * section; + int i; + + if( !inst) + return( noErr); + + err = PEF_CloseContainer( inst->cRef, 0 ); + if( err) LOG( "PEF_CloseContainer = %ld\n", err ); + + if( inst->sections ) { + for( i = 0; i < inst->numSections; i++) { + section = inst->sections + i; + if( section->allocSize) + PCodeReleaseMem( section->address); + } + PCodeReleaseMem(inst->sections); + } + + return( err); +} + +OSStatus +PCodeFindExport( PCodeInstance instance, const char * symbolName, 
LogicalAddress * address, CFragSymbolClass * symbolClass )
+{
+    CFContExportedSymbolInfo	symInfo;
+    CFContHashedName		hashName;
+    OSStatus			err;
+    InstanceVars *		inst = instance;
+
+    hashName.nameHash = CFContHashName( (UInt8 *) symbolName, strlen( symbolName) );
+    hashName.nameText = (UInt8 *) symbolName;
+
+    err = PEF_FindExportedSymbolInfo( inst->cRef, &hashName,
+			kCFContExportedSymbolInfoVersion, (void *) 0, &symInfo );
+    if( err) {
+	LOG( "PEF_FindExportedSymbolInfo = %ld\n", err );
+	return( err);
+    }
+
+    if( address)
+	err = LocationToAddress( inst, &symInfo.location, address );
+    if( symbolClass)
+	*symbolClass = symInfo.symbolClass;
+
+    return( err);
+}
+
+OSStatus
+PCodeFindMain( PCodeInstance instance, LogicalAddress * mainAddress )
+{
+    InstanceVars *		inst = instance;
+    CFContLogicalLocation	mainLocation;
+    OSStatus			err;
+
+    err = PEF_GetAnonymousSymbolLocations( inst->cRef, &mainLocation, NULL, NULL );
+
+    if( err == noErr)
+	err = LocationToAddress( inst, &mainLocation, mainAddress );
+
+    return( err);
+}
+
+
+
+// ===========================================================================================
+
+static OSStatus
+LocationToAddress( InstanceVars * inst, CFContLogicalLocation * location,
+			LogicalAddress * address )
+{
+    BytePtr	sectionBase;
+    OSStatus	err = noErr;
+
+    if ( location->section >= 0 ) {
+	sectionBase = (BytePtr) (inst->sections + location->section)->address;
+	*address = (LogicalAddress) (sectionBase + location->offset);
+
+    } else if ( location->section == kCFContAbsoluteSectionIndex ) {
+	*address = (LogicalAddress) location->offset;
+
+    } else if ( location->section == kCFContNoSectionIndex ) {
+	*address = (LogicalAddress) kUnresolvedCFragSymbolAddress;
+
+    } else
+	err = cfragFragmentFormatErr;
+
+    return( err);
+}
+
+
+static OSStatus
+Instantiate( InstanceVars * inst )
+{
+    CFContHandlerRef		cRef;
+    ItemCount			numSects, sectionIndex;
+    CFContSectionInfo		sectionInfo;
+    CFContSectionInfo *		section;
+    OSStatus			err;
+
+    cRef = 
inst->cRef;
+
+    err = PEF_GetSectionCount( cRef, &numSects );
+    if( err) LOG( "PEF_GetSectionCount = %ld\n", err );
+    INFO( "Num sects = %ld\n", numSects );
+
+    inst->numSections = numSects;
+    inst->sections = PCodeAllocateMem( numSects * sizeof( SectionVars ));
+
+    for( sectionIndex = 0; sectionIndex < numSects; sectionIndex++ )
+    {
+	Boolean		isPacked, isMappable;
+	Boolean		needAlloc, needCopy, needClear;
+	LogicalAddress	sectionAddress;
+	SectionVars *	sectionVars;
+
+	sectionVars = inst->sections + sectionIndex;
+	section = &sectionInfo;
+
+	err = PEF_GetSectionInfo( cRef, sectionIndex, kCFContSectionInfoVersion, section );
+	if( err) LOG( "PEF_GetSectionInfo = %ld\n", err );
+
+#if 0
+	if ( sectionInfo.sharing == kCFContShareSectionInClosure ) goto SectionSharingError;
+	if ( (! (sectionInfo.access & kCFContMemWriteMask)) &&
+	     (sectionInfo.options & kRelocatedCFContSectionMask) ) goto SectionOptionsError;
+#endif
+
+	isPacked = ((section->options & kPackedCFContSectionMask) != 0);
+	isMappable = (! isPacked) &&
+		     (! (section->options & kRelocatedCFContSectionMask)) &&
+		     (! (section->access & kCFContMemWriteMask));
+
+	if ( ! isMappable ) {
+	    // ----------------------------------------------------------------------------------
+	    // Mappable really means "fully expanded in container", so sections that are not mappable
+	    // need to be allocated.  The loader will do the initialization copying.  This is the
+	    // standard case for packed PEF data sections.
+	    needAlloc	= true;
+	    needCopy	= (! isPacked);
+	    needClear	= (section->totalLength != section->unpackedLength);
+
+	} else if ( ! (section->access & kCFContMemWriteMask) ) {
+	    // -----------------------------------------------------------------------------------
+	    // A "mappable" read only section.  Make sure it is fully present, i.e. no zero filled
+	    // extension.  This is the standard case for code and literal sections.
+	    if ( section->totalLength != section->unpackedLength ) {
+		err = cfragFragmentUsageErr;	// !!! 
Needs error label & message. +// goto ERROR; + } + needAlloc = false; + needCopy = false; + needClear = false; + + } else { + // ----------------------------------------------------------------------------------- + // A "mappable", writeable, don't use in place section. This is the standard case for + // unpacked data sections. + needAlloc = true; + needCopy = true; + needClear = (section->totalLength != section->unpackedLength); + } + + if ( needAlloc ) { + // *** Should honor the container's alignment specifications. + sectionAddress = PCodeAllocateMem( section->totalLength ); //, 4, allocMode ); + } else { + sectionAddress = inst->pef + section->containerOffset; + } + + // -------------------------------------------------------------------------------------- + // !!! The copy/clear code should be moved to the loader as part of the split of the + // !!! unpack/relocate operations. It isn't clear at this point if both the read and + // !!! write sides should be touched. What if the write side pushes out pages brought in + // !!! by the read side? We should also have better advice to say all bytes are changed. + + if ( needCopy ) { + BytePtr source = inst->pef + section->containerOffset; + BytePtr dest = sectionAddress; + ByteCount length = section->unpackedLength; + + PCFM_BlockCopy ( source, dest, length ); + } + + if ( needClear ) { + BytePtr dest = (BytePtr) sectionAddress + section->unpackedLength; + ByteCount length = section->totalLength - section->unpackedLength; + + PCFM_BlockClear ( dest, length ); + } + + // ------------------------------------------------------------------------------------- + // If CFM was responsible for bringing the container into memory then we have to get the + // I&D caches in sync for the (read-only & use-in-place) code sections. + + if ( (section->access & kCFContMemExecuteMask) + && (! 
(section->access & kCFContMemWriteMask)) && isMappable ) { + PCFM_MakeExecutable ( sectionAddress, section->unpackedLength ); + } + + err = PEF_SetSectionAddress( cRef, sectionIndex, sectionAddress, sectionAddress ); + if( err) LOG( "PEF_SetSectionAddress = %ld\n", err ); + + sectionVars->address = sectionAddress; + sectionVars->unpackedLength = section->unpackedLength; + sectionVars->isPacked = isPacked; + if( needAlloc) + sectionVars->allocSize = section->totalLength; + else + sectionVars->allocSize = 0; + } + + // ------------------------------------------------------------------------------------- + + err = SatisfyImports( inst ); + if( err) LOG( "SatisfyImports = %ld\n", err ); + + // ------------------------------------------------------------------------------------- + + for( sectionIndex = 0; sectionIndex < numSects; sectionIndex++ ) + { + SectionVars * sectionVars; + + sectionVars = inst->sections + sectionIndex; + + INFO("Section[%ld] ", sectionIndex ); + + if ( sectionVars->isPacked ) { + INFO("unpacking..."); + err = PEF_UnpackSection( cRef, + sectionIndex, + 0, // Unpack the whole section. 
+ sectionVars->address, + sectionVars->unpackedLength ); + if( err) LOG( "PEF_UnpackSection = %ld\n", err ); + } + + INFO("reloc..."); + err = PEF_RelocateSection( cRef, sectionIndex ); + + INFO(" address = 0x%08lx\n", (UInt32) sectionVars->address ); + } + + if( err) LOG( "Instantiate = %ld\n", err ); + + return( err); +} + +struct StubFunction { + LogicalAddress pc; + LogicalAddress toc; + char name[64]; +}; +typedef struct StubFunction StubFunction; + +OSStatus IONDRVUnimplementedVector( UInt32 p1, UInt32 p2, UInt32 p3, UInt32 p4 ) +{ + char * name = (char *) get_R2(); + + LOG("-*- %s : %lx, %lx, %lx, %lx\n", name, p1, p2, p3, p4); + + set_R2( (UInt32) name); + + return( -53); +} + +static OSStatus +SatisfyImports( InstanceVars * inst ) +{ + CFContImportedSymbolInfo symInfo; + + OSStatus err = 0; + CFContHandlerRef cRef; + ItemCount numLibs, numSyms, index, i; + struct CFLibInfo { + CFContImportedLibraryInfo info; + LibraryEntry * found; + }; + struct CFLibInfo * libInfo; + struct CFLibInfo * curLib; + FunctionEntry * funcs; + const IOTVector * symAddr; + StubFunction * stub; + + cRef = inst->cRef; + err = PEF_GetImportCounts( cRef, &numLibs, &numSyms ); + if( err) LOG( "PEF_GetImportCounts = %ld\n", err ); + + libInfo = PCodeAllocateMem( numLibs * sizeof( struct CFLibInfo)); + PCFM_BlockClear( libInfo, numLibs * sizeof( struct CFLibInfo)); + + for( index = 0; index < numLibs; index++ ) + { + curLib = libInfo + index; + err = PEF_GetImportedLibraryInfo( cRef, index, kCFContImportedLibraryInfoVersion, &curLib->info); + if( err) LOG( "PEF_GetImportCounts = %ld\n", err ); + + for( i = 0; i < IONumNDRVLibraries; i++ ) { + if( strcmp( (char *) curLib->info.libraryName.nameText, + IONDRVLibraries[ i ].name) == 0) { + curLib->found = &IONDRVLibraries[ i ]; + break; + } + } + } + + for( index = 0; index < numSyms; index++ ) + { + err = PEF_GetImportedSymbolInfo( cRef, index, kCFContImportedSymbolInfoVersion, &symInfo ); + if( err) LOG( "PEF_GetImportedSymbolInfo = 
%ld\n", err ); + + curLib = libInfo + symInfo.libraryIndex; + + symAddr = NULL; + if( curLib->found) { + for( i = 0; i < curLib->found->numSyms; i++ ) { + + funcs = curLib->found->functions + i; + if( strcmp( (char *) symInfo.symbolName.nameText, funcs->name ) == 0) { + symAddr = (IOTVector *) &funcs->address; + break; + } + } + + } else if( inst->undefinedHandler) + symAddr = (*inst->undefinedHandler)(inst->undefHandlerSelf, + curLib->info.libraryName.nameText, + symInfo.symbolName.nameText ); + if( symAddr == NULL) { + + LOG("Undefined %s:%s ", curLib->info.libraryName.nameText, symInfo.symbolName.nameText ); + + stub = IOMalloc( sizeof( StubFunction)); + symAddr = (IOTVector *) &stub->pc; + stub->pc = IONDRVUnimplementedVector; + stub->toc = &stub->name[0]; + strncpy( stub->name, symInfo.symbolName.nameText, 60); + } + + err = PEF_SetImportedSymbolAddress( cRef, index, (IOTVector *) symAddr ); + if( err) LOG( "PEF_SetImportedSymbolAddress = %ld\n", err ); + } + + PCodeReleaseMem( libInfo); + + return( err); +} + + + + diff --git a/iokit/Families/IONDRVSupport/IOPEFLoader.h b/iokit/Families/IONDRVSupport/IOPEFLoader.h new file mode 100644 index 000000000..b08704e09 --- /dev/null +++ b/iokit/Families/IONDRVSupport/IOPEFLoader.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 21 July 98 - start IOKit + * sdouglas 21 July 98 - start IOKit + */ + + +#ifndef _IOKIT_IOPEFLOADER_H +#define _IOKIT_IOPEFLOADER_H + + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +enum { + cfragFirstErrCode = -2800, /* The first value in the range of CFM errors.*/ + cfragContextIDErr = -2800, /* The context ID was not valid.*/ + cfragConnectionIDErr = -2801, /* The connection ID was not valid.*/ + cfragNoSymbolErr = -2802, /* The specified symbol was not found.*/ + cfragNoSectionErr = -2803, /* The specified section was not found.*/ + cfragNoLibraryErr = -2804, /* The named library was not found.*/ + cfragDupRegistrationErr = -2805, /* The registration name was already in use.*/ + cfragFragmentFormatErr = -2806, /* A fragment's container format is unknown.*/ + cfragUnresolvedErr = -2807, /* A fragment had "hard" unresolved imports.*/ + cfragNoPositionErr = -2808, /* The registration insertion point was not found.*/ + cfragNoPrivateMemErr = -2809, /* Out of memory for internal bookkeeping.*/ + cfragNoClientMemErr = -2810, /* Out of memory for fragment mapping or section instances.*/ + cfragNoIDsErr = -2811, /* No more CFM IDs for contexts, connections, etc.*/ + cfragInitOrderErr = -2812, /* */ + cfragImportTooOldErr = -2813, /* An import library was too old for a client.*/ + cfragImportTooNewErr = -2814, /* An import library 
was too new for a client.*/ + cfragInitLoopErr = -2815, /* Circularity in required initialization order.*/ + cfragInitAtBootErr = -2816, /* A boot library has an initialization function. (System 7 only)*/ + cfragLibConnErr = -2817, /* */ + cfragCFMStartupErr = -2818, /* Internal error during CFM initialization.*/ + cfragCFMInternalErr = -2819, /* An internal inconstistancy has been detected.*/ + cfragFragmentCorruptErr = -2820, /* A fragment's container was corrupt (known format).*/ + cfragInitFunctionErr = -2821, /* A fragment's initialization routine returned an error.*/ + cfragNoApplicationErr = -2822, /* No application member found in the cfrg resource.*/ + cfragArchitectureErr = -2823, /* A fragment has an unacceptable architecture.*/ + cfragFragmentUsageErr = -2824, /* A semantic error in usage of the fragment.*/ + cfragFileSizeErr = -2825, /* A file was too large to be mapped.*/ + cfragNotClosureErr = -2826, /* The closure ID was actually a connection ID.*/ + cfragNoRegistrationErr = -2827, /* The registration name was not found.*/ + cfragContainerIDErr = -2828, /* The fragment container ID was not valid.*/ + cfragClosureIDErr = -2829, /* The closure ID was not valid.*/ + cfragAbortClosureErr = -2830 /* Used by notification handlers to abort a closure.*/ +}; + +enum { + paramErr = -50, + unimpErr = -4 +}; + + +typedef UInt32 CFragVersionNumber; +enum { + kNullCFragVersion = 0, + kWildcardCFragVersion = 0xFFFFFFFF +}; + +typedef UInt8 CFragSymbolClass; +enum { + kCodeCFragSymbol = 0, + kDataCFragSymbol = 1, + kTVectorCFragSymbol = 2, + kTOCCFragSymbol = 3, + kGlueCFragSymbol = 4 +}; + +enum { + kUnresolvedCFragSymbolAddress = 0 +}; + +typedef UInt32 CFragShortVersionPair; +#define ComposeCFragShortVersionPair(current,older) (current << 16 | older) +#define GetCFragShortVersionCurrent(version) (version >> 16) +#define GetCFragShortVersionOlder(version) (version & 0xFFFF) + + +enum { + kMainCFragSymbolIndex = -1, + kInitCFragSymbolIndex = -2, + 
kTermCFragSymbolIndex = -3 +}; + +typedef void * PCodeInstance; + +OSStatus +PCodeOpen( LogicalAddress container, ByteCount containerSize, PCodeInstance * instance ); +OSStatus +PCodeClose( PCodeInstance instance ); +OSStatus +PCodeInstantiate( PCodeInstance instance, + IONDRVUndefinedSymbolHandler handler, void * self ); +OSStatus +PCodeFindExport( PCodeInstance instance, const char * symbolName, LogicalAddress * address, CFragSymbolClass * symbolClass ); +OSStatus +PCodeFindMain( PCodeInstance instance, LogicalAddress * mainAddress ); + +static __inline__ unsigned int get_R2(void) +{ + unsigned int result; + __asm__ volatile("mr %0, r2" : "=r" (result)); + return result; +} + +static __inline__ void set_R2(unsigned int val) +{ + __asm__ volatile("mr r2,%0" : : "r" (val)); + return; +} + +#ifdef __cplusplus +} +#endif + +#endif /* ! _IOKIT_IOPEFLOADER_H */ + diff --git a/iokit/Families/IONVRAM/IONVRAMController.cpp b/iokit/Families/IONVRAM/IONVRAMController.cpp new file mode 100644 index 000000000..78d7ff9f1 --- /dev/null +++ b/iokit/Families/IONVRAM/IONVRAMController.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#define super IOService +OSDefineMetaClass(IONVRAMController, IOService); +OSDefineAbstractStructors(IONVRAMController, IOService); + + +// **************************************************************************** +// init +// +// **************************************************************************** +bool IONVRAMController::start(IOService *provider) +{ + if(!super::start(provider)) return false; + + getPlatform()->registerNVRAMController(this); + + return true; +} + +// **************************************************************************** +// syncNVRAM +// +// +// **************************************************************************** +void IONVRAMController::sync(void) +{ +} diff --git a/iokit/Families/IONetworking/IOEthernetController.cpp b/iokit/Families/IONetworking/IOEthernetController.cpp new file mode 100644 index 000000000..c664b67ca --- /dev/null +++ b/iokit/Families/IONetworking/IOEthernetController.cpp @@ -0,0 +1,416 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOEthernetController.cpp + * + * Abstract Ethernet controller superclass. + * + * HISTORY + * + * Dec 3, 1998 jliu - C++ conversion. + */ + +#include +#include +#include + +extern "C" { +#include +#include +#include +} + +//--------------------------------------------------------------------------- + +#define super IONetworkController + +OSDefineMetaClassAndAbstractStructors( IOEthernetController, IONetworkController) +OSMetaClassDefineReservedUnused( IOEthernetController, 0); +OSMetaClassDefineReservedUnused( IOEthernetController, 1); +OSMetaClassDefineReservedUnused( IOEthernetController, 2); +OSMetaClassDefineReservedUnused( IOEthernetController, 3); +OSMetaClassDefineReservedUnused( IOEthernetController, 4); +OSMetaClassDefineReservedUnused( IOEthernetController, 5); +OSMetaClassDefineReservedUnused( IOEthernetController, 6); +OSMetaClassDefineReservedUnused( IOEthernetController, 7); +OSMetaClassDefineReservedUnused( IOEthernetController, 8); +OSMetaClassDefineReservedUnused( IOEthernetController, 9); +OSMetaClassDefineReservedUnused( IOEthernetController, 10); +OSMetaClassDefineReservedUnused( IOEthernetController, 11); +OSMetaClassDefineReservedUnused( IOEthernetController, 12); +OSMetaClassDefineReservedUnused( IOEthernetController, 13); +OSMetaClassDefineReservedUnused( IOEthernetController, 14); +OSMetaClassDefineReservedUnused( IOEthernetController, 15); +OSMetaClassDefineReservedUnused( IOEthernetController, 16); +OSMetaClassDefineReservedUnused( IOEthernetController, 17); +OSMetaClassDefineReservedUnused( IOEthernetController, 18); +OSMetaClassDefineReservedUnused( IOEthernetController, 19); +OSMetaClassDefineReservedUnused( IOEthernetController, 20); +OSMetaClassDefineReservedUnused( IOEthernetController, 21); 
+OSMetaClassDefineReservedUnused( IOEthernetController, 22); +OSMetaClassDefineReservedUnused( IOEthernetController, 23); +OSMetaClassDefineReservedUnused( IOEthernetController, 24); +OSMetaClassDefineReservedUnused( IOEthernetController, 25); +OSMetaClassDefineReservedUnused( IOEthernetController, 26); +OSMetaClassDefineReservedUnused( IOEthernetController, 27); +OSMetaClassDefineReservedUnused( IOEthernetController, 28); +OSMetaClassDefineReservedUnused( IOEthernetController, 29); +OSMetaClassDefineReservedUnused( IOEthernetController, 30); +OSMetaClassDefineReservedUnused( IOEthernetController, 31); + +//------------------------------------------------------------------------- +// Macros + +#ifdef DEBUG +#define DLOG(fmt, args...) IOLog(fmt, ## args) +#else +#define DLOG(fmt, args...) +#endif + +//--------------------------------------------------------------------------- +// IOEthernetController class initializer. + +void IOEthernetController::initialize() +{ +} + +//--------------------------------------------------------------------------- +// Initialize an IOEthernetController instance. + +bool IOEthernetController::init(OSDictionary * properties) +{ + if (!super::init(properties)) + { + DLOG("IOEthernetController: super::init() failed\n"); + return false; + } + + return true; +} + +//--------------------------------------------------------------------------- +// Free the IOEthernetController instance. + +void IOEthernetController::free() +{ + // Any allocated resources should be released here. + + super::free(); +} + +//--------------------------------------------------------------------------- +// Publish Ethernet controller capabilites and properties. + +bool IOEthernetController::publishProperties() +{ + bool ret = false; + IOEthernetAddress addr; + OSDictionary * dict; + + do { + // Let the superclass publish properties first. + + if (super::publishProperties() == false) + break; + + // Publish the controller's Ethernet address. 
+ + if ( (getHardwareAddress(&addr) != kIOReturnSuccess) || + (setProperty(kIOMACAddress, (void *) &addr, + kIOEthernetAddressSize) == false) ) + { + break; + } + + // Publish Ethernet defined packet filters. + + dict = OSDynamicCast(OSDictionary, getProperty(kIOPacketFilters)); + if ( dict ) + { + UInt32 filters; + OSNumber * num; + + if ( getPacketFilters(gIOEthernetWakeOnLANFilterGroup, + &filters) != kIOReturnSuccess ) + { + break; + } + + num = OSNumber::withNumber(filters, sizeof(filters) * 8); + if (num == 0) + break; + + ret = dict->setObject(gIOEthernetWakeOnLANFilterGroup, num); + num->release(); + } + } + while (false); + + return ret; +} + +//--------------------------------------------------------------------------- +// Set or change the station address used by the Ethernet controller. + +IOReturn +IOEthernetController::setHardwareAddress(const IOEthernetAddress * addr) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Enable or disable multicast mode. + +IOReturn IOEthernetController::setMulticastMode(bool active) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Enable or disable promiscuous mode. + +IOReturn IOEthernetController::setPromiscuousMode(bool active) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Enable or disable the wake on Magic Packet support. + +IOReturn IOEthernetController::setWakeOnMagicPacket(bool active) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Set the list of multicast addresses that the multicast filter should use +// to match against the destination address of an incoming frame. The frame +// should be accepted when a match occurs. 
+ +IOReturn IOEthernetController::setMulticastList(IOEthernetAddress * /*addrs*/, + UInt32 /*count*/) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Allocate and return a new IOEthernetInterface instance. + +IONetworkInterface * IOEthernetController::createInterface() +{ + IOEthernetInterface * netif = new IOEthernetInterface; + + if ( netif && ( netif->init( this ) == false ) ) + { + netif->release(); + netif = 0; + } + return netif; +} + +//--------------------------------------------------------------------------- +// Returns all the packet filters supported by the Ethernet controller. +// This method will perform a bitwise OR of: +// +// kIOPacketFilterUnicast +// kIOPacketFilterBroadcast +// kIOPacketFilterMulticast +// kIOPacketFilterPromiscuous +// +// and write it to the argument provided if the group specified is +// gIONetworkFilterGroup, otherwise 0 is returned. Drivers that support +// a different set of filters should override this method. +// +// Returns kIOReturnSuccess. Drivers that override this method must return +// kIOReturnSuccess to indicate success, or an error code otherwise. + +IOReturn +IOEthernetController::getPacketFilters(const OSSymbol * group, + UInt32 * filters) const +{ + *filters = 0; + + if ( group == gIONetworkFilterGroup ) + { + return getPacketFilters(filters); + } + else + { + return kIOReturnSuccess; + } +} + +IOReturn IOEthernetController::getPacketFilters(UInt32 * filters) const +{ + *filters = ( kIOPacketFilterUnicast | + kIOPacketFilterBroadcast | + kIOPacketFilterMulticast | + kIOPacketFilterPromiscuous ); + + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Enable a filter from the specified group. 
+
+#define UCAST_BCAST_MASK \
+        ( kIOPacketFilterUnicast | kIOPacketFilterBroadcast )
+
+IOReturn IOEthernetController::enablePacketFilter(
+                                const OSSymbol * group,
+                                UInt32           aFilter,
+                                UInt32           enabledFilters,
+                                IOOptionBits     options = 0)
+{
+    IOReturn  ret = kIOReturnUnsupported;
+    UInt32    newFilters = enabledFilters | aFilter;
+
+    if ( group == gIONetworkFilterGroup )
+    {
+        // The default action is to call setMulticastMode() or
+        // setPromiscuousMode() to handle multicast or promiscuous
+        // filter changes.
+
+        if ( aFilter == kIOPacketFilterMulticast )
+        {
+            ret = setMulticastMode(true);
+        }
+        else if ( aFilter == kIOPacketFilterPromiscuous )
+        {
+            ret = setPromiscuousMode(true);
+        }
+        else if ( (newFilters ^ enabledFilters) & UCAST_BCAST_MASK )
+        {
+            ret = kIOReturnSuccess;
+        }
+    }
+    else if ( group == gIOEthernetWakeOnLANFilterGroup )
+    {
+        if ( aFilter == kIOEthernetWakeOnMagicPacket )
+        {
+            ret = setWakeOnMagicPacket(true);
+        }
+    }
+
+    return ret;
+}
+
+//---------------------------------------------------------------------------
+// Disable a filter from the specified filter group.
+
+IOReturn IOEthernetController::disablePacketFilter(
+                                const OSSymbol * group,
+                                UInt32           aFilter,
+                                UInt32           enabledFilters,
+                                IOOptionBits     options = 0)
+{
+    IOReturn  ret = kIOReturnUnsupported;
+    UInt32    newFilters = enabledFilters & ~aFilter;
+
+    if ( group == gIONetworkFilterGroup )
+    {
+        // The default action is to call setMulticastMode() or
+        // setPromiscuousMode() to handle multicast or promiscuous
+        // filter changes.
+ + if ( aFilter == kIOPacketFilterMulticast ) + { + ret = setMulticastMode(false); + } + else if ( aFilter == kIOPacketFilterPromiscuous ) + { + ret = setPromiscuousMode(false); + } + else if ( (newFilters ^ enabledFilters) & UCAST_BCAST_MASK ) + { + ret = kIOReturnSuccess; + } + } + else if ( group == gIOEthernetWakeOnLANFilterGroup ) + { + if ( aFilter == kIOEthernetWakeOnMagicPacket ) + { + ret = setWakeOnMagicPacket(false); + } + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Get the Ethernet controller's station address. +// Call the Ethernet specific (overloaded) form. + +IOReturn +IOEthernetController::getHardwareAddress(void * addr, + UInt32 * inOutAddrBytes) +{ + UInt32 bufBytes; + + if (inOutAddrBytes == 0) + return kIOReturnBadArgument; + + // Cache the size of the caller's buffer, and replace it with the + // number of bytes required. + + bufBytes = *inOutAddrBytes; + *inOutAddrBytes = kIOEthernetAddressSize; + + // Make sure the buffer is large enough for a single Ethernet + // hardware address. + + if ((addr == 0) || (bufBytes < kIOEthernetAddressSize)) + return kIOReturnNoSpace; + + return getHardwareAddress((IOEthernetAddress *) addr); +} + +//--------------------------------------------------------------------------- +// Set or change the station address used by the Ethernet controller. +// Call the Ethernet specific (overloaded) version of this method. + +IOReturn +IOEthernetController::setHardwareAddress(const void * addr, + UInt32 addrBytes) +{ + if ((addr == 0) || (addrBytes != kIOEthernetAddressSize)) + return kIOReturnBadArgument; + + return setHardwareAddress((const IOEthernetAddress *) addr); +} + +//--------------------------------------------------------------------------- +// Report the max/min packet sizes, including the frame header and FCS bytes. 
+ +IOReturn IOEthernetController::getMaxPacketSize(UInt32 * maxSize) const +{ + *maxSize = kIOEthernetMaxPacketSize; + return kIOReturnSuccess; +} + +IOReturn IOEthernetController::getMinPacketSize(UInt32 * minSize) const +{ + *minSize = kIOEthernetMinPacketSize; + return kIOReturnSuccess; +} diff --git a/iokit/Families/IONetworking/IOEthernetInterface.cpp b/iokit/Families/IONetworking/IOEthernetInterface.cpp new file mode 100644 index 000000000..4b975a606 --- /dev/null +++ b/iokit/Families/IONetworking/IOEthernetInterface.cpp @@ -0,0 +1,922 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOEthernetInterface.cpp + * + * HISTORY + * 8-Jan-1999 Joe Liu (jliu) created. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +extern "C" { +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +void arpwhohas(struct arpcom * ac, struct in_addr * addr); +} + +//--------------------------------------------------------------------------- + +#define super IONetworkInterface + +OSDefineMetaClassAndStructors( IOEthernetInterface, IONetworkInterface ) +OSMetaClassDefineReservedUnused( IOEthernetInterface, 0); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 1); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 2); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 3); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 4); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 5); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 6); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 7); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 8); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 9); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 10); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 11); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 12); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 13); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 14); +OSMetaClassDefineReservedUnused( IOEthernetInterface, 15); + +// The name prefix for all BSD Ethernet interfaces. +// +#define kIOEthernetInterfaceNamePrefix "en" + +//--------------------------------------------------------------------------- +// Macros + +#ifdef DEBUG +#define DLOG(fmt, args...) IOLog(fmt, ## args) +#else +#define DLOG(fmt, args...) 
+#endif + +UInt32 IOEthernetInterface::getFilters(const OSDictionary * dict, + const OSSymbol * group) +{ + OSNumber * num; + UInt32 filters = 0; + + assert( dict && group ); + + if (( num = (OSNumber *) dict->getObject(group) )) + { + filters = num->unsigned32BitValue(); + } + return filters; +} + +bool IOEthernetInterface::setFilters(OSDictionary * dict, + const OSSymbol * group, + UInt32 filters) +{ + OSNumber * num; + bool ret = false; + + assert( dict && group ); + + num = (OSNumber *) dict->getObject(group); + if ( num == 0 ) + { + if (( num = OSNumber::withNumber(filters, 32) )) + { + ret = dict->setObject(group, num); + num->release(); + } + } + else + { + num->setValue(filters); + ret = true; + } + return ret; +} + +#define GET_REQUIRED_FILTERS(g) getFilters(_requiredFilters, (g)) +#define GET_ACTIVE_FILTERS(g) getFilters(_activeFilters, (g)) +#define GET_SUPPORTED_FILTERS(g) getFilters(_supportedFilters, (g)) + +#define SET_REQUIRED_FILTERS(g, v) setFilters(_requiredFilters, (g), (v)) +#define SET_ACTIVE_FILTERS(g, v) setFilters(_activeFilters, (g), (v)) + +//--------------------------------------------------------------------------- +// Initialize an IOEthernetInterface instance. Instance variables are +// initialized, and an arpcom structure is allocated. + +bool IOEthernetInterface::init(IONetworkController * controller) +{ + // Allocate an arpcom structure, then call super::init(). + // We expect our superclass to call getIfnet() during its init() + // method, so arpcom must be allocated before calling super::init(). + + if ( (_arpcom = (struct arpcom *) IOMalloc(sizeof(*_arpcom))) == 0 ) + { + DLOG("IOEthernetInterface: arpcom allocation failed\n"); + return false; + } + + // Pass the init() call to our superclass. + + if ( super::init(controller) == false ) + return false; + + // Add an IONetworkData with room to hold an IOEthernetStats structure. 
+ // This class does not reference the data object created, and no harm + // is done if the data object is released or replaced. + + IONetworkData * data = IONetworkData::withInternalBuffer( + kIOEthernetStatsKey, + sizeof(IOEthernetStats)); + if (data) + { + addNetworkData(data); + data->release(); + } + + // Create and initialize the filter dictionaries. + + _requiredFilters = OSDictionary::withCapacity(4); + _activeFilters = OSDictionary::withCapacity(4); + + if ( (_requiredFilters == 0) || (_activeFilters == 0) ) + return false; + + _supportedFilters = OSDynamicCast(OSDictionary, + controller->getProperty(kIOPacketFilters)); + if ( _supportedFilters == 0 ) return false; + _supportedFilters->retain(); + + // Controller's Unicast (directed) and Broadcast filters should always + // be enabled. Those bits should never be cleared. + + if ( !SET_REQUIRED_FILTERS( gIONetworkFilterGroup, + kIOPacketFilterUnicast | + kIOPacketFilterBroadcast ) + || !SET_REQUIRED_FILTERS( gIOEthernetWakeOnLANFilterGroup, 0 ) + || !SET_ACTIVE_FILTERS( gIONetworkFilterGroup, 0 ) + || !SET_ACTIVE_FILTERS( gIOEthernetWakeOnLANFilterGroup, 0 ) ) + { + return false; + } + + // Publish filter dictionaries to property table. + + setProperty( kIORequiredPacketFilters, _requiredFilters ); + setProperty( kIOActivePacketFilters, _activeFilters ); + + return true; +} + +//--------------------------------------------------------------------------- +// Initialize the given ifnet structure. The argument specified is a pointer +// to an ifnet structure obtained through getIfnet(). IOEthernetInterface +// will initialize this structure in a manner that is appropriate for most +// Ethernet interfaces, then call super::initIfnet() to allow the superclass +// to perform generic interface initialization. +// +// ifp: Pointer to the ifnet structure to be initialized. +// +// Returns true on success, false otherwise. 
+
+bool IOEthernetInterface::initIfnet(struct ifnet * ifp)
+{
+    struct arpcom * ac = (struct arpcom *) ifp;
+
+    assert(ac);
+
+    lock();
+
+    bzero(ac, sizeof(*ac));
+
+    // Set defaults suitable for Ethernet interfaces.
+
+    setInterfaceType( IFT_ETHER );
+    setMaxTransferUnit( ETHERMTU );
+    setMediaAddressLength( NUM_EN_ADDR_BYTES );
+    setMediaHeaderLength( ETHERHDRSIZE );
+    setFlags( IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS );
+
+    unlock();
+
+    return super::initIfnet(ifp);
+}
+
+//---------------------------------------------------------------------------
+// Free the IOEthernetInterface instance. The memory allocated
+// for the arpcom structure is released.
+
+void IOEthernetInterface::free()
+{
+    if ( _arpcom )
+    {
+        IOFree(_arpcom, sizeof(*_arpcom));
+        _arpcom = 0;
+    }
+
+    if ( _requiredFilters )
+    {
+        _requiredFilters->release();
+        _requiredFilters = 0;
+    }
+
+    if ( _activeFilters )
+    {
+        _activeFilters->release();
+        _activeFilters = 0;
+    }
+
+    if ( _supportedFilters )
+    {
+        _supportedFilters->release();
+        _supportedFilters = 0;
+    }
+
+    super::free();
+}
+
+//---------------------------------------------------------------------------
+// This method returns a pointer to an ifnet structure maintained
+// by the family specific interface object. IOEthernetInterface
+// allocates an arpcom structure in init(), and returns a pointer
+// to that structure when this method is called.
+//
+// Returns a pointer to an ifnet structure.
+
+struct ifnet * IOEthernetInterface::getIfnet() const
+{
+    return (&(_arpcom->ac_if));
+}
+
+//---------------------------------------------------------------------------
+// The name of the interface advertised to the network layer
+// is generated by concatenating the string returned by this method,
+// and a unit number.
+//
+// Returns a pointer to a constant string "en". Thus Ethernet interfaces
+// will be registered as en0, en1, etc.
+ +const char * IOEthernetInterface::getNamePrefix() const +{ + return kIOEthernetInterfaceNamePrefix; +} + +//--------------------------------------------------------------------------- +// Prepare the 'Ethernet' controller after it has been opened. This is called +// by IONetworkInterface after a controller has accepted an open from this +// interface. IOEthernetInterface uses this method to inspect the controller, +// and to cache certain controller properties, such as its hardware address. +// This method is called with the arbitration lock held. +// +// controller: The controller object that was opened. +// +// Returns true on success, false otherwise +// (which will cause the controller to be closed). + +bool IOEthernetInterface::controllerDidOpen(IONetworkController * ctr) +{ + bool ret = false; + OSData * addrData; + IOEthernetAddress * addr; + + do { + // Call the controllerDidOpen() in superclass first. + + if ( (ctr == 0) || (super::controllerDidOpen(ctr) == false) ) + break; + + // If the controller supports some form of multicast filtering, + // then set the ifnet IFF_MULTICAST flag. + + if ( GET_SUPPORTED_FILTERS(gIONetworkFilterGroup) & + (kIOPacketFilterMulticast | kIOPacketFilterMulticastAll) ) + { + setFlags(IFF_MULTICAST); + } + + // Get the controller's MAC/Ethernet address. + + addrData = OSDynamicCast(OSData, ctr->getProperty(kIOMACAddress)); + if ( (addrData == 0) || (addrData->getLength() != NUM_EN_ADDR_BYTES) ) + { + DLOG("%s: kIOMACAddress property access error (len %d)\n", + getName(), addrData ? addrData->getLength() : 0); + break; + } + + addr = (IOEthernetAddress *) addrData->getBytesNoCopy(); + +#if 1 // Print the address + IOLog("%s: Ethernet address %02x:%02x:%02x:%02x:%02x:%02x\n", + ctr->getName(), + addr->bytes[0], + addr->bytes[1], + addr->bytes[2], + addr->bytes[3], + addr->bytes[4], + addr->bytes[5]); +#endif + + // Copy the hardware address we obtained from the controller + // to the arpcom structure. 
+ + bcopy(addr, _arpcom->ac_enaddr, NUM_EN_ADDR_BYTES); + + ret = true; + } + while (0); + + return ret; +} + +//--------------------------------------------------------------------------- +// When a close from our last client is received, the interface will +// close the controller. But before the controller is closed, this method +// will be called by our superclass to perform any final cleanup. This +// method is called with the arbitration lock held. +// +// IOEthernetInterface will ensure that the controller is disabled. +// +// controller: The currently opened controller object. + +void IOEthernetInterface::controllerWillClose(IONetworkController * ctr) +{ + super::controllerWillClose(ctr); +} + +//--------------------------------------------------------------------------- +// Handle ioctl commands originated from the network layer. +// Commands not handled by this method are passed to our superclass. +// +// Argument convention is: +// +// arg0 - (struct ifnet *) +// arg1 - (void *) +// +// The commands handled by IOEthernetInterface are: +// +// SIOCSIFADDR +// SIOCSIFFLAGS +// SIOCADDMULTI +// SIOCDELMULTI +// +// Returns an error code defined in errno.h (BSD). + +SInt32 IOEthernetInterface::performCommand( IONetworkController * ctr, + UInt32 cmd, + void * arg0, + void * arg1 ) +{ + SInt32 ret; + + assert( arg0 == _arpcom ); + + if ( ctr == 0 ) return EINVAL; + + switch ( cmd ) + { + case SIOCSIFFLAGS: + case SIOCADDMULTI: + case SIOCDELMULTI: + case SIOCSIFADDR: + + ret = (int) ctr->executeCommand( + this, /* client */ + (IONetworkController::Action) + &IOEthernetInterface::performGatedCommand, + this, /* target */ + ctr, /* param0 */ + (void *) cmd, /* param1 */ + arg0, /* param2 */ + arg1 ); /* param3 */ + break; + + default: + // Unknown command, let our superclass deal with it. 
+            ret = super::performCommand(ctr, cmd, arg0, arg1);
+            break;
+    }
+
+    return ret;
+}
+
+//---------------------------------------------------------------------------
+// Handle an ioctl command on the controller's workloop context.
+
+int IOEthernetInterface::performGatedCommand(void * target,
+                                             void * arg1_ctr,
+                                             void * arg2_cmd,
+                                             void * arg3_0,
+                                             void * arg4_1)
+{
+    IOEthernetInterface * self = (IOEthernetInterface *) target;
+    IONetworkController * ctr  = (IONetworkController *) arg1_ctr;
+    SInt                  ret  = EOPNOTSUPP;
+
+    // Refuse to perform controller I/O if the controller is in a
+    // low-power state that makes it "unusable".
+
+    if ( self->_controllerLostPower ) return EPWROFF;
+
+    self->lock();
+
+    switch ( (UInt32) arg2_cmd )
+    {
+        case SIOCSIFADDR:
+            ret = self->syncSIOCSIFADDR(ctr);
+            break;
+
+        case SIOCSIFFLAGS:
+            ret = self->syncSIOCSIFFLAGS(ctr);
+            break;
+
+        case SIOCADDMULTI:
+            ret = self->syncSIOCADDMULTI(ctr);
+            break;
+
+        case SIOCDELMULTI:
+            ret = self->syncSIOCDELMULTI(ctr);
+            break;
+    }
+
+    self->unlock();
+
+    return ret;
+}
+
+//---------------------------------------------------------------------------
+// enableController() is responsible for calling the controller's enable()
+// method and restoring the state of the controller. We assume that
+// controllers can completely reset their state upon receiving a disable()
+// method call. And when it is brought back up, the interface should
+// assist in restoring the previous state of the Ethernet controller.
+
+IOReturn IOEthernetInterface::enableController(IONetworkController * ctr)
+{
+    IOReturn ret     = kIOReturnSuccess;
+    bool     enabled = false;
+    UInt32   filters;
+
+    assert(ctr);
+
+    do {
+        // Is controller already enabled? If so, exit and return success.
+
+        if ( _ctrEnabled )
+            break;
+
+        // Send the controller an enable command.
+
+        if ( (ret = ctr->enable((IOService *) this)) != kIOReturnSuccess )
+            break;     // unable to enable the controller.
+ + enabled = true; + + // Disable all Wake-On-LAN filters. + + filters = GET_ACTIVE_FILTERS(gIOEthernetWakeOnLANFilterGroup); + + for (UInt i = 0; i < (sizeof(filters) * 8); i++) + { + if ((1 << i) & filters) + { + disableFilter(ctr, gIOEthernetWakeOnLANFilterGroup, + (1 << i)); + } + } + + // Restore current filter selection. + + SET_ACTIVE_FILTERS(gIONetworkFilterGroup, 0); + filters = GET_REQUIRED_FILTERS(gIONetworkFilterGroup); + + for (UInt i = 0; i < (sizeof(filters) * 8); i++) + { + if ((1 << i) & filters) + { + if ( (ret = enableFilter(ctr, gIONetworkFilterGroup, + (1 << i))) + != kIOReturnSuccess ) + break; + } + } + if ( ret != kIOReturnSuccess ) + break; + + // Restore multicast filter settings. + + syncSIOCADDMULTI(ctr); + + _ctrEnabled = true; + + } while (false); + + // Disable the controller if a serious error has occurred after the + // controller has been enabled. + + if ( enabled && (ret != kIOReturnSuccess) ) + { + ctr->disable((IOService *) this); + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Handles SIOCSIFFLAGS ioctl command for Ethernet interfaces. The network +// stack has changed the if_flags field in ifnet. Our job is to go +// through if_flags and see what has changed, and act accordingly. +// +// The fact that if_flags contains both generic and Ethernet specific bits +// means that we cannot move some of the default flag processing to the +// superclass. + +int IOEthernetInterface::syncSIOCSIFFLAGS(IONetworkController * ctr) +{ + UInt16 flags = getFlags(); + IOReturn ret = kIOReturnSuccess; + + if ( ( ((flags & IFF_UP) == 0) || _controllerLostPower ) && + ( flags & IFF_RUNNING ) ) + { + // If interface is marked down and it is currently running, + // then stop it. 
+ + ctr->disable((IOService *) this); + flags &= ~IFF_RUNNING; + _ctrEnabled = false; + } + else if ( ( flags & IFF_UP ) && + ( _controllerLostPower == false ) && + ((flags & IFF_RUNNING) == 0) ) + { + // If interface is marked up and it is currently stopped, + // then start it. + + if ( (ret = enableController(ctr)) == kIOReturnSuccess ) + flags |= IFF_RUNNING; + } + + if ( flags & IFF_RUNNING ) + { + IOReturn rc; + + // We don't expect multiple flags to be changed for a given + // SIOCSIFFLAGS call. + + // Promiscuous mode + + rc = (flags & IFF_PROMISC) ? + enableFilter(ctr, gIONetworkFilterGroup, + kIOPacketFilterPromiscuous) : + disableFilter(ctr, gIONetworkFilterGroup, + kIOPacketFilterPromiscuous); + + if (ret == kIOReturnSuccess) ret = rc; + + // Multicast-All mode + + rc = (flags & IFF_ALLMULTI) ? + enableFilter(ctr, gIONetworkFilterGroup, + kIOPacketFilterMulticastAll) : + disableFilter(ctr, gIONetworkFilterGroup, + kIOPacketFilterMulticastAll); + + if (ret == kIOReturnSuccess) ret = rc; + } + + // Update the flags field to pick up any modifications. Also update the + // property table to reflect any flag changes. + + setFlags(flags, ~flags); + + return errnoFromReturn(ret); +} + +//--------------------------------------------------------------------------- +// Handles SIOCSIFADDR ioctl. + +SInt IOEthernetInterface::syncSIOCSIFADDR(IONetworkController * ctr) +{ + IOReturn ret = kIOReturnSuccess; + + // Interface is implicitly brought up by an SIOCSIFADDR ioctl. + + setFlags(IFF_UP); + + if ( (getFlags() & IFF_RUNNING) == 0 ) + { + if ( (ret = enableController(ctr)) == kIOReturnSuccess ) + setFlags(IFF_RUNNING); + } + + return errnoFromReturn(ret); +} + +//--------------------------------------------------------------------------- +// Handle SIOCADDMULTI ioctl command. + +SInt IOEthernetInterface::syncSIOCADDMULTI(IONetworkController * ctr) +{ + IOReturn ret; + + // Make sure multicast filter is active. 
+ + ret = enableFilter(ctr, gIONetworkFilterGroup, kIOPacketFilterMulticast); + + if ( ret == kIOReturnSuccess ) + { + // Load multicast addresses only if the filter was activated. + + ret = setupMulticastFilter(ctr); + + // If the list is now empty, then deactivate the multicast filter. + + if ( _mcAddrCount == 0 ) + { + IOReturn dret = disableFilter(ctr, gIONetworkFilterGroup, + kIOPacketFilterMulticast); + + if (ret == kIOReturnSuccess) ret = dret; + } + } + + return errnoFromReturn(ret); +} + +//--------------------------------------------------------------------------- +// Handle SIOCDELMULTI ioctl command. + +SInt IOEthernetInterface::syncSIOCDELMULTI(IONetworkController * ctr) +{ + return syncSIOCADDMULTI(ctr); +} + +//--------------------------------------------------------------------------- +// Enable a packet filter. + +IOReturn +IOEthernetInterface::enableFilter(IONetworkController * ctr, + const OSSymbol * group, + UInt32 filter, + IOOptionBits options = 0) +{ + IOReturn ret; + UInt32 reqFilters; + UInt32 actFilters; + + // If the controller does not support the packet filter, + // there's no need to proceed. + + if (( GET_SUPPORTED_FILTERS(group) & filter ) == 0) + return kIOReturnUnsupported; + + do { + // Add specified filter to the set of required filters. + + reqFilters = GET_REQUIRED_FILTERS(group) | filter; + SET_REQUIRED_FILTERS(group, reqFilters); + + // Abort if no changes are needed. + + ret = kIOReturnSuccess; + + actFilters = GET_ACTIVE_FILTERS(group); + + if ( (( actFilters ^ reqFilters ) & filter) == 0 ) + break; + + // Send a command to the controller driver. + + ret = ctr->enablePacketFilter(group, filter, actFilters, options); + + if ( ret == kIOReturnSuccess ) + { + SET_ACTIVE_FILTERS(group, actFilters | filter); + } + } + while (false); + + return ret; +} + +//--------------------------------------------------------------------------- +// Disable a packet filter. 
+ +IOReturn +IOEthernetInterface::disableFilter(IONetworkController * ctr, + const OSSymbol * group, + UInt32 filter, + IOOptionBits options = 0) +{ + IOReturn ret; + UInt32 reqFilters; + UInt32 actFilters; + + do { + // Remove specified filter from the set of required filters. + + reqFilters = GET_REQUIRED_FILTERS(group) & ~filter; + SET_REQUIRED_FILTERS(group, reqFilters); + + // Abort if no changes are needed. + + ret = kIOReturnSuccess; + + actFilters = GET_ACTIVE_FILTERS(group); + + if ( (( actFilters ^ reqFilters ) & filter) == 0 ) + break; + + // Send a command to the controller driver. + + ret = ctr->disablePacketFilter(group, filter, actFilters, options); + + if ( ret == kIOReturnSuccess ) + { + SET_ACTIVE_FILTERS(group, actFilters & ~filter); + } + } + while (false); + + return ret; +} + +//--------------------------------------------------------------------------- +// Cache the list of multicast addresses and send a command to the +// controller to update the multicast list. + +IOReturn +IOEthernetInterface::setupMulticastFilter(IONetworkController * ctr) +{ + void * multiAddrs = 0; + UInt mcount; + OSData * mcData = 0; + struct ifnet * ifp = (struct ifnet *) _arpcom; + struct ifmultiaddr * ifma; + IOReturn ret = kIOReturnSuccess; + bool ok; + + assert(ifp); + + // Update the multicast addresses count ivar. + + mcount = 0; + for (ifma = ifp->if_multiaddrs.lh_first; + ifma != NULL; + ifma = ifma->ifma_link.le_next) + { + if ((ifma->ifma_addr->sa_family == AF_UNSPEC) || + (ifma->ifma_addr->sa_family == AF_LINK)) + mcount++; + } + _mcAddrCount = mcount; + + if ( mcount ) + { + char * addrp; + + mcData = OSData::withCapacity(mcount * NUM_EN_ADDR_BYTES); + if (!mcData) + { + DLOG("%s: no memory for multicast address list\n", getName()); + return kIOReturnNoMemory; + } + + // Loop through the linked multicast structures and write the + // address to the OSData. 
+ + for (ifma = ifp->if_multiaddrs.lh_first; + ifma != NULL; + ifma = ifma->ifma_link.le_next) + { + if (ifma->ifma_addr->sa_family == AF_UNSPEC) + addrp = &ifma->ifma_addr->sa_data[0]; + else + if (ifma->ifma_addr->sa_family == AF_LINK) + addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); + else + continue; + + ok = mcData->appendBytes((const void *) addrp, NUM_EN_ADDR_BYTES); + assert(ok); + } + + multiAddrs = (void *) mcData->getBytesNoCopy(); + assert(multiAddrs); + } + + // Issue a controller command to setup the multicast filter. + + ret = ((IOEthernetController *)ctr)->setMulticastList( + (IOEthernetAddress *) multiAddrs, + mcount); + if (mcData) + { + if (ret == kIOReturnSuccess) + setProperty(kIOMulticastAddressList, mcData); + + mcData->release(); + } + else { + removeProperty(kIOMulticastAddressList); + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Power management support. +// +// Handlers called, with the controller's gate closed, in response to a +// controller power state change. + +IOReturn +IOEthernetInterface::controllerWillChangePowerState( + IONetworkController * ctr, + IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ) +{ + if ( ( (flags & IOPMDeviceUsable ) == 0) && + ( _controllerLostPower == false ) ) + { + _controllerLostPower = true; + + // Enable Magic Packet if supported. + + if ( GET_SUPPORTED_FILTERS(gIOEthernetWakeOnLANFilterGroup) & + kIOEthernetWakeOnMagicPacket ) + { + enableFilter(ctr, gIOEthernetWakeOnLANFilterGroup, + kIOEthernetWakeOnMagicPacket); + } + + // Set _controllerLostPower, then call the SIOCSIFFLAGS handler to + // disable the controller, then mark the interface as Not Running. 
+ + syncSIOCSIFFLAGS(ctr); + } + + return super::controllerWillChangePowerState( ctr, flags, + stateNumber, + policyMaker ); +} + +IOReturn +IOEthernetInterface::controllerDidChangePowerState( + IONetworkController * ctr, + IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ) +{ + IOReturn ret = super::controllerDidChangePowerState( ctr, flags, + stateNumber, + policyMaker ); + + if ( ( flags & IOPMDeviceUsable ) && ( _controllerLostPower == true ) ) + { + _controllerLostPower = false; + + // Clear _controllerLostPower, then call the SIOCSIFFLAGS handler to + // perhaps enable the controller, restore all Ethernet controller + // state, then mark the interface as Running. + + syncSIOCSIFFLAGS(ctr); + } + + return ret; +} diff --git a/iokit/Families/IONetworking/IOKernelDebugger.cpp b/iokit/Families/IONetworking/IOKernelDebugger.cpp new file mode 100644 index 000000000..daa450d8f --- /dev/null +++ b/iokit/Families/IONetworking/IOKernelDebugger.cpp @@ -0,0 +1,688 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. 
All rights reserved. + * + * IOKernelDebugger.cpp + * + * HISTORY + */ + +#include +#include +#include +#include +#include +#include +#include + +//--------------------------------------------------------------------------- +// IOKDP + +#define kIOKDPEnableKDP "IOEnableKDP" +#define kIOKDPDriverMatch "IODriverMatch" +#define kIOKDPDriverNubMatch "IODriverNubMatch" + +class IOKDP : public IOService +{ + OSDeclareDefaultStructors( IOKDP ) + +public: + static void initialize(); + + virtual bool start( IOService * provider ); + + virtual void stop( IOService * provider ); + + virtual bool matchProvider( IOService * provider ); + + virtual bool matchServiceWithDictionary( IOService * service, + OSDictionary * match ); + + virtual IOReturn message( UInt32 type, + IOService * provider, + void * argument = 0 ); +}; + +//--------------------------------------------------------------------------- +// IOKDP defined globals. + +static IOLock * gIOKDPLock = 0; +static IOKDP * gIOKDP = 0; + +#define super IOService +OSDefineMetaClassAndStructorsWithInit( IOKDP, IOService, + IOKDP::initialize() ) + +//--------------------------------------------------------------------------- +// Match the provider with the matching dictionary in our property table. 
+ +bool IOKDP::matchProvider(IOService * provider) +{ + IOService * driver = 0; + IOService * driverNub = 0; + OSBoolean * aBool; + + if ( provider ) driver = provider->getProvider(); + if ( driver ) driverNub = driver->getProvider(); + + if ( (driver == 0) || (driverNub == 0) ) + return false; + + if ( ( aBool = OSDynamicCast(OSBoolean, getProperty(kIOKDPEnableKDP)) ) && + ( aBool->isTrue() == false ) ) + return false; + + if ( matchServiceWithDictionary( driver, (OSDictionary *) + getProperty(kIOKDPDriverMatch)) ) + { + // IOLog("IOKDP: %s\n", kIOKDPDriverMatch); + return true; + } + + if ( matchServiceWithDictionary( driverNub, (OSDictionary *) + getProperty(kIOKDPDriverNubMatch)) ) + { + // IOLog("IOKDP: %s\n", kIOKDPDriverNubMatch); + return true; + } + + return false; +} + +//--------------------------------------------------------------------------- +// Match an IOService with a matching dictionary. + +bool IOKDP::matchServiceWithDictionary(IOService * service, + OSDictionary * match) +{ + OSCollectionIterator * matchIter; + OSCollectionIterator * arrayIter = 0; + OSCollection * array; + OSObject * objM; + OSObject * objP; + OSSymbol * sym; + bool isMatch = false; + + if ( ( OSDynamicCast(OSDictionary, match) == 0 ) || + ( match->getCount() == 0 ) || + ( (matchIter = OSCollectionIterator::withCollection(match)) == 0 ) ) + return false; + + while ( ( sym = OSDynamicCast(OSSymbol, matchIter->getNextObject()) ) ) + { + objM = match->getObject(sym); + objP = service->getProperty(sym); + + isMatch = false; + + if ( arrayIter ) + { + arrayIter->release(); + arrayIter = 0; + } + + if ( (array = OSDynamicCast( OSCollection, objM )) ) + { + arrayIter = OSCollectionIterator::withCollection( array ); + if ( arrayIter == 0 ) break; + } + + do { + if ( arrayIter && ((objM = arrayIter->getNextObject()) == 0) ) + break; + + if ( objM && objP && objM->isEqualTo(objP) ) + { + isMatch = true; + break; + } + } + while ( arrayIter ); + + if ( isMatch == false ) break; + } + + 
if ( arrayIter ) arrayIter->release(); + matchIter->release(); + + return isMatch; +} + +//--------------------------------------------------------------------------- +// IOKDP class initializer. + +void IOKDP::initialize() +{ + gIOKDPLock = IOLockAlloc(); + assert( gIOKDPLock ); +} + +//--------------------------------------------------------------------------- +// start/stop/message. + +bool IOKDP::start( IOService * provider ) +{ + bool ret = false; + + if ( super::start(provider) == false ) + return false; + + IOLockLock( gIOKDPLock ); + + do { + if ( gIOKDP ) + break; + + if ( matchProvider(provider) == false ) + break; + + if ( provider->open(this) == false ) + break; + + publishResource("kdp"); + + gIOKDP = this; + ret = true; + } + while ( false ); + + IOLockUnlock( gIOKDPLock ); + + return ret; +} + +void IOKDP::stop( IOService * provider ) +{ + provider->close(this); + + IOLockLock( gIOKDPLock ); + + if ( gIOKDP == this ) gIOKDP = 0; + + IOLockUnlock( gIOKDPLock ); + + super::stop(provider); +} + +IOReturn IOKDP::message( UInt32 type, + IOService * provider, + void * argument ) +{ + if ( type == kIOMessageServiceIsTerminated ) + { + provider->close(this); + } + return kIOReturnSuccess; +} + + +//--------------------------------------------------------------------------- +// IOKernelDebugger + +extern "C" { +// +// Defined in osfmk/kdp/kdp_en_debugger.h, but the header file is not +// exported, thus the definition is replicated here. 
+// +typedef void (*kdp_send_t)( void * pkt, UInt pkt_len ); +typedef void (*kdp_receive_t)( void * pkt, UInt * pkt_len, UInt timeout ); +void kdp_register_send_receive( kdp_send_t send, kdp_receive_t receive ); +} + +#undef super +#define super IOService +OSDefineMetaClassAndStructors( IOKernelDebugger, IOService ) +OSMetaClassDefineReservedUnused( IOKernelDebugger, 0); +OSMetaClassDefineReservedUnused( IOKernelDebugger, 1); +OSMetaClassDefineReservedUnused( IOKernelDebugger, 2); +OSMetaClassDefineReservedUnused( IOKernelDebugger, 3); + +// IOKernelDebugger global variables. +// +IOService * gIODebuggerDevice = 0; +IODebuggerTxHandler gIODebuggerTxHandler = 0; +IODebuggerRxHandler gIODebuggerRxHandler = 0; +UInt32 gIODebuggerTxBytes = 0; +UInt32 gIODebuggerRxBytes = 0; +SInt32 gIODebuggerSemaphore = 0; +UInt32 gIODebuggerFlag = 0; + +// Global debugger flags. +// +enum { + kIODebuggerFlagRegistered = 0x01, + kIODebuggerFlagWarnNullHandler = 0x02 +}; + +//--------------------------------------------------------------------------- +// The KDP receive dispatch function. Dispatches KDP receive requests to the +// registered receive handler. This function is registered with KDP via +// kdp_register_send_receive(). + +void IOKernelDebugger::kdpReceiveDispatcher( void * buffer, + UInt32 * length, + UInt32 timeout ) +{ + *length = 0; // return a zero length field by default. + + if ( gIODebuggerSemaphore ) return; // FIXME - Driver is busy! + + (*gIODebuggerRxHandler)( gIODebuggerDevice, buffer, length, timeout ); + + gIODebuggerRxBytes += *length; +} + +//--------------------------------------------------------------------------- +// The KDP transmit dispatch function. Dispatches KDP receive requests to the +// registered transmit handler. This function is registered with KDP via +// kdp_register_send_receive(). + +void IOKernelDebugger::kdpTransmitDispatcher( void * buffer, UInt32 length ) +{ + if ( gIODebuggerSemaphore ) return; // FIXME - Driver is busy! 
+ + (*gIODebuggerTxHandler)( gIODebuggerDevice, buffer, length ); + + gIODebuggerTxBytes += length; +} + +//--------------------------------------------------------------------------- +// Null debugger handlers. + +void IOKernelDebugger::nullTxHandler( IOService * target, + void * buffer, + UInt32 length ) +{ +} + +void IOKernelDebugger::nullRxHandler( IOService * target, + void * buffer, + UInt32 * length, + UInt32 timeout ) +{ + if ( gIODebuggerFlag & kIODebuggerFlagWarnNullHandler ) + { + IOLog("IOKernelDebugger::%s no debugger device\n", __FUNCTION__); + gIODebuggerFlag &= ~kIODebuggerFlagWarnNullHandler; + } +} + +//--------------------------------------------------------------------------- +// Take the debugger lock conditionally. + +IODebuggerLockState IOKernelDebugger::lock( IOService * object ) +{ + if ( gIODebuggerDevice == object ) + { + OSIncrementAtomic( &gIODebuggerSemaphore ); + return kIODebuggerLockTaken; + } + return (IODebuggerLockState) 0; +} + +//--------------------------------------------------------------------------- +// Release the debugger lock if the kIODebuggerLockTaken flag is set. + +void IOKernelDebugger::unlock( IODebuggerLockState state ) +{ + if ( state & kIODebuggerLockTaken ) + OSDecrementAtomic( &gIODebuggerSemaphore ); +} + +//--------------------------------------------------------------------------- +// Initialize an IOKernelDebugger instance. + +bool IOKernelDebugger::init( IOService * target, + IODebuggerTxHandler txHandler, + IODebuggerRxHandler rxHandler ) +{ + if ( ( super::init() == false ) || + ( OSDynamicCast(IOService, target) == 0 ) || + ( txHandler == 0 ) || + ( rxHandler == 0 ) ) + { + return false; + } + + // Cache the target and handlers provided. 
+ + _target = target; + _txHandler = txHandler; + _rxHandler = rxHandler; + + return true; +} + +//--------------------------------------------------------------------------- +// Factory method which performs allocation and initialization of an +// IOKernelDebugger instance. + +IOKernelDebugger * IOKernelDebugger::debugger( IOService * target, + IODebuggerTxHandler txHandler, + IODebuggerRxHandler rxHandler ) +{ + IOKernelDebugger * debugger = new IOKernelDebugger; + + if (debugger && (debugger->init( target, txHandler, rxHandler ) == false)) + { + debugger->release(); + debugger = 0; + } + + return debugger; +} + +//--------------------------------------------------------------------------- +// Register the debugger handlers. + +void IOKernelDebugger::registerHandler( IOService * target, + IODebuggerTxHandler txHandler, + IODebuggerRxHandler rxHandler ) +{ + bool doRegister; + + assert( ( target == gIODebuggerDevice ) || + ( target == 0 ) || + ( gIODebuggerDevice == 0 ) ); + + doRegister = ( target && ( txHandler != 0 ) && ( rxHandler != 0 ) ); + + if ( txHandler == 0 ) txHandler = &IOKernelDebugger::nullTxHandler; + if ( rxHandler == 0 ) rxHandler = &IOKernelDebugger::nullRxHandler; + + OSIncrementAtomic( &gIODebuggerSemaphore ); + + gIODebuggerDevice = target; + gIODebuggerTxHandler = txHandler; + gIODebuggerRxHandler = rxHandler; + gIODebuggerFlag |= kIODebuggerFlagWarnNullHandler; + + OSDecrementAtomic( &gIODebuggerSemaphore ); + + if ( doRegister && (( gIODebuggerFlag & kIODebuggerFlagRegistered ) == 0) ) + { + // Register dispatch function, these in turn will call the + // handlers when the debugger is active. + // + // Note: The following call may trigger an immediate break + // to the debugger. + + kdp_register_send_receive( (kdp_send_t) kdpTransmitDispatcher, + (kdp_receive_t) kdpReceiveDispatcher ); + + // Limit ourself to a single real KDP registration. 
+ + gIODebuggerFlag |= kIODebuggerFlagRegistered; + } +} + +//--------------------------------------------------------------------------- +// Called by open() with the arbitration lock held. + +bool IOKernelDebugger::handleOpen( IOService * forClient, + IOOptionBits options, + void * arg ) +{ + IONetworkController * ctr = OSDynamicCast(IONetworkController, _target); + bool ret = false; + + do { + // Only a single client at a time. + + if ( _client ) break; + + // Register the target to prime the lock()/unlock() functionality + // before opening the target. + + registerHandler( _target ); + + // While the target is opened/enabled, it must block any thread + // which may acquire the debugger lock in its execution path. + + if ( _target->open( this ) == false ) + break; + + // Register interest in receiving notifications about controller + // power state changes. + // + // We are making an assumption that the controller is 'usable' and + // the next notification will inform this object that the controller + // has become unusable, there is no support for cases when the + // controller is already in an 'unusable' state. + + _pmDisabled = false; + + if ( ctr ) + { + // Register to receive PM notifications for controller power + // state changes. + + ctr->registerInterestedDriver( this ); + + if ( ctr->doEnable( this ) != kIOReturnSuccess ) + { + ctr->deRegisterInterestedDriver( this ); + break; + } + } + + // After the target has been opened, complete the registration. + + IOLog("%s: Debugger attached\n", getName()); + registerHandler( _target, _txHandler, _rxHandler ); + + // Remember the client. + + _client = forClient; + + ret = true; + } + while (0); + + if ( ret == false ) + { + registerHandler( 0 ); + _target->close( this ); + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Called by IOService::close() with the arbitration lock held. 
+ +void IOKernelDebugger::handleClose( IOService * forClient, + IOOptionBits options ) +{ + IONetworkController * ctr = OSDynamicCast(IONetworkController, _target); + + if ( _client && ( _client == forClient ) ) + { + // There is no KDP un-registration. The best we can do is to + // register dummy handlers. + + registerHandler( 0 ); + + if ( ctr ) + { + // Disable controller if it is not already disabled. + + if ( _pmDisabled == false ) + { + ctr->doDisable( this ); + } + + // Before closing the controller, remove interest in receiving + // notifications about controller power state changes. + + ctr->deRegisterInterestedDriver( this ); + } + + _client = 0; + + _target->close( this ); + } +} + +//--------------------------------------------------------------------------- +// Called by IOService::isOpen() with the arbitration lock held. + +bool IOKernelDebugger::handleIsOpen( const IOService * forClient ) const +{ + if ( forClient == 0 ) + return ( forClient != _client ); + else + return ( forClient == _client ); +} + +//--------------------------------------------------------------------------- +// Free the IOKernelDebugger object. + +void IOKernelDebugger::free() +{ + // IOLog("IOKernelDebugger::%s %p\n", __FUNCTION__, this); + super::free(); +} + +#define PM_SECS(x) ((x) * 1000 * 1000) + +//--------------------------------------------------------------------------- +// Handle controller's power state change notitifications. + +IOReturn +IOKernelDebugger::powerStateWillChangeTo( IOPMPowerFlags flags, + unsigned long stateNumber, + IOService * policyMaker ) +{ + IOReturn ret = IOPMAckImplied; + + if ( ( flags & IOPMDeviceUsable ) == 0 ) + { + // Controller is about to transition to an un-usable state. + // The debugger nub should be disabled. 
+ + this->retain(); + + thread_call_func( (thread_call_func_t) pmDisableDebugger, + this, /* parameter */ + FALSE ); /* disable unique call filter */ + + ret = PM_SECS(3); /* Must ACK within 3 seconds */ + } + + return ret; +} + +IOReturn +IOKernelDebugger::powerStateDidChangeTo( IOPMPowerFlags flags, + unsigned long stateNumber, + IOService * policyMaker ) +{ + IOReturn ret = IOPMAckImplied; + + if ( flags & IOPMDeviceUsable ) + { + // Controller has transitioned to an usable state. + // The debugger nub should be enabled if necessary. + + this->retain(); + + thread_call_func( (thread_call_func_t) pmEnableDebugger, + this, /* parameter */ + FALSE ); /* disable unique call filter */ + + ret = PM_SECS(3); /* Must ACK within 3 seconds */ + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Static member function: Enable the debugger nub after the controller +// transitions into an usable state. + +void IOKernelDebugger::pmEnableDebugger( IOKernelDebugger * debugger ) +{ + IONetworkController * ctr; + assert( debugger ); + + ctr = OSDynamicCast( IONetworkController, debugger->_target ); + + debugger->lockForArbitration(); + + if ( debugger->_client && ( debugger->_pmDisabled == true ) ) + { + if ( ctr && ( ctr->doEnable( debugger ) != kIOReturnSuccess ) ) + { + // This is bad, unable to re-enable the controller after sleep. + IOLog("IOKernelDebugger: Unable to re-enable controller\n"); + } + else + { + registerHandler( debugger->_target, debugger->_txHandler, + debugger->_rxHandler ); + + debugger->_pmDisabled = false; + } + } + + debugger->unlockForArbitration(); + + // Ack the power state change. + debugger->_target->acknowledgePowerChange( debugger ); + + debugger->release(); +} + +//--------------------------------------------------------------------------- +// Static member function: Disable the debugger nub before the controller +// transitions into an unusable state. 
+ +void IOKernelDebugger::pmDisableDebugger( IOKernelDebugger * debugger ) +{ + IONetworkController * ctr; + assert( debugger ); + + ctr = OSDynamicCast( IONetworkController, debugger->_target ); + + debugger->lockForArbitration(); + + if ( debugger->_client && ( debugger->_pmDisabled == false ) ) + { + // Keep an open on the controller, but inhibit access to the + // controller's debugger handlers, and disable controller's + // hardware support for the debugger. + + registerHandler( 0 ); + if ( ctr ) ctr->doDisable( debugger ); + + debugger->_pmDisabled = true; + } + + debugger->unlockForArbitration(); + + // Ack the power state change. + debugger->_target->acknowledgePowerChange( debugger ); + + debugger->release(); +} diff --git a/iokit/Families/IONetworking/IOMbufMemoryCursor.cpp b/iokit/Families/IONetworking/IOMbufMemoryCursor.cpp new file mode 100644 index 000000000..21360f1b2 --- /dev/null +++ b/iokit/Families/IONetworking/IOMbufMemoryCursor.cpp @@ -0,0 +1,576 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOMbufMemoryCursor.cpp created by gvdl on 1999-1-20 */ + +#include + +__BEGIN_DECLS +#include + +#include +#include +#include +__END_DECLS + +#include +#include + +#ifndef MIN +#define MIN(a,b) (((a)<(b))?(a):(b)) +#endif /* MIN */ + +#define next_page(x) trunc_page(x + PAGE_SIZE) + +/* Define the meta class stuff for the entire file here */ +OSDefineMetaClassAndAbstractStructors(IOMbufMemoryCursor, IOMemoryCursor) +OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 0); +OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 1); +OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 2); +OSMetaClassDefineReservedUnused( IOMbufMemoryCursor, 3); + +OSDefineMetaClassAndStructors(IOMbufNaturalMemoryCursor, IOMbufMemoryCursor) +OSDefineMetaClassAndStructors(IOMbufBigMemoryCursor, IOMbufMemoryCursor) +OSDefineMetaClassAndStructors(IOMbufLittleMemoryCursor, IOMbufMemoryCursor) + +#ifdef __ppc__ +OSDefineMetaClassAndStructors(IOMbufDBDMAMemoryCursor, IOMbufMemoryCursor) +#endif /* __ppc__ */ + +/*********************** class IOMbufMemoryCursor ***********************/ +#define super IOMemoryCursor + +bool IOMbufMemoryCursor::initWithSpecification(OutputSegmentFunc outSeg, + UInt32 maxSegmentSize, + UInt32 maxTransferSize, + UInt32 align) +{ + return false; +} + +bool IOMbufMemoryCursor::initWithSpecification(OutputSegmentFunc inOutSeg, + UInt32 inMaxSegmentSize, + UInt32 inMaxNumSegments) +{ + if (!super::initWithSpecification(inOutSeg, inMaxSegmentSize, 0, 1)) + return false; + +#if 0 + // It is too confusing to force the max segment size to be at least + // as large as a page. Most Enet devices only have 11-12 bit fields, + // enough for a full size frame, and also the PAGE_SIZE parameter + // may be architecture dependent. 
+ + assert(inMaxSegmentSize >= PAGE_SIZE); + if (inMaxSegmentSize < PAGE_SIZE) + return false; +#else + if (!inMaxSegmentSize) + return false; +#endif + + maxSegmentSize = MIN(maxSegmentSize, PAGE_SIZE); + maxNumSegments = inMaxNumSegments; + coalesceCount = 0; + + return true; +} + +// +// Copy the src packet into the destination packet. The amount to copy is +// determined by the dstm->m_len, which is setup by analyseSegments, see below. +// The source mbuf is not freed nor modified. +// +#define BCOPY(s, d, l) do { bcopy((void *) s, (void *) d, l); } while(0) + +static inline void coalesceSegments(struct mbuf *srcm, struct mbuf *dstm) +{ + vm_offset_t src, dst; + SInt32 srcLen, dstLen; + struct mbuf *temp; + + srcLen = srcm->m_len; + src = mtod(srcm, vm_offset_t); + + dstLen = dstm->m_len; + dst = mtod(dstm, vm_offset_t); + + for (;;) { + if (srcLen < dstLen) { + + // Copy remainder of src mbuf to current dst. + BCOPY(src, dst, srcLen); + dst += srcLen; + dstLen -= srcLen; + + // Move on to the next source mbuf. + temp = srcm->m_next; assert(temp); + srcm = temp; + + srcLen = srcm->m_len; + src = mtod(srcm, vm_offset_t); + } + else if (srcLen > dstLen) { + + // Copy some of src mbuf to remaining space in dst mbuf. + BCOPY(src, dst, dstLen); + src += dstLen; + srcLen -= dstLen; + + // Move on to the next destination mbuf. + temp = dstm->m_next; assert(temp); + dstm = temp; + + dstLen = dstm->m_len; + dst = mtod(dstm, vm_offset_t); + } + else { /* (srcLen == dstLen) */ + + // copy remainder of src into remaining space of current dst + BCOPY(src, dst, srcLen); + + // Free current mbuf and move the current onto the next + srcm = srcm->m_next; + + // Do we have any data left to copy? 
+ if (!dstm->m_next) + break; + dstm = dstm->m_next; + + assert(srcm); + dstLen = dstm->m_len; + dst = mtod(dstm, vm_offset_t); + srcLen = srcm->m_len; + src = mtod(srcm, vm_offset_t); + } + } +} + +static const UInt32 kMBufDataCacheSize = 16; + +static inline bool analyseSegments( + struct mbuf *packet, /* input packet mbuf */ + const UInt32 mbufsInCache, /* number of entries in segsPerMBuf[] */ + const UInt32 segsPerMBuf[], /* segments required per mbuf */ + SInt32 numSegs, /* total number of segments */ + const UInt32 maxSegs) /* max controller segments per mbuf */ +{ + struct mbuf *newPacket; // output mbuf chain. + struct mbuf *out; // current output mbuf link. + SInt32 outSize; // size of current output mbuf link. + SInt32 outSegs; // segments for current output mbuf link. + SInt32 doneSegs; // segments for output mbuf chain. + SInt32 outLen; // remaining length of input buffer. + + struct mbuf *in = packet; // save the original input packet pointer. + UInt32 inIndex = 0; + + // Allocate a mbuf (non header mbuf) to begin the output mbuf chain. + // + MGET(newPacket, M_DONTWAIT, MT_DATA); + if (!newPacket) { + IOLog("analyseSegments: MGET() 1 error\n"); + return false; + } + + /* Initialise outgoing packet controls */ + out = newPacket; + outSize = MLEN; + doneSegs = outSegs = outLen = 0; + + // numSegs stores the delta between the total and the max. For each + // input mbuf consumed, we decrement numSegs. + // + numSegs -= maxSegs; + + // Loop through the input packet mbuf 'in' and construct a new mbuf chain + // large enough to make (numSegs + doneSegs + outSegs) less than or + // equal to zero. + // + do { + vm_offset_t vmo; + + outLen += in->m_len; + + while (outLen > outSize) { + // Oh dear the current outgoing length is too big. + if (outSize != MCLBYTES) { + // Current mbuf is not yet a cluster so promote, then + // check for error. 
+ + MCLGET(out, M_DONTWAIT); + if ( !(out->m_flags & M_EXT) ) { + IOLog("analyseSegments: MCLGET() error\n"); + goto bombAnalysis; + } + + outSize = MCLBYTES; + + continue; + } + + vmo = mtod(out, vm_offset_t); + out->m_len = MCLBYTES; /* Fill in target copy size */ + doneSegs += (round_page(vmo + MCLBYTES) - trunc_page(vmo)) + / PAGE_SIZE; + + // If the number of segments of the output chain, plus + // the segment for the mbuf we are about to allocate is greater + // than maxSegs, then abort. + // + if (doneSegs + 1 > (int) maxSegs) { + IOLog("analyseSegments: maxSegs limit 1 reached! %ld %ld\n", + doneSegs, maxSegs); + goto bombAnalysis; + } + + MGET(out->m_next, M_DONTWAIT, MT_DATA); + if (!out->m_next) { + IOLog("analyseSegments: MGET() error\n"); + goto bombAnalysis; + } + + out = out->m_next; + outSize = MLEN; + outLen -= MCLBYTES; + } + + // Compute number of segment in current outgoing mbuf. + vmo = mtod(out, vm_offset_t); + outSegs = (round_page(vmo + outLen) - trunc_page(vmo)) / PAGE_SIZE; + if (doneSegs + outSegs > (int) maxSegs) { + IOLog("analyseSegments: maxSegs limit 2 reached! %ld %ld %ld\n", + doneSegs, outSegs, maxSegs); + goto bombAnalysis; + } + + // Get the number of segments in the current inbuf + if (inIndex < mbufsInCache) + numSegs -= segsPerMBuf[inIndex]; // Yeah, in cache + else { + // Hmm, we have to recompute from scratch. Copy code from genPhys. + int thisLen = 0, mbufLen; + + vmo = mtod(in, vm_offset_t); + for (mbufLen = in->m_len; mbufLen; mbufLen -= thisLen) { + thisLen = MIN(next_page(vmo), vmo + mbufLen) - vmo; + vmo += thisLen; + numSegs--; + } + } + + // Walk the incoming buffer on one. + in = in->m_next; + inIndex++; + + // continue looping until the total number of segments has dropped + // to an acceptable level, or if we ran out of mbuf links. 
+ + } while (in && ((numSegs + doneSegs + outSegs) > 0)); + + if ( (int) (numSegs + doneSegs + outSegs) <= 0) { // success + + out->m_len = outLen; // Set last mbuf with the remaining length. + + // The amount to copy is determine by the segment length in each + // mbuf linked to newPacket. The sum can be smaller than + // packet->pkthdr.len; + // + coalesceSegments(packet, newPacket); + + // Copy complete. + + // If 'in' is non zero, then it means that we only need to copy part + // of the input packet, beginning at the start. The mbuf chain + // beginning at 'in' must be preserved and linked to the new + // output packet chain. Everything before 'in', except for the + // header mbuf can be freed. + // + struct mbuf *m = packet->m_next; + while (m != in) + m = m_free(m); + + // The initial header mbuf is preserved, its length set to zero, and + // linked to the new packet chain. + + packet->m_len = 0; + packet->m_next = newPacket; + newPacket->m_next = in; + + return true; + } + +bombAnalysis: + + m_freem(newPacket); + return false; +} + +UInt32 IOMbufMemoryCursor::genPhysicalSegments(struct mbuf *packet, void *vector, + UInt32 maxSegs, bool doCoalesce) +{ + bool doneCoalesce = false; + + if (!packet || !(packet->m_flags & M_PKTHDR)) + return 0; + + if (!maxSegs) + maxSegs = maxNumSegments; + if (!maxSegs) + return 0; + + if ( packet->m_next == 0 ) + { + vm_offset_t src; + struct IOPhysicalSegment physSeg; + /* + * the packet consists of only 1 mbuf + * so if the data buffer doesn't span a page boundary + * we can take the simple way out + */ + src = mtod(packet, vm_offset_t); + + if ( trunc_page(src) == trunc_page(src+packet->m_len-1) ) + { + if ((physSeg.location = + (IOPhysicalAddress)mcl_to_paddr((char *)src)) == 0) + physSeg.location = (IOPhysicalAddress)pmap_extract(kernel_pmap, src); + if (!physSeg.location) + return 0; + physSeg.length = packet->m_len; + (*outSeg)(physSeg, vector, 0); + + return 1; + } + } + + if ( doCoalesce == true && maxSegs == 1 ) + { 
+ vm_offset_t src; + vm_offset_t dst; + struct mbuf *m; + struct mbuf *mnext; + struct mbuf *out; + UInt32 len = 0; + struct IOPhysicalSegment physSeg; + + m = packet; + + MGET(out, M_DONTWAIT, MT_DATA); + if ( out == 0 ) return 0; + + MCLGET(out, M_DONTWAIT); + if ( !(out->m_flags & M_EXT) ) + { + m_free( out ); + return 0; + } + dst = mtod(out, vm_offset_t); + + do + { + src = mtod(m, vm_offset_t); + BCOPY( src, dst, m->m_len ); + dst += m->m_len; + len += m->m_len; + } while ( (m = m->m_next) != 0 ); + + out->m_len = len; + + dst = mtod(out, vm_offset_t); + if ((physSeg.location = (IOPhysicalAddress)mcl_to_paddr((char *)dst)) == 0) + physSeg.location = (IOPhysicalAddress)pmap_extract(kernel_pmap, dst); + if (!physSeg.location) + return 0; + physSeg.length = out->m_len; + (*outSeg)(physSeg, vector, 0); + + m = packet->m_next; + while (m != 0) + { + mnext = m->m_next; + m_free(m); + m = mnext; + } + + // The initial header mbuf is preserved, its length set to zero, and + // linked to the new packet chain. + + packet->m_len = 0; + packet->m_next = out; + out->m_next = 0; + + return 1; + } + + + // + // Iterate over the mbuf, translating segments were allowed. When we + // are not allowed to translate segments then accumulate segment + // statistics up to kMBufDataCacheSize of mbufs. Finally + // if we overflow our cache just count how many segments this + // packet represents. + // + UInt32 segsPerMBuf[kMBufDataCacheSize]; + +tryAgain: + UInt32 curMBufIndex = 0; + UInt32 curSegIndex = 0; + UInt32 lastSegCount = 0; + struct mbuf *m = packet; + + // For each mbuf in incoming packet. 
+ do { + vm_size_t mbufLen, thisLen = 0; + vm_offset_t src; + + // Step through each segment in the current mbuf + for (mbufLen = m->m_len, src = mtod(m, vm_offset_t); + mbufLen; + src += thisLen, mbufLen -= thisLen) + { + // If maxSegmentSize is atleast PAGE_SIZE, then + // thisLen = MIN(next_page(src), src + mbufLen) - src; + + thisLen = MIN(mbufLen, maxSegmentSize); + thisLen = MIN(next_page(src), src + thisLen) - src; + + // If room left then find the current segment addr and output + if (curSegIndex < maxSegs) { + struct IOPhysicalSegment physSeg; + + if ((physSeg.location = + (IOPhysicalAddress)mcl_to_paddr((char *)src)) == 0) + physSeg.location = (IOPhysicalAddress)pmap_extract(kernel_pmap, src); + if (!physSeg.location) + return 0; + physSeg.length = thisLen; + (*outSeg)(physSeg, vector, curSegIndex); + } + // Count segments if we are coalescing. + curSegIndex++; + } + + // Cache the segment count data if room is available. + if (curMBufIndex < kMBufDataCacheSize) { + segsPerMBuf[curMBufIndex] = curSegIndex - lastSegCount; + lastSegCount = curSegIndex; + } + + // Move on to next imcoming mbuf + curMBufIndex++; + m = m->m_next; + } while (m); + + // If we finished cleanly return number of segments found + if (curSegIndex <= maxSegs) + return curSegIndex; + if (!doCoalesce) + return 0; // if !coalescing we've got a problem. + + + // If we are coalescing and it is possible then attempt coalesce, + if (!doneCoalesce + && (UInt) packet->m_pkthdr.len <= maxSegs * maxSegmentSize) { + // Hmm, we have to do some coalescing. + bool analysisRet; + + analysisRet = analyseSegments(packet, + MIN(curMBufIndex, kMBufDataCacheSize), + segsPerMBuf, + curSegIndex, maxSegs); + if (analysisRet) { + doneCoalesce = true; + coalesceCount++; + goto tryAgain; + } + } + + assert(!doneCoalesce); // Problem in Coalesce code. 
+ packetTooBigErrors++; + return 0; +} + +/********************* class IOMbufBigMemoryCursor **********************/ +IOMbufBigMemoryCursor * +IOMbufBigMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) +{ + IOMbufBigMemoryCursor *me = new IOMbufBigMemoryCursor; + + if (me && !me->initWithSpecification(&bigOutputSegment, + maxSegSize, maxNumSegs)) { + me->release(); + return 0; + } + + return me; +} + + +/******************* class IOMbufNaturalMemoryCursor ********************/ +IOMbufNaturalMemoryCursor * +IOMbufNaturalMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) +{ + IOMbufNaturalMemoryCursor *me = new IOMbufNaturalMemoryCursor; + + if (me && !me->initWithSpecification(&naturalOutputSegment, + maxSegSize, maxNumSegs)) { + me->release(); + return 0; + } + + return me; +} + + +/******************** class IOMbufLittleMemoryCursor ********************/ +IOMbufLittleMemoryCursor * +IOMbufLittleMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) +{ + IOMbufLittleMemoryCursor *me = new IOMbufLittleMemoryCursor; + + if (me && !me->initWithSpecification(&littleOutputSegment, + maxSegSize, maxNumSegs)) { + me->release(); + return 0; + } + + return me; +} + + +/******************** class IOMbufDBDMAMemoryCursor *********************/ +#ifdef __ppc__ + +IOMbufDBDMAMemoryCursor * +IOMbufDBDMAMemoryCursor::withSpecification(UInt32 maxSegSize, UInt32 maxNumSegs) +{ + IOMbufDBDMAMemoryCursor *me = new IOMbufDBDMAMemoryCursor; + + if (me && !me->initWithSpecification(&dbdmaOutputSegment, + maxSegSize, maxNumSegs)) { + me->release(); + return 0; + } + + return me; +} +#endif /* __ppc__ */ diff --git a/iokit/Families/IONetworking/IOMbufQueue.h b/iokit/Families/IONetworking/IOMbufQueue.h new file mode 100644 index 000000000..b5937ab36 --- /dev/null +++ b/iokit/Families/IONetworking/IOMbufQueue.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOMBUFQUEUE_H +#define _IOMBUFQUEUE_H + +extern "C" { +#include +#include +} + +struct IOMbufQueue { + struct mbuf * head; + struct mbuf * tail; + UInt32 size; + UInt32 capacity; +}; + +static __inline__ +UInt32 IOMbufFree(struct mbuf * m) +{ +/*LD#### + UInt32 count = 0; + struct mbuf * mn; + + while (( mn = m )) + { + m = mn->m_nextpkt; + mn->m_nextpkt = 0; + m_freem(mn); + count++; + } + return count; +*/ + return m_freem_list(m); +} + +static __inline__ +void IOMbufQueueInit(IOMbufQueue * q, UInt32 capacity) +{ + q->head = q->tail = 0; + q->size = 0; + q->capacity = capacity; +} + +static __inline__ +bool IOMbufQueueEnqueue(IOMbufQueue * q, struct mbuf * m) +{ + if (q->size >= q->capacity) return false; + + if (q->size++ > 0) + q->tail->m_nextpkt = m; + else + q->head = m; + + for (q->tail = m; + q->tail->m_nextpkt; + q->tail = q->tail->m_nextpkt, q->size++) + ; + + return true; +} + +static __inline__ +bool IOMbufQueueEnqueue(IOMbufQueue * q, IOMbufQueue * qe) +{ + if (qe->size) + { + if (q->size == 0) + q->head = qe->head; + else + q->tail->m_nextpkt = qe->head; + q->tail = qe->tail; 
+ q->size += qe->size; + + qe->head = qe->tail = 0; + qe->size = 0; + } + return true; +} + +static __inline__ +void IOMbufQueuePrepend(IOMbufQueue * q, struct mbuf * m) +{ + struct mbuf * tail; + + for (tail = m, q->size++; + tail->m_nextpkt; + tail = tail->m_nextpkt, q->size++) + ; + + tail->m_nextpkt = q->head; + if (q->tail == 0) + q->tail = tail; + q->head = m; +} + +static __inline__ +void IOMbufQueuePrepend(IOMbufQueue * q, IOMbufQueue * qp) +{ + if (qp->size) + { + qp->tail->m_nextpkt = q->head; + if (q->tail == 0) + q->tail = qp->tail; + q->head = qp->head; + q->size += qp->size; + + qp->head = qp->tail = 0; + qp->size = 0; + } +} + +static __inline__ +struct mbuf * IOMbufQueueDequeue(IOMbufQueue * q) +{ + struct mbuf * m = q->head; + if (m) + { + if ((q->head = m->m_nextpkt) == 0) + q->tail = 0; + m->m_nextpkt = 0; + q->size--; + } + return m; +} + +static __inline__ +struct mbuf * IOMbufQueueDequeueAll(IOMbufQueue * q) +{ + struct mbuf * m = q->head; + q->head = q->tail = 0; + q->size = 0; + return m; +} + +static __inline__ +struct mbuf * IOMbufQueuePeek(IOMbufQueue * q) +{ + return q->head; +} + +static __inline__ +UInt32 IOMbufQueueGetSize(IOMbufQueue * q) +{ + return q->size; +} + +static __inline__ +UInt32 IOMbufQueueGetCapacity(IOMbufQueue * q) +{ + return q->capacity; +} + +static __inline__ +void IOMbufQueueSetCapacity(IOMbufQueue * q, UInt32 capacity) +{ + q->capacity = capacity; +} + +#endif /* !_IOMBUFQUEUE_H */ diff --git a/iokit/Families/IONetworking/IONetworkController.cpp b/iokit/Families/IONetworking/IONetworkController.cpp new file mode 100644 index 000000000..2b44e40f9 --- /dev/null +++ b/iokit/Families/IONetworking/IONetworkController.cpp @@ -0,0 +1,2159 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkController.cpp + * + * HISTORY + * 9-Dec-1998 Joe Liu (jliu) created. + * + */ + +#include +#include +#include +#include +#include + +// IONetworkController (and its subclasses) needs to know about mbufs, +// but it shall have no further dependencies on BSD networking. +// +extern "C" { +#include // mbuf limits defined here. +#include +#include +// +// osfmk/kern/spl.h - Need splimp for mbuf macros. +// +typedef unsigned spl_t; +extern spl_t (splimp)(void); +} + +//------------------------------------------------------------------------- +// Macros. 
+ +#define super IOService + +OSDefineMetaClassAndAbstractStructors( IONetworkController, IOService ) +OSMetaClassDefineReservedUnused( IONetworkController, 0); +OSMetaClassDefineReservedUnused( IONetworkController, 1); +OSMetaClassDefineReservedUnused( IONetworkController, 2); +OSMetaClassDefineReservedUnused( IONetworkController, 3); +OSMetaClassDefineReservedUnused( IONetworkController, 4); +OSMetaClassDefineReservedUnused( IONetworkController, 5); +OSMetaClassDefineReservedUnused( IONetworkController, 6); +OSMetaClassDefineReservedUnused( IONetworkController, 7); +OSMetaClassDefineReservedUnused( IONetworkController, 8); +OSMetaClassDefineReservedUnused( IONetworkController, 9); +OSMetaClassDefineReservedUnused( IONetworkController, 10); +OSMetaClassDefineReservedUnused( IONetworkController, 11); +OSMetaClassDefineReservedUnused( IONetworkController, 12); +OSMetaClassDefineReservedUnused( IONetworkController, 13); +OSMetaClassDefineReservedUnused( IONetworkController, 14); +OSMetaClassDefineReservedUnused( IONetworkController, 15); +OSMetaClassDefineReservedUnused( IONetworkController, 16); +OSMetaClassDefineReservedUnused( IONetworkController, 17); +OSMetaClassDefineReservedUnused( IONetworkController, 18); +OSMetaClassDefineReservedUnused( IONetworkController, 19); +OSMetaClassDefineReservedUnused( IONetworkController, 20); +OSMetaClassDefineReservedUnused( IONetworkController, 21); +OSMetaClassDefineReservedUnused( IONetworkController, 22); +OSMetaClassDefineReservedUnused( IONetworkController, 23); +OSMetaClassDefineReservedUnused( IONetworkController, 24); +OSMetaClassDefineReservedUnused( IONetworkController, 25); +OSMetaClassDefineReservedUnused( IONetworkController, 26); +OSMetaClassDefineReservedUnused( IONetworkController, 27); +OSMetaClassDefineReservedUnused( IONetworkController, 28); +OSMetaClassDefineReservedUnused( IONetworkController, 29); +OSMetaClassDefineReservedUnused( IONetworkController, 30); +OSMetaClassDefineReservedUnused( 
IONetworkController, 31); + +static bool isPowerOfTwo(UInt32 num) +{ + return (num == (num & ~(num - 1))); +} + +#define MEDIUM_LOCK IOTakeLock(_mediumLock); +#define MEDIUM_UNLOCK IOUnlock(_mediumLock); + +#ifdef DEBUG +#define DLOG(fmt, args...) IOLog(fmt, ## args) +#else +#define DLOG(fmt, args...) +#endif + +// OSSymbols for frequently used keys. +// +static const OSSymbol * gIOActiveMediumKey; +static const OSSymbol * gIOCurrentMediumKey; +static const OSSymbol * gIODefaultMediumKey; +static const OSSymbol * gIONullMediumName; +static const OSSymbol * gIOLinkDataKey; +static const OSData * gIONullLinkData; + +// Global symbols. +// +const OSSymbol * gIONetworkFilterGroup; +const OSSymbol * gIOEthernetWakeOnLANFilterGroup; + +// Constants for handleCommand(). +// +enum { + kCommandEnable = 1, + kCommandDisable = 2, + kCommandPrepare = 3 +}; + +//--------------------------------------------------------------------------- +// IONetworkController class initializer. Create often used OSSymbol objects +// that are used as keys. This method is called explicitly by a line in +// IOStartIOKit.cpp and not by the OSDefineMetaClassAndInit() mechanism, to +// ensure that this method is called after the OSSymbol class is initialized. 
void IONetworkController::initialize()
{
    // Intern the property-key symbols used on every link status update,
    // so later lookups do not allocate.
    gIOActiveMediumKey  = OSSymbol::withCStringNoCopy(kIOActiveMedium);
    gIOCurrentMediumKey = OSSymbol::withCStringNoCopy(kIOSelectedMedium);
    gIODefaultMediumKey = OSSymbol::withCStringNoCopy(kIODefaultMedium);
    gIONullMediumName   = OSSymbol::withCStringNoCopy("");
    gIOLinkDataKey      = OSSymbol::withCStringNoCopy(kIOLinkData);
    gIONullLinkData     = OSData::withCapacity(0);
    gIONetworkFilterGroup
        = OSSymbol::withCStringNoCopy(kIONetworkFilterGroup);

    gIOEthernetWakeOnLANFilterGroup
        = OSSymbol::withCStringNoCopy("IOEthernetWakeOnLANFilterGroup");

    assert( gIOEthernetWakeOnLANFilterGroup );

    // All of these are required; debug builds will panic if the symbol
    // pools could not supply them.
    assert(gIOActiveMediumKey &&
           gIOCurrentMediumKey &&
           gIODefaultMediumKey &&
           gIONullMediumName   &&
           gIOLinkDataKey      &&
           gIONullLinkData     &&
           gIONetworkFilterGroup);

    // Let IONetworkData perform its own one-time class setup as well.
    IONetworkData::initialize();
}

//---------------------------------------------------------------------------
// Initialize the IONetworkController instance. Instance variables are
// set to their default values, then super::init() is called.
//
// properties: A dictionary object containing a property table
//             associated with this instance.
//
// Returns true on success, false otherwise.

bool IONetworkController::init(OSDictionary * properties)
{
    // Initialize instance variables. Real objects are created later
    // in start(); free() releases whatever was actually allocated.
    //
    _workLoop            = 0;
    _cmdGate             = 0;
    _outputQueue         = 0;
    _clientSet           = 0;
    _clientSetIter       = 0;
    _cmdClient           = 0;
    _propertiesPublished = false;
    _mediumLock          = 0;
    // Shared "null" placeholder objects created by initialize().
    _lastLinkData          = gIONullLinkData;
    _lastActiveMediumName  = gIONullMediumName;
    _lastCurrentMediumName = gIONullMediumName;

    if (super::init(properties) == false)
    {
        DLOG("IONetworkController: super::init() failed\n");
        return false;
    }

    return true;
}

//-------------------------------------------------------------------------
// Called after the controller driver was successfully matched to a provider,
// to start running.
// IONetworkController will allocate resources and gather
// controller properties. No I/O will be performed until the subclass
// attaches a client object from its start() method. Subclasses must override
// this method and call super::start() at the beginning of its implementation.
// Then check the return value to make sure the superclass was started
// successfully before continuing. The resources allocated by
// IONetworkController include:
//
// - An IOCommandGate object to handle client commands.
// - An OSSet to track our clients.
// - An optional IOOutputQueue object for output queueing.
//
// Tasks that are usually performed by a typical network driver in start
// include:
//
// - Resource allocation
// - Hardware initialization
// - Allocation of IOEventSources and attaching them to an IOWorkLoop object.
// - Publishing a medium dictionary.
// - And finally, attaching an interface object after the driver is ready
//   to handle client requests.
//
// provider: The provider that the controller was matched
//           (and attached) to.
//
// Returns true on success, false otherwise. On failure, objects created
// here are not torn down immediately; free() releases them.

bool IONetworkController::start(IOService * provider)
{
    // Most drivers will probably want to wait for BSD due to their
    // dependency on mbufs, which is not available until BSD is
    // initialized. kIONetworkFeatureNoBSDWait opts out of the wait.

    if ((getFeatures() & kIONetworkFeatureNoBSDWait) == 0)
        waitForService(resourceMatching( "IOBSD" ));

    // Start our superclass.

    if (!super::start(provider))
        return false;

    // Create an OSSet to store our clients.

    _clientSet = OSSet::withCapacity(2);
    if (_clientSet == 0)
        return false;

    _clientSetIter = OSCollectionIterator::withCollection(_clientSet);
    if (_clientSetIter == 0)
        return false;

    // Initialize link status properties to their "unknown" defaults.

    if (!setProperty(gIOActiveMediumKey, (OSSymbol *) gIONullMediumName) ||
        !setProperty(gIOCurrentMediumKey, (OSSymbol *) gIONullMediumName))
        return false;

    _linkStatus = OSNumber::withNumber((UInt64) 0, 32);
    if (!_linkStatus || !setProperty(kIOLinkStatus, _linkStatus))
    {
        return false;
    }

    _linkSpeed = OSNumber::withNumber((UInt64) 0, 64);
    if (!_linkSpeed || !setProperty(kIOLinkSpeed, _linkSpeed))
    {
        return false;
    }

    // Allocate a mutex lock to serialize access to the medium dictionary.

    _mediumLock = IOLockAlloc();
    if (!_mediumLock)
        return false;
    IOLockInitWithState(_mediumLock, kIOLockStateUnlocked);

    // Tell the driver that now is the time to create a work loop
    // (if it wants one).

    if ( createWorkLoop() != true )
    {
        DLOG("%s: createWorkLoop() error\n", getName());
        return false;
    }

    // Get the workloop. This may be one the subclass created above,
    // or one inherited from a provider.

    _workLoop = getWorkLoop();
    if ( _workLoop == 0 )
    {
        DLOG("%s: IOWorkLoop allocation failed\n", getName());
        return false;
    }
    // Keep our own reference; released in free().
    _workLoop->retain();

    // Create a 'private' IOCommandGate object and attach it to
    // our workloop created above. This is used by executeCommand().

    _cmdGate = IOCommandGate::commandGate(this);
    if (!_cmdGate ||
        (_workLoop->addEventSource(_cmdGate) != kIOReturnSuccess))
    {
        DLOG("%s: IOCommandGate initialization failed\n", getName());
        return false;
    }

    // Try to allocate an IOOutputQueue instance. This is optional and
    // _outputQueue may be 0.

    _outputQueue = createOutputQueue();

    // Query the controller's mbuf buffer restrictions.

    IOPacketBufferConstraints constraints;
    getPacketBufferConstraints(&constraints);
    if ((constraints.alignStart  > kIOPacketBufferAlign32) ||
        (constraints.alignLength > kIOPacketBufferAlign32) ||
        !isPowerOfTwo(constraints.alignStart) ||
        !isPowerOfTwo(constraints.alignLength))
    {
        IOLog("%s: Invalid alignment: start:%ld, length:%ld\n",
              getName(),
              constraints.alignStart,
              constraints.alignLength);
        return false;
    }

    // Make it easier to satisfy both constraints: a start alignment at
    // least as strict as the length alignment satisfies both at once.

    if (constraints.alignStart < constraints.alignLength)
        constraints.alignStart = constraints.alignLength;

    // Convert to alignment masks (power of two minus one; zero -> no mask).

    _alignStart   = (constraints.alignStart)  ? constraints.alignStart  - 1 : 0;
    _alignLength  = (constraints.alignLength) ? constraints.alignLength - 1 : 0;
    _alignPadding = _alignStart + _alignLength;

    // Called by a policy-maker to initialize itself for power-management.
    // IONetworkController is the policy-maker.

    PMinit();

    // Called by a policy-maker on its nub, to be attached into the
    // power management hierarchy.

    provider->joinPMtree(this);

    return true;
}

//---------------------------------------------------------------------------
// The opposite of start(). The controller has been instructed to stop running.
// This method should release resources and undo actions performed by start().
// Subclasses must override this method and call super::stop() at the end of
// its implementation.
//
// provider: The provider that the controller was matched
//           (and attached) to.

void IONetworkController::stop(IOService * provider)
{
    // Called by a policy-maker to resign its responsibilities as the
    // policy-maker.

    PMstop();

    super::stop(provider);
}

//---------------------------------------------------------------------------
// Power-management hooks for subclasses.
IOReturn IONetworkController::registerWithPolicyMaker(IOService * policyMaker)
{
    // An opportunity for subclasses to call
    // policyMaker->registerPowerDriver(...)
    // and other future PM requirements. The default implementation
    // declines to participate.
    return kIOReturnUnsupported;
}

//---------------------------------------------------------------------------
// Catch calls to createWorkLoop() for drivers that choose not to implement
// this method. Returning true simply means "no error"; no work loop is
// created here.

bool IONetworkController::createWorkLoop()
{
    return true;
}

//---------------------------------------------------------------------------
// Get the IOCommandGate object created by IONetworkController.
// An IOCommandGate is created and attached to the internal workloop by
// the start() method.
// This IOCommandGate object is used to handle client commands sent to
// executeCommand(). Subclasses that need an IOCommandGate should use the
// object returned by this method, rather than creating
// a new instance. See IOCommandGate.
//
// Returns the IOCommandGate object created by IONetworkController.

IOCommandGate * IONetworkController::getCommandGate() const
{
    return _cmdGate;
}

//---------------------------------------------------------------------------
// Get the address of the method designated to handle output packets.
// Interfaces use this as the default output handler when no output
// queue is present (see configureInterface()).
//
// Returns the address of the outputPacket() method.

IOOutputAction IONetworkController::getOutputHandler() const
{
    return (IOOutputAction) &IONetworkController::outputPacket;
}

//---------------------------------------------------------------------------
// Create a new interface object and attach it to the controller.
// The createInterface() method is called to perform the allocation and
// initialization, followed by a call to configureInterface() to configure
// the interface. Subclasses can override those methods to customize the
// interface client attached.
Drivers will usually call this method from +// their start() implementation, after they are ready to process client +// requests. +// +// interfaceP: If successful (return value is true), then the interface +// object will be written to the handle provided. +// +// doRegister: If true, then registerService() is called to register +// the interface, which will trigger the matching process, +// and cause the interface to become registered with the network +// layer. For drivers that wish to delay the registration, and +// hold off servicing requests and data packets from the network +// layer, set doRegister to false and call registerService() on +// the interface object when the controller becomes ready. +// This allows the driver to attach an interface but without +// making it available to the rest of the system. +// +// Returns true on success, false otherwise. + +bool +IONetworkController::attachInterface(IONetworkInterface ** interfaceP, + bool doRegister = true) +{ + IONetworkInterface * netif; + + *interfaceP = 0; + + // We delay some initialization until the first time that + // attachInterface() is called by the subclass. + + if (executeCommand(this, &IONetworkController::handleCommand, + this, (void *) kCommandPrepare) != kIOReturnSuccess) + { + return false; + } + + do { + // Allocate a concrete subclass of IONetworkInterface + // by calling createInterface(). + + netif = createInterface(); + if (!netif) + break; + + // Configure the interface instance by calling + // configureInterface(), then attach it as our client. + + if ( !configureInterface(netif) || !netif->attach(this) ) + { + netif->release(); + break; + } + + *interfaceP = netif; + + // Register the interface nub. Spawns a matching thread. + + if (doRegister) + netif->registerService(); + + return true; // success + } + while (0); + + return false; // failure +} + +//--------------------------------------------------------------------------- +// Detach the interface object. 
This method will check that the object +// provided is indeed an IONetworkInterface, and if so its terminate() +// method is called. Note that a registered interface object will close +// and detach from its controller only after the network layer has removed +// all references to the data structures exposed by the interface. +// +// interface: An interface object to be detached. +// sync: If true, the interface is terminated synchronously. +// Note that this may cause detachInterface() to block +// for an indeterminate of time. + +void +IONetworkController::detachInterface(IONetworkInterface * interface, + bool sync = false) +{ + IOOptionBits options = kIOServiceRequired; + + if (OSDynamicCast(IONetworkInterface, interface) == 0) + return; + + if (sync) + options |= kIOServiceSynchronous; + + interface->terminate(options); +} + +//--------------------------------------------------------------------------- +// This method is called by attachInterface() or attachDebuggerClient() on +// the workloop context, to prepare the controller before attaching the client +// object. This method will call publishProperties() to publish controller +// capabilities and properties that may be used by client objects. However, +// publishProperties() will be called only once, even if prepare() is called +// multiple times. +// +// kIOReturnSuccess on success, or an error code otherwise. +// Returning an error will cause the client attach to fail. + +IOReturn IONetworkController::prepare() +{ + IOReturn ret = kIOReturnSuccess; + + if ( _propertiesPublished == false ) + { + if ( publishProperties() == true ) + { + _propertiesPublished = true; + + if (pm_vars != 0) + { + registerWithPolicyMaker( this ); + } + } + else + { + ret = kIOReturnError; + } + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Handle a client open on the controller object. IOService calls this method +// with the arbitration lock held. 
Subclasses are not expected to override +// this method. +// +// client: The client that is attempting to open the controller. +// options: See IOService. +// argument: See IOService. +// +// Returns true to accept the client open, false to refuse it. + +bool IONetworkController::handleOpen(IOService * client, + IOOptionBits options, + void * argument) +{ + assert(client); + return _clientSet->setObject(client); +} + +//--------------------------------------------------------------------------- +// Handle a close from one of the client objects. IOService calls this method +// with the arbitration lock held. Subclasses are not expected to override this +// method. +// +// client: The client that is closing the controller. +// options: See IOService. + +void IONetworkController::handleClose(IOService * client, IOOptionBits options) +{ + _clientSet->removeObject(client); +} + +//--------------------------------------------------------------------------- +// This method is always called by IOService with the arbitration lock held. +// Subclasses should not override this method. +// +// Returns true if the specified client, or any client if none is +// specified, presently has an open on this object. + +bool IONetworkController::handleIsOpen(const IOService * client) const +{ + if (client) + return _clientSet->containsObject(client); + else + return (_clientSet->getCount() > 0); +} + +//--------------------------------------------------------------------------- +// Free the IONetworkController instance by releasing all allocated resources, +// then call super::free(). + +void IONetworkController::free() +{ +#define RELEASE(x) do { if (x) { (x)->release(); (x) = 0; } } while (0) + + // We should have no clients at this point. If we do, + // then something is very wrong! It means that a client + // has an open on us, and yet we are being freed. 
+ + if (_clientSet) assert(_clientSet->getCount() == 0); + + RELEASE( _outputQueue ); + RELEASE( _cmdGate ); + RELEASE( _workLoop ); + RELEASE( _clientSetIter ); + RELEASE( _clientSet ); + RELEASE( _linkStatus ); + RELEASE( _linkSpeed ); + + if (_mediumLock) { IOLockFree(_mediumLock); _mediumLock = 0; } + + super::free(); +} + +//--------------------------------------------------------------------------- +// Handle an enable request from a client. + +IOReturn IONetworkController::enable(IOService * client) +{ + if (OSDynamicCast(IONetworkInterface, client)) + return enable((IONetworkInterface *) client); + + if (OSDynamicCast(IOKernelDebugger, client)) + return enable((IOKernelDebugger *) client); + + IOLog("%s::%s Unknown client type\n", getName(), __FUNCTION__); + return kIOReturnBadArgument; +} + +//--------------------------------------------------------------------------- +// Handle a disable request from a client. + +IOReturn IONetworkController::disable(IOService * client) +{ + if (OSDynamicCast(IONetworkInterface, client)) + return disable((IONetworkInterface *) client); + + if (OSDynamicCast(IOKernelDebugger, client)) + return disable((IOKernelDebugger *) client); + + IOLog("%s::%s Unknown client type\n", getName(), __FUNCTION__); + return kIOReturnBadArgument; +} + +//--------------------------------------------------------------------------- +// Called by an interface client to enable the controller. + +IOReturn IONetworkController::enable(IONetworkInterface * interface) +{ + IOLog("IONetworkController::%s\n", __FUNCTION__); + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Called by an interface client to disable the controller. 
+ +IOReturn IONetworkController::disable(IONetworkInterface * interface) +{ + IOLog("IONetworkController::%s\n", __FUNCTION__); + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Discover and publish controller capabilities to the property table. +// This method is called by prepare() on the workloop context. +// +// Returns true if all capabilities were discovered and published +// successfully, false otherwise. Returning false will prevent client +// objects from attaching to the controller since a vital property that +// a client requires may be missing. + +bool IONetworkController::publishProperties() +{ + bool ret = false; + const OSString * string; + UInt32 num; + OSDictionary * dict = 0; + OSNumber * numObj = 0; + + do { + bool status; + + string = newVendorString(); + if (string) { + status = setProperty(kIOVendor, (OSObject *) string); + string->release(); + if (status != true) break; + } + + string = newModelString(); + if (string) { + status = setProperty(kIOModel, (OSObject *) string); + string->release(); + if (status != true) break; + } + + string = newRevisionString(); + if (string) { + status = setProperty(kIORevision, (OSObject *) string); + string->release(); + if (status != true) break; + } + + // Publish controller feature flags. + + num = getFeatures(); + if ( !setProperty(kIOFeatures, num, sizeof(num) * 8) ) + break; + + // Publish max/min packet size. + + if ( ( getMaxPacketSize(&num) != kIOReturnSuccess ) || + ( !setProperty(kIOMaxPacketSize, num, sizeof(num) * 8) ) ) + break; + + if ( ( getMinPacketSize(&num) != kIOReturnSuccess ) || + ( !setProperty(kIOMinPacketSize, num, sizeof(num) * 8) ) ) + break; + + // Publish supported packet filters. 
+ + if (getPacketFilters(gIONetworkFilterGroup, &num) != kIOReturnSuccess) + break; + + dict = OSDictionary::withCapacity(4); + numObj = OSNumber::withNumber(num, sizeof(num) * 8); + if ( (dict == 0) || (numObj == 0) ) break; + + if ( !dict->setObject(gIONetworkFilterGroup, numObj) || + !setProperty(kIOPacketFilters, dict) ) + break; + + ret = true; + } + while (false); + + if (ret == false) { + DLOG("IONetworkController::%s error\n", __FUNCTION__); + } + if ( dict ) dict->release(); + if ( numObj ) numObj->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Send a network event to all attached interface objects. + +bool IONetworkController::_broadcastEvent(UInt32 type, void * data = 0) +{ + IONetworkInterface * netif; + + lockForArbitration(); // locks open/close/state changes. + + if (_clientSet->getCount()) + { + _clientSetIter->reset(); + + while ((netif = (IONetworkInterface *)_clientSetIter->getNextObject())) + { + if (OSDynamicCast(IONetworkInterface, netif) == 0) + continue; // only send events to IONetworkInterface objects. + netif->inputEvent(type, data); + } + } + + unlockForArbitration(); + + return true; +} + +//--------------------------------------------------------------------------- +// A client request for the controller to change to a new MTU size. + +IOReturn IONetworkController::setMaxPacketSize(UInt32 maxSize) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Transmit a packet mbuf. + +UInt32 IONetworkController::outputPacket(struct mbuf * m, void * param) +{ + // The implementation here is simply a sink-hole, all packets are + // dropped. + + if (m) freePacket(m); + return 0; +} + +//--------------------------------------------------------------------------- +// Report features supported by the controller and/or driver. 
+ +UInt32 IONetworkController::getFeatures() const +{ + return 0; +} + +//--------------------------------------------------------------------------- +// Create default description strings. + +const OSString * IONetworkController::newVendorString() const +{ + return 0; +} + +const OSString * IONetworkController::newModelString() const +{ + return 0; +} + +const OSString * IONetworkController::newRevisionString() const +{ + return 0; +} + +//--------------------------------------------------------------------------- +// Encode a client command received by executeCommand(). + +struct cmdStruct { + OSObject * client; + void * target; + IONetworkController::Action action; + void * param0; + void * param1; + void * param2; + void * param3; + IOReturn ret; +}; + +//--------------------------------------------------------------------------- +// Get the command client object. + +OSObject * IONetworkController::getCommandClient() const +{ + return ( _workLoop->inGate() ? _cmdClient : 0 ); +} + +//--------------------------------------------------------------------------- +// Configure an interface object created through createInterface(). +// IONetworkController will register its output handler with the interface +// object provided. After the interface is registered and opened by its +// client, it will refuse requests to change its properties through its +// public methods. Since this method is called before the interface object +// is published and registered, subclasses of IONetworkController may override +// this method to configure and customize the interface object. +// +// interface: The interface object to be configured. +// +// Returns true if configuration was successful, false otherwise (this +// will cause attachInterface() to fail). 
+ +bool IONetworkController::configureInterface(IONetworkInterface * interface) +{ + IOOutputAction handler; + OSObject * target; + bool ret; + IONetworkData * stats; + + if (!OSDynamicCast(IONetworkInterface, interface)) + return false; + + IOOutputQueue * outQueue = getOutputQueue(); + + // Must register an output handler with the interface object. + // The interface will send output packets, to its registered + // output handler. If we allocated an output queue, then we + // register the queue as the output handler, otherwise, we + // become the output handler. + + if (outQueue) + { + target = outQueue; + handler = outQueue->getOutputHandler(); + + stats = outQueue->getStatisticsData(); + interface->addNetworkData(stats); + } + else + { + target = this; + handler = getOutputHandler(); + } + ret = interface->registerOutputHandler(target, handler); + + return ret; +} + +//--------------------------------------------------------------------------- +// Called by start() to create an optional IOOutputQueue instance to handle +// output queueing. The default implementation will always return 0, hence +// no output queue will be created. A driver may override this method and +// return a subclass of IOOutputQueue. IONetworkController will keep a +// reference to the queue created, and will release the object when +// IONetworkController is freed. Also see getOutputQueue(). +// +// Returns a newly allocated and initialized IOOutputQueue instance. + +IOOutputQueue * IONetworkController::createOutputQueue() +{ + return 0; +} + +//--------------------------------------------------------------------------- +// Return the output queue allocated though createOutputQueue(). + +IOOutputQueue * IONetworkController::getOutputQueue() const +{ + return _outputQueue; +} + +//--------------------------------------------------------------------------- +// Called by start() to obtain the constraints on the memory buffer +// associated with each mbuf allocated through allocatePacket(). 
+// Drivers can override this method to specify their buffer constraints +// imposed by their bus master hardware. Note that outbound packets, +// those that originate from the network stack, are not subject +// to the constraints reported here. +// +// constraintsP: A pointer to an IOPacketBufferConstraints structure +// that that this method is expected to initialize. +// See IOPacketBufferConstraints structure definition. + +void IONetworkController::getPacketBufferConstraints( + IOPacketBufferConstraints * constraintsP) const +{ + assert(constraintsP); + constraintsP->alignStart = kIOPacketBufferAlign1; + constraintsP->alignLength = kIOPacketBufferAlign1; +} + +//--------------------------------------------------------------------------- +// Allocates a mbuf chain. Each mbuf in the chain is aligned according to +// the constraints from IONetworkController::getPacketBufferConstraints(). +// The last mbuf in the chain will be guaranteed to be length aligned if +// the 'size' argument is a multiple of the length alignment. +// +// The m->m_len and m->pkthdr.len fields are updated by this function. +// This allows the driver to pass the mbuf chain obtained through this +// function to the IOMbufMemoryCursor object directly. +// +// If (size + alignments) is smaller than MCLBYTES, then this function +// will always return a single mbuf header or cluster. +// +// The allocation is guaranteed not to block. If a packet cannot be +// allocated, this function will return NULL. 
+ +#define IO_APPEND_MBUF(head, tail, m) { \ + if (tail) { \ + (tail)->m_next = (m); \ + (tail) = (m); \ + } \ + else { \ + (head) = (tail) = (m); \ + (head)->m_pkthdr.len = 0; \ + } \ +} + +#define IO_ALIGN_MBUF_START(m, mask) { \ + if ( (mask) & mtod((m), vm_address_t) ) { \ + (m)->m_data = (caddr_t) (( mtod((m), vm_address_t) + (mask) ) \ + & ~(mask)); \ + } \ +} + +#define IO_ALIGN_MBUF(m, size, smask, lmask) { \ + IO_ALIGN_MBUF_START((m), (smask)); \ + (m)->m_len = ((size) - (smask)) & ~(lmask); \ +} + +static struct mbuf * allocateMbuf( UInt32 size, UInt32 smask, UInt32 lmask ) +{ + struct mbuf * m; + struct mbuf * head = 0; + struct mbuf * tail = 0; + UInt32 capacity; + + while ( size ) + { + // Allocate a mbuf. For the initial mbuf segment, allocate a + // mbuf header. + + if ( head == 0 ) + { + MGETHDR( m, M_DONTWAIT, MT_DATA ); + capacity = MHLEN; + } + else + { + MGET( m, M_DONTWAIT, MT_DATA ); + capacity = MLEN; + } + + if ( m == 0 ) goto error; // mbuf allocation error + + // Append the new mbuf to the tail of the mbuf chain. + + IO_APPEND_MBUF( head, tail, m ); + + // If the remaining size exceed the buffer size of a normal mbuf, + // then promote it to a cluster. Currently, the cluster size is + // fixed to MCLBYTES bytes. + + if ( ( size + smask + lmask ) > capacity ) + { + MCLGET( m, M_DONTWAIT ); + if ( (m->m_flags & M_EXT) == 0 ) goto error; + capacity = MCLBYTES; + } + + // Align the mbuf per driver's specifications. + + IO_ALIGN_MBUF( m, capacity, smask, lmask ); + + // Compute the number of bytes needed after accounting for the + // current mbuf allocation. + + if ( (UInt) m->m_len > size ) + m->m_len = size; + + size -= m->m_len; + + // Update the total length in the packet header. 
+ + head->m_pkthdr.len += m->m_len; + } + + return head; + +error: + if ( head ) m_freem(head); + return 0; +} + +struct mbuf * IONetworkController::allocatePacket( UInt32 size ) +{ + struct mbuf * m; + + do { + // Handle the simple case where the requested size + // is small enough for a single mbuf. Otherwise, + // go to the more costly route and call the + // generic mbuf allocation routine. + + if ( ( size + _alignStart ) <= MCLBYTES ) { + if ( ( size + _alignStart ) > MHLEN ) { + m = m_getpacket(); /* MGETHDR+MCLGET under one single lock */ + if ( m == 0 ) break; + } + else + { + MGETHDR( m, M_DONTWAIT, MT_DATA ); + if ( m == 0 ) break; + } + + // Align start of mbuf buffer. + + IO_ALIGN_MBUF_START( m, _alignStart ); + + // No length adjustment for single mbuf. + // Driver gets what it asked for. + + m->m_pkthdr.len = m->m_len = size; + } + else + { + m = allocateMbuf(size, _alignStart, _alignLength); + } + } while ( false ); + + return m; +} + +//--------------------------------------------------------------------------- +// Release the mbuf back to the free pool. + +void IONetworkController::freePacket(struct mbuf * m, IOOptionBits options) +{ + assert(m); + + if ( options & kDelayFree ) + { + m->m_nextpkt = _freeList; + _freeList = m; + } + else + { + m_freem_list(m); + } +} + +UInt32 IONetworkController::releaseFreePackets() +{ + UInt32 count = 0; + + if ( _freeList ) + { + count = m_freem_list( _freeList ); + _freeList = 0; + } + return count; +} + +static inline bool IO_COPY_MBUF( + const struct mbuf * src, + struct mbuf * dst, + int length) +{ + caddr_t src_dat, dst_dat; + int dst_len, src_len; + + assert(src && dst); + + dst_len = dst->m_len; + dst_dat = dst->m_data; + + while (src) { + + src_len = src->m_len; + src_dat = src->m_data; + + if (src_len > length) + src_len = length; + + while (src_len) { + + if (dst_len >= src_len) { + // copy entire src mbuf to dst mbuf. 
+ + bcopy(src_dat, dst_dat, src_len); + length -= src_len; + dst_len -= src_len; + dst_dat += src_len; + src_len = 0; + } + else { + // fill up dst mbuf with some portion of the data in + // the src mbuf. + + bcopy(src_dat, dst_dat, dst_len); // dst_len = 0? + length -= dst_len; + dst_len = 0; + src_len -= dst_len; + } + + // Go to the next destination mbuf segment. + + if (dst_len == 0) { + if (!(dst = dst->m_next)) + return (length == 0); + dst_len = dst->m_len; + dst_dat = dst->m_data; + } + + } /* while (src_len) */ + + src = src->m_next; + + } /* while (src) */ + + return (length == 0); // returns true on success. +} + +//--------------------------------------------------------------------------- +// Replace the mbuf pointed by the given pointer with another mbuf. +// Drivers can call this method to replace a mbuf before passing the +// original mbuf, which contains a received frame, to the network layer. +// +// mp: A pointer to the original mbuf that shall be updated by this +// method to point to the new mbuf. +// size: If size is 0, then the new mbuf shall have the same size +// as the original mbuf that is being replaced. Otherwise, the new +// mbuf shall have the size specified here. +// +// If mbuf allocation was successful, then the replacement will +// take place and the original mbuf will be returned. Otherwise, +// a NULL is returned. + +struct mbuf * IONetworkController::replacePacket(struct mbuf ** mp, + UInt32 size = 0) +{ + assert((mp != NULL) && (*mp != NULL)); + + struct mbuf * m = *mp; + + // If size is zero, then size is taken from the source mbuf. + + if (size == 0) size = m->m_pkthdr.len; + + // Allocate a new packet to replace the current packet. + + if ( (*mp = allocatePacket(size)) == 0 ) + { + *mp = m; m = 0; + } + + return m; +} + +//--------------------------------------------------------------------------- +// Make a copy of a mbuf, and return the copy. The source mbuf is not modified. +// +// m: The source mbuf. 
+// size: The number of bytes to copy. If set to 0, then the entire +// source mbuf is copied. +// +// Returns a new mbuf created from the source packet. + +struct mbuf * IONetworkController::copyPacket(const struct mbuf * m, + UInt32 size = 0) +{ + struct mbuf * mn; + + assert(m != NULL); + + // If size is zero, then size is taken from the source mbuf. + + if (size == 0) size = m->m_pkthdr.len; + + // Copy the current mbuf to the new mbuf, and return the new mbuf. + // The input mbuf is left intact. + + if ( (mn = allocatePacket(size)) == 0 ) return 0; + + if (!IO_COPY_MBUF(m, mn, size)) + { + freePacket(mn); mn = 0; + } + + return mn; +} + +//--------------------------------------------------------------------------- +// Either replace or copy the source mbuf given depending on the amount of +// data in the source mbuf. This method will either perform a copy or replace +// the source mbuf, whichever is more time efficient. If replaced, then the +// original mbuf is returned, and a new mbuf is allocated to take its place. +// If copied, the source mbuf is left intact, while a copy is returned that +// is just big enough to hold all the data from the source mbuf. +// +// mp: A pointer to the source mbuf that may be updated by this +// method to point to the new mbuf if replaced. +// rcvlen: The number of data bytes in the source mbuf. +// replacedP: Pointer to a bool that is set to true if the +// source mbuf was replaced, or set to false if the +// source mbuf was copied. +// +// Returns a replacement or a copy of the source mbuf, 0 if mbuf +// allocation failed. + +struct mbuf * IONetworkController::replaceOrCopyPacket(struct mbuf ** mp, + UInt32 rcvlen, + bool * replacedP) +{ + struct mbuf * m; + + assert((mp != NULL) && (*mp != NULL)); + + if ( (rcvlen + _alignPadding) > MHLEN ) + { + // Large packet, it is more efficient to allocate a new mbuf + // to replace the original mbuf than to make a copy. 
The new + // packet shall have exactly the same size as the original + // mbuf being replaced. + + m = *mp; + + if ( (*mp = allocatePacket(m->m_pkthdr.len)) == 0 ) + { + *mp = m; m = 0; // error recovery + } + + *replacedP = true; + } + else + { + // The copy will fit within a header mbuf. Fine, make a copy + // of the original mbuf instead of replacing it. We only copy + // the rcvlen bytes, not the entire source mbuf. + + if ( (m = allocatePacket(rcvlen)) == 0 ) return 0; + + if (!IO_COPY_MBUF(*mp, m, rcvlen)) + { + freePacket(m); m = 0; + } + + *replacedP = false; + } + + return m; +} + +//--------------------------------------------------------------------------- +// Get hardware support of network/transport layer checksums. + +IOReturn +IONetworkController::getChecksumSupport( UInt32 * checksumMask, + UInt32 checksumFamily, + bool isOutput ) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Update a mbuf with the result from the hardware checksum engine. + +#define kTransportLayerPartialChecksums \ + ( kChecksumTCPNoPseudoHeader | \ + kChecksumUDPNoPseudoHeader | \ + kChecksumTCPSum16 ) + +#define kTransportLayerFullChecksums \ + ( kChecksumTCP | kChecksumUDP ) + +bool +IONetworkController::setChecksumResult( struct mbuf * m, + UInt32 family, + UInt32 result, + UInt32 valid, + UInt32 param0 = 0, + UInt32 param1 = 0 ) +{ +#ifdef HW_CSUM_SUPPORT + // Reporting something that is valid without checking for it + // is forbidden. + + valid &= result; + + // Initialize checksum result fields in the packet. + + m->m_pkthdr.csum_flags = 0; + + if ( family != kChecksumFamilyTCPIP ) + { + return false; + } + + // Set the result for the network layer (IP) checksum. + + if ( result & kChecksumIP ) + { + m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; + if ( valid & kChecksumIP ) + m->m_pkthdr.csum_flags |= CSUM_IP_VALID; + } + + // Now examine the transport layer checksum flags. 
+ + if ( valid & kTransportLayerFullChecksums ) + { + // Excellent, hardware did account for the pseudo-header + // and no "partial" checksum value is required. + + m->m_pkthdr.csum_flags |= ( CSUM_DATA_VALID | CSUM_PSEUDO_HDR ); + m->m_pkthdr.csum_data = 0xffff; // fake a valid checksum value + } + else if ( result & kTransportLayerPartialChecksums ) + { + // Hardware does not account for the pseudo-header. + // Driver must pass up the partial TCP/UDP checksum, + // and the transport layer must adjust for the missing + // 12-byte pseudo-header. + + m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; + m->m_pkthdr.csum_data = (UInt16) param0; + + if ( result & kChecksumTCPSum16 ) + { + // A very simple engine that only computes a ones complement + // sum of 16-bit words (UDP/TCP style checksum), from a fixed + // offset, without the ability to scan for the IP or UDP/TCP + // headers. Must pass up the offset to the packet data where + // the checksum computation started from. + + m->m_pkthdr.csum_flags |= CSUM_TCP_SUM; // XXX - fake constant + m->m_pkthdr.csum_data |= (((UInt16) param1) << 16); + } + } + return true; +#else + return false; +#endif HW_CSUM_SUPPORT +} + +//--------------------------------------------------------------------------- +// Get the checksums that must be performed by the hardware for the +// given packet, before it is sent on the network. + +void +IONetworkController::getChecksumDemand( const struct mbuf * m, + UInt32 checksumFamily, + UInt32 * demandMask, + void * param0 = 0, + void * param1 = 0 ) +{ +#ifdef HW_CSUM_SUPPORT + if ( checksumFamily != kChecksumFamilyTCPIP ) + { + *demandMask = 0; return; + } + + *demandMask = m->m_pkthdr.csum_flags & ( kChecksumIP | + kChecksumTCP | + kChecksumUDP | + kChecksumTCPSum16 ); + + if ( m->m_pkthdr.csum_flags & kChecksumTCPSum16 ) + { + // param0 is start offset (XXX - range?) + // param1 is stuff offset (XXX - range?) 
+ + if (param0) + *((UInt16 *) param0) = (UInt16) (m->m_pkthdr.csum_data); + if (param1) + *((UInt16 *) param1) = (UInt16) (m->m_pkthdr.csum_data >> 16); + } +#else + *demandMask = 0; + return; +#endif HW_CSUM_SUPPORT +} + +#if 0 +//--------------------------------------------------------------------------- +// Used for debugging only. Log the mbuf fields. + +static void _logMbuf(struct mbuf * m) +{ + if (!m) { + IOLog("logMbuf: NULL mbuf\n"); + return; + } + + while (m) { + IOLog("m_next : %08x\n", (UInt) m->m_next); + IOLog("m_nextpkt: %08x\n", (UInt) m->m_nextpkt); + IOLog("m_len : %d\n", (UInt) m->m_len); + IOLog("m_data : %08x\n", (UInt) m->m_data); + IOLog("m_type : %08x\n", (UInt) m->m_type); + IOLog("m_flags : %08x\n", (UInt) m->m_flags); + + if (m->m_flags & M_PKTHDR) + IOLog("m_pkthdr.len : %d\n", (UInt) m->m_pkthdr.len); + + if (m->m_flags & M_EXT) { + IOLog("m_ext.ext_buf : %08x\n", (UInt) m->m_ext.ext_buf); + IOLog("m_ext.ext_size: %d\n", (UInt) m->m_ext.ext_size); + } + + m = m->m_next; + } + IOLog("\n"); +} +#endif /* 0 */ + +//--------------------------------------------------------------------------- +// Allocate and attache a new IOKernelDebugger client object. +// +// debuggerP: A handle that is updated by this method +// with the allocated IOKernelDebugger instance. +// +// Returns true on success, false otherwise. + +bool IONetworkController::attachDebuggerClient(IOKernelDebugger ** debugger) +{ + IOKernelDebugger * client; + bool ret = false; + + // Prepare the controller. + + if (executeCommand(this, &IONetworkController::handleCommand, + this, (void *) kCommandPrepare) != kIOReturnSuccess) + { + return false; + } + + // Create a debugger client nub and register the static + // member functions as the polled-mode handlers. + + client = IOKernelDebugger::debugger( this, + &debugTxHandler, + &debugRxHandler ); + + if ( client && !client->attach(this) ) + { + // Unable to attach the client object. 
+ client->terminate( kIOServiceRequired | kIOServiceSynchronous ); + client->release(); + client = 0; + } + + *debugger = client; + + if ( client ) + { + client->registerService(); + ret = true; + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Detach and terminate the IOKernelDebugger client object provided. +// A synchronous termination is issued, and this method returns after +// the debugger client has been terminated. +// +// debugger: The IOKernelDebugger instance to be detached and terminated. +// If the argument provided is NULL or is not an IOKernelDebugger, +// this method will return immediately. + +void IONetworkController::detachDebuggerClient(IOKernelDebugger * debugger) +{ + if (OSDynamicCast(IOKernelDebugger, debugger) == 0) + return; + + // Terminate the debugger client and return after the client has + // been terminated. + + debugger->terminate(kIOServiceRequired | kIOServiceSynchronous); +} + +//--------------------------------------------------------------------------- +// An enable request from an IOKernelDebugger client. + +IOReturn IONetworkController::enable(IOKernelDebugger * debugger) +{ + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// A disable request from an IOKernelDebugger client. + +IOReturn IONetworkController::disable(IOKernelDebugger * debugger) +{ + return kIOReturnSuccess; +} + +//--------------------------------------------------------------------------- +// Take and release the debugger lock. 
+ +void IONetworkController::reserveDebuggerLock() +{ + if ( _debugLockCount++ == 0 ) + { + _debugLockState = IODebuggerLock( this ); + } +} + +void IONetworkController::releaseDebuggerLock() +{ + if ( --_debugLockCount == 0 ) + { + IODebuggerUnlock( _debugLockState ); + } + assert( _debugLockCount >= 0 ); +} + +//--------------------------------------------------------------------------- +// This static C++ member function is registered by attachDebuggerClient() +// as the debugger receive handler. IOKernelDebugger will call this +// function when KDP is polling for a received packet. This function will +// in turn will call the receivePacket() member function implemented by +// a driver with debugger support. + +void IONetworkController::debugRxHandler(IOService * handler, + void * buffer, + UInt32 * length, + UInt32 timeout) +{ + ((IONetworkController *) handler)->receivePacket(buffer, + length, + timeout); +} + +//--------------------------------------------------------------------------- +// This static C++ member function is registered by attachDebuggerClient() +// as the debugger transmit handler. IOKernelDebugger will call this +// function when KDP sends an outgoing packet. This function will in turn +// call the sendPacket() member function implemented by a driver with +// debugger support. + +void IONetworkController::debugTxHandler(IOService * handler, + void * buffer, + UInt32 length) +{ + ((IONetworkController *) handler)->sendPacket(buffer, length); +} + +//--------------------------------------------------------------------------- +// This method must be implemented by a driver that supports kernel debugging. +// After a debugger client is attached through attachDebuggerClient(), this +// method will be called by the debugger client to poll for a incoming packet +// when the debugger session is active. This method may be called from the +// primary interrupt context, implementation must avoid any memory allocation, +// and must never block. 
The receivePacket() method in IONetworkController is +// used as a placeholder and should not be called. A driver that attaches +// a debugger client must override this method. +// +// pkt: Pointer to a receive buffer where the received packet should +// be stored to. The buffer has enough space for 1518 bytes. +// pkt_len: The length of the received packet must be written to the +// integer pointed by pkt_len. +// timeout: The maximum amount of time in milliseconds to poll for +// a packet to arrive before this method must return. + +void IONetworkController::receivePacket(void * /*pkt*/, + UInt32 * /*pkt_len*/, + UInt32 /*timeout*/) +{ + IOLog("IONetworkController::%s()\n", __FUNCTION__); +} + +//--------------------------------------------------------------------------- +// Debugger polled-mode transmit handler. This method must be implemented +// by a driver that supports kernel debugging. After a debugger client is +// attached through attachDebuggerClient(), this method will be called by the +// debugger to send an outbound packet when the kernel debugger is active. +// This method may be called from the primary interrupt context, and the +// implementation must avoid any memory allocation, and must never block. +// sendPacket() method in IONetworkController is used as a placeholder +// and should not be called. A driver that attaches a debugger client +// must override this method. +// +// pkt: Pointer to a transmit buffer containing the packet to be sent. +// pkt_len: The amount of data in the transmit buffer. + +void IONetworkController::sendPacket(void * /*pkt*/, UInt32 /*pkt_len*/) +{ + IOLog("IONetworkController::%s()\n", __FUNCTION__); +} + +//--------------------------------------------------------------------------- +// Report the link status and the active medium. 
+ +bool IONetworkController::setLinkStatus( + UInt32 status, + const IONetworkMedium * activeMedium, + UInt64 speed, + OSData * data) +{ + bool success = true; + bool changed = false; + UInt32 linkEvent = 0; + const OSSymbol * name = activeMedium ? activeMedium->getName() : + gIONullMediumName; + + if (data == 0) + data = (OSData *) gIONullLinkData; + + if ((speed == 0) && activeMedium) + speed = activeMedium->getSpeed(); + + MEDIUM_LOCK; + + // Update kIOActiveMedium property. + + if (name != _lastActiveMediumName) + { + if ( setProperty(gIOActiveMediumKey, (OSSymbol *) name) ) + { + changed = true; + _lastActiveMediumName = name; + } + else + success = false; + } + + // Update kIOLinkData property. + + if (data != _lastLinkData) + { + if ( setProperty(gIOLinkDataKey, data) ) + { + changed = true; + _lastLinkData = data; + } + else + success = false; + } + + // Update kIOLinkStatus property. + + if (status != _linkStatus->unsigned32BitValue()) + { + if (status & kIONetworkLinkValid) + { + linkEvent = (status & kIONetworkLinkActive) ? + kIONetworkEventTypeLinkUp : + kIONetworkEventTypeLinkDown; + } + _linkStatus->setValue(status); + changed = true; + } + + // Update kIOLinkSpeed property. + + if (speed != _linkSpeed->unsigned64BitValue()) + { + _linkSpeed->setValue(speed); + changed = true; + } + + MEDIUM_UNLOCK; + + // Broadcast a link event to interface objects. + + if (linkEvent) + _broadcastEvent(linkEvent); + + return success; +} + +//--------------------------------------------------------------------------- +// Returns the medium dictionary published by the driver through +// publishMediumDictionary(). Use copyMediumDictionary() to get a copy +// of the medium dictionary. +// +// Returns the published medium dictionary, or 0 if the driver has not +// yet published a medium dictionary through publishMediumDictionary(). 
+ +const OSDictionary * IONetworkController::getMediumDictionary() const +{ + return (OSDictionary *) getProperty(kIOMediumDictionary); +} + +//--------------------------------------------------------------------------- +// Returns a copy of the medium dictionary published by the driver. +// The caller is responsible for releasing the dictionary object returned. +// Use getMediumDictionary() to get a reference to the published medium +// dictionary instead of creating a copy. +// +// Returns a copy of the medium dictionary, or 0 if the driver has not +// published a medium dictionary through publishMediumDictionary(). + +OSDictionary * IONetworkController::copyMediumDictionary() const +{ + const OSDictionary * mediumDict; + OSDictionary * copy = 0; + + MEDIUM_LOCK; + + mediumDict = getMediumDictionary(); + + if (mediumDict) + { + copy = OSDictionary::withDictionary(mediumDict, + mediumDict->getCount()); + } + + MEDIUM_UNLOCK; + + return copy; +} + +//--------------------------------------------------------------------------- +// A client request to change the media selection. + +IOReturn IONetworkController::selectMedium(const IONetworkMedium * medium) +{ + return kIOReturnUnsupported; +} + +//--------------------------------------------------------------------------- +// Private function to lookup a key in the medium dictionary and call +// setMedium() if a match is found. This function is called by our +// clients to change the medium selection by passing a name for the desired +// medium. + +IOReturn IONetworkController::selectMediumWithName(const OSSymbol * mediumName) +{ + OSSymbol * currentMediumName; + IONetworkMedium * newMedium = 0; + bool doChange = true; + IOReturn ret = kIOReturnSuccess; + + if (OSDynamicCast(OSSymbol, mediumName) == 0) + return kIOReturnBadArgument; + + MEDIUM_LOCK; + + do { + const OSDictionary * mediumDict = getMediumDictionary(); + if (!mediumDict) + { + // no medium dictionary, bail out. 
+ ret = kIOReturnUnsupported; + break; + } + + // Lookup the new medium in the dictionary. + + newMedium = (IONetworkMedium *) mediumDict->getObject(mediumName); + if (!newMedium) + { + ret = kIOReturnBadArgument; + break; // not found, invalid mediumName. + } + + newMedium->retain(); + + // Lookup the current medium key to avoid unnecessary + // medium changes. + + currentMediumName = (OSSymbol *) getProperty(gIOCurrentMediumKey); + + // Is change necessary? + + if (currentMediumName && mediumName->isEqualTo(currentMediumName)) + doChange = false; + } + while (0); + + MEDIUM_UNLOCK; + + if (newMedium) + { + // Call the driver's selectMedium() without holding the medium lock. + + if (doChange) + ret = selectMedium(newMedium); + + // Remove the earlier retain. + + newMedium->release(); + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Designate an entry in the published medium dictionary as +// the current selected medium. + +bool IONetworkController::setSelectedMedium(const IONetworkMedium * medium) +{ + bool success = true; + bool changed = false; + const OSSymbol * name = medium ? medium->getName() : gIONullMediumName; + + MEDIUM_LOCK; + + if (name != _lastCurrentMediumName) + { + if ( setProperty(gIOCurrentMediumKey, (OSSymbol *) name) ) + { + changed = true; + _lastCurrentMediumName = name; + } + else + success = false; + } + + MEDIUM_UNLOCK; + +#if 0 + if (changed) + _broadcastEvent(kIONetworkEventTypeLinkChange); +#endif + + return success; +} + +//--------------------------------------------------------------------------- +// Get the current selected medium. + +const IONetworkMedium * IONetworkController::getSelectedMedium() const +{ + IONetworkMedium * medium = 0; + OSSymbol * mediumName; + + MEDIUM_LOCK; + + do { + const OSDictionary * mediumDict = getMediumDictionary(); + if (!mediumDict) // no medium dictionary, bail out. + break; + + // Fetch the current medium name from the property table. 
+ + mediumName = (OSSymbol *) getProperty(gIOCurrentMediumKey); + + // Make sure the current medium name points to an entry in + // the medium dictionary. + + medium = (IONetworkMedium *) mediumDict->getObject(mediumName); + + // Invalid current medium, try the default medium. + + if ( medium == 0 ) + { + OSString * aString; + + // This comes from the driver's property list. + // More checking is done to avoid surprises. + + aString = OSDynamicCast( OSString, + getProperty(gIODefaultMediumKey) ); + + medium = (IONetworkMedium *) mediumDict->getObject(aString); + } + } + while (0); + + MEDIUM_UNLOCK; + + return medium; +} + +//--------------------------------------------------------------------------- +// A private function to verify a medium dictionary. Returns true if the +// dictionary is OK. + +static bool verifyMediumDictionary(const OSDictionary * mediumDict) +{ + OSCollectionIterator * iter; + bool verifyOk = true; + OSSymbol * key; + + if (!OSDynamicCast(OSDictionary, mediumDict)) + return false; // invalid argument + + if (mediumDict->getCount() == 0) + return false; // empty dictionary + + iter = OSCollectionIterator::withCollection((OSDictionary *) mediumDict); + if (!iter) + return false; // cannot allocate iterator + + while ((key = (OSSymbol *) iter->getNextObject())) + { + if ( !OSDynamicCast(IONetworkMedium, mediumDict->getObject(key)) ) + { + verifyOk = false; // non-medium object in dictionary + break; + } + } + + iter->release(); + + return verifyOk; +} + +//--------------------------------------------------------------------------- +// Publish a dictionary of IONetworkMedium objects. + +bool +IONetworkController::publishMediumDictionary(const OSDictionary * mediumDict) +{ + OSDictionary * cloneDict; + bool ret = false; + + if (!verifyMediumDictionary(mediumDict)) + return false; // invalid dictionary + + // Create a clone of the source dictionary. 
This prevents the driver + // from adding/removing entries after the medium dictionary is added + // to the property table. + + cloneDict = OSDictionary::withDictionary(mediumDict, + mediumDict->getCount()); + if (!cloneDict) + return false; // unable to create a copy + + MEDIUM_LOCK; + + // Add the dictionary to the property table. + + if (setProperty(kIOMediumDictionary, cloneDict)) + { + const OSSymbol * mediumName; + + // Update kIOSelectedMedium property. + + mediumName = (OSSymbol *) getProperty(gIOCurrentMediumKey); + if (cloneDict->getObject(mediumName) == 0) + { + mediumName = gIONullMediumName; + } + setProperty(gIOCurrentMediumKey, (OSSymbol *) mediumName); + _lastCurrentMediumName = mediumName; + + // Update kIOActiveMedium property. + + mediumName = (OSSymbol *) getProperty(gIOActiveMediumKey); + if (cloneDict->getObject(mediumName) == 0) + { + mediumName = gIONullMediumName; + } + setProperty(gIOActiveMediumKey, (OSSymbol *) mediumName); + _lastActiveMediumName = mediumName; + + ret = true; + } + + MEDIUM_UNLOCK; + + // Retained by the property table. drop our retain count. + + cloneDict->release(); + +#if 0 + // Broadcast a link change event. + + _broadcastEvent(kIONetworkEventTypeLinkChange); +#endif + + return ret; +} + +//--------------------------------------------------------------------------- +// Static function called by the internal IOCommandGate object to +// handle a runAction() request invoked by executeCommand(). + +IOReturn IONetworkController::executeCommandAction(OSObject * owner, + void * arg0, + void * /* arg1 */, + void * /* arg2 */, + void * /* arg3 */) +{ + IONetworkController * self = (IONetworkController *) owner; + cmdStruct * cmdP = (cmdStruct *) arg0; + IOReturn ret; + bool accept = true; + OSObject * oldClient; + + assert(cmdP && self); + + oldClient = self->_cmdClient; + + if (accept != true) + { + // Command rejected. 
+ ret = kIOReturnNotPermitted; + } + else + { + self->_cmdClient = cmdP->client; + + cmdP->ret = (*cmdP->action)( cmdP->target, + cmdP->param0, + cmdP->param1, + cmdP->param2, + cmdP->param3 ); + + self->_cmdClient = oldClient; + + ret = kIOReturnSuccess; + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Perform an "action" that is synchronized by the command gate. + +IOReturn IONetworkController::executeCommand(OSObject * client, + Action action, + void * target, + void * param0, + void * param1, + void * param2, + void * param3) +{ + cmdStruct cmd; + IOReturn ret; + + cmd.client = client; + cmd.action = action; + cmd.target = target; + cmd.param0 = param0; + cmd.param1 = param1; + cmd.param2 = param2; + cmd.param3 = param3; + + // Execute the client command through the command gate. Client commands + // are thus synchronized with the workloop returned by getWorkLoop(). + + ret = _cmdGate->runAction( (IOCommandGate::Action) + &IONetworkController::executeCommandAction, + (void *) &cmd ); /* arg0 - cmdStruct */ + + // If executeCommandAction() executed successfully, then return the + // status from the client command that was executed. + + if (ret == kIOReturnSuccess) + ret = cmd.ret; + + return ret; +} + +//--------------------------------------------------------------------------- +// Called by executeCommand() to handle the client command on the +// workloop context. 
+ +IOReturn IONetworkController::handleCommand(void * target, + void * param0, + void * param1, + void * param2, + void * param3) +{ + + IONetworkController * self = (IONetworkController *) target; + UInt32 command = (UInt32) param0; + IOService * client = (IOService *) param1; + IOReturn ret; + + switch (command) + { + case kCommandEnable: + ret = self->enable(client); + break; + + case kCommandDisable: + ret = self->disable(client); + break; + + case kCommandPrepare: + ret = self->prepare(); + break; + + default: + ret = kIOReturnUnsupported; + break; + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Issue an kCommandEnable command to handleCommand(). + +IOReturn IONetworkController::doEnable(IOService * client) +{ + return executeCommand( client, + &IONetworkController::handleCommand, + this, + (void *) kCommandEnable, + (void *) client); +} + +//--------------------------------------------------------------------------- +// Issue an kCommandDisable command to handleCommand(). + +IOReturn IONetworkController::doDisable(IOService * client) +{ + return executeCommand( client, + &IONetworkController::handleCommand, + this, + (void *) kCommandDisable, + (void *) client); +} diff --git a/iokit/Families/IONetworking/IONetworkData.cpp b/iokit/Families/IONetworking/IONetworkData.cpp new file mode 100644 index 000000000..821f02b60 --- /dev/null +++ b/iokit/Families/IONetworking/IONetworkData.cpp @@ -0,0 +1,597 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkData.cpp + */ + +#include +#include +#include +#include +#include +#include + +#define super OSObject +OSDefineMetaClassAndStructors( IONetworkData, OSObject ) +OSMetaClassDefineReservedUnused( IONetworkData, 0); +OSMetaClassDefineReservedUnused( IONetworkData, 1); +OSMetaClassDefineReservedUnused( IONetworkData, 2); +OSMetaClassDefineReservedUnused( IONetworkData, 3); + +#define TAP_IS_VALID (_tapAction) + +// All access method are serialized by a single global lock, +// shared among all IONetworkData instances. +// +static IOLock * gIONDLock = 0; +#define LOCK IOTakeLock(gIONDLock) +#define UNLOCK IOUnlock(gIONDLock) + +static const OSSymbol * gIONDDataKey; +static const OSSymbol * gIONDAccessKey; +static const OSSymbol * gIONDSizeKey; + +//--------------------------------------------------------------------------- +// IONetworkData class initializer. + +void IONetworkData::initialize() +{ + // Allocates the global data lock. 
+ // + gIONDLock = IOLockAlloc(); + assert(gIONDLock); + IOLockInitWithState(gIONDLock, kIOLockStateUnlocked); + + gIONDDataKey = OSSymbol::withCStringNoCopy( kIONetworkDataBytes ); + gIONDAccessKey = OSSymbol::withCStringNoCopy( kIONetworkDataAccessTypes ); + gIONDSizeKey = OSSymbol::withCStringNoCopy( kIONetworkDataSize ); + + assert(gIONDDataKey && gIONDAccessKey && gIONDSizeKey); +} + +//--------------------------------------------------------------------------- +// Initialize an IONetworkData instance. + +bool +IONetworkData::init(const char * name, + UInt32 bufferType, + UInt32 bufferSize, + void * extBuffer = 0, + UInt32 accessTypes = kIONetworkDataBasicAccessTypes, + void * target = 0, + Action action = 0, + void * param = 0) +{ + if ((bufferType == kIONetworkDataBufferTypeInternal) || + (bufferType == kIONetworkDataBufferTypeExternal)) + { + _buffer = (bufferType == kIONetworkDataBufferTypeInternal) ? + (void *) kalloc(bufferSize) : extBuffer; + + if (_buffer == 0) + return false; + + if (bufferType == kIONetworkDataBufferTypeInternal) + bzero(_buffer, bufferSize); + } + + _bufType = bufferType; + _access = accessTypes; + _tapTarget = target; + _tapAction = action; + _tapParam = param; + _size = bufferSize; + + // Generate a key for this object based on its assigned name. + // + if ((_key = OSSymbol::withCString(name)) == 0) + return false; + + return true; +} + +//--------------------------------------------------------------------------- +// Factory method that will construct and initialize an IONetworkData +// instance with an internal buffer. 
+ +IONetworkData * +IONetworkData::withInternalBuffer( + const char * name, + UInt32 bufferSize, + UInt32 accessTypes = kIONetworkDataBasicAccessTypes, + void * target = 0, + Action action = 0, + void * param = 0) +{ + IONetworkData * aData = new IONetworkData; + + if (aData && !aData->init(name, + kIONetworkDataBufferTypeInternal, + bufferSize, + 0, + accessTypes, + target, + action, + param)) + { + aData->release(); + aData = 0; + } + return aData; +} + +//--------------------------------------------------------------------------- +// Factory method that will construct and initialize an IONetworkData +// instance with an external buffer. + +IONetworkData * +IONetworkData::withExternalBuffer( + const char * name, + UInt32 bufferSize, + void * buffer, + UInt32 accessTypes = kIONetworkDataBasicAccessTypes, + void * target = 0, + Action action = 0, + void * param = 0) +{ + IONetworkData * aData = new IONetworkData; + + if (aData && !aData->init(name, + kIONetworkDataBufferTypeExternal, + bufferSize, + buffer, + accessTypes, + target, + action, + param)) + { + aData->release(); + aData = 0; + } + return aData; +} + +//--------------------------------------------------------------------------- +// Factory method that will construct and initialize an IONetworkData +// instance with no data buffer. The notification handler must intervene +// when the IONetworkData is accessed. + +IONetworkData * +IONetworkData::withNoBuffer(const char * name, + UInt32 bufferSize, + UInt32 accessTypes, + void * target, + Action action, + void * param = 0) +{ + IONetworkData * aData = new IONetworkData; + + if (aData && !aData->init(name, + kIONetworkDataBufferTypeNone, + bufferSize, + 0, + accessTypes, + target, + action, + param)) + { + aData->release(); + aData = 0; + } + return aData; +} + +//--------------------------------------------------------------------------- +// Free the IONetworkData instance. 
+
+void IONetworkData::free()
+{
+    if (_key)
+        _key->release();
+
+    // Only internal buffers were kalloc'ed by init(); external buffers
+    // belong to the client and must not be freed here.
+    if (_buffer && (_bufType == kIONetworkDataBufferTypeInternal))
+        kfree((vm_offset_t) _buffer, _size);
+
+    super::free();
+}
+
+//---------------------------------------------------------------------------
+// Return the type of buffer managed by this instance.
+// See IONetworkDataBufferType enum definition
+
+UInt32 IONetworkData::getBufferType() const
+{
+    return _bufType;
+}
+
+//---------------------------------------------------------------------------
+// Change the supported access types.
+
+// Mask of access bits that setAccessTypes() may never alter.
+// Currently zero, i.e. every access bit is mutable.
+#define kIONetworkDataImmutableAccessTypes 0
+
+void IONetworkData::setAccessTypes(UInt32 types)
+{
+    // Preserve the immutable bits of _access, take the rest from 'types'.
+    // NOTE(review): getAccessTypes() reads _access without taking the
+    // lock -- presumably tolerated as a benign race; verify.
+    LOCK;
+    _access = (_access & kIONetworkDataImmutableAccessTypes) |
+              (types & ~kIONetworkDataImmutableAccessTypes);
+    UNLOCK;
+}
+
+//---------------------------------------------------------------------------
+// Register a target/action to handle access notification.
+
+void IONetworkData::setNotificationTarget(void * target,
+                                          Action action,
+                                          void * param)
+{
+    // All three fields are updated atomically with respect to the
+    // notification dispatch sites (reset/read/write/serialize).
+    LOCK;
+    _tapTarget = target;
+    _tapAction = action;
+    _tapParam  = param;
+    UNLOCK;
+}
+
+//---------------------------------------------------------------------------
+// Return the supported access types.
+
+UInt32 IONetworkData::getAccessTypes() const
+{
+    return _access;
+}
+
+//---------------------------------------------------------------------------
+// Return the notification target.
+
+void * IONetworkData::getNotificationTarget() const
+{
+    return _tapTarget;
+}
+
+//---------------------------------------------------------------------------
+// Return the notification action.
+
+IONetworkData::Action IONetworkData::getNotificationAction() const
+{
+    return _tapAction;
+}
+
+//---------------------------------------------------------------------------
+// Return the notification parameter.
+
+void * IONetworkData::getNotificationParameter() const
+{
+    return _tapParam;
+}
+
+//---------------------------------------------------------------------------
+// Get an OSSymbol key associated with this instance.
+// During initialization, IONetworkData will create an OSSymbol
+// key based on its assigned name.
+//
+// Return an OSSymbol key generated from the assigned name.
+
+const OSSymbol * IONetworkData::getKey() const
+{
+    return _key;
+}
+
+//---------------------------------------------------------------------------
+// Return the size of the data managed by this instance in bytes.
+
+UInt32 IONetworkData::getSize() const
+{
+    return _size;
+}
+
+//---------------------------------------------------------------------------
+// Write to the data buffer with data from a source buffer provided
+// by the caller.
+//
+// srcBuffer     - source of the bytes to copy in.
+// srcBufferSize - number of bytes to copy; must be non-zero.
+// writeOffset   - byte offset into the data buffer where copying begins.
+//
+// Returns true if the copy was performed, false if there is no buffer
+// or the requested range does not fit entirely within the buffer.
+
+bool IONetworkData::writeBytes(const void * srcBuffer,
+                               UInt32 srcBufferSize,
+                               UInt32 writeOffset)
+{
+    if ( _buffer == 0 ) return false;
+
+    // Bounds check written in subtraction form. The original form
+    // ((writeOffset + srcBufferSize) <= _size) can wrap around UInt32
+    // for large srcBufferSize values and falsely pass, allowing an
+    // out-of-bounds bcopy. Since writeOffset < _size is established
+    // first, (_size - writeOffset) cannot underflow.
+    if ( srcBufferSize &&
+         (writeOffset < _size) &&
+         (srcBufferSize <= (_size - writeOffset)) )
+    {
+        bcopy(srcBuffer, (char *) _buffer + writeOffset, srcBufferSize);
+        return true;
+    }
+
+    return false;
+}
+
+//---------------------------------------------------------------------------
+// Return a pointer to the data buffer.
+
+const void * IONetworkData::getBuffer() const
+{
+    // _buffer is already 0 when no buffer exists; no need for a ternary.
+    return _buffer;
+}
+
+//---------------------------------------------------------------------------
+// Copy the data buffer to a destination buffer provided by the caller.
+
+// Copy up to *dstBufferSize bytes out of the data buffer starting at
+// readOffset. On success *dstBufferSize is updated to the number of
+// bytes actually copied (a partial copy occurs when the tail of the
+// buffer is shorter than the request).
+
+bool IONetworkData::readBytes(void * dstBuffer,
+                              UInt32 * dstBufferSize,
+                              UInt32 readOffset) const
+{
+    if ( _buffer == 0 ) return false;
+
+    if ( *dstBufferSize && (readOffset < _size) )
+    {
+        // Clamp the copy length to the bytes remaining after readOffset.
+        UInt32 bytesCopied = min((_size - readOffset), *dstBufferSize);
+
+        bcopy((char *) _buffer + readOffset, dstBuffer, bytesCopied);
+
+        *dstBufferSize = bytesCopied;
+
+        return true;
+    }
+
+    return false;
+}
+
+//---------------------------------------------------------------------------
+// Clear the entire data buffer by filling it with zeroes.
+
+bool IONetworkData::clearBuffer()
+{
+    if ( _buffer )
+    {
+        bzero((void *) _buffer, _size);
+        return true;
+    }
+    return false;
+}
+
+//---------------------------------------------------------------------------
+// Handle a user space request to reset the data buffer.
+
+IOReturn IONetworkData::reset()
+{
+    IOReturn ret = kIOReturnUnsupported;
+
+    LOCK;
+
+    do {
+        // Check access.
+
+        if ( (_access & kIONetworkDataAccessTypeReset) == 0 )
+        {
+            ret = kIOReturnNotWritable;
+            break;
+        }
+
+        // Default action is to bzero the entire buffer.
+
+        if ( clearBuffer() )
+        {
+            ret = kIOReturnSuccess;
+        }
+
+        // Notify our target.
+        // NOTE(review): the tap runs with the lock held, and its return
+        // value replaces any status established by clearBuffer() above.
+        // The handler must not re-enter methods that take LOCK.
+
+        if ( TAP_IS_VALID )
+        {
+            ret = (*_tapAction)(_tapTarget, _tapParam,
+                                this,
+                                (UInt32) kIONetworkDataAccessTypeReset,
+                                0, 0, 0);
+        }
+    }
+    while (0);
+
+    UNLOCK;
+
+    return ret;
+}
+
+//---------------------------------------------------------------------------
+// Handle an external request to read from the data buffer
+// and copy it to the destination buffer provided by the accessor.
+
+IOReturn IONetworkData::read(void * dstBuffer,
+                             UInt32 * dstBufferSize,
+                             UInt32 readOffset)
+{
+    IOReturn ret = kIOReturnUnsupported;
+
+    LOCK;
+
+    do {
+        // Check the arguments.
+
+        if ( !dstBuffer || !dstBufferSize )
+        {
+            ret = kIOReturnBadArgument;
+            break;
+        }
+
+        // Check access.
+
+        if ( (_access & kIONetworkDataAccessTypeRead) == 0 )
+        {
+            ret = kIOReturnNotReadable;
+            break;
+        }
+
+        // Notify the target before the read operation.
+        // The target can take this opportunity to update the
+        // data buffer. If the target returns an error,
+        // abort and return the error.
+        // NOTE(review): when there is no buffer (_buffer == 0) the tap's
+        // success status is returned as-is -- the tap is expected to have
+        // satisfied the read itself in that case.
+
+        if ( TAP_IS_VALID )
+        {
+            ret = (*_tapAction)(_tapTarget, _tapParam,
+                                this,
+                                (UInt32) kIONetworkDataAccessTypeRead,
+                                dstBuffer,
+                                dstBufferSize,
+                                readOffset);
+            if (ret != kIOReturnSuccess)
+                break;
+        }
+
+        if ( _buffer )
+        {
+            ret = readBytes(dstBuffer, dstBufferSize, readOffset) ?
+                  kIOReturnSuccess : kIOReturnBadArgument;
+        }
+    }
+    while (0);
+
+    UNLOCK;
+
+    return ret;
+}
+
+//---------------------------------------------------------------------------
+// Handle an external request to write to the data buffer
+// from a source buffer provided by the accessor.
+
+IOReturn IONetworkData::write(void * srcBuffer,
+                              UInt32 srcBufferSize,
+                              UInt32 writeOffset)
+{
+    IOReturn ret = kIOReturnUnsupported;
+
+    LOCK;
+
+    do {
+        // Check the arguments.
+
+        if ( srcBuffer == 0 )
+        {
+            ret = kIOReturnBadArgument;
+            break;
+        }
+
+        // Check access.
+
+        if ( (_access & kIONetworkDataAccessTypeWrite) == 0 )
+        {
+            ret = kIOReturnNotWritable;
+            break;
+        }
+
+        // Update the data buffer.
+        // A failed bounds check in writeBytes() maps to BadArgument.
+
+        if ( _buffer &&
+             (writeBytes(srcBuffer, srcBufferSize, writeOffset) == false) )
+        {
+            ret = kIOReturnBadArgument;
+            break;
+        }
+
+        // Notify the target after a successful write operation.
+        // NOTE(review): as in reset(), the tap runs under the lock and
+        // its return value becomes the overall result.
+
+        if ( TAP_IS_VALID )
+        {
+            ret = (*_tapAction)(_tapTarget, _tapParam,
+                                this,
+                                (UInt32) kIONetworkDataAccessTypeWrite,
+                                srcBuffer,
+                                &srcBufferSize,
+                                writeOffset);
+        }
+    }
+    while (0);
+
+    UNLOCK;
+
+    return ret;
+}
+
+//---------------------------------------------------------------------------
+// Serialize the IONetworkData object. If notification is enabled,
+// then the notification handler is called before the data buffer is
+// serialized.
+ +bool IONetworkData::serialize(OSSerialize * s) const +{ + bool ok; + OSDictionary * dictToSerialize; + OSData * dataEntry; + OSNumber * numberEntry; + + dictToSerialize = OSDictionary::withCapacity(3); + if (!dictToSerialize) + return false; + + numberEntry = OSNumber::withNumber(_access, sizeof(_access) * 8); + if (numberEntry) { + dictToSerialize->setObject(gIONDAccessKey, numberEntry); + numberEntry->release(); + } + + numberEntry = OSNumber::withNumber(_size, sizeof(_size) * 8); + if (numberEntry) { + dictToSerialize->setObject(gIONDSizeKey, numberEntry); + numberEntry->release(); + } + + LOCK; + + do { + // Check access. + + if ((_access & kIONetworkDataAccessTypeSerialize) == 0) + break; + + if (_buffer == 0) + break; + + // Notify the target before the read operation. + // The target can take this opportunity to update the + // data buffer. If the target returns an error, + // then the data buffer is not serialized. + + if (TAP_IS_VALID && + ((*_tapAction)(_tapTarget, _tapParam, + (IONetworkData *) this, + kIONetworkDataAccessTypeSerialize, + 0, 0, 0) != kIOReturnSuccess)) + { + break; + } + + dataEntry = OSData::withBytesNoCopy(_buffer, _size); + if (dataEntry) { + dictToSerialize->setObject(gIONDDataKey, dataEntry); + dataEntry->release(); + } + } + while (0); + + ok = dictToSerialize->serialize(s); + dictToSerialize->release(); + + UNLOCK; + + return ok; +} diff --git a/iokit/Families/IONetworking/IONetworkInterface.cpp b/iokit/Families/IONetworking/IONetworkInterface.cpp new file mode 100644 index 000000000..849752297 --- /dev/null +++ b/iokit/Families/IONetworking/IONetworkInterface.cpp @@ -0,0 +1,1531 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkInterface.cpp + * + * HISTORY + * 8-Jan-1999 Joe Liu (jliu) created. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +extern "C" { +#include +#include +#include +#include +#include +#include +#include +#include +#include +int copyout(void *kaddr, void *udaddr, size_t len); + +#ifdef KEV_DL_LINK_ON +#include +int dlil_event(struct ifnet *ifp, struct kern_event_msg *event); +#endif +} + +//--------------------------------------------------------------------------- + +#define super IOService + +OSDefineMetaClassAndAbstractStructors( IONetworkInterface, IOService ) +OSMetaClassDefineReservedUnused( IONetworkInterface, 0); +OSMetaClassDefineReservedUnused( IONetworkInterface, 1); +OSMetaClassDefineReservedUnused( IONetworkInterface, 2); +OSMetaClassDefineReservedUnused( IONetworkInterface, 3); +OSMetaClassDefineReservedUnused( IONetworkInterface, 4); +OSMetaClassDefineReservedUnused( IONetworkInterface, 5); +OSMetaClassDefineReservedUnused( IONetworkInterface, 6); +OSMetaClassDefineReservedUnused( IONetworkInterface, 7); +OSMetaClassDefineReservedUnused( IONetworkInterface, 8); +OSMetaClassDefineReservedUnused( IONetworkInterface, 9); +OSMetaClassDefineReservedUnused( IONetworkInterface, 10); +OSMetaClassDefineReservedUnused( IONetworkInterface, 11); 
+OSMetaClassDefineReservedUnused( IONetworkInterface, 12); +OSMetaClassDefineReservedUnused( IONetworkInterface, 13); +OSMetaClassDefineReservedUnused( IONetworkInterface, 14); +OSMetaClassDefineReservedUnused( IONetworkInterface, 15); + +//--------------------------------------------------------------------------- +// Macros + +#ifdef DEBUG +#define DLOG(fmt, args...) IOLog(fmt, ## args) +#else +#define DLOG(fmt, args...) +#endif + +//--------------------------------------------------------------------------- +// Initialize an IONetworkInterface instance. +// +// Returns true if initialized successfully, false otherwise. + +bool IONetworkInterface::init(IONetworkController * controller) +{ + // Propagate the init() call to our superclass. + + if ( super::init() == false ) + return false; + + // The controller object provided must be valid. + + if ( OSDynamicCast(IONetworkController, controller) == 0 ) + return false; + + _controller = controller; + + // Create interface lock to serialize ifnet updates. + + _ifLock = IORecursiveLockAlloc(); + if ( _ifLock == 0 ) + return false; + + // Create an OSNumber to store interface state bits. + + _stateBits = OSNumber::withNumber((UInt64) 0, 32); + if ( _stateBits == 0 ) + return false; + setProperty( kIOInterfaceState, _stateBits ); + + // Create an OSSet to store client objects. Initial capacity + // (which can grow) is set at 2 clients. + + _clientSet = OSSet::withCapacity(2); + if ( _clientSet == 0 ) + return false; + + // Get the ifnet structure of the network interface. Subclasses must + // implement getIfnet() and expect this function to be called when + // they call IONetworkInterface::init(). + + _ifp = getIfnet(); + if ( _ifp == 0 ) + { + DLOG("%s: getIfnet() returned NULL\n", getName()); + return false; + } + + // Intialize the ifnet structure. + + if ( initIfnet(_ifp) == false ) + return false; + + // Create a data dictionary. 
+ + if ( (_dataDict = OSDictionary::withCapacity(5)) == 0 ) + return false; + + IONetworkData * data = IONetworkData::withExternalBuffer( + kIONetworkStatsKey, + sizeof(IONetworkStats), + (UInt8 *) &(_ifp->if_data.ifi_ipackets)); + if ( data ) + { + addNetworkData(data); + data->release(); + } + + // Register default output handler. + + if ( registerOutputHandler( controller, + controller->getOutputHandler() ) == false ) + { + return false; + } + + // Set the kIOInterfaceNamePrefix and kIOPrimaryInterface properties. + // These may be used by an user space agent as hints when assigning a + // BSD name for the interface. + + setProperty( kIOInterfaceNamePrefix, getNamePrefix() ); + setProperty( kIOPrimaryInterface, isPrimaryInterface() ); + + return true; +} + +//--------------------------------------------------------------------------- +// Destroy the interface. Release all allocated resources. + +void IONetworkInterface::free() +{ + DLOG("IONetworkInterface::free()\n"); + + if ( _clientSet ) + { + // Should not have any clients. + assert(_clientSet->getCount() == 0); + _clientSet->release(); + _clientSet = 0; + } + + if ( _dataDict ) { _dataDict->release(); _dataDict = 0; } + if ( _stateBits ) { _stateBits->release(); _stateBits = 0; } + + if ( _ifLock ) + { + IORecursiveLockFree(_ifLock); + _ifLock = 0; + } + + clearInputQueue(); + + super::free(); +} + +//--------------------------------------------------------------------------- +// Returns true if the receiver of this method is the system's primary +// network interface. + +bool IONetworkInterface::isPrimaryInterface() const +{ + IOService * provider = getController(); + bool isPrimary = false; + + if ( provider ) provider = provider->getProvider(); + + // FIXME: Should rely on a single property, and the platform + // expert should patch the device tree if necessary to make + // it so. 
+ + if ( provider && + ( provider->getProperty( "AAPL,slot-name" ) == 0 ) && + ( provider->getProperty( "built-in" ) || + provider->getProperty( "AAPL,connector" ) || + ( strcmp( provider->getName(), "ethernet" ) == 0 ) ) ) + { + isPrimary = true; + } + + return isPrimary; +} + +//--------------------------------------------------------------------------- +// Get the IONetworkCotroller object that is servicing this interface. + +IONetworkController * IONetworkInterface::getController() const +{ + return _controller; +} + +#ifdef HW_CSUM_SUPPORT +//--------------------------------------------------------------------------- +// Get the value that should be set in the hwassist field in the ifnet +// structure. Currently, this field is solely used for advertising the +// hardware checksumming support. + +static UInt32 getIfnetHardwareAssistValue( IONetworkController * ctr ) +{ + UInt32 input; + UInt32 output; + UInt32 hwassist = 0; + + do { + if ( ctr->getChecksumSupport( + &input, + IONetworkController::kChecksumFamilyTCPIP, + false ) != kIOReturnSuccess ) break; + + if ( ctr->getChecksumSupport( + &output, + IONetworkController::kChecksumFamilyTCPIP, + true ) != kIOReturnSuccess ) break; + + if ( input & output & IONetworkController::kChecksumIP ) + { + hwassist |= CSUM_IP; + } + + if ( ( input & ( IONetworkController::kChecksumTCP | + IONetworkController::kChecksumTCPNoPseudoHeader ) ) + && ( output & ( IONetworkController::kChecksumTCP | + IONetworkController::kChecksumTCPSum16 ) ) ) + { + hwassist |= CSUM_TCP; + } + + if ( ( input & ( IONetworkController::kChecksumUDP | + IONetworkController::kChecksumUDPNoPseudoHeader ) ) + && ( output & ( IONetworkController::kChecksumUDP | + IONetworkController::kChecksumTCPSum16 ) ) ) + { + hwassist |= CSUM_UDP; + } + } + while ( false ); + + return hwassist; +} +#endif HW_CSUM_SUPPORT + +//--------------------------------------------------------------------------- +// Initialize the ifnet structure. 
+ +bool IONetworkInterface::initIfnet(struct ifnet * ifp) +{ + lock(); + + // Register our 'shim' functions. These function pointers + // points to static member functions inside this class. + + ifp->if_output = output_shim; + ifp->if_ioctl = ioctl_shim; + ifp->if_set_bpf_tap = set_bpf_tap_shim; + ifp->if_private = this; + ifp->if_free = &IONetworkStack::bsdInterfaceWasUnregistered; + ifp->if_name = (char *) getNamePrefix(); + + unlock(); + + return true; +} + +//--------------------------------------------------------------------------- +// Implement family specific matching. + +bool IONetworkInterface::matchPropertyTable(OSDictionary * table, + SInt32 * score) +{ + return super::matchPropertyTable(table, score); +} + +//--------------------------------------------------------------------------- +// Take the interface lock. + +void IONetworkInterface::lock() +{ + IORecursiveLockLock(_ifLock); +} + +//--------------------------------------------------------------------------- +// Release the interface lock. + +void IONetworkInterface::unlock() +{ + IORecursiveLockUnlock(_ifLock); +} + +//--------------------------------------------------------------------------- +// Inspect the controller after it has been opened. + +bool IONetworkInterface::controllerDidOpen(IONetworkController * controller) +{ + return true; // by default, always accept the controller open. +} + +//--------------------------------------------------------------------------- +// Perform cleanup before the controller is closed. + +void IONetworkInterface::controllerWillClose(IONetworkController * controller) +{ +} + +//--------------------------------------------------------------------------- +// Handle a client open on the interface. + +bool IONetworkInterface::handleOpen(IOService * client, + IOOptionBits options, + void * argument) +{ + bool accept = false; + bool controllerOpen = false; + + do { + // Was this object already registered as our client? 
+ + if ( _clientSet->containsObject(client) ) + { + DLOG("%s: multiple opens from client %lx\n", + getName(), (UInt32) client); + accept = true; + break; + } + + // If the interface has not received a client open, which also + // implies that the interface has not yet opened the controller, + // then open the controller upon receiving the first open from + // a client. If the controller open fails, the client open will + // be rejected. + + if ( ( getInterfaceState() & kIONetworkInterfaceOpenedState ) == 0 ) + { + if ( ( (controllerOpen = _controller->open(this)) == false ) || + ( controllerDidOpen(_controller) == false ) ) + break; + } + + // Qualify the client. + + if ( handleClientOpen(client, options, argument) == false ) + break; + + // Add the new client object to our client set. + + if ( _clientSet->setObject(client) == false ) + { + handleClientClose(client, 0); + break; + } + + accept = true; + } + while (false); + + // If provider was opened above, but an error has caused us to refuse + // the client open, then close our provider. + + if ( controllerOpen ) + { + if (accept) + { + setInterfaceState( kIONetworkInterfaceOpenedState ); + _controller->registerInterestedDriver( this ); + } + else { + controllerWillClose(_controller); + _controller->close(this); + } + } + + return accept; +} + +//--------------------------------------------------------------------------- +// Handle a client close on the interface. + +void IONetworkInterface::handleClose(IOService * client, IOOptionBits options) +{ + // Remove the object from the client OSSet. + + if ( _clientSet->containsObject(client) ) + { + // Call handleClientClose() to handle the client close. + + handleClientClose( client, options ); + + // If this is the last client, then close our provider. 
+ + if ( _clientSet->getCount() == 1 ) + { + _controller->deRegisterInterestedDriver( this ); + controllerWillClose( _controller ); + _controller->close( this ); + setInterfaceState( 0, kIONetworkInterfaceOpenedState ); + } + + // Remove the client from our OSSet. + + _clientSet->removeObject(client); + } +} + +//--------------------------------------------------------------------------- +// Query whether a client has an open on the interface. + +bool IONetworkInterface::handleIsOpen(const IOService * client) const +{ + if (client) + return _clientSet->containsObject(client); + else + return (_clientSet->getCount() > 0); +} + +//--------------------------------------------------------------------------- +// Handle a client open on the interface. + +bool IONetworkInterface::handleClientOpen(IOService * client, + IOOptionBits options, + void * argument) +{ + if ( OSDynamicCast(IONetworkStack, client) ) + { + // Transition state to registered interface. + + setInterfaceState( kIONetworkInterfaceRegisteredState ); + } + return true; +} + +//--------------------------------------------------------------------------- +// Handle a client close on the interface. + +void IONetworkInterface::handleClientClose(IOService * client, + IOOptionBits options) +{ + if ( OSDynamicCast(IONetworkStack, client) ) + { + // Transition state to unregistered interface. + + setInterfaceState( 0, kIONetworkInterfaceRegisteredState ); + } +} + +//--------------------------------------------------------------------------- +// Register the output packet handler. + +bool IONetworkInterface::registerOutputHandler(OSObject * target, + IOOutputAction action) +{ + lock(); + + // Sanity check on the arguments. 
+ + if ( (getInterfaceState() & kIONetworkInterfaceRegisteredState) || + !target || !action ) + { + unlock(); + return false; + } + + _outTarget = target; + _outAction = action; + + unlock(); + + return true; +} + +//--------------------------------------------------------------------------- +// Feed packets to the input/output BPF packet filter taps. + +static inline void _feedPacketTap(struct ifnet * ifp, + struct mbuf * m, + BPF_FUNC func, + int mode) +{ + if (func) func(ifp, m); +} + +//--------------------------------------------------------------------------- +// Called by a network controller to submit a single packet received from +// the network to the data link layer. + +#define IN_Q_ENQUEUE(m) \ +{ \ + if (_inputQHead == 0) { \ + _inputQHead = _inputQTail = (m); \ + } \ + else { \ + _inputQTail->m_nextpkt = (m) ; \ + _inputQTail = (m); \ + } \ + _inputQCount++; \ +} + +#define DLIL_INPUT(m_head, m_tail) \ +{ \ + if ( m_head ) { \ + dlil_input(_ifp, (m_head), (m_tail)); \ + } \ +} + +UInt32 IONetworkInterface::flushInputQueue() +{ + UInt32 count = _inputQCount; + + DLIL_INPUT(_inputQHead, _inputQTail); + _inputQHead = _inputQTail = 0; + _inputQCount = 0; + + return count; +} + +UInt32 IONetworkInterface::clearInputQueue() +{ + UInt32 count = _inputQCount; + + m_freem_list( _inputQHead ); + + _inputQHead = _inputQTail = 0; + _inputQCount = 0; + + return count; +} + +UInt32 IONetworkInterface::inputPacket(struct mbuf * pkt, + UInt32 length = 0, + IOOptionBits options = 0, + void * param = 0) +{ + UInt32 count; + + assert(pkt); + + // Set the source interface and length of the received frame. 
+ + if ( length ) + { + if ( pkt->m_next == 0 ) + { + pkt->m_pkthdr.len = pkt->m_len = length; + } + else + { + struct mbuf * m = pkt; + pkt->m_pkthdr.len = length; + do { + if (length < (UInt32) m->m_len) + m->m_len = length; + length -= m->m_len; + } while (( m = m->m_next )); + assert(length == 0); + } + } + + pkt->m_pkthdr.rcvif = _ifp; + + // Increment input byte count. + + _ifp->if_ibytes += pkt->m_pkthdr.len; + + // Feed BPF tap. + + _feedPacketTap(_ifp, pkt, _inputFilterFunc, BPF_TAP_INPUT); + + pkt->m_pkthdr.header = pkt->m_data; + pkt->m_pkthdr.len -= sizeof(struct ether_header); + pkt->m_len -= sizeof(struct ether_header); + pkt->m_data += sizeof(struct ether_header); + + if ( options & kInputOptionQueuePacket ) + { + IN_Q_ENQUEUE(pkt); + count = 0; + } + else + { + if ( _inputQHead ) // queue is not empty + { + IN_Q_ENQUEUE(pkt); + + count = _inputQCount; + + DLIL_INPUT(_inputQHead, _inputQTail); + + _inputQHead = _inputQTail = 0; + _inputQCount = 0; + } + else + { + DLIL_INPUT(pkt, pkt); + count = 1; + } + } + return count; +} + +//--------------------------------------------------------------------------- +// Deliver an event to the network layer. + +bool IONetworkInterface::inputEvent(UInt32 type, void * data) +{ + bool success = true; + +#ifdef KEV_DL_LINK_ON + struct { + kern_event_msg header; + u_long unit; + char if_name[IFNAMSIZ]; + } event; + + switch (type) + { + // Deliver an IOKit defined event. + + case kIONetworkEventTypeLinkUp: + case kIONetworkEventTypeLinkDown: + + if ( ( _ifp->if_flags & IFF_UP ) == 0 ) + { + break; + } + + bzero((void *) &event, sizeof(event)); + + event.header.total_size = sizeof(event); + event.header.vendor_code = KEV_VENDOR_APPLE; + event.header.kev_class = KEV_NETWORK_CLASS; + event.header.kev_subclass = KEV_DL_SUBCLASS; + event.header.event_code = (type == kIONetworkEventTypeLinkUp) ? 
+ KEV_DL_LINK_ON : KEV_DL_LINK_OFF; + event.header.event_data[0] = _ifp->if_family; + event.unit = (u_long) _ifp->if_unit; + strncpy(&event.if_name[0], _ifp->if_name, IFNAMSIZ); + + dlil_event(_ifp, &event.header); + break; + + // Deliver a raw kernel event to DLIL. + // The data argument must point to a kern_event_msg structure. + + case kIONetworkEventTypeDLIL: + dlil_event(_ifp, (struct kern_event_msg *) data); + break; + + default: + IOLog("IONetworkInterface: unknown event type %lx\n", type); + success = false; + break; + } +#endif + + return success; +} + +//--------------------------------------------------------------------------- +// SIOCSIFMTU (set interface MTU) ioctl handler. + +SInt32 IONetworkInterface::syncSIOCSIFMTU(IONetworkController * ctr, + struct ifreq * ifr) +{ + SInt32 error; + UInt32 newMTU = ifr->ifr_mtu; + + // If change is not necessary, return success without getting the + // controller involved. + + if ( getMaxTransferUnit() == newMTU ) + return 0; + + // Request the controller to switch MTU size. + + error = errnoFromReturn( ctr->setMaxPacketSize(newMTU) ); + + if ( error == 0 ) + { + // Controller reports success. Update the interface MTU size + // property. + + setMaxTransferUnit(newMTU); + } + + return error; +} + +//--------------------------------------------------------------------------- +// SIOCSIFMEDIA (SET interface media) ioctl handler. + +SInt32 IONetworkInterface::syncSIOCSIFMEDIA(IONetworkController * ctr, + struct ifreq * ifr) +{ + OSDictionary * mediumDict; + IONetworkMedium * medium; + SInt32 error; + + mediumDict = ctr->copyMediumDictionary(); // creates a copy + if ( mediumDict == 0 ) + { + // unable to allocate memory, or no medium dictionary. + return EOPNOTSUPP; + } + + medium = IONetworkMedium::getMediumWithType(mediumDict, ifr->ifr_media); + if ( medium == 0 ) + { + // Exact type was not found. Try a partial match. + // ifconfig program sets the media type and media + // options separately. 
This should not be allowed! + + medium = IONetworkMedium::getMediumWithType(mediumDict, + ifr->ifr_media, + ~(IFM_TMASK | IFM_NMASK)); + if ( medium == 0 ) + { + mediumDict->release(); + return EINVAL; // requested medium not found. + } + } + + // It may be possible for the controller to update the medium + // dictionary and perhaps delete the medium entry that we have + // selected from our copy of the stale dictionary. This is + // harmless since IONetworkController will filter out invalid + // selections before calling the driver. + + error = errnoFromReturn( ctr->selectMediumWithName(medium->getName()) ); + + mediumDict->release(); + + return error; +} + +//--------------------------------------------------------------------------- +// SIOCGIFMEDIA (GET interface media) ioctl handler. + +SInt32 IONetworkInterface::syncSIOCGIFMEDIA(IONetworkController * ctr, + struct ifreq * ifr) +{ + OSDictionary * mediumDict = 0; + UInt mediumCount = 0; + UInt maxCount; + OSCollectionIterator * iter = 0; + UInt32 * typeList; + UInt typeListSize; + OSSymbol * keyObject; + SInt32 error = 0; + struct ifmediareq * ifmr = (struct ifmediareq *) ifr; + + // Maximum number of medium types that the caller will accept. + // + maxCount = ifmr->ifm_count; + + do { + mediumDict = ctr->copyMediumDictionary(); // creates a copy + if (mediumDict == 0) + { + break; // unable to allocate memory, or no medium dictionary. + } + + if ((mediumCount = mediumDict->getCount()) == 0) + break; // no medium in the medium dictionary + + if (maxCount == 0) + break; // caller is only probing for support and media count. + + if (maxCount < mediumCount) + { + // user buffer is too small to hold all medium entries. + error = E2BIG; + + // Proceed with partial copy on E2BIG. This follows the + // SIOCGIFMEDIA handling practice in bsd/net/if_media.c. + // + // break; + } + + // Create an iterator to loop through the medium entries in the + // dictionary. 
+ // + iter = OSCollectionIterator::withCollection(mediumDict); + if (!iter) + { + error = ENOMEM; + break; + } + + // Allocate memory for the copyout buffer. + // + typeListSize = maxCount * sizeof(UInt32); + typeList = (UInt32 *) IOMalloc(typeListSize); + if (!typeList) + { + error = ENOMEM; + break; + } + bzero(typeList, typeListSize); + + // Iterate through the medium dictionary and copy the type of + // each medium entry to typeList[]. + // + mediumCount = 0; + while ( (keyObject = (OSSymbol *) iter->getNextObject()) && + (mediumCount < maxCount) ) + { + IONetworkMedium * medium = (IONetworkMedium *) + mediumDict->getObject(keyObject); + if (!medium) + continue; // should not happen! + + typeList[mediumCount++] = medium->getType(); + } + + if (mediumCount) + { + error = copyout((caddr_t) typeList, + (caddr_t) ifmr->ifm_ulist, + typeListSize); + } + + IOFree(typeList, typeListSize); + } + while (0); + + ifmr->ifm_active = ifmr->ifm_current = IFM_NONE; + ifmr->ifm_status = 0; + ifmr->ifm_count = mediumCount; + + // Get a copy of the controller's property table and read the + // link status, current, and active medium. 
+ + OSDictionary * pTable = ctr->dictionaryWithProperties(); + if (pTable) + { + OSNumber * linkStatus = (OSNumber *) + pTable->getObject(kIOLinkStatus); + if (linkStatus) + ifmr->ifm_status = linkStatus->unsigned32BitValue(); + + if (mediumDict) + { + IONetworkMedium * medium; + OSSymbol * mediumName; + + if ((mediumName = (OSSymbol *) pTable->getObject(kIOSelectedMedium)) + && (medium = (IONetworkMedium *) + mediumDict->getObject(mediumName))) + { + ifmr->ifm_current = medium->getType(); + } + + if ((mediumName = (OSSymbol *) pTable->getObject(kIOActiveMedium)) + && (medium = (IONetworkMedium *) + mediumDict->getObject(mediumName))) + { + ifmr->ifm_active = medium->getType(); + } + } + pTable->release(); + } + + if (iter) + iter->release(); + + if (mediumDict) + mediumDict->release(); + + return error; +} + +//--------------------------------------------------------------------------- +// Handle ioctl commands sent to the network interface. + +SInt32 IONetworkInterface::performCommand(IONetworkController * ctr, + UInt32 cmd, + void * arg0, + void * arg1) +{ + struct ifreq * ifr = (struct ifreq *) arg1; + SInt32 ret = EOPNOTSUPP; + + if ( (ifr == 0) || (ctr == 0) ) + return EINVAL; + + switch ( cmd ) + { + // Get interface MTU. + + case SIOCGIFMTU: + ifr->ifr_mtu = getMaxTransferUnit(); + ret = 0; // no error + break; + + // Get interface media type and status. 
+ + case SIOCGIFMEDIA: + ret = syncSIOCGIFMEDIA(ctr, ifr); + break; + + case SIOCSIFMTU: + case SIOCSIFMEDIA: + ret = (int) ctr->executeCommand( + this, /* client */ + (IONetworkController::Action) + &IONetworkInterface::performGatedCommand, + this, /* target */ + ctr, /* param0 */ + (void *) cmd, /* param1 */ + arg0, /* param2 */ + arg1 ); /* param3 */ + break; + + default: + // DLOG(%s: command not handled (%08lx), getName(), cmd); + break; + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Perform an ioctl command on the controller's workloop context. + +int IONetworkInterface::performGatedCommand(void * target, + void * arg1_ctr, + void * arg2_cmd, + void * arg3_0, + void * arg4_1) +{ + IONetworkInterface * self = (IONetworkInterface *) target; + IONetworkController * ctr = (IONetworkController *) arg1_ctr; + struct ifreq * ifr = (struct ifreq *) arg4_1; + SInt32 ret = EOPNOTSUPP; + + // Refuse to issue I/O to the controller if it is in a power state + // that renders it "unusable". + + if ( self->getInterfaceState() & kIONetworkInterfaceDisabledState ) + return EPWROFF; + + switch ( (UInt32) arg2_cmd ) + { + // Set interface MTU. + + case SIOCSIFMTU: + ret = self->syncSIOCSIFMTU(ctr, ifr); + break; + + // Set interface (controller) media type. + + case SIOCSIFMEDIA: + ret = self->syncSIOCSIFMEDIA(ctr, ifr); + break; + } + + return ret; +} + +//--------------------------------------------------------------------------- +// if_ioctl() handler - Calls performCommand() when we receive an ioctl +// from DLIL. 
+ +int +IONetworkInterface::ioctl_shim(struct ifnet * ifp, u_long cmd, caddr_t data) +{ + assert(ifp && ifp->if_private); + + IONetworkInterface * self = (IONetworkInterface *) ifp->if_private; + + assert(ifp == self->_ifp); + + return self->performCommand( self->_controller, + cmd, + (void *) ifp, + (void *) data ); +} + +//--------------------------------------------------------------------------- +// if_output() handler. +// +// Handle a call from the network stack to transmit the given mbuf. +// For now, we can assume that the mbuf is singular, and never chained. + +int IONetworkInterface::output_shim(struct ifnet * ifp, struct mbuf * m) +{ + assert(ifp && ifp->if_private); + + IONetworkInterface * self = (IONetworkInterface *) ifp->if_private; + + assert(ifp == self->_ifp); + + if ( m == 0 ) + { + DLOG("IONetworkInterface: NULL output mbuf\n"); + return EINVAL; + } + + if ( (m->m_flags & M_PKTHDR) == 0 ) + { + DLOG("IONetworkInterface: M_PKTHDR bit not set\n"); + m_freem(m); + return EINVAL; + } + + // Increment output byte counter. + + ifp->if_obytes += m->m_pkthdr.len; + + // Feed the output filter tap. + + _feedPacketTap(ifp, m, self->_outputFilterFunc, BPF_TAP_OUTPUT); + + // Forward the packet to the registered output packet handler. + + return ((self->_outTarget)->*(self->_outAction))(m, 0); +} + +//--------------------------------------------------------------------------- +// if_set_bpf_tap() handler. Handles request from the DLIL to enable or +// disable the input/output filter taps. +// +// FIXME - locking may be needed. 
+ +int IONetworkInterface::set_bpf_tap_shim(struct ifnet * ifp, + int mode, + BPF_FUNC func) +{ + assert(ifp && ifp->if_private); + + IONetworkInterface * self = (IONetworkInterface *) ifp->if_private; + + assert(ifp == self->_ifp); + + self->lock(); + + switch ( mode ) + { + case BPF_TAP_DISABLE: + self->_inputFilterFunc = self->_outputFilterFunc = 0; + break; + + case BPF_TAP_INPUT: + assert(func); + self->_inputFilterFunc = func; + break; + + case BPF_TAP_OUTPUT: + assert(func); + self->_outputFilterFunc = func; + break; + + case BPF_TAP_INPUT_OUTPUT: + assert(func); + self->_inputFilterFunc = self->_outputFilterFunc = func; + break; + + default: + DLOG("IONetworkInterface: Unknown BPF tap mode %d\n", mode); + break; + } + + self->unlock(); + + return 0; +} + +//--------------------------------------------------------------------------- +// As the name implies, this function does nothing. This will get called +// if the network layer tries to call the if_watchdog function pointer +// in ifnet. This should not happen. IOKit does not use this watchdog +// timer facility. + +void IONetworkInterface::null_shim(struct ifnet * /*ifp*/) +{ + IOLog("IONetworkInterface::null_shim called!\n"); +} + +//--------------------------------------------------------------------------- +// ifnet field (and property table) getter/setter. + +bool IONetworkInterface::_setInterfaceProperty(UInt32 value, + UInt32 mask, + UInt32 bytes, + void * addr, + char * key) +{ + bool updateOk = false; + UInt32 newValue; + OSNumber * number; + + lock(); + + // Update the property in ifnet. 
+ + switch (bytes) + { + case 1: + newValue = (*((UInt8 *) addr) & mask) | value; + *((UInt8 *) addr) = (UInt8) newValue; + break; + case 2: + newValue = (*((UInt16 *) addr) & mask) | value; + *((UInt16 *) addr) = (UInt16) newValue; + break; + case 4: + newValue = (*((UInt32 *) addr) & mask) | value; + *((UInt32 *) addr) = (UInt32) newValue; + break; + default: + goto abort; + } + + // Update the OSNumber in the property table. + + if ( key ) + { + if ( (number = (OSNumber *) getProperty(key)) ) + { + number->setValue(newValue); + updateOk = true; + } + else + { + updateOk = setProperty(key, newValue, bytes * 8); + } + } +abort: + unlock(); + return updateOk; +} + +#define IO_IFNET_GET(func, type, field) \ +type IONetworkInterface:: ## func() const \ +{ \ + type ret; \ + ((IONetworkInterface *) this)->lock(); \ + ret = _ifp ? _ifp-> ## field : 0; \ + ((IONetworkInterface *) this)->unlock(); \ + return ret; \ +} + +#define IO_IFNET_SET(func, type, field, propName) \ +bool IONetworkInterface:: ## func(type value) \ +{ \ + return _setInterfaceProperty( \ + (UInt32) value, \ + 0, \ + sizeof(type), \ + (void *) &_ifp-> ## field, \ + propName); \ +} + +#define IO_IFNET_RMW(func, type, field, propName) \ +bool IONetworkInterface:: ## func(type value, type clear = 0) \ +{ \ + return _setInterfaceProperty( \ + (UInt32) value, \ + (UInt32) ~clear, \ + sizeof(type), \ + (void *) &_ifp-> ## field, \ + propName); \ +} + +//--------------------------------------------------------------------------- +// Interface type accessors (ifp->if_type). The list of interface types is +// defined in . + +IO_IFNET_SET(setInterfaceType, UInt8, if_type, kIOInterfaceType) +IO_IFNET_GET(getInterfaceType, UInt8, if_type) + +//--------------------------------------------------------------------------- +// Mtu (MaxTransferUnit) accessors (ifp->if_mtu). 
IO_IFNET_SET(setMaxTransferUnit, UInt32, if_mtu, kIOMaxTransferUnit)
IO_IFNET_GET(getMaxTransferUnit, UInt32, if_mtu)

//---------------------------------------------------------------------------
// Flags accessors (ifp->if_flags). This is a read-modify-write operation.

IO_IFNET_RMW(setFlags, UInt16, if_flags, kIOInterfaceFlags)
IO_IFNET_GET(getFlags, UInt16, if_flags)

//---------------------------------------------------------------------------
// EFlags accessors (ifp->if_eflags). This is a read-modify-write operation.

IO_IFNET_RMW(setExtraFlags, UInt32, if_eflags, kIOInterfaceExtraFlags)
IO_IFNET_GET(getExtraFlags, UInt32, if_eflags)

//---------------------------------------------------------------------------
// MediaAddressLength accessors (ifp->if_addrlen)

IO_IFNET_SET(setMediaAddressLength, UInt8, if_addrlen, kIOMediaAddressLength)
IO_IFNET_GET(getMediaAddressLength, UInt8, if_addrlen)

//---------------------------------------------------------------------------
// MediaHeaderLength accessors (ifp->if_hdrlen)

IO_IFNET_SET(setMediaHeaderLength, UInt8, if_hdrlen, kIOMediaHeaderLength)
IO_IFNET_GET(getMediaHeaderLength, UInt8, if_hdrlen)

//---------------------------------------------------------------------------
// Interface unit number. The unit number for the interface is assigned
// by our client.

IO_IFNET_SET(setUnitNumber, UInt16, if_unit, kIOInterfaceUnit)
IO_IFNET_GET(getUnitNumber, UInt16, if_unit)

//---------------------------------------------------------------------------
// Return true if the interface has been registered with the network layer,
// false otherwise.

bool IONetworkInterface::isRegistered() const
{
    // Registration is tracked by a bit in the interface state flags.
    return (bool)(getInterfaceState() & kIONetworkInterfaceRegisteredState);
}

//---------------------------------------------------------------------------
// Return the interface state flags.
UInt32 IONetworkInterface::getInterfaceState() const
{
    // Read the state bits cached in the _stateBits OSNumber.
    return _stateBits->unsigned32BitValue();
}

//---------------------------------------------------------------------------
// Set (or clear) the interface state flags.
//
// set:   bits to set in the state flags.
// clear: bits to clear from the state flags (clear wins over set for
//        bits present in both arguments).
//
// Returns the resulting state flags.

UInt32 IONetworkInterface::setInterfaceState( UInt32 set,
                                              UInt32 clear )
{
    UInt32 val;

    assert( _stateBits );

    lock();

    // Apply set bits first, then clear bits, under the interface lock.
    val = ( _stateBits->unsigned32BitValue() | set ) & ~clear;
    _stateBits->setValue( val );

    unlock();

    return val;
}

//---------------------------------------------------------------------------
// Perform a lookup of the dictionary kept by the interface,
// and return an entry that matches the specified string key.
//
// key: Search for an IONetworkData entry with this key.
//
// Returns the matching entry, or 0 if no match was found.

IONetworkData * IONetworkInterface::getNetworkData(const OSSymbol * key) const
{
    // OSDynamicCast returns 0 for a missing key or a non-IONetworkData entry.
    return OSDynamicCast(IONetworkData, _dataDict->getObject(key));
}

IONetworkData * IONetworkInterface::getNetworkData(const char * key) const
{
    // C-string variant of the lookup above.
    return OSDynamicCast(IONetworkData, _dataDict->getObject(key));
}

//---------------------------------------------------------------------------
// A private function to copy the data dictionary to the property table.
// A snapshot (shallow copy) is published so later mutations of _dataDict
// do not alter the installed property.
//
// Returns true if the copy was created and installed, false otherwise.

bool IONetworkInterface::_syncNetworkDataDict()
{
    OSDictionary * aCopy = OSDictionary::withDictionary(_dataDict);
    bool           ret   = false;

    if (aCopy) {
        ret = setProperty(kIONetworkData, aCopy);
        aCopy->release();   // property table holds its own retain
    }

    return ret;
}

//---------------------------------------------------------------------------
// Remove an entry from the IONetworkData dictionary managed by the interface.
// The removed object is released.
+ +bool IONetworkInterface::removeNetworkData(const OSSymbol * aKey) +{ + bool ret = false; + + lock(); + + do { + if ( getInterfaceState() & kIONetworkInterfaceOpenedState ) + break; + + _dataDict->removeObject(aKey); + ret = _syncNetworkDataDict(); + } + while (0); + + unlock(); + + return ret; +} + +bool IONetworkInterface::removeNetworkData(const char * aKey) +{ + bool ret = false; + + lock(); + + do { + if ( getInterfaceState() & kIONetworkInterfaceOpenedState ) + break; + + _dataDict->removeObject(aKey); + ret = _syncNetworkDataDict(); + } + while (0); + + unlock(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Add an IONetworkData object to a dictionary managed by the interface. + +bool IONetworkInterface::addNetworkData(IONetworkData * aData) +{ + bool ret = false; + + if (OSDynamicCast(IONetworkData, aData) == 0) + return false; + + lock(); + + if (( getInterfaceState() & kIONetworkInterfaceOpenedState ) == 0) + { + if ((ret = _dataDict->setObject(aData->getKey(), aData))) + ret = _syncNetworkDataDict(); + } + + unlock(); + + return ret; +} + +//--------------------------------------------------------------------------- +// Create a new IOUserClient to handle client requests. The default +// implementation will create an IONetworkUserClient instance if +// the type given is kIONUCType. 
IOReturn IONetworkInterface::newUserClient(task_t           owningTask,
                                           void *           /*security_id*/,
                                           UInt32           type,
                                           IOUserClient **  handler)
{
    IOReturn              err = kIOReturnSuccess;
    IONetworkUserClient * client;

    // Only the IONetworkUserClient connection type is supported.
    if (type != kIONUCType)
        return kIOReturnBadArgument;

    client = IONetworkUserClient::withTask(owningTask);

    // NOTE(review): attach/start failures are also reported as
    // kIOReturnNoMemory here, even though the failure may not be an
    // allocation failure.
    if (!client || !client->attach(this) || !client->start(this))
    {
        if (client)
        {
            client->detach(this);
            client->release();
            client = 0;
        }
        err = kIOReturnNoMemory;
    }

    // On failure *handler is set to 0.
    *handler = client;

    return err;
}

//---------------------------------------------------------------------------
// Handle controller's power state transitions.
// Mark the interface disabled before the controller leaves a usable state.

IOReturn
IONetworkInterface::controllerWillChangePowerState(
                                IONetworkController * controller,
                                IOPMPowerFlags        flags,
                                UInt32                stateNumber,
                                IOService *           policyMaker )
{
    if ( ( flags & IOPMDeviceUsable ) == 0 )
    {
        setInterfaceState( kIONetworkInterfaceDisabledState );
    }
    return kIOReturnSuccess;
}

// Clear the disabled flag after the controller becomes usable again.

IOReturn
IONetworkInterface::controllerDidChangePowerState(
                                IONetworkController * controller,
                                IOPMPowerFlags        flags,
                                UInt32                stateNumber,
                                IOService *           policyMaker )
{
    if ( flags & IOPMDeviceUsable )
    {
        setInterfaceState( 0, kIONetworkInterfaceDisabledState );
    }
    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// Static member functions called by power-management notification handlers.
// Act as stub functions that will simply forward the call to virtual member
// functions.
+ +IOReturn +IONetworkInterface::sControllerWillChangePowerState( + IONetworkInterface * netif, + void * param0, + void * param1, + void * param2, + void * param3 ) +{ + return netif->controllerWillChangePowerState( + (IONetworkController *) param0, + (IOPMPowerFlags) param1, + (UInt32) param2, + (IOService *) param3 ); +} + +IOReturn +IONetworkInterface::sControllerDidChangePowerState( + IONetworkInterface * netif, + void * param0, + void * param1, + void * param2, + void * param3 ) +{ + return netif->controllerDidChangePowerState( + (IONetworkController *) param0, + (IOPMPowerFlags) param1, + (UInt32) param2, + (IOService *) param3 ); +} + +//--------------------------------------------------------------------------- +// Handle notitifications triggered by controller's power state change. + +IOReturn +IONetworkInterface::powerStateWillChangeTo( IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ) +{ + _controller->executeCommand( + this, /* client */ + (IONetworkController::Action) + &IONetworkInterface::sControllerWillChangePowerState, + this, /* target */ + (void *) _controller, /* param0 */ + (void *) flags, /* param1 */ + (void *) stateNumber, /* param2 */ + (void *) policyMaker); /* param3 */ + + return IOPMAckImplied; +} + +IOReturn +IONetworkInterface::powerStateDidChangeTo( IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ) +{ + _controller->executeCommand( + this, /* client */ + (IONetworkController::Action) + &IONetworkInterface::sControllerDidChangePowerState, + this, /* target */ + (void *) _controller, /* param0 */ + (void *) flags, /* param1 */ + (void *) stateNumber, /* param2 */ + (void *) policyMaker); /* param3 */ + + return IOPMAckImplied; +} diff --git a/iokit/Families/IONetworking/IONetworkMedium.cpp b/iokit/Families/IONetworking/IONetworkMedium.cpp new file mode 100644 index 000000000..7a10de58b --- /dev/null +++ b/iokit/Families/IONetworking/IONetworkMedium.cpp @@ -0,0 +1,383 @@ +/* + * Copyright (c) 
1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkMedium.cpp + * + * HISTORY + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +//--------------------------------------------------------------------------- +// OSMetaClass macros. + +#define super OSObject +OSDefineMetaClassAndStructors( IONetworkMedium, OSObject ) +OSMetaClassDefineReservedUnused( IONetworkMedium, 0); +OSMetaClassDefineReservedUnused( IONetworkMedium, 1); +OSMetaClassDefineReservedUnused( IONetworkMedium, 2); +OSMetaClassDefineReservedUnused( IONetworkMedium, 3); + +//--------------------------------------------------------------------------- +// Initialize an IONetworkMedium instance. +// +// type: The medium type, the fields are encoded with bits defined in +// IONetworkMedium.h. +// +// speed: The maximum (or the only) link speed supported over this medium +// in units of bits per second. +// +// flags: An optional flag for the medium object. +// See IONetworkMedium.h for defined flags. 
//
// index: An optional 32-bit index assigned by the caller. Drivers can use
//        this to store an index or a pointer to a media table inside the
//        driver, or it may map to a driver defined media type.
//
// name:  A name to assign to this medium object. If 0, then a name
//        will be created based on the medium type given using nameForType().
//
// Returns true on success, false otherwise.

bool IONetworkMedium::init(IOMediumType  type,
                           UInt64        speed,
                           UInt32        flags = 0,
                           UInt32        index = 0,
                           const char *  name  = 0)
{
    if ( super::init() == false )
        return false;

    _type  = type;
    _flags = flags;
    _speed = speed;
    _index = index;

    // Either adopt the caller-supplied name, or synthesize one from the
    // medium type.
    if (name)
        _name = IONetworkMedium::nameForType(type);
    else
        _name = IONetworkMedium::nameForType(type);

    if (name)
        _name = OSSymbol::withCString(name);
    else
        _name = IONetworkMedium::nameForType(type);

    if (!_name)
        return false;

    return true;
}

//---------------------------------------------------------------------------
// Factory method which performs allocation and initialization
// of an IONetworkMedium instance.
//
// Returns an IONetworkMedium instance on success, or 0 otherwise.

IONetworkMedium * IONetworkMedium::medium(IOMediumType  type,
                                          UInt64        speed,
                                          UInt32        flags = 0,
                                          UInt32        index = 0,
                                          const char *  name  = 0)
{
    IONetworkMedium * medium = new IONetworkMedium;

    // Release the half-constructed object if init() fails.
    if (medium && !medium->init(type, speed, flags, index, name))
    {
        medium->release();
        medium = 0;
    }

    return medium;
}

//---------------------------------------------------------------------------
// Free the IONetworkMedium instance.

void IONetworkMedium::free()
{
    // Drop the retained name symbol before the superclass teardown.
    if (_name)
    {
        _name->release();
        _name = 0;
    }
    super::free();
}

//---------------------------------------------------------------------------
// Return the assigned medium type.

IOMediumType IONetworkMedium::getType() const
{
    return _type;
}

//---------------------------------------------------------------------------
// Return the medium flags.
UInt32 IONetworkMedium::getFlags() const
{
    return _flags;
}

//---------------------------------------------------------------------------
// Return the maximum medium speed.

UInt64 IONetworkMedium::getSpeed() const
{
    return _speed;
}

//---------------------------------------------------------------------------
// Return the assigned index.

UInt32 IONetworkMedium::getIndex() const
{
    return _index;
}

//---------------------------------------------------------------------------
// Return the name for this instance.

const OSSymbol * IONetworkMedium::getName() const
{
    return _name;
}

//---------------------------------------------------------------------------
// Given a medium type, create a unique OSSymbol name for the medium.
// The caller is responsible for releasing the OSSymbol object returned.
//
// type: A medium type. See IONetworkMedium.h for type encoding.
//
// Returns an OSSymbol created based on the type provided.

const OSSymbol * IONetworkMedium::nameForType(IOMediumType type)
{
    // 8 hex digits plus terminator; buffer[10] is sufficient for a
    // 32-bit type value.
    char buffer[10];

    sprintf(buffer, "%08lx", type);

    // Caller must remember to free the OSSymbol!
    //
    return OSSymbol::withCString(buffer);
}

//---------------------------------------------------------------------------
// Test for equality between two IONetworkMedium objects.
// Two IONetworkMedium objects are considered equal if
// they have similar properties assigned to them during initialization.
//
// medium: An IONetworkMedium to test against the IONetworkMedium
//         object being called.
//
// Returns true if equal, false otherwise.
+ +bool IONetworkMedium::isEqualTo(const IONetworkMedium * medium) const +{ + return ( (medium->_name == _name) && + (medium->_type == _type) && + (medium->_flags == _flags) && + (medium->_speed == _speed) && + (medium->_index == _index) ); +} + +//--------------------------------------------------------------------------- +// Test for equality between a IONetworkMedium object and an OSObject. +// The OSObject is considered equal to the IONetworkMedium object if the +// OSObject is an IONetworkMedium, and they have similar properties assigned +// to them during initialization. +// +// obj: An OSObject to test against the IONetworkMedium object being called. +// +// Returns true if equal, false otherwise. + +bool IONetworkMedium::isEqualTo(const OSMetaClassBase * obj) const +{ + IONetworkMedium * medium; + if ((medium = OSDynamicCast(IONetworkMedium, obj))) + return isEqualTo(medium); + else + return false; +} + +//--------------------------------------------------------------------------- +// Create an OSData containing an IOMediumDescriptor structure (not copied), +// and ask the OSData to serialize. +// +// s: An OSSerialize object to handle the serialization. +// +// Returns true on success, false otherwise. 
+ +static bool addNumberToDict(OSDictionary * dict, + const char * key, + UInt32 val, + UInt32 bits = 32) +{ + OSNumber * num = OSNumber::withNumber(val, bits); + bool ret; + + if ( num == 0 ) return false; + ret = dict->setObject( key, num ); + num->release(); + return ret; +} + +bool IONetworkMedium::serialize(OSSerialize * s) const +{ + bool ret; + OSDictionary * dict; + + dict = OSDictionary::withCapacity(4); + if ( dict == 0 ) return false; + + addNumberToDict(dict, kIOMediumType, getType()); + addNumberToDict(dict, kIOMediumSpeed, getSpeed(), 64); + addNumberToDict(dict, kIOMediumIndex, getIndex()); + addNumberToDict(dict, kIOMediumFlags, getFlags()); + + ret = dict->serialize(s); + dict->release(); + + return ret; +} + +//--------------------------------------------------------------------------- +// A helper function to add an IONetworkMedium object to a given dictionary. +// The name of the medium is used as the key for the new dictionary entry. +// +// dict: An OSDictionary object where the medium object should be added to. +// medium: The IONetworkMedium object to add to the dictionary. +// +// Returns true on success, false otherwise. + +bool IONetworkMedium::addMedium(OSDictionary * dict, + const IONetworkMedium * medium) +{ + // Arguments type checking. + // + if (!OSDynamicCast(OSDictionary, dict) || + !OSDynamicCast(IONetworkMedium, medium)) + return false; + + return dict->setObject(medium->getName(), medium); +} + +//--------------------------------------------------------------------------- +// A helper function to remove an entry in a dictionary with a key that +// matches the name of the IONetworkMedium object provided. +// +// dict: An OSDictionary object where the medium object should be removed +// from. +// medium: The name of this medium object is used as the removal key. + +void IONetworkMedium::removeMedium(OSDictionary * dict, + const IONetworkMedium * medium) +{ + // Arguments type checking. 
+ // + if (!OSDynamicCast(OSDictionary, dict) || + !OSDynamicCast(IONetworkMedium, medium)) + return; + + dict->removeObject(medium->getName()); +} + +//--------------------------------------------------------------------------- +// Iterate through a dictionary and return an IONetworkMedium entry that +// satisfies the matching criteria. Returns 0 if there is no match. + +IONetworkMedium * IONetworkMedium::getMediumWithType( + const OSDictionary * dict, + IOMediumType type, + IOMediumType mask = 0) +{ + OSCollectionIterator * iter; + OSSymbol * key; + IONetworkMedium * medium; + IONetworkMedium * match = 0; + + if (!dict) return 0; + + // Shouldn't withCollection take an (const OSDictionary *) argument? + + iter = OSCollectionIterator::withCollection((OSDictionary *) dict); + if (!iter) + return 0; + + while ( (key = (OSSymbol *) iter->getNextObject()) ) + { + medium = OSDynamicCast(IONetworkMedium, dict->getObject(key)); + if (medium == 0) continue; + + if ( ( (medium->getType() ^ type) & ~mask) == 0 ) + { + match = medium; + break; + } + } + + iter->release(); + + return match; +} + +IONetworkMedium * IONetworkMedium::getMediumWithIndex( + const OSDictionary * dict, + UInt32 index, + UInt32 mask = 0) +{ + OSCollectionIterator * iter; + OSSymbol * key; + IONetworkMedium * medium; + IONetworkMedium * match = 0; + + if (!dict) return 0; + + // Shouldn't withCollection take an (const OSDictionary *) argument? 
+ + iter = OSCollectionIterator::withCollection((OSDictionary *) dict); + if (!iter) + return 0; + + while ( (key = (OSSymbol *) iter->getNextObject()) ) + { + medium = OSDynamicCast(IONetworkMedium, dict->getObject(key)); + if (medium == 0) continue; + + if ( ( (medium->getIndex() ^ index) & ~mask) == 0 ) + { + match = medium; + break; + } + } + + iter->release(); + + return match; +} diff --git a/iokit/Families/IONetworking/IONetworkStack.cpp b/iokit/Families/IONetworking/IONetworkStack.cpp new file mode 100644 index 000000000..3f5dbc05e --- /dev/null +++ b/iokit/Families/IONetworking/IONetworkStack.cpp @@ -0,0 +1,1124 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkStack.cpp - An IOKit proxy for the BSD network stack. + * + * HISTORY + * + * IONetworkStack abstracts essential network stack services. These + * include registering/unregistering network interfaces, and interface + * name space management. + * + * Only a single IONetworkStack object is instantiated. 
This object will + * register to receive a notification when a network interface object is + * first published. The notification handler is responsible for attaching + * the network stack object to the interface object as a client. When the + * interface is terminated, this linkage is severed. + * + * This object does not participate in the data/packet flow. The interface + * object will interact directly with DLIL to send and to receive packets. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +extern "C" { +#include +#include +#include +#include +#include +#include +#include +#include +void ether_ifattach(struct ifnet * ifp); // FIXME +} + +#define super IOService +OSDefineMetaClassAndStructors( IONetworkStack, IOService ) +OSMetaClassDefineReservedUnused( IONetworkStack, 0); +OSMetaClassDefineReservedUnused( IONetworkStack, 1); +OSMetaClassDefineReservedUnused( IONetworkStack, 2); +OSMetaClassDefineReservedUnused( IONetworkStack, 3); + +#ifdef DEBUG_XXX +#define __LOG(class, fn, fmt, args...) IOLog(class "::%s " fmt, fn, ## args) +#define DLOG(fmt, args...) __LOG("IONetworkStack", __FUNCTION__, fmt, ## args) +#else +#define DLOG(fmt, args...) +#endif + +#define NETIF_FLAGS(n) ((n)->_clientVar[0]) +#define SET_NETIF_FLAGS(n, x) (NETIF_FLAGS(n) |= (x)) +#define CLR_NETIF_FLAGS(n, x) (NETIF_FLAGS(n) &= ~(x)) + +static IONetworkStack * gIONetworkStack = 0; + +// Flags encoded on the interface object. +// +enum { + kInterfaceFlagActive = 0x01, // Interface is awaiting registration + kInterfaceFlagRegistered = 0x02, // Interface has registered with DLIL + kInterfaceFlagRegistering = 0x04 // Interface is registering with DLIL +}; + +// IONetworkStackUserClient definition. 
+// +#include + +class IONetworkStackUserClient : public IOUserClient +{ + OSDeclareDefaultStructors( IONetworkStackUserClient ) + +protected: + IONetworkStack * _provider; + +public: + static IONetworkStackUserClient * withTask( task_t owningTask ); + virtual bool start( IOService * provider ); + virtual IOReturn clientClose(); + virtual IOReturn clientDied(); + virtual IOReturn setProperties( OSObject * properties ); +}; + +//--------------------------------------------------------------------------- +// Initialize the IONetworkStack object. + +bool IONetworkStack::init( OSDictionary * properties ) +{ + // Init our superclass first. + + if ( super::init(properties) == false ) + return false; + + return true; +} + +//--------------------------------------------------------------------------- +// IONetworkStack was matched to its provider (IOResources), now start it up. + +bool IONetworkStack::start( IOService * provider ) +{ + DLOG("%p\n", provider); + + if ( super::start(provider) == false ) + return false; + + // Only a single IONetworkStack object is created, and a reference + // to this object is stored in a global variable. + + // When the boot process is VERY VERY slow for some unknown reason + // we get two instances of IONetworkStack and theassert below fires. + // so I am commenting the assert and replacing it with an if statement. + // assert( gIONetworkStack == 0 ); + + if ( gIONetworkStack != 0 ) + return false; + + gIONetworkStack = this; + + // Create containers to store interface objects. + + _ifSet = OSOrderedSet::withCapacity(10); + if ( _ifSet == 0 ) + return false; + + _ifDict = OSDictionary::withCapacity(4); + if ( _ifDict == 0 ) + return false; + + // Create a notification object to call a 'C' function, every time an + // interface object is first published. 
+ + _interfaceNotifier = addNotification( + /* type */ gIOFirstPublishNotification, + /* match */ serviceMatching("IONetworkInterface"), + /* action */ interfacePublished, + /* param */ this ); + + if ( _interfaceNotifier == 0 ) return false; + + // Register the IONetworkStack object. + + registerService(); + + // Success. + + DLOG("success\n"); + + return true; +} + +//--------------------------------------------------------------------------- +// Stop is called by a terminated provider, after being closed, but before +// this client object is detached from it. + +void IONetworkStack::stop( IOService * provider ) +{ + DLOG("%p\n", provider); + super::stop(provider); +} + +//--------------------------------------------------------------------------- +// Release allocated resources. + +void IONetworkStack::free() +{ + DLOG("\n"); + + // IONotifier::remove() will remove the notification request + // and release the object. + + if ( _interfaceNotifier ) + { + _interfaceNotifier->remove(); + _interfaceNotifier = 0; + } + + // Free interface containers. + + if ( _ifDict ) + { + _ifDict->release(); + _ifDict = 0; + } + + if ( _ifSet ) + { + _ifSet->release(); + _ifSet = 0; + } + + gIONetworkStack = 0; + + // Propagate the free to superclass. + + super::free(); +} + +//--------------------------------------------------------------------------- +// A static method to get the global network stack object. + +IONetworkStack * IONetworkStack::getNetworkStack() +{ + return (IONetworkStack *) IOService::waitForService( + IOService::serviceMatching("IONetworkStack") ); +} + + +//=========================================================================== +// +// Interface object container helpers. +// +//=========================================================================== + +//--------------------------------------------------------------------------- +// Add the new interface object to an OSOrderedSet. 
+ +bool IONetworkStack::addInterface( IONetworkInterface * netif ) +{ + return _ifSet->setObject(netif); +} + +//--------------------------------------------------------------------------- +// Remove an interface object from an OSOrderedSet. + +void IONetworkStack::removeInterface( IONetworkInterface * netif ) +{ + _ifSet->removeObject(netif); + DLOG("count = %d\n", _ifSet->getCount()); +} + +//--------------------------------------------------------------------------- +// Get an interface object at a given index. + +IONetworkInterface * IONetworkStack::getInterface( UInt32 index ) +{ + return (IONetworkInterface *) _ifSet->getObject(index); +} + +//--------------------------------------------------------------------------- +// Query whether the specified interface object is a member of the Set. + +bool IONetworkStack::containsInterface( IONetworkInterface * netif ) +{ + return _ifSet->containsObject(netif); +} + +//--------------------------------------------------------------------------- +// Add an interface object to the set of registered interfaces. + +bool IONetworkStack::addRegisteredInterface( IONetworkInterface * netif ) +{ + bool success = true; + OSOrderedSet * set; + const char * prefix = netif->getNamePrefix(); + + if (prefix == 0) return false; + + // Look for a Set object in the dictionary. + + set = (OSOrderedSet *) _ifDict->getObject(prefix); + + // If not found, then create one and add it to the dictionary. + + if ( (set == 0) && + ((set = OSOrderedSet::withCapacity(10, orderRegisteredInterfaces))) ) + { + success = _ifDict->setObject(prefix, set); + set->release(); + } + + // Add the interface object to its corresponding set. + // All objects in a set will have the same name prefix. + + success = (set && success) ? set->setObject(netif) : false; + + return success; +} + +//--------------------------------------------------------------------------- +// Remove an interface object from the set of registered interfaces. 

// Undo addRegisteredInterface(): drop the interface from its prefix bucket,
// and discard the bucket itself once it becomes empty.
void IONetworkStack::removeRegisteredInterface( IONetworkInterface * netif )
{
    OSOrderedSet * set;
    const char *   prefix = netif->getNamePrefix();

    if ( prefix )
    {
        set = (OSOrderedSet *) _ifDict->getObject(prefix);

        if ( set )
        {
            // Remove interface from set.

            set->removeObject(netif);
            DLOG("set:%s count = %d\n", prefix, set->getCount());

            // Remove (also release) the set from the dictionary.

            if ( set->getCount() == 0 ) _ifDict->removeObject(prefix);
        }
    }
}

//---------------------------------------------------------------------------
// Get an registered interface with the given prefix and unit number.
// Returns 0 when no registered interface matches. Linear scan of the
// prefix bucket; note the returned pointer is NOT retained for the caller.

IONetworkInterface *
IONetworkStack::getRegisteredInterface( const char * prefix,
                                        UInt32       unit )
{
    OSOrderedSet *       set;
    IONetworkInterface * netif = 0;

    set = (OSOrderedSet *) _ifDict->getObject(prefix);

    for ( UInt32 index = 0;
          ( set && (netif = (IONetworkInterface *) set->getObject(index)) );
          index++ )
    {
        if ( netif->getUnitNumber() == unit )
            break;
    }

    // netif is 0 if the bucket is missing or the scan ran off the end.
    return netif;
}

//---------------------------------------------------------------------------
// Get the last object (with largest index) in the set of registered
// interfaces with the specified prefix. Because the set is ordered by
// unit number, this is the interface with the highest unit.

IONetworkInterface *
IONetworkStack::getLastRegisteredInterface( const char * prefix )
{
    OSOrderedSet * set;

    set = (OSOrderedSet *) _ifDict->getObject(prefix);

    return ( set ) ? (IONetworkInterface *) set->getLastObject() : 0;
}

//---------------------------------------------------------------------------
// Get the next available unit number in the set of registered interfaces
// with the specified prefix. 'startingUnit' is the lowest unit the caller
// will accept; the returned unit is >= startingUnit and not currently
// assigned to any registered interface with this prefix.

UInt32
IONetworkStack::getNextAvailableUnitNumber( const char * prefix,
                                            UInt32       startingUnit )
{
    IONetworkInterface * netif = getLastRegisteredInterface(prefix);

    if ( ( netif == 0 ) || ( netif->getUnitNumber() < startingUnit ) )
    {
        // The unit number provided is acceptable.
    }
    else if ( netif->getUnitNumber() == startingUnit )
    {
        // Conflict, bump proposed unit number by one.
        startingUnit++;
    }
    else
    {
        // Walk the (unit-ordered) bucket and slide past each taken unit
        // until a gap at or above startingUnit is found.

        OSOrderedSet * set = (OSOrderedSet *) _ifDict->getObject(prefix);

        for ( UInt32 index = 0; set; index++ )
        {
            netif = (IONetworkInterface *) set->getObject(index);

            if ( ( netif == 0 ) ||
                 ( netif->getUnitNumber() > startingUnit ) )
                break;
            else if ( netif->getUnitNumber() < startingUnit )
                continue;
            else
                startingUnit = netif->getUnitNumber() + 1;
        }
    }

    return startingUnit;
}


//===========================================================================
//
// Interface Management.
//
//===========================================================================


//---------------------------------------------------------------------------
// A static member function that is called by a notification object when an
// interface is published. This function is called with arbitration lock of
// the interface object held.
//
// Adds the new interface to the stack's collections and attaches to it.
// May also kick off asynchronous registration of the primary interface at
// unit 0 (see comment in the body). Returns false on any failure, after
// undoing the partial bookkeeping.

bool IONetworkStack::interfacePublished( void *      /* target */,
                                         void *      /* param  */,
                                         IOService * service )
{
    IONetworkInterface * netif = OSDynamicCast(IONetworkInterface, service);
    bool                 success = false;

    DLOG("%p\n", netif);

    if ( gIONetworkStack == 0 )
        return false;

    // NOTE: interface arbitration lock is already held (see above), and the
    // stack lock is taken second. preRegisterInterface() documents why this
    // ordering is safe only here.

    gIONetworkStack->lockForArbitration();

    do {
        if ( netif == 0 ) break;

        // Early exit from redundant notifications.

        if ( gIONetworkStack->containsInterface(netif) == true )
        {
            success = true;
            break;
        }

        // Add the interface to a collection.

        if ( gIONetworkStack->addInterface(netif) == false )
            break;

        // Attach the stack object to the interface object as its client.

        if ( gIONetworkStack->attach(netif) == false )
            break;

        // Initialize the interface flags. These flags are used only
        // by IONetworkStack.

        NETIF_FLAGS(netif) = kInterfaceFlagActive;

        // No outside intervention is required for the primary interface
        // to be registered at unit 0. This is to assure that we have 'en0'
        // even if something is really fouled up. Ideally, this should be
        // removed in the future and have a single entity manage the
        // naming responsibility. And on Intel, there is no concept of a
        // "built-in" interface, so this will do nothing for Intel.

        if ( gIONetworkStack->_registerPrimaryInterface &&
             netif->isPrimaryInterface() )
        {
            const char * prefix = netif->getNamePrefix();
            const UInt32 unit   = 0;

            // If another interface already took unit 0, do nothing.

            if ( gIONetworkStack->getRegisteredInterface(prefix, unit) == 0 )
            {
                OSArray * array = OSArray::withCapacity(1);
                if ( array )
                {
                    gIONetworkStack->preRegisterInterface( netif,
                                                           prefix,
                                                           unit,
                                                           array );

                    // Must be async: we hold locks here, and
                    // completeRegistration(sync) requires no locks held.
                    completeRegistration( array, false );  // Async
                }
            }
        }

        success = true;
    }
    while ( false );

    // Remove interface on failure.

    if (success == false) gIONetworkStack->removeInterface(netif);

    gIONetworkStack->unlockForArbitration();

    return success;
}

//---------------------------------------------------------------------------
// Handle termination messages sent from the interface object (provider).
//
// Marks the interface inactive and, depending on its registration state,
// either postpones teardown (still registering), finishes immediately
// (never registered with BSD), or schedules an async DLIL unregister.
// Returns kIOReturnBadArgument for unknown providers or other messages.

IOReturn IONetworkStack::message( UInt32      type,
                                  IOService * provider,
                                  void *      /* argument */ )
{
    IONetworkInterface * netif = (IONetworkInterface *) provider;
    IOReturn             ret   = kIOReturnBadArgument;

    DLOG("%lx %p\n", type, provider);

    if ( type == kIOMessageServiceIsTerminated )
    {
        lockForArbitration();

        do {
            // Verify that the provider object given is known.

            if ( containsInterface(netif) == false )
                break;

            ret = kIOReturnSuccess;

            // Interface has become inactive, it is no longer possible
            // to open or to attach to the interface object.
            // Mark the interface as Inactive.

            CLR_NETIF_FLAGS( netif, kInterfaceFlagActive );

            // Interface is registering with DLIL. Postpone termination until
            // the interface has completed the registration.
            // (registerBSDInterface() re-issues this message when it sees
            // the Active flag cleared.)

            if ( NETIF_FLAGS(netif) & kInterfaceFlagRegistering )
                break;

            // Remove the interface object. Don't worry, it is still retained.

            removeInterface(netif);

            // If interface was never registered with BSD, no additional
            // action is required.

            if ( (NETIF_FLAGS(netif) & kInterfaceFlagRegistered) == 0 )
                break;

            // Need to unregister the interface. Do this asynchronously.
            // The interface will be waiting for a close before advancing
            // to the next stage in the termination process.

            thread_call_func( (thread_call_func_t) unregisterBSDInterface,
                              netif,
                              TRUE ); /* unique call desired */
        }
        while ( false );

        unlockForArbitration();
    }

    return ret;
}

//---------------------------------------------------------------------------
// Detach an inactive interface that is currently registered with BSD.
// Runs on a thread_call thread (scheduled by message() above).

void IONetworkStack::unregisterBSDInterface( IONetworkInterface * netif )
{
    assert( netif );

    // If dlil_if_detach() returns DLIL_WAIT_FOR_FREE, then we
    // must not close the interface until we receive a callback
    // from DLIL. Otherwise, proceed with the close.

    DLOG("%p\n", netif);

    if ( dlil_if_detach(netif->getIfnet()) != DLIL_WAIT_FOR_FREE )
    {
        bsdInterfaceWasUnregistered( netif->getIfnet() );
    }
}

//---------------------------------------------------------------------------
// Handle a callback from DLIL to signal that an interface can now be safely
// destroyed. DLIL will issue this call only if the dlil_if_detach() function
// returned DLIL_WAIT_FOR_FREE.
//
// Clears the Registered flag, removes the interface from the registered
// collection, forces the interface administratively down, then closes it so
// termination can proceed. Always returns 0 (DLIL callback convention).

int IONetworkStack::bsdInterfaceWasUnregistered( struct ifnet * ifp )
{
    IONetworkInterface * netif;

    assert( ifp );

    netif = (IONetworkInterface *) ifp->if_private;
    DLOG("%p\n", netif);

    assert( netif && gIONetworkStack );

    // An interface was detached from DLIL. It is now safe to close the
    // interface object.

    gIONetworkStack->lockForArbitration();

    assert( NETIF_FLAGS(netif) == kInterfaceFlagRegistered );

    // Update state.

    CLR_NETIF_FLAGS( netif, kInterfaceFlagRegistered );

    // Drop interface from list of registered interfaces,
    // and decrement interface retain count.

    gIONetworkStack->removeRegisteredInterface(netif);

    gIONetworkStack->unlockForArbitration();

    // Make sure the interface is brought down before it is closed.

    netif->setFlags( 0, IFF_UP );  // clear IFF_UP flag.
    (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, 0);

    // Close interface and allow it to proceed with termination.

    netif->close(gIONetworkStack);

    return 0;
}

//---------------------------------------------------------------------------
// Pre-register a network interface. This function assumes that the
// caller is holding the arbitration lock.
//
// Assigns a unit number (>= 'unit'), opens the interface, records it in
// the registered-interface dictionary, sets the Registering flag, and
// appends it to 'array' for later completion by completeRegistration().
// Returns false if the interface is unknown, not Active, or any step fails.

bool IONetworkStack::preRegisterInterface( IONetworkInterface * netif,
                                           const char *         prefix,
                                           UInt32               unit,
                                           OSArray *            array )
{
    bool success = false;

    DLOG("%p %s %d\n", netif, prefix ? prefix : "", unit);

    assert( netif && array );

    do {
        if ( prefix == 0 ) break;

        // Verify that the interface object given is known.

        if ( containsInterface(netif) == false )
            break;

        // Interface must be in Active state.

        if ( NETIF_FLAGS(netif) != kInterfaceFlagActive )
        {
            break;
        }

        // The unit argument provided is a hint to indicate the lowest unit
        // number that can be assigned to the interface. We are allowed to
        // increment the unit number provided if the number is already
        // taken.

        unit = getNextAvailableUnitNumber(prefix, unit);

        // Open the interface object. This will fail if the interface
        // object has become inactive. Beware of reverse lock acquisition
        // sequence, which is interface then stack arbitration lock.
        // Must avoid taking locks in that order to avoid deadlocks.
        // The only exception is when handling the "First Publish"
        // notification, which is safe since the stack object does not
        // yet have a reference to the new interface.

        if ( netif->open(this) == false )
        {
            break;
        }

        // Update interface name properties and add the interface object
        // to a collection of registered interfaces. The chosen name will
        // be reserved until the interface is removed from the collection
        // of registered interfaces.

        if ( ( netif->setUnitNumber(unit) == false ) ||
             ( addRegisteredInterface(netif) == false ) )
        {
            netif->close(this);
            break;
        }

        success = true;
    }
    while ( false );

    if ( success )
    {
        // Mark the interface as in the process of registering.

        SET_NETIF_FLAGS( netif, kInterfaceFlagRegistering );

        // Add interface to pre-registration array.
        // We assume the array has enough storage space for the new entry.

        success = array->setObject( netif );
        assert( success );
    }

    return success;
}

//---------------------------------------------------------------------------
// Complete the registration of interface objects stored in the array provided.
// The arbitration lock should not be held when the 'isSync' flag is true.
// Either way, ownership of 'array' (one reference) passes to
// completeRegistrationUsingArray(), which releases it.

void
IONetworkStack::completeRegistration( OSArray * array, bool isSync )
{
    if ( isSync )
    {
        completeRegistrationUsingArray( array );
    }
    else
    {
        thread_call_func( (thread_call_func_t) completeRegistrationUsingArray,
                          array,
                          TRUE ); /* unique call desired */
    }
}

// Register every pre-registered interface in 'array' with BSD, then
// release the array (consumes the caller's reference).
void
IONetworkStack::completeRegistrationUsingArray( OSArray * array )
{
    IONetworkInterface * netif;

    assert( array );

    for ( UInt32 i = 0; i < array->getCount(); i++ )
    {
        netif = (IONetworkInterface *) array->getObject(i);
        assert( netif );

        registerBSDInterface( netif );
    }

    array->release();   // consumes a ref count
}

//---------------------------------------------------------------------------
// Call DLIL functions to register the BSD interface.

// Attach a pre-registered interface to DLIL/BPF, publish its BSD name,
// and flip its state from Registering to Registered. If the interface was
// terminated while registration was in flight, re-issue the termination
// message so teardown can resume. Runs without locks held on entry.
void IONetworkStack::registerBSDInterface( IONetworkInterface * netif )
{
    char ifname[20];
    bool doTermination = false;

    assert( netif );

    // Attach the interface to DLIL.

    bpfattach( netif->getIfnet(), DLT_EN10MB, sizeof(struct ether_header) );
    ether_ifattach( netif->getIfnet() );

    // Add a kIOBSDNameKey property to the interface AFTER the interface
    // has registered with DLIL. The order is very important to avoid
    // rooting from an interface which is not yet known by BSD.

    // NOTE(review): unbounded sprintf into ifname[20]; presumably safe for
    // short prefixes like "en" plus a unit number, but verify the maximum
    // prefix length — a bounded formatter would be more defensive.
    sprintf(ifname, "%s%d", netif->getNamePrefix(), netif->getUnitNumber());
    netif->setProperty(kIOBSDNameKey, ifname);

    // Update state bits and detect for untimely interface termination.

    gIONetworkStack->lockForArbitration();

    assert( ( NETIF_FLAGS(netif) &
            ( kInterfaceFlagRegistering | kInterfaceFlagRegistered ) ) ==
            kInterfaceFlagRegistering );

    CLR_NETIF_FLAGS( netif, kInterfaceFlagRegistering );
    SET_NETIF_FLAGS( netif, kInterfaceFlagRegistered  );

    if ( ( NETIF_FLAGS(netif) & kInterfaceFlagActive ) == 0 )
    {
        doTermination = true;
    }
    else
    {
        // Re-register interface after the interface has registered with BSD.
        // Is there a danger in calling registerService while holding the
        // gIONetworkStack's arbitration lock?

        netif->registerService();
    }

    gIONetworkStack->unlockForArbitration();

    // In the unlikely event that an interface was terminated before
    // being registered, re-issue the termination message and tear it
    // all down.

    if ( doTermination )
    {
        gIONetworkStack->message(kIOMessageServiceIsTerminated, netif);
    }
}

//---------------------------------------------------------------------------
// External/Public API - Register all interfaces.
//
// Pre-registers every Active, not-yet-registered interface under the
// arbitration lock, then completes registration synchronously with the
// lock released. Returns kIOReturnNoMemory only if the scratch array
// cannot be allocated; individual pre-registration failures are skipped.

IOReturn
IONetworkStack::registerAllInterfaces()
{
    IONetworkInterface * netif;
    const UInt32         unit = 0;
    OSArray *            array;

    lockForArbitration();

    // Allocate array to hold pre-registered interface objects.

    array = OSArray::withCapacity( _ifSet->getCount() );
    if ( array == 0 )
    {
        unlockForArbitration();
        return kIOReturnNoMemory;
    }

    // Iterate through all interface objects.

    for ( UInt32 index = 0; ( netif = getInterface(index) ); index++ )
    {
        // Interface must be Active and not yet registered.
        // (flags == Active means no Registering/Registered bits set.)

        if ( NETIF_FLAGS(netif) != kInterfaceFlagActive )
        {
            continue;
        }

        // Pre-register the interface.

        preRegisterInterface( netif,
                              netif->getNamePrefix(),
                              unit,
                              array );
    }

    unlockForArbitration();

    // Complete registration without holding the arbitration lock.

    completeRegistration( array, true );

    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// External/Public API - Register primary interface.
//
// Sets the policy flag that lets interfacePublished() auto-register a
// primary interface at unit 0, and (when enabling) immediately registers
// any already-published primary interface if unit 0 is still free.

IOReturn IONetworkStack::registerPrimaryInterface( bool enable )
{
    IONetworkInterface * netif;
    const UInt32         unit = 0;
    OSArray *            array;

    lockForArbitration();

    _registerPrimaryInterface = enable;

    if ( _registerPrimaryInterface == false )
    {
        unlockForArbitration();
        return kIOReturnSuccess;
    }

    // Allocate array to hold pre-registered interface objects.

    array = OSArray::withCapacity( _ifSet->getCount() );
    if ( array == 0 )
    {
        unlockForArbitration();
        return kIOReturnNoMemory;
    }

    // Iterate through all interface objects.

    for ( UInt32 index = 0; ( netif = getInterface(index) ); index++ )
    {
        const char * prefix = netif->getNamePrefix();

        // Interface must be Active and not yet registered.

        if ( NETIF_FLAGS(netif) != kInterfaceFlagActive )
        {
            continue;
        }

        // Primary only.

        if ( netif->isPrimaryInterface() != true )
        {
            continue;
        }

        // If the unit slot is already taken, forget it.

        if ( getRegisteredInterface( prefix, unit ) )
        {
            continue;
        }

        // Pre-register the interface.

        preRegisterInterface( netif, prefix, unit, array );
    }

    unlockForArbitration();

    // Complete registration without holding the arbitration lock.

    completeRegistration( array, true );

    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// External/Public API - Register a single interface.
//
// 'unit' is the lowest acceptable unit number (may be bumped up).
// When 'isSync' is true this call returns only after the interface has
// registered with DLIL; otherwise completion happens on a thread call.

IOReturn IONetworkStack::registerInterface( IONetworkInterface * netif,
                                            const char *         prefix,
                                            UInt32               unit,
                                            bool                 isSync )
{
    bool      ret;
    OSArray * array;

    // Create pre-registration array.

    array = OSArray::withCapacity( 1 );
    if ( array == 0 )
    {
        return kIOReturnNoMemory;
    }

    // Pre-registration has to be serialized, but the registration can
    // (and should) be completed without holding a lock. If the interface
    // has already been registered, or cannot be registered, then the
    // return value will be false.

    lockForArbitration();
    ret = preRegisterInterface( netif, prefix, unit, array );
    unlockForArbitration();

    // Complete the registration synchronously or asynchronously.
    // If synchronous, then this call will return after the interface
    // object in the array has registered with DLIL.

    completeRegistration( array, isSync );

    return ret ? kIOReturnSuccess : kIOReturnError;
}

//---------------------------------------------------------------------------
// Registered interfaces are ordered by their assigned unit number. Those with
// larger unit numbers will be placed behind those with smaller unit numbers.
// This ordering makes it easier to hunt for an available unit number slot for
// a new interface.
+ +SInt32 IONetworkStack:: +orderRegisteredInterfaces( const OSMetaClassBase * obj1, + const OSMetaClassBase * obj2, + void * ref ) +{ + const IONetworkInterface * netif1 = (const IONetworkInterface *) obj1; + const IONetworkInterface * netif2 = (const IONetworkInterface *) obj2; + + assert( netif1 && netif2 ); + + return ( netif2->getUnitNumber() - netif1->getUnitNumber() ); +} + +//--------------------------------------------------------------------------- +// Create a user-client object to manage user space access. + +IOReturn IONetworkStack::newUserClient( task_t owningTask, + void * /* security_id */, + UInt32 /* type */, + IOUserClient ** handler ) +{ + IOReturn err = kIOReturnSuccess; + IOUserClient * client; + + client = IONetworkStackUserClient::withTask(owningTask); + + if (!client || !client->attach(this) || !client->start(this)) + { + if (client) + { + client->detach(this); + client->release(); + client = 0; + err = kIOReturnExclusiveAccess; + } + else + { + err = kIOReturnNoMemory; + } + } + + *handler = client; + + return err; +} + +//--------------------------------------------------------------------------- +// IONetworkStackUserClient implementation. 

#undef  super
#define super IOUserClient
OSDefineMetaClassAndStructors( IONetworkStackUserClient, IOUserClient )

// Factory: allocate and initialize a user client for the given task.
// Returns 0 on allocation or init failure. (Note: 'task' is currently
// not stored.)
IONetworkStackUserClient * IONetworkStackUserClient::withTask( task_t task )
{
    IONetworkStackUserClient * me = new IONetworkStackUserClient;

    if ( me && me->init() == false )
    {
        me->release();
        return 0;
    }
    return me;
}

// Open our provider (the IONetworkStack) and remember it; failure to
// open means another client has exclusive access.
bool IONetworkStackUserClient::start( IOService * provider )
{
    if ( super::start(provider) == false )
        return false;

    if ( provider->open(this) == false )
        return false;

    _provider = (IONetworkStack *) provider;

    return true;
}

// Close and detach from the provider when the user space client goes away.
// NOTE(review): _provider is not cleared here; presumably clientClose is
// invoked at most once per client — verify against IOUserClient usage.
IOReturn IONetworkStackUserClient::clientClose()
{
    if (_provider)
    {
        _provider->close(this);
        detach(_provider);
    }
    return kIOReturnSuccess;
}

// A dying client is handled identically to an orderly close.
IOReturn IONetworkStackUserClient::clientDied()
{
    return clientClose();
}

// Entry point for user-space control requests. 'properties' must be a
// dictionary carrying kIONetworkStackUserCommand; supported commands:
//   kIORegisterOne - register the interface named by kIOPathMatchKey at
//                    the unit given by kIOInterfaceUnit.
//   kIORegisterAll - register every eligible interface.
// Returns kIOReturnBadArgument for malformed requests.
IOReturn IONetworkStackUserClient::setProperties( OSObject * properties )
{
    IONetworkInterface * netif;
    OSDictionary *       dict = OSDynamicCast(OSDictionary, properties);
    IOReturn             ret  = kIOReturnBadArgument;
    OSString *           path = 0;
    OSNumber *           unit;
    OSNumber *           cmd;

    do {
        // Sanity check.

        if ( (_provider == 0) || (dict == 0) )
            break;

        // Switch on the specified user command.

        cmd = OSDynamicCast( OSNumber,
                             dict->getObject( kIONetworkStackUserCommand ) );
        if ( cmd == 0 )
            break;

        switch ( cmd->unsigned32BitValue() )
        {
            // Register one interface.

            case kIORegisterOne:
                path = OSDynamicCast( OSString,
                                      dict->getObject( kIOPathMatchKey ));
                unit = OSDynamicCast( OSNumber,
                                      dict->getObject( kIOInterfaceUnit ));

                if ( (path == 0) || (unit == 0) )
                {
                    break;
                }

                // Resolve the registry path to an interface object.
                // fromPath() returns a retained object.

                netif = OSDynamicCast( IONetworkInterface,
                        IORegistryEntry::fromPath( path->getCStringNoCopy()) );

                if ( netif == 0 ) break;

                ret = _provider->registerInterface( netif,
                                                    netif->getNamePrefix(),
                                                    unit->unsigned32BitValue() );

                netif->release();   // offset the retain by fromPath().

                break;

            // Register all interfaces.

            case kIORegisterAll:
                ret = _provider->registerAllInterfaces();
                break;
        }
    }
    while ( false );

    return ret;
}
diff --git a/iokit/Families/IONetworking/IONetworkUserClient.cpp b/iokit/Families/IONetworking/IONetworkUserClient.cpp
new file mode 100644
index 000000000..8641d4b5a
--- /dev/null
+++ b/iokit/Families/IONetworking/IONetworkUserClient.cpp
@@ -0,0 +1,274 @@
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 *
 */

// NOTE(review): the include targets below were lost in extraction
// (angle-bracketed header names stripped); recover them from the
// original file before building.
#include
#include
#include
#include

//------------------------------------------------------------------------

#define super IOUserClient
OSDefineMetaClassAndStructors( IONetworkUserClient, IOUserClient )

#ifdef  DEBUG
#define DLOG(fmt, args...)  IOLog(fmt, ## args)
#else
#define DLOG(fmt, args...)
#endif

//---------------------------------------------------------------------------
// Factory method that performs allocation and initialization
// of an IONetworkUserClient instance.

// Allocate and initialize a user client bound to 'owningTask'.
// Returns 0 on allocation or init failure.
IONetworkUserClient * IONetworkUserClient::withTask(task_t owningTask)
{
    IONetworkUserClient * me;

    me = new IONetworkUserClient;
    if (me)
    {
        if (!me->init())
        {
            me->release();
            return 0;
        }
        me->_task = owningTask;
    }
    return me;
}

//---------------------------------------------------------------------------
// Start the IONetworkUserClient.
//
// Opens the providing IONetworkInterface and builds the external-method
// dispatch table (_methods) consumed by getExternalMethodForIndex().
// Each entry records the receiver, method pointer, argument counts and
// dispatch flags for one user-callable selector.

bool IONetworkUserClient::start(IOService * provider)
{
    UInt32 i;

    _owner = OSDynamicCast(IONetworkInterface, provider);
    assert(_owner);

    if (!super::start(_owner))
        return false;

    if (!_owner->open(this))
        return false;

    // Initialize the call structures.
    //
    i = kIONUCResetNetworkDataIndex;
    _methods[i].object = this;
    _methods[i].func   = (IOMethod) &IONetworkUserClient::resetNetworkData;
    _methods[i].count0 = kIONUCResetNetworkDataInputs;
    _methods[i].count1 = kIONUCResetNetworkDataOutputs;
    _methods[i].flags  = kIONUCResetNetworkDataFlags;

    i = kIONUCWriteNetworkDataIndex;
    _methods[i].object = this;
    _methods[i].func   = (IOMethod) &IONetworkUserClient::writeNetworkData;
    _methods[i].count0 = kIONUCWriteNetworkDataInput0;
    _methods[i].count1 = kIONUCWriteNetworkDataInput1;
    _methods[i].flags  = kIONUCWriteNetworkDataFlags;

    i = kIONUCReadNetworkDataIndex;
    _methods[i].object = this;
    _methods[i].func   = (IOMethod) &IONetworkUserClient::readNetworkData;
    _methods[i].count0 = kIONUCReadNetworkDataInputs;
    _methods[i].count1 = kIONUCReadNetworkDataOutputs;
    _methods[i].flags  = kIONUCReadNetworkDataFlags;

    i = kIONUCGetNetworkDataCapacityIndex;
    _methods[i].object = this;
    _methods[i].func   = (IOMethod)
                         &IONetworkUserClient::getNetworkDataCapacity;
    _methods[i].count0 = kIONUCGetNetworkDataCapacityInputs;
    _methods[i].count1 = kIONUCGetNetworkDataCapacityOutputs;
    _methods[i].flags  = kIONUCGetNetworkDataCapacityFlags;

    i = kIONUCGetNetworkDataHandleIndex;
    _methods[i].object = this;
    _methods[i].func   = (IOMethod)
                         &IONetworkUserClient::getNetworkDataHandle;
    _methods[i].count0 = kIONUCGetNetworkDataHandleInputs;
    _methods[i].count1 = kIONUCGetNetworkDataHandleOutputs;
    _methods[i].flags  = kIONUCGetNetworkDataHandleFlags;

    return true;
}

//---------------------------------------------------------------------------
// Free the IONetworkUserClient instance.

void IONetworkUserClient::free()
{
    super::free();
}

//---------------------------------------------------------------------------
// Handle a client close. Close and detach from our owner (provider).

IOReturn IONetworkUserClient::clientClose()
{
    if (_owner) {
        _owner->close(this);
        detach(_owner);
    }

    return kIOReturnSuccess;
}

//---------------------------------------------------------------------------
// Handle client death. Close and detach from our owner (provider).

IOReturn IONetworkUserClient::clientDied()
{
    return clientClose();
}

//---------------------------------------------------------------------------
// Look up an entry from the method array.
// Returns 0 for out-of-range selectors.

IOExternalMethod *
IONetworkUserClient::getExternalMethodForIndex(UInt32 index)
{
    if (index >= kIONUCLastIndex)
        return 0;
    else
        return &_methods[index];
}

//---------------------------------------------------------------------------
// Fill the data buffer in an IONetworkData object with zeroes.
// 'key' names the data object; kIOReturnBadArgument if no such object.

IOReturn IONetworkUserClient::resetNetworkData(OSSymbol * key)
{
    IONetworkData * data;
    IOReturn        ret;

    data = _owner->getNetworkData(key);
    ret  = data ? data->reset() : kIOReturnBadArgument;

    return ret;
}

//---------------------------------------------------------------------------
// Write to the data buffer in an IONetworkData object with data from a
// source buffer provided by the caller.

IOReturn
IONetworkUserClient::writeNetworkData(OSSymbol *  key,
                                      void *      srcBuffer,
                                      IOByteCount srcBufferSize)
{
    IONetworkData * data;
    IOReturn        ret;

    if (!srcBuffer || (srcBufferSize == 0))
        return kIOReturnBadArgument;

    data = _owner->getNetworkData(key);
    ret  = data ? data->write(srcBuffer, srcBufferSize) : kIOReturnBadArgument;

    return ret;
}

//---------------------------------------------------------------------------
// Read the data buffer in an IONetworkData object and copy
// this data to a destination buffer provided by the caller.
// On entry *dstBufferSize is the buffer capacity; IONetworkData::read
// updates it with the number of bytes copied.

IOReturn
IONetworkUserClient::readNetworkData(OSSymbol *    key,
                                     void *        dstBuffer,
                                     IOByteCount * dstBufferSize)
{
    IONetworkData * data;
    IOReturn        ret ;

    if (!dstBuffer || !dstBufferSize)
        return kIOReturnBadArgument;

    data = _owner->getNetworkData(key);
    ret  = data ? data->read(dstBuffer, dstBufferSize) :
                  kIOReturnBadArgument;

    return ret;
}

//---------------------------------------------------------------------------
// Get the capacity of an IONetworkData object.
// Writes the byte size to *capacity on success.

IOReturn
IONetworkUserClient::getNetworkDataCapacity(OSSymbol * key,
                                            UInt32 *   capacity)
{
    IOReturn        ret = kIOReturnBadArgument;
    IONetworkData * data;

    data = _owner->getNetworkData(key);

    if (data) {
        *capacity = data->getSize();
        ret = kIOReturnSuccess;
    }

    return ret;
}

//---------------------------------------------------------------------------
// Called to obtain a handle that maps to an IONetworkData object.
// This handle can be later passed to other methods in this class
// to refer to the same object.
+ +IOReturn +IONetworkUserClient::getNetworkDataHandle(char * name, + OSSymbol ** handle, + IOByteCount nameSize, + IOByteCount * handleSizeP) +{ + IOReturn ret = kIOReturnBadArgument; + const OSSymbol * key; + + if (!name || !nameSize || (name[nameSize - 1] != '\0') || + (*handleSizeP != sizeof(*handle))) + return kIOReturnBadArgument; + + key = OSSymbol::withCStringNoCopy(name); + if (!key) + return kIOReturnNoMemory; + + if (_owner->getNetworkData(key)) + { + *handle = (OSSymbol *) key; + ret = kIOReturnSuccess; + } + + if (key) + key->release(); + + return ret; +} diff --git a/iokit/Families/IONetworking/IOOutputQueue.cpp b/iokit/Families/IONetworking/IOOutputQueue.cpp new file mode 100644 index 000000000..44349b770 --- /dev/null +++ b/iokit/Families/IONetworking/IOOutputQueue.cpp @@ -0,0 +1,899 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * IOOutputQueue.cpp + * + * HISTORY + * 2-Feb-1999 Joe Liu (jliu) created. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "IOMbufQueue.h" +#include + +//=========================================================================== +// IOOutputQueue +//=========================================================================== + +#define STATE_IS(bits) (_state == (bits)) +#define STATE_HAS(bits) ((_state & (bits)) == (bits)) +#define STATE_SET(bits) (_state |= (bits)) +#define STATE_CLR(bits) (_state &= ~(bits)) + +#undef super +#define super OSObject +OSDefineMetaClassAndAbstractStructors( IOOutputQueue, OSObject ) +OSMetaClassDefineReservedUnused( IOOutputQueue, 0); +OSMetaClassDefineReservedUnused( IOOutputQueue, 1); +OSMetaClassDefineReservedUnused( IOOutputQueue, 2); +OSMetaClassDefineReservedUnused( IOOutputQueue, 3); +OSMetaClassDefineReservedUnused( IOOutputQueue, 4); +OSMetaClassDefineReservedUnused( IOOutputQueue, 5); +OSMetaClassDefineReservedUnused( IOOutputQueue, 6); +OSMetaClassDefineReservedUnused( IOOutputQueue, 7); +OSMetaClassDefineReservedUnused( IOOutputQueue, 8); +OSMetaClassDefineReservedUnused( IOOutputQueue, 9); +OSMetaClassDefineReservedUnused( IOOutputQueue, 10); +OSMetaClassDefineReservedUnused( IOOutputQueue, 11); +OSMetaClassDefineReservedUnused( IOOutputQueue, 12); +OSMetaClassDefineReservedUnused( IOOutputQueue, 13); +OSMetaClassDefineReservedUnused( IOOutputQueue, 14); +OSMetaClassDefineReservedUnused( IOOutputQueue, 15); + +//--------------------------------------------------------------------------- +// Initialize an IOOutputQueue object. + +bool IOOutputQueue::init() +{ + if (super::init() == false) + return false; + + // Allocate and initialize the callout entry for async service. + + _callEntry = thread_call_allocate((thread_call_func_t) &runServiceThread, + (void *) this); /* param0 */ + if (_callEntry == 0) + return false; + + return true; +} + +//--------------------------------------------------------------------------- +// Frees the IOOutputQueue object. 
+ +void IOOutputQueue::free() +{ + if (_callEntry) + { + cancelServiceThread(); + thread_call_free(_callEntry); + _callEntry = 0; + } + + super::free(); +} + +//--------------------------------------------------------------------------- +// Schedule a service thread callout, which will run the +// serviceThread() method. + +bool IOOutputQueue::scheduleServiceThread(void * param = 0) +{ + return thread_call_enter1(_callEntry, (thread_call_param_t) param); +} + +//--------------------------------------------------------------------------- +// Cancel any pending service thread callout. + +bool IOOutputQueue::cancelServiceThread() +{ + if (_callEntry == 0) + return false; + else + return thread_call_cancel(_callEntry); +} + +//--------------------------------------------------------------------------- +// A 'C' glue function that is registered as the service thread callout +// handler. This function in turn will call the serviceThread() method. + +void +IOOutputQueue::runServiceThread(thread_call_param_t param0, /* this */ + thread_call_param_t param1) /* param */ +{ + assert(param0); + ((IOOutputQueue *) param0)->serviceThread(param1); +} + +//--------------------------------------------------------------------------- +// Must be implemented by a subclass that calls scheduleServiceThread(). +// The default implementation is a placeholder and performs no action. + +void IOOutputQueue::serviceThread(void * param) +{ +} + +//--------------------------------------------------------------------------- +// Return an address of a method that is designated to handle +// packets sent to the queue object. + +IOOutputAction IOOutputQueue::getOutputHandler() const +{ + return (IOOutputAction) &IOOutputQueue::enqueue; +} + +//--------------------------------------------------------------------------- +// Return an IONetworkData object containing statistics counters. 
+ +IONetworkData * IOOutputQueue::getStatisticsData() const +{ + return 0; +} + + +//=========================================================================== +// IOBasicOutputQueue +//=========================================================================== + +#undef super +#define super IOOutputQueue +OSDefineMetaClassAndStructors( IOBasicOutputQueue, IOOutputQueue ) + +#define QUEUE_LOCK IOSimpleLockLock(_spinlock) +#define QUEUE_UNLOCK IOSimpleLockUnlock(_spinlock) + +#define kIOOutputQueueSignature ((void *) 0xfacefeed) + +//--------------------------------------------------------------------------- +// 'C' function glue to dispatch the IONetworkData notification. + +IOReturn +IOBasicOutputQueue::dispatchNetworkDataNotification(void * target, + void * param, + IONetworkData * data, + UInt32 type) +{ + IOBasicOutputQueue * self = (IOBasicOutputQueue *) target; + return self->handleNetworkDataAccess(data, type, param); +} + +//--------------------------------------------------------------------------- +// Initialize an IOBasicOutputQueue object. + +bool IOBasicOutputQueue::init(OSObject * target, + IOOutputAction action, + UInt32 capacity = 0) +{ + if (super::init() == false) + return false; + + if ((target == 0) || (action == 0)) + return false; + + _target = target; + _action = action; + + // Create a data object for queue statistics. + + _statsData = IONetworkData::withInternalBuffer( + kIOOutputQueueStatsKey, + sizeof(IOOutputQueueStats), + kIONetworkDataBasicAccessTypes, + this, + (IONetworkData::Action) + &IOBasicOutputQueue::dispatchNetworkDataNotification, + kIOOutputQueueSignature); + + if (_statsData == 0) + return false; + + _stats = (IOOutputQueueStats *) _statsData->getBuffer(); + assert(_stats); + + _stats->capacity = capacity; + + // Create two queue objects. 
+ + _queues[0] = IONew(IOMbufQueue, 1); + _queues[1] = IONew(IOMbufQueue, 1); + + IOMbufQueueInit(_queues[0], capacity); + IOMbufQueueInit(_queues[1], capacity); + + if ( (_queues[0] == 0) || (_queues[1] == 0) ) + return false; + + _inQueue = _queues[0]; + + // Create a spinlock to protect the queue. + + _spinlock = IOSimpleLockAlloc(); + if (_spinlock == 0) + return false; + + return true; +} + +//--------------------------------------------------------------------------- +// Factory methods that will construct and initialize an IOBasicOutputQueue +// object. + +IOBasicOutputQueue * +IOBasicOutputQueue::withTarget(IONetworkController * target, + UInt32 capacity = 0) +{ + IOBasicOutputQueue * queue = new IOBasicOutputQueue; + + if (queue && !queue->init(target, target->getOutputHandler(), capacity)) + { + queue->release(); + queue = 0; + } + return queue; +} + +IOBasicOutputQueue * +IOBasicOutputQueue::withTarget(OSObject * target, + IOOutputAction action, + UInt32 capacity = 0) +{ + IOBasicOutputQueue * queue = new IOBasicOutputQueue; + + if (queue && !queue->init(target, action, capacity)) + { + queue->release(); + queue = 0; + } + return queue; +} + +//--------------------------------------------------------------------------- +// Release all resources previously allocated before calling super::free(). + +void IOBasicOutputQueue::free() +{ + cancelServiceThread(); + + if (_spinlock) + { + flush(); + IOSimpleLockFree(_spinlock); + _spinlock = 0; + } + + if (_queues[0]) IODelete(_queues[0], IOMbufQueue, 1); + if (_queues[1]) IODelete(_queues[1], IOMbufQueue, 1); + _queues[0] = _queues[1] = 0; + + if (_statsData) + { + _statsData->release(); + _statsData = 0; + } + + super::free(); +} + +//--------------------------------------------------------------------------- +// Provide an implementation for the serviceThread() method defined in +// IOOutputQueue. This method is called by a callout thread after an +// asynchronous service was scheduled. 
+ +void IOBasicOutputQueue::serviceThread(void * param) +{ + QUEUE_LOCK; + STATE_CLR((UInt32) param); + STATE_SET(kStateOutputActive); + dequeue(); + QUEUE_UNLOCK; +} + +//--------------------------------------------------------------------------- +// Add a single packet, or a chain of packets, to the queue object. +// This method can support multiple clients threads. + +UInt32 IOBasicOutputQueue::enqueue(struct mbuf * m, void * param) +{ + bool success; + + QUEUE_LOCK; + + success = IOMbufQueueEnqueue(_inQueue, m); + + if ( STATE_IS( kStateRunning ) ) + { + STATE_SET( kStateOutputActive ); + dequeue(); + } + + QUEUE_UNLOCK; + + // Drop the packet if the packet(s) were not queued. + // But avoid calling m_free() while holding a simple lock. + // This will not be necessary in the future when m_free() + // is no longer funneled. + + if (success == false) + { + OSAddAtomic( IOMbufFree(m), + (SInt32 *) &_stats->dropCount ); + } + + return 0; +} + +//--------------------------------------------------------------------------- +// Responsible for removing all packets from the queue and pass each packet +// removed to our target. This method returns when the queue becomes empty +// or if the queue is stalled by the target. This method is called with the +// spinlock held. + +void IOBasicOutputQueue::dequeue() +{ + IOMbufQueue * outQueue = _queues[0]; + UInt32 newState = 0; + UInt32 myServiceCount; + + // Switch the input queue. Work on the real queue, while allowing + // clients to continue to queue packets to the "shadow" queue. + + _inQueue = _queues[1]; + + // While dequeue is allowed, and incoming queue has packets. + + while ( STATE_IS( kStateRunning | kStateOutputActive ) && + IOMbufQueueGetSize(outQueue) ) + { + myServiceCount = _serviceCount; + + QUEUE_UNLOCK; + + output( outQueue, &newState ); + + QUEUE_LOCK; + + // If service() was called during the interval when the + // spinlock was released, then refuse to honor any + // stall requests. 
+ + if ( newState ) + { + if ( myServiceCount != _serviceCount ) + newState &= ~kStateOutputStalled; + + STATE_SET( newState ); + } + + // Absorb new packets added to the shadow queue. + + IOMbufQueueEnqueue( outQueue, _inQueue ); + } + + _inQueue = _queues[0]; + + STATE_CLR( kStateOutputActive ); + + if ( newState & kStateOutputServiceMask ) + { + scheduleServiceThread((void *)(newState & kStateOutputServiceMask)); + } + + if (_waitDequeueDone) + { + // A stop() request is waiting for the transmit thread to + // complete transmission. Wake up the waiting thread. + + _waitDequeueDone = false; + thread_wakeup((void *) &_waitDequeueDone); + } +} + +//--------------------------------------------------------------------------- +// Transfer all packets from the given queue to the target. Continue until +// the queue becomes empty, or if the target throttle the queue. + +void IOBasicOutputQueue::output(IOMbufQueue * queue, UInt32 * state) +{ + struct mbuf * pkt; + UInt32 status; + + do { + pkt = IOMbufQueueDequeue(queue); + assert(pkt); + + // Handoff each packet to the controller driver. + + status = (_target->*_action)( pkt, 0 ); + + if ( status == ( kIOOutputStatusAccepted | kIOOutputCommandNone ) ) + { + // Fast-path the typical code path. + _stats->outputCount++; + } + else + { + // Look at the return status and update statistics counters. + + switch (status & kIOOutputStatusMask) + { + default: + case kIOOutputStatusAccepted: + _stats->outputCount++; + break; + + case kIOOutputStatusRetry: + IOMbufQueuePrepend(queue, pkt); + _stats->retryCount++; + break; + } + + // Handle the requested action. + + switch (status & kIOOutputCommandMask) + { + case kIOOutputCommandStall: + *state = kStateOutputStalled; + _stats->stallCount++; + break; + + default: + break; + } + } + } + while ( IOMbufQueueGetSize(queue) && (*state == 0) ); +} + +//--------------------------------------------------------------------------- +// Start or enable the queue. 
+ +bool IOBasicOutputQueue::start() +{ + QUEUE_LOCK; + + STATE_SET( kStateRunning ); + STATE_CLR( kStateOutputStalled ); + _serviceCount++; + + if ( STATE_IS( kStateRunning ) ) + { + STATE_SET( kStateOutputActive ); + dequeue(); + } + + QUEUE_UNLOCK; + + return true; /* always return true */ +} + +//--------------------------------------------------------------------------- +// Stop or disable the queue. + +bool IOBasicOutputQueue::stop() +{ + bool wasRunning; + + QUEUE_LOCK; + + wasRunning = STATE_HAS( kStateRunning ); + + STATE_CLR( kStateRunning ); + + if ( STATE_HAS( kStateOutputActive ) ) + { + // If dequeue is active, it means that: + // 1. A thread is about to call dequeue(). + // 2. A thread is in dequeue() and calling the target. + // + // Wait for the dequeue thread to complete processing. + + _waitDequeueDone = true; + + assert_wait((void *) &_waitDequeueDone, false); + } + + QUEUE_UNLOCK; + + thread_block((void (*)(void)) 0); + + return wasRunning; +} + +//--------------------------------------------------------------------------- +// If the queue becomes stalled, then service() must be called by the target +// to restart the queue when the target is ready to accept more packets. + +bool IOBasicOutputQueue::service(UInt32 options = 0) +{ + bool doDequeue = false; + bool async = (options & kServiceAsync); + UInt32 oldState; + + QUEUE_LOCK; + + oldState = _state; + + // Clear the stall condition. + + STATE_CLR( kStateOutputStalled ); + _serviceCount++; + + if ( ( oldState & kStateOutputStalled ) && + STATE_IS( kStateRunning ) && + IOMbufQueueGetSize(_queues[0]) ) + { + doDequeue = true; + STATE_SET( kStateOutputActive ); + if (async == false) dequeue(); + } + + QUEUE_UNLOCK; + + if ( doDequeue && async ) + { + scheduleServiceThread(); + } + + return doDequeue; +} + +//--------------------------------------------------------------------------- +// Release all packets held by the queue. 
+ +UInt32 IOBasicOutputQueue::flush() +{ + UInt32 flushCount; + + QUEUE_LOCK; + flushCount = IOMbufFree( IOMbufQueueDequeueAll( _inQueue ) ); + OSAddAtomic(flushCount, (SInt32 *) &_stats->dropCount); + QUEUE_UNLOCK; + + return flushCount; +} + +//--------------------------------------------------------------------------- +// Change the capacity of the queue. + +bool IOBasicOutputQueue::setCapacity(UInt32 capacity) +{ + QUEUE_LOCK; + IOMbufQueueSetCapacity(_queues[1], capacity); + IOMbufQueueSetCapacity(_queues[0], capacity); + _stats->capacity = capacity; + QUEUE_UNLOCK; + return true; +} + +//--------------------------------------------------------------------------- +// Returns the current queue capacity. + +UInt32 IOBasicOutputQueue::getCapacity() const +{ + return _stats->capacity; +} + +//--------------------------------------------------------------------------- +// Returns the current queue size. + +UInt32 IOBasicOutputQueue::getSize() const +{ + UInt32 size; + QUEUE_LOCK; + size = IOMbufQueueGetSize(_queues[0]) + IOMbufQueueGetSize(_queues[1]); + QUEUE_UNLOCK; + return size; +} + +//--------------------------------------------------------------------------- +// Returns the number of packets dropped by the queue due to over-capacity. + +UInt32 IOBasicOutputQueue::getDropCount() +{ + return _stats->dropCount; +} + +//--------------------------------------------------------------------------- +// Returns the number of packet passed to the target. + +UInt32 IOBasicOutputQueue::getOutputCount() +{ + return _stats->outputCount; +} + +//--------------------------------------------------------------------------- +// Returns the number of times that a kIOOutputStatusRetry status code +// is received from the target. 
+ +UInt32 IOBasicOutputQueue::getRetryCount() +{ + return _stats->retryCount; +} + +//--------------------------------------------------------------------------- +// Returns the number of times that a kIOOutputCommandStall action code +// is received from the target. + +UInt32 IOBasicOutputQueue::getStallCount() +{ + return _stats->stallCount; +} + +//--------------------------------------------------------------------------- +// Returns the current state of the queue object. + +UInt32 IOBasicOutputQueue::getState() const +{ + return _state; +} + +//--------------------------------------------------------------------------- +// This method is called by our IONetworkData object when it receives +// a read or a reset request. We need to be notified to intervene in +// the request handling. + +IOReturn +IOBasicOutputQueue::handleNetworkDataAccess(IONetworkData * data, + UInt32 accessType, + void * arg) +{ + IOReturn ret = kIOReturnSuccess; + + assert(data && (arg == kIOOutputQueueSignature)); + + // Check the type of data request. + + switch (accessType) + { + case kIONetworkDataAccessTypeRead: + case kIONetworkDataAccessTypeSerialize: + _stats->size = getSize(); + break; + + default: + ret = kIOReturnNotWritable; + break; + } + + return ret; +} + +//--------------------------------------------------------------------------- +// Return an IONetworkData object containing an IOOutputQueueStats structure. + +IONetworkData * IOBasicOutputQueue::getStatisticsData() const +{ + return _statsData; +} + +//=========================================================================== +// IOGatedOutputQueue +//=========================================================================== + +#undef super +#define super IOBasicOutputQueue +OSDefineMetaClassAndStructors( IOGatedOutputQueue, IOBasicOutputQueue ) + +//--------------------------------------------------------------------------- +// Initialize an IOGatedOutputQueue object. 
+ +bool IOGatedOutputQueue::init(OSObject * target, + IOOutputAction action, + IOWorkLoop * workloop, + UInt32 capacity = 0) +{ + if (super::init(target, action, capacity) == false) + return false; + + // Verify that the IOWorkLoop provided is valid. + + if (OSDynamicCast(IOWorkLoop, workloop) == 0) + return false; + + // Allocate and attach an IOCommandGate object to the workloop. + + _gate = IOCommandGate::commandGate(this); + + if (!_gate || (workloop->addEventSource(_gate) != kIOReturnSuccess)) + return false; + + // Allocate and attach an IOInterruptEventSource object to the workloop. + + _interruptSrc = IOInterruptEventSource::interruptEventSource( + this, + (IOInterruptEventSource::Action) restartDeferredOutput + ); + + if ( !_interruptSrc || + (workloop->addEventSource(_interruptSrc) != kIOReturnSuccess) ) + return false; + + return true; +} + +//--------------------------------------------------------------------------- +// Factory methods that will construct and initialize an IOGatedOutputQueue +// object. + +IOGatedOutputQueue * +IOGatedOutputQueue::withTarget(IONetworkController * target, + IOWorkLoop * workloop, + UInt32 capacity = 0) +{ + IOGatedOutputQueue * queue = new IOGatedOutputQueue; + + if (queue && !queue->init(target, target->getOutputHandler(), workloop, + capacity)) + { + queue->release(); + queue = 0; + } + return queue; +} + +IOGatedOutputQueue * +IOGatedOutputQueue::withTarget(OSObject * target, + IOOutputAction action, + IOWorkLoop * workloop, + UInt32 capacity = 0) +{ + IOGatedOutputQueue * queue = new IOGatedOutputQueue; + + if (queue && !queue->init(target, action, workloop, capacity)) + { + queue->release(); + queue = 0; + } + return queue; +} + +//--------------------------------------------------------------------------- +// Free the IOGatedOutputQueue object. 
+ +void IOGatedOutputQueue::free() +{ + cancelServiceThread(); + + if (_gate) + { + _gate->release(); + _gate = 0; + } + + if (_interruptSrc) + { + IOWorkLoop * wl = _interruptSrc->getWorkLoop(); + if (wl) wl->removeEventSource(_interruptSrc); + _interruptSrc->release(); + _interruptSrc = 0; + } + + super::free(); +} + +//--------------------------------------------------------------------------- +// Called by an IOCommandGate object. + +void IOGatedOutputQueue::gatedOutput(OSObject * /* owner */, + IOGatedOutputQueue * self, + IOMbufQueue * queue, + UInt32 * state) +{ + struct mbuf * pkt; + UInt32 status; + + do { + pkt = IOMbufQueueDequeue(queue); + assert(pkt); + + // Handoff the packet to the controller driver. + + status = ((self->_target)->*(self->_action))( pkt, 0 ); + + if ( status == ( kIOOutputStatusAccepted | kIOOutputCommandNone ) ) + { + // Fast-path the typical code path. + self->_stats->outputCount++; + } + else + { + // Look at the return status and update statistics counters. + + switch (status & kIOOutputStatusMask) + { + default: + case kIOOutputStatusAccepted: + self->_stats->outputCount++; + break; + + case kIOOutputStatusRetry: + IOMbufQueuePrepend(queue, pkt); + self->_stats->retryCount++; + break; + } + + // Handle the requested action. + + switch (status & kIOOutputCommandMask) + { + case kIOOutputCommandStall: + *state = kStateOutputStalled; + self->_stats->stallCount++; + break; + + default: + break; + } + } + } + while ( IOMbufQueueGetSize(queue) && (*state == 0) ); +} + +//--------------------------------------------------------------------------- +// Called by our superclass to output all packets in the packet queue given. 
+ +enum { + kStateOutputDeferred = 0x100 +}; + +void IOGatedOutputQueue::output(IOMbufQueue * queue, UInt32 * state) +{ + if ( _gate->attemptAction((IOCommandGate::Action) + &IOGatedOutputQueue::gatedOutput, + (void *) this, + (void *) queue, + (void *) state) == kIOReturnCannotLock ) + { + *state = kStateOutputDeferred; + } +} + +bool IOGatedOutputQueue::scheduleServiceThread(void * param) +{ + if ( ((UInt32) param) & kStateOutputDeferred ) + { + _interruptSrc->interruptOccurred(0, 0, 0); + return true; + } + else + { + return super::scheduleServiceThread(param); + } +} + +void IOGatedOutputQueue::restartDeferredOutput( + OSObject * owner, + IOInterruptEventSource * sender, + int count) +{ + IOGatedOutputQueue * self = (IOGatedOutputQueue *) owner; + self->serviceThread((void *) kStateOutputDeferred); +} diff --git a/iokit/Families/IONetworking/IOPacketQueue.cpp b/iokit/Families/IONetworking/IOPacketQueue.cpp new file mode 100644 index 000000000..dd5a223bd --- /dev/null +++ b/iokit/Families/IONetworking/IOPacketQueue.cpp @@ -0,0 +1,267 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * IOPacketQueue.cpp - Implements a FIFO mbuf packet queue. + * + * HISTORY + * 9-Dec-1998 Joe Liu (jliu) created. + * + */ + +#include +#include // IOLog +#include +#include "IOMbufQueue.h" + +#define super OSObject +OSDefineMetaClassAndStructors( IOPacketQueue, OSObject ) +OSMetaClassDefineReservedUnused( IOPacketQueue, 0); +OSMetaClassDefineReservedUnused( IOPacketQueue, 1); +OSMetaClassDefineReservedUnused( IOPacketQueue, 2); +OSMetaClassDefineReservedUnused( IOPacketQueue, 3); +OSMetaClassDefineReservedUnused( IOPacketQueue, 4); +OSMetaClassDefineReservedUnused( IOPacketQueue, 5); +OSMetaClassDefineReservedUnused( IOPacketQueue, 6); +OSMetaClassDefineReservedUnused( IOPacketQueue, 7); +OSMetaClassDefineReservedUnused( IOPacketQueue, 8); +OSMetaClassDefineReservedUnused( IOPacketQueue, 9); +OSMetaClassDefineReservedUnused( IOPacketQueue, 10); +OSMetaClassDefineReservedUnused( IOPacketQueue, 11); +OSMetaClassDefineReservedUnused( IOPacketQueue, 12); +OSMetaClassDefineReservedUnused( IOPacketQueue, 13); +OSMetaClassDefineReservedUnused( IOPacketQueue, 14); +OSMetaClassDefineReservedUnused( IOPacketQueue, 15); + + +//--------------------------------------------------------------------------- +// Lock macros + +#define LOCK IOSimpleLockLock(_lock) +#define UNLOCK IOSimpleLockUnlock(_lock) + +//--------------------------------------------------------------------------- +// Initialize an IOPacketQueue object. 
+ +bool IOPacketQueue::initWithCapacity(UInt32 capacity) +{ + if (super::init() == false) + { + IOLog("IOPacketQueue: super::init() failed\n"); + return false; + } + + _queue = IONew(IOMbufQueue, 1); + if (_queue == 0) + return false; + + IOMbufQueueInit(_queue, capacity); + + if ((_lock = IOSimpleLockAlloc()) == 0) + return false; + + IOSimpleLockInit(_lock); + + return true; +} + +//--------------------------------------------------------------------------- +// Factory method that will construct and initialize an IOPacketQueue object. + +IOPacketQueue * IOPacketQueue::withCapacity(UInt32 capacity) +{ + IOPacketQueue * queue = new IOPacketQueue; + + if (queue && !queue->initWithCapacity(capacity)) + { + queue->release(); + queue = 0; + } + return queue; +} + +//--------------------------------------------------------------------------- +// Frees the IOPacketQueue instance. + +void IOPacketQueue::free() +{ + if (_lock) + { + IOSimpleLockFree(_lock); + _lock = 0; + } + + if (_queue) + { + flush(); + IODelete(_queue, IOMbufQueue, 1); + _queue = 0; + } + + super::free(); +} + +//--------------------------------------------------------------------------- +// Get the current size of the queue. + +UInt32 IOPacketQueue::getSize() const +{ + return IOMbufQueueGetSize(_queue); +} + +//--------------------------------------------------------------------------- +// Change the capacity of the queue. + +bool IOPacketQueue::setCapacity(UInt32 capacity) +{ + IOMbufQueueSetCapacity(_queue, capacity); + return true; +} + +//--------------------------------------------------------------------------- +// Get the capacity of the queue. + +UInt32 IOPacketQueue::getCapacity() const +{ + return IOMbufQueueGetCapacity(_queue); +} + +//--------------------------------------------------------------------------- +// Peek at the head of the queue without dequeueing the packet. 
+ +const struct mbuf * IOPacketQueue::peek() const +{ + return IOMbufQueuePeek(_queue); +} + +//--------------------------------------------------------------------------- +// Add a packet chain to the head of the queue. + +void IOPacketQueue::prepend(struct mbuf * m) +{ + IOMbufQueuePrepend(_queue, m); +} + +void IOPacketQueue::prepend(IOPacketQueue * queue) +{ + IOMbufQueuePrepend(_queue, queue->_queue); +} + +//--------------------------------------------------------------------------- +// Add a packet chain to the tail of the queue. + +bool IOPacketQueue::enqueue(struct mbuf * m) +{ + return IOMbufQueueEnqueue(_queue, m); +} + +bool IOPacketQueue::enqueue(IOPacketQueue * queue) +{ + return IOMbufQueueEnqueue(_queue, queue->_queue); +} + +UInt32 IOPacketQueue::enqueueWithDrop(struct mbuf * m) +{ + return IOMbufQueueEnqueue(_queue, m) ? 0 : IOMbufFree(m); +} + +//--------------------------------------------------------------------------- +// Remove a single packet from the head of the queue. + +struct mbuf * IOPacketQueue::dequeue() +{ + return IOMbufQueueDequeue(_queue); +} + +//--------------------------------------------------------------------------- +// Remove all packets from the queue and return the chain of packet(s). + +struct mbuf * IOPacketQueue::dequeueAll() +{ + return IOMbufQueueDequeueAll(_queue); +} + +//--------------------------------------------------------------------------- +// Remove all packets from the queue and put them back to the free mbuf +// pool. The size of the queue will be cleared to zero. + +UInt32 IOPacketQueue::flush() +{ + return IOMbufFree(IOMbufQueueDequeueAll(_queue)); +} + +//--------------------------------------------------------------------------- +// Locked forms of prepend/enqueue/dequeue/dequeueAll methods. +// A spinlock will enforce mutually exclusive queue access. 
+ +void IOPacketQueue::lockPrepend(struct mbuf * m) +{ + LOCK; + IOMbufQueuePrepend(_queue, m); + UNLOCK; +} + +bool IOPacketQueue::lockEnqueue(struct mbuf * m) +{ + bool ok; + LOCK; + ok = IOMbufQueueEnqueue(_queue, m); + UNLOCK; + return ok; +} + +UInt32 IOPacketQueue::lockEnqueueWithDrop(struct mbuf * m) +{ + bool ok; + LOCK; + ok = IOMbufQueueEnqueue(_queue, m); + UNLOCK; + return ok ? 0 : IOMbufFree(m); +} + +struct mbuf * IOPacketQueue::lockDequeue() +{ + struct mbuf * m; + LOCK; + m = IOMbufQueueDequeue(_queue); + UNLOCK; + return m; +} + +struct mbuf * IOPacketQueue::lockDequeueAll() +{ + struct mbuf * m; + LOCK; + m = IOMbufQueueDequeueAll(_queue); + UNLOCK; + return m; +} + +UInt32 IOPacketQueue::lockFlush() +{ + struct mbuf * m; + LOCK; + m = IOMbufQueueDequeueAll(_queue); + UNLOCK; + return IOMbufFree(m); +} diff --git a/iokit/Families/IOPCIBus/IOPCIBridge.cpp b/iokit/Families/IOPCIBus/IOPCIBridge.cpp new file mode 100644 index 000000000..3e990785a --- /dev/null +++ b/iokit/Families/IOPCIBus/IOPCIBridge.cpp @@ -0,0 +1,1187 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 23 Nov 98 sdouglas created from objc version. + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern "C" { +#include +}; + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndAbstractStructors( IOPCIBridge, IOService ) +OSMetaClassDefineReservedUnused(IOPCIBridge, 0); +OSMetaClassDefineReservedUnused(IOPCIBridge, 1); +OSMetaClassDefineReservedUnused(IOPCIBridge, 2); +OSMetaClassDefineReservedUnused(IOPCIBridge, 3); +OSMetaClassDefineReservedUnused(IOPCIBridge, 4); +OSMetaClassDefineReservedUnused(IOPCIBridge, 5); +OSMetaClassDefineReservedUnused(IOPCIBridge, 6); +OSMetaClassDefineReservedUnused(IOPCIBridge, 7); +OSMetaClassDefineReservedUnused(IOPCIBridge, 8); +OSMetaClassDefineReservedUnused(IOPCIBridge, 9); +OSMetaClassDefineReservedUnused(IOPCIBridge, 10); +OSMetaClassDefineReservedUnused(IOPCIBridge, 11); +OSMetaClassDefineReservedUnused(IOPCIBridge, 12); +OSMetaClassDefineReservedUnused(IOPCIBridge, 13); +OSMetaClassDefineReservedUnused(IOPCIBridge, 14); +OSMetaClassDefineReservedUnused(IOPCIBridge, 15); +OSMetaClassDefineReservedUnused(IOPCIBridge, 16); +OSMetaClassDefineReservedUnused(IOPCIBridge, 17); +OSMetaClassDefineReservedUnused(IOPCIBridge, 18); +OSMetaClassDefineReservedUnused(IOPCIBridge, 19); +OSMetaClassDefineReservedUnused(IOPCIBridge, 20); +OSMetaClassDefineReservedUnused(IOPCIBridge, 21); +OSMetaClassDefineReservedUnused(IOPCIBridge, 22); +OSMetaClassDefineReservedUnused(IOPCIBridge, 23); +OSMetaClassDefineReservedUnused(IOPCIBridge, 24); +OSMetaClassDefineReservedUnused(IOPCIBridge, 25); +OSMetaClassDefineReservedUnused(IOPCIBridge, 26); +OSMetaClassDefineReservedUnused(IOPCIBridge, 27); +OSMetaClassDefineReservedUnused(IOPCIBridge, 28); 
+OSMetaClassDefineReservedUnused(IOPCIBridge, 29); +OSMetaClassDefineReservedUnused(IOPCIBridge, 30); +OSMetaClassDefineReservedUnused(IOPCIBridge, 31); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// 1 log, 2 disable DT +int gIOPCIDebug = 0; + +#ifdef __I386__ +static void setupIntelPIC(IOPCIDevice * nub); +#endif + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// stub driver has two power states, off and on + +enum { kIOPCIBridgePowerStateCount = 2 }; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOPCIBridge::start( IOService * provider ) +{ + static const IOPMPowerState powerStates[ kIOPCIBridgePowerStateCount ] = { + { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 1, IOPMPowerOn, IOPMPowerOn, IOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 } + }; + + if( !super::start( provider)) + return( false); + + // empty ranges to start + bridgeMemoryRanges = IORangeAllocator::withRange( 0, 1, 8, + IORangeAllocator::kLocking ); + assert( bridgeMemoryRanges ); + setProperty( "Bridge Memory Ranges", bridgeMemoryRanges ); + + bridgeIORanges = IORangeAllocator::withRange( 0, 1, 8, + IORangeAllocator::kLocking ); + assert( bridgeIORanges ); + setProperty( "Bridge IO Ranges", bridgeIORanges ); + + if( !configure( provider)) + return( false); + + // initialize superclass variables + PMinit(); + // register as controlling driver + registerPowerDriver( this, (IOPMPowerState *) powerStates, + kIOPCIBridgePowerStateCount); + // join the tree + provider->joinPMtree( this); + // clamp power on + temporaryPowerClampOn(); + + probeBus( provider, firstBusNum() ); + + return( true ); +} + +IOReturn IOPCIBridge::setDevicePowerState( IOPCIDevice * device, + unsigned long powerState ) +{ + if( powerState) + return( restoreDeviceState( device)); + else + return( saveDeviceState( device)); +} + +enum { kSavedConfigSize = 64 }; +enum { kSavedConfigs = 16 }; + +IOReturn IOPCIBridge::saveDeviceState( 
IOPCIDevice * device, + IOOptionBits options = 0 ) +{ + int i; + + if( !device->savedConfig) + device->savedConfig = IONew( UInt32, kSavedConfigSize ); + if( !device->savedConfig) + return( kIOReturnNotReady ); + + for( i = 1; i < kSavedConfigs; i++) + device->savedConfig[i] = device->configRead32( i * 4 ); + + return( kIOReturnSuccess ); +} + +IOReturn IOPCIBridge::restoreDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ) +{ + int i; + + if( !device->savedConfig) + return( kIOReturnNotReady ); + + for( i = 2; i < kSavedConfigs; i++) + device->configWrite32( i * 4, device->savedConfig[ i ]); + + device->configWrite32( kIOPCIConfigCommand, device->savedConfig[1]); + + IODelete( device->savedConfig, UInt32, kSavedConfigSize); + device->savedConfig = 0; + + return( kIOReturnSuccess ); +} + + +bool IOPCIBridge::configure( IOService * provider ) +{ + return( true ); +} + +static SInt32 PCICompare( UInt32 /* cellCount */, UInt32 cleft[], UInt32 cright[] ) +{ + IOPCIPhysicalAddress * left = (IOPCIPhysicalAddress *) cleft; + IOPCIPhysicalAddress * right = (IOPCIPhysicalAddress *) cright; + static const UInt8 spacesEq[] = { 0, 1, 2, 2 }; + + if( spacesEq[ left->physHi.s.space ] != spacesEq[ right->physHi.s.space ]) + return( -1); + + return( left->physLo - right->physLo ); +} + +void IOPCIBridge::nvLocation( IORegistryEntry * entry, + UInt8 * busNum, UInt8 * deviceNum, UInt8 * functionNum ) +{ + IOPCIDevice * nub; + + nub = OSDynamicCast( IOPCIDevice, entry ); + assert( nub ); + + *busNum = nub->space.s.busNum; + *deviceNum = nub->space.s.deviceNum; + *functionNum = nub->space.s.functionNum; +} + +void IOPCIBridge::spaceFromProperties( OSDictionary * propTable, + IOPCIAddressSpace * space ) +{ + OSData * regProp; + IOPCIAddressSpace * inSpace; + + space->bits = 0; + + if( (regProp = (OSData *) propTable->getObject("reg"))) { + + inSpace = (IOPCIAddressSpace *) regProp->getBytesNoCopy(); + space->s.busNum = inSpace->s.busNum; + space->s.deviceNum = 
inSpace->s.deviceNum; + space->s.functionNum = inSpace->s.functionNum; + } +} + +IORegistryEntry * IOPCIBridge::findMatching( OSIterator * kids, + IOPCIAddressSpace space ) +{ + IORegistryEntry * found = 0; + IOPCIAddressSpace regSpace; + + if( kids) { + kids->reset(); + while( (0 == found) + && (found = (IORegistryEntry *) kids->getNextObject())) { + + spaceFromProperties( found->getPropertyTable(), ®Space); + if( space.bits != regSpace.bits) + found = 0; + } + } + return( found ); +} + +OSDictionary * IOPCIBridge::constructProperties( IOPCIAddressSpace space ) +{ + OSDictionary * propTable; + UInt32 value; + UInt8 byte; + UInt16 vendor, device; + OSData * prop; + const char * name; + const OSSymbol * nameProp; + char * nameStr; + char * compatBuf; + char * nextCompat; + + struct IOPCIGenericNames { + const char * name; + UInt32 mask; + UInt32 classCode; + }; + static const IOPCIGenericNames genericNames[] = { + { "display", 0xffffff, 0x000100 }, + { "scsi", 0xffff00, 0x010000 }, + { "ethernet", 0xffff00, 0x020000 }, + { "display", 0xff0000, 0x030000 }, + { "pci-bridge", 0xffff00, 0x060400 }, + { 0, 0, 0 } + }; + const IOPCIGenericNames * nextName; + + compatBuf = (char *) IOMalloc( 256 ); + + propTable = OSDictionary::withCapacity( 8 ); + + if( compatBuf && propTable) { + + prop = OSData::withBytes( &space, sizeof( space) ); + if( prop) { + propTable->setObject("reg", prop ); + prop->release(); + } + + value = configRead32( space, kIOPCIConfigVendorID ); + vendor = value; + device = value >> 16; + + prop = OSData::withBytes( &vendor, 2 ); + if( prop) { + propTable->setObject("vendor-id", prop ); + prop->release(); + } + + prop = OSData::withBytes( &device, 2 ); + if( prop) { + propTable->setObject("device-id", prop ); + prop->release(); + } + + value = configRead32( space, kIOPCIConfigRevisionID ); + byte = value & 0xff; + prop = OSData::withBytes( &byte, 1 ); + if( prop) { + propTable->setObject("revision-id", prop ); + prop->release(); + } + + // make generic 
name + value >>= 8; + name = 0; + for( nextName = genericNames; + (0 == name) && nextName->name; + nextName++ ) { + if( (value & nextName->mask) == nextName->classCode) + name = nextName->name; + } + + // start compatible list + nextCompat = compatBuf; + sprintf( nextCompat, "pci%x,%x", vendor, device); + nameStr = nextCompat; + + value = configRead32( space, kIOPCIConfigSubSystemVendorID ); + if( value) { + vendor = value; + device = value >> 16; + + prop = OSData::withBytes( &vendor, 2 ); + if( prop) { + propTable->setObject("subsystem-vendor-id", prop ); + prop->release(); + } + prop = OSData::withBytes( &device, 2 ); + if( prop) { + propTable->setObject("subsystem-id", prop ); + prop->release(); + } + + nextCompat += strlen( nextCompat ) + 1; + sprintf( nextCompat, "pci%x,%x", vendor, device); + nameStr = nextCompat; + } + + nextCompat += strlen( nextCompat ) + 1; + prop = OSData::withBytes( compatBuf, nextCompat - compatBuf); + if( prop) { + propTable->setObject( "compatible", prop ); + prop->release(); + } + + if( 0 == name) + name = nameStr; + + nameProp = OSSymbol::withCString( name ); + if( nameProp) { + propTable->setObject( gIONameKey, (OSSymbol *) nameProp); + nameProp->release(); + } + } + + if( compatBuf) + IOFree( compatBuf, 256 ); + + return( propTable ); +} + +IOPCIDevice * IOPCIBridge::createNub( OSDictionary * from ) +{ + return( new IOPCIDevice ); +} + +bool IOPCIBridge::initializeNub( IOPCIDevice * nub, + OSDictionary * from ) +{ + spaceFromProperties( from, &nub->space); + nub->parent = this; + if( ioDeviceMemory()) + nub->ioMap = ioDeviceMemory()->map(); + + return( true ); +} + +bool IOPCIBridge::publishNub( IOPCIDevice * nub, UInt32 /* index */ ) +{ + char location[ 24 ]; + bool ok; + OSData * data; + OSData * driverData; + UInt32 *regData, expRomReg; + IOMemoryMap * memoryMap; + IOVirtualAddress virtAddr; + + if( nub) { + if( nub->space.s.functionNum) + sprintf( location, "%X,%X", nub->space.s.deviceNum, + nub->space.s.functionNum ); + 
else + sprintf( location, "%X", nub->space.s.deviceNum ); + nub->setLocation( location ); + IODTFindSlotName( nub, nub->space.s.deviceNum ); + + // Look for a "driver-reg,AAPL,MacOSX,PowerPC" property. + if( (data = (OSData *)nub->getProperty( "driver-reg,AAPL,MacOSX,PowerPC"))) { + if( data->getLength() == (2 * sizeof(UInt32))) { + regData = (UInt32 *)data->getBytesNoCopy(); + + getNubResources(nub); + memoryMap = nub->mapDeviceMemoryWithRegister(kIOPCIConfigExpansionROMBase); + if( memoryMap != 0) { + virtAddr = memoryMap->getVirtualAddress(); + virtAddr += regData[0]; + + nub->setMemoryEnable(true); + + expRomReg = nub->configRead32(kIOPCIConfigExpansionROMBase); + nub->configWrite32(kIOPCIConfigExpansionROMBase, expRomReg | 1); + + driverData = OSData::withBytesNoCopy((void *)virtAddr, regData[1]); + if ( driverData != 0) { + gIOCatalogue->addExtensionsFromArchive(driverData); + + driverData->release(); + } + + nub->configWrite32(kIOPCIConfigExpansionROMBase, expRomReg); + + nub->setMemoryEnable(false); + + memoryMap->release(); + } + } + } + + ok = nub->attach( this ); + if( ok) + nub->registerService(); + } else + ok = false; + + return( ok ); +} + +void IOPCIBridge::publishNubs( OSIterator * kids, UInt32 index ) +{ + IORegistryEntry * found; + IOPCIDevice * nub; + OSDictionary * propTable; + + if( kids) { + kids->reset(); + while( (found = (IORegistryEntry *) kids->getNextObject())) { + + propTable = found->getPropertyTable(); + nub = createNub( propTable ); + if( !nub) + continue; + if( !initializeNub( nub, propTable)) + continue; + if( !nub->init( found, gIODTPlane)) + continue; + + publishNub( nub, index++ ); + +if( 1 & gIOPCIDebug) + IOLog("%08lx = 0:%08lx 4:%08lx ", nub->space.bits, + nub->configRead32(kIOPCIConfigVendorID), + nub->configRead32(kIOPCIConfigCommand) ); + + } + } +} + +UInt8 IOPCIBridge::firstBusNum( void ) +{ + return( 0 ); +} + +UInt8 IOPCIBridge::lastBusNum( void ) +{ + return( 255 ); +} + +void IOPCIBridge::probeBus( IOService * 
provider, UInt8 busNum ) +{ + IORegistryEntry * regEntry; + OSDictionary * propTable; + IOPCIDevice * nub = 0; + IOPCIAddressSpace space; + UInt32 vendor; + UInt8 scanDevice, scanFunction, lastFunction; + OSIterator * kidsIter; + UInt32 index = 0; + + IODTSetResolving( provider, PCICompare, nvLocation ); + + if( 2 & gIOPCIDebug) + kidsIter = 0; + else + kidsIter = provider->getChildIterator( gIODTPlane ); + + space.bits = 0; + space.s.busNum = busNum; + + for( scanDevice = 0; scanDevice <= 31; scanDevice++ ) { + + lastFunction = 0; + for( scanFunction = 0; scanFunction <= lastFunction; scanFunction++ ) { + + space.s.deviceNum = scanDevice; + space.s.functionNum = scanFunction; + + if( (regEntry = findMatching( kidsIter, space ))) { + + + } else { + /* probe - should guard exceptions */ +#ifdef __ppc__ + // DEC bridge really needs safe probe + continue; +#endif + vendor = configRead32( space, kIOPCIConfigVendorID ); + vendor &= 0x0000ffff; + if( (0 == vendor) || (0xffff == vendor)) + continue; + + // look in function 0 for multi function flag + if( (0 == scanFunction) + && (0x00800000 & configRead32( space, + kIOPCIConfigCacheLineSize ))) + lastFunction = 7; + + propTable = constructProperties( space ); + if( propTable + && (nub = createNub( propTable)) + && (initializeNub( nub, propTable)) + && nub->init( propTable )) { +#ifdef __I386__ + setupIntelPIC(nub); +#endif + publishNub( nub, index++); + } + } + } + } + + if( kidsIter) { + publishNubs( kidsIter, index ); + kidsIter->release(); + } +} + +bool IOPCIBridge::addBridgeMemoryRange( IOPhysicalAddress start, + IOPhysicalLength length, bool host ) +{ + IORangeAllocator * platformRanges; + bool ok = true; + + if( host ) { + + platformRanges = getPlatform()->getPhysicalRangeAllocator(); + assert( platformRanges ); + + // out of the platform + ok = platformRanges->allocateRange( start, length ); + if( !ok) + kprintf("%s: didn't get host range (%08lx:%08lx)\n", getName(), + start, length); + } + + // and into the 
bridge + bridgeMemoryRanges->deallocate( start, length ); + + return( ok ); +} + +bool IOPCIBridge::addBridgeIORange( IOByteCount start, IOByteCount length ) +{ + bool ok = true; + + // into the bridge + bridgeIORanges->deallocate( start, length ); + + return( ok ); +} + +bool IOPCIBridge::constructRange( IOPCIAddressSpace * flags, + IOPhysicalAddress phys, + IOPhysicalLength len, + OSArray * array ) +{ + IODeviceMemory * range; + IODeviceMemory * ioMemory; + IORangeAllocator * bridgeRanges; + bool ok; + + if( !array) + return( false ); + + if( kIOPCIIOSpace == flags->s.space) { + + bridgeRanges = bridgeIORanges; + if( (ioMemory = ioDeviceMemory())) { + + phys &= 0x00ffffff; // seems bogus + range = IODeviceMemory::withSubRange( ioMemory, phys, len ); + if( range == 0) + /* didn't fit */ + range = IODeviceMemory::withRange( + phys + ioMemory->getPhysicalAddress(), len ); + + } else + range = 0; + + } else { + bridgeRanges = bridgeMemoryRanges; + range = IODeviceMemory::withRange( phys, len ); + } + + + if( range) { + +#ifdef i386 + // Do nothing for Intel -- I/O ports are not accessed through + // memory on this platform, but through I/O port instructions +#else + + ok = bridgeRanges->allocateRange( phys, len ); + if( !ok) + IOLog("%s: bad range %d(%08lx:%08lx)\n", getName(), flags->s.space, + phys, len); +#endif + + range->setTag( flags->bits ); + ok = array->setObject( range ); + range->release(); + + } else + ok = false; + + return( ok ); +} + + +IOReturn IOPCIBridge::getDTNubAddressing( IOPCIDevice * regEntry ) +{ + OSArray * array; + IORegistryEntry * parentEntry; + OSData * addressProperty; + IOPhysicalAddress phys; + IOPhysicalLength len; + UInt32 cells = 5; + int i, num; + UInt32 * reg; + + addressProperty = (OSData *) regEntry->getProperty( "assigned-addresses" ); + if( 0 == addressProperty) + return( kIOReturnSuccess ); + + parentEntry = regEntry->getParentEntry( gIODTPlane ); + if( 0 == parentEntry) + return( kIOReturnBadArgument ); + + array = 
OSArray::withCapacity( 1 ); + if( 0 == array) + return( kIOReturnNoMemory ); + + reg = (UInt32 *) addressProperty->getBytesNoCopy(); + num = addressProperty->getLength() / (4 * cells); + + for( i = 0; i < num; i++) { + + if( IODTResolveAddressCell( parentEntry, reg, &phys, &len )) + + constructRange( (IOPCIAddressSpace *) reg, phys, len, array ); + + reg += cells; + } + + if( array->getCount()) + regEntry->setProperty( gIODeviceMemoryKey, array); + + array->release(); + + return( kIOReturnSuccess); +} + +IOReturn IOPCIBridge::getNubAddressing( IOPCIDevice * nub ) +{ + OSArray * array; + IOPhysicalAddress phys; + IOPhysicalLength len; + UInt32 save, value; + IOPCIAddressSpace reg; + UInt8 regNum; + bool memEna, ioEna; + boolean_t s; + + value = nub->configRead32( kIOPCIConfigVendorID ); + if( 0x0003106b == value ) // control doesn't play well + return( kIOReturnSuccess ); + + // only header type 0 + value = nub->configRead32( kIOPCIConfigCacheLineSize ); + if( value & 0x007f0000) + return( kIOReturnSuccess ); + + array = OSArray::withCapacity( 1 ); + if( 0 == array) + return( kIOReturnNoMemory ); + + for( regNum = 0x10; regNum < 0x28; regNum += 4) { + + // begin scary + s = ml_set_interrupts_enabled(FALSE); + memEna = nub->setMemoryEnable( false ); + ioEna = nub->setIOEnable( false ); + + save = nub->configRead32( regNum ); + + nub->configWrite32( regNum, 0xffffffff ); + value = nub->configRead32( regNum ); + + nub->configWrite32( regNum, save ); + nub->setMemoryEnable( memEna ); + nub->setIOEnable( ioEna ); + ml_set_interrupts_enabled( s ); + // end scary + + if( 0 == value) + continue; + + reg = nub->space; + reg.s.registerNum = regNum; + + if( value & 1) { + reg.s.space = kIOPCIIOSpace; + + } else { + reg.s.prefetch = (0 != (value & 8)); + + switch( value & 6) { + case 2: /* below 1Mb */ + reg.s.t = 1; + /* fall thru */ + case 0: /* 32-bit mem */ + case 6: /* reserved */ + reg.s.space = kIOPCI32BitMemorySpace; + break; + + case 4: /* 64-bit mem */ + reg.s.space = 
kIOPCI64BitMemorySpace; + regNum += 4; + break; + } + } + + value &= 0xfffffff0; + phys = IOPhysical32( 0, save & value ); + len = IOPhysical32( 0, -value ); + +if( 1 & gIOPCIDebug) + IOLog("Space %08lx : %08lx, %08lx\n", reg.bits, phys, len); + + constructRange( ®, phys, len, array ); + } + + if( array->getCount()) + nub->setProperty( gIODeviceMemoryKey, array); + + array->release(); + + return( kIOReturnSuccess); +} + +bool IOPCIBridge::isDTNub( IOPCIDevice * nub ) +{ + return( 0 != nub->getParentEntry( gIODTPlane )); +} + +IOReturn IOPCIBridge::getNubResources( IOService * service ) +{ + IOPCIDevice * nub = (IOPCIDevice *) service; + IOReturn err; + + if( service->getDeviceMemory()) + return( kIOReturnSuccess ); + + if( isDTNub( nub)) + err = getDTNubAddressing( nub ); + else + err = getNubAddressing( nub ); + + return( err); +} + +bool IOPCIBridge::matchKeys( IOPCIDevice * nub, const char * keys, + UInt32 defaultMask, UInt8 regNum ) +{ + const char * next; + UInt32 mask, value, reg; + bool found = false; + + do { + value = strtoul( keys, (char **) &next, 16); + if( next == keys) + break; + + while( (*next) == ' ') + next++; + + if( (*next) == '&') + mask = strtoul( next + 1, (char **) &next, 16); + else + mask = defaultMask; + + reg = nub->configRead32( regNum ); + found = ((value & mask) == (reg & mask)); + keys = next; + + } while( !found); + + return( found ); +} + + +bool IOPCIBridge::pciMatchNub( IOPCIDevice * nub, + OSDictionary * table, + SInt32 * score ) +{ + OSString * prop; + const char * keys; + bool match = true; + UInt8 regNum; + int i; + + struct IOPCIMatchingKeys { + const char * propName; + UInt8 regs[ 4 ]; + UInt32 defaultMask; + }; + IOPCIMatchingKeys * look; + static IOPCIMatchingKeys matching[] = { + { kIOPCIMatchKey, + { 0x00 + 1, 0x2c }, 0xffffffff }, + { kIOPCIPrimaryMatchKey, + { 0x00 }, 0xffffffff }, + { kIOPCISecondaryMatchKey, + { 0x2c }, 0xffffffff }, + { kIOPCIClassMatchKey, + { 0x08 }, 0xffffff00 }}; + + for( look = matching; + 
(match && (look < (&matching[4]))); + look++ ) { + + prop = (OSString *) table->getObject( look->propName ); + if( prop) { + keys = prop->getCStringNoCopy(); + match = false; + for( i = 0; + ((false == match) && (i < 4)); + i++ ) { + + regNum = look->regs[ i ]; + match = matchKeys( nub, keys, + look->defaultMask, regNum & 0xfc ); + if( 0 == (1 & regNum)) + break; + } + } + } + + return( match ); +} + +bool IOPCIBridge::matchNubWithPropertyTable( IOService * nub, + OSDictionary * table, + SInt32 * score ) +{ + bool matches; + + matches = pciMatchNub( (IOPCIDevice *) nub, table, score); + + return( matches ); +} + +bool IOPCIBridge::compareNubName( const IOService * nub, + OSString * name, OSString ** matched ) const +{ + return( IODTCompareNubName( nub, name, matched )); +} + +UInt32 IOPCIBridge::findPCICapability( IOPCIAddressSpace space, + UInt8 capabilityID, UInt8 * found ) +{ + UInt32 data = 0; + UInt8 offset = 0; + + if( found) + *found = 0; + + if( 0 == ((kIOPCIStatusCapabilities << 16) + & (configRead32( space, kIOPCIConfigCommand)))) + return( 0 ); + + offset = configRead32( space, kIOPCIConfigCapabilitiesPtr ); + while( offset) { + data = configRead32( space, offset ); + if( capabilityID == (data & 0xff)) { + if( found) + *found = offset; + break; + } + offset = (data >> 8) & 0xfc; + } + + return( data ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOPCIBridge::createAGPSpace( IOAGPDevice * master, + IOOptionBits options, + IOPhysicalAddress * address, + IOPhysicalLength * length ) +{ + return( kIOReturnUnsupported ); +} + +IOReturn IOPCIBridge::destroyAGPSpace( IOAGPDevice * master ) +{ + return( kIOReturnUnsupported ); +} + +IORangeAllocator * IOPCIBridge::getAGPRangeAllocator( IOAGPDevice * master ) +{ + return( 0 ); +} + +IOOptionBits IOPCIBridge::getAGPStatus( IOAGPDevice * master, + IOOptionBits options = 0 ) +{ + return( 0 ); +} + +IOReturn IOPCIBridge::commitAGPMemory( IOAGPDevice * master, + 
IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options ) +{ + return( kIOReturnUnsupported ); +} + +IOReturn IOPCIBridge::releaseAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options ) +{ + return( kIOReturnUnsupported ); +} + +IOReturn IOPCIBridge::resetAGPDevice( IOAGPDevice * master, + IOOptionBits options = 0 ) +{ + return( kIOReturnUnsupported ); +} + +IOReturn IOPCIBridge::getAGPSpace( IOAGPDevice * master, + IOPhysicalAddress * address, + IOPhysicalLength * length ) +{ + return( kIOReturnUnsupported ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOPCIBridge + +OSDefineMetaClassAndStructors(IOPCI2PCIBridge, IOPCIBridge) +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 0); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 1); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 2); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 3); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 4); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 5); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 6); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 7); +OSMetaClassDefineReservedUnused(IOPCI2PCIBridge, 8); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOService * IOPCI2PCIBridge::probe( IOService * provider, + SInt32 * score ) +{ + + if( 0 == (bridgeDevice = OSDynamicCast( IOPCIDevice, provider))) + return( 0 ); + + *score -= 100; + + return( this ); +} + +enum { + kPCI2PCIBusNumbers = 0x18, + kPCI2PCIIORange = 0x1c, + kPCI2PCIMemoryRange = 0x20, + kPCI2PCIPrefetchMemoryRange = 0x24, + kPCI2PCIUpperIORange = 0x30 +}; + +bool IOPCI2PCIBridge::configure( IOService * provider ) +{ + UInt32 end; + UInt32 start; + bool ok; + + end = bridgeDevice->configRead32( kPCI2PCIMemoryRange ); + if( end ) { + start = (end & 0xfff0) << 16; + end |= 0x000fffff; + ok = addBridgeMemoryRange( start, end - 
start + 1, false ); + } + + end = bridgeDevice->configRead32( kPCI2PCIPrefetchMemoryRange ); + if( end) { + start = (end & 0xfff0) << 16; + end |= 0x000fffff; + ok = addBridgeMemoryRange( start, end - start + 1, false ); + } + + end = bridgeDevice->configRead32( kPCI2PCIIORange ); + if( end) { + start = (end & 0xf0) << 8; + end = (end & 0xffff) | 0xfff; + ok = addBridgeIORange( start, end - start + 1 ); + } + + saveBridgeState(); + + return( super::configure( provider )); +} + +void IOPCI2PCIBridge::saveBridgeState( void ) +{ + long cnt; + + for (cnt = 0; cnt < kIOPCIBridgeRegs; cnt++) { + bridgeState[cnt] = bridgeDevice->configRead32(cnt * 4); + } +} + +void IOPCI2PCIBridge::restoreBridgeState( void ) +{ + long cnt; + + for (cnt = 0; cnt < kIOPCIBridgeRegs; cnt++) { + bridgeDevice->configWrite32(cnt * 4, bridgeState[cnt]); + } +} + +UInt8 IOPCI2PCIBridge::firstBusNum( void ) +{ + UInt32 value; + + value = bridgeDevice->configRead32( kPCI2PCIBusNumbers ); + + return( (value >> 8) & 0xff ); +} + +UInt8 IOPCI2PCIBridge::lastBusNum( void ) +{ + UInt32 value; + + value = bridgeDevice->configRead32( kPCI2PCIBusNumbers ); + + return( (value >> 16) & 0xff ); +} + +IOPCIAddressSpace IOPCI2PCIBridge::getBridgeSpace( void ) +{ + return( bridgeDevice->space ); +} + +UInt32 IOPCI2PCIBridge::configRead32( IOPCIAddressSpace space, + UInt8 offset ) +{ + return( bridgeDevice->configRead32( space, offset )); +} + +void IOPCI2PCIBridge::configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) +{ + bridgeDevice->configWrite32( space, offset, data ); +} + +UInt16 IOPCI2PCIBridge::configRead16( IOPCIAddressSpace space, + UInt8 offset ) +{ + return( bridgeDevice->configRead16( space, offset )); +} + +void IOPCI2PCIBridge::configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ) +{ + bridgeDevice->configWrite16( space, offset, data ); +} + +UInt8 IOPCI2PCIBridge::configRead8( IOPCIAddressSpace space, + UInt8 offset ) +{ + return( bridgeDevice->configRead8( 
space, offset )); +} + +void IOPCI2PCIBridge::configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) +{ + bridgeDevice->configWrite8( space, offset, data ); +} + +IODeviceMemory * IOPCI2PCIBridge::ioDeviceMemory( void ) +{ + return( bridgeDevice->ioDeviceMemory()); +} + +bool IOPCI2PCIBridge::publishNub( IOPCIDevice * nub, UInt32 index ) +{ + if( nub) + nub->setProperty( "IOChildIndex" , index, 32 ); + + return( super::publishNub( nub, index ) ); +} + +#ifdef __I386__ + +static void setupIntelPIC(IOPCIDevice *nub) +{ + OSDictionary *propTable; + OSArray *controller; + OSArray *specifier; + OSData *tmpData; + long irq; + extern OSSymbol *gIntelPICName; + + propTable = nub->getPropertyTable(); + if (!propTable) return; + + do { + // Create the interrupt specifer array. + specifier = OSArray::withCapacity(1); + if ( !specifier ) + break; + irq = nub->configRead32(kIOPCIConfigInterruptLine) & 0xf; + tmpData = OSData::withBytes(&irq, sizeof(irq)); + if ( tmpData ) { + specifier->setObject(tmpData); + tmpData->release(); + } + + controller = OSArray::withCapacity(1); + if ( controller ) { + controller->setObject(gIntelPICName); + + // Put the two arrays into the property table. + propTable->setObject(gIOInterruptControllersKey, controller); + controller->release(); + } + propTable->setObject(gIOInterruptSpecifiersKey, specifier); + specifier->release(); + } while( false ); +} + +#endif diff --git a/iokit/Families/IOPCIBus/IOPCIDevice.cpp b/iokit/Families/IOPCIBus/IOPCIDevice.cpp new file mode 100644 index 000000000..0212aa67e --- /dev/null +++ b/iokit/Families/IOPCIBus/IOPCIDevice.cpp @@ -0,0 +1,473 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created from objc version. + */ + +#include + +#include +#include +#include +#include + +#include +#include + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndStructors(IOPCIDevice, IOService) +OSMetaClassDefineReservedUnused(IOPCIDevice, 0); +OSMetaClassDefineReservedUnused(IOPCIDevice, 1); +OSMetaClassDefineReservedUnused(IOPCIDevice, 2); +OSMetaClassDefineReservedUnused(IOPCIDevice, 3); +OSMetaClassDefineReservedUnused(IOPCIDevice, 4); +OSMetaClassDefineReservedUnused(IOPCIDevice, 5); +OSMetaClassDefineReservedUnused(IOPCIDevice, 6); +OSMetaClassDefineReservedUnused(IOPCIDevice, 7); +OSMetaClassDefineReservedUnused(IOPCIDevice, 8); +OSMetaClassDefineReservedUnused(IOPCIDevice, 9); +OSMetaClassDefineReservedUnused(IOPCIDevice, 10); +OSMetaClassDefineReservedUnused(IOPCIDevice, 11); +OSMetaClassDefineReservedUnused(IOPCIDevice, 12); +OSMetaClassDefineReservedUnused(IOPCIDevice, 13); +OSMetaClassDefineReservedUnused(IOPCIDevice, 14); +OSMetaClassDefineReservedUnused(IOPCIDevice, 15); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// stub driver has two power states, off and on + +enum { kIOPCIDevicePowerStateCount = 2 }; + +/* * * * * 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// attach +// +// We clamp power on. The effect is +// to prevent system sleep. If a driver is loaded which can +// power manage the device, it will become our child and we +// will remove the clamp. This prevents the system +// from sleeping when there are non-power-managed +// PCI cards installed. +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOPCIDevice::attach( IOService * provider ) +{ + static const IOPMPowerState powerStates[ kIOPCIDevicePowerStateCount ] = { + { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 1, IOPMPowerOn, IOPMPowerOn, IOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 } + }; + + // initialize superclass variables + PMinit(); + // register as controlling driver + registerPowerDriver( this, (IOPMPowerState *) powerStates, + kIOPCIDevicePowerStateCount ); + // join the tree + provider->joinPMtree( this); + + // clamp power on if this is a slot device + slotNameProperty = provider->getProperty ("AAPL,slot-name"); + if (slotNameProperty != NULL) + changePowerStateToPriv (1); + + return super::attach(provider); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// maxCapabilityForDomainState +// +// If the power domain is supplying power, the device +// can be on. If there is no power it can only be off. +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +unsigned long IOPCIDevice::maxCapabilityForDomainState( + IOPMPowerFlags domainState ) +{ + if( domainState & IOPMPowerOn ) + return( kIOPCIDevicePowerStateCount - 1); + else + return( 0); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// initialPowerStateForDomainState +// +// This is our first information about the power domain state. +// If power is on in the new state, the device is on. +// If domain power is off, the device is also off. 
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +unsigned long IOPCIDevice::initialPowerStateForDomainState( + IOPMPowerFlags domainState ) +{ + if( domainState & IOPMPowerOn) + return( kIOPCIDevicePowerStateCount - 1); + else + return( 0); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// powerStateForDomainState +// +// The power domain may be changing state. +// If power is on in the new state, the device will be on. +// If domain power is off, the device will be off. +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +unsigned long IOPCIDevice::powerStateForDomainState( + IOPMPowerFlags domainState ) +{ + if( domainState & IOPMPowerOn) + return( pm_vars->myCurrentState); + else + return( 0); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// setPowerState +// +// Saves and restores PCI config space if power is going down or up. +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOPCIDevice::setPowerState( unsigned long powerState, + IOService * whatDevice ) +{ + parent->setDevicePowerState( this, powerState ); + + return( IOPMAckImplied); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// addPowerChild +// +// +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOPCIDevice::addPowerChild ( IOService * theChild ) +{ + IOReturn result = IOPMNoErr; + + result = super::addPowerChild (theChild); + + if ((slotNameProperty != NULL) && (result == IOPMNoErr)) + changePowerStateToPriv(0); + + return result; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +// joinPMtree +// +// A policy-maker for our PCI device calls here when initializing, +// to be attached into the power management hierarchy. +// We attach this driver as our child so we can save and restore its config +// space across power cycles. 
+// +// This overrides the default function of the IOService joinPMtree. +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IOPCIDevice::joinPMtree( IOService * driver ) +{ + // hook it into the tree + pm_vars->thePlatform->PMRegisterDevice( this, driver); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOPCIDevice::matchPropertyTable( OSDictionary * table, SInt32 * score ) +{ + return( parent->matchNubWithPropertyTable( this, table, score )); +} + +bool IOPCIDevice::compareName( OSString * name, OSString ** matched = 0 ) const +{ + return( parent->compareNubName( this, name, matched )); +} + +IOReturn IOPCIDevice::getResources( void ) +{ + return( parent->getNubResources( this )); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +UInt32 IOPCIDevice::configRead32( IOPCIAddressSpace _space, + UInt8 offset ) +{ + return( parent->configRead32( _space, offset )); +} + +void IOPCIDevice::configWrite32( IOPCIAddressSpace _space, + UInt8 offset, UInt32 data ) +{ + parent->configWrite32( _space, offset, data ); +} + +UInt16 IOPCIDevice::configRead16( IOPCIAddressSpace _space, + UInt8 offset ) +{ + return( parent->configRead16( _space, offset )); +} + +void IOPCIDevice::configWrite16( IOPCIAddressSpace _space, + UInt8 offset, UInt16 data ) +{ + parent->configWrite16( _space, offset, data ); +} + +UInt8 IOPCIDevice::configRead8( IOPCIAddressSpace _space, + UInt8 offset ) +{ + return( parent->configRead8( _space, offset )); +} + +void IOPCIDevice::configWrite8( IOPCIAddressSpace _space, + UInt8 offset, UInt8 data ) +{ + parent->configWrite8( _space, offset, data ); +} + +UInt32 IOPCIDevice::configRead32( UInt8 offset ) +{ + return( parent->configRead32( space, offset )); +} + +void IOPCIDevice::configWrite32( UInt8 offset, UInt32 data ) +{ + parent->configWrite32( space, offset, data ); +} + +UInt16 IOPCIDevice::configRead16( UInt8 offset ) +{ + return( 
parent->configRead16( space, offset )); +} + +void IOPCIDevice::configWrite16( UInt8 offset, UInt16 data ) +{ + parent->configWrite16( space, offset, data ); +} + +UInt8 IOPCIDevice::configRead8( UInt8 offset ) +{ + return( parent->configRead8( space, offset )); +} + +void IOPCIDevice::configWrite8( UInt8 offset, UInt8 data ) +{ + parent->configWrite8( space, offset, data ); +} + +IOReturn IOPCIDevice::saveDeviceState( IOOptionBits options = 0 ) +{ + return( parent->saveDeviceState( this, options ) ); +} + +IOReturn IOPCIDevice::restoreDeviceState( IOOptionBits options = 0 ) +{ + return( parent->restoreDeviceState( this, options ) ); +} + +UInt32 IOPCIDevice::findPCICapability( UInt8 capabilityID, UInt8 * offset = 0 ) +{ + return( parent->findPCICapability( space, capabilityID, offset )); +} + +UInt32 IOPCIDevice::setConfigBits( UInt8 reg, UInt32 mask, UInt32 value ) +{ + UInt32 was; + UInt32 bits; + + bits = configRead32( reg ); + was = (bits & mask); + bits &= ~mask; + bits |= (value & mask); + configWrite32( reg, bits ); + + return( was ); +} + +bool IOPCIDevice::setBusMasterEnable( bool enable ) +{ + return( 0 != setConfigBits( kIOPCIConfigCommand, kIOPCICommandBusMaster, + enable ? kIOPCICommandBusMaster : 0)); +} + +bool IOPCIDevice::setMemoryEnable( bool enable ) +{ + return( 0 != setConfigBits( kIOPCIConfigCommand, kIOPCICommandMemorySpace, + enable ? kIOPCICommandMemorySpace : 0)); +} + +bool IOPCIDevice::setIOEnable( bool enable, bool /* exclusive = false */ ) +{ + // exclusive is TODO. + return( 0 != setConfigBits( kIOPCIConfigCommand, kIOPCICommandIOSpace, + enable ? 
kIOPCICommandIOSpace : 0)); +} + +UInt8 IOPCIDevice::getBusNumber( void ) +{ + return( space.s.busNum ); +} + +UInt8 IOPCIDevice::getDeviceNumber( void ) +{ + return( space.s.deviceNum ); +} + +UInt8 IOPCIDevice::getFunctionNumber( void ) +{ + return( space.s.functionNum ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IODeviceMemory * IOPCIDevice::getDeviceMemoryWithRegister( UInt8 reg ) +{ + OSArray * array; + IODeviceMemory * range; + unsigned int i = 0; + + array = (OSArray *) getProperty( gIODeviceMemoryKey); + if( 0 == array) + return( 0); + + while( (range = (IODeviceMemory *) array->getObject( i++ ))) { + if( reg == (range->getTag() & 0xff)) + break; + } + + return( range); +} + +IOMemoryMap * IOPCIDevice:: mapDeviceMemoryWithRegister( UInt8 reg, + IOOptionBits options = 0 ) +{ + IODeviceMemory * range; + IOMemoryMap * map; + + range = getDeviceMemoryWithRegister( reg ); + if( range) + map = range->map( options ); + else + map = 0; + + return( map ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IODeviceMemory * IOPCIDevice::ioDeviceMemory( void ) +{ + return( parent->ioDeviceMemory() ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOService * IOPCIDevice::matchLocation( IOService * /* client */ ) +{ + return( this ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOPCIDevice + +OSDefineMetaClassAndStructors(IOAGPDevice, IOPCIDevice) +OSMetaClassDefineReservedUnused(IOAGPDevice, 0); +OSMetaClassDefineReservedUnused(IOAGPDevice, 1); +OSMetaClassDefineReservedUnused(IOAGPDevice, 2); +OSMetaClassDefineReservedUnused(IOAGPDevice, 3); +OSMetaClassDefineReservedUnused(IOAGPDevice, 4); +OSMetaClassDefineReservedUnused(IOAGPDevice, 5); +OSMetaClassDefineReservedUnused(IOAGPDevice, 6); +OSMetaClassDefineReservedUnused(IOAGPDevice, 7); +OSMetaClassDefineReservedUnused(IOAGPDevice, 8); 
+OSMetaClassDefineReservedUnused(IOAGPDevice, 9); +OSMetaClassDefineReservedUnused(IOAGPDevice, 10); +OSMetaClassDefineReservedUnused(IOAGPDevice, 11); +OSMetaClassDefineReservedUnused(IOAGPDevice, 12); +OSMetaClassDefineReservedUnused(IOAGPDevice, 13); +OSMetaClassDefineReservedUnused(IOAGPDevice, 14); +OSMetaClassDefineReservedUnused(IOAGPDevice, 15); +OSMetaClassDefineReservedUnused(IOAGPDevice, 16); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOAGPDevice::createAGPSpace( IOOptionBits options, + IOPhysicalAddress * address, + IOPhysicalLength * length ) +{ + return( parent->createAGPSpace( this, options, address, length )); +} + +IOReturn IOAGPDevice::destroyAGPSpace( void ) +{ + return( parent->destroyAGPSpace( this )); +} + +IORangeAllocator * IOAGPDevice::getAGPRangeAllocator( void ) +{ + return( parent->getAGPRangeAllocator( this )); +} + +IOOptionBits IOAGPDevice::getAGPStatus( IOOptionBits options = 0 ) +{ + return( parent->getAGPStatus( this, options )); +} + +IOReturn IOAGPDevice::resetAGP( IOOptionBits options = 0 ) +{ + return( parent->resetAGPDevice( this, options )); +} + +IOReturn IOAGPDevice::getAGPSpace( IOPhysicalAddress * address, + IOPhysicalLength * length ) +{ + return( parent->getAGPSpace( this, address, length )); +} + +IOReturn IOAGPDevice::commitAGPMemory( IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ) +{ + return( parent->commitAGPMemory( this, memory, agpOffset, options )); +} + +IOReturn IOAGPDevice::releaseAGPMemory( IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ) +{ + return( parent->releaseAGPMemory( this, memory, agpOffset, options )); +} + + diff --git a/iokit/Families/IOPCIBus/IOPCIDeviceI386.cpp b/iokit/Families/IOPCIBus/IOPCIDeviceI386.cpp new file mode 100644 index 000000000..172b1a2e1 --- /dev/null +++ b/iokit/Families/IOPCIBus/IOPCIDeviceI386.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 1998-2000 Apple 
Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created from objc version. 
+ */ + +#include + +#include +#include +#include +#include + +#include + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +UInt32 IOPCIDevice::ioRead32( UInt16 offset, IOMemoryMap * map = 0 ) +{ + UInt32 value; + + if( 0 == map) + map = ioMap; + + value = inl( map->getPhysicalAddress() + offset ); + + return( value ); +} + +UInt16 IOPCIDevice::ioRead16( UInt16 offset, IOMemoryMap * map = 0 ) +{ + UInt16 value; + + if( 0 == map) + map = ioMap; + + value = inw( map->getPhysicalAddress() + offset ); + + return( value ); +} + +UInt8 IOPCIDevice::ioRead8( UInt16 offset, IOMemoryMap * map = 0 ) +{ + UInt32 value; + + if( 0 == map) + map = ioMap; + + value = inb( map->getPhysicalAddress() + offset ); + + return( value ); +} + +void IOPCIDevice::ioWrite32( UInt16 offset, UInt32 value, + IOMemoryMap * map = 0 ) +{ + if( 0 == map) + map = ioMap; + + outl( map->getPhysicalAddress() + offset, value ); +} + +void IOPCIDevice::ioWrite16( UInt16 offset, UInt16 value, + IOMemoryMap * map = 0 ) +{ + if( 0 == map) + map = ioMap; + + outw( map->getPhysicalAddress() + offset, value ); +} + +void IOPCIDevice::ioWrite8( UInt16 offset, UInt8 value, + IOMemoryMap * map = 0 ) +{ + if( 0 == map) + map = ioMap; + + outb( map->getPhysicalAddress() + offset, value ); +} + diff --git a/iokit/Families/IOPCIBus/IOPCIDevicePPC.cpp b/iokit/Families/IOPCIBus/IOPCIDevicePPC.cpp new file mode 100644 index 000000000..8730695e6 --- /dev/null +++ b/iokit/Families/IOPCIBus/IOPCIDevicePPC.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created from objc version. + */ + +#include + +#include +#include + +#include +#include + +#include +#include + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +UInt32 IOPCIDevice::ioRead32( UInt16 offset, IOMemoryMap * map = 0 ) +{ + UInt32 value; + + if( 0 == map) { + map = ioMap; + if( 0 == map) + return( 0 ); + } + + value = OSReadSwapInt32( (volatile void *)map->getVirtualAddress(), offset); + eieio(); + + return( value ); +} + +UInt16 IOPCIDevice::ioRead16( UInt16 offset, IOMemoryMap * map = 0 ) +{ + UInt16 value; + + if( 0 == map) { + map = ioMap; + if( 0 == map) + return( 0 ); + } + + value = OSReadSwapInt16( (volatile void *)map->getVirtualAddress(), offset); + eieio(); + + return( value ); +} + +UInt8 IOPCIDevice::ioRead8( UInt16 offset, IOMemoryMap * map = 0 ) +{ + UInt32 value; + + if( 0 == map) { + map = ioMap; + if( 0 == map) + return( 0 ); + } + + value = ((volatile UInt8 *) map->getVirtualAddress())[ offset ]; + eieio(); + + return( value ); +} + +void IOPCIDevice::ioWrite32( UInt16 offset, UInt32 value, + IOMemoryMap * map = 0 ) +{ + if( 0 == map) { + map = ioMap; + if( 0 == map) + return; + } + + OSWriteSwapInt32( (volatile void *)map->getVirtualAddress(), offset, value); + eieio(); +} + +void IOPCIDevice::ioWrite16( UInt16 offset, UInt16 value, + IOMemoryMap * map = 0 ) +{ + if( 0 == map) { + map = ioMap; + if( 0 == 
map) + return; + } + + OSWriteSwapInt16( (volatile void *)map->getVirtualAddress(), offset, value); + eieio(); +} + +void IOPCIDevice::ioWrite8( UInt16 offset, UInt8 value, + IOMemoryMap * map = 0 ) +{ + if( 0 == map) { + map = ioMap; + if( 0 == map) + return; + } + + ((volatile UInt8 *) map->getVirtualAddress())[ offset ] = value; + eieio(); + +} + diff --git a/iokit/Families/IOSCSICDDrive/IOSCSICDDrive.cpp b/iokit/Families/IOSCSICDDrive/IOSCSICDDrive.cpp new file mode 100644 index 000000000..2c02c8df6 --- /dev/null +++ b/iokit/Families/IOSCSICDDrive/IOSCSICDDrive.cpp @@ -0,0 +1,746 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +#define super IOSCSIHDDrive +OSDefineMetaClassAndStructors(IOSCSICDDrive,IOSCSIHDDrive) + +static void __inline ConvertBCDToHex(UInt8 *value) +{ + *value = (((*value) >> 4) * 10) + ((*value) & 0x0f); +} + +/* The Callback (C) entry from the SCSI provider. We just glue + * right into C++. 
+ */ + +void +IOSCSICDDrive_gc_glue(IOService *object,void *param) +{ + IOSCSICDDrive *self; + struct IOBasicSCSI::context *cx; + + self = (IOSCSICDDrive *)object; + cx = (struct IOBasicSCSI::context *)param; + self->genericCompletion(cx); /* do it in C++ */ +} + +IOReturn +IOSCSICDDrive::audioPause(bool pause) +{ + struct context *cx; + SCSICDBInfo scsiCmd; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + bzero(&scsiCmd, sizeof(scsiCmd)); + + scsiCmd.cdbLength = 10; + scsiCmd.cdb[0] = kIOSCSICommandPauseResume; + scsiCmd.cdb[8] = pause ? 0x00 : 0x01; + + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(cx->memory, 0, false); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(5000); + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSICDDrive::audioPlay(CDMSF timeStart,CDMSF timeStop) +{ + return(doAudioPlayCommand(timeStart,timeStop)); +} + +IOReturn +IOSCSICDDrive::audioScan(CDMSF timeStart,bool reverse) +{ + struct context *cx; + SCSICDBInfo scsiCmd; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + bzero(&scsiCmd, sizeof(scsiCmd)); + + scsiCmd.cdbLength = 10; + scsiCmd.cdb[0] = 0xCD; /* AUDIO SCAN (10) */ + scsiCmd.cdb[1] = reverse ? 
0x10 : 0x00; + scsiCmd.cdb[3] = timeStart.minute; + scsiCmd.cdb[4] = timeStart.second; + scsiCmd.cdb[5] = timeStart.frame; + scsiCmd.cdb[9] = 0x40; /* MSF */ + + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(cx->memory, 0, false); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(5000); + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSICDDrive::audioStop() +{ + struct context *cx; + SCSICDBInfo scsiCmd; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + bzero(&scsiCmd, sizeof(scsiCmd)); + + scsiCmd.cdbLength = 6; + scsiCmd.cdb[0] = 0x01; /* REZERO UNIT (6) */ + + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(cx->memory, 0, false); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(5000); + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +bool +IOSCSICDDrive::deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 *score) +{ + if ((inqBuf[0] & 0x1f) == kIOSCSIDeviceTypeCDROM) { +// IOLog("%s[IOSCSICDDrive]::deviceTypeMatches, returning TRUE\n",getName()); + *score = 0; + return(true); + } else { +// IOLog("%s[IOSCSICDDrive]::deviceTypeMatches, returning FALSE\n",getName()); + return(false); /* we don't handle other devices */ + } +} + +IOReturn +IOSCSICDDrive::doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + struct context *cx; + SCSICDBInfo scsiCmd; + + assert(buffer->getDirection() == kIODirectionIn); + + bzero(&scsiCmd, sizeof(scsiCmd)); + + if (sectorArea == kCDSectorAreaUser) { + if (sectorType == kCDSectorTypeCDDA) { + scsiCmd.cdbLength = 12; + scsiCmd.cdb[ 0] = 0xD8; /* READ CD-DA */ + scsiCmd.cdb[ 2] = (block >> 24) & 0xFF; + scsiCmd.cdb[ 3] = (block >> 16) & 0xFF; + scsiCmd.cdb[ 4] = (block >> 8) & 0xFF; + 
scsiCmd.cdb[ 5] = (block ) & 0xFF; + scsiCmd.cdb[ 6] = (nblks >> 24) & 0xFF; + scsiCmd.cdb[ 7] = (nblks >> 16) & 0xFF; + scsiCmd.cdb[ 8] = (nblks >> 8) & 0xFF; + scsiCmd.cdb[ 9] = (nblks ) & 0xFF; + } else if (sectorType == kCDSectorTypeMode1 || + sectorType == kCDSectorTypeMode2Form1) { + return doAsyncReadWrite(buffer,block,nblks,completion); + } + } + + if (scsiCmd.cdbLength == 0) { + return(kIOReturnUnsupported); + } + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + buffer->retain(); /* bump the retain count */ + cx->memory = buffer; + cx->completion = completion; + cx->state = kAsyncReadWrite; + + cx->scsireq->setCallback(this, (CallbackFn)IOSCSICDDrive_gc_glue, cx); + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(buffer, buffer->getLength(), false); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(60000); + + /* Queue the request awaiting power and return. When power comes up, + * the request will be passed to standardAsyncReadWriteExecute. 
+ */ + queueCommand(cx,kAsync,getReadWritePowerState()); /* queue and possibly wait for power */ + + return(kIOReturnSuccess); +} + +IOReturn +IOSCSICDDrive::doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + if (buffer->getDirection() == kIODirectionOut) { + return(kIOReturnNotWritable); + } + + return(super::doAsyncReadWrite(buffer,block,nblks,completion)); +} + +IOReturn +IOSCSICDDrive::doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + if (buffer->getDirection() == kIODirectionOut) { + return(kIOReturnNotWritable); + } + + return(super::doSyncReadWrite(buffer,block,nblks)); +} + +IOReturn +IOSCSICDDrive::doAudioPlayCommand(CDMSF timeStart,CDMSF timeStop) +{ + struct context *cx; + struct IOAudioPlayMSFcdb *p; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + p = (struct IOAudioPlayMSFcdb *)scsiCDB.cdb; /* use PlayAudioMSF */ + p->opcode = kIOSCSICommandPlayAudioMSF; + p->lunbits = 0; + p->reserved1 = 0; + p->start_m = timeStart.minute; + p->start_s = timeStart.second; + p->start_f = timeStart.frame; + p->end_m = timeStop.minute; + p->end_s = timeStop.second; + p->end_f = timeStop.frame; + p->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + req->setCDB( &scsiCDB ); + req->setPointers(cx->senseDataDesc, 255, false, true); + + req->setPointers( cx->memory, 0, false ); + req->setTimeout( 5000 ); + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSICDDrive::doFormatMedia(UInt64 /* byteCapacity */) +{ + return(kIOReturnUnsupported); +} + +UInt32 +IOSCSICDDrive::doGetFormatCapacities(UInt64 * /* capacities */,UInt32 /* capacitiesMaxCount */) const +{ + return(kIOReturnUnsupported); +} + +IOReturn +IOSCSICDDrive::doSynchronizeCache(void) +{ + return(kIOReturnUnsupported); +} + +IOReturn 
+IOSCSICDDrive::getAudioStatus(CDAudioStatus *status) +{ + IOReturn result; + UInt8 *tempBuf; + + /* Get a buffer for the returned data: */ + + result = allocateTempBuffer(&tempBuf,16); + if (result != kIOReturnSuccess) { + return(kIOReturnNoMemory); + } + + result = readSubChannel(tempBuf,16,IORSCcdb::kCurrentPosition,0); + + if (result == kIOReturnSuccess) { /* we got the data */ + assert(tempBuf[2] == 0); + assert(tempBuf[3] == 12); + assert(tempBuf[4] == 1); + + status->status = tempBuf[ 1]; + + status->position.track.number = tempBuf[ 6]; + status->position.track.index = tempBuf[ 7]; + + status->position.time.minute = tempBuf[ 9]; + status->position.time.second = tempBuf[10]; + status->position.time.frame = tempBuf[11]; + + status->position.track.time.minute = tempBuf[13]; + status->position.track.time.second = tempBuf[14]; + status->position.track.time.frame = tempBuf[15]; + } + deleteTempBuffer(tempBuf,16); + + return(result); +} + +IOReturn +IOSCSICDDrive::getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume) +{ + struct context *cx; + SCSICDBInfo scsiCmd; + IOReturn result; + UInt8 audio_control[28]; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + cx->memory = IOMemoryDescriptor::withAddress(audio_control, + sizeof(audio_control), + kIODirectionIn); + if (cx->memory == NULL) { + deleteContext(cx); + return(kIOReturnNoMemory); + } + + bzero(&scsiCmd, sizeof(scsiCmd)); + + scsiCmd.cdbLength = 6; + scsiCmd.cdb[0] = 0x1a; /* MODE SENSE (6) */ + scsiCmd.cdb[2] = 0x0e; /* PAGE CODE E */ + scsiCmd.cdb[4] = sizeof(audio_control); + + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(cx->memory, sizeof(audio_control), true); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(5000); + + result = simpleSynchIO(cx); + + if (result == kIOReturnSuccess) { + assert((audio_control[ 0] ) == 28-1); + assert((audio_control[ 3] ) == 0x08); + assert((audio_control[12] & 0x3f) == 0x0e); + 
assert((audio_control[13] ) == 0x0e); + + *leftVolume = audio_control[21]; + *rightVolume = audio_control[23]; + } + + deleteContext(cx); + + return(result); +} + +const char * +IOSCSICDDrive::getDeviceTypeName(void) +{ + return(kIOBlockStorageDeviceTypeCDROM); +} + +bool +IOSCSICDDrive::init(OSDictionary * properties) +{ + return(super::init(properties)); +} + +IOService * +IOSCSICDDrive::instantiateNub(void) +{ + IOService *nub; + + /* Instantiate a generic CDROM nub so a generic driver can match above us. */ + + nub = new IOSCSICDDriveNub; + return(nub); +} + +void +IOSCSICDDrive::mediaArrived(void) +{ +} + +void +IOSCSICDDrive::mediaGone(void) +{ +} + +IOReturn +IOSCSICDDrive::readISRC(UInt8 track,CDISRC isrc) +{ + IOReturn result; + UInt8 *tempBuf; + + /* Get a buffer for the returned data: */ + + result = allocateTempBuffer(&tempBuf,24); + if (result != kIOReturnSuccess) { + return(kIOReturnNoMemory); + } + + result = readSubChannel(tempBuf,24,IORSCcdb::kISRC,track); + if (result == kIOReturnSuccess) { + assert(tempBuf[2] == 0); + assert(tempBuf[3] == 20); + assert(tempBuf[4] == 3); + + if ((tempBuf[8] & 0x80)) { /* return the ISRC */ + bcopy(&tempBuf[9],isrc,kCDISRCMaxLength); + isrc[kCDISRCMaxLength] = '\0'; + } else { + result = kIOReturnNotFound; + } + } + + deleteTempBuffer(tempBuf,24); + + return(result); +} + +IOReturn +IOSCSICDDrive::readMCN(CDMCN mcn) +{ + IOReturn result; + UInt8 *tempBuf; + + /* Get a buffer for the returned data: */ + + result = allocateTempBuffer(&tempBuf,24); + if (result != kIOReturnSuccess) { + return(kIOReturnNoMemory); + } + + result = readSubChannel(tempBuf,24,IORSCcdb::kMCN,0); + if (result == kIOReturnSuccess) { + assert(tempBuf[2] == 0); + assert(tempBuf[3] == 20); + assert(tempBuf[4] == 2); + + if ((tempBuf[8] & 0x80)) { /* return the MCN */ + bcopy(&tempBuf[9],mcn,kCDMCNMaxLength); + mcn[kCDMCNMaxLength] = '\0'; + } else { + result = kIOReturnNotFound; + } + } + + deleteTempBuffer(tempBuf,24); + + return(result); +} + 
+ +IOReturn +IOSCSICDDrive::readSubChannel(UInt8 *buffer,UInt32 length,UInt8 dataFormat,UInt8 trackNumber) +{ + struct context *cx; + struct IORSCcdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + bzero(buffer,length); + + c = (struct IORSCcdb *)(scsiCDB.cdb); + + c->opcode = kIOSCSICommandReadSubChannel; + c->lunbits = 0; + c->lunbits |= IORSCcdb::kMSF; + c->subq = IORSCcdb::kSubq; + c->dataformat = dataFormat; + c->track = trackNumber; /* any valid track will do */ + c->reserved1 = 0; + c->reserved2 = 0; + c->len_hi = length >> 8; + c->len_lo = length & 0xff; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + req->setCDB( &scsiCDB ); + req->setPointers(cx->senseDataDesc, 255, false, true); + + cx->memory = IOMemoryDescriptor::withAddress((void *)buffer, + length, + kIODirectionIn); + req->setPointers( cx->memory, length, false ); + + req->setTimeout( 5000 ); + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSICDDrive::readTOC(IOMemoryDescriptor *buffer) +{ + struct context *cx; + struct IOReadToccdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + c = (struct IOReadToccdb *)scsiCDB.cdb; + + c->opcode = kIOSCSICommandReadTOC; + c->lunbits = IOReadToccdb::kMSF; + c->reserved1 = 0; + c->reserved2 = 0; + c->reserved3 = 0; + c->reserved4 = 0; + c->start_trk_session = 0; + c->len_hi = buffer->getLength() >> 8; + c->len_lo = buffer->getLength() & 0xff; + c->ctlbyte = IOReadToccdb::kFullTOC << 6; /* old format field */ + + scsiCDB.cdbLength = 10; + req->setCDB( &scsiCDB ); + req->setPointers(cx->senseDataDesc, 255, false, true); + + cx->memory = buffer; + + req->setPointers( cx->memory, 
cx->memory->getLength(), false ); + + req->setTimeout( 5000 ); + + result = simpleSynchIO(cx); + + deleteContext(cx); + +#ifdef HOLLYWOOD_BCD_TO_HEX_SUPPORT + IOByteCount tocMaxSize; + CDTOC *toc = buffer->getVirtualSegment(0, &tocMaxSize); + + /* Convert BCD-encoded values in TOC to hex values. */ + if (toc && tocMaxSize >= sizeof(UInt32)) { + UInt32 count = (tocMaxSize - sizeof(UInt32)) / sizeof(CDTOCDescriptor); + for (UInt32 index = 0; index < count; index++) { + if (toc->descriptors[index].point <= 0x99) { + ConvertBCDToHex(&toc->descriptors[index].point); + } + if ((toc->descriptors[index].point & 0xf0) == 0xb0) { + ConvertBCDToHex(&toc->descriptors[index].address.minute); + ConvertBCDToHex(&toc->descriptors[index].address.second); + ConvertBCDToHex(&toc->descriptors[index].address.frame); + ConvertBCDToHex(&toc->descriptors[index].zero); + } + if ( toc->descriptors[index].point <= 0x99 || + ( toc->descriptors[index].point >= 0xa0 && + toc->descriptors[index].point <= 0xc0 ) ) { + ConvertBCDToHex(&toc->descriptors[index].p.minute); + if (toc->descriptors[index].point != 0xa0) { + ConvertBCDToHex(&toc->descriptors[index].p.second); + } + ConvertBCDToHex(&toc->descriptors[index].p.frame); + } + } + } +#endif HOLLYWOOD_BCD_TO_HEX_SUPPORT + + return(result); +} + +IOReturn +IOSCSICDDrive::reportMaxWriteTransfer(UInt64 /* blockSize */,UInt64 * /* max */) +{ + return(0); +} + +IOReturn +IOSCSICDDrive::reportMediaState(bool *mediaPresent,bool *changed) +{ + IOReturn result; + + result = super::reportMediaState(mediaPresent,changed); + + if (result != kIOReturnSuccess) { + IOLog("%s[IOSCSICDDrive]::reportMediaState; result=%s, changed = %s, present = %s\n", + getName(),stringFromReturn(result),*changed ? "Y" : "N", *mediaPresent ? 
"Y" : "N"); + } + + if ((result == kIOReturnSuccess) && *changed) { /* the media state changed */ + if (*mediaPresent) { /* new media inserted */ + mediaArrived(); + } else { /* media went away */ + mediaGone(); + } + } + + /* We don't return the result of our internal operations. But since they + * indicate a problem, we probably should report some kind of problem, + * or maybe just ignore the media change. + */ + + return(result); +} + +IOReturn +IOSCSICDDrive::reportWriteProtection(bool *isWriteProtected) +{ + *isWriteProtected = true; + return(kIOReturnSuccess); +} + +IOReturn +IOSCSICDDrive::setAudioVolume(UInt8 leftVolume,UInt8 rightVolume) +{ + struct context *cx; + SCSICDBInfo scsiCmd; + IOReturn result; + UInt8 audio_control[28]; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + cx->memory = IOMemoryDescriptor::withAddress(audio_control, + sizeof(audio_control), + kIODirectionIn); + if (cx->memory == NULL) { + deleteContext(cx); + return(kIOReturnNoMemory); + } + + /* Get current values. */ + + bzero(&scsiCmd, sizeof(scsiCmd)); + + scsiCmd.cdbLength = 6; + scsiCmd.cdb[0] = 0x1a; /* MODE SENSE (6) */ + scsiCmd.cdb[2] = 0x0e; /* PAGE CODE E */ + scsiCmd.cdb[4] = sizeof(audio_control); + + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(cx->memory, sizeof(audio_control), true); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(5000); + + result = simpleSynchIO(cx); + + if (result == kIOReturnSuccess) { + assert((audio_control[ 0] ) == 28-1); + assert((audio_control[ 3] ) == 0x08); + assert((audio_control[12] & 0x3f) == 0x0e); + assert((audio_control[13] ) == 0x0e); + + /* Set new values. 
*/ + + audio_control[21] = audio_control[25] = leftVolume; + audio_control[23] = audio_control[27] = rightVolume; + + deleteContext(cx); + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + cx->memory = IOMemoryDescriptor::withAddress(audio_control, + sizeof(audio_control), + kIODirectionOut); + if (cx->memory == NULL) { + deleteContext(cx); + return(kIOReturnNoMemory); + } + + bzero(&scsiCmd, sizeof(scsiCmd)); + + scsiCmd.cdbLength = 6; + scsiCmd.cdb[0] = 0x15; /* MODE SELECT (6) */ + scsiCmd.cdb[4] = sizeof(audio_control); + + cx->scsireq->setCDB(&scsiCmd); + cx->scsireq->setPointers(cx->memory, sizeof(audio_control), true); + cx->scsireq->setPointers(cx->senseDataDesc, 255, false, true); + cx->scsireq->setTimeout(5000); + + result = simpleSynchIO(cx); + } + + deleteContext(cx); + + return(result); +} diff --git a/iokit/Families/IOSCSICDDrive/IOSCSICDDriveNub.cpp b/iokit/Families/IOSCSICDDrive/IOSCSICDDriveNub.cpp new file mode 100644 index 000000000..94104d038 --- /dev/null +++ b/iokit/Families/IOSCSICDDrive/IOSCSICDDriveNub.cpp @@ -0,0 +1,250 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#define super IOCDBlockStorageDevice +OSDefineMetaClassAndStructors(IOSCSICDDriveNub,IOCDBlockStorageDevice) + +bool +IOSCSICDDriveNub::attach(IOService * provider) +{ + if (!super::attach(provider)) { + return(false); + } + + _provider = OSDynamicCast(IOSCSICDDrive,provider); + if (_provider == NULL) { + return(false); + } else { + return(true); + } +} + +IOReturn +IOSCSICDDriveNub::audioPause(bool pause) +{ + return(_provider->audioPause(pause)); +} + +IOReturn +IOSCSICDDriveNub::audioPlay(CDMSF timeStart,CDMSF timeStop) +{ + return(_provider->audioPlay(timeStart,timeStop)); +} + +IOReturn +IOSCSICDDriveNub::audioScan(CDMSF timeStart,bool reverse) +{ + return(_provider->audioScan(timeStart,reverse)); +} + +IOReturn +IOSCSICDDriveNub::audioStop() +{ + return(_provider->audioStop()); +} + +IOReturn +IOSCSICDDriveNub::doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + return(_provider->doAsyncReadCD(buffer,block,nblks, + sectorArea,sectorType, + completion)); +} + +IOReturn +IOSCSICDDriveNub::doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + return(_provider->doAsyncReadWrite(buffer,block,nblks,completion)); +} + +IOReturn +IOSCSICDDriveNub::doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + return(_provider->doSyncReadWrite(buffer,block,nblks)); +} + +IOReturn +IOSCSICDDriveNub::doEjectMedia(void) +{ + return(_provider->doEjectMedia()); +} + +IOReturn +IOSCSICDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return(_provider->doFormatMedia(byteCapacity)); +} + +UInt32 +IOSCSICDDriveNub::doGetFormatCapacities(UInt64 * capacities,UInt32 capacitiesMaxCount) const +{ + 
return(_provider->doGetFormatCapacities(capacities,capacitiesMaxCount)); +} + +IOReturn +IOSCSICDDriveNub::doLockUnlockMedia(bool doLock) +{ + return(_provider->doLockUnlockMedia(doLock)); +} + +IOReturn +IOSCSICDDriveNub::doSynchronizeCache(void) +{ + return(_provider->doSynchronizeCache()); +} + +IOReturn +IOSCSICDDriveNub::getAudioStatus(CDAudioStatus *status) +{ + return(_provider->getAudioStatus(status)); +} + +IOReturn +IOSCSICDDriveNub::getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume) +{ + return(_provider->getAudioVolume(leftVolume,rightVolume)); +} + +UInt32 +IOSCSICDDriveNub::getMediaType(void) +{ + return(kCDMediaTypeROM); +} + +char * +IOSCSICDDriveNub::getVendorString(void) +{ + return(_provider->getVendorString()); +} + +char * +IOSCSICDDriveNub::getProductString(void) +{ + return(_provider->getProductString()); +} + +char * +IOSCSICDDriveNub::getRevisionString(void) +{ + return(_provider->getRevisionString()); +} + +char * +IOSCSICDDriveNub::getAdditionalDeviceInfoString(void) +{ + return(_provider->getAdditionalDeviceInfoString()); +} + +IOReturn +IOSCSICDDriveNub::readISRC(UInt8 track,CDISRC isrc) +{ + return(_provider->readISRC(track,isrc)); +} + +IOReturn +IOSCSICDDriveNub::readMCN(CDMCN mcn) +{ + return(_provider->readMCN(mcn)); +} + +IOReturn +IOSCSICDDriveNub::readTOC(IOMemoryDescriptor *buffer) +{ + return(_provider->readTOC(buffer)); +} + +IOReturn +IOSCSICDDriveNub::reportBlockSize(UInt64 *blockSize) +{ + return(_provider->reportBlockSize(blockSize)); +} + +IOReturn +IOSCSICDDriveNub::reportEjectability(bool *isEjectable) +{ + return(_provider->reportEjectability(isEjectable)); +} + +IOReturn +IOSCSICDDriveNub::reportLockability(bool *isLockable) +{ + return(_provider->reportLockability(isLockable)); +} + +IOReturn +IOSCSICDDriveNub::reportPollRequirements(bool *pollIsRequired,bool *pollIsExpensive) +{ + return(_provider-> reportPollRequirements(pollIsRequired,pollIsExpensive)); +} + +IOReturn +IOSCSICDDriveNub::reportMaxReadTransfer 
(UInt64 blockSize,UInt64 *max) +{ + return(_provider->reportMaxReadTransfer(blockSize,max)); +} + +IOReturn +IOSCSICDDriveNub::reportMaxValidBlock(UInt64 *maxBlock) +{ + return(_provider->reportMaxValidBlock(maxBlock)); +} + +IOReturn +IOSCSICDDriveNub::reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max) +{ + return(_provider->reportMaxWriteTransfer(blockSize,max)); +} + +IOReturn +IOSCSICDDriveNub::reportMediaState(bool *mediaPresent,bool *changed) +{ + return(_provider-> reportMediaState(mediaPresent,changed)); +} + +IOReturn +IOSCSICDDriveNub::reportRemovability(bool *isRemovable) +{ + return(_provider->reportRemovability(isRemovable)); +} + +IOReturn +IOSCSICDDriveNub::reportWriteProtection(bool *isWriteProtected) +{ + return(_provider->reportWriteProtection(isWriteProtected)); +} + +IOReturn +IOSCSICDDriveNub::setAudioVolume(UInt8 leftVolume,UInt8 rightVolume) +{ + return(_provider->setAudioVolume(leftVolume,rightVolume)); +} + diff --git a/iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDrive.cpp b/iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDrive.cpp new file mode 100644 index 000000000..3c8db1bd1 --- /dev/null +++ b/iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDrive.cpp @@ -0,0 +1,493 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +// ============================================================================= +// Copyright (c) 2000 Apple Computer, Inc. All rights reserved. +// +// IOSCSIDVDDrive.cpp +// +#include +#include +#include +#include +#include +#include +#include +#include + +#define super IOSCSICDDrive +OSDefineMetaClassAndStructors(IOSCSIDVDDrive,IOSCSICDDrive) + +/*----------------*/ +const int kFeatureProfileList = 0x0000; +const int kFeatureCore = 0x0001; +const int kFeatureMorphing = 0x0002; +const int kFeatureRemovableMedium = 0x0003; +const int kFeatureRandomReadable = 0x0010; +const int kFeatureMultiRead = 0x001d; +const int kFeatureCDRead = 0x001e; +const int kFeatureDVDRead = 0x001f; +const int kFeatureRandomWrite = 0x0020; +const int kFeatureIncrStreamWrite = 0x0021; +const int kFeatureSectorErasable = 0x0022; +const int kFeatureFormattable = 0x0023; +const int kFeatureDefectManagement = 0x0024; +const int kFeatureWriteOnce = 0x0025; +const int kFeatureRestrictedOverwrite = 0x0026; +const int kFeatureDVDRWRestrictedOverwrite = 0x002c; +const int kFeatureCDTrackAtOnce = 0x002d; +const int kFeatureCDMastering = 0x002e; +const int kFeatureDVDR_RWWrite = 0x002f; +const int kFeaturePowerManagement = 0x0100; +const int kFeatureSMART = 0x0101; +const int kFeatureEmbeddedChanger = 0x0102; +const int kFeatureCDAudioAnalogPlay = 0x0103; +const int kFeatureMicrocodeUpgrade = 0x0104; +const int kFeatureTimeout = 0x0105; +const int kFeatureDVDCSS = 0x0106; +const int kFeatureRealTimeStreaming = 0x0107; +const int kFeatureLUNSerialNumber = 0x0108; +const int kFeatureDiskControlBlocks = 0x010a; +const int kFeatureDVDCPRM = 0x010b; + +void +IOSCSIDVDDrive::checkConfig(UInt8 *buf,UInt32 actual) +{ + struct featureHdr { + UInt32 totalLen; + UInt8 reserved1[2]; + UInt16 currentProfile; + }; + struct featureDescriptor { + UInt16 featureCode; 
+ UInt8 versionPC; + UInt8 additionalLength; + }; + + int len; + struct featureHdr *fh; + struct featureDescriptor *fdp; + + fh = (struct featureHdr *)buf; + len = OSSwapBigToHostInt32(fh->totalLen); + + fdp = (struct featureDescriptor *)(&buf[8]); + + do { + + switch (OSSwapBigToHostInt16(fdp->featureCode)) { + + case kFeatureDVDRead : + _isDVDDrive = true; + break; + case kFeatureDVDCSS : + _canDoCSS = true; + break; + } + fdp = (struct featureDescriptor *)((char *)fdp + + sizeof(struct featureDescriptor) + + fdp->additionalLength); + } while ((UInt8 *)fdp < &buf[len]); +} + +IOReturn +IOSCSIDVDDrive::determineMediaType(void) +{ + struct featureHdr { + UInt32 totalLen; + UInt8 reserved1[2]; + UInt16 currentProfile; + }; + struct featureDescriptor { + UInt16 featureCode; + UInt8 versionPC; + UInt8 additionalLength; + }; + + int len; + struct featureHdr *fh; + struct featureDescriptor *fdp; + IOReturn result; + UInt32 configSize; + UInt8 configBuf[kMaxConfigLength]; + + /* Get the *current* configuration information, relating to the media. 
*/ + + result = getConfiguration(configBuf,kMaxConfigLength,&configSize,true); + if (result != kIOReturnSuccess) { + IOLog("%s[IOSCSIDVDDrive]::determineMediaType; result = '%s'\n", + getName(),stringFromReturn(result)); + return(result); + } + + fh = (struct featureHdr *)configBuf; + len = OSSwapBigToHostInt32(fh->totalLen); + + fdp = (struct featureDescriptor *)(&configBuf[8]); + + _mediaType = kDVDMediaTypeUnknown; /* assume there is no media inserted */ + + do { + + switch (OSSwapBigToHostInt16(fdp->featureCode)) { + + case kFeatureCDRead : + _mediaType = kCDMediaTypeROM; + IOLog("%s[IOSCSIDVDDrive]::determineMediaType; media is %s.\n",getName(),"CD"); + break; + case kFeatureDVDRead : + _mediaType = kDVDMediaTypeROM; + IOLog("%s[IOSCSIDVDDrive]::determineMediaType; media is %s.\n",getName(),"DVDROM"); + break; + case kFeatureFormattable : + _mediaType = kDVDMediaTypeRAM; + IOLog("%s[IOSCSIDVDDrive]::determineMediaType; media is %s.\n",getName(),"DVDRam"); + break; + case kFeatureRandomWrite : + _isWriteProtected = false; + IOLog("%s[IOSCSIDVDDrive]::determineMediaType; write-enabled.\n",getName()); + break; + } + fdp = (struct featureDescriptor *)((char *)fdp + + sizeof(struct featureDescriptor) + + fdp->additionalLength); + } while ((UInt8 *)fdp < &configBuf[len]); + + if (_mediaType == kDVDMediaTypeUnknown) { + IOLog("%s[IOSCSIDVDDrive]::determineMediaType; drive is empty.\n",getName()); + } + + return(kIOReturnSuccess); +} + +bool +IOSCSIDVDDrive::deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 *score) +{ + IOReturn result; + UInt8 type; + + type = inqBuf[0] & 0x1f; + + if (type == kIOSCSIDeviceTypeCDROM) { + // IOLog("%s[IOSCSIDVDDrive]::deviceTypeMatches; device type %d is CD/DVD\n",getName(),type); + + /* Try to get the device configuration. If we can, then it must be a DVD + * drive since it follows the MtFuji command set (so far). If we cannot + * get the configuration, then the device must be a plain CDROM drive. 
+ */ + result = getConfiguration(_configBuf,kMaxConfigLength,&_configSize,false); + if (result == kIOReturnSuccess) { + // IOLog("%s[IOSCSIDVDDrive]::deviceTypeMatches getConfig OK; returning true\n",getName()); + checkConfig(_configBuf,_configSize); + if (_isDVDDrive) { + // IOLog("---isDVDDrive\n"); + *score = 16; /* override any CD driver match */ + return(true); + } else { /* not DVD */ + return(false); + } + } else { + // IOLog("%s[IOSCSIDVDDrive]::deviceTypeMatches getConfig fail; returning false\n",getName()); + return(false); + } + } else { + /** + IOLog("%s[IOSCSIDVDDrive]::deviceTypeMatches; device type %d not CD/DVD, returning FALSE\n", + getName(),type); + **/ + return(false); /* we don't handle other devices */ + } +} + +IOReturn +IOSCSIDVDDrive::doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + return(standardAsyncReadWrite(buffer,block,nblks,completion)); +} + +IOReturn +IOSCSIDVDDrive::doFormatMedia(UInt64 byteCapacity) +{ + return(standardFormatMedia(byteCapacity)); +} + +UInt32 +IOSCSIDVDDrive::doGetFormatCapacities(UInt64 *capacities,UInt32 capacitiesMaxCount) const +{ + if (capacitiesMaxCount > 0) { + *capacities = (UInt64)((UInt64)2600 * (UInt64)1048576); /* DVD-RAM V1.0 is 2.6GB */ + return(1); + } else { + return(0); + } +} + +IOReturn +IOSCSIDVDDrive::doSynchronizeCache(void) +{ + return(standardSynchronizeCache()); +} + +IOReturn +IOSCSIDVDDrive::doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + return(standardSyncReadWrite(buffer,block,nblks)); +} + +IOReturn +IOSCSIDVDDrive::getConfiguration(UInt8 *buffer,UInt32 length,UInt32 *actualLength,bool current) +{ + struct context *cx; + struct IOGCCdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + SCSIResults scsiResults; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + 
bzero(buffer,length); + + c = (struct IOGCCdb *)(scsiCDB.cdb); + + c->opcode = kIOSCSICommandGetConfiguration; + c->lunRT = 0; + if (current) { /* only get current features */ + c->lunRT |= 0x01; + } + c->startFeature_lo = 0; + c->startFeature_hi = 0; + c->len_hi = length >> 8; + c->len_lo = length & 0xff; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + req->setCDB( &scsiCDB ); + + cx->memory = IOMemoryDescriptor::withAddress((void *)buffer, + length, + kIODirectionIn); + req->setPointers( cx->memory, length, false ); + req->setPointers( cx->senseDataDesc, 255, false, true ); + req->setTimeout( 5000 ); + + queueCommand(cx,kSync,getGetConfigurationPowerState()); + result = simpleSynchIO(cx); + + req->getResults(&scsiResults); + if (result == kIOReturnUnderrun) { + result = kIOReturnSuccess; + } + *actualLength = scsiResults.bytesTransferred; + + deleteContext(cx); + + return(result); +} + +const char * +IOSCSIDVDDrive::getDeviceTypeName(void) +{ + return(kIOBlockStorageDeviceTypeDVD); +} + +UInt32 +IOSCSIDVDDrive::getGetConfigurationPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIDVDDrive::getReportKeyPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIDVDDrive::getSendKeyPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIDVDDrive::getMediaType(void) +{ + return(_mediaType); +} + +bool +IOSCSIDVDDrive::init(OSDictionary * properties) +{ + _isDVDDrive = false; + _canDoCSS = false; + _configSize = 0; + _mediaType = kDVDMediaTypeUnknown; + _isWriteProtected = true; + + return(super::init(properties)); +} + +IOService * +IOSCSIDVDDrive::instantiateNub(void) +{ + IOService *nub; + + /* Instantiate a generic DVD nub so a generic driver can match above us. 
*/ + + nub = new IOSCSIDVDDriveNub; + return(nub); +} + +IOReturn +IOSCSIDVDDrive::reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat) +{ + struct context *cx; + struct IORKCdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + c = (struct IORKCdb *)(scsiCDB.cdb); + + c->opcode = kIOSCSICommandReportKey; + if (keyFormat == kTitleKey) { + c->lba_0 = lba >> 24; + c->lba_1 = lba >> 16; + c->lba_2 = lba >> 8; + c->lba_3 = lba & 0xff; + } + c->keyClass = keyClass; + c->len_hi = buffer->getLength() >> 8; + c->len_lo = buffer->getLength() & 0xff; + c->agidKeyFormat = agid << 6 | keyFormat; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + req->setCDB( &scsiCDB ); + + cx->memory = buffer; + + req->setPointers( cx->memory, cx->memory->getLength(), false ); + req->setPointers( cx->senseDataDesc, 255, false, true ); + req->setTimeout( 5000 ); + + queueCommand(cx,kSync,getReportKeyPowerState()); + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSIDVDDrive::reportMediaState(bool *mediaPresent,bool *changed) +{ + IOReturn result; + + /* Let the superclass check for media in the standard way: */ + + result = super::reportMediaState(mediaPresent,changed); + + if (result != kIOReturnSuccess) { + IOLog("%s[IOSCSIDVDDrive]:: reportMediaState; result = '%s' from super\n", + getName(),stringFromReturn(result)); + return(result); + } + + /* If we have newly-inserted media, determine its type: */ + + if (*mediaPresent && *changed) { + result = determineMediaType(); + } + + return(result); +} + +IOReturn +IOSCSIDVDDrive::reportWriteProtection(bool *isWriteProtected) +{ + *isWriteProtected = _isWriteProtected; + return(kIOReturnSuccess); +} + +IOReturn +IOSCSIDVDDrive::sendKey(IOMemoryDescriptor *buffer,const 
DVDKeyClass keyClass, + const UInt8 agid,const DVDKeyFormat keyFormat) +{ + struct context *cx; + struct IOSKCdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + c = (struct IOSKCdb *)(scsiCDB.cdb); + + c->opcode = kIOSCSICommandSendKey; + c->keyClass = keyClass; + c->len_hi = buffer->getLength() >> 8; + c->len_lo = buffer->getLength() & 0xff; + c->agidKeyFormat = agid << 6 | keyFormat; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + req->setCDB( &scsiCDB ); + + cx->memory = buffer; + + req->setPointers( cx->memory, cx->memory->getLength(), false ); + req->setPointers( cx->senseDataDesc, 255, false, true ); + req->setTimeout( 5000 ); + + queueCommand(cx,kSync,getSendKeyPowerState()); + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} diff --git a/iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDriveNub.cpp b/iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDriveNub.cpp new file mode 100644 index 000000000..9061f004d --- /dev/null +++ b/iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDriveNub.cpp @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +#define super IODVDBlockStorageDevice +OSDefineMetaClassAndStructors(IOSCSIDVDDriveNub,IODVDBlockStorageDevice) + +bool +IOSCSIDVDDriveNub::attach(IOService * provider) +{ + if (!super::attach(provider)) { + return(false); + } + + _provider = OSDynamicCast(IOSCSIDVDDrive,provider); + if (_provider == NULL) { + return(false); + } else { + return(true); + } +} + +IOReturn +IOSCSIDVDDriveNub::audioPause(bool pause) +{ + return(_provider->audioPause(pause)); +} + +IOReturn +IOSCSIDVDDriveNub::audioPlay(CDMSF timeStart,CDMSF timeStop) +{ + return(_provider->audioPlay(timeStart,timeStop)); +} + +IOReturn +IOSCSIDVDDriveNub::audioScan(CDMSF timeStart,bool reverse) +{ + return(_provider->audioScan(timeStart,reverse)); +} + +IOReturn +IOSCSIDVDDriveNub::audioStop() +{ + return(_provider->audioStop()); +} + +IOReturn +IOSCSIDVDDriveNub::doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) +{ + return(_provider->doAsyncReadCD(buffer,block,nblks, + sectorArea,sectorType, + completion)); +} + +IOReturn +IOSCSIDVDDriveNub::doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + return(_provider->doAsyncReadWrite(buffer,block,nblks,completion)); +} + +IOReturn +IOSCSIDVDDriveNub::doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + return(_provider->doSyncReadWrite(buffer,block,nblks)); +} + +IOReturn +IOSCSIDVDDriveNub::doEjectMedia(void) +{ + return(_provider->doEjectMedia()); +} + +IOReturn +IOSCSIDVDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return(_provider->doFormatMedia(byteCapacity)); +} + +UInt32 +IOSCSIDVDDriveNub::doGetFormatCapacities(UInt64 * capacities,UInt32 capacitiesMaxCount) 
const +{ + return(_provider->doGetFormatCapacities(capacities,capacitiesMaxCount)); +} + +IOReturn +IOSCSIDVDDriveNub::doLockUnlockMedia(bool doLock) +{ + return(_provider->doLockUnlockMedia(doLock)); +} + +IOReturn +IOSCSIDVDDriveNub::doSynchronizeCache(void) +{ + return(_provider->doSynchronizeCache()); +} + +IOReturn +IOSCSIDVDDriveNub::getAudioStatus(CDAudioStatus *status) +{ + return(_provider->getAudioStatus(status)); +} + +IOReturn +IOSCSIDVDDriveNub::getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume) +{ + return(_provider->getAudioVolume(leftVolume,rightVolume)); +} + +UInt32 +IOSCSIDVDDriveNub::getMediaType(void) +{ + return(_provider->getMediaType()); +} + +char * +IOSCSIDVDDriveNub::getVendorString(void) +{ + return(_provider->getVendorString()); +} + +char * +IOSCSIDVDDriveNub::getProductString(void) +{ + return(_provider->getProductString()); +} + +char * +IOSCSIDVDDriveNub::getRevisionString(void) +{ + return(_provider->getRevisionString()); +} + +char * +IOSCSIDVDDriveNub::getAdditionalDeviceInfoString(void) +{ + return(_provider->getAdditionalDeviceInfoString()); +} + +IOReturn +IOSCSIDVDDriveNub::readISRC(UInt8 track,CDISRC isrc) +{ + return(_provider->readISRC(track,isrc)); +} + +IOReturn +IOSCSIDVDDriveNub::readMCN(CDMCN mcn) +{ + return(_provider->readMCN(mcn)); +} + +IOReturn +IOSCSIDVDDriveNub::readTOC(IOMemoryDescriptor *buffer) +{ + return(_provider->readTOC(buffer)); +} + +IOReturn +IOSCSIDVDDriveNub::reportBlockSize(UInt64 *blockSize) +{ + return(_provider->reportBlockSize(blockSize)); +} + +IOReturn +IOSCSIDVDDriveNub::reportEjectability(bool *isEjectable) +{ + return(_provider->reportEjectability(isEjectable)); +} + +IOReturn +IOSCSIDVDDriveNub::reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat) +{ + return(_provider->reportKey(buffer,keyClass,lba,agid,keyFormat)); +} + +IOReturn +IOSCSIDVDDriveNub::reportLockability(bool *isLockable) +{ + 
return(_provider->reportLockability(isLockable)); +} + +IOReturn +IOSCSIDVDDriveNub::reportPollRequirements(bool *pollIsRequired,bool *pollIsExpensive) +{ + return(_provider-> reportPollRequirements(pollIsRequired,pollIsExpensive)); +} + +IOReturn +IOSCSIDVDDriveNub::reportMaxReadTransfer (UInt64 blockSize,UInt64 *max) +{ + return(_provider->reportMaxReadTransfer(blockSize,max)); +} + +IOReturn +IOSCSIDVDDriveNub::reportMaxValidBlock(UInt64 *maxBlock) +{ + return(_provider->reportMaxValidBlock(maxBlock)); +} + +IOReturn +IOSCSIDVDDriveNub::reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max) +{ + return(_provider->reportMaxWriteTransfer(blockSize,max)); +} + +IOReturn +IOSCSIDVDDriveNub::reportMediaState(bool *mediaPresent,bool *changed) +{ + return(_provider->reportMediaState(mediaPresent,changed)); +} + +IOReturn +IOSCSIDVDDriveNub::reportRemovability(bool *isRemovable) +{ + return(_provider->reportRemovability(isRemovable)); +} + +IOReturn +IOSCSIDVDDriveNub::reportWriteProtection(bool *isWriteProtected) +{ + return(_provider->reportWriteProtection(isWriteProtected)); +} + +IOReturn +IOSCSIDVDDriveNub::sendKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt8 agid,const DVDKeyFormat keyFormat) +{ + return(_provider->sendKey(buffer,keyClass,agid,keyFormat)); +} + +IOReturn +IOSCSIDVDDriveNub::setAudioVolume(UInt8 leftVolume,UInt8 rightVolume) +{ + return(_provider->setAudioVolume(leftVolume,rightVolume)); +} diff --git a/iokit/Families/IOSCSIHDDrive/IOBasicSCSI.cpp b/iokit/Families/IOSCSIHDDrive/IOBasicSCSI.cpp new file mode 100644 index 000000000..dcfdc159c --- /dev/null +++ b/iokit/Families/IOSCSIHDDrive/IOBasicSCSI.cpp @@ -0,0 +1,1331 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +#define super IOService +OSDefineMetaClass(IOBasicSCSI,IOService) +OSDefineAbstractStructors(IOBasicSCSI,IOService) + +void IOBasicSCSI_gc_glue(void *object,void *param); + +/* Allocate a new context struct. A return of NULL means we couldn't + * allocate either the context itself or one of its members. + */ +struct IOBasicSCSI::context * +IOBasicSCSI::allocateContext(void) +{ + struct context *cx; + + //xxx IOLog("allocateContext entered\n"); + + /* First, the context structure itself. */ + + cx = IONew(struct context,1); + if (cx == NULL) { + return(NULL); + } + + bzero(cx,sizeof(struct context)); + + /* Allocate all the structs and objects we need. If any allocation + * fails, we can simply call deleteContext() to free anything + * allocated so far. + */ + + cx->scsireq = _provider->allocCommand(kIOSCSIDevice, 0); + if (cx->scsireq == NULL) { + deleteContext(cx); + return(NULL); + } + + + /* Preset the completion parameters, which are the same for + * all SCSI requests we issue. Only the target function changes. 
+ */ + + cx->senseData = (SCSISenseData *)IOMalloc(256); + if (cx-> senseData == NULL) { + deleteContext(cx); + return(NULL); + } + + bzero(cx->senseData, 256 ); + + cx->senseDataDesc = IOMemoryDescriptor::withAddress(cx->senseData, + 256, + kIODirectionIn); + + + cx->sync = IOSyncer::create(false); + if (cx->sync == NULL) { + deleteContext(cx); + return(NULL); + } + + cx->retryInProgress = false; + + /* We defer allocation of the Memory Descriptor till later; + * it will be allocated where it's needed. + */ + + // IOLog("allocateContext returning cx = %08x\n",(unsigned int)cx); + + return(cx); +} + +IOReturn +IOBasicSCSI::allocateInquiryBuffer(UInt8 **buf,UInt32 size) +{ + *buf = (UInt8 *)IOMalloc(size); + if (*buf == NULL) { + return(kIOReturnNoMemory); + } + + bzero(*buf,size); + + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::allocateTempBuffer(UInt8 **buf,UInt32 size) +{ + *buf = (UInt8 *)IOMalloc(size); + if (*buf == NULL) { + return(kIOReturnNoMemory); + } + + bzero(*buf,size); + + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::allocateReadCapacityBuffer(UInt8 **buf,UInt8 size) +{ + *buf = (UInt8 *)IOMalloc(size); + if (*buf == NULL) { + return(kIOReturnNoMemory); + } + + bzero(*buf,size); + + return(kIOReturnSuccess); +} + +UInt32 +IOBasicSCSI::createReadCdb(UInt8 *cdb,UInt32 *cdbLength, + UInt32 block,UInt32 nblks, + UInt32 *maxAutoSenseLength, + UInt32 *timeoutSeconds) +{ + struct IORWcdb *c; + + c = (struct IORWcdb *)cdb; + + c->opcode = SOP_READ10; + c->lunbits = 0; + + c->lba_3 = block >> 24; + c->lba_2 = block >> 16; + c->lba_1 = block >> 8; + c->lba_0 = block & 0xff; + + c->reserved = 0; + + c->count_msb = nblks >> 8; + c->count_lsb = nblks & 0xff; + + c->ctlbyte = 0; + + *cdbLength = 10; + *maxAutoSenseLength = 8; /* do the sense */ + *timeoutSeconds = 60; + return(0); +} + +UInt32 +IOBasicSCSI::createWriteCdb(UInt8 *cdb,UInt32 *cdbLength, + UInt32 block,UInt32 nblks, + UInt32 *maxAutoSenseLength, + UInt32 *timeoutSeconds) +{ + 
struct IORWcdb *c; + + c = (struct IORWcdb *)cdb; + + c->opcode = SOP_WRITE10; + c->lunbits = 0; + + c->lba_3 = block >> 24; + c->lba_2 = block >> 16; + c->lba_1 = block >> 8; + c->lba_0 = block & 0xff; + + c->reserved = 0; + + c->count_msb = nblks >> 8; + c->count_lsb = nblks & 0xff; + + c->ctlbyte = 0; + + *cdbLength = 10; + *maxAutoSenseLength = sizeof( SCSISenseData ); /* do the sense */ + *timeoutSeconds = 60; + return(0); +} + +void +IOBasicSCSI::deleteContext(struct context *cx) +{ + // IOLog("deleteContext %08x\n",(unsigned int)cx); + + if (cx->scsireq) { + cx->scsireq->release(); + } + +// if (cx->scsiresult) { +// IODelete(cx->scsiresult,struct IOSCSIResult,1); +// } + + if (cx->senseData) + { + IOFree( cx->senseData, 256 ); + } + + if ( cx->senseDataDesc ) + { + cx->senseDataDesc->release(); + } + + if (cx->memory) { + cx->memory->release(); + } + + if (cx->sync) { + cx->sync->release(); + } + + IODelete(cx,struct context,1); +} + +void +IOBasicSCSI::deleteInquiryBuffer(UInt8 *buf,UInt32 size) +{ + IOFree((void *)buf,size); +} + +void +IOBasicSCSI::deleteTempBuffer(UInt8 *buf,UInt32 len) +{ + IOFree((void *)buf,len); +} + +void +IOBasicSCSI::deleteReadCapacityBuffer(UInt8 *buf,UInt32 len) +{ + IOFree((void *)buf,len); +} + +IOReturn +IOBasicSCSI::doInquiry(UInt8 *inqBuf,UInt32 maxLen,UInt32 *actualLen) +{ + _provider->getInquiryData( inqBuf, maxLen, actualLen ); + return kIOReturnSuccess; +} + +IOReturn +IOBasicSCSI::doReadCapacity(UInt64 *blockSize,UInt64 *maxBlock) +{ + struct context *cx; + struct IOReadCapcdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + UInt8 *buf; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(SCSICDBInfo) ); + + c = (struct IOReadCapcdb *)&scsiCDB.cdb; + c->opcode = SOP_READCAP; + c->lunbits = 0; + c->lba_3 = 0; + c->lba_2 = 0; + c->lba_1 = 0; + c->lba_0 = 0; + c->reserved1 = 0; + c->reserved2 = 0; + c->reserved3 = 0; + 
c->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + + req->setCDB( &scsiCDB ); + req->setPointers( cx->senseDataDesc, sizeof(SCSISenseData), false, true ); + + req->setTimeout( 30000 ); + + *blockSize = 0; + *maxBlock = 0; + + result = allocateReadCapacityBuffer(&buf,kReadCapSize); + + if (result == kIOReturnSuccess) { + + cx->memory = IOMemoryDescriptor::withAddress((void *)buf, + kReadCapSize, + kIODirectionIn); + + req->setPointers( cx->memory, kReadCapSize, false ); + + /* We force the drive to be completely powered-up, including the mechanical + * components, because some drives (e.g. CDs) access the media. + */ + + queueCommand(cx,kSync,getReadCapacityPowerState()); /* queue the operation, sleep awaiting power */ + + result = simpleSynchIO(cx); + + if (result == kIOReturnSuccess) { + + *blockSize = (buf[4] << 24) | /* endian-neutral */ + (buf[5] << 16) | + (buf[6] << 8) | + (buf[7] ); + + *maxBlock = (buf[0] << 24) | /* endian-neutral */ + (buf[1] << 16) | + (buf[2] << 8) | + (buf[3] ); + } + + deleteReadCapacityBuffer(buf,kReadCapSize); + } + + deleteContext(cx); + + return(result); +} + +void +IOBasicSCSI::free(void) +{ + if (_inqBuf) { + deleteInquiryBuffer(_inqBuf,_inqBufSize); + _inqBuf = NULL; + } + +#ifdef DISKPM + if (_powerQueue.lock) { + IOLockFree(_powerQueue.lock); + } +#endif + + if (_busResetContext) { + deleteContext(_busResetContext); + } + if (_unitAttentionContext) { + deleteContext(_unitAttentionContext); + } + + super::free(); +} + +/* The Callback (C) entry from the SCSI provider. We just glue + * right into C++. 
+ */
+
+void
+IOBasicSCSI_gc_glue(void *object,void *param)
+{
+	IOBasicSCSI *self;
+	struct IOBasicSCSI::context *cx;
+
+	self = (IOBasicSCSI *)object;
+	cx = (struct IOBasicSCSI::context *)param;
+	self->genericCompletion(cx);		/* do it in C++ */
+}
+
+/* Begin Bus Reset recovery: mark recovery in progress and hold the normal
+ * I/O queue so no new commands start until finishBusResetRecovery releases it.
+ */
+void
+IOBasicSCSI::setupBusResetRecovery(void)
+{
+	IOLog("%s[IOBasicSCSI]: SCSI bus reset occurred; begin recovery.\n",getName());
+
+	_busResetContext->step = 1;
+	_busResetRecoveryInProgress = true;
+	_provider->holdQueue(kQTypeNormalQ);
+	//	_provider->flushQueue(kQTypeNormalQ,kIOReturnAborted);
+}
+
+void
+IOBasicSCSI::beginBusResetRecovery(void)
+{
+	/* In this method, we issue the first command necessary to recover
+	 * from the Bus Reset condition. Its completion will call
+	 * busResetRecoveryCommandComplete, which is responsible for starting
+	 * the next command, until all have been executed.
+	 *
+	 * The default implementation of this method does nothing, except
+	 * to call finishBusResetRecovery immediately.
+	 */
+
+	//	IOLog("%s[IOBasicSCSI]: beginBusReset\n",getName());
+	finishBusResetRecovery();
+}
+
+void
+IOBasicSCSI::busResetRecoveryCommandComplete(struct IOBasicSCSI::context *cx)
+{
+	/* We are entered for each command completion during bus reset recovery.
+	 *
+	 * Do whatever we have to upon completion of one of our commands.
+	 *
+	 * Typically we would increment "step" then start another asynchronous
+	 * command. When we have finished running off the whole set of required
+	 * operations then we call finishBusResetRecovery.
+	 *
+	 * The default implementation does nothing.
+	 */
+}
+
+void
+IOBasicSCSI::finishBusResetRecovery(void)
+{
+	/* Release the IO queue so that any pending commands can start.
 */
+
+	IOLog("%s[IOBasicSCSI]: SCSI bus reset recovery complete.\n",getName());
+	_provider->releaseQueue(kQTypeNormalQ);
+	_busResetRecoveryInProgress = false;
+}
+
+bool
+IOBasicSCSI::unitAttentionDetected(struct IOBasicSCSI::context *cx)
+{
+	SCSIResults scsiResults;
+
+	/* We're not currently handling a Unit Attention: see if
+	 * we just got one to handle. Note that we do NOT have to
+	 * detect Bus Reset here, because we receive notification of
+	 * that event asynchronously via the message() method.
+	 */
+
+	cx->scsireq->getResults(&scsiResults);
+
+	/* A special case is Unit Attention, which can happen at any time. We begin
+	 * the Unit Attention recovery procedure which issues multiple asynch commands
+	 * to restore the device condition. After the recovery procedure completes,
+	 * it causes a retry of the original command.
+	 */
+
+	if (scsiResults.requestSenseDone == true) {	/* an error occurred */
+
+		//	IOLog("%s[IOBasicSCSI]::unitAttentionDetected: sense code %02x\n",
+		//		getName(),cx->scsiresult->scsiSense[02]);
+
+		/* Mask off the low nibble: only the sense key field matters here. */
+		if ((cx->senseData->senseKey & 0x0f) == kUnitAttention) {	/* it's a UA */
+
+			//	IOLog("%s[IOBasicSCSI]::unitAttentionDetected: detected UnitAttention\n",
+			//		getName());
+
+			return(true);
+		}
+
+	}	/* no sense data, therefore NOT a Unit Attention */
+
+	return(false);
+}
+
+void
+IOBasicSCSI::setupUnitAttentionRecovery(struct IOBasicSCSI::context *cx)
+{
+	if (!_unitAttentionRecoveryInProgress) {
+
+		/* Save original IO context and set step. */
+
+		_unitAttentionContext->originalIOContext = cx;
+
+		_unitAttentionContext->step = 1;
+
+		_unitAttentionRecoveryInProgress = true;
+
+		beginUnitAttentionRecovery();
+	}
+}
+
+void
+IOBasicSCSI::beginUnitAttentionRecovery(void)
+{
+	/* In this method, we issue the first command necessary to recover
+	 * from the Unit Attention condition. Its completion will call
+	 * unitAttentionRecoveryCommandComplete, which is responsible for starting
+	 * the next command, until all have been executed.
+ * + * The default implementation of this method does nothing, except + * to call finishUnitAttentionRecovery immediately. + */ + + finishUnitAttentionRecovery(); +} + +void +IOBasicSCSI::unitAttentionRecoveryCommandComplete(struct IOBasicSCSI::context *cx) +{ + /* We are entered for each command completion during Unit Attention recovery. + * + * Do whatever we have to upon completion of one of our commands. + * + * Typically we would increment "step" then start another asynchronous + * command. When we have finished running off the whole set of required + * operations then we call finishUnitAttentionRecovery. + * + * The default implementation does nothing. + */ +} + +void +IOBasicSCSI::finishUnitAttentionRecovery(void) +{ + /* When we're done, we reissue the command that caught the Unit Attention. */ + + _unitAttentionRecoveryInProgress = false; + _unitAttentionContext->originalIOContext->scsireq->execute(); +} + +bool +IOBasicSCSI::automaticRetry(struct IOBasicSCSI::context *cx) +{ + SCSIResults scsiResults; + + if (unitAttentionDetected(cx)) { /* do an automatic retry for Unit Attention */ + setupUnitAttentionRecovery(cx); + return(true); + } + + cx->scsireq->getResults(&scsiResults); + + if (scsiResults.returnCode != kIOReturnSuccess && + scsiResults.returnCode != kIOReturnError) { + /** + IOLog("%s[IOBasicSCSI]: retcode = %08lx / %s\n", + getName(),scsiResults.returnCode,stringFromReturn(scsiResults.returnCode)); + **/ + } + + if (scsiResults.returnCode == kIOReturnAborted || + scsiResults.returnCode == kIOReturnTimeout) { /* must be a Bus Reset abort */ + if (!cx->retryInProgress) { /* start a retry if not already doing one */ + cx->retryInProgress = true; + cx->retryCount = kMaxRetries; + } + if (cx->retryCount > 0) { /* OK to continue retrying */ + IOLog("%s[IOBasicSCSI]: AutoRetry cx @ %08lx, cmd @ %08lx; %ld retries to go.\n", + getName(),(unsigned long)cx,(unsigned long)cx->scsireq,cx->retryCount); + cx->retryCount--; + cx->scsireq->execute(); + 
return(true); + } else { + cx->retryInProgress = false; + return(false); + } + } + + return(customAutomaticRetry(cx)); +} + +bool +IOBasicSCSI::customAutomaticRetry(struct IOBasicSCSI::context *cx) +{ + return(false); /* the default does nothing special */ +} + +void +IOBasicSCSI::genericCompletion(struct IOBasicSCSI::context *cx) +{ + + /* We dispatch the completion depending on our state. */ + + // IOLog("%s[IOBasicSCSI]::genericCompletion: dispatching, state = %s\n", + // getName(),stringFromState(cx->state)); + + switch (cx->state) { + + case kSimpleSynchIO : + if (!automaticRetry(cx)) { + cx->sync->signal(kIOReturnSuccess,false); /* Just wake up the waiting thread: */ + } + break; + + case kAsyncReadWrite : /* normal r/w completion */ + if (!automaticRetry(cx)) { + RWCompletion(cx); + deleteContext(cx); + } + break; + + case kHandlingRecoveryAfterBusReset : /* still handling recovery after reset */ + if (!automaticRetry(cx)) { + busResetRecoveryCommandComplete(cx); + } + break; /* just wait for next completion */ + + case kHandlingUnitAttention : /* still handling UA */ + unitAttentionRecoveryCommandComplete(cx); + break; /* just wait for next completion */ + + case kNone : /* undefined */ + case kMaxStateValue : + case kAwaitingPower : + break; + } + + return; +} + +char * +IOBasicSCSI::getAdditionalDeviceInfoString(void) +{ + return("[SCSI]"); +} + +UInt64 +IOBasicSCSI::getBlockSize(void) +{ + return(_blockSize); +} + +char * +IOBasicSCSI::getProductString(void) +{ + return(_product); +} + +char * +IOBasicSCSI::getRevisionString(void) +{ + return(_rev); +} + +char * +IOBasicSCSI::getVendorString(void) +{ + return(_vendor); +} + +bool +IOBasicSCSI::init(OSDictionary * properties) +{ + _inqBuf = NULL; + _inqBufSize = 0; + _inqLen = 0; + + _vendor[8] = '\0'; + _product[16] = '\0'; + _rev[4] = '\0'; + + _readCapDone = false; + _blockSize = 0; + _maxBlock = 0; + _removable = false; + +#ifdef DISKPM + _powerQueue.head = NULL; + _powerQueue.tail = NULL; + 
_powerQueue.lock = IOLockAlloc(); + if (_powerQueue.lock == NULL) { + return(false); + } +#endif + + return(super::init(properties)); +} + +IOReturn +IOBasicSCSI::message(UInt32 type,IOService * provider,void * argument) +{ + // IOLog("%s[IOBasicSCSI]: message: type = %lx\n",getName(),type); + switch (type) { + case kSCSIClientMsgBusReset : /* Bus Reset has begun */ + if (!_busResetRecoveryInProgress) { /* try to avoid reset-within-reset recovery */ + setupBusResetRecovery(); /* indicate recovery will be in progress */ + } + break; /* now wait till reset is done */ + + case (kSCSIClientMsgBusReset | kSCSIClientMsgDone) : /* Bus Reset is finished */ + beginBusResetRecovery(); /* now start the actual recovery process */ + break; + + default : + return(super::message(type,provider,argument)); /* not one of ours */ + } + + return(kIOReturnSuccess); +} + +IOService * +IOBasicSCSI::probe(IOService * provider,SInt32 * score) +{ + IOReturn result; + OSString * string; + + if (!super::probe(provider,score)) { + return(NULL); + } + + _provider = (IOSCSIDevice *)provider; + + /* Do an inquiry to get the device type. The inquiry buffer will + * be deleted by free(). + */ + + _inqBufSize = kMaxInqSize; + result = allocateInquiryBuffer(&_inqBuf,_inqBufSize); + if (result != kIOReturnSuccess) { + return(NULL); + } + + result = doInquiry(_inqBuf,_inqBufSize,&_inqLen); + if (result != kIOReturnSuccess) { + return(NULL); + } + +#ifdef notdef + // xxx NEVER match for ID=0, the boot disk. This lets us + // test this driver on other disk drives. + // + if (_provider->getTarget() == 0) { + IOLog("**%s[IOBasicSCSI]:probe; ignoring SCSI ID %d\n", + getName(),(int)_provider->getTarget()); + return(NULL); + } +#endif + + // Fetch SCSI device information from the nub. 
+ + string = OSDynamicCast(OSString, + _provider->getProperty(kSCSIPropertyVendorName)); + if (string) { + strncpy(_vendor, string->getCStringNoCopy(), 8); + _vendor[8] = '\0'; + } + + string = OSDynamicCast(OSString, + _provider->getProperty(kSCSIPropertyProductName)); + if (string) { + strncpy(_product, string->getCStringNoCopy(), 16); + _product[16] = '\0'; + } + + string = OSDynamicCast(OSString, + _provider->getProperty(kSCSIPropertyProductRevision)); + if (string) { + strncpy(_rev, string->getCStringNoCopy(), 4); + _rev[4] = '\0'; + } + + if (deviceTypeMatches(_inqBuf,_inqLen,score)) { + +/*** + IOLog("**%s[IOBasicSCSI]::probe; accepting %s, %s, %s, %s; SCSI ID %d\n", + getName(),getVendorString(),getProductString(),getRevisionString(), + getAdditionalDeviceInfoString(), + (int)_provider->getTarget()); +***/ + return(this); + + } else { + return(NULL); + } +} + +void +IOBasicSCSI::dequeueCommands(void) +{ +#ifdef DISKPM + struct queue *q; + IOReturn result; + + q = &_powerQueue; + + IOLockLock(q->lock); + + /* Dequeue and execute all requests for which we have the proper power level. */ + + while (q->head) { + cx = q->head; + if (pm_vars->myCurrentState != cx->desiredPower) { + break; + } + q->head = cx->next; /* remove command from the queue */ + if (q->head == NULL) { + q->tail = NULL; + } + + cx->state = kNone; + + /* If the queued request was synchronous, all we have to do is wake it up. */ + + if (cx->isSync) { + cx->sync->signal(kIOReturnSuccess, false); /* Just wake up the waiting thread: */ + + } else { /* it's async; fire it off! */ + result = standardAsyncReadWriteExecute(cx); /* execute the async IO */ + if (result != kIOReturnSuccess) { /* provider didn't accept it! 
 */
+							RWCompletion(cx);		/* force a completion */
+				}
+			}
+		};
+
+	IOLockUnlock(q->lock);
+#endif
+}
+
+void
+IOBasicSCSI::queueCommand(struct context *cx,bool isSync,UInt32 desiredPower)
+{
+#ifndef DISKPM			//for now, just return immediately without queueing
+	/* If we're ifdefed out, we have to start async requests. Sync requests
+	 * will just return immediately without any delay for power.
+	 */
+	if (isSync == kAsync) {
+		(void)standardAsyncReadWriteExecute(cx);	/* execute the async IO */
+	}
+#else
+	struct queue *q;
+
+	/* First, we enqueue the request to ensure sequencing with respect
+	 * to other commands that may already be in the queue.
+	 */
+
+	q = &_powerQueue;
+
+	cx->next = NULL;
+	cx->state = kAwaitingPower;
+
+	IOLockLock(q->lock);
+
+	if (q->head == NULL) {		/* empty queue */
+		q->head = cx;
+		q->tail = q->head;
+
+	} else {			/* not empty; add after tail */
+		q->tail->next = cx;
+		q->tail = cx;
+	}
+
+	/* If the command is synchronous, start by assuming we'll have to sleep
+	 * awaiting power (and subsequent dequeuing). If, however, power is already
+	 * right, then dequeueCommands will unlock the lock and we will continue,
+	 * returning inline to the call site, exactly as if we were awakened.
+	 *
+	 * An async request will call dequeueCommands and always return immediately.
+	 */
+
+	IOLockUnlock(q->lock);
+
+	/* Now we try to dequeue pending commands if the power's right. */
+
+	dequeueCommands();
+
+	/* If we're synchronous, we'll wait here till dequeued. If we were
+	 * dequeued above (and unlocked), then we'll return to allow the
+	 * caller to continue with the command execution. 
+ */ + + if (isSync) { + cx->sync->wait(false); /* waits here till awakened */ + } +#endif //DISKPM +} + +IOReturn +IOBasicSCSI::reportBlockSize(UInt64 *blockSize) +{ + IOReturn result; + + *blockSize = 0; + result = kIOReturnSuccess; + + if (_readCapDone == false) { + result = doReadCapacity(&_blockSize,&_maxBlock); + _readCapDone = true; + } + + if (result == kIOReturnSuccess) { + *blockSize = _blockSize; + } + + return(result); +} + +IOReturn +IOBasicSCSI::reportEjectability(bool *isEjectable) +{ + *isEjectable = true; /* default: if it's removable, it's ejectable */ + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::reportLockability(bool *isLockable) +{ + *isLockable = true; /* default: if it's removable, it's lockable */ + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::reportMaxReadTransfer (UInt64 blocksize,UInt64 *max) +{ + *max = blocksize * 65536; /* max blocks in a SCSI transfer */ + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::reportMaxValidBlock(UInt64 *maxBlock) +{ + IOReturn result; + + *maxBlock = 0; + result = kIOReturnSuccess; + + if (_readCapDone == false) { + result = doReadCapacity(&_blockSize,&_maxBlock); + _readCapDone = true; + } + + if (result == kIOReturnSuccess) { + *maxBlock = _maxBlock; + } + return(result); +} + +IOReturn +IOBasicSCSI::reportMaxWriteTransfer(UInt64 blocksize,UInt64 *max) +{ + *max = blocksize * 65536; /* max blocks in a SCSI transfer */ + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::reportPollRequirements(bool *pollRequired,bool *pollIsExpensive) +{ + *pollIsExpensive = false; + *pollRequired = _removable; /* for now, all removables need polling */ + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::reportRemovability(bool *isRemovable) +{ + if (_inqLen > 0) { /* inquiry byte exists to check */ + if (_inqBuf[1] & 0x80) { /* it's removable */ + *isRemovable = true; + _removable = true; + } else { /* it's not removable */ + *isRemovable = false; + _removable = false; + } + } else { 
/* no byte? call it nonremovable */ + *isRemovable = false; + } + + return(kIOReturnSuccess); +} + +/* Issue a Mode Sense to get the Mode Parameter Header but no pages. + * Since we're only interested in the Mode Parameter Header, we just + * issue a standard SCSI-1 6-byte command, nothing fancy. + */ +IOReturn +IOBasicSCSI::reportWriteProtection(bool *writeProtected) +{ + struct context *cx; + struct IOModeSensecdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + SCSIResults scsiResults; + UInt8 *buf; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(SCSICDBInfo) ); + + c = (struct IOModeSensecdb *)&scsiCDB.cdb; + c->opcode = SOP_MODESENSE; + c->lunbits = 0; + c->pagecode = 0 | 0x01; /* get current settings; any page will work */ + c->reserved = 0; + c->len = kModeSenseSize; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 6; + + req->setCDB( &scsiCDB ); + req->setPointers( cx->senseDataDesc, sizeof(SCSISenseData), false, true ); + + req->setTimeout( 30000 ); + + result = allocateTempBuffer(&buf,kModeSenseSize); + + if (result == kIOReturnSuccess) { + + cx->memory = IOMemoryDescriptor::withAddress((void *)buf, + kModeSenseSize, + kIODirectionIn); + + req->setPointers( cx->memory, kModeSenseSize, false ); + + queueCommand(cx,kSync,getReportWriteProtectionPowerState()); /* queue the op, sleep awaiting power */ + + result = simpleSynchIO(cx); + + if (result == kIOReturnUnderrun) { + cx->scsireq->getResults( &scsiResults ); + if (scsiResults.bytesTransferred >= 4) + result = kIOReturnSuccess; + } + + if (result == kIOReturnSuccess) { + if (buf[2] & 0x80) { + *writeProtected = true; + } else { + *writeProtected = false; + } + } + + deleteTempBuffer(buf,kModeSenseSize); + } + + deleteContext(cx); + + return(result); +} + +/* Issue a simple, asynchronous SCSI operation. The caller's supplied context + * contains a SCSI command and Memory Descriptor. 
The caller is responsible
+ * for deleting the context.
+ */
+
+IOReturn
+IOBasicSCSI::simpleAsynchIO(struct IOBasicSCSI::context *cx)
+{
+    IOSCSICommand *req;
+    IOReturn result;
+
+    if (cx == NULL) {			/* safety check */
+        return(kIOReturnNoMemory);
+    }
+
+    /* Set completion to return to genericCompletion: */
+    
+    req = cx->scsireq;
+    req->setCallback( (void *)this, (CallbackFn)IOBasicSCSI_gc_glue, (void *)cx );
+
+    cx->state = kSimpleSynchIO;
+    
+    /* Start the scsi request: */
+
+    result = req->execute();
+
+    if (result == true ) {
+        result = req->getResults((SCSIResults *) 0);
+    }
+
+    return(result);
+}
+
+/* Issue a simple, synchronous SCSI operation. The caller's supplied context
+ * contains a SCSI command and Memory Descriptor. The caller is responsible
+ * for deleting the context.
+ */
+
+IOReturn
+IOBasicSCSI::simpleSynchIO(struct context *cx)
+{
+    IOSCSICommand *req;
+    IOReturn result;
+
+    if (cx == NULL) {			/* safety check */
+        return(kIOReturnNoMemory);
+    }
+
+    /* Set completion to return to genericCompletion: */
+    
+    req = cx->scsireq;
+    req->setCallback( (void *)this, (CallbackFn)IOBasicSCSI_gc_glue, (void *)cx );
+
+    cx->state = kSimpleSynchIO;
+    
+/**
+    IOLog("%s[IOBasicSCSI]::simpleSynchIO; issuing SCSI cmd %02x\n",
+            getName(),req->cdb.byte[0]);
+**/
+    /* Start the scsi request: */
+    
+    //IOLog("IOBasicSCSI::simpleSynchIO, lock initted, calling SCSI\n");
+
+    result = req->execute();
+
+    if (result == true ) {
+
+//        IOLog("IOBasicSCSI::simpleSynchIO, SCSI req accepted\n");
+
+        /* Wait for it to complete by blocking on the sync event, which is
+         * signalled by the completion routine when the command finishes.
+ */ + + cx->sync->wait(false); /* waits here till unlocked at completion */ + + /* We're back: */ + + result = req->getResults((SCSIResults *) 0); + +/** + if ((result != kIOReturnSuccess) ) { + IOLog("%s[IOBasicSCSI]::simpleSynchIO; err '%s' from completed req\n", + getName(),stringFromReturn(result)); + } +**/ + } else { +/** + IOLog("%s[IOBasicSCSI]:simpleSynchIO; err '%s' queueing SCSI req\n", + getName(),stringFromReturn(result)); +**/ + } + +// IOLog("IOBasicSCSI: completed; result '%s'\n",stringFromReturn(result)); + + return(result); +} + +IOReturn +IOBasicSCSI::standardAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + struct context *cx; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + UInt32 reqSenseLength; + UInt32 timeoutSeconds; + UInt8 *cdb; + bool isWrite; + + cx = allocateContext(); + + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + buffer->retain(); /* bump the retain count */ + + cx->memory = buffer; + if (buffer->getDirection() == kIODirectionOut) { + isWrite = true; + } else { + isWrite = false; + } + +/** + IOLog("%s[IOBasicSCSI]::standardAsyncReadWrite; (%s) blk %ld nblks %ld\n", + getName(),(isWrite ? "write" : "read"),block,nblks); +**/ + req = cx->scsireq; + + /* Set completion to return to rwCompletion: */ + cx->completion = completion; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + req->setPointers( buffer, nblks * getBlockSize(), isWrite ); + + req->setCallback( this, IOBasicSCSI_gc_glue, cx ); + + cx->state = kAsyncReadWrite; + + cdb = (UInt8 *) &scsiCDB.cdb; + + /* Allow a subclass to override the creation of the cdb and specify + * other parameters for the operation. 
+ */ + + if (isWrite) { + scsiCDB.cdbFlags |= createWriteCdb(cdb,&scsiCDB.cdbLength, + block,nblks, + &reqSenseLength, + &timeoutSeconds); + + } else { + + scsiCDB.cdbFlags |= createReadCdb(cdb,&scsiCDB.cdbLength, + block,nblks, + &reqSenseLength, + &timeoutSeconds); + } + + req->setCDB( &scsiCDB ); + req->setPointers( cx->senseDataDesc, reqSenseLength, false, true ); + req->setTimeout( timeoutSeconds * 1000 ); + + /* Queue the request awaiting power and return. When power comes up, + * the request will be passed to standardAsyncReadWriteExecute. + */ + queueCommand(cx,kAsync,getReadWritePowerState()); /* queue and possibly wait for power */ + + return(kIOReturnSuccess); +} + +IOReturn +IOBasicSCSI::standardAsyncReadWriteExecute(struct context *cx) +{ + return(cx->scsireq->execute()); +} + +IOReturn +IOBasicSCSI::standardSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + struct context *cx; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + UInt32 reqSenseLength; + UInt32 reqTimeoutSeconds; + UInt8 *cdb; + bool isWrite; + IOReturn result; + + cx = allocateContext(); + + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + cx->memory = buffer; + buffer->retain(); /* bump the retain count */ + + if (buffer->getDirection() == kIODirectionOut) { + isWrite = true; + } else { + isWrite = false; + } + +/** + IOLog("%s[IOBasicSCSI]::standardSyncReadWrite; (%s) blk %ld nblks %ld\n", + getName(),(isWrite ? "write" : "read"),block,nblks); +**/ + + bzero(&scsiCDB,sizeof(scsiCDB)); + + req = cx->scsireq; + req->setPointers(buffer,(nblks * getBlockSize()),isWrite); + + cdb = (UInt8 *)&scsiCDB.cdb; + + /* Allow a subclass to override the creation of the cdb and specify + * other parameters for the operation. 
+ */ + + if (isWrite) { + scsiCDB.cdbFlags |= createWriteCdb(cdb,&scsiCDB.cdbLength, + block,nblks, + &reqSenseLength, + &reqTimeoutSeconds); + + } else { + + scsiCDB.cdbFlags |= createReadCdb(cdb,&scsiCDB.cdbLength, + block,nblks, + &reqSenseLength, + &reqTimeoutSeconds); + } + + + req->setCDB(&scsiCDB); + req->setPointers(cx->senseDataDesc,reqSenseLength,false,true); + req->setTimeout(reqTimeoutSeconds * 1000); + + queueCommand(cx,kSync,getReadWritePowerState()); /* queue the operation, sleep awaiting power */ + + result = simpleSynchIO(cx); /* issue a simple command */ + + deleteContext(cx); + return(result); +} + +bool +IOBasicSCSI::start(IOService *provider) +{ + bool result; + + _busResetContext = allocateContext(); + if (_busResetContext == NULL) { + return(false); + } + _busResetContext->state = kHandlingRecoveryAfterBusReset; + _busResetRecoveryInProgress = false; + + _unitAttentionContext = allocateContext(); + if (_unitAttentionContext == NULL) { + return(false); + } + _unitAttentionContext->state = kHandlingUnitAttention; + _unitAttentionRecoveryInProgress = false; + + result = provider->open(this,0,0); /* set up to receive message() notifications */ + if (result != true) { + IOLog("open result is false\n"); + } + + return(true); +} + +char * +IOBasicSCSI::stringFromState(stateValue state) +{ + static char *stateNames[] = { + "kNone", + "kAsyncReadWrite", + "kSimpleSynchIO", + "kHandlingUnitAttention", + "khandlingRecoveryAfterBusReset" + }; + + if (state < 0 || state > kMaxValidState) { + return("invalid"); + } + + return(stateNames[state]); +} diff --git a/iokit/Families/IOSCSIHDDrive/IOSCSIHDDrive.cpp b/iokit/Families/IOSCSIHDDrive/IOSCSIHDDrive.cpp new file mode 100644 index 000000000..c74ec25e8 --- /dev/null +++ b/iokit/Families/IOSCSIHDDrive/IOSCSIHDDrive.cpp @@ -0,0 +1,899 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +#define super IOBasicSCSI +OSDefineMetaClassAndStructors(IOSCSIHDDrive,IOBasicSCSI) + +IOReturn +IOSCSIHDDrive::allocateFormatBuffer(UInt8 **buf,UInt32 *len) +{ + /* The default implementation uses no buffer. 
*/ + + *buf = 0; + *len = 0; + return(kIOReturnSuccess); +} + +UInt8 +IOSCSIHDDrive::composeFormatBuffer(UInt8 * /* buf */,UInt32 /* buflen */) +{ + return(0); /* default: no fmtdata buffer to transfer */ +} + +OSDictionary * +IOSCSIHDDrive::constructDeviceProperties(void) +{ + OSDictionary *propTable; + OSData *prop; + char *typeString; + + propTable = OSDictionary::withCapacity(6); + + if (propTable) { + + prop = OSData::withBytes((void *)(&_vendor),strlen(_vendor)); + if (prop) { + propTable->setObject("vendor", prop); + } + + prop = OSData::withBytes((void *)(&_product),strlen(_product)); + if (prop) { + propTable->setObject("product", prop); + } + + prop = OSData::withBytes((void *)(&_rev),strlen(_rev)); + if (prop) { + propTable->setObject("revision", prop); + } + + typeString = (char *)getDeviceTypeName(); + prop = OSData::withBytes((void *)(typeString),strlen(typeString)); + if (prop) { + propTable->setObject("device-type", prop); + } + +#ifdef xxx + prop = OSData::withBytes((void *)(&_removable),sizeof(bool)); + if (prop) { + propTable->setObject("removable", prop); + } + + prop = OSData::withBytes((void *)(&_ejectable),sizeof(bool)); + if (prop) { + propTable->setObject("ejectable", prop); + } +#endif //xxx + + } + + return(propTable); +} + +UInt32 +IOSCSIHDDrive::createFormatCdb(UInt64 /* byteCapacity */, + UInt8 *cdb,UInt32 *cdbLength, + UInt8 buf[],UInt32 bufLen, + UInt32 *maxAutoSenseLength,UInt32 *timeoutSeconds) +{ + struct IOFormatcdb *c; + UInt8 formatControls; /* CmpLst & Defect List Format bits */ + + c = (struct IOFormatcdb *)cdb; + + c->opcode = kIOSCSICommandFormatUnit; + c->lunbits = 0; + c->vendor = 0; + c->interleave_msb = 0; + c->interleave_lsb = 0; + c->ctlbyte = 0; + + *cdbLength = 6; + + /* If we are to use a format buffer, set it up: */ + + if (buf != NULL) { + formatControls = composeFormatBuffer(buf,bufLen); + c->lunbits |= (formatControls | 0x10); /* data transfer will occur */ + } + + *maxAutoSenseLength = sizeof(SCSISenseData); 
/* do the sense */ + *timeoutSeconds = 0; /* infinitely long time */ + + return(0); +} + +IOService * +IOSCSIHDDrive::createNub(void) +{ + IOService *nub; + +// IOLog("%s[IOSCSIHDDrive]::createNub\n",getName()); + + /* Instantiate a nub so a generic driver can match above us. */ + + nub = instantiateNub(); + if (nub == NULL) { + IOLog("%s[IOSCSIHDDrive]::createNub; nub didn't instantiate\n",getName()); + return(NULL); + } + + nub->init(); + + if (!nub->attach(this)) { + IOPanic("IOSCSIHDDrive::createNub; couldn't attach IOSCSIHDDriveNub"); + } + + nub->registerService(); + + return(nub); +} + +void +IOSCSIHDDrive::deleteFormatBuffer(UInt8 * /* buf */, UInt32 /* buflen */) +{ + /* The default implementation has no buffer to free. */ +} + +bool +IOSCSIHDDrive::deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 * /*score*/) +{ + if ((_inqBuf[0] & 0x1f) == kIOSCSIDeviceTypeDirectAccess) { + return(true); + } else { + return(false); /* we don't handle other devices */ + } +} + +IOReturn +IOSCSIHDDrive::doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + return(standardAsyncReadWrite(buffer,block,nblks,completion)); +} + +IOReturn +IOSCSIHDDrive::doEjectMedia(void) +{ + /* Spin down, eject, and leave power alone: */ + + return(doStartStop(false,true,IOStartStopcdb::P_NOCHANGE)); +} + +IOReturn +IOSCSIHDDrive::doFormatMedia(UInt64 byteCapacity) +{ + return(standardFormatMedia(byteCapacity)); +} + +UInt32 +IOSCSIHDDrive::doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + if ((capacities != NULL) && (capacitiesMaxCount > 0)) { + *capacities = _blockSize * (_maxBlock + 1); + return(1); + } else { + return(0); + } +} + +/* We issue a simple Prevent/Allow command to lock or unlock the media: */ +IOReturn +IOSCSIHDDrive::doLockUnlockMedia(bool doLock) +{ + struct context *cx; + struct IOPrevAllowcdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = 
allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + c = (struct IOPrevAllowcdb *)&scsiCDB.cdb; + + c->opcode = kIOSCSICommandPreventAllow; + c->lunbits = 0; + c->reserved1 = 0; + c->reserved2 = 0; + + if (doLock) { + c->prevent = 0x01; /* prevent removal from device */ + } else { + c->prevent = 0x00; /* allow removal from device */ + } + + c->ctlbyte = 0; + + scsiCDB.cdbLength = 6; + + req->setCDB( &scsiCDB ); + + cx->memory = 0; + + req->setPointers( cx->memory, 0, false ); + + queueCommand(cx,kSync,getLockUnlockMediaPowerState()); /* queue the operation, sleep awaiting power */ + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSIHDDrive::doStart(void) +{ + return(doStartStop(true,false,IOStartStopcdb::P_ACTIVE)); +} + +IOReturn +IOSCSIHDDrive::doStop(void) +{ + return(doStartStop(false,false,IOStartStopcdb::P_NOCHANGE)); +} + +IOReturn +IOSCSIHDDrive::doStartStop(bool start,bool loadEject,UInt8 powerCondition) +{ + struct context *cx; + struct IOStartStopcdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + UInt32 powerLevel; /* what power level we need to be in */ + + /* Issue a Start/Stop Unit command. 
*/ + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + powerLevel = getStopPowerState(); /* assume we're spinning down */ + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(SCSICDBInfo) ); + + c = (struct IOStartStopcdb *)&scsiCDB.cdb; + c->opcode = kIOSCSICommandStartStopUnit; + c->lunImmed = 0; + c->reserved1 = 0; + c->reserved2 = 0; + c->controls = powerCondition; + c->controls = 0; /* xxx powerCondition is a SCSI-3 thing */ + if (loadEject) { + c->controls |= IOStartStopcdb::C_LOEJ; + powerLevel = getEjectPowerState(); /* let subclass decide what we need */ + }; + if (start) { + c->controls |= IOStartStopcdb::C_SPINUP; + powerLevel = getStartPowerState(); + } + c->ctlbyte = 0; + + scsiCDB.cdbLength = 6; + + req->setCDB( &scsiCDB ); + req->setTimeout( 30000 ); + + cx->memory = 0; + + req->setPointers( cx->memory, 0, false ); + + queueCommand(cx,kSync,powerLevel); /* queue the operation, sleep awaiting power */ + + result = simpleSynchIO(cx); + + deleteContext(cx); + return(result); +} + +IOReturn +IOSCSIHDDrive::doSynchronizeCache(void) +{ + return(standardSynchronizeCache()); +} + +IOReturn +IOSCSIHDDrive::doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + return(standardSyncReadWrite(buffer,block,nblks)); +} + +const char * +IOSCSIHDDrive::getDeviceTypeName(void) +{ + return(kIOBlockStorageDeviceTypeGeneric); +} + +UInt32 +IOSCSIHDDrive::getEjectPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIHDDrive::getExecuteCDBPowerState(void) +{ + return(kAllOn); +} + +UInt32 +IOSCSIHDDrive::getFormatMediaPowerState(void) +{ + return(kAllOn); +} + +UInt32 +IOSCSIHDDrive::getInitialPowerState(void) +{ + return(kAllOn); +} + +UInt32 +IOSCSIHDDrive::getInquiryPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIHDDrive::getLockUnlockMediaPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIHDDrive::getReadCapacityPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 
+IOSCSIHDDrive::getReadWritePowerState(void) +{ + return(kAllOn); +} + +UInt32 +IOSCSIHDDrive::getReportWriteProtectionPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIHDDrive::getStartPowerState(void) +{ + return(kElectronicsOn); +} + +UInt32 +IOSCSIHDDrive::getStopPowerState(void) +{ + return(kElectronicsOn); /* we don't have to be spinning to spin down */ +} + +UInt32 +IOSCSIHDDrive::getSynchronizeCachePowerState(void) +{ + return(kAllOn); +} + +UInt32 +IOSCSIHDDrive::getTestUnitReadyPowerState(void) +{ + return(kElectronicsOn); +} + +bool +IOSCSIHDDrive::init(OSDictionary * properties) +{ + _mediaPresent = false; + _startStopDisabled = false; + + return(super::init(properties)); +} + +IOService * +IOSCSIHDDrive::instantiateNub(void) +{ + IOService *nub; + + /* Instantiate a nub so a generic driver can match above us. */ + + nub = new IOSCSIHDDriveNub; + return(nub); +} + +bool +IOSCSIHDDrive::powerTickle(UInt32 desiredState) +{ + return(activityTickle(kIOPMSuperclassPolicy1,desiredState)); +} + +IOReturn +IOSCSIHDDrive::reportMediaState(bool *mediaPresent,bool *changed) +{ + struct context *cx; + struct IOTURcdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + SCSIResults scsiResults; + IOReturn result; + UInt8 status; + UInt8 senseKey; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + c = (struct IOTURcdb *)&scsiCDB.cdb; + c->opcode = kIOSCSICommandTestUnitReady; + c->lunbits = 0; + c->reserved1 = 0; + c->reserved2 = 0; + c->reserved3 = 0; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 6; + + req->setCDB( &scsiCDB ); + req->setPointers( cx->senseDataDesc, 255, false, true ); + + req->setTimeout( 5000 ); + + cx->memory = 0; + + req->setPointers( cx->memory, 0, false ); + +/** + IOLog("IOSCSIHDDrive::reportMediaState: mp=%08x,ch=%08x\n", + (int)mediaPresent,(int)changed); + IOLog("IOSCSIHDDrive::reportMediaState: doing TUR\n"); +**/ + + 
queueCommand(cx,kSync,getTestUnitReadyPowerState()); + result = simpleSynchIO(cx); + + req->getResults( &scsiResults ); + + status = scsiResults.scsiStatus; + +/** + IOLog("%s[IOSCSIHDDrive]::reportMediaState; result=%s, status=%02x,sense=%02x\n", + getName(),stringFromReturn(result),status,cx->senseData->senseKey + ); +**/ + + if (result == kIOReturnSuccess) { /* TUR succeeded; device is ready */ + + *mediaPresent = true; + *changed = (*mediaPresent != _mediaPresent); /* report if it's changed */ + _mediaPresent = true; /* remember current state */ + result = kIOReturnSuccess; + + } else { /* TUR failed; check sense key */ + + if ( scsiResults.requestSenseDone == true ) { + senseKey = cx->senseData->senseKey; + + if (senseKey == 0x02) { /* device says "not ready" */ + *mediaPresent = false; + *changed = (*mediaPresent != _mediaPresent); /* report if it's changed */ + _mediaPresent = false; /* remember current state */ + result = kIOReturnSuccess; + + } else { /* funky sense key? forget it. */ + + *mediaPresent = false; + *changed = (*mediaPresent != _mediaPresent); /* report if it's changed */ + _mediaPresent = false; /* remember current state */ + result = kIOReturnIOError; +/** + IOLog("%s[IOSCSIHDDrive]:: reportMediaState; funky sense key %d\n", + getName(),senseKey); + **/ + } + } else { /* autosense not done! */ + + /* This condition has been observed with the Matsushita PD-2 DVD-RAM on the + * Curio (external) bus on an 8500. I can't figure out why we get a good status + * but no autosense (after going through Unit-Attention.) We ignore the current + * media check and it'll operate normally on the next pass through. 
+ */ +/** + IOLog("%s[IOSCSIHDDrive]:: reportMediaState; autosense not done: ",getName()); + IOLog("result = '%s', status = %d, senseKey = %d\n", + stringFromReturn(result),status,cx->senseData->senseKey); +**/ + *mediaPresent = _mediaPresent; + *changed = false; + result = kIOReturnSuccess; + } + } + + if (*changed && *mediaPresent) { + _readCapDone = false; + _blockSize = 0; + _maxBlock = 0; + } + + deleteContext(cx); + +#ifndef DISKPM + if (*changed && *mediaPresent) + doStart(); +#endif + +/** + if (result != kIOReturnSuccess) { + IOLog("%s[IOSCSIHDDrive]:: reportMediaState; returning %d %x '%s'\n", + getName(),result,result,stringFromReturn(result)); + } +**/ + return(result); +} + +IOReturn +IOSCSIHDDrive::restoreElectronicsState(void) +{ + return(kIOReturnSuccess); +} + +/* The standard completion for a doAsyncReadWrite operation. We fire it + * up to our target, the generic driver. + */ +void +IOSCSIHDDrive::RWCompletion(struct context *cx) +{ + SCSIResults scsiResults; + + cx->scsireq->getResults( &scsiResults ); + + IOStorage::complete(cx->completion, + scsiResults.returnCode, + scsiResults.bytesTransferred); + + + /* Attempt to dequeue and execute any waiting commands: */ + + dequeueCommands(); +} + +IOReturn +IOSCSIHDDrive::saveElectronicsState(void) +{ + return(kIOReturnSuccess); +} + +static IOPMPowerState ourPowerStates[kNumberOfPowerStates] = { + {1,IOPMNotAttainable,0,0,0,0,0,0,0,0,0,0}, /* state 00 kAllOff */ + {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0}, /* state 01 kElectronicsOn */ + {1,0,0,IOPMPowerOn,0,0,0,0,0,0,0,0} /* state 02 kAllOn */ +}; + +IOReturn +IOSCSIHDDrive::standardFormatMedia(UInt64 byteCapacity) +{ + struct context *cx; + UInt8 *fmtbuf; + IOReturn result; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + UInt32 transferLength; + UInt32 senseLength; + UInt32 timeoutSeconds; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + + /* Allow a subclass to construct the cdb and return an 
optional + * memory buffer address for defect lists, etc. + */ + + result = allocateFormatBuffer(&fmtbuf,&transferLength); + if (result != kIOReturnSuccess) { + return(result); + } + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + scsiCDB.cdbFlags |= createFormatCdb(byteCapacity,(UInt8 *)&scsiCDB.cdb,&scsiCDB.cdbLength, + fmtbuf,transferLength, + &senseLength, + &timeoutSeconds); + + req->setCDB( &scsiCDB ); + req->setPointers( cx->senseDataDesc, senseLength, false, true ); + req->setTimeout( timeoutSeconds * 1000 ); + + /* If we have a buffer to transfer, create a Memory Descriptor for it: */ + + if ((fmtbuf != NULL) && (transferLength != 0)) { + cx->memory = IOMemoryDescriptor::withAddress((void *)fmtbuf, + transferLength, + kIODirectionOut); + } + + req->setPointers( cx->memory, transferLength, true ); + queueCommand(cx,kSync,getFormatMediaPowerState()); /* queue the operation, sleep awaiting power */ + + result = simpleSynchIO(cx); /* issue a simple command */ + + /* Free the format buffer, if any: */ + + deleteFormatBuffer(fmtbuf,transferLength); + + deleteContext(cx); + + return(result); +} + +IOReturn +IOSCSIHDDrive::standardSynchronizeCache(void) +{ + struct context *cx; + struct IOSyncCachecdb *c; + IOSCSICommand *req; + SCSICDBInfo scsiCDB; + IOReturn result; + + cx = allocateContext(); + if (cx == NULL) { + return(kIOReturnNoMemory); + } + + req = cx->scsireq; + bzero( &scsiCDB, sizeof(scsiCDB) ); + + c = (struct IOSyncCachecdb *)&scsiCDB.cdb; + + c->opcode = kIOSCSICommandSynchronizeCache; + c->lunbits = 0; + c->lba_3 = 0; /* if zero, start at block zero */ + c->lba_2 = 0; + c->lba_1 = 0; + c->lba_0 = 0; + c->reserved = 0; + c->nblks_msb = 0; /* if zero, do all blocks */ + c->nblks_lsb = 0; + c->ctlbyte = 0; + + scsiCDB.cdbLength = 10; + + req->setCDB( &scsiCDB ); + + cx->memory = 0; + + req->setPointers( cx->memory, 0, false ); + + /* We assume there will be some data in the drive's cache, so we force the + * drive to be running before we issue this command. 
+ */ + + queueCommand(cx,kSync,getSynchronizeCachePowerState()); /* queue the operation, sleep awaiting power */ + + result = simpleSynchIO(cx); + + deleteContext(cx); + + return(result); +} + +bool +IOSCSIHDDrive::start(IOService *provider) +{ + IOService *nub; + + if (!super::start(provider)) { + return(false); + } + +// IOLog("%s[IOSCSIHDDrive]::start\n",getName()); + + /* Initialize and set up to perform Power Management: */ + + PMinit(); + _restoreState = false; +#ifdef notyet // don't register for PM yet till we handle queuing requests! + IOPMRegisterDevice(pm_vars->ourName,this); // join the power management tree +#endif + registerPowerDriver(this,ourPowerStates,kNumberOfPowerStates); // export power states + + nub = createNub(); + if (nub == NULL) { + return(false); + } else { + return(true); + } +} + +// ********************************************************************************** +// maxCapabilityForDomainState +// +// This simple device needs only power. If the power domain is supplying +// power, the disk can go to its highest state. If there is no power +// it can only be in its lowest state, which is off. +// ********************************************************************************** + +unsigned long +IOSCSIHDDrive::maxCapabilityForDomainState(IOPMPowerFlags domainState) +{ + if (domainState & IOPMPowerOn) { + return(kAllOn); + } else { + return(kAllOff); + } +} + +// ********************************************************************************** +// powerStateForDomainState +// +// The power domain may be changing state. If power is ON in its new +// state, we will be on, too. If domain power is OFF, we are off. 
+// ********************************************************************************** +unsigned long +IOSCSIHDDrive::powerStateForDomainState(IOPMPowerFlags domainState) +{ + if (domainState & IOPMPowerOn) { + return(kAllOn); /* xxx might be kElectronicsOn if drive not spun up */ + } else { + return(kAllOff); + } +} + +// ********************************************************************************** +// initialPowerStateForDomainState +// +// Our parent wants to know what our initial power state is. If power is ON in the +// domain, we are in state kElectronicsOn or kAllOn. If domain power is OFF, we are off. +// ********************************************************************************** +unsigned long +IOSCSIHDDrive::initialPowerStateForDomainState(IOPMPowerFlags domainState) +{ + if (domainState & IOPMPowerOn) { + return(getInitialPowerState()); /* report whether it's spinning on startup */ + } else { + return(kAllOff); + } +} + +// ********************************************************************************** +// setPowerState +// +// Someone has decided to change the disk state. We perform the change here. +// ********************************************************************************** +IOReturn +IOSCSIHDDrive::setPowerState(unsigned long powerStateOrdinal,IOService *) +{ + IOReturn result; + + result = kIOReturnSuccess; + + /* All we do in the default implementation is spin up and down. If the drive reports an + * error to a start/stop command, we don't bother attempting to issue those commands again. + * + * xxx Question: What should we return? Success? or an error meaning "we didn't do it!" 
+ */ + switch (powerStateOrdinal) { + + case kElectronicsOn : /* spin down if necessary */ + if (pm_vars->myCurrentState == kAllOn) { + if (!_startStopDisabled) { + result = doStop(); + if (result != kIOReturnSuccess) { + _startStopDisabled = true; + result = kIOReturnSuccess; + } + } + } + break; + + case kAllOn : /* spin up if necessary */ + if (pm_vars->myCurrentState == kElectronicsOn) { + if (!_startStopDisabled) { + result = doStart(); + if (result != kIOReturnSuccess) { + _startStopDisabled = true; + result = kIOReturnSuccess; + } + } + } + break; + + default: /* we don't do other states */ + result = kIOReturnSuccess; + break; + + } + + return(result); +} + +// ********************************************************************************** +/* We get called here as an advisory that the power state will change. If we are coming up + * from the all-off state, remember to restore the electronics state when we later power up. + * If we are powering-down the electronics, save any required state now. + */ +IOReturn +IOSCSIHDDrive::powerStateWillChangeTo(unsigned long,unsigned long stateOrdinal,IOService *) +{ + if ((pm_vars->myCurrentState == kAllOff) && + (stateOrdinal > kAllOff)) { /* we're powering up from all-off */ + _restoreState = true; + } + + if ((stateOrdinal == kAllOff) && + (pm_vars->myCurrentState > kAllOff)) { /* we're powering down to all-off */ + saveElectronicsState(); + } + + return(IOPMAckImplied); +} + +// ********************************************************************************** +/* We get called here when power has successfully changed state. */ +IOReturn +IOSCSIHDDrive::powerStateDidChangeTo(unsigned long,unsigned long stateOrdinal,IOService*) +{ + IOReturn result; + + /* If we must restore the electronics state, do it now. 
*/ + + if (_restoreState) { + result = restoreElectronicsState(); + _restoreState = false; + } + + /* If we have powered up into a state that can execute commands, release any queued + * requests that were awaiting the power change. + */ + + if (stateOrdinal > kAllOff) { + dequeueCommands(); + } + + return IOPMAckImplied; +} diff --git a/iokit/Families/IOSCSIHDDrive/IOSCSIHDDriveNub.cpp b/iokit/Families/IOSCSIHDDrive/IOSCSIHDDriveNub.cpp new file mode 100644 index 000000000..c43d09970 --- /dev/null +++ b/iokit/Families/IOSCSIHDDrive/IOSCSIHDDriveNub.cpp @@ -0,0 +1,184 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +#define super IOBlockStorageDevice +OSDefineMetaClassAndStructors(IOSCSIHDDriveNub,IOBlockStorageDevice) + +bool +IOSCSIHDDriveNub::attach(IOService * provider) +{ +// IOLog("IOSCSIHDDriveNub: attach\n"); + + if (!super::attach(provider)) { + return(false); + } + +// IOLog("IOSCSIHDDriveNub: attach; casting provider\n"); + _provider = OSDynamicCast(IOSCSIHDDrive,provider); + if (_provider == NULL) { + IOLog("IOSCSIHDDriveNub: attach; wrong provider type!\n"); + return(false); + } else { +// IOLog("IOSCSIHDDriveNub: attach; provider OK\n"); + return(true); + } +} + +void IOSCSIHDDriveNub::detach(IOService * provider) +{ + if( _provider == provider) + _provider = 0; + + super::detach( provider ); +} + +IOReturn +IOSCSIHDDriveNub::doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) +{ + return(_provider->doAsyncReadWrite(buffer,block,nblks,completion)); +} + +IOReturn +IOSCSIHDDriveNub::doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks) +{ + return(_provider->doSyncReadWrite(buffer,block,nblks)); +} + +IOReturn +IOSCSIHDDriveNub::doEjectMedia(void) +{ + return(_provider->doEjectMedia()); +} + +IOReturn +IOSCSIHDDriveNub::doFormatMedia(UInt64 byteCapacity) +{ + return(_provider->doFormatMedia(byteCapacity)); +} + +UInt32 +IOSCSIHDDriveNub::doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + return(_provider->doGetFormatCapacities(capacities,capacitiesMaxCount)); +} + +IOReturn +IOSCSIHDDriveNub::doLockUnlockMedia(bool doLock) +{ + return(_provider->doLockUnlockMedia(doLock)); +} + +IOReturn +IOSCSIHDDriveNub::doSynchronizeCache(void) +{ + return(_provider->doSynchronizeCache()); +} + +char * +IOSCSIHDDriveNub::getVendorString(void) +{ + return(_provider->getVendorString()); +} + +char * +IOSCSIHDDriveNub::getProductString(void) +{ + return(_provider->getProductString()); +} + +char * 
+IOSCSIHDDriveNub::getRevisionString(void) +{ + return(_provider->getRevisionString()); +} + +char * +IOSCSIHDDriveNub::getAdditionalDeviceInfoString(void) +{ + return(_provider-> getAdditionalDeviceInfoString()); +} + +IOReturn +IOSCSIHDDriveNub::reportBlockSize(UInt64 *blockSize) +{ + return(_provider->reportBlockSize(blockSize)); +} + +IOReturn +IOSCSIHDDriveNub::reportEjectability(bool *isEjectable) +{ + return(_provider->reportEjectability(isEjectable)); +} + +IOReturn +IOSCSIHDDriveNub::reportLockability(bool *isLockable) +{ + return(_provider->reportLockability(isLockable)); +} + +IOReturn +IOSCSIHDDriveNub::reportPollRequirements(bool *pollIsRequired,bool *pollIsExpensive) +{ + return(_provider->reportPollRequirements(pollIsRequired,pollIsExpensive)); +} + +IOReturn +IOSCSIHDDriveNub::reportMaxReadTransfer (UInt64 blockSize,UInt64 *max) +{ + return(_provider->reportMaxReadTransfer(blockSize,max)); +} + +IOReturn +IOSCSIHDDriveNub::reportMaxValidBlock(UInt64 *maxBlock) +{ + return(_provider->reportMaxValidBlock(maxBlock)); +} + +IOReturn +IOSCSIHDDriveNub::reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max) +{ + return(_provider->reportMaxWriteTransfer(blockSize,max)); +} + +IOReturn +IOSCSIHDDriveNub::reportMediaState(bool *mediaPresent,bool *changed) +{ + return(_provider->reportMediaState(mediaPresent,changed)); +} + +IOReturn +IOSCSIHDDriveNub::reportRemovability(bool *isRemovable) +{ + return(_provider->reportRemovability(isRemovable)); +} + +IOReturn +IOSCSIHDDriveNub::reportWriteProtection(bool *isWriteProtected) +{ + return(_provider->reportWriteProtection(isWriteProtected)); +} diff --git a/iokit/Families/IOSCSIParallel/IOSCSIParallelCommand.cpp b/iokit/Families/IOSCSIParallel/IOSCSIParallelCommand.cpp new file mode 100644 index 000000000..bff8be4bc --- /dev/null +++ b/iokit/Families/IOSCSIParallel/IOSCSIParallelCommand.cpp @@ -0,0 +1,429 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSIParallelCommand.cpp + * + */ + +#include +#include +#include + +#undef super +#define super IOSCSICommand + +OSDefineMetaClassAndStructors( IOSCSIParallelCommand, IOSCSICommand ) +OSDefineMetaClassAndAbstractStructors( IOSCSICommand, IOCDBCommand ) +OSDefineMetaClassAndAbstractStructors( IOCDBCommand, IOCommand ) + +static struct adapterReturnCodes +{ + SCSIAdapterStatus adapterStatus; + IOReturn ioReturnCode; +} +adapterReturnCodes[] = +{ + { kSCSIAdapterStatusSuccess, kIOReturnSuccess }, + { kSCSIAdapterStatusProtocolError, kIOReturnDeviceError }, + { kSCSIAdapterStatusSelectionTimeout, kIOReturnNotResponding }, + { kSCSIAdapterStatusMsgReject, kIOReturnUnsupported }, + { kSCSIAdapterStatusParityError, kIOReturnIOError }, + { kSCSIAdapterStatusOverrun, kIOReturnOverrun } +}; + +#define kNumAdapterReturnCodes (sizeof(adapterReturnCodes)/sizeof(adapterReturnCodes[0])) + +static struct statusReturnCodes +{ + UInt32 scsiStatus; + IOReturn ioReturnCode; +} +statusReturnCodes[] = +{ + { kSCSIStatusGood, kIOReturnSuccess }, + { kSCSIStatusCheckCondition, kIOReturnIOError }, + { 
kSCSIStatusConditionMet, kIOReturnSuccess }, + { kSCSIStatusBusy, kIOReturnBusy }, + { kSCSIStatusIntermediate, kIOReturnSuccess }, + { kSCSIStatusIntermediateMet, kIOReturnSuccess }, + { kSCSIStatusReservationConfict, kIOReturnAborted }, + { kSCSIStatusCommandTerminated, kIOReturnAborted }, + { kSCSIStatusQueueFull, kIOReturnIOError } +}; + +#define kNumStatusReturnCodes (sizeof(statusReturnCodes)/sizeof(statusReturnCodes[0])) + + +IOSCSIDevice *IOSCSIParallelCommand::getDevice(IOSCSIDevice *) +{ + return (IOSCSIDevice *)getDevice(kIOSCSIParallelDevice); +} + +IOSCSIParallelDevice *IOSCSIParallelCommand::getDevice(IOSCSIParallelDevice *) +{ + return device; +} + + +void *IOSCSIParallelCommand::getClientData() +{ + return clientData; +} + +void *IOSCSIParallelCommand::getCommandData() +{ + return commandPrivateData; +} + +UInt32 IOSCSIParallelCommand::getCmdType() +{ + return cmdType; +} + +IOSCSIParallelCommand *IOSCSIParallelCommand::getOriginalCmd() +{ + return origCommand; +} + +UInt32 IOSCSIParallelCommand::getSequenceNumber() +{ + return sequenceNumber; +} + +void IOSCSIParallelCommand::getTargetLun( SCSITargetLun *forTargetLun ) +{ + if ( device ) + { + *forTargetLun = device->targetLun; + } + else + { + bzero( forTargetLun, sizeof( SCSITargetLun ) ); + } +} + + + +void IOSCSIParallelCommand::setTimeout( UInt32 timeoutMS ) +{ + timeout = timeoutMS; +} + +UInt32 IOSCSIParallelCommand::getTimeout() +{ + return timeout; +} + +void IOSCSIParallelCommand::setResults( SCSIResults *srcResults ) +{ + setResults( srcResults, (SCSINegotiationResults *)0 ); +} + +void IOSCSIParallelCommand::setResults( SCSIResults *srcResults, SCSINegotiationResults *negotiationResult ) +{ + SCSICommandType cmdType; + + if ( srcResults != 0 ) + { + results = *srcResults; + + cmdType = (SCSICommandType) getCmdType(); + + while ( results.returnCode == kIOReturnSuccess ) + { + switch ( cmdType ) + { + case kSCSICommandExecute: + case kSCSICommandReqSense: + case kSCSICommandAbort: + case 
kSCSICommandAbortAll: + case kSCSICommandDeviceReset: + if ( results.adapterStatus != kSCSIAdapterStatusSuccess ) + { + results.returnCode = adapterStatusToIOReturnCode( results.adapterStatus ); + } + break; + + default: + ; + } + + if ( results.returnCode != kIOReturnSuccess ) break; + + switch ( cmdType ) + { + case kSCSICommandExecute: + case kSCSICommandReqSense: + results.returnCode = scsiStatusToIOReturnCode( results.scsiStatus ); + + if ( results.returnCode != kIOReturnSuccess ) break; + + if ( results.bytesTransferred < xferCount ) + { + results.returnCode = kIOReturnUnderrun; + } + break; + + default: + ; + } + + break; + } + } + + if ( negotiationResult != 0 ) + { + device->target->negotiationResult = *negotiationResult; + } +} + +IOReturn IOSCSIParallelCommand::adapterStatusToIOReturnCode( SCSIAdapterStatus adapterStatus ) +{ + UInt32 i; + + for ( i=0; i < kNumAdapterReturnCodes; i++ ) + { + if ( adapterReturnCodes[i].adapterStatus == adapterStatus ) + { + return adapterReturnCodes[i].ioReturnCode; + } + } + return kIOReturnError; +} + +IOReturn IOSCSIParallelCommand::scsiStatusToIOReturnCode( UInt8 scsiStatus ) +{ + UInt32 i; + + for ( i=0; i < kNumStatusReturnCodes; i++ ) + { + if ( statusReturnCodes[i].scsiStatus == scsiStatus ) + { + return statusReturnCodes[i].ioReturnCode; + } + } + return kIOReturnError; +} + +IOReturn IOSCSIParallelCommand::getResults( SCSIResults *dstResults ) +{ + if ( dstResults != 0 ) + { + *dstResults = results; + } + + return results.returnCode; +} + +void IOSCSIParallelCommand::setQueueInfo( UInt32 forQueueType, UInt32 forQueuePosition ) +{ + queueType = forQueueType; + queuePosition = forQueuePosition; +} + +void IOSCSIParallelCommand::getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ) +{ + if ( forQueueType != 0 ) *forQueueType = queueType; + if ( forQueuePosition != 0 ) *forQueuePosition = queuePosition; +} + +void IOSCSIParallelCommand::setPointers( IOMemoryDescriptor *clientDesc, UInt32 transferCount, 
bool isWrite, bool isSense = false ) +{ + if ( isSense == false ) + { + xferDesc = clientDesc; + xferCount = transferCount; + xferDirection = isWrite; + } + else + { + senseData = clientDesc; + senseLength = transferCount; + } +} + +void IOSCSIParallelCommand::getPointers( IOMemoryDescriptor **clientDesc, UInt32 *transferCount, bool *isWrite, bool isSense = false ) +{ + if ( clientDesc != NULL ) + { + *clientDesc = (isSense == false) ? xferDesc : senseData; + } + + if ( transferCount != NULL ) + { + *transferCount = (isSense == false) ? xferCount : senseLength; + } + + if ( isWrite != NULL ) + { + *isWrite = (isSense == false) ? xferDirection : false; + } +} + +void IOSCSIParallelCommand::setCDB( SCSICDBInfo *clientSCSICmd ) +{ + scsiCmd = *clientSCSICmd; +} + +void IOSCSIParallelCommand::getCDB( SCSICDBInfo *clientSCSICmd ) +{ + *clientSCSICmd = scsiCmd; +} + +void IOSCSIParallelCommand::setCallback( void *clientTarget, CallbackFn clientSCSIDoneFn, void *clientRefcon ) +{ + completionInfo.async.target = clientTarget; + completionInfo.async.callback = clientSCSIDoneFn; + completionInfo.async.refcon = clientRefcon; +} + +bool IOSCSIParallelCommand::execute( UInt32 *cmdSequenceNumber ) +{ + bool isSync; + + do + { + sequenceNumber = OSIncrementAtomic( (SInt32 *)&controller->sequenceNumber ); + } + while ( sequenceNumber == 0 ); + + if ( cmdSequenceNumber != 0 ) + { + *cmdSequenceNumber = sequenceNumber; + } + + list = (queue_head_t *)device->deviceGate; + + isSync = (completionInfo.async.callback == 0); + + if ( isSync ) + { + completionInfo.sync.lock = IOSyncer::create(); + } + + device->submitCommand( kSCSICommandExecute, this ); + + if ( isSync ) + { + completionInfo.sync.lock->wait(); + } + + return true; + +} + +void IOSCSIParallelCommand::abort( UInt32 sequenceNumber ) +{ + device->submitCommand( kSCSICommandAbort, this, sequenceNumber ); +} + +void IOSCSIParallelCommand::complete() +{ + if ( device ) + { + device->completeCommand( this ); + } + else + { + 
controller->completeCommand( this ); + } +} + +/*------------------- Generic CDB Interface -----------------------------------------------*/ + +void IOSCSIParallelCommand::getCDB( CDBInfo *cdbInfo ) +{ + SCSICDBInfo scsiCDBInfo; + + bzero( cdbInfo, sizeof(CDBInfo) ); + + getCDB( &scsiCDBInfo ); + cdbInfo->cdb = scsiCDBInfo.cdb; + cdbInfo->cdbLength = scsiCDBInfo.cdbLength; +} + +void IOSCSIParallelCommand::setCDB( CDBInfo *cdbInfo ) +{ + SCSICDBInfo scsiCDBInfo; + + bzero( &scsiCDBInfo, sizeof(SCSICDBInfo) ); + + scsiCDBInfo.cdbLength = cdbInfo->cdbLength; + scsiCDBInfo.cdb = cdbInfo->cdb; + setCDB( &scsiCDBInfo ); +} + +IOReturn IOSCSIParallelCommand::getResults( CDBResults *cdbResults ) +{ + SCSIResults scsiResults; + IOReturn rc; + + rc = getResults( &scsiResults ); + + if ( cdbResults != 0 ) + { + bzero( cdbResults, sizeof(CDBResults) ); + + cdbResults->returnCode = scsiResults.returnCode; + cdbResults->bytesTransferred = scsiResults.bytesTransferred; + cdbResults->requestSenseDone = scsiResults.requestSenseDone; + cdbResults->requestSenseLength = scsiResults.requestSenseLength; + } + + return rc; +} + + +IOCDBDevice *IOSCSIParallelCommand::getDevice( IOCDBDevice * ) +{ + return (IOCDBDevice *)device; +} + + +void IOSCSIParallelCommand::zeroCommand() +{ + cmdType = kSCSICommandNone; +// controller = ; +// device = ; + list = 0; + bzero(&nextCommand, sizeof(nextCommand)); + bzero(&scsiCmd, sizeof(scsiCmd)); + bzero(&results, sizeof(results)); + timeout = 0; + timer = 0; + queueType = 0; + queuePosition = 0; + xferDesc = 0; + xferCount = 0; + xferDirection = 0; + senseLength = 0; + senseData = 0; + origCommand = 0; + bzero(&completionInfo, sizeof(completionInfo)); + if (dataArea && dataSize) + bzero( dataArea, dataSize ); +// commandPrivateData = ; +// clientData = ; + sequenceNumber = 0; +} diff --git a/iokit/Families/IOSCSIParallel/IOSCSIParallelController.cpp b/iokit/Families/IOSCSIParallel/IOSCSIParallelController.cpp new file mode 100644 index 
000000000..a2b386386 --- /dev/null +++ b/iokit/Families/IOSCSIParallel/IOSCSIParallelController.cpp @@ -0,0 +1,1079 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOSCSIParallelController.cpp + * + */ + +#include +#include + +#undef super +#define super IOService + +OSDefineMetaClass( IOSCSIParallelController, IOService ) +OSDefineAbstractStructors( IOSCSIParallelController, IOService ); + +#define round(x,y) (((int)(x) + (y) - 1) & ~((y)-1)) + +/* + * + * + */ +bool IOSCSIParallelController::start( IOService *forProvider ) +{ + provider = forProvider; + + if ( provider->open( this ) != true ) + { + return false; + } + + if ( createWorkLoop() != true ) + { + return false; + } + + if ( configureController() == false ) + { + provider->close( this ); + return false; + } + + initQueues(); + + if ( scanSCSIBus() == false ) + { + provider->close( this ); + return false; + } + + return true; +} + +/* + * + * + * + */ +bool IOSCSIParallelController::scanSCSIBus() +{ + SCSITargetLun targetLun; + UInt32 i; + + targetLun.lun = 0; + + for ( i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + targetLun.target = i; + probeTarget( targetLun ); + } + + return true; +} + +/* + * + * + * + */ +bool IOSCSIParallelController::probeTarget( SCSITargetLun targetLun ) +{ + IOSCSIParallelDevice *device; + UInt32 i; + + if ( targetLun.target == controllerInfo.initiatorId ) + { + return false; + } + + if ( initTarget( targetLun ) == false ) + { + releaseTarget( targetLun ); + return false; + } + + for ( i=0; i < controllerInfo.maxLunsSupported; i++ ) + { + targetLun.lun = i; + + device = createDevice(); + if ( device == 0 ) + { + break; + } + + if ( device->init( this, targetLun ) == false ) + { + releaseDevice( device ); + break; + } + + if ( initDevice( device ) == false ) + { + releaseDevice( device ); + continue; + } + + if ( device->probeTargetLun() != kIOReturnSuccess ) + { + releaseDevice( device ); + if ( i == 0 ) break; + } + } + + if ( i == 0 ) + { + releaseTarget( targetLun ); + return false; + } + + queue_iterate( &targets[targetLun.target].deviceList, device, IOSCSIParallelDevice *, 
nextDevice ) + { + device->setupTarget(); + device->attach( this ); + device->registerService(); + } + + return true; +} + +/* + * + * + * + */ +bool IOSCSIParallelController::initTargetGated( SCSITargetLun *targetLun ) +{ + return initTarget( *targetLun ); +} + +bool IOSCSIParallelController::initTarget( SCSITargetLun targetLun ) +{ + SCSITarget *target; + UInt32 number; + + if ( getWorkLoop()->inGate() == false ) + { + return controllerGate->runAction( (IOCommandGate::Action)&IOSCSIParallelController::initTargetGated, (void *)&targetLun ); + } + + target = &targets[targetLun.target]; + + target->clientSem = IORWLockAlloc(); + target->targetSem = IORWLockAlloc(); + if( (target->targetSem == 0) || (target->clientSem == 0)) + { + return false; + } + target->commandLimitSave = target->commandLimit = 1; + + target->targetParmsCurrent.transferWidth = 1; + + if ( controllerInfo.targetPrivateDataSize != 0 ) + { + target->targetPrivateData = IOMallocContiguous( controllerInfo.targetPrivateDataSize, 16, 0 ); + if ( target->targetPrivateData == 0 ) + { + return false; + } + } + + if ( controllerInfo.tagAllocationMethod == kTagAllocationPerTarget ) + { + target->tagArray = (UInt32 *)IOMalloc( tagArraySize ); + if ( target->tagArray == 0 ) + { + return false; + } + bzero( target->tagArray, tagArraySize ); + } + + number = 0; + target->regObjTransferPeriod = OSNumber::withNumber( number, 32 ); + if ( target->regObjTransferPeriod == 0 ) + { + return false; + } + + number = 0; + target->regObjTransferOffset = OSNumber::withNumber( number, 32 ); + if ( target->regObjTransferOffset == 0 ) + { + return false; + } + + number = 1; + target->regObjTransferWidth = OSNumber::withNumber( number, 32 ); + if ( target->regObjTransferWidth == 0 ) + { + return false; + } + + number = 0; + target->regObjTransferOptions = OSNumber::withNumber( number, 32 ); + if ( target->regObjTransferOptions == 0 ) + { + return false; + } + + number = 0; + target->regObjCmdQueue = OSNumber::withNumber( 
number, 32 ); + if ( target->regObjCmdQueue == 0 ) + { + return false; + } + + target->targetAllocated = allocateTarget( targetLun ); + + return target->targetAllocated; +} + +/* + * + * + * + */ +void IOSCSIParallelController::releaseTargetGated( SCSITargetLun *targetLun ) +{ + releaseTarget( *targetLun ); +} + +void IOSCSIParallelController::releaseTarget( SCSITargetLun targetLun ) +{ + SCSITarget *target; + + if ( getWorkLoop()->inGate() == false ) + { + controllerGate->runAction( (IOCommandGate::Action)&IOSCSIParallelController::releaseTargetGated, (void *)&targetLun ); + return; + } + + target = &targets[targetLun.target]; + + if ( queue_empty( &target->deviceList ) != true ) + { + IOLog("IOSCSIParallelController()::Target %d deleted with lun(s) active!\n\r", + targetLun.target ); + } + + if ( target->targetAllocated == true ) + { + deallocateTarget( targetLun ); + + target->targetAllocated = false; + } + + if ( target->tagArray != 0 ) + { + IOFree( target->tagArray, tagArraySize ); + target->tagArray = 0; + } + + if ( target->targetPrivateData != 0 ) + { + IOFreeContiguous( target->targetPrivateData, controllerInfo.targetPrivateDataSize ); + target->targetPrivateData = 0; + } + + if ( target->clientSem != 0 ) + { + IORWLockFree( target->clientSem ); + } + if ( target->targetSem != 0 ) + { + IORWLockFree( target->targetSem ); + } + + if ( target->regObjTransferPeriod != 0 ) + { + target->regObjTransferPeriod->release(); + target->regObjTransferPeriod = 0; + } + if ( target->regObjTransferOffset != 0 ) + { + target->regObjTransferOffset->release(); + target->regObjTransferOffset = 0; + } + if ( target->regObjTransferWidth != 0 ) + { + target->regObjTransferWidth->release(); + target->regObjTransferWidth = 0; + } + if ( target->regObjCmdQueue != 0 ) + { + target->regObjCmdQueue->release(); + target->regObjCmdQueue = 0; + } + +} + +/* + * + * + * + */ +bool IOSCSIParallelController::initDeviceGated( IOSCSIParallelDevice *device ) +{ + return initDevice( device ); 
+} + +bool IOSCSIParallelController::initDevice( IOSCSIParallelDevice *device ) +{ + if ( getWorkLoop()->inGate() == false ) + { + return controllerGate->runAction( (IOCommandGate::Action)&IOSCSIParallelController::initDeviceGated, (void *)device ); + } + + addDevice( device ); + device->lunAllocated = allocateLun( device->targetLun ); + + return device->lunAllocated; +} + +/* + * + * + * + */ +void IOSCSIParallelController::releaseDeviceGated( IOSCSIParallelDevice *device ) +{ + releaseDevice( device ); + return; +} + +void IOSCSIParallelController::releaseDevice( IOSCSIParallelDevice *device ) +{ + if ( getWorkLoop()->inGate() == false ) + { + controllerGate->runAction( (IOCommandGate::Action)&IOSCSIParallelController::releaseDeviceGated, (void *)device ); + return; + } + + deleteDevice( device ); + if ( device->lunAllocated == true ) + { + deallocateLun( device->targetLun ); + } + + device->release(); +} + + +/* + * + * + * + */ +void IOSCSIParallelController::addDevice( IOSCSIParallelDevice *forDevice ) +{ + UInt32 targetID; + + targetID = forDevice->targetLun.target; + + forDevice->target = &targets[targetID]; + queue_enter( &targets[targetID].deviceList, forDevice, IOSCSIParallelDevice *, nextDevice ); +} + +/* + * + * + * + */ +void IOSCSIParallelController::deleteDevice( IOSCSIParallelDevice *forDevice ) +{ + queue_head_t *deviceList; + IOSCSIParallelDevice *device; + UInt32 targetID; + + targetID = forDevice->targetLun.target; + + deviceList = &targets[targetID].deviceList; + + queue_iterate( deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + if ( device == forDevice ) + { + queue_remove( &targets[targetID].deviceList, device, IOSCSIParallelDevice *, nextDevice ); + break; + } + } +} + +/* + * + * + * + */ +bool IOSCSIParallelController::allocateTarget( SCSITargetLun targetLun ) +{ + return true; +} + +/* + * + * + * + */ +void IOSCSIParallelController::deallocateTarget( SCSITargetLun targetLun ) +{ +} + +/* + * + * + * + */ +bool 
IOSCSIParallelController::allocateLun( SCSITargetLun targetLun ) +{ + return true; +} + +/* + * + * + * + */ +void IOSCSIParallelController::deallocateLun( SCSITargetLun targetLun ) +{ +} + + +/* + * + * + * + */ +void *IOSCSIParallelController::getTargetData( SCSITargetLun targetLun ) +{ + return targets[targetLun.target].targetPrivateData; +} + +/* + * + * + * + */ +void *IOSCSIParallelController::getLunData( SCSITargetLun targetLun ) +{ + queue_head_t *deviceList; + IOSCSIParallelDevice *device; + + deviceList = &targets[targetLun.target].deviceList; + + queue_iterate( deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + if ( device->targetLun.lun == targetLun.lun ) + { + return device->devicePrivateData; + } + } + return 0; +} + + + +/* + * + * + * + */ +IOSCSIParallelDevice *IOSCSIParallelController::createDevice() +{ + return new IOSCSIParallelDevice; +} + + +/* + * + * + * + */ +void IOSCSIParallelController::initQueues() +{ + UInt32 i; + + for ( i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + queue_init( &targets[i].deviceList ); + } + + resetCmd = allocCommand( 0 ); + resetCmd->cmdType = kSCSICommandBusReset; + + timer( timerEvent ); +} + +/* + * + * + * + */ +void IOSCSIParallelController::reset() +{ + IOSCSIParallelDevice *device; + UInt32 i; + + if ( busResetState != kStateIssue ) + { + return; + } + + busResetState = kStateActive; + + for (i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + queue_iterate( &targets[i].deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + if ( device->client != 0 ) + { + device->client->message( kSCSIClientMsgBusReset, device ); + } + } + } + + resetCommand( resetCmd ); +} + +/* + * + * + * + */ +bool IOSCSIParallelController::checkBusReset() +{ + if ( busResetState == kStateIdle ) + { + return false; + } + if ( busResetState == kStateIssue ) + { + reset(); + } + return true; +} + + +/* + * + * + * + */ +void IOSCSIParallelController::resetOccurred() +{ + UInt32 i; + IOSCSIParallelDevice 
*device; + SCSITarget *target; + SCSIClientMessage clientMsg; + + for (i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + target = &targets[i]; + + target->commandLimit = target->commandLimitSave; + target->reqSenseCount = 0; + target->reqSenseState = kStateIdle; + target->negotiateState = kStateIssue; + + target->targetParmsCurrent.transferPeriodpS = 0; + target->targetParmsCurrent.transferOffset = 0; + target->targetParmsCurrent.transferWidth = 1; + + noDisconnectCmd = 0; + + clientMsg = ( busResetState != kStateActive ) ? kSCSIClientMsgBusReset : kSCSIClientMsgNone; + + queue_iterate( &target->deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + device->resetOccurred( clientMsg ); + } + } + + resetTimer = (kSCSIResetIntervalmS / kSCSITimerIntervalmS + 1); +} + + +/* + * + * + */ +void IOSCSIParallelController::timer( IOTimerEventSource * /* timer */ ) +{ + UInt32 i; + IOSCSIParallelDevice *device; + + + if ( disableTimer ) + { + if ( !--disableTimer ) + { + disableTimeoutOccurred(); + } + } + + if ( resetTimer ) + { + if ( !--resetTimer ) + { + for (i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + queue_iterate( &targets[i].deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + device->resetComplete(); + } + } + + } + } + else + { + for (i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + queue_iterate( &targets[i].deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + device->timer(); + } + } + } + + timerEvent->setTimeoutMS(kSCSITimerIntervalmS); +} + + +/* + * + * + * + */ +void IOSCSIParallelController::completeCommand( IOSCSIParallelCommand *scsiCmd ) +{ + switch ( scsiCmd->cmdType ) + { + case kSCSICommandBusReset: + resetOccurred(); + busResetState = kStateIdle; + break; + default: + ; + } +} + + +/* + * + * + * + */ +bool IOSCSIParallelController::createWorkLoop() +{ + workLoop = getWorkLoop(); + if ( workLoop == 0 ) + { + workLoop = IOWorkLoop::workLoop(); + if ( workLoop == 0 ) + { + return false; + } + } + 
+ timerEvent = IOTimerEventSource::timerEventSource( this, (IOTimerEventSource::Action) &IOSCSIParallelController::timer ); + if ( timerEvent == 0 ) + { + return false; + } + + if ( workLoop->addEventSource( timerEvent ) != kIOReturnSuccess ) + { + return false; + } + + + dispatchEvent = IOInterruptEventSource::interruptEventSource( this, + (IOInterruptEventAction) &IOSCSIParallelController::dispatch, + 0 ); + if ( dispatchEvent == 0 ) + { + return false; + } + + if ( workLoop->addEventSource( dispatchEvent ) != kIOReturnSuccess ) + { + return false; + } + + controllerGate = IOCommandGate::commandGate( this, (IOCommandGate::Action) 0 ); + if ( controllerGate == 0 ) + { + return false; + } + + if ( workLoop->addEventSource( controllerGate ) != kIOReturnSuccess ) + { + return false; + } + + return true; +} + +/* + * + * + * + */ +IOSCSIParallelCommand *IOSCSIParallelController::findCommandWithNexus( SCSITargetLun targetLun, UInt32 tagValue = (UInt32)-1 ) +{ + IOSCSIParallelDevice *device; + + device = findDeviceWithTargetLun( targetLun ); + if ( device == 0 ) + { + return 0; + } + + return device->findCommandWithNexus( tagValue ); +} + + +/* + * + * + * + */ +IOSCSIParallelDevice *IOSCSIParallelController::findDeviceWithTargetLun( SCSITargetLun targetLun ) +{ + IOSCSIParallelDevice *device; + + if ( targetLun.target > controllerInfo.maxTargetsSupported || targetLun.lun > controllerInfo.maxLunsSupported ) + { + return 0; + } + + queue_iterate( &targets[targetLun.target].deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + if ( device->targetLun.lun == targetLun.lun ) + { + return device; + } + } + return 0; +} + + +/* + * + * + * + */ +bool IOSCSIParallelController::configureController() +{ + UInt32 targetsSize; + + if ( configure( provider, &controllerInfo ) == false ) + { + return false; + } + + controllerInfo.commandPrivateDataSize = round( controllerInfo.commandPrivateDataSize, 16 ); + + if ( controllerInfo.maxCommandsPerController == 0 ) 
controllerInfo.maxCommandsPerController = (UInt32) -1; + if ( controllerInfo.maxCommandsPerTarget == 0 ) controllerInfo.maxCommandsPerTarget = (UInt32) -1; + if ( controllerInfo.maxCommandsPerLun == 0 ) controllerInfo.maxCommandsPerLun = (UInt32) -1; + + targetsSize = controllerInfo.maxTargetsSupported * sizeof(SCSITarget); + targets = (SCSITarget *)IOMalloc( targetsSize ); + bzero( targets, targetsSize ); + + commandLimit = commandLimitSave = controllerInfo.maxCommandsPerController; + + tagArraySize = (controllerInfo.maxTags / 32 + ((controllerInfo.maxTags % 32) ? 1 : 0)) * sizeof(UInt32); + + if ( controllerInfo.tagAllocationMethod == kTagAllocationPerController ) + { + tagArray = (UInt32 *)IOMalloc( tagArraySize ); + bzero( tagArray, tagArraySize ); + } + + return true; +} + +/* + * + * + * + */ +void IOSCSIParallelController::setCommandLimit( UInt32 newCommandLimit ) +{ + if ( newCommandLimit == 0 ) controllerInfo.maxCommandsPerController = (UInt32) -1; + + commandLimit = commandLimitSave = controllerInfo.maxCommandsPerController; +} + +/* + * + * + * + */ +IOWorkLoop *IOSCSIParallelController::getWorkLoop() const +{ + return workLoop; +} + +/* + * + * + * + */ +void IOSCSIParallelController::disableCommands( UInt32 disableTimeoutmS ) +{ + commandDisable = true; + + disableTimer = ( disableTimeoutmS != 0 ) ? (disableTimeoutmS / kSCSITimerIntervalmS + 1) : 0; +} + + +/* + * + * + * + */ +void IOSCSIParallelController::disableCommands() +{ + UInt32 disableTimeout; + + commandDisable = true; + + disableTimeout = kSCSIDisableTimeoutmS; + + if ( noDisconnectCmd != 0 ) + { + disableTimeout = noDisconnectCmd->getTimeout(); + if ( disableTimeout != 0 ) disableTimeout += kSCSIDisableTimeoutmS; + } + + disableTimer = ( disableTimeout != 0 ) ? 
(disableTimeout / kSCSITimerIntervalmS + 1) : 0; +} + +/* + * + * + * + */ +void IOSCSIParallelController::disableTimeoutOccurred() +{ + busResetState = kStateIssue; + dispatchRequest(); +} + +/* + * + * + * + */ +void IOSCSIParallelController::rescheduleCommand( IOSCSIParallelCommand *forSCSICmd ) +{ + forSCSICmd->getDevice(kIOSCSIParallelDevice)->rescheduleCommand( forSCSICmd ); +} + +/* + * + * + * + */ +void IOSCSIParallelController::enableCommands() +{ + commandDisable = false; + + disableTimer = 0; + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOSCSIParallelController::dispatchRequest() +{ + dispatchEvent->interruptOccurred(0, 0, 0); +} + + +/* + * + * + * + */ +void IOSCSIParallelController::dispatch() +{ + SCSITarget *target; + IOSCSIParallelDevice *device; + UInt32 dispatchAction; + UInt32 lunsActive = 0; + UInt32 i; + + if ( !targets || checkBusReset() ) + { + goto dispatch_Exit; + } + + for ( i = 0; i < controllerInfo.maxTargetsSupported; i++ ) + { + target = &targets[i]; + + if ( target->state == kStateActive ) + { + lunsActive = 0; + + queue_iterate( &target->deviceList, device, IOSCSIParallelDevice *, nextDevice ) + { + if ( device->dispatch( &dispatchAction ) == true ) + { + lunsActive++; + } + + switch ( dispatchAction ) + { + case kDispatchNextLun: + ; + case kDispatchNextTarget: + break; + case kDispatchStop: + goto dispatch_Exit; + } + } + if ( lunsActive == 0 ) + { + target->state = kStateIdle; + } + } + } + +dispatch_Exit: + ; +} + +/* + * + * + * + */ +IOSCSIParallelCommand *IOSCSIParallelController::allocCommand(UInt32 clientDataSize ) +{ + IOSCSIParallelCommand *cmd; + UInt32 size; + + size = controllerInfo.commandPrivateDataSize + round(clientDataSize, 16); + + cmd = new IOSCSIParallelCommand; + if ( !cmd ) + { + return 0; + } + cmd->init(); + + if ( size ) + { + cmd->dataArea = (void *)IOMallocContiguous( (vm_size_t)size, 16, 0 ); + if ( !cmd->dataArea ) + { + cmd->release(); + return 0; + } + + bzero( cmd->dataArea, size ); + + 
cmd->dataSize = size; + + if ( controllerInfo.commandPrivateDataSize ) + { + cmd->commandPrivateData = cmd->dataArea; + } + if ( clientDataSize ) + { + cmd->clientData = (void *)((UInt8 *)cmd->dataArea + controllerInfo.commandPrivateDataSize); + } + } + + cmd->controller = this; + + return cmd; +} + +/* + * + * + * + */ +void IOSCSIParallelController::free() +{ + UInt32 targetsSize; + UInt32 i; + + if ( controllerGate != 0 ) + { + workLoop->removeEventSource( controllerGate ); + controllerGate->release(); + } + + if ( timerEvent != 0 ) timerEvent->release(); + + if ( dispatchEvent != 0 ) dispatchEvent->release(); + + if ( resetCmd != 0 ) resetCmd->release(); + + if ( workLoop != 0 ) workLoop->release(); + + if ( targets != 0 ) + { + for ( i=0; i < controllerInfo.maxTargetsSupported; i++ ) + { + if ( targets[i].targetPrivateData != 0 ) + { + IOFreeContiguous( targets[i].targetPrivateData, controllerInfo.targetPrivateDataSize ); + } + } + + targetsSize = controllerInfo.maxTargetsSupported * sizeof(SCSITarget); + IOFree( targets, targetsSize ); + } + + if ( tagArray != 0 ) IOFree( tagArray, tagArraySize ); + + super::free(); +} + +/* + * + * + * + */ +void IOSCSIParallelCommand::free() +{ + if ( dataArea ) + { + IOFreeContiguous( dataArea, dataSize ); + } + + OSObject::free(); +} + diff --git a/iokit/Families/IOSCSIParallel/IOSCSIParallelDevice.cpp b/iokit/Families/IOSCSIParallel/IOSCSIParallelDevice.cpp new file mode 100644 index 000000000..bdd9d626f --- /dev/null +++ b/iokit/Families/IOSCSIParallel/IOSCSIParallelDevice.cpp @@ -0,0 +1,2156 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOSCSIParallelDevice.cpp + * + */ + +#include +#include + +#include + +#undef super +#define super IOSCSIDevice + +#ifndef MIN +#define MIN(a,b) ((a <= b) ? a : b) +#endif + +OSDefineMetaClassAndAbstractStructors( IOCDBDevice, IOService ) +OSDefineMetaClassAndAbstractStructors( IOSCSIDevice, IOCDBDevice ) +OSDefineMetaClassAndStructors( IOSCSIParallelDevice, IOSCSIDevice ) + +/* + * + * + * + */ +bool IOSCSIParallelDevice::init( IOSCSIParallelController *forController, SCSITargetLun forTargetLun ) +{ + SCSICDBInfo scsiCDB; + + controller = forController; + targetLun = forTargetLun; + + target = &controller->targets[targetLun.target]; + + queue_init( &deviceList ); + queue_init( &bypassList ); + queue_init( &activeList ); + queue_init( &abortList ); + queue_init( &cancelList ); + + clientSem = IORWLockAlloc(); + if ( clientSem == 0 ) + { + return false; + } + + if ( super::init() == false ) + { + return false; + } + + if ( controller->controllerInfo.lunPrivateDataSize != 0 ) + { + devicePrivateData = IOMallocContiguous( controller->controllerInfo.lunPrivateDataSize, 16, 0 ); + if ( devicePrivateData == 0 ) + { + return false; + } + } + + bzero( &scsiCDB, sizeof(scsiCDB) ); + + abortCmd = allocCommand(kIOSCSIParallelDevice, 0); + if ( abortCmd == 0 ) + { + return false; + } + abortCmd->setTimeout( kSCSIAbortTimeoutmS ); + + cancelCmd = 
allocCommand(kIOSCSIParallelDevice, 0); + if ( cancelCmd == 0 ) + { + return false; + } + cancelCmd->setTimeout( 0 ); + cancelCmd->cmdType = kSCSICommandCancel; + + reqSenseCmd = allocCommand(kIOSCSIParallelDevice, 0); + if ( reqSenseCmd == 0 ) + { + return false; + } + scsiCDB.cdbLength = 6; + scsiCDB.cdb[0] = kSCSICmdRequestSense; + scsiCDB.cdb[1] = targetLun.lun << 4; + scsiCDB.cdbTag = (UInt32) -1; + + reqSenseCmd->setTimeout( kSCSIReqSenseTimeoutmS ); + reqSenseCmd->cmdType = kSCSICommandReqSense; + reqSenseCmd->setCDB( &scsiCDB ); + + if ( controller->controllerInfo.tagAllocationMethod == kTagAllocationPerLun ) + { + tagArray = (UInt32 *)IOMalloc( controller->tagArraySize ); + bzero( tagArray, controller->tagArraySize ); + } + + deviceGate = IOCommandGate::commandGate( this, (IOCommandGate::Action) &IOSCSIParallelDevice::receiveCommand ); + if ( deviceGate == 0 ) + { + return false; + } + + if ( controller->workLoop->addEventSource( deviceGate ) != kIOReturnSuccess ) + { + return false; + } + + commandLimitSave = commandLimit = controller->controllerInfo.maxCommandsPerLun; + + idleNotifyActive = false; + + normalQHeld = false; + bypassQHeld = false; + + return true; +} + +/* + * + * + * + */ +IOReturn IOSCSIParallelDevice::probeTargetLun() +{ + SCSICDBInfo cdb; + SCSIResults result; + IOReturn rc; + IOMemoryDescriptor *desc = 0; + SCSIInquiry *inqData = 0; + UInt32 size = 0; + OSDictionary *propTable; + + probeCmd = allocCommand(kIOSCSIParallelDevice, 0); + + if ( probeCmd == 0 ) + { + rc = kIOReturnNoMemory; + goto probeError; + } + + size = kDefaultInquirySize; + + if ( !(inqData = (SCSIInquiry *)IOMalloc(size)) ) + { + rc = kIOReturnNoMemory; + goto probeError; + } + + desc = IOMemoryDescriptor::withAddress( (void *)inqData, size, kIODirectionIn ); + if ( desc == 0 ) + { + rc = kIOReturnNoMemory; + goto probeError; + } + + if ( open( this ) == false ) + { + rc = kIOReturnError; + goto probeError; + } + + bzero( (void *)&cdb, sizeof(cdb) ); + + 
cdb.cdbLength = 6; + cdb.cdb[0] = kSCSICmdInquiry; + cdb.cdb[4] = size; + probeCmd->setCDB( &cdb ); + + probeCmd->setPointers( desc, size, false ); + + probeCmd->setTimeout( kSCSIProbeTimeoutmS ); + probeCmd->setCallback(); + + probeCmd->execute(); + + rc = probeCmd->getResults( &result ); + + switch ( rc ) + { + case kIOReturnSuccess: + break; + + case kIOReturnUnderrun: + rc = kIOReturnSuccess; + break; + + default: + goto probeError; + } + + if ( result.bytesTransferred <= (UInt32)(&inqData->flags - &inqData->devType) ) + { + rc = kIOReturnDeviceError; + goto probeError; + } + + switch ( inqData->devType & kSCSIDevTypeQualifierMask ) + { + case kSCSIDevTypeQualifierConnected: + case kSCSIDevTypeQualifierNotConnected: + break; + case kSCSIDevTypeQualifierReserved: + case kSCSIDevTypeQualifierMissing: + rc = kIOReturnNotAttached; + break; + default: + break; + } + + if ( rc != kIOReturnSuccess ) + { + goto probeError; + } + + inquiryData = inqData; + inquiryDataSize = result.bytesTransferred; + + propTable = createProperties(); + if ( !propTable ) goto probeError; + + setPropertyTable( propTable ); + + propTable->release(); + +probeError: ; + + if ( desc ) + { + desc->release(); + } + + if ( inqData ) + { + if ( rc != kIOReturnSuccess ) + { + IOFree( inqData, size ); + } + } + + return rc; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::setupTarget() +{ + SCSITargetParms targetParms; + UInt32 transferWidth; + + if ( targetLun.lun != 0 ) + { + close( this ); + return; + } + + getTargetParms( &targetParms ); + + if ( ((inquiryData->flags & kSCSIDevCapCmdQue) != 0) && (checkCmdQueEnabled() == true) ) + { + targetParms.enableTagQueuing = true; + } + + if ( inquiryData->flags & kSCSIDevCapSync ) + { + targetParms.transferPeriodpS = controller->controllerInfo.minTransferPeriodpS; + targetParms.transferOffset = controller->controllerInfo.maxTransferOffset; + } + + if ( inquiryData->flags & kSCSIDevCapWBus32 ) + { + transferWidth = 4; + } + else if ( 
inquiryData->flags & kSCSIDevCapWBus16 ) + { + transferWidth = 2; + } + else + { + transferWidth = 1; + } + + targetParms.transferWidth = MIN( transferWidth, controller->controllerInfo.maxTransferWidth ); + + if ( ((inquiryData->version & 0x07) >= kSCSIInqVersionSCSI3) + && (inquiryDataSize > (UInt32)(&inquiryData->scsi3Options - &inquiryData->devType)) ) + { + if ( inquiryData->scsi3Options & kSCSI3InqOptionClockDT ) + { + targetParms.transferOptions |= kSCSITransferOptionClockDT; + + /* If it's a SCSI-3 target that handles DT clocking, + * assume the HBA can try using the PPR message. + */ + targetParms.transferOptions |= kSCSITransferOptionPPR; + + if ( inquiryData->scsi3Options & kSCSI3InqOptionIUS ) + { + targetParms.transferOptions |= kSCSITransferOptionIUS; + + if ( inquiryData->scsi3Options & kSCSI3InqOptionQAS ) + { + targetParms.transferOptions |= kSCSITransferOptionQAS; + } + } + } + } + + setTargetParms( &targetParms ); + + close( this ); +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::checkCmdQueEnabled() +{ + SCSICDBInfo scsiCDB; + SCSIResults scsiResult; + IOMemoryDescriptor *desc; + UInt32 size; + UInt8 controlModePage[32]; + IOReturn cmdRc; + bool rc = false; + + bzero( (void *)&scsiCDB, sizeof(scsiCDB) ); + + size = sizeof(controlModePage); + + scsiCDB.cdbLength = 6; + scsiCDB.cdb[0] = kSCSICmdModeSense6; + scsiCDB.cdb[1] = 0x08; + scsiCDB.cdb[2] = 0x0a; // Control Mode Page + scsiCDB.cdb[4] = size; + + probeCmd->setCDB( &scsiCDB ); + + desc = IOMemoryDescriptor::withAddress( (void *)controlModePage, size, kIODirectionIn ); + if ( desc == 0 ) + { + return rc; + } + + probeCmd->setPointers( desc, size, false ); + + probeCmd->setTimeout( kSCSIProbeTimeoutmS ); + probeCmd->setCallback(); + + probeCmd->execute(); + + cmdRc = probeCmd->getResults( &scsiResult ); + + if ( (cmdRc == kIOReturnUnderrun) && (scsiResult.bytesTransferred > 7) ) + { + cmdRc = kIOReturnSuccess; + } + + /* Check DQue bit on ControlMode Page (0x0A) */ + if ( (cmdRc == 
kIOReturnSuccess) && ((controlModePage[7] & 0x01) == 0) ) + { + rc = true; + } + + desc->release(); + + return rc; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::getInquiryData( void *clientBuf, UInt32 clientBufSize, UInt32 *clientDataSize ) +{ + UInt32 len; + + bzero( clientBuf, clientBufSize ); + + len = MIN( clientBufSize, inquiryDataSize ); + + bcopy( inquiryData, clientBuf, len ); + + *clientDataSize = len; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::abort() +{ + submitCommand( kSCSICommandAbortAll, 0 ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::reset() +{ + submitCommand( kSCSICommandDeviceReset, 0 ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::holdQueue( UInt32 queueType ) +{ + if ( getWorkLoop()->inGate() == false ) + { + IOPanic( "IOSCSIParallelDevice::holdQueue() - must be called from workloop!!\n\r"); + } + + if ( queueType == kQTypeBypassQ ) + { + bypassQHeld = true; + } + else if ( queueType == kQTypeNormalQ ) + { + normalQHeld = true; + } +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::releaseQueue( UInt32 queueType ) +{ + if ( getWorkLoop()->inGate() == false ) + { + IOPanic( "IOSCSIParallelDevice::releaseQueue() - must be called from workloop!!\n\r"); + } + + if ( queueType == kQTypeBypassQ ) + { + bypassQHeld = false; + } + else if ( queueType == kQTypeNormalQ ) + { + normalQHeld = false; + } + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) +{ + if ( getWorkLoop()->inGate() == false ) + { + IOPanic( "IOSCSIParallelDevice:::notifyIdle() - must be called from workloop!!\n\r"); + } + + if ( callback == 0 ) + { + idleNotifyActive = false; + return; + } + + if ( idleNotifyActive == true ) + { + IOPanic( "IOSCSIParallelDevice:::notifyIdle() - only one idle notify may be active\n\r"); + } + + idleNotifyActive = true; + idleNotifyTarget = target; + idleNotifyCallback = callback; + idleNotifyRefcon = refcon; + + 
checkIdleNotify(); +} + + +/* + * + * + * + */ +void IOSCSIParallelDevice::submitCommand( UInt32 cmdType, IOSCSIParallelCommand *scsiCmd, UInt32 cmdSequenceNumber ) +{ + deviceGate->runCommand( (void *)cmdType, (void *)scsiCmd, (void *) cmdSequenceNumber, (void *) 0 ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::receiveCommand( UInt32 cmdType, IOSCSIParallelCommand *scsiCmd, UInt32 cmdSequenceNumber, void *p3 ) +{ + queue_head_t *queue; + + switch ( cmdType ) + { + case kSCSICommandExecute: + scsiCmd->cmdType = (SCSICommandType) cmdType; + + scsiCmd->scsiCmd.cdbFlags &= (kCDBFNoDisconnect); + + queue = (scsiCmd->queueType == kQTypeBypassQ) ? &bypassList : &deviceList; + + if ( scsiCmd->queuePosition == kQPositionHead ) + { + stackCommand( queue, scsiCmd ); + } + else + { + addCommand( queue, scsiCmd ); + } + + dispatchRequest(); + break; + + case kSCSICommandAbortAll: + abortAllCommands( kSCSICommandAbortAll ); + break; + + case kSCSICommandAbort: + abortCommand( scsiCmd, cmdSequenceNumber ); + break; + + case kSCSICommandDeviceReset: + abortAllCommands( kSCSICommandDeviceReset ); + break; + + default: + /* ??? 
*/ + break; + } +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::abortCommand( IOSCSIParallelCommand *scsiCmd, UInt32 sequenceNumber ) +{ + if ( scsiCmd->list == (queue_head_t *)deviceGate ) + { + if ( scsiCmd->sequenceNumber != sequenceNumber ) + { + return; + } + scsiCmd->results.returnCode = kIOReturnAborted; + } + else if ( scsiCmd->list == &deviceList ) + { + if ( scsiCmd->sequenceNumber != sequenceNumber ) + { + return; + } + + deleteCommand( &deviceList, scsiCmd ); + scsiCmd->results.returnCode = kIOReturnAborted; + finishCommand( scsiCmd ); + } + else if ( scsiCmd->list == &activeList ) + { + if ( scsiCmd->sequenceNumber != sequenceNumber ) + { + return; + } + + moveCommand( &activeList, &abortList, scsiCmd ); + + dispatchRequest(); + } +} + + +/* + * + * + * + */ +void IOSCSIParallelDevice::abortAllCommands( SCSICommandType cmdType ) +{ + IOSCSIParallelDevice *abortDev; + + abortCmdPending = cmdType; + + if ( abortCmdPending == kSCSICommandAbortAll ) + { + if ( client != 0 ) + { + client->message( kSCSIClientMsgDeviceAbort, this ); + } + } + else if ( abortCmdPending == kSCSICommandDeviceReset ) + { + queue_iterate( &target->deviceList, abortDev, IOSCSIParallelDevice *, nextDevice ) + { + if ( abortDev->client != 0 ) + { + abortDev->client->message( kSCSIClientMsgDeviceReset, abortDev ); + } + } + } + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::resetOccurred( SCSIClientMessage clientMsg ) +{ + if ( client != 0 && clientMsg != kSCSIClientMsgNone ) + { + client->message( clientMsg, this ); + } + + moveAllCommands( &activeList, &cancelList, kIOReturnAborted ); + moveAllCommands( &abortList, &cancelList, kIOReturnAborted ); + + abortState = kStateIdle; + reqSenseState = kStateIdle; + commandLimit = commandLimitSave; + negotiateState = kStateIdle; + + dispatchRequest(); +} + +void IOSCSIParallelDevice::resetComplete() +{ + if ( client != 0 ) + { + client->message( kSCSIClientMsgBusReset | kSCSIClientMsgDone, this ); + } +} + 
+ +/* + * + * + * + */ +bool IOSCSIParallelDevice::checkAbortQueue() +{ + IOSCSIParallelCommand *origCmd; + + if ( abortState == kStateActive ) + { + return true; + } + + if ( abortCmdPending != kSCSICommandNone ) + { + abortCmd->origCommand = 0; + + abortCmd->scsiCmd.cdbTagMsg = 0; + abortCmd->scsiCmd.cdbTag = (UInt32) -1; + + + abortCmd->cmdType = abortCmdPending; + abortCmd->scsiCmd.cdbAbortMsg = (abortCmdPending == kSCSICommandAbortAll) + ? kSCSIMsgAbort : kSCSIMsgBusDeviceReset; + + if ( disableDisconnect == true ) + { + abortCmd->scsiCmd.cdbFlags |= kCDBFlagsNoDisconnect; + } + else + { + abortCmd->scsiCmd.cdbFlags &= ~kCDBFlagsNoDisconnect; + } + + + abortCmd->timer = ( abortCmd->timeout != 0 ) ? + abortCmd->timeout / kSCSITimerIntervalmS + 1 : 0; + + bzero( &abortCmd->results, sizeof(SCSIResults) ); + + abortCmdPending = kSCSICommandNone; + abortState = kStateActive; + + addCommand( &activeList, abortCmd ); + controller->executeCommand( abortCmd ); + } + else if ( queue_empty( &abortList ) == false ) + { + origCmd = (IOSCSIParallelCommand *)queue_first( &abortList ); + abortCmd->origCommand = origCmd; + + abortCmd->cmdType = kSCSICommandAbort; + abortCmd->scsiCmd.cdbTagMsg = origCmd->scsiCmd.cdbTagMsg; + abortCmd->scsiCmd.cdbTag = origCmd->scsiCmd.cdbTag; + abortCmd->scsiCmd.cdbAbortMsg = (abortCmd->scsiCmd.cdbTagMsg != 0) + ? kSCSIMsgAbortTag : kSCSIMsgAbort; + + abortCmd->timer = ( abortCmd->timeout != 0 ) ? 
+ abortCmd->timeout / kSCSITimerIntervalmS + 1 : 0; + + bzero( &abortCmd->results, sizeof(SCSIResults) ); + + abortState = kStateActive; + + addCommand( &activeList, abortCmd ); + controller->executeCommand( abortCmd ); + } + else + { + return false; + } + + return true; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::checkCancelQueue() +{ + if ( cancelState != kStateIdle ) + { + return; + } + + if ( queue_empty( &cancelList ) == true ) + { + return; + } + + if ( controller->controllerInfo.disableCancelCommands == true ) + { + return; + } + + cancelCmd->origCommand = (IOSCSIParallelCommand *)queue_first( &cancelList ); + bzero( &cancelCmd->results, sizeof(SCSIResults) ); + + cancelState = kStateActive; + controller->cancelCommand( cancelCmd ); +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::checkReqSense() +{ + IOMemoryDescriptor *senseData; + UInt32 senseLength; + SCSITargetParms *tpCur; + + if ( target->reqSenseState == kStateActive ) + { + return true; + } + + if ( reqSenseState == kStateIssue ) + { + reqSenseCmd->origCommand = reqSenseOrigCmd; + bzero( &reqSenseCmd->results, sizeof(SCSIResults) ); + + reqSenseOrigCmd->getPointers( &senseData, &senseLength, 0, true ); + reqSenseCmd->setPointers( senseData, senseLength, false ); + + reqSenseCmd->scsiCmd.cdbFlags = 0; + + if ( disableDisconnect == true ) + { + reqSenseCmd->scsiCmd.cdbFlags |= kCDBFlagsNoDisconnect; + } + else + { + reqSenseCmd->scsiCmd.cdbFlags &= ~kCDBFlagsNoDisconnect; + } + + tpCur = &target->targetParmsCurrent; + + if ( tpCur->transferWidth != 1 ) + { + reqSenseCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiateWDTR; + if (tpCur->transferOptions & kSCSITransferOptionPPR) { + reqSenseCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiatePPR; + } + } + + if ( tpCur->transferOffset != 0 ) + { + reqSenseCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiateSDTR; + if (tpCur->transferOptions & kSCSITransferOptionPPR) { + reqSenseCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiatePPR; + } + + } + + reqSenseCmd->timer = ( 
reqSenseCmd->timeout != 0 ) ? + reqSenseCmd->timeout / kSCSITimerIntervalmS + 1 : 0; + + reqSenseCmd->scsiCmd.cdb[3] = (senseLength >> 8) & 0xff; + reqSenseCmd->scsiCmd.cdb[4] = senseLength & 0xff; + + reqSenseState = kStatePending; + } + + if ( reqSenseState == kStatePending ) + { + target->reqSenseState = reqSenseState = kStateActive; + + addCommand( &activeList, reqSenseCmd ); + + commandCount++; + controller->commandCount++; + + controller->executeCommand( reqSenseCmd ); + } + + return (target->reqSenseCount > 0); +} + + +/* + * + * + * + */ +bool IOSCSIParallelDevice::checkDeviceQueue( UInt32 *dispatchAction ) +{ + IOSCSIParallelCommand *scsiCmd = 0; + queue_head_t *queue; + UInt32 i; + bool rc = true; + bool queueHeld; + + do + { + if ( controller->commandCount >= controller->commandLimit ) + { + *dispatchAction = kDispatchStop; + break; + } + + if ( target->commandCount >= target->commandLimit ) + { + *dispatchAction = kDispatchNextTarget; + break; + } + + *dispatchAction = kDispatchNextLun; + + if ( commandCount >= commandLimit ) + { + break; + } + + for ( i=0; i < 2; i++ ) + { + queueHeld = (i == 0) ? bypassQHeld : normalQHeld; + queue = (i == 0) ? 
&bypassList : &deviceList; + + if ( queueHeld == true ) + { + continue; + } + + scsiCmd = checkCommand( queue ); + if ( scsiCmd != 0 ) + { + *dispatchAction = kDispatchNextCommand; + break; + } + } + + if ( i == 2 ) + { + rc = false; + break; + } + + if ( disableDisconnect == true || (scsiCmd->scsiCmd.cdbFlags & kCDBFNoDisconnect) ) + { + scsiCmd->scsiCmd.cdbFlags |= kCDBFlagsNoDisconnect; + + if ( controller->commandCount != 0 ) + { + *dispatchAction = kDispatchNextLun; + break; + } + + controller->noDisconnectCmd = scsiCmd; + controller->commandLimitSave = controller->commandLimit; + controller->commandLimit = 1; + } + + else if ( checkTag( scsiCmd ) == false ) + { + switch ( controller->controllerInfo.tagAllocationMethod ) + { + case kTagAllocationPerTarget: + *dispatchAction = kDispatchNextTarget; + break; + case kTagAllocationPerController: + *dispatchAction = kDispatchStop; + break; + case kTagAllocationPerLun: + ; + default: + *dispatchAction = kDispatchNextLun; + } + break; + } + + getCommand( queue ); + + checkNegotiate( scsiCmd ); + + scsiCmd->timer = ( scsiCmd->timeout != 0 ) ? scsiCmd->timeout / kSCSITimerIntervalmS + 1 : 0; + + commandCount++; + target->commandCount++; + controller->commandCount++; + + addCommand( &activeList, scsiCmd ); + + controller->executeCommand( scsiCmd ); + + } while ( 0 ); + + return rc; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::rescheduleCommand( IOSCSIParallelCommand *scsiCmd ) +{ + if ( scsiCmd->list != &activeList ) + { + IOLog( "IOSCSIParallelController::rescheduleCommand() - Command not active. 
Cmd = %08x\n\r", (int)scsiCmd ); + return; + } + + deleteCommand( &activeList, scsiCmd ); + + switch ( scsiCmd->cmdType ) + { + case kSCSICommandExecute: + if ( scsiCmd->scsiCmd.cdbTagMsg != 0 ) + { + freeTag( scsiCmd->scsiCmd.cdbTag ); + scsiCmd->scsiCmd.cdbTag = (UInt32) -1; + } + + stackCommand( &deviceList, scsiCmd ); + + if ( scsiCmd->scsiCmd.cdbFlags & kCDBFlagsNoDisconnect ) + { + controller->commandLimit = controller->commandLimitSave; + controller->noDisconnectCmd = 0; + } + + controller->commandCount--; + target->commandCount--; + commandCount--; + break; + + case kSCSICommandReqSense: + reqSenseState = kStatePending; + target->reqSenseState = kStateIdle; + commandCount--; + controller->commandCount--; + break; + + case kSCSICommandAbortAll: + case kSCSICommandDeviceReset: + abortCmdPending = scsiCmd->cmdType; + + case kSCSICommandAbort: + abortState = kStateIdle; + break; + + default: + ; + } + + dispatchRequest(); + +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::setTargetParms( SCSITargetParms *targetParms ) +{ + IOSCSIParallelCommand *scsiCmd; + SCSICDBInfo scsiCDB; + bool fTagEnable; + bool rc = true; + + IOMemoryDescriptor *senseDesc; + UInt8 senseBuffer[14]; + + + if ( getWorkLoop()->inGate() == true ) + { + IOPanic( "IOSCSIParallelDevice:::setTargetParms() - must not be called from workloop!!\n\r"); + } + + IOWriteLock( target->clientSem ); + IOWriteLock( target->targetSem ); + + while ( target->negotiateState == kStateActive ) + { + IOSleep( 100 ); + } + + target->targetParmsNew = *targetParms; + + if ( targetParms->transferPeriodpS < controller->controllerInfo.minTransferPeriodpS ) + { + target->targetParmsNew.transferPeriodpS = controller->controllerInfo.minTransferPeriodpS; + } + + if ( target->targetParmsNew.transferPeriodpS == 0 + || target->targetParmsNew.transferOffset == 0 + || controller->controllerInfo.minTransferPeriodpS == 0 ) + { + target->targetParmsNew.transferPeriodpS = 0; + target->targetParmsNew.transferOffset = 0; + } + + 
target->commandLimit = 1; + + fTagEnable = (targetParms->enableTagQueuing == true) + && (controller->controllerInfo.tagAllocationMethod != kTagAllocationNone) + && (controller->controllerInfo.maxTags != 0); + + regObjCmdQueue->setValue( (UInt32)fTagEnable ); + + if ( fTagEnable == true ) + { + target->commandLimitSave = controller->controllerInfo.maxCommandsPerTarget; + } + else + { + target->commandLimitSave = 1; + target->targetParmsNew.enableTagQueuing = false; + } + + scsiCmd = allocCommand(kIOSCSIParallelDevice, 0); + + bzero( &scsiCDB, sizeof( SCSICDBInfo ) ); + + scsiCDB.cdbLength = 6; + scsiCDB.cdb[0] = kSCSICmdTestUnitReady; + scsiCDB.cdb[1] = targetLun.lun << 4; + scsiCmd->setCDB( &scsiCDB ); + + senseDesc = IOMemoryDescriptor::withAddress(senseBuffer, sizeof(senseBuffer), kIODirectionIn); + if ( senseDesc == 0 ) return false; + scsiCmd->setPointers( senseDesc, sizeof(senseBuffer), false, true ); + + target->negotiateState = kStateIssue; + + scsiCmd->execute(); + + IOWriteLock( target->targetSem ); + IORWUnlock( target->targetSem ); + + scsiCmd->release(); + senseDesc->release(); + + rc = (target->negotiationResult.returnCode == kIOReturnSuccess); + + IORWUnlock( target->clientSem ); + + return rc; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::getTargetParms( SCSITargetParms *targetParms ) +{ + *targetParms = target->targetParmsCurrent; +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::setLunParms( SCSILunParms *lunParms ) +{ + IOSCSIParallelCommand *scsiCmd; + SCSICDBInfo scsiCDB; + + IOMemoryDescriptor *senseDesc; + UInt8 senseBuffer[14]; + + if ( getWorkLoop()->inGate() == true ) + { + IOPanic( "IOSCSIParallelDevice:::setLunParms() - must not be called from workloop!!\n\r"); + } + + IOWriteLock( clientSem ); + + lunParmsNew = *lunParms; + commandLimitSave = commandLimit; + commandLimit = 1; + + scsiCmd = allocCommand(kIOSCSIParallelDevice, 0); + + bzero( &scsiCDB, sizeof( SCSICDBInfo ) ); + + scsiCDB.cdbLength = 6; + scsiCDB.cdb[0] = 
kSCSICmdTestUnitReady; + scsiCDB.cdb[1] = targetLun.lun << 4; + scsiCmd->setCDB( &scsiCDB ); + + senseDesc = IOMemoryDescriptor::withAddress(senseBuffer, sizeof(senseBuffer), kIODirectionIn); + if ( senseDesc == 0 ) return false; + scsiCmd->setPointers( senseDesc, sizeof(senseBuffer), false, true ); + + negotiateState = kStateIssue; + + scsiCmd->execute(); + + scsiCmd->release(); + senseDesc->release(); + + while ( negotiateState != kStateIdle ) + { + IOSleep( 100 ); + } + + IORWUnlock( clientSem ); + + return true; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::getLunParms( SCSILunParms *lunParms ) +{ + lunParms->disableDisconnect = disableDisconnect; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::checkNegotiate( IOSCSIParallelCommand *scsiCmd ) +{ + SCSITargetParms *tpCur, *tpNew; + + if ( target->negotiateState == kStateIssue ) + { + if ( target->commandCount == 0 ) + { + target->negotiateState = kStateActive; + + tpNew = &target->targetParmsNew; + tpCur = &target->targetParmsCurrent; + + target->negotiationResult.returnCode = kIOReturnError; + + if ((tpCur->transferPeriodpS != tpNew->transferPeriodpS) || + (tpCur->transferOffset != tpNew->transferOffset) || + ((tpCur->transferOptions ^ tpNew->transferOptions) & kSCSITransferOptionsSCSI3) ) + { + scsiCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiateSDTR; + + if (tpNew->transferOptions & kSCSITransferOptionPPR) { + scsiCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiatePPR; + } + } + + if ( tpCur->transferWidth != tpNew->transferWidth ) + { + scsiCmd->scsiCmd.cdbFlags |= kCDBFlagsNegotiateWDTR; + } + + if ( tpCur->enableTagQueuing != tpNew->enableTagQueuing ) + { + scsiCmd->scsiCmd.cdbFlags |= kCDBFlagsEnableTagQueuing; + } + + if ( (scsiCmd->scsiCmd.cdbFlags & + (kCDBFlagsNegotiateSDTR | + kCDBFlagsNegotiateWDTR | + kCDBFlagsNegotiatePPR | + kCDBFlagsEnableTagQueuing)) == 0 ) + { + IORWUnlock( target->targetSem ); + target->negotiateState = kStateIdle; + target->commandLimit = target->commandLimitSave; + } + + 
*tpCur = *tpNew; + } + } + + if ( negotiateState == kStateIssue ) + { + if ( commandCount == 0 ) + { + disableDisconnect = lunParmsNew.disableDisconnect; + negotiateState = kStateIdle; + } + } +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::negotiationComplete() +{ + SCSITargetParms *tpCur, *tpNew; + + tpNew = &target->targetParmsNew; + tpCur = &target->targetParmsCurrent; + + if ( target->negotiationResult.returnCode == kIOReturnSuccess ) + { + tpCur->transferPeriodpS = tpNew->transferPeriodpS = target->negotiationResult.transferPeriodpS; + tpCur->transferOffset = tpNew->transferOffset = target->negotiationResult.transferOffset; + tpCur->transferWidth = tpNew->transferWidth = target->negotiationResult.transferWidth; + tpCur->transferOptions = tpNew->transferOptions = target->negotiationResult.transferOptions; + + target->commandLimit = target->commandLimitSave; + } + else + { + tpNew->transferPeriodpS = 0; + tpNew->transferOffset = 0; + tpNew->transferWidth = 1; + } + + target->regObjTransferPeriod->setValue( tpNew->transferPeriodpS ); + target->regObjTransferOffset->setValue( tpNew->transferOffset ); + target->regObjTransferWidth->setValue( tpNew->transferWidth ); + target->regObjTransferOptions->setValue( tpNew->transferOptions ); + + target->negotiateState = kStateIdle; +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::checkTag( IOSCSIParallelCommand *scsiCmd ) +{ + SCSICDBInfo scsiCDB; + bool rc = true; + + scsiCmd->getCDB( &scsiCDB ); + + scsiCDB.cdbTagMsg = 0; + scsiCDB.cdbTag = (UInt32)-1; + + do + { + if ( scsiCmd->device->target->targetParmsCurrent.enableTagQueuing == false ) + { + break; + } + + if ( allocTag( &scsiCDB.cdbTag ) == false ) + { + rc = false; + break; + } + + if ( scsiCDB.cdbTagMsg == 0 ) + { + scsiCDB.cdbTagMsg = kSCSIMsgSimpleQueueTag; + } + } + while ( 0 ); + + scsiCmd->setCDB( &scsiCDB ); + + return rc; +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::allocTag( UInt32 *tagId ) +{ + UInt32 i; + UInt32 tagIndex; + UInt32 
tagMask; + UInt32 *tags = 0; + + switch ( controller->controllerInfo.tagAllocationMethod ) + { + case kTagAllocationPerLun: + tags = tagArray; + break; + case kTagAllocationPerTarget: + tags = target->tagArray; + break; + case kTagAllocationPerController: + tags = controller->tagArray; + break; + default: + ; + } + + if ( tags == 0 ) return false; + + for ( i = 0; i < controller->controllerInfo.maxTags; i++ ) + { + tagIndex = i / 32; + tagMask = 1 << (i % 32); + if ( !(tags[tagIndex] & tagMask) ) + { + tags[tagIndex] |= tagMask; + *tagId = i; + return true; + } + } + return false; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::freeTag( UInt32 tagId ) +{ + UInt32 *tags = 0; + + switch ( controller->controllerInfo.tagAllocationMethod ) + { + case kTagAllocationPerLun: + tags = tagArray; + break; + case kTagAllocationPerTarget: + tags = target->tagArray; + break; + case kTagAllocationPerController: + tags = controller->tagArray; + break; + default: + ; + } + + if ( tags == 0 ) return; + + tags[tagId/32] &= ~(1 << (tagId % 32)); +} + +/* + * + * + * + */ +IOSCSIParallelCommand *IOSCSIParallelDevice::findCommandWithNexus( UInt32 tagValue ) +{ + IOSCSIParallelCommand *scsiCmd; + + queue_iterate( &activeList, scsiCmd, IOSCSIParallelCommand *, nextCommand ) + { + switch ( scsiCmd->cmdType ) + { + case kSCSICommandExecute: + case kSCSICommandReqSense: + if ( scsiCmd->scsiCmd.cdbTag == tagValue ) + { + return scsiCmd; + } + break; + default: + ; + } + } + + queue_iterate( &abortList, scsiCmd, IOSCSIParallelCommand *, nextCommand ) + { + switch ( scsiCmd->cmdType ) + { + case kSCSICommandExecute: + case kSCSICommandReqSense: + if ( scsiCmd->scsiCmd.cdbTag == tagValue ) + { + return scsiCmd; + } + break; + default: + ; + } + } + + return 0; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::timer() +{ + IOSCSIParallelCommand *scsiCmd, *tmp = 0; + SCSITargetLun scsiTargetLun; + + queue_iterate( &activeList, scsiCmd, IOSCSIParallelCommand *, nextCommand ) + { + tmp = 
(IOSCSIParallelCommand *)queue_prev( &scsiCmd->nextCommand ); + + if ( scsiCmd->timer ) + { + if ( !--scsiCmd->timer ) + { + scsiCmd->getTargetLun( &scsiTargetLun ); + IOLog("Timeout: T/L = %d:%d Cmd = %08x Cmd Type = %d\n\r", + scsiTargetLun.target, scsiTargetLun.lun, (int)scsiCmd, scsiCmd->cmdType ); + + switch ( scsiCmd->cmdType ) + { + case kSCSICommandExecute: + moveCommand( &activeList, &abortList, scsiCmd, kIOReturnTimeout ); + scsiCmd = tmp; + break; + + case kSCSICommandReqSense: + reqSenseState = kStateIdle; + moveCommand( &activeList, &abortList, scsiCmd, kIOReturnTimeout ); + scsiCmd = tmp; + break; + + case kSCSICommandAbort: + case kSCSICommandAbortAll: + case kSCSICommandDeviceReset: + controller->busResetState = kStateIssue; + break; + + default: + ; + } + + dispatchRequest(); + } + } + + if ( queue_end( &activeList, (queue_head_t *)scsiCmd ) == true ) + { + break; + } + } +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::dispatchRequest() +{ + target->state = kStateActive; + controller->dispatchRequest(); +} + +/* + * + * + * + */ +bool IOSCSIParallelDevice::dispatch( UInt32 *dispatchAction ) +{ + bool rc; + + checkCancelQueue(); + + if ( controller->checkBusReset() == true ) + { + *dispatchAction = kDispatchStop; + return true; + } + + if ( (rc = controller->commandDisable) == true ) + { + *dispatchAction = kDispatchNextTarget; + return true; + } + + if ( checkAbortQueue() == true ) + { + *dispatchAction = kDispatchNextTarget; + return true; + } + + do + { + if ( (rc = controller->commandDisable) == true ) + { + *dispatchAction = kDispatchStop; + break; + } + + if ( (rc = checkReqSense()) == true ) + { + *dispatchAction = kDispatchNextTarget; + break; + } + + rc = checkDeviceQueue( dispatchAction ); + + } while ( *dispatchAction == kDispatchNextCommand ); + + return rc; +} + + +/* + * + * + * + */ +void IOSCSIParallelDevice::completeCommand( IOSCSIParallelCommand *scsiCmd ) +{ + SCSICommandType cmdType; + + cmdType = scsiCmd->cmdType; + switch 
( cmdType ) + { + case kSCSICommandExecute: + executeCommandDone( scsiCmd ); + break; + + case kSCSICommandReqSense: + executeReqSenseDone( scsiCmd ); + break; + + case kSCSICommandAbort: + case kSCSICommandAbortAll: + case kSCSICommandDeviceReset: + abortCommandDone( scsiCmd ); + break; + + case kSCSICommandCancel: + cancelCommandDone( scsiCmd ); + break; + + default: + ; + } + + checkIdleNotify(); + + dispatchRequest(); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::checkIdleNotify() +{ + if ( idleNotifyActive == false ) + { + return; + } + + if ( (queue_empty( &activeList ) == true) + && (queue_empty( &abortList ) == true) + && (queue_empty( &cancelList ) == true) + && (target->reqSenseCount == 0) ) + { + idleNotifyActive = false; + (idleNotifyCallback)( idleNotifyTarget, idleNotifyRefcon ); + } +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::flushQueue( UInt32 queueType, IOReturn rc ) +{ + queue_head_t *queue; + + queue = (queueType == kQTypeBypassQ) ? &bypassList : &deviceList; + purgeAllCommands( queue, rc ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::executeCommandDone( IOSCSIParallelCommand *scsiCmd ) +{ + deleteCommand( scsiCmd->list, scsiCmd ); + + commandCount--; + controller->commandCount--; + target->commandCount--; + + if ( scsiCmd->scsiCmd.cdbTagMsg != 0 ) + { + freeTag( scsiCmd->scsiCmd.cdbTag ); + scsiCmd->scsiCmd.cdbTag = (UInt32) -1; + } + + if ( scsiCmd->scsiCmd.cdbFlags & (kCDBFlagsNegotiateSDTR | + kCDBFlagsNegotiateWDTR | + kCDBFlagsNegotiatePPR | + kCDBFlagsEnableTagQueuing) ) + { + if ( scsiCmd->scsiCmd.cdbFlags & (kCDBFlagsNegotiateSDTR | + kCDBFlagsNegotiateWDTR | + kCDBFlagsNegotiatePPR) ) + { + negotiationComplete(); + } + else + { + target->negotiationResult.returnCode = kIOReturnSuccess; + } + + IORWUnlock( target->targetSem ); + } + + if ( scsiCmd->scsiCmd.cdbFlags & kCDBFlagsNoDisconnect ) + { + controller->commandLimit = controller->commandLimitSave; + controller->noDisconnectCmd = 0; + } + + if ( 
scsiCmd->results.scsiStatus == kSCSIStatusCheckCondition + && scsiCmd->results.requestSenseDone == false + && scsiCmd->senseData != 0 ) + { + reqSenseOrigCmd = scsiCmd; + reqSenseState = kStateIssue; + target->reqSenseCount++; + return; + } + + if ( scsiCmd->results.scsiStatus == kSCSIStatusQueueFull ) + { + if ( commandCount > 4 ) + { +// IOLog( "IOSCSI: Q-full - commandCount = %d commandLimit = %d\n\r", commandCount, commandLimit ); + commandLimit = commandCount; + } + + stackCommand( &deviceList, scsiCmd ); + return; + } + + finishCommand( scsiCmd ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::executeReqSenseDone( IOSCSIParallelCommand *scsiCmd ) +{ + IOSCSIParallelCommand *origCommand; + + deleteCommand( scsiCmd->list, scsiCmd ); + + target->reqSenseState = reqSenseState = kStateIdle; + target->reqSenseCount--; + + commandCount--; + controller->commandCount--; + + reqSenseOrigCmd = 0; + + origCommand = scsiCmd->origCommand; + + if ( (scsiCmd->results.returnCode == kIOReturnSuccess) || (scsiCmd->results.returnCode == kIOReturnUnderrun) ) + { + origCommand->results.requestSenseDone = true; + origCommand->results.requestSenseLength = scsiCmd->results.bytesTransferred; + } + else + { + origCommand->results.requestSenseDone = false; + origCommand->results.requestSenseLength = 0; + } + + finishCommand( scsiCmd->origCommand ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::abortCommandDone( IOSCSIParallelCommand *scsiCmd ) +{ + IOSCSIParallelCommand *origSCSICmd; + IOSCSIParallelDevice *abortDev; + + deleteCommand( scsiCmd->list, scsiCmd ); + + abortState = kStateIdle; + + if ( scsiCmd->cmdType == kSCSICommandAbortAll ) + { + moveAllCommands( &activeList, &cancelList, kIOReturnAborted ); + moveAllCommands( &abortList, &cancelList, kIOReturnAborted ); + + if ( client != 0 ) + { + client->message( kSCSIClientMsgDeviceAbort | kSCSIClientMsgDone, this ); + } + } + if ( scsiCmd->cmdType == kSCSICommandDeviceReset ) + { + target->commandLimit = 
target->commandLimitSave; + target->reqSenseCount = 0; + target->reqSenseState = kStateIdle; + target->negotiateState = kStateIssue; + + target->targetParmsCurrent.transferPeriodpS = 0; + target->targetParmsCurrent.transferOffset = 0; + target->targetParmsCurrent.transferWidth = 1; + + queue_iterate( &target->deviceList, abortDev, IOSCSIParallelDevice *, nextDevice ) + { + abortDev->resetOccurred( (SCSIClientMessage)(kSCSIClientMsgDeviceReset | kSCSIClientMsgDone) ); + } + } + else if ( scsiCmd->cmdType == kSCSICommandAbort ) + { + origSCSICmd = scsiCmd->origCommand; + + if ( findCommand( &abortList, origSCSICmd ) == true ) + { + moveCommand( &abortList, &cancelList, origSCSICmd, kIOReturnAborted ); + } + } + + return; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::cancelCommandDone( IOSCSIParallelCommand *scsiCmd ) +{ + IOSCSIParallelCommand *origSCSICmd; + + cancelState = kStateIdle; + + origSCSICmd = scsiCmd->origCommand; + + if ( findCommand( &cancelList, origSCSICmd ) == true ) + { + IOLog( "IOSCSIParallelDevice::cancelCommandDone - Cancelled command not completed - scsiCmd = %08x\n\r", (int)origSCSICmd ); + deleteCommand( &cancelList, origSCSICmd ); + } +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::finishCommand( IOSCSIParallelCommand *scsiCmd ) +{ + if ( scsiCmd->completionInfo.async.callback ) + { + (*scsiCmd->completionInfo.async.callback)( scsiCmd->completionInfo.async.target, + scsiCmd->completionInfo.async.refcon ); + } + else + { + scsiCmd->completionInfo.sync.lock->signal(); + } +} + + +/* + * + * + */ +OSDictionary *IOSCSIParallelDevice::createProperties() +{ + OSDictionary *propTable = 0; + OSObject *regObj; + char tmpbuf[81]; + char *d; + + propTable = OSDictionary::withCapacity(kSCSIMaxProperties); + if ( propTable == NULL ) + { + return NULL; + } + + regObj = (OSObject *)OSNumber::withNumber(targetLun.target,32); + if ( addToRegistry( propTable, regObj, kSCSIPropertyTarget ) != true ) + { + goto createprop_error; + } + + regObj = 
(OSObject *)OSNumber::withNumber(targetLun.target,32); + if ( addToRegistry( propTable, regObj, kSCSIPropertyIOUnit ) != true ) + { + goto createprop_error; + } + + regObj = (OSObject *)OSNumber::withNumber(targetLun.lun,32); + if ( addToRegistry( propTable, regObj, kSCSIPropertyLun ) != true ) + { + goto createprop_error; + } + + d= tmpbuf; + + stripBlanks( d, (char *)inquiryData->vendorName, sizeof(inquiryData->vendorName) ); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kSCSIPropertyVendorName ) != true ) + { + goto createprop_error; + } + + stripBlanks( d, (char *)inquiryData->productName, sizeof(inquiryData->productName) ); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kSCSIPropertyProductName ) != true ) + { + goto createprop_error; + } + + stripBlanks( d, (char *)inquiryData->productRevision, sizeof(inquiryData->productRevision) ); + regObj = (OSObject *)OSString::withCString( d ); + if ( addToRegistry( propTable, regObj, kSCSIPropertyProductRevision ) != true ) + { + goto createprop_error; + } + + regObj = (OSObject *)OSBoolean::withBoolean( (inquiryData->devTypeMod & kSCSIDevTypeModRemovable) != 0 ); + if ( addToRegistry( propTable, regObj, kSCSIPropertyRemovableMedia ) != true ) + { + goto createprop_error; + } + + regObj = (OSObject *)OSNumber::withNumber( inquiryData->devType & kSCSIDevTypeMask, 32 ); + if ( addToRegistry( propTable, regObj, kSCSIPropertyDeviceTypeID ) != true ) + { + goto createprop_error; + } + + regObj = (OSObject *)target->regObjTransferPeriod; + if ( addToRegistry( propTable, regObj, kSCSIPropertyTransferPeriod, false ) != true ) + { + goto createprop_error; + } + regObjTransferPeriod = (OSNumber *)regObj; + + regObj = (OSObject *)target->regObjTransferOffset; + if ( addToRegistry( propTable, regObj, kSCSIPropertyTransferOffset, false ) != true ) + { + goto createprop_error; + } + regObjTransferOffset = (OSNumber *)regObj; + + + regObj = 
(OSObject *)target->regObjTransferWidth; + if ( addToRegistry( propTable, regObj, kSCSIPropertyTransferWidth, false ) != true ) + { + goto createprop_error; + } + regObjTransferWidth = (OSNumber *)regObj; + + regObj = (OSObject *)target->regObjTransferOptions; + if ( addToRegistry( propTable, regObj, kSCSIPropertyTransferOptions, false ) != true ) + { + goto createprop_error; + } + regObjTransferOptions = (OSNumber *)regObj; + + regObj = (OSObject *)target->regObjCmdQueue; + if ( addToRegistry( propTable, regObj, kSCSIPropertyCmdQueue, false ) != true ) + { + goto createprop_error; + } + regObjCmdQueue = (OSNumber *)regObj; + + return propTable; + +createprop_error: ; + propTable->release(); + return NULL; +} + + +/* + * + * + */ +bool IOSCSIParallelDevice::addToRegistry( OSDictionary *propTable, OSObject *regObj, char *key, + bool doRelease = true ) +{ + bool rc; + + if ( regObj == NULL ) + { + return false; + } + + rc = propTable->setObject( key, regObj ); + + if ( doRelease ) + { + // If 'doRelease' is true, then a reference count is consumed. 
+ regObj->release(); + } + + return rc; +} + + +/* + * + * + * + */ +bool IOSCSIParallelDevice::matchPropertyTable(OSDictionary * table) +{ + bool match; + + match = compareProperty( table, kSCSIPropertyIOUnit ) && + compareProperty( table, kSCSIPropertyDeviceTypeID ) && + compareProperty( table, kSCSIPropertyRemovableMedia ) && + compareProperty( table, kSCSIPropertyVendorName ) && + compareProperty( table, kSCSIPropertyProductName ) && + compareProperty( table, kSCSIPropertyProductRevision ); + + if ( match == true ) + { + match = super::matchPropertyTable(table); + } + + return match; +} + + +/* + * + * + * + */ +IOService *IOSCSIParallelDevice::matchLocation(IOService * client) +{ + return this; +} + + +/* + * + * + * + */ +void IOSCSIParallelDevice::stripBlanks( char *d, char *s, UInt32 l ) +{ + char *p, c; + + for ( p = d, c = *s; l && c ; l--) + { + c = (*d++ = *s++); + if ( c != ' ' ) + { + p = d; + } + } + *p = 0; +} + +/* + * + * + * + */ +IOSCSICommand *IOSCSIParallelDevice::allocCommand( IOSCSIDevice *, UInt32 clientDataSize ) +{ + + return (IOSCSICommand *) allocCommand( kIOSCSIParallelDevice, clientDataSize ); +} + +IOSCSIParallelCommand *IOSCSIParallelDevice::allocCommand( IOSCSIParallelDevice *, UInt32 clientDataSize ) +{ + IOSCSIParallelCommand *cmd; + + if ( (cmd = controller->allocCommand( clientDataSize )) ) + { + cmd->device = this; + } + return cmd; +} + +IOCDBCommand *IOSCSIParallelDevice::allocCommand( IOCDBDevice *, UInt32 clientDataSize ) +{ + return (IOCDBCommand *) allocCommand( kIOSCSIDevice, clientDataSize ); +} + + +/* + * + * + */ +IOWorkLoop *IOSCSIParallelDevice::getWorkLoop() const +{ + return controller->workLoop; +} + + +/* + * + * + * + */ +bool IOSCSIParallelDevice::open( IOService *forClient, IOOptionBits options, void *arg ) +{ + if ( client != 0 ) return false; + + client = forClient; + + return super::open( forClient, options, arg ); +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::close( IOService *forClient, 
IOOptionBits options ) +{ + client = 0; + + return super::close( forClient, options ); +} + +/* + * + * + * + */ +IOReturn IOSCSIParallelDevice::message( UInt32 forMsg, IOService *forProvider, void *forArg ) +{ + IOReturn rc = kIOReturnSuccess; + SCSIClientMessage clientMsg; + + clientMsg = (SCSIClientMessage) forMsg; + +// IOLog( "IOSCSIParallelDevice::message() - clientMsg = %08x\n\r", clientMsg ); + + switch( clientMsg ) + { + case kSCSIClientMsgBusReset: + holdQueue( kQTypeNormalQ ); + break; + case kSCSIClientMsgBusReset | kSCSIClientMsgDone: + releaseQueue( kQTypeNormalQ ); + break; + default: + rc = super::message( clientMsg, forProvider, forArg ); + } + + return rc; +} + +/* + * + * + * + */ +void IOSCSIParallelDevice::free() +{ + if ( deviceGate != 0 ) + { + controller->workLoop->removeEventSource( deviceGate ); + deviceGate->release(); + } + + if ( reqSenseCmd != 0 ) reqSenseCmd->release(); + if ( abortCmd != 0 ) abortCmd->release(); + if ( cancelCmd != 0 ) cancelCmd->release(); + if ( probeCmd != 0 ) probeCmd->release(); + + if ( tagArray != 0 ) IOFree( tagArray, controller->tagArraySize ); + if ( inquiryData != 0 ) IOFree( inquiryData, inquiryDataSize ); + if ( devicePrivateData != 0 ) IOFreeContiguous( devicePrivateData, controller->controllerInfo.lunPrivateDataSize ); + if ( clientSem != 0 ) IORWLockFree( clientSem ); + + super::free(); +} + + diff --git a/iokit/Families/IOSCSIParallel/queueHelpers.cpp b/iokit/Families/IOSCSIParallel/queueHelpers.cpp new file mode 100644 index 000000000..569418feb --- /dev/null +++ b/iokit/Families/IOSCSIParallel/queueHelpers.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * queueHelpers.cpp + * + */ +#include + +void IOSCSIParallelDevice::addCommand( queue_head_t *list, IOSCSIParallelCommand *scsiCmd ) +{ + scsiCmd->list = list; + + queue_enter( list, scsiCmd, IOSCSIParallelCommand *, nextCommand ); +} + +void IOSCSIParallelDevice::deleteCommand( queue_head_t *list, IOSCSIParallelCommand *scsiCmd, IOReturn rc = kIOReturnSuccess ) +{ + scsiCmd->list = 0; + + if ( rc != kIOReturnSuccess ) + { + if ( scsiCmd->results.returnCode == kIOReturnSuccess ) + { + scsiCmd->results.returnCode = (IOReturn) rc; + } + } + + queue_remove( list, scsiCmd, IOSCSIParallelCommand *, nextCommand ); +} + +IOSCSIParallelCommand *IOSCSIParallelDevice::checkCommand( queue_head_t *list ) +{ + if ( queue_empty( list ) == true ) + { + return 0; + } + + return (IOSCSIParallelCommand *)queue_first( list ); +} + + +IOSCSIParallelCommand *IOSCSIParallelDevice::getCommand( queue_head_t *list ) +{ + IOSCSIParallelCommand *scsiCmd = 0; + + if ( queue_empty( list ) == false ) + { + queue_remove_first( list, scsiCmd, IOSCSIParallelCommand *, nextCommand ); + scsiCmd->list = 0; + } + + return scsiCmd; +} + +void IOSCSIParallelDevice::stackCommand( queue_head_t *list, IOSCSIParallelCommand *scsiCmd ) +{ + scsiCmd->list = list; + + queue_enter_first( list, scsiCmd, IOSCSIParallelCommand *, nextCommand ); +} + +void IOSCSIParallelDevice::moveCommand( queue_head_t 
*fromList, queue_head_t *toList, IOSCSIParallelCommand *scsiCmd, IOReturn rc = kIOReturnSuccess ) +{ + if ( rc != kIOReturnSuccess ) + { + if ( scsiCmd->results.returnCode == kIOReturnSuccess ) + { + scsiCmd->results.returnCode = (IOReturn) rc; + } + } + + scsiCmd->list = toList; + + queue_remove( fromList, scsiCmd, IOSCSIParallelCommand *, nextCommand ); + queue_enter( toList, scsiCmd, IOSCSIParallelCommand *, nextCommand ); +} + +void IOSCSIParallelDevice::moveAllCommands( queue_head_t *fromList, queue_head_t *toList, IOReturn rc = kIOReturnSuccess ) +{ + IOSCSIParallelCommand *scsiCmd; + + if ( queue_empty( fromList ) == true ) return; + + do + { + scsiCmd = (IOSCSIParallelCommand *)queue_first( fromList ); + + if ( rc != kIOReturnSuccess ) + { + if ( scsiCmd->results.returnCode == kIOReturnSuccess ) + { + scsiCmd->results.returnCode = (IOReturn) rc; + } + } + + scsiCmd->list = toList; + + queue_remove( fromList, scsiCmd, IOSCSIParallelCommand *, nextCommand ); + queue_enter( toList, scsiCmd, IOSCSIParallelCommand *, nextCommand ); + + } while( queue_empty( fromList ) == false ); +} + +bool IOSCSIParallelDevice::findCommand( queue_head_t *list, IOSCSIParallelCommand *findSCSICmd ) +{ + IOSCSIParallelCommand *scsiCmd; + + queue_iterate( list, scsiCmd, IOSCSIParallelCommand *, nextCommand ) + { + if ( scsiCmd == findSCSICmd ) + { + return true; + } + } + return false; +} + +void IOSCSIParallelDevice::purgeAllCommands( queue_head_t *list, IOReturn rc ) +{ + IOSCSIParallelCommand *scsiCmd; + + if ( queue_empty( list ) == true ) return; + + do + { + scsiCmd = (IOSCSIParallelCommand *)queue_first( list ); + + deleteCommand( list, scsiCmd, rc ); + finishCommand( scsiCmd ); + + } while( queue_empty( list ) == false ); +} diff --git a/iokit/Families/IOStorage/IOApplePartitionScheme.cpp b/iokit/Families/IOStorage/IOApplePartitionScheme.cpp new file mode 100644 index 000000000..cf6a91f22 --- /dev/null +++ b/iokit/Families/IOStorage/IOApplePartitionScheme.cpp @@ -0,0 +1,747 
@@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +#define super IOPartitionScheme +OSDefineMetaClassAndStructors(IOApplePartitionScheme, IOPartitionScheme); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Notes +// +// o the on-disk structure's fields are: 16-bit packed, big-endian formatted +// o the dpme_pblock_start and dpme_pblocks block values are: +// o for media without a driver map: +// o natural block size based +// o for media with a driver map: +// o driver map block size based, unless the driver map block size is 2048 +// and a valid partition entry exists at a 512 byte offset into the disk, +// in which case, assume a 512 byte block size, except for the partition +// entries that lie on a 2048 byte multiple and are one of the following +// types: Apple_Patches, Apple_Driver, Apple_Driver43, Apple_Driver43_CD, +// Apple_Driver_ATA, Apple_Driver_ATAPI; in which case, we assume a 2048 +// byte block size (for the one partition) +// o the dpme_pblock_start block 
value is relative to the media container +// + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define kIOApplePartitionSchemeContentTable "Content Table" + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOApplePartitionScheme::init(OSDictionary * properties = 0) +{ + // + // Initialize this object's minimal state. + // + + // State our assumptions. + + assert(sizeof(dpme) == 512); // (compiler/platform check) + assert(sizeof(DDMap) == 8); // (compiler/platform check) + assert(sizeof(Block0) == 512); // (compiler/platform check) + + // Ask our superclass' opinion. + + if (super::init(properties) == false) return false; + + // Initialize our state. + + _partitions = 0; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOApplePartitionScheme::free() +{ + // + // Free all of this object's outstanding resources. + // + + if ( _partitions ) _partitions->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOService * IOApplePartitionScheme::probe(IOService * provider, SInt32 * score) +{ + // + // Determine whether the provider media contains an Apple partition map. + // + + // State our assumptions. + + assert(OSDynamicCast(IOMedia, provider)); + + // Ask superclass' opinion. + + if (super::probe(provider, score) == 0) return 0; + + // Scan the provider media for an Apple partition map. + + _partitions = scan(score); + + return ( _partitions ) ? this : 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOApplePartitionScheme::start(IOService * provider) +{ + // + // Publish the new media objects which represent our partitions. + // + + IOMedia * partition; + OSIterator * partitionIterator; + + // State our assumptions. + + assert(_partitions); + + // Ask our superclass' opinion. 
+ + if ( super::start(provider) == false ) return false; + + // Attach and register the new media objects representing our partitions. + + partitionIterator = OSCollectionIterator::withCollection(_partitions); + if ( partitionIterator == 0 ) return false; + + while ( (partition = (IOMedia *) partitionIterator->getNextObject()) ) + { + if ( partition->attach(this) ) + { + attachMediaObjectToDeviceTree(partition); + + partition->registerService(); + } + } + + partitionIterator->release(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOApplePartitionScheme::stop(IOService * provider) +{ + // + // Clean up after the media objects we published before terminating. + // + + IOMedia * partition; + OSIterator * partitionIterator; + + // State our assumptions. + + assert(_partitions); + + // Detach the media objects we previously attached to the device tree. + + partitionIterator = OSCollectionIterator::withCollection(_partitions); + + if ( partitionIterator ) + { + while ( (partition = (IOMedia *) partitionIterator->getNextObject()) ) + { + detachMediaObjectFromDeviceTree(partition); + } + + partitionIterator->release(); + } + + super::stop(provider); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSSet * IOApplePartitionScheme::scan(SInt32 * score) +{ + // + // Scan the provider media for an Apple partition map. Returns the set + // of media objects representing each of the partitions (the retain for + // the set is passed to the caller), or null should no partition map be + // found. The default probe score can be adjusted up or down, based on + // the confidence of the scan. 
+ // + + IOBufferMemoryDescriptor * buffer = 0; + UInt32 bufferReadAt = 0; + UInt32 bufferSize = 0; + UInt32 dpmeBlockSize = 0; + UInt32 dpmeCount = 0; + UInt32 dpmeID = 0; + dpme * dpmeMap = 0; + UInt32 dpmeMaxCount = 0; + bool dpmeOldSchool = false; + Block0 * driverMap = 0; + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + bool mediaIsOpen = false; + OSSet * partitions = 0; + IOReturn status = kIOReturnError; + + // Determine whether this media is formatted. + + if ( media->isFormatted() == false ) goto scanErr; + + // Determine whether this media has an appropriate block size. + + if ( (mediaBlockSize % sizeof(dpme)) ) goto scanErr; + + // Allocate a buffer large enough to hold one map, rounded to a media block. + + bufferSize = IORound(max(sizeof(Block0), sizeof(dpme)), mediaBlockSize); + buffer = IOBufferMemoryDescriptor::withCapacity( + /* capacity */ bufferSize, + /* withDirection */ kIODirectionIn ); + if ( buffer == 0 ) goto scanErr; + + // Allocate a set to hold the set of media objects representing partitions. + + partitions = OSSet::withCapacity(8); + if ( partitions == 0 ) goto scanErr; + + // Open the media with read access. + + mediaIsOpen = media->open(this, 0, kIOStorageAccessReader); + if ( mediaIsOpen == false ) goto scanErr; + + // Read the driver map into our buffer. + + bufferReadAt = 0; + +///m:2333367:workaround:commented:start +// status = media->read(this, bufferReadAt, buffer); +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = media->IOStorage::read(this, bufferReadAt, buffer); +///m:2333367:workaround:added:stop + if ( status != kIOReturnSuccess ) goto scanErr; + + driverMap = (Block0 *) buffer->getBytesNoCopy(); + + // Determine the official block size to use to scan the partition entries. 
+ + dpmeBlockSize = mediaBlockSize; // (natural block size) + + if ( driverMap->sbSig == BLOCK0_SIGNATURE ) + { + dpmeBlockSize = driverMap->sbBlkSize; // (driver map block size) + + // Determine whether we have an old school partition map, where there is + // a partition entry at a 512 byte offset into the disk, even though the + // driver map block size is 2048. + + if ( dpmeBlockSize == 2048 ) + { + if ( bufferSize >= sizeof(Block0) + sizeof(dpme) ) // (in buffer?) + { + dpmeMap = (dpme *) (driverMap + 1); + } + else // (not in buffer) + { + // Read the partition entry at byte offset 512 into our buffer. + + bufferReadAt = sizeof(dpme); + +///m:2333367:workaround:commented:start +// status = media->read(this, bufferReadAt, buffer); +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = media->IOStorage::read(this, bufferReadAt, buffer); +///m:2333367:workaround:added:stop + if ( status != kIOReturnSuccess ) goto scanErr; + + dpmeMap = (dpme *) buffer->getBytesNoCopy(); + } + + // Determine whether the partition entry signature is present. + + if (OSSwapBigToHostInt16(dpmeMap->dpme_signature) == DPME_SIGNATURE) + { + dpmeBlockSize = sizeof(dpme); // (old school block size) + dpmeOldSchool = true; + } + } + + // Increase the probe score when a driver map is detected, since we are + // more confident in the match when it is present. This will eliminate + // conflicts with FDisk when it shares the same block as the driver map. + + *score += 2000; + } + + // Scan the media for Apple partition entries. + + for ( dpmeID = 1, dpmeCount = 1; dpmeID <= dpmeCount; dpmeID++ ) + { + UInt32 partitionBlockSize = dpmeBlockSize; + + // Determine whether we've exhausted the current buffer of entries. + + if ( dpmeID * dpmeBlockSize + sizeof(dpme) > bufferReadAt + bufferSize ) + { + // Read the next partition entry into our buffer. 
+ + bufferReadAt = dpmeID * dpmeBlockSize; + +///m:2333367:workaround:commented:start +// status = media->read(this, bufferReadAt, buffer); +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = media->IOStorage::read(this, bufferReadAt, buffer); +///m:2333367:workaround:added:stop + if ( status != kIOReturnSuccess ) goto scanErr; + } + + dpmeMap = (dpme *) ( ((UInt8 *) buffer->getBytesNoCopy()) + + (dpmeID * dpmeBlockSize) - bufferReadAt ); + + // Determine whether the partition entry signature is present. + + if ( OSSwapBigToHostInt16(dpmeMap->dpme_signature) != DPME_SIGNATURE ) + { + goto scanErr; + } + + // Obtain an accurate number of entries in the partition map. + + if ( !strcmp(dpmeMap->dpme_type, "Apple_partition_map") || + !strcmp(dpmeMap->dpme_type, "Apple_Partition_Map") || + !strcmp(dpmeMap->dpme_type, "Apple_patition_map" ) ) + { + dpmeCount = OSSwapBigToHostInt32(dpmeMap->dpme_map_entries); + dpmeMaxCount = OSSwapBigToHostInt32(dpmeMap->dpme_pblocks); + } + else if ( dpmeCount == 1 ) + { + dpmeCount = OSSwapBigToHostInt32(dpmeMap->dpme_map_entries); + } + + // Obtain an accurate block size for an old school partition map. + + if ( dpmeOldSchool && (dpmeID % 4) == 0 ) + { + if ( !strcmp(dpmeMap->dpme_type, "Apple_Driver" ) || + !strcmp(dpmeMap->dpme_type, "Apple_Driver43" ) || + !strcmp(dpmeMap->dpme_type, "Apple_Driver43_CD" ) || + !strcmp(dpmeMap->dpme_type, "Apple_Driver_ATA" ) || + !strcmp(dpmeMap->dpme_type, "Apple_Driver_ATAPI") || + !strcmp(dpmeMap->dpme_type, "Apple_Patches" ) ) + { + partitionBlockSize = 2048; + } + } + + // Determine whether the partition is corrupt (fatal). + + if ( isPartitionCorrupt( + /* partition */ dpmeMap, + /* partitionID */ dpmeID, + /* partitionBlockSize */ partitionBlockSize ) ) + { + goto scanErr; + } + + // Determine whether the partition is invalid (skipped). 
+ + if ( isPartitionInvalid( + /* partition */ dpmeMap, + /* partitionID */ dpmeID, + /* partitionBlockSize */ partitionBlockSize ) ) + { + continue; + } + + // Create a media object to represent this partition. + + IOMedia * newMedia = instantiateMediaObject( + /* partition */ dpmeMap, + /* partitionID */ dpmeID, + /* partitionBlockSize */ partitionBlockSize ); + + if ( newMedia ) + { + partitions->setObject(newMedia); + newMedia->release(); + } + } + + // Determine whether we ever came accross an Apple_partition_map partition. + + if ( dpmeMaxCount == 0 ) goto scanErr; + + // Release our resources. + + media->close(this); + buffer->release(); + + return partitions; + +scanErr: + + // Release our resources. + + if ( mediaIsOpen ) media->close(this); + if ( partitions ) partitions->release(); + if ( buffer ) buffer->release(); + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOApplePartitionScheme::isPartitionCorrupt( + dpme * /* partition */ , + UInt32 /* partitionID */ , + UInt32 /* partitionBlockSize */ ) +{ + // + // Ask whether the given partition appears to be corrupt. A partition that + // is corrupt will cause the failure of the Apple partition map recognition + // altogether. + // + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOApplePartitionScheme::isPartitionInvalid( dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ) +{ + // + // Ask whether the given partition appears to be invalid. A partition that + // is invalid will cause it to be skipped in the scan, but will not cause a + // failure of the Apple partition map recognition. + // + + IOMedia * media = getProvider(); + UInt64 partitionBase = 0; + UInt64 partitionSize = 0; + + // Compute the relative byte position and size of the new partition. 
+ + partitionBase = OSSwapBigToHostInt32(partition->dpme_pblock_start); + partitionSize = OSSwapBigToHostInt32(partition->dpme_pblocks); + partitionBase *= partitionBlockSize; + partitionSize *= partitionBlockSize; + + // Determine whether the partition is a placeholder. + + if ( partitionSize == 0 ) return true; + + // Determine whether the partition starts at (or past) the end-of-media. + + if ( partitionBase >= media->getSize() ) return true; + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOApplePartitionScheme::instantiateMediaObject( + dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ) +{ + // + // Instantiate a new media object to represent the given partition. + // + + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + UInt64 partitionBase = 0; + char * partitionHint = partition->dpme_type; + bool partitionIsWritable = media->isWritable(); + char * partitionName = partition->dpme_name; + UInt64 partitionSize = 0; + + // Compute the relative byte position and size of the new partition. + + partitionBase = OSSwapBigToHostInt32(partition->dpme_pblock_start); + partitionSize = OSSwapBigToHostInt32(partition->dpme_pblocks); + partitionBase *= partitionBlockSize; + partitionSize *= partitionBlockSize; + + // Clip the size of the new partition if it extends past the end-of-media. + + if ( partitionBase + partitionSize > media->getSize() ) + { + partitionSize = media->getSize() - partitionBase; + } + + // Look up a type for the new partition. 
+ + OSDictionary * hintTable = OSDynamicCast( + /* type */ OSDictionary, + /* instance */ getProperty(kIOApplePartitionSchemeContentTable) ); + + if ( hintTable ) + { + OSString * hintValue = OSDynamicCast( + /* type */ OSString, + /* instance */ hintTable->getObject(partitionHint) ); + + if ( hintValue ) partitionHint = (char *) hintValue->getCStringNoCopy(); + } + + // Look up a name for the new partition. + + while ( *partitionName == ' ' ) { partitionName++; } + + if ( *partitionName == 0 ) partitionName = 0; + + // Determine whether the new partition type is Apple_Free, which we choose + // not to publish because it is an internal concept to the partition map. + + if ( !strcmp(partitionHint, "Apple_Free") ) return 0; + + // Determine whether the new partition is read-only. + // + // Note that we treat the misspelt Apple_patition_map entries as equivalent + // to Apple_partition_map entries due to the messed up CDs noted in 2513960. + + if ( !strcmp(partition->dpme_type, "Apple_partition_map") || + !strcmp(partition->dpme_type, "Apple_Partition_Map") || + !strcmp(partition->dpme_type, "Apple_patition_map" ) || + ( ((partition->dpme_flags & DPME_FLAGS_WRITABLE) == 0) && + ((partition->dpme_flags & DPME_FLAGS_VALID ) != 0) ) ) + { + partitionIsWritable = false; + } + + // Create the new media object. + + IOMedia * newMedia = instantiateDesiredMediaObject( + /* partition */ partition, + /* partitionID */ partitionID, + /* partitionBlockSize */ partitionBlockSize ); + + if ( newMedia ) + { + if ( newMedia->init( + /* base */ partitionBase, + /* size */ partitionSize, + /* preferredBlockSize */ mediaBlockSize, + /* isEjectable */ media->isEjectable(), + /* isWhole */ false, + /* isWritable */ partitionIsWritable, + /* contentHint */ partitionHint ) ) + { + // Set a name for this partition. + + char name[24]; + sprintf(name, "Untitled %ld", partitionID); + newMedia->setName(partitionName ? 
partitionName : name); + + // Set a location value (the partition number) for this partition. + + char location[12]; + sprintf(location, "%ld", partitionID); + newMedia->setLocation(location); + + // Set the "Partition ID" key for this partition. + + newMedia->setProperty(kIOMediaPartitionIDKey, partitionID, 32); + } + else + { + newMedia->release(); + newMedia = 0; + } + } + + return newMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOApplePartitionScheme::instantiateDesiredMediaObject( + dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ) +{ + // + // Allocate a new media object (called from instantiateMediaObject). + // + + return new IOMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOApplePartitionScheme::attachMediaObjectToDeviceTree( IOMedia * media ) +{ + // + // Attach the given media object to the device tree plane. + // + + IOService * service; + SInt32 unit = -1; + + for ( service = this; service; service = service->getProvider() ) + { + OSNumber * number; + + if ( (number = OSDynamicCast(OSNumber, service->getProperty("IOUnit")))) + { + unit = number->unsigned32BitValue(); + } + + if ( service->inPlane(gIODTPlane) ) + { + IORegistryEntry * child; + IORegistryIterator * children; + + if ( unit == -1 ) break; + + children = IORegistryIterator::iterateOver(service, gIODTPlane); + + if ( children == 0 ) break; + + while ( (child = children->getNextObject()) ) + { + const char * location = child->getLocation(gIODTPlane); + const char * name = child->getName(gIODTPlane); + + if ( name == 0 || strcmp(name, "" ) != 0 || + location == 0 || strchr(location, ':') == 0 ) + { + child->detachAll(gIODTPlane); + } + } + + children->release(); + + if ( media->attachToParent(service, gIODTPlane) ) + { + char location[ sizeof("hhhhhhhh:dddddddddd") ]; + + sprintf(location, "%lx:", unit); + strcat(location, media->getLocation()); + 
media->setLocation(location, gIODTPlane); + media->setName("", gIODTPlane); + + return true; + } + + break; + } + } + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOApplePartitionScheme::detachMediaObjectFromDeviceTree( IOMedia * media ) +{ + // + // Detach the given media object from the device tree plane. + // + + IORegistryEntry * parent; + + if ( (parent = media->getParentEntry(gIODTPlane)) ) + { + media->detachFromParent(parent, gIODTPlane); + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 9); + +// 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOApplePartitionScheme, 15); diff --git a/iokit/Families/IOStorage/IOBlockStorageDriver.cpp b/iokit/Families/IOStorage/IOBlockStorageDriver.cpp new file mode 100644 index 000000000..8e843b49a --- /dev/null +++ b/iokit/Families/IOStorage/IOBlockStorageDriver.cpp @@ -0,0 +1,2338 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +#define super IOStorage +OSDefineMetaClassAndStructors(IOBlockStorageDriver, IOStorage) + +// Hack for Cheetah to prevent sleep if there's disk activity. +static IOService * gIORootPowerDomain = NULL; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +const UInt32 kPollerInterval = 1000; // (ms, 1 second) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOBlockStorageDevice * IOBlockStorageDriver::getProvider() const +{ + // + // Obtain this object's provider. We override the superclass's method to + // return a more specific subclass of IOService -- IOBlockStorageDevice. + // This method serves simply as a convenience to subclass developers. + // + + return (IOBlockStorageDevice *) IOService::getProvider(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOBlockStorageDriver::init(OSDictionary * properties = 0) +{ + // + // Initialize this object's minimal state. 
+ // + + if (super::init(properties) == false) return false; + + initMediaState(); + + _ejectable = false; + _lockable = false; + _pollIsExpensive = false; + _pollIsRequired = false; + _removable = false; + + _mediaBlockSize = 0; + _maxBlockNumber = 0; + _maxReadByteTransfer = 0; + _maxWriteByteTransfer = 0; + + _mediaStateLock = IOLockAlloc(); + + if (_mediaStateLock == 0) + return false; + + _deblockRequestWriteLock = IOLockAlloc(); + _openClients = OSSet::withCapacity(2); + _pollerCall = thread_call_allocate(poller, this); + + for (unsigned index = 0; index < kStatisticsCount; index++) + _statistics[index] = OSNumber::withNumber(0ULL, 64); + + if (_deblockRequestWriteLock == 0 || _openClients == 0 || _pollerCall == 0) + return false; + + for (unsigned index = 0; index < kStatisticsCount; index++) + if (_statistics[index] == 0) return false; + + // + // Create the standard block storage driver registry properties. + // + + OSDictionary * statistics = OSDictionary::withCapacity(kStatisticsCount); + + if (statistics == 0) return false; + + statistics->setObject( kIOBlockStorageDriverStatisticsBytesReadKey, + _statistics[kStatisticsBytesRead] ); + statistics->setObject( kIOBlockStorageDriverStatisticsBytesWrittenKey, + _statistics[kStatisticsBytesWritten] ); + statistics->setObject( kIOBlockStorageDriverStatisticsReadErrorsKey, + _statistics[kStatisticsReadErrors] ); + statistics->setObject( kIOBlockStorageDriverStatisticsWriteErrorsKey, + _statistics[kStatisticsWriteErrors] ); + statistics->setObject( kIOBlockStorageDriverStatisticsLatentReadTimeKey, + _statistics[kStatisticsLatentReadTime] ); + statistics->setObject( kIOBlockStorageDriverStatisticsLatentWriteTimeKey, + _statistics[kStatisticsLatentWriteTime] ); + statistics->setObject( kIOBlockStorageDriverStatisticsReadsKey, + _statistics[kStatisticsReads] ); + statistics->setObject( kIOBlockStorageDriverStatisticsWritesKey, + _statistics[kStatisticsWrites] ); + statistics->setObject( 
kIOBlockStorageDriverStatisticsReadRetriesKey, + _statistics[kStatisticsReadRetries] ); + statistics->setObject( kIOBlockStorageDriverStatisticsWriteRetriesKey, + _statistics[kStatisticsWriteRetries] ); + statistics->setObject( kIOBlockStorageDriverStatisticsTotalReadTimeKey, + _statistics[kStatisticsTotalReadTime] ); + statistics->setObject( kIOBlockStorageDriverStatisticsTotalWriteTimeKey, + _statistics[kStatisticsTotalWriteTime] ); + + setProperty(kIOBlockStorageDriverStatisticsKey, statistics); + + // Hack for Cheetah to prevent sleep if there's disk activity. + if (!gIORootPowerDomain) { + // No danger of race here as we're ultimately just setting + // the gIORootPowerDomain variable. + + do { + IOService * root = NULL; + OSIterator * iterator = NULL; + OSDictionary * pmDict = NULL; + + root = IOService::getServiceRoot(); + if (!root) break; + + pmDict = root->serviceMatching("IOPMrootDomain"); + if (!pmDict) break; + + iterator = root->getMatchingServices(pmDict); + pmDict->release(); + if (!iterator) break; + + if (iterator) { + gIORootPowerDomain = OSDynamicCast(IOService, iterator->getNextObject()); + iterator->release(); + } + } while (false); + } + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOBlockStorageDriver::start(IOService * provider) +{ + // + // This method is called once we have been attached to the provider object. + // + + // Open the block storage device. + + if (provider->open(this) == false) return false; + + // Prepare the block storage driver for operation. + + if (handleStart(provider) == false) + { + provider->close(this); + return false; + } + + // Initiate the poller mechanism if it is required. 
+ + if (isMediaEjectable() && isMediaPollRequired() && !isMediaPollExpensive()) + { + lockForArbitration(); // (disable opens/closes; a recursive lock) + + if (!isOpen() && !isInactive()) + schedulePoller(); // (schedule the poller, increments retain) + + unlockForArbitration(); // (enable opens/closes; a recursive lock) + } + + // Register this object so it can be found via notification requests. It is + // not being registered to have I/O Kit attempt to have drivers match on it, + // which is the reason most other services are registered -- that's not the + // intention of this registerService call. + + registerService(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOBlockStorageDriver::yield(IOService * provider, + IOOptionBits options, + void * argument) +{ + // + // This method is called as a result of the kIOMessageServiceIsTerminated + // or kIOMessageServiceIsRequestingClose provider messages. The argument + // is passed in as-is from the message. The kIOServiceRequired option is + // set for the kIOMessageServiceIsTerminated message to indicate that the + // yield must succeed. + // + + bool success = false; + + lockForArbitration(); + + // Yield the block storage device. + + success = handleYield(provider, options, argument); + + if (success) + { + // Close the block storage device. + + provider->close(this); + } + + unlockForArbitration(); + + return success; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::free() +{ + // + // Free all of this object's outstanding resources. 
+ // + + if (_mediaStateLock) IOLockFree(_mediaStateLock); + + if (_deblockRequestWriteLock) IOLockFree(_deblockRequestWriteLock); + if (_openClients) _openClients->release(); + if (_pollerCall) thread_call_free(_pollerCall); + + for (unsigned index = 0; index < kStatisticsCount; index++) + if (_statistics[index]) _statistics[index]->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOBlockStorageDriver::handleOpen(IOService * client, + IOOptionBits options, + void * argument) +{ + // + // The handleOpen method grants or denies permission to access this object + // to an interested client. The argument is an IOStorageAccess value that + // specifies the level of access desired -- reader or reader-writer. + // + // This method can be invoked to upgrade or downgrade the access level for + // an existing client as well. The previous access level will prevail for + // upgrades that fail, of course. A downgrade should never fail. If the + // new access level should be the same as the old for a given client, this + // method will do nothing and return success. In all cases, one, singular + // close-per-client is expected for all opens-per-client received. + // + // This method assumes that the arbitration lock is held. + // + + assert(client); + + // Ensure there is media in the block storage device. + + if (getMediaState() == kIOMediaStateOffline) return false; + + // Handle the first open on removable media in a special case. + + if (isMediaEjectable() && _openClients->getCount() == 0) + { + // Halt the poller if it is active and this is the first open. + + if (isMediaPollRequired() && !isMediaPollExpensive()) + unschedulePoller(); // (unschedule the poller) + + // Lock down the media while we have opens on this driver object. + + if (lockMedia(true) != kIOReturnSuccess) + IOLog("%s: Unable to lock down removable media.\n", getName()); + } + + // Process the open. 
+ + _openClients->setObject(client); // (works for up/downgrade case) + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOBlockStorageDriver::handleIsOpen(const IOService * client) const +{ + // + // The handleIsOpen method determines whether the specified client, or any + // client if none is specificed, presently has an open on this object. + // + // This method assumes that the arbitration lock is held. + // + + if (client) + return _openClients->containsObject(client); + else + return (_openClients->getCount() != 0); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::handleClose(IOService * client, IOOptionBits options) +{ + // + // The handleClose method drops the incoming client's access to this object. + // + // This method assumes that the arbitration lock is held. + // + + assert(client); + + // Process the close. + + _openClients->removeObject(client); + + // Handle the last close in a special case. + + if (!isInactive() && _openClients->getCount() == 0) + { + if (isMediaWritable()) + { + if (getMediaState() == kIOMediaStateOnline) + { + // Synchronize the cache on writeable media. + + if (synchronizeCache(this) != kIOReturnSuccess) + IOLog("%s: Unable to flush cache on media.\n", getName()); + } + } + + if (isMediaEjectable()) + { + // Unlock the removable media. + + if (getMediaState() == kIOMediaStateOnline) + { + if (lockMedia(false) != kIOReturnSuccess) + IOLog("%s: Unable to unlock removable media.\n", getName()); + } + + // Reactivate the poller. 
+ + if (isMediaPollRequired() && !isMediaPollExpensive()) + schedulePoller(); // (schedule the poller, increments retain) + } + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::read(IOService * /* client */, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // The read method is the receiving end for all read requests from the + // storage framework, ie. via the media object created by this driver. + // + // This method kicks off a sequence of three methods for each read or write + // request. The first is prepareRequest, which allocates and prepares some + // context for the transfer; the second is deblockRequest, which aligns the + // transfer at the media block boundaries; and the third is executeRequest, + // which implements the actual transfer from the block storage device. + // + + // State our assumptions. + + assert(buffer->getDirection() == kIODirectionIn); + + // Prepare the transfer. + + prepareRequest(byteStart, buffer, completion); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::write(IOService * /* client */, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // The write method is the receiving end for all write requests from the + // storage framework, ie. via the media object created by this driver. + // + // This method kicks off a sequence of three methods for each read or write + // request. The first is prepareRequest, which allocates and prepares some + // context for the transfer; the second is deblockRequest, which aligns the + // transfer at the media block boundaries; and the third is executeRequest, + // which implements the actual transfer from the block storage driver. + // + + // State our assumptions. + + assert(buffer->getDirection() == kIODirectionOut); + + // Prepare the transfer. 
+ + prepareRequest(byteStart, buffer, completion); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::addToBytesTransferred(UInt64 bytesTransferred, + UInt64 totalTime, // (ns) + UInt64 latentTime, // (ns) + bool isWrite) +{ + // + // Update the total number of bytes transferred, the total transfer time, + // and the total latency time -- used for statistics. + // + + if (isWrite) + { + _statistics[kStatisticsWrites]->addValue(1); + _statistics[kStatisticsBytesWritten]->addValue(bytesTransferred); + _statistics[kStatisticsTotalWriteTime]->addValue(totalTime); + _statistics[kStatisticsLatentWriteTime]->addValue(latentTime); + if (bytesTransferred <= getMediaBlockSize()) + _statistics[kStatisticsSingleBlockWrites]->addValue(1); + } + else + { + _statistics[kStatisticsReads]->addValue(1); + _statistics[kStatisticsBytesRead]->addValue(bytesTransferred); + _statistics[kStatisticsTotalReadTime]->addValue(totalTime); + _statistics[kStatisticsLatentReadTime]->addValue(latentTime); + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::incrementRetries(bool isWrite) +{ + // + // Update the total retry count -- used for statistics. + // + + if (isWrite) + _statistics[kStatisticsWriteRetries]->addValue(1); + else + _statistics[kStatisticsReadRetries]->addValue(1); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::incrementErrors(bool isWrite) +{ + // + // Update the total error count -- used for statistics. 
+ // + + if (isWrite) + _statistics[kStatisticsWriteErrors]->addValue(1); + else + _statistics[kStatisticsReadErrors]->addValue(1); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 IOBlockStorageDriver::getStatistics(UInt64 * statistics, + UInt32 statisticsMaxCount) const +{ + // + // Ask the driver to report its operating statistics. + // + // The statistics are each indexed by IOBlockStorageDriver::Statistics + // indices. This routine fills the caller's buffer, up to the maximum + // count specified if the real number of statistics would overflow the + // buffer. The return value indicates the actual number of statistics + // copied to the buffer. + // + // If the statistics buffer is not supplied or if the maximum count is + // zero, the routine returns the proposed count of statistics instead. + // + + if (statistics == 0) + return kStatisticsCount; + + UInt32 statisticsCount = min(kStatisticsCount, statisticsMaxCount); + + for (unsigned index = 0; index < statisticsCount; index++) + statistics[index] = _statistics[index]->unsigned64BitValue(); + + return statisticsCount; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt64 IOBlockStorageDriver::getStatistic(Statistics statistic) const +{ + // + // Ask the driver to report one of its operating statistics. + // + + if ((UInt32) statistic >= kStatisticsCount) return 0; + + return _statistics[statistic]->unsigned64BitValue(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOBlockStorageDriver::Context * IOBlockStorageDriver::allocateContext() +{ + // + // Allocate a context structure for a read/write operation. 
+ // + + Context * context = IONew(Context, 1); + + if (context) + { + bzero(context, sizeof(Context)); + } + + return context; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::deleteContext( + IOBlockStorageDriver::Context * context) +{ + // + // Delete a context structure from a read/write operation. + // + + IODelete(context, Context, 1); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::prepareRequest(UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // The prepareRequest method allocates and prepares state for the transfer. + // + // This method is part of a sequence of methods invoked for each read/write + // request. The first is prepareRequest, which allocates and prepares some + // context for the transfer; the second is deblockRequest, which aligns the + // transfer at the media block boundaries; and the third is executeRequest, + // which implements the actual transfer from the block storage device. + // + + Context * context; + IOReturn status; + + // Allocate a context structure to hold some of our state. + + context = allocateContext(); + + if (context == 0) + { + complete(completion, kIOReturnNoMemory); + return; + } + + // Prepare the transfer buffer. + + status = buffer->prepare(); + + if (status != kIOReturnSuccess) + { + deleteContext(context); + complete(completion, status); + return; + } + + // Fill in the context structure with some of our state. + + context->block.size = getMediaBlockSize(); + context->block.type = kBlockTypeStandard; + + context->original.byteStart = byteStart; + context->original.buffer = buffer; + context->original.buffer->retain(); + context->original.completion = completion; + + completion.target = this; + completion.action = prepareRequestCompletion; + completion.parameter = context; + + // Deblock the transfer. 
+ + deblockRequest(byteStart, buffer, completion, context); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::prepareRequestCompletion(void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount) +{ + // + // This is the completion routine for the prepared request. It updates + // the driver's statistics, performs some clean up work, then calls the + // original request's completion routine. + // + + Context * context = (Context *) parameter; + IOBlockStorageDriver * driver = (IOBlockStorageDriver *) target; + bool isWrite; + + isWrite = (context->original.buffer->getDirection() == kIODirectionOut); + + // State our assumptions. + + assert(status != kIOReturnSuccess || + context->original.buffer->getLength() == actualByteCount); + + // Update the total number of bytes transferred. + + driver->addToBytesTransferred(actualByteCount, 0, 0, isWrite); + + // Update the total error count. + + if (status != kIOReturnSuccess) + { + driver->incrementErrors(isWrite); + } + + // Complete the transfer buffer. + + context->original.buffer->complete(); + + // Complete the transfer request. + + IOStorage::complete(context->original.completion, status, actualByteCount); + + // Release our resources. + + context->original.buffer->release(); + + driver->deleteContext(context); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::schedulePoller() +{ + // + // Schedule the poller mechanism. + // + // This method assumes that the arbitration lock is held. + // + + AbsoluteTime deadline; + + retain(); + + clock_interval_to_deadline(kPollerInterval, kMillisecondScale, &deadline); + thread_call_enter_delayed(_pollerCall, deadline); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::unschedulePoller() +{ + // + // Unschedule the poller mechanism. 
+ // + // This method assumes that the arbitration lock is held. + // + + if (thread_call_cancel(_pollerCall)) release(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::poller(void * target, void *) +{ + // + // This method is the timeout handler for the poller mechanism. It polls + // for media and reschedules another timeout if there are still no opens. + // + + IOBlockStorageDriver * driver = (IOBlockStorageDriver *) target; + + driver->pollMedia(); + + driver->lockForArbitration(); // (disable opens/closes; a recursive lock) + + if (!driver->isOpen() && !driver->isInactive()) + driver->schedulePoller(); // (schedule the poller, increments retain) + + driver->unlockForArbitration(); // (enable opens/closes; a recursive lock) + + driver->release(); // (drop the retain associated with this poll) +} + + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOBlockStorageDriver::message(UInt32 type, + IOService * provider, + void * argument) +{ + // + // Generic entry point for calls from the provider. A return value of + // kIOReturnSuccess indicates that the message was received, and where + // applicable, that it was successful. + // + + switch (type) + { + case kIOMessageMediaStateHasChanged: + { + IOReturn status; + IOLockLock(_mediaStateLock); + status = mediaStateHasChanged((IOMediaState) argument); + IOLockUnlock(_mediaStateLock); + return status; + } + case kIOMessageServiceIsRequestingClose: + { + bool success; + success = yield(provider, 0, argument); + return success ? kIOReturnSuccess : kIOReturnBusy; + } + case kIOMessageServiceIsTerminated: + { + bool success; + success = yield(provider, kIOServiceRequired, argument); + return success ? 
kIOReturnSuccess : kIOReturnError; + } + default: + { + return super::message(type, provider, argument); + } + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +/* Accept a new piece of media, doing whatever's necessary to make it + * show up properly to the system. The arbitration lock is assumed to + * be held during the call. + */ +IOReturn +IOBlockStorageDriver::acceptNewMedia(void) +{ + IOReturn result; + bool ok; + UInt64 nbytes; + char name[128]; + bool nameSep; + + /* Since the kernel printf doesn't handle 64-bit integers, we + * simply make an assumption that the block count and size + * will be 32-bit values max. + */ + +#ifdef moreDebug + IOLog("%s[IOBlockStorageDriver]::%s media: %ld blocks, %ld bytes each, write-%s.\n", + getName(), + getDeviceTypeName(), + (UInt32)_maxBlockNumber + 1,(UInt32)getMediaBlockSize(), + (_writeProtected ? "protected" : "enabled")); +#endif + + if (_maxBlockNumber) { + nbytes = _mediaBlockSize * (_maxBlockNumber + 1); + } else { + nbytes = 0; + } + + /* Instantiate a media object and attach it to ourselves. */ + + name[0] = 0; + nameSep = false; + if (getProvider()->getVendorString()) { + strcat(name, getProvider()->getVendorString()); + nameSep = true; + } + if (getProvider()->getProductString()) { + if (nameSep == true) strcat(name, " "); + strcat(name, getProvider()->getProductString()); + nameSep = true; + } + if (nameSep == true) strcat(name, " "); + strcat(name, "Media"); + + _mediaObject = instantiateMediaObject(0,nbytes,_mediaBlockSize,name); + result = (_mediaObject) ? 
kIOReturnSuccess : kIOReturnBadArgument; + + if (result == kIOReturnSuccess) { + ok = _mediaObject->attach(this); /* attach media object above us */ + if (ok) { + _mediaPresent = true; + _mediaObject->registerService(); /* enable matching */ + } else { + _mediaObject->release(); + _mediaObject = 0; + return(kIOReturnNoMemory); /* give up now */ + } + } + + return(result); +} + +IOReturn +IOBlockStorageDriver::checkForMedia(void) +{ + IOReturn result; + bool currentState; + bool changed; + + IOLockLock(_mediaStateLock); + + result = getProvider()->reportMediaState(¤tState,&changed); + if (result != kIOReturnSuccess) { /* the poll operation failed */ + IOLog("%s[IOBlockStorageDriver]::checkForMedia; err '%s' from reportMediaState\n", + getName(),stringFromReturn(result)); + } else if (changed) { /* the poll succeeded, media state has changed */ + result = mediaStateHasChanged(currentState ? kIOMediaStateOnline + : kIOMediaStateOffline); + } + + IOLockUnlock(_mediaStateLock); + return(result); +} + +IOReturn +IOBlockStorageDriver::mediaStateHasChanged(IOMediaState state) +{ + IOReturn result; + + /* The media has changed state. See if it's just inserted or removed. */ + + if (state == kIOMediaStateOnline) { /* media is now present */ + + /* Allow a subclass to decide whether we accept the media. Such a + * decision might be based on things like password-protection, etc. 
+ */ + + if (validateNewMedia() == false) { /* we're told to reject it */ + rejectMedia(); /* so let subclass do whatever it wants */ + return(kIOReturnSuccess); /* pretend nothing happened */ + } + + result = recordMediaParameters(); /* learn about media */ + if (result != kIOReturnSuccess) { /* couldn't record params */ + initMediaState(); /* deny existence of new media */ + IOLog("%s[IOBlockStorageDriver]::checkForMedia: err '%s' from recordMediaParameters\n", + getName(),stringFromReturn(result)); + return(result); + } + + /* Now we do what's necessary to make the new media + * show up properly in the system. + */ + + lockForArbitration(); + result = acceptNewMedia(); + + if (result != kIOReturnSuccess) { + initMediaState(); /* deny existence of new media */ + IOLog("%s[IOBlockStorageDriver]::checkForMedia; err '%s' from acceptNewMedia\n", + getName(),stringFromReturn(result)); + } + + unlockForArbitration(); + return(result); /* all done, new media is ready */ + + } else { /* media is now absent */ + + lockForArbitration(); + result = decommissionMedia(true); /* force a teardown */ + unlockForArbitration(); + + if (result != kIOReturnSuccess && result != kIOReturnNoMedia) { + IOLog("%s[IOBlockStorageDriver]::checkForMedia; err '%s' from decommissionNewMedia\n", + getName(),stringFromReturn(result)); + return(result); + } + + return(kIOReturnSuccess); /* all done; media is gone */ + } +} + +UInt64 +IOBlockStorageDriver::constrainByteCount(UInt64 /* requestedCount */ ,bool isWrite) +{ + if (isWrite) { + return(_maxWriteByteTransfer); + } else { + return(_maxReadByteTransfer); + } +} + +/* Decommission a piece of media that has become unavailable either due to + * ejection or some outside force (e.g. the Giant Hand of the User). + * (I prefer the term "decommission" rather than "abandon." The former implies + * a well-determined procedure, whereas the latter implies leaving the media + * in an orphaned state.) 
+ */ +/* Tear down the stack above the specified object. Usually these objects will + * be of type IOMedia, but they could be any IOService. The arbitration lock is + * assumed to be held during the call. + */ +IOReturn +IOBlockStorageDriver::decommissionMedia(bool forcible) +{ + IOReturn result; + + if (_mediaObject) { + /* If this is a forcible decommission (i.e. media is gone), we don't + * care whether the teardown worked; we forget about the media. + */ + if (_mediaObject->terminate(forcible ? kIOServiceRequired : 0) || forcible) { + _mediaObject->release(); + _mediaObject = 0; + + initMediaState(); /* clear all knowledge of the media */ + result = kIOReturnSuccess; + + } else { + result = kIOReturnBusy; + } + } else { + result = kIOReturnNoMedia; + } + + return(result); +} + +IOReturn +IOBlockStorageDriver::ejectMedia(void) +{ + IOReturn result; + + if (_removable) { + + IOLockLock(_mediaStateLock); + + lockForArbitration(); + result = decommissionMedia(false); /* try to teardown */ + unlockForArbitration(); + + if (result == kIOReturnSuccess) { /* eject */ + if (lockMedia(false) != kIOReturnSuccess) + IOLog("%s: Unable to unlock removable media.\n", getName()); + + (void)getProvider()->doEjectMedia(); /* ignore any error */ + } + + IOLockUnlock(_mediaStateLock); + + return(result); + + } else { + return(kIOReturnUnsupported); + } +} + +void +IOBlockStorageDriver::executeRequest(UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion, + IOBlockStorageDriver::Context * context) +{ + UInt32 block; + UInt32 nblks; + IOReturn result; + + if (!_mediaPresent) { /* no media? you lose */ + complete(completion, kIOReturnNoMedia,0); + return; + } + + /* We know that we are never called with a request too large, + * nor one that is misaligned with a block. 
+ */ + assert((byteStart % _mediaBlockSize) == 0); + assert((buffer->getLength() % _mediaBlockSize) == 0); + + block = byteStart / _mediaBlockSize; + nblks = buffer->getLength() / _mediaBlockSize; + +/* Now the protocol-specific provider implements the actual + * start of the data transfer: */ + + // Tickle the root power domain to reset the sleep countdown. + if (gIORootPowerDomain) { + gIORootPowerDomain->activityTickle(kIOPMSubclassPolicy); + } + + result = getProvider()->doAsyncReadWrite(buffer,block,nblks,completion); + + if (result != kIOReturnSuccess) { /* it failed to start */ + IOLog("%s[IOBlockStorageDriver]; executeRequest: request failed to start!\n",getName()); + complete(completion,result); + return; + } +} + +IOReturn +IOBlockStorageDriver::formatMedia(UInt64 byteCapacity) +{ + if (!_mediaPresent) { + return(kIOReturnNoMedia); + } + + return(getProvider()->doFormatMedia(byteCapacity)); +} + +const char * +IOBlockStorageDriver::getDeviceTypeName(void) +{ + return(kIOBlockStorageDeviceTypeGeneric); +} + +UInt32 +IOBlockStorageDriver::getFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const +{ + return(getProvider()->doGetFormatCapacities(capacities,capacitiesMaxCount)); +} + +UInt64 +IOBlockStorageDriver::getMediaBlockSize() const +{ + return(_mediaBlockSize); +} + +IOMediaState +IOBlockStorageDriver::getMediaState() const +{ + if (_mediaPresent) { + return(kIOMediaStateOnline); + } else { + return(kIOMediaStateOffline); + } +} + +bool +IOBlockStorageDriver::handleStart(IOService * provider) +{ + IOReturn result; + + /* Print device name/type information on the console: */ + + /*The protocol-specific provider determines whether the media is removable. 
*/ + + result = getProvider()->reportRemovability(&_removable); + if (result != kIOReturnSuccess) { + IOLog("%s[IOBlockStorageDriver]::handleStart; err '%s' from reportRemovability\n", + getName(),stringFromReturn(result)); + return(false); + } + + if (_removable) { + + /* The protocol-specific provider determines whether we must poll to detect + * media insertion. Nonremovable devices never need polling. + */ + + result = getProvider()->reportPollRequirements(&_pollIsRequired,&_pollIsExpensive); + + if (result != kIOReturnSuccess) { + IOLog("%s[IOBlockStorageDriver]::handleStart; err '%s' from reportPollRequirements\n", + getName(),stringFromReturn(result)); + return(false); + } + + /* The protocol-specific provider determines whether the media is ejectable + * under software control. + */ + result = getProvider()->reportEjectability(&_ejectable); + if (result != kIOReturnSuccess) { + IOLog("%s[IOBlockStorageDriver]::handleStart; err '%s' from reportEjectability\n", + getName(),stringFromReturn(result)); + return(false); + } + + /* The protocol-specific provider determines whether the media is lockable + * under software control. + */ + result = getProvider()->reportLockability(&_lockable); + if (result != kIOReturnSuccess) { + IOLog("%s[IOBlockStorageDriver]::handleStart; err '%s' from reportLockability\n", + getName(),stringFromReturn(result)); + return(false); + } + + } else { /* fixed disk: not ejectable, not lockable */ + _ejectable = false; + _lockable = false; + _pollIsRequired = true; /* polling detects device disappearance */ + } + + /* Check for the device being ready with media inserted: */ + + result = checkForMedia(); + + /* The poll should never fail for nonremovable media: */ + + if (result != kIOReturnSuccess && !_removable) { + IOLog("%s[IOBlockStorageDriver]::handleStart: err '%s' from checkForMedia\n", + getName(),stringFromReturn(result)); + return(false); + } + + return(true); +} + +/* The driver has been instructed to yield. 
The arbitration lock is assumed to + * be held during the call. + */ +bool +IOBlockStorageDriver::handleYield(IOService * provider, + IOOptionBits options, + void * argument) +{ + // Determine whether we can yield (for non-required yield requests). + + if ( (options & kIOServiceRequired) == 0 && isOpen() != false ) + { + return false; + } + + // Halt the poller mechanism. + + if ( isMediaEjectable() != false && + isMediaPollRequired() != false && + isMediaPollExpensive() == false ) + { + unschedulePoller(); // (unschedule the poller) + } + + // Force a teardown. + + decommissionMedia(true); + + return true; +} + +void +IOBlockStorageDriver::initMediaState(void) +{ + _mediaPresent = false; + _writeProtected = false; +} + +IOMedia * +IOBlockStorageDriver::instantiateDesiredMediaObject(void) +{ + return(new IOMedia); +} + +IOMedia * +IOBlockStorageDriver::instantiateMediaObject(UInt64 base,UInt64 byteSize, + UInt32 blockSize,char *mediaName) +{ + IOMedia *m; + bool result; + + m = instantiateDesiredMediaObject(); + if (m == NULL) { + return(NULL); + } + + result = m->init( base, /* base byte offset */ + byteSize, /* byte size */ + blockSize, /* preferred block size */ + _ejectable, /* TRUE if ejectable */ + true, /* TRUE if whole physical media */ + !_writeProtected, /* TRUE if writable */ + ""); /* content hint */ + + if (result) { + m->setName(mediaName); + return(m); + + } else { /* some init error */ + m->release(); + return(NULL); /* beats me...call it this error */ + } +} + +bool +IOBlockStorageDriver::isMediaEjectable(void) const +{ + return(_ejectable); +} + +bool +IOBlockStorageDriver::isMediaPollExpensive(void) const +{ + return(_pollIsExpensive); +} + +bool +IOBlockStorageDriver::isMediaPollRequired(void) const +{ + return(_pollIsRequired); +} + +bool +IOBlockStorageDriver::isMediaWritable(void) const +{ + return(!_writeProtected); +} + +IOReturn +IOBlockStorageDriver::lockMedia(bool locked) +{ + if (_lockable) { + 
return(getProvider()->doLockUnlockMedia(locked)); + } else { + return(kIOReturnUnsupported); + } +} + +IOReturn +IOBlockStorageDriver::pollMedia(void) +{ + if (!_pollIsRequired) { /* shouldn't poll; it's an error */ + + return(kIOReturnUnsupported); + + } else { /* poll is required...do it */ + + return(checkForMedia()); + + } +} + +IOReturn +IOBlockStorageDriver::recordMediaParameters(void) +{ + IOReturn result; + + /* Determine the device's block size and max block number. + * What should an unformatted device report? All zeroes, or an error? + */ + + result = getProvider()->reportBlockSize(&_mediaBlockSize); + if (result != kIOReturnSuccess) { + goto err; + } + + result = getProvider()->reportMaxValidBlock(&_maxBlockNumber); + if (result != kIOReturnSuccess) { + goto err; + } + + /* Calculate the maximum allowed byte transfers for reads and writes. */ + + result = getProvider()->reportMaxReadTransfer(_mediaBlockSize,&_maxReadByteTransfer); + if (result != kIOReturnSuccess) { + goto err; + } + + result = getProvider()->reportMaxWriteTransfer(_mediaBlockSize,&_maxWriteByteTransfer); + if (result != kIOReturnSuccess) { + goto err; + } + + /* Is the media write-protected? */ + + result = getProvider()->reportWriteProtection(&_writeProtected); + if (result != kIOReturnSuccess) { + goto err; + } + + return(kIOReturnSuccess); /* everything was successful */ + + /* If we fall thru to here, we had some kind of error. Set everything to + * a reasonable state since we haven't got any real information. 
+ */ + +err: + _mediaPresent = false; + _writeProtected = true; + + return(result); +} + +void +IOBlockStorageDriver::rejectMedia(void) +{ + (void)getProvider()->doEjectMedia(); /* eject it, ignoring any error */ + initMediaState(); /* deny existence of new media */ +} + +IOReturn +IOBlockStorageDriver::synchronizeCache(IOService *client) +{ + return(getProvider()->doSynchronizeCache()); +} + +bool +IOBlockStorageDriver::validateNewMedia(void) +{ + return(true); +} + +// ----------------------------------------------------------------------------- +// Deblocker Implementation + +#include + +class IODeblocker : public IOMemoryDescriptor +{ + OSDeclareDefaultStructors(IODeblocker); + +protected: + + UInt64 _blockSize; + + struct + { + IOMemoryDescriptor * buffer; + UInt32 offset; + UInt32 length; + } _chunks[3]; + UInt32 _chunksCount; + + IOBufferMemoryDescriptor * _excessBuffer; + UInt64 _excessCountFinal; + UInt64 _excessCountStart; + + IOMemoryDescriptor * _requestBuffer; + IOStorageCompletion _requestCompletion; + void * _requestContext; + UInt64 _requestCount; + bool _requestIsOneBlock; + UInt64 _requestStart; + + enum + { + kStageInit, + kStagePrepareExcessStart, + kStagePrepareExcessFinal, + kStageLast, + kStageDone + } _stage; + + virtual void free(); + + virtual bool initWithAddress( void * address, /* not supported */ + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithAddress( vm_address_t address, /* not supported */ + IOByteCount withLength, + IODirection withDirection, + task_t withTask ); + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, /* not supported */ + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithPhysicalRanges( + IOPhysicalRange * ranges, /* not supproted */ + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); + + virtual bool initWithRanges( IOVirtualRange * ranges, /* not supported */ + UInt32 withCount, + IODirection withDirection, 
+ task_t withTask, + bool asReference = false ); + + virtual void * getVirtualSegment( IOByteCount offset, /* not supported */ + IOByteCount * length ); + + IOMemoryDescriptor::withAddress; /* not supported */ + IOMemoryDescriptor::withPhysicalAddress; /* not supported */ + IOMemoryDescriptor::withPhysicalRanges; /* not supported */ + IOMemoryDescriptor::withRanges; /* not supported */ + IOMemoryDescriptor::withSubRange; /* not supported */ + +public: + + static IODeblocker * withBlockSize( + UInt64 blockSize, + UInt64 withRequestStart, + IOMemoryDescriptor * withRequestBuffer, + IOStorageCompletion withRequestCompletion, + void * withRequestContext ); + + virtual bool initWithBlockSize( + UInt64 blockSize, + UInt64 withRequestStart, + IOMemoryDescriptor * withRequestBuffer, + IOStorageCompletion withRequestCompletion, + void * withRequestContext ); + + virtual IOPhysicalAddress getPhysicalSegment( IOByteCount offset, + IOByteCount * length ); + + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone); + + virtual IOReturn complete(IODirection forDirection = kIODirectionNone); + + virtual IOByteCount readBytes( IOByteCount offset, + void * bytes, + IOByteCount withLength ); + + virtual IOByteCount writeBytes( IOByteCount offset, + const void * bytes, + IOByteCount withLength ); + + virtual bool getNextStage(UInt64 * byteStart); + + virtual void getRequestCompletion( IOStorageCompletion * completion, + IOReturn * status, + UInt64 * actualByteCount ); + + virtual IOMemoryDescriptor * getRequestBuffer(); + + virtual void * getRequestContext(); +}; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#undef super +#define super IOMemoryDescriptor +OSDefineMetaClassAndStructors(IODeblocker, IOMemoryDescriptor) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::initWithAddress( void * /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ ) 
+{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::initWithAddress( vm_address_t /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ , + task_t /* withTask */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::initWithPhysicalAddress( + IOPhysicalAddress /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ ) +{ + return false; +} + + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::initWithPhysicalRanges( + IOPhysicalRange * /* ranges */ , + UInt32 /* withCount */ , + IODirection /* withDirection */ , + bool /* asReference */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::initWithRanges( IOVirtualRange * /* ranges */ , + UInt32 /* withCount */ , + IODirection /* withDirection */ , + task_t /* withTask */ , + bool /* asReference */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IODeblocker * IODeblocker::withBlockSize( + UInt64 blockSize, + UInt64 withRequestStart, + IOMemoryDescriptor * withRequestBuffer, + IOStorageCompletion withRequestCompletion, + void * withRequestContext ) +{ + // + // Create a new IODeblocker. 
+ // + + IODeblocker * me = new IODeblocker; + + if ( me && me->initWithBlockSize( + /* blockSize */ blockSize, + /* withRequestStart */ withRequestStart, + /* withRequestBuffer */ withRequestBuffer, + /* withRequestCompletion */ withRequestCompletion, + /* withRequestContext */ withRequestContext ) == false ) + { + me->release(); + me = 0; + } + + return me; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::initWithBlockSize( + UInt64 blockSize, + UInt64 withRequestStart, + IOMemoryDescriptor * withRequestBuffer, + IOStorageCompletion withRequestCompletion, + void * withRequestContext ) +{ + // + // Initialize an IODeblocker. + // + // _excessCountStart = byte count from media boundary to start of request + // _excessCountFinal = byte count from end of request to a media boundary + // + + UInt32 excessBufferSize = 0; + + // Ask our superclass' opinion. + + if ( super::init() == false ) return false; + + // Initialize our minimal state. + + _blockSize = blockSize; + _chunksCount = 0; + _direction = kIODirectionNone; + _length = 0; + + _requestBuffer = withRequestBuffer; + _requestBuffer->retain(); + _requestCompletion = withRequestCompletion; + _requestContext = withRequestContext; + _requestCount = withRequestBuffer->getLength(); + _requestStart = withRequestStart; + + _excessCountStart = (withRequestStart ) % blockSize; + _excessCountFinal = (withRequestStart + _requestCount) % blockSize; + if ( _excessCountFinal ) _excessCountFinal = blockSize - _excessCountFinal; + + _requestIsOneBlock = (_excessCountStart + _requestCount <= blockSize); + + // Determine the necessary size for our scratch buffer. 
+ + switch ( _requestBuffer->getDirection() ) + { + case kIODirectionIn: // (read) + { + excessBufferSize = max(_excessCountStart, _excessCountFinal); + } break; + + case kIODirectionOut: // (write) + { + if ( _excessCountStart ) excessBufferSize += blockSize; + if ( _excessCountFinal ) excessBufferSize += blockSize; + + // If there is excess both ends of the original request, but both + // ends reside within the same media block, then we could shorten + // our buffer size to just one block. + + if ( _excessCountStart && _excessCountFinal && _requestIsOneBlock ) + { + excessBufferSize -= blockSize; + } + } break; + + default: + { + assert(0); + } break; + } + + // Allocate our scratch buffer. + + if ( excessBufferSize ) + { + _excessBuffer = IOBufferMemoryDescriptor::withCapacity( + /* capacity */ excessBufferSize, + /* withDirection */ kIODirectionNone ); + if ( _excessBuffer == 0 ) return false; + } + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IODeblocker::free() +{ + // + // Free all of this object's outstanding resources. + // + + if ( _requestBuffer ) _requestBuffer->release(); + if ( _excessBuffer ) _excessBuffer->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IODeblocker::prepare(IODirection forDirection) +{ + // + // Prepare the memory for an I/O transfer. + // + // This involves paging in the memory and wiring it down for the duration + // of the transfer. The complete() method finishes the processing of the + // memory after the I/O transfer finishes. 
+ // + + unsigned index; + IOReturn status = kIOReturnInternalError; + IOReturn statusUndo; + + if ( forDirection == kIODirectionNone ) + { + forDirection = _direction; + } + + for ( index = 0; index < _chunksCount; index++ ) + { + status = _chunks[index].buffer->prepare(forDirection); + if ( status != kIOReturnSuccess ) break; + } + + if ( status != kIOReturnSuccess ) + { + for ( unsigned indexUndo = 0; indexUndo <= index; indexUndo++ ) + { + statusUndo = _chunks[index].buffer->complete(forDirection); + assert(statusUndo == kIOReturnSuccess); + } + } + + return status; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IODeblocker::complete(IODirection forDirection) +{ + // + // Complete processing of the memory after an I/O transfer finishes. + // + // This method shouldn't be called unless a prepare() was previously issued; + // the prepare() and complete() must occur in pairs, before and after an I/O + // transfer. + // + + IOReturn status; + IOReturn statusFinal = kIOReturnSuccess; + + if ( forDirection == kIODirectionNone ) + { + forDirection = _direction; + } + + for ( unsigned index = 0; index < _chunksCount; index++ ) + { + status = _chunks[index].buffer->complete(forDirection); + if ( status != kIOReturnSuccess ) statusFinal = status; + assert(status == kIOReturnSuccess); + } + + return statusFinal; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOPhysicalAddress IODeblocker::getPhysicalSegment( IOByteCount offset, + IOByteCount * length ) +{ + // + // This method returns the physical address of the byte at the given offset + // into the memory, and optionally the length of the physically contiguous + // segment from that offset. 
+ // + + assert(offset <= _length); + + for ( unsigned index = 0; index < _chunksCount; index++ ) + { + if ( offset < _chunks[index].length ) + { + IOPhysicalAddress address; + address = _chunks[index].buffer->getPhysicalSegment( + /* offset */ offset + _chunks[index].offset, + /* length */ length ); + if ( length ) *length = min(*length, _chunks[index].length); + return address; + } + offset -= _chunks[index].length; + } + + if ( length ) *length = 0; + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void * IODeblocker::getVirtualSegment( IOByteCount /* offset */ , + IOByteCount * /* length */ ) +{ + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOByteCount IODeblocker::readBytes( IOByteCount offset, + void * bytes, + IOByteCount withLength ) +{ + // + // Copies data from the memory descriptor's buffer at the given offset, to + // the specified buffer. Returns the number of bytes copied. + // + + IOByteCount bytesCopied = 0; + unsigned index; + + for ( index = 0; index < _chunksCount; index++ ) + { + if ( offset < _chunks[index].length ) break; + offset -= _chunks[index].length; + } + + for ( ; index < _chunksCount && withLength; index++) + { + IOByteCount copy = min(_chunks[index].length, withLength); + IOByteCount copied = _chunks[index].buffer->readBytes( + /* offset */ offset + _chunks[index].offset, + /* bytes */ bytes, + /* length */ copy ); + + bytesCopied += copied; + if ( copied != copy ) break; + + bytes = ((UInt8 *) bytes) + copied; + withLength -= copied; + offset = 0; + } + + return bytesCopied; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOByteCount IODeblocker::writeBytes( IOByteCount offset, + const void * bytes, + IOByteCount withLength ) +{ + // + // Copies data to the memory descriptor's buffer at the given offset, from + // the specified buffer. Returns the number of bytes copied. 
+ // + + IOByteCount bytesCopied = 0; + unsigned index; + + for ( index = 0; index < _chunksCount; index++ ) + { + if ( offset < _chunks[index].length ) break; + offset -= _chunks[index].length; + } + + for ( ; index < _chunksCount && withLength; index++) + { + IOByteCount copy = min(_chunks[index].length, withLength); + IOByteCount copied = _chunks[index].buffer->writeBytes( + /* offset */ offset + _chunks[index].offset, + /* bytes */ bytes, + /* length */ copy ); + + bytesCopied += copied; + if ( copied != copy ) break; + + bytes = ((UInt8 *) bytes) + copied; + withLength -= copied; + offset = 0; + } + + return bytesCopied; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IODeblocker::getNextStage(UInt64 * byteStart) +{ + // + // Obtain the next stage of the transfer. The transfer buffer will be the + // deblocker object itself and the byte start will be returned in byteStart. + // + // This method must not be called if the current stage failed with an error + // or a short byte count, but instead getRequestCompletion() must be called + // to adjust the status and actual byte count (with respect to the original + // request) and return the original request's completion routine. The same + // call to getRequestCompletion() should also be done if the getNextStage() + // method returns false. 
+ // + + _chunksCount = 0; + _direction = kIODirectionNone; + _length = 0; + + switch ( _requestBuffer->getDirection() ) + { + case kIODirectionIn: // (read) + { + switch ( _stage ) + { + case kStageInit: + { + _stage = kStageLast; + _excessBuffer->setDirection(kIODirectionIn); + _direction = kIODirectionIn; + *byteStart = _requestStart - _excessCountStart; + + if ( _excessCountStart ) + { + _chunks[_chunksCount].buffer = _excessBuffer; + _chunks[_chunksCount].offset = 0; + _chunks[_chunksCount].length = _excessCountStart; + _chunksCount++; + } + + _chunks[_chunksCount].buffer = _requestBuffer; + _chunks[_chunksCount].offset = 0; + _chunks[_chunksCount].length = _requestBuffer->getLength(); + _chunksCount++; + + if ( _excessCountFinal ) + { + _chunks[_chunksCount].buffer = _excessBuffer; + _chunks[_chunksCount].offset = 0; + _chunks[_chunksCount].length = _excessCountFinal; + _chunksCount++; + } + } break; + + case kStageLast: + { + _stage = kStageDone; + } break; + + default: + { + assert(0); + } break; + } // (switch) + } break; + + case kIODirectionOut: // (write) + { + switch ( _stage ) + { + case kStageInit: + { + if ( _excessCountStart ) + { + _stage = kStagePrepareExcessStart; + _excessBuffer->setDirection(kIODirectionIn); + _direction = kIODirectionIn; + *byteStart = _requestStart - _excessCountStart; + + _chunks[_chunksCount].buffer = _excessBuffer; + _chunks[_chunksCount].offset = 0; + _chunks[_chunksCount].length = _blockSize; + _chunksCount++; + break; + } + } // (fall thru) + + case kStagePrepareExcessStart: + { + if ( _excessCountFinal ) + { + // We do not issue this stage if the original transfer + // resides within one media block, and we already read + // that block into our buffer in the previous stage. 
+ + if ( !_excessCountStart || !_requestIsOneBlock ) + { + _stage = kStagePrepareExcessFinal; + _excessBuffer->setDirection(kIODirectionIn); + _direction = kIODirectionIn; + *byteStart = _requestStart + _requestCount + + _excessCountFinal - _blockSize; + + _chunks[_chunksCount].buffer = _excessBuffer; + _chunks[_chunksCount].offset = (_requestIsOneBlock) + ? 0 + : (_excessCountStart) + ? _blockSize + : 0; + _chunks[_chunksCount].length = _blockSize; + _chunksCount++; + break; + } + } + } // (fall thru) + + case kStagePrepareExcessFinal: + { + _stage = kStageLast; + _excessBuffer->setDirection(kIODirectionOut); + _direction = kIODirectionOut; + *byteStart = _requestStart - _excessCountStart; + + if ( _excessCountStart ) + { + _chunks[_chunksCount].buffer = _excessBuffer; + _chunks[_chunksCount].offset = 0; + _chunks[_chunksCount].length = _excessCountStart; + _chunksCount++; + } + + _chunks[_chunksCount].buffer = _requestBuffer; + _chunks[_chunksCount].offset = 0; + _chunks[_chunksCount].length = _requestBuffer->getLength(); + _chunksCount++; + + if ( _excessCountFinal ) + { + _chunks[_chunksCount].buffer = _excessBuffer; + _chunks[_chunksCount].offset = (_requestIsOneBlock) + ? 0 + : (_excessCountStart) + ? _blockSize + : 0; + _chunks[_chunksCount].offset += ( _blockSize - + _excessCountFinal ); + _chunks[_chunksCount].length = _excessCountFinal; + _chunksCount++; + } + } break; + + case kStageLast: + { + _stage = kStageDone; + } break; + + default: + { + assert(0); + } break; + } // (switch) + } break; + + default: + { + assert(0); + } break; + } // (switch) + + // Determine whether we have an abort or completion condition. + + if ( _chunksCount == 0 ) return false; + + // Compute the total length of the descriptor over all chunks. 
+ + for ( unsigned index = 0; index < _chunksCount; index++ ) + { + _length += _chunks[index].length; + } + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IODeblocker::getRequestCompletion( IOStorageCompletion * completion, + IOReturn * status, + UInt64 * actualByteCount ) +{ + // + // Obtain the completion information for the original request, taking + // into account the status and actual byte count of the current stage. + // + + *completion = _requestCompletion; + + switch ( _stage ) + { + case kStageInit: // (initial stage) + { + *status = kIOReturnInternalError; + *actualByteCount = 0; + } break; + + case kStagePrepareExcessStart: // (write preparation stage) + case kStagePrepareExcessFinal: + { + *actualByteCount = 0; + } break; + + case kStageLast: // (last stage) + case kStageDone: + { + if ( *actualByteCount > _excessCountStart ) + *actualByteCount -= _excessCountStart; + else + *actualByteCount = 0; + + if ( *actualByteCount > _requestBuffer->getLength() ) + *actualByteCount = _requestBuffer->getLength(); + } break; + + default: + { + assert(0); + } break; + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMemoryDescriptor * IODeblocker::getRequestBuffer() +{ + // + // Obtain the buffer for the original request. + // + + return _requestBuffer; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void * IODeblocker::getRequestContext() +{ + // + // Obtain the context for the original request. 
+ // + + return _requestContext; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::deblockRequest( + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion, + IOBlockStorageDriver::Context * context ) +{ + // + // The deblockRequest method checks to see if the incoming request rests + // on the media's block boundaries, and if not, deblocks it. Deblocking + // involves rounding out the request to the nearest block boundaries and + // transferring the excess bytes into a scratch buffer. + // + // This method is part of a sequence of methods invoked for each read/write + // request. The first is prepareRequest, which allocates and prepares some + // context for the transfer; the second is deblockRequest, which aligns the + // transfer at the media block boundaries; and the third is executeRequest, + // which implements the actual transfer from the block storage device. + // + // The current implementation of deblockRequest is asynchronous. + // + + IODeblocker * deblocker; + + // If the request is aligned with the media's block boundaries, we + // do short-circuit the deblocker and call executeRequest directly. + + if ( (byteStart % context->block.size) == 0 && + (buffer->getLength() % context->block.size) == 0 ) + { + executeRequest(byteStart, buffer, completion, context); + return; + } + + // Build a deblocker object. + + deblocker = IODeblocker::withBlockSize( + /* blockSize */ context->block.size, + /* withRequestStart */ byteStart, + /* withRequestBuffer */ buffer, + /* withRequestCompletion */ completion, + /* withRequestContext */ context ); + + if ( deblocker == 0 ) + { + complete(completion, kIOReturnNoMemory); + return; + } + + // This implementation of the deblocker permits only one read-modify-write + // at any given time. 
Note that other write requests can, and do, proceed + // simultaneously so long as they do not require the deblocker -- refer to + // the read() and the write() routines for the short-cut logic. + // + // Note that the original buffer during a read-modify-write operation must + // be prepared on the client's thread, that is, right now, or else it will + // happen on the controller's thread after the read stage(s) complete, and + // this is bad (causes deadlock if that controller was the swap device). + + if ( buffer->getDirection() == kIODirectionOut ) + { + if ( buffer->prepare() != kIOReturnSuccess ) + { + deblocker->release(); + complete(completion, kIOReturnNoMemory); + return; + } + + IOLockLock(_deblockRequestWriteLock); + } + + // Execute the transfer (for the next stage). + + deblockRequestCompletion(this, deblocker, kIOReturnSuccess, 0); + + return; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOBlockStorageDriver::deblockRequestCompletion( void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount ) +{ + // + // This is the completion routine for the aligned deblocker subrequests. + // It verifies the success of the just-completed stage, transitions to + // the next stage, then builds and issues a transfer for the next stage. + // + + UInt64 byteStart; + IOStorageCompletion completion; + Context * context; + IODeblocker * deblocker = (IODeblocker *) parameter; + IOBlockStorageDriver * driver = (IOBlockStorageDriver *) target; + + // Determine whether an error occurred or whether there are no more stages. + + if ( actualByteCount < deblocker->getLength() || + status != kIOReturnSuccess || + deblocker->getNextStage(&byteStart) == false ) + { + // Unlock the write-lock in order to allow the next write to proceed. 
+ + if ( deblocker->getRequestBuffer()->getDirection() == kIODirectionOut ) + { + IOLockUnlock(driver->_deblockRequestWriteLock); + + deblocker->getRequestBuffer()->complete(); + } + + // Obtain the completion information for the original request, taking + // into account the status and actual byte count of the current stage. + + deblocker->getRequestCompletion(&completion, &status, &actualByteCount); + + // Complete the original request. + + IOStorage::complete(completion, status, actualByteCount); + + // Release our resources. + + deblocker->release(); + + return; + } + + // Execute the transfer (for the next stage). + + completion.target = driver; + completion.action = deblockRequestCompletion; + completion.parameter = deblocker; + + context = (Context *) deblocker->getRequestContext(); + + driver->executeRequest(byteStart, deblocker, completion, context); + + return; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + 
+OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 15); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 16); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 17); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 18); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 19); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 20); + +// - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 21); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 22); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 23); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 24); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 25); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 26); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 27); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 28); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 29); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 30); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOBlockStorageDriver, 31); diff --git a/iokit/Families/IOStorage/IOFDiskPartitionScheme.cpp b/iokit/Families/IOStorage/IOFDiskPartitionScheme.cpp new file mode 100644 index 000000000..3121d021e --- /dev/null +++ b/iokit/Families/IOStorage/IOFDiskPartitionScheme.cpp @@ -0,0 +1,566 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +#define super IOPartitionScheme +OSDefineMetaClassAndStructors(IOFDiskPartitionScheme, IOPartitionScheme); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Notes +// +// o the on-disk structure's fields are: 16-bit packed, little-endian formatted +// o the relsect and numsect block values assume the drive's natural block size +// o the relsect block value is: +// o for data partitions: +// o relative to the FDisk map that defines the partition +// o for extended partitions defined in the root-level FDisk map: +// o relative to the FDisk map that defines the partition (start of disk) +// o for extended partitions defined in a second-level or deeper FDisk map: +// o relative to the second-level FDisk map, regardless of depth +// o the valid extended partition types are: 0x05, 0x0F, 0x85 +// o there should be no more than one extended partition defined per FDisk map +// + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define kIOFDiskPartitionSchemeContentTable "Content 
Table" + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOFDiskPartitionScheme::init(OSDictionary * properties = 0) +{ + // + // Initialize this object's minimal state. + // + + // State our assumptions. + + assert(sizeof(fdisk_part) == 16); // (compiler/platform check) + assert(sizeof(disk_blk0) == 512); // (compiler/platform check) + + // Ask our superclass' opinion. + + if ( super::init(properties) == false ) return false; + + // Initialize our state. + + _partitions = 0; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOFDiskPartitionScheme::free() +{ + // + // Free all of this object's outstanding resources. + // + + if ( _partitions ) _partitions->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOService * IOFDiskPartitionScheme::probe(IOService * provider, SInt32 * score) +{ + // + // Determine whether the provider media contains an FDisk partition map. + // + + // State our assumptions. + + assert(OSDynamicCast(IOMedia, provider)); + + // Ask our superclass' opinion. + + if ( super::probe(provider, score) == 0 ) return 0; + + // Scan the provider media for an FDisk partition map. + + _partitions = scan(score); + + // There might be an FDisk partition scheme on disk with boot code, but with + // no partitions defined. We don't consider this a match and return failure + // from probe. + + if ( _partitions && _partitions->getCount() == 0 ) + { + _partitions->release(); + _partitions = 0; + } + + return ( _partitions ) ? this : 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOFDiskPartitionScheme::start(IOService * provider) +{ + // + // Publish the new media objects which represent our partitions. + // + + IOMedia * partition; + OSIterator * partitionIterator; + + // State our assumptions. 
+ + assert(_partitions); + + // Ask our superclass' opinion. + + if ( super::start(provider) == false ) return false; + + // Attach and register the new media objects representing our partitions. + + partitionIterator = OSCollectionIterator::withCollection(_partitions); + if ( partitionIterator == 0 ) return false; + + while ( (partition = (IOMedia *) partitionIterator->getNextObject()) ) + { + if ( partition->attach(this) ) + { + partition->registerService(); + } + } + + partitionIterator->release(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSSet * IOFDiskPartitionScheme::scan(SInt32 * score) +{ + // + // Scan the provider media for an FDisk partition map. Returns the set + // of media objects representing each of the partitions (the retain for + // the set is passed to the caller), or null should no partition map be + // found. The default probe score can be adjusted up or down, based on + // the confidence of the scan. + // + + IOBufferMemoryDescriptor * buffer = 0; + UInt32 bufferSize = 0; + UInt32 fdiskBlock = 0; + UInt32 fdiskBlockExtn = 0; + UInt32 fdiskBlockNext = 0; + UInt32 fdiskID = 0; + disk_blk0 * fdiskMap = 0; + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + bool mediaIsOpen = false; + OSSet * partitions = 0; + IOReturn status = kIOReturnError; + + // Determine whether this media is formatted. + + if ( media->isFormatted() == false ) goto scanErr; + + // Determine whether this media has an appropriate block size. + + if ( (mediaBlockSize % sizeof(disk_blk0)) ) goto scanErr; + + // Allocate a buffer large enough to hold one map, rounded to a media block. 
+ + bufferSize = IORound(sizeof(disk_blk0), mediaBlockSize); + buffer = IOBufferMemoryDescriptor::withCapacity( + /* capacity */ bufferSize, + /* withDirection */ kIODirectionIn ); + if ( buffer == 0 ) goto scanErr; + + // Allocate a set to hold the set of media objects representing partitions. + + partitions = OSSet::withCapacity(4); + if ( partitions == 0 ) goto scanErr; + + // Open the media with read access. + + mediaIsOpen = media->open(this, 0, kIOStorageAccessReader); + if ( mediaIsOpen == false ) goto scanErr; + + // Scan the media for FDisk partition map(s). + + do + { + // Read the next FDisk map into our buffer. + +///m:2333367:workaround:commented:start +// status = media->read(this, fdiskBlock * mediaBlockSize, buffer); +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = media->IOStorage::read(this, fdiskBlock * mediaBlockSize, buffer); +///m:2333367:workaround:added:stop + if ( status != kIOReturnSuccess ) goto scanErr; + + fdiskMap = (disk_blk0 *) buffer->getBytesNoCopy(); + + // Determine whether the partition map signature is present. + + if ( OSSwapLittleToHostInt16(fdiskMap->signature) != DISK_SIGNATURE ) + { + goto scanErr; + } + + // Scan for valid partition entries in the partition map. + + fdiskBlockNext = 0; + + for ( unsigned index = 0; index < DISK_NPART; index++ ) + { + // Determine whether this is an extended (vs. data) partition. + + if ( isPartitionExtended(fdiskMap->parts + index) ) // (extended) + { + // If peer extended partitions exist, we accept only the first. + + if ( fdiskBlockNext == 0 ) // (no peer extended partition) + { + fdiskBlockNext = fdiskBlockExtn + + OSSwapLittleToHostInt32( + /* data */ fdiskMap->parts[index].relsect ); + + if ( fdiskBlockNext * mediaBlockSize >= media->getSize() ) + { + fdiskBlockNext = 0; // (exceeds confines of media) + } + } + } + else if ( isPartitionUsed(fdiskMap->parts + index) ) // (data) + { + // Prepare this partition's ID. 
+ + fdiskID = ( fdiskBlock == 0 ) ? (index + 1) : (fdiskID + 1); + + // Determine whether the partition is corrupt (fatal). + + if ( isPartitionCorrupt( + /* partition */ fdiskMap->parts + index, + /* partitionID */ fdiskID, + /* fdiskBlock */ fdiskBlock ) ) + { + goto scanErr; + } + + // Determine whether the partition is invalid (skipped). + + if ( isPartitionInvalid( + /* partition */ fdiskMap->parts + index, + /* partitionID */ fdiskID, + /* fdiskBlock */ fdiskBlock ) ) + { + continue; + } + + // Create a media object to represent this partition. + + IOMedia * newMedia = instantiateMediaObject( + /* partition */ fdiskMap->parts + index, + /* partitionID */ fdiskID, + /* fdiskBlock */ fdiskBlock ); + + if ( newMedia ) + { + partitions->setObject(newMedia); + newMedia->release(); + } + } + } + + // Prepare for first extended partition, if any. + + if ( fdiskBlock == 0 ) + { + fdiskID = DISK_NPART; + fdiskBlockExtn = fdiskBlockNext; + } + + } while ( (fdiskBlock = fdiskBlockNext) ); + + // Release our resources. + + media->close(this); + buffer->release(); + + return partitions; + +scanErr: + + // Release our resources. + + if ( mediaIsOpen ) media->close(this); + if ( partitions ) partitions->release(); + if ( buffer ) buffer->release(); + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOFDiskPartitionScheme::isPartitionExtended(fdisk_part * partition) +{ + // + // Ask whether the given partition is extended. + // + + return ( partition->systid == 0x05 || + partition->systid == 0x0F || + partition->systid == 0x85 ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOFDiskPartitionScheme::isPartitionUsed(fdisk_part * partition) +{ + // + // Ask whether the given partition is used. 
+ // + + return ( partition->systid != 0 && partition->numsect != 0 ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOFDiskPartitionScheme::isPartitionCorrupt( + fdisk_part * /* partition */ , + UInt32 /* partitionID */ , + UInt32 /* fdiskBlock */ ) +{ + // + // Ask whether the given partition appears to be corrupt. A partition that + // is corrupt will cause the failure of the FDisk partition map recognition + // altogether. + // + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOFDiskPartitionScheme::isPartitionInvalid( fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ) +{ + // + // Ask whether the given partition appears to be invalid. A partition that + // is invalid will cause it to be skipped in the scan, but will not cause a + // failure of the FDisk partition map recognition. + // + + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + UInt64 partitionBase = 0; + UInt64 partitionSize = 0; + + // Compute the relative byte position and size of the new partition. + + partitionBase = OSSwapLittleToHostInt32(partition->relsect) + fdiskBlock; + partitionSize = OSSwapLittleToHostInt32(partition->numsect); + partitionBase *= mediaBlockSize; + partitionSize *= mediaBlockSize; + + // Determine whether the partition starts at (or past) the end-of-media. + + if ( partitionBase >= media->getSize() ) return true; + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOFDiskPartitionScheme::instantiateMediaObject( + fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ) +{ + // + // Instantiate a new media object to represent the given partition. 
+ // + + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + UInt64 partitionBase = 0; + char * partitionHint = 0; + UInt64 partitionSize = 0; + + // Compute the relative byte position and size of the new partition. + + partitionBase = OSSwapLittleToHostInt32(partition->relsect) + fdiskBlock; + partitionSize = OSSwapLittleToHostInt32(partition->numsect); + partitionBase *= mediaBlockSize; + partitionSize *= mediaBlockSize; + + // Clip the size of the new partition if it extends past the end-of-media. + + if ( partitionBase + partitionSize > media->getSize() ) + { + partitionSize = media->getSize() - partitionBase; + } + + // Look up a type for the new partition. + + OSDictionary * hintTable = OSDynamicCast( + /* type */ OSDictionary, + /* instance */ getProperty(kIOFDiskPartitionSchemeContentTable) ); + + if ( hintTable ) + { + char hintIndex[5]; + OSString * hintValue; + + sprintf(hintIndex, "0x%02X", partition->systid & 0xFF); + + hintValue = OSDynamicCast(OSString, hintTable->getObject(hintIndex)); + + if ( hintValue ) partitionHint = (char *) hintValue->getCStringNoCopy(); + } + + // Create the new media object. + + IOMedia * newMedia = instantiateDesiredMediaObject( + /* partition */ partition, + /* partitionID */ partitionID, + /* fdiskBlock */ fdiskBlock ); + + if ( newMedia ) + { + if ( newMedia->init( + /* base */ partitionBase, + /* size */ partitionSize, + /* preferredBlockSize */ mediaBlockSize, + /* isEjectable */ media->isEjectable(), + /* isWhole */ false, + /* isWritable */ media->isWritable(), + /* contentHint */ partitionHint ) ) + { + // Set a name for this partition. + + char name[24]; + sprintf(name, "Untitled %ld", partitionID); + newMedia->setName(name); + + // Set a location value (the partition number) for this partition. + + char location[12]; + sprintf(location, "%ld", partitionID); + newMedia->setLocation(location); + + // Set the "Partition ID" key for this partition. 
+ + newMedia->setProperty(kIOMediaPartitionIDKey, partitionID, 32); + } + else + { + newMedia->release(); + newMedia = 0; + } + } + + return newMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOFDiskPartitionScheme::instantiateDesiredMediaObject( + fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ) +{ + // + // Allocate a new media object (called from instantiateMediaObject). + // + + return new IOMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 9); + +// - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOFDiskPartitionScheme, 15); diff --git a/iokit/Families/IOStorage/IOMedia.cpp b/iokit/Families/IOStorage/IOMedia.cpp new file mode 100644 index 000000000..9d97057c7 --- /dev/null +++ b/iokit/Families/IOStorage/IOMedia.cpp @@ -0,0 +1,792 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include // (ULONG_MAX, ...) +#include // (gIODTPlane, ...) +#include + +#define super IOStorage +OSDefineMetaClassAndStructors(IOMedia, IOStorage) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOStorage * IOMedia::getProvider() const +{ + // + // Obtain this object's provider. We override the superclass's method to + // return a more specific subclass of OSObject -- IOStorage. This method + // serves simply as a convenience to subclass developers. + // + + return (IOStorage *) IOService::getProvider(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMedia::init(UInt64 base, + UInt64 size, + UInt64 preferredBlockSize, + bool isEjectable, + bool isWhole, + bool isWritable, + const char * contentHint = 0, + OSDictionary * properties = 0) +{ + // + // Initialize this object's minimal state. + // + + if (super::init(properties) == false) return false; + + _mediaBase = base; + _mediaSize = size; + _isEjectable = isEjectable; + _isWhole = isWhole; + _isWritable = isWritable; + _openLevel = kIOStorageAccessNone; + _openReaders = OSSet::withCapacity(1); + _openReaderWriter = 0; + _preferredBlockSize = preferredBlockSize; + + if (_openReaders == 0) return false; + + // + // Create the standard media registry properties. + // + + setProperty(kIOMediaContentKey, contentHint ? contentHint : ""); + setProperty(kIOMediaContentHintKey, contentHint ? contentHint : ""); + setProperty(kIOMediaEjectableKey, isEjectable); + setProperty(kIOMediaLeafKey, true); + setProperty(kIOMediaPreferredBlockSizeKey, preferredBlockSize, 64); + setProperty(kIOMediaSizeKey, size, 64); + setProperty(kIOMediaWholeKey, isWhole); + setProperty(kIOMediaWritableKey, isWritable); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOMedia::free(void) +{ + // + // Free all of this object's outstanding resources. 
+ // + + if (_openReaders) _openReaders->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMedia::attachToChild(IORegistryEntry * client, + const IORegistryPlane * plane) +{ + // + // This method is called for each client interested in the services we + // provide. The superclass links us as a parent to this client in the + // I/O Kit registry on success. + // + + OSString * s; + + // Ask our superclass' opinion. + + if (super::attachToChild(client, plane) == false) return false; + + // + // Determine whether the client is a storage object, which we consider + // to be a consumer of this storage object's content and a producer of + // new content. A storage object need not be an IOStorage subclass, so + // long as it identifies itself with a match category of "IOStorage". + // + // If the client is indeed a storage object, we reset the media's Leaf + // property to false and replace the media's Content property with the + // client's Content Mask property, if any. + // + + s = OSDynamicCast(OSString, client->getProperty(gIOMatchCategoryKey)); + + if (s && !strcmp(s->getCStringNoCopy(), kIOStorageCategory)) + { + setProperty(kIOMediaLeafKey, false); + + s = OSDynamicCast(OSString,client->getProperty(kIOMediaContentMaskKey)); + if (s) setProperty(kIOMediaContentKey, s->getCStringNoCopy()); + } + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOMedia::detachFromChild(IORegistryEntry * client, + const IORegistryPlane * plane) +{ + // + // This method is called for each client that loses interest in the + // services we provide. The superclass unlinks us from this client + // in the I/O Kit registry on success. + // + // Note that this method is called at a nondeterministic time after + // our client is terminated, which means another client may already + // have arrived and attached in the meantime. 
This is not an issue
+ // should the termination be issued synchronously, however, which we
+ // take advantage of when this media needs to eliminate one of its
+ // clients. If the termination was issued on this media or farther
+ // below in the hierarchy, we don't really care that the properties
+ // would not be consistent since this media object is going to die
+ // anyway.
+ //
+
+ OSString * s;
+
+ //
+ // Determine whether the client is a storage object, which we consider
+ // to be a consumer of this storage object's content and a producer of
+ // new content. A storage object need not be an IOStorage subclass, so
+ // long as it identifies itself with a match category of "IOStorage".
+ //
+ // If the client is indeed a storage object, we reset the media's Leaf
+ // property to true and reset the media's Content property to the hint
+ // we obtained when this media was initialized.
+ //
+
+ s = OSDynamicCast(OSString, client->getProperty(gIOMatchCategoryKey));
+
+ if (s && !strcmp(s->getCStringNoCopy(), kIOStorageCategory))
+ {
+ setProperty(kIOMediaContentKey, getContentHint());
+ setProperty(kIOMediaLeafKey, true);
+ }
+
+ // Pass the call onto our superclass.
+
+ super::detachFromChild(client, plane);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::handleOpen(IOService * client,
+ IOOptionBits options,
+ void * argument)
+{
+ //
+ // The handleOpen method grants or denies permission to access this object
+ // to an interested client. The argument is an IOStorageAccess value that
+ // specifies the level of access desired -- reader or reader-writer.
+ //
+ // This method can be invoked to upgrade or downgrade the access level for
+ // an existing client as well. The previous access level will prevail for
+ // upgrades that fail, of course. A downgrade should never fail. If the
+ // new access level should be the same as the old for a given client, this
+ // method will do nothing and return success.
In all cases, one, singular + // close-per-client is expected for all opens-per-client received. + // + // This method will work even when the media is in the terminated state. + // + // We are guaranteed that no other opens or closes will be processed until + // we make our decision, change our state, and return from this method. + // + + IOStorageAccess access = (IOStorageAccess) argument; + IOStorageAccess level; + + assert(client); + + // + // Chart our course of action. + // + + switch (access) + { + case kIOStorageAccessReader: + { + if (_openReaders->containsObject(client)) // (access: no change) + return true; + else if (_openReaderWriter == client) // (access: downgrade) + level = kIOStorageAccessReader; + else // (access: new reader) + level = _openReaderWriter ? kIOStorageAccessReaderWriter + : kIOStorageAccessReader; + break; + } + case kIOStorageAccessReaderWriter: + { + if (_openReaders->containsObject(client)) // (access: upgrade) + level = kIOStorageAccessReaderWriter; + else if (_openReaderWriter == client) // (access: no change) + return true; + else // (access: new writer) + level = kIOStorageAccessReaderWriter; + + if (_isWritable == false) // (is this media object writable?) + return false; + + if (_openReaderWriter) // (does a reader-writer already exist?) + return false; + + break; + } + default: + { + assert(0); + return false; + } + } + + // + // If we are in the terminated state, we only accept downgrades. + // + + if (isInactive() && _openReaderWriter != client) // (dead? not a downgrade?) + return false; + + // + // Determine whether the storage objects above us can be torn down, should + // this be a new reader-writer open or an upgrade into a reader-writer (if + // the client issuing the open is not a storage object itself, of course). + // + + if (access == kIOStorageAccessReaderWriter) // (new reader-writer/upgrade?) 
+ { + const OSSymbol * category = OSSymbol::withCString(kIOStorageCategory); + + if (category) + { + IOService * storageObject = getClientWithCategory(category); + category->release(); + + if (storageObject && storageObject != client) + { + if (storageObject->terminate(kIOServiceSynchronous) == false) + return false; + } + } + } + + // + // Determine whether the storage objects below us accept this open at this + // multiplexed level of access -- new opens, upgrades, and downgrades (and + // no changes in access) all enter through the same open api. + // + + if (_openLevel != level) // (has open level changed?) + { + IOStorage * provider = OSDynamicCast(IOStorage, getProvider()); + + if (provider && provider->open(this, options, level) == false) + { + // + // We were unable to open the storage objects below us. We must + // recover from the terminate we issued above before bailing out, + // if applicable, by re-registering the media object for matching. + // + + if (access == kIOStorageAccessReaderWriter) + registerService(kIOServiceSynchronous); // (re-register media) + + return false; + } + } + + // + // Process the open. + // + // We make sure our open state is consistent before calling registerService + // (if applicable) since this method can be called again on the same thread + // (the lock protecting handleOpen is recursive, so access would be given). 
+ //
+
+ _openLevel = level;
+
+ if (access == kIOStorageAccessReader)
+ {
+ _openReaders->setObject(client);
+
+ if (_openReaderWriter == client) // (for a downgrade)
+ {
+ _openReaderWriter = 0;
+ registerService(kIOServiceSynchronous); // (re-register media)
+ }
+ }
+ else // (access == kIOStorageAccessReaderWriter)
+ {
+ _openReaderWriter = client;
+
+ _openReaders->removeObject(client); // (for an upgrade)
+ }
+
+ return true;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::handleIsOpen(const IOService * client) const
+{
+ //
+ // The handleIsOpen method determines whether the specified client, or any
+ // client if none is specified, presently has an open on this object.
+ //
+ // This method will work even when the media is in the terminated state.
+ //
+ // We are guaranteed that no other opens or closes will be processed until
+ // we return from this method.
+ //
+
+ if (client == 0) return (_openLevel != kIOStorageAccessNone);
+
+ return ( _openReaderWriter == client ||
+ _openReaders->containsObject(client) );
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+void IOMedia::handleClose(IOService * client, IOOptionBits options)
+{
+ //
+ // A client is informing us that it is giving up access to our contents.
+ //
+ // This method will work even when the media is in the terminated state.
+ //
+ // We are guaranteed that no other opens or closes will be processed until
+ // we change our state and return from this method.
+ //
+
+ assert(client);
+
+ //
+ // Process the close.
+ //
+
+ bool reregister = (_openReaderWriter == client) && (isInactive() == false);
+
+ if (_openReaderWriter == client) // (is the client a reader-writer?)
+ {
+ _openReaderWriter = 0;
+ }
+ else if (_openReaders->containsObject(client)) // (is the client a reader?)
+ {
+ _openReaders->removeObject(client);
+ }
+ else // (is the client an imposter?)
+ {
+ assert(0);
+ return;
+ }
+
+ //
+ // Reevaluate the open we have on the level below us. If no opens remain,
+ // we close, or if no reader-writer remains, but readers do, we downgrade.
+ //
+
+ IOStorageAccess level;
+
+ if (_openReaderWriter) level = kIOStorageAccessReaderWriter;
+ else if (_openReaders->getCount()) level = kIOStorageAccessReader;
+ else level = kIOStorageAccessNone;
+
+ if (_openLevel != level) // (has open level changed?)
+ {
+ IOStorage * provider = OSDynamicCast(IOStorage, getProvider());
+
+ assert(level != kIOStorageAccessReaderWriter);
+
+ if (provider)
+ {
+ if (level == kIOStorageAccessNone) // (is a close in order?)
+ {
+ provider->close(this, options);
+ }
+ else // (is a downgrade in order?)
+ {
+ bool success;
+ success = provider->open(this, 0, level);
+ assert(success); // (should never fail, unless avoided deadlock)
+ }
+ }
+
+ _openLevel = level; // (set new open level)
+ }
+
+ //
+ // If the reader-writer just closed, re-register the media so that I/O Kit
+ // will attempt to match storage objects that may now be interested in this
+ // media.
+ //
+ // We make sure our open state is consistent before calling registerService
+ // (if applicable) since this method can be called again on the same thread
+ // (the lock protecting handleClose is recursive, so access would be given).
+ //
+
+ if (reregister)
+ registerService(kIOServiceSynchronous); // (re-register media)
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+void IOMedia::read(IOService * /* client */,
+ UInt64 byteStart,
+ IOMemoryDescriptor * buffer,
+ IOStorageCompletion completion)
+{
+ //
+ // Read data from the storage object at the specified byte offset into the
+ // specified buffer, asynchronously. When the read completes, the caller
+ // will be notified via the specified completion action.
+ //
+ // The buffer will be retained for the duration of the read.
+ // + // This method will work even when the media is in the terminated state. + // + + if (isInactive()) + { + complete(completion, kIOReturnNoMedia); + return; + } + + if (_openLevel == kIOStorageAccessNone) // (instantaneous value, no lock) + { + complete(completion, kIOReturnNotOpen); + return; + } + + if (_mediaSize == 0 || _preferredBlockSize == 0) + { + complete(completion, kIOReturnUnformattedMedia); + return; + } + + if (_mediaSize < byteStart + buffer->getLength()) + { + complete(completion, kIOReturnBadArgument); + return; + } + + byteStart += _mediaBase; + getProvider()->read(this, byteStart, buffer, completion); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOMedia::write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // Write data into the storage object at the specified byte offset from the + // specified buffer, asynchronously. When the write completes, the caller + // will be notified via the specified completion action. + // + // The buffer will be retained for the duration of the write. + // + // This method will work even when the media is in the terminated state. 
+ // + + if (isInactive()) + { + complete(completion, kIOReturnNoMedia); + return; + } + + if (_openLevel == kIOStorageAccessNone) // (instantaneous value, no lock) + { + complete(completion, kIOReturnNotOpen); + return; + } + + if (_openReaderWriter != client) // (instantaneous value, no lock) + { +///m:2425148:workaround:commented:start +// complete(completion, kIOReturnNotPrivileged); +// return; +///m:2425148:workaround:commented:stop + } + + if (_isWritable == 0) + { + complete(completion, kIOReturnLockedWrite); + return; + } + + if (_mediaSize == 0 || _preferredBlockSize == 0) + { + complete(completion, kIOReturnUnformattedMedia); + return; + } + + if (_mediaSize < byteStart + buffer->getLength()) + { + complete(completion, kIOReturnBadArgument); + return; + } + + byteStart += _mediaBase; + getProvider()->write(this, byteStart, buffer, completion); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOMedia::synchronizeCache(IOService * client) +{ + if (isInactive()) + { + return kIOReturnNoMedia; + } + + if (_openLevel == kIOStorageAccessNone) // (instantaneous value, no lock) + { + return kIOReturnNotOpen; + } + + if (_openReaderWriter != client) // (instantaneous value, no lock) + { + return kIOReturnNotPrivileged; + } + + if (_isWritable == 0) + { + return kIOReturnLockedWrite; + } + + if (_mediaSize == 0 || _preferredBlockSize == 0) + { + return kIOReturnUnformattedMedia; + } + + return getProvider()->synchronizeCache(this); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt64 IOMedia::getPreferredBlockSize() const +{ + // + // Ask the media object for its natural block size. This information + // is useful to clients that want to optimize access to the media. 
+ //
+
+ return _preferredBlockSize;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+UInt64 IOMedia::getSize() const
+{
+ //
+ // Ask the media object for its total length in bytes.
+ //
+
+ return _mediaSize;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+UInt64 IOMedia::getBase() const
+{
+ //
+ // Ask the media object for its byte offset relative to its provider media
+ // object below it in the storage hierarchy.
+ //
+
+ return _mediaBase;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::isEjectable() const
+{
+ //
+ // Ask the media object whether it is ejectable.
+ //
+
+ return _isEjectable;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::isFormatted() const
+{
+ //
+ // Ask the media object whether it is formatted.
+ //
+
+ return (_mediaSize && _preferredBlockSize);
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::isWritable() const
+{
+ //
+ // Ask the media object whether it is writable.
+ //
+
+ return _isWritable;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::isWhole() const
+{
+ //
+ // Ask the media object whether it represents the whole disk.
+ //
+
+ return _isWhole;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+const char * IOMedia::getContent() const
+{
+ //
+ // Ask the media object for a description of its contents. The description
+ // is the same as the hint at the time of the object's creation, but it is
+ // possible that the description be overridden by a client (which has probed
+ // the media and identified the content correctly) of the media object. It
+ // is more accurate than the hint for this reason. The string is formed in
+ // the likeness of Apple's "Apple_HFS" strings.
+ //
+ // The content description can be overridden by any client that matches onto
+ // this media object with a match category of kIOStorageCategory. The media
+ // object checks for a kIOMediaContentMaskKey property in the client, and if
+ // it finds one, it copies it into kIOMediaContentKey property.
+ //
+
+ OSString * string;
+
+ string = OSDynamicCast(OSString, getProperty(kIOMediaContentKey));
+ if (string == 0) return "";
+ return string->getCStringNoCopy();
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+const char * IOMedia::getContentHint() const
+{
+ //
+ // Ask the media object for a hint of its contents. The hint is set at the
+ // time of the object's creation, should the creator have a clue as to what
+ // it may contain. The hint string does not change for the lifetime of the
+ // object and is also formed in the likeness of Apple's "Apple_HFS" strings.
+ //
+
+ OSString * string;
+
+ string = OSDynamicCast(OSString, getProperty(kIOMediaContentHintKey));
+ if (string == 0) return "";
+ return string->getCStringNoCopy();
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOMedia::matchPropertyTable(OSDictionary * table, SInt32 * score)
+{
+ //
+ // Compare the properties in the supplied table to this object's properties.
+ //
+
+ // Ask our superclass' opinion.
+
+ if (super::matchPropertyTable(table, score) == false) return false;
+
+ // We return success if the following expression is true -- individual
+ // comparisons evaluate to truth if the named property is not present
+ // in the supplied table.
+ + return compareProperty(table, kIOMediaContentKey) && + compareProperty(table, kIOMediaContentHintKey) && + compareProperty(table, kIOMediaEjectableKey) && + compareProperty(table, kIOMediaLeafKey) && + compareProperty(table, kIOMediaSizeKey) && + compareProperty(table, kIOMediaWholeKey) && + compareProperty(table, kIOMediaWritableKey) ; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMedia, 15); diff --git a/iokit/Families/IOStorage/IOMediaBSDClient.cpp b/iokit/Families/IOStorage/IOMediaBSDClient.cpp new file mode 100644 index 000000000..13b3eb6f1 --- /dev/null +++ b/iokit/Families/IOStorage/IOMediaBSDClient.cpp @@ -0,0 +1,2125 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include // (DKIOCGETBLOCKSIZE, ...) +#include // (mach/vm_region.h, ...) +#include // (VM_REGION_BASIC_INFO, ...) +#include // (devfs_make_node, ...) +#include // (struct buf, ...) +#include // (bdevsw_add, ...) +#include // (FWRITE, ...) +#include // (IOCGROUP, ...) +#include // (S_ISBLK, ...) +#include // (struct uio, ...) 
+#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define super IOService +OSDefineMetaClassAndStructors(IOMediaBSDClient, IOService) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +static IOMediaBSDClient * gIOMediaBSDClient = 0; + +const signed kMajor = 14; // (bsd interface [b|c]devsw major) +const unsigned kMinorsGrowCount = 16; // (entries to add on table growth) +const unsigned kMinorsMaxCount = 1 << 24; // (maximum entries; 24-bit minor) +const unsigned kAnchorsGrowCount = 2; // (entries to add on table growth) +const unsigned kAnchorsMaxCount = kMinorsMaxCount; // (maximum entries) + +#define kMsgBadWhole "%s: Peer whole media \"%s\" is not allowed.", getName() +#define kMsgNoWhole "%s: No whole media found for media \"%s\".\n", getName() +#define kMsgNoLocation "%s: No location is found for media \"%s\".\n", getName() + +#define IOMEDIABSDCLIENT_IOSTAT_SUPPORT // (enable iostat support for bsd) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +extern "C" +{ + int dkclose(dev_t dev, int flags, int devtype, struct proc *); + int dkioctl(dev_t dev, u_long cmd, caddr_t data, int, struct proc *); + int dkioctl_bdev(dev_t dev, u_long cmd, caddr_t data, int, struct proc *); + int dkopen(dev_t dev, int flags, int devtype, struct proc *); + int dkread(dev_t dev, struct uio * uio, int flags); + int dksize(dev_t dev); + void dkstrategy(struct buf * bp); + int dkwrite(dev_t dev, struct uio * uio, int flags); +} // extern "C" + +static struct bdevsw bdevswFunctions = +{ + /* d_open */ dkopen, + /* d_close */ dkclose, + /* d_strategy */ dkstrategy, + /* d_ioctl */ dkioctl_bdev, + /* d_dump */ eno_dump, + /* d_psize */ dksize, + /* d_type */ D_DISK +}; + +struct cdevsw cdevswFunctions = +{ + /* d_open */ dkopen, + /* d_close */ dkclose, + /* d_read */ dkread, + /* d_write */ dkwrite, + /* d_ioctl */ dkioctl, + /* d_stop */ eno_stop, + /* d_reset */ 
eno_reset, + /* d_ttys */ 0, + /* d_select */ eno_select, + /* d_mmap */ eno_mmap, + /* d_strategy */ eno_strat, + /* d_getc */ eno_getc, + /* d_putc */ eno_putc, + /* d_type */ D_TAPE +}; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +struct dio { dev_t dev; struct uio * uio; }; + +typedef void * dkr_t; /* dkreadwrite request */ +typedef enum { DKRTYPE_BUF, DKRTYPE_DIO } dkrtype_t; + +int dkreadwrite(dkr_t dkr, dkrtype_t dkrtype); +void dkreadwritecompletion(void *, void *, IOReturn, UInt64); + +#define get_kernel_task() kernel_task +#define get_user_task() current_task() + +#ifdef IOMEDIABSDCLIENT_IOSTAT_SUPPORT +#include +IOBlockStorageDriver * dk_drive[DK_NDRIVE]; +#endif IOMEDIABSDCLIENT_IOSTAT_SUPPORT + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +const UInt32 kInvalidAnchorID = (UInt32) (-1); + +struct AnchorSlot +{ + UInt32 isAssigned:1, // (anchor slot is occupied) + isObsolete:1; // (anchor slot is to be removed once refs gone) + + IOService * anchor; // (anchor object) + IONotifier * notifier; // (anchor termination notification, post-stop) +}; + +class AnchorTable +{ +protected: + AnchorSlot * _table; + UInt32 _tableCount; + UInt32 _tableGrowCount; + UInt32 _tableMaxCount; + + static IOReturn anchorWasNotified( void * target, + void * parameter, + UInt32 messageType, + IOService * provider, + void * messageArgument, + vm_size_t messageArgumentSize ); + +public: + AnchorTable(UInt32 growCount, UInt32 maxCount); + ~AnchorTable(); + + UInt32 insert(IOService * anchor); + UInt32 locate(IOService * anchor); + void obsolete(UInt32 anchorID); + void remove(UInt32 anchorID); + + bool isObsolete(UInt32 anchorID); +}; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +const UInt32 kInvalidMinorID = (UInt32) (-1); + +struct MinorSlot +{ + UInt32 isAssigned:1, // (minor slot is occupied) + isEjecting:1, // (minor slot is in eject flux, needs close) + 
isObsolete:1; // (minor slot is in eject flux, needs removal) + + UInt32 anchorID; // (minor's associated anchor ID) + IOMedia * media; // (minor's media object) + char * name; // (minor's name, private allocation space) + + UInt64 bdevBlockSize; // (block device's preferred block size) + void * bdevNode; // (block device's devfs node) + UInt32 bdevOpen:1, // (block device's open flag) + bdevWriter:1; // (block device's open writer flag) + + void * cdevNode; // (character device's devfs node) + UInt32 cdevOpen:1, // (character device's open flag) + cdevWriter:1; // (character device's open writer flag) +}; + +class MinorTable +{ +protected: + MinorSlot * _table; + UInt32 _tableCount; + UInt32 _tableGrowCount; + UInt32 _tableMaxCount; + +public: + MinorTable(UInt32 growCount, UInt32 maxCount); + ~MinorTable(); + + UInt32 insert(IOMedia * media, UInt32 anchorID, char * slicePath); + UInt32 locate(IOMedia * media); + void obsolete(UInt32 minorID); + void remove(UInt32 minorID); + + bool isObsolete(UInt32 minorID); + + MinorSlot * getMinor(UInt32 minorID); + + UInt32 getOpenCountForAnchorID(UInt32 anchorID); + IOMedia * getWholeMediaAtAnchorID(UInt32 anchorID); + bool hasReferencesToAnchorID(UInt32 anchorID); +}; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMediaBSDClient::init(OSDictionary * properties = 0) +{ + // + // Initialize this object's minimal state. + // + + if ( super::init(properties) == false ) return false; + + _anchors = new AnchorTable(kAnchorsGrowCount, kAnchorsMaxCount); + _bdevswInstalled = false; + _cdevswInstalled = false; + _minors = new MinorTable(kMinorsGrowCount, kMinorsMaxCount); + _notifier = 0; + + if ( _anchors == 0 || _minors == 0 ) return false; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOMediaBSDClient::free() +{ + // + // Free all of this object's outstanding resources. 
+ // + + if ( _notifier ) _notifier->remove(); + if ( _cdevswInstalled ) cdevsw_remove(kMajor, &cdevswFunctions); + if ( _bdevswInstalled ) bdevsw_remove(kMajor, &bdevswFunctions); + if ( _minors ) delete _minors; + if ( _anchors ) delete _anchors; + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMediaBSDClient::start(IOService * provider) +{ + // + // This method is called once we have been attached to the provider object. + // + + assert(gIOMediaBSDClient == 0); + + // Ask our superclass' opinion. + + if ( super::start(provider) == false ) return false; + + // Establish a global reference to this instance. + + gIOMediaBSDClient = this; + + // Install bdevsw and cdevsw functions. + + _bdevswInstalled = (bdevsw_add(kMajor, &bdevswFunctions) == kMajor); + _cdevswInstalled = (cdevsw_add(kMajor, &cdevswFunctions) == kMajor); + + if ( _bdevswInstalled == false && _cdevswInstalled == false ) return false; + + // Create a notification handler for media arrival. We ask for a priority + // of ten to ensure that we are notified ahead of other interested clients + // (with a default priority of zero), so that we can place the BSD-related + // properties on the media object that they might need in time. + + _notifier = addNotification( /* type */ gIOFirstPublishNotification, + /* description */ serviceMatching("IOMedia"), + /* action */ mediaHasArrived, + /* target */ this, + /* parameter */ 0, + /* priority */ 10 ); + + if ( _notifier == 0 ) return false; + + // Register this object so it can be found via notification requests. It is + // not being registered to have I/O Kit attempt to have drivers match on it, + // which is the reason most other services are registered -- that's not the + // intention of this registerService call. 
+ + registerService(); + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOMediaBSDClient::stop(IOService * provider) +{ + // + // This method is called before we are detached from the provider object. + // + + IOMedia * media = (IOMedia *) provider; + UInt32 minorID = 0; + + // Disable access to tables, matching, opens, closes, and terminations. + + gIOMediaBSDClient->lockForArbitration(); + + // Find the minor assigned to this media. + + minorID = _minors->locate(media); + assert(minorID != kInvalidMinorID); + + // State our assumptions. + + assert(media->isOpen() == false); + + // Remove the minor from the minor table, unless it's still in flux (which + // means an open on the bdevsw/cdevsw switch is still outstanding: the one + // that sent the eject ioctl), in which case we mark the minor as obsolete + // for later removal. + + if ( _minors->getMinor(minorID)->isEjecting ) // (is minor in flux?) + { + assert(_minors->isObsolete(minorID) == false); + + _minors->obsolete(minorID); + } + else + { + assert(_minors->getMinor(minorID)->bdevOpen == false); + assert(_minors->getMinor(minorID)->cdevOpen == false); + + _minors->remove(minorID); + } + + // Enable access to tables, matching, opens, closes, and terminations. + + gIOMediaBSDClient->unlockForArbitration(); + + // Call upon the superclass to finish its work. + + super::stop(media); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMediaBSDClient::mediaHasArrived( void * /* target */, + void * /* parameter */, + IOService * service ) +{ + // + // Notification handler for media arrivals. + // + + IOMedia * media = OSDynamicCast(IOMedia, service); + bool success = false; + + assert(gIOMediaBSDClient); + + // Attach the media-bsd-client object as a client of the new media object. 
+ + if ( media && gIOMediaBSDClient->attach(media) ) + { + // Disable access to tables, matching, opens, closes, and terminations. + + gIOMediaBSDClient->lockForArbitration(); + + // Create bdevsw and cdevsw nodes for the new media object. + + success = gIOMediaBSDClient->createNodes(media); + + // Enable access to tables, matching, opens, closes, and terminations. + + gIOMediaBSDClient->unlockForArbitration(); + + // Detach the media-bsd-client object from the media object on error. + + if (success == false) gIOMediaBSDClient->detach(media); + } + + return true; // (meaningless return value) +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IOMediaBSDClient::getWholeMedia( IOMedia * media, + UInt32 * slicePathSize = 0, + char * slicePath = 0 ) +{ + // + // Find the whole media that roots this media tree. A null return value + // indicates no whole media was found or a malformed tree was detected. + // + // If slicePathSize is non-zero, the size required to fit the slice path + // (including the zero terminator) is passed back as a result. + // + // If slicePathSize and slicePath are both non-zero, the slice path will + // be written into the slicePath buffer. The value slicePathSize points + // to must be the size of the slicePath buffer, which is used for sanity + // checking in this method. + // + + UInt32 depth = 1; + UInt32 position = sizeof('\0'); + IOService * service = 0; + + assert(slicePath == 0 || slicePathSize != 0); + + // Search the registry for the parent whole media for this media. + + for ( service = media; service; service = service->getProvider() ) + { + if ( OSDynamicCast(IOMedia, service) ) // (is it a media?) + { + if ( ((IOMedia *)service)->isWhole() ) // (is it a whole media?) + { + if ( slicePath ) // (are we building the slice path?) + { + slicePath[*slicePathSize - 1] = 0; // (zero terminate path) + + if ( position < *slicePathSize ) // (need to move path?) 
+ { + memmove( slicePath, // (move path to start of buffer) + slicePath + (*slicePathSize - position), + position ); + } + } + else if ( slicePathSize ) // (report size req'd for slice path?) + { + *slicePathSize = position; + } + + return (IOMedia *)service; // (return the whole media) + } + + // Determine whether this non-whole media has a location value. It + // must, by definition of a non-whole media, but if it does not, we + // should return an error condition. + + const char * location = service->getLocation(); + + if ( location == 0 ) // (no location on non-whole media?) + { + if ( service == media ) IOLog(kMsgNoLocation, media->getName()); + return 0; + } + + // Otherwise, it's a valid non-whole media: we compute the required + // size for the slice path or build the slice path, if so requested. + // Note that the slice path is built backwards from the ends of the + // supplied buffer to the beginning of the buffer. + + position += sizeof('s') + strlen(location); + + if ( slicePath ) // (build the slice path?) + { + char * path = slicePath + *slicePathSize - position; + + if ( position > *slicePathSize ) { assert(0); return 0; } + + *path = 's'; + strncpy(path + sizeof('s'), location, strlen(location)); + } + + depth += 1; + } + } + + // If we've fallen through, then the whole media was never found. + + if ( depth == 1 ) IOLog(kMsgNoWhole, media->getName()); + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMediaBSDClient::createNodes(IOMedia * media) +{ + // + // Create bdevsw and cdevsw nodes for the given media object. + // + // This method assumes that the arbitration lock is held. + // + + IOService * anchor; + UInt32 anchorID; + bool anchorNew = false; + UInt32 minorID; + char * slicePath = 0; + UInt32 slicePathSize; + IOMedia * whole; + + // + // Find the anchor that roots this media tree. The anchor is defined as the + // parent of the whole media that roots this media tree. 
It is an important + // object to us because this object stays in place when media is ejected, so + // we can continue to maintain the "unit number" of the "drive" such that if + // media is re-inserted, it will show up under the same "unit number". You + // can think of the typical anchor as being the drive, if it helps, although + // it could be one of many other kinds of drivers (eg. a RAID scheme). + // + + whole = getWholeMedia(media, &slicePathSize); + if ( whole == 0 ) return false; + + anchor = whole->getProvider(); + if ( anchor == 0 ) return false; + + // + // Determine whether the anchor already exists in the anchor table (obsolete + // occurences are skipped in the search, as appropriate, since those anchor + // IDs are to be removed soon). If the anchor does not exist, insert it into + // anchor table. + // + + anchorID = _anchors->locate(anchor); + + if ( anchorID != kInvalidAnchorID ) + { + // + // The anchor does exist in the table, however we've got more to check. + // + // We need to ensure that the whole media associated with this anchor is + // the same as ours. If it is, all is well. If it isn't, then there is + // still a chance all is well. It is possible to have an old media tree + // still associated with the anchor: the tree would be inactive, but not + // yet terminated (this can happen on forced termination of a media tree + // with oustanding opens, since the close must come before the terminate + // can proceed; it can happen even in normal eject conditions should the + // media be immediately reinserted when the termination on the old tree, + // which is asynchronous, is still chugging along on another thread). In + // case the tree is inactive, we mark the anchorID as obsolete and use a + // new anchorID. In the case the tree is not inactive, then we've got a + // problem and we must bail out. 
+ // + // A few additional notes: + // + // o if the whole media is indeed the same as the one in our tables, we + // need not check that it is active, because by virtue of the fact we + // got a new media notification on the same tree, we know for sure it + // cannot be in the inactive state. + // + // o if the whole media is not in our tables, it is quite possible that + // some child non-whole media from the old media tree is still around + // as terminations work from the bottom (whole media) up (to leaves), + // and the asynchronous termination thread is still not done chugging + // through the medias on the old tree. We use a new anchorID in this + // case. + // + + IOMedia * wholeInTable = _minors->getWholeMediaAtAnchorID(anchorID); + + if ( wholeInTable == 0 ) // (is an existing whole media in our tables?) + { + if ( _minors->hasReferencesToAnchorID(anchorID) ) // (any medias?) + { + _anchors->obsolete(anchorID); // (obsolete old anchor ID) + anchorID = kInvalidAnchorID; // ( request new anchor ID) + } // (else, all is well) + } + else if ( whole != wholeInTable ) // (old whole media not same as new?) + { + if ( wholeInTable->isInactive() ) // (is it inactive/terminating?) + { + _anchors->obsolete(anchorID); // (obsolete old anchor ID) + anchorID = kInvalidAnchorID; // ( request new anchor ID) + } + else // (peer active whole medias detected, log error) + { + if ( whole == media ) IOLog(kMsgBadWhole, whole->getName()); + return false; + } + } // (else, all is well) + } + + if ( anchorID == kInvalidAnchorID ) + { + anchorID = _anchors->insert(anchor); // (get new anchor ID) + if ( anchorID == kInvalidAnchorID ) return false; + anchorNew = true; + } + + // + // Allocate space for and build the slice path for the device node names. 
+ // + + slicePath = (char *) IOMalloc(slicePathSize); + if ( slicePath == 0 ) goto createNodesErr; + + whole = getWholeMedia(media, &slicePathSize, slicePath); + assert(whole); + + // + // Insert the new media into our minor table (we're almost done :-). + // + + minorID = _minors->insert(media, anchorID, slicePath); + if ( minorID == kInvalidMinorID ) goto createNodesErr; + + // + // Create the required properties on the media. + // + + media->setProperty(kIOBSDNameKey, _minors->getMinor(minorID)->name); + media->setProperty(kIOBSDUnitKey, anchorID, 32); // ("BSD Unit" ) + media->setProperty(kIOBSDMajorKey, kMajor, 32); // ("BSD Major") + media->setProperty(kIOBSDMinorKey, minorID, 32); // ("BSD Minor") + + // + // Clean up outstanding resources. + // + + IOFree(slicePath, slicePathSize); + + return true; // (success) + +createNodesErr: + + if (anchorNew) _anchors->remove(anchorID); + if (slicePath) IOFree(slicePath, slicePathSize); + + return false; // (failure) +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +AnchorTable * IOMediaBSDClient::getAnchors() +{ + // + // Obtain the table of anchors. + // + + return _anchors; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +MinorTable * IOMediaBSDClient::getMinors() +{ + // + // Obtain the table of anchors. + // + + return _minors; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +MinorSlot * IOMediaBSDClient::getMinor(UInt32 minorID) +{ + // + // Obtain information for the specified minor ID. 
+ // + + return _minors->getMinor(minorID); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + 
+OSMetaClassDefineReservedUnused(IOMediaBSDClient, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOMediaBSDClient, 15); + +// ============================================================================= +// BSD Functions + +int dkopen(dev_t dev, int flags, int devtype, struct proc *) +{ + // + // dkopen opens the device (called on each open). + // + + int error; + IOStorageAccess level; + MinorSlot * minor; + + assert(gIOMediaBSDClient); + assert(S_ISBLK(devtype) || S_ISCHR(devtype)); + + gIOMediaBSDClient->lockForArbitration(); // (disable access) + + assert(gIOMediaBSDClient->getMinors()); + + error = 0; + level = (flags & FWRITE) ? kIOStorageAccessReaderWriter + : kIOStorageAccessReader; + minor = gIOMediaBSDClient->getMinor(minor(dev)); + + // + // Process the open. + // + + if ( minor == 0 ) // (is minor valid?) + { + error = ENXIO; + } + else if ( minor->isEjecting ) // (is minor in flux?) + { + error = EBUSY; + } + else if ( (flags & FWRITE) ) // (is client a writer?) + { + if ( minor->bdevWriter || minor->cdevWriter ) + level = kIOStorageAccessNone; + } + else // (is client a reader?) + { + if ( minor->bdevOpen || minor->cdevOpen ) + level = kIOStorageAccessNone; + } + + if ( error == 0 && level != kIOStorageAccessNone ) // (issue open/upgrade?) 
+ { + if ( minor->media->open(gIOMediaBSDClient, 0, level) == false ) // (go) + { + error = EBUSY; + } + } + + if ( error == 0 ) // (update state) + { + if ( S_ISBLK(devtype) ) + { + minor->bdevOpen = true; + if ( (flags & FWRITE) ) minor->bdevWriter = true; + } + else + { + minor->cdevOpen = true; + if ( (flags & FWRITE) ) minor->cdevWriter = true; + } + } + + gIOMediaBSDClient->unlockForArbitration(); // (enable access) + + return error; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dkclose(dev_t dev, int /* flags */, int devtype, struct proc *) +{ + // + // dkclose closes the device (called on last close). + // + + MinorSlot * minor; + bool wasWriter; + + assert(S_ISBLK(devtype) || S_ISCHR(devtype)); + + gIOMediaBSDClient->lockForArbitration(); // (disable access) + + minor = gIOMediaBSDClient->getMinor(minor(dev)); + wasWriter = (minor->bdevWriter || minor->cdevWriter); + + if ( S_ISBLK(devtype) ) // (update state) + { + minor->bdevBlockSize = minor->media->getPreferredBlockSize(); + minor->bdevOpen = false; + minor->bdevWriter = false; + } + else + { + minor->cdevOpen = false; + minor->cdevWriter = false; + } + + if ( minor->isEjecting ) // (is minor in flux?) + { + // + // We've determined that the specified minor is in ejection flux. This + // means we are in a state where the media object has been closed, only + // the device node is still open. This happens to the minor subsequent + // to a DKIOCEJECT ioctl -- this close resets the flux state to normal. + // + + minor->isEjecting = false; + + // If this minor is marked as obsolete, then we've already received the + // media's termination notification (stop method), but the minor is yet + // to be removed from the table -- remove it now. 
+ + assert(minor->bdevOpen == false); + assert(minor->cdevOpen == false); + + if ( minor->isObsolete ) + gIOMediaBSDClient->getMinors()->remove(minor(dev)); + } + else if ( !minor->bdevOpen && !minor->cdevOpen ) + { + // + // We communicate the close down to the media object once all opens are + // gone, on both the block and character device nodes. + // + + minor->media->close(gIOMediaBSDClient); // (go) + } + else if ( !minor->bdevWriter && !minor->cdevWriter && wasWriter ) + { + // + // We communicate a downgrade down to the media object once all writers + // are gone and while readers still exist. + // + + bool s; + s = minor->media->open(gIOMediaBSDClient, 0, kIOStorageAccessReader); + assert(s); // (should never fail, unless deadlock avoided) + } + + gIOMediaBSDClient->unlockForArbitration(); // (enable access) + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dkread(dev_t dev, struct uio * uio, int /* flags */) +{ + // + // dkread reads data from a device. + // + + struct dio dio = { dev, uio }; + + return dkreadwrite(&dio, DKRTYPE_DIO); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dkwrite(dev_t dev, struct uio * uio, int /* flags */) +{ + // + // dkwrite writes data to a device. + // + + struct dio dio = { dev, uio }; + + return dkreadwrite(&dio, DKRTYPE_DIO); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void dkstrategy(struct buf * bp) +{ + // + // dkstrategy starts an asynchronous read or write operation. It returns + // to the caller as soon as the operation is queued, and completes it via + // the biodone function. + // + + dkreadwrite(bp, DKRTYPE_BUF); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dkioctl(dev_t dev, u_long cmd, caddr_t data, int, struct proc *) +{ + // + // dkioctl performs operations other than a read or write. 
+ // + + int error = 0; + MinorSlot * minor = gIOMediaBSDClient->getMinor(minor(dev)); + + if ( minor->isEjecting ) return EBADF; // (is minor in flux?) + + // + // Process the ioctl. + // + + switch ( cmd ) + { + case DKIOCGETBLOCKSIZE: // getBlockSize(int * out); + { + // + // This ioctl returns the preferred block size of the media object. + // + + *(int *)data = (int) minor->media->getPreferredBlockSize(); + + } break; + + case DKIOCGETBLOCKCOUNT: // getBlockCount(int * out); + { + // + // This ioctl returns the size of the media object in blocks. The + // implied block size is returned by DKIOCGETBLOCKSIZE. + // + + if ( minor->media->getPreferredBlockSize() ) + *(int *)data = (int) ( minor->media->getSize() / + minor->media->getPreferredBlockSize() ); + else + *(int *)data = 0; + + } break; + + case DKIOCISFORMATTED: // isFormatted(int * out); + { + // + // This ioctl returns truth if the media object is formatted. + // + + *(int *)data = (int) minor->media->isFormatted(); + + } break; + + case DKIOCISWRITABLE: // isWritable(int * out); + { + // + // This ioctl returns truth if the media object is writable. + // + + *(int *)data = (int) minor->media->isWritable(); + + } break; + + case DKIOCGETLOCATION: // getLocation(char[128] out); + { + // + // This ioctl returns the open firmware path for this media object. + // + + int l = sizeof(((struct drive_location *)data)->location); + char * p = ((struct drive_location *)data)->location; + + if ( minor->media->getPath(p, &l, gIODTPlane) && strchr(p, ':') ) + strcpy(p, strchr(p, ':') + 1); // (strip the plane name) + else + error = EINVAL; + + } break; + + case DKIOCEJECT: // eject(void); + { + // + // This ioctl asks that the media object be ejected from the device. 
+ // + + IOBlockStorageDriver * driver; + MinorTable * minors; + + driver = OSDynamicCast( IOBlockStorageDriver, + minor->media->getProvider() ); + minors = gIOMediaBSDClient->getMinors(); + + // Determine whether this media has an IOBlockStorageDriver parent. + + if ( driver == 0 ) { error = ENOTTY; break; } + + // Disable access to tables, matching, opens, closes, terminations. + + gIOMediaBSDClient->lockForArbitration(); + + // Determine whether there are other opens on the device nodes that + // are associated with this anchor -- the one valid open is the one + // that issued this eject. + + if ( minors->getOpenCountForAnchorID(minor->anchorID) > 1 ) + { + error = EBUSY; + + // Enable access to tables, matching, opens, closes, and so on. + + gIOMediaBSDClient->unlockForArbitration(); + } + else + { + IOReturn status; + + // Mark this minor as being in ejection flux (which means are in + // a state where the media object has been closed but the device + // node is still open; we must reject all future accesses to the + // device node until it is closed; note that we do this both on + // success and failure of the ejection call). + + minor->isEjecting = true; + + // Enable access to tables, matching, opens, closes, and so on. + + gIOMediaBSDClient->unlockForArbitration(); + + // Close the media object before the ejection request is made. + + minor->media->close(gIOMediaBSDClient); + + // Open the block storage driver to make the ejection request. + + if (driver->open(gIOMediaBSDClient, 0, kIOStorageAccessReader)) + { + // Eject the media from the drive. + + status = driver->ejectMedia(); + + // Close the block storage driver. + + driver->close(gIOMediaBSDClient); + } + else + { + status = kIOReturnBusy; + } + + error = gIOMediaBSDClient->errnoFromReturn(status); + } + + } break; + + default: + { + // + // A foreign ioctl was received. Log an error to the console. 
+ // + + IOLog( "%s: ioctl(%s\'%c\',%d,%d) is unsupported.\n", + minor->name, + ((cmd & IOC_INOUT) == IOC_INOUT) ? ("_IOWR,") : + ( ((cmd & IOC_OUT) == IOC_OUT) ? ("_IOR,") : + ( ((cmd & IOC_IN) == IOC_IN) ? ("_IOW,") : + ( ((cmd & IOC_VOID) == IOC_VOID) ? ("_IO,") : "" ) ) ), + (char) IOCGROUP(cmd), + (int) (cmd & 0xff), + (int) IOCPARM_LEN(cmd) ); + + error = ENOTTY; + + } break; + } + + return error; // (return error status) +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dkioctl_bdev(dev_t dev, u_long cmd, caddr_t data, int f, struct proc * proc) +{ + // + // dkioctl_bdev performs operations other than a read or write, specific to + // the block device. + // + + int error = 0; + MinorSlot * minor = gIOMediaBSDClient->getMinor(minor(dev)); + + if ( minor->isEjecting ) return EBADF; // (is minor in flux?) + + // + // Process the ioctl. + // + + switch ( cmd ) + { + case DKIOCGETBLOCKSIZE: // getBlockSize(int * out); + { + // + // This ioctl returns the preferred (or overrided) block size of the + // media object. + // + + *(int *)data = (int) minor->bdevBlockSize; + + } break; + + case DKIOCSETBLOCKSIZE: // setBlockSize(int * in); + { + // + // This ioctl overrides the block size for the media object, for the + // duration of all block device opens at this minor. + // + + if ( *(int *)data > 0 ) + minor->bdevBlockSize = (UInt64) (*(int *)data); + else + error = EINVAL; + + } break; + + case DKIOCGETBLOCKCOUNT: // getBlockCount(int * out); + { + // + // This ioctl returns the size of the media object in blocks. The + // implied block size is returned by DKIOCGETBLOCKSIZE. + // + + if ( minor->bdevBlockSize ) + *(int *)data = (int) ( minor->media->getSize() / + minor->bdevBlockSize ); + else + *(int *)data = 0; + + } break; + + default: + { + // + // Call the common ioctl handler for all other ioctls. 
+ // + + error = dkioctl(dev, cmd, data, f, proc); + + } break; + } + + return error; // (return error status) +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dksize(dev_t dev) +{ + // + // dksize returns the block size of the media. + // + // This is a departure from BSD 4.4's definition of this function, that is, + // it will not return the size of the disk partition, as would be expected + // in a BSD 4.4 implementation. + // + + MinorSlot * minor = gIOMediaBSDClient->getMinor(minor(dev)); + + if ( minor->isEjecting ) return 0; // (is minor in flux?) + + return (int) minor->bdevBlockSize; // (return block size) +} + +// ============================================================================= +// Support For BSD Functions + +inline dev_t DKR_GET_DEV(dkr_t dkr, dkrtype_t dkrtype) +{ + return (dkrtype == DKRTYPE_BUF) + ? ((struct buf *)dkr)->b_dev + : ((struct dio *)dkr)->dev; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline UInt64 DKR_GET_BYTE_COUNT(dkr_t dkr, dkrtype_t dkrtype) +{ + return (dkrtype == DKRTYPE_BUF) + ? ((struct buf *)dkr)->b_bcount + : ((struct dio *)dkr)->uio->uio_resid; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline UInt64 DKR_GET_BYTE_START(dkr_t dkr, dkrtype_t dkrtype) +{ + if (dkrtype == DKRTYPE_BUF) + { + struct buf * bp = (struct buf *)dkr; + MinorSlot * minor = gIOMediaBSDClient->getMinor(minor(bp->b_dev)); + + return bp->b_blkno * minor->bdevBlockSize; + } + return ((struct dio *)dkr)->uio->uio_offset; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline bool DKR_IS_READ(dkr_t dkr, dkrtype_t dkrtype) +{ + return (dkrtype == DKRTYPE_BUF) + ? 
((((struct buf *)dkr)->b_flags & B_READ) == B_READ) + : ((((struct dio *)dkr)->uio->uio_rw) == UIO_READ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline bool DKR_IS_ASYNCHRONOUS(dkr_t dkr, dkrtype_t dkrtype) +{ + return (dkrtype == DKRTYPE_BUF) + ? true + : false; +} + + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline bool DKR_IS_RAW(dkr_t dkr, dkrtype_t dkrtype) +{ + return (dkrtype == DKRTYPE_BUF) + ? false + : true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline void DKR_SET_BYTE_COUNT(dkr_t dkr, dkrtype_t dkrtype, UInt64 bcount) +{ + if (dkrtype == DKRTYPE_BUF) + ((struct buf *)dkr)->b_resid = ((struct buf *)dkr)->b_bcount - bcount; + else + ((struct dio *)dkr)->uio->uio_resid -= bcount; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline void DKR_RUN_COMPLETION(dkr_t dkr, dkrtype_t dkrtype, IOReturn status) +{ + if (dkrtype == DKRTYPE_BUF) + { + struct buf * bp = (struct buf *)dkr; + + bp->b_error = gIOMediaBSDClient->errnoFromReturn(status); // (error?) + bp->b_flags |= (status != kIOReturnSuccess) ? B_ERROR : 0; // (error?) + biodone(bp); // (complete request) + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +inline IOMemoryDescriptor * DKR_GET_BUFFER(dkr_t dkr, dkrtype_t dkrtype) +{ + if (dkrtype == DKRTYPE_BUF) + { + struct buf * bp = (struct buf *)dkr; + + if ( (bp->b_flags & B_VECTORLIST) ) + { + assert(sizeof(IOPhysicalRange ) == sizeof(iovec )); + assert(sizeof(IOPhysicalRange::address) == sizeof(iovec::iov_base)); + assert(sizeof(IOPhysicalRange::length ) == sizeof(iovec::iov_len )); + + return IOMemoryDescriptor::withPhysicalRanges( // (multiple-range) + (IOPhysicalRange *) bp->b_vectorlist, + (UInt32) bp->b_vectorcount, + (bp->b_flags & B_READ) ? 
kIODirectionIn : kIODirectionOut, + true ); + } + + return IOMemoryDescriptor::withAddress( // (single-range) + (vm_address_t) bp->b_data, + (vm_size_t) bp->b_bcount, + (bp->b_flags & B_READ) ? kIODirectionIn : kIODirectionOut, + (bp->b_flags & B_PHYS) ? get_user_task() : get_kernel_task() ); + } + else + { + struct uio * uio = ((struct dio *)dkr)->uio; + + assert(sizeof(IOVirtualRange ) == sizeof(iovec )); + assert(sizeof(IOVirtualRange::address) == sizeof(iovec::iov_base)); + assert(sizeof(IOVirtualRange::length ) == sizeof(iovec::iov_len )); + + return IOMemoryDescriptor::withRanges( // (multiple-range) + (IOVirtualRange *) uio->uio_iov, + (UInt32) uio->uio_iovcnt, + (uio->uio_rw == UIO_READ ) ? kIODirectionIn : kIODirectionOut, + (uio->uio_segflg != UIO_SYSSPACE) ? get_user_task() : get_kernel_task(), + true ); + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +int dkreadwrite(dkr_t dkr, dkrtype_t dkrtype) +{ + // + // dkreadwrite performs a read or write operation. + // + + IOMemoryDescriptor * buffer; + register UInt64 byteCount; + register UInt64 byteStart; + UInt64 mediaSize; + MinorSlot * minor; + IOReturn status; + + minor = gIOMediaBSDClient->getMinor(minor(DKR_GET_DEV(dkr, dkrtype))); + + if ( minor->isEjecting ) // (is minor in flux?) + { + status = kIOReturnNoMedia; + goto dkreadwriteErr; + } + + if ( minor->media->isFormatted() == false ) // (is media unformatted?) + { + status = kIOReturnUnformattedMedia; + goto dkreadwriteErr; + } + + byteCount = DKR_GET_BYTE_COUNT(dkr, dkrtype); // (get byte count) + byteStart = DKR_GET_BYTE_START(dkr, dkrtype); // (get byte start) + mediaSize = minor->media->getSize(); // (get media size) + + // + // Reads that start at (or perhaps past) the end-of-media are not considered + // errors, even though no data is transferred, while writes at (or past) the + // end-of-media do indeed return errors under BSD semantics. 
+ // + + if ( byteStart >= mediaSize ) // (is start at or past the end-of-media?) + { + status = DKR_IS_READ(dkr,dkrtype) ? kIOReturnSuccess : kIOReturnIOError; + goto dkreadwriteErr; + } + + // + // Reads and writes, via the character device, that do not start or end on a + // media block boundary are considered errors under BSD semantics. + // + + if ( DKR_IS_RAW(dkr, dkrtype) ) + { + UInt64 mediaBlockSize = minor->media->getPreferredBlockSize(); + + if ( (byteStart % mediaBlockSize) || (byteCount % mediaBlockSize) ) + { + status = kIOReturnNotAligned; + goto dkreadwriteErr; + } + } + + // + // Build a descriptor which describes the buffer involved in the transfer. + // + + buffer = DKR_GET_BUFFER(dkr, dkrtype); + + if ( buffer == 0 ) // (no buffer?) + { + status = kIOReturnNoMemory; + goto dkreadwriteErr; + } + + // + // Reads and writes that extend beyond the end-of-media are not considered + // errors under BSD semantics. We are to transfer as many bytes as can be + // read or written from the medium and return no error. This differs from + // IOMedia semantics which is to fail the entire request without copying a + // single byte should it include something past the end-of-media. We must + // adapt the IOMedia semantics to look like BSD semantics here. + // + // Clip the transfer buffer should this be a short read or write request. + // + + if ( byteCount > mediaSize - byteStart ) // (clip at end-of-media) + { + IOMemoryDescriptor * originalBuffer = buffer; + + buffer = IOMemoryDescriptor::withSubRange( + /* descriptor */ originalBuffer, + /* withOffset */ 0, + /* withLength */ mediaSize - byteStart, + /* withDirection */ originalBuffer->getDirection() ); + + originalBuffer->release(); // (either retained above or about to fail) + + if ( buffer == 0 ) // (no buffer?) + { + status = kIOReturnNoMemory; + goto dkreadwriteErr; + } + } + + // + // Execute the transfer. + // + + if ( DKR_IS_ASYNCHRONOUS(dkr, dkrtype) ) // (an asynchronous request?) 
+ { + IOStorageCompletion completion; + + completion.target = dkr; + completion.action = dkreadwritecompletion; + completion.parameter = (void *) dkrtype; + + if ( DKR_IS_READ(dkr, dkrtype) ) // (a read?) + { + minor->media->read( /* client */ gIOMediaBSDClient, + /* byteStart */ byteStart, + /* buffer */ buffer, + /* completion */ completion ); // (go) + } + else // (a write?) + { + minor->media->write( /* client */ gIOMediaBSDClient, + /* byteStart */ byteStart, + /* buffer */ buffer, + /* completion */ completion ); // (go) + } + + status = kIOReturnSuccess; + } + else // (is this a synchronous request?) + { + if ( DKR_IS_READ(dkr, dkrtype) ) // (a read?) + { +///m:2333367:workaround:commented:start +// status = minor->media->read( +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = minor->media->IOStorage::read( +///m:2333367:workaround:added:stop + /* client */ gIOMediaBSDClient, + /* byteStart */ byteStart, + /* buffer */ buffer, + /* actualByteCount */ &byteCount ); // (go) + } + else // (a write?) + { +///m:2333367:workaround:commented:start +// status = minor->media->write( +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = minor->media->IOStorage::write( +///m:2333367:workaround:added:stop + /* client */ gIOMediaBSDClient, + /* byteStart */ byteStart, + /* buffer */ buffer, + /* actualByteCount */ &byteCount ); // (go) + } + + dkreadwritecompletion(dkr, (void *)dkrtype, status, byteCount); + } + + // + // We release our retain on the buffer now, even though in the asynchronous + // case, the object needs to exist for the duration of the transfer. While + // this might appear to be a mistake, it is not. The layers below us will + // have retained the buffer themselves. 
+ // + + buffer->release(); // (release our retain on the buffer) + + return gIOMediaBSDClient->errnoFromReturn(status); // (return error status) + +dkreadwriteErr: + + dkreadwritecompletion(dkr, (void *)dkrtype, status, 0); + + return gIOMediaBSDClient->errnoFromReturn(status); // (return error status) +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void dkreadwritecompletion( void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount ) +{ + // + // dkreadwritecompletion cleans up after a read or write operation. + // + + dkr_t dkr = (dkr_t) target; + dkrtype_t dkrtype = (dkrtype_t) (int) parameter; + dev_t dev = DKR_GET_DEV(dkr, dkrtype); + +#ifdef IOMEDIABSDCLIENT_IOSTAT_SUPPORT + UInt32 anchorID = gIOMediaBSDClient->getMinor(minor(dev))->anchorID; + + if ( anchorID < DK_NDRIVE ) + { + IOBlockStorageDriver * d = dk_drive[anchorID]; + + if ( d ) + { + dk_xfer[anchorID] = (long) + ( d->getStatistic(IOBlockStorageDriver::kStatisticsReads ) + + d->getStatistic(IOBlockStorageDriver::kStatisticsWrites) ); + dk_wds[anchorID] = (long) (8 * + ( d->getStatistic(IOBlockStorageDriver::kStatisticsBytesRead ) + + d->getStatistic(IOBlockStorageDriver::kStatisticsBytesWritten)) ); + } + } +#endif IOMEDIABSDCLIENT_IOSTAT_SUPPORT + + if ( status != kIOReturnSuccess ) // (log errors to the console) + { + IOLog( "%s: %s.\n", + gIOMediaBSDClient->getMinor(minor(dev))->name, + gIOMediaBSDClient->stringFromReturn(status) ); + } + + DKR_SET_BYTE_COUNT(dkr, dkrtype, actualByteCount); // (set byte count) + DKR_RUN_COMPLETION(dkr, dkrtype, status); // (run completion) +} + +// ============================================================================= +// AnchorTable Class + +AnchorTable::AnchorTable(UInt32 growCount, UInt32 maxCount) +{ + // + // Initialize this object's minimal state. 
+ // + + _table = 0; + _tableCount = 0; + _tableGrowCount = growCount; + _tableMaxCount = maxCount; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +AnchorTable::~AnchorTable() +{ + // + // Free all of this object's outstanding resources. + // + + for ( UInt32 anchorID = 0; anchorID < _tableCount; anchorID++ ) + if ( _table[anchorID].isAssigned ) remove(anchorID); + + if ( _table ) IODelete(_table, AnchorSlot, _tableCount); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 AnchorTable::insert(IOService * anchor) +{ + // + // This method inserts the specified anchor into an unassigned slot in the + // anchor table and returns its ID (or kInvalidAnchorID on a failure). + // + // Note that the anchor is transparently removed from the table should the + // anchor terminate (or it is at least marked obsolete, should references + // to the anchor still exist in the minor table). + // + + UInt32 anchorID; + IONotifier * notifier; + + // Search for an unassigned slot in the anchor table. + + for ( anchorID = 0; anchorID < _tableCount; anchorID++ ) + if ( _table[anchorID].isAssigned == false ) break; + + // Was an unassigned slot found? If not, grow the table. + + if ( anchorID == _tableCount ) + { + AnchorSlot * newTable; + UInt32 newTableCount; + + // We must expand the anchor table since no more slots are available. + + if ( _tableCount >= _tableMaxCount ) return kInvalidAnchorID; + + newTableCount = min(_tableGrowCount + _tableCount, _tableMaxCount); + newTable = IONew(AnchorSlot, newTableCount); + + if ( newTable == 0 ) return kInvalidAnchorID; + + bzero(newTable, newTableCount * sizeof(AnchorSlot)); + + // Copy over the old table's entries, then free the old table. 
+ + if ( _table ) + { + bcopy(_table, newTable, _tableCount * sizeof(AnchorSlot)); + IODelete(_table, AnchorSlot, _tableCount); + } + + // Obtain the next unassigned index (simple since we know the size of + // the old table), then update our instance variables to reflect the + // new tables. + + anchorID = _tableCount; + _table = newTable; + _tableCount = newTableCount; + } + + // Create a notification handler for the anchor's termination (post-stop); + // the handler will remove the anchor transparently from the table if the + // anchor terminates (or at least marks it obsolete, if references to the + // anchor still exist in the minor table). + + notifier = anchor->registerInterest( + /* type */ gIOGeneralInterest, + /* action */ anchorWasNotified, + /* target */ this, + /* parameter */ 0 ); + + if ( notifier == 0 ) return kInvalidAnchorID; + + // Zero the new slot, fill it in, and retain the anchor object. + + bzero(&_table[anchorID], sizeof(AnchorSlot)); // (zero slot) + + _table[anchorID].isAssigned = true; // (fill in slot) + _table[anchorID].isObsolete = false; + _table[anchorID].anchor = anchor; + _table[anchorID].notifier = notifier; + + _table[anchorID].anchor->retain(); // (retain anchor) + +#ifdef IOMEDIABSDCLIENT_IOSTAT_SUPPORT + if ( anchorID < DK_NDRIVE ) + { + dk_drive[anchorID] = OSDynamicCast(IOBlockStorageDriver, anchor); + if ( anchorID + 1 > (UInt32) dk_ndrive ) dk_ndrive = anchorID + 1; + } +#endif IOMEDIABSDCLIENT_IOSTAT_SUPPORT + + return anchorID; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void AnchorTable::remove(UInt32 anchorID) +{ + // + // This method removes the specified anchor from the anchor table. + // + + assert(anchorID < _tableCount); + assert(_table[anchorID].isAssigned); + + // Release the resources retained in the anchor slot and zero it. 
+ + _table[anchorID].notifier->remove(); + _table[anchorID].anchor->release(); // (release anchor) + + bzero(&_table[anchorID], sizeof(AnchorSlot)); // (zero slot) + +#ifdef IOMEDIABSDCLIENT_IOSTAT_SUPPORT + if ( anchorID < DK_NDRIVE ) + { + dk_drive[anchorID] = 0; + for (dk_ndrive = DK_NDRIVE; dk_ndrive; dk_ndrive--) + { + if ( dk_drive[dk_ndrive - 1] ) break; + } + } +#endif IOMEDIABSDCLIENT_IOSTAT_SUPPORT +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void AnchorTable::obsolete(UInt32 anchorID) +{ + // + // This method obsoletes the specified anchor, that is, the slot is marked + // as obsolete and will be removed later via the minor table remove method + // once it detects references to the anchor ID drop to 0. Once obsoleted, + // the anchor can be considered to be removed, since it will not appear in + // locate searches, even though behind the scenes it still occupies a slot. + // + + assert(anchorID < _tableCount); + assert(_table[anchorID].isAssigned); + + // Mark the anchor as obsolete so that it can be removed from the table as + // soon as all its references go away (minor table's responsibility). + + _table[anchorID].isObsolete = true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 AnchorTable::locate(IOService * anchor) +{ + // + // This method searches for the specified anchor in the anchor table and + // returns its ID (or kInvalidAnchorID on a failure). It ignores slots + // marked as obsolete. + // + + for (UInt32 anchorID = 0; anchorID < _tableCount; anchorID++) + { + if ( _table[anchorID].isAssigned != false && + _table[anchorID].isObsolete == false && + _table[anchorID].anchor == anchor ) return anchorID; + } + + return kInvalidAnchorID; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool AnchorTable::isObsolete(UInt32 anchorID) +{ + // + // Determine whether the specified anchor ID is marked as obsolete. 
+ // + + assert(anchorID < _tableCount); + assert(_table[anchorID].isAssigned); + + return _table[anchorID].isObsolete ? true : false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn AnchorTable::anchorWasNotified( void * /* target */, + void * /* parameter */, + UInt32 messageType, + IOService * anchor, + void * /* messageArgument */, + vm_size_t /* messageArgumentSize */ ) +{ + // + // Notification handler for anchors. + // + + UInt32 anchorID; + + assert(gIOMediaBSDClient); + + // Determine whether this is a termination notification (post-stop). + + if ( messageType != kIOMessageServiceIsTerminated ) + return kIOReturnSuccess; + + // Disable access to tables, matching, opens, closes, and terminations. + + gIOMediaBSDClient->lockForArbitration(); + + // Determine whether this anchor is in the anchor table (obsolete occurences + // are skipped in the search, as appropriate, since those anchor IDs will be + // removed as it is). + + anchorID = gIOMediaBSDClient->getAnchors()->locate(anchor); + + if ( anchorID != kInvalidAnchorID ) + { + // Determine whether this anchor is still has references in the minor + // table. If it does, we mark the the anchor as obsolete so that it + // will be removed later, once references to it go to zero (which is + // handled by MinorTable::remove). + + if ( gIOMediaBSDClient->getMinors()->hasReferencesToAnchorID(anchorID) ) + gIOMediaBSDClient->getAnchors()->obsolete(anchorID); + else + gIOMediaBSDClient->getAnchors()->remove(anchorID); + } + + // Enable access to tables, matching, opens, closes, and terminations. + + gIOMediaBSDClient->unlockForArbitration(); + + return kIOReturnSuccess; +} + +// ============================================================================= +// MinorTable Class + +MinorTable::MinorTable(UInt32 growCount, UInt32 maxCount) +{ + // + // Initialize this object's minimal state. 
+ // + + _table = 0; + _tableCount = 0; + _tableGrowCount = growCount; + _tableMaxCount = maxCount; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +MinorTable::~MinorTable() +{ + // + // Free all of this object's outstanding resources. + // + + for ( UInt32 minorID = 0; minorID < _tableCount; minorID++ ) + if ( _table[minorID].isAssigned ) remove(minorID); + + if ( _table ) IODelete(_table, MinorSlot, _tableCount); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 MinorTable::insert(IOMedia * media, UInt32 anchorID, char * slicePath) +{ + // + // This method inserts the specified media/anchorID pair into an unassigned + // slot in the minor table and returns its ID (or kInvalidMinorID on error). + // + // Note that the bdev and cdev nodes are published as a result of this call, + // with the name "[r]disk". For instance, "disk2s3s1" + // for an anchorID of 2 and slicePath of "s3s1". + // + + void * bdevNode; + void * cdevNode; + UInt32 minorID; + char * minorName; + UInt32 minorNameSize; + + // Search for an unassigned slot in the minor table. + + for ( minorID = 0; minorID < _tableCount; minorID++ ) + if ( _table[minorID].isAssigned == false ) break; + + // Was an unassigned slot found? If not, grow the table. + + if ( minorID == _tableCount ) + { + MinorSlot * newTable; + UInt32 newTableCount; + + // We must expand the minor table since no more slots are available. + + if ( _tableCount >= _tableMaxCount) return kInvalidMinorID; + + newTableCount = min(_tableGrowCount + _tableCount, _tableMaxCount); + newTable = IONew(MinorSlot, newTableCount); + + if ( newTable == 0 ) return kInvalidMinorID; + + bzero(newTable, newTableCount * sizeof(MinorSlot)); + + // Copy over the old table's entries, then free the old table. 
+ + if ( _table ) + { + bcopy(_table, newTable, _tableCount * sizeof(MinorSlot)); + IODelete(_table, MinorSlot, _tableCount); + } + + // Obtain the next unassigned index (simple since we know the size of + // the old table), then update our instance variables to reflect the + // new tables. + + minorID = _tableCount; + _table = newTable; + _tableCount = newTableCount; + } + + // Create a buffer large enough to hold the full name of the minor. + + minorNameSize = strlen("disk#"); + for (unsigned temp = anchorID; temp >= 10; temp /= 10) minorNameSize++; + minorNameSize += strlen(slicePath); + minorNameSize += 1; + minorName = IONew(char, minorNameSize); + + // Create a block and character device node in BSD for this media. + + bdevNode = devfs_make_node( /* dev */ makedev(kMajor, minorID), + /* type */ DEVFS_BLOCK, + /* owner */ UID_ROOT, + /* group */ GID_OPERATOR, + /* permission */ media->isWritable()?0640:0440, + /* name (fmt) */ "disk%d%s", + /* name (arg) */ anchorID, + /* name (arg) */ slicePath ); + + cdevNode = devfs_make_node( /* dev */ makedev(kMajor, minorID), + /* type */ DEVFS_CHAR, + /* owner */ UID_ROOT, + /* group */ GID_OPERATOR, + /* permission */ media->isWritable()?0640:0440, + /* name (fmt) */ "rdisk%d%s", + /* name (arg) */ anchorID, + /* name (arg) */ slicePath ); + + if ( minorName == 0 || bdevNode == 0 || cdevNode == 0 ) + { + if ( cdevNode ) devfs_remove(cdevNode); + if ( bdevNode ) devfs_remove(bdevNode); + if ( minorName ) IODelete(minorName, char, minorNameSize); + + return kInvalidMinorID; + } + + // Construct a name for the node. + + sprintf(minorName, "disk%ld%s", anchorID, slicePath); + assert(strlen(minorName) + 1 == minorNameSize); + + // Zero the new slot, fill it in, and retain the media object. 
+ + bzero(&_table[minorID], sizeof(MinorSlot)); // (zero slot) + + _table[minorID].isAssigned = true; // (fill in slot) + _table[minorID].isEjecting = false; + _table[minorID].isObsolete = false; + _table[minorID].anchorID = anchorID; + _table[minorID].media = media; + _table[minorID].name = minorName; + _table[minorID].bdevBlockSize = media->getPreferredBlockSize(); + _table[minorID].bdevNode = bdevNode; + _table[minorID].bdevOpen = false; + _table[minorID].cdevNode = cdevNode; + _table[minorID].cdevOpen = false; + + _table[minorID].media->retain(); // (retain media) + + return minorID; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void MinorTable::remove(UInt32 minorID) +{ + // + // This method removes the specified minor from the minor table. + // + + UInt32 anchorID; + + assert(minorID < _tableCount); + assert(_table[minorID].isAssigned); + + assert(_table[minorID].isEjecting == false); + assert(_table[minorID].bdevOpen == false); + assert(_table[minorID].cdevOpen == false); + + anchorID = _table[minorID].anchorID; + + // Release the resources retained in the minor slot and zero it. + + devfs_remove(_table[minorID].cdevNode); + devfs_remove(_table[minorID].bdevNode); + IODelete(_table[minorID].name, char, strlen(_table[minorID].name) + 1); + _table[minorID].media->release(); // (release media) + + bzero(&_table[minorID], sizeof(MinorSlot)); // (zero slot) + + // Determine whether the associated anchor ID is marked as obsolete. If it + // is and there are no other references to the anchor ID in the minor table, + // we remove the anchor ID from the anchor table. 
+ + assert(gIOMediaBSDClient); + + if ( gIOMediaBSDClient->getAnchors()->isObsolete(anchorID) ) + { + if ( hasReferencesToAnchorID(anchorID) == false ) + gIOMediaBSDClient->getAnchors()->remove(anchorID); + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 MinorTable::locate(IOMedia * media) +{ + // + // This method searches for the specified media in the minor table and + // returns its ID (or kInvalidMinorID on an error). It ignores slots + // marked as obsolete. + // + + for (UInt32 minorID = 0; minorID < _tableCount; minorID++) + { + if ( _table[minorID].isAssigned != false && + _table[minorID].isObsolete == false && + _table[minorID].media == media ) return minorID; + } + + return kInvalidMinorID; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt32 MinorTable::getOpenCountForAnchorID(UInt32 anchorID) +{ + // + // This method obtains a count of opens on the minors associated with the + // specified anchor ID. A block device open is counted separately from a + // character device open. + // + + UInt32 opens = 0; + + for ( UInt32 minorID = 0; minorID < _tableCount; minorID++ ) + { + if ( _table[minorID].isAssigned != false && + _table[minorID].anchorID == anchorID ) + { + opens += (_table[minorID].bdevOpen) ? 1 : 0; + opens += (_table[minorID].cdevOpen) ? 1 : 0; + } + } + + return opens; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * MinorTable::getWholeMediaAtAnchorID(UInt32 anchorID) +{ + // + // This method obtains the whole media associated with the specified anchor + // ID. 
+ // + + for ( UInt32 minorID = 0; minorID < _tableCount; minorID++ ) + { + if ( _table[minorID].isAssigned != false && + _table[minorID].anchorID == anchorID && + _table[minorID].media->isWhole() ) return _table[minorID].media; + } + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool MinorTable::hasReferencesToAnchorID(UInt32 anchorID) +{ + // + // This method determines whether there are assigned minors in the minor + // table that refer to the specified anchor ID. + // + + for ( UInt32 minorID = 0; minorID < _tableCount; minorID++ ) + { + if ( _table[minorID].isAssigned != false && + _table[minorID].anchorID == anchorID ) return true; + } + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +MinorSlot * MinorTable::getMinor(UInt32 minorID) +{ + // + // Obtain the structure describing the specified minor. + // + + if ( minorID < _tableCount && _table[minorID].isAssigned ) + return &_table[minorID]; + else + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void MinorTable::obsolete(UInt32 minorID) +{ + // + // This method obsoletes the specified minor, that is, the slot is marked + // as obsolete and will be removed later via the dkclose function once it + // detects the last close arrive. Once obsoleted, the minor can be cons- + // idered to be removed, since it will not appear in locate searches. + // + + assert(minorID < _tableCount); + assert(_table[minorID].isAssigned); + + // Mark the minor as obsolete so that it can be removed from the table as + // soon as the last close arrives (dkclose function's responsibility). + + _table[minorID].isObsolete = true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool MinorTable::isObsolete(UInt32 minorID) +{ + // + // Determine whether the specified minor ID is marked as obsolete. 
+ // + + assert(minorID < _tableCount); + assert(_table[minorID].isAssigned); + + return _table[minorID].isObsolete ? true : false; +} diff --git a/iokit/Families/IOStorage/IONeXTPartitionScheme.cpp b/iokit/Families/IOStorage/IONeXTPartitionScheme.cpp new file mode 100644 index 000000000..e7ccb3052 --- /dev/null +++ b/iokit/Families/IOStorage/IONeXTPartitionScheme.cpp @@ -0,0 +1,627 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +#define super IOPartitionScheme +OSDefineMetaClassAndStructors(IONeXTPartitionScheme, IOPartitionScheme); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Notes +// +// o the on-disk structure's fields are: 16-bit packed, big-endian formatted +// o the on-disk structure is stored four times in succession, each takes up +// sizeof(disk_label_t) bytes rounded up to the drive's natural block size +// o the dl_label_blkno block value assumes the drive's natural block size +// o the dl_part[].p_base, dl_part[].p_size and dl_front block values assume +// a dl_secsize byte block size +// o the dl_part[].p_base and dl_label_blkno block values are absolute, with +// respect to the whole disk +// o the dl_part[].p_base block value doesn't take into account the dl_front +// offset, which is required in order to compute the actual start position +// of the partition on the disk +// o note that CDs often suffer from the mastering-with-a-different-natural- +// block-size problem, but we can assume that the first map will always be +// valid in those cases, and that we'll never need to compute the position +// of the next map correctly +// o note that bootable i386 disks will never have a valid first map, due to +// the boot code that lives in block zero, however the second map is valid +// o this implementation checks for the existence of the first map only; it +// does not bother with the last three maps, since backwards compatibility +// with unreleased NeXT-style i386 disks is a non-goal, and for reasons of +// minimizing access to the media during probe +// + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define kIONeXTPartitionSchemeContentTable "Content Table" + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IONeXTPartitionScheme::init(OSDictionary * 
properties = 0) +{ + // + // Initialize this object's minimal state. + // + + // State our assumptions. + + assert(sizeof(disktab_t) == 514); // (compiler/platform check) + assert(sizeof(partition_t) == 46); // (compiler/platform check) + assert(sizeof(disk_label_t) == 7240); // (compiler/platform check) + + // Ask our superclass' opinion. + + if ( super::init(properties) == false ) return false; + + // Initialize our state. + + _partitions = 0; + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IONeXTPartitionScheme::free() +{ + // + // Free all of this object's outstanding resources. + // + + if ( _partitions ) _partitions->release(); + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOService * IONeXTPartitionScheme::probe(IOService * provider, SInt32 * score) +{ + // + // Determine whether the provider media contains a NeXT partition map. If + // it does, we return "this" to indicate success, otherwise we return zero. + // + + // State our assumptions. + + assert(OSDynamicCast(IOMedia, provider)); + + // Ask our superclass' opinion. + + if ( super::probe(provider, score) == 0 ) return 0; + + // Scan the provider media for a NeXT partition map. + + _partitions = scan(score); + + return ( _partitions ) ? this : 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IONeXTPartitionScheme::start(IOService * provider) +{ + // + // Publish the new media objects which represent our partitions. + // + + IOMedia * partition; + OSIterator * partitionIterator; + + // State our assumptions. + + assert(_partitions); + + // Ask our superclass' opinion. + + if ( super::start(provider) == false ) return false; + + // Attach and register the new media objects representing our partitions. 
+ + partitionIterator = OSCollectionIterator::withCollection(_partitions); + if ( partitionIterator == 0 ) return false; + + while ( (partition = (IOMedia *) partitionIterator->getNextObject()) ) + { + if ( partition->attach(this) ) + { + partition->registerService(); + } + } + + partitionIterator->release(); + + return true; +} + + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSSet * IONeXTPartitionScheme::scan(SInt32 * score) +{ + // + // Scan the provider media for a NeXT partition map. Returns the set + // of media objects representing each of the partitions (the retain for + // the set is passed to the caller), or null should no partition map be + // found. The default probe score can be adjusted up or down, based on + // the confidence of the scan. + // + + IOBufferMemoryDescriptor * buffer = 0; + UInt32 bufferSize = 0; + UInt64 labelBase = 0; + UInt32 labelBlock = 0; + UInt16 * labelCheckPtr = 0; + UInt32 labelCheckSize = 0; + bool labelFound = false; + UInt32 labelIndex = 0; + disk_label_t * labelMap = 0; + IOMedia * media = getProvider(); + UInt64 mediaBlockSize = media->getPreferredBlockSize(); + bool mediaIsOpen = false; + OSSet * partitions = 0; + IOReturn status = kIOReturnError; + + // Determine whether this media is formatted. + + if ( media->isFormatted() == false ) goto scanErr; + + // Determine whether this media has an appropriate block size. + + if ( (mediaBlockSize % DEV_BSIZE) ) goto scanErr; + + // Allocate a buffer large enough to hold one map, rounded to a media block. + + bufferSize = IORound(sizeof(disk_label_t), mediaBlockSize); + buffer = IOBufferMemoryDescriptor::withCapacity( + /* capacity */ bufferSize, + /* withDirection */ kIODirectionIn ); + if ( buffer == 0 ) goto scanErr; + + // Allocate a set to hold the set of media objects representing partitions. + + partitions = OSSet::withCapacity(1); + if ( partitions == 0 ) goto scanErr; + + // Open the media with read access. 
+ + mediaIsOpen = media->open(this, 0, kIOStorageAccessReader); + if ( mediaIsOpen == false ) goto scanErr; + + // Compute this partition's absolute offset with respect to the whole media, + // since the disk_label structure requires this information; we go down the + // service hierarchy summing bases until we reach the whole media object. + + for (IOService * service = media; service; service = service->getProvider()) + { + if ( OSDynamicCast(IOMedia, service) ) // (is this a media object?) + { + labelBase += ((IOMedia *)service)->getBase(); + if ( ((IOMedia *)service)->isWhole() ) break; + } + } + + // Scan the media for a NeXT partition map. + // + // In the spirit of minimizing reads, we only check the first of the four + // possible label positions. Backwards compatibility with old NeXT-style + // i386 disks, including redundancy for NeXT-style disks in general, is a + // non-goal. + + for ( labelIndex = 0; labelIndex < 1; labelIndex++ ) // (first map only) + { + // Read the next NeXT map into our buffer. + +///m:2333367:workaround:commented:start +// status = media->read(this, labelIndex * bufferSize, buffer); +///m:2333367:workaround:commented:stop +///m:2333367:workaround:added:start + status = media->IOStorage::read(this, labelIndex * bufferSize, buffer); +///m:2333367:workaround:added:stop + if ( status != kIOReturnSuccess ) goto scanErr; + + labelBlock = ((labelIndex * bufferSize) + labelBase) / mediaBlockSize; + labelMap = (disk_label_t *) buffer->getBytesNoCopy(); + + // Determine whether the partition map signature is present. 
+ + if ( OSSwapBigToHostInt32(labelMap->dl_version) == DL_V3 ) + { + labelCheckPtr = &(labelMap->dl_v3_checksum); + } + else if ( OSSwapBigToHostInt32(labelMap->dl_version) == DL_V2 || + OSSwapBigToHostInt32(labelMap->dl_version) == DL_V1 ) + { + labelCheckPtr = &(labelMap->dl_checksum); + } + else + { + continue; + } + + labelCheckSize = (UInt8 *) labelCheckPtr - + (UInt8 *) labelMap - sizeof(UInt16); + + // Determine whether the partition map block position is correct. + + if ( OSSwapBigToHostInt32(labelMap->dl_label_blkno) != labelBlock ) + { + continue; + } + + // Determine whether the partition map checksum is correct. + + labelMap->dl_label_blkno = OSSwapHostToBigInt32(0); + + if ( checksum16(labelMap, labelCheckSize) != *labelCheckPtr ) + { + continue; + } + + labelMap->dl_label_blkno = labelBlock; + + labelFound = true; + break; + } + + if ( labelFound == false ) + { + goto scanErr; + } + + // Scan for valid partition entries in the partition map. + + for ( unsigned index = 0; index < NPART; index++ ) + { + if ( isPartitionUsed(labelMap->dl_part + index) ) + { + // Determine whether the partition is corrupt (fatal). + + if ( isPartitionCorrupt( + /* partition */ labelMap->dl_part + index, + /* partitionID */ index + 1, + /* labelBase */ labelBase, + /* labelMap */ labelMap ) ) + { + goto scanErr; + } + + // Determine whether the partition is invalid (skipped). + + if ( isPartitionInvalid( + /* partition */ labelMap->dl_part + index, + /* partitionID */ index + 1, + /* labelBase */ labelBase, + /* labelMap */ labelMap ) ) + { + continue; + } + + // Create a media object to represent this partition. + + IOMedia * newMedia = instantiateMediaObject( + /* partition */ labelMap->dl_part + index, + /* partitionID */ index + 1, + /* labelBase */ labelBase, + /* labelMap */ labelMap ); + + if ( newMedia ) + { + partitions->setObject(newMedia); + newMedia->release(); + } + } + } + + // Release our resources. 
+ + media->close(this); + buffer->release(); + + return partitions; + +scanErr: + + // Release our resources. + + if ( mediaIsOpen ) media->close(this); + if ( partitions ) partitions->release(); + if ( buffer ) buffer->release(); + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IONeXTPartitionScheme::isPartitionUsed(partition_t * partition) +{ + // + // Ask whether the given partition is used. + // + + return ( (SInt32) OSSwapBigToHostInt32(partition->p_base) >= 0 && + (SInt32) OSSwapBigToHostInt32(partition->p_size) > 0 ); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IONeXTPartitionScheme::isPartitionCorrupt( + partition_t * /* partition */ , + UInt32 /* partitionID */ , + UInt64 /* labelBase */ , + disk_label_t * /* labelMap */ ) +{ + // + // Ask whether the given partition appears to be corrupt. A partition that + // is corrupt will cause the failure of the NeXT partition map recognition + // altogether. + // + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IONeXTPartitionScheme::isPartitionInvalid( partition_t * partition, + UInt32 partitionID, + UInt64 labelBase, + disk_label_t * labelMap ) +{ + // + // Ask whether the given partition appears to be invalid. A partition that + // is invalid will cause it to be skipped in the scan, but will not cause a + // failure of the NeXT partition map recognition. + // + + IOMedia * media = getProvider(); + UInt64 partitionBase = 0; + UInt64 partitionSize = 0; + + // Compute the absolute byte position and size of the new partition. 
+ + partitionBase = OSSwapBigToHostInt32(partition->p_base) + + OSSwapBigToHostInt16(labelMap->dl_front); + partitionSize = OSSwapBigToHostInt32(partition->p_size); + partitionBase *= OSSwapBigToHostInt32(labelMap->dl_secsize); + partitionSize *= OSSwapBigToHostInt32(labelMap->dl_secsize); + + // Determine whether the new partition leaves the confines of the container. + + if ( partitionBase < labelBase ) return true; // (absolute partitionBase) + + // Compute the relative byte position of the new partition. + + partitionBase -= labelBase; // (relative partitionBase) + + // Determine whether the new partition leaves the confines of the container. + + if ( partitionBase + partitionSize > media->getSize() ) return true; + + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IONeXTPartitionScheme::instantiateMediaObject( + partition_t * partition, + UInt32 partitionID, + UInt64 labelBase, + disk_label_t * labelMap ) +{ + // + // Instantiate a new media object to represent the given partition. + // + + IOMedia * media = getProvider(); + UInt64 partitionBase = 0; + UInt64 partitionBlockSize = OSSwapBigToHostInt32(labelMap->dl_secsize); + char * partitionHint = 0; + char * partitionName = 0; + UInt64 partitionSize = 0; + + // Compute the relative byte position and size of the new partition. + + partitionBase = OSSwapBigToHostInt32(partition->p_base) + + OSSwapBigToHostInt16(labelMap->dl_front); + partitionSize = OSSwapBigToHostInt32(partition->p_size); + partitionBase *= OSSwapBigToHostInt32(labelMap->dl_secsize); + partitionSize *= OSSwapBigToHostInt32(labelMap->dl_secsize); + partitionBase -= labelBase; + + // Look up a type for the new partition. 
+ + OSDictionary * hintTable = OSDynamicCast( + /* type */ OSDictionary, + /* instance */ getProperty(kIONeXTPartitionSchemeContentTable) ); + + if ( hintTable ) + { + OSString * hintValue = OSDynamicCast( + /* type */ OSString, + /* instance */ hintTable->getObject(partition->p_type) ); + + if ( hintValue ) partitionHint = (char *) hintValue->getCStringNoCopy(); + } + + // Look up a name for the new partition. + + if ( partition->p_mountpt[0] ) + partitionName = partition->p_mountpt; + else if ( labelMap->dl_label[0] ) + partitionName = labelMap->dl_label; + + // Create the new media object. + + IOMedia * newMedia = instantiateDesiredMediaObject( + /* partition */ partition, + /* partitionID */ partitionID, + /* labelBase */ labelBase, + /* labelMap */ labelMap ); + + if ( newMedia ) + { + if ( newMedia->init( + /* base */ partitionBase, + /* size */ partitionSize, + /* preferredBlockSize */ partitionBlockSize, + /* isEjectable */ media->isEjectable(), + /* isWhole */ false, + /* isWritable */ media->isWritable(), + /* contentHint */ partitionHint ) ) + { + // Set a name for this partition. + + char name[24]; + sprintf(name, "Untitled %ld", partitionID); + newMedia->setName(partitionName ? partitionName : name); + + // Set a location value (the partition number) for this partition. + + char location[12]; + sprintf(location, "%ld", partitionID); + newMedia->setLocation(location); + + // Set the "Partition ID" key for this partition. + + newMedia->setProperty(kIOMediaPartitionIDKey, partitionID, 32); + } + else + { + newMedia->release(); + newMedia = 0; + } + } + + return newMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMedia * IONeXTPartitionScheme::instantiateDesiredMediaObject( + partition_t * partition, + UInt32 partitionID, + UInt64 labelBase, + disk_label_t * labelMap ) +{ + // + // Allocate a new media object (called from instantiateMediaObject). 
+ // + + return new IOMedia; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +UInt16 IONeXTPartitionScheme::checksum16(void * data, UInt32 bytes) const +{ + // + // Compute a big-endian, 16-bit checksum over the specified data range. + // + + UInt32 sum1 = 0; + UInt32 sum2; + UInt16 * wp = (UInt16 *) data; + + while ( bytes >= 2 ) + { + sum1 += OSSwapBigToHostInt16(*wp); + bytes -= sizeof(UInt16); + wp++; + } + + sum2 = ((sum1 & 0xFFFF0000) >> 16) + (sum1 & 0xFFFF); + + if ( sum2 > 65535 ) + sum2 -= 65535; + + return sum2; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + 
+OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IONeXTPartitionScheme, 15); diff --git a/iokit/Families/IOStorage/IOPartitionScheme.cpp b/iokit/Families/IOStorage/IOPartitionScheme.cpp new file mode 100644 index 000000000..022d384e5 --- /dev/null +++ b/iokit/Families/IOStorage/IOPartitionScheme.cpp @@ -0,0 +1,418 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <IOKit/storage/IOPartitionScheme.h>
+
+#define super IOStorage
+OSDefineMetaClassAndStructors(IOPartitionScheme, IOStorage)
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+IOMedia * IOPartitionScheme::getProvider() const
+{
+    //
+    // Obtain this object's provider. We override the superclass's method
+    // to return a more specific subclass of OSObject -- an IOMedia. This
+    // method serves simply as a convenience to subclass developers.
+    //
+
+    return (IOMedia *) IOService::getProvider();
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOPartitionScheme::init(OSDictionary * properties = 0)
+{
+    //
+    // Initialize this object's minimal state.
+    //
+
+    if (super::init(properties) == false) return false;
+
+    _openLevel         = kIOStorageAccessNone;
+    _openReaders       = OSSet::withCapacity(16);
+    _openReaderWriters = OSSet::withCapacity(16);
+
+    if (_openReaders == 0 || _openReaderWriters == 0) return false;
+
+    return true;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+void IOPartitionScheme::free()
+{
+    //
+    // Free all of this object's outstanding resources.
+    //
+
+    if (_openReaders) _openReaders->release();
+    if (_openReaderWriters) _openReaderWriters->release();
+
+    super::free();
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOPartitionScheme::handleOpen(IOService * client,
+                                   IOOptionBits options,
+                                   void * argument)
+{
+    //
+    // The handleOpen method grants or denies permission to access this object
+    // to an interested client. The argument is an IOStorageAccess value that
+    // specifies the level of access desired -- reader or reader-writer.
+    //
+    // This method can be invoked to upgrade or downgrade the access level for
+    // an existing client as well.
The previous access level will prevail for + // upgrades that fail, of course. A downgrade should never fail. If the + // new access level should be the same as the old for a given client, this + // method will do nothing and return success. In all cases, one, singular + // close-per-client is expected for all opens-per-client received. + // + // This implementation replaces the IOService definition of handleOpen(). + // + // We are guaranteed that no other opens or closes will be processed until + // we make our decision, change our state, and return from this method. + // + + IOStorageAccess access = (IOStorageAccess) argument; + IOStorageAccess level; + + assert(client); + assert( access == kIOStorageAccessReader || + access == kIOStorageAccessReaderWriter ); + + // + // A partition scheme multiplexes the opens it receives from several clients + // and sends one open to the level below that satisfies the highest level of + // access. + // + + unsigned writers = _openReaderWriters->getCount(); + + if (_openReaderWriters->containsObject(client)) writers--; + if (access == kIOStorageAccessReaderWriter) writers++; + + level = (writers) ? kIOStorageAccessReaderWriter : kIOStorageAccessReader; + + // + // Determine whether the levels below us accept this open or not (we avoid + // the open if the required access is the access we already hold). + // + + if (_openLevel != level) // (has open level changed?) + { + IOStorage * provider = OSDynamicCast(IOStorage, getProvider()); + + if (provider && provider->open(this, options, level) == false) + return false; + } + + // + // Process the open. 
+ //
+
+    if (access == kIOStorageAccessReader)
+    {
+        _openReaders->setObject(client);
+
+        _openReaderWriters->removeObject(client);    // (for a downgrade)
+    }
+    else // (access == kIOStorageAccessReaderWriter)
+    {
+        _openReaderWriters->setObject(client);
+
+        _openReaders->removeObject(client);          // (for an upgrade)
+    }
+
+    _openLevel = level;
+
+    return true;
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+bool IOPartitionScheme::handleIsOpen(const IOService * client) const
+{
+    //
+    // The handleIsOpen method determines whether the specified client, or any
+    // client if none is specified, presently has an open on this object.
+    //
+    // This implementation replaces the IOService definition of handleIsOpen().
+    //
+    // We are guaranteed that no other opens or closes will be processed until
+    // we return from this method.
+    //
+
+    if (client == 0) return (_openLevel != kIOStorageAccessNone);
+
+    return ( _openReaderWriters->containsObject(client) ||
+             _openReaders->containsObject(client) );
+}
+
+// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+void IOPartitionScheme::handleClose(IOService * client, IOOptionBits options)
+{
+    //
+    // The handleClose method closes the client's access to this object.
+    //
+    // This implementation replaces the IOService definition of handleClose().
+    //
+    // We are guaranteed that no other opens or closes will be processed until
+    // we change our state and return from this method.
+    //
+
+    assert(client);
+
+    //
+    // Process the close.
+    //
+
+    if (_openReaderWriters->containsObject(client)) // (is it a reader-writer?)
+    {
+        _openReaderWriters->removeObject(client);
+    }
+    else if (_openReaders->containsObject(client)) // (is the client a reader?)
+    {
+        _openReaders->removeObject(client);
+    }
+    else // (is the client an imposter?)
+    {
+        assert(0);
+        return;
+    }
+
+    //
+    // Reevaluate the open we have on the level below us.
If no opens remain, + // we close, or if no reader-writer remains, but readers do, we downgrade. + // + + IOStorageAccess level; + + if (_openReaderWriters->getCount()) level = kIOStorageAccessReaderWriter; + else if (_openReaders->getCount()) level = kIOStorageAccessReader; + else level = kIOStorageAccessNone; + + if (_openLevel != level) // (has open level changed?) + { + IOStorage * provider = OSDynamicCast(IOStorage, getProvider()); + + assert(level != kIOStorageAccessReaderWriter); + + if (provider) + { + if (level == kIOStorageAccessNone) // (is a close in order?) + { + provider->close(this, options); + } + else // (is a downgrade in order?) + { + bool success; + success = provider->open(this, 0, level); + assert(success); // (should never fail, unless avoided deadlock) + } + } + + _openLevel = level; // (set new open level) + } +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOPartitionScheme::read(IOService * /* client */, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // Read data from the storage object at the specified byte offset into the + // specified buffer, asynchronously. When the read completes, the caller + // will be notified via the specified completion action. + // + // The buffer will be retained for the duration of the read. + // + // For simple partition schemes, the default behavior is to simply pass the + // read through to the provider media. More complex partition schemes such + // as RAID will need to do extra processing here. + // + + getProvider()->read(this, byteStart, buffer, completion); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOPartitionScheme::write(IOService * /* client */, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) +{ + // + // Write data into the storage object at the specified byte offset from the + // specified buffer, asynchronously. 
When the write completes, the caller + // will be notified via the specified completion action. + // + // The buffer will be retained for the duration of the write. + // + // For simple partition schemes, the default behavior is to simply pass the + // write through to the provider media. More complex partition schemes such + // as RAID will need to do extra processing here. + // + + getProvider()->write(this, byteStart, buffer, completion); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOPartitionScheme::synchronizeCache(IOService * client) +{ + return getProvider()->synchronizeCache(this); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 15); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 16); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 17); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 18); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 19); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 20); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 21); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 22); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 23); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 24); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 25); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 26); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 27); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 28); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 29); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 30); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOPartitionScheme, 31); diff --git a/iokit/Families/IOStorage/IOStorage.cpp b/iokit/Families/IOStorage/IOStorage.cpp new file mode 100644 index 000000000..535c8878c --- /dev/null +++ b/iokit/Families/IOStorage/IOStorage.cpp @@ -0,0 +1,194 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +#define super IOService +OSDefineMetaClass(IOStorage, IOService) +OSDefineAbstractStructors(IOStorage, IOService) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Local Functions + +static void storageCompletion(void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount) +{ + // + // Internal completion routine for synchronous versions of read and write. + // + + if (parameter) *((UInt64 *)parameter) = actualByteCount; + ((IOSyncer *)target)->signal(status); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOStorage::open(IOService * client, + IOOptionBits options, + IOStorageAccess access) +{ + // + // Ask the storage object for permission to access its contents; the method + // is equivalent to IOService::open(), but with the correct parameter types. + // + + return super::open(client, options, (void *) access); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOStorage::read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + UInt64 * actualByteCount = 0) +{ + // + // Read data from the storage object at the specified byte offset into the + // specified buffer, synchronously. When the read completes, this method + // will return to the caller. The actual byte count field is optional. 
+ // + + IOStorageCompletion completion; + IOSyncer * completionSyncer; + + // Initialize the lock we will synchronize against. + + completionSyncer = IOSyncer::create(); + + // Fill in the completion information for this request. + + completion.target = completionSyncer; + completion.action = storageCompletion; + completion.parameter = actualByteCount; + + // Issue the asynchronous read. + + read(client, byteStart, buffer, completion); + + // Wait for the read to complete. + + return completionSyncer->wait(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOStorage::write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + UInt64 * actualByteCount = 0) +{ + // + // Write data into the storage object at the specified byte offset from the + // specified buffer, synchronously. When the write completes, this method + // will return to the caller. The actual byte count field is optional. + // + + IOStorageCompletion completion; + IOSyncer * completionSyncer; + + // Initialize the lock we will synchronize against. + + completionSyncer = IOSyncer::create(); + + // Fill in the completion information for this request. + + completion.target = completionSyncer; + completion.action = storageCompletion; + completion.parameter = actualByteCount; + + // Issue the asynchronous write. + + write(client, byteStart, buffer, completion); + + // Wait for the write to complete. 
+ + return completionSyncer->wait(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 0); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 1); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 2); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 3); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 4); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 5); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 6); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 7); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 8); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 9); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 10); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 11); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 12); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 13); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - + +OSMetaClassDefineReservedUnused(IOStorage, 14); + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +OSMetaClassDefineReservedUnused(IOStorage, 15); diff --git a/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp b/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp new file mode 100644 index 000000000..a7ea145a1 --- /dev/null +++ b/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + + +static IOReturn IOWatchDogTimerSleepHandler(void *target, void *refCon, + UInt32 messageType, + IOService *provider, + void *messageArgument, + vm_size_t argSize); + + +#define kWatchDogEnabledProperty "IOWatchDogEnabled" + + +#define super IOService + +OSDefineMetaClassAndAbstractStructors(IOWatchDogTimer, IOService); + +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 0); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 1); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 2); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 3); + +bool IOWatchDogTimer::start(IOService *provider) +{ + if (!super::start(provider)) return false; + + notifier = registerSleepWakeInterest(IOWatchDogTimerSleepHandler, this); + if (notifier == 0) return false; + + setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); + setWatchDogTimer(0); + + registerService(); + + return true; +} + +void IOWatchDogTimer::stop(IOService *provider) +{ + setWatchDogTimer(0); + notifier->remove(); +} + +IOReturn IOWatchDogTimer::setProperties(OSObject *properties) +{ + OSNumber *theNumber; + UInt32 theValue; + IOReturn result; + + result = IOUserClient::clientHasPrivilege(current_task(), + kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) return kIOReturnNotPrivileged; + + theNumber = OSDynamicCast(OSNumber, properties); + if (theNumber == 0) return kIOReturnBadArgument; + + theValue = theNumber->unsigned32BitValue(); + if (theValue == 0) { + setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); + } else { + setProperty(kWatchDogEnabledProperty, kOSBooleanTrue); + } + + setWatchDogTimer(theValue); + + return kIOReturnSuccess; +} + +static IOReturn IOWatchDogTimerSleepHandler(void *target, void */*refCon*/, + UInt32 messageType, + IOService */*provider*/, + void *messageArgument, + vm_size_t /*argSize*/) +{ + IOWatchDogTimer *watchDogTimer = (IOWatchDogTimer *)target; + sleepWakeNote *swNote = 
(sleepWakeNote *)messageArgument; + + if (messageType != kIOMessageSystemWillSleep) return kIOReturnUnsupported; + + watchDogTimer->setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); + watchDogTimer->setWatchDogTimer(0); + + swNote->returnValue = 0; + acknowledgeSleepWakeNotification(swNote->powerRef); + + return kIOReturnSuccess; +} diff --git a/iokit/IOKit/IOBSD.h b/iokit/IOKit/IOBSD.h new file mode 100644 index 000000000..005faf0c3 --- /dev/null +++ b/iokit/IOKit/IOBSD.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOBSD_H +#define _IOBSD_H + +/* + * bsd-related registry properties + */ + +#define kIOBSDNameKey "BSD Name" // (an OSString) +#define kIOBSDNamesKey "BSD Names" // (an OSDictionary of OSString's, for links) +#define kIOBSDMajorKey "BSD Major" // (an OSNumber) +#define kIOBSDMinorKey "BSD Minor" // (an OSNumber) +#define kIOBSDUnitKey "BSD Unit" // (an OSNumber) + +#define kIOBSDName "BSD Name" ///d:deprecated +#define kIOBSDMajor "BSD Major" ///d:deprecated +#define kIOBSDMinor "BSD Minor" ///d:deprecated + +#endif /* !_IOBSD_H */ diff --git a/iokit/IOKit/IOBufferMemoryDescriptor.h b/iokit/IOKit/IOBufferMemoryDescriptor.h new file mode 100644 index 000000000..733fa1dd1 --- /dev/null +++ b/iokit/IOKit/IOBufferMemoryDescriptor.h @@ -0,0 +1,224 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOBUFFERMEMORYDESCRIPTOR_H +#define _IOBUFFERMEMORYDESCRIPTOR_H + +#include + +enum { + kIOMemoryDirectionMask = 0x0000000f, + kIOMemoryPhysicallyContiguous = 0x00000010, + kIOMemoryPageable = 0x00000020, + kIOMemorySharingTypeMask = 0x000f0000, + kIOMemoryUnshared = 0x00000000, + kIOMemoryKernelUserShared = 0x00010000, +}; + + +class IOBufferMemoryDescriptor : public IOGeneralMemoryDescriptor +{ + OSDeclareDefaultStructors(IOBufferMemoryDescriptor); + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * reserved; + +protected: + void * _buffer; + vm_size_t _capacity; + vm_offset_t _alignment; + IOOptionBits _options; + IOPhysicalAddress * _physAddrs; + unsigned _physSegCount; + +private: + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 0); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 1); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 2); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 3); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 4); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 5); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 6); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 7); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 8); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 9); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 10); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 11); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 12); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 13); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 14); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 15); 
+ +protected: + virtual void free(); + + virtual bool initWithAddress( void * address, /* not supported */ + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithAddress( vm_address_t address, /* not supported */ + IOByteCount withLength, + IODirection withDirection, + task_t withTask ); + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, /* not supported */ + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithPhysicalRanges( + IOPhysicalRange * ranges, /* not supported */ + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); + + virtual bool initWithRanges( IOVirtualRange * ranges, /* not supported */ + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false ); + + IOGeneralMemoryDescriptor::withAddress; /* not supported */ + IOGeneralMemoryDescriptor::withPhysicalAddress; /* not supported */ + IOGeneralMemoryDescriptor::withPhysicalRanges; /* not supported */ + IOGeneralMemoryDescriptor::withRanges; /* not supported */ + IOGeneralMemoryDescriptor::withSubRange; /* not supported */ + +public: + + /* + * withOptions: + * + * Returns a new IOBufferMemoryDescriptor with a buffer large enough to + * hold capacity bytes. The descriptor's length is initially set to the + * capacity. + */ + virtual bool initWithOptions( IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment); + + static IOBufferMemoryDescriptor * withOptions( IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment = 1); + + /* + * withCapacity: + * + * Returns a new IOBufferMemoryDescriptor with a buffer large enough to + * hold capacity bytes. The descriptor's length is initially set to the + * capacity. 
+ */ + static IOBufferMemoryDescriptor * withCapacity( + vm_size_t capacity, + IODirection withDirection, + bool withContiguousMemory = false); + /* + * initWithBytes: + * + * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied). + * The descriptor's length and capacity are set to the input buffer's size. + */ + virtual bool initWithBytes(const void * bytes, + vm_size_t withLength, + IODirection withDirection, + bool withContiguousMemory = false); + + /* + * withBytes: + * + * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied). + * The descriptor's length and capacity are set to the input buffer's size. + */ + static IOBufferMemoryDescriptor * withBytes( + const void * bytes, + vm_size_t withLength, + IODirection withDirection, + bool withContiguousMemory = false); + + /* + * setLength: + * + * Change the buffer length of the memory descriptor. When a new buffer + * is created, the initial length of the buffer is set to be the same as + * the capacity. The length can be adjusted via setLength for a shorter + * transfer (there is no need to create more buffer descriptors when you + * can reuse an existing one, even for different transfer sizes). Note + * that the specified length must not exceed the capacity of the buffer. + */ + virtual void setLength(vm_size_t length); + + /* + * setDirection: + * + * Change the direction of the transfer. This method allows one to redirect + * the descriptor's transfer direction. This eliminates the need to destroy + * and create new buffers when different transfer directions are needed. 
+ */ + virtual void setDirection(IODirection direction); + + /* + * getCapacity: + * + * Get the buffer capacity + */ + virtual vm_size_t getCapacity() const; + + /* + * getBytesNoCopy: + * + * Return the virtual address of the beginning of the buffer + */ + virtual void *getBytesNoCopy(); + + /* + * getBytesNoCopy: + * + * Return the virtual address of an offset from the beginning of the buffer + */ + virtual void *getBytesNoCopy(vm_size_t start, vm_size_t withLength); + + /* + * appendBytes: + * + * Add some data to the end of the buffer. This method automatically + * maintains the memory descriptor buffer length. Note that appendBytes + * will not copy past the end of the memory descriptor's current capacity. + */ + virtual bool appendBytes(const void *bytes, vm_size_t withLength); + + /* + * getPhysicalSegment: + * + * Get the physical address of the buffer, relative to the current position. + * If the current position is at the end of the buffer, a zero is returned. + */ + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length); +}; + +#endif /* !_IOBUFFERMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IOCPU.h b/iokit/IOKit/IOCPU.h new file mode 100644 index 000000000..c78ea6ac0 --- /dev/null +++ b/iokit/IOKit/IOCPU.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_CPU_H +#define _IOKIT_CPU_H + +extern "C" { +#include +#include +} + +#include +#include + +enum { + kIOCPUStateUnregistered = 0, + kIOCPUStateUninitalized, + kIOCPUStateStopped, + kIOCPUStateRunning, + kIOCPUStateCount +}; + +class IOCPUInterruptController; + +extern IOCPUInterruptController *gIOCPUInterruptController; + +class IOCPU : public IOService +{ + OSDeclareAbstractStructors(IOCPU); + +private: + OSArray *_cpuGroup; + UInt32 _cpuNumber; + UInt32 _cpuState; + +protected: + IOService *cpuNub; + processor_t machProcessor; + ipi_handler_t ipi_handler; + + struct ExpansionData { }; + ExpansionData *reserved; + + virtual void setCPUNumber(UInt32 cpuNumber); + virtual void setCPUState(UInt32 cpuState); + +public: + static void initCPUs(void); + + virtual bool start(IOService *provider); + virtual IOReturn setProperties(OSObject *properties); + virtual void initCPU(bool boot) = 0; + virtual void quiesceCPU(void) = 0; + virtual kern_return_t startCPU(vm_offset_t start_paddr, + vm_offset_t arg_paddr) = 0; + virtual void haltCPU(void) = 0; + virtual void signalCPU(IOCPU *target); + virtual void enableCPUTimeBase(bool enable); + + virtual UInt32 getCPUNumber(void); + virtual UInt32 getCPUState(void); + virtual OSArray *getCPUGroup(void); + virtual UInt32 getCPUGroupSize(void); + virtual processor_t getMachProcessor(void); + + virtual const OSSymbol 
*getCPUName(void) = 0; + + OSMetaClassDeclareReservedUnused(IOCPU, 0); + OSMetaClassDeclareReservedUnused(IOCPU, 1); + OSMetaClassDeclareReservedUnused(IOCPU, 2); + OSMetaClassDeclareReservedUnused(IOCPU, 3); + OSMetaClassDeclareReservedUnused(IOCPU, 4); + OSMetaClassDeclareReservedUnused(IOCPU, 5); + OSMetaClassDeclareReservedUnused(IOCPU, 6); + OSMetaClassDeclareReservedUnused(IOCPU, 7); +}; + +void IOCPUSleepKernel(void); + + +class IOCPUInterruptController : public IOInterruptController +{ + OSDeclareDefaultStructors(IOCPUInterruptController); + +private: + int enabledCPUs; + +protected: + int numCPUs; + IOCPU **cpus; + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual IOReturn initCPUInterruptController(int sources); + virtual void registerCPUInterruptController(void); + virtual void setCPUInterruptProperties(IOService *service); + virtual void enableCPUInterrupt(IOCPU *cpu); + + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon); + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType); + + virtual IOReturn enableInterrupt(IOService *nub, int source); + virtual IOReturn disableInterrupt(IOService *nub, int source); + virtual IOReturn causeInterrupt(IOService *nub, int source); + + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, + int source); + + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 0); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 1); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 2); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 3); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 4); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 5); +}; + +#endif /* ! 
_IOKIT_CPU_H */ diff --git a/iokit/IOKit/IOCatalogue.h b/iokit/IOKit/IOCatalogue.h new file mode 100644 index 000000000..1b72688f4 --- /dev/null +++ b/iokit/IOKit/IOCatalogue.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOCATALOGUE_H +#define _IOKIT_IOCATALOGUE_H + +#include +#include +#include +#include +#include +#include + +#include + +class IOService; + +/*! + @class IOCatalogue + @abstract In-kernel database for IOKit driver personalities. + @discussion The IOCatalogue is a database which contains all IOKit driver personalities. IOService uses this resource when matching devices to their associated drivers. +*/ +class IOCatalogue : public OSObject +{ + OSDeclareDefaultStructors(IOCatalogue) + +private: + OSCollectionIterator * kernelTables; + OSArray * array; + IOLock * lock; + SInt32 generation; + +public: + /*! 
+ @function initialize + @abstract Creates and initializes the database object and poputates it with in-kernel driver personalities. + */ + static void initialize( void ); + + /*! + @function init + @abstract Initializes the database object. + @param initArray The initial array of driver personalities to populate the database. + */ + bool init( OSArray * initArray ); + + /*! + @function free + @abstract Cleans up the database and deallocates memory allocated at initialization. This is never called in normal operation of the system. + */ + void free( void ); + + /*! + @function findDrivers + @abstract This is the primary entry point for IOService. + @param service + @param generationCount Returns a reference to the generation count of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated. + @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver. + */ + OSOrderedSet * findDrivers( IOService * service, SInt32 * generationCount ); + + /*! + @function findDrivers + @abstract A more general purpose interface which allows one to retreive driver personalities based the intersection of the 'matching' dictionary and the personality's own property list. + @param matching A dictionary containing only keys and values which are to be used for matching. For example, a matching dictionary containing 'IOProviderClass'='IOPCIDevice' will return all personalities with an IOProviderClass key and a value of IOPCIDevice. + @param generationCount Returns a reference to the current generation of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated. + @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver. 
+ */ + OSOrderedSet * findDrivers( OSDictionary * matching, SInt32 * generationCount ); + + /*! + @function addDrivers + @abstract Adds an array of driver personalities to the database. + @param array Array of driver personalities to be added to the database. + @param doNubMatchng Start matching process after personalities have been added. + @result Returns true if driver personality was added to the database successfully. Failure is due to a memory allocation failure. + */ + bool addDrivers( OSArray * array, bool doNubMatching = true ); + + /*! + @function removeDrivers + @abstract Remove driver personalities from the database based on matching information provided. + @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will remove all personalities which have the key 'IOProviderClass' equal to 'IOPCIDevice'. + @param doNubMatchng Start matching process after personalities have been removed. Matching criteria is based on IOProviderClass of those personalities which were removed. This is to allow drivers which haven't been matched to match against NUB's which were blocked by the previous personalities. + @result Returns true if personality was removed successfully. Failure is due to a memory allocation failure. + */ + bool removeDrivers( OSDictionary * matching, bool doNubMatching = true ); + + /*! + @function getGenerationCount + @abstract Get the current generation count of the database. + */ + SInt32 getGenerationCount( void ) const; + + /*! + @function isModuleLoaded + @abstract Reports if a kernel module has been loaded. + @param moduleName Name of the module. + @result Returns true if the associated kernel module has been loaded into the kernel. + */ + bool isModuleLoaded( OSString * moduleName ) const; + + /*! + @function isModuleLoaded + @abstract Reports if a kernel module has been loaded. 
+ @param moduleName Name of the module. + @result Returns true if the associated kernel module has been loaded into the kernel. + */ + bool isModuleLoaded( const char * moduleName ) const; + + /*! + @function isModuleLoaded + @abstract Reports if a kernel module has been loaded for a particular personality. + @param driver A driver personality's property list. + @result Returns true if the associated kernel module has been loaded into the kernel for a particular driver personality on which it depends. + */ + bool isModuleLoaded( OSDictionary * driver ) const; + + /*! + @function moduleHasLoaded + @abstract Callback function called after a IOKit dependent kernel module is loaded. + @param name Name of the kernel module. + */ + void moduleHasLoaded( OSString * name ); + + /*! + @function moduleHasLoaded + @abstract Callback function called after a IOKit dependent kernel module is loaded. + @param name Name of the kernel module. + */ + void moduleHasLoaded( const char * name ); + + /*! + @function terminateDrivers + @abstract Terminates all instances of a driver which match the contents of the matching dictionary. Does not unload module. + @param matching Dictionary containing the matching criteria. + */ + IOReturn terminateDrivers( OSDictionary * matching ); + + /*! + @function terminateDriversForModule + @abstract Terminates all instances of a driver which depends on a particular module and unloads the module. + @param moduleName Name of the module which is used to determine which driver instances to terminate and unload. + @param unload Flag to cause the actual unloading of the module. + */ + IOReturn terminateDriversForModule( OSString * moduleName, bool unload = true); + + /*! + @function terminateDriversForModule + @abstract Terminates all instances of a driver which depends on a particular module and unloads the module. + @param moduleName Name of the module which is used to determine which driver instances to terminate and unload. 
+ @param unload Flag to cause the actual unloading of the module. + */ + IOReturn terminateDriversForModule( const char * moduleName, bool unload = true); + + /*! + @function startMatching + @abstract Starts an IOService matching thread where matching keys and values are provided by the matching dictionary. + @param matching A dictionary containing keys and values to match against. + */ + bool startMatching( OSDictionary * matching ); + + /*! + @function reset + @abstract Return the Catalogue to its initial state. + */ + void reset(void); + + /*! + @function serialize + @abstract Serializes the catalog for transport to the user. + @param s The serializer object. + @result Returns false if unable to serialize database, most likely due to memory shortage. + */ + virtual bool serialize(OSSerialize * s) const; + + + /*! + @function recordStartupExtensions + @abstract Records extensions made available by the primary booter. +

+ This function is for internal use by the kernel startup linker. + Kernel extensions should never call it. + @result Returns true if startup extensions were successfully recorded, + false if not. + */ + virtual bool recordStartupExtensions(void); + + /*! + @function addExtensionsFromArchive() + @abstract Records an archive of extensions, as from device ROM. +

+ This function is currently for internal use. + Kernel extensions should never call it. + @param mkext An OSData object containing a multikext archive. + @result Returns true if mkext was properly unserialized and its + contents recorded, false if not. + */ + virtual bool addExtensionsFromArchive(OSData * mkext); + + + /*! + @function removeKernelLinker + @abstract Removes from memory all code and data related to + boot-time loading of kernel extensions. kextd triggers + this when it first starts in order to pass responsibility + for loading extensions from the kernel itself to kextd. + @result Returns KERN_SUCCESS if the kernel linker is successfully + removed or wasn't present, KERN_FAILURE otherwise. + */ + virtual kern_return_t removeKernelLinker(void); + +private: + + /*! + @function unloadModule + @abstract Unloads the reqested module if no driver instances are currently depending on it. + @param moduleName An OSString containing the name of the module to unload. + */ + IOReturn unloadModule( OSString * moduleName ) const; + + +}; + +__BEGIN_DECLS +/*! + @function IOKitRelocStart + @abstract Deprecated API. +*/ +kmod_start_func_t IOKitRelocStart; +/*! + @function IOKitRelocStop + @abstract Deprecated API. +*/ +kmod_stop_func_t IOKitRelocStop; +__END_DECLS + +extern const OSSymbol * gIOClassKey; +extern const OSSymbol * gIOProbeScoreKey; +extern IOCatalogue * gIOCatalogue; + +#endif /* ! _IOKIT_IOCATALOGUE_H */ diff --git a/iokit/IOKit/IOCommand.h b/iokit/IOKit/IOCommand.h new file mode 100644 index 000000000..f6e58d2e0 --- /dev/null +++ b/iokit/IOKit/IOCommand.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 2001-01-18 gvdl Made the primary queue pointer public, to be used when + * Ownership is clear. + * 11/13/2000 CJS Created IOCommand class and implementation + * + */ + +/*! + * @header IOCommand + * @abstract + * This header contains the IOCommand class definition. + */ + +#ifndef _IOKIT_IO_COMMAND_H_ +#define _IOKIT_IO_COMMAND_H_ + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include + +class IOCommandPool; + +/*! + * @class IOCommand + * @abstract + * This class is an abstract class which represents an I/O command. + * @discussion + * This class is an abstract class which represents an I/O command passed + * from a device driver to a controller. All controller commands (e.g. IOATACommand) + * should inherit from this class. + */ + +class IOCommand : public OSObject +{ + OSDeclareAbstractStructors(IOCommand) + +protected: + virtual bool init(void); + +public: +/*! @var fCommandChain + This variable is used by the current 'owner' to queue the command. During the life cycle of a command it moves through a series of queues. This is the queue pointer for it. Only valid while 'ownership' is clear. For instance a IOCommandPool uses this pointer to maintain its list of free commands. 
May be manipulated using the kern/queue.h macros */ + queue_chain_t fCommandChain; /* used to queue commands */ +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* _IOKIT_IO_COMMAND_H_ */ diff --git a/iokit/IOKit/IOCommandGate.h b/iokit/IOKit/IOCommandGate.h new file mode 100644 index 000000000..9542199b1 --- /dev/null +++ b/iokit/IOKit/IOCommandGate.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*[ + 1999-8-10 Godfrey van der Linden(gvdl) + Created. +]*/ +/*! @language embedded-c++ */ + +#ifndef _IOKIT_IOCOMMANDGATE_H +#define _IOKIT_IOCOMMANDGATE_H + +#include + +/*! + @class IOCommandGate : public IOEventSource + @abstract Single-threaded work-loop client request mechanism. + @discussion An IOCommandGate instance is an extremely light way mechanism +that executes an action on the driver's work-loop. 'On the work-loop' is +actually a lie but the work-loop single threaded semantic is maintained for this +event source. Using the work-loop gate rather than execution by the workloop. 
+The command gate tests for a potential self dead lock by checking if the +runCommand request is made from the work-loop's thread; it doesn't check for a +mutual dead lock though where a pair of work loops dead lock each other. +

+ The IOCommandGate is a lighter weight version of the IOCommandQueue and +should be used in preference. Generally use a command queue whenever you need a +client to submit a request to a work loop. A typical command gate action would +check if the hardware is active; if so, it will add the request to a pending +queue internal to the device or the device's family. Otherwise, if the hardware +is inactive, then this request can be acted upon immediately. +

+ CAUTION: The runAction and runCommand functions can not be called from an interrupt context. + +*/ +class IOCommandGate : public IOEventSource +{ + OSDeclareDefaultStructors(IOCommandGate) + +public: +/*! + @typedef Action + @discussion Type and arguments of callout C function that is used when +a runCommand is executed by a client. Cast to this type when you want a C++ +member function to be used. Note the arg1 - arg3 parameters are straight pass +through from the runCommand to the action callout. + @param owner + Target of the function, can be used as a refcon. The owner is set +during initialisation of the IOCommandGate instance. Note if a C++ function +was specified this parameter is implicitly the first paramter in the target +member function's parameter list. + @param arg0 Argument to action from run operation. + @param arg1 Argument to action from run operation. + @param arg2 Argument to action from run operation. + @param arg3 Argument to action from run operation. +*/ + typedef IOReturn (*Action)(OSObject *owner, + void *arg0, void *arg1, + void *arg2, void *arg3); + +protected: +/*! + @function checkForWork + @abstract Not used, $link IOEventSource::checkForWork(). */ + virtual bool checkForWork(); + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: +/*! @function commandGate + @abstract Factory method to create and initialise an IOCommandGate, See $link init. + @result Returns a pointer to the new command gate if sucessful, 0 otherwise. */ + static IOCommandGate *commandGate(OSObject *owner, Action action = 0); + +/*! @function init + @abstract Class initialiser. + @discussion Initialiser for IOCommandGate operates only on newly 'newed' +objects. Shouldn't be used to re-init an existing instance. 
+ @param owner Owner of this, newly created, instance of the IOCommandGate. This argument will be used as the first parameter in the action callout. + @param action + Pointer to a C function that is called whenever a client of the +IOCommandGate calls runCommand. NB Can be a C++ member function but caller +must cast the member function to $link IOCommandGate::Action and they will get a +compiler warning. Defaults to zero, see $link IOEventSource::setAction. + @result True if inherited classes initialise successfully. */ + virtual bool init(OSObject *owner, Action action = 0); + +/*! @function runCommand + @abstract Single thread a command with the target work-loop. + @discussion Client function that causes the current action to be called in +a single threaded manner. Beware the work-loop's gate is recursive and command +gates can cause direct or indirect re-entrancy. When the executing on a +client's thread runCommand will sleep until the work-loop's gate opens for +execution of client actions, the action is single threaded against all other +work-loop event sources. + @param arg0 Parameter for action of command gate, defaults to 0. + @param arg1 Parameter for action of command gate, defaults to 0. + @param arg2 Parameter for action of command gate, defaults to 0. + @param arg3 Parameter for action of command gate, defaults to 0. + @result kIOReturnSuccess if successful. kIOReturnNotPermitted if this +event source is currently disabled, kIOReturnNoResources if no action available. +*/ + virtual IOReturn runCommand(void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0); + +/*! @function runAction + @abstract Single thread a call to an action with the target work-loop. + @discussion Client function that causes the given action to be called in +a single threaded manner. Beware the work-loop's gate is recursive and command +gates can cause direct or indirect re-entrancy. 
When the executing on a +client's thread runCommand will sleep until the work-loop's gate opens for +execution of client actions, the action is single threaded against all other +work-loop event sources. + @param action Pointer to function to be executed in work-loop context. + @param arg0 Parameter for action parameter, defaults to 0. + @param arg1 Parameter for action parameter, defaults to 0. + @param arg2 Parameter for action parameter, defaults to 0. + @param arg3 Parameter for action parameter, defaults to 0. + @result kIOReturnSuccess if successful. kIOReturnBadArgument if action is not defined, kIOReturnNotPermitted if this event source is currently disabled. +*/ + virtual IOReturn runAction(Action action, + void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0); + +/*! @function attemptCommand + @abstract Single thread a command with the target work-loop. + @discussion Client function that causes the current action to be called in +a single threaded manner. Beware the work-loop's gate is recursive and command +gates can cause direct or indirect re-entrancy. When the executing on a +client's thread attemptCommand will fail if the work-loop's gate is open. + @param arg0 Parameter for action of command gate, defaults to 0. + @param arg1 Parameter for action of command gate, defaults to 0. + @param arg2 Parameter for action of command gate, defaults to 0. + @param arg3 Parameter for action of command gate, defaults to 0. + @result kIOReturnSuccess if successful. kIOReturnNotPermitted if this event source is currently disabled, kIOReturnNoResources if no action available, kIOReturnCannotLock if lock attempt fails. +*/ + virtual IOReturn attemptCommand(void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0); + +/*! @function attemptAction + @abstract Single thread a call to an action with the target work-loop. + @discussion Client function that causes the given action to be called in +a single threaded manner. 
Beware the work-loop's gate is recursive and command +gates can cause direct or indirect re-entrancy. When the executing on a +client's thread attemptCommand will fail if the work-loop's gate is open. + @param action Pointer to function to be executed in work-loop context. + @param arg0 Parameter for action parameter, defaults to 0. + @param arg1 Parameter for action parameter, defaults to 0. + @param arg2 Parameter for action parameter, defaults to 0. + @param arg3 Parameter for action parameter, defaults to 0. + @result kIOReturnSuccess if successful. kIOReturnBadArgument if action is not defined, kIOReturnNotPermitted if this event source is currently disabled, kIOReturnCannotLock if lock attempt fails. + +*/ + virtual IOReturn attemptAction(Action action, + void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0); + +/*! @function commandSleep + @abstract Put a thread that is currently holding the command gate to sleep. + @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs then the commandGate is closed before the returns. + @param event Pointer to an address. + @param interruptible THREAD_UNINT, THREAD_INTERRUPTIBLE or THREAD_ABORTSAFE, defaults to THREAD_ABORTSAFE. + @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted by clear_wait, THREAD_RESTART - restart operation entirely, kIOReturnNotPermitted if the calling thread does not hold the command gate. */ + virtual IOReturn commandSleep(void *event, + UInt32 interruptible = THREAD_ABORTSAFE); + +/*! @function commandWakeup + @abstract Wakeup one or more threads that are asleep on an event. + @param event Pointer to an address. + @param onlyOneThread true to only wake up at most one thread, false otherwise. 
*/ + virtual void commandWakeup(void *event, bool oneThread = false); + +private: + OSMetaClassDeclareReservedUnused(IOCommandGate, 0); + OSMetaClassDeclareReservedUnused(IOCommandGate, 1); + OSMetaClassDeclareReservedUnused(IOCommandGate, 2); + OSMetaClassDeclareReservedUnused(IOCommandGate, 3); + OSMetaClassDeclareReservedUnused(IOCommandGate, 4); + OSMetaClassDeclareReservedUnused(IOCommandGate, 5); + OSMetaClassDeclareReservedUnused(IOCommandGate, 6); + OSMetaClassDeclareReservedUnused(IOCommandGate, 7); +}; + +#endif /* !_IOKIT_IOCOMMANDGATE_H */ diff --git a/iokit/IOKit/IOCommandPool.h b/iokit/IOKit/IOCommandPool.h new file mode 100644 index 000000000..7703d3d6a --- /dev/null +++ b/iokit/IOKit/IOCommandPool.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 2001-01-17 gvdl Re-implement on IOCommandGate::commandSleep + * 11/13/2000 CJS Created IOCommandPool class and implementation + * + */ + +/*! 
+ * @header IOCommandPool + * @abstract + * This header contains the IOCommandPool class definition. + */ + +#ifndef _IOKIT_IO_COMMAND_POOL_H_ +#define _IOKIT_IO_COMMAND_POOL_H_ + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include +#include +#include +#include +#include + +/*! + * @class IOCommandPool + * @abstract + * The IOCommandPool class is used to manipulate a pool of commands which + * inherit from IOCommand. + * @discussion + * The IOCommandPool class is used to manipulate a pool of commands which + * inherit from IOCommand. It includes a factory method to create a pool + * of a certain size. Once the factory method is invoked, the semaphore + * is set to zero. The caller must then put commands in the pool by creating + * the command (via the controller's factory method or a memory allocation) + * and calling the returnCommand method with the newly created command as its + * argument. + */ + +class IOCommandPool : public OSObject +{ + + OSDeclareDefaultStructors(IOCommandPool) + + +protected: + + queue_head_t fQueueHead; /* head of the queue of elements available */ + UInt32 fSleepers; /* Count of threads sleeping on this pool */ + IOCommandGate *fSerializer; /* command gate used for serializing pool access */ + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of the IOEventSource in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + /*! + * @defined kIOCommandPoolDefaultSize + * @abstract + * kIOCommandPoolDefaultSize is the default size of any command pool. + * @discussion + * kIOCommandPoolDefaultSize is the default size of any command pool. + * The default size was determined to be the smallest size for which + * a pool makes sense. + */ + + static const UInt32 kIOCommandPoolDefaultSize = 2; + + /* + * Free all of this object's outstanding resources. 
+ */ + + virtual void free(void); + + +public: + + /*! + * @function initWithWorkLoop + * @abstract Primary initialiser for an IOCommandPool Object + * @discussion Primary initialiser for an IOCommandPool. + * Should probably use IOCommandPool::withWorkLoop() as it is easier to use. + * @param inWorkLoop + * The workloop that this command pool should synchronise with. + * @result true if command pool was sucessfully initialised. + */ + virtual bool initWithWorkLoop(IOWorkLoop *workLoop); + + /*! + * @function withWorkLoop + * @abstract Primary factory method for the IOCommandPool class + * @discussion + * The withWorkLoop method is what is known as a factory method. It creates + * a new instance of an IOCommandPool and returns a pointer to that object. + * @param inWorkLoop + * The workloop that this command pool should synchronise with. + * @result + * Returns a pointer to an instance of IOCommandPool if successful, + * otherwise NULL. + */ + + static IOCommandPool *withWorkLoop(IOWorkLoop *inWorkLoop); + + /*! + * @function init + * @abstract Should never be used, obsolete See initWithWorkLoop + */ + virtual bool init(IOService *inOwner, + IOWorkLoop *inWorkLoop, + UInt32 inSize = kIOCommandPoolDefaultSize); + + /*! + * @function withWorkLoop + * @abstract Should never be used, obsolete See IOCommandPool::withWorkLoop + */ + static IOCommandPool *commandPool(IOService *inOwner, + IOWorkLoop *inWorkLoop, + UInt32 inSize = kIOCommandPoolDefaultSize); + + + /*! + * @function getCommand + * @discussion + * The getCommand method is used to get a pointer to an object of type IOCommand + * from the pool. + * @param blockForCommand + * If the caller would like to have its thread slept until a command is + * available, it should pass true, else false + * @result + * If the caller passes true in blockForCommand, getCommand guarantees that + * the result will be a pointer to an IOCommand object from the pool. 
If + * the caller passes false, s/he is responsible for checking whether a non-NULL + * pointer was returned. + */ + + virtual IOCommand *getCommand(bool blockForCommand = true); + + /*! + * @function returnCommand + * @discussion + * The returnCommand method is used to place an object of type IOCommand + * into the pool, whether it be the first time, or the 1000th time. + * @param command + * The command to place in the pool. + */ + + virtual void returnCommand(IOCommand *command); + +protected: + + /*! + * @function gatedGetCommand + * @discussion + * The gatedGetCommand method is used to serialize the extraction of a + * command from the pool behind a command gate. + * runAction-ed by getCommand. + * @param command + * A pointer to a pointer to an IOCommand object where the returned + * command will be stored. + * @param blockForCommand + * A bool that indicates whether to block the request until a command + * becomes available. + * @result + * Returns kIOReturnNoResources if no command is available and the client + * doesn't wish to block until one does become available. + * kIOReturnSuccess if the command argument is valid. + */ + virtual IOReturn gatedGetCommand(IOCommand **command, bool blockForCommand); + + /*! + * @function gatedReturnCommand + * @discussion + * The gatedReturnCommand method is used to serialize the return of a + * command to the pool behind a command gate. + * runAction-ed by returnCommand. + * @param command + * A pointer to the IOCommand object to be returned to the pool. + * @result + * Always returns kIOReturnSuccess if the command argument is valid.
+ */ + virtual IOReturn gatedReturnCommand(IOCommand *command); + +private: + OSMetaClassDeclareReservedUnused(IOCommandPool, 0); + OSMetaClassDeclareReservedUnused(IOCommandPool, 1); + OSMetaClassDeclareReservedUnused(IOCommandPool, 2); + OSMetaClassDeclareReservedUnused(IOCommandPool, 3); + OSMetaClassDeclareReservedUnused(IOCommandPool, 4); + OSMetaClassDeclareReservedUnused(IOCommandPool, 5); + OSMetaClassDeclareReservedUnused(IOCommandPool, 6); + OSMetaClassDeclareReservedUnused(IOCommandPool, 7); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* _IOKIT_IO_COMMAND_POOL_H_ */ diff --git a/iokit/IOKit/IOCommandQueue.h b/iokit/IOKit/IOCommandQueue.h new file mode 100644 index 000000000..8b7f3dfdf --- /dev/null +++ b/iokit/IOKit/IOCommandQueue.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. 
+ 1998-10-30 Godfrey van der Linden(gvdl) + Converted to C++ + 1999-9-22 Godfrey van der Linden(gvdl) + Deprecated +]*/ +#ifndef _IOKIT_IOCOMMANDQUEUE_H +#define _IOKIT_IOCOMMANDQUEUE_H + +#include + + + + +#warning IOCommandQueue has been deprecated in favour of IOCommandGate and will be going away before too long. + + + + + +class IOCommandQueue; + +typedef void (*IOCommandQueueAction) + (OSObject *, void *field0, void *field1, void *field2, void *field3); + +class IOCommandQueue : public IOEventSource +{ + OSDeclareDefaultStructors(IOCommandQueue) + +protected: + static const int kIOCQDefaultSize = 128; + + void *queue; + IOLock *producerLock; + semaphore_port_t producerSema; + int producerIndex, consumerIndex; + int size; + + virtual void free(); + + virtual bool checkForWork(); + +public: + static IOCommandQueue *commandQueue(OSObject *inOwner, + IOCommandQueueAction inAction = 0, + int inSize = kIOCQDefaultSize); + virtual bool init(OSObject *inOwner, + IOCommandQueueAction inAction = 0, + int inSize = kIOCQDefaultSize); + + virtual kern_return_t enqueueCommand(bool gotoSleep = true, + void *field0 = 0, void *field1 = 0, + void *field2 = 0, void *field3 = 0); + + // WARNING: This function can only be safely called from the appropriate + // work loop context. You should check IOWorkLoop::onThread is true. + // + // For each entry in the commandQueue call the target/action. + // Lockout all new entries to the queue while iterating. + // If the input fields are zero then the queue's owner/action will be used. + virtual int performAndFlush(OSObject *target = 0, + IOCommandQueueAction inAction = 0); +}; + +#endif /* !_IOKIT_IOCOMMANDQUEUE_H */ diff --git a/iokit/IOKit/IOConditionLock.h b/iokit/IOKit/IOConditionLock.h new file mode 100644 index 000000000..2bfef6f18 --- /dev/null +++ b/iokit/IOKit/IOConditionLock.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1994-1996 NeXT Software, Inc. All rights reserved. 
+ */ + +#ifndef _IOKIT_IOCONDITIONLOCK_H +#define _IOKIT_IOCONDITIONLOCK_H + +#include +#include +#include + +#include + +class IOConditionLock : public OSObject +{ + OSDeclareDefaultStructors(IOConditionLock) + +private: + IOLock * cond_interlock; // condition var Simple lock + volatile int condition; + + IOLock * sleep_interlock; // sleep lock Simple lock + unsigned char interruptible; + volatile bool want_lock; + volatile bool waiting; + +public: + static IOConditionLock *withCondition(int condition, bool inIntr = true); + virtual bool initWithCondition(int condition, bool inIntr = true); + virtual void free(); + + virtual bool tryLock(); // acquire lock, no waiting + virtual int lock(); // acquire lock (enter critical section) + virtual void unlock(); // release lock (leave critical section) + + virtual bool getInterruptible() const; + virtual int getCondition() const; + virtual int setCondition(int condition); + + virtual int lockWhen(int condition); // acquire lock when condition + virtual void unlockWith(int condition); // set condition & release lock +}; + +#endif /* _IOKIT_IOCONDITIONLOCK_H */ diff --git a/iokit/IOKit/IODataQueue.h b/iokit/IOKit/IODataQueue.h new file mode 100644 index 000000000..6445ba5e8 --- /dev/null +++ b/iokit/IOKit/IODataQueue.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOKIT_IODATAQUEUE_H +#define _IOKIT_IODATAQUEUE_H + +#include +#include +#include +#include + +typedef struct _IODataQueueMemory IODataQueueMemory; +class IOMemoryDescriptor; + +struct _notifyMsg { + mach_msg_header_t h; +}; + +/*! + * @class IODataQueue : public OSObject + * @abstract A generic queue designed to pass data from the kernel to a user process. + * @discussion The IODataQueue class is designed to allow kernel code to queue data to a user process. IODataQueue objects are designed to be used in a single producer / single consumer situation. As such, there are no locks on the data itself. Because the kernel enqueue and user-space dequeue methods follow a strict set of guidelines, no locks are necessary to maintain the integrity of the data struct. + * + *
Each data entry can be variable sized, but the entire size of the queue data region (including overhead for each entry) must be specified up front. + * + *
In order for the IODataQueue instance to notify the user process that data is available, a notification mach port must be set. When the queue is empty and a new entry is added, a message is sent to the specified port. + * + *
User client code exists in the IOKit framework that facilitates the creation of the receive notification port as well as the listen process for new data available notifications. + * + *
In order to make the data queue memory available to a user process, the method getMemoryDescriptor() must be used to get an IOMemoryDescriptor instance that can be mapped into a user process. Typically, the clientMemoryForType() method on an IOUserClient instance will be used to request the IOMemoryDescriptor and then return it to be mapped into the user process. + */ +class IODataQueue : public OSObject +{ + OSDeclareDefaultStructors(IODataQueue) + +protected: + IODataQueueMemory * dataQueue; + + void * notifyMsg; + + virtual void free(); + + /*! + * @function sendDataAvailableNotification + * @abstract Sends a dataAvailableNotification message to the specified mach port. + * @discussion This method sends a message to the mach port passed to setNotificationPort(). It is used to indicate that data is available in the queue. + */ + virtual void sendDataAvailableNotification(); + +public: + /*! + * @function withCapacity + * @abstract Static method that creates a new IODataQueue instance with the capacity specified in the size parameter. + * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in . The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in .
This method allocates a new IODataQueue instance and then calls initWithCapacity() with the given size parameter. If the initWithCapacity() fails, the new instance is released and zero is returned. + * @param size The size of the data queue memory region. + * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure. + */ + static IODataQueue *withCapacity(UInt32 size); + + /*! + * @function withEntries + * @abstract Static method that creates a new IODataQueue instance with the specified number of entries of the given size. + * @discussion This method will create a new IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size of number of entries that can be added to the queue.
This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. If the initWithEntries() fails, the new instance is released and zero is returned. + * @param numEntries Number of entries to allocate space for. + * @param entrySize Size of each entry. + * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure. + */ + static IODataQueue *withEntries(UInt32 numEntries, UInt32 entrySize); + + /*! + * @function initWithCapacity + * @abstract Initializes an IODataQueue instance with the capacity specified in the size parameter. + * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>. The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>. + * @param size The size of the data queue memory region. + * @result Returns true on success and false on failure. + */ + virtual Boolean initWithCapacity(UInt32 size); + + /*! + * @function initWithEntries + * @abstract Initializes an IODataQueue instance with the specified number of entries of the given size. + * @discussion This method will initialize an IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size or number of entries that can be added to the queue.
This method initializes the IODataQueue instance using a data region sized for the given numEntries and entrySize parameters. + * @param numEntries Number of entries to allocate space for. + * @param entrySize Size of each entry. + * @result Returns true on success and false on failure. + */ + virtual Boolean initWithEntries(UInt32 numEntries, UInt32 entrySize); + + /*! + * @function enqueue + * @abstract Enqueues a new entry on the queue. + * @discussion This method adds a new data entry of dataSize to the queue. It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue. Once that is done, it moves the tail to the next available location. When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.
If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available. + * @param data Pointer to the data to be added to the queue. + * @param dataSize Size of the data pointed to by data. + * @result Returns true on success and false on failure. Typically failure means that the queue is full. + */ + virtual Boolean enqueue(void *data, UInt32 dataSize); + + /*! + * @function setNotificationPort + * @abstract Creates a simple mach message targeting the mach port specified in port. + * @discussion This message is sent when data is added to an empty queue. It is to notify a user process that new data has become available. + * @param port The mach port to target with the notification message. + */ + virtual void setNotificationPort(mach_port_t port); + + /*! + * @function getMemoryDescriptor + * @abstract Returns a memory descriptor covering the IODataQueueMemory region. + * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on. + * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure. + */ + virtual IOMemoryDescriptor *getMemoryDescriptor(); +}; + +#endif /* _IOKIT_IODATAQUEUE_H */ diff --git a/iokit/IOKit/IODataQueueShared.h b/iokit/IOKit/IODataQueueShared.h new file mode 100644 index 000000000..6e853785e --- /dev/null +++ b/iokit/IOKit/IODataQueueShared.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOKIT_IODATAQUEUESHARED_H +#define _IOKIT_IODATAQUEUESHARED_H + +#include + +/*! + * @typedef IODataQueueEntry + * @abstract Represents an entry within the data queue + * @discussion This is a variable sized struct. The data field simply represents the start of the data region. The size of the data region is stored in the size field. The whole size of the specific entry is the size of a UInt32 plus the size of the data region. + * @field size The size of the following data region. + * @field data Represents the beginning of the data region. The address of the data field is a pointer to the start of the data region. + */ +typedef struct _IODataQueueEntry{ + UInt32 size; + void * data; +} IODataQueueEntry; + +/*! + * @typedef IODataQueueMemory + * @abstract A struct mapping to the header region of a data queue. + * @discussion This struct is variable sized. The struct represents the data queue header information plus a pointer to the actual data queue itself. The size of the struct is the combined size of the header fields (3 * sizeof(UInt32)) plus the actual size of the queue region. This size is stored in the queueSize field. + * @field queueSize The size of the queue region pointed to by the queue field. + * @field head The location of the queue head. This field is represented as a byte offset from the beginning of the queue memory region. 
+ * @field tail The location of the queue tail. This field is represented as a byte offset from the beginning of the queue memory region. + * @field queue Represents the beginning of the queue memory region. The size of the region pointed to by queue is stored in the queueSize field. + */ +typedef struct _IODataQueueMemory { + UInt32 queueSize; + volatile UInt32 head; + volatile UInt32 tail; + IODataQueueEntry queue[1]; +} IODataQueueMemory; + +/*! + * @defined DATA_QUEUE_ENTRY_HEADER_SIZE Represents the size of the data queue entry header independent of the actual size of the data in the entry. This is the overhead of each entry in the queue. The total size of an entry is equal to this value plus the size stored in the entry's size field (in IODataQueueEntry). + */ +#define DATA_QUEUE_ENTRY_HEADER_SIZE (sizeof(IODataQueueEntry) - sizeof(void *)) + +/*! + * @defined DATA_QUEUE_MEMORY_HEADER_SIZE Represents the size of the data queue memory header independent of the actual size of the queue data itself. The total size of the queue memory is equal to this value plus the size of the queue data region which is stored in the queueSize field of IODataQueueMeory. + */ +#define DATA_QUEUE_MEMORY_HEADER_SIZE (sizeof(IODataQueueMemory) - sizeof(IODataQueueEntry)) + +#endif /* _IOKIT_IODATAQUEUESHARED_H */ + diff --git a/iokit/IOKit/IODeviceMemory.h b/iokit/IOKit/IODeviceMemory.h new file mode 100644 index 000000000..7c72d0524 --- /dev/null +++ b/iokit/IOKit/IODeviceMemory.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _IOKIT_IODEVICEMEMORY_H +#define _IOKIT_IODEVICEMEMORY_H + +#include + +/*! @class IODeviceMemory : public IOMemoryDescriptor + @abstract An IOMemoryDescriptor used for device physical memory ranges. + @discussion The IODeviceMemory class is a simple subclass of IOMemoryDescriptor that uses its methods to describe a single range of physical memory on a device. IODeviceMemory objects are usually looked up with IOService or IOPCIDevice accessors, and are created by memory mapped bus families. IODeviceMemory implements only some factory methods in addition to the methods of IOMemoryDescriptor. */ + +class IODeviceMemory : public IOMemoryDescriptor +{ + OSDeclareDefaultStructors(IODeviceMemory) + +public: + +/*! @struct InitElement + @field start First physical address in the range. + @field length Length of the range. + @field tag 32-bit value not interpreted by IODeviceMemory or IOMemoryDescriptor, for use by the bus family. */ + + struct InitElement { + IOPhysicalAddress start; + IOPhysicalLength length; + IOOptionBits tag; + }; + +/*! @function arrayFromList + @abstract Constructs an OSArray of IODeviceMemory instances, each describing one physical range, and a tag value. + @discussion This method creates IODeviceMemory instances for each physical range passed in a IODeviceMemory::InitElement array. 
Each element consists of a physical address, length and tag value for the IODeviceMemory. The instances are returned as a created OSArray. + @param list An array of IODeviceMemory::InitElement structures. + @param count The number of elements in the list. + @result A created OSArray of IODeviceMemory objects, to be released by the caller, or zero on failure. */ + + static OSArray * arrayFromList( + InitElement list[], + IOItemCount count ); + +/*! @function withRange + @abstract Constructs an IODeviceMemory instance, describing one physical range. + @discussion This method creates IODeviceMemory instance for one physical range passed as a physical address and length. It just calls IOMemoryDescriptor::withPhysicalAddress. + @param address The physical address of the first byte in the memory. + @param withLength The length of memory. + @result The created IODeviceMemory on success, to be released by the caller, or zero on failure. */ + + static IODeviceMemory * withRange( + IOPhysicalAddress start, + IOPhysicalLength length ); + +/*! @function withRange + @abstract Constructs an IODeviceMemory instance, describing a subset of an existing IODeviceMemory range. + @discussion This method creates IODeviceMemory instance for a subset of an existing IODeviceMemory range, passed as a physical address offset and length. It just calls IOMemoryDescriptor::withSubRange. + @param of The parent IODeviceMemory of which a subrange is to be used for the new descriptor, which will be retained by the subrange IODeviceMemory. + @param offset A byte offset into the parent's memory. + @param length The length of the subrange. + @result The created IODeviceMemory on success, to be released by the caller, or zero on failure. */ + + static IODeviceMemory * withSubRange( + IODeviceMemory * of, + IOPhysicalAddress offset, + IOPhysicalLength length ); +}; + +#endif /* ! 
_IOKIT_IODEVICEMEMORY_H */ diff --git a/iokit/IOKit/IODeviceTreeSupport.h b/iokit/IOKit/IODeviceTreeSupport.h new file mode 100644 index 000000000..7287f9e22 --- /dev/null +++ b/iokit/IOKit/IODeviceTreeSupport.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + +#ifndef _IOKIT_IODEVICETREE_H +#define _IOKIT_IODEVICETREE_H + +#include +#include + +class IODeviceMemory; +class IOService; + +extern const IORegistryPlane * gIODTPlane; + +extern const OSSymbol * gIODTCompatibleKey; +extern const OSSymbol * gIODTTypeKey; +extern const OSSymbol * gIODTModelKey; + +extern const OSSymbol * gIODTAAPLInterruptsKey; +extern const OSSymbol * gIODTDefaultInterruptController; +extern const OSSymbol * gIODTNWInterruptMappingKey; + +IORegistryEntry * IODeviceTreeAlloc( void * dtTop ); + + +bool IODTMatchNubWithKeys( IORegistryEntry * nub, + const char * keys ); + +bool IODTCompareNubName( const IORegistryEntry * regEntry, + OSString * name, OSString ** matchingName ); + +enum { + kIODTRecursive = 0x00000001, + kIODTExclusive = 0x00000002, +}; + +OSCollectionIterator * IODTFindMatchingEntries( IORegistryEntry * from, + IOOptionBits options, const char * keys ); + +typedef SInt32 (*IODTCompareAddressCellFunc) + (UInt32 cellCount, UInt32 left[], UInt32 right[]); +typedef void (*IODTNVLocationFunc) + (IORegistryEntry * entry, + UInt8 * busNum, UInt8 * deviceNum, UInt8 * functionNum ); + +void IODTSetResolving( IORegistryEntry * regEntry, + IODTCompareAddressCellFunc compareFunc, + IODTNVLocationFunc locationFunc ); + +bool IODTResolveAddressCell( IORegistryEntry * regEntry, + UInt32 cellsIn[], + IOPhysicalAddress * phys, IOPhysicalLength * len ); + +OSArray * IODTResolveAddressing( IORegistryEntry * regEntry, + const char * addressPropertyName, + IODeviceMemory * parent ); + +#pragma options align=mac68k + +struct IONVRAMDescriptor { + unsigned int format:4; + unsigned int marker:1; + unsigned int bridgeCount:3; + unsigned int busNum:2; + unsigned int bridgeDevices:6 * 5; + unsigned int functionNum:3; + unsigned int deviceNum:5; +}; + +#pragma options align=reset + +IOReturn IODTMakeNVDescriptor( IORegistryEntry * regEntry, + IONVRAMDescriptor * hdr ); + +OSData * IODTFindSlotName( IORegistryEntry * regEntry, UInt32 
deviceNumber ); + +const OSSymbol * IODTInterruptControllerName( + IORegistryEntry * regEntry ); + +bool IODTMapInterrupts( IORegistryEntry * regEntry ); + +#ifdef __cplusplus +extern "C" { +#endif + +IOReturn IONDRVLibrariesInitialize( IOService * provider ); + +#ifdef __cplusplus +} +#endif + +#endif /* _IOKIT_IODEVICETREE_H */ + diff --git a/iokit/IOKit/IOEventSource.h b/iokit/IOKit/IOEventSource.h new file mode 100644 index 000000000..e3d37759a --- /dev/null +++ b/iokit/IOKit/IOEventSource.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. + 1998-10-30 Godfrey van der Linden(gvdl) + Converted to C++ +*/ +#ifndef _IOKIT_IOEVENTSOURCE_H +#define _IOKIT_IOEVENTSOURCE_H + +#include + +#include + +#include +#include +#include + + +__BEGIN_DECLS +#include +#include +__END_DECLS + +/*! + @class IOEventSource : public OSObject + @abstract Abstract class for all work-loop event sources. 
+ @discussion The IOEventSource declares the abstract super class that all +event sources must inherit from if an IOWorkLoop is to receive events from them. +

+ An event source can represent any event that should cause the work-loop of a +device to wake up and perform work. Two examples of event sources are the +IOInterruptEventSource which delivers interrupt notifications and IOCommandGate +which delivers command requests. +

+ A kernel module can always use the work-loop model for serialising access to +anything at all. The IOEventSource is used for communicating events to the +work-loop, and the chain of event sources should be used to walk the possible +event sources and demultiplex them. Note a particular instance of an event +source may only be a member of 1 linked list chain. If you need to move it +between chains then make sure it is removed from the original chain before +attempting to move it. +<br><br>

+ The IOEventSource makes no attempt to maintain the consistency of its internal data across multi-threading. It is assumed that the user of these basic tools will protect the data that these objects represent in some sort of device wide instance lock. For example the IOWorkLoop maintains the event chain by handing off change requests to its own thread and thus single threading access to its state. +<br><br>

+ All subclasses of the IOEventSource are expected to implement the checkForWork() member function. + +

+ checkForWork() is the key method in this class. It is called by some work-loop when convenient and is expected to evaluate its internal state and determine if an event has occurred since the last call. In the case of an event having occurred then the instance defined target(owner)/action will be called. The action is stored as an ordinary C function pointer but the first parameter is always the owner. This means that a C++ member function can be used as an action function though this depends on the ABI. +<br><br>

+ Although the eventChainNext variable contains a reference to the next event source in the chain this reference is not retained. The list 'owner' i.e. the client that creates the event, not the work-loop, is expected to retain the source. +*/ +class IOEventSource : public OSObject +{ + OSDeclareAbstractStructors(IOEventSource) + friend class IOWorkLoop; + +public: +/*! + @typedef Action + @discussion Placeholder type for C++ function overloading discrimination. +As the all event sources require an action and it has to be stored somewhere +and be of some type, this is that type. + @param owner + Target of the function, can be used as a refcon. The owner is set +during initialisation. Note if a C++ function was specified this parameter +is implicitly the first paramter in the target member function's parameter list. +*/ + typedef void (*Action)(OSObject *owner, ...); + +/*! @defined IOEventSourceAction + @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOEventSource::Action */ + #define IOEventSourceAction IOEventSource::Action + +protected: +/*! @var eventChainNext + The next event source in the event chain. nil at end of chain. */ + IOEventSource *eventChainNext; + +/*! @var owner The owner object called when an event has been delivered. */ + OSObject *owner; + +/*! @var action + The action method called when an event has been delivered */ + Action action; + +/*! @var enabled + Is this event source enabled to deliver requests to the work-loop. */ + bool enabled; + +/*! @var workLoop What is the work-loop for this event source. */ + IOWorkLoop *workLoop; + +/*! @var refcon What ever the client wants to do, see $link setRefcon. */ + void *refcon; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of the IOEventSource in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +/*! 
@function init + @abstract Primary initialiser for the IOEventSource class. + @param owner + Owner of this instance of an event source. Used as the first parameter +of the action callout. Owner will generally be an OSObject it doesn't have to +be as no member functions will be called directly in it. It can just be a +refcon for a client routine. + @param action + Pointer to C call out function. Action is a pointer to a C function +that gets called when this event source has outstanding work. It will usually +be called by the checkForWork member function. The first parameter of the +action call out will always be the owner, this allows C++ member functions to +be used as actions. Defaults to 0. + @result true if the inherited classes and this instance initialise +successfully. +*/ + virtual bool init(OSObject *owner, IOEventSource::Action action = 0); + +/*! @function checkForWork + @abstract Pure Virtual member function used by IOWorkLoop for work +scheduling. + @discussion This function will be called to request a subclass to check +it's internal state for any work to do and then to call out the owner/action. + @result Return true if this function needs to be called again before all its outstanding events have been processed. + */ + virtual bool checkForWork() = 0; + +/*! @function setWorkLoop + @abstract Set'ter for $link workLoop variable. + @param workLoop + Target work-loop of this event source instance. A subclass of +IOWorkLoop that at least reacts to signalWorkAvailable() and onThread functions. +*/ + virtual void setWorkLoop(IOWorkLoop *workLoop); + +/*! @function setNext + @abstract Set'ter for $link eventChainNext variable. + @param next + Pointer to another IOEventSource instance. +*/ + virtual void setNext(IOEventSource *next); + +/*! @function getNext + @abstract Get'ter for $link eventChainNext variable. + @result value of eventChainNext. 
+*/ + virtual IOEventSource *getNext() const; + + +protected: + // Methods to access the IOWorkLoop exported fields + inline void signalWorkAvailable() { workLoop->signalWorkAvailable(); }; + inline void openGate() { workLoop->openGate(); }; + inline void closeGate() { workLoop->closeGate(); }; + inline bool tryCloseGate() { return workLoop->tryCloseGate(); }; + inline int sleepGate(void *event, UInt32 type) + { return workLoop->sleepGate(event, type); }; + inline void wakeupGate(void *event, bool oneThread) + { workLoop->wakeupGate(event, oneThread); }; + +public: +/*! @function setAction + @abstract Set'ter for $link action variable. + @param action Pointer to a C function of type IOEventSource::Action. */ + virtual void setAction(IOEventSource::Action action); + +/*! @function getAction + @abstract Get'ter for $link action variable. + @result value of action. */ + virtual IOEventSource::Action getAction() const; + +/*! @function enable + @abstract Enable event source. + @discussion A subclass implementation is expected to respect the enabled +state when checkForWork is called. Calling this function will cause the +work-loop to be signalled so that a checkForWork is performed. */ + virtual void enable(); + +/*! @function disable + @abstract Disable event source. + @discussion A subclass implementation is expected to respect the enabled +state when checkForWork is called. */ + virtual void disable(); + +/*! @function isEnabled + @abstract Get'ter for $link enable variable. + @result true if enabled. */ + virtual bool isEnabled() const; + +/*! @function getWorkLoop + @abstract Get'ter for $link workLoop variable. + @result value of workLoop. */ + virtual IOWorkLoop *getWorkLoop() const; + +/*! @function onThread + @abstract Convenience function for workLoop->onThread. + @result true if called on the work-loop thread. 
+*/ + virtual bool onThread() const; + +private: + OSMetaClassDeclareReservedUnused(IOEventSource, 0); + OSMetaClassDeclareReservedUnused(IOEventSource, 1); + OSMetaClassDeclareReservedUnused(IOEventSource, 2); + OSMetaClassDeclareReservedUnused(IOEventSource, 3); + OSMetaClassDeclareReservedUnused(IOEventSource, 4); + OSMetaClassDeclareReservedUnused(IOEventSource, 5); + OSMetaClassDeclareReservedUnused(IOEventSource, 6); + OSMetaClassDeclareReservedUnused(IOEventSource, 7); +}; + +#endif /* !_IOKIT_IOEVENTSOURCE_H */ diff --git a/iokit/IOKit/IOFilterInterruptEventSource.h b/iokit/IOKit/IOFilterInterruptEventSource.h new file mode 100644 index 000000000..0c554524c --- /dev/null +++ b/iokit/IOKit/IOFilterInterruptEventSource.h @@ -0,0 +1,148 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + +HISTORY + 1999-4-15 Godfrey van der Linden(gvdl) + Created. +*/ +#ifndef _IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H +#define _IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H + +#include + +class IOService; + +/*! 
@class IOFilterInterruptEventSource : public IOInterruptEventSource + @abstract Filtering variant of the $link IOInterruptEventSource. + @discussion An interrupt event source that calls the client to determine if an interrupt event needs to be scheduled on the work loop. A filter interrupt event source calls the client in the primary interrupt context, the client can then interrogate its hardware and determine if the interrupt needs to be processed yet. +

+ As the routine is called in the primary interrupt context great care must be taken in the writing of this routine. In general none of the generic IOKit environment is safe to call in this context. We intend this routine to be used by hardware that can interrogate its registers without destroying state. Primarily this variant of event sources will be used by drivers that share interrupts. The filter routine will determine if the interrupt is a real interrupt or a ghost and thus optimise the work thread context switch away. +

+ CAUTION: Called in primary interrupt context, if you need to disable the interrupt to guard your registers against an unexpected call then it is better to use a straight IOInterruptEventSource and its secondary interrupt delivery mechanism. +*/ +class IOFilterInterruptEventSource : public IOInterruptEventSource +{ + OSDeclareDefaultStructors(IOFilterInterruptEventSource) + +public: +/*! + @typedef Filter + @discussion C Function pointer to a routine to call when an interrupt occurs. + @param owner Pointer to the owning/client instance. + @param sender Where is the interrupt coming from. + @result false if this interrupt can be ignored. */ + typedef bool (*Filter)(OSObject *, IOFilterInterruptEventSource *); + +/*! @defined IOFilterInterruptAction + @discussion Backward compatibility define for the old non-class scoped type definition. See $link IOFilterInterruptEventSource::Filter */ +#define IOFilterInterruptAction IOFilterInterruptEventSource::Filter + +private: + // Hide the superclass initializers + virtual bool init(OSObject *inOwner, + IOInterruptEventSource::Action inAction = 0, + IOService *inProvider = 0, + int inIntIndex = 0); + + static IOInterruptEventSource * + interruptEventSource(OSObject *inOwner, + IOInterruptEventSource::Action inAction = 0, + IOService *inProvider = 0, + int inIntIndex = 0); + +protected: +/*! @var filterAction Filter callout */ + Filter filterAction; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capabilities of the IOFilterInterruptEventSource in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: +/*! @function filterInterruptEventSource + @abstract Factory method to create and initialise an IOFilterInterruptEventSource. See $link init. + @param owner Owner/client of this event source. + @param action 'C' Function to call when something happens. + @param filter 'C' Function to call when interrupt occurs. 
+ @param provider Service that provides interrupts. + @param intIndex Defaults to 0. + @result a new event source if succesful, 0 otherwise. */ + static IOFilterInterruptEventSource * + filterInterruptEventSource(OSObject *owner, + IOInterruptEventSource::Action action, + Filter filter, + IOService *provider, + int intIndex = 0); + +/*! @function init + @abstract Primary initialiser for the IOFilterInterruptEventSource class. + @param owner Owner/client of this event source. + @param action 'C' Function to call when something happens. + @param filter 'C' Function to call in primary interrupt context. + @param provider Service that provides interrupts. + @param intIndex Interrupt source within provider. Defaults to 0. + @result true if the inherited classes and this instance initialise +successfully. */ + virtual bool init(OSObject *owner, + IOInterruptEventSource::Action action, + Filter filter, + IOService *provider, + int intIndex = 0); + + +/*! @function signalInterrupt + @abstract Cause the work loop to schedule the action. + @discussion Cause the work loop to schedule the interrupt action even if the filter routine returns 'false'. Note well the interrupting condition MUST be cleared from the hardware otherwise an infinite process interrupt loop will occur. Use this function when 'SoftDMA' is desired. See $link IOFilterInterruptSource::Filter */ + virtual void signalInterrupt(); + +/*! @function getFilterAction + @abstract Get'ter for filterAction variable. + @result value of filterAction. */ + virtual Filter getFilterAction() const; + +/*! @function normalInterruptOccurred + @abstract Override $link IOInterruptEventSource::normalInterruptOccured to make a filter callout. */ + virtual void normalInterruptOccurred(void *self, IOService *prov, int ind); + +/*! @function disableInterruptOccurred + @abstract Override $link IOInterruptEventSource::disableInterruptOccurred to make a filter callout. 
*/ + virtual void disableInterruptOccurred(void *self, IOService *prov, int ind); + +private: + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 0); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 1); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 2); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 3); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 4); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 5); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 6); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 7); +}; + +#endif /* !_IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H */ diff --git a/iokit/IOKit/IOInterruptController.h b/iokit/IOKit/IOInterruptController.h new file mode 100644 index 000000000..4b4216b9b --- /dev/null +++ b/iokit/IOKit/IOInterruptController.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + + +#ifndef _IOKIT_IOINTERRUPTCONTROLLER_H +#define _IOKIT_IOINTERRUPTCONTROLLER_H + +#include +#include +#include + + +class IOSharedInterruptController; + +struct IOInterruptVector { + volatile char interruptActive; + volatile char interruptDisabledSoft; + volatile char interruptDisabledHard; + volatile char interruptRegistered; + IOLock * interruptLock; + IOService * nub; + long source; + void * target; + IOInterruptHandler handler; + void * refCon; + IOSharedInterruptController *sharedController; +}; + +typedef struct IOInterruptVector IOInterruptVector; + + +class IOInterruptController : public IOService +{ + OSDeclareAbstractStructors(IOInterruptController); + +protected: + IOInterruptVector *vectors; + IOSimpleLock *controllerLock; + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon); + virtual IOReturn unregisterInterrupt(IOService *nub, int source); + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType); + + virtual IOReturn enableInterrupt(IOService *nub, int source); + virtual IOReturn disableInterrupt(IOService *nub, int source); + virtual IOReturn causeInterrupt(IOService *nub, int source); + + virtual IOInterruptAction getInterruptHandlerAddress(void); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, + int source); + + // Methods to be overridden for simplifed interrupt controller subclasses. 
+ + virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector *vector); + virtual void initVector(long vectorNumber, IOInterruptVector *vector); + virtual int getVectorType(long vectorNumber, IOInterruptVector *vector); + virtual void disableVectorHard(long vectorNumber, IOInterruptVector *vector); + virtual void enableVector(long vectorNumber, IOInterruptVector *vector); + virtual void causeVector(long vectorNumber, IOInterruptVector *vector); + + OSMetaClassDeclareReservedUnused(IOInterruptController, 0); + OSMetaClassDeclareReservedUnused(IOInterruptController, 1); + OSMetaClassDeclareReservedUnused(IOInterruptController, 2); + OSMetaClassDeclareReservedUnused(IOInterruptController, 3); + OSMetaClassDeclareReservedUnused(IOInterruptController, 4); + OSMetaClassDeclareReservedUnused(IOInterruptController, 5); +}; + + +class IOSharedInterruptController : public IOInterruptController +{ + OSDeclareDefaultStructors(IOSharedInterruptController); + +private: + IOService *provider; + int numVectors; + int vectorsRegistered; + int vectorsEnabled; + volatile long controllerDisabled; + bool sourceIsLevel; + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual IOReturn initInterruptController(IOInterruptController *parentController, OSData *parentSource); + + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon); + virtual IOReturn unregisterInterrupt(IOService *nub, int source); + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType); + + virtual IOReturn enableInterrupt(IOService *nub, int source); + virtual IOReturn disableInterrupt(IOService *nub, int source); + + virtual IOInterruptAction getInterruptHandlerAddress(void); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source); + + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 0); + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 1); 
+ OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 2); + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 3); +}; + + +#endif /* ! _IOKIT_IOINTERRUPTCONTROLLER_H */ diff --git a/iokit/IOKit/IOInterruptEventSource.h b/iokit/IOKit/IOInterruptEventSource.h new file mode 100644 index 000000000..9fff5fecb --- /dev/null +++ b/iokit/IOKit/IOInterruptEventSource.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. + 1998-10-30 Godfrey van der Linden(gvdl) + Converted to C++ +*/ + +#ifndef _IOKIT_IOINTERRUPTEVENTSOURCE_H +#define _IOKIT_IOINTERRUPTEVENTSOURCE_H + +#include + +class IOService; + +/*! @class IOInterruptEventSource : public IOEventSource + @abstract Event source for interrupt delivery to work-loop based drivers. + @discussion The IOInterruptEventSource is a generic object that delivers calls interrupt routines in it's client in a guaranteed single-threaded manner. 
IOInterruptEventSource is part of the IOKit $link IOWorkLoop infrastructure where the semantic is that one and only one action method is executing within a work-loop's event chain. +

+When the action method is called, the client member function will receive 2 arguments, (IOEventSource *) sender and (int) count. See $link IOInterruptEventSource::Action. Where sender will be a reference to the interrupt that occurred and the count will be computed by the difference between the $link producerCount and $link consumerCount. This number may not be reliable as no attempt is made to adjust for counter wrap-around problems but is provided for general information and statistic gathering. +

+In general a client will use the factory member function to create and initialise the event source and then add it to their work-loop. It is the work loop's responsibility to maintain the new event source in its event chain. See $link IOWorkLoop. +

+An interrupt event source attaches itself to the given provider's interrupt source at initialisation time. At this time it determines if it is connected to a level or edge triggered interrupt. If the interrupt is a level triggered interrupt the event source automatically disables the interrupt source at primary interrupt time and after it calls the client it automatically reenables the interrupt. This action is fairly expensive but it is 100% safe and defaults sensibly so that the driver writer does not have to implement type dependent interrupt routines. So to repeat, the driver writer does not have to be concerned by the actual underlying interrupt mechanism as the event source hides the complexity. +

+Saying this if the hardware is a multi-device card, for instance a 4 port NIC, where all of the devices are sharing one level triggered interrupt AND it is possible to determine each port's interrupt state non-destructively then the $link IOFilterInterruptEventSource would be a better choice. +

+Warning: All IOInterruptEventSources are created in the disabled state. If you want to actually schedule interrupt delivery do not forget to enable the source. +*/ +class IOInterruptEventSource : public IOEventSource +{ + OSDeclareDefaultStructors(IOInterruptEventSource) + +public: +/*! @typedef Action + @discussion 'C' pointer prototype of functions that are called in a single threaded context when an interrupt occurs. + @param owner Pointer to client instance. + @param sender Pointer to the generating interrupt event source. + @param count Number of interrupts seen before delivery. */ + typedef void (*Action)(OSObject *, IOInterruptEventSource *, int count); + +/*! @defined IOInterruptEventAction + @discussion Backward compatibility define for the old non-class scoped type definition. See $link IOInterruptEventSource::Action */ +#define IOInterruptEventAction IOInterruptEventSource::Action + +protected: +/*! @var provider IOService that provides interrupts for delivery. */ + IOService *provider; + +/*! @var intIndex */ + int intIndex; + +/*! @var producerCount + Current count of produced interrupts that have been received. */ + volatile unsigned int producerCount; + +/*! @var consumerCount + Current count of produced interrupts that the owner has been informed of. */ + unsigned int consumerCount; + +/*! @var autoDisable Do we need to automatically disable the interrupt source when we take an interrupt, i.e. we are level triggered. */ + bool autoDisable; + +/*! @var explicitDisable Has the user explicitly disabled this event source, if so then do not override their request when returning from the callout */ + bool explicitDisable; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capabilities of the IOInterruptEventSource in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +/*! 
@function free + @abstract Sub-class implementation of free method, disconnects from the interrupt source. */ + virtual void free(); + +/*! @function checkForWork + @abstract Pure Virtual member function used by IOWorkLoop for issuing client calls. + @discussion This function is called when the work-loop is ready to check for any work to do and then to call out the owner/action. + @result Return true if this function needs to be called again before all its outstanding events have been processed. */ + virtual bool checkForWork(); + +public: + +/*! @function interruptEventSource + @abstract Factory function for IOInterruptEventSources creation and initialisation. + @param owner Owning client of the new event source. + @param action 'C' Function to call when something happens. + @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccurred method explicitly. This will start the ball rolling for safe delivery of asynchronous events into the driver. + @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. + @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ + static IOInterruptEventSource * + interruptEventSource(OSObject *owner, + Action action, + IOService *provider = 0, + int intIndex = 0); + +/*! @function init + @abstract Primary initialiser for the IOInterruptEventSource class. + @param owner Owning client of the new event source. + @param action 'C' Function to call when something happens. + @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccurred method explicitly. This will start the ball rolling for safe delivery of asynchronous events into the driver. 
+ @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. + @result true if the inherited classes and this instance initialise +successfully. */ + virtual bool init(OSObject *owner, + Action action, + IOService *provider = 0, + int intIndex = 0); + +/*! @function enable + @abstract Enable event source. + @discussion A subclass implementation is expected to respect the enabled +state when checkForWork is called. Calling this function will cause the +work-loop to be signalled so that a checkForWork is performed. */ + virtual void enable(); + +/*! @function disable + @abstract Disable event source. + @discussion A subclass implementation is expected to respect the enabled +state when checkForWork is called. */ + virtual void disable(); + +/*! @function getProvider + @abstract Get'ter for $link provider variable. + @result value of provider. */ + virtual const IOService *getProvider() const; + +/*! @function getIntIndex + @abstract Get'ter for $link intIndex interrupt index variable. + @result value of intIndex. */ + virtual int getIntIndex() const; + +/*! @function getAutoDisable + @abstract Get'ter for $link autoDisable variable. + @result value of autoDisable. */ + virtual bool getAutoDisable() const; + +/*! @function interruptOccurred + @abstract Functions that get called by the interrupt controller. See $link IOService::registerInterrupt + @param nub Where did the interrupt originate from + @param ind What is this interrupts index within 'nub'. */ + virtual void interruptOccurred(void *, IOService *nub, int ind); + +/*! @function normalInterruptOccurred + @abstract Functions that get called by the interrupt controller.See $link IOService::registerInterrupt + @param nub Where did the interrupt originate from + @param ind What is this interrupts index within 'nub'. */ + virtual void normalInterruptOccurred(void *, IOService *nub, int ind); + +/*! 
@function disableInterruptOccurred + @abstract Functions that get called by the interrupt controller.See $link IOService::registerInterrupt + @param nub Where did the interrupt originate from + @param ind What is this interrupts index within 'nub'. */ + virtual void disableInterruptOccurred(void *, IOService *nub, int ind); + +private: + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 0); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 1); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 2); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 3); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 4); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 5); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 6); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 7); +}; + +#endif /* !_IOKIT_IOINTERRUPTEVENTSOURCE_H */ diff --git a/iokit/IOKit/IOInterrupts.h b/iokit/IOKit/IOInterrupts.h new file mode 100644 index 000000000..547f190ad --- /dev/null +++ b/iokit/IOKit/IOInterrupts.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + + +#ifndef _IOKIT_IOINTERRUPTS_H +#define _IOKIT_IOINTERRUPTS_H + +#define kIOInterruptTypeEdge (0) +#define kIOInterruptTypeLevel (1) + +#ifdef __cplusplus + +class OSData; +class IOInterruptController; + +struct IOInterruptSource { + IOInterruptController *interruptController; + OSData *vectorData; +}; +typedef struct IOInterruptSource IOInterruptSource; + +#endif /* __cplusplus */ + +typedef void (*IOInterruptHandler)(void *target, void *refCon, + void *nub, int source); + +#endif /* ! _IOKIT_IOINTERRUPTS_H */ diff --git a/iokit/IOKit/IOKitDebug.h b/iokit/IOKit/IOKitDebug.h new file mode 100644 index 000000000..325352272 --- /dev/null +++ b/iokit/IOKit/IOKitDebug.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOKITDEBUG_H +#define _IOKIT_IOKITDEBUG_H + +#include + + +#ifdef __cplusplus + +#include +#include +#include + +class IOKitDiagnostics : public OSObject +{ + OSDeclareDefaultStructors(IOKitDiagnostics) + +public: + static OSObject * diagnostics( void ); + virtual bool serialize(OSSerialize *s) const; +private: + static void updateOffset( OSDictionary * dict, + UInt32 value, const char * name ); +}; + +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +enum { +// loggage + kIOLogAttach = 0x00000001ULL, + kIOLogProbe = 0x00000002ULL, + kIOLogStart = 0x00000004ULL, + kIOLogRegister = 0x00000008ULL, + kIOLogMatch = 0x00000010ULL, + kIOLogConfig = 0x00000020ULL, + kIOLogYield = 0x00000040ULL, + kIOLogPower = 0x00000080ULL, + kIOLogMapping = 0x00000100ULL, + kIOLogCatalogue = 0x00000200ULL, + + kIOLogServiceTree = 0x00001000ULL, + kIOLogDTree = 0x00002000ULL, + kIOLogMemory = 0x00004000ULL, + +// debug aids - change behaviour + kIONoFreeObjects = 0x00100000ULL +}; + +extern SInt64 gIOKitDebug; +extern char iokit_version[]; + +struct IORegistryPlane; +extern void IOPrintPlane( const struct IORegistryPlane * plane ); +extern void OSPrintMemory( void ); +#define IOPrintMemory OSPrintMemory + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + +#endif /* ! _IOKIT_IOKITDEBUG_H */ diff --git a/iokit/IOKit/IOKitKeys.h b/iokit/IOKit/IOKitKeys.h new file mode 100644 index 000000000..9fa67fe50 --- /dev/null +++ b/iokit/IOKit/IOKitKeys.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * Common symbol definitions for IOKit. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOKITKEYS_H +#define _IOKIT_IOKITKEYS_H + +// properties found in the registry root +#define kIOKitBuildVersionKey "IOKitBuildVersion" +#define kIOKitDiagnosticsKey "IOKitDiagnostics" + // a dictionary keyed by plane name +#define kIORegistryPlanesKey "IORegistryPlanes" +#define kIOCatalogueKey "IOCatalogue" + +// registry plane names +#define kIOServicePlane "IOService" +#define kIOPowerPlane "IOPower" +#define kIODeviceTreePlane "IODeviceTree" +#define kIOAudioPlane "IOAudio" +#define kIOFireWirePlane "IOFireWire" +#define kIOUSBPlane "IOUSB" + +// IOService class name +#define kIOServiceClass "IOService" + +// IOResources class name +#define kIOResourcesClass "IOResources" + +// IOService driver probing property names +#define kIOClassKey "IOClass" +#define kIOProbeScoreKey "IOProbeScore" +#define kIOKitDebugKey "IOKitDebug" + +// IOService matching property names +#define kIOProviderClassKey "IOProviderClass" +#define kIONameMatchKey "IONameMatch" +#define kIOPropertyMatchKey "IOPropertyMatch" +#define kIOPathMatchKey "IOPathMatch" +#define kIOLocationMatchKey "IOLocationMatch" +#define kIOResourceMatchKey "IOResourceMatch" +#define kIOMatchedServiceCountKey "IOMatchedServiceCountMatch" + +#define kIONameMatchedKey "IONameMatched" + +#define kIOMatchCategoryKey "IOMatchCategory" +#define kIODefaultMatchCategoryKey 
"IODefaultMatchCategory" + +// IOService default user client class, for loadable user clients +#define kIOUserClientClassKey "IOUserClientClass" + +// IOService notification types +#define kIOPublishNotification "IOServicePublish" +#define kIOFirstPublishNotification "IOServiceFirstPublish" +#define kIOMatchedNotification "IOServiceMatched" +#define kIOFirstMatchNotification "IOServiceFirstMatch" +#define kIOTerminatedNotification "IOServiceTerminate" + +// IOService interest notification types +#define kIOGeneralInterest "IOGeneralInterest" +#define kIOBusyInterest "IOBusyInterest" +#define kIOAppPowerStateInterest "IOAppPowerStateInterest" + +// IOService interest notification types +#define kIOCFPlugInTypesKey "IOCFPlugInTypes" + +// properties found in services that implement command pooling +#define kIOCommandPoolSizeKey "IOCommandPoolSize" + +#endif /* ! _IOKIT_IOKITKEYS_H */ + diff --git a/iokit/IOKit/IOKitServer.h b/iokit/IOKit/IOKitServer.h new file mode 100644 index 000000000..08fc0dc2a --- /dev/null +++ b/iokit/IOKit/IOKitServer.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +/* + * Internal definitions used between the iokit user library and + * server routines. + */ + +#ifndef _IOKIT_IOKITSERVER_H +#define _IOKIT_IOKITSERVER_H + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif +#include +#ifdef __cplusplus +} +#endif + +// IOMakeMatching +/*! + @enum IOMakeMatching + @constant kIOServiceMatching + @constant kIOBSDNameMatching + @constant kIOOFPathMatching +*/ +enum { + kIOServiceMatching = 100, + kIOBSDNameMatching = 101, + kIOOFPathMatching = 102, +}; + +// IOCatalogueSendData +/*! + @enum IOCatalogueSendData user-client flags. + @constant kIOCatalogAddDrivers Signals a call to the addDrivers function in IOCatalogue. + @constant kIOCatalogAddDriversNoMatch Signals a call to the addDrivers function in IOCatalogue but does not start a matching thread. + @constant kIOCatalogRemoveDrivers Signals a call to the removeDrivers function in IOCatalogue. + @constant kIOCatalogRemoveDriversNoMatch Signals a call to the removedrivers function in IOCatalogue but does not start a matching thread. + @constant kIOCatalogStartMatching Signals the IOCatalogue to start an IOService matching thread. +*/ +enum { + kIOCatalogAddDrivers = 1, + kIOCatalogAddDriversNoMatch, + kIOCatalogRemoveDrivers, + kIOCatalogRemoveDriversNoMatch, + kIOCatalogStartMatching, + kIOCatalogRemoveKernelLinker, +}; + +// IOCatalogueGetData +/*! + @enum IOCatalogueGetData user-client flags + @constant kIOCatalogGetContents Returns a snapshot of the database to the caller. +*/ +enum { + kIOCatalogGetContents = 1, +}; + +// IOCatalogueReset +/*! + @enum IOCatalogueReset user-client flag + @constant kIOCatalogResetDefault Removes all entries from IOCatalogue except those used for booting the system. +*/ +enum { + kIOCatalogResetDefault = 1, +}; + +// IOCatalogueTerminate +/*! + @enum IOCatalogueTerminate user-client flags. 
+ @constant kIOCatalogModuleUnload Terminates all services which depend on a particular module and unloads the module. + @constant kIOCatalogModuleTerminate Terminates all services which depend on a particular module but does not unload the module. + @constant kIOCatalogServiceTerminate Terminates a particular service by name. +*/ +enum { + kIOCatalogModuleUnload = 1, + kIOCatalogModuleTerminate, + kIOCatalogServiceTerminate, +}; + +enum { + kIOCatalogMatchIdle = KMOD_IOKIT_END_RANGE_PACKET - 0x10 +}; + +#endif /* ! _IOKIT_IOKITSERVER_H */ + diff --git a/iokit/IOKit/IOLib.h b/iokit/IOKit/IOLib.h new file mode 100644 index 000000000..7e46f2a13 --- /dev/null +++ b/iokit/IOKit/IOLib.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + +#ifndef __IOKIT_IOLIB_H +#define __IOKIT_IOLIB_H + +#ifndef KERNEL +#error IOLib.h is for kernel use only +#endif + +#ifndef IOKIT_DEPRECATED +#define IOKIT_DEPRECATED 1 +#endif + +#include + +#include +#include +#include + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +/* + * min/max macros. + */ + +#define min(a,b) ((a) < (b) ? (a) : (b)) +#define max(a,b) ((a) > (b) ? (a) : (b)) + +/* + * These are opaque to the user. + */ +typedef thread_t IOThread; +typedef void (*IOThreadFunc)(void *argument); + +/* + * Memory allocation functions. + */ + +/*! @function IOMalloc + @abstract Allocates general purpose, wired memory in the kernel map. + @discussion This is a general purpose utility to allocate memory in the kernel. There are no alignment guarantees given on the returned memory, and alignment may vary depending on the kernel configuration. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param size Size of the memory requested. + @result Pointer to the allocated memory, or zero on failure. */ + +void * IOMalloc(vm_size_t size); + +/*! @function IOFree + @abstract Frees memory allocated with IOMalloc. + @discussion This function frees memory allocated with IOMalloc, it may block and so should not be called from interrupt level or while a simple lock is held. + @param address Pointer to the allocated memory. + @param size Size of the memory allocated. */ + +void IOFree(void * address, vm_size_t size); + +/*! @function IOMallocAligned + @abstract Allocates wired memory in the kernel map, with an alignment restriction. + @discussion This is a utility to allocate memory in the kernel, with an alignment restriction which is specified as a byte count. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param size Size of the memory requested. 
+ @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bit 0-7 zero. + @result Pointer to the allocated memory, or zero on failure. */ + +void * IOMallocAligned(vm_size_t size, vm_offset_t alignment); + +/*! @function IOFreeAligned + @abstract Frees memory allocated with IOMallocAligned. + @discussion This function frees memory allocated with IOMallocAligned, it may block and so should not be called from interrupt level or while a simple lock is held. + @param address Pointer to the allocated memory. + @param size Size of the memory allocated. */ + +void IOFreeAligned(void * address, vm_size_t size); + +/*! @function IOMallocContiguous + @abstract Allocates wired memory in the kernel map, with an alignment restriction and physically contiguous. + @discussion This is a utility to allocate memory in the kernel, with an alignment restriction which is specified as a byte count, and will allocate only physically contiguous memory. The request may fail if memory is fragmented, and may cause large amounts of paging activity. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param size Size of the memory requested. + @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. + @param physicalAddress IOMallocContiguous returns the physical address of the allocated memory here, if physicalAddress is a non-zero pointer. + @result Virtual address of the allocated memory, or zero on failure. */ + +void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, + IOPhysicalAddress * physicalAddress); + +/*! @function IOFreeContiguous + @abstract Frees memory allocated with IOMallocContiguous. + @discussion This function frees memory allocated with IOMallocContiguous, it may block and so should not be called from interrupt level or while a simple lock is held. 
+ @param address Virtual address of the allocated memory. + @param size Size of the memory allocated. */ + +void IOFreeContiguous(void * address, vm_size_t size); + + +/*! @function IOMallocPageable + @abstract Allocates pageable memory in the kernel map. + @discussion This is a utility to allocate pageable memory in the kernel. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param size Size of the memory requested. + @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. + @result Pointer to the allocated memory, or zero on failure. */ + +void * IOMallocPageable(vm_size_t size, vm_size_t alignment); + +/*! @function IOFreePageable + @abstract Frees memory allocated with IOMallocPageable. + @discussion This function frees memory allocated with IOMallocPageable, it may block and so should not be called from interrupt level or while a simple lock is held. + @param address Virtual address of the allocated memory. + @param size Size of the memory allocated. */ + +void IOFreePageable(void * address, vm_size_t size); + +/* + * Typed memory allocation macros. Both may block. + */ +#define IONew(type,number) (type*)IOMalloc(sizeof(type) * (number) ) +#define IODelete(ptr,type,number) IOFree( (ptr) , sizeof(type) * (number) ) + +/*! @function IOSetProcessorCacheMode + @abstract Sets the processor cache mode for mapped memory. + @discussion This function sets the cache mode of an already mapped & wired memory range. Note this may not be supported on I/O mappings or shared memory - it is far preferable to set the cache mode as mappings are created with the IOMemoryDescriptor::map method. + @param task Task the memory is mapped into. + @param address Virtual address of the memory. + @param length Length of the range to set. + @param cacheMode A constant from IOTypes.h,
+ kIOMapDefaultCache to use the default mode for the memory type - cache-inhibited in I/O areas, copyback in general purpose RAM.
+ kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.
+ @result An IOReturn code.*/ + +IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, + IOByteCount length, IOOptionBits cacheMode ); + +/*! @function IOFlushProcessorCache + @abstract Flushes the processor cache for mapped memory. + @discussion This function flushes the processor cache of an already mapped memory range. Note in most cases it is preferable to use IOMemoryDescriptor::prepare and complete to manage cache coherency since they are aware of the architecture's requirements. Flushing the processor cache is not required for coherency in most situations. + @param task Task the memory is mapped into. + @param address Virtual address of the memory. + @param length Length of the range to set. + @result An IOReturn code. */ + +IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address, + IOByteCount length ); + +/*! @function IOThreadSelf + @abstract Returns the osfmk identifier for the currently running thread. + @discussion This function returns the current thread (a pointer to the currently active osfmk thread_shuttle). */ + +#define IOThreadSelf() (current_thread()) + +/*! @function IOCreateThread + @abstract Create a kernel thread. + @discussion This function creates a kernel thread, and passes the caller supplied argument to the new thread. + @param function A C-function pointer where the thread will begin execution. + @param argument Caller specified data to be passed to the new thread. + @result An IOThread identifier for the new thread, equivalent to an osfmk thread_t. */ + +IOThread IOCreateThread(IOThreadFunc function, void *argument); + +/*! @function IOExitThread + @abstract Terminate exceution of current thread. + @discussion This function destroys the currently running thread, and does not return. */ + +volatile void IOExitThread(); + +/*! @function IOSleep + @abstract Sleep the calling thread for a number of milliseconds. 
+ @discussion This function blocks the calling thread for at least the number of specified milliseconds, giving time to other processes. + @param milliseconds The integer number of milliseconds to wait. */ + +void IOSleep(unsigned milliseconds); + +/*! @function IODelay + @abstract Spin delay for a number of microseconds. + @discussion This function spins to delay for at least the number of specified microseconds. Since the CPU is busy spinning no time is made available to other processes; this method of delay should be used only for short periods. Also, the AbsoluteTime based APIs of kern/clock.h provide finer grained and lower cost delays. + @param microseconds The integer number of microseconds to spin wait. */ + +void IODelay(unsigned microseconds); + +/*! @function IOLog + @abstract Log a message to console in text mode, and /var/log/system.log. + @discussion This function allows a driver to log diagnostic information to the screen during verbose boots, and to a log file found at /var/log/system.log. IOLog should not be called from interrupt context. + @param format A printf() style format string (see printf() documentation). + @param other arguments described by the format string. */ + +void IOLog(const char *format, ...) +__attribute__((format(printf, 1, 2))); + +void kprintf(const char *format, ...); + +/* + * Convert a integer constant (typically a #define or enum) to a string + * via an array of IONamedValue. + */ +const char *IOFindNameForValue(int value, + const IONamedValue *namedValueArray); + +/* + * Convert a string to an int via an array of IONamedValue. Returns + * kIOReturnSuccess of string found, else returns kIOReturnBadArgument. + */ +IOReturn IOFindValueForName(const char *string, + const IONamedValue *regValueArray, + int *value); /* RETURNED */ + +/*! @function Debugger + @abstract Enter the kernel debugger. + @discussion This function freezes the kernel and enters the builtin debugger. 
It may not be possible to exit the debugger without a second machine. + @param reason A C-string to describe why the debugger is being entered. */ + +void Debugger(const char * reason); + +struct OSDictionary * IOBSDNameMatching( const char * name ); +struct OSDictionary * IOOFPathMatching( const char * path, char * buf, int maxLen ); + +/* + * Convert between size and a power-of-two alignment. + */ +IOAlignment IOSizeToAlignment(unsigned int size); +unsigned int IOAlignmentToSize(IOAlignment align); + +/* + * Multiply and divide routines for IOFixed datatype. + */ + +static inline IOFixed IOFixedMultiply(IOFixed a, IOFixed b) +{ + return (IOFixed)((((SInt64) a) * ((SInt64) b)) >> 16); +} + +static inline IOFixed IOFixedDivide(IOFixed a, IOFixed b) +{ + return (IOFixed)((((SInt64) a) << 16) / ((SInt64) b)); +} + +/* + * IORound and IOTrunc convenience functions, in the spirit + * of vm's round_page() and trunc_page(). + */ +#define IORound(value,multiple) \ + ((((value) + (multiple) - 1) / (multiple)) * (multiple)) + +#define IOTrunc(value,multiple) \ + (((value) / (multiple)) * (multiple)); + + +#if IOKIT_DEPRECATED + +/* The following API is deprecated */ + +#undef eieio +#define eieio() \ + OSSynchronizeIO() + +void IOPanic(const char *reason); + +/* The AbsoluteTime clock API exported by kern/clock.h + should be used for high resolution timing. */ + +void IOGetTime( mach_timespec_t * clock_time); + +extern mach_timespec_t IOZeroTvalspec; + +#endif /* IOKIT_DEPRECATED */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* !__IOKIT_IOLIB_H */ diff --git a/iokit/IOKit/IOLocks.h b/iokit/IOKit/IOLocks.h new file mode 100644 index 000000000..7771cff8c --- /dev/null +++ b/iokit/IOKit/IOLocks.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + */ + +#ifndef __IOKIT_IOLOCKS_H +#define __IOKIT_IOLOCKS_H + +#ifndef KERNEL +#error IOLocks.h is for kernel use only +#endif + +#ifndef IOKIT_DEPRECATED +#define IOKIT_DEPRECATED 1 +#endif + +#include + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/* + * Mutex lock operations + */ + +typedef mutex_t IOLock; + +/*! @function IOLockAlloc + @abstract Allocates and initializes an osfmk mutex. + @discussion Allocates an osfmk mutex in general purpose memory, and initilizes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by osfmk/kern/lock.h. This function may block and so should not be called from interrupt level or while a simple lock is held. + @result Pointer to the allocated lock, or zero on failure. */ + +IOLock * IOLockAlloc( void ); + +/*! @function IOLockFree + @abstract Frees an osfmk mutex. + @discussion Frees a lock allocated with IOLockAlloc. Any blocked waiters will not be woken. + @param lock Pointer to the allocated lock. */ + +void IOLockFree( IOLock * lock); + +/*! 
@function IOLockLock + @abstract Lock an osfmk mutex. + @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a simple lock is held. Locking the mutex recursively from one thread will result in deadlock. + @param lock Pointer to the allocated lock. */ + +static __inline__ +void IOLockLock( IOLock * lock) +{ + _mutex_lock(lock); +} + +/*! @function IOLockTryLock + @abstract Attempt to lock an osfmk mutex. + @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false. + @param lock Pointer to the allocated lock. + @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */ + +static __inline__ +boolean_t IOLockTryLock( IOLock * lock) +{ + return(_mutex_try(lock)); +} + +/*! @function IOLockUnlock + @abstract Unlock an osfmk mutex. +@discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param lock Pointer to the allocated lock. */ + +static __inline__ +void IOLockUnlock( IOLock * lock) +{ + mutex_unlock(lock); +} + +#if IOKIT_DEPRECATED + +/* The following API is deprecated */ + +typedef enum { + kIOLockStateUnlocked = 0, + kIOLockStateLocked = 1, +} IOLockState; + +void IOLockInitWithState( IOLock * lock, IOLockState state); +#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked); + +static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock); } +static __inline__ boolean_t IOTryLock( IOLock * lock) { return(IOLockTryLock(lock)); } +static __inline__ void IOUnlock( IOLock * lock) { IOLockUnlock(lock); } + +#endif /* IOKIT_DEPRECATED */ + +/* + * Recursive lock operations + */ + +typedef struct _IORecursiveLock IORecursiveLock; + +/*! 
@function IORecursiveLockAlloc + @abstract Allocates and initializes an recursive lock. + @discussion Allocates a recursive lock in general purpose memory, and initilizes it. Recursive locks function identically to osfmk mutexes but allow one thread to lock more than once, with balanced unlocks. + @result Pointer to the allocated lock, or zero on failure. */ + +IORecursiveLock * IORecursiveLockAlloc( void ); + +/*! @function IORecursiveLockFree + @abstract Frees a recursive lock. + @discussion Frees a lock allocated with IORecursiveLockAlloc. Any blocked waiters will not be woken. + @param lock Pointer to the allocated lock. */ + +void IORecursiveLockFree( IORecursiveLock * lock); + +/*! @function IORecursiveLockLock + @abstract Lock a recursive lock. + @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a simple lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock. + @param lock Pointer to the allocated lock. */ + +void IORecursiveLockLock( IORecursiveLock * lock); + +/*! @function IORecursiveLockTryLock + @abstract Attempt to lock a recursive lock. + @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock. + @param lock Pointer to the allocated lock. + @result True if the lock is now locked by the caller, otherwise false. */ + +boolean_t IORecursiveLockTryLock( IORecursiveLock * lock); + +/*! @function IORecursiveLockUnlock + @abstract Unlock a recursive lock. +@discussion Undo one call to IORecursiveLockLock, if the lock is now unlocked wake any blocked waiters. 
Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param lock Pointer to the allocated lock. */ + +void IORecursiveLockUnlock( IORecursiveLock * lock); + +/*! @function IORecursiveLockHaveLock + @abstract Check if a recursive lock is held by the calling thread. + @discussion If the lock is held by the calling thread, return true, otherwise the lock is unlocked, or held by another thread and false is returned. + @param lock Pointer to the allocated lock. + @result True if the calling thread holds the lock otherwise false. */ + +boolean_t IORecursiveLockHaveLock( const IORecursiveLock * lock); + +extern int IORecursiveLockSleep( IORecursiveLock *_lock, + void *event, UInt32 interType); +extern void IORecursiveLockWakeup( IORecursiveLock *_lock, + void *event, bool oneThread); + +/* + * Complex (read/write) lock operations + */ + +typedef lock_t IORWLock; + +/*! @function IORWLockAlloc + @abstract Allocates and initializes an osfmk general (read/write) lock. +@discussion Allocates an initializes an osfmk lock_t in general purpose memory, and initilizes it. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by osfmk/kern/lock.h. This function may block and so should not be called from interrupt level or while a simple lock is held. + @result Pointer to the allocated lock, or zero on failure. */ + +IORWLock * IORWLockAlloc( void ); + +/*! @function IORWLockFree + @abstract Frees an osfmk general (read/write) lock. + @discussion Frees a lock allocated with IORWLockAlloc. Any blocked waiters will not be woken. + @param lock Pointer to the allocated lock. */ + +void IORWLockFree( IORWLock * lock); + +/*! @function IORWLockRead + @abstract Lock an osfmk lock for read. +@discussion Lock the lock for read, allowing multiple readers when there are no writers. 
If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a simple lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. + @param lock Pointer to the allocated lock. */ + +static __inline__ +void IORWLockRead( IORWLock * lock) +{ + lock_read( lock); +} + +/*! @function IORWLockWrite + @abstract Lock an osfmk lock for write. + @discussion Lock the lock for write, allowing one writer exlusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a simple lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. + @param lock Pointer to the allocated lock. */ + +static __inline__ +void IORWLockWrite( IORWLock * lock) +{ + lock_write( lock); +} + +/*! @function IORWLockUnlock + @abstract Unlock an osfmk lock. + @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a simple lock is held. + @param lock Pointer to the allocated lock. */ + +static __inline__ +void IORWLockUnlock( IORWLock * lock) +{ + lock_done( lock); +} + +#if IOKIT_DEPRECATED + +/* The following API is deprecated */ + +static __inline__ void IOReadLock( IORWLock * lock) { IORWLockRead(lock); } +static __inline__ void IOWriteLock( IORWLock * lock) { IORWLockWrite(lock); } +static __inline__ void IORWUnlock( IORWLock * lock) { IORWLockUnlock(lock); } + +#endif /* IOKIT_DEPRECATED */ + + +/* + * Simple locks. Cannot block while holding a simple lock. + */ + +typedef simple_lock_data_t IOSimpleLock; + +/*! @function IOSimpleLockAlloc + @abstract Allocates and initializes an osfmk simple (spin) lock. 
+ @discussion Allocates an initializes an osfmk simple lock in general purpose memory, and initilizes it. Simple locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by osfmk/kern/simple_lock.h. This function may block and so should not be called from interrupt level or while a simple lock is held. + @result Pointer to the allocated lock, or zero on failure. */ + +IOSimpleLock * IOSimpleLockAlloc( void ); + +/*! @function IOSimpleLockFree + @abstract Frees an osfmk simple (spin) lock. + @discussion Frees a lock allocated with IOSimpleLockAlloc. + @param lock Pointer to the lock. */ + +void IOSimpleLockFree( IOSimpleLock * lock ); + +/*! @function IOSimpleLockInit + @abstract Initialize an osfmk simple (spin) lock. + @discussion Initialize an embedded osfmk simple lock, to the unlocked state. + @param lock Pointer to the lock. */ + +void IOSimpleLockInit( IOSimpleLock * lock ); + +/*! @function IOSimpleLockLock + @abstract Lock an osfmk simple lock. +@discussion Lock the simple lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock. + @param lock Pointer to the lock. */ + +static __inline__ +void IOSimpleLockLock( IOSimpleLock * lock ) +{ + simple_lock( lock ); +} + +/*! @function IOSimpleLockTryLock + @abstract Attempt to lock an osfmk simple lock. +@discussion Lock the simple lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock. + @param lock Pointer to the lock. 
+ @result True if the lock was unlocked and is now locked by the caller, otherwise false. */ + +static __inline__ +boolean_t IOSimpleLockTryLock( IOSimpleLock * lock ) +{ + return( simple_lock_try( lock ) ); +} + +/*! @function IOSimpleLockUnlock + @abstract Unlock an osfmk simple lock. + @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock. + @param lock Pointer to the lock. */ + +static __inline__ +void IOSimpleLockUnlock( IOSimpleLock * lock ) +{ + simple_unlock( lock ); +} + +typedef long int IOInterruptState; + +/*! @function IOSimpleLockLockDisableInterrupt + @abstract Lock an osfmk simple lock. + @discussion Lock the simple lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock. + @param lock Pointer to the lock. */ + +static __inline__ +IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock ) +{ + IOInterruptState state = ml_set_interrupts_enabled( false ); + simple_lock( lock ); + return( state ); +} + +/*! @function IOSimpleLockUnlockEnableInterrupt + @abstract Unlock an osfmk simple lock, and restore interrupt state. + @discussion Unlock the lock, and restore preemption and interrupts to the state as they were when the lock was taken. Results are undefined if the caller has not locked the lock. + @param lock Pointer to the lock. 
+ @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt() */ + +static __inline__ +void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock, + IOInterruptState state ) +{ + simple_unlock( lock ); + ml_set_interrupts_enabled( state ); +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* !__IOKIT_IOLOCKS_H */ + diff --git a/iokit/IOKit/IOMemoryCursor.h b/iokit/IOKit/IOMemoryCursor.h new file mode 100644 index 000000000..9f866e1b4 --- /dev/null +++ b/iokit/IOKit/IOMemoryCursor.h @@ -0,0 +1,451 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOMEMORYCURSOR_H +#define _IOMEMORYCURSOR_H + +#include +#include + +class IOMemoryDescriptor; + +/**************************** class IOMemoryCursor ***************************/ + +/*! + @class IOMemoryCursor : public OSObject + @abstract A mechanism to convert memory references to physical addresses. 
+ @discussion The IOMemoryCursor declares the super class that all +specific memory cursors must inherit from, but a memory cursor can be created without a specific format subclass by just providing a segment function to the initializers. This class does the difficult stuff of dividing a memory descriptor into a physical scatter/gather list appropriate for the target hardware. +

+ A driver is expected to create a memory cursor and configure it to the limitations of its DMA hardware; for instance the memory cursor used by the firewire SBP2 protocol has a maximum physical segment size of 2^16 - 1 but the actual transfer size is unlimited. Thus it would create a cursor with a maxSegmentSize of 65535 and a maxTransfer size of UINT_MAX. It would also provide a SegmentFunction that can output a pagelist entry. +

+Below is the simplest example of a SegmentFunction:- +void IONaturalMemoryCursor::outputSegment(PhysicalSegment segment, + void * outSegments, + UInt32 outSegmentIndex) +{ + ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment; +} + +*/ +class IOMemoryCursor : public OSObject +{ + OSDeclareDefaultStructors(IOMemoryCursor) + +public: +/*! + @typedef PhysicalSegment + @discussion A physical address/length pair. +*/ + struct PhysicalSegment + { + IOPhysicalAddress location; + IOPhysicalLength length; + }; + +/*! @defined IOPhysicalSegment + @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOMemoryCursor::PhysicalSegment */ +#define IOPhysicalSegment IOMemoryCursor::PhysicalSegment + +/*! + @typedef SegmentFunction + @discussion Pointer to a C function that outputs a single physical segment to an element in the array as defined by the segments and segmentIndex parameters. + @param segment The physical address and length that is next to be output. + @param segments Base of the output vector of DMA address length pairs. + @param segmentIndex Index to output 'segment' in the 'segments' array. +*/ + typedef void (*SegmentFunction)(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); + +/*! @defined OutputSegmentFunc + @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOMemoryCursor::SegmentFunction */ +#define OutputSegmentFunc IOMemoryCursor::SegmentFunction + +protected: +/*! @var outSeg The action method called when an event has been delivered */ + SegmentFunction outSeg; + +/*! @var maxSegmentSize Maximum size of one segment in a scatter/gather list */ + IOPhysicalLength maxSegmentSize; + +/*! @var maxTransferSize + Maximum size of a transfer that this memory cursor is allowed to generate */ + IOPhysicalLength maxTransferSize; + +/*! @var alignMask + Currently unused. Reserved for automated aligment restriction code. 
*/ + IOPhysicalLength alignMask; + +public: +/*! @function withSpecification + @abstract Factory function to create and initialise an IOMemoryCursor in one operation, see $link IOMemoryCursor::initWithSpecification. + @param outSegFunc SegmentFunction to call to output one physical segment. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result A new memory cursor if successfully created and initialised, 0 otherwise. +*/ + static IOMemoryCursor * + withSpecification(SegmentFunction outSegFunc, + IOPhysicalLength maxSegmentSize = 0, + IOPhysicalLength maxTransferSize = 0, + IOPhysicalLength alignment = 1); + +/*! @function initWithSpecification + @abstract Primary initialiser for the IOMemoryCursor class. + @param outSegFunc SegmentFunction to call to output one physical segment. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result true if the inherited classes and this instance initialise +successfully. +*/ + virtual bool initWithSpecification(SegmentFunction outSegFunc, + IOPhysicalLength maxSegmentSize = 0, + IOPhysicalLength maxTransferSize = 0, + IOPhysicalLength alignment = 1); + +/*! @function genPhysicalSegments + @abstract Generate a physical scatter/gather list given a memory descriptor. + @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. + @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. 
+ @param fromPosition Starting location of the I/O within a memory descriptor. + @param segments Void pointer to base of output physical scatter/gather list. Always passed directly onto the SegmentFunction without interpretation by the cursor. + @param maxSegments Maximum number of segments that can be written to segments array. + @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + @param transferSize Pointer to a IOByteCount variable that can contain the total size of the transfer being described. Default to 0 indicating that no transfer size need be returned. + @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. +*/ + virtual UInt32 genPhysicalSegments( + IOMemoryDescriptor *descriptor, + IOByteCount fromPosition, + void * segments, + UInt32 maxSegments, + UInt32 maxTransferSize = 0, + IOByteCount *transferSize = 0); +}; + +/************************ class IONaturalMemoryCursor ************************/ + + +/*! + @class IONaturalMemoryCursor : public IOMemoryCursor + @abstract A $link IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the natural byte orientation for the cpu. + @discussion The IONaturalMemoryCursor would be used when it is too difficult to safely describe a SegmentFunction that is more appropriate for your hardware. This cursor just outputs an array of PhysicalSegments. +*/ +class IONaturalMemoryCursor : public IOMemoryCursor +{ + OSDeclareDefaultStructors(IONaturalMemoryCursor) + +public: +/*! @funtion outputSegment + @abstract Output the given segment into the output segments array in natural byte order. + @param segment The physical address and length that is next to be output. + @param segments Base of the output vector of DMA address length pairs. 
+ @param segmentIndex Index to output 'segment' in the 'segments' array. +*/ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); + +/*! @defined naturalOutputSegment + @discussion Backward compatibilty define for the old global function definition. See $link IONaturalMemoryCursor::outputSegment */ +#define naturalOutputSegment IONaturalMemoryCursor::outputSegment + +/*! @function withSpecification + @abstract Factory function to create and initialise an IONaturalMemoryCursor in one operation, see $link IONaturalMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result A new memory cursor if successfully created and initialised, 0 otherwise. +*/ + static IONaturalMemoryCursor * + withSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + +/*! @function initWithSpecification + @abstract Primary initialiser for the IONaturalMemoryCursor class. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result true if the inherited classes and this instance initialise +successfully. +*/ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + + +/*! @function getPhysicalSegments + @abstract Generate a cpu natural physical scatter/gather list given a memory descriptor. 
+ @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps $link IOMemoryCursor::genPhysicalSegments. + @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + @param fromPosition Starting location of the I/O within a memory descriptor. + @param segments Pointer to an array of $link IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. + @param maxSegments Maximum number of segments that can be written to segments array. + @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + @param transferSize Pointer to a IOByteCount variable that can contain the total size of the transfer being described. Default to 0 indicating that no transfer size need be returned. + @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. +*/ + virtual UInt32 getPhysicalSegments(IOMemoryDescriptor *descriptor, + IOByteCount fromPosition, + PhysicalSegment *segments, + UInt32 maxSegments, + UInt32 maxTransferSize = 0, + IOByteCount *transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, maxTransferSize, transferSize); + } +}; + +/************************** class IOBigMemoryCursor **************************/ + +/*! + @class IOBigMemoryCursor : public IOMemoryCursor + @abstract A $link IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the big endian byte order. + @discussion The IOBigMemoryCursor would be used when the DMA hardware requires a big endian address and length pair. This cursor outputs an array of PhysicalSegments that are encoded in big-endian format. +*/ +class IOBigMemoryCursor : public IOMemoryCursor +{ + OSDeclareDefaultStructors(IOBigMemoryCursor) + +public: +/*! 
@funtion outputSegment + @abstract Output the given segment into the output segments array in big endian byte order. + @param segment The physical address and length that is next to be output. + @param segments Base of the output vector of DMA address length pairs. + @param segmentIndex Index to output 'segment' in the 'segments' array. +*/ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); + +/*! @defined bigOutputSegment + @discussion Backward compatibilty define for the old global function definition. See $link IOBigMemoryCursor::outputSegment */ +#define bigOutputSegment IOBigMemoryCursor::outputSegment + +/*! @function withSpecification + @abstract Factory function to create and initialise an IOBigMemoryCursor in one operation, see $link IOBigMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result A new memory cursor if successfully created and initialised, 0 otherwise. +*/ + static IOBigMemoryCursor * + withSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + +/*! @function initWithSpecification + @abstract Primary initialiser for the IOBigMemoryCursor class. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result true if the inherited classes and this instance initialise +successfully. 
+*/ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + + +/*! @function getPhysicalSegments + @abstract Generate a big endian physical scatter/gather list given a memory descriptor. + @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps $link IOMemoryCursor::genPhysicalSegments. + @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + @param fromPosition Starting location of the I/O within a memory descriptor. + @param segments Pointer to an array of $link IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. + @param maxSegments Maximum number of segments that can be written to segments array. + @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + @param transferSize Pointer to a IOByteCount variable that can contain the total size of the transfer being described. Default to 0 indicating that no transfer size need be returned. + @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. +*/ + virtual UInt32 getPhysicalSegments(IOMemoryDescriptor * descriptor, + IOByteCount fromPosition, + PhysicalSegment * segments, + UInt32 maxSegments, + UInt32 maxTransferSize = 0, + IOByteCount * transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, maxTransferSize, transferSize); + } +}; + +/************************* class IOLittleMemoryCursor ************************/ + +/*! + @class IOLittleMemoryCursor : public IOMemoryCursor + @abstract A $link IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the little endian byte order. 
+ @discussion The IOLittleMemoryCursor would be used when the DMA hardware requires a little endian address and length pair. This cursor outputs an array of PhysicalSegments that are encoded in little endian format. +*/ +class IOLittleMemoryCursor : public IOMemoryCursor +{ + OSDeclareDefaultStructors(IOLittleMemoryCursor) + +public: +/*! @funtion outputSegment + @abstract Output the given segment into the output segments array in little endian byte order. + @param segment The physical address and length that is next to be output. + @param segments Base of the output vector of DMA address length pairs. + @param segmentIndex Index to output 'segment' in the 'segments' array. +*/ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); + +/*! @defined littleOutputSegment + @discussion Backward compatibilty define for the old global function definition. See $link IOLittleMemoryCursor::outputSegment */ +#define littleOutputSegment IOLittleMemoryCursor::outputSegment + +/*! @function withSpecification + @abstract Factory function to create and initialise an IOLittleMemoryCursor in one operation, see $link IOLittleMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result A new memory cursor if successfully created and initialised, 0 otherwise. +*/ + static IOLittleMemoryCursor * + withSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + +/*! @function initWithSpecification + @abstract Primary initialiser for the IOLittleMemoryCursor class. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. 
+ @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result true if the inherited classes and this instance initialise +successfully. +*/ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + + +/*! @function getPhysicalSegments + @abstract Generate a little endian physical scatter/gather list given a memory descriptor. + @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps $link IOMemoryCursor::genPhysicalSegments. + @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + @param fromPosition Starting location of the I/O within a memory descriptor. + @param segments Pointer to an array of $link IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. + @param maxSegments Maximum number of segments that can be written to segments array. + @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + @param transferSize Pointer to a IOByteCount variable that can contain the total size of the transfer being described. Default to 0 indicating that no transfer size need be returned. + @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. 
+*/ + virtual UInt32 getPhysicalSegments(IOMemoryDescriptor * descriptor, + IOByteCount fromPosition, + PhysicalSegment * segments, + UInt32 maxSegments, + UInt32 maxTransferSize = 0, + IOByteCount * transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, maxTransferSize, transferSize); + } +}; + +/************************* class IODBDMAMemoryCursor *************************/ + +#if defined(__ppc__) + +struct IODBDMADescriptor; + +/*! + @class IODBDMAMemoryCursor : public IOMemoryCursor + @abstract A $link IOMemoryCursor subclass that outputs a vector of DBDMA descriptors where the address and length are filled in. + @discussion The IODBDMAMemoryCursor would be used when the DBDMA hardware is available for the device for that will use an instance of this cursor. +*/ +class IODBDMAMemoryCursor : public IOMemoryCursor +{ + OSDeclareDefaultStructors(IODBDMAMemoryCursor) + +public: +/*! @funtion outputSegment + @abstract Output the given segment into the output segments array in address and length fields of an DBDMA descriptor. + @param segment The physical address and length that is next to be output. + @param segments Base of the output vector of DMA address length pairs. + @param segmentIndex Index to output 'segment' in the 'segments' array. +*/ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); + +/*! @defined dbdmaOutputSegment + @discussion Backward compatibilty define for the old global function definition. See $link IODBDMAMemoryCursor::outputSegment */ +#define dbdmaOutputSegment IODBDMAMemoryCursor::outputSegment + +/*! @function withSpecification + @abstract Factory function to create and initialise an IODBDMAMemoryCursor in one operation, see $link IODBDMAMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. 
+ @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result A new memory cursor if successfully created and initialised, 0 otherwise. +*/ + static IODBDMAMemoryCursor * + withSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + +/*! @function initWithSpecification + @abstract Primary initialiser for the IODBDMAMemoryCursor class. + @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + @param maxTransferSize Maximum size of an entire transfer. Default to 0 indicating no maximum. + @param alignment Alligment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + @result true if the inherited classes and this instance initialise +successfully. +*/ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); + + +/*! @function getPhysicalSegments + @abstract Generate a DBDMA physical scatter/gather list given a memory descriptor. + @discussion Generates a list of DBDMA descriptors where the address and length fields are filled in appropriately. But the client is expected to fill in the rest of teh DBDMA descriptor as is appropriate for their particular hardware. Wraps $link IOMemoryCursor::genPhysicalSegments. + @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + @param fromPosition Starting location of the I/O within a memory descriptor. + @param segments Pointer to an array of DBDMA descriptors for the output physical scatter/gather list. Be warned no room is left for a preamble in the output array. 'segments' should point to the first memory description slot in a DBDMA command. + @param maxSegments Maximum number of segments that can be written to the dbdma descriptor table. 
+ @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + @param transferSize Pointer to a IOByteCount variable that can contain the total size of the transfer being described. Default to 0 indicating that no transfer size need be returned. + @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. +*/ + virtual UInt32 getPhysicalSegments(IOMemoryDescriptor * descriptor, + IOByteCount fromPosition, + IODBDMADescriptor * segments, + UInt32 maxSegments, + UInt32 maxTransferSize = 0, + IOByteCount * transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, maxTransferSize, transferSize); + } +}; + +#endif /* defined(__ppc__) */ + +#endif /* !_IOMEMORYCURSOR_H */ + diff --git a/iokit/IOKit/IOMemoryDescriptor.h b/iokit/IOKit/IOMemoryDescriptor.h new file mode 100644 index 000000000..bb3b5d54e --- /dev/null +++ b/iokit/IOKit/IOMemoryDescriptor.h @@ -0,0 +1,690 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOMEMORYDESCRIPTOR_H +#define _IOMEMORYDESCRIPTOR_H + +#include +#include + +struct IOPhysicalRange +{ + IOPhysicalAddress address; + IOByteCount length; +}; + +class IOMemoryMap; + +/* + * Direction of transfer, with respect to the described memory. + */ +enum IODirection +{ + kIODirectionNone = 0x0, // same as VM_PROT_NONE + kIODirectionIn = 0x1, // User land 'read', same as VM_PROT_READ + kIODirectionOut = 0x2, // User land 'write', same as VM_PROT_WRITE + kIODirectionOutIn = kIODirectionIn | kIODirectionOut, +}; + +/*! @class IOMemoryDescriptor : public OSObject + @abstract An abstract base class defining common methods for describing physical or virtual memory. + @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */ + +class IOMemoryDescriptor : public OSObject +{ + friend class _IOMemoryMap; + friend class IOSubMemoryDescriptor; + + OSDeclareDefaultStructors(IOMemoryDescriptor); + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. 
(Internal use only) */ + ExpansionData * reserved; + +protected: + OSSet * _mappings; + IOOptionBits _flags; + void * _memEntry; + + IODirection _direction; /* direction of transfer */ + IOByteCount _length; /* length of all ranges */ + IOOptionBits _tag; + +private: + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 0); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15); + +protected: + virtual void free(); +public: + static void initialize( void ); + +public: +/*! @function withAddress + @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task. + @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. + @param address The virtual address of the first byte in the memory. + @param withLength The length of memory. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. 
*/ + + static IOMemoryDescriptor * withAddress(void * address, + IOByteCount withLength, + IODirection withDirection); + +/*! @function withAddress + @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map. + @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. + @param address The virtual address of the first byte in the memory. + @param withLength The length of memory. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param withTask The task the virtual ranges are mapped into. + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask); + +/*! @function withPhysicalAddress + @abstract Create an IOMemoryDescriptor to describe one physical range. + @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range. + @param address The physical address of the first byte in the memory. + @param withLength The length of memory. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ); + +/*! @function withRanges + @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges. 
+ @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. + @param ranges An array of IOVirtualRange structures which specify the virtual ranges in the specified map which make up the memory to be described. + @param withCount The member count of the ranges array. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param withTask The task each of the virtual ranges are mapped into. + @param asReference If false, the IOMemoryDescriptor object will make a copy of the ranges array, otherwise, the array will be used in situ, avoiding an extra allocation. + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false); + +/*! @function withPhysicalRanges + @abstract Create an IOMemoryDescriptor to describe one or more physical ranges. + @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of physical memory ranges. + @param ranges An array of IOPhysicalRange structures which specify the physical ranges which make up the memory to be described. + @param withCount The member count of the ranges array. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param asReference If false, the IOMemoryDescriptor object will make a copy of the ranges array, otherwise, the array will be used in situ, avoiding an extra allocation. + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. 
*/ + + static IOMemoryDescriptor * withPhysicalRanges( + IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false); + +/*! @function withSubRange + @abstract Create an IOMemoryDescriptor to describe a subrange of an existing descriptor. + @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a subrange of the specified memory descriptor. The parent memory descriptor is retained by the new descriptor. + @param of The parent IOMemoryDescriptor of which a subrange is to be used for the new descriptor, which will be retained by the subrange IOMemoryDescriptor. + @param offset A byte offset into the parent memory descriptor's memory. + @param length The length of the subrange. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. This is used over the direction of the parent descriptor. + @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor * of, + IOByteCount offset, + IOByteCount length, + IODirection withDirection); + +/*! @function initWithAddress + @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one virtual range of the kernel task. + @discussion This method initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. An IOMemoryDescriptor can be re-used by calling initWithAddress or initWithRanges again on an existing instance -- note this behavior is not commonly supported in other IOKit classes, although it is supported here. + @param address The virtual address of the first byte in the memory. + @param withLength The length of memory. 
+ @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @result true on success, false on failure. */ + + virtual bool initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection) = 0; + +/*! @function initWithAddress + @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one virtual range of the specified map. + @discussion This method initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. An IOMemoryDescriptor can be re-used by calling initWithAddress or initWithRanges again on an existing instance -- note this behavior is not commonly supported in other IOKit classes, although it is supported here. + @param address The virtual address of the first byte in the memory. + @param withLength The length of memory. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param withTask The task the virtual ranges are mapped into. + @result true on success, false on failure. */ + + virtual bool initWithAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) = 0; + +/*! @function initWithPhysicalAddress + @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one physical range. + @discussion This method initializes an IOMemoryDescriptor for memory consisting of a single physical memory range. An IOMemoryDescriptor can be re-used by calling initWithAddress or initWithRanges again on an existing instance -- note this behavior is not commonly supported in other IOKit classes, although it is supported here. + @param address The physical address of the first byte in the memory. + @param withLength The length of memory. 
+ @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @result true on success, false on failure. */ + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) = 0; + +/*! @function initWithRanges + @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one or more virtual ranges. + @discussion This method initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. An IOMemoryDescriptor can be re-used by calling initWithAddress or initWithRanges again on an existing instance -- note this behavior is not commonly supported in other IOKit classes, although it is supported here. + @param ranges An array of IOVirtualRange structures which specify the virtual ranges in the specified map which make up the memory to be described. + @param withCount The member count of the ranges array. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param withTask The task each of the virtual ranges are mapped into. + @param asReference If false, the IOMemoryDescriptor object will make a copy of the ranges array, otherwise, the array will be used in situ, avoiding an extra allocation. + @result true on success, false on failure. */ + + virtual bool initWithRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) = 0; + +/*! @function initWithPhysicalRanges + @abstract Initialize or reinitialize an IOMemoryDescriptor to describe one or more physical ranges. + @discussion This method initializes an IOMemoryDescriptor for memory consisting of an array of physical memory ranges. 
An IOMemoryDescriptor can be re-used by calling initWithAddress or initWithRanges again on an existing instance -- note this behavior is not commonly supported in other IOKit classes, although it is supported here. + @param ranges An array of IOPhysicalRange structures which specify the physical ranges which make up the memory to be described. + @param withCount The member count of the ranges array. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param asReference If false, the IOMemoryDescriptor object will make a copy of the ranges array, otherwise, the array will be used in situ, avoiding an extra allocation. + @result true on success, false on failure. */ + + virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) = 0; + +/*! @function getDirection + @abstract Accessor to get the direction the memory descriptor was created with. + @discussion This method returns the direction the memory descriptor was created with. + @result The direction. */ + + virtual IODirection getDirection() const; + +/*! @function getLength + @abstract Accessor to get the length of the memory descriptor (over all its ranges). + @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths. + @result The byte count. */ + + virtual IOByteCount getLength() const; + +/*! @function setTag + @abstract Set the tag for the memory descriptor. + @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor. + @param tag The tag. */ + + virtual void setTag( IOOptionBits tag ); + +/*! @function getTag + @abstract Accessor to the retrieve the tag for the memory descriptor. + @discussion This method returns the tag for the memory descriptor. 
Tag bits are not interpreted by IOMemoryDescriptor. + @result The tag. */ + + virtual IOOptionBits getTag( void ); + +/*! @function readBytes + @abstract Copy data from the memory descriptor's buffer to the specified buffer. + @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. + @param offset A byte offset into the memory descriptor's memory. + @param bytes The caller supplied buffer to copy the data to. + @param withLength The length of the data to copy. + @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. */ + + virtual IOByteCount readBytes(IOByteCount offset, + void * bytes, IOByteCount withLength) = 0; + +/*! @function writeBytes + @abstract Copy data to the memory descriptor's buffer from the specified buffer. + @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. + @param offset A byte offset into the memory descriptor's memory. + @param bytes The caller supplied buffer to copy the data from. + @param withLength The length of the data to copy. + @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. */ + + virtual IOByteCount writeBytes(IOByteCount offset, + const void * bytes, IOByteCount withLength) = 0; + +/*! @function getPhysicalSegment + @abstract Break a memory descriptor into its physically contiguous segments. + @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset. + @param offset A byte offset into the memory whose physical address to return. + @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset. 
+ @result A physical address, or zero if the offset is beyond the length of the memory. */ + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length) = 0; + +/*! @function getPhysicalAddress + @abstract Return the physical address of the first byte in the memory. + @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous. + @result A physical address. */ + + inline IOPhysicalAddress getPhysicalAddress() + { return( getPhysicalSegment( 0, 0 )); } + + /* + * getVirtualSegment: + * + * Get the virtual address of the buffer, relative to the given offset. + * If the memory wasn't mapped into the caller's address space, it will be + * mapped in now. If the current position is at the end of the buffer, a + * null is returned. + */ + virtual void * getVirtualSegment(IOByteCount offset, + IOByteCount * length) = 0; + +/*! @function prepare + @abstract Prepare the memory for an I/O transfer. + @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method needn't called for non-pageable memory. + @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + @result An IOReturn code. */ + + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0; + +/*! @function complete + @abstract Complete processing of the memory after an I/O transfer finishes. + @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. + @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + @result An IOReturn code. 
*/ + + virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0; + + /* + * Mapping functions. + */ + +/*! @function map + @abstract Maps a IOMemoryDescriptor into a task. + @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. + @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space. + @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored. + @param options Mapping options are defined in IOTypes.h,
+ kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.
+ kIOMapDefaultCache to use the default cache mode, which inhibits the cache in I/O areas and behaves like kIOMapCopybackCache in general purpose RAM.
+ kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.
+ kIOMapReadOnly to allow only read-only accesses to the memory - writes will cause an access fault.
+ kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.
+ @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory. + @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory. + @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */ + + virtual IOMemoryMap * map( + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset = 0, + IOByteCount length = 0 ); + +/*! @function map + @abstract Maps a IOMemoryDescriptor into the kernel map. + @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the map method for further details. + @param options Mapping options as in the full version of the map method, with kIOMapAnywhere assumed. + @result See the full version of the map method. */ + + virtual IOMemoryMap * map( + IOOptionBits options = 0 ); + +/*! @function setMapping + @abstract Establishes an already existing mapping. + @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed. + @param task Address space in which the mapping exists. + @param mapAddress Virtual address of the mapping. + @param options Caching and read-only attributes of the mapping. 
+ @result A IOMemoryMap object created to represent the mapping. */ + + virtual IOMemoryMap * setMapping( + task_t task, + IOVirtualAddress mapAddress, + IOOptionBits options = 0 ); + +protected: + virtual IOMemoryMap * makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); + + virtual void addMapping( + IOMemoryMap * mapping ); + + virtual void removeMapping( + IOMemoryMap * mapping ); + + virtual IOReturn doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + IOByteCount length = 0 ); + + virtual IOReturn doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*! @class IOMemoryMap : public OSObject + @abstract An abstract base class defining common methods for describing a memory mapping. + @discussion The IOMemoryMap object represents a mapped range of memory, described by a IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */ + +class IOMemoryMap : public OSObject +{ + OSDeclareAbstractStructors(IOMemoryMap) + +public: +/*! @function getVirtualAddress + @abstract Accessor to the virtual address of the first byte in the mapping. + @discussion This method returns the virtual address of the first byte in the mapping. + @result A virtual address. */ + + virtual IOVirtualAddress getVirtualAddress() = 0; + +/*! @function getPhysicalSegment + @abstract Break a mapping into its physically contiguous segments. + @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. 
It functions similarly to IOMemoryDescriptor::getPhysicalSegment. + @param offset A byte offset into the mapping whose physical address to return. + @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset. + @result A physical address, or zero if the offset is beyond the length of the mapping. */ + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length) = 0; + +/*! @function getPhysicalAddress + @abstract Return the physical address of the first byte in the mapping. + @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous. + @result A physical address. */ + + inline IOPhysicalAddress getPhysicalAddress() + { return( getPhysicalSegment( 0, 0 )); } + +/*! @function getLength + @abstract Accessor to the length of the mapping. + @discussion This method returns the length of the mapping. + @result A byte count. */ + + virtual IOByteCount getLength() = 0; + +/*! @function getAddressTask + @abstract Accessor to the task of the mapping. + @discussion This method returns the mach task the mapping exists in. + @result A mach task_t. */ + + virtual task_t getAddressTask() = 0; + +/*! @function getMemoryDescriptor + @abstract Accessor to the IOMemoryDescriptor the mapping was created from. + @discussion This method returns the IOMemoryDescriptor the mapping was created from. + @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */ + + virtual IOMemoryDescriptor * getMemoryDescriptor() = 0; + +/*! @function getMapOptions + @abstract Accessor to the options the mapping was created with. + @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with. + @result Options for the mapping, including cache settings. 
*/ + + virtual IOOptionBits getMapOptions() = 0; + +/*! @function unmap + @abstract Force the IOMemoryMap to unmap, without destroying the object. + @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used. + @result An IOReturn code. */ + + virtual IOReturn unmap() = 0; + + virtual void taskDied() = 0; +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +enum { + kIOMemoryRequiresWire = 0x00000001 +}; + +class IOGeneralMemoryDescriptor : public IOMemoryDescriptor +{ + OSDeclareDefaultStructors(IOGeneralMemoryDescriptor); + +protected: + union { + IOVirtualRange * v; + IOPhysicalRange * p; + } _ranges; /* list of address ranges */ + unsigned _rangesCount; /* number of address ranges in list */ + bool _rangesIsAllocated; /* is list allocated by us? */ + + task_t _task; /* task where all ranges are mapped to */ + + union { + IOVirtualRange v; + IOPhysicalRange p; + } _singleRange; /* storage space for a single range */ + + unsigned _wireCount; /* number of outstanding wires */ + + vm_address_t _cachedVirtualAddress; /* a cached virtual-to-physical */ + IOPhysicalAddress _cachedPhysicalAddress; /* mapping, for optimization */ + + bool _initialized; /* has superclass been initialized? 
*/ + + virtual void free(); + +protected: /* (to be deprecated) */ + IOByteCount _position; /* absolute position over all ranges */ + virtual void setPosition(IOByteCount position); + +private: + unsigned _positionAtIndex; /* range #n in which position is now */ + IOByteCount _positionAtOffset; /* relative position within range #n */ + OSData *_memoryEntries; + + vm_offset_t _kernPtrAligned; + unsigned _kernPtrAtIndex; + IOByteCount _kernSize; + virtual void mapIntoKernel(unsigned rangeIndex); + virtual void unmapFromKernel(); + inline vm_map_t getMapForTask( task_t task, vm_address_t address ); + +public: + /* + * IOMemoryDescriptor required methods + */ + + virtual bool initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection); + + virtual bool initWithAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask); + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false); + + virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false); + + virtual IOByteCount readBytes(IOByteCount offset, + void * bytes, IOByteCount withLength); + + virtual IOByteCount writeBytes(IOByteCount offset, + const void * bytes, IOByteCount withLength); + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length); + + virtual void * getVirtualSegment(IOByteCount offset, + IOByteCount * length); + + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone); + + virtual IOReturn complete(IODirection forDirection = kIODirectionNone); + + virtual IOReturn doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + 
IOByteCount length = 0 ); + + virtual IOReturn doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOSubMemoryDescriptor : public IOMemoryDescriptor +{ + friend IOMemoryDescriptor; + + OSDeclareDefaultStructors(IOSubMemoryDescriptor); + +protected: + IOMemoryDescriptor * _parent; + IOByteCount _start; + + virtual void free(); + + virtual bool initSubRange( IOMemoryDescriptor * parent, + IOByteCount offset, IOByteCount length, + IODirection withDirection ); + + virtual bool initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection); + + virtual bool initWithAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask); + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false); + + virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false); + + IOMemoryDescriptor::withAddress; + IOMemoryDescriptor::withPhysicalAddress; + IOMemoryDescriptor::withPhysicalRanges; + IOMemoryDescriptor::withRanges; + IOMemoryDescriptor::withSubRange; + +public: + /* + * IOMemoryDescriptor required methods + */ + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length); + + virtual IOByteCount readBytes(IOByteCount offset, + void * bytes, IOByteCount withLength); + + virtual IOByteCount writeBytes(IOByteCount offset, + const void * bytes, IOByteCount withLength); + + virtual void * getVirtualSegment(IOByteCount offset, + IOByteCount * length); + + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone); + + virtual IOReturn complete(IODirection 
forDirection = kIODirectionNone); + +protected: + virtual IOMemoryMap * makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#endif /* !_IOMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IOMessage.h b/iokit/IOKit/IOMessage.h new file mode 100644 index 000000000..5e1addf59 --- /dev/null +++ b/iokit/IOKit/IOMessage.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __IOKIT_IOMESSAGE_H +#define __IOKIT_IOMESSAGE_H + +#include +#include + +typedef UInt32 IOMessage; + +#define iokit_common_msg(message) (UInt32)(sys_iokit|sub_iokit_common|message) +#define iokit_family_msg(sub,message) (UInt32)(sys_iokit|sub|message) + +#define kIOMessageServiceIsTerminated iokit_common_msg(0x010) +#define kIOMessageServiceIsSuspended iokit_common_msg(0x020) +#define kIOMessageServiceIsResumed iokit_common_msg(0x030) + +#define kIOMessageServiceIsRequestingClose iokit_common_msg(0x100) +#define kIOMessageServiceWasClosed iokit_common_msg(0x110) + +#define kIOMessageServiceBusyStateChange iokit_common_msg(0x120) + +#define kIOMessageCanDevicePowerOff iokit_common_msg(0x200) +#define kIOMessageDeviceWillPowerOff iokit_common_msg(0x210) +#define kIOMessageDeviceWillNotPowerOff iokit_common_msg(0x220) +#define kIOMessageDeviceHasPoweredOn iokit_common_msg(0x230) +#define kIOMessageCanSystemPowerOff iokit_common_msg(0x240) +#define kIOMessageSystemWillPowerOff iokit_common_msg(0x250) +#define kIOMessageSystemWillNotPowerOff iokit_common_msg(0x260) +#define kIOMessageCanSystemSleep iokit_common_msg(0x270) +#define kIOMessageSystemWillSleep iokit_common_msg(0x280) +#define kIOMessageSystemWillNotSleep iokit_common_msg(0x290) +#define kIOMessageSystemHasPoweredOn iokit_common_msg(0x300) + +#endif /* ! __IOKIT_IOMESSAGE_H */ diff --git a/iokit/IOKit/IOMultiMemoryDescriptor.h b/iokit/IOKit/IOMultiMemoryDescriptor.h new file mode 100644 index 000000000..68c8c41f8 --- /dev/null +++ b/iokit/IOKit/IOMultiMemoryDescriptor.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOMULTIMEMORYDESCRIPTOR_H +#define _IOMULTIMEMORYDESCRIPTOR_H + +#include + +/*! @class IOMultiMemoryDescriptor : public IOMemoryDescriptor + @abstract The IOMultiMemoryDescriptor object describes a memory area made up of several other IOMemoryDescriptors. + @discussion The IOMultiMemoryDescriptor object represents multiple ranges of memory, specified as an ordered list of IOMemoryDescriptors. The descriptors are chained end-to-end to make up a single contiguous buffer. */ + +class IOMultiMemoryDescriptor : public IOMemoryDescriptor +{ + OSDeclareDefaultStructors(IOMultiMemoryDescriptor); + +protected: + + IOMemoryDescriptor ** _descriptors; + UInt32 _descriptorsCount; + bool _descriptorsIsAllocated; + + virtual void free(); + + /* + * These methods are not supported under this subclass. 
+ */ + + virtual bool initWithAddress( void * address, /* not supported */ + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithAddress( vm_address_t address, /* not supported */ + IOByteCount withLength, + IODirection withDirection, + task_t withTask ); + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, /* not supported */ + IOByteCount withLength, + IODirection withDirection ); + + virtual bool initWithPhysicalRanges( + IOPhysicalRange * ranges, /* not supported */ + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); + + virtual bool initWithRanges( IOVirtualRange * ranges, /* not supported */ + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false ); + + virtual void * getVirtualSegment( IOByteCount offset, /* not supported */ + IOByteCount * length ); + + IOMemoryDescriptor::withAddress; /* not supported */ + IOMemoryDescriptor::withPhysicalAddress; /* not supported */ + IOMemoryDescriptor::withPhysicalRanges; /* not supported */ + IOMemoryDescriptor::withRanges; /* not supported */ + IOMemoryDescriptor::withSubRange; /* not supported */ + +public: + +/*! @function withDescriptors + @abstract Create an IOMultiMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. + @discussion This method creates and initializes an IOMultiMemoryDescriptor for memory consisting of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. Passing the descriptor array as a reference will avoid an extra allocation. + @param descriptors An array of IOMemoryDescriptors which make up the memory to be described. + @param withCount The object count for the descriptors array. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. 
+ @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array, otherwise, the array will be used in situ, avoiding an extra allocation. + @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMultiMemoryDescriptor * withDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); + +/*! @function withDescriptors + @abstract Initialize an IOMultiMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. + @discussion This method initializes an IOMultiMemoryDescriptor for memory consisting of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. Passing the descriptor array as a reference will avoid an extra allocation. + @param descriptors An array of IOMemoryDescriptors which make up the memory to be described. + @param withCount The object count for the descriptors array. + @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array, otherwise, the array will be used in situ, avoiding an extra allocation. + @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + virtual bool initWithDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); + +/*! @function getPhysicalAddress + @abstract Return the physical address of the first byte in the memory. + @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous. + @result A physical address. 
*/ + + virtual IOPhysicalAddress getPhysicalSegment( IOByteCount offset, + IOByteCount * length ); + +/*! @function prepare + @abstract Prepare the memory for an I/O transfer. + @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method needn't called for non-pageable memory. + @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + @result An IOReturn code. */ + + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone); + +/*! @function complete + @abstract Complete processing of the memory after an I/O transfer finishes. + @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. + @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + @result An IOReturn code. */ + + virtual IOReturn complete(IODirection forDirection = kIODirectionNone); + +/*! @function readBytes + @abstract Copy data from the memory descriptor's buffer to the specified buffer. + @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. + @param offset A byte offset into the memory descriptor's memory. + @param bytes The caller supplied buffer to copy the data to. + @param withLength The length of the data to copy. + @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. */ + + virtual IOByteCount readBytes( IOByteCount offset, + void * bytes, + IOByteCount withLength ); + +/*! @function writeBytes + @abstract Copy data to the memory descriptor's buffer from the specified buffer. 
+ @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. + @param offset A byte offset into the memory descriptor's memory. + @param bytes The caller supplied buffer to copy the data from. + @param withLength The length of the data to copy. + @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. */ + + virtual IOByteCount writeBytes( IOByteCount offset, + const void * bytes, + IOByteCount withLength ); +}; + +#endif /* !_IOMULTIMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IONVRAM.h b/iokit/IOKit/IONVRAM.h new file mode 100644 index 000000000..50c9df268 --- /dev/null +++ b/iokit/IOKit/IONVRAM.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOKIT_IONVRAM_H +#define _IOKIT_IONVRAM_H + +#include +#include +#include + +enum { + kIODTNVRAMImageSize = 0x2000, + kIODTNVRAMXPRAMSize = 0x0100, + kIODTNVRAMNameRegistrySize = 0x0400 +}; + +enum { + kOFVariableTypeBoolean = 1, + kOFVariableTypeNumber, + kOFVariableTypeString, + kOFVariableTypeData +}; + +enum { + kOFVariablePermRootOnly = 0, + kOFVariablePermUserRead, + kOFVariablePermUserWrite +}; + +class IODTNVRAM : public IOService +{ + OSDeclareDefaultStructors(IODTNVRAM); + +private: + IONVRAMController *_nvramController; + const OSSymbol *_registryPropertiesKey; + UInt8 *_nvramImage; + bool _nvramImageDirty; + UInt32 _ofPartitionOffset; + UInt32 _ofPartitionSize; + UInt8 *_ofImage; + bool _ofImageDirty; + OSDictionary *_ofDict; + UInt32 _xpramPartitionOffset; + UInt32 _xpramPartitionSize; + UInt8 *_xpramImage; + UInt32 _nrPartitionOffset; + UInt32 _nrPartitionSize; + UInt8 *_nrImage; + + virtual IOReturn initOFVariables(void); + virtual IOReturn syncOFVariables(void); + virtual UInt32 getOFVariableType(const OSSymbol *propSymbol) const; + virtual UInt32 getOFVariablePerm(const OSSymbol *propSymbol) const; + virtual bool getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol, + UInt32 *propType, UInt32 *propOffset); + virtual bool convertPropToObject(UInt8 *propName, UInt32 propNameLength, + UInt8 *propData, UInt32 propDataLength, + const OSSymbol **propSymbol, + OSObject **propObject); + virtual bool convertObjectToProp(UInt8 *buffer, UInt32 *length, + const OSSymbol *propSymbol, OSObject *propObject); + virtual UInt16 generateOWChecksum(UInt8 *buffer); + virtual bool validateOWChecksum(UInt8 *buffer); + virtual void updateOWBootArgs(const OSSymbol *key, OSObject *value); + virtual bool searchNVRAMProperty(struct IONVRAMDescriptor *hdr, + UInt32 *where); + + virtual IOReturn readNVRAMPropertyType0(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value); + virtual IOReturn 
writeNVRAMPropertyType0(IORegistryEntry *entry, + const OSSymbol *name, + OSData * value); + + virtual OSData *unescapeBytesToData(UInt8 *bytes, UInt32 length); + virtual OSData *escapeDataToData(OSData * value); + + virtual IOReturn readNVRAMPropertyType1(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value); + virtual IOReturn writeNVRAMPropertyType1(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value); + +public: + virtual bool init(IORegistryEntry *old, const IORegistryPlane *plane); + + virtual void registerNVRAMController(IONVRAMController *nvram); + + virtual void sync(void); + + virtual bool serializeProperties(OSSerialize * serialize) const; + virtual OSObject *getProperty(const OSSymbol *aKey) const; + virtual OSObject *getProperty(const char *aKey) const; + virtual bool setProperty(const OSSymbol *aKey, OSObject *anObject); + virtual IOReturn setProperties(OSObject *properties); + + virtual IOReturn readXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length); + virtual IOReturn writeXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length); + + virtual IOReturn readNVRAMProperty(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value); + virtual IOReturn writeNVRAMProperty(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value); +}; + +#endif /* !_IOKIT_IONVRAM_H */ diff --git a/iokit/IOKit/IONotifier.h b/iokit/IOKit/IONotifier.h new file mode 100644 index 000000000..24e3d2560 --- /dev/null +++ b/iokit/IOKit/IONotifier.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _IOKIT_IONOTIFIER_H +#define _IOKIT_IONOTIFIER_H + +#include + +/*! @class IONotifier : public OSObject + @abstract An abstract base class defining common methods for controlling a notification request. + @discussion IOService notification requests are represented as implementations of the IONotifier object. It defines methods to enable, disable and remove notification requests. These actions are synchronized with invocations of the notification handler, so removing a notification request will guarantee the handler is not being executed. */ + +class IONotifier : public OSObject +{ + OSDeclareAbstractStructors(IONotifier) + +public: + +/*! @function remove + @abstract Removes the notification request and releases it. + @discussion Removes the notification request and releases it. Since creating an IONotifier instance will leave it with a retain count of one, creating an IONotifier and then removing it will destroy it. This method is synchronous with any handler invocations, so when this method returns it's guaranteed the handler will not be entered. */ + + virtual void remove() = 0; + +/*! @function disable + @abstract Disables the notification request. + @discussion Disables the notification request. This method is synchronous with any handler invocations, so when this method returns it's guaranteed the handler will not be entered.
+ @result Returns the previous enable state of the IONotifier. */ + + virtual bool disable() = 0; + +/*! @function enable + @abstract Sets the enable state of the notification request. + @discussion Restores the enable state of the notification request, given the previous state passed in. + @param was The enable state of the notifier to restore. */ + + virtual void enable( bool was ) = 0; + +}; + +#endif /* ! _IOKIT_IONOTIFIER_H */ diff --git a/iokit/IOKit/IOPlatformExpert.h b/iokit/IOKit/IOPlatformExpert.h new file mode 100644 index 000000000..05d6e2d19 --- /dev/null +++ b/iokit/IOKit/IOPlatformExpert.h @@ -0,0 +1,274 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOPLATFORMEXPERT_H +#define _IOKIT_IOPLATFORMEXPERT_H + +#ifdef __cplusplus +#include +#include +#include +#include + +extern "C" { +#endif + +extern boolean_t PEGetMachineName( char * name, int maxLength ); +extern boolean_t PEGetModelName( char * name, int maxLength ); +extern int PEGetPlatformEpoch( void ); + +enum { + kPEHaltCPU, + kPERestartCPU +}; +extern int (*PE_halt_restart)(unsigned int type); +extern int PEHaltRestart(unsigned int type); + +extern long PEGetGMTTimeOfDay( void ); +extern void PESetGMTTimeOfDay( long secs ); + +#ifdef __cplusplus +} /* extern "C" */ + +extern OSSymbol * gPlatformInterruptControllerName; + +class IORangeAllocator; +class IONVRAMController; +class IOPMrootDomain; + +class IOPlatformExpert : public IOService +{ + OSDeclareDefaultStructors(IOPlatformExpert); + +private: + long _peBootROMType; + long _peChipSetType; + long _peMachineType; + +protected: + IOPMrootDomain * root; + int _pePMFeatures; + int _pePrivPMFeatures; + int _peNumBatteriesSupported; + OSArray * thePowerTree; + + bool searchingForAdditionalParents; + OSNumber * multipleParentKeyValue; + int numInstancesRegistered; + + struct ExpansionData { }; + ExpansionData *reserved; + + virtual void setBootROMType(long peBootROMType); + virtual void setChipSetType(long peChipSetType); + virtual void setMachineType(long peMachineType); + + virtual bool CheckSubTree (OSArray * inSubTree, IOService * theNub, IOService * theDevice, OSDictionary * theParent); + virtual bool RegisterServiceInTree (IOService * theService, OSDictionary * theTreeNode, OSDictionary * theTreeParentNode, IOService * theProvider); + + virtual void PMInstantiatePowerDomains ( void ); + +public: + virtual bool attach( IOService * provider ); + virtual bool start( IOService * provider ); + virtual bool configure( IOService * provider ); + virtual IOService * createNub( OSDictionary * from ); + + virtual bool compareNubName( const IOService * nub, OSString * 
name, + OSString ** matched = 0 ) const; + virtual IOReturn getNubResources( IOService * nub ); + + virtual long getBootROMType(void); + virtual long getChipSetType(void); + virtual long getMachineType(void); + + virtual bool getModelName( char * name, int maxLength ); + virtual bool getMachineName( char * name, int maxLength ); + + virtual int haltRestart(unsigned int type); + virtual void sleepKernel(void); + + virtual long getGMTTimeOfDay( void ); + virtual void setGMTTimeOfDay( long secs ); + + virtual IOReturn getConsoleInfo( PE_Video * consoleInfo ); + virtual IOReturn setConsoleInfo( PE_Video * consoleInfo, unsigned int op ); + + virtual void registerNVRAMController( IONVRAMController * nvram ); + + virtual IOReturn registerInterruptController(OSSymbol *name, IOInterruptController *interruptController); + virtual IOInterruptController *lookUpInterruptController(OSSymbol *name); + virtual void setCPUInterruptProperties(IOService *service); + virtual bool atInterruptLevel(void); + + virtual IOReturn callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4); + + virtual IORangeAllocator * getPhysicalRangeAllocator(void); + + virtual bool platformAdjustService(IOService *service); + + virtual void PMRegisterDevice(IOService * theNub, IOService * theDevice); + virtual void PMLog ( const char *,unsigned long, unsigned long, unsigned long ); + + virtual bool hasPMFeature (unsigned long featureMask); + virtual bool hasPrivPMFeature (unsigned long privFeatureMask); + virtual int numBatteriesSupported (void); + + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 0); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 1); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 2); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 3); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 4); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 5); + 
OSMetaClassDeclareReservedUnused(IOPlatformExpert, 6); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 7); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 8); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 9); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 10); + OSMetaClassDeclareReservedUnused(IOPlatformExpert, 11); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IODTNVRAM; + +class IODTPlatformExpert : public IOPlatformExpert +{ + OSDeclareAbstractStructors(IODTPlatformExpert); + +private: + IODTNVRAM *dtNVRAM; + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual IOService * probe( IOService * provider, + SInt32 * score ); + virtual bool configure( IOService * provider ); + + virtual void processTopLevel( IORegistryEntry * root ); + virtual const char * deleteList( void ) = 0; + virtual const char * excludeList( void ) = 0; + virtual IOService * createNub( IORegistryEntry * from ); + virtual bool createNubs( IOService * parent, OSIterator * iter ); + + virtual bool compareNubName( const IOService * nub, OSString * name, + OSString ** matched = 0 ) const; + + virtual IOReturn getNubResources( IOService * nub ); + + virtual bool getModelName( char * name, int maxLength ); + virtual bool getMachineName( char * name, int maxLength ); + + virtual void registerNVRAMController( IONVRAMController * nvram ); + + virtual int haltRestart(unsigned int type); + + /* virtual */ IOReturn readXPRAM(IOByteCount offset, UInt8 * buffer, + IOByteCount length); + /* virtual */ IOReturn writeXPRAM(IOByteCount offset, UInt8 * buffer, + IOByteCount length); + + virtual IOReturn readNVRAMProperty( + IORegistryEntry * entry, + const OSSymbol ** name, OSData ** value ); + + virtual IOReturn writeNVRAMProperty( + IORegistryEntry * entry, + const OSSymbol * name, OSData * value ); + + + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 0); + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 1); 
+ OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 2); + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 3); + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 4); + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 5); + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 6); + OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 7); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* generic root nub of service tree */ + +class IOPlatformExpertDevice : public IOService +{ + OSDeclareDefaultStructors(IOPlatformExpertDevice) + +private: + IOWorkLoop *workLoop; + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual bool initWithArgs( void * p1, void * p2, + void * p3, void *p4 ); + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; + + virtual IOWorkLoop *getWorkLoop() const; + + virtual void free(); + + OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 0); + OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 1); + OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 2); + OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 3); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* generic nub for motherboard devices */ + +class IOPlatformDevice : public IOService +{ + OSDeclareDefaultStructors(IOPlatformDevice) + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; + virtual IOService * matchLocation( IOService * client ); + virtual IOReturn getResources( void ); + + OSMetaClassDeclareReservedUnused(IOPlatformDevice, 0); + OSMetaClassDeclareReservedUnused(IOPlatformDevice, 1); + OSMetaClassDeclareReservedUnused(IOPlatformDevice, 2); + OSMetaClassDeclareReservedUnused(IOPlatformDevice, 3); +}; + +#endif /* __cplusplus */ + +#endif /* ! 
_IOKIT_IOPLATFORMEXPERT_H */ diff --git a/iokit/IOKit/IORangeAllocator.h b/iokit/IOKit/IORangeAllocator.h new file mode 100644 index 000000000..761084e84 --- /dev/null +++ b/iokit/IOKit/IORangeAllocator.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 05 Nov 99 - created. + */ + +#ifndef _IOKIT_IORANGEALLOCATOR_H +#define _IOKIT_IORANGEALLOCATOR_H + +#include +#include + +typedef UInt32 IORangeScalar; + +/*! @class IORangeAllocator : public OSObject + @abstract A utility class to manage allocations from a range. + @discussion The IORangeAllocator class provides functions for allocating ranges, at a fixed or any offset, and freeing them back to a free list. It is useful for describing ranges of memory or address space without requiring storage in the memory - information describing the free elements is kept elsewhere. Ranges are described by a start offset and a size. IORangeAllocator is optionally protected against multithreaded access. 
*/ + +class IORangeAllocator : public OSObject { + + OSDeclareDefaultStructors(IORangeAllocator) + +protected: + UInt32 numElements; + UInt32 capacity; + UInt32 capacityIncrement; + IORangeScalar defaultAlignmentMask; + IOOptionBits options; + + struct IORangeAllocatorElement * elements; + +private: + virtual bool allocElement( UInt32 index ); + + virtual void deallocElement( UInt32 index ); + +public: + enum { + kLocking = 0x00000001 + }; + +/*! @function init + @abstract Standard initializer for IORangeAllocator. + @discussion This method initializes an IORangeAllocator and optionally sets the free list to contain one fragment, from zero to an endOfRange parameter. The capacity in terms of free fragments and locking options are set for the instance. + @param endOfRange If the free list is to contain an initial fragment, set endOfRange to the last offset in the range, ie. size - 1, to create a free fragment for the range zero to endOfRange inclusive. If zero is passed the free list will be initialized empty, and can be populated with calls to the deallocate method. + @param defaultAlignment If this parameter is non-zero it specifies a required alignment for all allocations, for example pass 256 to align allocations on 256 byte boundaries. Zero or one specify unaligned allocations. + @param capacity Sets the initial size of the free list in number of non-contiguous fragments. This value is also used for the capacityIncrement. + @param options Pass kLocking if the instance can be used by multiple threads. + @result Returns true if the instance is successfully initialized, false on failure. */ + + virtual bool init( IORangeScalar endOfRange, + IORangeScalar defaultAlignment, + UInt32 capacity, + IOOptionBits options ); + +/*! @function withRange + @abstract Standard factory method for IORangeAllocator. + @discussion This method allocates and initializes an IORangeAllocator and optionally sets the free list to contain one fragment, from zero to an endOfRange parameter.
The capacity in terms of free fragments and locking options are set for the instance. + @param endOfRange If the free list is to contain an initial fragment, set endOfRange to the last offset in the range, ie. size - 1, to create a free fragment for the range zero to endOfRange inclusive. If zero is passed the free list will be initialized empty, and can be populated with calls to the deallocate method. + @param defaultAlignment If this parameter is non-zero it specifies a required alignment for all allocations, for example pass 256 to align allocations on 256 byte boundaries. Zero or one specify unaligned allocations. + @param capacity Sets the initial size of the free list in number of non-contiguous fragments. This value is also used for the capacityIncrement. + @param options Pass kLocking if the instance can be used by multiple threads. + @result Returns the new IORangeAllocator instance, to be released by the caller, or zero on failure. */ + + static IORangeAllocator * withRange( IORangeScalar endOfRange, + IORangeScalar defaultAlignment = 0, UInt32 capacity = 0, + IOOptionBits options = 0 ); + + virtual void free(); + virtual bool serialize(OSSerialize *s) const; + +/*! @function getFragmentCount + @abstract Accessor to return the number of free fragments in the range. + @discussion This method returns a count of free fragments. Each fragment describes a non-contiguous free range - deallocations will merge contiguous fragments together. + @result The count of free fragments. */ + + virtual UInt32 getFragmentCount( void ); + +/*! @function getFragmentCapacity + @abstract Accessor to return the number of free fragments in the range. + @discussion This method returns the current capacity of the free fragment list. + @result The current capacity of free fragment list. */ + + virtual UInt32 getFragmentCapacity( void ); + +/*! @function setFragmentCapacityIncrement + @abstract Sets the count of fragments the free list will increase by when full. 
+ @discussion This method sets the number of extra fragments the free list will expand to when full. It defaults to the initial capacity. + @param count The number of fragments to increment the capacity by when the free list is full. */ + + virtual void setFragmentCapacityIncrement( UInt32 count ); + +/*! @function getFreeCount + @abstract Totals the sizes of the free fragments. + @discussion This method returns the total of the sizes of the fragments on the free list. + @result The total of the free fragments sizes. */ + + virtual IORangeScalar getFreeCount( void ); + +/*! @function allocate + @abstract Allocate from the free list, at any offset. + @discussion This method allocates a range from the free list. The alignment will default to the alignment set when the allocator was created or may be set here. + @param size The size of the range requested. + @param result The beginning of the range allocated is returned here on success. + @param alignment If zero is passed, default to the allocator's alignment, otherwise pass an alignment required for the allocation, for example 4096 to page align. + @result True if the allocation was successful, else false. */ + + virtual bool allocate( IORangeScalar size, + IORangeScalar * result, + IORangeScalar alignment = 0 ); + +/*! @function allocateRange + @abstract Allocate from the free list, at a set offset. + @discussion This method allocates a range from the free list, given a set offset passed in. + @param start The beginning of the range requested. + @param size The size of the range requested. + @result True if the allocation was successful, else false. */ + + virtual bool allocateRange( IORangeScalar start, + IORangeScalar size ); + +/*! @function deallocate + @abstract Deallocate a range to the free list. + @discussion This method deallocates a range to the free list, given the start offset and length passed in. + @param start The beginning of the range requested. + @param size The size of the range requested.
*/ + + virtual void deallocate( IORangeScalar start, + IORangeScalar size ); +}; + +#endif /* _IOKIT_IORANGEALLOCATOR_H */ diff --git a/iokit/IOKit/IORegistryEntry.h b/iokit/IOKit/IORegistryEntry.h new file mode 100644 index 000000000..c1a067fc2 --- /dev/null +++ b/iokit/IOKit/IORegistryEntry.h @@ -0,0 +1,814 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOREGISTRYENTRY_H +#define _IOKIT_IOREGISTRYENTRY_H + +#include +#include + + +extern const OSSymbol * gIONameKey; +extern const OSSymbol * gIOLocationKey; + +class IORegistryEntry; +class IORegistryPlane; +class IORegistryIterator; + +typedef void (*IORegistryEntryApplierFunction)(IORegistryEntry * entry, + void * context); + +enum { + kIORegistryIterateRecursively = 0x00000001, + kIORegistryIterateParents = 0x00000002 +}; + +/*! @class IORegistryEntry : public OSObject + @abstract The base class for all objects in the registry. 
+ @discussion The IORegistryEntry base class provides functions for describing graphs of connected registry entries, each with a dictionary-based property table. Entries may be connected in different planes, with differing topologies. Access to the registry is protected against multiple threads. Inside the kernel planes are specified with plane objects and are published by the creator - IOService exports the gIOServicePlane plane object for example. Non kernel clients specify planes by their name. +*/ + +class IORegistryEntry : public OSObject +{ + friend IORegistryIterator; + + OSDeclareDefaultStructors(IORegistryEntry) + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * reserved; + +private: + + OSDictionary * fRegistryTable; + OSDictionary * fPropertyTable; + +private: + OSMetaClassDeclareReservedUnused(IORegistryEntry, 0); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 1); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 2); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 3); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 4); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 5); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 6); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 7); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 8); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 9); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 10); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 11); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 12); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 13); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 14); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 15); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 16); + 
OSMetaClassDeclareReservedUnused(IORegistryEntry, 17); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 18); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 19); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 20); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 21); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 22); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 23); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 24); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 25); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 26); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 27); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 28); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 29); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 30); + OSMetaClassDeclareReservedUnused(IORegistryEntry, 31); + +public: + + /* Registry accessors */ + +/*! @function getRegistryRoot + @abstract Returns a pointer to the root instance of the registry. + @discussion This method provides an accessor to the root of the registry for the machine. The root may be passed to a registry iterator when iterating a plane, and contains properties that describe the available planes, and diagnostic information for IOKit. Keys for these properties are in IOKitKeys.h. + @result A pointer to the IORegistryEntry root instance. It should not be released by the caller. */ + + static IORegistryEntry * getRegistryRoot( void ); + +/*! @function getGenerationCount + @abstract Returns a generation count for all registry changing operations. + @discussion This method provides an accessor to the current generation count (or seed) of the registry which changes when any topology change occurs in the registry - this does not include property table changes. It may be used to invalidate any caching of the results from IORegistryEntry methods. + @result An integer generation count. */ + + static SInt32 getGenerationCount( void ); + +/*!
@function getPlane + @abstract Looks up the plane object by a C-string name. + @discussion Planes are usually provided as globals by the creator, eg. gIOServicePlane, gIODeviceTreePlane, or gIOAudioPlane, however they may also be looked up by name with this method. + @result A pointer to the plane object, or zero if no such plane exists. The returned plane should not be released. */ + + static const IORegistryPlane * getPlane( const char * name ); + + /* Registry Entry allocation & init */ + +/*! @function init + @abstract Standard init method for all IORegistryEntry subclasses. + @discussion A registry entry must be initialized with this method before it can be used. A property dictionary may be passed and will be retained by this method for use as the registry entry's property table, or an empty one will be created. + @param dictionary A dictionary that will become the registry entry's property table (retaining it), or zero which will cause an empty property table to be created. + @result true on success, or false on a resource failure. */ + + virtual bool init( OSDictionary * dictionary = 0 ); + +/*! @function free + @abstract Standard free method for all IORegistryEntry subclasses. + @discussion This method will release any resources of the entry, in particular its property table. Note that the registry entry must always be detached from the registry before free may be called, and subclasses (namely IOService) will have additional protocols for removing registry entries. free should never need to be called directly. */ + + virtual void free( void ); + +/*! @function setPropertyTable + @abstract Replace a registry entry's property table. + @discussion This method will release the current property table of the entry and replace it with another, retaining the new property table. + @param dict The new dictionary to be used as the entry's property table. 
*/ + + virtual void setPropertyTable( OSDictionary * dict ); + + /* Synchronized property accessors; wrappers to OSDictionary + * plus property creation helpers */ + +/*! @function setProperty + @abstract Synchronized method to add a property to a registry entry's property table. + @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSSymbol. + @param anObject The property value. + @result true on success or false on a resource failure. */ + + virtual bool setProperty(const OSSymbol * aKey, OSObject * anObject); + +/*! @function setProperty + @abstract Synchronized method to add a property to a registry entry's property table. + @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSString. + @param anObject The property value. + @result true on success or false on a resource failure. */ + + virtual bool setProperty(const OSString * aKey, OSObject * anObject); + +/*! @function setProperty + @abstract Synchronized method to add a property to a registry entry's property table. + @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as a C-string. + @param anObject The property value. + @result true on success or false on a resource failure. */ + + virtual bool setProperty(const char * aKey, OSObject * anObject); + +/*! @function setProperty + @abstract Synchronized method to construct and add an OSString property to a registry entry's property table. 
+ @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSString from the supplied C-string, set in the property table with the given name, and released. + @param aKey The property's name as a C-string. + @param aString The property value as a C-string. + @result true on success or false on a resource failure. */ + + virtual bool setProperty(const char * aKey, const char * aString); + +/*! @function setProperty + @abstract Synchronized method to construct and add an OSBoolean property to a registry entry's property table. + @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSBoolean from the supplied value, set in the property table with the given name, and released. + @param aKey The property's name as a C-string. + @param aBoolean The property's boolean value. + @result true on success or false on a resource failure. */ + + virtual bool setProperty(const char * aKey, bool aBoolean); + +/*! @function setProperty + @abstract Synchronized method to construct and add an OSNumber property to a registry entry's property table. + @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSNumber from the supplied value and size, set in the property table with the given name, and released. + @param aKey The property's name as a C-string. + @param aValue The property's numeric value. + @param aNumberOfBits The property's size in bits, for OSNumber. 
+ @result true on success or false on a resource failure. */ + + virtual bool setProperty( const char * aKey, + unsigned long long aValue, + unsigned int aNumberOfBits); + +/*! @function setProperty + @abstract Synchronized method to construct and add an OSData property to a registry entry's property table. + @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSData copied from the supplied data and length, set in the property table with the given name, and released. + @param aKey The property's name as a C-string. + @param bytes The property's value as a pointer. OSData will copy this data. + @param length The property's size in bytes, for OSData. + @result true on success or false on a resource failure. */ + + virtual bool setProperty( const char * aKey, + void * bytes, + unsigned int length); + +/*! @function removeProperty + @abstract Synchronized method to remove a property from a registry entry's property table. + @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSSymbol. */ + + virtual void removeProperty( const OSSymbol * aKey); + +/*! @function removeProperty + @abstract Synchronized method to remove a property from a registry entry's property table. + @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSString. */ + + virtual void removeProperty( const OSString * aKey); + +/*! 
@function removeProperty + @abstract Synchronized method to remove a property from a registry entry's property table. + @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as a C-string. */ + + virtual void removeProperty( const char * aKey); + +/*! @function getProperty + @abstract Synchronized method to obtain a property from a registry entry's property table. + @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSSymbol. + @result The property value found, or zero. */ + + virtual OSObject * getProperty( const OSSymbol * aKey) const; + +/*! @function getProperty + @abstract Synchronized method to obtain a property from a registry entry's property table. + @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSString. + @result The property value found, or zero. */ + + virtual OSObject * getProperty( const OSString * aKey) const; + +/*! @function getProperty + @abstract Synchronized method to obtain a property from a registry entry's property table. + @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as a C-string. + @result The property value found, or zero. */ + + virtual OSObject * getProperty( const char * aKey) const; + +/*! 
@function getProperty + @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. + @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + @param aKey The property's name as an OSSymbol. + @param plane The plane to iterate over, eg. gIOServicePlane. + @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + @result The property value found, or zero. */ + + virtual OSObject * getProperty( const OSSymbol * aKey, + const IORegistryPlane * plane, + IOOptionBits options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function getProperty + @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. + @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. 
This method is synchronized with other IORegistryEntry accesses to the property table(s). + @param aKey The property's name as an OSString. + @param plane The plane to iterate over, eg. gIOServicePlane. + @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + @result The property value found, or zero. */ + + virtual OSObject * getProperty( const OSString * aKey, + const IORegistryPlane * plane, + IOOptionBits options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function getProperty + @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. + @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + @param aKey The property's name as a C-string. + @param plane The plane to iterate over, eg. gIOServicePlane. + @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + @result The property value found, or zero. 
*/ + + virtual OSObject * getProperty( const char * aKey, + const IORegistryPlane * plane, + IOOptionBits options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; + +/*! @function copyProperty + @abstract Synchronized method to obtain a property from a registry entry's property table. + @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSSymbol. + @result The property value found, or zero. It should be released by the caller. */ + + virtual OSObject * copyProperty( const OSSymbol * aKey) const; + +/*! @function copyProperty + @abstract Synchronized method to obtain a property from a registry entry's property table. + @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as an OSString. + @result The property value found, or zero. It should be released by the caller. */ + + virtual OSObject * copyProperty( const OSString * aKey) const; + +/*! @function copyProperty + @abstract Synchronized method to obtain a property from a registry entry's property table. + @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table. + @param aKey The property's name as a C-string. + @result The property value found, or zero. It should be released by the caller. */ + + virtual OSObject * copyProperty( const char * aKey) const; + +/*! 
@function dictionaryWithProperties + @abstract Synchronized method to obtain a copy of a registry entry's property table. + @discussion This method will copy a registry entry's property table, using the OSDictionary::withDictionary semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Since OSDictionary will only copy property values by reference, synchronization is not guaranteed to any collection values. + @result The created dictionary, or zero on a resource failure. It should be released by the caller. */ + + virtual OSDictionary * dictionaryWithProperties( void ) const; + +/*! @function serializeProperties + @abstract Synchronized method to serialize a registry entry's property table. + @discussion This method will serialize a registry entry's property table, using the OSDictionary::serialize semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Many non-kernel clients of IOKit read information from the registry via properties, and will invoke this method in a registry entry to create a serialization of all the entry's properties, which is then reconstructed in the client's task as a CFDictionary. This method may be intercepted by subclasses to update their properties or implement a different serialization method, though it is usually better to implement such functionality by creating objects in the property table and implementing their serialize methods, avoiding any need to implement serializeProperties. + @param serialize The OSSerialize instance representing the serialization request. + @result True on success, false otherwise. */ + + virtual bool serializeProperties( OSSerialize * serialize ) const; + + /* Unsynchronized(!) property table access */ + +/*! @function getPropertyTable + @abstract Unsynchronized accessor to a registry entry's property table. + @discussion This method will return a pointer to the live property table as an OSDictionary. 
Its use is not recommended in most cases, instead use the synchronized accessors and helper functions of IORegistryEntry to access properties. It can only safely be used by one thread, which usually means it can only be used before a registry entry is entered into the registry. + @result A pointer to the property table as an OSDictionary. The pointer is valid while the registry entry is retained, and should not be released by the caller. */ + + inline OSDictionary * getPropertyTable( void ) const + { return(fPropertyTable); } + + /* Set properties from user level, to be overridden if supported */ + +/*! @function setProperties + @abstract Optionally supported external method to set properties in a registry entry. + @discussion This method is not implemented by IORegistryEntry, but is available to kernel and non-kernel clients to set properties in a registry entry. IOUserClient provides connection based, more controlled access to this functionality and may be more appropriate for many uses, since there is no differentiation between clients available to this method. + @param properties Any OSObject subclass, to be interpreted by the implementing method - for example an OSDictionary, OSData etc. may all be appropriate. + @result An IOReturn code to be returned to the caller. */ + + virtual IOReturn setProperties( OSObject * properties ); + + /* Topology */ + +/*! @function getParentIterator + @abstract Returns an iterator over a registry entry's parent entries in a specified plane. + @param plane The plane object. + @result Returns an iterator over the parents of the registry entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. 
*/ + + virtual OSIterator * getParentIterator( const IORegistryPlane * plane ) + const; + virtual void applyToParents( IORegistryEntryApplierFunction applier, + void * context, + const IORegistryPlane * plane ) const; + +/*! @function getParentEntry + @abstract Returns a registry entry's first parent entry in a plane. + @discussion This function will return the parent to which a registry entry was first attached. Since the majority of registry entries have only one provider, this is a useful simplification. + @param plane The plane object. + @result Returns the first parent of the registry entry, or zero if the entry is not attached into the registry in that plane. The parent is retained while the entry is attached, and should not be released by the caller. */ + + virtual IORegistryEntry * getParentEntry( const IORegistryPlane * plane ) + const; + +/*! @function getChildIterator + @abstract Returns an iterator over a registry entry's child entries in a plane. + @discussion This method creates an iterator which will return each of a registry entry's child entries in a specified plane. + @param plane The plane object. + @result Returns an iterator over the children of the entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ + + virtual OSIterator * getChildIterator( const IORegistryPlane * plane ) + const; + + virtual void applyToChildren( IORegistryEntryApplierFunction applier, + void * context, + const IORegistryPlane * plane ) const; + +/*! @function getChildEntry + @abstract Returns a registry entry's first child entry in a plane. + @discussion This function will return the child which first attached to a registry entry. + @param plane The plane object. 
+ @result Returns the first child of the registry entry, or zero if the entry is not attached into the registry in that plane. The child is retained while the entry is attached, and should not be released by the caller. */ + + virtual IORegistryEntry * getChildEntry( const IORegistryPlane * plane ) + const; + +/*! @function isChild + @abstract Determines whether a registry entry is the child of another in a plane. + @discussion This method called in the parent entry determines if the specified entry is a child, in a plane. Additionally, it can check if the child is the only child of the parent entry. + @param child The possible child registry entry. + @param plane The plane object. + @param onlyChild If true, check also if the child is the only child. + @result If the child argument is not a child of the registry entry, false is returned. If onlyChild is true and the child is not the only child of the entry, false is returned, otherwise true is returned. */ + + virtual bool isChild( IORegistryEntry * child, + const IORegistryPlane * plane, + bool onlyChild = false ) const; + +/*! @function isParent + @abstract Determines whether a registry entry is the parent of another in a plane. + @discussion This method called in the child entry determines if the specified entry is a parent, in a plane. Additionally, it can check if the parent is the only parent of the child entry. + @param parent The possible parent registry entry. + @param plane The plane object. + @param onlyParent If true, check also if the parent is the only parent. + @result If the parent argument is not a parent of the registry entry, false is returned. If onlyParent is true and the parent is not the only parent of the entry, false is returned, otherwise true is returned. */ + + virtual bool isParent( IORegistryEntry * parent, + const IORegistryPlane * plane, + bool onlyParent = false ) const; + +/*! @function inPlane + @abstract Determines whether a registry entry is attached in a plane. 
+ @discussion This method determines if the entry is attached in a plane to any other entry. + @param plane The plane object. + @result If the entry has a parent in the plane, true is returned, otherwise false is returned. */ + + virtual bool inPlane( const IORegistryPlane * plane ) const; + +/*! @function getDepth + @abstract Counts the maximum number of entries between an entry and the registry root, in a plane. + @discussion This method counts the number of entries between an entry and the registry root, in a plane, for each parent of the entry and returns the maximum value. + @param plane The plane object. + @result The maximum number of entries between the entry and the root. Zero is returned if the entry is not attached in the plane. */ + + virtual unsigned int getDepth( const IORegistryPlane * plane ) const; + + /* Attach / detach */ + +/*! @function attachToParent + @abstract Attaches an entry to a parent entry in a plane. + @discussion This is the usual method of entering an entry into the registry. It is a no-op and success if the entry is already attached to the parent. Attaching the entry into the registry retains both the child and parent while they are attached. This method will call attachToChild in the parent entry if it is not being called from attachToChild. + @param parent The registry entry to attach to. + @param plane The plane object. + @result true on success, or false on a resource failure, or if the parent is the same as the child. */ + + virtual bool attachToParent( IORegistryEntry * parent, + const IORegistryPlane * plane ); + +/*! @function detachFromParent + @abstract Detaches an entry from a parent entry in a plane. + @discussion This is the usual method of removing an entry from the registry. It is a no-op if the entry is not attached to the parent. Detaching the entry will release both the child and parent. This method will call detachFromChild in the parent entry if it is not being called from detachFromChild. 
+ @param parent The registry entry to detach from. + @param plane The plane object. */ + + virtual void detachFromParent( IORegistryEntry * parent, + const IORegistryPlane * plane ); + +/*! @function attachToChild + @abstract Method called in the parent entry when a child attaches. + @discussion This method is called in the parent entry when a child attaches, to make overrides possible. This method will also call attachToParent in the child entry if it is not being called from attachToParent. It is a no-op and success if the entry is already a child. Attaching the entry into the registry retains both the child and parent while they are attached. + @param child The registry entry being attached. + @param plane The plane object. + @result true on success, or false on a resource failure, or if the parent is the same as the child. */ + + virtual bool attachToChild( IORegistryEntry * child, + const IORegistryPlane * plane ); + +/*! @function detachFromChild + @abstract Detaches a child entry from its parent in a plane. + @discussion This method is called in the parent entry when a child detaches, to make overrides possible. It is a no-op if the entry is not a child of the parent. Detaching the entry will release both the child and parent. This method will call detachFromParent in the child entry if it is not being called from detachFromParent. + @param child The registry entry to detach. + @param plane The plane object. */ + + virtual void detachFromChild( IORegistryEntry * child, + const IORegistryPlane * plane ); + +/*! @function detachAbove + @abstract Detaches an entry from all its parent entries in a plane. + @discussion This method calls detachFromParent in the entry for each of its parent entries in the plane. + @param plane The plane object. */ + + virtual void detachAbove( const IORegistryPlane * plane ); + +/*! @function detachAll + @abstract Detaches an entry and all its children recursively in a plane. 
+ @discussion This method breaks the registry connections for a subtree. detachAbove is called in the entry, and all child entries and their children in the plane. + @param plane The plane object. */ + + virtual void detachAll( const IORegistryPlane * plane ); + + /* Name, location and path accessors */ + +/*! @function getName + @abstract Returns the name assigned to the registry entry as a C-string. + @discussion Entries can be named in a particular plane, or globally. If the entry is named in plane and the plane is specified that name will be returned, otherwise the global name is returned. The global name defaults to the entry's meta class name if it has not been named. + @param plane The plane object, or zero for the global name. + @result A C-string name, valid while the entry is retained. */ + + virtual const char * getName( const IORegistryPlane * plane = 0 ) const; + +/*! @function copyName + @abstract Returns the name assigned to the registry entry as an OSSymbol. + @discussion Entries can be named in a particular plane, or globally. If the entry is named in plane and the plane is specified that name will be returned, otherwise the global name is returned. The global name defaults to the entry's meta class name if it has not been named. + @param plane The plane object, or zero for the global name. + @result A reference to an OSSymbol for the name, which should be released by the caller. */ + + virtual const OSSymbol * copyName( + const IORegistryPlane * plane = 0 ) const; + +/*! @function compareNames + @abstract Compares the name of the entry with one or more names, and optionally returns the matching name. + @discussion This method is called during IOService name matching and elsewhere to compare the entry's global name with a list of names, or a single name. A list of names may be passed as any OSCollection of OSStrings, while a single name may be passed an OSString, in the name parameter. 
compareNames will call the compareName method for each name, for overrides. + @param name The name or names to compare with as any OSCollection (eg. OSArray, OSSet, OSDictionary) of OSStrings, or a single name may be passed an OSString. + @param matched If the caller wants the successfully matched name returned, pass a non-zero pointer for the matched parameter and an OSString will be returned here. It should be released by the caller. + @result True if one of the names compared true with the entry's global name. */ + + virtual bool compareNames( OSObject * name, OSString ** matched = 0 ) const; + +/*! @function compareName + @abstract Compares the name of the entry with one name, and optionally returns the matching name. + @discussion This method is called during IOService name matching and elsewhere from the compareNames method. It should be overridden to provide non-standard name matching. + @param name The name to compare with as an OSString. + @param matched If the caller wants the successfully matched name returned, pass a non-zero pointer for the matched parameter and an OSString will be returned here. It should be released by the caller. Generally, this will be the same as the name parameter, but may not be if wildcards are used. + @result True if the name compared true with the entry's global name. */ + + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; + +/*! @function setName + @abstract Sets a name for the registry entry, in a particular plane, or globally. + @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. + @param name An OSSymbol which will be retained. + @param plane The plane object, or zero to set the global name. */ + + virtual void setName( const OSSymbol * name, + const IORegistryPlane * plane = 0 ); + +/*! 
@function setName + @abstract Sets a name for the registry entry, in a particular plane, or globally. + @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. + @param name A const C-string name which will be copied. + @param plane The plane object, or zero to set the global name. */ + + virtual void setName( const char * name, + const IORegistryPlane * plane = 0 ); + +/*! @function getLocation + @abstract Returns the location string assigned to the registry entry as a C-string. + @discussion Entries can be given a location string in a particular plane, or globally. If the entry has had a location set in a plane and the plane is specified that location string will be returned, otherwise the global location string is returned. If no global location string has been set, zero is returned. + @param plane The plane object, or zero for the global name. + @result A C-string location string, valid while the entry is retained, or zero. */ + + virtual const char * getLocation( const IORegistryPlane * plane = 0 ) const; + +/*! @function copyLocation + @abstract Returns the location string assigned to the registry entry as an OSSymbol. + @discussion Entries can be given a location string in a particular plane, or globally. If the entry has had a location set in a plane and the plane is specified that location string will be returned, otherwise the global location string is returned. If no global location string has been set, zero is returned. + @param plane The plane object, or zero for the global name. + @result A reference to an OSSymbol for the location if one exists, which should be released by the caller, or zero. */ + + virtual const OSSymbol * copyLocation( + const IORegistryPlane * plane = 0 ) const; + +/*! 
@function setLocation + @abstract Sets a location string for the registry entry, in a particular plane, or globally. + @discussion Entries can be given a location string in a particular plane, or globally. If the plane is specified the location applies only to that plane, otherwise the global location is set. The location string may be used during path lookups of registry entries, to distinguish between sibling entries with the same name. The default IORegistryEntry parsing of location strings expects a list of hex numbers separated by commas, though subclasses of IORegistryEntry might do their own parsing. + @param location A C-string location string which will be copied, or an OSSymbol which will be retained. + @param plane The plane object, or zero to set the global location string. */ + + virtual void setLocation( const OSSymbol * location, + const IORegistryPlane * plane = 0 ); + virtual void setLocation( const char * location, + const IORegistryPlane * plane = 0 ); + +/*! @function getPath + @abstract Create a path for a registry entry. + @discussion The path for a registry entry is copied to the caller's buffer. The path describes the entry's attachment in a particular plane, which must be specified. The path begins with the plane name followed by a colon, and then followed by '/' separated path components for each of the entries between the root and the registry entry. Each component is constructed with the getPathComponent method called in each entry. An alias may also exist for the entry, which are described as properties in a registry entry found at /aliases in the plane. If a property value interpreted as a path in a call to IORegistryEntry::fromPath yields the entry, then the property name is used as the entry's path. + @param path A char buffer allocated by the caller. + @param length An in/out parameter - the caller sets the length of the buffer available, and getPath returns the total length of the path copied to the buffer. 
+ @param plane The plane object. + @result getPath will fail if the entry is not attached in the plane, or if the buffer is not large enough to contain the path. */ + + virtual bool getPath( char * path, int * length, + const IORegistryPlane * plane) const; + +/*! @function getPathComponent + @abstract Create a path component for a registry entry. + @discussion Each component of a path created with getPath is created with getPathComponent. The default implementation concatenates the entry's name in the plane, with the "at" symbol and the location string of the entry in the plane if it has been set. + @param path A char buffer allocated by the caller. + @param length An in/out parameter - the caller sets the length of the buffer available, and getPathComponent returns the total length of the path component copied to the buffer. + @param plane The plane object. + @result true if the path fits into the supplied buffer or false on an overflow. */ + + virtual bool getPathComponent( char * path, int * length, + const IORegistryPlane * plane ) const; + +/*! @function fromPath + @abstract Looks up a registry entry by path. + @discussion This function parses paths to lookup registry entries. The path may begin with the : created by getPath, or the plane may be set by the caller. If there are characters remaining unparsed after an entry has been looked up, this may be considered an invalid lookup, or those characters may be passed back to the caller and the lookup successful. + @param path A C-string path. + @param plane The plane to look up the path, or zero, in which case the path must begin with the plane name. + @param residualPath If the path may contain residual characters after the last path component, the residual will be copied back to the caller's residualPath buffer. If there are residual characters and no residual buffer is specified, fromPath will fail. 
+ @param residualLength An in/out parameter - the caller sets the length of the residual buffer available, and fromPath returns the total length of the residual path copied to the buffer. If there is no residualBuffer (residualPath = 0) then residualLength may be zero also. + @param fromEntry The lookup will proceed rooted at this entry if non-zero, otherwise it proceeds from the root of the plane. + @result A retained registry entry is returned on success, or zero on failure. The caller should release the entry. */ + + static IORegistryEntry * fromPath( const char * path, + const IORegistryPlane * plane = 0, + char * residualPath = 0, + int * residualLength = 0, + IORegistryEntry * fromEntry = 0 ); + +/*! @function fromPath + @abstract Looks up a registry entry by relative path. + @discussion This function looks up an entry below the called entry by a relative path. It is just a convenience that calls IORegistryEntry::fromPath with this as the fromEntry parameter. + @param path See IORegistryEntry::fromPath. + @param plane See IORegistryEntry::fromPath. + @param residualPath See IORegistryEntry::fromPath. + @param residualLength See IORegistryEntry::fromPath. + @result See IORegistryEntry::fromPath. */ + + virtual IORegistryEntry * childFromPath( const char * path, + const IORegistryPlane * plane = 0, + char * residualPath = 0, + int * residualLength = 0 ); + +/*! @function dealiasPath + @abstract Strips any aliases from the head of path and returns the full path. + @discussion If the path specified begins with an alias found in the /aliases entry, the value of the alias is returned, and a pointer into the passed in path after the alias is passed back to the caller. If an alias is not found, zero is returned and the path parameter is unchanged. + @param opath An in/out parameter - the caller passes in a pointer to a C-string pointer to a path. If an alias is found, dealiasPath returns a pointer into the path just beyond the end of the alias. 
+ @param plane A plane object must be specified. + @result A C-string pointer to the value of the alias if one is found, or zero if not. */ + + static const char * dealiasPath( const char ** opath, + const IORegistryPlane * plane ); + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + /* * * * * * * * * * * * internals * * * * * * * * * * * */ + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +public: + static IORegistryEntry * initialize( void ); + static const IORegistryPlane * makePlane( const char * name ); + // don't even think about using this + virtual bool init( IORegistryEntry * from, + const IORegistryPlane * inPlane ); +private: + inline bool arrayMember( OSArray * set, + const IORegistryEntry * member, + unsigned int * index = 0 ) const; + + bool makeLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const; + void breakLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const; + + virtual OSArray * getParentSetReference( const IORegistryPlane * plane ) + const; + virtual OSArray * getChildSetReference( const IORegistryPlane * plane ) + const; + virtual IORegistryEntry * getChildFromComponent( const char ** path, + const IORegistryPlane * plane ); + + virtual const OSSymbol * hasAlias( const IORegistryPlane * plane, + char * opath = 0, int * length = 0 ) const; + virtual const char * matchPathLocation( const char * cmp, + const IORegistryPlane * plane ); + +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/*! @class IORegistryIterator : public OSIterator + @abstract An iterator over the registry. + @discussion An iterator that can traverse the children or parents of a registry entry in a plane, and recurse. Access to the registry is protected against multiple threads, but an IORegistryIterator instance is for use by one thread only. 
*/ + +class IORegistryIterator : public OSIterator +{ + OSDeclareAbstractStructors(IORegistryIterator) + +private: + struct IORegCursor { + IORegCursor * next; + IORegistryEntry * current; + OSIterator * iter; + }; + IORegCursor start; + IORegCursor * where; + IORegistryEntry * root; + OSOrderedSet * done; + const IORegistryPlane * plane; + IOOptionBits options; + + virtual void free( void ); + +public: +/*! @function iterateOver + @abstract Create an iterator rooted at a given registry entry. + @discussion This method creates an IORegistryIterator that is set up with options to iterate children or parents of a root entry, and to recurse automatically into entries as they are returned, or only when instructed. The iterator object keeps track of entries that have been recursed into previously to avoid loops. + @param start The root entry to begin the iteration at. + @param plane A plane object must be specified. + @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. + @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ + + static IORegistryIterator * iterateOver( IORegistryEntry * start, + const IORegistryPlane * plane, + IOOptionBits options = 0 ); + +/*! @function iterateOver + @abstract Create an iterator rooted at the registry root. + @discussion This method creates an IORegistryIterator that is set up with options to iterate children of the registry root entry, and to recurse automatically into entries as they are returned, or only when instructed. The iterator object keeps track of entries that have been recursed into previously to avoid loops. 
+ @param plane A plane object must be specified. + @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. + @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ + + static IORegistryIterator * iterateOver( const IORegistryPlane * plane, + IOOptionBits options = 0 ); + +/*! @function getNextObject + @abstract Return the next object in the registry iteration. + @discussion This method calls either getNextObjectFlat or getNextObjectRecursive depending on the options the iterator was created with. This implements the OSIterator defined getNextObject method. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. + @result The next registry entry in the iteration (the current entry), or zero if the iteration has finished at this level of recursion. The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + + virtual IORegistryEntry * getNextObject( void ); + +/*! @function getNextObjectFlat + @abstract Return the next object in the registry iteration, ignoring the kIORegistryIterateRecursively option. + @discussion This method returns the next child, or parent if the kIORegistryIterateParents option was used to create the iterator, of the current root entry. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. 
+ @result The next registry entry in the iteration (the current entry), or zero if the iteration has finished at this level of recursion, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + + virtual IORegistryEntry * getNextObjectFlat( void ); + +/*! @function getNextObjectRecursive + @abstract Return the next object in the registry iteration, and enter it. + @discussion If the iterator has a current entry, and the iterator has not already entered previously, enterEntry is called to recurse into it, ie. make it the new root, and the next child, or parent if the kIORegistryIterateParents option was used to create the iterator, at this new level of recursion is returned. If there is no current entry at this level of recursion, exitEntry is called and the process repeats, until the iteration returns to the entry the iterator was created with and zero is returned. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. + @result The next registry entry in the iteration (the current entry), or zero if its finished, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + + virtual IORegistryEntry * getNextObjectRecursive( void ); + +/*! @function getCurrentEntry + @abstract Return the current entry in the registry iteration. + @discussion This method returns the current entry, last returned by getNextObject et al. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. If the iteration is no longer valid (see isValid), the current entry is zero. 
+ @result The current registry entry in the iteration, or zero if the last iteration returned zero, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + + virtual IORegistryEntry * getCurrentEntry( void ); + +/*! @function enterEntry + @abstract Recurse into the current entry in the registry iteration. + @discussion This method makes the current entry, ie. the last entry returned by getNextObject et al., the root in a new level of recursion. */ + + virtual void enterEntry( void ); + +/*! @function enterEntry + @abstract Recurse into the current entry in the registry iteration. + @discussion This method recurses into an entry as with enterEntry, but also switches from the current plane to a new one set by the caller. + @param plane The new plane to switch into. */ + + virtual void enterEntry( const IORegistryPlane * plane ); + +/*! @function exitEntry + @abstract Exits a level of recursion, restoring the current entry. + @discussion This method undoes an enterEntry, restoring the current entry. If there are no more levels of recursion to exit false is returned, otherwise true is returned. + @result true if a level of recursion was undone, false if no recursive levels are left in the iteration. */ + + virtual bool exitEntry( void ); + +/*! @function reset + @abstract Exits all levels of recursion, restoring the iterator to its state at creation. + @discussion This method exits all levels of recursion, and restores the iterator to its state at creation. */ + + virtual void reset( void ); + +/*! @function isValid + @abstract Checks that no registry changes have invalidated the iteration. + @discussion If a registry iteration is invalidated by changes to the registry, it will be made invalid, the currentEntry will be considered zero, and further calls to getNextObject et al. will return zero. 
The iterator should be reset to restart the iteration when this happens. + @result false if the iterator has been invalidated by changes to the registry, true otherwise. */ + + virtual bool isValid( void ); + +/*! @function iterateAll + @abstract Iterates all entries (with getNextObject) and returns a set of all returned entries. + @discussion This method will reset, then iterate all entries in the iteration (with getNextObject) until successful (ie. the iterator is valid at the end of the iteration). + @result A set of entries returned by the iteration. The caller should release the set when it has finished with it. Zero is returned on a resource failure. */ + + virtual OSOrderedSet * iterateAll( void ); +}; + +#endif /* _IOKIT_IOREGISTRYENTRY_H */ diff --git a/iokit/IOKit/IOReturn.h b/iokit/IOKit/IOReturn.h new file mode 100644 index 000000000..6664ca437 --- /dev/null +++ b/iokit/IOKit/IOReturn.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + +/* + * Core IOReturn values. Others may be family defined. + */ + +#ifndef __IOKIT_IORETURN_H +#define __IOKIT_IORETURN_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +typedef kern_return_t IOReturn; + +#ifndef sys_iokit +#define sys_iokit err_system(0x38) +#endif /* sys_iokit */ +#define sub_iokit_common err_sub(0) +#define sub_iokit_usb err_sub(1) +#define sub_iokit_firewire err_sub(2) +#define sub_iokit_block_storage err_sub(4) +#define sub_iokit_reserved err_sub(-1) +#define iokit_common_err(return) (sys_iokit|sub_iokit_common|return) +#define iokit_family_err(sub,return) (sys_iokit|sub|return) + +#define kIOReturnSuccess KERN_SUCCESS // OK +#define kIOReturnError iokit_common_err(0x2bc) // general error +#define kIOReturnNoMemory iokit_common_err(0x2bd) // can't allocate memory +#define kIOReturnNoResources iokit_common_err(0x2be) // resource shortage +#define kIOReturnIPCError iokit_common_err(0x2bf) // error during IPC +#define kIOReturnNoDevice iokit_common_err(0x2c0) // no such device +#define kIOReturnNotPrivileged iokit_common_err(0x2c1) // privilege violation +#define kIOReturnBadArgument iokit_common_err(0x2c2) // invalid argument +#define kIOReturnLockedRead iokit_common_err(0x2c3) // device read locked +#define kIOReturnLockedWrite iokit_common_err(0x2c4) // device write locked +#define kIOReturnExclusiveAccess iokit_common_err(0x2c5) // exclusive access and + // device already open +#define kIOReturnBadMessageID iokit_common_err(0x2c6) // sent/received messages + // had different msg_id +#define kIOReturnUnsupported iokit_common_err(0x2c7) // unsupported function +#define kIOReturnVMError iokit_common_err(0x2c8) // misc. VM failure +#define kIOReturnInternalError iokit_common_err(0x2c9) // internal error +#define kIOReturnIOError iokit_common_err(0x2ca) // General I/O error +//#define kIOReturn???Error iokit_common_err(0x2cb) // ??? 
+#define kIOReturnCannotLock iokit_common_err(0x2cc) // can't acquire lock +#define kIOReturnNotOpen iokit_common_err(0x2cd) // device not open +#define kIOReturnNotReadable iokit_common_err(0x2ce) // read not supported +#define kIOReturnNotWritable iokit_common_err(0x2cf) // write not supported +#define kIOReturnNotAligned iokit_common_err(0x2d0) // alignment error +#define kIOReturnBadMedia iokit_common_err(0x2d1) // Media Error +#define kIOReturnStillOpen iokit_common_err(0x2d2) // device(s) still open +#define kIOReturnRLDError iokit_common_err(0x2d3) // rld failure +#define kIOReturnDMAError iokit_common_err(0x2d4) // DMA failure +#define kIOReturnBusy iokit_common_err(0x2d5) // Device Busy +#define kIOReturnTimeout iokit_common_err(0x2d6) // I/O Timeout +#define kIOReturnOffline iokit_common_err(0x2d7) // device offline +#define kIOReturnNotReady iokit_common_err(0x2d8) // not ready +#define kIOReturnNotAttached iokit_common_err(0x2d9) // device not attached +#define kIOReturnNoChannels iokit_common_err(0x2da) // no DMA channels left +#define kIOReturnNoSpace iokit_common_err(0x2db) // no space for data +//#define kIOReturn???Error iokit_common_err(0x2dc) // ??? 
+#define kIOReturnPortExists iokit_common_err(0x2dd) // port already exists +#define kIOReturnCannotWire iokit_common_err(0x2de) // can't wire down + // physical memory +#define kIOReturnNoInterrupt iokit_common_err(0x2df) // no interrupt attached +#define kIOReturnNoFrames iokit_common_err(0x2e0) // no DMA frames enqueued +#define kIOReturnMessageTooLarge iokit_common_err(0x2e1) // oversized msg received + // on interrupt port +#define kIOReturnNotPermitted iokit_common_err(0x2e2) // not permitted +#define kIOReturnNoPower iokit_common_err(0x2e3) // no power to device +#define kIOReturnNoMedia iokit_common_err(0x2e4) // media not present +#define kIOReturnUnformattedMedia iokit_common_err(0x2e5)// media not formatted +#define kIOReturnUnsupportedMode iokit_common_err(0x2e6) // no such mode +#define kIOReturnUnderrun iokit_common_err(0x2e7) // data underrun +#define kIOReturnOverrun iokit_common_err(0x2e8) // data overrun +#define kIOReturnDeviceError iokit_common_err(0x2e9) // the device is not working properly! +#define kIOReturnNoCompletion iokit_common_err(0x2ea) // a completion routine is required +#define kIOReturnAborted iokit_common_err(0x2eb) // operation aborted +#define kIOReturnNoBandwidth iokit_common_err(0x2ec) // bus bandwidth would be exceeded +#define kIOReturnNotResponding iokit_common_err(0x2ed) // device not responding +#define kIOReturnIsoTooOld iokit_common_err(0x2ee) // isochronous I/O request for distant past! +#define kIOReturnIsoTooNew iokit_common_err(0x2ef) // isochronous I/O request for distant future +#define kIOReturnNotFound iokit_common_err(0x2f0) // data was not found +#define kIOReturnInvalid iokit_common_err(0x1) // should never be seen + +#ifdef __cplusplus +} +#endif + +#endif /* ! __IOKIT_IORETURN_H */ diff --git a/iokit/IOKit/IOService.h b/iokit/IOKit/IOService.h new file mode 100644 index 000000000..dfcee1855 --- /dev/null +++ b/iokit/IOKit/IOService.h @@ -0,0 +1,1758 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998,1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOSERVICE_H +#define _IOKIT_IOSERVICE_H + +#include +#include +#include +#include +#include + +#include +#include + +class IOPMinformee; +class IOPowerConnection; + +#include +#include + +extern "C" { +#include +} + +enum { + kIODefaultProbeScore = 0 +}; + +// masks for getState() +enum { + kIOServiceInactiveState = 0x00000001, + kIOServiceRegisteredState = 0x00000002, + kIOServiceMatchedState = 0x00000004, + kIOServiceFirstPublishState = 0x00000008, + kIOServiceFirstMatchState = 0x00000010 +}; + +// options for registerService() +enum { + kIOServiceExclusive = 0x00000001, + kIOServiceSynchronous = 0x00000002, +}; + +// options for terminate() +enum { + kIOServiceRequired = 0x00000001 +}; + +// options for open() +enum { + kIOServiceSeize = 0x00000001, + kIOServiceFamilyOpenOptions = 0xffff0000 +}; + +// options for close() +enum { + kIOServiceFamilyCloseOptions = 0xffff0000 +}; + +typedef void * IONotificationRef; + +extern const IORegistryPlane * gIOServicePlane; 
+extern const IORegistryPlane * gIOPowerPlane; + +extern const OSSymbol * gIOResourcesKey; +extern const OSSymbol * gIOResourceMatchKey; +extern const OSSymbol * gIOProviderClassKey; +extern const OSSymbol * gIONameMatchKey; +extern const OSSymbol * gIONameMatchedKey; +extern const OSSymbol * gIOPropertyMatchKey; +extern const OSSymbol * gIOLocationMatchKey; +extern const OSSymbol * gIOPathMatchKey; +extern const OSSymbol * gIOMatchCategoryKey; +extern const OSSymbol * gIODefaultMatchCategoryKey; +extern const OSSymbol * gIOMatchedServiceCountKey; + +extern const OSSymbol * gIOUserClientClassKey; +extern const OSSymbol * gIOKitDebugKey; +extern const OSSymbol * gIOServiceKey; + +extern const OSSymbol * gIOCommandPoolSizeKey; + +extern const OSSymbol * gIOPublishNotification; +extern const OSSymbol * gIOFirstPublishNotification; +extern const OSSymbol * gIOMatchedNotification; +extern const OSSymbol * gIOFirstMatchNotification; +extern const OSSymbol * gIOTerminatedNotification; + +extern const OSSymbol * gIOGeneralInterest; +extern const OSSymbol * gIOBusyInterest; +extern const OSSymbol * gIOAppPowerStateInterest; + +extern const OSSymbol * gIODeviceMemoryKey; +extern const OSSymbol * gIOInterruptControllersKey; +extern const OSSymbol * gIOInterruptSpecifiersKey; + +extern SInt32 IOServiceOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ); + +typedef void (*IOInterruptAction)( OSObject * target, void * refCon, + IOService * nub, int source ); + +/*! @typedef IOServiceNotificationHandler + @param target Reference supplied when the notification was registered. + @param refCon Reference constant supplied when the notification was registered. + @param newService The IOService object the notification is delivering. It is retained for the duration of the handler's invocation and doesn't need to be released by the handler. 
*/ + +typedef bool (*IOServiceNotificationHandler)( void * target, void * refCon, + IOService * newService ); + +/*! @typedef IOServiceInterestHandler + @param target Reference supplied when the notification was registered. + @param refCon Reference constant supplied when the notification was registered. + @param messageType Type of the message - IOKit defined in IOKit/IOMessage.h or family specific. + @param provider The IOService object who is delivering the notification. It is retained for the duration of the handler's invocation and doesn't need to be released by the handler. + @param messageArgument An argument for message, dependent on its type. + @param argSize Non zero if the argument represents a struct of that size, used when delivering messages outside the kernel. */ + +typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ); + +typedef void (*IOServiceApplierFunction)(IOService * service, void * context); +typedef void (*OSObjectApplierFunction)(OSObject * object, void * context); + +class IOUserClient; +class IOPlatformExpert; + +/*! @class IOService : public IORegistryEntry + @abstract The base class for most families, devices and drivers. + @discussion The IOService base class defines APIs used to publish services, instantiate other services based on the existence of a providing service (ie. driver stacking), destroy a service and its dependent stack, notify interested parties of service state changes, and general utility functions useful across all families. + +Types of service are specified with a matching dictionary that describes properties of the service. For example, a matching dictionary might describe any IOUSBDevice (or subclass), an IOUSBDevice with a certain class code, or a IOPCIDevice with a set of OpenFirmware matching names or device & vendor IDs. 
Since the matching dictionary is interpreted by the family which created the service, as well as generically by IOService, the list of properties considered for matching depends on the family. + +Matching dictionaries are associated with IOService classes by the catalogue, as driver property tables, and also supplied by clients of the notification APIs. + +IOService provides matching based on c++ class (via OSMetaClass dynamic casting), registry entry name, a registry path to the service (which includes OpenFirmware paths), a name assigned by BSD, or by its location (its point of attachment). + +

Driver Instantiation by IOService

+ +Drivers are subclasses of IOService, and their availability is managed through the catalogue. They are instantiated based on the publication of an IOService they use (for example, an IOPCIDevice or IOUSBDevice), or when they are added to the catalogue and the IOService(s) they use are already available. + +When an IOService (the "provider") is published with the registerService() method, the matching and probing process begins, which is always single threaded per provider. A list of matching dictionaries from the catalog and installed publish notification requests, that successfully match the IOService, is constructed, with ordering supplied by kIOProbeScoreKey ("IOProbeScore") property in the dictionary, or supplied with the notification. + +Each entry in the list is then processed in order - for notifications, the notification is delivered, for driver property tables a lot more happens. + +The driver class is instantiated and init() called with its property table. The new driver instance is then attached to the provider, and has its probe() method called with the provider as an argument. The default probe method does nothing but return success, but a driver may implement this method to interrogate the provider to make sure it can work with it. It may also modify its probe score at this time. After probe, the driver is detached and the next in the list is considered (ie. attached, probed, and detached). + +When the probing phase is complete, the list consists of successfully probed drivers, in order of their probe score (after adjustment during the probe() call). The list is then divided into categories based on the kIOMatchCategoryKey property ("IOMatchCategory"); drivers without a match category are all considered in one default category. Match categories allow multiple clients of a provider to be attached and started, though the provider may also enforce open/close semantics to gain active access to it. 
+ +For each category, the highest scoring driver in that category is attached to the provider, and its start() method called. If start() is successful, the rest of the drivers in the same match category are discarded, otherwise the next highest scoring driver is started, and so on. + +The driver should only consider itself in action when the start method is called, meaning it has been selected for use on the provider, and consuming that particular match category. It should also be prepared to be allocated, probed and freed even if the probe was successful. + +After the drivers have all synchronously been started, the installed "matched" notifications that match the registered IOService are delivered. + +

Properties used by IOService

+ + kIOClassKey, extern const OSSymbol * gIOClassKey, "IOClass" +
+Class of the driver to instantiate on matching providers. +
+
+ kIOProviderClassKey, extern const OSSymbol * gIOProviderClassKey, "IOProviderClass" +
+Class of the provider(s) to be considered for matching, checked with OSDynamicCast so subclasses will also match. +
+
+ kIOProbeScoreKey, extern const OSSymbol * gIOProbeScoreKey, "IOProbeScore" +
+The probe score initially used to order multiple matching drivers. +
+
+ kIOMatchCategoryKey, extern const OSSymbol * gIOMatchCategoryKey, "IOMatchCategory" +
+A string defining the driver category for matching purposes. All drivers with no IOMatchCategory property are considered to be in the same default category. Only one driver in a category can be started on each provider. +
+
+ kIONameMatchKey, extern const OSSymbol * gIONameMatchKey, "IONameMatch" +
+A string or collection of strings that match the provider's name. The comparison is implemented with the IORegistryEntry::compareNames method, which supports a single string, or any collection (OSArray, OSSet, OSDictionary etc.) of strings. IOService objects with OpenFirmware device tree properties (eg. IOPCIDevice) will also be matched based on that standard's "compatible", "name", "device_type" properties. The matching name will be left in the driver's property table in the kIONameMatchedKey property. +
+Examples +
+ <key>IONameMatch</key>
+ <string>pci106b,7</string> +
+For a list of possible matching names, a serialized array of strings should be used, eg. +
+ <key>IONameMatch</key>
+ <array>
+ <string>APPL,happy16</string>
+ <string>pci106b,7</string>
+ </array> +
+
+ kIONameMatchedKey, extern const OSSymbol * gIONameMatchedKey, "IONameMatched" +
+The name successfully matched from the kIONameMatchKey property will be left in the driver's property table as the kIONameMatchedKey property. +
+
+ kIOPropertyMatchKey, extern const OSSymbol * gIOPropertyMatchKey, "IOPropertyMatch" +
+A dictionary of properties that each must exist in the matching IOService and compare successfully with the isEqualTo method. + <key>IOPropertyMatch</key>
+ <dictionary>
+ <key>name</key>
+ <string>APPL,meek8</string>
+ </dictionary> +
+
+ kIOUserClientClassKey, extern const OSSymbol * gIOUserClientClassKey, "IOUserClientClass" +
+The class name that the service will attempt to allocate when a user client connection is requested. First the device nub is queried, then the nub's provider is queried by default. +
+
+ kIOKitDebugKey, extern const OSSymbol * gIOKitDebugKey, "IOKitDebug" +
+Set some debug flags for logging the driver loading process. Flags are defined in IOKit/IOKitDebug.h, but 65535 works well. + +*/ + +class IOService : public IORegistryEntry +{ + OSDeclareDefaultStructors(IOService) + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * reserved; + +private: + IOService * __provider; + SInt32 __providerGeneration; + IOService * __owner; + IOOptionBits __state[2]; + IOOptionBits __reserved[4]; + + // pointer to private instance variables for power management + IOPMpriv * priv; + +protected: + // TRUE once PMinit has been called + bool initialized; + // pointer to protected instance variables for power management + IOPMprot * pm_vars; + +private: + OSMetaClassDeclareReservedUnused(IOService, 0); + OSMetaClassDeclareReservedUnused(IOService, 1); + OSMetaClassDeclareReservedUnused(IOService, 2); + OSMetaClassDeclareReservedUnused(IOService, 3); + OSMetaClassDeclareReservedUnused(IOService, 4); + OSMetaClassDeclareReservedUnused(IOService, 5); + OSMetaClassDeclareReservedUnused(IOService, 6); + OSMetaClassDeclareReservedUnused(IOService, 7); + OSMetaClassDeclareReservedUnused(IOService, 8); + OSMetaClassDeclareReservedUnused(IOService, 9); + OSMetaClassDeclareReservedUnused(IOService, 10); + OSMetaClassDeclareReservedUnused(IOService, 11); + OSMetaClassDeclareReservedUnused(IOService, 12); + OSMetaClassDeclareReservedUnused(IOService, 13); + OSMetaClassDeclareReservedUnused(IOService, 14); + OSMetaClassDeclareReservedUnused(IOService, 15); + OSMetaClassDeclareReservedUnused(IOService, 16); + OSMetaClassDeclareReservedUnused(IOService, 17); + OSMetaClassDeclareReservedUnused(IOService, 18); + OSMetaClassDeclareReservedUnused(IOService, 19); + OSMetaClassDeclareReservedUnused(IOService, 20); + 
OSMetaClassDeclareReservedUnused(IOService, 21); + OSMetaClassDeclareReservedUnused(IOService, 22); + OSMetaClassDeclareReservedUnused(IOService, 23); + OSMetaClassDeclareReservedUnused(IOService, 24); + OSMetaClassDeclareReservedUnused(IOService, 25); + OSMetaClassDeclareReservedUnused(IOService, 26); + OSMetaClassDeclareReservedUnused(IOService, 27); + OSMetaClassDeclareReservedUnused(IOService, 28); + OSMetaClassDeclareReservedUnused(IOService, 29); + OSMetaClassDeclareReservedUnused(IOService, 30); + OSMetaClassDeclareReservedUnused(IOService, 31); + OSMetaClassDeclareReservedUnused(IOService, 32); + OSMetaClassDeclareReservedUnused(IOService, 33); + OSMetaClassDeclareReservedUnused(IOService, 34); + OSMetaClassDeclareReservedUnused(IOService, 35); + OSMetaClassDeclareReservedUnused(IOService, 36); + OSMetaClassDeclareReservedUnused(IOService, 37); + OSMetaClassDeclareReservedUnused(IOService, 38); + OSMetaClassDeclareReservedUnused(IOService, 39); + OSMetaClassDeclareReservedUnused(IOService, 40); + OSMetaClassDeclareReservedUnused(IOService, 41); + OSMetaClassDeclareReservedUnused(IOService, 42); + OSMetaClassDeclareReservedUnused(IOService, 43); + OSMetaClassDeclareReservedUnused(IOService, 44); + OSMetaClassDeclareReservedUnused(IOService, 45); + OSMetaClassDeclareReservedUnused(IOService, 46); + OSMetaClassDeclareReservedUnused(IOService, 47); + OSMetaClassDeclareReservedUnused(IOService, 48); + OSMetaClassDeclareReservedUnused(IOService, 49); + OSMetaClassDeclareReservedUnused(IOService, 50); + OSMetaClassDeclareReservedUnused(IOService, 51); + OSMetaClassDeclareReservedUnused(IOService, 52); + OSMetaClassDeclareReservedUnused(IOService, 53); + OSMetaClassDeclareReservedUnused(IOService, 54); + OSMetaClassDeclareReservedUnused(IOService, 55); + OSMetaClassDeclareReservedUnused(IOService, 56); + OSMetaClassDeclareReservedUnused(IOService, 57); + OSMetaClassDeclareReservedUnused(IOService, 58); + OSMetaClassDeclareReservedUnused(IOService, 59); + 
OSMetaClassDeclareReservedUnused(IOService, 60); + OSMetaClassDeclareReservedUnused(IOService, 61); + OSMetaClassDeclareReservedUnused(IOService, 62); + OSMetaClassDeclareReservedUnused(IOService, 63); + +public: +/*! @function getState + @abstract Accessor for IOService state bits, not normally needed or used outside IOService. + @result State bits for the IOService, eg. kIOServiceInactiveState, kIOServiceRegisteredState. */ + + virtual IOOptionBits getState( void ) const; + +/*! @function isInactive + @abstract Check the IOService has been terminated, and is in the process of being destroyed. + @discussion When an IOService is successfully terminated, it is immediately made inactive, which blocks further attach()es, matching or notifications occuring on the object. It remains inactive until the last client closes, and is then finalized and destroyed. + @result Returns true if the IOService has been terminated. */ + + inline bool isInactive( void ) const + { return( 0 != (kIOServiceInactiveState & getState())); } + + /* Stack creation */ + +/*! @function registerService + @abstract Start the registration process for a newly discovered IOService. + @discussion This function allows an IOService subclass to be published and made available to possible clients, by starting the registration process and delivering notifications to registered clients. The object should be completely setup and ready to field requests from clients before registerService is called. + @param options The default zero options mask is recommended & should be used in most cases. The registration process is usually asynchronous, with possible driver probing & notification occurring some time later. kIOServiceSynchronous may be passed to carry out the matching and notification process for currently registered clients before returning to the caller. */ + + virtual void registerService( IOOptionBits options = 0 ); + +/*! 
@function probe + @abstract During an IOService instantiation probe a matched service to see if it can be used. + @discussion The registration process for an IOService (the provider) includes instantiating possible driver clients. The probe method is called in the client instance to check the matched service can be used before the driver is considered to be started. Since matching screens many possible providers, in many cases the probe method can be left unimplemented by IOService subclasses. The client is already attached to the provider when probe is called. + @param provider The registered IOService which matches a driver personality's matching dictionary. + @param score Pointer to the current driver's probe score, which is used to order multiple matching drivers in the same match category. It defaults to the value of the IOProbeScore property in the drivers property table, or kIODefaultProbeScore if none is specified. The probe method may alter the score to affect start order. + @result Returns an IOService instance or zero when the probe is unsuccessful. In almost all cases the value of this is returned on success. If another IOService object is returned, the probed instance is detached and freed, and the returned instance is used in its stead for start. */ + + virtual IOService * probe( IOService * provider, + SInt32 * score ); + +/*! @function start + @abstract During an IOService instantiation, the start method is called when the IOService has been selected to run on the provider. + @discussion The registration process for an IOService (the provider) includes instantiating possible driver clients. The start method is called in the client instance when it has been selected (by its probe score and match category) to be the winning client. The client is already attached to the provider when start is called. + @result Return true if the start was successful, false otherwise (which will cause the instance to be detached and usually freed). 
*/ + + virtual bool start( IOService * provider ); + +/*! @function stop + @abstract During an IOService termination, the stop method is called in its clients before they are detached & it is destroyed. + @discussion The termination process for an IOService (the provider) will call stop in each of its clients, after they have closed the provider if they had it open, or immediately on termination. */ + + virtual void stop( IOService * provider ); + + /* Open / Close */ + +/*! @function open + @abstract Request active access to a provider. + @discussion IOService provides generic open and close semantics to track clients of a provider that have established an active datapath. The use of open & close, and rules regarding ownership are family defined, and defined by the handleOpen / handleClose methods in the provider. Some families will limit access to a provider based on its open state. + @param forClient Designates the client of the provider requesting the open. + @param options Options for the open. The provider family may implement options for open; IOService defines only kIOServiceSeize to request the device be withdrawn from its current owner. + @result Return true if the open was successful, false otherwise. */ + + virtual bool open( IOService * forClient, + IOOptionBits options = 0, + void * arg = 0 ); + +/*! @function close + @abstract Release active access to a provider. + @discussion IOService provides generic open and close semantics to track clients of a provider that have established an active datapath. The use of open & close, and rules regarding ownership are family defined, and defined by the handleOpen / handleClose methods in the provider. + @param forClient Designates the client of the provider requesting the close. + @param options Options available for the close. The provider family may implement options for close; IOService defines none. + @param arg Family specific arguments, ignored by IOService. 
*/ + + virtual void close( IOService * forClient, + IOOptionBits options = 0 ); + +/*! @function isOpen + @abstract Determine whether a specific, or any, client has an IOService open. + @discussion Returns the open state of an IOService with respect to the specified client, or when it is open by any client. + @param forClient If non-zero, isOpen returns the open state for that client. If zero is passed, isOpen returns the open state for all clients. + @result Returns true if the specific, or any, client has the IOService open. */ + + virtual bool isOpen( const IOService * forClient = 0 ) const; + +/*! @function handleOpen + @abstract Overrideable method to control the open / close behaviour of an IOService. + @discussion IOService calls this method in its subclasses in response to the open method, so the subclass may implement the request. The default implementation provides single owner access to an IOService via open. The object is locked via lockForArbitration before handleOpen is called. + @param forClient Designates the client of the provider requesting the open. + @param options Options for the open, may be interpreted by the implementor of handleOpen. + @result Return true if the open was successful, false otherwise. */ + + virtual bool handleOpen( IOService * forClient, + IOOptionBits options, + void * arg ); + +/*! @function handleClose + @abstract Overrideable method to control the open / close behaviour of an IOService. + @discussion IOService calls this method in its subclasses in response to the close method, so the subclass may implement the request. The default implementation provides single owner access to an IOService via open. The object is locked via lockForArbitration before handleClose is called. + @param forClient Designates the client of the provider requesting the close. + @param options Options for the close, may be interpreted by the implementor of handleOpen. 
*/ + + virtual void handleClose( IOService * forClient, + IOOptionBits options ); + +/*! @function handleIsOpen + @abstract Overrideable method to control the open / close behaviour of an IOService. + @discussion IOService calls this method in its subclasses in response to the open method, so the subclass may implement the request. The default implementation provides single owner access to an IOService via open. The object is locked via lockForArbitration before handleIsOpen is called. + @param forClient If non-zero, isOpen returns the open state for that client. If zero is passed, isOpen returns the open state for all clients. + @result Returns true if the specific, or any, client has the IOService open. */ + + virtual bool handleIsOpen( const IOService * forClient ) const; + + /* Stacking change */ + +/*! @function terminate + @abstract Make an IOService inactive and begin its destruction. + @discussion Registering an IOService informs possible clients of its existance and instantiates drivers that may be used with it; terminate involves the opposite process of informing clients that an IOService is no longer able to be used and will be destroyed. By default, if any client has the service open, terminate fails. If the kIOServiceRequired flag is passed however, terminate will be sucessful though further progress in the destruction of the IOService will not proceed until the last client has closed it. The service will be made inactive immediately upon successful termination, and all its clients will be notified via their message method with a message of type kIOMessageServiceIsTerminated. Both these actions take place on the callers thread. After the IOService is made inactive, further matching or attach calls will fail on it. Each client has its stop method called upon their close of an inactive IOService, or on its termination if they do not have it open. After stop, detach is called in each client. 
When all clients have been detached, the finalize method is called in the inactive service. The terminate process is inherently asynchronous since it will be deferred until all clients have chosen to close. + @param options In most cases no options are needed. kIOServiceSynchronous may be passed to cause terminate to not return until the service is finalized. */ + + virtual bool terminate( IOOptionBits options = 0 ); + +/*! @function finalize + @abstract The last stage in an IOService destruction. + @discussion The finalize method is called in an inactive (ie. terminated) IOService after the last client has detached. IOService's implementation will call stop, close, and detach on each provider. When finalize returns, the object's retain count will have no references generated by IOService's registration process. + @param options The options passed to the terminate method of the IOService are passed on to finalize. + @result Returns true. */ + + virtual bool finalize( IOOptionBits options ); + +/*! @function lockForArbitration + @abstract Locks an IOService against changes in state or ownership. + @discussion The registration, termination and open / close functions of IOService use lockForArbtration to single thread access to an IOService. lockForArbitration will grant recursive access to the same thread. + @param isSuccessRequired If a request for access to an IOService should be denied if it is terminated, isSuccessRequired should passed as false, otherwise pass true. */ + + virtual bool lockForArbitration( bool isSuccessRequired = true ); + +/*! @function unlockForArbitration + @abstract Unlocks an IOService after a successful lockForArbitration. + @discussion A thread granted exclusive access to an IOService should release it with unlockForArbitration. */ + + virtual void unlockForArbitration( void ); + +/*! @function terminateClient + @abstract Passes a termination up the stack. 
+ @discussion When an IOService is made inactive the default behaviour is to also make any of its clients that have it as their only provider also inactive, in this way recursing the termination up the driver stack. This method allows a terminated IOService to override this behaviour. Note the client may also override this behaviour by overriding its terminate method. + @param client The client of the of the terminated provider. + @param options Options originally passed to terminate, plus kIOServiceRecursing. + @result result of the terminate request on the client. */ + + virtual bool terminateClient( IOService * client, IOOptionBits options ); + + /* Busy state indicates discovery, matching or termination is in progress */ + +/*! @function getBusyState + @abstract Returns the busyState of an IOService. + @discussion Many activities in IOService are asynchronous. When registration, matching, or termination is in progress on an IOService, its busyState is increased by one. Change in busyState to or from zero also changes the IOService's provider's busyState by one, which means that an IOService is marked busy when any of the above activities is ocurring on it or any of its clients. + @result The busyState. */ + + virtual UInt32 getBusyState( void ); + +/*! @function adjustBusy + @abstract Adjusts the busyState of an IOService. + @discussion Applies a delta to an IOService's busyState. A change in the busyState to or from zero will changes the IOService's provider's busyState by one (in the same direction). + @param delta The delta to be applied to the IOService busy state. */ + + virtual void adjustBusy( SInt32 delta ); + +/*! @function waitQuiet + @abstract Wait for an IOService's busyState to be zero. + @discussion Blocks the caller until an IOService is non busy. + @param timeout Specifies a maximum time to wait. + @result Returns an error code if mach synchronization primitives fail, kIOReturnTimeout, or kIOReturnSuccess. 
*/ + + virtual IOReturn waitQuiet( mach_timespec_t * timeout = 0 ); + + /* Matching */ + +/*! @function matchPropertyTable + @abstract Allows a registered IOService to implement family specific matching. + @discussion All matching on an IOService will call this method to allow a family writer to implement matching in addition to the generic methods provided by IOService. The implementer should examine the matching dictionary passed to see if it contains properties the family understands for matching, and use them to match with the IOService if so. Note that since matching is also carried out by other parts of IOKit, the matching dictionary may contain properties the family does not understand - these should not be considered matching failures. + @param table The dictionary of properties to be matched against. + @param score Pointer to the current driver's probe score, which is used to order multiple matching drivers in the same match category. It defaults to the value of the IOProbeScore property in the drivers property table, or kIODefaultProbeScore if none is specified. + @result Returns false if the family considers the matching dictionary does not match in properties it understands, true otherwise. */ + + virtual bool matchPropertyTable( OSDictionary * table, + SInt32 * score ); + + virtual bool matchPropertyTable( OSDictionary * table ); + +/*! @function matchLocation + @abstract Allows a registered IOService to direct location matching. + @discussion By default, a location matching property will be applied to an IOService's provider. This method allows that behaviour to be overridden by families. + @param client The IOService at which matching is taking place. + @result Returns the IOService instance to be used for location matching. */ + + virtual IOService * matchLocation( IOService * client ); + + /* Resource service */ + +/*! @function publishResource + @abstract Use the resource service to publish a property. 
+ @discussion The resource service uses IOService's matching and notification to allow objects to be published and found by any IOKit client by a global name. publishResource makes an object available to anyone waiting for it or looking for it in the future. + @param key An OSSymbol key that globally identifies the object. + @param The object to be published. */ + + static void publishResource( const OSSymbol * key, OSObject * value = 0 ); + +/*! @function publishResource + @abstract Use the resource service to publish a property. + @discussion The resource service uses IOService's matching and notification to allow objects to be published and found by any IOKit client by a global name. publishResource makes an object available to anyone waiting for it or looking for it in the future. + @param key A C-string key that globally identifies the object. + @param The object to be published. */ + + static void publishResource( const char * key, OSObject * value = 0 ); + virtual bool addNeededResource( const char * key ); + + /* Notifications */ + +/*! @function addNotification + @abstract Add a persistant notification handler to be notified of IOService events. + @discussion IOService will deliver notifications of changes in state of an IOService to registered clients. The type of notification is specified by a symbol, for example gIOMatchedNotification or gIOTerminatedNotification, and notifications will only include IOService's that match the supplied matching dictionary. Notifications are ordered by a priority set with addNotification. When the notification is installed, its handler will be called with each of any currently existing IOService's that are in the correct state (eg. registered) and match the supplied matching dictionary, avoiding races between finding preexisting and new IOService events. The notification request is identified by an instance of an IONotifier object, through which it can be enabled, disabled or removed. 
addNotification will consume a retain count on the matching dictionary when the notification is removed. + @param type An OSSymbol identifying the type of notification and IOService state: +
gIOPublishNotification Delivered when an IOService is registered. +
gIOFirstPublishNotification Delivered when an IOService is registered, but only once per IOService instance. Some IOService's may be reregistered when their state is changed. +
gIOMatchedNotification Delivered when an IOService has been matched with all client drivers, and they have been probed and started. +
gIOFirstMatchNotification Delivered when an IOService has been matched with all client drivers, but only once per IOService instance. Some IOService's may be reregistered when their state is changed. +
gIOTerminatedNotification Delivered after an IOService has been terminated, during its finalize stage. + @param matching A matching dictionary to restrict notifications to only matching IOServices. The dictionary will be released when the notification is removed - consuming the passed in reference. + @param handler A C-function callback to deliver notifications. + @param target An instance reference for the callbacks use. + @param ref A reference constant for the callbacks use + @param priority A constant ordering all notifications of a each type. + @result Returns an instance of an IONotifier object that can be used to control or destroy the notification request. */ + + static IONotifier * addNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref = 0, + SInt32 priority = 0 ); + +/*! @function waitForService + @abstract Wait for a matching to service to be published. + @discussion Provides a method of waiting for an IOService matching the supplied matching dictionary to be registered and fully matched. + @param matching The matching dictionary describing the desired IOService. waitForService will consume one reference of the matching dictionary. + @param timeout The maximum time to wait. + @result A published IOService matching the supplied dictionary. */ + + static IOService * waitForService( OSDictionary * matching, + mach_timespec_t * timeout = 0); + +/*! @function getMatchingServices + @abstract Finds the set of current published IOServices matching a matching dictionary. + @discussion Provides a method of finding the current set of published IOServices matching the supplied matching dictionary. + @param matching The matching dictionary describing the desired IOServices. + @result An instance of an iterator over a set of IOServices. To be released by the caller. */ + + static OSIterator * getMatchingServices( OSDictionary * matching ); + +/*! 
@function installNotification + @abstract Add a persistant notification handler to be notified of IOService events. + @discussion A lower level interface to addNotification that will install a handler and return the current set of IOServices that are in the specified state and match the matching dictionary. + @param type See addNotification. + @param matching See addNotification. + @param handler See addNotification. + @param self See addNotification. + @param ref See addNotification. + @param priority See addNotification. + @param existing Returns an iterator over the set of IOServices that are currently in the specified state and match the matching dictionary. + @result See addNotification. */ + + static IONotifier * installNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ); + + /* Helpers to make matching dictionaries for simple cases, + * they add keys to an existing dictionary, or create one. */ + +/*! @function serviceMatching + @abstract Create a matching dictionary, or add matching properties to an existing dictionary, that specify an IOService class match. + @discussion A very common matching criteria for IOService is based on its class. serviceMatching will create a matching dictionary that specifies any IOService of a class, or its subclasses. The class is specified by name, and an existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one. + @param className The class name, as a const C-string. Class matching is successful on IOService's of this class or any subclass. + @param table If zero, serviceMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. + @result The matching dictionary created, or passed in, is returned on success, or zero on failure. 
*/ + + static OSDictionary * serviceMatching( const char * className, + OSDictionary * table = 0 ); + +/*! @function serviceMatching + @abstract Create a matching dictionary, or add matching properties to an existing dictionary, that specify an IOService class match. + @discussion A very common matching criteria for IOService is based on its class. serviceMatching will create a matching dictionary that specifies any IOService of a class, or its subclasses. The class is specified by name, and an existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one. + @param className The class name, as an OSString (which includes OSSymbol). Class matching is successful on IOService's of this class or any subclass. + @param table If zero, serviceMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. + @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ + + static OSDictionary * serviceMatching( const OSString * className, + OSDictionary * table = 0 ); + +/*! @function nameMatching + @abstract Create a matching dictionary, or add matching properties to an existing dictionary, that specify an IOService name match. + @discussion A very common matching criteria for IOService is based on its name. nameMatching will create a matching dictionary that specifies any IOService which respond sucessfully to the IORegistryEntry method compareName. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one. + @param name The service's name, as a const C-string. Name matching is successful on IOService's which respond sucessfully to the IORegistryEntry method compareName. 
+ @param table If zero, nameMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. + @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ + + static OSDictionary * nameMatching( const char * name, + OSDictionary * table = 0 ); + +/*! @function nameMatching + @abstract Create a matching dictionary, or add matching properties to an existing dictionary, that specify an IOService name match. + @discussion A very common matching criteria for IOService is based on its name. nameMatching will create a matching dictionary that specifies any IOService which respond sucessfully to the IORegistryEntry method compareName. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one. + @param name The service's name, as an OSString (which includes OSSymbol). Name matching is successful on IOService's which respond sucessfully to the IORegistryEntry method compareName. + @param table If zero, nameMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. + @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ + + static OSDictionary * nameMatching( const OSString* name, + OSDictionary * table = 0 ); + +/*! @function resourceMatching + @abstract Create a matching dictionary, or add matching properties to an existing dictionary, that specify a resource service match. + @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in IOKit based on a name, using the standard IOService matching and notification calls. + @param name The resource name, as a const C-string. 
Resource matching is successful when an object by that name has been published with the publishResource method. + @param table If zero, resourceMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. + @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ + + static OSDictionary * resourceMatching( const char * name, + OSDictionary * table = 0 ); + +/*! @function resourceMatching + @abstract Create a matching dictionary, or add matching properties to an existing dictionary, that specify a resource service match. + @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in IOKit based on a name, using the standard IOService matching and notification calls. + @param name The resource name, as an OSString (which includes OSSymbol). Resource matching is successful when an object by that name has been published with the publishResource method. + @param table If zero, resourceMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary. + @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */ + + static OSDictionary * resourceMatching( const OSString * name, + OSDictionary * table = 0 ); + +/*! @function addLocation + @abstract Add a location matching property to an existing dictionary. + @discussion This function creates matching properties that specify the location of a IOService, as an embedded matching dictionary. This matching will be successful on an IOService which attached to an IOService which matches this location matching dictionary. + @param table The matching properties are added to the specified dictionary, which must be non-zero. + @result The location matching dictionary created is returned on success, or zero on failure. 
*/ + + static OSDictionary * addLocation( OSDictionary * table ); + + /* Helpers for matching dictionaries. */ + +/*! @function compareProperty + @abstract Utility to compare a property in a matching dictionary with an IOService's property table. + @discussion This is a helper function to aid in implementing matchPropertyTable. If the property specified by key exists in the matching dictionary, it is compared with a property of the same name in the IOService's property table. The comparison is performed with the isEqualTo method. If the property does not exist in the matching table, success is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned. + @param matching The matching dictionary, which must be non-zero. + @param key The dictionary key specifying the property to be compared, as a C-string. + @result If the property does not exist in the matching table, true is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned. Otherwise the result of calling the property from the matching dictionary's isEqualTo method with the IOService property as an argument is returned. */ + + virtual bool compareProperty( OSDictionary * matching, + const char * key ); +/*! @function compareProperty + @abstract Utility to compare a property in a matching dictionary with an IOService's property table. + @discussion This is a helper function to aid in implementing matchPropertyTable. If the property specified by key exists in the matching dictionary, it is compared with a property of the same name in the IOService's property table. The comparison is performed with the isEqualTo method. If the property does not exist in the matching table, success is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned. + @param matching The matching dictionary, which must be non-zero. 
+ @param key The dictionary key specifying the property to be compared, as an OSString (which includes OSSymbol). + @result If the property does not exist in the matching table, true is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned. Otherwise the result of calling the property from the matching dictionary's isEqualTo method with the IOService property as an argument is returned. */ + + virtual bool compareProperty( OSDictionary * matching, + const OSString * key ); + +/*! @function compareProperties + @abstract Utility to compare a set of properties in a matching dictionary with an IOService's property table. + @discussion This is a helper function to aid in implementing matchPropertyTable. A collection of dictionary keys specifies properties in a matching dictionary to be compared, with compareProperty, with an IOService property table, if compareProperty returns true for each key, success is return else failure. + @param matching The matching dictionary, which must be non-zero. + @param keys A collection (eg. OSSet, OSArray, OSDictionary) which should contain OSStrings (or OSSymbols) that specify the property keys to be compared. + @result if compareProperty returns true for each key in the collection, success is return else failure. */ + + virtual bool compareProperties( OSDictionary * matching, + OSCollection * keys ); + + /* Client / provider accessors */ + +/*! @function attach + @abstract Attaches an IOService client to a provider in the registry. + @discussion This function called in an IOService client enters the client into the registry as a child of the provider in the service plane. The provider must be active or the attach will fail. Multiple attach calls to the same provider are no-ops and return success. A client may be attached to multiple providers. Entering an object into the registry will retain both the client and provider until they are detached. 
+ @param provider The IOService object which will serve as this objects provider. + @result false if the provider is inactive or on a resource failure, otherwise true. */ + + virtual bool attach( IOService * provider ); + +/*! @function detach + @abstract Detaches an IOService client from a provider in the registry. + @discussion This function called in an IOService client removes the client as a child of the provider in the service plane of the registry. If the provider is not a parent of the client this is a no-op, otherwise the registry will release both the client and provider. + @param provider The IOService object to detach from. */ + + virtual void detach( IOService * provider ); + +/*! @function getProvider + @abstract Returns an IOService's primary provider. + @discussion This function called in an IOService client will return the provider to which it was first attached. Since the majority of IOService objects have only one provider, this is a useful simplification and also supports caching of the provider when the registry is unchanged. + @result Returns the first provider of the client, or zero if the IOService is not attached into the registry. The provider is retained while the client is attached, and should not be released by the caller. */ + + virtual IOService * getProvider( void ) const; + +/*! @function getWorkLoop + @abstract Returns the current work loop or provider->getWorkLoop(). + @discussion This function returns a valid work loop that a client can use to add an IOCommandGate to. The intention is that an IOService client has data that needs to be protected but doesn't want to pay the cost of an entire dedicated thread. This data has to be accessed from a providers call out context as well. So to achieve both of these goals the client creates an IOCommandGate to lock access to his data but he registers it with the providers work loop, i.e. the work loop which will make the completion call outs. 
In one fell swoop we avoid a potentially nasty deadlock 'cause a work loop's gate is recursive. + @result Always returns a work loop, either the current work loop or it walks up the $link getProvider() chain calling getWorkLoop. Eventually it will reach a valid work loop based driver or the root of the io tree where it will return a system wide work loop. Returns 0 if it fails to find (or create) */ + + virtual IOWorkLoop * getWorkLoop() const; + +/*! @function getProviderIterator + @abstract Returns an iterator over an IOService's providers. + @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers. + @result Returns an iterator over the providers of the client, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ + + virtual OSIterator * getProviderIterator( void ) const; + +/*! @function getOpenProviderIterator + @abstract Returns an iterator over an client's providers that are currently opened by the client. + @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers, locking each in turn with lockForArbitration and returning those that have been opened by the client. + @result Returns an iterator over the providers the client has open, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */ + + virtual OSIterator * getOpenProviderIterator( void ) const; + +/*! @function getClient + @abstract Returns an IOService's primary client. 
+ @discussion This function called in an IOService provider will return the first client to attach to it. For IOService objects which have only only one client, this may be a useful simplification. + @result Returns the first client of the provider, or zero if the IOService is not attached into the registry. The client is retained while it is attached, and should not be released by the caller. */ + + virtual IOService * getClient( void ) const; + +/*! @function getClientIterator + @abstract Returns an iterator over an IOService's clients. + @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients. + @result Returns an iterator over the clients of the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ + + virtual OSIterator * getClientIterator( void ) const; + +/*! @function getOpenClientIterator + @abstract Returns an iterator over an provider's clients that currently have opened the provider. + @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients, locking each in turn with lockForArbitration and returning those that have opened the provider. + @result Returns an iterator over the clients which the provider open, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */ + + virtual OSIterator * getOpenClientIterator( void ) const; + +/*! @function callPlatformFunction + @abstract Calls the platform function with the given name. 
+ @discussion The platform expert or other drivers may implement various functions to control hardware features. callPlatformFunction allows any IOService object to access these functions. Normally callPlatformFunction will be called on a service's provider. The provider will service the request or pass it to it's provider. The systems IOPlatformExpert subclass will catch functions it knows about and redirect them into other parts of the IOService plane. If the IOPlatformExpert subclass can not execute the function, the base class will be called. The IOPlatformExpert base class will attempt to find a service to execute the function by looking up the function name in a IOResources name space. A service may publish a service using publishResource(functionName, this). If no service can be found to execute the function an error will be returned. + @param functionName name of the function to be called. When functionName is a c-string, callPlatformFunction will convert the c-string to a OSSymbol and call other OSSymbol version of callPlatformFunction. This process can block and should not be used from an interrupt context. + @param waitForFunction if true callPlatformFunction will not return until the function has been called. + @result Return an IOReturn code, kIOReturnSuccess if the function was successfully executed, kIOReturnUnsupported if a service to execute the function could not be found. Other return codes may be returned by the function.*/ + + virtual IOReturn callPlatformFunction( const OSSymbol * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ); + + virtual IOReturn callPlatformFunction( const char * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ); + + + /* Some accessors */ + +/*! @function getPlatform + @abstract Returns a pointer to the platform expert instance for the machine. 
+ @discussion This method provides an accessor to the platform expert instance for the machine. + @result A pointer to the IOPlatformExport instance. It should not be released by the caller. */ + + static IOPlatformExpert * getPlatform( void ); + +/*! @function getServiceRoot + @abstract Returns a pointer to the root of the service plane. + @discussion This method provides an accessor to the root of the service plane for the machine. + @result A pointer to the IOService instance at the root of the service plane. It should not be released by the caller. */ + + static IOService * getServiceRoot( void ); + + /* Allocate resources for a matched service */ + +/*! @function getResources + @abstract Allocate any needed resources for a published IOService before clients attach. + @discussion This method is called during the registration process for an IOService object if there are success driver matches, before any clients attach. It allows for lazy allocation of resources to an IOService when a matching driver is found. + @result Return an IOReturn code, kIOReturnSuccess is necessary for the IOService to be successfully used, otherwise the registration process for the object is halted. */ + + virtual IOReturn getResources( void ); + + /* Device memory accessors */ + +/*! @function getDeviceMemoryCount + @abstract Returns a count of the physical memory ranges available for a device. + @discussion This method will return the count of physical memory ranges, each represented by an IODeviceMemory instance, that have been allocated for a memory mapped device. + @result An integer count of the number of ranges available. */ + + virtual IOItemCount getDeviceMemoryCount( void ); + +/*! @function getDeviceMemoryWithIndex + @abstract Returns an instance of IODeviceMemory representing one of a device's memory mapped ranges. + @discussion This method will return a pointer to an instance of IODeviceMemory for the physical memory range at the given index for a memory mapped device. 
+ @param index An index into the array of ranges assigned to the device. + @result A pointer to an instance of IODeviceMemory, or zero if the index is beyond the count available. The IODeviceMemory is retained by the provider, so is valid while attached, or while any mappings to it exist. It should not be released by the caller. See also mapDeviceMemory() which will create a device memory mapping. */ + + virtual IODeviceMemory * getDeviceMemoryWithIndex( unsigned int index ); + +/*! @function mapDeviceMemoryWithIndex + @abstract Maps a physical range of a device. + @discussion This method will create a mapping for the IODeviceMemory at the given index, with IODeviceMemory::map(options). The mapping is represented by the returned instance of IOMemoryMap, which should not be released until the mapping is no longer required. + @param index An index into the array of ranges assigned to the device. + @result An instance of IOMemoryMap, or zero if the index is beyond the count available. The mapping should be released only when access to it is no longer required. */ + + virtual IOMemoryMap * mapDeviceMemoryWithIndex( unsigned int index, + IOOptionBits options = 0 ); + +/*! @function getDeviceMemory + @abstract Returns the array of IODeviceMemory objects representing a device's memory mapped ranges. + @discussion This method will return an array of IODeviceMemory objects representing the physical memory ranges allocated to a memory mapped device. + @result An OSArray of IODeviceMemory objects, or zero if none are available. The array is retained by the provider, so is valid while attached. */ + + virtual OSArray * getDeviceMemory( void ); + +/*! @function setDeviceMemory + @abstract Sets the array of IODeviceMemory objects representing a device's memory mapped ranges. + @discussion This method will set an array of IODeviceMemory objects representing the physical memory ranges allocated to a memory mapped device. 
+ @param array An OSArray of IODeviceMemory objects, or zero if none are available. The array will be retained by the object. */ + + virtual void setDeviceMemory( OSArray * array ); + + /* Interrupt accessors */ + +/*! @function registerInterrupt + @abstract Register a C-function interrupt handler for a device supplying interrupts. + @discussion This method will install a C-function interrupt handler to be called at primary interrupt time for a device's interrupt. Only one handler may be installed per interrupt source. IOInterruptEventSource provides an IOWorkLoop based abstraction for interrupt delivery that may be more appropriate for work loop based drivers. + @param source The index of the interrupt source in the device. + @param target An object instance to be passed to the interrupt handler. + @param handler The C-function to be to be called at primary interrupt time when the interrupt occurs. The handler should process the interrupt by clearing the interrupt, or by disabling the source. + @param refCon A reference constant for the handler's use. + @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid.
kIOReturnNoResources is returned if the interrupt already has an installed handler. */ + + virtual IOReturn registerInterrupt(int source, OSObject *target, + IOInterruptAction handler, + void *refCon = 0); + +/*! @function unregisterInterrupt + @abstract Remove a C-function interrupt handler for a device supplying hardware interrupts. + @discussion This method will remove a C-function interrupt handler previously installed with registerInterrupt. + @param source The index of the interrupt source in the device. + @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid. */ + + virtual IOReturn unregisterInterrupt(int source); + +/*! @function getInterruptType + @abstract Return the type of interrupt used for a device supplying hardware interrupts. + @discussion This method will return the type of interrupt used by the device. + @param source The index of the interrupt source in the device. + @param interruptType The interrupt type for the interrupt source will be stored here by getInterruptType.
kIOInterruptTypeEdge will be returned for edge triggered sources.
kIOInterruptTypeLevel will be returned for level triggered sources. + @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid. */ + + virtual IOReturn getInterruptType(int source, int *interruptType); + +/*! @function enableInterrupt + @abstract Enable a device interrupt. + @discussion Enable a device interrupt. It is the caller's responsibility to keep track of the enable state of the interrupt source. + @param source The index of the interrupt source in the device. + @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid. */ + + virtual IOReturn enableInterrupt(int source); + +/*! @function disableInterrupt + @abstract Disable a device interrupt. + @discussion Disable a device interrupt. It is the caller's responsibility to keep track of the enable state of the interrupt source. + @param source The index of the interrupt source in the device. + @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid. */ + + virtual IOReturn disableInterrupt(int source); + +/*! @function causeInterrupt + @abstract Cause a device interrupt to occur. + @discussion Emulate a hardware interrupt, to be called from task level. + @param source The index of the interrupt source in the device. + @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid. */ + + virtual IOReturn causeInterrupt(int source); + +/*! @function requestProbe + @abstract An external request that hardware be re-scanned for devices. + @discussion For bus families that do not usually detect device addition or removal, this method represents an external request (eg. from a utility application) to rescan and publish or remove found devices. + @param options Family defined options, not interpreted by IOService. + @result An IOReturn code. */ + + virtual IOReturn requestProbe( IOOptionBits options ); + + /* Generic API for non-data-path upstream calls */ + +/*! @function message + @abstract Receive a generic message delivered from an attached provider. + @discussion A provider may deliver messages via the message method to its clients informing them of state changes, for example kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by IOKit in IOMessage.h while others may family dependent. This method is implemented in the client to receive messages. + @param type A type defined in IOMessage.h or defined by the provider family. + @param provider The provider from which the message originates. + @param argument An argument defined by the provider family, not used by IOService. + @result An IOReturn code defined by the message type. */ + + virtual IOReturn message( UInt32 type, IOService * provider, + void * argument = 0 ); + +/*! @function messageClient + @abstract Send a generic message to an attached client. + @discussion A provider may deliver messages via the message method to its clients informing them of state changes, for example kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by IOKit in IOMessage.h while others may family dependent. This method may be called in the provider to send a message to the specified client, which may be useful for overrides. 
+ @param type A type defined in IOMessage.h or defined by the provider family. + @param client A client of the IOService to send the message. + @param argument An argument defined by the provider family, not used by IOService. + @result The return code from the client message call. */ + + virtual IOReturn messageClient( UInt32 messageType, OSObject * client, + void * messageArgument = 0, vm_size_t argSize = 0 ); + +/*! @function messageClients + @abstract Send a generic message to all attached clients. + @discussion A provider may deliver messages via the message method to its clients informing them of state changes, for example kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by IOKit in IOMessage.h while others may family dependent. This method may be called in the provider to send a message to all the attached clients, via the messageClient method. + @param type A type defined in IOMessage.h or defined by the provider family. + @param argument An argument defined by the provider family, not used by IOService. + @result Any non-kIOReturnSuccess return codes returned by the clients, or kIOReturnSuccess if all return kIOReturnSuccess. */ + + virtual IOReturn messageClients( UInt32 type, + void * argument = 0, vm_size_t argSize = 0 ); + + virtual IONotifier * registerInterest( const OSSymbol * typeOfInterest, + IOServiceInterestHandler handler, + void * target, void * ref = 0 ); + + virtual void applyToProviders( IOServiceApplierFunction applier, + void * context ); + + virtual void applyToClients( IOServiceApplierFunction applier, + void * context ); + + virtual void applyToInterested( const OSSymbol * typeOfInterest, + OSObjectApplierFunction applier, + void * context ); + + virtual IOReturn acknowledgeNotification( IONotificationRef notification, + IOOptionBits response ); + + /* User client create */ + +/*! @function newUserClient + @abstract A request to create a connection for a non kernel client. 
+ @discussion A non kernel client may request a connection be opened via the IOServiceOpen() library function, which will call this method in an IOService. The rules & capabilities of user level clients are family dependent, and use the functions of the IOUserClient class for support. IOService's implementation returns kIOReturnUnsupported, so any family supporting user clients must implement this method. + @param owningTask The mach task requesting the connection. + @param securityID A token representing the access level for the task. + @param type A constant specifying the type of connection to be created, specified by the caller of IOServiceOpen and interpreted only by the family. + @param handler An instance of an IOUserClient object to represent the connection, which will be released when the connection is closed, or zero if the connection was not opened. + @param properties A dictionary of additional properties for the connection. + @result A return code to be passed back to the caller of IOServiceOpen. */ + + virtual IOReturn newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler ); + + virtual IOReturn newUserClient( task_t owningTask, void * securityID, + UInt32 type, IOUserClient ** handler ); + + /* Return code utilities */ + +/*! @function stringFromReturn + @abstract A utility to supply a programmer friendly string from an IOReturn code. + @discussion Strings are available for the standard return codes in IOReturn.h in IOService, while subclasses may implement this method to interpret family dependent return codes. + @param rtn The IOReturn code. + @result A pointer to a constant string, or zero if the return code is unknown. */ + + virtual const char * stringFromReturn( IOReturn rtn ); + +/*! @function errnoFromReturn + @abstract A utility to translate an IOReturn code to a BSD errno. 
+ @discussion BSD defines its own return codes for its functions in sys/errno.h, and IOKit families may need to supply compliant results in BSD shims. Results are available for the standard return codes in IOReturn.h in IOService, while subclasses may implement this method to interpret family dependent return codes. + @param rtn The IOReturn code. + @result The BSD errno or EIO if unknown. */ + + virtual int errnoFromReturn( IOReturn rtn ); + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + /* * * * * * * * * * * * Internals * * * * * * * * * * * */ + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +public: + int _numInterruptSources; + IOInterruptSource *_interruptSources; + + static void initialize( void ); + + virtual bool serializeProperties( OSSerialize * s ) const; + + static void setPlatform( IOPlatformExpert * platform); + + static IOReturn catalogNewDrivers( OSOrderedSet * newTables ); + + static IOService * resources( void ); + virtual bool checkResources( void ); + virtual bool checkResource( OSObject * matching ); + + virtual void probeCandidates( OSOrderedSet * matches ); + virtual bool startCandidate( IOService * candidate ); + virtual IOService * getClientWithCategory( const OSSymbol * category ); + + virtual bool passiveMatch( OSDictionary * matching, bool changesOK = false); + + virtual void startMatching( IOOptionBits options = 0 ); + virtual void doServiceMatch( IOOptionBits options ); + virtual void doServiceTerminate( IOOptionBits options ); + + static OSObject * getExistingServices( OSDictionary * matching, + IOOptionBits inState, IOOptionBits options = 0 ); + + static IONotifier * setNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref, + SInt32 priority = 0 ); + + static IONotifier * doInstallNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref, + SInt32 
priority, OSIterator ** existing );
+
+ static bool syncNotificationHandler( void * target, void * ref,
+ IOService * newService );
+
+ virtual void deliverNotification( const OSSymbol * type,
+ IOOptionBits orNewState, IOOptionBits andNewState );
+
+ bool invokeNotifer( class _IOServiceNotifier * notify );
+
+ virtual void unregisterAllInterest( void );
+
+ virtual IOReturn waitForState( UInt32 mask, UInt32 value,
+ mach_timespec_t * timeout = 0 );
+
+ virtual IOReturn resolveInterrupt(IOService *nub, int source);
+ virtual IOReturn lookupInterrupt(int source, bool resolve, IOInterruptController **interruptController);
+
+ /* power management */
+
+/*! @function PMinit
+ A power management policy-maker for a device calls itself here to initialize its power management.
+ PMinit allocates and initializes the power management instance variables, and it should be called before any
+ access to those variables or the power management methods. */
+ virtual void PMinit (void );
+
+/*! @function PMstop
+ A power management policy-maker for a device calls itself here when it resigns its responsibilities as
+ policy-maker. This typically happens when it is handing off the responsibility to another policy-maker,
+ or when the device is removed from the system. The power management variables don't exist after
+ this call, and the power management methods in the caller shouldn't be accessed. */
+ virtual void PMstop ( void );
+
+/*! @function joinPMtree
+ A policy-maker calls its nub here when initializing, to be attached into
+ the power management hierarchy. The default function is to call the
+ platform expert, which knows how to do it. This method is overridden
+ by a nub subclass which may either know how to do it, or may need
+ to take other action.
+
+ This may be the only "power management" method used in a nub, meaning
+ it may be called even if the nub is not initialized for power management.
+
+ Before the nub returns from this method, the caller will probably be called
+ at "setPowerParent" and "setAggressiveness" and possibly at "addPowerChild" as it is
+ added to the hierarchy. */
+ virtual void joinPMtree ( IOService * driver );
+
+/*! @function registerPowerDriver
+ A driver calls a policy-maker here to volunteer to control power to the device.
+ If the policy-maker accepts the volunteer, it adds the volunteer to its list of
+ interested drivers, and it will call the volunteer at appropriate times to switch
+ the power state of the device.
+ @param controllingDriver
+ This points to the calling driver.
+ @param powerStates
+ This is an array of power states which the driver can deal with. If this array
+ is no less rich than one supplied by an earlier volunteer, then the policy-maker
+ uses the calling driver as its power-controlling driver.
+ @param numberOfStates
+ The number of power states in the array. Power states are defined in
+ pwr_mgt/IOPMpowerState.h.
+ @result
+ IOPMNoErr is returned. There are various error conditions possible which prevent
+ the policy-maker from accepting the new power state array. These conditions
+ are logged in the power management event log, but not returned to the caller. */
+ virtual IOReturn registerPowerDriver ( IOService* controllingDriver, IOPMPowerState* powerStates, unsigned long numberOfStates );
+
+/*! @function registerInterestedDriver
+ Some IOService calls a policy-maker here to register interest in the changing
+ power state of its device.
+ @param theDriver
+ The policy-maker adds this pointer to the calling IOService to its list of
+ interested drivers. It informs drivers on this list pre- and post-power change.
+ @result
+ The policy-maker returns flags describing the capability of the device in its
+ current power state.
The policy-maker does not interpret these flags or + understand them; they come from the power state array, and are understood + only by interested drivers and perhaps the power-controlling driver. If the + current power state is not yet defined, zero is returned. This is the case when + the policy-maker is not yet in the power domain hierarchy or when it doesn't + have a power-controlling driver yet. */ + virtual IOPMPowerFlags registerInterestedDriver ( IOService* theDriver ); + +/*! @function deRegisterInterestedDriver + An IOService which has previously registered with a policy-maker as an interested + driver calls the policy-maker here to withdraw its interest. The policy-maker removes + it from its list of interested drivers. + @result + These bits describe the capability of the device in its current power state. They are + not understood by the policy-maker; they come from the capabilityFlags field of the + current power state in the power state array. */ + virtual IOReturn deRegisterInterestedDriver ( IOService * theDriver ); + +/*! @function acknowledgePowerChange + When a device is changing power state, its policy-maker informs interested + parties before and after the change. Interested parties are those which + have registered as interested drivers and also children of the policy-maker + in the case that it is a power domain. + When an object is so informed, it can return an indication that it is prepared + for the change, or it can return an indication that it needs some time to + prepare. In this case it will call this method in the policy-maker when it has + prepared. + @param theDriver + This points to the calling driver. The policy-maker uses it to know if all + interested parties have acknowledged the power state change. + @result + IOPMNoErr is returned. */ + virtual IOReturn acknowledgePowerChange ( IOService * whichDriver ); + +/*! 
@function acknowledgeSetPowerState + When a policy-maker instructs its controlling driver to switch the state of + the device, the driver can return an indication that the change is complete, + or it can return an indication that it needs some time to make the change. + In this case it will call this method in the policy-maker when it has made the + power state change. + @result + IOPMNoErr is returned. */ + virtual IOReturn acknowledgeSetPowerState ( void ); + +/*! @function powerDomainWillChangeTo + When a power domain changes state, it notifies its children, which + are policy-makers, by calling them at this method. It calls here + before it makes the change, and a called policy-maker can return + IOPMAckImplied to indicate that it is prepared for the change, + or it can return a non-zero number to indicate that it is not prepared + but will prepare and then call the parent at acknowledgePowerChange. + + To prepare for a lowering of the power domain, the policy-maker + informs all its interested parties of any resulting change in its device, + and when they have all acknowledged, it calls its controlling driver + to switch the device to an appropriate power state for the imminent + domain state. If any interested driver or the controlling driver does + not acknowledge immediately, then the policy-maker also will not. + + To prepare for a raising of the power domain, the policy-maker + informs all its interested parties of any resulting change in its device. + If any do not acknowledge immediately, then the policy-maker also will not. + @param newPowerStateFlags + These flags describe the character of power in the imminent domain state. + They are not understood by the policy-maker. It asks the controlling + driver to translate them into a state number within the power state array. + (The policy-maker for the domain also doesn't understand the bits; they + come from a outputPowerCharacter field of the power state array for + the power domain.) 
+ @param whichParent + This pointer identifies the calling parent. */ + IOReturn powerDomainWillChangeTo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnection * whichParent ); + +/*! @function powerDomainDidChangeTo + When a power domain changes state, it notifies its children, which + are policy-makers, by calling them at this method. It calls here + after the changed power of the power domain has settled at the + new level. A called policy-maker can return + IOPMAckImplied to indicate that it is prepared for the change, + or it can return a non-zero number to indicate that it is not prepared + but will prepare and then call the parent at acknowledgePowerChange. + + To prepare for a lowered power domain, the policy-maker + informs all its interested parties of the new power state of its device. + If any do not acknowledge immediately, then the policy-maker also will not. + + To prepare for a raised power domain, the policy-maker calls its controlling + driver to switch the device to the appropriate power state for the new + domain state. When that is accomplished, the policy-maker informs + all its interested parties of the new power state. If any interested driver + or the controlling driver does not acknowledge immediately, then the + policy-maker also will not. + + @param newPowerStateFlags + These flags describe the character of power in the new domain state. + They are not understood by the policy-maker. It asks the controlling + driver to translate them into a state number within the power state array. + (The policy-maker for the domain also doesn't understand the bits; they + come from a outputPowerCharacter field of the power state array for + the power domain.) + @param whichParent + This pointer identifies the calling parent. */ + IOReturn powerDomainDidChangeTo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnection * whichParent ); + +/*! 
@function requestPowerDomainState
+ The child of a power domain calls its parent here to request power of a certain
+ character. It does this after lowering power in its own device which allows
+ it to tolerate lower power in the domain, and it does this if it needs more
+ power for its device than is currently available in the domain.
+ @param desiredState
+ These flags describe the power required for some state of the caller's device.
+ They are not understood by either the child or the parent. They come from
+ the power state array of the child (in the inputPowerRequirement field), and
+ the parent compares them to bits in the outputPowerCharacter fields of its
+ power state array.
+ @param whichChild
+ This points to the caller, so the power domain can know which child is requesting.
+ @param specificationFlags
+ This value modifies the parent's choice of power state.
+ If the parameter is IOPMNextHigherState, the parent will choose the lowest state
+ which matches desiredState and which is higher than the current state.
+ If the parameter is IOPMHighestState, the parent will choose the highest state
+ which matches desiredState.
+ If the parameter is IOPMNextLowerState, the parent will choose the highest state
+ which matches desiredState and which is lower than the current state.
+ If the parameter is IOPMLowestState, the parent will choose the lowest state
+ which matches desiredState.
+ A state matches desiredState if all the bits set in desiredState are also set in the
+ outputPowerCharacter field of that state in the parent's power state array.
+ @result
+ The power domain parent returns IOPMBadSpecification if specificationFlags is
+ not well-formed. It returns IOPMNoSuchState if no state in its array satisfies
+ the caller's specification. It returns IOPMNotYetInitialized if it has no power
+ state array yet to compare with. Otherwise it returns IOPMNoErr.
In the last + case it will initiate its change to the new state if it has a parent in the hierarchy + (or is the root power domain.) */ + virtual IOReturn requestPowerDomainState ( IOPMPowerFlags desiredState, IOPowerConnection * whichChild, unsigned long specificationFlags ); + +/*! @function makeUsable + Some client of a device is asking that it become usable. Although + this has not come from the policy-maker for the device, treat it exactly + as if it had. In this way, subsequent requests for lower power from + the policy-maker will pre-empt this request. + We treat this as policy-maker request to switch to the highest power state. + @result + The return code reflects the state of the policy-maker's internal queue of power + changes and can be ignored by the caller. */ + virtual IOReturn makeUsable ( void ); + + /*! @function temporaryPowerClampOn + A power domain calls this method to hold itself in the highest power state until it + has children, and at that point the domain state is controlled by the childrens' + requirements. + @result + The return code reflects the state of the policy-maker's internal queue of power + changes and can be ignored by the caller. */ + virtual IOReturn temporaryPowerClampOn ( void ); + +/*! @function changePowerStateTo + The power-controlling driver calls the policy-maker here when it wants the device + switched to a different power state. This is mildly ironic in that it is the controlling + driver which does the switching, but it must do it this way so that the policy-maker + can make sure the power domain is correct and to notify interested parties + pre-change. When appropriate, the policy-maker will call the controlling driver and + have it switch the device to the requested state in the usual way. 
+ This request by the controlling driver is sticky in that the policy-maker will not
+ switch the device lower than this request, so if the driver needs power raised for
+ some reason and then gets it and does what it needs, it should then rescind the
+ request by requesting state zero. This will allow the policy-maker to control the
+ device as usual.
+ @param ordinal
+ This is the number, in the power state array, of the desired power state.
+ @result
+ The return code reflects the state of the policy-maker's internal queue of power
+ changes and can be ignored by the caller. */
+ virtual IOReturn changePowerStateTo ( unsigned long ordinal );
+
+/*! @function currentCapability
+ Some object calls a policy-maker here to find out the current capability of a device.
+ The policy-maker returns a copy of the capabilityFlags field for the current power
+ state in the power state array. */
+ virtual IOPMPowerFlags currentCapability ( void );
+
+/*! @function currentPowerConsumption
+ Some object calls a policy-maker here to find out the current power consumption of a device.
+ The policy-maker returns a copy of the staticPower field for the current power state in the
+ power state array. */
+ virtual unsigned long currentPowerConsumption ( void );
+
+/*! @function activityTickle
+ A principal function of a policy-maker is deciding when the device is idle and can be
+ powered down. To do this it needs to know when the device is being used. In some
+ cases it is in the data path to the device so it knows when it is being used. In others
+ it is not and must be told. The activityTickle method is provided for objects in the
+ system to tell a policy-maker that its device is being used.
+
+ If the policy-maker is managing the idleness determination totally on its own, the
+ parameter should be kIOPMSubclassPolicy, and the policy-maker should intercept
+ the activityTickle call, because the superclass will do nothing with it.
+
+ The IOService superclass can manage idleness determination, too, with the simple
+ mechanism of an idle timer and this activityTickle call. To start this up, the policy-
+ maker calls its superclass at setIdleTimerPeriod. This starts a timer for the time
+ interval specified in the call. When the timer expires, the superclass checks to see
+ if there has been any activity since the last timer expiration. (It checks to see if
+ activityTickle has been called). If there has been activity, it restarts the timer, and
+ this process continues. When the timer expires, and there has been no device
+ activity, the superclass lowers the device power state to the next lower state.
+ This can continue until the device is in state zero.
+
+ After the device has been powered down by at least one power state,
+ a call to activityTickle will cause the device to be switched to a higher state
+ required for the activity.
+
+ activityTickle in the IOService superclass is meant to be called by sub-classed
+ policy-makers, because only they understand the parameters. They may implement
+ an activityTickle for their clients and then call this activityTickle in the superclass.
+ @param type
+ activityTickle with parameter kIOPMSubclassPolicy is not handled in IOService
+ and should be intercepted by the subclass policy-maker.
+ activityTickle with parameter kIOPMSuperclassPolicy1 causes an activity flag to be set,
+ and the device state checked. If the device has been powered down, it is powered up again.
+ @param stateNumber
+ When the type parameter is kIOPMSuperclassPolicy1, the stateNumber contains
+ the desired power state ordinal for the activity. If the device is in a lower state,
+ the superclass will switch it to this state. This is for devices which can handle
+ some accesses in lower power states than others; the device is powered up only
+ as far as it needs to be for the activity.
+ @result + When the type parameter is kIOPMSuperclassPolicy1, the superclass returns true + if the device is currently in the state specified by stateNumber. If it is in a lower + state and must be brought up, it returns false. In this case the superclass will + cause the device to be brought up. */ + virtual bool activityTickle ( unsigned long type, unsigned long stateNumber=0 ); + +/*! @function setAggressiveness + The parent of a policy-maker calls it here while broadcasting an aggressiveness factor + around the power management hierarchy. + + A policy-maker may want to intercept this call if it needs to do something with the + new factor, like change its idle timeout, for example. A policy-maker which does + intercept should call setAggressiveness in its superclass, though. + @param type + There are several aggressiveness factors which can be broadcast. One is a general + aggressiveness factor, and the others are specific to parts of the system, like the + hard drive or the display. A policy-maker takes action only on a factor that applies + to its policy. These factor types (e.g. kPMSetGeneralAggressiveness) are defined + in pwr_mgt/IOPM.h. + @param newLevel + This is the aggressiveness factor's new value. + @result + setAggressiveness returns IOPMNoErr. */ + virtual IOReturn setAggressiveness ( unsigned long, unsigned long newLevel ); + + /*! @function getAggressiveness + Return the current aggressiveness value for the given type. + */ + virtual IOReturn getAggressiveness ( unsigned long, unsigned long * ); + + /*! @function systemWake + The parent of a policy-maker calls it here while broadcasting a system wake event. + + A policy-maker must intercept this call if its device can wake the system from sleep. + It should check to see if its device did in fact wake the system, and if so, treat the + waking action as activity: it should request power from its parent to keep the system + up until it idles again. 
+
+ A policy-maker which does intercept should call systemWake in its superclass.
+ @result
+ systemWake returns IOPMNoErr. */
+ virtual IOReturn systemWake ( void );
+
+ /*! @function temperatureCriticalForZone
+ A policy-maker calls its parent power domain to alert it to critical temperature in
+ some thermal zone.
+ @param whichZone
+ This is a pointer to the IOService policy-maker for the thermal zone which has
+ reported critical temperature.
+ @result
+ temperatureCriticalForZone returns IOPMNoErr. */
+ virtual IOReturn temperatureCriticalForZone ( IOService * whichZone );
+
+/*! @function youAreRoot
+ The Platform Expert instantiates the root power domain IOService and
+ calls it here to inform it that it is the root power domain.
+ (The only difference between the root domain and any other power domain
+ is that the root has no parent and therefore never calls it.) */
+ virtual IOReturn youAreRoot ( void );
+
+/*! @function setPowerParent
+ The Platform Expert or some other IOService calls a policy-maker here to
+ inform it who its parent is in the power management hierarchy. This is
+ part of the process of attaching a policy-maker into the hierarchy.
+ @param theParent
+ This is a pointer to the parent IOService power domain.
+ @param stateKnown
+ This is true if the parent knows its power state. (It would not if it doesn't yet
+ have a parent or a controlling driver)
+ @param currentState
+ If the stateKnown parameter is true, these flags describe the character of
+ power in the power domain. If the policy-maker has a controlling driver,
+ the policy-maker asks the driver, given this power domain state,
+ what state it would be in, and then it tells the driver to assume that state. */
+ virtual IOReturn setPowerParent ( IOPowerConnection * theParent, bool stateKnown, IOPMPowerFlags currentState );
+
+/*!
@function addPowerChild + The Platform Expert or some other IOService calls a power domain policy-maker + here to introduce it to a child of it, a member of the domain. + @param theChild + This is a pointer to the child IOService, which is another power domain policy-maker + or a device policy-maker. */ + virtual IOReturn addPowerChild ( IOService * theChild ); + +/*! @function removePowerChild + A power domain policy-maker is called here to tell it that one of its enclosed members + is disappearing. This happens when a device policy-maker hands off its responsibility + to another policy-maker or when its device disappears. */ + virtual IOReturn removePowerChild ( IOPowerConnection * theChild ); + +/* @function command_received + */ + virtual void command_received ( void *, void * , void * , void *); + +/* @function start_PM_idle_timer + */ + virtual void start_PM_idle_timer ( void ); + +/* @function PM_idle_timer_expiration + */ + virtual void PM_idle_timer_expiration ( void ); + +/* @function PM_Clamp_Timer_Expired + */ + virtual void PM_Clamp_Timer_Expired (void); + +/*! @function setIdleTimerPeriod + A policy-maker which uses the type 1 idleness determination provided by IOService + calls its superclass here to set or change the idle timer period. + + See activityTickle for a description of this idleness determination. + @param period + This is the desired idle timer period in seconds. + @result + The normal return is IOPMNoErr, but it is possible to return kIOReturnError if there + was difficulty creating the timer event or the command queue, for example (which is + done only on the first call.) */ + virtual IOReturn setIdleTimerPeriod ( unsigned long ); + +/*! 
@function getPMworkloop + */ + virtual IOWorkLoop *getPMworkloop ( void ); + +/* @function ack_timer_ticked + */ + void ack_timer_ticked ( void ); + +/* @function settleTimerExpired + */ + void settleTimerExpired ( void ); + + IOReturn serializedAllowPowerChange2 ( unsigned long ); + IOReturn serializedCancelPowerChange2 ( unsigned long ); + +// implemented by power-controlling driver... + +/*! @function setPowerState + A policy-maker (usually its superclass) calls its controlling driver here to change + the power state of its device. + @param powerStateOrdinal + This is the number in the power state array of the state the driver is being + instructed to switch to. + @param whatDevice + This is a pointer to the policy-maker. It is useful when a single power-controlling + driver controls multiple devices and needs to know for which device it is being + called. + @result + The driver returns IOPMAckImplied if it has complied with the request when it + returns. If it has started the process of changing power state but not finished + it, it should return a number of microseconds which is an upper limit of the time + it will need to finish. Then, when it has completed the power switch, it should + call acknowledgeSetPowerState in the policy-maker. */ +virtual IOReturn setPowerState ( unsigned long powerStateOrdinal, IOService* whatDevice ); + +/*! @function clampPowerOn + This method sets the device to the highest power state and ensures it stays there + until a timer of duration length expires. + */ +virtual void clampPowerOn (unsigned long duration); + +/*! @function maxCapabilityForDomainState + A policy-maker (usually its superclass) calls its controlling driver here to find out + the highest power state possible for a given power domain state. This happens + when the power domain is changing state and the policy-maker wants to find + out what states the device is capable of in the new domain state. 
+ @param domainState + These flags describe the character of domain power in some domain power state. + The flags are not understood by the calling policy-maker; they were passed to it + by its power domain parent. They come from the outputPowerCharacter field + of a state in the power domain's power state array. + + This method is implemented in a simple way in IOService. It scans the power state + array looking for the highest state whose inputPowerRequirement field exactly + matches the parameter. If more intelligent determination is required, the + power-controlling driver should implement the method and override the superclass. + @result + A state number is returned. */ +virtual unsigned long maxCapabilityForDomainState ( IOPMPowerFlags domainState ); + +/*! @function initialPowerStateForDomainState + A policy-maker (usually its superclass) calls its controlling driver here to find out + which power state the device is in, given the current power domain state. This + happens once, when the policy-maker is initializing, and the controlling driver + can use this to know what state the device is in initially. + @param domainState + These flags describe the character of domain power in the current state of the + power domain. The flags are not understood by the calling policy-maker; they + were passed to it by its power domain parent. They come from the + outputPowerCharacter field of the current power state in the power domain's + power state array. + + This method is implemented in a simple way in IOService. It scans the power state + array looking for the highest state whose inputPowerRequirement field exactly + matches the parameter. If more intelligent determination is required, the + power-controlling driver should implement the method and override the superclass. + @result + A state number is returned. */ +virtual unsigned long initialPowerStateForDomainState ( IOPMPowerFlags ); + +/*! 
@function powerStateForDomainState + A policy-maker (usually its superclass) calls its controlling driver here to find out + what power state the device would be in for a given power domain state. This + happens when the power domain is changing state and the policy-maker wants + to find out the effect of the change. + @param domainState + These flags describe the character of domain power in some domain power state. + The flags are not understood by the calling policy-maker; they were passed to it + by its power domain parent. They come from the outputPowerCharacter field + of a state in the power domain's power state array. + + This method is implemented in a simple way in IOService. It scans the power state + array looking for the highest state whose inputPowerRequirement field exactly + matches the parameter. If more intelligent determination is required, the + power-controlling driver should implement the method and override the superclass. + @result + A state number is returned. */ +virtual unsigned long powerStateForDomainState ( IOPMPowerFlags domainState ); + +/*! @function powerStateWillChangeTo + A policy-maker informs interested parties that its device is about to change to + a different power state. Interested parties are those that have registered for + this notification via registerInterestedDriver and also the power-controlling + driver which is registered as an interested driver automatically when it registers + as the controlling driver. + @param capabilities + These flags describe the capability of the device in the new power state. They + are not understood by the policy-maker; they come from the capabilityFlags field + of the new state in the power state array. + @param stateNumber + This is the number of the state in the state array that the device is switching to. + @param whatDevice + This points to the policy-maker, and it is used by a driver which is receiving power + state change notifications for multiple devices. 
+ @result + The driver returns IOPMAckImplied if it has prepared for the power change when it + returns. If it has started preparing but not finished, it should return a number of + microseconds which is an upper limit of the time it will need to finish preparing. + Then, when it has completed its preparations, it should call acknowledgePowerChange + in the policy-maker. */ +virtual IOReturn powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService* ); + +/*! @function powerStateDidChangeTo + A policy-maker informs interested parties that its device has changed to + a different power state. Interested parties are those that have registered for + this notification via registerInterestedDriver and also the power-controlling + driver which is registered as an interested driver automatically when it registers + as the controlling driver. + @param capabilities + These flags describe the capability of the device in the new power state. They + are not understood by the policy-maker; they come from the capabilityFlags field + of the new state in the power state array. + @param stateNumber + This is the number of the state in the state array that the device has switched to. + @param whatDevice + This points to the policy-maker, and it is used by a driver which is receiving power + state change notifications for multiple devices. + @result + The driver returns IOPMAckImplied if it has prepared for the power change when it + returns. If it has started preparing but not finished, it should return a number of + microseconds which is an upper limit of the time it will need to finish preparing. + Then, when it has completed its preparations, it should call acknowledgePowerChange + in the policy-maker. */ +virtual IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService* ); + +/*! @function didYouWakeSystem + A policy-maker calls its power driver here to ask if its device is the one + which just woke the system from sleep. 
+ @result + The driver returns true if it did wake the system and false if it didn't. */ +virtual bool didYouWakeSystem ( void ); + +/*! @function newTemperature + A thermal-zone driver calls its policy-maker here to tell it that the temperature in + the zone has changed. The thermal-zone policy-maker uses this information to + manage its thermal zone. + @param currentTemp + This is the new temperature in the thermal zone. + @param whichZone + This is a pointer to the controlling driver. + */ +virtual IOReturn newTemperature ( long currentTemp, IOService * whichZone ); + + virtual bool askChangeDown ( unsigned long ); + virtual bool tellChangeDown ( unsigned long ); + virtual void tellNoChangeDown ( unsigned long ); + virtual void tellChangeUp ( unsigned long ); + virtual IOReturn allowPowerChange ( unsigned long refcon ); + virtual IOReturn cancelPowerChange ( unsigned long refcon ); + +// ...implemented by power-controlling driver + + protected: +/*! @function changePowerStateToPriv + A policy-maker calls its superclass here to change the power state of the device. + The superclass takes care of making sure the power domain state is appropriate + and informing interested parties. It calls the controlling driver to make the change. + @param ordinal + This is the number, in the power state array, of the desired power state. + @result + The return code reflects the state of the policy-maker's internal queue of power + changes and can be ignored by the caller. + */ + IOReturn changePowerStateToPriv ( unsigned long ordinal ); + +/*! @function powerOverrideOnPriv + A policy-maker normally keeps its device at the highest state required by itself, + its power-controlling driver, and its children (when the power domain state + allows). There may be times, however, when a policy-maker needs the power + state lower than its driver or its children desire, and when this is the case, it + calls powerOverrideOnPriv in its superclass to enable this override. 
When the override + is on, the superclass keeps the device in the state desired by the policy-maker + (requested via changePowerStateToPriv), regardless of the children's or driver's desire. + Turning on the override will initiate a power change if the policy-maker's desired + power state is different from the maximum of the controlling driver's desire and + the children's desires. + @result + The return code reflects the state of the policy-maker's internal queue of power + changes and can be ignored by the caller. */ + IOReturn powerOverrideOnPriv ( void ); + +/*! @function powerOverrideOffPriv + When a policy-maker has enabled the override, it can disable it again by calling + this method in its superclass. This will allow the superclass to keep the device + at the highest state required by itself, its power-controlling driver, and its + children (when the power domain state allows). Turning off the override + will initiate a power change if the policy-maker's desired power state is different + from the maximum of the controlling driver's desire and the children's desires. + @result + The return code reflects the state of the policy-maker's internal queue of power + changes and can be ignored by the caller. */ + IOReturn powerOverrideOffPriv ( void ); + + /*! @function powerChangeDone + A policy-maker calls itself here when a power change is completely done, when + all interested parties have acknowledged the powerStateDidChangeTo call. + The implementation here is null; the method is meant to be overridden by + subclassed policy-makers, and that is how one finds out that a power change + it initiated is complete + @param stateNumber + This is the number of the state in the state array that the device has switched to. 
*/ + virtual void powerChangeDone ( unsigned long ); + + bool tellClientsWithResponse ( int messageType ); + void tellClients ( int messageType ); + +private: + + IOReturn enqueuePowerChange ( unsigned long, unsigned long, unsigned long, IOPowerConnection * ); + IOReturn notifyAll ( bool is_prechange ); + bool notifyChild ( IOPowerConnection * nextObject, bool is_prechange ); + bool inform ( IOPMinformee * nextObject, bool is_prechange ); + void our_prechange_03 ( void ); + void our_prechange_05 ( void ); + void our_prechange_1 ( void ); + void our_prechange_2 ( void ); + void our_prechange_3 ( void ); + void our_prechange_4 ( void ); + IOReturn parent_down_0 ( void ); + void parent_down_05 ( void ); + IOReturn parent_down_1 ( void ); + IOReturn parent_down_2 ( void ); + void parent_down_3 ( void ); + void parent_down_4 ( void ); + void parent_down_5 ( void ); + void parent_down_6 ( void ); + void parent_up_0 ( void ); + IOReturn parent_up_1 ( void ); + IOReturn parent_up_2 ( void ); + IOReturn parent_up_3 ( void ); + void parent_up_4 ( void ); + void parent_up_5 ( void ); + void parent_up_6 ( void ); + void all_done ( void ); + void all_acked ( void ); + void driver_acked ( void ); + void start_ack_timer ( void ); + void stop_ack_timer ( void ); + unsigned long compute_settle_time ( void ); + IOReturn startSettleTimer ( unsigned long delay ); + IOReturn changeState ( void ); + IOReturn add_child_to_active_change ( IOPowerConnection * ); + IOReturn add_driver_to_active_change ( IOPMinformee * ); + IOReturn instruct_driver ( unsigned long newState ); + bool acquire_lock ( void ); + IOReturn start_parent_change ( unsigned long queue_head ); + void start_our_change ( unsigned long queue_head ); + IOReturn ask_parent ( unsigned long requestedState ); + bool checkForDone ( void ); + bool responseValid ( unsigned long x ); + IOReturn allowCancelCommon ( void ); +}; + +#endif /* ! 
_IOKIT_IOSERVICE_H */ diff --git a/iokit/IOKit/IOServicePM.h b/iokit/IOKit/IOServicePM.h new file mode 100644 index 000000000..7f958c7c9 --- /dev/null +++ b/iokit/IOKit/IOServicePM.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +extern "C" { +#include +} + +#include +#include +class IOPMinformee; +class IOPMinformeeList; +class IOPMchangeNoteList; +class IOPMpmChild; +class IOWorkLoop; +class IOCommandQueue; +class IOCommandGate; +class IOTimerEventSource; +class IOPlatformExpert; + +#include + + +/*! +@defined ACK_TIMER_PERIOD +@discussion When an IOService is waiting for acknowledgement to a power state change +notification from an interested driver or the controlling driver its ack timer is ticking every tenth of a second. +(100000000 nanoseconds are one tenth of a second). +*/ + #define ACK_TIMER_PERIOD 100000000 + + + +/*! +@class IOPMpriv : public OSObject +@abstract Private power management private instance variables for IOService objects. 
+*/ +class IOPMpriv : public OSObject +{ + friend class IOService; + + OSDeclareDefaultStructors(IOPMpriv) + + public: + +/*! @field we_are_root TRUE if this device is the root power domain */ + bool we_are_root; + + /*! @field interestedDrivers list of interested drivers */ + IOPMinformeeList * interestedDrivers; + + /*! @field children list of power domain children */ + IOPMinformeeList * children; + + /*! @field changeList list of pending power state changes */ + IOPMchangeNoteList * changeList; + + /*! @field driver_timer timeout on waiting for controlling driver to acknowledgeSetPowerState */ + IOReturn driver_timer; + + /*! @field ackTimer */ + thread_call_t ackTimer; + + /*! @field settleTimer */ + thread_call_t settleTimer; + + /*! @field machine_state state number of state machine processing current change note */ + unsigned long machine_state; + + /*! @field settle_time settle timer after changing power state */ + unsigned long settle_time; + + /*! @field head_note ordinal of change note currently being processed */ + long head_note; + + /*! @field head_note_flags copy of flags field in change note currently being processed*/ + unsigned long head_note_flags; + + /*! @field head_note_state copy of newStateNumber field in change note currently being processed */ + unsigned long head_note_state; + + /*! @field head_note_outputFlags outputPowerCharacter field from change note currently being processed */ + unsigned long head_note_outputFlags; + + /*! @field head_note_domainState power domain flags from parent... (only on parent change) */ + unsigned long head_note_domainState; + + /*! @field head_note_parent pointer to initiating parent... (only on parent change) */ + IOPowerConnection * head_note_parent; + + /*! @field head_note_capabilityFlags copy of capabilityFlags field in change note currently being processed */ + unsigned long head_note_capabilityFlags; + + /*! 
@field head_note_pendingAcks number of acks we are waiting for during notification */ + unsigned long head_note_pendingAcks; + + /*! @field our_lock used to control access to head_note_pendingAcks and driver_timer */ + IOLock * our_lock; + + /*! @field flags_lock used to control access to response flags array */ + IOLock * flags_lock; + + /*! @field initial_change true forces first state to be broadcast even if it isn't a change */ + bool initial_change; + + /*! @field need_to_become_usable someone called makeUsable before we had a controlling driver */ + bool need_to_become_usable; + + /*! @field device_overrides state changes are made based only on subclass's desire */ + bool device_overrides; + + /*! @field clampOn domain is clamped on till first child registers */ + bool clampOn; + + /*! @field owner points to object which made this struct. Used for debug output only */ + IOService * owner; + + /*! @field activityLock used to protect activity flag */ + IOLock * activityLock; + + /*! @field timerEventSrc an idle timer */ + IOTimerEventSource * timerEventSrc; + + /*! @field idle_timer_period its period in seconds */ + unsigned long idle_timer_period; + + /*! @field clampTimerEventSrc timer for clamping power on */ + IOTimerEventSource * clampTimerEventSrc; + + /*! @field device_active true: there has been device activity since last idle timer expiration */ + bool device_active; + + /*! @field driverDesire +This is the power state desired by our controlling driver. It is initialized to myCurrentState and is changed +when the controlling driver calls changePowerStateTo. A change in driverDesire may cause a change in ourDesiredPowerState. +*/ + unsigned long driverDesire; + + + + /*! @field deviceDesire +This is the power state desired by a subclassed device object. It is initialized to myCurrentState and is changed +when the subclassed object calls changePowerStateToPriv. A change in deviceDesire may cause a change in ourDesiredPowerState. 
+*/ + unsigned long deviceDesire; + + + + /*! @field ourDesiredPowerState +This is the power state we desire currently. If equal to myCurrentState, we're happy. +Otherwise, we're waiting for the parent to raise the power domain to at least this level. + +If this is a power domain, this is the maximum of all our children's desires, driverDesire, and deviceDesire. +It increases when: +a child asks for more power via requestDomainState, +the controlling driver asks for more power via changePowerStateTo + +It decreases when: +we lose a child and the child had the highest power need of all our children, +the child with the highest power need suggests a lower power domain state, +the controlling driver asks for lower power for some reason via changePowerStateTo + +If this is not a power domain, ourDesiredPowerState represents the greater of driverDesire and deviceDesire. +It increases when: +the controlling driver asks for more power via changePowerStateTo +some driver calls makeUsable +a subclassed object asks for more power via changePowerStateToPriv + +It decreases when: +the controlling driver asks for lower power for some reason via changePowerStateTo +a subclassed object asks for lower power for some reason via changePowerStateToPriv +*/ + unsigned long ourDesiredPowerState; + + + /*! @field previousRequest +This is a reminder of what our parent thinks our need is. Whenever it changes, +we call requestDomainState in the parent to keep it current. It is usually equal to ourDesiredPowerState +except while a power change is in progress. +*/ + unsigned long previousRequest; + + + /*! @field askingFor +Used by activityTickle so it doesn't try to raise the device to a lower state than +what it may have previously requested. +*/ + unsigned long askingFor; + + + /*! @field imminentState +Usually the same as myCurrentState, except right after calling powerStateWillChangeTo. +*/ + unsigned long imminentState; + + /*! 
@function serialize +Serialize private instance variables for debug output (IORegistryDumper). +*/ + virtual bool serialize(OSSerialize *s) const; + +}; + + + + +/*! +@class IOPMprot : public OSObject +@abstract Protected power management instance variables for IOService objects. +*/ +class IOPMprot : public OSObject //management +{ + friend class IOService; + + OSDeclareDefaultStructors(IOPMprot) + + public: + + /*! @field ourName from getName(), used in logging */ + const char * ourName; + + /*! @field thePlatform from getPlatform, used in logging and registering */ + IOPlatformExpert * thePlatform; + + /*! @field theNumberOfPowerStates the number of states in the array */ + unsigned long theNumberOfPowerStates; // the number of states in the array + + /*! @field thePowerStates the array */ + IOPMPowerState thePowerStates[IOPMMaxPowerStates]; + + /*! @field theControllingDriver points to the controlling driver */ + IOService * theControllingDriver; + + /*! @field aggressiveness current value of power management aggressiveness */ + unsigned long aggressiveness; + + /*! @field current_aggressiveness_values array of aggressiveness values */ + unsigned long current_aggressiveness_values [kMaxType+1]; + + /*! @field current_aggressiveness_validity true for values that are currently valid */ + bool current_aggressiveness_valid [kMaxType+1]; + + /*! @field myCurrentState the ordinal of our current power state */ + unsigned long myCurrentState; + + /*! @field parentsKnowState true if all our parents know the state of their power domain */ + bool parentsKnowState; + + /*! @field parentsCurrentPowerFlags logical OR of power flags for the current state of each power domainparent */ + IOPMPowerFlags parentsCurrentPowerFlags; + + /*! @field maxCapability ordinal of highest state we can achieve in current power domain state */ + unsigned long maxCapability; + + /*! @field PMworkloop points to the single power management workloop */ + IOWorkLoop * PMworkloop; + + /*! 
@field commandQueue used to serialize idle-power-down and busy-power-up */ + IOCommandQueue * commandQueue; + + /*! @field PMcommandGate used to serialize timer expirations and incoming acknowledgements */ + IOCommandGate * PMcommandGate; + + /*! @field myCharacterFlags logical OR of all output power character flags in the array */ + IOPMPowerFlags myCharacterFlags; + + /*! @field serialNumber used to uniquely identify power management notification to apps and clients */ + UInt16 serialNumber; + + /*! @field responseFlags points to an OSArray which manages responses from notified apps and clients */ + OSArray* responseFlags; + + /*! @field doNotPowerDown keeps track of any negative responses from notified apps and clients */ + bool doNotPowerDown; + + /*! @function serialize +Serialize protected instance variables for debug output (IORegistryDumper). +*/ + virtual bool serialize(OSSerialize *s) const; + +}; + diff --git a/iokit/IOKit/IOSharedLock.h b/iokit/IOKit/IOSharedLock.h new file mode 100644 index 000000000..0f9e7bf33 --- /dev/null +++ b/iokit/IOKit/IOSharedLock.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +/* + * Multiprocessor locks used within the shared memory area between the + * kernel and event system. These must work in both user and kernel mode. + * + * These routines are public, for the purpose of writing frame buffer device + * drivers which handle their own cursors. Certain architectures define a + * generic display class which handles cursor drawing and is subclassed by + * driver writers. These drivers need not be concerned with the following + * types and definitions. + * + * The ev_lock(), ev_unlock(), and ev_try_lock() functions are available only + * to drivers built in or dynamically loaded into the kernel, and to DPS + * drivers built in or dynamically loaded into the Window Server. They do not + * exist in any shared library. + * + * --> They're now in IOKit user lib. + */ + +#ifndef _IOKIT_IOSHAREDLOCK_H +#define _IOKIT_IOSHAREDLOCK_H + +#ifdef __cplusplus +extern "C" { +#endif + +// should be 32 bytes on PPC +typedef volatile int IOSharedLockData; +typedef IOSharedLockData * IOSharedLock; + +#define IOSpinLockInit(l) (*(l) = (IOSharedLockData)0) + +extern void IOSpinLock(IOSharedLock l); +extern void IOSpinUnlock(IOSharedLock l); +extern boolean_t IOTrySpinLock(IOSharedLock l); + +/* exact same stuff & implementation */ + +typedef IOSharedLockData ev_lock_data_t; +typedef ev_lock_data_t * ev_lock_t; + +#define ev_init_lock(l) (*(l) = (ev_lock_data_t)0) +// needs isync? +//#define ev_is_locked(l) (*(l) != (ev_lock_data_t)0) + +extern void ev_lock(ev_lock_t l); // Spin lock! +extern void ev_unlock(ev_lock_t l); +extern boolean_t ev_try_lock(ev_lock_t l); + +#ifdef __cplusplus +} +#endif +#endif /* ! 
_IOKIT_IOSHAREDLOCK_H */ diff --git a/iokit/IOKit/IOSyncer.h b/iokit/IOKit/IOSyncer.h new file mode 100644 index 000000000..3bb2df5ac --- /dev/null +++ b/iokit/IOKit/IOSyncer.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOSYNCER_H +#define _IOSYNCER_H + +#include +#include +#include + +class IOSyncer : public OSObject +{ + OSDeclareDefaultStructors(IOSyncer) + +private: + // The spin lock that is used to guard the 'threadMustStop' variable. 
+ IOSimpleLock *guardLock; + volatile bool threadMustStop; + IOReturn fResult; + virtual void free(); + virtual void privateSignal(); + +public: + + static IOSyncer * create(bool twoRetains = true); + + virtual bool init(bool twoRetains); + virtual void reinit(); + virtual IOReturn wait(bool autoRelease = true); + virtual void signal(IOReturn res = kIOReturnSuccess, + bool autoRelease = true); +}; + +#endif /* !_IOSYNCER_H */ + diff --git a/iokit/IOKit/IOTimeStamp.h b/iokit/IOKit/IOTimeStamp.h new file mode 100644 index 000000000..3e193aa77 --- /dev/null +++ b/iokit/IOKit/IOTimeStamp.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef IOKIT_IOTIMESTAMP_H +#define IOKIT_IOTIMESTAMP_H + +#include +#include + +static inline void +IOTimeStampStartConstant(unsigned int csc, + unsigned int a = 0, unsigned int b = 0, + unsigned int c = 0, unsigned int d = 0) +{ + KERNEL_DEBUG_CONSTANT(csc | DBG_FUNC_START, a, b, c, d, 0); +} + +static inline void +IOTimeStampEndConstant(unsigned int csc, + unsigned int a = 0, unsigned int b = 0, + unsigned int c = 0, unsigned int d = 0) +{ + KERNEL_DEBUG_CONSTANT(csc | DBG_FUNC_END, a, b, c, d, 0); +} + +static inline void +IOTimeStampConstant(unsigned int csc, + unsigned int a = 0, unsigned int b = 0, + unsigned int c = 0, unsigned int d = 0) +{ + KERNEL_DEBUG_CONSTANT(csc | DBG_FUNC_NONE, a, b, c, d, 0); +} + +#if KDEBUG + +static inline void +IOTimeStampStart(unsigned int csc, + unsigned int a = 0, unsigned int b = 0, + unsigned int c = 0, unsigned int d = 0) +{ + KERNEL_DEBUG(csc | DBG_FUNC_START, a, b, c, d, 0); +} + +static inline void +IOTimeStampEnd(unsigned int csc, + unsigned int a = 0, unsigned int b = 0, + unsigned int c = 0, unsigned int d = 0) +{ + KERNEL_DEBUG(csc | DBG_FUNC_END, a, b, c, d, 0); +} + +static inline void +IOTimeStamp(unsigned int csc, + unsigned int a = 0, unsigned int b = 0, + unsigned int c = 0, unsigned int d = 0) +{ + KERNEL_DEBUG(csc | DBG_FUNC_NONE, a, b, c, d, 0); +} + +#endif /* KDEBUG */ + +#define IODBG_SCSI(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSCSI, code)) +#define IODBG_DISK(code) (KDBG_CODE(DBG_IOKIT, DBG_IODISK, code)) +#define IODBG_NETWORK(code) (KDBG_CODE(DBG_IOKIT, DBG_IONETWORK, code)) +#define IODBG_KEYBOARD(code) (KDBG_CODE(DBG_IOKIT, DBG_IOKEYBOARD, code)) +#define IODBG_POINTING(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPOINTING, code)) +#define IODBG_AUDIO(code) (KDBG_CODE(DBG_IOKIT, DBG_IOAUDIO, code)) +#define IODBG_FLOPPY(code) (KDBG_CODE(DBG_IOKIT, DBG_IOFLOPPY, code)) +#define IODBG_SERIAL(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSERIAL, code)) +#define IODBG_TTY(code) 
(KDBG_CODE(DBG_IOKIT, DBG_IOTTY, code)) + +/* IOKit infrastructure subclasses */ +#define IODBG_WORKLOOP(code) (KDBG_CODE(DBG_IOKIT, DBG_IOWORKLOOP, code)) +#define IODBG_INTES(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINTES, code)) +#define IODBG_TIMES(code) (KDBG_CODE(DBG_IOKIT, DBG_IOCLKES, code)) +#define IODBG_CMDQ(code) (KDBG_CODE(DBG_IOKIT, DBG_IOCMDQ, code)) +#define IODBG_MCURS(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMCURS, code)) +#define IODBG_MDESC(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMDESC, code)) + +/* IOKit specific codes - within each subclass */ + +/* DBG_IOKIT/DBG_IOSCSI codes */ + +/* DBG_IOKIT/DBG_IODISK codes */ + +/* DBG_IOKIT/DBG_IONETWORK codes */ + +/* DBG_IOKIT/DBG_IOKEYBOARD codes */ + +/* DBG_IOKIT/DBG_IOPOINTING codes */ + +/* DBG_IOKIT/DBG_IOAUDIO codes */ + +/* DBG_IOKIT/DBG_IOFLOPPY codes */ + +/* DBG_IOKIT/DBG_IOSERIAL codes */ + +/* DBG_IOKIT/DBG_IOTTY codes */ + +/* DBG_IOKIT/DBG_IOWORKLOOP codes */ +#define IOWL_CLIENT 1 /* 0x050a0004 */ +#define IOWL_WORK 2 /* 0x050a0008 */ + +/* DBG_IOKIT/DBG_IOINTES codes */ +#define IOINTES_CLIENT 1 /* 0x050b0004 */ +#define IOINTES_LAT 2 /* 0x050b0008 */ +#define IOINTES_SEMA 3 /* 0x050b000c */ +#define IOINTES_INTCTXT 4 /* 0x050b0010 */ +#define IOINTES_INTFLTR 5 /* 0x050b0014 */ +#define IOINTES_ACTION 6 /* 0x050b0018 */ +#define IOINTES_FILTER 7 /* 0x050b001c */ + +/* DBG_IOKIT/DBG_IOTIMES codes */ +#define IOTIMES_CLIENT 1 /* 0x050c0004 */ +#define IOTIMES_LAT 2 /* 0x050c0008 */ +#define IOTIMES_SEMA 3 /* 0x050c000c */ +#define IOTIMES_ACTION 4 /* 0x050c0010 */ + +/* DBG_IOKIT/DBG_IOCMDQ codes */ +#define IOCMDQ_CLIENT 1 /* 0x050d0004 */ +#define IOCMDQ_LAT 2 /* 0x050d0008 */ +#define IOCMDQ_SEMA 3 /* 0x050d000c */ +#define IOCMDQ_PSEMA 4 /* 0x050d0010 */ +#define IOCMDQ_PLOCK 5 /* 0x050d0014 */ +#define IOCMDQ_ACTION 6 /* 0x050d0018 */ + +/* DBG_IOKIT/DBG_IOMCURS codes */ + +/* DBG_IOKIT/DBG_IOMDESC codes */ + +#endif /* ! 
IOKIT_IOTIMESTAMP_H */ diff --git a/iokit/IOKit/IOTimerEventSource.h b/iokit/IOKit/IOTimerEventSource.h new file mode 100644 index 000000000..fd7335821 --- /dev/null +++ b/iokit/IOKit/IOTimerEventSource.h @@ -0,0 +1,217 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOTimerEventSource.h + * + * HISTORY + * 2-Feb-1999 Joe Liu (jliu) created. + * + */ + +#ifndef _IOTIMEREVENTSOURCE +#define _IOTIMEREVENTSOURCE + +#include + +__BEGIN_DECLS +#include +__END_DECLS + +#include +#include + +/*! + @class IOTimerEventSource : public IOEventSource + @abstract Time based event source mechanism. + @discussion An event source that implements a simple timer. A timeout handler is called once the timeout period expires. This timeout handler will be called by the work-loop that this event source is attached to. +

+ Usually a timer event source will be used to implement a timeout. In general when a driver makes a request it will need to setup a call to keep track of when the I/O doesn't complete. This class is designed to make that somewhat easier. +

+ Remember the system doesn't guarantee the accuracy of the callout. It is possible that a higher priority thread is running which will delay the execution of the action routine. In fact the thread will be made runnable at the exact requested time, within the accuracy of the CPU's decrementer based interrupt, but the scheduler will then control execution. +*/ +class IOTimerEventSource : public IOEventSource +{ + OSDeclareDefaultStructors(IOTimerEventSource) + +protected: +/*! @var calloutEntry thread_call entry for preregistered thread callouts */ + void *calloutEntry; + +/*! @var abstime time to wake up next, see enable. */ + AbsoluteTime abstime; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capabilities of the IOTimerEventSource in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +/*! @function timeout + @abstract Function that routes the call from the OS' timeout mechanism into a work-loop context. + @discussion timeout will normally not be called nor overridden by a subclass. If the event source is enabled then close the work-loop's gate and call the action routine. + @param self This argument will be cast to an IOTimerEventSource. */ + static void timeout(void *self); + +/*! @function setTimeoutFunc + @abstract Sets timeout as the function of calloutEntry. + @discussion IOTimerEventSource is based upon the kern/thread_call.h APIs currently. This function allocates the calloutEntry member variable by using thread_call_allocate(timeout, this). If you need to write your own subclass of IOTimerEventSource you probably should override this method to allocate an entry that points to your own timeout routine. */ + virtual void setTimeoutFunc(); + +/*! @function free + @abstract Sub-class implementation of free method, frees calloutEntry */ + virtual void free(); + +/*! 
@function checkForWork + @abstract Have to implement it is mandatory in $link IOEventSource, but IOTimerEventSources don't actually use this work-loop mechanism. */ + virtual bool checkForWork(); + +public: + +/*! @typedef Action + @discussion 'C' Function pointer defining the callout routine of this event source. + @param owner Owning target object. Note by a startling coincidence the first parameter in a C callout is currently used to define the target of a C++ member function. + @param sender The object that timed out. */ + typedef void (*Action)(OSObject *owner, IOTimerEventSource *sender); + +/*! @function timerEventSource + @abstract Allocates and returns an initialized timer instance. + @param owner + @param action */ + static IOTimerEventSource * + timerEventSource(OSObject *owner, Action action = 0); + +/*! @function init + @abstract Initializes the timer with an owner, and a handler to call when the timeout expires. + @param owner + @param action */ + virtual bool init(OSObject *owner, Action action = 0); + +/*! @function enable + @abstract Enables a call to the action. + @discussion Allows the action function to be called. If the timer event source was disabled while a call was outstanding and the call wasn't cancelled then it will be rescheduled. So a disable/enable pair will disable calls from this event source. */ + virtual void enable(); + +/*! @function disable + @abstract Disable a timed callout. + @discussion When disable returns the action will not be called until the next time enable(qv) is called. */ + virtual void disable(); + + +/*! @function setTimeoutTicks + @abstract Setup a callback at after the delay in scheduler ticks. See wakeAtTime(AbsoluteTime). + @param interval Delay from now to wake up, in scheduler ticks, whatever that may be. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeoutTicks(UInt32 ticks); + +/*! 
@function setTimeoutMS + @abstract Setup a callback at after the delay in milliseconds. See wakeAtTime(AbsoluteTime). + @param interval Delay from now to wake up, time in milliseconds. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeoutMS(UInt32 ms); + +/*! @function setTimeoutUS + @abstract Setup a callback at after the delay in microseconds. See wakeAtTime(AbsoluteTime). + @param interval Delay from now to wake up, time in microseconds. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeoutUS(UInt32 us); + +/*! @function setTimeout + @abstract Setup a callback at after the delay in some unit. See wakeAtTime(AbsoluteTime). + @param interval Delay from now to wake up in some defined unit. + @param scale_factor Define the unit of interval, default to nanoseconds. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeout(UInt32 interval, + UInt32 scale_factor = kNanosecondScale); + +/*! @function setTimeout + @abstract Setup a callback at after the delay in decrementer ticks. See wakeAtTime(AbsoluteTime). + @param interval Delay from now to wake up. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeout(mach_timespec_t interval); + +/*! @function setTimeout + @abstract Setup a callback at after the delay in decrementer ticks. See wakeAtTime(AbsoluteTime). + @param interval Delay from now to wake up in decrementer ticks. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeout(AbsoluteTime interval); + +/*! @function wakeAtTimeTicks + @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). 
+ @param abstime Time to wake up in scheduler quantums, whatever that is? + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTimeTicks(UInt32 ticks); + +/*! @function wakeAtTimeMS + @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). + @param abstime Time to wake up in milliseconds. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTimeMS(UInt32 ms); + +/*! @function wakeAtTimeUS + @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). + @param abstime Time to wake up in microseconds. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTimeUS(UInt32 us); + +/*! @function wakeAtTime + @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). + @param abstime Time to wake up in some unit. + @param scale_factor Define the unit of abstime, default to nanoseconds. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTime(UInt32 abstime, + UInt32 scale_factor = kNanosecondScale); + +/*! @function wakeAtTime + @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). + @param abstime mach_timespec_t of the desired callout time. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTime(mach_timespec_t abstime); + +/*! @function wakeAtTime + @abstract Setup a callback at this absolute time. + @discussion Starts the timer, which will expire at abstime. After it expires, the timer will call the 'action' registered in the init() function. This timer is not periodic, a further call is needed to reset and restart the timer after it expires. 
+ @param abstime Absolute Time when to wake up, counted in 'decrementer' units and starts at zero when system boots. + @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared by init or IOEventSource::setAction (qqv). */ + virtual IOReturn wakeAtTime(AbsoluteTime abstime); + +/*! @function cancelTimeout + @abstract Disable any outstanding calls to this event source. + @discussion Clear down any outstanding calls. By the time this function completes it is guaranteed that the action will not be called again. */ + virtual void cancelTimeout(); + +private: + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 0); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 1); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 2); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 3); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 4); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 5); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 6); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 7); +}; + +#endif /* !_IOTIMEREVENTSOURCE */ diff --git a/iokit/IOKit/IOTypes.h b/iokit/IOKit/IOTypes.h new file mode 100644 index 000000000..6a5f344f2 --- /dev/null +++ b/iokit/IOKit/IOTypes.h @@ -0,0 +1,229 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef __IOKIT_IOTYPES_H +#define __IOKIT_IOTYPES_H + +#ifndef IOKIT +#define IOKIT 1 +#endif /* !IOKIT */ + +#if KERNEL +#include +#else +#include +#include +#endif + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef NULL +#define NULL 0 +#endif + +/* + * Simple data types. + */ +#ifndef __MACTYPES__ /* CF MacTypes.h */ +#ifndef __TYPES__ /* guess... Mac Types.h */ + +#include + +#ifndef __cplusplus +#if !TYPE_BOOL +#ifdef KERNEL +typedef int bool; +enum { + false = 0, + true = 1 +}; +#endif +#endif +#endif + +#endif /* __TYPES__ */ +#endif /* __MACTYPES__ */ + +typedef UInt32 IOOptionBits; +typedef SInt32 IOFixed; +typedef UInt32 IOVersion; +typedef UInt32 IOItemCount; +typedef UInt32 IOCacheMode; + +typedef UInt32 IOByteCount; + +typedef vm_address_t IOVirtualAddress; +typedef IOVirtualAddress IOLogicalAddress; + +#if 0 + +typedef UInt64 IOPhysicalAddress; +typedef UInt64 IOPhysicalLength; +#define IOPhysical32( hi, lo ) ((UInt64) lo + ((UInt64)(hi) << 32)) +#define IOPhysSize 64 + +#else + +typedef UInt32 IOPhysicalAddress; +typedef UInt32 IOPhysicalLength; +#define IOPhysical32( hi, lo ) (lo) +#define IOPhysSize 32 + +#endif + +#if __cplusplus +struct IOVirtualRange +{ + IOVirtualAddress address; + IOByteCount length; +}; +#else +typedef struct +{ + IOVirtualAddress address; + IOByteCount length; +} IOVirtualRange; +#endif + +/* + * Map between #defined or 
enum'd constants and text description. + */ +typedef struct { + int value; + const char *name; +} IONamedValue; + + +/* + * Memory alignment -- specified as a power of two. + */ +typedef unsigned int IOAlignment; + +#define IO_NULL_VM_TASK ((vm_task_t)0) + + +/* + * Pull in machine specific stuff. + */ + +//#include + +#ifndef MACH_KERNEL + +#ifndef __IOKIT_PORTS_DEFINED__ +#define __IOKIT_PORTS_DEFINED__ +#ifdef KERNEL +typedef struct OSObject * io_object_t; +#else /* KERNEL */ +typedef mach_port_t io_object_t; +#endif /* KERNEL */ +#endif /* __IOKIT_PORTS_DEFINED__ */ + +#include + +typedef io_object_t io_connect_t; +typedef io_object_t io_iterator_t; +typedef io_object_t io_registry_entry_t; +typedef io_object_t io_service_t; + +typedef io_object_t io_enumerator_t; + +#endif /* MACH_KERNEL */ + +// IOConnectMapMemory memoryTypes +enum { + kIODefaultMemoryType = 0 +}; + +enum { + kIODefaultCache = 0, + kIOInhibitCache = 1, + kIOWriteThruCache = 2, + kIOCopybackCache = 3 +}; + +// IOMemory mapping options +enum { + kIOMapAnywhere = 0x00000001, + + kIOMapCacheMask = 0x00000300, + kIOMapCacheShift = 8, + kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, + kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, + kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, + kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, + + kIOMapUserOptionsMask = 0x00000fff, + + kIOMapReadOnly = 0x00001000, + + kIOMapStatic = 0x01000000, + kIOMapReference = 0x02000000 +}; + +/*! @enum Scale Factors + @discussion Used when a scale_factor parameter is required to define a unit of time. + @constant kNanosecondScale Scale factor for nanosecond based times. + @constant kMicrosecondScale Scale factor for microsecond based times. + @constant kMillisecondScale Scale factor for millisecond based times. + @constant kSecondScale Scale factor for second based times. 
*/ + +enum { + kNanosecondScale = 1, + kMicrosecondScale = 1000, + kMillisecondScale = 1000 * 1000, + kSecondScale = 1000 * 1000 * 1000 +}; + +/* compatibility types */ + +#ifndef KERNEL +/* + * Machine-independent caching specification. + */ +typedef enum { + IO_CacheOff, // cache inhibit + IO_WriteThrough, + IO_CopyBack +} IOCache; + +//typedef char OSString[64]; +typedef unsigned int IODeviceNumber; +typedef unsigned int IOObjectNumber; + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* ! __IOKIT_IOTYPES_H */ diff --git a/iokit/IOKit/IOUserClient.h b/iokit/IOKit/IOUserClient.h new file mode 100644 index 000000000..df0686394 --- /dev/null +++ b/iokit/IOKit/IOUserClient.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Changes to this API are expected. 
+ */ + +#ifndef _IOKIT_IOUSERCLIENT_H +#define _IOKIT_IOUSERCLIENT_H + +#include +#include +#include + + +enum { + kIOUCTypeMask = 0x0000000f, + kIOUCScalarIScalarO = 0, + kIOUCScalarIStructO = 2, + kIOUCStructIStructO = 3, + kIOUCScalarIStructI = 4, +}; + +typedef IOReturn (IOService::*IOMethod)(void * p1, void * p2, void * p3, + void * p4, void * p5, void * p6 ); + +typedef IOReturn (IOService::*IOAsyncMethod)(OSAsyncReference asyncRef, + void * p1, void * p2, void * p3, + void * p4, void * p5, void * p6 ); + +typedef IOReturn (IOService::*IOTrap)(void * p1, void * p2, void * p3, + void * p4, void * p5, void * p6 ); + +struct IOExternalMethod { + IOService * object; + IOMethod func; + IOOptionBits flags; + IOByteCount count0; + IOByteCount count1; +}; + +struct IOExternalAsyncMethod { + IOService * object; + IOAsyncMethod func; + IOOptionBits flags; + IOByteCount count0; + IOByteCount count1; +}; + +struct IOExternalTrap { + IOService * object; + IOTrap func; +}; + +enum { + kIOUserNotifyMaxMessageSize = 64 +}; + +// keys for clientHasPrivilege +#define kIOClientPrivilegeAdministrator "root" +#define kIOClientPrivilegeLocalUser "local" + +class IOUserClient : public IOService +{ + OSDeclareAbstractStructors(IOUserClient) + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. 
(Internal use only) */ + ExpansionData * reserved; + +public: + OSSet * mappings; + void * __reserved[8]; + +private: + OSMetaClassDeclareReservedUnused(IOUserClient, 0); + OSMetaClassDeclareReservedUnused(IOUserClient, 1); + OSMetaClassDeclareReservedUnused(IOUserClient, 2); + OSMetaClassDeclareReservedUnused(IOUserClient, 3); + OSMetaClassDeclareReservedUnused(IOUserClient, 4); + OSMetaClassDeclareReservedUnused(IOUserClient, 5); + OSMetaClassDeclareReservedUnused(IOUserClient, 6); + OSMetaClassDeclareReservedUnused(IOUserClient, 7); + OSMetaClassDeclareReservedUnused(IOUserClient, 8); + OSMetaClassDeclareReservedUnused(IOUserClient, 9); + OSMetaClassDeclareReservedUnused(IOUserClient, 10); + OSMetaClassDeclareReservedUnused(IOUserClient, 11); + OSMetaClassDeclareReservedUnused(IOUserClient, 12); + OSMetaClassDeclareReservedUnused(IOUserClient, 13); + OSMetaClassDeclareReservedUnused(IOUserClient, 14); + OSMetaClassDeclareReservedUnused(IOUserClient, 15); + +protected: + static IOReturn sendAsyncResult(OSAsyncReference reference, + IOReturn result, void *args[], UInt32 numArgs); + static void setAsyncReference(OSAsyncReference asyncRef, + mach_port_t wakePort, + void *callback, void *refcon); +public: + + static void initialize( void ); + + static void destroyUserReferences( OSObject * obj ); + + static IOReturn clientHasPrivilege( void * securityToken, + const char * privilegeName ); + + // Currently ignores the all args, just passes up to IOService::init() + virtual bool initWithTask( + task_t owningTask, void * securityToken, UInt32 type, + OSDictionary * properties); + + virtual bool initWithTask( + task_t owningTask, void * securityToken, UInt32 type); + + virtual void free(); + + virtual IOReturn clientClose( void ); + virtual IOReturn clientDied( void ); + + virtual IOService * getService( void ); + + virtual IOReturn registerNotificationPort( + mach_port_t port, UInt32 type, UInt32 refCon ); + + virtual IOReturn getNotificationSemaphore( UInt32 
notification_type, + semaphore_t * semaphore ); + + virtual IOReturn connectClient( IOUserClient * client ); + + // memory will be released by user client when last map is destroyed + virtual IOReturn clientMemoryForType( UInt32 type, + IOOptionBits * options, + IOMemoryDescriptor ** memory ); + + virtual IOMemoryMap * mapClientMemory( IOOptionBits type, + task_t task, + IOOptionBits mapFlags = kIOMapAnywhere, + IOVirtualAddress atAddress = 0 ); + /*! + @function exportObjectToClient + Make an arbitrary OSObject available to the client task. + @param task The task + @param obj The object we want to export to the client + @param clientObj returned value is the client's port name. + */ + virtual IOReturn exportObjectToClient(task_t task, + OSObject *obj, io_object_t *clientObj); + + // Old methods for accessing method vector backward compatiblility only + virtual IOExternalMethod * + getExternalMethodForIndex( UInt32 index ); + virtual IOExternalAsyncMethod * + getExternalAsyncMethodForIndex( UInt32 index ); + + // Methods for accessing method vector. + virtual IOExternalMethod * + getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); + virtual IOExternalAsyncMethod * + getAsyncTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); + + // Methods for accessing trap vector - old and new style + virtual IOExternalTrap * + getExternalTrapForIndex( UInt32 index ); + virtual IOExternalTrap * + getTargetAndTrapForIndex( IOService **targetP, UInt32 index ); +}; + +#endif /* ! _IOKIT_IOUSERCLIENT_H */ + diff --git a/iokit/IOKit/IOWorkLoop.h b/iokit/IOKit/IOWorkLoop.h new file mode 100644 index 000000000..6e8f7ecfc --- /dev/null +++ b/iokit/IOKit/IOWorkLoop.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. + 1998-10-30 Godfrey van der Linden(gvdl) + Converted to C++ +*/ + +#ifndef __IOKIT_IOWORKLOOP_H +#define __IOKIT_IOWORKLOOP_H + +#include +#include +#include +#include + +#include + +class IOEventSource; +class IOCommandGate; + +/*! @class IOWorkLoop : public OSObject + @discussion An IOWorkLoop is a thread of control that is intended to be used to provide single threaded access to hardware. This class has no knowledge of the nature and type of the events that it marshals and forwards. When an device driver sucessfully starts, See $link IOService::start it is expected to create the event sources it will need to receive events from. Then a work loop is initialised and the events are added to the work loop for monitoring. In general this set up will be automated by the family superclass of the specific device. +

+ The thread main method walks the event source linked list and messages each one requesting a work check. At this point each event source is expected to notify its registered owner that the event has occurred. After each event has been walked and they indicate that another loop isn't required by the 'more' flag being false the thread will go to sleep on a signaling semaphore. +

+ When an event source is registered with a work loop it is informed of the semaphore to use to wake up the loop.*/ +class IOWorkLoop : public OSObject +{ + OSDeclareDefaultStructors(IOWorkLoop) + +private: +/*! @function launchThreadMain + @abstract Static function that sets up thread state and calls the continuation function, $link threadMainContinuation */ + static void launchThreadMain(void *self); + +/*! @function threadMainContinuation + @abstract Static function that calls the $link threadMain function. */ + static void threadMainContinuation(); + +protected: + +/*! @typedef maintCommandEnum + @discussion Enumeration of commands that $link _maintCommand can deal with. + @enum + @constant mAddEvent Used to tag an Add event source command. + @constant mRemoveEvent Used to tag a Remove event source command. */ + typedef enum { mAddEvent, mRemoveEvent } maintCommandEnum; + +/*! @var gateLock + Mutual exclusion lock that is used by close and open Gate functions. */ + IORecursiveLock *gateLock; + +/*! @var eventChain Pointer to first Event Source in linked list. */ + IOEventSource *eventChain; + +/*! @var controlG Internal control gate to maintain event system. */ + IOCommandGate *controlG; + +/*! @var workSpinLock + The spin lock that is used to guard the 'workToDo' variable. */ + IOSimpleLock *workToDoLock; + +/*! @var workThread Work loop thread. */ + IOThread workThread; + +/*! @var workToDo + Used to indicate that an interrupt has fired and needs to be processed. +*/ + volatile bool workToDo; + +/*! @var loopRestart + If event chain has been changed and the system has to be rechecked from start this flag is set. (Internal use only) */ + bool loopRestart; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capabilities of the IOWorkLoop in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +/*! 
@function _maintRequest + @abstract Synchronous implementation of $link addEventSource & $link removeEventSource functions. */ + virtual IOReturn _maintRequest(void *command, void *data, void *, void *); + +/*! @function free + @discussion Mandatory free of the object independent of the current retain count. If the work loop is running this method will not return until the thread has successfully terminated. Each event source in the chain will be released and the working semaphore will be destroyed. +

+ If the client has some outstanding requests on an event they will never be informed of completion. If an external thread is blocked on any of the event sources they will be awoken with a KERN_INTERUPTED status. */ + virtual void free(); + +/*! @function threadMain + @discussion Work loop thread's main function. This function consists of 3 loops: the outermost loop is the semaphore clear and wait loop, the middle loop terminates when there is no more work and the inside loop walks the event list calling the $link checkForWork method in each event source. If an event source has more work to do then it can set the more flag and the middle loop will repeat. When no more work is outstanding the outermost loop will sleep until an event is signaled or the earliest wakeupTime, whichever occurs first. If the event source does not require the semaphore wait to time out it must set the provided wakeupTime parameter to zero. */ + virtual void threadMain(); + +public: + +/*! @function workLoop + @abstract Factory member function to construct and initialise a work loop. + @result workLoop instance if constructed successfully, 0 otherwise. */ + static IOWorkLoop *workLoop(); + +/*! @function init + @description + Initialises an instance of the workloop. This method creates and initialises the signaling semaphore and forks the thread that will continue executing. + @result true if initialised successfully, false otherwise. */ + virtual bool init(); + +/*! @function getThread + @abstract Getter for $link workThread. + @result Returns workThread */ + virtual IOThread getThread() const; + +/*! @function onThread + @abstract Is the current execution context on the work thread? + @result Returns true if IOThreadSelf() == workThread. */ + virtual bool onThread() const; + +/*! @function inGate + @abstract Is the current execution context holding the work-loop's gate? + @result Returns true if IOThreadSelf() is gate holder. */ + virtual bool inGate() const; + +/*! 
@function addEventSource + @discussion Add an event source to be monitored by the work loop. This function does not return until the work loop has acknowledged the arrival of the new event source. When a new event has been added the threadMain will always restart its loop and check all outstanding events. The event source is retained by the work loop. + @param newEvent Pointer to $link IOEventSource subclass to add. + @result Always returns kIOReturnSuccess. */ + virtual IOReturn addEventSource(IOEventSource *newEvent); + +/*! @function removeEventSource + @discussion Remove an event source from the work loop. This function does not return until the work loop has acknowledged the removal of the event source. When an event has been removed the threadMain will always restart its loop and check all outstanding events. The event source will be released before return. + @param toRemove Pointer to $link IOEventSource subclass to remove. + @result kIOReturnSuccess if successful, kIOReturnBadArgument if toRemove couldn't be found. */ + virtual IOReturn removeEventSource(IOEventSource *toRemove); + +/*! @function enableAllEventSources + @abstract Call enable() in all event sources + @discussion For all event sources in $link eventChain call enable() function. See $link IOEventSource::enable() */ + virtual void enableAllEventSources() const; + +/*! @function disableAllEventSources + @abstract Call disable() in all event sources + @discussion For all event sources in $link eventChain call disable() function. See $link IOEventSource::disable() */ + virtual void disableAllEventSources() const; + +/*! @function enableAllInterrupts + @abstract Call enable() in all interrupt event sources + @discussion For all event sources, ES, for which IODynamicCast(IOInterruptEventSource, ES) is valid, in $link eventChain call enable() function. See $link IOEventSource::enable() */ + virtual void enableAllInterrupts() const; + +/*! 
@function disableAllInterrupts + @abstract Call disable() in all interrupt event sources + @discussion For all event sources, ES, for which IODynamicCast(IOInterruptEventSource, ES) is valid, in $link eventChain call disable() function. See $link IOEventSource::disable() */ + virtual void disableAllInterrupts() const; + +protected: + // Internal APIs used by event sources to control the thread + friend class IOEventSource; + virtual void signalWorkAvailable(); + virtual void openGate(); + virtual void closeGate(); + virtual bool tryCloseGate(); + virtual int sleepGate(void *event, UInt32 interuptibleType); + virtual void wakeupGate(void *event, bool oneThread); + + OSMetaClassDeclareReservedUnused(IOWorkLoop, 0); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 1); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 2); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 3); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 4); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 5); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 6); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 7); +}; + +#endif /* !__IOKIT_IOWORKLOOP_H */ diff --git a/iokit/IOKit/Makefile b/iokit/IOKit/Makefile new file mode 100644 index 000000000..f7f7c729b --- /dev/null +++ b/iokit/IOKit/Makefile @@ -0,0 +1,59 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + adb \ + cdb \ + graphics \ + hidsystem \ + network \ + nvram \ + pci \ + platform \ + power \ + pwr_mgt \ + rtc \ + scsi \ + storage \ + system_management + +INSTINC_SUBDIRS_PPC = \ + ppc \ + ndrvsupport + +INSTINC_SUBDIRS_I386 = \ + i386 \ + ps2 + +EXPINC_SUBDIRS = 
${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +NOT_EXPORT_HEADERS = +NOT_LOCAL_HEADERS = + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = IOBSD.h IOKitKeys.h IOKitServer.h IOReturn.h\ + IOSharedLock.h IOTypes.h OSMessageNotification.h\ + IODataQueueShared.h IOMessage.h + +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = . + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/OSMessageNotification.h b/iokit/IOKit/OSMessageNotification.h new file mode 100644 index 000000000..74bcb1f64 --- /dev/null +++ b/iokit/IOKit/OSMessageNotification.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + +#ifndef __OS_OSMESSAGENOTIFICATION_H +#define __OS_OSMESSAGENOTIFICATION_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +enum { + kFirstIOKitNotificationType = 100, + kIOServicePublishNotificationType = 100, + kIOServiceMatchedNotificationType = 101, + kIOServiceTerminatedNotificationType = 102, + kIOAsyncCompletionNotificationType = 150, + kIOServiceMessageNotificationType = 160, + kLastIOKitNotificationType = 199 +}; + +enum { + kOSNotificationMessageID = 53, + kOSAsyncCompleteMessageID = 57, + kMaxAsyncArgs = 16 +}; + +enum { + kIOAsyncReservedIndex = 0, + kIOAsyncReservedCount, + + kIOAsyncCalloutFuncIndex = kIOAsyncReservedCount, + kIOAsyncCalloutRefconIndex, + kIOAsyncCalloutCount, + + kIOMatchingCalloutFuncIndex = kIOAsyncReservedCount, + kIOMatchingCalloutRefconIndex, + kIOMatchingCalloutCount, + + kIOInterestCalloutFuncIndex = kIOAsyncReservedCount, + kIOInterestCalloutRefconIndex, + kIOInterestCalloutServiceIndex, + kIOInterestCalloutCount +}; + +enum { + kOSAsyncRefCount = 8, + kOSAsyncRefSize = 32 +}; +typedef natural_t OSAsyncReference[kOSAsyncRefCount]; + +struct OSNotificationHeader { + vm_size_t size; /* content size */ + natural_t type; + OSAsyncReference reference; + unsigned char content[0]; +}; + +struct IOServiceInterestContent { + natural_t messageType; + void * messageArgument[1]; +}; + +struct IOAsyncCompletionContent { + IOReturn result; + void * args[0]; +}; + +#ifndef __cplusplus +typedef struct OSNotificationHeader OSNotificationHeader; +typedef struct IOServiceInterestContent IOServiceInterestContent; +typedef struct IOAsyncCompletionContent IOAsyncCompletionContent; +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __OS_OSMESSAGENOTIFICATION_H */ + diff --git a/iokit/IOKit/adb/IOADBBus.h b/iokit/IOKit/adb/IOADBBus.h new file mode 100644 index 000000000..e7cf3317d --- /dev/null +++ b/iokit/IOKit/adb/IOADBBus.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 
1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * 18 June 1998 sdouglas Start IOKit version. + * 23 Nov 1998 suurballe Port to C++ + */ + +#ifndef _IOKIT_IOADBBUS_H +#define _IOKIT_IOADBBUS_H + +#include +#include + +class IOADBDevice; + +#define ADB_DEVICE_COUNT 16 + +#define ADB_FLAGS_PRESENT 0x00000001 /* Device is present */ +#define ADB_FLAGS_REGISTERED 0x00000002 /* Device has a handler */ +#define ADB_FLAGS_UNRESOLVED 0x00000004 /* Device has not been fully probed */ + +/* + * ADB Commands + */ + +#define ADB_DEVCMD_SELF_TEST 0xff +#define ADB_DEVCMD_CHANGE_ID 0xfe +#define ADB_DEVCMD_CHANGE_ID_AND_ACT 0xfd +#define ADB_DEVCMD_CHANGE_ID_AND_ENABLE 0x00 + +/* + * ADB IORegistryEntry properties + */ + +#define ADBaddressProperty "address" +#define ADBhandlerIDProperty "handler id" +#define ADBdefAddressProperty "default address" +#define ADBdefHandlerProperty "default handler id" +#define ADBnameProperty "name" + + +struct ADBDeviceControl { + IOADBAddress address; + IOADBAddress defaultAddress; + UInt8 handlerID; + UInt8 defaultHandlerID; + UInt32 flags; + IOService * owner; + ADB_callback_func 
handler; + IOADBDevice * nub; +}; + +typedef struct ADBDeviceControl ADBDeviceControl; + + +class IOADBBus: public IOService +{ +OSDeclareAbstractStructors(IOADBBus) + +public: + +ADBDeviceControl * adbDevices[ ADB_DEVICE_COUNT ]; + +virtual bool init ( OSDictionary * properties = 0 ); +virtual bool matchNubWithPropertyTable( IOService * device, OSDictionary * propertyTable ) = 0; +virtual IOReturn setOwner ( void * device, IOService * client, ADB_callback_func handler ) = 0; +virtual IOReturn clearOwner ( void * device ) = 0; +virtual IOReturn flush ( ADBDeviceControl * busRef ) = 0; +virtual IOReturn readRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) = 0; +virtual IOReturn writeRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ) = 0; +virtual IOADBAddress address ( ADBDeviceControl * busRef ) = 0; +virtual IOADBAddress defaultAddress ( ADBDeviceControl * busRef ) = 0; +virtual UInt8 handlerID ( ADBDeviceControl * busRef ) = 0; +virtual UInt8 defaultHandlerID ( ADBDeviceControl * busRef ) = 0; +virtual IOReturn setHandlerID ( ADBDeviceControl * busRef, UInt8 handlerID ) = 0; + +}; + +#endif /* ! _IOKIT_IOADBBUS_H */ + diff --git a/iokit/IOKit/adb/IOADBController.h b/iokit/IOKit/adb/IOADBController.h new file mode 100644 index 000000000..44eca38ab --- /dev/null +++ b/iokit/IOKit/adb/IOADBController.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 18 June 1998 sdouglas Start IOKit version. + * 12 Nov 1998 suurballe Port objc protocol to c++ abstract class. + */ +#ifndef _IOKIT_ADBCONTROLLER_H +#define _IOKIT_ADBCONTROLLER_H + +#include +#include +#include + +extern "C" { +#include +} + +// referenced in subclasses: +void autopollHandler ( IOService *, UInt8, IOByteCount, UInt8 * ); + +class IOADBDevice; + +/* + * Results + */ + +#define ADB_RET_OK 0 /* Successful */ +#define ADB_RET_INUSE 1 /* ADB Device in use */ +#define ADB_RET_NOTPRESENT 2 /* ADB Device not present */ +#define ADB_RET_TIMEOUT 3 /* ADB Timeout */ +#define ADB_RET_UNEXPECTED_RESULT 4 /* Unknown result */ +#define ADB_RET_REQUEST_ERROR 5 /* Packet Request Error */ +#define ADB_RET_BUS_ERROR 6 /* ADB Bus Error */ + +class IOPMrootDomain; + +class IOADBController: public IOADBBus +{ +OSDeclareAbstractStructors(IOADBController) + +public: + + bool start ( IOService * nub ); + IOReturn setOwner ( void * device, IOService * client, ADB_callback_func handler ); + virtual IOReturn claimDevice ( unsigned long, IOService *, ADB_callback_func ); + virtual IOReturn releaseDevice ( unsigned long ); + virtual IOReturn readDeviceForUser(unsigned long, unsigned long, UInt8 *, IOByteCount *); + virtual IOReturn writeDeviceForUser(unsigned long, unsigned long, UInt8 *, IOByteCount *); + virtual IOReturn setAutoPollPeriod (int microseconds) = 0; + virtual IOReturn getAutoPollPeriod (int * microseconds) = 0; + virtual IOReturn setAutoPollList(UInt16 
activeAddressMask) = 0; + virtual IOReturn getAutoPollList(UInt16 * activeAddressMask) = 0; + virtual IOReturn setAutoPollEnable(bool enable) = 0; + virtual IOReturn resetBus(void) = 0; + virtual IOReturn flushDevice(IOADBAddress address) = 0; + virtual IOReturn readFromDevice(IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length) = 0; + virtual IOReturn writeToDevice(IOADBAddress address, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length) = 0; + void packet ( UInt8 * data, IOByteCount length, UInt8 adbCommand ); + + IOReturn flush ( ADBDeviceControl * busRef ); + IOReturn readRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ); + IOReturn writeRegister ( ADBDeviceControl * busRef, IOADBRegister adbRegister, + UInt8 * data, IOByteCount * length ); + IOADBAddress address ( ADBDeviceControl * busRef ); + IOADBAddress defaultAddress ( ADBDeviceControl * busRef ); + UInt8 handlerID ( ADBDeviceControl * busRef ); + UInt8 defaultHandlerID ( ADBDeviceControl * busRef ); + IOReturn setHandlerID ( ADBDeviceControl * busRef, UInt8 handlerID ); + bool matchNubWithPropertyTable( IOService * device, OSDictionary * propertyTable ); + IOReturn newUserClient( task_t, void *, UInt32, IOUserClient ** ); + IOReturn powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService*); + IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService*); + IOReturn probeBus ( void ); + +IOReturn clearOwner ( void * ); + +IOPMrootDomain * rootDomain; + +private: + + bool claimed_devices[16]; // true if a device has been claimed by user + + bool probeAddress ( IOADBAddress addr ); + bool moveDeviceFrom ( IOADBAddress from, IOADBAddress to, bool check ); + unsigned int firstBit ( unsigned int mask ); + int getURLComponentUnit ( IOService * device, char * path, int maxLen ); + bool busProbed; + thread_call_t probeThread; +}; + +#endif /* ! 
_IOKIT_ADBCONTROLLER_H */ diff --git a/iokit/IOKit/adb/IOADBDevice.h b/iokit/IOKit/adb/IOADBDevice.h new file mode 100644 index 000000000..882963ad0 --- /dev/null +++ b/iokit/IOKit/adb/IOADBDevice.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +class IOADBBus; + + +class IOADBDevice : public IOService +{ +OSDeclareDefaultStructors(IOADBDevice) + +private: + +IOADBBus * bus; +ADBDeviceControl * fBusRef; + +public: + +bool init ( OSDictionary * regEntry, ADBDeviceControl * us ); +bool attach ( IOADBBus * controller ); +virtual bool matchPropertyTable( OSDictionary * table ); +bool seizeForClient ( IOService * client, ADB_callback_func handler ); +void releaseFromClient ( IORegistryEntry * client ); +IOReturn flush ( void ); +IOReturn readRegister ( IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); +IOReturn writeRegister ( IOADBRegister adbRegister, UInt8 * data, IOByteCount * length ); +IOADBAddress address ( void ); +IOADBAddress defaultAddress ( void ); +UInt8 handlerID ( void ); +UInt8 defaultHandlerID ( void ); +IOReturn setHandlerID ( UInt8 handlerID ); +void * busRef ( void ); + +}; diff --git a/iokit/IOKit/adb/IOADBLib.h b/iokit/IOKit/adb/IOADBLib.h new file mode 100644 index 000000000..b5b519f64 --- /dev/null +++ b/iokit/IOKit/adb/IOADBLib.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#define kNumADBMethods 4 + +enum { + kADBReadDevice = 0, + kADBWriteDevice, + kADBClaimDevice, + kADBReleaseDevice +}; + +#ifndef KERNEL + +#include + +io_connect_t IOPMFindADBController( mach_port_t ); +IOReturn IOPMClaimADBDevice ( io_connect_t, unsigned long ); +IOReturn IOPMReleaseADBDevice ( io_connect_t, unsigned long ); +IOReturn IOPMReadADBDevice ( io_connect_t, unsigned long, unsigned long, unsigned char *, unsigned long * ); +IOReturn IOPMWriteADBDevice ( io_connect_t, unsigned long, unsigned long, unsigned char *, unsigned long ); + +#endif diff --git a/iokit/IOKit/adb/Makefile b/iokit/IOKit/adb/Makefile new file mode 100644 index 000000000..30071de72 --- /dev/null +++ b/iokit/IOKit/adb/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = adb +NOT_EXPORT_HEADERS = adb.h + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = IOADBLib.h +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/adb/adb.h b/iokit/IOKit/adb/adb.h new file mode 100644 index 000000000..e296be557 --- /dev/null +++ b/iokit/IOKit/adb/adb.h @@ -0,0 
+1,25 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +typedef UInt8 IOADBAddress; +typedef UInt8 IOADBRegister; +typedef void (*ADB_callback_func) + (IOService * client, UInt8 adbCommand, IOByteCount length, UInt8 * data); diff --git a/iokit/IOKit/assert.h b/iokit/IOKit/assert.h new file mode 100644 index 000000000..575395cc1 --- /dev/null +++ b/iokit/IOKit/assert.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IO_ASSERT_H_ +#define _IO_ASSERT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef KERNEL +#if IOASSERT +#undef MACH_ASSERT +#define MACH_ASSERT 1 +#endif +#endif +#include + +#ifdef __cplusplus +} +#endif + +#endif /* _IO_ASSERT_H_ */ + diff --git a/iokit/IOKit/ata/IOATACommand_Reference.h b/iokit/IOKit/ata/IOATACommand_Reference.h new file mode 100644 index 000000000..f6b3af99c --- /dev/null +++ b/iokit/IOKit/ata/IOATACommand_Reference.h @@ -0,0 +1,500 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! 
+@header IOATACommand_Reference.h + +This header defines the IOATACommand class. + +This class encapsulates an ATA/ATAPI Command. The client driver allocates a +command using IOATADevice::allocCommand() and initializes it using +functions of this class. The client can then submit the command to +the ATA/ATAPI stack by invoking the execute() function. +*/ + +/*! +@typedef ATATaskfile +@discussion +The ATATaskfile structure provides information to be read/written into an IOATACommand. This +information includes the ATA taskfile register settings and the protocol the transport driver +is to use when the corresponding IOATACommand is executed. +@field protocol +Indicates the type of protocol the ATA Controller driver is to use when executing this command. +See enum ATAProtocol in IOATADevice_Reference for allowable values for this field. +@field tagType +Indicates whether an ATA command requires a tag. This field is only used when the protocol +selected for the command supports the Overlap feature set. +@field tag +This field is set by the IOATAController class to tell the host +adapter driver the tag value to assign to this IOATACommand. +@field resultmask +This field is set by the IOATADevice client and is a bit mask indicating the registers to be +returned when the IOATACommand completes. Clients should use ATARegToMask() convert ATA register +index values to bit mask values. +@field regmask +This field is set by the IOATADevice client and is a bit mask indicating the registers to be written +when an IOATACommand is executed. Clients should use ATARegToMask() convert ATA register +index values to bit mask values. +@field ataRegs +This array contains the ATA taskfile register values to be written when an IOATACommand is executed. +The index values for ATA taskfile registers is specified by enum ATARegs. 
+*/ +typedef struct ATATaskfile { + + ATAProtocol protocol; + + UInt32 flags; + + UInt8 tagType; + UInt32 tag; + + UInt32 resultmask; + + UInt32 regmask; + UInt32 ataRegs[kMaxATARegs]; + +} ATATaskfile; + +/*! +@typedef ATACDBInfo +@discussion +The ATACDBInfo structure provides cdb information to be read/written into an IOATACommand. +@field cdbFlags +Indicates flags to applicable to the CDB to be executed. There are currently no flags defined for +IOATADevice clients. +@field cdbLength +Set by the IOATADevice client to the length of the Command Descriptor +Block (CDB). +@field cdb +Set by the IOATADevice client to command descriptor block the client +wishes the ATAPI device to execute. +*/ +typedef struct ATACDBInfo { + + UInt32 cdbFlags; + + UInt32 cdbLength; + UInt8 cdb[16]; + + UInt32 reserved[16]; + +} ATACDBInfo; + +/*! +@typedef ATAResults +@discussion +The ATAResults structure provides completion information for an IOATACommand. +@field returnCode +The overall return code for the command. See iokit/iokit/IOReturn.h. +This value is also returned as the getResults() return value. +@field bytesTransferred +The total number of bytes transferred to/from the ATA device. +Note: Some ATA controllers are not capable of returning accurate byte counts when +operating in bus-master mode. Clients should use some caution in interpreting the +contents of this field. +@field adapterStatus +This field contains the low-level ATA Controller status for a completed IOATACommand. +Values for this field are defined by enum ATAReturnCode. +@field requestSenseDone +A boolean indicating whether sense data was obtained from the ATAPI +device. +@field requestSenseLength +The number of sense data bytes returned from the ATAPI device. +@field ataRegs +This array contains the ATA taskfile register values to be returned when an IOATACommand completes. +The index values for ATA taskfile registers is specified by enum ATARegs. 
Registers to be returned +are indicated by the bit mask resultmask. See structure ATATaskfile. +*/ +typedef struct ATAResults { + + IOReturn returnCode; + + UInt32 bytesTransferred; + + ATAReturnCode adapterStatus; + + bool requestSenseDone; + UInt32 requestSenseLength; + + UInt32 ataRegs[kMaxATARegs]; + + UInt32 reserved[16]; + +} ATAResults; + + + +class IOATACommand : public IOCDBCommand +{ +public: + + +/*! +@function setPointers +@abstract +Sets the data buffer component of an ATA/ATAPI Command. +@discussion +The client provides an IOMemoryDescriptor object to corresponding +to the client's data or request sense buffer, the maximum data transfer count +and data transfer direction. +@param desc +Pointer to a IOMemoryDescriptor describing the client's I/O buffer. +@param transferCount +Maximum data transfer count in bytes. +@param isWrite +Data transfer direction. (Defined with respect to the device, i.e. isWrite = true +indicates the host is writing to the device. +@param isSense +If isSense is set to false, the IOATACommand's data buffer information is set. If isSense is +set to true, the IOATACommand's request sense buffer information is set. +*/ + void setPointers( IOMemoryDescriptor *desc, + UInt32 transferCount, + bool isWrite, + bool isSense = false ); +/*! +@function getPointers +@abstract +Gets the data buffer component of an ATA/ATAPI Command. +@discussion +The client provides a set of pointers to fields to receive the IOATACommand's +data/request sense buffer pointers. +@param desc +Pointer to a field (IOMemoryDescriptor *) to receive the IOATACommand's IOMemoryDescriptor pointer. +@param transferCount +Pointer to a field (UInt32) to receive the IOATACommand's maximum transfer count. +@param isWrite +Pointer to a field (bool) to receive the IOATACommand's transfer direction. +@param isSense +If isSense is set to true, the IOATACommand's data buffer information is returned. Otherwise, +the IOATACommand's request sense buffer information is returned. 
+*/ + void getPointers( IOMemoryDescriptor **desc, + UInt32 *transferCount, + bool *isWrite, + bool isSense = false ); +/*! +@function setTimeout +@abstract +Sets the timeout for the command in milliseconds. +@discussion +The IOATAController class will abort a command which does not +complete within the time interval specified. The client should +set the timeout parameter to zero if they want to suppress +timing. +@param timeout +Command timeout in milliseconds. +*/ + void setTimeout( UInt32 timeoutmS ); + +/*! +@function getTimeout +@abstract +Gets the timeout for the command in milliseconds. +@discussion +This function returns the command timeout previously set by setTimeout(). +@param timeout +Command timeout in milliseconds. +*/ + UInt32 getTimeout(); + +/*! +@function setCallback +@abstract +Sets the callback routine to be invoked when the ATA/ATAPI Command completes. +@param target +Pointer to the object to be passed to the callback routine. This would usually +be the client's (this) pointer. +@param callback +Pointer to the client's function to process the completed command +@param refcon +Pointer to the information required by the client's callback routine to process +the completed command. +*/ + void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + +/*! +@function execute +@abstract +Submits an ATA/ATAPI command to be executed. +@discussion +Once the execute() function is called, the client should not +invoke any further functions on the ATA/ATAPI Command with the +exception of abort(). + +The execute() function optionally sets a unique sequence +number token for the command. If the client intends to use the abort() +method they must retain this sequence number token. +@param sequenceNumber +Pointer to field (UInt32) to receive the sequence number assigned to the ATA/ATAPI +Command. +*/ + bool execute( UInt32 *sequenceNumber = 0 ); + +/*! +@function abort +@abstract +Aborts an executing ATA/ATAPI Command. 
+@discussion +The client may invoke the abort() method to force the completion of an +executing ATA/ATAPI Command. The client must pass the sequence number +provided when the execute() function was invoked. + +Note: The abort function provides no status on whether or not a +command has been successfully aborted. The client should wait for the +command to actually complete to determine whether the abort completed +successfully. +@param sequenceNumber +The client must pass the sequence number assigned to the command when +the client called the execute() function. +*/ + void abort( UInt32 sequenceNumber ); + +/*! +@function complete +@abstract +Indicates the IOATAController subclass (host adapter driver) has completed an ATA/ATAPI command. +@discussion +Once the complete() function is called, the controller +subclass should make no further accesses to the IOATACommand +being completed. + +A IOATADevice client would not normally call this function. +*/ + void complete(); + +/*! +@function getClientData +@abstract +Returns a pointer to the ATA/ATAPI Command's client data area. +@discussion +The client may allocate storage in the ATA/ATAPI Command for its own use. +See IOATADevice::allocateCmd(). +*/ + void *getClientData(); + +/* +@function getCommandData +@abstract +Returns a pointer to the ATA Command's controller data area +@discussion +This area is allocated for use by the IOATAController subclass (host adapter +driver). The client should not normally access this area. +*/ + + void *getCommandData(); + +/*! +@function getSequenceNumber +@abstract +Returns the sequence number assigned to an executing command. +@discussion +The caller should check the sequence number for 0. This indicates that +the command has completed or has not been processed to the point where +a sequence number has been assigned. +*/ + UInt32 getSequenceNumber(); + +/*! +@function setTaskfile +@abstract +Sets the ATA taskfile register information and ATA/ATAPI protocol for the IOATACommand. 
+@discussion
+See struct ATATaskfile for additional information.
+@param taskfile
+Pointer to an ATATaskfile structure.
+*/
+    void            setTaskfile( ATATaskfile *taskfile );
+
+/*!
+@function getTaskfile
+@abstract
+Gets the ATA taskfile register information and ATA/ATAPI protocol for the IOATACommand.
+@param taskfile
+Pointer to an ATATaskfile structure to receive the IOATACommand's taskfile information.
+*/
+    void            getTaskfile( ATATaskfile *taskfile );
+
+/*!
+@function getProtocol
+@abstract
+Returns the protocol specified for the ATA/ATAPI Command.
+@discussion
+The protocol returned is specified by the client in the ATATaskfile structure. See setTaskfile().
+This function is normally used by subclasses of IOATAController to obtain information about the
+ATA command being executed.
+*/
+    ATAProtocol     getProtocol();
+
+/*!
+@function getResultMask
+@abstract
+Returns the resultMask specified for the ATA/ATAPI Command.
+@discussion
+The resultMask is specified by the client in the ATATaskfile structure and indicates the ATA taskfile registers
+to be returned when the ATA/ATAPI command completes. See setTaskfile(). This function is normally used by
+subclasses of IOATAController to obtain information about the ATA command being executed.
+*/
+    UInt32          getResultMask();
+
+/*!
+@function getFlags
+@abstract
+Returns the flags specified for the ATA/ATAPI Command.
+@discussion
+The flags are specified by the client in the ATATaskfile structure. See setTaskfile(). This function is
+normally used by subclasses of IOATAController to obtain information about the ATA command being executed.
+*/
+    UInt32          getFlags();
+
+/*!
+@function setCDB
+@abstract
+Sets the CDB component of an ATA/ATAPI Command.
+@param ataCmd
+Pointer to an ATACDBInfo structure.
+*/
+    void            setCDB( ATACDBInfo *ataCmd );
+
+/*!
+@function getCDB
+@abstract
+Gets the CDB component of an ATA/ATAPI Command.
+@param ataCmd
+Pointer to an ATACDBInfo structure to receive the ATA/ATAPI Command's cdb information.
+*/
+
+    void            getCDB( ATACDBInfo *ataCmd );
+
+/*!
+@function getResults +@abstract +Gets results from a completed ATA/ATAPI Command. +@discussion +The getResults() function returns the value of the returnCode field of the command results. If +the client is only interested in a pass/fail indication for the command, the client +can pass (ATAResults *)0 as a parameter. +@param results +Pointer to a ATAResults structure to receive the ATA/ATAPI Command's completion information. +*/ + IOReturn getResults( ATAResults *results ); + +/*! +@function setResults +@abstract +Sets the results component of a ATA/ATAPI Command. +@discussion +The setResults() function is used by the IOATAController subclass (host +adapter driver) return results for a ATA/ATAPI Command about to be completed. +@param ataResults Pointer to a ATAResults structure containing +completion information for the ATA/ATAPI Command. + +Completion information is copied into the command, so the caller may +release the ATAResults structure provided when this function returns. +*/ + void setResults( ATAResults *results ); + +/*! +@function getDevice +@abstract +Returns the IOATADevice this command is targeted to. +@param deviceType +The caller should use value kIOATADeviceType. +@discussion +In some cases a IOATACommand is not associated with a specific ATA Unit. This +would be the case for a ATA Bus Reset. In this case getDevice() returns 0. +*/ + IOATADevice *getDevice( IOATAStandardDevice *deviceType ); + +/*! +@function getUnit +@abstract +Returns the unit number for the IOATADevice this command is associated with. +*/ + ATAUnit getUnit(); + +/*! +@function setQueueInfo +@abstract +Sets queuing information for the ATA/ATAPI Command. +@discussion +Each IOATADevice has two queues, a normal Q and a bypass Q. The treatment of the +queues is essentially identical except that the bypass Q is given preference whenever +it has commands available. 
+ +Usually, the client will use the normal Q for regular I/O commands and the bypass Q +to send error recovery commands to the device. +@param queueType +Set to kATAQTypeNormalQ or kATAQTypeBypassQ to indicate which IOATADevice queue the +ATA/ATAPI Command should be routed to. +@param queuePosition +Set to kATAQPositionTail or kATAQPositionHead to indicate whether the ATA/ATAPI Command should +be added to the head to tail for the selected IOATADevice queue. +*/ + void setQueueInfo( UInt32 forQueueType = kATAQTypeNormalQ, UInt32 forQueuePosition = kATAQPositionTail ); + +/*! +@function getQueueInfo +@abstract +Gets queuing information for the ATA Command. +@param queueType +Pointer to a field (UInt32) to receive the queue type previously set for this ATA Command. +@param queuePosition +Pointer to a field (UInt32) to receive the queue position previously set for this ATA Command. +*/ + void getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ); + +/*! +@function getCmdType +@abstract +Obtains the underlying 'intent' of an ATA/ATAPI Command. +@discussion +This function provides information on the intent of a ATA/ATAPI +Command. For example, since Aborts, Request Sense and normal Execute commands are +all sent to the executeCommand() function, invoking getCmdType() +will indicate whether a Request Sense, Abort or Normal I/O request is +being processed. + +This information is not normally meaningful to IOATADevice clients. +*/ + UInt32 getCmdType(); + +/*! +@function getOriginalCmd +@abstract +Obtains a 'related' ATA/ATAPI Command. +@discussion +In cases where a ATA command is related to a previous command, this +function will return the original command. For example, if a +Request Sense command (CmdType = kATACommandReqSense)is processed, +then this function can be used to obtain the original command that +caused the check condition. 
If an Abort command (CmdType = +kATACommandAbort) then this function can be used to obtain the original +command the abort was issued against. + +It this information is not normally meaningful to IOATADevice clients. +*/ + + IOATAStandardCommand *getOriginalCmd(); + +}; + diff --git a/iokit/IOKit/ata/IOATAController_Reference.h b/iokit/IOKit/ata/IOATAController_Reference.h new file mode 100644 index 000000000..0bf16366c --- /dev/null +++ b/iokit/IOKit/ata/IOATAController_Reference.h @@ -0,0 +1,410 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/*! +@header IOATAController_Reference.h + +This header defines the IOATAController class. + +IOATAController provides the superclass for the ATA Family. In most +cases, actual controller drivers should be implemented to IOATAStandardDriver +which converts the relatively high-level commands produced by this class +to low-level ATA register commands. 
+ +This class may be useful in cases where the actual ATA device is connected +by some intermediate bus and it would be more efficient for family for that +bus to deal with high-level commands rather than low-level ATA register I/O. +*/ + +class IOATAStandardController : public IOService +{ + +public: + +/*! +@function reset +@abstract +Perform an ATA bus reset. +@discussion +This function requests the IOATAController class to perform an ATA Bus reset. + +The IOATAController class will convert this request into a reset command and +will call the resetCommand() function. + +The reset() function is synchronous, i.e. it will wait for the reset to complete. +*/ + IOReturn reset(); + +protected: + +/*! +@function enableCommands +@abstract +Resume sending I/O commands to your driver. +@discussion +Resumes sending I/O commands to your driver that were previously suspended +by calling disableCommands(). +*/ + void enableCommands(); + +/*! +@function disableCommands +@abstract +Suspend sending I/O commands to your driver. +@discussion +In cases where your executeCommand() member function cannot accept +commands, you may disable further calls by invoking disableCommands(). +Use enableCommands() to resume receiving commands. + +Note: The resetCommand() and cancelCommands() entry points are not +affected by the use of this function. + +Note: The default timeout for disableCommands() is 5s. If this timeout +is exceeded the IOATAController class will call your driver's +disableTimeoutOccurred() function. The default action of this function +is to issue a ATA Bus Reset by calling your driver's resetCommand() +function. +@param timeoutmS +Your driver may override the default timeout +by specifying a timeout value in milliseconds. +*/ + void disableCommands( UInt32 disableTimeoutmS ); + +/*! +@function rescheduleCommand +@abstract +Return a IOATACommand for rescheduling. 
+@discussion +If your subclass function cannot start processing an otherwise +acceptable IOATACommand, you may have the IOATACommand rescheduled by +calling rescheduleCommand(). A IOATACommand passed to this function +should be treated as 'complete', i.e. you should make no further +accesses to it. + +Note: If you cannot process further commands, you should call the +disableCommands() function to prevent receiving additional commands +until you are ready to accept them. +@param ataCommand +Pointer to IOATACommand your driver needs to reschedule. +*/ + void rescheduleCommand( IOATAStandardCommand *forATACmd ); + + void resetStarted(); + void resetOccurred(); + + +/*! +@function findCommandWithNexus +@abstract +Locate an active IOATACommand using device/tag values. +@discussion +Your subclass can use this function to search for an active +IOATACommand by providing the device/tag values for the command. In +the case of a non-tagged command the second parameter must either be +omitted or set to -1. + +An unsuccessful search will return 0. +@param forDevice +Pointer to an IOATADevice. +wish to search for. +@param tagValue +Optional tag value you wish to search for. +*/ + IOATAStandardCommand *findCommandWithNexus( IOATAStandardDevice *forDevice, UInt32 tagValue = (UInt32)-1 ); + +/*! +@function getDeviceData +@abstract +Obtains a pointer to per-device data allocated by IOATAController. +@discussion +This function returns a pointer to per-device workarea allocated for +your driver's use. The size of this area must be specified in the +during the configure() function. See struct ATAControllerInfo, +field devicePrivateDataSize. +@param forUnit +The unit number of the ata device. +*/ + void *getDeviceData( ATAUnit forUnit ); + +/*! +@function getWorkLoop +@abstract +Returns/provides the IOWorkLoop object that services your driver. +@discussion +If your driver wishes to create its own workloop, you should implement this +function and return the IOWorkLoop for your subclass. 
Otherwise, if you +return 0, the IOATAController class will create a workloop for your driver. +*/ + virtual IOWorkLoop *getWorkLoop() const; + +/*! +@function getCommandCount +@abstract +Returns the current active command count for your driver. +@discussion +This indicates the number of executeCommands that have been sent to your +driver and have not been completed. +*/ + UInt32 getCommandCount(); + +/*! +@function setCommandLimit +@abstract +Modifies command limit indicated for the IOATADevice device. +@discussion +If the device currently has more than commands outstanding than the new command limit, +additional commands will not be sent to the device. If the command limit is increased, +the additional commands will be sent until the command limit is met. +@param device +Pointer to an IOATADevice. +@param commandLimit +New limit on outstanding executeCommands. +*/ + void setCommandLimit( IOATAStandardDevice *device, UInt32 commandLimit ); + +/*! +@function suspendDevice +@abstract +Stops sending executeCommands to the indicated device. +@discussion +This function causes the IOATAController class to stop sending executeCommands to the +indicated device. +@param forATADevice +Pointer to an IOATADevice for which executeCommand delivery is to be suspended. +*/ + void suspendDevice( IOATAStandardDevice *forATADevice ); + +/*! +@function resumeDevice +@abstract +Resumes sending executeCommands to the indicated device. +@discussion +This function causes the IOATAController class to resume sending executeCommands to an +IOATADevice that was previously suspended. If the IOATADevice was not previously +suspended, then this call has no effect. +@param forATADevice +Pointer to an IOATADevice for which executeCommand delivery is to be resumed. +*/ + void resumeDevice( IOATAStandardDevice *forATADevice ); + +/*! +@function selectDevice +@abstract +Returns a pointer to the IOATADevice device that was suspended the for the +longest amount of time. 
+@discussion
+This function returns a 'hint' as to which device to resume to implement fairness
+in servicing IOATADevice contending for access to the ATA bus.
+*/
+    IOATAStandardDevice *selectDevice();
+
+protected:
+
+/*!
+@function configure
+@abstract
+Driver configuration/initialization request.
+@discussion
+The configure() member function is the first call your subclass will
+receive. You should provide the information requested in the
+ATAControllerInfo structure and enable your hardware for operation.
+If your driver initializes successfully, you should return true; otherwise,
+your driver should return false.
+@param provider
+Pointer to an object (usually IOPCIDevice) which represents the bus
+your device is attached to. Typically your driver will use functions
+supplied by this object to access PCI space on your hardware. See
+IOPCIDevice for a description of PCI services.
+@param controllerInfo
+Pointer to an ATAControllerInfo structure. Your driver should provide
+the information requested in this structure prior to returning from
+the configure() call.
+*/
+    virtual bool configure( IOService *provider, ATAControllerInfo *controllerInfo ) = 0;
+
+/*!
+@function getProtocolsSupported
+@abstract
+Returns a bit mask of transport protocols this IOATADevice supports.
+@discussion
+The subclass of IOATAController must return a bit-mask of transport protocols supported.
+@param protocolsSupported
+Pointer to a (UInt32) to receive a bit mask of transport protocols supported. See enum
+ATAProtocol for a list of transport protocols.
+*/
+    virtual bool getProtocolsSupported( ATAProtocol *protocolsSupported ) = 0;
+
+/*!
+@function executeCommand
+@abstract
+Execute an IOATACommand.
+@discussion
+The executeCommand() function is called for all 'routine' I/O requests.
+The driver is passed a pointer to an
+IOATACommand object. The driver obtains information about the I/O
+request by using function calls provided by the IOATACommand
+class.
+@param ataCommand +Pointer to an IOATACommand. See IOATACommand_Reference for more information. +*/ + virtual void executeCommand( IOATAStandardCommand *forATACmd ) = 0; + +/*! +@function cancelCommand +@abstract +Cancels a IOATACommand previously submitted. +@discussion +The cancelCommand() function is called to inform your subclass to force +completion of an ATA command. + +Your subclass should call the getOriginalCmd() to determine the command +to complete. + +After calling complete() on the original command, you should complete +the IOATACommand passed to the cancelCommand() function + +Note: When a cancelCommand is issued, your subclass may presume that any +activity to remove an active command has already occurred. +@param ataCommand +Pointer to a IOATACommand. See IOATACommand for more information. +*/ + virtual void cancelCommand( IOATAStandardCommand *forATACmd ) = 0; + +/*! +@function resetCommand +@abstract +Request the IOATAController subclass issue an ATA Bus reset. +@discussion +The resetCommand() function indicates you should do an ATA Bus Reset. +After issuing the reset you should complete to IOATACommand passed. + +Note: After you report the IOATACommand Reset complete, you will +receive cancelCommand() requests for all outstanding commands. +@param ataCommand +Pointer to a IOATACommand. See IOATACommand for more information. +*/ + virtual void resetCommand( IOATAStandardCommand *forATACmd ) = 0; + +/*! +@function abortCommand +@abstract +Requests the IOATAController subclass abort a currently executing command. + +Note: In most cases ATA does not provide semantics to cleanly abort an executing +command. In these cases, the subclass may reset the ATA bus to implement this +function. +@param forATACmd +Pointer to an active IOATACommand to be aborted. +*/ + virtual void abortCommand( IOATAStandardCommand *forATACmd ) = 0; + +/*! +@function calculateTiming +Convert ATA timing parameters to controller register settings. 
+@discussion +The IOATAController subclass is presented with proposed timings. If the subclass +can support the provided timing parameters, it should calculate the corresponding +controller register settings and make them available for future lookup indexed +by the timingProtocol field of the ATATiming structure. If the controller driver +cannot support the indicated timing it should return false as the calculateTiming() +result. +@param deviceNum +The unit number (0/1) of the IOATADevice the timing is to apply to. +@param timing +A pointer to a ATATiming structure containing the parameters for the selected +timing. +*/ + virtual bool calculateTiming( UInt32 deviceNum, ATATiming *timing ) = 0; + + +/*! +@function allocateDevice +@abstract +The IOATAController class informs its subclass of allocation of an ATA device. +@discussion +The IOATAController subclass will be called at its allocateDevice() function when an +ATA device is about to be probed. The subclass should initialize its per-device data at +this time. If the subclass wishes to prevent probing of this device, it should return false +as the result of this function call. + +Note: This is an optional function. Your driver is not required to implement it. +@param unit +The ATA unit number of the device about to be allocated. +*/ + virtual bool allocateDevice( ATAUnit unit ); + +/*! +@function deallocateDevice +@abstract +The IOATAController class informs its subclass of deallocation of an ATA device. +@discussion +The IOATAController subclass will be called at its deallocateDevice() function when +an ATA device is about to be deallocated. The subclass must insure that there will +be no further access to the per-device data allocated to this device. + +Note: This is an optional function. Your driver is not required to implement it. +@param unit +The ATA unit number of the device about to be deallocated. +*/ + virtual void deallocateDevice( ATAUnit unit ); + +/*! 
+@function disableTimeoutOccurred +@abstract +Indicates the IOATAController subclass has suspended commands too long. +@discussion +The IOATAController class will timeout disableCommand() requests +to preclude the possibility of a hung ATA bus. If a timeout occurs, +then disableTimeoutOccurred() will be called. The default action of this +routine is to do a ATA Bus Reset by calling resetCommand(). Your +subclass may choose to modify the default behavior of this routine to do +additional adapter specific error recovery. +*/ + virtual void disableTimeoutOccurred(); + +/*! +@function enableControllerInterrupts +@abstract +Indicates the IOATAController subclass should enables its controller interrupt. +@discussion +The default implementation of this function enables all interrupt sources +associated with the current workloop. If the subclass needs more precise +control of its interrupt sources it should replace the implementation of +this function with its own. +*/ + virtual void enableControllerInterrupts(); + +/*! +@function disableControllerInterrupts +@abstract +Indicates the IOATAController subclass should disable its controller interrupt. +@discussion +The default implementation of this function disables all interrupt sources +associated with the current workloop. If the subclass needs more precise +control of its interrupt sources it should replace the implementation of +this function with its own. +*/ + virtual void disableControllerInterrupts(); + +}; + diff --git a/iokit/IOKit/ata/IOATADeviceInterface.h b/iokit/IOKit/ata/IOATADeviceInterface.h new file mode 100644 index 000000000..c4f9c8ed5 --- /dev/null +++ b/iokit/IOKit/ata/IOATADeviceInterface.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATADeviceInterface.h + * + */ +#ifndef _IOATADEVICEINTERFACE_H +#define _IOATADEVICEINTERFACE_H + +#include + +#include +#include +#include +#include +#include + +#endif diff --git a/iokit/IOKit/ata/IOATADevice_Reference.h b/iokit/IOKit/ata/IOATADevice_Reference.h new file mode 100644 index 000000000..266be5d49 --- /dev/null +++ b/iokit/IOKit/ata/IOATADevice_Reference.h @@ -0,0 +1,408 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*!
+@header IOATADevice_Reference.h
+
+This header defines the IOATADevice class.
+
+The ATA/ATAPI framework creates instances of this class to
+represent each valid ATA/ATAPI device detected during
+ATA bus scanning. When an instance of this class is registered with
+IOKit, the instance will be presented to clients which
+'match' the IOATADevice class.
+*/
+
+/*!
+@enum ATADeviceType
+Defines ATA/ATAPI Device types.
+@constant kATADeviceNone
+Indicates no device installed.
+@constant kATADeviceATA
+Indicates ATA type device, i.e. packet protocols not supported.
+@constant kATADeviceATAPI
+Indicates ATAPI type device, i.e. packet protocols supported.
+*/
+enum ATADeviceType {
+
+    kATADeviceNone,
+    kATADeviceATA,
+    kATADeviceATAPI,
+
+};
+
+/*!
+@enum ATATimingProtocol
+Defines supported transport timing. See getTimingsSupported(), selectTiming().
+@constant kATATimingPIO
+Indicates transport timing is for Programmed I/O.
+@constant kATATimingDMA
+Indicates transport timing is for DMA.
+@constant kATATimingUltraDMA33
+Indicates transport timing is for Ultra DMA/33.
+@constant kATATimingUltraDMA66
+Indicates transport timing is for Ultra DMA/66.
+@constant kATAMaxTimings
+Indicates number of timing protocols defined.
+*/
+enum ATATimingProtocol
+{
+    kATATimingPIO        = (1 << 0),
+    kATATimingDMA        = (1 << 1),
+    kATATimingUltraDMA33 = (1 << 2),
+    kATATimingUltraDMA66 = (1 << 3),
+    kATAMaxTimings       = 4,
+
+};
+
+/*!
+@enum ATAProtocol
+Defines supported transport protocols. See getProtocolsSupported().
+@constant kATAProtocolNone
+Indicates no transport protocol defined.
+@constant kATAProtocolSetRegs
+Indicates the transport driver should do a Set ATA Registers operation.
For this
+protocol, the transport driver sets the requested taskfile registers as indicated
+in the ATATaskfile structure and then reads back the taskfile registers requested.
+The transport presumes the device will not generate an interrupt as a result
+of this operation.
+@constant kATAProtocolPIO
+Indicates the transport driver should do a Programmed I/O operation. For this
+protocol, the transport driver sets the requested taskfile registers, and transfers
+data to/from the IOATADevice via Programmed I/O operations. The IOATADevice client
+can control the direction/amount of data transferred by using the setPointers() function.
+The client can indicate a zero data transfer length to implement non-data transfer
+commands such as power-management and set features.
+@constant kATAProtocolDMA
+Indicates the transport driver should do a DMA I/O operation to the device. For this
+protocol, the transport driver sets the requested taskfile registers, and transfers
+data to/from the IOATADevice via DMA operations.
+@constant kATAProtocolDMAQueued
+Indicates the transport driver should do DMA Queued I/O operations. In this case,
+the driver will queue multiple I/O operations at the IOATADevice. Both the device
+and the transport driver must support this protocol.
+@constant kATAProtocolDMAQueuedRelease
+Indicates the transport driver should do DMA Queued I/O operations with bus release.
+In this case, the driver will queue multiple I/O operations at the IOATADevice. In
+addition this protocol allows overlap between both devices on the ATA Bus.
+@constant kATAProtocolATAPIPIO
+Indicates the transport driver should use the ATAPI packet protocol and transfer
+data to/from the device via PIO cycles.
+@constant kATAProtocolATAPIDMA
+Indicates the transport driver should use the ATAPI packet protocol and transfer
+data to/from the device via DMA cycles.
+*/ +enum ATAProtocol { + + kATAProtocolNone = 0, + kATAProtocolSetRegs = (1 << 0), + kATAProtocolPIO = (1 << 1), + kATAProtocolDMA = (1 << 2), + kATAProtocolDMAQueued = (1 << 3), + kATAProtocolDMAQueuedRelease = (1 << 4), + + kATAProtocolATAPIPIO = (1 << 16), + kATAProtocolATAPIDMA = (1 << 17), + +}; + +/*! +@typedef ATATiming +@abstract +Provides the low-level cycle times for the transport timing indicated. +@discussion +See enum ATATimingProtocols for a list of transport timing protocols. +@field timingProtocol +Indicates transport timing the structure refers to. See enum ATATimingProtocol +for a list of transport timings. +@field mode +Indicates the ATA DMA/PIO mode supported. The mode is a number indicating preset +timings for a particular set of timings as defined by the ATA specification. +@field minDataAccess +The minimum time (in nS) that IOW-/IOR- indicates that the data is valid for 16-bit +data transfers. This field does not apply for Ultra/33 and Ultra/66 timings. +@field minDataCycle +The minimum time (in nS) that a full 16-bit data transfer will take, i.e. the time +between consecutive assertions of IOW-/IOR-. For Ultra/33 and Ultra/66 timings +this field indicates the average single cycle time. +@field minCmdAccess +The minimum time (in nS) that IOW-/IOR- indicates that the data is valid for 8-bit +pio command transfers. +@field minCmdCycle +The minimum time (in nS) that a full 8-bit pio data transfer will take, i.e. the time +between consecutive assertions of IOW-/IOR-. +*/ +typedef struct ATATiming { + + ATATimingProtocol timingProtocol; + + UInt32 featureSetting; + + UInt32 mode; + UInt32 minDataAccess; + UInt32 minDataCycle; + UInt32 minCmdAccess; + UInt32 minCmdCycle; + UInt32 reserved_3[9]; + +} ATATiming; + +class IOATAStandardDevice : public IOATADevice +{ +public: + +/*! +@function allocCommand +@abstract +Allocates an IOATACommand object for this device. 
+@discussion +The client uses the allocCommand() member function to allocate IOATACommand(s) +for an IOATADevice. The client then uses the member functions of +the IOATACommand to initialize it and send it to the device. A completed IOATACommand +may be reused for subsequent I/O requests or returned to the ATA/ATAPI Family. +@param deviceType +Always specify kIOATADevice. +@param clientDataSize +The client may indicate the size of a per-command data area for its own +use. +*/ + IOATACommand *allocCommand( IOATADevice *deviceType, UInt32 clientDataSize = 0 ); + +/*! +@function getUnit +@abstract +Returns the ATA unit number corresponding to this device. +*/ + ATAUnit getUnit(); + +/*! +@function getDeviceType +@abstract +Returns the type of the corresponding ATA/ATAPI device. +@discussion +See enum ATADeviceType for return values for this function. +*/ + ATADeviceType getDeviceType(); + +/*! +@function getIdentifyData +@abstract +Returns the ATA/ATAPI Identify data for the IOATADevice +@discussion +Identify data is from the results of the last ATA bus probe. +@param identifyBuffer +Pointer to a 512-byte data buffer to receive the ATA/ATAPI Identify data. +*/ + bool getIdentifyData( ATAIdentify *identifyBuffer ); + +/*! +@function getInquiryData +@abstract +Returns ATAPI Inquiry data for the IOATADevice. +@discussion +Inquiry data returned is from the results of the last ATA bus probe. +@param inquiryBufSize +Size of the buffer supplied. +@param inquiryBuffer +Pointer to a buffer to receive the Inquiry data. +*/ + bool getInquiryData( UInt32 inquiryBufSize, ATAPIInquiry *inquiryBuffer ); + +/*! +@function getDeviceCapacity +@abstract +Returns the block count and block size of the ATA/ATAPI device. +@discussion +This function returns the capacity as returned by the ATA Identify command for ATA devices, +and the Read Device Capacity for ATAPI devices. 
The client should use caution in interpreting +the results of this function since the results are based on the last ATA bus scan. +@param blockMax +Pointer to a (UInt32) to receive the maximum addressable block on the device. Note: The device +block count is one greater than the maximum addressable block number. +@param blockSize +Pointer to a (UInt32) to receive the block size of the device in bytes. +*/ + bool getDeviceCapacity( UInt32 *blockMax, UInt32 *blockSize ); + + +/*! +@function getProtocolsSupported +@abstract +Returns a bit mask of transport protocols this IOATADevice supports. +@discussion +There is no guarantee that a particular device/driver combination will support +all transport protocols defined. The IOATADevice client must use this function +to determine which ATAProtocol values are valid for this device. +@param protocolsSupported +Pointer to a (UInt32) to receive a bit mask of transport protocols supported. See enum +ATAProtocol for a list of transport protocols. +*/ + bool getProtocolsSupported( ATAProtocol *protocolsSupported ); + +/*! +@function getTimingsSupported +@abstract +Returns a bit mask of transport timings this IOATADevice supports +@discussion +There is no guarantee that a particular device/driver combination will support +all transport timings defined. The IOATADevice client must use this function +to determine which ATATimingProtocol values are valid for this device. +@param timingsSupported +Pointer to a (UInt32) to receive a bit mask of transport timings supported. See enum +ATATimingProtocol for a list of transport timings. +*/ + bool getTimingsSupported( ATATimingProtocol *timingsSupported ); + +/*! +@function getTimingSelected +@abstract +Returns the last transport timing selected for the device +@param timingProtocol +Pointer to a (UInt32) to receive the current timing protocol selected. See enum ATATimingProtocol +for a list of transport timing protocols.
+*/ + bool getTimingSelected( ATATimingProtocol *timingProtocol ); + +/*! +@function getTiming +@abstract +Returns the parameters for the transport timing indicated. +@discussion +If the transport/device combination does not support the transport timing +indicated, then this function will return false. See getTimingsSupported() +to obtain a bit mask of supported timings. +@param timingProtocol +Pointer to a (UInt32) which the client has set to the timing protocol whose parameters are +to be obtained. +@param timing +Pointer to a (struct ATATiming) to receive the parameters (cycle timings) for the requested +timing. +*/ + bool getTiming( ATATimingProtocol *timingProtocol, ATATiming *timing ); + +/*! +@function getATAPIPktInt +@abstract +Returns whether an ATAPI device will generate an interrupt prior to signaling it is ready +to request a command packet. +@discussion +A return value of (true) indicates the device will generate a packet transfer interrupt. +This function would not normally need to be used by the IOATADevice client. It is for use +by the transport driver. +*/ + bool getATAPIPktInt(); + +/*! +@function selectTiming +@abstract +Selects the transport timing to be used for this IOATADevice. +@discussion +The IOATADevice client must issue the selectTiming() function when initializing an IOATADevice and +after an ATA Bus Reset event. +@param timingProtocol +The transport timing to be selected for the device. See getTimingsSupported() for transport timings +supported by the transport/device combination. +@param fNotifyMsg +If fNotifyMsg is set to false, selectTiming() operates as a synchronous call, i.e. it blocks the +client until it completes. If the client needs to call this function during an event such as +an ATA Bus Reset, it must use the asynchronous version of this function by setting fNotifyMsg +to true. In this case the client will be notified via a message when this function has +completed.
+*/ + bool selectTiming( ATATimingProtocol timingProtocol, bool fNotifyMsg = false ); + +/*! +@function holdQueue +@abstract +Suspends sending additional IOATACommands to this device. +@discussion +holdQueue() may only be called from the IOATADevice workloop. The client +is guaranteed to be running in this context during a message() notification. + +holdQueue() has no effect on commands already passed to the host adapter. One +or more commands may complete after the queue is held. See notifyIdle() +@param queueType +Perform action on the indicated queue. See enum ATAQueueType in IOATACommand. +*/ + void holdQueue( UInt32 queueType ); + +/*! +@function releaseQueue +@abstract +Resumes sending IOATACommands to the IOATADevice. +@discussion +If the device queue was not held, releaseQueue() has no effect. + +releaseQueue() may only be called from the IOATADevice workloop. This is guaranteed +to be the case after an IOATACommand completion or after a message() notification. +@param queueType +Perform action on the indicated queue. See enum ATAQueueType in IOATACommand. +*/ + void releaseQueue( UInt32 queueType ); + +/*! +@function flushQueue +@abstract +Returns any commands on the IOATADevice's pending work queue. +@discussion +flushQueue() may only be called from the IOATADevice workloop. This is +guaranteed to be the case after an IOATACommand completion or after a +message() notification. + +All pending commands are completed prior to flushQueue() returning to the caller. + +flushQueue() has no effect on commands already passed to the host adapter. One +or more commands may complete after the queue is flushed. See notifyIdle(). +@param queueType +Perform action on the indicated queue. See enum ATAQueueType in IOATACommand. +@param rc +The return code of any flushed commands is set to (rc). +*/ + void flushQueue( UInt32 queueType, IOReturn rc ); + +/*! +@function notifyIdle +@abstract +Notifies the client when all active commands on an ATA device have completed.
+@discussion +notifyIdle() may only be called from the IOATADevice workloop. This is guaranteed +to be the case after an IOATACommand completion or after a message() notification. + +Only one notifyIdle() call may be active. Any outstanding notifyIdle() calls may +be cancelled by calling notifyIdle() with no parameters. +@param target +Object to receive the notification. Normally the client's (this) pointer. +@param callback +Pointer to a callback routine of type CallbackFn. +@param refcon +Pointer to client's private data. +*/ + void notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + +/*! +@function getWorkLoop +@abstract +Returns the IOWorkLoop object that services this IOATADevice. +*/ +IOWorkLoop *getWorkLoop(); + +}; diff --git a/iokit/IOKit/ata/IOATADriver_Reference.h b/iokit/IOKit/ata/IOATADriver_Reference.h new file mode 100644 index 000000000..3cf67c805 --- /dev/null +++ b/iokit/IOKit/ata/IOATADriver_Reference.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*!
+@header IOATAStandardDriver_Reference.h + +This header defines the IOATAStandardDriver class. + +This class provides a standard ATA/ATAPI driver implementation. + +In most cases ATA controller drivers should be implemented to this class since +it relieves the controller driver writer from having to implement most of the ATA/ATAPI +protocol. +*/ + +/*! +@typedef ATAControllerInfo +Parameter structure passed for configure() function. +@field maxDevicesSupported +Maximum ATA devices supported per bus. Normally set to (2). +@field devicePrivateDataSize +Size of per unit storage (in bytes) available to the controller driver. See getDeviceData. +@field commandPrivateDataSize +Size of per command storage (in bytes) available to the controller driver. See getCommandData. +@field disableCancelCommands +Normally set to false by the controller driver. +*/ +typedef struct ATAControllerInfo { + + UInt32 maxDevicesSupported; + + UInt32 devicePrivateDataSize; + UInt32 commandPrivateDataSize; + + bool disableCancelCommands; + + UInt32 reserved[64]; + +} ATAControllerInfo; + +class IOATAStandardDriver : public IOATAStandardController +{ +protected: + +/*! +@function writeATAReg +@abstract +ATA taskfile register write function. +@discussion +The controller driver must implement this function by writing the indicated +ATA register. +@param regIndex +Register index values are defined by enum ATARegs. See IOATADevice_Reference. +@param regValue +Register value. For the ATA Data Register this is a 16-bit value. For other registers, +this is an 8-bit value. +*/ + virtual void writeATAReg( UInt32 regIndex, UInt32 regValue ) = 0; + +/*! +@function readATAReg +ATA taskfile register read function. +@discussion +The controller driver must implement this function by reading the indicated ATA register and returning the register value as a (UInt32). +@param regIndex +Register index values are defined by enum ATARegs. See IOATADevice_Reference. 
+*/ + virtual UInt32 readATAReg( UInt32 regIndex ) = 0; + +/*! +@function selectTiming +Select ATA timing parameters. +@discussion +The controller driver will be called at this entry point to indicate the timing to use +the next time the indicated device is selected. See newDeviceSelected(). +@param deviceNum +The unit number (0/1) of the IOATADevice the timing is to apply to. +@param timingProtocol +The timing protocol to use the next time the device is selected. See enum ATATimingProtocol in +IOATADevice_Reference. + +Note: The controller driver should have calculated and cached the necessary +controller register settings when the timing parameters were presented by the +calculateTiming() function. +*/ + virtual bool selectTiming( ATAUnit deviceNum, ATATimingProtocol timingProtocol ) = 0; + +/*! +@function calculateTiming +Convert ATA timing parameters to controller register settings. +@discussion +The controller driver is presented with proposed timings. If the controller driver +can support the provided timing parameters, it should calculate the corresponding +controller register settings and make them available for future lookup indexed +by the timingProtocol field of the ATATiming structure. If the controller driver +cannot support the indicated timing it should return false as the calculateTiming() +result. +@param deviceNum +The unit number (0/1) of the IOATADevice the timing is to apply to. +@param timing +A pointer to an ATATiming structure containing the parameters for the selected +timing. +*/ + virtual bool calculateTiming( UInt32 deviceNum, ATATiming *timing ) = 0; + +/*! +@function programDma +Program the controller DMA hardware. +@discussion +The controller driver is presented with an IOATACommand and should use the +IOATACommand's getPointers() function to obtain the command's IOMemoryDescriptor, +transfer length and transfer direction.
The controller driver then should +use IOMemoryCursor functions to obtain the physical transfer list for +the data buffer. +@param cmd +Pointer to an IOATACommand. +*/ + virtual bool programDma( IOATAStandardCommand *cmd ); + +/*! +@function startDma +Start the controller DMA hardware. +@discussion +The controller driver should start the controller's DMA hardware with settings +corresponding to the last programDma() function call. +@param cmd +Pointer to an IOATACommand. This will normally be the same command that was previously +presented during the programDma() call. +*/ + virtual bool startDma( IOATAStandardCommand *cmd ); + +/*! +@function stopDma +Stop the controller DMA hardware. +@discussion +The controller driver should stop the controller's DMA hardware and return the +current transfer count. +@param cmd +Pointer to an IOATACommand. This will normally be the same command that was previously +presented during the programDma() call. +*/ + virtual bool stopDma( IOATAStandardCommand *cmd, UInt32 *transferCount ); + +/*! +@function resetDma +Reset the controller DMA hardware. +@discussion +The controller driver should unconditionally stop the controller's DMA hardware. +*/ + virtual bool resetDma(); + +/*! +@function checkDmaActive +Return the state of the controller's DMA hardware. +@discussion +This function should return true if the controller's DMA channel is active, i.e. there +is a non-zero transfer count and false if the transfer count has been met. +*/ + virtual bool checkDmaActive(); + +/*! +@function newDeviceSelected +Indicates that a new ATA unit is to be selected. +@discussion +The controller driver should do any controller operation associated with selecting +a new ata unit. +*/ + virtual void newDeviceSelected( IOATAStandardDevice *newDevice ); + +/*! +@function interruptOccurred +Indicates that a controller interrupt occurred. +@discussion +This function will be called prior to the ATA standard driver begins processing +the interrupt event. 
A controller which 'latches' interrupt events should clear +the interrupting condition and then call the ATA standard driver interrupt handler +by calling super::interruptOccurred(). +*/ + virtual void interruptOccurred(); + +/*! +@function configure +@abstract +Driver configuration/initialization request. +@discussion +The configure() member function is the first call your subclass will +receive. You should provide the information requested in the +ATAControllerInfo structure and enable your hardware for operation. +If your driver initialized successfully, you should return true, otherwise, +your driver should return false. +@param provider +Pointer to an object (usually IOPCIDevice) which represents the bus +your device is attached to. Typically your driver will use functions +supplied by this object to access PCI space on your hardware. See +IOPCIDevice for a description of PCI services. +@param controllerInfo +Pointer to an ATAControllerInfo structure. Your driver should provide +the information requested in this structure prior to returning from +the configure() call. +*/ + virtual bool configure( IOService *provider, ATAControllerInfo *controllerInfo ) = 0; + +}; + diff --git a/iokit/IOKit/ata/IOATAStandardInterface.h b/iokit/IOKit/ata/IOATAStandardInterface.h new file mode 100644 index 000000000..c1a9bf6b0 --- /dev/null +++ b/iokit/IOKit/ata/IOATAStandardInterface.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAStandardInterface.h + * + */ +#ifndef _IOATASTANDARDINTERFACE_H +#define _IOATASTANDARDINTERFACE_H + +#include + +#include +#include +#include +#include +#include +#include + +#endif diff --git a/iokit/IOKit/ata/Makefile b/iokit/IOKit/ata/Makefile new file mode 100644 index 000000000..1eb916703 --- /dev/null +++ b/iokit/IOKit/ata/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = ata +NOT_EXPORT_HEADERS = IOATADevice_Reference.h IOATACommand_Reference.h \ + IOATAController_Reference.h IOATADriver_Reference.h + +INSTINC_SUBDIRS = ata-device ata-standard +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/ata/ata-device/ATACommand.h 
b/iokit/IOKit/ata/ata-device/ATACommand.h new file mode 100644 index 000000000..d1581a2df --- /dev/null +++ b/iokit/IOKit/ata/ata-device/ATACommand.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ATACommand.h + * + */ + +#ifndef _ATACOMMAND_H +#define _ATACOMMAND_H + + +enum ATADeviceType +{ + kATADeviceNone, + kATADeviceATA, + kATADeviceATAPI, +}; + + +enum ATATimingProtocol +{ + kATATimingPIO = (1 << 0), + kATATimingDMA = (1 << 1), + kATATimingUltraDMA33 = (1 << 2), + kATATimingUltraDMA66 = (1 << 3), + kATAMaxTimings = 4, + +}; + +enum ATAProtocol +{ + kATAProtocolNone = 0, + kATAProtocolSetRegs = (1 << 0), + kATAProtocolPIO = (1 << 1), + kATAProtocolDMA = (1 << 2), + kATAProtocolDMAQueued = (1 << 3), + kATAProtocolDMAQueuedRelease = (1 << 4), + + kATAProtocolATAPIPIO = (1 << 16), + kATAProtocolATAPIDMA = (1 << 17), +}; + + + +typedef struct ATATiming +{ + enum ATATimingProtocol timingProtocol; + + UInt32 featureSetting; + + UInt32 mode; + UInt32 minDataAccess; + UInt32 minDataCycle; + UInt32 minCmdAccess; + UInt32 minCmdCycle; + UInt32 reserved_3[9]; +} ATATiming; + + +enum ATATagType +{ + kATATagTypeNone = 0, + kATATagTypeSimple, +}; + +enum ATAReturnCode +{ + kATAReturnSuccess, + kATAReturnNotSupported, + kATAReturnNoResource, + kATAReturnRetryPIO, + kATAReturnBusyError, + kATAReturnInterruptTimeout, + kATAReturnStatusError, + kATAReturnProtocolError, + kATAReturnDMAError, + kATAReturnBusReset, +}; + +#define ATARegtoMask(reg) (1<<(reg)) + +typedef struct ATATaskfile +{ + enum ATAProtocol protocol; + + UInt32 flags; + + UInt8 tagType; + UInt32 tag; + + UInt32 resultmask; + + UInt32 regmask; + UInt32 ataRegs[kMaxATARegs]; + +} ATATaskfile; + + +enum ATACmdFlags +{ + kATACmdFlagTimingChanged = 0x00000001, +}; + +typedef struct ATACDBInfo +{ + + UInt32 cdbFlags; + + UInt32 cdbLength; + UInt8 cdb[16]; + + UInt32 reserved[16]; +} ATACDBInfo; + + +enum ATACDBFlags +{ +}; + +typedef struct ATAResults +{ + IOReturn returnCode; + + UInt32 bytesTransferred; + + enum ATAReturnCode adapterStatus; + + Boolean requestSenseDone; + UInt32 requestSenseLength; + + UInt32 ataRegs[kMaxATARegs]; + + UInt32 reserved[16]; 
+} ATAResults; + + +#endif diff --git a/iokit/IOKit/ata/ata-device/ATADevice.h b/iokit/IOKit/ata/ata-device/ATADevice.h new file mode 100644 index 000000000..af79e5404 --- /dev/null +++ b/iokit/IOKit/ata/ata-device/ATADevice.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ATADevice.h + * + */ + +#ifndef _ATADEVICE_H +#define _ATADEVICE_H + +#define kDefaultInquirySize 255 + +typedef UInt32 ATAUnit; + + +enum ATADeviceTimeouts +{ + kATATimerIntervalmS = 500, + kATAProbeTimeoutmS = 5000, + kATAResetIntervalmS = 3000, + kATAAbortTimeoutmS = 5000, + kATAReqSenseTimeoutmS = 5000, + kATADisableTimeoutmS = 5000, + kATAResetPollIntervalmS = 50, + kATAResetTimeoutmS = 25000, + kATABusyTimeoutmS = 10, + kATADRQTimeoutmS = 10, +}; + +enum ATAClientMessage +{ + kATAClientMsgNone = 0x00005000, + kATAClientMsgDeviceAbort, + kATAClientMsgDeviceReset, + kATAClientMsgBusReset, + kATAClientMsgSelectTiming, + + kATAClientMsgDone = 0x80000000, +}; + +enum ATAQueueType +{ + kATAQTypeNormalQ = 0, + kATAQTypeBypassQ = 1, +}; + +enum ATAQueuePosition +{ + kATAQPositionTail = 0, + kATAQPositionHead = 1, +}; + + +#define kATAPropertyProtocol "ATA Protocol" /* IOCString */ +#define kATAPropertyDeviceNumber "ATA Device Number" /* OSNumber */ +#define kATAPropertyDeviceType "ATA Device Type" /* IOCString */ +#define kATAPropertyDeviceId "ATA Device Id" /* OSNumber */ +#define kATAPropertyModelNumber "ATA Device Model Number" /* IOCString */ +#define kATAPropertyFirmwareRev "ATA Device Firmware Revision" /* IOCString */ +#define kATAPropertyVendorName "ATA Device Vendor Name" /* IOCString */ +#define kATAPropertyProductName "ATA Device Product Name" /* IOCString */ +#define kATAPropertyProductRevision "ATA Device Product Revision" /* IOCString */ +#define kATAPropertyLocation "IOUnit" /* OSNumber */ + +#define kATAMaxProperties 9 + +#define kATAPropertyProtocolATA "ATA" +#define kATAPropertyProtocolATAPI "ATAPI" + +#define kATADeviceTypeDisk "Disk" +#define kATADeviceTypeTape "Tape" +#define kATADeviceTypeCDRom "CDRom" +#define kATADeviceTypeScanner "Scanner" +#define kATADeviceTypeOther "Other" + +#endif diff --git a/iokit/IOKit/ata/ata-device/ATAPublic.h b/iokit/IOKit/ata/ata-device/ATAPublic.h new file mode 
100644 index 000000000..a7da0c7e6 --- /dev/null +++ b/iokit/IOKit/ata/ata-device/ATAPublic.h @@ -0,0 +1,441 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ATAPublic.h + * + */ + +#ifndef _ATAPUBLIC_H +#define _ATAPUBLIC_H + +typedef struct ATAIdentify +{ + UInt16 generalConfiguration; + UInt16 logicalCylinders; + UInt16 reserved_1[1]; + UInt16 logicalHeads; + UInt16 reserved_2[2]; + UInt16 logicalSectorsPerTrack; + UInt16 reserved_3[3]; + char serialNumber[20]; + UInt16 reserved_4[3]; + char firmwareRevision[8]; + char modelNumber[40]; + UInt16 multipleModeSectors; + UInt16 reserved_5[1]; + UInt16 capabilities1; + UInt16 capabilities2; + UInt16 pioMode; + UInt16 reserved_6[1]; + UInt16 validFields; + UInt16 currentLogicalCylinders; + UInt16 currentLogicalHeads; + UInt16 currentLogicalSectorsPerTrack; + UInt16 currentAddressableSectors[2]; + UInt16 currentMultipleModeSectors; + UInt16 userAddressableSectors[2]; + UInt16 reserved_7[1]; + UInt16 dmaModes; + UInt16 advancedPIOModes; + UInt16 minDMACycleTime; + UInt16 recDMACycleTime; + UInt16 minPIOCycleTimeNoIORDY; + UInt16 minPIOCyclcTimeIORDY; + UInt16 reserved_8[2]; + UInt16 busReleaseLatency; + UInt16 serviceLatency; + UInt16 reserved_9[2]; + UInt16 queueDepth; + UInt16 reserved_10[4]; + UInt16 versionMajor; + UInt16 versionMinor; + UInt16 commandSetsSupported1; + UInt16 commandSetsSupported2; + UInt16 commandSetsSupported3; + UInt16 commandSetsEnabled1; + UInt16 commandSetsEnabled2; + UInt16 commandSetsDefault; + UInt16 ultraDMAModes; + UInt16 securityEraseTime; + UInt16 securityEnhancedEraseTime; + UInt16 currentAdvPowerMgtValue; + UInt16 reserved_11[35]; + UInt16 removableMediaSupported; + UInt16 securityStatus; + UInt16 reserved_12[127]; +} ATAIdentify; + + + +typedef struct ATAPIInquiry +{ + unsigned char devType; /* 0 Device type, */ + unsigned char devTypeMod; /* 1 Device type modifier */ + unsigned char version; /* 2 ISO/ECMA/ANSI version */ + unsigned char format; /* 3 Response data format */ + unsigned char length; /* 4 Additional Length */ + unsigned char reserved5; /* 5 Reserved */ + unsigned char 
reserved6; /* 6 Reserved */ + unsigned char flags; /* 7 Capability flags */ + unsigned char vendorName[8]; /* 8-15 Vendor-specific */ + unsigned char productName[16]; /* 16-31 Product id */ + unsigned char productRevision[4]; /* 32-35 Product revision */ + unsigned char vendorSpecific[20]; /* 36-55 Vendor stuff */ + unsigned char moreReserved[40]; /* 56-95 Reserved */ +} ATAInquiry; + +/* + * These are device type qualifiers. We need them to distinguish between "unknown" + * and "missing" devices. + */ +enum +{ + kATAPIDevTypeQualifierConnected = 0x00, /* Exists and is connected */ + kATAPIDevTypeQualifierNotConnected = 0x20, /* Logical unit exists */ + kATAPIDevTypeQualifierReserved = 0x40, + kATAPIDevTypeQualifierMissing = 0x60, /* No such logical unit */ + kATAPIDevTypeQualifierVendorSpecific = 0x80, /* Non-standardized */ + kATAPIDevTypeQualifierMask = 0xE0, +}; + +enum ATAPIDevFlags +{ + kATAPIDevCapRelAdr = 0x80, + kATAPIDevCapWBus32 = 0x40, + kATAPIDevCapWBus16 = 0x20, + kATAPIDevCapSync = 0x10, + kATAPIDevCapLinked = 0x08, + kATAPIDevCapCmdQue = 0x02, + kATAPIDevCapSftRe = 0x01, +}; + +typedef struct ATAPISenseData +{ + unsigned char errorCode; /* 0 Result validity */ + unsigned char segmentNumber; /* 1 Segment number */ + unsigned char senseKey; /* 2 Sense code, flags */ + unsigned char info[4]; /* 3-6 Sense-key specific */ + unsigned char additionalSenseLength; /* 7 Sense length info */ + unsigned char reservedForCopy[4]; /* 8-11 Sense-key specific */ + unsigned char additionalSenseCode; /* 12 What kind of error */ + unsigned char additionalSenseQualifier; /* 13 More error info */ + unsigned char fruCode; /* 14 Field replacable */ + unsigned char senseKeySpecific[2]; /* 15-16 Additional info */ + unsigned char additional[101]; /* 17-26 Additional info */ +} ATASenseData; + +/* + * The high-bit of errorCode signals whether there is a logical + * block. 
The low value signals whether there is a valid sense + */ +enum ATAErrorCode +{ + kATAPISenseHasLBN = 0x80, /* Logical block number set */ + kATAPISenseInfoValid = 0x70, /* Is sense key valid? */ + kATAPISenseInfoMask = 0x70, /* Mask for sense info */ + kATAPISenseCurrentErr = 0x70, /* Error code (byte 0 & 0x7F */ + kATAPISenseDeferredErr = 0x71, /* Error code (byte 0 & 0x7F */ +}; + +/* + * These bits may be set in the sense key + */ +enum ATAPISenseKeyMasks +{ + kATAPISenseKeyMask = 0x0F, + kATAPISenseILI = 0x20, /* Illegal logical Length */ + kATAPISenseEOM = 0x40, /* End of media */ + kATAPISenseFileMark = 0x80, /* End of file mark */ +}; +/* + * ATA sense codes. (Returned after request sense). + */ +enum ATAPISenseKeys +{ + kATAPISenseNone = 0x00, /* No error */ + kATAPISenseRecoveredErr = 0x01, /* Warning */ + kATAPISenseNotReady = 0x02, /* Device not ready */ + kATAPISenseMediumErr = 0x03, /* Device medium error */ + kATAPISenseHardwareErr = 0x04, /* Device hardware error */ + kATAPISenseIllegalReq = 0x05, /* Illegal request for dev. */ + kATAPISenseUnitAtn = 0x06, /* Unit attention (not err) */ + kATAPISenseDataProtect = 0x07, /* Data protection */ + kATAPISenseBlankCheck = 0x08, /* Tape-specific error */ + kATAPISenseVendorSpecific = 0x09, /* Vendor-specific error */ + kATAPISenseCopyAborted = 0x0a, /* Copy request cancelled */ + kATAPISenseAbortedCmd = 0x0b, /* Initiator aborted cmd. 
*/ + kATAPISenseEqual = 0x0c, /* Comparison equal */ + kATAPISenseVolumeOverflow = 0x0d, /* Write past end mark */ + kATAPISenseMiscompare = 0x0e, /* Comparison failed */ +}; + +enum ATAPIStatus +{ + kATAPIStatusGood = 0x00, + kATAPIStatusCheckCondition = 0x02, + kATAPIStatusConditionMet = 0x04, + kATAPIStatusBusy = 0x08, + kATAPIStatusIntermediate = 0x10, + kATAPIStatusIntermediateMet = 0x0a, + kATAPIStatusReservationConfict = 0x18, + kATAPIStatusCommandTerminated = 0x22, + kATAPIStatusQueueFull = 0x28, +}; + + +enum ATAPIDevTypes +{ + kATAPIDevTypeDirect = 0, /* Hard disk (not CD-ROM) */ + kATAPIDevTypeSequential, /* Magtape or DAT */ + kATAPIDevTypePrinter, /* Printer */ + kATAPIDevTypeProcessor, /* Attached processor */ + kATAPIDevTypeWorm, /* Write-once, read multiple */ + kATAPIDevTypeCDROM, /* CD-ROM */ + kATAPIDevTypeScanner, /* Scanner */ + kATAPIDevTypeOptical, /* Optical disk */ + kATAPIDevTypeChanger, /* Jukebox */ + kATAPIDevTypeComm, /* Communication link */ + kATAPIDevTypeGraphicArts0A, + kATAPIDevTypeGraphicArts0B, + kATAPIDevTypeFirstReserved, /* Reserved sequence start */ + kATAPIDevTypeUnknownOrMissing = 0x1F, + kATAPIDevTypeMask = 0x1F, +}; + + +/* + * ATA command codes. Commands defined as ...6, ...10, ...12, are + * six-byte, ten-byte, and twelve-byte variants of the indicated command. + */ + +/* + * These commands are supported for all devices. 
+ */ +enum ATAPICmds +{ + kATAPICmdChangeDefinition = 0x40, + kATAPICmdCompare = 0x39, + kATAPICmdCopy = 0x18, + kATAPICmdCopyAndVerify = 0x3a, + kATAPICmdInquiry = 0x12, + kATAPICmdLogSelect = 0x4c, + kATAPICmdLogSense = 0x4d, + kATAPICmdModeSelect12 = 0x55, + kATAPICmdModeSelect6 = 0x15, + kATAPICmdModeSense12 = 0x5a, + kATAPICmdModeSense6 = 0x1a, + kATAPICmdReadBuffer = 0x3c, + kATAPICmdRecvDiagResult = 0x1c, + kATAPICmdRequestSense = 0x03, + kATAPICmdSendDiagnostic = 0x1d, + kATAPICmdTestUnitReady = 0x00, + kATAPICmdWriteBuffer = 0x3b, + +/* + * These commands are supported by direct-access devices only. + */ + kATAPICmdFormatUnit = 0x04, + kATAPICmdLockUnlockCache = 0x36, + kATAPICmdPrefetch = 0x34, + kATAPICmdPreventAllowRemoval = 0x1e, + kATAPICmdRead6 = 0x08, + kATAPICmdRead10 = 0x28, + kATAPICmdReadCapacity = 0x25, + kATAPICmdReadDefectData = 0x37, + kATAPICmdReadLong = 0x3e, + kATAPICmdReassignBlocks = 0x07, + kATAPICmdRelease = 0x17, + kATAPICmdReserve = 0x16, + kATAPICmdRezeroUnit = 0x01, + kATAPICmdSearchDataEql = 0x31, + kATAPICmdSearchDataHigh = 0x30, + kATAPICmdSearchDataLow = 0x32, + kATAPICmdSeek6 = 0x0b, + kATAPICmdSeek10 = 0x2b, + kATAPICmdSetLimits = 0x33, + kATAPICmdStartStopUnit = 0x1b, + kATAPICmdSynchronizeCache = 0x35, + kATAPICmdVerify = 0x2f, + kATAPICmdWrite6 = 0x0a, + kATAPICmdWrite10 = 0x2a, + kATAPICmdWriteAndVerify = 0x2e, + kATAPICmdWriteLong = 0x3f, + kATAPICmdWriteSame = 0x41, + +/* + * These commands are supported by sequential devices. + */ + kATAPICmdRewind = 0x01, + kATAPICmdWriteFilemarks = 0x10, + kATAPICmdSpace = 0x11, + kATAPICmdLoadUnload = 0x1B, +/* + * ANSI ATA-II for CD-ROM devices. 
+ */ + kATAPICmdReadCDTableOfContents = 0x43, +}; + + +enum ATARegs +{ + /* + * ATA Register ordinals + */ + kATARegData = 0x00, + kATARegFeatures = 0x01, + kATARegSectorCount = 0x02, + kATARegSectorNumber = 0x03, + kATARegCylinderLow = 0x04, + kATARegCylinderHigh = 0x05, + kATARegDriveHead = 0x06, + kATARegCommand = 0x07, + + kATARegError = 0x01, + kATARegStatus = 0x07, + + kATARegDeviceControl = 0x08, + + kATARegAltStatus = 0x08, + + /* + * ATAPI Register ordinals + */ + kATARegATAPIData = 0x00, + kATARegATAPIFeatures = 0x01, + kATARegATAPIIntReason = 0x02, + kATARegATAPIByteCountLow = 0x04, + kATARegATAPIByteCountHigh = 0x05, + kATARegATAPIDeviceSelect = 0x06, + kATARegATAPICommand = 0x07, + + kATARegATAPIError = 0x01, + kATARegATAPIStatus = 0x07, + + kATARegATAPIDeviceControl = 0x08, + + kATARegATAPIAlternateStatus = 0x08, + + kMaxATARegs = 12, +}; + +enum ATASectorCountQDMA +{ + kATATagBit = 0x08, +}; + + +enum ATAPIIntReason +{ + kATAPIIntReasonCD = 0x01, + kATAPIIntReasonIO = 0x02, + kATAPIIntReasonREL = 0x04, + kATAPIIntReasonTagBit = 0x08, + kATAPIIntReasonTagMask = 0xf8, +}; + +enum ATACommand +{ + kATAModeCHS = 0xa0, + kATAModeLBA = 0xe0, + + kATACommandSetFeatures = 0xef, + + kATACommandIdentify = 0xec, + + kATACommandReadSector = 0x20, + + kATACommandService = 0xa2, + + kATACommandATAPIReset = 0x08, + kATACommandATAPIPacket = 0xa0, + kATACommandATAPIIdentify = 0xa1, +}; + +enum ATAFeatures +{ + kATAFeatureTransferMode = 0x03, + kATATransferModePIODefault = 0x00, // SectorCount settings (or'd w/Mode) + kATATransferModePIOwFC = 0x08, + kATATransferModeDMA = 0x20, + kATATransferModeUltraDMA33 = 0x40, + kATATransferModeMask = 0x07, +}; + + +enum ATAStatus +{ + kATAStatusERR = 0x01, + kATAStatusIDX = 0x02, + kATAStatusECC = 0x04, + kATAStatusDRQ = 0x08, + kATAStatusSC = 0x10, + kATAStatusDF = 0x20, + kATAStatusDRDY = 0x40, + kATAStatusBSY = 0x80, + + kATAStatusSERV = 0x10, + kATAStatusREL = 0x20, + + kATAPIStatusCHK = 0x01, + kATAPIStatusDRQ = 0x08, + 
kATAPIStatusSERV = 0x10, + kATAPIStatusDMRD = 0x20, + kATAPIStatusDRDY = 0x40, + kATAPIStatusBSY = 0x80, +}; + +enum ATAError +{ + kATAErrorNM = 0x02, + kATAErrorABRT = 0x04, + kATAErrorMCR = 0x08, + kATAErrorIDNF = 0x10, + kATAErrorMC = 0x20, + kATAErrorWP = 0x40, + + kATAPIErrorILI = 0x01, + kATAPIErrorEOM = 0x02, + kATAPIErrorABRT = 0x04, + kATAPIErrorSenseKeyBit = 0x10, + kATAPIErrorSenseKeyMask = 0xf0, +}; + +enum ATADeviceControl +{ + kATADevControlnIEN = 0x02, + kATADevControlSRST = 0x04, +}; + +enum ATASignatures +{ + kATASignatureSectorCount = 0x01, + kATASignatureSectorNumber = 0x01, + kATASignatureCylinderLow = 0x00, + kATASignatureCylinderHigh = 0x00, + + kATAPISignatureCylinderLow = 0x14, + kATAPISignatureCylinderHigh = 0xeb, +}; + + +#endif diff --git a/iokit/IOKit/ata/ata-device/IOATACommand.h b/iokit/IOKit/ata/ata-device/IOATACommand.h new file mode 100644 index 000000000..ad7737fca --- /dev/null +++ b/iokit/IOKit/ata/ata-device/IOATACommand.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATACommand.h + * + */ +#ifndef _IOATACOMMAND_H +#define _IOATACOMMAND_H + +class IOATADevice; +class IOATACommand; + +class IOATACommand : public IOCDBCommand +{ + OSDeclareAbstractStructors(IOATACommand) + +/*------------------Methods provided to IOCDBCommand users -------------------------*/ +public: + /* + * Set/Get IOMemoryDescriptor object to I/O data buffer or sense data buffer. + */ + virtual void setPointers( IOMemoryDescriptor *desc, + UInt32 transferCount, + bool isWrite, + bool isSense = false ) = 0; + + virtual void getPointers( IOMemoryDescriptor **desc, + UInt32 *transferCount, + bool *isWrite, + bool isSense = false ) = 0; + /* + * Set/Get command timeout (mS) + */ + virtual void setTimeout( UInt32 timeoutmS ) = 0; + virtual UInt32 getTimeout() = 0; + + /* + * Set async callback routine. Specifying no parameters indicates synchronous call. + */ + virtual void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) = 0; + + /* + * Set/Get CDB information. (Generic CDB version) + */ + virtual void setCDB( CDBInfo *cdbInfo ) = 0; + virtual void getCDB( CDBInfo *cdbInfo ) = 0; + + /* + * Get CDB results. (Generic CDB version) + */ + virtual IOReturn getResults( CDBResults *cdbResults ) = 0; + + /* + * Get CDB Device this command is directed to. + */ + virtual IOCDBDevice *getDevice( IOCDBDevice *deviceType ) = 0; + + /* + * Command verbs + */ + virtual bool execute( UInt32 *sequenceNumber = 0 ) = 0; + virtual void abort( UInt32 sequenceNumber ) = 0; + virtual void complete() = 0; + + /* + * Get pointers to client and command data. + */ + virtual void *getCommandData() = 0; + virtual void *getClientData() = 0; + + /* + * Get unique sequence number assigned to command. + */ + virtual UInt32 getSequenceNumber() = 0; + +/*------------------ Additional methods provided to IOATACommand users -------------------------*/ +public: + /* + * Set/Get ATA taskfile information. 
+ */ + virtual void setTaskfile( ATATaskfile *taskfile ) = 0; + virtual void getTaskfile( ATATaskfile *taskfile ) = 0; + virtual ATAProtocol getProtocol() = 0; + virtual UInt32 getResultMask() = 0; + virtual UInt32 getFlags() = 0; + + /* + * Set/Get CDB information. (ATA specific version). + */ + virtual void setCDB( ATACDBInfo *scsiCmd ) = 0; + virtual void getCDB( ATACDBInfo *scsiCmd ) = 0; + + /* + * Get/Set CDB results. (ATA specific version). + */ + virtual IOReturn getResults( ATAResults *results ) = 0; + virtual void setResults( ATAResults *results ) = 0; + + /* + * Get ATA Device this command is directed to. + */ + virtual IOATADevice *getDevice( IOATADevice *deviceType ) = 0; + + /* + * Get ATA Target/Lun for this command. + */ + virtual ATAUnit getUnit() = 0; + + /* + * Get/Set queue routing for this command. + */ + virtual void setQueueInfo( UInt32 forQueueType = kATAQTypeNormalQ, UInt32 forQueuePosition = kATAQPositionTail ) = 0; + virtual void getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ) = 0; + +}; + +#endif diff --git a/iokit/IOKit/ata/ata-device/IOATADevice.h b/iokit/IOKit/ata/ata-device/IOATADevice.h new file mode 100644 index 000000000..20396c5e2 --- /dev/null +++ b/iokit/IOKit/ata/ata-device/IOATADevice.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATADevice.h + * + * + * Methods in this header provide information about the ATA device + * the device client driver is submitting the ATACommand(s) to. + * + * Note: ATACommand(s) are allocated and freed by methods in this class. + * The remaining methods to setup and submit ATACommands are defined in + * IOATACommand.h + */ + +#ifndef _IOATADEVICE_H +#define _IOATADEVICE_H + +class IOATACommand; + +class IOATADevice : public IOCDBDevice +{ + OSDeclareAbstractStructors(IOATADevice) + +/*------------------Methods provided to IOCDBDevice clients-----------------------*/ +public: + + /* + * Allocate a CDB Command + */ + virtual IOCDBCommand *allocCommand( IOCDBDevice *cdbDevice, UInt32 clientDataSize = 0 ) = 0; + + /* + * Abort all outstanding commands on this device + */ + virtual void abort() = 0; + + /* + * Reset device (also aborts all outstanding commands) + */ + virtual void reset() = 0; + + /* + * Obtain information about this device + */ + virtual void getInquiryData( void *inquiryBuffer, + UInt32 inquiryBufSize, + UInt32 *inquiryDataSize ) = 0; + +/*------------------Additional methods provided to IOATADevice clients-----------------------*/ +public: + /* + * Allocate a ATACommand + */ + virtual IOATACommand *allocCommand( IOATADevice *scsiDevice, UInt32 clientDataSize = 0 ) = 0; + + /* + * Obtain information about this device + */ + virtual ATAUnit getUnit() = 0; + virtual ATADeviceType getDeviceType() = 0; + virtual bool getIdentifyData( ATAIdentify 
*identifyBuffer ) = 0; + virtual bool getInquiryData( UInt32 inquiryBufSize, ATAPIInquiry *inquiryBuffer ) = 0; + virtual bool getDeviceCapacity( UInt32 *blockMax, UInt32 *blockSize ) = 0; + virtual bool getProtocolsSupported( ATAProtocol *protocolsSupported ) = 0; + virtual bool getTimingsSupported( ATATimingProtocol *timingsSupported ) = 0; + virtual bool getTimingSelected( ATATimingProtocol *timingProtocol ) = 0; + virtual bool getTiming( ATATimingProtocol *timingProtocol, ATATiming *timing ) = 0; + virtual bool getATAPIPktInt() = 0; + + /* + * Select default device timing for this device + */ + virtual bool selectTiming( ATATimingProtocol timingProtocol, bool fNotifyMsg = false ) = 0; + + /* + * Queue management commands + */ + virtual void holdQueue( UInt32 queueType ) = 0; + virtual void releaseQueue( UInt32 queueType ) = 0; + virtual void flushQueue( UInt32 queueType, IOReturn rc ) = 0; + virtual void notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) = 0; + +}; + +#define kIOATADevice ((IOATADevice *)0) + +#endif diff --git a/iokit/IOKit/ata/ata-device/Makefile b/iokit/IOKit/ata/ata-device/Makefile new file mode 100644 index 000000000..9b8dbeafa --- /dev/null +++ b/iokit/IOKit/ata/ata-device/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = ata/ata-device +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = 
+INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/ata/ata-standard/ATAStandardController.h b/iokit/IOKit/ata/ata-standard/ATAStandardController.h new file mode 100644 index 000000000..548f53e49 --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/ATAStandardController.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * ATAParallelController.h + * + */ + +#ifndef _ATASTANDARDCONTROLLER_H +#define _ATASTANDARDCONTROLLER_H + +class IOSyncer; + +typedef struct ATAControllerInfo +{ + UInt32 maxDevicesSupported; + + UInt32 devicePrivateDataSize; + UInt32 commandPrivateDataSize; + + bool disableCancelCommands; + + UInt32 reserved[64]; + +} ATAControllerInfo; + +/* + * Private for IOATAClass + */ +enum WorkLoopReqType +{ + kWorkLoopInitDevice = 1, + kWorkLoopReleaseDevice, +}; + +enum DispatchAction +{ + kDispatchNextCommand = 1, + kDispatchNextDevice, + kDispatchStop, +}; + +typedef struct WorkLoopRequest +{ + WorkLoopReqType type; + IOSyncer * sync; + bool rc; +} WorkLoopRequest; + +#endif + diff --git a/iokit/IOKit/ata/ata-standard/ATAStandardPrivate.h b/iokit/IOKit/ata/ata-standard/ATAStandardPrivate.h new file mode 100644 index 000000000..e18c7d3ea --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/ATAStandardPrivate.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ATAPrivate.h + * + */ + +typedef struct EndianTable +{ + UInt32 size; + UInt32 type; +} EndianTable; + +enum +{ + identifyWords_54to58_Valid = 0x0001, + identifyWords_64to70_Valid = 0x0002, + identifyWords_88to88_Valid = 0x0004, + + advPIOModes_Mode3_Supported = 0x0001, + advPIOModes_Mode4_Supported = 0x0002, + + dmaModes_Mode0_Supported = 0x0001, + dmaModes_Mode1_Supported = 0x0002, + dmaModes_Mode2_Supported = 0x0004, + dmaModes_Supported = 0x0007, + + ultraDMAModes_Mode0_Supported = 0x0001, + ultraDMAModes_Mode1_Supported = 0x0002, + ultraDMAModes_Mode2_Supported = 0x0004, + ultraDMAModes_Supported = 0x001f, + + commandSetsSupported2_ValidMask = 0xC000, + commandSetsSupported2_Valid = 0x4000, + + commandSetsSupported2_DMAQueued = 0x0002, + + commandSetsSupported3_ValidMask = 0xC000, + commandSetsSupported3_Valid = 0x4000, + + commandSetsEnabled2_DMAQueued = 0x0002, +}; + +enum +{ + kATAPIPktProtocolMask = 0x0060, + kATAPIPktProtocolSlowDRQ = 0x0000, + kATAPIPktProtocolIntDRQ = 0x0020, + kATAPIPktProtocolFastDRQ = 0x0040, +}; + +typedef struct +{ + UInt32 minDataAccess; + UInt32 minDataCycle; + +} ATAModeTable; + diff --git a/iokit/IOKit/ata/ata-standard/ATAStandardTarget.h b/iokit/IOKit/ata/ata-standard/ATAStandardTarget.h new file mode 100644 index 000000000..1a39c8bce --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/ATAStandardTarget.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * ATAParallelTarget.h + * + */ + +#ifndef _ATASTANDARDTARGET_H +#define _ATASTANDARDTARGET_H + +class IOATAStandardDevice; + +typedef struct ATATarget +{ + IOATAStandardDevice *device; + UInt32 state; + UInt32 flags; +} ATATarget; + +enum +{ + kStateIdle, + kStateIssue, + kStatePending, + kStateActive, +}; + +enum CDBFlagsInternal +{ + kCDBFlagsEnableTagQueuing = 0x80000000, +}; + + +enum ATACommandType +{ + kATACommandNone = 0, + kATACommandExecute, + kATACommandReqSense, + kATACommandAbort, + kATACommandAbortAll, + kATACommandDeviceReset, + kATACommandBusReset, + kATACommandCancel, +}; + + +#endif diff --git a/iokit/IOKit/ata/ata-standard/IOATAStandardCommand.h b/iokit/IOKit/ata/ata-standard/IOATAStandardCommand.h new file mode 100644 index 000000000..a989824e2 --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/IOATAStandardCommand.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATACommand.h + * + */ +#ifndef _IOATASTANDARDCOMMAND_H +#define _IOATASTANDARDCOMMAND_H + +class IOATAStandardDevice; +class IOATAStandardCommand; +class IOSyncer; +class IOATAStandardDriver; + +class IOATAStandardCommand : public IOATACommand +{ + OSDeclareDefaultStructors(IOATAStandardCommand) + + friend class IOATAStandardController; + friend class IOATAStandardDevice; + friend class IOATAStandardDriver; + +/*------------------Methods provided to IOCDBCommand users -------------------------*/ +public: + /* + * Set/Get IOMemoryDescriptor object to I/O data buffer or sense data buffer. + */ + void setPointers( IOMemoryDescriptor *desc, + UInt32 transferCount, + bool isWrite, + bool isSense = false ); + + void getPointers( IOMemoryDescriptor **desc, + UInt32 *transferCount, + bool *isWrite, + bool isSense = false ); + /* + * Set/Get command timeout (mS) + */ + void setTimeout( UInt32 timeoutmS ); + UInt32 getTimeout(); + + /* + * Set async callback routine. Specifying no parameters indicates synchronous call. + */ + void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + + /* + * Set/Get CDB information. (Generic CDB version) + */ + void setCDB( CDBInfo *cdbInfo ); + void getCDB( CDBInfo *cdbInfo ); + + /* + * Get CDB results. (Generic CDB version) + */ + IOReturn getResults( CDBResults *cdbResults ); + + /* + * Get CDB Device this command is directed to. 
+ */ + IOCDBDevice *getDevice( IOCDBDevice *deviceType ); +// #define kIOCDBDevice ((IOCDBDevice *)0) + + /* + * Command verbs + */ + bool execute( UInt32 *sequenceNumber = 0 ); + void abort( UInt32 sequenceNumber ); + void complete(); + + /* + * Get pointers to client and command data. + */ + void *getCommandData(); + void *getClientData(); + + /* + * Get unique sequence number assigned to command. + */ + UInt32 getSequenceNumber(); + +/*------------------ Additional methods provided to IOATACommand users -------------------------*/ +public: + /* + * Set/Get ATA taskfile information. + */ + void setTaskfile( ATATaskfile *taskfile ); + void getTaskfile( ATATaskfile *taskfile ); + ATAProtocol getProtocol(); + UInt32 getResultMask(); + UInt32 getFlags(); + + /* + * Set/Get CDB information. (ATA specific version). + */ + void setCDB( ATACDBInfo *ataCmd ); + void getCDB( ATACDBInfo *ataCmd ); + + /* + * Get/Set CDB results. (ATA specific version). + */ + IOReturn getResults( ATAResults *results ); + void setResults( ATAResults *results ); + + /* + * Get ATA Device this command is directed to. + */ + IOATAStandardDevice *getDevice( IOATAStandardDevice *deviceType ); +// #define kIOATADevice ((IOATADevice *)0) + + + /* + * Get ATA Target/Lun for this command. + */ + ATAUnit getUnit(); + + /* + * Get/Set queue routing for this command. + */ + void setQueueInfo( UInt32 forQueueType = kATAQTypeNormalQ, UInt32 forQueuePosition = kATAQPositionTail ); + void getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ); + + /* + * Get command type / Get original command. + * + * These methods are provided for the controller class to identify and relate commands. + * They are not usually of interest to the client side. 
+ */ + UInt32 getCmdType(); + IOATAStandardCommand *getOriginalCmd(); + +/*------------------Methods private to the IOATACommand class-------------------------*/ +public: + IOATADevice *getDevice( IOATADevice *deviceType ); + void free(); + +private: + ATACommandType cmdType; + + IOATAStandardController *controller; + IOATAStandardDevice *device; + + queue_head_t *list; + queue_chain_t nextCommand; + + ATATaskfile taskfile; + + ATAResults results; + + ATACDBInfo ataCmd; + + UInt32 timeout; + UInt32 timer; + + UInt8 queueType; + UInt8 queuePosition; + + IOMemoryDescriptor *xferDesc; + UInt32 xferCount; + UInt32 xferDirection; + + UInt32 senseLength; + IOMemoryDescriptor *senseData; + + IOATAStandardCommand *origCommand; + + union + { + struct + { + UInt32 reserved; + IOSyncer * lock; + } sync; + struct + { + CallbackFn callback; + void *target; + void *refcon; + } async; + } completionInfo; + + UInt32 dataSize; + void *dataArea; + void *commandPrivateData; + void *clientData; + + UInt32 sequenceNumber; +}; + +#endif diff --git a/iokit/IOKit/ata/ata-standard/IOATAStandardController.h b/iokit/IOKit/ata/ata-standard/IOATAStandardController.h new file mode 100644 index 000000000..2f9d54ab4 --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/IOATAStandardController.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAController.h + * + * Methods in this header list the methods an ATA controller driver must implement. + */ +#ifndef _IOATASTANDARDCONTROLLER_H +#define _IOATASTANDARDCONTROLLER_H + +#include +#include +#include +#include +#include +#include + +class IOATAStandardDevice; +class IOATAStandardCommand; +class IOATAStandardDriver; + +class IOATAStandardController : public IOService +{ + OSDeclareDefaultStructors(IOATAStandardController) + + friend class IOATAStandardCommand; + friend class IOATAStandardDevice; + friend class IOATAStandardDriver; + +/*------------------Methods provided by IOATAStandardController---------------------------------*/ +public: + IOReturn reset(); + +protected: + void enableCommands(); + void disableCommands(); + void disableCommands( UInt32 disableTimeoutmS ); + + void rescheduleCommand( IOATAStandardCommand *forATACmd ); + + void resetStarted(); + void resetOccurred(); + + IOATAStandardCommand *findCommandWithNexus( IOATAStandardDevice *forDevice, UInt32 tagValue = (UInt32)-1 ); + + void *getDeviceData( ATAUnit forUnit ); + + virtual IOWorkLoop *getWorkLoop() const; + + UInt32 getCommandCount(); + void setCommandLimit( IOATAStandardDevice *device, UInt32 commandLimit ); + + void suspendDevice( IOATAStandardDevice *forATADevice ); + void resumeDevice( IOATAStandardDevice *forATADevice ); + IOATAStandardDevice *selectDevice(); + + bool getTiming( ATAUnit unit, ATATimingProtocol *timingProtocol ); + + +/*------------------Methods the 
controller subclass must implement-----------------------*/ +protected: + /* + * Initialize controller hardware. + * + * Note: The controller driver's configure() method will be called prior to any other + * methods. If the controller driver returns successfully from this method it + * should be ready to accept any other method call listed. + */ + virtual bool configure( IOService *provider, ATAControllerInfo *controllerInfo ) = 0; + + /* + * Driver must indicate which ATA protocols it supports. + */ + virtual bool getProtocolsSupported( ATAProtocol *protocolsSupported ) = 0; + + /* + * Bus/target commands + * + */ + virtual void executeCommand( IOATAStandardCommand *forATACmd ) = 0; + virtual void cancelCommand( IOATAStandardCommand *forATACmd ) = 0; + virtual void resetCommand( IOATAStandardCommand *forATACmd ) = 0; + virtual void abortCommand( IOATAStandardCommand *forATACmd ) = 0; + + /* + * Methods to set timing for individual devices + */ + virtual bool calculateTiming( UInt32 deviceNum, ATATiming *timing ) = 0; + +/*------------------Optional methods the controller subclass may implement-----------------------*/ +protected: + /* + * These methods notify the IOATAStandardController subclass, that a target or lun is about to be + * probed. The subclass should initialize its per-target or per-lun data when called at these + * methods. If the subclass (for some reason) wants to prevent probing of a target or lun, it + * can return false to the corresponding allocate*() call. 
+ */ + virtual bool allocateDevice( ATAUnit unit ); + virtual void deallocateDevice( ATAUnit unit ); + + virtual void disableTimeoutOccurred(); + + /* + * + */ + virtual void enableControllerInterrupts(); + virtual void disableControllerInterrupts(); + +/*------------------Methods private to the IOATAStandardController class----------------------*/ + +public: + bool start( IOService *provider ); + void free(); + +private: + void initQueues(); + bool scanATABus(); + void resetATABus(); + + bool createDeviceNubs(); + bool probeDeviceNubs(); + bool registerDeviceNubs(); + bool initTimings(); + bool matchNubWithPropertyTable( IOService *nub, OSDictionary *table ); + + bool resetBus(); + + + bool initDevice( IOATAStandardDevice *device ); + void releaseDevice( IOATAStandardDevice *device ); + + bool workLoopRequest( WorkLoopReqType type, UInt32 p1=0, UInt32 p2=0, UInt32 p3=0 ); + void workLoopProcessRequest( WorkLoopRequest *workLoopReq, void *p1, void *p2, void *p3 ); + + void addDevice( IOATAStandardDevice *forDevice ); + void deleteDevice( IOATAStandardDevice *forDevice ); + + void timer( IOTimerEventSource *); + + void dispatchRequest(); + void dispatch(); + + bool checkBusReset(); + + void completeCommand( IOATAStandardCommand *forATACmd ); + + bool createWorkLoop(); + bool configureController(); + + IOATAStandardCommand *allocCommand( UInt32 clientDataSize ); + +private: + + UInt32 sequenceNumber; + + UInt32 commandCount; + UInt32 commandLimit; + UInt32 commandLimitSave; + + UInt32 disableTimer; + bool commandDisable; + + UInt32 busResetState; + IOATAStandardCommand *resetCmd; + UInt32 resetTimer; + + IOATAStandardCommand *noDisconnectCmd; + + ATAControllerInfo controllerInfo; + ATATarget *targets; + + IOWorkLoop *workLoop; + IOTimerEventSource *timerEvent; + IOInterruptEventSource *dispatchEvent; + IOCommandGate *workLoopReqGate; + + IOService *provider; +}; + +#endif diff --git a/iokit/IOKit/ata/ata-standard/IOATAStandardDevice.h 
b/iokit/IOKit/ata/ata-standard/IOATAStandardDevice.h new file mode 100644 index 000000000..b630fd40c --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/IOATAStandardDevice.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATAStandardDevice.h + * + * + * Methods in this header provide information about the ATA device + * the device client driver is submitting the ATACommand(s) to. + * + * Note: ATACommand(s) are allocated and freed by methods in this class. 
+ * The remaining methods to setup and submit ATACommands are defined in + * IOATACommand.h + */ + +#ifndef _IOATASTANDARDDEVICE_H +#define _IOATASTANDARDDEVICE_H + +class IOATAStandardController; + +class IOATAStandardDevice : public IOATADevice +{ + OSDeclareDefaultStructors(IOATAStandardDevice) + + friend class IOATAStandardCommand; + friend class IOATAStandardController; + +/*------------------Methods provided to IOCDBDevice clients-----------------------*/ +public: + + /* + * Allocate a CDB Command + */ + IOCDBCommand *allocCommand( IOCDBDevice *deviceType, UInt32 clientDataSize = 0 ); + + /* + * Abort all outstanding commands on this device + */ + void abort(); + + /* + * Reset device (also aborts all outstanding commands) + */ + void reset(); + + /* + * Obtain information about this device + */ + void getInquiryData( void *inquiryBuffer, + UInt32 inquiryBufSize, + UInt32 *inquiryDataSize ); + +/*------------------Additional methods provided to IOATADevice clients-----------------------*/ +public: + /* + * Allocate a ATACommand + */ + IOATAStandardCommand *allocCommand( IOATAStandardDevice *deviceType, UInt32 clientDataSize = 0 ); + + /* + * Obtain information about this device + */ + ATAUnit getUnit(); + ATADeviceType getDeviceType(); + bool getIdentifyData( ATAIdentify *identifyBuffer ); + bool getInquiryData( UInt32 inquiryBufSize, ATAPIInquiry *inquiryBuffer ); + bool getDeviceCapacity( UInt32 *blockMax, UInt32 *blockSize ); + bool getProtocolsSupported( ATAProtocol *protocolsSupported ); + bool getTimingsSupported( ATATimingProtocol *timingsSupported ); + bool getTimingSelected( ATATimingProtocol *timingProtocol ); + bool getTiming( ATATimingProtocol *timingProtocol, ATATiming *timing ); + bool getATAPIPktInt(); + + /* + * Select default device timing for this device + */ + bool selectTiming( ATATimingProtocol timingProtocol, bool fNotifyMsg = false ); + + /* + * Queue management commands + */ + void holdQueue( UInt32 queueType ); + void releaseQueue( 
UInt32 queueType ); + void flushQueue( UInt32 queueType, IOReturn rc ); + void notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + + /* + * + */ + IOWorkLoop *getWorkLoop() const; + +/*------------------Methods private to the IOATADevice class----------------*/ +public: + bool open( IOService *forClient, IOOptionBits options, void *arg ); + void close( IOService *forClient, IOOptionBits options ); + bool init( IOATAStandardController *forController, ATAUnit forUnit ); + void free(); + + bool matchPropertyTable( OSDictionary * table ); + IOService *matchLocation( IOService * client ); + + IOATACommand *allocCommand( IOATADevice *deviceType, UInt32 clientDataSize = 0 ); + +private: + void submitCommand( UInt32 cmdType, IOATAStandardCommand *ataCmd, UInt32 cmdSequenceNumber = 0 ); + void receiveCommand( UInt32 cmdType, IOATAStandardCommand *ataCmd, UInt32 cmdSequenceNumber, void *p3 ); + + IOReturn probeDevice(); + ATADeviceType probeDeviceType(); + + IOReturn doSpinUp(); + IOReturn doIdentify( void **dataPtr ); + IOReturn doSectorCommand( ATACommand ataCmd, UInt32 ataLBA, UInt32 ataCount, void **dataPtr ); + IOReturn doInquiry( void **dataPtr ); + IOReturn doTestUnitReady(); + IOReturn doReadCapacity( void *data ); + + bool getATATimings(); + + void selectTimingDone( IOATAStandardCommand *ataCmd ); + + void setupTarget(); + + void dispatchRequest(); + bool dispatch( UInt32 *dispatchAction ); + + void abortAllCommands( ATACommandType abortCmdType ); + + IOATAStandardCommand *findCommandWithNexus( UInt32 tagValue ); + + void abortCommand( IOATAStandardCommand *ataCmd, UInt32 cmdSequenceNumber ); + void completeCommand( IOATAStandardCommand *cmd ); + + void checkIdleNotify(); + + void executeCommandDone( IOATAStandardCommand *ataCmd ); + void executeReqSenseDone( IOATAStandardCommand *ataCmd ); + void abortCommandDone( IOATAStandardCommand *ataCmd ); + void cancelCommandDone( IOATAStandardCommand *ataCmd ); + void finishCommand( 
IOATAStandardCommand *ataCmd ); + + OSDictionary *createProperties(); + bool addToRegistry( OSDictionary *propTable, OSObject *regObj, char *key, bool doRelease = true ); + void stripBlanks( char *d, char *s, UInt32 l ); + + void endianConvertData( void *data, void *endianTable ); + + bool checkDeviceQueue( UInt32 *dispatchAction ); + void checkNegotiate( IOATAStandardCommand *ataCmd ); + bool checkTag( IOATAStandardCommand *ataCmd ); + bool checkReqSense(); + bool checkAbortQueue(); + void checkCancelQueue(); + + bool allocTag( UInt32 *tagId ); + void freeTag( UInt32 tagId ); + + void timer(); + + void resetOccurred( ATAClientMessage clientMsg = kATAClientMsgNone ); + void resetComplete(); + + void rescheduleCommand( IOATAStandardCommand *ataCmd ); + + void suspend(); + void resume(); + + void addCommand( queue_head_t *list, IOATAStandardCommand *ataCmd ); + void stackCommand( queue_head_t *list, IOATAStandardCommand *ataCmd ); + void deleteCommand( queue_head_t *list, IOATAStandardCommand *ataCmd, IOReturn rc = kIOReturnSuccess ); + IOATAStandardCommand *checkCommand( queue_head_t *list ); + IOATAStandardCommand *getCommand( queue_head_t *list ); + void moveCommand( queue_head_t *fromList, + queue_head_t *toList, + IOATAStandardCommand *ataCmd, + IOReturn rc = kIOReturnSuccess ); + void moveAllCommands( queue_head_t *fromList, queue_head_t *toList, IOReturn rc = kIOReturnSuccess ); + bool findCommand( queue_head_t *list, IOATAStandardCommand *findATACmd ); + void purgeAllCommands( queue_head_t *list, IOReturn rc ); + +private: + ATAUnit unit; + ATATarget *target; + + IOATAStandardController *controller; + IOCommandGate *deviceGate; + + IOService *client; + IORWLock * clientSem; + + queue_head_t deviceList; + queue_head_t bypassList; + queue_head_t activeList; + queue_head_t abortList; + queue_head_t cancelList; + + ATACommandType abortCmdPending; + + UInt32 reqSenseState; + UInt32 abortState; + UInt32 cancelState; + UInt32 negotiateState; + + IOATAStandardCommand 
*reqSenseOrigCmd; + + IOATAStandardCommand *reqSenseCmd; + IOATAStandardCommand *abortCmd; + IOATAStandardCommand *cancelCmd; + IOATAStandardCommand *probeCmd; + + UInt32 normalQHeld; + UInt32 bypassQHeld; + + bool idleNotifyActive; + CallbackFn idleNotifyCallback; + void *idleNotifyTarget; + void *idleNotifyRefcon; + + bool isSuspended; + AbsoluteTime suspendTime; + + UInt32 commandCount; + UInt32 commandLimit; + UInt32 commandLimitSave; + + UInt32 maxTags; + UInt32 tagArraySize; + UInt32 *tagArray; + + ATADeviceType deviceType; + + UInt32 protocolsSupported; + UInt32 atapiPktInt; + + ATAIdentify *identifyData; + + ATAInquiry *inquiryData; + UInt32 inquiryDataSize; + + ATATimingProtocol currentTiming; + + UInt32 numTimings; + ATATiming ataTimings[kATAMaxTimings]; + + void *devicePrivateData; +}; + +#define kIOATAStandardDevice ((IOATAStandardDevice *)0) + +#endif diff --git a/iokit/IOKit/ata/ata-standard/IOATAStandardDriver.h b/iokit/IOKit/ata/ata-standard/IOATAStandardDriver.h new file mode 100644 index 000000000..a77b51df0 --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/IOATAStandardDriver.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOATATStandardDriver.h + * + */ +#ifndef _IOATASTANDARDDRIVER_H +#define _IOATASTANDARDDRIVER_H + +class IOATAStandardDriver : public IOATAStandardController +{ + OSDeclareAbstractStructors( IOATAStandardDriver ) + +/* + * Methods that subclasses IOATAStandardDriver must implement. + */ +protected: + virtual void writeATAReg( UInt32 regIndex, UInt32 regValue ) = 0; + virtual UInt32 readATAReg( UInt32 regIndex ) = 0; + + virtual bool selectTiming( ATAUnit deviceNum, ATATimingProtocol timingProtocol ) = 0; + + virtual bool programDma( IOATAStandardCommand *cmd ); + virtual bool startDma( IOATAStandardCommand *cmd ); + virtual bool stopDma( IOATAStandardCommand *cmd, UInt32 *transferCount ); + virtual bool resetDma(); + virtual bool checkDmaActive(); + +/* + * Methods that subclasses of IOATAStandardDriver can optionally implement. + */ + virtual void newDeviceSelected( IOATAStandardDevice *newDevice ); + virtual bool getProtocolsSupported( ATAProtocol *forProtocol ); + +/* + * Methods provided to subclasses of IOATAStandardDriver. 
+ */ + virtual void interruptOccurred(); + + virtual void resetCommand( IOATAStandardCommand *cmd ); + virtual void executeCommand( IOATAStandardCommand *cmd ); + virtual void abortCommand( IOATAStandardCommand *cmd ); + virtual void cancelCommand( IOATAStandardCommand *cmd ); + +/*------------------Methods private to the IOATAStandardDriver class----------------*/ + +private: + void processATAPioInt(); + void processATADmaInt(); + void processATAPIPioInt(); + void processATAPIDmaInt(); + void processATADmaQueuedInt(); + + ATAReturnCode readATAPIDevice( UInt32 n ); + ATAReturnCode writeATAPIDevice( UInt32 n ); + ATAReturnCode sendATAPIPacket( IOATAStandardCommand *cmd ); + + IOReturn getIOReturnCode( ATAReturnCode code ); + + void doProtocolSetRegs( IOATAStandardCommand *cmd ); + void doATAReset( IOATAStandardCommand *cmd ); + void checkATAResetComplete(); + void doATAProtocolPio( IOATAStandardCommand *cmd ); + void doATAProtocolDma( IOATAStandardCommand *cmd ); + void doATAProtocolDmaQueued( IOATAStandardCommand *cmd ); + void doATAPIProtocolPio( IOATAStandardCommand *cmd ); + void doATAPIProtocolDma( IOATAStandardCommand *cmd ); + void doProtocolNotSupported( IOATAStandardCommand *cmd ); + + bool selectDrive( UInt32 driveHeadReg ); + + void completeCmd( IOATAStandardCommand *cmd, ATAReturnCode returnCode, UInt32 bytesTransferred = 0 ); + void completeCmd( IOATAStandardCommand *cmd ); + + void updateCmdStatus( IOATAStandardCommand *cmd, ATAReturnCode returnCode, UInt32 bytesTransferred ); + + bool waitForStatus( UInt32 statusBitsOn, UInt32 statusBitsOff, UInt32 timeoutmS ); + bool waitForAltStatus( UInt32 statusBitsOn, UInt32 statusBitsOff, UInt32 timeoutmS ); + ATAReturnCode waitForDRQ( UInt32 timeoutmS ); + + bool start(IOService *provider); + IOReturn setPowerState(unsigned long powerStateOrdinal, IOService* whatDevice); + +protected: + IOATAStandardDevice *currentDevice; + ATAUnit currentUnit; + ATAProtocol currentProtocol; + +private: + IOMemoryDescriptor 
*xferDesc; + bool xferIsWrite; + UInt32 xferCount; + UInt32 xferRemaining; + bool dmaActive; + + IOTimerEventSource *resetPollEvent; + IOATAStandardCommand *resetCmd; + AbsoluteTime resetTimeout; + + bool wakingUpFromSleep; +}; + + +#endif diff --git a/iokit/IOKit/ata/ata-standard/Makefile b/iokit/IOKit/ata/ata-standard/Makefile new file mode 100644 index 000000000..215b89431 --- /dev/null +++ b/iokit/IOKit/ata/ata-standard/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = ata/ata-standard +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/cdb/CDBCommand.h b/iokit/IOKit/cdb/CDBCommand.h new file mode 100644 index 000000000..38e2b3338 --- /dev/null +++ b/iokit/IOKit/cdb/CDBCommand.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * CDBCommand.h + * + */ + +#ifndef _CDBCOMMAND_H +#define _CDBCOMMAND_H + +typedef struct CDBInfo +{ + + UInt32 cdbFlags; + + UInt32 cdbLength; + UInt8 cdb[16]; + + UInt32 reserved[16]; +} CDBInfo; + +typedef struct CDBResults +{ + IOReturn returnCode; + + UInt32 bytesTransferred; + + Boolean requestSenseDone; + UInt32 requestSenseLength; + + UInt32 reserved[16]; +} CDBResults; + + +#if __cplusplus +enum _CDBFlags +{ +}; +#endif /* !__cplusplus */ + + +#endif + diff --git a/iokit/IOKit/cdb/CDBPublic.h b/iokit/IOKit/cdb/CDBPublic.h new file mode 100644 index 000000000..71c493ee3 --- /dev/null +++ b/iokit/IOKit/cdb/CDBPublic.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * CDBPublic.h + * + */ + +#ifndef _CDBPUBLIC_H +#define _CDBPUBLIC_H + +#include + +#endif diff --git a/iokit/IOKit/cdb/IOCDBCommand.h b/iokit/IOKit/cdb/IOCDBCommand.h new file mode 100644 index 000000000..c43764a15 --- /dev/null +++ b/iokit/IOKit/cdb/IOCDBCommand.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOCDBCommand.h + * + */ +#ifndef _IOCDBCOMMAND_H +#define _IOCDBCOMMAND_H + +#include + +typedef void (*CallbackFn)(void *target, void *refcon ); + +class IOCDBDevice; + +class IOCDBCommand : public IOCommand +{ + OSDeclareAbstractStructors(IOCDBCommand) + +/*------------------Methods provided to IOCDBCommand users -------------------------*/ +public: + /* + * Set/Get IOMemoryDescriptor object to I/O data buffer or sense data buffer. + */ + virtual void setPointers( IOMemoryDescriptor *desc, + UInt32 transferCount, + bool isWrite, + bool isSense = false ) = 0; + + virtual void getPointers( IOMemoryDescriptor **desc, + UInt32 *transferCount, + bool *isWrite, + bool isSense = false ) = 0; + /* + * Set/Get command timeout (mS) + */ + virtual void setTimeout( UInt32 timeoutmS ) = 0; + virtual UInt32 getTimeout() = 0; + + /* + * Set async callback routine. Specifying no parameters indicates synchronous call. + */ + virtual void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) = 0; + + /* + * Set/Get CDB information. (Generic CDB version) + */ + virtual void setCDB( CDBInfo *cdbInfo ) = 0; + virtual void getCDB( CDBInfo *cdbInfo ) = 0; + + /* + * Get CDB results. (Generic CDB version) + */ + virtual IOReturn getResults( CDBResults *cdbResults ) = 0; + + /* + * Get CDB Device this command is directed to. + */ + virtual IOCDBDevice *getDevice( IOCDBDevice *deviceType ) = 0; + #define kIOCDBDevice ((IOCDBDevice *)0) + + /* + * Command verbs + */ + virtual bool execute( UInt32 *sequenceNumber = 0 ) = 0; + virtual void abort( UInt32 sequenceNumber ) = 0; + virtual void complete() = 0; + + /* + * Get pointers to client and command data. + */ + virtual void *getCommandData() = 0; + virtual void *getClientData() = 0; + + /* + * Get unique sequence number assigned to command. 
+ */ + virtual UInt32 getSequenceNumber() = 0; +}; + +#endif diff --git a/iokit/IOKit/cdb/IOCDBDevice.h b/iokit/IOKit/cdb/IOCDBDevice.h new file mode 100644 index 000000000..e2eb3d0f5 --- /dev/null +++ b/iokit/IOKit/cdb/IOCDBDevice.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOCDBDevice.h + * + * + * + * Note: CDBCommand(s) are allocated and freed by methods in this class. 
+ * The remaining methods to setup and submit CDBCommands are defined in + * IOCDBCommand.h + */ +#ifndef _IOCDBDEVICE_H +#define _IOCDBDEVICE_H + +class IOCDBCommand; + +class IOCDBDevice : public IOService +{ + OSDeclareAbstractStructors(IOCDBDevice) + +/*------------------Methods provided to IOCDBDevice clients-----------------------*/ +public: + /* + * Allocate a CDB Command + */ + virtual IOCDBCommand *allocCommand( IOCDBDevice *deviceType, UInt32 clientDataSize = 0 ) = 0; + + /* + * Abort all outstanding commands on this device + */ + virtual void abort() = 0; + + /* + * Reset device (also aborts all outstanding commands) + */ + virtual void reset() = 0; + + /* + * Obtain information about this device + */ + virtual void getInquiryData( void *inquiryBuffer, + UInt32 inquiryBufSize, + UInt32 *inquiryDataSize ) = 0; +}; + +#define kIOCDBDevice ((IOCDBDevice *)0) + + +#endif diff --git a/iokit/IOKit/cdb/IOCDBInterface.h b/iokit/IOKit/cdb/IOCDBInterface.h new file mode 100644 index 000000000..b3579bf48 --- /dev/null +++ b/iokit/IOKit/cdb/IOCDBInterface.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOCDBInterface + .h + * + */ +#ifndef _IOCDBINTERFACE_H +#define _IOCDBINTERFACE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#endif diff --git a/iokit/IOKit/cdb/Makefile b/iokit/IOKit/cdb/Makefile new file mode 100644 index 000000000..d402c2d78 --- /dev/null +++ b/iokit/IOKit/cdb/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = cdb +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = CDBCommand.h CDBPublic.h +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/graphics/IOAccelClientConnect.h b/iokit/IOKit/graphics/IOAccelClientConnect.h new file mode 100644 index 000000000..9f01fac37 --- /dev/null +++ b/iokit/IOKit/graphics/IOAccelClientConnect.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOACCEL_CLIENT_CONNECT_H +#define _IOACCEL_CLIENT_CONNECT_H + + +/* +** The IOAccelerator service name +*/ +#define kIOAcceleratorClassName "IOAccelerator" + + +/* +** IOAccelerator public client types. Private client types start with +** kIOAccelNumClientTypes. +*/ +enum eIOAcceleratorClientTypes { + kIOAccelSurfaceClientType, + kIOAccelNumClientTypes, +}; + + +#endif /* _IOACCEL_CLIENT_CONNECT_H */ + diff --git a/iokit/IOKit/graphics/IOAccelSurfaceConnect.h b/iokit/IOKit/graphics/IOAccelSurfaceConnect.h new file mode 100644 index 000000000..fe83f5395 --- /dev/null +++ b/iokit/IOKit/graphics/IOAccelSurfaceConnect.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOACCEL_SURFACE_CONNECT_H +#define _IOACCEL_SURFACE_CONNECT_H + +#include +#include + + +/* +** Surface visible region in device coordinates. +** +** num_rects: The number of rectangles in the rect array. If num_rects +** is zero the bounds rectangle is used for the visible rectangle. +** If num_rects is zero the surface must be completely contained +** by the device. +** +** bounds: The unclipped surface rectangle in device coords. Extends +** beyond the device bounds if the surface is not totally on +** the device. +** +** rect[]: An array of visible rectangles in device coords. If num_rects +** is non-zero only the region described by these rectangles is +** copied to the frame buffer during a flush operation. +*/ +typedef struct +{ + UInt32 num_rects; + IOAccelBounds bounds; + IOAccelBounds rect[0]; +} IOAccelDeviceRegion; + + +/* +** Determine the size of a region. +*/ +#define IOACCEL_SIZEOF_DEVICE_REGION(_rgn_) (sizeof(IOAccelDeviceRegion) + (_rgn_)->num_rects * sizeof(IOAccelBounds)) + + +/* +** Surface client public memory types. Private memory types start with +** kIOAccelNumSurfaceMemoryTypes. +*/ +enum eIOAccelSurfaceMemoryTypes { + kIOAccelNumSurfaceMemoryTypes, +}; + + +/* +** Surface client public methods. Private methods start with +** kIOAccelNumSurfaceMethods. 
+*/ +enum eIOAccelSurfaceMethods { + kIOAccelSurfaceSetIDMode, + kIOAccelSurfaceSetShape, + kIOAccelSurfaceGetState, + kIOAccelSurfaceLock, + kIOAccelSurfaceUnlock, + kIOAccelSurfaceRead, + kIOAccelSurfaceFlush, + kIOAccelNumSurfaceMethods, +}; + + +/* +** Option bits for IOAccelCreateSurface and the kIOAccelSurfaceSetIDMode method. +** The color depth field can take any value of the _CGSDepth enumeration. +*/ +typedef enum { + kIOAccelSurfaceModeColorDepthBits = 0x0000000F, +} eIOAccelSurfaceModeBits; + + +/* +** Options bits for IOAccelSetSurfaceShape and the kIOAccelSurfaceSetShape method. +*/ +typedef enum { + kIOAccelSurfaceShapeNone = 0x00000000, + kIOAccelSurfaceShapeBlockingBit = 0x00000001, + kIOAccelSurfaceShapeNonSimpleBit = 0x00000002, +} eIOAccelSurfaceShapeBits; + + +/* +** Return bits for the kIOAccelSurfaceGetState method. +*/ +typedef enum { + kIOAccelSurfaceStateNone = 0x00000000, + kIOAccelSurfaceStateIdleBit = 0x00000001, +} eIOAccelSurfaceStateBits; + + +#endif /* _IOACCEL_SURFACE_CONNECT_H */ + diff --git a/iokit/IOKit/graphics/IOAccelTypes.h b/iokit/IOKit/graphics/IOAccelTypes.h new file mode 100644 index 000000000..19a20d408 --- /dev/null +++ b/iokit/IOKit/graphics/IOAccelTypes.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOACCEL_TYPES_H +#define _IOACCEL_TYPES_H + +#include + +#define IOACCEL_TYPES_REV 3 + +/* Integer rectangle in device coordinates */ +typedef struct +{ + SInt16 x; + SInt16 y; + SInt16 w; + SInt16 h; +} IOAccelBounds; + +/* Surface information */ + +enum { + kIOAccelVolatileSurface = 0x00000001, + kIOAccelKeycolorSurface = 0x00000002 +}; + +typedef struct +{ + vm_address_t address[4]; + UInt32 rowBytes; + UInt32 width; + UInt32 height; + UInt32 pixelFormat; + IOOptionBits flags; + IOFixed colorTemperature[4]; + UInt32 typeDependent[4]; +} IOAccelSurfaceInformation; + +typedef struct +{ + long x, y, w, h; + void *client_addr; + unsigned long client_row_bytes; +} IOAccelSurfaceReadData; + + + +#endif /* _IOACCEL_TYPES_H */ + diff --git a/iokit/IOKit/graphics/IOAccelerator.h b/iokit/IOKit/graphics/IOAccelerator.h new file mode 100644 index 000000000..a188a2a3d --- /dev/null +++ b/iokit/IOKit/graphics/IOAccelerator.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IO_ACCELERATOR_H +#define _IO_ACCELERATOR_H + +#include + +class IOAccelerator : public IOService +{ + OSDeclareDefaultStructors(IOAccelerator) +}; + + +#endif /* _IO_ACCELERATOR_H */ + diff --git a/iokit/IOKit/graphics/IODisplay.h b/iokit/IOKit/graphics/IODisplay.h new file mode 100644 index 000000000..ae78f8d24 --- /dev/null +++ b/iokit/IOKit/graphics/IODisplay.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. 
+ * sdouglas 23 Jul 98 - start IOKit + */ + +#ifndef _IOKIT_IODISPLAY_H +#define _IOKIT_IODISPLAY_H + +#include +#include + +extern const OSSymbol * gIODisplayParametersKey; +extern const OSSymbol * gIODisplayGUIDKey; + +extern const OSSymbol * gIODisplayValueKey; +extern const OSSymbol * gIODisplayMinValueKey; +extern const OSSymbol * gIODisplayMaxValueKey; + +extern const OSSymbol * gIODisplayContrastKey; +extern const OSSymbol * gIODisplayBrightnessKey; +extern const OSSymbol * gIODisplayHorizontalPositionKey; +extern const OSSymbol * gIODisplayHorizontalSizeKey; +extern const OSSymbol * gIODisplayVerticalPositionKey; +extern const OSSymbol * gIODisplayVerticalSizeKey; +extern const OSSymbol * gIODisplayTrapezoidKey; +extern const OSSymbol * gIODisplayPincushionKey; +extern const OSSymbol * gIODisplayParallelogramKey; +extern const OSSymbol * gIODisplayRotationKey; + +extern const OSSymbol * gIODisplayParametersCommitKey; +extern const OSSymbol * gIODisplayParametersDefaultKey; + +enum { + kIODisplayMaxPowerStates = 4 +}; + +struct DisplayPMVars // these are the private instance variables for power management +{ + IOIndex connectIndex; + // control bytes we send to the framebuffer to control syncs + UInt32 syncControls[kIODisplayMaxPowerStates]; + // mask bits that go with the control byte + UInt32 syncMask; + // current state of sync signals + UInt32 currentSyncs; + // highest state number normally, lowest usable state in emergency + unsigned long max_display_state; + bool displayIdle; // true if the display has had power lowered due to user inactivity + bool powerControllable; // false if no sync control available on video display +}; + +class IODisplayConnect : public IOService +{ + OSDeclareDefaultStructors(IODisplayConnect) + +private: + IOIndex connection; + +public: + virtual bool initWithConnection( IOIndex connection ); + virtual IOFramebuffer * getFramebuffer( void ); + virtual IOIndex getConnection( void ); + virtual IOReturn 
getAttributeForConnection( IOIndex, IOSelect, UInt32 * ); + virtual IOReturn setAttributeForConnection( IOIndex, IOSelect, UInt32 ); + virtual void joinPMtree ( IOService * driver ); +}; + +class IODisplay : public IOService +{ + OSDeclareAbstractStructors(IODisplay) + +public: + static void initialize( void ); + +private: + + // used to query the framebuffer controller + IODisplayConnect * connection; +protected: + // pointer to protected instance variables for power management + struct DisplayPMVars * displayPMVars; + + /* Reserved for future expansion. */ + int _IODisplay_reserved[2]; + + virtual void initForPM ( IOService * ); + + virtual IOReturn setProperties( OSObject * properties ); + + virtual IOReturn setAggressiveness ( unsigned long, unsigned long newLevel ); + virtual IOReturn setPowerState ( unsigned long, IOService* ); + virtual unsigned long maxCapabilityForDomainState ( IOPMPowerFlags ); + virtual unsigned long initialPowerStateForDomainState ( IOPMPowerFlags ); + virtual unsigned long powerStateForDomainState ( IOPMPowerFlags ); + +public: + virtual IOService * probe( IOService * provider, + SInt32 * score ); + + virtual bool start( IOService * provider ); + + virtual IODisplayConnect * getConnection( void ); + + virtual IOReturn getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ) = 0; + + virtual IOReturn getGammaTableByIndex( + UInt32 * channelCount, UInt32 * dataCount, + UInt32 * dataWidth, void ** data ); + + virtual void dropOneLevel ( void ); + virtual void makeDisplayUsable ( void ); + IOReturn registerPowerDriver ( IOService*, IOPMPowerState*, unsigned long ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleSenseDisplay : public IODisplay +{ + OSDeclareDefaultStructors(AppleSenseDisplay) + +public: + virtual IOService * probe( IOService * provider, + SInt32 * score ); + + virtual IOReturn getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ); +}; + +/* * 
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class AppleNoSenseDisplay : public IODisplay +{ + OSDeclareDefaultStructors(AppleNoSenseDisplay) + +public: + virtual IOReturn getConnectFlagsForDisplayMode( + IODisplayModeID mode, UInt32 * flags ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#endif /* ! _IOKIT_IODISPLAY_H */ + diff --git a/iokit/IOKit/graphics/IOFramebuffer.h b/iokit/IOKit/graphics/IOFramebuffer.h new file mode 100644 index 000000000..533a45041 --- /dev/null +++ b/iokit/IOKit/graphics/IOFramebuffer.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 30 Nov 98 sdouglas start cpp, from previous versions. 
+ */ + +#ifndef _IOKIT_IOFRAMEBUFFER_H +#define _IOKIT_IOFRAMEBUFFER_H + +#include +#include +#include +#include + +class IOFramebuffer; +class IOBufferMemoryDescriptor; + +typedef void (*CursorBlitProc)( + IOFramebuffer * inst, + void * shmem, + volatile unsigned char *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + +typedef void (*CursorRemoveProc)( + IOFramebuffer * inst, + void * shmem, + volatile unsigned char *vramPtr, + unsigned int vramRow, + int width, + int height ); + +enum { + kTransparentEncoding = 0, + kInvertingEncoding +}; + +enum { + kTransparentEncodingShift = (kTransparentEncoding << 1), + kTransparentEncodedPixel = (0x01 << kTransparentEncodingShift), + + kInvertingEncodingShift = (kInvertingEncoding << 1), + kInvertingEncodedPixel = (0x01 << kInvertingEncodingShift), +}; + +enum { + kHardwareCursorDescriptorMajorVersion = 0x0001, + kHardwareCursorDescriptorMinorVersion = 0x0000 +}; + +struct IOHardwareCursorDescriptor { + UInt16 majorVersion; + UInt16 minorVersion; + UInt32 height; + UInt32 width; + UInt32 bitDepth; + UInt32 maskBitDepth; + UInt32 numColors; + UInt32 * colorEncodings; + UInt32 flags; + UInt32 supportedSpecialEncodings; + UInt32 specialEncodings[16]; +}; +typedef struct IOHardwareCursorDescriptor IOHardwareCursorDescriptor; + +enum { + kHardwareCursorInfoMajorVersion = 0x0001, + kHardwareCursorInfoMinorVersion = 0x0000 +}; + +struct IOHardwareCursorInfo { + UInt16 majorVersion; + UInt16 minorVersion; + UInt32 cursorHeight; + UInt32 cursorWidth; + // nil or big enough for hardware's max colors + IOColorEntry * colorMap; + UInt8 * hardwareCursorData; + UInt32 reserved[6]; +}; +typedef struct IOHardwareCursorInfo IOHardwareCursorInfo; + +// clock & data values +enum { + kIODDCLow = 0, + kIODDCHigh = 1, + kIODDCTristate = 2 +}; +// ddcBlockType constants +enum { + // EDID block type. 
+ kIODDCBlockTypeEDID = 0 +}; + +// ddcFlags constants +enum { + // Force a new read of the EDID. + kIODDCForceRead = 0x00000001, +}; + +enum { + kDisabledInterruptState = 0, + kEnabledInterruptState = 1 +}; + +typedef void (*IOFBInterruptProc)( OSObject * target, void * ref ); + + +typedef IOReturn (*IOFramebufferNotificationHandler) + (OSObject * self, void * ref, + IOFramebuffer * framebuffer, IOIndex event, + void * info); + +// IOFramebufferNotificationHandler events +enum { + kIOFBNotifyDisplayModeWillChange = 1, + kIOFBNotifyDisplayModeDidChange, + kIOFBNotifyWillSleep, + kIOFBNotifyDidWake, +}; + + +struct StdFBShmem_t; +class IOFramebufferUserClient; + +class IOFramebuffer : public IOGraphicsDevice +{ + friend class IOFramebufferUserClient; + friend class IOFramebufferSharedUserClient; + friend class IOGraphicsEngineClient; + + OSDeclareDefaultStructors(IOFramebuffer) + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * reserved; + +private: + +protected: + StdFBShmem_t * priv; + int shmemClientVersion; + IOBufferMemoryDescriptor * sharedCursor; + + union { + struct { + /* Mapping tables used in cursor drawing to 5-5-5 displays. */ + unsigned char * _bm34To35SampleTable; + unsigned char * _bm35To34SampleTable; + /* Mapping tables used in cursor drawing to 8-bit RGB displays. 
*/ + unsigned int * _bm256To38SampleTable; + unsigned char * _bm38To256SampleTable; + } t; + UInt8 * tables[ 4 ]; + } colorConvert; + + /* cursor blitting vars */ + CursorBlitProc cursorBlitProc; + CursorRemoveProc cursorRemoveProc; + + IOGSize maxCursorSize; + volatile unsigned char * cursorImages[ kIOFBNumCursorFrames ]; + volatile unsigned char * cursorMasks[ kIOFBNumCursorFrames ]; + volatile unsigned char * cursorSave; + unsigned int white; + + Point nextCursorLoc; + int nextCursorFrame; + void * vblInterrupt; + semaphore_t vblSemaphore; + + /* memory ranges */ + volatile unsigned char * frameBuffer; + unsigned int totalWidth; + unsigned int rowBytes; + unsigned int bytesPerPixel; + + IOMemoryMap * vramMap; + IOByteCount vramMapOffset; + OSArray * userAccessRanges; + OSArray * engineAccessRanges; + IOBufferMemoryDescriptor * engineContext; + OSSet * fbNotifications; + + class IOFramebufferUserClient * serverConnect; + class IOFramebufferSharedUserClient * sharedConnect; + + bool opened; + bool closed; + bool clutValid; + bool currentMono; + bool needCursorService; + bool haveVBLService; + bool haveHWCursor; + bool hwCursorLoaded; + + void * pmRef; + + /* Reserved for future expansion. 
*/ + int _IOFramebuffer_reserved[7]; + +private: + OSMetaClassDeclareReservedUnused(IOFramebuffer, 0); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 1); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 2); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 3); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 4); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 5); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 6); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 7); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 8); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 9); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 10); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 11); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 12); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 13); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 14); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 15); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 16); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 17); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 18); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 19); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 20); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 21); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 22); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 23); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 24); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 25); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 26); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 27); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 28); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 29); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 30); + OSMetaClassDeclareReservedUnused(IOFramebuffer, 31); + + +public: + static void initialize(); + + virtual IOReturn powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService* ); + virtual IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned 
long, IOService* ); + virtual IOReturn setPowerState( unsigned long powerStateOrdinal, IOService * device); + virtual IOReturn newUserClient( task_t owningTask, + void * security_id, + UInt32 type, + IOUserClient ** handler ); + + + virtual void hideCursor( void ); + virtual void showCursor( Point * cursorLoc, int frame ); + virtual void moveCursor( Point * cursorLoc, int frame ); + // virtual + void resetCursor( void ); + + virtual void getVBLTime( AbsoluteTime * time, AbsoluteTime * delta ); + + virtual void getBoundingRect ( Bounds ** bounds ); + + virtual bool start( IOService * provider ); + + virtual IOReturn open( void ); + + virtual void close( void ); + + virtual bool isConsoleDevice( void ); + + virtual IOReturn setupForCurrentConfig( void ); + + virtual bool serializeInfo( OSSerialize * s ); + virtual bool setNumber( OSDictionary * dict, const char * key, + UInt32 number ); + + IONotifier * addFramebufferNotification( + IOFramebufferNotificationHandler handler, + OSObject * self, void * ref); + + virtual IODeviceMemory * getApertureRange( IOPixelAperture aperture ) = 0; + virtual IODeviceMemory * getVRAMRange( void ); + +protected: + + IOReturn deliverFramebufferNotification( + IOIndex event, void * info = 0 ); + +#ifdef IOFRAMEBUFFER_PRIVATE +#include +#endif + +public: + + virtual IOReturn enableController( void ); + + // List of pixel formats supported, null separated, + // doubly null terminated. 
+ virtual const char * getPixelFormats( void ) = 0; + + // Array of supported display modes + virtual IOItemCount getDisplayModeCount( void ) = 0; + virtual IOReturn getDisplayModes( IODisplayModeID * allDisplayModes ) = 0; + + // Info about a display mode + virtual IOReturn getInformationForDisplayMode( IODisplayModeID displayMode, + IODisplayModeInformation * info ) = 0; + + // Mask of pixel formats available in mode and depth + virtual UInt64 getPixelFormatsForDisplayMode( IODisplayModeID displayMode, + IOIndex depth ) = 0; + + virtual IOReturn getPixelInformation( + IODisplayModeID displayMode, IOIndex depth, + IOPixelAperture aperture, IOPixelInformation * pixelInfo ) = 0; + + // Framebuffer info + + // Current display mode and depth + virtual IOReturn getCurrentDisplayMode( IODisplayModeID * displayMode, + IOIndex * depth ) = 0; + + // Set display mode and depth + virtual IOReturn setDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + + // For pages + virtual IOReturn setApertureEnable( IOPixelAperture aperture, + IOOptionBits enable ); + + // Display mode and depth for startup + virtual IOReturn setStartupDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + virtual IOReturn getStartupDisplayMode( IODisplayModeID * displayMode, + IOIndex * depth ); + + //// CLUTs + + virtual IOReturn setCLUTWithEntries( IOColorEntry * colors, UInt32 index, + UInt32 numEntries, IOOptionBits options ); + + //// Gamma + + virtual IOReturn setGammaTable( UInt32 channelCount, UInt32 dataCount, + UInt32 dataWidth, void * data ); + + //// Controller attributes + + virtual IOReturn setAttribute( IOSelect attribute, UInt32 value ); + virtual IOReturn getAttribute( IOSelect attribute, UInt32 * value ); + + //// Display mode timing information + + virtual IOReturn getTimingInfoForDisplayMode( + IODisplayModeID displayMode, IOTimingInformation * info ); + + //// Detailed timing information + + virtual IOReturn validateDetailedTiming( + void * description, IOByteCount 
descripSize ); + + virtual IOReturn setDetailedTimings( OSArray * array ); + + //// Connections + + virtual IOItemCount getConnectionCount( void ); + + virtual IOReturn setAttributeForConnection( IOIndex connectIndex, + IOSelect attribute, UInt32 value ); + virtual IOReturn getAttributeForConnection( IOIndex connectIndex, + IOSelect attribute, UInt32 * value ); + + //// HW Cursors + + virtual bool convertCursorImage( void * cursorImage, + IOHardwareCursorDescriptor * description, + IOHardwareCursorInfo * cursor ); + + virtual IOReturn setCursorImage( void * cursorImage ); + virtual IOReturn setCursorState( SInt32 x, SInt32 y, bool visible ); + + //// SW Cursors + + virtual void flushCursor( void ); + + // Apple sensing + + virtual IOReturn getAppleSense( IOIndex connectIndex, + UInt32 * senseType, + UInt32 * primary, + UInt32 * extended, + UInt32 * displayType ); + + virtual IOReturn connectFlags( IOIndex connectIndex, + IODisplayModeID displayMode, IOOptionBits * flags ); + + //// IOLowLevelDDCSense + + virtual void setDDCClock( IOIndex connectIndex, UInt32 value ); + virtual void setDDCData( IOIndex connectIndex, UInt32 value ); + virtual bool readDDCClock( IOIndex connectIndex ); + virtual bool readDDCData( IOIndex connectIndex ); + virtual IOReturn enableDDCRaster( bool enable ); + + //// IOHighLevelDDCSense + + virtual bool hasDDCConnect( IOIndex connectIndex ); + virtual IOReturn getDDCBlock( IOIndex connectIndex, UInt32 blockNumber, + IOSelect blockType, IOOptionBits options, + UInt8 * data, IOByteCount * length ); + + //// Interrupts + + // This is driven in the opposite direction to ndrv's ie. the base class + // registers a proc with the driver, and controls int generation with + // setInterruptState. Clients ask for serviceType. 
+ + virtual IOReturn registerForInterruptType( IOSelect interruptType, + IOFBInterruptProc proc, OSObject * target, void * ref, + void ** interruptRef ); + virtual IOReturn unregisterInterrupt( void * interruptRef ); + virtual IOReturn setInterruptState( void * interruptRef, UInt32 state ); + + virtual IOReturn getNotificationSemaphore( IOSelect interruptType, + semaphore_t * semaphore ); +}; + +#endif /* ! _IOKIT_IOFRAMEBUFFER_H */ diff --git a/iokit/IOKit/graphics/IOFramebufferPrivate.h b/iokit/IOKit/graphics/IOFramebufferPrivate.h new file mode 100644 index 000000000..098191ec1 --- /dev/null +++ b/iokit/IOKit/graphics/IOFramebufferPrivate.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + void setupCursor( IOPixelInformation * info ); + void stopCursor( void ); + IOReturn doSetup( bool full ); + IOReturn createSharedCursor( int shmemVersion, + int maxWidth, int maxHeight ); + IOReturn setBoundingRect( Bounds * bounds ); + IOReturn setUserRanges( void ); + IOReturn getConnectFlagsForDisplayMode( + IODisplayModeID mode, IOIndex connection, UInt32 * flags ); + IOReturn extGetDisplayModeCount( IOItemCount * count ); + IOReturn extGetDisplayModes( IODisplayModeID * allModes, + IOByteCount * size ); + IOReturn extSetDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + IOReturn extGetInformationForDisplayMode( + IODisplayModeID mode, IODisplayModeInformation * info ); + + IOReturn extGetVRAMMapOffset( IOPixelAperture aperture, + IOByteCount * offset ); + IOReturn extSetBounds( Bounds * bounds ); + + IOReturn extSetNewCursor( void * cursor, IOIndex frame, + IOOptionBits options ); + IOReturn extSetCursorVisible( bool visible ); + IOReturn extSetCursorPosition( SInt32 x, SInt32 y ); + IOReturn extSetColorConvertTable( UInt32 select, + UInt8 * data, IOByteCount length ); + IOReturn extSetCLUTWithEntries( UInt32 index, IOOptionBits options, + IOColorEntry * colors, IOByteCount inputCount ); + IOReturn makeModeList( void ); + IOReturn getDefaultMode( IOIndex connection, IODisplayModeID * mode, + IOIndex * depth); + IOReturn extValidateDetailedTiming( + void * description, void * outDescription, + IOByteCount inSize, IOByteCount * outSize ); + IOReturn beginSystemSleep( void * ackRef ); + + static inline void StdFBDisplayCursor( IOFramebuffer * inst ); + static inline void StdFBRemoveCursor( IOFramebuffer * inst ); + static inline void RemoveCursor( IOFramebuffer * inst ); + static inline void DisplayCursor( IOFramebuffer * inst ); + static inline void SysHideCursor( IOFramebuffer * inst ); + static inline void SysShowCursor( IOFramebuffer * inst ); + static inline void CheckShield( IOFramebuffer * inst ); + + 
static void StdFBDisplayCursor8P( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned char *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + + static void StdFBDisplayCursor8G( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned char *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + + static void StdFBDisplayCursor555( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned short *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + + static void StdFBDisplayCursor444( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned short *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + + static void StdFBDisplayCursor32Axxx( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned int *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + + static void StdFBDisplayCursor32xxxA( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned int *vramPtr, + unsigned int cursStart, + unsigned int vramRow, + unsigned int cursRow, + int width, + int height ); + + static void StdFBRemoveCursor8( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned char *vramPtr, + unsigned int vramRow, + int width, + int height ); + static void StdFBRemoveCursor16( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned short *vramPtr, + unsigned int vramRow, + int width, + int height ); + + static void StdFBRemoveCursor32( + IOFramebuffer * inst, + StdFBShmem_t *shmem, + volatile unsigned int *vramPtr, + unsigned int vramRow, + int width, + int height ); + + static void deferredMoveCursor(IOFramebuffer * inst); + static void handleVBL(IOFramebuffer * inst, void * ref); diff --git 
a/iokit/IOKit/graphics/IOFramebufferShared.h b/iokit/IOKit/graphics/IOFramebufferShared.h new file mode 100644 index 000000000..4426a3945 --- /dev/null +++ b/iokit/IOKit/graphics/IOFramebufferShared.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992, 1993 NeXT Computer, Inc. All rights reserved. + * + * IOFrameBufferShared.h - Definitions of objects and types shared between + * kernel level IOFrameBufferDisplay driver and PostScript level driver. + * + * HISTORY + * 03 Sep 92 Joe Pasqua + * Created. + * 24 Jun 93 Derek B Clegg + * Moved to driverkit. + */ + +#ifndef _IOKIT_IOFRAMEBUFFERSHARED_H +#define _IOKIT_IOFRAMEBUFFERSHARED_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef KERNEL +// CGS use optional +#define IOFB_ARBITRARY_SIZE_CURSOR +#endif + +#define IOFB_SUPPORTS_XOR_CURSOR + +// +// Cursor and Window Server state data, occupying a slice of shared memory +// between the kernel and WindowServer. 
+// + +enum { + kIOFBNumCursorFrames = 4, + kIOFBNumCursorFramesShift = 2, + kIOFBMaxCursorDepth = 32 +}; + +#ifndef IOFB_ARBITRARY_SIZE_CURSOR + +#define CURSORWIDTH 16 /* width in pixels */ +#define CURSORHEIGHT 16 /* height in pixels */ + +struct bm12Cursor { + unsigned int image[4][16]; + unsigned int mask[4][16]; + unsigned int save[16]; +}; + +struct bm18Cursor { + unsigned char image[4][256]; + unsigned char mask[4][256]; + unsigned char save[256]; +}; + +struct bm34Cursor { + unsigned short image[4][256]; + unsigned short save[256]; +}; + +struct bm38Cursor { + unsigned int image[4][256]; + unsigned int save[256]; +}; + +#endif /* IOFB_ARBITRARY_SIZE_CURSOR */ + +struct StdFBShmem_t { + ev_lock_data_t cursorSema; + int frame; + char cursorShow; + char cursorObscured; + char shieldFlag; + char shielded; + IOGBounds saveRect; + IOGBounds shieldRect; + IOGPoint cursorLoc; + IOGBounds cursorRect; + IOGBounds oldCursorRect; + IOGBounds screenBounds; + int version; + int structSize; + AbsoluteTime vblTime; + AbsoluteTime vblDelta; + unsigned int reservedC[30]; + unsigned char hardwareCursorCapable; + unsigned char hardwareCursorActive; + unsigned char reservedB[2]; + IOGSize cursorSize[kIOFBNumCursorFrames]; + IOGPoint hotSpot[kIOFBNumCursorFrames]; +#ifndef IOFB_ARBITRARY_SIZE_CURSOR + union { + struct bm12Cursor bw; + struct bm18Cursor bw8; + struct bm34Cursor rgb; + struct bm38Cursor rgb24; + } cursor; +#else /* IOFB_ARBITRARY_SIZE_CURSOR */ + unsigned char cursor[0]; +#endif /* IOFB_ARBITRARY_SIZE_CURSOR */ +}; +#ifndef __cplusplus +typedef volatile struct StdFBShmem_t StdFBShmem_t; +#endif + + +enum { + // version for IOFBCreateSharedCursor + kIOFBCurrentShmemVersion = 2, + // memory types for IOConnectMapMemory. + // 0..n are apertures + kIOFBCursorMemory = 100, + kIOFBVRAMMemory = 110 +}; + +#define IOFRAMEBUFFER_CONFORMSTO "IOFramebuffer" + +#ifdef __cplusplus +} +#endif + +#endif /* ! 
_IOKIT_IOFRAMEBUFFERSHARED_H */ diff --git a/iokit/IOKit/graphics/IOGraphicsDevice.h b/iokit/IOKit/graphics/IOGraphicsDevice.h new file mode 100644 index 000000000..a16e0c114 --- /dev/null +++ b/iokit/IOKit/graphics/IOGraphicsDevice.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOGRAPHICSDEVICE_H +#define _IOKIT_IOGRAPHICSDEVICE_H + +#include +#include + + +class IOGraphicsDevice : public IOService +{ + OSDeclareAbstractStructors(IOGraphicsDevice) + +public: + + virtual void hideCursor( void ) = 0; + virtual void showCursor( Point * cursorLoc, int frame ) = 0; + virtual void moveCursor( Point * cursorLoc, int frame ) = 0; + + virtual void getVBLTime( AbsoluteTime * time, AbsoluteTime * delta ) = 0; + + virtual void getBoundingRect ( Bounds ** bounds ) = 0; +}; + +#endif /* ! 
_IOKIT_IOGRAPHICSDEVICE_H */ + diff --git a/iokit/IOKit/graphics/IOGraphicsEngine.h b/iokit/IOKit/graphics/IOGraphicsEngine.h new file mode 100644 index 000000000..352fdec7f --- /dev/null +++ b/iokit/IOKit/graphics/IOGraphicsEngine.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 10 Mar 99 sdouglas created. 
+ */ + + +struct IOGraphicsEngineContext { + IOSharedLockData contextLock; + IOOptionBits state; + void * owner; + UInt32 version; + IOByteCount structSize; + UInt32 reserved[ 8 ]; +}; +#ifndef __cplusplus +typedef volatile struct IOGraphicsEngineContext IOGraphicsEngineContext; +#endif + +enum { + // memory type for IOMapMemory + kIOGraphicsEngineContext = 100 +}; + +enum { + // version + kIOGraphicsEngineContextVersion = 1 +}; diff --git a/iokit/IOKit/graphics/IOGraphicsTypes.h b/iokit/IOKit/graphics/IOGraphicsTypes.h new file mode 100644 index 000000000..e580fea71 --- /dev/null +++ b/iokit/IOKit/graphics/IOGraphicsTypes.h @@ -0,0 +1,397 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOGRAPHICSTYPES_H +#define _IOKIT_IOGRAPHICSTYPES_H + + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +typedef SInt32 IOIndex; +typedef UInt32 IOSelect; +typedef UInt32 IOFixed1616; +typedef SInt32 IODisplayModeID; +typedef UInt32 IODisplayVendorID; +typedef UInt32 IODisplayProductID; + +enum { + kIOMaxPixelBits = 64 +}; +typedef char IOPixelEncoding[ kIOMaxPixelBits ]; + +// Common Apple pixel formats + +#define IO1BitIndexedPixels "P" +#define IO2BitIndexedPixels "PP" +#define IO4BitIndexedPixels "PPPP" +#define IO8BitIndexedPixels "PPPPPPPP" +#define IO16BitDirectPixels "-RRRRRGGGGGBBBBB" +#define IO32BitDirectPixels "--------RRRRRRRRGGGGGGGGBBBBBBBB" + +// other possible pixel formats + +#define IOYUV422Pixels "Y4U2V2" +#define IO8BitOverlayPixels "O8" +// page flipping +#define IOPagedPixels "Page1" + +#define IO_SampleTypeAlpha 'A' +#define IO_SampleTypeSkip '-' + +// Info about a pixel format +enum { + kIOCLUTPixels = 0, + kIOFixedCLUTPixels = 1, + kIORGBDirectPixels = 2, + kIOMonoDirectPixels = 3, + kIOMonoInverseDirectPixels = 4, +}; + +struct IOPixelInformation { + IOByteCount bytesPerRow; + IOByteCount bytesPerPlane; + UInt32 bitsPerPixel; + UInt32 pixelType; + UInt32 componentCount; + UInt32 bitsPerComponent; + UInt32 componentMasks[ 8 * 2 ]; + IOPixelEncoding pixelFormat; + UInt32 flags; + UInt32 activeWidth; + UInt32 activeHeight; + UInt32 reserved[ 2 ]; +}; +typedef struct IOPixelInformation IOPixelInformation; + +// Info about a display mode +typedef UInt32 IOAppleTimingID; + +struct IODisplayModeInformation { + UInt32 nominalWidth; + UInt32 nominalHeight; + IOFixed1616 refreshRate; + IOIndex maxDepthIndex; + UInt32 flags; + UInt32 reserved[ 4 ]; +}; +typedef struct IODisplayModeInformation IODisplayModeInformation; + +// flags +enum { + kDisplayModeSafetyFlags = 0x00000007, + + kDisplayModeAlwaysShowFlag = 0x00000008, + kDisplayModeNeverShowFlag = 0x00000080, + kDisplayModeNotResizeFlag = 
0x00000010, + kDisplayModeRequiresPanFlag = 0x00000020, + + kDisplayModeInterlacedFlag = 0x00000040, + + kDisplayModeSimulscanFlag = 0x00000100, + kDisplayModeBuiltInFlag = 0x00000400, + kDisplayModeNotPresetFlag = 0x00000200, + kDisplayModeStretchedFlag = 0x00000800 +}; +enum { + kDisplayModeValidFlag = 0x00000001, + kDisplayModeSafeFlag = 0x00000002, + kDisplayModeDefaultFlag = 0x00000004, +}; + +// Framebuffer info + +struct IOFramebufferInformation { + IOPhysicalAddress baseAddress; + UInt32 activeWidth; + UInt32 activeHeight; + IOByteCount bytesPerRow; + IOByteCount bytesPerPlane; + UInt32 bitsPerPixel; + UInt32 pixelType; + UInt32 flags; + UInt32 reserved[ 4 ]; +}; +typedef struct IOFramebufferInformation IOFramebufferInformation; + +// flags +enum { + kFramebufferSupportsCopybackCache = 0x00010000, + kFramebufferSupportsWritethruCache = 0x00020000, + kFramebufferSupportsGammaCorrection = 0x00040000, + kFramebufferDisableAltivecAccess = 0x00080000, +}; + +// Aperture is an index into supported pixel formats for a mode & depth +typedef IOIndex IOPixelAperture; +enum { + kIOFBSystemAperture = 0 +}; + +//// CLUTs + +typedef UInt16 IOColorComponent; + +struct IOColorEntry { + UInt16 index; + IOColorComponent red; + IOColorComponent green; + IOColorComponent blue; +}; +typedef struct IOColorEntry IOColorEntry; + +// options (masks) +enum { + kSetCLUTByValue = 0x00000001, // else at index + kSetCLUTImmediately = 0x00000002, // else at VBL + kSetCLUTWithLuminance = 0x00000004 // else RGB +}; + +//// Controller attributes + +enum { + kIOPowerAttribute = 'powr', + kIOHardwareCursorAttribute = 'crsr' +}; + +//// Display mode timing information + +struct IODetailedTimingInformation { + // from EDID defn + UInt32 pixelClock; // Hertz + UInt32 horizontalActive; // pixels + UInt32 horizontalBlanking; // pixels + UInt32 horizontalBorder; // pixels + UInt32 horizontalSyncOffset; // pixels + UInt32 horizontalSyncWidth; // pixels + UInt32 verticalActive; // lines + UInt32 
verticalBlanking; // lines + UInt32 verticalBorder; // lines + UInt32 verticalSyncOffset; // lines + UInt32 verticalSyncWidth; // lines +}; +typedef struct IODetailedTimingInformation IODetailedTimingInformation; + +struct IOTimingInformation { + IOAppleTimingID appleTimingID; // appleTimingXXX const + UInt32 flags; + IODetailedTimingInformation detailedInfo; +}; +typedef struct IOTimingInformation IOTimingInformation; + +enum { + // b0-7 from EDID flags + kIODetailedTimingValid = 0x80000000 +}; + +//// Connections + +enum { + kOrConnections = 0xffffffe, + kAndConnections = 0xffffffd +}; + +enum { + kConnectionEnable = 'enab', + kConnectionSyncEnable = 'sync', + kConnectionSyncFlags = 'sycf', + kConnectionSupportsAppleSense = 'asns', + kConnectionSupportsLLDDCSense = 'lddc', + kConnectionSupportsHLDDCSense = 'hddc' +}; + +// kConnectionSyncControl values +enum { + kIOHSyncDisable = 0x00000001, + kIOVSyncDisable = 0x00000002, + kIOCSyncDisable = 0x00000004, + kIONoSeparateSyncControl = 0x00000040, + kIOTriStateSyncs = 0x00000080, + kIOSyncOnBlue = 0x00000008, + kIOSyncOnGreen = 0x00000010, + kIOSyncOnRed = 0x00000020 +}; + + +#define IO_DISPLAY_CAN_FILL 0x00000040 +#define IO_DISPLAY_CAN_BLIT 0x00000020 + +#define IO_24BPP_TRANSFER_TABLE_SIZE 256 +#define IO_15BPP_TRANSFER_TABLE_SIZE 256 +#define IO_8BPP_TRANSFER_TABLE_SIZE 256 +#define IO_12BPP_TRANSFER_TABLE_SIZE 256 +#define IO_2BPP_TRANSFER_TABLE_SIZE 256 + +#define STDFB_BM256_TO_BM38_MAP_SIZE 256 +#define STDFB_BM38_TO_BM256_MAP_SIZE 256 +#define STDFB_BM38_TO_256_WITH_LOGICAL_SIZE \ + (STDFB_BM38_TO_BM256_MAP_SIZE + (256/sizeof(int))) + +#define STDFB_4BPS_TO_5BPS_MAP_SIZE 16 +#define STDFB_5BPS_TO_4BPS_MAP_SIZE 32 + +enum { + // connection types for IOServiceOpen + kIOFBServerConnectType = 0, + kIOFBSharedConnectType = 1, + kIOFBEngineControllerConnectType = 20, + kIOFBEngineConnectType = 21 +}; + +struct IOGPoint { + SInt16 x; + SInt16 y; +}; +typedef struct IOGPoint IOGPoint; + +struct IOGSize { + SInt16 
width; + SInt16 height; +}; +typedef struct IOGSize IOGSize; + +struct IOGBounds { + SInt16 minx; + SInt16 maxx; + SInt16 miny; + SInt16 maxy; +}; +typedef struct IOGBounds IOGBounds; + +#if !defined(__Point__) && !defined(BINTREE_H) && !defined(__MACTYPES__) +#define __Point__ +typedef IOGPoint Point; +#endif + +#if !defined(__Bounds__) && !defined(BINTREE_H) && !defined(__MACTYPES__) +#define __Bounds__ +typedef IOGBounds Bounds; +#endif + +// interrupt types + +enum { + kIOFBVBLInterruptType = 'vbl ', + kIOFBHBLInterruptType = 'hbl ', + kIOFBFrameInterruptType = 'fram', + kIOFBConnectInterruptType = 'dci ' +}; + +// property keys + +#define kIOFramebufferInfoKey "IOFramebufferInformation" + +#define kIOFBWidthKey "IOFBWidth" +#define kIOFBHeightKey "IOFBHeight" +#define kIOFBRefreshRateKey "IOFBRefreshRate" +#define kIOFBFlagsKey "IOFBFlags" +#define kIOFBBytesPerRowKey "IOFBBytesPerRow" +#define kIOFBBytesPerPlaneKey "IOFBBytesPerPlane" +#define kIOFBBitsPerPixelKey "IOFBBitsPerPixel" +#define kIOFBComponentCountKey "IOFBComponentCount" +#define kIOFBBitsPerComponentKey "IOFBBitsPerComponent" + +#define kIOFBDetailedTimingsKey "IOFBDetailedTimings" +#define kIOFBTimingRangeKey "IOFBTimingRange" + +#define kIOFBHostAccessFlagsKey "IOFBHostAccessFlags" + +#define kIODisplayEDIDKey "IODisplayEDID" +#define kIODisplayLocationKey "IODisplayLocation" + +enum { + kDisplayVendorIDUnknown = 'unkn', + kDisplayProductIDGeneric = 0x717 +}; + +#define kDisplayVendorID "DisplayVendorID" // CFNumber +#define kDisplayProductID "DisplayProductID" // CFNumber +#define kDisplaySerialNumber "DisplaySerialNumber" // CFNumber +#define kDisplaySerialString "DisplaySerialString" // CFString +#define kDisplayWeekOfManufacture "DisplayWeekManufacture" // CFNumber +#define kDisplayYearOfManufacture "DisplayYearManufacture" // CFNumber + +// CFDictionary of language-locale keys, name values +// eg. 
"en"="Color LCD", "en-GB"="Colour LCD" +#define kDisplayProductName "DisplayProductName" + +// all CFNumber or CFArray of CFNumber (floats) +#define kDisplayWhitePointX "DisplayWhitePointX" +#define kDisplayWhitePointY "DisplayWhitePointY" +#define kDisplayRedPointX "DisplayRedPointX" +#define kDisplayRedPointY "DisplayRedPointY" +#define kDisplayGreenPointX "DisplayGreenPointX" +#define kDisplayGreenPointY "DisplayGreenPointY" +#define kDisplayBluePointX "DisplayBluePointX" +#define kDisplayBluePointY "DisplayBluePointY" +#define kDisplayWhiteGamma "DisplayWhiteGamma" +#define kDisplayRedGamma "DisplayRedGamma" +#define kDisplayGreenGamma "DisplayGreenGamma" +#define kDisplayBlueGamma "DisplayBlueGamma" + +// CFData +#define kDisplayCSProfile "DisplayCSProfile" + +// CFNumber +#define kDisplayHorizontalImageSize "DisplayHorizontalImageSize" +#define kDisplayVerticalImageSize "DisplayVerticalImageSize" + +// Display parameters + +#define kIODisplayParametersKey "IODisplayParameters" +#define kIODisplayGUIDKey "IODisplayGUID" + +#define kIODisplayValueKey "value" +#define kIODisplayMinValueKey "min" +#define kIODisplayMaxValueKey "max" + +#define kIODisplayBrightnessKey "brightness" +#define kIODisplayContrastKey "contrast" +#define kIODisplayHorizontalPositionKey "horizontal-position" +#define kIODisplayHorizontalSizeKey "horizontal-size" +#define kIODisplayVerticalPositionKey "vertical-position" +#define kIODisplayVerticalSizeKey "vertical-size" +#define kIODisplayTrapezoidKey "trapezoid" +#define kIODisplayPincushionKey "pincushion" +#define kIODisplayParallelogramKey "parallelogram" +#define kIODisplayRotationKey "rotation" +#define kIODisplayTheatreModeKey "theatre-mode" + +#define kIODisplayParametersCommitKey "commit" +#define kIODisplayParametersDefaultKey "defaults" + +#ifdef __cplusplus +} +#endif + +#endif /* ! 
_IOKIT_IOGRAPHICSTYPES_H */

# Per-directory header-install makefile for iokit/IOKit/graphics,
# driven by the shared xnu makedefs fragments below.
export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd
export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def
export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule
export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir

# Public and private headers land inside the IOKit framework layout.
IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A
export INCDIR = $(IOKIT_FRAMEDIR)/Headers
export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders

include $(MakeInc_cmd)
include $(MakeInc_def)

MI_DIR = graphics
# Headers exported to the kernel build but never installed publicly.
NOT_EXPORT_HEADERS = IOFramebufferPrivate.h

# No nested header directories for this component.
INSTINC_SUBDIRS =
INSTINC_SUBDIRS_PPC =
INSTINC_SUBDIRS_I386 =

EXPINC_SUBDIRS = ${INSTINC_SUBDIRS}
EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC}
EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386}

# Every header present in the source directory.
ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h))

# Machine-independent headers installed into the public framework.
INSTALL_MI_LIST = IOGraphicsEngine.h IOFramebufferShared.h \
	IOGraphicsTypes.h IOAccelTypes.h IOAccelClientConnect.h \
	IOAccelSurfaceConnect.h

INSTALL_MI_LCL_LIST = ""

INSTALL_MI_DIR = $(MI_DIR)

# Export everything except the private list.
EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS))

EXPORT_MI_DIR = IOKit/$(MI_DIR)

include $(MakeInc_rule)
include $(MakeInc_dir)

/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License.
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __IOHIDDescriptorParser__ +#define __IOHIDDescriptorParser__ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Types and enums required by these functions but not in IOTypes.h */ + +typedef UInt8 Byte; +typedef SInt8 SignedByte; +typedef unsigned long FourCharCode; +typedef FourCharCode OSType; +typedef UInt32 ByteCount; + +enum { + noErr = 0 +}; + +/* End missing types and enums */ + +enum +{ + kHIDSuccess = 0, + +/* HID assigned error numbers are -13949 .. 
-13900 */ + kHIDBaseError = -13950, + + kHIDNullStateErr, + kHIDBufferTooSmallErr, + kHIDValueOutOfRangeErr, + kHIDUsageNotFoundErr, + kHIDNotValueArrayErr, + kHIDInvalidPreparsedDataErr, + kHIDIncompatibleReportErr, + kHIDBadLogPhysValuesErr, + kHIDInvalidReportTypeErr, + kHIDInvalidReportLengthErr, + kHIDNullPointerErr, + kHIDBadParameterErr, + kHIDNotEnoughMemoryErr, + kHIDEndOfDescriptorErr, + kHIDUsagePageZeroErr, + kHIDBadLogicalMinimumErr, + kHIDBadLogicalMaximumErr, + kHIDInvertedLogicalRangeErr, + kHIDInvertedPhysicalRangeErr, + kHIDUnmatchedUsageRangeErr, + kHIDInvertedUsageRangeErr, + kHIDUnmatchedStringRangeErr, + kHIDUnmatchedDesignatorRangeErr, + kHIDReportSizeZeroErr, + kHIDReportCountZeroErr, + kHIDReportIDZeroErr, + kHIDInvalidRangePageErr, + + // + // HID device driver errors + // + + kHIDDeviceNotReady = -13910, // The device is still initializing, try again later + kHIDVersionIncompatibleErr, +}; + +// types of HID reports (input, output, feature) +enum +{ + kHIDInputReport = 1, + kHIDOutputReport, + kHIDFeatureReport, + kHIDUnknownReport = 255 +}; + +// flags passed to HIDOpenReportDescriptor +enum +{ + kHIDFlag_StrictErrorChecking = 0x00000001 +}; + +typedef UInt32 HIDReportType; +typedef UInt32 HIDUsage; + +typedef void *HIDPreparsedDataRef; + +/*! + @typedef HIDUsageAndPage + @abstract The HIDUsageAndPage data structure is used by HID clients when obtaining status of buttons to hold the usage page and usage of a button that is down. + @discussion Clients use the HIDUSageAndPage structure with the HIDGetButtonsEx function to obtain both the usage page and usage identifiers of each button that is down. + @field usage Specifies the usage identifier within the usage page specified by usagePage of a button that is down. + @field usagePage Specifies the usage page identifier of a button that is down. 
+ */ +struct HIDUsageAndPage +{ + HIDUsage usage; + HIDUsage usagePage; +}; +typedef struct HIDUsageAndPage HIDUsageAndPage, *HIDUsageAndPagePtr; + +/*! + @typedef HIDCaps + @abstract The HIDCaps data structure is used by HID clients to hold the capabilities of a HID device. + @discussion This structure holds the parsed capabilities and data maximums returned for a device by the HIDGetCaps function. + @field usage Specifies the specific class of functionality that this device provides. This value is dependent and specific to the value provided in the usagePage field. For example, a keyboard could have a usagePage of kHIDUsagePage_Generic and a usage of kHIDUsage_Generic_Keyboard. + @field usagePage Specifies the usage page identifier for this top level collection. + @field inputReportByteLength Specifies the maximum length, in bytes, of an input report for this device, including the report ID which is unilaterally prepended to the device data. + @field outputReportByteLength Specifies the maximum length, in bytes, of an output report for this device, including the report ID which is unilaterally prepended to the device data. + @field featureReportByteLength Specifies the maximum length, in bytes, of a feature report for this device, including the report ID which is unilaterally prepended to the device data. + @field numberCollectionNodes Specifies the number of HIDCollectionNode structures that are returned for this top level collection by the HIDGetConnectionNodes function. + @field numberInputButtonCaps Specifies the number of input buttons. + @field numberInputValueCaps Specifies the number of input values. + @field numberOutputButtonCaps Specifies the number of output buttons. + @field numberOutputValueCaps Specifies the number of output values + @field numberFeatureButtonCaps Specifies the number of feature buttons. + @field numberFeatureValueCaps Specifies the number of feature values. 
+ */ +struct HIDCaps +{ + HIDUsage usage; + HIDUsage usagePage; + ByteCount inputReportByteLength; + ByteCount outputReportByteLength; + ByteCount featureReportByteLength; + UInt32 numberCollectionNodes; + UInt32 numberInputButtonCaps; + UInt32 numberInputValueCaps; + UInt32 numberOutputButtonCaps; + UInt32 numberOutputValueCaps; + UInt32 numberFeatureButtonCaps; + UInt32 numberFeatureValueCaps; +}; +typedef struct HIDCaps HIDCaps, * HIDCapsPtr; + +struct HIDCapabilities { + HIDUsage usage; + HIDUsage usagePage; + ByteCount inputReportByteLength; + ByteCount outputReportByteLength; + ByteCount featureReportByteLength; + UInt32 numberCollectionNodes; + UInt32 numberInputButtonCaps; + UInt32 numberInputValueCaps; + UInt32 numberOutputButtonCaps; + UInt32 numberOutputValueCaps; + UInt32 numberFeatureButtonCaps; + UInt32 numberFeatureValueCaps; +}; +typedef struct HIDCapabilities HIDCapabilities, * HIDCapabilitiesPtr; + + +struct HIDCollectionNode +{ + HIDUsage collectionUsage; + HIDUsage collectionUsagePage; + UInt32 parent; + UInt32 numberOfChildren; + UInt32 nextSibling; + UInt32 firstChild; +}; +typedef struct HIDCollectionNode HIDCollectionNode, * HIDCollectionNodePtr; + +struct HIDButtonCaps +{ + HIDUsage usagePage; + UInt32 reportID; + UInt32 bitField; + UInt32 collection; + HIDUsage collectionUsage; + HIDUsage collectionUsagePage; + Boolean isRange; + Boolean isStringRange; + Boolean isDesignatorRange; + Boolean isAbsolute; + SInt32 startBit; // Added esb 9-29-99 + + union + { + struct + { + HIDUsage usageMin; + HIDUsage usageMax; + UInt32 stringMin; + UInt32 stringMax; + UInt32 designatorMin; + UInt32 designatorMax; + } range; + struct + { + HIDUsage usage; + HIDUsage reserved1; + UInt32 stringIndex; + UInt32 reserved2; + UInt32 designatorIndex; + UInt32 reserved3; + } notRange; + } u; +}; +typedef struct HIDButtonCaps HIDButtonCaps, * HIDButtonCapsPtr; + +struct HIDButtonCapabilities +{ + HIDUsage usagePage; + UInt32 reportID; + UInt32 bitField; + UInt32 
collection; + HIDUsage collectionUsage; + HIDUsage collectionUsagePage; + Boolean isRange; + Boolean isStringRange; + Boolean isDesignatorRange; + Boolean isAbsolute; + + SInt32 unitExponent; // Added KH 1/25/01 + SInt32 units; // Added KH 1/25/01 +// UInt32 reserved; // Not added KH 1/25/01 + SInt32 startBit; // Added esb 9-29-99 + UInt32 pbVersion; // Added KH 1/25/01 + + union + { + struct + { + HIDUsage usageMin; + HIDUsage usageMax; + UInt32 stringMin; + UInt32 stringMax; + UInt32 designatorMin; + UInt32 designatorMax; + } range; + struct + { + HIDUsage usage; + HIDUsage reserved1; + UInt32 stringIndex; + UInt32 reserved2; + UInt32 designatorIndex; + UInt32 reserved3; + } notRange; + } u; +}; +typedef struct HIDButtonCapabilities HIDButtonCapabilities, * HIDButtonCapabilitiesPtr; + +struct HIDValueCaps +{ + HIDUsage usagePage; + UInt32 reportID; + UInt32 bitField; + UInt32 collection; + HIDUsage collectionUsage; + HIDUsage collectionUsagePage; + + Boolean isRange; + Boolean isStringRange; + Boolean isDesignatorRange; + Boolean isAbsolute; + + UInt32 startBit; // Added by esb 9-28-99 + UInt32 bitSize; + UInt32 reportCount; + + SInt32 logicalMin; + SInt32 logicalMax; + SInt32 physicalMin; + SInt32 physicalMax; + + union + { + struct + { + HIDUsage usageMin; + HIDUsage usageMax; + UInt32 stringMin; + UInt32 stringMax; + UInt32 designatorMin; + UInt32 designatorMax; + } range; + struct + { + HIDUsage usage; + HIDUsage reserved1; + UInt32 stringIndex; + UInt32 reserved2; + UInt32 designatorIndex; + UInt32 reserved3; + } notRange; + } u; +}; +typedef struct HIDValueCaps HIDValueCaps, * HIDValueCapsPtr; + +struct HIDValueCapabilities +{ + HIDUsage usagePage; + UInt32 reportID; + UInt32 bitField; + UInt32 collection; + HIDUsage collectionUsage; + HIDUsage collectionUsagePage; + + Boolean isRange; + Boolean isStringRange; + Boolean isDesignatorRange; + Boolean isAbsolute; + + UInt32 bitSize; + UInt32 reportCount; + + SInt32 logicalMin; + SInt32 logicalMax; + SInt32 
physicalMin; + SInt32 physicalMax; + + SInt32 unitExponent; // Added KH 1/25/01 + SInt32 units; // Added KH 1/25/01 +// UInt32 reserved; // Not added KH 1/25/01 + SInt32 startBit; // Added esb 9-29-99 // Moved here KH 1/25/01 + UInt32 pbVersion; // Added KH 1/25/01 + + union + { + struct + { + HIDUsage usageMin; + HIDUsage usageMax; + UInt32 stringMin; + UInt32 stringMax; + UInt32 designatorMin; + UInt32 designatorMax; + } range; + struct + { + HIDUsage usage; + HIDUsage reserved1; + UInt32 stringIndex; + UInt32 reserved2; + UInt32 designatorIndex; + UInt32 reserved3; + } notRange; + } u; +}; +typedef struct HIDValueCapabilities HIDValueCapabilities, * HIDValueCapabilitiesPtr; + +/*! + @function HIDOpenReportDescriptor + @abstract The HIDOpenReportDescriptor function allocates the memory the parser needs to handle the given report descriptor, and then parses the report descriptor. + @discussion When the parsed information is no longer needed, clients should call the HIDCloseReportDescriptor function. + @param hidReportDescriptor Contains a pointer to the actual HID report descriptor from the USB device's firmware + @param descriptorLength The length of the HID report descriptor + @param preparsedDataRef Preparsed data reference to be used for subsequent function calls + @param flags Flags for this runction are kHIDFlag_StrictErrorChecking = 0x00000001 + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDOpenReportDescriptor (void * hidReportDescriptor, + ByteCount descriptorLength, + HIDPreparsedDataRef * preparsedDataRef, + UInt32 flags); + +/*! + @function HIDCloseReportDescriptor + @abstract Disposes of the memory the parser allocated for the HIDOpenReportDescriptor function. 
+ @param hidReportDescriptor Contains a pointer to the actual HID report descriptor from the USB device's firmware + @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function. After making a call to the HIDCloseReportDescriptor function, the preparsedDataRef is invalid and should not be used. + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDCloseReportDescriptor (HIDPreparsedDataRef preparsedDataRef); + +/*! + @function HIDGetButtonCaps + @abstract Returns the button capabilities structures for a HID device based on the given preparsed data. + @param reportType Specifies the type of report for which to retrieve the scaled value. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport + @param buttonCaps Points to a caller-allocated buffer that will contain, on return, an array of HIDButtonCaps structures. The structures contain information for all buttons that meet the search criteria + @param buttonCapsSize Contains the size of the buttonCaps array passed in to the function and is set to the number of elements actually placed in the array after the call completes. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetButtonCaps (HIDReportType reportType, + HIDButtonCapsPtr buttonCaps, + UInt32 * buttonCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! + @function HIDGetButtonCapabilities + @abstract Returns the button capabilities structures for a HID device based on the given preparsed data. + @param reportType Specifies the type of report for which to retrieve the scaled value. 
This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport + @param buttonCaps Points to a caller-allocated buffer that will contain, on return, an array of HIDButtonCapabilities structures. The structures contain information for all buttons that meet the search criteria + @param buttonCapsSize Contains the size of the buttonCaps array passed in to the function and is set to the number of elements actually placed in the array after the call completes. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetButtonCapabilities (HIDReportType reportType, + HIDButtonCapabilitiesPtr buttonCaps, + UInt32 * buttonCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! + @function HIDGetCaps + @abstract Returns the capabilities of a HID device based on the given preparsed data. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param capabilities Points to a caller allocated buffer, that upon return contains the parsed capability information for this HID device. + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetCaps (HIDPreparsedDataRef preparsedDataRef, + HIDCapsPtr capabilities); + +/*! + @function HIDGetCapabilities + @abstract Returns the capabilities of a HID device based on the given preparsed data. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param capabilities Points to a caller allocated buffer, that upon return contains the parsed capability information for this HID device. + @result OSStatus Returns an error code if an error was encountered or noErr on success. 
+ */ + +extern +OSStatus +HIDGetCapabilities (HIDPreparsedDataRef preparsedDataRef, + HIDCapabilitiesPtr capabilities); + +/*! + @function HIDGetCollectionNodes + @abstract Returns an array of HIDCollectionNode structures that describe the relationships and layout of the link collections within this top level collection. + @discussion The length of the buffer required, in array elements, for an entire collection node array is found in the HIDCaps structure member numberCollectionNodes. You obtain the HIDCaps information by calling the HIDGetCaps function. For information on the relationships of link collections described by the data returned from this routine, see the descripton of the HIDCollectionNode structure. + @param collectionNodes Points to a caller-allocated array of HIDCollectionNode structures in which this routine returns an entry for each collection within the top level collection. A collection is a group of corresponding HID descriptors containing input, output, and feature items that have some common relationship to one another. For example, a pointer collection contains items for x and y position data, and button data. + @param collectionNodesSize On input, specifies the length in array elements of the buffer provided at collectionNodes. On output, this parameter is set to the number of entries in the collectionNodes array that were initialized. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetCollectionNodes (HIDCollectionNodePtr collectionNodes, + UInt32 * collectionNodesSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! + @function HIDGetScaledUsageValue + @abstract The HIDGetScaledUsageValue function returns the capabilities for all buttons for a given top level collection. 
+ @discussion Clients who which to obtain all capabilities for a usage that contains multiple data items for a single usage that corresponds to a HID byte array, must call the HIDGetUsageValueArray function. + @param reportType Specifies the type of report for which to retrieve the scaled value. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param usagePage Specifies the usage page of the value to be retrieved. + @param collection Optionally specifies the link collection identifier of the value to be retrieved. + @param usage Specifies the usage of the scaled value to be retrieved. + @param usageValue Points to a variable, that on return from this routine holds the scaled value retrieved from the device report. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param report Points to the caller-allocated buffer that contains the device report data + @param reportLength Specifies the length, in bytes, of the report data provided at report + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetScaledUsageValue (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + SInt32 * usageValue, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength); + +/*! + @function HIDGetSpecificButtonCaps + @abstract Retrieves the capabilities for all buttons in a specific type of report that meet the search criteria. + @discussion The HIDGetSpecificButtonCaps function retrieves capability data for buttons that meet a given search criteria, as opposed to the HIDGetButtonCaps function which returns the capability data for all buttons on the device. Calling this routine specifying zero for usagePage, usage and collection is equivalent to calling the HIDGetButtonCaps function. 
+ @param reportType Specifies the type of report for which to retrieve the button capabilities. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param usagePage Specifies a usage page identifier to use as a search criteria. If this parameter is non-zero, then only buttons that specify this usage page will be retrieved. + @param collection Specifies a link collection identifier to use as a search criteria. If this parameter is non-zero, then only buttons that are part of the specified link collection are retrieved. + @param usage Specifies a usage identifier to use as a search criteria. If this parameter is non-zero, then only buttons that match the value specified are retrieved. + @param buttonCaps Points to a caller-allocated buffer that will contain, on return, an array of HIDButtonCaps structures. The structures contain information for all buttons that meet the search criteria. + @param buttonCapsLength On input, specifies the length, in array elements, of the buffer provided in the buttonCaps parameter. On output, this parameter is set to the actual number of elements that were returned by the function call, in the buffer provided in the buttonCaps parameter, if the routine completed without error. The correct length necessary to retrieve the button capabilities can be found in the capability data returned for the device by the HIDGetCaps function. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetSpecificButtonCaps (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + HIDButtonCapsPtr buttonCaps, + UInt32 * buttonCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! 
+ @function HIDGetSpecificButtonCapabilities + @abstract Retrieves the capabilities for all buttons in a specific type of report that meet the search criteria. + @discussion The HIDGetSpecificButtonCapabilities function retrieves capability data for buttons that meet a given search criteria, as opposed to the HIDGetButtonCapabilities function which returns the capability data for all buttons on the device. Calling this routine specifying zero for usagePage, usage and collection is equivalent to calling the HIDGetButtonCapabilities function. + @param reportType Specifies the type of report for which to retrieve the button capabilities. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param usagePage Specifies a usage page identifier to use as a search criteria. If this parameter is non-zero, then only buttons that specify this usage page will be retrieved. + @param collection Specifies a link collection identifier to use as a search criteria. If this parameter is non-zero, then only buttons that are part of the specified link collection are retrieved. + @param usage Specifies a usage identifier to use as a search criteria. If this parameter is non-zero, then only buttons that match the value specified are retrieved. + @param buttonCaps Points to a caller-allocated buffer that will contain, on return, an array of HIDButtonCapabilities structures. The structures contain information for all buttons that meet the search criteria. + @param buttonCapsLength On input, specifies the length, in array elements, of the buffer provided in the buttonCaps parameter. On output, this parameter is set to the actual number of elements that were returned by the function call, in the buffer provided in the buttonCaps parameter, if the routine completed without error. The correct length necessary to retrieve the button capabilities can be found in the capability data returned for the device by the HIDGetCaps function. 
+ @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetSpecificButtonCapabilities (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + HIDButtonCapabilitiesPtr buttonCaps, + UInt32 * buttonCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! + @function HIDGetSpecificValueCaps + @abstract Retrieves the capabilities for all values in a specific type of report that meet the search criteria. + @discussion The HIDGetSpecificValueCaps function retrieves capability data for values that meet given search criteria, as opposed to the HIDGetValueCaps function, which returns the capability data for all values on the device. Calling this routine with a value of zero for usagePage, usage and collection parameters is equivalent to calling the HIDGetValueCaps function. + @param reportType Specifies the type of report for which to retrieve the value capabilities. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport or kHIDFeatureReport. + @param usagePage Specifies a usage page identifier to use as a search criteria. If this parameter is non-zero, then only values that specify this usage page will be retrieved. + @param collection Specifies a link collection identifier to use as a search criteria. If this parameter is non-zero, then only values that are part of this link collection will be retrieved. + @param usage Specifies a usage identifier to use as a search criteria. If this parameter is non-zero, then only values that specify this usage will be retrieved. + @param valueCaps Points to a caller-allocated buffer that will contain, on return, an array of HIDValueCaps structures that contain information for all values that meet the search criteria. 
+ @param valueCapsSize Specifies the length on input, in array elements, of the buffer provided in the valueCaps parameter. On output, this parameter is set to the actual number of elements that were returned by this function call, in the buffer provided in the valueCaps parameter, if the routine completed without error. The correct length necessary to retrieve the value capabilities can be found in the capability data returned for the device from the HIDGetCaps function. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetSpecificValueCaps (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + HIDValueCapsPtr valueCaps, + UInt32 * valueCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! + @function HIDGetSpecificValueCapabilities + @abstract Retrieves the capabilities for all values in a specific type of report that meet the search criteria. + @discussion The HIDGetSpecificValueCapabilities function retrieves capability data for values that meet given search criteria, as opposed to the HIDGetValueCapabilities function, which returns the capability data for all values on the device. Calling this routine with a value of zero for usagePage, usage and collection parameters is equivalent to calling the HIDGetValueCapabilities function. + @param reportType Specifies the type of report for which to retrieve the value capabilities. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport or kHIDFeatureReport. + @param usagePage Specifies a usage page identifier to use as a search criteria. If this parameter is non-zero, then only values that specify this usage page will be retrieved. + @param collection Specifies a link collection identifier to use as a search criteria. 
If this parameter is non-zero, then only values that are part of this link collection will be retrieved.
+ @param usage Specifies a usage identifier to use as a search criteria. If this parameter is non-zero, then only values that specify this usage will be retrieved.
+ @param valueCaps Points to a caller-allocated buffer that will contain, on return, an array of HIDValueCapabilities structures that contain information for all values that meet the search criteria.
+ @param valueCapsSize Specifies the length on input, in array elements, of the buffer provided in the valueCaps parameter. On output, this parameter is set to the actual number of elements that were returned by this function call, in the buffer provided in the valueCaps parameter, if the routine completed without error. The correct length necessary to retrieve the value capabilities can be found in the capability data returned for the device from the HIDGetCaps function.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */
+
+extern
+OSStatus
+HIDGetSpecificValueCapabilities (HIDReportType reportType,
+ HIDUsage usagePage,
+ UInt32 collection,
+ HIDUsage usage,
+ HIDValueCapabilitiesPtr valueCaps,
+ UInt32 * valueCapsSize,
+ HIDPreparsedDataRef preparsedDataRef);
+
+/*!
+ @function HIDGetButtonsOnPage
+ @abstract Retrieves the button state information for buttons on a specified usage page.
+ @param reportType Specifies the type of report, provided in the report parameter, from which to retrieve the buttons. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport or kHIDFeatureReport.
+ @param usagePage Specifies the usage page of the buttons for which to retrieve the current state.
+ @param collection Optionally specifies the link collection identifier used to retrieve only specific button states.
If this value is non-zero, only the buttons that are part of the given collection are returned.
+ @param usageList On return, points to a caller-allocated buffer that contains the usages of all the buttons that are pressed and belong to the usage page specified in the usagePage parameter.
+ @param usageListSize Is the size, in array elements, of the buffer provided in the usageList parameter. On return, this parameter contains the number of button states that were set by this routine. If the error kHIDBufferTooSmallErr was returned, this parameter contains the number of array elements required to hold all button data requested. The maximum number of buttons that can ever be returned for a given type of report can be obtained by calling the HIDMaxUsageListLength function.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @param report Points to the caller-allocated buffer that contains the device report data
+ @param reportLength Specifies the size, in bytes, of the report data provided in the report parameter
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */
+
+extern
+OSStatus
+HIDGetButtonsOnPage (HIDReportType reportType,
+ HIDUsage usagePage,
+ UInt32 collection,
+ HIDUsage * usageList,
+ UInt32 * usageListSize,
+ HIDPreparsedDataRef preparsedDataRef,
+ void * report,
+ ByteCount reportLength);
+
+/*!
+ @function HIDGetButtons
+ @abstract The HIDGetButtons function takes a report from a HID device and gets the current state of the buttons in that report.
+ @param reportType Specifies the type of report, provided in the report parameter, from which to retrieve the buttons. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport or kHIDFeatureReport
+ @param collection Optionally specifies the link collection identifier used to retrieve only specific button states.
If this value is non-zero, only the buttons that are part of the given collection are returned.
+ @param usageList On return, points to a caller-allocated buffer that contains the usages of all the buttons that are pressed.
+ @param usageListSize Is the size, in array elements, of the buffer provided in the usageList parameter. On return, this parameter contains the number of button states that were set by this routine. If the error kHIDBufferTooSmallErr was returned, this parameter contains the number of array elements required to hold all button data requested. The maximum number of buttons that can ever be returned for a given type of report can be obtained by calling the HIDMaxUsageListLength function.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @param report Points to the caller-allocated buffer that contains the device report data.
+ @param reportLength Specifies the length, in bytes, of the report data provided in the report parameter.
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */
+
+extern
+OSStatus
+HIDGetButtons (HIDReportType reportType,
+ UInt32 collection,
+ HIDUsageAndPagePtr usageList,
+ UInt32 * usageListSize,
+ HIDPreparsedDataRef preparsedDataRef,
+ void * report,
+ ByteCount reportLength);
+
+extern
+OSStatus
+HIDGetNextButtonInfo (HIDReportType reportType,
+ HIDUsage usagePage,
+ HIDUsage usage,
+ UInt32 * collection,
+ UInt8 * reportID,
+ HIDPreparsedDataRef preparsedDataRef);
+
+extern
+OSStatus
+HIDGetNextUsageValueInfo (HIDReportType reportType,
+ HIDUsage usagePage,
+ HIDUsage usage,
+ UInt32 * collection,
+ UInt8 * reportID,
+ HIDPreparsedDataRef preparsedDataRef);
+
+extern
+OSStatus
+HIDGetReportLength (HIDReportType reportType,
+ UInt8 reportID,
+ ByteCount * reportLength,
+ HIDPreparsedDataRef preparsedDataRef);
+
+/*!
+ @function HIDGetUsageValue + @abstract The HIDGetUsageValue function returns a value from a device data report given a selected search criteria. + @discussion The HIDGetUsageValue function does not sign the value. To have the sign bit automatically applied, use the HIDGetScaledUsageValue function instead. For manually assigning the sign bit, the position of the sign bit can be found in the HIDValueCaps structure for this value. Clients who wish to obtain all data for a usage that contains multiple data items for a single usage, corresponding to a HID byte array, must call the HIDGetUsageValueArray function instead. + @param reportType Specifies the type of report, provided in report, from which to retrieve the value. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param usagePage Specifies the usage page of the value to retrieve. + @param collection Optionally specifies the link collection identifier of the value to be retrieved. + @param usage Specifies the usage of the value to be retrieved. + @param usageValue Points to a variable, that on return from this routine holds the value retrieved from the device report. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param report Points to the caller-allocated buffer that contains the device report data. + @param reportLength Specifies the size, in bytes, of the report data provided in the report parameter. + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetUsageValue (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + SInt32 * usageValue, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength); + +/*! 
+ @function HIDGetUsageValueArray + @abstract The HIDGetUsageValueArray function returns a value from a device data report given a selected search criteria. + @discussion When the HIDGetUsageValueArray function retrieves the data, it fills in the buffer in little-endian order beginning with the least significant bit of the data for this usage. The data is filled in without regard to byte alignment and is shifted such that the least significant bit is placed as the 1st bit of the given buffer. + @param reportType Specifies the type of report, provided in report, from which to retrieve the value. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param usagePage Specifies the usage page of the data to be retrieved. + @param collection Optionally specifies the link collection identifier of the data to be retrieved. + @param usage Specifies the usage identifier of the value to be retrieved. + @param usageValueBuffer Points to a caller-allocated buffer that contains, on output, the data from the device. The correct length for this buffer can be found by multiplying the reportCount and bitSize fields of the HIDValueCaps structure for the value and rounding the resulting value up to the nearest byte. + @param usageValueBufferSize Specifies the size, in bytes, of the buffer in the usageValueBuffer parameter. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param report Points to the caller-allocated buffer that contains the device report data. + @param reportLength Specifies the size, in bytes, of the report data provided in report. + @result OSStatus Returns an error code if an error was encountered or noErr on success. 
+ */ + +extern +OSStatus +HIDGetUsageValueArray (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + Byte * usageValueBuffer, + ByteCount usageValueBufferSize, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength); + +/*! + @function HIDGetValueCaps + @abstract The HIDGetValueCaps function retrieves the capabilities for all values for a specified top level collection. + @discussion The HIDGetValueCaps function retrieves the capability data for all values in a top level collection without regard for the usage, usage page or collection of the value. To retrieve value capabilities for a specific usage, usage page or collection, use the HIDGetSpecificValueCaps function. + @param reportType Specifies the type of report for which to retrieve the value capabilities. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param valueCaps On return, points to a caller-allocated buffer that contains an array of HIDValueCaps structures containing information for all values in the top level collection. + @param valueCapsSize On input, specifies the size in array elements of the buffer provided in the valueCaps parameter. On output, this parameter is set to the actual number of elements that were returned in the buffer provided in the valueCaps parameter, if the function completed without error. The correct length necessary to retrieve the value capabilities can be found in the capability data returned for the device by the HIDGetCaps function. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetValueCaps (HIDReportType reportType, + HIDValueCapsPtr valueCaps, + UInt32 * valueCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +/*! 
+ @function HIDGetValueCapabilities + @abstract The HIDGetValueCapabilities function retrieves the capabilities for all values for a specified top level collection. + @discussion The HIDGetValueCapabilities function retrieves the capability data for all values in a top level collection without regard for the usage, usage page or collection of the value. To retrieve value capabilities for a specific usage, usage page or collection, use the HIDGetSpecificValueCapabilities function. + @param reportType Specifies the type of report for which to retrieve the value capabilities. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param valueCaps On return, points to a caller-allocated buffer that contains an array of HIDValueCapabilities structures containing information for all values in the top level collection. + @param valueCapsSize On input, specifies the size in array elements of the buffer provided in the valueCaps parameter. On output, this parameter is set to the actual number of elements that were returned in the buffer provided in the valueCaps parameter, if the function completed without error. The correct length necessary to retrieve the value capabilities can be found in the capability data returned for the device by the HIDGetCapabilities function. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDGetValueCapabilities (HIDReportType reportType, + HIDValueCapabilitiesPtr valueCaps, + UInt32 * valueCapsSize, + HIDPreparsedDataRef preparsedDataRef); + +extern +OSStatus +HIDInitReport (HIDReportType reportType, + UInt8 reportID, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength); + +/*! 
+ @function HIDMaxUsageListLength
+ @abstract The HIDMaxUsageListLength function returns the maximum number of buttons that can be returned from a given report type for the top level collection.
+ @param reportType Specifies the type of report for which to get a maximum usage count. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport.
+ @param usagePage Optionally specifies the usage page identifier to use as a search criteria. If this parameter is zero, the function returns the number of buttons for the entire top-level collection regardless of the actual value of the usage page.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @result UInt32 Returns the maximum number of buttons that can be returned in a usage list for the given report type.
+ */
+
+extern
+UInt32
+HIDMaxUsageListLength (HIDReportType reportType,
+ HIDUsage usagePage,
+ HIDPreparsedDataRef preparsedDataRef);
+
+/*!
+ @function HIDSetScaledUsageValue
+ @abstract The HIDSetScaledUsageValue function takes a signed physical (scaled) number and converts it to the logical, or device representation and inserts it in a given report.
+ @discussion The HIDSetScaledUsageValue function automatically handles the setting of the signed bit in the data to be sent to the device.
+ @param reportType Specifies the type of report. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport.
+ @param usagePage Specifies the usage page identifier of the value to be set in the report.
+ @param collection Optionally specifies the link collection identifier to distinguish between values that have the same usage page and usage identifiers. If this parameter is zero, it will be ignored.
+ @param usage Specifies the usage identifier of the value to be set in the report.
+ @param usageValue Specifies the physical, or scaled, value to be set in the value for the given report.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @param report Points to the caller-allocated buffer that contains the device report data.
+ @param reportLength Specifies the length, in bytes, of the report data specified in the report parameter.
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */
+
+extern
+OSStatus
+HIDSetScaledUsageValue (HIDReportType reportType,
+ HIDUsage usagePage,
+ UInt32 collection,
+ HIDUsage usage,
+ SInt32 usageValue,
+ HIDPreparsedDataRef preparsedDataRef,
+ void * report,
+ ByteCount reportLength);
+
+/*!
+ @function HIDSetButtons
+ @abstract The HIDSetButtons function takes a report from a HID device and sets the current state of the buttons in that report.
+ @param reportType Specifies the type of report. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport.
+ @param usagePage Specifies the usage page identifier of the value to be set in the report.
+ @param collection Optionally specifies the link collection identifier to distinguish between buttons. If this parameter is zero, it is ignored.
+ @param usageList Points to a caller-allocated buffer that contains an array of button data to be set in the report in the report parameter.
+ @param usageListSize Specifies the size, in array elements, of the buffer provided in the usageList parameter. If an error is returned by a call to this function, the usageListSize parameter contains the location in the array provided in the usageList parameter where the error was encountered. All array entries encountered prior to the error location were successfully set in the report provided in the report parameter.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @param report Points to the caller-allocated buffer that contains the device report data.
+ @param reportLength Specifies the size, in bytes, of the report data provided in the report parameter.
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */
+
+extern
+OSStatus
+HIDSetButtons (HIDReportType reportType,
+ HIDUsage usagePage,
+ UInt32 collection,
+ HIDUsage * usageList,
+ UInt32 * usageListSize,
+ HIDPreparsedDataRef preparsedDataRef,
+ void * report,
+ ByteCount reportLength);
+
+/*!
+ @function HIDSetUsageValue
+ @abstract The HIDSetUsageValue function sets a value in a given report.
+ @discussion The HIDSetUsageValue function does not automatically handle the sign bit. Clients must either manually set the sign bit, at the position provided in the HIDValueCaps structure for this value, or call the HIDSetScaledUsageValue function.
+ @param reportType Specifies the type of report. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport.
+ @param usagePage Specifies the usage page identifier of the value to be set in the report.
+ @param collection Optionally specifies the link collection identifier to distinguish between values that have the same usage page and usage identifiers. If this parameter is zero, it is ignored.
+ @param usage Specifies the usage identifier of the value to be set in the report.
+ @param usageValue Specifies the data that is to be set in the value for the given report.
+ @param preparsedDataRef Preparsed data reference for the report that is returned by the HIDOpenReportDescriptor function
+ @param report Points to the caller-allocated buffer that contains the device report data.
+ @param reportLength Specifies the size, in bytes, of the report data provided in the report parameter.
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */ + +extern +OSStatus +HIDSetUsageValue (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + SInt32 usageValue, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength); + +/*! + @function HIDSetUsageValueArray + @abstract The HIDSetUsageValueArray function sets an array of values in a given report. + @discussion The HIDSetUsageValue function does not automatically handle the sign bit. Clients must either manually set the sign bit, at the position provided in the HIDValueCaps structure for this value, or call the HIDSetScaledUsageValue function. + @param reportType Specifies the type of report. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport. + @param usagePage Specifies the usage page identifier of the value to be set in the report. + @param collection Optionally specifies the link collection identifier to distinguish between values that have the same usage page and usage identifiers. If this parameter is zero, it is ignored. + @param usage Specifies the usage identifier of the value to be set in the report. + @param usageValueBuffer Points to a caller-allocated buffer that contains, on output, the data from the device. The correct length for this buffer can be found by multiplying the reportCount and bitSize fields of the HIDValueCaps structure for this value and rounding the resulting value up to the nearest byte. + @param usageValueBufferLength Specifies the size, in bytes, of the buffer in the usageValueBuffer parameter. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param report Points to the caller-allocated buffer that contains the device report data. + @param reportLength Specifies the size, in bytes, of the report data provided in the report parameter. + @result OSStatus Returns an error code if an error was encountered or noErr on success. 
+ */
+
+extern
+OSStatus
+HIDSetUsageValueArray (HIDReportType reportType,
+ HIDUsage usagePage,
+ UInt32 collection,
+ HIDUsage usage,
+ Byte * usageValueBuffer,
+ ByteCount usageValueBufferLength,
+ HIDPreparsedDataRef preparsedDataRef,
+ void * report,
+ ByteCount reportLength);
+
+/*!
+ @function HIDUsageListDifference
+ @abstract The HIDUsageListDifference function compares and provides the differences between two lists of buttons.
+ @param previousUsageList Points to the older button list to be used for comparison.
+ @param currentUsageList Points to the newer button list to be used for comparison.
+ @param breakUsageList On return, points to a caller-allocated buffer that contains the buttons set in the older list, specified in the previousUsageList parameter, but not set in the new list, specified in the currentUsageList parameter.
+ @param makeUsageList On return, points to a caller-allocated buffer that contains the buttons set in the new list, specified in the currentUsageList parameter, but not set in the old list, specified in the previousUsageList parameter.
+ @param usageListsSize Specifies the length, in array elements, of the buffers provided in the currentUsageList and previousUsageList parameters.
+ @result OSStatus Returns an error code if an error was encountered or noErr on success.
+ */
+
+extern
+OSStatus
+HIDUsageListDifference (HIDUsage * previousUsageList,
+ HIDUsage * currentUsageList,
+ HIDUsage * breakUsageList,
+ HIDUsage * makeUsageList,
+ UInt32 usageListsSize);
+
+/*!
+ @function HIDSetButton
+ @abstract The HIDSetButton function takes a report from a HID device and sets the current state of the specified button in that report.
+ @param reportType Specifies the type of report. This parameter must be one of the following: kHIDInputReport, kHIDOutputReport, or kHIDFeatureReport.
+ @param usagePage Specifies the usage page identifier of the value to be set in the report.
+ @param collection Optionally specifies the link collection identifier to distinguish between buttons. If this parameter is zero, it is ignored. + @param usage Points to a caller-allocated buffer that contains the button data to be set in the report in the report parameter. + @param preparsedDataRef Preparsed data reference for the report that is retuned by the HIDOpenReportDescriptor function + @param report Points to the caller-allocated buffer that contains the device report data. + @param reportLength Specifies the size, in bytes, of the report data provided in the report parameter. + @result OSStatus Returns an error code if an error was encountered or noErr on success. + */ + +extern +OSStatus +HIDSetButton (HIDReportType reportType, + HIDUsage usagePage, + UInt32 collection, + HIDUsage usage, + HIDPreparsedDataRef preparsedDataRef, + void * report, + ByteCount reportLength); + + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/iokit/IOKit/hidsystem/IOHIDParameter.h b/iokit/IOKit/hidsystem/IOHIDParameter.h new file mode 100644 index 000000000..bb806e679 --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIDParameter.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * evsio.h - Get/Set parameter calls for Event Status Driver. + * + * CAUTION: Developers should stick to the API exported in + * to guarantee + * binary compatability of their applications in future + * releases. + * + * HISTORY + * 22 May 1992 Mike Paquette at NeXT + * Created. + */ +#ifndef _DEV_EVSIO_H +#define _DEV_EVSIO_H + +/* Public type definitions. */ +#include +#include + +/* + * Identify this driver as one that uses the new driverkit and messaging API + */ +#ifndef _NeXT_MACH_EVENT_DRIVER_ +#define _NeXT_MACH_EVENT_DRIVER_ (1) +#endif /* !_NeXT_MACH_EVENT_DRIVER_ */ + +/* * */ + +#define kIOHIDKindKey "HIDKind" +#define kIOHIDInterfaceIDKey "HIDInterfaceID" +#define kIOHIDSubinterfaceIDKey "HIDSubinterfaceID" + +#define kIOHIDKeyRepeatKey "HIDKeyRepeat" +#define kIOHIDInitialKeyRepeatKey "HIDInitialKeyRepeat" +#define kIOHIDKeyMappingKey "HIDKeyMapping" +#define kIOHIDResetKeyboardKey "HIDResetKeyboard" + +#define kIOHIDPointerResolutionKey "HIDPointerResolution" +#define kIOHIDPointerAccelerationKey "HIDPointerAcceleration" +#define kIOHIDResetPointerKey "HIDResetPointer" +#define kIOHIDPointerConvertAbsoluteKey "HIDPointerConvertAbsolute" +#define kIOHIDPointerContactToMoveKey "HIDPointerContactToMove" +#define kIOHIDPointerPressureToClickKey "HIDPointerPressureToClick" + +#define kIOHIDClickTimeKey "HIDClickTime" +#define kIOHIDClickSpaceKey "HIDClickSpace" + +#define kIOHIDAutoDimThresholdKey "HIDAutoDimThreshold" +#define kIOHIDAutoDimStateKey "HIDAutoDimState" +#define kIOHIDAutoDimTimeKey "HIDAutoDimTime" +#define kIOHIDIdleTimeKey "HIDIdleTime" + +#define kIOHIDBrightnessKey "HIDBrightness" +#define kIOHIDAutoDimBrightnessKey "HIDAutoDimBrightness" + +#ifdef _undef +#define EVS_PREFIX "Evs_" /* All EVS calls start with this string 
*/ + +/* WaitCursor-related ioctls */ + +#define EVSIOSWT "Evs_SetWaitThreshold" +#define EVSIOSWT_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOSWS "Evs_SetWaitSustain" +#define EVSIOSWS_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOSWFI "Evs_SetWaitFrameInterval" +#define EVSIOSWFI_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOCWINFO "Evs_CurrentWaitCursorInfo" +#define EVSIOCWINFO_THRESH 0 +#define EVSIOCWINFO_SUSTAIN (EVSIOCWINFO_THRESH + EVS_PACKED_TIME_SIZE) +#define EVSIOCWINFO_FINTERVAL (EVSIOCWINFO_SUSTAIN + EVS_PACKED_TIME_SIZE) +#define EVSIOCWINFO_SIZE (EVSIOCWINFO_FINTERVAL + EVS_PACKED_TIME_SIZE) +#endif + +#define EVS_PACKED_TIME_SIZE (sizeof(UInt64) / sizeof( unsigned int)) + +/* Device control ioctls. Levels specified may be in the range 0 - 64. */ + +#define EVSIOSB kIOHIDBrightnessKey +#define EVSIOSB_SIZE 1 + +#define EVSIOSADB kIOHIDAutoDimBrightnessKey +#define EVSIOSADB_SIZE 1 + +#ifdef _undef +#define EVSIOSA "Evs_SetAttenuation" +#define EVIOSA_SIZE 1 + +#define EVSIO_DCTLINFO "Evs_DeviceControlInfo" +typedef enum { + EVSIO_DCTLINFO_BRIGHT, + EVSIO_DCTLINFO_ATTEN, + EVSIO_DCTLINFO_AUTODIMBRIGHT +} evsio_DCTLINFOIndices; +#define EVSIO_DCTLINFO_SIZE (EVSIO_DCTLINFO_AUTODIMBRIGHT + 1) +#endif + +/* + * Device status request + */ +#define EVSIOINFO NX_EVS_DEVICE_INFO + + +/* Keyboard-related ioctls - implemented within Event Sources */ + +#define EVSIOSKR kIOHIDKeyRepeatKey +#define EVSIOSKR_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOSIKR kIOHIDInitialKeyRepeatKey +#define EVSIOSIKR_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIORKBD kIOHIDResetKeyboardKey +#define EVSIORKBD_SIZE 1 + +#define EVSIOCKR_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOCKML kIOHIDKeyMappingKey +#define EVSIOCKML_SIZE 1 + +/* The following two tokens are for use with the get/set character routines. 
*/ +#define EVSIOSKM kIOHIDKeyMappingKey +#define EVSIOSKM_SIZE 4096 + +#define EVSIOCKM kIOHIDKeyMappingKey +#define EVSIOCKM_SIZE 4096 + +/* Mouse-related ioctls - implemented within Event Sources */ + +#define EVSIOSMS kIOHIDPointerAccelerationKey +#define EVSIOSMS_SIZE (1) + +#define EVSIOCMS kIOHIDPointerAccelerationKey +#define EVSIOCMS_SIZE (1) + +#ifdef _undef +#define EVSIOSMH "Evs_SetMouseHandedness" +#define EVSIOSMH_SIZE 1 // value from NXMouseButton enum + +#define EVSIOCMH "Evs_CurrentMouseHandedness" +#define EVSIOCMH_SIZE 1 +#endif + +/* Generic pointer device controls, implemented by the Event Driver. */ +#define EVSIOSCT kIOHIDClickTimeKey +#define EVSIOSCT_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOSCS kIOHIDClickSpaceKey +typedef enum { + EVSIOSCS_X, + EVSIOSCS_Y +} evsioEVSIOSCSIndices; +#define EVSIOSCS_SIZE (EVSIOSCS_Y + 1) + +#define EVSIOSADT kIOHIDAutoDimThresholdKey +#define EVSIOSADT_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOSADS kIOHIDAutoDimStateKey +#define EVSIOSADS_SIZE 1 + +#define EVSIORMS kIOHIDResetPointerKey +#define EVSIORMS_SIZE 1 + +#define EVSIOCCT kIOHIDClickTimeKey +#define EVSIOCCT_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOCADT kIOHIDAutoDimThresholdKey +#define EVSIOCADT_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOGDADT kIOHIDAutoDimTimeKey +#define EVSIOGDADT_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOIDLE kIOHIDIdleTimeKey +#define EVSIOIDLE_SIZE EVS_PACKED_TIME_SIZE + +#define EVSIOCCS kIOHIDClickSpaceKey +typedef enum { + EVSIOCCS_X, + EVSIOCCS_Y +} evsioEVSIOCCSIndices; +#define EVSIOCCS_SIZE (EVSIOCCS_Y + 1) + +#define EVSIOCADS kIOHIDAutoDimStateKey +#define EVSIOCADS_SIZE 1 + +#endif /* !_DEV_EVSIO_H */ diff --git a/iokit/IOKit/hidsystem/IOHIDShared.h b/iokit/IOKit/hidsystem/IOHIDShared.h new file mode 100644 index 000000000..e1f5325ae --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIDShared.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/****************************************************************************** + + evio.h + Ioctl calls for the events driver + Leovitch 02Jan88 + + Copyright 1988 NeXT, Inc. + + CAUTION: Developers should stick to the API exported in + to guarantee + binary compatability of their applications in future + releases. + + Modified: + + 09Dec88 Leo Broken out from evsio.h + 24Aug89 Ted ANSI function prototyping. + 19Feb90 Ted Major revision for multiple driver support. + 26Feb90 Ted New evioScreen structure and EVIOST ioctl. + 12Mar90 Ted New ev_unregister_screen function, SCREENTOKEN constant. + 06May90 Ted Added AALastEventSent and AALastEventConsumed to EvVars. + 22May90 Trey More wait cursor vars in EvVars. + 13Jun90 Ted NXCursorData structure. + 18Jun90 Ted Default wait cursor constants. + 26Sep90 Ted Enhanced cursor system to support intelligent drivers. + 26Nov90 Ted Removed NXSaveCursor and NXCursorData structures + 28Nov90 Ted Remove EvVars, rolled into EventGlobals + 28Nov90 Ted Renamed EventGlobals -> EvGlobals, eventGlobals -> evg + 05May92 Mike Reworked for NRW driver architecture. 
+ +******************************************************************************/ + +#ifndef _DEV_EVIO_H +#define _DEV_EVIO_H + +#include + +__BEGIN_DECLS + +#if KERNEL +#include +#else /* !KERNEL */ +#include +#include +#endif /* KERNEL */ + +#include +#include +#include +#include + +/* + * Identify this driver as one that uses the new driverkit and messaging API + */ +#ifndef _NeXT_MACH_EVENT_DRIVER_ +#define _NeXT_MACH_EVENT_DRIVER_ (1) +#endif /* _NeXT_MACH_EVENT_DRIVER_ */ + + +/* Pressure Constants */ +#define MINPRESSURE EV_MINPRESSURE +#define MAXPRESSURE EV_MAXPRESSURE + +#define LLEQSIZE 80 /* Entries in low-level event queue */ + +typedef struct _NXEQElStruct { + int next; /* Slot of lleq for next event */ + ev_lock_data_t sema; /* Is high-level code reading this event now? */ + NXEvent event; /* The event itself */ +} NXEQElement; + + +/****************************************************************************** + SHARED MEMORY OVERVIEW + + PERSPECTIVE + The ev driver and PostScript share at least one page of wired memory. + This memory contains the low-level event queue which ev deposits events + into and PostScript reads events from. Also, this memory contains other + important data such as wait cursor state and some general cursor state. + This memory is critical for speed. That is, we avoid having to make + system calls for common operations. + + SHARED MEMORY REGIONS + There are currently three "regions" or "zones" delineated within this + shared memory. The first zone is the EvOffsets structure. This structure + contains two offsets from the beginning of shared memory. The first offset + is to the second zone, EvGlobals. The second offset is to the third + zone, private shmem for drivers. + + INITIALIZATION OF SHARED MEMORY + When the WindowServer starts up, it finds all screens that will be active. + It then opens the ev driver and calls the EVIOSSCR ioctl repeatedly for + each screen in use. 
This lets the ev driver set up the evScreen array + and fill in each element. This ioctl also returns to PostScript a running + total shared memory size with which to allocate. PostScript then allocates + a region of memory this size and calls evmmap to "map in" this shared + region. Evmmap initializes and fills in the EvOffsets and EvGlobals. + Next the WindowServer calls each screen in turn to register itself with + the ev driver in the same sequence as presented to EVIOSSCR. Each screen + driver calls ev_register_screen() which among other things allocates a + part of the private shmem (of the third shared memory zone) for the driver. + + DEBUGGING NOTES + You can easily display and set this shared memory from kgdb, but usually + cannot do so from within PostScript. Gdb (or some weird interaction + between gdb and the os) chokes on this shmem. So if you read or write + this area of memory, copy-on-write will occur and you'll get a completely + new page for PostScript. This will render the shared memory scheme + useless and you will have to restart PostScript. It was my understanding + that before, we were able to "read" this area from PS, but not write to + it (the idea behind copy-on-WRITE). However, this seems to be broken + in 2.0. We think this is a kernel bug. +******************************************************************************/ + +typedef volatile struct _evOffsets { + int evGlobalsOffset; /* Offset to EvGlobals structure */ + int evShmemOffset; /* Offset to private shmem regions */ +} EvOffsets; + +/****************************************************************************** + EvGlobals + This structures defines the portion of the events driver data structure + that is exported to the PostScript server. It contains the event queue + which is in memory shared between the driver and the PostScript server. + All the variables necessary to read and process events from the queue are + contained here. 
+******************************************************************************/ + +typedef volatile struct _evGlobals { + ev_lock_data_t cursorSema; /* set to disable periodic code */ + int LLEHead; /* The next event to be read */ + int LLETail; /* Where the next event will go */ + int LLELast; /* The last event entered */ + int eNum; /* Unique id for mouse events */ + int buttons; /* State of the mouse buttons 1==down, 0==up */ + int eventFlags; /* The current value of event.flags */ + int VertRetraceClock; /* The current value of event.time */ + IOGPoint cursorLoc; /* The current location of the cursor */ + int frame; /* current cursor frame */ + IOGBounds workBounds; /* bounding box of all screens */ + IOGBounds mouseRect; /* Rect for mouse-exited events */ + int version; /* for run time checks */ + int structSize; /* for run time checks */ + unsigned int reservedA[32]; + + unsigned reserved:27; + unsigned wantPressure:1; /* pressure in current mouseRect? */ + unsigned wantPrecision:1; /* precise coordinates in current mouseRect? */ + unsigned dontWantCoalesce:1;/* coalesce within the current mouseRect? */ + unsigned dontCoalesce:1; /* actual flag which determines coalescing */ + unsigned mouseRectValid:1; /* If nonzero, post a mouse-exited + whenever mouse outside mouseRect. */ + int movedMask; /* This contains an event mask for the + three events MOUSEMOVED, + LMOUSEDRAGGED, and RMOUSEDRAGGED. + It says whether driver should + generate those events. */ + int AALastEventSent; /* timestamp for wait cursor */ + int AALastEventConsumed; /* timestamp for wait cursor */ + ev_lock_data_t waitCursorSema; /* protects wait cursor fields */ + int waitCursorUp; /* Is wait cursor up? */ + char ctxtTimedOut; /* Has wait cursor timer expired? */ + char waitCursorEnabled; /* Play wait cursor game (per ctxt)? */ + char globalWaitCursorEnabled; /* Play wait cursor game (global)? 
*/ + int waitThreshold; /* time before wait cursor appears */ + NXEQElement lleq[LLEQSIZE]; /* The event queue itself */ +} EvGlobals; + + +/* These evio structs are used in various calls supported by the ev driver. */ + +struct evioLLEvent { + int setCursor; + int type; + IOGPoint location; + NXEventData data; + int setFlags; + int flags; +}; + +typedef struct evioLLEvent _NXLLEvent; + +#ifdef mach3xxx + +/* + * On a keypress of a VOL UP or VOL DOWN key, we send a message to the + * sound server to notify it of the volume change. The message includes + * a flag to indicate which key was pressed, and the machine independant + * flag bits to indicate which modifier keys were pressed. + */ + +struct evioSpecialKeyMsg +{ + msg_header_t Head; + msg_type_t keyType; + int key; // special key number, from bsd/dev/ev_keymap.h + msg_type_t directionType; + int direction; // NX_KEYDOWN, NX_KEYUP from event.h + msg_type_t flagsType; + int flags; // device independant flags from event.h + msg_type_t levelType; + int level; // EV_AUDIO_MIN_VOLUME to EV_AUDIO_MAX_VOLUME +}; +#else +struct evioSpecialKeyMsg +{ + mach_msg_header_t Head; + int key; // special key number, from bsd/dev/ev_keymap.h + int direction; // NX_KEYDOWN, NX_KEYUP from event.h + int flags; // device independant flags from event.h + int level; // EV_AUDIO_MIN_VOLUME to EV_AUDIO_MAX_VOLUME +}; +#endif + +#define EV_SPECIAL_KEY_MSG_ID (('S'<<24) | ('k'<<16) | ('e'<<8) | ('y')) +typedef struct evioSpecialKeyMsg *evioSpecialKeyMsg_t; + +/* + * Volume ranges + */ +#define EV_AUDIO_MIN_VOLUME 0 +#define EV_AUDIO_MAX_VOLUME 64 + +#define kIOHIDSystemClass "IOHIDSystem" +#define kIOHIKeyboardClass "IOHIKeyboard" +#define kIOHIPointingClass "IOHIPointing" + +#define IOHIDSYSTEM_CONFORMSTO kIOHIDSystemClass + +enum { + kIOHIDCurrentShmemVersion = 2, + kIOHIDServerConnectType = 0, + kIOHIDParamConnectType = 1, + kIOHIDGlobalMemory = 0, + kIOHIDEventNotification = 0 +}; + +#ifdef KERNEL +typedef UInt16 
(*MasterVolumeUpdate)(void); +typedef bool (*MasterMuteUpdate)(void); + +typedef struct { + MasterVolumeUpdate incrementMasterVolume; + MasterVolumeUpdate decrementMasterVolume; + MasterMuteUpdate toggleMasterMute; +} MasterAudioFunctions; + +extern MasterAudioFunctions *masterAudioFunctions; +#endif + +#ifndef KERNEL +#ifndef _IOKIT_IOHIDLIB_H +#include +#endif +#endif /* !KERNEL */ + +__END_DECLS + + +#endif /* !_DEV_EVIO_H */ diff --git a/iokit/IOKit/hidsystem/IOHIDSystem.h b/iokit/IOKit/hidsystem/IOHIDSystem.h new file mode 100644 index 000000000..c39558c89 --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIDSystem.h @@ -0,0 +1,498 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * EventDriver.h - Exported Interface Event Driver object. + * + * The EventDriver is a pseudo-device driver. + * + * HISTORY + * 19 Mar 1992 Mike Paquette at NeXT + * Created. 
+ * 4 Aug 1993 Erik Kay at NeXT + * API cleanup + */ + +#ifndef _IOHIDSYSTEM_H +#define _IOHIDSYSTEM_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ev_keymap.h" /* For NX_NUM_SCANNED_SPECIALKEYS */ + +typedef void (*IOHIDAction)(OSObject *, void *); + +class IOHIDSystem : public IOService +{ + OSDeclareDefaultStructors(IOHIDSystem); + + friend class IOHIDUserClient; + friend class IOHIDParamUserClient; + +private: + IOLock * driverLock; + + IOWorkLoop * workLoop; + IOTimerEventSource * timerES; + IOCommandQueue * cmdQ; + IOUserClient * serverConnect; + IOUserClient * paramConnect; + IONotifier * publishNotify; + + // Ports on which we hold send rights + mach_port_t eventPort; // Send msg here when event queue + // goes non-empty + mach_port_t _specialKeyPort[NX_NUM_SCANNED_SPECIALKEYS]; // Special key msgs + void *eventMsg; // Msg to be sent to Window Server. + + // Shared memory area information + IOBufferMemoryDescriptor * globalMemory; + vm_offset_t shmem_addr; // kernel address of shared memory + vm_size_t shmem_size; // size of shared memory + + // Pointers to structures which occupy the shared memory area. + volatile void *evs; // Pointer to private driver shmem + volatile EvGlobals *evg; // Pointer to EvGlobals (shmem) + // Internal variables related to the shared memory area + int lleqSize; // # of entries in low-level queue + // FIXME: why is this ivar lleqSize an ivar? {Dan] + + // Screens list + vm_size_t evScreenSize; // Byte size of evScreen array + void *evScreen; // array of screens known to driver + volatile void *lastShmemPtr; // Pointer used to index thru shmem + // while assigning shared areas to + // drivers. + int screens; // running total of allocated screens + UInt32 cursorScreens; // bit mask of screens with cursor present + UInt32 cursorPinScreen;// a screen to pin against + Bounds cursorPin; // Range to which cursor is pinned + // while on this screen. 
+ Bounds workSpace; // Bounds of full workspace. + // Event Status state - This includes things like event timestamps, + // time til screen dim, and related things manipulated through the + // Event Status API. + // + Point pointerLoc; // Current pointing device location + // The value leads evg->cursorLoc. + Point pointerDelta; // The cumulative pointer delta values since + // previous mouse move event was posted + Point clickLoc; // location of last mouse click + Point clickSpaceThresh; // max mouse delta to be a doubleclick + int clickState; // Current click state + unsigned char lastPressure; // last pressure seen + bool lastProximity; // last proximity state seen + + SInt32 curVolume; // Value of volume setting. + SInt32 dimmedBrightness;// Value of screen brightness when autoDim + // has turned on. + SInt32 curBright; // The current brightness is cached here while + // the driver is open. This number is always + // the user-specified brightness level; if the + // screen is autodimmed, the actual brightness + // level in the monitor will be less. + SInt32 autoDimmed; // Is screen currently autodimmed? + bool evOpenCalled; // Has the driver been opened? + bool evInitialized; // Has the first-open-only initialization run? + bool eventsOpen; // Boolean: has evmmap been called yet? + bool cursorStarted; // periodic events running? + bool cursorEnabled; // cursor positioning ok? + bool cursorCoupled; // cursor positioning on pointer moves ok? + + short leftENum; // Unique ID for last left down event + short rightENum; // Unique ID for last right down event + + // The periodic event mechanism timestamps and state + // are recorded here. + AbsoluteTime thisPeriodicRun; + AbsoluteTime periodicEventDelta;// Time between periodic events + // todo: make infinite + AbsoluteTime clickTime; // Timestamps used to determine doubleclicks + AbsoluteTime clickTimeThresh; + AbsoluteTime autoDimPeriod; // How long since last user action before + // we autodim screen? 
User preference item, + // set by InitMouse and evsioctl + AbsoluteTime autoDimTime; // Time value when we will autodim screen, + // if autoDimmed is 0. + // Set in LLEventPost. + + AbsoluteTime waitSustain; // Sustain time before removing cursor + AbsoluteTime waitSusTime; // Sustain counter + AbsoluteTime waitFrameRate; // Ticks per wait cursor frame + AbsoluteTime waitFrameTime; // Wait cursor frame timer + + AbsoluteTime postedVBLTime; // Used to post mouse events once per frame + AbsoluteTime lastEventTime; + AbsoluteTime lastMoveTime; + SInt32 accumDX; + SInt32 accumDY; + + // Flags used in scheduling periodic event callbacks + bool needSetCursorPosition; + bool needToKickEventConsumer; + IOLock * kickConsumerLock; + +public: + IOService * displayManager; // points to display manager + IOPMPowerFlags displayState; + +private: + inline short getUniqueEventNum(); + + virtual IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService*); + /* Resets */ + void _resetMouseParameters(); + void _resetKeyboardParameters(); + + /* Initialize the shared memory area */ + void initShmem(); + /* Dispatch low level events through shared memory to the WindowServer */ + void postEvent(int what, + /* at */ Point * location, + /* atTime */ AbsoluteTime ts, + /* withData */ NXEventData * myData); + /* Dispatch mechanisms for screen state changes */ + void evDispatch( + /* command */ EvCmd evcmd); + /* Dispatch mechanism for special key press */ + void evSpecialKeyMsg(unsigned key, + /* direction */ unsigned dir, + /* flags */ unsigned f, + /* level */ unsigned l); + /* Message the event consumer to process posted events */ + void kickEventConsumer(); + IOReturn sendWorkLoopCommand(OSObject * target, + IOHIDAction action, + void * data); + static void _doPerformInIOThread( void* self, + void* target, + void* action, + void* data, + void* unused); + static void _periodicEvents(IOHIDSystem * self, + IOTimerEventSource *timer); + + static void 
_performSpecialKeyMsg(IOHIDSystem * self, + struct evioSpecialKeyMsg *msg); + static void _performKickEventConsumer(IOHIDSystem * self,void *); + + static bool publishNotificationHandler( IOHIDSystem * self, + void * ref, IOService * newService ); + +/* + * HISTORICAL NOTE: + * The following methods were part of the IOHIDSystem(Input) category; + * the declarations have now been merged directly into this class. + * + * Exported Interface Event Driver object input services. + */ + +private: + // Schedule next periodic run based on current event system state. + void scheduleNextPeriodicEvent(); + // Message invoked to run periodic events. This method runs in the workloop. + void periodicEvents(IOTimerEventSource *timer); + // Start the cursor running. + bool startCursor(); + // Repin cursor location. + bool resetCursor(); + // Wait Cursor machinery. + void showWaitCursor(); + void hideWaitCursor(); + void animateWaitCursor(); + void changeCursor(int frame); + // Return screen number a point lies on. + int pointToScreen(Point * p); + // Set the undimmed brightness. + void setBrightness(int b); + // Return undimmed brightness. + int brightness(); + // Set the dimmed brightness. + void setAutoDimBrightness(int b); + // Return dimmed brightness. + int autoDimBrightness(); + // Return the current brightness. + int currentBrightness(); + // Dim all displays. + void doAutoDim(); + // Return display brightness to normal. + void undoAutoDim(); + // Force dim/undim. + void forceAutoDimState(bool dim); + // Audio volume control. + void setAudioVolume(int v); + // Audio volume control, from ext user. + void setUserAudioVolume(int v); + // Return audio volume. + int audioVolume(); + // Propagate state out to screens. + inline void setBrightness(); + + inline void showCursor(); + inline void hideCursor(); + inline void moveCursor(); + // Claim ownership of event sources. + void attachDefaultEventSources(); + // Give up ownership of event sources. 
+ void detachEventSources(); + bool registerEventSource(IOHIDevice * source); + + // Set abs cursor position. + void setCursorPosition(Point * newLoc, bool external); + void _setButtonState(int buttons, + /* atTime */ AbsoluteTime ts); + void _setCursorPosition(Point * newLoc, bool external); + + void _postMouseMoveEvent(int what, + Point * location, + AbsoluteTime theClock); + +/* END HISTORICAL NOTE */ + +public: + static IOHIDSystem * instance(); /* Return the current instance of the */ + /* EventDriver, or 0 if none. */ + + virtual bool init(OSDictionary * properties = 0); + virtual IOHIDSystem * probe(IOService * provider, + SInt32 * score); + virtual bool start(IOService * provider); + virtual IOReturn message(UInt32 type, IOService * provider, + void * argument); + virtual void free(); + + virtual IOWorkLoop *getWorkLoop() const; + + virtual IOReturn evOpen(void); + virtual IOReturn evClose(void); + + virtual bool updateProperties(void); + virtual IOReturn setParamProperties(OSDictionary * dict); + virtual bool serializeProperties( OSSerialize * s ) const; + + /* Create the shared memory area */ + virtual IOReturn createShmem(void*,void*,void*,void*,void*,void*); + /* Set the port for event available notify msg */ + virtual void setEventPort(mach_port_t port); + /* Set the port for the special key keypress msg */ + virtual IOReturn setSpecialKeyPort( + /* keyFlavor */ int special_key, + /* keyPort */ mach_port_t key_port); + virtual mach_port_t specialKeyPort(int special_key); + + + virtual IOReturn newUserClient(task_t owningTask, + /* withToken */ void * security_id, + /* ofType */ UInt32 type, + /* client */ IOUserClient ** handler); + +/* + * HISTORICAL NOTE: + * The following methods were part of the IOHIPointingEvents protocol; + * the declarations have now been merged directly into this class. 
+ */ + +public: + /* Mouse event reporting */ + virtual void relativePointerEvent(int buttons, + /* deltaX */ int dx, + /* deltaY */ int dy, + /* atTime */ AbsoluteTime ts); + + /* Tablet event reporting */ + virtual void absolutePointerEvent(int buttons, + /* at */ Point * newLoc, + /* withBounds */ Bounds * bounds, + /* inProximity */ bool proximity, + /* withPressure */ int pressure, + /* withAngle */ int stylusAngle, + /* atTime */ AbsoluteTime ts); + + /* Mouse scroll wheel event reporting */ + virtual void scrollWheelEvent(short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + AbsoluteTime ts); + + + virtual void tabletEvent(NXEventData *tabletData, + AbsoluteTime ts); + + virtual void proximityEvent(NXEventData *proximityData, + AbsoluteTime ts); + +/* + * HISTORICAL NOTE: + * The following methods were part of the IOHIKeyboardEvents protocol; + * the declarations have now been merged directly into this class. + */ + +public: + virtual void keyboardEvent(unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet, + /* keyboardType */ unsigned keyboardType, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + + virtual void keyboardSpecialEvent( unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* specialty */ unsigned flavor, + /* guid */ UInt64 guid, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + + virtual void updateEventFlags(unsigned flags); /* Does not generate events */ + + + + +private: + + /* + * statics for upstream callouts + */ + + void _scaleLocationToCurrentScreen(Point *location, Bounds *bounds); // Should this one be public??? 
+ + static void _relativePointerEvent( IOHIDSystem * self, + int buttons, + /* deltaX */ int dx, + /* deltaY */ int dy, + /* atTime */ AbsoluteTime ts); + + /* Tablet event reporting */ + static void _absolutePointerEvent(IOHIDSystem * self, + int buttons, + /* at */ Point * newLoc, + /* withBounds */ Bounds * bounds, + /* inProximity */ bool proximity, + /* withPressure */ int pressure, + /* withAngle */ int stylusAngle, + /* atTime */ AbsoluteTime ts); + + /* Mouse scroll wheel event reporting */ + static void _scrollWheelEvent(IOHIDSystem *self, + short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + AbsoluteTime ts); + + static void _tabletEvent(IOHIDSystem *self, + NXEventData *tabletData, + AbsoluteTime ts); + + static void _proximityEvent(IOHIDSystem *self, + NXEventData *proximityData, + AbsoluteTime ts); + + static void _keyboardEvent( IOHIDSystem * self, + unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet, + /* keyboardType */ unsigned keyboardType, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + static void _keyboardSpecialEvent( IOHIDSystem * self, + unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* specialty */ unsigned flavor, + /* guid */ UInt64 guid, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + static void _updateEventFlags( IOHIDSystem * self, + unsigned flags); /* Does not generate events */ + + +/* + * HISTORICAL NOTE: + * The following methods were part of the IOUserClient protocol; + * the declarations have now been merged directly into this class. 
+ */ + +public: + + virtual IOReturn setEventsEnable(void*,void*,void*,void*,void*,void*); + virtual IOReturn setCursorEnable(void*,void*,void*,void*,void*,void*); + virtual IOReturn extPostEvent(void*,void*,void*,void*,void*,void*); + virtual IOReturn extSetMouseLocation(void*,void*,void*,void*,void*,void*); + virtual IOReturn extGetButtonEventNum(void*,void*,void*,void*,void*,void*); + +/* + * HISTORICAL NOTE: + * The following methods were part of the IOScreenRegistration protocol; + * the declarations have now been merged directly into this class. + * + * Methods exported by the EventDriver for display systems. + * + * The screenRegister protocol is used by frame buffer drivers to register + * themselves with the Event Driver. These methods are called in response + * to an _IOGetParameterInIntArray() call with "IO_Framebuffer_Register" or + * "IO_Framebuffer_Unregister". + */ + +public: + virtual int registerScreen(IOGraphicsDevice * instance, + /* bounds */ Bounds * bp); +// /* shmem */ void ** addr, +// /* size */ int * size) + virtual void unregisterScreen(int index); + +/* + * HISTORICAL NOTE: + * The following methods were part of the IOWorkspaceBounds protocol; + * the declarations have now been merged directly into this class. + * + * Absolute position input devices and some specialized output devices + * may need to know the bounding rectangle for all attached displays. + * The following method returns a Bounds* for the workspace. Please note + * that the bounds are kept as signed values, and that on a multi-display + * system the minx and miny values may very well be negative. + */ + +public: + virtual Bounds * workspaceBounds(); + +/* END HISTORICAL NOTES */ +}; + +#endif /* !_IOHIDSYSTEM_H */ diff --git a/iokit/IOKit/hidsystem/IOHIDTypes.h b/iokit/IOKit/hidsystem/IOHIDTypes.h new file mode 100644 index 000000000..63e3912d5 --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIDTypes.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/****************************************************************************** + ev_types.h + Data types for the events status driver. + This file contains public API. + mpaque 11Oct91 + + Copyright 1991 NeXT Computer, Inc. + + Modified: + +******************************************************************************/ + +#ifndef _DEV_EV_TYPES_H +#define _DEV_EV_TYPES_H + +#include +#include +#include + +/* Shared memory versions */ +#define EVENT_SYSTEM_VERSION 2 + +/* Maximum length of SetMouseScaling arrays */ +#define NX_MAXMOUSESCALINGS 20 + +typedef struct evsioKeymapping /* Match old struct names in kernel */ +{ + int size; + char *mapping; +} NXKeyMapping; + +typedef struct evsioMouseScaling /* Match old struct names in kernel */ +{ + int numScaleLevels; + short scaleThresholds[NX_MAXMOUSESCALINGS]; + short scaleFactors[NX_MAXMOUSESCALINGS]; +} NXMouseScaling; + +typedef enum { + NX_OneButton, + NX_LeftButton, + NX_RightButton +} NXMouseButton; + +/* + * NXEventSystemInfo() information structures. These are designed to + * allow for expansion. 
+ * + * The current implementation of NXEventSystemInfo() uses an ioctl call. + * THIS WILL CHANGE. + */ + +/* + * Generic query max size and typedefs. + * + * The maximum size is selected to support anticipated future extensions + * of request flavors. Certain flavors planned for future releases may + * require roughtly 800 ints to represent. We allow a little extra, in + * case further growth is needed. + */ +typedef int *NXEventSystemInfoType; +#define NX_EVS_INFO_MAX (1024) /* Max array size */ +typedef int NXEventSystemInfoData[NX_EVS_INFO_MAX]; + +/* Event System Devices query */ +#define NX_EVS_DEVICE_MAX 16 + + /* Interface types */ +#define NX_EVS_DEVICE_INTERFACE_OTHER 0 +#define NX_EVS_DEVICE_INTERFACE_NeXT 1 // NeXT custom, in older sys +#define NX_EVS_DEVICE_INTERFACE_ADB 2 // NeXT/fruit keybds/mice +#define NX_EVS_DEVICE_INTERFACE_ACE 3 // For x86 PC keyboards +#define NX_EVS_DEVICE_INTERFACE_SERIAL_ACE 4 // For PC serial mice +#define NX_EVS_DEVICE_INTERFACE_BUS_ACE 5 // For PC bus mice +#define NX_EVS_DEVICE_INTERFACE_HIL 6 // For HIL hp keyboard +#define NX_EVS_DEVICE_INTERFACE_TYPE5 7 // For Sun Type5 keyboard + +/* + * Note! if any new interface types are added above, the following + * definition of the number of interfaces supported must reflect this. + * This is used in the libkeymap project (storemap.c module) which needs + * to be cognizant of the number of new devices coming online + * via support for heterogeneous architecture platforms. + * e.g., PCs, HP's HIL, Sun's Type5 keyboard,... + */ +#define NUM_SUPPORTED_INTERFACES (NX_EVS_DEVICE_INTERFACE_TYPE5 + 1) + // Other, NeXT, ADB, ACE,... 
+ + /* Device types */ +#define NX_EVS_DEVICE_TYPE_OTHER 0 +#define NX_EVS_DEVICE_TYPE_KEYBOARD 1 +#define NX_EVS_DEVICE_TYPE_MOUSE 2 // Relative position devices +#define NX_EVS_DEVICE_TYPE_TABLET 3 // Absolute position devices + +typedef struct { + int interface; /* NeXT, ADB, other */ + int interface_addr; /* Device address on the interface */ + int dev_type; /* Keyboard, mouse, tablet, other */ + int id; /* manufacturer's device handler ID */ +} NXEventSystemDevice; + +typedef struct { + NXEventSystemDevice dev[NX_EVS_DEVICE_MAX]; +} NXEventSystemDeviceList; + +#define __OLD_NX_EVS_DEVICE_INFO 1 +#define NX_EVS_DEVICE_INFO "Evs_EventDeviceInfo" +#define NX_EVS_DEVICE_INFO_COUNT \ + (sizeof (NXEventSystemDeviceList) / sizeof (int)) + +/* + * Types used in evScreen protocol compliant operations. + */ + +typedef enum {EVNOP, EVHIDE, EVSHOW, EVMOVE, EVLEVEL} EvCmd; /* Cursor state */ + +#define EV_SCREEN_MIN_BRIGHTNESS 0 +#define EV_SCREEN_MAX_BRIGHTNESS 64 +/* Scale should lie between MIN_BRIGHTNESS and MAX_BRIGHTNESS */ +#define EV_SCALE_BRIGHTNESS( scale, datum ) \ + ((((unsigned long)(datum))*((unsigned long)scale)) >> 6) + +/* + * Definition of a tick, as a time in milliseconds. This controls how + * often the event system periodic jobs are run. All actual tick times + * are derived from the nanosecond timer. These values are typically used + * as part of computing mouse velocity for acceleration purposes. 
+ */ +#define EV_TICK_TIME 16 /* 16 milliseconds */ +#define EV_TICKS_PER_SEC (1000/EV_TICK_TIME) /* ~ 62 Hz */ + +/* Mouse Button bits, as passed from an EventSrc to the Event Driver */ +#define EV_RB (0x01) +#define EV_LB (0x04) +#define EV_MOUSEBUTTONMASK (EV_LB | EV_RB) + +/* Tablet Pressure Constants, as passed from an EventSrc to the Event Driver */ +#define EV_MINPRESSURE 0 +#define EV_MAXPRESSURE 255 + +/* Cursor size in pixels */ +#define EV_CURSOR_WIDTH 16 +#define EV_CURSOR_HEIGHT 16 + + +#define kAppleOnboardGUID 0x0610000000000000ULL + +#endif /* !_DEV_EV_TYPES_H */ + diff --git a/iokit/IOKit/hidsystem/IOHIDUsageTables.h b/iokit/IOKit/hidsystem/IOHIDUsageTables.h new file mode 100644 index 000000000..b7b63401d --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIDUsageTables.h @@ -0,0 +1,1017 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOHIDUSAGETABLES_H +#define _IOHIDUSAGETABLES_H + +/* ****************************************************************************************** + * HID Usage Tables + * + * The following constants are from the USB 'HID Usage Tables' specification, revision 1.1rc3 + * ****************************************************************************************** */ + + +/* Usage Pages */ +enum +{ + kHIDPage_Undefined = 0x00, + kHIDPage_GenericDesktop = 0x01, + kHIDPage_Simulation = 0x02, + kHIDPage_VR = 0x03, + kHIDPage_Sport = 0x04, + kHIDPage_Game = 0x05, + /* Reserved 0x06 */ + kHIDPage_KeyboardOrKeypad = 0x07, /* USB Device Class Definition for Human Interface Devices (HID). Note: the usage type for all key codes is Selector (Sel). */ + kHIDPage_LEDs = 0x08, + kHIDPage_Button = 0x09, + kHIDPage_Ordinal = 0x0A, + kHIDPage_Telephony = 0x0B, + kHIDPage_Consumer = 0x0C, + kHIDPage_Digitizer = 0x0D, + /* Reserved 0x0E */ + kHIDPage_PID = 0x0F, /* USB Physical Interface Device definitions for force feedback and related devices. 
*/ + kHIDPage_Unicode = 0x10, + /* Reserved 0x11 - 0x13 */ + kHIDPage_AlphanumericDisplay = 0x14, + /* Reserved 0x15 - 0x7F */ + /* Monitor 0x80 - 0x83 USB Device Class Definition for Monitor Devices */ + /* Power 0x84 - 0x87 USB Device Class Definition for Power Devices */ + /* Reserved 0x88 - 0x8B */ + kHIDPage_BarCodeScanner = 0x8C, /* (Point of Sale) USB Device Class Definition for Bar Code Scanner Devices */ + kHIDPage_Scale = 0x8D, /* (Point of Sale) USB Device Class Definition for Scale Devices */ + /* ReservedPointofSalepages 0x8E - 0x8F */ + kHIDPage_CameraControl = 0x90, /* USB Device Class Definition for Image Class Devices */ + kHIDPage_Arcade = 0x91, /* OAAF Definitions for arcade and coinop related Devices */ + /* Reserved 0x92 - 0xFEFF */ + /* VendorDefined 0xFF00 - 0xFFFF */ + kHIDPage_VendorDefinedStart = 0xFF00, +}; + +/* Undefined Usage for all usage pages */ +enum +{ + kHIDUsage_Undefined = 0x00 +}; + +/* GenericDesktop Page (0x01) */ +enum +{ + kHIDUsage_GD_Pointer = 0x01, /* Physical Collection */ + kHIDUsage_GD_Mouse = 0x02, /* Application Collection */ + /* 0x03 Reserved */ + kHIDUsage_GD_Joystick = 0x04, /* Application Collection */ + kHIDUsage_GD_GamePad = 0x05, /* Application Collection */ + kHIDUsage_GD_Keyboard = 0x06, /* Application Collection */ + kHIDUsage_GD_Keypad = 0x07, /* Application Collection */ + kHIDUsage_GD_MultiAxisController = 0x08, /* Application Collection */ + /* 0x09 - 0x2F Reserved */ + kHIDUsage_GD_X = 0x30, /* Dynamic Value */ + kHIDUsage_GD_Y = 0x31, /* Dynamic Value */ + kHIDUsage_GD_Z = 0x32, /* Dynamic Value */ + kHIDUsage_GD_Rx = 0x33, /* Dynamic Value */ + kHIDUsage_GD_Ry = 0x34, /* Dynamic Value */ + kHIDUsage_GD_Rz = 0x35, /* Dynamic Value */ + kHIDUsage_GD_Slider = 0x36, /* Dynamic Value */ + kHIDUsage_GD_Dial = 0x37, /* Dynamic Value */ + kHIDUsage_GD_Wheel = 0x38, /* Dynamic Value */ + kHIDUsage_GD_Hatswitch = 0x39, /* Dynamic Value */ + kHIDUsage_GD_CountedBuffer = 0x3A, /* Logical Collection */ + 
kHIDUsage_GD_ByteCount = 0x3B, /* Dynamic Value */ + kHIDUsage_GD_MotionWakeup = 0x3C, /* One-Shot Control */ + kHIDUsage_GD_Start = 0x3D, /* On/Off Control */ + kHIDUsage_GD_Select = 0x3E, /* On/Off Control */ + /* 0x3F Reserved */ + kHIDUsage_GD_Vx = 0x40, /* Dynamic Value */ + kHIDUsage_GD_Vy = 0x41, /* Dynamic Value */ + kHIDUsage_GD_Vz = 0x42, /* Dynamic Value */ + kHIDUsage_GD_Vbrx = 0x43, /* Dynamic Value */ + kHIDUsage_GD_Vbry = 0x44, /* Dynamic Value */ + kHIDUsage_GD_Vbrz = 0x45, /* Dynamic Value */ + kHIDUsage_GD_Vno = 0x46, /* Dynamic Value */ + /* 0x47 - 0x7F Reserved */ + kHIDUsage_GD_SystemControl = 0x80, /* Application Collection */ + kHIDUsage_GD_SystemPowerDown = 0x81, /* One-Shot Control */ + kHIDUsage_GD_SystemSleep = 0x82, /* One-Shot Control */ + kHIDUsage_GD_SystemWakeUp = 0x83, /* One-Shot Control */ + kHIDUsage_GD_SystemContextMenu = 0x84, /* One-Shot Control */ + kHIDUsage_GD_SystemMainMenu = 0x85, /* One-Shot Control */ + kHIDUsage_GD_SystemAppMenu = 0x86, /* One-Shot Control */ + kHIDUsage_GD_SystemMenuHelp = 0x87, /* One-Shot Control */ + kHIDUsage_GD_SystemMenuExit = 0x88, /* One-Shot Control */ + kHIDUsage_GD_SystemMenu = 0x89, /* Selector */ + kHIDUsage_GD_SystemMenuRight = 0x8A, /* Re-Trigger Control */ + kHIDUsage_GD_SystemMenuLeft = 0x8B, /* Re-Trigger Control */ + kHIDUsage_GD_SystemMenuUp = 0x8C, /* Re-Trigger Control */ + kHIDUsage_GD_SystemMenuDown = 0x8D, /* Re-Trigger Control */ + /* 0x8E - 0x8F Reserved */ + kHIDUsage_GD_DPadUp = 0x90, /* On/Off Control */ + kHIDUsage_GD_DPadDown = 0x91, /* On/Off Control */ + kHIDUsage_GD_DPadRight = 0x92, /* On/Off Control */ + kHIDUsage_GD_DPadLeft = 0x93, /* On/Off Control */ + /* 0x94 - 0xFFFF Reserved */ + kHIDUsage_GD_Reserved = 0xFFFF, +}; + +/* Simulation Page (0x02) */ +/* This section provides detailed descriptions of the usages employed by simulation devices. 
*/ +enum +{ + kHIDUsage_Sim_FlightSimulationDevice = 0x01, /* Application Collection */ + kHIDUsage_Sim_AutomobileSimulationDevice = 0x02, /* Application Collection */ + kHIDUsage_Sim_TankSimulationDevice = 0x03, /* Application Collection */ + kHIDUsage_Sim_SpaceshipSimulationDevice = 0x04, /* Application Collection */ + kHIDUsage_Sim_SubmarineSimulationDevice = 0x05, /* Application Collection */ + kHIDUsage_Sim_SailingSimulationDevice = 0x06, /* Application Collection */ + kHIDUsage_Sim_MotorcycleSimulationDevice = 0x07, /* Application Collection */ + kHIDUsage_Sim_SportsSimulationDevice = 0x08, /* Application Collection */ + kHIDUsage_Sim_AirplaneSimulationDevice = 0x09, /* Application Collection */ + kHIDUsage_Sim_HelicopterSimulationDevice = 0x0A, /* Application Collection */ + kHIDUsage_Sim_MagicCarpetSimulationDevice = 0x0B, /* Application Collection */ + kHIDUsage_Sim_BicycleSimulationDevice = 0x0C, /* Application Collection */ + /* 0x0D - 0x1F Reserved */ + kHIDUsage_Sim_FlightControlStick = 0x20, /* Application Collection */ + kHIDUsage_Sim_FlightStick = 0x21, /* Application Collection */ + kHIDUsage_Sim_CyclicControl = 0x22, /* Physical Collection */ + kHIDUsage_Sim_CyclicTrim = 0x23, /* Physical Collection */ + kHIDUsage_Sim_FlightYoke = 0x24, /* Application Collection */ + kHIDUsage_Sim_TrackControl = 0x25, /* Physical Collection */ + /* 0x26 - 0xAF Reserved */ + kHIDUsage_Sim_Aileron = 0xB0, /* Dynamic Value */ + kHIDUsage_Sim_AileronTrim = 0xB1, /* Dynamic Value */ + kHIDUsage_Sim_AntiTorqueControl = 0xB2, /* Dynamic Value */ + kHIDUsage_Sim_AutopilotEnable = 0xB3, /* On/Off Control */ + kHIDUsage_Sim_ChaffRelease = 0xB4, /* One-Shot Control */ + kHIDUsage_Sim_CollectiveControl = 0xB5, /* Dynamic Value */ + kHIDUsage_Sim_DiveBrake = 0xB6, /* Dynamic Value */ + kHIDUsage_Sim_ElectronicCountermeasures = 0xB7, /* On/Off Control */ + kHIDUsage_Sim_Elevator = 0xB8, /* Dynamic Value */ + kHIDUsage_Sim_ElevatorTrim = 0xB9, /* Dynamic Value */ + 
kHIDUsage_Sim_Rudder = 0xBA, /* Dynamic Value */ + kHIDUsage_Sim_Throttle = 0xBB, /* Dynamic Value */ + kHIDUsage_Sim_FlightCommunications = 0xBC, /* On/Off Control */ + kHIDUsage_Sim_FlareRelease = 0xBD, /* One-Shot Control */ + kHIDUsage_Sim_LandingGear = 0xBE, /* On/Off Control */ + kHIDUsage_Sim_ToeBrake = 0xBF, /* Dynamic Value */ + kHIDUsage_Sim_Trigger = 0xC0, /* Momentary Control */ + kHIDUsage_Sim_WeaponsArm = 0xC1, /* On/Off Control */ + kHIDUsage_Sim_Weapons = 0xC2, /* Selector */ + kHIDUsage_Sim_WingFlaps = 0xC3, /* Dynamic Value */ + kHIDUsage_Sim_Accelerator = 0xC4, /* Dynamic Value */ + kHIDUsage_Sim_Brake = 0xC5, /* Dynamic Value */ + kHIDUsage_Sim_Clutch = 0xC6, /* Dynamic Value */ + kHIDUsage_Sim_Shifter = 0xC7, /* Dynamic Value */ + kHIDUsage_Sim_Steering = 0xC8, /* Dynamic Value */ + kHIDUsage_Sim_TurretDirection = 0xC9, /* Dynamic Value */ + kHIDUsage_Sim_BarrelElevation = 0xCA, /* Dynamic Value */ + kHIDUsage_Sim_DivePlane = 0xCB, /* Dynamic Value */ + kHIDUsage_Sim_Ballast = 0xCC, /* Dynamic Value */ + kHIDUsage_Sim_BicycleCrank = 0xCD, /* Dynamic Value */ + kHIDUsage_Sim_HandleBars = 0xCE, /* Dynamic Value */ + kHIDUsage_Sim_FrontBrake = 0xCF, /* Dynamic Value */ + kHIDUsage_Sim_RearBrake = 0xD0, /* Dynamic Value */ + /* 0xD1 - 0xFFFF Reserved */ + kHIDUsage_Sim_Reserved = 0xFFFF, +}; + +/* VR Page (0x03) */ +/* Virtual Reality controls depend on designators to identify the individual controls. Most of the following are */ +/* usages are applied to the collections of entities that comprise the actual device. 
*/ +enum +{ + kHIDUsage_VR_Belt = 0x01, /* Application Collection */ + kHIDUsage_VR_BodySuit = 0x02, /* Application Collection */ + kHIDUsage_VR_Flexor = 0x03, /* Physical Collection */ + kHIDUsage_VR_Glove = 0x04, /* Application Collection */ + kHIDUsage_VR_HeadTracker = 0x05, /* Physical Collection */ + kHIDUsage_VR_HeadMountedDisplay = 0x06, /* Application Collection */ + kHIDUsage_VR_HandTracker = 0x07, /* Application Collection */ + kHIDUsage_VR_Oculometer = 0x08, /* Application Collection */ + kHIDUsage_VR_Vest = 0x09, /* Application Collection */ + kHIDUsage_VR_AnimatronicDevice = 0x0A, /* Application Collection */ + /* 0x0B - 0x1F Reserved */ + kHIDUsage_VR_StereoEnable = 0x20, /* On/Off Control */ + kHIDUsage_VR_DisplayEnable = 0x21, /* On/Off Control */ + /* 0x22 - 0xFFFF Reserved */ + kHIDUsage_VR_Reserved = 0xFFFF, +}; + +/* Sport Page (0x04) */ +enum +{ + kHIDUsage_Sprt_BaseballBat = 0x01, /* Application Collection */ + kHIDUsage_Sprt_GolfClub = 0x02, /* Application Collection */ + kHIDUsage_Sprt_RowingMachine = 0x03, /* Application Collection */ + kHIDUsage_Sprt_Treadmill = 0x04, /* Application Collection */ + /* 0x05 - 0x2F Reserved */ + kHIDUsage_Sprt_Oar = 0x30, /* Dynamic Value */ + kHIDUsage_Sprt_Slope = 0x31, /* Dynamic Value */ + kHIDUsage_Sprt_Rate = 0x32, /* Dynamic Value */ + kHIDUsage_Sprt_StickSpeed = 0x33, /* Dynamic Value */ + kHIDUsage_Sprt_StickFaceAngle = 0x34, /* Dynamic Value */ + kHIDUsage_Sprt_StickHeelOrToe = 0x35, /* Dynamic Value */ + kHIDUsage_Sprt_StickFollowThrough = 0x36, /* Dynamic Value */ + kHIDUsage_Sprt_StickTempo = 0x37, /* Dynamic Value */ + kHIDUsage_Sprt_StickType = 0x38, /* Named Array */ + kHIDUsage_Sprt_StickHeight = 0x39, /* Dynamic Value */ + /* 0x3A - 0x4F Reserved */ + kHIDUsage_Sprt_Putter = 0x50, /* Selector */ + kHIDUsage_Sprt_1Iron = 0x51, /* Selector */ + kHIDUsage_Sprt_2Iron = 0x52, /* Selector */ + kHIDUsage_Sprt_3Iron = 0x53, /* Selector */ + kHIDUsage_Sprt_4Iron = 0x54, /* Selector */ + 
kHIDUsage_Sprt_5Iron = 0x55, /* Selector */ + kHIDUsage_Sprt_6Iron = 0x56, /* Selector */ + kHIDUsage_Sprt_7Iron = 0x57, /* Selector */ + kHIDUsage_Sprt_8Iron = 0x58, /* Selector */ + kHIDUsage_Sprt_9Iron = 0x59, /* Selector */ + kHIDUsage_Sprt_10Iron = 0x5A, /* Selector */ + kHIDUsage_Sprt_11Iron = 0x5B, /* Selector */ + kHIDUsage_Sprt_SandWedge = 0x5C, /* Selector */ + kHIDUsage_Sprt_LoftWedge = 0x5D, /* Selector */ + kHIDUsage_Sprt_PowerWedge = 0x5E, /* Selector */ + kHIDUsage_Sprt_1Wood = 0x5F, /* Selector */ + kHIDUsage_Sprt_3Wood = 0x60, /* Selector */ + kHIDUsage_Sprt_5Wood = 0x61, /* Selector */ + kHIDUsage_Sprt_7Wood = 0x62, /* Selector */ + kHIDUsage_Sprt_9Wood = 0x63, /* Selector */ + /* 0x64 - 0xFFFF Reserved */ + kHIDUsage_Sprt_Reserved = 0xFFFF, +}; + +/* Game Page (0x05) */ +enum +{ + kHIDUsage_Game_3DGameController = 0x01, /* Application Collection */ + kHIDUsage_Game_PinballDevice = 0x02, /* Application Collection */ + kHIDUsage_Game_GunDevice = 0x03, /* Application Collection */ + /* 0x04 - 0x1F Reserved */ + kHIDUsage_Game_PointofView = 0x20, /* Physical Collection */ + kHIDUsage_Game_TurnRightOrLeft = 0x21, /* Dynamic Value */ + kHIDUsage_Game_PitchUpOrDown = 0x22, /* Dynamic Value */ + kHIDUsage_Game_RollRightOrLeft = 0x23, /* Dynamic Value */ + kHIDUsage_Game_MoveRightOrLeft = 0x24, /* Dynamic Value */ + kHIDUsage_Game_MoveForwardOrBackward = 0x25, /* Dynamic Value */ + kHIDUsage_Game_MoveUpOrDown = 0x26, /* Dynamic Value */ + kHIDUsage_Game_LeanRightOrLeft = 0x27, /* Dynamic Value */ + kHIDUsage_Game_LeanForwardOrBackward = 0x28, /* Dynamic Value */ + kHIDUsage_Game_HeightOfPOV = 0x29, /* Dynamic Value */ + kHIDUsage_Game_Flipper = 0x2A, /* Momentary Control */ + kHIDUsage_Game_SecondaryFlipper = 0x2B, /* Momentary Control */ + kHIDUsage_Game_Bump = 0x2C, /* Momentary Control */ + kHIDUsage_Game_NewGame = 0x2D, /* One-Shot Control */ + kHIDUsage_Game_ShootBall = 0x2E, /* One-Shot Control */ + kHIDUsage_Game_Player = 0x2F, /* One-Shot Control 
*/ + kHIDUsage_Game_GunBolt = 0x30, /* On/Off Control */ + kHIDUsage_Game_GunClip = 0x31, /* On/Off Control */ + kHIDUsage_Game_Gun = 0x32, /* Selector */ + kHIDUsage_Game_GunSingleShot = 0x33, /* Selector */ + kHIDUsage_Game_GunBurst = 0x34, /* Selector */ + kHIDUsage_Game_GunAutomatic = 0x35, /* Selector */ + kHIDUsage_Game_GunSafety = 0x36, /* On/Off Control */ + kHIDUsage_Game_GamepadFireOrJump = 0x37, /* Logical Collection */ + kHIDUsage_Game_GamepadTrigger = 0x39, /* Logical Collection */ + /* 0x3A - 0xFFFF Reserved */ + kHIDUsage_Game_Reserved = 0xFFFF, +}; + +/* KeyboardOrKeypad Page (0x07) */ +/* This section is the Usage Page for key codes to be used in implementing a USB keyboard. A Boot Keyboard (84-, 101- or 104-key) should at a minimum support all associated usage codes as indicated in the ÒBootÓ */ +/* column below. */ +/* The usage type of all key codes is Selectors (Sel), except for the modifier keys Keyboard Left Control (0x224) to Keyboard Right GUI (0x231) which are Dynamic Flags (DV). */ +/* Note: A general note on Usages and languages: Due to the variation of keyboards from language to language, it is not feasible to specify exact key mappings for every language. Where this list is not specific for a key function in a language, the closest equivalent key position should be used, so that a keyboard may be modified for a different language by simply printing different keycaps. One example is the Y key on a North American keyboard. In Germany this is typically Z. Rather than changing the keyboard firmware to put the Z Usage into that place in the descriptor list, the vendor should use the Y Usage on both the North American and German keyboards. This continues to be the existing practice in the industry, in order to minimize the number of changes to the electronics to accommodate otherlanguages. 
*/ +enum +{ + kHIDUsage_KeyboardErrorRollOver = 0x01, /* ErrorRollOver */ + kHIDUsage_KeyboardPOSTFail = 0x02, /* POSTFail */ + kHIDUsage_KeyboardErrorUndefined = 0x03, /* ErrorUndefined */ + kHIDUsage_KeyboardA = 0x04, /* a or A */ + kHIDUsage_KeyboardB = 0x05, /* b or B */ + kHIDUsage_KeyboardC = 0x06, /* c or C */ + kHIDUsage_KeyboardD = 0x07, /* d or D */ + kHIDUsage_KeyboardE = 0x08, /* e or E */ + kHIDUsage_KeyboardF = 0x09, /* f or F */ + kHIDUsage_KeyboardG = 0x0A, /* g or G */ + kHIDUsage_KeyboardH = 0x0B, /* h or H */ + kHIDUsage_KeyboardI = 0x0C, /* i or I */ + kHIDUsage_KeyboardJ = 0x0D, /* j or J */ + kHIDUsage_KeyboardK = 0x0E, /* k or K */ + kHIDUsage_KeyboardL = 0x0F, /* l or L */ + kHIDUsage_KeyboardM = 0x10, /* m or M */ + kHIDUsage_KeyboardN = 0x11, /* n or N */ + kHIDUsage_KeyboardO = 0x12, /* o or O */ + kHIDUsage_KeyboardP = 0x13, /* p or P */ + kHIDUsage_KeyboardQ = 0x14, /* q or Q */ + kHIDUsage_KeyboardR = 0x15, /* r or R */ + kHIDUsage_KeyboardS = 0x16, /* s or S */ + kHIDUsage_KeyboardT = 0x17, /* t or T */ + kHIDUsage_KeyboardU = 0x18, /* u or U */ + kHIDUsage_KeyboardV = 0x19, /* v or V */ + kHIDUsage_KeyboardW = 0x1A, /* w or W */ + kHIDUsage_KeyboardX = 0x1B, /* x or X */ + kHIDUsage_KeyboardY = 0x1C, /* y or Y */ + kHIDUsage_KeyboardZ = 0x1D, /* z or Z */ + kHIDUsage_Keyboard1 = 0x1E, /* 1 or ! 
*/ + kHIDUsage_Keyboard2 = 0x1F, /* 2 or @ */ + kHIDUsage_Keyboard3 = 0x20, /* 3 or # */ + kHIDUsage_Keyboard4 = 0x21, /* 4 or $ */ + kHIDUsage_Keyboard5 = 0x22, /* 5 or % */ + kHIDUsage_Keyboard6 = 0x23, /* 6 or ^ */ + kHIDUsage_Keyboard7 = 0x24, /* 7 or & */ + kHIDUsage_Keyboard8 = 0x25, /* 8 or * */ + kHIDUsage_Keyboard9 = 0x26, /* 9 or ( */ + kHIDUsage_Keyboard0 = 0x27, /* 0 or ) */ + kHIDUsage_KeyboardReturnOrEnter = 0x28, /* Return (Enter) */ + kHIDUsage_KeyboardEscape = 0x29, /* Escape */ + kHIDUsage_KeyboardDeleteOrBackspace = 0x2A, /* Delete (Backspace) */ + kHIDUsage_KeyboardTab = 0x2B, /* Tab */ + kHIDUsage_KeyboardSpacebar = 0x2C, /* Spacebar */ + kHIDUsage_KeyboardHyphen = 0x2D, /* - or _ */ + kHIDUsage_KeyboardEqualSign = 0x2E, /* = or + */ + kHIDUsage_KeyboardOpenBracket = 0x2F, /* [ or { */ + kHIDUsage_KeyboardCloseBracket = 0x30, /* ] or } */ + kHIDUsage_KeyboardBackslash = 0x31, /* \ or | */ + kHIDUsage_KeyboardNonUSPound = 0x32, /* Non-US # or _ */ + kHIDUsage_KeyboardSemicolon = 0x33, /* ; or : */ + kHIDUsage_KeyboardQuote = 0x34, /* ' or " */ + kHIDUsage_KeyboardGraveAccentAndTilde = 0x35, /* Grave Accent and Tilde */ + kHIDUsage_KeyboardComma = 0x36, /* , or < */ + kHIDUsage_KeyboardPeriod = 0x37, /* . or > */ + kHIDUsage_KeyboardSlash = 0x38, /* / or ? 
*/ + kHIDUsage_KeyboardCapsLock = 0x39, /* Caps Lock */ + kHIDUsage_KeyboardF1 = 0x3A, /* F1 */ + kHIDUsage_KeyboardF2 = 0x3B, /* F2 */ + kHIDUsage_KeyboardF3 = 0x3C, /* F3 */ + kHIDUsage_KeyboardF4 = 0x3D, /* F4 */ + kHIDUsage_KeyboardF5 = 0x3E, /* F5 */ + kHIDUsage_KeyboardF6 = 0x3F, /* F6 */ + kHIDUsage_KeyboardF7 = 0x40, /* F7 */ + kHIDUsage_KeyboardF8 = 0x41, /* F8 */ + kHIDUsage_KeyboardF9 = 0x42, /* F9 */ + kHIDUsage_KeyboardF10 = 0x43, /* F10 */ + kHIDUsage_KeyboardF11 = 0x44, /* F11 */ + kHIDUsage_KeyboardF12 = 0x45, /* F12 */ + kHIDUsage_KeyboardPrintScreen = 0x46, /* Print Screen */ + kHIDUsage_KeyboardScrollLock = 0x47, /* Scroll Lock */ + kHIDUsage_KeyboardPause = 0x48, /* Pause */ + kHIDUsage_KeyboardInsert = 0x49, /* Insert */ + kHIDUsage_KeyboardHome = 0x4A, /* Home */ + kHIDUsage_KeyboardPageUp = 0x4B, /* Page Up */ + kHIDUsage_KeyboardDeleteForward = 0x4C, /* Delete Forward */ + kHIDUsage_KeyboardEnd = 0x4D, /* End */ + kHIDUsage_KeyboardPageDown = 0x4E, /* Page Down */ + kHIDUsage_KeyboardRightArrow = 0x4F, /* Right Arrow */ + kHIDUsage_KeyboardLeftArrow = 0x50, /* Left Arrow */ + kHIDUsage_KeyboardDownArrow = 0x51, /* Down Arrow */ + kHIDUsage_KeyboardUpArrow = 0x52, /* Up Arrow */ + kHIDUsage_KeypadNumLock = 0x53, /* Keypad NumLock or Clear */ + kHIDUsage_KeypadSlash = 0x54, /* Keypad / */ + kHIDUsage_KeypadAsterisk = 0x55, /* Keypad * */ + kHIDUsage_KeypadHyphen = 0x56, /* Keypad - */ + kHIDUsage_KeypadPlus = 0x57, /* Keypad + */ + kHIDUsage_KeypadEnter = 0x58, /* Keypad Enter */ + kHIDUsage_Keypad1 = 0x59, /* Keypad 1 or End */ + kHIDUsage_Keypad2 = 0x5A, /* Keypad 2 or Down Arrow */ + kHIDUsage_Keypad3 = 0x5B, /* Keypad 3 or Page Down */ + kHIDUsage_Keypad4 = 0x5C, /* Keypad 4 or Left Arrow */ + kHIDUsage_Keypad5 = 0x5D, /* Keypad 5 */ + kHIDUsage_Keypad6 = 0x5E, /* Keypad 6 or Right Arrow */ + kHIDUsage_Keypad7 = 0x5F, /* Keypad 7 or Home */ + kHIDUsage_Keypad8 = 0x60, /* Keypad 8 or Up Arrow */ + kHIDUsage_Keypad9 = 0x61, /* Keypad 9 or 
Page Up */ + kHIDUsage_Keypad0 = 0x62, /* Keypad 0 or Insert */ + kHIDUsage_KeypadPeriod = 0x63, /* Keypad . or Delete */ + kHIDUsage_KeyboardNonUSBackslash = 0x64, /* Non-US \ or | */ + kHIDUsage_KeyboardApplication = 0x65, /* Application */ + kHIDUsage_KeyboardPower = 0x66, /* Power */ + kHIDUsage_KeypadEqualSign = 0x67, /* Keypad = */ + kHIDUsage_KeyboardF13 = 0x68, /* F13 */ + kHIDUsage_KeyboardF14 = 0x69, /* F14 */ + kHIDUsage_KeyboardF15 = 0x6A, /* F15 */ + kHIDUsage_KeyboardF16 = 0x6B, /* F16 */ + kHIDUsage_KeyboardF17 = 0x6C, /* F17 */ + kHIDUsage_KeyboardF18 = 0x6D, /* F18 */ + kHIDUsage_KeyboardF19 = 0x6E, /* F19 */ + kHIDUsage_KeyboardF20 = 0x6F, /* F20 */ + kHIDUsage_KeyboardF21 = 0x70, /* F21 */ + kHIDUsage_KeyboardF22 = 0x71, /* F22 */ + kHIDUsage_KeyboardF23 = 0x72, /* F23 */ + kHIDUsage_KeyboardF24 = 0x73, /* F24 */ + kHIDUsage_KeyboardExecute = 0x74, /* Execute */ + kHIDUsage_KeyboardHelp = 0x75, /* Help */ + kHIDUsage_KeyboardMenu = 0x76, /* Menu */ + kHIDUsage_KeyboardSelect = 0x77, /* Select */ + kHIDUsage_KeyboardStop = 0x78, /* Stop */ + kHIDUsage_KeyboardAgain = 0x79, /* Again */ + kHIDUsage_KeyboardUndo = 0x7A, /* Undo */ + kHIDUsage_KeyboardCut = 0x7B, /* Cut */ + kHIDUsage_KeyboardCopy = 0x7C, /* Copy */ + kHIDUsage_KeyboardPaste = 0x7D, /* Paste */ + kHIDUsage_KeyboardFind = 0x7E, /* Find */ + kHIDUsage_KeyboardMute = 0x7F, /* Mute */ + kHIDUsage_KeyboardVolumeUp = 0x80, /* Volume Up */ + kHIDUsage_KeyboardVolumeDown = 0x81, /* Volume Down */ + kHIDUsage_KeyboardLockingCapsLock = 0x82, /* Locking Caps Lock */ + kHIDUsage_KeyboardLockingNumLock = 0x83, /* Locking Num Lock */ + kHIDUsage_KeyboardLockingScrollLock = 0x84, /* Locking Scroll Lock */ + kHIDUsage_KeypadComma = 0x85, /* Keypad Comma */ + kHIDUsage_KeypadEqualSignAS400 = 0x86, /* Keypad Equal Sign for AS/400 */ + kHIDUsage_KeyboardInternational1 = 0x87, /* International1 */ + kHIDUsage_KeyboardInternational2 = 0x88, /* International2 */ + kHIDUsage_KeyboardInternational3 = 0x89, 
/* International3 */ + kHIDUsage_KeyboardInternational4 = 0x8A, /* International4 */ + kHIDUsage_KeyboardInternational5 = 0x8B, /* International5 */ + kHIDUsage_KeyboardInternational6 = 0x8C, /* International6 */ + kHIDUsage_KeyboardInternational7 = 0x8D, /* International7 */ + kHIDUsage_KeyboardInternational8 = 0x8E, /* International8 */ + kHIDUsage_KeyboardInternational9 = 0x8F, /* International9 */ + kHIDUsage_KeyboardLANG1 = 0x90, /* LANG1 */ + kHIDUsage_KeyboardLANG2 = 0x91, /* LANG2 */ + kHIDUsage_KeyboardLANG3 = 0x92, /* LANG3 */ + kHIDUsage_KeyboardLANG4 = 0x93, /* LANG4 */ + kHIDUsage_KeyboardLANG5 = 0x94, /* LANG5 */ + kHIDUsage_KeyboardLANG6 = 0x95, /* LANG6 */ + kHIDUsage_KeyboardLANG7 = 0x96, /* LANG7 */ + kHIDUsage_KeyboardLANG8 = 0x97, /* LANG8 */ + kHIDUsage_KeyboardLANG9 = 0x98, /* LANG9 */ + kHIDUsage_KeyboardAlternateErase = 0x99, /* AlternateErase */ + kHIDUsage_KeyboardSysReqOrAttention = 0x9A, /* SysReq/Attention */ + kHIDUsage_KeyboardCancel = 0x9B, /* Cancel */ + kHIDUsage_KeyboardClear = 0x9C, /* Clear */ + kHIDUsage_KeyboardPrior = 0x9D, /* Prior */ + kHIDUsage_KeyboardReturn = 0x9E, /* Return */ + kHIDUsage_KeyboardSeparator = 0x9F, /* Separator */ + kHIDUsage_KeyboardOut = 0xA0, /* Out */ + kHIDUsage_KeyboardOper = 0xA1, /* Oper */ + kHIDUsage_KeyboardClearOrAgain = 0xA2, /* Clear/Again */ + kHIDUsage_KeyboardCrSelOrProps = 0xA3, /* CrSel/Props */ + kHIDUsage_KeyboardExSel = 0xA4, /* ExSel */ + /* 0xA5-0xDF Reserved */ + kHIDUsage_KeyboardLeftControl = 0xE0, /* Left Control */ + kHIDUsage_KeyboardLeftShift = 0xE1, /* Left Shift */ + kHIDUsage_KeyboardLeftAlt = 0xE2, /* Left Alt */ + kHIDUsage_KeyboardLeftGUI = 0xE3, /* Left GUI */ + kHIDUsage_KeyboardRightControl = 0xE4, /* Right Control */ + kHIDUsage_KeyboardRightShift = 0xE5, /* Right Shift */ + kHIDUsage_KeyboardRightAlt = 0xE6, /* Right Alt */ + kHIDUsage_KeyboardRightGUI = 0xE7, /* Right GUI */ + /* 0xE8-0xFFFF Reserved */ + kHIDUsage_Keyboard_Reserved = 0xFFFF, +}; + +/* LEDs Page 
(0x08) */ +/* An LED or indicator is implemented as an On/Off Control (OOF) using the ÒSingle button toggleÓ mode, where a value of 1 will turn on the indicator, and a value of 0 will turn it off. The exceptions are described below. */ +enum +{ + kHIDUsage_LED_NumLock = 0x01, /* On/Off Control */ + kHIDUsage_LED_CapsLock = 0x02, /* On/Off Control */ + kHIDUsage_LED_ScrollLock = 0x03, /* On/Off Control */ + kHIDUsage_LED_Compose = 0x04, /* On/Off Control */ + kHIDUsage_LED_Kana = 0x05, /* On/Off Control */ + kHIDUsage_LED_Power = 0x06, /* On/Off Control */ + kHIDUsage_LED_Shift = 0x07, /* On/Off Control */ + kHIDUsage_LED_DoNotDisturb = 0x08, /* On/Off Control */ + kHIDUsage_LED_Mute = 0x09, /* On/Off Control */ + kHIDUsage_LED_ToneEnable = 0x0A, /* On/Off Control */ + kHIDUsage_LED_HighCutFilter = 0x0B, /* On/Off Control */ + kHIDUsage_LED_LowCutFilter = 0x0C, /* On/Off Control */ + kHIDUsage_LED_EqualizerEnable = 0x0D, /* On/Off Control */ + kHIDUsage_LED_SoundFieldOn = 0x0E, /* On/Off Control */ + kHIDUsage_LED_SurroundOn = 0x0F, /* On/Off Control */ + kHIDUsage_LED_Repeat = 0x10, /* On/Off Control */ + kHIDUsage_LED_Stereo = 0x11, /* On/Off Control */ + kHIDUsage_LED_SamplingRateDetect = 0x12, /* On/Off Control */ + kHIDUsage_LED_Spinning = 0x13, /* On/Off Control */ + kHIDUsage_LED_CAV = 0x14, /* On/Off Control */ + kHIDUsage_LED_CLV = 0x15, /* On/Off Control */ + kHIDUsage_LED_RecordingFormatDetect = 0x16, /* On/Off Control */ + kHIDUsage_LED_OffHook = 0x17, /* On/Off Control */ + kHIDUsage_LED_Ring = 0x18, /* On/Off Control */ + kHIDUsage_LED_MessageWaiting = 0x19, /* On/Off Control */ + kHIDUsage_LED_DataMode = 0x1A, /* On/Off Control */ + kHIDUsage_LED_BatteryOperation = 0x1B, /* On/Off Control */ + kHIDUsage_LED_BatteryOK = 0x1C, /* On/Off Control */ + kHIDUsage_LED_BatteryLow = 0x1D, /* On/Off Control */ + kHIDUsage_LED_Speaker = 0x1E, /* On/Off Control */ + kHIDUsage_LED_HeadSet = 0x1F, /* On/Off Control */ + kHIDUsage_LED_Hold = 0x20, /* On/Off Control 
*/ + kHIDUsage_LED_Microphone = 0x21, /* On/Off Control */ + kHIDUsage_LED_Coverage = 0x22, /* On/Off Control */ + kHIDUsage_LED_NightMode = 0x23, /* On/Off Control */ + kHIDUsage_LED_SendCalls = 0x24, /* On/Off Control */ + kHIDUsage_LED_CallPickup = 0x25, /* On/Off Control */ + kHIDUsage_LED_Conference = 0x26, /* On/Off Control */ + kHIDUsage_LED_StandBy = 0x27, /* On/Off Control */ + kHIDUsage_LED_CameraOn = 0x28, /* On/Off Control */ + kHIDUsage_LED_CameraOff = 0x29, /* On/Off Control */ + kHIDUsage_LED_OnLine = 0x2A, /* On/Off Control */ + kHIDUsage_LED_OffLine = 0x2B, /* On/Off Control */ + kHIDUsage_LED_Busy = 0x2C, /* On/Off Control */ + kHIDUsage_LED_Ready = 0x2D, /* On/Off Control */ + kHIDUsage_LED_PaperOut = 0x2E, /* On/Off Control */ + kHIDUsage_LED_PaperJam = 0x2F, /* On/Off Control */ + kHIDUsage_LED_Remote = 0x30, /* On/Off Control */ + kHIDUsage_LED_Forward = 0x31, /* On/Off Control */ + kHIDUsage_LED_Reverse = 0x32, /* On/Off Control */ + kHIDUsage_LED_Stop = 0x33, /* On/Off Control */ + kHIDUsage_LED_Rewind = 0x34, /* On/Off Control */ + kHIDUsage_LED_FastForward = 0x35, /* On/Off Control */ + kHIDUsage_LED_Play = 0x36, /* On/Off Control */ + kHIDUsage_LED_Pause = 0x37, /* On/Off Control */ + kHIDUsage_LED_Record = 0x38, /* On/Off Control */ + kHIDUsage_LED_Error = 0x39, /* On/Off Control */ + kHIDUsage_LED_Usage = 0x3A, /* Selector */ + kHIDUsage_LED_UsageInUseIndicator = 0x3B, /* Usage Switch */ + kHIDUsage_LED_UsageMultiModeIndicator = 0x3C, /* Usage Modifier */ + kHIDUsage_LED_IndicatorOn = 0x3D, /* Selector */ + kHIDUsage_LED_IndicatorFlash = 0x3E, /* Selector */ + kHIDUsage_LED_IndicatorSlowBlink = 0x3F, /* Selector */ + kHIDUsage_LED_IndicatorFastBlink = 0x40, /* Selector */ + kHIDUsage_LED_IndicatorOff = 0x41, /* Selector */ + kHIDUsage_LED_FlashOnTime = 0x42, /* Dynamic Value */ + kHIDUsage_LED_SlowBlinkOnTime = 0x43, /* Dynamic Value */ + kHIDUsage_LED_SlowBlinkOffTime = 0x44, /* Dynamic Value */ + kHIDUsage_LED_FastBlinkOnTime = 0x45, 
/* Dynamic Value */ + kHIDUsage_LED_FastBlinkOffTime = 0x46, /* Dynamic Value */ + kHIDUsage_LED_UsageIndicatorColor = 0x47, /* Usage Modifier */ + kHIDUsage_LED_IndicatorRed = 0x48, /* Selector */ + kHIDUsage_LED_IndicatorGreen = 0x49, /* Selector */ + kHIDUsage_LED_IndicatorAmber = 0x4A, /* Selector */ + kHIDUsage_LED_GenericIndicator = 0x4B, /* On/Off Control */ + kHIDUsage_LED_SystemSuspend = 0x4C, /* On/Off Control */ + kHIDUsage_LED_ExternalPowerConnected = 0x4D, /* On/Off Control */ + /* 0x4E - 0xFFFF Reserved */ + kHIDUsage_LED_Reserved = 0xFFFF, +}; + +/* Button Page (0x09) */ +/* The Button page is the first place an application should look for user selection controls. System graphical user interfaces typically employ a pointer and a set of hierarchical selectors to select, move and otherwise manipulate their environment. For these purposes the following assignment of significance can be applied to the Button usages: */ +/* ¥ Button 1, Primary Button. Used for object selecting, dragging, and double click activation. On MacOS, this is the only button. Microsoft operating systems call this a logical left button, because it */ +/* is not necessarily physically located on the left of the pointing device. */ +/* ¥ Button 2, Secondary Button. Used by newer graphical user interfaces to browse object properties. Exposed by systems to applications that typically assign application-specific functionality. */ +/* ¥ Button 3, Tertiary Button. Optional control. Exposed to applications, but seldom assigned functionality due to prevalence of two- and one-button devices. */ +/* ¥ Buttons 4 -55. As the button number increases, its significance as a selector decreases. */ +/* In many ways the assignment of button numbers is similar to the assignment of Effort in Physical descriptors. Button 1 would be used to define the button a finger rests on when the hand is in the Òat restÓ position, that is, virtually no effort is required by the user to activate the button. 
Button values increment as the finger has to stretch to reach a control. See Section 6.2.3, ÒPhysical Descriptors,Ó in the HID Specification for methods of further qualifying buttons. */ +enum +{ + kHIDUsage_Button_1 = 0x01, /* (primary/trigger) */ + kHIDUsage_Button_2 = 0x02, /* (secondary) */ + kHIDUsage_Button_3 = 0x03, /* (tertiary) */ + kHIDUsage_Button_4 = 0x04, /* 4th button */ + /* ... */ + kHIDUsage_Button_65535 = 0xFFFF, +}; + +/* Ordinal Page (0x0A) */ +/* The Ordinal page allows multiple instances of a control or sets of controls to be declared without requiring individual enumeration in the native usage page. For example, it is not necessary to declare usages of Pointer 1, Pointer 2, and so forth on the Generic Desktop page. When parsed, the ordinal instance number is, in essence, concatenated to the usages attached to the encompassing collection to create Pointer 1, Pointer 2, and so forth. */ +/* For an example, see Section A.5, ÒMultiple Instances of a Control,Ó in Appendix A, ÒUsage Examples.Ó By convention, an Ordinal collection is placed inside the collection for which it is declaring multiple instances. */ +/* Instances do not have to be identical. */ +enum +{ + /* 0x00 Reserved */ + kHIDUsage_Ord_Instance1 = 0x01, /* Usage Modifier */ + kHIDUsage_Ord_Instance2 = 0x02, /* Usage Modifier */ + kHIDUsage_Ord_Instance3 = 0x03, /* Usage Modifier */ + kHIDUsage_Ord_Instance4 = 0x04, /* Usage Modifier */ + kHIDUsage_Ord_Instance65535 = 0xFFFF, /* Usage Modifier */ +}; + +/* Telephony Page (0x0B) */ +/* This usage page defines the keytop and control usages for telephony devices. */ +/* Indicators on a phone are handled by wrapping them in LED: Usage In Use Indicator and LED: Usage Selected Indicator usages. For example, a message-indicator LED would be identified by a Telephony: Message usage declared as a Feature or Output in a LED: Usage In Use Indicator collection. */ +/* See Section 14, ÒConsumer Page (0x0C),Ó for audio volume and tone controls. 
*/ +enum +{ + kHIDUsage_Tfon_Phone = 0x01, /* Application Collection */ + kHIDUsage_Tfon_AnsweringMachine = 0x02, /* Application Collection */ + kHIDUsage_Tfon_MessageControls = 0x03, /* Logical Collection */ + kHIDUsage_Tfon_Handset = 0x04, /* Logical Collection */ + kHIDUsage_Tfon_Headset = 0x05, /* Logical Collection */ + kHIDUsage_Tfon_TelephonyKeyPad = 0x06, /* Named Array */ + kHIDUsage_Tfon_ProgrammableButton = 0x07, /* Named Array */ + /* 0x08 - 0x1F Reserved */ + kHIDUsage_Tfon_HookSwitch = 0x20, /* On/Off Control */ + kHIDUsage_Tfon_Flash = 0x21, /* Momentary Control */ + kHIDUsage_Tfon_Feature = 0x22, /* One-Shot Control */ + kHIDUsage_Tfon_Hold = 0x23, /* On/Off Control */ + kHIDUsage_Tfon_Redial = 0x24, /* One-Shot Control */ + kHIDUsage_Tfon_Transfer = 0x25, /* One-Shot Control */ + kHIDUsage_Tfon_Drop = 0x26, /* One-Shot Control */ + kHIDUsage_Tfon_Park = 0x27, /* On/Off Control */ + kHIDUsage_Tfon_ForwardCalls = 0x28, /* On/Off Control */ + kHIDUsage_Tfon_AlternateFunction = 0x29, /* Momentary Control */ + kHIDUsage_Tfon_Line = 0x2A, /* One-Shot Control */ + kHIDUsage_Tfon_SpeakerPhone = 0x2B, /* On/Off Control */ + kHIDUsage_Tfon_Conference = 0x2C, /* On/Off Control */ + kHIDUsage_Tfon_RingEnable = 0x2D, /* On/Off Control */ + kHIDUsage_Tfon_Ring = 0x2E, /* Selector */ + kHIDUsage_Tfon_PhoneMute = 0x2F, /* On/Off Control */ + kHIDUsage_Tfon_CallerID = 0x30, /* Momentary Control */ + /* 0x31 - 0x4F Reserved */ + kHIDUsage_Tfon_SpeedDial = 0x50, /* One-Shot Control */ + kHIDUsage_Tfon_StoreNumber = 0x51, /* One-Shot Control */ + kHIDUsage_Tfon_RecallNumber = 0x52, /* One-Shot Control */ + kHIDUsage_Tfon_PhoneDirectory = 0x53, /* On/Off Control */ + /* 0x54 - 0x6F Reserved */ + kHIDUsage_Tfon_VoiceMail = 0x70, /* On/Off Control */ + kHIDUsage_Tfon_ScreenCalls = 0x71, /* On/Off Control */ + kHIDUsage_Tfon_DoNotDisturb = 0x72, /* On/Off Control */ + kHIDUsage_Tfon_Message = 0x73, /* One-Shot Control */ + kHIDUsage_Tfon_AnswerOnOrOff = 0x74, /* On/Off 
Control */ + /* 0x75 - 0x8F Reserved */ + kHIDUsage_Tfon_InsideDialTone = 0x90, /* Momentary Control */ + kHIDUsage_Tfon_OutsideDialTone = 0x91, /* Momentary Control */ + kHIDUsage_Tfon_InsideRingTone = 0x92, /* Momentary Control */ + kHIDUsage_Tfon_OutsideRingTone = 0x93, /* Momentary Control */ + kHIDUsage_Tfon_PriorityRingTone = 0x94, /* Momentary Control */ + kHIDUsage_Tfon_InsideRingback = 0x95, /* Momentary Control */ + kHIDUsage_Tfon_PriorityRingback = 0x96, /* Momentary Control */ + kHIDUsage_Tfon_LineBusyTone = 0x97, /* Momentary Control */ + kHIDUsage_Tfon_ReorderTone = 0x98, /* Momentary Control */ + kHIDUsage_Tfon_CallWaitingTone = 0x99, /* Momentary Control */ + kHIDUsage_Tfon_ConfirmationTone1 = 0x9A, /* Momentary Control */ + kHIDUsage_Tfon_ConfirmationTone2 = 0x9B, /* Momentary Control */ + kHIDUsage_Tfon_TonesOff = 0x9C, /* On/Off Control */ + kHIDUsage_Tfon_OutsideRingback = 0x9D, /* Momentary Control */ + /* 0x9E - 0xAF Reserved */ + kHIDUsage_Tfon_PhoneKey0 = 0xB0, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey1 = 0xB1, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey2 = 0xB2, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey3 = 0xB3, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey4 = 0xB4, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey5 = 0xB5, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey6 = 0xB6, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey7 = 0xB7, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey8 = 0xB8, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKey9 = 0xB9, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKeyStar = 0xBA, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKeyPound = 0xBB, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKeyA = 0xBC, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKeyB = 0xBD, /* Selector/One-Shot Control */ + kHIDUsage_Tfon_PhoneKeyC = 0xBE, /* Selector/One-Shot Control */ + 
kHIDUsage_Tfon_PhoneKeyD = 0xBF, /* Selector/One-Shot Control */ + /* 0xC0 - 0xFFFF Reserved */ + kHIDUsage_TFon_Reserved = 0xFFFF, +}; + +/* Consumer Page (0x0C) */ +/* All controls on the Consumer page are application-specific. That is, they affect a specific device, not the system as a whole. */ +enum +{ + kHIDUsage_Csmr_ConsumerControl = 0x01, /* Application Collection */ + kHIDUsage_Csmr_NumericKeyPad = 0x02, /* Named Array */ + kHIDUsage_Csmr_ProgrammableButtons = 0x03, /* Named Array */ + /* 0x04 - 0x1F Reserved */ + kHIDUsage_Csmr_Plus10 = 0x20, /* One-Shot Control */ + kHIDUsage_Csmr_Plus100 = 0x21, /* One-Shot Control */ + kHIDUsage_Csmr_AMOrPM = 0x22, /* One-Shot Control */ + /* 0x23 - 0x2F Reserved */ + kHIDUsage_Csmr_Power = 0x30, /* On/Off Control */ + kHIDUsage_Csmr_Reset = 0x31, /* One-Shot Control */ + kHIDUsage_Csmr_Sleep = 0x32, /* One-Shot Control */ + kHIDUsage_Csmr_SleepAfter = 0x33, /* One-Shot Control */ + kHIDUsage_Csmr_SleepMode = 0x34, /* Re-Trigger Control */ + kHIDUsage_Csmr_Illumination = 0x35, /* On/Off Control */ + kHIDUsage_Csmr_FunctionButtons = 0x36, /* Named Array */ + /* 0x37 - 0x3F Reserved */ + kHIDUsage_Csmr_Menu = 0x40, /* On/Off Control */ + kHIDUsage_Csmr_MenuPick = 0x41, /* One-Shot Control */ + kHIDUsage_Csmr_MenuUp = 0x42, /* One-Shot Control */ + kHIDUsage_Csmr_MenuDown = 0x43, /* One-Shot Control */ + kHIDUsage_Csmr_MenuLeft = 0x44, /* One-Shot Control */ + kHIDUsage_Csmr_MenuRight = 0x45, /* One-Shot Control */ + kHIDUsage_Csmr_MenuEscape = 0x46, /* One-Shot Control */ + kHIDUsage_Csmr_MenuValueIncrease = 0x47, /* One-Shot Control */ + kHIDUsage_Csmr_MenuValueDecrease = 0x48, /* One-Shot Control */ + /* 0x49 - 0x5F Reserved */ + kHIDUsage_Csmr_DataOnScreen = 0x60, /* On/Off Control */ + kHIDUsage_Csmr_ClosedCaption = 0x61, /* On/Off Control */ + kHIDUsage_Csmr_ClosedCaptionSelect = 0x62, /* Selector */ + kHIDUsage_Csmr_VCROrTV = 0x63, /* On/Off Control */ + kHIDUsage_Csmr_BroadcastMode = 0x64, /* One-Shot Control */ 
+ kHIDUsage_Csmr_Snapshot = 0x65, /* One-Shot Control */ + kHIDUsage_Csmr_Still = 0x66, /* One-Shot Control */ + /* 0x67 - 0x7F Reserved */ + kHIDUsage_Csmr_Selection = 0x80, /* Named Array */ + kHIDUsage_Csmr_Assign = 0x81, /* Selector */ + kHIDUsage_Csmr_ModeStep = 0x82, /* One-Shot Control */ + kHIDUsage_Csmr_RecallLast = 0x83, /* One-Shot Control */ + kHIDUsage_Csmr_EnterChannel = 0x84, /* One-Shot Control */ + kHIDUsage_Csmr_OrderMovie = 0x85, /* One-Shot Control */ + kHIDUsage_Csmr_Channel = 0x86, /* Linear Control */ + kHIDUsage_Csmr_MediaSelection = 0x87, /* Selector */ + kHIDUsage_Csmr_MediaSelectComputer = 0x88, /* Selector */ + kHIDUsage_Csmr_MediaSelectTV = 0x89, /* Selector */ + kHIDUsage_Csmr_MediaSelectWWW = 0x8A, /* Selector */ + kHIDUsage_Csmr_MediaSelectDVD = 0x8B, /* Selector */ + kHIDUsage_Csmr_MediaSelectTelephone = 0x8C, /* Selector */ + kHIDUsage_Csmr_MediaSelectProgramGuide = 0x8D, /* Selector */ + kHIDUsage_Csmr_MediaSelectVideoPhone = 0x8E, /* Selector */ + kHIDUsage_Csmr_MediaSelectGames = 0x8F, /* Selector */ + kHIDUsage_Csmr_MediaSelectMessages = 0x90, /* Selector */ + kHIDUsage_Csmr_MediaSelectCD = 0x91, /* Selector */ + kHIDUsage_Csmr_MediaSelectVCR = 0x92, /* Selector */ + kHIDUsage_Csmr_MediaSelectTuner = 0x93, /* Selector */ + kHIDUsage_Csmr_Quit = 0x94, /* One-Shot Control */ + kHIDUsage_Csmr_Help = 0x95, /* On/Off Control */ + kHIDUsage_Csmr_MediaSelectTape = 0x96, /* Selector */ + kHIDUsage_Csmr_MediaSelectCable = 0x97, /* Selector */ + kHIDUsage_Csmr_MediaSelectSatellite = 0x98, /* Selector */ + kHIDUsage_Csmr_MediaSelectSecurity = 0x99, /* Selector */ + kHIDUsage_Csmr_MediaSelectHome = 0x9A, /* Selector */ + kHIDUsage_Csmr_MediaSelectCall = 0x9B, /* Selector */ + kHIDUsage_Csmr_ChannelIncrement = 0x9C, /* One-Shot Control */ + kHIDUsage_Csmr_ChannelDecrement = 0x9D, /* One-Shot Control */ + kHIDUsage_Csmr_Media = 0x9E, /* Selector */ + /* 0x9F Reserved */ + kHIDUsage_Csmr_VCRPlus = 0xA0, /* One-Shot Control */ + 
kHIDUsage_Csmr_Once = 0xA1, /* One-Shot Control */ + kHIDUsage_Csmr_Daily = 0xA2, /* One-Shot Control */ + kHIDUsage_Csmr_Weekly = 0xA3, /* One-Shot Control */ + kHIDUsage_Csmr_Monthly = 0xA4, /* One-Shot Control */ + /* 0xA5 - 0xAF Reserved */ + kHIDUsage_Csmr_Play = 0xB0, /* On/Off Control */ + kHIDUsage_Csmr_Pause = 0xB1, /* On/Off Control */ + kHIDUsage_Csmr_Record = 0xB2, /* On/Off Control */ + kHIDUsage_Csmr_FastForward = 0xB3, /* On/Off Control */ + kHIDUsage_Csmr_Rewind = 0xB4, /* On/Off Control */ + kHIDUsage_Csmr_ScanNextTrack = 0xB5, /* One-Shot Control */ + kHIDUsage_Csmr_ScanPreviousTrack = 0xB6, /* One-Shot Control */ + kHIDUsage_Csmr_Stop = 0xB7, /* One-Shot Control */ + kHIDUsage_Csmr_Eject = 0xB8, /* One-Shot Control */ + kHIDUsage_Csmr_RandomPlay = 0xB9, /* On/Off Control */ + kHIDUsage_Csmr_SelectDisc = 0xBA, /* Named Array */ + kHIDUsage_Csmr_EnterDisc = 0xBB, /* Momentary Control */ + kHIDUsage_Csmr_Repeat = 0xBC, /* One-Shot Control */ + kHIDUsage_Csmr_Tracking = 0xBD, /* Linear Control */ + kHIDUsage_Csmr_TrackNormal = 0xBE, /* One-Shot Control */ + kHIDUsage_Csmr_SlowTracking = 0xBF, /* Linear Control */ + kHIDUsage_Csmr_FrameForward = 0xC0, /* Re-Trigger Control */ + kHIDUsage_Csmr_FrameBack = 0xC1, /* Re-Trigger Control */ + kHIDUsage_Csmr_Mark = 0xC2, /* One-Shot Control */ + kHIDUsage_Csmr_ClearMark = 0xC3, /* One-Shot Control */ + kHIDUsage_Csmr_RepeatFromMark = 0xC4, /* On/Off Control */ + kHIDUsage_Csmr_ReturnToMark = 0xC5, /* One-Shot Control */ + kHIDUsage_Csmr_SearchMarkForward = 0xC6, /* One-Shot Control */ + kHIDUsage_Csmr_SearchMarkBackwards = 0xC7, /* One-Shot Control */ + kHIDUsage_Csmr_CounterReset = 0xC8, /* One-Shot Control */ + kHIDUsage_Csmr_ShowCounter = 0xC9, /* One-Shot Control */ + kHIDUsage_Csmr_TrackingIncrement = 0xCA, /* Re-Trigger Control */ + kHIDUsage_Csmr_TrackingDecrement = 0xCB, /* Re-Trigger Control */ + kHIDUsage_Csmr_StopOrEject = 0xCC, /* One-Shot Control */ + kHIDUsage_Csmr_PlayOrPause = 0xCD, /* 
One-Shot Control */ + kHIDUsage_Csmr_PlayOrSkip = 0xCE, /* One-Shot Control */ + /* 0xCF - 0xDF Reserved */ + kHIDUsage_Csmr_Volume = 0xE0, /* Linear Control */ + kHIDUsage_Csmr_Balance = 0xE1, /* Linear Control */ + kHIDUsage_Csmr_Mute = 0xE2, /* On/Off Control */ + kHIDUsage_Csmr_Bass = 0xE3, /* Linear Control */ + kHIDUsage_Csmr_Treble = 0xE4, /* Linear Control */ + kHIDUsage_Csmr_BassBoost = 0xE5, /* On/Off Control */ + kHIDUsage_Csmr_SurroundMode = 0xE6, /* One-Shot Control */ + kHIDUsage_Csmr_Loudness = 0xE7, /* On/Off Control */ + kHIDUsage_Csmr_MPX = 0xE8, /* On/Off Control */ + kHIDUsage_Csmr_VolumeIncrement = 0xE9, /* Re-Trigger Control */ + kHIDUsage_Csmr_VolumeDecrement = 0xEA, /* Re-Trigger Control */ + /* 0xEB - 0xEF Reserved */ + kHIDUsage_Csmr_Speed = 0xF0, /* Selector */ + kHIDUsage_Csmr_PlaybackSpeed = 0xF1, /* Named Array */ + kHIDUsage_Csmr_StandardPlay = 0xF2, /* Selector */ + kHIDUsage_Csmr_LongPlay = 0xF3, /* Selector */ + kHIDUsage_Csmr_ExtendedPlay = 0xF4, /* Selector */ + kHIDUsage_Csmr_Slow = 0xF5, /* One-Shot Control */ + /* 0xF6 - 0xFF Reserved */ + kHIDUsage_Csmr_FanEnable = 0x100, /* On/Off Control */ + kHIDUsage_Csmr_FanSpeed = 0x101, /* Linear Control */ + kHIDUsage_Csmr_LightEnable = 0x102, /* On/Off Control */ + kHIDUsage_Csmr_LightIlluminationLevel = 0x103, /* Linear Control */ + kHIDUsage_Csmr_ClimateControlEnable = 0x104, /* On/Off Control */ + kHIDUsage_Csmr_RoomTemperature = 0x105, /* Linear Control */ + kHIDUsage_Csmr_SecurityEnable = 0x106, /* On/Off Control */ + kHIDUsage_Csmr_FireAlarm = 0x107, /* One-Shot Control */ + kHIDUsage_Csmr_PoliceAlarm = 0x108, /* One-Shot Control */ + /* 0x109 - 0x14F Reserved */ + kHIDUsage_Csmr_BalanceRight = 0x150, /* Re-Trigger Control */ + kHIDUsage_Csmr_BalanceLeft = 0x151, /* Re-Trigger Control */ + kHIDUsage_Csmr_BassIncrement = 0x152, /* Re-Trigger Control */ + kHIDUsage_Csmr_BassDecrement = 0x153, /* Re-Trigger Control */ + kHIDUsage_Csmr_TrebleIncrement = 0x154, /* Re-Trigger Control 
*/ + kHIDUsage_Csmr_TrebleDecrement = 0x155, /* Re-Trigger Control */ + /* 0x156 - 0x15F Reserved */ + kHIDUsage_Csmr_SpeakerSystem = 0x160, /* Logical Collection */ + kHIDUsage_Csmr_ChannelLeft = 0x161, /* Logical Collection */ + kHIDUsage_Csmr_ChannelRight = 0x162, /* Logical Collection */ + kHIDUsage_Csmr_ChannelCenter = 0x163, /* Logical Collection */ + kHIDUsage_Csmr_ChannelFront = 0x164, /* Logical Collection */ + kHIDUsage_Csmr_ChannelCenterFront = 0x165, /* Logical Collection */ + kHIDUsage_Csmr_ChannelSide = 0x166, /* Logical Collection */ + kHIDUsage_Csmr_ChannelSurround = 0x167, /* Logical Collection */ + kHIDUsage_Csmr_ChannelLowFrequencyEnhancement = 0x168, /* Logical Collection */ + kHIDUsage_Csmr_ChannelTop = 0x169, /* Logical Collection */ + kHIDUsage_Csmr_ChannelUnknown = 0x16A, /* Logical Collection */ + /* 0x16B - 0x16F Reserved */ + kHIDUsage_Csmr_SubChannel = 0x170, /* Linear Control */ + kHIDUsage_Csmr_SubChannelIncrement = 0x171, /* One-Shot Control */ + kHIDUsage_Csmr_SubChannelDecrement = 0x172, /* One-Shot Control */ + kHIDUsage_Csmr_AlternateAudioIncrement = 0x173, /* One-Shot Control */ + kHIDUsage_Csmr_AlternateAudioDecrement = 0x174, /* One-Shot Control */ + /* 0x175 - 0x17F Reserved */ + kHIDUsage_Csmr_ApplicationLaunchButtons = 0x180, /* Named Array */ + kHIDUsage_Csmr_ALLaunchButtonConfigurationTool = 0x181, /* Selector */ + kHIDUsage_Csmr_ALProgrammableButtonConfiguration = 0x182, /* Selector */ + kHIDUsage_Csmr_ALConsumerControlConfiguration = 0x183, /* Selector */ + kHIDUsage_Csmr_ALWordProcessor = 0x184, /* Selector */ + kHIDUsage_Csmr_ALTextEditor = 0x185, /* Selector */ + kHIDUsage_Csmr_ALSpreadsheet = 0x186, /* Selector */ + kHIDUsage_Csmr_ALGraphicsEditor = 0x187, /* Selector */ + kHIDUsage_Csmr_ALPresentationApp = 0x188, /* Selector */ + kHIDUsage_Csmr_ALDatabaseApp = 0x189, /* Selector */ + kHIDUsage_Csmr_ALEmailReader = 0x18A, /* Selector */ + kHIDUsage_Csmr_ALNewsreader = 0x18B, /* Selector */ + 
kHIDUsage_Csmr_ALVoicemail = 0x18C, /* Selector */ + kHIDUsage_Csmr_ALContactsOrAddressBook = 0x18D, /* Selector */ + kHIDUsage_Csmr_ALCalendarOrSchedule = 0x18E, /* Selector */ + kHIDUsage_Csmr_ALTaskOrProjectManager = 0x18F, /* Selector */ + kHIDUsage_Csmr_ALLogOrJournalOrTimecard = 0x190, /* Selector */ + kHIDUsage_Csmr_ALCheckbookOrFinance = 0x191, /* Selector */ + kHIDUsage_Csmr_ALCalculator = 0x192, /* Selector */ + kHIDUsage_Csmr_ALAOrVCaptureOrPlayback = 0x193, /* Selector */ + kHIDUsage_Csmr_ALLocalMachineBrowser = 0x194, /* Selector */ + kHIDUsage_Csmr_ALLANOrWANBrowser = 0x195, /* Selector */ + kHIDUsage_Csmr_ALInternetBrowser = 0x196, /* Selector */ + kHIDUsage_Csmr_ALRemoteNetworkingOrISPConnect = 0x197, /* Selector */ + kHIDUsage_Csmr_ALNetworkConference = 0x198, /* Selector */ + kHIDUsage_Csmr_ALNetworkChat = 0x199, /* Selector */ + kHIDUsage_Csmr_ALTelephonyOrDialer = 0x19A, /* Selector */ + kHIDUsage_Csmr_ALLogon = 0x19B, /* Selector */ + kHIDUsage_Csmr_ALLogoff = 0x19C, /* Selector */ + kHIDUsage_Csmr_ALLogonOrLogoff = 0x19D, /* Selector */ + kHIDUsage_Csmr_ALTerminalLockOrScreensaver = 0x19E, /* Selector */ + kHIDUsage_Csmr_ALControlPanel = 0x19F, /* Selector */ + kHIDUsage_Csmr_ALCommandLineProcessorOrRun = 0x1A0, /* Selector */ + kHIDUsage_Csmr_ALProcessOrTaskManager = 0x1A1, /* Selector */ + kHIDUsage_Csmr_AL = 0x1A2, /* Selector */ + kHIDUsage_Csmr_ALNextTaskOrApplication = 0x1A3, /* Selector */ + kHIDUsage_Csmr_ALPreviousTaskOrApplication = 0x1A4, /* Selector */ + kHIDUsage_Csmr_ALPreemptiveHaltTaskOrApplication = 0x1A5, /* Selector */ + /* 0x1A6 - 0x1FF Reserved */ + kHIDUsage_Csmr_GenericGUIApplicationControls = 0x200, /* Named Array */ + kHIDUsage_Csmr_ACNew = 0x201, /* Selector */ + kHIDUsage_Csmr_ACOpen = 0x202, /* Selector */ + kHIDUsage_Csmr_ACClose = 0x203, /* Selector */ + kHIDUsage_Csmr_ACExit = 0x204, /* Selector */ + kHIDUsage_Csmr_ACMaximize = 0x205, /* Selector */ + kHIDUsage_Csmr_ACMinimize = 0x206, /* Selector */ + 
kHIDUsage_Csmr_ACSave = 0x207, /* Selector */ + kHIDUsage_Csmr_ACPrint = 0x208, /* Selector */ + kHIDUsage_Csmr_ACProperties = 0x209, /* Selector */ + kHIDUsage_Csmr_ACUndo = 0x21A, /* Selector */ + kHIDUsage_Csmr_ACCopy = 0x21B, /* Selector */ + kHIDUsage_Csmr_ACCut = 0x21C, /* Selector */ + kHIDUsage_Csmr_ACPaste = 0x21D, /* Selector */ + kHIDUsage_Csmr_AC = 0x21E, /* Selector */ + kHIDUsage_Csmr_ACFind = 0x21F, /* Selector */ + kHIDUsage_Csmr_ACFindandReplace = 0x220, /* Selector */ + kHIDUsage_Csmr_ACSearch = 0x221, /* Selector */ + kHIDUsage_Csmr_ACGoTo = 0x222, /* Selector */ + kHIDUsage_Csmr_ACHome = 0x223, /* Selector */ + kHIDUsage_Csmr_ACBack = 0x224, /* Selector */ + kHIDUsage_Csmr_ACForward = 0x225, /* Selector */ + kHIDUsage_Csmr_ACStop = 0x226, /* Selector */ + kHIDUsage_Csmr_ACRefresh = 0x227, /* Selector */ + kHIDUsage_Csmr_ACPreviousLink = 0x228, /* Selector */ + kHIDUsage_Csmr_ACNextLink = 0x229, /* Selector */ + kHIDUsage_Csmr_ACBookmarks = 0x22A, /* Selector */ + kHIDUsage_Csmr_ACHistory = 0x22B, /* Selector */ + kHIDUsage_Csmr_ACSubscriptions = 0x22C, /* Selector */ + kHIDUsage_Csmr_ACZoomIn = 0x22D, /* Selector */ + kHIDUsage_Csmr_ACZoomOut = 0x22E, /* Selector */ + kHIDUsage_Csmr_ACZoom = 0x22F, /* Selector */ + kHIDUsage_Csmr_ACFullScreenView = 0x230, /* Selector */ + kHIDUsage_Csmr_ACNormalView = 0x231, /* Selector */ + kHIDUsage_Csmr_ACViewToggle = 0x232, /* Selector */ + kHIDUsage_Csmr_ACScrollUp = 0x233, /* Selector */ + kHIDUsage_Csmr_ACScrollDown = 0x234, /* Selector */ + kHIDUsage_Csmr_ACScroll = 0x235, /* Selector */ + kHIDUsage_Csmr_ACPanLeft = 0x236, /* Selector */ + kHIDUsage_Csmr_ACPanRight = 0x237, /* Selector */ + kHIDUsage_Csmr_ACPan = 0x238, /* Selector */ + kHIDUsage_Csmr_ACNewWindow = 0x239, /* Selector */ + kHIDUsage_Csmr_ACTileHorizontally = 0x23A, /* Selector */ + kHIDUsage_Csmr_ACTileVertically = 0x23B, /* Selector */ + kHIDUsage_Csmr_ACFormat = 0x23C, /* Selector */ + /* 0x23D - 0xFFFF Reserved */ + 
kHIDUsage_Csmr_Reserved = 0xFFFF, +}; + +/* Digitizer Page (0x0D) */ +/* This section provides detailed descriptions of the usages employed by Digitizer Devices. */ +enum +{ + kHIDUsage_Dig_Digitizer = 0x01, /* Application Collection */ + kHIDUsage_Dig_Pen = 0x02, /* Application Collection */ + kHIDUsage_Dig_LightPen = 0x03, /* Application Collection */ + kHIDUsage_Dig_TouchScreen = 0x04, /* Application Collection */ + kHIDUsage_Dig_TouchPad = 0x05, /* Application Collection */ + kHIDUsage_Dig_WhiteBoard = 0x06, /* Application Collection */ + kHIDUsage_Dig_CoordinateMeasuringMachine = 0x07, /* Application Collection */ + kHIDUsage_Dig_3DDigitizer = 0x08, /* Application Collection */ + kHIDUsage_Dig_StereoPlotter = 0x09, /* Application Collection */ + kHIDUsage_Dig_ArticulatedArm = 0x0A, /* Application Collection */ + kHIDUsage_Dig_Armature = 0x0B, /* Application Collection */ + kHIDUsage_Dig_MultiplePointDigitizer = 0x0C, /* Application Collection */ + kHIDUsage_Dig_FreeSpaceWand = 0x0D, /* Application Collection */ + /* 0x0E - 0x1F Reserved */ + kHIDUsage_Dig_Stylus = 0x20, /* Logical Collection */ + kHIDUsage_Dig_Puck = 0x21, /* Logical Collection */ + kHIDUsage_Dig_Finger = 0x22, /* Logical Collection */ + /* 0x23 - 0x2F Reserved */ + kHIDUsage_Dig_TipPressure = 0x30, /* Dynamic Value */ + kHIDUsage_Dig_BarrelPressure = 0x31, /* Dynamic Value */ + kHIDUsage_Dig_InRange = 0x32, /* Momentary Control */ + kHIDUsage_Dig_Touch = 0x33, /* Momentary Control */ + kHIDUsage_Dig_Untouch = 0x34, /* One-Shot Control */ + kHIDUsage_Dig_Tap = 0x35, /* One-Shot Control */ + kHIDUsage_Dig_Quality = 0x36, /* Dynamic Value */ + kHIDUsage_Dig_DataValid = 0x37, /* Momentary Control */ + kHIDUsage_Dig_TransducerIndex = 0x38, /* Dynamic Value */ + kHIDUsage_Dig_TabletFunctionKeys = 0x39, /* Logical Collection */ + kHIDUsage_Dig_ProgramChangeKeys = 0x3A, /* Logical Collection */ + kHIDUsage_Dig_BatteryStrength = 0x3B, /* Dynamic Value */ + kHIDUsage_Dig_Invert = 0x3C, /* Momentary 
Control */ + kHIDUsage_Dig_XTilt = 0x3D, /* Dynamic Value */ + kHIDUsage_Dig_YTilt = 0x3E, /* Dynamic Value */ + kHIDUsage_Dig_Azimuth = 0x3F, /* Dynamic Value */ + kHIDUsage_Dig_Altitude = 0x40, /* Dynamic Value */ + kHIDUsage_Dig_Twist = 0x41, /* Dynamic Value */ + kHIDUsage_Dig_TipSwitch = 0x42, /* Momentary Control */ + kHIDUsage_Dig_SecondaryTipSwitch = 0x43, /* Momentary Control */ + kHIDUsage_Dig_BarrelSwitch = 0x44, /* Momentary Control */ + kHIDUsage_Dig_Eraser = 0x45, /* Momentary Control */ + kHIDUsage_Dig_TabletPick = 0x46, /* Momentary Control */ + /* 0x47 - 0xFFFF Reserved */ + kHIDUsage_Dig_Reserved = 0xFFFF, +}; + +/* AlphanumericDisplay Page (0x14) */ +/* The Alphanumeric Display page is intended for use by simple alphanumeric displays that are used on consumer devices. */ +enum +{ + kHIDUsage_AD_AlphanumericDisplay = 0x01, /* Application Collection */ + /* 0x02 - 0x1F Reserved */ + kHIDUsage_AD_DisplayAttributesReport = 0x20, /* Logical Collection */ + kHIDUsage_AD_ASCIICharacterSet = 0x21, /* Static Flag */ + kHIDUsage_AD_DataReadBack = 0x22, /* Static Flag */ + kHIDUsage_AD_FontReadBack = 0x23, /* Static Flag */ + kHIDUsage_AD_DisplayControlReport = 0x24, /* Logical Collection */ + kHIDUsage_AD_ClearDisplay = 0x25, /* Dynamic Flag */ + kHIDUsage_AD_DisplayEnable = 0x26, /* Dynamic Flag */ + kHIDUsage_AD_ScreenSaverDelay = 0x27, /* Static Value */ + kHIDUsage_AD_ScreenSaverEnable = 0x28, /* Dynamic Flag */ + kHIDUsage_AD_VerticalScroll = 0x29, /* Static Flag */ + kHIDUsage_AD_HorizontalScroll = 0x2A, /* Static Flag */ + kHIDUsage_AD_CharacterReport = 0x2B, /* Logical Collection */ + kHIDUsage_AD_DisplayData = 0x2C, /* Dynamic Value */ + kHIDUsage_AD_DisplayStatus = 0x2D, /* Logical Collection */ + kHIDUsage_AD_StatNotReady = 0x2E, /* Selector */ + kHIDUsage_AD_StatReady = 0x2F, /* Selector */ + kHIDUsage_AD_ErrNotaloadablecharacter = 0x30, /* Selector */ + kHIDUsage_AD_ErrFontdatacannotberead = 0x31, /* Selector */ + 
kHIDUsage_AD_CursorPositionReport = 0x32, /* Logical Collection */ + kHIDUsage_AD_Row = 0x33, /* Dynamic Value */ + kHIDUsage_AD_Column = 0x34, /* Dynamic Value */ + kHIDUsage_AD_Rows = 0x35, /* Static Value */ + kHIDUsage_AD_Columns = 0x36, /* Static Value */ + kHIDUsage_AD_CursorPixelPositioning = 0x37, /* Static Flag */ + kHIDUsage_AD_CursorMode = 0x38, /* Dynamic Flag */ + kHIDUsage_AD_CursorEnable = 0x39, /* Dynamic Flag */ + kHIDUsage_AD_CursorBlink = 0x3A, /* Dynamic Flag */ + kHIDUsage_AD_FontReport = 0x3B, /* Logical Collection */ + kHIDUsage_AD_FontData = 0x3C, /* Buffered Byte */ + kHIDUsage_AD_CharacterWidth = 0x3D, /* Static Value */ + kHIDUsage_AD_CharacterHeight = 0x3E, /* Static Value */ + kHIDUsage_AD_CharacterSpacingHorizontal = 0x3F, /* Static Value */ + kHIDUsage_AD_CharacterSpacingVertical = 0x40, /* Static Value */ + kHIDUsage_AD_UnicodeCharacterSet = 0x41, /* Static Flag */ + /* 0x42 - 0xFFFF Reserved */ + kHIDUsage_AD_Reserved = 0xFFFF, +}; + +#endif /* _IOHIDUSAGETABLES_H */ diff --git a/iokit/IOKit/hidsystem/IOHIDevice.h b/iokit/IOKit/hidsystem/IOHIDevice.h new file mode 100644 index 000000000..3214a0cae --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIDevice.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * IOHIDevice.h - Common Event Source object class. + * + * HISTORY + * 22 May 1992 Mike Paquette at NeXT + * Created. + * 4 Aug 1993 Erik Kay at NeXT + * API cleanup + * 5 Aug 1993 Erik Kay at NeXT + * added ivar space for future expansion + */ + +#ifndef _IOHIDEVICE_H +#define _IOHIDEVICE_H + +#include +#include + +typedef enum { + kHIUnknownDevice = 0, + kHIKeyboardDevice = 1, + kHIRelativePointingDevice = 2 +} IOHIDKind; + +class IOHIDevice : public IOService +{ + OSDeclareDefaultStructors(IOHIDevice); + +public: + virtual bool init(OSDictionary * properties = 0); + virtual void free(); + + virtual UInt32 deviceType(); + virtual IOHIDKind hidKind(); + virtual UInt32 interfaceID(); + virtual bool updateProperties(void); + virtual IOReturn setParamProperties(OSDictionary * dict); + virtual UInt64 getGUID(); +}; + +#endif /* !_IOHIDEVICE_H */ diff --git a/iokit/IOKit/hidsystem/IOHIKeyboard.h b/iokit/IOKit/hidsystem/IOHIKeyboard.h new file mode 100644 index 000000000..65972a0c5 --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIKeyboard.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * EventSrcPCKeyboard.h - PC Keyboard EventSrc subclass definition + * + * HISTORY + * 28 Aug 1992 Joe Pasqua + * Created. + */ + +#ifndef _IOHIKEYBOARD_H +#define _IOHIKEYBOARD_H + +#include +#include + +/* Start Action Definitions */ + +/* + * HISTORICAL NOTE: + * The following entry points were part of the IOHIKeyboardEvents + * protocol. 
+ */ + +typedef void (*KeyboardEventAction)( OSObject * target, + /* eventFlags */ unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet, + /* keyboardType */ unsigned keyboardType, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + +typedef void (*KeyboardSpecialEventAction)(OSObject * target, + /* eventType */ unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* specialty */ unsigned flavor, + /* source id */ UInt64 guid, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + +typedef void (*UpdateEventFlagsAction)( OSObject * target, + /* flags */ unsigned flags); + +/* End Action Definitions */ + + + +/* Default key repeat parameters */ +#define EV_DEFAULTINITIALREPEAT 500000000ULL // 1/2 sec in nanoseconds +#define EV_DEFAULTKEYREPEAT 83333333ULL // 1/12 sec in nanoseconds +#define EV_MINKEYREPEAT 16700000ULL // 1/60 sec + +class IOHIKeyboard : public IOHIDevice +{ + OSDeclareDefaultStructors(IOHIKeyboard); + +protected: + IOLock * _deviceLock; // Lock for all device access + IOHIKeyboardMapper * _keyMap; // KeyMap instance + + // The following fields describe the kind of keyboard + UInt32 _interfaceType; + UInt32 _deviceType; + + // The following fields describe the state of the keyboard + UInt32 * _keyState; // kbdBitVector + IOByteCount _keyStateSize; // kbdBitVector allocated size + unsigned _eventFlags; // Current eventFlags + bool _alphaLock; // true means alpha lock is on + bool _numLock; // true means num lock is on + bool _charKeyActive; // true means char gen. key active + + // The following fields are used in performing key repeats + bool _isRepeat; // true means we're generating repeat + unsigned _codeToRepeat; // What we are repeating + bool _calloutPending; // true means we've sched. 
a callout + AbsoluteTime _lastEventTime; // Time last event was dispatched + AbsoluteTime _downRepeatTime; // Time when we should next repeat + AbsoluteTime _keyRepeat; // Delay between key repeats + AbsoluteTime _initialKeyRepeat; // Delay before initial key repeat + UInt64 _guid; + + OSObject * _keyboardEventTarget; + KeyboardEventAction _keyboardEventAction; + OSObject * _keyboardSpecialEventTarget; + KeyboardSpecialEventAction _keyboardSpecialEventAction; + OSObject * _updateEventFlagsTarget; + UpdateEventFlagsAction _updateEventFlagsAction; + +protected: + virtual void dispatchKeyboardEvent(unsigned int keyCode, + /* direction */ bool goingDown, + /* timeStamp */ AbsoluteTime time); + +public: + virtual bool init(OSDictionary * properties = 0); + virtual bool start(IOService * provider); + virtual void free(); + + virtual bool open(IOService * client, + IOOptionBits options, + KeyboardEventAction keAction, + KeyboardSpecialEventAction kseAction, + UpdateEventFlagsAction uefAction); + virtual void close(IOService * client, IOOptionBits ); + + virtual IOHIDKind hidKind(); + virtual bool updateProperties( void ); + virtual IOReturn setParamProperties(OSDictionary * dict); + +protected: // for subclasses to implement + virtual const unsigned char * defaultKeymapOfLength(UInt32 * length); + virtual void setAlphaLockFeedback(bool val); + virtual void setNumLockFeedback(bool val); + virtual UInt32 maxKeyCodes(); + + +private: + virtual bool resetKeyboard(); + virtual void scheduleAutoRepeat(); + static void _autoRepeat(void * arg, void *); + virtual void autoRepeat(); + virtual void setRepeat(unsigned eventType, unsigned keyCode); + +/* + * HISTORICAL NOTE: + * The following methods were part of the KeyMapDelegate protocol; + * the declarations have now been merged directly into this class. 
+ */ + +public: + virtual void keyboardEvent(unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned keyCode, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet); + + virtual void keyboardSpecialEvent(unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned keyCode, + /* specialty */ unsigned flavor); + + virtual void updateEventFlags(unsigned flags); // Does not generate events + + virtual unsigned eventFlags(); // Global event flags + virtual unsigned deviceFlags(); // per-device event flags + virtual void setDeviceFlags(unsigned flags); // Set device event flags + virtual bool alphaLock(); // current alpha-lock state + virtual void setAlphaLock(bool val); // Set current alpha-lock state + virtual bool numLock(); + virtual void setNumLock(bool val); + virtual bool charKeyActive(); // Is a character gen. key down? + virtual void setCharKeyActive(bool val); // Note that a char gen key is down. + virtual bool doesKeyLock(unsigned key); //does key lock physically + virtual unsigned getLEDStatus(); //check hardware for LED status + + +}; + +#endif /* !_IOHIKEYBOARD_H */ diff --git a/iokit/IOKit/hidsystem/IOHIKeyboardMapper.h b/iokit/IOKit/hidsystem/IOHIKeyboardMapper.h new file mode 100644 index 000000000..d5b25859f --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIKeyboardMapper.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOHIKEYBOARDMAPPER_H +#define _IOHIKEYBOARDMAPPER_H + +#include + +class IOHIKeyboard; + +/* + * Key ip/down state is tracked in a bit list. Bits are set + * for key-down, and cleared for key-up. The bit vector and macros + * for it's manipulation are defined here. + */ + +typedef UInt32 * kbdBitVector; + +#define EVK_BITS_PER_UNIT 32 +#define EVK_BITS_MASK 31 +#define EVK_BITS_SHIFT 5 // 1<<5 == 32, for cheap divide + +#define EVK_KEYDOWN(n, bits) \ + (bits)[((n)>>EVK_BITS_SHIFT)] |= (1 << ((n) & EVK_BITS_MASK)) + +#define EVK_KEYUP(n, bits) \ + (bits)[((n)>>EVK_BITS_SHIFT)] &= ~(1 << ((n) & EVK_BITS_MASK)) + +#define EVK_IS_KEYDOWN(n, bits) \ + (((bits)[((n)>>EVK_BITS_SHIFT)] & (1 << ((n) & EVK_BITS_MASK))) != 0) + +class IOHIKeyboardMapper : public OSObject +{ + OSDeclareDefaultStructors(IOHIKeyboardMapper); + +private: + IOHIKeyboard * _delegate; // KeyMap delegate + bool _mappingShouldBeFreed; // true if map can be IOFree'd + NXParsedKeyMapping _parsedMapping; // current system-wide keymap + +public: + static IOHIKeyboardMapper * keyboardMapper( + IOHIKeyboard * delegate, + const UInt8 * mapping, + UInt32 mappingLength, + bool mappingShouldBeFreed ); + + virtual bool init(IOHIKeyboard * delegate, + const UInt8 * mapping, + UInt32 mappingLength, + bool mappingShouldBeFreed); + virtual void free(); + + virtual const UInt8 * mapping(); + virtual UInt32 mappingLength(); + virtual bool serialize(OSSerialize *s) const; + + virtual void translateKeyCode(UInt8 key, 
bool keyDown, kbdBitVector keyBits); + virtual UInt8 getParsedSpecialKey(UInt8 logical); //retrieve a key from _parsedMapping + + +private: + virtual bool parseKeyMapping(const UInt8 * mapping, + UInt32 mappingLength, + NXParsedKeyMapping * parsedMapping) const; + + virtual void calcModBit(int bit, kbdBitVector keyBits); + virtual void doModCalc(int key, kbdBitVector keyBits); + virtual void doCharGen(int keyCode, bool down); +}; + +#endif /* _IOHIKEYBOARDMAPPER_H */ + +/* + * HISTORICAL NOTE: + * The "delegate" object had to respond to the following protocol; + * this protocol has since been merged into the IOHIKeyboard class. + * + * @protocol KeyMapDelegate + * + * - keyboardEvent :(unsigned)eventType + * flags :(unsigned)flags + * keyCode :(unsigned)keyCode + * charCode:(unsigned)charCode + * charSet :(unsigned)charSet + * originalCharCode:(unsigned)origCharCode + * originalCharSet:(unsigned)origCharSet; + * + * - keyboardSpecialEvent:(unsigned)eventType + * flags :(unsigned)flags + * keyCode :(unsigned)keyCode + * specialty:(unsigned)flavor; + * + * - updateEventFlags:(unsigned)flags; // Does not generate events + * + * - (unsigned)eventFlags; // Global event flags + * - (unsigned)deviceFlags; // per-device event flags + * - setDeviceFlags:(unsigned)flags; // Set device event flags + * - (bool)alphaLock; // current alpha-lock state + * - setAlphaLock:(bool)val; // Set current alpha-lock state + * - (bool)charKeyActive; // Is a character gen. key down? + * - setCharKeyActive:(bool)val; // Note that a char gen key is down. + * + * @end + */ diff --git a/iokit/IOKit/hidsystem/IOHIPointing.h b/iokit/IOKit/hidsystem/IOHIPointing.h new file mode 100644 index 000000000..37b692add --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHIPointing.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOHIPOINTING_H +#define _IOHIPOINTING_H + +#include +#include +#include + +/* Start Action Definitions */ + +/* + * HISTORICAL NOTE: + * The following entry points were part of the IOHIPointingEvents + * protocol. 
+ */ +typedef void (*RelativePointerEventAction)(OSObject * target, + /* buttons */ int buttons, + /* deltaX */ int dx, + /* deltaY */ int dy, + /* atTime */ AbsoluteTime ts); + +typedef void (*AbsolutePointerEventAction)(OSObject * target, + /* buttons */ int buttons, + /* at */ Point * newLoc, + /* withBounds */ Bounds * bounds, + /* inProximity */ bool proximity, + /* withPressure */ int pressure, + /* withAngle */ int stylusAngle, + /* atTime */ AbsoluteTime ts); + +typedef void (*ScrollWheelEventAction)(OSObject * target, + short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + AbsoluteTime ts); + +/* End Action Definitions */ + +class IOHIPointing : public IOHIDevice +{ + OSDeclareDefaultStructors(IOHIPointing); + +private: + IOLock * _deviceLock; // Lock for all device access + int _buttonMode; // The "handedness" of the pointer + IOFixed _acceleration; + bool _convertAbsoluteToRelative; + bool _contactToMove; + bool _hadContact; + Point _previousLocation; + UInt8 _pressureThresholdToClick; // A scale factor of 0 to 255 to determine how much pressure is necessary to generate a primary mouse click - a value of 255 means no click will be generated + void * _scaleSegments; + IOItemCount _scaleSegCount; + IOFixed _fractX; + IOFixed _fractY; + + OSObject * _relativePointerEventTarget; + RelativePointerEventAction _relativePointerEventAction; + OSObject * _absolutePointerEventTarget; + AbsolutePointerEventAction _absolutePointerEventAction; + OSObject * _scrollWheelEventTarget; + ScrollWheelEventAction _scrollWheelEventAction; + + +protected: + virtual void dispatchRelativePointerEvent(int dx, + int dy, + UInt32 buttonState, + AbsoluteTime ts); + + virtual void dispatchAbsolutePointerEvent(Point * newLoc, + Bounds * bounds, + UInt32 buttonState, + bool proximity, + int pressure, + int pressureMin, + int pressureMax, + int stylusAngle, + AbsoluteTime ts); + + virtual void dispatchScrollWheelEvent(short deltaAxis1, + short deltaAxis2, + short deltaAxis3, + 
AbsoluteTime ts); + +public: + virtual bool init(OSDictionary * properties = 0); + virtual bool start(IOService * provider); + virtual void free(); + + virtual bool open(IOService * client, + IOOptionBits options, + RelativePointerEventAction rpeAction, + AbsolutePointerEventAction apeAction, + ScrollWheelEventAction sweAction); + virtual void close(IOService * client, IOOptionBits ); + + virtual IOHIDKind hidKind(); + virtual bool updateProperties( void ); + virtual IOReturn setParamProperties(OSDictionary * dict); + IOReturn powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService*); + IOReturn powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService*); + IOService * _rootDomain; //Needs to be public for C function use + + +protected: // for subclasses to implement + virtual OSData * copyAccelerationTable(); + virtual IOItemCount buttonCount(); + virtual IOFixed resolution(); + +private: + virtual bool resetPointer(); + virtual void scalePointer(int * dxp, int * dyp); + virtual void setupForAcceleration(IOFixed accl); +}; + +#endif /* !_IOHIPOINTING_H */ diff --git a/iokit/IOKit/hidsystem/IOHITablet.h b/iokit/IOKit/hidsystem/IOHITablet.h new file mode 100644 index 000000000..6571127d6 --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHITablet.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOHITABLET_H +#define _IOHITABLET_H + +#include +#include + +class IOHITabletPointer; + +#define kIOHIVendorID "VendorID" +#define kIOHISystemTabletID "SystemTabletID" +#define kIOHIVendorTabletID "VendorTabletID" + +typedef void (*TabletEventAction)(OSObject *target, + NXEventData *tabletData, // Do we want to parameterize this? + AbsoluteTime ts); + +typedef void (*ProximityEventAction)(OSObject *target, + NXEventData *proximityData, // or this? + AbsoluteTime ts); + +class IOHITablet : public IOHIPointing +{ + OSDeclareDefaultStructors(IOHITablet); + +public: + UInt16 _systemTabletID; + +private: + OSObject * _tabletEventTarget; + TabletEventAction _tabletEventAction; + OSObject * _proximityEventTarget; + ProximityEventAction _proximityEventAction; + +protected: + virtual void dispatchTabletEvent(NXEventData *tabletEvent, + AbsoluteTime ts); + + virtual void dispatchProximityEvent(NXEventData *proximityEvent, + AbsoluteTime ts); + + virtual bool startTabletPointer(IOHITabletPointer *pointer, OSDictionary *properties); + +public: + static UInt16 generateTabletID(); + + virtual bool init(OSDictionary * propTable); + virtual bool open(IOService * client, + IOOptionBits options, + RelativePointerEventAction rpeAction, + AbsolutePointerEventAction apeAction, + ScrollWheelEventAction sweAction, + TabletEventAction tabletAction, + ProximityEventAction proximityAction); + +}; + +#endif /* !_IOHITABLET_H */ diff --git a/iokit/IOKit/hidsystem/IOHITabletPointer.h 
b/iokit/IOKit/hidsystem/IOHITabletPointer.h new file mode 100644 index 000000000..4448bd590 --- /dev/null +++ b/iokit/IOKit/hidsystem/IOHITabletPointer.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOHITABLETPOINTER_H +#define _IOHITABLETPOINTER_H + +#include +#include +#include + +#define kIOHITabletPointerID "PointerID" +#define kIOHITabletPointerDeviceID "DeviceID" +#define kIOHITabletPointerVendorType "VendorPointerType" +#define kIOHITabletPointerType "PointerType" +#define kIOHITabletPointerSerialNumber "SerialNumber" +#define kIOHITabletPointerUniqueID "UniqueID" + +class IOHITabletPointer : public IOHIDevice +{ + OSDeclareDefaultStructors(IOHITabletPointer); + +public: + IOHITablet *_tablet; + UInt16 _deviceID; + + static UInt16 generateDeviceID(); + + virtual bool init(OSDictionary *propTable); + virtual bool attach(IOService *provider); + + virtual void dispatchTabletEvent(NXEventData *tabletEvent, + AbsoluteTime ts); + virtual void dispatchProximityEvent(NXEventData *proximityEvent, + AbsoluteTime ts); +}; + +#endif /* !_IOHITABLETPOINTER_H */ diff --git a/iokit/IOKit/hidsystem/IOLLEvent.h b/iokit/IOKit/hidsystem/IOLLEvent.h new file mode 100644 index 000000000..9d6185bcf --- /dev/null +++ b/iokit/IOKit/hidsystem/IOLLEvent.h @@ -0,0 +1,351 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/****************************************************************************** + event.h (PostScript side version) + + CONFIDENTIAL + Copyright (c) 1988 NeXT, Inc. as an unpublished work. + All Rights Reserved. + + Created Leo 01Mar88 + + Modified: + 04May88 Leo Final event types and record + 22Aug88 Leo Change short -> int for window, add reserved + 26May90 Ted Added NX_UNDIMMASK to correct triggering of UndoAutoDim + 12Dec91 Mike Brought into sync with dpsclient/event.h, and fixed + the #ifndef interlock with dpsclient/event.h that was + broken during the Great Header Revision. + + The PostScript version of this file differs from the + Window Kit version in that the coordinates here are + ints instead of floats. +******************************************************************************/ + +#ifndef _DEV_EVENT_H +#define _DEV_EVENT_H + +#include +#include + +#ifdef EVENT_H /* Interlock with dpsclient/event.h */ +#if !defined(_NXSIZE_) /* Work around patch for old event.h in Phase 3 projs*/ +#define _NXSIZE_ 1 /* NXCoord, NXPoint, NXSize decl seen */ +#define _NXSize_ NXSize +#endif /* _NXSIZE_ */ +#else /* EVENT_H */ /* Haven't seen dpsclient/event.h, so define away */ +#define EVENT_H + +#ifdef KERNEL +#else /* KERNEL */ + +#if !defined(_NXSIZE_) /* Work around patch for old event.h in Phase 3 projs*/ +#define _NXSIZE_ 1 /* NXCoord, NXPoint, NXSize decl seen */ +typedef float NXCoord; + +typedef struct _NXPoint { /* point */ + NXCoord x, y; +} NXPoint; + +typedef struct _NXSize { /* size */ + NXCoord width, height; +} NXSize; +#define _NXSize_ NXSize /* Correct usage in event_status_driver.h */ +#endif /* _NXSIZE_ */ + +#endif /* KERNEL */ + +/* Event types */ + +#define NX_NULLEVENT 0 /* internal use */ + +/* mouse events */ + +#define NX_LMOUSEDOWN 1 /* left mouse-down event */ +#define NX_LMOUSEUP 2 /* left 
mouse-up event */ +#define NX_RMOUSEDOWN 3 /* right mouse-down event */ +#define NX_RMOUSEUP 4 /* right mouse-up event */ +#define NX_MOUSEMOVED 5 /* mouse-moved event */ +#define NX_LMOUSEDRAGGED 6 /* left mouse-dragged event */ +#define NX_RMOUSEDRAGGED 7 /* right mouse-dragged event */ +#define NX_MOUSEENTERED 8 /* mouse-entered event */ +#define NX_MOUSEEXITED 9 /* mouse-exited event */ + +/* keyboard events */ + +#define NX_KEYDOWN 10 /* key-down event */ +#define NX_KEYUP 11 /* key-up event */ +#define NX_FLAGSCHANGED 12 /* flags-changed event */ + +/* composite events */ + +#define NX_KITDEFINED 13 /* application-kit-defined event */ +#define NX_SYSDEFINED 14 /* system-defined event */ +#define NX_APPDEFINED 15 /* application-defined event */ +/* There are additional DPS client defined events past this point. */ + +/* Scroll wheel events */ + +#define NX_SCROLLWHEELMOVED 22 + +/* tablet events */ + +#define NX_TABLETPOINTER 23 +#define NX_TABLETPROXIMITY 24 + +#define NX_FIRSTEVENT 0 +#define NX_LASTEVENT 24 +#define NX_NUMPROCS (NX_LASTEVENT-NX_FIRSTEVENT+1) + +/* Event masks */ + +#define NX_LMOUSEDOWNMASK (1 << NX_LMOUSEDOWN) /* left mouse-down */ +#define NX_LMOUSEUPMASK (1 << NX_LMOUSEUP) /* left mouse-up */ +#define NX_RMOUSEDOWNMASK (1 << NX_RMOUSEDOWN) /* right mouse-down */ +#define NX_RMOUSEUPMASK (1 << NX_RMOUSEUP) /* right mouse-up */ +#define NX_MOUSEMOVEDMASK (1 << NX_MOUSEMOVED) /* mouse-moved */ +#define NX_LMOUSEDRAGGEDMASK (1 << NX_LMOUSEDRAGGED) /* left-dragged */ +#define NX_RMOUSEDRAGGEDMASK (1 << NX_RMOUSEDRAGGED) /* right-dragged */ +#define NX_MOUSEENTEREDMASK (1 << NX_MOUSEENTERED) /* mouse-entered */ +#define NX_MOUSEEXITEDMASK (1 << NX_MOUSEEXITED) /* mouse-exited */ +#define NX_KEYDOWNMASK (1 << NX_KEYDOWN) /* key-down */ +#define NX_KEYUPMASK (1 << NX_KEYUP) /* key-up */ +#define NX_FLAGSCHANGEDMASK (1 << NX_FLAGSCHANGED) /* flags-changed */ +#define NX_KITDEFINEDMASK (1 << NX_WINCHANGED) /* kit-defined */ +#define 
NX_SYSDEFINEDMASK (1 << NX_SYSDEFINED) /* system-defined */ +#define NX_APPDEFINEDMASK (1 << NX_APPDEFINED) /* app-defined */ +#define NX_SCROLLWHEELMOVEDMASK (1 << NX_SCROLLWHEELMOVED) /* scroll wheel moved */ +#define NX_TABLETPOINTERMASK (1 << NX_TABLETPOINTER) /* tablet pointer moved */ +#define NX_TABLETPROXIMITYMASK (1 << NX_TABLETPROXIMITY) /* tablet pointer proximity */ + +#define EventCodeMask(type) (1 << (type)) +#define NX_ALLEVENTS -1 /* Check for all events */ + +/* sub types for system defined events */ + +#define NX_SUBTYPE_POWER_KEY 1 +#define NX_SUBTYPE_AUX_MOUSE_BUTTONS 7 +#define NX_SUBTYPE_AUX_CONTROL_BUTTONS 8 + +#define NX_SUBTYPE_EJECT_KEY 10 +#define NX_SUBTYPE_SLEEP_EVENT 11 +#define NX_SUBTYPE_RESTART_EVENT 12 +#define NX_SUBTYPE_SHUTDOWN_EVENT 13 + +/* Masks for the bits in event.flags */ + +/* device-independent */ + +#define NX_ALPHASHIFTMASK 0x00010000 +#define NX_SHIFTMASK 0x00020000 +#define NX_CONTROLMASK 0x00040000 +#define NX_ALTERNATEMASK 0x00080000 +#define NX_COMMANDMASK 0x00100000 +#define NX_NUMERICPADMASK 0x00200000 +#define NX_HELPMASK 0x00400000 +#define NX_SECONDARYFNMASK 0x00800000 + +/* device-dependent (really?) */ + +//#define NX_NEXTCTLKEYMASK 0x00000001 +//#define NX_NEXTLSHIFTKEYMASK 0x00000002 +//#define NX_NEXTRSHIFTKEYMASK 0x00000004 +//#define NX_NEXTLCMDKEYMASK 0x00000008 +//#define NX_NEXTRCMDKEYMASK 0x00000010 +//#define NX_NEXTLALTKEYMASK 0x00000020 +//#define NX_NEXTRALTKEYMASK 0x00000040 + +/* + * Additional reserved bits in event.flags + */ + +#define NX_STYLUSPROXIMITYMASK 0x00000080 +#define NX_NONCOALSESCEDMASK 0x00000100 + +/* click state values + * If you have the following events in close succession, the click + * field has the indicated value: + * + * Event Click Value Comments + * mouse-down 1 Not part of any click yet + * mouse-up 1 Aha! A click! 
+ * mouse-down 2 Doing a double-click + * mouse-up 2 It's finished + * mouse-down 3 A triple + * mouse-up 3 + */ + +/* Values for the character set in event.data.key.charSet */ + +#define NX_ASCIISET 0 +#define NX_SYMBOLSET 1 +#define NX_DINGBATSSET 2 + +/* EventData type: defines the data field of an event */ + +typedef union { + struct { /* For mouse-down and mouse-up events */ + unsigned char subx; /* sub-pixel position for x */ + unsigned char suby; /* sub-pixel position for y */ + short eventNum; /* unique identifier for this button */ + int click; /* click state of this event */ + unsigned char pressure; /* pressure value: 0=none, 255=full */ + char reserved1; + short reserved2; + int reserved3; + int reserved4; + int reserved5; + int reserved6; + int reserved7; + } mouse; + struct { + int dx; + int dy; + int reserved1; + int reserved2; + int reserved3; + int reserved4; + int reserved5; + int reserved6; + } mouseMove; + struct { /* For key-down and key-up events */ + unsigned short origCharSet; /* unmodified character set code */ + short repeat; /* for key-down: nonzero if really a repeat */ + unsigned short charSet; /* character set code */ + unsigned short charCode; /* character code in that set */ + unsigned short keyCode; /* device-dependent key number */ + unsigned short origCharCode; /* unmodified character code */ + int reserved1; + unsigned int keyboardType; + int reserved2; + int reserved3; + int reserved4; + } key; + struct { /* For mouse-entered and mouse-exited events */ + short reserved; + short eventNum; /* unique identifier from mouse down event */ + int trackingNum; /* unique identifier from settrackingrect */ + int userData; /* uninterpreted integer from settrackingrect */ + int reserved1; + int reserved2; + int reserved3; + int reserved4; + int reserved5; + } tracking; + struct { + short deltaAxis1; + short deltaAxis2; + short deltaAxis3; + short reserved1; + int reserved2; + int reserved3; + int reserved4; + int reserved5; + int reserved6; 
+ int reserved7; + } scrollWheel; + struct { /* For window-changed, sys-defined, and app-defined events */ + short reserved; + short subType; /* event subtype for compound events */ + union { + float F[7]; /* for use in compound events */ + long L[7]; /* for use in compound events */ + short S[14]; /* for use in compound events */ + char C[28]; /* for use in compound events */ + } misc; + } compound; + struct { + SInt32 x; /* absolute x coordinate in tablet space at full tablet resolution */ + SInt32 y; /* absolute y coordinate in tablet space at full tablet resolution */ + SInt32 z; /* absolute z coordinate in tablet space at full tablet resolution */ + UInt16 buttons; /* one bit per button - bit 0 is first button - 1 = closed */ + UInt16 pressure; /* scaled pressure value; MAXPRESSURE=(2^16)-1, MINPRESSURE=0 */ + struct { + SInt16 x; /* scaled tilt x value; range is -((2^15)-1) to (2^15)-1 (-32767 to 32767) */ + SInt16 y; /* scaled tilt y value; range is -((2^15)-1) to (2^15)-1 (-32767 to 32767) */ + } tilt; + UInt16 rotation; /* Fixed-point representation of device rotation in a 10.6 format */ + SInt16 tangentialPressure; /* tangential pressure on the device; range same as tilt */ + UInt16 deviceID; /* system-assigned unique device ID - matches to deviceID field in proximity event */ + SInt16 vendor1; /* vendor-defined signed 16-bit integer */ + SInt16 vendor2; /* vendor-defined signed 16-bit integer */ + SInt16 vendor3; /* vendor-defined signed 16-bit integer */ + } tablet; + struct { + UInt16 vendorID; /* vendor-defined ID - typically will be USB vendor ID */ + UInt16 tabletID; /* vendor-defined tablet ID - typically will be USB product ID for the tablet */ + UInt16 pointerID; /* vendor-defined ID of the specific pointing device */ + UInt16 deviceID; /* system-assigned unique device ID - matches to deviceID field in tablet event */ + UInt16 systemTabletID; /* system-assigned unique tablet ID */ + UInt16 vendorPointerType; /* vendor-defined pointer type */ + 
UInt32 pointerSerialNumber; /* vendor-defined serial number of the specific pointing device */ + UInt64 uniqueID; /* vendor-defined unique ID for this pointer */ + UInt32 capabilityMask; /* mask representing the capabilities of the device */ + UInt8 pointerType; /* type of pointing device - enum to be defined */ + UInt8 enterProximity; /* non-zero = entering; zero = leaving */ + SInt16 reserved1; + } proximity; +} NXEventData; + +/* Finally! The event record! */ +typedef struct _NXEvent { + int type; /* An event type from above */ + struct { + int x, y; /* Base coordinates in window, */ + } location; /* from bottom left */ + unsigned long long time; /* time since launch */ + int flags; /* key state flags */ + unsigned int window; /* window number of assigned window */ + NXEventData data; /* type-dependent data */ +} NXEvent, *NXEventPtr; + + +/* How to pick window(s) for event (for PostEvent) */ +#define NX_NOWINDOW -1 +#define NX_BYTYPE 0 +#define NX_BROADCAST 1 +#define NX_TOPWINDOW 2 +#define NX_FIRSTWINDOW 3 +#define NX_MOUSEWINDOW 4 +#define NX_NEXTWINDOW 5 +#define NX_LASTLEFT 6 +#define NX_LASTRIGHT 7 +#define NX_LASTKEY 8 +#define NX_EXPLICIT 9 +#define NX_TRANSMIT 10 +#define NX_BYPSCONTEXT 11 + +#endif /* EVENT_H */ /* End of defs common with dpsclient/event.h */ + +/* Mask of events that cause screen to undim */ + +#define NX_UNDIMMASK (NX_KEYDOWNMASK | NX_KEYUPMASK | \ + NX_LMOUSEDOWNMASK | NX_LMOUSEUPMASK | \ + NX_RMOUSEDOWNMASK | NX_RMOUSEUPMASK | \ + NX_MOUSEMOVEDMASK | NX_FLAGSCHANGEDMASK | \ + NX_MOUSEENTEREDMASK | NX_MOUSEEXITEDMASK | \ + NX_LMOUSEDRAGGEDMASK | NX_RMOUSEDRAGGEDMASK | \ + NX_SCROLLWHEELMOVEDMASK | NX_TABLETPOINTERMASK | \ + NX_TABLETPROXIMITYMASK) + +#endif /* !_DEV_EVENT_H */ + diff --git a/iokit/IOKit/hidsystem/Makefile b/iokit/IOKit/hidsystem/Makefile new file mode 100644 index 000000000..13e02f6ef --- /dev/null +++ b/iokit/IOKit/hidsystem/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export 
MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = hidsystem +NOT_EXPORT_HEADERS = ev_private.h + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = IOHIDTypes.h IOHIDShared.h IOHIDParameter.h IOLLEvent.h ev_keymap.h +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/hidsystem/ev_keymap.h b/iokit/IOKit/hidsystem/ev_keymap.h new file mode 100644 index 000000000..a5f74dcee --- /dev/null +++ b/iokit/IOKit/hidsystem/ev_keymap.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * ev_keymap.h + * Defines the structure used for parsing keymappings. These structures + * and definitions are used by event sources in the kernel and by + * applications and utilities which manipulate keymaps. + * + * HISTORY + * 02-Jun-1992 Mike Paquette at NeXT + * Created. + */ + +#ifndef _DEV_EV_KEYMAP_H +#define _DEV_EV_KEYMAP_H + +#define NX_NUMKEYCODES 128 /* Highest key code is 0x7f */ +#define NX_NUMSEQUENCES 128 /* Maximum possible number of sequences */ +#define NX_NUMMODIFIERS 16 /* Maximum number of modifier bits */ +#define NX_BYTE_CODES 0 /* If first short 0, all are bytes (else shorts) */ + +#define NX_WHICHMODMASK 0x0f /* bits out of keyBits for bucky bits */ +#define NX_MODMASK 0x10 /* Bit out of keyBits indicates modifier bit */ +#define NX_CHARGENMASK 0x20 /* bit out of keyBits for char gen */ +#define NX_SPECIALKEYMASK 0x40 /* bit out of keyBits for specialty key */ +#define NX_KEYSTATEMASK 0x80 /* OBSOLETE - DO NOT USE IN NEW DESIGNS */ + +/* + * Special keys currently known to and understood by the system. + * If new specialty keys are invented, extend this list as appropriate. + * The presence of these keys in a particular implementation is not + * guaranteed. 
+ */ +#define NX_NOSPECIALKEY 0xFFFF +#define NX_KEYTYPE_SOUND_UP 0 +#define NX_KEYTYPE_SOUND_DOWN 1 +#define NX_KEYTYPE_BRIGHTNESS_UP 2 +#define NX_KEYTYPE_BRIGHTNESS_DOWN 3 +#define NX_KEYTYPE_CAPS_LOCK 4 +#define NX_KEYTYPE_HELP 5 +#define NX_POWER_KEY 6 +#define NX_KEYTYPE_MUTE 7 +#define NX_UP_ARROW_KEY 8 +#define NX_DOWN_ARROW_KEY 9 +#define NX_KEYTYPE_NUM_LOCK 10 + +#define NX_KEYTYPE_CONTRAST_UP 11 +#define NX_KEYTYPE_CONTRAST_DOWN 12 +#define NX_KEYTYPE_LAUNCH_PANEL 13 +#define NX_KEYTYPE_EJECT 14 + +#define NX_NUMSPECIALKEYS 15 /* Maximum number of special keys */ +#define NX_NUM_SCANNED_SPECIALKEYS 15 /* First 15 special keys are */ + /* actively scanned in kernel */ + +/* Mask of special keys that are posted as events */ + +#define NX_SPECIALKEY_POST_MASK \ + ((1 << NX_KEYTYPE_SOUND_UP) | (1 << NX_KEYTYPE_SOUND_DOWN) | \ + (1 << NX_POWER_KEY) | (1 << NX_KEYTYPE_MUTE) | \ + (1 << NX_KEYTYPE_BRIGHTNESS_UP) | (1 << NX_KEYTYPE_BRIGHTNESS_DOWN) | \ + (1 << NX_KEYTYPE_CONTRAST_UP) | (1 << NX_KEYTYPE_CONTRAST_DOWN) | \ + (1 << NX_KEYTYPE_LAUNCH_PANEL) | (1 << NX_KEYTYPE_EJECT) | \ + 0) + +/* Modifier key indices into modDefs[] */ +#define NX_MODIFIERKEY_ALPHALOCK 0 +#define NX_MODIFIERKEY_SHIFT 1 +#define NX_MODIFIERKEY_CONTROL 2 +#define NX_MODIFIERKEY_ALTERNATE 3 +#define NX_MODIFIERKEY_COMMAND 4 +#define NX_MODIFIERKEY_NUMERICPAD 5 +#define NX_MODIFIERKEY_HELP 6 +#define NX_MODIFIERKEY_SECONDARYFN 7 +#define NX_MODIFIERKEY_NUMLOCK 8 + + +typedef struct _NXParsedKeyMapping_ { + /* If nonzero, all numbers are shorts; if zero, all numbers are bytes*/ + short shorts; + + /* + * For each keycode, low order bit says if the key + * generates characters. + * High order bit says if the key is assigned to a modifier bit. + * The second to low order bit gives the current state of the key.
+ */ + char keyBits[NX_NUMKEYCODES]; + + /* Bit number of highest numbered modifier bit */ + int maxMod; + + /* Pointers to where the list of keys for each modifier bit begins, + * or NULL. + */ + unsigned char *modDefs[NX_NUMMODIFIERS]; + + /* Key code of highest key defined to generate characters */ + int numDefs; + + /* Pointer into the keyMapping where this key's definitions begin */ + unsigned char *keyDefs[NX_NUMKEYCODES]; + + /* number of sequence definitions */ + int numSeqs; + + /* pointers to sequences */ + unsigned char *seqDefs[NX_NUMSEQUENCES]; + + /* Special key definitions */ + int numSpecialKeys; + + /* Special key values, or 0xFFFF if none */ + unsigned short specialKeys[NX_NUMSPECIALKEYS]; + + /* Pointer to the original keymapping string */ + const unsigned char *mapping; + + /* Length of the original string */ + int mappingLen; +} NXParsedKeyMapping; + +#endif /* !_DEV_EV_KEYMAP_H */ diff --git a/iokit/IOKit/hidsystem/ev_private.h b/iokit/IOKit/hidsystem/ev_private.h new file mode 100644 index 000000000..596dfbae2 --- /dev/null +++ b/iokit/IOKit/hidsystem/ev_private.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/****************************************************************************** + + ev_private.h + Internal defs for the events driver. The contents of this module + may need to be tweaked slightly from one architecture to the next. + 22 May 1992 Mike Paquette at NeXT Computers, Inc. + + Copyright 1992 NeXT, Inc. + + Modified: + + +******************************************************************************/ + +#ifndef _DEV_EV_PRIVATE_H +#define _DEV_EV_PRIVATE_H + +#include + +/* Initial cursor position */ +#define INIT_CURSOR_X 16 +#define INIT_CURSOR_Y 16 + +/* Default mouse click time and motion constants */ +#define EV_DCLICKTIME 500000000 /* Default nanoseconds for a double-click */ +#define EV_DCLICKSPACE 3 /* Default pixel threshold for double-clicks */ + +/* Default Wait Cursor Timing Constants (in nanoseconds) */ +#define DefaultWCSustain 300000000 /* 0.3 seconds */ +#define DefaultWCFrameRate 75000000 /* 13.3 frames/second */ +#define DefaultWCThreshold 1200000000ULL /* 1.2 seconds */ + +#define EV_STD_CURSOR 0 +#define EV_WAITCURSOR 1 +#define EV_WAITCURSOR_1 2 +#define EV_WAITCURSOR_2 3 +#define EV_MAXCURSOR (EV_WAITCURSOR_2) + +/* Default dim time is 5 minutes, nanoseconds */ +#define DAUTODIMPERIOD (1000ULL*1000ULL*1000ULL*60*5) +/* Default dim level is one-fourth */ +#define DDIMBRIGHTNESS (EV_SCREEN_MAX_BRIGHTNESS/4) + + +/* Where event numbers start */ +#define INITEVENTNUM 13 +#define NULLEVENTNUM 0 /* The event number that never was */ + +#define MOVEDEVENTMASK \ + (NX_MOUSEMOVEDMASK | NX_LMOUSEDRAGGEDMASK | NX_RMOUSEDRAGGEDMASK ) +#define COALESCEEVENTMASK \ + (MOVEDEVENTMASK | NX_MOUSEEXITEDMASK) +#define MOUSEEVENTMASK \ + (NX_LMOUSEDOWNMASK|NX_RMOUSEDOWNMASK|NX_LMOUSEUPMASK|NX_RMOUSEUPMASK) +#define PRESSUREEVENTMASK \ + 
(NX_LMOUSEDOWNMASK|NX_LMOUSEUPMASK|NX_MOUSEMOVEDMASK|NX_LMOUSEDRAGGEDMASK) + +/* Flags which can modify meaning of special volume/brightness keys */ +#define SPECIALKEYS_MODIFIER_MASK \ + (NX_COMMANDMASK|NX_ALTERNATEMASK|NX_CONTROLMASK) + +/* Bits in evg->eventFlags owned by keyboard devices */ +#define KEYBOARD_FLAGSMASK \ + (NX_ALPHASHIFTMASK | NX_SHIFTMASK | NX_CONTROLMASK | NX_ALTERNATEMASK \ + | NX_COMMANDMASK | NX_NUMERICPADMASK | NX_HELPMASK | NX_SECONDARYFNMASK) +// | NX_NEXTCTLKEYMASK \ +// | NX_NEXTLSHIFTKEYMASK | NX_NEXTRSHIFTKEYMASK | NX_NEXTLCMDKEYMASK \ +// | NX_NEXTRCMDKEYMASK | NX_NEXTLALTKEYMASK | NX_NEXTRALTKEYMASK) + +/* Some non-zero token to or with screen number */ +#define SCREENTOKEN 256 + +/* A macro to report if the event queue is not empty */ +#define EventsInQueue() \ + (eventsOpen && (((EvGlobals*)evg)->LLEHead != ((EvGlobals*)evg)->LLETail)) + +struct _eventMsg { + mach_msg_header_t h; +#ifdef mach3xxx // this is not needed anymore? + msg_type_t t; +#endif +}; + +/****************************************************************************** + EvScreen + This structure is used by the ev driver. + It holds information about a single screen: how much private shmem it owns, + where its private shmem region starts, its global bounds and four procedure + vectors. This structure is allocated by the ev driver and is filled in + when a driver calls ev_register_screen(). +******************************************************************************/ + +typedef volatile struct _evScreen { + IOGraphicsDevice * instance;/* Driver instance owning this screen. */ +// void *shmemPtr; /* Ptr to private shmem (if non-zero size) */ +// int shmemSize; /* Size of private shmem */ + Bounds * bounds; /* Screen's bounds in device coordinates */ +} EvScreen; + +/* + * We maintain a queue of EventSrc instances attached to the Event + * Driver. These sources are dynamically checked in with the Event + * Driver. 
When the driver is closed (as on Window Server exit) we + * post a relinquishOwnership:client message to the drivers. + */ +typedef struct { + IOHIDevice * eventSrc; +} eventSrcInfo; + +typedef struct { + eventSrcInfo info; + queue_chain_t link; +} attachedEventSrc; + +// No-op XPR stuff +#define xpr_ev_cursor(x, a, b, c, d, e) +#define xpr_ev_post(x, a, b, c, d, e) + +#endif /* !_DEV_EV_PRIVATE_H */ + diff --git a/iokit/IOKit/i386/IOSharedLockImp.h b/iokit/IOKit/i386/IOSharedLockImp.h new file mode 100644 index 000000000..00eb2dfdb --- /dev/null +++ b/iokit/IOKit/i386/IOSharedLockImp.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * EventShmemLock.h - Shared memory area locks for use between the + * WindowServer and the Event Driver. + * + * + * HISTORY + * 29 April 1992 Mike Paquette at NeXT + * Created. 
+ * + * Multiprocessor locks used within the shared memory area between the + * kernel and event system. These must work in both user and kernel mode. + * The locks are defined in an include file so they get exported to the local + * include file area. + * + * This is basically a ripoff of the spin locks under the cthreads packages. + */ + +#ifndef _IOKIT_IOSHAREDLOCKIMP_H +#define _IOKIT_IOSHAREDLOCKIMP_H + +#include + +// 'Till we're building in kernel +.macro DISABLE_PREEMPTION +#ifdef KERNEL +#endif +.endmacro +.macro ENABLE_PREEMPTION +#ifdef KERNEL +#endif +.endmacro + +/* + * void + * ev_lock(p) + * int *p; + * + * Lock the lock pointed to by p. Spin (possibly forever) until the next + * lock is available. + */ + TEXT + +#ifndef KERNEL +LEAF(_ev_lock, 0) +LEAF(_IOSpinLock, 0) + push %eax + push %ecx + movl $1, %ecx + movl 12(%esp), %eax +_spin: + xchgl %ecx,0(%eax) + cmp $0, %ecx + jne _spin + pop %ecx + pop %eax +END(_ev_lock) +#endif + +/* + * void + * ev_unlock(p) + * int *p; + * + * Unlock the lock pointed to by p. + */ +LEAF(_ev_unlock, 0) +LEAF(_IOSpinUnlock, 0) + push %eax + movl 8(%esp),%eax + movl $0,0(%eax) + ENABLE_PREEMPTION() + pop %eax +END(_ev_unlock) + + + +/* + * int + * ev_try_lock(p) + * int *p; + * + * Try to lock p. Return zero if not successful. + */ + +LEAF(_ev_try_lock, 0) +LEAF(_IOTrySpinLock, 0) + DISABLE_PREEMPTION() + movl 4(%esp), %eax + lock;bts $0, 0(%eax) + jb 1f + movl $1, %eax /* yes */ + ret +1: + ENABLE_PREEMPTION() + xorl %eax, %eax /* no */ +END(_ev_try_lock) + + +#endif /* ! 
_IOKIT_IOSHAREDLOCKIMP_H */ diff --git a/iokit/IOKit/i386/Makefile b/iokit/IOKit/i386/Makefile new file mode 100644 index 000000000..05dac0f86 --- /dev/null +++ b/iokit/IOKit/i386/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MD_DIR = i386 +EXCLUDE_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) +HEADER_LIST = $(filter-out $(EXCLUDE_HEADERS), $(ALL_HEADERS)) + +INSTALL_MD_LIST = ${HEADER_LIST} +INSTALL_MD_LCL_LIST = "" +INSTALL_MD_DIR = $(MD_DIR) + +EXPORT_MD_LIST = ${HEADER_LIST} +EXPORT_MD_DIR = IOKit/$(MD_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/machine/IOSharedLockImp.h b/iokit/IOKit/machine/IOSharedLockImp.h new file mode 100644 index 000000000..543a83ed4 --- /dev/null +++ b/iokit/IOKit/machine/IOSharedLockImp.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#if defined (__ppc__) +#include "IOKit/ppc/IOSharedLockImp.h" +#elif defined (__i386__) +#include "IOKit/i386/IOSharedLockImp.h" +#else +#error architecture not supported +#endif + diff --git a/iokit/IOKit/machine/Makefile b/iokit/IOKit/machine/Makefile new file mode 100644 index 000000000..4ef1c73e7 --- /dev/null +++ b/iokit/IOKit/machine/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = machine +EXCLUDE_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) +HEADER_LIST = $(filter-out $(EXCLUDE_HEADERS), $(ALL_HEADERS)) + +INSTALL_MI_LIST = ${HEADER_LIST} +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = ${HEADER_LIST} +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/ndrvsupport/IOMacOSTypes.h b/iokit/IOKit/ndrvsupport/IOMacOSTypes.h new file mode 100644 index 000000000..dbf127856 --- /dev/null 
+++ b/iokit/IOKit/ndrvsupport/IOMacOSTypes.h @@ -0,0 +1,394 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 21 July 98 - start IOKit + */ + +/* + File: Types.h + + Contains: Basic Macintosh data types. + + Version: Technology: PowerSurge 1.0.2. + Package: Universal Interfaces 2.1.2 on ETO #20 + + Copyright: © 1984-1995 by Apple Computer, Inc. + All rights reserved. + + Bugs?: If you find a problem with this file, use the Apple Bug Reporter + stack. 
Include the file and version information (from above) + in the problem description and send to: + Internet: apple.bugs@applelink.apple.com + AppleLink: APPLE.BUGS + +*/ + +#ifndef _IOKIT_IOMACOSTYPES_H +#define _IOKIT_IOMACOSTYPES_H +#ifndef __MACTYPES__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#pragma options align=mac68k + +#ifndef NULL +#if !defined(__cplusplus) && (defined(__SC__) || defined(THINK_C)) +#define NULL ((void *) 0) +#else +#define NULL 0 +#endif +#endif + +enum { + noErr = 0 +}; + +typedef unsigned long KernelProcessID; +typedef unsigned long AddressSpaceID; + +#if 0 +#ifndef __cplusplus +enum { false, true }; +#endif +#endif + +typedef unsigned char Byte; + +typedef signed char SignedByte; + +typedef UInt16 UniChar; + +typedef char *Ptr; + +typedef Ptr *Handle; + +typedef long Fixed; + +typedef Fixed *FixedPtr; + +typedef long Fract; + +typedef Fract *FractPtr; + +struct _extended80 { + short exp; + short man[4]; +}; +struct _extended96 { + short exp[2]; + short man[4]; +}; +typedef struct wide *WidePtr; + +typedef struct UnsignedWide *UnsignedWidePtr; + + +/* +enum { + false, + true +}; +#if !__option(bool) + #ifndef true + #define true 1 + #endif + #ifndef false + #define false 0 + #endif +#endif + +typedef unsigned char Boolean; +*/ + + +typedef short OSErr; + +typedef unsigned int FourCharCode; + +typedef FourCharCode OSType; + +typedef FourCharCode ResType; + +typedef OSType *OSTypePtr; + +typedef ResType *ResTypePtr; + +struct Rect { + short top; + short left; + short bottom; + short right; +}; +typedef struct Rect Rect; + +typedef Rect *RectPtr; + +// Quickdraw.i + +/* + kVariableLengthArray is used in array bounds to specify a variable length array. + It is ususally used in variable length structs when the last field is an array + of any size. Before ANSI C, we used zero as the bounds of variable length + array, but that is illegal in ANSI C. 
Example: + + struct FooList + { + short listLength; + Foo elements[kVariableLengthArray]; + }; +*/ + +enum { + kVariableLengthArray = 1 +}; + +/* Numeric version part of 'vers' resource */ +struct NumVersion { + UInt8 majorRev; /*1st part of version number in BCD*/ + UInt8 minorAndBugRev; /*2nd & 3rd part of version number share a byte*/ + UInt8 stage; /*stage code: dev, alpha, beta, final*/ + UInt8 nonRelRev; /*revision level of non-released version*/ +}; +typedef struct NumVersion NumVersion; + +typedef struct OpaqueRef *KernelID; + +typedef UInt8 *BytePtr; + +typedef UInt32 ByteCount; + +typedef UInt32 ItemCount; + +typedef void *LogicalAddress; + +typedef void *PhysicalAddress; + +typedef UInt32 PBVersion; + +typedef SInt32 Duration; + +#define kInvalidID 0 + +enum { + kNilOptions = 0 +}; + + +typedef unsigned char Str31[32]; + + +/* +From: + File: DriverFamilyMatching.i <18> + Copyright: © 1995-1996 by Apple Computer, Inc., all rights reserved. +*/ + +//############################################## +// Well known properties in the Name Registry +//############################################## + +#define kPropertyName "name" +#define kPropertyCompatible "compatible" +#define kPropertyDriverPtr "driver-ptr" +#define kPropertyDriverDesc "driver-description" +#define kPropertyReg "reg" +#define kPropertyAAPLAddress "AAPL,address" +#define kPropertyMatching "matching" + + +//######################################################### +// Descriptor for Drivers and NDRVs +//######################################################### +/* Driver Typing Information Used to Match Drivers With Devices */ +struct DriverType { + Str31 nameInfoStr; /* Driver Name/Info String*/ + NumVersion version; /* Driver Version Number*/ +}; +typedef struct DriverType DriverType; +typedef DriverType * DriverTypePtr; + +/* OS Runtime Information Used to Setup and Maintain a Driver's Runtime Environment */ +typedef OptionBits RuntimeOptions; + + +enum { + kDriverIsLoadedUponDiscovery = 
0x00000001, /* auto-load driver when discovered*/ + kDriverIsOpenedUponLoad = 0x00000002, /* auto-open driver when loaded*/ + kDriverIsUnderExpertControl = 0x00000004, /* I/O expert handles loads/opens*/ + kDriverIsConcurrent = 0x00000008, /* supports concurrent requests*/ + kDriverQueuesIOPB = 0x00000010, /* device manager doesn't queue IOPB*/ + kDriverIsLoadedAtBoot = 0x00000020, /* Driver is loaded at the boot time */ + kDriverIsForVirtualDevice = 0x00000040, /* Driver is for a virtual Device */ + kDriverSupportDMSuspendAndResume = 0x00000080 /* Driver supports Device Manager Suspend and Resume command */ +}; + +struct DriverOSRuntime { + RuntimeOptions driverRuntime; /* Options for OS Runtime*/ + Str31 driverName; /* Driver's name to the OS*/ + UInt32 driverDescReserved[8]; /* Reserved area*/ +}; +typedef struct DriverOSRuntime DriverOSRuntime; +typedef DriverOSRuntime * DriverOSRuntimePtr; + +/* OS Service Information Used To Declare What APIs a Driver Supports */ +typedef UInt32 ServiceCount; + +struct DriverServiceInfo { + OSType serviceCategory; /* Service Category Name*/ + OSType serviceType; /* Type within Category*/ + NumVersion serviceVersion; /* Version of service*/ +}; +typedef struct DriverServiceInfo DriverServiceInfo; +typedef DriverServiceInfo * DriverServiceInfoPtr; + +struct DriverOSService { + ServiceCount nServices; /* Number of Services Supported*/ + DriverServiceInfo service[1]; /* The List of Services (at least one)*/ +}; +typedef struct DriverOSService DriverOSService; +typedef DriverOSService * DriverOSServicePtr; + +/* Categories */ + +enum { + kServiceCategoryDisplay = 'disp', /* Display Manager*/ + kServiceCategoryOpenTransport = 'otan', /* Open Transport*/ + kServiceCategoryBlockStorage = 'blok', /* Block Storage*/ + kServiceCategoryNdrvDriver = 'ndrv', /* Generic Native Driver*/ + kServiceCategoryScsiSIM = 'scsi', /* SCSI */ + kServiceCategoryFileManager = 'file', /* File Manager */ + kServiceCategoryIDE = 'ide-', /* ide */ + 
kServiceCategoryADB = 'adb-', /* adb */ + kServiceCategoryPCI = 'pci-', /* pci bus */ + /* Nu Bus */ + kServiceCategoryDFM = 'dfm-', /* DFM */ + kServiceCategoryMotherBoard = 'mrbd', /* mother Board */ + kServiceCategoryKeyboard = 'kybd', /* Keyboard */ + kServiceCategoryPointing = 'poit', /* Pointing */ + kServiceCategoryRTC = 'rtc-', /* RTC */ + kServiceCategoryNVRAM = 'nram', /* NVRAM */ + kServiceCategorySound = 'sond', /* Sound (1/3/96 MCS) */ + kServiceCategoryPowerMgt = 'pgmt', /* Power Management */ + kServiceCategoryGeneric = 'genr' /* Generic Service Category to receive general Events */ +}; + +/* Ndrv ServiceCategory Types */ +enum { + kNdrvTypeIsGeneric = 'genr', /* generic*/ + kNdrvTypeIsVideo = 'vido', /* video*/ + kNdrvTypeIsBlockStorage = 'blok', /* block storage*/ + kNdrvTypeIsNetworking = 'netw', /* networking*/ + kNdrvTypeIsSerial = 'serl', /* serial*/ + kNdrvTypeIsParallel = 'parl', /* parallel */ + kNdrvTypeIsSound = 'sond', /* sound*/ + kNdrvTypeIsBusBridge = 'brdg' +}; + +typedef UInt32 DriverDescVersion; + +/* The Driver Description */ +enum { + kInitialDriverDescriptor = 0, + kVersionOneDriverDescriptor = 1 +}; + +enum { + kTheDescriptionSignature = 'mtej', + kDriverDescriptionSignature = 'pdes' +}; + + +struct DriverDescription { + OSType driverDescSignature; /* Signature field of this structure*/ + DriverDescVersion driverDescVersion; /* Version of this data structure*/ + DriverType driverType; /* Type of Driver*/ + DriverOSRuntime driverOSRuntimeInfo; /* OS Runtime Requirements of Driver*/ + DriverOSService driverServices; /* Apple Service API Membership*/ +}; +typedef struct DriverDescription DriverDescription; +typedef DriverDescription * DriverDescriptionPtr; + + +#pragma options align=reset + +#ifdef __cplusplus +} +#endif + +#endif /* __MACTYPES__ */ + +#ifndef __QUICKDRAW__ + +#ifdef __cplusplus +extern "C" { +#endif + +#pragma options align=mac68k + +struct RGBColor { + unsigned short red; /*magnitude of red component*/ + unsigned 
short green; /*magnitude of green component*/ + unsigned short blue; /*magnitude of blue component*/ +}; +typedef struct RGBColor RGBColor; +typedef RGBColor *RGBColorPtr; +typedef RGBColorPtr *RGBColorHdl; + +struct ColorSpec { + short value; /*index or other value*/ + RGBColor rgb; /*true color*/ +}; + +typedef struct ColorSpec ColorSpec; +typedef ColorSpec *ColorSpecPtr; + +struct GammaTbl { + short gVersion; /*gamma version number*/ + short gType; /*gamma data type*/ + short gFormulaSize; /*Formula data size*/ + short gChanCnt; /*number of channels of data*/ + short gDataCnt; /*number of values/channel*/ + short gDataWidth; /*bits/corrected value (data packed to next larger byte size)*/ + short gFormulaData[1]; /*data for formulas followed by gamma values*/ +}; +typedef struct GammaTbl GammaTbl; +typedef GammaTbl *GammaTblPtr; + +#pragma options align=reset + +#ifdef __cplusplus +} +#endif + +#endif /* __QUICKDRAW__ */ + +#endif /* _IOKIT_IOMACOSTYPES_H */ diff --git a/iokit/IOKit/ndrvsupport/IOMacOSVideo.h b/iokit/IOKit/ndrvsupport/IOMacOSVideo.h new file mode 100644 index 000000000..978c4e0fc --- /dev/null +++ b/iokit/IOKit/ndrvsupport/IOMacOSVideo.h @@ -0,0 +1,1236 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: Video.h + + Contains: Video Driver Interfaces. + + Copyright: (c) 1986-2000 by Apple Computer, Inc., all rights reserved + + Bugs?: For bug reports, consult the following page on + the World Wide Web: + + http://developer.apple.com/bugreporter/ + +*/ +#ifndef __IOMACOSVIDEO__ +#define __IOMACOSVIDEO__ + +#ifdef IOKIT + +#define PRAGMA_STRUCT_ALIGN 1 +#define FOUR_CHAR_CODE(x) (x) +#include + +#else /* IOKIT */ + +#ifndef __QUICKDRAW__ +#include +#endif + +#endif /* IOKIT */ + +#if PRAGMA_ONCE +#pragma once +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#if PRAGMA_IMPORT +#pragma import on +#endif + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=mac68k +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(push, 2) +#elif PRAGMA_STRUCT_PACK + #pragma pack(2) +#endif + +enum { + mBaseOffset = 1, /*Id of mBaseOffset.*/ + mRowBytes = 2, /*Video sResource parameter Id's */ + mBounds = 3, /*Video sResource parameter Id's */ + mVersion = 4, /*Video sResource parameter Id's */ + mHRes = 5, /*Video sResource parameter Id's */ + mVRes = 6, /*Video sResource parameter Id's */ + mPixelType = 7, /*Video sResource parameter Id's */ + mPixelSize = 8, /*Video sResource parameter Id's */ + mCmpCount = 9, /*Video sResource parameter Id's */ + mCmpSize = 10, /*Video sResource parameter Id's */ + mPlaneBytes = 11, /*Video sResource parameter Id's */ + mVertRefRate = 14, /*Video sResource parameter Id's */ + mVidParams = 1, /*Video parameter block id.*/ + mTable = 2, /*Offset to the table.*/ + mPageCnt = 3, /*Number of pages*/ + mDevType = 4, /*Device Type*/ + oneBitMode = 128, /*Id of OneBitMode Parameter list.*/ + twoBitMode = 129, /*Id of TwoBitMode Parameter list.*/ + fourBitMode = 130, /*Id of FourBitMode Parameter list.*/ + eightBitMode = 131 /*Id of EightBitMode Parameter list.*/ +}; + +enum { + sixteenBitMode = 132, 
/*Id of SixteenBitMode Parameter list.*/ + thirtyTwoBitMode = 133, /*Id of ThirtyTwoBitMode Parameter list.*/ + firstVidMode = 128, /*The new, better way to do the above. */ + secondVidMode = 129, /* QuickDraw only supports six video */ + thirdVidMode = 130, /* at this time. */ + fourthVidMode = 131, + fifthVidMode = 132, + sixthVidMode = 133, + spGammaDir = 64, + spVidNamesDir = 65 +}; + + +/* csTimingFormat values in VDTimingInfo */ +/* look in the declaration rom for timing info */ +enum { + kDeclROMtables = FOUR_CHAR_CODE('decl'), + kDetailedTimingFormat = FOUR_CHAR_CODE('arba') /* Timing is a detailed timing*/ +}; + +/* Size of a block of EDID (Extended Display Identification Data) */ +enum { + kDDCBlockSize = 128 +}; + +/* ddcBlockType constants*/ +enum { + kDDCBlockTypeEDID = 0 /* EDID block type. */ +}; + +/* ddcFlags constants*/ +enum { + kDDCForceReadBit = 0, /* Force a new read of the EDID. */ + kDDCForceReadMask = (1 << kDDCForceReadBit) /* Mask for kddcForceReadBit. */ +}; + + +/* Timing mode constants for Display Manager MultiMode support + Corresponding .h equates are in Video.h + .a equates are in Video.a + .r equates are in DepVideoEqu.r + + The second enum is the old names (for compatibility). + The first enum is the new names. +*/ +enum { + timingInvalid = 0, /* Unknown timing... force user to confirm. */ + timingInvalid_SM_T24 = 8, /* Work around bug in SM Thunder24 card.*/ + timingApple_FixedRateLCD = 42, /* Lump all fixed-rate LCDs into one category.*/ + timingApple_512x384_60hz = 130, /* 512x384 (60 Hz) Rubik timing. */ + timingApple_560x384_60hz = 135, /* 560x384 (60 Hz) Rubik-560 timing. */ + timingApple_640x480_67hz = 140, /* 640x480 (67 Hz) HR timing. */ + timingApple_640x400_67hz = 145, /* 640x400 (67 Hz) HR-400 timing. */ + timingVESA_640x480_60hz = 150, /* 640x480 (60 Hz) VGA timing. */ + timingVESA_640x480_72hz = 152, /* 640x480 (72 Hz) VGA timing. */ + timingVESA_640x480_75hz = 154, /* 640x480 (75 Hz) VGA timing. 
*/ + timingVESA_640x480_85hz = 158, /* 640x480 (85 Hz) VGA timing. */ + timingGTF_640x480_120hz = 159, /* 640x480 (120 Hz) VESA Generalized Timing Formula */ + timingApple_640x870_75hz = 160, /* 640x870 (75 Hz) FPD timing.*/ + timingApple_640x818_75hz = 165, /* 640x818 (75 Hz) FPD-818 timing.*/ + timingApple_832x624_75hz = 170, /* 832x624 (75 Hz) GoldFish timing.*/ + timingVESA_800x600_56hz = 180, /* 800x600 (56 Hz) SVGA timing. */ + timingVESA_800x600_60hz = 182, /* 800x600 (60 Hz) SVGA timing. */ + timingVESA_800x600_72hz = 184, /* 800x600 (72 Hz) SVGA timing. */ + timingVESA_800x600_75hz = 186, /* 800x600 (75 Hz) SVGA timing. */ + timingVESA_800x600_85hz = 188, /* 800x600 (85 Hz) SVGA timing. */ + timingVESA_1024x768_60hz = 190, /* 1024x768 (60 Hz) VESA 1K-60Hz timing. */ + timingVESA_1024x768_70hz = 200, /* 1024x768 (70 Hz) VESA 1K-70Hz timing. */ + timingVESA_1024x768_75hz = 204, /* 1024x768 (75 Hz) VESA 1K-75Hz timing (very similar to timingApple_1024x768_75hz). */ + timingVESA_1024x768_85hz = 208, /* 1024x768 (85 Hz) VESA timing. */ + timingApple_1024x768_75hz = 210, /* 1024x768 (75 Hz) Apple 19" RGB. */ + timingApple_1152x870_75hz = 220, /* 1152x870 (75 Hz) Apple 21" RGB. */ + timingAppleNTSC_ST = 230, /* 512x384 (60 Hz, interlaced, non-convolved). */ + timingAppleNTSC_FF = 232, /* 640x480 (60 Hz, interlaced, non-convolved). */ + timingAppleNTSC_STconv = 234, /* 512x384 (60 Hz, interlaced, convolved). */ + timingAppleNTSC_FFconv = 236, /* 640x480 (60 Hz, interlaced, convolved). */ + timingApplePAL_ST = 238, /* 640x480 (50 Hz, interlaced, non-convolved). */ + timingApplePAL_FF = 240, /* 768x576 (50 Hz, interlaced, non-convolved). */ + timingApplePAL_STconv = 242, /* 640x480 (50 Hz, interlaced, convolved). */ + timingApplePAL_FFconv = 244, /* 768x576 (50 Hz, interlaced, convolved). 
*/ + timingVESA_1280x960_75hz = 250, /* 1280x960 (75 Hz) */ + timingVESA_1280x960_60hz = 252, /* 1280x960 (60 Hz) */ + timingVESA_1280x960_85hz = 254, /* 1280x960 (85 Hz) */ + timingVESA_1280x1024_60hz = 260, /* 1280x1024 (60 Hz) */ + timingVESA_1280x1024_75hz = 262, /* 1280x1024 (75 Hz) */ + timingVESA_1280x1024_85hz = 268, /* 1280x1024 (85 Hz) */ + timingVESA_1600x1200_60hz = 280, /* 1600x1200 (60 Hz) VESA timing. */ + timingVESA_1600x1200_65hz = 282, /* 1600x1200 (65 Hz) VESA timing. */ + timingVESA_1600x1200_70hz = 284, /* 1600x1200 (70 Hz) VESA timing. */ + timingVESA_1600x1200_75hz = 286, /* 1600x1200 (75 Hz) VESA timing (pixel clock is 189.2 Mhz dot clock). */ + timingVESA_1600x1200_80hz = 288, /* 1600x1200 (80 Hz) VESA timing (pixel clock is 216>? Mhz dot clock) - proposed only. */ + timingVESA_1600x1200_85hz = 289, /* 1600x1200 (85 Hz) VESA timing (pixel clock is 229.5 Mhz dot clock). */ + timingVESA_1792x1344_60hz = 296, /* 1792x1344 (60 Hz) VESA timing (204.75 Mhz dot clock). */ + timingVESA_1792x1344_75hz = 298, /* 1792x1344 (75 Hz) VESA timing (261.75 Mhz dot clock). */ + timingVESA_1856x1392_60hz = 300, /* 1856x1392 (60 Hz) VESA timing (218.25 Mhz dot clock). */ + timingVESA_1856x1392_75hz = 302, /* 1856x1392 (75 Hz) VESA timing (288 Mhz dot clock). */ + timingVESA_1920x1440_60hz = 304, /* 1920x1440 (60 Hz) VESA timing (234 Mhz dot clock). */ + timingVESA_1920x1440_75hz = 306, /* 1920x1440 (75 Hz) VESA timing (297 Mhz dot clock). */ + timingSMPTE240M_60hz = 400, /* 60Hz V, 33.75KHz H, interlaced timing, 16:9 aspect, typical resolution of 1920x1035. */ + timingFilmRate_48hz = 410, /* 48Hz V, 25.20KHz H, non-interlaced timing, typical resolution of 640x480. */ + timingSony_1600x1024_76hz = 500, /* 1600x1024 (76 Hz) Sony timing (pixel clock is 170.447 Mhz dot clock). */ + timingSony_1920x1080_60hz = 510, /* 1920x1080 (60 Hz) Sony timing (pixel clock is 159.84 Mhz dot clock). 
*/ + timingSony_1920x1080_72hz = 520, /* 1920x1080 (72 Hz) Sony timing (pixel clock is 216.023 Mhz dot clock). */ + timingSony_1920x1200_76hz = 540, /* 1900x1200 (76 Hz) Sony timing (pixel clock is 243.20 Mhz dot clock). */ + timingApple_0x0_0hz_Offline = 550 /* Indicates that this timing will take the display off-line and remove it from the system. */ +}; + + +/* Deprecated timing names.*/ +enum { + timingApple12 = timingApple_512x384_60hz, + timingApple12x = timingApple_560x384_60hz, + timingApple13 = timingApple_640x480_67hz, + timingApple13x = timingApple_640x400_67hz, + timingAppleVGA = timingVESA_640x480_60hz, + timingApple15 = timingApple_640x870_75hz, + timingApple15x = timingApple_640x818_75hz, + timingApple16 = timingApple_832x624_75hz, + timingAppleSVGA = timingVESA_800x600_56hz, + timingApple1Ka = timingVESA_1024x768_60hz, + timingApple1Kb = timingVESA_1024x768_70hz, + timingApple19 = timingApple_1024x768_75hz, + timingApple21 = timingApple_1152x870_75hz, + timingSony_1900x1200_74hz = 530, /* 1900x1200 (74 Hz) Sony timing (pixel clock is 236.25 Mhz dot clock). */ + timingSony_1900x1200_76hz = timingSony_1920x1200_76hz /* 1900x1200 (76 Hz) Sony timing (pixel clock is 245.48 Mhz dot clock). */ +}; + +/* csConnectFlags values in VDDisplayConnectInfo */ +enum { + kAllModesValid = 0, /* All modes not trimmed by primary init are good close enough to try */ + kAllModesSafe = 1, /* All modes not trimmed by primary init are know to be safe */ + kReportsTagging = 2, /* Can detect tagged displays (to identify smart monitors) */ + kHasDirectConnection = 3, /* True implies that driver can talk directly to device (e.g. serial data link via sense lines) */ + kIsMonoDev = 4, /* Says whether there's an RGB (0) or Monochrome (1) connection. */ + kUncertainConnection = 5, /* There may not be a display (no sense lines?). */ + kTaggingInfoNonStandard = 6, /* Set when csConnectTaggedType/csConnectTaggedData are non-standard (i.e., not the Apple CRT sense codes). 
*/ + kReportsDDCConnection = 7, /* Card can do ddc (set kHasDirectConnect && kHasDDCConnect if you actually found a ddc display). */ + kHasDDCConnection = 8, /* Card has ddc connect now. */ + kConnectionInactive = 9, /* Set when the connection is NOT currently active (generally used in a multiconnection environment). */ + kDependentConnection = 10, /* Set when some ascpect of THIS connection depends on another (will generally be set in a kModeSimulscan environment). */ + kBuiltInConnection = 11, /* Set when connection is KNOWN to be built-in (this is not the same as kHasDirectConnection). */ + kOverrideConnection = 12, /* Set when the reported connection is not the true one, but is one that has been forced through a SetConnection call */ + kFastCheckForDDC = 13, /* Set when all 3 are true: 1) sense codes indicate DDC display could be attached 2) attempted fast check 3) DDC failed */ + kReportsHotPlugging = 14 /* Detects and reports hot pluggging on connector (via VSL also implies DDC will be up to date w/o force read) */ +}; + + +/* csDisplayType values in VDDisplayConnectInfo */ +enum { + kUnknownConnect = 1, /* Not sure how we'll use this, but seems like a good idea. */ + kPanelConnect = 2, /* For use with fixed-in-place LCD panels. */ + kPanelTFTConnect = 2, /* Alias for kPanelConnect */ + kFixedModeCRTConnect = 3, /* For use with fixed-mode (i.e., very limited range) displays. 
*/ + kMultiModeCRT1Connect = 4, /* 320x200 maybe, 12" maybe, 13" (default), 16" certain, 19" maybe, 21" maybe */ + kMultiModeCRT2Connect = 5, /* 320x200 maybe, 12" maybe, 13" certain, 16" (default), 19" certain, 21" maybe */ + kMultiModeCRT3Connect = 6, /* 320x200 maybe, 12" maybe, 13" certain, 16" certain, 19" default, 21" certain */ + kMultiModeCRT4Connect = 7, /* Expansion to large multi mode (not yet used) */ + kModelessConnect = 8, /* Expansion to modeless model (not yet used) */ + kFullPageConnect = 9, /* 640x818 (to get 8bpp in 512K case) and 640x870 (these two only) */ + kVGAConnect = 10, /* 640x480 VGA default -- question everything else */ + kNTSCConnect = 11, /* NTSC ST (default), FF, STconv, FFconv */ + kPALConnect = 12, /* PAL ST (default), FF, STconv, FFconv */ + kHRConnect = 13, /* Straight-6 connect -- 640x480 and 640x400 (to get 8bpp in 256K case) (these two only) */ + kPanelFSTNConnect = 14, /* For use with fixed-in-place LCD FSTN (aka "Supertwist") panels */ + kMonoTwoPageConnect = 15, /* 1152x870 Apple color two-page display */ + kColorTwoPageConnect = 16, /* 1152x870 Apple B&W two-page display */ + kColor16Connect = 17, /* 832x624 Apple B&W two-page display */ + kColor19Connect = 18, /* 1024x768 Apple B&W two-page display */ + kGenericCRT = 19, /* Indicates nothing except that connection is CRT in nature. */ + kGenericLCD = 20, /* Indicates nothing except that connection is LCD in nature. */ + kDDCConnect = 21, /* DDC connection, always set kHasDDCConnection */ + kNoConnect = 22 /* No display is connected - load sensing or similar level of hardware detection is assumed (used by resident drivers that support hot plugging when nothing is currently connected) */ +}; + +/* csTimingFlags values in VDTimingInfoRec */ +enum { + kModeValid = 0, /* Says that this mode should NOT be trimmed. 
*/ + kModeSafe = 1, /* This mode does not need confirmation */ + kModeDefault = 2, /* This is the default mode for this type of connection */ + kModeShowNow = 3, /* This mode should always be shown (even though it may require a confirm) */ + kModeNotResize = 4, /* This mode should not be used to resize the display (eg. mode selects a different connector on card) */ + kModeRequiresPan = 5, /* This mode has more pixels than are actually displayed */ + kModeInterlaced = 6, /* This mode is interlaced (single pixel lines look bad). */ + kModeShowNever = 7, /* This mode should not be shown in the user interface. */ + kModeSimulscan = 8, /* Indicates that more than one display connection can be driven from a single framebuffer controller. */ + kModeNotPreset = 9, /* Indicates that the timing is not a factory preset for the current display (geometry may need correction) */ + kModeBuiltIn = 10, /* Indicates that the display mode is for the built-in connect only (on multiconnect devices like the PB 3400) Only the driver is queried */ + kModeStretched = 11 /* Indicates that the display mode will be stretched/distorted to match the display aspect ratio */ +}; + +/* csDepthFlags in VDVideoParametersInfoRec */ +enum { + kDepthDependent = 0 /* Says that this depth mode may cause dependent changes in other framebuffers (and . */ +}; + +/* csResolutionFlags bit flags for VDResolutionInfoRec */ +enum { + kResolutionHasMultipleDepthSizes = 0 /* Says that this mode has different csHorizontalPixels, csVerticalLines at different depths (usually slightly larger at lower depths) */ +}; + + +enum { + /* Power Mode constants for VDPowerStateRec.powerState. 
Note the numeric order does not match the power state order */ + kAVPowerOff = 0, /* Power fully off*/ + kAVPowerStandby = 1, + kAVPowerSuspend = 2, + kAVPowerOn = 3, + kHardwareSleep = 128, + kHardwareWake = 129, + kHardwareWakeFromSuspend = 130, + kHardwareWakeToDoze = 131, + kHardwareWakeToDozeFromSuspend = 132 +}; + +enum { + /* Power Mode masks and bits for VDPowerStateRec.powerFlags. */ + kPowerStateNeedsRefresh = 0, /* When leaving this power mode, a display will need refreshing */ + kPowerStateSleepAwareBit = 1, /* if gestaltPCCardDockingSelectorFix, Docking mgr checks this bit before checking kPowerStateSleepAllowedBit */ + kPowerStateSleepForbiddenBit = 2, /* if kPowerStateSleepAwareBit, Docking mgr checks this bit before sleeping */ + kPowerStateSleepCanPowerOffBit = 3, /* supports power down sleep (ie PCI power off)*/ + kPowerStateSleepNoDPMSBit = 4, /* Bug #2425210. Do not use DPMS with this display.*/ + kPowerStateSleepWaketoDozeBit = 5, /* Supports Wake to Doze */ + kPowerStateNeedsRefreshMask = (1L << kPowerStateNeedsRefresh), + kPowerStateSleepAwareMask = (1L << kPowerStateSleepAwareBit), + kPowerStateSleepForbiddenMask = (1L << kPowerStateSleepForbiddenBit), + kPowerStateSleepCanPowerOffMask = (1L << kPowerStateSleepCanPowerOffBit), + kPowerStateSleepNoDPMSMask = (1L << kPowerStateSleepNoDPMSBit), + kPowerStateSleepWaketoDozeMask = (1L << kPowerStateSleepWaketoDozeBit) +}; + + +enum { + /* Control Codes */ + cscReset = 0, + cscKillIO = 1, + cscSetMode = 2, + cscSetEntries = 3, + cscSetGamma = 4, + cscGrayPage = 5, + cscGrayScreen = 5, + cscSetGray = 6, + cscSetInterrupt = 7, + cscDirectSetEntries = 8, + cscSetDefaultMode = 9, + cscSwitchMode = 10, + cscSetSync = 11, + cscSavePreferredConfiguration = 16, + cscSetHardwareCursor = 22, + cscDrawHardwareCursor = 23, + cscSetConvolution = 24, + cscSetPowerState = 25, + cscPrivateControlCall = 26, /* Takes a VDPrivateSelectorDataRec*/ + cscSetMultiConnect = 28, /* From a GDI point of view, this call 
should be implemented completely in the HAL and not at all in the core.*/ + cscSetClutBehavior = 29, /* Takes a VDClutBehavior */ + cscSetDetailedTiming = 31, /* Takes a VDDetailedTimingPtr */ + cscDoCommunication = 33, /* Takes a VDCommunicationPtr */ + cscUnusedCall = 127 /* This call used to expend the scrn resource. Its imbedded data contains more control info */ +}; + +enum { + /* Status Codes */ + cscGetMode = 2, + cscGetEntries = 3, + cscGetPageCnt = 4, + cscGetPages = 4, /* This is what C&D 2 calls it. */ + cscGetPageBase = 5, + cscGetBaseAddr = 5, /* This is what C&D 2 calls it. */ + cscGetGray = 6, + cscGetInterrupt = 7, + cscGetGamma = 8, + cscGetDefaultMode = 9, + cscGetCurMode = 10, + cscGetSync = 11, + cscGetConnection = 12, /* Return information about the connection to the display */ + cscGetModeTiming = 13, /* Return timing info for a mode */ + cscGetModeBaseAddress = 14, /* Return base address information about a particular mode */ + cscGetScanProc = 15, /* QuickTime scan chasing routine */ + cscGetPreferredConfiguration = 16, + cscGetNextResolution = 17, + cscGetVideoParameters = 18, + cscGetGammaInfoList = 20, + cscRetrieveGammaTable = 21, + cscSupportsHardwareCursor = 22, + cscGetHardwareCursorDrawState = 23, + cscGetConvolution = 24, + cscGetPowerState = 25, + cscPrivateStatusCall = 26, /* Takes a VDPrivateSelectorDataRec*/ + cscGetDDCBlock = 27, /* Takes a VDDDCBlockRec */ + cscGetMultiConnect = 28, /* From a GDI point of view, this call should be implemented completely in the HAL and not at all in the core.*/ + cscGetClutBehavior = 29, /* Takes a VDClutBehavior */ + cscGetTimingRanges = 30, /* Takes a VDDisplayTimingRangePtr */ + cscGetDetailedTiming = 31, /* Takes a VDDetailedTimingPtr */ + cscGetCommunicationInfo = 32 /* Takes a VDCommunicationInfoPtr */ +}; + +/* Bit definitions for the Get/Set Sync call*/ +enum { + kDisableHorizontalSyncBit = 0, + kDisableVerticalSyncBit = 1, + kDisableCompositeSyncBit = 2, + kEnableSyncOnBlue = 3, + 
kEnableSyncOnGreen = 4, + kEnableSyncOnRed = 5, + kNoSeparateSyncControlBit = 6, + kTriStateSyncBit = 7, + kHorizontalSyncMask = 0x01, + kVerticalSyncMask = 0x02, + kCompositeSyncMask = 0x04, + kDPMSSyncMask = 0x07, + kTriStateSyncMask = 0x80, + kSyncOnBlueMask = 0x08, + kSyncOnGreenMask = 0x10, + kSyncOnRedMask = 0x20, + kSyncOnMask = 0x38 +}; + +enum { + /* Power Mode constants for translating DPMS modes to Get/SetSync calls. */ + kDPMSSyncOn = 0, + kDPMSSyncStandby = 1, + kDPMSSyncSuspend = 2, + kDPMSSyncOff = 7 +}; + +/* Bit definitions for the Get/Set Convolution call*/ +enum { + kConvolved = 0, + kLiveVideoPassThru = 1, + kConvolvedMask = 0x01, + kLiveVideoPassThruMask = 0x02 +}; + + + +struct VPBlock { + long vpBaseOffset; /*Offset to page zero of video RAM (From minorBaseOS).*/ + short vpRowBytes; /*Width of each row of video memory.*/ + Rect vpBounds; /*BoundsRect for the video display (gives dimensions).*/ + short vpVersion; /*PixelMap version number.*/ + short vpPackType; + long vpPackSize; + long vpHRes; /*Horizontal resolution of the device (pixels per inch).*/ + long vpVRes; /*Vertical resolution of the device (pixels per inch).*/ + short vpPixelType; /*Defines the pixel type.*/ + short vpPixelSize; /*Number of bits in pixel.*/ + short vpCmpCount; /*Number of components in pixel.*/ + short vpCmpSize; /*Number of bits per component*/ + long vpPlaneBytes; /*Offset from one plane to the next.*/ +}; +typedef struct VPBlock VPBlock; +typedef VPBlock * VPBlockPtr; + +struct VDEntryRecord { + Ptr csTable; /*(long) pointer to color table entry=value, r,g,b:INTEGER*/ +}; +typedef struct VDEntryRecord VDEntryRecord; + +typedef VDEntryRecord * VDEntRecPtr; +/* Parm block for SetGray control call */ + +struct VDGrayRecord { + Boolean csMode; /*Same as GDDevType value (0=color, 1=mono)*/ + SInt8 filler; +}; +typedef struct VDGrayRecord VDGrayRecord; + +typedef VDGrayRecord * VDGrayPtr; +/* Parm block for SetInterrupt call */ + +struct VDFlagRecord { + SInt8 
csMode; + SInt8 filler; +}; +typedef struct VDFlagRecord VDFlagRecord; + +typedef VDFlagRecord * VDFlagRecPtr; +/* Parm block for SetEntries control call */ + +struct VDSetEntryRecord { + ColorSpec * csTable; /*Pointer to an array of color specs*/ + short csStart; /*Which spec in array to start with, or -1*/ + short csCount; /*Number of color spec entries to set*/ +}; +typedef struct VDSetEntryRecord VDSetEntryRecord; + +typedef VDSetEntryRecord * VDSetEntryPtr; +/* Parm block for SetGamma control call */ + +struct VDGammaRecord { + Ptr csGTable; /*pointer to gamma table*/ +}; +typedef struct VDGammaRecord VDGammaRecord; + +typedef VDGammaRecord * VDGamRecPtr; + +struct VDBaseAddressInfoRec { + long csDevData; /* LONGINT - (long) timing mode */ + long csDevBase; /* LONGINT - (long) base address of the mode */ + short csModeReserved; /* INTEGER - (short) will some day be the depth */ + long csModeBase; /* LONGINT - (long) reserved */ +}; +typedef struct VDBaseAddressInfoRec VDBaseAddressInfoRec; + +typedef VDBaseAddressInfoRec * VDBaseAddressInfoPtr; + +struct VDSwitchInfoRec { + unsigned short csMode; /*(word) mode depth*/ + unsigned long csData; /*(long) functional sResource of mode*/ + unsigned short csPage; /*(word) page to switch in*/ + Ptr csBaseAddr; /*(long) base address of page (return value)*/ + unsigned long csReserved; /*(long) Reserved (set to 0) */ +}; +typedef struct VDSwitchInfoRec VDSwitchInfoRec; + +typedef VDSwitchInfoRec * VDSwitchInfoPtr; + +struct VDTimingInfoRec { + unsigned long csTimingMode; /* LONGINT - (long) timing mode (a la InitGDevice) */ + unsigned long csTimingReserved; /* LONGINT - (long) reserved */ + unsigned long csTimingFormat; /* LONGINT - (long) what format is the timing info */ + unsigned long csTimingData; /* LONGINT - (long) data supplied by driver */ + unsigned long csTimingFlags; /* LONGINT - (long) mode within device */ +}; +typedef struct VDTimingInfoRec VDTimingInfoRec; + +typedef VDTimingInfoRec * VDTimingInfoPtr; + 
+struct VDDisplayConnectInfoRec { + unsigned short csDisplayType; /* INTEGER - (word) Type of display connected */ + unsigned char csConnectTaggedType; /* BYTE - type of tagging */ + unsigned char csConnectTaggedData; /* BYTE - tagging data */ + unsigned long csConnectFlags; /* LONGINT - (long) tell us about the connection */ + unsigned long csDisplayComponent; /* LONGINT - (long) if the card has a direct connection to the display, it returns the display component here (FUTURE) */ + unsigned long csConnectReserved; /* LONGINT - (long) reserved */ +}; +typedef struct VDDisplayConnectInfoRec VDDisplayConnectInfoRec; + +typedef VDDisplayConnectInfoRec * VDDisplayConnectInfoPtr; + +struct VDMultiConnectInfoRec { + unsigned long csDisplayCountOrNumber; /* For GetMultiConnect, returns count n of 1..n connections; otherwise, indicates the ith connection.*/ + VDDisplayConnectInfoRec csConnectInfo; /* Standard VDDisplayConnectionInfo for connection i.*/ +}; +typedef struct VDMultiConnectInfoRec VDMultiConnectInfoRec; + +typedef VDMultiConnectInfoRec * VDMultiConnectInfoPtr; +/* RawSenseCode + This abstract data type is not exactly abstract. Rather, it is merely enumerated constants + for the possible raw sense code values when 'standard' sense code hardware is implemented. + + For 'standard' sense code hardware, the raw sense is obtained as follows: + o Instruct the frame buffer controller NOT to actively drive any of the monitor sense lines + o Read the state of the monitor sense lines 2, 1, and 0. (2 is the MSB, 0 the LSB) + + IMPORTANT Note: + When the 'kTaggingInfoNonStandard' bit of 'csConnectFlags' is FALSE, then these constants + are valid 'csConnectTaggedType' values in 'VDDisplayConnectInfo' + +*/ +typedef unsigned char RawSenseCode; +enum { + kRSCZero = 0, + kRSCOne = 1, + kRSCTwo = 2, + kRSCThree = 3, + kRSCFour = 4, + kRSCFive = 5, + kRSCSix = 6, + kRSCSeven = 7 +}; + + +/* ExtendedSenseCode + This abstract data type is not exactly abstract. 
Rather, it is merely enumerated constants + for the values which are possible when the extended sense algorithm is applied to hardware + which implements 'standard' sense code hardware. + + For 'standard' sense code hardware, the extended sense code algorithm is as follows: + (Note: as described here, sense line 'A' corresponds to '2', 'B' to '1', and 'C' to '0') + o Drive sense line 'A' low and read the values of 'B' and 'C'. + o Drive sense line 'B' low and read the values of 'A' and 'C'. + o Drive sense line 'C' low and read the values of 'A' and 'B'. + + In this way, a six-bit number of the form BC/AC/AB is generated. + + IMPORTANT Note: + When the 'kTaggingInfoNonStandard' bit of 'csConnectFlags' is FALSE, then these constants + are valid 'csConnectTaggedData' values in 'VDDisplayConnectInfo' + +*/ +typedef unsigned char ExtendedSenseCode; +enum { + kESCZero21Inch = 0x00, /* 21" RGB */ + kESCOnePortraitMono = 0x14, /* Portrait Monochrome */ + kESCTwo12Inch = 0x21, /* 12" RGB */ + kESCThree21InchRadius = 0x31, /* 21" RGB (Radius) */ + kESCThree21InchMonoRadius = 0x34, /* 21" Monochrome (Radius) */ + kESCThree21InchMono = 0x35, /* 21" Monochrome */ + kESCFourNTSC = 0x0A, /* NTSC */ + kESCFivePortrait = 0x1E, /* Portrait RGB */ + kESCSixMSB1 = 0x03, /* MultiScan Band-1 (12" thru 16") */ + kESCSixMSB2 = 0x0B, /* MultiScan Band-2 (13" thru 19") */ + kESCSixMSB3 = 0x23, /* MultiScan Band-3 (13" thru 21") */ + kESCSixStandard = 0x2B, /* 13"/14" RGB or 12" Monochrome */ + kESCSevenPAL = 0x00, /* PAL */ + kESCSevenNTSC = 0x14, /* NTSC */ + kESCSevenVGA = 0x17, /* VGA */ + kESCSeven16Inch = 0x2D, /* 16" RGB (GoldFish) */ + kESCSevenPALAlternate = 0x30, /* PAL (Alternate) */ + kESCSeven19Inch = 0x3A, /* Third-Party 19" */ + kESCSevenDDC = 0x3E, /* DDC display */ + kESCSevenNoDisplay = 0x3F /* No display connected */ +}; + +/* DepthMode + This abstract data type is used to reference RELATIVE pixel depths. 
+ Its definition is largely derived from its past usage, analogous to 'xxxVidMode' + + Bits per pixel DOES NOT directly map to 'DepthMode' For example, on some + graphics hardware, 'kDepthMode1' may represent 1 BPP, whereas on other + hardware, 'kDepthMode1' may represent 8BPP. + + DepthMode IS considered to be ordinal, i.e., operations such as <, >, ==, etc. + behave as expected. The values of the constants which comprise the set are such + that 'kDepthMode4 < kDepthMode6' behaves as expected. +*/ +typedef unsigned short DepthMode; +enum { + kDepthMode1 = 128, + kDepthMode2 = 129, + kDepthMode3 = 130, + kDepthMode4 = 131, + kDepthMode5 = 132, + kDepthMode6 = 133 +}; + +enum { + kFirstDepthMode = 128, /* These constants are obsolete, and just included */ + kSecondDepthMode = 129, /* for clients that have converted to the above */ + kThirdDepthMode = 130, /* kDepthModeXXX constants. */ + kFourthDepthMode = 131, + kFifthDepthMode = 132, + kSixthDepthMode = 133 +}; + + + +struct VDPageInfo { + short csMode; /*(word) mode within device*/ + long csData; /*(long) data supplied by driver*/ + short csPage; /*(word) page to switch in*/ + Ptr csBaseAddr; /*(long) base address of page*/ +}; +typedef struct VDPageInfo VDPageInfo; + +typedef VDPageInfo * VDPgInfoPtr; + +struct VDSizeInfo { + short csHSize; /*(word) desired/returned h size*/ + short csHPos; /*(word) desired/returned h position*/ + short csVSize; /*(word) desired/returned v size*/ + short csVPos; /*(word) desired/returned v position*/ +}; +typedef struct VDSizeInfo VDSizeInfo; + +typedef VDSizeInfo * VDSzInfoPtr; + +struct VDSettings { + short csParamCnt; /*(word) number of params*/ + short csBrightMax; /*(word) max brightness*/ + short csBrightDef; /*(word) default brightness*/ + short csBrightVal; /*(word) current brightness*/ + short csCntrstMax; /*(word) max contrast*/ + short csCntrstDef; /*(word) default contrast*/ + short csCntrstVal; /*(word) current contrast*/ + short csTintMax; /*(word) max tint*/ + 
short csTintDef; /*(word) default tint*/ + short csTintVal; /*(word) current tint*/ + short csHueMax; /*(word) max hue*/ + short csHueDef; /*(word) default hue*/ + short csHueVal; /*(word) current hue*/ + short csHorizDef; /*(word) default horizontal*/ + short csHorizVal; /*(word) current horizontal*/ + short csHorizMax; /*(word) max horizontal*/ + short csVertDef; /*(word) default vertical*/ + short csVertVal; /*(word) current vertical*/ + short csVertMax; /*(word) max vertical*/ +}; +typedef struct VDSettings VDSettings; +typedef VDSettings * VDSettingsPtr; + +struct VDDefMode { + UInt8 csID; + SInt8 filler; +}; +typedef struct VDDefMode VDDefMode; +typedef VDDefMode * VDDefModePtr; + +struct VDSyncInfoRec { + UInt8 csMode; + UInt8 csFlags; +}; +typedef struct VDSyncInfoRec VDSyncInfoRec; + +typedef VDSyncInfoRec * VDSyncInfoPtr; +typedef UInt32 AVIDType; +typedef AVIDType DisplayIDType; +typedef UInt32 DisplayModeID; +typedef UInt32 VideoDeviceType; +typedef UInt32 GammaTableID; +/* + All displayModeID values from 0x80000000 to 0xFFFFFFFF and 0x00 + are reserved for Apple Computer. 
+*/ +/* Constants for the cscGetNextResolution call */ +enum { + kDisplayModeIDCurrent = 0x00, /* Reference the Current DisplayModeID */ + kDisplayModeIDInvalid = (long)0xFFFFFFFF, /* A bogus DisplayModeID in all cases */ + kDisplayModeIDFindFirstResolution = (long)0xFFFFFFFE, /* Used in cscGetNextResolution to reset iterator */ + kDisplayModeIDNoMoreResolutions = (long)0xFFFFFFFD, /* Used in cscGetNextResolution to indicate End Of List */ + kDisplayModeIDFindFirstProgrammable = (long)0xFFFFFFFC, /* Used in cscGetNextResolution to find unused programmable timing */ + kDisplayModeIDBootProgrammable = (long)0xFFFFFFFB, /* This is the ID given at boot time by the OF driver to a programmable timing */ + kDisplayModeIDReservedBase = (long)0x80000000 /* Lowest (unsigned) DisplayModeID reserved by Apple */ +}; + +/* Constants for the GetGammaInfoList call */ +enum { + kGammaTableIDFindFirst = (long)0xFFFFFFFE, /* Get the first gamma table ID */ + kGammaTableIDNoMoreTables = (long)0xFFFFFFFD, /* Used to indicate end of list */ + kGammaTableIDSpecific = 0x00 /* Return the info for the given table id */ +}; + +/* Constants for GetMultiConnect call*/ +enum { + kGetConnectionCount = (long)0xFFFFFFFF, /* Used to get the number of possible connections in a "multi-headed" framebuffer environment.*/ + kActivateConnection = (0 << kConnectionInactive), /* Used for activating a connection (csConnectFlags value).*/ + kDeactivateConnection = (1 << kConnectionInactive) /* Used for deactivating a connection (csConnectFlags value.)*/ +}; + +/* VDCommunicationRec.csBusID values*/ +enum { + kVideoDefaultBus = 0 +}; + + +/* VDCommunicationInfoRec.csBusType values*/ +enum { + kVideoBusTypeInvalid = 0, + kVideoBusTypeI2C = 1 +}; + + +/* VDCommunicationRec.csSendType and VDCommunicationRec.csReplyType values*/ +enum { + kVideoNoTransactionType = 0, /* No transaction*/ + kVideoSimpleI2CType = 1, /* Simple I2C message*/ + kVideoDDCciReplyType = 2 /* DDC/ci message (with imbedded length)*/ +}; + + 
+ + +struct VDResolutionInfoRec { + DisplayModeID csPreviousDisplayModeID; /* ID of the previous resolution in a chain */ + DisplayModeID csDisplayModeID; /* ID of the next resolution */ + unsigned long csHorizontalPixels; /* # of pixels in a horizontal line at the max depth */ + unsigned long csVerticalLines; /* # of lines in a screen at the max depth */ + Fixed csRefreshRate; /* Vertical Refresh Rate in Hz */ + DepthMode csMaxDepthMode; /* 0x80-based number representing max bit depth */ + unsigned long csResolutionFlags; /* Reserved - flag bits */ + unsigned long csReserved; /* Reserved */ +}; +typedef struct VDResolutionInfoRec VDResolutionInfoRec; + +typedef VDResolutionInfoRec * VDResolutionInfoPtr; + +struct VDVideoParametersInfoRec { + DisplayModeID csDisplayModeID; /* the ID of the resolution we want info on */ + DepthMode csDepthMode; /* The bit depth we want the info on (0x80 based) */ + VPBlockPtr csVPBlockPtr; /* Pointer to a video parameter block */ + unsigned long csPageCount; /* Number of pages supported by the resolution */ + VideoDeviceType csDeviceType; /* Device Type: Direct, Fixed or CLUT; */ + UInt32 csDepthFlags; /* Flags */ +}; +typedef struct VDVideoParametersInfoRec VDVideoParametersInfoRec; + +typedef VDVideoParametersInfoRec * VDVideoParametersInfoPtr; + +struct VDGammaInfoRec { + GammaTableID csLastGammaID; /* the ID of the previous gamma table */ + GammaTableID csNextGammaID; /* the ID of the next gamma table */ + Ptr csGammaPtr; /* Ptr to a gamma table data */ + unsigned long csReserved; /* Reserved */ +}; +typedef struct VDGammaInfoRec VDGammaInfoRec; + +typedef VDGammaInfoRec * VDGammaInfoPtr; + +struct VDGetGammaListRec { + GammaTableID csPreviousGammaTableID; /* ID of the previous gamma table */ + GammaTableID csGammaTableID; /* ID of the gamma table following csPreviousDisplayModeID */ + unsigned long csGammaTableSize; /* Size of the gamma table in bytes */ + char * csGammaTableName; /* Gamma table name (c-string) */ +}; +typedef 
struct VDGetGammaListRec VDGetGammaListRec; + +typedef VDGetGammaListRec * VDGetGammaListPtr; + +struct VDRetrieveGammaRec { + GammaTableID csGammaTableID; /* ID of gamma table to retrieve */ + GammaTbl * csGammaTablePtr; /* Location to copy desired gamma to */ +}; +typedef struct VDRetrieveGammaRec VDRetrieveGammaRec; + +typedef VDRetrieveGammaRec * VDRetrieveGammaPtr; + +struct VDSetHardwareCursorRec { + void * csCursorRef; /* reference to cursor data */ + UInt32 csReserved1; /* reserved for future use */ + UInt32 csReserved2; /* should be ignored */ +}; +typedef struct VDSetHardwareCursorRec VDSetHardwareCursorRec; + +typedef VDSetHardwareCursorRec * VDSetHardwareCursorPtr; + +struct VDDrawHardwareCursorRec { + SInt32 csCursorX; /* x coordinate */ + SInt32 csCursorY; /* y coordinate */ + UInt32 csCursorVisible; /* true if cursor is must be visible */ + UInt32 csReserved1; /* reserved for future use */ + UInt32 csReserved2; /* should be ignored */ +}; +typedef struct VDDrawHardwareCursorRec VDDrawHardwareCursorRec; + +typedef VDDrawHardwareCursorRec * VDDrawHardwareCursorPtr; + +struct VDSupportsHardwareCursorRec { + UInt32 csSupportsHardwareCursor; + /* true if hardware cursor is supported */ + UInt32 csReserved1; /* reserved for future use */ + UInt32 csReserved2; /* must be zero */ +}; +typedef struct VDSupportsHardwareCursorRec VDSupportsHardwareCursorRec; + +typedef VDSupportsHardwareCursorRec * VDSupportsHardwareCursorPtr; + +struct VDHardwareCursorDrawStateRec { + SInt32 csCursorX; /* x coordinate */ + SInt32 csCursorY; /* y coordinate */ + UInt32 csCursorVisible; /* true if cursor is visible */ + UInt32 csCursorSet; /* true if cursor successfully set by last set control call */ + UInt32 csReserved1; /* reserved for future use */ + UInt32 csReserved2; /* must be zero */ +}; +typedef struct VDHardwareCursorDrawStateRec VDHardwareCursorDrawStateRec; + +typedef VDHardwareCursorDrawStateRec * VDHardwareCursorDrawStatePtr; + +struct VDConvolutionInfoRec { + 
DisplayModeID csDisplayModeID; /* the ID of the resolution we want info on */ + DepthMode csDepthMode; /* The bit depth we want the info on (0x80 based) */ + unsigned long csPage; + UInt32 csFlags; + UInt32 csReserved; +}; +typedef struct VDConvolutionInfoRec VDConvolutionInfoRec; + +typedef VDConvolutionInfoRec * VDConvolutionInfoPtr; + +struct VDPowerStateRec { + unsigned long powerState; + unsigned long powerFlags; + + unsigned long powerReserved1; + unsigned long powerReserved2; +}; +typedef struct VDPowerStateRec VDPowerStateRec; + +typedef VDPowerStateRec * VDPowerStatePtr; +/* + Private Data to video drivers. + + In versions of MacOS with multiple address spaces (System 8), the OS + must know the extent of parameters in order to move them between the caller + and driver. The old private-selector model for video drivers does not have + this information so: + + For post-7.x Systems private calls should be implemented using the cscPrivateCall +*/ + +struct VDPrivateSelectorDataRec { + LogicalAddress privateParameters; /* Caller's parameters*/ + ByteCount privateParametersSize; /* Size of data sent from caller to driver*/ + LogicalAddress privateResults; /* Caller's return area. Can be nil, or same as privateParameters.*/ + ByteCount privateResultsSize; /* Size of data driver returns to caller. Can be nil, or same as privateParametersSize.*/ +}; +typedef struct VDPrivateSelectorDataRec VDPrivateSelectorDataRec; + + +struct VDPrivateSelectorRec { + UInt32 reserved; /* Reserved (set to 0). 
*/ + VDPrivateSelectorDataRec data[1]; +}; +typedef struct VDPrivateSelectorRec VDPrivateSelectorRec; + +struct VDDDCBlockRec { + UInt32 ddcBlockNumber; /* Input -- DDC EDID (Extended Display Identification Data) number (1-based) */ + ResType ddcBlockType; /* Input -- DDC block type (EDID/VDIF) */ + UInt32 ddcFlags; /* Input -- DDC Flags*/ + UInt32 ddcReserved; /* Reserved */ + Byte ddcBlockData[128]; /* Output -- DDC EDID/VDIF data (kDDCBlockSize) */ +}; +typedef struct VDDDCBlockRec VDDDCBlockRec; + +typedef VDDDCBlockRec * VDDDCBlockPtr; + +enum { + /* timingSyncConfiguration*/ + kSyncInterlaceMask = (1 << 7), + kSyncAnalogCompositeMask = 0, + kSyncAnalogCompositeSerrateMask = (1 << 2), + kSyncAnalogCompositeRGBSyncMask = (1 << 1), + kSyncAnalogBipolarMask = (1 << 3), + kSyncAnalogBipolarSerrateMask = (1 << 2), + kSyncAnalogBipolarSRGBSyncMask = (1 << 1), + kSyncDigitalCompositeMask = (1 << 4), + kSyncDigitalCompositeSerrateMask = (1 << 2), + kSyncDigitalCompositeMatchHSyncMask = (1 << 2), + kSyncDigitalSeperateMask = (1 << 4) + (1 << 3), + kSyncDigitalVSyncPositiveMask = (1 << 2), + kSyncDigitalHSyncPositiveMask = (1 << 1) +}; + + + + +struct VDDisplayTimingRangeRec { + UInt32 csRangeSize; /* Init to sizeof(VDDisplayTimingRangeRec) */ + UInt32 csRangeType; /* Init to 0 */ + UInt32 csRangeVersion; /* Init to 0 */ + UInt32 csRangeReserved; /* Init to 0 */ + + UInt32 csRangeBlockIndex; /* Requested block (first index is 0)*/ + UInt32 csRangeGroup; /* set to 0 */ + UInt32 csRangeBlockCount; /* # blocks */ + UInt32 csRangeFlags; /* dependent video */ + + UInt64 csMinPixelClock; /* Min dot clock in Hz */ + UInt64 csMaxPixelClock; /* Max dot clock in Hz */ + + UInt32 csMaxPixelError; /* Max dot clock error */ + UInt32 csTimingRangeSyncFlags; + UInt32 csTimingRangeSignalLevels; + UInt32 csReserved0; + + UInt32 csMinFrameRate; /* Hz */ + UInt32 csMaxFrameRate; /* Hz */ + UInt32 csMinLineRate; /* Hz */ + UInt32 csMaxLineRate; /* Hz */ + + + UInt32 csMaxHorizontalTotal; 
/* Clocks - Maximum total (active + blanking) */ + UInt32 csMaxVerticalTotal; /* Clocks - Maximum total (active + blanking) */ + UInt32 csMaxTotalReserved1; /* Reserved */ + UInt32 csMaxTotalReserved2; /* Reserved */ + + + + /* Some cards require that some timing elements*/ + /* be multiples of a "character size" (often 8*/ + /* clocks). The "xxxxCharSize" fields document*/ + /* those requirements.*/ + + + UInt8 csCharSizeHorizontalActive; /* Character size */ + UInt8 csCharSizeHorizontalBlanking; /* Character size */ + UInt8 csCharSizeHorizontalSyncOffset; /* Character size */ + UInt8 csCharSizeHorizontalSyncPulse; /* Character size */ + + UInt8 csCharSizeVerticalActive; /* Character size */ + UInt8 csCharSizeVerticalBlanking; /* Character size */ + UInt8 csCharSizeVerticalSyncOffset; /* Character size */ + UInt8 csCharSizeVerticalSyncPulse; /* Character size */ + + UInt8 csCharSizeHorizontalBorderLeft; /* Character size */ + UInt8 csCharSizeHorizontalBorderRight; /* Character size */ + UInt8 csCharSizeVerticalBorderTop; /* Character size */ + UInt8 csCharSizeVerticalBorderBottom; /* Character size */ + + UInt8 csCharSizeHorizontalTotal; /* Character size for active + blanking */ + UInt8 csCharSizeVerticalTotal; /* Character size for active + blanking */ + UInt16 csCharSizeReserved1; /* Reserved (Init to 0) */ + + + UInt32 csMinHorizontalActiveClocks; + UInt32 csMaxHorizontalActiveClocks; + UInt32 csMinHorizontalBlankingClocks; + UInt32 csMaxHorizontalBlankingClocks; + + UInt32 csMinHorizontalSyncOffsetClocks; + UInt32 csMaxHorizontalSyncOffsetClocks; + UInt32 csMinHorizontalPulseWidthClocks; + UInt32 csMaxHorizontalPulseWidthClocks; + + UInt32 csMinVerticalActiveClocks; + UInt32 csMaxVerticalActiveClocks; + UInt32 csMinVerticalBlankingClocks; + UInt32 csMaxVerticalBlankingClocks; + + UInt32 csMinVerticalSyncOffsetClocks; + UInt32 csMaxVerticalSyncOffsetClocks; + UInt32 csMinVerticalPulseWidthClocks; + UInt32 csMaxVerticalPulseWidthClocks; + + + UInt32 
csMinHorizontalBorderLeft; + UInt32 csMaxHorizontalBorderLeft; + UInt32 csMinHorizontalBorderRight; + UInt32 csMaxHorizontalBorderRight; + + UInt32 csMinVerticalBorderTop; + UInt32 csMaxVerticalBorderTop; + UInt32 csMinVerticalBorderBottom; + UInt32 csMaxVerticalBorderBottom; + + UInt32 csReserved1; /* Reserved (Init to 0)*/ + UInt32 csReserved2; /* Reserved (Init to 0)*/ + UInt32 csReserved3; /* Reserved (Init to 0)*/ + UInt32 csReserved4; /* Reserved (Init to 0)*/ + + UInt32 csReserved5; /* Reserved (Init to 0)*/ + UInt32 csReserved6; /* Reserved (Init to 0)*/ + UInt32 csReserved7; /* Reserved (Init to 0)*/ + UInt32 csReserved8; /* Reserved (Init to 0)*/ +}; +typedef struct VDDisplayTimingRangeRec VDDisplayTimingRangeRec; + +typedef VDDisplayTimingRangeRec * VDDisplayTimingRangePtr; + +enum { + /* csDisplayModeState*/ + kDMSModeReady = 0, /* Display Mode ID is configured and ready*/ + kDMSModeNotReady = 1, /* Display Mode ID is is being programmed*/ + kDMSModeFree = 2 /* Display Mode ID is not associated with a timing*/ +}; + + +/* Video driver Errors -10930 to -10959 */ +enum { + kTimingChangeRestrictedErr = -10930, + kVideoI2CReplyPendingErr = -10931, + kVideoI2CTransactionErr = -10932, + kVideoI2CBusyErr = -10933, + kVideoI2CTransactionTypeErr = -10934, + kVideoBufferSizeErr = -10935 +}; + + +enum { + /* csTimingRangeSignalLevels*/ + kRangeSupportsSignal_0700_0300_Bit = 0, + kRangeSupportsSignal_0714_0286_Bit = 1, + kRangeSupportsSignal_1000_0400_Bit = 2, + kRangeSupportsSignal_0700_0000_Bit = 3, + kRangeSupportsSignal_0700_0300_Mask = (1 << kRangeSupportsSignal_0700_0300_Bit), + kRangeSupportsSignal_0714_0286_Mask = (1 << kRangeSupportsSignal_0714_0286_Bit), + kRangeSupportsSignal_1000_0400_Mask = (1 << kRangeSupportsSignal_1000_0400_Bit), + kRangeSupportsSignal_0700_0000_Mask = (1 << kRangeSupportsSignal_0700_0000_Bit) +}; + + +enum { + /* csSignalConfig*/ + kDigitalSignalBit = 0, /* Do not set. 
Mac OS does not currently support arbitrary digital timings*/ + kAnalogSetupExpectedBit = 1, /* Analog displays - display expects a blank-to-black setup or pedestal. See VESA signal standards.*/ + kDigitalSignalMask = (1 << kDigitalSignalBit), + kAnalogSetupExpectedMask = (1 << kAnalogSetupExpectedBit) +}; + + +enum { + /* csSignalLevels for analog*/ + kAnalogSignalLevel_0700_0300 = 0, + kAnalogSignalLevel_0714_0286 = 1, + kAnalogSignalLevel_1000_0400 = 2, + kAnalogSignalLevel_0700_0000 = 3 +}; + + +enum { + /* csTimingRangeSyncFlags*/ + kRangeSupportsSeperateSyncsBit = 0, + kRangeSupportsSyncOnGreenBit = 1, + kRangeSupportsCompositeSyncBit = 2, + kRangeSupportsVSyncSerrationBit = 3, + kRangeSupportsSeperateSyncsMask = (1 << kRangeSupportsSeperateSyncsBit), + kRangeSupportsSyncOnGreenMask = (1 << kRangeSupportsSyncOnGreenBit), + kRangeSupportsCompositeSyncMask = (1 << kRangeSupportsCompositeSyncBit), + kRangeSupportsVSyncSerrationMask = (1 << kRangeSupportsVSyncSerrationBit) +}; + + + +enum { + /* csHorizontalSyncConfig and csVerticalSyncConfig*/ + kSyncPositivePolarityBit = 0, /* Digital separate sync polarity for analog interfaces (0 => negative polarity)*/ + kSyncPositivePolarityMask = (1 << kSyncPositivePolarityBit) +}; + + + + +/* For timings with kDetailedTimingFormat.*/ + +struct VDDetailedTimingRec { + UInt32 csTimingSize; /* Init to sizeof(VDDetailedTimingRec)*/ + UInt32 csTimingType; /* Init to 0*/ + UInt32 csTimingVersion; /* Init to 0*/ + UInt32 csTimingReserved; /* Init to 0*/ + + DisplayModeID csDisplayModeID; /* Init to 0*/ + UInt32 csDisplayModeSeed; /* */ + UInt32 csDisplayModeState; /* Display Mode state*/ + UInt32 csDisplayModeAlias; /* Mode to use when programmed.*/ + + UInt32 csSignalConfig; + UInt32 csSignalLevels; + + UInt64 csPixelClock; /* Hz*/ + + UInt64 csMinPixelClock; /* Hz - With error what is slowest actual clock */ + UInt64 csMaxPixelClock; /* Hz - With error what is fasted actual clock */ + + + UInt32 csHorizontalActive; /* Pixels*/ 
+ UInt32 csHorizontalBlanking; /* Pixels*/ + UInt32 csHorizontalSyncOffset; /* Pixels*/ + UInt32 csHorizontalSyncPulseWidth; /* Pixels*/ + + UInt32 csVerticalActive; /* Lines*/ + UInt32 csVerticalBlanking; /* Lines*/ + UInt32 csVerticalSyncOffset; /* Lines*/ + UInt32 csVerticalSyncPulseWidth; /* Lines*/ + + UInt32 csHorizontalBorderLeft; /* Pixels*/ + UInt32 csHorizontalBorderRight; /* Pixels*/ + UInt32 csVerticalBorderTop; /* Lines*/ + UInt32 csVerticalBorderBottom; /* Lines*/ + + UInt32 csHorizontalSyncConfig; + UInt32 csHorizontalSyncLevel; /* Future use (init to 0)*/ + UInt32 csVerticalSyncConfig; + UInt32 csVerticalSyncLevel; /* Future use (init to 0)*/ + + UInt32 csReserved1; /* Init to 0*/ + UInt32 csReserved2; /* Init to 0*/ + UInt32 csReserved3; /* Init to 0*/ + UInt32 csReserved4; /* Init to 0*/ + + UInt32 csReserved5; /* Init to 0*/ + UInt32 csReserved6; /* Init to 0*/ + UInt32 csReserved7; /* Init to 0*/ + UInt32 csReserved8; /* Init to 0*/ +}; +typedef struct VDDetailedTimingRec VDDetailedTimingRec; + +typedef VDDetailedTimingRec * VDDetailedTimingPtr; +typedef UInt32 VDClutBehavior; +typedef VDClutBehavior * VDClutBehaviorPtr; +enum { + kSetClutAtSetEntries = 0, /* SetEntries behavior is to update clut during SetEntries call*/ + kSetClutAtVBL = 1 /* SetEntries behavior is to upate clut at next vbl*/ +}; + + + +struct VDCommunicationRec { + SInt32 csBusID; /* kVideoDefaultBus for single headed cards.*/ + UInt32 csReserved0; /* Always zero*/ + UInt32 csReserved1; /* Always zero*/ + UInt32 csReserved2; /* Always zero*/ + + UInt32 csSendAddress; /* Usually I2C address (eg 0x6E)*/ + UInt32 csSendType; /* See kVideoSimpleI2CType etc.*/ + LogicalAddress csSendBuffer; /* Pointer to the send buffer*/ + ByteCount csSendSize; /* Number of bytes to send*/ + + UInt32 csReplyAddress; /* Address from which to read (eg 0x6F for kVideoDDCciReplyType I2C address)*/ + UInt32 csReplyType; /* See kVideoDDCciReplyType etc.*/ + LogicalAddress csReplyBuffer; /* Pointer to 
the reply buffer*/ + ByteCount csReplySize; /* Max bytes to reply (size of csReplyBuffer)*/ + + UInt32 csReserved3; + UInt32 csReserved4; + UInt32 csReserved5; /* Always zero*/ + UInt32 csReserved6; /* Always zero*/ +}; +typedef struct VDCommunicationRec VDCommunicationRec; + +typedef VDCommunicationRec * VDCommunicationPtr; + +struct VDCommunicationInfoRec { + SInt32 csBusID; /* kVideoDefaultBus for single headed cards. */ + UInt32 csBusType; /* See kVideoBusI2C etc.*/ + SInt32 csMinBus; /* Minimum bus (usually kVideoDefaultBus). Used to probe additional busses*/ + SInt32 csMaxBus; /* Max bus (usually kVideoDefaultBus). Used to probe additional busses*/ + + UInt32 csSupportedTypes; /* Bit field for first 32 supported transaction types. Eg. 0x07 => support for kVideoNoTransactionType, kVideoSimpleI2CType and kVideoDDCciReplyType.*/ + UInt32 csReserved1; /* Always zero*/ + UInt32 csReserved2; /* Always zero*/ + UInt32 csReserved3; /* Always zero*/ + + UInt32 csReserved4; /* Always zero*/ + UInt32 csReserved5; /* Always zero*/ + UInt32 csReserved6; /* Always zero*/ + UInt32 csReserved7; /* Always zero*/ +}; +typedef struct VDCommunicationInfoRec VDCommunicationInfoRec; + +typedef VDCommunicationInfoRec * VDCommunicationInfoPtr; + +#if PRAGMA_STRUCT_ALIGN + #pragma options align=reset +#elif PRAGMA_STRUCT_PACKPUSH + #pragma pack(pop) +#elif PRAGMA_STRUCT_PACK + #pragma pack() +#endif + +#ifdef PRAGMA_IMPORT_OFF +#pragma import off +#elif PRAGMA_IMPORT +#pragma import reset +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __IOMACOSVIDEO__ */ + diff --git a/iokit/IOKit/ndrvsupport/IONDRVFramebuffer.h b/iokit/IOKit/ndrvsupport/IONDRVFramebuffer.h new file mode 100644 index 000000000..846c3eedc --- /dev/null +++ b/iokit/IOKit/ndrvsupport/IONDRVFramebuffer.h @@ -0,0 +1,304 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in. + * sdouglas 24 Jul 98 - start IOKit. + * sdouglas 15 Dec 98 - cpp. + * + */ + +#ifndef _IOKIT_IONDRVFRAMEBUFFER_H +#define _IOKIT_IONDRVFRAMEBUFFER_H + +#include +#include +#include + + +#define kIONDRVDisableKey "AAPL,disable-ndrv" + + +class IONDRVFramebuffer : public IOFramebuffer +{ + OSDeclareDefaultStructors(IONDRVFramebuffer) + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of this class in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. 
(Internal use only) */ + ExpansionData * reserved; + +protected: + + IOService * nub; + class IONDRV * ndrv; + + // current configuration + IODisplayModeID currentDisplayMode; + IOIndex currentDepth; + IOIndex currentPage; + bool directMode; + + IOPhysicalAddress physicalFramebuffer; + IODeviceMemory * vramRange; + + UInt8 startAt8; + UInt8 grayMode; + UInt8 lastGrayMode; + VDClutBehavior lastClutSetting; + UInt8 platformDeepSleep; + + bool consoleDevice; + UInt32 powerState; + UInt32 ndrvState; + SInt32 ndrvEnter; + OSArray * detailedTimings; + UInt32 detailedTimingsSeed; + UInt32 * detailedTimingsCurrent; + + IODeviceMemory * vramMemory; + + VDResolutionInfoRec cachedVDResolution; + + struct _VSLService * vslServices; + + UInt32 accessFlags; + + UInt32 __reserved[32]; + +private: + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 0); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 1); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 2); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 3); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 4); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 5); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 6); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 7); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 8); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 9); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 10); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 11); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 12); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 13); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 14); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 15); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 16); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 17); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 18); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 19); + 
OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 20); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 21); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 22); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 23); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 24); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 25); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 26); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 27); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 28); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 29); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 30); + OSMetaClassDeclareReservedUnused(IONDRVFramebuffer, 31); + +private: + + void initForPM ( void ); + IOReturn setPowerState( unsigned long, IOService* ); + unsigned long maxCapabilityForDomainState( IOPMPowerFlags ); + unsigned long initialPowerStateForDomainState( IOPMPowerFlags ); + unsigned long powerStateForDomainState( IOPMPowerFlags ); + + virtual IOReturn checkDriver( void ); + virtual UInt32 iterateAllModes( IODisplayModeID * displayModeIDs ); + virtual IOReturn getResInfoForMode( IODisplayModeID modeID, + IODisplayModeInformation * theInfo ); + virtual IOReturn getResInfoForArbMode( IODisplayModeID modeID, + IODisplayModeInformation * theInfo ); + virtual IOReturn validateDisplayMode( + IODisplayModeID mode, IOOptionBits flags, + VDDetailedTimingRec ** detailed ); + virtual IOReturn setDetailedTiming( + IODisplayModeID mode, IOOptionBits options, + void * description, IOByteCount descripSize ); + virtual void getCurrentConfiguration( void ); + static const IOTVector * _undefinedSymbolHandler( void * self, + const char * libraryName, const char * symbolName ); + +public: + virtual IOReturn doControl( UInt32 code, void * params ); + virtual IOReturn doStatus( UInt32 code, void * params ); + +public: + + virtual IOService * probe( IOService * provider, + SInt32 * score ); + + virtual bool start( IOService * provider ); + + 
virtual void free( void ); + + virtual IOReturn setProperties( OSObject * properties ); + + virtual IOReturn enableController( void ); + + virtual IODeviceMemory * makeSubRange( IOPhysicalAddress start, + IOPhysicalLength length ); + virtual IODeviceMemory * getApertureRange( IOPixelAperture aperture ); + virtual IODeviceMemory * getVRAMRange( void ); + + virtual IODeviceMemory * findVRAM( void ); + + virtual bool isConsoleDevice( void ); + + virtual const IOTVector * undefinedSymbolHandler( const char * libraryName, + const char * symbolName ); + + virtual const char * getPixelFormats( void ); + + // Array of supported display modes + virtual IOItemCount getDisplayModeCount( void ); + virtual IOReturn getDisplayModes( IODisplayModeID * allDisplayModes ); + + // Info about a display mode + virtual IOReturn getInformationForDisplayMode( IODisplayModeID displayMode, + IODisplayModeInformation * info ); + + // Mask of pixel formats available in mode and depth + virtual UInt64 getPixelFormatsForDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + + virtual IOReturn getPixelInformation( + IODisplayModeID displayMode, IOIndex depth, + IOPixelAperture aperture, IOPixelInformation * pixelInfo ); + + // Framebuffer info + + // Current display mode and depth + virtual IOReturn getCurrentDisplayMode( IODisplayModeID * displayMode, + IOIndex * depth ); + + // Set display mode and depth + virtual IOReturn setDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + + // For pages + virtual IOReturn setApertureEnable( IOPixelAperture aperture, + IOOptionBits enable ); + + virtual IOReturn setStartupDisplayMode( IODisplayModeID displayMode, + IOIndex depth ); + virtual IOReturn getStartupDisplayMode( IODisplayModeID * displayMode, + IOIndex * depth ); + + //// CLUTs + + virtual IOReturn setCLUTWithEntries( IOColorEntry * colors, UInt32 index, + UInt32 numEntries, IOOptionBits options ); + + //// Gamma + + virtual IOReturn setGammaTable( UInt32 channelCount, UInt32 
dataCount, + UInt32 dataWidth, void * data ); + + //// Display mode timing information + + virtual IOReturn getTimingInfoForDisplayMode( IODisplayModeID displayMode, + IOTimingInformation * info ); + + //// Detailed timing information + + virtual IOReturn validateDetailedTiming( + void * description, IOByteCount descripSize ); + + virtual IOReturn setDetailedTimings( OSArray * array ); + + //// Controller attributes + + virtual IOReturn getAttribute( IOSelect attribute, UInt32 * value ); + + //// Connections + + virtual IOItemCount getConnectionCount( void ); + + virtual IOReturn getAttributeForConnection( IOIndex connectIndex, + IOSelect attribute, UInt32 * value ); + + virtual IOReturn setAttributeForConnection( IOIndex connectIndex, + IOSelect attribute, UInt32 info ); + + // Apple sensing + + virtual IOReturn getAppleSense( IOIndex connectIndex, + UInt32 * senseType, + UInt32 * primary, + UInt32 * extended, + UInt32 * displayType ); + + virtual IOReturn connectFlags( IOIndex connectIndex, + IODisplayModeID displayMode, IOOptionBits * flags ); + + //// IOHighLevelDDCSense + + virtual bool hasDDCConnect( IOIndex connectIndex ); + virtual IOReturn getDDCBlock( IOIndex connectIndex, UInt32 blockNumber, + IOSelect blockType, IOOptionBits options, + UInt8 * data, IOByteCount * length ); + + //// Interrupts + + virtual IOReturn registerForInterruptType( IOSelect interruptType, + IOFBInterruptProc proc, OSObject * target, void * ref, + void ** interruptRef ); + virtual IOReturn unregisterInterrupt( void * interruptRef ); + virtual IOReturn setInterruptState( void * interruptRef, UInt32 state ); + + //// HW Cursors + + virtual IOReturn setCursorImage( void * cursorImage ); + virtual IOReturn setCursorState( SInt32 x, SInt32 y, bool visible ); + + //// VSL calls + + static OSStatus VSLNewInterruptService( + void * entryID, + UInt32 serviceType, + _VSLService ** serviceID ); + static OSStatus VSLDisposeInterruptService( _VSLService * serviceID ); + static OSStatus 
VSLDoInterruptService( _VSLService * serviceID ); + static Boolean VSLPrepareCursorForHardwareCursor( + void * cursorRef, + IOHardwareCursorDescriptor * hwDesc, + IOHardwareCursorInfo * hwCursorInfo ); +}; + +#endif /* ! _IOKIT_IONDRVFRAMEBUFFER_H */ + + diff --git a/iokit/IOKit/ndrvsupport/IONDRVSupport.h b/iokit/IOKit/ndrvsupport/IONDRVSupport.h new file mode 100644 index 000000000..7763c4412 --- /dev/null +++ b/iokit/IOKit/ndrvsupport/IONDRVSupport.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __IONDRVSUPPORT__ +#define __IONDRVSUPPORT__ + +#include + +#pragma options align=mac68k + +#ifdef __cplusplus +extern "C" { +#endif + +#define kIONDRVIgnoreKey "AAPL,iokit-ignore-ndrv" +#define kIONDRVForXKey "AAPL,iokit-ndrv" + +struct IOTVector { + void * pc; + UInt32 toc; +}; +typedef struct IOTVector IOTVector; + +struct IONDRVInterruptSetMember { + void * setID; + UInt32 member; +}; +typedef struct IONDRVInterruptSetMember IONDRVInterruptSetMember; + +typedef SInt32 (*IONDRVInterruptHandler)( IONDRVInterruptSetMember setMember, void *refCon, UInt32 theIntCount); +typedef void (*IONDRVInterruptEnabler)( IONDRVInterruptSetMember setMember, void *refCon); +typedef Boolean (*IONDRVInterruptDisabler)( IONDRVInterruptSetMember setMember, void *refCon); + +enum { + kIONDRVFirstMemberNumber = 1, + kIONDRVIsrIsComplete = 0, + kIONDRVIsrIsNotComplete = -1, + kIONDRVMemberNumberParent = -2 +}; + +enum { + kIONDRVReturnToParentWhenComplete = 0x00000001, + kIONDRVReturnToParentWhenNotComplete = 0x00000002 +}; + +enum { + kIONDRVISTChipInterruptSource = 0, + kIONDRVISTOutputDMAInterruptSource = 1, + kIONDRVISTInputDMAInterruptSource = 2, + kIONDRVISTPropertyMemberCount = 3 +}; + +#define kIONDRVISTPropertyName "driver-ist" + +IOReturn +IONDRVInstallInterruptFunctions(void * setID, + UInt32 member, + void * refCon, + IOTVector * handler, + IOTVector * enabler, + IOTVector * disabler ); + +typedef const IOTVector * (*IONDRVUndefinedSymbolHandler)( void * self, + const char * libraryName, const char * symbolName ); + +#pragma options align=reset + +#ifdef __cplusplus +} +#endif + +#endif /* __IONDRVSUPPORT__ */ diff --git a/iokit/IOKit/ndrvsupport/Makefile b/iokit/IOKit/ndrvsupport/Makefile new file mode 100644 index 000000000..c3e4ddf8d --- /dev/null +++ b/iokit/IOKit/ndrvsupport/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export 
MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MD_DIR = ndrvsupport +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MD_LIST = IOMacOSTypes.h IOMacOSVideo.h +INSTALL_MD_LCL_LIST = "" +INSTALL_MD_DIR = $(MD_DIR) + +EXPORT_MD_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MD_DIR = IOKit/$(MD_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/network/IOBasicOutputQueue.h b/iokit/IOKit/network/IOBasicOutputQueue.h new file mode 100644 index 000000000..aa3f83c2d --- /dev/null +++ b/iokit/IOKit/network/IOBasicOutputQueue.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOBasicOutputQueue.h + * + * HISTORY + * + */ + +#ifndef _IOBASICOUTPUTQUEUE_H +#define _IOBASICOUTPUTQUEUE_H + +#include +#include +#include // FIXME - remove + +struct IOMbufQueue; + +/*! @class IOBasicOutputQueue : public IOOutputQueue + @abstract A concrete implementation of an IOOutputQueue. This object + uses a spinlock to protect the packet queue from multiple producers. + A single producer is promoted to become a consumer when the queue is + not active. Otherwise, the producer will simply queue the packet and + return without blocking. + + The flow of packets from the queue to its target can be controlled + by calling methods such as start(), stop(), or service(). The target + is expected to call those methods from a single threaded context, + i.e. the work loop context in a network driver. In addition, the + target must also return a status for every packet delivered by the + consumer thread. This return value is the only mechanism that the + target can use to manage the queue when it is running on the + consumer thread. */ + +class IOBasicOutputQueue : public IOOutputQueue +{ + OSDeclareDefaultStructors( IOBasicOutputQueue ) + +private: + static IOReturn dispatchNetworkDataNotification(void * target, + void * param, + IONetworkData * data, + UInt32 type); + + void dequeue(); + +protected: + OSObject * _target; + IOOutputAction _action; + IOOutputQueueStats * _stats; + IONetworkData * _statsData; + IOSimpleLock * _spinlock; + IOMbufQueue * _inQueue; + IOMbufQueue * _queues[2]; + volatile bool _waitDequeueDone; + volatile UInt32 _state; + volatile UInt32 _serviceCount; + +/*! @function serviceThread + @abstract Method called by the scheduled service thread when it + starts to run. + @discussion Provide an implementation for the serviceThread() method + defined in IOOutputQueue. 
The service thread is scheduled by service() + to restart a stalled queue when the kServiceAsync options is given. + @param A parameter that was given to scheduleServiceThread(). + This parameter is not used. */ + + virtual void serviceThread(void * param); + +/*! @function output + @abstract Transfer all packets in the mbuf queue to the target. + @param queue A queue of output packets. + @param state Return a state bit defined by IOBasicOutputQueue that + declares the new state of the queue following this method call. + A kStateStalled is returned if the queue should stall, otherwise 0 + is returned. */ + + virtual void output(IOMbufQueue * queue, UInt32 * state); + +/*! @function free + @abstract Free the IOBasicOutputQueue object. + @discussion Release allocated resources, then call super::free(). */ + + virtual void free(); + +/*! @function handleNetworkDataAccess + @abstract Handle an external access to the IONetworkData object + returned by getStatisticsData(). + @param data The IONetworkData object being accessed. + @param type Describes the type of access being performed. + @param param An optional parameter for the handler. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn handleNetworkDataAccess(IONetworkData * data, + UInt32 type, + void * param); + +public: + +/*! @function init + @abstract Initialize an IOBasicOutputQueue object. + @param target The object that will handle packets removed from the + queue, and is usually a subclass of IONetworkController. + @param action The function that will handle packets removed from the + queue. + @param capacity The initial capacity of the output queue. + @result true if initialized successfully, false otherwise. */ + + virtual bool init(OSObject * target, + IOOutputAction action, + UInt32 capacity = 0); + +/*! @function withTarget + @abstract Factory method that will construct and initialize an + IOBasicOutputQueue object. 
+ @param target An IONetworkController object that will handle packets + removed from the queue. + @param capacity The initial capacity of the output queue. + @result An IOBasicOutputQueue object on success, or 0 otherwise. */ + + static IOBasicOutputQueue * withTarget(IONetworkController * target, + UInt32 capacity = 0); + +/*! @function withTarget + @abstract Factory method that will construct and initialize an + IOBasicOutputQueue object. + @param target The object that will handle packets removed from the + queue. + @param action The function that will handle packets removed from the + queue. + @param capacity The initial capacity of the output queue. + @result An IOBasicOutputQueue object on success, or 0 otherwise. */ + + static IOBasicOutputQueue * withTarget(OSObject * target, + IOOutputAction action, + UInt32 capacity = 0); + +/*! @function enqueue + @abstract Called by a client to add a packet, or a chain of packets, + to the queue. + @discussion A packet is described by a mbuf chain, while a chain + of packets is constructed by linking multiple mbuf chains via the + m_nextpkt field. This method can be called by multiple client + threads. + @param m A single packet, or a chain of packets. + @param param A parameter provided by the caller. + @result Always return 0. */ + + virtual UInt32 enqueue(struct mbuf * m, void * param); + +/*! @function start + @abstract Start up the packet flow between the queue and its target. + @discussion Called by the target to start the queue. This will allow + packets to be removed from the queue, and then delivered to the target. + @result true if the queue was started successfully, false otherwise. */ + + virtual bool start(); + +/*! @function stop + @abstract Stop the packet flow between the queue and its target. + @discussion Stop the queue and prevent it from sending packets to its + target. This call is synchronous and it may block. 
After this method + returns, the queue will no longer call the registered target/action, + even as new packets are added to the queue. The queue will continue to + absorb new packets until the size of the queue reaches its capacity. + The registered action must never call stop(), or a deadlock will occur. + @result Returns the previous running state of the queue, + true if the queue was running, false if the queue was already stopped. */ + + virtual bool stop(); + +/*! @enum An enumeration of the option bits recognized by service(). + @constant kServiceAsync Set this option to service the queue in + an asynchronous manner. The service() call will not block, but a + scheduling latency will be introduced before the queue is serviced. */ + + enum { + kServiceAsync = 0x1 + }; + +/*! @function service + @abstract Service a queue that was stalled by the target. + @discussion A target that stalls the queue must call service() when + it becomes ready to accept more packets. Calling this methods when the + queue is not stalled is harmless. + @result true if the queue was stalled and there were packets sitting in + the queue awaiting delivery, false otherwise. */ + + virtual bool service(IOOptionBits options = 0); + +/*! @function flush + @abstract Drop and free all packets currently held by the queue. + @discussion To ensure that all packets are removed from the queue, + stop() should be called prior to flush(), to make sure there are + no packets in-flight and being delivered to the target. + @result The number of packets that were dropped and freed. */ + + virtual UInt32 flush(); + +/*! @function setCapacity + @abstract Change the number of packets that the queue can hold + before it begins to drop excess packets. + @param capacity The new desired capacity. + @result true if the new capacity was accepted, false otherwise. */ + + virtual bool setCapacity(UInt32 capacity); + +/*! @function getCapacity + @abstract Get the number of packets that the queue can hold. 
+ @discussion The queue will begin to drop incoming packets when the + size of queue reaches its capacity. + @result The current queue capacity. */ + + virtual UInt32 getCapacity() const; + +/*! @function getSize + @abstract Get the number of packets currently held in the queue. + @result The size of the queue. */ + + virtual UInt32 getSize() const; + +/*! @function getDropCount + @abstract Get the number of packets dropped by the queue. + @result The number of packets dropped due to over-capacity, or by + external calls to the flush() method. */ + + virtual UInt32 getDropCount(); + +/*! @function getOutputCount + @sbstract Get the number of packets accepted by the target. + @result The number of times that kIOOutputStatusAccepted is returned by + the target. */ + + virtual UInt32 getOutputCount(); + +/*! @function getRetryCount + @abstract Get the number of instances when the target has refused to + accept the packet provided. + @result The number of times that kIOOutputStatusRetry is returned by the + target. */ + + virtual UInt32 getRetryCount(); + +/*! @function getStallCount + @abstract Get the number of instances when the target has stalled the + queue. + @result The number of times that kIOOutputCommandStall is returned by the + target. */ + + virtual UInt32 getStallCount(); + +/*! @enum An enumeration of the bits in the value returned by getState(). + @constant kStateRunning Set when the queue is running. Calling start() + and stop() will set or clear this bit. + @constant kStateStalled Set when the queue is stalled by the target. + @constant kStateActive Set when a consumer thread is actively removing + packets from the queue and passing them to the target. */ + + enum { + kStateRunning = 0x1, + kStateOutputStalled = 0x2, + kStateOutputActive = 0x4, + kStateOutputServiceMask = 0xff00 + }; + +/*! @function getState + @abstract Get the state of the queue object. + @result The current state of the queue object. */ + + virtual UInt32 getState() const; + +/*! 
@function getStatisticsData + @abstract Return an IONetworkData object containing statistics counters + updated by the queue. + @result An IONetworkData object. */ + + virtual IONetworkData * getStatisticsData() const; +}; + +#endif /* !_IOBASICOUTPUTQUEUE_H */ diff --git a/iokit/IOKit/network/IOEthernetController.h b/iokit/IOKit/network/IOEthernetController.h new file mode 100644 index 000000000..10c577aaf --- /dev/null +++ b/iokit/IOKit/network/IOEthernetController.h @@ -0,0 +1,447 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOEthernetController.h + * + * HISTORY + * + * Dec 3, 1998 jliu - C++ conversion. + */ + +#ifndef _IOETHERNETCONTROLLER_H +#define _IOETHERNETCONTROLLER_H + +#include + +/*! @defined kIOEthernetControllerClass + @abstract kIOEthernetControllerClass is the name of the + IOEthernetController class. */ + +#define kIOEthernetControllerClass "IOEthernetController" + +/*! @defined kIOEthernetAddressSize + @abstract The number of bytes in an Ethernet hardware address. 
*/ + +#define kIOEthernetAddressSize 6 + +/*! @defined kIOEthernetMaxPacketSize + @abstract The maximum size of an Ethernet packet, including + the FCS bytes. */ + +#define kIOEthernetMaxPacketSize 1518 + +/*! @defined kIOEthernetMinPacketSize + @abstract The minimum size of an Ethernet packet, including + the FCS bytes. */ + +#define kIOEthernetMinPacketSize 64 + +/*! @defined kIOEthernetCRCSize + @abstract The size in bytes of the 32-bit CRC value appended + to the end of each Ethernet frame. */ + +#define kIOEthernetCRCSize 4 + +/*! @defined kIOEthernetWakeOnLANFilterGroup + @abstract kIOEthernetWakeOnLANFilterGroup describes the name assigned + to the Ethernet wake-On-LAN filter group. */ + +#define kIOEthernetWakeOnLANFilterGroup "IOEthernetWakeOnLANFilterGroup" + +/*! @enum Enumeration of wake-On-LAN filters. + @discussion An enumeration of all filters in the wake-on-LAN filter + group. Each filter listed will respond to a network event that + will trigger a system wake-up. + @constant kIOEthernetWakeOnMagicPacket Reception of a Magic Packet. */ + +enum { + kIOEthernetWakeOnMagicPacket = 0x1 +}; + +/* + * Kernel + */ +#if defined(KERNEL) && defined(__cplusplus) + +struct IOEthernetAddress { + UInt8 bytes[kIOEthernetAddressSize]; +}; + +extern "C" { // FIXME - remove +#include +#include +#include +} + +/*! @defined gIOEthernetWakeOnLANFilterGroup + @discussion gIOEthernetWakeOnLANFilterGroup is an OSSymbol object + that contains the name of the Ethernet wake-on-LAN filter group + defined by kIOEthernetWakeOnLANFilterGroup. */ + +extern const OSSymbol * gIOEthernetWakeOnLANFilterGroup; + +/*! @class IOEthernetController : public IONetworkController + @abstract An abstract superclass for Ethernet controllers. Ethernet + controller drivers should subclass IOEthernetController, and implement + or override the hardware specific methods to create an Ethernet driver. 
+ An interface object (an IOEthernetInterface instance) must be + instantiated by the driver, through attachInterface(), to connect + the controller driver to the data link layer. */ + +class IOEthernetController : public IONetworkController +{ + OSDeclareAbstractStructors( IOEthernetController ) + +protected: + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * _reserved; + + +public: + +/*! @function initialize + @abstract IOEthernetController class initializer. + @discussion Create global OSSymbol objects that are used as keys. */ + + static void initialize(); + +/*! @function init + @abstract Initialize an IOEthernetController object. + @param properties A dictionary object containing a property table + associated with this instance. + @result true on success, false otherwise. */ + + virtual bool init(OSDictionary * properties); + +/*! @function getPacketFilters + @abstract Get the set of packet filters supported by the Ethernet + controller in the given filter group. + @discussion The default implementation of the abstract method inherited + from IONetworkController. When the filter group specified is + gIONetworkFilterGroup, then this method will return a value formed by + a bitwise OR of kIOPacketFilterUnicast, kIOPacketFilterBroadcast, + kIOPacketFilterMulticast, kIOPacketFilterPromiscuous. Otherwise, the + return value will be set to zero (0). Subclasses must override this + method if their filtering capability differs from what is reported by + this default implementation. This method is called from the workloop + context, and the result is published to the I/O Kit registry. + @param group The name of the filter group. + @param filters Pointer to the mask of supported filters returned by + this method. + @result kIOReturnSuccess. Drivers that override this + method must return kIOReturnSuccess to indicate success, or an error + return code otherwise. 
*/ + + virtual IOReturn getPacketFilters(const OSSymbol * group, + UInt32 * filters) const; + +/*! @function enablePacketFilter + @abstract Enable one of the supported packet filters from the + given filter group. + @discussion The default implementation of the abstract method inherited + from IONetworkController. This method will call setMulticastMode() or + setPromiscuousMode() when the multicast or the promiscuous filter is to be + enabled. Requests to disable the Unicast or Broadcast filters are handled + silently, without informing the subclass. Subclasses can override this + method to change this default behavior, or to extend it to handle + additional filter types or filter groups. This method call is synchronized + by the workloop's gate. + @param group The name of the filter group containing the filter to be + enabled. + @param aFilter The filter to enable. + @param enabledFilters All filters currently enabled by the client. + @param options Optional flags for the enable request. + @result The return from setMulticastMode() or setPromiscuousMode() if + either of those two methods are called. kIOReturnSuccess if the filter + specified is kIOPacketFilterUnicast or kIOPacketFilterBroadcast. + kIOReturnUnsupported if the filter group specified is not + gIONetworkFilterGroup. */ + + virtual IOReturn enablePacketFilter(const OSSymbol * group, + UInt32 aFilter, + UInt32 enabledFilters, + IOOptionBits options = 0); + +/*! @function disablePacketFilter + @abstract Disable a packet filter that is currently enabled from the + given filter group. + @discussion The default implementation of the abstract method inherited + from IONetworkController. This method will call setMulticastMode() or + setPromiscuousMode() when the multicast or the promiscuous filter is to be + disabled. Requests to disable the Unicast or Broadcast filters are handled + silently, without informing the subclass. 
Subclasses can override this + method to change this default behavior, or to extend it to handle + additional filter types or filter groups. This method call is synchronized + by the workloop's gate. + @param group The name of the filter group containing the filter to be + disabled. + @param aFilter The filter to disable. + @param enabledFilters All filters currently enabled by the client. + @param options Optional flags for the disable request. + @result The return from setMulticastMode() or setPromiscuousMode() if + either of those two methods are called. kIOReturnSuccess if the filter + specified is kIOPacketFilterUnicast or kIOPacketFilterBroadcast. + kIOReturnUnsupported if the filter group specified is not + gIONetworkFilterGroup. */ + + virtual IOReturn disablePacketFilter(const OSSymbol * group, + UInt32 aFilter, + UInt32 enabledFilters, + IOOptionBits options = 0); + +/*! @function getHardwareAddress + @abstract Get the Ethernet controller's station address. + @discussion The default implementation of the abstract method inherited + from IONetworkController. This method will call the overloaded form + IOEthernetController::getHardwareAddress() that subclasses are expected + to override. + @param addr The buffer where the controller's hardware address should + be written. + @param inOutAddrBytes The size of the address buffer provided by the + client, and replaced by this method with the actual size of + the hardware address in bytes. + @result kIOReturnSuccess on success, or an error otherwise. */ + + virtual IOReturn getHardwareAddress(void * addr, + UInt32 * inOutAddrBytes); + +/*! @function setHardwareAddress + @abstract Set or change the station address used by the Ethernet + controller. + @discussion The default implementation of the abstract method inherited + from IONetworkController. This method will call the overloaded form + IOEthernetController::setHardwareAddress() that subclasses are expected + to override. 
+ @param addr The buffer containing the hardware address provided by + the client. + @param addrBytes The size of the address buffer provided by the + client in bytes. + @result kIOReturnSuccess on success, or an error otherwise. */ + + virtual IOReturn setHardwareAddress(const void * addr, + UInt32 addrBytes); + +/*! @function getMaxPacketSize + @abstract Get the maximum packet size supported by the Ethernet + controller, including the frame header and FCS. + @param maxSize Pointer to the return value. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn getMaxPacketSize(UInt32 * maxSize) const; + +/*! @function getMinPacketSize + @abstract Get the minimum packet size supported by the Ethernet + controller, including the frame header and FCS. + @param minSize Pointer to the return value. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn getMinPacketSize(UInt32 * minSize) const; + +/*! @function getPacketFilters + @abstract Get the set of packet filters supported by the Ethernet + controller in the network filter group. + @param filters Pointer to the return value containing a mask of + supported filters. + @result kIOReturnSuccess. Drivers that override this + method must return kIOReturnSuccess to indicate success, or an error + return code otherwise. */ + + virtual IOReturn getPacketFilters(UInt32 * filters) const; + +/*! @function getHardwareAddress + @abstract Get the Ethernet controller's permanent station address. + @discussion Ethernet drivers must implement this method, by reading the + address from hardware and writing it to the buffer provided. This method + is called from the workloop context. + @param addrP Pointer to an IOEthernetAddress where the hardware address + should be returned. + @result kIOReturnSuccess on success, or an error return code otherwise. */ + + virtual IOReturn getHardwareAddress(IOEthernetAddress * addrP) = 0; + +/*! 
@function setHardwareAddress + @abstract Set or change the station address used by the Ethernet + controller. + @discussion This method is called in response to a client command to + change the station address used by the Ethernet controller. Implementation + of this method is optional. This method is called from the workloop context. + @param addrP Pointer to an IOEthernetAddress containing the new station + address. + @result The default implementation will always return kIOReturnUnsupported. + If overridden, drivers must return kIOReturnSuccess on success, or an error + return code otherwise. */ + + virtual IOReturn setHardwareAddress(const IOEthernetAddress * addrP); + +/*! @function setMulticastMode + @abstract Enable or disable multicast mode. + @discussion Called by enablePacketFilter() or disablePacketFilter() + when there is a change in the activation state of the multicast filter + identified by kIOPacketFilterMulticast. This method is called from the + workloop context. + @param active True to enable multicast mode, false to disable it. + @result kIOReturnUnsupported. If overridden, drivers must return + kIOReturnSuccess on success, or an error return code otherwise. */ + + virtual IOReturn setMulticastMode(bool active); + +/*! @function setMulticastList + @abstract Set the list of multicast addresses that the multicast filter + should use to match against the destination address of an incoming frame. + The frame should be accepted when a match occurs. + @discussion Called when the multicast group membership of an interface + object is changed. Drivers that support kIOPacketFilterMulticast should + override this method and update the hardware multicast filter using the + list of Ethernet addresses provided. Perfect multicast filtering is + preferred if supported by the hardware, in order to reduce the number of + unwanted packets received. 
If the number of multicast addresses in the + list exceeds what the hardware is capable of supporting, or if perfect + filtering is not supported, then ideally the hardware should be programmed + to perform imperfect filtering, through some form of hash filtering + mechanism. Only as a last resort should the driver enable reception of + all multicast packets to satisfy this request. This method is called + from the workloop context, and only if the driver reports + kIOPacketFilterMulticast support in getPacketFilters(). + @param addrs An array of Ethernet addresses. This argument must be + ignored if the count argument is 0. + @param count The number of Ethernet addresses in the list. This value + will be zero when the list becomes empty. + @result kIOReturnUnsupported. Drivers must return kIOReturnSuccess to + indicate success, or an error return code otherwise. */ + + virtual IOReturn setMulticastList(IOEthernetAddress * addrs, + UInt32 count); + +/*! @function setPromiscuousMode + @abstract Enable or disable promiscuous mode. + @discussion Called by enablePacketFilter() or disablePacketFilter() + when there is a change in the activation state of the promiscuous + filter identified by kIOPacketFilterPromiscuous. This method is + called from the workloop context. + @param active True to enable promiscuous mode, false to disable it. + @result kIOReturnUnsupported. If overridden, drivers must return + kIOReturnSuccess on success, or an error return code otherwise. */ + + virtual IOReturn setPromiscuousMode(bool active); + +/*! @function setWakeOnMagicPacket + @abstract Enable or disable the wake on Magic Packet support. + @discussion Called by enablePacketFilter() or disablePacketFilter() + when there is a change in the activation state of the wake-on-LAN + filter identified by kIOEthernetWakeOnMagicPacket. This method is + called from the workloop context. 
+ @param active True to enable support for system wake on reception + of a Magic Packet, false to disable it. + @result kIOReturnUnsupported. If overridden, drivers must return + kIOReturnSuccess on success, or an error return code otherwise. */ + + virtual IOReturn setWakeOnMagicPacket(bool active); + +protected: + +/*! @function createInterface + @abstract Create an IOEthernetInterface object. + @discussion Allocate and return a new IOEthernetInterface instance. + A subclass of IONetworkController must implement this method and return + a matching interface object. The implementation in IOEthernetController + will return an IOEthernetInterface object. Subclasses of + IOEthernetController, such as Ethernet controller drivers, will have + little reason to override this implementation. + @result A newly allocated and initialized IOEthernetInterface object. */ + + virtual IONetworkInterface * createInterface(); + +/*! @function free + @abstract Free the IOEthernetController instance. Release resources, + then followed by a call to super::free(). */ + + virtual void free(); + +/*! @function publishProperties + @abstract Publish Ethernet controller properties and capabilities. + @discussion Publish Ethernet controller properties to the property + table. For instance, getHardwareAddress() is called to fetch the + hardware address, and the address is then published to the property + table. This method call is synchronized by the workloop's gate, + and must never be called directly by subclasses. + @result true if all properties and capabilities were discovered, + and published successfully, false otherwise. Returning false will + prevent client objects from attaching to the Ethernet controller + since a property that a client relies upon may be missing. 
*/ + + virtual bool publishProperties(); + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IOEthernetController, 0); + OSMetaClassDeclareReservedUnused( IOEthernetController, 1); + OSMetaClassDeclareReservedUnused( IOEthernetController, 2); + OSMetaClassDeclareReservedUnused( IOEthernetController, 3); + OSMetaClassDeclareReservedUnused( IOEthernetController, 4); + OSMetaClassDeclareReservedUnused( IOEthernetController, 5); + OSMetaClassDeclareReservedUnused( IOEthernetController, 6); + OSMetaClassDeclareReservedUnused( IOEthernetController, 7); + OSMetaClassDeclareReservedUnused( IOEthernetController, 8); + OSMetaClassDeclareReservedUnused( IOEthernetController, 9); + OSMetaClassDeclareReservedUnused( IOEthernetController, 10); + OSMetaClassDeclareReservedUnused( IOEthernetController, 11); + OSMetaClassDeclareReservedUnused( IOEthernetController, 12); + OSMetaClassDeclareReservedUnused( IOEthernetController, 13); + OSMetaClassDeclareReservedUnused( IOEthernetController, 14); + OSMetaClassDeclareReservedUnused( IOEthernetController, 15); + OSMetaClassDeclareReservedUnused( IOEthernetController, 16); + OSMetaClassDeclareReservedUnused( IOEthernetController, 17); + OSMetaClassDeclareReservedUnused( IOEthernetController, 18); + OSMetaClassDeclareReservedUnused( IOEthernetController, 19); + OSMetaClassDeclareReservedUnused( IOEthernetController, 20); + OSMetaClassDeclareReservedUnused( IOEthernetController, 21); + OSMetaClassDeclareReservedUnused( IOEthernetController, 22); + OSMetaClassDeclareReservedUnused( IOEthernetController, 23); + OSMetaClassDeclareReservedUnused( IOEthernetController, 24); + OSMetaClassDeclareReservedUnused( IOEthernetController, 25); + OSMetaClassDeclareReservedUnused( IOEthernetController, 26); + OSMetaClassDeclareReservedUnused( IOEthernetController, 27); + OSMetaClassDeclareReservedUnused( IOEthernetController, 28); + OSMetaClassDeclareReservedUnused( IOEthernetController, 29); + OSMetaClassDeclareReservedUnused( 
IOEthernetController, 30); + OSMetaClassDeclareReservedUnused( IOEthernetController, 31); +}; + +/* + * FIXME: remove this. + */ +enum { + kIOEnetPromiscuousModeOff = false, + kIOEnetPromiscuousModeOn = true, + kIOEnetPromiscuousModeAll = true, + kIOEnetMulticastModeOff = false, + kIOEnetMulticastModeFilter = true +}; +typedef bool IOEnetPromiscuousMode; +typedef bool IOEnetMulticastMode; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOETHERNETCONTROLLER_H */ diff --git a/iokit/IOKit/network/IOEthernetInterface.h b/iokit/IOKit/network/IOEthernetInterface.h new file mode 100644 index 000000000..805a01a1f --- /dev/null +++ b/iokit/IOKit/network/IOEthernetInterface.h @@ -0,0 +1,296 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOEthernetInterface.h + * + * HISTORY + * 8-Jan-1999 Joe Liu (jliu) created. + */ + +#ifndef _IOETHERNETINTERFACE_H +#define _IOETHERNETINTERFACE_H + +/*! 
@defined kIOEthernetInterfaceClass + @abstract kIOEthernetInterfaceClass is the name of the + IOEthernetInterface class. */ + +#define kIOEthernetInterfaceClass "IOEthernetInterface" + +/*! @defined kIOActivePacketFilters + @abstract kIOActivePacketFilters is a property of IOEthernetInterface + objects. It has an OSDictionary value. + @discussion The kIOActivePacketFilters property describes the current + set of packet filters that have been successfully activated. Each + entry in the dictionary is a key/value pair consisting of the filter + group name, and an OSNumber describing the set of active filters for + that group. Entries in this dictionary will mirror those in + kIORequiredPacketFilters if the controller has reported success for + all filter change requests from the IOEthernetInterface object. */ + +#define kIOActivePacketFilters "IOActivePacketFilters" + +/*! @defined kIORequiredPacketFilters + @abstract kIORequiredPacketFilters is a property of IOEthernetInterface + objects. It has an OSDictionary value. + @discussion The kIORequiredPacketFilters property describes the current + set of required packet filters. Each entry in the dictionary is a + key/value pair consisting of the filter group name, and an OSNumber + describing the set of required filters for that group. */ + +#define kIORequiredPacketFilters "IORequiredPacketFilters" + +/*! @defined kIOMulticastAddressList + @abstract kIOMulticastAddressList is a property of IOEthernetInterface + objects. It is an OSData object. + @discussion The kIOMulticastAddressList property describes the + list of multicast addresses that are being used by the + controller to match against the destination address of an + incoming frame. */ + +#define kIOMulticastAddressList "IOMulticastAddressList" +#define kIOMulticastFilterData kIOMulticastAddressList + +/* + * Kernel + */ +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include +#include + +/*! 
@class IOEthernetInterface : public IONetworkInterface + @abstract The Ethernet interface object. An Ethernet controller driver, + that is a subclass of IOEthernetController, will instantiate an object + of this class when the driver calls the attachInterface() method. + This interface object will then vend an Ethernet interface to DLIL, + and manage the connection between the controller driver and the upper + networking layers. Drivers will seldom need to subclass + IOEthernetInterface. */ + +class IOEthernetInterface : public IONetworkInterface +{ + OSDeclareDefaultStructors( IOEthernetInterface ) + +private: + struct arpcom * _arpcom; // Arpcom struct allocated + UInt32 _mcAddrCount; // # of multicast addresses + bool _ctrEnabled; // Is controller enabled? + OSDictionary * _supportedFilters; // Controller's supported filters + OSDictionary * _requiredFilters; // The required filters + OSDictionary * _activeFilters; // Currently active filters + bool _controllerLostPower; // true if controller is unusable + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * _reserved; + + + IOReturn enableController(IONetworkController * ctr); + IOReturn setupMulticastFilter(IONetworkController * ctr); + + UInt32 getFilters(const OSDictionary * dict, + const OSSymbol * group); + + bool setFilters(OSDictionary * dict, + const OSSymbol * group, + UInt32 filters); + + IOReturn disableFilter(IONetworkController * ctr, + const OSSymbol * group, + UInt32 filter, + IOOptionBits options = 0); + + IOReturn enableFilter(IONetworkController * ctr, + const OSSymbol * group, + UInt32 filter, + IOOptionBits options = 0); + + int syncSIOCSIFFLAGS(IONetworkController * ctr); + int syncSIOCSIFADDR(IONetworkController * ctr); + int syncSIOCADDMULTI(IONetworkController * ctr); + int syncSIOCDELMULTI(IONetworkController * ctr); + + static int performGatedCommand(void *, void *, void *, void *, void *); + +public: + +/*! 
@function init + @abstract Initialize an IOEthernetInterface instance. + @discussion Instance variables are initialized, and an arpcom + structure is allocated. + @param controller A network controller object that will service + the interface object being initialized. + @result true on success, false otherwise. */ + + virtual bool init( IONetworkController * controller ); + +/*! @function getNamePrefix + @abstract Return a string containing the prefix to use when + creating a BSD name for this interface. + @discussion The BSD name for each interface object is created by + concatenating a string returned by this method, with an unique + unit number assigned by IONetworkStack. + @result A pointer to a constant C string "en". Therefore, Ethernet + interfaces will be registered with BSD as en0, en1, etc. */ + + virtual const char * getNamePrefix() const; + +protected: + +/*! @function free + @abstract Free the IOEthernetInterface instance. + @discussion The memory allocated for the arpcom structure is released, + followed by a call to super::free(). */ + + virtual void free(); + +/*! @function performCommand + @abstract Handle an ioctl command sent to the Ethernet interface. + @discussion This method handles socket ioctl commands sent to the Ethernet + interface from DLIL. Commands recognized and processed by this method are + SIOCSIFADDR, SIOCSIFFLAGS, SIOCADDMULTI, and SIOCDELMULTI. Other commands + are passed to the superclass. + @param controller The controller object. + @param cmd The ioctl command code. + @param arg0 Command argument 0. Generally a pointer to an ifnet structure + associated with the interface. + @param arg1 Command argument 1. + @result A BSD return value defined in bsd/sys/errno.h. */ + + virtual SInt32 performCommand(IONetworkController * controller, + UInt32 cmd, + void * arg0, + void * arg1); + +/*! @function controllerDidOpen + @abstract A notification that the interface has opened the network + controller. 
+ @discussion This method will be called by IONetworkInterface after a + network controller has accepted an open from this interface object. + IOEthernetInterface will first call the implementation in its + superclass, then inspect the controller through properties published + in the registry. This method is called with the arbitration lock held. + @param controller The controller object that was opened. + @result true on success, false otherwise. Returning false will + cause the controller to be closed, and any pending client opens to be + rejected. */ + + virtual bool controllerDidOpen(IONetworkController * controller); + +/*! @function controllerWillClose + @abstract A notification that the interface will close the network + controller. + @discussion This method will simply call super to propagate the method + call. This method is called with the arbitration lock held. + @param controller The controller that is about to be closed. */ + + virtual void controllerWillClose(IONetworkController * controller); + +/*! @function initIfnet + @abstract Initialize the ifnet structure given. + @discussion IOEthernetInterface will initialize this structure in a manner + that is appropriate for Ethernet interfaces, then call super::initIfnet() + to allow the superclass to perform generic interface initialization. + @param ifp Pointer to an ifnet structure obtained earlier through + the getIfnet() method call. + @result true on success, false otherwise. */ + + virtual bool initIfnet(struct ifnet * ifp); + +/*! @function getIfnet + @abstract Get the ifnet structure allocated by the interface object. + @discussion This method returns a pointer to an ifnet structure + that was allocated by a concrete subclass of IONetworkInterface. + IOEthernetInterface will allocate an arpcom structure during init(), + and returns a pointer to that structure when this method is called. + @result Pointer to an ifnet structure. */ + + virtual struct ifnet * getIfnet() const; + +/*! 
@function controllerWillChangePowerState + @abstract Handle a notification that the network controller which is + servicing this interface object is about to transition to a new power state. + @discussion If the controller is about to transition to an unusable state, + and it is currently enabled, then the disable() method on the controller is + called. + @param controller The network controller object. + @param flags Flags that describe the capability of the controller in the new + power state. + @param stateNumber An index to a state in the network controller's + power state array that the controller is switching to. + @param policyMaker A reference to the network controller's policy-maker, + and is also the originator of this notification. + @result The return value is always kIOReturnSuccess. */ + + virtual IOReturn controllerWillChangePowerState( + IONetworkController * controller, + IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker); + +/*! @function controllerDidChangePowerState + @abstract Handle a notification that the network controller which is servicing + this interface object has transitioned to a new power state. + @discussion If the controller did transition to a usable state, and it was + previously disabled due to a previous power change, then it is re-enabled. + @param controller The network controller object. + @param flags Flags that describe the capability of the controller in the new + power state. + @param stateNumber An index to a state in the network controller's + power state array that the controller has switched to. + @param policyMaker A reference to the network controller's policy-maker, + and is also the originator of this notification. + @result The return value is always kIOReturnSuccess. 
*/ + + virtual IOReturn controllerDidChangePowerState( + IONetworkController * controller, + IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker); + + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 0); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 1); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 2); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 3); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 4); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 5); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 6); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 7); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 8); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 9); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 10); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 11); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 12); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 13); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 14); + OSMetaClassDeclareReservedUnused( IOEthernetInterface, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOETHERNETINTERFACE_H */ diff --git a/iokit/IOKit/network/IOEthernetStats.h b/iokit/IOKit/network/IOEthernetStats.h new file mode 100644 index 000000000..15cc89325 --- /dev/null +++ b/iokit/IOKit/network/IOEthernetStats.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOEthernetStats.h - Ethernet MIB statistics definitions. + * + * HISTORY + */ + +#ifndef _IOETHERNETSTATS_H +#define _IOETHERNETSTATS_H + +/*! @header IOEthernetStats.h + @discussion Ethernet statistics. */ + +//--------------------------------------------------------------------------- +// Ethernet-like statistics group. + +/*! @typedef IODot3StatsEntry + @discussion Ethernet MIB statistics structure. + @field alignmentErrors dot3StatsAlignmentErrors. + @field fcsErrors dot3StatsFCSErrors. + @field singleCollisionFrames dot3StatsSingleCollisionFrames. + @field multipleCollisionFrames dot3StatsMultipleCollisionFrames. + @field sqeTestErrors dot3StatsSQETestErrors. + @field deferredTransmissions dot3StatsDeferredTransmissions. + @field lateCollisions dot3StatsLateCollisions. + @field excessiveCollisions dot3StatsExcessiveCollisions. + @field internalMacTransmitErrors dot3StatsInternalMacTransmitErrors. + @field carrierSenseErrors dot3StatsCarrierSenseErrors. + @field frameTooLongs dot3StatsFrameTooLongs. + @field internalMacReceiveErrors dot3StatsInternalMacReceiveErrors. + @field etherChipSet dot3StatsEtherChipSet. + @field missedFrames dot3StatsMissedFrames (not in RFC1650). 
+ */ + +typedef struct { + UInt32 alignmentErrors; + UInt32 fcsErrors; + UInt32 singleCollisionFrames; + UInt32 multipleCollisionFrames; + UInt32 sqeTestErrors; + UInt32 deferredTransmissions; + UInt32 lateCollisions; + UInt32 excessiveCollisions; + UInt32 internalMacTransmitErrors; + UInt32 carrierSenseErrors; + UInt32 frameTooLongs; + UInt32 internalMacReceiveErrors; + UInt32 etherChipSet; + UInt32 missedFrames; +} IODot3StatsEntry; + +//--------------------------------------------------------------------------- +// Ethernet-like collision statistics group (optional). + +/*! @typedef IODot3CollEntry + @discussion Collision statistics structure. + @field collFrequencies dot3StatsCollFrequencies. */ + +typedef struct { + UInt32 collFrequencies[16]; +} IODot3CollEntry; + +//--------------------------------------------------------------------------- +// Receiver extra statistics group (not defined by RFC 1650). + +/*! @typedef IODot3RxExtraEntry + @discussion Extra receiver statistics not defined by RFC1650. + @field overruns receiver overruns. + @field watchdogTimeouts watchdog timer expirations. + @field frameTooShorts runt frames. + @field collisionErrors frames damages by late collision. + @field phyErrors PHY receive errors. + @field timeouts receiver timeouts. + @field interrupts receiver interrupts. + @field resets receiver resets. + @field resourceErrors receiver resource shortages. + */ + +typedef struct { + UInt32 overruns; + UInt32 watchdogTimeouts; + UInt32 frameTooShorts; + UInt32 collisionErrors; + UInt32 phyErrors; + UInt32 timeouts; + UInt32 interrupts; + UInt32 resets; + UInt32 resourceErrors; + UInt32 reserved[4]; +} IODot3RxExtraEntry; + +//--------------------------------------------------------------------------- +// Transmitter extra statistics group (not defined by RFC 1650). + +/*! @typedef IODot3TxExtraEntry + @discussion Extra transmitter statistics not defined by RFC1650. + @field underruns transmit underruns. 
+ @field jabbers jabber events. + @field phyErrors PHY transmit errors. + @field timeouts transmitter timeouts. + @field interrupts transmitter interrupts. + @field resets transmitter resets. + @field resourceErrors transmitter resource shortages. + */ + +typedef struct { + UInt32 underruns; + UInt32 jabbers; + UInt32 phyErrors; + UInt32 timeouts; + UInt32 interrupts; + UInt32 resets; + UInt32 resourceErrors; + UInt32 reserved[4]; +} IODot3TxExtraEntry; + +//--------------------------------------------------------------------------- +// Aggregate Ethernet statistics. + +/*! @typedef IOEthernetStats + @discussion Aggregate Ethernet statistics structure. + @field dot3StatsEntry IODot3StatsEntry statistics group. + @field dot3CollEntry IODot3CollEntry statistics group. + @field dot3RxExtraEntry IODot3RxExtraEntry statistics group. + @field dot3TxExtraEntry IODot3TxExtraEntry statistics group. + */ + +typedef struct { + IODot3StatsEntry dot3StatsEntry; + IODot3CollEntry dot3CollEntry; + IODot3RxExtraEntry dot3RxExtraEntry; + IODot3TxExtraEntry dot3TxExtraEntry; +} IOEthernetStats; + +/*! @defined kIOEthernetStatsKey + @discussion Defines the name of an IONetworkData that contains + an IOEthernetStats. */ + +#define kIOEthernetStatsKey "IOEthernetStatsKey" + +#endif /* !_IOETHERNETSTATS_H */ diff --git a/iokit/IOKit/network/IOGatedOutputQueue.h b/iokit/IOKit/network/IOGatedOutputQueue.h new file mode 100644 index 000000000..37f887fc9 --- /dev/null +++ b/iokit/IOKit/network/IOGatedOutputQueue.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOGatedOutputQueue.h + * + * HISTORY + * + */ + +#ifndef _IOGATEDOUTPUTQUEUE_H +#define _IOGATEDOUTPUTQUEUE_H + +#include +#include +#include +#include + +/*! @class IOGatedOutputQueue : public IOBasicOutputQueue + @abstract An extension of an IOBasicOutputQueue. An IOCommandGate + object is created by this queue and added to a work loop as an + event source. All calls to the target by the consumer thread must + occur with the gate closed. Therefore, all calls to the target of + this type of queue will be serialized with any other thread that + runs on the same work loop context. This is useful for network + drivers that have a tight hardware coupling between the transmit + and receive engines, and a single-threaded hardware access model + is desirable. */ + +class IOGatedOutputQueue : public IOBasicOutputQueue +{ + OSDeclareDefaultStructors( IOGatedOutputQueue ) + +private: + static void gatedOutput(OSObject * owner, + IOGatedOutputQueue * self, + IOMbufQueue * queue, + UInt32 * state); + + static void restartDeferredOutput(OSObject * owner, + IOInterruptEventSource * sender, + int count); + +protected: + IOCommandGate * _gate; + IOInterruptEventSource * _interruptSrc; + +/*! @function output + @abstract Transfer all packets in the mbuf queue to the target. + @param queue A queue of output packets. 
+ @param state Return a state bit defined by IOBasicOutputQueue that + declares the new state of the queue following this method call. + A kStateStalled is returned if the queue should stall, otherwise 0 + is returned. */ + + virtual void output(IOMbufQueue * queue, UInt32 * state); + +/*! @function free + @abstract Free the IOGatedOutputQueue object. + @discussion Release allocated resources, then call super::free(). */ + + virtual void free(); + +/*! @function output + @abstract Override the method inherited from IOOutputQueue. + @result true if a thread was successfully scheduled to service + the queue. */ + + virtual bool scheduleServiceThread(void * param); + +public: + +/*! @function init + @abstract Initialize an IOGatedOutputQueue object. + @param target The object that will handle packets removed from the + queue, and is usually a subclass of IONetworkController. + @param action The function that will handle packets removed from the + queue. + @param workloop A workloop object. An IOCommandGate object is created + and added to this workloop as an event source. + @param capacity The initial capacity of the output queue. + @result true if initialized successfully, false otherwise. */ + + virtual bool init(OSObject * target, + IOOutputAction action, + IOWorkLoop * workloop, + UInt32 capacity = 0); + +/*! @function withTarget + @abstract Factory method that will construct and initialize an + IOGatedOutputQueue object. + @param target An IONetworkController object that will handle packets + removed from the queue. + @param workloop A workloop object. An IOCommandGate object is created + and added to this workloop as an event source. + @param capacity The initial capacity of the output queue. + @result An IOGatedOutputQueue object on success, or 0 otherwise. */ + + static IOGatedOutputQueue * withTarget(IONetworkController * target, + IOWorkLoop * workloop, + UInt32 capacity = 0); + +/*! 
@function withTarget + @abstract Factory method that will construct and initialize an + IOGatedOutputQueue object. + @param target The object that will handle packets removed from the + queue. + @param action The function that will handle packets removed from the + queue. + @param workloop A workloop object. An IOCommandGate object is created + and added to this workloop as an event source. + @param capacity The initial capacity of the output queue. + @result An IOGatedOutputQueue object on success, or 0 otherwise. */ + + static IOGatedOutputQueue * withTarget(OSObject * target, + IOOutputAction action, + IOWorkLoop * workloop, + UInt32 capacity = 0); +}; + +#endif /* !_IOGATEDOUTPUTQUEUE_H */ diff --git a/iokit/IOKit/network/IOKernelDebugger.h b/iokit/IOKit/network/IOKernelDebugger.h new file mode 100644 index 000000000..967e56bbb --- /dev/null +++ b/iokit/IOKit/network/IOKernelDebugger.h @@ -0,0 +1,304 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * IOKernelDebugger.cpp + * + * HISTORY + * + */ + +#ifndef _IOKERNELDEBUGGER_H +#define _IOKERNELDEBUGGER_H + +#include + +/*! @typedef IODebuggerRxHandler + @discussion Defines the receive handler that must be implemented + by the target to service KDP receive requests. This handler is called + by kdpReceiveDispatcher(). + @param target The target object. + @param buffer KDP receive buffer. The buffer allocated has room for + 1518 bytes. The receive handler must not overflow this buffer. + @param length The amount of data received and placed into the buffer. + Set to 0 if no frame was received during the poll interval. + @param timeout The amount of time to poll in milliseconds while waiting + for a frame to arrive. */ + +typedef void (*IODebuggerRxHandler)( IOService * target, + void * buffer, + UInt32 * length, + UInt32 timeout ); + +/*! @typedef IODebuggerTxHandler + @discussion Defines the transmit handler that must be implemented + by the target to service KDP transmit requests. This handler is called + by kdpTransmitDispatcher(). + @param target The target object. + @param buffer KDP transmit buffer. This buffer contains a KDP frame + to be sent on the network. + @param length The number of bytes in the transmit buffer. */ + +typedef void (*IODebuggerTxHandler)( IOService * target, + void * buffer, + UInt32 length ); + +/*! @typedef IODebuggerLockState + @discussion Defines flags returned by IOKernelDebugger::lock(). + @constant kIODebuggerLockTaken Set if the debugger lock was taken. */ + +typedef enum { + kIODebuggerLockTaken = 0x1, +} IODebuggerLockState; + +/*! class IOKernelDebugger : public IOService + @abstract Kernel debugger nub. + @discussion This object interfaces with the KDP + (kernel debugger protocol) module and dispatches KDP requests to its + target (provider). 
The target, designated as the debugger device, must + implement a pair of handler functions that are called to handle KDP + transmit and receive requests during a debugging session. Only a single + IOKernelDebugger in the system can be active at a given time. The + active IOKernelDebugger is the one that has an IOKDP object attached + as a client. + + The debugger device is usually a subclass of IOEthernetController. + However, any IOService can service an IOKernelDebugger client, + implement the two polled mode handlers, and transport the KDP + packets through a data channel. However, KDP assumes that the + debugger device is an Ethernet interface and therefore it will + always send, and expect to receive, an Ethernet frame. */ + +class IOKernelDebugger : public IOService +{ + OSDeclareDefaultStructors( IOKernelDebugger ) + +protected: + IOService * _target; // target (provider) + IODebuggerTxHandler _txHandler; // target's transmit handler. + IODebuggerRxHandler _rxHandler; // target's receive handler. + IOService * _client; // client that has opened us. + bool _pmDisabled; // true if disabled by PM. + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * _reserved; + + + static void pmEnableDebugger( IOKernelDebugger * debugger ); + static void pmDisableDebugger( IOKernelDebugger * debugger ); + +/*! @function kdpReceiveDispatcher + @abstract The KDP receive dispatch function. + @discussion Field KDP receive requests, then dispatches the call to the + registered receiver handler. + @param buffer KDP receive buffer. The buffer allocated by KDP has room + for 1518 bytes. The receive handler must not overflow this buffer. + @param length The amount of data received and placed into the buffer. + Set to 0 if a frame was not received during the poll interval. + @param timeout The amount of time to poll in milliseconds while waiting + for a frame to arrive. 
*/ + + static void kdpReceiveDispatcher(void * buffer, + UInt32 * length, + UInt32 timeout); + +/*! @function kdpTransmitDispatcher + @abstract The KDP transmit dispatch function. + @discussion Field KDP transmit requests, then dispatches the call to the + registered transmit handler. + @param buffer KDP transmit buffer. This buffer contains a KDP frame to + be sent on the network. + @param length The number of bytes in the transmit buffer. */ + + static void kdpTransmitDispatcher(void * buffer, UInt32 length); + +/*! @function free + @abstract Free the IOKernelDebugger instance. */ + + virtual void free(); + +/*! @function nullTxHandler + @abstract Null transmit handler. + @discussion This function is registered as the transmit handler when an + IOKernelDebugger object surrenders its status as the active debugger nub. + Until another IOKernelDebugger object gets promoted, this function will + handle polled transmit requests from KDP. This function does nothing + useful. */ + + static void nullTxHandler( IOService * target, + void * buffer, + UInt32 length ); + +/*! @function nullRxHandler + @abstract Null receive handler. + @discussion This function is registered as the receive handler when an + IOKernelDebugger object surrenders its status as the active debugger nub. + Until another IOKernelDebugger object gets promoted, this function will + handle polled receive requests from KDP. This function does nothing + except to log a warning message. */ + + static void nullRxHandler( IOService * target, + void * buffer, + UInt32 * length, + UInt32 timeout ); + +/*! @function registerHandler + @abstract Register the target and the handler functions. + @discussion This method is called by handleOpen() and handleClose() + to register or unregister the target and its handler functions. + @param target The target object. + @param txHandler The transmit handler function. The null handler is + registered if the argument is zero. + @param rxHandler The receive handler function. 
The null handler is + registered if the argument is zero. */ + + static void registerHandler( IOService * target, + IODebuggerTxHandler txHandler = 0, + IODebuggerRxHandler rxHandler = 0 ); + +/*! @function powerStateWillChangeTo + @abstract Handle notification that the network controller will change + power state. + @discussion If the controller is about to become unusable, then the + controller's handlers are unregistered, and the controller disabled. + @param flags Describe the capability of the controller in the new power + state. + @param stateNumber The number of the state in the state array that the + controller is switching to. + @param policyMaker The policy-maker that manages the controller's + power state. + @result The constant 3000000, to indicate a maximum of 3 seconds for the + preparation to complete, and an acknowledgement delivered to the + policy-maker. */ + + virtual IOReturn powerStateWillChangeTo( IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ); + +/*! @function powerStateDidChangeTo + @abstract Handle notification that the network controller did change + power state. + @discussion If the controller became usable, then the controller is + re-enabled, and the controller's handlers are re-registered. + @param flags Describe the capability of the controller in the new power + state. + @param stateNumber The number of the state in the state array that the + controller is switching to. + @param policyMaker The policy-maker that manages the controller's + power state. + @result The constant 3000000, to indicate a maximum of 3 seconds for the + preparation to complete, and an acknowledgement delivered to the + policy-maker. */ + + virtual IOReturn powerStateDidChangeTo( IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ); + +/*! @function handleOpen + @abstract Handle a client open. 
+ @discussion This method is called by IOService::open() to handle an + open from a client (IOKDP) with the arbitration lock held. + @param forClient The client (IOKDP) requesting the open. + @param options Options passed to the open() call. Not used. + @param arg A family defined argument passed to the open() call. Not used. + @result true on success, false otherwise. */ + + virtual bool handleOpen( IOService * forClient, + IOOptionBits options, + void * arg ); + +/*! @function handleClose + @abstract Handle a client close. + @discussion This method is called by IOService::close() to handle a + close from a client with the arbitration lock held. + @param forClient The client (IOKDP) requesting the close. + @param options Options passed to the close() call. Not used. */ + + virtual void handleClose( IOService * forClient, + IOOptionBits options ); + +/*! @function handleIsOpen + @abstract Query whether a client has an open on this object. + @discussion This method is called by IOService::isOpen() with the + arbitration lock held. + @result true if the specified client, or any client if none (0) is + specified, presently has an open on this object. */ + + virtual bool handleIsOpen( const IOService * forClient ) const; + +public: + +/*! @function lock + @abstract Take the debugger lock conditionally. + @discussion Take the debugger lock if the object given matches the + target registered by registerHandler(). + @param target The target or provider of an IOKernelDebugger object. + @result kIODebuggerLockTaken if the lock was taken, or 0 otherwise. */ + + static IODebuggerLockState lock( IOService * target ); + +/*! @function unlock + @abstract Release the debugger lock. + @discussion Release the debugger lock if the kIODebuggerLockTaken flag is + set in the argument. */ + + static void unlock( IODebuggerLockState state ); + +/*! @function init + @abstract Initialize an IOKernelDebugger instance. + @param target The target object that implements the debugger handlers. 
+ @param txHandler The target's transmit handler. A pointer to a 'C' function. + @param rxHandler The target's receive handler. A pointer to a 'C' function. + @result true if the instance initialized successfully, false otherwise. */ + + virtual bool init( IOService * target, + IODebuggerTxHandler txHandler, + IODebuggerRxHandler rxHandler ); + +/*! @function debugger + @abstract A factory method that performs allocation and initialization + of an IOKernelDebugger object. + @param target The target object that implements the debugger handlers. + @param txHandler The target's transmit handler. A pointer to a 'C' function. + @param rxHandler The target's receive handler. A pointer to a 'C' function. + @result An IOKernelDebugger instance on success, 0 otherwise. */ + + static IOKernelDebugger * debugger( IOService * target, + IODebuggerTxHandler txHandler, + IODebuggerRxHandler rxHandler ); + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IOKernelDebugger, 0); + OSMetaClassDeclareReservedUnused( IOKernelDebugger, 1); + OSMetaClassDeclareReservedUnused( IOKernelDebugger, 2); + OSMetaClassDeclareReservedUnused( IOKernelDebugger, 3); +}; + +// Concise form of the lock()/unlock() static member functions. +// +#define IODebuggerLock IOKernelDebugger::lock +#define IODebuggerUnlock IOKernelDebugger::unlock + +#endif /* !_IOKERNELDEBUGGER_H */ diff --git a/iokit/IOKit/network/IOMbufMemoryCursor.h b/iokit/IOKit/network/IOMbufMemoryCursor.h new file mode 100644 index 000000000..6150c58bb --- /dev/null +++ b/iokit/IOKit/network/IOMbufMemoryCursor.h @@ -0,0 +1,438 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOMbufMemoryCursor.h created by gvdl on 1999-1-20 + * + */ + +#ifndef _IOKIT_NETWORK_IOMBUFMEMORYCURSOR_H +#define _IOKIT_NETWORK_IOMBUFMEMORYCURSOR_H + +#include + +struct mbuf; + +/*! @class IOMbufMemoryCursor : public IOMemoryCursor + @abstract A mechanism to convert mbuf chains to physical addresses. + @discussion The IOMbufMemoryCursor defines the super class that all + specific mbuf cursors must inherit from, but a mbuf cursor can be created + without a specific formal subclass by just providing a segment function to + the initializers. This class performs the task of walking a given + mbuf chain and creating a physical scatter/gather list appropriate for + the target hardware. When necessary, this class may also coalesce + mbuf chains when the generated scatter/gather list exceeds the specified + hardware limit. However, this should be avoided since it exacts a + performance cost. +

+ A driver is expected to create a mbuf cursor and configure it to match the + limitations of it's DMA hardware; for instance the mbuf cursor used by + an Ethernet controller driver may have a maximum physical segment size + of 1520, and allow for up to 6 physical segments. Thus it would create a + mbuf cursor with a maxSegmentSize of 1520 and a maxNumSegments of 6. + The driver may choose to supply an OutputSegmentFunc function to + format the output of each scatter/gather segment to match the + hardware descriptor format, or it may use a subclass of + IOMbufMemoryCursor to generate IOPhysicalSegment segments with + various byte orders. +

+ A driver may also create more than one mbuf cursor, perhaps one + dedicated for the transmit thread, and the other for the receive thread. + This becomes a requirement when the driver is multi-threaded, since the + mbuf cursor maintains state and does not support reentrancy. */ + +class IOMbufMemoryCursor : public IOMemoryCursor +{ + OSDeclareAbstractStructors(IOMbufMemoryCursor) + +protected: + UInt32 maxNumSegments; + UInt32 coalesceCount; + UInt32 packetTooBigErrors; + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + virtual bool initWithSpecification(OutputSegmentFunc outSeg, + UInt32 maxSegmentSize, + UInt32 maxTransferSize, + UInt32 align); + +public: +/*! @function initWithSpecification + @abstract Primary initializer for the IOMbufMemoryCursor class. + @param outSeg Function to call to output one physical segment. + @param maxSegmentSize Maximum allowable size for one segment. + @param maxNumSegments Maximum number of segments. + @result true if the inherited classes and this instance initialized + successfully. */ + + virtual bool initWithSpecification(OutputSegmentFunc outSeg, + UInt32 maxSegmentSize, + UInt32 maxNumSegments); + +/*! @function genPhysicalSegments + @abstract Generate a physical scatter/gather list given a mbuf packet. + @discussion Generates a list of physical segments from the given mbuf. + @param packet The mbuf packet. + @param vector Void pointer to base of output physical scatter/gather list. + Always passed directly onto the OutputSegmentFunc without interpretation + by the cursor. + @param maxSegs Maximum number of segments that can be written to segments + array. + @param doCoalesce Set to true to perform coalescing when the required + number of segments exceeds the specified limit, otherwise abort and + return 0. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. 
*/ + + virtual UInt32 genPhysicalSegments(struct mbuf * packet, void * vector, + UInt32 maxSegs, bool doCoalesce); + +/*! @function getAndResetCoalesceCount + @abstract Returns a count of the total number of mbuf chains coalesced + by genPhysicalSegments(). The counter is then reset to 0. + @result The coalesce count. */ + + UInt32 getAndResetCoalesceCount(); + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IOMbufMemoryCursor, 0); + OSMetaClassDeclareReservedUnused( IOMbufMemoryCursor, 1); + OSMetaClassDeclareReservedUnused( IOMbufMemoryCursor, 2); + OSMetaClassDeclareReservedUnused( IOMbufMemoryCursor, 3); +}; + + +/*! @class IOMbufNaturalMemoryCursor : public IOMbufMemoryCursor + @abstract A IOMbufMemoryCursor subclass that outputs a vector of + IOPhysicalSegments in the natural byte orientation for the cpu. + @discussion The IOMbufNaturalMemoryCursor would be used when it is too + difficult to implement an OutputSegmentFunc that is more appropriate for + your hardware. This cursor just outputs an array of IOPhysicalSegments. */ + +class IOMbufNaturalMemoryCursor : public IOMbufMemoryCursor +{ + OSDeclareDefaultStructors(IOMbufNaturalMemoryCursor) + +public: + +/*! @function withSpecification + @abstract Factory function to create and initialize an + IOMbufNaturalMemoryCursor in one operation, see + IOMbufMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. + @param maxNumSegments Maximum number of segments. + @result A new mbuf cursor if successfully created and initialized, + 0 otherwise. */ + + static IOMbufNaturalMemoryCursor * withSpecification(UInt32 maxSegmentSize, + UInt32 maxNumSegments); + +/*! @function getPhysicalSegments + @abstract Generate a cpu natural physical scatter/gather list from a given + mbuf. + @param packet The mbuf packet. + @param vector Pointer to an array of IOPhysicalSegments for the output + physical scatter/gather list. 
+ @param numVectorSegments Maximum number of IOPhysicalSegments accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegments(struct mbuf * packet, + struct IOPhysicalSegment * vector, + UInt32 numVectorSegments = 0); + +/*! @function getPhysicalSegmentsWithCoalesce + @abstract Generate a cpu natural physical scatter/gather list from a given + mbuf. + @discussion Generate a cpu natural physical scatter/gather list from a + given mbuf. Coalesce mbuf chain when the number of segments in the + scatter/gather list exceeds numVectorSegments. + @param packet The mbuf packet. + @param vector Pointer to an array of IOPhysicalSegments for the output + physical scatter/gather list. + @param numVectorSegments Maximum number of IOPhysicalSegments accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegmentsWithCoalesce(struct mbuf * packet, + struct IOPhysicalSegment * vector, + UInt32 numVectorSegments = 0); +}; + +//=========================================================================== +//=========================================================================== + +/*! @class IOMbufBigMemoryCursor : public IOMbufMemoryCursor + @abstract A IOMbufMemoryCursor subclass that outputs a vector of + IOPhysicalSegments in the big endian byte order. + @discussion The IOMbufBigMemoryCursor would be used when the DMA hardware + requires a big endian address and length pair. This cursor outputs an + array of IOPhysicalSegments that are encoded in big-endian format. */ + +class IOMbufBigMemoryCursor : public IOMbufMemoryCursor +{ + OSDeclareDefaultStructors(IOMbufBigMemoryCursor) + +public: + +/*! @function withSpecification + @abstract Factory function to create and initialize an + IOMbufBigMemoryCursor in one operation, see + IOMbufMemoryCursor::initWithSpecification. 
+ @param maxSegmentSize Maximum allowable size for one segment. + @param maxNumSegments Maximum number of segments. + @result A new mbuf cursor if successfully created and initialized, + 0 otherwise. */ + + static IOMbufBigMemoryCursor * withSpecification(UInt32 maxSegmentSize, + UInt32 maxNumSegments); + +/*! @function getPhysicalSegments + @abstract Generate a big endian physical scatter/gather list from a given + mbuf. + @param packet The mbuf packet. + @param vector Pointer to an array of IOPhysicalSegments for the output + physical scatter/gather list. + @param numVectorSegments Maximum number of IOPhysicalSegments accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegments(struct mbuf * packet, + struct IOPhysicalSegment * vector, + UInt32 numVectorSegments = 0); + +/*! @function getPhysicalSegmentsWithCoalesce + @abstract Generate a big endian physical scatter/gather list from a given + mbuf. + @discussion Generate a big endian physical scatter/gather list from a + given mbuf. Coalesce mbuf chain when the number of segments in the + scatter/gather list exceeds numVectorSegments. + @param packet The mbuf packet. + @param vector Pointer to an array of IOPhysicalSegments for the output + physical scatter/gather list. + @param numVectorSegments Maximum number of IOPhysicalSegments accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegmentsWithCoalesce(struct mbuf * packet, + struct IOPhysicalSegment * vector, + UInt32 numVectorSegments = 0); +}; + +//=========================================================================== +//=========================================================================== + +/*! @class IOMbufLittleMemoryCursor : public IOMbufMemoryCursor + @abstract A IOMbufMemoryCursor subclass that outputs a vector of + IOPhysicalSegments in the little endian byte order. 
+ @discussion The IOMbufLittleMemoryCursor would be used when the DMA + hardware requires a little endian address and length pair. This cursor + outputs an array of IOPhysicalSegments that are encoded in little endian + format. */ + +class IOMbufLittleMemoryCursor : public IOMbufMemoryCursor +{ + OSDeclareDefaultStructors(IOMbufLittleMemoryCursor) + +public: + +/*! @function withSpecification + @abstract Factory function to create and initialize an + IOMbufLittleMemoryCursor in one operation, see + IOMbufMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. + @param maxNumSegments Maximum number of segments. + @result A new mbuf cursor if successfully created and initialized, + 0 otherwise. */ + + static IOMbufLittleMemoryCursor * withSpecification(UInt32 maxSegmentSize, + UInt32 maxNumSegments); + +/*! @function getPhysicalSegments + @abstract Generate a little endian physical scatter/gather list from a + given mbuf. + @param packet The mbuf packet. + @param vector Pointer to an array of IOPhysicalSegments for the output + physical scatter/gather list. + @param numVectorSegments Maximum number of IOPhysicalSegments accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegments(struct mbuf * packet, + struct IOPhysicalSegment * vector, + UInt32 numVectorSegments = 0); + +/*! @function getPhysicalSegmentsWithCoalesce + @abstract Generate a little endian physical scatter/gather list from a + given mbuf. + @discussion Generate a little endian physical scatter/gather list from a + given mbuf. Coalesce mbuf chain when the number of segments in the + scatter/gather list exceeds numVectorSegments. + @param packet The mbuf packet. + @param vector Pointer to an array of IOPhysicalSegments for the output + physical scatter/gather list. + @param numVectorSegments Maximum number of IOPhysicalSegments accepted. 
+ @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegmentsWithCoalesce(struct mbuf * packet, + struct IOPhysicalSegment * vector, + UInt32 numVectorSegments = 0); +}; + +#ifdef __ppc__ + +struct IODBDMADescriptor; + +//=========================================================================== +//=========================================================================== + +/*! @class IOMbufDBDMAMemoryCursor : public IOMbufMemoryCursor + @abstract A IOMbufMemoryCursor subclass that outputs a vector of + IODBDMADescriptors. */ + +class IOMbufDBDMAMemoryCursor : public IOMbufMemoryCursor +{ + OSDeclareDefaultStructors(IOMbufDBDMAMemoryCursor) + +public: + +/*! @function withSpecification + @abstract Factory function to create and initialize an + IOMbufDBDMAMemoryCursor in one operation, see + IOMbufMemoryCursor::initWithSpecification. + @param maxSegmentSize Maximum allowable size for one segment. + @param maxNumSegments Maximum number of segments. + @result A new mbuf cursor if successfully created and initialized, + 0 otherwise. */ + + static IOMbufDBDMAMemoryCursor * withSpecification(UInt32 maxSegmentSize, + UInt32 maxNumSegments); + +/*! @function getPhysicalSegments + @abstract Generate a DBDMA descriptor list from a given mbuf. + @param packet The mbuf packet. + @param vector Pointer to an array of IODBDMADescriptor for the output list. + @param numVectorSegments Maximum number of IODBDMADescriptors accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegments(struct mbuf * packet, + struct IODBDMADescriptor *vector, + UInt32 numVectorSegments = 0); + +/*! @function getPhysicalSegmentsWithCoalesce + @abstract Generate a DBDMA descriptor list from a given mbuf. + @discussion Generate a DBDMA descriptor list from a given mbuf. + Coalesce mbuf chain when the number of elements in the list exceeds + numVectorSegments. 
+ @param packet The mbuf packet. + @param vector Pointer to an array of IODBDMADescriptor for the output list. + @param numVectorSegments Maximum number of IODBDMADescriptors accepted. + @result The number of segments that were filled in is returned, or + 0 if an error occurred. */ + + UInt32 getPhysicalSegmentsWithCoalesce(struct mbuf * packet, + struct IODBDMADescriptor * vector, + UInt32 numVectorSegments = 0); +}; + +#endif /* __ppc__ */ + +inline UInt32 IOMbufMemoryCursor::getAndResetCoalesceCount() +{ + UInt32 cnt = coalesceCount; coalesceCount = 0; return cnt; +} + +inline UInt32 +IOMbufNaturalMemoryCursor::getPhysicalSegments(struct mbuf *packet, + struct IOPhysicalSegment *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, false); +} + +inline UInt32 +IOMbufNaturalMemoryCursor::getPhysicalSegmentsWithCoalesce(struct mbuf *packet, + struct IOPhysicalSegment *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, true); +} + +inline UInt32 +IOMbufBigMemoryCursor::getPhysicalSegments(struct mbuf *packet, + struct IOPhysicalSegment *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, false); +} + +inline UInt32 +IOMbufBigMemoryCursor::getPhysicalSegmentsWithCoalesce(struct mbuf *packet, + struct IOPhysicalSegment *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, true); +} + +inline UInt32 +IOMbufLittleMemoryCursor::getPhysicalSegments(struct mbuf *packet, + struct IOPhysicalSegment *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, false); +} + +inline UInt32 +IOMbufLittleMemoryCursor::getPhysicalSegmentsWithCoalesce(struct mbuf *packet, + struct IOPhysicalSegment *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, true); +} + 
+#ifdef __ppc__ +inline UInt32 +IOMbufDBDMAMemoryCursor::getPhysicalSegments(struct mbuf *packet, + struct IODBDMADescriptor *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, false); +} + +inline UInt32 +IOMbufDBDMAMemoryCursor::getPhysicalSegmentsWithCoalesce(struct mbuf *packet, + struct IODBDMADescriptor *vector, + UInt32 numVectorSegments = 0) +{ + return genPhysicalSegments(packet, vector, numVectorSegments, true); +} +#endif /* __ppc__ */ + +#endif /* !_IOKIT_NETWORK_IOMBUFMEMORYCURSOR_H */ + diff --git a/iokit/IOKit/network/IONetworkController.h b/iokit/IOKit/network/IONetworkController.h new file mode 100644 index 000000000..e9ca383e6 --- /dev/null +++ b/iokit/IOKit/network/IONetworkController.h @@ -0,0 +1,1375 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkController.h + * + * Network controller driver superclass. + * + * HISTORY + * 9-Dec-1998 Joe Liu (jliu) created. 
+ * + */ + +#ifndef _IONETWORKCONTROLLER_H +#define _IONETWORKCONTROLLER_H + +/*! @defined kIONetworkControllerClass + @abstract kIONetworkControllerClass is the name of the + IONetworkController class. */ + +#define kIONetworkControllerClass "IONetworkController" + +/*! @defined kIOVendor + @abstract kIOVendor is a property of IONetworkController objects. + It has an OSString value. + @discussion The kIOVendor property is a string describing the + vendor of the network controller. */ + +#define kIOVendor "IOVendor" + +/*! @defined kIOModel + @abstract kIOModel is a property of IONetworkController objects. + It has an OSString value. + @discussion The kIOModel property is a string describing the + model of the network controller. */ + +#define kIOModel "IOModel" + +/*! @defined kIORevision + @abstract kIORevision is a property of IONetworkController objects. + It has an OSString value. + @discussion The kIORevision property is a string describing the + revision level of the network controller. */ + +#define kIORevision "IORevision" + +/*! @defined kIOFeatures + @abstract kIOFeatures is a property of IONetworkController objects. + It has an OSNumber value. + @discussion The kIOFeatures property describes generic features + defined by IONetworkController that are supported by the + network controller. */ + +#define kIOFeatures "IOFeatures" + +/*! @defined kIOMediumDictionary + @abstract kIOMediumDictionary is a property of IONetworkController + objects. It has an OSDictionary value. + @discussion The kIOMediumDictionary property is a container for the + collection of IONetworkMedium objects that represent the media + types supported by the network controller. + Each entry in the dictionary is a key/value pair consisting of + the medium name, and a dictionary value that contains the + properties for that medium entry. */ + +#define kIOMediumDictionary "IOMediumDictionary" + +/*! 
@defined kIODefaultMedium + @abstract kIODefaultMedium is a property of IONetworkController + objects. It has an OSString value. + @discussion The kIODefaultMedium property describes the name of the + default medium. This definition may change or disappear in the + future. */ + +#define kIODefaultMedium "IODefaultMedium" + +/*! @defined kIOSelectedMedium + @abstract kIOSelectedMedium is a property of IONetworkController + objects. It has an OSSymbol value. + @discussion The kIOSelectedMedium property describes the name of the + current selected medium. This name can be used as a key into the + medium dictionary to gather additional information about the + selected medium. */ + +#define kIOSelectedMedium "IOSelectedMedium" + +/*! @defined kIOActiveMedium + @abstract kIOActiveMedium is a property of IONetworkController + objects. It has an OSSymbol value. + @discussion The kIOActiveMedium property describes the name of the + active medium. This is the name of the medium where an active + link has been established. This name can be used as a key into + the medium dictionary to gather additional information about the + active medium. */ + +#define kIOActiveMedium "IOActiveMedium" + +/*! @defined kIOLinkSpeed + @abstract kIOLinkSpeed is a property of IONetworkController + objects. It has an OSNumber value. + @discussion The kIOLinkSpeed property describes the speed of the + link established over the active medium in bits per second. */ + +#define kIOLinkSpeed "IOLinkSpeed" + +/*! @defined kIOLinkStatus + @abstract kIOLinkStatus is a property of IONetworkController + objects. It has an OSNumber value. + @discussion The kIOLinkStatus property describes the current network + link status. See IONetworkMedium for the definition of the link + status bits. */ + +#define kIOLinkStatus "IOLinkStatus" + +/*! @defined kIOLinkData + @abstract kIOLinkData is a property of IONetworkController + objects. It has an OSData value. 
+ @discussion The kIOLinkData property contains additional information, + that describes the active link that was established. + Its interpretation is not defined. */ + +#define kIOLinkData "IOLinkData" + +/*! @defined kIOPacketFilters + @abstract kIOPacketFilters is a property of IONetworkController + objects. It has an OSDictionary value. + @discussion The kIOPacketFilters property describes the entire + set of packet filters supported by the controller. Each entry + in the dictionary is a key/value pair consisting of the filter + group name, and an OSNumber describing the set of supported + filters for that group. */ + +#define kIOPacketFilters "IOPacketFilters" + +/*! @defined kIOMACAddress + @abstract kIOMACAddress is a property of IONetworkController + objects. It has an OSData value. + @discussion The kIOMACAddress property describes the hardware + MAC (media access controller) address, or station address, + of the network controller. */ + +#define kIOMACAddress "IOMACAddress" + +/*! @defined kIOMaxPacketSize + @abstract kIOMaxPacketSize is a property of IONetworkController + objects. It has an OSNumber value. + @discussion The kIOMaxPacketSize property describes the maximum + packet size supported by the controller. */ + +#define kIOMaxPacketSize "IOMaxPacketSize" + +/*! @defined kIOMinPacketSize + @abstract kIOMinPacketSize is a property of IONetworkController + objects. It has an OSNumber value. + @discussion The kIOMinPacketSize property describes the minimum + packet size supported by the controller. */ + +#define kIOMinPacketSize "IOMinPacketSize" + +/*! @defined kIONetworkFilterGroup + @abstract kIONetworkFilterGroup describes the name assigned to the + standard network filter group. */ + +#define kIONetworkFilterGroup "IONetworkFilterGroup" + +/*! @enum Enumeration of standard packet filters. + @discussion An enumeration of all standard packet filters. 
Each + filter will allow the reception of certain class of packets + depending on its destination MAC address. + @constant kIOPacketFilterUnicast Reception of unicast packets. + @constant kIOPacketFilterBroadcast Reception of broadcast packets. + @constant kIOPacketFilterMulticast Reception of multicast packets + addressed to a set of multicast addresses. + @constant kIOPacketFilterMulticastAll Reception of all multicast + packets. + @constant kIOPacketFilterPromiscuous Reception of all packets. + @constant kIOPacketFilterPromiscuousAll Reception of all packets, + including bad packets. */ + +enum { + kIOPacketFilterUnicast = 0x1, + kIOPacketFilterBroadcast = 0x2, + kIOPacketFilterMulticast = 0x10, + kIOPacketFilterMulticastAll = 0x20, + kIOPacketFilterPromiscuous = 0x100, + kIOPacketFilterPromiscuousAll = 0x200, +}; + +/*! @enum Feature flags returned by the getFeatures() method. + @constant kIONetworkFeatureNoBSDWait Set this bit in the value + returned by getFeatures() to disable the automatic wait for + "IOBSD" resource by the IONetworkController::start() method. */ + +enum { + kIONetworkFeatureNoBSDWait = 0x01, +}; + +/* + * Kernel + */ +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include +#include +#include + +struct mbuf; // forward declarations +class IOCommandGate; +class IOOutputQueue; +class IONetworkMedium; + +/*! @typedef IOPacketBufferConstraints + @discussion Constraint parameters, specified by a driver, + for the data buffer in a packet mbuf. This is observed by + allocatePacket() to satisfy the stated requirements. + @field alignStart Starting address byte alignment. + @field alignLength Buffer length byte alignment. */ + +typedef struct { + UInt32 alignStart; + UInt32 alignLength; + UInt32 reserved[6]; +} IOPacketBufferConstraints; + +// Some frequently used alignment constants. 
+// +enum { + kIOPacketBufferAlign1 = 1, + kIOPacketBufferAlign2 = 2, + kIOPacketBufferAlign4 = 4, + kIOPacketBufferAlign8 = 8, + kIOPacketBufferAlign16 = 16, + kIOPacketBufferAlign32 = 32, +}; + +/*! @defined gIONetworkFilterGroup + @discussion gIONetworkFilterGroup is an OSSymbol object that contains + the name of the standard network filter group as defined by + kIONetworkFilterGroup. */ + +extern const OSSymbol * gIONetworkFilterGroup; + +/*! @class IONetworkController : public IOService + @abstract IONetworkController implements the framework for a generic + network controller. A subclass of IONetworkController must provide + additional functionality specific for a particular networking type. + In addition, the driver must implement (override) a basic set of + hardware dependent methods to create a working driver. + + IONetworkController attaches itself to the data link layer (DLIL) via + an IONetworkInterface object. A controller object without a companion + interface is not accessible to the networking system. The controller + interacts with DLIL by calling methods defined by the interface object. + And conversely, DLIL will issue commands and packets to the controller + through the interface object. + + IONetworkController will create an IOCommandGate and attach this + event source to an IOWorkLoop object. All commands sent from the + interface object are handled through the IOCommandGate object, + which will serialize access to the controller. Outbound packets sent + from the interface to the controller have no implicit serialization. + Drivers must implement an output function that is thread safe, or use + an IOOutputQueue object which will provide a serialization model. 
+ */ + +class IONetworkController : public IOService +{ + OSDeclareAbstractStructors( IONetworkController ) + +private: + + IOWorkLoop * _workLoop; + IOCommandGate * _cmdGate; + IOOutputQueue * _outputQueue; + OSSet * _clientSet; + OSCollectionIterator * _clientSetIter; + OSObject * _cmdClient; + UInt32 _alignStart; + UInt32 _alignLength; + UInt32 _alignPadding; + bool _propertiesPublished; + IOLock * _mediumLock; + IODebuggerLockState _debugLockState; + SInt32 _debugLockCount; + OSNumber * _linkStatus; + OSNumber * _linkSpeed; + const OSData * _lastLinkData; + const OSSymbol * _lastActiveMediumName; + const OSSymbol * _lastCurrentMediumName; + struct mbuf * _freeList; + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * _reserved; + + + bool _broadcastEvent(UInt32 type, void * data = 0); + + static void debugRxHandler(IOService * handler, + void * buffer, + UInt32 * length, + UInt32 timeout); + + static void debugTxHandler(IOService * handler, + void * buffer, + UInt32 length); + + static IOReturn executeCommandAction(OSObject * owner, + void * arg0, + void * arg1, + void * arg2, + void * arg3); + + static IOReturn handleCommand(void * target, + void * param0, + void * param1, + void * param2, + void * param3); + +public: + +/*! @function initialize + @abstract IONetworkController class initializer. + @discussion Create frequently used OSSymbol objects that are used as keys. + This method is called explicitly by a line in IOStartIOKit.cpp and not + by the OSDefineMetaClassAndInit() mechanism, to ensure that this method + is called after the OSSymbol class (pool) is initialized. */ + + static void initialize(); + +/*! @function init + @abstract Initialize the IONetworkController object. + @discussion Instance variables are initialized, then super::init() + is called. + @param properties A dictionary object containing a property table + associated with this instance. 
+ @result true on success, false otherwise. */ + + virtual bool init(OSDictionary * properties); + +/*! @function start + @abstract Start the network controller. + @discussion After the controller driver has successfully matched + to a provider, this method is called to start the network controller. + IONetworkController will allocate resources and gather controller + properties in its implementation. No I/O will be performed until + the subclass tries to attach a client object. A driver must override + this method, and call super::start() at the beginning of its own + implementation. Then check the return value to make sure that its + superclass was started successfully before proceeding. Tasks that + are usually performed by a driver's start method are; resource + allocation, hardware initialization, allocation of IOEventSources + and attaching them to a workloop, publishing a medium dictionary, + and finally, attaching an interface object when it is ready to + handle client requests. + @param provider The provider that the controller was matched + (and attached) to. + @result true on success, false otherwise. */ + + virtual bool start(IOService * provider); + +/*! @function stop + @abstract Stop the network controller. + @discussion The counterpart of start(). The controller has been + instructed to stop running. The stop() method should release + resources and undo actions performed by the start() method. + Subclasses must override this method and call super::stop() + at the end of its implementation. + @param provider The provider that the controller was matched + (and attached) to. */ + + virtual void stop(IOService * provider); + +/*! @typedef IONetworkController::Action + @discussion Definition of a C function that can be called + through executeCommand(). + @param target The first argument passed to action. + @param param0 Action parameter 0. + @param param1 Action parameter 1. + @param param2 Action parameter 2. + @param param3 Action parameter 3. 
*/ + + typedef IOReturn (*Action)(void * target, void * param0, + void * param1, + void * param2, + void * param3); + +/*! @function executeCommand + @abstract Make a C function call through the command gate. + @discussion Make a call to a C function that will be synchronized + with the workloop thread, and any other threads that are called + with the workloop's gate closed. + @param client The client requesting the action. This parameter is not + passed to the function. + @param action Pointer to a C function to be executed. + @param target The first parameter in the action callout. + @param param0 Action parameter 0. + @param param1 Action parameter 1. + @param param2 Action parameter 2. + @param param3 Action parameter 3. + @result The value returned by the action. */ + + virtual IOReturn executeCommand(OSObject * client, + Action action, + void * target, + void * param0 = 0, + void * param1 = 0, + void * param2 = 0, + void * param3 = 0); + +/*! @function outputPacket + @abstract Transmit an output packet. + @discussion If an IOOutputQueue was created by createOutputQueue(), + then this method will be called by the output queue object. + Otherwise, an interface object will call this method directly when + it receives an output packet from the data link layer. + + There is no upper limit on the number of mbufs, hence the number of + memory fragments, in the mbuf chain provided. Drivers must be able to + handle cases when the mbuf count might exceed the limit supported by their + DMA engines, and perform coalescing to copy the various memory fragments + into a lesser number of fragments. This complexity can be hidden from + the driver when an IOMbufMemoryCursor is used, which is able to convert + a mbuf chain into a physical address scatter-gather list that will not + exceed a specified number of physically contiguous memory segments. + See IOMbufMemoryCursor. + + The implementation in IONetworkController performs no useful action + and will drop all packets. 
A driver must override this method and + process the output packet provided. The implementation in the driver + must not block, since this may cause the network stack to be reentered + from an unsafe point. + @param packet A mbuf chain containing the output packet to be sent on + the network. + @param param A parameter provided by the caller. + @result A return code defined by the caller. */ + + virtual UInt32 outputPacket(struct mbuf * packet, void * param); + +/*! @function getFeatures + @abstract Report generic features supported by the controller and/or + the driver. + @result This method will always return 0. Subclasses may override + this method and return a bit mask of all supported features. */ + + virtual UInt32 getFeatures() const; + +/*! @function newVendorString + @result Return a string describing the vendor of the network controller. + The caller is responsible for releasing the string object returned. */ + + virtual const OSString * newVendorString() const; + +/*! @function newModelString + @result Return a string describing the model of the network controller. + The caller is responsible for releasing the string object returned. */ + + virtual const OSString * newModelString() const; + +/*! @function newRevisionString + @result Return a string describing the hardware revision of the + network controller. The caller is responsible for releasing the + string object returned. */ + + virtual const OSString * newRevisionString() const; + +/*! @function getSelectedMedium + @abstract Get the current selected medium. + @discussion If the driver has previously called setSelectedMedium() + to indicate its current media selection, then this method will return + that medium object. Otherwise, the driver's property table is + consulted and a default medium property is examined, and the + corresponding entry in the medium dictionary is returned. + @result The current selected medium, the default medium, or 0. 
*/ + + virtual const IONetworkMedium * getSelectedMedium() const; + inline const IONetworkMedium * getCurrentMedium() const; + +/*! @function getMediumDictionary + @abstract Returns the medium dictionary published by the driver. + @discussion Returns the medium dictionary published by the driver + through publishMediumDictionary(). Use copyMediumDictionary() to + create and get a copy of the medium dictionary. + @result The published medium dictionary, or 0 if the driver has not + yet published a medium dictionary through publishMediumDictionary(). */ + + virtual const OSDictionary * getMediumDictionary() const; + +/*! @function copyMediumDictionary + @abstract Returns a copy of the medium dictionary published by the + driver. + @discussion The caller is responsible for releasing the dictionary + object returned. Use getMediumDictionary() to get a reference to the + published medium dictionary instead of creating a copy. + @result A copy of the medium dictionary, or 0 if the driver has not + published a medium dictionary through publishMediumDictionary(). */ + + virtual OSDictionary * copyMediumDictionary() const; + +/*! @function getOutputHandler + @abstract Get the address of the method designated to handle output + packets for the network controller. + @result A pointer to the outputPacket() method is returned. */ + + virtual IOOutputAction getOutputHandler() const; + +/*! @function doEnable + @abstract Make a synchronized call to enable() through executeCommand(). + @discussion Do not use this method, it may be removed in the future. + See enable(). */ + + virtual IOReturn doEnable(IOService * client); + +/*! @function doDisable + @abstract Make a synchronized call to disable() through executeCommand(). + @discussion Do not use this method, it may be removed in the future. + See disable(). */ + + virtual IOReturn doDisable(IOService * client); + +/*! @function getCommandGate + @abstract Get the IOCommandGate object created by IONetworkController. 
+ @discussion When IONetworkController is started, an IOCommandGate object
+ is instantiated and attached to the workloop returned by getWorkLoop().
+ This IOCommandGate object is used internally to synchronize client
+ commands handled through executeCommand(). Subclasses that need an
+ IOCommandGate should try to reuse the object returned by this method,
+ rather than creating a new instance. See IOCommandGate documentation.
+ @result The IOCommandGate object created by IONetworkController. */
+
+ virtual IOCommandGate * getCommandGate() const;
+
+/*! @function getHardwareAddress
+ @abstract Get the network controller's permanent hardware/station
+ address. This method call is synchronized by the workloop's gate.
+ @param addr The buffer where the controller's hardware address should
+ be stored.
+ @param inOutAddrBytes The size of the address buffer provided by the
+ client, and replaced by this method with the actual size of
+ the hardware address in bytes.
+ @result kIOReturnSuccess on success, or an error otherwise. */
+
+ virtual IOReturn getHardwareAddress(void * addr,
+ UInt32 * inOutAddrBytes) = 0;
+
+/*! @function setHardwareAddress
+ @abstract Set or change the station address used by the network
+ controller. This method call is synchronized by the workloop's gate.
+ @param addr The buffer containing the hardware address provided by
+ the client.
+ @param addrBytes The size of the address buffer provided by the
+ client in bytes.
+ @result kIOReturnSuccess on success, or an error otherwise. */
+
+ virtual IOReturn setHardwareAddress(const void * addr,
+ UInt32 addrBytes) = 0;
+
+/*! @function enable
+ @abstract An enable request from a client.
+ @discussion Handle an enable request from a client. A client will call
+ enable after it has opened the controller, and before it starts to use
+ the controller to send and to receive packets over the network.
The + client object provided is typecasted using OSDynamicCast, and depending + on whether the client is an IOKernelDebugger or an IONetworkInterface, + then an overloaded enable method that takes a more specific argument + type is called. If the client matches neither type, then + kIOReturnBadArgument is returned. A driver has the option of overriding + this base enable method, or the overloaded form. This method call is + synchronized by the workloop's gate. + @param client The client object requesting the enable. + @result The return value from the overloaded enable() method, or + kIOReturnBadArgument if the client type is unknown. */ + + virtual IOReturn enable(IOService * client); + +/*! @function disable + @abstract A disable request from a client. + @discussion Handle a disable request from a client. A client will call + disable if it has previously enabled the controller, and it no longer + needs to transport packets or perform I/O using the controller. + The client object is typecasted using OSDynamicCast, and depending on + whether the client is an IOKernelDebugger or an IONetworkInterface, + then an overloaded disable method that takes a more specific argument + type is called. If the client matches neither type, then + kIOReturnBadArgument is returned. A driver has the option of overriding + this base disable method, or the overloaded form. This method call is + synchronized by the workloop's gate. + @param client The client object requesting the disable. + @result The return from the overloaded disable() method, or + kIOReturnBadArgument if the client type is unknown. */ + + virtual IOReturn disable(IOService * client); + +/*! @function setMaxPacketSize + @abstract A client request to change the maximum packet size. + @discussion This method call is synchronized by the workloop's gate. + @param maxSize The new maximum packet size. + @result kIOReturnUnsupported. 
Drivers may override this method + and return either kIOReturnSuccess to indicate that the new size + was accepted and is in effect, or an error code to indicate failure. */ + + virtual IOReturn setMaxPacketSize(UInt32 maxSize); + +/*! @function getMaxPacketSize + @abstract Get the maximum packet size supported by the controller. + @param maxSize Pointer to the return value. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn getMaxPacketSize(UInt32 * maxSize) const = 0; + +/*! @function getMinPacketSize + @abstract Get the minimum packet size supported by the controller. + @param minSize Pointer to the return value. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn getMinPacketSize(UInt32 * minSize) const = 0; + +/*! @function selectMedium + @abstract A client request to change the medium selection. + @discussion This method is called when a client issues a command + for the controller to change its current medium selection. + The implementation must call setSelectedMedium() after the change + has occurred. This method call is synchronized by the workloop's + gate. + @param medium An entry from the published medium dictionary that + represents the selection chosen by the client. + @result kIOReturnUnsupported. Drivers may override this method and + return kIOReturnSuccess if the selection was successful, + or an error code otherwise. */ + + virtual IOReturn selectMedium(const IONetworkMedium * medium); + +/*! @function selectMediumWithName + @abstract A client request to change the medium selection. + @discussion This method is called when a client issues a command + for the controller to change its current medium selection. + This implementation will look for an entry in the medium + dictionary published by the driver that is associated with the + key given. 
If a match is found, then selectMedium() is called to + perform the selection, otherwise an error is reported back to the + client. Subclasses should override selectMedium() and not this + method. This method call is synchronized by the workloop's gate. + @param mediumName An OSSymbol object that describes the name of the + new medium selected by the client. + @result The return from selectMedium() if a matching entry was found + from the medium dictionary. kIOReturnUnsupported if a medium + dictionary does not exist, or kIOReturnBadArgument if the name given + does not match any entry in the medium dictionary. */ + + virtual IOReturn selectMediumWithName(const OSSymbol * mediumName); + +/*! @function getPacketFilters + @abstract Get the set of packet filters supported by the network + controller for the given filter group. + @discussion A subclass must implement this method and report the + set of filters that are supported for the given filter group. + This method call is synchronized by the workloop's gate. + @param group The name of the filter group. + @param filters Pointer to the mask of supported filters returned by + this method. + @result kIOReturnSuccess on success, or an error to indicate a + failure to discover the set of supported filters. */ + + virtual IOReturn getPacketFilters(const OSSymbol * group, + UInt32 * filters) const = 0; + +/*! @function enablePacketFilter + @abstract Enable one of the supported packet filters from the + given filter group. + @discussion A client will call this method to enable a supported filter + from the filter group specified. If the client wishes to enable more + than one filter, it must call this method multiple times to enable the + desired set of filters. This method call is synchronized by the + workloop's gate. + @param group The name of the filter group containing the filter to be + enabled. + @param aFilter The filter to enable. + @param enabledFilters All filters currently enabled by the client. 
+ @param options Optional flags for the enable request. + @result kIOReturnSuccess on success, or an error otherwise. */ + + virtual IOReturn enablePacketFilter(const OSSymbol * group, + UInt32 aFilter, + UInt32 enabledFilters, + IOOptionBits options = 0) = 0; + +/*! @function disablePacketFilter + @abstract Disable a packet filter that is currently enabled from the + given filter group. + @discussion After a supported filter has been successfully enabled, + a client can call this method to disable that filter. This method call + is synchronized by the workloop's gate. + @param group The name of the filter group containing the filter to be + disabled. + @param aFilter The filter to disable. + @param enabledFilters All filters currently enabled by the client. + @param options Optional flags for the disable request. + @result kIOReturnSuccess on success, or an error otherwise. */ + + virtual IOReturn disablePacketFilter(const OSSymbol * group, + UInt32 aFilter, + UInt32 enabledFilters, + IOOptionBits options = 0) = 0; + +/*! @function getOutputQueue + @abstract Get the IOOutputQueue object created by createOutputQueue(). + @result Return a reference to the output queue object created by + createOutputQueue(). */ + + virtual IOOutputQueue * getOutputQueue() const; + +/*! @function getPacketBufferConstraints + @abstract Get the controller's packet buffer constraints. + @discussion Called by start() to obtain the constraints on the + memory buffer for each mbuf packet allocated through allocatePacket(). + Drivers can override this method to specify the buffer constraints + imposed by their bus master hardware. Note that outbound packets, + those that originate from the network stack, are not currently + subject to the constraints reported here. + @param constraints A pointer to an IOPacketBufferConstraints + structure that this method is expected to initialize. + See IOPacketBufferConstraints structure definition. 
*/ + + virtual void getPacketBufferConstraints( + IOPacketBufferConstraints * constraints) const; + +/*! @function allocatePacket + @abstract Allocate a packet with a data buffer that is larger than + or equal to the size specified. + @discussion This method will always return a single mbuf unless the + size requested (plus the alignment padding) is greater than MCLBYTES. + The data buffer for the mbuf (or a mbuf chain) returned is aligned + according to the constraints reported by getPacketBufferConstraints(). + The length fields in each mbuf returned is set by this method, thus + allowing the mbuf to be passed directly to an IOMbufMemoryCursor object + in order to convert the mbuf to a physical address scatter-gather list. + @param size The minimum size of the data buffer for the mbuf + packet allocated. + @result A mbuf packet, or 0 if allocation failed. */ + + virtual struct mbuf * allocatePacket(UInt32 size); + +/*! @function copyPacket + @abstract Allocate a new packet, containing data copied from an + existing source packet. + @discussion The source packet is not modified by this method. + @param m The source packet. + @param size The number of bytes to copy. If set to 0, then the + entire data buffer from the source packet is copied. + @result A new packet containing the same data as the source packet. */ + + virtual struct mbuf * copyPacket(const struct mbuf * m, UInt32 size = 0); + +/*! @function replacePacket + @abstract Allocate a new packet to replace an existing packet, the + existing packet is then returned. + @param mp A handle to the existing packet. + @param size If size is 0, then the new packet shall have the same buffer + size as the original packet that is being replaced. Otherwise, the new + packet shall have the buffer size specified by this value. + @result If packet allocation was successful, then a replacement will + take place and the original packet will be returned. 
Otherwise, 0 + is returned, and the original packet will be left untouched. */ + + virtual struct mbuf * replacePacket(struct mbuf ** mp, UInt32 size = 0); + +/*! @function replaceOrCopyPacket + @abstract A helper method that combines the functionality of + copyPacket() and replacePacket() to process a packet containing + a received frame. + @discussion This method will either make a copy or replace the existing + packet, whichever is more time efficient. Packets containing small frames + are copied, otherwise they are replaced. If replaced, then the existing + packet is returned, and a new packet with the same buffer size is created + to take its place. If copied, the existing packet is left intact, while a + copy is returned that will hold a copy of the data from the source packet. + @param mp A handle to the existing packet that may be replaced. + @param length The number of bytes received held in the packet. + Must be greater than zero. + @param replaced Pointer to a return value that is set to true to indicate + that the existing packet was replaced, or false to indicate that the + existing packet was not replaced, and a copy was created. + @result A replacement or a copy of the existing packet, or 0 if packet + allocation failed. */ + + virtual struct mbuf * replaceOrCopyPacket(struct mbuf ** mp, + UInt32 length, + bool * replaced); + + enum { + kDelayFree = 0x01 + }; + +/*! @function freePacket + @abstract Release the packet given back to the free pool. + @param m The packet to be freed. + @param options When kDelayFree option is set, then the packet + provided to this function will be queued on the free packet queue. + A subsequent call to releaseFreePackets() will release all queued + packets by making a single BSD function call. Without the kDelayFree + option, the packet provided will be released immediately. */ + + virtual void freePacket(struct mbuf * m, IOOptionBits options = 0); + +/*! 
@function releaseFreePackets + @abstract Release all packets held in the free packet queue. + @discussion The free packet queue is not protected by a lock. This + function must be called in a single threaded manner with respect to + all calls to freePacket() with the kDelayFree option set. + @result The number of packets queued and released. */ + + virtual UInt32 releaseFreePackets(); + +/*! @enum An enumeration of TCP/IP checksums that may be supported by the + hardware. + @constant kChecksumFamilyTCPIP A value that describes the collection + of TCP/IP checksums. + @constant kChecksumIP An IP header checksum. + @constant kChecksumTCP A TCP checksum that covers the TCP header and TCP + data. + @constant kChecksumUDP An UDP checksum that covers the UDP header and UDP + data. + @constant kChecksumTCPNoPseudoHeader A TCP checksum that covers the TCP + header and the TCP data, but the pseudo header is not included in the + checksum computation. A partial 16-bit checksum value must be provided + to allow the protocol stacks to calculate and verify the final checksum. + This type of checksum is not currently supported on the output path. + @constant kChecksumUDPNoPseudoHeader An UDP checksum that covers the UDP + header and the UDP data, but the pseudo header is not included in the + checksum computation. A partial 16-bit checksum value must be provided + to allow the protocol stacks to calculate and verify the final checksum. + This type of checksum is not currently supported on the output path. + @constant kChecksumTCPSum16 The hardware has a simple checksum engine + that can perform a TCP style ones complement sum of 16-bit words over + a certain range of bytes in a packet. The hardware does not have the + ability to scan for IP or TCP headers, and the driver must pass/get + additional parameter(s) to or from the protocol stack to coordinate + the checksumming effort. 
*/ + + enum { + kChecksumFamilyTCPIP = 0x00000001, + kChecksumIP = 0x0001, + kChecksumTCP = 0x0002, + kChecksumUDP = 0x0004, + kChecksumTCPNoPseudoHeader = 0x0100, + kChecksumUDPNoPseudoHeader = 0x0200, + kChecksumTCPSum16 = 0x0400, + }; + +/*! @function getChecksumSupport + @abstract Get checksums that are supported by the network controller for + the given checksum family. + @discussion A network controller that is capable of inserting and verifying + checksums on output and input packets, should override this method and + advertise its capability in order to assist or offload the software checksum + calculations performed by the protocol stacks. + @param checksumMask A pointer to the mask of supported checksums returned + by this method. + @param checksumFamily A value that specifies the checksum family. + @param isOutput Set to true to query the support for checksum insertion on + output packets, or false to query the support for checksum verification + on input packets. Controllers that have symmetric hardware checksum support + can return a fixed checksum mask value, and ignore this argument. + @result Default return is kIOReturnUnsupported. Controllers that override + this method must return kIOReturnSuccess. Any other return value will be + interpreted as a lack of checksum support, regardless of the value + returned through the first argument. */ + + virtual IOReturn getChecksumSupport( UInt32 * checksumMask, + UInt32 checksumFamily, + bool isOutput ); + +/*! @function setChecksumResult + @abstract Encode a received packet with the checksum result reported + by the hardware. + @discussion A network controller that can verify the checksum(s) for a + received packet, should call this method to encode the result on the + packet, before passing it up towards the protocol stacks. + @param packet A mbuf containing a packet that has been checksummed by + the hardware. + @param checksumFamily A value that specifies the checksum family. 
+ @param resultMask A mask of all checksums that were checked or computed. + Setting a bit implies that the driver is able to report the result of + the checksum computation, by asserting the validity of the checksum, + or by returning a partial checksum value. + @param validMask A mask of all checksums that were computed and verified + by the hardware as valid. Certain types of checksum performed by the + hardware are inherently incomplete, and therefore should never be marked + as valid. A checksum cannot be marked valid unless it has also been + checked. + @param param0 Optional parameter 0, defaults to 0. + @param param1 Optional parameter 1, defaults to 0. + @result true if the checksum family is valid and the packet has been + encoded with the checksum result provided, false otherwise. */ + + virtual bool setChecksumResult( struct mbuf * packet, + UInt32 checksumFamily, + UInt32 resultMask, + UInt32 validMask, + UInt32 param0 = 0, + UInt32 param1 = 0 ); + +/*! @function getChecksumDemand + @abstract Fetch the demand for hardware checksum computation and insertion + for the given packet before it is transmitted on the network. + @discussion A network controller that can insert a checksum for output + packets must call this method to obtain the set of checksums that it must + compute, and insert into the appropriate fields in the given output packet. + @param packet A mbuf containing a packet that may be missing one or more + checksums in the specified checksum family. + @param checksumFamily A value which specifies the checksum family. + @param demandMask A mask of all checksums that the hardware must compute + and insert into the appropriate checksum fields in the packet. + @param param0 Optional parameter 0, defaults to 0. + @param param1 Optional parameter 1, defaults to 0. */ + + virtual void getChecksumDemand( const struct mbuf * packet, + UInt32 checksumFamily, + UInt32 * demandMask, + void * param0 = 0, + void * param1 = 0 ); + +/*! 
@function publishMediumDictionary + @abstract Publish a dictionary of IONetworkMedium objects to + advertise the media selection supported by the network controller. + @discussion Called by drivers to publish their medium dictionary. + Each entry in the dictionary is an IONetworkMedium object that + represents a single medium that is supported by the controller. + This method will make a copy of the dictionary provided, then add + the copy to the driver's property table. The dictionary provided + can be released by the caller upon returning from this method. + It is permissible to call this method multiple times, which may be + necessary if the hardware's media capability changes dynamically. + However, if the capability is static, which is often the case, + then drivers will typically call this method only once from + its start() method. + + Several methods depend on the presence of a medium dictionary. + They should be called after the medium dictionary has been + published. Those methods are: + setSelectedMedium() + getSelectedMedium() + getMediumDictionary() + copyMediumDictionary() + + @param mediumDict A dictionary of IONetworkMedium objects. + @result true if the dictionary is valid, and was successfully + exported to the property table, false otherwise. */ + + virtual bool publishMediumDictionary(const OSDictionary * mediumDict); + +/*! @function setSelectedMedium + @abstract Designate an entry in the published medium dictionary as + the current selected medium. + @discussion After the driver has configured the hardware to select + one of its supported media types, it must call this method to inform + its parent about the change that has occurred. IONetworkController + will update a property in the registry to reflect the current selection. + @param medium A medium object representing the current selection. 
+ @result true if the property table update was successful, + false if the update failed, or if the medium provided does not match + any entry from the published medium dictionary. */ + + virtual bool setSelectedMedium(const IONetworkMedium * medium); + inline bool setCurrentMedium(const IONetworkMedium * medium); + +/*! @function setLinkStatus + @abstract Report the link status and the active medium. + @discussion Drivers must call this method when a link change is + detected. IONetworkController will update the link status properties + in the registry, and generate an event to inform the upper layers + about the change. + @param status Link status bits. + See IONetworkMedium for the definition of the link status bits. + @param activeMedium An object in the published medium dictionary + that represents the active medium. This may not be the same as + the selected medium. Set this to 0 if the link is inactive. + @param speed Link speed in units of bits per second. If zero, then + the link speed is taken from the medium object provided. + @param data An OSData containing any additional link parameter that + the driver wishes to publish to the registry. + @result true if all link properties were successfully updated, + false otherwise. */ + + virtual bool setLinkStatus( + UInt32 status, + const IONetworkMedium * activeMedium = 0, + UInt64 speed = 0, + OSData * data = 0); + +protected: + +/*! @function free + @abstract Free the IONetworkController object. + @discussion Free the IONetworkController object by releasing all + allocated resources, followed by a call to super::free(). */ + + virtual void free(); + +/*! @function registerWithPolicyMaker + @abstract Implemented by controller drivers to register with + the power management policy-maker. + @discussion Drivers that are able to power manage their hardware + should override this method and register with the policy-maker + provided by calling IOService::registerPowerDriver(). 
+ IONetworkController will call this method before the initial + attempt is made to attach a client. + @param policyMaker The policy-maker chosen to manage power for + this network controller. + @result kIOReturnSuccess on success, kIOReturnUnsupported if the + driver does not support power management, or an appropriate error + return code. The default return is kIOReturnUnsupported. */ + + virtual IOReturn registerWithPolicyMaker(IOService * policyMaker); + +/*! @function createWorkLoop + @abstract Method called by IONetworkController prior to the initial + getWorkLoop() call. + @discussion Before IONetworkController calls getWorkLoop() in its + start() method, it will call createWorkLoop() to make sure that a + subclass that wants to create a workloop, will do so before its + first use. + @result True to indicate success, false otherwise. Returning false + will fail IONetworkController::start(). */ + + virtual bool createWorkLoop(); + +/*! @function prepare + @abstract Prepare the controller before an IOService is created and + attached as a client. + @discussion This method is called by attachInterface() or + attachDebuggerClient() to prepare the controller before the new client + object is attached. This method will call publishProperties() to publish + controller capabilities and properties that may be used by client objects. + However, publishProperties() will be called only once, even if prepare() + is called multiple times. This method call is synchronized by the + workloop's gate. + @result kIOReturnSuccess on success, or an error code otherwise. + Returning an error will fail the client attach. */ + + virtual IOReturn prepare(); + +/*! @function publishProperties + @abstract Publish controller properties and capabilities. + @discussion Called by IONetworkController to discover controller + properties, and publish them to the property table in the I/O Kit + registry. This method is called once by prepare(). 
+ @result true if all properties were discovered and published + successfully, false otherwise. Returning false will prevent client + objects from attaching to the controller, since a property that + a client relies upon may be missing. */ + + virtual bool publishProperties(); + +/*! @function getCommandClient + @abstract Get the command client object. + @discussion Methods called on the workloop context to service a + client request can call this method to get the client object which + initiated the command. + @result The command client. If the caller is not running on the + workloop thread, or if the thread does not have the workloop's gate + closed, then 0 is returned. */ + + virtual OSObject * getCommandClient() const; + +/*! @function handleOpen + @abstract Handle a client open. + @discussion Handle a client open on the controller object. IOService + calls this method with the arbitration lock held. Subclasses + should not override this method. + @param client The client that is attempting to open the controller. + @param options Not used. See IOService. + @param argument Not used. See IOService. + @result true to accept the client open, false to refuse it. */ + + virtual bool handleOpen(IOService * client, + IOOptionBits options, + void * argument); + +/*! @function handleClose + @abstract Handle a client close. + @discussion Handle a close from one of the client objects. IOService + calls this method with the arbitration lock held. Subclasses + should not override this method. + @param client The client that is closing the controller. + @param options Not used. See IOService. */ + + virtual void handleClose(IOService * client, IOOptionBits options); + +/*! @function handleIsOpen + @abstract Query whether a client has an open on the controller. + @discussion This method is always called by IOService with the + arbitration lock held. Subclasses should not override this method. 
+ @result true if the specified client, or any client if none (0) is + specified, presently has an open on this object. */ + + virtual bool handleIsOpen(const IOService * client) const; + +/*! @function enable + @abstract A request from an interface client to enable the controller. + @discussion Called by an interface client to enable the controller. + Upon receiving this command, the controller driver must bring up the + hardware and become ready to transmit and receive packets. A driver + should also delay the allocation of most runtime resources until this + method is called in order to conserve system resources. This method call + is synchronized by the workloop's gate. + @param interface The interface client object that requested the enable. + @result kIOReturnUnsupported. Drivers that override this method must + return kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn enable(IONetworkInterface * interface); + +/*! @function disable + @abstract A request from an interface client to disable the controller. + @discussion Called by an interface client to disable the controller. + This method should stop the hardware and disable hardware interrupt + sources. Any resources allocated by enable() should also be deallocated. + This method call is synchronized by the workloop's gate. + @param interface The interface object that requested the disable. + @result kIOReturnUnsupported. Drivers that override this method must + return kIOReturnSuccess on success, or an error code otherwise. */ + + virtual IOReturn disable(IONetworkInterface * interface); + +/*! @function attachInterface + @abstract Attach a new interface client object. + @discussion Create a new interface object and attach it to the + controller. The createInterface() method is called to perform + the interface allocation and initialization, followed by a call to + configureInterface() to configure it. 
Subclasses can override those + two methods to customize the interface client attached. Drivers will + usually call this method from start(), after they are ready to process + client requests. Since most drivers will have a single interface + client, this method will likely be called only once. + @param interface Upon success (return value is true), the + interface object will be written to the handle provided. + @param doRegister If true, then registerService() is called to register + the interface, which will trigger the matching process, and will ultimately + cause the interface to become registered with the data link layer. + Drivers that wish to delay the registration can set doRegister to false, + and call registerService() on the interface object when the controller + becomes ready. This allows the driver to attach an interface without + making its services available to the rest of the system. + @result true on success, false otherwise. */ + + virtual bool attachInterface(IONetworkInterface ** interface, + bool doRegister = true); + +/*! @function detachInterface + @abstract Detach an interface client object. + @discussion This method will verify that the object provided is indeed + an IONetworkInterface instance, and then call its terminate() method. + Note that an interface object will close and detach from its + controller after the data link layer has removed all references to + all data structures exposed by the interface. The interface object + should be released following this call. + @param interface An interface object to be detached and terminated. + @param sync If true, the interface is terminated synchronously. + This may cause this method to block for an indeterminate + amount of time. */ + + virtual void detachInterface(IONetworkInterface * interface, + bool sync = false); + +/*! @function createInterface + @abstract Create a new network interface object. 
+ @discussion This method is called by attachInterface() to perform + allocation and initialization of a new interface object. A subclass of + IONetworkController must implement this method and return a matching + interface object. For example, IOEthernetController's implementation + will return an IOEthernetInterface object when createInterface() is + called. + @result A newly allocated and initialized interface object. */ + + virtual IONetworkInterface * createInterface() = 0; + +/*! @function configureInterface + @abstract Configure a newly created network interface object. + @discussion Configure an interface object that was created by + createInterface(). Subclasses can override this method to customize, + and examine the interface object that will be attached to the + controller as a client. + @param interface The interface object to be configured. + @result true if the operation was successful, false otherwise + (this will cause attachInterface() to fail and return 0). */ + + virtual bool configureInterface(IONetworkInterface * interface); + +/*! @function createOutputQueue + @abstract Create an IOOutputQueue to handle output packet queueing, + and also to resolve contention for the controller's transmitter from + multiple client threads. + @discussion Called by start() to create an IOOutputQueue object to + handle output packet queueing. The default implementation will always + return 0, hence no output queue will be created. A driver may override + this method and return a subclass of IOOutputQueue. IONetworkController + will keep a reference to the queue created, and will release this + object when IONetworkController is freed. Also see getOutputQueue(). + @result A newly allocated and initialized IOOutputQueue object. */ + + virtual IOOutputQueue * createOutputQueue(); + +/*! @function enable + @abstract An enable request from an IOKernelDebugger client. 
+ @discussion Drivers that provide debugging support may either override + this method and setup the hardware to support the polled-mode send and + receive methods; receivePacket() and sendPacket(). Or override the base + enable() and disable() methods that take an IOService argument. + @param debugger The IOKernelDebugger client requesting the enable. + @result kIOReturnSuccess. Drivers must return kIOReturnSuccess + on success, or an error otherwise. */ + + virtual IOReturn enable(IOKernelDebugger * debugger); + +/*! @function disable + @abstract A disable request from an IOKernelDebugger client. + @discussion Drivers that provide debugging support may either override + this method to disable support for the polled-mode send and receive + methods. Or override the base enable() and disable() methods that + take an IOService argument. + @param debugger The IOKernelDebugger client requesting the disable. + @result kIOReturnSuccess. Drivers must return kIOReturnSuccess + on success, or an error otherwise. */ + + virtual IOReturn disable(IOKernelDebugger * debugger); + +/*! @function attachDebuggerClient + @abstract Attach a new IOKernelDebugger client object. + @discussion Allocate an IOKernelDebugger object and attach it as + a client. Having a debugger client implies that the controller + supports kernel debugging, and therefore must implement the two + polled-mode methods that are called by the debugger client. See + sendPacket() and receivePacket(). Only a single debugger client + should be attached to each controller. + @param debuggerP A handle that will return the new + IOKernelDebugger object created. + @result true on success, false otherwise. */ + + virtual bool attachDebuggerClient(IOKernelDebugger ** debuggerP); + +/*! @function detachDebuggerClient + @abstract Detach an IOKernelDebugger client object. + @discussion Detach and terminate the IOKernelDebugger client object + provided. 
A synchronous termination is issued, and this method will
+ return after the debugger client has been terminated. The debugger
+ client should be released following this call.
+ @param debugger The IOKernelDebugger object to be detached and
+ terminated. If the argument provided is NULL or is not an
+ IOKernelDebugger, this method will return immediately. */
+
+ virtual void detachDebuggerClient(IOKernelDebugger * debugger);
+
+/*! @function reserveDebuggerLock
+ @abstract Take the global debugger lock.
+ @discussion This method should not be used. Instead, call the
+ lock() method provided by IOKernelDebugger. */
+
+ void reserveDebuggerLock();
+
+/*! @function releaseDebuggerLock
+ @abstract Release the global debugger lock.
+ @discussion This method should not be used. Instead, call the
+ unlock() method provided by IOKernelDebugger. */
+
+ void releaseDebuggerLock();
+
+/*! @function receivePacket
+ @abstract Debugger polled-mode receive handler.
+ @discussion This method must be implemented by a driver that supports
+ kernel debugging. After a debugger client has been attached through
+ attachDebuggerClient(), this method will be called by the debugger
+ client to poll for an incoming packet when the kernel debugger is active.
+ This method may be called from the primary interrupt context, and the
+ implementation must avoid any memory allocation, and must never block.
+ The receivePacket() method in IONetworkController is used as a placeholder,
+ it performs no useful action, and should not be called. A driver that
+ attaches a debugger client must override this method.
+ @param pkt Address of a receive buffer where the received packet should
+ be stored. This buffer has room for 1518 bytes.
+ @param pktSize Address where the number of bytes received must be
+ recorded. Set this to zero if no packets were received during
+ the timeout interval.
+ @param timeout The maximum amount of time in milliseconds to poll for + a packet to arrive before this method must return. */ + + virtual void receivePacket(void * pkt, UInt32 * pktSize, UInt32 timeout); + +/*! @function sendPacket + @abstract Debugger polled-mode transmit handler. + @discussion This method must be implemented by a driver that supports + kernel debugging. After a debugger client has been attached through + attachDebuggerClient(), this method will be called by the debugger + to send an outbound packet only when the kernel debugger is active. + This method may be called from the primary interrupt context, and the + implementation must avoid any memory allocation, and must never block. + The sendPacket() method in IONetworkController is used as a placeholder, + it performs no useful action, and should not be called. A driver that + attaches a debugger client must override this method. + @param pkt Pointer to a transmit buffer containing the packet to be + sent on the network. + @param pktSize The size of the transmit buffer in bytes. 
*/ + + virtual void sendPacket(void * pkt, UInt32 pktSize); + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IONetworkController, 0); + OSMetaClassDeclareReservedUnused( IONetworkController, 1); + OSMetaClassDeclareReservedUnused( IONetworkController, 2); + OSMetaClassDeclareReservedUnused( IONetworkController, 3); + OSMetaClassDeclareReservedUnused( IONetworkController, 4); + OSMetaClassDeclareReservedUnused( IONetworkController, 5); + OSMetaClassDeclareReservedUnused( IONetworkController, 6); + OSMetaClassDeclareReservedUnused( IONetworkController, 7); + OSMetaClassDeclareReservedUnused( IONetworkController, 8); + OSMetaClassDeclareReservedUnused( IONetworkController, 9); + OSMetaClassDeclareReservedUnused( IONetworkController, 10); + OSMetaClassDeclareReservedUnused( IONetworkController, 11); + OSMetaClassDeclareReservedUnused( IONetworkController, 12); + OSMetaClassDeclareReservedUnused( IONetworkController, 13); + OSMetaClassDeclareReservedUnused( IONetworkController, 14); + OSMetaClassDeclareReservedUnused( IONetworkController, 15); + OSMetaClassDeclareReservedUnused( IONetworkController, 16); + OSMetaClassDeclareReservedUnused( IONetworkController, 17); + OSMetaClassDeclareReservedUnused( IONetworkController, 18); + OSMetaClassDeclareReservedUnused( IONetworkController, 19); + OSMetaClassDeclareReservedUnused( IONetworkController, 20); + OSMetaClassDeclareReservedUnused( IONetworkController, 21); + OSMetaClassDeclareReservedUnused( IONetworkController, 22); + OSMetaClassDeclareReservedUnused( IONetworkController, 23); + OSMetaClassDeclareReservedUnused( IONetworkController, 24); + OSMetaClassDeclareReservedUnused( IONetworkController, 25); + OSMetaClassDeclareReservedUnused( IONetworkController, 26); + OSMetaClassDeclareReservedUnused( IONetworkController, 27); + OSMetaClassDeclareReservedUnused( IONetworkController, 28); + OSMetaClassDeclareReservedUnused( IONetworkController, 29); + OSMetaClassDeclareReservedUnused( IONetworkController, 
30); + OSMetaClassDeclareReservedUnused( IONetworkController, 31); +}; + +inline const IONetworkMedium * IONetworkController::getCurrentMedium() const +{ + return getSelectedMedium(); +} + +inline bool IONetworkController::setCurrentMedium(const IONetworkMedium * medium) +{ + return setSelectedMedium(medium); +} + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IONETWORKCONTROLLER_H */ + diff --git a/iokit/IOKit/network/IONetworkData.h b/iokit/IOKit/network/IONetworkData.h new file mode 100644 index 000000000..1c08caec0 --- /dev/null +++ b/iokit/IOKit/network/IONetworkData.h @@ -0,0 +1,462 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * IONetworkData.h + * + * HISTORY + * 21-Apr-1999 Joe Liu (jliu) created. + * + */ + +#ifndef _IONETWORKDATA_H +#define _IONETWORKDATA_H + +#define IONetworkParameter IONetworkData // FIXME + +/*! @enum An enumeration of constants that describe access types. + @constant kIONetworkDataAccessTypeRead Read access. 
+ @constant kIONetworkDataAccessTypeWrite Write access.
+ @constant kIONetworkDataAccessTypeReset Reset access.
+ @constant kIONetworkDataAccessTypeSerialize Serialization access. */
+
+enum {
+ kIONetworkDataAccessTypeRead = 0x01,
+ kIONetworkDataAccessTypeWrite = 0x02,
+ kIONetworkDataAccessTypeReset = 0x04,
+ kIONetworkDataAccessTypeSerialize = 0x08,
+ kIONetworkDataAccessTypeMask = 0xff,
+};
+
+/*! @define kIONetworkDataBasicAccessTypes
+ @discussion The default access types supported by an IONetworkData
+ object. Allow read() and serialize(). */
+
+#define kIONetworkDataBasicAccessTypes \
+ (kIONetworkDataAccessTypeRead | kIONetworkDataAccessTypeSerialize)
+
+/*! @enum An enumeration of the type of data buffers that can be
+ managed by an IONetworkData object.
+ @constant kIONetworkDataBufferTypeInternal An internal data buffer
+ allocated by the init() method.
+ @constant kIONetworkDataBufferTypeExternal An external (persistent) data
+ buffer.
+ @constant kIONetworkDataBufferTypeNone No data buffer. The only useful
+ action performed by an IONetworkData object with this buffer type
+ is to call the access notification handler. */
+
+enum {
+ kIONetworkDataBufferTypeInternal = 0,
+ kIONetworkDataBufferTypeExternal,
+ kIONetworkDataBufferTypeNone,
+};
+
+/*! @defined kIONetworkDataBytes
+ @abstract kIONetworkDataBytes is a property of IONetworkData objects.
+ It has an OSData value.
+ @discussion The kIONetworkDataBytes property is an OSData that describes
+ the data buffer of an IONetworkData object. This property is present
+ only if kIONetworkDataAccessTypeSerialize access is supported. */
+
+#define kIONetworkDataBytes "Data"
+
+/*! @defined kIONetworkDataAccessTypes
+ @abstract kIONetworkDataAccessTypes is a property of IONetworkData
+ objects. It has an OSNumber value.
+ @discussion The kIONetworkDataAccessTypes property is an OSNumber that
+ describes the supported access types of an IONetworkData object.
*/ + +#define kIONetworkDataAccessTypes "Access Types" + +/*! @defined kIONetworkDataSize + @abstract kIONetworkDataSize is a property of IONetworkData + objects. It has an OSNumber value. + @discussion The kIONetworkDataSize property is an OSNumber that + describes the size of the data buffer of an IONetworkData object. */ + +#define kIONetworkDataSize "Size" + +#ifdef KERNEL + +#include +#include + +/*! @class IONetworkData : public OSObject + An IONetworkData object manages a fixed-size named buffer. + This object provides external access methods that can be used to + access the contents of the data buffer. In addition, serialization + is supported, and therefore this object can be added to a property + table to publish the data object. An unique name must be assigned to + the object during initialization. An OSSymbol key will be created + based on the assigned name, and this key can be used when the object + is added to a dictionary. + + The level of access granted to the access methods can be restricted, + by specifying a set of supported access types when the object is + initialized, or modified later by calling setAccessTypes(). By default, + each IONetworkData object created will support serialization, and will + also allow its data buffer to be read through the read() access method. + + An access notification handler, in the form of a 'C' function, can + be registered to receive a call each time the data buffer is accessed + through an access method. Arguments provided to the handler will identify + the data object and the type of access that triggered the notification. + The handler can therefore perform lazy update of the data buffer until + an interested party tries to read or serialize the data. The notification + handler can also take over the default action performed by the access + methods when the buffer type is set to kIONetworkDataBufferTypeNone. 
+ This will prevent the access methods from accessing the data buffer, + and allow the handler to override the access protocol. + + This object is primarily used by IONetworkInterface to export interface + properties to user space. */ + + +class IONetworkData : public OSObject +{ + OSDeclareDefaultStructors( IONetworkData ) + +public: + +/*! @typedef Action + Defines a C function that may be called by an IONetworkData object + when one of its access methods is called. + @param target The target of the notification. + @param param A parameter that was provided when the notification + handler was registered. + @param data The IONetworkData object being accessed, and the + sender of the notification. + @param accessType A bit will be set indicating the type of access + which triggered the notification. + @param buffer Pointer to the accessor's buffer. Only valid for + read() and write() accesses. + @param bufferSize Pointer to the size of the accessor's buffer. + @param offset An offset from the start of the data buffer to begin + reading or writing. */ + + typedef IOReturn (*Action)(void * target, + void * param, + IONetworkData * data, + UInt32 accessType, + void * buffer, + UInt32 * bufferSize, + UInt32 offset); + +protected: + const OSSymbol * _key; // key associated with this object. + UInt32 _access; // supported access types. + void * _buffer; // Data buffer. + UInt32 _bufType; // buffer type + UInt32 _size; // data buffer size. + void * _tapTarget; // target for access notification. + Action _tapAction; // the function to call. + void * _tapParam; // arbitrary notification param. + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData * _reserved; + + +/*! @function free + @abstract Free the IONetworkData object. */ + + virtual void free(); + +/*! @function writeBytes + @abstract Write to the data buffer with data from a source buffer + provided by the caller. 
+ @param srcBuffer Pointer to a source buffer provided by the caller.
+ @param srcBufferSize The size of the source buffer.
+ @param writeOffset A byte offset from the start of the data buffer
+ to begin writing.
+ @result true if the operation was successful, false otherwise. */
+
+ virtual bool writeBytes(const void * srcBuffer,
+ UInt32 srcBufferSize,
+ UInt32 writeOffset = 0);
+
+/*! @function readBytes
+ @abstract Read from the data buffer and copy the data to a destination
+ buffer provided by the caller.
+ @param dstBuffer Pointer to the destination buffer.
+ @param dstBufferSize Pointer to an integer containing the size of the
+ destination buffer. And is overwritten by this method with the actual
+ number of bytes copied to the destination buffer.
+ @param readOffset A byte offset from the start of the data buffer
+ to begin reading.
+ @result true if the operation was successful, false otherwise. */
+
+ virtual bool readBytes(void * dstBuffer,
+ UInt32 * dstBufferSize,
+ UInt32 readOffset = 0) const;
+
+/*! @function clearBuffer
+ @abstract Clear the data buffer by filling it with zeroes.
+ @result true if the operation was successful, false otherwise. */
+
+ virtual bool clearBuffer();
+
+public:
+
+/*! @function initialize
+ @abstract IONetworkData class initializer. */
+
+ static void initialize();
+
+/*! @function withInternalBuffer
+ @abstract Factory method that will construct and initialize an
+ IONetworkData object with an internal data buffer.
+ @param name A name to assign to this object.
+ @param bufferSize The number of bytes to allocate for the internal data
+ buffer.
+ @param accessTypes The initial supported access types.
+ @param target The notification target.
+ @param action The notification action.
+ @param param A parameter to pass to the notification action.
+ @result An IONetworkData object on success, or 0 otherwise.
*/ + + static IONetworkData * + withInternalBuffer(const char * name, + UInt32 bufferSize, + UInt32 accessTypes = + kIONetworkDataBasicAccessTypes, + void * target = 0, + Action action = 0, + void * param = 0); + +/*! @function withExternalBuffer + @abstract Factory method that will construct and initialize an + IONetworkData object with an external data buffer. + @param name A name to assign to this object. + @param bufferSize The size of the external data buffer. + @param externalBuffer Pointer to the external data buffer. + @param accessTypes The initial supported access types. + @param target The notification target. + @param action The notification action. + @param param A parameter to pass to the notification action. + @result An IONetworkData object on success, or 0 otherwise. */ + + static IONetworkData * + withExternalBuffer(const char * name, + UInt32 bufferSize, + void * externalBuffer, + UInt32 accessTypes = + kIONetworkDataBasicAccessTypes, + void * target = 0, + Action action = 0, + void * param = 0); + +/*! @function withNoBuffer + @abstract Factory method that will construct and initialize an + IONetworkData object without a data buffer. The notification handler + must intervene when the IONetworkData is accessed. + @param name A name to assign to this object. + @param bufferSize The size of the phantom data buffer. + @param accessTypes The initial supported access types. + @param target The notification target. + @param action The notification action. + @param param A parameter to pass to the notification action. + @result An IONetworkData object on success, or 0 otherwise. */ + + static IONetworkData * withNoBuffer(const char * name, + UInt32 bufferSize, + UInt32 accessTypes, + void * target, + Action action, + void * param = 0); + +/*! @function init + @abstract Initialize an IONetworkData object. + @param name A name to assign to this object. + @param bufferType The type of buffer associated with this object. 
+ @param bufferSize The size of the data buffer. + @param externalBuffer Pointer to an external data buffer. + @param accessTypes The initial supported access types. + Can be later modified by calling setAccessTypes(). + @param target The notification target. + @param action The notification action. + @param param A parameter to pass to the notification action. + @result true if initialized successfully, false otherwise. */ + + virtual bool init(const char * name, + UInt32 bufferType, + UInt32 bufferSize, + void * externalBuffer = 0, + UInt32 accessTypes = + kIONetworkDataBasicAccessTypes, + void * target = 0, + Action action = 0, + void * param = 0); + +/*! @function setAccessTypes + @abstract Set the types of access that are permitted on the data buffer. + @param types A mask of access types indicating the supported access + types. */ + + virtual void setAccessTypes(UInt32 types); + +/*! @function setNotificationTarget + @abstract Register a C function to handle access notifications sent + from this object. + @discussion A notification is sent by an IONetworkData object to the + registered notification handler, when an access method is called to + modify the contents of the data buffer. + @param target The first parameter passed to the notification handler. + @param action A pointer to a C function that will handle the notification. + If 0, then notification is disabled. + @param param An optional parameter passed to the notification handler. */ + + virtual void setNotificationTarget(void * target, + Action action, + void * param = 0); + +/*! @function getBuffer + @abstract Get a pointer to the data buffer. + @result A pointer to the data buffer. Returns 0 if the buffer type is + kIONetworkDataBufferTypeNone. */ + + virtual const void * getBuffer() const; + +/*! @function getBufferType + @abstract Get the type of data buffer managed by this object. + @result A constant that describes the type of the data buffer. */ + + virtual UInt32 getBufferType() const; + +/*! 
@function getAccessTypes
+ @abstract Get the types of data access supported by this object.
+ @result A mask of supported access types. */
+
+ virtual UInt32 getAccessTypes() const;
+
+/*! @function getNotificationTarget
+ @abstract Get the first parameter that will be passed to the access
+ notification handler.
+ @result The first parameter that will be passed to the access notification
+ handler. */
+
+ virtual void * getNotificationTarget() const;
+
+/*! @function getNotificationAction
+ @abstract Get the C function that was registered to handle access
+ notifications sent from this object.
+ @result A pointer to a C function, or 0 if notification is disabled. */
+
+ virtual Action getNotificationAction() const;
+
+/*! @function getNotificationParameter
+ @abstract Get the parameter that will be passed to the access
+ notification handler.
+ @result The parameter that will be passed to the access notification
+ handler. */
+
+ virtual void * getNotificationParameter() const;
+
+/*! @function getKey
+ @abstract Get a unique OSSymbol key associated with this object.
+ @discussion During initialization, IONetworkData will create an
+ OSSymbol key based on its assigned name.
+ @result An OSSymbol key that was generated from the name assigned to
+ this object. */
+
+ virtual const OSSymbol * getKey() const;
+
+/*! @function getSize
+ @abstract Get the size of the data buffer.
+ @result The size of the data buffer managed by this object in bytes. */
+
+ virtual UInt32 getSize() const;
+
+/*! @function reset
+ @abstract An access method to reset the data buffer.
+ @discussion Handle an external request to reset the data buffer.
+ If notification is enabled, then the notification handler is called
+ after the data buffer has been cleared.
+ @result kIOReturnSuccess on success,
+ kIOReturnNotWritable if reset access is not permitted,
+ or an error from the notification handler. */
+
+ virtual IOReturn reset();
+
+/*!
@function read + @abstract An access method to read from the data buffer. + @discussion Handle an external request to read from the data buffer + and copy it to the destination buffer provided by the accessor. + If notification is enabled, then the notification handler is called + before the data buffer is copied to the destination buffer. The + notification handler may use this opportunity to intervene and + to update the contents of the data buffer. + @param dstBuffer Pointer to the destination buffer. + @param dstBufferSize Pointer to an integer containing the size of the + destination buffer. And is overwritten by this method to the actual number + of bytes copied to the destination buffer. + @param readOffset An offset from the start of the source data buffer to + begin reading. + @result kIOReturnSuccess on success, + kIOReturnBadArgument if any of the arguments provided is invalid, + kIOReturnNotReadable if read access is not permitted, + or an error from the notification handler. */ + + virtual IOReturn read(void * dstBuffer, + UInt32 * dstBufferSize, + UInt32 readOffset = 0); + +/*! @function write + @abstract An access method to write to the data buffer. + @discussion Handle an external request to write to the data buffer + from a source buffer provided by the accessor. After checking that + the data object supports write accesses, the data buffer is updated + if it exists. Then the registered notification handler is called. + @param srcBuffer Pointer to the source buffer. + @param srcBufferSize The number of bytes to write to the data buffer. + @param writeOffset An offset from the start of the destination data buffer + to begin writing. + @result kIOReturnSuccess on success, + kIOReturnBadArgument if any of the arguments provided is invalid, + kIOReturnNotWritable if write access is not permitted, + or an error from the notification handler. */ + + virtual IOReturn write(void * srcBuffer, + UInt32 srcBufferSize, + UInt32 writeOffset = 0); + +/*! 
@function serialize + @abstract Serialize the IONetworkData object. + @discussion If notification is enabled, then the notification + handler is called just before the data buffer is serialized. + @param s An OSSerialize object. + @result true on success, false otherwise. */ + + virtual bool serialize(OSSerialize * s) const; + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IONetworkData, 0); + OSMetaClassDeclareReservedUnused( IONetworkData, 1); + OSMetaClassDeclareReservedUnused( IONetworkData, 2); + OSMetaClassDeclareReservedUnused( IONetworkData, 3); +}; + +#endif /* KERNEL */ + +#endif /* !_IONETWORKDATA_H */ diff --git a/iokit/IOKit/network/IONetworkInterface.h b/iokit/IOKit/network/IONetworkInterface.h new file mode 100644 index 000000000..85f16fb45 --- /dev/null +++ b/iokit/IOKit/network/IONetworkInterface.h @@ -0,0 +1,933 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkInterface.h + * + * HISTORY + * 8-Jan-1999 Joe Liu (jliu) created. 
+ */ + +#ifndef _IONETWORKINTERFACE_H +#define _IONETWORKINTERFACE_H + +/*! @defined kIONetworkInterfaceClass + @abstract kIONetworkInterfaceClass is the name of the + IONetworkInterface class. */ + +#define kIONetworkInterfaceClass "IONetworkInterface" + +/*! @defined kIONetworkData + @abstract kIONetworkData is a property of IONetworkInterface + objects. It has an OSDictionary value. + @discussion The kIONetworkData property is a container for the + set of IONetworkData objects managed by the interface. + Each entry in the dictionary is a key/value pair consisting of + the network data name, and an OSDictionary describing the + contents of the network data. */ + +#define kIONetworkData "IONetworkData" + +/*! @defined kIOInterfaceType + @abstract kIOInterfaceType is a property of IONetworkInterface objects. + It has an OSNumber value. + @discussion The kIOInterfaceType property specifies the type of + network interface that this interface represents. The type + constants are defined in bsd/net/if_types.h. */ + +#define kIOInterfaceType "IOInterfaceType" + +/*! @defined kIOMaxTransferUnit + @abstract kIOMaxTransferUnit is a property of IONetworkInterface objects. + It has an OSNumber value. + @discussion The kIOMaxTransferUnit property specifies the maximum + transfer unit for the interface in bytes. */ + +#define kIOMaxTransferUnit "IOMaxTransferUnit" + +/*! @defined kIOMediaAddressLength + @abstract kIOMediaAddressLength is a property of IONetworkInterface objects. + It has an OSNumber value. + @discussion The kIOMediaAddressLength property specifies the size of the + media address in bytes. */ + +#define kIOMediaAddressLength "IOMediaAddressLength" + +/*! @defined kIOMediaHeaderLength + @abstract kIOMediaHeaderLength is a property of IONetworkInterface objects. + It has an OSNumber value. + @discussion The kIOMediaHeaderLength property specifies the size of the + media header in bytes. */ + +#define kIOMediaHeaderLength "IOMediaHeaderLength" + +/*! 
@defined kIOInterfaceFlags + @abstract kIOInterfaceFlags is a property of IONetworkInterface objects. + It has an OSNumber value. + @discussion The kIOInterfaceFlags property specifies the current value + of the interface flags. The flag constants are defined in + bsd/net/if.h. */ + +#define kIOInterfaceFlags "IOInterfaceFlags" + +/*! @defined kIOInterfaceExtraFlags + @abstract kIOInterfaceExtraFlags is a property of IONetworkInterface + objects. It has an OSNumber value. + @discussion The kIOInterfaceExtraFlags property specifies the current + value of the interface extra flags. The extra flag constants are + defined in bsd/net/if.h. */ + +#define kIOInterfaceExtraFlags "IOInterfaceExtraFlags" + +/*! @defined kIOInterfaceUnit + @abstract kIOInterfaceUnit is a property of IONetworkInterface + objects. It has an OSNumber value. + @discussion The kIOInterfaceUnit property describes the unit number + assigned to the interface object. */ + +#define kIOInterfaceUnit "IOInterfaceUnit" + +/*! @defined kIOInterfaceState + @abstract kIOInterfaceState is a property of IONetworkInterface + objects. It has an OSNumber value. + @discussion The kIOInterfaceState property describes the current state + of the interface object. This property is not exported to BSD via + the ifnet structure. */ + +#define kIOInterfaceState "IOInterfaceState" + +/*! @defined kIOInterfaceNamePrefix + @abstract kIOInterfaceNamePrefix is a property of IONetworkInterface + objects. It has an OSString value. + @discussion The kIOInterfaceNamePrefix property describes the string + prefix for the BSD name assigned to the interface. */ + +#define kIOInterfaceNamePrefix "IOInterfaceNamePrefix" + +/*! @defined kIOPrimaryInterface + @abstract kIOPrimaryInterface is a property of IONetworkInterface + objects. It has an OSBoolean value. + @discussion The kIOPrimaryInterface property describes whether the + interface is the primary or the built-in network interface. 
*/ + +#define kIOPrimaryInterface "IOPrimaryInterface" + +/*! @enum Interface state flags. + @discussion An enumeration of the constants that are used to encode the + state of the interface object. + @constant kIONetworkInterfaceRegisteredState The interface object has + registered with the data link layer. + @constant kIONetworkInterfaceOpenedState One or more clients have an + open on the interface object. + @constant kIONetworkInterfaceDisabledState The interface is temporarily + unable to service its clients. This will occur when the network + controller that is servicing the interface has entered a low power + state that renders it unusable. */ + +enum { + kIONetworkInterfaceRegisteredState = 0x1, + kIONetworkInterfaceOpenedState = 0x2, + kIONetworkInterfaceDisabledState = 0x4 +}; + +/* + * Kernel + */ +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include +#include +#include + +struct mbuf; // forward declarations. +struct ifnet; +class IONetworkController; +class IONetworkStack; +class IOCommandGate; + +/*! @typedef IOOutputAction + @discussion Prototype for an output packet handler that will process + all outbound packets sent to the interface from the data link layer. + An output handler is registered with the interface by calling + registerOutputHandler(). + @param m A packet mbuf. + @param param A parameter for the output request. */ + +typedef UInt32 (OSObject::*IOOutputAction)(struct mbuf * m, void * param); + +/*! @typedef BPF_FUNC + @discussion Prototype for the BPF tap handler. This will disappear + when the correct DLIL header file is included. */ + +typedef int (*BPF_FUNC)(struct ifnet *, struct mbuf *); + +// Network event types recognized by inputEvent(). +// +enum { + /* DLIL defined event, argument must be a pointer to a + kern_event_msg structure. 
*/ + kIONetworkEventTypeDLIL = 0xff000001, + + /* Link up event, no argument */ + kIONetworkEventTypeLinkUp = 0xff000002, + + /* Link down event, no argument */ + kIONetworkEventTypeLinkDown = 0xff000003 +}; + +/*! @class IONetworkInterface : public IOService + @abstract An IONetworkInterface object manages the connection between + an IONetworkController and the data link interface layer (DLIL). + All interactions between the controller and DLIL must go through an + interface object. Any data structures that are required by DLIL for a + particular interface type shall be allocated and maintained by the + interface object. IONetworkInterface is an abstract class that must be + extended by a concrete subclass to specialize for a particular network + type. + + Although most drivers will allocate a single interface object, + it is possible for multiple interfaces to be attached to a single + controller. This controller driver will be responsible for arbitrating + access among its multiple interface clients. + + IONetworkInterface also maintains a dictionary of IONetworkData + objects containing statistics structures. Controller drivers can + ask for a particular data object by name and update the + statistics counters within directly. This dictionary is added to + the interface's property table and is visible outside of the kernel. */ + +class IONetworkInterface : public IOService +{ + OSDeclareAbstractStructors( IONetworkInterface ) + + friend class IONetworkStack; + +private: + IONetworkController * _controller; + struct ifnet * _ifp; + IORecursiveLock * _ifLock; + OSSet * _clientSet; + OSNumber * _stateBits; + BPF_FUNC _inputFilterFunc; + BPF_FUNC _outputFilterFunc; + OSObject * _outTarget; + IOOutputAction _outAction; + UInt32 _clientVar[4]; + OSDictionary * _dataDict; + struct mbuf * _inputQHead; + struct mbuf * _inputQTail; + UInt32 _inputQCount; + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. 
(Internal use only) */ + ExpansionData * _reserved; + + bool _syncNetworkDataDict(); + bool _setInterfaceProperty(UInt32 value, + UInt32 mask, + UInt32 bytes, + void * addr, + char * name); + + SInt32 syncSIOCSIFMEDIA(IONetworkController * ctlr, struct ifreq * ifr); + SInt32 syncSIOCGIFMEDIA(IONetworkController * ctlr, struct ifreq * ifr); + SInt32 syncSIOCSIFMTU(IONetworkController * ctlr, struct ifreq * ifr); + + static int performGatedCommand(void *, void *, void *, void *, void *); + static int ioctl_shim(struct ifnet * ifp, u_long cmd, caddr_t data); + static int set_bpf_tap_shim(struct ifnet * ifp, int mode, BPF_FUNC func); + static int free_shim(struct ifnet * ifp); + static int output_shim(struct ifnet * ifp, struct mbuf *m); + static void null_shim(struct ifnet * ifp); + + static IOReturn sControllerWillChangePowerState( IONetworkInterface *, + void *, void *, + void *, void *); + + static IOReturn sControllerDidChangePowerState( IONetworkInterface *, + void *, void *, + void *, void *); + +public: + +/*! @function isPrimaryInterface + @abstract Query whether the interface object provided represents + the "primary" network interface for the system. + @result true if the interface provided is the primary interface, + false otherwise. */ + + virtual bool isPrimaryInterface() const; + +/*! @function init + @abstract Initialize an IONetworkInterface object. + @discussion Initialize instance variables, and allocate resources. + Call getIfnet() to get the ifnet structure allocated by a concrete + subclass, then call initIfnet() to initialize this ifnet structure. + @param controller A network controller object that will service + the interface object being initialized. + @result true on success, false otherwise. */ + + virtual bool init( IONetworkController * controller ); + +/*! @function isRegistered + @abstract Returns true if the interface has been registered with + the data link layer. 
+ @discussion Once registered, the interface will be assigned a + BSD name (such as en0), and a kIOBSDNameKey property is added to the + property table containing this name. Calling this method performs + the same function as checking for the kIONetworkInterfaceRegisteredState + bit in the value returned by getInterfaceState(). + @result True if interface is registered. False if the data link layer + has no references to this network interface, which implies that either the + interface has yet to attach to the data link layer, or the interface has + been detached. */ + + virtual bool isRegistered() const; + +/*! @function getInterfaceState + @abstract Report the current state of the interface object by returning + the interface state flags. + @result Returns the interface state flags. */ + + virtual UInt32 getInterfaceState() const; + +/*! @function matchPropertyTable + @abstract Override the implementation in IOService in order to + implement family specific matching. + @discussion When the gIOLocationMatchKey property is present in the + dictionary provided, then fail the match unless the kIOBSDNameKey property + is found. This is to prevent a premature match when hunting for a root + device for BSD. The presence of the kIOBSDNameKey property indicates that + the interface has registered with BSD, and is a valid candidate for + matching against the gIOLocationMatchKey property. If the + gIOLocationMatchKey property is absent, then this method will always + return true. + @param table The dictionary of properties to match against. + @param score Pointer to the current driver's probe score, not used. + @result Returns true for a positive match, false otherwise. */ + + virtual bool matchPropertyTable( OSDictionary * table, + SInt32 * score ); + +/*! @function getController + @abstract Return the provider, an IONetworkController object, that + is servicing this interface object. 
+ @discussion This is the same controller object that was supplied as + an argument to the init() method. + @result The IONetworkController object that is providing service to + this interface object. */ + + virtual IONetworkController * getController() const; + +/*! @function inputPacket + @abstract Called by the network controller to submit a single packet + received from the network to the data link layer. + @discussion The packet received by this method may be added to an input + queue on the interface object, which the controller can use to postpone + the packet handoff to the upper layers, until all received packets have + been transferred to the input queue. A subsequent call to flushInputQueue(), + will transfer the entire contents of the queue to the data link layer, + by making a single call to dlil_input(). Other methods that can be used + to manage the input queue are flushInputQueue() and clearInputQueue(). + This input queue is not protected by a lock. Access to the queue by the + controller must be serialized, otherwise its use must be avoided. + @param m The mbuf containing the received packet. + @param length Specify the size of the received packet in the mbuf. + The mbuf length fields are updated with this value. If zero, + then the mbuf length fields are not updated. + @param options Options defined by inputPacket() that the caller + can use to specify this method call. + @param param A parameter provided by the caller. Not used by + IONetworkInterface. + @result The number of packets that were submitted to the data link layer, + or 0 if the packet was queued. */ + + virtual UInt32 inputPacket(struct mbuf * m, + UInt32 length = 0, + IOOptionBits options = 0, + void * param = 0); + +/*! @enum Options for the inputPacket() method. + @discussion An enumeration of the option bits that can be specified + in the options argument when calling inputPacket(). + @constant kInputOptionQueuePacket Keep the packet provided in the + input packet queue. 
No packets are sent to the data link layers, + and the caller's thread will not venture outside the interface + object. Calls to inputPacket() must be serialized. */ + + enum { + kInputOptionQueuePacket = 0x1 + }; + +/*! @function flushInputQueue + @abstract Send all packets held in the input queue to the data + link layer. + @discussion Remove all packets from the input queue and + send them to the data link layer by calling dlil_input(). This + method should be used in connection with the inputPacket() method, + to flush the input queue after inputPacket() was used to queue up + some number of received packets. See inputPacket() and clearInputQueue(). + @result The number of packets that were submitted to the data link layer. + May be zero if the queue was empty. */ + + virtual UInt32 flushInputQueue(); + +/*! @function clearInputQueue + @abstract Remove and discard all packets in the input queue. + @discussion Remove all packets from the input queue and + release them back to the free mbuf pool. Also see flushInputQueue(). + @result The number of packets freed. */ + + virtual UInt32 clearInputQueue(); + +/*! @function inputEvent + @abstract Send an event to the data link layer. + @discussion This method can be used by the network controller to + send an event to the data link layer. + @param type A constant describing the event type. + @param data Data associated with the event. + @result true if the event was delivered, false if the event type + specified is invalid, or if the event delivery was unsuccessful. */ + + virtual bool inputEvent(UInt32 type, void * data); + +/*! @function registerOutputHandler + @abstract Register a target/action to handle output packets. + @discussion The interface object will forward all output packets, + received from the data link layer, to the output handler registered + through this method. 
The default target and action are set by the init() + method to the controller, and the handler returned by the controller's + getOutputHandler() method. Once the interface becomes registered with + the data link layer, this method will return false and will reject any + further changes. + @param target Target object that implements the output handler. + @param action The function that will process output packets. + @result true if the target/action provided was accepted, + false otherwise. */ + + virtual bool registerOutputHandler(OSObject * target, + IOOutputAction action); + +/*! @function getNamePrefix + @abstract Return a string containing the prefix to use when + creating a BSD name for this interface. + @discussion The BSD name for each interface object is generated by + concatenating a string returned by this method, with an unique + unit number assigned by IONetworkStack. + A concrete subclass of IONetworkInterface must implement this method + and enforce a consistent name for all of its instances. + @result A pointer to a constant C string. */ + + virtual const char * getNamePrefix() const = 0; + +/*! @function getInterfaceType + @abstract Get the interface type. + @discussion Return the value in the if_type field in the ifnet structure. + @result A constant defined in bsd/net/if_types.h header file + that describes the interface type. */ + + virtual UInt8 getInterfaceType() const; + +/*! @function getMaxTransferUnit + @abstract Get the maximum transfer unit for this interface. + @discussion Return the value in the if_mtu field in the ifnet structure. + @result The interface MTU size in bytes. */ + + virtual UInt32 getMaxTransferUnit() const; + +/*! @function getFlags + @abstract Get the value of the interface flags. + @discussion Return the value in the if_flags field in the ifnet structure. + @result The value of the interface flags. */ + + virtual UInt16 getFlags() const; + +/*! 
@function getExtraFlags + @abstract Get the value of the interface extra flags. + @discussion Return the value in the if_eflags field in the ifnet structure. + @result The value of the interface extra flags. */ + + virtual UInt32 getExtraFlags() const; + +/*! @function getMediaAddressLength + @abstract Get the size of the media (MAC-layer) address. + @discussion Return the value in the if_addrlen field in the ifnet structure. + @result The size of the media address in bytes. */ + + virtual UInt8 getMediaAddressLength() const; + +/*! @function getMediaHeaderLength + @abstract Get the size of the media header. + @discussion Return the value in the if_hdrlen field in the ifnet structure. + @result The size of the media header in bytes. */ + + virtual UInt8 getMediaHeaderLength() const; + +/*! @function getUnitNumber + @abstract Get the unit number assigned to this interface object. + @discussion Return the value in the if_unit field in the ifnet structure. + @result The assigned interface unit number. */ + + virtual UInt16 getUnitNumber() const; + +/*! @function addNetworkData + @abstract Add an IONetworkData object to a dictionary managed by + the interface. + @param aData An IONetworkData object to be added to a dictionary + managed by the interface. This object is retained by the dictionary. + @result true if the operation was successful, false otherwise. */ + + virtual bool addNetworkData(IONetworkData * aData); + +/*! @function removeNetworkData + @abstract Remove an entry from the IONetworkData dictionary + managed by the interface. The removed object is released. + @param aKey A unique OSSymbol identifying the IONetworkData object + to be removed from the dictionary. + @result true if the operation was successful, false otherwise. */ + + virtual bool removeNetworkData(const OSSymbol * aKey); + +/*! @function removeNetworkData + @abstract Remove an entry from the IONetworkData dictionary + managed by the interface. The removed object is released. 
+ @param aKey A unique string identifying the IONetworkData object + to be removed from the dictionary. + @result true if the operation was successful, false otherwise. */ + + virtual bool removeNetworkData(const char * aKey); + +/*! @function getNetworkData + @abstract Get an IONetworkData object from the interface that is + associated with the given key. + @param aKey The unique string identifying the IONetworkData object to be + returned to caller. + @result Returns a reference to the matching IONetworkData object, + or 0 if no match was found. */ + + virtual IONetworkData * getNetworkData(const char * aKey) const; + +/*! @function getNetworkData + @abstract Get an IONetworkData object from the interface that is + associated with the given key. + @param aKey The unique OSSymbol identifying the IONetworkData object to be + returned to caller. + @result Returns a reference to the matching IONetworkData object, + or 0 if no match was found. */ + + virtual IONetworkData * getNetworkData(const OSSymbol * aKey) const; + + // FIXME - Compatibility methods (to be removed) + inline IONetworkData * getParameter(const char * aKey) const + { return getNetworkData(aKey); } + + inline bool setExtendedFlags(UInt32 flags, UInt32 clear = 0) + { return true; } + +protected: + +/*! @function setInterfaceType + @abstract Set the interface type. + @discussion Both the if_type field in the ifnet structure, and the + kIOInterfaceType property are updated with the value provided. + @param type A constant defined in bsd/net/if_types.h header file + that describes the interface type. + @result true if the update was successful, false otherwise. */ + + virtual bool setInterfaceType(UInt8 type); + +/*! @function setMaxTransferUnit + @abstract Set the maximum transfer unit for this interface. + @discussion Both the if_mtu field in the ifnet structure, and the + kIOMaxTransferUnit property are updated with the value provided. + @param mtu The interface MTU size in bytes. 
+ @result true if the update was successful, false otherwise. */ + + virtual bool setMaxTransferUnit(UInt32 mtu); + +/*! @function setFlags + @abstract Perform a read-modify-write operation on the current + interface flags value. + @discussion See bsd/net/if.h header file for the definition of the + flag constants. Both the if_flags field in the ifnet structure, and + the kIOInterfaceFlags property are updated with the value provided. + @param flags The bits that should be set. + @param clear The bits that should be cleared. If 0, then none + of the flags are cleared and the result is formed by OR'ing the + original flags value with the new flags. + @result true if the update was successful, false otherwise. */ + + virtual bool setFlags(UInt16 flags, UInt16 clear = 0); + +/*! @function setExtraFlags + @abstract Perform a read-modify-write operation on the current + interface extra flags value. + @discussion See bsd/net/if.h header file for the definition of the + extra flag constants. Both the if_eflags field in the ifnet structure, + and the kIOInterfaceExtraFlags property are updated with the value + provided. + @param flags The bits that should be set. + @param clear The bits that should be cleared. If 0, then none + of the flags are cleared and the result is formed by OR'ing the + original flags with the new flags. + @result true if the update was successful, false otherwise. */ + + virtual bool setExtraFlags(UInt32 flags, UInt32 clear = 0); + +/*! @function setMediaAddressLength + @abstract Set the size of the media (MAC-layer) address. + @discussion Both the if_addrlen field in the ifnet structure, and the + kIOMediaAddressLength property are updated with the value provided. + @param length The size of the media address in bytes. + @result true if the update was successful, false otherwise. */ + + virtual bool setMediaAddressLength(UInt8 length); + +/*! 
@function setMediaHeaderLength + @abstract Set the size of the media header. + @discussion Both the if_hdrlen field in the ifnet structure, and the + kIOMediaHeaderLength property are updated with the value provided. + @param length The size of the media header in bytes. + @result true if the update was successful, false otherwise. */ + + virtual bool setMediaHeaderLength(UInt8 length); + +/*! @function setUnitNumber + @abstract Assign an unique unit number to this interface. + @discussion This method is called by IONetworkStack before the + interface is registered with the data link layer, to assign an + unique unit number to the interface object. Both the if_unit field + in the ifnet structure, and the kIOInterfaceUnit property are updated + with the value provided. + @param unit The unit number assigned to this interface object. + @result true if the update was successful, false otherwise. */ + + virtual bool setUnitNumber(UInt16 unit); + +/*! @function free + @abstract Free the IONetworkInterface object. + @discussion Resource allocated by init() are released, and + clearInputQueue() is called to ensure that the input queue is empty. */ + + virtual void free(); + +/*! @function handleOpen + @abstract Handle a client open on the interface. + @discussion This method is called by IOService::open() with the + arbitration lock held, and must return true to accept the client open. + This method will in turn call handleClientOpen() to qualify the client + requesting the open. Since the controller is opened by the interface + in a lazy fashion, the interface may also perform an open on the + controller before this method returns. If the controller was opened, + then controllerDidOpen() is called to notify interested subclasses. + Subclasses should not override this method. + @param client The client object that requested the open. + @param options Options passed to IOService::open(). + @param argument Argument passed to IOService::open(). 
+ @result true to accept the client open, false otherwise. */ + + virtual bool handleOpen(IOService * client, + IOOptionBits options, + void * argument); + +/*! @function handleClose + @abstract Handle a client close on the interface. + @discussion This method is called by IOService::close() with the + arbitration lock held. This method will in turn call handleClientClose() + to notify interested subclasses about the client close. If this represents + the last close, then the interface will also close the controller before + this method returns. The controllerWillClose() method will be called before + closing the controller. Subclasses should not override this method. + @param client The client object that requested the close. + @param options Options passed to IOService::close(). */ + + virtual void handleClose(IOService * client, IOOptionBits options); + +/*! @function handleIsOpen + @abstract Query whether a client has an open on the interface. + @discussion This method is always called by IOService with the + arbitration lock held. Subclasses should not override this method. + @result true if the specified client, or any client if none (0) is + specified, presently has an open on this object. */ + + virtual bool handleIsOpen(const IOService * client) const; + +/*! @function lock + @abstract Take the network interface lock. + @discussion Take the recursive lock that protects the interface + state. All updates to the interface state and to the ifnet structure + must be performed while holding this lock. This call must be balanced + by a subsequent call to unlock(). */ + + virtual void lock(); + +/*! @function unlock + @abstract Release the network interface lock. + @discussion Release the recursive lock that protects the interface + state to balance a previous lock() call. */ + + virtual void unlock(); + +/*! @function controllerDidOpen + @abstract A notification that the interface has opened the network + controller. 
+ @discussion Called by handleOpen() to notify subclasses that the + controller has been opened. The open on the controller is done when + the interface receives the initial open request from a client. + Subclasses can override this method and inspect the controller before + allowing the client open. The implementation in the subclass must first + call the method in super and check the return value. This method is + called with our arbitration lock held, hence issuing I/O to the + controller must be avoided to eliminate the possibility of a + deadlock. + @param controller The controller that was opened. + @result Must return true in order for handleOpen() to accept + the client open. If the return is false, then the controller will be + closed and the client open will be refused. */ + + virtual bool controllerDidOpen(IONetworkController * controller); + +/*! @function controllerWillClose + @abstract A notification that the interface will close the network + controller. + @discussion Called by handleClose() after receiving a close from the + last client, and just before the controller is closed. Subclasses + can override this method to perform any cleanup action before the + controller is closed. This method is called with our arbitration lock + held, hence issuing I/O to the controller must be avoided to eliminate + the possibility of a deadlock. + @param controller The controller that is about to be closed. */ + + virtual void controllerWillClose(IONetworkController * controller); + +/*! @function performCommand + @abstract Handle an ioctl command sent to the network interface. + @discussion This method handles socket ioctl commands sent to the + network interface from DLIL. + IONetworkInterface handles commands that are common for all network + interface types. 
A subclass of IONetworkInterface may override this + method to override the command handling in IONetworkInterface, or + to extend the command processing to handle additional commands, + and then call super for any commands not handled in the subclass. + The ioctl commands handled by IONetworkInterface are + SIOCGIFMTU (Get interface MTU size), + SIOCSIFMTU (Set interface MTU size), + SIOCSIFMEDIA (Set media), and + SIOCGIFMEDIA (Get media and link status). + @param controller The controller object. + @param cmd The ioctl command code. + @param arg0 Command argument 0. Generally a pointer to an ifnet structure + associated with the interface. + @param arg1 Command argument 1. + @result A BSD return value defined in bsd/sys/errno.h. */ + + virtual SInt32 performCommand(IONetworkController * controller, + UInt32 cmd, + void * arg0, + void * arg1); + +/*! @function getIfnet + @abstract Get the ifnet structure allocated by the interface object. + @discussion Request an interface to reveal its ifnet structure. + A concrete subclass must allocate an ifnet structure when the + object is initialized, and return a pointer to the ifnet structure + when this method is called. + @result Pointer to an ifnet structure allocated by a concrete + interface subclass. */ + + virtual struct ifnet * getIfnet() const = 0; + +/*! @function initIfnet + @abstract Initialize the ifnet structure given. + @discussion A concrete subclass must override this method and initialize + the ifnet structure given. The implementation in the subclass must call + super before it returns, to allow IONetworkInterface to complete the + initialization, and to insert the BSD shim functions implemented in + IONetworkInterface to the appropriate function pointer fields in the + ifnet structure. IONetworkInterface will call this method during its + init() method. 
Subclasses are encouraged to use the ifnet accessor + methods to update the ifnet structure when possible, since this will + ensure that properties in the registry will also be updated to reflect + any changes made. + @param ifp Pointer to an ifnet structure obtained earlier through + the getIfnet() method call. + @result true on success, false otherwise. */ + + virtual bool initIfnet(struct ifnet * ifp); + +/*! @function handleClientOpen + @abstract Handle a client open on the interface. + @discussion Called by handleOpen() to handle an open from a client object. + Unlike handleOpen(), subclasses may override this method to catch an open + request from a client. This method is called with the arbitration lock held. + @param client The client object requesting the open. + @param options Options passed to IONetworkInterface::handleOpen(). + @param argument Argument passed to IONetworkInterface::handleOpen(). + @result true to accept the client open, false to refuse it. */ + + virtual bool handleClientOpen(IOService * client, + IOOptionBits options, + void * argument); + +/*! @function handleClientClose + @abstract Handle a client close on the interface. + @discussion Called by handleClose() to handle a close from a client object. + Unlike handleClose(), subclasses may override this method to catch a close + request from a client. This method is called with the arbitration lock held. + @param client The client object requesting the close. + @param options Options passed to IONetworkInterface::handleClose(). */ + + virtual void handleClientClose(IOService * client, + IOOptionBits options); + +/*! @function newUserClient + @abstract A request to create a connection for a non kernel client. + @discussion Create a new IOUserClient to service a connection to a + non kernel client. + @param owningTask The mach task requesting the connection. + @param security_id A token representing the access level for the task. 
+ @param type A constant specifying the type of connection to be created. + An IONetworkUserClient object is created if the type specified is + kIONetworkUserClientTypeID. + @param handler The IOUserClient object returned. + @result kIOReturnSuccess if an IONetworkUserClient was created, + kIOReturnNoMemory for a memory allocation error, or + kIOReturnBadArgument if the type specified is unknown. */ + + virtual IOReturn newUserClient(task_t owningTask, + void * security_id, + UInt32 type, + IOUserClient ** handler); + +/*! @function setInterfaceState + @abstract Update the interface object state flags. + @discussion The kIOInterfaceState property is updated with the value + provided. + @param flags The bits that should be set. + @param clear The bits that should be cleared. + @result The resulting interface state flags following any changes + made by this method. */ + + virtual UInt32 setInterfaceState( UInt32 set, UInt32 clear = 0 ); + +/*! @function powerStateWillChangeTo + @abstract Handle a notification that the network controller which is servicing + this interface object is about to transition to a new power state. + @discussion This method will call the controllerWillChangePowerState() method + on the controller's work loop context to prepare for the power state change. + Subclasses should not override this method. + @param flags Flags that describe the capability of the controller in the new + power state. + @param stateNumber An index to a state in the network controller's + power state array that the controller is switching to. + @param policyMaker A reference to the network controller's policy-maker, + and is also the originator of this notification. + @result The return will always be IOPMAckImplied to indicate that the + preparation for the power change has already completed when this method + returns. */ + + virtual IOReturn powerStateWillChangeTo( IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ); + +/*! 
@function powerStateDidChangeTo + @abstract Handle a notification that the network controller which is servicing + this interface object has transitioned to a new power state. + @discussion This method will call the controllerDidChangePowerState() method + on the controller's work loop context to prepare for the power state change. + Subclasses should not override this method. + @param flags Flags that describe the capability of the controller in the new + power state. + @param stateNumber An index to a state in the network controller's + power state array that the controller has switched to. + @param policyMaker A reference to the network controller's policy-maker, + and is also the originator of this notification. + @result The return will always be IOPMAckImplied to indicate that the + preparation for the power change has already completed when this method + returns. */ + + virtual IOReturn powerStateDidChangeTo( IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker ); + +/*! @function controllerWillChangePowerState + @abstract Handle a notification that the network controller which is servicing + this interface object is about to transition to a new power state. + @param controller The network controller object. + @param flags Flags that describe the capability of the controller in the new + power state. + @param stateNumber An index to a state in the network controller's + power state array that the controller is switching to. + @param policyMaker A reference to the network controller's policy-maker, + and is also the originator of this notification. + @result The return value is always kIOReturnSuccess. */ + + virtual IOReturn controllerWillChangePowerState( + IONetworkController * controller, + IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker); + +/*! 
@function controllerDidChangePowerState + @abstract Handle a notification that the network controller which is servicing + this interface object has transitioned to a new power state. + @param controller The network controller object. + @param flags Flags that describe the capability of the controller in the new + power state. + @param stateNumber An index to a state in the network controller's + power state array that the controller has switched to. + @param policyMaker A reference to the network controller's policy-maker, + and is also the originator of this notification. + @result The return value is always kIOReturnSuccess. */ + + virtual IOReturn controllerDidChangePowerState( + IONetworkController * controller, + IOPMPowerFlags flags, + UInt32 stateNumber, + IOService * policyMaker); + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IONetworkInterface, 0); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 1); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 2); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 3); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 4); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 5); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 6); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 7); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 8); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 9); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 10); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 11); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 12); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 13); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 14); + OSMetaClassDeclareReservedUnused( IONetworkInterface, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IONETWORKINTERFACE_H */ diff --git a/iokit/IOKit/network/IONetworkLib.h b/iokit/IOKit/network/IONetworkLib.h new file mode 
100644 index 000000000..375c5693c --- /dev/null +++ b/iokit/IOKit/network/IONetworkLib.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _IONETWORKLIB_H +#define _IONETWORKLIB_H + +#include +#include +#include +#include +#include +#include + +typedef UInt32 IONDHandle; + +#ifdef __cplusplus +extern "C" { +#endif + +/*! @function IONetworkOpen + @abstract Open a connection to an IONetworkInterface object. + An IONetworkUserClient object is created to manage the connection. */ + + IOReturn IONetworkOpen(io_object_t obj, io_connect_t * con); + +/*! @function IONetworkClose + @abstract Close the connection to an IONetworkInterface object. */ + + IOReturn IONetworkClose(io_connect_t con); + +/*! @function IONetworkWriteData + @abstract Write to the buffer of a network data object. + @param conObject The connection object. + @param dataHandle The handle of a network data object. + @param srcBuf The data to write is taken from this buffer. 
+ @param inSize The size of the source buffer. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + IOReturn IONetworkWriteData(io_connect_t conObj, + IONDHandle dataHandle, + UInt8 * srcBuf, + UInt32 inSize); + +/*! @function IONetworkReadData + @abstract Read the buffer of a network data object. + @param conObject The connection object. + @param dataHandle The handle of a network data object. + @param destBuf The buffer where the data read shall be written to. + @param inOutSizeP Pointer to an integer that the caller must initialize + to contain the size of the buffer. This function will overwrite + it with the actual number of bytes written to the buffer. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + IOReturn IONetworkReadData(io_connect_t conObj, + IONDHandle dataHandle, + UInt8 * destBuf, + UInt32 * inOutSizeP); + +/*! @function IONetworkResetData + @abstract Fill the buffer of a network data object with zeroes. + @param conObject The connection object. + @param dataHandle The handle of a network data object. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + IOReturn IONetworkResetData(io_connect_t conObject, IONDHandle dataHandle); + +/*! @function IONetworkGetDataCapacity + @abstract Get the capacity (in bytes) of a network data object. + @param con The connection object. + @param dataHandle The handle of a network data object. + @param capacityP Upon success, the capacity is written to this address. + @result kIOReturnSuccess on success, or an error code otherwise. */ + + IOReturn IONetworkGetDataCapacity(io_connect_t conObject, + IONDHandle dataHandle, + UInt32 * capacityP); + +/*! @function IONetworkGetDataHandle + @abstract Get the handle of a network data object with the given name. + @param con The connection object. + @param dataName The name of the network data object. + @param dataHandleP Upon success, the handle is written to this address. 
+ @result kIOReturnSuccess on success, or an error code otherwise. */ + + IOReturn IONetworkGetDataHandle(io_connect_t conObject, + const char * dataName, + IONDHandle * dataHandleP); + +#ifdef __cplusplus +} +#endif + +#endif /* !_IONETWORKLIB_H */ diff --git a/iokit/IOKit/network/IONetworkMedium.h b/iokit/IOKit/network/IONetworkMedium.h new file mode 100644 index 000000000..fd948c279 --- /dev/null +++ b/iokit/IOKit/network/IONetworkMedium.h @@ -0,0 +1,411 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkMedium.h + * + * HISTORY + * + */ + +#ifndef _IONETWORKMEDIUM_H +#define _IONETWORKMEDIUM_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/*! @typedef IOMediumType + @discussion A 32-bit value divided into fields which describes + a single medium type. */ + +typedef UInt32 IOMediumType; + +/*! @defined kIOMediumType + @abstract kIOMediumType is a property of IONetworkMedium objects. + It is an OSNumber object. 
+ @discussion The kIOMediumType property describes the type of + medium that this object represents. */ + +#define kIOMediumType "Type" + +/*! @defined kIOMediumFlags + @abstract kIOMediumFlags is a property of IONetworkMedium objects. + It is an OSNumber object. + @discussion The kIOMediumFlags property describes a set of + attributes assigned to the medium. */ + +#define kIOMediumFlags "Flags" + +/*! @defined kIOMediumSpeed + @abstract kIOMediumSpeed is a property of IONetworkMedium objects. + It is an OSNumber object. + @discussion The kIOMediumSpeed property describes the maximum link + speed supported by the medium in bits per second. */ + +#define kIOMediumSpeed "Speed" + +/*! @defined kIOMediumIndex + @abstract kIOMediumIndex is a property of IONetworkMedium objects. + It is an OSNumber object. + @discussion The kIOMediumIndex property describes an index assigned + by the owner of the medium object. Its interpretation is driver + specific. */ + +#define kIOMediumIndex "Index" + +//=========================================================================== +// Medium Type (IOMediumType). +// +// The medium type is encoded by a 32-bit value. The definitions of +// the fields and the encoding for each field is adapted from FreeBSD. +// +// Bits Definition +// ------------------- +// 4-0 medium subtype +// 7-5 network type +// 15-8 network specific options +// 19-16 reserved +// 27-20 common options +// 31-28 instance number + +// Ethernet. 
+// +enum { + kIOMediumEthernet = IFM_ETHER, + kIOMediumEthernetAuto = ( IFM_AUTO | IFM_ETHER ), + kIOMediumEthernetManual = ( IFM_MANUAL | IFM_ETHER ), + kIOMediumEthernetNone = ( IFM_NONE | IFM_ETHER ), + kIOMediumEthernet10BaseT = ( IFM_10_T | IFM_ETHER ), + kIOMediumEthernet10Base2 = ( IFM_10_2 | IFM_ETHER ), + kIOMediumEthernet10Base5 = ( IFM_10_5 | IFM_ETHER ), + kIOMediumEthernet100BaseTX = ( IFM_100_TX | IFM_ETHER ), + kIOMediumEthernet100BaseFX = ( IFM_100_FX | IFM_ETHER ), + kIOMediumEthernet100BaseT4 = ( IFM_100_T4 | IFM_ETHER ), + kIOMediumEthernet100BaseVG = ( IFM_100_VG | IFM_ETHER ), + kIOMediumEthernet100BaseT2 = ( IFM_100_T2 | IFM_ETHER ), + kIOMediumEthernet1000BaseSX = ( IFM_1000_SX | IFM_ETHER ), + kIOMediumEthernet10BaseSTP = ( IFM_10_STP | IFM_ETHER ), + kIOMediumEthernet10BaseFL = ( IFM_10_FL | IFM_ETHER ), + kIOMediumEthernet1000BaseLX = ( IFM_1000_LX | IFM_ETHER ), + kIOMediumEthernet1000BaseCX = ( IFM_1000_CX | IFM_ETHER ), + kIOMediumEthernet1000BaseTX = ( IFM_1000_TX | IFM_ETHER ), + kIOMediumEthernetHomePNA1 = ( IFM_HPNA_1 | IFM_ETHER ), +}; + +// IEEE 802.11 Wireless. +// +enum { + kIOMediumIEEE80211 = IFM_IEEE80211, + kIOMediumIEEE80211Auto = ( IFM_AUTO | IFM_IEEE80211 ), + kIOMediumIEEE80211Manual = ( IFM_MANUAL | IFM_IEEE80211 ), + kIOMediumIEEE80211None = ( IFM_NONE | IFM_IEEE80211 ), + kIOMediumIEEE80211FH1 = ( IFM_IEEE80211_FH1 | IFM_IEEE80211 ), + kIOMediumIEEE80211FH2 = ( IFM_IEEE80211_FH2 | IFM_IEEE80211 ), + kIOMediumIEEE80211DS2 = ( IFM_IEEE80211_DS2 | IFM_IEEE80211 ), + kIOMediumIEEE80211DS5 = ( IFM_IEEE80211_DS5 | IFM_IEEE80211 ), + kIOMediumIEEE80211DS11 = ( IFM_IEEE80211_DS11 | IFM_IEEE80211 ), + kIOMediumIEEE80211DS1 = ( IFM_IEEE80211_DS1 | IFM_IEEE80211 ), + kIOMediumIEEE80211OptionAdhoc = IFM_IEEE80211_ADHOC, +}; + +// Common options. 
+// +enum { + kIOMediumOptionFullDuplex = IFM_FDX, + kIOMediumOptionHalfDuplex = IFM_HDX, + kIOMediumOptionFlowControl = IFM_FLOW, + kIOMediumOptionFlag0 = IFM_FLAG0, + kIOMediumOptionFlag1 = IFM_FLAG1, + kIOMediumOptionFlag2 = IFM_FLAG2, + kIOMediumOptionLoopback = IFM_LOOP, +}; + +// Medium type masks. +// +#define kIOMediumSubTypeMask IFM_TMASK +#define kIOMediumNetworkTypeMask IFM_NMASK +#define kIOMediumOptionsMask IFM_OMASK +#define kIOMediumCommonOptionsMask IFM_GMASK +#define kIOMediumInstanceShift IFM_ISHIFT +#define kIOMediumInstanceMask IFM_IMASK + +// Medium type field accessors. +// +#define IOMediumGetSubType(x) ((x) & kIOMediumSubTypeMask) +#define IOMediumGetNetworkType(x) ((x) & kIOMediumNetworkMask) +#define IOMediumGetInstance(x) (((x) & kIOMediumInstanceMask) >> \ + kIOMediumInstanceShift) + +//=========================================================================== +// Medium flags. + + +//=========================================================================== +// Link status bits. +// +enum { + kIONetworkLinkValid = IFM_AVALID, // link status is valid + kIONetworkLinkActive = IFM_ACTIVE, // link is up/active. +}; + +#ifdef __cplusplus +} +#endif + +//=========================================================================== +// IONetworkMedium class. + +#ifdef KERNEL + +#include +#include + +/*! @class IONetworkMedium + @abstract An object that encapsulates information about a network + medium (i.e. 10Base-T, or 100Base-T Full Duplex). The main purpose of + this object is for network drivers to advertise its media capability, + through a collection of IONetworkMedium objects stored in a dictionary + in its property table. IONetworkMedium supports serialization, and will + encode its properties in the form of a dictionary to the serialization + stream when instructed. This will allow a user-space application to + browse the set of media types supported by the controller. 
*/ + +class IONetworkMedium : public OSObject +{ + OSDeclareDefaultStructors( IONetworkMedium ) + +protected: + IOMediumType _type; + UInt32 _flags; + UInt64 _speed; + UInt32 _index; + const OSSymbol * _name; + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *_reserved; + + +/*! @function free + @abstract Free the IONetworkMedium object. */ + + virtual void free(); + +public: + +/*! @function nameForType + @abstract Create a name that describes a medium type. + @discussion Given a medium type, create an OSymbol object that + describes the medium type. There is a 1-to-1 mapping between the + medium type, and the medium name created by this method. The caller + is responsible for releasing the OSSymbol object returned. + @param type A medium type. See IONetworkMedium.h for type encoding. + @result An OSSymbol object is created based on the type provided. */ + + static const OSSymbol * nameForType(IOMediumType type); + +/*! @function addMedium + @abstract Add an IONetworkMedium object to a dictionary. + @discussion A helper function to add an IONetworkMedium object to a + given dictionary. The name of the medium is used as the key for the + new dictionary entry. + @param dict An OSDictionary object where the medium object should be + added as a new entry. + @param medium The IONetworkMedium object to add to the dictionary. + @result true on success, false otherwise. */ + + static bool addMedium(OSDictionary * dict, + const IONetworkMedium * medium); + +/*! @function removeMedium + @abstract Remove an IONetworkMedium object from a dictionary. + @discussion A helper function to remove an entry in a dictionary. + @param dict The OSDictionary object where the medium object should be + removed from. + @param medium The name of this medium object is used as the key. */ + + static void removeMedium(OSDictionary * dict, + const IONetworkMedium * medium); + +/*! 
@function getMediumWithType + @abstract Find a medium object from a dictionary with a given type. + @discussion Iterate through a dictionary and return an IONetworkMedium + entry with the given type. An optional mask supplies the don't care bits. + @param dict The dictionary to look for a matching entry. + @param type Search for an entry with this type. + @param mask The don't care bits in IOMediumType. Defaults to 0, which + implies that a perfect match is desired. + @result The first matching IONetworkMedium entry found, + or 0 if no match was found. */ + + static IONetworkMedium * getMediumWithType(const OSDictionary * dict, + IOMediumType type, + IOMediumType mask = 0); + +/*! @function getMediumWithIndex + @abstract Find a medium object from a dictionary with a given index. + @discussion Iterate through a dictionary and return an IONetworkMedium + entry with the given index. An optional mask supplies the don't care bits. + @param dict The dictionary to look for a matching entry. + @param index Search for an entry with the given index. + @param mask The don't care bits in index. Defaults to 0, which + implies that a perfect match is desired. + @result The first matching IONetworkMedium entry found, + or 0 if no match was found. */ + + static IONetworkMedium * getMediumWithIndex(const OSDictionary * dict, + UInt32 index, + UInt32 mask = 0); + +/*! @function init + @abstract Initialize an IONetworkMedium object. + @param type The medium type, this value is encoded with bits defined in + IONetworkMedium.h. + @param speed The maximum (or the only) link speed supported over this + medium in units of bits per second. + @param flags An optional flag for the medium object. + See IONetworkMedium.h for defined flags. + @param index An optional index number assigned by the owner. + Drivers can use this to store an index to a media table in + the driver, or it may map to a driver defined media type. + @param name An optional name assigned to this medium object. 
If 0, + then a name will be created based on the medium type by + calling IONetworkMedium::nameForType(). Since the name of + the medium is used as a key when inserted into a dictionary, + the name chosen must be unique within the scope of the owner. + @result true on success, false otherwise. */ + + virtual bool init(IOMediumType type, + UInt64 speed, + UInt32 flags = 0, + UInt32 index = 0, + const char * name = 0); + +/*! @function medium + @abstract Factory method which performs allocation and initialization + of an IONetworkMedium object. + @param type The medium type, this value is encoded with bits defined in + IONetworkMedium.h. + @param speed The maximum (or the only) link speed supported over this + medium in units of bits per second. + @param flags An optional flag for the medium object. + See IONetworkMedium.h for defined flags. + @param index An optional index number assigned by the owner. + Drivers can use this to store an index to a media table in + the driver, or it may map to a driver defined media type. + @param name An optional name assigned to this medium object. If 0, + then a name will be created based on the medium type by + calling IONetworkMedium::nameForType(). Since the name of + the medium is used as a key when inserted into a dictionary, + the name chosen must be unique within the scope of the owner. + @result An IONetworkMedium instance on success, or 0 otherwise. */ + + static IONetworkMedium * medium(IOMediumType type, + UInt64 speed, + UInt32 flags = 0, + UInt32 index = 0, + const char * name = 0); + +/*! @function getType + @result The medium type assigned to this medium object. */ + + virtual IOMediumType getType() const; + +/*! @function getSpeed + @result The maximum link speed supported by this medium. */ + + virtual UInt64 getSpeed() const; + +/*! @function getFlags + @result The medium flags. */ + + virtual UInt32 getFlags() const; + +/*! @function getIndex + @result The assigned medium index. 
*/ + + virtual UInt32 getIndex() const; + +/*! @function getName + @result The name assigned to this medium object. */ + + virtual const OSSymbol * getName() const; + +/*! @function getKey + @result The key to use for this medium object. This key should be + used when this object is added to a dictionary. Same as getName(). */ + + virtual const OSSymbol * getKey() const; + +/*! @function isEqualTo + @abstract Test for equality between two IONetworkMedium objects. + @discussion Two IONetworkMedium objects are considered equal if + they have similar properties assigned to them during initialization. + @param medium An IONetworkMedium to test against the IONetworkMedium + object being called. + @result true if equal, false otherwise. */ + + virtual bool isEqualTo(const IONetworkMedium * medium) const; + +/*! @function isEqualTo + @abstract Test for equality between a IONetworkMedium object and an + OSObject. + @discussion The OSObject is considered equal to the IONetworkMedium + object if the OSObject is an IONetworkMedium, and they have + similar properties assigned to them during initialization. + @param obj An OSObject to test against an IONetworkMedium object. + @result true if equal, false otherwise. */ + + virtual bool isEqualTo(const OSMetaClassBase * obj) const; + +/*! @function serialize + @abstract Serialize the IONetworkMedium object. + @discussion A dictionary is created containing the properties + assigned to this medium object, and this dictionary is then + serialized using the OSSerialize object provided. + @param s An OSSerialize object. + @result true on success, false otherwise. */ + + virtual bool serialize(OSSerialize * s) const; + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IONetworkMedium, 0); + OSMetaClassDeclareReservedUnused( IONetworkMedium, 1); + OSMetaClassDeclareReservedUnused( IONetworkMedium, 2); + OSMetaClassDeclareReservedUnused( IONetworkMedium, 3); +}; + +// Translate getKey() to getName(). 
+// +inline const OSSymbol * IONetworkMedium::getKey() const +{ + return getName(); +} + +#endif /* KERNEL */ + +#endif /* !_IONETWORKMEDIUM_H */ diff --git a/iokit/IOKit/network/IONetworkStack.h b/iokit/IOKit/network/IONetworkStack.h new file mode 100644 index 000000000..b2d97c810 --- /dev/null +++ b/iokit/IOKit/network/IONetworkStack.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkStack.h - An IOKit proxy for the BSD network stack. + * + * HISTORY + * + */ + +#ifndef _IONETWORKSTACK_H +#define _IONETWORKSTACK_H + +// User-client keys +// +#define kIONetworkStackUserCommand "IONetworkStackUserCommand" +enum { + kIORegisterOne = 1, + kIORegisterAll +}; + +#ifdef KERNEL + +class IONetworkInterface; + +class IONetworkStack : public IOService +{ + OSDeclareDefaultStructors( IONetworkStack ) + +protected: + OSOrderedSet * _ifSet; + OSDictionary * _ifDict; + IONotifier * _interfaceNotifier; + bool _registerPrimaryInterface; + + struct ExpansionData { }; + /*! 
@var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *_reserved; + + + static bool interfacePublished( void * target, + void * param, + IOService * service ); + + static void unregisterBSDInterface( IONetworkInterface * netif ); + + static void registerBSDInterface( IONetworkInterface * netif ); + + static SInt32 orderRegisteredInterfaces( const OSMetaClassBase * obj1, + const OSMetaClassBase * obj2, + void * ref ); + + static void completeRegistrationUsingArray( OSArray * array ); + + static void completeRegistration( OSArray * array, bool isSync ); + + virtual void free(); + + virtual bool addInterface( IONetworkInterface * netif ); + + virtual void removeInterface( IONetworkInterface * netif ); + + virtual IONetworkInterface * getInterface( UInt32 index ); + + virtual bool containsInterface( IONetworkInterface * netif ); + + virtual bool addRegisteredInterface( IONetworkInterface * netif ); + + virtual void removeRegisteredInterface( IONetworkInterface * netif ); + + virtual IONetworkInterface * getRegisteredInterface( const char * name, + UInt32 unit ); + + virtual IONetworkInterface * getLastRegisteredInterface(const char * name); + + virtual UInt32 getNextAvailableUnitNumber( const char * name, + UInt32 startingUnit = 0 ); + + virtual bool preRegisterInterface( IONetworkInterface * netif, + const char * name, + UInt32 unit, + OSArray * array ); + +public: + static IONetworkStack * getNetworkStack(); + + static int bsdInterfaceWasUnregistered( struct ifnet * ifp ); + + virtual bool init( OSDictionary * properties ); + + virtual bool start( IOService * provider ); + + virtual void stop( IOService * provider ); + + virtual IOReturn registerAllInterfaces(); + + virtual IOReturn registerPrimaryInterface( bool enable ); + + virtual IOReturn registerInterface( IONetworkInterface * netif, + const char * name, + UInt32 unit = 0, + bool isSync = true ); + + virtual IOReturn message( UInt32 type, + IOService * provider, + void * argument = 0 
); + + virtual IOReturn newUserClient( task_t owningTask, + void * security_id, + UInt32 type, + IOUserClient ** handler ); + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IONetworkStack, 0); + OSMetaClassDeclareReservedUnused( IONetworkStack, 1); + OSMetaClassDeclareReservedUnused( IONetworkStack, 2); + OSMetaClassDeclareReservedUnused( IONetworkStack, 3); + +}; + +#endif /* KERNEL */ + +#endif /* !_IONETWORKSTACK_H */ diff --git a/iokit/IOKit/network/IONetworkStats.h b/iokit/IOKit/network/IONetworkStats.h new file mode 100644 index 000000000..03496c214 --- /dev/null +++ b/iokit/IOKit/network/IONetworkStats.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IONetworkStats.h + * + * HISTORY + */ + +#ifndef _IONETWORKSTATS_H +#define _IONETWORKSTATS_H + +/*! @header IONetworkStats.h + @discussion Generic network statistics. */ + +//------------------------------------------------------------------------ +// Generic network statistics. 
Common to all network interfaces. +// +// WARNING: This structure must match the statistics field in +// ifnet->if_data. This structure will overlay a portion of ifnet. + +/*! @typedef IONetworkStats + @discussion Generic network statistics structure. + @field inputPackets count input packets. + @field inputErrors count input errors. + @field outputPackets count output packets. + @field outputErrors count output errors. + @field collisions count collisions on CDMA networks. */ + +typedef struct { + UInt32 inputPackets; + UInt32 inputErrors; + UInt32 outputPackets; + UInt32 outputErrors; + UInt32 collisions; +} IONetworkStats; + +/*! @defined kIONetworkStatsKey + @discussion Defines the name of an IONetworkData that contains + an IONetworkStats. */ + +#define kIONetworkStatsKey "IONetworkStatsKey" + +//------------------------------------------------------------------------ +// Output queue statistics. + +/*! @typedef IOOutputQueueStats + @discussion Statistics recorded by IOOutputQueue objects. + @field capacity queue capacity. + @field size current size of the queue. + @field peakSize peak size of the queue. + @field dropCount number of packets dropped. + @field outputCount number of output packets. + @field retryCount number of retries. + @field stallCount number of queue stalls. */ + +typedef struct { + UInt32 capacity; + UInt32 size; + UInt32 peakSize; + UInt32 dropCount; + UInt32 outputCount; + UInt32 retryCount; + UInt32 stallCount; + UInt32 reserved[4]; +} IOOutputQueueStats; + +/*! @defined kIOOutputQueueStatsKey + @discussion Defines the name of an IONetworkData that contains + an IOOutputQueueStats. 
*/ + +#define kIOOutputQueueStatsKey "IOOutputQueueStatsKey" + +#endif /* !_IONETWORKSTATS_H */ diff --git a/iokit/IOKit/network/IONetworkUserClient.h b/iokit/IOKit/network/IONetworkUserClient.h new file mode 100644 index 000000000..9bbfab393 --- /dev/null +++ b/iokit/IOKit/network/IONetworkUserClient.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _IONETWORKUSERCLIENT_H +#define _IONETWORKUSERCLIENT_H + +// IONetworkUserClient type ID. +// +#define kIONetworkUserClientTypeID 0xff000001 +#define kIONUCType 0xff000001 // FIXME + +// IONetworkUserClient call structure definitions. 
+// +enum { + kIONUCResetNetworkDataIndex = 0, +#define kIONUCResetNetworkDataInputs 1 +#define kIONUCResetNetworkDataOutputs 0 +#define kIONUCResetNetworkDataFlags kIOUCScalarIScalarO + + kIONUCWriteNetworkDataIndex = 1, +#define kIONUCWriteNetworkDataInput0 0xffffffff +#define kIONUCWriteNetworkDataInput1 0xffffffff +#define kIONUCWriteNetworkDataFlags kIOUCScalarIStructI + + kIONUCReadNetworkDataIndex = 2, +#define kIONUCReadNetworkDataInputs 1 +#define kIONUCReadNetworkDataOutputs 0xffffffff +#define kIONUCReadNetworkDataFlags kIOUCScalarIStructO + + kIONUCGetNetworkDataCapacityIndex = 3, +#define kIONUCGetNetworkDataCapacityInputs 1 +#define kIONUCGetNetworkDataCapacityOutputs 1 +#define kIONUCGetNetworkDataCapacityFlags kIOUCScalarIScalarO + + kIONUCGetNetworkDataHandleIndex = 4, +#define kIONUCGetNetworkDataHandleInputs 0xffffffff +#define kIONUCGetNetworkDataHandleOutputs 0xffffffff +#define kIONUCGetNetworkDataHandleFlags kIOUCStructIStructO + + kIONUCLastIndex +}; + +#ifdef KERNEL + +#include + +class IONetworkInterface; + +/*! @class IONetworkUserClient + @abstract An IOUserClient created by an IONetworkInterface to + manage user space requests. */ + +class IONetworkUserClient : public IOUserClient +{ + OSDeclareDefaultStructors( IONetworkUserClient ) + +protected: + IONetworkInterface * _owner; + task_t _task; + IOExternalMethod _methods[kIONUCLastIndex]; + +/*! @function + @abstract Free the IONetworkUserClient object. */ + + virtual void free(); + +public: + +/*! @function withTask + @abstract Factory method that performs allocation and initialization + of an IONetworkUserClient object. + @param owningTask See IOUserClient. + @result An IONetworkUserClient on success, 0 otherwise. */ + + static IONetworkUserClient * withTask(task_t owningTask); + +/*! @function start + @abstract Start the IONetworkUserClient. + @discussion Open the provider, must be an IONetworkInterface object, + and initialize the IOExternalMethod array. 
+ @result true on success, false otherwise. */ + + virtual bool start(IOService * provider); + +/*! @function clientClose + @abstract Handle a client close. + @discussion Close and detach from our owner (provider). + @result kIOReturnSuccess. */ + + virtual IOReturn clientClose(); + +/*! @function clientDied + @abstract Handle client death. + @discussion Close and detach from our owner (provider). + @result kIOReturnSuccess. */ + + virtual IOReturn clientDied(); + +/*! @function getExternalMethodForIndex + @abstract Look up a method entry from the method array. + @discussion Called by IOUserClient to fetch the method entry, + described by an IOExternalMethod structure, that correspond to + the index provided. + @param index The method index. + @result A pointer to an IOExternalMethod structure containing the + method definition for the given index. */ + + virtual IOExternalMethod * getExternalMethodForIndex(UInt32 index); + +protected: + +/*! @function resetNetworkData + @abstract Fill the data buffer in an IONetworkData object with zeroes. + @param key An OSSymbol key associated with an IONetworkData object. + @result kIOReturnSuccess on success, kIOReturnBadArgument if an + argument is invalid, or an error from IONetworkData::reset(). */ + + virtual IOReturn resetNetworkData(OSSymbol * key); + +/*! @function writeNetworkData + @abstract Write to the data buffer in an IONetworkData object with + data from a source buffer provided by the caller. + @param key The OSSymbol key associated with an IONetworkData object. + @param srcBuffer The source buffer provided by the caller. + @param srcBufferSize The size of the source buffer. + @result kIOReturnSuccess on success, kIOReturnBadArgument if an + argument is invalid, or an error from IONetworkData::write(). */ + + virtual IOReturn writeNetworkData(OSSymbol * key, + void * srcBuffer, + IOByteCount srcBufferSize); + +/*! 
@function readNetworkData + @abstract Read the data buffer in an IONetworkData object and copy + this data to a destination buffer provided by the caller. + @param key The OSSymbol key associated with an IONetworkData object. + @param dstBuffer The destination buffer provided by the caller. + @param dstBufferSize Pointer to an integer that the caller must + initialize to hold the size of the destination buffer. This method + will overwrite it with the actual number of bytes written. + @result kIOReturnSuccess on success, kIOReturnBadArgument if an + argument is invalid, or an error from IONetworkData::read(). */ + + virtual IOReturn readNetworkData(OSSymbol * key, + void * dstBuffer, + IOByteCount * dstBufferSize); + +/*! @function getNetworkDataCapacity + @abstract Get the capacity of an IONetworkData object, described + by the size of its data buffer. + @param key The OSSymbol key of an IONetworkData object. + @param capacity A pointer to the capacity value returned by this + method. + @result kIOReturnSuccess on success, kIOReturnBadArgument if an + argument is invalid. */ + + virtual IOReturn getNetworkDataCapacity(OSSymbol * key, + UInt32 * capacity); + +/*! @function getNetworkDataHandle + @abstract Return an opaque handle to a provider's IONetworkData object. + @discussion Called to obtain an unique handle that maps to an IONetworkData + object. This handle can be later passed to other methods defined in this + class to refer to the same object. + @param name A C string with the name of the IONetworkData object. + @param handle If an IONetworkData object with the given name is found, + then its associated OSSymbol object is written to this address. + @param nameSize The size of the name string, including the final + terminating null character. + @param handleSizeP The size of the buffer allocated by the caller + to store the handle. This should be 4 bytes. 
+ @result kIOReturnSuccess on success, kIOReturnBadArgument if an + argument is invalid, or kIOReturnNoMemory if unable to allocate memory. */ + + virtual IOReturn getNetworkDataHandle(char * name, + OSSymbol ** handle, + IOByteCount nameSize, + IOByteCount * handleSizeP); +}; + +#endif /* KERNEL */ + +#endif /* !_IONETWORKUSERCLIENT_H */ diff --git a/iokit/IOKit/network/IOOutputQueue.h b/iokit/IOKit/network/IOOutputQueue.h new file mode 100644 index 000000000..3c685f3e0 --- /dev/null +++ b/iokit/IOKit/network/IOOutputQueue.h @@ -0,0 +1,257 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOOutputQueue.h + * + * HISTORY + * 2-Feb-1999 Joe Liu (jliu) created. + * + */ + +#ifndef _IOOUTPUTQUEUE_H +#define _IOOUTPUTQUEUE_H + +#include + +// Forward declarations. +// +struct mbuf; +class IONetworkData; + +// FIXME - We do not want the enqueue/dequeue macros defined in queue.h. +// +#undef enqueue(queue,elt) +#undef dequeue(queue) + +// FIXME - Belongs in IOBasicOutputQueue.h +// +/*! 
@enum The status of the packet sent to the target. + @constant kIOOutputStatusMask Define the status field in the return code. + @constant kIOOutputStatusAccept Packet was accepted by the target. + @constant kIOOutputStatusDropped Packet accepted, but was also dropped. + @constant kIOOutputStatusRetry Target ran out of resources, and is unable + to accept the packet. The ownership of the packet reverts back to the + queue. */ + +enum { + kIOOutputStatusMask = 0x00ff, + kIOOutputStatusAccepted = 0x0000, + kIOOutputStatusDropped = 0x0001, + kIOOutputStatusRetry = 0x0002 +}; + +/*! @enum A command requested by the target. + @constant kIOOutputCommandMask Define the command field in the return code. + @constant kIOOutputCommandNone No command. + @constant kIOOutputCommandStall A command to stall the queue. */ + +enum { + kIOOutputCommandMask = 0xff00, + kIOOutputCommandNone = 0x0000, + kIOOutputCommandStall = 0x0100 +}; + +/*! @enum Definition of common return codes returned by the target's + output handler. + @constant kIOReturnOutputSuccess Packet was accepted. + @constant kIOReturnOutputDropped Packet was dropped. + @constant kIOReturnOutputStall Stall the queue and retry the same packet + when the queue is restarted. */ + +enum { + kIOReturnOutputSuccess = (kIOOutputStatusAccepted | kIOOutputCommandNone), + kIOReturnOutputDropped = (kIOOutputStatusDropped | kIOOutputCommandNone), + kIOReturnOutputStall = (kIOOutputStatusRetry | kIOOutputCommandStall) +}; + +/*! @class IOOutputQueue : public OSObject + @abstract A packet queue that supports multiple producers, and a single + consumer. Each producer, or a client thread, will deliver a chain of packets + to the queue. A single consumer will remove packets from the queue one at a + time and forward it to the registered target/action. 
This object may be used + by an IONetworkController on the output (transmit) side to handle the output + packet flow downstream from an IONetworkInterface, and then call the driver's + output function. IOOutputQueue is an abstract class that provides an interface + for its subclasses. Concrete subclasses will complete the implementation, and + specify the context that the target is called for packets removed from + the queue. */ + +class IOOutputQueue : public OSObject +{ + OSDeclareAbstractStructors( IOOutputQueue ) + +private: + + static void runServiceThread(thread_call_param_t, thread_call_param_t); + +protected: + + thread_call_t _callEntry; // callout entry structure. + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *_reserved; + + +/*! @function init + @abstract Initialize an IOOutputQueue object. + @result true if initialized successfully, false otherwise. */ + + virtual bool init(); + +/*! @function free + @abstract Free the IOOutputQueue object. + @discussion Release allocated resources, then call super::free(). */ + + virtual void free(); + +/*! @function scheduleServiceThread + @abstract Schedule a service thread callout. + @discussion This method can be called by service() to schedule + a thread that will call serviceThread() when it starts running. + @param param A parameter to pass to the serviceThread() method. + @result true if a thread callout was scheduled, false otherwise. */ + + virtual bool scheduleServiceThread(void * param); + +/*! @function cancelServiceThread + @abstract Cancel any pending service thread callout. + @result true if a previously scheduled thread callout was canceled, + false otherwise. */ + + virtual bool cancelServiceThread(); + +/*! @function serviceThread + @abstract Method called by the scheduled service thread when it + starts to run. + @discussion Must be implemented by a subclass that calls + scheduleServiceThread(). 
The default implementation does nothing. + @param param A parameter that was given to scheduleServiceThread() + when the service thread was scheduled. */ + + virtual void serviceThread(void * param); + +public: + +/*! @function start + @abstract Start up the queue. + @discussion Called by the target to start the queue. This will allow + packets to be removed from the queue, then delivered to the target. + @result true if the queue was started successfully, false otherwise. */ + + virtual bool start() = 0; + +/*! @function stop + @abstract Stop the queue. + @discussion Stop the queue and prevent it from sending packets to its + target. + @result Returns the previous running state of the queue, + true if the queue was running, false if the queue was already stopped. */ + + virtual bool stop() = 0; + +/*! @function service + @abstract Service the queue. + @discussion Manage the queue after it has been started. + @param options Options for the service request. + @result A return value to indicate the service result. */ + + virtual bool service(IOOptionBits options = 0) = 0; + +/*! @function flush + @abstract Drop and free all packets currently held by the queue. + @result The number of packets that were dropped and freed. */ + + virtual UInt32 flush() = 0; + +/*! @function setCapacity + @abstract Change the number of packets that the queue can hold + before it begins to drop excess packets. + @param capacity The new desired capacity. + @result true if the new capacity was accepted, false otherwise. */ + + virtual bool setCapacity(UInt32 capacity) = 0; + +/*! @function getCapacity + @abstract Get the number of packets that the queue can hold. + @discussion The queue will begin to drop incoming packets when the + size of queue reaches its capacity. + @result The current queue capacity. */ + + virtual UInt32 getCapacity() const = 0; + +/*! @function getSize + @abstract Get the number of packets currently held in the queue. + @result The size of the queue. 
*/ + + virtual UInt32 getSize() const = 0; + +/*! @function enqueue + @abstract Called by a client to add a packet, or a chain of packets, + to the queue. + @discussion A packet is described by a mbuf chain, while a chain + of packets is constructed by linking multiple mbuf chains via the + m_nextpkt field. + @param m A single packet, or a chain of packets. + @param param A parameter provided by the caller. + @result A return code. */ + + virtual UInt32 enqueue(struct mbuf * m, void * param) = 0; + +/*! @function getOutputHandler + @abstract Return the address of a function that is designated to handle + incoming packets sent to the queue object. + @result The address of the enqueue() method is returned. */ + + virtual IOOutputAction getOutputHandler() const; + +/*! @function getStatisticsData + @abstract Return an IONetworkData object containing statistics counters + updated by the queue. + @result An IONetworkData object. This implementation will always return + 0. */ + + virtual IONetworkData * getStatisticsData() const; + + // Virtual function padding + OSMetaClassDeclareReservedUnused( IOOutputQueue, 0); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 1); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 2); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 3); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 4); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 5); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 6); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 7); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 8); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 9); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 10); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 11); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 12); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 13); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 14); + OSMetaClassDeclareReservedUnused( IOOutputQueue, 15); +}; + +#endif /* !_IOOUTPUTQUEUE_H */ 
diff --git a/iokit/IOKit/network/IOPacketQueue.h b/iokit/IOKit/network/IOPacketQueue.h new file mode 100644 index 000000000..d77df981f --- /dev/null +++ b/iokit/IOKit/network/IOPacketQueue.h @@ -0,0 +1,271 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * IOPacketQueue.h + * + * HISTORY + * 9-Dec-1998 Joe Liu (jliu) created. + * + */ + +#ifndef _IOPACKETQUEUE_H +#define _IOPACKETQUEUE_H + +#include +#include + +// Forward declarations. +// +struct mbuf; +struct IOMbufQueue; + +// We do not want the enqueue/dequeue macros defined in queue.h. +// +// #warning queue.h should not be included +#undef enqueue(queue,elt) +#undef dequeue(queue) + +/*! @class IOPacketQueue : public OSObject + @abstract Implements a bounded FIFO queue of mbuf packets. Packets are + removed from the head of the queue (dequeue), and new packets are added + to the tail of the queue (enqueue). A spinlock is used to synchronize + access to the queue between methods that have a "lock" prefix. 
*/ + +class IOPacketQueue : public OSObject +{ + OSDeclareDefaultStructors( IOPacketQueue ) + +protected: + IOMbufQueue * _queue; // mbuf queue + IOSimpleLock * _lock; // spinlock for synchronized methods + + struct ExpansionData { }; + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *_reserved; + +/*! @function free + @abstract Free the IOPacketQueue object. + @discussion All packets held by the queue are released back to the free + pool, resource are deallocated, then super::free() is called. */ + + virtual void free(); + +/*! @var IOPacketQueueDefaultCapacity Describes the default capacity of the + queue object. The capacity is only observed by the enqueue() method. + Therefore, it is possible for the size of the queue to exceed its + capacity when other methods, such as prepend(), are used to add packets + to the queue. */ + + static const UInt32 IOPacketQueueDefaultCapacity = 100; + +public: + +/*! @function withCapacity + @abstract Factory method that will construct and initialize an + IOPacketQueue object. + @param capacity The initial capacity of the queue object. Can be + later changed by calling the setCapacity() method. + @result An IOPacketQueue instance on success, or 0 otherwise. */ + + static IOPacketQueue * withCapacity(UInt32 capacity = + IOPacketQueueDefaultCapacity); + +/*! @function initWithCapacity + @abstract Initialize an IOPacketQueue object. + @discussion Initialize an IOPacketQueue object with the given capacity. + @param capacity The initial capacity of the queue. Can be later changed + by calling the setCapacity() method. + @result true if initialized successfully, false otherwise. */ + + virtual bool initWithCapacity(UInt32 capacity = + IOPacketQueueDefaultCapacity); + +/*! @function getSize + @abstract Get the size of the queue. + @result The number of packets currently held by the queue. */ + + virtual UInt32 getSize() const; + +/*! @function setCapacity + @abstract Change the capacity of the queue. 
+ @param capacity The new capacity. + @result true if the new capacity was accepted, false otherwise. */ + + virtual bool setCapacity(UInt32 capacity); + +/*! @function getCapacity + @abstract Get the current capacity of the queue. + @result The current queue capacity. */ + + virtual UInt32 getCapacity() const; + +/*! @function peek + @abstract Examine the packet at the head of the queue without + removing it from the queue. + @discussion A following call to peek() or dequeue() will return + the same packet. The caller must never modify the mbuf packet returned. + @result The packet at the head of the queue. */ + + virtual const struct mbuf * peek() const; + +/*! @function prepend + @abstract Add a chain of packets to the head of the queue. + @param m A chain of packets to add to the head of the queue. */ + + virtual void prepend(struct mbuf * m); + +/*! @function prepend + @abstract Remove all packets from the specified queue, and add them + to the head of this queue. + @param queue The source IOPacketQueue object containing the packets to + be transferred. */ + + virtual void prepend(IOPacketQueue * queue); + +/*! @function lockPrepend + @abstract Add a chain of packets to the head of a synchronized queue. + @discussion A spinlock is used to synchronize access to the queue. + @param m A chain of packets to add to the head of the queue. + @result Will always return true. */ + + virtual void lockPrepend(struct mbuf * m); + +/*! @function enqueue + @abstract Add a chain of packets to the tail of the queue. + @discussion Packets are not added if the size of the queue has reached + its capacity. + @param m A chain of packets to add to the tail of the queue. + @result true on success, or false to indicate over-capacity and refusal + to accept the packet chain provided. */ + + virtual bool enqueue(struct mbuf * m); + +/*! @function enqueue + @abstract Remove all packets from the specified queue, and add them + to the tail of this queue. 
+ @param queue The source IOPacketQueue object containing the packets to + be transferred. + @result Always return true. */ + + virtual bool enqueue(IOPacketQueue * queue); + +/*! @function enqueueWithDrop + @abstract Add a chain of packets to the tail of the queue. Packets are + dropped if the size of the queue has reached its capacity. + @param m A chain of packets to add to the tail of the queue. + @result The number of packets dropped and freed by the queue. */ + + virtual UInt32 enqueueWithDrop(struct mbuf * m); + +/*! @function lockEnqueue + @abstract Add a chain of packets to the tail of a synchronized queue. + @discussion Packets are not added if the size of the queue has reached + its capacity. A spinlock is used to synchronize access to the queue. + @param m A chain of packets to add to the tail of the queue. + @result true on success, or false to indicate over-capacity and refusal + to accept the packet chain provided. */ + + virtual bool lockEnqueue(struct mbuf * m); + +/*! @function lockEnqueueWithDrop + @abstract Add a chain of packets to the tail of a synchronized queue. + Packets are dropped if the size of the queue has reached its capacity. + @discussion A spinlock is used to synchronize access to the queue. + @param m A chain of packets to add to the tail of the queue. + @result The number of packets dropped and freed by the queue. */ + + virtual UInt32 lockEnqueueWithDrop(struct mbuf * m); + +/*! @function dequeue + @abstract Remove a single packet from the head of the queue. + @result A packet removed from the head of the queue, or NULL if the + queue was empty. */ + + virtual struct mbuf * dequeue(); + +/*! @function lockDequeue + @abstract Remove a single packet from the head of a synchronized queue. + @discussion A spinlock is used to synchronize access to the queue. + @result A packet removed from the head of the queue, or NULL if the + queue was empty. */ + + virtual struct mbuf * lockDequeue(); + +/*! 
@function dequeueAll + @abstract Remove all packets from the queue and return the head of the + packet chain. + @discussion The size of the queue is cleared to zero. + @result The head of a packet chain linking all packets that were held + in the queue, or NULL if the queue was empty. */ + + virtual struct mbuf * dequeueAll(); + +/*! @function lockDequeueAll + @abstract Remove all packets from a synchronized queue and return the + head of the packet chain. + @discussion The size of the queue is cleared to zero. A spinlock is used + to synchronize access to the queue. + @result The head of a packet chain linking all packets that were held + in the queue, or NULL if the queue was empty. */ + + virtual struct mbuf * lockDequeueAll(); + +/*! @function flush + @abstract Free all packets currently held in the queue and release them + back to the free mbuf pool. + @discussion The size of the queue is cleared to zero. + @result The number of packets freed. */ + + virtual UInt32 flush(); + +/*! @function lockFlush + @abstract Free all packets currently held in a synchronized queue and + release them back to the free mbuf pool. + @discussion The size of the queue is cleared to zero. A spinlock is used + to synchronize access to the queue. + @result The number of packets freed. 
*/ + + virtual UInt32 lockFlush(); + + // Virtual Pad functions + OSMetaClassDeclareReservedUnused( IOPacketQueue, 0); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 1); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 2); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 3); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 4); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 5); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 6); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 7); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 8); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 9); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 10); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 11); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 12); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 13); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 14); + OSMetaClassDeclareReservedUnused( IOPacketQueue, 15); +}; + +#endif /* !_IOPACKETQUEUE_H */ diff --git a/iokit/IOKit/network/Makefile b/iokit/IOKit/network/Makefile new file mode 100644 index 000000000..030f9d775 --- /dev/null +++ b/iokit/IOKit/network/Makefile @@ -0,0 +1,44 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = network +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = \ + IONetworkMedium.h \ + IONetworkData.h \ + IONetworkStats.h \ + IOEthernetStats.h \ + 
IONetworkUserClient.h \ + IONetworkStack.h \ + IONetworkController.h \ + IONetworkInterface.h \ + IOEthernetController.h \ + IOEthernetInterface.h +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/nvram/IONVRAMController.h b/iokit/IOKit/nvram/IONVRAMController.h new file mode 100644 index 000000000..eccee0bc4 --- /dev/null +++ b/iokit/IOKit/nvram/IONVRAMController.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +class IONVRAMController: public IOService +{ + OSDeclareAbstractStructors(IONVRAMController); + +public: + virtual bool start(IOService *provider); + + virtual void sync(void); + + virtual IOReturn read(IOByteCount offset, UInt8 *buffer, + IOByteCount length) = 0; + virtual IOReturn write(IOByteCount offset, UInt8 *buffer, + IOByteCount length) = 0; +}; diff --git a/iokit/IOKit/nvram/Makefile b/iokit/IOKit/nvram/Makefile new file mode 100644 index 000000000..9ae63c0b5 --- /dev/null +++ b/iokit/IOKit/nvram/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = nvram +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/pci/IOAGPDevice.h b/iokit/IOKit/pci/IOAGPDevice.h new file mode 100644 index 000000000..fe06914b0 --- /dev/null +++ b/iokit/IOKit/pci/IOAGPDevice.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOAGPDEVICE_H +#define _IOKIT_IOAGPDEVICE_H + +#include +#include + +/* Definitions of AGP config registers */ +enum { + kIOPCIConfigAGPStatusOffset = 4, + kIOPCIConfigAGPCommandOffset = 8 +}; + +/* Definitions of AGP Command & Status registers */ +enum { + kIOAGPRequestQueueMask = 0xff000000, + kIOAGPSideBandAddresssing = 0x00000200, + kIOAGPEnable = 0x00000100, + kIOAGP4GbAddressing = 0x00000020, + kIOAGP4xDataRate = 0x00000004, + kIOAGP2xDataRate = 0x00000002, + kIOAGP1xDataRate = 0x00000001 +}; + +enum { + kIOAGPGartInvalidate = 0x00000001 +}; + +// getAGPStatus() defines +enum { + kIOAGPDefaultStatus = 0 +}; +enum { + kIOAGPIdle = 0x00000001, + kIOAGPInvalidGARTEntry = 0x00000002, + kIOAGPAccessOutOfRange = 0x00000004 +}; + +#define kIOAGPBusFlagsKey "IOAGPFlags" +enum { + kIOAGPGartIdleInvalidate = 0x00000001 +}; + +// masterState +enum { + kIOAGPStateEnabled = 0x00000001, + kIOAGPStateEnablePending = 0x00010000 +}; + + +/*! @class IOAGPDevice : public IOPCIDevice + @abstract An IOService class representing an AGP master device. + @discussion The discovery of an AGP master device by the PCI bus family results in an instance of the IOAGPDevice being created and published. 
It provides services specific to AGP, in addition to the PCI services supplied by its superclass IOPCIDevice. */ + +class IOAGPDevice : public IOPCIDevice +{ + OSDeclareDefaultStructors(IOAGPDevice) + +protected: + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: + UInt32 masterState; + UInt8 masterAGPRegisters; + +/*! @function createAGPSpace + @abstract Allocates the AGP space, and enables AGP transactions on the master and slave. + @discussion This method should be called by the driver for the AGP master device to set the size of the space and enable AGP transactions. It will destroy any AGP space currently allocated. + @param options No options are currently defined, pass zero. + @param address The physical range allocated for the AGP space is passed back to the caller. + @param length An in/out parameter - the caller sets the devices maximum AGP addressing and the actual size created is passed back. + @result An IOReturn code indicating success or failure. */ + + virtual IOReturn createAGPSpace( IOOptionBits options, + IOPhysicalAddress * address, + IOPhysicalLength * length ); + +/*! @function destroyAGPSpace + @abstract Destroys the AGP space, and disables AGP transactions on the master and slave. + @discussion This method should be called by the driver to shutdown AGP transactions and release resources. */ + + virtual IOReturn destroyAGPSpace( void ); + +/*! @function getAGPRangeAllocator + @abstract Accessor to obtain the AGP range allocator. + @discussion To allocate ranges in AGP space, obtain a range allocator for the space with this method. It is retained while the space is created (until destroyAGPSpace is called) and should not be released by the caller. + @result A pointer to the range allocator for the AGP space. 
*/ + + virtual IORangeAllocator * getAGPRangeAllocator( void ); + +/*! @function getAGPStatus + @abstract Returns the current state of the AGP bus. + @discussion Returns state bits for the AGP bus. Only one type of status is currently defined. + @param which Type of status - only kIOAGPDefaultStatus is currently valid. + @result Mask of status bits for the AGP bus. */ + + virtual IOOptionBits getAGPStatus( IOOptionBits which = kIOAGPDefaultStatus ); + +/*! @function commitAGPMemory + @abstract Makes memory addressable by AGP transactions. + @discussion Makes the memory described by the IOMemoryDescriptor object addressable by AGP by entering its pages into the GART array, given an offset into AGP space supplied by the caller (usually allocated by the AGP range allocator). It is the callers responsibility to prepare non-kernel pageable memory before calling this method, with IOMemoryDescriptor::prepare. + @param memory A IOMemoryDescriptor object describing the memory to add to the GART. + @param agpOffset An offset into AGP space that the caller has allocated - usually allocated by the AGP range allocator. + @param options Pass kIOAGPGartInvalidate if the AGP target should invalidate any GART TLB. + @result An IOReturn code indicating success or failure. */ + + virtual IOReturn commitAGPMemory( IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ); + +/*! @function releaseAGPMemory + @abstract Releases memory addressable by AGP transactions. + @discussion Makes the memory described by the IOMemoryDescriptor object unaddressable by AGP by removing its pages from the GART array, given an offset into AGP space supplied by the caller (usually allocated by the AGP range allocator). It is the callers responsibility to complete non-kernel pageable memory before calling this method, with IOMemoryDescriptor::complete. + @param memory A IOMemoryDescriptor object describing the memory to remove from the GART. 
+ @param agpOffset An offset into AGP space that the caller has allocated - usually allocated by the AGP range allocator. + @param options Pass kIOAGPGartInvalidate if the AGP target should invalidate any GART TLB. + @result An IOReturn code indicating success or failure. */ + + virtual IOReturn releaseAGPMemory( IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options = 0 ); + + virtual IOReturn resetAGP( IOOptionBits options = 0 ); + +/*! @function getAGPSpace + @abstract Returns the allocated AGP space. + @discussion This method can be called by the driver for the AGP master device to retrieve the physical address and size of the space created with createAGPSpace. + @param address The physical range allocated for the AGP space is passed back to the caller. Zero may be passed if the address is not needed by the caller. + @param length The size of the AGP space created is passed back. Zero may be passed if the length is not needed by the caller. + @result An IOReturn code indicating success or failure. 
*/ + + virtual IOReturn getAGPSpace( IOPhysicalAddress * address, + IOPhysicalLength * length ); + + // Unused Padding + OSMetaClassDeclareReservedUnused(IOAGPDevice, 0); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 1); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 2); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 3); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 4); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 5); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 6); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 7); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 8); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 9); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 10); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 11); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 12); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 13); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 14); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 15); + OSMetaClassDeclareReservedUnused(IOAGPDevice, 16); +}; + +#endif /* ! _IOKIT_IOAGPDEVICE_H */ diff --git a/iokit/IOKit/pci/IOPCIBridge.h b/iokit/IOKit/pci/IOPCIBridge.h new file mode 100644 index 000000000..0349d35c7 --- /dev/null +++ b/iokit/IOKit/pci/IOPCIBridge.h @@ -0,0 +1,271 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOPCIBRIDGE_H +#define _IOKIT_IOPCIBRIDGE_H + +#include +#include +#include + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOPCIBridge : public IOService +{ + friend IOPCIDevice; + + OSDeclareAbstractStructors(IOPCIBridge) + +private: + IORegistryEntry * findMatching( OSIterator * in, IOPCIAddressSpace space ); + void publishNubs( OSIterator * kids, UInt32 index ); + virtual bool isDTNub( IOPCIDevice * nub ); + static void nvLocation( IORegistryEntry * entry, + UInt8 * busNum, UInt8 * deviceNum, UInt8 * functionNum ); + +protected: + IORangeAllocator * bridgeMemoryRanges; + IORangeAllocator * bridgeIORanges; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. 
(Internal use only) */ + ExpansionData *reserved; + +protected: + virtual void probeBus( IOService * provider, UInt8 busNum ); + + virtual UInt8 firstBusNum( void ); + virtual UInt8 lastBusNum( void ); + + virtual void spaceFromProperties( OSDictionary * propTable, + IOPCIAddressSpace * space ); + virtual OSDictionary * constructProperties( IOPCIAddressSpace space ); + + virtual IOPCIDevice * createNub( OSDictionary * from ); + + virtual bool initializeNub( IOPCIDevice * nub, OSDictionary * from ); + + virtual bool publishNub( IOPCIDevice * nub, UInt32 index ); + + virtual bool addBridgeMemoryRange( IOPhysicalAddress start, + IOPhysicalLength length, bool host ); + + virtual bool addBridgeIORange( IOByteCount start, IOByteCount length ); + + virtual bool constructRange( IOPCIAddressSpace * flags, + IOPhysicalAddress phys, IOPhysicalLength len, + OSArray * array ); + + virtual bool matchNubWithPropertyTable( IOService * nub, + OSDictionary * propertyTable, + SInt32 * score ); + + virtual bool compareNubName( const IOService * nub, OSString * name, + OSString ** matched = 0 ) const; + + virtual bool pciMatchNub( IOPCIDevice * nub, + OSDictionary * table, SInt32 * score); + + virtual bool matchKeys( IOPCIDevice * nub, const char * keys, + UInt32 defaultMask, UInt8 regNum ); + + virtual IOReturn getNubResources( IOService * nub ); + + virtual IOReturn getNubAddressing( IOPCIDevice * nub ); + + virtual IOReturn getDTNubAddressing( IOPCIDevice * nub ); + +public: + + virtual bool start( IOService * provider ); + + virtual bool configure( IOService * provider ); + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + virtual IODeviceMemory * ioDeviceMemory( void ) = 0; + + virtual UInt32 configRead32( IOPCIAddressSpace space, UInt8 offset ) = 0; + virtual void configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ) = 0; + virtual UInt16 configRead16( IOPCIAddressSpace space, UInt8 offset ) = 0; + virtual void configWrite16( IOPCIAddressSpace 
space, + UInt8 offset, UInt16 data ) = 0; + virtual UInt8 configRead8( IOPCIAddressSpace space, UInt8 offset ) = 0; + virtual void configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ) = 0; + + virtual IOPCIAddressSpace getBridgeSpace( void ) = 0; + + virtual UInt32 findPCICapability( IOPCIAddressSpace space, + UInt8 capabilityID, UInt8 * offset = 0 ); + + virtual IOReturn setDevicePowerState( IOPCIDevice * device, + unsigned long powerState ); + virtual IOReturn saveDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ); + virtual IOReturn restoreDeviceState( IOPCIDevice * device, + IOOptionBits options = 0 ); + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + virtual IOReturn createAGPSpace( IOAGPDevice * master, + IOOptionBits options, + IOPhysicalAddress * address, + IOPhysicalLength * length ); + + virtual IOReturn destroyAGPSpace( IOAGPDevice * master ); + + virtual IORangeAllocator * getAGPRangeAllocator( IOAGPDevice * master ); + + virtual IOOptionBits getAGPStatus( IOAGPDevice * master, + IOOptionBits options = 0 ); + virtual IOReturn resetAGPDevice( IOAGPDevice * master, + IOOptionBits options = 0 ); + + virtual IOReturn getAGPSpace( IOAGPDevice * master, + IOPhysicalAddress * address, + IOPhysicalLength * length ); + + virtual IOReturn commitAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options ); + + virtual IOReturn releaseAGPMemory( IOAGPDevice * master, + IOMemoryDescriptor * memory, + IOByteCount agpOffset, + IOOptionBits options ); + + // Unused Padding + OSMetaClassDeclareReservedUnused(IOPCIBridge, 0); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 1); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 2); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 3); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 4); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 5); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 6); + 
OSMetaClassDeclareReservedUnused(IOPCIBridge, 7); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 8); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 9); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 10); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 11); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 12); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 13); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 14); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 15); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 16); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 17); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 18); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 19); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 20); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 21); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 22); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 23); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 24); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 25); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 26); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 27); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 28); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 29); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 30); + OSMetaClassDeclareReservedUnused(IOPCIBridge, 31); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define kIOPCIBridgeRegs (32) + +class IOPCI2PCIBridge : public IOPCIBridge +{ + OSDeclareDefaultStructors(IOPCI2PCIBridge) + +private: + + IOPCIDevice * bridgeDevice; + UInt32 bridgeState[kIOPCIBridgeRegs]; + +protected: +/*! @struct ExpansionData + @discussion This structure will be used to expand the capabilities of the IOPCI2PCIBridge in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. 
(Internal use only) */ + ExpansionData *reserved; + + virtual UInt8 firstBusNum( void ); + virtual UInt8 lastBusNum( void ); + +public: + virtual IOService * probe( IOService * provider, + SInt32 * score ); + + virtual bool configure( IOService * provider ); + + virtual void saveBridgeState( void ); + + virtual void restoreBridgeState( void ); + + virtual bool publishNub( IOPCIDevice * nub, UInt32 index ); + + virtual IODeviceMemory * ioDeviceMemory( void ); + + virtual IOPCIAddressSpace getBridgeSpace( void ); + + virtual UInt32 configRead32( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); + + // Unused Padding + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 0); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 1); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 2); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 3); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 4); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 5); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 6); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 7); + OSMetaClassDeclareReservedUnused(IOPCI2PCIBridge, 8); +}; + +#endif /* ! _IOKIT_IOPCIBRIDGE_H */ diff --git a/iokit/IOKit/pci/IOPCIDevice.h b/iokit/IOKit/pci/IOPCIDevice.h new file mode 100644 index 000000000..43c4fbead --- /dev/null +++ b/iokit/IOKit/pci/IOPCIDevice.h @@ -0,0 +1,489 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOPCIDEVICE_H +#define _IOKIT_IOPCIDEVICE_H + +#include + +/* Definitions of PCI Config Registers */ +enum { + kIOPCIConfigVendorID = 0x00, + kIOPCIConfigDeviceID = 0x02, + kIOPCIConfigCommand = 0x04, + kIOPCIConfigStatus = 0x06, + kIOPCIConfigRevisionID = 0x08, + kIOPCIConfigClassCode = 0x09, + kIOPCIConfigCacheLineSize = 0x0C, + kIOPCIConfigLatencyTimer = 0x0D, + kIOPCIConfigHeaderType = 0x0E, + kIOPCIConfigBIST = 0x0F, + kIOPCIConfigBaseAddress0 = 0x10, + kIOPCIConfigBaseAddress1 = 0x14, + kIOPCIConfigBaseAddress2 = 0x18, + kIOPCIConfigBaseAddress3 = 0x1C, + kIOPCIConfigBaseAddress4 = 0x20, + kIOPCIConfigBaseAddress5 = 0x24, + kIOPCIConfigCardBusCISPtr = 0x28, + kIOPCIConfigSubSystemVendorID = 0x2C, + kIOPCIConfigSubSystemID = 0x2E, + kIOPCIConfigExpansionROMBase = 0x30, + kIOPCIConfigCapabilitiesPtr = 0x34, + kIOPCIConfigInterruptLine = 0x3C, + kIOPCIConfigInterruptPin = 0x3D, + kIOPCIConfigMinimumGrant = 0x3E, + kIOPCIConfigMaximumLatency = 0x3F +}; + +/* Definitions of Capabilities 
PCI Config Register */ +enum { + kIOPCICapabilityIDOffset = 0x00, + kIOPCINextCapabilityOffset = 0x01, + kIOPCIPowerManagementCapability = 0x01, + kIOPCIAGPCapability = 0x02 +}; + +/* Space definitions */ +enum { + kIOPCIConfigSpace = 0, + kIOPCIIOSpace = 1, + kIOPCI32BitMemorySpace = 2, + kIOPCI64BitMemorySpace = 3 +}; + +/* Command register definitions */ +enum { + kIOPCICommandIOSpace = 0x0001, + kIOPCICommandMemorySpace = 0x0002, + kIOPCICommandBusMaster = 0x0004, + kIOPCICommandSpecialCycles = 0x0008, + kIOPCICommandMemWrInvalidate = 0x0010, + kIOPCICommandPaletteSnoop = 0x0020, + kIOPCICommandParityError = 0x0040, + kIOPCICommandAddressStepping = 0x0080, + kIOPCICommandSERR = 0x0100, + kIOPCICommandFastBack2Back = 0x0200 +}; + +/* Status register definitions */ +enum { + kIOPCIStatusCapabilities = 0x0010, + kIOPCIStatusPCI66 = 0x0020, + kIOPCIStatusUDF = 0x0040, + kIOPCIStatusFastBack2Back = 0x0080, + kIOPCIStatusDevSel0 = 0x0000, + kIOPCIStatusDevSel1 = 0x0200, + kIOPCIStatusDevSel2 = 0x0400, + kIOPCIStatusDevSel3 = 0x0600, + kIOPCIStatusTargetAbortCapable = 0x0800, + kIOPCIStatusTargetAbortActive = 0x1000, + kIOPCIStatusMasterAbortActive = 0x2000, + kIOPCIStatusSERRActive = 0x4000, + kIOPCIStatusParityErrActive = 0x8000 +}; + +union IOPCIAddressSpace { + UInt32 bits; + struct { +#if __BIG_ENDIAN__ + unsigned int reloc:1; + unsigned int prefetch:1; + unsigned int t:1; + unsigned int resv:3; + unsigned int space:2; + unsigned int busNum:8; + unsigned int deviceNum:5; + unsigned int functionNum:3; + unsigned int registerNum:8; +#elif __LITTLE_ENDIAN__ + unsigned int registerNum:8; + unsigned int functionNum:3; + unsigned int deviceNum:5; + unsigned int busNum:8; + unsigned int space:2; + unsigned int resv:3; + unsigned int t:1; + unsigned int prefetch:1; + unsigned int reloc:1; +#endif + } s; +}; + +struct IOPCIPhysicalAddress { + IOPCIAddressSpace physHi; + UInt32 physMid; + UInt32 physLo; + UInt32 lengthHi; + UInt32 lengthLo; +}; + +// IOPCIDevice matching 
property names +#define kIOPCIMatchKey "IOPCIMatch" +#define kIOPCIPrimaryMatchKey "IOPCIPrimaryMatch" +#define kIOPCISecondaryMatchKey "IOPCISecondaryMatch" +#define kIOPCIClassMatchKey "IOPCIClassMatch" + + +/*! @class IOPCIDevice : public IOService + @abstract An IOService class representing a PCI device. + @discussion The discovery of an PCI device by the PCI bus family results in an instance of the IOPCIDevice being created and published. It provides services for looking up and mapping memory mapped hardware, and access to the PCI configuration and I/O spaces. + +

Matching Supported by IOPCIDevice

+ +Two types of matching are available, OpenFirmware name matching and PCI register matching. Currently, only one of these two matching schemes can be used in the same property table. + +

OpenFirmware Name Matching

+ +IOService performs matching based on the IONameMatch property (see IOService). IOPCIDevices created with OpenFirmware device tree entries will name match based on the standard OpenFirmware name matching properties. + +

PCI Register Matching

+ +A PCI device driver can also match on the values of certain config space registers. + +In each case, several matching values can be specified, and an optional mask for the value of the config space register may follow the value, preceded by an '&' character. +
+
+ kIOPCIMatchKey, "IOPCIMatch" +
+The kIOPCIMatchKey property matches the vendor and device ID (0x00) register, or the subsystem register (0x2c). +
+
+ kIOPCIPrimaryMatchKey, "IOPCIPrimaryMatch" +
+The kIOPCIPrimaryMatchKey property matches the vendor and device ID (0x00) register. +
+
+ kIOPCISecondaryMatchKey, "IOPCISecondaryMatch" +
+The kIOPCISecondaryMatchKey property matches the subsystem register (0x2c). +
+
+ kIOPCIClassMatchKey, "IOPCIClassMatch" +
+The kIOPCIClassMatchKey property matches the class code register (0x08). The default mask for this register is 0xffffff00. +
+
+Examples: +
+
+ <key>IOPCIMatch</key>
+ <string>0x00261011</string> +
+Matches a device whose vendor ID is 0x1011, and device ID is 0x0026, including subsystem IDs. +
+
+ <key>IOPCIMatch</key>
+ <string>0x00789004&0x00ffffff 0x78009004&0xff00ffff</string> +
+Matches with any device with a vendor ID of 0x9004, and a device ID of 0xzz78 or 0x78zz, where 'z' is don't care. +
+
+ <key>IOPCIClassMatch</key>
+ <string>0x02000000&0xffff0000</string> +
+
+Matches a device whose class code is 0x0200zz, an ethernet device. + +*/ + +class IOPCIDevice : public IOService +{ + OSDeclareDefaultStructors(IOPCIDevice) + + friend class IOPCIBridge; + friend class IOPCI2PCIBridge; + +protected: + IOPCIBridge * parent; + IOMemoryMap * ioMap; + OSObject * slotNameProperty; + +/*! @struct ExpansionData + @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. + */ + struct ExpansionData { }; + +/*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: + IOPCIAddressSpace space; + UInt32 * savedConfig; + +public: + /* IOService/IORegistryEntry methods */ + + virtual bool attach( IOService * provider ); + virtual unsigned long maxCapabilityForDomainState( IOPMPowerFlags ); + virtual unsigned long initialPowerStateForDomainState( IOPMPowerFlags ); + virtual unsigned long powerStateForDomainState( IOPMPowerFlags ); + virtual IOReturn setPowerState( unsigned long, IOService * ); + virtual IOReturn addPowerChild ( IOService * theChild ); + virtual void joinPMtree( IOService * driver ); + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; + virtual bool matchPropertyTable( OSDictionary * table, + SInt32 * score ); + virtual IOService * matchLocation( IOService * client ); + virtual IOReturn getResources( void ); + + /* Config space accessors */ + + virtual UInt32 configRead32( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite32( IOPCIAddressSpace space, + UInt8 offset, UInt32 data ); + virtual UInt16 configRead16( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite16( IOPCIAddressSpace space, + UInt8 offset, UInt16 data ); + virtual UInt8 configRead8( IOPCIAddressSpace space, UInt8 offset ); + virtual void configWrite8( IOPCIAddressSpace space, + UInt8 offset, UInt8 data ); + +/*! @function configRead32 + @abstract Reads a 32-bit value from the PCI device's configuration space. 
+ @discussion This method reads a 32-bit configuration space register on the device and returns its value. + @param offset An 8-bit offset into configuration space, of which bits 0-1 are ignored. + @result An 32-bit value in host byte order (big endian on PPC). */ + + virtual UInt32 configRead32( UInt8 offset ); + +/*! @function configRead16 + @abstract Reads a 16-bit value from the PCI device's configuration space. + @discussion This method reads a 16-bit configuration space register on the device and returns its value. + @param offset An 8-bit offset into configuration space, of which bit 0 is ignored. + @result An 16-bit value in host byte order (big endian on PPC). */ + + virtual UInt16 configRead16( UInt8 offset ); + +/*! @function configRead8 + @abstract Reads a 8-bit value from the PCI device's configuration space. + @discussion This method reads a 8-bit configuration space register on the device and returns its value. + @param offset An 8-bit offset into configuration space. + @result An 8-bit value. */ + + virtual UInt8 configRead8( UInt8 offset ); + +/*! @function configWrite32 + @abstract Writes a 32-bit value to the PCI device's configuration space. + @discussion This method write a 32-bit value to a configuration space register on the device. + @param offset An 8-bit offset into configuration space, of which bits 0-1 are ignored. + @param data An 32-bit value to be written in host byte order (big endian on PPC). */ + + virtual void configWrite32( UInt8 offset, UInt32 data ); + +/*! @function configWrite16 + @abstract Writes a 16-bit value to the PCI device's configuration space. + @discussion This method write a 16-bit value to a configuration space register on the device. + @param offset An 8-bit offset into configuration space, of which bit 0 is ignored. + @param data An 16-bit value to be written in host byte order (big endian on PPC). */ + + virtual void configWrite16( UInt8 offset, UInt16 data ); + +/*! 
@function configWrite8 + @abstract Writes a 8-bit value to the PCI device's configuration space. + @discussion This method write a 8-bit value to a configuration space register on the device. + @param offset An 8-bit offset into configuration space. + @param data An 8-bit value to be written. */ + + virtual void configWrite8( UInt8 offset, UInt8 data ); + + virtual IOReturn saveDeviceState( IOOptionBits options = 0 ); + virtual IOReturn restoreDeviceState( IOOptionBits options = 0 ); + +/*! @function setConfigBits + @abstract Sets masked bits in a configuration space register. + @discussion This method sets masked bits in a configuration space register on the device by reading and writing the register. The value of the masked bits before the write is returned. + @param offset An 8-bit offset into configuration space, of which bits 0-1 are ignored. + @param mask An 32-bit mask indicating which bits in the value parameter are valid. + @param data An 32-bit value to be written in host byte order (big endian on PPC). + @result The value of the register masked with the mask before the write. */ + + virtual UInt32 setConfigBits( UInt8 offset, UInt32 mask, UInt32 value ); + +/*! @function setMemoryEnable + @abstract Sets the device's memory space response. + @discussion This method sets the memory space response bit in the device's command config space register to the passed value, and returns the previous state of the enable. + @param enable True or false to enable or disable the memory space response. + @result True if the memory space response was previously enabled, false otherwise. */ + + virtual bool setMemoryEnable( bool enable ); + +/*! @function setIOEnable + @abstract Sets the device's I/O space response. + @discussion This method sets the I/O space response bit in the device's command config space register to the passed value, and returns the previous state of the enable. 
The exclusive option allows only one exclusive device on the bus to be enabled concurrently, this should be only for temporary access. + @param enable True or false to enable or disable the I/O space response. + @param exclusive If true, only one setIOEnable with the exclusive flag set will be allowed at a time on the bus, this should be only for temporary access. + @result True if the I/O space response was previously enabled, false otherwise. */ + + virtual bool setIOEnable( bool enable, bool exclusive = false ); + +/*! @function setBusMasterEnable + @abstract Sets the device's bus master enable. + @discussion This method sets the bus master enable bit in the device's command config space register to the passed value, and returns the previous state of the enable. + @param enable True or false to enable or disable bus mastering. + @result True if bus mastering was previously enabled, false otherwise. */ + + virtual bool setBusMasterEnable( bool enable ); + +/*! @function findPCICapability + @abstract Search configuration space for a PCI capability register. + @discussion This method searches the device's config space for a PCI capability register matching the passed capability ID, if the device supports PCI capabilities. + @param capabilityID An 8-bit PCI capability ID. + @param offset An optional pointer to return the offset into config space where the capability was found. + @result The 32-bit value of the capability register if one was found, zero otherwise. */ + + virtual UInt32 findPCICapability( UInt8 capabilityID, UInt8 * offset = 0 ); + +/*! @function getBusNumber + @abstract Accessor to return the PCI device's assigned bus number. + @discussion This method is an accessor to return the PCI device's assigned bus number. + @result The 8-bit value of device's PCI bus number. */ + + virtual UInt8 getBusNumber( void ); + +/*! @function getDeviceNumber + @abstract Accessor to return the PCI device's device number. 
+ @discussion This method is an accessor to return the PCI device's device number. + @result The 5-bit value of device's device number. */ + + virtual UInt8 getDeviceNumber( void ); + +/*! @function getFunctionNumber + @abstract Accessor to return the PCI device's function number. + @discussion This method is an accessor to return the PCI device's function number. + @result The 3-bit value of device's function number. */ + + virtual UInt8 getFunctionNumber( void ); + + /* Device memory accessors */ + +/*! @function getDeviceMemoryWithRegister + @abstract Returns an instance of IODeviceMemory representing one of the device's memory mapped ranges. + @discussion This method will return a pointer to an instance of IODeviceMemory for the physical memory range that was assigned to the configuration space base address register passed in. It is analogous to IOService::getDeviceMemoryWithIndex. + @param reg The 8-bit configuration space register that is the base address register for the desired range. + @result A pointer to an instance of IODeviceMemory, or zero if no such range was found. The IODeviceMemory is retained by the provider, so is valid while attached, or while any mappings to it exist. It should not be released by the caller. */ + + virtual IODeviceMemory * getDeviceMemoryWithRegister( UInt8 reg ); + +/*! @function mapDeviceMemoryWithRegister + @abstract Maps a physical range of the device. + @discussion This method will create a mapping for the IODeviceMemory for the physical memory range that was assigned to the configuration space base address register passed in, with IODeviceMemory::map(options). The mapping is represented by the returned instance of IOMemoryMap, which should not be released until the mapping is no longer required. This method is analogous to IOService::mapDeviceMemoryWithIndex. + @param reg The 8-bit configuration space register that is the base address register for the desired range. + @result An instance of IOMemoryMap, or zero if no such range was found. 
The mapping should be released only when access to it is no longer required. */ + + virtual IOMemoryMap * mapDeviceMemoryWithRegister( UInt8 reg, + IOOptionBits options = 0 ); + +/*! @function ioDeviceMemory + @abstract Accessor to the I/O space aperture for the bus. + @discussion This method will return a reference to the IODeviceMemory for the I/O aperture of the bus the device is on. + @result A pointer to an IODeviceMemory object for the I/O aperture. The IODeviceMemory is retained by the provider, so is valid while attached, or while any mappings to it exist. It should not be released by the caller. */ + + virtual IODeviceMemory * ioDeviceMemory( void ); + + /* I/O space accessors */ + +/*! @function ioWrite32 + @abstract Writes a 32-bit value to an I/O space aperture. + @discussion This method will write a 32-bit value to a 4 byte aligned offset in an I/O space aperture. If a map object is passed in, the value is written relative to it, otherwise to the value is written relative to the I/O space aperture for the bus. This function encapsulates the differences between architectures in generating I/O space operations. An eieio instruction is included on PPC. + @param offset An offset into a bus or device's I/O space aperture. + @param value The value to be written in host byte order (big endian on PPC). + @param map If the offset is relative to the beginning of a device's aperture, an IOMemoryMap object for that object should be passed in. Otherwise, passing zero will write the value relative to the beginning of the bus' I/O space. */ + + virtual void ioWrite32( UInt16 offset, UInt32 value, + IOMemoryMap * map = 0 ); + +/*! @function ioWrite16 + @abstract Writes a 16-bit value to an I/O space aperture. + @discussion This method will write a 16-bit value to a 2 byte aligned offset in an I/O space aperture. If a map object is passed in, the value is written relative to it, otherwise to the value is written relative to the I/O space aperture for the bus. 
This function encapsulates the differences between architectures in generating I/O space operations. An eieio instruction is included on PPC. + @param offset An offset into a bus or device's I/O space aperture. + @param value The value to be written in host byte order (big endian on PPC). + @param map If the offset is relative to the beginning of a device's aperture, an IOMemoryMap object for that object should be passed in. Otherwise, passing zero will write the value relative to the beginning of the bus' I/O space. */ + + virtual void ioWrite16( UInt16 offset, UInt16 value, + IOMemoryMap * map = 0 ); + +/*! @function ioWrite8 + @abstract Writes a 8-bit value to an I/O space aperture. + @discussion This method will write a 8-bit value to an offset in an I/O space aperture. If a map object is passed in, the value is written relative to it, otherwise to the value is written relative to the I/O space aperture for the bus. This function encapsulates the differences between architectures in generating I/O space operations. An eieio instruction is included on PPC. + @param offset An offset into a bus or device's I/O space aperture. + @param value The value to be written in host byte order (big endian on PPC). + @param map If the offset is relative to the beginning of a device's aperture, an IOMemoryMap object for that object should be passed in. Otherwise, passing zero will write the value relative to the beginning of the bus' I/O space. */ + + virtual void ioWrite8( UInt16 offset, UInt8 value, + IOMemoryMap * map = 0 ); + +/*! @function ioRead32 + @abstract Reads a 32-bit value from an I/O space aperture. + @discussion This method will read a 32-bit value from a 4 byte aligned offset in an I/O space aperture. If a map object is passed in, the value is read relative to it, otherwise to the value is read relative to the I/O space aperture for the bus. This function encapsulates the differences between architectures in generating I/O space operations. 
An eieio instruction is included on PPC. + @param offset An offset into a bus or device's I/O space aperture. + @param map If the offset is relative to the beginning of a device's aperture, an IOMemoryMap object for that object should be passed in. Otherwise, passing zero will read the value relative to the beginning of the bus' I/O space. + @result The value read in host byte order (big endian on PPC). */ + + virtual UInt32 ioRead32( UInt16 offset, IOMemoryMap * map = 0 ); + +/*! @function ioRead16 + @abstract Reads a 16-bit value from an I/O space aperture. + @discussion This method will read a 16-bit value from a 2 byte aligned offset in an I/O space aperture. If a map object is passed in, the value is read relative to it, otherwise the value is read relative to the I/O space aperture for the bus. This function encapsulates the differences between architectures in generating I/O space operations. An eieio instruction is included on PPC. + @param offset An offset into a bus or device's I/O space aperture. + @param map If the offset is relative to the beginning of a device's aperture, an IOMemoryMap object for that object should be passed in. Otherwise, passing zero will read the value relative to the beginning of the bus' I/O space. + @result The value read in host byte order (big endian on PPC). */ + + virtual UInt16 ioRead16( UInt16 offset, IOMemoryMap * map = 0 ); + +/*! @function ioRead8 + @abstract Reads an 8-bit value from an I/O space aperture. + @discussion This method will read an 8-bit value from an offset in an I/O space aperture. If a map object is passed in, the value is read relative to it, otherwise the value is read relative to the I/O space aperture for the bus. This function encapsulates the differences between architectures in generating I/O space operations. An eieio instruction is included on PPC. + @param offset An offset into a bus or device's I/O space aperture. 
+ @param map If the offset is relative to the beginning of a device's aperture, an IOMemoryMap object for that object should be passed in. Otherwise, passing zero will read the value relative to the beginning of the bus' I/O space. + @result The value read. */ + + virtual UInt8 ioRead8( UInt16 offset, IOMemoryMap * map = 0 ); + + // Unused Padding + OSMetaClassDeclareReservedUnused(IOPCIDevice, 0); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 1); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 2); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 3); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 4); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 5); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 6); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 7); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 8); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 9); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 10); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 11); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 12); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 13); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 14); + OSMetaClassDeclareReservedUnused(IOPCIDevice, 15); +}; + +#endif /* ! 
_IOKIT_IOPCIDEVICE_H */ + diff --git a/iokit/IOKit/pci/Makefile b/iokit/IOKit/pci/Makefile new file mode 100644 index 000000000..ba1f3c6e2 --- /dev/null +++ b/iokit/IOKit/pci/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = pci +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/platform/AppleMacIO.h b/iokit/IOKit/platform/AppleMacIO.h new file mode 100644 index 000000000..553246eab --- /dev/null +++ b/iokit/IOKit/platform/AppleMacIO.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_APPLEMACIO_H +#define _IOKIT_APPLEMACIO_H + +#include + +#include + +class AppleMacIO : public IOService +{ + OSDeclareAbstractStructors(AppleMacIO); + + IOService * fNub; + IOMemoryMap * fMemory; + + struct ExpansionData { }; + ExpansionData *fReserved; + +protected: + virtual bool selfTest( void ); + +public: + virtual bool start( IOService * provider ); + + virtual IOService * createNub( IORegistryEntry * from ); + + virtual void processNub( IOService * nub ); + + virtual void publishBelow( IORegistryEntry * root ); + + virtual const char * deleteList( void ); + virtual const char * excludeList( void ); + + virtual bool compareNubName( const IOService * nub, OSString * name, + OSString ** matched = 0 ) const; + + virtual IOReturn getNubResources( IOService * nub ); + + OSMetaClassDeclareReservedUnused(AppleMacIO, 0); + OSMetaClassDeclareReservedUnused(AppleMacIO, 1); + OSMetaClassDeclareReservedUnused(AppleMacIO, 2); + OSMetaClassDeclareReservedUnused(AppleMacIO, 3); +}; + +#endif /* ! _IOKIT_APPLEMACIO_H */ diff --git a/iokit/IOKit/platform/AppleMacIODevice.h b/iokit/IOKit/platform/AppleMacIODevice.h new file mode 100644 index 000000000..51f9abd39 --- /dev/null +++ b/iokit/IOKit/platform/AppleMacIODevice.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_APPLEMACIODEVICE_H +#define _IOKIT_APPLEMACIODEVICE_H + +#include + +class AppleMacIODevice : public IOService +{ + OSDeclareDefaultStructors(AppleMacIODevice); + +private: + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; + virtual IOService *matchLocation(IOService *client); + virtual IOReturn getResources( void ); + + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 0); + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 1); + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 2); + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 3); +}; + +#endif /* ! _IOKIT_APPLEMACIODEVICE_H */ diff --git a/iokit/IOKit/platform/AppleNMI.h b/iokit/IOKit/platform/AppleNMI.h new file mode 100644 index 000000000..713259f6a --- /dev/null +++ b/iokit/IOKit/platform/AppleNMI.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-9 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +#ifndef _IOKIT_APPLENMI_H +#define _IOKIT_APPLENMI_H + +#include +#include + +// NMI Interrupt Constants +enum +{ + kExtInt9_NMIIntSource = 0x800506E0, + kNMIIntLevelMask = 0x00004000, + kNMIIntMask = 0x00000080 +}; + + +class AppleNMI : public IOService +{ + OSDeclareDefaultStructors(AppleNMI); + +private: + bool enable_debugger; + bool mask_NMI; + + struct ExpansionData { }; + ExpansionData * reserved; // Reserved for future use + +public: + IOService *rootDomain; + virtual bool start(IOService *provider); + virtual IOReturn initNMI(IOInterruptController *parentController, OSData *parentSource); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source); + + // Power handling methods: + virtual IOReturn powerStateWillChangeTo(IOPMPowerFlags, unsigned long, IOService*); + + OSMetaClassDeclareReservedUnused(AppleNMI, 0); + OSMetaClassDeclareReservedUnused(AppleNMI, 1); + OSMetaClassDeclareReservedUnused(AppleNMI, 2); + OSMetaClassDeclareReservedUnused(AppleNMI, 3); +}; + 
+#endif /* ! _IOKIT_APPLENMI_H */ diff --git a/iokit/IOKit/platform/ApplePlatformExpert.h b/iokit/IOKit/platform/ApplePlatformExpert.h new file mode 100644 index 000000000..a684a5b73 --- /dev/null +++ b/iokit/IOKit/platform/ApplePlatformExpert.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + + +#ifndef _IOKIT_APPLEPLATFORM_H +#define _IOKIT_APPLEPLATFORM_H + +#include + +enum { + kBootROMTypeOldWorld = 0, + kBootROMTypeNewWorld +}; + +enum { + kChipSetTypePowerSurge = 0, + kChipSetTypePowerStar, + kChipSetTypeGossamer, + kChipSetTypePowerExpress, + kChipSetTypeCore99, + kChipSetTypeCore2001 +}; + +enum { + kMachineTypeUnknown = 0 +}; + +extern const OSSymbol *gGetDefaultBusSpeedsKey; + +class ApplePlatformExpert : public IODTPlatformExpert +{ + OSDeclareAbstractStructors(ApplePlatformExpert); + +private: + SInt32 _timeToGMT; + + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual bool start( IOService * provider ); + virtual bool configure( IOService * provider ); + virtual const char * deleteList( void ); + virtual const char * excludeList( void ); + + virtual void registerNVRAMController( IONVRAMController * nvram ); + + virtual long getGMTTimeOfDay(void); + virtual void setGMTTimeOfDay(long secs); + + virtual bool getMachineName(char *name, int maxLength); + + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 0); + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 1); + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 2); + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 3); +}; + + +#endif /* ! 
_IOKIT_APPLEPLATFORM_H */ diff --git a/iokit/IOKit/platform/Makefile b/iokit/IOKit/platform/Makefile new file mode 100644 index 000000000..5399c0501 --- /dev/null +++ b/iokit/IOKit/platform/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = platform +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/power/IOPwrController.h b/iokit/IOKit/power/IOPwrController.h new file mode 100644 index 000000000..82eb90e1e --- /dev/null +++ b/iokit/IOKit/power/IOPwrController.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 24 Nov 1998 suurballe Created. + */ + +#include + +class IOPwrController: public IOService +{ +OSDeclareAbstractStructors(IOPwrController) + +public: + +}; + diff --git a/iokit/IOKit/power/Makefile b/iokit/IOKit/power/Makefile new file mode 100644 index 000000000..f7ee70086 --- /dev/null +++ b/iokit/IOKit/power/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = power +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/ppc/IODBDMA.h b/iokit/IOKit/ppc/IODBDMA.h new file mode 100644 index 000000000..8bcc66c58 --- /dev/null +++ b/iokit/IOKit/ppc/IODBDMA.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 1998-2000 Apple 
Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * Simon Douglas 10 Nov 97 + * - first checked in, mostly from MacOS DBDMA.i, machdep/ppc/dbdma.h + * but use byte reverse ops. 
+ */ + +#ifndef _IODBDMA_H_ +#define _IODBDMA_H_ + +#include +#include +#include + + +/* DBDMA definitions */ + +struct IODBDMAChannelRegisters { + volatile unsigned long channelControl; + volatile unsigned long channelStatus; + volatile unsigned long commandPtrHi; /* implementation optional*/ + volatile unsigned long commandPtrLo; + volatile unsigned long interruptSelect; /* implementation optional*/ + volatile unsigned long branchSelect; /* implementation optional*/ + volatile unsigned long waitSelect; /* implementation optional*/ + volatile unsigned long transferModes; /* implementation optional*/ + volatile unsigned long data2PtrHi; /* implementation optional*/ + volatile unsigned long data2PtrLo; /* implementation optional*/ + + volatile unsigned long reserved1; + volatile unsigned long addressHi; /* implementation optional*/ + volatile unsigned long reserved2[4]; + volatile unsigned long unimplemented[16]; + +/* This structure must remain fully padded to 256 bytes.*/ + volatile unsigned long undefined[32]; +}; +typedef struct IODBDMAChannelRegisters IODBDMAChannelRegisters; + +/* These constants define the DB-DMA channel control words and status flags.*/ + +enum { + kdbdmaRun = 0x00008000, + kdbdmaPause = 0x00004000, + kdbdmaFlush = 0x00002000, + kdbdmaWake = 0x00001000, + kdbdmaDead = 0x00000800, + kdbdmaActive = 0x00000400, + kdbdmaBt = 0x00000100, + kdbdmaS7 = 0x00000080, + kdbdmaS6 = 0x00000040, + kdbdmaS5 = 0x00000020, + kdbdmaS4 = 0x00000010, + kdbdmaS3 = 0x00000008, + kdbdmaS2 = 0x00000004, + kdbdmaS1 = 0x00000002, + kdbdmaS0 = 0x00000001 +}; + + +#define IOSetDBDMAChannelControlBits(mask) ( ((mask) | (mask) << 16) ) +#define IOClearDBDMAChannelControlBits(mask) ( (mask) << 16) + + +/* This structure defines the DB-DMA channel command descriptor.*/ + +/* + *** WARNING: Endian-ness issues must be considered when performing load/store! 
*** +*/ + +struct IODBDMADescriptor { + unsigned long operation; /* cmd || key || i || b || w || reqCount*/ + unsigned long address; + volatile unsigned long cmdDep; + volatile unsigned long result; /* xferStatus || resCount*/ +}; +typedef struct IODBDMADescriptor IODBDMADescriptor; + +/* These constants define the DB-DMA channel command operations and modifiers.*/ + + +enum { +/* Command.cmd operations*/ + kdbdmaOutputMore = 0, + kdbdmaOutputLast = 1, + kdbdmaInputMore = 2, + kdbdmaInputLast = 3, + kdbdmaStoreQuad = 4, + kdbdmaLoadQuad = 5, + kdbdmaNop = 6, + kdbdmaStop = 7, +}; + + +enum { +/* Command.key modifiers (choose one for INPUT, OUTPUT, LOAD, and STORE)*/ + kdbdmaKeyStream0 = 0, /* default modifier*/ + kdbdmaKeyStream1 = 1, + kdbdmaKeyStream2 = 2, + kdbdmaKeyStream3 = 3, + kdbdmaKeyRegs = 5, + kdbdmaKeySystem = 6, + kdbdmaKeyDevice = 7, + + kdbdmaIntNever = 0, /* default modifier*/ + kdbdmaIntIfTrue = 1, + kdbdmaIntIfFalse = 2, + kdbdmaIntAlways = 3, + + kdbdmaBranchNever = 0, /* default modifier*/ + kdbdmaBranchIfTrue = 1, + kdbdmaBranchIfFalse = 2, + kdbdmaBranchAlways = 3, + + kdbdmaWaitNever = 0, /* default modifier*/ + kdbdmaWaitIfTrue = 1, + kdbdmaWaitIfFalse = 2, + kdbdmaWaitAlways = 3, + + kdbdmaCommandMask = (long)0xFFFF0000, + kdbdmaReqCountMask = 0x0000FFFF +}; + + +/* These constants define the DB-DMA channel command results.*/ + +enum { + /* result masks*/ + kdbdmaStatusRun = kdbdmaRun << 16, + kdbdmaStatusPause = kdbdmaPause << 16, + kdbdmaStatusFlush = kdbdmaFlush << 16, + kdbdmaStatusWake = kdbdmaWake << 16, + kdbdmaStatusDead = kdbdmaDead << 16, + kdbdmaStatusActive = kdbdmaActive << 16, + kdbdmaStatusBt = kdbdmaBt << 16, + kdbdmaStatusS7 = kdbdmaS7 << 16, + kdbdmaStatusS6 = kdbdmaS6 << 16, + kdbdmaStatusS5 = kdbdmaS5 << 16, + kdbdmaStatusS4 = kdbdmaS4 << 16, + kdbdmaStatusS3 = kdbdmaS3 << 16, + kdbdmaStatusS2 = kdbdmaS2 << 16, + kdbdmaStatusS1 = kdbdmaS1 << 16, + kdbdmaStatusS0 = kdbdmaS0 << 16, + kdbdmaResCountMask = 0x0000FFFF, + 
kdbdmaXferStatusMask = 0xFFFF0000 +}; + + +/* These macros are are IODBDMAChannelRegisters accessor functions. */ + +#define IOSetDBDMAChannelRegister(registerSetPtr,field,value) \ +OSWriteSwapInt32(registerSetPtr,offsetof(IODBDMAChannelRegisters,field),value) + +#define IOGetDBDMAChannelRegister(registerSetPtr, field) \ +OSReadSwapInt32(registerSetPtr,offsetof(IODBDMAChannelRegisters, field)) + + +/* void IOSetDBDMAChannelControl (IODBDMAChannelRegisters *registerSetPtr, unsigned long ctlValue); */ + +#define IOSetDBDMAChannelControl(registerSetPtr,ctlValue) \ +do { \ + eieio(); \ + IOSetDBDMAChannelRegister(registerSetPtr,channelControl,ctlValue); \ + eieio(); \ +} while(0) + +/* unsigned long IOGetDBDMAChannelStatus (IODBDMAChannelRegisters *registerSetPtr); */ + +#define IOGetDBDMAChannelStatus(registerSetPtr) \ + IOGetDBDMAChannelRegister(registerSetPtr,channelStatus) + +/* unsigned long IOGetDBDMACommandPtr (IODBDMAChannelRegisters *registerSetPtr); */ + +#define IOGetDBDMACommandPtr(registerSetPtr) \ + IOGetDBDMAChannelRegister(registerSetPtr,commandPtrLo) + +/* void IOSetDBDMACommandPtr (IODBDMAChannelRegisters *registerSetPtr, unsigned long cclPtr); */ + +#define IOSetDBDMACommandPtr(registerSetPtr,cclPtr) \ +do { \ + IOSetDBDMAChannelRegister(registerSetPtr,commandPtrHi,0); \ + eieio(); \ + IOSetDBDMAChannelRegister(registerSetPtr,commandPtrLo,cclPtr); \ + eieio(); \ +} while(0) + + +/* unsigned long IOGetDBDMAInterruptSelect (IODBDMAChannelRegisters *registerSetPtr); */ + +#define IOGetDBDMAInterruptSelect(registerSetPtr) \ + IOGetDBDMAChannelRegister(registerSetPtr,interruptSelect) + +/* void IOSetDBDMAInterruptSelect (IODBDMAChannelRegisters *registerSetPtr, unsigned long intSelValue); */ + +#define IOSetDBDMAInterruptSelect(registerSetPtr,intSelValue) \ +do { \ + IOSetDBDMAChannelRegister(registerSetPtr,interruptSelect,intSelValue); \ + eieio(); \ +} while(0) + +/* unsigned long IOGetDBDMABranchSelect (IODBDMAChannelRegisters *registerSetPtr); */ + 
+#define IOGetDBDMABranchSelect(registerSetPtr) \ + IOGetDBDMAChannelRegister(registerSetPtr,branchSelect) + +/* void IOSetDBDMABranchSelect (IODBDMAChannelRegisters *registerSetPtr, unsigned long braSelValue); */ + +#define IOSetDBDMABranchSelect(registerSetPtr,braSelValue) \ +do { \ + IOSetDBDMAChannelRegister(registerSetPtr,branchSelect,braSelValue); \ + eieio(); \ +} while(0) + +/* unsigned long IOGetDBDMAWaitSelect (IODBDMAChannelRegisters *registerSetPtr); */ + +#define IOGetDBDMAWaitSelect(registerSetPtr) \ + IOGetDBDMAChannelRegister(registerSetPtr,waitSelect) + +/* void IOSetDBDMAWaitSelect (IODBDMAChannelRegisters *registerSetPtr, unsigned long waitSelValue); */ + +#define IOSetDBDMAWaitSelect(registerSetPtr,waitSelValue) \ +do { \ + IOSetDBDMAChannelRegister(registerSetPtr,waitSelect,waitSelValue); \ + eieio(); \ +} while(0) + + +/* These macros are IODBDMADescriptor accessor functions. */ + +#define IOSetDBDMADescriptor(descPtr,field,value) \ +OSWriteSwapInt32( descPtr, offsetof( IODBDMADescriptor, field), value) + +#define IOGetDBDMADescriptor(descPtr,field) \ +OSReadSwapInt32( descPtr, offsetof( IODBDMADescriptor, field)) + +#define IOMakeDBDMAOperation(cmd,key,interrupt,branch,wait,count) \ + ( ((cmd) << 28) | ((key) << 24) | ((interrupt) << 20) \ + | ((branch) << 18) | ( (wait) << 16) | (count) ) + +/* void IOMakeDBDMADescriptor (IODBDMADescriptor *descPtr, + unsigned long cmd, + unsigned long key, + unsigned long interrupt, + unsigned long branch, + unsigned long wait, + unsigned long count, + unsigned long addr); */ + +#define IOMakeDBDMADescriptor(descPtr,cmd,key,interrupt,branch,wait,count,addr)\ +do { \ + IOSetDBDMADescriptor(descPtr, address, addr); \ + IOSetDBDMADescriptor(descPtr, cmdDep, 0); \ + IOSetDBDMADescriptor(descPtr, result, 0); \ + eieio(); \ + IOSetDBDMADescriptor(descPtr, operation, \ + IOMakeDBDMAOperation(cmd,key,interrupt,branch,wait,count)); \ + eieio(); \ +} while(0) + +/* void IOMakeDBDMADescriptorDep (IODBDMADescriptor 
*descPtr, + unsigned long cmd, + unsigned long key, + unsigned long interrupt, + unsigned long branch, + unsigned long wait, + unsigned long count, + unsigned long addr, + unsigned long dep); */ + +#define IOMakeDBDMADescriptorDep(descPtr,cmd,key,interrupt,branch,wait,count,addr,dep) \ +do { \ + IOSetDBDMADescriptor(descPtr, address, addr); \ + IOSetDBDMADescriptor(descPtr, cmdDep, dep); \ + IOSetDBDMADescriptor(descPtr, result, 0); \ + eieio(); \ + IOSetDBDMADescriptor(descPtr, operation, \ + IOMakeDBDMAOperation(cmd, key, interrupt, branch, wait, count)); \ + eieio(); \ +} while(0) + +/* Field accessors - NOTE: unsynchronized */ + +/* unsigned long IOGetDBDMAOperation (IODBDMADescriptor *descPtr) */ + +#define IOGetCCOperation(descPtr) \ + IOGetDBDMADescriptor(descPtr,operation) + +/* void IOSetCCOperation (IODBDMADescriptor *descPtr, unsigned long operationValue) */ + +#define IOSetCCOperation(descPtr,operationValue) \ + IOSetDBDMADescriptor(descPtr,operation,operationValue) + +/* unsigned long IOGetCCAddress (IODBDMADescriptor *descPtr) */ + +#define IOGetCCAddress(descPtr) \ + IOGetDBDMADescriptor(descPtr,address) + +/* void IOSetCCAddress (IODBDMADescriptor *descPtr, unsigned long addressValue) */ + +#define IOSetCCAddress(descPtr,addressValue) \ + IOSetDBDMADescriptor(descPtr,address, addressValue) + +/* unsigned long IOGetCCCmdDep (IODBDMADescriptor *descPtr) */ + +#define IOGetCCCmdDep(descPtr) \ + IOGetDBDMADescriptor(descPtr,cmdDep) + +/* void IOSetCCCmdDep (IODBDMADescriptor *descPtr, unsigned long cmdDepValue) */ + +#define IOSetCCCmdDep(descPtr,cmdDepValue) \ + IOSetDBDMADescriptor(descPtr,cmdDep,cmdDepValue) + +/* unsigned long IOGetCCResult (IODBDMADescriptor *descPtr) */ + +#define IOGetCCResult(descPtr) \ + IOGetDBDMADescriptor(descPtr,result) + +/* void IOSetCCResult (IODBDMADescriptor *descPtr, unsigned long resultValue) */ + +#define IOSetCCResult(descPtr,resultValue) \ + IOSetDBDMADescriptor(descPtr,result,resultValue) + + +/* DBDMA routines 
*/ + +extern void IODBDMAStart( volatile IODBDMAChannelRegisters *registerSetPtr, volatile IODBDMADescriptor *physicalDescPtr); +extern void IODBDMAStop( volatile IODBDMAChannelRegisters *registerSetPtr); +extern void IODBDMAFlush( volatile IODBDMAChannelRegisters *registerSetPtr); +extern void IODBDMAReset( volatile IODBDMAChannelRegisters *registerSetPtr); +extern void IODBDMAContinue( volatile IODBDMAChannelRegisters *registerSetPtr); +extern void IODBDMAPause( volatile IODBDMAChannelRegisters *registerSetPtr); + +extern IOReturn IOAllocatePhysicallyContiguousMemory( unsigned int size, unsigned int options, + IOVirtualAddress * logical, IOPhysicalAddress * physical ); +extern IOReturn IOFreePhysicallyContiguousMemory( IOVirtualAddress * logical, unsigned int size); + +#endif /* !defined(_IODBDMA_H_) */ diff --git a/iokit/IOKit/ppc/IOSharedLockImp.h b/iokit/IOKit/ppc/IOSharedLockImp.h new file mode 100644 index 000000000..f320f9d33 --- /dev/null +++ b/iokit/IOKit/ppc/IOSharedLockImp.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. + * + * EventShmemLock.h - Shared memory area locks for use between the + * WindowServer and the Event Driver. + * + * HISTORY + * 30 Nov 1992 Ben Fathi (benf@next.com) + * Ported to m98k. + * + * 29 April 1992 Mike Paquette at NeXT + * Created. + * + * Multiprocessor locks used within the shared memory area between the + * kernel and event system. These must work in both user and kernel mode. + * The locks are defined in an include file so they get exported to the local + * include file area. + */ + + +#ifndef _IOKIT_IOSHAREDLOCKIMP_H +#define _IOKIT_IOSHAREDLOCKIMP_H + +#include + +// 'Till we're building in kernel +.macro DISABLE_PREEMPTION +#ifdef KERNEL +#endif +.endmacro +.macro ENABLE_PREEMPTION +#ifdef KERNEL +#endif +.endmacro + +/* + * void + * ev_lock(p) + * register int *p; + * + * Lock the lock pointed to by p. Spin (possibly forever) until + * the lock is available. Test and test and set logic used. + */ + TEXT + +#ifndef KERNEL +LEAF(_ev_lock) +LEAF(_IOSpinLock) + li a6,1 // lock value + lwarx a7,0,a0 // CEMV10 +9: + sync + lwarx a7,0,a0 // read the lock + cmpwi cr0,a7,0 // is it busy? + bne- 9b // yes, spin + sync + stwcx. a6,0,a0 // try to get the lock + bne- 9b // failed, try again + isync + blr // got it, return +END(_ev_lock) +#endif + +/* + * void + * spin_unlock(p) + * int *p; + * + * Unlock the lock pointed to by p. + */ + +LEAF(_ev_unlock) +LEAF(_IOSpinUnlock) + sync + li a7,0 + stw a7,0(a0) + ENABLE_PREEMPTION() + blr +END(_ev_unlock) + + +/* + * ev_try_lock(p) + * int *p; + * + * Try to lock p. Return TRUE if successful in obtaining lock. + */ + +LEAF(_ev_try_lock) +LEAF(_IOTrySpinLock) + li a6,1 // lock value + DISABLE_PREEMPTION() + lwarx a7,0,a0 // CEMV10 +8: + sync + lwarx a7,0,a0 // read the lock + cmpwi cr0,a7,0 // is it busy? 
+ bne- 9f // yes, give up + sync + stwcx. a6,0,a0 // try to get the lock + bne- 8b // failed, try again + li a0,1 // return TRUE + isync + blr +9: + ENABLE_PREEMPTION() + li a0,0 // return FALSE + blr +END(_ev_try_lock) + +#endif /* ! _IOKIT_IOSHAREDLOCKIMP_H */ diff --git a/iokit/IOKit/ppc/Makefile b/iokit/IOKit/ppc/Makefile new file mode 100644 index 000000000..23ca4f7bf --- /dev/null +++ b/iokit/IOKit/ppc/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MD_DIR = ppc +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MD_LIST = IOSharedLockImp.h +INSTALL_MD_LCL_LIST = "" +INSTALL_MD_DIR = $(MD_DIR) + +EXPORT_MD_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MD_DIR = IOKit/$(MD_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/ps2/ApplePS2Device.h b/iokit/IOKit/ps2/ApplePS2Device.h new file mode 100644 index 000000000..88540b3eb --- /dev/null +++ b/iokit/IOKit/ps2/ApplePS2Device.h @@ -0,0 +1,300 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _APPLEPS2DEVICE_H +#define _APPLEPS2DEVICE_H + +#include + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Definitions +// +// Data Port (0x60) Commands. These commands are all transmitted directly to +// the physical keyboard and/or mouse, so expect an acknowledge for each byte +// that you send through this port. +// + +#define kDP_SetMouseScaling1To1 0xE6 // (mouse) +#define kDP_SetMouseScaling2To1 0xE7 // (mouse) +#define kDP_SetMouseResolution 0xE8 // (mouse) +#define kDP_GetMouseInformation 0xE9 // (mouse) +#define kDP_SetMouseStreamMode 0xEA // (mouse) +#define kDP_SetKeyboardLEDs 0xED // (keyboard) +#define kDP_TestKeyboardEcho 0xEE // (keyboard) +#define kDP_GetSetKeyboardASCs 0xF0 // (keyboard) +#define kDP_GetId 0xF2 // (keyboard+mouse) +#define kDP_SetKeyboardTypematic 0xF3 // (keyboard) +#define kDP_SetMouseSampleRate 0xF3 // (mouse) +#define kDP_Enable 0xF4 // (keyboard+mouse) +#define kDP_SetDefaultsAndDisable 0xF5 // (keyboard+mouse) +#define kDP_SetDefaults 0xF6 // (keyboard+mouse) +#define kDP_SetAllTypematic 0xF7 // (keyboard) +#define kDP_SetAllMakeRelease 0xF8 // (keyboard) +#define kDP_SetAllMakeOnly 0xF9 // (keyboard) +#define kDP_SetAllTypematicMakeRelease 0xFA // (keyboard) +#define kDP_SetKeyMakeRelease 0xFB // (keyboard) +#define kDP_SetKeyMakeOnly 0xFC // (keyboard) +#define kDP_Reset 
0xFF // (keyboard+mouse) + +// +// Command Port (0x64) Commands. These commands all access registers local +// to the motherboard, ie. nothing is transmitted, thus these commands and +// any associated data passed thru the Data Port do not return acknowledges. +// + +#define kCP_GetCommandByte 0x20 // (keyboard+mouse) +#define kCP_ReadControllerRAMBase 0x21 // +#define kCP_SetCommandByte 0x60 // (keyboard+mouse) +#define kCP_WriteControllerRAMBase 0x61 // +#define kCP_TestPassword 0xA4 // +#define kCP_GetPassword 0xA5 // +#define kCP_VerifyPassword 0xA6 // +#define kCP_DisableMouseClock 0xA7 // (mouse) +#define kCP_EnableMouseClock 0xA8 // (mouse) +#define kCP_TestMousePort 0xA9 // +#define kCP_TestController 0xAA // +#define kCP_TestKeyboardPort 0xAB // +#define kCP_GetControllerDiagnostic 0xAC // +#define kCP_DisableKeyboardClock 0xAD // (keyboard) +#define kCP_EnableKeyboardClock 0xAE // (keyboard) +#define kCP_ReadInputPort 0xC0 // +#define kCP_PollInputPortLow 0xC1 // +#define kCP_PollInputPortHigh 0xC2 // +#define kCP_ReadOutputPort 0xD0 // +#define kCP_WriteOutputPort 0xD1 // +#define kCP_WriteKeyboardOutputBuffer 0xD2 // (keyboard) +#define kCP_WriteMouseOutputBuffer 0xD3 // (mouse) +#define kCP_TransmitToMouse 0xD4 // (mouse) +#define kCP_ReadTestInputs 0xE0 // +#define kCP_PulseOutputBitBase 0xF0 // + +// +// Bit definitions for the 8-bit "Command Byte" register, which is accessed +// through the Command Port (0x64). +// + +#define kCB_EnableKeyboardIRQ 0x01 // Enable Keyboard IRQ +#define kCB_EnableMouseIRQ 0x02 // Enable Mouse IRQ +#define kCB_SystemFlag 0x04 // Set System Flag +#define kCB_InhibitOverride 0x08 // Inhibit Override +#define kCB_DisableKeyboardClock 0x10 // Disable Keyboard Clock +#define kCB_DisableMouseClock 0x20 // Disable Mouse Clock +#define kCB_TranslateMode 0x40 // Keyboard Translate Mode + +// +// Bit definitions for the 8-bit "LED" register, which is accessed through +// the Data Port (0x60). Undefined bit positions must be zero. 
+// + +#define kLED_ScrollLock 0x01 // Scroll Lock +#define kLED_NumLock 0x02 // Num Lock +#define kLED_CapsLock 0x04 // Caps Lock + +// +// Scan Codes used for special purposes on the keyboard and/or mouse receive +// port. These values would be received from your interrupt handler or from +// a ReadDataPort command primitive. These values do not represent actual +// keys, but indicate some sort of status. +// + +#define kSC_Acknowledge 0xFA // ack for transmitted commands +#define kSC_Extend 0xE0 // marker for "extended" sequence +#define kSC_Pause 0xE1 // marker for pause key sequence +#define kSC_Resend 0xFE // request to resend keybd cmd +#define kSC_Reset 0xAA // the keyboard/mouse has reset +#define kSC_UpBit 0x80 // OR'd in if key below is released + +// +// Scan Codes for some modifier keys. +// + +#define kSC_Alt 0x38 // (extended = right key) +#define kSC_Ctrl 0x1D // (extended = right key) +#define kSC_ShiftLeft 0x2A +#define kSC_ShiftRight 0x36 +#define kSC_WindowsLeft 0x5B // extended +#define kSC_WindowsRight 0x5C // extended + +// +// Scan Codes for some keys. +// + +#define kSC_Delete 0x53 // (extended = gray key) +#define kSC_NumLock 0x45 + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// PS/2 Command Primitives +// +// o kPS2C_ReadDataPort: +// o Description: Reads the next available byte off the data port (60h). +// o Out Field: Holds byte that was read. +// +// o kPS2C_ReadDataAndCompare: +// o Description: Reads the next available byte off the data port (60h), +// and compares it with the byte in the In Field. If the +// comparison fails, the request is aborted (refer to the +// commandsCount field in the request structure). +// o In Field: Holds byte that comparison should be made to. +// +// o kPS2C_WriteDataPort: +// o Description: Writes the byte in the In Field to the data port (60h). +// o In Field: Holds byte that should be written. 
+// +// o kPS2C_WriteCommandPort: +// o Description: Writes the byte in the In Field to the command port (64h). +// o In Field: Holds byte that should be written. +// + +enum PS2CommandEnum +{ + kPS2C_ReadDataPort, + kPS2C_ReadDataPortAndCompare, + kPS2C_WriteDataPort, + kPS2C_WriteCommandPort +}; +typedef enum PS2CommandEnum PS2CommandEnum; + +struct PS2Command +{ + PS2CommandEnum command; + UInt8 inOrOut; +}; +typedef struct PS2Command PS2Command; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// PS/2 Request Structure +// +// o General Notes: +// o allocateRequest allocates the request structure -- use it always. +// o freeRequest deallocates the request structure -- use it always. +// o It is the driver's responsibility to free the request structure: +// o after a submitRequestAndBlock call returns, or +// o in the completion routine for each submitRequest issued. +// o It is not the driver's responsibility to free the request structure: +// o when no completion routine is specified in a request issued via +// submitRequest, in which case the request is freed automatically +// by the controller. This case is called "fire-and-forget". +// o On completion, the requester can see how far the processing got by +// looking at the commandsCount field. If it is equal to the original +// number of commands, then the request was successful. If it isn't, the +// value represents the zero-based index of the command that failed. +// +// o General Notes For Inquisitive Minds: +// o Requests are executed atomically with respect to all other requests, +// that is, if a keyboard request is currently being processed, then a +// request submitted by the mouse driver or one submitted by a separate +// thread of control in the keyboard driver will get queued until the +// controller is available again. 
+// o Request processing can be preempted to service interrupts on other +// PS/2 devices, should other-device data arrive unexpectedly on the +// input stream while processing a request. +// o The request processor knows when to read the mouse input stream +// over the keyboard input stream for a given command sequence. It +// does not depend on which driver it came from, rest assured. If +// the mouse driver so chose, it could send keyboard commands. +// +// o commands: +// o Description: Holds list of commands that controller should execute. +// o Comments: Refer to PS2Command structure. +// +// o commandsCount: +// o Description: Holds the number of commands in the command list. +// o Comments: Number of commands should never exceed kMaxCommands. +// +// o completionRoutineTarget, Action, and Param: +// o Description: Object and method of the completion routine, which is +// called when the request has finished. The Param field +// may be filled with anything you want; it is passed to +// completion routine when it is called. These fields +// are optional. If left null, the request structure +// will be deallocated automatically by the controller +// on completion of the request. +// o Prototype: void completionRoutine(void * target, void * param); +// o Comments: Never issue submitRequestAndBlock or otherwise BLOCK on +// any request sent down to your device from the completion +// routine. Obey, or deadlock. 
+// + +#define kMaxCommands 20 + +typedef void (*PS2CompletionAction)(void * target, void * param); + +struct PS2Request +{ + UInt8 commandsCount; + PS2Command commands[kMaxCommands]; + void * completionTarget; + PS2CompletionAction completionAction; + void * completionParam; +}; +typedef struct PS2Request PS2Request; + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// ApplePS2KeyboardDevice and ApplePS2MouseDevice Class Descriptions +// +// +// o General Notes: +// o When the probe method is invoked on the client driver, the controller +// guarantees that the keyboard clock is enabled and the keyboard itself +// is disabled. This implies the client driver can send commands to the +// keyboard without a problem, and the keyboard itself will not send any +// asynchronous key data that may mess up the responses expected by the +// commands sent to it. +// +// o installInterruptAction: +// o Description: Ask the device to deliver asynchronous data to driver. +// o In Fields: Target/action of completion routine. +// +// o installInterruptAction Interrupt Routine: +// o Description: Delivers a newly read byte from the input data stream. +// o Prototype: void interruptOccurred(void * target, UInt8 byte); +// o In Fields: Byte that was read. +// o Comments: Never issue submitRequestAndBlock or otherwise BLOCK on +// any request sent down to your device from the interrupt +// routine. Obey, or deadlock. +// +// o uninstallInterruptHandler: +// o Description: Ask the device to stop delivering asynchronous data. +// +// o allocateRequest: +// o Description: Allocate a request structure, blocks until successful. +// o Result: Request structure pointer. +// o Comments: Request structure is guaranteed to be zeroed. +// +// o freeRequest: +// o Description: Deallocate a request structure. +// o In Fields: Request structure pointer. +// +// o submitRequest: +// o Description: Submit the request to the controller for processing. 
+// o In Fields: Request structure pointer. +// o Result: kern_return_t queueing status. +// +// o submitRequestAndBlock: +// o Description: Submit the request to the controller for processing, then +// block the calling thread until the request completes. +// o In Fields: Request structure pointer. +// + +typedef void (*PS2InterruptAction)(void * target, UInt8 data); + +#endif /* !_APPLEPS2DEVICE_H */ diff --git a/iokit/IOKit/ps2/ApplePS2KeyboardDevice.h b/iokit/IOKit/ps2/ApplePS2KeyboardDevice.h new file mode 100644 index 000000000..5ef738b03 --- /dev/null +++ b/iokit/IOKit/ps2/ApplePS2KeyboardDevice.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _APPLEPS2KEYBOARDDEVICE_H +#define _APPLEPS2KEYBOARDDEVICE_H + +#include + +class ApplePS2Controller; + +class ApplePS2KeyboardDevice : public IOService +{ + OSDeclareDefaultStructors(ApplePS2KeyboardDevice); + +private: + ApplePS2Controller * _controller; + +public: + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + // Interrupt Handling Routines + + virtual void installInterruptAction(OSObject *, PS2InterruptAction); + virtual void uninstallInterruptAction(); + + // Request Submission Routines + + virtual PS2Request * allocateRequest(); + virtual void freeRequest(PS2Request * request); + virtual bool submitRequest(PS2Request * request); + virtual void submitRequestAndBlock(PS2Request * request); +}; + +#endif /* !_APPLEPS2KEYBOARDDEVICE_H */ diff --git a/iokit/IOKit/ps2/ApplePS2MouseDevice.h b/iokit/IOKit/ps2/ApplePS2MouseDevice.h new file mode 100644 index 000000000..f46cf9c86 --- /dev/null +++ b/iokit/IOKit/ps2/ApplePS2MouseDevice.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _APPLEPS2MOUSEDEVICE_H +#define _APPLEPS2MOUSEDEVICE_H + +#include + +class ApplePS2Controller; + +class ApplePS2MouseDevice : public IOService +{ + OSDeclareDefaultStructors(ApplePS2MouseDevice); + +private: + ApplePS2Controller * _controller; + +public: + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + // Interrupt Handling Routines + + virtual void installInterruptAction(OSObject *, PS2InterruptAction); + virtual void uninstallInterruptAction(); + + // Request Submission Routines + + virtual PS2Request * allocateRequest(); + virtual void freeRequest(PS2Request * request); + virtual bool submitRequest(PS2Request * request); + virtual void submitRequestAndBlock(PS2Request * request); +}; + +#endif /* !_APPLEPS2MOUSEDEVICE_H */ diff --git a/iokit/IOKit/ps2/Makefile b/iokit/IOKit/ps2/Makefile new file mode 100644 index 000000000..57243bc95 --- /dev/null +++ b/iokit/IOKit/ps2/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MD_DIR = ps2 +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MD_LIST = +INSTALL_MD_LCL_LIST = "" +INSTALL_MD_DIR = $(MD_DIR) + +EXPORT_MD_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MD_DIR = IOKit/$(MD_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/pwr_mgt/IOPM.h 
b/iokit/IOKit/pwr_mgt/IOPM.h new file mode 100644 index 000000000..6809d125b --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPM.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOKIT_IOPM_H +#define _IOKIT_IOPM_H + +#define IOPMMaxPowerStates 10 + +typedef unsigned long IOPMPowerFlags; + +#define IOPMNotAttainable 0x0001 +#define IOPMPowerOn 0x0002 +#define IOPMClockNormal 0x0004 +#define IOPMClockRunning 0x0008 +#define IOPMWakeupEnabled 0x0010 +#define IOPMAuxPowerOn 0x0020 + // following "capabilites" exist for the convenience + // of the "interested drivers" +#define IOPMDeviceUsable 0x8000 +#define IOPMMaxPerformance 0x4000 +#define IOPMContextRetained 0x2000 +#define IOPMConfigRetained 0x1000 +#define IOPMNotPowerManaged 0x0800 +#define IOPMSoftSleep 0x0400 + +#define IOPMNextHigherState 1 +#define IOPMHighestState 2 +#define IOPMNextLowerState 3 +#define IOPMLowestState 4 + + +enum { // commands on power managment command queue + kPMbroadcastAggressiveness = 1, + kPMunIdleDevice, + kPMsleepDemand, + kPMwakeSignal, + kPMallowSleep, + kPMcancelSleep +}; + +// Power events +enum { + kClamshellClosedEventMask = (1<<0), // User closed lid + kDockingBarEventMask = (1<<1), // OBSOLETE + kACPlugEventMask = (1<<2), // User plugged or unplugged adapter + kFrontPanelButtonEventMask = (1<<3), // User hit the front panel button + kBatteryStatusEventMask = (1<<4) // Battery status has changed +}; + +// Power commands issued to root domain +enum { + kIOPMSleepNow = (1<<0), // put machine to sleep now + kIOPMAllowSleep = (1<<1), // allow idle sleep + kIOPMPreventSleep = (1<<2), // do not allow idle sleep + kIOPMPowerButton = (1<<3), // power button was pressed + kIOPMClamshellClosed = (1<<4), // clamshell was closed + kIOPMPowerEmergency = (1<<5), // battery dangerously low + kIOPMIgnoreClamshell = (1<<6) // take no action on clamshell closure +}; + // Return codes + +// PUBLIC power management features +// NOTE: this is a direct port from classic, some of these bits +// are obsolete but are included for completeness +enum { + kPMHasWakeupTimerMask = (1<<0), // 1=wake timer is supported + 
kPMHasSharedModemPortMask = (1<<1), // Not used + kPMHasProcessorCyclingMask = (1<<2), // 1=processor cycling supported + kPMMustProcessorCycleMask = (1<<3), // Not used + kPMHasReducedSpeedMask = (1<<4), // 1=supports reduced processor speed + kPMDynamicSpeedChangeMask = (1<<5), // 1=supports changing processor speed on the fly + kPMHasSCSIDiskModeMask = (1<<6), // 1=supports using machine as SCSI drive + kPMCanGetBatteryTimeMask = (1<<7), // 1=battery time can be calculated + kPMCanWakeupOnRingMask = (1<<8), // 1=machine can wake on modem ring + kPMHasDimmingSupportMask = (1<<9), // 1=has monitor dimming support + kPMHasStartupTimerMask = (1<<10), // 1=can program startup timer + kPMHasChargeNotificationMask = (1<<11), // 1=client can determine charger status/get notifications + kPMHasDimSuspendSupportMask = (1<<12), // 1=can dim diplay to DPMS ('off') state + kPMHasWakeOnNetActivityMask = (1<<13), // 1=supports waking upon receipt of net packet + kPMHasWakeOnLidMask = (1<<14), // 1=can wake upon lid/case opening + kPMCanPowerOffPCIBusMask = (1<<15), // 1=can remove power from PCI bus on sleep + kPMHasDeepSleepMask = (1<<16), // 1=supports deep (hibernation) sleep + kPMHasSleepMask = (1<<17), // 1=machine support low power sleep (ala powerbooks) + kPMSupportsServerModeAPIMask = (1<<18), // 1=supports reboot on AC resume for unexpected power loss + kPMHasUPSIntegrationMask = (1<<19) // 1=supports incorporating UPS devices into power source calcs +}; + +// PRIVATE power management features +// NOTE: this is a direct port from classic, some of these bits +// are obsolete but are included for completeness. 
+enum { + kPMHasExtdBattInfoMask = (1<<0), // Not used + kPMHasBatteryIDMask = (1<<1), // Not used + kPMCanSwitchPowerMask = (1<<2), // Not used + kPMHasCelsiusCyclingMask = (1<<3), // Not used + kPMHasBatteryPredictionMask = (1<<4), // Not used + kPMHasPowerLevelsMask = (1<<5), // Not used + kPMHasSleepCPUSpeedMask = (1<<6), // Not used + kPMHasBtnIntHandlersMask = (1<<7), // 1=supports individual button interrupt handlers + kPMHasSCSITermPowerMask = (1<<8), // 1=supports SCSI termination power switch + kPMHasADBButtonHandlersMask = (1<<9), // 1=supports button handlers via ADB + kPMHasICTControlMask = (1<<10), // 1=supports ICT control + kPMHasLegacyDesktopSleepMask = (1<<11), // 1=supports 'doze' style sleep + kPMHasDeepIdleMask = (1<<12), // 1=supports Idle2 in hardware + kPMOpenLidPreventsSleepMask = (1<<13), // 1=open case prevent machine from sleeping + kPMClosedLidCausesSleepMask = (1<<14), // 1=case closed (clamshell closed) causes sleep + kPMHasFanControlMask = (1<<15), // 1=machine has software-programmable fan/thermostat controls + kPMHasThermalControlMask = (1<<16), // 1=machine supports thermal monitoring + kPMHasVStepSpeedChangeMask = (1<<17), // 1=machine supports processor voltage/clock change + kPMEnvironEventsPolledMask = (1<<18) // 1=machine doesn't generate pmu env ints, we must poll instead +}; + +// DEFAULT public and private features for machines whose device tree +// does NOT contain this information (pre-Core99). 
+ +// For Cuda-based Desktops + +#define kStdDesktopPMFeatures kPMHasWakeupTimerMask |\ + kPMHasProcessorCyclingMask |\ + kPMHasDimmingSupportMask |\ + kPMHasStartupTimerMask |\ + kPMSupportsServerModeAPIMask |\ + kPMHasUPSIntegrationMask + +#define kStdDesktopPrivPMFeatures kPMHasExtdBattInfoMask |\ + kPMHasICTControlMask |\ + kPMHasLegacyDesktopSleepMask + +#define kStdDesktopNumBatteries 0 + +// For Wallstreet (PowerBook G3 Series 1998) + +#define kWallstreetPMFeatures kPMHasWakeupTimerMask |\ + kPMHasProcessorCyclingMask |\ + kPMHasReducedSpeedMask |\ + kPMDynamicSpeedChangeMask |\ + kPMHasSCSIDiskModeMask |\ + kPMCanGetBatteryTimeMask |\ + kPMHasDimmingSupportMask |\ + kPMHasChargeNotificationMask |\ + kPMHasDimSuspendSupportMask |\ + kPMHasSleepMask + +#define kWallstreetPrivPMFeatures kPMHasExtdBattInfoMask |\ + kPMHasBatteryIDMask |\ + kPMCanSwitchPowerMask |\ + kPMHasADBButtonHandlersMask |\ + kPMHasSCSITermPowerMask |\ + kPMHasICTControlMask |\ + kPMClosedLidCausesSleepMask |\ + kPMEnvironEventsPolledMask + +#define kStdPowerBookPMFeatures kWallstreetPMFeatures +#define kStdPowerBookPrivPMFeatures kWallstreetPrivPMFeatures + +#define kStdPowerBookNumBatteries 2 + +// For 101 (PowerBook G3 Series 1999) + +#define k101PMFeatures kPMHasWakeupTimerMask |\ + kPMHasProcessorCyclingMask |\ + kPMHasReducedSpeedMask |\ + kPMDynamicSpeedChangeMask |\ + kPMHasSCSIDiskModeMask |\ + kPMCanGetBatteryTimeMask |\ + kPMHasDimmingSupportMask |\ + kPMHasChargeNotificationMask |\ + kPMHasDimSuspendSupportMask |\ + kPMHasSleepMask |\ + kPMHasUPSIntegrationMask + +#define k101PrivPMFeatures kPMHasExtdBattInfoMask |\ + kPMHasBatteryIDMask |\ + kPMCanSwitchPowerMask |\ + kPMHasADBButtonHandlersMask |\ + kPMHasSCSITermPowerMask |\ + kPMHasICTControlMask |\ + kPMClosedLidCausesSleepMask |\ + kPMEnvironEventsPolledMask + +#define IOPMNoErr 0 // normal return + + // returned by powerStateWillChange and powerStateDidChange: +#define IOPMAckImplied 0 // acknowledgement of power state 
change is implied +#define IOPMWillAckLater 1 // acknowledgement of power state change will come later + + // returned by requestDomainState +#define IOPMBadSpecification 4 // unrecognized specification parameter +#define IOPMNoSuchState 5 // no power state matches search specification + +#define IOPMCannotRaisePower 6 // a device cannot change its power for some reason + + // returned by changeStateTo +#define IOPMParameterError 7 // requested state doesn't exist +#define IOPMNotYetInitialized 8 // device not yet fully hooked into power management "graph" + + + // used by Root Domain UserClient + +enum { + kPMGeneralAggressiveness = 0, + kPMMinutesToDim, + kPMMinutesToSpinDown, + kPMMinutesToSleep +}; +#define kMaxType kPMMinutesToSleep + + +#define kIOBatteryInfoKey "IOBatteryInfo" +#define kIOBatteryCurrentChargeKey "Current" +#define kIOBatteryCapacityKey "Capacity" +#define kIOBatteryFlagsKey "Flags" +#define kIOBatteryVoltageKey "Voltage" +#define kIOBatteryAmperageKey "Amperage" +enum { + kIOBatteryInstalled = (1 << 2), + kIOBatteryCharge = (1 << 1), + kIOBatteryChargerConnect = (1 << 0) +}; + + +#if KERNEL && __cplusplus +class IOService; + +enum { + kIOPowerEmergencyLevel = 1000 +}; + +enum { + kIOPMSubclassPolicy, + kIOPMSuperclassPolicy1 +}; + +struct stateChangeNote{ + IOPMPowerFlags stateFlags; + unsigned long stateNum; + void * powerRef; +}; +typedef struct stateChangeNote stateChangeNote; + +struct sleepWakeNote{ + void * powerRef; + unsigned long returnValue; +}; +typedef struct sleepWakeNote sleepWakeNote; + +extern void IOPMRegisterDevice(const char *, IOService *); +#endif /* KERNEL && __cplusplus */ + +#endif /* ! _IOKIT_IOPM_H */ + diff --git a/iokit/IOKit/pwr_mgt/IOPMLibDefs.h b/iokit/IOKit/pwr_mgt/IOPMLibDefs.h new file mode 100644 index 000000000..752c829a4 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMLibDefs.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#define kPMSetAggressiveness 0 +#define kPMGetAggressiveness 1 +#define kPMSleepSystem 2 +#define kPMAllowPowerChange 3 +#define kPMCancelPowerChange 4 + +#define kNumPMMethods 5 diff --git a/iokit/IOKit/pwr_mgt/IOPMPowerSource.h b/iokit/IOKit/pwr_mgt/IOPMPowerSource.h new file mode 100644 index 000000000..20a069fc3 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMPowerSource.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +class ApplePMU; + +// Our flags + +enum { + kBatteryInstalled = (1<<0), + kBatteryCharging = (1<<1), + kACInstalled = (1<<2), + kUPSInstalled = (1<<3), + kBatteryAtWarn = (1<<4), + kBatteryDepleted = (1<<5) +}; + +const unsigned long kSecondsPerHour = (60*60); +const unsigned long kTenMinutesInSeconds = (10 * 60); + +// our battery (power source) object + +class IOPMPowerSource : public OSObject +{ + OSDeclareDefaultStructors(IOPMPowerSource) + + protected: + + UInt32 bFlags; + UInt32 bTimeRemaining; + UInt16 bCurCapacity; + UInt16 bMaxCapacity; + SInt16 bCurrent; + UInt16 bVoltage; + UInt16 bBatteryIndex; + + public: + + IOPMPowerSource * nextInList; + + bool init (unsigned short whichBatteryIndex); + unsigned long capacityPercentRemaining (void); + bool atWarnLevel (void); + bool depleted (void); + + // accessors + + bool isInstalled (void); + bool isCharging (void); + bool acConnected (void); + unsigned long timeRemaining (void); + unsigned long maxCapacity (void); + unsigned long curCapacity (void); + long currentDrawn (void); + unsigned long voltage (void); + + // calculations + + // function updateStatus is called whenever the system needs + // to obtain the latest power source state...must be overridden + // by subclasses. 
+ virtual void updateStatus (void); +}; + + + diff --git a/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h b/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h new file mode 100644 index 000000000..e46736196 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +class IOPMPowerSource; + +class IOPMPowerSourceList : public OSObject +{ +OSDeclareDefaultStructors(IOPMPowerSourceList) + +private: + +IOPMPowerSource * firstItem; // pointer to first power source in list +unsigned long length; // how many power sources are in the list + + +public: +void initialize ( void ); + +IOReturn addToList ( IOPMPowerSource * newPowerSource ); + +IOPMPowerSource * firstInList ( void ); + +IOPMPowerSource * nextInList ( IOPMPowerSource * currentItem ); + +unsigned long numberOfItems ( void ); + +IOReturn removeFromList ( IOPMPowerSource * theItem ); + +void free ( void ); +}; + diff --git a/iokit/IOKit/pwr_mgt/IOPMchangeNoteList.h b/iokit/IOKit/pwr_mgt/IOPMchangeNoteList.h new file mode 100644 index 000000000..aaf484e9c --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMchangeNoteList.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +class IOPowerConnection; + +// This is a list of State Changes which are in progress. 
Its purpose is to keep track of the +// notifications and acknowledgements caused by state change. A change is added to the list +// when either our parent notifies us that the power domain is changing state or when we decide +// to change power to our device or domain. +// +// A change is removed from the list when all interested parties have been informed of the upcoming +// change, all of them have acknowledged the notification, the change has been made, all interested +// parties have been informed that the change was made, and all of them have acknowledged. +// +// The list is strictly first-in, first-out. It is implemented as a circular list in a linear +// array. There are two pointers into the array. The circular list is empty when these two +// pointers are equal. + +// More specifically, a change note is put into the array when one of these things happens: +// the device decides it is idle and needs to reduce power. (changePowerStateTo) +// the device decides it is not idle and needs to increase power. (changePowerStateTo) +// the controlling driver requests a state change. (changePowerStateTo) +// some client needs to use the device but it is powered down. (makeUsable) +// the parent says the domain power is changing. (powerStateWillChangeTo) +// a child says it no longer needs power, and all other children are similarly idle. (requestDomainState) +//. a child wants more power in the domain so it can raise its power state. (requestDomainState) +// +// A change note is removed from the array when all interested drivers and power domain +// children have acknowledged the change. + +// Each change note contains: +// A flag field which describes the change. +// Which power state the device will be in after the change. +// The power flags which describe the result of this change. 
+ +struct changeNoteItem{ +unsigned long flags; +unsigned long newStateNumber; +IOPMPowerFlags outputPowerCharacter; +IOPMPowerFlags inputPowerRequirement; +IOPMPowerFlags domainState; +IOPowerConnection * parent; +IOPMPowerFlags capabilityFlags; +}; + +typedef struct changeNoteItem changeNoteItem; + + + // flags field + +#define IOPMParentInitiated 1 // this power change initiated by our parent +#define IOPMWeInitiated 2 // this power change initiated by this device (not parent) +#define IOPMNotDone 4 // we couldn't make this change +#define IOPMNotInUse 8 // this list element not currently in use +#define IOPMDomainWillChange 16 // parent change started by PowerDomainWillChangeTo +#define IOPMDomainDidChange 32 // parent change started by PowerDomainDidChangeTo + + +// Length of change note list is maximum 5. There cannot be two adjacent device-initiated change notes unless +// one is currently being actioned, because two adjacent in-active device-initiated changes are always collapsed +// into one, and there cannot be more than two parent-initiated change notes in the queue (from one parent), +// because the parent does not +// initiate a change (by calling domainStateWillChange) until everybody has acknowledged the previous one +// (by calling domainStateDidChange), although if we are the last to send that acknowledgement, the change we +// are acknowledging will still be in the queue as we acknowledge, and at that point the parent can give us another +// (by callingdomainStateWillChange). So we need room for two parent changes, two non-adjacent device changes, +// one more per additional parent, say two more, +// and room for one more device change to get into the queue before collapsing it with its neighbor. In this case, seven +// entryies suffices. 
Or, we need +// room for two adjacent device changes (one in progress), a parent change, another device change, another parent change, +// another device change, another parent change, another device change, plus +// one more device change to get into the queue before collapsing it with its neighbor. Nine entries in this case. +// I'm not sure what irrationallity causes me to code for twenty entries in the queue. +#define IOPMMaxChangeNotes 20 + +class IOPMchangeNoteList :public OSObject +{ +OSDeclareDefaultStructors(IOPMchangeNoteList) + +private: + unsigned long firstInList; // points to oldest active change note in list + unsigned long firstUnused; // points just beyond newest change note in list + +public: + + changeNoteItem changeNote[IOPMMaxChangeNotes]; + + +void initialize ( void ); + +long createChangeNote ( void ); + +long currentChange ( void ); + +long latestChange ( void ); + +IOReturn releaseHeadChangeNote ( void ); + +IOReturn releaseTailChangeNote ( void ); + +bool changeNoteInUse ( unsigned long ordinal ); + +long nextChangeNote ( unsigned long ordinal ); + +unsigned long increment (unsigned long ordinal ); + +unsigned long decrement (unsigned long ordinal ); + +long previousChangeNote (unsigned long ordinal ); + +bool listEmpty ( void ); + +}; diff --git a/iokit/IOKit/pwr_mgt/IOPMinformee.h b/iokit/IOKit/pwr_mgt/IOPMinformee.h new file mode 100644 index 000000000..bab21e8a3 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMinformee.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +class IOPMinformee : public OSObject +{ +OSDeclareDefaultStructors(IOPMinformee) + +public: + +// points to interested driver or power domain child +IOService * whatObject; + +// -1, 0, or positive number decrementing with each tick + IOReturn timer; + +// next informee in the list + IOPMinformee * nextInList; + + +void initialize ( IOService * theObject ); +void free ( void ); +}; + diff --git a/iokit/IOKit/pwr_mgt/IOPMinformeeList.h b/iokit/IOKit/pwr_mgt/IOPMinformeeList.h new file mode 100644 index 000000000..608a93d4a --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMinformeeList.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +class IOPMinformee; +class IOService; + +class IOPMinformeeList : public OSObject +{ +OSDeclareDefaultStructors(IOPMinformeeList) + +private: +IOPMinformee * firstItem; // pointer to first informee in the list +unsigned long length; // how many informees are in the list + + +public: +void initialize ( void ); + +IOReturn addToList ( IOPMinformee * newInformee ); + +IOPMinformee * firstInList ( void ); + +IOPMinformee * nextInList ( IOPMinformee * currentItem ); + +unsigned long numberOfItems ( void ); + +IOPMinformee * findItem ( IOService * driverOrChild ); + +IOReturn removeFromList ( IOService * theItem ); + +void free ( void ); +}; + diff --git a/iokit/IOKit/pwr_mgt/IOPMlog.h b/iokit/IOKit/pwr_mgt/IOPMlog.h new file mode 100644 index 000000000..401197aeb --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMlog.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#define PMlogSetParent 1 +#define PMlogAddChild 2 +#define PMlogRemoveChild 3 +#define PMlogControllingDriver 4 +#define PMlogControllingDriverErr1 5 /* bad power state array version */ +#define PMlogControllingDriverErr2 6 /* too many power states */ +#define PMlogControllingDriverErr3 7 /* not a real IOPMDriver */ +#define PMlogControllingDriverErr4 8 /* power state change in progress */ +#define PMlogInterestedDriver 9 +#define PMlogAcknowledgeErr1 10 /* unknown entity called acknowledgePowerChange */ +#define PMlogChildAcknowledge 11 +#define PMlogDriverAcknowledge 12 /* interested driver acknowledges */ +#define PMlogAcknowledgeErr2 13 /* object has already acked */ +#define PMlogAcknowledgeErr3 14 /* not expecting any acks */ +#define PMlogAcknowledgeErr4 15 /* not expecting acknowledgeSetPowerState */ +#define PMlogDriverAcknowledgeSet 16 /* controlling driver acknowledges */ +#define PMlogWillChange 17 +#define PMlogDidChange 18 +#define PMlogRequestDomain 19 +#define PMlogMakeUsable 20 +#define PMlogChangeStateTo 21 +#define PMlogChangeStateToPriv 22 +#define PMlogSetAggressiveness 23 +#define PMlogCriticalTemp 24 +#define PMlogOverrideOn 25 +#define PMlogOverrideOff 26 +#define PMlogEnqueueErr 27 /* change queue overflow */ +#define PMlogCollapseQueue 28 +#define PMlogChangeDone 29 +#define PMlogCtrlDriverTardy 30 /* controlling driver didn't acknowledge */ +#define PMlogIntDriverTardy 31 /* interested driver didn't acknowledge */ +#define PMlogStartAckTimer 32 +#define PMlogStartParentChange 33 +#define PMlogAmendParentChange 34 +#define PMlogStartDeviceChange 35 +#define PMlogRequestDenied 36 /* parent denied domain state change request */ +#define PMlogControllingDriverErr5 37 /* zero power states or we already have a driver with more power states */ +#define PMlogProgramHardware 38 +#define PMlogInformDriverPreChange 39 +#define PMlogInformDriverPostChange 40 +#define PMlogRemoveDriver 41 +#define 
PMsetIdleTimerPeriod 42 +#define PMlogSystemWake 43 +#define PMlogAcknowledgeErr5 44 +#define PMlogClientAcknowledge 45 +#define PMlogClientTardy 46 /* application or kernel client didn't acknowledge */ +#define PMlogClientCancel 47 diff --git a/iokit/IOKit/pwr_mgt/IOPMpmChild.h b/iokit/IOKit/pwr_mgt/IOPMpmChild.h new file mode 100644 index 000000000..6df79af9b --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMpmChild.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +class IOPMpmChild: public IOPMinformee +{ +OSDeclareDefaultStructors(IOPMpmChild) + +public: +unsigned long desiredDomainState; // which power domain state the child desires + + +void initialize ( IOService * theObject ); + +}; + diff --git a/iokit/IOKit/pwr_mgt/IOPMpowerState.h b/iokit/IOKit/pwr_mgt/IOPMpowerState.h new file mode 100644 index 000000000..095b653fd --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPMpowerState.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +struct IOPMPowerState +{ + unsigned long version; // version number of this struct +IOPMPowerFlags capabilityFlags; // bits that describe (to interested drivers) the capability of the device in this state +IOPMPowerFlags outputPowerCharacter; // description (to power domain children) of the power provided in this state +IOPMPowerFlags inputPowerRequirement; // description (to power domain parent) of input power required in this state +unsigned long staticPower; // average consumption in milliwatts +unsigned long unbudgetedPower; // additional consumption from separate power supply (mw) +unsigned long powerToAttain; // additional power to attain this state from next lower state (in mw) +unsigned long timeToAttain; // time required to enter this state from next lower state (in microseconds) +unsigned long settleUpTime; // settle time required after entering this state from next lower state (microseconds) +unsigned long timeToLower; // time required to enter next lower state from this one (in microseconds) +unsigned long settleDownTime; // settle time required after entering next lower state 
from this state (microseconds) +unsigned long powerDomainBudget; // power in mw a domain in this state can deliver to its children +}; + +typedef struct IOPMPowerState IOPMPowerState; + diff --git a/iokit/IOKit/pwr_mgt/IOPowerConnection.h b/iokit/IOKit/pwr_mgt/IOPowerConnection.h new file mode 100644 index 000000000..03adf0aa4 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/IOPowerConnection.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOPOWERCONNECTION_H +#define _IOKIT_IOPOWERCONNECTION_H + +#include +#include + +class IOPowerConnection : public IOService +{ + OSDeclareDefaultStructors(IOPowerConnection) + +protected: + /*! @field parentKnowsState true: parent knows state of its domain + used by child */ + bool stateKnown; + /*! @field currentPowerFlags power flags which describe the current state of the power domain + used by child */ + IOPMPowerFlags currentPowerFlags; + /*! 
@field desiredDomainState state number which corresponds to the child's desire + used by parent */ + unsigned long desiredDomainState; + +public: + /*! @function setParentKnowsState + @abstract Sets the stateKnown variable. + @discussion Called by the parent when the object is created and called by the child when it discovers that the parent now knows its state. */ + void setParentKnowsState (bool ); + + /*! @function setParentCurrentPowerFlags + @abstract Sets the currentPowerFlags variable. + @discussion Called by the parent when the object is created and called by the child when it discovers that the parent state is changing. */ + void setParentCurrentPowerFlags (IOPMPowerFlags ); + + /*! @function parentKnowsState + @abstract Returns the stateKnown variable. */ + bool parentKnowsState (void ); + + /*! @function parentCurrentPowerFlags + @abstract Returns the currentPowerFlags variable. */ + IOPMPowerFlags parentCurrentPowerFlags (void ); + + /*! @function setDesiredDomainState + @abstract Sets the desiredDomainState variable. + @discussion Called by the child. */ + void setDesiredDomainState (unsigned long ); + + /*! @function getDesiredDomainState + @abstract Returns the desiredDomainState variable. + @discussion Called by the child. */ + unsigned long getDesiredDomainState ( void ); +}; + +#endif /* ! 
_IOKIT_IOPOWERCONNECTION_H */ + diff --git a/iokit/IOKit/pwr_mgt/Makefile b/iokit/IOKit/pwr_mgt/Makefile new file mode 100644 index 000000000..855e67178 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/Makefile @@ -0,0 +1,39 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = pwr_mgt +NOT_EXPORT_HEADERS = \ + IOPMchangeNoteList.h \ + IOPMinformee.h \ + IOPMinformeeList.h \ + IOPMlog.h \ + IOPMpmChild.h + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = IOPMLibDefs.h IOPM.h +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/pwr_mgt/RootDomain.h b/iokit/IOKit/pwr_mgt/RootDomain.h new file mode 100644 index 000000000..91a798947 --- /dev/null +++ b/iokit/IOKit/pwr_mgt/RootDomain.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOKIT_ROOTDOMAIN_H +#define _IOKIT_ROOTDOMAIN_H + +#include +#include + +class RootDomainUserClient; + +enum { + kRootDomainSleepNotSupported = 0x00000000, + kRootDomainSleepSupported = 0x00000001, + kFrameBufferDeepSleepSupported = 0x00000002 +}; + +extern "C" +{ + IONotifier * registerSleepWakeInterest(IOServiceInterestHandler, void *, void * = 0); + IOReturn acknowledgeSleepWakeNotification(void * ); +} + + +class IOPMrootDomain: public IOService +{ +OSDeclareDefaultStructors(IOPMrootDomain) + +public: + + virtual bool start( IOService * provider ); + virtual IOReturn newUserClient ( task_t, void *, UInt32, IOUserClient ** ); + virtual IOReturn setAggressiveness ( unsigned long, unsigned long ); + virtual IOReturn youAreRoot ( void ); + virtual IOReturn sleepSystem ( void ); + virtual IOReturn receivePowerNotification (UInt32 msg); + virtual void setSleepSupported( IOOptionBits flags ); + virtual IOOptionBits getSleepSupported(); + virtual bool activityTickle ( unsigned long, unsigned long x=0 ); + +private: + + class IORootParent * patriarch; // points to our parent + unsigned long idlePeriod; // idle timer period + + virtual void powerChangeDone ( unsigned long ); + virtual void command_received ( void *, void * , void * , void *); + virtual bool tellChangeDown ( unsigned long stateNum); + virtual bool askChangeDown ( unsigned long stateNum); + virtual void tellChangeUp ( unsigned long ); + virtual void tellNoChangeDown ( unsigned long ); + + bool 
systemBooting; + bool ignoringClamshell; + bool allowSleep; + bool sleepIsSupported; + IOOptionBits platformSleepSupport; +}; + +class IORootParent: public IOService +{ +OSDeclareDefaultStructors(IORootParent) + +public: + + bool start ( IOService * nub ); + void shutDownSystem ( void ); + void sleepSystem ( void ); + void wakeSystem ( void ); +}; + + +#endif /* _IOKIT_ROOTDOMAIN_H */ diff --git a/iokit/IOKit/rtc/IORTCController.h b/iokit/IOKit/rtc/IORTCController.h new file mode 100644 index 000000000..6cbf02ef9 --- /dev/null +++ b/iokit/IOKit/rtc/IORTCController.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * 24 Nov 1998 suurballe Created. 
+ */ + +#include + +typedef void (*RTC_tick_handler)( IOService * ); + + +class IORTCController: public IOService +{ +OSDeclareAbstractStructors(IORTCController) + +public: + +virtual IOReturn getRealTimeClock ( UInt8 * currentTime, IOByteCount * length ) = 0; +virtual IOReturn setRealTimeClock ( UInt8 * newTime ) = 0; +}; + diff --git a/iokit/IOKit/rtc/Makefile b/iokit/IOKit/rtc/Makefile new file mode 100644 index 000000000..4e074c0e4 --- /dev/null +++ b/iokit/IOKit/rtc/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = rtc +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/scsi/IOSCSICommand_Reference.h b/iokit/IOKit/scsi/IOSCSICommand_Reference.h new file mode 100644 index 000000000..68f0c48b7 --- /dev/null +++ b/iokit/IOKit/scsi/IOSCSICommand_Reference.h @@ -0,0 +1,538 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/*! +@header IOSCSICommand_Reference.h + +This header defines the IOSCSICommand class. + +This class encapsulates a SCSI Command. The client driver allocates a +command using IOSCSIDevice::allocCommand() and initializes it using +functions of this class. The client can then submit the command to +the SCSI stack by invoking the execute() function. +*/ + + +/*! +@enum SCSICDBFlags +Defines values for the cdbFlags field in the SCSICDBInfo structure. +@constant kCDBFNoDisconnect +Set by the IOSCSIDevice client to indicate the target may not disconnect +during the execution of this IOSCSICommand. +@constant kCDBFlagsDisableParity +Set by the IOSCSIController class to tell the host adapter driver to disable +parity checking during the execution of this CDB. +@constant kCDBFlagsNoDisconnect +Set by the IOSCSIController class to tell the host adapter driver that the +target may not disconnect during the execution of this IOSCSICommand. +@constant kCDBFlagsNegotiateSDTR +Set by the IOSCSIController class to tell the host adapter driver that it +should initiate synchronous data transfer negotiation during this IOSCSICommand. 
+@constant kCDBFlagsNegotiateWDTR +Set by the IOSCSIController class to tell the host adapter driver that it +should initiate wide data transfer negotiation during this IOSCSICommand. +*/ +enum SCSICDBFlags { + kCDBFNoDisconnect = 0x00000001, + +/* + * Note: These flags are for IOSCSIController subclasses only + */ + kCDBFlagsDisableParity = 0x08000000, + kCDBFlagsNoDisconnect = 0x10000000, + kCDBFlagsNegotiateSDTR = 0x20000000, + kCDBFlagsNegotiateWDTR = 0x40000000, +}; + + +/*! +@enum SCSIAdapterStatus +Defines the values of the adapterStatus field of the SCSIResults structure. +@constant kSCSIAdapterStatusSuccess +Request completed with no adapter reported errors. +@constant kSCSIAdapterStatusProtocolError +Violation of SCSI protocol detected by host adapter. +@constant kSCSIAdapterStatusSelectionTimeout +Target device did not respond to selection. +@constant kSCSIAdapterStatusMsgReject +Adapter received a msg reject from the target device. +@constant kSCSIAdapterStatusParityError +Adapter detected, or target reported a parity error during the +IOSCSICommand. +@constant kSCSIAdapterStatusOverrun +Target device requested more data than supplied by host. +*/ +enum SCSIAdapterStatus { + kSCSIAdapterStatusSuccess = 0, + kSCSIAdapterStatusProtocolError, + kSCSIAdapterStatusSelectionTimeout, + kSCSIAdapterStatusMsgReject, + kSCSIAdapterStatusParityError, + kSCSIAdapterStatusOverrun, +}; + + +/*! +@typedef SCSICDBInfo +@discussion +Fields specified here are set by IOSCSIDevice client, while others +are set by the IOSCSIController class for use by the host adapter +driver. The client should zero all fields of the structure prior +to use. +@field cdbFlags +See enum SCSICDBFlags for flag definitions. +@field cdbTagMsg +This field should be set to zero by the IOSCSIDevice client. If the +SCSI device supports tag queuing then the IOSCSIController class +will set this field to select simple (unordered) tags. 
+@field cdbTag +This field is set by the IOSCSIController class to tell the host +adapter driver the SCSI tag value to assign to this IOSCSICommand. +@field cdbLength +Set by the IOSCSIDevice client to the length of the Command Descriptor +Block (CDB). +@field cdb +Set by the IOSCSIDevice client to command descriptor block the client +wishes the target to execute. +*/ +typedef struct SCSICDBInfo { + + UInt32 cdbFlags; + + UInt32 cdbTagMsg; + UInt32 cdbTag; + + UInt32 cdbAbortMsg; + + UInt32 cdbLength; + UInt8 cdb[16]; + + UInt32 reserved[16]; +} SCSICDBInfo; + + +/*! +@typedef SCSIResults +@field returnCode +The overall return code for the command. See iokit/iokit/IOReturn.h. +This value is also returned as the getResults() return value. + +Note: The SCSI Family will automatically generate standard return codes +based on the values in the adapterStatus and scsiStatus fields. Unless +the IOSCSIController subclass needs set a specific return code, it should +leave this field set to zero. +@field bytesTransferred +The total number of bytes transferred to/from the target device. +@field adapterStatus +The IOSCSIController subclass must fill-in this field as appropriate. +See enum SCSIAdapterStatus. +@field scsiStatus +The SCSI Status byte returned from the target device. +@field requestSenseDone +A boolean indicating whether sense data was obtained from the target +device. +@field requestSenseLength +The number of sense data bytes returned from the target device. +*/ +typedef struct SCSIResults { + IOReturn returnCode; + + UInt32 bytesTransferred; + + enum SCSIAdapterStatus adapterStatus; + UInt8 scsiStatus; + + bool requestSenseDone; + UInt32 requestSenseLength; +} SCSIResults; + + +/*! +@enum SCSIQueueType +Each IOSCSIDevice has two queues, a normal Q and a bypass Q. The treatment of the +queues is essentially identical except that the bypass Q is given preference whenever +it has commands available. 
+ +Usually, the client will use the normal Q for regular I/O commands and the bypass Q +to send error recovery commands to the device. +@constant kQTypeNormalQ +Indicates command applies to the normal IOSCSIDevice queue. +@constant kQTypeBypassQ +Indicates command applies to the bypass IOSCSIDevice queue. +*/ +enum SCSIQueueType { + kQTypeNormalQ = 0, + kQTypeBypassQ = 1, +}; + + +/*! +@enum SCSIQueuePosition +Indicates whether a IOSCSICommand should be added to the head or tail +of the queue selected. +@constant kQPositionTail +Queue request at the tail (end) of the selected queue. +@constant kQPositionHead +Queue request at the head (front) of the selected queue. +*/ +enum SCSIQueuePosition { + kQPositionTail = 0, + kQPositionHead = 1, +}; + + +/*! +@struct SCSITargetLun +@field target +The SCSI Id for the SCSI device being selected. +@field lun +The SCSI Lun for the SCSI device being selected. +*/ +typedef struct SCSITargetLun { + UInt8 target; + UInt8 lun; + UInt8 reserved[2]; +} SCSITargetLun; + +/*! +@class IOSCSICommand : public IOCDBCommand +@abstract +Class that describes a SCSI device (target/lun pair). +@discussion +This class encapsulates a SCSI Command. The client driver allocates a +command using IOSCSIDevice::allocCommand() and initializes it using +functions of this class. The client can then submit the command to +the SCSI stack by invoking the execute() function. +*/ +class IOSCSICommand : public IOCDBCommand +{ +public: + + +/*! +@function setPointers +@abstract +Sets the data buffer component of a SCSI Command. +@discussion +The client provides an IOMemoryDescriptor object to corresponding +to the client's data or request sense buffer, the maximum data transfer count +and data transfer direction. +@param desc +Pointer to a IOMemoryDescriptor describing the client's I/O buffer. +@param transferCount +Maximum data transfer count in bytes. +@param isWrite +Data transfer direction. (Defined with respect to the device, i.e. 
isWrite = true +indicates the host is writing to the device. +@param isSense +If isSense is set to false, the IOSCSICommand's data buffer information is set. Otherwise, +the IOSCSICommand's request sense buffer information is set +*/ +void setPointers( IOMemoryDescriptor *desc, UInt32 transferCount, bool isWrite, bool isSense=false ); + + +/*! +@function getPointers +@abstract +Gets the data buffer component of a SCSI Command. +@discussion +The client provides a set of pointers to fields to receive the IOSCSICommand's +data/request sense buffer pointers. +@param desc +Pointer to a field (IOMemoryDescriptor *) to receive the IOSCSICommand's IOMemoryDescriptor pointer. +@param transferCount +Pointer to a field (UInt32) to receive the IOSCSICommand's maximum transfer count. +@param isWrite +Pointer to a field (bool) to receive the IOSCSICommand's transfer direction. +@param isSense +If isSense is set to true, the IOSCSICommand's data buffer information is returned. Otherwise, +the IOSCSICommand's request sense buffer information is returned. +*/ +void getPointers( IOMemoryDescriptor **desc, UInt32 *transferCount, bool *isWrite, bool isSense = false ); + +/*! +@function setTimeout +@abstract +Sets the timeout for the command in milliseconds. +@discussion +The IOSCSIController class will abort a command which does not +complete with in the time interval specified. The client should +set the timeout parameter to zero if they want to suppress +timing. +@param timeout +Command timeout in milliseconds. +*/ +void setTimeout( UInt32 timeoutmS ); + +/*! +@function getTimeout +@abstract +Gets the timeout for the command in milliseconds. +@discussion +This function returns the command timeout previously set by setTimeout(). +@param timeout +Command timeout in milliseconds. +*/ +UInt32 getTimeout(); + + +/*! +@function setCallback +@abstract +Sets the callback routine to be invoked when the SCSI Command completes. 
+@param target +Pointer to the object to be passed to the callback routine. This would usually +be the client's (this) pointer. +@param callback +Pointer to the client's function to process the completed command. +@param refcon +Pointer to the information required by the client's callback routine to process +the completed command. +*/ +void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + + +/*! +@function getClientData +@abstract +Returns a pointer to the SCSI Command's client data area. +@discussion +The client may allocate storage in the SCSI Command for its own use. +See IOSCSIDevice::allocateCmd(). +*/ +void *getClientData(); + +/*! +@function getCommandData +@abstract +Returns a pointer to the SCSI Command's controller data area +@discussion +This area is allocated for use by the IOSCSIController subclass (host adapter +driver). The client should not normally access this area. +*/ +void *getCommandData(); + + +/*! +@function setCDB +@abstract +Sets the CDB component of a SCSI Command. +@param scsiCmd +Pointer to a SCSICDBInfo structure. +*/ +void setCDB( SCSICDBInfo *scsiCmd ); + + +/*! +@function getCDB +@abstract +Gets the CDB component of a SCSI Command. +@param scsiCmd +Pointer to a SCSICDBInfo structure to receive the SCSI Command's cdb information. +*/ +void getCDB( SCSICDBInfo *scsiCmd ); + + +/*! +@function getResults +@abstract +Gets results from a completed SCSI Command. +@discussion +The getResults() function returns the value of the returnCode field of the command results. If +the client is only interested in a pass/fail indication for the command, the client +can pass (SCSIResult *)0 as a parameter. +@param results +Pointer to a SCSIResults structure to receive the SCSI Command's completion information. +*/ +IOReturn getResults( SCSIResults *results ); + +/*! +@function setResults +@abstract +Sets the results component of a SCSI Command. 
+@discussion +The setResults() function is used by the IOSCSIController subclass (host +adapter driver) return results for a SCSI Command about to be completed. +@param scsiResults Pointer to a SCSIResults structure containing +completion information for the SCSI Command. + +Completion information is copied into the command, so the caller may +release the SCSIResults structure provided when this function returns. +*/ +void setResults( SCSIResults *results ); + + +/*! +@function getDevice +@abstract +Returns the IOSCSIDevice this command is targeted to. +@param deviceType +The caller should use value kIOSCSIDeviceType. +@discussion +In some cases a IOSCSICommand is not associated with a specific target/lun. This +would be the case for a SCSI Bus Reset. In this case getDevice() returns 0. +*/ +IOSCSIDevice *getDevice( IOSCSIDevice *deviceType ); + + +/*! +@function getTargetLun +@abstract +Returns the target/lun for the IOSCSIDevice this command is associated with. +@param targetLun +Pointer to a SCSITargetLun structure to receive the target/lun information. +*/ +void getTargetLun( SCSITargetLun *targetLun ); + + +/*! +@function execute +@abstract +Submits a SCSI command to be executed. +@discussion +Once the execute() function is called, the client should not +invoke any further functions on the SCSI Command with the +exception of abort(). + +The execute() function optionally returns sets a unique sequence +number token for the command. If the client intends to use the abort() +method they must retain this sequence number token. +@param sequenceNumber +Pointer to field (UInt32) to receive the sequence number assigned to the SCSI +Command. +*/ +bool execute( UInt32 *sequenceNumber = 0 ); + +/*! +@function abort +@abstract +Aborts an executing SCSI Command. +@discussion +The client may invoke the abort() method to force the completion of an +executing SCSI Command. The client must pass the sequence number +provided when the execute() function was invoked. 
+ +Note: The abort function provides no status on whether or not a +command has been successfully aborted. The client should wait for the +command to actually complete to determine whether the abort completed +successfully. +@param sequenceNumber +The client must pass the sequence number assigned to the command when +the client called the execute() function. +*/ +void abort( UInt32 sequenceNumber ); + +/*! +@function complete +@abstract +Indicates the IOSCSIController subclass (host adapter driver) has completed a SCSI command. +@discussion +Once the complete() function is called, the controller +subclass should make no further accesses to the IOSCSICommand +being completed. + +A IOSCSIDevice client would not normally call this function. +*/ +void complete(); + + +/*! +@function getSequenceNumber +@abstract +Returns the sequence number assigned to an executing command. +@discussion +The caller should check the sequence number for 0. This indicates that +the command has completed or has not been processed to the point where +a sequence number has been assigned. +*/ +UInt32 getSequenceNumber(); + + +/*! +@function setQueueInfo +@abstract +Sets queuing information for the SCSI Command. +@discussion +Each IOSCSIDevice has two queues, a normal Q and a bypass Q. The treatment of the +queues is essentially identical except that the bypass Q is given preference whenever +it has commands available. + +Usually, the client will use the normal Q for regular I/O commands and the bypass Q +to send error recovery commands to the device. +@param queueType +Set to kQTypeNormalQ or kQTypeBypassQ to indicate which IOSCSIDevice queue the +SCSI Command should be routed to. +@param queuePosition +Set to kQPositionTail or kQPositionHead to indicate whether the SCSI Command should +be added to the head or tail of the selected IOSCSIDevice queue. +*/ +void setQueueInfo( UInt32 queueType = kQTypeNormalQ, UInt32 queuePosition = kQPositionTail ); + + +/*! 
+@function getQueueInfo +@abstract +Gets queuing information for the SCSI Command. +@param queueType +Pointer to a field (UInt32) to receive the queue type previously set for this SCSI Command. +@param queuePosition +Pointer to a field (UInt32) to receive the queue position previously set for this SCSI Command. +*/ +void getQueueInfo( UInt32 *queueType, UInt32 *queuePosition = 0 ); + + +/*! +@function getCmdType +@abstract +Obtains the underlying 'intent' of a SCSI Command. +@discussion +This function provides information on the intent of a SCSI +Command. For example, since Aborts, Request Sense and normal Execute commands are +all sent to the executeCommand() function, invoking getCmdType() +will indicate whether a Request Sense, Abort or Normal I/O request is +being processed. + +It this information is not normally meaningful to IOSCSIDevice clients. +*/ +UInt32 getCmdType(); + + +/*! +@function getOriginalCmd +@abstract +Obtains a 'related' SCSI Command. +@discussion +In cases where a SCSI command is related to a previous command, this +function will return the original command. For example, if a +Request Sense command (CmdType = kSCSICommandReqSense)is processed, +then this function can be used to obtain the original command that +caused the check condition. If an Abort command (CmdType = +kSCSICommandAbort) then this function can be used to obtain the original +command the abort was issued against. + + +It this information is not normally meaningful to IOSCSIDevice clients. +*/ +IOSCSICommand *getOriginalCmd(); + +}; diff --git a/iokit/IOKit/scsi/IOSCSIController_Reference.h b/iokit/IOKit/scsi/IOSCSIController_Reference.h new file mode 100644 index 000000000..e8db8729a --- /dev/null +++ b/iokit/IOKit/scsi/IOSCSIController_Reference.h @@ -0,0 +1,475 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/*! +@header IOSCSIController_Reference.h + +This header defines the IOSCSIController class. + +IOSCSIController provides the superclass for SCSI host +adapter drivers. + +Drivers are instantiated based on their 'personality' entry matching +their adapter's OpenFirmware device tree entry. When a match occurs, +the driver's class is instantiated. Since the driver is written as a +subclass of IOSCSIController, an instance of the SCSI Family is automatically +instantiated. +*/ + + +/*! +@typedef SCSIControllerInfo +Parameter structure passed for configure() function. +@field initiatorId +The SCSI address of your host adapter. Usually 7 (decimal). +@field maxTargetsSupported +The number of targets you controller supports. Typically 8 or 16. +@field maxLunsSupported +The number of logical units per target your controller supports. +Typically 8. +@field minTransferPeriodpS +The minimum synchronous data transfer period in picoseconds your +controller supports. +@field maxTransferOffset +The maximum synchronous data offset your controller supports in bytes. 
+@field maxTransferWidth +The maximum data SCSI bus width your controller supports in bytes. Must +be a power of 2. +@field maxCommandsPerController +The maximum number of outstanding commands your controller supports +across all targets and luns. Set to 0 if there is no controller limit in +this category. +@field maxCommandsPerTarget +The maximum number of outstanding commands your controller supports on a +given target. Set to 0 if there is no controller limit in this category. +@field maxCommandsPerLun +The maximum number of outstanding commands your controller supports on a +given lun. Set to 0 if there is no controller limit in this category. +@field tagAllocationMethod +Controls whether tags are allocated on a per Lun, per Target or per +Controller basis. See enum SCSITagAllocation. +@field maxTags +The maximum number of tags allocated to each Lun, Target or Controller +depending on the tagAllocationMethod setting. +@field targetPrivateDataSize +IOSCSIController will optionally allocate per-target storage for your +driver based on the setting of this field. The amount of storage needed +is specified in bytes. +@field lunPrivateDataSize +IOSCSIController will optionally allocate per-lun storage for your +driver based on the setting of this field. The amount of storage needed +is specified in bytes. +@field commandPrivateDataSize +IOSCSIController will optionally allocate per-command storage for your +driver based on the setting of this field. The amount of storage needed +is specified in bytes. + +Note: The amount of per-command storage allowed is under review. We +anticipate that typical SCSI controllers will need not more than 1024 +bytes per command. +@field disableCancelCommands +Subclasses of IOSCSIController which do their own management of +aborts/resets can set this field to true to avoid receiving +cancelCommand() requests. 
+*/ +typedef struct SCSIControllerInfo { + UInt32 initiatorId; + + UInt32 maxTargetsSupported; + UInt32 maxLunsSupported; + + UInt32 minTransferPeriodpS; + UInt32 maxTransferOffset; + UInt32 maxTransferWidth; + + UInt32 maxCommandsPerController; + UInt32 maxCommandsPerTarget; + UInt32 maxCommandsPerLun; + + UInt32 tagAllocationMethod; + UInt32 maxTags; + + UInt32 targetPrivateDataSize; + UInt32 lunPrivateDataSize; + UInt32 commandPrivateDataSize; + + bool disableCancelCommands; + + UInt32 reserved[64]; + +} SCSIControllerInfo; + + +/*! +@enum SCSITagAllocation +@discussion +This enum defines how SCSI tags are allocated. +@constant kTagAllocationNone +This controller does not support tag queuing. +@constant kTagAllocationPerLun +Each SCSI Lun has its own private tag pool containing +(maxTags) SCSI tags. +@constant kTagAllocationPerTarget +Each SCSI Target has its own private tag pool contain +(maxTags) SCSI tags. Luns connected to this target +allocate tags from this pool. +@constant kTagAllocationPerController +The controller has a global tag pool containing (maxTags) +SCSI tags. This pool is shared by all Luns connected to +this controller. +*/ +enum { + kTagAllocationNone = 0, + kTagAllocationPerLun, + kTagAllocationPerTarget, + kTagAllocationPerController, +}; + + +/*! +@class IOSCSIController : public IOService +@abstract +Superclass for SCSI host adapter drivers +@discussion +The IOSCSIController class provides a number of services to simplify +writing a driver for your host adapter. + +Specifically, the class provides the following features: + +1. Complete request scheduling semantics. + +The IOSCSIController class manages request queues on behalf of its +subclasses. It tracks all requests submitted to its subclasses, +including managing timeouts, aborts and request cancellations. + +2. Request Sense scheduling + +Subclasses of IOSCSIController do not need to implement +auto-request-sense functionality. 
Your driver can use the default +handling in the super class. + +3. Storage management. + +The IOSCSIController subclass provides per-request private storage areas +for your subclass. + +4. Resource management. + +The IOSCSIController subclass will manage the number of outstanding +commands submitted to your subclass on a per-controller and per-lun +basis. +*/ +class IOSCSIController : public IOService +{ +public: + + +/*! +@function configure +@abstract +Driver configuration/initialization request. +@discussion +The configure() member function is the first call your subclass will +receive. You should provide the information requested in the +SCSIControllerInfo structure and enable your hardware for operation. +If your driver initialized successfully, you should return true, otherwise, +your driver should return false. +@param provider +Pointer to an object (usually IOPCIDevice) which represents the bus +your device is attached to. Typically your driver will use functions +supplied by this object to access PCI space on your hardware. See +IOPCIDevice for a description of PCI services. +@param controllerInfo +Pointer to a SCSIControllerInfo structure. Your driver should provide +the information requested in this structure prior to returning from +the configure() call. +*/ +bool configure( IOService *provider, SCSIControllerInfo *controllerInfo ); + + +/*! +@function executeCommand +@abstract +Execute a IOSCSICommand. +@discussion +The executeCommand() function is called for all 'routine' I/O requests +including abort requests. The driver is passed a pointer to an +IOSCSICommand object. The driver obtains information about the I/O +request by using function calls provided by the IOSCSICommand +class. +@param scsiCommand +Pointer to a IOSCSICommand. See IOSCSICommand for more information. +*/ +void executeCommand( IOSCSICommand *scsiCommand ); + + +/*! +@function cancelCommand +@abstract +Cancels a IOSCSICommand previously submitted to the driver. 
+@discussion +The cancelCommand() function is called to inform your subclass to force +completion of a SCSI command. + +Your subclass should call the getOriginalCmd() to determine the command +to complete. + +After calling complete() on the original command, you should complete +the IOSCSICommand passed to the cancelCommand() function + +Note: When a cancelCommand is issued, your subclass may presume that any +activity to remove an active command from the SCSI Target, i.e. (abort +tag/abort) has already occurred. +@param scsiCommand +Pointer to a IOSCSICommand. See IOSCSICommand for more information. +*/ +void cancelCommand( IOSCSICommand *scsiCommand ); + + +/*! +@function resetCommand +@abstract +Request the driver issue a SCSI Bus reset. +@discussion +The resetCommand() function indicates you should do a SCSI Bus Reset. +After issuing the reset you should complete to IOSCSICommand passed. + +Note: After you report the IOSCSICommand Reset complete, you will +receive cancelCommand() requests for all outstanding commands. +@param scsiCommand +Pointer to a IOSCSICommand. See IOSCSICommand for more information. +*/ +void resetCommand( IOSCSICommand *scsiCommand ); + + +/*! +@function resetOccurred +@abstract +Inform the IOSCSIController class of an unsolicited SCSI Bus reset. +@discussion +Your subclass should call this function if +you detect a target initiated bus reset, or need to do an unplanned SCSI +Bus Reset as part of adapter error recovery. + +Note: After you call the resetOccurred() function, you will receive +cancelCommand() requests for all outstanding IOSCSICommand(s). +*/ +void resetOccurred(); + +/*! +@function rescheduleCommand +@abstract +Return a IOSCSICommand for rescheduling. +@discussion +If your subclass function cannot start processing an otherwise +acceptable IOSCSICommand due to resource constraints, i.e. MailBox full, +lost SCSI Bus arbitration, you may have the IOSCSICommand rescheduled by +calling rescheduleCommand(). 
A IOSCSICommand passed to this function +should be treated as 'complete', i.e. you should make no further +accesses to it. + +Note: If you cannot process further commands, you should call the +disableCommands() function to prevent receiving additional commands +until you are ready to accept them. +@param scsiCommand +Pointer to IOSCSICommand your driver needs to reschedule. +*/ +void rescheduleCommand( IOSCSICommand *scsiCommand ); + + +/*! +@function disableCommands +@abstract +Suspend sending I/O commands to your driver. +@discussion +In cases where your executeCommand() member function cannot accept +commands, you may disable further calls by invoking disableCommands(). +Use enableCommands() to resume receiving commands. + +Note: The resetCommand() and cancelCommands() entry points are not +affected by the use of this function. + +Note: The default timeout for disableCommands() is 5s. If this timeout +is exceeded the IOSCSIController class will call your driver's +disableTimeoutOccurred() function. The default action of this function +is to issue a SCSI Bus Reset by calling your driver's resetCommand() +function. +@param timeoutmS +Your driver may override the default timeout +by specifying a timeout value in milliseconds. +*/ +void disableCommands( UInt32 timeoutmS ); + + +/*! +@function enableCommands +@abstract +Resume sending I/O commands to your driver. +@discussion +Resumes sending I/O commands to your driver that were previously suspended +by calling disableCommands(). +*/ +void enableCommands(); + +/*! +@function disableTimeoutOccurred +@abstract +Indicates your driver has suspended commands too long. +@discussion +The IOSCSIController superclass will timeout disableCommand() requests +to preclude the possibility of a hung SCSI bus. If a timeout occurs, +then disableTimeoutOccurred() will be called. The default action of this +routine is to do a SCSI Bus Reset by calling resetCommand(). 
Your +subclass may choose to modify the default behavior of this routine to do +additional adapter specific error recovery. +*/ +void disableTimeoutOccurred(); + + +/*! +@function findCommandWithNexus +@abstract +Locate an active IOSCSICommand using target/lun/tag values. +@discussion +Your subclass can use this function to search for an active +IOSCSICommand by providing the target/lun/tag values for the command. In +the case of a non-tagged command the second parameter must either be +omitted or set to -1. + +An unsuccessful search will return 0. +@param targetLun +Structure of type SCSITargetLun, initialized to the target/lun value you +wish to search for. +@param tagValue +Optional tag value you wish to search for. +*/ +IOSCSICommand *findCommandWithNexus( SCSITargetLun targetLun, UInt32 tagValue = (UInt32) -1 ); + +/*! +@function allocateTarget +@abstract +Notifies driver of allocation of per-Target resources. +@discussion +Your driver will be called at its allocateTarget() function when a target is about +to be probed. The your driver should initialize its per-target data at this time. +If the subclass wishes to prevent probing of this target, it should return false +as the result of this function call. + +This is an optional function. Your driver is not required to implement it. +@param targetLun +SCSITargetLun structure containing the SCSI Id of the target that is about to be +allocated. +*/ +bool allocateTarget( SCSITargetLun targetLun ); + + +/*! +@function deallocateTarget +@abstract +Notifies driver that target resources will be deallocated. +@discussion +Your driver will be called at its deallocateTarget() function when a target is about +deallocated. The your driver must insure that there will be no further access to +the per-target data allocated to this target. + +This is an optional function. Your driver is not required to implement it. +@param targetLun +SCSITargetLun structure containing the SCSI Id of the target that is about to be +deallocated. 
+*/ +bool deallocateTarget( SCSITargetLun targetLun ); + + +/*! +@function allocateLun +@abstract +Notifies driver of allocation of per-Lun resources. +@discussion +Your driver will be called at its allocateLun() function when a Lun is about +to be probed. Your driver should initialize its per-lun data at this time. +If the subclass wishes to prevent probing of this lun, it should return false +as the result of this function call. + +This is an optional function. Your driver is not required to implement it. +@param targetLun +SCSITargetLun structure containing the SCSI Id of the target/lun that is about to be +allocated. +*/ +bool allocateLun( SCSITargetLun targetLun ); + + +/*! +@function deallocateLun +@abstract +Notifies driver of deallocation of per-Lun resources. +@discussion +Your driver will be called at its deallocateLun() function when a Lun is about +to be deallocated. Your driver must ensure that there will be no further access to +the per-lun data allocated to this lun. + +This is an optional function. Your driver is not required to implement it. +@param targetLun +SCSITargetLun structure containing the SCSI Id of the target/lun that is about to be +deallocated. +*/ +bool deallocateLun( SCSITargetLun targetLun ); + + +/*! +@function getTargetData +@abstract +Obtains a pointer to per-Target data allocated by IOSCSIController. +@discussion +This function returns a pointer to per-Target workarea allocated for +your driver's use. The size of this area must be specified +during the configure() function. See struct SCSIControllerInfo, +field targetDataSize. +@param targetLun +SCSITargetLun structure containing the SCSI Id of the target whose +workarea you are requesting a pointer to. +*/ +void *getTargetData( SCSITargetLun targetLun ); + + +/*! +@function getLunData +@abstract +Obtains a pointer to per-Lun data allocated by IOSCSIController. +@discussion +This function returns a pointer to per-Lun workarea allocated for +your driver's use. 
The size of this area must be specified +during the configure() function. See struct SCSIControllerInfo, +field lunDataSize. +*/ +void *getLunData( SCSITargetLun targetLun ); + + +/*! +@function getWorkLoop +@abstract +Returns the IOWorkLoop object that services your driver. +*/ +IOWorkLoop *getWorkLoop(); + + +}; diff --git a/iokit/IOKit/scsi/IOSCSIDeviceInterface.h b/iokit/IOKit/scsi/IOSCSIDeviceInterface.h new file mode 100644 index 000000000..e1533679f --- /dev/null +++ b/iokit/IOKit/scsi/IOSCSIDeviceInterface.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSIDeviceInterface.h + * + */ +#ifndef _IOSCSIDEVICEINTERFACE_H +#define _IOSCSIDEVICEINTERFACE_H + +#include + +#include +#include +#include +#include +#include + +#endif diff --git a/iokit/IOKit/scsi/IOSCSIDevice_Reference.h b/iokit/IOKit/scsi/IOSCSIDevice_Reference.h new file mode 100644 index 000000000..e1436ea0d --- /dev/null +++ b/iokit/IOKit/scsi/IOSCSIDevice_Reference.h @@ -0,0 +1,401 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/*! +@header IOSCSIDevice_Reference.h + +This header defines the IOSCSIDevice class. + +The SCSI framework creates instances of this class to +represent each valid SCSI device (target/lun) detected during +SCSI bus scanning. When an instance of this class is registered with +IOKit, the instance will be presented to clients which +'match' the IOSCSIDevice class. +*/ + +/*! +@typedef SCSITargetParms +Parameter structure for get/setTargetParms +@field transferPeriodpS +Minimum SCSI synchronous transfer period allowed +for this target in picoseconds (10E-12). For asynchronous data transfer, +set this field to 0. +@field transferOffset +Maximum SCSI synchronous transfer offset allowed for this target in +bytes. For asynchronous data transfer, set this field to 0. +@field transferWidth +Maximum SCSI bus width in bytes. Note: must be a +power of 2. +@field enableTagQueuing +Setting enableTagQueuing to true enables tag queuing for SCSI Commands +issued to the target. +@field disableParity +Set to (true) to disable parity checking on the +SCSI bus for this target. 
+*/ +typedef struct SCSITargetParms { + UInt32 transferPeriodpS; + UInt32 transferOffset; + UInt32 transferWidth; + + bool enableTagQueuing; + bool disableParity; + + UInt32 reserved[16]; + +} SCSITargetParms; + + +/*! +@typedef SCSILunParms +Parameter structure for get/setLunParms +@field disableDisconnect +Setting disableDisconnect to true disables SCSI disconnect for SCSI +Commands issued to the target/lun pair. +*/ +typedef struct SCSILunParms { + bool disableDisconnect; + + UInt32 reserved[16]; + +} SCSILunParms; + + +/*! +@enum SCSIClientMessage +@discussion +IOSCSIDevice notifies its client of significant 'events' by the IOService::message() +api. When possible the client is notified of the event prior to any action taken. The +client is responsible for managing the device queue for the IOSCSIDevice +via the holdQueue(), releaseQueue(), flushQueue() and notifyIdle() api's. The client is also +notified at the end of an 'event' by the corresponding message id with or'd with +kClientMsgDone. +@constant kClientMsgDeviceAbort +A client initiated device abort is beginning. +@constant kClientMsgDeviceReset +A client initiated device reset is beginning. +@constant kClientMsgBusReset +An unsolicited bus reset has occurred. +@constant kClientMsgDone +This constant is or'd with one of the above message ids to indicate the +client should complete processing of the corresponding event. +*/ +enum SCSIClientMessage { + kClientMsgDeviceAbort = 0x00005000, + kClientMsgDeviceReset, + kClientMsgBusReset, + + kClientMsgDone = 0x80000000, +}; + + +/*! +@class IOSCSIDevice : public IOCDBDevice +@abstract +Class that describes a SCSI device (target/lun pair). +@discussion +The IOSCSIDevice class provides basic services +to initialize and supervise a SCSI device. Once the device is +initialized, the client will normally use the allocCommand() member +function to create IOSCSICommand(s) to send SCSI CDBs to the target/lun. 
+*/ +class IOSCSIDevice : public IOCDBDevice +{ +public: + +/*! +@function allocCommand +@abstract +Allocates a IOSCSICommand object for this device. +@discussion +The client uses the allocCommand() member function to allocate IOSCSICommand(s) +for a IOSCSIDevice. The client then uses the member functions of +the IOSCSICommand to initialize it and send it to the device. A completed IOSCSICommand +may be reused for subsequent I/O requests or returned to the SCSI Family. +@param scsiDevice +Always specify kIOSCSIDevice. +@param clientDataSize +The client may indicate the size of a per-command data area for its own +use. + +Note: The amount of per-command storage allowed is under review. We +anticipate that typical SCSI clients will need not more than 1024 bytes +per command. +*/ +IOSCSICommand *allocCommand( IOSCSIDevice *scsiDevice, UInt32 clientDataSize = 0 ); + + +/*! +@function setTargetParms +@abstract +Sets SCSI parameters that apply to all luns on a SCSI target device. +@discussion +This function will block until we attempt to set the +requested parameters. It may not be called from the device's workloop context. + +The SCSI Family will serialize accesses to the SCSI +target so as not to disrupt commands in progress prior to processing a +change of target parameters. +@param targetParms +Pointer to structure of type SCSITargetParms. +*/ +bool setTargetParms( SCSITargetParms *targetParms ); + + +/*! +@function getTargetParms +@abstract +Gets the current target parameters. +@discussion +Returns the parameters currently in effect for the SCSI target. +See setTargetParms(). +@param targetParms +Pointer to structure of type SCSITargetParms. +*/ +void getTargetParms( SCSITargetParms *targetParms ); + + +/*! +@function setLunParms +@abstract +Sets the logical unit parameters for this device. +@discussion +This function will block until we attempt to set the +requested parameters. It may not be called from the device's workloop context. 
+ +The SCSI Family will serialize accesses to the SCSI +target/lun so as not to disrupt commands in progress prior to processing a +change of lun parameters. +@param lunParms +Pointer to structure of type SCSILunParms +*/ +bool setLunParms( SCSILunParms *lunParms ); + + +/*! +@function getLunParms +@abstract +Gets the current logical unit parameters. +@discussion +Returns the parameters currently in effect for the SCSI target/lun. +@param lunParms +Pointer to structure of type SCSITargetParms +*/ +void getLunParms( SCSILunParms *lunParms ); + + +/*! +@function abort +@abstract +Aborts all outstanding requests for the target/lun pair. +@discussion +If any I/O requests are currently active for the target/lun, an abort +command is sent to the device and any active requests are completed. + +Prior to abort processing beginning, the client will be notified via: + +message( kClientMsgDeviceAbort ); + +When abort processing is completed, the client will be notified via: + +message( kClientMsgDeviceAbort | kClientMsgDone ); + +The client is responsible for managing the pending work queue for +the device when an abort request occurs. See holdQueue(), flushQueue(), +notifyIdle() functions. +*/ +void abort(); + + +/*! +@function reset +@abstract +Resets the SCSI target. +@discussion +Since a SCSI target may have multiple logical units (lun(s)) the +reset() function may affect multiple IOSCSIDevice instances. Processing for +each lun is similar. + +Prior to reset processing beginning, the client will be notified via: + +message( kClientMsgDeviceReset ); + +When reset processing is completed, the client will be notified via: + +message( kClientMsgDeviceReset | kClientMsgDone ); + +The client is responsible for managing the pending work queue for +the device when an abort request occurs. See holdQueue(), flushQueue(), +notifyIdle() functions. +*/ +void reset(); + + +/*! +@function getInquiryData +@abstract Returns SCSI Inquiry data for the IOSCSIDevice. 
+@discussion +Inquiry data returned is from the results of the last SCSI bus probe. +@param inquiryBuffer +Pointer to a buffer to receive the Inquiry data. +@param inquiryBufSize +Size of the buffer supplied. +@param inquiryDataSize +Pointer to a UInt32 to receive the size of the Inquiry data actually +returned. +*/ +void getInquiryData( void *inquiryBuffer, UInt32 inquiryBufSize, UInt32 *inquiryDataSize ); + + +/*! +@function message +@abstract +IOService message function. +@discussion +IOSCSIDevice notifies its client of significant 'events' by the IOService::message() +api. When possible the client is notified of the event prior to any action taken. The +client is responsible for managing the device queue for the IOSCSIDevice +via the holdQueue(), releaseQueue(), flushQueue() and notifyIdle() api's. + +Any return codes provided by the client are ignored. +@param message-id +Message id's for IOSCSIDevice are defined by enum SCSIClientMessage +@param provider +Pointer to the IOSCSIDevice reporting the event. +@param argument +Unused. +*/ +IOReturn message( UInt32 type, IOService * provider, void * argument = 0 ); + + +/*! +@function open +@abstract +IOService open function +@discussion +A client should open a IOSCSIDevice prior to accessing it. Only one open is allowed +per device. +@param client +Pointer to the IOSCSI device the client is opening. +@param options +There are currently no options defined by the SCSI Family. +@param arg +Unused. Omit or specify 0. +*/ +bool open( IOService *client, IOOptionBits options = 0, void *arg = 0 ); + + +/*! +@function close +@abstract +IOService close function +@discussion +A client must close a IOSCSIDevice if the client plans no further accesses to it. +@param client +Pointer to the IOSCSI device the client is closing. +@param options +There are currently no options defined by the SCSI Family. +*/ +void close( IOService *client, IOOptionBits options = 0 ); + + +/*! 
+@function holdQueue +@abstract +Suspends sending additional IOSCSICommands to the target/lun. +@discussion +holdQueue() may only be called from the IOSCSIDevice workloop. The client +is guaranteed to be running in this context during a message() notification. + +holdQueue() has no effect on commands already passed to the host adapter. One +or more commands may complete after the queue is held. See notifyIdle() +@param queueType +Perform action on the indicated queue. See enum SCSIQueueType in IOSCSICommand. +*/ +holdQueue( UInt32 queueType ); + + +/*! +@function flushQueue +@abstract +Returns any commands on the IOSCSIDevice's pending work queue. +@discussion +flushQueue() may only be called from the IOSCSIDevice workloop. This is +guaranteed to be the case after a IOSCSICommand completion of after a +message() notification. + +All pending command are completed prior to flushQueue() returning to the caller. + +flushQueue() has no effect on commands already passed to the host adapter. One +or more commands may complete after the queue is flushed. See notifyIdle(). +@param queueType +Perform action on the indicated queue. See enum SCSIQueueType in IOSCSICommand. +@param rc +The return code of any flushed commands is set to (rc). +*/ +void flushQueue( UInt32 queueType, IOReturn rc ); + + +/*! +@function notifyIdle +@abstract +Notifies the client when all active commands on a SCSI device have completed. +@discussion +notifyIdle() may only be called from the IOSCSIDevice workloop. This is guaranteed +to be the case after a IOSCSICommand completion of after a message() notification. + +Only one notifyIdle() call may be active. Any outstanding notifyIdle() calls may +be cancelled by calling notifyIdle() with no parameters. +@param target +Object to receive the notification. Normally the client's (this) pointer. +@param callback +Pointer to a callback routine of type CallbackFn. +@param refcon +Pointer to client's private data. 
+*/ +void notifyIdle( void *target, Callback callback, void *refcon ); + + +/*! +@function releaseQueue +@abstract +Resumes sending IOSCSICommands to the IOSCSIDevice. +@discussion +If the device queue was not held, releaseQueue() has no effect. + +releaseQueue() may only be called from the IOSCSIDevice workloop. This is guaranteed +to be the case after a IOSCSICommand completion of after a message() notification. +@param queueType +Perform action on the indicated queue. See enum SCSIQueueType in IOSCSICommand. +*/ +void releaseQueue( UInt32 queueType ); + + +/*! +@function getWorkLoop +@abstract +Returns the IOWorkLoop object that services this IOSCSIDevice. +*/ +IOWorkloop *getWorkLoop(); + +} diff --git a/iokit/IOKit/scsi/IOSCSIParallelInterface.h b/iokit/IOKit/scsi/IOSCSIParallelInterface.h new file mode 100644 index 000000000..4458f2979 --- /dev/null +++ b/iokit/IOKit/scsi/IOSCSIParallelInterface.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSIParallelInterface.h + * + */ +#ifndef _IOSCSIPARALLELINTERFACE_H +#define _IOSCSIPARALLELINTERFACE_H + +#include + +#include +#include +#include +#include +#include + +#endif diff --git a/iokit/IOKit/scsi/Makefile b/iokit/IOKit/scsi/Makefile new file mode 100644 index 000000000..d5165ee3f --- /dev/null +++ b/iokit/IOKit/scsi/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = scsi +NOT_EXPORT_HEADERS = IOSCSIDevice_Reference.h IOSCSICommand_Reference.h IOSCSIController_Reference.h + +INSTINC_SUBDIRS = scsi-device scsi-parallel +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/scsi/scsi-device/IOSCSICommand.h b/iokit/IOKit/scsi/scsi-device/IOSCSICommand.h new file mode 100644 index 000000000..af8932c45 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-device/IOSCSICommand.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSICommand.h + * + */ +#ifndef _IOSCSICOMMAND_H +#define _IOSCSICOMMAND_H + +class IOSCSIDevice; +class IOSCSICommand; + +class IOSCSICommand : public IOCDBCommand +{ + OSDeclareAbstractStructors(IOSCSICommand) + +/*------------------Methods provided to IOCDBCommand users -------------------------*/ +public: + /* + * Set/Get IOMemoryDescriptor object to I/O data buffer or sense data buffer. + */ + virtual void setPointers( IOMemoryDescriptor *desc, + UInt32 transferCount, + bool isWrite, + bool isSense = false ) = 0; + + virtual void getPointers( IOMemoryDescriptor **desc, + UInt32 *transferCount, + bool *isWrite, + bool isSense = false ) = 0; + /* + * Set/Get command timeout (mS) + */ + virtual void setTimeout( UInt32 timeoutmS ) = 0; + virtual UInt32 getTimeout() = 0; + + /* + * Set async callback routine. Specifying no parameters indicates synchronous call. + */ + virtual void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) = 0; + + /* + * Set/Get CDB information. (Generic CDB version) + */ + virtual void setCDB( CDBInfo *cdbInfo ) = 0; + virtual void getCDB( CDBInfo *cdbInfo ) = 0; + + /* + * Get CDB results. (Generic CDB version) + */ + virtual IOReturn getResults( CDBResults *cdbResults ) = 0; + + /* + * Get CDB Device this command is directed to. 
+ */ + virtual IOCDBDevice *getDevice( IOCDBDevice *deviceType ) = 0; + + /* + * Command verbs + */ + virtual bool execute( UInt32 *sequenceNumber = 0 ) = 0; + virtual void abort( UInt32 sequenceNumber ) = 0; + virtual void complete() = 0; + + /* + * Get pointers to client and command data. + */ + virtual void *getCommandData() = 0; + virtual void *getClientData() = 0; + + /* + * Get unique sequence number assigned to command. + */ + virtual UInt32 getSequenceNumber() = 0; + +/*------------------ Additional methods provided to IOSCSICommand users -------------------------*/ +public: + /* + * Set/Get CDB information. (SCSI specific version). + */ + virtual void setCDB( SCSICDBInfo *scsiCmd ) = 0; + virtual void getCDB( SCSICDBInfo *scsiCmd ) = 0; + + /* + * Get/Set CDB results. (SCSI specific version). + */ + virtual IOReturn getResults( SCSIResults *results ) = 0; + virtual void setResults( SCSIResults *results ) = 0; + + /* + * Get SCSI Device this command is directed to. + */ + virtual IOSCSIDevice *getDevice( IOSCSIDevice *deviceType ) = 0; + + /* + * Get SCSI Target/Lun for this command. + */ + virtual void getTargetLun( SCSITargetLun *targetLun ) = 0; + + /* + * Get/Set queue routing for this command. + */ + virtual void setQueueInfo( UInt32 forQueueType = kQTypeNormalQ, UInt32 forQueuePosition = kQPositionTail ) = 0; + virtual void getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ) = 0; + + /* + * Set to blank state, call prior to re-use of this object. + */ + virtual void zeroCommand() = 0; +}; + +#endif diff --git a/iokit/IOKit/scsi/scsi-device/IOSCSIDevice.h b/iokit/IOKit/scsi/scsi-device/IOSCSIDevice.h new file mode 100644 index 000000000..f542f8915 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-device/IOSCSIDevice.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSIDevice.h + * + * + * Methods in this header provide information about the SCSI device + * the device client driver is submitting the SCSICommand(s) to. + * + * Note: SCSICommand(s) are allocated and freed by methods in this class. 
+ * The remaining methods to setup and submit SCSICommands are defined in + * IOSCSICommand.h + */ + +#ifndef _IOSCSIDEVICE_H +#define _IOSCSIDEVICE_H + +class IOSCSICommand; + +class IOSCSIDevice : public IOCDBDevice +{ + OSDeclareAbstractStructors(IOSCSIDevice) + +/*------------------Methods provided to IOCDBDevice clients-----------------------*/ +public: + + /* + * Allocate a CDB Command + */ + virtual IOCDBCommand *allocCommand( IOCDBDevice *cdbDevice, UInt32 clientDataSize = 0 ) = 0; + + /* + * Abort all outstanding commands on this device + */ + virtual void abort() = 0; + + /* + * Reset device (also aborts all outstanding commands) + */ + virtual void reset() = 0; + + /* + * Obtain information about this device + */ + virtual void getInquiryData( void *inquiryBuffer, + UInt32 inquiryBufSize, + UInt32 *inquiryDataSize ) = 0; + +/*------------------Additional methods provided to IOSCSIDevice clients-----------------------*/ +public: + /* + * Allocate a SCSICommand + */ + virtual IOSCSICommand *allocCommand( IOSCSIDevice *scsiDevice, UInt32 clientDataSize = 0 ) = 0; + + /* + * Target management commands + */ + virtual bool setTargetParms( SCSITargetParms *targetParms ) = 0; + virtual void getTargetParms( SCSITargetParms *targetParms ) = 0; + + /* + * Lun management commands + */ + virtual bool setLunParms( SCSILunParms *lunParms ) = 0; + virtual void getLunParms( SCSILunParms *lunParms ) = 0; + + /* + * Queue management commands + */ + virtual void holdQueue( UInt32 queueType ) = 0; + virtual void releaseQueue( UInt32 queueType ) = 0; + virtual void flushQueue( UInt32 queueType, IOReturn rc ) = 0; + virtual void notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ) = 0; + +}; + +#define kIOSCSIDevice ((IOSCSIDevice *)0) + +#endif diff --git a/iokit/IOKit/scsi/scsi-device/Makefile b/iokit/IOKit/scsi/scsi-device/Makefile new file mode 100644 index 000000000..a2cad73cc --- /dev/null +++ b/iokit/IOKit/scsi/scsi-device/Makefile @@ -0,0 +1,36 @@ 
+export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = scsi/scsi-device +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = SCSICommand.h SCSIDevice.h SCSIPublic.h +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/scsi/scsi-device/SCSICommand.h b/iokit/IOKit/scsi/scsi-device/SCSICommand.h new file mode 100644 index 000000000..7b2867df4 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-device/SCSICommand.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * SCSICommand.h + * + */ + +#ifndef _SCSICOMMAND_H +#define _SCSICOMMAND_H + +typedef struct SCSICDBInfo +{ + + UInt32 cdbFlags; + + UInt32 cdbTagMsg; + UInt32 cdbTag; + + UInt32 cdbAbortMsg; + + UInt32 cdbLength; + UInt8 cdb[16]; + + UInt32 reserved[16]; +} SCSICDBInfo; + + +enum SCSICDBFlags +{ + kCDBFNoDisconnect = 0x00000001, + +/* + * Note: These flags are for IOSCSIController subclasses only + */ + kCDBFlagsNegotiatePPR = 0x04000000, + kCDBFlagsDisableParity = 0x08000000, + kCDBFlagsNoDisconnect = 0x10000000, + kCDBFlagsNegotiateSDTR = 0x20000000, + kCDBFlagsNegotiateWDTR = 0x40000000, +// = 0x80000000, // reserved +}; + +enum SCSIAdapterStatus +{ + kSCSIAdapterStatusSuccess = 0, + kSCSIAdapterStatusProtocolError, + kSCSIAdapterStatusSelectionTimeout, + kSCSIAdapterStatusMsgReject, + kSCSIAdapterStatusParityError, + kSCSIAdapterStatusOverrun, +}; + +typedef struct SCSIResults +{ + IOReturn returnCode; + + UInt32 bytesTransferred; + + enum SCSIAdapterStatus adapterStatus; + UInt8 scsiStatus; + + Boolean requestSenseDone; + UInt32 requestSenseLength; + + UInt32 reserved[16]; +} SCSIResults; + + +#endif diff --git a/iokit/IOKit/scsi/scsi-device/SCSIDevice.h b/iokit/IOKit/scsi/scsi-device/SCSIDevice.h new file mode 100644 index 000000000..c8e019d0a --- /dev/null +++ b/iokit/IOKit/scsi/scsi-device/SCSIDevice.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * SCSIDevice.h + * + */ + +#ifndef _SCSIDEVICE_H +#define _SCSIDEVICE_H + +#define kDefaultInquirySize 255 + +typedef struct SCSITargetLun +{ + UInt8 target; + UInt8 lun; + UInt8 reserved[2]; +} SCSITargetLun; + +typedef struct SCSILunParms +{ + Boolean disableDisconnect; + + UInt32 reserved[16]; +} SCSILunParms; + +typedef struct SCSITargetParms +{ + UInt32 transferPeriodpS; + UInt32 transferOffset; + UInt32 transferWidth; + UInt32 transferOptions; + + Boolean enableTagQueuing; + Boolean disableParity; + + UInt32 reserved[16]; +} SCSITargetParms; + +enum SCSITransferOptions +{ + kSCSITransferOptionClockDT = 0x00000001, + kSCSITransferOptionQAS = 0x00000100, + kSCSITransferOptionIUS = 0x00000200, + kSCSITransferOptionPPR = 0x00000400, +}; + +#define kSCSITransferOptionsSCSI3 (kSCSITransferOptionClockDT | kSCSITransferOptionQAS | kSCSITransferOptionIUS | kSCSITransferOptionPPR) + + +enum SCSIDeviceTimeouts +{ + kSCSITimerIntervalmS = 500, + kSCSIProbeTimeoutmS = 5000, + kSCSIResetIntervalmS = 3000, + kSCSIAbortTimeoutmS = 5000, + kSCSIReqSenseTimeoutmS = 5000, + kSCSIDisableTimeoutmS = 5000, 
+}; + +enum SCSIClientMessage +{ + kSCSIClientMsgNone = 0x00005000, + kSCSIClientMsgDeviceAbort, + kSCSIClientMsgDeviceReset, + kSCSIClientMsgBusReset, + + kSCSIClientMsgDone = 0x80000000, +}; + +enum SCSIQueueType +{ + kQTypeNormalQ = 0, + kQTypeBypassQ = 1, +}; + +enum SCSIQueuePosition +{ + kQPositionTail = 0, + kQPositionHead = 1, +}; + + +#define kSCSIMaxProperties 12 + +#define kSCSIPropertyTarget "SCSI Target" /* OSNumber */ +#define kSCSIPropertyLun "SCSI Lun" /* OSNumber */ +#define kSCSIPropertyIOUnit "IOUnit" /* OSNumber */ +#define kSCSIPropertyDeviceTypeID "SCSI Device Type" /* OSNumber */ +#define kSCSIPropertyRemovableMedia "SCSI Removable Media" /* OSBoolean */ +#define kSCSIPropertyVendorName "SCSI Vendor Name" /* OSString */ +#define kSCSIPropertyProductName "SCSI Product Name" /* OSString */ +#define kSCSIPropertyProductRevision "SCSI Product Revision" /* OSString */ +#define kSCSIPropertyTransferPeriod "SCSI Transfer Period" /* OSNumber */ +#define kSCSIPropertyTransferOffset "SCSI Transfer Offset" /* OSNumber */ +#define kSCSIPropertyTransferWidth "SCSI Transfer Width" /* OSNumber */ +#define kSCSIPropertyTransferOptions "SCSI Transfer Options" /* OSNumber */ +#define kSCSIPropertyCmdQueue "SCSI CmdQueue Enabled" /* OSNumber */ + +#endif diff --git a/iokit/IOKit/scsi/scsi-device/SCSIPublic.h b/iokit/IOKit/scsi/scsi-device/SCSIPublic.h new file mode 100644 index 000000000..7c60f5981 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-device/SCSIPublic.h @@ -0,0 +1,291 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * SCSIPublic.h + * + */ + +#ifndef _SCSIPUBLIC_H +#define _SCSIPUBLIC_H + +typedef struct _SCSIInquiry +{ + unsigned char devType; /* 0 Device type, */ + unsigned char devTypeMod; /* 1 Device type modifier */ + unsigned char version; /* 2 ISO/ECMA/ANSI version */ + unsigned char format; /* 3 Response data format */ + unsigned char length; /* 4 Additional Length */ + unsigned char reserved5; /* 5 Reserved */ + unsigned char reserved6; /* 6 Reserved */ + unsigned char flags; /* 7 Capability flags */ + unsigned char vendorName[8]; /* 8-15 Vendor-specific */ + unsigned char productName[16]; /* 16-31 Product id */ + unsigned char productRevision[4]; /* 32-35 Product revision */ + unsigned char vendorSpecific[20]; /* 36-55 Vendor stuff */ + unsigned char scsi3Options; /* 56 SCSI-3 options */ + unsigned char moreReserved[39]; /* 57-95 Reserved */ +} SCSIInquiry; + +/* + * These are device type qualifiers. We need them to distinguish between "unknown" + * and "missing" devices. 
+ */ +enum +{ + kSCSIDevTypeQualifierConnected = 0x00, /* Exists and is connected */ + kSCSIDevTypeQualifierNotConnected = 0x20, /* Logical unit exists */ + kSCSIDevTypeQualifierReserved = 0x40, + kSCSIDevTypeQualifierMissing = 0x60, /* No such logical unit */ + kSCSIDevTypeQualifierVendorSpecific = 0x80, /* Non-standardized */ + kSCSIDevTypeQualifierMask = 0xE0, +}; + +enum +{ + kSCSIDevTypeModRemovable = 0x80, /* Device has removable media */ +}; + +enum _SCSIDevFlags +{ + kSCSIDevCapRelAdr = 0x80, + kSCSIDevCapWBus32 = 0x40, + kSCSIDevCapWBus16 = 0x20, + kSCSIDevCapSync = 0x10, + kSCSIDevCapLinked = 0x08, + kSCSIDevCapCmdQue = 0x02, + kSCSIDevCapSftRe = 0x01, +}; + +typedef struct _SCSISenseData +{ + unsigned char errorCode; /* 0 Result validity */ + unsigned char segmentNumber; /* 1 Segment number */ + unsigned char senseKey; /* 2 Sense code, flags */ + unsigned char info[4]; /* 3-6 Sense-key specific */ + unsigned char additionalSenseLength; /* 7 Sense length info */ + unsigned char reservedForCopy[4]; /* 8-11 Sense-key specific */ + unsigned char additionalSenseCode; /* 12 What kind of error */ + unsigned char additionalSenseQualifier; /* 13 More error info */ + unsigned char fruCode; /* 14 Field replacable */ + unsigned char senseKeySpecific[2]; /* 15-16 Additional info */ + unsigned char additional[101]; /* 17-26 Additional info */ +} SCSISenseData; + +/* + * The high-bit of errorCode signals whether there is a logical + * block. The low value signals whether there is a valid sense + */ +enum _SCSIErrorCode +{ + kSCSISenseHasLBN = 0x80, /* Logical block number set */ + kSCSISenseInfoValid = 0x70, /* Is sense key valid? 
*/ + kSCSISenseInfoMask = 0x70, /* Mask for sense info */ + kSCSISenseCurrentErr = 0x70, /* Error code (byte 0 & 0x7F */ + kSCSISenseDeferredErr = 0x71, /* Error code (byte 0 & 0x7F */ +}; + +/* + * These bits may be set in the sense key + */ +enum _SCSISenseKeyMasks +{ + kSCSISenseKeyMask = 0x0F, + kSCSISenseILI = 0x20, /* Illegal logical Length */ + kSCSISenseEOM = 0x40, /* End of media */ + kSCSISenseFileMark = 0x80, /* End of file mark */ +}; +/* + * SCSI sense codes. (Returned after request sense). + */ +enum _SCSISenseKeys +{ + kSCSISenseNone = 0x00, /* No error */ + kSCSISenseRecoveredErr = 0x01, /* Warning */ + kSCSISenseNotReady = 0x02, /* Device not ready */ + kSCSISenseMediumErr = 0x03, /* Device medium error */ + kSCSISenseHardwareErr = 0x04, /* Device hardware error */ + kSCSISenseIllegalReq = 0x05, /* Illegal request for dev. */ + kSCSISenseUnitAtn = 0x06, /* Unit attention (not err) */ + kSCSISenseDataProtect = 0x07, /* Data protection */ + kSCSISenseBlankCheck = 0x08, /* Tape-specific error */ + kSCSISenseVendorSpecific = 0x09, /* Vendor-specific error */ + kSCSISenseCopyAborted = 0x0a, /* Copy request cancelled */ + kSCSISenseAbortedCmd = 0x0b, /* Initiator aborted cmd. 
*/ + kSCSISenseEqual = 0x0c, /* Comparison equal */ + kSCSISenseVolumeOverflow = 0x0d, /* Write past end mark */ + kSCSISenseMiscompare = 0x0e, /* Comparison failed */ +}; + +enum _SCSIStatus +{ + kSCSIStatusGood = 0x00, + kSCSIStatusCheckCondition = 0x02, + kSCSIStatusConditionMet = 0x04, + kSCSIStatusBusy = 0x08, + kSCSIStatusIntermediate = 0x10, + kSCSIStatusIntermediateMet = 0x0a, + kSCSIStatusReservationConfict = 0x18, + kSCSIStatusCommandTerminated = 0x22, + kSCSIStatusQueueFull = 0x28, +}; + + +enum _SCSIDevTypes +{ + kSCSIDevTypeDirect = 0, /* Hard disk (not CD-ROM) */ + kSCSIDevTypeSequential, /* Magtape or DAT */ + kSCSIDevTypePrinter, /* Printer */ + kSCSIDevTypeProcessor, /* Attached processor */ + kSCSIDevTypeWorm, /* Write-once, read multiple */ + kSCSIDevTypeCDROM, /* CD-ROM */ + kSCSIDevTypeScanner, /* Scanner */ + kSCSIDevTypeOptical, /* Optical disk */ + kSCSIDevTypeChanger, /* Jukebox */ + kSCSIDevTypeComm, /* Communication link */ + kSCSIDevTypeGraphicArts0A, + kSCSIDevTypeGraphicArts0B, + kSCSIDevTypeFirstReserved, /* Reserved sequence start */ + kSCSIDevTypeUnknownOrMissing = 0x1F, + kSCSIDevTypeMask = 0x1F, +}; + +enum _SCSIInqVersion +{ + kSCSIInqVersionSCSI3 = 0x03, +}; + +enum _SCSI3Options +{ + kSCSI3InqOptionIUS = 0x01, + kSCSI3InqOptionQAS = 0x02, + kSCSI3InqOptionClockDT = 0x04, +}; + + +/* + * SCSI command codes. Commands defined as ...6, ...10, ...12, are + * six-byte, ten-byte, and twelve-byte variants of the indicated command. + */ + +/* + * These commands are supported for all devices. 
+ */ +enum _SCSICmds +{ + kSCSICmdChangeDefinition = 0x40, + kSCSICmdCompare = 0x39, + kSCSICmdCopy = 0x18, + kSCSICmdCopyAndVerify = 0x3a, + kSCSICmdInquiry = 0x12, + kSCSICmdLogSelect = 0x4c, + kSCSICmdLogSense = 0x4d, + kSCSICmdModeSelect12 = 0x55, + kSCSICmdModeSelect6 = 0x15, + kSCSICmdModeSense12 = 0x5a, + kSCSICmdModeSense6 = 0x1a, + kSCSICmdReadBuffer = 0x3c, + kSCSICmdRecvDiagResult = 0x1c, + kSCSICmdRequestSense = 0x03, + kSCSICmdSendDiagnostic = 0x1d, + kSCSICmdTestUnitReady = 0x00, + kSCSICmdWriteBuffer = 0x3b, + +/* + * These commands are supported by direct-access devices only. + */ + kSCSICmdFormatUnit = 0x04, + kSCSICmdLockUnlockCache = 0x36, + kSCSICmdPrefetch = 0x34, + kSCSICmdPreventAllowRemoval = 0x1e, + kSCSICmdRead6 = 0x08, + kSCSICmdRead10 = 0x28, + kSCSICmdReadCapacity = 0x25, + kSCSICmdReadDefectData = 0x37, + kSCSICmdReadLong = 0x3e, + kSCSICmdReassignBlocks = 0x07, + kSCSICmdRelease = 0x17, + kSCSICmdReserve = 0x16, + kSCSICmdRezeroUnit = 0x01, + kSCSICmdSearchDataEql = 0x31, + kSCSICmdSearchDataHigh = 0x30, + kSCSICmdSearchDataLow = 0x32, + kSCSICmdSeek6 = 0x0b, + kSCSICmdSeek10 = 0x2b, + kSCSICmdSetLimits = 0x33, + kSCSICmdStartStopUnit = 0x1b, + kSCSICmdSynchronizeCache = 0x35, + kSCSICmdVerify = 0x2f, + kSCSICmdWrite6 = 0x0a, + kSCSICmdWrite10 = 0x2a, + kSCSICmdWriteAndVerify = 0x2e, + kSCSICmdWriteLong = 0x3f, + kSCSICmdWriteSame = 0x41, + +/* + * These commands are supported by sequential devices. + */ + kSCSICmdRewind = 0x01, + kSCSICmdWriteFilemarks = 0x10, + kSCSICmdSpace = 0x11, + kSCSICmdLoadUnload = 0x1B, +/* + * ANSI SCSI-II for CD-ROM devices. + */ + kSCSICmdReadCDTableOfContents = 0x43, +}; + +/* + * Message codes (for Msg In and Msg Out phases). 
+ */ +enum _SCSIMsgs +{ + kSCSIMsgAbort = 0x06, + kSCSIMsgAbortTag = 0x0d, + kSCSIMsgBusDeviceReset = 0x0c, + kSCSIMsgClearQueue = 0x0e, + kSCSIMsgCmdComplete = 0x00, + kSCSIMsgDisconnect = 0x04, + kSCSIMsgIdentify = 0x80, + kSCSIMsgIgnoreWideResdue = 0x23, + kSCSIMsgInitiateRecovery = 0x0f, + kSCSIMsgInitiatorDetectedErr = 0x05, + kSCSIMsgLinkedCmdComplete = 0x0a, + kSCSIMsgLinkedCmdCompleteFlag = 0x0b, + kSCSIMsgParityErr = 0x09, + kSCSIMsgRejectMsg = 0x07, + kSCSIMsgModifyDataPtr = 0x00, /* Extended msg */ + kSCSIMsgNop = 0x08, + kSCSIMsgHeadOfQueueTag = 0x21, /* Two byte msg */ + kSCSIMsgOrderedQueueTag = 0x22, /* Two byte msg */ + kSCSIMsgSimpleQueueTag = 0x20, /* Two byte msg */ + kSCSIMsgReleaseRecovery = 0x10, + kSCSIMsgRestorePointers = 0x03, + kSCSIMsgSaveDataPointers = 0x02, + kSCSIMsgSyncXferReq = 0x01, /* Extended msg */ + kSCSIMsgWideDataXferReq = 0x03, /* Extended msg */ + kSCSIMsgTerminateIOP = 0x11, + kSCSIMsgExtended = 0x01, + kSCSIMsgEnableDisconnectMask = 0x40, +}; + +#endif diff --git a/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelCommand.h b/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelCommand.h new file mode 100644 index 000000000..21a72a1c3 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelCommand.h @@ -0,0 +1,206 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSICommand.h + * + */ +#ifndef _IOSCSIPARALLELCOMMAND_H +#define _IOSCSIPARALLELCOMMAND_H + +class IOSCSIParallelDevice; +class IOSCSIParallelCommand; +class IOSyncer; + +class IOSCSIParallelCommand : public IOSCSICommand +{ + OSDeclareDefaultStructors(IOSCSIParallelCommand) + + friend class IOSCSIParallelController; + friend class IOSCSIParallelDevice; + +/*------------------Methods provided to IOCDBCommand users -------------------------*/ +public: + /* + * Set/Get IOMemoryDescriptor object to I/O data buffer or sense data buffer. + */ + void setPointers( IOMemoryDescriptor *desc, + UInt32 transferCount, + bool isWrite, + bool isSense = false ); + + void getPointers( IOMemoryDescriptor **desc, + UInt32 *transferCount, + bool *isWrite, + bool isSense = false ); + /* + * Set/Get command timeout (mS) + */ + void setTimeout( UInt32 timeoutmS ); + UInt32 getTimeout(); + + /* + * Set async callback routine. Specifying no parameters indicates synchronous call. + */ + void setCallback( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + + /* + * Set/Get CDB information. (Generic CDB version) + */ + void setCDB( CDBInfo *cdbInfo ); + void getCDB( CDBInfo *cdbInfo ); + + /* + * Get CDB results. (Generic CDB version) + */ + IOReturn getResults( CDBResults *cdbResults ); + + /* + * Get CDB Device this command is directed to. 
+ */ + IOCDBDevice *getDevice( IOCDBDevice *deviceType ); + + /* + * Command verbs + */ + bool execute( UInt32 *sequenceNumber = 0 ); + void abort( UInt32 sequenceNumber ); + void complete(); + + /* + * Get pointers to client and command data. + */ + void *getCommandData(); + void *getClientData(); + + /* + * Get unique sequence number assigned to command. + */ + UInt32 getSequenceNumber(); + +/*------------------ Additional methods provided to IOSCSICommand users -------------------------*/ +public: + /* + * Set/Get CDB information. (SCSI specific version). + */ + void setCDB( SCSICDBInfo *scsiCmd ); + void getCDB( SCSICDBInfo *scsiCmd ); + + /* + * Get/Set CDB results. (SCSI specific version). + */ + IOReturn getResults( SCSIResults *results ); + void setResults( SCSIResults *results, SCSINegotiationResults *negotiationResults ); + + /* + * Get SCSI Device this command is directed to. + */ + IOSCSIParallelDevice *getDevice( IOSCSIParallelDevice *deviceType ); + + + /* + * Get SCSI Target/Lun for this command. + */ + void getTargetLun( SCSITargetLun *targetLun ); + + /* + * Get/Set queue routing for this command. + */ + void setQueueInfo( UInt32 forQueueType = kQTypeNormalQ, UInt32 forQueuePosition = kQPositionTail ); + void getQueueInfo( UInt32 *forQueueType, UInt32 *forQueuePosition = 0 ); + + /* + * Get command type / Get original command. + * + * These methods are provided for the controller class to identify and relate commands. + * They are not usually of interest to the client side. + */ + UInt32 getCmdType(); + IOSCSIParallelCommand *getOriginalCmd(); + + /* + * Set to blank state, call prior to re-use of this object. 
+ */ + void zeroCommand(); + +/*------------------Methods private to the IOSCSICommand class-------------------------*/ +public: + void free(); + + IOSCSIDevice *getDevice( IOSCSIDevice *deviceType ); + void setResults( SCSIResults *results ); + +private: + IOReturn adapterStatusToIOReturnCode( SCSIAdapterStatus adapterStatus ); + IOReturn scsiStatusToIOReturnCode( UInt8 scsiStatus ); + +private: + SCSICommandType cmdType; + + IOSCSIParallelController *controller; + IOSCSIParallelDevice *device; + + queue_head_t *list; + queue_chain_t nextCommand; + + SCSICDBInfo scsiCmd; + SCSIResults results; + + UInt32 timeout; + UInt32 timer; + + UInt8 queueType; + UInt8 queuePosition; + + IOMemoryDescriptor *xferDesc; + UInt32 xferCount; + UInt32 xferDirection; + + UInt32 senseLength; + IOMemoryDescriptor *senseData; + + IOSCSIParallelCommand *origCommand; + + union + { + struct + { + UInt32 reserved; + IOSyncer * lock; + } sync; + struct + { + CallbackFn callback; + void *target; + void *refcon; + } async; + } completionInfo; + + UInt32 dataSize; + void *dataArea; + void *commandPrivateData; + void *clientData; + + UInt32 sequenceNumber; +}; + +#endif diff --git a/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelController.h b/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelController.h new file mode 100644 index 000000000..feaa8ecdb --- /dev/null +++ b/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelController.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSIController.h + * + * Methods in this header list the methods an SCSI controller driver must implement. + */ +#ifndef _IOSCSIPARALLELCONTROLLER_H +#define _IOSCSIPARALLELCONTROLLER_H + +#include +#include +#include +#include +#include +#include + +class IOSCSIParallelDevice; +class IOSCSIParallelCommand; + +class IOSCSIParallelController : public IOService +{ + OSDeclareDefaultStructors(IOSCSIParallelController) + + friend class IOSCSIParallelCommand; + friend class IOSCSIParallelDevice; + +/*------------------Methods provided by IOSCSIParallelController---------------------------------*/ +public: + bool probeTarget( SCSITargetLun targetLun ); + void reset(); + +protected: + void resetOccurred(); + + void enableCommands(); + void disableCommands(); + void disableCommands( UInt32 disableTimeoutmS ); + + void rescheduleCommand( IOSCSIParallelCommand *forSCSICmd ); + + IOSCSIParallelDevice *findDeviceWithTargetLun( SCSITargetLun targetLun ); + IOSCSIParallelCommand *findCommandWithNexus( SCSITargetLun targetLun, UInt32 tagValue = (UInt32)-1 ); + + void *getTargetData( SCSITargetLun targetLun ); + void *getLunData( SCSITargetLun targetLun ); + + virtual IOWorkLoop *getWorkLoop() const; + + void setCommandLimit( UInt32 commandLimit ); // temp + + +/*------------------Methods the controller subclass must implement-----------------------*/ +protected: + /* + * Initialize controller hardware. 
+ * + * Note: The controller driver's configure() method will be called prior to any other + * methods. If the controller driver returns successfully from this method it + * should be ready to accept any other method call listed. + */ + virtual bool configure( IOService *provider, SCSIControllerInfo *controllerInfo ) = 0; + + /* + * Bus/target commands + * + */ + virtual void executeCommand( IOSCSIParallelCommand *forSCSICmd ) = 0; + virtual void cancelCommand( IOSCSIParallelCommand *forSCSICmd ) = 0; + virtual void resetCommand( IOSCSIParallelCommand *forSCSICmd ) = 0; + +/*------------------Optional methods the controller subclass may implement-----------------------*/ +protected: + /* + * These methods notify the IOSCSIParallelController subclass, that a target or lun is about to be + * probed. The subclass should initialize its per-target or per-lun data when called at these + * methods. If the subclass (for some reason) wants to prevent probing of a target or lun, it + * can return false to the corresponding allocate*() call. 
+ */ + virtual bool allocateTarget( SCSITargetLun targetLun ); + virtual void deallocateTarget( SCSITargetLun targetLun ); + + virtual bool allocateLun( SCSITargetLun targetLun ); + virtual void deallocateLun( SCSITargetLun targetLun ); + + virtual void disableTimeoutOccurred(); + + +/*------------------Methods private to the IOSCSIParallelController class----------------------*/ + +public: + bool start( IOService *provider ); + void free(); + +private: + IOSCSIParallelDevice *createDevice(); + + void initQueues(); + bool scanSCSIBus(); + + bool initTarget( SCSITargetLun targetLun ); + bool initTargetGated( SCSITargetLun *targetLun ); + void releaseTarget( SCSITargetLun targetLun ); + void releaseTargetGated( SCSITargetLun *targetLun ); + bool initDevice( IOSCSIParallelDevice *device ); + bool initDeviceGated( IOSCSIParallelDevice *device ); + void releaseDevice( IOSCSIParallelDevice *device ); + void releaseDeviceGated( IOSCSIParallelDevice *device ); + + + void addDevice( IOSCSIParallelDevice *forDevice ); + void deleteDevice( IOSCSIParallelDevice *forDevice ); + + void timer( IOTimerEventSource *); + + void dispatchRequest(); + void dispatch(); + + bool checkBusReset(); + + void completeCommand( IOSCSIParallelCommand *forSCSICmd ); + + bool createWorkLoop(); + bool configureController(); + + IOSCSIParallelCommand *allocCommand( UInt32 clientDataSize ); + +private: + + UInt32 sequenceNumber; + + UInt32 commandCount; + UInt32 commandLimit; + UInt32 commandLimitSave; + + UInt32 disableTimer; + bool commandDisable; + + UInt32 tagArraySize; + UInt32 *tagArray; + + UInt32 busResetState; + IOSCSIParallelCommand *resetCmd; + UInt32 resetTimer; + + IOSCSIParallelCommand *noDisconnectCmd; + + SCSIControllerInfo controllerInfo; + SCSITarget *targets; + + IOWorkLoop *workLoop; + IOTimerEventSource *timerEvent; + IOInterruptEventSource *dispatchEvent; + + IOCommandGate *controllerGate; + + IOService *provider; +}; + +#endif diff --git 
a/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelDevice.h b/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelDevice.h new file mode 100644 index 000000000..9b72757ac --- /dev/null +++ b/iokit/IOKit/scsi/scsi-parallel/IOSCSIParallelDevice.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * IOSCSIParallelDevice.h + * + * + * Methods in this header provide information about the SCSI device + * the device client driver is submitting the SCSICommand(s) to. + * + * Note: SCSICommand(s) are allocated and freed by methods in this class. 
+ * The remaining methods to setup and submit SCSICommands are defined in + * IOSCSICommand.h + */ + +#ifndef _IOSCSIPARALLELDEVICE_H +#define _IOSCSIPARALLELDEVICE_H + +class IOSCSIParallelController; + +class IOSCSIParallelDevice : public IOSCSIDevice +{ + OSDeclareDefaultStructors(IOSCSIParallelDevice) + + friend class IOSCSIParallelCommand; + friend class IOSCSIParallelController; + +/*------------------Methods provided to IOCDBDevice clients-----------------------*/ +public: + + /* + * Allocate a CDB Command + */ + IOCDBCommand *allocCommand( IOCDBDevice *deviceType, UInt32 clientDataSize = 0 ); + + /* + * Abort all outstanding commands on this device + */ + void abort(); + + /* + * Reset device (also aborts all outstanding commands) + */ + void reset(); + + /* + * Obtain information about this device + */ + void getInquiryData( void *inquiryBuffer, + UInt32 inquiryBufSize, + UInt32 *inquiryDataSize ); + +/*------------------Additional methods provided to IOSCSIDevice clients-----------------------*/ +public: + /* + * Allocate a SCSICommand + */ + IOSCSIParallelCommand *allocCommand( IOSCSIParallelDevice *deviceType, UInt32 clientDataSize = 0 ); + + /* + * Target management commands + */ + bool setTargetParms( SCSITargetParms *targetParms ); + void getTargetParms( SCSITargetParms *targetParms ); + + /* + * Lun management commands + */ + bool setLunParms( SCSILunParms *lunParms ); + void getLunParms( SCSILunParms *lunParms ); + + /* + * Queue management commands + */ + void holdQueue( UInt32 queueType ); + void releaseQueue( UInt32 queueType ); + void flushQueue( UInt32 queueType, IOReturn rc ); + void notifyIdle( void *target = 0, CallbackFn callback = 0, void *refcon = 0 ); + + /* + * + */ + IOWorkLoop *getWorkLoop() const; + +/*------------------Methods private to the IOSCSIDevice class----------------*/ +public: + bool open( IOService *forClient, IOOptionBits options = 0, void *arg = 0 ); + void close( IOService *forClient, IOOptionBits options = 0 ); + 
IOReturn message( UInt32 clientMsg, IOService *forProvider, void *forArg = 0 ); + bool init( IOSCSIParallelController *forController, SCSITargetLun forTargetLun ); + void free(); + + bool matchPropertyTable( OSDictionary * table ); + IOService *matchLocation( IOService * client ); + + IOSCSICommand *allocCommand( IOSCSIDevice *deviceType, UInt32 clientDataSize = 0 ); + +private: + void submitCommand( UInt32 cmdType, IOSCSIParallelCommand *scsiCmd, UInt32 cmdSequenceNumber = 0 ); + void receiveCommand( UInt32 cmdType, IOSCSIParallelCommand *scsiCmd, UInt32 cmdSequenceNumber, void *p3 ); + + IOReturn probeTargetLun(); + bool checkCmdQueEnabled(); + void setupTarget(); + + void dispatchRequest(); + bool dispatch( UInt32 *dispatchAction ); + + void abortAllCommands( SCSICommandType abortCmdType ); + + IOSCSIParallelCommand *findCommandWithNexus( UInt32 tagValue ); + + void abortCommand( IOSCSIParallelCommand *scsiCmd, UInt32 cmdSequenceNumber ); + void completeCommand( IOSCSIParallelCommand *cmd ); + + void checkIdleNotify(); + + void executeCommandDone( IOSCSIParallelCommand *scsiCmd ); + void executeReqSenseDone( IOSCSIParallelCommand *scsiCmd ); + void abortCommandDone( IOSCSIParallelCommand *scsiCmd ); + void cancelCommandDone( IOSCSIParallelCommand *scsiCmd ); + void finishCommand( IOSCSIParallelCommand *scsiCmd ); + + OSDictionary *createProperties(); + bool addToRegistry( OSDictionary *propTable, OSObject *regObj, char *key, bool doRelease = true ); + void stripBlanks( char *d, char *s, UInt32 l ); + + bool checkDeviceQueue( UInt32 *dispatchAction ); + void checkNegotiate( IOSCSIParallelCommand *scsiCmd ); + bool checkTag( IOSCSIParallelCommand *scsiCmd ); + bool checkReqSense(); + bool checkAbortQueue(); + void checkCancelQueue(); + + void negotiationComplete(); + + bool allocTag( UInt32 *tagId ); + void freeTag( UInt32 tagId ); + + void timer(); + + void resetOccurred( SCSIClientMessage clientMsg ); + void resetComplete(); + + void rescheduleCommand( 
IOSCSIParallelCommand *scsiCmd ); + + void addCommand( queue_head_t *list, IOSCSIParallelCommand *scsiCmd ); + void stackCommand( queue_head_t *list, IOSCSIParallelCommand *scsiCmd ); + void deleteCommand( queue_head_t *list, IOSCSIParallelCommand *scsiCmd, IOReturn rc = kIOReturnSuccess ); + IOSCSIParallelCommand *checkCommand( queue_head_t *list ); + IOSCSIParallelCommand *getCommand( queue_head_t *list ); + void moveCommand( queue_head_t *fromList, + queue_head_t *toList, + IOSCSIParallelCommand *scsiCmd, + IOReturn rc = kIOReturnSuccess ); + void moveAllCommands( queue_head_t *fromList, queue_head_t *toList, IOReturn rc = kIOReturnSuccess ); + bool findCommand( queue_head_t *list, IOSCSIParallelCommand *findScsiCmd ); + void purgeAllCommands( queue_head_t *list, IOReturn rc ); + +private: + queue_chain_t nextDevice; + + SCSITargetLun targetLun; + + SCSITarget *target; + + IOSCSIParallelController *controller; + IOCommandGate *deviceGate; + + IOService *client; + IORWLock * clientSem; + + queue_head_t deviceList; + queue_head_t bypassList; + queue_head_t activeList; + queue_head_t abortList; + queue_head_t cancelList; + + SCSICommandType abortCmdPending; + + UInt32 reqSenseState; + UInt32 abortState; + UInt32 cancelState; + UInt32 negotiateState; + + IOSCSIParallelCommand *reqSenseOrigCmd; + + IOSCSIParallelCommand *reqSenseCmd; + IOSCSIParallelCommand *abortCmd; + IOSCSIParallelCommand *cancelCmd; + IOSCSIParallelCommand *probeCmd; + + bool normalQHeld; + bool bypassQHeld; + + bool idleNotifyActive; + CallbackFn idleNotifyCallback; + void *idleNotifyTarget; + void *idleNotifyRefcon; + + UInt32 commandCount; + UInt32 commandLimit; + UInt32 commandLimitSave; + + bool disableDisconnect; + + bool lunAllocated; + + OSNumber *regObjTransferPeriod; + OSNumber *regObjTransferOffset; + OSNumber *regObjTransferWidth; + OSNumber *regObjTransferOptions; + OSNumber *regObjCmdQueue; + + UInt32 *tagArray; + + SCSILunParms lunParmsNew; + + SCSIInquiry *inquiryData; + UInt32 
inquiryDataSize; + + void *devicePrivateData; +}; + +#define kIOSCSIParallelDevice ((IOSCSIParallelDevice *)0) + +#endif diff --git a/iokit/IOKit/scsi/scsi-parallel/Makefile b/iokit/IOKit/scsi/scsi-parallel/Makefile new file mode 100644 index 000000000..bc17ca9e4 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-parallel/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = scsi/scsi-parallel +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" + +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) + +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/scsi/scsi-parallel/SCSIParallelController.h b/iokit/IOKit/scsi/scsi-parallel/SCSIParallelController.h new file mode 100644 index 000000000..f85aa8a02 --- /dev/null +++ b/iokit/IOKit/scsi/scsi-parallel/SCSIParallelController.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * SCSIParallelController.h + * + */ + +#ifndef _SCSIPARALLELCONTROLLER_H +#define _SCSIPARALLELCONTROLLER_H + +class IOSyncer; + +typedef struct SCSIControllerInfo +{ + UInt32 initiatorId; + + UInt32 maxTargetsSupported; + UInt32 maxLunsSupported; + + UInt32 minTransferPeriodpS; + UInt32 maxTransferOffset; + UInt32 maxTransferWidth; + + UInt32 maxCommandsPerController; + UInt32 maxCommandsPerTarget; + UInt32 maxCommandsPerLun; + + UInt32 tagAllocationMethod; + UInt32 maxTags; + + UInt32 targetPrivateDataSize; + UInt32 lunPrivateDataSize; + UInt32 commandPrivateDataSize; + + bool disableCancelCommands; + + UInt32 reserved[64]; + +} SCSIControllerInfo; + +enum SCSITagAllocation +{ + kTagAllocationNone = 0, + kTagAllocationPerLun, + kTagAllocationPerTarget, + kTagAllocationPerController, +}; + +/* + * Private for IOSCSIClass + */ +enum WorkLoopReqType +{ + kWorkLoopInitTarget = 1, + kWorkLoopReleaseTarget, + kWorkLoopInitDevice, + kWorkLoopReleaseDevice, +}; + +enum DispatchAction +{ + kDispatchNextCommand = 1, + kDispatchNextLun, + kDispatchNextTarget, + kDispatchStop, +}; + +typedef struct WorkLoopRequest +{ + WorkLoopReqType type; + IOSyncer * sync; + bool rc; +} WorkLoopRequest; + +#endif + diff --git a/iokit/IOKit/scsi/scsi-parallel/SCSIParallelTarget.h b/iokit/IOKit/scsi/scsi-parallel/SCSIParallelTarget.h new file mode 100644 index 000000000..c27e15387 
--- /dev/null +++ b/iokit/IOKit/scsi/scsi-parallel/SCSIParallelTarget.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * SCSIParallelTarget.h + * + */ + +#ifndef _SCSIPARALLELTARGET_H +#define _SCSIPARALLELTARGET_H + +typedef struct SCSINegotiationResults +{ + IOReturn returnCode; + + UInt32 transferPeriodpS; + UInt32 transferOffset; + UInt32 transferWidth; + UInt32 transferOptions; + +} SCSINegotiationResults; + + +typedef struct SCSITarget +{ + queue_head_t deviceList; + + UInt32 commandCount; + UInt32 commandLimit; + UInt32 commandLimitSave; + + IORWLock * clientSem; + IORWLock * targetSem; + + UInt32 *tagArray; + + UInt32 negotiateState; + SCSINegotiationResults negotiationResult; + + UInt32 state; + + SCSITargetParms targetParmsCurrent; + SCSITargetParms targetParmsNew; + + OSNumber *regObjTransferPeriod; + OSNumber *regObjTransferOffset; + OSNumber *regObjTransferWidth; + OSNumber *regObjTransferOptions; + OSNumber *regObjCmdQueue; + + UInt32 reqSenseCount; + UInt32 reqSenseState; + + void *targetPrivateData; + + bool targetAllocated; + +} SCSITarget; + +enum +{ + kStateIdle, + kStateIssue, + kStatePending, + kStateActive, +}; + +enum _cdbFlagsInternal +{ + kCDBFlagsEnableTagQueuing = 0x80000000, +}; + + +enum SCSICommandType +{ + kSCSICommandNone = 0, + kSCSICommandExecute, + kSCSICommandReqSense, + kSCSICommandAbort, + kSCSICommandAbortAll, + kSCSICommandDeviceReset, + kSCSICommandBusReset, + kSCSICommandCancel, +}; + + +#endif diff --git a/iokit/IOKit/storage/IOApplePartitionScheme.h b/iokit/IOKit/storage/IOApplePartitionScheme.h new file mode 100644 index 000000000..46140bddf --- /dev/null +++ b/iokit/IOKit/storage/IOApplePartitionScheme.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOAPPLEPARTITIONSCHEME_H +#define _IOAPPLEPARTITIONSCHEME_H + +#include + +/* + * Apple Partition Map Definitions + */ + +#pragma pack(2) /* (enable 16-bit struct packing for dpme, DDMap, Block0) */ + +/* Structure constants. */ + +#define DPISTRLEN 32 + +/* Partition map entry, as found in blocks 1 to dpme_map_entries of the disk. */ + +typedef struct dpme +{ + UInt16 dpme_signature; /* (unique value for partition entry, 'PM') */ + UInt16 dpme_reserved_1; /* (reserved for future use) */ + UInt32 dpme_map_entries; /* (number of partition entries) */ + UInt32 dpme_pblock_start; /* (physical block start of partition) */ + UInt32 dpme_pblocks; /* (physical block count of partition) */ + char dpme_name[DPISTRLEN]; /* (name of partition) */ + char dpme_type[DPISTRLEN]; /* (type of partition, eg. 
Apple_HFS) */ + UInt32 dpme_lblock_start; /* (logical block start of partition) */ + UInt32 dpme_lblocks; /* (logical block count of partition) */ + UInt32 dpme_flags; /* (partition flags, see defines below) */ + UInt32 dpme_boot_block; /* (logical block start of boot code) */ + UInt32 dpme_boot_bytes; /* (byte count of boot code) */ + UInt8 * dpme_load_addr; /* (load address in memory of boot code) */ + UInt8 * dpme_load_addr_2; /* (reserved for future use) */ + UInt8 * dpme_goto_addr; /* (jump address in memory of boot code) */ + UInt8 * dpme_goto_addr_2; /* (reserved for future use) */ + UInt32 dpme_checksum; /* (checksum of boot code) */ + UInt8 dpme_process_id[16]; /* (processor type) */ + UInt32 dpme_reserved_2[32]; /* (reserved for future use) */ + UInt32 dpme_reserved_3[62]; /* (reserved for future use) */ +} DPME; + +/* Driver descriptor map entry. */ + +typedef struct DDMap +{ + UInt32 ddBlock; /* (driver's block start, sbBlkSize-blocks) */ + UInt16 ddSize; /* (driver's block count, 512-blocks) */ + UInt16 ddType; /* (driver's system type) */ +} DDMap; + +/* Driver descriptor map, as found in block zero of the disk. */ + +typedef struct Block0 +{ + UInt16 sbSig; /* (unique value for block zero, 'ER') */ + UInt16 sbBlkSize; /* (block size for this device) */ + UInt32 sbBlkCount; /* (block count for this device) */ + UInt16 sbDevType; /* (device type) */ + UInt16 sbDevId; /* (device id) */ + UInt32 sbDrvrData; /* (driver data) */ + UInt16 sbDrvrCount; /* (driver descriptor count) */ + DDMap sbDrvrMap[8]; /* (driver descriptor table) */ + UInt8 sbReserved[430]; /* (reserved for future use) */ +} Block0; + +/* Partition map signature (sbSig). */ + +#define BLOCK0_SIGNATURE 0x4552 + +/* Partition map entry signature (dpme_signature). */ + +#define DPME_SIGNATURE 0x504D + +/* Partition map entry flags (dpme_flags). 
*/ + +#define DPME_FLAGS_VALID 0x00000001 /* (bit 0) */ +#define DPME_FLAGS_ALLOCATED 0x00000002 /* (bit 1) */ +#define DPME_FLAGS_IN_USE 0x00000004 /* (bit 2) */ +#define DPME_FLAGS_BOOTABLE 0x00000008 /* (bit 3) */ +#define DPME_FLAGS_READABLE 0x00000010 /* (bit 4) */ +#define DPME_FLAGS_WRITABLE 0x00000020 /* (bit 5) */ +#define DPME_FLAGS_OS_PIC_CODE 0x00000040 /* (bit 6) */ +#define DPME_FLAGS_OS_SPECIFIC_2 0x00000080 /* (bit 7) */ +#define DPME_FLAGS_OS_SPECIFIC_1 0x00000100 /* (bit 8) */ +#define DPME_FLAGS_RESERVED_2 0xFFFFFE00 /* (bit 9..31) */ + +#pragma options align=reset /* (reset to default struct packing) */ + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include + +/* + * Class + */ + +class IOApplePartitionScheme : public IOPartitionScheme +{ + OSDeclareDefaultStructors(IOApplePartitionScheme); + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + OSSet * _partitions; /* (set of media objects representing partitions) */ + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(void); + + /* + * Scan the provider media for an Apple partition map. Returns the set + * of media objects representing each of the partitions (the retain for + * the set is passed to the caller), or null should no partition map be + * found. The default probe score can be adjusted up or down, based on + * the confidence of the scan. + */ + + virtual OSSet * scan(SInt32 * score); + + /* + * Ask whether the given partition appears to be corrupt. A partition that + * is corrupt will cause the failure of the Apple partition map recognition + * altogether. + */ + + virtual bool isPartitionCorrupt( dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ); + + /* + * Ask whether the given partition appears to be invalid. A partition that + * is invalid will cause it to be skipped in the scan, but will not cause a + * failure of the Apple partition map recognition. 
+ */ + + virtual bool isPartitionInvalid( dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ); + + /* + * Instantiate a new media object to represent the given partition. + */ + + virtual IOMedia * instantiateMediaObject( dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ); + + /* + * Allocate a new media object (called from instantiateMediaObject). + */ + + virtual IOMedia * instantiateDesiredMediaObject( + dpme * partition, + UInt32 partitionID, + UInt32 partitionBlockSize ); + + /* + * Attach the given media object to the device tree plane. + */ + + virtual bool attachMediaObjectToDeviceTree(IOMedia * media); + + /* + * Detach the given media object from the device tree plane. + */ + + virtual void detachMediaObjectFromDeviceTree(IOMedia * media); + +public: + + /* + * Initialize this object's minimal state. + */ + + virtual bool init(OSDictionary * properties = 0); + + /* + * Determine whether the provider media contains an Apple partition map. + */ + + virtual IOService * probe(IOService * provider, SInt32 * score); + + /* + * Publish the new media objects which represent our partitions. + */ + + virtual bool start(IOService * provider); + + /* + * Clean up after the media objects we published before terminating. 
+ */ + + virtual void stop(IOService * provider); + + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 0); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 1); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 2); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 3); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 4); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 5); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 6); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 7); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 8); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 9); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 10); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 11); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 12); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 13); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 14); + OSMetaClassDeclareReservedUnused(IOApplePartitionScheme, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOAPPLEPARTITIONSCHEME_H */ diff --git a/iokit/IOKit/storage/IOBlockStorageDevice.h b/iokit/IOKit/storage/IOBlockStorageDevice.h new file mode 100644 index 000000000..0d7fc0c86 --- /dev/null +++ b/iokit/IOKit/storage/IOBlockStorageDevice.h @@ -0,0 +1,411 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOBlockStorageDevice.h + * + * This class is the protocol for generic block storage functionality, independent + * of the physical connection protocol (e.g. SCSI, ATA, USB). + * + * A subclass implements relay methods that translate our requests into + * calls to a protocol- and device-specific provider. + */ + +/*! @language embedded-c++ */ + +#ifndef _IOBLOCKSTORAGEDEVICE_H +#define _IOBLOCKSTORAGEDEVICE_H + +#include +#include +#include +#include + +/*! + * @defined kIOMessageMediaStateHasChanged + * The message ID which indicates that the media state has changed. The message + * is passed to all clients of the IOBlockStorageDevice via the message() method. + * The argument that is passed along with this message is an IOMediaState value. + * + * Devices that aren't capable of detecting media state changes indicate this in + * the reportPollRequirements() method. + */ +#define kIOMessageMediaStateHasChanged iokit_family_msg(sub_iokit_block_storage, 1) + +/* Property used for matching, so the generic driver gets the nub it wants. */ +/*! + * @defined kIOBlockStorageDeviceTypeKey + * The name of the property tested for nub type matching by the generic block + * storage driver. + */ +#define kIOBlockStorageDeviceTypeKey "device-type" +/*! + * @defined kIOBlockStorageDeviceTypeGeneric + * A character string used for nub matching. + */ +#define kIOBlockStorageDeviceTypeGeneric "Generic" + +class IOMemoryDescriptor; + +/*! 
+ * @class + * IOBlockStorageDevice : public IOService + * @abstract + * "Impedance-matcher" class to connect Generic device driver to Transport Driver. + * @discussion + * The IOBlockStorageDevice class exports the generic block storage protocol, + * forwarding all requests to its provider (the Transport Driver). + * Though the nub does no actual processing of requests, it is necessary + * in a C++ environment. The Transport Driver can be of any type, as + * long as it inherits from IOService. Because Transport Drivers needn't + * derive from a type known to IOBlockStorageDriver, it isn't possible for + * IOBlockStorageDriver to include the appropriate header file to allow direct + * communication with the Transport Driver. Thus we achieve polymorphism by + * having the Transport Driver instantiate a subclass of IOBlockStorageDevice. + * A typical implementation for a concrete subclass of IOBlockStorageDevice + * simply relays all methods to its provider (the Transport Driver). + * + * All pure-virtual functions must be implemented by the Transport Driver, which + * is responsible for instantiating the Nub. + */ + +class IOBlockStorageDevice : public IOService { + + OSDeclareAbstractStructors(IOBlockStorageDevice) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + +public: + + /* Overrides from IORegistryEntry */ + + /*! + * @function init + * @discussion + * This function is overridden so that IOBlockStorageDevice can set a + * property, used by IOBlockStorageDriver for matching. Since the concrete + * subclass of IOBlockStorageDevice can be of any class type, the property + * is used for matching. + * + * This function is usually not overridden by developers. + */ + virtual bool init(OSDictionary * properties); + + /* --- A subclass must implement the the following methods: --- */ + + /*! + * @function doAsyncReadWrite + * @abstract + * Start an asynchronous read or write operation. 
+ * @param buffer + * An IOMemoryDescriptor describing the data-transfer buffer. The data direction + * is contained in the IOMemoryDescriptor. Responsiblity for releasing the descriptor + * rests with the caller. + * @param block + * The starting block number of the data transfer. + * @param nblks + * The integral number of blocks to be transferred. + * @param completion + * The completion routine to call once the data transfer is complete. + */ + + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion) = 0; + + /*! + * @function doSyncReadWrite + * @abstract + * Perform a synchronous read or write operation. + * @param buffer + * An IOMemoryDescriptor describing the data-transfer buffer. The data direction + * is contained in the IOMemoryDescriptor. Responsiblity for releasing the descriptor + * rests with the caller. + * @param block + * The starting block number of the data transfer. + * @param nblks + * The integral number of blocks to be transferred. + */ + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks) = 0; + + /*! + * @function doEjectMedia + * @abstract + * Eject the media. + */ + virtual IOReturn doEjectMedia(void) = 0; + + /*! + * @function doFormatMedia + * @abstract + * Format the media to the specified byte capacity. + * @discussion + * The specified byte capacity must be one supported by the device. + * Supported capacities can be obtained by calling doGetFormatCapacities. + * @param byteCapacity + * The byte capacity to which the device is to be formatted, if possible. + */ + virtual IOReturn doFormatMedia(UInt64 byteCapacity) = 0; + + /*! + * @function doGetFormatCapacities + * @abstract + * Return the allowable formatting byte capacities. + * @discussion + * This function returns the supported byte capacities for the device. + * @param capacities + * Pointer for returning the list of capacities. 
+ * @param capacitiesMaxCount + * The number of capacity values returned in "capacities." + */ + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const = 0; + + /*! + * @function doLockUnlockMedia + * @abstract + * Lock or unlock the (removable) media in the drive. + * @discussion + * This method should only be called if the media is known to be removable. + * @param doLock + * True to lock the media, False to unlock. + */ + virtual IOReturn doLockUnlockMedia(bool doLock) = 0; + + /*! + * @function doSynchronizeCache + * @abstract + * Force data blocks in the hardware's buffer to be flushed to the media. + * @discussion + * This method should only be called if the media is writable. + */ + virtual IOReturn doSynchronizeCache(void) = 0; + + /*! + * @function getVendorString + * @abstract + * Return Vendor Name string for the device. + * @result + * A pointer to a static character string. + */ + virtual char * getVendorString(void) = 0; + + /*! + * @function getProductString + * @abstract + * Return Product Name string for the device. + * @result + * A pointer to a static character string. + */ + virtual char * getProductString(void) = 0; + + /*! + * @function getRevisionString + * @abstract + * Return Product Revision string for the device. + * @result + * A pointer to a static character string. + */ + virtual char * getRevisionString(void) = 0; + + /*! + * @function getAdditionalDeviceInfoString + * @abstract + * Return additional informational string for the device. + * @result + * A pointer to a static character string. + */ + virtual char * getAdditionalDeviceInfoString(void) = 0; + + /*! + * @function reportBlockSize + * @abstract + * Report the block size for the device, in bytes. + * @param blockSize + * Pointer to returned block size value. + */ + virtual IOReturn reportBlockSize(UInt64 *blockSize) = 0; + + /*! + * @function reportEjectability + * @abstract + * Report if the media is ejectable under software control. 
+ * @discussion + * This method should only be called if the media is known to be removable. + * @param isEjectable + * Pointer to returned result. True indicates the media is ejectable, False indicates + * the media cannot be ejected under software control. + */ + virtual IOReturn reportEjectability(bool *isEjectable) = 0; + + /*! + * @function reportLockability + * @abstract + * Report if the media is lockable under software control. + * @discussion + * This method should only be called if the media is known to be removable. + * @param isLockable + * Pointer to returned result. True indicates the media can be locked in place; False + * indicates the media cannot be locked by software. + */ + virtual IOReturn reportLockability(bool *isLockable) = 0; + + /*! + * @function reportMaxReadTransfer + * @abstract + * Report the maximum allowed byte transfer for read operations. + * @discussion + * Some devices impose a maximum data transfer size. Because this limit + * may be determined by the size of a block-count field in a command, the limit may + * depend on the block size of the transfer. + * @param blockSize + * The block size desired for the transfer. + * @param max + * Pointer to returned result. + */ + virtual IOReturn reportMaxReadTransfer (UInt64 blockSize,UInt64 *max) = 0; + + /*! + * @function reportMaxWriteTransfer + * @abstract + * Report the maximum allowed byte transfer for write operations. + * @discussion + * Some devices impose a maximum data transfer size. Because this limit + * may be determined by the size of a block-count field in a command, the limit may + * depend on the block size of the transfer. + * @param blockSize + * The block size desired for the transfer. + * @param max + * Pointer to returned result. + */ + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max) = 0; + + /*! + * @function reportMaxValidBlock + * @abstract + * Report the highest valid block for the device. 
+ * @param maxBlock + * Pointer to returned result + */ + virtual IOReturn reportMaxValidBlock(UInt64 *maxBlock) = 0; + + /*! + * @function reportMediaState + * @abstract + * Report the device's media state. + * @discussion + * This method reports whether we have media in the drive or not, and + * whether the state has changed from the previously reported state. + * + * A result of kIOReturnSuccess is always returned if the test for media is successful, + * regardless of media presence. The mediaPresent result should be used to determine + * whether media is present or not. A return other than kIOReturnSuccess indicates that + * the Transport Driver was unable to interrogate the device. In this error case, the + * outputs mediaState and changedState will *not* be stored. + * @param mediaPresent Pointer to returned media state. True indicates media is present + * in the device; False indicates no media is present. + * @param changedState Pointer to returned result. True indicates a change of state since + * prior calls, False indicates that the state has not changed. + */ + virtual IOReturn reportMediaState(bool *mediaPresent,bool *changedState) = 0; + + /*! + * @function reportPollRequirements + * @abstract + * Report if it's necessary to poll for media insertion, and if polling is expensive. + * @discussion + * This method reports whether the device must be polled to detect media + * insertion, and whether a poll is expensive to perform. + * + * The term "expensive" typically implies a device that must be spun-up to detect media, + * as on a PC floppy. Most devices can detect media inexpensively. + * @param pollRequired + * Pointer to returned result. True indicates that polling is required; False indicates + * that polling is not required to detect media. + * @param pollIsExpensive + * Pointer to returned result. True indicates that the polling operation is expensive; + * False indicates that the polling operation is cheap. 
+ */ + virtual IOReturn reportPollRequirements(bool *pollRequired, + bool *pollIsExpensive) = 0; + + /*! + * @function reportRemovability + * @abstract + * Report whether the media is removable or not. + * @discussion + * This method reports whether the media is removable, but it does not + * provide detailed information regarding software eject or lock/unlock capability. + * @param isRemovable + * Pointer to returned result. True indicates that the media is removable; False + * indicates the media is not removable. + */ + virtual IOReturn reportRemovability(bool *isRemovable) = 0; + + /*! + * @function reportWriteProtection + * @abstract + * Report whether the media is write-protected or not. + * @param isWriteProtected + * Pointer to returned result. True indicates that the media is write-protected (it + * cannot be written); False indicates that the media is not write-protected (it + * is permissible to write). + */ + virtual IOReturn reportWriteProtection(bool *isWriteProtected) = 0; + + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 0); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 1); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 2); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 3); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 4); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 5); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 6); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 7); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 8); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 9); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 10); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 11); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 12); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 13); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 14); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 15); + 
OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 16); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 17); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 18); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 19); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 20); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 21); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 22); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 23); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 24); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 25); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 26); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 27); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 28); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 29); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 30); + OSMetaClassDeclareReservedUnused(IOBlockStorageDevice, 31); +}; +#endif diff --git a/iokit/IOKit/storage/IOBlockStorageDriver.h b/iokit/IOKit/storage/IOBlockStorageDriver.h new file mode 100644 index 000000000..f02dc1d96 --- /dev/null +++ b/iokit/IOKit/storage/IOBlockStorageDriver.h @@ -0,0 +1,1320 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IOBlockStorageDriver + * @abstract + * This header contains the IOBlockStorageDriver class definition. + */ + +#ifndef _IOBLOCKSTORAGEDRIVER_H +#define _IOBLOCKSTORAGEDRIVER_H + +/*! + * @defined kIOBlockStorageDriverClass + * @abstract + * kIOBlockStorageDriverClass is the name of the IOBlockStorageDriver class. + * @discussion + * kIOBlockStorageDriverClass is the name of the IOBlockStorageDriver class. + */ + +#define kIOBlockStorageDriverClass "IOBlockStorageDriver" + +/*! + * @defined kIOBlockStorageDriverStatisticsKey + * @abstract + * This property holds a table of numeric values describing the driver's + * operating statistics. + * @discussion + * This property holds a table of numeric values describing the driver's + * operating statistics. The table is an OSDictionary, where each entry + * describes one given statistic. + */ + +#define kIOBlockStorageDriverStatisticsKey "Statistics" + +/*! + * @defined kIOBlockStorageDriverStatisticsBytesReadKey + * @abstract + * This property describes the number of bytes read since the block storage + * driver was instantiated. It is one of the statistic entries listed under + * the top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of bytes read since the block storage + * driver was instantiated. It is one of the statistic entries listed under + * the top-level kIOBlockStorageDriverStatisticsKey property table. It has + * an OSNumber value. 
+ */ + +#define kIOBlockStorageDriverStatisticsBytesReadKey "Bytes (Read)" + +/*! + * @defined kIOBlockStorageDriverStatisticsBytesWrittenKey + * @abstract + * This property describes the number of bytes written since the block storage + * driver was instantiated. It is one of the statistic entries listed under the + * top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of bytes written since the block storage + * driver was instantiated. It is one of the statistic entries listed under the + * top-level kIOBlockStorageDriverStatisticsKey property table. It has an + * OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsBytesWrittenKey "Bytes (Write)" + +/*! + * @defined kIOBlockStorageDriverStatisticsReadErrorsKey + * @abstract + * This property describes the number of read errors encountered since the block + * storage driver was instantiated. It is one of the statistic entries listed + * under the top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of read errors encountered since the block + * storage driver was instantiated. It is one of the statistic entries listed + * under the top-level kIOBlockStorageDriverStatisticsKey property table. It + * has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsReadErrorsKey "Errors (Read)" + +/*! + * @defined kIOBlockStorageDriverStatisticsWriteErrorsKey + * @abstract + * This property describes the number of write errors encountered since the + * block storage driver was instantiated. It is one of the statistic entries + * listed under the top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of write errors encountered since the + * block storage driver was instantiated. It is one of the statistic entries + * listed under the top-level kIOBlockStorageDriverStatisticsKey property table. 
+ * It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsWriteErrorsKey "Errors (Write)" + +/*! + * @defined kIOBlockStorageDriverStatisticsLatentReadTimeKey + * @abstract + * This property describes the number of nanoseconds of latency during reads + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. + * @discussion + * This property describes the number of nanoseconds of latency during reads + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsLatentReadTimeKey "Latency Time (Read)" + +/*! + * @defined kIOBlockStorageDriverStatisticsLatentWriteTimeKey + * @abstract + * This property describes the number of nanoseconds of latency during writes + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. + * @discussion + * This property describes the number of nanoseconds of latency during writes + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsLatentWriteTimeKey "Latency Time (Write)" + +/*! + * @defined kIOBlockStorageDriverStatisticsReadsKey + * @abstract + * This property describes the number of read operations processed since the + * block storage driver was instantiated. It is one of the statistic entries + * listed under the top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of read operations processed since the + * block storage driver was instantiated. 
It is one of the statistic entries + * listed under the top-level kIOBlockStorageDriverStatisticsKey property table. + * It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsReadsKey "Operations (Read)" + +/*! + * @defined kIOBlockStorageDriverStatisticsWritesKey + * @abstract + * This property describes the number of write operations processed since the + * block storage driver was instantiated. It is one of the statistic entries + * listed under the top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of write operations processed since the + * block storage driver was instantiated. It is one of the statistic entries + * listed under the top-level kIOBlockStorageDriverStatisticsKey property table. + * It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsWritesKey "Operations (Write)" + +/*! + * @defined kIOBlockStorageDriverStatisticsReadRetriesKey + * @abstract + * This property describes the number of read retries required since the block + * storage driver was instantiated. It is one of the statistic entries listed + * under the top-level kIOBlockStorageDriverStatisticsKey property table. + * @discussion + * This property describes the number of read retries required since the block + * storage driver was instantiated. It is one of the statistic entries listed + * under the top-level kIOBlockStorageDriverStatisticsKey property table. It + * has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsReadRetriesKey "Retries (Read)" + +/*! + * @defined kIOBlockStorageDriverStatisticsWriteRetriesKey + * @abstract + * This property describes the number of write retries required since the block + * storage driver was instantiated. It is one of the statistic entries listed + * under the top-level kIOBlockStorageDriverStatisticsKey property table. It + * has an OSNumber value. 
+ * @discussion + * This property describes the number of write retries required since the block + * storage driver was instantiated. It is one of the statistic entries listed + * under the top-level kIOBlockStorageDriverStatisticsKey property table. It + * has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsWriteRetriesKey "Retries (Write)" + +/*! + * @defined kIOBlockStorageDriverStatisticsTotalReadTimeKey + * @abstract + * This property describes the number of nanoseconds spent performing reads + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. + * @discussion + * This property describes the number of nanoseconds spent performing reads + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsTotalReadTimeKey "Total Time (Read)" + +/*! + * @defined kIOBlockStorageDriverStatisticsTotalWriteTimeKey + * @abstract + * This property describes the number of nanoseconds spent performing writes + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. + * @discussion + * This property describes the number of nanoseconds spent performing writes + * since the block storage driver was instantiated. It is one of the statistic + * entries listed under the top-level kIOBlockStorageDriverStatisticsKey + * property table. It has an OSNumber value. + */ + +#define kIOBlockStorageDriverStatisticsTotalWriteTimeKey "Total Time (Write)" + +/*! + * @enum IOMediaState + * @discussion + * The different states that getMediaState() can report. + * @constant kIOMediaStateOffline + * Media is not available. 
+ * @constant kIOMediaStateOnline + * Media is available and ready for operations. + * @constant kIOMediaStateBusy + * Media is available, but not ready for operations. + */ + +typedef UInt32 IOMediaState; + +#define kIOMediaStateOffline 0 +#define kIOMediaStateOnline 1 +#define kIOMediaStateBusy 2 + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include +#include +#include + +/*! + * @class IOBlockStorageDriver + * @abstract + * The IOBlockStorageDriver class is the common base class for generic block + * storage drivers. It matches and communicates via an IOBlockStorageDevice + * interface, and connects to the remainder of the storage framework via the + * IOStorage protocol. + * @discussion + * The IOBlockStorageDriver class is the common base class for generic block + * storage drivers. It matches and communicates via an IOBlockStorageDevice + * interface, and connects to the remainder of the storage framework via the + * IOStorage protocol. It extends the IOStorage protocol by implementing the + * appropriate open and close semantics, deblocking for unaligned transfers, + * polling for ejectable media, locking and ejection policies, media object + * creation and teardown, and statistics gathering and reporting. + * + * Block storage drivers are split into two parts: the generic driver handles + * all generic device issues, independent of the lower-level transport + * mechanism (e.g. SCSI, ATA, USB, FireWire). All storage operations + * at the generic driver level are translated into a series of generic + * device operations. These operations are passed via the IOBlockStorageDevice + * nub to a transport driver, which implements the appropriate + * transport-dependent protocol to execute these operations. + * + * To determine the write-protect state of a device (or media), for + * example, the generic driver would issue a call to the + * Transport Driver's reportWriteProtection method. 
If this were a SCSI + * device, its transport driver would issue a Mode Sense command to + * extract the write-protection status bit. The transport driver then + * reports true or false to the generic driver. + * + * The generic driver therefore has no knowledge of, or involvement + * with, the actual commands and mechanisms used to communicate with + * the device. It is expected that the generic driver will rarely, if + * ever, need to be subclassed to handle device idiosyncrasies; rather, + * the transport driver should be changed via overrides. + * + * A generic driver could be subclassed to create a different type of + * generic device. The generic driver IOCDBlockStorageDriver class is + * a subclass of IOBlockStorageDriver, adding CD functions. + */ + +class IOBlockStorageDriver : public IOStorage +{ + OSDeclareDefaultStructors(IOBlockStorageDriver); + +public: + + /*! + * @enum Statistics + * @discussion + * Indices for the different statistics that getStatistics() can report. + * @constant kStatisticsReads + * Number of read operations thus far. + * @constant kStatisticsBytesRead + * Number of bytes read thus far. + * @constant kStatisticsTotalReadTime + * Nanoseconds spent performing reads thus far. + * @constant kStatisticsLatentReadTime + * Nanoseconds of latency during reads thus far. + * @constant kStatisticsReadRetries + * Number of read retries thus far. + * @constant kStatisticsReadErrors + * Number of read errors thus far. + * @constant kStatisticsWrites + * Number of write operations thus far. + * @constant kStatisticsSingleBlockWrites + * Number of write operations for a single block thus far. + * @constant kStatisticsBytesWritten + * Number of bytes written thus far. + * @constant kStatisticsTotalWriteTime + * Nanoseconds spent performing writes thus far. + * @constant kStatisticsLatentWriteTime + * Nanoseconds of latency during writes thus far. + * @constant kStatisticsWriteRetries + * Number of write retries thus far. 
+ * @constant kStatisticsWriteErrors + * Number of write errors thus far. + */ + + enum Statistics + { + kStatisticsReads, + kStatisticsBytesRead, + kStatisticsTotalReadTime, + kStatisticsLatentReadTime, + kStatisticsReadRetries, + kStatisticsReadErrors, + + kStatisticsWrites, + kStatisticsSingleBlockWrites, + kStatisticsBytesWritten, + kStatisticsTotalWriteTime, + kStatisticsLatentWriteTime, + kStatisticsWriteRetries, + kStatisticsWriteErrors + }; + + static const UInt32 kStatisticsCount = kStatisticsWriteErrors + 1; + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + OSSet * _openClients; + OSNumber * _statistics[kStatisticsCount]; + + /* + * @struct Context + * @discussion + * Context structure for a read/write operation. It describes the block size, + * and where applicable, a block type and block sub-type, for a data transfer, + * as well as the completion information for the original request. Note that + * the block type field is unused in the IOBlockStorageDriver class. + * @field block.size + * Block size for the operation. + * @field block.type + * Block type for the operation. Unused in IOBlockStorageDriver. The default + * value for this field is IOBlockStorageDriver::kBlockTypeStandard. + * @field block.typeSub + * Block sub-type for the operation. Its definition depends on block.type. + * Unused in IOBlockStorageDriver. + * @field original.byteStart + * Starting byte offset for the data transfer. + * @field original.buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @field original.completion + * Completion routine to call once the data transfer is complete. 
+ */ + + struct Context + { + struct + { + UInt32 size; + UInt8 type; + UInt8 typeSub[3]; + } block; + + struct + { + UInt64 byteStart; + IOMemoryDescriptor * buffer; + IOStorageCompletion completion; + } original; + + UInt32 reserved[8]; + }; + + static const UInt8 kBlockTypeStandard = 0x00; + + /* + * Free all of this object's outstanding resources. + * + * This method's implementation is not typically overridden. + */ + + void free(); + + /*! + * @function handleOpen + * @discussion + * The handleOpen method grants or denies permission to access this object + * to an interested client. The argument is an IOStorageAccess value that + * specifies the level of access desired -- reader or reader-writer. + * + * This method can be invoked to upgrade or downgrade the access level for + * an existing client as well. The previous access level will prevail for + * upgrades that fail, of course. A downgrade should never fail. If the + * new access level should be the same as the old for a given client, this + * method will do nothing and return success. In all cases, one, singular + * close-per-client is expected for all opens-per-client received. + * + * This implementation replaces the IOService definition of handleOpen(). + * @param client + * Client requesting the open. + * @param options + * Options for the open. Set to zero. + * @param access + * Access level for the open. Set to kIOStorageAccessReader or + * kIOStorageAccessReaderWriter. + * @result + * Returns true if the open was successful, false otherwise. + */ + + virtual bool handleOpen(IOService * client, + IOOptionBits options, + void * access); + + /*! + * @function handleIsOpen + * @discussion + * The handleIsOpen method determines whether the specified client, or any + * client if none is specified, presently has an open on this object. + * + * This implementation replaces the IOService definition of handleIsOpen(). + * @param client + * Client to check the open state of. 
+ Set to zero to check the open state + * of all clients. + * @result + * Returns true if the client was (or clients were) open, false otherwise. + */ + + virtual bool handleIsOpen(const IOService * client) const; + + /*! + * @function handleClose + * @discussion + * The handleClose method closes the client's access to this object. + * + * This implementation replaces the IOService definition of handleClose(). + * @param client + * Client requesting the close. + * @param options + * Options for the close. Set to zero. + */ + + virtual void handleClose(IOService * client, IOOptionBits options); + + /*! + * @function addToBytesTransferred + * @discussion + * Update the total number of bytes transferred, the total transfer time, + * and the total latency time -- used for statistics. + * + * This method's implementation is not typically overridden. + * @param bytesTransferred + * Number of bytes transferred in this operation. + * @param totalTime + * Nanoseconds spent performing this operation. + * @param latentTime + * Nanoseconds of latency during this operation. + * @param isWrite + * Indicates whether this operation was a write, otherwise it was a read. + */ + + virtual void addToBytesTransferred(UInt64 bytesTransferred, + UInt64 totalTime, + UInt64 latentTime, + bool isWrite); + + /*! + * @function incrementErrors + * @discussion + * Update the total error count -- used for statistics. + * + * This method's implementation is not typically overridden. + * @param isWrite + * Indicates whether this operation was a write, otherwise it was a read. + */ + + virtual void incrementErrors(bool isWrite); + + /*! + * @function incrementRetries + * @discussion + * Update the total retry count -- used for statistics. + * + * This method's implementation is not typically overridden. + * @param isWrite + * Indicates whether this operation was a write, otherwise it was a read. + */ + + virtual void incrementRetries(bool isWrite); + + /*! 
+ * @function allocateContext + * @discussion + * Allocate a context structure for a read/write operation. + * @result + * Context structure. + */ + + virtual Context * allocateContext(); + + /*! + * @function deleteContext + * @discussion + * Delete a context structure from a read/write operation. + * @param context + * Context structure to be deleted. + */ + + virtual void deleteContext(Context * context); + + /*! + * @function prepareRequest + * @discussion + * The prepareRequest method allocates and prepares state for the transfer. + * + * This method is part of a sequence of methods invoked for each read/write + * request. The first is prepareRequest, which allocates and prepares some + * context for the transfer; the second is deblockRequest, which aligns the + * transfer at the media block boundaries; and the third is executeRequest, + * which implements the actual transfer from the block storage device. + * + * This method's implementation is not typically overidden. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void prepareRequest(UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + /*! + * @function deblockRequest + * @discussion + * The deblockRequest method checks to see if the incoming request rests + * on the media's block boundaries, and if not, deblocks it. Deblocking + * involves rounding out the request to the nearest block boundaries and + * transferring the excess bytes into a scratch buffer. + * + * This method is part of a sequence of methods invoked for each read/write + * request. 
+ The first is prepareRequest, which allocates and prepares some + * context for the transfer; the second is deblockRequest, which aligns the + * transfer at the media block boundaries; and the third is executeRequest, + * which implements the actual transfer from the block storage device. + * + * This method's implementation is not typically overridden. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + * @param context + * Additional context information for the data transfer (eg. block size). + */ + + virtual void deblockRequest(UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion, + Context * context); + + /*! + * @function executeRequest + * @discussion + * Execute an asynchronous storage request. The request is guaranteed to be + * block-aligned. + * + * This method is part of a sequence of methods invoked for each read/write + * request. The first is prepareRequest, which allocates and prepares some + * context for the transfer; the second is deblockRequest, which aligns the + * transfer at the media block boundaries; and the third is executeRequest, + * which implements the actual transfer from the block storage device. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + * @param context + * Additional context information for the data transfer (eg. block size). + */ + + virtual void executeRequest(UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion, + Context * context); + + /*! 
+ * @function handleStart + * @discussion + * Prepare the block storage driver for operation. + * + * This is where a media object needs to be created for fixed media, and + * optionally for removable media. + * + * Note that this method is called from within the start() routine; + * if this method returns successfully, it should be prepared to accept + * any of IOBlockStorageDriver's APIs. + * @param provider + * This object's provider. + * @result + * Returns true on success, false otherwise. + */ + + virtual bool handleStart(IOService * provider); + + /*! + * @function handleYield + * @discussion + * Stop the block storage driver. + * + * This method is called as a result of the kIOMessageServiceIsTerminated + * or kIOMessageServiceIsRequestingClose provider messages. The argument + * is passed in as-is from the message. The kIOServiceRequired option is + * set for the kIOMessageServiceIsTerminated message to indicate that the + * yield must succeed. + * + * This is where the driver should clean up its state in preparation for + * removal from the system. This implementation issues a synchronize cache + * operation, if the media is writable, and then ejects the media. + * + * Note that this method is called from within the yield() routine. + * + * This method is called with the arbitration lock held. + * @param provider + * This object's provider. + */ + + virtual bool handleYield(IOService * provider, + IOOptionBits options = 0, + void * argument = 0); + + + /*! + * @function getMediaBlockSize + * @discussion + * Ask the driver about the media's natural block size. + * @result + * Natural block size, in bytes. + */ + + virtual UInt64 getMediaBlockSize() const; + +public: + +///m:2333367:workaround:commented:start +// using read; +// using write; +///m:2333367:workaround:commented:stop + + /* + * Initialize this object's minimal state. + * + * This method's implementation is not typically overidden. 
+ */ + + virtual bool init(OSDictionary * properties = 0); + + /* + * This method is called once we have been attached to the provider object. + * + * This method's implementation is not typically overidden. + */ + + virtual bool start(IOService * provider); + + /* + * This method is called as a result of the kIOMessageServiceIsTerminated + * or kIOMessageServiceIsRequestingClose provider messages. The argument + * is passed in as-is from the message. The kIOServiceRequired option is + * set for the kIOMessageServiceIsTerminated message to indicate that the + * yield must succeed. + * + * This method is called with the arbitration lock held. + * + * This method's implementation is not typically overidden. + */ + + virtual bool yield(IOService * provider, + IOOptionBits options = 0, + void * argument = 0); + + /*! + * @function read + * @discussion + * The read method is the receiving end for all read requests from the + * storage framework (through the media object created by this driver). + * + * This method kicks off a sequence of three methods for each read or write + * request. The first is prepareRequest, which allocates and prepares some + * context for the transfer; the second is deblockRequest, which aligns the + * transfer at the media block boundaries; and the third is executeRequest, + * which implements the actual transfer from the block storage device. + * + * This method's implementation is not typically overidden. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + /*! 
+ * @function write + * @discussion + * The write method is the receiving end for all write requests from the + * storage framework (through the media object created by this driver). + * + * This method kicks off a sequence of three methods for each read or write + * request. The first is prepareRequest, which allocates and prepares some + * context for the transfer; the second is deblockRequest, which aligns the + * transfer at the media block boundaries; and the third is executeRequest, + * which implements the actual transfer from the block storage device. + * + * This method's implementation is not typically overidden. + * @param client + * Client requesting the write. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + virtual IOReturn synchronizeCache(IOService * client); + + /*! + * @function ejectMedia + * @discussion + * Eject the media from the device. The driver is responsible for tearing + * down the media object it created before proceeding with the eject. If + * the teardown fails, an error should be returned. + * @result + * An IOReturn code. + */ + + virtual IOReturn ejectMedia(); + + /*! + * @function formatMedia + * @discussion + * Format the media with the specified byte capacity. The driver is + * responsible for tearing down the media object and recreating it. + * @param byteCapacity + * Number of bytes to format media to. + * @result + * An IOReturn code. + */ + + virtual IOReturn formatMedia(UInt64 byteCapacity); + + /*! + * @function lockMedia + * @discussion + * Lock or unlock the ejectable media in the device, that is, prevent + * it from manual ejection or allow its manual ejection. 
+ * @param lock + * Pass true to lock the media, otherwise pass false to unlock the media. + * @result + * An IOReturn code. + */ + + virtual IOReturn lockMedia(bool lock); + + /*! + * @function pollMedia + * @discussion + * Poll for the presence of media in the device. The driver is responsible + * for tearing down the media object it created should the media have been + * removed since the last poll, and vice-versa, creating the media object + * should new media have arrived since the last poll. + * @result + * An IOReturn code. + */ + + virtual IOReturn pollMedia(); + + /*! + * @function isMediaEjectable + * @discussion + * Ask the driver whether the media is ejectable. + * @result + * Returns true if the media is ejectable, false otherwise. + */ + + virtual bool isMediaEjectable() const; + + /*! + * @function isMediaPollExpensive + * @discussion + * Ask the driver whether a pollMedia() would be an expensive operation, + * that is, one that requires the device to spin up or delay for a while. + * @result + * Returns true if polling the media is expensive, false otherwise. + */ + + virtual bool isMediaPollExpensive() const; + + /*! + * @function isMediaPollRequired + * @discussion + * Ask the driver whether the block storage device requires polling, which is + * typically required for devices without the ability to asynchronously detect + * the arrival or departure of the media. + * @result + * Returns true if polling the media is required, false otherwise. + */ + + virtual bool isMediaPollRequired() const; + + virtual bool isMediaWritable() const; + + /*! + * @function getMediaState + * @discussion + * Ask the driver about the media's current state. + * @result + * An IOMediaState value. + */ + + virtual IOMediaState getMediaState() const; + + /*! + * @function getFormatCapacities + * @discussion + * Ask the driver to report the feasible formatting capacities for the + * inserted media (in bytes). 
This routine fills the caller's buffer, + * up to the maximum count specified if the real number of capacities + * would overflow the buffer. The return value indicates the actual + * number of capacities copied to the buffer. + * + * If the capacities buffer is not supplied or if the maximum count is + * zero, the routine returns the proposed count of capacities instead. + * @param capacities + * Buffer that will receive the UInt64 capacity values. + * @param capacitiesMaxCount + * Maximum number of capacity values that can be held in the buffer. + * @result + * Actual number of capacity values copied to the buffer, or if no buffer + * is given, the total number of capacity values available. + */ + + virtual UInt32 getFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + + /*! + * @function getStatistics + * @discussion + * Ask the driver to report its operating statistics. + * + * The statistics are each indexed by IOBlockStorageDriver::Statistics + * indices. This routine fills the caller's buffer, up to the maximum + * count specified if the real number of statistics would overflow the + * buffer. The return value indicates the actual number of statistics + * copied to the buffer. + * + * If the statistics buffer is not supplied or if the maximum count is + * zero, the routine returns the proposed count of statistics instead. + * @param statistics + * Buffer that will receive the UInt64 statistic values. + * @param statisticsMaxCount + * Maximum number of statistic values that can be held in the buffer. + * @result + * Actual number of statistic values copied to the buffer, or if no buffer + * is given, the total number of statistic values available. + */ + + virtual UInt32 getStatistics(UInt64 * statistics, + UInt32 statisticsMaxCount) const; + + /*! + * @function getStatistic + * @discussion + * Ask the driver to report one of its operating statistics. + * @param statistic + * Statistic index (an IOBlockStorageDriver::Statistics index). 
+ * @result + * Statistic value. + */ + + virtual UInt64 getStatistic(Statistics statistic) const; + + /* + * Generic entry point for calls from the provider. A return value of + * kIOReturnSuccess indicates that the message was received, and where + * applicable, that it was successful. + */ + + virtual IOReturn message(UInt32 type, IOService * provider, void * argument); + + /* + * Obtain this object's provider. We override the superclass's method to + * return a more specific subclass of IOService -- IOBlockStorageDevice. + * This method serves simply as a convenience to subclass developers. + */ + + virtual IOBlockStorageDevice * getProvider() const; + +protected: + + IOLock * _deblockRequestWriteLock; + thread_call_t _pollerCall; + + /* + * This is the completion routine for the aligned deblocker subrequests. + * It verifies the success of the just-completed stage, transitions to + * the next stage, then builds and issues a transfer for the next stage. + */ + + static void deblockRequestCompletion(void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount); + + /* + * This is the completion routine for the prepared request. It updates + * the driver's statistics, performs some clean up work, then calls the + * original request's completion routine. + */ + + static void prepareRequestCompletion(void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount); + + /* + * Schedule the poller mechanism. + */ + + virtual void schedulePoller(); + + /* + * Unschedule the poller mechanism. + */ + + virtual void unschedulePoller(); + + /* + * This method is the timeout handler for the poller mechanism. It polls + * for media and reschedules another timeout if there are still no opens. + */ + + static void poller(void *, void *); + +protected: + + /* Device info: */ + + /*! + * @var _removable + * True if the media is removable; False if it is fixed (not removable). + */ + bool _removable; + + /*! 
+ * @var _ejectable + * True if the media is ejectable under software control. + */ + bool _ejectable; /* software-ejectable */ + + /*! + * @var _lockable + * True if the media can be locked in the device under software control. + */ + bool _lockable; /* software lockable in device */ + /*! + * @var _pollIsRequired + * True if we must poll to detect media insertion or removal. + */ + bool _pollIsRequired; + /*! + * @var _pollIsExpensive + * True if polling is expensive; False if not. + */ + bool _pollIsExpensive; + + /* Media info and states: */ + + /*! + * @var _mediaObject + * A pointer to the media object we have instantiated (if any). + */ + IOMedia * _mediaObject; + /*! + * @var _mediaType + * Type of the media (can be used to differentiate between the + * different types of CD media, DVD media, etc). + */ + UInt32 _mediaType; + /*! + * @var _mediaPresent + * True if media is present in the device; False if not. + */ + bool _mediaPresent; /* media is present and ready */ + /*! + * @var _writeProtected + * True if the media is write-protected; False if not. + */ + bool _writeProtected; + +private: + + /*! + * @var _mediaStateLock + * A lock used to protect during media checks. + */ + IOLock * _mediaStateLock; + +protected: + + /*! + * @var _mediaBlockSize + * The block size of the media, in bytes. + */ + UInt64 _mediaBlockSize; + /*! + * @var _maxBlockNumber + * The maximum allowable block number for the media, zero-based. + */ + UInt64 _maxBlockNumber; + + /*! + * @var _maxReadByteTransfer + * The maximum byte transfer allowed for read operations. + */ + UInt64 _maxReadByteTransfer; + + /*! + * @var _maxWriteByteTransfer + * The maximum byte transfer allowed for write operations. + */ + UInt64 _maxWriteByteTransfer; + + /*! + * @function acceptNewMedia + * @abstract + * React to new media insertion. + * @discussion + * This method logs the media block size and block count, then calls + * instantiateMediaObject to get a media object instantiated. 
+ The + * media object is then attached above us and registered. + * + * This method can be overridden to control what happens when new media + * is inserted. The default implementation deals with one IOMedia object. + */ + virtual IOReturn acceptNewMedia(void); + + /*! + * @function constrainByteCount + * @abstract + * Constrain the byte count for this IO to device limits. + * @discussion + * This function should be called prior to each read or write operation, so that + * the driver can constrain the requested byte count, as necessary, to meet + * current device limits. Such limits could be imposed by the device depending + * on operating modes, media types, or transport protocol (e.g. ATA, SCSI). + * + * At present, this method is not used. + * @param requestedCount + * The requested byte count for the next read or write operation. + * @param isWrite + * True if the operation will be a write; False if the operation will be a read. + */ + virtual UInt64 constrainByteCount(UInt64 requestedCount,bool isWrite); + + /*! + * @function decommissionMedia + * @abstract + * Decommission an existing piece of media that has gone away. + * @discussion + * This method wraps a call to terminate, to tear down the stack and + * the IOMedia object for the media. If "forcible" is true, the media + * object will be forgotten, and initMediaState will be called. A + * forcible decommission would occur when an unrecoverable error + * happens during teardown (e.g. perhaps a client is still open), but + * we must still forget about the media. + * @param forcible + * True to force forgetting of the media object even if terminate reports + * that there was an active client. + */ + virtual IOReturn decommissionMedia(bool forcible); + + /*! + * @function instantiateDesiredMediaObject + * @abstract + * Create an IOMedia object for media. + * @discussion + * This method creates the exact type of IOMedia object desired. It is called by + * instantiateMediaObject. 
+ A subclass may override this one-line method to change + * the type of media object actually instantiated. + */ + virtual IOMedia * instantiateDesiredMediaObject(void); + + /*! + * @function instantiateMediaObject + * @abstract + * Create an IOMedia object for media. + * @discussion + * This method creates an IOMedia object from the supplied parameters. It is a + * convenience method to wrap the handful of steps to do the job. + * @param base + * Byte number of beginning of active data area of the media. Usually zero. + * @param byteSize + * Size of the data area of the media, in bytes. + * @param blockSize + * Block size of the media, in bytes. + * @param mediaName + * Name of the IOMedia object. + * @result + * A pointer to the created IOMedia object, or a null on error. + */ + virtual IOMedia * instantiateMediaObject(UInt64 base,UInt64 byteSize, + UInt32 blockSize,char *mediaName); + + /*! + * @function recordMediaParameters + * @abstract + * Obtain media-related parameters on media insertion. + * @discussion + * This method obtains media-related parameters via calls to the + * Transport Driver's reportBlockSize, reportMaxValidBlock, + * reportMaxReadTransfer, reportMaxWriteTransfer, and reportWriteProtection + * methods. + */ + virtual IOReturn recordMediaParameters(void); + + /*! + * @function rejectMedia + * @abstract + * Reject new media. + * @discussion + * This method will be called if validateNewMedia returns False (thus rejecting + * the new media). A vendor may choose to override this method to control behavior + * when media is rejected. + * + * The default implementation simply calls ejectMedia. + */ + virtual void rejectMedia(void); /* default ejects */ + + /*! + * @function validateNewMedia + * @abstract + * Verify that new media is acceptable. + * @discussion + * This method will be called whenever new media is detected. Return true to accept + * the media, or false to reject it (and call rejectMedia). 
Vendors might override + * this method to handle password-protection for new media. + * + * The default implementation always returns True, indicating media is accepted. + */ + virtual bool validateNewMedia(void); + + /* --- Internally used methods. --- */ + + /* + * @group + * Internally Used Methods + * @discussion + * These methods are used internally, and will not generally be modified. + */ + + /*! + * @function checkForMedia + * @abstract + * Check if media has newly arrived or disappeared. + * @discussion + * This method does most of the work in polling for media, first + * calling the block storage device's reportMediaState method. If + * reportMediaState reports no change in the media state, kIOReturnSuccess + * is returned. If the media state has indeed changed, a call is made to + * mediaStateHasChanged to act on the event. + */ + virtual IOReturn checkForMedia(void); + + /*! + * @function getDeviceTypeName + * @abstract + * Return the desired device name. + * @discussion + * This method returns a string, used to compare the + * kIOBlockStorageDeviceTypeKey of our provider. This method is called from + * probe. + * + * The default implementation of this method returns + * kIOBlockStorageDeviceTypeGeneric. + */ + virtual const char * getDeviceTypeName(void); + + /*! + * @function initMediaState + * @abstract + * Initialize media-related instance variables. + * @discussion + * Called when media is not present, this method marks the device state + * as not having media present, not spun up, and write-enabled. + */ + virtual void initMediaState(void); + + /*! + * @function mediaStateHasChanged + * @abstract + * React to a new media insertion or a media removal. + * @discussion + * This method is called on a media state change, that is, an arrival + * or removal. If media has just become available, calls are made to + * recordMediaParameters and acceptNewMedia. 
If media has just gone + * away, a call is made to decommissionMedia, with the forcible + * parameter set to true. The forcible teardown is needed to enforce + * the disappearance of media, regardless of interested clients. + */ + virtual IOReturn mediaStateHasChanged(IOMediaState state); + + /* + * @endgroup + */ + + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 0); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 1); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 2); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 3); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 4); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 5); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 6); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 7); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 8); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 9); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 10); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 11); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 12); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 13); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 14); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 15); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 16); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 17); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 18); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 19); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 20); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 21); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 22); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 23); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 24); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 25); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 26); + 
OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 27); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 28); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 29); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 30); + OSMetaClassDeclareReservedUnused(IOBlockStorageDriver, 31); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOBLOCKSTORAGEDRIVER_H */ diff --git a/iokit/IOKit/storage/IOCDAudioControl.h b/iokit/IOKit/storage/IOCDAudioControl.h new file mode 100644 index 000000000..7a1a99969 --- /dev/null +++ b/iokit/IOKit/storage/IOCDAudioControl.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IOCDAudioControl + * @abstract + * This header contains the IOCDAudioControl class definition. + */ + +#ifndef _IOCDAUDIOCONTROL_H +#define _IOCDAUDIOCONTROL_H + +/*! + * @defined kIOCDAudioControlClass + * @abstract + * kIOCDAudioControlClass is the name of the IOCDAudioControl class. + * @discussion + * kIOCDAudioControlClass is the name of the IOCDAudioControl class. 
+ */ + +#define kIOCDAudioControlClass "IOCDAudioControl" + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include + +/*! + * @class IOCDAudioControl + * @discussion + * This class is the protocol for CD audio control functionality, independent of + * the physical connection protocol (eg. SCSI, ATA, USB). Any methods that deal + * with audio play and/or volume are here. + */ + +class IOCDAudioControl : public IOService +{ + OSDeclareDefaultStructors(IOCDAudioControl) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + /* + * Create a new IOCDAudioControlUserClient. + */ + + virtual IOReturn newUserClient( task_t task, + void * security, + UInt32 type, + IOUserClient ** object ); + +public: + + /*! + * @function getStatus + * @abstract + * Get the current audio play status information. + * @param status + * The buffer for the returned information. + */ + + virtual IOReturn getStatus(CDAudioStatus * status); + + /*! + * @function getTOC + * @abstract + * Get the full Table Of Contents. + * @result + * Returns a pointer to the TOC buffer (do not deallocate). + */ + + virtual CDTOC * getTOC(void); + + /*! + * @function getVolume + * @abstract + * Get the current audio volume. + * @param left + * A pointer to the returned left-channel volume. + * @param right + * A pointer to the returned right-channel volume. + */ + + virtual IOReturn getVolume(UInt8 * left, UInt8 * right); + + /*! + * @function setVolume + * @abstract + * Set the current audio volume. + * @param left + * The desired left-channel volume. + * @param right + * The desired right-channel volume. + */ + + virtual IOReturn setVolume(UInt8 left, UInt8 right); + + /*! + * @function pause + * @abstract + * Pause or resume the audio playback. + * @param pause + * True to pause playback; False to resume. + */ + + virtual IOReturn pause(bool pause); + + /*! + * @function play + * @abstract + * Play audio. 
+ * @param timeStart + * The M:S:F address from which to begin. + * @param timeStop + * The M:S:F address at which to stop. + */ + + virtual IOReturn play(CDMSF timeStart, CDMSF timeStop); + + /*! + * @function scan + * @abstract + * Perform a fast-forward or fast-backward operation. + * @param timeStart + * The M:S:F address from which to begin. + * @param reverse + * True to go backward; False to go forward. + */ + + virtual IOReturn scan(CDMSF timeStart, bool reverse); + + /*! + * @function stop + * @abstract + * Stop the audio playback (or audio scan). + */ + + virtual IOReturn stop(); + + /* + * Obtain this object's provider. We override the superclass's method to + * return a more specific subclass of IOService -- IOCDBlockStorageDriver. + * This method serves simply as a convenience to subclass developers. + */ + + virtual IOCDBlockStorageDriver * getProvider() const; + + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 0); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 1); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 2); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 3); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 4); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 5); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 6); + OSMetaClassDeclareReservedUnused(IOCDAudioControl, 7); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOCDAUDIOCONTROL_H */ diff --git a/iokit/IOKit/storage/IOCDAudioControlUserClient.h b/iokit/IOKit/storage/IOCDAudioControlUserClient.h new file mode 100644 index 000000000..f6b4c688b --- /dev/null +++ b/iokit/IOKit/storage/IOCDAudioControlUserClient.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOCDAUDIOCONTROLUSERCLIENT_H +#define _IOCDAUDIOCONTROLUSERCLIENT_H + +#include + +/* + * Audio Control User Client Methods + */ + +enum +{ + kIOCDAudioControlMethodGetStatus, // IOCDAudioControlUserClient::getStatus() + kIOCDAudioControlMethodGetTOC, // IOCDAudioControlUserClient::getTOC() + kIOCDAudioControlMethodGetVolume, // IOCDAudioControlUserClient::getVolume() + kIOCDAudioControlMethodSetVolume, // IOCDAudioControlUserClient::setVolume() + kIOCDAudioControlMethodPause, // IOCDAudioControlUserClient::pause() + kIOCDAudioControlMethodPlay, // IOCDAudioControlUserClient::play() + kIOCDAudioControlMethodScan, // IOCDAudioControlUserClient::scan() + kIOCDAudioControlMethodStop, // IOCDAudioControlUserClient::stop() + kIOCDAudioControlMethodCount // (total number of methods supported) +}; + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include + +class IOCDAudioControlUserClient : public IOUserClient +{ + OSDeclareDefaultStructors(IOCDAudioControlUserClient) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + IOExternalMethod _methods[kIOCDAudioControlMethodCount]; + + /* + * Get the current audio play status information. 
+ */ + + virtual IOReturn getStatus(CDAudioStatus * status, UInt32 * statusSize); + + /* + * Get the full Table Of Contents. + */ + + virtual IOReturn getTOC(CDTOC * toc, UInt32 * tocMaxSize); + + /* + * Get the current audio volume. + */ + + virtual IOReturn getVolume(UInt32 * left, UInt32 * right); + + /* + * Set the current audio volume. + */ + + virtual IOReturn setVolume(UInt32 left, UInt32 right); + + /* + * Pause or resume the audio playback. + */ + + virtual IOReturn pause(UInt32 pause); + + /* + * Play audio. + */ + + virtual IOReturn play(UInt32 msfStart, UInt32 msfStop); + + /* + * Perform a fast-forward or fast-backward operation. + */ + + virtual IOReturn scan(UInt32 msfStart, UInt32 reverse); + + /* + * Stop the audio playback (or audio scan). + */ + + virtual IOReturn stop(); + +public: + + /* + * Create a new IOCDAudioControlUserClient. + */ + + static IOCDAudioControlUserClient * withTask(task_t task); + + /* + * Prepare the user client for usage. + */ + + virtual bool start(IOService * provider); + + /* + * Relinquish the user client. + */ + + virtual IOReturn clientClose(); + + /* + * Obtain the method definition given a method index. + */ + + virtual IOExternalMethod * getExternalMethodForIndex(UInt32 index); + + /* + * Obtain this object's provider. We override the superclass's method + * to return a more specific subclass of IOService -- IOCDAudioControl. + * This method serves simply as a convenience to subclass developers. 
+ */ + + virtual IOCDAudioControl * getProvider() const; + + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 0); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 1); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 2); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 3); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 4); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 5); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 6); + OSMetaClassDeclareReservedUnused(IOCDAudioControlUserClient, 7); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* _IOCDAUDIOCONTROLUSERCLIENT_H */ diff --git a/iokit/IOKit/storage/IOCDBlockStorageDevice.h b/iokit/IOKit/storage/IOCDBlockStorageDevice.h new file mode 100644 index 000000000..cb2d31273 --- /dev/null +++ b/iokit/IOKit/storage/IOCDBlockStorageDevice.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOCDBlockStorageDevice.h + * + * This class is the protocol for generic CDROM functionality, independent of + * the physical connection protocol (e.g. SCSI, ATA, USB). + * + * The APIs are the union of CDROM (block storage) data APIs and all + * necessary low-level CD APIs. + * + * A subclass implements relay methods that translate our requests into + * calls to a protocol- and device-specific provider. + */ + +#ifndef _IOCDBLOCKSTORAGEDEVICE_H +#define _IOCDBLOCKSTORAGEDEVICE_H + +#include +#include +#include + +/* Property used for matching, so the generic driver gets the nub it wants. */ +#define kIOBlockStorageDeviceTypeCDROM "CDROM" + +class IOMemoryDescriptor; + +class IOCDBlockStorageDevice : public IOBlockStorageDevice { + + OSDeclareAbstractStructors(IOCDBlockStorageDevice) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + +public: + + /* Overrides from IORegistryEntry */ + + virtual bool init(OSDictionary * properties); + + /*-----------------------------------------*/ + /* CD APIs */ + /*-----------------------------------------*/ + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion) = 0; + virtual UInt32 getMediaType(void) = 0; + virtual IOReturn readISRC(UInt8 track,CDISRC isrc) = 0; + virtual IOReturn readMCN(CDMCN mcn) = 0; + virtual IOReturn readTOC(IOMemoryDescriptor * buffer) = 0; + + /*-----------------------------------------*/ + /* APIs exported by IOCDAudioControl */ + /*-----------------------------------------*/ + + virtual IOReturn audioPause(bool pause) = 0; + virtual IOReturn audioPlay(CDMSF timeStart,CDMSF timeStop) = 0; + virtual IOReturn audioScan(CDMSF timeStart,bool reverse) = 0; + virtual IOReturn audioStop() = 0; + virtual IOReturn getAudioStatus(CDAudioStatus *status) = 0; + virtual IOReturn getAudioVolume(UInt8 
*leftVolume,UInt8 *rightVolume) = 0; + virtual IOReturn setAudioVolume(UInt8 leftVolume,UInt8 rightVolume) = 0; + + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 0); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 1); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 2); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 3); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 4); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 5); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 6); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 7); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 8); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 9); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 10); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 11); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 12); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 13); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 14); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDevice, 15); +}; +#endif diff --git a/iokit/IOKit/storage/IOCDBlockStorageDriver.h b/iokit/IOKit/storage/IOCDBlockStorageDriver.h new file mode 100644 index 000000000..de98e7489 --- /dev/null +++ b/iokit/IOKit/storage/IOCDBlockStorageDriver.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOCDBlockStorageDriver.h + * + * This class implements CD functionality, independent of + * the physical connection protocol (e.g. SCSI, ATA, USB). + * + * A protocol-specific provider implements the functionality using an appropriate + * protocol and commands. + */ + +#ifndef _IOCDBLOCKSTORAGEDRIVER_H +#define _IOCDBLOCKSTORAGEDRIVER_H + +#include +#include +#include +#include + +/*! + * @defined kIOCDBlockStorageDriverClass + * @abstract + * kIOCDBlockStorageDriverClass is the name of the IOCDBlockStorageDriver class. + * @discussion + * kIOCDBlockStorageDriverClass is the name of the IOCDBlockStorageDriver class. 
+ */ + +#define kIOCDBlockStorageDriverClass "IOCDBlockStorageDriver" + +class IOCDAudioControl; +class IOCDMedia; +class IOCDBlockStorageDevice; + +class IOCDBlockStorageDriver : public IOBlockStorageDriver { + + OSDeclareDefaultStructors(IOCDBlockStorageDriver) + +public: + + static const UInt64 kBlockSizeCD = 2352; + static const UInt8 kBlockTypeCD = 0x01; + + /* Overrides of IORegistryEntry */ + + virtual bool init(OSDictionary * properties); + + /* Overrides of IOBlockStorageDriver: */ + + virtual IOReturn ejectMedia(void); + virtual void executeRequest(UInt64 byteStart, + IOMemoryDescriptor *buffer, + IOStorageCompletion completion, + Context *context); + virtual const char * getDeviceTypeName(void); + virtual IOMedia * instantiateDesiredMediaObject(void); + virtual IOMedia * instantiateMediaObject(UInt64 base,UInt64 byteSize, + UInt32 blockSize,char *mediaName); + + /* End of IOBlockStorageDriver overrides. */ + + /* + * @function getMediaType + * @abstract + * Get the current type of media inserted in the CD drive. + * @discussion + * Certain I/O operations may not be allowed depending on the type of + * media currently inserted. For example, one cannot issue write operations + * if CD-ROM media is inserted. + * @result + * See the kCDMediaType constants in IOCDTypes.h. 
+ */ + virtual UInt32 getMediaType(void); + + /* -------------------------------------------------*/ + /* APIs implemented here, exported by IOCDMedia: */ + /* -------------------------------------------------*/ + + virtual CDTOC * getTOC(void); + virtual void readCD(IOService *client, + UInt64 byteStart, + IOMemoryDescriptor *buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + virtual IOReturn readISRC(UInt8 track,CDISRC isrc); + virtual IOReturn readMCN(CDMCN mcn); + + /* end of IOCDMedia APIs */ + + /* --------------------------------------------------------*/ + /* APIs implemented here, exported by IOCDAudioControl: */ + /* --------------------------------------------------------*/ + + virtual IOReturn audioPause(bool pause); + virtual IOReturn audioPlay(CDMSF timeStart,CDMSF timeStop); + virtual IOReturn audioScan(CDMSF timeStart,bool reverse); + virtual IOReturn audioStop(); + virtual IOReturn getAudioStatus(CDAudioStatus *status); + virtual IOReturn getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume); + virtual IOReturn setAudioVolume(UInt8 leftVolume,UInt8 rightVolume); + + /* end of IOCDAudioControl APIs */ + + /* + * Obtain this object's provider. We override the superclass's method to + * return a more specific subclass of IOService -- IOCDBlockStorageDevice. + * This method serves simply as a convenience to subclass developers. + */ + + virtual IOCDBlockStorageDevice * getProvider() const; + +protected: + + /* Overrides of IOBlockStorageDriver behavior. */ + + /* When CD media is inserted, we want to create multiple nubs for the data and + * audio tracks, for sessions, and the entire media. We override the methods + * that manage nubs. + */ + virtual IOReturn acceptNewMedia(void); + virtual IOReturn decommissionMedia(bool forcible); + + /* End of IOBlockStorageDriver overrides. 
*/ + + /* Internally used methods: */ + + virtual IOReturn cacheTocInfo(void); + virtual UInt64 getMediaBlockSize(CDSectorArea area,CDSectorType type); + virtual void prepareRequest(UInt64 byteStart, + IOMemoryDescriptor *buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + + /* ------- */ + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + IOCDAudioControl * _acNub; + + /* We keep the TOC here because we'll always need it, so what the heck. + * + * There are possible "point" track entries for 0xa0..a2, 0xb0..b4, and 0xc0..0xc1. + * Tracks need not start at 1, as long as they're between 1 and 99, and have contiguous + * numbers. + */ + + CDTOC * _toc; + UInt32 _tocSize; + + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 0); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 1); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 2); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 3); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 4); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 5); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 6); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 7); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 8); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 9); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 10); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 11); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 12); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 13); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 14); + OSMetaClassDeclareReservedUnused(IOCDBlockStorageDriver, 15); +}; +#endif diff --git a/iokit/IOKit/storage/IOCDMedia.h b/iokit/IOKit/storage/IOCDMedia.h new file mode 100644 index 000000000..05315c9fb --- /dev/null +++ b/iokit/IOKit/storage/IOCDMedia.h @@ -0,0 +1,283 @@ +/* + * Copyright (c) 
1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IOCDMedia + * @abstract + * This header contains the IOCDMedia class definition. + */ + +#ifndef _IOCDMEDIA_H +#define _IOCDMEDIA_H + +/*! + * @defined kIOCDMediaClass + * @abstract + * kIOCDMediaClass is the name of the IOCDMedia class. + * @discussion + * kIOCDMediaClass is the name of the IOCDMedia class. + */ + +#define kIOCDMediaClass "IOCDMedia" + +/*! + * @defined kIOCDMediaTOCKey + * @abstract + * kIOCDMediaTOCKey is a property of IOCDMedia objects. It has an OSData value + * and a CDTOC structure. + * @discussion + * The kIOCDMediaTOCKey property contains the CD's full table of contents, + * formatted as a CDTOC structure. The CDTOC structure is same as what is + * returned by a READ TOC command, format 0x02, but with BCD numbers converted + * to binary-encoded numbers, and the multi-byte fields converted to + * host-endianess. + */ + +#define kIOCDMediaTOCKey "TOC" +#define kIOCDMediaTOC "TOC" ///d:deprecated + +/*! + * @defined kIOCDMediaTypeKey + * @abstract + * kIOCDMediaTypeKey is a property of IOCDMedia objects. 
It has an OSString + * value. + * @discussion + * The kIOCDMediaTypeKey property identifies the CD media type (CD-ROM, CD-R, + * CD-RW, etc). See the kIOCDMediaType contants for possible values. + */ + +#define kIOCDMediaTypeKey "Type" + +/*! + * @defined kIOCDMediaTypeROM + * The kIOCDMediaTypeKey constant for CD-ROM media (inclusive of the CD-I, + * CD-ROM XA, and CD Audio standards, and mixed mode combinations thereof). + */ + +#define kIOCDMediaTypeROM "CD-ROM" + +/*! + * @defined kIOCDMediaTypeR + * The kIOCDMediaTypeKey constant for CD Recordable (CD-R) media. + */ + +#define kIOCDMediaTypeR "CD-R" + +/*! + * @defined kIOCDMediaTypeRW + * The kIOCDMediaTypeKey constant for CD ReWritable (CD-RW) media. + */ + +#define kIOCDMediaTypeRW "CD-RW" + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include + +/*! + * @class IOCDMedia + * @abstract + * The IOCDMedia class is a random-access disk device abstraction for CDs. + * @discussion + * The IOCDMedia class is a random-access disk device abstraction for CDs. It + * extends the IOMedia class by implementing special CD APIs, such as readCD, + * and publishing the TOC as a property of the IOCDMedia object. + */ + +class IOCDMedia : public IOMedia +{ + OSDeclareDefaultStructors(IOCDMedia) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + +public: + +///m:2333367:workaround:commented:start +// using read; +///m:2333367:workaround:commented:stop + + /*! + * @function read + * @discussion + * Read data from the storage object at the specified byte offset into the + * specified buffer, asynchronously. When the read completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the read. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. 
The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + /*! + * @function readCD + * @discussion + * Read data from the CD media object at the specified byte offset into the + * specified buffer, asynchronously. Special areas of the CD sector can be + * read via this method, such as the header and subchannel data. When the + * read completes, the caller will be notified via the specified completion + * action. + * + * The buffer will be retained for the duration of the read. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer (see withAreas parameter). + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param sectorArea + * Sector area(s) to read. The sum of each area's size defines the "natural + * block size" of the media for the call, which should be taken into account + * when computing the address of byteStart. See IOCDTypes.h. + * @param sectorType + * Sector type that is expected. The data transfer is terminated as soon as + * data is encountered that does not match the expected type. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void readCD(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + + /*! + * @function readCD + * @discussion + * Read data from the CD media object at the specified byte offset into the + * specified buffer, synchronously. Special areas of the CD sector can be + * read via this method, such as the header and subchannel data. When the + * read completes, this method will return to the caller. 
The actual byte + * count field is optional. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param sectorArea + * Sector area(s) to read. The sum of each area's size defines the "natural + * block size" of the media for the call, which should be taken into account + * when computing the address of byteStart. See IOCDTypes.h. + * @param sectorType + * Sector type that is expected. The data transfer is terminated as soon as + * data is encountered that does not match the expected type. + * @param actualByteCount + * Returns the actual number of bytes transferred in the data transfer. + * @result + * Returns the status of the data transfer. + */ + + virtual IOReturn readCD(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + CDSectorArea sectorArea, + CDSectorType sectorType, + UInt64 * actualByteCount = 0); + + /*! + * @function readISRC + * @abstract + * Read the International Standard Recording Code for the specified track. + * @param track + * The track number from which to read the ISRC. + * @param isrc + * The buffer for the ISRC data. Buffer contents will be zero-terminated. + */ + + virtual IOReturn readISRC(UInt8 track, CDISRC isrc); + + /*! + * @function readMCN + * @abstract + * Read the Media Catalog Number (also known as the Universal Product Code). + * @param mcn + * The buffer for the MCN data. Buffer contents will be zero-terminated. + */ + + virtual IOReturn readMCN(CDMCN mcn); + + /* + * @function getTOC + * @discussion + * Get the full Table Of Contents. + * + * All CDTOC fields passed across I/O Kit APIs are guaranteed to be + * binary-encoded numbers (not BCD) and converted to host-endianess. + * @result + * Returns a pointer to the TOC buffer (do not deallocate). 
+ */ + + virtual CDTOC * getTOC(); + + /* + * Obtain this object's provider. We override the superclass's method to + * return a more specific subclass of IOService -- IOCDBlockStorageDriver. + * This method serves simply as a convenience to subclass developers. + */ + + virtual IOCDBlockStorageDriver * getProvider() const; + + OSMetaClassDeclareReservedUnused(IOCDMedia, 0); + OSMetaClassDeclareReservedUnused(IOCDMedia, 1); + OSMetaClassDeclareReservedUnused(IOCDMedia, 2); + OSMetaClassDeclareReservedUnused(IOCDMedia, 3); + OSMetaClassDeclareReservedUnused(IOCDMedia, 4); + OSMetaClassDeclareReservedUnused(IOCDMedia, 5); + OSMetaClassDeclareReservedUnused(IOCDMedia, 6); + OSMetaClassDeclareReservedUnused(IOCDMedia, 7); + OSMetaClassDeclareReservedUnused(IOCDMedia, 8); + OSMetaClassDeclareReservedUnused(IOCDMedia, 9); + OSMetaClassDeclareReservedUnused(IOCDMedia, 10); + OSMetaClassDeclareReservedUnused(IOCDMedia, 11); + OSMetaClassDeclareReservedUnused(IOCDMedia, 12); + OSMetaClassDeclareReservedUnused(IOCDMedia, 13); + OSMetaClassDeclareReservedUnused(IOCDMedia, 14); + OSMetaClassDeclareReservedUnused(IOCDMedia, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOCDMEDIA_H */ diff --git a/iokit/IOKit/storage/IOCDPartitionScheme.h b/iokit/IOKit/storage/IOCDPartitionScheme.h new file mode 100644 index 000000000..4161d157c --- /dev/null +++ b/iokit/IOKit/storage/IOCDPartitionScheme.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IOCDPartitionScheme + * @abstract + * This header contains the IOCDPartitionScheme class definition. + */ + +#ifndef _IOCDPARTITIONSCHEME_H +#define _IOCDPARTITIONSCHEME_H + +#include + +/*! + * @defined kIOMediaSessionIDKey + * @abstract + * kIOMediaSessionIDKey is property of IOMedia objects. It has an OSNumber + * value. + * @discussion + * The kIOMediaSessionIDKey property is placed into each IOMedia instance + * created by the CD partition scheme. It identifies the session number + * the track was recorded on. + */ + +#define kIOMediaSessionIDKey "Session ID" + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include + +/* + * Class + */ + +class IOCDPartitionScheme : public IOPartitionScheme +{ + OSDeclareDefaultStructors(IOCDPartitionScheme); + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + OSSet * _partitions; /* (set of media objects representing partitions) */ + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(void); + + /* + * Scan the provider media for CD partitions (in TOC). Returns the set + * of media objects representing each of the partitions (the retain for + * the set is passed to the caller), or null should no CD partitions be + * found. The default probe score can be adjusted up or down, based on + * the confidence of the scan. 
+ */ + + virtual OSSet * scan(SInt32 * score); + + /* + * Ask whether the given partition appears to be corrupt. A partition that + * is corrupt will cause the failure of the CD partition scheme altogether. + */ + + virtual bool isPartitionCorrupt( CDTOCDescriptor * partition, + UInt64 partitionSize, + UInt32 partitionBlockSize, + CDSectorType partitionBlockType, + CDTOC * toc ); + + /* + * Ask whether the given partition appears to be invalid. A partition that + * is invalid will cause it to be skipped in the scan, but will not cause a + * failure of the CD partition scheme. + */ + + virtual bool isPartitionInvalid( CDTOCDescriptor * partition, + UInt64 partitionSize, + UInt32 partitionBlockSize, + CDSectorType partitionBlockType, + CDTOC * toc ); + + /* + * Instantiate a new media object to represent the given partition. + */ + + virtual IOMedia * instantiateMediaObject( + CDTOCDescriptor * partition, + UInt64 partitionSize, + UInt32 partitionBlockSize, + CDSectorType partitionBlockType, + CDTOC * toc ); + + /* + * Allocate a new media object (called from instantiateMediaObject). + */ + + virtual IOMedia * instantiateDesiredMediaObject( + CDTOCDescriptor * partition, + UInt64 partitionSize, + UInt32 partitionBlockSize, + CDSectorType partitionBlockType, + CDTOC * toc ); + +public: + + /* + * Initialize this object's minimal state. + */ + + virtual bool init(OSDictionary * properties = 0); + + /* + * Scan the provider media for CD partitions. + */ + + virtual IOService * probe(IOService * provider, SInt32 * score); + + /* + * Determine whether the provider media contains CD partitions. + */ + + virtual bool start(IOService * provider); + + /* + * Read data from the storage object at the specified byte offset into the + * specified buffer, asynchronously. When the read completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the read. 
+ * + * For the CD partition scheme, we convert the read from a partition + * object into the appropriate readCD command to our provider media. + */ + + virtual void read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + /* + * Obtain this object's provider. We override the superclass's method + * to return a more specific subclass of OSObject -- IOCDMedia. This + * method serves simply as a convenience to subclass developers. + */ + + virtual IOCDMedia * getProvider() const; + + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 0); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 1); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 2); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 3); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 4); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 5); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 6); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 7); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 8); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 9); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 10); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 11); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 12); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 13); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 14); + OSMetaClassDeclareReservedUnused(IOCDPartitionScheme, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOCDPARTITIONSCHEME_H */ diff --git a/iokit/IOKit/storage/IOCDTypes.h b/iokit/IOKit/storage/IOCDTypes.h new file mode 100644 index 000000000..92b5bd456 --- /dev/null +++ b/iokit/IOKit/storage/IOCDTypes.h @@ -0,0 +1,226 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOCDTYPES_H +#define _IOCDTYPES_H + +#include + +/* + * Minutes, Seconds, Frames (M:S:F) + * + * All M:S:F values passed across I/O Kit APIs are guaranteed to be + * binary-encoded numbers (no BCD-encoded numbers are ever passed). + */ + +typedef struct +{ + UInt8 minute; + UInt8 second; + UInt8 frame; +} CDMSF; + +/* + * Media Catalogue Numbers (MCN), International Standard Recording Codes (ISRC) + * + * All MCN and ISRC values passed across I/O Kit APIs are guaranteed + * to have a zero-terminating byte, for convenient use as C strings. + */ + +#define kCDMCNMaxLength 13 +#define kCDISRCMaxLength 12 + +typedef char CDMCN [kCDMCNMaxLength + 1]; +typedef char CDISRC[kCDISRCMaxLength + 1]; + +/* + * Audio Status + * + * All CDAudioStatus fields passed across I/O Kit APIs are guaranteed to + * be binary-encoded numbers (no BCD-encoded numbers are ever passed). 
+ */ + +#define kCDAudioStatusUnsupported 0x00 +#define kCDAudioStatusActive 0x11 +#define kCDAudioStatusPaused 0x12 +#define kCDAudioStatusSuccess 0x13 +#define kCDAudioStatusFailure 0x14 +#define kCDAudioStatusNone 0x15 + +typedef struct +{ + UInt8 status; + struct + { + CDMSF time; + struct { + UInt8 index; + UInt8 number; + CDMSF time; + } track; + } position; +} CDAudioStatus; + +/* + * Table Of Contents + * + * All CDTOC fields passed across I/O Kit APIs are guaranteed to be + * binary-encoded numbers (not BCD) and converted to host-endianess. + */ + +typedef struct +{ + UInt8 session; +#if defined(__LITTLE_ENDIAN__) + UInt8 control:4, adr:4; +#else /* !defined(__LITTLE_ENDIAN__) */ + UInt8 adr:4, control:4; +#endif /* !defined(__LITTLE_ENDIAN__) */ + UInt8 tno; + UInt8 point; + CDMSF address; + UInt8 zero; + CDMSF p; +} CDTOCDescriptor; + +typedef struct +{ + UInt16 length; + UInt8 sessionFirst; + UInt8 sessionLast; + CDTOCDescriptor descriptors[0]; +} CDTOC; + +/* + * M:S:F To LBA Convenience Function + */ + +static UInt32 __inline CDConvertMSFToLBA(CDMSF msf) +{ + return (((msf.minute * 60UL) + msf.second) * 75UL) + msf.frame - 150; +} + +/* + * LBA To M:S:F Convenience Function + */ + +static CDMSF __inline CDConvertLBAToMSF(UInt32 lba) +{ + CDMSF msf; + + lba += 150; + msf.minute = (lba / (75 * 60)); + msf.second = (lba % (75 * 60)) / 75; + msf.frame = (lba % (75 )); + + return msf; +} + +/* + * Track Number To M:S:F Convenience Function + * + * The CDTOC structure is assumed to be complete, that is, none of + * the descriptors are missing or clipped due to an insufficiently + * sized buffer holding the CDTOC contents. 
+ */ + +static CDMSF __inline CDConvertTrackNumberToMSF(UInt8 track, CDTOC * toc) +{ + UInt32 count = (toc->length - sizeof(UInt16)) / sizeof(CDTOCDescriptor); + UInt32 i; + CDMSF msf = { 0xFF, 0xFF, 0xFF }; + + for (i = 0; i < count; i++) + { + if (toc->descriptors[i].point == track && toc->descriptors[i].adr == 1) + { + msf = toc->descriptors[i].p; + break; + } + } + + return msf; +} + +/* + * Sector Areas, Sector Types + * + * Bytes Per Type CDDA Mode1 Mode2 Mode2Form1 Mode2Form2 + * Per Area +----------+----------+----------+----------+----------+ + * Sync | 0 | 12 | 12 | 12 | 12 | + * Header | 0 | 4 | 4 | 4 | 4 | + * SubHeader | 0 | 0 | 0 | 8 | 8 | + * User | 2352 | 2048 | 2336 | 2048 | 2328 | + * Auxiliary | 0 | 288 | 0 | 280 | 0 | + * ErrorFlags | 294 | 294 | 294 | 294 | 294 | + * SubChannel | 96 | 96 | 96 | 96 | 96 | + * +----------+----------+----------+----------+----------+ + */ + +typedef enum +{ + kCDSectorAreaSync = 0x80, + kCDSectorAreaHeader = 0x20, + kCDSectorAreaSubHeader = 0x40, + kCDSectorAreaUser = 0x10, + kCDSectorAreaAuxiliary = 0x08, + kCDSectorAreaErrorFlags = 0x02, + kCDSectorAreaSubChannel = 0x01 +} CDSectorArea; + +typedef enum +{ + kCDSectorTypeUnknown = 0x00, + kCDSectorTypeCDDA = 0x01, + kCDSectorTypeMode1 = 0x02, + kCDSectorTypeMode2 = 0x03, + kCDSectorTypeMode2Form1 = 0x04, + kCDSectorTypeMode2Form2 = 0x05, + kCDSectorTypeCount = 0x06 +} CDSectorType; + +typedef enum +{ + kCDSectorSizeCDDA = 2352, + kCDSectorSizeMode1 = 2048, + kCDSectorSizeMode2 = 2336, + kCDSectorSizeMode2Form1 = 2048, + kCDSectorSizeMode2Form2 = 2328, + kCDSectorSizeWhole = 2352 +} CDSectorSize; + +/* + * Media Types + */ + +typedef enum +{ + kCDMediaTypeUnknown = 0x0100, + kCDMediaTypeROM = 0x0102, /* CD-ROM */ + kCDMediaTypeR = 0x0104, /* CD-R */ + kCDMediaTypeRW = 0x0105, /* CD-RW */ + + kCDMediaTypeMin = 0x0100, + kCDMediaTypeMax = 0x01FF +} CDMediaType; + +#endif /* _IOCDTYPES_H */ diff --git a/iokit/IOKit/storage/IODVDBlockStorageDevice.h 
b/iokit/IOKit/storage/IODVDBlockStorageDevice.h new file mode 100644 index 000000000..9f51aed87 --- /dev/null +++ b/iokit/IOKit/storage/IODVDBlockStorageDevice.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + /* This class is the protocol for generic DVD functionality, independent of + * the physical connection protocol (e.g. SCSI, ATA, USB). + * + * The APIs are the union of CDRO APIs and all + * necessary new low-level DVD APIs. + * + * A subclass implements relay methods that translate our requests into + * calls to a protocol- and device-specific provider. + */ + +#ifndef _IODVDBLOCKSTORAGEDEVICE_H +#define _IODVDBLOCKSTORAGEDEVICE_H + +#include +#include +#include + +/* Property used for matching, so the generic driver gets the nub it wants. 
*/ +#define kIOBlockStorageDeviceTypeDVD "DVD" + +class IOMemoryDescriptor; + +class IODVDBlockStorageDevice : public IOCDBlockStorageDevice { + + OSDeclareAbstractStructors(IODVDBlockStorageDevice) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + +public: + + /* Overrides from IORegistryEntry */ + + virtual bool init(OSDictionary * properties); + + /* New APIs for DVD */ + + virtual IOReturn reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat) = 0; + virtual IOReturn sendKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt8 agid,const DVDKeyFormat keyFormat) = 0; + + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 0); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 1); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 2); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 3); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 4); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 5); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 6); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 7); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 8); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 9); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 10); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 11); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 12); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 13); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 14); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 15); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 16); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 17); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 18); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 19); + 
OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 20); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 21); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 22); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 23); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 24); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 25); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 26); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 27); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 28); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 29); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 30); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDevice, 31); +}; +#endif diff --git a/iokit/IOKit/storage/IODVDBlockStorageDriver.h b/iokit/IOKit/storage/IODVDBlockStorageDriver.h new file mode 100644 index 000000000..d1cb02bc7 --- /dev/null +++ b/iokit/IOKit/storage/IODVDBlockStorageDriver.h @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IODVDBlockStorageDriver.h + * + * This class implements DVD functionality, independent of + * the physical connection protocol (e.g. SCSI, ATA, USB). + * + * A protocol-specific provider implements the functionality using an appropriate + * protocol and commands. + */ + +#ifndef _IODVDBLOCKSTORAGEDRIVER_H +#define _IODVDBLOCKSTORAGEDRIVER_H + +#include +#include +#include +#include + +/*! + * @defined kIODVDBlockStorageDriverClass + * @abstract + * kIODVDBlockStorageDriverClass is the name of the IODVDBlockStorageDriver class. + * @discussion + * kIODVDBlockStorageDriverClass is the name of the IODVDBlockStorageDriver class. + */ + +#define kIODVDBlockStorageDriverClass "IODVDBlockStorageDriver" + +class IODVDBlockStorageDevice; + +/*! + * @class + * IODVDBlockStorageDriver : public IOCDBlockStorageDriver + * @abstract + * Generic DVD Driver. + * @discussion + * Storage drivers are split into two parts: the Generic Driver handles + * all generic device issues, independent of the lower-level transport + * mechanism (e.g. SCSI, ATA, USB, FireWire). All storage operations + * at the Generic Driver level are translated into a series of generic + * device operations. These operations are passed via the Device Nub + * to a Transport Driver, which implements the appropriate + * transport-dependent protocol to execute these operations. + * + * To determine the write-protect state of a device (or media), for + * example, the generic driver would issue a call to the + * Transport Driver's reportWriteProtection method. If this were a SCSI + * device, its Transport Driver would issue a Mode Sense command to + * extract the write-protection status bit. The Transport Driver then + * reports true or false to the generic driver. + * + * The generic driver therefore has no knowledge of, or involvement + * with, the actual commands and mechanisms used to communicate with + * the device. 
It is expected that the generic driver will rarely, if + * ever, need to be subclassed to handle device idiosyncrasies; rather, + * the Transport Driver should be changed via overrides. + * + * A generic driver could be subclassed to create a different type of + * generic device. The generic driver IODVDBlockStorageDriver class is a subclass + * of IOCDBlockStorageDriver, adding DVD functions. Similarly, the Transport Driver + * IODVDBlockStorageDevice is a subclass of IOCDBlockStorageDevice, adding DVD + * functions. +*/ + +class IODVDBlockStorageDriver : public IOCDBlockStorageDriver { + + OSDeclareDefaultStructors(IODVDBlockStorageDriver) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + +public: + + /* Overrides of IOCDBlockStorageDriver: */ + + virtual const char * getDeviceTypeName(void); + virtual IOMedia * instantiateDesiredMediaObject(void); + virtual IOMedia * instantiateMediaObject(UInt64 base,UInt64 byteSize, + UInt32 blockSize,char *mediaName); + + /* End of IOCDBlockStorageDriver overrides. */ + + /*! + * @function reportKey + * @abstract + * Get key info from the DVD drive. + * @discussion + * This function handles the getting of key- and encryption-related data for the drive. + * @param buffer + * A buffer containing information, as documented in the specification + * "MtFuji Commands For Multimedia Devices." + * @param keyClass + * As documented by MtFuji. See DVDKeyClass. + * @param agid + * An AGID, as documented by MtFuji. + * @param keyFormat + * As documented by MtFuji. See DVDKeyFormat. + */ + virtual IOReturn reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat); + + /*! + * @function sendKey + * @abstract + * Send key info to the DVD drive. + * @discussion + * This function handles the setting of key- and encryption-related data for the drive. 
+ * @param buffer + * A buffer containing information, as documented in the specification + * "MtFuji Commands For Multimedia Devices." + * @param keyClass + * As documented by MtFuji. See DVDKeyClass. + * @param agid + * As documented by MtFuji. + * @param keyFormat + * As documented by MtFuji. See DVDKeyFormat. + */ + virtual IOReturn sendKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt8 agid,const DVDKeyFormat keyFormat); + + /* + * Obtain this object's provider. We override the superclass's method to + * return a more specific subclass of IOService -- IODVDBlockStorageDevice. + * This method serves simply as a convenience to subclass developers. + */ + + virtual IODVDBlockStorageDevice * getProvider() const; + +protected: + + /* Overrides of IOCDBlockStorageDriver behavior. */ + + virtual IOReturn acceptNewMedia(void); + + /* End of IOCDBlockStorageDriver overrides. */ + + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 0); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 1); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 2); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 3); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 4); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 5); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 6); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 7); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 8); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 9); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 10); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 11); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 12); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 13); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 14); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 15); + 
OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 16); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 17); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 18); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 19); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 20); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 21); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 22); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 23); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 24); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 25); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 26); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 27); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 28); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 29); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 30); + OSMetaClassDeclareReservedUnused(IODVDBlockStorageDriver, 31); +}; +#endif diff --git a/iokit/IOKit/storage/IODVDMedia.h b/iokit/IOKit/storage/IODVDMedia.h new file mode 100644 index 000000000..24472829e --- /dev/null +++ b/iokit/IOKit/storage/IODVDMedia.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IODVDMedia + * @abstract + * This header contains the IODVDMedia class definition. + */ + +#ifndef _IODVDMEDIA_H +#define _IODVDMEDIA_H + +/*! + * @defined kIODVDMediaClass + * @abstract + * kIODVDMediaClass is the name of the IODVDMedia class. + * @discussion + * kIODVDMediaClass is the name of the IODVDMedia class. + */ + +#define kIODVDMediaClass "IODVDMedia" + +/*! + * @defined kIODVDMediaTypeKey + * @abstract + * kIODVDMediaTypeKey is a property of IODVDMedia objects. It has an OSString + * value. + * @discussion + * The kIODVDMediaTypeKey property identifies the DVD media type (DVD-ROM, + * DVD-R, DVD-RW, DVD+RW, DVD-RAM, etc). See the kIODVDMediaType contants + * for possible values. + */ + +#define kIODVDMediaTypeKey "Type" + +/*! + * @defined kIODVDMediaTypeROM + * The kIODVDMediaTypeKey constant for DVD-ROM media. + */ + +#define kIODVDMediaTypeROM "DVD-ROM" + +/*! + * @defined kIODVDMediaTypeR + * The kIODVDMediaTypeKey constant for DVD Recordable (DVD-R) media. + */ + +#define kIODVDMediaTypeR "DVD-R" + +/*! + * @defined kIODVDMediaTypeRW + * The kIODVDMediaTypeKey constant for DVD ReWritable (DVD-RW) media. + */ + +#define kIODVDMediaTypeRW "DVD-RW" + +/*! + * @defined kIODVDMediaTypePlusRW + * The kIODVDMediaTypeKey constant for DVD "Plus" ReWritable (DVD+RW) media. + */ + +#define kIODVDMediaTypePlusRW "DVD+RW" + +/*! + * @defined kIODVDMediaTypeRAM + * The kIODVDMediaTypeKey constant for DVD-RAM media. 
+ */ + +#define kIODVDMediaTypeRAM "DVD-RAM" + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include + +/*! + * @class IODVDMedia + * @abstract + * The IODVDMedia class is a random-access disk device abstraction for DVDs. + * @discussion + * The IODVDMedia class is a random-access disk device abstraction for DVDs. + */ + +class IODVDMedia : public IOMedia +{ + OSDeclareDefaultStructors(IODVDMedia) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + +public: + + /* + * Obtain this object's provider. We override the superclass's method to + * return a more specific subclass of IOService -- IODVDBlockStorageDriver. + * This method serves simply as a convenience to subclass developers. + */ + + virtual IODVDBlockStorageDriver * getProvider() const; + + OSMetaClassDeclareReservedUnused(IODVDMedia, 0); + OSMetaClassDeclareReservedUnused(IODVDMedia, 1); + OSMetaClassDeclareReservedUnused(IODVDMedia, 2); + OSMetaClassDeclareReservedUnused(IODVDMedia, 3); + OSMetaClassDeclareReservedUnused(IODVDMedia, 4); + OSMetaClassDeclareReservedUnused(IODVDMedia, 5); + OSMetaClassDeclareReservedUnused(IODVDMedia, 6); + OSMetaClassDeclareReservedUnused(IODVDMedia, 7); + OSMetaClassDeclareReservedUnused(IODVDMedia, 8); + OSMetaClassDeclareReservedUnused(IODVDMedia, 9); + OSMetaClassDeclareReservedUnused(IODVDMedia, 10); + OSMetaClassDeclareReservedUnused(IODVDMedia, 11); + OSMetaClassDeclareReservedUnused(IODVDMedia, 12); + OSMetaClassDeclareReservedUnused(IODVDMedia, 13); + OSMetaClassDeclareReservedUnused(IODVDMedia, 14); + OSMetaClassDeclareReservedUnused(IODVDMedia, 15); + OSMetaClassDeclareReservedUnused(IODVDMedia, 16); + OSMetaClassDeclareReservedUnused(IODVDMedia, 17); + OSMetaClassDeclareReservedUnused(IODVDMedia, 18); + OSMetaClassDeclareReservedUnused(IODVDMedia, 19); + OSMetaClassDeclareReservedUnused(IODVDMedia, 20); + OSMetaClassDeclareReservedUnused(IODVDMedia, 21); + 
OSMetaClassDeclareReservedUnused(IODVDMedia, 22); + OSMetaClassDeclareReservedUnused(IODVDMedia, 23); + OSMetaClassDeclareReservedUnused(IODVDMedia, 24); + OSMetaClassDeclareReservedUnused(IODVDMedia, 25); + OSMetaClassDeclareReservedUnused(IODVDMedia, 26); + OSMetaClassDeclareReservedUnused(IODVDMedia, 27); + OSMetaClassDeclareReservedUnused(IODVDMedia, 28); + OSMetaClassDeclareReservedUnused(IODVDMedia, 29); + OSMetaClassDeclareReservedUnused(IODVDMedia, 30); + OSMetaClassDeclareReservedUnused(IODVDMedia, 31); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IODVDMEDIA_H */ diff --git a/iokit/IOKit/storage/IODVDTypes.h b/iokit/IOKit/storage/IODVDTypes.h new file mode 100644 index 000000000..4b13111ea --- /dev/null +++ b/iokit/IOKit/storage/IODVDTypes.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IODVDTYPES_H +#define _IODVDTYPES_H + +#include + + enum DVDKeyFormat { + kCSSAGID = 0x00, + kChallengeKey = 0x01, + kKey1 = 0x02, + kKey2 = 0x03, + kTitleKey = 0x04, + kASF = 0x05, + kSetRegion = 0x06, + kRPCState = 0x08, + kCSS2AGID = 0x10, + kCPRMAGID = 0x11, + kInvalidateAGID = 0x3f + }; + + enum DVDKeyClass { + kCSS_CSS2_CPRM = 0x00, + kRSSA = 0x01 + }; + + enum DVDMediaType { + kDVDMediaTypeUnknown = 0x0200, + kDVDMediaTypeROM = 0x0202, /* DVD-ROM */ + kDVDMediaTypeRAM = 0x0203, /* DVD-RAM */ + kDVDMediaTypeR = 0x0204, /* DVD-R */ + kDVDMediaTypeRW = 0x0205, /* DVD-RW */ + kDVDMediaTypePlusRW = 0x0206, /* DVD+RW */ + + kDVDMediaTypeMin = 0x0200, + kDVDMediaTypeMax = 0x02FF + }; + +#endif diff --git a/iokit/IOKit/storage/IOFDiskPartitionScheme.h b/iokit/IOKit/storage/IOFDiskPartitionScheme.h new file mode 100644 index 000000000..b26ebdbd0 --- /dev/null +++ b/iokit/IOKit/storage/IOFDiskPartitionScheme.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOFDISKPARTITIONSCHEME_H +#define _IOFDISKPARTITIONSCHEME_H + +#include + +/* + * FDisk Partition Map Definitions + */ + +#pragma pack(2) /* (enable 16-bit struct packing for fdisk_part, disk_blk0) */ + +/* Structure constants. */ + +#define DISK_BLK0SZ sizeof(struct disk_blk0) /* (size of partition map) */ +#define DISK_BOOTSZ 446 /* (size of boot code in map) */ +#define DISK_NPART 4 /* (number of entries in map) */ + +/* Partition map entry. */ + +struct fdisk_part +{ + UInt8 bootid; /* (is active boot partition?) */ + UInt8 beghead; /* (beginning head) */ + UInt8 begsect; /* (beginning sector; beginning cylinder, high 2 bits) */ + UInt8 begcyl; /* (beginning cylinder, low 8 bits) */ + UInt8 systid; /* (type) */ + UInt8 endhead; /* (ending head) */ + UInt8 endsect; /* (ending sector; ending cylinder, high 2 bits) */ + UInt8 endcyl; /* (ending cylinder, low 8 bits) */ + UInt32 relsect; /* (block start) */ + UInt32 numsect; /* (block count) */ +}; + +/* Partition map, as found in block zero of the disk (or extended partition). */ + +struct disk_blk0 +{ + UInt8 bootcode[DISK_BOOTSZ]; /* (boot code) */ + struct fdisk_part parts[DISK_NPART]; /* (partition entries) */ + UInt16 signature; /* (unique signature for map) */ +}; + +/* Partition map signature (signature). */ + +#define DISK_SIGNATURE 0xAA55 + +/* Partition map entry types (systid). 
*/ + +#define FDISK_PARTITION_TYPE_01 "DOS_FAT_12" +#define FDISK_PARTITION_TYPE_04 "DOS_FAT_16_S" +#define FDISK_PARTITION_TYPE_05 "DOS_Extended" +#define FDISK_PARTITION_TYPE_06 "DOS_FAT_16" +#define FDISK_PARTITION_TYPE_07 "Windows_NTFS" +#define FDISK_PARTITION_TYPE_0A "Boot_Manager" +#define FDISK_PARTITION_TYPE_0B "DOS_FAT_32" +#define FDISK_PARTITION_TYPE_0C "Windows_FAT_32" +#define FDISK_PARTITION_TYPE_0E "Windows_FAT_16" +#define FDISK_PARTITION_TYPE_0F "Windows_Extended" +#define FDISK_PARTITION_TYPE_11 "DOS_FAT_12_Hidden" +#define FDISK_PARTITION_TYPE_14 "DOS_FAT_16_S_Hidden" +#define FDISK_PARTITION_TYPE_16 "DOS_FAT_16_Hidden" +#define FDISK_PARTITION_TYPE_17 "Windows_NTFS_Hidden" +#define FDISK_PARTITION_TYPE_1B "DOS_FAT_32_Hidden" +#define FDISK_PARTITION_TYPE_1C "Windows_FAT_32_Hidden" +#define FDISK_PARTITION_TYPE_1E "Windows_FAT_16_Hidden" +#define FDISK_PARTITION_TYPE_63 "UNIX" +#define FDISK_PARTITION_TYPE_82 "Linux_Swap" +#define FDISK_PARTITION_TYPE_83 "Linux_Ext2FS" +#define FDISK_PARTITION_TYPE_84 "Hibernation" +#define FDISK_PARTITION_TYPE_85 "Linux_Extended" +#define FDISK_PARTITION_TYPE_86 "Windows_FAT_16_FT" +#define FDISK_PARTITION_TYPE_87 "Windows_NTFS_FT" +#define FDISK_PARTITION_TYPE_A5 "FreeBSD" +#define FDISK_PARTITION_TYPE_A6 "OpenBSD" +#define FDISK_PARTITION_TYPE_A7 "Apple_Rhapsody_UFS" +#define FDISK_PARTITION_TYPE_A8 "Apple_UFS" +#define FDISK_PARTITION_TYPE_A9 "NetBSD" +#define FDISK_PARTITION_TYPE_AB "Apple_Boot" +#define FDISK_PARTITION_TYPE_AF "Apple_HFS" +#define FDISK_PARTITION_TYPE_B7 "BSDI" +#define FDISK_PARTITION_TYPE_B8 "BSDI_Swap" +#define FDISK_PARTITION_TYPE_C6 "Windows_FAT_16_FT_Corrupt" +#define FDISK_PARTITION_TYPE_C7 "Windows_NTFS_FT_Corrupt" +#define FDISK_PARTITION_TYPE_EB "BeOS" +#define FDISK_PARTITION_TYPE_F2 "DOS_Secondary" +#define FDISK_PARTITION_TYPE_FD "Linux_RAID" + +#pragma options align=reset /* (reset to default struct packing) */ + +/* + * Kernel + */ + +#if defined(KERNEL) && 
defined(__cplusplus) + +#include + +/* + * Class + */ + +class IOFDiskPartitionScheme : public IOPartitionScheme +{ + OSDeclareDefaultStructors(IOFDiskPartitionScheme); + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + OSSet * _partitions; /* (set of media objects representing partitions) */ + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(void); + + /* + * Scan the provider media for an FDisk partition map. Returns the set + * of media objects representing each of the partitions (the retain for + * the set is passed to the caller), or null should no partition map be + * found. The default probe score can be adjusted up or down, based on + * the confidence of the scan. + */ + + virtual OSSet * scan(SInt32 * score); + + /* + * Ask whether the given partition is extended. + */ + + virtual bool isPartitionExtended(fdisk_part * partition); + + /* + * Ask whether the given partition is used. + */ + + virtual bool isPartitionUsed(fdisk_part * partition); + + /* + * Ask whether the given partition appears to be corrupt. A partition that + * is corrupt will cause the failure of the FDisk partition map recognition + * altogether. + */ + + virtual bool isPartitionCorrupt( fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ); + + /* + * Ask whether the given partition appears to be invalid. A partition that + * is invalid will cause it to be skipped in the scan, but will not cause a + * failure of the FDisk partition map recognition. + */ + + virtual bool isPartitionInvalid( fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ); + + /* + * Instantiate a new media object to represent the given partition. + */ + + virtual IOMedia * instantiateMediaObject( fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ); + + /* + * Allocate a new media object (called from instantiateMediaObject). 
+ */ + + virtual IOMedia * instantiateDesiredMediaObject( fdisk_part * partition, + UInt32 partitionID, + UInt32 fdiskBlock ); + +public: + + /* + * Initialize this object's minimal state. + */ + + virtual bool init(OSDictionary * properties = 0); + + /* + * Determine whether the provider media contains an FDisk partition map. + */ + + virtual IOService * probe(IOService * provider, SInt32 * score); + + /* + * Publish the new media objects which represent our partitions. + */ + + virtual bool start(IOService * provider); + + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 0); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 1); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 2); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 3); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 4); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 5); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 6); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 7); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 8); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 9); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 10); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 11); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 12); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 13); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 14); + OSMetaClassDeclareReservedUnused(IOFDiskPartitionScheme, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOFDISKPARTITIONSCHEME_H */ diff --git a/iokit/IOKit/storage/IOMedia.h b/iokit/IOKit/storage/IOMedia.h new file mode 100644 index 000000000..709f8f16c --- /dev/null +++ b/iokit/IOKit/storage/IOMedia.h @@ -0,0 +1,527 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/*!
+ * @header IOMedia
+ * @abstract
+ * This header contains the IOMedia class definition.
+ */
+
+#ifndef _IOMEDIA_H
+#define _IOMEDIA_H
+
+/*!
+ * @defined kIOMediaClass
+ * @abstract
+ * kIOMediaClass is the name of the IOMedia class.
+ * @discussion
+ * kIOMediaClass is the name of the IOMedia class.
+ */
+
+#define kIOMediaClass "IOMedia"
+
+/*!
+ * @defined kIOMediaContentKey
+ * @abstract
+ * kIOMediaContentKey is a property of IOMedia objects. It has an OSString
+ * value.
+ * @discussion
+ * The kIOMediaContentKey property contains a description of the media's
+ * contents. The description is the same as the hint at the time of the object's
+ * creation, but it is possible that the description be overridden by a client
+ * (which has probed the media and identified the content correctly) of the media
+ * object. It is more accurate than the hint for this reason. The string is
+ * formed in the likeness of Apple's "Apple_HFS" strings.
+ */
+
+#define kIOMediaContentKey "Content"
+#define kIOMediaContent "Content" ///d:deprecated
+
+/*!
+ * @defined kIOMediaContentHintKey + * @abstract + * kIOMediaContentHintKey is a property of IOMedia objects. It has an OSString + * value. + * @discussion + * The kIOMediaContentHintKey property contains a hint of the media's contents. + * The hint is set at the time of the object's creation, should the creator have + * a clue as to what it may contain. The hint string does not change for the + * lifetime of the object and is formed in the likeness of Apple's "Apple_HFS" + * strings. + */ + +#define kIOMediaContentHintKey "Content Hint" + +/*! + * @defined kIOMediaEjectableKey + * @abstract + * kIOMediaEjectableKey is a property of IOMedia objects. It has an OSBoolean + * value. + * @discussion + * The kIOMediaEjectableKey property describes whether the media is ejectable. + */ + +#define kIOMediaEjectableKey "Ejectable" +#define kIOMediaEjectable "Ejectable" ///d:deprecated + +/*! + * @defined kIOMediaLeafKey + * @abstract + * kIOMediaLeafKey is a property of IOMedia objects. It has an OSBoolean value. + * @discussion + * The kIOMediaLeafKey property describes whether the media is a leaf, that is, + * it is the deepest media object in this branch of the I/O Kit registry. + */ + +#define kIOMediaLeafKey "Leaf" +#define kIOMediaLeaf "Leaf" ///d:deprecated + +/*! + * @defined kIOMediaPreferredBlockSizeKey + * @abstract + * kIOMediaPreferredBlockSizeKey is a property of IOMedia objects. It has an + * OSNumber value. + * @discussion + * The kIOMediaPreferredBlockSizeKey property describes the media's natural block + * size in bytes. This information is useful to clients that want to optimize + * access to the media. + */ + +#define kIOMediaPreferredBlockSizeKey "Preferred Block Size" + +/*! + * @defined kIOMediaSizeKey + * @abstract + * kIOMediaSizeKey is a property of IOMedia objects. It has an OSNumber value. + * @discussion + * The kIOMediaSizeKey property describes the total length of the media in bytes. 
+ */ + +#define kIOMediaSizeKey "Size" +#define kIOMediaSize "Size" ///d:deprecated + +/*! + * @defined kIOMediaWholeKey + * @abstract + * kIOMediaWholeKey is a property of IOMedia objects. It has an OSBoolean value. + * @discussion + * The kIOMediaWholeKey property describes whether the media is whole, that is, + * it represents the whole disk (the physical disk, or a virtual replica + * thereof). + */ + +#define kIOMediaWholeKey "Whole" + +/*! + * @defined kIOMediaWritableKey + * @abstract + * kIOMediaWritableKey is a property of IOMedia objects. It has an OSBoolean + * value. + * @discussion + * The kIOMediaWritableKey property describes whether the media is writable. + */ + +#define kIOMediaWritableKey "Writable" +#define kIOMediaWritable "Writable" ///d:deprecated + +/*! + * @defined kIOMediaContentMaskKey + * @abstract + * kIOMediaContentMaskKey is a property of IOMedia clients. It has an OSString + * value. + * @discussion + * The kIOMediaContentMaskKey property must exist in all IOMedia clients that + * drive new content (that is, produce new media objects). When the client + * matches against the provider media, the value of the client's + * kIOMediaContentMaskKey property is used to replace the provider's + * kIOMediaContentKey property. + */ + +#define kIOMediaContentMaskKey "Content Mask" + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include + +/*! + * @class IOMedia + * @abstract + * The IOMedia class is a random-access disk device abstraction. + * @discussion + * The IOMedia class is a random-access disk device abstraction. It provides a + * consistent interface for both real and virtual disk devices, for subdivisions + * of disks such as partitions, for supersets of disks such as RAID volumes, and + * so on. It extends the IOStorage class by implementing the appropriate open, + * close, read, write, and matching semantics for media objects. 
The properties + * it has reflect the properties of real disk devices, such as ejectability and + * writability. + * + * The read and write interfaces support byte-level access to the storage space, + * with the appropriate deblocking handled by the block storage driver, however, + * a typical client will want to get the natural block size in order to optimize + * access to the real disk device. A read or write is accepted so long as the + * client's access is valid, the media is formatted and the transfer is within + * the bounds of the media. An optional non-zero base (offset) is then applied + * before the read or write is passed to provider object. + * + * An open is accepted so long as no more than one writer is active at any time. + */ + +class IOMedia : public IOStorage +{ + OSDeclareDefaultStructors(IOMedia) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + bool _isEjectable; + bool _isWhole; + bool _isWritable; + + UInt64 _mediaBase; /* (relative to the storage object below us) */ + UInt64 _mediaSize; + + IOStorageAccess _openLevel; + OSSet * _openReaders; + IOService * _openReaderWriter; + + UInt64 _preferredBlockSize; + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(); + + /*! + * @function handleOpen + * @discussion + * The handleOpen method grants or denies permission to access this object + * to an interested client. The argument is an IOStorageAccess value that + * specifies the level of access desired -- reader or reader-writer. + * + * This method can be invoked to upgrade or downgrade the access level for + * an existing client as well. The previous access level will prevail for + * upgrades that fail, of course. A downgrade should never fail. If the + * new access level should be the same as the old for a given client, this + * method will do nothing and return success. In all cases, one, singular + * close-per-client is expected for all opens-per-client received. 
+ *
+ * This implementation replaces the IOService definition of handleOpen().
+ * @param client
+ * Client requesting the open.
+ * @param options
+ * Options for the open. Set to zero.
+ * @param access
+ * Access level for the open. Set to kIOStorageAccessReader or
+ * kIOStorageAccessReaderWriter.
+ * @result
+ * Returns true if the open was successful, false otherwise.
+ */
+
+ virtual bool handleOpen(IOService * client,
+ IOOptionBits options,
+ void * access);
+
+ /*!
+ * @function handleIsOpen
+ * @discussion
+ * The handleIsOpen method determines whether the specified client, or any
+ * client if none is specified, presently has an open on this object.
+ *
+ * This implementation replaces the IOService definition of handleIsOpen().
+ * @param client
+ * Client to check the open state of. Set to zero to check the open state
+ * of all clients.
+ * @result
+ * Returns true if the client was (or clients were) open, false otherwise.
+ */
+
+ virtual bool handleIsOpen(const IOService * client) const;
+
+ /*!
+ * @function handleClose
+ * @discussion
+ * The handleClose method closes the client's access to this object.
+ *
+ * This implementation replaces the IOService definition of handleClose().
+ * @param client
+ * Client requesting the close.
+ * @param options
+ * Options for the close. Set to zero.
+ */
+
+ virtual void handleClose(IOService * client, IOOptionBits options);
+
+public:
+
+///m:2333367:workaround:commented:start
+// using read;
+// using write;
+///m:2333367:workaround:commented:stop
+
+ /*!
+ * @function init
+ * @discussion
+ * Initialize this object's minimal state.
+ * @param base
+ * Media offset, in bytes.
+ * @param size
+ * Media size, in bytes.
+ * @param preferredBlockSize
+ * Natural block size, in bytes.
+ * @param isEjectable
+ * Indicates whether the media is ejectable.
+ * @param isWhole
+ * Indicates whether the media represents the whole disk.
+ * @param isWritable
+ * Indicates whether the media is writable.
+ * @param contentHint + * Hint of media's contents (optional). See getContentHint(). + * @param properties + * Substitute property table for this object (optional). + * @result + * Returns true on success, false otherwise. + */ + + virtual bool init(UInt64 base, + UInt64 size, + UInt64 preferredBlockSize, + bool isEjectable, + bool isWhole, + bool isWritable, + const char * contentHint = 0, + OSDictionary * properties = 0); + + /* + * This method is called for each client interested in the services we + * provide. The superclass links us as a parent to this client in the + * I/O Kit registry on success. + */ + + virtual bool attachToChild(IORegistryEntry * client, + const IORegistryPlane * plane); + + /* + * This method is called for each client that loses interest in the + * services we provide. The superclass unlinks us from this client + * in the I/O Kit registry on success. + */ + + virtual void detachFromChild(IORegistryEntry * client, + const IORegistryPlane * plane); + + /* + * Compare the properties in the supplied table to this object's properties. + */ + + virtual bool matchPropertyTable(OSDictionary * table, SInt32 * score); + + /*! + * @function read + * @discussion + * Read data from the storage object at the specified byte offset into the + * specified buffer, asynchronously. When the read completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the read. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + /*! 
+ * @function write + * @discussion + * Write data into the storage object at the specified byte offset from the + * specified buffer, asynchronously. When the write completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the write. + * @param client + * Client requesting the write. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + virtual IOReturn synchronizeCache(IOService * client); + + /*! + * @function getPreferredBlockSize + * @discussion + * Ask the media object for its natural block size. This information + * is useful to clients that want to optimize access to the media. + * @result + * Natural block size, in bytes. + */ + + virtual UInt64 getPreferredBlockSize() const; + + /*! + * @function getSize + * @discussion + * Ask the media object for its total length in bytes. + * @result + * Media size, in bytes. + */ + + virtual UInt64 getSize() const; + + /*! + * @function getBase + * @discussion + * Ask the media object for its byte offset relative to its provider media + * object below it in the storage hierarchy. + * Media offset, in bytes. + */ + + virtual UInt64 getBase() const; + + /*! + * @function isEjectable + * @discussion + * Ask the media object whether it is ejectable. + * @result + * Returns true if the media is ejectable, false otherwise. + */ + + virtual bool isEjectable() const; + + /*! + * @function isFormatted + * @discussion + * Ask the media object whether it is formatted. + * @result + * Returns true if the media is formatted, false otherwise. + */ + + virtual bool isFormatted() const; + + /*! 
+ * @function isWhole
+ * @discussion
+ * Ask the media object whether it represents the whole disk.
+ * @result
+ * Returns true if the media represents the whole disk, false otherwise.
+ */
+
+ virtual bool isWhole() const;
+
+ /*!
+ * @function isWritable
+ * @discussion
+ * Ask the media object whether it is writable.
+ * @result
+ * Returns true if the media is writable, false otherwise.
+ */
+
+ virtual bool isWritable() const;
+
+ /*!
+ * @function getContent
+ * @discussion
+ * Ask the media object for a description of its contents. The description
+ * is the same as the hint at the time of the object's creation, but it is
+ * possible that the description be overridden by a client (which has probed
+ * the media and identified the content correctly) of the media object. It
+ * is more accurate than the hint for this reason. The string is formed in
+ * the likeness of Apple's "Apple_HFS" strings.
+ *
+ * The content description can be overridden by any client that matches onto
+ * this media object with a match category of kIOStorageCategory. The media
+ * object checks for a kIOMediaContentMaskKey property in the client, and if
+ * it finds one, it copies it into the kIOMediaContentKey property.
+ * @result
+ * Description of media's contents.
+ */
+
+ virtual const char * getContent() const;
+
+ /*!
+ * @function getContentHint
+ * @discussion
+ * Ask the media object for a hint of its contents. The hint is set at the
+ * time of the object's creation, should the creator have a clue as to what
+ * it may contain. The hint string does not change for the lifetime of the
+ * object and is also formed in the likeness of Apple's "Apple_HFS" strings.
+ * @result
+ * Hint of media's contents.
+ */
+
+ virtual const char * getContentHint() const;
+
+ /*
+ * Obtain this object's provider. We override the superclass's method to
+ * return a more specific subclass of OSObject -- IOStorage. This method
+ * serves simply as a convenience to subclass developers.
+ */ + + virtual IOStorage * getProvider() const; + + OSMetaClassDeclareReservedUnused(IOMedia, 0); + OSMetaClassDeclareReservedUnused(IOMedia, 1); + OSMetaClassDeclareReservedUnused(IOMedia, 2); + OSMetaClassDeclareReservedUnused(IOMedia, 3); + OSMetaClassDeclareReservedUnused(IOMedia, 4); + OSMetaClassDeclareReservedUnused(IOMedia, 5); + OSMetaClassDeclareReservedUnused(IOMedia, 6); + OSMetaClassDeclareReservedUnused(IOMedia, 7); + OSMetaClassDeclareReservedUnused(IOMedia, 8); + OSMetaClassDeclareReservedUnused(IOMedia, 9); + OSMetaClassDeclareReservedUnused(IOMedia, 10); + OSMetaClassDeclareReservedUnused(IOMedia, 11); + OSMetaClassDeclareReservedUnused(IOMedia, 12); + OSMetaClassDeclareReservedUnused(IOMedia, 13); + OSMetaClassDeclareReservedUnused(IOMedia, 14); + OSMetaClassDeclareReservedUnused(IOMedia, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOMEDIA_H */ diff --git a/iokit/IOKit/storage/IOMediaBSDClient.h b/iokit/IOKit/storage/IOMediaBSDClient.h new file mode 100644 index 000000000..599fdb344 --- /dev/null +++ b/iokit/IOKit/storage/IOMediaBSDClient.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOMEDIABSDCLIENT_H +#define _IOMEDIABSDCLIENT_H + +#include + +/* + * Definitions + */ + +class AnchorTable; +class MinorTable; +struct MinorSlot; + +/* + * Class + */ + +class IOMediaBSDClient : public IOService +{ + OSDeclareDefaultStructors(IOMediaBSDClient) + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + AnchorTable * _anchors; /* (table of anchors) */ + bool _bdevswInstalled; /* (are bdevsw functions installed?) */ + bool _cdevswInstalled; /* (are cdevsw functions installed?) */ + MinorTable * _minors; /* (table of minors) */ + IONotifier * _notifier; /* (media arrival notification) */ + + /* + * Notification handler for media arrivals. + */ + + static bool mediaHasArrived(void *, void *, IOService * service); + + /* + * Find the whole media that roots this media tree. + */ + + virtual IOMedia * getWholeMedia( IOMedia * media, + UInt32 * slicePathSize = 0, + char * slicePath = 0 ); + + /* + * Create bdevsw and cdevsw nodes for the given media object. + */ + + virtual bool createNodes(IOMedia * media); + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(); + +public: + /* + * Initialize this object's minimal state. + */ + + virtual bool init(OSDictionary * properties = 0); + + /* + * This method is called once we have been attached to the provider object. + */ + + virtual bool start(IOService * provider); + + /* + * This method is called before we are detached from the provider object. + */ + + virtual void stop(IOService * provider); + + /* + * Obtain the table of anchors. + */ + + virtual AnchorTable * getAnchors(); + + /* + * Obtain the table of minors. + */ + + virtual MinorTable * getMinors(); + + /* + * Obtain information for the specified minor ID. 
+ */ + + virtual MinorSlot * getMinor(UInt32 minorID); + + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 0); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 1); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 2); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 3); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 4); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 5); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 6); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 7); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 8); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 9); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 10); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 11); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 12); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 13); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 14); + OSMetaClassDeclareReservedUnused(IOMediaBSDClient, 15); +}; + +#endif /* !_IOMEDIABSDCLIENT_H */ diff --git a/iokit/IOKit/storage/IONeXTPartitionScheme.h b/iokit/IOKit/storage/IONeXTPartitionScheme.h new file mode 100644 index 000000000..3172c6bd0 --- /dev/null +++ b/iokit/IOKit/storage/IONeXTPartitionScheme.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IONEXTPARTITIONSCHEME_H +#define _IONEXTPARTITIONSCHEME_H + +#include + +/* + * NeXT Partition Map Definitions + */ + +#pragma pack(2) /* (enable 16-bit struct packing for dl_un, disk[tab,_label]) */ + +#include + +/* Structure constants. */ + +#define MAXLBLLEN 24 /* (length of disk name) */ +#define NBAD 1670 /* (size of bad sector table in map) */ +#define NLABELS 4 /* (number of partition maps on disk) */ + +/* Structure aliases, for disktab and dl_un fields. */ + +#define dl_name dl_dt.d_name +#define dl_type dl_dt.d_type +#define dl_part dl_dt.d_partitions +#define dl_front dl_dt.d_front +#define dl_back dl_dt.d_back +#define dl_ngroups dl_dt.d_ngroups +#define dl_ag_size dl_dt.d_ag_size +#define dl_ag_alts dl_dt.d_ag_alts +#define dl_ag_off dl_dt.d_ag_off +#define dl_secsize dl_dt.d_secsize +#define dl_ncyl dl_dt.d_ncylinders +#define dl_nsect dl_dt.d_nsectors +#define dl_ntrack dl_dt.d_ntracks +#define dl_rpm dl_dt.d_rpm +#define dl_bootfile dl_dt.d_bootfile +#define dl_boot0_blkno dl_dt.d_boot0_blkno +#define dl_hostname dl_dt.d_hostname +#define dl_rootpartition dl_dt.d_rootpartition +#define dl_rwpartition dl_dt.d_rwpartition +#define dl_v3_checksum dl_un.DL_v3_checksum +#define dl_bad dl_un.DL_bad + +/* Partition map, as found in block zero (or a redundant block) of the disk. 
*/ + +typedef union +{ + UInt16 DL_v3_checksum; /* (V3: ones complement checksum) */ + SInt32 DL_bad[NBAD]; /* (V1-V2: bad sector table) */ +} dl_un_t; + +typedef struct disk_label +{ + SInt32 dl_version; /* (unique signature for map) */ + SInt32 dl_label_blkno; /* (block on which this map resides) */ + SInt32 dl_size; /* (device block count) */ + char dl_label[MAXLBLLEN]; /* (device name) */ + UInt32 dl_flags; /* (device flags) */ + UInt32 dl_tag; /* (device tag) */ + struct disktab dl_dt; /* (device info, partition entries) */ + dl_un_t dl_un; + UInt16 dl_checksum; /* (V1-V2: ones complement checksum) */ + + /* (add things here so dl_checksum stays in a fixed place) */ +} disk_label_t; + +/* Partition map signature (dl_version). */ + +#define DL_V1 0x4e655854 /* (version 1: "NeXT") */ +#define DL_V2 0x646c5632 /* (version 2: "dlV2") */ +#define DL_V3 0x646c5633 /* (version 3: "dlV3") */ +#define DL_VERSION DL_V3 /* (default version) */ + +/* Partition map flags (dl_flags). */ + +#define DL_UNINIT 0x80000000 /* (is uninitialized?) */ + +#pragma options align=reset /* (reset to default struct packing) */ + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include + +/* + * Class + */ + +class IONeXTPartitionScheme : public IOPartitionScheme +{ + OSDeclareDefaultStructors(IONeXTPartitionScheme); + +private: + + /* + * Compute a 16-bit (big-endian) checksum over the specified data range. + */ + + UInt16 checksum16(void * data, UInt32 bytes) const; + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + OSSet * _partitions; /* (set of media objects representing partitions) */ + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(void); + + /* + * Scan the provider media for a NeXT partition map. Returns the set + * of media objects representing each of the partitions (the retain for + * the set is passed to the caller), or null should no partition map be + * found. 
The default probe score can be adjusted up or down, based on + * the confidence of the scan. + */ + + virtual OSSet * scan(SInt32 * score); + + /* + * Ask whether the given partition is used. + */ + + virtual bool isPartitionUsed(partition_t * partition); + + /* + * Ask whether the given partition appears to be corrupt. A partition that + * is corrupt will cause the failure of the NeXT partition map recognition + * altogether. + */ + + virtual bool isPartitionCorrupt( partition_t * partition, + UInt32 partitionID, + UInt64 nextBase, + disk_label_t * nextMap ); + + /* + * Ask whether the given partition appears to be invalid. A partition that + * is invalid will cause it to be skipped in the scan, but will not cause a + * failure of the NeXT partition map recognition. + */ + + virtual bool isPartitionInvalid( partition_t * partition, + UInt32 partitionID, + UInt64 nextBase, + disk_label_t * nextMap ); + + /* + * Instantiate a new media object to represent the given partition. + */ + + virtual IOMedia * instantiateMediaObject( partition_t * partition, + UInt32 partitionID, + UInt64 nextBase, + disk_label_t * nextMap ); + + /* + * Allocate a new media object (called from instantiateMediaObject). + */ + + virtual IOMedia * instantiateDesiredMediaObject( partition_t * partition, + UInt32 partitionID, + UInt64 nextBase, + disk_label_t * nextMap ); + +public: + + /* + * Initialize this object's minimal state. + */ + + virtual bool init(OSDictionary * properties = 0); + + /* + * Determine whether the provider media contains a NeXT partition map. + */ + + virtual IOService * probe(IOService * provider, SInt32 * score); + + /* + * Publish the new media objects which represent our partitions. 
+ */ + + virtual bool start(IOService * provider); + + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 0); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 1); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 2); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 3); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 4); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 5); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 6); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 7); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 8); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 9); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 10); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 11); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 12); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 13); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 14); + OSMetaClassDeclareReservedUnused(IONeXTPartitionScheme, 15); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IONEXTPARTITIONSCHEME_H */ diff --git a/iokit/IOKit/storage/IOPartitionScheme.h b/iokit/IOKit/storage/IOPartitionScheme.h new file mode 100644 index 000000000..3babae091 --- /dev/null +++ b/iokit/IOKit/storage/IOPartitionScheme.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IOPartitionScheme + * @abstract + * This header contains the IOPartitionScheme class definition. + */ + +#ifndef _IOPARTITIONSCHEME_H +#define _IOPARTITIONSCHEME_H + +/*! + * @defined kIOPartitionSchemeClass + * @abstract + * kIOPartitionSchemeClass is the name of the IOPartitionScheme class. + * @discussion + * kIOPartitionSchemeClass is the name of the IOPartitionScheme class. + */ + +#define kIOPartitionSchemeClass "IOPartitionScheme" + +/*! + * @defined kIOMediaPartitionIDKey + * @abstract + * kIOMediaPartitionIDKey is property of IOMedia objects. It has an OSNumber + * value. + * @discussion + * The kIOMediaPartitionIDKey property is placed into each IOMedia instance + * created via the partition scheme. It is an ID that differentiates one + * partition from the other (within a given scheme). It is typically an index + * into the on-disk partition table. + */ + +#define kIOMediaPartitionIDKey "Partition ID" +#define kIOMediaPartitionID "Partition ID" ///d:deprecated + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include + +/*! + * @class IOPartitionScheme + * @abstract + * The IOPartitionScheme class is the common base class for all partition scheme + * objects. + * @discussion + * The IOPartitionScheme class is the common base class for all partition scheme + * objects. 
It extends the IOStorage class by implementing the appropriate open + * and close semantics for partition objects (standard semantics are to act as a + * multiplexor for incoming opens, producing one outgoing open with the correct + * access). It also implements the default read and write semantics, which pass + * all reads and writes through to the provider media unprocessed. For simple + * schemes, the default behavior is sufficient. More complex partition schemes + * such as RAID will want to do extra processing for reads and writes. + */ + +class IOPartitionScheme : public IOStorage +{ + OSDeclareDefaultStructors(IOPartitionScheme); + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + IOStorageAccess _openLevel; + OSSet * _openReaders; + OSSet * _openReaderWriters; + + /* + * Free all of this object's outstanding resources. + */ + + virtual void free(); + + /*! + * @function handleOpen + * @discussion + * The handleOpen method grants or denies permission to access this object + * to an interested client. The argument is an IOStorageAccess value that + * specifies the level of access desired -- reader or reader-writer. + * + * This method can be invoked to upgrade or downgrade the access level for + * an existing client as well. The previous access level will prevail for + * upgrades that fail, of course. A downgrade should never fail. If the + * new access level should be the same as the old for a given client, this + * method will do nothing and return success. In all cases, one, singular + * close-per-client is expected for all opens-per-client received. + * + * This implementation replaces the IOService definition of handleOpen(). + * @param client + * Client requesting the open. + * @param options + * Options for the open. Set to zero. + * @param access + * Access level for the open. Set to kIOStorageAccessReader or + * kIOStorageAccessReaderWriter. + * @result + * Returns true if the open was successful, false otherwise. 
+ */ + + virtual bool handleOpen(IOService * client, + IOOptionBits options, + void * access); + + /*! + * @function handleIsOpen + * @discussion + * The handleIsOpen method determines whether the specified client, or any + * client if none is specified, presently has an open on this object. + * + * This implementation replaces the IOService definition of handleIsOpen(). + * @param client + * Client to check the open state of. Set to zero to check the open state + * of all clients. + * @result + * Returns true if the client was (or clients were) open, false otherwise. + */ + + virtual bool handleIsOpen(const IOService * client) const; + + /*! + * @function handleClose + * @discussion + * The handleClose method closes the client's access to this object. + * + * This implementation replaces the IOService definition of handleClose(). + * @param client + * Client requesting the close. + * @param options + * Options for the close. Set to zero. + */ + + virtual void handleClose(IOService * client, IOOptionBits options); + +public: + +///m:2333367:workaround:commented:start +// using read; +// using write; +///m:2333367:workaround:commented:stop + + /* + * Initialize this object's minimal state. + */ + + virtual bool init(OSDictionary * properties = 0); + + /*! + * @function read + * @discussion + * Read data from the storage object at the specified byte offset into the + * specified buffer, asynchronously. When the read completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the read. + * + * For simple partition schemes, the default behavior is to simply pass the + * read through to the provider media. More complex partition schemes such + * as RAID will need to do extra processing here. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. 
The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + /*! + * @function write + * @discussion + * Write data into the storage object at the specified byte offset from the + * specified buffer, asynchronously. When the write completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the write. + * + * For simple partition schemes, the default behavior is to simply pass the + * write through to the provider media. More complex partition schemes such + * as RAID will need to do extra processing here. + * @param client + * Client requesting the write. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion); + + virtual IOReturn synchronizeCache(IOService * client); + + /* + * Obtain this object's provider. We override the superclass's method + * to return a more specific subclass of OSObject -- an IOMedia. This + * method serves simply as a convenience to subclass developers. 
+ */ + + virtual IOMedia * getProvider() const; + + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 0); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 1); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 2); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 3); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 4); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 5); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 6); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 7); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 8); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 9); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 10); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 11); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 12); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 13); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 14); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 15); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 16); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 17); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 18); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 19); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 20); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 21); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 22); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 23); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 24); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 25); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 26); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 27); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 28); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 29); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 30); + OSMetaClassDeclareReservedUnused(IOPartitionScheme, 31); +}; + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif 
/* !_IOPARTITIONSCHEME_H */ diff --git a/iokit/IOKit/storage/IOStorage.h b/iokit/IOKit/storage/IOStorage.h new file mode 100644 index 000000000..e2247581f --- /dev/null +++ b/iokit/IOKit/storage/IOStorage.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*! + * @header IOStorage + * @abstract + * This header contains the IOStorage class definition. + */ + +#ifndef _IOSTORAGE_H +#define _IOSTORAGE_H + +#include + +/*! + * @defined kIOStorageClass + * @abstract + * kIOStorageClass is the name of the IOStorage class. + * @discussion + * kIOStorageClass is the name of the IOStorage class. + */ + +#define kIOStorageClass "IOStorage" + +/*! + * @enum IOStorageAccess + * @discussion + * The IOStorageAccess enumeration describes the possible access levels for open + * requests. + * @constant kIOStorageAccessNone + * No access is requested; should not be passed to open(). + * @constant kIOStorageAccessReader + * Read-only access is requested. + * @constant kIOStorageAccessReaderWriter + * Read and write access is requested. 
+ */ + +typedef UInt32 IOStorageAccess; + +#define kIOStorageAccessNone 0x00 +#define kIOStorageAccessReader 0x01 +#define kIOStorageAccessReaderWriter 0x03 + +/*! + * @defined kIOStorageCategory + * @abstract + * kIOStorageCategory is a value for IOService's kIOMatchCategoryKey property. + * @discussion + * The kIOStorageCategory value is the standard value for the IOService property + * kIOMatchCategoryKey ("IOMatchCategory") for all storage drivers. All storage + * objects that expect to drive new content (that is, produce new media objects) + * are expected to compete within the kIOStorageCategory namespace. + * + * See the IOService documentation for more information on match categories. + */ + +#define kIOStorageCategory "IOStorage" /* (as IOMatchCategory) */ + +/* + * Kernel + */ + +#if defined(KERNEL) && defined(__cplusplus) + +#include +#include +#include + +/*! + * @typedef IOStorageCompletionAction + * @discussion + * The IOStorageCompletionAction declaration describes the C (or C++) completion + * routine that is called once an asynchronous storage operation completes. + * @param target + * Opaque client-supplied pointer (or an instance pointer for a C++ callback). + * @param parameter + * Opaque client-supplied pointer. + * @param status + * Status of the data transfer. + * @param actualByteCount + * Actual number of bytes transferred in the data transfer. + */ + +typedef void (*IOStorageCompletionAction)(void * target, + void * parameter, + IOReturn status, + UInt64 actualByteCount); + +/*! + * @struct IOStorageCompletion + * @discussion + * The IOStorageCompletion structure describes the C (or C++) completion routine + * that is called once an asynchronous storage operation completes. The values + * passed for the target and parameter fields will be passed to the routine when + * it is called. + * @field target + * Opaque client-supplied pointer (or an instance pointer for a C++ callback). 
+ * @field action + * Completion routine to call on completion of the data transfer. + * @field parameter + * Opaque client-supplied pointer. + */ + +struct IOStorageCompletion +{ + void * target; + IOStorageCompletionAction action; + void * parameter; +}; + +/*! + * @class IOStorage + * @abstract + * The IOStorage class is the common base class for mass storage objects. + * @discussion + * The IOStorage class is the common base class for mass storage objects. It is + * an abstract class that defines the open/close/read/write APIs that need to be + * implemented in a given subclass. Synchronous versions of the read/write APIs + * are provided here -- they are coded in such a way as to wrap the asynchronous + * versions implemented in the subclass. + */ + +class IOStorage : public IOService +{ + OSDeclareAbstractStructors(IOStorage); + +protected: + + struct ExpansionData { /* */ }; + ExpansionData * _expansionData; + + /*! + * @function handleOpen + * @discussion + * The handleOpen method grants or denies permission to access this object + * to an interested client. The argument is an IOStorageAccess value that + * specifies the level of access desired -- reader or reader-writer. + * + * This method can be invoked to upgrade or downgrade the access level for + * an existing client as well. The previous access level will prevail for + * upgrades that fail, of course. A downgrade should never fail. If the + * new access level should be the same as the old for a given client, this + * method will do nothing and return success. In all cases, one, singular + * close-per-client is expected for all opens-per-client received. + * @param client + * Client requesting the open. + * @param options + * Options for the open. Set to zero. + * @param access + * Access level for the open. Set to kIOStorageAccessReader or + * kIOStorageAccessReaderWriter. + * @result + * Returns true if the open was successful, false otherwise. 
+ */ + + virtual bool handleOpen(IOService * client, + IOOptionBits options, + void * access) = 0; + + /*! + * @function handleIsOpen + * @discussion + * The handleIsOpen method determines whether the specified client, or any + * client if none is specified, presently has an open on this object. + * @param client + * Client to check the open state of. Set to zero to check the open state + * of all clients. + * @result + * Returns true if the client was (or clients were) open, false otherwise. + */ + + virtual bool handleIsOpen(const IOService * client) const = 0; + + /*! + * @function handleClose + * @discussion + * The handleClose method closes the client's access to this object. + * @param client + * Client requesting the close. + * @param options + * Options for the close. Set to zero. + */ + + virtual void handleClose(IOService * client, IOOptionBits options) = 0; + +public: + +///m:2333367:workaround:commented:start +// using open; +///m:2333367:workaround:commented:stop + + /*! + * @function open + * @discussion + * Ask the storage object for permission to access its contents; the method + * is equivalent to IOService::open(), but with the correct parameter types. + * + * This method may also be invoked to upgrade or downgrade the access of an + * existing open (if it fails, the existing open prevails). + * @param client + * Client requesting the open. + * @param options + * Options for the open. Set to zero. + * @param access + * Access level for the open. Set to kIOStorageAccessReader or + * kIOStorageAccessReaderWriter. + * @result + * Returns true if the open was successful, false otherwise. + */ + + virtual bool open(IOService * client, + IOOptionBits options, + IOStorageAccess access); + + /*! + * @function read + * @discussion + * Read data from the storage object at the specified byte offset into the + * specified buffer, asynchronously. When the read completes, the caller + * will be notified via the specified completion action. 
+ * + * The buffer will be retained for the duration of the read. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) = 0; + + /*! + * @function write + * @discussion + * Write data into the storage object at the specified byte offset from the + * specified buffer, asynchronously. When the write completes, the caller + * will be notified via the specified completion action. + * + * The buffer will be retained for the duration of the write. + * @param client + * Client requesting the write. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param completion + * Completion routine to call once the data transfer is complete. + */ + + virtual void write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + IOStorageCompletion completion) = 0; + + /*! + * @function read + * @discussion + * Read data from the storage object at the specified byte offset into the + * specified buffer, synchronously. When the read completes, this method + * will return to the caller. The actual byte count field is optional. + * @param client + * Client requesting the read. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param actualByteCount + * Returns the actual number of bytes transferred in the data transfer. + * @result + * Returns the status of the data transfer. 
+ */ + + virtual IOReturn read(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + UInt64 * actualByteCount = 0); + + /*! + * @function write + * @discussion + * Write data into the storage object at the specified byte offset from the + * specified buffer, synchronously. When the write completes, this method + * will return to the caller. The actual byte count field is optional. + * @param client + * Client requesting the write. + * @param byteStart + * Starting byte offset for the data transfer. + * @param buffer + * Buffer for the data transfer. The size of the buffer implies the size of + * the data transfer. + * @param actualByteCount + * Returns the actual number of bytes transferred in the data transfer. + * @result + * Returns the status of the data transfer. + */ + + virtual IOReturn write(IOService * client, + UInt64 byteStart, + IOMemoryDescriptor * buffer, + UInt64 * actualByteCount = 0); + + virtual IOReturn synchronizeCache(IOService * client) = 0; + + /*! + * @function complete + * @discussion + * Invokes the specified completion action of the read/write request. If + * the completion action is unspecified, no action is taken. This method + * serves simply as a convenience to storage subclass developers. + * @param completion + * Completion information for the data transfer. + * @param status + * Status of the data transfer. + * @param actualByteCount + * Actual number of bytes transferred in the data transfer. 
+ */ + + static inline void complete(IOStorageCompletion completion, + IOReturn status, + UInt64 actualByteCount = 0); + + OSMetaClassDeclareReservedUnused(IOStorage, 0); + OSMetaClassDeclareReservedUnused(IOStorage, 1); + OSMetaClassDeclareReservedUnused(IOStorage, 2); + OSMetaClassDeclareReservedUnused(IOStorage, 3); + OSMetaClassDeclareReservedUnused(IOStorage, 4); + OSMetaClassDeclareReservedUnused(IOStorage, 5); + OSMetaClassDeclareReservedUnused(IOStorage, 6); + OSMetaClassDeclareReservedUnused(IOStorage, 7); + OSMetaClassDeclareReservedUnused(IOStorage, 8); + OSMetaClassDeclareReservedUnused(IOStorage, 9); + OSMetaClassDeclareReservedUnused(IOStorage, 10); + OSMetaClassDeclareReservedUnused(IOStorage, 11); + OSMetaClassDeclareReservedUnused(IOStorage, 12); + OSMetaClassDeclareReservedUnused(IOStorage, 13); + OSMetaClassDeclareReservedUnused(IOStorage, 14); + OSMetaClassDeclareReservedUnused(IOStorage, 15); +}; + +/* + * Inline Functions + */ + +inline void IOStorage::complete(IOStorageCompletion completion, + IOReturn status, + UInt64 actualByteCount) +{ + /* + * Invokes the specified completion action of the read/write request. If + * the completion action is unspecified, no action is taken. This method + * serves simply as a convenience to storage subclass developers. 
+ */ + + if (completion.action) (*completion.action)(completion.target, + completion.parameter, + status, + actualByteCount); +} + +#endif /* defined(KERNEL) && defined(__cplusplus) */ + +#endif /* !_IOSTORAGE_H */ diff --git a/iokit/IOKit/storage/Makefile b/iokit/IOKit/storage/Makefile new file mode 100644 index 000000000..3b567c9c7 --- /dev/null +++ b/iokit/IOKit/storage/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = storage +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = \ + scsi + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = \ + IOApplePartitionScheme.h \ + IOBlockStorageDriver.h \ + IOCDAudioControlUserClient.h \ + IOCDMedia.h \ + IOCDPartitionScheme.h \ + IOCDTypes.h \ + IODVDMedia.h \ + IOFDiskPartitionScheme.h \ + IOMedia.h \ + IONeXTPartitionScheme.h \ + IOPartitionScheme.h \ + IOStorage.h +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/storage/ata/IOATAHDDrive.h b/iokit/IOKit/storage/ata/IOATAHDDrive.h new file mode 100644 index 000000000..e70e6ae75 --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAHDDrive.h @@ -0,0 +1,470 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAHDDrive.h + * + * HISTORY + * Aug 27, 1999 jliu - Ported from AppleATADrive. + */ + +#ifndef _IOATAHDDRIVE_H +#define _IOATAHDDRIVE_H + +#include +#include +#include +#include +#include + +class IOSyncer; + +// ATA parameters. +// +#define kIOATASectorSize 512 +#define kIOATAMaxBlocksPerXfer 256 + +// ATA commands. +// +enum { + kIOATACommandReadPIO = 0x20, + kIOATACommandWritePIO = 0x30, + kIOATACommandReadDMA = 0xc8, + kIOATACommandWriteDMA = 0xca, + kIOATACommandReadDMAQueued = 0xc7, + kIOATACommandWriteDMAQueued = 0xcc, + kIOATACommandStandbyImmediate = 0xe0, + kIOATACommandSleep = 0xe6, + kIOATACommandFlushCache = 0xe7, + kIOATACommandSetFeatures = 0xef, +}; + +// ATA power states, from lowest to highest power usage. +// +typedef enum { + kIOATAPowerStateSleep = 0, + kIOATAPowerStateStandby, + kIOATAPowerStateIdle, + kIOATAPowerStateActive +} IOATAPowerState; + +// ATA supported features. 
+// +enum { + kIOATAFeaturePowerManagement = 0x01, + kIOATAFeatureWriteCache = 0x02 +}; + +// Stages to transition into each power state. +// +enum { + kIOATAStandbyStage0, /* hold the queue */ + kIOATAStandbyStage1, /* flush disk write cache */ + kIOATAStandbyStage2, /* issue ATA STANDBY IMMEDIATE command */ + kIOATAStandbyStage3 /* finalize state transition */ +}; + +enum { + kIOATAActiveStage0, /* issue a software reset */ + kIOATAActiveStage1, /* spin up the drive */ + kIOATAActiveStage2, /* release the queue */ + kIOATAActiveStage3 /* finalize state transition */ +}; + +// Property table keys. +// +#define kIOATASupportedFeaturesKey "ATA Features" +#define kIOATAEnableWriteCacheKey "Enable Write Cache" + +//=========================================================================== +// IOATAClientData - This structure is stored on the IOATACommand's +// driver private area. +//=========================================================================== + +struct IOATAClientData +{ + IOATACommand * command; // back pointer to command object. + IOMemoryDescriptor * buffer; // transfer buffer descriptor. + union { + IOStorageCompletion async; // completion target/action/param. + IOSyncer * syncLock; // used by sync commands. + } completion; + bool isSync; // command is synchronous. + SInt32 maxRetries; // max retry attempts (0 = no retry). + IOReturn returnCode; // sync command return code. +}; + +// Get driver private data (IOATAClientData) from an IOATACommand object. 
+// +#define ATA_CLIENT_DATA(x) ((IOATAClientData *)((x)->getClientData())) + +//=========================================================================== +// IOATAHDDrive +//=========================================================================== + +class IOATAHDDrive : public IOService +{ + OSDeclareDefaultStructors(IOATAHDDrive) + +protected: + IOATADevice * _ataDevice; + IOCommandGate * _cmdGate; + UInt _unit; + ATATimingProtocol _timingProtocol; + ATAProtocol _ataProtocol; + UInt8 _ataReadCmd; + UInt8 _ataWriteCmd; + char _revision[9]; + char _model[41]; + bool _powerStateChanging; + bool _setPowerAckPending; + bool _logSelectedTimingProtocol; + IOOptionBits _supportedFeatures; + IOATAPowerState _currentATAPowerState; + IOATAPowerState _proposedATAPowerState; + void * _configThreadCall; + bool _pmReady; + + //----------------------------------------------------------------------- + // Default timeout (in milliseconds) for async and sync commands. + + static const UInt kATADefaultTimeout = 30000; // 30 seconds + + //----------------------------------------------------------------------- + // Default retry count for async and sync commands. + + static const UInt kATADefaultRetries = 4; + static const UInt kATAZeroRetry = 0; + + //----------------------------------------------------------------------- + // Static member functions called by IOCommandGate, or registered + // as completion routines. 
+ + static void sHandleCommandCompletion(IOATAHDDrive * self, + IOATACommand * cmd); + + static void sHandleSetPowerState(IOATAHDDrive * self, + UInt32 powerStateOrdinal, + IOService * whatDevice, + IOReturn * handlerReturn); + + static void sHandleSleepStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred); + + static void sHandleActiveStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred); + + static void sHandleIdleStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred); + + static void sHandleStandbyStateTransition(IOATAHDDrive * self, + void * stage, + IOReturn status, + UInt64 bytesTransferred); + + static void sHandleInitialPowerStateForDomainState( + IOATAHDDrive * self, + IOPMPowerFlags domainState, + UInt32 * state); + + static void sHandleConfigureDevice(IOATAHDDrive * self); + + //----------------------------------------------------------------------- + // Release all allocated resource before calling super::free(). + + virtual void free(); + + //----------------------------------------------------------------------- + // Select the device timing protocol. + + virtual bool selectTimingProtocol(); + + //----------------------------------------------------------------------- + // Select the ATA protocol. + + virtual bool selectCommandProtocol(bool isDMA); + + //----------------------------------------------------------------------- + // Setup an ATATaskFile from the parameters given, and write the taskfile + // to the ATATaskfile structure pointer provided. + + virtual void setupReadWriteTaskFile(ATATaskfile * taskfile, + ATAProtocol protocol, + UInt8 command, + UInt32 block, + UInt32 nblks); + + //----------------------------------------------------------------------- + // Return an IOATACommand initialized to perform a read/write operation. 
+ + virtual IOATACommand * ataCommandReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + + //----------------------------------------------------------------------- + // Return a ATA Set Features command. + + virtual IOATACommand * ataCommandSetFeatures(UInt8 features, + UInt8 SectorCount = 0, + UInt8 SectorNumber = 0, + UInt8 CylinderLow = 0, + UInt8 CyclinderHigh = 0); + + //----------------------------------------------------------------------- + // Return a ATA Flush Cache command. + + virtual IOATACommand * ataCommandFlushCache(); + + //----------------------------------------------------------------------- + // Return a ATA Standby Immediate command. + + virtual IOATACommand * ataCommandStandby(); + + //----------------------------------------------------------------------- + // Issue a synchronous ATA command. + + virtual IOReturn syncExecute(IOATACommand * cmd, + UInt32 timeout = kATADefaultTimeout, + UInt retries = kATADefaultRetries, + IOMemoryDescriptor * senseData = 0); + + //----------------------------------------------------------------------- + // Issue an asynchronous ATA command. + + virtual IOReturn asyncExecute( + IOATACommand * cmd, + IOStorageCompletion completion, + UInt32 timeout = kATADefaultTimeout, + UInt retries = kATADefaultRetries); + + //----------------------------------------------------------------------- + // Allocate an IOATACommand object. + + virtual IOATACommand * allocateCommand(); + + //----------------------------------------------------------------------- + // Inspect the ATA device. + + virtual bool inspectDevice(IOATADevice * device); + + //----------------------------------------------------------------------- + // Configure the ATA device. + + virtual bool configureDevice(IOATADevice * device); + + //----------------------------------------------------------------------- + // Returns an IOATAHDDriveNub instance. 
+ + virtual IOService * instantiateNub(); + + //----------------------------------------------------------------------- + // Calls instantiateNub() then initialize, attach, and register the + // drive nub. + + virtual bool createNub(IOService * provider); + + //----------------------------------------------------------------------- + // Power management support. Subclasses can override these functions + // to replace/enhance the default power management support. + + virtual void initForPM(); + + virtual UInt32 handleInitialPowerStateForDomainState( + IOPMPowerFlags domainState); + + virtual IOReturn handleSetPowerState(UInt32 powerStateOrdinal, + IOService * whatDevice); + + virtual IOATAPowerState getATAPowerStateForStateOrdinal( + UInt32 stateOrdinal); + + virtual void startATAPowerStateTransition(IOATAPowerState ataPowerState); + + virtual void endATAPowerStateTransition(IOATAPowerState ataPowerState); + + virtual void abortATAPowerStateTransition(); + + virtual void handleSleepStateTransition(UInt32 stage, IOReturn status); + + virtual void handleActiveStateTransition(UInt32 stage, IOReturn status); + + virtual void handleIdleStateTransition(UInt32 stage, IOReturn status); + + virtual void handleStandbyStateTransition( UInt32 stage, IOReturn status); + + virtual IOReturn readSector(IOStorageCompletion completion, + UInt32 sector = 0); + + static void acknowledgeATAPowerStateTransition(void *castMeToIOATAHDDrive, void*); + +public: + /* + * Overrides from IOService. + */ + virtual bool init(OSDictionary * properties); + virtual IOService * probe(IOService * provider, SInt32 * score); + virtual bool start(IOService * provider); + virtual void stop(IOService * provider); + + //----------------------------------------------------------------------- + // Report the type of ATA device (ATA vs. ATAPI). + + virtual ATADeviceType reportATADeviceType() const; + + //----------------------------------------------------------------------- + // Handles read/write requests. 
+ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion); + + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + + //----------------------------------------------------------------------- + // Eject the media in the drive. + + virtual IOReturn doEjectMedia(); + + //----------------------------------------------------------------------- + // Format the media in the drive. + + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + + //----------------------------------------------------------------------- + // Returns disk capacity in bytes. + + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + + //----------------------------------------------------------------------- + // Lock the media and prevent a user-initiated eject. + + virtual IOReturn doLockUnlockMedia(bool doLock); + + //----------------------------------------------------------------------- + // Flush the write-cache to the physical media. + + virtual IOReturn doSynchronizeCache(); + + //----------------------------------------------------------------------- + // Start/stop the drive. + + virtual IOReturn doStart(); + virtual IOReturn doStop(); + + //----------------------------------------------------------------------- + // Return device identification strings + + virtual char * getAdditionalDeviceInfoString(); + virtual char * getProductString(); + virtual char * getRevisionString(); + virtual char * getVendorString(); + + //----------------------------------------------------------------------- + // Report the device block size in bytes. + + virtual IOReturn reportBlockSize(UInt64 * blockSize); + + //----------------------------------------------------------------------- + // Report whether the media in the ATA device is ejectable. 
+ + virtual IOReturn reportEjectability(bool * isEjectable); + + //----------------------------------------------------------------------- + // Report whether the media can be locked. + + virtual IOReturn reportLockability(bool * isLockable); + + //----------------------------------------------------------------------- + // Report the polling requirements for a removable media. + + virtual IOReturn reportPollRequirements(bool * pollRequired, + bool * pollIsExpensive); + + //----------------------------------------------------------------------- + // Report the max number of bytes transferred for an ATA read command. + + virtual IOReturn reportMaxReadTransfer(UInt64 blocksize, + UInt64 * max); + + //----------------------------------------------------------------------- + // Report the max number of bytes transferred for an ATA write command. + + virtual IOReturn reportMaxWriteTransfer(UInt64 blocksize, + UInt64 * max); + + //----------------------------------------------------------------------- + // Returns the maximum addressable sector number. + + virtual IOReturn reportMaxValidBlock(UInt64 * maxBlock); + + //----------------------------------------------------------------------- + // Report whether the media is currently present, and whether a media + // change has been registered since the last reporting. + + virtual IOReturn reportMediaState(bool * mediaPresent, + bool * changed); + + //----------------------------------------------------------------------- + // Report whether the media is removable. + + virtual IOReturn reportRemovability(bool * isRemovable); + + //----------------------------------------------------------------------- + // Report if the media is write-protected. + + virtual IOReturn reportWriteProtection(bool * isWriteProtected); + + //----------------------------------------------------------------------- + // Handles messages (notifications) from our provider. 
+ + virtual IOReturn message(UInt32 type, + IOService * provider, + void * argument); + + //----------------------------------------------------------------------- + // Returns the device type. + + virtual const char * getDeviceTypeName(); + + //----------------------------------------------------------------------- + // Power management support. Functions inherited from IOService. + + virtual IOReturn setAggressiveness(UInt32 type, UInt32 minutes); + + virtual UInt32 initialPowerStateForDomainState(IOPMPowerFlags domainState); + + virtual IOReturn setPowerState(UInt32 powerStateOrdinal, + IOService * whatDevice); +}; + +#endif /* !_IOATAHDDRIVE_H */ diff --git a/iokit/IOKit/storage/ata/IOATAHDDriveNub.h b/iokit/IOKit/storage/ata/IOATAHDDriveNub.h new file mode 100644 index 000000000..cbf7749eb --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAHDDriveNub.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * IOATAHDDriveNub.h + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * Aug 27, 1999 jliu - Created. + */ + +#ifndef _IOATAHDDRIVENUB_H +#define _IOATAHDDRIVENUB_H + +#include +#include + +class IOATAHDDrive; + +class IOATAHDDriveNub : public IOBlockStorageDevice +{ + OSDeclareDefaultStructors(IOATAHDDriveNub) + +protected: + IOATAHDDrive * _provider; + +public: + /* + * Overrides from IOService. + */ + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + /* + * Mandatory overrides from IOBlockStorageDevice. + */ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion); + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + virtual IOReturn doEjectMedia(); + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual IOReturn doSynchronizeCache(); + virtual char * getVendorString(); + virtual char * getProductString(); + virtual char * getRevisionString(); + virtual char * getAdditionalDeviceInfoString(); + virtual IOReturn reportBlockSize(UInt64 * blockSize); + virtual IOReturn reportEjectability(bool * isEjectable); + virtual IOReturn reportLockability(bool * isLockable); + virtual IOReturn reportMediaState(bool * mediaPresent, bool * changed); + virtual IOReturn reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive); + virtual IOReturn reportMaxReadTransfer (UInt64 blockSize,UInt64 * max); + virtual IOReturn reportMaxValidBlock(UInt64 * maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 * max); + virtual IOReturn reportRemovability(bool * isRemovable); + virtual IOReturn reportWriteProtection(bool * isWriteProtected); +}; + +#endif /* !_IOATAHDDRIVENUB_H */ 
diff --git a/iokit/IOKit/storage/ata/IOATAPICDDrive.h b/iokit/IOKit/storage/ata/IOATAPICDDrive.h new file mode 100644 index 000000000..aa5e65683 --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAPICDDrive.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPICDDrive.h - Generic ATAPI CD-ROM driver. + * + * HISTORY + * Sep 2, 1999 jliu - Ported from AppleATAPIDrive. + */ + +#ifndef _IOATAPICDDRIVE_H +#define _IOATAPICDDRIVE_H + +#include +#include +#include + +// ATAPI (inquiry) device type. +// +enum +{ + kIOATAPIDeviceTypeCDROM = 0x05 +}; + +// ATAPI packet commands. 
+//
+enum
+{
+    kIOATAPICommandReadSubChannel = 0x42,
+    kIOATAPICommandReadTOC        = 0x43,
+    kIOATAPICommandPlayAudioMSF   = 0x47,
+    kIOATAPICommandPauseResume    = 0x4b,
+    kIOATAPICommandStopPlay       = 0x4e,
+    kIOATAPICommandScan           = 0xba,
+    kIOATAPICommandReadCD         = 0xbe
+};
+
+//===========================================================================
+// IOATAPICDDrive
+//===========================================================================
+
+class IOATAPICDDrive : public IOATAPIHDDrive
+{
+    OSDeclareDefaultStructors(IOATAPICDDrive)
+
+protected:
+    //-----------------------------------------------------------------------
+    // Given the device type from the ATAPI Inquiry data, returns true if
+    // the device type is supported by this driver.
+
+    virtual bool matchATAPIDeviceType(UInt8 type, SInt32 * score);
+
+    //----------------------------------------------------------------------
+    // ATAPI Read Subchannel command (42).
+
+    virtual IOATACommand * atapiCommandReadSubChannel(
+                                      IOMemoryDescriptor * buffer,
+                                      UInt8                dataFormat,
+                                      UInt8                trackNumber);
+
+    //-----------------------------------------------------------------------
+    // ATAPI Read TOC command (43).
+
+    virtual IOATACommand * atapiCommandReadTOC(
+                                      IOMemoryDescriptor * buffer,
+                                      bool                 msf,
+                                      UInt8                format,
+                                      UInt8                startTrackSession);
+
+    //----------------------------------------------------------------------
+    // ATAPI Play Audio command (47).
+
+    virtual IOATACommand * atapiCommandPlayAudioMSF(
+                                      CDMSF timeStart,
+                                      CDMSF timeStop);
+
+    //----------------------------------------------------------------------
+    // ATAPI Pause/Resume command (4b).
+
+    virtual IOATACommand * atapiCommandPauseResume(
+                                      bool resume);
+
+
+    //----------------------------------------------------------------------
+    // ATAPI STOP PLAY/SCAN command (4e).
+
+    virtual IOATACommand * atapiCommandStopPlay();
+
+    //----------------------------------------------------------------------
+    // ATAPI Read CD command (be).
+ + virtual IOATACommand * atapiCommandReadCD( + IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType); + + //---------------------------------------------------------------------- + // ATAPI Scan command (ba). + + virtual IOATACommand * atapiCommandScan( + CDMSF timeStart, + bool reverse); + + //----------------------------------------------------------------------- + // Overrides the method in IOATAPIHDDrive and returns an + // IOATAPICDDriveNub instance. + + virtual IOService * instantiateNub(); + +public: + //----------------------------------------------------------------------- + // Handles read CD requests. + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + + //----------------------------------------------------------------------- + // IOATAHDDrive override. Returns the device type. + + virtual const char * getDeviceTypeName(); + + //----------------------------------------------------------------------- + // IOATAPIHDDrive override. Reports whether media is write protected. + + virtual IOReturn reportWriteProtection(bool * isWriteProtected); + + //----------------------------------------------------------------------- + // Read the Table of Contents. 
+ + virtual IOReturn readTOC(IOMemoryDescriptor * buffer); + + //----------------------------------------------------------------------- + // Play audio + + virtual IOReturn audioPause(bool pause); + + virtual IOReturn audioPlay(CDMSF timeStart, CDMSF timeStop); + + virtual IOReturn audioScan(CDMSF timeStart, bool reverse); + + virtual IOReturn audioStop(); + + virtual IOReturn getAudioStatus(CDAudioStatus *status); + + virtual IOReturn getAudioVolume(UInt8 * leftVolume, + UInt8 * rightVolume); + + virtual IOReturn setAudioVolume(UInt8 leftVolume, + UInt8 rightVolume); + + virtual IOReturn readModeSense(UInt8 * buffer, + UInt32 length, UInt8 pageCode, + UInt8 pageControl = 0); + + virtual IOReturn writeModeSelect(UInt8 * buffer, + UInt32 length); + + virtual IOReturn readSubChannel(UInt8 * buffer, + UInt32 length, + UInt8 dataFormat, + UInt8 trackNumber); + + virtual IOReturn readMCN(CDMCN mcn); + + virtual IOReturn readISRC(UInt8 track, CDISRC isrc); +}; + +#endif /* !_IOATAPICDDRIVE_H */ diff --git a/iokit/IOKit/storage/ata/IOATAPICDDriveNub.h b/iokit/IOKit/storage/ata/IOATAPICDDriveNub.h new file mode 100644 index 000000000..6a0465f93 --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAPICDDriveNub.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPICDDriveNub.h + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * 2-Sep-1999 Joe Liu (jliu) created. + */ + +#ifndef _IOATAPICDDRIVENUB_H +#define _IOATAPICDDRIVENUB_H + +#include +#include + +class IOATAPICDDrive; + +class IOATAPICDDriveNub : public IOCDBlockStorageDevice +{ + OSDeclareDefaultStructors(IOATAPICDDriveNub) + +protected: + IOATAPICDDrive * _provider; + +public: + /* + * Overrides from IOService. + */ + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + /* + * Mandatory overrides from IOBlockStorageDevice. 
+ */ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion); + + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doSynchronizeCache(); + virtual IOReturn doEjectMedia(); + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual char * getVendorString(); + virtual char * getProductString(); + virtual char * getRevisionString(); + virtual char * getAdditionalDeviceInfoString(); + virtual IOReturn reportBlockSize(UInt64 * blockSize); + virtual IOReturn reportEjectability(bool * isEjectable); + virtual IOReturn reportLockability(bool * isLockable); + virtual IOReturn reportMediaState(bool * mediaPresent, bool * changed); + virtual IOReturn reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive); + virtual IOReturn reportMaxReadTransfer (UInt64 blockSize, UInt64 * max); + virtual IOReturn reportMaxValidBlock(UInt64 * maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize, UInt64 * max); + virtual IOReturn reportRemovability(bool * isRemovable); + virtual IOReturn reportWriteProtection(bool * isWriteProtected); + + /*-----------------------------------------*/ + /* CD APIs */ + /*-----------------------------------------*/ + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + virtual UInt32 getMediaType(); + virtual IOReturn readISRC(UInt8 track, CDISRC isrc); + virtual IOReturn readMCN(CDMCN mcn); + virtual IOReturn readTOC(IOMemoryDescriptor * buffer); + + /*-----------------------------------------*/ + /* APIs exported by IOCDAudioControl */ + /*-----------------------------------------*/ + + virtual IOReturn 
audioPause(bool pause); + virtual IOReturn audioPlay(CDMSF timeStart,CDMSF timeStop); + virtual IOReturn audioScan(CDMSF timeStart,bool reverse); + virtual IOReturn audioStop(); + virtual IOReturn getAudioStatus(CDAudioStatus * status); + virtual IOReturn getAudioVolume(UInt8 * leftVolume, + UInt8 * rightVolume); + virtual IOReturn setAudioVolume(UInt8 leftVolume, UInt8 rightVolume); +}; + +#endif /* !_IOATAPICDDRIVENUB_H */ diff --git a/iokit/IOKit/storage/ata/IOATAPIDVDDrive.h b/iokit/IOKit/storage/ata/IOATAPIDVDDrive.h new file mode 100644 index 000000000..23525e6e8 --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAPIDVDDrive.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOATAPIDVDDRIVE_H +#define _IOATAPIDVDDRIVE_H + +#include +#include +#include + +enum { + kIOATAPIFeatureProfileList = 0x0000, + kIOATAPIFeatureCore = 0x0001, + kIOATAPIFeatureMorphing = 0x0002, + kIOATAPIFeatureRemovableMedium = 0x0003, + kIOATAPIFeatureRandomReadable = 0x0010, + kIOATAPIFeatureMultiRead = 0x001d, + kIOATAPIFeatureCDRead = 0x001e, + kIOATAPIFeatureDVDRead = 0x001f, + kIOATAPIFeatureRandomWrite = 0x0020, + kIOATAPIFeatureIncrStreamWrite = 0x0021, + kIOATAPIFeatureSectorErasable = 0x0022, + kIOATAPIFeatureFormattable = 0x0023, + kIOATAPIFeatureDefectManagement = 0x0024, + kIOATAPIFeatureWriteOnce = 0x0025, + kIOATAPIFeatureRestrictedOverwrite = 0x0026, + kIOATAPIFeatureDVDRWRestrictedOverwrite = 0x002c, + kIOATAPIFeatureCDTrackAtOnce = 0x002d, + kIOATAPIFeatureCDMastering = 0x002e, + kIOATAPIFeatureDVDR_RWWrite = 0x002f, + kIOATAPIFeaturePowerManagement = 0x0100, + kIOATAPIFeatureSMART = 0x0101, + kIOATAPIFeatureEmbeddedChanger = 0x0102, + kIOATAPIFeatureCDAudioAnalogPlay = 0x0103, + kIOATAPIFeatureMicrocodeUpgrade = 0x0104, + kIOATAPIFeatureTimeout = 0x0105, + kIOATAPIFeatureDVDCSS = 0x0106, + kIOATAPIFeatureRealTimeStreaming = 0x0107, + kIOATAPIFeatureLUNSerialNumber = 0x0108, + kIOATAPIFeatureDiskControlBlocks = 0x010a, + kIOATAPIFeatureDVDCPRM = 0x010b +}; + +// DVD specific command codes. + +enum { + kIOATAPICommandGetConfiguration = 0x46, + kIOATAPICommandSendKey = 0xa3, + kIOATAPICommandReportKey = 0xa4, + kIOATAPICommandReadDVDStructure = 0xad +}; + +// Format code definitions for READ DVD STRUCTURE command. + +enum { + kIODVDReadStructurePhysical = 0x00, + kIODVDReadStructureCopyright = 0x01, + kIODVDReadStructureWriteProtection = 0xC0 +}; + +#define IODVDGetDataLength16(ptr) OSReadBigInt16((void *) ptr, 0) +#define IODVDGetDataLength32(ptr) OSReadBigInt32((void *) ptr, 0) + +#if defined(__BIG_ENDIAN__) + +// Big Endian DVD structure definitions. 
+
+struct IODVDStructurePhysical
+{
+    UInt8 length[2];
+    UInt8 rsvd1[2];
+
+    UInt8 bookType    : 4,
+          partVersion : 4;
+
+#define kIODVDBookTypeDVDROM    0
+#define kIODVDBookTypeDVDRAM    1
+#define kIODVDBookTypeDVDR      2
+#define kIODVDBookTypeDVDRW     3
+#define kIODVDBookTypeDVDPlusRW 9
+
+    UInt8 diskSize    : 4,
+          maximumRate : 4;
+
+    UInt8 rsvd2     : 1,
+          layers    : 2,
+          trackPath : 1,
+          layerType : 4;
+
+    UInt8 linearDensity : 4,
+          trackDensity  : 4;
+
+    UInt8 zero1;
+    UInt8 dataAreaPSNStart[3];
+    UInt8 zero2;
+    UInt8 dataAreaPSNEnd[3];
+    UInt8 zero3;
+    UInt8 layerZeroEndSectorNumber;
+
+    UInt8 bcaFlag : 1,
+          rsvd3   : 7;
+};
+
+struct IODVDStructureWriteProtection
+{
+    UInt8 length[2];
+    UInt8 rsvd1[2];
+
+    UInt8 rsvd2 : 4,
+          mswi  : 1,
+          cwp   : 1,
+          pwp   : 1,
+          swpp  : 1;
+
+    UInt8 rsvd3[3];
+};
+
+#elif defined(__LITTLE_ENDIAN__)
+
+// Little Endian DVD structure definitions.
+
+struct IODVDStructurePhysical
+{
+    UInt8 length[2];
+    UInt8 rsvd1[2];
+
+    UInt8 partVersion : 4,
+          bookType    : 4;
+
+#define kIODVDBookTypeDVDROM    0
+#define kIODVDBookTypeDVDRAM    1
+#define kIODVDBookTypeDVDR      2
+#define kIODVDBookTypeDVDRW     3
+#define kIODVDBookTypeDVDPlusRW 9
+
+    UInt8 maximumRate : 4,
+          diskSize    : 4;
+
+    UInt8 layerType : 4,
+          trackPath : 1,
+          layers    : 2,
+          rsvd2     : 1;
+
+    UInt8 trackDensity  : 4,
+          linearDensity : 4;
+
+    UInt8 zero1;
+    UInt8 dataAreaPSNStart[3];
+    UInt8 zero2;
+    UInt8 dataAreaPSNEnd[3];
+    UInt8 zero3;
+    UInt8 layerZeroEndSectorNumber;
+
+    UInt8 rsvd3   : 7,
+          bcaFlag : 1;
+};
+
+struct IODVDStructureWriteProtection
+{
+    UInt8 length[2];
+    UInt8 rsvd1[2];
+
+    UInt8 swpp  : 1,
+          pwp   : 1,
+          cwp   : 1,
+          mswi  : 1,
+          rsvd2 : 4;
+
+    UInt8 rsvd3[3];
+};
+
+#else
+#error Unknown endianness.
+#endif + +//=========================================================================== +// IOATAPIDVDDrive +//=========================================================================== + +class IOATAPIDVDDrive : public IOATAPICDDrive +{ + OSDeclareDefaultStructors(IOATAPIDVDDrive) + +protected: + virtual IOReturn determineMediaType(UInt32 * mediaType); + + virtual IOReturn getConfiguration(UInt8 * buffer, + UInt32 length, + UInt32 * actualLength, + bool current); + + virtual IOReturn classifyDrive(bool * isDVDDrive); + + virtual bool matchATAPIDeviceType(UInt8 type, SInt32 * score); + + virtual IOService * instantiateNub(); + + virtual IOATACommand * atapiCommandGetConfiguration( + IOMemoryDescriptor * buffer, + UInt8 rt, + UInt16 sfn = 0); + + virtual IOATACommand * atapiCommandSendKey( + IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt8 agid, + const DVDKeyFormat keyFormat); + + virtual IOATACommand * atapiCommandReportKey( + IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt32 lba, + const UInt8 agid, + const DVDKeyFormat keyFormat); + + virtual IOATACommand * atapiCommandReadDVDStructure( + IOMemoryDescriptor * buffer, + UInt8 format, + UInt32 address = 0, + UInt8 layer = 0, + UInt8 agid = 0); + +public: + virtual bool init(OSDictionary * properties); + + virtual const char * getDeviceTypeName(); + + virtual UInt32 getMediaType(); + + virtual IOReturn reportWriteProtection(bool * isWriteProtected); + + virtual IOReturn reportMediaState(bool * mediaPresent, bool * changed); + + virtual IOReturn reportKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt32 lba, + const UInt8 agid, + const DVDKeyFormat keyFormat); + + virtual IOReturn sendKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt8 agid, + const DVDKeyFormat keyFormat); +}; + +#endif /* ! 
_IOATAPIDVDDRIVE_H */ diff --git a/iokit/IOKit/storage/ata/IOATAPIDVDDriveNub.h b/iokit/IOKit/storage/ata/IOATAPIDVDDriveNub.h new file mode 100644 index 000000000..1f7bddcf5 --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAPIDVDDriveNub.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPIDVDDriveNub.h + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * 2-Sep-1999 Joe Liu (jliu) created. + */ + +#ifndef _IOATAPIDVDDRIVENUB_H +#define _IOATAPIDVDDRIVENUB_H + +#include +#include +#include + +class IOATAPIDVDDrive; + +class IOATAPIDVDDriveNub : public IODVDBlockStorageDevice +{ + OSDeclareDefaultStructors(IOATAPIDVDDriveNub) + +protected: + IOATAPIDVDDrive * _provider; + +public: + /* + * Overrides from IOService. + */ + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + /* + * Mandatory overrides from IOBlockStorageDevice. 
+ */ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion); + + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doSynchronizeCache(); + virtual IOReturn doEjectMedia(); + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual char * getVendorString(); + virtual char * getProductString(); + virtual char * getRevisionString(); + virtual char * getAdditionalDeviceInfoString(); + virtual IOReturn reportBlockSize(UInt64 * blockSize); + virtual IOReturn reportEjectability(bool * isEjectable); + virtual IOReturn reportLockability(bool * isLockable); + virtual IOReturn reportMediaState(bool * mediaPresent, bool * changed); + virtual IOReturn reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive); + virtual IOReturn reportMaxReadTransfer (UInt64 blockSize, UInt64 * max); + virtual IOReturn reportMaxValidBlock(UInt64 * maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize, UInt64 * max); + virtual IOReturn reportRemovability(bool * isRemovable); + virtual IOReturn reportWriteProtection(bool * isWriteProtected); + + /*-----------------------------------------*/ + /* CD APIs */ + /*-----------------------------------------*/ + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + virtual UInt32 getMediaType(); + virtual IOReturn readISRC(UInt8 track, CDISRC isrc); + virtual IOReturn readMCN(CDMCN mcn); + virtual IOReturn readTOC(IOMemoryDescriptor * buffer); + + /*-----------------------------------------*/ + /* APIs exported by IOCDAudioControl */ + /*-----------------------------------------*/ + + virtual IOReturn 
audioPause(bool pause); + virtual IOReturn audioPlay(CDMSF timeStart, CDMSF timeStop); + virtual IOReturn audioScan(CDMSF timeStart, bool reverse); + virtual IOReturn audioStop(); + virtual IOReturn getAudioStatus(CDAudioStatus * status); + virtual IOReturn getAudioVolume(UInt8 * leftVolume, + UInt8 * rightVolume); + virtual IOReturn setAudioVolume(UInt8 leftVolume, + UInt8 rightVolume); + /*-----------------------------------------*/ + /* DVD APIs */ + /*-----------------------------------------*/ + + virtual IOReturn reportKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt32 lba, + const UInt8 agid, + const DVDKeyFormat keyFormat); + + virtual IOReturn sendKey(IOMemoryDescriptor * buffer, + const DVDKeyClass keyClass, + const UInt8 agid, + const DVDKeyFormat keyFormat); +}; + +#endif /* !_IOATAPIDVDDRIVENUB_H */ diff --git a/iokit/IOKit/storage/ata/IOATAPIHDDrive.h b/iokit/IOKit/storage/ata/IOATAPIHDDrive.h new file mode 100644 index 000000000..02ac00b7a --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAPIHDDrive.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPIHDDrive.h - Generic ATAPI Direct-Access driver. + * + * HISTORY + * Sep 2, 1999 jliu - Ported from AppleATAPIDrive. + */ + +#ifndef _IOATAPIHDDRIVE_H +#define _IOATAPIHDDRIVE_H + +#include +#include + +// ATAPI (inquiry) device type. +// +enum IOATAPIDeviceType +{ + kIOATAPIDeviceTypeDirectAccess = 0x00 +}; + +// ATAPI packet commands. +// +enum { + kIOATAPICommandTestUnitReady = 0x00, + kIOATAPICommandFormatUnit = 0x04, + kIOATAPICommandStartStopUnit = 0x1b, + kIOATAPICommandPreventAllow = 0x1e, + kIOATAPICommandSynchronizeCache = 0x35, + kIOATAPICommandModeSelect = 0x55, + kIOATAPICommandModeSense = 0x5a, + kIOATAPICommandRead = 0xa8, + kIOATAPICommandWrite = 0xaa +}; + +// ATAPI feature register flags. +// +enum { + kIOATAPIFeaturesDMA = 0x01, + kIOATAPIFeaturesOverlay = 0x02, +}; + +#define kIOATAPIMaxTransfer 65534 + + +//=========================================================================== +// IOATAPIHDDrive +//=========================================================================== + +class IOATAPIHDDrive : public IOATAHDDrive +{ + OSDeclareDefaultStructors(IOATAPIHDDrive) + +protected: + char _vendor[9]; + char _product[17]; + bool _mediaPresent; + bool _isRemovable; + ATAProtocol _atapiProtocol; + bool _isLocked; + + //----------------------------------------------------------------------- + // Selects a command protocol to use. The argument specifies whether + // the device supports DMA mode. + + virtual bool selectCommandProtocol(bool isDMA); + + //----------------------------------------------------------------------- + // Issues an ATAPI Start/Stop Unit command. + + virtual IOReturn doStartStop(bool doStart); + + //----------------------------------------------------------------------- + // Given the device type from the ATAPI Inquiry data, returns true if + // the device type is supported by this driver. 
+ + virtual bool matchATAPIDeviceType(UInt8 type, SInt32 * score); + + //----------------------------------------------------------------------- + // Setup a ATATaskFile for an ATAPI packet command from the parameters + // given. + + virtual void setupPacketTaskFile(ATATaskfile * taskfile, + ATAProtocol protocol, + UInt16 byteCount); + + //----------------------------------------------------------------------- + // Create a generic ATAPI command object. + + virtual IOATACommand * atapiCommand(ATACDBInfo * packetCommand, + IOMemoryDescriptor * transferBuffer = 0); + + //----------------------------------------------------------------------- + // Allocates and return an IOATACommand to perform a read/write operation. + + virtual IOATACommand * atapiCommandReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + + //----------------------------------------------------------------------- + // ATAPI Start/Stop Unit command (1B). + + virtual IOATACommand * atapiCommandStartStopUnit(bool doStart, + bool doLoadEject, + bool immediate); + + //----------------------------------------------------------------------- + // ATAPI Format Unit command (04). + + virtual IOATACommand * atapiCommandFormatUnit( + UInt16 interleave, + UInt8 flagBits, + UInt8 vendorBits, + IOMemoryDescriptor * formatData); + + //----------------------------------------------------------------------- + // ATAPI Synchronize Cache command (35). + + virtual IOATACommand * atapiCommandSynchronizeCache(); + + //----------------------------------------------------------------------- + // ATAPI Prevent/Allow medium removal command (1E). + + virtual IOATACommand * atapiCommandPreventAllowRemoval(bool doLock); + + //----------------------------------------------------------------------- + // ATAPI Test Unit Ready command (00). + + virtual IOATACommand * atapiCommandTestUnitReady(); + + //---------------------------------------------------------------------- + // ATAPI Mode Sense command (5a). 
+ + virtual IOATACommand * atapiCommandModeSense( + IOMemoryDescriptor * buffer, + UInt8 pageCode, + UInt8 pageControl); + + //---------------------------------------------------------------------- + // ATAPI Mode Select command (55). + + virtual IOATACommand * atapiCommandModeSelect( + IOMemoryDescriptor * buffer); + + //----------------------------------------------------------------------- + // Overrides the method in IOATAHDDrive and returns an IOATAPIHDDriveNub + // instance. + + virtual IOService * instantiateNub(); + + //----------------------------------------------------------------------- + // Overrides the method in IOATAHDDrive. Inspect the ATAPI device. + + virtual bool inspectDevice(IOATADevice * device); + + //----------------------------------------------------------------------- + // Overrides the method in IOATAHDDrive. Add ATAPI wakeup handler. + + virtual void handleActiveStateTransition( UInt32 stage, + IOReturn status ); + +public: + /* + * IOATAHDDrive overrides. + */ + virtual bool init(OSDictionary * properties); + virtual IOService * probe(IOService * provider, SInt32 * score); + + virtual ATADeviceType reportATADeviceType() const; + + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion); + + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + + virtual IOReturn doEjectMedia(); + + virtual IOReturn doFormatMedia(UInt64 byteCapacity, + IOMemoryDescriptor * formatData = 0); + + virtual IOReturn doLockUnlockMedia(bool doLock); + + virtual IOReturn doSynchronizeCache(); + + virtual IOReturn doStart(); + virtual IOReturn doStop(); + + virtual char * getVendorString(); + virtual char * getProductString(); + virtual char * getRevisionString(); + virtual char * getAdditionalDeviceInfoString(); + + virtual IOReturn reportEjectability(bool * isEjectable); + virtual IOReturn reportLockability(bool * isLockable); + virtual IOReturn 
reportPollRequirements(bool * pollRequired, + bool * pollIsExpensive); + + virtual IOReturn reportMediaState(bool * mediaPresent, + bool * changed); + + virtual IOReturn reportRemovability(bool * isRemovable); + + virtual IOReturn reportWriteProtection(bool * isWriteProtected); +}; + +#endif /* !_IOATAPIHDDRIVE_H */ diff --git a/iokit/IOKit/storage/ata/IOATAPIHDDriveNub.h b/iokit/IOKit/storage/ata/IOATAPIHDDriveNub.h new file mode 100644 index 000000000..eb2933096 --- /dev/null +++ b/iokit/IOKit/storage/ata/IOATAPIHDDriveNub.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOATAPIHDDriveNub.h + * + * This subclass implements a relay to a protocol and device-specific + * provider. + * + * HISTORY + * 2-Sep-1999 Joe Liu (jliu) created. 
+ */ + +#ifndef _IOATAPIHDDRIVENUB_H +#define _IOATAPIHDDRIVENUB_H + +#include +#include + +class IOATAPIHDDrive; + +class IOATAPIHDDriveNub : public IOBlockStorageDevice +{ + OSDeclareDefaultStructors(IOATAPIHDDriveNub) + +protected: + IOATAPIHDDrive * _provider; + +public: + /* Overrides from IOService */ + + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + /* Mandatory overrides from IOBlockStorageDevice */ + + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks, + IOStorageCompletion completion); + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor * buffer, + UInt32 block, + UInt32 nblks); + virtual IOReturn doEjectMedia(); + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual IOReturn doSynchronizeCache(); + virtual char * getVendorString(); + virtual char * getProductString(); + virtual char * getRevisionString(); + virtual char * getAdditionalDeviceInfoString(); + virtual IOReturn reportBlockSize(UInt64 * blockSize); + virtual IOReturn reportEjectability(bool * isEjectable); + virtual IOReturn reportLockability(bool * isLockable); + virtual IOReturn reportMediaState(bool * mediaPresent, bool * changed); + virtual IOReturn reportPollRequirements(bool * pollIsRequired, + bool * pollIsExpensive); + virtual IOReturn reportMaxReadTransfer (UInt64 blockSize,UInt64 * max); + virtual IOReturn reportMaxValidBlock(UInt64 * maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 * max); + virtual IOReturn reportRemovability(bool * isRemovable); + virtual IOReturn reportWriteProtection(bool * isWriteProtected); +}; + +#endif /* !_IOATAPIHDDRIVENUB_H */ diff --git a/iokit/IOKit/storage/ata/Makefile b/iokit/IOKit/storage/ata/Makefile new file mode 100644 index 000000000..7067075f2 --- /dev/null +++ 
# Header-export Makefile for iokit/IOKit/storage/ata.
# Installs/exports this directory's machine-independent headers into the
# IOKit framework header directories via the shared xnu makedefs rules.

export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd
export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def
export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule
export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir

# Destination directories inside the IOKit framework bundle.
IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A
export INCDIR = $(IOKIT_FRAMEDIR)/Headers
export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders

include $(MakeInc_cmd)
include $(MakeInc_def)

# Machine-independent subdirectory this Makefile serves.
MI_DIR = storage/ata
# Headers listed here are excluded from the export set (none currently).
NOT_EXPORT_HEADERS =

# No nested header directories to recurse into.
INSTINC_SUBDIRS =
INSTINC_SUBDIRS_PPC =
INSTINC_SUBDIRS_I386 =

EXPINC_SUBDIRS = ${INSTINC_SUBDIRS}
EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC}
EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386}

# Every .h file in the source directory, discovered at make time.
ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h))

# Nothing installed publicly or privately; headers are export-only.
INSTALL_MI_LIST =
INSTALL_MI_LCL_LIST = ""
INSTALL_MI_DIR = $(MI_DIR)

# Export all headers except those explicitly excluded above.
EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS))
EXPORT_MI_DIR = IOKit/$(MI_DIR)

include $(MakeInc_rule)
include $(MakeInc_dir)
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* ============================================================================= + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOBasicSCSI.h + * + * This class implements generic SCSI functionality. + */ + +#ifndef _IOBASICSCSI_H +#define _IOBASICSCSI_H + +#include +#include +#include +#include +#include + +const int kMinInqSize = 5; /* minimal, supported by all devs */ +const int kReadCapSize = 8; +const int kModeSenseSize = 64; +const int kMaxInqSize = 256; + +const int kCheckCondition = 0x02; +const int kUnitAttention = 0x06; + +/* SCSI operation codes: */ + +const UInt8 SOP_TUR = 0x00; /* test unit ready */ +const UInt8 SOP_INQUIRY = 0x12; /* inquiry */ +const UInt8 SOP_MODESELECT = 0x15; /* mode select */ +const UInt8 SOP_MODESENSE = 0x1a; /* mode sense */ +const UInt8 SOP_READCAP = 0x25; /* read capacity */ +const UInt8 SOP_READ10 = 0x28; /* read (10-byte) */ +const UInt8 SOP_WRITE10 = 0x2a; /* write (10-byte) */ + +struct IOTURcdb { + UInt8 opcode; + UInt8 lunbits; + UInt8 reserved1; + UInt8 reserved2; + UInt8 reserved3; + UInt8 ctlbyte; +}; + +struct IORWcdb { /* CDB for read and write */ + UInt8 opcode; /* read=0x28, write=0x2a */ + UInt8 lunbits; /* lun and control bits */ + UInt8 lba_3; /* logical block address: msb */ + UInt8 lba_2; + UInt8 lba_1; + UInt8 lba_0; /* logical block address: lsb */ + UInt8 reserved; + UInt8 count_msb; /* block count: msb */ + UInt8 count_lsb; /* block count: lsb */ + UInt8 ctlbyte; +}; + +struct 
IOInquirycdb { /* inquiry */ + UInt8 opcode; /* 0x12 */ + UInt8 lunbits; /* lun and control bits */ + UInt8 pagecode; /* page code/op code */ + UInt8 reserved; + UInt8 len; /* allocation length */ + UInt8 ctlbyte; +}; + +struct IOReadCapcdb { + UInt8 opcode; + UInt8 lunbits; + UInt8 lba_3; + UInt8 lba_2; + UInt8 lba_1; + UInt8 lba_0; + UInt8 reserved1; + UInt8 reserved2; + UInt8 reserved3; + UInt8 ctlbyte; +}; + +struct IOModeSensecdb { + UInt8 opcode; + UInt8 lunbits; /* lun and control bits */ + UInt8 pagecode; + UInt8 reserved; + UInt8 len; /* allocation length */ + UInt8 ctlbyte; +}; + +struct IOModeSelectcdb { + UInt8 opcode; + UInt8 lunbits; + UInt8 reserved1; + UInt8 reserved2; + UInt8 paramlen; + UInt8 ctlbyte; +}; + +/*! + * @enum stateValues + * @discussion + * These state values are used to determin the state of an IO operation. + * Some are simply for debugging use. + * @constant kNone + * Nothing happening. + * @constant kAsyncReadWrite + * Doing an asynchronous IO operation. + * @constant kSimpleSynchIO + * Doing a simple synchronous IO operation. + * @constant kHandlingUnitAttention + * Currently handling a Unit-Attention condition. + * @constant kDoneHandlingUnitAttention + * Done handling Unit Attention; command should be reissued. + * @constant kAwaitingPower + * Awaiting power. + * @constant kMaxValidState + * The maximum valid state value. + * @constant kMaxStateValue + * The maximum state value possible. + */ +enum stateValues { + kNone = 0, + kAsyncReadWrite = 1, + kSimpleSynchIO = 2, + kHandlingUnitAttention = 3, + kHandlingRecoveryAfterBusReset = 4, + kAwaitingPower = 5, + + kMaxValidState = kAwaitingPower, + + kMaxStateValue = 255 +}; +/*! + * @typedef statevalue + * @discussion + * Shorthand for enum StateValues. + */ +typedef enum stateValues stateValue; + +const bool kSync = true; /* type info for requests awaiting power */ +const bool kAsync = false; + +const UInt32 kMaxRetries = 3; + +/*! 
+ * @class + * IOBasicSCSI : public IOService + * @abstract + * Basic SCSI support functions. + * @discussion + * IOBasicSCSI provides a set of basic SCSI functions and support + * utilities. It is intended to be the base class for a SCSI Transport + * Driver. + */ + +class IOBasicSCSI : public IOService { + + OSDeclareAbstractStructors(IOBasicSCSI) + +public: + + /*! + * @struct completion + * @field action + * The C function called upon completion of the operation. + * @field target + * The C++ class pointer, passed to tha action function. + * @field param + * A value passed to the action function. This value is not touched. + */ + /*! + * @struct context + * @discussion + * The context structure contains all persistent information needed for a + * synchronous or asynchronous IO operation. + * @field completion + * The completion information for an asynchronous read or write operation. + * @field state + * The current state of the operation. + * @field step + * The current step value, if we are handling a Unit Attention. + * @field originalContext + * A pointer to the context for the command that caused the Unit Attention + * condition. + * @field scsireq + * A pointer to the IOSCSIRequest object. + * @field memory + * The data buffer for the operation. A pointer to an IOMemoryDescriptor. + * @field scsiresult + * A pointer to the IOSCSIResult object. + * @field desiredPower + * The desired power level for the operation to execute. + * @field isSync + * True if synchronous; False if asynchronous. + * @field next + * A pointer to a context structure, used as a queue forward-link. + * @field sync + * A syncer used to block a thread awaiting a power level, or for completion + * of a synchronous operation. + */ + struct context { + + /* Completion information for our client, used only for async operations. + * Typically this information will only be used by subclasses. 
+ */ + IOStorageCompletion completion; /* function to call */ + + /* Parameters used during an IO retry: */ + + stateValue state; /* what state we're in */ + UInt32 step; + struct context *originalIOContext; /* original SCSI IO if doing a retry */ + bool retryInProgress; + UInt32 retryCount; + + IOMemoryDescriptor *memory; + + UInt32 desiredPower; /* desired power level state */ + bool isSync; /* true if sync, false if async */ + struct context *next; /* for queue of requests pending power */ + /* Parameters to hand off to the SCSI provider: */ + + IOSCSICommand *scsireq; + SCSISenseData *senseData; + IOMemoryDescriptor *senseDataDesc; + + IOSyncer *sync; /* to wait for completion */ + }; + + /* Overrides from IOService: */ + + virtual void free(void); + + virtual bool init(OSDictionary * properties); + + /*! + * @function message + * @discussion + * This override allows us to receive notification of Bus Reset events from + * the SCSI Device. + */ + virtual IOReturn message(UInt32 type,IOService * provider,void * argument); + + /*! + * @function probe + * @abstract + * Determine if device matches expected type. + * @discussion + * This method is responsible for matching the device type. It calls + * doInquiry to issue a SCSI Inquiry command to the device, then calls + * deviceTypeMatches to ensure that the device type matches the expected + * type. (The Vendor, Product, and Revision strings are unconditionally + * copied from the inquiry data). If deviceTypeMatches returns true, "this" is + * returned. If the device type does not match, NULL is returned. + * + * The default implementation passes the score parameter to deviceTypeMatches + * so that method may alter the match score. + */ + virtual IOService * probe(IOService * provider,SInt32 * score); + + virtual bool start(IOService *provider); + + /* --- end of IOService overrides --- */ + + /*! + * @function deviceTypeMatches + * @abstract + * Determine if device type matches expected type. 
+ * @discussion + * This method must be implemented by a device-specific subclass. + * @param inqBuf + * A pointer to the SCSI inquiry data for the device. + * @param inqLen + * The size of the data in the inquiry buffer. + * @param score + * A pointer to the match score, which will be returned by probe. + * @result + * True indicates a match; False indicates a failure. + */ + virtual bool deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 *score) = 0; + + /*! + * @function getAdditionalDeviceInfoString + * @abstract + * Return additional informational string for the device. + * @result + * A pointer to a static character string. The default implementation + * returns "[SCSI]" . + */ + virtual char * getAdditionalDeviceInfoString(void); + + /*! + * @function getVendorString + * @abstract + * Return Vendor Name string + * @result + * A pointer to a static character string, copied from the inquiry data. + */ + virtual char * getVendorString(void); + + /*! + * @function getProductString + * @abstract + * Return Product Name string for the device. + * @result + A pointer to a static character string, copied from the inquiry data. + */ + virtual char * getProductString(void); + + /*! + * @function getRevisionString + * @abstract + * Return Product Revision string for the device. + * @result + * A pointer to a static character string, copied from the inquiry data. + */ + virtual char * getRevisionString(void); + + /*! + * @function reportBlockSize + * @abstract + * Report the block size for the device, in bytes. + * @discussion + * This method returns the block size for the media. The default + * implementation obtains the block size from the SCSI Read Capacity + * command. Since the result of the Read Capacity is used by this + * method and reportMaxValidBlock, this method either returns a cached + * value or calls doReadCapacity to issue the command and cache both + * values. + * @param blockSize + * Pointer to returned block size value. 
+ */ + virtual IOReturn reportBlockSize(UInt64 *blockSize); + + /*! + * @function reportEjectability + * @abstract + * Report if the media is ejectable under software control. + * @discussion + * This method reports whether the media is ejectable under software + * control. The default implementation always reports that removable + * media is ejectable. + * + * This method should only be called if the media is known to be removable. + * @param isEjectable + * Pointer to returned result. True indicates the media is ejectable, False indicates + * the media cannot be ejected under software control. + */ + virtual IOReturn reportEjectability(bool *isEjectable); + + /*! + * @function reportLockability + * @abstract + * Report if the media is lockable under software control. + * @discussion + * This method reports whether the media can be locked under software + * control, to prevent the user from removing the media manually, e.g. + * by pressing a button on the drive. This method is only called by + * the generic driver when the media is known to be removable. The + * default implementation always returns true. + * + * This method should only be called if the media is known to be removable. + * @param isLockable + * Pointer to returned result. True indicates the media can be locked in place; False + * indicates the media cannot be locked by software. + */ + virtual IOReturn reportLockability(bool *isLockable); + + /*! + * @function reportMaxReadTransfer + * @abstract + * Report the maximum allowed byte transfer for read operations. + * @discussion + * Some devices impose a maximum data transfer size. Because this limit + * may be determined by the size of a block-count field in a command, the limit may + * depend on the block size of the transfer. + * The default implementation reports blocksize * 65536, which is the maximum + * number of bytes that can be transferred + * in a SCSI command with a standard 16-bit block count field. 
+ * @param blockSize + * The block size desired for the transfer. + * @param max + * Pointer to returned result. + */ + virtual IOReturn reportMaxReadTransfer (UInt64 blocksize,UInt64 *max); + + /*! + * @function reportMaxValidBlock + * @abstract + * Report the highest valid block for the device. + * @discussion + * This method reports the maximum allowable block number. The default + * implementation obtains the block number from the SCSI Read Capacity + * command. Since the result of the Read Capacity is used by this + * method and reportBlockSize, this method either returns a cached + * value or calls doReadCapacity to issue the command and cache both + * values. + * @param maxBlock + * Pointer to returned result + */ + virtual IOReturn reportMaxValidBlock(UInt64 *maxBlock); + + /*! + * @function reportMaxWriteTransfer + * @abstract + * Report the maximum allowed byte transfer for write operations. + * @discussion + * Some devices impose a maximum data transfer size. Because this limit + * may be determined by the size of a block-count field in a command, the limit may + * depend on the block size of the transfer. + * The default implementation reports blocksize * 65536, which is the maximum + * number of bytes that can be transferred + * in a SCSI command with a standard 16-bit block count field. + * @param blockSize + * The block size desired for the transfer. + * @param max + * Pointer to returned result. + */ + virtual IOReturn reportMaxWriteTransfer(UInt64 blocksize,UInt64 *max); + + /*! + * @function reportPollRequirements + * @abstract + * Report if it's necessary to poll for media insertion, and if polling is expensive. + * @discussion + * This method reports whether the device must be polled to detect media + * insertion, and whether a poll is expensive to perform. + * + * The term "expensive" typically implies a device that must be spun-up to detect media, + * as on a PC floppy. Most devices can detect media inexpensively. 
+ * + * The default implementation of this method always reports an + * inexpensive poll (pollIsExpensive = false), and that all removable + * media must be polled. + * @param pollRequired + * Pointer to returned result. True indicates that polling is required; False indicates + * that polling is not required to detect media. + * @param pollIsExpensive + * Pointer to returned result. True indicates that the polling operation is expensive; + * False indicates that the polling operation is cheap. + */ + virtual IOReturn reportPollRequirements(bool *pollRequired,bool *pollIsExpensive); + + /*! + * @function reportRemovability + * @abstract + * Report whether the media is removable or not. + * @discussion + * This method reports whether the media is removable, but it does not + * provide detailed information regarding software eject or lock/unlock capability. + * + * The default implementation of this method examines the cached + * Inquiry data to determine if media is removable. If the RMB bit + * (0x80 of Inquiry data byte 1) is set, the media is removable. If + * there is no Inquiry data, the media is reported to be nonremovable. + * + * This method also sets the instance variable _removable. + * @param isRemovable + * Pointer to returned result. True indicates that the media is removable; False + * indicates the media is not removable. + */ + virtual IOReturn reportRemovability(bool *isRemovable); + + /*! + * @function reportWriteProtection + * @abstract + * Report whether the media is write-protected or not. + * @discussion + * The default implementation of this method issues a SCSI Mode Sense + * command to test the WP bit( 0x80 of byte 2 of the Mode Sense Header + * data). A request is made for Mode Sense Page 1, though any valid + * page will return a header. If the bit is set, the media is considered + * write-protected. + * @param isWriteProtected + * Pointer to returned result. 
True indicates that the media is write-protected (it + * cannot be written); False indicates that the media is not write-protected (it + * is permissible to write). + */ + virtual IOReturn reportWriteProtection(bool *isWriteProtected); + +protected: + + /*! + * @function createReadCdb + * @abstract + * Create a SCSI CDB for a read operation. + * @discussion + * Override this to control the cdb created for a read operation. + * The default implementation creates a 10-byte read command with + * disconnect allowed, 8-byte autosense, and a 2-second timeout. + * @param cdb + * A pointer to the CDB bytes. + * @param cdbLength + * The length of the CDB in bytes. + * @param block + * The device block to be read. + * @param nblks + * The number of blocks to be transferred. + * @param maxAutoSenseLength + * The maximum size of the autosense data, in bytes. A value of zero + * will disable autosense. + * @param timeoutSeconds + * The command timeout in seconds. + * @result + * The IOSCSICommandOptions returned will be used to issue the command. + */ + virtual UInt32 createReadCdb( + UInt8 *cdb, /* in */ + UInt32 *cdbLength, /* out */ + UInt32 block, /* in */ + UInt32 nblks, /* in */ + UInt32 *maxAutoSenseLength, /* out */ + UInt32 *timeoutSeconds); /* out */ + + /*! + * @function createWriteCdb + * @abstract + * Create a SCSI CDB for a write operation. + * @discussion + * Override this to control the cdb created for a write operation. + * The default implementation creates a 10-byte write command with + * disconnect allowed, 8-byte autosense, and a 2-second timeout. + * @param cdb + * A pointer to the CDB bytes. + * @param cdbLength + * The length of the CDB in bytes. + * @param block + * The device block to be written. + * @param nblks + * The number of blocks to be transferred. + * @param maxAutoSenseLength + * The maximum size of the autosense data, in bytes. A value of zero + * will disable autosense. + * @param timeoutSeconds + * The command timeout in seconds. 
+ * @result + * The IOSCSICommandOptions returned will be used to issue the command. + */ + virtual UInt32 createWriteCdb( + UInt8 *cdb, /* in */ + UInt32 *cdbLength, /* out */ + UInt32 block, /* in */ + UInt32 nblks, /* in */ + UInt32 *maxAutoSenseLength, /* out */ + UInt32 *timeoutSeconds); /* out */ + + + /*! + * @function doInquiry + * @abstract + * Obtain SCSI Inquiry data from the device. + * @discussion + * This method issues a SCSI Inquiry command to the device, to obtain + * the result in the supplied buffer. The method first issues an + * inquiry with a 5-byte length, to obtain the full length of the + * devices inquiry data. The second Inquiry command is issued to get + * the full inquiry data (limited to maxLen, of course). + * @param inqBuf + * A pointer to the buffer. + * @param maxLen + * The maximum number of bytes the buffer can contain. + * @param actualLen + * A pointer to the returned byte count actually transferred. + */ + virtual IOReturn doInquiry(UInt8 *inqBuf,UInt32 maxLen,UInt32 *actualLen); + + /* ---------------- Internally used methods. ---------------- */ + + /* + * @group + * Internally Used Methods + * @discussion + * These methods are used internally, and will not generally be modified. + */ + + /*! + * @function allocateContext + * @abstract + * Allocate a context structure for use with the current IO operation. + */ + virtual struct context * allocateContext(void); + + /*! + * @function allocateInquiryBuffer + * @abstract + * Allocate an inquiry buffer. + * @param buf + * A pointer for the returned buffer pointer. + * @param size + * The requested size of the buffer, in bytes. + */ + virtual IOReturn allocateInquiryBuffer(UInt8 **buf,UInt32 size); + + /*! + * @function allocateTempBuffer + * @abstract + * Allocate a buffer for temporary use. + * @param buf + * A pointer for the returned buffer pointer. + * @param size + * The requested size of the buffer, in bytes. 
+ */ + virtual IOReturn allocateTempBuffer(UInt8 **buf,UInt32 size); + + /*! + * @function allocateReadCapacityBuffer + * @abstract + * Allocate a buffer for Read-Capacity data. + * @param buf + * A pointer for the returned buffer pointer. + * @param size + * The requested size of the buffer, in bytes. + */ + virtual IOReturn allocateReadCapacityBuffer(UInt8 **buf,UInt8 size); + + /*! + * @function automaticRetry + * @abstract + * Return TRUE if we should automatically retry the command just completed. + * @discussion + * The default implementation of this method reacts to Unit Attention and + * Bus Reset conditions, possibly starting the recovery processes for those + * conditions and arranging that the subject command is retried after + * the recovery procedure finishes. + * @param cx + * A pointer to the context for the command just completed. + */ + virtual bool automaticRetry(struct context *cx); + + /*! + * @function beginBusResetRecovery + * @abstract + * Begin the Bus Reset recovery process. + * @discussion + * This method can be overridden to issue the first command necessary + * to perform the Bus Reset recovery process for the device. + * + * The default implementation does nothing and simply calls finishBusResetRecovery. + */ + virtual void beginBusResetRecovery(void); + + /*! + * @function beginUnitAttentionRecovery + * @abstract + * Begin the Unit Attention recovery process. + * @discussion + * This method can be overridden to issue the first command necessary + * to perform the Bus Reset recovery process for the device. + * + * The default implementation does nothing and simply calls finishUnitAttentionRecovery. + */ + virtual void beginUnitAttentionRecovery(void); + + /*! + * @function busResetRecoveryCommandComplete + * @abstract + * Handle a command completion during the Bus Reset recovery process. + * @discussion + * This method can be overridden to check the result of each command issued + * during the Bus Reset recovery process for the device. 
Typically it would + * bump the "step" value and issue the next command, calling finishBusResetRecovery + * when the process is complete. + * + * The default implementation does nothing. + */ + virtual void busResetRecoveryCommandComplete(struct context *cx); + + /*! + * @function customAutomaticRetry + * @abstract + * Return TRUE if we should automatically retry the command just completed. + * @discussion + * This method should be overridden to allow checking for, and causing, an + * automatic retry of a command. + * + * The default implementation of this method does nothing except return FALSE. + * @param cx + * A pointer to the context for the command just completed. + */ + virtual bool customAutomaticRetry(struct context *cx); + + /*! + * @function deleteContext + * @abstract + * Delete a context structure. + * @discussion + * This method also issues a "release" for the IO buffer and/or lock, if any. + * @param cx + * A pointer to the context structure to be deleted. + */ + virtual void deleteContext(struct context *cx); + + /*! + * @function deleteInquiryBuffer + * @abstract + * Delete an inquiry data buffer. + * @param buf + * A pointer to the buffer. + * @param size + * The requested size of the buffer, in bytes. + */ + virtual void deleteInquiryBuffer(UInt8 *buf,UInt32 size); + + /*! + * @function deleteTempBuffer + * @abstract + * Delete a temporary data buffer. + * @param buf + * A pointer to the buffer. + * @param len + * The requested size of the buffer, in bytes. + */ + virtual void deleteTempBuffer(UInt8 *buf,UInt32 len); + + /*! + * @function deleteReadCapacityBuffer + * @abstract + * Delete a Read-Capacity data buffer. + * @param buf + * A pointer to the buffer. + * @param len + * The requested size of the buffer, in bytes. + */ + virtual void deleteReadCapacityBuffer(UInt8 *buf,UInt32 len); + + /*! 
+ * @function doReadCapacity
+ * @abstract
+ * Obtain the media's block size and maximum valid block number.
+ * @discussion
+ * The default implementation of this method issues a standard SCSI
+ * Read Capacity command. The block size and maximum valid block are
+ * extracted from the returned data in an endian-neutral way.
+ * @param blockSize
+ * A pointer to the returned block size value.
+ * @param maxBlock
+ * A pointer to the returned maximum block number.
+ */
+ virtual IOReturn doReadCapacity(UInt64 *blockSize,UInt64 *maxBlock);
+
+ /*!
+ * @function finishBusResetRecovery
+ * @abstract
+ * Finish up after the Bus Reset recovery process is complete.
+ * @discussion
+ * This method would usually not require an override.
+ */
+ virtual void finishBusResetRecovery(void);
+
+ /*!
+ * @function finishUnitAttentionRecovery
+ * @abstract
+ * Finish up after the Unit Attention recovery process is complete.
+ * @discussion
+ * This method would usually not require an override.
+ */
+ virtual void finishUnitAttentionRecovery(void);
+
+ /*!
+ * @function getBlockSize
+ * @abstract
+ * Return the device block size.
+ * @discussion
+ * This method obtains the block size from the Read-Capacity data. If RC data is
+ * not yet cached, a call is made to doReadCapacity to obtain the data.
+ */
+ virtual UInt64 getBlockSize(void);
+
+
+ /*!
+ * @function dequeueCommands
+ * @abstract
+ * Dequeue commands previously enqueued awaiting the proper device power level.
+ * @discussion
+ * This method is called when a command is queued (from queueCommand), when a call
+ * completes (from RWCompletion), and when the device power level changes. All commands
+ * for which the device power level is proper are immediately dequeued.
+ *
+ * Queued synchronous commands are simply "awakened" by unlocking a lock. The originating
+ * thread then continues and issues the command. Asynchronous commands are immediately
+ * dispatched via a call to standardAsyncReadWriteExecute.
+ */
+ virtual void dequeueCommands(void);
+
+ /*!
+ * @function queueCommand + * @abstract + * Queue commands awaiting the proper device power level. + * @discussion + * This method is called prior to issuing any IO command, so that each command can + * be enqueued awaiting its desired device power level. After queuing the command, a + * call is made to dequeueCommands to attempt to dequeue any available command that can + * be executed (including the one just queued). Putting commands into the queue ensures + * that the proper sequence is maintained. + * @param cx + * The context for the command being queued. + * @param isSync + * True if the command is synchronous; False if the command is asynchronous. + * @param desiredPower + * The device power level needed before the command can execute. + */ + virtual void queueCommand(struct context *cx,bool isSync,UInt32 desiredPower); + + /*! + * @function RWCompletion + * @abstract + * Asynchronous read/write completion routine. + * @discussion + * A subclass must implement the read-write completion, called upon completion + * of an IO started by doAsyncReadWrite. + * @param cx + * A pointer to the context structure for the completing command. + */ + virtual void RWCompletion(struct context *cx) = 0; + + /*! + * @function setupBusResetRecovery + * @abstract + * Set up to begin Bus Reset recovery. + * @discussion + * This method would usually not require an override. + */ + virtual void setupBusResetRecovery(void); + + /*! + * @function setupUnitAttentionRecovery + * @abstract + * Set up to begin Unit Attention recovery. + * @discussion + * This method would usually not require an override. + */ + virtual void setupUnitAttentionRecovery(struct context *cx); + + /*! + * @function simpleAsynchIO + * @abstract + * Issue a simple asynchronous SCSI command. + * @discussion + * This method issues a single SCSI command. + * The SCSI command must already be set up in the context structure. + * @param cx + * A pointer to the context structure for the command. 
+ */
+ virtual IOReturn simpleAsynchIO(struct context *cx);
+
+ /*!
+ * @function simpleSynchIO
+ * @abstract
+ * Issue a simple synchronous SCSI command.
+ * @discussion
+ * This method issues a single SCSI command and waits for the command
+ * to complete. The SCSI command must already be set up in the context
+ * structure.
+ * @param cx
+ * A pointer to the context structure for the command.
+ */
+ virtual IOReturn simpleSynchIO(struct context *cx);
+
+ /*!
+ * @function standardAsyncReadWrite
+ * @abstract
+ * Start an asynchronous read or write operation.
+ * @discussion
+ * This method starts an asynchronous read or write operation. No
+ * incoming parameters are validated. The default implementation
+ * calls createReadCdb or createWriteCdb,
+ * then issues a SCSI command to IOSCSIDevice. If the command is
+ * accepted, then the completion will be called at some future time.
+ * @result
+ * The only possible returns from this method are:
+ *
+ * kIOReturnSuccess, meaning that the IO was accepted by the transport
+ * driver's provider (e.g. IOSCSIDevice), and that the completion
+ * function will be called when the IO completes, i.e. target->action(param).
+ *
+ * kIOReturnNoMemory, meaning that memory allocation failed.
+ *
+ * Other kIOReturn codes from the provider which occurred
+ * because the IO was not accepted in that provider's queue. This
+ * might indicate a full queue or bad parameter.
+ * @param buffer
+ * An IOMemoryDescriptor describing the data-transfer buffer. The data direction
+ * is contained in the IOMemoryDescriptor. Responsibility for releasing the descriptor
+ * rests with the caller.
+ * @param block
+ * The starting block number of the data transfer.
+ * @param nblks
+ * The integral number of blocks to be transferred.
+ * @param action
+ * The C function called upon completion of the data transfer.
+ * @param target
+ * The C++ class "this" pointer, passed as an argument to "action."
+ * @param param
+ * This value is passed as an argument to "action." It is not validated or modified.
+ */
+ virtual IOReturn standardAsyncReadWrite(IOMemoryDescriptor *buffer,
+ UInt32 block,UInt32 nblks,
+ IOStorageCompletion completion);
+
+ /*!
+ * @function standardAsyncReadWriteExecute
+ * @abstract
+ * Issue an asynchronous read/write operation after dequeuing.
+ * @param cx
+ * A pointer to the context structure for the command.
+ */
+ virtual IOReturn standardAsyncReadWriteExecute(struct context *cx);
+
+ /*!
+ * @function standardSyncReadWrite
+ * @abstract
+ * Perform a synchronous read or write operation.
+ * @param buffer
+ * An IOMemoryDescriptor describing the data-transfer buffer. The data direction
+ * is contained in the IOMemoryDescriptor. Responsibility for releasing the descriptor
+ * rests with the caller.
+ * @param block
+ * The starting block number of the data transfer.
+ * @param nblks
+ * The integral number of blocks to be transferred.
+ */
+ virtual IOReturn standardSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks);
+
+ /*!
+ * @function stringFromState
+ * @abstract
+ * Return a string description of a state value.
+ * @discussion
+ * Used for debugging.
+ * @param state
+ * The state to be converted to a string description.
+ */
+ virtual char * stringFromState(stateValue state);
+
+ /*!
+ * @function unitAttentionRecoveryCommandComplete
+ * @abstract
+ * Handle a command completion during the Unit Attention recovery process.
+ * @discussion
+ * This method can be overridden to check the result of each command issued
+ * during the Unit Attention recovery process for the device. Typically it would
+ * bump the "step" value and issue the next command, calling finishUnitAttentionRecovery
+ * when the process is complete.
+ *
+ * The default implementation does nothing.
+ */
+ virtual void unitAttentionRecoveryCommandComplete(struct context *cx);
+
+ /*!
+ * @function unitAttentionDetected + * @abstract + * Determine if a Unit Attention condition occurred. + * @param cx + * A pointer to the context structure for the command just executed. + */ + virtual bool unitAttentionDetected(struct context *cx); + +public: + + /*! + * @function genericCompletion + * @abstract + * Generic IO completion function. + * @discussion + * This method handles completion of a SCSI command. It implements a + * simple state machine to handle a Unit Attention condition on a + * command. + * + * This method must be public so we can reach it from + * the C-language callback "glue" routine. It should not be called + * from outside this class. + * + * + * + * If a Unit Attention condition occurs, we set the state to + * kHandlingUnitAttention and call handleUnitAttention to do whatever + * is necessary to clear the condition. Eventually, handleUnitAttention + * resets the state to kDoneHandlingUnitAttention, which will allow + * the state machine to reissue the original command. + * + * If we are already processing a Unit Attention, then genericCompletion + * increments a step counter and calls handleUnitAttention. The step + * counter allows handleUnitAttention to issue multiple SCSI commands + * to clear the condition. The handleUnitAttention method is called + * repeatedly, until the state is set to kDoneHandlingUnitAttention. + * + * If this operation is a normal asynchronous read or write (usually + * started by standardAsyncReadWrite, though this is not required), + * then a call is made to RWCompletion, followed by deletion of the + * context structure for the command. RWCompletion is implemented by + * the subclass of IOBasicSCSI, for example in IOSCSIHDDrive. + * @param cx + * A pointer to the context structure for the command. 
+ */ + virtual void genericCompletion(struct context *cx); + + /* + * @endgroup + */ + +protected: + + /* + * @group + * Power Management Methods + * @discussion + * A subclass must implement these to report the power level required to do various commands. + */ + + /*! + * @function getExecuteCDBPowerState + * @abstract + * Return the required device power level to execute a client CDB. + */ + virtual UInt32 getExecuteCDBPowerState(void) = 0; + + /*! + * @function getInquiryPowerState + * @abstract + * Return the required device power level to issue an Inquiry command. + */ + virtual UInt32 getInquiryPowerState(void) = 0; + + /*! + * @function getReadCapacityPowerState + * @abstract + * Return the required device power level to issue a Read Capacity command. + */ + virtual UInt32 getReadCapacityPowerState(void) = 0; + + /*! + * @function getReadWritePowerState + * @abstract + * Return the required device power level to issue a data read or write. + */ + virtual UInt32 getReadWritePowerState(void) = 0; + + /*! + * @function getReportWriteProtectionPowerState + * @abstract + * Return the required device power level to determine media write protection. + */ + virtual UInt32 getReportWriteProtectionPowerState(void) = 0; + + /*! + * @function powerTickle + * @abstract + * Check for the device power state currently being in the desired state. + * @discussion + * A subclass must implement powerTickle, which is called when we desire power to + * execute a command. PowerTickle may handle generic or a subclass-expanded set of + * power states. The implementation will usually relay the call to the Power Management + * subsystem function activityTickle. For a device without power management capability, + * the implementation should always return True. + * @param desiredState + * The desired device power level. + * @result + * True if power is in the desired state (or better); False if the caller must wait + * until power is available. 
+ */ + virtual bool powerTickle(UInt32 desiredState) = 0; + + /* + * @endgroup + */ + + /*! + * @var _provider + * A pointer to our provider. + */ + IOSCSIDevice * _provider; + + /*! + * @var _busResetContext + * A pointer to a context struct to be used by recoverAfterBusReset. + */ + struct context * _busResetContext; + + /*! + * @var _unitAttentionContext + * A pointer to a context struct to be used by handleUnitAttention. + */ + struct context * _unitAttentionContext; + + /*! + * @var _busResetRecoveryInProgress + * True if recovery from Bus Reset is in progress. + */ + bool _busResetRecoveryInProgress; + + /*! + * @var _unitAttentionRecoveryInProgress + * True if recovery from Unit Attention is in progress. + */ + bool _unitAttentionRecoveryInProgress; + + /* Device information : */ + + /*! + * @var _inqBuf + * A pointer to the allocate Inquiry Data buffer. + */ + UInt8 * _inqBuf; /* the Inquiry data buffer */ + + /*! + * @var _inqBufSize + * The size of the inquiry data buffer, in bytes. + */ + UInt32 _inqBufSize; /* size of the buffer */ + + /*! + * @var _inqLen + * The number of valid bytes of inquiry data. + */ + UInt32 _inqLen; /* valid bytes in buffer */ + + /*! + * @var _vendor + * The Vendor Name string from the inquiry data, null-terminated. + */ + char _vendor[9]; /* info from Inquiry data */ + + /*! + * @var _product + * The Product Name string from the inquiry data, null-terminated. + */ + char _product[17]; + + /*! + * @var _rev + * The Product Revision string from the inquiry data, null-terminated. + */ + char _rev[5]; + + /* Since we get both of these items from the same command, we + * just cache both values if we get either call, so we only + * have to issue the command once. + */ + + /*! + * @var _readCapDone + * True if we have issued a Read-Capacity command to obtain the + * values for _maxBlock and _blockSize. + */ + bool _readCapDone; + + /*! + * @var _removable + * True if the media is removable; False if the media is fixed. 
+ */ + bool _removable; + + /*! + * @var _maxBlock + * The highest valid block on the media, relative to zero. + */ + UInt64 _maxBlock; + + /*! + * @var _blockSize + * The block size of the media in bytes. + */ + UInt64 _blockSize; + + /* The queue of pending requests awaiting power: */ + + /*! + * @struct queue + * @discussion + * A data structure for a queue. + * @field head + * A pointer to the head item. + * @field tail + * A pointer to the tail item. + * @field lock + * A lock used to protect the queue during changes. + */ + /*! + * @var _powerQueue + * A queue structure containing operations queued awaiting power level. + */ + struct queue { + struct context * head; + struct context * tail; + IOLock * lock; + } _powerQueue; + +}; +#endif diff --git a/iokit/IOKit/storage/scsi/IOSCSICDDrive.h b/iokit/IOKit/storage/scsi/IOSCSICDDrive.h new file mode 100644 index 000000000..8acd8c08f --- /dev/null +++ b/iokit/IOKit/storage/scsi/IOSCSICDDrive.h @@ -0,0 +1,410 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOSCSICDDrive.h + * + * This class implements SCSI CDROM functionality. + * + * Subclasses may modify the operations to handle device-specific variations. + */ + +#ifndef _IOSCSICDDRIVE_H +#define _IOSCSICDDRIVE_H + +#include +#include +#include +#include + +/* SCSI (inquiry) device type. */ + +enum { + kIOSCSIDeviceTypeCDROM = 0x05 +}; + +/* SCSI commands. */ + +enum { + kIOSCSICommandReadSubChannel = 0x42, + kIOSCSICommandReadTOC = 0x43, + kIOSCSICommandPlayAudioMSF = 0x47, + kIOSCSICommandPauseResume = 0x4b, + kIOSCSICommandStopPlay = 0x4e, + kIOSCSICommandScan = 0xba, + kIOSCSICommandReadCD = 0xbe +}; + +struct IOAudioPlayMSFcdb { + UInt8 opcode; + UInt8 lunbits; + UInt8 reserved1; + UInt8 start_m; + UInt8 start_s; + UInt8 start_f; + UInt8 end_m; + UInt8 end_s; + UInt8 end_f; + UInt8 ctlbyte; +}; + +struct IOReadToccdb { + UInt8 opcode; + UInt8 lunbits; +static const UInt8 kMSF = 0x02; /* set to get mm:ss:ff format, else logical addr */ + UInt8 reserved1; + UInt8 reserved2; + UInt8 reserved3; + UInt8 reserved4; + UInt8 start_trk_session; /* starting track/session number */ + UInt8 len_hi; + UInt8 len_lo; + UInt8 ctlbyte; /* and format code */ +static const UInt8 kTOC = 0x00; +static const UInt8 kSessionInfo = 0x01; +static const UInt8 kFullTOC = 0x02; +}; + +struct IORSCcdb { + UInt8 opcode; + UInt8 lunbits; +static const UInt8 kMSF = 0x02; /* set to get mm:ss:ff format, else logical addr */ + UInt8 subq; +static const UInt8 kSubq = 0x40; /* set to get subq data */ + UInt8 dataformat; +static const UInt8 kCurrentPosition = 1; +static const UInt8 kMCN = 2; +static const UInt8 kISRC = 3; + UInt8 reserved1; + UInt8 reserved2; + UInt8 track; + UInt8 len_hi; + UInt8 len_lo; + UInt8 ctlbyte; +}; + +/*! + * @class IOSCSICDDrive : public IOSCSIHDDrive + * @abstract + * Driver for SCSI CD-ROM drives. + * @discussion + * IOSCSICDDrive is a subclass of IOSCSIHDDrive. It adds appropriate CD-ROM + * APIs (e.g. 
audio), and overrides some methods of IOSCSIHDDrive in order + * to alter their behavior for CD-ROM devices. + */ +/*------------------------------------------------*/ +class IOSCSICDDrive : public IOSCSIHDDrive { + + OSDeclareDefaultStructors(IOSCSICDDrive) + +public: + + /* Overrides from IOService: */ + + virtual bool init(OSDictionary * properties); + + /* Overrides from IOBasicSCSI: */ + + /*! + * @function deviceTypeMatches + * @abstract + * Determine if the device type matches that which we expect. + * @discussion + * This override allows us to check for the SCSI CD-ROM + * device type instead of hard disk. + * See IOBasicSCSI for details. + */ + virtual bool deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 *score); + + /* End of IOBasicSCSI overrides */ + + /* IOSCSIHDDrive overrides: */ + + /*! + * @function doAsyncReadWrite + * @abstract + * Start an asynchronous read or write operation. + * @discussion + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion); + + /*! + * @function doSyncReadWrite + * @abstract + * Perform a synchronous read or write operation. + * @discussion + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks); + + /*! + * @function doFormatMedia + * @abstract + * Attempt to format the media. + * @discussion + * This override allows us to reject formatting attempts for CD-ROM. + */ + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + + /*! + * @function doGetFormatCapacities + * @abstract + * Report available formatting capacities for the device/media. + * @discussion + * This override allows us to reject formatting attempts for CD-ROM. + */ + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + + /*! 
+ * @function doSynchronizeCache
+ * @abstract
+ * Issue a synchronize-cache command when finished with a drive.
+ * @discussion
+ * This override allows us to reject the operation, since we never write to CD-ROM.
+ */
+ virtual IOReturn doSynchronizeCache(void);
+
+ /*!
+ * @function getDeviceTypeName
+ * @abstract
+ * Return a character string for the device type.
+ * @discussion
+ * This override returns kIOBlockStorageDeviceTypeCDROM.
+ */
+ virtual const char * getDeviceTypeName(void);
+ /*!
+ * @function instantiateNub
+ * @abstract
+ * Create the device nub.
+ * @discussion
+ * This override instantiates an IOSCSICDDriveNub instead of an IOSCSIHDDriveNub.
+ */
+ virtual IOService * instantiateNub(void);
+
+ /* We want to track media changes to do cleanup. */
+ /*!
+ * @function reportMediaState
+ * @abstract
+ * Report the device's media state.
+ * @discussion
+ * This override allows us to reset device settings when media changes.
+ */
+ virtual IOReturn reportMediaState(bool *mediaPresent,bool *changed);
+
+ /* end of IOSCSIHDDrive overrides */
+
+ /*-----------------------------------------*/
+ /* CD APIs */
+ /*-----------------------------------------*/
+
+ /*!
+ * @function doAsyncReadCD
+ * @abstract
+ * Start an asynchronous read CD operation.
+ * @param buffer
+ * An IOMemoryDescriptor describing the data-transfer buffer. Responsibility for releasing the descriptor
+ * rests with the caller.
+ * @param block
+ * The starting block number of the data transfer.
+ * @param nblks
+ * The integral number of blocks to be transferred.
+ * @param sectorArea
+ * Sector area(s) to read.
+ * @param sectorType
+ * Sector type that is expected. The data transfer is terminated as soon as
+ * data is encountered that does not match the expected type.
+ * @param action
+ * The C function called upon completion of the data transfer.
+ * @param target
+ * The C++ class "this" pointer, passed as an argument to "action."
+ * @param param
+ * This value is passed as an argument to "action."
It is not validated or modified. + */ + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + + /*! + * @function readISRC + * @abstract + * Read the International Standard Recording Code for the specified track. + * @param track + * The track number from which to read the ISRC. + * @param isrc + * The buffer for the ISRC data. Buffer contents will be zero-terminated. + */ + virtual IOReturn readISRC(UInt8 track,CDISRC isrc); + + /*! + * @function readMCN + * @abstract + * Read the Media Catalog Number (also known as the Universal Product Code). + * @param mcn + * The buffer for the MCN data. Buffer contents will be zero-terminated. + */ + virtual IOReturn readMCN(CDMCN mcn); + + /*! + * @function readTOC + * @abstract + * Read the full Table Of Contents. + * @param buffer + * The buffer for the returned data. + */ + virtual IOReturn readTOC(IOMemoryDescriptor * buffer); + + /*! + * @function reportMaxWriteTransfer + * @abstract + * Report the maximum allowed byte transfer for write operations. + * @discussion + * This override lets us return zero for the max write transfer, since + * we never write to CD-ROM media. See IOBasicSCSI for other details. + */ + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max); + + /*! + * @function reportWriteProtection + * @abstract + * Report whether the media is write-protected or not. + * @discussion + * This override lets us return TRUE in all cases. See IOBasicSCSI for details. + */ + virtual IOReturn reportWriteProtection(bool *isWriteProtected); + + /*-----------------------------------------*/ + /* APIs exported by IOCDAudioControl */ + /*-----------------------------------------*/ + + /*! + * @function audioPause + * @abstract + * Pause or resume the audio playback. + * @param pause + * True to pause playback; False to resume. + */ + virtual IOReturn audioPause(bool pause); + /*! 
+ * @function audioPlay + * @abstract + * Play audio. + * @param timeStart + * The M:S:F address from which to begin. + * @param timeStop + * The M:S:F address at which to stop. + */ + virtual IOReturn audioPlay(CDMSF timeStart,CDMSF timeStop); + /*! + * @function audioScan + * @abstract + * Perform a fast-forward or fast-backward operation. + * @param timeStart + * The M:S:F address from which to begin. + * @param reverse + * True to go backward; False to go forward. + */ + virtual IOReturn audioScan(CDMSF timeStart,bool reverse); + /*! + * @function audioStop + * @abstract + * Stop the audio playback (or audio scan). + */ + virtual IOReturn audioStop(); + /*! + * @function getAudioStatus + * @abstract + * Get the current audio play status information. + * @param status + * The buffer for the returned information. + */ + virtual IOReturn getAudioStatus(CDAudioStatus *status); + /*! + * @function getAudioVolume + * @abstract + * Get the current audio volume. + * @param leftVolume + * A pointer to the returned left-channel volume. + * @param rightVolume + * A pointer to the returned right-channel volume. + */ + virtual IOReturn getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume); + /*! + * @function setAudioVolume + * @abstract + * Set the current audio volume. + * @param leftVolume + * The desired left-channel volume. + * @param rightVolume + * The desired right-channel volume. + */ + virtual IOReturn setAudioVolume(UInt8 leftVolume,UInt8 rightVolume); + +protected: + + /* Internally used methods: */ + + /*! + * @function doAudioPlayCommand + * @abstract + * Issue an audio play command to the device. + * @param timeStart + * The M:S:F address from which to begin. + * @param timeStop + * The M:S:F address at which to stop. + */ + virtual IOReturn doAudioPlayCommand(CDMSF timeStart,CDMSF timeStop); + + /*! + * @function mediaArrived + * @abstract + * React to new media arrival. + */ + virtual void mediaArrived(void); + + /*! 
+ * @function mediaGone + * @abstract + * React to media going away. + */ + virtual void mediaGone(void); + + /*! + * @function readSubChannel + * @abstract + * Issue the command necessary to read subchannel data. + * @param buffer + * The buffer for the data. + * @param length + * The maximum data length desired. + * @param dataFormat + * The subchannel data desired. + * @param track + * The desired track from which to read the data + */ + virtual IOReturn readSubChannel(UInt8 *buffer,UInt32 length,UInt8 dataFormat,UInt8 trackNumber); +}; +#endif diff --git a/iokit/IOKit/storage/scsi/IOSCSICDDriveNub.h b/iokit/IOKit/storage/scsi/IOSCSICDDriveNub.h new file mode 100644 index 000000000..d8c2d4b23 --- /dev/null +++ b/iokit/IOKit/storage/scsi/IOSCSICDDriveNub.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSCSICDDriveNub.h created by rick on Wed 07-Apr-1999 */ + +/* This subclass implements a relay to a protocol- and device-specific provider. 
*/ + +#ifndef _IOSCSICDDRIVENUB_H +#define _IOSCSICDDRIVENUB_H + +#include +#include + +class IOSCSICDDrive; + +class IOSCSICDDriveNub : public IOCDBlockStorageDevice { + + OSDeclareDefaultStructors(IOSCSICDDriveNub) + +public: + + /* Overrides from IOService */ + + virtual bool attach(IOService * provider); + + /* Overrides from IOBlockStorageDevice: */ + + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion); + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks); + + /* --------------------------------------------------------------------------*/ + /* APIs used by the IOBlockStorageDevice portion of IOCDBlockStorageDevice: */ + /* --------------------------------------------------------------------------*/ + + virtual IOReturn doEjectMedia(void); + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual IOReturn doSynchronizeCache(void); + virtual char * getVendorString(void); + virtual char * getProductString(void); + virtual char * getRevisionString(void); + virtual char * getAdditionalDeviceInfoString(void); + virtual IOReturn reportBlockSize(UInt64 *blockSize); + virtual IOReturn reportEjectability(bool *isEjectable); + virtual IOReturn reportLockability(bool *isLockable); + virtual IOReturn reportPollRequirements(bool *pollIsRequired,bool *pollIsExpensive); + virtual IOReturn reportMaxReadTransfer(UInt64 blockSize,UInt64 *max); + virtual IOReturn reportMaxValidBlock(UInt64 *maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max); + virtual IOReturn reportMediaState(bool *mediaPresent,bool *changed); + virtual IOReturn reportRemovability(bool *isRemovable); + virtual IOReturn reportWriteProtection(bool *isWriteProtected); + + /*-----------------------------------------*/ + /* 
CD APIs */ + /*-----------------------------------------*/ + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + virtual UInt32 getMediaType(void); + virtual IOReturn readISRC(UInt8 track,CDISRC isrc); + virtual IOReturn readMCN(CDMCN mcn); + virtual IOReturn readTOC(IOMemoryDescriptor * buffer); + + /*-----------------------------------------*/ + /* APIs exported by IOCDAudioControl */ + /*-----------------------------------------*/ + + virtual IOReturn audioPause(bool pause); + virtual IOReturn audioPlay(CDMSF timeStart,CDMSF timeStop); + virtual IOReturn audioScan(CDMSF timeStart,bool reverse); + virtual IOReturn audioStop(); + virtual IOReturn getAudioStatus(CDAudioStatus *status); + virtual IOReturn getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume); + virtual IOReturn setAudioVolume(UInt8 leftVolume,UInt8 rightVolume); + +protected: + + IOSCSICDDrive * _provider; +}; +#endif diff --git a/iokit/IOKit/storage/scsi/IOSCSIDVDDrive.h b/iokit/IOKit/storage/scsi/IOSCSIDVDDrive.h new file mode 100644 index 000000000..01d3f229e --- /dev/null +++ b/iokit/IOKit/storage/scsi/IOSCSIDVDDrive.h @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOSCSIDVDDrive.h + * + * This class implements SCSI DVD functionality. + * + * Subclasses may modify the operations to handle device-specific variations. + */ + +#ifndef _IOSCSIDVDDRIVE_H +#define _IOSCSIDVDDRIVE_H + +#include +#include +#include +#include + +struct IOGCCdb { + UInt8 opcode; + UInt8 lunRT; + UInt8 startFeature_hi; + UInt8 startFeature_lo; + UInt8 reserved[3]; + UInt8 len_hi; + UInt8 len_lo; + UInt8 ctlbyte; +}; + +struct IORKCdb { + UInt8 opcode; + UInt8 lba_0; //msb + UInt8 lba_1; + UInt8 lba_2; + UInt8 lba_3; + UInt8 reserved; + UInt8 keyClass; + UInt8 len_hi; + UInt8 len_lo; + UInt8 agidKeyFormat; + UInt8 ctlbyte; +}; + +struct IOSKCdb { + UInt8 opcode; + UInt8 lun; + UInt8 reserved[5]; + UInt8 keyClass; + UInt8 len_hi; + UInt8 len_lo; + UInt8 agidKeyFormat; + UInt8 ctlbyte; +}; + +enum { + kIOSCSICommandGetConfiguration = 0x46, + kIOSCSICommandSendKey = 0xa3, + kIOSCSICommandReportKey = 0xa4 +}; + +const int kMaxConfigLength = 1024; +class IOMemoryDescriptor; + +/*------------------------------------------------*/ +class IOSCSIDVDDrive : public IOSCSICDDrive { + + OSDeclareDefaultStructors(IOSCSIDVDDrive) + +public: + + /* Overrides from IOService: */ + + virtual bool init(OSDictionary * properties); + + /* Overrides from IOBasicSCSI: */ + + virtual bool deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 *score); + + /*! + * @function reportWriteProtection + * @abstract + * Report whether the media is write-protected or not. 
+ * @discussion + * This override allows us to return the cached write-protection status + * without interrogating the drive. + */ + virtual IOReturn reportWriteProtection(bool *isWriteProtected); + + /* End of IOBasicSCSI overrides */ + + /* IOSCSIHDDrive overrides: */ + + /*! + * @function doAsyncReadWrite + * @abstract + * Start an asynchronous read or write operation. + * @discussion + * This override allows us to accept writes, which our superclass, IOSCSICDDrive, + * unconditionally rejects. + */ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion); + + /*! + * @function doSyncReadWrite + * @abstract + * Perform a synchronous read or write operation. + * @discussion + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks); + + /*! + * @function doFormatMedia + * @abstract + * Attempt to format the media. + * @discussion + * This override allows us to handle formatting for DVD-RAM. + */ + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + + /*! + * @function doGetFormatCapacities + * @abstract + * Report available formatting capacities for the device/media. + * @discussion + * This override allows us to handle formatting for DVD. + */ + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + + /*! + * @function doSynchronizeCache + * @abstract + * Force data blocks in the drive's buffer to be flushed to the media. + * @discussion + * This override allows us to issue a standard Synchronize-Cache command for DVD-RAM. + */ + virtual IOReturn doSynchronizeCache(void); + + virtual const char * getDeviceTypeName(void); + + /*! + * @function getGetConfigurationPowerState + * @abstract + * Return the required device power level to execute a Get Configuration command. + * @discussion + * The default implementation of this method returns kElectronicsOn. 
+ * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getGetConfigurationPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getReportKeyPowerState + * @abstract + * Return the required device power level to execute a Report Key command. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getReportKeyPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getSendKeyPowerState + * @abstract + * Return the required device power level to execute a Send Key command. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getSendKeyPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function instantiateNub + * @abstract + * Instantiate the desired nub. + * @discussion + * This override allows us to instantiate an IOSCSIDVDDriveNub. + */ + virtual IOService * instantiateNub(void); + + /*! + * @function reportMediaState + * @abstract + * Report the device's media state. + * @discussion + * This override allows us to determine the media type after something is inserted. 
+ */ + virtual IOReturn reportMediaState(bool *mediaPresent,bool *changed); + + /* end of IOSCSIHDDrive overrides */ + + /* DVD APIs: */ + + virtual UInt32 getMediaType(void); + virtual IOReturn reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass DVDKeyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat); + virtual IOReturn sendKey(IOMemoryDescriptor *buffer,const DVDKeyClass DVDKeyClass, + const UInt8 agid,const DVDKeyFormat keyFormat); + +protected: + + virtual void checkConfig(UInt8 *buf,UInt32 actual); + virtual IOReturn determineMediaType(void); + virtual IOReturn getConfiguration(UInt8 *buffer,UInt32 length,UInt32 *actualLength,bool current); + + UInt8 _configBuf[kMaxConfigLength]; + + UInt32 _configSize; + bool _isDVDDrive; + bool _canDoCSS; + UInt32 _mediaType; + bool _isWriteProtected; + +}; +#endif diff --git a/iokit/IOKit/storage/scsi/IOSCSIDVDDriveNub.h b/iokit/IOKit/storage/scsi/IOSCSIDVDDriveNub.h new file mode 100644 index 000000000..c806dd049 --- /dev/null +++ b/iokit/IOKit/storage/scsi/IOSCSIDVDDriveNub.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* This subclass implements a relay to a protocol- and device-specific provider. */ + +#ifndef _IOSCSIDVDDRIVENUB_H +#define _IOSCSIDVDDRIVENUB_H + +#include +#include + +class IOSCSIDVDDrive; + +class IOSCSIDVDDriveNub : public IODVDBlockStorageDevice { + + OSDeclareDefaultStructors(IOSCSIDVDDriveNub) + +public: + + /* Overrides from IOService */ + + virtual bool attach(IOService * provider); + + /* Overrides from IOBlockStorageDevice: */ + + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion); + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks); + + /* --------------------------------------------------------------------------*/ + /* APIs used by the IOBlockStorageDevice portion of IODVDBlockStorageDevice: */ + /* --------------------------------------------------------------------------*/ + + virtual IOReturn doEjectMedia(void); + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual IOReturn doSynchronizeCache(void); + virtual char * getVendorString(void); + virtual char * getProductString(void); + virtual char * getRevisionString(void); + virtual char * getAdditionalDeviceInfoString(void); + virtual IOReturn reportBlockSize(UInt64 *blockSize); + virtual IOReturn reportEjectability(bool *isEjectable); + virtual IOReturn reportLockability(bool *isLockable); + virtual IOReturn reportPollRequirements(bool *pollIsRequired,bool *pollIsExpensive); + virtual IOReturn reportMaxReadTransfer(UInt64 blockSize,UInt64 *max); + virtual IOReturn reportMaxValidBlock(UInt64 *maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max); + virtual IOReturn reportMediaState(bool *mediaPresent,bool *changed); + virtual IOReturn reportRemovability(bool 
*isRemovable); + virtual IOReturn reportWriteProtection(bool *isWriteProtected); + + /*-----------------------------------------*/ + /* CD APIs */ + /*-----------------------------------------*/ + + virtual IOReturn doAsyncReadCD(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + CDSectorArea sectorArea, + CDSectorType sectorType, + IOStorageCompletion completion); + virtual UInt32 getMediaType(void); + virtual IOReturn readISRC(UInt8 track,CDISRC isrc); + virtual IOReturn readMCN(CDMCN mcn); + virtual IOReturn readTOC(IOMemoryDescriptor * buffer); + + /*-----------------------------------------*/ + /* APIs exported by IOCDAudioControl */ + /*-----------------------------------------*/ + + virtual IOReturn audioPause(bool pause); + virtual IOReturn audioPlay(CDMSF timeStart,CDMSF timeStop); + virtual IOReturn audioScan(CDMSF timeStart,bool reverse); + virtual IOReturn audioStop(); + virtual IOReturn getAudioStatus(CDAudioStatus *status); + virtual IOReturn getAudioVolume(UInt8 *leftVolume,UInt8 *rightVolume); + virtual IOReturn setAudioVolume(UInt8 leftVolume,UInt8 rightVolume); + + /* DVD APIs beyond standard CD APIs: */ + + virtual IOReturn reportKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt32 lba,const UInt8 agid,const DVDKeyFormat keyFormat); + virtual IOReturn sendKey(IOMemoryDescriptor *buffer,const DVDKeyClass keyClass, + const UInt8 agid,const DVDKeyFormat keyFormat); +protected: + + IOSCSIDVDDrive * _provider; + +}; +#endif diff --git a/iokit/IOKit/storage/scsi/IOSCSIHDDrive.h b/iokit/IOKit/storage/scsi/IOSCSIHDDrive.h new file mode 100644 index 000000000..d60b2cb05 --- /dev/null +++ b/iokit/IOKit/storage/scsi/IOSCSIHDDrive.h @@ -0,0 +1,784 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * IOSCSIHDDrive.h + * + * This class implements SCSI hard disk functionality. + * + * Subclasses may modify the operations to handle device-specific variations. + */ + +#ifndef _IOSCSIHDDRIVE_H +#define _IOSCSIHDDRIVE_H + +#include +#include +#include + +/* SCSI (inquiry) device type. */ + +enum { + kIOSCSIDeviceTypeDirectAccess = 0x00 +}; + +/* SCSI commands. 
*/ + +enum { + kIOSCSICommandTestUnitReady = 0x00, + kIOSCSICommandFormatUnit = 0x04, + kIOSCSICommandStartStopUnit = 0x1b, + kIOSCSICommandPreventAllow = 0x1e, + kIOSCSICommandSynchronizeCache = 0x35, + kIOSCSICommandModeSelect = 0x55, + kIOSCSICommandModeSense = 0x5a, + kIOSCSICommandRead = 0xa8, + kIOSCSICommandWrite = 0xaa +}; + +struct IOFormatcdb { + UInt8 opcode; /* 0x12 */ + UInt8 lunbits; /* lun and control bits */ + UInt8 vendor; + UInt8 interleave_msb; + UInt8 interleave_lsb; + UInt8 ctlbyte; +}; + +struct IOPrevAllowcdb { + UInt8 opcode; + UInt8 lunbits; + UInt8 reserved1; + UInt8 reserved2; + UInt8 prevent; + UInt8 ctlbyte; +}; + +struct IOStartStopcdb { + UInt8 opcode; + UInt8 lunImmed; + UInt8 reserved1; + UInt8 reserved2; + + /* Control bits: */ + /* Power Conditions */ +static const UInt8 P_NOCHANGE = 0x00; /* 0 - no change */ +static const UInt8 P_ACTIVE = 0x10; /* 1 - change to Active */ +static const UInt8 P_IDLE = 0x20; /* 2 - change to Idle */ +static const UInt8 P_STANDBY = 0x30; /* 3 - change to Standby */ +static const UInt8 P_RESERVED4 = 0x40; /* 4 - reserved */ +static const UInt8 P_SLEEP = 0x50; /* 5 - change to Sleep */ +static const UInt8 P_RESERVED6 = 0x60; /* 6 - reserved */ +static const UInt8 P_LUNCONTROL = 0x70; /* 7 - give pwr ctl to LUN */ +static const UInt8 P_RESERVED8 = 0x80; /* 8 - reserved */ +static const UInt8 P_RESERVED9 = 0x90; /* 9 - reserved */ +static const UInt8 P_TIDLEZERO = 0xa0; /* a - force Idle Cond Timer = 0 */ +static const UInt8 P_TSTDBYZERO = 0xb0; /* b - force Stby Cond Timer = 0 */ + +static const UInt8 C_LOEJ = 0x02; /* load on start/eject on stop */ +static const UInt8 C_SPINUP = 0x01; +static const UInt8 C_SPINDOWN = 0x00; + + UInt8 controls; + UInt8 ctlbyte; +}; + +struct IOSyncCachecdb { + UInt8 opcode; + UInt8 lunbits; + UInt8 lba_3; /* msb */ + UInt8 lba_2; + UInt8 lba_1; + UInt8 lba_0; /* lsb */ + UInt8 reserved; + UInt8 nblks_msb; + UInt8 nblks_lsb; + UInt8 ctlbyte; +}; + +/*! 
+ * @enum Power States + * @discussion + * We define and understand three basic, generic power states. A subclass may change + * the power management logic, but all power-management routines should be examined + * if anything is changed. The only routines that deal directly with these values + * are directly related to power management. All other functions merely ask for and + * pass along power state values. + * @constant kAllOff + * The power state for an all-off condition. + * @constant kElectronicsOn + * The power state for the electronics on, but the media off. + * @constant kAllOn + * The power state for the electronics and media on. + * @constant kNumberOfPowerStates + * The maximum enum value. + */ +enum { /* electronics mechanical */ + kAllOff = 0, /* OFF OFF */ + kElectronicsOn = 1, /* ON OFF */ + kAllOn = 2, /* ON ON */ + + kNumberOfPowerStates = 3 +}; + +/*! + * @class + * IOSCSIHDDrive : public IOBasicSCSI + * @abstract + * SCSI Hard Disk driver. + * @discussion + * IOSCSIHDDrive derives from IOBasicSCSI and adds all functionality + * needed to support removable or fixed hard disk drives. + */ + +class IOSCSIHDDrive : public IOBasicSCSI { + + OSDeclareDefaultStructors(IOSCSIHDDrive) + +public: + + /* Overrides from IOService: */ + + virtual bool init(OSDictionary * properties); + + /*! + * @function start + * @abstract + * Start the driver. + * @discussion + * We override IOBasicSCSI::start so we can initialize Power Management, + * then we call createNub to create an IOSCSIHDDriveNub. + */ + virtual bool start(IOService * provider); + + /* Overrides from IOBasicSCSI: */ + + /*! + * @function deviceTypeMatches + * @abstract + * Determine if device type matches expected type. + * @discussion + * We implement this function so we can return a match + * on the hard disk device type. + */ + virtual bool deviceTypeMatches(UInt8 inqBuf[],UInt32 inqLen,SInt32 *score); + + /*! 
+ * @function constructDeviceProperties + * @abstract + * Construct a set of properties about the device. + * @discussion + * This function creates a set of properties reflecting information + * about the device. + * + * This function is presently not used. + * @result + * A pointer to an OSDictionary containing the properties. The caller + * is responsible for releasing the OSDictionary. + */ + virtual OSDictionary *constructDeviceProperties(void); + + /*! + * @function RWCompletion + * @abstract + * Asynchronous read/write completion routine. + * @discussion + * We implement this function in this class. It is called from the base + * class when an IO operation completes. + */ + virtual void RWCompletion(struct context *cx); + + /* End of IOBasicSCSI overrides */ + + /* Additional API added to IOBasicSCSI: */ + + /*! + * @function doAsyncReadWrite + * @abstract + * Start an asynchronous read or write operation. + * @discussion + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion); + + /*! + * @function doSyncReadWrite + * @abstract + * Perform a synchronous read or write operation. + * @discussion + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks); + + /*! + * @function doEjectMedia + * @abstract + * Eject the media. + * @discussion + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doEjectMedia(void); + + /*! + * @function doFormatMedia + * @abstract + * Format the media to the specified byte capacity. + * @discussion + * The default implementation calls standardFormatMedia. + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + + /*! + * @function doGetFormatCapacities + * @abstract + * Return the allowable formatting byte capacities. 
+ * @discussion + * The default implementation of this method returns a value of block + * size * max block, and a capacities count of 1. + * See IOBlockStorageDevice for details. + */ + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + + /*! + * @function doLockUnlockMedia + * @abstract + * Lock or unlock the (removable) media in the drive. + * @discussion + * This method issues a standard SCSI Prevent/Allow command to lock + * or unlock the media in the drive. + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doLockUnlockMedia(bool doLock); + + /*! + * @function doSynchronizeCache + * @abstract + * Force data blocks in the drive's buffer to be flushed to the media. + * @discussion + * This method issues a SCSI Synchronize Cache command, to ensure that + * all blocks in the device cache are written to the media. + * See IOBlockStorageDevice for details. + */ + virtual IOReturn doSynchronizeCache(void); + + /*! + * @function reportMediaState + * @abstract + * Report the device's media state. + * @discussion + * This method reports whether media is present or not, and also + * whether the media state has changed since the last call to + * reportMediaState. The default implementation issues a SCSI Test + * Unit Ready command: depending on the result of that command, the + * following cases are reported: + * + * 1. TUR status == good completion: we report media present and return + * kIOReturnSuccess. + * + * 2. TUR status != good completion, but good autosense returned: + * + * 2a: sense key says not ready: we report media not present + * and return kIOReturnSuccess. + * + * 2b: sense key is anything else: we report media not present + * and return kIOReturnIOError. + * + * 3. TUR status != good completion, and no autosense data: we do not + * set mediaPresent or changedState, and we return whatever result + * came back from the SCSI operation. 
+ */ + virtual IOReturn reportMediaState(bool *mediaPresent,bool *changed); + + /* --- end of additional API --- */ + +protected: + + /*! + * @function createFormatCdb + * @abstract + * Create a SCSI CDB for a format operation. + * @discussion + * Override this to control the cdb created for a format operation. + * The default implementation creates a 6-byte format command with + * no data buffer, disconnect allowed, 8-byte autosense, and a 15-minute timeout. + * + * See also: allocateFormatBuffer, deleteFormatBuffer, composeFormatBuffer. + * @param byteCapacity + * The requested byte capacity to which the media should be formatted. This value + * should have been previously validated, otherwise the device may return an error. + * @param cdb + * A pointer to the CDB bytes. + * @param cdbLength + * The length of the CDB in bytes. + * @param block + * The device block to be written. + * @param nblks + * The number of blocks to be transferred. + * @param maxAutoSenseLength + * The maximum size of the autosense data, in bytes. A value of zero + * will disable autosense. + * @param timeoutSeconds + * The command timeout in seconds. + * @result + * The IOSCSICommandOptions returned will be used to issue the command. + */ + virtual UInt32 createFormatCdb( + UInt64 byteCapacity, /* in */ + UInt8 *cdb, /* in */ + UInt32 *cdbLength, /* out */ + UInt8 buf[], /* in */ + UInt32 bufLen, /* in */ + UInt32 *maxAutoSenseLength, /* out */ + UInt32 *timeoutSeconds); /* out */ + + + /*! + * @function allocateFormatBuffer + * @abstract + * Create a data buffer to be used for formatting the media. + * @discussion + * If a format buffer is to be used, then "allocateFormatBuffer" and + * deleteFormatBuffer" must be overridden to manage the buffer. The + * buffer must be prepared for IO upon return from allocateFormatBuffer. + * The default implementations of these methods don't allocate a buffer. + * @param buf + * A pointer for the returned buffer pointer. 
+ * @param buflen + * The desired length of the buffer, in bytes. + */ + virtual IOReturn allocateFormatBuffer(UInt8 **buf,UInt32 *buflen); + + /*! + * @function deleteFormatBuffer + * @abstract + * Delete the data buffer to be used for formatting the media. + * @discussion + * If a format buffer is to be used, then "allocateFormatBuffer" and + * deleteFormatBuffer" must be overridden to manage the buffer. + * The default implementation of this method does nothing. + * @param buf + * A pointer to the buffer to delete. + * @param buflen + * The size of the buffer, in bytes. + */ + virtual void deleteFormatBuffer(UInt8 *buf,UInt32 buflen); + + /*! + * @function composeFormatBuffer + * @abstract + * Compose the data in the buffer used for the format command. + * @discussion + * This method will be called to compose the data in the format buffer. + * + * The default implementation of this method does nothing. + * @param buf + * A pointer to the format data buffer. + * @param buflen + * The size of the format data buffer, in bytes. + * @result + * The return value should be the desired values for the "CmpLst" and Defect + * List Format bits in the CDB. The default implementation returns zero. + */ + virtual UInt8 composeFormatBuffer(UInt8 *buf,UInt32 buflen); + + /* Override these methods to save and restore the state of the device electronics + * when power is turned off and on. The defaults do nothing and return kIOReturnSuccess. + */ + + /*! + * @function restoreElectronicsState + * @abstract + * Restore the state of the device electronics when powering-up. + * @discussion + * This method is called just after the device transitions from a powered-off state. + * + * The default implementation of this method does nothing and returns kIOReturnSuccess. + */ + virtual IOReturn restoreElectronicsState(void); + + /*! + * @function saveElectronicsState + * @abstract + * Save the state of the device electronics when powering-down. 
+ * @discussion + * This method is called just before the device transitions to a powered-off state. + * + * The default implementation of this method does nothing and returns kIOReturnSuccess. + */ + virtual IOReturn saveElectronicsState(void); + + /*! + * @function initialPowerStateForDomainState + * @abstract + * Return the initial power state for the device. + * @discussion + * This method is called to obtain the initial power state for the device, + * by calling getInitialPowerState. + * @param domainState + * Power domain state flags. + * @result + * The return value must be a valid power state value. + */ + virtual unsigned long initialPowerStateForDomainState ( IOPMPowerFlags domainState ); + + /*! + * @function maxCapabilityForDomainState + * @abstract + * Return the maximum power level obtainable for the given state. + * @discussion + * This method is called to obtain the maximum power level obtainable for the + * given state. + * @param domainState + * Power domain state flags. + * @result + * The return value must be a valid power state value. + */ + virtual unsigned long maxCapabilityForDomainState ( IOPMPowerFlags domainState ); + + /*! + * @function powerStateForDomainState + * Return the maximum power level obtainable for the given state. + * @discussion + * This method is called to obtain the maximum power level obtainable for the + * given state. + * @param domainState + * Power domain state flags. + * @result + * The return value must be a valid power state value. + */ + virtual unsigned long powerStateForDomainState ( IOPMPowerFlags domainState ); + + /*! + * @function powerStateDidChangeTo + * @abstract + * React to a change in power state. + * @discussion + * This method is called when the power state changes. We call restoreElectronicsState + * if necessary, then call dequeueCommands if we have changed to a state that has power. + * @param stateOrdinal + * The power level to which we have changed. 
+ */ + virtual IOReturn powerStateDidChangeTo ( unsigned long, unsigned long stateOrdinal, IOService* ); + + /*! + * @function powerStateWillChangeTo + * @abstract + * Prepare for a power state change. + * @discussion + * This method is called when the power state will change. If we are powering-up from kAllOff, + * we schedule a call to restoreElectronicsState. If, instead, we are powering-down from an "on" state, + * we schedule a call to saveElectronicsState. + * @param stateOrdinal + * The power level to which we will change. + */ + virtual IOReturn powerStateWillChangeTo ( unsigned long, unsigned long stateOrdinal, IOService* ); + + /*! + * @function setPowerState + * @abstract + * Set the power state to the specified state. + * @discussion + * This method is called to cause a change in power state. We handle changes to and from + * kAllOn and kElectronicsOn, which are done by spinning up and down the media. + * @param powerStateOrdinal + * The power level to which we must change. + */ + virtual IOReturn setPowerState ( unsigned long powerStateOrdinal, IOService* ); + + /*! + * @function powerTickle + * Check for the device power state currently being in the desired state. + * @discussion + * This method simply "tickles" + * the Power Management subsystem to ensure that the device transitions to the desired + * state if necessary. + */ + virtual bool powerTickle(UInt32 desiredState); + + /* Override this method to report the initial device power state when its domain is + * powered up. The default implementation assumes the drive spins up. + */ + + /*! + * @function getInitialPowerState + * @abstract + * Report the initial power state of the device. + * @discussion + * The default implementation of this method returns kAllOn, assuming that the + * drive spindle spins up initially. + * @result + * The return value must be a valid power state value. 
+ */ + virtual unsigned long getInitialPowerState(void); /* default = kAllOn */ + + /* Override these to change power level required to do various commands. */ + + /*! + * @function getEjectPowerState + * @abstract + * Return the required device power level to determine eject the media. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getEjectPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getExecuteCDBPowerState + * @abstract + * @discussion + * @param + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getExecuteCDBPowerState(void); /* default = kAllOn */ + + /*! + * @function getFormatMediaPowerState + * @abstract + * Return the required device power level to execute a client CDB. + * @discussion + * The default implementation of this method returns kAllOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getFormatMediaPowerState(void); /* default = kAllOn */ + + /*! + * @function getInquiryPowerState + * @abstract + * Return the required device power level to execute an Inquiry command. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getInquiryPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getLockUnlockMediaPowerState + * @abstract + * Return the required device power level to lock or unlock the media. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getLockUnlockMediaPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getReadCapacityPowerState + * @abstract + * Return the required device power level to execute a Read-Capacity command. 
+ * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getReadCapacityPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getReadWritePowerState + * @abstract + * Return the required device power level to execute a Read or Write command. + * @discussion + * The default implementation of this method returns kAllOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getReadWritePowerState(void); /* default = kAllOn */ + + /*! + * @function getReportWriteProtectionPowerState + * @abstract + * Return the required device power level to report media write protection. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getReportWriteProtectionPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getStartPowerState + * @abstract + * Return the required device power level to start (spin up) the media. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getStartPowerState(void); /* default = kElectronicsOn */ + + /*! + * @function getStopPowerState + * @abstract + * Return the required device power level to stop (spin down) the media. + * @discussion + * The default implementation of this method returns kAllOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getStopPowerState(void); /* default = kAllOn */ + + /*! + * @function getSynchronizeCachePowerState + * @abstract + * Return the required device power level to issue a Synchronize-Cache command. + * @discussion + * The default implementation of this method returns kAllOn. + * @result + * The return value must be a valid power state value. 
+ */ + virtual UInt32 getSynchronizeCachePowerState(void); /* default = kAllOn */ + + /*! + * @function getTestUnitReadyPowerState + * @abstract + * Return the required device power level to issue a Test Unit Ready command. + * @discussion + * The default implementation of this method returns kElectronicsOn. + * @result + * The return value must be a valid power state value. + */ + virtual UInt32 getTestUnitReadyPowerState(void); /* default = kElectronicsOn */ + + /* + * @group + * Internally used methods. + */ + + /*! + * @function createNub + * @abstract + * Create, init, attach, and register the device nub. + * @discussion + * This method calls instantiateNub, then init, attach, and register. + * @result + * A pointer to the nub or NULL if something failed. + */ + virtual IOService * createNub(void); + + /*! + * @function getDeviceTypeName + * @abstract + * Return a character string for the device type. + * @discussion + * The default implementation of this method returns + * kIOBlockStorageDeviceTypeGeneric. + */ + virtual const char * getDeviceTypeName(void); + + /*! + * @function instantiateNub + * @abstract + * Create the device nub. + * @discussion + * A subclass will override this method to change the type of nub created. + * A CD driver, for example, will instantiate an IOSCSICDDriveNub instead + * of the default implementation's IOSCSIHDDriveNub. + */ + virtual IOService * instantiateNub(void); + + /*! + * @function doStart + * @abstract + * Start (spin up) the media. + * @discussion + * This method calls doStartStop. + */ + virtual IOReturn doStart(void); + + /*! + * @function doStartStop + * @abstract + * Perform the actual spin up/down command. + * @discussion + * This method issues a SCSI Start Stop Unit command to start or stop + * the device. Because the powerCondition value is only for use with + * SCSI-3 devices, the current implementation ignores powerCondition. 
+ * @param start + * True to start (spin-up) the media; False to stop (spin-down) the media. + * @param loadEject + * True to eject; False to not eject. This parameter is applicable only to a stop + * operation. + * @param powerCondition + * The power condition to which the drive should transition. This is a SCSI-3 + * capability; it is presently unused. + */ + virtual IOReturn doStartStop(bool start,bool loadEject,UInt8 powerCondition); + + /*! + * @function doStop + * @abstract + * Stop (spin down) the media. + * @discussion + * This method calls doStartStop. + */ + virtual IOReturn doStop(void); + + /*! + * @function standardFormatMedia + * @abstract + * Perform a standard media format operation. + * @discussion + * See doFormatMedia for further information. + */ + virtual IOReturn standardFormatMedia(UInt64 byteCapacity); + + /*! + * @function standardSynchronizeCache + * @abstract + * Perform a standard Synchronize-Cache operation. + * @discussion + * See doFormatMedia for further information. + */ + virtual IOReturn standardSynchronizeCache(void); + + /* + * @endgroup + */ + + /* Device information : */ + + /*! + * @var _mediaPresent + * True if media is present; False if media is not present. + */ + bool _mediaPresent; + + /*! + * @var _startStopDisabled + * True if the start/stop commands are disabled due to an error. + */ + bool _startStopDisabled; + + /*! + * @var _restoreState + * True if we must restore the device electronics state after a power-up. + */ + bool _restoreState; /* true if we must restore after power-up */ }; +#endif diff --git a/iokit/IOKit/storage/scsi/IOSCSIHDDriveNub.h b/iokit/IOKit/storage/scsi/IOSCSIHDDriveNub.h new file mode 100644 index 000000000..94647d545 --- /dev/null +++ b/iokit/IOKit/storage/scsi/IOSCSIHDDriveNub.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSCSIHDDriveNub.h created by rick on Tue 23-Mar-1999 */ + +/* This subclass implements a relay to a protocol- and device-specific provider. 
*/ + +#ifndef _IOSCSIHDDRIVENUB_H +#define _IOSCSIHDDRIVENUB_H + +#include +#include + +class IOSCSIHDDrive; + +class IOSCSIHDDriveNub : public IOBlockStorageDevice { + + OSDeclareDefaultStructors(IOSCSIHDDriveNub) + +public: + + /* Overrides from IOService */ + + virtual bool attach(IOService * provider); + virtual void detach(IOService * provider); + + /* Mandatory overrides from IOBlockStorageDevice: */ + + virtual IOReturn doAsyncReadWrite(IOMemoryDescriptor *buffer, + UInt32 block,UInt32 nblks, + IOStorageCompletion completion); + virtual IOReturn doSyncReadWrite(IOMemoryDescriptor *buffer,UInt32 block,UInt32 nblks); + virtual IOReturn doEjectMedia(void); + virtual IOReturn doFormatMedia(UInt64 byteCapacity); + virtual UInt32 doGetFormatCapacities(UInt64 * capacities, + UInt32 capacitiesMaxCount) const; + virtual IOReturn doLockUnlockMedia(bool doLock); + virtual IOReturn doSynchronizeCache(void); + virtual char * getVendorString(void); + virtual char * getProductString(void); + virtual char * getRevisionString(void); + virtual char * getAdditionalDeviceInfoString(void); + virtual IOReturn reportBlockSize(UInt64 *blockSize); + virtual IOReturn reportEjectability(bool *isEjectable); + virtual IOReturn reportLockability(bool *isLockable); + virtual IOReturn reportMediaState(bool *mediaPresent,bool *changed); + virtual IOReturn reportPollRequirements(bool *PollIsRequired,bool *pollIsExpensive); + virtual IOReturn reportMaxReadTransfer (UInt64 blockSize,UInt64 *max); + virtual IOReturn reportMaxValidBlock(UInt64 *maxBlock); + virtual IOReturn reportMaxWriteTransfer(UInt64 blockSize,UInt64 *max); + virtual IOReturn reportRemovability(bool *isRemovable); + virtual IOReturn reportWriteProtection(bool *isWriteProtected); + +protected: + + IOSCSIHDDrive * _provider; +}; +#endif diff --git a/iokit/IOKit/storage/scsi/Makefile b/iokit/IOKit/storage/scsi/Makefile new file mode 100644 index 000000000..56003c286 --- /dev/null +++ b/iokit/IOKit/storage/scsi/Makefile @@ -0,0 
+1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = storage/scsi +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/IOKit/system.h b/iokit/IOKit/system.h new file mode 100644 index 000000000..354ee70dd --- /dev/null +++ b/iokit/IOKit/system.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __IOKIT_SYSTEM_H +#define __IOKIT_SYSTEM_H + +#include + +__BEGIN_DECLS + +#include +#include +#include +#include + +#include + +#if KERNEL_PRIVATE +#include /* Must be before other includes of kern/assert.h */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif /* KERNEL_PRIVATE */ + +extern int bcmp(const void *, const void *, size_t); +extern void bcopy(const void *, void *, size_t); +extern void bzero(void *, size_t); + +extern int memcmp(const void *, const void *, size_t); + +extern void _doprnt( const char *format, va_list *arg, + void (*putc)(char), int radix ); + +extern int sscanf(const char *input, const char *fmt, ...); +extern int sprintf(char *s, const char *format, ...); +extern long strtol(const char *, char **, int); +extern unsigned long strtoul(const char *, char **, int); + +extern +#ifdef __GNUC__ +volatile +#endif +void panic(const char * msg, ...); + +/* + */ + +#ifdef __ppc__ + +/* + * Really need a set of interfaces from osfmk/pexpert components to do + * all that is required to prepare an I/O from a cache management point + * of view. + * osfmk/ppc/cache.s + */ +extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); +extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); + +#endif + +__END_DECLS + +#endif /* !__IOKIT_SYSTEM_H */ diff --git a/iokit/IOKit/system_management/IOWatchDogTimer.h b/iokit/IOKit/system_management/IOWatchDogTimer.h new file mode 100644 index 000000000..9fc09fe98 --- /dev/null +++ b/iokit/IOKit/system_management/IOWatchDogTimer.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _IOWATCHDOGTIMER_H +#define _IOWATCHDOGTIMER_H + +#include + +class IOWatchDogTimer : public IOService +{ + OSDeclareAbstractStructors(IOWatchDogTimer); + +protected: + IONotifier *notifier; + struct ExpansionData { }; + ExpansionData *reserved; + +public: + virtual bool start(IOService *provider); + virtual void stop(IOService *provider); + virtual IOReturn setProperties(OSObject *properties); + virtual void setWatchDogTimer(UInt32 timeOut) = 0; + + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 0); + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 1); + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 2); + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 3); +}; + +#endif /* !_IOWATCHDOGTIMER_H */ diff --git a/iokit/IOKit/system_management/Makefile b/iokit/IOKit/system_management/Makefile new file mode 100644 index 000000000..8b2463850 --- /dev/null +++ b/iokit/IOKit/system_management/Makefile @@ -0,0 +1,34 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export 
MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +IOKIT_FRAMEDIR = $(FRAMEDIR)/IOKit.framework/Versions/A +export INCDIR = $(IOKIT_FRAMEDIR)/Headers +export LCLDIR = $(IOKIT_FRAMEDIR)/PrivateHeaders + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MI_DIR = system_management +NOT_EXPORT_HEADERS = + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +ALL_HEADERS = $(shell (cd $(SOURCE); echo *.h)) + +INSTALL_MI_LIST = +INSTALL_MI_LCL_LIST = "" +INSTALL_MI_DIR = $(MI_DIR) + +EXPORT_MI_LIST = $(filter-out $(NOT_EXPORT_HEADERS), $(ALL_HEADERS)) +EXPORT_MI_DIR = IOKit/$(MI_DIR) + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp new file mode 100644 index 000000000..c209c7035 --- /dev/null +++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp @@ -0,0 +1,425 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +#include +#include + +__BEGIN_DECLS +void ipc_port_release_send(ipc_port_t port); +__END_DECLS + +extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address ); + +#define super IOGeneralMemoryDescriptor +OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor, + IOGeneralMemoryDescriptor); + +bool IOBufferMemoryDescriptor::initWithAddress( + void * /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ ) +{ + return false; +} + +bool IOBufferMemoryDescriptor::initWithAddress( + vm_address_t /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ , + task_t /* withTask */ ) +{ + return false; +} + +bool IOBufferMemoryDescriptor::initWithPhysicalAddress( + IOPhysicalAddress /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ ) +{ + return false; +} + +bool IOBufferMemoryDescriptor::initWithPhysicalRanges( + IOPhysicalRange * /* ranges */ , + UInt32 /* withCount */ , + IODirection /* withDirection */ , + bool /* asReference */ ) +{ + return false; +} + +bool IOBufferMemoryDescriptor::initWithRanges( + IOVirtualRange * /* ranges */ , + UInt32 /* withCount */ , + IODirection /* withDirection */ , + task_t /* withTask */ , + bool /* asReference */ ) +{ + return false; +} + +bool IOBufferMemoryDescriptor::initWithOptions( + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment) +{ + if (!capacity) + return false; + + _options = options; + _capacity = capacity; + _physAddrs = 0; + _physSegCount = 0; + _buffer = 0; + + if ((options & kIOMemorySharingTypeMask) && (alignment < page_size)) + alignment = page_size; + + _alignment = alignment; + if (options & kIOMemoryPageable) + /* Allocate some kernel address space. */ + _buffer = IOMallocPageable(capacity, alignment); + /* Allocate a wired-down buffer inside kernel space. 
*/ + else if (options & kIOMemoryPhysicallyContiguous) + _buffer = IOMallocContiguous(capacity, alignment, 0); + else if (alignment > 1) + _buffer = IOMallocAligned(capacity, alignment); + else + _buffer = IOMalloc(capacity); + + if (!_buffer) + return false; + + _singleRange.v.address = (vm_address_t) _buffer; + _singleRange.v.length = capacity; + + if (!super::initWithRanges(&_singleRange.v, 1, + (IODirection) (options & kIOMemoryDirectionMask), + kernel_task, true)) + return false; + + if (options & kIOMemoryPageable) { + _flags |= kIOMemoryRequiresWire; + + kern_return_t kr; + ipc_port_t sharedMem = (ipc_port_t) _memEntry; + vm_size_t size = _ranges.v[0].length; + + // must create the entry before any pages are allocated + if( 0 == sharedMem) { + kr = mach_make_memory_entry( IOPageableMapForAddress( _ranges.v[0].address ), + &size, _ranges.v[0].address, + VM_PROT_READ | VM_PROT_WRITE, &sharedMem, + NULL ); + if( (KERN_SUCCESS == kr) && (size != _ranges.v[0].length)) { + ipc_port_release_send( sharedMem ); + kr = kIOReturnVMError; + } + if( KERN_SUCCESS != kr) + sharedMem = 0; + _memEntry = (void *) sharedMem; + } + + } else { + /* Precompute virtual-to-physical page mappings. 
*/ + vm_address_t inBuffer = (vm_address_t) _buffer; + _physSegCount = atop(trunc_page(inBuffer + capacity - 1) - + trunc_page(inBuffer)) + 1; + _physAddrs = IONew(IOPhysicalAddress, _physSegCount); + if (!_physAddrs) + return false; + + inBuffer = trunc_page(inBuffer); + for (unsigned i = 0; i < _physSegCount; i++) { + _physAddrs[i] = pmap_extract(get_task_pmap(kernel_task), inBuffer); + assert(_physAddrs[i]); /* supposed to be wired */ + inBuffer += page_size; + } + } + + setLength(capacity); + + return true; +} + +IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions( + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment = 1) +{ + IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + + if (me && !me->initWithOptions(options, capacity, alignment)) { + me->release(); + me = 0; + } + return me; +} + + +/* + * withCapacity: + * + * Returns a new IOBufferMemoryDescriptor with a buffer large enough to + * hold capacity bytes. The descriptor's length is initially set to the capacity. + */ +IOBufferMemoryDescriptor * +IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity, + IODirection inDirection, + bool inContiguous) +{ + return( IOBufferMemoryDescriptor::withOptions( + inDirection | kIOMemoryUnshared + | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), + inCapacity, inContiguous ? inCapacity : 1 )); +} + +/* + * initWithBytes: + * + * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied). + * The descriptor's length and capacity are set to the input buffer's size. + */ +bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes, + vm_size_t inLength, + IODirection inDirection, + bool inContiguous) +{ + if (!initWithOptions( + inDirection | kIOMemoryUnshared + | (inContiguous ? 
kIOMemoryPhysicallyContiguous : 0), + inLength, inLength )) + return false; + + // start out with no data + setLength(0); + + if (!appendBytes(inBytes, inLength)) + return false; + + return true; +} + +/* + * withBytes: + * + * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied). + * The descriptor's length and capacity are set to the input buffer's size. + */ +IOBufferMemoryDescriptor * +IOBufferMemoryDescriptor::withBytes(const void * inBytes, + vm_size_t inLength, + IODirection inDirection, + bool inContiguous) +{ + IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + + if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){ + me->release(); + me = 0; + } + return me; +} + +/* + * free: + * + * Free resources + */ +void IOBufferMemoryDescriptor::free() +{ + IOOptionBits options = _options; + vm_size_t size = _capacity; + void * buffer = _buffer; + vm_offset_t alignment = _alignment; + + if (_physAddrs) + IODelete(_physAddrs, IOPhysicalAddress, _physSegCount); + + /* super::free may unwire - deallocate buffer afterwards */ + super::free(); + + if (buffer) { + if (options & kIOMemoryPageable) + IOFreePageable(buffer, size); + else { + if (options & kIOMemoryPhysicallyContiguous) + IOFreeContiguous(buffer, size); + else if (alignment > 1) + IOFreeAligned(buffer, size); + else + IOFree(buffer, size); + } + } +} + +/* + * getCapacity: + * + * Get the buffer capacity + */ +vm_size_t IOBufferMemoryDescriptor::getCapacity() const +{ + return _capacity; +} + +/* + * setLength: + * + * Change the buffer length of the memory descriptor. When a new buffer + * is created, the initial length of the buffer is set to be the same as + * the capacity. The length can be adjusted via setLength for a shorter + * transfer (there is no need to create more buffer descriptors when you + * can reuse an existing one, even for different transfer sizes). Note + * that the specified length must not exceed the capacity of the buffer. 
+ */ +void IOBufferMemoryDescriptor::setLength(vm_size_t length) +{ + assert(length <= _capacity); + + _length = length; + _singleRange.v.length = length; +} + +/* + * setDirection: + * + * Change the direction of the transfer. This method allows one to redirect + * the descriptor's transfer direction. This eliminates the need to destroy + * and create new buffers when different transfer directions are needed. + */ +void IOBufferMemoryDescriptor::setDirection(IODirection direction) +{ + _direction = direction; +} + +/* + * appendBytes: + * + * Add some data to the end of the buffer. This method automatically + * maintains the memory descriptor buffer length. Note that appendBytes + * will not copy past the end of the memory descriptor's current capacity. + */ +bool +IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength) +{ + vm_size_t actualBytesToCopy = min(withLength, _capacity - _length); + + assert(_length <= _capacity); + bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length), + actualBytesToCopy); + _length += actualBytesToCopy; + _singleRange.v.length += actualBytesToCopy; + + return true; +} + +/* + * getBytesNoCopy: + * + * Return the virtual address of the beginning of the buffer + */ +void * IOBufferMemoryDescriptor::getBytesNoCopy() +{ + return (void *)_singleRange.v.address; +} + +/* + * getBytesNoCopy: + * + * Return the virtual address of an offset from the beginning of the buffer + */ +void * +IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength) +{ + if (start < _length && (start + withLength) <= _length) + return (void *)(_singleRange.v.address + start); + return 0; +} + +/* + * getPhysicalSegment: + * + * Get the physical address of the buffer, relative to the current position. + * If the current position is at the end of the buffer, a zero is returned. 
+ */ +IOPhysicalAddress +IOBufferMemoryDescriptor::getPhysicalSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) +{ + IOPhysicalAddress physAddr; + + if( offset != _position) + setPosition( offset ); + + assert(_position <= _length); + + /* Fail gracefully if the position is at (or past) the end-of-buffer. */ + if (_position >= _length) { + *lengthOfSegment = 0; + return 0; + } + + if (_options & kIOMemoryPageable) { + physAddr = super::getPhysicalSegment(offset, lengthOfSegment); + + } else { + /* Compute the largest contiguous physical length possible. */ + vm_address_t actualPos = _singleRange.v.address + _position; + vm_address_t actualPage = trunc_page(actualPos); + unsigned physInd = atop(actualPage-trunc_page(_singleRange.v.address)); + + vm_size_t physicalLength = actualPage + page_size - actualPos; + for (unsigned index = physInd + 1; index < _physSegCount && + _physAddrs[index] == _physAddrs[index-1] + page_size; index++) { + physicalLength += page_size; + } + + /* Clip contiguous physical length at the end-of-buffer. 
*/ + if (physicalLength > _length - _position) + physicalLength = _length - _position; + + *lengthOfSegment = physicalLength; + physAddr = _physAddrs[physInd] + (actualPos - actualPage); + } + + return physAddr; +} + +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 0); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14); +OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15); diff --git a/iokit/Kernel/IOCPU.cpp b/iokit/Kernel/IOCPU.cpp new file mode 100644 index 000000000..5b171c0d2 --- /dev/null +++ b/iokit/Kernel/IOCPU.cpp @@ -0,0 +1,446 @@ +/* + * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999-2000 Apple Computer, Inc. All rights reserved. + * + * DRI: Josh de Cesare + * + */ + +extern "C" { +#include +#include +} + +#include +#include +#include +#include + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +kern_return_t PE_cpu_start(cpu_id_t target, + vm_offset_t start_paddr, vm_offset_t arg_paddr) +{ + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (targetCPU == 0) return KERN_FAILURE; + return targetCPU->startCPU(start_paddr, arg_paddr); +} + +void PE_cpu_halt(cpu_id_t target) +{ + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (targetCPU) targetCPU->haltCPU(); +} + +void PE_cpu_signal(cpu_id_t source, cpu_id_t target) +{ + IOCPU *sourceCPU = OSDynamicCast(IOCPU, (OSObject *)source); + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (sourceCPU && targetCPU) sourceCPU->signalCPU(targetCPU); +} + +void PE_cpu_machine_init(cpu_id_t target, boolean_t boot) +{ + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (targetCPU) targetCPU->initCPU(boot); +} + +void PE_cpu_machine_quiesce(cpu_id_t target) +{ + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + + if (targetCPU) targetCPU->quiesceCPU(); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndAbstractStructors(IOCPU, IOService); +OSMetaClassDefineReservedUnused(IOCPU, 0); 
+OSMetaClassDefineReservedUnused(IOCPU, 1); +OSMetaClassDefineReservedUnused(IOCPU, 2); +OSMetaClassDefineReservedUnused(IOCPU, 3); +OSMetaClassDefineReservedUnused(IOCPU, 4); +OSMetaClassDefineReservedUnused(IOCPU, 5); +OSMetaClassDefineReservedUnused(IOCPU, 6); +OSMetaClassDefineReservedUnused(IOCPU, 7); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static OSArray *gIOCPUs; +static const OSSymbol *gIOCPUStateKey; +static OSString *gIOCPUStateNames[kIOCPUStateCount]; + +void IOCPUSleepKernel(void) +{ + long cnt, numCPUs; + IOCPU *target; + + numCPUs = gIOCPUs->getCount(); + + // Sleep the CPUs. + cnt = numCPUs; + while (cnt--) { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + if (target->getCPUState() == kIOCPUStateRunning) { + target->haltCPU(); + } + } + + // Wake the other CPUs. + for (cnt = 1; cnt < numCPUs; cnt++) { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + if (target->getCPUState() == kIOCPUStateStopped) { + processor_start(target->getMachProcessor()); + } + } +} + +void IOCPU::initCPUs(void) +{ + if (gIOCPUs == 0) { + gIOCPUs = OSArray::withCapacity(1); + + gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); + + gIOCPUStateNames[kIOCPUStateUnregistered] = + OSString::withCStringNoCopy("Unregistered"); + gIOCPUStateNames[kIOCPUStateUninitalized] = + OSString::withCStringNoCopy("Uninitalized"); + gIOCPUStateNames[kIOCPUStateStopped] = + OSString::withCStringNoCopy("Stopped"); + gIOCPUStateNames[kIOCPUStateRunning] = + OSString::withCStringNoCopy("Running"); + } +} + +bool IOCPU::start(IOService *provider) +{ + OSData *busFrequency, *cpuFrequency, *decFrequency; + + if (!super::start(provider)) return false; + + initCPUs(); + + _cpuGroup = gIOCPUs; + cpuNub = provider; + + gIOCPUs->setObject(this); + + // Correct the bus, cpu and dec frequencies in the device tree. 
+ busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); + decFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.dec_clock_rate_hz, 4); + provider->setProperty("bus-frequency", busFrequency); + provider->setProperty("clock-frequency", cpuFrequency); + provider->setProperty("timebase-frequency", decFrequency); + busFrequency->release(); + cpuFrequency->release(); + decFrequency->release(); + + setProperty("IOCPUID", (UInt32)this, 32); + + setCPUNumber(0); + setCPUState(kIOCPUStateUnregistered); + + return true; +} + +IOReturn IOCPU::setProperties(OSObject *properties) +{ + OSDictionary *dict = OSDynamicCast(OSDictionary, properties); + OSString *stateStr; + + if (dict == 0) return kIOReturnUnsupported; + + stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); + if (stateStr != 0) { + if (!IOUserClient::clientHasPrivilege(current_task(), "root")) + return kIOReturnNotPrivileged; + + if (_cpuNumber == 0) return kIOReturnUnsupported; + + if (stateStr->isEqualTo("running")) { + if (_cpuState == kIOCPUStateStopped) { + processor_start(machProcessor); + } else if (_cpuState != kIOCPUStateRunning) { + return kIOReturnUnsupported; + } + } else if (stateStr->isEqualTo("stopped")) { + if (_cpuState == kIOCPUStateRunning) { + haltCPU(); + } else if (_cpuState != kIOCPUStateStopped) { + return kIOReturnUnsupported; + } + } else return kIOReturnUnsupported; + + return kIOReturnSuccess; + } + + return kIOReturnUnsupported; +} + +void IOCPU::signalCPU(IOCPU */*target*/) +{ +} + +void IOCPU::enableCPUTimeBase(bool /*enable*/) +{ +} + +UInt32 IOCPU::getCPUNumber(void) +{ + return _cpuNumber; +} + +void IOCPU::setCPUNumber(UInt32 cpuNumber) +{ + _cpuNumber = cpuNumber; + setProperty("IOCPUNumber", _cpuNumber, 32); +} + +UInt32 IOCPU::getCPUState(void) +{ + return _cpuState; +} + +void IOCPU::setCPUState(UInt32 cpuState) +{ 
+ if ((cpuState >= 0) && (cpuState < kIOCPUStateCount)) { + _cpuState = cpuState; + setProperty(gIOCPUStateKey, gIOCPUStateNames[cpuState]); + } +} + +OSArray *IOCPU::getCPUGroup(void) +{ + return _cpuGroup; +} + +UInt32 IOCPU::getCPUGroupSize(void) +{ + return _cpuGroup->getCount(); +} + +processor_t IOCPU::getMachProcessor(void) +{ + return machProcessor; +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOInterruptController + +OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController); + +OSMetaClassDefineReservedUnused(IOCPUInterruptController, 0); +OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1); +OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2); +OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3); +OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4); +OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5); + + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +IOReturn IOCPUInterruptController::initCPUInterruptController(int sources) +{ + int cnt; + + if (!super::init()) return kIOReturnInvalid; + + numCPUs = sources; + + cpus = (IOCPU **)IOMalloc(numCPUs * sizeof(IOCPU *)); + if (cpus == 0) return kIOReturnNoMemory; + bzero(cpus, numCPUs * sizeof(IOCPU *)); + + vectors = (IOInterruptVector *)IOMalloc(numCPUs * sizeof(IOInterruptVector)); + if (vectors == 0) return kIOReturnNoMemory; + bzero(vectors, numCPUs * sizeof(IOInterruptVector)); + + // Allocate locks for the + for (cnt = 0; cnt < numCPUs; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < numCPUs; cnt++) { + if (vectors[cnt].interruptLock != NULL) + IOLockFree(vectors[cnt].interruptLock); + } + return kIOReturnNoResources; + } + } + + return kIOReturnSuccess; +} + +void IOCPUInterruptController::registerCPUInterruptController(void) +{ + registerService(); + + 
getPlatform()->registerInterruptController(gPlatformInterruptControllerName, + this); +} + +void IOCPUInterruptController::setCPUInterruptProperties(IOService *service) +{ + int cnt; + OSArray *controller; + OSArray *specifier; + OSData *tmpData; + long tmpLong; + + // Create the interrupt specifer array. + specifier = OSArray::withCapacity(numCPUs); + for (cnt = 0; cnt < numCPUs; cnt++) { + tmpLong = cnt; + tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong)); + specifier->setObject(tmpData); + tmpData->release(); + }; + + // Create the interrupt controller array. + controller = OSArray::withCapacity(numCPUs); + for (cnt = 0; cnt < numCPUs; cnt++) { + controller->setObject(gPlatformInterruptControllerName); + } + + // Put the two arrays into the property table. + service->setProperty(gIOInterruptControllersKey, controller); + service->setProperty(gIOInterruptSpecifiersKey, specifier); + controller->release(); + specifier->release(); +} + +void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu) +{ + ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, + (IOInterruptHandler)&IOCPUInterruptController::handleInterrupt, 0); + + enabledCPUs++; + + if (enabledCPUs == numCPUs) thread_wakeup(this); +} + +IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + IOInterruptVector *vector; + + if (source >= numCPUs) return kIOReturnNoResources; + + vector = &vectors[source]; + + // Get the lock for this vector. + IOTakeLock(vector->interruptLock); + + // Make sure the vector is not in use. + if (vector->interruptRegistered) { + IOUnlock(vector->interruptLock); + return kIOReturnNoResources; + } + + // Fill in vector with the client's info. + vector->handler = handler; + vector->nub = nub; + vector->source = source; + vector->target = target; + vector->refCon = refCon; + + // Get the vector ready. It starts hard disabled. 
+ vector->interruptDisabledHard = 1; + vector->interruptDisabledSoft = 1; + vector->interruptRegistered = 1; + + IOUnlock(vector->interruptLock); + + if (enabledCPUs != numCPUs) { + assert_wait(this, THREAD_UNINT); + thread_block(0); + } + + return kIOReturnSuccess; +} + +IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/, + int /*source*/, + int *interruptType) +{ + if (interruptType == 0) return kIOReturnBadArgument; + + *interruptType = kIOInterruptTypeLevel; + + return kIOReturnSuccess; +} + +IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/, + int /*source*/) +{ +// ml_set_interrupts_enabled(true); + return kIOReturnSuccess; +} + +IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/, + int /*source*/) +{ +// ml_set_interrupts_enabled(false); + return kIOReturnSuccess; +} + +IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/, + int /*source*/) +{ + ml_cause_interrupt(); + return kIOReturnSuccess; +} + +IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/, + IOService */*nub*/, + int source) +{ + IOInterruptVector *vector; + + vector = &vectors[source]; + + if (!vector->interruptRegistered) return kIOReturnInvalid; + + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + + return kIOReturnSuccess; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Kernel/IOCatalogue.cpp b/iokit/Kernel/IOCatalogue.cpp new file mode 100644 index 000000000..3fa1d62ee --- /dev/null +++ b/iokit/Kernel/IOCatalogue.cpp @@ -0,0 +1,877 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#include +#include +#include +#include +#include +extern "C" { +#include +#include +#include +}; + +#include + +#include + + +extern "C" { +int IODTGetLoaderInfo( char *key, void **infoAddr, int *infoSize ); +extern void IODTFreeLoaderInfo( char *key, void *infoAddr, int infoSize ); +extern void OSRuntimeUnloadCPPForSegment( + struct segment_command * segment); +}; + + +/***** + * At startup these function pointers are set to use the libsa in-kernel + * linker for recording and loading kmods. Once the root filesystem + * is available, the kmod_load_function pointer gets switched to point + * at the kmod_load_extension() function built into the kernel, and the + * others are set to zero. Those two functions must *always* be checked + * before being invoked. + */ +extern "C" { +kern_return_t (*kmod_load_function)(char *extension_name) = + &kmod_load_extension; +bool (*record_startup_extensions_function)(void) = 0; +bool (*add_from_mkext_function)(OSData * mkext) = 0; +void (*remove_startup_extension_function)(const char * name) = 0; +}; + + +/***** + * A few parts of IOCatalogue require knowledge of + * whether the in-kernel linker is present. This + * variable is set by libsa's bootstrap code. 
+ */ +int kernelLinkerPresent = 0; + + +#define super OSObject +#define kModuleKey "CFBundleIdentifier" + +OSDefineMetaClassAndStructors(IOCatalogue, OSObject) + +#define CATALOGTEST 0 + +IOCatalogue * gIOCatalogue; +const OSSymbol * gIOClassKey; +const OSSymbol * gIOProbeScoreKey; + +static void UniqueProperties( OSDictionary * dict ) +{ + OSString * data; + + data = OSDynamicCast( OSString, dict->getObject( gIOClassKey )); + if( data) { + const OSSymbol *classSymbol = OSSymbol::withString(data); + + dict->setObject( gIOClassKey, (OSSymbol *) classSymbol); + classSymbol->release(); + } + + data = OSDynamicCast( OSString, dict->getObject( gIOMatchCategoryKey )); + if( data) { + const OSSymbol *classSymbol = OSSymbol::withString(data); + + dict->setObject( gIOMatchCategoryKey, (OSSymbol *) classSymbol); + classSymbol->release(); + } +} + +void IOCatalogue::initialize( void ) +{ + OSArray * array; + OSString * errorString; + bool rc; + + extern const char * gIOKernelConfigTables; + + array = OSDynamicCast(OSArray, OSUnserialize(gIOKernelConfigTables, &errorString)); + if (!array && errorString) { + IOLog("KernelConfigTables syntax error: %s\n", + errorString->getCStringNoCopy()); + errorString->release(); + } + + gIOClassKey = OSSymbol::withCStringNoCopy( kIOClassKey ); + gIOProbeScoreKey = OSSymbol::withCStringNoCopy( kIOProbeScoreKey ); + assert( array && gIOClassKey && gIOProbeScoreKey); + + gIOCatalogue = new IOCatalogue; + assert(gIOCatalogue); + rc = gIOCatalogue->init(array); + assert(rc); + array->release(); +} + +// Initialize the IOCatalog object. 
+bool IOCatalogue::init(OSArray * initArray) +{ + IORegistryEntry * entry; + OSDictionary * dict; + + if ( !super::init() ) + return false; + + generation = 1; + + array = initArray; + array->retain(); + kernelTables = OSCollectionIterator::withCollection( array ); + + lock = IOLockAlloc(); + + kernelTables->reset(); + while( (dict = (OSDictionary *) kernelTables->getNextObject())) { + UniqueProperties(dict); + if( 0 == dict->getObject( gIOClassKey )) + IOLog("Missing or bad \"%s\" key\n", + gIOClassKey->getCStringNoCopy()); + } + +#if CATALOGTEST + AbsoluteTime deadline; + clock_interval_to_deadline( 1000, kMillisecondScale ); + thread_call_func_delayed( ping, this, deadline ); +#endif + + entry = IORegistryEntry::getRegistryRoot(); + if ( entry ) + entry->setProperty(kIOCatalogueKey, this); + + return true; +} + +// Release all resources used by IOCatalogue and deallocate. +// This will probably never be called. +void IOCatalogue::free( void ) +{ + if ( array ) + array->release(); + + if ( kernelTables ) + kernelTables->release(); + + super::free(); +} + +#if CATALOGTEST + +static int hackLimit; + +enum { kDriversPerIter = 4 }; + +void IOCatalogue::ping( thread_call_param_t arg, thread_call_param_t) +{ + IOCatalogue * self = (IOCatalogue *) arg; + OSOrderedSet * set; + OSDictionary * table; + int newLimit; + + set = OSOrderedSet::withCapacity( 1 ); + + IOTakeLock( &self->lock ); + + for( newLimit = 0; newLimit < kDriversPerIter; newLimit++) { + table = (OSDictionary *) self->array->getObject( + hackLimit + newLimit ); + if( table) { + set->setLastObject( table ); + + OSSymbol * sym = (OSSymbol *) table->getObject( gIOClassKey ); + kprintf("enabling %s\n", sym->getCStringNoCopy()); + + } else { + newLimit--; + break; + } + } + + IOService::catalogNewDrivers( set ); + + hackLimit += newLimit; + self->generation++; + + IOUnlock( &self->lock ); + + if( kDriversPerIter == newLimit) { + AbsoluteTime deadline; + clock_interval_to_deadline( 500, kMillisecondScale ); + 
thread_call_func_delayed( ping, this, deadline ); + } +} +#endif + +OSOrderedSet * IOCatalogue::findDrivers( IOService * service, + SInt32 * generationCount ) +{ + OSDictionary * nextTable; + OSOrderedSet * set; + OSString * imports; + + set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, + (void *)gIOProbeScoreKey ); + if( !set ) + return( 0 ); + + IOTakeLock( lock ); + kernelTables->reset(); + +#if CATALOGTEST + int hackIndex = 0; +#endif + while( (nextTable = (OSDictionary *) kernelTables->getNextObject())) { +#if CATALOGTEST + if( hackIndex++ > hackLimit) + break; +#endif + imports = OSDynamicCast( OSString, + nextTable->getObject( gIOProviderClassKey )); + if( imports && service->metaCast( imports )) + set->setObject( nextTable ); + } + + *generationCount = getGenerationCount(); + + IOUnlock( lock ); + + return( set ); +} + +// Is personality already in the catalog? +OSOrderedSet * IOCatalogue::findDrivers( OSDictionary * matching, + SInt32 * generationCount) +{ + OSDictionary * dict; + OSOrderedSet * set; + + UniqueProperties(matching); + + set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, + (void *)gIOProbeScoreKey ); + + IOTakeLock( lock ); + kernelTables->reset(); + while ( (dict = (OSDictionary *) kernelTables->getNextObject()) ) { + if ( dict->isEqualTo(matching, matching) ) + set->setObject(dict); + } + *generationCount = getGenerationCount(); + IOUnlock( lock ); + + return set; +} + +// Add a new personality to the set if it has a unique IOResourceMatchKey value. +// XXX -- svail: This should be optimized. +// esb - There doesn't seem like any reason to do this - it causes problems +// esb - when there are more than one loadable driver matching on the same provider class +static void AddNewImports( OSOrderedSet * set, OSDictionary * dict ) +{ + set->setObject(dict); +} + +// Add driver config tables to catalog and start matching process. 
+bool IOCatalogue::addDrivers(OSArray * drivers, + bool doNubMatching = true ) +{ + OSCollectionIterator * iter; + OSDictionary * dict; + OSOrderedSet * set; + OSArray * persons; + bool ret; + + ret = true; + persons = OSDynamicCast(OSArray, drivers); + if ( !persons ) + return false; + + iter = OSCollectionIterator::withCollection( persons ); + if (!iter ) + return false; + + set = OSOrderedSet::withCapacity( 10, IOServiceOrdering, + (void *)gIOProbeScoreKey ); + if ( !set ) { + iter->release(); + return false; + } + + IOTakeLock( lock ); + while ( (dict = (OSDictionary *) iter->getNextObject()) ) { + UInt count; + + UniqueProperties( dict ); + + // Add driver personality to catalogue. + count = array->getCount(); + while ( count-- ) { + OSDictionary * driver; + + // Be sure not to double up on personalities. + driver = (OSDictionary *)array->getObject(count); + if ( dict->isEqualTo(driver, driver) ) { + array->removeObject(count); + break; + } + } + + ret = array->setObject( dict ); + if ( !ret ) + break; + + AddNewImports( set, dict ); + } + // Start device matching. + if ( doNubMatching && (set->getCount() > 0) ) { + IOService::catalogNewDrivers( set ); + generation++; + } + IOUnlock( lock ); + + if ( doNubMatching ) { + (IOService::getServiceRoot())->waitQuiet(); + kmod_send_generic( kIOCatalogMatchIdle, 0, 0 ); + } + + set->release(); + iter->release(); + + return ret; +} + +// Remove drivers from the catalog which match the +// properties in the matching dictionary. 
+bool IOCatalogue::removeDrivers( OSDictionary * matching, + bool doNubMatching = true) +{ + OSCollectionIterator * tables; + OSDictionary * dict; + OSOrderedSet * set; + OSArray * arrayCopy; + + if ( !matching ) + return false; + + set = OSOrderedSet::withCapacity(10, + IOServiceOrdering, + (void *)gIOProbeScoreKey); + if ( !set ) + return false; + + arrayCopy = OSArray::withCapacity(100); + if ( !arrayCopy ) { + set->release(); + return false; + } + + tables = OSCollectionIterator::withCollection(arrayCopy); + arrayCopy->release(); + if ( !tables ) { + set->release(); + return false; + } + + UniqueProperties( matching ); + + IOTakeLock( lock ); + kernelTables->reset(); + arrayCopy->merge(array); + array->flushCollection(); + tables->reset(); + while ( (dict = (OSDictionary *)tables->getNextObject()) ) { + if ( dict->isEqualTo(matching, matching) ) { + AddNewImports( set, dict ); + continue; + } + + array->setObject(dict); + } + // Start device matching. + if ( doNubMatching && (set->getCount() > 0) ) { + IOService::catalogNewDrivers(set); + generation++; + } + IOUnlock( lock ); + + set->release(); + tables->release(); + + return true; +} + +// Return the generation count. +SInt32 IOCatalogue::getGenerationCount( void ) const +{ + return( generation ); +} + +bool IOCatalogue::isModuleLoaded( OSString * moduleName ) const +{ + return isModuleLoaded(moduleName->getCStringNoCopy()); +} + +bool IOCatalogue::isModuleLoaded( const char * moduleName ) const +{ + kmod_info_t * k_info; + + if ( !moduleName ) + return false; + + // Is the module already loaded? + k_info = kmod_lookupbyname((char *)moduleName); + if ( !k_info ) { + kern_return_t ret; + + // If the module hasn't been loaded, then load it. 
+ if (kmod_load_function != 0) { + ret = kmod_load_function((char *)moduleName); + if ( ret != kIOReturnSuccess ) { + IOLog("IOCatalogue: %s cannot be loaded.\n", moduleName); + + /* If the extension couldn't be loaded this time, + * make it unavailable so that no more requests are + * made in vain. This also enables other matching + * extensions to have a chance. + */ + if (kernelLinkerPresent && remove_startup_extension_function) { + (*remove_startup_extension_function)(moduleName); + } + return false; + } else if (kernelLinkerPresent) { + // If kern linker is here, the driver is actually loaded, + // so return true. + return true; + } else { + // kern linker isn't here, a request has been queued + // but the module isn't necessarily loaded yet, so stall. + return false; + } + } else { + IOLog("IOCatalogue: %s cannot be loaded " + "(kmod load function not set).\n", + moduleName); + } + + return false; + } + + return true; +} + +// Check to see if module has been loaded already. +bool IOCatalogue::isModuleLoaded( OSDictionary * driver ) const +{ + OSString * moduleName = NULL; + + if ( !driver ) + return false; + + moduleName = OSDynamicCast(OSString, driver->getObject(kModuleKey)); + if ( moduleName ) + return isModuleLoaded(moduleName); + + /* If a personality doesn't hold the "CFBundleIdentifier" key + * it is assumed to be an "in-kernel" driver. + */ + return true; +} + +// This function is called after a module has been loaded. 
+void IOCatalogue::moduleHasLoaded( OSString * moduleName ) +{ + OSDictionary * dict; + + dict = OSDictionary::withCapacity(2); + dict->setObject(kModuleKey, moduleName); + startMatching(dict); + dict->release(); +} + +void IOCatalogue::moduleHasLoaded( const char * moduleName ) +{ + OSString * name; + + name = OSString::withCString(moduleName); + moduleHasLoaded(name); + name->release(); +} + +IOReturn IOCatalogue::unloadModule( OSString * moduleName ) const +{ + kmod_info_t * k_info; + kern_return_t ret; + const char * name; + + ret = kIOReturnBadArgument; + if ( moduleName ) { + name = moduleName->getCStringNoCopy(); + k_info = kmod_lookupbyname((char *)name); + if ( k_info && (k_info->reference_count < 1) ) { + if ( k_info->stop && + !((ret = k_info->stop(k_info, 0)) == kIOReturnSuccess) ) + return ret; + + ret = kmod_destroy(host_priv_self(), k_info->id); + } + } + + return ret; +} + +static IOReturn _terminateDrivers( OSArray * array, OSDictionary * matching ) +{ + OSCollectionIterator * tables; + OSCollectionIterator * props; + OSDictionary * dict; + OSIterator * iter; + OSArray * arrayCopy; + IOService * service; + IOReturn ret; + + if ( !matching ) + return kIOReturnBadArgument; + + ret = kIOReturnSuccess; + dict = 0; + iter = IORegistryIterator::iterateOver(gIOServicePlane, + kIORegistryIterateRecursively); + if ( !iter ) + return kIOReturnNoMemory; + + UniqueProperties( matching ); + + props = OSCollectionIterator::withCollection(matching); + if ( !props ) { + iter->release(); + return kIOReturnNoMemory; + } + + // terminate instances. 
+ do { + iter->reset(); + while( (service = (IOService *)iter->getNextObject()) ) { + dict = service->getPropertyTable(); + if ( !dict ) + continue; + + if ( !dict->isEqualTo(matching, matching) ) + continue; + + if ( !service->terminate(kIOServiceRequired|kIOServiceSynchronous) ) { + ret = kIOReturnUnsupported; + break; + } + } + } while( !service && !iter->isValid()); + iter->release(); + + // remove configs from catalog. + if ( ret != kIOReturnSuccess ) + return ret; + + arrayCopy = OSArray::withCapacity(100); + if ( !arrayCopy ) + return kIOReturnNoMemory; + + tables = OSCollectionIterator::withCollection(arrayCopy); + arrayCopy->release(); + if ( !tables ) + return kIOReturnNoMemory; + + arrayCopy->merge(array); + array->flushCollection(); + tables->reset(); + while ( (dict = (OSDictionary *)tables->getNextObject()) ) { + if ( dict->isEqualTo(matching, matching) ) + continue; + + array->setObject(dict); + } + + tables->release(); + + return ret; +} + +IOReturn IOCatalogue::terminateDrivers( OSDictionary * matching ) +{ + IOReturn ret; + + ret = kIOReturnSuccess; + IOTakeLock( lock ); + ret = _terminateDrivers(array, matching); + kernelTables->reset(); + IOUnlock( lock ); + + return ret; +} + +IOReturn IOCatalogue::terminateDriversForModule( + OSString * moduleName, + bool unload ) +{ + IOReturn ret; + OSDictionary * dict; + + dict = OSDictionary::withCapacity(1); + if ( !dict ) + return kIOReturnNoMemory; + + dict->setObject(kModuleKey, moduleName); + + IOTakeLock( lock ); + + ret = _terminateDrivers(array, dict); + kernelTables->reset(); + + // Unload the module itself. + if ( unload && ret == kIOReturnSuccess ) { + // Do kmod stop first. 
+ ret = unloadModule(moduleName); + } + + IOUnlock( lock ); + + dict->release(); + + return ret; +} + +IOReturn IOCatalogue::terminateDriversForModule( + const char * moduleName, + bool unload ) +{ + OSString * name; + IOReturn ret; + + name = OSString::withCString(moduleName); + if ( !name ) + return kIOReturnNoMemory; + + ret = terminateDriversForModule(name, unload); + name->release(); + + return ret; +} + +bool IOCatalogue::startMatching( OSDictionary * matching ) +{ + OSDictionary * dict; + OSOrderedSet * set; + + if ( !matching ) + return false; + + set = OSOrderedSet::withCapacity(10, IOServiceOrdering, + (void *)gIOProbeScoreKey); + if ( !set ) + return false; + + IOTakeLock( lock ); + kernelTables->reset(); + + while ( (dict = (OSDictionary *)kernelTables->getNextObject()) ) { + if ( dict->isEqualTo(matching, matching) ) + AddNewImports(set, dict); + } + // Start device matching. + if ( set->getCount() > 0 ) { + IOService::catalogNewDrivers(set); + generation++; + } + + IOUnlock( lock ); + + set->release(); + + return true; +} + +void IOCatalogue::reset(void) +{ + OSArray * tables; + OSDictionary * entry; + unsigned int count; + + IOLog("Resetting IOCatalogue.\n"); + + IOTakeLock( lock ); + tables = OSArray::withArray(array); + array->flushCollection(); + + count = tables->getCount(); + while ( count-- ) { + entry = (OSDictionary *)tables->getObject(count); + if ( entry && !entry->getObject(kModuleKey) ) { + array->setObject(entry); + } + } + + kernelTables->reset(); + IOUnlock( lock ); + + tables->release(); +} + +bool IOCatalogue::serialize(OSSerialize * s) const +{ + bool ret; + + if ( !s ) + return false; + + IOTakeLock( lock ); + ret = array->serialize(s); + IOUnlock( lock ); + + return ret; +} + + +bool IOCatalogue::recordStartupExtensions(void) { + bool result = false; + + if (record_startup_extensions_function) { + result = (*record_startup_extensions_function)(); + } else { + IOLog("Can't record startup extensions; " + "kernel linker is not 
present.\n"); + result = false; + } + + return result; +} + + +/********************************************************************* +*********************************************************************/ +bool IOCatalogue::addExtensionsFromArchive(OSData * mkext) { + bool result = false; + + if (add_from_mkext_function) { + result = (*add_from_mkext_function)(mkext); + } else { + IOLog("Can't add startup extensions from archive; " + "kernel linker is not present.\n"); + result = false; + } + + return result; +} + + +/********************************************************************* +* This function clears out all references to the in-kernel linker, +* frees the list of startup extensions in extensionDict, and +* deallocates the kernel's __KLD segment to reclaim that memory. +*********************************************************************/ +kern_return_t IOCatalogue::removeKernelLinker(void) { + kern_return_t result = KERN_SUCCESS; + extern struct mach_header _mh_execute_header; + struct segment_command * segment; + char * dt_segment_name; + void * segment_paddress; + int segment_size; + + /* This must be the very first thing done by this function. + */ + IOTakeLock(lock); + + + /* If the kernel linker isn't here, that's automatically + * a success. + */ + if (!kernelLinkerPresent) { + result = KERN_SUCCESS; + goto finish; + } + + IOLog("Jettisoning kernel linker.\n"); + + kernelLinkerPresent = 0; + + /* Set the kmod_load_extension function as the means for loading + * a kernel extension. + */ + kmod_load_function = &kmod_load_extension; + + record_startup_extensions_function = 0; + add_from_mkext_function = 0; + remove_startup_extension_function = 0; + + + /* Invoke destructors for the __KLD and __LINKEDIT segments. + * Do this for all segments before actually freeing their + * memory so that any cross-dependencies (not that there + * should be any) are handled. 
+ */ + segment = getsegbynamefromheader( + &_mh_execute_header, "__KLD"); + if (!segment) { + result = KERN_FAILURE; + goto finish; + } + OSRuntimeUnloadCPPForSegment(segment); + + segment = getsegbynamefromheader( + &_mh_execute_header, "__LINKEDIT"); + if (!segment) { + result = KERN_FAILURE; + goto finish; + } + OSRuntimeUnloadCPPForSegment(segment); + + + /* Free the memory that was set up by bootx. + */ + dt_segment_name = "Kernel-__KLD"; + if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) { + IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress, + (int)segment_size); + } + + dt_segment_name = "Kernel-__LINKEDIT"; + if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) { + IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress, + (int)segment_size); + } + + +finish: + + /* This must be the very last thing done before returning. + */ + IOUnlock(lock); + + return result; +} diff --git a/iokit/Kernel/IOCommand.cpp b/iokit/Kernel/IOCommand.cpp new file mode 100644 index 000000000..f9dbc1d33 --- /dev/null +++ b/iokit/Kernel/IOCommand.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 11/13/2000 CJS Created IOCommand class and implementation + * + */ + +#include + +#define super OSObject +OSDefineMetaClassAndAbstractStructors(IOCommand, OSObject); + + +//-------------------------------------------------------------------------- +// init - initialize our data structures +//-------------------------------------------------------------------------- + +bool IOCommand::init(void) +{ + if (super::init()) { + queue_init(&fCommandChain); + return true; + } + else + return false; +} diff --git a/iokit/Kernel/IOCommandGate.cpp b/iokit/Kernel/IOCommandGate.cpp new file mode 100644 index 000000000..53a99baa9 --- /dev/null +++ b/iokit/Kernel/IOCommandGate.cpp @@ -0,0 +1,167 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#define super IOEventSource + +OSDefineMetaClassAndStructors(IOCommandGate, IOEventSource) +OSMetaClassDefineReservedUnused(IOCommandGate, 0); +OSMetaClassDefineReservedUnused(IOCommandGate, 1); +OSMetaClassDefineReservedUnused(IOCommandGate, 2); +OSMetaClassDefineReservedUnused(IOCommandGate, 3); +OSMetaClassDefineReservedUnused(IOCommandGate, 4); +OSMetaClassDefineReservedUnused(IOCommandGate, 5); +OSMetaClassDefineReservedUnused(IOCommandGate, 6); +OSMetaClassDefineReservedUnused(IOCommandGate, 7); + +bool IOCommandGate::checkForWork() { return false; } + +bool IOCommandGate::init(OSObject *inOwner, Action inAction = 0) +{ + return super::init(inOwner, (IOEventSource::Action) inAction); +} + +IOCommandGate * +IOCommandGate::commandGate(OSObject *inOwner, Action inAction = 0) +{ + IOCommandGate *me = new IOCommandGate; + + if (me && !me->init(inOwner, inAction)) { + me->free(); + return 0; + } + + return me; +} + +IOReturn IOCommandGate::runCommand(void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0) +{ + IOReturn res; + + if (!enabled) + return kIOReturnNotPermitted; + + if (!action) + return kIOReturnNoResources; + + // closeGate is recursive so don't worry if we already hold the lock. + IOTimeStampConstant(IODBG_CMDQ(IOCMDQ_ACTION), + (unsigned int) action, (unsigned int) owner); + + closeGate(); + res = (*(Action) action)(owner, arg0, arg1, arg2, arg3); + openGate(); + + return res; +} + +IOReturn IOCommandGate::runAction(Action inAction, + void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0) +{ + IOReturn res; + + if (!enabled) + return kIOReturnNotPermitted; + + if (!inAction) + return kIOReturnBadArgument; + + IOTimeStampConstant(IODBG_CMDQ(IOCMDQ_ACTION), + (unsigned int) inAction, (unsigned int) owner); + + // closeGate is recursive so don't worry if we already hold the lock. 
+ closeGate(); + res = (*inAction)(owner, arg0, arg1, arg2, arg3); + openGate(); + + return res; +} + +IOReturn IOCommandGate::attemptCommand(void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0) +{ + IOReturn res; + + if (!enabled) + return kIOReturnNotPermitted; + + if (!action) + return kIOReturnNoResources; + + // Try to hold the lock if can't get return immediately. + if (!tryCloseGate()) + return kIOReturnCannotLock; + + // closeGate is recursive so don't worry if we already hold the lock. + IOTimeStampConstant(IODBG_CMDQ(IOCMDQ_ACTION), + (unsigned int) action, (unsigned int) owner); + + res = (*(Action) action)(owner, arg0, arg1, arg2, arg3); + openGate(); + + return res; +} + +IOReturn IOCommandGate::attemptAction(Action inAction, + void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0) +{ + IOReturn res; + + if (!enabled) + return kIOReturnNotPermitted; + + if (!inAction) + return kIOReturnBadArgument; + + // Try to close the gate if can't get return immediately. + if (!tryCloseGate()) + return kIOReturnCannotLock; + + IOTimeStampConstant(IODBG_CMDQ(IOCMDQ_ACTION), + (unsigned int) inAction, (unsigned int) owner); + + res = (*inAction)(owner, arg0, arg1, arg2, arg3); + openGate(); + + return res; +} + +IOReturn IOCommandGate::commandSleep(void *event, UInt32 interruptible) +{ + IOReturn ret; + + if (!workLoop->inGate()) + return kIOReturnNotPermitted; + + return sleepGate(event, interruptible); +} + +void IOCommandGate::commandWakeup(void *event, bool oneThread) +{ + wakeupGate(event, oneThread); +} diff --git a/iokit/Kernel/IOCommandPool.cpp b/iokit/Kernel/IOCommandPool.cpp new file mode 100644 index 000000000..c50a61636 --- /dev/null +++ b/iokit/Kernel/IOCommandPool.cpp @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 2001-01-17 gvdl Re-implement on IOCommandGate::commandSleep + * 10/9/2000 CJS Created IOCommandPool class and implementation + * + */ + +#include + +#define super OSObject +OSDefineMetaClassAndStructors(IOCommandPool, OSObject); +OSMetaClassDefineReservedUnused(IOCommandPool, 0); +OSMetaClassDefineReservedUnused(IOCommandPool, 1); +OSMetaClassDefineReservedUnused(IOCommandPool, 2); +OSMetaClassDefineReservedUnused(IOCommandPool, 3); +OSMetaClassDefineReservedUnused(IOCommandPool, 4); +OSMetaClassDefineReservedUnused(IOCommandPool, 5); +OSMetaClassDefineReservedUnused(IOCommandPool, 6); +OSMetaClassDefineReservedUnused(IOCommandPool, 7); + +//-------------------------------------------------------------------------- +// withWorkLoop - primary initializer and factory method +//-------------------------------------------------------------------------- + +IOCommandPool *IOCommandPool:: +withWorkLoop(IOWorkLoop *inWorkLoop) +{ + IOCommandPool * me = new IOCommandPool; + + if (me && 
!me->initWithWorkLoop(inWorkLoop)) { + me->release(); + return 0; + } + + return me; +} + + +bool IOCommandPool:: +initWithWorkLoop(IOWorkLoop *inWorkLoop) +{ + assert(inWorkLoop); + + if (!super::init()) + return false; + + queue_init(&fQueueHead); + + fSerializer = IOCommandGate::commandGate(this); + assert(fSerializer); + if (!fSerializer) + return false; + + if (kIOReturnSuccess != inWorkLoop->addEventSource(fSerializer)) + return false; + + return true; +} + +//-------------------------------------------------------------------------- +// commandPool & init - obsolete initializer and factory method +//-------------------------------------------------------------------------- + +IOCommandPool *IOCommandPool:: +commandPool(IOService * inOwner, IOWorkLoop *inWorkLoop, UInt32 inSize) +{ + IOCommandPool * me = new IOCommandPool; + + if (me && !me->init(inOwner, inWorkLoop, inSize)) { + me->release(); + return 0; + } + + return me; +} + +bool IOCommandPool:: +init(IOService */* inOwner */, IOWorkLoop *inWorkLoop, UInt32 /* inSize */) +{ + return initWithWorkLoop(inWorkLoop); +} + + +//-------------------------------------------------------------------------- +// free - free all allocated resources +//-------------------------------------------------------------------------- + +void +IOCommandPool::free(void) +{ + if (fSerializer) { + // remove our event source from owner's workloop + IOWorkLoop *wl = fSerializer->getWorkLoop(); + if (wl) + wl->removeEventSource(fSerializer); + + fSerializer->release(); + fSerializer = 0; + } + + // Tell our superclass to cleanup too + super::free(); +} + + +//-------------------------------------------------------------------------- +// getCommand - Gets a command from the pool. 
Pass true in +// blockForCommand if you want your thread to sleep +// waiting for resources +//-------------------------------------------------------------------------- + +IOCommand * +IOCommandPool::getCommand(bool blockForCommand) +{ + IOReturn result = kIOReturnSuccess; + IOCommand *command = 0; + + result = fSerializer->runAction((IOCommandGate::Action) + &IOCommandPool::gatedGetCommand, + (void *) &command, (void *) blockForCommand); + if (kIOReturnSuccess == result) + return command; + else + return 0; +} + + +//-------------------------------------------------------------------------- +// gatedGetCommand - Static callthrough function +// (on safe side of command gate) +//-------------------------------------------------------------------------- + +IOReturn IOCommandPool:: +gatedGetCommand(IOCommand **command, bool blockForCommand) +{ + while (queue_empty(&fQueueHead)) { + if (!blockForCommand) + return kIOReturnNoResources; + + fSleepers++; + fSerializer->commandSleep(&fSleepers, THREAD_UNINT); + } + + queue_remove_first(&fQueueHead, + *command, IOCommand *, fCommandChain); + return kIOReturnSuccess; +} + + +//-------------------------------------------------------------------------- +// returnCommand - Returns command to the pool. 
+//-------------------------------------------------------------------------- + +void IOCommandPool:: +returnCommand(IOCommand *command) +{ + (void) fSerializer->runAction((IOCommandGate::Action) + &IOCommandPool::gatedReturnCommand, (void *) command); +} + + +//-------------------------------------------------------------------------- +// gatedReturnCommand - Callthrough function +// (on safe side of command gate) +//-------------------------------------------------------------------------- + +IOReturn IOCommandPool:: +gatedReturnCommand(IOCommand *command) +{ + queue_enter(&fQueueHead, command, IOCommand *, fCommandChain); + if (fSleepers) { + fSerializer->commandWakeup(&fSleepers, /* oneThread */ true); + fSleepers--; + } + return kIOReturnSuccess; +} diff --git a/iokit/Kernel/IOCommandQueue.cpp b/iokit/Kernel/IOCommandQueue.cpp new file mode 100644 index 000000000..e5439a542 --- /dev/null +++ b/iokit/Kernel/IOCommandQueue.cpp @@ -0,0 +1,268 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. +]*/ +#include +#include +#include + +#include + +#define NUM_FIELDS_IN_COMMAND 4 +typedef struct commandEntryTag { + void *f[NUM_FIELDS_IN_COMMAND]; +} commandEntryT; + +#define super IOEventSource + +OSDefineMetaClassAndStructors(IOCommandQueue, IOEventSource) + +/*[ +Instance Methods + +initWithNext:owner:action:size: + - initWithNext: (IOEventSource *) inNext + owner: (id) inOwner + action: (SEL) inAction + size: (int) inSize; + +Primary initialiser for the IOCommandQueue class. Returns an +IOCommandQueue object that is initialised with the next object in +the chain and the owner and action. On return the signalWorkAvailableIMP +has been cached for this function. + +If the object fails to initialise for some reason then [self free] will +be called and nil will be returned. + +See also: initWithNext:owner:action:(IOEventSource) +]*/ +bool IOCommandQueue::init(OSObject *inOwner, + IOCommandQueueAction inAction, + int inSize) +{ + if ( !super::init(inOwner, (IOEventSourceAction) inAction) ) + return false; + + if (KERN_SUCCESS + != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize)) + return false; + + size = inSize + 1; /* Allocate one more entry than needed */ + + queue = (void *)kalloc(size * sizeof(commandEntryT)); + if (!queue) + return false; + + producerLock = IOLockAlloc(); + if (!producerLock) + return false; + + producerIndex = consumerIndex = 0; + + return true; +} + +IOCommandQueue * +IOCommandQueue::commandQueue(OSObject *inOwner, + IOCommandQueueAction inAction, + int inSize) +{ + IOCommandQueue *me = new IOCommandQueue; + + if (me && !me->init(inOwner, inAction, inSize)) { + me->free(); + return 0; + } + + return me; +} + +/*[ +free + - free + +Mandatory free of the object independent of the current retain count. +Returns nil. 
+]*/ +void IOCommandQueue::free() +{ + if (queue) + kfree((vm_offset_t)queue, size * sizeof(commandEntryT)); + if (producerSema) + semaphore_destroy(kernel_task, producerSema); + if (producerLock) + IOLockFree(producerLock); + + super::free(); +} + +#if NUM_FIELDS_IN_COMMAND != 4 +#error IOCommandQueue::checkForWork needs to be updated for new command size +#endif + +bool IOCommandQueue::checkForWork() +{ + void *field0, *field1, *field2, *field3; + + if (!enabled || consumerIndex == producerIndex) + return false; + + { + commandEntryT *q = (commandEntryT *) queue; + int localIndex = consumerIndex; + + field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1]; + field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3]; + semaphore_signal(producerSema); + } + + if (++consumerIndex >= size) + consumerIndex = 0; + + IOTimeStampConstant(IODBG_CMDQ(IOCMDQ_ACTION), + (unsigned int) action, (unsigned int) owner); + + (*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3); + + return (consumerIndex != producerIndex); +} + +/*[ +enqueueSleep:command: + - (kern_return_t) enqueueSleepRaw: (BOOL) gotoSleep + field0: (void *) field0 field1: (void *) field1 + field2: (void *) field2 field3: (void *) field3; + +Key method that enqueues the four input fields onto the command queue +and calls signalWorkAvailable to indicate that work is available to the +consumer. This routine is safe against multiple threaded producers. + +A family of convenience functions have been provided to assist with the +enqueueing of an method selector and an integer tag. This relies on the +IODevice rawCommandOccurred... command to forward on the requests. 
+ +See also: signalWorkAvailable, checkForWork +]*/ +#if NUM_FIELDS_IN_COMMAND != 4 +#error IOCommandQueue::enqueueCommand needs to be updated +#endif + +kern_return_t +IOCommandQueue::enqueueCommand(bool gotoSleep, + void *field0, void *field1, + void *field2, void *field3) +{ + kern_return_t rtn = KERN_SUCCESS; + int retry; + + /* Make sure there is room in the queue before doing anything else */ + + if (gotoSleep) { + retry = 0; + do + rtn = semaphore_wait(producerSema); + while( (KERN_SUCCESS != rtn) + && (KERN_OPERATION_TIMED_OUT != rtn) + && (KERN_SEMAPHORE_DESTROYED != rtn) + && (KERN_TERMINATED != rtn) + && ((retry++) < 4)); + } else + rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO); + + if (KERN_SUCCESS != rtn) + return rtn; + + /* Block other producers */ + IOTakeLock(producerLock); + + /* + * Make sure that we update the current producer entry before we + * increment the producer pointer. This avoids a nasty race as the + * as the test for work is producerIndex != consumerIndex and a signal. + */ + { + commandEntryT *q = (commandEntryT *) queue; + int localIndex = producerIndex; + + q[localIndex].f[0] = field0; q[localIndex].f[1] = field1; + q[localIndex].f[2] = field2; q[localIndex].f[3] = field3; + } + if (++producerIndex >= size) + producerIndex = 0; + + /* Clear to allow other producers to go now */ + IOUnlock(producerLock); + + /* + * Right we have created some new work, we had better make sure that + * we notify the work loop that it has to test producerIndex. 
+ */ + signalWorkAvailable(); + return rtn; +} + +int IOCommandQueue::performAndFlush(OSObject *target, + IOCommandQueueAction inAction) +{ + int numEntries; + kern_return_t rtn; + + // Set the defaults if necessary + if (!target) + target = owner; + if (!inAction) + inAction = (IOCommandQueueAction) action; + + // Lock out the producers first + do { + rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO); + } while (rtn == KERN_SUCCESS); + + // now step over all remaining entries in the command queue + for (numEntries = 0; consumerIndex != producerIndex; ) { + void *field0, *field1, *field2, *field3; + + { + commandEntryT *q = (commandEntryT *) queue; + int localIndex = consumerIndex; + + field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1]; + field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3]; + } + + if (++consumerIndex >= size) + consumerIndex = 0; + + (*inAction)(target, field0, field1, field2, field3); + } + + // finally refill the producer semaphore to size - 1 + for (int i = 1; i < size; i++) + semaphore_signal(producerSema); + + return numEntries; +} diff --git a/iokit/Kernel/IOConditionLock.cpp b/iokit/Kernel/IOConditionLock.cpp new file mode 100644 index 000000000..d4940b77e --- /dev/null +++ b/iokit/Kernel/IOConditionLock.cpp @@ -0,0 +1,199 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1997 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1994-1996 NeXT Software, Inc. All rights reserved. + * + * AppleIOPSSafeCondLock.m. Lock object with exported condition variable, + * kernel version. + * + * HISTORY + * 1997-11- + * 01-Aug-91 Doug Mitchell at NeXT + * Created. + */ + +#include + +#define super OSObject +OSDefineMetaClassAndStructors(IOConditionLock, OSObject) + +bool IOConditionLock::initWithCondition(int inCondition, bool inIntr = true) +{ + if (!super::init()) + return false; + + cond_interlock = IOLockAlloc(); + sleep_interlock = IOLockAlloc(); + + condition = inCondition; + want_lock = false; + waiting = false; + interruptible = (inIntr) ? 
THREAD_INTERRUPTIBLE : THREAD_UNINT; + + return cond_interlock && sleep_interlock; +} + +IOConditionLock *IOConditionLock::withCondition(int condition, bool intr = true) +{ + IOConditionLock *me = new IOConditionLock; + + if (me && !me->initWithCondition(condition, intr)) { + me->free(); + return 0; + } + + return me; +} +void IOConditionLock::free() +{ + if (cond_interlock) + IOLockFree(cond_interlock); + if (sleep_interlock) + IOLockFree(sleep_interlock); + super::free(); +} + +bool IOConditionLock::getInterruptible() const +{ + return interruptible; +} + +int IOConditionLock:: getCondition() const +{ + return condition; +} + +int IOConditionLock:: setCondition(int inCondition) +{ + int old = condition; + + condition = inCondition; + thread_wakeup_one((void *) &condition); + + return old; +} + +void IOConditionLock::unlock() +{ + IOTakeLock(sleep_interlock); + + thread_wakeup_one((void *) &condition); + + want_lock = false; + if (waiting) { + waiting = false; + thread_wakeup(this); // Wakeup everybody + } + + IOUnlock(sleep_interlock); +} + +void IOConditionLock::unlockWith(int inCondition) +{ + IOTakeLock(sleep_interlock); + IOTakeLock(cond_interlock); + + condition = inCondition; + + IOUnlock(cond_interlock); + IOUnlock(sleep_interlock); + + unlock(); +} + +bool IOConditionLock::tryLock() +{ + bool result; + + IOTakeLock(sleep_interlock); + + result = !want_lock; + if (result) + want_lock = true; + + IOUnlock(sleep_interlock); + + return result; +} + +int IOConditionLock::lock() +{ + int thread_res = THREAD_AWAKENED; + + IOTakeLock(sleep_interlock); + + /* Try to acquire the want_lock bit. 
*/ + while (want_lock && thread_res == THREAD_AWAKENED) + { + waiting = true; + + assert_wait((void *) this, interruptible); /* assert event */ + IOUnlock(sleep_interlock); /* release the lock */ + thread_res = thread_block((void (*)(void)) 0); /* block ourselves */ + + IOTakeLock(sleep_interlock); + } + if (thread_res == THREAD_AWAKENED) + want_lock = true; + + IOUnlock(sleep_interlock); + + return thread_res; +} + +int IOConditionLock::lockWhen(int inCondition) +{ + int thread_res; + + do + { + /* First get the actual lock */ + thread_res = lock(); + if (thread_res != THREAD_AWAKENED) + break; // Failed to acquire lock + + if (inCondition == condition) + break; // Hold lock and condition is expected value + + /* + * Need to hold a IOTakeLock when we call thread_sleep(). + * Both _cond_interlock and want_lock must be held to + * change _condition. + */ + IOTakeLock(cond_interlock); + unlock(); // Release lock and sleep + + /* + * this is the critical section on a multi in which + * another thread could hold _sleep_interlock, but they + * can't change _condition. Holding _cond_interlock here + * (until after assert_wait() is called from + * thread_sleep()) ensures that we'll be notified + * of changes in _condition. + */ + assert_wait((void *) &condition, interruptible); /* assert event */ + IOUnlock(cond_interlock); /* release the lock */ + thread_res = thread_block((void (*)(void)) 0); /* block ourselves */ + } while (thread_res == THREAD_AWAKENED); + + return thread_res; +} diff --git a/iokit/Kernel/IODataQueue.cpp b/iokit/Kernel/IODataQueue.cpp new file mode 100644 index 000000000..b98060e75 --- /dev/null +++ b/iokit/Kernel/IODataQueue.cpp @@ -0,0 +1,213 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +#ifdef enqueue +#undef enqueue +#endif + +#ifdef dequeue +#undef dequeue +#endif + +#define super OSObject + +OSDefineMetaClassAndStructors(IODataQueue, OSObject) + +IODataQueue *IODataQueue::withCapacity(UInt32 size) +{ + IODataQueue *dataQueue = new IODataQueue; + + if (dataQueue) { + if (!dataQueue->initWithCapacity(size)) { + dataQueue->release(); + dataQueue = 0; + } + } + + return dataQueue; +} + +IODataQueue *IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) +{ + IODataQueue *dataQueue = new IODataQueue; + + if (dataQueue) { + if (!dataQueue->initWithEntries(numEntries, entrySize)) { + dataQueue->release(); + dataQueue = 0; + } + } + + return dataQueue; +} + +Boolean IODataQueue::initWithCapacity(UInt32 size) +{ + if (!super::init()) { + return false; + } + + dataQueue = (IODataQueueMemory *)IOMallocAligned(round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE), PAGE_SIZE); + if (dataQueue == 0) { + return false; + } + + dataQueue->queueSize = size; + dataQueue->head = 0; + dataQueue->tail = 0; + + return true; +} + +Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize) +{ + return (initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize))); +} + +void IODataQueue::free() +{ + if (dataQueue) { + 
IOFreeAligned(dataQueue, round_page(dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE)); + } + + super::free(); + + return; +} + +Boolean IODataQueue::enqueue(void * data, UInt32 dataSize) +{ + const UInt32 head = dataQueue->head; // volatile + const UInt32 tail = dataQueue->tail; + const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE; + IODataQueueEntry * entry; + + if ( tail >= head ) + { + if ( (tail + entrySize) < dataQueue->queueSize ) + { + entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); + + entry->size = dataSize; + memcpy(&entry->data, data, dataSize); + dataQueue->tail += entrySize; + } + else if ( head > entrySize ) + { + // Wrap around to the beginning, but do not allow the tail to catch + // up to the head. + + dataQueue->queue->size = dataSize; + ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize; + memcpy(&dataQueue->queue->data, data, dataSize); + dataQueue->tail = entrySize; + } + else + { + return false; // queue is full + } + } + else + { + // Do not allow the tail to catch up to the head when the queue is full. + // That's why the comparison uses a '>' rather than '>='. + + if ( (head - tail) > entrySize ) + { + entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); + + entry->size = dataSize; + memcpy(&entry->data, data, dataSize); + dataQueue->tail += entrySize; + } + else + { + return false; // queue is full + } + } + + // Send notification (via mach message) that data is available. 
+ + if ( ( head == tail ) /* queue was empty prior to enqueue() */ + || ( dataQueue->head == tail ) ) /* queue was emptied during enqueue() */ + { + sendDataAvailableNotification(); + } + + return true; +} + +void IODataQueue::setNotificationPort(mach_port_t port) +{ + static struct _notifyMsg init_msg = { { + MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0), + sizeof (struct _notifyMsg), + MACH_PORT_NULL, + MACH_PORT_NULL, + 0, + 0 + } }; + + if (notifyMsg == 0) { + notifyMsg = IOMalloc(sizeof(struct _notifyMsg)); + } + + *((struct _notifyMsg *)notifyMsg) = init_msg; + + ((struct _notifyMsg *)notifyMsg)->h.msgh_remote_port = port; +} + +void IODataQueue::sendDataAvailableNotification() +{ + kern_return_t kr; + mach_msg_header_t * msgh; + + msgh = (mach_msg_header_t *)notifyMsg; + if (msgh) { + kr = mach_msg_send_from_kernel(msgh, msgh->msgh_size); + switch(kr) { + case MACH_SEND_TIMED_OUT: // Notification already sent + case MACH_MSG_SUCCESS: + break; + default: + IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/"IODataQueue", kr); + break; + } + } +} + +IOMemoryDescriptor *IODataQueue::getMemoryDescriptor() +{ + IOMemoryDescriptor *descriptor = 0; + + if (dataQueue != 0) { + descriptor = IOMemoryDescriptor::withAddress(dataQueue, dataQueue->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn); + } + + return descriptor; +} + diff --git a/iokit/Kernel/IODeviceMemory.cpp b/iokit/Kernel/IODeviceMemory.cpp new file mode 100644 index 000000000..d33ccf99a --- /dev/null +++ b/iokit/Kernel/IODeviceMemory.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas created. + * 30 Sep 99 sdouglas, merged IODeviceMemory into IOMemoryDescriptor. + */ + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IODeviceMemory * IODeviceMemory::withRange( + IOPhysicalAddress start, + IOPhysicalLength length ) +{ + return( (IODeviceMemory *) IOMemoryDescriptor::withPhysicalAddress( + start, length, kIODirectionNone )); +} + + +IODeviceMemory * IODeviceMemory::withSubRange( + IODeviceMemory * of, + IOPhysicalAddress offset, + IOPhysicalLength length ) +{ + return( (IODeviceMemory *) IOMemoryDescriptor::withSubRange( + of, offset, length, kIODirectionNone )); +} + + +OSArray * IODeviceMemory::arrayFromList( + InitElement list[], + IOItemCount count ) +{ + OSArray * array; + IODeviceMemory * range; + IOItemCount i; + + array = OSArray::withCapacity( count ); + if( 0 == array ) + return( 0); + + for( i = 0; i < count; i++) { + range = IODeviceMemory::withRange( list[i].start, list[i].length ); + if( range) { + range->setTag( list[i].tag ); + array->setObject( range); + range->release(); + } else { + array->release(); + array = 0; + break; + } + } + + return( array ); +} + diff --git a/iokit/Kernel/IODeviceTreeSupport.cpp b/iokit/Kernel/IODeviceTreeSupport.cpp new file mode 100644 index 
000000000..059598278 --- /dev/null +++ b/iokit/Kernel/IODeviceTreeSupport.cpp @@ -0,0 +1,1056 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 23 Nov 98 sdouglas, created from IODeviceTreeBus.m, & MacOS exp mgr. + * 05 Apr 99 sdouglas, add interrupt mapping. 
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +extern "C" { +#include +void DTInit( void * data ); + +int IODTGetLoaderInfo( char *key, void **infoAddr, int *infosize ); +void IODTFreeLoaderInfo( char *key, void *infoAddr, int infoSize ); +} + +#include + + +const IORegistryPlane * gIODTPlane; + +static OSArray * gIODTPHandles; +static OSArray * gIODTPHandleMap; + +const OSSymbol * gIODTNameKey; +const OSSymbol * gIODTUnitKey; +const OSSymbol * gIODTCompatibleKey; +const OSSymbol * gIODTTypeKey; +const OSSymbol * gIODTModelKey; + +const OSSymbol * gIODTSizeCellKey; +const OSSymbol * gIODTAddressCellKey; +const OSSymbol * gIODTRangeKey; + +const OSSymbol * gIODTPersistKey; + +const OSSymbol * gIODTDefaultInterruptController; +const OSSymbol * gIODTAAPLInterruptsKey; +const OSSymbol * gIODTPHandleKey; +const OSSymbol * gIODTInterruptCellKey; +const OSSymbol * gIODTInterruptParentKey; +const OSSymbol * gIODTNWInterruptMappingKey; + + +static IORegistryEntry * MakeReferenceTable( DTEntry dtEntry, bool copy ); +static void AddPHandle( IORegistryEntry * regEntry ); +static void FreePhysicalMemory( vm_offset_t * range ); + +IORegistryEntry * +IODeviceTreeAlloc( void * dtTop ) +{ + IORegistryEntry * parent; + IORegistryEntry * child; + IORegistryIterator * regIter; + DTEntryIterator iter; + DTEntry dtChild; + DTEntry mapEntry; + OSArray * stack; + OSData * prop; + OSObject * obj; + vm_offset_t * dtMap; + int propSize; + bool intMap; + bool freeDT; + + IOLog("IODeviceTreeSupport "); + + gIODTPlane = IORegistryEntry::makePlane( kIODeviceTreePlane ); + + gIODTNameKey = OSSymbol::withCStringNoCopy( "name" ); + gIODTUnitKey = OSSymbol::withCStringNoCopy( "AAPL,unit-string" ); + gIODTCompatibleKey = OSSymbol::withCStringNoCopy( "compatible" ); + gIODTTypeKey = OSSymbol::withCStringNoCopy( "device_type" ); + gIODTModelKey = OSSymbol::withCStringNoCopy( "model" ); + gIODTSizeCellKey = OSSymbol::withCStringNoCopy( "#size-cells" ); + 
gIODTAddressCellKey = OSSymbol::withCStringNoCopy( "#address-cells" ); + gIODTRangeKey = OSSymbol::withCStringNoCopy( "ranges" ); + + gIODTPersistKey = OSSymbol::withCStringNoCopy( "IODTPersist" ); + + assert( gIODTPlane && gIODTCompatibleKey + && gIODTTypeKey && gIODTModelKey + && gIODTSizeCellKey && gIODTAddressCellKey && gIODTRangeKey + && gIODTPersistKey ); + + gIODTDefaultInterruptController + = OSSymbol::withCStringNoCopy("IOPrimaryInterruptController"); + gIODTNWInterruptMappingKey + = OSSymbol::withCStringNoCopy("IONWInterrupts"); + + gIODTAAPLInterruptsKey + = OSSymbol::withCStringNoCopy("AAPL,interrupts"); + gIODTPHandleKey + = OSSymbol::withCStringNoCopy("AAPL,phandle"); + + gIODTInterruptParentKey + = OSSymbol::withCStringNoCopy("interrupt-parent"); + + gIODTPHandles = OSArray::withCapacity( 1 ); + gIODTPHandleMap = OSArray::withCapacity( 1 ); + + gIODTInterruptCellKey + = OSSymbol::withCStringNoCopy("#interrupt-cells"); + + assert( gIODTDefaultInterruptController && gIODTNWInterruptMappingKey + && gIODTAAPLInterruptsKey + && gIODTPHandleKey && gIODTInterruptParentKey + && gIODTPHandles && gIODTPHandleMap + && gIODTInterruptCellKey + ); + + freeDT = (kSuccess == DTLookupEntry( 0, "/chosen/memory-map", &mapEntry )) + && (kSuccess == DTGetProperty( mapEntry, + "DeviceTree", (void **) &dtMap, &propSize )) + && ((2 * sizeof( vm_offset_t)) == propSize); + + parent = MakeReferenceTable( (DTEntry)dtTop, freeDT ); + + stack = OSArray::withObjects( & (const OSObject *) parent, 1, 10 ); + DTCreateEntryIterator( (DTEntry)dtTop, &iter ); + + do { + parent = (IORegistryEntry *)stack->getObject( stack->getCount() - 1); + //parent->release(); + stack->removeObject( stack->getCount() - 1); + + while( kSuccess == DTIterateEntries( iter, &dtChild) ) { + + child = MakeReferenceTable( dtChild, freeDT ); + child->attachToParent( parent, gIODTPlane); + + AddPHandle( child ); + + if( kSuccess == DTEnterEntry( iter, dtChild)) { + stack->setObject( parent); + parent = child; + 
} + // only registry holds retain + child->release(); + } + + } while( stack->getCount() + && (kSuccess == DTExitEntry( iter, &dtChild))); + + stack->release(); + DTDisposeEntryIterator( iter); + + // parent is now root of the created tree + + // make root name first compatible entry (purely cosmetic) + if( (prop = (OSData *) parent->getProperty( gIODTCompatibleKey))) { + parent->setName( parent->getName(), gIODTPlane ); + parent->setName( (const char *) prop->getBytesNoCopy() ); + } + + // attach tree to meta root + parent->attachToParent( IORegistryEntry::getRegistryRoot(), gIODTPlane); + parent->release(); + + if( freeDT ) { + // free original device tree + DTInit(0); + IODTFreeLoaderInfo( "DeviceTree", + (void *)dtMap[0], round_page(dtMap[1]) ); + } + + // adjust tree + intMap = false; + regIter = IORegistryIterator::iterateOver( gIODTPlane, + kIORegistryIterateRecursively ); + assert( regIter ); + if( regIter) { + while( (child = regIter->getNextObject())) { + IODTMapInterrupts( child ); + if( !intMap && child->getProperty( gIODTInterruptParentKey)) + intMap = true; + + // Look for a "driver,AAPL,MacOSX,PowerPC" property. 
+ if( (obj = child->getProperty( "driver,AAPL,MacOSX,PowerPC"))) { + gIOCatalogue->addExtensionsFromArchive((OSData *)obj); + + child->removeProperty( "driver,AAPL,MacOSX,PowerPC"); + } + + // some gross pruning + child->removeProperty( "lanLib,AAPL,MacOS,PowerPC"); + + if( (obj = child->getProperty( "driver,AAPL,MacOS,PowerPC"))) { + + if( (0 == (prop = (OSData *)child->getProperty( gIODTTypeKey ))) + || (strcmp( "display", (char *) prop->getBytesNoCopy())) ) { + + child->removeProperty( "driver,AAPL,MacOS,PowerPC"); + } + } + } + regIter->release(); + } + + if( intMap) + // set a key in the root to indicate we found NW interrupt mapping + parent->setProperty( gIODTNWInterruptMappingKey, + (OSObject *) gIODTNWInterruptMappingKey ); + + IOLog("done\n"); + + return( parent); +} + +int IODTGetLoaderInfo( char *key, void **infoAddr, int *infoSize ) +{ + IORegistryEntry *chosen; + OSData *propObj; + unsigned int *propPtr; + unsigned int propSize; + + chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); + if ( chosen == 0 ) return -1; + + propObj = OSDynamicCast( OSData, chosen->getProperty(key) ); + if ( propObj == 0 ) return -1; + + propSize = propObj->getLength(); + if ( propSize != (2 * sizeof(UInt32)) ) return -1; + + propPtr = (unsigned int *)propObj->getBytesNoCopy(); + if ( propPtr == 0 ) return -1; + + *infoAddr = (void *)propPtr[0] ; + *infoSize = (int) propPtr[1]; + + return 0; +} + +void IODTFreeLoaderInfo( char *key, void *infoAddr, int infoSize ) +{ + vm_offset_t range[2]; + IORegistryEntry *chosen; + + range[0] = (vm_offset_t)infoAddr; + range[1] = (vm_offset_t)infoSize; + FreePhysicalMemory( range ); + + if ( key != 0 ) { + chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); + if ( chosen != 0 ) { + chosen->removeProperty(key); + } + } +} + +static void FreePhysicalMemory( vm_offset_t * range ) +{ + vm_offset_t virt; + + virt = ml_static_ptovirt( range[0] ); + if( virt) { + ml_static_mfree( virt, range[1] ); + } +} 
+ +static IORegistryEntry * +MakeReferenceTable( DTEntry dtEntry, bool copy ) +{ + IORegistryEntry * regEntry; + OSDictionary * propTable; + const OSSymbol * nameKey; + OSData * data; + const OSSymbol * sym; + DTPropertyIterator dtIter; + void * prop; + int propSize; + char * name; + char location[ 32 ]; + bool noLocation = true; + + regEntry = new IOService; + + if( regEntry && (false == regEntry->init())) { + regEntry->release(); + regEntry = 0; + } + + if( regEntry && + (kSuccess == DTCreatePropertyIterator( dtEntry, &dtIter))) { + + propTable = regEntry->getPropertyTable(); + + while( kSuccess == DTIterateProperties( dtIter, &name)) { + + if( kSuccess != DTGetProperty( dtEntry, name, &prop, &propSize )) + continue; + + if( copy) { + nameKey = OSSymbol::withCString(name); + data = OSData::withBytes(prop, propSize); + } else { + nameKey = OSSymbol::withCStringNoCopy(name); + data = OSData::withBytesNoCopy(prop, propSize); + } + assert( nameKey && data ); + + propTable->setObject( nameKey, data); + data->release(); + nameKey->release(); + + if( nameKey == gIODTNameKey ) { + if( copy) + sym = OSSymbol::withCString( (const char *) prop); + else + sym = OSSymbol::withCStringNoCopy( (const char *) prop); + regEntry->setName( sym ); + sym->release(); + + } else if( nameKey == gIODTUnitKey ) { + // all OF strings are null terminated... 
except this one + if( propSize >= (int) sizeof( location)) + propSize = sizeof( location) - 1; + strncpy( location, (const char *) prop, propSize ); + location[ propSize ] = 0; + regEntry->setLocation( location ); + propTable->removeObject( gIODTUnitKey ); + noLocation = false; + + } else if( noLocation && (0 == strcmp( name, "reg"))) { + // default location - override later + sprintf( location, "%lX", *((UInt32 *) prop) ); + regEntry->setLocation( location ); + } + } + DTDisposePropertyIterator( dtIter); + } + + return( regEntry); +} + +static void AddPHandle( IORegistryEntry * regEntry ) +{ + OSData * data; + + if( regEntry->getProperty( gIODTInterruptCellKey) + && (data = OSDynamicCast( OSData, regEntry->getProperty( gIODTPHandleKey )))) { + // a possible interrupt-parent + gIODTPHandles->setObject( data ); + gIODTPHandleMap->setObject( regEntry ); + } +} + +static IORegistryEntry * FindPHandle( UInt32 phandle ) +{ + OSData * data; + IORegistryEntry * regEntry = 0; + int i; + + for( i = 0; + (data = (OSData *)gIODTPHandles->getObject( i )); + i++ ) { + if( phandle == *((UInt32 *)data->getBytesNoCopy())) { + regEntry = (IORegistryEntry *) + gIODTPHandleMap->getObject( i ); + break; + } + } + + return( regEntry ); +} + +static bool GetUInt32( IORegistryEntry * regEntry, const OSSymbol * name, + UInt32 * value ) +{ + OSData * data; + + if( (data = OSDynamicCast( OSData, regEntry->getProperty( name ))) + && (4 == data->getLength())) { + *value = *((UInt32 *) data->getBytesNoCopy()); + return( true ); + } else + return( false ); +} + +IORegistryEntry * IODTFindInterruptParent( IORegistryEntry * regEntry ) +{ + IORegistryEntry * parent; + UInt32 phandle; + + if( GetUInt32( regEntry, gIODTInterruptParentKey, &phandle)) + parent = FindPHandle( phandle ); + + else if( 0 == regEntry->getProperty( "interrupt-controller")) + parent = regEntry->getParentEntry( gIODTPlane); + else + parent = 0; + + return( parent ); +} + +const OSSymbol * IODTInterruptControllerName( 
IORegistryEntry * regEntry ) +{ + const OSSymbol * sym; + UInt32 phandle; + bool ok; + char buf[48]; + + ok = GetUInt32( regEntry, gIODTPHandleKey, &phandle); + assert( ok ); + + if( ok) { + sprintf( buf, "IOInterruptController%08lX", phandle); + sym = OSSymbol::withCString( buf ); + } else + sym = 0; + + return( sym ); +} + +#define unexpected(a) { kprintf("unexpected %s:%d\n", __FILE__, __LINE__); a; } + +static void IODTGetICellCounts( IORegistryEntry * regEntry, + UInt32 * iCellCount, UInt32 * aCellCount) +{ + if( !GetUInt32( regEntry, gIODTInterruptCellKey, iCellCount)) + unexpected( *iCellCount = 1 ); + if( !GetUInt32( regEntry, gIODTAddressCellKey, aCellCount)) + *aCellCount = 0; +} + +UInt32 IODTMapOneInterrupt( IORegistryEntry * regEntry, UInt32 * intSpec, + OSData ** spec, const OSSymbol ** controller ) +{ + IORegistryEntry * parent = 0; + OSData * data; + UInt32 * addrCmp; + UInt32 * maskCmp; + UInt32 * map; + UInt32 * endMap; + UInt32 acells, icells, pacells, picells, cell; + UInt32 i, skip = 0; + bool cmp, ok = false; + + do { + if( parent && (data = OSDynamicCast( OSData, + regEntry->getProperty( "interrupt-controller")))) { + // found a controller - don't want to follow cascaded controllers + parent = 0; + *spec = OSData::withBytesNoCopy( (void *) intSpec, + icells * sizeof( UInt32)); + *controller = IODTInterruptControllerName( regEntry ); + ok = (*spec && *controller); + + } else if( parent && (data = OSDynamicCast( OSData, + regEntry->getProperty( "interrupt-map")))) { + // interrupt-map + map = (UInt32 *) data->getBytesNoCopy(); + endMap = map + (data->getLength() / sizeof(UInt32)); + data = OSDynamicCast( OSData, regEntry->getProperty( "interrupt-map-mask" )); + if( data && (data->getLength() >= ((acells + icells) * sizeof( UInt32)))) + maskCmp = (UInt32 *) data->getBytesNoCopy(); + else + maskCmp = 0; + + do { + for( i = 0, cmp = true; + cmp && (i < (acells + icells)); + i++) { + cell = (i < acells) ? 
addrCmp[i] : intSpec[ i - acells ]; + if( maskCmp) + cell &= maskCmp[i]; + cmp = (cell == map[i]); + } + + map += acells + icells; + if( 0 == (parent = FindPHandle( *(map++) ))) + unexpected(break); + + if( cmp) + intSpec = map; + else { + IODTGetICellCounts( parent, &picells, &pacells ); + map += pacells + picells; + parent = 0; + } + } while( !cmp && (map < endMap) ); + + } else + // first time (usually) + parent = IODTFindInterruptParent( regEntry ); + + if( parent) { + IODTGetICellCounts( parent, &icells, &acells ); + addrCmp = 0; + if( acells) { + data = OSDynamicCast( OSData, regEntry->getProperty( "reg" )); + if( data && (data->getLength() >= (acells * sizeof( UInt32)))) + addrCmp = (UInt32 *) data->getBytesNoCopy(); + } + regEntry = parent; + if( !skip) + skip = icells; + } + } while( parent); + + if( ok) + return( skip ); + else + return( 0 ); +} + +bool IODTMapInterrupts( IORegistryEntry * regEntry ) +{ + IORegistryEntry * parent; + OSData * local; + OSData * local2; + UInt32 * localBits; + UInt32 * localEnd; + OSData * map; + OSArray * mapped; + const OSSymbol * controller; + OSArray * controllers; + UInt32 skip = 1; + bool ok, nw; + + nw = (0 == (local = OSDynamicCast( OSData, + regEntry->getProperty( gIODTAAPLInterruptsKey)))); + if( nw + && (0 == (local = OSDynamicCast( OSData, + regEntry->getProperty( "interrupts"))))) + return( true ); // nothing to see here + + if( nw && (parent = regEntry->getParentEntry( gIODTPlane))) { + // check for bridges on old world + if( (local2 = OSDynamicCast( OSData, + parent->getProperty( gIODTAAPLInterruptsKey)))) { + local = local2; + nw = false; + } + } + + localBits = (UInt32 *) local->getBytesNoCopy(); + localEnd = localBits + (local->getLength() / sizeof( UInt32)); + mapped = OSArray::withCapacity( 1 ); + controllers = OSArray::withCapacity( 1 ); + + ok = (mapped && controllers); + + if( ok) do { + if( nw) { + skip = IODTMapOneInterrupt( regEntry, localBits, &map, &controller ); + if( 0 == skip) { + IOLog("%s: 
error mapping interrupt[%d]\n", + regEntry->getName(), mapped->getCount()); + break; + } + } else { + map = OSData::withData( local, mapped->getCount() * sizeof( UInt32), + sizeof( UInt32)); + controller = gIODTDefaultInterruptController; + } + + localBits += skip; + mapped->setObject( map ); + map->release(); + controllers->setObject( (OSObject *) controller ); + controller->release(); + + } while( localBits < localEnd); + + ok &= (localBits == localEnd); + + if( ok ) { + // store results + ok = regEntry->setProperty( gIOInterruptControllersKey, controllers); + ok &= regEntry->setProperty( gIOInterruptSpecifiersKey, mapped); + } + + if( controllers) + controllers->release(); + if( mapped) + mapped->release(); + + return( ok ); +} + +/* + */ + +static const char * +CompareKey( OSString * key, + const IORegistryEntry * table, const OSSymbol * propName ) +{ + OSObject * prop; + OSData * data; + OSString * string; + const char * ckey; + UInt32 keyLen; + const char * names; + const char * lastName; + bool wild; + bool matched; + const char * result = 0; + + if( 0 == (prop = table->getProperty( propName ))) + return( 0 ); + + if( (data = OSDynamicCast( OSData, prop ))) { + names = (const char *) data->getBytesNoCopy(); + lastName = names + data->getLength(); + + } else if( (string = OSDynamicCast( OSString, prop ))) { + names = string->getCStringNoCopy(); + lastName = names + string->getLength() + 1; + + } else + return( 0 ); + + ckey = key->getCStringNoCopy(); + keyLen = key->getLength(); + wild = ('*' == key->getChar( keyLen - 1 )); + + do { + // for each name in the property + if( wild) + matched = (0 == strncmp( ckey, names, keyLen - 1 )); + else + matched = (keyLen == strlen( names )) + && (0 == strncmp( ckey, names, keyLen )); + + if( matched) + result = names; + + names = names + strlen( names) + 1; + + } while( (names < lastName) && (false == matched)); + + return( result); +} + + +bool IODTCompareNubName( const IORegistryEntry * regEntry, + OSString * name, 
OSString ** matchingName ) +{ + const char * result; + bool matched; + + matched = (0 != (result = CompareKey( name, regEntry, gIODTNameKey))) + || (0 != (result = CompareKey( name, regEntry, gIODTCompatibleKey))) + || (0 != (result = CompareKey( name, regEntry, gIODTTypeKey))) + || (0 != (result = CompareKey( name, regEntry, gIODTModelKey))); + + if( result && matchingName) + *matchingName = OSString::withCString( result ); + + return( result != 0 ); +} + +bool IODTMatchNubWithKeys( IORegistryEntry * regEntry, + const char * keys ) +{ + OSObject * obj; + bool result = false; + + obj = OSUnserialize( keys, 0 ); + + if( obj) { + result = regEntry->compareNames( obj ); + obj->release(); + } +#ifdef DEBUG + else IOLog("Couldn't unserialize %s\n", keys ); +#endif + + return( result ); +} + +OSCollectionIterator * IODTFindMatchingEntries( IORegistryEntry * from, + IOOptionBits options, const char * keys ) +{ + OSSet * result; + IORegistryEntry * next; + IORegistryIterator * iter; + OSCollectionIterator * cIter; + bool cmp; + bool minus = options & kIODTExclusive; + + result = OSSet::withCapacity( 3 ); + if( !result) + return( 0); + + iter = IORegistryIterator::iterateOver( from, gIODTPlane, + (options & kIODTRecursive) ? 
kIORegistryIterateRecursively : 0 ); + if( iter) { + while( (next = iter->getNextObject())) { + + // Look for existence of a debug property to skip + if( next->getProperty("AAPL,ignore")) + continue; + + if( keys) { + cmp = IODTMatchNubWithKeys( next, keys ); + if( (minus && (false == cmp)) + || ((false == minus) && (false != cmp)) ) + result->setObject( next); + } else + result->setObject( next); + } + iter->release(); + } + + cIter = OSCollectionIterator::withCollection( result); + result->release(); + + return( cIter); +} + + +struct IODTPersistent { + IODTCompareAddressCellFunc compareFunc; + IODTNVLocationFunc locationFunc; +}; + +void IODTSetResolving( IORegistryEntry * regEntry, + IODTCompareAddressCellFunc compareFunc, + IODTNVLocationFunc locationFunc ) +{ + IODTPersistent persist; + OSData * prop; + + persist.compareFunc = compareFunc; + persist.locationFunc = locationFunc; + prop = OSData::withBytes( &persist, sizeof( persist)); + if( !prop) + return; + + regEntry->setProperty( gIODTPersistKey, prop); + prop->release(); +} + +static SInt32 DefaultCompare( UInt32 cellCount, UInt32 left[], UInt32 right[] ) +{ + cellCount--; + return( left[ cellCount ] - right[ cellCount ] ); +} + + +void IODTGetCellCounts( IORegistryEntry * regEntry, + UInt32 * sizeCount, UInt32 * addressCount) +{ + if( !GetUInt32( regEntry, gIODTSizeCellKey, sizeCount)) + *sizeCount = 1; + if( !GetUInt32( regEntry, gIODTAddressCellKey, addressCount)) + *addressCount = 2; +} + +// Given addr & len cells from our child, find it in our ranges property, then +// look in our parent to resolve the base of the range for us. 
+
+// Range[]: child-addr our-addr child-len
+// #cells:  child      ours     child
+
+// Translate an (address, size) tuple expressed in regEntry's child address
+// space upward through each parent's "ranges" property until no further
+// "ranges" exists, producing an absolute physical address and length.
+bool IODTResolveAddressCell( IORegistryEntry * regEntry,
+                             UInt32 cellsIn[],
+                             IOPhysicalAddress * phys, IOPhysicalLength * len )
+{
+    IORegistryEntry	* parent;
+    OSData		* prop;
+    // cells in addresses at regEntry
+    UInt32		sizeCells, addressCells;
+    // cells in addresses below regEntry
+    UInt32		childSizeCells, childAddressCells;
+    UInt32		childCells;
+    // NOTE(review): cell[] holds at most 5 cells; assumes
+    // (childAddressCells + childSizeCells) <= 5 - confirm against callers
+    UInt32		cell[ 5 ], offset = 0, length;
+    UInt32		* range;
+    UInt32		* endRanges;
+    bool		ok = true;
+    SInt32		diff;
+
+    IODTPersistent	* persist;
+    IODTCompareAddressCellFunc	compare;
+
+    IODTGetCellCounts( regEntry, &childSizeCells, &childAddressCells );
+    childCells = childAddressCells + childSizeCells;
+
+    // working copy of the child's address cells; 4 == sizeof(UInt32)
+    bcopy( cellsIn, cell, 4 * childCells );
+    if( childSizeCells > 1)
+        *len = IOPhysical32( cellsIn[ childAddressCells ],
+                             cellsIn[ childAddressCells + 1 ] );
+    else
+        *len = IOPhysical32( 0, cellsIn[ childAddressCells ] );
+
+    do {
+        prop = OSDynamicCast( OSData, regEntry->getProperty( gIODTRangeKey ));
+        if( 0 == prop) {
+            /* end of the road */
+            *phys = IOPhysical32( 0, cell[ childAddressCells - 1 ] + offset);
+            break;
+        }
+
+        parent = regEntry->getParentEntry( gIODTPlane );
+        IODTGetCellCounts( parent, &sizeCells, &addressCells );
+
+        if( (length = prop->getLength())) {
+            // search
+            range = (UInt32 *) prop->getBytesNoCopy();
+            endRanges = range + (length / 4);
+
+            // a bus may register its own address-comparison function;
+            // otherwise fall back to comparing the low address cell
+            prop = (OSData *) regEntry->getProperty( gIODTPersistKey );
+            if( prop) {
+                persist = (IODTPersistent *) prop->getBytesNoCopy();
+                compare = persist->compareFunc;
+            } else
+                compare = DefaultCompare;
+
+            for( ok = false;
+                 range < endRanges;
+                 range += (childCells + addressCells) ) {
+
+                // is cell >= range start?
+                diff = (*compare)( childAddressCells, cell, range );
+                if( diff < 0)
+                    continue;
+                // is cell + size <= range end?
+ if( (diff + cell[ childCells - 1 ]) + > range[ childCells + addressCells - 1 ]) + continue; + + offset += diff; + ok = true; + break; + } + + // Get the physical start of the range from our parent + bcopy( range + childAddressCells, cell, 4 * addressCells ); + bzero( cell + addressCells, 4 * sizeCells ); + + } /* else zero length range => pass thru to parent */ + + regEntry = parent; + childSizeCells = sizeCells; + childAddressCells = addressCells; + childCells = childAddressCells + childSizeCells; + + } while( ok && regEntry); + + return( ok); +} + + +OSArray * IODTResolveAddressing( IORegistryEntry * regEntry, + const char * addressPropertyName, + IODeviceMemory * parent ) +{ + IORegistryEntry * parentEntry; + OSData * addressProperty; + UInt32 sizeCells, addressCells, cells; + int i, num; + UInt32 * reg; + IOPhysicalAddress phys; + IOPhysicalLength len; + OSArray * array; + IODeviceMemory * range; + + parentEntry = regEntry->getParentEntry( gIODTPlane ); + addressProperty = (OSData *) regEntry->getProperty( addressPropertyName ); + if( (0 == addressProperty) || (0 == parentEntry)) + return( 0); + + IODTGetCellCounts( parentEntry, &sizeCells, &addressCells ); + if( 0 == sizeCells) + return( 0); + + cells = sizeCells + addressCells; + reg = (UInt32 *) addressProperty->getBytesNoCopy(); + num = addressProperty->getLength() / (4 * cells); + + array = OSArray::withCapacity( 1 ); + if( 0 == array) + return( 0); + + for( i = 0; i < num; i++) { + + if( IODTResolveAddressCell( parentEntry, reg, &phys, &len )) { + + range = 0; + if( parent) + range = IODeviceMemory::withSubRange( parent, + phys - parent->getPhysicalAddress(), len ); + if( 0 == range) + range = IODeviceMemory::withRange( phys, len ); + if( range) + array->setObject( range ); + } + reg += cells; + } + + regEntry->setProperty( gIODeviceMemoryKey, array); + array->release(); /* ??? 
*/ + + return( array); +} + +static void IODTGetNVLocation( + IORegistryEntry * parent, + IORegistryEntry * regEntry, + UInt8 * busNum, UInt8 * deviceNum, UInt8 * functionNum ) +{ + + OSData * prop; + IODTPersistent * persist; + UInt32 * cell; + + prop = (OSData *) parent->getProperty( gIODTPersistKey ); + if( prop) { + persist = (IODTPersistent *) prop->getBytesNoCopy(); + (*persist->locationFunc)( regEntry, busNum, deviceNum, functionNum ); + + } else { + prop = (OSData *) regEntry->getProperty( "reg" ); + *functionNum = 0; + if( prop) { + cell = (UInt32 *) prop->getBytesNoCopy(); +#if 0 + if( (regHi & 0xf0000000) != 0xf0000000) { + *bus = 0x03 & (regHi >> 16); + *function = 0x07 & (regHi >> 8); + *device = 0x1f & (regHi >> 11); +#endif + *busNum = 3; + *deviceNum = 0x1f & (cell[ 0 ] >> 24); + } else { + *busNum = 0; + *deviceNum = 0; + } + } +} + +/* + * Try to make the same messed up descriptor as Mac OS + */ + +IOReturn IODTMakeNVDescriptor( IORegistryEntry * regEntry, + IONVRAMDescriptor * hdr ) +{ + IORegistryEntry * parent; + UInt32 level; + UInt32 bridgeDevices; + UInt8 busNum; + UInt8 deviceNum; + UInt8 functionNum; + + hdr->format = 1; + hdr->marker = 0; + + for( + level = 0, bridgeDevices = 0; + (parent = regEntry->getParentEntry( gIODTPlane )) + && (level < 7); + level++ ) { + + IODTGetNVLocation( parent, regEntry, + &busNum, &deviceNum, &functionNum ); + if( level) + bridgeDevices |= ((deviceNum & 0x1f) << ((level - 1) * 5)); + else { + hdr->busNum = busNum; + hdr->deviceNum = deviceNum; + hdr->functionNum = functionNum; + } + regEntry = parent; + } + hdr->bridgeCount = level - 2; + hdr->bridgeDevices = bridgeDevices; + + return( kIOReturnSuccess ); +} + +OSData * IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber ) +{ + IORegistryEntry * parent; + OSData * data; + OSData * ret = 0; + UInt32 * bits; + UInt32 i; + char * names; + char * lastName; + UInt32 mask; + + data = (OSData *) regEntry->getProperty("AAPL,slot-name"); + if( data) + 
return( data); + parent = regEntry->getParentEntry( gIODTPlane ); + if( !parent) + return( 0 ); + data = OSDynamicCast( OSData, parent->getProperty("slot-names")); + if( !data) + return( 0 ); + if( data->getLength() <= 4) + return( 0 ); + + bits = (UInt32 *) data->getBytesNoCopy(); + mask = *bits; + if( (0 == (mask & (1 << deviceNumber)))) + return( 0 ); + + names = (char *)(bits + 1); + lastName = names + (data->getLength() - 4); + + for( i = 0; + (i <= deviceNumber) && (names < lastName); + i++ ) { + + if( mask & (1 << i)) { + if( i == deviceNumber) { + data = OSData::withBytesNoCopy( names, 1 + strlen( names)); + if( data) { + regEntry->setProperty("AAPL,slot-name", data); + ret = data; + data->release(); + } + } else + names += 1 + strlen( names); + } + } + + return( ret ); +} diff --git a/iokit/Kernel/IOEventSource.cpp b/iokit/Kernel/IOEventSource.cpp new file mode 100644 index 000000000..d90fb2f62 --- /dev/null +++ b/iokit/Kernel/IOEventSource.cpp @@ -0,0 +1,109 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. 
All rights reserved. + +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. +]*/ +#include + +#include +#include + +#define super OSObject + +OSDefineMetaClassAndAbstractStructors(IOEventSource, OSObject) +OSMetaClassDefineReservedUnused(IOEventSource, 0); +OSMetaClassDefineReservedUnused(IOEventSource, 1); +OSMetaClassDefineReservedUnused(IOEventSource, 2); +OSMetaClassDefineReservedUnused(IOEventSource, 3); +OSMetaClassDefineReservedUnused(IOEventSource, 4); +OSMetaClassDefineReservedUnused(IOEventSource, 5); +OSMetaClassDefineReservedUnused(IOEventSource, 6); +OSMetaClassDefineReservedUnused(IOEventSource, 7); + +bool IOEventSource::init(OSObject *inOwner, + Action inAction = 0) +{ + if (!inOwner) + return false; + + owner = inOwner; + + if ( !super::init() ) + return false; + + (void) setAction(inAction); + enabled = true; + + return true; +} + +IOEventSource::Action IOEventSource::getAction () const { return action; }; + +void IOEventSource::setAction(Action inAction) +{ + action = inAction; +} + +IOEventSource *IOEventSource::getNext() const { return eventChainNext; }; + +void IOEventSource::setNext(IOEventSource *inNext) +{ + eventChainNext = inNext; +} + +void IOEventSource::enable() +{ + enabled = true; + if (workLoop) + return signalWorkAvailable(); +} + +void IOEventSource::disable() +{ + enabled = false; +} + +bool IOEventSource::isEnabled() const +{ + return enabled; +} + +void IOEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) +{ + if ( !inWorkLoop ) + disable(); + workLoop = inWorkLoop; +} + +IOWorkLoop *IOEventSource::getWorkLoop() const +{ + return workLoop; +} + +bool IOEventSource::onThread() const +{ + return (workLoop != 0) && workLoop->onThread(); +} diff --git a/iokit/Kernel/IOFilterInterruptEventSource.cpp b/iokit/Kernel/IOFilterInterruptEventSource.cpp new file mode 100644 index 000000000..0869ec7de --- /dev/null +++ b/iokit/Kernel/IOFilterInterruptEventSource.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, 
Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + +HISTORY + 1999-4-15 Godfrey van der Linden(gvdl) + Created. 
+*/ +#include +#include +#include +#include + +#if KDEBUG + +#define IOTimeTypeStampS(t) \ +do { \ + IOTimeStampStart(IODBG_INTES(t), \ + (unsigned int) this, (unsigned int) owner); \ +} while(0) + +#define IOTimeTypeStampE(t) \ +do { \ + IOTimeStampEnd(IODBG_INTES(t), \ + (unsigned int) this, (unsigned int) owner); \ +} while(0) + +#define IOTimeStampLatency() \ +do { \ + IOTimeStampEnd(IODBG_INTES(IOINTES_LAT), \ + (unsigned int) this, (unsigned int) owner); \ +} while(0) + +#else /* !KDEBUG */ +#define IOTimeTypeStampS(t) +#define IOTimeTypeStampE(t) +#define IOTimeStampLatency() +#endif /* KDEBUG */ + +#define super IOInterruptEventSource + +OSDefineMetaClassAndStructors + (IOFilterInterruptEventSource, IOInterruptEventSource) +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 0); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 1); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 2); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 3); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 4); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 5); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 6); +OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 7); + +/* + * Implement the call throughs for the private protection conversion + */ +bool IOFilterInterruptEventSource::init(OSObject *inOwner, + Action inAction = 0, + IOService *inProvider = 0, + int inIntIndex = 0) +{ + return false; +} + +IOInterruptEventSource * +IOFilterInterruptEventSource::interruptEventSource(OSObject *inOwner, + Action inAction, + IOService *inProvider, + int inIntIndex) +{ + return 0; +} + +bool +IOFilterInterruptEventSource::init(OSObject *inOwner, + Action inAction, + Filter inFilterAction, + IOService *inProvider, + int inIntIndex = 0) +{ + if ( !super::init(inOwner, inAction, inProvider, inIntIndex) ) + return false; + + if (!inFilterAction) + return false; + + filterAction = 
inFilterAction; + return true; +} + +IOFilterInterruptEventSource *IOFilterInterruptEventSource +::filterInterruptEventSource(OSObject *inOwner, + Action inAction, + Filter inFilterAction, + IOService *inProvider, + int inIntIndex = 0) +{ + IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; + + if (me + && !me->init(inOwner, inAction, inFilterAction, inProvider, inIntIndex)) { + me->free(); + return 0; + } + + return me; +} + +void IOFilterInterruptEventSource::signalInterrupt() +{ +IOTimeStampLatency(); + + producerCount++; + +IOTimeTypeStampS(IOINTES_SEMA); + signalWorkAvailable(); +IOTimeTypeStampE(IOINTES_SEMA); +} + + +IOFilterInterruptEventSource::Filter +IOFilterInterruptEventSource::getFilterAction() const +{ + return filterAction; +} + + + + +void IOFilterInterruptEventSource::normalInterruptOccurred + (void */*refcon*/, IOService */*prov*/, int /*source*/) +{ + bool filterRes; + +IOTimeTypeStampS(IOINTES_INTCTXT); + +IOTimeTypeStampS(IOINTES_INTFLTR); + IOTimeStampConstant(IODBG_INTES(IOINTES_FILTER), + (unsigned int) filterAction, (unsigned int) owner); + filterRes = (*filterAction)(owner, this); +IOTimeTypeStampE(IOINTES_INTFLTR); + + if (filterRes) + signalInterrupt(); + +IOTimeTypeStampE(IOINTES_INTCTXT); +} + +void IOFilterInterruptEventSource::disableInterruptOccurred + (void */*refcon*/, IOService *prov, int source) +{ + bool filterRes; + +IOTimeTypeStampS(IOINTES_INTCTXT); + +IOTimeTypeStampS(IOINTES_INTFLTR); + IOTimeStampConstant(IODBG_INTES(IOINTES_FILTER), + (unsigned int) filterAction, (unsigned int) owner); + filterRes = (*filterAction)(owner, this); +IOTimeTypeStampE(IOINTES_INTFLTR); + + if (filterRes) { + prov->disableInterrupt(source); /* disable the interrupt */ + + signalInterrupt(); + } +IOTimeTypeStampE(IOINTES_INTCTXT); +} diff --git a/iokit/Kernel/IOInterruptController.cpp b/iokit/Kernel/IOInterruptController.cpp new file mode 100644 index 000000000..c41df7e8c --- /dev/null +++ 
b/iokit/Kernel/IOInterruptController.cpp @@ -0,0 +1,653 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
+ * + * DRI: Josh de Cesare + * + */ + + +#if __ppc__ +#include +#endif + +#include +#include +#include +#include +#include + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndAbstractStructors(IOInterruptController, IOService); + +OSMetaClassDefineReservedUnused(IOInterruptController, 0); +OSMetaClassDefineReservedUnused(IOInterruptController, 1); +OSMetaClassDefineReservedUnused(IOInterruptController, 2); +OSMetaClassDefineReservedUnused(IOInterruptController, 3); +OSMetaClassDefineReservedUnused(IOInterruptController, 4); +OSMetaClassDefineReservedUnused(IOInterruptController, 5); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOInterruptController::registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + long wasDisabledSoft; + IOReturn error; + OSData *vectorData; + IOService *originalNub; + int originalSource; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOTakeLock(vector->interruptLock); + + // If this vector is already in use, and can be shared, + // register as a shared interrupt. + if (vector->interruptRegistered) { + if (!vectorCanBeShared(vectorNumber, vector)) { + IOUnlock(vector->interruptLock); + return kIOReturnNoResources; + } + + // If this vector is not already shared, break it out. + if (vector->sharedController == 0) { + // Make the IOShareInterruptController instance + vector->sharedController = new IOSharedInterruptController; + if (vector->sharedController == 0) { + IOUnlock(vector->interruptLock); + return kIOReturnNoMemory; + } + + // Save the nub and source for the original consumer. 
+      // Remember who currently owns the vector so it can be re-registered
+      // on the new shared controller below.
+      originalNub = vector->nub;
+      originalSource = vector->source;
+
+      // Save the dis/enable state for the original consumer's interrupt.
+      // Then disable the source
+      wasDisabledSoft = vector->interruptDisabledSoft;
+      disableInterrupt(originalNub, originalSource);
+
+      // Initialize the new shared interrupt controller.
+      error = vector->sharedController->initInterruptController(this,
+                                                                vectorData);
+      // If the IOSharedInterruptController could not be initalized,
+      // put the original consumor's interrupt back to normal and
+      // get rid of whats left of the shared controller.
+      if (error != kIOReturnSuccess) {
+        enableInterrupt(originalNub, originalSource);
+        vector->sharedController->release();
+        vector->sharedController = 0;
+        IOUnlock(vector->interruptLock);
+        return error;
+      }
+
+      // Try to register the original consumer on the shared controller.
+      error = vector->sharedController->registerInterrupt(originalNub,
+                                                          originalSource,
+                                                          vector->target,
+                                                          vector->handler,
+                                                          vector->refCon);
+      // If the original consumer could not be moved to the shared controller,
+      // put the original consumor's interrupt back to normal and
+      // get rid of whats left of the shared controller.
+      if (error != kIOReturnSuccess) {
+        enableInterrupt(originalNub, originalSource);
+        vector->sharedController->release();
+        vector->sharedController = 0;
+        IOUnlock(vector->interruptLock);
+        return error;
+      }
+
+      // Fill in vector with the shared controller's info.
+      // From here on, dispatch for this vector goes through the shared
+      // controller rather than the original consumer's handler.
+      vector->handler = (IOInterruptHandler)vector->sharedController->getInterruptHandlerAddress();
+      vector->nub     = vector->sharedController;
+      vector->source  = 0;
+      vector->target  = vector->sharedController;
+      vector->refCon  = 0;
+
+      // Enable the original consumer's interrupt if needed.
+      if (!wasDisabledSoft) originalNub->enableInterrupt(originalSource);
+    }
+
+    // Register the new client on the (now guaranteed) shared controller.
+    error = vector->sharedController->registerInterrupt(nub, source, target,
+                                                        handler, refCon);
+    IOUnlock(vector->interruptLock);
+    return error;
+  }
+
+  // Fill in vector with the client's info.
+  vector->handler = handler;
+  vector->nub     = nub;
+  vector->source  = source;
+  vector->target  = target;
+  vector->refCon  = refCon;
+
+  // Do any specific initalization for this vector.
+  initVector(vectorNumber, vector);
+
+  // Get the vector ready.  It starts hard disabled.
+  vector->interruptDisabledHard = 1;
+  vector->interruptDisabledSoft = 1;
+  vector->interruptRegistered   = 1;
+
+  IOUnlock(vector->interruptLock);
+  return kIOReturnSuccess;
+}
+
+IOReturn IOInterruptController::unregisterInterrupt(IOService *nub, int source)
+{
+  IOInterruptSource *interruptSources;
+  long              vectorNumber;
+  IOInterruptVector *vector;
+  OSData            *vectorData;
+
+  // NOTE(review): vectorData layout (first cell == vector number) is
+  // established by the platform's interrupt-controller setup - confirm.
+  interruptSources = nub->_interruptSources;
+  vectorData = interruptSources[source].vectorData;
+  vectorNumber = *(long *)vectorData->getBytesNoCopy();
+  vector = &vectors[vectorNumber];
+
+  // Get the lock for this vector.
+  IOTakeLock(vector->interruptLock);
+
+  // Return success if it is not already registered
+  if (!vector->interruptRegistered) {
+    IOUnlock(vector->interruptLock);
+    return kIOReturnSuccess;
+  }
+
+  // Soft disable the source.
+  disableInterrupt(nub, source);
+
+  // Turn the source off at hardware.
+  disableVectorHard(vectorNumber, vector);
+
+  // Clear all the storage for the vector except for interruptLock.
+ vector->interruptActive = 0; + vector->interruptDisabledSoft = 0; + vector->interruptDisabledHard = 0; + vector->interruptRegistered = 0; + vector->nub = 0; + vector->source = 0; + vector->handler = 0; + vector->target = 0; + vector->refCon = 0; + + IOUnlock(vector->interruptLock); + return kIOReturnSuccess; +} + +IOReturn IOInterruptController::getInterruptType(IOService *nub, int source, + int *interruptType) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + if (interruptType == 0) return kIOReturnBadArgument; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + *interruptType = getVectorType(vectorNumber, vector); + + return kIOReturnSuccess; +} + +IOReturn IOInterruptController::enableInterrupt(IOService *nub, int source) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + if (vector->interruptDisabledSoft) { + vector->interruptDisabledSoft = 0; + + if (vector->interruptDisabledHard) { + vector->interruptDisabledHard = 0; + + enableVector(vectorNumber, vector); + } + } + + return kIOReturnSuccess; +} + +IOReturn IOInterruptController::disableInterrupt(IOService *nub, int source) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + vector->interruptDisabledSoft = 1; +#if __ppc__ + sync(); + isync(); +#endif + + if (!getPlatform()->atInterruptLevel()) { + 
while (vector->interruptActive); +#if __ppc__ + isync(); +#endif + } + + return kIOReturnSuccess; +} + +IOReturn IOInterruptController::causeInterrupt(IOService *nub, int source) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + causeVector(vectorNumber, vector); + + return kIOReturnSuccess; +} + +IOInterruptAction IOInterruptController::getInterruptHandlerAddress(void) +{ + return 0; +} + +IOReturn IOInterruptController::handleInterrupt(void *refCon, IOService *nub, + int source) +{ + return kIOReturnInvalid; +} + + +// Methods to be overridden for simplifed interrupt controller subclasses. + +bool IOInterruptController::vectorCanBeShared(long /*vectorNumber*/, + IOInterruptVector */*vector*/) +{ + return false; +} + +void IOInterruptController::initVector(long /*vectorNumber*/, + IOInterruptVector */*vector*/) +{ +} + +int IOInterruptController::getVectorType(long /*vectorNumber*/, + IOInterruptVector */*vector*/) +{ + return kIOInterruptTypeEdge; +} + +void IOInterruptController::disableVectorHard(long /*vectorNumber*/, + IOInterruptVector */*vector*/) +{ +} + +void IOInterruptController::enableVector(long /*vectorNumber*/, + IOInterruptVector */*vector*/) +{ +} + +void IOInterruptController::causeVector(long /*vectorNumber*/, + IOInterruptVector */*vector*/) +{ +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOInterruptController + +OSDefineMetaClassAndStructors(IOSharedInterruptController, IOInterruptController); + +OSMetaClassDefineReservedUnused(IOSharedInterruptController, 0); +OSMetaClassDefineReservedUnused(IOSharedInterruptController, 1); +OSMetaClassDefineReservedUnused(IOSharedInterruptController, 2); 
+OSMetaClassDefineReservedUnused(IOSharedInterruptController, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOReturn IOSharedInterruptController::initInterruptController(IOInterruptController *parentController, OSData *parentSource) +{ + int cnt, interruptType; + IOReturn error; + + if (!super::init()) + return kIOReturnNoResources; + + // Set provider to this so enable/disable nub stuff works. + provider = this; + + // Allocate the IOInterruptSource so this can act like a nub. + _interruptSources = (IOInterruptSource *)IOMalloc(sizeof(IOInterruptSource)); + if (_interruptSources == 0) return kIOReturnNoMemory; + _numInterruptSources = 1; + + // Set up the IOInterruptSource to point at this. + _interruptSources[0].interruptController = parentController; + _interruptSources[0].vectorData = parentSource; + + sourceIsLevel = false; + error = provider->getInterruptType(0, &interruptType); + if (error == kIOReturnSuccess) { + if (interruptType & kIOInterruptTypeLevel) + sourceIsLevel = true; + } + + // Allocate the memory for the vectors + numVectors = 8; // For now a constant number. + vectors = (IOInterruptVector *)IOMalloc(numVectors * sizeof(IOInterruptVector)); + if (vectors == NULL) { + IOFree(_interruptSources, sizeof(IOInterruptSource)); + return kIOReturnNoMemory; + } + bzero(vectors, numVectors * sizeof(IOInterruptVector)); + + // Allocate the lock for the controller. + controllerLock = IOSimpleLockAlloc(); + if (controllerLock == 0) return kIOReturnNoResources; + + // Allocate locks for the vectors. 
+ for (cnt = 0; cnt < numVectors; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < numVectors; cnt++) { + if (vectors[cnt].interruptLock != NULL) + IOLockFree(vectors[cnt].interruptLock); + } + return kIOReturnNoResources; + } + } + + vectorsRegistered = 0; + vectorsEnabled = 0; + controllerDisabled = 1; + + return kIOReturnSuccess; +} + +IOReturn IOSharedInterruptController::registerInterrupt(IOService *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector = 0; + OSData *vectorData; + IOInterruptState interruptState; + + interruptSources = nub->_interruptSources; + + // Find a free vector. + vectorNumber = numVectors; + while (vectorsRegistered != numVectors) { + for (vectorNumber = 0; vectorNumber < numVectors; vectorNumber++) { + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOTakeLock(vector->interruptLock); + + // Is it unregistered? + if (!vector->interruptRegistered) break; + + // Move along to the next one. + IOUnlock(vector->interruptLock); + } + + if (vectorNumber != numVectors) break; + } + + // Could not find a free one, so give up. + if (vectorNumber == numVectors) { + return kIOReturnNoResources; + } + + // Create the vectorData for the IOInterruptSource. + vectorData = OSData::withBytes(&vectorNumber, sizeof(vectorNumber)); + if (vectorData == 0) { + return kIOReturnNoMemory; + } + + // Fill in the IOInterruptSource with the controller's info. + interruptSources[source].interruptController = this; + interruptSources[source].vectorData = vectorData; + + // Fill in vector with the client's info. + vector->handler = handler; + vector->nub = nub; + vector->source = source; + vector->target = target; + vector->refCon = refCon; + + // Get the vector ready. It start soft disabled. 
+ vector->interruptDisabledSoft = 1; + vector->interruptRegistered = 1; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + vectorsRegistered++; + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + IOUnlock(vector->interruptLock); + return kIOReturnSuccess; +} + +IOReturn IOSharedInterruptController::unregisterInterrupt(IOService *nub, + int source) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + IOInterruptState interruptState;; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOTakeLock(vector->interruptLock); + + // Return success if it is not already registered + if (!vector->interruptRegistered) { + IOUnlock(vector->interruptLock); + return kIOReturnSuccess; + } + + // Soft disable the source. + disableInterrupt(nub, source); + + // Clear all the storage for the vector except for interruptLock. 
+ vector->interruptActive = 0; + vector->interruptDisabledSoft = 0; + vector->interruptDisabledHard = 0; + vector->interruptRegistered = 0; + vector->nub = 0; + vector->source = 0; + vector->handler = 0; + vector->target = 0; + vector->refCon = 0; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + vectorsRegistered--; + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + IOUnlock(vector->interruptLock); + return kIOReturnSuccess; +} + +IOReturn IOSharedInterruptController::getInterruptType(IOService */*nub*/, + int /*source*/, + int *interruptType) +{ + return provider->getInterruptType(0, interruptType); +} + +IOReturn IOSharedInterruptController::enableInterrupt(IOService *nub, + int source) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + IOInterruptState interruptState;; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + if (vector->interruptDisabledSoft) { + vector->interruptDisabledSoft = 0; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + vectorsEnabled++; + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + if (controllerDisabled && (vectorsEnabled == vectorsRegistered)) { + controllerDisabled = 0; + provider->enableInterrupt(0); + } + } + + return kIOReturnSuccess; +} + +IOReturn IOSharedInterruptController::disableInterrupt(IOService *nub, + int source) +{ + IOInterruptSource *interruptSources; + long vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + IOInterruptState interruptState;; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(long *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + if (!vector->interruptDisabledSoft) { + vector->interruptDisabledSoft = 1; 
+#if __ppc__ + sync(); + isync(); +#endif + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + vectorsEnabled--; + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + } + + if (!getPlatform()->atInterruptLevel()) { + while (vector->interruptActive); +#if __ppc__ + isync(); +#endif + } + + return kIOReturnSuccess; +} + +IOInterruptAction IOSharedInterruptController::getInterruptHandlerAddress(void) +{ + return (IOInterruptAction)&IOSharedInterruptController::handleInterrupt; +} + +IOReturn IOSharedInterruptController::handleInterrupt(void * /*refCon*/, + IOService * nub, + int /*source*/) +{ + long vectorNumber; + IOInterruptVector *vector; + + for (vectorNumber = 0; vectorNumber < numVectors; vectorNumber++) { + vector = &vectors[vectorNumber]; + + vector->interruptActive = 1; +#if __ppc__ + sync(); + isync(); +#endif + if (!vector->interruptDisabledSoft) { +#if __ppc__ + isync(); +#endif + + // Call the handler if it exists. + if (vector->interruptRegistered) { + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + } + } + + vector->interruptActive = 0; + } + + // if any of the vectors are dissabled, then dissable this controller. + IOSimpleLockLock(controllerLock); + if (vectorsEnabled != vectorsRegistered) { + nub->disableInterrupt(0); + controllerDisabled = 1; + } + IOSimpleLockUnlock(controllerLock); + + return kIOReturnSuccess; +} + diff --git a/iokit/Kernel/IOInterruptEventSource.cpp b/iokit/Kernel/IOInterruptEventSource.cpp new file mode 100644 index 000000000..a6b0034f4 --- /dev/null +++ b/iokit/Kernel/IOInterruptEventSource.cpp @@ -0,0 +1,246 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. +*/ +#include +#include +#include +#include +#include +#include + +#if KDEBUG + +#define IOTimeTypeStampS(t) \ +do { \ + IOTimeStampStart(IODBG_INTES(t), \ + (unsigned int) this, (unsigned int) owner); \ +} while(0) + +#define IOTimeTypeStampE(t) \ +do { \ + IOTimeStampEnd(IODBG_INTES(t), \ + (unsigned int) this, (unsigned int) owner); \ +} while(0) + +#define IOTimeStampLatency() \ +do { \ + IOTimeStampEnd(IODBG_INTES(IOINTES_LAT), \ + (unsigned int) this, (unsigned int) owner); \ +} while(0) + +#else /* !KDEBUG */ +#define IOTimeTypeStampS(t) +#define IOTimeTypeStampE(t) +#define IOTimeStampLatency() +#endif /* KDEBUG */ + +#define super IOEventSource + +OSDefineMetaClassAndStructors(IOInterruptEventSource, IOEventSource) +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 0); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 1); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 2); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 3); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 4); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 5); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 6); +OSMetaClassDefineReservedUnused(IOInterruptEventSource, 7); + +bool 
IOInterruptEventSource::init(OSObject *inOwner, + Action inAction = 0, + IOService *inProvider = 0, + int inIntIndex = 0) +{ + bool res = true; + + if ( !super::init(inOwner, (IOEventSourceAction) inAction) ) + return false; + + provider = inProvider; + producerCount = consumerCount = 0; + autoDisable = explicitDisable = false; + intIndex = -1; + + // Assumes inOwner holds a reference(retain) on the provider + if (inProvider) { + int intType; + + res = (kIOReturnSuccess + == inProvider->getInterruptType(inIntIndex, &intType)); + if (res) { + IOInterruptAction intHandler; + + autoDisable = (intType == kIOInterruptTypeLevel); + if (autoDisable) { + intHandler = (IOInterruptAction) + &IOInterruptEventSource::disableInterruptOccurred; + } + else + intHandler = (IOInterruptAction) + &IOInterruptEventSource::normalInterruptOccurred; + + res = (kIOReturnSuccess == inProvider->registerInterrupt + (inIntIndex, this, intHandler)); + if (res) + intIndex = inIntIndex; + } + } + + return res; +} + +IOInterruptEventSource * +IOInterruptEventSource::interruptEventSource(OSObject *inOwner, + Action inAction, + IOService *inProvider, + int inIntIndex) +{ + IOInterruptEventSource *me = new IOInterruptEventSource; + + if (me && !me->init(inOwner, inAction, inProvider, inIntIndex)) { + me->free(); + return 0; + } + + return me; +} + +void IOInterruptEventSource::free() +{ + if (provider && intIndex != -1) + provider->unregisterInterrupt(intIndex); + + super::free(); +} + +void IOInterruptEventSource::enable() +{ + if (provider && intIndex != -1) { + provider->enableInterrupt(intIndex); + explicitDisable = false; + } +} + +void IOInterruptEventSource::disable() +{ + if (provider && intIndex != -1) { + provider->disableInterrupt(intIndex); + explicitDisable = true; + } +} + +const IOService *IOInterruptEventSource::getProvider() const +{ + return provider; +} + +int IOInterruptEventSource::getIntIndex() const +{ + return intIndex; +} + +bool IOInterruptEventSource::getAutoDisable() 
const +{ + return autoDisable; +} + +bool IOInterruptEventSource::checkForWork() +{ + unsigned int cacheProdCount = producerCount; + int numInts = cacheProdCount - consumerCount; + IOInterruptEventAction intAction = (IOInterruptEventAction) action; + + if (numInts > 0) { + + IOTimeStampLatency(); + IOTimeTypeStampS(IOINTES_CLIENT); + IOTimeStampConstant(IODBG_INTES(IOINTES_ACTION), + (unsigned int) intAction, (unsigned int) owner); + (*intAction)(owner, this, numInts); + IOTimeTypeStampE(IOINTES_CLIENT); + + consumerCount = cacheProdCount; + if (autoDisable && !explicitDisable) + enable(); + } + else if (numInts < 0) { + IOTimeStampLatency(); + IOTimeTypeStampS(IOINTES_CLIENT); + IOTimeStampConstant(IODBG_INTES(IOINTES_ACTION), + (unsigned int) intAction, (unsigned int) owner); + (*intAction)(owner, this, -numInts); + IOTimeTypeStampE(IOINTES_CLIENT); + + consumerCount = cacheProdCount; + if (autoDisable && !explicitDisable) + enable(); + } + + return false; +} + +void IOInterruptEventSource::normalInterruptOccurred + (void */*refcon*/, IOService */*prov*/, int /*source*/) +{ +IOTimeTypeStampS(IOINTES_INTCTXT); +IOTimeStampLatency(); + + producerCount++; + +IOTimeTypeStampS(IOINTES_SEMA); + signalWorkAvailable(); +IOTimeTypeStampE(IOINTES_SEMA); + +IOTimeTypeStampE(IOINTES_INTCTXT); +} + +void IOInterruptEventSource::disableInterruptOccurred + (void */*refcon*/, IOService *prov, int source) +{ +IOTimeTypeStampS(IOINTES_INTCTXT); +IOTimeStampLatency(); + + prov->disableInterrupt(source); /* disable the interrupt */ + + producerCount++; + +IOTimeTypeStampS(IOINTES_SEMA); + signalWorkAvailable(); +IOTimeTypeStampE(IOINTES_SEMA); + +IOTimeTypeStampE(IOINTES_INTCTXT); +} + +void IOInterruptEventSource::interruptOccurred + (void *refcon, IOService *prov, int source) +{ + if (autoDisable && prov) + disableInterruptOccurred(refcon, prov, source); + else + normalInterruptOccurred(refcon, prov, source); +} diff --git a/iokit/Kernel/IOKitDebug.cpp 
b/iokit/Kernel/IOKitDebug.cpp new file mode 100644 index 000000000..e7be3a62a --- /dev/null +++ b/iokit/Kernel/IOKitDebug.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +extern "C" { + +SInt64 gIOKitDebug +#ifdef IOKITDEBUG + = IOKITDEBUG +#endif +; + +int debug_malloc_size; +int debug_iomalloc_size; +int debug_container_malloc_size; +// int debug_ivars_size; // in OSObject.cpp + +void IOPrintPlane( const IORegistryPlane * plane ) +{ + IORegistryEntry * next; + IORegistryIterator * iter; + OSOrderedSet * all; + char format[] = "%xxxs"; + IOService * service; + + iter = IORegistryIterator::iterateOver( plane ); + assert( iter ); + all = iter->iterateAll(); + if( all) { + IOLog("Count %d\n", all->getCount() ); + all->release(); + } else + IOLog("Empty\n"); + + iter->reset(); + while( (next = iter->getNextObjectRecursive())) { + sprintf( format + 1, "%ds", next->getDepth( plane )); + IOLog( format, ""); + if( (service = OSDynamicCast(IOService, next))) + IOLog("<%ld>", service->getBusyState()); + IOLog( "%s\n", next->getName()); + } + iter->release(); +} + +void IOPrintMemory( void ) +{ + +// OSMetaClass::printInstanceCounts(); + + IOLog("\n" + "ivar kalloc() 0x%08x\n" + "malloc() 0x%08x\n" + "containers kalloc() 0x%08x\n" + "IOMalloc() 0x%08x\n" + "----------------------------------------\n", + debug_ivars_size, + debug_malloc_size, + debug_container_malloc_size, + debug_iomalloc_size + ); +} + +} /* extern "C" */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super OSObject +OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSObject * IOKitDiagnostics::diagnostics( void ) +{ + IOKitDiagnostics * diags; + + diags = new IOKitDiagnostics; + if( diags && !diags->init()) { + diags->release(); + diags = 0; + } + + return( diags ); +} + +void IOKitDiagnostics::updateOffset( OSDictionary * dict, + UInt32 value, const char * name ) +{ + OSNumber * off; + + off = OSNumber::withNumber( value, 32 ); + if( !off) + 
return; + + dict->setObject( name, off ); + off->release(); +} + + +bool IOKitDiagnostics::serialize(OSSerialize *s) const +{ + OSDictionary * dict; + bool ok; + + dict = OSDictionary::withCapacity( 5 ); + if( !dict) + return( false ); + + updateOffset( dict, debug_ivars_size, "Instance allocation" ); + updateOffset( dict, debug_container_malloc_size, "Container allocation" ); + updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" ); + + dict->setObject( "Classes", OSMetaClass::getClassDictionary() ); + + ok = dict->serialize( s ); + + dict->release(); + + return( ok ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Kernel/IOLib.c b/iokit/Kernel/IOLib.c new file mode 100644 index 000000000..525094a7c --- /dev/null +++ b/iokit/Kernel/IOLib.c @@ -0,0 +1,607 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 17-Apr-91 Portions from libIO.m, Doug Mitchell at NeXT. 
+ * 17-Nov-98 cpp + * + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +mach_timespec_t IOZeroTvalspec = { 0, 0 }; + +/* + * Static variables for this module. + */ + +static IOThreadFunc threadArgFcn; +static void * threadArgArg; +static lock_t * threadArgLock; + + +enum { kIOMaxPageableMaps = 16 }; +enum { kIOPageableMapSize = 16 * 1024 * 1024 }; +enum { kIOPageableMaxMapSize = 32 * 1024 * 1024 }; + +typedef struct { + vm_map_t map; + vm_offset_t address; + vm_offset_t end; +} IOMapData; + +static struct { + UInt32 count; + UInt32 hint; + IOMapData maps[ kIOMaxPageableMaps ]; + mutex_t * lock; +} gIOKitPageableSpace; + + +void IOLibInit(void) +{ + kern_return_t ret; + + static bool libInitialized; + + if(libInitialized) + return; + + threadArgLock = lock_alloc( true, NULL, NULL ); + + gIOKitPageableSpace.maps[0].address = 0; + ret = kmem_suballoc(kernel_map, + &gIOKitPageableSpace.maps[0].address, + kIOPageableMapSize, + TRUE, + TRUE, + &gIOKitPageableSpace.maps[0].map); + if (ret != KERN_SUCCESS) + panic("failed to allocate iokit pageable map\n"); + + gIOKitPageableSpace.lock = mutex_alloc( 0 ); + gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize; + gIOKitPageableSpace.hint = 0; + gIOKitPageableSpace.count = 1; + + libInitialized = true; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * We pass an argument to a new thread by saving fcn and arg in some + * locked variables and starting the thread at ioThreadStart(). This + * function retrives fcn and arg and makes the appropriate call. 
+ * + */ + +static void ioThreadStart( void ) +{ + IOThreadFunc fcn; + void * arg; + + fcn = threadArgFcn; + arg = threadArgArg; + lock_done( threadArgLock); + + (*fcn)(arg); + + IOExitThread(); +} + +IOThread IOCreateThread(IOThreadFunc fcn, void *arg) +{ + IOThread thread; + + lock_write( threadArgLock); + threadArgFcn = fcn; + threadArgArg = arg; + + thread = kernel_thread( kernel_task, ioThreadStart); + + return(thread); +} + + +volatile void IOExitThread() +{ + (void) thread_terminate(current_act()); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +void * IOMalloc(vm_size_t size) +{ + void * address; + + address = (void *)kalloc(size); +#if IOALLOCDEBUG + if (address) + debug_iomalloc_size += size; +#endif + return address; +} + +void IOFree(void * address, vm_size_t size) +{ + if (address) { + kfree((vm_offset_t)address, size); +#if IOALLOCDEBUG + debug_iomalloc_size -= size; +#endif + } +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void * IOMallocAligned(vm_size_t size, vm_size_t alignment) +{ + kern_return_t kr; + vm_address_t address; + vm_address_t allocationAddress; + vm_size_t adjustedSize; + vm_offset_t alignMask; + + if (size == 0) + return 0; + if (alignment == 0) + alignment = 1; + + alignMask = alignment - 1; + adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t); + + if (adjustedSize >= page_size) { + + kr = kernel_memory_allocate(kernel_map, &address, + size, alignMask, KMA_KOBJECT); + if (KERN_SUCCESS != kr) { + IOLog("Failed %08x, %08x\n", size, alignment); + address = 0; + } + + } else { + + adjustedSize += alignMask; + allocationAddress = (vm_address_t) kalloc(adjustedSize); + + if (allocationAddress) { + address = (allocationAddress + alignMask + + (sizeof(vm_size_t) + sizeof(vm_address_t))) + & (~alignMask); + + *((vm_size_t *)(address - sizeof(vm_size_t) + - sizeof(vm_address_t))) = adjustedSize; + *((vm_address_t *)(address - sizeof(vm_address_t))) + = 
allocationAddress; + } else + address = 0; + } + + assert(0 == (address & alignMask)); + +#if IOALLOCDEBUG + if( address) + debug_iomalloc_size += size; +#endif + + return (void *) address; +} + +void IOFreeAligned(void * address, vm_size_t size) +{ + vm_address_t allocationAddress; + vm_size_t adjustedSize; + + if( !address) + return; + + assert(size); + + adjustedSize = size + sizeof(vm_size_t) + sizeof(vm_address_t); + if (adjustedSize >= page_size) { + + kmem_free( kernel_map, (vm_address_t) address, size); + + } else { + adjustedSize = *((vm_size_t *)( (vm_address_t) address + - sizeof(vm_address_t) - sizeof(vm_size_t))); + allocationAddress = *((vm_address_t *)( (vm_address_t) address + - sizeof(vm_address_t) )); + + kfree((vm_offset_t) allocationAddress, adjustedSize); + } + +#if IOALLOCDEBUG + debug_iomalloc_size -= size; +#endif +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, + IOPhysicalAddress * physicalAddress) +{ + kern_return_t kr; + vm_address_t address; + vm_address_t allocationAddress; + vm_size_t adjustedSize; + vm_offset_t alignMask; + + if (size == 0) + return 0; + if (alignment == 0) + alignment = 1; + + alignMask = alignment - 1; + adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t); + + if (adjustedSize >= page_size) { + + kr = kmem_alloc_contig(kernel_map, &address, size, + alignMask, KMA_KOBJECT); + if (KERN_SUCCESS != kr) + address = 0; + + } else { + + adjustedSize += alignMask; + allocationAddress = (vm_address_t) + kalloc(adjustedSize); + if (allocationAddress) { + + address = (allocationAddress + alignMask + + (sizeof(vm_size_t) + sizeof(vm_address_t))) + & (~alignMask); + + if (atop(address) != atop(address + size - 1)) + address = round_page(address); + + *((vm_size_t *)(address - sizeof(vm_size_t) + - sizeof(vm_address_t))) = adjustedSize; + *((vm_address_t *)(address - sizeof(vm_address_t))) + = allocationAddress; + 
} else + address = 0; + } + + if( address && physicalAddress) + *physicalAddress = (IOPhysicalAddress) pmap_extract( kernel_pmap, + address ); + + assert(0 == (address & alignMask)); + +#if IOALLOCDEBUG + if( address) + debug_iomalloc_size += size; +#endif + + return (void *) address; +} + +void IOFreeContiguous(void * address, vm_size_t size) +{ + vm_address_t allocationAddress; + vm_size_t adjustedSize; + + if( !address) + return; + + assert(size); + + adjustedSize = (2 * size) + sizeof(vm_size_t) + sizeof(vm_address_t); + if (adjustedSize >= page_size) { + + kmem_free( kernel_map, (vm_address_t) address, size); + + } else { + adjustedSize = *((vm_size_t *)( (vm_address_t) address + - sizeof(vm_address_t) - sizeof(vm_size_t))); + allocationAddress = *((vm_address_t *)( (vm_address_t) address + - sizeof(vm_address_t) )); + + kfree((vm_offset_t) allocationAddress, adjustedSize); + } + +#if IOALLOCDEBUG + debug_iomalloc_size -= size; +#endif +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void * IOMallocPageable(vm_size_t size, vm_size_t alignment) +{ + kern_return_t kr = kIOReturnNotReady; + vm_address_t address; + vm_size_t segSize; + UInt32 attempts; + UInt32 index; + vm_offset_t min; + vm_map_t map; + + if (alignment > page_size) + return( 0 ); + if (size > kIOPageableMaxMapSize) + return( 0 ); + + do { + index = gIOKitPageableSpace.hint; + attempts = gIOKitPageableSpace.count; + while( attempts--) { + kr = kmem_alloc_pageable( gIOKitPageableSpace.maps[index].map, + &address, size); + if( KERN_SUCCESS == kr) { + gIOKitPageableSpace.hint = index; + break; + } + if( index) + index--; + else + index = gIOKitPageableSpace.count - 1; + } + if( KERN_SUCCESS == kr) + break; + + mutex_lock( gIOKitPageableSpace.lock ); + + index = gIOKitPageableSpace.count; + if( index >= (kIOMaxPageableMaps - 1)) { + mutex_unlock( gIOKitPageableSpace.lock ); + break; + } + + if( size < kIOPageableMapSize) + segSize = kIOPageableMapSize; + else + 
segSize = size; + + min = 0; + kr = kmem_suballoc(kernel_map, + &min, + segSize, + TRUE, + TRUE, + &map); + if( KERN_SUCCESS != kr) { + mutex_unlock( gIOKitPageableSpace.lock ); + break; + } + + gIOKitPageableSpace.maps[index].map = map; + gIOKitPageableSpace.maps[index].address = min; + gIOKitPageableSpace.maps[index].end = min + segSize; + gIOKitPageableSpace.hint = index; + gIOKitPageableSpace.count = index + 1; + + mutex_unlock( gIOKitPageableSpace.lock ); + + } while( true ); + + if( KERN_SUCCESS != kr) + address = 0; + +#if IOALLOCDEBUG + if( address) + debug_iomalloc_size += round_page(size); +#endif + + return (void *) address; +} + +vm_map_t IOPageableMapForAddress( vm_address_t address ) +{ + vm_map_t map = 0; + UInt32 index; + + for( index = 0; index < gIOKitPageableSpace.count; index++) { + if( (address >= gIOKitPageableSpace.maps[index].address) + && (address < gIOKitPageableSpace.maps[index].end) ) { + map = gIOKitPageableSpace.maps[index].map; + break; + } + } + if( !map) + IOPanic("IOPageableMapForAddress: null"); + + return( map ); +} + +void IOFreePageable(void * address, vm_size_t size) +{ + vm_map_t map; + + map = IOPageableMapForAddress( (vm_address_t) address); + if( map) + kmem_free( map, (vm_offset_t) address, size); + +#if IOALLOCDEBUG + debug_iomalloc_size -= round_page(size); +#endif +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa, + vm_size_t length, unsigned int options); + +IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, + IOByteCount length, IOOptionBits cacheMode ) +{ + IOReturn ret = kIOReturnSuccess; + vm_offset_t physAddr; + + if( task != kernel_task) + return( kIOReturnUnsupported ); + + length = round_page(address + length) - trunc_page( address ); + address = trunc_page( address ); + + // make map mode + cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask; + + while( 
(kIOReturnSuccess == ret) && (length > 0) ) { + + physAddr = pmap_extract( kernel_pmap, address ); + if( physAddr) + ret = IOMapPages( get_task_map(task), address, physAddr, page_size, cacheMode ); + else + ret = kIOReturnVMError; + + length -= page_size; + } + + return( ret ); +} + + +IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address, + IOByteCount length ) +{ + if( task != kernel_task) + return( kIOReturnUnsupported ); + +#if __ppc__ + flush_dcache( (vm_offset_t) address, (unsigned) length, false ); +#endif + + return( kIOReturnSuccess ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +SInt32 OSKernelStackRemaining( void ) +{ + SInt32 stack; + + stack = (((SInt32) &stack) & (KERNEL_STACK_SIZE - 1)); + + return( stack ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IOSleep(unsigned milliseconds) +{ + int wait_result; + + assert_wait_timeout(milliseconds, THREAD_INTERRUPTIBLE); + wait_result = thread_block((void (*)(void))0); + if (wait_result != THREAD_TIMED_OUT) + thread_cancel_timer(); +} + +/* + * Spin for indicated number of microseconds. + */ +void IODelay(unsigned microseconds) +{ + extern void delay(int usec); + + delay(microseconds); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IOLog(const char *format, ...) +{ + va_list ap; + extern void conslog_putc(char); + extern void logwakeup(); + + va_start(ap, format); + _doprnt(format, &ap, conslog_putc, 16); + va_end(ap); +} + +void IOPanic(const char *reason) +{ + panic(reason); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * Convert a integer constant (typically a #define or enum) to a string. 
+ */ +static char noValue[80]; // that's pretty + +const char *IOFindNameForValue(int value, const IONamedValue *regValueArray) +{ + for( ; regValueArray->name; regValueArray++) { + if(regValueArray->value == value) + return(regValueArray->name); + } + sprintf(noValue, "0x%x (UNDEFINED)", value); + return((const char *)noValue); +} + +IOReturn IOFindValueForName(const char *string, + const IONamedValue *regValueArray, + int *value) +{ + for( ; regValueArray->name; regValueArray++) { + if(!strcmp(regValueArray->name, string)) { + *value = regValueArray->value; + return kIOReturnSuccess; + } + } + return kIOReturnBadArgument; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOAlignment IOSizeToAlignment(unsigned int size) +{ + register int shift; + const int intsize = sizeof(unsigned int) * 8; + + for (shift = 1; shift < intsize; shift++) { + if (size & 0x80000000) + return (IOAlignment)(intsize - shift); + size <<= 1; + } + return 0; +} + +unsigned int IOAlignmentToSize(IOAlignment align) +{ + unsigned int size; + + for (size = 1; align; align--) { + size <<= 1; + } + return size; +} diff --git a/iokit/Kernel/IOLocks.cpp b/iokit/Kernel/IOLocks.cpp new file mode 100644 index 000000000..f9940461e --- /dev/null +++ b/iokit/Kernel/IOLocks.cpp @@ -0,0 +1,223 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#include + +#include +#include +#include + +extern "C" { +#include +#include + +IOLock * IOLockAlloc( void ) +{ + return( mutex_alloc(ETAP_IO_AHA) ); +} + +void IOLockFree( IOLock * lock) +{ + mutex_free( lock ); +} + +void IOLockInitWithState( IOLock * lock, IOLockState state) +{ + mutex_init( lock, ETAP_IO_AHA); + + if( state == kIOLockStateLocked) + IOLockLock( lock); +} + +struct _IORecursiveLock { + mutex_t * mutex; + thread_t thread; + UInt32 count; +}; + +IORecursiveLock * IORecursiveLockAlloc( void ) +{ + _IORecursiveLock * lock; + + lock = IONew( _IORecursiveLock, 1); + if( !lock) + return( 0 ); + + lock->mutex = mutex_alloc(ETAP_IO_AHA); + if( lock->mutex) { + lock->thread = 0; + lock->count = 0; + } else { + IODelete( lock, _IORecursiveLock, 1); + lock = 0; + } + + return( (IORecursiveLock *) lock ); +} + +void IORecursiveLockFree( IORecursiveLock * _lock ) +{ + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + mutex_free( lock->mutex ); + IODelete( lock, _IORecursiveLock, 1); +} + +void IORecursiveLockLock( IORecursiveLock * _lock) +{ + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + if( lock->thread == IOThreadSelf()) + lock->count++; + else { + _mutex_lock( lock->mutex ); + assert( lock->thread == 0 ); + assert( lock->count == 0 ); + lock->thread = IOThreadSelf(); + lock->count = 1; + } +} + +boolean_t IORecursiveLockTryLock( IORecursiveLock * 
_lock) +{ + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + if( lock->thread == IOThreadSelf()) { + lock->count++; + return( true ); + } else { + if( _mutex_try( lock->mutex )) { + assert( lock->thread == 0 ); + assert( lock->count == 0 ); + lock->thread = IOThreadSelf(); + lock->count = 1; + return( true ); + } + } + return( false ); +} + +void IORecursiveLockUnlock( IORecursiveLock * _lock) +{ + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + assert( lock->thread == IOThreadSelf() ); + + if( 0 == (--lock->count)) { + lock->thread = 0; + mutex_unlock( lock->mutex ); + } +} + +boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock) +{ + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + return( lock->thread == IOThreadSelf()); +} + +int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType) +{ + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + UInt32 count = lock->count; + int res; + + assert(lock->thread == IOThreadSelf()); + assert(lock->count == 1 || interType == THREAD_UNINT); + + assert_wait((event_t) event, (int) interType); + lock->count = 0; + lock->thread = 0; + mutex_unlock(lock->mutex); + + res = thread_block(0); + + if (THREAD_AWAKENED == res) { + _mutex_lock(lock->mutex); + assert(lock->thread == 0); + assert(lock->count == 0); + lock->thread = IOThreadSelf(); + lock->count = count; + } + + return res; +} + +void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread) +{ + thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED); +} + +/* + * Complex (read/write) lock operations + */ + +IORWLock * IORWLockAlloc( void ) +{ + IORWLock * lock; + + lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA); + + return( lock); +} + +void IORWLockFree( IORWLock * lock) +{ + lock_free( lock ); +} + + +/* + * Spin locks + */ + +IOSimpleLock * IOSimpleLockAlloc( void ) +{ + IOSimpleLock * lock; + + lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock)); + if( lock) + IOSimpleLockInit( 
lock ); + + return( lock ); +} + +void IOSimpleLockInit( IOSimpleLock * lock) +{ + simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA ); +} + +void IOSimpleLockFree( IOSimpleLock * lock ) +{ + IOFree( lock, sizeof(IOSimpleLock)); +} + +} /* extern "C" */ + + diff --git a/iokit/Kernel/IOMemoryCursor.cpp b/iokit/Kernel/IOMemoryCursor.cpp new file mode 100644 index 000000000..b0da5f75a --- /dev/null +++ b/iokit/Kernel/IOMemoryCursor.cpp @@ -0,0 +1,328 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOMemoryCursor.cpp created by wgulland on 1999-3-02 */ + +#include +#include +#include +#include +#include + +/**************************** class IOMemoryCursor ***************************/ + +#undef super +#define super OSObject +OSDefineMetaClassAndStructors(IOMemoryCursor, OSObject) + +IOMemoryCursor * +IOMemoryCursor::withSpecification(SegmentFunction inSegFunc, + IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + IOMemoryCursor * me = new IOMemoryCursor; + + if (me && !me->initWithSpecification(inSegFunc, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) + { + me->release(); + return 0; + } + + return me; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool +IOMemoryCursor::initWithSpecification(SegmentFunction inSegFunc, + IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + if (!super::init()) + return false; + + if (!inSegFunc) + return false; + + outSeg = inSegFunc; + maxSegmentSize = inMaxSegmentSize; + if (inMaxTransferSize) + maxTransferSize = inMaxTransferSize; + else + maxTransferSize = (IOPhysicalLength) -1; + alignMask = inAlignment - 1; + assert(alignMask == 0); // No alignment code yet! + + return true; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +UInt32 +IOMemoryCursor::genPhysicalSegments(IOMemoryDescriptor *inDescriptor, + IOPhysicalLength fromPosition, + void * inSegments, + UInt32 inMaxSegments, + UInt32 inMaxTransferSize, + IOByteCount *outTransferSize) +{ + if (!inDescriptor) + return 0; + + if (!inMaxSegments) + return 0; + + if (!inMaxTransferSize) + inMaxTransferSize = maxTransferSize; + + /* + * Iterate over the packet, translating segments where allowed + * + * If we finished cleanly return number of segments found + * and update the position in the descriptor. 
+ */ + UInt curSegIndex = 0; + UInt curTransferSize = 0; + PhysicalSegment seg; + + while ((curSegIndex < inMaxSegments) + && (curTransferSize < inMaxTransferSize) + && (seg.location = inDescriptor->getPhysicalSegment( + fromPosition + curTransferSize, &seg.length))) + { + assert(seg.length); + seg.length = min(inMaxTransferSize-curTransferSize, + (min(seg.length, maxSegmentSize))); + (*outSeg)(seg, inSegments, curSegIndex++); + curTransferSize += seg.length; + } + + if (outTransferSize) + *outTransferSize = curTransferSize; + + return curSegIndex; +} + +/************************ class IONaturalMemoryCursor ************************/ + +#undef super +#define super IOMemoryCursor +OSDefineMetaClassAndStructors(IONaturalMemoryCursor, IOMemoryCursor) + +void IONaturalMemoryCursor::outputSegment(PhysicalSegment segment, + void * outSegments, + UInt32 outSegmentIndex) +{ + ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment; +} + +IONaturalMemoryCursor * +IONaturalMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + IONaturalMemoryCursor *me = new IONaturalMemoryCursor; + + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) + { + me->release(); + return 0; + } + + return me; +} + +bool +IONaturalMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + return super::initWithSpecification(&IONaturalMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); +} + +/************************** class IOBigMemoryCursor **************************/ + +#undef super +#define super IOMemoryCursor +OSDefineMetaClassAndStructors(IOBigMemoryCursor, IOMemoryCursor) + +void +IOBigMemoryCursor::outputSegment(PhysicalSegment inSegment, + void * inSegments, + UInt32 inSegmentIndex) +{ + IOPhysicalAddress * segment; + + segment = 
&((PhysicalSegment *) inSegments)[inSegmentIndex].location; + OSWriteBigInt(segment, 0, inSegment.location); + OSWriteBigInt(segment, sizeof(IOPhysicalAddress), inSegment.length); +} + +IOBigMemoryCursor * +IOBigMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + IOBigMemoryCursor * me = new IOBigMemoryCursor; + + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) + { + me->release(); + return 0; + } + + return me; +} + +bool +IOBigMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + return super::initWithSpecification(&IOBigMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); +} + +/************************* class IOLittleMemoryCursor ************************/ + +#undef super +#define super IOMemoryCursor +OSDefineMetaClassAndStructors(IOLittleMemoryCursor, IOMemoryCursor) + +void +IOLittleMemoryCursor::outputSegment(PhysicalSegment inSegment, + void * inSegments, + UInt32 inSegmentIndex) +{ + IOPhysicalAddress * segment; + + segment = &((PhysicalSegment *) inSegments)[inSegmentIndex].location; + OSWriteLittleInt(segment, 0, inSegment.location); + OSWriteLittleInt(segment, sizeof(IOPhysicalAddress), inSegment.length); +} + +IOLittleMemoryCursor * +IOLittleMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + IOLittleMemoryCursor * me = new IOLittleMemoryCursor; + + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) + { + me->release(); + return 0; + } + + return me; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool +IOLittleMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + 
IOPhysicalLength inAlignment) +{ + return super::initWithSpecification(&IOLittleMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); +} + +/************************* class IODBDMAMemoryCursor *************************/ + +#if defined(__ppc__) + +#include + +#undef super +#define super IOMemoryCursor +OSDefineMetaClassAndStructors(IODBDMAMemoryCursor, IOMemoryCursor) + +void +IODBDMAMemoryCursor::outputSegment(PhysicalSegment inSegment, + void * inSegments, + UInt32 inSegmentIndex) +{ + IODBDMADescriptor *segment; + + segment = &((IODBDMADescriptor *) inSegments)[inSegmentIndex]; + + // Write location into address field + OSWriteSwapInt32((UInt32 *) segment, 4, inSegment.location); + + // Write count into 1st two bytes of operation field. + // DO NOT touch rest of operation field as it should contain a STOP command. + OSWriteSwapInt16((UInt16 *) segment, 0, inSegment.length); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IODBDMAMemoryCursor * +IODBDMAMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + IODBDMAMemoryCursor *me = new IODBDMAMemoryCursor; + + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) + { + me->release(); + return 0; + } + + return me; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool +IODBDMAMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) +{ + return super::initWithSpecification(&IODBDMAMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); +} + +#endif /* defined(__ppc__) */ + diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp new file mode 100644 index 000000000..129b53f23 --- /dev/null +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -0,0 
+1,1826 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * + * HISTORY + * + */ + +#include +#include +#include +#include + +#include + +#include +#include + +__BEGIN_DECLS +#include +void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, + vm_prot_t prot, boolean_t wired); +void ipc_port_release_send(ipc_port_t port); +vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset); +__END_DECLS + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClass( IOMemoryDescriptor, OSObject ) +OSDefineAbstractStructors( IOMemoryDescriptor, OSObject ) + +#define super IOMemoryDescriptor + +OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) + +extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address ); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address ) +{ + if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags)) + return( IOPageableMapForAddress( address ) ); + else + return( get_task_map( task )); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* + * withAddress: + * + * Create a new IOMemoryDescriptor. The buffer is a virtual address + * relative to the specified task. If no task is supplied, the kernel + * task is implied. 
+ */ +IOMemoryDescriptor * +IOMemoryDescriptor::withAddress(void * address, + IOByteCount withLength, + IODirection withDirection) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithAddress(address, withLength, withDirection)) + return that; + + that->release(); + } + return 0; +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithAddress(address, withLength, withDirection, withTask)) + return that; + + that->release(); + } + return 0; +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) +{ + return( IOMemoryDescriptor::withAddress( address, withLength, + withDirection, (task_t) 0 )); +} + + +/* + * withRanges: + * + * Create a new IOMemoryDescriptor. The buffer is made up of several + * virtual address ranges, from a given task. + * + * Passing the ranges as a reference will avoid an extra allocation. 
+ */ +IOMemoryDescriptor * +IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference)) + return that; + + that->release(); + } + return 0; +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) +{ + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) + { + if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference)) + return that; + + that->release(); + } + return 0; +} + +IOMemoryDescriptor * +IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, + IOByteCount offset, + IOByteCount length, + IODirection withDirection) +{ + IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor; + + if (that && !that->initSubRange(of, offset, length, withDirection)) { + that->release(); + that = 0; + } + return that; +} + +/* + * initWithAddress: + * + * Initialize an IOMemoryDescriptor. The buffer is a virtual address + * relative to the specified task. If no task is supplied, the kernel + * task is implied. + * + * An IOMemoryDescriptor can be re-used by calling initWithAddress or + * initWithRanges again on an existing instance -- note this behavior + * is not commonly supported in other I/O Kit classes, although it is + * supported here. 
+ */ +bool +IOGeneralMemoryDescriptor::initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection) +{ + _singleRange.v.address = (vm_address_t) address; + _singleRange.v.length = withLength; + + return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); +} + +bool +IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) +{ + _singleRange.v.address = address; + _singleRange.v.length = withLength; + + return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); +} + +bool +IOGeneralMemoryDescriptor::initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) +{ + _singleRange.p.address = address; + _singleRange.p.length = withLength; + + return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); +} + +/* + * initWithRanges: + * + * Initialize an IOMemoryDescriptor. The buffer is made up of several + * virtual address ranges, from a given task + * + * Passing the ranges as a reference will avoid an extra allocation. + * + * An IOMemoryDescriptor can be re-used by calling initWithAddress or + * initWithRanges again on an existing instance -- note this behavior + * is not commonly supported in other I/O Kit classes, although it is + * supported here. + */ +bool +IOGeneralMemoryDescriptor::initWithRanges( + IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) +{ + assert(ranges); + assert(withCount); + + /* + * We can check the _initialized instance variable before having ever set + * it to an initial value because I/O Kit guarantees that all our instance + * variables are zeroed on an object's allocation. 
+ */ + + if (_initialized == false) + { + if (super::init() == false) return false; + _initialized = true; + } + else + { + /* + * An existing memory descriptor is being retargeted to point to + * somewhere else. Clean up our present state. + */ + + assert(_wireCount == 0); + + while (_wireCount) + complete(); + if (_kernPtrAligned) + unmapFromKernel(); + if (_ranges.v && _rangesIsAllocated) + IODelete(_ranges.v, IOVirtualRange, _rangesCount); + } + + /* + * Initialize the memory descriptor. + */ + + _ranges.v = 0; + _rangesCount = withCount; + _rangesIsAllocated = asReference ? false : true; + _direction = withDirection; + _length = 0; + _task = withTask; + _position = 0; + _positionAtIndex = 0; + _positionAtOffset = 0; + _kernPtrAligned = 0; + _cachedPhysicalAddress = 0; + _cachedVirtualAddress = 0; + _flags = 0; + + if (withTask && (withTask != kernel_task)) + _flags |= kIOMemoryRequiresWire; + + if (asReference) + _ranges.v = ranges; + else + { + _ranges.v = IONew(IOVirtualRange, withCount); + if (_ranges.v == 0) return false; + bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange)); + } + + for (unsigned index = 0; index < _rangesCount; index++) + { + _length += _ranges.v[index].length; + } + + return true; +} + +bool +IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) +{ +#warning assuming virtual, physical addresses same size + return( initWithRanges( (IOVirtualRange *) ranges, + withCount, withDirection, (task_t) 0, asReference )); +} + +/* + * free + * + * Free resources. 
+ */ +void IOGeneralMemoryDescriptor::free() +{ + while (_wireCount) + complete(); + if (_kernPtrAligned) + unmapFromKernel(); + if (_ranges.v && _rangesIsAllocated) + IODelete(_ranges.v, IOVirtualRange, _rangesCount); + if( _memEntry) + ipc_port_release_send( (ipc_port_t) _memEntry ); + super::free(); +} + +void IOGeneralMemoryDescriptor::unmapFromKernel() +{ + kern_return_t krtn; + vm_offset_t off; + // Pull the shared pages out of the task map + // Do we need to unwire it first? + for ( off = 0; off < _kernSize; off += page_size ) + { + pmap_change_wiring( + kernel_pmap, + _kernPtrAligned + off, + FALSE); + + pmap_remove( + kernel_pmap, + _kernPtrAligned + off, + _kernPtrAligned + off + page_size); + } + // Free the former shmem area in the task + krtn = vm_deallocate(kernel_map, + _kernPtrAligned, + _kernSize ); + assert(krtn == KERN_SUCCESS); + _kernPtrAligned = 0; +} + +void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) +{ + kern_return_t krtn; + vm_offset_t off; + + if (_kernPtrAligned) + { + if (_kernPtrAtIndex == rangeIndex) return; + unmapFromKernel(); + assert(_kernPtrAligned == 0); + } + + vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); + + _kernSize = trunc_page(_ranges.v[rangeIndex].address + + _ranges.v[rangeIndex].length + + page_size - 1) - srcAlign; + + /* Find some memory of the same size in kernel task. We use vm_allocate() + to do this. vm_allocate inserts the found memory object in the + target task's map as a side effect. */ + krtn = vm_allocate( kernel_map, + &_kernPtrAligned, + _kernSize, + VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit + assert(krtn == KERN_SUCCESS); + if(krtn) return; + + /* For each page in the area allocated from the kernel map, + find the physical address of the page. + Enter the page in the target task's pmap, at the + appropriate target task virtual address. 
*/ + for ( off = 0; off < _kernSize; off += page_size ) + { + vm_offset_t kern_phys_addr, phys_addr; + if( _task) + phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off ); + else + phys_addr = srcAlign + off; + assert(phys_addr); + if(phys_addr == 0) return; + + // Check original state. + kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off ); + // Set virtual page to point to the right physical one + pmap_enter( + kernel_pmap, + _kernPtrAligned + off, + phys_addr, + VM_PROT_READ|VM_PROT_WRITE, + TRUE); + } + _kernPtrAtIndex = rangeIndex; +} + +/* + * getDirection: + * + * Get the direction of the transfer. + */ +IODirection IOMemoryDescriptor::getDirection() const +{ + return _direction; +} + +/* + * getLength: + * + * Get the length of the transfer (over all ranges). + */ +IOByteCount IOMemoryDescriptor::getLength() const +{ + return _length; +} + +void IOMemoryDescriptor::setTag( + IOOptionBits tag ) +{ + _tag = tag; +} + +IOOptionBits IOMemoryDescriptor::getTag( void ) +{ + return( _tag); +} + +/* + * setPosition + * + * Set the logical start position inside the client buffer. + * + * It is convention that the position reflect the actual byte count that + * is successfully transferred into or out of the buffer, before the I/O + * request is "completed" (ie. sent back to its originator). 
+ */ + +void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +{ + assert(position <= _length); + + if (position >= _length) + { + _position = _length; + _positionAtIndex = _rangesCount; /* careful: out-of-bounds */ + _positionAtOffset = 0; + return; + } + + if (position < _position) + { + _positionAtOffset = position; + _positionAtIndex = 0; + } + else + { + _positionAtOffset += (position - _position); + } + _position = position; + + while (_positionAtOffset >= _ranges.v[_positionAtIndex].length) + { + _positionAtOffset -= _ranges.v[_positionAtIndex].length; + _positionAtIndex++; + } +} + +/* + * readBytes: + * + * Copy data from the memory descriptor's buffer into the specified buffer, + * relative to the current position. The memory descriptor's position is + * advanced based on the number of bytes copied. + */ + +IOByteCount IOGeneralMemoryDescriptor::readBytes(IOByteCount offset, + void * bytes, IOByteCount withLength) +{ + IOByteCount bytesLeft; + void * segment; + IOByteCount segmentLength; + + if( offset != _position) + setPosition( offset ); + + withLength = min(withLength, _length - _position); + bytesLeft = withLength; + +#if 0 + while (bytesLeft && (_position < _length)) + { + /* Compute the relative length to the end of this virtual segment. */ + segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft); + + /* Compute the relative address of this virtual segment. 
*/ + segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); + + if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment), + /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes, + /* size */ segmentLength)) + { + assert( false ); + bytesLeft = withLength; + break; + } + bytesLeft -= segmentLength; + offset += segmentLength; + setPosition(offset); + } +#else + while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength))) + { + segmentLength = min(segmentLength, bytesLeft); + bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength); + bytesLeft -= segmentLength; + offset += segmentLength; + bytes = (void *) (((UInt32) bytes) + segmentLength); + } +#endif + + return withLength - bytesLeft; +} + +/* + * writeBytes: + * + * Copy data to the memory descriptor's buffer from the specified buffer, + * relative to the current position. The memory descriptor's position is + * advanced based on the number of bytes copied. + */ +IOByteCount IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset, + const void* bytes,IOByteCount withLength) +{ + IOByteCount bytesLeft; + void * segment; + IOByteCount segmentLength; + + if( offset != _position) + setPosition( offset ); + + withLength = min(withLength, _length - _position); + bytesLeft = withLength; + +#if 0 + while (bytesLeft && (_position < _length)) + { + assert(_position <= _length); + + /* Compute the relative length to the end of this virtual segment. */ + segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft); + + /* Compute the relative address of this virtual segment. 
*/ + segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); + + if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment), + /* from */ (vm_offset_t) bytes, + /* to */ (vm_offset_t) segment, + /* size */ segmentLength)) + { + assert( false ); + bytesLeft = withLength; + break; + } + bytesLeft -= segmentLength; + offset += segmentLength; + setPosition(offset); + } +#else + while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength))) + { + segmentLength = min(segmentLength, bytesLeft); + bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength); + // Flush cache in case we're copying code around, eg. handling a code page fault + IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength ); + + bytesLeft -= segmentLength; + offset += segmentLength; + bytes = (void *) (((UInt32) bytes) + segmentLength); + } +#endif + + return withLength - bytesLeft; +} + +/* + * getPhysicalSegment: + * + * Get the physical address of the buffer, relative to the current position. + * If the current position is at the end of the buffer, a zero is returned. + */ +IOPhysicalAddress +IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) +{ + vm_address_t virtualAddress; + IOByteCount virtualLength; + pmap_t virtualPMap; + IOPhysicalAddress physicalAddress; + IOPhysicalLength physicalLength; + + if( kIOMemoryRequiresWire & _flags) + assert( _wireCount ); + + if ((0 == _task) && (1 == _rangesCount)) + { + assert(offset <= _length); + if (offset >= _length) + { + physicalAddress = 0; + physicalLength = 0; + } + else + { + physicalLength = _length - offset; + physicalAddress = offset + _ranges.v[0].address; + } + + if (lengthOfSegment) + *lengthOfSegment = physicalLength; + return physicalAddress; + } + + if( offset != _position) + setPosition( offset ); + + assert(_position <= _length); + + /* Fail gracefully if the position is at (or past) the end-of-buffer. 
*/ + if (_position >= _length) + { + *lengthOfSegment = 0; + return 0; + } + + /* Prepare to compute the largest contiguous physical length possible. */ + + virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset; + virtualLength = _ranges.v[_positionAtIndex].length - _positionAtOffset; + vm_address_t virtualPage = trunc_page(virtualAddress); + if( _task) + virtualPMap = get_task_pmap(_task); + else + virtualPMap = 0; + + physicalAddress = (virtualAddress == _cachedVirtualAddress) ? + _cachedPhysicalAddress : /* optimization */ + virtualPMap ? + pmap_extract(virtualPMap, virtualAddress) : + virtualAddress; + physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress; + + if (!physicalAddress && _task) + { + physicalAddress = + vm_map_get_phys_page(get_task_map(_task), virtualPage); + physicalAddress += virtualAddress - virtualPage; + } + + if (physicalAddress == 0) /* memory must be wired in order to proceed */ + { + assert(physicalAddress); + *lengthOfSegment = 0; + return 0; + } + + /* Compute the largest contiguous physical length possible, within range. */ + IOPhysicalAddress physicalPage = trunc_page(physicalAddress); + + while (physicalLength < virtualLength) + { + physicalPage += page_size; + virtualPage += page_size; + _cachedVirtualAddress = virtualPage; + _cachedPhysicalAddress = virtualPMap ? + pmap_extract(virtualPMap, virtualPage) : + virtualPage; + if (!_cachedPhysicalAddress && _task) + { + _cachedPhysicalAddress = + vm_map_get_phys_page(get_task_map(_task), virtualPage); + } + + if (_cachedPhysicalAddress != physicalPage) break; + + physicalLength += page_size; + } + + /* Clip contiguous physical length at the end of this range. */ + if (physicalLength > virtualLength) + physicalLength = virtualLength; + + if( lengthOfSegment) + *lengthOfSegment = physicalLength; + + return physicalAddress; +} + + +/* + * getVirtualSegment: + * + * Get the virtual address of the buffer, relative to the current position. 
+ * If the memory wasn't mapped into the caller's address space, it will be + * mapped in now. If the current position is at the end of the buffer, a + * null is returned. + */ +void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) +{ + if( offset != _position) + setPosition( offset ); + + assert(_position <= _length); + + /* Fail gracefully if the position is at (or past) the end-of-buffer. */ + if (_position >= _length) + { + *lengthOfSegment = 0; + return 0; + } + + /* Compute the relative length to the end of this virtual segment. */ + *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset; + + /* Compute the relative address of this virtual segment. */ + if (_task == kernel_task) + return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset); + else + { + vm_offset_t off; + + mapIntoKernel(_positionAtIndex); + + off = _ranges.v[_kernPtrAtIndex].address; + off -= trunc_page(off); + + return (void *) (_kernPtrAligned + off + _positionAtOffset); + } +} + +/* + * prepare + * + * Prepare the memory for an I/O transfer. This involves paging in + * the memory, if necessary, and wiring it down for the duration of + * the transfer. The complete() method completes the processing of + * the memory after the I/O transfer finishes. This method needn't + * called for non-pageable memory. + */ +IOReturn IOGeneralMemoryDescriptor::prepare( + IODirection forDirection = kIODirectionNone) +{ + UInt rangeIndex = 0; + + if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) { + kern_return_t rc; + + if(forDirection == kIODirectionNone) + forDirection = _direction; + + vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction + + // + // Check user read/write access to the data buffer. 
+ // + + for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) + { + vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address); + vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length ); + + while (checkSize) + { + vm_region_basic_info_data_t regionInfo; + mach_msg_type_number_t regionInfoSize = sizeof(regionInfo); + vm_size_t regionSize; + + if ( (vm_region( + /* map */ getMapForTask(_task, checkBase), + /* address */ &checkBase, + /* size */ &regionSize, + /* flavor */ VM_REGION_BASIC_INFO, + /* info */ (vm_region_info_t) &regionInfo, + /* info size */ &regionInfoSize, + /* object name */ 0 ) != KERN_SUCCESS ) || + ( (forDirection & kIODirectionIn ) && + !(regionInfo.protection & VM_PROT_WRITE) ) || + ( (forDirection & kIODirectionOut) && + !(regionInfo.protection & VM_PROT_READ ) ) ) + { + return kIOReturnVMError; + } + + assert((regionSize & PAGE_MASK) == 0); + + regionSize = min(regionSize, checkSize); + checkSize -= regionSize; + checkBase += regionSize; + } // (for each vm region) + } // (for each io range) + + for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) { + + vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); + IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address + + _ranges.v[rangeIndex].length + + page_size - 1); + + vm_map_t taskVMMap = getMapForTask(_task, srcAlign); + + rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE); + if (KERN_SUCCESS != rc) { + IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc); + goto abortExit; + } + + // If this I/O is for a user land task then protect ourselves + // against COW and other vm_shenanigans + if (_task && _task != kernel_task) { + // setup a data object to hold the 'named' memory regions + // @@@ gvdl: If we fail to allocate an OSData we will just + // hope for the best for the time being. Lets not fail a + // prepare at this late stage in product release. 
+ if (!_memoryEntries) + _memoryEntries = OSData::withCapacity(16); + if (_memoryEntries) { + vm_object_offset_t desiredSize = srcAlignEnd - srcAlign; + vm_object_offset_t entryStart = srcAlign; + ipc_port_t memHandle; + + do { + vm_object_offset_t actualSize = desiredSize; + + rc = mach_make_memory_entry_64 + (taskVMMap, &actualSize, entryStart, + forDirection, &memHandle, NULL); + if (KERN_SUCCESS != rc) { + IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc); + goto abortExit; + } + + _memoryEntries-> + appendBytes(&memHandle, sizeof(memHandle)); + desiredSize -= actualSize; + entryStart += actualSize; + } while (desiredSize); + } + } + } + } + _wireCount++; + return kIOReturnSuccess; + +abortExit: + UInt doneIndex; + + + for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) { + vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address); + IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address + + _ranges.v[doneIndex].length + + page_size - 1); + + vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign, + srcAlignEnd, FALSE); + } + + if (_memoryEntries) { + ipc_port_t *handles, *handlesEnd; + + handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy(); + handlesEnd = (ipc_port_t *) + ((vm_address_t) handles + _memoryEntries->getLength()); + while (handles < handlesEnd) + ipc_port_release_send(*handles++); + _memoryEntries->release(); + _memoryEntries = 0; + } + + return kIOReturnVMError; +} + +/* + * complete + * + * Complete processing of the memory after an I/O transfer finishes. + * This method should not be called unless a prepare was previously + * issued; the prepare() and complete() must occur in pairs, + * before and after an I/O transfer involving pageable memory. 
+ */ + +IOReturn IOGeneralMemoryDescriptor::complete( + IODirection forDirection = kIODirectionNone) +{ + assert(_wireCount); + + if(0 == _wireCount) + return kIOReturnSuccess; + + _wireCount--; + if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) { + UInt rangeIndex; + kern_return_t rc; + + if(forDirection == kIODirectionNone) + forDirection = _direction; + + for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) { + + vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address); + IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address + + _ranges.v[rangeIndex].length + + page_size - 1); + + if(forDirection == kIODirectionIn) + pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd); + + rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign, + srcAlignEnd, FALSE); + if(rc != KERN_SUCCESS) + IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc); + } + + if (_memoryEntries) { + ipc_port_t *handles, *handlesEnd; + + handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy(); + handlesEnd = (ipc_port_t *) + ((vm_address_t) handles + _memoryEntries->getLength()); + while (handles < handlesEnd) + ipc_port_release_send(*handles++); + + _memoryEntries->release(); + _memoryEntries = 0; + } + + _cachedVirtualAddress = 0; + } + return kIOReturnSuccess; +} + +IOReturn IOGeneralMemoryDescriptor::doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + IOByteCount length = 0 ) +{ + kern_return_t kr; + + // mapping source == dest? 
(could be much better) + if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere) + && (1 == _rangesCount) && (0 == sourceOffset) + && (length <= _ranges.v[0].length) ) { + *atAddress = _ranges.v[0].address; + return( kIOReturnSuccess ); + } + + if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) { + + do { + + if( (1 != _rangesCount) + || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) { + kr = kIOReturnUnsupported; + continue; + } + + if( 0 == length) + length = getLength(); + if( (sourceOffset + length) > _ranges.v[0].length) { + kr = kIOReturnBadArgument; + continue; + } + + ipc_port_t sharedMem = (ipc_port_t) _memEntry; + vm_prot_t prot = VM_PROT_READ + | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE); + + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + if( options & kIOMapAnywhere) + *atAddress = 0; + + if( 0 == sharedMem) + kr = kIOReturnVMError; + else + kr = KERN_SUCCESS; + + if( KERN_SUCCESS == kr) + kr = vm_map( addressMap, + atAddress, + length, 0 /* mask */, + (( options & kIOMapAnywhere ) ? 
VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) + | VM_MAKE_TAG(VM_MEMORY_IOKIT), + sharedMem, sourceOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + + } while( false ); + + } else + kr = super::doMap( addressMap, atAddress, + options, sourceOffset, length ); + return( kr ); +} + +IOReturn IOGeneralMemoryDescriptor::doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ) +{ + // could be much better + if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount) + && (logical == _ranges.v[0].address) + && (length <= _ranges.v[0].length) ) + return( kIOReturnSuccess ); + + return( super::doUnmap( addressMap, logical, length )); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +extern "C" { +// osfmk/device/iokit_rpc.c +extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa, + vm_size_t length, unsigned int mapFlags); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IORecursiveLock * gIOMemoryLock; + +#define LOCK IORecursiveLockLock( gIOMemoryLock) +#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClass( IOMemoryMap, OSObject ) +OSDefineAbstractStructors( IOMemoryMap, OSObject ) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class _IOMemoryMap : public IOMemoryMap +{ + OSDeclareDefaultStructors(_IOMemoryMap) + + IOMemoryDescriptor * memory; + IOMemoryMap * superMap; + IOByteCount offset; + IOByteCount length; + IOVirtualAddress logical; + task_t addressTask; + vm_map_t addressMap; + IOOptionBits options; + +public: + virtual void free(); + + // IOMemoryMap methods + virtual IOVirtualAddress getVirtualAddress(); + virtual IOByteCount getLength(); + virtual task_t getAddressTask(); + virtual IOMemoryDescriptor * getMemoryDescriptor(); + virtual IOOptionBits 
getMapOptions(); + + virtual IOReturn unmap(); + virtual void taskDied(); + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length); + + // for IOMemoryDescriptor use + _IOMemoryMap * isCompatible( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); + + bool init( + IOMemoryDescriptor * memory, + IOMemoryMap * superMap, + IOByteCount offset, + IOByteCount length ); + + bool init( + IOMemoryDescriptor * memory, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOMemoryMap + +OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool _IOMemoryMap::init( + IOMemoryDescriptor * _memory, + IOMemoryMap * _superMap, + IOByteCount _offset, + IOByteCount _length ) +{ + + if( !super::init()) + return( false); + + if( (_offset + _length) > _superMap->getLength()) + return( false); + + _memory->retain(); + memory = _memory; + _superMap->retain(); + superMap = _superMap; + + offset = _offset; + if( _length) + length = _length; + else + length = _memory->getLength(); + + options = superMap->getMapOptions(); + logical = superMap->getVirtualAddress() + offset; + + return( true ); +} + +bool _IOMemoryMap::init( + IOMemoryDescriptor * _memory, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits _options, + IOByteCount _offset, + IOByteCount _length ) +{ + bool ok; + + if( (!_memory) || (!intoTask) || !super::init()) + return( false); + + if( (_offset + _length) > _memory->getLength()) + return( false); + + addressMap = get_task_map(intoTask); + if( !addressMap) + return( false); + kernel_vm_map_reference(addressMap); + + _memory->retain(); + memory = _memory; + + offset = 
_offset; + if( _length) + length = _length; + else + length = _memory->getLength(); + + addressTask = intoTask; + logical = toAddress; + options = _options; + + if( options & kIOMapStatic) + ok = true; + else + ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical, + options, offset, length )); + if( !ok) { + logical = 0; + _memory->release(); + vm_map_deallocate(addressMap); + addressMap = 0; + } + return( ok ); +} + +IOReturn IOMemoryDescriptor::doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + IOByteCount length = 0 ) +{ + IOReturn err = kIOReturnSuccess; + vm_size_t ourSize; + vm_size_t bytes; + vm_offset_t mapped; + vm_address_t logical; + IOByteCount pageOffset; + IOPhysicalLength segLen; + IOPhysicalAddress physAddr; + + if( 0 == length) + length = getLength(); + + physAddr = getPhysicalSegment( sourceOffset, &segLen ); + assert( physAddr ); + + pageOffset = physAddr - trunc_page( physAddr ); + ourSize = length + pageOffset; + physAddr -= pageOffset; + + logical = *atAddress; + if( 0 == (options & kIOMapAnywhere)) { + mapped = trunc_page( logical ); + if( (logical - mapped) != pageOffset) + err = kIOReturnVMError; + } + if( kIOReturnSuccess == err) + err = vm_allocate( addressMap, &mapped, ourSize, + ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED) + | VM_MAKE_TAG(VM_MEMORY_IOKIT) ); + + if( err) { +#ifdef DEBUG + kprintf("IOMemoryDescriptor::doMap: vm_allocate() " + "returned %08x\n", err); +#endif + return( err); + } + + // we have to make sure that these guys don't get copied if we fork. 
+ err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE); + if( err != KERN_SUCCESS) { + doUnmap( addressMap, mapped, ourSize); // back out + return( err); + } + + logical = mapped; + *atAddress = mapped + pageOffset; + + segLen += pageOffset; + bytes = ourSize; + do { + // in the middle of the loop only map whole pages + if( segLen >= bytes) + segLen = bytes; + else if( segLen != trunc_page( segLen)) + err = kIOReturnVMError; + if( physAddr != trunc_page( physAddr)) + err = kIOReturnBadArgument; + +#ifdef DEBUG + if( kIOLogMapping & gIOKitDebug) + kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n", + addressMap, mapped + pageOffset, physAddr + pageOffset, + segLen - pageOffset); +#endif + + if( kIOReturnSuccess == err) + err = IOMapPages( addressMap, mapped, physAddr, segLen, options ); + if( err) + break; + + sourceOffset += segLen - pageOffset; + mapped += segLen; + bytes -= segLen; + pageOffset = 0; + + } while( bytes + && (physAddr = getPhysicalSegment( sourceOffset, &segLen ))); + + if( bytes) + err = kIOReturnBadArgument; + if( err) + doUnmap( addressMap, logical, ourSize ); + else + mapped = true; + + return( err ); +} + +IOReturn IOMemoryDescriptor::doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ) +{ + IOReturn err; + +#ifdef DEBUG + if( kIOLogMapping & gIOKitDebug) + kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n", + addressMap, logical, length ); +#endif + + if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) + err = vm_deallocate( addressMap, logical, length ); + else + err = kIOReturnSuccess; + + return( err ); +} + +IOReturn _IOMemoryMap::unmap( void ) +{ + IOReturn err; + + LOCK; + + if( logical && addressMap && (0 == superMap) + && (0 == (options & kIOMapStatic))) { + + err = memory->doUnmap( addressMap, logical, length ); + vm_map_deallocate(addressMap); + addressMap = 0; + + } else + err = kIOReturnSuccess; + + logical = 0; + + UNLOCK; + + return( err ); +} + +void 
_IOMemoryMap::taskDied( void ) +{ + LOCK; + if( addressMap) { + vm_map_deallocate(addressMap); + addressMap = 0; + } + addressTask = 0; + logical = 0; + UNLOCK; +} + +void _IOMemoryMap::free() +{ + unmap(); + + if( memory) { + LOCK; + memory->removeMapping( this); + UNLOCK; + memory->release(); + } + + if( superMap) + superMap->release(); + + super::free(); +} + +IOByteCount _IOMemoryMap::getLength() +{ + return( length ); +} + +IOVirtualAddress _IOMemoryMap::getVirtualAddress() +{ + return( logical); +} + +task_t _IOMemoryMap::getAddressTask() +{ + if( superMap) + return( superMap->getAddressTask()); + else + return( addressTask); +} + +IOOptionBits _IOMemoryMap::getMapOptions() +{ + return( options); +} + +IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor() +{ + return( memory ); +} + +_IOMemoryMap * _IOMemoryMap::isCompatible( + IOMemoryDescriptor * owner, + task_t task, + IOVirtualAddress toAddress, + IOOptionBits _options, + IOByteCount _offset, + IOByteCount _length ) +{ + _IOMemoryMap * mapping; + + if( (!task) || (task != getAddressTask())) + return( 0 ); + if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly)) + return( 0 ); + + if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress)) + return( 0 ); + + if( _offset < offset) + return( 0 ); + + _offset -= offset; + + if( (_offset + _length) > length) + return( 0 ); + + if( (length == _length) && (!_offset)) { + retain(); + mapping = this; + + } else { + mapping = new _IOMemoryMap; + if( mapping + && !mapping->init( owner, this, _offset, _length )) { + mapping->release(); + mapping = 0; + } + } + + return( mapping ); +} + +IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, + IOPhysicalLength * length) +{ + IOPhysicalAddress address; + + LOCK; + address = memory->getPhysicalSegment( offset + _offset, length ); + UNLOCK; + + return( address ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super OSObject + 
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IOMemoryDescriptor::initialize( void ) +{ + if( 0 == gIOMemoryLock) + gIOMemoryLock = IORecursiveLockAlloc(); +} + +void IOMemoryDescriptor::free( void ) +{ + if( _mappings) + _mappings->release(); + + super::free(); +} + +IOMemoryMap * IOMemoryDescriptor::setMapping( + task_t intoTask, + IOVirtualAddress mapAddress, + IOOptionBits options = 0 ) +{ + _IOMemoryMap * map; + + map = new _IOMemoryMap; + + LOCK; + + if( map + && !map->init( this, intoTask, mapAddress, + options | kIOMapStatic, 0, getLength() )) { + map->release(); + map = 0; + } + + addMapping( map); + + UNLOCK; + + return( map); +} + +IOMemoryMap * IOMemoryDescriptor::map( + IOOptionBits options = 0 ) +{ + + return( makeMapping( this, kernel_task, 0, + options | kIOMapAnywhere, + 0, getLength() )); +} + +IOMemoryMap * IOMemoryDescriptor::map( + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset = 0, + IOByteCount length = 0 ) +{ + if( 0 == length) + length = getLength(); + + return( makeMapping( this, intoTask, toAddress, options, offset, length )); +} + +IOMemoryMap * IOMemoryDescriptor::makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ) +{ + _IOMemoryMap * mapping = 0; + OSIterator * iter; + + LOCK; + + do { + // look for an existing mapping + if( (iter = OSCollectionIterator::withCollection( _mappings))) { + + while( (mapping = (_IOMemoryMap *) iter->getNextObject())) { + + if( (mapping = mapping->isCompatible( + owner, intoTask, toAddress, + options | kIOMapReference, + offset, length ))) + break; + } + iter->release(); + if( mapping) + continue; + } + + + if( mapping || (options & kIOMapReference)) + continue; + + owner = this; + + mapping = new _IOMemoryMap; + if( mapping + && !mapping->init( owner, intoTask, toAddress, options, + offset, length )) { + + 
IOLog("Didn't make map %08lx : %08lx\n", offset, length ); + mapping->release(); + mapping = 0; + } + + } while( false ); + + owner->addMapping( mapping); + + UNLOCK; + + return( mapping); +} + +void IOMemoryDescriptor::addMapping( + IOMemoryMap * mapping ) +{ + if( mapping) { + if( 0 == _mappings) + _mappings = OSSet::withCapacity(1); + if( _mappings && _mappings->setObject( mapping )) + mapping->release(); /* really */ + } +} + +void IOMemoryDescriptor::removeMapping( + IOMemoryMap * mapping ) +{ + if( _mappings) { + mapping->retain(); + mapping->retain(); + _mappings->removeObject( mapping); + } +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOMemoryDescriptor + +OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, + IOByteCount offset, IOByteCount length, + IODirection withDirection ) +{ + if( !super::init()) + return( false ); + + if( !parent) + return( false); + + if( (offset + length) > parent->getLength()) + return( false); + + parent->retain(); + _parent = parent; + _start = offset; + _length = length; + _direction = withDirection; + _tag = parent->getTag(); + + return( true ); +} + +void IOSubMemoryDescriptor::free( void ) +{ + if( _parent) + _parent->release(); + + super::free(); +} + + +IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, + IOByteCount * length ) +{ + IOPhysicalAddress address; + IOByteCount actualLength; + + assert(offset <= _length); + + if( length) + *length = 0; + + if( offset >= _length) + return( 0 ); + + address = _parent->getPhysicalSegment( offset + _start, &actualLength ); + + if( address && length) + *length = min( _length - offset, actualLength ); + + return( address ); +} + +void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * 
lengthOfSegment) +{ + return( 0 ); +} + +IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset, + void * bytes, IOByteCount withLength) +{ + IOByteCount byteCount; + + assert(offset <= _length); + + if( offset >= _length) + return( 0 ); + + LOCK; + byteCount = _parent->readBytes( _start + offset, bytes, + min(withLength, _length - offset) ); + UNLOCK; + + return( byteCount ); +} + +IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset, + const void* bytes, IOByteCount withLength) +{ + IOByteCount byteCount; + + assert(offset <= _length); + + if( offset >= _length) + return( 0 ); + + LOCK; + byteCount = _parent->writeBytes( _start + offset, bytes, + min(withLength, _length - offset) ); + UNLOCK; + + return( byteCount ); +} + +IOReturn IOSubMemoryDescriptor::prepare( + IODirection forDirection = kIODirectionNone) +{ + IOReturn err; + + LOCK; + err = _parent->prepare( forDirection); + UNLOCK; + + return( err ); +} + +IOReturn IOSubMemoryDescriptor::complete( + IODirection forDirection = kIODirectionNone) +{ + IOReturn err; + + LOCK; + err = _parent->complete( forDirection); + UNLOCK; + + return( err ); +} + +IOMemoryMap * IOSubMemoryDescriptor::makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress toAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ) +{ + IOMemoryMap * mapping; + + mapping = (IOMemoryMap *) _parent->makeMapping( + _parent, intoTask, + toAddress - (_start + offset), + options | kIOMapReference, + _start + offset, length ); + + if( !mapping) + mapping = super::makeMapping( owner, intoTask, toAddress, options, + offset, length ); + + return( mapping ); +} + +/* ick */ + +bool +IOSubMemoryDescriptor::initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection) +{ + return( false ); +} + +bool +IOSubMemoryDescriptor::initWithAddress(vm_address_t address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) +{ + return( false ); +} + 
+bool +IOSubMemoryDescriptor::initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) +{ + return( false ); +} + +bool +IOSubMemoryDescriptor::initWithRanges( + IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) +{ + return( false ); +} + +bool +IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) +{ + return( false ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); +OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); diff --git a/iokit/Kernel/IOMultiMemoryDescriptor.cpp b/iokit/Kernel/IOMultiMemoryDescriptor.cpp new file mode 100644 index 000000000..174fb7d91 --- /dev/null +++ b/iokit/Kernel/IOMultiMemoryDescriptor.cpp @@ -0,0 +1,368 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#define super IOMemoryDescriptor +OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor) + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMultiMemoryDescriptor::initWithAddress( + void * /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMultiMemoryDescriptor::initWithAddress( + vm_address_t /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ , + task_t /* withTask */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMultiMemoryDescriptor::initWithPhysicalAddress( + IOPhysicalAddress /* address */ , + IOByteCount /* withLength */ , + IODirection /* withDirection */ ) +{ + return false; +} + + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMultiMemoryDescriptor::initWithPhysicalRanges( + IOPhysicalRange * /* ranges */ , + 
UInt32 /* withCount */ , + IODirection /* withDirection */ , + bool /* asReference */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMultiMemoryDescriptor::initWithRanges( + IOVirtualRange * /* ranges */ , + UInt32 /* withCount */ , + IODirection /* withDirection */ , + task_t /* withTask */ , + bool /* asReference */ ) +{ + return false; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference = false ) +{ + // + // Create a new IOMultiMemoryDescriptor. The "buffer" is made up of several + // memory descriptors, that are to be chained end-to-end to make up a single + // memory descriptor. + // + // Passing the ranges as a reference will avoid an extra allocation. + // + + IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor; + + if ( me && me->initWithDescriptors( + /* descriptors */ descriptors, + /* withCount */ withCount, + /* withDirection */ withDirection, + /* asReference */ asReference ) == false ) + { + me->release(); + me = 0; + } + + return me; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +bool IOMultiMemoryDescriptor::initWithDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference = false ) +{ + // + // Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several + // memory descriptors, that are to be chained end-to-end to make up a single + // memory descriptor. + // + // Passing the ranges as a reference will avoid an extra allocation. + // + + assert(descriptors); + assert(withCount); + + // Ask our superclass' opinion. + + if ( super::init() == false ) return false; + + // Initialize our minimal state. 
+ + _descriptors = 0; + _descriptorsCount = withCount; + _descriptorsIsAllocated = asReference ? false : true; + _direction = withDirection; + _length = 0; + _mappings = 0; + _tag = 0; + + if ( asReference ) + { + _descriptors = descriptors; + } + else + { + _descriptors = IONew(IOMemoryDescriptor *, withCount); + if ( _descriptors == 0 ) return false; + + bcopy( /* from */ descriptors, + /* to */ _descriptors, + /* bytes */ withCount * sizeof(IOMemoryDescriptor *) ); + } + + for ( unsigned index = 0; index < withCount; index++ ) + { + descriptors[index]->retain(); + _length += descriptors[index]->getLength(); + if ( _tag == 0 ) _tag = descriptors[index]->getTag(); + assert(descriptors[index]->getDirection() == withDirection); + } + + return true; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void IOMultiMemoryDescriptor::free() +{ + // + // Free all of this object's outstanding resources. + // + + if ( _descriptors ) + { + for ( unsigned index = 0; index < _descriptorsCount; index++ ) + _descriptors[index]->release(); + + if ( _descriptorsIsAllocated ) + IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount); + } + + super::free(); +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection) +{ + // + // Prepare the memory for an I/O transfer. + // + // This involves paging in the memory and wiring it down for the duration + // of the transfer. The complete() method finishes the processing of the + // memory after the I/O transfer finishes. 
+ // + + unsigned index; + IOReturn status = kIOReturnInternalError; + IOReturn statusUndo; + + if ( forDirection == kIODirectionNone ) + { + forDirection = _direction; + } + + for ( index = 0; index < _descriptorsCount; index++ ) + { + status = _descriptors[index]->prepare(forDirection); + if ( status != kIOReturnSuccess ) break; + } + + if ( status != kIOReturnSuccess ) + { + for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ ) + { + statusUndo = _descriptors[indexUndo]->complete(forDirection); + assert(statusUndo == kIOReturnSuccess); + } + } + + return status; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection) +{ + // + // Complete processing of the memory after an I/O transfer finishes. + // + // This method shouldn't be called unless a prepare() was previously issued; + // the prepare() and complete() must occur in pairs, before and after an I/O + // transfer. + // + + IOReturn status; + IOReturn statusFinal = kIOReturnSuccess; + + if ( forDirection == kIODirectionNone ) + { + forDirection = _direction; + } + + for ( unsigned index = 0; index < _descriptorsCount; index++ ) + { + status = _descriptors[index]->complete(forDirection); + if ( status != kIOReturnSuccess ) statusFinal = status; + assert(status == kIOReturnSuccess); + } + + return statusFinal; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOPhysicalAddress IOMultiMemoryDescriptor::getPhysicalSegment( + IOByteCount offset, + IOByteCount * length ) +{ + // + // This method returns the physical address of the byte at the given offset + // into the memory, and optionally the length of the physically contiguous + // segment from that offset.
+ // + + assert(offset <= _length); + + for ( unsigned index = 0; index < _descriptorsCount; index++ ) + { + if ( offset < _descriptors[index]->getLength() ) + { + return _descriptors[index]->getPhysicalSegment(offset, length); + } + offset -= _descriptors[index]->getLength(); + } + + if ( length ) *length = 0; + + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +void * IOMultiMemoryDescriptor::getVirtualSegment( IOByteCount /* offset */ , + IOByteCount * /* length */ ) +{ + return 0; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOByteCount IOMultiMemoryDescriptor::readBytes( IOByteCount offset, + void * bytes, + IOByteCount withLength ) +{ + // + // Copies data from the memory descriptor's buffer at the given offset, to + // the specified buffer. Returns the number of bytes copied. + // + + IOByteCount bytesCopied = 0; + unsigned index; + + for ( index = 0; index < _descriptorsCount; index++ ) + { + if ( offset < _descriptors[index]->getLength() ) break; + offset -= _descriptors[index]->getLength(); + } + + for ( ; index < _descriptorsCount && withLength; index++) + { + IOByteCount copy = min(_descriptors[index]->getLength(), withLength); + IOByteCount copied = _descriptors[index]->readBytes(offset,bytes,copy); + + bytesCopied += copied; + if ( copied != copy ) break; + + bytes = ((UInt8 *) bytes) + copied; + withLength -= copied; + offset = 0; + } + + return bytesCopied; +} + +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +IOByteCount IOMultiMemoryDescriptor::writeBytes( IOByteCount offset, + const void * bytes, + IOByteCount withLength ) +{ + // + // Copies data to the memory descriptor's buffer at the given offset, from + // the specified buffer. Returns the number of bytes copied. 
+ // + + IOByteCount bytesCopied = 0; + unsigned index; + + for ( index = 0; index < _descriptorsCount; index++ ) + { + if ( offset < _descriptors[index]->getLength() ) break; + offset -= _descriptors[index]->getLength(); + } + + for ( ; index < _descriptorsCount && withLength; index++) + { + IOByteCount copy = min(_descriptors[index]->getLength(), withLength); + IOByteCount copied = _descriptors[index]->writeBytes(offset,bytes,copy); + + bytesCopied += copied; + if ( copied != copy ) break; + + bytes = ((UInt8 *) bytes) + copied; + withLength -= copied; + offset = 0; + } + + return bytesCopied; +} diff --git a/iokit/Kernel/IONVRAM.cpp b/iokit/Kernel/IONVRAM.cpp new file mode 100644 index 000000000..e4bc666b2 --- /dev/null +++ b/iokit/Kernel/IONVRAM.cpp @@ -0,0 +1,1349 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include + +#define super IOService + +OSDefineMetaClassAndStructors(IODTNVRAM, IOService); + +bool IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane) +{ + OSDictionary *dict; + + if (!super::init(old, plane)) return false; + + dict = OSDictionary::withCapacity(1); + if (dict == 0) return false; + setPropertyTable(dict); + + _nvramImage = IONew(UInt8, kIODTNVRAMImageSize); + if (_nvramImage == 0) return false; + + _registryPropertiesKey = OSSymbol::withCStringNoCopy("aapl,pci"); + if (_registryPropertiesKey == 0) return false; + + return true; +} + +void IODTNVRAM::registerNVRAMController(IONVRAMController *nvram) +{ + UInt32 currentOffset = 0; + + if (_nvramController != 0) return; + + _nvramController = nvram; + + _nvramController->read(0, _nvramImage, kIODTNVRAMImageSize); + + // Find the offsets for the OF, XPRAM and NameRegistry partitions in NVRAM. + _ofPartitionOffset = 0xFFFFFFFF; + _xpramPartitionOffset = 0xFFFFFFFF; + _nrPartitionOffset = 0xFFFFFFFF; + if (getPlatform()->getBootROMType()) { + // Look through the partitions to find the OF, MacOS partitions. + while (currentOffset < kIODTNVRAMImageSize) { + if (strcmp((const char *)_nvramImage + currentOffset + 4, "common") == 0) { + _ofPartitionOffset = currentOffset + 16; + _ofPartitionSize = + (((UInt16 *)(_nvramImage + currentOffset))[1] - 1) * 0x10; + } else if (strcmp((const char *)_nvramImage + currentOffset + 4, "APL,MacOS75") == 0) { + _xpramPartitionOffset = currentOffset + 16; + _xpramPartitionSize = kIODTNVRAMXPRAMSize; + _nrPartitionOffset = _xpramPartitionOffset + _xpramPartitionSize; + _nrPartitionSize = + (((UInt16 *)(_nvramImage + currentOffset))[1] - 1) * 0x10 - + _xpramPartitionSize; + } + currentOffset += ((short *)(_nvramImage + currentOffset))[1] * 16; + } + } else { + // Use the fixed address for old world machines. 
+ _ofPartitionOffset = 0x1800; + _ofPartitionSize = 0x0800; + _xpramPartitionOffset = 0x1300; + _xpramPartitionSize = 0x0100; + _nrPartitionOffset = 0x1400; + _nrPartitionSize = 0x0400; + } + + if (_ofPartitionOffset != 0xFFFFFFFF) + _ofImage = _nvramImage + _ofPartitionOffset; + if (_xpramPartitionOffset != 0xFFFFFFFF) + _xpramImage = _nvramImage + _xpramPartitionOffset; + if (_nrPartitionOffset != 0xFFFFFFFF) + _nrImage = _nvramImage + _nrPartitionOffset; + + initOFVariables(); +} + +void IODTNVRAM::sync(void) +{ + if (!_nvramImageDirty && !_ofImageDirty) return; + + syncOFVariables(); + + _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + _nvramController->sync(); + + _nvramImageDirty = false; +} + +bool IODTNVRAM::serializeProperties(OSSerialize *serialize) const +{ + bool result; + UInt32 variablePerm; + const OSSymbol *key; + OSDictionary *dict, *tmpDict = 0; + OSCollectionIterator *iter = 0; + + if (_ofDict == 0) return false; + + // Verify permissions. + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) { + tmpDict = OSDictionary::withCapacity(1); + if (tmpDict == 0) return false; + + iter = OSCollectionIterator::withCollection(_ofDict); + if (iter == 0) return false; + + while (1) { + key = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (key == 0) break; + + variablePerm = getOFVariablePerm(key); + if (variablePerm != kOFVariablePermRootOnly) { + tmpDict->setObject(key, _ofDict->getObject(key)); + } + } + dict = tmpDict; + } else { + dict = _ofDict; + } + + result = dict->serialize(serialize); + + if (tmpDict != 0) tmpDict->release(); + if (iter != 0) iter->release(); + + return result; +} + +OSObject *IODTNVRAM::getProperty(const OSSymbol *aKey) const +{ + IOReturn result; + UInt32 variablePerm; + + if (_ofDict == 0) return 0; + + // Verify permissions. 
+ result = IOUserClient::clientHasPrivilege(current_task(), "root"); + if (result != kIOReturnSuccess) { + variablePerm = getOFVariablePerm(aKey); + if (variablePerm == kOFVariablePermRootOnly) return 0; + } + + return _ofDict->getObject(aKey); +} + +OSObject *IODTNVRAM::getProperty(const char *aKey) const +{ + const OSSymbol *keySymbol; + OSObject *theObject = 0; + + keySymbol = OSSymbol::withCStringNoCopy(aKey); + if (keySymbol != 0) { + theObject = getProperty(keySymbol); + keySymbol->release(); + } + + return theObject; +} + +bool IODTNVRAM::setProperty(const OSSymbol *aKey, OSObject *anObject) +{ + bool result; + UInt32 propType, propPerm; + OSString *tmpString; + OSObject *propObject = 0; + + if (_ofDict == 0) return false; + + // Verify permissions. + result = IOUserClient::clientHasPrivilege(current_task(), "root"); + if (result != kIOReturnSuccess) { + propPerm = getOFVariablePerm(aKey); + if (propPerm != kOFVariablePermUserWrite) return false; + } + + // Don't allow creation of new properties on old world machines. + if (getPlatform()->getBootROMType() == 0) { + if (_ofDict->getObject(aKey) == 0) return false; + } + + // Make sure the object is of the correct type. 
propType = getOFVariableType(aKey); + switch (propType) { + case kOFVariableTypeBoolean : + propObject = OSDynamicCast(OSBoolean, anObject); + break; + + case kOFVariableTypeNumber : + propObject = OSDynamicCast(OSNumber, anObject); + break; + + case kOFVariableTypeString : + propObject = OSDynamicCast(OSString, anObject); + break; + + case kOFVariableTypeData : + propObject = OSDynamicCast(OSData, anObject); + if (propObject == 0) { + tmpString = OSDynamicCast(OSString, anObject); + if (tmpString != 0) { + propObject = OSData::withBytes(tmpString->getCStringNoCopy(), + tmpString->getLength()); + } + } + break; + } + + if (propObject == 0) return false; + + result = _ofDict->setObject(aKey, propObject); + + if (result) { + if (getPlatform()->getBootROMType() == 0) { + updateOWBootArgs(aKey, propObject); + } + + _ofImageDirty = true; + } + + return result; +} + +IOReturn IODTNVRAM::setProperties(OSObject *properties) +{ + bool result = true; + OSObject *object; + const OSSymbol *key; + OSDictionary *dict; + OSCollectionIterator *iter; + + dict = OSDynamicCast(OSDictionary, properties); + if (dict == 0) return kIOReturnBadArgument; + + iter = OSCollectionIterator::withCollection(dict); + if (iter == 0) return kIOReturnBadArgument; + + while (result) { + key = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (key == 0) break; + + object = dict->getObject(key); + if (object == 0) continue; + + result = setProperty(key, object); + } + + iter->release(); + + if (result) return kIOReturnSuccess; + else return kIOReturnError; +} + +IOReturn IODTNVRAM::readXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length) +{ + if ((_nvramImage == 0) || (_xpramPartitionOffset == 0xFFFFFFFF)) + return kIOReturnNotReady; + + if ((buffer == 0) || (length <= 0) || (offset < 0) || + (offset + length > kIODTNVRAMXPRAMSize)) + return kIOReturnBadArgument; + + bcopy(_nvramImage + _xpramPartitionOffset + offset, buffer, length); + + return kIOReturnSuccess; +} + +IOReturn
IODTNVRAM::writeXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length) +{ + if ((_nvramImage == 0) || (_xpramPartitionOffset == 0xFFFFFFFF)) + return kIOReturnNotReady; + + if ((buffer == 0) || (length <= 0) || (offset < 0) || + (offset + length > kIODTNVRAMXPRAMSize)) + return kIOReturnBadArgument; + + bcopy(buffer, _nvramImage + _xpramPartitionOffset + offset, length); + + _nvramImageDirty = true; + + return kIOReturnSuccess; +} + +IOReturn IODTNVRAM::readNVRAMProperty(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value) +{ + IOReturn err; + + if (getPlatform()->getBootROMType()) + err = readNVRAMPropertyType1(entry, name, value); + else + err = readNVRAMPropertyType0(entry, name, value); + + return err; +} + +IOReturn IODTNVRAM::writeNVRAMProperty(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value) +{ + IOReturn err; + + if (getPlatform()->getBootROMType()) + err = writeNVRAMPropertyType1(entry, name, value); + else + err = writeNVRAMPropertyType0(entry, name, value); + + return err; +} + + + +// Private methods for Open Firmware variable access. + +struct OWVariablesHeader { + UInt16 owMagic; + UInt8 owVersion; + UInt8 owPages; + UInt16 owChecksum; + UInt16 owHere; + UInt16 owTop; + UInt16 owNext; + UInt32 owFlags; + UInt32 owNumbers[9]; + struct { + UInt16 offset; + UInt16 length; + } owStrings[10]; +}; +typedef struct OWVariablesHeader OWVariablesHeader; + +IOReturn IODTNVRAM::initOFVariables(void) +{ + UInt32 cnt, propOffset, propType; + UInt8 *propName, *propData; + UInt32 propNameLength, propDataLength; + const OSSymbol *propSymbol; + OSObject *propObject; + OWVariablesHeader *owHeader; + + if (_ofImage == 0) return kIOReturnNotReady; + + _ofDict = OSDictionary::withCapacity(1); + if (_ofDict == 0) return kIOReturnNoMemory; + + if (getPlatform()->getBootROMType()) { + cnt = 0; + while (cnt < _ofPartitionSize) { + // Break if there is no name. + if (_ofImage[cnt] == '\0') break; + + // Find the length of the name.
+ propName = _ofImage + cnt; + for (propNameLength = 0; (cnt + propNameLength) < _ofPartitionSize; + propNameLength++) { + if (_ofImage[cnt + propNameLength] == '=') break; + } + + // Break if the name goes past the end of the partition. + if ((cnt + propNameLength) >= _ofPartitionSize) break; + cnt += propNameLength + 1; + + propData = _ofImage + cnt; + for (propDataLength = 0; (cnt + propDataLength) < _ofPartitionSize; + propDataLength++) { + if (_ofImage[cnt + propDataLength] == '\0') break; + } + + // Break if the data goes past the end of the partition. + if ((cnt + propDataLength) >= _ofPartitionSize) break; + cnt += propDataLength + 1; + + if (convertPropToObject(propName, propNameLength, + propData, propDataLength, + &propSymbol, &propObject)) { + _ofDict->setObject(propSymbol, propObject); + propSymbol->release(); + propObject->release(); + } + } + + // Create the boot-args property if it is not in the dictionary. + if (_ofDict->getObject("boot-args") == 0) { + propObject = OSString::withCStringNoCopy(""); + if (propObject != 0) { + _ofDict->setObject("boot-args", propObject); + propObject->release(); + } + } + } else { + owHeader = (OWVariablesHeader *)_ofImage; + if (!validateOWChecksum(_ofImage)) { + _ofDict->release(); + _ofDict = 0; + return kIOReturnBadMedia; + } + + cnt = 0; + while (1) { + if (!getOWVariableInfo(cnt++, &propSymbol, &propType, &propOffset)) + break; + + switch (propType) { + case kOFVariableTypeBoolean : + propObject = OSBoolean::withBoolean(owHeader->owFlags & propOffset); + break; + + case kOFVariableTypeNumber : + propObject = OSNumber::withNumber(owHeader->owNumbers[propOffset], 32); + break; + + case kOFVariableTypeString : + propData = _ofImage + owHeader->owStrings[propOffset].offset - + _ofPartitionOffset; + propDataLength = owHeader->owStrings[propOffset].length; + propName = IONew(UInt8, propDataLength + 1); + if (propName != 0) { + strncpy((char *)propName, (const char *)propData, propDataLength); + 
propName[propDataLength] = '\0'; + propObject = OSString::withCString((const char *)propName); + IODelete(propName, UInt8, propDataLength + 1); + } + break; + } + + if (propObject == 0) break; + + _ofDict->setObject(propSymbol, propObject); + propSymbol->release(); + propObject->release(); + } + + // Create the boot-args property. + propSymbol = OSSymbol::withCString("boot-command"); + if (propSymbol != 0) { + propObject = _ofDict->getObject(propSymbol); + if (propObject != 0) { + updateOWBootArgs(propSymbol, propObject); + } + propSymbol->release(); + } + } + + return kIOReturnSuccess; +} + +IOReturn IODTNVRAM::syncOFVariables(void) +{ + bool ok; + UInt32 cnt, length, maxLength; + UInt32 curOffset, tmpOffset, tmpType, tmpDataLength; + UInt8 *buffer, *tmpBuffer, *tmpData; + const OSSymbol *tmpSymbol; + OSObject *tmpObject; + OSBoolean *tmpBoolean; + OSNumber *tmpNumber; + OSString *tmpString; + OSCollectionIterator *iter; + OWVariablesHeader *owHeader, *owHeaderOld; + + if ((_ofImage == 0) || (_ofDict == 0)) return kIOReturnNotReady; + + if (!_ofImageDirty) return kIOReturnSuccess; + + if (getPlatform()->getBootROMType()) { + buffer = tmpBuffer = IONew(UInt8, _ofPartitionSize); + if (buffer == 0) return kIOReturnNoMemory; + bzero(buffer, _ofPartitionSize); + + ok = true; + maxLength = _ofPartitionSize; + + iter = OSCollectionIterator::withCollection(_ofDict); + if (iter == 0) ok = false; + + while (ok) { + tmpSymbol = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (tmpSymbol == 0) break; + + tmpObject = _ofDict->getObject(tmpSymbol); + + length = maxLength; + ok = convertObjectToProp(tmpBuffer, &length, tmpSymbol, tmpObject); + if (ok) { + tmpBuffer += length; + maxLength -= length; + } + } + iter->release(); + + if (ok) { + bcopy(buffer, _ofImage, _ofPartitionSize); + } + + IODelete(buffer, UInt8, _ofPartitionSize); + + if (!ok) return kIOReturnBadArgument; + } else { + buffer = IONew(UInt8, _ofPartitionSize); + if (buffer == 0) return kIOReturnNoMemory; + 
bzero(buffer, _ofPartitionSize); + + owHeader = (OWVariablesHeader *)buffer; + owHeaderOld = (OWVariablesHeader *)_ofImage; + + owHeader->owMagic = owHeaderOld->owMagic; + owHeader->owVersion = owHeaderOld->owVersion; + owHeader->owPages = owHeaderOld->owPages; + + curOffset = _ofPartitionSize; + + ok = true; + cnt = 0; + while (ok) { + if (!getOWVariableInfo(cnt++, &tmpSymbol, &tmpType, &tmpOffset)) + break; + + tmpObject = _ofDict->getObject(tmpSymbol); + + switch (tmpType) { + case kOFVariableTypeBoolean : + tmpBoolean = OSDynamicCast(OSBoolean, tmpObject); + if (tmpBoolean->getValue()) owHeader->owFlags |= tmpOffset; + break; + + case kOFVariableTypeNumber : + tmpNumber = OSDynamicCast(OSNumber, tmpObject); + owHeader->owNumbers[tmpOffset] = tmpNumber->unsigned32BitValue(); + break; + + case kOFVariableTypeString : + tmpString = OSDynamicCast(OSString, tmpObject); + tmpData = (UInt8 *) tmpString->getCStringNoCopy(); + tmpDataLength = tmpString->getLength(); + + if ((curOffset - tmpDataLength) < sizeof(OWVariablesHeader)) { + ok = false; + break; + } + + owHeader->owStrings[tmpOffset].length = tmpDataLength; + curOffset -= tmpDataLength; + owHeader->owStrings[tmpOffset].offset = curOffset + _ofPartitionOffset; + if (tmpDataLength != 0) + bcopy(tmpData, buffer + curOffset, tmpDataLength); + break; + } + } + + if (ok) { + owHeader->owHere = _ofPartitionOffset + sizeof(OWVariablesHeader); + owHeader->owTop = _ofPartitionOffset + curOffset; + owHeader->owNext = 0; + + owHeader->owChecksum = 0; + owHeader->owChecksum = ~generateOWChecksum(buffer); + + bcopy(buffer, _ofImage, _ofPartitionSize); + } + + IODelete(buffer, UInt8, _ofPartitionSize); + + if (!ok) return kIOReturnBadArgument; + } + + _ofImageDirty = false; + _nvramImageDirty = true; + + return kIOReturnSuccess; +} + +struct OFVariable { + char *variableName; + UInt32 variableType; + UInt32 variablePerm; + SInt32 variableOffset; +}; +typedef struct OFVariable OFVariable; + +enum { + kOWVariableOffsetNumber = 
8, + kOWVariableOffsetString = 17 +}; + +OFVariable gOFVariables[] = { + {"little-endian?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 0}, + {"real-mode?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 1}, + {"auto-boot?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 2}, + {"diag-switch?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 3}, + {"fcode-debug?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 4}, + {"oem-banner?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 5}, + {"oem-logo?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 6}, + {"use-nvramrc?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 7}, + {"use-generic?", kOFVariableTypeBoolean, kOFVariablePermUserRead, -1}, + {"default-mac-address?", kOFVariableTypeBoolean, kOFVariablePermUserRead,-1}, + {"real-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 8}, + {"real-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 9}, + {"virt-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 10}, + {"virt-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 11}, + {"load-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 12}, + {"pci-probe-list", kOFVariableTypeNumber, kOFVariablePermUserRead, 13}, + {"pci-probe-mask", kOFVariableTypeNumber, kOFVariablePermUserRead, -1}, + {"screen-#columns", kOFVariableTypeNumber, kOFVariablePermUserRead, 14}, + {"screen-#rows", kOFVariableTypeNumber, kOFVariablePermUserRead, 15}, + {"selftest-#megs", kOFVariableTypeNumber, kOFVariablePermUserRead, 16}, + {"boot-device", kOFVariableTypeString, kOFVariablePermUserRead, 17}, + {"boot-file", kOFVariableTypeString, kOFVariablePermUserRead, 18}, + {"boot-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"console-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"diag-device", kOFVariableTypeString, kOFVariablePermUserRead, 19}, + {"diag-file", kOFVariableTypeString, kOFVariablePermUserRead, 20}, + {"input-device", kOFVariableTypeString, kOFVariablePermUserRead, 
21}, + {"output-device", kOFVariableTypeString, kOFVariablePermUserRead, 22}, + {"input-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"output-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"mouse-device", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"oem-banner", kOFVariableTypeString, kOFVariablePermUserRead, 23}, + {"oem-logo", kOFVariableTypeString, kOFVariablePermUserRead, 24}, + {"nvramrc", kOFVariableTypeString, kOFVariablePermUserRead, 25}, + {"boot-command", kOFVariableTypeString, kOFVariablePermUserRead, 26}, + {"default-client-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-server-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-gateway-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-subnet-mask", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-router-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"boot-script", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"boot-args", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"aapl,pci", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, + {"security-mode", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"security-password", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, + {0, kOFVariableTypeData, kOFVariablePermUserRead, -1} +}; + +UInt32 IODTNVRAM::getOFVariableType(const OSSymbol *propSymbol) const +{ + OFVariable *ofVar; + + ofVar = gOFVariables; + while (1) { + if ((ofVar->variableName == 0) || + propSymbol->isEqualTo(ofVar->variableName)) break; + ofVar++; + } + + return ofVar->variableType; +} + +UInt32 IODTNVRAM::getOFVariablePerm(const OSSymbol *propSymbol) const +{ + OFVariable *ofVar; + + ofVar = gOFVariables; + while (1) { + if ((ofVar->variableName == 0) || + propSymbol->isEqualTo(ofVar->variableName)) break; + ofVar++; + } + + return ofVar->variablePerm; +} + +bool IODTNVRAM::getOWVariableInfo(UInt32 variableNumber, const 
OSSymbol **propSymbol, + UInt32 *propType, UInt32 *propOffset) +{ + OFVariable *ofVar; + + ofVar = gOFVariables; + while (1) { + if (ofVar->variableName == 0) return false; + + if (ofVar->variableOffset == (SInt32) variableNumber) break; + + ofVar++; + } + + *propSymbol = OSSymbol::withCStringNoCopy(ofVar->variableName); + *propType = ofVar->variableType; + + switch (*propType) { + case kOFVariableTypeBoolean : + *propOffset = 1 << (31 - variableNumber); + break; + + case kOFVariableTypeNumber : + *propOffset = variableNumber - kOWVariableOffsetNumber; + break; + + case kOFVariableTypeString : + *propOffset = variableNumber - kOWVariableOffsetString; + break; + } + + return true; +} + +bool IODTNVRAM::convertPropToObject(UInt8 *propName, UInt32 propNameLength, + UInt8 *propData, UInt32 propDataLength, + const OSSymbol **propSymbol, + OSObject **propObject) +{ + UInt32 propType; + const OSSymbol *tmpSymbol; + OSObject *tmpObject; + OSNumber *tmpNumber; + OSString *tmpString; + + // Create the symbol. + propName[propNameLength] = '\0'; + tmpSymbol = OSSymbol::withCString((const char *)propName); + propName[propNameLength] = '='; + if (tmpSymbol == 0) { + return false; + } + + propType = getOFVariableType(tmpSymbol); + + // Create the object. 
+ tmpObject = 0; + switch (propType) { + case kOFVariableTypeBoolean : + if (!strncmp("true", (const char *)propData, propDataLength)) { + tmpObject = kOSBooleanTrue; + } else if (!strncmp("false", (const char *)propData, propDataLength)) { + tmpObject = kOSBooleanFalse; + } + break; + + case kOFVariableTypeNumber : + tmpNumber = OSNumber::withNumber(strtol((const char *)propData, 0, 0), 32); + if (tmpNumber != 0) tmpObject = tmpNumber; + break; + + case kOFVariableTypeString : + tmpString = OSString::withCString((const char *)propData); + if (tmpString != 0) tmpObject = tmpString; + break; + + case kOFVariableTypeData : + tmpObject = unescapeBytesToData(propData, propDataLength); + break; + } + + if (tmpObject == 0) { + tmpSymbol->release(); + return false; + } + + *propSymbol = tmpSymbol; + *propObject = tmpObject; + + return true; +} + +bool IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length, + const OSSymbol *propSymbol, OSObject *propObject) +{ + UInt8 *propName; + UInt32 propNameLength, propDataLength; + UInt32 propType, tmpValue; + OSBoolean *tmpBoolean = 0; + OSNumber *tmpNumber = 0; + OSString *tmpString = 0; + OSData *tmpData = 0; + + propName = (UInt8 *)propSymbol->getCStringNoCopy(); + propNameLength = propSymbol->getLength(); + propType = getOFVariableType(propSymbol); + + // Get the size of the data. 
+ propDataLength = 0xFFFFFFFF; + switch (propType) { + case kOFVariableTypeBoolean : + tmpBoolean = OSDynamicCast(OSBoolean, propObject); + if (tmpBoolean != 0) propDataLength = 5; + break; + + case kOFVariableTypeNumber : + tmpNumber = OSDynamicCast(OSNumber, propObject); + if (tmpNumber != 0) propDataLength = 10; + break; + + case kOFVariableTypeString : + tmpString = OSDynamicCast(OSString, propObject); + if (tmpString != 0) propDataLength = tmpString->getLength(); + break; + + case kOFVariableTypeData : + tmpData = OSDynamicCast(OSData, propObject); + if (tmpData != 0) { + tmpData = escapeDataToData(tmpData); + propDataLength = tmpData->getLength(); + } + break; + } + + // Make sure the propertySize is known and will fit. + if (propDataLength == 0xFFFFFFFF) return false; + if ((propNameLength + propDataLength + 2) > *length) return false; + + // Copy the property name equal sign. + sprintf((char *)buffer, "%s=", propName); + buffer += propNameLength + 1; + + switch (propType) { + case kOFVariableTypeBoolean : + if (tmpBoolean->getValue()) { + strcpy((char *)buffer, "true"); + } else { + strcpy((char *)buffer, "false"); + } + break; + + case kOFVariableTypeNumber : + tmpValue = tmpNumber->unsigned32BitValue(); + if (tmpValue == 0xFFFFFFFF) { + strcpy((char *)buffer, "-1"); + } else if (tmpValue < 1000) { + sprintf((char *)buffer, "%ld", tmpValue); + } else { + sprintf((char *)buffer, "0x%lx", tmpValue); + } + break; + + case kOFVariableTypeString : + strcpy((char *)buffer, tmpString->getCStringNoCopy()); + break; + + case kOFVariableTypeData : + bcopy(tmpData->getBytesNoCopy(), buffer, propDataLength); + tmpData->release(); + break; + } + + propDataLength = strlen((const char *)buffer); + + *length = propNameLength + propDataLength + 2; + + return true; +} + + +UInt16 IODTNVRAM::generateOWChecksum(UInt8 *buffer) +{ + UInt32 cnt, checksum = 0; + UInt16 *tmpBuffer = (UInt16 *)buffer; + + for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) + checksum += 
    tmpBuffer[cnt];

  return checksum % 0x0000FFFF;
}

// Validate the Open Firmware partition checksum: sum the partition as 16-bit
// words, fold the carry back into the low 16 bits once, and complement.
// Returns true when the folded sum comes out to zero (checksum valid).
bool IODTNVRAM::validateOWChecksum(UInt8 *buffer)
{
  UInt32 cnt, checksum, sum = 0;
  UInt16 *tmpBuffer = (UInt16 *)buffer;

  for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++)
    sum += tmpBuffer[cnt];

  // Fold the carry out of the low 16 bits back in, collapsing 0x10000 -> 0xFFFF.
  checksum = (sum >> 16) + (sum & 0x0000FFFF);
  if (checksum == 0x10000) checksum--;
  checksum = (checksum ^ 0x0000FFFF) & 0x0000FFFF;

  return checksum == 0;
}

// Keep Old World "boot-command" and "boot-args" NVRAM variables consistent:
// when one is set, splice or strip the arguments after the "bootr" token in
// the boot command.  No-op if value is not an OSString.
void IODTNVRAM::updateOWBootArgs(const OSSymbol *key, OSObject *value)
{
  bool wasBootArgs, bootr = false;
  UInt32 cnt;
  OSString *tmpString, *bootCommand, *bootArgs = 0;
  UInt8 *bootCommandData, *bootArgsData, *tmpData;
  UInt32 bootCommandDataLength, bootArgsDataLength, tmpDataLength;

  tmpString = OSDynamicCast(OSString, value);
  if (tmpString == 0) return;

  if (key->isEqualTo("boot-command")) {
    wasBootArgs = false;
    bootCommand = tmpString;
  } else if (key->isEqualTo("boot-args")) {
    wasBootArgs = true;
    bootArgs = tmpString;
    bootCommand = OSDynamicCast(OSString, _ofDict->getObject("boot-command"));
    if (bootCommand == 0) return;
  } else return;

  bootCommandData = (UInt8 *)bootCommand->getCStringNoCopy();
  bootCommandDataLength = bootCommand->getLength();

  if (bootCommandData == 0) return;

  // Scan the boot command for the "bootr" token; leave cnt pointing at the
  // first non-space character after it (the start of the argument string).
  for (cnt = 0; cnt < bootCommandDataLength; cnt++) {
    if ((bootCommandData[cnt] == 'b') &&
	!strncmp("bootr", (const char *)bootCommandData + cnt, 5)) {
      cnt += 5;
      while (bootCommandData[cnt] == ' ') cnt++;
      bootr = true;
      break;
    }
  }
  if (!bootr) {
    // No "bootr" in the command, so boot-args cannot apply; drop it.
    _ofDict->removeObject("boot-args");
    return;
  }

  if (wasBootArgs) {
    // boot-args changed: rebuild boot-command as "<prefix through bootr >" + args.
    bootArgsData = (UInt8 *)bootArgs->getCStringNoCopy();
    bootArgsDataLength = bootArgs->getLength();
    if (bootArgsData == 0) return;

    tmpDataLength = cnt + bootArgsDataLength;
    tmpData = IONew(UInt8, tmpDataLength + 1);
    if (tmpData == 0) return;

    // strncpy copies exactly cnt bytes; terminate by hand before strcat.
    strncpy((char *)tmpData, (const char *)bootCommandData, cnt);
    tmpData[cnt] = '\0';
    strcat((char *)tmpData, (const char *)bootArgsData);

    bootCommand = OSString::withCString((const char *)tmpData);
    if (bootCommand != 0) {
      _ofDict->setObject("boot-command", bootCommand);
      bootCommand->release();
    }

    IODelete(tmpData, UInt8, tmpDataLength + 1);
  } else {
    // boot-command changed: re-derive boot-args as the tail after "bootr".
    bootArgs = OSString::withCString((const char *)(bootCommandData + cnt));
    if (bootArgs != 0) {
      _ofDict->setObject("boot-args", bootArgs);
      bootArgs->release();
    }
  }
}


// Private methods for Name Registry access.

enum {
  kMaxNVNameLength = 4,
  kMaxNVDataLength = 8
};

// On-NVRAM record layout; mac68k alignment keeps it byte-packed to match
// the firmware's image format.
#pragma options align=mac68k
struct NVRAMProperty
{
  IONVRAMDescriptor header;
  UInt8             nameLength;
  UInt8             name[ kMaxNVNameLength ];
  UInt8             dataLength;
  UInt8             data[ kMaxNVDataLength ];
};
#pragma options align=reset

// Search the name-registry image for a record matching hdr.  On a hit,
// *where is the record's offset and true is returned.  On a miss, *where is
// the offset of the first free slot (or 0 if the image is full) and false
// is returned.  The first 16-bit word of the image holds the used length.
bool IODTNVRAM::searchNVRAMProperty(IONVRAMDescriptor *hdr, UInt32 *where)
{
  UInt32 offset;
  SInt32 nvEnd;

  nvEnd = *((UInt16 *)_nrImage);
  if(getPlatform()->getBootROMType()) {
    // on NewWorld, offset to partition start
    nvEnd -= 0x100;
  } else {
    // on old world, absolute
    nvEnd -= _nrPartitionOffset;
  }
  // Out-of-range length means an uninitialized image; treat as empty
  // (records start at offset 2, after the length word).
  if((nvEnd < 0) || (nvEnd >= kIODTNVRAMNameRegistrySize))
    nvEnd = 2;

  offset = 2;
  while ((offset + sizeof(NVRAMProperty)) <= (UInt32)nvEnd) {
    if (bcmp(_nrImage + offset, hdr, sizeof(*hdr)) == 0) {
      *where = offset;
      return true;
    }
    offset += sizeof(NVRAMProperty);
  }

  if ((nvEnd + sizeof(NVRAMProperty)) <= kIODTNVRAMNameRegistrySize)
    *where = nvEnd;
  else
    *where = 0;

  return false;
}

// Read the (name, value) stored for a registry entry in the Type-0
// (fixed-record) name registry.  Returns kIOReturnNoResources when no record
// matches, kIOReturnNoMemory if allocation of the result objects fails.
IOReturn IODTNVRAM::readNVRAMPropertyType0(IORegistryEntry *entry,
					   const OSSymbol **name,
					   OSData **value)
{
  IONVRAMDescriptor hdr;
  NVRAMProperty     *prop;
  IOByteCount       length;
  UInt32            offset;
  IOReturn          err;
  char              nameBuf[kMaxNVNameLength + 1];

  if (_nrImage == 0) return kIOReturnUnsupported;
  if ((entry == 0) || (name == 0) || (value == 0)) return kIOReturnBadArgument;

  err = IODTMakeNVDescriptor(entry, &hdr);
  if (err != kIOReturnSuccess) return err;

  if (searchNVRAMProperty(&hdr, &offset)) {
    prop = (NVRAMProperty *)(_nrImage + offset);

    // Clamp lengths defensively; the record's fields may exceed the fixed
    // capacity if the image is corrupt.
    length = prop->nameLength;
    if (length > kMaxNVNameLength) length = kMaxNVNameLength;
    strncpy(nameBuf, (const char *)prop->name, length);
    nameBuf[length] = 0;
    *name = OSSymbol::withCString(nameBuf);

    length = prop->dataLength;
    if (length > kMaxNVDataLength) length = kMaxNVDataLength;
    *value = OSData::withBytes(prop->data, length);

    if ((*name != 0) && (*value != 0)) return kIOReturnSuccess;
    else return kIOReturnNoMemory;
  }

  return kIOReturnNoResources;
}

// Write (name, value) for a registry entry into the Type-0 name registry,
// creating a new record at the free slot if none exists.  Updates the image
// length word (in the partition's native offset base) when appending.
IOReturn IODTNVRAM::writeNVRAMPropertyType0(IORegistryEntry *entry,
					    const OSSymbol *name,
					    OSData *value)
{
  IONVRAMDescriptor hdr;
  NVRAMProperty     *prop;
  IOByteCount       nameLength;
  IOByteCount       dataLength;
  UInt32            offset;
  IOReturn          err;
  UInt16            nvLength;
  bool              exists;

  if (_nrImage == 0) return kIOReturnUnsupported;
  if ((entry == 0) || (name == 0) || (value == 0)) return kIOReturnBadArgument;

  nameLength = name->getLength();
  dataLength = value->getLength();
  if (nameLength > kMaxNVNameLength) return kIOReturnNoSpace;
  if (dataLength > kMaxNVDataLength) return kIOReturnNoSpace;

  err = IODTMakeNVDescriptor(entry, &hdr);
  if (err != kIOReturnSuccess) return err;

  exists = searchNVRAMProperty(&hdr, &offset);
  // offset == 0 from a failed search means the image is full.
  if (offset == 0) return kIOReturnNoMemory;

  prop = (NVRAMProperty *)(_nrImage + offset);
  if (!exists) bcopy(&hdr, &prop->header, sizeof(hdr));

  prop->nameLength = nameLength;
  bcopy(name->getCStringNoCopy(), prop->name, nameLength);
  prop->dataLength = dataLength;
  bcopy(value->getBytesNoCopy(), prop->data, dataLength);

  if (!exists) {
    // Grow the recorded image length; stored with the same base offset
    // convention searchNVRAMProperty subtracts (0x100 on NewWorld).
    nvLength = offset + sizeof(NVRAMProperty);
    if (getPlatform()->getBootROMType())
      nvLength += 0x100;
    else
      nvLength += _nrPartitionOffset;
    *((UInt16 *)_nrImage) = nvLength;
  }

  _nvramImageDirty = true;

  return err;
}

// Expand run-length-escaped NVRAM bytes into an OSData.  Escape format:
// 0xFF is followed by a count byte whose low 7 bits are the run length and
// whose high bit selects the fill value (0xFF or 0x00).  A count byte of
// 0x00 after 0xFF marks malformed input and aborts (returns 0).
OSData *IODTNVRAM::unescapeBytesToData(UInt8 *bytes, UInt32 length)
{
  OSData *data = 0;
  UInt32 totalLength = 0;
  UInt32 cnt, cnt2;
  UInt8  byte;
  bool   ok;

  // Calculate the actual length of the data.
  ok = true;
  totalLength = 0;
  for (cnt = 0; cnt < length;) {
    byte = bytes[cnt++];
    if (byte == 0xFF) {
      byte = bytes[cnt++];
      if (byte == 0x00) {
        ok = false;
        break;
      }
      cnt2 = byte & 0x7F;
    } else
      cnt2 = 1;
    totalLength += cnt2;
  }

  if (ok) {
    // Create an empty OSData of the correct size.
    data = OSData::withCapacity(totalLength);
    if (data != 0) {
      for (cnt = 0; cnt < length;) {
        byte = bytes[cnt++];
        if (byte == 0xFF) {
          byte = bytes[cnt++];
          cnt2 = byte & 0x7F;
          byte = (byte & 0x80) ? 0xFF : 0x00;
        } else
          cnt2 = 1;
        data->appendByte(byte, cnt2);
      }
    }
  }

  return data;
}

// Inverse of unescapeBytesToData: run-length-escape 0x00/0xFF runs (up to
// 0x7F bytes per escape) and append a trailing 0 terminator.  Returns 0 on
// allocation failure.
OSData * IODTNVRAM::escapeDataToData(OSData * value)
{
  OSData * result;
  UInt8 *  start;
  UInt8 *  end;
  UInt8 *  where;
  UInt8    byte;
  bool     ok = true;

  where = (UInt8 *) value->getBytesNoCopy();
  end = where + value->getLength();

  result = OSData::withCapacity(end - where);
  if (!result)
    return result;

  while (where < end) {
    start = where;
    byte = *where++;
    if ((byte == 0x00) || (byte == 0xFF)) {
      // Extend the run while it matches, capped at 0x7F bytes.
      for (;
           ((where - start) < 0x80) && (where < end) && (byte == *where);
           where++) {}
      ok &= result->appendByte(0xff, 1);
      // Encode fill value in bit 7, run length in bits 0-6.
      byte = (byte & 0x80) | (where - start);
    }
    ok &= result->appendByte(byte, 1);
  }
  ok &= result->appendByte(0, 1);

  if (!ok) {
    result->release();
    result = 0;
  }

  return result;
}

// Look up the Type-1 (path-string) registry property for an entry.  The
// backing OSData is a sequence of NUL-terminated strings:
// path, name, escaped-value, repeated.
IOReturn IODTNVRAM::readNVRAMPropertyType1(IORegistryEntry *entry,
					   const OSSymbol **name,
					   OSData **value)
{
  IOReturn err = kIOReturnNoResources;
  OSData   *data;
  UInt8    *start;
  UInt8    *end;
  UInt8    *where;
  UInt8    *nvPath = 0;
  UInt8    *nvName = 0;
  UInt8    byte;

  if (_ofDict == 0) return err;
  data = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey));
  if (data == 0) return err;

  start = (UInt8 *) data->getBytesNoCopy();
  end = start + data->getLength();

  where = start;
  while (where < end) {
    byte = *(where++);
    if (byte)
      continue;

    // Each NUL ends one field; fields cycle path -> name -> value.
    if (nvPath == 0)
      nvPath = start;
    else if (nvName == 0)
      nvName = start;
    else if (entry ==
             // NOTE(review): fromPath() returns a retained entry that is not
             // released here -- looks like a reference leak; confirm against
             // IORegistryEntry ownership rules.
             IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane)) {
      *name = OSSymbol::withCString((const char *) nvName);
      *value = unescapeBytesToData(start, where - start - 1);
      if ((*name != 0) && (*value != 0))
        err = kIOReturnSuccess;
      else
        err = kIOReturnNoMemory;
      break;
    } else
      nvPath = nvName = 0;

    start = where;
  }

  return err;
}

// Store a Type-1 registry property: remove any existing record for this
// entry, then append "<device-tree path>\0<prop name>\0<escaped value>" to
// the backing OSData and mark the OF image dirty.
IOReturn IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry,
					    const OSSymbol *propName,
					    OSData *value)
{
  OSData       *oldData;
  OSData       *data = 0;
  UInt8        *start;
  UInt8        *propStart;
  UInt8        *end;
  UInt8        *where;
  UInt8        *nvPath = 0;
  UInt8        *nvName = 0;
  const char * comp;
  const char * name;
  UInt8        byte;
  bool         ok = true;

  if (_ofDict == 0) return kIOReturnNoResources;

  // copy over existing properties for other entries

  oldData = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey));
  if (oldData) {
    start = (UInt8 *) oldData->getBytesNoCopy();
    end = start + oldData->getLength();

    propStart = start;
    where = start;
    while (where < end) {
      byte = *(where++);
      if (byte)
        continue;
      if (nvPath == 0)
        nvPath = start;
      else if (nvName == 0)
        nvName = start;
      else if (entry ==
               // NOTE(review): same apparent fromPath() retain leak as in
               // readNVRAMPropertyType1; confirm ownership semantics.
               IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane)) {
        // delete old property (nvPath -> where)
        data = OSData::withBytes(propStart, nvPath - propStart);
        if (data)
          ok &= data->appendBytes(where, end - where);
        break;
      } else
        nvPath = nvName = 0;

      start = where;
    }
  }

  // make the new property

  if (!data) {
    if (oldData)
      data = OSData::withData(oldData);
    else
      data = OSData::withCapacity(16);
    if (!data)
      return kIOReturnNoMemory;
  }

  // get entries in path
  OSArray *array = OSArray::withCapacity(5);
  if (!array) {
    data->release();
    return kIOReturnNoMemory;
  }
  do
    array->setObject(entry);
  while ((entry = entry->getParentEntry(gIODTPlane)));

  // append path, skipping the root entries (count - 3 drops the top of
  // the device tree from the emitted path).
  for (int i = array->getCount() - 3;
       (entry = (IORegistryEntry *) array->getObject(i));
       i--) {

    name = entry->getName(gIODTPlane);
    comp = entry->getLocation(gIODTPlane);
    if( comp && (0 == strcmp("pci", name))
	&& (0 == strcmp("80000000", comp))) {
      // yosemite hack
      comp = "/pci@80000000";
    } else {
      if (comp)
        ok &= data->appendBytes("/@", 2);
      else {
        if (!name)
          continue;
        ok &= data->appendByte('/', 1);
        comp = name;
      }
    }
    ok &= data->appendBytes(comp, strlen(comp));
  }
  ok &= data->appendByte(0, 1);
  array->release();

  // append prop name (+1 carries the NUL terminator along)
  ok &= data->appendBytes(propName->getCStringNoCopy(), propName->getLength() + 1);

  // append escaped data
  oldData = escapeDataToData(value);
  ok &= (oldData != 0);
  if (ok)
    ok &= data->appendBytes(oldData);

  if (ok) {
    ok = _ofDict->setObject(_registryPropertiesKey, data);
    if (ok)
      _ofImageDirty = true;
  }
  data->release();

  return ok ? kIOReturnSuccess : kIOReturnNoMemory;
}
diff --git a/iokit/Kernel/IOPMPowerSource.cpp b/iokit/Kernel/IOPMPowerSource.cpp
new file mode 100644
index 000000000..c91b1ea3d
--- /dev/null
+++ b/iokit/Kernel/IOPMPowerSource.cpp
@@ -0,0 +1,166 @@
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +#define super OSObject + +OSDefineMetaClassAndStructors(IOPMPowerSource, OSObject) + +// ********************************************************************************** +// init +// +// ********************************************************************************** +bool IOPMPowerSource::init (unsigned short whichBatteryIndex) +{ + if (!super::init ()) + return false; + + bBatteryIndex = whichBatteryIndex; + nextInList = 0; + + return true; +} + +// ********************************************************************************** +// capacityPercentRemaining +// +// ********************************************************************************** +unsigned long IOPMPowerSource::capacityPercentRemaining (void) +{ + unsigned long percentage = 0; + + if (bMaxCapacity > 0) + percentage = (bCurCapacity * 100) / bMaxCapacity; + + // always return a non-zero value unless the real capacity IS zero. 
+ if (percentage == 0 && bCurCapacity > 0) + percentage = 1; + + return percentage; +} + +// ********************************************************************************** +// atWarnLevel +// +// ********************************************************************************** +bool IOPMPowerSource::atWarnLevel (void) +{ + return bFlags & kBatteryAtWarn; +} + +// ********************************************************************************** +// acConnected +// +// ********************************************************************************** +bool IOPMPowerSource::acConnected (void) +{ + return bFlags & kACInstalled; +} + +// ********************************************************************************** +// depleted +// +// ********************************************************************************** +bool IOPMPowerSource::depleted (void) +{ + return bFlags & kBatteryDepleted; +} + +// ********************************************************************************** +// isInstalled +// +// ********************************************************************************** +bool IOPMPowerSource::isInstalled (void) +{ + return bFlags & kBatteryInstalled; +} + +// ********************************************************************************** +// isCharging +// +// ********************************************************************************** +bool IOPMPowerSource::isCharging (void) +{ + return bFlags & kBatteryCharging; +} + +// ********************************************************************************** +// timeRemaining +// +// ********************************************************************************** +unsigned long IOPMPowerSource::timeRemaining (void) +{ + return bTimeRemaining; +} + +// ********************************************************************************** +// maxCapacity +// +// ********************************************************************************** +unsigned long IOPMPowerSource::maxCapacity 
(void) +{ + return bMaxCapacity; +} + +// ********************************************************************************** +// curCapacity +// +// ********************************************************************************** +unsigned long IOPMPowerSource::curCapacity (void) +{ + return bCurCapacity; +} + +// ********************************************************************************** +// currentDrawn +// +// ********************************************************************************** +long IOPMPowerSource::currentDrawn (void) +{ + return bCurrent; +} + +// ********************************************************************************** +// voltage +// +// ********************************************************************************** + +unsigned long IOPMPowerSource::voltage (void) +{ + return bVoltage; +} + +// ********************************************************************************** +// updateStatus +// +// ********************************************************************************** + +void IOPMPowerSource::updateStatus (void) +{ + +} + + + + + diff --git a/iokit/Kernel/IOPMPowerSourceList.cpp b/iokit/Kernel/IOPMPowerSourceList.cpp new file mode 100644 index 000000000..6a6fcb865 --- /dev/null +++ b/iokit/Kernel/IOPMPowerSourceList.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#define super OSObject +OSDefineMetaClassAndStructors(IOPMPowerSourceList,OSObject) + +//********************************************************************************* +// init +// +//********************************************************************************* +void IOPMPowerSourceList::initialize ( void ) +{ + firstItem = NULL; + length = 0; +} + +//********************************************************************************* +// addToList +// +//********************************************************************************* + +IOReturn IOPMPowerSourceList::addToList ( IOPMPowerSource * newPowerSource ) +{ + IOPMPowerSource * nextPowerSource; + nextPowerSource = firstItem; // Is new object already in the list? 
+ while ( nextPowerSource != NULL ) { + if ( nextPowerSource == newPowerSource ) { + return IOPMNoErr; // yes, just return + } + nextPowerSource = nextInList(nextPowerSource); + } + newPowerSource->nextInList = firstItem; // add it to list + firstItem = newPowerSource; + length += 1; + return IOPMNoErr; +} + + +//********************************************************************************* +// firstInList +// +//********************************************************************************* + +IOPMPowerSource * IOPMPowerSourceList::firstInList ( void ) +{ + return firstItem; +} + +//********************************************************************************* +// nextInList +// +//********************************************************************************* + +IOPMPowerSource * IOPMPowerSourceList::nextInList ( IOPMPowerSource * currentItem ) +{ + if ( currentItem != NULL ) { + return (currentItem->nextInList); + } + return NULL; +} + +//********************************************************************************* +// numberOfItems +// +//********************************************************************************* + +unsigned long IOPMPowerSourceList::numberOfItems ( void ) +{ + return length; +} + +//********************************************************************************* +// removeFromList +// +// Find the item in the list, unlink it, and free it. 
+//********************************************************************************* + +IOReturn IOPMPowerSourceList::removeFromList ( IOPMPowerSource * theItem ) +{ + IOPMPowerSource * item = firstItem; + IOPMPowerSource * temp; + + if ( item != NULL ) { + if ( item == theItem ) { + firstItem = item->nextInList; + length--; + item->release(); + return IOPMNoErr; + } + while ( item->nextInList != NULL ) { + if ( item->nextInList == theItem ) { + temp = item->nextInList; + item->nextInList = temp->nextInList; + length--; + temp->release(); + return IOPMNoErr; + } + item = item->nextInList; + } + } + return IOPMNoErr; +} + + +//********************************************************************************* +// free +// +// Free all items in the list, and then free the list itself +//********************************************************************************* + +void IOPMPowerSourceList::free (void ) +{ + IOPMPowerSource * next = firstItem; + + while ( next != NULL ) { + firstItem = next->nextInList; + length--; + next->release(); + next = firstItem; + } +super::free(); +} + + + + + + + diff --git a/iokit/Kernel/IOPMchangeNoteList.cpp b/iokit/Kernel/IOPMchangeNoteList.cpp new file mode 100644 index 000000000..6c529ca80 --- /dev/null +++ b/iokit/Kernel/IOPMchangeNoteList.cpp @@ -0,0 +1,219 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +#define super OSObject +OSDefineMetaClassAndStructors(IOPMchangeNoteList,OSObject) + +//********************************************************************************* +// init +// +//********************************************************************************* +void IOPMchangeNoteList::initialize ( void ) +{ + long i; + + firstInList = 0; + firstUnused = 0; + for ( i = 0; i < IOPMMaxChangeNotes; i++ ) { + changeNote[i].flags = IOPMNotInUse; + } +} + +//********************************************************************************* +// createChangeNote +// +//********************************************************************************* + +long IOPMchangeNoteList::createChangeNote ( void ) +{ + unsigned long i, j; + + i = increment(firstUnused); + if ( firstInList == i ) { + return -1; + } + j = firstUnused; + firstUnused = i; + + return j; +} + +//********************************************************************************* +// currentChange +// +// Return the ordinal of the first change note in the list. +// If the list is empty, return -1. +//********************************************************************************* + +long IOPMchangeNoteList::currentChange ( void ) +{ + if ( firstUnused == firstInList ) { + return -1; + } + else { + return firstInList; + } +} + +//********************************************************************************* +// latestChange +// +// Return the ordinal of the last change note in the list. 
+// If the list is empty, return -1. +//********************************************************************************* + +long IOPMchangeNoteList::latestChange ( void ) +{ + if ( firstUnused == firstInList ) { + return -1; + } + else { + return decrement(firstUnused); + } +} + +//********************************************************************************* +// releaseHeadChangeNote +// +// Mark the head node unused. +// This happens when the first change in the list is completely processed. +// That is, all interested parties have acknowledged it, and power is settled +// at the new level. +//********************************************************************************* + +IOReturn IOPMchangeNoteList::releaseHeadChangeNote ( void ) +{ + changeNote[firstInList].flags = IOPMNotInUse; + firstInList = increment(firstInList); + return IOPMNoErr; +} + +//********************************************************************************* +// releaseTailChangeNote +// +// Mark the tail node unused. +// This happens when a power change is queued up after another which has +// not yet been started, and the second one supercedes the first. The data in +// the second is copied into the first and the the second is released. This +// collapses the first change out of the list. 
+//********************************************************************************* + +IOReturn IOPMchangeNoteList::releaseTailChangeNote ( void ) +{ + firstUnused = decrement(firstUnused); + changeNote[firstUnused].flags = IOPMNotInUse; + return IOPMNoErr; +} + +//********************************************************************************* +// changeNoteInUse +// +//********************************************************************************* + +bool IOPMchangeNoteList::changeNoteInUse ( unsigned long ordinal ) +{ + if ( changeNote[ordinal].flags == IOPMNotInUse ) { + return false; + } + else { + return true; + } +} + +//********************************************************************************* +// nextChangeNote +// +// If the parameter corresponds to the most recent power change notification +// passed to drivers and children, return -1. Otherwise, return the array +// position of the next notification in the circular list. +//********************************************************************************* + +long IOPMchangeNoteList::nextChangeNote ( unsigned long ordinal ) +{ + unsigned long i; + + i = increment(ordinal); + if ( i == firstUnused) { + return -1; + } + return ( i ); +} + +//********************************************************************************* +// increment +// +// Increment the parameter mod the circular list size and return it. +//********************************************************************************* + +unsigned long IOPMchangeNoteList::increment ( unsigned long ordinal ) +{ + if ( ordinal == (IOPMMaxChangeNotes - 1) ) { + return 0; + } + else { + return ordinal + 1; + } +} + +//********************************************************************************* +// decrement +// +// Decrement the parameter mod the circular list size and return it. 
+//********************************************************************************* + +unsigned long IOPMchangeNoteList::decrement ( unsigned long ordinal ) +{ + if ( ordinal == 0 ) { + return IOPMMaxChangeNotes - 1; + } + else { + return ordinal - 1; + } +} + +//********************************************************************************* +// previousChangeNote +// +// If the parameter corresponds to the oldest power change notification +// passed to drivers and children, return -1. Otherwise, return the array +// position of the previous notification in the circular list. +//********************************************************************************* + +long IOPMchangeNoteList::previousChangeNote ( unsigned long ordinal ) +{ + if ( ordinal == firstInList ) { + return -1; + } + return decrement(ordinal); +} + +//********************************************************************************* +// listEmpty +// +//********************************************************************************* + +bool IOPMchangeNoteList::listEmpty ( void ) +{ + return ( firstInList == firstUnused ) ; +} diff --git a/iokit/Kernel/IOPMinformee.cpp b/iokit/Kernel/IOPMinformee.cpp new file mode 100644 index 000000000..8f7452cf7 --- /dev/null +++ b/iokit/Kernel/IOPMinformee.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +#define super OSObject +OSDefineMetaClassAndStructors(IOPMinformee,OSObject) + +//********************************************************************************* +// constructor +// +//********************************************************************************* + +void IOPMinformee::initialize ( IOService * theObject ) +{ + whatObject = theObject; + timer = 0; + whatObject->retain(); + +} + + +//********************************************************************************* +// free +// +//********************************************************************************* + +void IOPMinformee::free (void ) +{ + whatObject->release(); + super::free(); +} + diff --git a/iokit/Kernel/IOPMinformeeList.cpp b/iokit/Kernel/IOPMinformeeList.cpp new file mode 100644 index 000000000..888dca936 --- /dev/null +++ b/iokit/Kernel/IOPMinformeeList.cpp @@ -0,0 +1,168 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#define super OSObject +OSDefineMetaClassAndStructors(IOPMinformeeList,OSObject) + +//********************************************************************************* +// init +// +//********************************************************************************* +void IOPMinformeeList::initialize ( void ) +{ + firstItem = NULL; + length = 0; +} + +//********************************************************************************* +// addToList +// +//********************************************************************************* + +IOReturn IOPMinformeeList::addToList ( IOPMinformee * newInformee ) +{ + IOPMinformee * nextInformee; + nextInformee = firstItem; // Is new object already in the list? 
+ while ( nextInformee != NULL ) { + if ( nextInformee->whatObject == newInformee->whatObject ) { + return IOPMNoErr; // yes, just return + } + nextInformee = nextInList(nextInformee); + } + newInformee->nextInList = firstItem; // add it to list + firstItem = newInformee; + length += 1; + return IOPMNoErr; +} + + +//********************************************************************************* +// firstInList +// +//********************************************************************************* + +IOPMinformee * IOPMinformeeList::firstInList ( void ) +{ + return firstItem; +} + +//********************************************************************************* +// nextInList +// +//********************************************************************************* + +IOPMinformee * IOPMinformeeList::nextInList ( IOPMinformee * currentItem ) +{ + if ( currentItem != NULL ) { + return (currentItem->nextInList); + } + return NULL; +} + +//********************************************************************************* +// numberOfItems +// +//********************************************************************************* + +unsigned long IOPMinformeeList::numberOfItems ( void ) +{ + return length; +} + +//********************************************************************************* +// findItem +// +// Look through the list for the one which points to the object identified +// by the parameter. Return a pointer to the list item or NULL. 
+//********************************************************************************* + +IOPMinformee * IOPMinformeeList::findItem ( IOService * driverOrChild ) +{ + IOPMinformee * nextObject; + + nextObject = firstInList(); + while ( nextObject != NULL ) { + if ( nextObject->whatObject == driverOrChild ) { + return nextObject; + } + nextObject = nextInList(nextObject); + } + return NULL; +} + + +//********************************************************************************* +// removeFromList +// +// Find the item in the list, unlink it, and free it. +//********************************************************************************* + +IOReturn IOPMinformeeList::removeFromList ( IOService * theItem ) +{ + IOPMinformee * item = firstItem; + IOPMinformee * temp; + + if ( item != NULL ) { + if ( item->whatObject == theItem ) { + firstItem = item->nextInList; + length--; + item->release(); + return IOPMNoErr; + } + while ( item->nextInList != NULL ) { + if ( item->nextInList->whatObject == theItem ) { + temp = item->nextInList; + item->nextInList = temp->nextInList; + length--; + temp->release(); + return IOPMNoErr; + } + item = item->nextInList; + } + } + return IOPMNoErr; +} + + +//********************************************************************************* +// free +// +// Free all items in the list, and then free the list itself +//********************************************************************************* + +void IOPMinformeeList::free (void ) +{ + IOPMinformee * next = firstItem; + + while ( next != NULL ) { + firstItem = next->nextInList; + length--; + next->release(); + next = firstItem; + } +super::free(); +} + diff --git a/iokit/Kernel/IOPMpmChild.cpp b/iokit/Kernel/IOPMpmChild.cpp new file mode 100644 index 000000000..0b34a9e45 --- /dev/null +++ b/iokit/Kernel/IOPMpmChild.cpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +#define super IOPMinformee +OSDefineMetaClassAndStructors(IOPMpmChild,IOPMinformee) + +//********************************************************************************* +// constructor +// +//********************************************************************************* + +void IOPMpmChild::initialize ( IOService * theObject ) +{ + desiredDomainState = 0; + super::initialize(theObject); +} diff --git a/iokit/Kernel/IOPlatformExpert.cpp b/iokit/Kernel/IOPlatformExpert.cpp new file mode 100644 index 000000000..7faa16986 --- /dev/null +++ b/iokit/Kernel/IOPlatformExpert.cpp @@ -0,0 +1,1059 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * HISTORY + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + + +extern "C" { +#include +#include +} + +void printDictionaryKeys (OSDictionary * inDictionary, char * inMsg); +static void getCStringForObject (OSObject * inObj, char * outStr); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IOService + +OSDefineMetaClassAndStructors(IOPlatformExpert, IOService) + +OSMetaClassDefineReservedUnused(IOPlatformExpert, 0); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 1); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 2); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 3); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 4); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 5); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 6); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 7); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 8); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 9); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 10); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 11); + +static IOPlatformExpert * gIOPlatform; + +OSSymbol * gPlatformInterruptControllerName; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOPlatformExpert::attach( IOService * provider ) +{ + + if( !super::attach( provider )) + return( false); + + return( true); +} + +bool IOPlatformExpert::start( IOService * provider ) 
+{ + IORangeAllocator * physicalRanges; + OSData * busFrequency; + + if (!super::start(provider)) + return false; + + // Correct the bus frequency in the device tree. + busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + provider->setProperty("clock-frequency", busFrequency); + busFrequency->release(); + + gPlatformInterruptControllerName = (OSSymbol *)OSSymbol::withCStringNoCopy("IOPlatformInterruptController"); + + physicalRanges = IORangeAllocator::withRange(0xffffffff, 1, 16, + IORangeAllocator::kLocking); + assert(physicalRanges); + setProperty("Platform Memory Ranges", physicalRanges); + + setPlatform( this ); + gIOPlatform = this; + + PMInstantiatePowerDomains(); + + return( configure(provider) ); +} + +bool IOPlatformExpert::configure( IOService * provider ) +{ + OSSet * topLevel; + OSDictionary * dict; + IOService * nub; + + topLevel = OSDynamicCast( OSSet, getProperty("top-level")); + + if( topLevel) { + while( (dict = OSDynamicCast( OSDictionary, + topLevel->getAnyObject()))) { + dict->retain(); + topLevel->removeObject( dict ); + nub = createNub( dict ); + if( 0 == nub) + continue; + dict->release(); + nub->attach( this ); + nub->registerService(); + } + } + + return( true ); +} + +IOService * IOPlatformExpert::createNub( OSDictionary * from ) +{ + IOService * nub; + + nub = new IOPlatformDevice; + if(nub) { + if( !nub->init( from )) { + nub->release(); + nub = 0; + } + } + return( nub); +} + +bool IOPlatformExpert::compareNubName( const IOService * nub, + OSString * name, OSString ** matched = 0 ) const +{ + return( nub->IORegistryEntry::compareName( name, matched )); +} + +IOReturn IOPlatformExpert::getNubResources( IOService * nub ) +{ + return( kIOReturnSuccess ); +} + +long IOPlatformExpert::getBootROMType(void) +{ + return _peBootROMType; +} + +long IOPlatformExpert::getChipSetType(void) +{ + return _peChipSetType; +} + +long IOPlatformExpert::getMachineType(void) +{ + return _peMachineType; +} + +void 
IOPlatformExpert::setBootROMType(long peBootROMType) +{ + _peBootROMType = peBootROMType; +} + +void IOPlatformExpert::setChipSetType(long peChipSetType) +{ + _peChipSetType = peChipSetType; +} + +void IOPlatformExpert::setMachineType(long peMachineType) +{ + _peMachineType = peMachineType; +} + +bool IOPlatformExpert::getMachineName( char * /*name*/, int /*maxLength*/) +{ + return( false ); +} + +bool IOPlatformExpert::getModelName( char * /*name*/, int /*maxLength*/) +{ + return( false ); +} + +IORangeAllocator * IOPlatformExpert::getPhysicalRangeAllocator(void) +{ + return(OSDynamicCast(IORangeAllocator, + getProperty("Platform Memory Ranges"))); +} + +int (*PE_halt_restart)(unsigned int type) = 0; + +int IOPlatformExpert::haltRestart(unsigned int type) +{ + if (PE_halt_restart) return (*PE_halt_restart)(type); + else return -1; +} + +void IOPlatformExpert::sleepKernel(void) +{ +#if 0 + long cnt; + boolean_t intState; + + intState = ml_set_interrupts_enabled(false); + + for (cnt = 0; cnt < 10000; cnt++) { + IODelay(1000); + } + + ml_set_interrupts_enabled(intState); +#else +// PE_initialize_console(0, kPEDisableScreen); + + IOCPUSleepKernel(); + +// PE_initialize_console(0, kPEEnableScreen); +#endif +} + +long IOPlatformExpert::getGMTTimeOfDay(void) +{ + return(0); +} + +void IOPlatformExpert::setGMTTimeOfDay(long secs) +{ +} + + +IOReturn IOPlatformExpert::getConsoleInfo( PE_Video * consoleInfo ) +{ + return( PE_current_console( consoleInfo)); +} + +IOReturn IOPlatformExpert::setConsoleInfo( PE_Video * consoleInfo, + unsigned int op) +{ + return( PE_initialize_console( consoleInfo, op )); +} + +IOReturn IOPlatformExpert::registerInterruptController(OSSymbol *name, IOInterruptController *interruptController) +{ + publishResource(name, interruptController); + + return kIOReturnSuccess; +} + +IOInterruptController *IOPlatformExpert::lookUpInterruptController(OSSymbol *name) +{ + IOInterruptController *interruptController; + IOService *service; + + service = 
waitForService(resourceMatching(name)); + + interruptController = OSDynamicCast(IOInterruptController, service->getProperty(name)); + + return interruptController; +} + + +void IOPlatformExpert::setCPUInterruptProperties(IOService *service) +{ + IOCPUInterruptController *controller; + + controller = OSDynamicCast(IOCPUInterruptController, waitForService(serviceMatching("IOCPUInterruptController"))); + if (controller) controller->setCPUInterruptProperties(service); +} + +bool IOPlatformExpert::atInterruptLevel(void) +{ + return ml_at_interrupt_context(); +} + +bool IOPlatformExpert::platformAdjustService(IOService */*service*/) +{ + return true; +} + + +//********************************************************************************* +// PMLog +// +//********************************************************************************* + +void IOPlatformExpert::PMLog(const char * who,unsigned long event,unsigned long param1, unsigned long param2) +{ + if( gIOKitDebug & kIOLogPower) { + kprintf("%s %02d %08x %08x\n",who,event,param1,param2); +// IOLog("%s %02d %08x %08x\n",who,event,param1,param2); + } +} + + +//********************************************************************************* +// PMInstantiatePowerDomains +// +// In this vanilla implementation, a Root Power Domain is instantiated. +// All other objects which register will be children of this Root. +// Where this is inappropriate, PMInstantiatePowerDomains is overridden +// in a platform-specific subclass. +//********************************************************************************* + +void IOPlatformExpert::PMInstantiatePowerDomains ( void ) +{ + root = new IOPMrootDomain; + root->init(); + root->attach(this); + root->start(this); + root->youAreRoot(); +} + + +//********************************************************************************* +// PMRegisterDevice +// +// In this vanilla implementation, all callers are made children of the root power domain. 
+// Where this is inappropriate, PMRegisterDevice is overridden in a platform-specific subclass.
+//*********************************************************************************
+
+void IOPlatformExpert::PMRegisterDevice(IOService * theNub, IOService * theDevice)
+{
+  root->addPowerChild ( theDevice );
+}
+
+//*********************************************************************************
+// hasPMFeature
+//
+// True when any of the bits in featureMask are set in the platform's
+// _pePMFeatures word.
+//*********************************************************************************
+
+bool IOPlatformExpert::hasPMFeature (unsigned long featureMask)
+{
+  return ((_pePMFeatures & featureMask) != 0);
+}
+
+//*********************************************************************************
+// hasPrivPMFeature
+//
+// True when any of the bits in privFeatureMask are set in the platform's
+// _pePrivPMFeatures word.
+//*********************************************************************************
+
+bool IOPlatformExpert::hasPrivPMFeature (unsigned long privFeatureMask)
+{
+  return ((_pePrivPMFeatures & privFeatureMask) != 0);
+}
+
+//*********************************************************************************
+// numBatteriesSupported
+//
+// Returns the platform's _peNumBatteriesSupported value.
+//*********************************************************************************
+
+int IOPlatformExpert::numBatteriesSupported (void)
+{
+  return (_peNumBatteriesSupported);
+}
+
+//*********************************************************************************
+// CheckSubTree
+//
+// This method is called by the instantiated subclass of the platform expert to
+// determine how a device should be inserted into the Power Domain. The subclass
+// provides an XML power tree description against which a device is matched based
+// on class and provider. If a match is found this routine returns true in addition
+// to flagging the description tree at the appropriate node that a device has been
+// registered for the given service.
+//********************************************************************************* + +bool IOPlatformExpert::CheckSubTree (OSArray * inSubTree, IOService * theNub, IOService * theDevice, OSDictionary * theParent) +{ + unsigned int i; + unsigned int numPowerTreeNodes; + OSDictionary * entry; + OSDictionary * matchingDictionary; + OSDictionary * providerDictionary; + OSDictionary * deviceDictionary; + OSDictionary * nubDictionary; + OSArray * children; + bool nodeFound = false; + bool continueSearch = false; + bool deviceMatch = false; + bool providerMatch = false; + bool multiParentMatch = false; + + if ( (NULL == theDevice) || (NULL == inSubTree) ) + return false; + + numPowerTreeNodes = inSubTree->getCount (); + + // iterate through the power tree to find a home for this device + + for ( i = 0; i < numPowerTreeNodes; i++ ) { + + entry = (OSDictionary *) inSubTree->getObject (i); + + matchingDictionary = (OSDictionary *) entry->getObject ("device"); + providerDictionary = (OSDictionary *) entry->getObject ("provider"); + + deviceMatch = true; // if no matching dictionary, this is not a criteria and so must match + if ( matchingDictionary ) { + deviceMatch = false; + if ( NULL != (deviceDictionary = theDevice->dictionaryWithProperties ())) { + deviceMatch = deviceDictionary->isEqualTo ( matchingDictionary, matchingDictionary ); + deviceDictionary->release (); + } + } + + providerMatch = true; // we indicate a match if there is no nub or provider + if ( theNub && providerDictionary ) { + providerMatch = false; + if ( NULL != (nubDictionary = theNub->dictionaryWithProperties ()) ) { + providerMatch = nubDictionary->isEqualTo ( providerDictionary, providerDictionary ); + nubDictionary->release (); + } + } + + multiParentMatch = true; // again we indicate a match if there is no multi-parent node + if (deviceMatch && providerMatch) { + if (NULL != multipleParentKeyValue) { + OSNumber * aNumber = (OSNumber *) entry->getObject ("multiple-parent"); + multiParentMatch = 
(NULL != aNumber) ? multipleParentKeyValue->isEqualTo (aNumber) : false; + } + } + + nodeFound = (deviceMatch && providerMatch && multiParentMatch); + + // if the power tree specifies a provider dictionary but theNub is + // NULL then we cannot match with this entry. + + if ( theNub == NULL && providerDictionary != NULL ) + nodeFound = false; + + // if this node is THE ONE...then register the device + + if ( nodeFound ) { + if (RegisterServiceInTree (theDevice, entry, theParent, theNub) ) { + + if ( kIOLogPower & gIOKitDebug) + IOLog ("PMRegisterDevice/CheckSubTree - service registered!\n"); + + numInstancesRegistered++; + + // determine if we need to search for additional nodes for this item + multipleParentKeyValue = (OSNumber *) entry->getObject ("multiple-parent"); + } + else + nodeFound = false; + } + + continueSearch = ( (false == nodeFound) || (NULL != multipleParentKeyValue) ); + + if ( continueSearch && (NULL != (children = (OSArray *) entry->getObject ("children"))) ) { + nodeFound = CheckSubTree ( children, theNub, theDevice, entry ); + continueSearch = ( (false == nodeFound) || (NULL != multipleParentKeyValue) ); + } + + if ( false == continueSearch ) + break; + } + + return ( nodeFound ); +} + +//********************************************************************************* +// RegisterServiceInTree +// +// Register a device at the specified node of our power tree. +//********************************************************************************* + +bool IOPlatformExpert::RegisterServiceInTree (IOService * theService, OSDictionary * theTreeNode, OSDictionary * theTreeParentNode, IOService * theProvider) +{ + IOService * aService; + bool registered = false; + OSArray * children; + unsigned int numChildren; + OSDictionary * child; + + // make sure someone is not already registered here + + if ( NULL == theTreeNode->getObject ("service") ) { + + if ( theTreeNode->setObject ("service", OSDynamicCast ( OSObject, theService)) ) { + + // 1. 
CHILDREN ------------------ + + // we registered the node in the tree...now if the node has children + // registered we must tell this service to add them. + + if ( NULL != (children = (OSArray *) theTreeNode->getObject ("children")) ) { + numChildren = children->getCount (); + for ( unsigned int i = 0; i < numChildren; i++ ) { + if ( NULL != (child = (OSDictionary *) children->getObject (i)) ) { + if ( NULL != (aService = (IOService *) child->getObject ("service")) ) + theService->addPowerChild (aService); + } + } + } + + // 2. PARENT -------------------- + + // also we must notify the parent of this node (if a registered service + // exists there) of a new child. + + if ( theTreeParentNode ) { + if ( NULL != (aService = (IOService *) theTreeParentNode->getObject ("service")) ) + if (aService != theProvider) + aService->addPowerChild (theService); + } + + registered = true; + } + } + + return registered; +} + +//********************************************************************************* +// printDictionaryKeys +// +// Print the keys for the given dictionary and selected contents. 
+//********************************************************************************* +void printDictionaryKeys (OSDictionary * inDictionary, char * inMsg) +{ + OSCollectionIterator * mcoll = OSCollectionIterator::withCollection (inDictionary); + OSSymbol * mkey; + OSString * ioClass; + unsigned int i = 0; + + mcoll->reset (); + + mkey = OSDynamicCast (OSSymbol, mcoll->getNextObject ()); + + while (mkey) { + + // kprintf ("dictionary key #%d: %s\n", i, mkey->getCStringNoCopy () ); + + // if this is the IOClass key, print it's contents + + if ( mkey->isEqualTo ("IOClass") ) { + ioClass = (OSString *) inDictionary->getObject ("IOClass"); + if ( ioClass ) IOLog ("%s IOClass is %s\n", inMsg, ioClass->getCStringNoCopy () ); + } + + // if this is an IOProviderClass key print it + + if ( mkey->isEqualTo ("IOProviderClass") ) { + ioClass = (OSString *) inDictionary->getObject ("IOProviderClass"); + if ( ioClass ) IOLog ("%s IOProviderClass is %s\n", inMsg, ioClass->getCStringNoCopy () ); + + } + + // also print IONameMatch keys + if ( mkey->isEqualTo ("IONameMatch") ) { + ioClass = (OSString *) inDictionary->getObject ("IONameMatch"); + if ( ioClass ) IOLog ("%s IONameMatch is %s\n", inMsg, ioClass->getCStringNoCopy () ); + } + + // also print IONameMatched keys + + if ( mkey->isEqualTo ("IONameMatched") ) { + ioClass = (OSString *) inDictionary->getObject ("IONameMatched"); + if ( ioClass ) IOLog ("%s IONameMatched is %s\n", inMsg, ioClass->getCStringNoCopy () ); + } + +#if 0 + // print clock-id + + if ( mkey->isEqualTo ("AAPL,clock-id") ) { + char * cstr; + cstr = getCStringForObject (inDictionary->getObject ("AAPL,clock-id")); + if (cstr) + kprintf (" ===> AAPL,clock-id is %s\n", cstr ); + } +#endif + + // print name + + if ( mkey->isEqualTo ("name") ) { + char nameStr[64]; + nameStr[0] = 0; + getCStringForObject (inDictionary->getObject ("name"), nameStr ); + if (strlen(nameStr) > 0) + IOLog ("%s name is %s\n", inMsg, nameStr); + } + + mkey = (OSSymbol *) 
mcoll->getNextObject (); + + i++; + } + + mcoll->release (); +} + +static void getCStringForObject (OSObject * inObj, char * outStr) +{ + char * buffer; + unsigned int len, i; + + if ( (NULL == inObj) || (NULL == outStr)) + return; + + char * objString = (char *) (inObj->getMetaClass())->getClassName(); + + if ((0 == strcmp(objString,"OSString")) || (0 == strcmp (objString, "OSSymbol"))) + strcpy (outStr, ((OSString *)inObj)->getCStringNoCopy()); + + else if (0 == strcmp(objString,"OSData")) { + len = ((OSData *)inObj)->getLength(); + buffer = (char *)((OSData *)inObj)->getBytesNoCopy(); + if (buffer && (len > 0)) { + for (i=0; i < len; i++) { + outStr[i] = buffer[i]; + } + outStr[len] = 0; + } + } +} + +extern "C" { + +/* + * Callouts from BSD for machine name & model + */ + +boolean_t PEGetMachineName( char * name, int maxLength ) +{ + if( gIOPlatform) + return( gIOPlatform->getMachineName( name, maxLength )); + else + return( false ); +} + +boolean_t PEGetModelName( char * name, int maxLength ) +{ + if( gIOPlatform) + return( gIOPlatform->getModelName( name, maxLength )); + else + return( false ); +} + +int PEGetPlatformEpoch(void) +{ + if( gIOPlatform) + return( gIOPlatform->getBootROMType()); + else + return( -1 ); +} + +int PEHaltRestart(unsigned int type) +{ + if (gIOPlatform) return gIOPlatform->haltRestart(type); + else return -1; +} + +long PEGetGMTTimeOfDay(void) +{ + if( gIOPlatform) + return( gIOPlatform->getGMTTimeOfDay()); + else + return( 0 ); +} + +void PESetGMTTimeOfDay(long secs) +{ + if( gIOPlatform) + gIOPlatform->setGMTTimeOfDay(secs); +} + +} /* extern "C" */ + +void IOPlatformExpert::registerNVRAMController(IONVRAMController * caller) +{ + publishResource("IONVRAM"); +} + +IOReturn IOPlatformExpert::callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4) +{ + IOService *service, *_resources; + + if (waitForFunction) { + _resources = 
waitForService(resourceMatching(functionName)); + } else { + _resources = resources(); + } + if (_resources == 0) return kIOReturnUnsupported; + + service = OSDynamicCast(IOService, _resources->getProperty(functionName)); + if (service == 0) return kIOReturnUnsupported; + + return service->callPlatformFunction(functionName, waitForFunction, + param1, param2, param3, param4); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOPlatformExpert + +OSDefineMetaClassAndAbstractStructors( IODTPlatformExpert, IOPlatformExpert ) + +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 0); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 1); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 2); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 3); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 4); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 5); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 6); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 7); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IOService * IODTPlatformExpert::probe( IOService * provider, + SInt32 * score ) +{ + if( !super::probe( provider, score)) + return( 0 ); + + // check machine types + if( !provider->compareNames( getProperty( gIONameMatchKey ) )) + return( 0 ); + + return( this); +} + +bool IODTPlatformExpert::configure( IOService * provider ) +{ + if( !super::configure( provider)) + return( false); + + processTopLevel( provider ); + + return( true ); +} + +IOService * IODTPlatformExpert::createNub( IORegistryEntry * from ) +{ + IOService * nub; + + nub = new IOPlatformDevice; + if( nub) { + if( !nub->init( from, gIODTPlane )) { + nub->free(); + nub = 0; + } + } + return( nub); +} + +bool IODTPlatformExpert::createNubs( IOService * parent, OSIterator * iter ) +{ + IORegistryEntry * next; + IOService * nub; + bool ok = true; + + if( iter) { + while( (next = (IORegistryEntry 
*) iter->getNextObject())) { + + if( 0 == (nub = createNub( next ))) + continue; + + nub->attach( parent ); + nub->registerService(); + } + iter->release(); + } + + return( ok ); +} + +void IODTPlatformExpert::processTopLevel( IORegistryEntry * root ) +{ + OSIterator * kids; + IORegistryEntry * next; + IORegistryEntry * cpus; + IORegistryEntry * options; + + // infanticide + kids = IODTFindMatchingEntries( root, 0, deleteList() ); + if( kids) { + while( (next = (IORegistryEntry *)kids->getNextObject())) { + next->detachAll( gIODTPlane); + } + kids->release(); + } + + // Publish an IODTNVRAM class on /options. + options = root->childFromPath("options", gIODTPlane); + if (options) { + dtNVRAM = new IODTNVRAM; + if (dtNVRAM) { + if (!dtNVRAM->init(options, gIODTPlane)) { + dtNVRAM->release(); + dtNVRAM = 0; + } else { + dtNVRAM->attach(this); + dtNVRAM->registerService(); + } + } + } + + // Publish the cpus. + cpus = root->childFromPath( "cpus", gIODTPlane); + if ( cpus) + createNubs( this, IODTFindMatchingEntries( cpus, kIODTExclusive, 0)); + + // publish top level, minus excludeList + createNubs( this, IODTFindMatchingEntries( root, kIODTExclusive, excludeList())); +} + +IOReturn IODTPlatformExpert::getNubResources( IOService * nub ) +{ + if( nub->getDeviceMemory()) + return( kIOReturnSuccess ); + + IODTResolveAddressing( nub, "reg", 0); + + return( kIOReturnSuccess); +} + +bool IODTPlatformExpert::compareNubName( const IOService * nub, + OSString * name, OSString ** matched ) const +{ + return( IODTCompareNubName( nub, name, matched ) + || super::compareNubName( nub, name, matched) ); +} + +bool IODTPlatformExpert::getModelName( char * name, int maxLength ) +{ + OSData * prop; + const char * str; + int len; + char c; + bool ok = false; + + maxLength--; + + prop = (OSData *) getProvider()->getProperty( gIODTCompatibleKey ); + if( prop ) { + str = (const char *) prop->getBytesNoCopy(); + + if( 0 == strncmp( str, "AAPL,", strlen( "AAPL," ) )) + str += strlen( "AAPL," 
); + + len = 0; + while( (c = *str++)) { + if( (c == '/') || (c == ' ')) + c = '-'; + + name[ len++ ] = c; + if( len >= maxLength) + break; + } + + name[ len ] = 0; + ok = true; + } + return( ok ); +} + +bool IODTPlatformExpert::getMachineName( char * name, int maxLength ) +{ + OSData * prop; + bool ok = false; + + maxLength--; + prop = (OSData *) getProvider()->getProperty( gIODTModelKey ); + ok = (0 != prop); + + if( ok ) + strncpy( name, (const char *) prop->getBytesNoCopy(), maxLength ); + + return( ok ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IODTPlatformExpert::registerNVRAMController( IONVRAMController * nvram ) +{ + if (dtNVRAM) dtNVRAM->registerNVRAMController(nvram); + + super::registerNVRAMController(nvram); +} + +int IODTPlatformExpert::haltRestart(unsigned int type) +{ + if (dtNVRAM) dtNVRAM->sync(); + + return super::haltRestart(type); +} + +IOReturn IODTPlatformExpert::readXPRAM(IOByteCount offset, UInt8 * buffer, + IOByteCount length) +{ + if (dtNVRAM) return dtNVRAM->readXPRAM(offset, buffer, length); + else return kIOReturnNotReady; +} + +IOReturn IODTPlatformExpert::writeXPRAM(IOByteCount offset, UInt8 * buffer, + IOByteCount length) +{ + if (dtNVRAM) return dtNVRAM->writeXPRAM(offset, buffer, length); + else return kIOReturnNotReady; +} + +IOReturn IODTPlatformExpert::readNVRAMProperty( + IORegistryEntry * entry, + const OSSymbol ** name, OSData ** value ) +{ + if (dtNVRAM) return dtNVRAM->readNVRAMProperty(entry, name, value); + else return kIOReturnNotReady; +} + +IOReturn IODTPlatformExpert::writeNVRAMProperty( + IORegistryEntry * entry, + const OSSymbol * name, OSData * value ) +{ + if (dtNVRAM) return dtNVRAM->writeNVRAMProperty(entry, name, value); + else return kIOReturnNotReady; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOService + +OSDefineMetaClassAndStructors(IOPlatformExpertDevice, IOService) + 
+OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 0); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 1); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 2); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOPlatformExpertDevice::compareName( OSString * name, + OSString ** matched = 0 ) const +{ + return( IODTCompareNubName( this, name, matched )); +} + +bool +IOPlatformExpertDevice::initWithArgs( + void * dtTop, void * p2, void * p3, void * p4 ) +{ + IORegistryEntry * dt = 0; + void * argsData[ 4 ]; + bool ok; + + // dtTop may be zero on non- device tree systems + if( dtTop && (dt = IODeviceTreeAlloc( dtTop ))) + ok = super::init( dt, gIODTPlane ); + else + ok = super::init(); + + if( !ok) + return( false); + + workLoop = IOWorkLoop::workLoop(); + if (!workLoop) + return false; + + argsData[ 0 ] = dtTop; + argsData[ 1 ] = p2; + argsData[ 2 ] = p3; + argsData[ 3 ] = p4; + + setProperty("IOPlatformArgs", (void *)argsData, sizeof( argsData)); + + return( true); +} + +IOWorkLoop *IOPlatformExpertDevice::getWorkLoop() const +{ + return workLoop; +} + +void IOPlatformExpertDevice::free() +{ + if (workLoop) + workLoop->release(); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOService + +OSDefineMetaClassAndStructors(IOPlatformDevice, IOService) + +OSMetaClassDefineReservedUnused(IOPlatformDevice, 0); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 1); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 2); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 3); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOPlatformDevice::compareName( OSString * name, + OSString ** matched = 0 ) const +{ + return( ((IOPlatformExpert *)getProvider())-> + compareNubName( this, name, matched )); +} + +IOService * IOPlatformDevice::matchLocation( IOService * 
/* client */ ) +{ + return( this ); +} + +IOReturn IOPlatformDevice::getResources( void ) +{ + return( ((IOPlatformExpert *)getProvider())->getNubResources( this )); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/********************************************************************* +* IOPanicPlatform class +* +* If no legitimate IOPlatformDevice matches, this one does and panics +* the kernel with a suitable message. +*********************************************************************/ + +class IOPanicPlatform : IOPlatformExpert { + OSDeclareDefaultStructors(IOPanicPlatform); + +public: + bool start(IOService * provider); +}; + + +OSDefineMetaClassAndStructors(IOPanicPlatform, IOPlatformExpert); + + +bool IOPanicPlatform::start(IOService * provider) { + const char * platform_name = "(unknown platform name)"; + + if (provider) platform_name = provider->getName(); + + panic("Unable to find driver for this platform: \"%s\".\n", + platform_name); + + return false; +} diff --git a/iokit/Kernel/IOPowerConnection.cpp b/iokit/Kernel/IOPowerConnection.cpp new file mode 100644 index 000000000..2cc0e72e6 --- /dev/null +++ b/iokit/Kernel/IOPowerConnection.cpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <IOKit/pwr_mgt/IOPowerConnection.h>
+
+#define super IOService
+OSDefineMetaClassAndStructors(IOPowerConnection,IOService)
+
+
+// **********************************************************************************
+// setDesiredDomainState
+//
+// Parent of the connection calls here to save the child's desire
+// **********************************************************************************
+void IOPowerConnection::setDesiredDomainState (unsigned long stateNumber )
+{
+    desiredDomainState = stateNumber;
+}
+
+
+// **********************************************************************************
+// getDesiredDomainState
+//
+// **********************************************************************************
+unsigned long IOPowerConnection::getDesiredDomainState ( void )
+{
+    return desiredDomainState;
+}
+
+
+// **********************************************************************************
+// setParentKnowsState
+//
+// Child of the connection calls here to set its reminder that the parent does
+// or does not yet know the state of its domain.
+// **********************************************************************************
+void IOPowerConnection::setParentKnowsState (bool flag )
+{
+    stateKnown = flag;
+}
+
+
+// **********************************************************************************
+// setParentCurrentPowerFlags
+//
+// Child of the connection calls here to save what the parent says
+// is the state of its domain.
+// ********************************************************************************** +void IOPowerConnection::setParentCurrentPowerFlags (IOPMPowerFlags flags ) +{ + currentPowerFlags = flags; +} + + +// ********************************************************************************** +// parentKnowsState +// +// ********************************************************************************** +bool IOPowerConnection::parentKnowsState (void ) +{ + return stateKnown; +} + + +// ********************************************************************************** +// parentCurrentPowerFlags +// +// ********************************************************************************** +IOPMPowerFlags IOPowerConnection::parentCurrentPowerFlags (void ) +{ + return currentPowerFlags; +} + + diff --git a/iokit/Kernel/IORangeAllocator.cpp b/iokit/Kernel/IORangeAllocator.cpp new file mode 100644 index 000000000..26f6161ac --- /dev/null +++ b/iokit/Kernel/IORangeAllocator.cpp @@ -0,0 +1,369 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. 
+ *
+ *
+ * HISTORY
+ *
+ * sdouglas 05 Nov 99 - created.
+ */
+
+// NOTE(review): the six include targets below were lost in extraction (each
+// line reads a bare "#include"); restore them from the original xnu-123.5
+// sources (IOKit/libkern headers such as IORangeAllocator.h, IOLib.h,
+// IOLocks.h and the libkern OSArray/OSNumber/OSSerialize headers) -- verify.
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#undef super
+#define super OSObject
+
+OSDefineMetaClassAndStructors( IORangeAllocator, OSObject )
+
+// One free fragment. The element list holds the FREE ranges, kept sorted
+// and non-adjacent; allocated space is simply absent from the list.
+struct IORangeAllocatorElement {
+    // closed range
+    IORangeScalar start;
+    IORangeScalar end;
+};
+
+// Single global lock shared by all kLocking instances (allocated lazily in init()).
+IOLock * gIORangeAllocatorLock;
+
+// Locking is per-instance optional: the lock is taken only when the instance
+// was constructed with the kLocking option.
+#define LOCK() \
+ if( options & kLocking) IOTakeLock( gIORangeAllocatorLock )
+#define UNLOCK() \
+ if( options & kLocking) IOUnlock( gIORangeAllocatorLock )
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Initialize an allocator covering [0, endOfRange]; endOfRange == 0 starts
+// with an empty free list (ranges are added later via deallocate()).
+bool IORangeAllocator::init( IORangeScalar endOfRange,
+ IORangeScalar _defaultAlignment,
+ UInt32 _capacity,
+ IOOptionBits _options )
+{
+    if( !super::init())
+ return( false );
+
+    if( !_capacity)
+ _capacity = 1;
+    if( !_defaultAlignment)
+ _defaultAlignment = 1;
+    capacity = 0;
+    capacityIncrement = _capacity;
+    numElements = 0;
+    elements = 0;
+    defaultAlignmentMask = _defaultAlignment - 1;
+    options = _options;
+
+    if( (!gIORangeAllocatorLock) && (options & kLocking))
+ gIORangeAllocatorLock = IOLockAlloc();
+
+    // Seed the free list with the whole range (deallocate() inserts it).
+    if( endOfRange)
+ deallocate( 0, endOfRange + 1 );
+
+    return( true );
+}
+
+// Factory: allocate and init, releasing the instance on init failure.
+IORangeAllocator * IORangeAllocator:: withRange(
+ IORangeScalar endOfRange,
+ IORangeScalar defaultAlignment = 0,
+ UInt32 capacity = 0,
+ IOOptionBits options = 0 )
+{
+    IORangeAllocator * thingy;
+
+    thingy = new IORangeAllocator;
+    if( thingy && ! thingy->init( endOfRange, defaultAlignment,
+ capacity, options )) {
+ thingy->release();
+ thingy = 0;
+    }
+
+    return( thingy );
+}
+
+void IORangeAllocator::free()
+{
+    if( elements)
+ IODelete( elements, IORangeAllocatorElement, capacity );
+
+    super::free();
+}
+
+// Number of free fragments currently tracked.
+UInt32 IORangeAllocator::getFragmentCount( void )
+{
+    return( numElements );
+}
+
+// Allocated capacity of the fragment array (>= numElements).
+UInt32 IORangeAllocator::getFragmentCapacity( void )
+{
+    return( capacity );
+}
+
+void IORangeAllocator::setFragmentCapacityIncrement( UInt32 count )
+{
+    capacityIncrement = count;
+}
+
+
+// allocate element at index
+// Opens a slot at 'index' in the sorted fragment array, growing the array by
+// capacityIncrement when full (capacityIncrement == 0 pins the capacity).
+bool IORangeAllocator::allocElement( UInt32 index )
+{
+    UInt32 newCapacity;
+    IORangeAllocatorElement * newElements;
+
+    if( ((numElements == capacity) && capacityIncrement)
+ || (!elements)) {
+
+ // Grow: copy [0,index) then shift [index,numElements) up by one
+ // into the new buffer, leaving the gap at 'index'.
+ newCapacity = capacity + capacityIncrement;
+ newElements = IONew( IORangeAllocatorElement, newCapacity );
+ if( !newElements)
+     return( false );
+
+ if( elements) {
+     bcopy( elements,
+  newElements,
+  index * sizeof( IORangeAllocatorElement));
+     bcopy( elements + index,
+  newElements + index + 1,
+  (numElements - index) * sizeof( IORangeAllocatorElement));
+
+     IODelete( elements, IORangeAllocatorElement, capacity );
+ }
+
+ elements = newElements;
+ capacity = newCapacity;
+
+    } else {
+
+ // In place: shift the tail up by one to open the slot.
+ bcopy( elements + index,
+  elements + index + 1,
+  (numElements - index) * sizeof( IORangeAllocatorElement));
+    }
+    numElements++;
+
+    return( true );
+}
+
+// destroy element at index
+// Closes the slot by shifting the tail down; capacity is not shrunk.
+void IORangeAllocator::deallocElement( UInt32 index )
+{
+    numElements--;
+    bcopy( elements + index + 1,
+  elements + index,
+  (numElements - index) * sizeof( IORangeAllocatorElement));
+}
+
+// First-fit allocation of 'size' units with the given alignment (0 means the
+// instance default). On success *result holds the allocated start address.
+bool IORangeAllocator::allocate( IORangeScalar size,
+ IORangeScalar * result,
+ IORangeScalar alignment = 0 )
+{
+    IORangeScalar data, dataEnd;
+    IORangeScalar thisStart, thisEnd;
+    UInt32 index;
+    bool ok = false;
+
+    if( !size || !result)
+ return( false );
+
+    // Convert alignment to a mask; defaultAlignmentMask is already a mask.
+    if( 0 == alignment)
+ alignment = defaultAlignmentMask;
+    else
+ alignment--;
+
+    // Sizes are always rounded to the instance's default alignment.
+    size = (size + defaultAlignmentMask) & ~defaultAlignmentMask;
+
+    LOCK();
+
+    for( index = 0; index < numElements; index++ ) {
+
+ thisStart = elements[index].start;
+ thisEnd = elements[index].end;
+        data = (thisStart + alignment) & ~alignment;
+ dataEnd = (data + size - 1);
+
+ ok = (dataEnd <= thisEnd);
+ if( ok) {
+     // Carve [data,dataEnd] out of the fragment; depending on whether
+     // the cut touches either edge this splits, trims, or removes it.
+            if( data != thisStart) {
+                if( dataEnd != thisEnd) {
+                    if( allocElement( index + 1 )) {
+         elements[index++].end = data - 1;
+         elements[index].start = dataEnd + 1;
+         elements[index].end = thisEnd;
+     } else
+                        ok = false;
+                } else
+                    elements[index].end = data - 1;
+            } else {
+                if( dataEnd != thisEnd)
+                    elements[index].start = dataEnd + 1;
+                else
+                    deallocElement( index );
+            }
+            if( ok)
+  *result = data;
+     break;
+ }
+    }
+
+    UNLOCK();
+
+    return( ok );
+}
+
+// Allocate the specific range [data, data+size); fails unless the whole
+// range is currently free.
+bool IORangeAllocator::allocateRange( IORangeScalar data,
+ IORangeScalar size )
+{
+    IORangeScalar thisStart, thisEnd;
+    IORangeScalar dataEnd;
+    UInt32 index;
+    bool found = false;
+
+    if( !size)
+ return( 0 );
+
+    size = (size + defaultAlignmentMask) & ~defaultAlignmentMask;
+    dataEnd = data + size - 1;
+
+    LOCK();
+
+    for( index = 0;
+  (!found) && (index < numElements);
+  index++ ) {
+
+ thisStart = elements[index].start;
+ thisEnd = elements[index].end;
+
+ // List is sorted; once a fragment starts past 'data' it can't match.
+        if( thisStart > data)
+     break;
+ found = (dataEnd <= thisEnd);
+
+ if( found) {
+     // Same split/trim/remove cases as allocate().
+            if( data != thisStart) {
+                if( dataEnd != thisEnd) {
+                    found = allocElement( index + 1 );
+     if( found) {
+         elements[index++].end = data - 1;
+         elements[index].start = dataEnd + 1;
+         elements[index].end = thisEnd;
+     }
+                } else
+                    elements[index].end = data - 1;
+            } else if( dataEnd != thisEnd)
+  elements[index].start = dataEnd + 1;
+     else
+  deallocElement( index );
+ }
+    }
+
+    UNLOCK();
+
+    return( found );
+}
+
+// Return [data, data+size) to the free list, coalescing with the
+// neighbouring fragment(s) when contiguous.
+void IORangeAllocator::deallocate( IORangeScalar data,
+ IORangeScalar size )
+{
+    IORangeScalar dataEnd;
+    UInt32 index;
+    bool headContig = false;
+    bool tailContig = false;
+
+    size = (size + defaultAlignmentMask) & ~defaultAlignmentMask;
+    dataEnd = data + size - 1;
+
+    LOCK();
+
+    // Find the insertion point; note whether the freed range abuts the
+    // fragment before (headContig) and/or after (tailContig) it.
+    for( index = 0; index < numElements; index++ ) {
+ if( elements[index].start < data) {
+     headContig = (data <= (elements[index].end + 1));
+     continue;
+ }
+ tailContig = ((data + size) >= elements[index].start);
+ break;
+    }
+
+    if( headContig) {
+ if( tailContig) {
+     // Bridges two fragments: merge them into one.
+     elements[index-1].end = elements[index].end;
+     deallocElement( index );
+ } else /*safe*/ if( dataEnd > elements[index-1].end)
+     elements[index-1].end = dataEnd;
+
+    } else if( tailContig) {
+ if( data < elements[index].start) /*safe*/
+     elements[index].start = data;
+
+    } else if( allocElement( index)) {
+ // Isolated: insert a new fragment.
+ elements[index].start = data;
+ elements[index].end = dataEnd;
+    }
+
+    UNLOCK();
+}
+
+// Serialize the free list as a flat array of start/end number pairs.
+bool IORangeAllocator::serialize(OSSerialize *s) const
+{
+    OSArray * array = OSArray::withCapacity( numElements * 2 );
+    OSNumber * num;
+    UInt32 index;
+    bool ret;
+
+    if( !array)
+ return( false );
+
+    LOCK();
+
+    for( index = 0; index < numElements; index++) {
+ if( (num = OSNumber::withNumber( elements[index].start,
+     8 * sizeof(IORangeScalar) ))) {
+     array->setObject(num);
+     num->release();
+ }
+ if( (num = OSNumber::withNumber( elements[index].end,
+     8 * sizeof(IORangeScalar) ))) {
+     array->setObject(num);
+     num->release();
+ }
+    }
+
+    UNLOCK();
+
+    ret = array->serialize(s);
+    array->release();
+
+    return( ret );
+}
+
+// Total number of free units across all fragments.
+// NOTE(review): unlike the other accessors this walks the list without
+// taking LOCK(); presumably callers tolerate a racy snapshot -- confirm.
+IORangeScalar IORangeAllocator::getFreeCount( void )
+{
+    UInt32 index;
+    IORangeScalar sum = 0;
+
+    for( index = 0; index < numElements; index++)
+ sum += elements[index].end - elements[index].start + 1;
+
+    return( sum );
+}
+
diff --git a/iokit/Kernel/IORegistryEntry.cpp b/iokit/Kernel/IORegistryEntry.cpp
new file mode 100644
index 000000000..bb72bda03
--- /dev/null
+++ b/iokit/Kernel/IORegistryEntry.cpp
@@ -0,0 +1,1999 @@
+/*
+ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 12 Nov 98 sdouglas created. + * + */ + +#include +#include +#include +#include + +#include + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super OSObject + +OSDefineMetaClassAndStructors(IORegistryEntry, OSObject) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IORegistryEntry * gRegistryRoot; +static OSDictionary * gIORegistryPlanes; + +const OSSymbol * gIONameKey; +const OSSymbol * gIOLocationKey; + +enum { + kParentSetIndex = 0, + kChildSetIndex = 1, + kNumSetIndex +}; +enum { + kIOMaxPlaneName = 32 +}; + +class IORegistryPlane : public OSObject { + + friend IORegistryEntry; + + OSDeclareAbstractStructors(IORegistryPlane) + + const OSSymbol * nameKey; + const OSSymbol * keys[ kNumSetIndex ]; + const OSSymbol * pathNameKey; + const OSSymbol * pathLocationKey; + int reserved[2]; + +public: + virtual bool serialize(OSSerialize *s) const; +}; + +OSDefineMetaClassAndStructors(IORegistryPlane, OSObject) + + +static 
IORecursiveLock * gPropertiesLock; +static SInt32 gIORegistryGenerationCount; + +#define UNLOCK s_lock_done( &gIORegistryLock ) +#define RLOCK s_lock_read( &gIORegistryLock ) +#define WLOCK s_lock_write( &gIORegistryLock ); \ + gIORegistryGenerationCount++ + // make atomic + +#define PUNLOCK IORecursiveLockUnlock( gPropertiesLock ) +#define PLOCK IORecursiveLockLock( gPropertiesLock ) + +#define IOREGSPLITTABLES + +#ifdef IOREGSPLITTABLES +#define registryTable() fRegistryTable +#else +#define registryTable() fPropertyTable +#endif + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct s_lock_t { + decl_simple_lock_data(,interlock) /* "hardware" interlock field */ + volatile unsigned int + read_count:16, /* No. of accepted readers */ + want_upgrade:1, /* Read-to-write upgrade waiting */ + want_write:1, /* Writer is waiting, or + locked for write */ + waiting:1, /* Someone is sleeping on lock */ + can_sleep:1; /* Can attempts to lock go to sleep? */ +}; + +static struct s_lock_t gIORegistryLock; + +/* Time we loop without holding the interlock. + * The former is for when we cannot sleep, the latter + * for when our thread can go to sleep (loop less) + * we shouldn't retake the interlock at all frequently + * if we cannot go to sleep, since it interferes with + * any other processors. In particular, 100 is too small + * a number for powerpc MP systems because of cache + * coherency issues and differing lock fetch times between + * the processors + */ +static unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 } ; + +static void +s_lock_init( + s_lock_t *l, + boolean_t can_sleep) +{ + (void) memset((void *) l, 0, sizeof(s_lock_t)); + + simple_lock_init(&l->interlock, 0); + l->want_write = FALSE; + l->want_upgrade = FALSE; + l->read_count = 0; + l->can_sleep = can_sleep; +} + +static void +s_lock_write( + register s_lock_t * l) +{ + register int i; + + simple_lock(&l->interlock); + + /* + * Try to acquire the want_write bit. 
+ */ + while (l->want_write) { + + i = lock_wait_time[l->can_sleep ? 1 : 0]; + if (i != 0) { + simple_unlock(&l->interlock); + while (--i != 0 && l->want_write) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && l->want_write) { + l->waiting = TRUE; + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + l->want_write = TRUE; + + /* Wait for readers (and upgrades) to finish */ + + while ((l->read_count != 0) || l->want_upgrade) { + + i = lock_wait_time[l->can_sleep ? 1 : 0]; + if (i != 0) { + simple_unlock(&l->interlock); + while (--i != 0 && (l->read_count != 0 || + l->want_upgrade)) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) { + l->waiting = TRUE; + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + + simple_unlock(&l->interlock); +} + +static void +s_lock_done( + register s_lock_t * l) +{ + boolean_t do_wakeup = FALSE; + + simple_lock(&l->interlock); + + if (l->read_count != 0) { + l->read_count -= 1; + } + else { + if (l->want_upgrade) { + l->want_upgrade = FALSE; + } + else { + l->want_write = FALSE; + } + } + + /* + * There is no reason to wakeup a waiting thread + * if the read-count is non-zero. Consider: + * we must be dropping a read lock + * threads are waiting only if one wants a write lock + * if there are still readers, they can't proceed + */ + if (l->waiting && (l->read_count == 0)) { + l->waiting = FALSE; + do_wakeup = TRUE; + } + + simple_unlock(&l->interlock); + + if (do_wakeup) + thread_wakeup((event_t) l); +} + +static void +s_lock_read( + register s_lock_t * l) +{ + register int i; + + simple_lock(&l->interlock); + + while ( l->want_upgrade || ((0 == l->read_count) && l->want_write )) { + + i = lock_wait_time[l->can_sleep ? 
1 : 0]; + + if (i != 0) { + simple_unlock(&l->interlock); + while (--i != 0 && + (l->want_upgrade || ((0 == l->read_count) && l->want_write ))) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && + (l->want_upgrade || ((0 == l->read_count) && l->want_write ))) { + l->waiting = TRUE; + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + + l->read_count += 1; + simple_unlock(&l->interlock); + +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IORegistryEntry * IORegistryEntry::initialize( void ) +{ + bool ok; + + if( !gRegistryRoot) { + + s_lock_init( &gIORegistryLock, true ); + gRegistryRoot = new IORegistryEntry; + gPropertiesLock = IORecursiveLockAlloc(); + gIORegistryPlanes = OSDictionary::withCapacity( 1 ); + + assert( gRegistryRoot && gPropertiesLock + && gIORegistryPlanes ); + ok = gRegistryRoot->init(); + + gIONameKey = OSSymbol::withCStringNoCopy( "IOName" ); + gIOLocationKey = OSSymbol::withCStringNoCopy( "IOLocation" ); + + assert( ok && gIONameKey && gIOLocationKey ); + + gRegistryRoot->setName( "Root" ); + gRegistryRoot->setProperty( kIORegistryPlanesKey, gIORegistryPlanes ); + } + + return( gRegistryRoot ); +} + +IORegistryEntry * IORegistryEntry::getRegistryRoot( void ) +{ + return( gRegistryRoot ); +} + +SInt32 IORegistryEntry::getGenerationCount( void ) +{ + return( gIORegistryGenerationCount ); +} + + +const IORegistryPlane * IORegistryEntry::makePlane( const char * name ) +{ + IORegistryPlane * plane; + const OSSymbol * nameKey; + const OSSymbol * parentKey; + const OSSymbol * childKey; + const OSSymbol * pathNameKey; + const OSSymbol * pathLocationKey; + char key[ kIOMaxPlaneName + 16 ]; + char * end; + + strncpy( key, name, kIOMaxPlaneName ); + key[ kIOMaxPlaneName ] = 0; + end = key + strlen( name ); + + nameKey = OSSymbol::withCString( key); + + strcpy( end, "ParentLinks" ); + parentKey = OSSymbol::withCString( key); + + 
strcpy( end, "ChildLinks" ); + childKey = OSSymbol::withCString( key); + + strcpy( end, "Name" ); + pathNameKey = OSSymbol::withCString( key); + + strcpy( end, "Location" ); + pathLocationKey = OSSymbol::withCString( key); + + plane = new IORegistryPlane; + + if( plane && plane->init() + && nameKey && parentKey && childKey + && pathNameKey && pathLocationKey ) { + + plane->nameKey = nameKey; + plane->keys[ kParentSetIndex ] = parentKey; + plane->keys[ kChildSetIndex ] = childKey; + plane->pathNameKey = pathNameKey; + plane->pathLocationKey = pathLocationKey; + + WLOCK; + gIORegistryPlanes->setObject( nameKey, plane ); + UNLOCK; + + } else { + + if( plane) + plane->release(); + if( pathLocationKey) + pathLocationKey->release(); + if( pathNameKey) + pathNameKey->release(); + if( parentKey) + parentKey->release(); + if( childKey) + childKey->release(); + if( nameKey) + nameKey->release(); + plane = 0; + } + + return( plane); +} + +const IORegistryPlane * IORegistryEntry::getPlane( const char * name ) +{ + const IORegistryPlane * plane; + + RLOCK; + plane = (const IORegistryPlane *) gIORegistryPlanes->getObject( name ); + UNLOCK; + + return( plane ); +} + +bool IORegistryPlane::serialize(OSSerialize *s) const +{ + return( nameKey->serialize(s) ); +} + +enum { kIORegCapacityIncrement = 4 }; + +bool IORegistryEntry::init( OSDictionary * dict = 0 ) +{ + OSString * prop; + + if( !super::init()) + return( false); + + if( dict) { + dict->retain(); + fPropertyTable = dict; + + } else { + fPropertyTable = OSDictionary::withCapacity( kIORegCapacityIncrement ); + if( fPropertyTable) + fPropertyTable->setCapacityIncrement( kIORegCapacityIncrement ); + } + + if( !fPropertyTable) + return( false); + +#ifdef IOREGSPLITTABLES + fRegistryTable = OSDictionary::withCapacity( kIORegCapacityIncrement ); + if( fRegistryTable) + fRegistryTable->setCapacityIncrement( kIORegCapacityIncrement ); + + if( (prop = OSDynamicCast( OSString, getProperty( gIONameKey)))) { + OSSymbol * sym = (OSSymbol 
*)OSSymbol::withString( prop); + // ok for OSSymbol too + setName( sym); + sym->release(); + } + +#endif /* IOREGSPLITTABLES */ + + return( true); +} + +bool IORegistryEntry::init( IORegistryEntry * old, + const IORegistryPlane * plane ) +{ + OSArray * all; + IORegistryEntry * next; + unsigned int index; + + if( !super::init()) + return( false); + + WLOCK; + + fPropertyTable = old->getPropertyTable(); + old->fPropertyTable = 0; +#ifdef IOREGSPLITTABLES + fRegistryTable = old->fRegistryTable; + old->fRegistryTable = 0; +#endif /* IOREGSPLITTABLES */ + + all = getParentSetReference( plane ); + if( all) for( index = 0; + (next = (IORegistryEntry *) all->getObject(index)); + index++ ) { + next->makeLink( this, kChildSetIndex, plane ); + next->breakLink( old, kChildSetIndex, plane ); + } + + all = getChildSetReference( plane ); + if( all) for( index = 0; + (next = (IORegistryEntry *) all->getObject(index)); + index++ ) { + next->makeLink( this, kParentSetIndex, plane ); + next->breakLink( old, kParentSetIndex, plane ); + } + + UNLOCK; + + return( true ); +} + +void IORegistryEntry::free( void ) +{ + +#ifdef DEBUG + OSArray * links; + const OSSymbol * key; + const IORegistryPlane * plane; + OSCollectionIterator * iter; + + if( registryTable()) { + iter = OSCollectionIterator::withCollection( gIORegistryPlanes ); + if( iter) { + while( (key = (const OSSymbol *) iter->getNextObject())) { + if( 0 == (plane = (const IORegistryPlane *) + OSDynamicCast( IORegistryPlane, + gIORegistryPlanes->getObject( key )))) + continue; + if( (links = getParentSetReference( plane )) + || (links = getChildSetReference( plane )) ) { + + IOLog("%s: Still attached in %s at free()\n", + getName(), plane->nameKey->getCStringNoCopy()); + } + } + iter->release(); + } + } +#endif + + if( getPropertyTable()) + getPropertyTable()->release(); + +#ifdef IOREGSPLITTABLES + if( registryTable()) + registryTable()->release(); +#endif /* IOREGSPLITTABLES */ + + super::free(); +} + +void 
IORegistryEntry::setPropertyTable( OSDictionary * dict ) +{ + if( fPropertyTable) + fPropertyTable->release(); + if( dict) + dict->retain(); + fPropertyTable = dict; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Wrappers to synchronize property table */ + +#define wrap1(func,type,constant) \ +OSObject * \ +IORegistryEntry::func ## Property( type * aKey) constant \ +{ \ + OSObject * obj; \ + \ + PLOCK; \ + obj = getPropertyTable()->func ## Object( aKey ); \ + PUNLOCK; \ + \ + return( obj ); \ +} + +#define wrap2(type,constant) \ +OSObject * \ +IORegistryEntry::copyProperty( type * aKey) constant \ +{ \ + OSObject * obj; \ + \ + PLOCK; \ + obj = getPropertyTable()->getObject( aKey ); \ + if( obj) \ + obj->retain(); \ + PUNLOCK; \ + \ + return( obj ); \ +} + +#define wrap3(func,type,constant) \ +void \ +IORegistryEntry::func ## Property( type * aKey) constant \ +{ \ + PLOCK; \ + getPropertyTable()->func ## Object( aKey ); \ + PUNLOCK; \ +} + +#define wrap4(type,constant) \ +OSObject * \ +IORegistryEntry::getProperty( type * aKey, \ + const IORegistryPlane * plane, \ + IOOptionBits options ) constant \ +{ \ + OSObject * obj = getProperty( aKey ); \ + \ + if ( (0 == obj) && (options & kIORegistryIterateRecursively) ) { \ + IORegistryEntry * entry = (IORegistryEntry *) this; \ + IORegistryIterator * iter; \ + iter = IORegistryIterator::iterateOver( entry, plane, options ); \ + \ + while ( (0 == obj) && (entry = iter->getNextObject()) ) { \ + obj = entry->getProperty( aKey ); \ + } \ + iter->release(); \ + } \ + \ + return( obj ); \ +} + +bool IORegistryEntry::serializeProperties( OSSerialize * s ) const +{ + bool ok; + +// setProperty( getRetainCount(), 32, "__retain" ); + + PLOCK; + ok = getPropertyTable()->serialize( s ); + PUNLOCK; + + return( ok ); +} + +OSDictionary * IORegistryEntry::dictionaryWithProperties( void ) const +{ + OSDictionary * dict; + + PLOCK; + dict = OSDictionary::withDictionary( getPropertyTable(), + 
getPropertyTable()->getCapacity() ); + PUNLOCK; + + return( dict ); +} + +IOReturn IORegistryEntry::setProperties( OSObject * properties ) +{ + return( kIOReturnUnsupported ); +} + +wrap1(get, const OSSymbol, const) // getProperty() definition +wrap1(get, const OSString, const) // getProperty() definition +wrap1(get, const char, const) // getProperty() definition + +wrap2(const OSSymbol, const) // copyProperty() definition +wrap2(const OSString, const) // copyProperty() definition +wrap2(const char, const) // copyProperty() definition + +wrap3(remove, const OSSymbol,) // removeProperty() definition +wrap3(remove, const OSString,) // removeProperty() definition +wrap3(remove, const char,) // removeProperty() definition + +wrap4(const OSSymbol, const) // getProperty() w/plane definition +wrap4(const OSString, const) // getProperty() w/plane definition +wrap4(const char, const) // getProperty() w/plane definition + +bool +IORegistryEntry::setProperty( const OSSymbol * aKey, OSObject * anObject) +{ + bool ret = false; + PLOCK; + ret = getPropertyTable()->setObject( aKey, anObject ); + PUNLOCK; + + return ret; +} + +bool +IORegistryEntry::setProperty( const OSString * aKey, OSObject * anObject) +{ + bool ret = false; + PLOCK; + ret = getPropertyTable()->setObject( aKey, anObject ); + PUNLOCK; + + return ret; +} + +bool +IORegistryEntry::setProperty( const char * aKey, OSObject * anObject) +{ + bool ret = false; + PLOCK; + ret = getPropertyTable()->setObject( aKey, anObject ); + PUNLOCK; + + return ret; +} + +bool +IORegistryEntry::setProperty(const char * aKey, const char * aString) +{ + bool ret = false; + OSSymbol * aSymbol = (OSSymbol *) OSSymbol::withCString( aString ); + + if( aSymbol) { + PLOCK; + ret = getPropertyTable()->setObject( aKey, aSymbol ); + PUNLOCK; + aSymbol->release(); + } + return( ret ); +} + +bool +IORegistryEntry::setProperty(const char * aKey, bool aBoolean) +{ + bool ret = false; + OSBoolean * aBooleanObj = OSBoolean::withBoolean( aBoolean ); + 
+ if( aBooleanObj) { + PLOCK; + ret = getPropertyTable()->setObject( aKey, aBooleanObj ); + PUNLOCK; + aBooleanObj->release(); + } + return( ret ); +} + +bool +IORegistryEntry::setProperty( const char * aKey, + unsigned long long aValue, + unsigned int aNumberOfBits) +{ + bool ret = false; + OSNumber * anOffset = OSNumber::withNumber( aValue, aNumberOfBits ); + + if( anOffset) { + PLOCK; + ret = getPropertyTable()->setObject( aKey, anOffset ); + PUNLOCK; + anOffset->release(); + } + return( ret ); +} + +bool +IORegistryEntry::setProperty( const char * aKey, + void * bytes, + unsigned int length) +{ + bool ret = false; + OSData * data = OSData::withBytes( bytes, length ); + + if( data) { + PLOCK; + ret = getPropertyTable()->setObject( aKey, data ); + PUNLOCK; + data->release(); + } + return( ret ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/* Name, location, paths */ + +const char * IORegistryEntry::getName( const IORegistryPlane * plane = 0 ) const +{ + OSSymbol * sym = 0; + + RLOCK; + if( plane) + sym = (OSSymbol *) registryTable()->getObject( plane->pathNameKey ); + if( !sym) + sym = (OSSymbol *) registryTable()->getObject( gIONameKey ); + UNLOCK; + + if( sym) + return( sym->getCStringNoCopy()); + else + return( (getMetaClass())->getClassName()); +} + +const OSSymbol * IORegistryEntry::copyName( + const IORegistryPlane * plane = 0 ) const +{ + OSSymbol * sym = 0; + + RLOCK; + if( plane) + sym = (OSSymbol *) registryTable()->getObject( plane->pathNameKey ); + if( !sym) + sym = (OSSymbol *) registryTable()->getObject( gIONameKey ); + if( sym) + sym->retain(); + UNLOCK; + + if( sym) + return( sym ); + else + return( OSSymbol::withCString((getMetaClass())->getClassName()) ); +} + +const OSSymbol * IORegistryEntry::copyLocation( + const IORegistryPlane * plane = 0 ) const +{ + OSSymbol * sym = 0; + + RLOCK; + if( plane) + sym = (OSSymbol *) registryTable()->getObject( plane->pathLocationKey ); + if( !sym) + sym = (OSSymbol *) 
registryTable()->getObject( gIOLocationKey ); + if( sym) + sym->retain(); + UNLOCK; + + return( sym ); +} + +const char * IORegistryEntry::getLocation( const IORegistryPlane * plane = 0 ) const +{ + const OSSymbol * sym = copyLocation( plane ); + const char * result = 0; + + if( sym) { + result = sym->getCStringNoCopy(); + sym->release(); + } + + return( result ); +} + +void IORegistryEntry::setName( const OSSymbol * name, + const IORegistryPlane * plane = 0 ) +{ + const OSSymbol * key; + + if( name) { + if( plane) + key = plane->pathNameKey; + else + key = gIONameKey; + + WLOCK; + registryTable()->setObject( key, (OSObject *) name); + UNLOCK; + } +} + +void IORegistryEntry::setName( const char * name, + const IORegistryPlane * plane = 0 ) +{ + OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( name ); + if ( sym ) { + setName( sym, plane ); + sym->release(); + } +} + +void IORegistryEntry::setLocation( const OSSymbol * location, + const IORegistryPlane * plane = 0 ) +{ + const OSSymbol * key; + + if( location) { + if( plane) + key = plane->pathLocationKey; + else + key = gIOLocationKey; + + WLOCK; + registryTable()->setObject( key, (OSObject *) location); + UNLOCK; + } +} + +void IORegistryEntry::setLocation( const char * location, + const IORegistryPlane * plane = 0 ) +{ + OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( location ); + if ( sym ) { + setLocation( sym, plane ); + sym->release(); + } +} + +bool +IORegistryEntry::compareName( OSString * name, OSString ** matched = 0 ) const +{ + const OSSymbol * sym = copyName(); + bool isEqual; + + isEqual = sym->isEqualTo( name ); + + if( isEqual && matched) { + name->retain(); + *matched = name; + } + + if( sym) + sym->release(); + + return( isEqual ); +} + +bool +IORegistryEntry::compareNames( OSObject * names, OSString ** matched = 0 ) const +{ + OSString * string; + OSCollection * collection; + OSIterator * iter = 0; + bool result = false; + + if( (collection = OSDynamicCast( OSCollection, names))) { + iter 
= OSCollectionIterator::withCollection( collection ); + string = 0; + } else + string = OSDynamicCast( OSString, names); + + do { + if( string) + result = compareName( string, matched ); + + } while( (false == result) + && iter && (string = OSDynamicCast( OSString, iter->getNextObject()))); + + if( iter) + iter->release(); + + return( result); +} + + +bool IORegistryEntry::getPath( char * path, int * length, + const IORegistryPlane * plane ) const +{ + OSArray * stack; + IORegistryEntry * root; + const IORegistryEntry * entry; + IORegistryEntry * parent; + const OSSymbol * alias; + int index; + int len, maxLength, compLen; + char * nextComp; + bool ok; + + if( !path || !length || !plane) + return( false); + + len = 0; + maxLength = *length - 2; + nextComp = path; + + len = plane->nameKey->getLength(); + if( len >= maxLength) + return( false); + strcpy( nextComp, plane->nameKey->getCStringNoCopy()); + nextComp[ len++ ] = ':'; + nextComp += len; + + if( (alias = hasAlias( plane ))) { + len += alias->getLength(); + ok = (maxLength > len); + *length = len; + if( ok) + strcpy( nextComp, alias->getCStringNoCopy()); + return( ok ); + } + + entry = this; + parent = entry->getParentEntry( plane ); + if( !parent) + // Error if not attached in plane + return( false); + + stack = OSArray::withCapacity( getDepth( plane )); + if( !stack) + return( false); + + RLOCK; + + root = gRegistryRoot->getChildEntry( plane ); + while( parent && (entry != root)) { + // stop below root + stack->setObject( (OSObject *) entry ); + entry = parent; + parent = entry->getParentEntry( plane ); + } + + index = stack->getCount(); + ok = true; + + if( 0 == index) { + + *nextComp++ = '/'; + *nextComp = 0; + len++; + + } else while( ok && ((--index) >= 0)) { + + entry = (IORegistryEntry *) stack->getObject((unsigned int) index ); + assert( entry ); + + if( (alias = entry->hasAlias( plane ))) { + len = plane->nameKey->getLength() + 1; + nextComp = path + len; + + compLen = alias->getLength(); + ok = 
(maxLength > len + compLen); + if( ok) + strcpy( nextComp, alias->getCStringNoCopy()); + } else { + compLen = maxLength - len; + ok = entry->getPathComponent( nextComp + 1, &compLen, plane ); + + if( ok && compLen) { + compLen++; + *nextComp = '/'; + } + } + + if( ok) { + len += compLen; + nextComp += compLen; + } + } + *length = len; + + UNLOCK; + + stack->release(); + + return( ok ); +} + +bool IORegistryEntry::getPathComponent( char * path, int * length, + const IORegistryPlane * plane ) const +{ + int len, locLen, maxLength; + const char * compName; + const char * loc; + bool ok; + + maxLength = *length; + + compName = getName( plane ); + len = strlen( compName ); + if( (loc = getLocation( plane ))) + locLen = 1 + strlen( loc ); + else + locLen = 0; + + ok = ((len + locLen) < maxLength); + if( ok) { + strcpy( path, compName ); + if( loc) { + path += len; + len += locLen; + *path++ = '@'; + strcpy( path, loc ); + } + *length = len; + } + + return( ok ); +} + +const char * IORegistryEntry::matchPathLocation( const char * cmp, + const IORegistryPlane * plane ) +{ + const char * str; + const char * result = 0; + int num1, num2; + char c1, c2; + + str = getLocation( plane ); + if( str) { + c2 = str[0]; + do { + num1 = strtoul( cmp, (char **) &cmp, 16 ); + if( c2) { + num2 = strtoul( str, (char **) &str, 16 ); + c2 = str[0]; + } else + num2 = 0; + + if( num1 != num2) + break; + + c1 = *cmp++; + + if( (c2 == ':') && (c2 == c1)) { + str++; + continue; + } + + if( ',' != c1) { + result = cmp - 1; + break; + } + + if( c2) { + if( c2 != ',') + break; + str++; + } + + } while( true); + } + + return( result ); +} + +IORegistryEntry * IORegistryEntry::getChildFromComponent( const char ** opath, + const IORegistryPlane * plane ) +{ + IORegistryEntry * entry = 0; + OSArray * set; + unsigned int index; + const char * path; + const char * cmp = 0; + size_t len; + const char * str; + + set = getChildSetReference( plane ); + if( set) { + + path = *opath; + + for( index = 0; + 
(entry = (IORegistryEntry *) set->getObject(index)); + index++ ) { + + cmp = path; + + if( *cmp != '@') { + str = entry->getName( plane ); + len = strlen( str ); + if( strncmp( str, cmp, len )) + continue; + cmp += len; + if( *cmp != '@' ) + break; + } + cmp++; + if( (cmp = entry->matchPathLocation( cmp, plane ))) + break; + } + if( entry) + *opath = cmp; + } + + return( entry ); +} + +const OSSymbol * IORegistryEntry::hasAlias( const IORegistryPlane * plane, + char * opath = 0, int * length = 0 ) const +{ + IORegistryEntry * entry; + IORegistryEntry * entry2; + const OSSymbol * key; + const OSSymbol * bestKey = 0; + OSIterator * iter; + OSData * data; + const char * path = "/aliases"; + + entry = IORegistryEntry::fromPath( path, plane ); + if( entry) { + RLOCK; + if( (iter = OSCollectionIterator::withCollection( + entry->getPropertyTable() ))) { + + while( (key = (OSSymbol *) iter->getNextObject())) { + + data = (OSData *) entry->getProperty( key ); + path = (const char *) data->getBytesNoCopy(); + if( (entry2 = IORegistryEntry::fromPath( path, plane, + opath, length ))) { + if( this == entry2) { + if( !bestKey + || (bestKey->getLength() > key->getLength())) + // pick the smallest alias + bestKey = key; + } + entry2->release(); + } + } + iter->release(); + } + entry->release(); + UNLOCK; + } + return( bestKey ); +} + +const char * IORegistryEntry::dealiasPath( + const char ** opath, + const IORegistryPlane * plane ) +{ + IORegistryEntry * entry; + OSData * data; + const char * path = *opath; + const char * rpath = 0; + const char * end; + char c; + char temp[ kIOMaxPlaneName + 1 ]; + + if( path[0] == '/') + return( rpath ); + + // check for alias + end = path; + while( (c = *end++) && (c != '/') && (c != ':')) + {} + end--; + if( (end - path) < kIOMaxPlaneName) { + strncpy( temp, path, end - path ); + temp[ end - path ] = 0; + + RLOCK; + entry = IORegistryEntry::fromPath( "/aliases", plane ); + if( entry) { + data = (OSData *) entry->getProperty( temp ); + if( 
data ) { + rpath = (const char *) data->getBytesNoCopy(); + if( rpath) + *opath = end; + } + entry->release(); + } + UNLOCK; + } + + return( rpath ); +} + +IORegistryEntry * IORegistryEntry::fromPath( + const char * path, + const IORegistryPlane * plane = 0, + char * opath = 0, + int * length = 0, + IORegistryEntry * fromEntry = 0 ) +{ + IORegistryEntry * where = 0; + IORegistryEntry * next; + const char * alias; + const char * end; + int len = 0; + int len2; + char c; + char temp[ kIOMaxPlaneName + 1 ]; + + if( 0 == path) + return( 0 ); + + if( 0 == plane) { + // get plane name + end = strchr( path, ':' ); + if( end && ((end - path) < kIOMaxPlaneName)) { + strncpy( temp, path, end - path ); + temp[ end - path ] = 0; + plane = getPlane( temp ); + path = end + 1; + } + } + if( 0 == plane) + return( 0 ); + + // check for alias + end = path; + if( (alias = dealiasPath( &end, plane))) { + if( length) + len = *length; + where = IORegistryEntry::fromPath( alias, plane, + opath, &len, fromEntry ); + if( where) + path = end; + else + len = 0; + } + + RLOCK; + + do { + if( 0 == where) { + if( (0 == fromEntry) && (*path++ == '/')) + fromEntry = gRegistryRoot->getChildEntry( plane ); + where = fromEntry; + if( 0 == where) + break; + } else { + c = *path++; + if( c != '/') { + if( c && (c != ':')) // check valid terminator + where = 0; + break; + } + } + next = where->getChildFromComponent( &path, plane ); + if( next) + where = next; + } while( next ); + + if( where) { + // check residual path + if( where != fromEntry) + path--; + + if( opath && length) { + // copy out residual path + len2 = len + strlen( path ); + if( len2 < *length) + strcpy( opath + len, path ); + *length = len2; + + } else if( path[0]) + // no residual path => must be no tail for success + where = 0; + } + + if( where) + where->retain(); + + UNLOCK; + + return( where ); +} + +IORegistryEntry * IORegistryEntry::childFromPath( + const char * path, + const IORegistryPlane * plane = 0, + char * opath = 0, + 
int * len = 0 ) +{ + return( IORegistryEntry::fromPath( path, plane, opath, len, this )); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define IOLinkIterator OSCollectionIterator + +#undef super +#define super OSObject + +inline bool IORegistryEntry::arrayMember( OSArray * set, + const IORegistryEntry * member, + unsigned int * index = 0 ) const +{ + int i; + OSObject * probeObject; + + for( i = 0; (probeObject = set->getObject(i)); i++) { + if (probeObject == (OSObject *) member) { + if( index) + *index = i; + return( true ); + } + } + return( false ); +} + +bool IORegistryEntry::makeLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const +{ + OSArray * links; + bool result = false; + + if( (links = (OSArray *) + registryTable()->getObject( plane->keys[ relation ] ))) { + + result = arrayMember( links, to ); + if( !result) + result = links->setObject( to ); + + } else { + + links = OSArray::withObjects( & (const OSObject *) to, 1, 1 ); + result = (links != 0); + if( result) { + result = registryTable()->setObject( plane->keys[ relation ], + links ); + links->release(); + } + } + + return( result); +} + +void IORegistryEntry::breakLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const +{ + OSArray * links; + unsigned int index; + + if( (links = (OSArray *) + registryTable()->getObject( plane->keys[ relation ]))) { + + if( arrayMember( links, to, &index )) { + links->removeObject( index ); + if( 0 == links->getCount()) + registryTable()->removeObject( plane->keys[ relation ]); + } + } +} + + +OSArray * IORegistryEntry::getParentSetReference( + const IORegistryPlane * plane ) const +{ + if( plane) + return( (OSArray *) registryTable()->getObject( + plane->keys[ kParentSetIndex ])); + else + return( 0 ); +} + +OSIterator * IORegistryEntry::getParentIterator( + const IORegistryPlane * plane ) const +{ + OSArray * links; + OSIterator * iter; + + if( !plane) 
+ return( 0 ); + + RLOCK; + links = getParentSetReference( plane ); + if( 0 == links) + links = OSArray::withCapacity( 1 ); + else + links = OSArray::withArray( links, links->getCount() ); + UNLOCK; + + iter = IOLinkIterator::withCollection( links ); + + if( links) + links->release(); + + return( iter ); +} + +IORegistryEntry * IORegistryEntry::getParentEntry( const IORegistryPlane * plane ) const +{ + IORegistryEntry * entry = 0; + OSArray * links; + + RLOCK; + + if( (links = getParentSetReference( plane ))) + entry = (IORegistryEntry *) links->getObject( 0 ); + + UNLOCK; + + return( entry); +} + +OSArray * IORegistryEntry::getChildSetReference( const IORegistryPlane * plane ) const +{ + if( plane) + return( (OSArray *) registryTable()->getObject( + plane->keys[ kChildSetIndex ])); + else + return( 0 ); +} + +OSIterator * IORegistryEntry::getChildIterator( const IORegistryPlane * plane ) const +{ + OSArray * links; + OSIterator * iter; + + if( !plane) + return( 0 ); + + RLOCK; + links = getChildSetReference( plane ); + if( 0 == links) + links = OSArray::withCapacity( 1 ); + else + links = OSArray::withArray( links, links->getCount() ); + UNLOCK; + + iter = IOLinkIterator::withCollection( links ); + + if( links) + links->release(); + + return( iter ); +} + + +IORegistryEntry * IORegistryEntry::getChildEntry( + const IORegistryPlane * plane ) const +{ + IORegistryEntry * entry = 0; + OSArray * links; + + RLOCK; + + if( (links = getChildSetReference( plane ))) + entry = (IORegistryEntry *) links->getObject( 0 ); + + UNLOCK; + + return( entry); +} + +void IORegistryEntry::applyToChildren( IORegistryEntryApplierFunction applier, + void * context, + const IORegistryPlane * plane ) const +{ + OSArray * array; + unsigned int index; + IORegistryEntry * next; + + if( !plane) + return; + + RLOCK; + array = OSArray::withArray( getChildSetReference( plane )); + UNLOCK; + if( array) { + for( index = 0; + (next = (IORegistryEntry *) array->getObject( index )); + index++) + 
(*applier)(next, context); + array->release(); + } +} + +void IORegistryEntry::applyToParents( IORegistryEntryApplierFunction applier, + void * context, + const IORegistryPlane * plane ) const +{ + OSArray * array; + unsigned int index; + IORegistryEntry * next; + + if( !plane) + return; + + RLOCK; + array = OSArray::withArray( getParentSetReference( plane )); + UNLOCK; + if( array) { + for( index = 0; + (next = (IORegistryEntry *) array->getObject( index )); + index++) + (*applier)(next, context); + array->release(); + } +} + +bool IORegistryEntry::isChild( IORegistryEntry * child, + const IORegistryPlane * plane, + bool onlyChild = false ) const +{ + OSArray * links; + bool ret = false; + + RLOCK; + + if( (links = getChildSetReference( plane ))) { + if( (!onlyChild) || (1 == links->getCount())) + ret = arrayMember( links, child ); + } + if( ret && (links = child->getParentSetReference( plane ))) + ret = arrayMember( links, this ); + + UNLOCK; + + return( ret); +} + +bool IORegistryEntry::isParent( IORegistryEntry * parent, + const IORegistryPlane * plane, + bool onlyParent = false ) const + +{ + OSArray * links; + bool ret = false; + + RLOCK; + + if( (links = getParentSetReference( plane ))) { + if( (!onlyParent) || (1 == links->getCount())) + ret = arrayMember( links, parent ); + } + if( ret && (links = parent->getChildSetReference( plane ))) + ret = arrayMember( links, this ); + + UNLOCK; + + return( ret); +} + +bool IORegistryEntry::inPlane( const IORegistryPlane * plane ) const +{ + bool ret; + + RLOCK; + + ret = (0 != getParentSetReference( plane )); + + UNLOCK; + + return( ret ); +} + +bool IORegistryEntry::attachToParent( IORegistryEntry * parent, + const IORegistryPlane * plane ) +{ + OSArray * links; + bool ret; + bool needParent; + + if( this == parent) + return( false ); + + WLOCK; + + ret = makeLink( parent, kParentSetIndex, plane ); + + if( (links = parent->getChildSetReference( plane ))) + needParent = (false == arrayMember( links, this )); + else + 
needParent = true; + +// ret &= parent->makeLink( this, kChildSetIndex, plane ); + + UNLOCK; + + if( needParent) + ret &= parent->attachToChild( this, plane ); + + return( ret ); +} + +bool IORegistryEntry::attachToChild( IORegistryEntry * child, + const IORegistryPlane * plane ) +{ + OSArray * links; + bool ret; + bool needChild; + + if( this == child) + return( false ); + + WLOCK; + + ret = makeLink( child, kChildSetIndex, plane ); + + if( (links = child->getParentSetReference( plane ))) + needChild = (false == arrayMember( links, this )); + else + needChild = true; + + UNLOCK; + + if( needChild) + ret &= child->attachToParent( this, plane ); + + return( ret ); +} + +void IORegistryEntry::detachFromParent( IORegistryEntry * parent, + const IORegistryPlane * plane ) +{ + OSArray * links; + bool needParent; + + WLOCK; + + parent->retain(); + + breakLink( parent, kParentSetIndex, plane ); + + if( (links = parent->getChildSetReference( plane ))) + needParent = arrayMember( links, this ); + else + needParent = false; + +// parent->breakLink( this, kChildSetIndex, plane ); + + UNLOCK; + + if( needParent) + parent->detachFromChild( this, plane ); + + parent->release(); +} + +void IORegistryEntry::detachFromChild( IORegistryEntry * child, + const IORegistryPlane * plane ) +{ + OSArray * links; + bool needChild; + + WLOCK; + + child->retain(); + + breakLink( child, kChildSetIndex, plane ); + + if( (links = child->getParentSetReference( plane ))) + needChild = arrayMember( links, this ); + else + needChild = false; + + UNLOCK; + + if( needChild) + child->detachFromParent( this, plane ); + + child->release(); +} + +void IORegistryEntry::detachAbove( const IORegistryPlane * plane ) +{ + IORegistryEntry * parent; + + retain(); + while( (parent = getParentEntry( plane ))) + detachFromParent( parent, plane ); + release(); +} + +void IORegistryEntry::detachAll( const IORegistryPlane * plane ) +{ + OSOrderedSet * all; + IORegistryEntry * next; + IORegistryIterator * regIter; + + 
regIter = IORegistryIterator::iterateOver( this, plane, true ); + if( 0 == regIter) + return; + all = regIter->iterateAll(); + regIter->release(); + + detachAbove( plane ); + if( all) { + while( (next = (IORegistryEntry *) all->getLastObject())) { + + next->retain(); + all->removeObject(next); + + next->detachAbove( plane ); + next->release(); + } + all->release(); + } +} + +unsigned int IORegistryEntry::getDepth( const IORegistryPlane * plane ) const +{ + unsigned int depth = 1; + OSArray * parents; + unsigned int oneDepth, maxParentDepth, count; + IORegistryEntry * one; + const IORegistryEntry * next; + unsigned int index; + + RLOCK; + + next = this; + while( (parents = next->getParentSetReference( plane ))) { + + count = parents->getCount(); + if( 0 == count) + break; + if( 1 == count) { + depth++; + next = (IORegistryEntry *) parents->getObject( 0 ); + } else { + // painful + maxParentDepth = 0; + for( index = 0; + (one = (IORegistryEntry *) parents->getObject( index )); + index++ ) { + oneDepth = one->getDepth( plane ); + if( oneDepth > maxParentDepth) + maxParentDepth = oneDepth; + } + depth += maxParentDepth; + break; + } + } + + UNLOCK; + + return( depth); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super OSIterator + +OSDefineMetaClassAndStructors(IORegistryIterator, OSIterator) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +IORegistryIterator * +IORegistryIterator::iterateOver( IORegistryEntry * root, + const IORegistryPlane * plane, + IOOptionBits options = 0 ) +{ + IORegistryIterator * create; + + if( 0 == root) + return( 0); + if( 0 == plane) + return( 0); + + create = new IORegistryIterator; + if( create) { + if( create->init()) { + + root->retain(); + create->root = root; + create->where = &create->start; + create->start.current = root; + create->plane = plane; + create->options = options; + + } else { + create->release(); + create = 0; + } + } + return( 
create); +} + +IORegistryIterator * +IORegistryIterator::iterateOver( const IORegistryPlane * plane, + IOOptionBits options = 0 ) +{ + return( iterateOver( gRegistryRoot, plane, options )); +} + +bool IORegistryIterator::isValid( void ) +{ + bool ok; + IORegCursor * next; + + ok = true; + next = where; + + RLOCK; + while( ok && next) { + if( where->iter) + ok = where->iter->isValid(); + next = next->next; + } + UNLOCK; + + return( ok); +} + +void IORegistryIterator::enterEntry( const IORegistryPlane * enterPlane ) +{ + IORegCursor * prev; + + prev = where; + where = (IORegCursor *) IOMalloc( sizeof( IORegCursor)); + assert( where); + + if( where) { + where->iter = 0; + where->next = prev; + where->current = prev->current; + plane = enterPlane; + } +} + +void IORegistryIterator::enterEntry( void ) +{ + enterEntry( plane ); +} + +bool IORegistryIterator::exitEntry( void ) +{ + IORegCursor * gone; + + if( where->iter) { + where->iter->release(); + where->iter = 0; + if( where->current)// && (where != &start)) + where->current->release(); + } + + if( where != &start) { + gone = where; + where = gone->next; + IOFree( gone, sizeof( IORegCursor)); + return( true); + + } else + return( false); +} + +void IORegistryIterator::reset( void ) +{ + while( exitEntry()) + {} + + if( done) { + done->release(); + done = 0; + } + + where->current = root; +} + +void IORegistryIterator::free( void ) +{ + reset(); + + if( root) + root->release(); + + super::free(); +} + + +IORegistryEntry * IORegistryIterator::getNextObjectFlat( void ) +{ + IORegistryEntry * next = 0; + OSArray * links = 0; + + RLOCK; + + if( (0 == where->iter)) { + // just entered - create new iter + if( isValid() + && where->current + && (links = ( (options & kIORegistryIterateParents) ? 
+ where->current->getParentSetReference( plane ) : + where->current->getChildSetReference( plane ) )) ) + + where->iter = OSCollectionIterator::withCollection( links ); + + } else + // next sibling - release current + if( where->current) + where->current->release(); + + if( where->iter) + next = (IORegistryEntry *) where->iter->getNextObject(); + + if( next) + next->retain(); + + where->current = next; + + UNLOCK; + + return( next); +} + +IORegistryEntry * IORegistryIterator::getNextObjectRecursive( void ) +{ + IORegistryEntry * next; + + do + next = getNextObjectFlat(); + while( (0 == next) && exitEntry()); + + if( next) { + if( 0 == done) + done = OSOrderedSet::withCapacity( 10 ); + if( done->setObject((OSObject *) next)) { + // done set didn't contain this one, so recurse + enterEntry(); + } + } + return( next); +} + +IORegistryEntry * IORegistryIterator::getNextObject( void ) +{ + if( options & kIORegistryIterateRecursively) + return( getNextObjectRecursive()); + else + return( getNextObjectFlat()); +} + +IORegistryEntry * IORegistryIterator::getCurrentEntry( void ) +{ + if( isValid()) + return( where->current); + else + return( 0); +} + +OSOrderedSet * IORegistryIterator::iterateAll( void ) +{ + reset(); + while( getNextObjectRecursive()) + {} + if( done) + done->retain(); + return( done); +} + +OSMetaClassDefineReservedUnused(IORegistryEntry, 0); +OSMetaClassDefineReservedUnused(IORegistryEntry, 1); +OSMetaClassDefineReservedUnused(IORegistryEntry, 2); +OSMetaClassDefineReservedUnused(IORegistryEntry, 3); +OSMetaClassDefineReservedUnused(IORegistryEntry, 4); +OSMetaClassDefineReservedUnused(IORegistryEntry, 5); +OSMetaClassDefineReservedUnused(IORegistryEntry, 6); +OSMetaClassDefineReservedUnused(IORegistryEntry, 7); +OSMetaClassDefineReservedUnused(IORegistryEntry, 8); +OSMetaClassDefineReservedUnused(IORegistryEntry, 9); +OSMetaClassDefineReservedUnused(IORegistryEntry, 10); +OSMetaClassDefineReservedUnused(IORegistryEntry, 11); 
+OSMetaClassDefineReservedUnused(IORegistryEntry, 12); +OSMetaClassDefineReservedUnused(IORegistryEntry, 13); +OSMetaClassDefineReservedUnused(IORegistryEntry, 14); +OSMetaClassDefineReservedUnused(IORegistryEntry, 15); +OSMetaClassDefineReservedUnused(IORegistryEntry, 16); +OSMetaClassDefineReservedUnused(IORegistryEntry, 17); +OSMetaClassDefineReservedUnused(IORegistryEntry, 18); +OSMetaClassDefineReservedUnused(IORegistryEntry, 19); +OSMetaClassDefineReservedUnused(IORegistryEntry, 20); +OSMetaClassDefineReservedUnused(IORegistryEntry, 21); +OSMetaClassDefineReservedUnused(IORegistryEntry, 22); +OSMetaClassDefineReservedUnused(IORegistryEntry, 23); +OSMetaClassDefineReservedUnused(IORegistryEntry, 24); +OSMetaClassDefineReservedUnused(IORegistryEntry, 25); +OSMetaClassDefineReservedUnused(IORegistryEntry, 26); +OSMetaClassDefineReservedUnused(IORegistryEntry, 27); +OSMetaClassDefineReservedUnused(IORegistryEntry, 28); +OSMetaClassDefineReservedUnused(IORegistryEntry, 29); +OSMetaClassDefineReservedUnused(IORegistryEntry, 30); +OSMetaClassDefineReservedUnused(IORegistryEntry, 31); diff --git a/iokit/Kernel/IOService.cpp b/iokit/Kernel/IOService.cpp new file mode 100644 index 000000000..51a544089 --- /dev/null +++ b/iokit/Kernel/IOService.cpp @@ -0,0 +1,3865 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1991-1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 29-Jan-91 Portions from IODevice.m, Doug Mitchell at NeXT, Created. + * 18-Jun-98 start IOKit objc + * 10-Nov-98 start iokit cpp + * 25-Feb-99 sdouglas, add threads and locks to ensure deadlock + * + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#define LESS_THREAD_CREATE +//#define LOG kprintf +#define LOG IOLog + +#include "IOServicePrivate.h" +#include + +#include + +#include + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define super IORegistryEntry + +OSDefineMetaClassAndStructors(IOService, IORegistryEntry) + +OSDefineMetaClassAndStructors(_IOServiceNotifier, IONotifier) + +OSDefineMetaClassAndStructors(_IOServiceInterestNotifier, IONotifier) + +OSDefineMetaClassAndStructors(_IOConfigThread, OSObject) + +OSDefineMetaClassAndStructors(_IOServiceJob, OSObject) + +OSDefineMetaClassAndStructors(IOResources, IOService) + +OSDefineMetaClassAndStructors(_IOOpenServiceIterator, OSIterator) + +OSDefineMetaClassAndAbstractStructors(IONotifier, OSObject) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +static IOPlatformExpert * gIOPlatform; +const IORegistryPlane * gIOServicePlane; +const IORegistryPlane * gIOPowerPlane; +const OSSymbol * gIODeviceMemoryKey; +const OSSymbol * gIOInterruptControllersKey; +const 
OSSymbol * gIOInterruptSpecifiersKey; + +const OSSymbol * gIOResourcesKey; +const OSSymbol * gIOResourceMatchKey; +const OSSymbol * gIOProviderClassKey; +const OSSymbol * gIONameMatchKey; +const OSSymbol * gIONameMatchedKey; +const OSSymbol * gIOPropertyMatchKey; +const OSSymbol * gIOLocationMatchKey; +const OSSymbol * gIOPathMatchKey; +const OSSymbol * gIOMatchCategoryKey; +const OSSymbol * gIODefaultMatchCategoryKey; +const OSSymbol * gIOMatchedServiceCountKey; + +const OSSymbol * gIOUserClientClassKey; +const OSSymbol * gIOKitDebugKey; + +const OSSymbol * gIOCommandPoolSizeKey; + +static int gIOResourceGenerationCount; + +const OSSymbol * gIOServiceKey; +const OSSymbol * gIOPublishNotification; +const OSSymbol * gIOFirstPublishNotification; +const OSSymbol * gIOMatchedNotification; +const OSSymbol * gIOFirstMatchNotification; +const OSSymbol * gIOTerminatedNotification; + +const OSSymbol * gIOGeneralInterest; +const OSSymbol * gIOBusyInterest; +const OSSymbol * gIOAppPowerStateInterest; + +static OSDictionary * gNotifications; +static IORecursiveLock * gNotificationLock; + +static IOService * gIOResources; +static IOService * gIOServiceRoot; + +static OSOrderedSet * gJobs; +static semaphore_port_t gJobsSemaphore; +static IOLock * gJobsLock; +static int gOutstandingJobs; +static int gNumConfigThreads; +static int gNumWaitingThreads; +static IOLock * gIOServiceBusyLock; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#define LOCKREADNOTIFY() \ + IORecursiveLockLock( gNotificationLock ) +#define LOCKWRITENOTIFY() \ + IORecursiveLockLock( gNotificationLock ) +#define LOCKWRITE2READNOTIFY() +#define UNLOCKNOTIFY() \ + IORecursiveLockUnlock( gNotificationLock ) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +struct ArbitrationLockQueueElement { + queue_chain_t link; + IOThread thread; + IOService * service; + unsigned count; + bool required; + bool aborted; +}; + +static queue_head_t 
gArbitrationLockQueueActive; +static queue_head_t gArbitrationLockQueueWaiting; +static queue_head_t gArbitrationLockQueueFree; +static IOLock * gArbitrationLockQueueLock; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +void IOService::initialize( void ) +{ + kern_return_t err; + + gIOServicePlane = IORegistryEntry::makePlane( kIOServicePlane ); + gIOPowerPlane = IORegistryEntry::makePlane( kIOPowerPlane ); + + gIOProviderClassKey = OSSymbol::withCStringNoCopy( kIOProviderClassKey ); + gIONameMatchKey = OSSymbol::withCStringNoCopy( kIONameMatchKey ); + gIONameMatchedKey = OSSymbol::withCStringNoCopy( kIONameMatchedKey ); + gIOPropertyMatchKey = OSSymbol::withCStringNoCopy( kIOPropertyMatchKey ); + gIOPathMatchKey = OSSymbol::withCStringNoCopy( kIOPathMatchKey ); + gIOLocationMatchKey = OSSymbol::withCStringNoCopy( kIOLocationMatchKey ); + + gIOMatchCategoryKey = OSSymbol::withCStringNoCopy( kIOMatchCategoryKey ); + gIODefaultMatchCategoryKey = OSSymbol::withCStringNoCopy( + kIODefaultMatchCategoryKey ); + gIOMatchedServiceCountKey = OSSymbol::withCStringNoCopy( + kIOMatchedServiceCountKey ); + + gIOUserClientClassKey = OSSymbol::withCStringNoCopy( kIOUserClientClassKey ); + + gIOResourcesKey = OSSymbol::withCStringNoCopy( kIOResourcesClass ); + gIOResourceMatchKey = OSSymbol::withCStringNoCopy( kIOResourceMatchKey ); + + gIODeviceMemoryKey = OSSymbol::withCStringNoCopy( "IODeviceMemory" ); + gIOInterruptControllersKey + = OSSymbol::withCStringNoCopy("IOInterruptControllers"); + gIOInterruptSpecifiersKey + = OSSymbol::withCStringNoCopy("IOInterruptSpecifiers"); + + gIOKitDebugKey = OSSymbol::withCStringNoCopy( kIOKitDebugKey ); + + gIOCommandPoolSizeKey = OSSymbol::withCStringNoCopy( kIOCommandPoolSizeKey ); + + gIOGeneralInterest = OSSymbol::withCStringNoCopy( kIOGeneralInterest ); + gIOBusyInterest = OSSymbol::withCStringNoCopy( kIOBusyInterest ); + gIOAppPowerStateInterest = OSSymbol::withCStringNoCopy( kIOAppPowerStateInterest 
); + + gNotifications = OSDictionary::withCapacity( 1 ); + gIOPublishNotification = OSSymbol::withCStringNoCopy( + kIOPublishNotification ); + gIOFirstPublishNotification = OSSymbol::withCStringNoCopy( + kIOFirstPublishNotification ); + gIOMatchedNotification = OSSymbol::withCStringNoCopy( + kIOMatchedNotification ); + gIOFirstMatchNotification = OSSymbol::withCStringNoCopy( + kIOFirstMatchNotification ); + gIOTerminatedNotification = OSSymbol::withCStringNoCopy( + kIOTerminatedNotification ); + gIOServiceKey = OSSymbol::withCStringNoCopy( kIOServiceClass); + + gNotificationLock = IORecursiveLockAlloc(); + + assert( gIOServicePlane && gIODeviceMemoryKey + && gIOInterruptControllersKey && gIOInterruptSpecifiersKey + && gIOResourcesKey && gNotifications && gNotificationLock + && gIOProviderClassKey && gIONameMatchKey && gIONameMatchedKey + && gIOMatchCategoryKey && gIODefaultMatchCategoryKey + && gIOPublishNotification && gIOMatchedNotification + && gIOTerminatedNotification && gIOServiceKey ); + + gJobsLock = IOLockAlloc(); + gJobs = OSOrderedSet::withCapacity( 10 ); + + gIOServiceBusyLock = IOLockAlloc(); + + err = semaphore_create(kernel_task, &gJobsSemaphore, SYNC_POLICY_FIFO, 0); + + assert( gIOServiceBusyLock && gJobs && gJobsLock && (err == KERN_SUCCESS) ); + + gIOResources = IOResources::resources(); + assert( gIOResources ); + + gArbitrationLockQueueLock = IOLockAlloc(); + queue_init(&gArbitrationLockQueueActive); + queue_init(&gArbitrationLockQueueWaiting); + queue_init(&gArbitrationLockQueueFree); + + assert( gArbitrationLockQueueLock ); + +#ifdef LESS_THREAD_CREATE + for( int i = 0; i < kMaxConfigThreads; i++) + _IOConfigThread::configThread(); +#endif + +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#if IOMATCHDEBUG +static UInt64 getDebugFlags( OSDictionary * props ) +{ + OSNumber * debugProp; + UInt64 debugFlags; + + debugProp = OSDynamicCast( OSNumber, + props->getObject( gIOKitDebugKey )); + if( debugProp) + 
debugFlags = debugProp->unsigned64BitValue(); + else + debugFlags = gIOKitDebug; + + return( debugFlags ); +} +#endif + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// Probe a matched service and return an instance to be started. +// The default score is from the property table, & may be altered +// during probe to change the start order. + +IOService * IOService::probe( IOService * /* provider */, + SInt32 * /* score */) +{ + return( this ); +} + +bool IOService::start( IOService * provider ) +{ + return( true ); +} + +void IOService::stop( IOService * /* provider */ ) +{ +} + +/* + * Attach in service plane + */ +bool IOService::attach( IOService * provider ) +{ + bool ok; + + if( provider) { + + if( gIOKitDebug & kIOLogAttach) + LOG( "%s::attach(%s)\n", getName(), + provider->getName()); + + provider->lockForArbitration(); + if( provider->__state[0] & kIOServiceInactiveState) + ok = false; + else + ok = attachToParent( provider, gIOServicePlane); + provider->unlockForArbitration(); + + } else { + + gIOServiceRoot = this; + ok = attachToParent( getRegistryRoot(), gIOServicePlane); + gIOResources->attachToParent( getRegistryRoot(), + gIOServicePlane ); + publishResource("IOKit"); + } + + return( ok ); +} + +IOService * IOService::getServiceRoot( void ) +{ + return( gIOServiceRoot ); +} + +void IOService::detach( IOService * provider ) +{ + if( gIOKitDebug & kIOLogAttach) + LOG("%s::detach(%s)\n", getName(), provider->getName()); + + lockForArbitration(); + + if( (__state[1] & kIOServiceBusyStateMask) + && (provider == getProvider())) + provider->adjustBusy( -1 ); + + detachFromParent( provider, gIOServicePlane ); + + unlockForArbitration(); +} + +/* + * Register instance - publish it for matching + */ + +void IOService::registerService( IOOptionBits options = 0 ) +{ + char * pathBuf; + const char * path; + char * skip; + int len; + enum { kMaxPathLen = 256 }; + enum { kMaxChars = 63 }; + + IORegistryEntry * parent = this; + 
IORegistryEntry * root = getRegistryRoot(); + while( parent && (parent != root)) + parent = parent->getParentEntry( gIOServicePlane); + + if( parent != root) { + IOLog("%s: not registry member at registerService()\n", getName()); + return; + } + + // Allow the Platform Expert to adjust this node. + if( gIOPlatform && (!gIOPlatform->platformAdjustService(this))) + return; + + if( (this != gIOResources) + && (kIOLogRegister & gIOKitDebug)) { + + pathBuf = (char *) IOMalloc( kMaxPathLen ); + + IOLog( "Registering: " ); + + len = kMaxPathLen; + if( pathBuf && getPath( pathBuf, &len, gIOServicePlane)) { + + path = pathBuf; + if( len > kMaxChars) { + IOLog(".."); + len -= kMaxChars; + path += len; + if( (skip = strchr( path, '/'))) + path = skip; + } + } else + path = getName(); + + IOLog( "%s\n", path ); + + if( pathBuf) + IOFree( pathBuf, kMaxPathLen ); + } + + startMatching( options ); +} + +void IOService::startMatching( IOOptionBits options = 0 ) +{ + IOService * provider; + bool needConfig; + bool ok; + bool sync; + + lockForArbitration(); + + sync = (options & kIOServiceSynchronous) + || ((provider = getProvider()) + && (provider->__state[1] & kIOServiceSynchronousState)); + if( sync) + __state[1] |= kIOServiceSynchronousState; + else + __state[1] &= ~kIOServiceSynchronousState; + + needConfig = (0 == (__state[1] & kIOServiceConfigState)) + && (0 == (__state[0] & kIOServiceInactiveState)) + && (kIOServiceRegisteredState != + (__state[0] & (kIOServiceRegisteredState + | kIOServiceMatchedState))); + __state[1] |= kIOServiceConfigState; +// __state[0] &= ~kIOServiceInactiveState; + +// if( sync) LOG("OSKernelStackRemaining = %08x @ %s\n", +// OSKernelStackRemaining(), getName()); + + unlockForArbitration(); + + if( needConfig) { + adjustBusy( 1 ); + if( sync) { + doServiceMatch( options ); + waitQuiet(); + } else + ok = (0 != _IOServiceJob::startJob( this, kMatchNubJob, options )); + } +} + +IOReturn IOService::catalogNewDrivers( OSOrderedSet * newTables ) +{ + 
OSDictionary * table; + OSIterator * iter; + IOService * service; +#if IOMATCHDEBUG + SInt32 count = 0; +#endif + + newTables->retain(); + + while( (table = (OSDictionary *) newTables->getFirstObject())) { + + LOCKWRITENOTIFY(); + iter = (OSIterator *) getExistingServices( table, + kIOServiceMatchedState ); + UNLOCKNOTIFY(); + if( iter) { + while( (service = (IOService *) iter->getNextObject())) { + service->startMatching(); +#if IOMATCHDEBUG + count++; +#endif + } + iter->release(); + } +#if IOMATCHDEBUG + if( getDebugFlags( table ) & kIOLogMatch) + LOG("Matching service count = %ld\n", count); +#endif + newTables->removeObject(table); + } + + newTables->release(); + + return( kIOReturnSuccess ); +} + + _IOServiceJob * _IOServiceJob::startJob( IOService * nub, int type, + IOOptionBits options = 0 ) +{ + _IOServiceJob * job; + + job = new _IOServiceJob; + if( job && !job->init()) { + job->release(); + job = 0; + } + + if( job) { + job->type = type; + job->nub = nub; + job->options = options; + nub->retain(); // thread will release() + pingConfig( job ); + } + + return( job ); +} + +/* + * Called on a registered service to see if it matches + * a property table. + */ + +bool IOService::matchPropertyTable( OSDictionary * table, SInt32 * score ) +{ + return( matchPropertyTable(table) ); +} + +bool IOService::matchPropertyTable( OSDictionary * table ) +{ + return( true ); +} + +/* + * Called on a matched service to allocate resources + * before first driver is attached. 
+ */ + +IOReturn IOService::getResources( void ) +{ + return( kIOReturnSuccess); +} + +/* + * Client/provider accessors + */ + +IOService * IOService::getProvider( void ) const +{ + IOService * self = (IOService *) this; + IOService * parent; + SInt32 generation; + + parent = __provider; + generation = getGenerationCount(); + if( __providerGeneration == generation) + return( parent ); + + parent = (IOService *) getParentEntry( gIOServicePlane); + if( parent == IORegistryEntry::getRegistryRoot()) + /* root is not an IOService */ + parent = 0; + + self->__provider = parent; + // save the count before getParentEntry() + self->__providerGeneration = generation; + + return( parent ); +} + +IOWorkLoop * IOService::getWorkLoop() const +{ + IOService *provider = getProvider(); + + if (provider) + return provider->getWorkLoop(); + else + return 0; +} + +OSIterator * IOService::getProviderIterator( void ) const +{ + return( getParentIterator( gIOServicePlane)); +} + +IOService * IOService::getClient( void ) const +{ + return( (IOService *) getChildEntry( gIOServicePlane)); +} + +OSIterator * IOService::getClientIterator( void ) const +{ + return( getChildIterator( gIOServicePlane)); +} + +OSIterator * _IOOpenServiceIterator::iterator( OSIterator * _iter, + const IOService * client, + const IOService * provider ) +{ + _IOOpenServiceIterator * inst; + + if( !_iter) + return( 0 ); + + inst = new _IOOpenServiceIterator; + + if( inst && !inst->init()) { + inst->release(); + inst = 0; + } + if( inst) { + inst->iter = _iter; + inst->client = client; + inst->provider = provider; + } + + return( inst ); +} + +void _IOOpenServiceIterator::free() +{ + iter->release(); + if( last) + last->unlockForArbitration(); + OSIterator::free(); +} + +OSObject * _IOOpenServiceIterator::getNextObject() +{ + IOService * next; + + if( last) + last->unlockForArbitration(); + + while( (next = (IOService *) iter->getNextObject())) { + + next->lockForArbitration(); + if( (client && (next->isOpen( client 
))) + || (provider && (provider->isOpen( next ))) ) + break; + next->unlockForArbitration(); + } + + last = next; + + return( next ); +} + +bool _IOOpenServiceIterator::isValid() +{ + return( iter->isValid() ); +} + +void _IOOpenServiceIterator::reset() +{ + if( last) { + last->unlockForArbitration(); + last = 0; + } + iter->reset(); +} + +OSIterator * IOService::getOpenProviderIterator( void ) const +{ + return( _IOOpenServiceIterator::iterator( getProviderIterator(), this, 0 )); +} + +OSIterator * IOService::getOpenClientIterator( void ) const +{ + return( _IOOpenServiceIterator::iterator( getClientIterator(), 0, this )); +} + + +IOReturn IOService::callPlatformFunction( const OSSymbol * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ) +{ + IOReturn result = kIOReturnUnsupported; + IOService *provider = getProvider(); + + if (provider != 0) { + result = provider->callPlatformFunction(functionName, waitForFunction, + param1, param2, param3, param4); + } + + return result; +} + +IOReturn IOService::callPlatformFunction( const char * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ) +{ + IOReturn result = kIOReturnNoMemory; + const OSSymbol *functionSymbol = OSSymbol::withCString(functionName); + + if (functionSymbol != 0) { + result = callPlatformFunction(functionSymbol, waitForFunction, + param1, param2, param3, param4); + functionSymbol->release(); + } + + return result; +} + + +/* + * Platform expert accessors + */ + +IOPlatformExpert * IOService::getPlatform( void ) +{ + return( gIOPlatform); +} + +void IOService::setPlatform( IOPlatformExpert * platform) +{ + gIOPlatform = platform; +} + +/* + * Stacking change + */ + +bool IOService::lockForArbitration( bool isSuccessRequired = true ) +{ + bool found; + bool success; + ArbitrationLockQueueElement * element; + ArbitrationLockQueueElement * active; + ArbitrationLockQueueElement * waiting; + + enum { 
kPutOnFreeQueue, kPutOnActiveQueue, kPutOnWaitingQueue } action; + + // lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + // obtain an unused queue element + if( !queue_empty( &gArbitrationLockQueueFree )) { + queue_remove_first( &gArbitrationLockQueueFree, + element, + ArbitrationLockQueueElement *, + link ); + } else { + element = IONew( ArbitrationLockQueueElement, 1 ); + assert( element ); + } + + // prepare the queue element + element->thread = IOThreadSelf(); + element->service = this; + element->count = 1; + element->required = isSuccessRequired; + element->aborted = false; + + // determine whether this object is already locked (ie. on active queue) + found = false; + queue_iterate( &gArbitrationLockQueueActive, + active, + ArbitrationLockQueueElement *, + link ) + { + if( active->service == element->service ) { + found = true; + break; + } + } + + if( found ) { // this object is already locked + + // determine whether it is the same or a different thread trying to lock + if( active->thread != element->thread ) { // it is a different thread + + ArbitrationLockQueueElement * victim = 0; + + // before placing this new thread on the waiting queue, we look for + // a deadlock cycle... 
+ + while( 1 ) { + // determine whether the active thread holding the object we + // want is waiting for another object to be unlocked + found = false; + queue_iterate( &gArbitrationLockQueueWaiting, + waiting, + ArbitrationLockQueueElement *, + link ) + { + if( waiting->thread == active->thread ) { + assert( false == waiting->aborted ); + found = true; + break; + } + } + + if( found ) { // yes, active thread waiting for another object + + // this may be a candidate for rejection if the required + // flag is not set, should we detect a deadlock later on + if( false == waiting->required ) + victim = waiting; + + // find the thread that is holding this other object, that + // is blocking the active thread from proceeding (fun :-) + found = false; + queue_iterate( &gArbitrationLockQueueActive, + active, // (reuse active queue element) + ArbitrationLockQueueElement *, + link ) + { + if( active->service == waiting->service ) { + found = true; + break; + } + } + + // someone must be holding it or it wouldn't be waiting + assert( found ); + + if( active->thread == element->thread ) { + + // doh, it's waiting for the thread that originated + // this whole lock (ie. current thread) -> deadlock + if( false == element->required ) { // willing to fail? + + // the originating thread doesn't have the required + // flag, so it can fail + success = false; // (fail originating lock request) + break; // (out of while) + + } else { // originating thread is not willing to fail + + // see if we came across a waiting thread that did + // not have the 'required' flag set: we'll fail it + if( victim ) { + + // we do have a willing victim, fail it's lock + victim->aborted = true; + + // take the victim off the waiting queue + queue_remove( &gArbitrationLockQueueWaiting, + victim, + ArbitrationLockQueueElement *, + link ); + + // wake the victim + thread_wakeup_one(victim); + + // allow this thread to proceed (ie. 
wait) + success = true; // (put request on wait queue) + break; // (out of while) + } else { + + // all the waiting threads we came across in + // finding this loop had the 'required' flag + // set, so we've got a deadlock we can't avoid + panic("I/O Kit: Unrecoverable deadlock."); + } + } + } else { + // repeat while loop, redefining active thread to be the + // thread holding "this other object" (see above), and + // looking for threads waiting on it; note the active + // variable points to "this other object" already... so + // there nothing to do in this else clause. + } + } else { // no, active thread is not waiting for another object + + success = true; // (put request on wait queue) + break; // (out of while) + } + } // while forever + + if( success ) { // put the request on the waiting queue? + kern_return_t wait_result; + + // place this thread on the waiting queue and put it to sleep; + // we place it at the tail of the queue... + queue_enter( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ); + + // declare that this thread will wait for a given event +restart_sleep: assert_wait( element, + element->required ? THREAD_UNINT + : THREAD_INTERRUPTIBLE ); + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); + + // put thread to sleep, waiting for our event to fire... + wait_result = thread_block((void (*)(void)) 0); + + // ...and we've been woken up; we might be in one of two states: + // (a) we've been aborted and our queue element is not on + // any of the three queues, but is floating around + // (b) we're allowed to proceed with the lock and we have + // already been moved from the waiting queue to the + // active queue. 
+ // ...plus a 3rd state, should the thread have been interrupted: + // (c) we're still on the waiting queue + + // determine whether we were interrupted out of our sleep + if( THREAD_INTERRUPTED == wait_result ) { + + // re-lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + // determine whether we're still on the waiting queue + found = false; + queue_iterate( &gArbitrationLockQueueWaiting, + waiting, // (reuse waiting queue element) + ArbitrationLockQueueElement *, + link ) + { + if( waiting == element ) { + found = true; + break; + } + } + + if( found ) { // yes, we're still on the waiting queue + + // determine whether we're willing to fail + if( false == element->required ) { + + // mark us as aborted + element->aborted = true; + + // take us off the waiting queue + queue_remove( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ); + } else { // we are not willing to fail + + // ignore interruption, go back to sleep + goto restart_sleep; + } + } + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); + + // proceed as though this were a normal wake up + wait_result = THREAD_AWAKENED; + } + + assert( THREAD_AWAKENED == wait_result ); + + // determine whether we've been aborted while we were asleep + if( element->aborted ) { + assert( false == element->required ); + + // re-lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + action = kPutOnFreeQueue; + success = false; + } else { // we weren't aborted, so we must be ready to go :-) + + // we've already been moved from waiting to active queue + return true; + } + + } else { // the lock request is to be failed + + // return unused queue element to queue + action = kPutOnFreeQueue; + } + } else { // it is the same thread, recursive access is allowed + + // add one level of recursion + active->count++; + + // return unused queue element to queue + action = kPutOnFreeQueue; + success = true; + } + } else { // this object is not already locked, 
so let this thread through + action = kPutOnActiveQueue; + success = true; + } + + // put the new element on a queue + if( kPutOnActiveQueue == action ) { + queue_enter( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ); + } else if( kPutOnFreeQueue == action ) { + queue_enter( &gArbitrationLockQueueFree, + element, + ArbitrationLockQueueElement *, + link ); + } else { + assert( 0 ); // kPutOnWaitingQueue never occurs, handled specially above + } + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); + + return( success ); +} + +void IOService::unlockForArbitration( void ) +{ + bool found; + ArbitrationLockQueueElement * element; + + // lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + // find the lock element for this object (ie. on active queue) + found = false; + queue_iterate( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ) + { + if( element->service == this ) { + found = true; + break; + } + } + + assert( found ); + + // determine whether the lock has been taken recursively + if( element->count > 1 ) { + // undo one level of recursion + element->count--; + + } else { + + // remove it from the active queue + queue_remove( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ); + + // put it on the free queue + queue_enter( &gArbitrationLockQueueFree, + element, + ArbitrationLockQueueElement *, + link ); + + // determine whether a thread is waiting for object (head to tail scan) + found = false; + queue_iterate( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ) + { + if( element->service == this ) { + found = true; + break; + } + } + + if ( found ) { // we found an interested thread on waiting queue + + // remove it from the waiting queue + queue_remove( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ); + + // put it on the active queue + queue_enter( 
&gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ); + + // wake the waiting thread + thread_wakeup_one(element); + } + } + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); +} + +void IOService::applyToProviders( IOServiceApplierFunction applier, + void * context ) +{ + applyToParents( (IORegistryEntryApplierFunction) applier, + context, gIOServicePlane ); +} + +void IOService::applyToClients( IOServiceApplierFunction applier, + void * context ) +{ + applyToChildren( (IORegistryEntryApplierFunction) applier, + context, gIOServicePlane ); +} + + +/* + * Client messages + */ + + +// send a message to a client or interested party of this service +IOReturn IOService::messageClient( UInt32 type, OSObject * client, + void * argument = 0, vm_size_t argSize = 0 ) +{ + IOReturn ret; + IOService * service; + _IOServiceInterestNotifier * notify; + + if( (service = OSDynamicCast( IOService, client))) + ret = service->message( type, this, argument ); + + else if( (notify = OSDynamicCast( _IOServiceInterestNotifier, client))) { + + _IOServiceNotifierInvocation invocation; + bool willNotify; + + invocation.thread = current_thread(); + + LOCKWRITENOTIFY(); + willNotify = (0 != (kIOServiceNotifyEnable & notify->state)); + + if( willNotify) { + queue_enter( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + } + UNLOCKNOTIFY(); + + if( willNotify) { + + ret = (*notify->handler)( notify->target, notify->ref, + type, this, argument, argSize ); + + LOCKWRITENOTIFY(); + queue_remove( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + if( kIOServiceNotifyWaiter & notify->state) { + notify->state &= ~kIOServiceNotifyWaiter; + thread_wakeup( (event_t) notify); + } + UNLOCKNOTIFY(); + + } else + ret = kIOReturnSuccess; + + } else + ret = kIOReturnBadArgument; + + return( ret ); +} + +void IOService::applyToInterested( const OSSymbol * typeOfInterest, + OSObjectApplierFunction 
applier, + void * context ) +{ + OSArray * array; + unsigned int index; + OSObject * next; + OSArray * copyArray; + + applyToClients( (IOServiceApplierFunction) applier, context ); + + LOCKREADNOTIFY(); + array = OSDynamicCast( OSArray, getProperty( typeOfInterest )); + if( array) { + copyArray = OSArray::withArray( array ); + UNLOCKNOTIFY(); + if( copyArray) { + for( index = 0; + (next = array->getObject( index )); + index++) { + (*applier)(next, context); + } + copyArray->release(); + } + } else + UNLOCKNOTIFY(); +} + +struct MessageClientsContext { + IOService * service; + UInt32 type; + void * argument; + vm_size_t argSize; + IOReturn ret; +}; + +static void messageClientsApplier( OSObject * object, void * ctx ) +{ + IOReturn ret; + MessageClientsContext * context = (MessageClientsContext *) ctx; + + ret = context->service->messageClient( context->type, + object, context->argument, context->argSize ); + if( kIOReturnSuccess != ret) + context->ret = ret; +} + +// send a message to all clients +IOReturn IOService::messageClients( UInt32 type, + void * argument = 0, vm_size_t argSize = 0 ) +{ + MessageClientsContext context; + + context.service = this; + context.type = type; + context.argument = argument; + context.argSize = argSize; + context.ret = kIOReturnSuccess; + + applyToInterested( gIOGeneralInterest, + &messageClientsApplier, &context ); + + return( context.ret ); +} + +IOReturn IOService::acknowledgeNotification( IONotificationRef notification, + IOOptionBits response ) +{ + return( kIOReturnUnsupported ); +} + +IONotifier * IOService::registerInterest( const OSSymbol * typeOfInterest, + IOServiceInterestHandler handler, void * target, void * ref ) +{ + _IOServiceInterestNotifier * notify = 0; + OSArray * set; + + if( (typeOfInterest != gIOGeneralInterest) + && (typeOfInterest != gIOBusyInterest) + && (typeOfInterest != gIOAppPowerStateInterest)) + return( 0 ); + + lockForArbitration(); + if( 0 == (__state[0] & kIOServiceInactiveState)) { + + notify = 
new _IOServiceInterestNotifier; + if( notify && !notify->init()) { + notify->release(); + notify = 0; + } + + if( notify) { + notify->handler = handler; + notify->target = target; + notify->ref = ref; + notify->state = kIOServiceNotifyEnable; + queue_init( ¬ify->handlerInvocations ); + + ////// queue + + LOCKWRITENOTIFY(); + if( 0 == (set = (OSArray *) getProperty( typeOfInterest ))) { + set = OSArray::withCapacity( 1 ); + if( set) { + setProperty( typeOfInterest, set ); + set->release(); + } + } + notify->whence = set; + if( set) + set->setObject( notify ); + UNLOCKNOTIFY(); + } + } + unlockForArbitration(); + + return( notify ); +} + +static void cleanInterestArray( OSObject * object ) +{ + OSArray * array; + unsigned int index; + _IOServiceInterestNotifier * next; + + if( (array = OSDynamicCast( OSArray, object))) { + LOCKWRITENOTIFY(); + for( index = 0; + (next = (_IOServiceInterestNotifier *) + array->getObject( index )); + index++) { + next->whence = 0; + } + UNLOCKNOTIFY(); + } +} + +void IOService::unregisterAllInterest( void ) +{ + cleanInterestArray( getProperty( gIOGeneralInterest )); + cleanInterestArray( getProperty( gIOBusyInterest )); +} + +/* + * _IOServiceInterestNotifier + */ + +// wait for all threads, other than the current one, +// to exit the handler + +void _IOServiceInterestNotifier::wait() +{ + _IOServiceNotifierInvocation * next; + bool doWait; + + do { + doWait = false; + queue_iterate( &handlerInvocations, next, + _IOServiceNotifierInvocation *, link) { + if( next->thread != current_thread() ) { + doWait = true; + break; + } + } + if( doWait) { + state |= kIOServiceNotifyWaiter; + assert_wait( this, THREAD_UNINT); + UNLOCKNOTIFY(); + thread_block((void (*)(void)) 0); + LOCKWRITENOTIFY(); + } + + } while( doWait ); +} + +void _IOServiceInterestNotifier::free() +{ + assert( queue_empty( &handlerInvocations )); + OSObject::free(); +} + +void _IOServiceInterestNotifier::remove() +{ + LOCKWRITENOTIFY(); + + if( whence) { + 
whence->removeObject(whence->getNextIndexOfObject( + (OSObject *) this, 0 )); + whence = 0; + } + + state &= ~kIOServiceNotifyEnable; + + wait(); + + UNLOCKNOTIFY(); + + release(); +} + +bool _IOServiceInterestNotifier::disable() +{ + bool ret; + + LOCKWRITENOTIFY(); + + ret = (0 != (kIOServiceNotifyEnable & state)); + state &= ~kIOServiceNotifyEnable; + if( ret) + wait(); + + UNLOCKNOTIFY(); + + return( ret ); +} + +void _IOServiceInterestNotifier::enable( bool was ) +{ + LOCKWRITENOTIFY(); + if( was) + state |= kIOServiceNotifyEnable; + else + state &= ~kIOServiceNotifyEnable; + UNLOCKNOTIFY(); +} + + +/* + * Terminate + */ + +// a method in case someone needs to override it +bool IOService::terminateClient( IOService * client, IOOptionBits options ) +{ + bool ok; + + if( client->isParent( this, gIOServicePlane, true)) + // we are the clients only provider + ok = client->terminate( options ); + else + ok = true; + + return( ok ); +} + +struct TerminateClientsContext { + IOService * provider; + IOOptionBits options; +}; + +static void terminateInterestApplier( OSObject * object, void * ctx ) +{ + TerminateClientsContext * context = (TerminateClientsContext *) ctx; + + context->provider->messageClient( kIOMessageServiceIsTerminated, + object, (void *) context->options ); +} + +static void terminateClientsApplier( IOService * client, void * ctx ) +{ + TerminateClientsContext * context = (TerminateClientsContext *) ctx; + + if( gIOKitDebug & kIOLogYield) + LOG("%s::terminateClient(%s,%08lx)\n", + context->provider->getName(), + client->getName(), context->options); + + context->provider->terminateClient( client, + ((context->options) | kIOServiceRecursing) & ~kIOServiceSynchronous ); +} + +static void terminateRequestClose( IOService * client, void * ctx ) +{ + TerminateClientsContext * context = (TerminateClientsContext *) ctx; + IOService * provider = context->provider; + + if( provider->isOpen( client)) { + if( gIOKitDebug & kIOLogYield) + LOG("%s::terminate 
request close(%s,%08lx)\n", + provider->getName(), + client->getName(), context->options); + provider->messageClient( kIOMessageServiceIsRequestingClose, + client, (void *) context->options ); + } +} + +bool IOService::terminate( IOOptionBits options = 0 ) +{ + bool ok; + bool wasClosed; + bool madeInactive; + TerminateClientsContext context; + + if( false == lockForArbitration( (options & kIOServiceRequired) )) + return false; + + retain(); + + context.provider = this; + context.options = options; + + ok = (options & kIOServiceRequired); + wasClosed = (false == handleIsOpen( 0 )); + if( !ok) + ok = wasClosed; + + if( !ok) { + applyToClients( &terminateRequestClose, (void *) &context ); + wasClosed = (false == handleIsOpen( 0 )); + ok = wasClosed; + } + + if( ok) { + madeInactive = (0 == (__state[0] & kIOServiceInactiveState)); + if( madeInactive) { + __state[0] |= kIOServiceInactiveState; + __state[0] &= ~(kIOServiceRegisteredState | kIOServiceMatchedState); + if( 0 == (options & kIOServiceRecursing)) + __state[1] |= kIOServiceTerminatedState; + } + } else + madeInactive = false; + + unlockForArbitration(); + + if( madeInactive) { + + adjustBusy( 1 ); + applyToInterested( gIOGeneralInterest, + &terminateInterestApplier, (void *) &context ); + + deliverNotification( gIOTerminatedNotification, 0, 0xffffffff ); + + applyToClients( &terminateClientsApplier, (void *) &context ); + + if( wasClosed && (0 == (options & kIOServiceRecursing))) { + if( options & kIOServiceSynchronous) + doServiceTerminate( 0 ); + else + ok = (0 != _IOServiceJob::startJob( this, kTerminateNubJob, 0 )); + } + if( options & kIOServiceSynchronous) + waitQuiet(); + } + + release(); + + return( ok ); +} + +void IOService::doServiceTerminate( IOOptionBits options ) +{ + IOService * next; + OSIterator * iter; + IOService * client; + OSArray * deathList = 0; + unsigned int index; + bool finalize; + bool ok = true; + + next = this; + deathList = OSArray::withObjects( & (const OSObject *) next, 1, 1 ); 
+ assert( deathList ); + if( !deathList) + return; + + index = 0; + do { + iter = next->getClientIterator(); + assert( iter ); + if( iter) { + + while( (client = (IOService *) iter->getNextObject())) { + + if( gIOKitDebug & kIOLogYield) + LOG("%s::actionClients(%s)\n", + next->getName(), client->getName()); + + client->stop( next ); + + if( next->isOpen( client )) + next->close( client ); + + client->detach( next ); + + client->lockForArbitration(); + if( (client->__state[0] & kIOServiceInactiveState) + && (0 == (client->__state[1] & kIOServiceTerminatedState)) + && (0 == client->getProvider()) ) { + client->__state[1] |= kIOServiceTerminatedState; + finalize = (false == client->handleIsOpen( 0 )); + } else + finalize = false; + client->unlockForArbitration(); + + if( finalize) + deathList->setObject( client ); + } + iter->release(); + } + + } while( (next = (IOService *) deathList->getObject( ++index )) ); + + while( index--) { + + next = (IOService *) deathList->getObject( index ); + assert( next ); + next->retain(); + deathList->removeObject( index ); + + IOUserClient::destroyUserReferences( next ); + + next->unregisterAllInterest(); + + ok = next->finalize( options ); + assert( ok ); + + if( gIOKitDebug & kIOLogYield) + LOG("%s __state = %08lx:%08lx\n", next->getName(), + next->__state[0], next->__state[1]); + next->adjustBusy( -1 ); + + next->release(); + } + + deathList->release(); +} + +bool IOService::finalize( IOOptionBits options ) +{ + OSIterator * iter; + IOService * provider; + + iter = getProviderIterator(); + assert( iter ); + + if( iter) { + while( (provider = (IOService *) iter->getNextObject())) { + /* we come down here on programmatic terminate */ + stop( provider ); + if( provider->isOpen( this )) + provider->close( this ); + detach( provider ); + } + iter->release(); + } + + return( true ); +} + +/* + * Open & close + */ + +bool IOService::open( IOService * forClient, + IOOptionBits options = 0, + void * arg = 0 ) +{ + bool ok; + + if( false == 
       lockForArbitration(false) )
        return false;

    // Refuse to open an inactive (terminating/terminated) service.
    ok = (0 == (__state[0] & kIOServiceInactiveState));
    if( ok)
        ok = handleOpen( forClient, options, arg );

    unlockForArbitration();

    return( ok );
}

// Close the service for forClient.  If this was the final close on a
// terminated service, kick off the asynchronous terminate job; otherwise
// tell the remaining clients the service was closed.
void IOService::close( IOService * forClient,
                       IOOptionBits options = 0 )
{
    bool		ok;
    bool		wasClosed;
    bool		last = false;
    OSIterator *	iter;
    IOService *		client;

    lockForArbitration();

    wasClosed = handleIsOpen( forClient );
    if( wasClosed) {
        handleClose( forClient, options );

        // Last close on a terminated service triggers final teardown.
        last = ( (__state[1] & kIOServiceTerminatedState)
                && (false == handleIsOpen( 0 )) );
    }

    unlockForArbitration();

    if( last) {
        ok = (0 != _IOServiceJob::startJob( this, kTerminateNubJob, 0 ));
        assert( ok );

    } else if( wasClosed) {

        // Notify the other clients that this client closed us.
        iter = getClientIterator();
        assert( iter );

        if( iter) {
            while( (client = (IOService *) iter->getNextObject())) {
                if( client != forClient)
                    messageClient( kIOMessageServiceWasClosed, client, 0 );
            }
            iter->release();
        }
    }
}

// Query open state under the arbitration lock; forClient == 0 asks
// "is anyone holding this open?".
bool IOService::isOpen( const IOService * forClient = 0 ) const
{
    IOService *	self = (IOService *) this;   // cast away const to take the lock
    bool ok;

    self->lockForArbitration();

    ok = handleIsOpen( forClient );

    self->unlockForArbitration();

    return( ok );
}

// Default single-owner open policy: succeed if unowned; with kIOServiceSeize,
// ask the current owner to close and take ownership if it did.
// Subclasses override for multi-client policies.
bool IOService::handleOpen( IOService *	forClient,
                            IOOptionBits options,
                            void *	arg )
{
    bool	ok;

    ok = (0 == __owner);
    if( ok )
        __owner = forClient;

    else if( options & kIOServiceSeize ) {
        // messageClient may cause the owner to close() synchronously,
        // clearing __owner before we re-check it below.
        ok = (kIOReturnSuccess == messageClient( kIOMessageServiceIsRequestingClose,
                                __owner, (void *) options ));
        if( ok && (0 == __owner ))
            __owner = forClient;
    }

    return( ok );
}

// Release ownership if forClient is the current owner; otherwise a no-op.
void IOService::handleClose( IOService *	forClient,
                             IOOptionBits	options )
{
    if( __owner == forClient)
        __owner = 0;
}

// forClient != 0: is that exact client the owner?
// forClient == 0: is the service open by anyone at all?
bool IOService::handleIsOpen( const IOService * forClient ) const
{
    if( forClient)
        return( __owner == forClient );
    else
        return( __owner != forClient );
}

/*
 * Probing & starting
 */
static SInt32
IONotifyOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ) +{ + const _IOServiceNotifier * obj1 = (const _IOServiceNotifier *) inObj1; + const _IOServiceNotifier * obj2 = (const _IOServiceNotifier *) inObj2; + SInt32 val1; + SInt32 val2; + + val1 = 0; + val2 = 0; + + if ( obj1 ) + val1 = obj1->priority; + + if ( obj2 ) + val2 = obj2->priority; + + return ( val1 - val2 ); +} + +static SInt32 IOServiceObjectOrder( const OSObject * entry, void * ref) +{ + OSDictionary * dict; + IOService * service; + _IOServiceNotifier * notify; + OSSymbol * key = (OSSymbol *) ref; + OSNumber * offset; + + if( (notify = OSDynamicCast( _IOServiceNotifier, entry))) + return( notify->priority ); + + else if( (service = OSDynamicCast( IOService, entry))) + offset = OSDynamicCast(OSNumber, service->getProperty( key )); + else if( (dict = OSDynamicCast( OSDictionary, entry))) + offset = OSDynamicCast(OSNumber, dict->getObject( key )); + else { + assert( false ); + offset = 0; + } + + if( offset) + return( (SInt32) offset->unsigned32BitValue()); + else + return( kIODefaultProbeScore ); +} + +SInt32 IOServiceOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ) +{ + const OSObject * obj1 = (const OSObject *) inObj1; + const OSObject * obj2 = (const OSObject *) inObj2; + SInt32 val1; + SInt32 val2; + + val1 = 0; + val2 = 0; + + if ( obj1 ) + val1 = IOServiceObjectOrder( obj1, ref ); + + if ( obj2 ) + val2 = IOServiceObjectOrder( obj2, ref ); + + return ( val1 - val2 ); +} + +IOService * IOService::getClientWithCategory( const OSSymbol * category ) +{ + IOService * service = 0; + OSIterator * iter; + const OSSymbol * nextCat; + + iter = getClientIterator(); + if( iter) { + while( (service = (IOService *) iter->getNextObject())) { + if( kIOServiceInactiveState & service->__state[0]) + continue; + nextCat = (const OSSymbol *) OSDynamicCast( OSSymbol, + service->getProperty( gIOMatchCategoryKey )); + if( category == nextCat) + 
break; + } + iter->release(); + } + return( service ); +} + +bool IOService::invokeNotifer( _IOServiceNotifier * notify ) +{ + _IOServiceNotifierInvocation invocation; + bool willNotify; + bool ret = true; + + invocation.thread = current_thread(); + + LOCKWRITENOTIFY(); + willNotify = (0 != (kIOServiceNotifyEnable & notify->state)); + + if( willNotify) { + queue_enter( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + } + UNLOCKNOTIFY(); + + if( willNotify) { + + ret = (*notify->handler)( notify->target, notify->ref, this ); + + LOCKWRITENOTIFY(); + queue_remove( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + if( kIOServiceNotifyWaiter & notify->state) { + notify->state &= ~kIOServiceNotifyWaiter; + thread_wakeup( (event_t) notify); + } + UNLOCKNOTIFY(); + } + + return( ret ); +} + +/* + * Alloc and probe matching classes, + * called on the provider instance + */ + +void IOService::probeCandidates( OSOrderedSet * matches ) +{ + OSDictionary * match = 0; + OSSymbol * symbol; + IOService * inst; + IOService * newInst; + OSDictionary * props; + SInt32 score; + OSNumber * newPri; + OSOrderedSet * familyMatches = 0; + OSOrderedSet * startList; + OSDictionary * startDict = 0; + const OSSymbol * category; + OSIterator * iter; + _IOServiceNotifier * notify; + OSObject * nextMatch = 0; + bool started; + bool needReloc = false; +#if IOMATCHDEBUG + SInt64 debugFlags; +#endif + + assert( matches ); + while( !needReloc && (nextMatch = matches->getFirstObject())) { + + nextMatch->retain(); + matches->removeObject(nextMatch); + + if( (notify = OSDynamicCast( _IOServiceNotifier, nextMatch ))) { + + lockForArbitration(); + if( 0 == (__state[0] & kIOServiceInactiveState)) + invokeNotifer( notify ); + unlockForArbitration(); + nextMatch->release(); + nextMatch = 0; + continue; + + } else if( !(match = OSDynamicCast( OSDictionary, nextMatch ))) { + nextMatch->release(); + nextMatch = 0; + continue; + } + + props = 0; 
+#if IOMATCHDEBUG + debugFlags = getDebugFlags( match ); +#endif + + do { + category = OSDynamicCast( OSSymbol, + match->getObject( gIOMatchCategoryKey )); + if( 0 == category) + category = gIODefaultMatchCategoryKey; + + if( getClientWithCategory( category )) { +#if IOMATCHDEBUG + if( debugFlags & kIOLogMatch) + LOG("%s: match category %s exists\n", getName(), + category->getCStringNoCopy()); +#endif + nextMatch->release(); + nextMatch = 0; + continue; + } + + // create a copy now in case its modified during matching + props = OSDictionary::withDictionary( match, match->getCount()); + if( 0 == props) + continue; + props->setCapacityIncrement(1); + + // check the nub matches + if( false == passiveMatch( props, true )) + continue; + + // Check to see if driver reloc has been loaded. + needReloc = (false == gIOCatalogue->isModuleLoaded( match )); + if( needReloc) { +#if IOMATCHDEBUG + if( debugFlags & kIOLogCatalogue) + LOG("%s: stalling for module\n", getName()); +#endif + // If reloc hasn't been loaded, exit; + // reprobing will occur after reloc has been loaded. + continue; + } + + // reorder on family matchPropertyTable score. 
+ if( 0 == familyMatches) + familyMatches = OSOrderedSet::withCapacity( 1, + IOServiceOrdering, (void *) gIOProbeScoreKey ); + if( familyMatches) + familyMatches->setObject( props ); + + } while( false ); + + if (nextMatch) { + nextMatch->release(); + nextMatch = 0; + } + if( props) + props->release(); + } + matches->release(); + matches = 0; + + if( familyMatches) { + + while( !needReloc + && (props = (OSDictionary *) familyMatches->getFirstObject())) { + + props->retain(); + familyMatches->removeObject( props ); + + inst = 0; + newInst = 0; +#if IOMATCHDEBUG + debugFlags = getDebugFlags( props ); +#endif + do { + symbol = OSDynamicCast( OSSymbol, + props->getObject( gIOClassKey)); + if( !symbol) + continue; + + // alloc the driver instance + inst = (IOService *) OSMetaClass::allocClassWithName( symbol); + + if( !inst) { + IOLog("Couldn't alloc class \"%s\"\n", + symbol->getCStringNoCopy()); + continue; + } + + // init driver instance + if( !(inst->init( props ))) { +#if IOMATCHDEBUG + if( debugFlags & kIOLogStart) + IOLog("%s::init fails\n", symbol->getCStringNoCopy()); +#endif + continue; + } + if( __state[1] & kIOServiceSynchronousState) + inst->__state[1] |= kIOServiceSynchronousState; + + // give the driver the default match category if not specified + category = OSDynamicCast( OSSymbol, + props->getObject( gIOMatchCategoryKey )); + if( 0 == category) + category = gIODefaultMatchCategoryKey; + inst->setProperty( gIOMatchCategoryKey, (OSObject *) category ); + + // attach driver instance + if( !(inst->attach( this ))) + continue; + + // pass in score from property table + score = familyMatches->orderObject( props ); + + // & probe the new driver instance +#if IOMATCHDEBUG + if( debugFlags & kIOLogProbe) + LOG("%s::probe(%s)\n", + inst->getMetaClass()->getClassName(), getName()); +#endif + + newInst = inst->probe( this, &score ); + inst->detach( this ); + if( 0 == newInst) { +#if IOMATCHDEBUG + if( debugFlags & kIOLogProbe) + IOLog("%s::probe fails\n", 
symbol->getCStringNoCopy()); +#endif + continue; + } + + // save the score + newPri = OSNumber::withNumber( score, 32 ); + if( newPri) { + newInst->setProperty( gIOProbeScoreKey, newPri ); + newPri->release(); + } + + // add to start list for the match category + if( 0 == startDict) + startDict = OSDictionary::withCapacity( 1 ); + assert( startDict ); + startList = (OSOrderedSet *) + startDict->getObject( category ); + if( 0 == startList) { + startList = OSOrderedSet::withCapacity( 1, + IOServiceOrdering, (void *) gIOProbeScoreKey ); + if( startDict && startList) { + startDict->setObject( category, startList ); + startList->release(); + } + } + assert( startList ); + if( startList) + startList->setObject( newInst ); + + } while( false ); + + props->release(); + if( inst) + inst->release(); + } + familyMatches->release(); + familyMatches = 0; + } + + // start the best (until success) of each category + + iter = OSCollectionIterator::withCollection( startDict ); + if( iter) { + while( (category = (const OSSymbol *) iter->getNextObject())) { + + startList = (OSOrderedSet *) startDict->getObject( category ); + assert( startList ); + if( !startList) + continue; + + started = false; + while( true // (!started) + && (inst = (IOService *)startList->getFirstObject())) { + + inst->retain(); + startList->removeObject(inst); + +#if IOMATCHDEBUG + debugFlags = getDebugFlags( inst->getPropertyTable() ); + + if( debugFlags & kIOLogStart) { + if( started) + LOG( "match category exists, skipping " ); + LOG( "%s::start(%s) <%d>\n", inst->getName(), + getName(), inst->getRetainCount()); + } +#endif + if( false == started) + started = startCandidate( inst ); +#if IOMATCHDEBUG + if( (debugFlags & kIOLogStart) && (false == started)) + LOG( "%s::start(%s) <%d> failed\n", inst->getName(), getName(), + inst->getRetainCount()); +#endif + inst->release(); + } + } + iter->release(); + } + + if( startDict) + startDict->release(); +} + +/* + * Start a previously attached & probed instance, + * 
called on exporting object instance + */ + +bool IOService::startCandidate( IOService * service ) +{ + bool ok; + + ok = service->attach( this ); + + if( ok) { + // stall for any nub resources + checkResources(); + // stall for any driver resources + service->checkResources(); + + ok = service->start( this ); + if( !ok) + service->detach( this ); + } + return( ok ); +} + +IOService * IOService::resources( void ) +{ + return( gIOResources ); +} + +void IOService::publishResource( const char * key, OSObject * value = 0 ) +{ + const OSSymbol * sym; + + if( (sym = OSSymbol::withCString( key))) { + publishResource( sym, value); + sym->release(); + } +} + +void IOService::publishResource( const OSSymbol * key, OSObject * value = 0 ) +{ + if( 0 == value) + value = (OSObject *) gIOServiceKey; + + gIOResources->setProperty( key, value); + + gIOResourceGenerationCount++; + gIOResources->registerService(); +} + +bool IOService::addNeededResource( const char * key ) +{ + OSObject * resources; + OSSet * set; + OSString * newKey; + bool ret; + + resources = getProperty( gIOResourceMatchKey ); + + newKey = OSString::withCString( key ); + if( (0 == resources) || (0 == newKey)) + return( false); + + set = OSDynamicCast( OSSet, resources ); + if( !set) { + set = OSSet::withCapacity( 1 ); + if( set) + set->setObject( resources ); + } + else + set->retain(); + + set->setObject( newKey ); + newKey->release(); + ret = setProperty( gIOResourceMatchKey, set ); + set->release(); + + return( ret ); +} + +bool IOService::checkResource( OSObject * matching ) +{ + OSString * str; + OSDictionary * table; + + if( (str = OSDynamicCast( OSString, matching ))) { + if( gIOResources->getProperty( str )) + return( true ); + } + + if( str) + table = resourceMatching( str ); + else if( (table = OSDynamicCast( OSDictionary, matching ))) + table->retain(); + else { + IOLog("%s: Can't match using: %s\n", getName(), + matching->getMetaClass()->getClassName()); + /* false would stall forever */ + return( 
true ); + } + + if( gIOKitDebug & kIOLogConfig) + LOG("config(%x): stalling %s\n", (int) IOThreadSelf(), getName()); + + waitForService( table ); + + if( gIOKitDebug & kIOLogConfig) + LOG("config(%x): waking\n", (int) IOThreadSelf() ); + + return( true ); +} + +bool IOService::checkResources( void ) +{ + OSObject * resources; + OSSet * set; + OSIterator * iter; + bool ok; + + resources = getProperty( gIOResourceMatchKey ); + if( 0 == resources) + return( true ); + + if( (set = OSDynamicCast( OSSet, resources ))) { + + iter = OSCollectionIterator::withCollection( set ); + ok = (0 != iter); + while( ok && (resources = iter->getNextObject()) ) + ok = checkResource( resources ); + if( iter) + iter->release(); + + } else + ok = checkResource( resources ); + + return( ok ); +} + + +_IOConfigThread * _IOConfigThread::configThread( void ) +{ + _IOConfigThread * inst; + + do { + if( !(inst = new _IOConfigThread)) + continue; + if( !inst->init()) + continue; + if( !(inst->thread = IOCreateThread + ( (IOThreadFunc) &_IOConfigThread::main, inst ))) + continue; + + return( inst ); + + } while( false); + + if( inst) + inst->release(); + + return( 0 ); +} + +void _IOConfigThread::free( void ) +{ + OSObject::free(); +} + +void IOService::doServiceMatch( IOOptionBits options ) +{ + _IOServiceNotifier * notify; + OSIterator * iter; + OSOrderedSet * matches; + SInt32 catalogGeneration; + bool keepGuessing = true; + bool reRegistered = true; + +// job->nub->deliverNotification( gIOPublishNotification, +// kIOServiceRegisteredState, 0xffffffff ); + + while( keepGuessing ) { + + matches = gIOCatalogue->findDrivers( this, &catalogGeneration ); + // the matches list should always be created by findDrivers() + if( matches) { + + lockForArbitration(); + if( 0 == (__state[0] & kIOServiceFirstPublishState)) + deliverNotification( gIOFirstPublishNotification, + kIOServiceFirstPublishState, 0xffffffff ); + LOCKREADNOTIFY(); + __state[1] &= ~kIOServiceConfigState; + __state[0] |= 
kIOServiceRegisteredState; + + if( reRegistered && (0 == (__state[0] & kIOServiceInactiveState))) { + + iter = OSCollectionIterator::withCollection( (OSOrderedSet *) + gNotifications->getObject( gIOPublishNotification ) ); + if( iter) { + while((notify = (_IOServiceNotifier *) + iter->getNextObject())) { + + if( passiveMatch( notify->matching ) + && (kIOServiceNotifyEnable & notify->state)) + matches->setObject( notify ); + } + iter->release(); + } + } + + UNLOCKNOTIFY(); + unlockForArbitration(); + + if( matches->getCount() && (kIOReturnSuccess == getResources())) + probeCandidates( matches ); + else + matches->release(); + } + + lockForArbitration(); + reRegistered = (0 != (__state[1] & kIOServiceConfigState)); + keepGuessing = + (reRegistered || (catalogGeneration != + gIOCatalogue->getGenerationCount())) + && (0 == (__state[0] & kIOServiceInactiveState)); + + if( keepGuessing) + unlockForArbitration(); + } + + if( 0 == (__state[0] & kIOServiceInactiveState)) { + deliverNotification( gIOMatchedNotification, + kIOServiceMatchedState, 0xffffffff ); + if( 0 == (__state[0] & kIOServiceFirstMatchState)) + deliverNotification( gIOFirstMatchNotification, + kIOServiceFirstMatchState, 0xffffffff ); + } + + unlockForArbitration(); + + adjustBusy( -1 ); +} + +void IOService::adjustBusy( SInt32 delta ) +{ + IOService * next; + UInt32 count; + bool wasQuiet, nowQuiet; + + if( 0 == delta) + return; + + IOTakeLock( gIOServiceBusyLock ); + next = this; + + do { + count = next->__state[1] & kIOServiceBusyStateMask; + assert( count < kIOServiceBusyMax); + wasQuiet = (0 == count); + assert( (!wasQuiet) || (delta > 0)); + next->__state[1] += delta; + nowQuiet = (0 == (next->__state[1] & kIOServiceBusyStateMask)); + + if( nowQuiet) + thread_wakeup( (event_t) next); + + if( (wasQuiet || nowQuiet) ) { + OSArray * array; + unsigned int index; + OSObject * interested; + + array = OSDynamicCast( OSArray, next->getProperty( gIOBusyInterest )); + if( array) { + LOCKREADNOTIFY(); + for( 
index = 0; + (interested = array->getObject( index )); + index++) { + next->messageClient(kIOMessageServiceBusyStateChange, + interested, (void *) wasQuiet /* busy now */); + } + UNLOCKNOTIFY(); + } + + if( nowQuiet && (next == gIOServiceRoot)) + OSMetaClass::considerUnloads(); + } + + delta = nowQuiet ? -1 : +1; + + } while( (wasQuiet || nowQuiet) && (next = next->getProvider())); + + IOUnlock( gIOServiceBusyLock ); +} + +UInt32 IOService::getBusyState( void ) +{ + return( __state[1] & kIOServiceBusyStateMask ); +} + +IOReturn IOService::waitForState( UInt32 mask, UInt32 value, + mach_timespec_t * timeout = 0 ) +{ + bool wait; + int waitResult = THREAD_AWAKENED; + bool computeDeadline = true; + AbsoluteTime abstime; + + do { + IOTakeLock( gIOServiceBusyLock ); + wait = (value != (__state[1] & mask)); + if( wait) + { + assert_wait( (event_t) this, THREAD_UNINT ); + if ( timeout ) + { + if ( computeDeadline ) + { + AbsoluteTime nsinterval; + clock_interval_to_absolutetime_interval( + timeout->tv_sec, kSecondScale, &abstime ); + clock_interval_to_absolutetime_interval( + timeout->tv_nsec, kNanosecondScale, &nsinterval ); + ADD_ABSOLUTETIME( &abstime, &nsinterval ); + clock_absolutetime_interval_to_deadline( + abstime, &abstime ); + computeDeadline = false; + } + thread_set_timer_deadline( abstime ); + } + } + IOUnlock( gIOServiceBusyLock ); + if( wait) + { + waitResult = thread_block((void (*)(void)) 0); + if ( timeout && (waitResult != THREAD_TIMED_OUT)) + thread_cancel_timer(); + } + + } while( wait && ( waitResult != THREAD_TIMED_OUT ) ); + + if ( waitResult == THREAD_TIMED_OUT ) + return( kIOReturnTimeout ); + else + return( kIOReturnSuccess ); +} + +IOReturn IOService::waitQuiet( mach_timespec_t * timeout = 0 ) +{ + return( waitForState( kIOServiceBusyStateMask, 0, timeout )); +} + +bool IOService::serializeProperties( OSSerialize * s ) const +{ +#if 0 + ((IOService *)this)->setProperty( ((IOService *)this)->__state, + sizeof( __state), "__state"); +#endif + 
return( super::serializeProperties(s) ); +} + + +void _IOConfigThread::main( _IOConfigThread * self ) +{ + _IOServiceJob * job; + IOService * nub; + bool alive = true; + + do { + +#if 0 +#define randomDelay() \ + int del = read_processor_clock(); \ + del = (((int)IOThreadSelf()) ^ del ^ (del >> 10)) & 0x3ff; \ + IOSleep( del ); + randomDelay(); +#endif + + semaphore_wait( gJobsSemaphore ); + + IOTakeLock( gJobsLock ); + job = (_IOServiceJob *) gJobs->getFirstObject(); + job->retain(); + gJobs->removeObject(job); + if( job) { + gOutstandingJobs--; +#ifndef LESS_THREAD_CREATE +// gNumConfigThreads--; // we're out of service + gNumWaitingThreads--; // we're out of service +#endif + } + IOUnlock( gJobsLock ); + + if( job) { + + nub = job->nub; + + if( gIOKitDebug & kIOLogConfig) + LOG("config(%x): starting on %s, %d\n", + (int) IOThreadSelf(), job->nub->getName(), job->type); + + switch( job->type) { + + case kMatchNubJob: + nub->doServiceMatch( job->options ); + break; + + case kTerminateNubJob: + nub->doServiceTerminate( job->options ); + break; + + default: + LOG("config(%x): strange type (%d)\n", + (int) IOThreadSelf(), job->type ); + break; + } + + nub->release(); + job->release(); + + IOTakeLock( gJobsLock ); +#ifndef LESS_THREAD_CREATE + alive = (gOutstandingJobs > gNumWaitingThreads); + if( alive) + gNumWaitingThreads++; // back in service +// gNumConfigThreads++; + else + gNumConfigThreads--; +#endif + IOUnlock( gJobsLock ); + } + + } while( alive ); + + if( gIOKitDebug & kIOLogConfig) + LOG("config(%x): terminating\n", (int) IOThreadSelf() ); + + self->release(); +} + +void _IOServiceJob::pingConfig( _IOServiceJob * job ) +{ + int count; + bool create; + + assert( job ); + + IOTakeLock( gJobsLock ); + + gOutstandingJobs++; + gJobs->setLastObject( job ); + + count = gNumWaitingThreads; +// if( gNumConfigThreads) count++;// assume we're called from a config thread + + create = ( (gOutstandingJobs > count) + && (gNumConfigThreads < kMaxConfigThreads) ); + if( 
create) { + gNumConfigThreads++; + gNumWaitingThreads++; + } + + IOUnlock( gJobsLock ); + + job->release(); + + if( create) { + if( gIOKitDebug & kIOLogConfig) + LOG("config(%d): creating\n", gNumConfigThreads - 1); + _IOConfigThread::configThread(); + } + + semaphore_signal( gJobsSemaphore ); +} + + +// internal - call with gNotificationLock +OSObject * IOService::getExistingServices( OSDictionary * matching, + IOOptionBits inState, IOOptionBits options = 0 ) +{ + OSObject * current = 0; + OSIterator * iter; + IOService * service; + + if( !matching) + return( 0 ); + + iter = IORegistryIterator::iterateOver( gIOServicePlane, + kIORegistryIterateRecursively ); + if( iter) { + do { + iter->reset(); + while( (service = (IOService *) iter->getNextObject())) { + if( (inState == (service->__state[0] & inState)) + && (0 == (service->__state[0] & kIOServiceInactiveState)) + && service->passiveMatch( matching )) { + + if( options & kIONotifyOnce) { + current = service; + break; + } + if( current) + ((OSSet *)current)->setObject( service ); + else + current = OSSet::withObjects( + & (const OSObject *) service, 1, 1 ); + } + } + } while( !service && !iter->isValid()); + iter->release(); + } + + if( current && (0 == (options & kIONotifyOnce))) { + iter = OSCollectionIterator::withCollection( (OSSet *)current ); + current->release(); + current = iter; + } + + return( current ); +} + +// public version +OSIterator * IOService::getMatchingServices( OSDictionary * matching ) +{ + OSIterator * iter; + + // is a lock even needed? 
+ LOCKWRITENOTIFY(); + + iter = (OSIterator *) getExistingServices( matching, + kIOServiceRegisteredState ); + + UNLOCKNOTIFY(); + + return( iter ); +} + + +// internal - call with gNotificationLock +IONotifier * IOService::setNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, void * target, void * ref, + SInt32 priority = 0 ) +{ + _IOServiceNotifier * notify = 0; + OSOrderedSet * set; + + if( !matching) + return( 0 ); + + notify = new _IOServiceNotifier; + if( notify && !notify->init()) { + notify->release(); + notify = 0; + } + + if( notify) { + notify->matching = matching; + notify->handler = handler; + notify->target = target; + notify->ref = ref; + notify->priority = priority; + notify->state = kIOServiceNotifyEnable; + queue_init( ¬ify->handlerInvocations ); + + ////// queue + + if( 0 == (set = (OSOrderedSet *) gNotifications->getObject( type ))) { + set = OSOrderedSet::withCapacity( 1, + IONotifyOrdering, 0 ); + if( set) { + gNotifications->setObject( type, set ); + set->release(); + } + } + notify->whence = set; + if( set) + set->setObject( notify ); + } + + return( notify ); +} + +// internal - call with gNotificationLock +IONotifier * IOService::doInstallNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ) +{ + OSIterator * exist; + IONotifier * notify; + IOOptionBits inState; + + if( !matching) + return( 0 ); + + if( (type == gIOPublishNotification) + || (type == gIOFirstPublishNotification)) + inState = kIOServiceRegisteredState; + else if( (type == gIOMatchedNotification) + || (type == gIOFirstMatchNotification)) + inState = kIOServiceMatchedState; + else if( type == gIOTerminatedNotification) + inState = 0; + else + return( 0 ); + + notify = setNotification( type, matching, handler, target, ref, priority ); + + if( inState) + // get the current set + exist = (OSIterator *) 
getExistingServices( matching, inState ); + else + exist = 0; + + *existing = exist; + + return( notify ); +} + + +IONotifier * IOService::installNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ) +{ + IONotifier * notify; + + LOCKWRITENOTIFY(); + + notify = doInstallNotification( type, matching, handler, target, ref, + priority, existing ); + + UNLOCKNOTIFY(); + + return( notify ); +} + +IONotifier * IOService::addNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref = 0, + SInt32 priority = 0 ) +{ + OSIterator * existing; + _IOServiceNotifier * notify; + IOService * next; + + notify = (_IOServiceNotifier *) installNotification( type, matching, + handler, target, ref, priority, &existing ); + + // send notifications for existing set + if( existing) { + + notify->retain(); // in case handler remove()s + while( (next = (IOService *) existing->getNextObject())) { + + next->lockForArbitration(); + if( 0 == (next->__state[0] & kIOServiceInactiveState)) + next->invokeNotifer( notify ); + next->unlockForArbitration(); + } + notify->release(); + existing->release(); + } + + return( notify ); +} + +struct SyncNotifyVars { + semaphore_port_t waitHere; + IOService * result; +}; + +bool IOService::syncNotificationHandler( + void * /* target */, void * ref, + IOService * newService ) +{ + + // result may get written more than once before the + // notification is removed! 
+ ((SyncNotifyVars *) ref)->result = newService; + semaphore_signal( ((SyncNotifyVars *) ref)->waitHere ); + + return( false ); +} + +IOService * IOService::waitForService( OSDictionary * matching, + mach_timespec_t * timeout = 0 ) +{ + IONotifier * notify = 0; + // priority doesn't help us much since we need a thread wakeup + SInt32 priority = 0; + SyncNotifyVars state; + kern_return_t err = kIOReturnBadArgument; + + if( !matching) + return( 0 ); + + state.waitHere = 0; + state.result = 0; + + LOCKWRITENOTIFY(); + + do { + + state.result = (IOService *) getExistingServices( matching, + kIOServiceMatchedState, kIONotifyOnce ); + if( state.result) + continue; + + err = semaphore_create( kernel_task, &state.waitHere, + SYNC_POLICY_FIFO, 0 ); + if( KERN_SUCCESS != err) + continue; + + notify = IOService::setNotification( gIOMatchedNotification, matching, + &IOService::syncNotificationHandler, (void *) 0, + (void *) &state, priority ); + + } while( false ); + + UNLOCKNOTIFY(); + + if( notify) { + if( timeout) + err = semaphore_timedwait( state.waitHere, *timeout ); + else + err = semaphore_wait( state.waitHere ); + } + + if( notify) + notify->remove(); // dequeues + else + matching->release(); + if( state.waitHere) + semaphore_destroy( kernel_task, state.waitHere ); + + return( state.result ); +} + +void IOService::deliverNotification( const OSSymbol * type, + IOOptionBits orNewState, IOOptionBits andNewState ) +{ + _IOServiceNotifier * notify; + OSIterator * iter; + OSArray * willSend = 0; + + lockForArbitration(); + + if( (0 == (__state[0] & kIOServiceInactiveState)) + || (type == gIOTerminatedNotification)) { + + LOCKREADNOTIFY(); + + iter = OSCollectionIterator::withCollection( (OSOrderedSet *) + gNotifications->getObject( type ) ); + + if( iter) { + while( (notify = (_IOServiceNotifier *) iter->getNextObject())) { + + if( passiveMatch( notify->matching) + && (kIOServiceNotifyEnable & notify->state)) { + if( 0 == willSend) + willSend = OSArray::withCapacity(8); + 
if( willSend) + willSend->setObject( notify ); + } + } + iter->release(); + } + + __state[0] = (__state[0] | orNewState) & andNewState; + + UNLOCKNOTIFY(); + } + + if( willSend) { + for( unsigned int idx = 0; + (notify = (_IOServiceNotifier *) willSend->getObject(idx)); + idx++) { + invokeNotifer( notify ); + } + willSend->release(); + } + unlockForArbitration(); +} + +IOOptionBits IOService::getState( void ) const +{ + return( __state[0] ); +} + +/* + * Helpers to make matching objects for simple cases + */ + +OSDictionary * IOService::serviceMatching( const OSString * name, + OSDictionary * table = 0 ) +{ + if( !table) + table = OSDictionary::withCapacity( 2 ); + if( table) + table->setObject(gIOProviderClassKey, (OSObject *)name ); + + return( table ); +} + +OSDictionary * IOService::serviceMatching( const char * name, + OSDictionary * table = 0 ) +{ + const OSString * str; + + str = OSSymbol::withCString( name ); + if( !str) + return( 0 ); + + table = serviceMatching( str, table ); + str->release(); + return( table ); +} + +OSDictionary * IOService::nameMatching( const OSString * name, + OSDictionary * table = 0 ) +{ + if( !table) + table = OSDictionary::withCapacity( 2 ); + if( table) + table->setObject( gIONameMatchKey, (OSObject *)name ); + + return( table ); +} + +OSDictionary * IOService::nameMatching( const char * name, + OSDictionary * table = 0 ) +{ + const OSString * str; + + str = OSSymbol::withCString( name ); + if( !str) + return( 0 ); + + table = nameMatching( str, table ); + str->release(); + return( table ); +} + +OSDictionary * IOService::resourceMatching( const OSString * str, + OSDictionary * table = 0 ) +{ + table = serviceMatching( gIOResourcesKey, table ); + if( table) + table->setObject( gIOResourceMatchKey, (OSObject *) str ); + + return( table ); +} + +OSDictionary * IOService::resourceMatching( const char * name, + OSDictionary * table = 0 ) +{ + const OSSymbol * str; + + str = OSSymbol::withCString( name ); + if( !str) + return( 0 ); + 
+ table = resourceMatching( str, table ); + str->release(); + + return( table ); +} + +/* + * _IOServiceNotifier + */ + +// wait for all threads, other than the current one, +// to exit the handler + +void _IOServiceNotifier::wait() +{ + _IOServiceNotifierInvocation * next; + bool doWait; + + do { + doWait = false; + queue_iterate( &handlerInvocations, next, + _IOServiceNotifierInvocation *, link) { + if( next->thread != current_thread() ) { + doWait = true; + break; + } + } + if( doWait) { + state |= kIOServiceNotifyWaiter; + assert_wait( this, THREAD_UNINT); + UNLOCKNOTIFY(); + thread_block((void (*)(void)) 0); + LOCKWRITENOTIFY(); + } + + } while( doWait ); +} + +void _IOServiceNotifier::free() +{ + assert( queue_empty( &handlerInvocations )); + OSObject::free(); +} + +void _IOServiceNotifier::remove() +{ + LOCKWRITENOTIFY(); + + if( whence) { + whence->removeObject( (OSObject *) this ); + whence = 0; + } + if( matching) { + matching->release(); + matching = 0; + } + + state &= ~kIOServiceNotifyEnable; + + wait(); + + UNLOCKNOTIFY(); + + release(); +} + +bool _IOServiceNotifier::disable() +{ + bool ret; + + LOCKWRITENOTIFY(); + + ret = (0 != (kIOServiceNotifyEnable & state)); + state &= ~kIOServiceNotifyEnable; + if( ret) + wait(); + + UNLOCKNOTIFY(); + + return( ret ); +} + +void _IOServiceNotifier::enable( bool was ) +{ + LOCKWRITENOTIFY(); + if( was) + state |= kIOServiceNotifyEnable; + else + state &= ~kIOServiceNotifyEnable; + UNLOCKNOTIFY(); +} + +/* + * IOResources + */ + +IOService * IOResources::resources( void ) +{ + IOResources * inst; + + inst = new IOResources; + if( inst && !inst->init()) { + inst->release(); + inst = 0; + } + + return( inst ); +} + +IOWorkLoop * IOResources::getWorkLoop() const +{ + // If we are the resource root then bringe over to the + // platform to get its workloop + if (this == (IOResources *) gIOResources) + return getPlatform()->getWorkLoop(); + else + return IOService::getWorkLoop(); +} + +bool 
IOResources::matchPropertyTable( OSDictionary * table ) +{ + OSObject * prop; + OSString * str; + OSSet * set; + OSIterator * iter; + bool ok = false; + + prop = table->getObject( gIOResourceMatchKey ); + str = OSDynamicCast( OSString, prop ); + if( str) + ok = (0 != getProperty( str )); + + else if( (set = OSDynamicCast( OSSet, prop))) { + + iter = OSCollectionIterator::withCollection( set ); + ok = (iter != 0); + while( ok && (str = OSDynamicCast( OSString, iter->getNextObject()) )) + ok = (0 != getProperty( str )); + + if( iter) + iter->release(); + } + + return( ok ); +} + +/* + * Helpers for matching dictionaries. + * Keys existing in matching are checked in properties. + * Keys may be a string or OSCollection of IOStrings + */ + +bool IOService::compareProperty( OSDictionary * matching, + const char * key ) +{ + OSObject * value; + bool ok; + + value = matching->getObject( key ); + if( value) + ok = value->isEqualTo( getProperty( key )); + else + ok = true; + + return( ok ); +} + + +bool IOService::compareProperty( OSDictionary * matching, + const OSString * key ) +{ + OSObject * value; + bool ok; + + value = matching->getObject( key ); + if( value) + ok = value->isEqualTo( getProperty( key )); + else + ok = true; + + return( ok ); +} + +bool IOService::compareProperties( OSDictionary * matching, + OSCollection * keys ) +{ + OSCollectionIterator * iter; + const OSString * key; + bool ok = true; + + if( !matching || !keys) + return( false ); + + iter = OSCollectionIterator::withCollection( keys ); + + if( iter) { + while( ok && (key = OSDynamicCast( OSString, iter->getNextObject()))) + ok = compareProperty( matching, key ); + + iter->release(); + } + keys->release(); // !! consume a ref !! 
+ + return( ok ); +} + +/* Helper to add a location matching dict to the table */ + +OSDictionary * IOService::addLocation( OSDictionary * table ) +{ + OSDictionary * dict; + + if( !table) + return( 0 ); + + dict = OSDictionary::withCapacity( 1 ); + if( dict) { + table->setObject( gIOLocationMatchKey, dict ); + dict->release(); + } + + return( dict ); +} + +/* + * Go looking for a provider to match a location dict. + */ + +IOService * IOService::matchLocation( IOService * /* client */ ) +{ + IOService * parent; + + parent = getProvider(); + + if( parent) + parent = parent->matchLocation( this ); + + return( parent ); +} + +bool IOService::passiveMatch( OSDictionary * table, bool changesOK ) +{ + IOService * where; + OSString * matched; + OSObject * obj; + OSString * str; + IORegistryEntry * entry; + OSNumber * num; + SInt32 score; + OSNumber * newPri; + bool match = true; + UInt32 done; + + assert( table ); + + where = this; + + do { + done = 0; + + str = OSDynamicCast( OSString, table->getObject( gIOProviderClassKey)); + if( str) { + done++; + match = (0 != where->metaCast( str )); + if( !match) + break; + } + + obj = table->getObject( gIONameMatchKey ); + if( obj) { + done++; + match = compareNames( obj, changesOK ? 
&matched : 0 ); + if( !match) + break; + if( changesOK && matched) { + // leave a hint as to which name matched + table->setObject( gIONameMatchedKey, matched ); + matched->release(); + } + } + obj = table->getObject( gIOPropertyMatchKey ); + if( obj) { + + OSDictionary * dict; + OSDictionary * nextDict; + OSIterator * iter; + + done++; + dict = where->dictionaryWithProperties(); + if( dict) { + nextDict = OSDynamicCast( OSDictionary, obj); + if( nextDict) + iter = 0; + else + iter = OSCollectionIterator::withCollection( + OSDynamicCast(OSCollection, obj)); + + while( nextDict + || (iter && (0 != (nextDict = OSDynamicCast(OSDictionary, + iter->getNextObject()))))) { + match = dict->isEqualTo( nextDict, nextDict); + if( !match) + break; + nextDict = 0; + } + dict->release(); + if( iter) + iter->release(); + if( !match) + break; + } + } + + str = OSDynamicCast( OSString, table->getObject( gIOPathMatchKey )); + if( str) { + done++; + entry = IORegistryEntry::fromPath( str->getCStringNoCopy() ); + match = (where == entry); + if( entry) + entry->release(); + if( !match) + break; + } + + num = OSDynamicCast( OSNumber, table->getObject( gIOMatchedServiceCountKey )); + if( num) { + + OSIterator * iter; + IOService * service = 0; + UInt32 serviceCount = 0; + + done++; + iter = getClientIterator(); + if( iter) { + while( (service = (IOService *) iter->getNextObject())) { + if( kIOServiceInactiveState & service->__state[0]) + continue; + if( 0 == service->getProperty( gIOMatchCategoryKey )) + continue; + ++serviceCount; + } + iter->release(); + } + match = (serviceCount == num->unsigned32BitValue()); + if( !match) + break; + } + + if( done == table->getCount()) + // don't call family if we've done all the entries in the table + break; + + // pass in score from property table + score = IOServiceObjectOrder( table, (void *) gIOProbeScoreKey); + + // do family specific matching + match = where->matchPropertyTable( table, &score ); + + if( !match) { +#if IOMATCHDEBUG + if( 
kIOLogMatch & getDebugFlags( table )) + LOG("%s: family specific matching fails\n", where->getName()); +#endif + break; + } + + if( changesOK) { + // save the score + newPri = OSNumber::withNumber( score, 32 ); + if( newPri) { + table->setObject( gIOProbeScoreKey, newPri ); + newPri->release(); + } + } + + if( !(match = where->compareProperty( table, kIOBSDNameKey ))) + break; + + table = OSDynamicCast( OSDictionary, + table->getObject( gIOLocationMatchKey )); + if( table) { + match = false; + where = where->getProvider(); + if( where) + where = where->matchLocation( where ); + } + + } while( table && where ); + + if( kIOLogMatch & gIOKitDebug) + if( where != this) + LOG("match location @ %s = %d\n", + where->getName(), match ); + + return( match ); +} + + +IOReturn IOService::newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler ) +{ + const OSSymbol *userClientClass = 0; + IOUserClient *client; + OSObject *temp; + + // First try my own properties for a user client class name + temp = getProperty(gIOUserClientClassKey); + if (temp) { + if (OSDynamicCast(OSSymbol, temp)) + userClientClass = (const OSSymbol *) temp; + else if (OSDynamicCast(OSString, temp)) { + userClientClass = OSSymbol::withString((OSString *) temp); + if (userClientClass) + setProperty(kIOUserClientClassKey, + (OSObject *) userClientClass); + } + } + + // Didn't find one so lets just bomb out now without further ado. 
+ if (!userClientClass) + return kIOReturnUnsupported; + + temp = OSMetaClass::allocClassWithName(userClientClass); + if (!temp) + return kIOReturnNoMemory; + + if (OSDynamicCast(IOUserClient, temp)) + client = (IOUserClient *) temp; + else { + temp->release(); + return kIOReturnUnsupported; + } + + if ( !client->initWithTask(owningTask, securityID, type, properties) ) { + client->release(); + return kIOReturnBadArgument; + } + + if ( !client->attach(this) ) { + client->release(); + return kIOReturnUnsupported; + } + + if ( !client->start(this) ) { + client->detach(this); + client->release(); + return kIOReturnUnsupported; + } + + *handler = client; + return kIOReturnSuccess; +} + +IOReturn IOService::newUserClient( task_t owningTask, void * securityID, + UInt32 type, IOUserClient ** handler ) +{ + return( newUserClient( owningTask, securityID, type, 0, handler )); +} + +IOReturn IOService::requestProbe( IOOptionBits options ) +{ + return( kIOReturnUnsupported); +} + +/* + * Convert an IOReturn to text. Subclasses which add additional + * IOReturn's should override this method and call + * super::stringFromReturn if the desired value is not found. 
+ */ + +const char * IOService::stringFromReturn( IOReturn rtn ) +{ + static const IONamedValue IOReturn_values[] = { + {kIOReturnSuccess, "success" }, + {kIOReturnError, "general error" }, + {kIOReturnNoMemory, "memory allocation error" }, + {kIOReturnNoResources, "resource shortage" }, + {kIOReturnIPCError, "Mach IPC failure" }, + {kIOReturnNoDevice, "no such device" }, + {kIOReturnNotPrivileged, "privilege violation" }, + {kIOReturnBadArgument, "invalid argument" }, + {kIOReturnLockedRead, "device is read locked" }, + {kIOReturnLockedWrite, "device is write locked" }, + {kIOReturnExclusiveAccess, "device is exclusive access" }, + {kIOReturnBadMessageID, "bad IPC message ID" }, + {kIOReturnUnsupported, "unsupported function" }, + {kIOReturnVMError, "virtual memory error" }, + {kIOReturnInternalError, "internal driver error" }, + {kIOReturnIOError, "I/O error" }, + {kIOReturnCannotLock, "cannot acquire lock" }, + {kIOReturnNotOpen, "device is not open" }, + {kIOReturnNotReadable, "device is not readable" }, + {kIOReturnNotWritable, "device is not writeable" }, + {kIOReturnNotAligned, "alignment error" }, + {kIOReturnBadMedia, "media error" }, + {kIOReturnStillOpen, "device is still open" }, + {kIOReturnRLDError, "rld failure" }, + {kIOReturnDMAError, "DMA failure" }, + {kIOReturnBusy, "device is busy" }, + {kIOReturnTimeout, "I/O timeout" }, + {kIOReturnOffline, "device is offline" }, + {kIOReturnNotReady, "device is not ready" }, + {kIOReturnNotAttached, "device/channel is not attached" }, + {kIOReturnNoChannels, "no DMA channels available" }, + {kIOReturnNoSpace, "no space for data" }, + {kIOReturnPortExists, "device port already exists" }, + {kIOReturnCannotWire, "cannot wire physical memory" }, + {kIOReturnNoInterrupt, "no interrupt attached" }, + {kIOReturnNoFrames, "no DMA frames enqueued" }, + {kIOReturnMessageTooLarge, "message is too large" }, + {kIOReturnNotPermitted, "operation is not permitted" }, + {kIOReturnNoPower, "device is without power" }, + 
{kIOReturnNoMedia, "media is not present" }, + {kIOReturnUnformattedMedia, "media is not formatted" }, + {kIOReturnUnsupportedMode, "unsupported mode" }, + {kIOReturnUnderrun, "data underrun" }, + {kIOReturnOverrun, "data overrun" }, + {kIOReturnDeviceError, "device error" }, + {kIOReturnNoCompletion, "no completion routine" }, + {kIOReturnAborted, "operation was aborted" }, + {kIOReturnNoBandwidth, "bus bandwidth would be exceeded" }, + {kIOReturnNotResponding, "device is not responding" }, + {kIOReturnInvalid, "unanticipated driver error" }, + {0, NULL } + }; + + return IOFindNameForValue(rtn, IOReturn_values); +} + +/* + * Convert an IOReturn to an errno. + */ +int IOService::errnoFromReturn( IOReturn rtn ) +{ + switch(rtn) { + // (obvious match) + case kIOReturnSuccess: + return(0); + case kIOReturnNoMemory: + return(ENOMEM); + case kIOReturnNoDevice: + return(ENXIO); + case kIOReturnVMError: + return(EFAULT); + case kIOReturnNotPermitted: + return(EPERM); + case kIOReturnNotPrivileged: + return(EACCES); + case kIOReturnIOError: + return(EIO); + case kIOReturnNotWritable: + return(EROFS); + case kIOReturnBadArgument: + return(EINVAL); + case kIOReturnUnsupported: + return(EOPNOTSUPP); + case kIOReturnBusy: + return(EBUSY); + case kIOReturnNoPower: + return(EPWROFF); + case kIOReturnDeviceError: + return(EDEVERR); + case kIOReturnTimeout: + return(ETIMEDOUT); + case kIOReturnMessageTooLarge: + return(EMSGSIZE); + case kIOReturnNoSpace: + return(ENOSPC); + case kIOReturnCannotLock: + return(ENOLCK); + + // (best match) + case kIOReturnBadMessageID: + case kIOReturnNoCompletion: + case kIOReturnNotAligned: + return(EINVAL); + case kIOReturnNotReady: + return(EBUSY); + case kIOReturnRLDError: + return(EBADMACHO); + case kIOReturnPortExists: + case kIOReturnStillOpen: + return(EEXIST); + case kIOReturnExclusiveAccess: + case kIOReturnLockedRead: + case kIOReturnLockedWrite: + case kIOReturnNotAttached: + case kIOReturnNotOpen: + case kIOReturnNotReadable: + 
return(EACCES); + case kIOReturnCannotWire: + case kIOReturnNoResources: + return(ENOMEM); + case kIOReturnAborted: + case kIOReturnOffline: + case kIOReturnNotResponding: + return(EBUSY); + case kIOReturnBadMedia: + case kIOReturnNoMedia: + case kIOReturnUnformattedMedia: + return(EIO); // (media error) + case kIOReturnDMAError: + case kIOReturnOverrun: + case kIOReturnUnderrun: + return(EIO); // (transfer error) + case kIOReturnNoBandwidth: + case kIOReturnNoChannels: + case kIOReturnNoFrames: + case kIOReturnNoInterrupt: + return(EIO); // (hardware error) + case kIOReturnError: + case kIOReturnInternalError: + case kIOReturnInvalid: + return(EIO); // (generic error) + case kIOReturnIPCError: + return(EIO); // (ipc error) + default: + return(EIO); // (all other errors) + } +} + +IOReturn IOService::message( UInt32 type, IOService * provider, + void * argument ) +{ + /* + * Generic entry point for calls from the provider. A return value of + * kIOReturnSuccess indicates that the message was received, and where + * applicable, that it was successful. 
+ */ + + return kIOReturnUnsupported; +} + +/* + * Device memory + */ + +IOItemCount IOService::getDeviceMemoryCount( void ) +{ + OSArray * array; + IOItemCount count; + + array = OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); + if( array) + count = array->getCount(); + else + count = 0; + + return( count); +} + +IODeviceMemory * IOService::getDeviceMemoryWithIndex( unsigned int index ) +{ + OSArray * array; + IODeviceMemory * range; + + array = OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); + if( array) + range = (IODeviceMemory *) array->getObject( index ); + else + range = 0; + + return( range); +} + +IOMemoryMap * IOService::mapDeviceMemoryWithIndex( unsigned int index, + IOOptionBits options = 0 ) +{ + IODeviceMemory * range; + IOMemoryMap * map; + + range = getDeviceMemoryWithIndex( index ); + if( range) + map = range->map( options ); + else + map = 0; + + return( map ); +} + +OSArray * IOService::getDeviceMemory( void ) +{ + return( OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey))); +} + + +void IOService::setDeviceMemory( OSArray * array ) +{ + setProperty( gIODeviceMemoryKey, array); +} + +/* + * Device interrupts + */ + +IOReturn IOService::resolveInterrupt(IOService *nub, int source) +{ + IOInterruptController *interruptController; + OSDictionary *propTable; + OSArray *array; + OSData *data; + OSSymbol *interruptControllerName; + long numSources; + IOInterruptSource *interruptSources; + + // Get the property table from the nub. + propTable = nub->getPropertyTable(); + if (propTable == 0) return kIOReturnNoResources; + + // Get the parents list from the property table. + array = OSDynamicCast(OSArray, + propTable->getObject(gIOInterruptControllersKey)); + if (array == 0) return kIOReturnNoResources; + + // Allocate space for the IOInterruptSources if needed... then return early. 
+ if (nub->_interruptSources == 0) { + numSources = array->getCount(); + interruptSources = (IOInterruptSource *)IOMalloc(numSources * sizeof(IOInterruptSource)); + if (interruptSources == 0) return kIOReturnNoMemory; + + bzero(interruptSources, numSources * sizeof(IOInterruptSource)); + + nub->_numInterruptSources = numSources; + nub->_interruptSources = interruptSources; + return kIOReturnSuccess; + } + + interruptControllerName = OSDynamicCast(OSSymbol,array->getObject(source)); + if (interruptControllerName == 0) return kIOReturnNoResources; + + interruptController = getPlatform()->lookUpInterruptController(interruptControllerName); + if (interruptController == 0) return kIOReturnNoResources; + + // Get the interrupt numbers from the property table. + array = OSDynamicCast(OSArray, + propTable->getObject(gIOInterruptSpecifiersKey)); + if (array == 0) return kIOReturnNoResources; + data = OSDynamicCast(OSData, array->getObject(source)); + if (data == 0) return kIOReturnNoResources; + + // Set the interruptController and interruptSource in the nub's table. 
+ interruptSources = nub->_interruptSources; + interruptSources[source].interruptController = interruptController; + interruptSources[source].vectorData = data; + + return kIOReturnSuccess; +} + +IOReturn IOService::lookupInterrupt(int source, bool resolve, IOInterruptController **interruptController) +{ + IOReturn ret; + + /* Make sure the _interruptSources are set */ + if (_interruptSources == 0) { + ret = resolveInterrupt(this, source); + if (ret != kIOReturnSuccess) return ret; + } + + /* Make sure the local source number is valid */ + if ((source < 0) || (source >= _numInterruptSources)) + return kIOReturnNoInterrupt; + + /* Look up the controller for the local source */ + *interruptController = _interruptSources[source].interruptController; + + if (*interruptController == NULL) { + if (!resolve) return kIOReturnNoInterrupt; + + /* Try to resolve the interrupt */ + ret = resolveInterrupt(this, source); + if (ret != kIOReturnSuccess) return ret; + + *interruptController = _interruptSources[source].interruptController; + } + + return kIOReturnSuccess; +} + +IOReturn IOService::registerInterrupt(int source, OSObject *target, + IOInterruptAction handler, + void *refCon) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, true, &interruptController); + if (ret != kIOReturnSuccess) return ret; + + /* Register the source */ + return interruptController->registerInterrupt(this, source, target, + (IOInterruptHandler)handler, + refCon); +} + +IOReturn IOService::unregisterInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) return ret; + + /* Unregister the source */ + return interruptController->unregisterInterrupt(this, source); +} + +IOReturn IOService::getInterruptType(int source, int *interruptType) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = 
lookupInterrupt(source, true, &interruptController); + if (ret != kIOReturnSuccess) return ret; + + /* Return the type */ + return interruptController->getInterruptType(this, source, interruptType); +} + +IOReturn IOService::enableInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) return ret; + + /* Enable the source */ + return interruptController->enableInterrupt(this, source); +} + +IOReturn IOService::disableInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) return ret; + + /* Disable the source */ + return interruptController->disableInterrupt(this, source); +} + +IOReturn IOService::causeInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) return ret; + + /* Cause an interrupt for the source */ + return interruptController->causeInterrupt(this, source); +} + +OSMetaClassDefineReservedUnused(IOService, 0); +OSMetaClassDefineReservedUnused(IOService, 1); +OSMetaClassDefineReservedUnused(IOService, 2); +OSMetaClassDefineReservedUnused(IOService, 3); +OSMetaClassDefineReservedUnused(IOService, 4); +OSMetaClassDefineReservedUnused(IOService, 5); +OSMetaClassDefineReservedUnused(IOService, 6); +OSMetaClassDefineReservedUnused(IOService, 7); +OSMetaClassDefineReservedUnused(IOService, 8); +OSMetaClassDefineReservedUnused(IOService, 9); +OSMetaClassDefineReservedUnused(IOService, 10); +OSMetaClassDefineReservedUnused(IOService, 11); +OSMetaClassDefineReservedUnused(IOService, 12); +OSMetaClassDefineReservedUnused(IOService, 13); +OSMetaClassDefineReservedUnused(IOService, 14); +OSMetaClassDefineReservedUnused(IOService, 15); +OSMetaClassDefineReservedUnused(IOService, 16); 
+OSMetaClassDefineReservedUnused(IOService, 17); +OSMetaClassDefineReservedUnused(IOService, 18); +OSMetaClassDefineReservedUnused(IOService, 19); +OSMetaClassDefineReservedUnused(IOService, 20); +OSMetaClassDefineReservedUnused(IOService, 21); +OSMetaClassDefineReservedUnused(IOService, 22); +OSMetaClassDefineReservedUnused(IOService, 23); +OSMetaClassDefineReservedUnused(IOService, 24); +OSMetaClassDefineReservedUnused(IOService, 25); +OSMetaClassDefineReservedUnused(IOService, 26); +OSMetaClassDefineReservedUnused(IOService, 27); +OSMetaClassDefineReservedUnused(IOService, 28); +OSMetaClassDefineReservedUnused(IOService, 29); +OSMetaClassDefineReservedUnused(IOService, 30); +OSMetaClassDefineReservedUnused(IOService, 31); +OSMetaClassDefineReservedUnused(IOService, 32); +OSMetaClassDefineReservedUnused(IOService, 33); +OSMetaClassDefineReservedUnused(IOService, 34); +OSMetaClassDefineReservedUnused(IOService, 35); +OSMetaClassDefineReservedUnused(IOService, 36); +OSMetaClassDefineReservedUnused(IOService, 37); +OSMetaClassDefineReservedUnused(IOService, 38); +OSMetaClassDefineReservedUnused(IOService, 39); +OSMetaClassDefineReservedUnused(IOService, 40); +OSMetaClassDefineReservedUnused(IOService, 41); +OSMetaClassDefineReservedUnused(IOService, 42); +OSMetaClassDefineReservedUnused(IOService, 43); +OSMetaClassDefineReservedUnused(IOService, 44); +OSMetaClassDefineReservedUnused(IOService, 45); +OSMetaClassDefineReservedUnused(IOService, 46); +OSMetaClassDefineReservedUnused(IOService, 47); +OSMetaClassDefineReservedUnused(IOService, 48); +OSMetaClassDefineReservedUnused(IOService, 49); +OSMetaClassDefineReservedUnused(IOService, 50); +OSMetaClassDefineReservedUnused(IOService, 51); +OSMetaClassDefineReservedUnused(IOService, 52); +OSMetaClassDefineReservedUnused(IOService, 53); +OSMetaClassDefineReservedUnused(IOService, 54); +OSMetaClassDefineReservedUnused(IOService, 55); +OSMetaClassDefineReservedUnused(IOService, 56); 
+OSMetaClassDefineReservedUnused(IOService, 57); +OSMetaClassDefineReservedUnused(IOService, 58); +OSMetaClassDefineReservedUnused(IOService, 59); +OSMetaClassDefineReservedUnused(IOService, 60); +OSMetaClassDefineReservedUnused(IOService, 61); +OSMetaClassDefineReservedUnused(IOService, 62); +OSMetaClassDefineReservedUnused(IOService, 63); diff --git a/iokit/Kernel/IOServicePM.cpp b/iokit/Kernel/IOServicePM.cpp new file mode 100644 index 000000000..424f3f79c --- /dev/null +++ b/iokit/Kernel/IOServicePM.cpp @@ -0,0 +1,3642 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "IOKit/pwr_mgt/IOPMinformeeList.h" +#include "IOKit/pwr_mgt/IOPMchangeNoteList.h" +#include "IOKit/pwr_mgt/IOPMlog.h" +#include "IOKit/pwr_mgt/IOPowerConnection.h" + +static void ack_timer_expired(thread_call_param_t); +static void settle_timer_expired(thread_call_param_t); +void PMreceiveCmd ( OSObject *, void *, void *, void *, void * ); +static void PM_idle_timer_expired(OSObject *, IOTimerEventSource *); +static void c_PM_Clamp_Timer_Expired (OSObject * client,IOTimerEventSource *); +void tellAppWithResponse ( OSObject * object, void * context); +void tellClientWithResponse ( OSObject * object, void * context); +void tellClient ( OSObject * object, void * context); +IOReturn serializedAllowPowerChange ( OSObject *, void *, void *, void *, void *); +IOReturn serializedCancelPowerChange ( OSObject *, void *, void *, void *, void *); + +extern const IORegistryPlane * gIOPowerPlane; + + +// and there's 1000 nanoseconds in a microsecond: +#define ns_per_us 1000 + + +// The current change note is processed by a state machine. +// Inputs are acks from interested parties, ack from the controlling driver, +// ack timeouts, settle timeout, and powerStateDidChange from the parent. 
+// These are the states: + +enum { + IOPMour_prechange_03 = 1, + IOPMour_prechange_05, + IOPMour_prechange_1, + IOPMour_prechange_2, + IOPMour_prechange_3, + IOPMour_prechange_4, + IOPMparent_down_0, + IOPMparent_down_2, + IOPMparent_down_3, + IOPMparent_down_4, + IOPMparent_down_5, + IOPMparent_down_6, + IOPMparent_up_0, + IOPMparent_up_1, + IOPMparent_up_4, + IOPMparent_up_5, + IOPMparent_up_6, + IOPMfinished + }; + +struct context { // used for applyToInterested + OSArray * responseFlags; + UInt16 serialNumber; + UInt16 counter; + UInt32 maxTimeRequested; + int msgType; + IOService * us; + IOLock * flags_lock; +}; + + // five minutes in microseconds +#define FIVE_MINUTES 5*60*1000000 +#define k15seconds 15*1000000 + +/* + There are two different kinds of power state changes. One is initiated by a subclassed device object which has either + decided to change power state, or its controlling driver has suggested it, or some other driver wants to use the + idle device and has asked it to become usable. The second kind of power state change is initiated by the power + domain parent. The two are handled slightly differently. + +There is a queue of so-called change notifications, or change notes for short. Usually the queue is empty, and when + it isn't, usually there is one change note in it, but since it's possible to have more than one power state change pending + at one time, a queue is implemented. Example: the subclass device decides it's idle and initiates a change to a lower + power state. This causes interested parties to be notified, but they don't all acknowledge right away. This causes the + change note to sit in the queue until all the acks are received. During this time, the device decides it isn't idle anymore and + wants to raise power back up again. This change can't be started, however, because the previous one isn't complete yet, + so the second one waits in the queue. 
During this time, the parent decides to lower or raise the power state of the entire + power domain and notifies the device, and that notification goes into the queue, too, and can't be actioned until the + others are. + + This is how a power change initiated by the subclass device is handled: + First, all interested parties are notified of the change via their powerStateWillChangeTo method. If they all don't + acknowledge via return code, then we have to wait. If they do, or when they finally all acknowledge via our + acknowledgePowerChange method, then we can continue. We call the controlling driver, instructing it to change to + the new state. Then we wait for power to settle. If there is no settling-time, or after it has passed, we notify + interested parties again, this time via their powerStateDidChangeTo methods. When they have all acked, we're done. + If we lowered power and don't need the power domain to be in its current power state, we suggest to the parent that + it lower the power domain state. + + This is how a change to a lower power domain state initiated by the parent is handled: + First, we figure out what power state we will be in when the new domain state is reached. Then all interested parties are + notified that we are moving to that new state. When they have acknowledged, we call the controlling driver to assume + that state and we wait for power to settle. Then we acknowledge our preparedness to our parent. When all its interested + parties have acknowledged, it lowers power and then notifies its interested parties again. When we get this call, we notify + our interested parties that the power state has changed, and when they have all acknowledged, we're done. + + This is how a change to a higher power domain state initiated by the parent is handled: + We figure out what power state we will be in when the new domain state is reached. If it is different from our current + state we acknowledge the parent. 
When all the parent's interested parties have acknowledged, it raises power in the +domain and waits for power to settle. Then it notifies everyone that the new state has been reached. When we get this call, + we call the controlling driver, instructing it to assume the new state, and wait for power to settle. Then we notify our interested + parties. When they all acknowledge we are done. + + In either of the two cases above, it is possible that we will not be changing state even though the domain is. Examples: + A change to a lower domain state may not affect us because we are already in a low enough state, and + We will not take advantage of a change to a higher domain state, because we have no need of the higher power. + In such a case, there is nothing to do but acknowledge the parent. So when the parent calls our powerDomainWillChange + method, and we decide that we will not be changing state, we merely acknowledge the parent, via return code, and wait. + When the parent subsequently calls powerStateDidChange, we acknowledge again via return code, and the change is complete. + + Power state changes are processed in a state machine, and since there are four varieties of power state changes, there are + four major paths through the state machine: + + The fourth is nearly trivial. In this path, the parent is changing the domain state, but we are not changing the device state. + The change starts when the parent calls powerDomainWillChange. All we do is acknowledge the parent. +When the parent calls powerStateDidChange, we acknowledge the parent again, and we're done. + + The first is fairly simple. It starts when a power domain child calls requestPowerDomainState and we decide to change power states + to accomodate the child, or if our power-controlling driver calls changePowerStateTo, or if some other driver which is using our + device calls makeUsable, or if a subclassed object calls changePowerStateToPriv. 
These are all power changes initiated by us, not + forced upon us by the parent. We start by notifying interested parties. If they all acknowledge via return code, we can go + on to state "our_prechange_1". Otherwise, we start the ack timer and wait for the stragglers to acknowledge by calling + acknowledgePowerChange. We move on to state "our_prechange_1" when all the stragglers have acknowledged, + or when the ack timer expires on all those which didn't acknowledge. In "our_prechange_1" we call the power-controlling + driver to change the power state of the hardware. If it returns saying it has done so, we go on to state "our_prechange_2". + Otherwise, we have to wait for it, so we set the ack timer and wait. When it calls acknowledgeSetPowerState, or when the + ack timer expires, we go on. In "our_prechange_2", we look in the power state array to see if there is any settle time required + when changing from our current state to the new state. If not, we go right away to "our_prechange_3". Otherwise, we + set the settle timer and wait. When it expires, we move on. In "our_prechange_3" state, we notify all our interested parties + via their powerStateDidChange methods that we have finished changing power state. If they all acknowledge via return + code, we move on to "our_prechange_4". Otherwise we set the ack timer and wait. When they have all acknowledged, or + when the ack timer has expired for those that didn't, we move on to "our_prechange_4", where we remove the used + change note from the head of the queue and start the next one if one exists. + + Parent-initiated changes are more complex in the state machine. First, power going up and power going down are handled + differently, so they have different paths through the state machine. Second, we can acknowledge the parent's notification + in two different ways, so each of the parent paths is really two. 
+ + When the parent calls our powerDomainWillChange method, notifying us that it will lower power in the domain, we decide + what state that will put our device in. Then we embark on the state machine path "IOPMparent_down_1" + and "IOPMparent_down_2", in which we notify interested parties of the upcoming change, instruct our driver to make + the change, check for settle time, and notify interested parties of the completed change. If we get to the end of this path without + stalling due to an interested party which didn't acknowledge via return code, due to the controlling driver not able to change + state right away, or due to a non-zero settling time, then we return IOPMAckImplied to the parent, and we're done with the change. + If we do stall in any of those states, we return IOPMWillAckLater to the parent and enter the parallel path "IOPMparent_down_4" + "IOPMparent_down_5", and "IOPMparent_down_3", where we continue with the same processing, except that at the end we + acknowledge the parent explicitly via acknowledgePowerChange, and we're done with the change. +Then when the parent calls us at powerStateDidChange we acknowledging via return code, because we have already made + the power change. In any case, when we are done we remove the used change note from the head of the queue and start on the next one. + + The case of the parent raising power in the domain is handled similarly in that there are parallel paths, one for no-stall + that ends in implicit acknowleging the parent, and one that has stalled at least once that ends in explicit acknowledging + the parent. This case is different, though in that our device changes state in the second half, after the parent calls + powerStateDidChange rather than before, as in the power-lowering case. 
+ + When the parent calls our powerDomainWillChange method, notifying us that it will raise power in the domain, we acknowledge + via return code, because there's really nothing we can do until the power is actually raised in the domain. + When the parent calls us at powerStateDidChange, we start by notifying our interested parties. If they all acknowledge via return code, + we go on to" IOPMparent_up_1" to instruct the driver to raise its power level. After that, we check for any + necessary settling time in "IOPMparent_up_2", and we notify all interested parties that power has changed + in "IOPMparent_up_3". If none of these operations stall, we acknowledge the parent via return code, release + the change note, and start the next, if there is one. If one of them does stall, we enter the parallel path "IOPMparent_up_0", + "IOPMparent_up_4", "IOPMparent_up_5", and "IOPMparent_up_6", which ends with + our explicit acknowledgement to the parent. + +*/ + + +const char priv_key[ ] = "Power Management private data"; +const char prot_key[ ] = "Power Management protected data"; + + +void IOService::PMinit ( void ) +{ + if ( ! 
initialized ) { + + pm_vars = new IOPMprot; // make space for our variables + priv = new IOPMpriv; + pm_vars->init(); + priv->init(); + + setProperty(prot_key, (OSObject *) pm_vars); // add these to the properties + setProperty(priv_key, (OSObject *) priv); + + priv->owner = this; + pm_vars->theNumberOfPowerStates = 0; // then initialize them + priv->we_are_root = false; + pm_vars->theControllingDriver = NULL; + priv->our_lock = IOLockAlloc(); + priv->flags_lock = IOLockAlloc(); + priv->interestedDrivers = new IOPMinformeeList; + priv->interestedDrivers->initialize(); + priv->changeList = new IOPMchangeNoteList; + priv->changeList->initialize(); + pm_vars->aggressiveness = 0; + for (unsigned int i = 0; i <= kMaxType; i++) { + pm_vars->current_aggressiveness_values[i] = 0; + pm_vars->current_aggressiveness_valid[i] = false; + } + pm_vars->myCurrentState = 0; + priv->imminentState = 0; + priv->askingFor = 0; + priv->ourDesiredPowerState = 0; + pm_vars->parentsCurrentPowerFlags = 0; + pm_vars->maxCapability = 0; + priv->driverDesire = 0; + priv->deviceDesire = 0; + priv->initial_change = true; + priv->need_to_become_usable = false; + priv->previousRequest = 0; + priv->device_overrides = false; + priv->machine_state = IOPMfinished; + pm_vars->commandQueue = NULL; + priv->timerEventSrc = NULL; + priv->clampTimerEventSrc = NULL; + pm_vars->PMworkloop = NULL; + priv->activityLock = NULL; + pm_vars->ourName = getName(); + pm_vars->thePlatform = getPlatform(); + pm_vars->parentsKnowState = false; + assert( pm_vars->thePlatform != 0 ); + priv->clampOn = false; + pm_vars->serialNumber = 0; + pm_vars->responseFlags = NULL; + pm_vars->doNotPowerDown = true; + pm_vars->PMcommandGate = NULL; + priv->ackTimer = thread_call_allocate((thread_call_func_t)ack_timer_expired, (thread_call_param_t)this); + priv->settleTimer = thread_call_allocate((thread_call_func_t)settle_timer_expired, (thread_call_param_t)this); + initialized = true; + } +} + + 
+//********************************************************************************* +// PMstop +// +// Free up the data created in PMinit. +//********************************************************************************* +void IOService::PMstop ( void ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + + initialized = false; + + removeProperty(prot_key); // remove the properties + removeProperty(priv_key); + + iter = getParentIterator(gIOPowerPlane); // detach parents + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + ((IOService *)(connection->getParentEntry(gIOPowerPlane)))->removePowerChild(connection); + } + } + iter->release(); + } + detachAbove( gIOPowerPlane ); // detach IOConnections + + pm_vars->parentsKnowState = false; // no more power state changes +#if 0 + +// This loop is insufficient. Currently only leaf nodes are removed, and it's not clear today what +// it means to remove a subtree from the tree. Should the IOPowerConnection at the top of it stay +// or go? Should its child be notified of a change in the domain state? 
+ + iter = getChildIterator(gIOPowerPlane); // detach children + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + removePowerChild(connection); + } + } + iter->release(); + } +#endif + + if ( priv->clampTimerEventSrc != NULL ) { + getPMworkloop()->removeEventSource(priv->clampTimerEventSrc); + priv->clampTimerEventSrc->release(); + priv->clampTimerEventSrc = NULL; + } + if ( priv->timerEventSrc != NULL ) { + pm_vars->PMworkloop->removeEventSource(priv->timerEventSrc); + priv->timerEventSrc->release(); + priv->timerEventSrc = NULL; + } + thread_call_free(priv->settleTimer); + thread_call_free(priv->ackTimer); + + priv->interestedDrivers->release(); // remove lists + priv->changeList->release(); + pm_vars->release(); // remove the instance variables + priv->release(); + pm_vars = NULL; + priv = NULL; +} + + +//********************************************************************************* +// joinPMtree +// +// A policy-maker calls its nub here when initializing, to be attached into +// the power management hierarchy. The default function is to call the +// platform expert, which knows how to do it. This method is overridden +// by a nub subclass which may either know how to do it, or may need +// to take other action. +// +// This may be the only "power management" method used in a nub, +// meaning it may not be initialized for power management. +//********************************************************************************* +void IOService::joinPMtree ( IOService * driver ) +{ + IOPlatformExpert * thePlatform; + + thePlatform = getPlatform(); + assert(thePlatform != 0 ); + thePlatform->PMRegisterDevice(this,driver); +} + + +//********************************************************************************* +// youAreRoot +// +// Power Management is informing us that we are the root power domain. 
+// The only difference between us and any other power domain is that +// we have no parent and therefore never call it. +//********************************************************************************* +IOReturn IOService::youAreRoot ( void ) +{ + priv-> we_are_root = true; + pm_vars->parentsKnowState = true; + attachToParent( getRegistryRoot(),gIOPowerPlane ); + + return IOPMNoErr; +} + + +//********************************************************************************* +// setPowerParent +// +// Power Management is informing us who our parent is. +// If we have a controlling driver, find out, given our newly-informed +// power domain state, what state it would be in, and then tell it +// to assume that state. +//********************************************************************************* +IOReturn IOService::setPowerParent ( IOPowerConnection * theParent, bool stateKnown, IOPMPowerFlags currentState ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + unsigned long tempDesire; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogSetParent,stateKnown,currentState); + + if ( stateKnown && ((pm_vars->PMworkloop == NULL) || (pm_vars->PMcommandGate == NULL)) ) { + getPMworkloop(); // we have a path to the root + if ( pm_vars->PMworkloop != NULL ) { // find out the workloop + if ( pm_vars->PMcommandGate == NULL ) { // and make our command gate + pm_vars->PMcommandGate = IOCommandGate::commandGate((OSObject *)this); + if ( pm_vars->PMcommandGate != NULL ) { + pm_vars->PMworkloop->addEventSource(pm_vars->PMcommandGate); + } + } + } + } + + theParent->setParentCurrentPowerFlags(currentState); // set our connection data + theParent->setParentKnowsState(stateKnown); + + pm_vars->parentsKnowState = true; // combine parent knowledge + pm_vars->parentsCurrentPowerFlags = 0; + + iter = getParentIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = 
OSDynamicCast(IOPowerConnection,next)) ) { + pm_vars->parentsKnowState &= connection->parentKnowsState(); + pm_vars->parentsCurrentPowerFlags |= connection->parentCurrentPowerFlags(); + } + } + iter->release(); + } + + if ( (pm_vars->theControllingDriver != NULL) && + (pm_vars->parentsKnowState) ) { + pm_vars->maxCapability = pm_vars->theControllingDriver->maxCapabilityForDomainState(pm_vars->parentsCurrentPowerFlags); + tempDesire = priv->deviceDesire; // initially change into the state we are already in + priv->deviceDesire = pm_vars->theControllingDriver->initialPowerStateForDomainState(pm_vars->parentsCurrentPowerFlags); + changeState(); + priv->deviceDesire = tempDesire; // put this back like before + } + return IOPMNoErr; +} + + +//********************************************************************************* +// addPowerChild +// +// Power Management is informing us who our children are. +//********************************************************************************* +IOReturn IOService::addPowerChild ( IOService * theChild ) +{ + IOPowerConnection * connection; + unsigned int i; + + if ( ! initialized ) { + return IOPMNotYetInitialized; // we're not a power-managed IOService + } + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAddChild,0,0); + + connection = new IOPowerConnection; // make a nub + + connection->init(); + connection->start(this); + + attachToChild( connection,gIOPowerPlane ); // connect it up + connection->attachToChild( theChild,gIOPowerPlane ); + connection->release(); + + if ( (pm_vars->theControllingDriver == NULL) || // tell it the current state of the power domain + ! (inPlane(gIOPowerPlane)) || + ! 
(pm_vars->parentsKnowState) ) { + theChild->setPowerParent(connection,false,0); + if ( inPlane(gIOPowerPlane) ) { + for (i = 0; i <= kMaxType; i++) { + if ( pm_vars->current_aggressiveness_valid[i] ) { + theChild->setAggressiveness (i, pm_vars->current_aggressiveness_values[i]); + } + } + } + } + else { + theChild->setPowerParent(connection,true,pm_vars->thePowerStates[pm_vars->myCurrentState].outputPowerCharacter); + for (i = 0; i <= kMaxType; i++) { + if ( pm_vars->current_aggressiveness_valid[i] ) { + theChild->setAggressiveness (i, pm_vars->current_aggressiveness_values[i]); + } + } + add_child_to_active_change(connection); // catch it up if change is in progress + } + + return IOPMNoErr; +} + + +//********************************************************************************* +// removePowerChild +// +//********************************************************************************* +IOReturn IOService::removePowerChild ( IOPowerConnection * theChild ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRemoveChild,0,0); + + detachFromChild(theChild,gIOPowerPlane); // remove the departing child + + if ( (pm_vars->theControllingDriver == NULL) || // if not fully initialized + ! (inPlane(gIOPowerPlane)) || + ! (pm_vars->parentsKnowState) ) { + return IOPMNoErr; // we can do no more + } + + changeState(); // change state if we can now tolerate lower power + + return IOPMNoErr; +} + + +//********************************************************************************* +// registerPowerDriver +// +// A driver has called us volunteering to control power to our device. +// If the power state array it provides is richer than the one we already +// know about (supplied by an earlier volunteer), then accept the offer. +// Notify all interested parties of our power state, which we now know. 
//*********************************************************************************

// A driver volunteers to control power for our device.  Accept its power state
// array only if it describes more states than any previously accepted array,
// and only while no power change is queued.  On acceptance, recompute our
// character mask, register the driver as interested, and (if the domain state
// is known) run an initial change into the state we are already in.
IOReturn IOService::registerPowerDriver ( IOService * controllingDriver, IOPMPowerState* powerStates, unsigned long numberOfStates )
{
    unsigned long i;
    unsigned long tempDesire;

    if ( (numberOfStates > pm_vars->theNumberOfPowerStates) && (numberOfStates > 1) ) {
        if ( priv->changeList->currentChange() == -1 ) {        // only when no change is in our queue
            if ( controllingDriver != NULL ) {
                if ( numberOfStates <= IOPMMaxPowerStates ) {
                    switch ( powerStates[0].version ) {
                        case 1:
                            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriver,
                                                (unsigned long)numberOfStates, (unsigned long)powerStates[0].version);
                            // version 1: whole-struct copy
                            for ( i = 0; i < numberOfStates; i++ ) {
                                pm_vars->thePowerStates[i] = powerStates[i];
                            }
                            break;
                        case 2:
                            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriver,
                                                (unsigned long) numberOfStates,(unsigned long) powerStates[0].version);
                            // version 2: field-by-field copy of the fields we know about
                            for ( i = 0; i < numberOfStates; i++ ) {
                                pm_vars->thePowerStates[i].version = powerStates[i].version;
                                pm_vars->thePowerStates[i].capabilityFlags = powerStates[i].capabilityFlags;
                                pm_vars->thePowerStates[i].outputPowerCharacter = powerStates[i].outputPowerCharacter;
                                pm_vars->thePowerStates[i].inputPowerRequirement = powerStates[i].inputPowerRequirement;
                                pm_vars->thePowerStates[i].staticPower = powerStates[i].staticPower;
                                pm_vars->thePowerStates[i].unbudgetedPower = powerStates[i].unbudgetedPower;
                                pm_vars->thePowerStates[i].powerToAttain = powerStates[i].powerToAttain;
                                pm_vars->thePowerStates[i].timeToAttain = powerStates[i].timeToAttain;
                                pm_vars->thePowerStates[i].settleUpTime = powerStates[i].settleUpTime;
                                pm_vars->thePowerStates[i].timeToLower = powerStates[i].timeToLower;
                                pm_vars->thePowerStates[i].settleDownTime = powerStates[i].settleDownTime;
                                pm_vars->thePowerStates[i].powerDomainBudget = powerStates[i].powerDomainBudget;
                            }
                            break;
                        default:
                            // unknown power state array version: reject
                            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriverErr1,
                                                (unsigned long)powerStates[0].version,0);
                            return IOPMNoErr;
                    }

                    pm_vars->myCharacterFlags = 0;              // make a mask of all the character bits we know about
                    for ( i = 0; i < numberOfStates; i++ ) {
                        pm_vars->myCharacterFlags |= pm_vars->thePowerStates[i].outputPowerCharacter;
                    }

                    pm_vars->theNumberOfPowerStates = numberOfStates;
                    pm_vars->theControllingDriver = controllingDriver;
                    if ( priv->interestedDrivers->findItem(controllingDriver) == NULL ) {   // register it as interested
                        registerInterestedDriver (controllingDriver );                      // unless already done
                    }
                    if ( priv->need_to_become_usable ) {        // a makeUsable() arrived before we had a driver
                        priv->need_to_become_usable = false;
                        priv->deviceDesire = pm_vars->theNumberOfPowerStates - 1;
                    }

                    if ( inPlane(gIOPowerPlane) &&
                         (pm_vars->parentsKnowState) ) {
                        pm_vars->maxCapability = pm_vars->theControllingDriver->maxCapabilityForDomainState(pm_vars->parentsCurrentPowerFlags);
                        tempDesire = priv->deviceDesire;        // initially change into the state we are already in
                        priv->deviceDesire = pm_vars->theControllingDriver->initialPowerStateForDomainState(pm_vars->parentsCurrentPowerFlags);
                        changeState();
                        priv->deviceDesire = tempDesire;        // put this back like before
                    }
                }
                else {
                    // more states than our fixed array can hold
                    pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriverErr2,(unsigned long)numberOfStates,0);
                }
            }
            else {
                // NULL driver pointer
                pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriverErr4,0,0);
            }
        }
        // NOTE(review): a change already in the queue causes a silent fall-through
        // (no log, offer ignored) — presumably intentional; verify.
    }
    else {
        // array not richer than the one we already have
        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogControllingDriverErr5,(unsigned long)numberOfStates,0);
    }
    return IOPMNoErr;
}

//*********************************************************************************
// registerInterestedDriver
//
// Add the caller to our list of interested drivers and return our current
// power state.
// If we don't have a power-controlling driver yet, we will
// call this interested driver again later when we do get a driver and find
// out what the current power state of the device is.
//*********************************************************************************

IOPMPowerFlags IOService::registerInterestedDriver ( IOService * theDriver )
{
    IOPMinformee * newInformee;
    IOPMPowerFlags futureCapability;

    if (theDriver == NULL ) {
        return 0;
    }

    newInformee = new IOPMinformee;                          // make new driver node
    newInformee->initialize(theDriver);
    priv->interestedDrivers->addToList(newInformee);         // add it to list of drivers

    if ( (pm_vars->theControllingDriver == NULL) ||
         ! (inPlane(gIOPowerPlane)) ||
         ! (pm_vars->parentsKnowState) ) {
        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInterestedDriver,IOPMNotPowerManaged,0);
        return IOPMNotPowerManaged;                          // can't tell it a state yet
    }

    switch (priv->machine_state) {                           // can we notify new driver of a change in progress?
        // These are the states in which a notification pass is under way;
        // fold the new driver into the active change so it stays in sync.
        case IOPMour_prechange_1:
        case IOPMour_prechange_4:
        case IOPMparent_down_4:
        case IOPMparent_down_6:
        case IOPMparent_up_0:
        case IOPMparent_up_6:
            futureCapability = priv->head_note_capabilityFlags;    // yes, remember what we tell it
            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInterestedDriver,(unsigned long)futureCapability,1);
            add_driver_to_active_change(newInformee);        // notify it
            return futureCapability;                         // and return the same thing
    }

    pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInterestedDriver,
            (unsigned long) pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags,2);
    return pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags;    // no, return current capability
}


//*********************************************************************************
// deRegisterInterestedDriver
//
// Remove a previously registered driver from the interested-drivers list.
//*********************************************************************************
IOReturn IOService::deRegisterInterestedDriver ( IOService * theDriver )
{
    pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRemoveDriver,0,0);

    priv->interestedDrivers->removeFromList(theDriver);      // remove the departing driver

    return IOPMNoErr;
}


//*********************************************************************************
// acknowledgePowerChange
//
// After we notified one of the interested drivers or a power-domain child
// of an impending change in power, it has called to say it is now
// prepared for the change.  If this object is the last to
// acknowledge this change, we take whatever action we have been waiting
// for.
// That may include acknowledging to our parent.  In this case, we do it
// last of all to insure that this doesn't cause the parent to call us some-
// where else and alter data we are relying on here (like the very existance
// of a "current change note".)
//*********************************************************************************

IOReturn IOService::acknowledgePowerChange ( IOService * whichObject )
{
    IOPMinformee * ackingObject;

    ackingObject = priv->interestedDrivers->findItem(whichObject);    // one of our interested drivers?
    if ( ackingObject == NULL ) {
        if ( ! isChild(whichObject,gIOPowerPlane) ) {
            // neither an interested driver nor a power-plane child
            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr1,0,0);
            kprintf("errant driver: %s\n",whichObject->getName());
            return IOPMNoErr;                                // no, just return
        }
        else {
            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChildAcknowledge,0,0);
        }
    }
    else {
        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogDriverAcknowledge,0,0);
    }

    if (! acquire_lock() ) {
        return IOPMNoErr;
    }

    // NOTE(review): our_lock protects head_note_pendingAcks; all_acked() is
    // deliberately called only after dropping the lock.
    if (priv->head_note_pendingAcks != 0 ) {                 // yes, make sure we're expecting acks
        if ( ackingObject != NULL ) {                        // it's an interested driver
            if ( ackingObject->timer != 0 ) {                // make sure we're expecting this ack
                ackingObject->timer = 0;                     // mark it acked
                priv->head_note_pendingAcks -= 1;            // that's one fewer to worry about
                if ( priv->head_note_pendingAcks == 0 ) {    // is that the last?
                    stop_ack_timer();                        // yes, stop the timer
                    IOUnlock(priv->our_lock);
                    all_acked();                             // and now we can continue
                    return IOPMNoErr;
                }
            }
            else {
                // this driver has already acked
                pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr2,0,0);
                kprintf("errant driver: %s\n",whichObject->getName());
            }
        }
        else {                                               // it's a child
            priv->head_note_pendingAcks -= 1;                // that's one fewer to worry about
            if ( priv->head_note_pendingAcks == 0 ) {        // is that the last?
                stop_ack_timer();                            // yes, stop the timer
                IOUnlock(priv->our_lock);
                all_acked();                                 // and now we can continue
                return IOPMNoErr;
            }
        }
    }
    else {
        // not expecting anybody to ack
        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr3,0,0);
        kprintf("errant driver: %s\n",whichObject->getName());
    }
    IOUnlock(priv->our_lock);
    return IOPMNoErr;
}

//*********************************************************************************
// acknowledgeSetPowerState
//
// After we instructed our controlling driver to change power states,
// it has called to say it has finished doing so.
// We continue to process the power state change.
// driver_timer == -1 means the driver acks via this call instead of its
// return code; driver_timer > 0 means the ack timer is running.
//*********************************************************************************

IOReturn IOService::acknowledgeSetPowerState ( void )
{
    if (! acquire_lock() ) {
        return IOPMNoErr;
    }
    if ( priv->driver_timer == -1 ) {
        priv->driver_timer = 0;                              // driver is acking instead of using return code
    }
    else {
        if ( priv->driver_timer > 0 ) {                      // are we expecting this?
            stop_ack_timer();                                // yes, stop the timer
            priv->driver_timer = 0;
            IOUnlock(priv->our_lock);
            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogDriverAcknowledgeSet,0,0);
            driver_acked();
            return IOPMNoErr;
        }
        else {
            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr4,0,0);    // no
        }
    }
    IOUnlock(priv->our_lock);
    return IOPMNoErr;
}


//*********************************************************************************
// driver_acked
//
// Either the controlling driver has called acknowledgeSetPowerState
// or the acknowledgement timer has expired while waiting for that.
// We carry on processing the current change note.
//*********************************************************************************

// Dispatch on the state machine to resume whichever change sequence was
// waiting for the controlling driver's ack (or its timeout).
void IOService::driver_acked ( void )
{
    switch (priv->machine_state) {
        case IOPMour_prechange_2:
            our_prechange_2();
            break;
        case IOPMparent_down_5:
            parent_down_5();
            break;
        case IOPMparent_up_4:
            parent_up_4();
            break;
    }
}


//*********************************************************************************
// powerDomainWillChangeTo
//
// Called by the power-hierarchy parent notifying of a new power state
// in the power domain.
// We enqueue a parent power-change to our queue of power changes.
// This may or may not cause us to change power, depending on what
// kind of change is occuring in the domain.
//*********************************************************************************

IOReturn IOService::powerDomainWillChangeTo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnection * whichParent )
{
    OSIterator * iter;
    OSObject * next;
    IOPowerConnection * connection;
    unsigned long newStateNumber;
    IOPMPowerFlags combinedPowerFlags;

    pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogWillChange,(unsigned long)newPowerStateFlags,0);

    if ( ! inPlane(gIOPowerPlane) ) {
        return IOPMAckImplied;                               // somebody goofed
    }

    // Lazily find the PM work loop and hang our command gate on it.
    if ( (pm_vars->PMworkloop == NULL) || (pm_vars->PMcommandGate == NULL) ) {
        getPMworkloop();                                     // we have a path to the root,
        if ( pm_vars->PMworkloop != NULL ) {                 // so find out the workloop
            if ( pm_vars->PMcommandGate == NULL ) {          // and make our command gate
                pm_vars->PMcommandGate = IOCommandGate::commandGate((OSObject *)this);
                if ( pm_vars->PMcommandGate != NULL ) {
                    pm_vars->PMworkloop->addEventSource(pm_vars->PMcommandGate);
                }
            }
        }
    }

    combinedPowerFlags = 0;                                  // combine parents' power states
    // For the announcing parent use the new flags; for all others use their
    // current flags.
    iter = getParentIterator(gIOPowerPlane);

    if ( iter ) {
        while ( (next = iter->getNextObject()) ) {
            if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) {
                if ( connection == whichParent ){
                    combinedPowerFlags |= newPowerStateFlags;
                }
                else {
                    combinedPowerFlags |= connection->parentCurrentPowerFlags();
                }
            }
        }
        iter->release();
    }

    if ( pm_vars->theControllingDriver == NULL ) {           // we can't take any more action
        return IOPMAckImplied;
    }
    newStateNumber = pm_vars->theControllingDriver->maxCapabilityForDomainState(combinedPowerFlags);
    return enqueuePowerChange(IOPMParentInitiated | IOPMDomainWillChange, newStateNumber,combinedPowerFlags,whichParent);    // make the change
}


//*********************************************************************************
// powerDomainDidChangeTo
//
// Called by the power-hierarchy parent after the power state of the power domain
// has settled at a new level.
// We enqueue a parent power-change to our queue of power changes.
// This may or may not cause us to change power, depending on what
// kind of change is occuring in the domain.
+//********************************************************************************* + +IOReturn IOService::powerDomainDidChangeTo ( IOPMPowerFlags newPowerStateFlags, IOPowerConnection * whichParent ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + unsigned long newStateNumber; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogDidChange,newPowerStateFlags,0); + + whichParent->setParentCurrentPowerFlags(newPowerStateFlags); // set our connection data + whichParent->setParentKnowsState(true); + + pm_vars->parentsCurrentPowerFlags = 0; // recompute our parent info + pm_vars->parentsKnowState = true; + + iter = getParentIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + pm_vars->parentsKnowState &= connection->parentKnowsState(); + pm_vars->parentsCurrentPowerFlags |= connection->parentCurrentPowerFlags(); + } + } + iter->release(); + } + + if ( pm_vars->theControllingDriver == NULL ) { + return IOPMAckImplied; + } + + newStateNumber = pm_vars->theControllingDriver->maxCapabilityForDomainState(pm_vars->parentsCurrentPowerFlags); + return enqueuePowerChange(IOPMParentInitiated | IOPMDomainDidChange, newStateNumber,pm_vars->parentsCurrentPowerFlags,whichParent); // tell interested parties about it +} + + +//********************************************************************************* +// requestPowerDomainState +// +// +//********************************************************************************* +IOReturn IOService::requestPowerDomainState ( IOPMPowerFlags desiredState, IOPowerConnection * whichChild, unsigned long specification ) +{ + unsigned long i; + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRequestDomain, + (unsigned long)desiredState,(unsigned long)specification); + + if ( pm_vars->theControllingDriver == NULL) { + return 
IOPMNotYetInitialized; + } + + switch (specification) { + case IOPMLowestState: + i = 0; + while ( i < pm_vars->theNumberOfPowerStates ) { + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & desiredState) == (desiredState & pm_vars->myCharacterFlags) ) { + break; + } + i++; + } + if ( i >= pm_vars->theNumberOfPowerStates ) { + return IOPMNoSuchState; + } + break; + + case IOPMNextLowerState: + i = pm_vars->myCurrentState - 1; + while ( i >= 0 ) { + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & desiredState) == (desiredState & pm_vars->myCharacterFlags) ) { + break; + } + i--; + } + if ( i < 0 ) { + return IOPMNoSuchState; + } + break; + + case IOPMHighestState: + i = pm_vars->theNumberOfPowerStates; + while ( i >= 0 ) { + i--; + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & desiredState) == (desiredState & pm_vars->myCharacterFlags) ) { + break; + } + } + if ( i < 0 ) { + return IOPMNoSuchState; + } + break; + + case IOPMNextHigherState: + i = pm_vars->myCurrentState + 1; + while ( i < pm_vars->theNumberOfPowerStates ) { + if ( ( pm_vars->thePowerStates[i].outputPowerCharacter & desiredState) == (desiredState & pm_vars->myCharacterFlags) ) { + break; + } + i++; + } + if ( i == pm_vars->theNumberOfPowerStates ) { + return IOPMNoSuchState; + } + break; + + default: + return IOPMBadSpecification; + } + +// Now loop through the children. When we encounter the calling child, save +// the new state as this child's desire. Then, compute a new maximum +// of everybody's desires. 
+ + iter = getChildIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( connection == whichChild ) { + connection->setDesiredDomainState(i); + } + } + } + iter->release(); + } + + if ( inPlane(gIOPowerPlane) && + (pm_vars->parentsKnowState) ) { + changeState(); // change state if all children can now tolerate lower power + } + + if ( priv->clampOn ) { // are we clamped on, waiting for this child? + priv->clampOn = false; // yes, remove the clamp + changePowerStateToPriv(0); + } + + return IOPMNoErr; +} + + +//********************************************************************************* +// temporaryPowerClampOn +// +// A power domain wants to clamp its power on till it has children which +// will thendetermine the power domain state. +// +// We enter the highest state until addPowerChild is called. +//********************************************************************************* + +IOReturn IOService::temporaryPowerClampOn ( void ) +{ + priv->clampOn = true; + makeUsable(); + return IOPMNoErr; +} + + +//********************************************************************************* +// makeUsable +// +// Some client of our device is asking that we become usable. Although +// this has not come from a subclassed device object, treat it exactly +// as if it had. In this way, subsequent requests for lower power from +// a subclassed device object will pre-empt this request. +// +// We treat this as a subclass object request to switch to the +// highest power state. 
+//********************************************************************************* + +IOReturn IOService::makeUsable ( void ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogMakeUsable,0,0); + + if ( pm_vars->theControllingDriver == NULL ) { + priv->need_to_become_usable = true; + return IOPMNoErr; + } + priv->deviceDesire = pm_vars->theNumberOfPowerStates - 1; + if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { + return changeState(); + } + return IOPMNoErr; +} + + +//********************************************************************************* +// currentCapability +// +//********************************************************************************* + +IOPMPowerFlags IOService::currentCapability ( void ) +{ + if ( pm_vars->theControllingDriver == NULL ) { + return 0; + } + else { + return pm_vars->thePowerStates[pm_vars->myCurrentState].capabilityFlags; + } +} + + +//********************************************************************************* +// changePowerStateTo +// +// For some reason, our power-controlling driver has decided it needs to change +// power state. We enqueue the power change so that appropriate parties +// will be notified, and then we will instruct the driver to make the change. +//********************************************************************************* + +IOReturn IOService::changePowerStateTo ( unsigned long ordinal ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeStateTo,ordinal,0); + + if ( ordinal >= pm_vars->theNumberOfPowerStates ) { + return IOPMParameterError; + } + priv->driverDesire = ordinal; + if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { + return changeState(); + } + + return IOPMNoErr; +} + +//********************************************************************************* +// changePowerStateToPriv +// +// For some reason, a subclassed device object has decided it needs to change +// power state. 
We enqueue the power change so that appropriate parties +// will be notified, and then we will instruct the driver to make the change. +//********************************************************************************* + +IOReturn IOService::changePowerStateToPriv ( unsigned long ordinal ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeStateToPriv,ordinal,0); + + if ( pm_vars->theControllingDriver == NULL) { + return IOPMNotYetInitialized; + } + if ( ordinal >= pm_vars->theNumberOfPowerStates ) { + return IOPMParameterError; + } + priv->deviceDesire = ordinal; + if ( inPlane(gIOPowerPlane) && (pm_vars->parentsKnowState) ) { + return changeState(); + } + + return IOPMNoErr; +} + + +//********************************************************************************* +// changeState +// +// A subclass object, our controlling driver, or a power domain child +// has asked for a different power state. Here we compute what new +// state we should enter and enqueue the change (or start it). +//********************************************************************************* + +IOReturn IOService::changeState ( void ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + unsigned long newDesiredState = 0; + + // Compute the maximum of our children's desires, our controlling driver's desire, and the subclass device's desire. + + if ( ! 
priv->device_overrides ) { + iter = getChildIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( connection->getDesiredDomainState() > newDesiredState ) { + newDesiredState = connection->getDesiredDomainState(); + } + } + } + iter->release(); + } + + if ( priv->driverDesire > newDesiredState ) { + newDesiredState = priv->driverDesire; + } + } + + if ( priv->deviceDesire > newDesiredState ) { + newDesiredState = priv->deviceDesire; + } + + priv->ourDesiredPowerState = newDesiredState; + + if ( (pm_vars->theControllingDriver == NULL) || // if not fully initialized + ! (inPlane(gIOPowerPlane)) || + ! (pm_vars->parentsKnowState) ) { + return IOPMNoErr; // we can do no more + } + + return enqueuePowerChange(IOPMWeInitiated,newDesiredState,0,0); +} + + +//********************************************************************************* +// currentPowerConsumption +// +//********************************************************************************* + +unsigned long IOService::currentPowerConsumption ( void ) +{ + if ( pm_vars->theControllingDriver == NULL ) { + return 0; + } + else { + return pm_vars->thePowerStates[pm_vars->myCurrentState].staticPower; + } +} + +//********************************************************************************* +// activityTickle +// +// The activity tickle with parameter kIOPMSubclassPolicyis not handled +// here and should have been intercepted by the subclass. +// The tickle with parameter kIOPMSuperclassPolicy1 causes the activity +// flag to be set, and the device state checked. If the device has been +// powered down, it is powered up again. 
+//********************************************************************************* + +bool IOService::activityTickle ( unsigned long type, unsigned long stateNumber=0 ) +{ + if ( type == kIOPMSuperclassPolicy1 ) { + if ( (priv->activityLock == NULL) || + (pm_vars->theControllingDriver == NULL) || + ( pm_vars->commandQueue == NULL) ) { + return true; + } + IOTakeLock(priv->activityLock); + priv->device_active = true; + if ( pm_vars->myCurrentState >= stateNumber) { + IOUnlock(priv->activityLock); + return true; + } + IOUnlock(priv->activityLock); // send a message on the command queue + pm_vars->commandQueue->enqueueCommand(true, (void *)kPMunIdleDevice, (void *)stateNumber); + return false; + } + return true; +} + +//********************************************************************************* +// getPMworkloop +// +// A child is calling to get a pointer to the Power Management workloop. +// We got it or get it from one of our parents. +//********************************************************************************* + +IOWorkLoop * IOService::getPMworkloop ( void ) +{ +IOService * aParent; + + if ( ! inPlane(gIOPowerPlane) ) { + return NULL; + } + if ( pm_vars->PMworkloop == NULL ) { // we have no workloop yet + aParent = (IOService *)getParentEntry(gIOPowerPlane)->getParentEntry(gIOPowerPlane); + if ( aParent != NULL ) { // ask one of our parents for the workloop + pm_vars->PMworkloop = aParent->getPMworkloop(); + } + } + return pm_vars->PMworkloop; +} + + +//********************************************************************************* +// setIdleTimerPeriod +// +// A subclass policy-maker is going to use our standard idleness +// detection service. Make a command queue and an idle timer and +// connect them to the power management workloop. Finally, +// start the timer. 
+//********************************************************************************* + +IOReturn IOService::setIdleTimerPeriod ( unsigned long period ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMsetIdleTimerPeriod,period, 0); + + priv->idle_timer_period = period; + + if ( period > 0 ) { + if ( getPMworkloop() == NULL ) { + return kIOReturnError; + } + + if (pm_vars->commandQueue == NULL ) { // make the command queue + pm_vars->commandQueue = IOCommandQueue::commandQueue(this, PMreceiveCmd); + if (! pm_vars->commandQueue || + ( pm_vars->PMworkloop->addEventSource( pm_vars->commandQueue) != kIOReturnSuccess) ) { + return kIOReturnError; + } + } + // make the timer event + if ( priv->timerEventSrc == NULL ) { + priv->timerEventSrc = IOTimerEventSource::timerEventSource(this, + PM_idle_timer_expired); + if ( ! priv->timerEventSrc || + ( pm_vars->PMworkloop->addEventSource( priv->timerEventSrc) != kIOReturnSuccess) ) { + return kIOReturnError; + } + } + + if ( priv->activityLock == NULL ) { + priv->activityLock = IOLockAlloc(); + } + + start_PM_idle_timer(); + } + return IOPMNoErr; +} + + +//********************************************************************************* +// start_PM_idle_timer +// +// The parameter is a pointer to us. Use it to call our timeout method. +//********************************************************************************* +void IOService::start_PM_idle_timer ( void ) +{ + priv->timerEventSrc->setTimeout(priv->idle_timer_period, NSEC_PER_SEC); +} + + +//********************************************************************************* +// PM_idle_timer_expired +// +// The parameter is a pointer to us. Use it to call our timeout method. 
+//********************************************************************************* + +void PM_idle_timer_expired(OSObject * ourSelves, IOTimerEventSource *) +{ + ((IOService *)ourSelves)->PM_idle_timer_expiration(); +} + + +//********************************************************************************* +// PM_idle_timer_expiration +// +// The idle timer has expired. If there has been activity since the last +// expiration, just restart the timer and return. If there has not been +// activity, switch to the next lower power state and restart the timer. +//********************************************************************************* + +void IOService::PM_idle_timer_expiration ( void ) +{ + if ( ! initialized ) { + return; // we're unloading + } + + if ( priv->idle_timer_period > 0 ) { + IOTakeLock(priv->activityLock); + if ( priv->device_active ) { + priv->device_active = false; + IOUnlock(priv->activityLock); + start_PM_idle_timer(); + return; + } + if ( pm_vars->myCurrentState > 0 ) { + IOUnlock(priv->activityLock); + priv->askingFor = pm_vars->myCurrentState - 1; + changePowerStateToPriv(pm_vars->myCurrentState - 1); + start_PM_idle_timer(); + return; + } + IOUnlock(priv->activityLock); + start_PM_idle_timer(); + } +} + + + +// ********************************************************************************** +// PMreceiveCmd +// +// +// +// ********************************************************************************** +void PMreceiveCmd ( OSObject * theDriver, void * command, void * param1, void * param2, void *param3 ) +{ + ((IOService *)theDriver)->command_received(command,param1,param2,param3); +} + + +// ********************************************************************************** +// command_received +// +// We have received a command from ourselves on the command queue. +// This is to prevent races with timer-expiration code. 
+// ********************************************************************************** +void IOService::command_received ( void * command, void *stateNumber , void * , void *) +{ + if ( ! initialized ) { + return; // we're unloading + } + + if ( command == (void *)kPMunIdleDevice ) { + if ( (pm_vars->myCurrentState < (unsigned long)stateNumber) && + (priv->imminentState < (unsigned long)stateNumber ) && + ((unsigned long)stateNumber > priv->askingFor) ) { + priv->askingFor = (unsigned long)stateNumber; + changePowerStateToPriv((unsigned long)stateNumber); + } + } +} + + +//********************************************************************************* +// setAggressiveness +// +// Pass on the input parameters to all power domain children. All those which are +// power domains will pass it on to their children, etc. +//********************************************************************************* + +IOReturn IOService::setAggressiveness ( unsigned long type, unsigned long newLevel ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogSetAggressiveness,type, newLevel); + + if ( type <= kMaxType ) { + pm_vars->current_aggressiveness_values[type] = newLevel; + pm_vars->current_aggressiveness_valid[type] = true; + } + + iter = getChildIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + ((IOService *)(connection->getChildEntry(gIOPowerPlane)))->setAggressiveness(type, newLevel); + } + } + iter->release(); + } + + return IOPMNoErr; +} + +//********************************************************************************* +// getAggressiveness +// +// Called by the user client. 
+//********************************************************************************* + +IOReturn IOService::getAggressiveness ( unsigned long type, unsigned long * currentLevel ) +{ + if ( type <= kMaxType ) { + *currentLevel = pm_vars->current_aggressiveness_values[type]; + } + return kIOReturnSuccess; +} + +//********************************************************************************* +// systemWake +// +// Pass this to all power domain children. All those which are +// power domains will pass it on to their children, etc. +//********************************************************************************* + +IOReturn IOService::systemWake ( void ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogSystemWake,0, 0); + + iter = getChildIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + ((IOService *)(connection->getChildEntry(gIOPowerPlane)))->systemWake(); + } + } + iter->release(); + } + + if ( pm_vars->theControllingDriver != NULL ) { + if ( pm_vars->theControllingDriver->didYouWakeSystem() ) { + makeUsable(); + } + } + + return IOPMNoErr; +} + + +//********************************************************************************* +// temperatureCriticalForZone +// +//********************************************************************************* + +IOReturn IOService::temperatureCriticalForZone ( IOService * whichZone ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogCriticalTemp,0,0); + + if ( inPlane(gIOPowerPlane) && ! 
(priv->we_are_root) ) { + ((IOService *)(getParentEntry(gIOPowerPlane)->getParentEntry(gIOPowerPlane)))->temperatureCriticalForZone(whichZone); + } + return IOPMNoErr; +} + + +//********************************************************************************* +// powerOverrideOnPriv +// +//********************************************************************************* + + +IOReturn IOService::powerOverrideOnPriv ( void ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogOverrideOn,0,0); + + priv->device_overrides = true; // turn on the override + return changeState(); // change state if that changed something +} + + +//********************************************************************************* +// powerOverrideOffPriv +// +//********************************************************************************* +IOReturn IOService::powerOverrideOffPriv ( void ) +{ + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogOverrideOff,0,0); + + priv->device_overrides = false; // turn off the override + return changeState(); // change state if that changed something +} + + +//********************************************************************************* +// enqueuePowerChange +// +// Allocate a new state change notification, initialize it with fields from the +// caller, and add it to the tail of the list of pending power changes. +// +// If it is early enough in the list, and almost all the time it is the only one in +// the list, start the power change. +// +// In rare instances, this change will preempt the previous change in the list. +// If the previous change is un-actioned in any way (because we are still +// processing an even earlier power change), and if both the previous change +// in the list and this change are initiated by us (not the parent), then we +// needn't perform the previous change, so we collapse the list a little. 
+//*********************************************************************************
+
+IOReturn IOService::enqueuePowerChange ( unsigned long flags, unsigned long whatStateOrdinal, unsigned long domainState, IOPowerConnection * whichParent )
+{
+    long newNote;
+    long previousNote;
+
+// Create and initialize the new change note
+
+    newNote = priv->changeList->createChangeNote();
+    if ( newNote == -1 ) {
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogEnqueueErr,0,0);
+        return IOPMAckImplied; // uh-oh, our list is full
+    }
+
+    // copy the target state's characteristics out of the power state array
+    priv->changeList->changeNote[newNote].newStateNumber = whatStateOrdinal;
+    priv->changeList->changeNote[newNote].outputPowerCharacter = pm_vars->thePowerStates[whatStateOrdinal].outputPowerCharacter;
+    priv->changeList->changeNote[newNote].inputPowerRequirement = pm_vars->thePowerStates[whatStateOrdinal].inputPowerRequirement;
+    priv->changeList->changeNote[newNote].capabilityFlags = pm_vars->thePowerStates[whatStateOrdinal].capabilityFlags;
+    priv->changeList->changeNote[newNote].flags = flags;
+    // domainState and whichParent are only meaningful for parent-initiated changes
+    if (flags & IOPMParentInitiated ) {
+        priv->changeList->changeNote[newNote].domainState = domainState;
+        priv->changeList->changeNote[newNote].parent = whichParent;
+    }
+
+    previousNote = priv->changeList->previousChangeNote(newNote);
+
+    if ( previousNote == -1 ) {
+
+        // Queue is empty, we can start this change.
+
+        if (flags & IOPMWeInitiated ) {
+            start_our_change(newNote);
+            return 0;
+        }
+        else {
+            return start_parent_change(newNote);
+        }
+    }
+
+    // The queue is not empty. Try to collapse this new change and the previous one in queue into one change.
+    // This is possible only if both changes are initiated by us, and neither has been started yet.
+    // Do this more than once if possible.
+
+    // (A change is started iff it is at the head of the queue)
+
+    while ( (previousNote != priv->head_note) && (previousNote != -1) &&
+            (priv->changeList->changeNote[newNote].flags & priv->changeList->changeNote[previousNote].flags & IOPMWeInitiated) ) {
+        // fold the newer request's target into the older, un-started note, then drop the tail note
+        priv->changeList->changeNote[previousNote].outputPowerCharacter = priv->changeList->changeNote[newNote].outputPowerCharacter;
+        priv->changeList->changeNote[previousNote].inputPowerRequirement = priv->changeList->changeNote[newNote].inputPowerRequirement;
+        priv->changeList->changeNote[previousNote].capabilityFlags =priv-> changeList->changeNote[newNote].capabilityFlags;
+        priv->changeList->changeNote[previousNote].newStateNumber = priv->changeList->changeNote[newNote].newStateNumber;
+        priv->changeList->releaseTailChangeNote();
+        newNote = previousNote;
+        previousNote = priv->changeList->previousChangeNote(newNote);
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogCollapseQueue,0,0);
+    }
+    return IOPMWillAckLater; // in any case, we can't start yet
+}
+
+
+//*********************************************************************************
+// notifyAll
+//
+// Notify all interested parties either that a change is impending or that the
+// previously-notified change is done and power has settled.
+// The parameter identifies whether this is the
+// pre-change notification or the post-change notification.
+//
+//*********************************************************************************
+
+IOReturn IOService::notifyAll ( bool is_prechange )
+{
+    IOPMinformee * nextObject;
+    OSIterator * iter;
+    OSObject * next;
+    IOPowerConnection * connection;
+
+    // To prevent acknowledgePowerChange from finishing the change note and removing it from the queue if
+    // some driver calls it, we inflate the number of pending acks so it cannot become zero. We'll fix it later.
+
+    priv->head_note_pendingAcks =1;
+
+    // OK, we will go through the lists of interested drivers and power domain children
+    // and notify each one of this change.
+
+    nextObject = priv->interestedDrivers->firstInList(); // notify interested drivers
+    while ( nextObject != NULL ) {
+        priv->head_note_pendingAcks +=1;
+        // NOTE(review): inform's result is deliberately ignored; a false return just
+        // means this driver will ack later, so pendingAcks stays raised for it
+        if (! inform(nextObject, is_prechange) ) {
+        }
+        nextObject = priv->interestedDrivers->nextInList(nextObject);
+    }
+
+    if (! acquire_lock() ) {
+        return IOPMNoErr; // couldn't get our_lock; give up
+    }
+    if ( priv->head_note_pendingAcks > 1 ) { // did they all ack?
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0); // no
+        start_ack_timer();
+    }
+    IOUnlock(priv->our_lock); // either way
+
+    iter = getChildIterator(gIOPowerPlane);
+
+    // now notify each power-domain child connection
+    if ( iter ) {
+        while ( (next = iter->getNextObject()) ) {
+            if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) {
+                priv->head_note_pendingAcks +=1;
+                notifyChild(connection, is_prechange);
+            }
+        }
+        iter->release();
+    }
+
+    if (! acquire_lock() ) {
+        return IOPMNoErr;
+    }
+    priv->head_note_pendingAcks -= 1; // now make this real (drop the artificial +1 from above)
+    if (priv->head_note_pendingAcks == 0 ) { // is it all acked?
+        IOUnlock(priv->our_lock); // yes
+        return IOPMAckImplied; // return ack to parent
+    }
+    IOUnlock(priv->our_lock); // no
+    return IOPMWillAckLater;
+}
+
+
+//*********************************************************************************
+// notifyChild
+//
+// Notify a power domain child of an upcoming power change.
+//
+// If the object acknowledges the current change, we return TRUE.
+//*********************************************************************************
+
+bool IOService::notifyChild ( IOPowerConnection * theNub, bool is_prechange )
+{
+    IOReturn k = IOPMAckImplied;
+
+    // deliver the will-change or did-change notification through the child connection
+    if ( is_prechange ) {
+        k =((IOService *)(theNub->getChildEntry(gIOPowerPlane)))->powerDomainWillChangeTo( priv->head_note_outputFlags,theNub);
+    }
+    else {
+        k =((IOService *)(theNub->getChildEntry(gIOPowerPlane)))->powerDomainDidChangeTo( priv->head_note_outputFlags,theNub);
+    }
+
+    if ( k == IOPMAckImplied ) { // did the return code ack?
+        priv->head_note_pendingAcks -=1; // yes
+        return true;
+    }
+    return false;
+}
+
+
+//*********************************************************************************
+// inform
+//
+// Notify an interested driver of an upcoming power change.
+//
+// If the object acknowledges the current change, we return TRUE.
+//*********************************************************************************
+
+bool IOService::inform ( IOPMinformee * nextObject, bool is_prechange )
+{
+    IOReturn k = IOPMAckImplied;
+
+    nextObject->timer = -1; // initialize this (-1 = notification in flight, not yet acked)
+
+    if ( is_prechange ) {
+        pm_vars->thePlatform->PMLog (pm_vars->ourName,PMlogInformDriverPreChange,
+            (unsigned long)priv->head_note_capabilityFlags,(unsigned long)priv->head_note_state);
+        k = nextObject->whatObject->powerStateWillChangeTo( priv->head_note_capabilityFlags,priv->head_note_state,this);
+    }
+    else {
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogInformDriverPostChange,
+            (unsigned long)priv->head_note_capabilityFlags,(unsigned long)priv->head_note_state);
+        k = nextObject->whatObject->powerStateDidChangeTo(priv->head_note_capabilityFlags,priv->head_note_state,this);
+    }
+
+    if ( nextObject->timer == 0 ) { // did it ack behind our back?
+        return true; // yes
+    }
+    if ( k ==IOPMAckImplied ) { // no, did the return code ack?
+        nextObject->timer = 0; // yes
+        priv->head_note_pendingAcks -= 1;
+        return true;
+    }
+    if ( k < 0 ) {
+        nextObject->timer = 0; // somebody goofed
+        priv-> head_note_pendingAcks -= 1;
+        return true;
+    }
+    // positive k is a timeout in microseconds; convert to ack-timer ticks
+    nextObject->timer = (k * ns_per_us / ACK_TIMER_PERIOD) + 1; // no, it's a timer
+    return false;
+}
+
+
+//*********************************************************************************
+// our_prechange_03
+//
+// All registered applications and kernel clients have positively acknowledged our
+// intention of lowering power. Here we notify them all that we will definitely
+// lower the power. If we don't have to wait for any of them to acknowledge, we
+// carry on by notifying interested drivers. Otherwise, we do wait.
+//*********************************************************************************
+
+void IOService::our_prechange_03 ( void )
+{
+    priv->machine_state = IOPMour_prechange_05; // next state
+    if ( tellChangeDown(priv->head_note_state) ) { // are we waiting for responses?
+        return our_prechange_05(); // no, notify interested drivers
+    }
+}
+
+
+//*********************************************************************************
+// our_prechange_05
+//
+// All registered applications and kernel clients have acknowledged our notification
+// that we are lowering power. Here we notify interested drivers. If we don't have
+// to wait for any of them to acknowledge, we instruct our power driver to make the change.
+// Otherwise, we do wait.
+//*********************************************************************************
+
+void IOService::our_prechange_05 ( void )
+{
+    priv->machine_state = IOPMour_prechange_1; // no, in case they don't all ack
+    if ( notifyAll(true) == IOPMAckImplied ) {
+        our_prechange_1();
+    }
+}
+
+
+//*********************************************************************************
+// our_prechange_1
+//
+// All interested drivers have acknowledged our pre-change notification of a power
+// change we initiated. Here we instruct our controlling driver to make
+// the change to the hardware. If it does so, we continue processing
+// (waiting for settle and notifying interested parties post-change.)
+// If it doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+void IOService::our_prechange_1 ( void )
+{
+    if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) {
+        our_prechange_2(); // it's done, carry on
+    }
+    else {
+        priv->machine_state = IOPMour_prechange_2; // it's not, wait for it
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0);
+        start_ack_timer();
+    }
+}
+
+
+//*********************************************************************************
+// our_prechange_2
+//
+// Our controlling driver has changed power state on the hardware
+// during a power change we initiated. Here we see if we need to wait
+// for power to settle before continuing. If not, we continue processing
+// (notifying interested parties post-change). If so, we wait and
+// continue later.
+//*********************************************************************************
+
+void IOService::our_prechange_2 ( void )
+{
+    priv->settle_time = compute_settle_time();
+    if ( priv->settle_time == 0 ) {
+        our_prechange_3();
+    }
+    else {
+        priv->machine_state = IOPMour_prechange_3;
+        startSettleTimer(priv->settle_time);
+    }
+}
+
+
+//*********************************************************************************
+// our_prechange_3
+//
+// Power has settled on a power change we initiated. Here we notify
+// all our interested parties post-change. If they all acknowledge, we're
+// done with this change note, and we can start on the next one.
+// Otherwise we have to wait for acknowledgements and finish up later.
+//*********************************************************************************
+
+void IOService::our_prechange_3 ( void )
+{
+    priv->machine_state = IOPMour_prechange_4; // in case they don't all ack
+    if ( notifyAll(false) == IOPMAckImplied ) {
+        our_prechange_4();
+    }
+}
+
+
+//*********************************************************************************
+// our_prechange_4
+//
+// Power has settled on a power change we initiated, and
+// all our interested parties have acknowledged. We're
+// done with this change note, and we can start on the next one.
+//*********************************************************************************
+
+void IOService::our_prechange_4 ( void )
+{
+    // all_done() retires the head change note and starts the next queued change
+    all_done();
+}
+
+
+//*********************************************************************************
+// parent_down_0
+//
+// All applications and kernel clients have been notified of a power lowering
+// initiated by the parent and we didn't have to wait for any responses. Here
+// we notify any interested drivers and power domain children. If they all ack,
+// we continue with the power change.
+// If at least one doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+IOReturn IOService::parent_down_0 ( void )
+{
+    priv->machine_state = IOPMparent_down_4; // in case they don't all ack
+    if ( notifyAll(true) == IOPMAckImplied ) {
+        return parent_down_1(); // they did
+    }
+    return IOPMWillAckLater; // they didn't
+}
+
+
+//*********************************************************************************
+// parent_down_05
+//
+// All applications and kernel clients have been notified of a power lowering
+// initiated by the parent and we had to wait for their responses. Here we notify
+// any interested drivers and power domain children. If they all ack, we continue
+// with the power change.
+// If at least one doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+void IOService::parent_down_05 ( void )
+{
+    priv->machine_state = IOPMparent_down_4; // in case they don't all ack
+    if ( notifyAll(true) == IOPMAckImplied ) {
+        parent_down_4(); // they did
+    }
+}
+
+
+//*********************************************************************************
+// parent_down_1
+//
+// All parties have acknowledged our pre-change notification of a power
+// lowering initiated by the parent. Here we instruct our controlling driver
+// to put the hardware in the state it needs to be in when the domain is
+// lowered. If it does so, we continue processing
+// (waiting for settle and acknowledging the parent.)
+// If it doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+IOReturn IOService::parent_down_1 ( void )
+{
+    if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) {
+        return parent_down_2(); // it's done, carry on
+    }
+    // driver will ack asynchronously; arm the timeout timer
+    priv->machine_state = IOPMparent_down_5; // it's not, wait for it
+    pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0);
+    start_ack_timer();
+    return IOPMWillAckLater;
+}
+
+
+//*********************************************************************************
+// parent_down_4
+//
+// We had to wait for it, but all parties have acknowledged our pre-change
+// notification of a power lowering initiated by the parent.
+// Here we instruct our controlling driver
+// to put the hardware in the state it needs to be in when the domain is
+// lowered. If it does so, we continue processing
+// (waiting for settle and acknowledging the parent.)
+// If it doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+void IOService::parent_down_4 ( void )
+{
+    if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) {
+        parent_down_5(); // it's done, carry on
+    }
+    else {
+        // driver will ack asynchronously; arm the timeout timer
+        priv-> machine_state = IOPMparent_down_5; // it's not, wait for it
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0);
+        start_ack_timer();
+    }
+}
+
+
+//*********************************************************************************
+// parent_down_2
+//
+// Our controlling driver has changed power state on the hardware
+// during a power change initiated by our parent. Here we see if we need
+// to wait for power to settle before continuing. If not, we continue
+// processing (acknowledging our preparedness to the parent).
+// If so, we wait and continue later.
+//*********************************************************************************
+
+IOReturn IOService::parent_down_2 ( void )
+{
+    priv->settle_time = compute_settle_time();
+    if ( priv->settle_time == 0 ) {
+        // no settling needed; go straight to the post-change notification
+        priv->machine_state = IOPMparent_down_6; // in case they don't all ack
+        if ( notifyAll(false) == IOPMAckImplied ) {
+            all_done();
+            return IOPMAckImplied;
+        }
+        return IOPMWillAckLater; // they didn't
+    }
+    else {
+        priv->machine_state = IOPMparent_down_3;
+        startSettleTimer(priv->settle_time);
+        return IOPMWillAckLater;
+    }
+}
+
+
+//*********************************************************************************
+// parent_down_5
+//
+// Our controlling driver has changed power state on the hardware
+// during a power change initiated by our parent. We have had to wait
+// for acknowledgement from interested parties, or we have had to wait
+// for the controlling driver to change the state. Here we see if we need
+// to wait for power to settle before continuing. If not, we continue
+// processing (acknowledging our preparedness to the parent).
+// If so, we wait and continue later.
+//*********************************************************************************
+
+void IOService::parent_down_5 ( void )
+{
+    priv->settle_time = compute_settle_time();
+    if ( priv->settle_time == 0 ) {
+        parent_down_3();
+    }
+    else {
+        priv->machine_state = IOPMparent_down_3;
+        startSettleTimer(priv->settle_time);
+    }
+}
+
+
+//*********************************************************************************
+// parent_down_3
+//
+// Power has settled on a power change initiated by our parent. Here we
+// notify interested parties.
+//*********************************************************************************
+
+void IOService::parent_down_3 ( void )
+{
+    IOService * parent;
+
+    priv->machine_state = IOPMparent_down_6; // in case they don't all ack
+    if ( notifyAll(false) == IOPMAckImplied ) {
+        // capture the parent before all_done() retires the change note
+        parent = priv->head_note_parent;
+        all_done();
+        ((IOService *)(parent->getParentEntry(gIOPowerPlane)))->acknowledgePowerChange(parent);
+    }
+}
+
+
+//*********************************************************************************
+// parent_down_6
+//
+// We had to wait for it, but all parties have acknowledged our post-change
+// notification of a power lowering initiated by the parent.
+// Here we acknowledge the parent.
+// We are done with this change note, and we can start on the next one.
+//*********************************************************************************
+
+void IOService::parent_down_6 ( void )
+{
+    IOService * parent;
+
+    // capture the parent before all_done() retires the change note
+    parent = priv->head_note_parent;
+    all_done();
+    ((IOService *)(parent->getParentEntry(gIOPowerPlane)))->acknowledgePowerChange(parent);
+}
+
+
+//*********************************************************************************
+// parent_up_0
+//
+// Our parent has informed us via powerStateDidChange that it has
+// raised the power in our power domain, and we have had to wait
+// for some interested party to acknowledge our notification.
+// Here we instruct our controlling
+// driver to program the hardware to take advantage of the higher domain
+// power. If it does so, we continue processing
+// (waiting for settle and notifying interested parties post-change.)
+// If it doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+void IOService::parent_up_0 ( void )
+{
+    if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) {
+        parent_up_4(); // it did it, carry on
+    }
+    else {
+        // driver will ack asynchronously; arm the timeout timer
+        priv->machine_state = IOPMparent_up_4; // it didn't, wait for it
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0);
+        start_ack_timer();
+    }
+}
+
+
+//*********************************************************************************
+// parent_up_1
+//
+// Our parent has informed us via powerStateDidChange that it has
+// raised the power in our power domain. Here we instruct our controlling
+// driver to program the hardware to take advantage of the higher domain
+// power. If it does so, we continue processing
+// (waiting for settle and notifying interested parties post-change.)
+// If it doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+IOReturn IOService::parent_up_1 ( void )
+{
+    if ( instruct_driver(priv->head_note_state) == IOPMAckImplied ) {
+        return parent_up_2(); // it did it, carry on
+    }
+    else {
+        // driver will ack asynchronously; arm the timeout timer
+        priv->machine_state = IOPMparent_up_4; // it didn't, wait for it
+        pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,0,0);
+        start_ack_timer();
+        return IOPMWillAckLater;
+    }
+}
+
+
+//*********************************************************************************
+// parent_up_2
+//
+// Our controlling driver has changed power state on the hardware
+// during a power raise initiated by the parent. Here we see if we need to wait
+// for power to settle before continuing. If not, we continue processing
+// (notifying interested parties post-change). If so, we wait and
+// continue later.
+//*********************************************************************************
+
+IOReturn IOService::parent_up_2 ( void )
+{
+    priv->settle_time = compute_settle_time();
+    if ( priv->settle_time == 0 ) {
+        return parent_up_3();
+    }
+    else {
+        priv->machine_state = IOPMparent_up_5;
+        startSettleTimer(priv->settle_time);
+        return IOPMWillAckLater;
+    }
+}
+
+
+//*********************************************************************************
+// parent_up_4
+//
+// Our controlling driver has changed power state on the hardware
+// during a power raise initiated by the parent, but we had to wait for it.
+// Here we see if we need to wait for power to settle before continuing.
+// If not, we continue processing (notifying interested parties post-change).
+// If so, we wait and continue later.
+//*********************************************************************************
+
+void IOService::parent_up_4 ( void )
+{
+    priv->settle_time = compute_settle_time();
+    if ( priv->settle_time == 0 ) {
+        parent_up_5();
+    }
+    else {
+        priv->machine_state = IOPMparent_up_5;
+        startSettleTimer(priv->settle_time);
+    }
+}
+
+
+//*********************************************************************************
+// parent_up_3
+//
+// No power settling was required on a power raise initiated by the parent.
+// Here we notify all our interested parties post-change. If they all acknowledge,
+// we're done with this change note, and we can start on the next one.
+// Otherwise we have to wait for acknowledgements and finish up later.
+//*********************************************************************************
+
+IOReturn IOService::parent_up_3 ( void )
+{
+    priv->machine_state = IOPMparent_up_6; // in case they don't all ack
+    if ( notifyAll(false) == IOPMAckImplied ) {
+        all_done();
+        return IOPMAckImplied;
+    }
+    return IOPMWillAckLater; // they didn't
+}
+
+
+//*********************************************************************************
+// parent_up_5
+//
+// Power has settled on a power raise initiated by the parent.
+// Here we notify all our interested parties post-change. If they all acknowledge,
+// we're done with this change note, and we can start on the next one.
+// Otherwise we have to wait for acknowledgements and finish up later.
+//*********************************************************************************
+
+void IOService::parent_up_5 ( void )
+{
+    priv->machine_state = IOPMparent_up_6; // in case they don't all ack
+    if ( notifyAll(false) == IOPMAckImplied ) {
+        parent_up_6();
+    }
+}
+
+
+//*********************************************************************************
+// parent_up_6
+//
+// All parties have acknowledged our post-change notification of a power
+// raising initiated by the parent. Here we acknowledge the parent.
+// We are done with this change note, and we can start on the next one.
+//*********************************************************************************
+
+void IOService::parent_up_6 ( void )
+{
+    IOService * parent;
+
+    // capture the parent before all_done() retires the change note
+    parent = priv->head_note_parent;
+    all_done();
+    ((IOService *)(parent->getParentEntry(gIOPowerPlane)))->acknowledgePowerChange(parent);
+}
+
+
+//*********************************************************************************
+// all_done
+//
+// A power change is complete, and the used post-change note is at
+// the head of the queue. Remove it and set myCurrentState to the result
+// of the change. Start up the next change in queue.
+//*********************************************************************************
+
+void IOService::all_done ( void )
+{
+    priv->machine_state = IOPMfinished;
+
+    if ( priv->head_note_flags & IOPMWeInitiated ) { // our power change
+        if ( !( priv->head_note_flags & IOPMNotDone) ) { // could our driver switch to the new state?
+            if ( pm_vars->myCurrentState < priv->head_note_state ) { // yes, did power raise?
+                tellChangeUp (priv->head_note_state); // yes, inform clients and apps
+            }
+            else {
+                if ( ! priv->we_are_root ) { // no, if this lowers our
+                    ask_parent(priv->head_note_state); // power requirements, tell the parent
+                }
+            }
+            pm_vars->myCurrentState = priv->head_note_state; // either way
+            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeDone,(unsigned long)pm_vars->myCurrentState,0);
+            powerChangeDone(pm_vars->myCurrentState); // inform subclass policy-maker
+        }
+// else { // no
+// pm_vars->myCurrentState = pm_vars->theControllingDriver->powerStateForDomainState(pm_vars->parentsCurrentPowerFlags);
+// }
+    }
+    // a single note can carry both flags, so this is a second independent check, not an else
+    if ( priv->head_note_flags & IOPMParentInitiated) { // parent's power change
+        // act only on the half of the change that affects us: a will-change that lowers
+        // us, or a did-change that raises us
+        if ( ((priv->head_note_flags & IOPMDomainWillChange) && (pm_vars->myCurrentState >= priv->head_note_state)) ||
+             ((priv->head_note_flags & IOPMDomainDidChange) && (pm_vars->myCurrentState < priv->head_note_state)) ) {
+            if ( pm_vars->myCurrentState < priv->head_note_state ) { // did power raise?
+                tellChangeUp (priv->head_note_state); // yes, inform clients and apps
+            }
+            pm_vars->myCurrentState = priv->head_note_state; // either way
+            pm_vars->maxCapability = pm_vars->theControllingDriver->maxCapabilityForDomainState(priv->head_note_domainState);
+
+            pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogChangeDone,(unsigned long)pm_vars->myCurrentState,0);
+            powerChangeDone(pm_vars->myCurrentState); // inform subclass policy-maker
+        }
+    }
+
+    priv->changeList->releaseHeadChangeNote(); // we're done with this
+
+    priv->head_note = priv->changeList->currentChange(); // start next one in queue
+    if ( priv->head_note != -1 ) {
+
+        if (priv->changeList->changeNote[priv->head_note].flags & IOPMWeInitiated ) {
+            start_our_change(priv->head_note);
+        }
+        else {
+            if ( start_parent_change(priv->head_note) == IOPMAckImplied ) {
+                ((IOService *)(priv->head_note_parent->getParentEntry(gIOPowerPlane)))->acknowledgePowerChange(priv->head_note_parent);
+            }
+        }
+    }
+}
+
+
+
+//*********************************************************************************
+// all_acked
+//
+// A driver or child has acknowledged our notification of an upcoming power
+// change, and this acknowledgement is the last one pending
+// before we change power or after changing power.
+//
+//*********************************************************************************
+
+void IOService::all_acked ( void )
+{
+    // dispatch to the continuation that was recorded in machine_state
+    // before the notifications went out
+    switch (priv->machine_state) {
+        case IOPMour_prechange_1:
+            our_prechange_1();
+            break;
+        case IOPMour_prechange_4:
+            our_prechange_4();
+            break;
+        case IOPMparent_down_4:
+            parent_down_4();
+            break;
+        case IOPMparent_down_6:
+            parent_down_6();
+            break;
+        case IOPMparent_up_0:
+            parent_up_0();
+            break;
+        case IOPMparent_up_6:
+            parent_up_6();
+            break;
+    }
+}
+
+
+//*********************************************************************************
+// settleTimerExpired
+//
+// Power has settled after our last change. Notify interested parties that
+// there is a new power state.
+//*********************************************************************************
+
+void IOService::settleTimerExpired ( void )
+{
+    if ( ! initialized ) {
+        return; // we're unloading
+    }
+
+    // resume the state machine at the step recorded when the settle timer was started
+    switch (priv->machine_state) {
+        case IOPMour_prechange_3:
+            our_prechange_3();
+            break;
+        case IOPMparent_down_3:
+            parent_down_3();
+            break;
+        case IOPMparent_up_5:
+            parent_up_5();
+            break;
+    }
+}
+
+
+//*********************************************************************************
+// compute_settle_time
+//
+// Compute the power-settling delay in microseconds for the
+// change from myCurrentState to head_note_state.
+//*********************************************************************************
+
+unsigned long IOService::compute_settle_time ( void )
+{
+    unsigned long totalTime;
+    unsigned long i;
+
+    totalTime = 0; // compute total time to attain the new state
+    i = pm_vars->myCurrentState;
+    // sum settleDownTime for every state we pass through while lowering
+    if ( priv->head_note_state < pm_vars->myCurrentState ) { // we're lowering power
+        while ( i > priv->head_note_state ) {
+            totalTime += pm_vars->thePowerStates[i].settleDownTime;
+            i--;
+        }
+    }
+
+    // sum settleUpTime for every state we pass through while raising
+    // (indexed [i+1]: the up-settle time belongs to the state being entered)
+    if ( priv->head_note_state > pm_vars->myCurrentState ) { // we're raising power
+        while ( i < priv->head_note_state ) {
+            totalTime += pm_vars->thePowerStates[i+1].settleUpTime;
+            i++;
+        }
+    }
+
+    return totalTime;
+}
+
+
+//*********************************************************************************
+// startSettleTimer
+//
+// Enter with a power-settling delay in microseconds and start a nano-second
+// timer for that delay.
+//********************************************************************************* + +IOReturn IOService::startSettleTimer ( unsigned long delay ) +{ + AbsoluteTime deadline; + + clock_interval_to_deadline(delay, kMicrosecondScale, &deadline); + + thread_call_enter_delayed(priv->settleTimer, deadline); + + return IOPMNoErr; +} + +//********************************************************************************* +// ack_timer_ticked +// +// The acknowledgement timeout periodic timer has ticked. +// If we are awaiting acks for a power change notification, +// we decrement the timer word of each interested driver which hasn't acked. +// If a timer word becomes zero, we pretend the driver aknowledged. +// If we are waiting for the controlling driver to change the power +// state of the hardware, we decrement its timer word, and if it becomes +// zero, we pretend the driver acknowledged. +//********************************************************************************* + +void IOService::ack_timer_ticked ( void ) +{ + IOPMinformee * nextObject; + + if ( ! initialized ) { + return; // we're unloading + } + + if (! acquire_lock() ) { + return; + } + + switch (priv->machine_state) { + case IOPMour_prechange_2: + case IOPMparent_down_5: + case IOPMparent_up_4: + if ( priv->driver_timer != 0 ) { // are we waiting for our driver to make its change? 
+ priv->driver_timer -= 1; // yes, tick once + if ( priv->driver_timer == 0 ) { // it's tardy, we'll go on without it + IOUnlock(priv->our_lock); + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogCtrlDriverTardy,0,0); + driver_acked(); + } + else { // still waiting, set timer again + start_ack_timer(); + IOUnlock(priv->our_lock); + } + } + else { + IOUnlock(priv->our_lock); + } + break; + + case IOPMour_prechange_1: + case IOPMour_prechange_4: + case IOPMparent_down_4: + case IOPMparent_down_6: + case IOPMparent_up_0: + case IOPMparent_up_6: + if (priv->head_note_pendingAcks != 0 ) { // are we waiting for interested parties to acknowledge? + nextObject = priv->interestedDrivers->firstInList(); // yes, go through the list of interested drivers + while ( nextObject != NULL ) { // and check each one + if ( nextObject->timer > 0 ) { + nextObject->timer -= 1; + if ( nextObject->timer == 0 ) { // this one should have acked by now + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogIntDriverTardy,0,0); + kprintf("interested driver tardy: %s\n",nextObject->whatObject->getName()); + priv->head_note_pendingAcks -= 1; + } + } + nextObject = priv->interestedDrivers->nextInList(nextObject); + } + if ( priv->head_note_pendingAcks == 0 ) { // is that the last? 
+ IOUnlock(priv->our_lock); + all_acked(); // yes, we can continue + } + else { // no, set timer again + start_ack_timer(); + IOUnlock(priv->our_lock); + } + } + else { + IOUnlock(priv->our_lock); + } + break; + + case IOPMparent_down_0: // apps didn't respond to parent-down notification + IOUnlock(priv->our_lock); + IOLockLock(priv->flags_lock); + if (pm_vars->responseFlags) { + pm_vars->responseFlags->release(); // get rid of this stuff + pm_vars->responseFlags = NULL; + } + IOLockUnlock(priv->flags_lock); + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,1); + parent_down_05(); // carry on with the change + break; + + case IOPMour_prechange_03: // apps didn't respond to our power-down request + IOUnlock(priv->our_lock); + IOLockLock(priv->flags_lock); + if (pm_vars->responseFlags) { + pm_vars->responseFlags->release(); // get rid of this stuff + pm_vars->responseFlags = NULL; + } + IOLockUnlock(priv->flags_lock); + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,2); + tellNoChangeDown(priv->head_note_state); // rescind the request + priv->head_note_flags |= IOPMNotDone; // mark the change note un-actioned + all_done(); // and we're done + break; + + case IOPMour_prechange_05: // apps didn't respond to our power-down notification + IOUnlock(priv->our_lock); + IOLockLock(priv->flags_lock); + if (pm_vars->responseFlags) { + pm_vars->responseFlags->release(); // get rid of this stuff + pm_vars->responseFlags = NULL; + } + IOLockUnlock(priv->flags_lock); + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientTardy,0,3); + our_prechange_05(); // carry on with the change + break; + + default: + IOUnlock(priv->our_lock); // not waiting for acks + break; + } +} + + +//********************************************************************************* +// start_ack_timer +// +//********************************************************************************* + +void IOService::start_ack_timer ( void ) +{ + AbsoluteTime deadline; + + 
clock_interval_to_deadline(ACK_TIMER_PERIOD, kNanosecondScale, &deadline); + + thread_call_enter_delayed(priv->ackTimer, deadline); +} + + +//********************************************************************************* +// stop_ack_timer +// +//********************************************************************************* + +void IOService::stop_ack_timer ( void ) +{ + thread_call_cancel(priv->ackTimer); +} + + +//********************************************************************************* +// c-language timer expiration functions +// +//********************************************************************************* + +static void ack_timer_expired ( thread_call_param_t us) +{ + ((IOService *)us)->ack_timer_ticked(); +} + + +static void settle_timer_expired ( thread_call_param_t us) +{ + ((IOService *)us)->settleTimerExpired(); +} + + +//********************************************************************************* +// add_child_to_active_change +// +// A child has just registered with us. If there is +// currently a change in progress, get the new party involved: if we +// have notified all parties and are waiting for acks, notify the new +// party. +//********************************************************************************* + +IOReturn IOService::add_child_to_active_change ( IOPowerConnection * newObject ) +{ + if (! acquire_lock() ) { + return IOPMNoErr; + } + + switch (priv->machine_state) { + case IOPMour_prechange_1: + case IOPMparent_down_4: + case IOPMparent_up_0: + priv->head_note_pendingAcks += 2; // one for this child and one to prevent + IOUnlock(priv->our_lock); // incoming acks from changing our state + notifyChild(newObject, true); + if (! acquire_lock() ) { + --priv->head_note_pendingAcks; // put it back + return IOPMNoErr; + } + if ( --priv->head_note_pendingAcks == 0 ) { // are we still waiting for acks? 
+            stop_ack_timer();					// no, stop the timer
+            IOUnlock(priv->our_lock);
+            all_acked();					// and now we can continue
+            return IOPMNoErr;
+        }
+        break;
+    // These states follow the power change itself; notifyChild is called with
+    // false, presumably the post-change notification -- TODO confirm against
+    // notifyChild's parameter contract.
+    case IOPMour_prechange_4:
+    case IOPMparent_down_6:
+    case IOPMparent_up_6:
+        priv->head_note_pendingAcks += 2;			// one for this child and one to prevent
+        IOUnlock(priv->our_lock);				// incoming acks from changing our state
+        notifyChild(newObject, false);
+        if (! acquire_lock() ) {
+            // NOTE(review): this decrement runs without our_lock held (the
+            // acquire failed) -- confirm the race with concurrent ackers is benign.
+            --priv->head_note_pendingAcks;			// put it back
+            return IOPMNoErr;
+        }
+        if ( --priv->head_note_pendingAcks == 0 ) {		// are we still waiting for acks?
+            stop_ack_timer();					// no, stop the timer
+            IOUnlock(priv->our_lock);
+            all_acked();					// and now we can continue
+            return IOPMNoErr;
+        }
+        break;
+    }
+    IOUnlock(priv->our_lock);
+    return IOPMNoErr;
+}
+
+
+//*********************************************************************************
+// add_driver_to_active_change
+//
+// An interested driver has just registered with us.  If there is
+// currently a change in progress, get the new party involved: if we
+// have notified all parties and are waiting for acks, notify the new
+// party.
+//
+// Mirrors add_child_to_active_change: the pending-ack count is bumped by 2
+// (one for the new driver, one as a guard) so that incoming acks cannot
+// drive the count to zero while we are mid-notification with the lock dropped.
+//*********************************************************************************
+
+IOReturn IOService::add_driver_to_active_change ( IOPMinformee * newObject )
+{
+    if (! acquire_lock() ) {
+        return IOPMNoErr;
+    }
+
+    switch (priv->machine_state) {
+    case IOPMour_prechange_1:
+    case IOPMparent_down_4:
+    case IOPMparent_up_0:
+        priv->head_note_pendingAcks += 2;			// one for this driver and one to prevent
+        IOUnlock(priv->our_lock);				// incoming acks from changing our state
+        inform(newObject, true);				// inform the driver
+        if (! acquire_lock() ) {
+            // NOTE(review): unlocked decrement, same caveat as in
+            // add_child_to_active_change above.
+            --priv->head_note_pendingAcks;			// put it back
+            return IOPMNoErr;
+        }
+        if ( --priv->head_note_pendingAcks == 0 ) {		// are we still waiting for acks?
+ stop_ack_timer(); // no, stop the timer + IOUnlock(priv->our_lock); + all_acked(); // and now we can continue + return IOPMNoErr; + } + break; + case IOPMour_prechange_4: + case IOPMparent_down_6: + case IOPMparent_up_6: + priv->head_note_pendingAcks += 2; // one for this driver and one to prevent + IOUnlock(priv->our_lock); // incoming acks from changing our state + inform(newObject, false); // inform the driver + if (! acquire_lock() ) { + --priv->head_note_pendingAcks; // put it back + return IOPMNoErr; + } + if ( --priv->head_note_pendingAcks == 0 ) { // are we still waiting for acks? + stop_ack_timer(); // no, stop the timer + IOUnlock(priv->our_lock); + all_acked(); // and now we can continue + return IOPMNoErr; + } + break; + } + IOUnlock(priv->our_lock); + return IOPMNoErr; +} + + +//********************************************************************************* +// start_parent_change +// +// Here we begin the processing of a change note initiated by our parent +// which is at the head of the queue. +// +// It is possible for the change to be processed to completion and removed from the queue. +// There are several possible interruptions to the processing, though, and they are: +// we may have to wait for interested parties to acknowledge our pre-change notification, +// we may have to wait for our controlling driver to change the hardware power state, +// there may be a settling time after changing the hardware power state, +// we may have to wait for interested parties to acknowledge our post-change notification, +// we may have to wait for the acknowledgement timer expiration to substitute for the +// acknowledgement from a failing driver. 
+//********************************************************************************* + +IOReturn IOService::start_parent_change ( unsigned long queue_head ) +{ + priv->head_note = queue_head; + priv->head_note_flags = priv-> changeList->changeNote[priv->head_note].flags; + priv->head_note_state = priv->changeList->changeNote[priv->head_note].newStateNumber; + priv->head_note_outputFlags = priv->changeList->changeNote[priv->head_note].outputPowerCharacter; + priv->head_note_domainState = priv->changeList->changeNote[priv->head_note].domainState; + priv->head_note_parent = priv->changeList->changeNote[priv->head_note].parent; + priv->head_note_capabilityFlags = priv->changeList->changeNote[priv->head_note].capabilityFlags; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartParentChange, + (unsigned long)priv->head_note_state,(unsigned long)pm_vars->myCurrentState); + + ask_parent( priv->ourDesiredPowerState); // if we need something and haven't told the parent, do so + + if ( priv->head_note_state < pm_vars->myCurrentState ) { // power domain is lowering + priv->initial_change = false; + priv->machine_state = IOPMparent_down_0; // tell apps and kernel clients + if ( tellChangeDown(priv->head_note_state) ) { // are we waiting for responses? 
+ return parent_down_0(); // no, notify interested drivers + } + return IOPMWillAckLater; // yes + } + + if ( priv->head_note_state > pm_vars->myCurrentState ) { // parent is raising power, we may or may not + if ( priv->ourDesiredPowerState > pm_vars->myCurrentState ) { + if ( priv->ourDesiredPowerState < priv->head_note_state ) { + priv->head_note_state = priv->ourDesiredPowerState; // we do, but not all the way + priv->head_note_outputFlags = pm_vars->thePowerStates[priv->head_note_state].outputPowerCharacter; + priv->head_note_capabilityFlags = pm_vars->thePowerStates[priv->head_note_state].capabilityFlags; + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAmendParentChange,(unsigned long)priv->head_note_state,0); + } + } + else { + priv-> head_note_state = pm_vars->myCurrentState; // we don't + priv->head_note_outputFlags = pm_vars->thePowerStates[priv->head_note_state].outputPowerCharacter; + priv->head_note_capabilityFlags = pm_vars->thePowerStates[priv->head_note_state].capabilityFlags; + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAmendParentChange,(unsigned long)priv->head_note_state,0); + } + } + + if ( (priv->head_note_state > pm_vars->myCurrentState) && + (priv->head_note_flags & IOPMDomainDidChange) ) { // changing up + priv->initial_change = false; + priv->machine_state = IOPMparent_up_0; + if ( notifyAll(true) == IOPMAckImplied ) { + return parent_up_1(); + } + return IOPMWillAckLater; // they didn't all ack + } + + all_done(); + return IOPMAckImplied; // a null change or power will go up +} + + +//********************************************************************************* +// start_our_change +// +// Here we begin the processing of a change note initiated by us +// which is at the head of the queue. +// +// It is possible for the change to be processed to completion and removed from the queue. 
+// There are several possible interruptions to the processing, though, and they are: +// we may have to wait for interested parties to acknowledge our pre-change notification, +// changes initiated by the parent will wait in the middle for powerStateDidChange, +// we may have to wait for our controlling driver to change the hardware power state, +// there may be a settling time after changing the hardware power state, +// we may have to wait for interested parties to acknowledge our post-change notification, +// we may have to wait for the acknowledgement timer expiration to substitute for the +// acknowledgement from a failing driver. +//********************************************************************************* + +void IOService::start_our_change ( unsigned long queue_head ) +{ + priv->head_note = queue_head; + priv->head_note_flags = priv->changeList->changeNote[priv->head_note].flags; + priv->head_note_state = priv->changeList->changeNote[priv->head_note].newStateNumber; + priv->head_note_outputFlags = priv->changeList->changeNote[priv->head_note].outputPowerCharacter; + priv->head_note_capabilityFlags = priv->changeList->changeNote[priv->head_note].capabilityFlags; + + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartDeviceChange, + (unsigned long)priv->head_note_state,(unsigned long)pm_vars->myCurrentState); + + if ( priv->head_note_capabilityFlags & IOPMNotAttainable ) { // can our driver switch to the new state? + if ( ! priv->we_are_root ) { // no, ask the parent to do it then + ask_parent(priv->head_note_state); + } + priv-> head_note_flags |= IOPMNotDone; // mark the change note un-actioned + all_done(); // and we're done + return; + } + // is there enough power in the domain? + if ( (pm_vars->maxCapability < priv->head_note_state) && (! priv->we_are_root) ) { + if ( ! 
priv->we_are_root ) { // no, ask the parent to raise it + ask_parent(priv->head_note_state); + } + priv->head_note_flags |= IOPMNotDone; // no, mark the change note un-actioned + all_done(); // and we're done + return; // till the parent raises power + } + + if ( ! priv->initial_change ) { + if ( priv->head_note_state == pm_vars->myCurrentState ) { + all_done(); // we initiated a null change; forget it + return; + } + } + priv->initial_change = false; + + if ( priv->head_note_state < pm_vars->myCurrentState ) { // dropping power? + priv->machine_state = IOPMour_prechange_03; // yes, in case we have to wait for acks + pm_vars->doNotPowerDown = false; + if ( askChangeDown(priv->head_note_state) ) { // ask apps and kernel clients if we can drop power + if ( pm_vars->doNotPowerDown ) { // don't have to wait, did any clients veto? + tellNoChangeDown(priv->head_note_state); // yes, rescind the warning + priv-> head_note_flags |= IOPMNotDone; // mark the change note un-actioned + all_done(); // and we're done + } + else { + our_prechange_03(); // no, tell'em we're dropping power + } + } + } + else { + if ( ! priv->we_are_root ) { // we are raising power + ask_parent(priv->head_note_state); // if this changes our power requirement, tell the parent + } + priv->machine_state = IOPMour_prechange_1; // in case they don't all ack + if ( notifyAll(true) == IOPMAckImplied ) { // notify interested drivers and children + our_prechange_1(); + } + } +} + + +//********************************************************************************* +// ask_parent +// +// Call the power domain parent to ask for a higher power state in the domain +// or to suggest a lower power state. 
+//********************************************************************************* + +IOReturn IOService::ask_parent ( unsigned long requestedState ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + + if ( priv->previousRequest == pm_vars->thePowerStates[requestedState].inputPowerRequirement ) { // is this a new desire? + return IOPMNoErr; // no, the parent knows already, just return + } + + if ( priv->we_are_root ) { + return IOPMNoErr; + } + priv->previousRequest = pm_vars->thePowerStates[requestedState].inputPowerRequirement; + + iter = getParentIterator(gIOPowerPlane); + + if ( iter ) { + while ( (next = iter->getNextObject()) ) { + if ( (connection = OSDynamicCast(IOPowerConnection,next)) ) { + if ( ((IOService *)(connection->getParentEntry(gIOPowerPlane)))->requestPowerDomainState( priv->previousRequest,connection,IOPMLowestState)!= IOPMNoErr ) { + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogRequestDenied,(unsigned long)priv->previousRequest,0); + } + } + } + iter->release(); + } + + return IOPMNoErr; +} + + +//********************************************************************************* +// instruct_driver +// +// Call the controlling driver and have it change the power state of the +// hardware. If it returns IOPMAckImplied, the change is complete, and +// we return IOPMAckImplied. Otherwise, it will ack when the change +// is done; we return IOPMWillAckLater. +//********************************************************************************* + +IOReturn IOService::instruct_driver ( unsigned long newState ) +{ + IOReturn return_code; + + if ( pm_vars->thePowerStates[newState].capabilityFlags & IOPMNotAttainable ) { // can our driver switch to the desired state? 
+	return IOPMAckImplied;						// no, so don't try
+    }
+    priv->driver_timer = -1;					// sentinel: driver call in progress, no ack yet
+
+    pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogProgramHardware,newState,0);
+    return_code = pm_vars->theControllingDriver->setPowerState( newState,this );	// yes, instruct it
+    if ( return_code == IOPMAckImplied ) {			// it finished
+        priv->driver_timer = 0;
+        return IOPMAckImplied;
+    }
+
+    if ( priv->driver_timer == 0 ) {				// it acked behind our back
+        return IOPMAckImplied;
+    }
+
+    if ( return_code < 0 ) {					// somebody goofed
+        return IOPMAckImplied;
+    }
+
+    // Positive return_code is the driver's requested settle time; convert it
+    // to ack-timer ticks, rounding up by one.  NOTE(review): the ns_per_us
+    // factor assumes return_code is in microseconds -- confirm against the
+    // setPowerState contract.
+    priv->driver_timer = (return_code * ns_per_us / ACK_TIMER_PERIOD) + 1;	// it didn't finish
+    return IOPMWillAckLater;
+}
+
+
+//*********************************************************************************
+// acquire_lock
+//
+// We are acquiring the lock we use to protect our queue head from
+// simultaneous access by a thread which calls acknowledgePowerStateChange
+// or acknowledgeSetPowerState and the ack timer expiration thread.
+// Return TRUE if we acquire the lock, and the queue head didn't change
+// while we were acquiring the lock (and maybe blocked).
+// If there is no queue head, or it changes while we are blocked,
+// return FALSE with the lock unlocked.
+//*********************************************************************************
+
+bool IOService::acquire_lock ( void )
+{
+    long current_change_note;
+
+    // Snapshot the queue head before taking the lock so we can detect a
+    // change that happened while we were blocked acquiring it.
+    current_change_note = priv->head_note;
+    if ( current_change_note == -1 ) {				// -1 means no change note in progress
+        return FALSE;
+    }
+
+    IOTakeLock(priv->our_lock);
+    if ( current_change_note == priv->head_note ) {
+        return TRUE;						// lock held, head unchanged
+    }
+    else {						// we blocked and something changed radically
+        IOUnlock(priv->our_lock);			// so there's nothing to do any more
+        return FALSE;
+    }
+}
+
+
+//*********************************************************************************
+// askChangeDown
+//
+// Ask registered applications and kernel clients if we can change to a lower
+// power state.
+//
+// Subclass can override this to send a different message type.  Parameter is
+// the destination state number (unused by this default implementation,
+// which sends the same query regardless of the target state).
+//
+// Return true if we don't have to wait for acknowledgements
+//*********************************************************************************
+
+bool IOService::askChangeDown ( unsigned long )
+{
+    // Delegate to the common notify-and-collect-responses path; the return
+    // value (all clients already responded?) passes straight through.
+    return tellClientsWithResponse(kIOMessageCanDevicePowerOff);
+}
+
+
+//*********************************************************************************
+// tellChangeDown
+//
+// Notify registered applications and kernel clients that we are definitely
+// dropping power.
+//
+// Subclass can override this to send a different message type.  Parameter is
+// the destination state number (unused by this default implementation).
+//
+// Return true if we don't have to wait for acknowledgements
+//*********************************************************************************
+
+bool IOService::tellChangeDown ( unsigned long )
+{
+    // Same collection mechanism as askChangeDown, but the message is a
+    // statement of intent rather than a question.
+    return tellClientsWithResponse(kIOMessageDeviceWillPowerOff);
+}
+
+
+//*********************************************************************************
+// tellClientsWithResponse
+//
+// Notify registered applications and kernel clients that we are definitely
+// dropping power.
+// +// Return true if we don't have to wait for acknowledgements +//********************************************************************************* + +bool IOService::tellClientsWithResponse ( int messageType ) +{ + struct context theContext; + AbsoluteTime deadline; + OSBoolean * aBool; + + pm_vars->responseFlags = OSArray::withCapacity( 1 ); + pm_vars->serialNumber += 1; + + theContext.responseFlags = pm_vars->responseFlags; + theContext.serialNumber = pm_vars->serialNumber; + theContext.flags_lock = priv->flags_lock; + theContext.counter = 1; + theContext.msgType = messageType; + theContext.us = this; + theContext.maxTimeRequested = 0; + + IOLockLock(priv->flags_lock); + aBool = OSBoolean::withBoolean(false); // position zero is false to + theContext.responseFlags->setObject(0,aBool); // prevent allowCancelCommon from succeeding + aBool->release(); + IOLockUnlock(priv->flags_lock); + + applyToInterested(gIOAppPowerStateInterest,tellAppWithResponse,(void *)&theContext); + applyToInterested(gIOGeneralInterest,tellClientWithResponse,(void *)&theContext); + + if (! acquire_lock() ) { + return true; + } + IOLockLock(priv->flags_lock); + aBool = OSBoolean::withBoolean(true); // now fix position zero + theContext.responseFlags->replaceObject(0,aBool); + aBool->release(); + IOLockUnlock(priv->flags_lock); + + if ( ! 
checkForDone() ) { // we have to wait for somebody + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogStartAckTimer,theContext.maxTimeRequested,0); + clock_interval_to_deadline(theContext.maxTimeRequested / 1000, kMillisecondScale, &deadline); + + thread_call_enter_delayed(priv->ackTimer, deadline); + + IOUnlock(priv->our_lock); // yes + return false; + } + + IOUnlock(priv->our_lock); + IOLockLock(priv->flags_lock); + pm_vars->responseFlags->release(); // everybody responded + pm_vars->responseFlags = NULL; + IOLockUnlock(priv->flags_lock); + + return true; +} + + +//********************************************************************************* +// tellAppWithResponse +// +// We send a message to an application, and we expect a response, so we compute a +// cookie we can identify the response with. +//********************************************************************************* +void tellAppWithResponse ( OSObject * object, void * context) +{ + struct context * theContext = (struct context *)context; + UInt32 refcon; + OSBoolean * aBool; + + if( OSDynamicCast( IOService, object) ) { + IOLockLock(theContext->flags_lock); + aBool = OSBoolean::withBoolean(true); + theContext->responseFlags->setObject(theContext->counter,aBool); + aBool->release(); + IOLockUnlock(theContext->flags_lock); + } + else { + refcon = ((theContext->serialNumber & 0xFFFF)<<16) + (theContext->counter & 0xFFFF); + IOLockLock(theContext->flags_lock); + aBool = OSBoolean::withBoolean(false); + theContext->responseFlags->setObject(theContext->counter,aBool); + aBool->release(); + IOLockUnlock(theContext->flags_lock); + theContext->us->messageClient(theContext->msgType,object,(void *)refcon); + if ( theContext->maxTimeRequested < k15seconds ) { + theContext->maxTimeRequested = k15seconds; + } + } + theContext->counter += 1; +} + + +//********************************************************************************* +// tellClientWithResponse +// +// We send a message to an in-kernel client, 
and we expect a response, so we compute a +// cookie we can identify the response with. +// If it doesn't understand the notification (it is not power-management savvy) +// we won't wait for it to prepare for sleep. If it tells us via a return code +// in the passed struct that it is currently ready, we won't wait for it to prepare. +// If it tells us via the return code in the struct that it does need time, we will chill. +//********************************************************************************* +void tellClientWithResponse ( OSObject * object, void * context) +{ + struct context * theContext = (struct context *)context; + sleepWakeNote paramBlock; + UInt32 refcon; + IOReturn retCode; + OSBoolean * aBool; + OSObject * theFlag; + + refcon = ((theContext->serialNumber & 0xFFFF)<<16) + (theContext->counter & 0xFFFF); + IOLockLock(theContext->flags_lock); + aBool = OSBoolean::withBoolean(false); + theContext->responseFlags->setObject(theContext->counter,aBool); + aBool->release(); + IOLockUnlock(theContext->flags_lock); + paramBlock.powerRef = (void *)refcon; + paramBlock.returnValue = 0; + retCode = theContext->us->messageClient(theContext->msgType,object,(void *)¶mBlock); + if ( retCode == kIOReturnSuccess ) { + if ( paramBlock.returnValue == 0 ) { // client doesn't want time to respond + IOLockLock(theContext->flags_lock); + aBool = OSBoolean::withBoolean(true); + theContext->responseFlags->replaceObject(theContext->counter,aBool); // so set its flag true + aBool->release(); + IOLockUnlock(theContext->flags_lock); + } + else { + IOLockLock(theContext->flags_lock); + theFlag = theContext->responseFlags->getObject(theContext->counter); // it does want time, and it hasn't + if ( theFlag != 0 ) { // responded yet + if ( ((OSBoolean *)theFlag)->isFalse() ) { // so note its time requirement + if ( theContext->maxTimeRequested < paramBlock.returnValue ) { + theContext->maxTimeRequested = paramBlock.returnValue; + } + } + } + IOLockUnlock(theContext->flags_lock); 
+ } + } + else { // not a client of ours + IOLockLock(theContext->flags_lock); + aBool = OSBoolean::withBoolean(true); // so we won't be waiting for response + theContext->responseFlags->replaceObject(theContext->counter,aBool); + aBool->release(); + IOLockUnlock(theContext->flags_lock); + } + theContext->counter += 1; +} + + +//********************************************************************************* +// tellNoChangeDown +// +// Notify registered applications and kernel clients that we are not +// dropping power. +// +// Subclass can override this to send a different message type. Parameter is +// the aborted destination state number. +//********************************************************************************* + +void IOService::tellNoChangeDown ( unsigned long ) +{ + return tellClients(kIOMessageDeviceWillNotPowerOff); +} + + +//********************************************************************************* +// tellChangeUp +// +// Notify registered applications and kernel clients that we are raising power. +// +// Subclass can override this to send a different message type. Parameter is +// the aborted destination state number. +//********************************************************************************* + +void IOService::tellChangeUp ( unsigned long ) +{ + return tellClients(kIOMessageDeviceHasPoweredOn); +} + + +//********************************************************************************* +// tellClients +// +// Notify registered applications and kernel clients of something. 
+//********************************************************************************* + +void IOService::tellClients ( int messageType ) +{ + struct context theContext; + + theContext.msgType = messageType; + theContext.us = this; + + applyToInterested(gIOAppPowerStateInterest,tellClient,(void *)&theContext); + applyToInterested(gIOGeneralInterest,tellClient,(void *)&theContext); +} + + +//********************************************************************************* +// tellClient +// +// Notify a registered application or kernel client of something. +//********************************************************************************* +void tellClient ( OSObject * object, void * context) +{ + struct context * theContext = (struct context *)context; + + theContext->us->messageClient(theContext->msgType,object,0); +} + + +// ********************************************************************************** +// checkForDone +// +// ********************************************************************************** +bool IOService::checkForDone ( void ) +{ + int i = 0; + OSObject * theFlag; + + IOLockLock(priv->flags_lock); + if ( pm_vars->responseFlags == NULL ) { + IOLockUnlock(priv->flags_lock); + return true; + } + for ( i = 0; ; i++ ) { + theFlag = pm_vars->responseFlags->getObject(i); + if ( theFlag == NULL ) { + break; + } + if ( ((OSBoolean *)theFlag)->isFalse() ) { + IOLockUnlock(priv->flags_lock); + return false; + } + } + IOLockUnlock(priv->flags_lock); + return true; +} + + +// ********************************************************************************** +// responseValid +// +// ********************************************************************************** +bool IOService::responseValid ( unsigned long x ) +{ + UInt16 serialComponent; + UInt16 ordinalComponent; + OSObject * theFlag; + unsigned long refcon = (unsigned long)x; + OSBoolean * aBool; + + serialComponent = (refcon>>16) & 0xFFFF; + ordinalComponent = refcon & 0xFFFF; + + if ( 
serialComponent != pm_vars->serialNumber ) { + return false; + } + + IOLockLock(priv->flags_lock); + if ( pm_vars->responseFlags == NULL ) { + IOLockUnlock(priv->flags_lock); + return false; + } + + theFlag = pm_vars->responseFlags->getObject(ordinalComponent); + + if ( theFlag == 0 ) { + IOLockUnlock(priv->flags_lock); + return false; + } + + if ( ((OSBoolean *)theFlag)->isFalse() ) { + aBool = OSBoolean::withBoolean(true); + pm_vars->responseFlags->replaceObject(ordinalComponent,aBool); + aBool->release(); + } + + IOLockUnlock(priv->flags_lock); + return true; +} + + +// ********************************************************************************** +// allowPowerChange +// +// Our power state is about to lower, and we have notified applications +// and kernel clients, and one of them has acknowledged. If this is the last to do +// so, and all acknowledgements are positive, we continue with the power change. +// +// We serialize this processing with timer expiration with a command gate on the +// power management workloop, which the timer expiration is command gated to as well. +// ********************************************************************************** +IOReturn IOService::allowPowerChange ( unsigned long refcon ) +{ + if ( ! initialized ) { + return kIOReturnSuccess; // we're unloading + } + + return pm_vars->PMcommandGate->runAction(serializedAllowPowerChange,(void *)refcon); +} + + +IOReturn serializedAllowPowerChange ( OSObject *owner, void * refcon, void *, void *, void *) +{ + return ((IOService *)owner)->serializedAllowPowerChange2((unsigned long)refcon); +} + +IOReturn IOService::serializedAllowPowerChange2 ( unsigned long refcon ) +{ + if ( ! responseValid(refcon) ) { // response valid? 
+ pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr5,refcon,0); + return kIOReturnSuccess; // no, just return + } + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientAcknowledge,refcon,0); + + return allowCancelCommon(); +} + + +// ********************************************************************************** +// cancelPowerChange +// +// Our power state is about to lower, and we have notified applications +// and kernel clients, and one of them has vetoed the change. If this is the last +// client to respond, we abandon the power change. +// +// We serialize this processing with timer expiration with a command gate on the +// power management workloop, which the timer expiration is command gated to as well. +// ********************************************************************************** +IOReturn IOService::cancelPowerChange ( unsigned long refcon ) +{ + if ( ! initialized ) { + return kIOReturnSuccess; // we're unloading + } + + return pm_vars->PMcommandGate->runAction(serializedCancelPowerChange,(void *)refcon); +} + + +IOReturn serializedCancelPowerChange ( OSObject *owner, void * refcon, void *, void *, void *) +{ + return ((IOService *)owner)->serializedCancelPowerChange2((unsigned long)refcon); +} + +IOReturn IOService::serializedCancelPowerChange2 ( unsigned long refcon ) +{ + if ( ! responseValid(refcon) ) { // response valid? + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogAcknowledgeErr5,refcon,0); + return kIOReturnSuccess; // no, just return + } + pm_vars->thePlatform->PMLog(pm_vars->ourName,PMlogClientCancel,refcon,0); + + pm_vars->doNotPowerDown = true; + + return allowCancelCommon(); +} + + +// ********************************************************************************** +// allowCancelCommon +// +// ********************************************************************************** +IOReturn IOService::allowCancelCommon ( void ) +{ + if (! 
acquire_lock() ) { + return kIOReturnSuccess; + } + + if ( checkForDone() ) { // is this the last response? + stop_ack_timer(); // yes, stop the timer + IOUnlock(priv->our_lock); + IOLockLock(priv->flags_lock); + if ( pm_vars->responseFlags ) { + pm_vars->responseFlags->release(); + pm_vars->responseFlags = NULL; + } + IOLockUnlock(priv->flags_lock); + switch (priv->machine_state) { + case IOPMour_prechange_03: // our change, was it vetoed? + if ( ! pm_vars->doNotPowerDown ) { + our_prechange_03(); // no, we can continue + } + else { + tellNoChangeDown(priv->head_note_state); // yes, rescind the warning + priv->head_note_flags |= IOPMNotDone; // mark the change note un-actioned + all_done(); // and we're done + } + break; + case IOPMour_prechange_05: + our_prechange_05(); // our change, continue + break; + case IOPMparent_down_0: + parent_down_05(); // parent change, continueq8q + break; + } + return kIOReturnSuccess; + } + + IOUnlock(priv->our_lock); // not done yet + return kIOReturnSuccess; +} + + +//********************************************************************************* +// clampPowerOn +// +// Set to highest available power state for a minimum of duration milliseconds +//********************************************************************************* + +#define kFiveMinutesInNanoSeconds (300 * NSEC_PER_SEC) + +void IOService::clampPowerOn (unsigned long duration) +{ + changePowerStateToPriv (pm_vars->theNumberOfPowerStates-1); + + if ( priv->clampTimerEventSrc == NULL ) { + priv->clampTimerEventSrc = IOTimerEventSource::timerEventSource(this, + c_PM_Clamp_Timer_Expired); + + IOWorkLoop * workLoop = getPMworkloop (); + + if ( !priv->clampTimerEventSrc || !workLoop || + ( workLoop->addEventSource( priv->clampTimerEventSrc) != kIOReturnSuccess) ) { + + } + } + + priv->clampTimerEventSrc->setTimeout(kFiveMinutesInNanoSeconds, NSEC_PER_SEC); +} + +//********************************************************************************* +// 
PM_Clamp_Timer_Expired +// +// called when clamp timer expires...set power state to 0. +//********************************************************************************* + +void IOService::PM_Clamp_Timer_Expired (void) +{ + if ( ! initialized ) { + return; // we're unloading + } + + changePowerStateToPriv (0); +} + +//********************************************************************************* +// c_PM_clamp_Timer_Expired (C Func) +// +// Called when our clamp timer expires...we will call the object method. +//********************************************************************************* + +void c_PM_Clamp_Timer_Expired (OSObject * client, IOTimerEventSource *) +{ + if (client) + ((IOService *)client)->PM_Clamp_Timer_Expired (); +} + + +//********************************************************************************* +// setPowerState +// +// Does nothing here. This should be implemented in a subclass driver. +//********************************************************************************* + +IOReturn IOService::setPowerState ( unsigned long powerStateOrdinal, IOService* whatDevice ) +{ + return IOPMNoErr; +} + + +//********************************************************************************* +// maxCapabilityForDomainState +// +// Finds the highest power state in the array whose input power +// requirement is equal to the input parameter. Where a more intelligent +// decision is possible, override this in the subclassed driver. 
+//*********************************************************************************
+
+unsigned long IOService::maxCapabilityForDomainState ( IOPMPowerFlags domainState )
+{
+   int i;
+
+   if (pm_vars->theNumberOfPowerStates == 0 ) {			// no power states registered yet
+       return 0;
+   }
+   // Scan from the highest-numbered state downward so that, among states with
+   // the same input requirement, the highest ordinal wins.  The comparison is
+   // exact equality on the whole flags word.
+   for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) {
+       if ( pm_vars->thePowerStates[i].inputPowerRequirement == domainState ) {
+           return i;
+       }
+   }
+   return 0;						// no match: fall back to state 0
+}
+
+
+//*********************************************************************************
+// initialPowerStateForDomainState
+//
+// Finds the highest power state in the array whose input power
+// requirement is equal to the input parameter.  Where a more intelligent
+// decision is possible, override this in the subclassed driver.
+//
+// Default implementation is intentionally identical to
+// maxCapabilityForDomainState; they are separate so subclasses can override
+// each policy independently.
+//*********************************************************************************
+
+unsigned long IOService::initialPowerStateForDomainState ( IOPMPowerFlags domainState )
+{
+   int i;
+
+   if (pm_vars->theNumberOfPowerStates == 0 ) {
+       return 0;
+   }
+   for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) {
+       if ( pm_vars->thePowerStates[i].inputPowerRequirement == domainState ) {
+           return i;
+       }
+   }
+   return 0;
+}
+
+
+//*********************************************************************************
+// powerStateForDomainState
+//
+// Finds the highest power state in the array whose input power
+// requirement is equal to the input parameter.  Where a more intelligent
+// decision is possible, override this in the subclassed driver.
+//********************************************************************************* + +unsigned long IOService::powerStateForDomainState ( IOPMPowerFlags domainState ) +{ + int i; + + if (pm_vars->theNumberOfPowerStates == 0 ) { + return 0; + } + for ( i = (pm_vars->theNumberOfPowerStates)-1; i >= 0; i-- ) { + if ( pm_vars->thePowerStates[i].inputPowerRequirement == domainState ) { + return i; + } + } + return 0; +} + + +//********************************************************************************* +// didYouWakeSystem +// +// Does nothing here. This should be implemented in a subclass driver. +//********************************************************************************* + +bool IOService::didYouWakeSystem ( void ) +{ + return false; +} + + +//********************************************************************************* +// powerStateWillChangeTo +// +// Does nothing here. This should be implemented in a subclass driver. +//********************************************************************************* + +IOReturn IOService::powerStateWillChangeTo ( IOPMPowerFlags, unsigned long, IOService*) +{ + return 0; +} + + +//********************************************************************************* +// powerStateDidChangeTo +// +// Does nothing here. This should be implemented in a subclass driver. +//********************************************************************************* + +IOReturn IOService::powerStateDidChangeTo ( IOPMPowerFlags, unsigned long, IOService*) +{ + return 0; +} + + +//********************************************************************************* +// powerChangeDone +// +// Does nothing here. This should be implemented in a subclass policy-maker. +//********************************************************************************* + +void IOService::powerChangeDone ( unsigned long ) +{ +} + + +//********************************************************************************* +// newTemperature +// +// Does nothing here. 
This should be implemented in a subclass driver. +//********************************************************************************* + +IOReturn IOService::newTemperature ( long currentTemp, IOService * whichZone ) + +{ + return IOPMNoErr; +} + + +#undef super +#define super OSObject + +OSDefineMetaClassAndStructors(IOPMprot, OSObject) +//********************************************************************************* +// serialize +// +// Serialize protected instance variables for debug output. +//********************************************************************************* +bool IOPMprot::serialize(OSSerialize *s) const +{ + OSString * theOSString; + char * buffer; + char * ptr; + int i; + bool rtn_code; + + buffer = ptr = IONew(char, 2000); + if(!buffer) + return false; + + ptr += sprintf(ptr,"{ theNumberOfPowerStates = %d, ",(unsigned int)theNumberOfPowerStates); + + if ( theNumberOfPowerStates != 0 ) { + ptr += sprintf(ptr,"version %d, ",(unsigned int)thePowerStates[0].version); + } + + if ( theNumberOfPowerStates != 0 ) { + for ( i = 0; i < (int)theNumberOfPowerStates; i++ ) { + ptr += sprintf(ptr,"power state %d = { ",i); + ptr += sprintf(ptr,"capabilityFlags %08x, ",(unsigned int)thePowerStates[i].capabilityFlags); + ptr += sprintf(ptr,"outputPowerCharacter %08x, ",(unsigned int)thePowerStates[i].outputPowerCharacter); + ptr += sprintf(ptr,"inputPowerRequirement %08x, ",(unsigned int)thePowerStates[i].inputPowerRequirement); + ptr += sprintf(ptr,"staticPower %d, ",(unsigned int)thePowerStates[i].staticPower); + ptr += sprintf(ptr,"unbudgetedPower %d, ",(unsigned int)thePowerStates[i].unbudgetedPower); + ptr += sprintf(ptr,"powerToAttain %d, ",(unsigned int)thePowerStates[i].powerToAttain); + ptr += sprintf(ptr,"timeToAttain %d, ",(unsigned int)thePowerStates[i].timeToAttain); + ptr += sprintf(ptr,"settleUpTime %d, ",(unsigned int)thePowerStates[i].settleUpTime); + ptr += sprintf(ptr,"timeToLower %d, ",(unsigned int)thePowerStates[i].timeToLower); + 
ptr += sprintf(ptr,"settleDownTime %d, ",(unsigned int)thePowerStates[i].settleDownTime); + ptr += sprintf(ptr,"powerDomainBudget %d }, ",(unsigned int)thePowerStates[i].powerDomainBudget); + } + } + + ptr += sprintf(ptr,"aggressiveness = %d, ",(unsigned int)aggressiveness); + ptr += sprintf(ptr,"myCurrentState = %d, ",(unsigned int)myCurrentState); + ptr += sprintf(ptr,"parentsCurrentPowerFlags = %08x, ",(unsigned int)parentsCurrentPowerFlags); + ptr += sprintf(ptr,"maxCapability = %d }",(unsigned int)maxCapability); + + theOSString = OSString::withCString(buffer); + rtn_code = theOSString->serialize(s); + theOSString->release(); + IODelete(buffer, char, 2000); + + return rtn_code; +} + + +#undef super +#define super OSObject + +OSDefineMetaClassAndStructors(IOPMpriv, OSObject) +//********************************************************************************* +// serialize +// +// Serialize private instance variables for debug output. +//********************************************************************************* +bool IOPMpriv::serialize(OSSerialize *s) const +{ + OSString * theOSString; + bool rtn_code; + char * buffer; + char * ptr; + IOPMinformee * nextObject; + + buffer = ptr = IONew(char, 2000); + if(!buffer) + return false; + + ptr += sprintf(ptr,"{ this object = %08x",(unsigned int)owner); + if ( we_are_root ) { + ptr += sprintf(ptr," (root)"); + } + ptr += sprintf(ptr,", "); + + nextObject = interestedDrivers->firstInList(); // display interested drivers + while ( nextObject != NULL ) { + ptr += sprintf(ptr,"interested driver = %08x, ",(unsigned int)nextObject->whatObject); + nextObject = interestedDrivers->nextInList(nextObject); + } + + if ( machine_state != IOPMfinished ) { + ptr += sprintf(ptr,"machine_state = %d, ",(unsigned int)machine_state); + ptr += sprintf(ptr,"driver_timer = %d, ",(unsigned int)driver_timer); + ptr += sprintf(ptr,"settle_time = %d, ",(unsigned int)settle_time); + ptr += sprintf(ptr,"head_note_flags = %08x, ",(unsigned 
int)head_note_flags); + ptr += sprintf(ptr,"head_note_state = %d, ",(unsigned int)head_note_state); + ptr += sprintf(ptr,"head_note_outputFlags = %08x, ",(unsigned int)head_note_outputFlags); + ptr += sprintf(ptr,"head_note_domainState = %08x, ",(unsigned int)head_note_domainState); + ptr += sprintf(ptr,"head_note_capabilityFlags = %08x, ",(unsigned int)head_note_capabilityFlags); + ptr += sprintf(ptr,"head_note_pendingAcks = %d, ",(unsigned int)head_note_pendingAcks); + } + + if ( device_overrides ) { + ptr += sprintf(ptr,"device overrides, "); + } + ptr += sprintf(ptr,"driverDesire = %d, ",(unsigned int)driverDesire); + ptr += sprintf(ptr,"deviceDesire = %d, ",(unsigned int)deviceDesire); + ptr += sprintf(ptr,"ourDesiredPowerState = %d, ",(unsigned int)ourDesiredPowerState); + ptr += sprintf(ptr,"previousRequest = %d }",(unsigned int)previousRequest); + + theOSString = OSString::withCString(buffer); + rtn_code = theOSString->serialize(s); + theOSString->release(); + IODelete(buffer, char, 2000); + + return rtn_code; +} + diff --git a/iokit/Kernel/IOServicePrivate.h b/iokit/Kernel/IOServicePrivate.h new file mode 100644 index 000000000..6ff2870a3 --- /dev/null +++ b/iokit/Kernel/IOServicePrivate.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _IOKIT_IOSERVICEPRIVATE_H +#define _IOKIT_IOSERVICEPRIVATE_H + +// options for getExistingServices() +enum { + kIONotifyOnce = 0x00000001 +}; + +// masks for __state[1] +enum { + kIOServiceBusyStateMask = 0x000000ff, + kIOServiceBusyMax = 255, + kIOServiceTerminatedState = 0x80000000, + kIOServiceConfigState = 0x40000000, + kIOServiceSynchronousState = 0x20000000, +}; + +// options for terminate() +enum { + kIOServiceRecursing = 0x00100000, +}; + + +// notify state +enum { + kIOServiceNotifyEnable = 0x00000001, + kIOServiceNotifyWaiter = 0x00000002 +}; + +struct _IOServiceNotifierInvocation +{ + IOThread thread; + queue_chain_t link; +}; + +class _IOServiceNotifier : public IONotifier +{ + friend IOService; + + OSDeclareDefaultStructors(_IOServiceNotifier) + +public: + OSOrderedSet * whence; + + OSDictionary * matching; + IOServiceNotificationHandler handler; + void * target; + void * ref; + SInt32 priority; + queue_head_t handlerInvocations; + IOOptionBits state; + + virtual void free(); + virtual void remove(); + virtual bool disable(); + virtual void enable( bool was ); + virtual void wait(); +}; + +class _IOServiceInterestNotifier : public IONotifier +{ + friend IOService; + + OSDeclareDefaultStructors(_IOServiceInterestNotifier) + +public: + OSArray * whence; + + IOServiceInterestHandler handler; + void * target; + void * ref; + queue_head_t handlerInvocations; + 
IOOptionBits state; + + virtual void free(); + virtual void remove(); + virtual bool disable(); + virtual void enable( bool was ); + virtual void wait(); +}; + +class _IOConfigThread : public OSObject +{ + friend IOService; + + OSDeclareDefaultStructors(_IOConfigThread) + +public: + IOThread thread; + + virtual void free(); + + static _IOConfigThread * configThread( void ); + static void main( _IOConfigThread * self ); +}; + +enum { +#ifdef LESS_THREAD_CREATE + kMaxConfigThreads = 4, +#else + kMaxConfigThreads = 32, +#endif +}; + +enum { + kMatchNubJob = 10, + kTerminateNubJob, +}; + +class _IOServiceJob : public OSObject +{ + friend IOService; + + OSDeclareDefaultStructors(_IOServiceJob) + +public: + int type; + IOService * nub; + IOOptionBits options; + + static _IOServiceJob * startJob( IOService * nub, int type, + IOOptionBits options = 0 ); + static void pingConfig( class _IOServiceJob * job ); + +}; + +class IOResources : public IOService +{ + friend IOService; + + OSDeclareDefaultStructors(IOResources) + +public: + static IOService * resources( void ); + virtual IOWorkLoop * getWorkLoop( ) const; + virtual bool matchPropertyTable( OSDictionary * table ); +}; + +class _IOOpenServiceIterator : public OSIterator +{ + friend IOService; + + OSDeclareDefaultStructors(_IOOpenServiceIterator) + + OSIterator * iter; + const IOService * client; + const IOService * provider; + IOService * last; + +public: + static OSIterator * iterator( OSIterator * _iter, + const IOService * client, + const IOService * provider ); + virtual void free(); + virtual void reset(); + virtual bool isValid(); + virtual OSObject * getNextObject(); +}; + +#endif /* ! _IOKIT_IOSERVICEPRIVATE_H */ + diff --git a/iokit/Kernel/IOStartIOKit.cpp b/iokit/Kernel/IOStartIOKit.cpp new file mode 100644 index 000000000..23268cd62 --- /dev/null +++ b/iokit/Kernel/IOStartIOKit.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998,1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern "C" { + +extern void OSlibkernInit (void); +extern void IOLibInit(void); + +#include + + +void IOKitResetTime( void ) +{ + mach_timespec_t t; + + t.tv_sec = 30; + t.tv_nsec = 0; + IOService::waitForService( + IOService::resourceMatching("IORTC"), &t ); +#ifndef i386 + IOService::waitForService( + IOService::resourceMatching("IONVRAM"), &t ); +#endif + + clock_initialize_calendar(); +} + + +void StartIOKit( void * p1, void * p2, void * p3, void * p4 ) +{ + IOPlatformExpertDevice * rootNub; + int debugFlags; + IORegistryEntry * root; + OSObject * obj; + extern const char * gIOKernelKmods; + OSString * errorString = NULL; // must release + OSDictionary * fakeKmods; // must release + OSCollectionIterator * kmodIter; // must release + OSString * kmodName; // don't release + + IOLog( iokit_version ); + + if( PE_parse_boot_arg( "io", &debugFlags )) + gIOKitDebug = 
debugFlags; + // + // Have to start IOKit environment before we attempt to start + // the C++ runtime environment. At some stage we have to clean up + // the initialisation path so that OS C++ can initialise independantly + // of iokit basic service initialisation, or better we have IOLib stuff + // initialise as basic OS services. + // + IOLibInit(); + OSlibkernInit(); + + IOLog("_cppInit done\n"); + + + /***** + * Declare the fake kmod_info structs for built-in components + * that must be tracked as independent units for dependencies. + */ + fakeKmods = OSDynamicCast(OSDictionary, + OSUnserialize(gIOKernelKmods, &errorString)); + + if (!fakeKmods) { + if (errorString) { + panic("Kernel kmod list syntax error: %s\n", + errorString->getCStringNoCopy()); + errorString->release(); + } else { + panic("Error loading kernel kmod list.\n"); + } + } + + kmodIter = OSCollectionIterator::withCollection(fakeKmods); + if (!kmodIter) { + panic("Can't declare in-kernel kmods.\n"); + } + while ((kmodName = OSDynamicCast(OSString, kmodIter->getNextObject()))) { + + OSString * kmodVersion = OSDynamicCast(OSString, + fakeKmods->getObject(kmodName)); + if (!kmodVersion) { + panic("Can't declare in-kernel kmod; \"%s\" has " + "an invalid version.\n", + kmodName->getCStringNoCopy()); + } + if (KERN_SUCCESS != kmod_create_fake(kmodName->getCStringNoCopy(), + kmodVersion->getCStringNoCopy())) { + panic("Failure declaring in-kernel kmod \"%s\".\n", + kmodName->getCStringNoCopy()); + } + } + + kmodIter->release(); + fakeKmods->release(); + + + + root = IORegistryEntry::initialize(); + assert( root ); + IOService::initialize(); + IOCatalogue::initialize(); + IOUserClient::initialize(); + IOMemoryDescriptor::initialize(); + IONetworkController::initialize(); + IODisplay::initialize(); + + obj = OSString::withCString( iokit_version ); + assert( obj ); + if( obj ) { + root->setProperty( kIOKitBuildVersionKey, obj ); + obj->release(); + } + obj = IOKitDiagnostics::diagnostics(); + if( obj ) { 
+ root->setProperty( kIOKitDiagnosticsKey, obj ); + obj->release(); + } + +#ifdef i386 + // pretend there's no device-tree for intel + p1 = 0; +#endif + + rootNub = new IOPlatformExpertDevice; + + if( rootNub && rootNub->initWithArgs( p1, p2, p3, p4)) { + rootNub->attach( 0 ); + + /* Enter into the catalogue the drivers + * provided by BootX. + */ + gIOCatalogue->recordStartupExtensions(); + + rootNub->registerService(); + } +} + +}; /* extern "C" */ diff --git a/iokit/Kernel/IOStringFuncs.c b/iokit/Kernel/IOStringFuncs.c new file mode 100644 index 000000000..8fe5ddec5 --- /dev/null +++ b/iokit/Kernel/IOStringFuncs.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1995 NeXT Computer, Inc. All rights reserved. + * + * strol.c - The functions strtol() & strtoul() are exported as public API + * via the header file ~driverkit/generalFuncs.h + * + * HISTORY + * 25-Oct-1995 Dean Reece at NeXT + * Created based on BSD4.4's strtol.c & strtoul.c. + * Removed dependency on _ctype_ by static versions of isupper()... + * Added support for "0b101..." 
binary constants. + * Commented out references to errno. + */ + +/*- + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* +#include +#include +#include +*/ +#include +#include + +typedef int BOOL; + +static inline BOOL +isupper(char c) +{ + return (c >= 'A' && c <= 'Z'); +} + +static inline BOOL +isalpha(char c) +{ + return ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')); +} + + +static inline BOOL +isspace(char c) +{ + return (c == ' ' || c == '\t' || c == '\n' || c == '\12'); +} + +static inline BOOL +isdigit(char c) +{ + return (c >= '0' && c <= '9'); +} + +/* + * Convert a string to a long integer. + * + * Ignores `locale' stuff. Assumes that the upper and lower case + * alphabets and digits are each contiguous. + */ +long +strtol(nptr, endptr, base) + const char *nptr; + char **endptr; + register int base; +{ + register const char *s = nptr; + register unsigned long acc; + register int c; + register unsigned long cutoff; + register int neg = 0, any, cutlim; + + /* + * Skip white space and pick up leading +/- sign if any. + * If base is 0, allow 0x for hex and 0 for octal, else + * assume decimal; if base is already 16, allow 0x. + */ + do { + c = *s++; + } while (isspace(c)); + if (c == '-') { + neg = 1; + c = *s++; + } else if (c == '+') + c = *s++; + if ((base == 0 || base == 16) && + c == '0' && (*s == 'x' || *s == 'X')) { + c = s[1]; + s += 2; + base = 16; + } else if ((base == 0 || base == 2) && + c == '0' && (*s == 'b' || *s == 'B')) { + c = s[1]; + s += 2; + base = 2; + } + if (base == 0) + base = c == '0' ? 8 : 10; + + /* + * Compute the cutoff value between legal numbers and illegal + * numbers. That is the largest legal value, divided by the + * base. An input number that is greater than this value, if + * followed by a legal input character, is too big. One that + * is equal to this value may be valid or not; the limit + * between valid and invalid numbers is then based on the last + * digit. 
For instance, if the range for longs is + * [-2147483648..2147483647] and the input base is 10, + * cutoff will be set to 214748364 and cutlim to either + * 7 (neg==0) or 8 (neg==1), meaning that if we have accumulated + * a value > 214748364, or equal but the next digit is > 7 (or 8), + * the number is too big, and we will return a range error. + * + * Set any if any `digits' consumed; make it negative to indicate + * overflow. + */ + cutoff = neg ? -(unsigned long)LONG_MIN : LONG_MAX; + cutlim = cutoff % (unsigned long)base; + cutoff /= (unsigned long)base; + for (acc = 0, any = 0;; c = *s++) { + if (isdigit(c)) + c -= '0'; + else if (isalpha(c)) + c -= isupper(c) ? 'A' - 10 : 'a' - 10; + else + break; + if (c >= base) + break; + if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim) ) + any = -1; + else { + any = 1; + acc *= base; + acc += c; + } + } + if (any < 0) { + acc = neg ? LONG_MIN : LONG_MAX; +// errno = ERANGE; + } else if (neg) + acc = -acc; + if (endptr != 0) + *endptr = (char *)(any ? s - 1 : nptr); + return (acc); +} + +unsigned long +strtoul(nptr, endptr, base) + const char *nptr; + char **endptr; + register int base; +{ + register const char *s = nptr; + register unsigned long acc; + register int c; + register unsigned long cutoff; + register int neg = 0, any, cutlim; + + /* + * See strtol for comments as to the logic used. + */ + do { + c = *s++; + } while (isspace(c)); + if (c == '-') { + neg = 1; + c = *s++; + } else if (c == '+') + c = *s++; + if ((base == 0 || base == 16) && + c == '0' && (*s == 'x' || *s == 'X')) { + c = s[1]; + s += 2; + base = 16; + } else if ((base == 0 || base == 2) && + c == '0' && (*s == 'b' || *s == 'B')) { + c = s[1]; + s += 2; + base = 2; + } + if (base == 0) + base = c == '0' ? 
8 : 10; + cutoff = (unsigned long)ULONG_MAX / (unsigned long)base; + cutlim = (unsigned long)ULONG_MAX % (unsigned long)base; + for (acc = 0, any = 0;; c = *s++) { + if (isdigit(c)) + c -= '0'; + else if (isalpha(c)) + c -= isupper(c) ? 'A' - 10 : 'a' - 10; + else + break; + if (c >= base) + break; + if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim) ) + any = -1; + else { + any = 1; + acc *= base; + acc += c; + } + } + if (any < 0) { + acc = ULONG_MAX; +// errno = ERANGE; + } else if (neg) + acc = -acc; + if (endptr != 0) + *endptr = (char *)(any ? s - 1 : nptr); + return (acc); +} + +/* + * + */ + +char *strchr(const char *str, int ch) +{ + do { + if (*str == ch) + return((char *)str); + } while (*str++); + return ((char *) 0); +} + +/* + * + */ + +char * +strncat(char *s1, const char *s2, unsigned long n) +{ + char *os1; + int i = n; + + os1 = s1; + while (*s1++) + ; + --s1; + while ((*s1++ = *s2++)) + if (--i < 0) { + *--s1 = '\0'; + break; + } + return(os1); +} diff --git a/iokit/Kernel/IOSyncer.cpp b/iokit/Kernel/IOSyncer.cpp new file mode 100644 index 000000000..815cc3b1f --- /dev/null +++ b/iokit/Kernel/IOSyncer.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSyncer.cpp created by wgulland on 2000-02-02 */ + +#include +#include + +OSDefineMetaClassAndStructors(IOSyncer, OSObject) + +IOSyncer * IOSyncer::create(bool twoRetains) +{ + IOSyncer * me = new IOSyncer; + + if (me && !me->init(twoRetains)) { + me->release(); + return 0; + } + + return me; +} + +bool IOSyncer::init(bool twoRetains) +{ + if (!OSObject::init()) + return false; + + if (!(guardLock = IOSimpleLockAlloc()) ) + return false; + + IOSimpleLockInit(guardLock); + + if(twoRetains) + retain(); + + fResult = kIOReturnSuccess; + + reinit(); + + return true; +} + +void IOSyncer::reinit() +{ + IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); + threadMustStop = true; + IOSimpleLockUnlockEnableInterrupt(guardLock, is); +} + +void IOSyncer::free() +{ + // just in case a thread is blocked here: + privateSignal(); + + if (guardLock != NULL) + IOSimpleLockFree(guardLock); + + OSObject::free(); +} + +IOReturn IOSyncer::wait(bool autoRelease = true) +{ + IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); + + if (threadMustStop) { + assert_wait((void *) &threadMustStop, false); + IOSimpleLockUnlockEnableInterrupt(guardLock, is); + thread_block(0); + } + else + IOSimpleLockUnlockEnableInterrupt(guardLock, is); + + IOReturn result = fResult; // Pick up before auto deleting! 
+ + if(autoRelease) + release(); + + return result; +} + +void IOSyncer::signal(IOReturn res = kIOReturnSuccess, + bool autoRelease = true) +{ + fResult = res; + privateSignal(); + if(autoRelease) + release(); +} + +void IOSyncer::privateSignal() +{ + if (threadMustStop) { + IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); + threadMustStop = false; + thread_wakeup_one((void *) &threadMustStop); + IOSimpleLockUnlockEnableInterrupt(guardLock, is); + } +} diff --git a/iokit/Kernel/IOTimerEventSource.cpp b/iokit/Kernel/IOTimerEventSource.cpp new file mode 100644 index 000000000..7f42f2471 --- /dev/null +++ b/iokit/Kernel/IOTimerEventSource.cpp @@ -0,0 +1,234 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * IOTimerEventSource.cpp + * + * HISTORY + * 2-Feb-1999 Joe Liu (jliu) created. 
+ * 1999-10-14 Godfrey van der Linden(gvdl) + * Revamped to use thread_call APIs + * + */ + +#include + +__BEGIN_DECLS +#include +__END_DECLS + +#include +#include + +#include +#include +#include + +#include + +#define super IOEventSource +OSDefineMetaClassAndStructors(IOTimerEventSource, IOEventSource) +OSMetaClassDefineReservedUnused(IOTimerEventSource, 0); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 1); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 2); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 3); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 4); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 5); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 6); +OSMetaClassDefineReservedUnused(IOTimerEventSource, 7); + +bool IOTimerEventSource::checkForWork() { return false; } + +// Timeout handler function. This function is called by the kernel when +// the timeout interval expires. +// +void IOTimerEventSource::timeout(void *self) +{ + IOTimerEventSource *me = (IOTimerEventSource *) self; + + if (me->enabled) { + Action doit = (Action) me->action; + + if (doit) { + IOTimeStampConstant(IODBG_TIMES(IOTIMES_ACTION), + (unsigned int) doit, (unsigned int) me->owner); + me->closeGate(); + (*doit)(me->owner, me); + me->openGate(); + } + } +} + +void IOTimerEventSource::setTimeoutFunc() +{ + calloutEntry = (void *) thread_call_allocate((thread_call_func_t) timeout, + (thread_call_param_t) this); +} + +bool IOTimerEventSource::init(OSObject *inOwner, Action inAction) +{ + if (!super::init(inOwner, (IOEventSource::Action) inAction) ) + return false; + + setTimeoutFunc(); + if (!calloutEntry) + return false; + + return true; +} + +IOTimerEventSource * +IOTimerEventSource::timerEventSource(OSObject *inOwner, Action inAction) +{ + IOTimerEventSource *me = new IOTimerEventSource; + + if (me && !me->init(inOwner, inAction)) { + me->free(); + return 0; + } + + return me; +} + +void IOTimerEventSource::free() +{ + if (calloutEntry) { + 
cancelTimeout(); + thread_call_free((thread_call_t) calloutEntry); + } + + super::free(); +} + +void IOTimerEventSource::cancelTimeout() +{ + thread_call_cancel((thread_call_t) calloutEntry); + AbsoluteTime_to_scalar(&abstime) = 0; +} + +void IOTimerEventSource::enable() +{ + super::enable(); + if (kIOReturnSuccess != wakeAtTime(abstime)) + super::disable(); // Problem re-scheduling timeout ignore enable +} + +void IOTimerEventSource::disable() +{ + thread_call_cancel((thread_call_t) calloutEntry); + super::disable(); +} + +IOReturn IOTimerEventSource::setTimeoutTicks(UInt32 ticks) +{ + return setTimeout(ticks, NSEC_PER_SEC/hz); +} + +IOReturn IOTimerEventSource::setTimeoutMS(UInt32 ms) +{ + return setTimeout(ms, kMillisecondScale); +} + +IOReturn IOTimerEventSource::setTimeoutUS(UInt32 us) +{ + return setTimeout(us, kMicrosecondScale); +} + +IOReturn IOTimerEventSource::setTimeout(UInt32 interval, UInt32 scale_factor) +{ + AbsoluteTime end; + + clock_interval_to_deadline(interval, scale_factor, &end); + return wakeAtTime(end); +} + +IOReturn IOTimerEventSource::setTimeout(mach_timespec_t interval) +{ + AbsoluteTime end, nsecs; + + clock_interval_to_absolutetime_interval + (interval.tv_nsec, kNanosecondScale, &nsecs); + clock_interval_to_deadline + (interval.tv_sec, NSEC_PER_SEC, &end); + ADD_ABSOLUTETIME(&end, &nsecs); + + return wakeAtTime(end); +} + +IOReturn IOTimerEventSource::setTimeout(AbsoluteTime interval) +{ + AbsoluteTime end; + + clock_get_uptime(&end); + ADD_ABSOLUTETIME(&end, &interval); + + return wakeAtTime(end); +} + +IOReturn IOTimerEventSource::wakeAtTimeTicks(UInt32 ticks) +{ + return wakeAtTime(ticks, NSEC_PER_SEC/hz); +} + +IOReturn IOTimerEventSource::wakeAtTimeMS(UInt32 ms) +{ + return wakeAtTime(ms, kMillisecondScale); +} + +IOReturn IOTimerEventSource::wakeAtTimeUS(UInt32 us) +{ + return wakeAtTime(us, kMicrosecondScale); +} + +IOReturn IOTimerEventSource::wakeAtTime(UInt32 abstime, UInt32 scale_factor) +{ + AbsoluteTime end; + 
clock_interval_to_absolutetime_interval(abstime, scale_factor, &end); + + return wakeAtTime(end); +} + +IOReturn IOTimerEventSource::wakeAtTime(mach_timespec_t abstime) +{ + AbsoluteTime end, nsecs; + + clock_interval_to_absolutetime_interval + (abstime.tv_nsec, kNanosecondScale, &nsecs); + clock_interval_to_absolutetime_interval + (abstime.tv_sec, kSecondScale, &end); + ADD_ABSOLUTETIME(&end, &nsecs); + + return wakeAtTime(end); +} + +IOReturn IOTimerEventSource::wakeAtTime(AbsoluteTime inAbstime) +{ + if (!action) + return kIOReturnNoResources; + + abstime = inAbstime; + if ( enabled && AbsoluteTime_to_scalar(&abstime) ) + thread_call_enter_delayed((thread_call_t) calloutEntry, abstime); + + return kIOReturnSuccess; +} diff --git a/iokit/Kernel/IOUserClient.cpp b/iokit/Kernel/IOUserClient.cpp new file mode 100644 index 000000000..83e846bca --- /dev/null +++ b/iokit/Kernel/IOUserClient.cpp @@ -0,0 +1,2616 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 14 Aug 98 sdouglas created. 
+ * 08 Dec 98 sdouglas cpp.
+ */
+
+#include <IOKit/IOKitServer.h>
+#include <IOKit/IOUserClient.h>
+#include <IOKit/IOService.h>
+#include <IOKit/IORegistryEntry.h>
+#include <IOKit/IOCatalogue.h>
+#include <IOKit/IOMemoryDescriptor.h>
+#include <IOKit/IOLib.h>
+
+#include <IOKit/assert.h>
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// definitions we should get from osfmk
+
+//typedef struct ipc_port * ipc_port_t;
+typedef natural_t ipc_kobject_type_t;
+
+#define IKOT_IOKIT_SPARE 27
+#define IKOT_IOKIT_CONNECT 29
+#define IKOT_IOKIT_OBJECT 30
+
+extern "C" {
+
+extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
+			ipc_kobject_type_t type );
+
+extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
+
+extern mach_port_name_t iokit_make_send_right( task_t task,
+			io_object_t obj, ipc_kobject_type_t type );
+
+extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
+
+extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
+
+extern ipc_port_t master_device_port;
+
+#include <vm/vm_map.h>
+
+} /* extern "C" */
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
+ +class IOMachPort : public OSObject +{ + OSDeclareDefaultStructors(IOMachPort) +public: + OSObject * object; + ipc_port_t port; + + static IOMachPort * portForObject( OSObject * obj, + ipc_kobject_type_t type ); + static void releasePortForObject( OSObject * obj, + ipc_kobject_type_t type ); + static OSDictionary * dictForType( ipc_kobject_type_t type ); + + static mach_port_name_t makeSendRightForTask( task_t task, + io_object_t obj, ipc_kobject_type_t type ); + + virtual void free(); +}; + +#define super OSObject +OSDefineMetaClassAndStructors(IOMachPort, OSObject) + +static IOLock * gIOObjectPortLock; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +// not in dictForType() for debugging ease +static OSDictionary * gIOObjectPorts; +static OSDictionary * gIOConnectPorts; + +OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type ) +{ + OSDictionary ** dict; + + if( IKOT_IOKIT_OBJECT == type ) + dict = &gIOObjectPorts; + else if( IKOT_IOKIT_CONNECT == type ) + dict = &gIOConnectPorts; + else + return( 0 ); + + if( 0 == *dict) + *dict = OSDictionary::withCapacity( 1 ); + + return( *dict ); +} + +IOMachPort * IOMachPort::portForObject ( OSObject * obj, + ipc_kobject_type_t type ) +{ + IOMachPort * inst = 0; + OSDictionary * dict; + + IOTakeLock( gIOObjectPortLock); + + do { + + dict = dictForType( type ); + if( !dict) + continue; + + if( (inst = (IOMachPort *) + dict->getObject( (const OSSymbol *) obj ))) + continue; + + inst = new IOMachPort; + if( inst && !inst->init()) { + inst = 0; + continue; + } + + inst->port = iokit_alloc_object_port( obj, type ); + if( inst->port) { + // retains obj + dict->setObject( (const OSSymbol *) obj, inst ); + inst->release(); // one more to free port => release obj + + } else { + inst->release(); + inst = 0; + } + + } while( false ); + + IOUnlock( gIOObjectPortLock); + + return( inst ); +} + +void IOMachPort::releasePortForObject( OSObject * obj, + ipc_kobject_type_t type ) +{ + 
OSDictionary * dict; + + IOTakeLock( gIOObjectPortLock); + + if( (dict = dictForType( type ))) { + obj->retain(); + dict->removeObject( (const OSSymbol *) obj ); + obj->release(); + } + + IOUnlock( gIOObjectPortLock); +} + +void IOUserClient::destroyUserReferences( OSObject * obj ) +{ + IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT ); + IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT ); +} + +mach_port_name_t IOMachPort::makeSendRightForTask( task_t task, + io_object_t obj, ipc_kobject_type_t type ) +{ + return( iokit_make_send_right( task, obj, type )); +} + +void IOMachPort::free( void ) +{ + if( port) + iokit_destroy_object_port( port ); + super::free(); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +extern "C" { + +// functions called from osfmk/device/iokit_rpc.c + +void +iokit_add_reference( io_object_t obj ) +{ + if( obj) + obj->retain(); +} + +void +iokit_remove_reference( io_object_t obj ) +{ + if( obj) + obj->release(); +} + +ipc_port_t +iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type ) +{ + IOMachPort * machPort; + + if( (machPort = IOMachPort::portForObject( obj, type ))) + return( machPort->port ); + else + return( 0 ); +} + +kern_return_t +iokit_client_died( io_object_t obj, ipc_port_t /* port */, + ipc_kobject_type_t type ) +{ + IOUserClient * client; + IOMemoryMap * map; + + if( (IKOT_IOKIT_CONNECT == type) + && (client = OSDynamicCast( IOUserClient, obj ))) + client->clientDied(); + else if( (IKOT_IOKIT_OBJECT == type) + && (map = OSDynamicCast( IOMemoryMap, obj ))) + map->taskDied(); + + IOMachPort::releasePortForObject( obj, type ); + + return( kIOReturnSuccess); +} + +}; /* extern "C" */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +class IOUserNotification : public OSIterator +{ + OSDeclareDefaultStructors(IOUserNotification) + + struct PingMsg { + mach_msg_header_t msgHdr; + OSNotificationHeader notifyHeader; + }; + + PingMsg * 
pingMsg; + vm_size_t msgSize; + IONotifier * holdNotify; + IOLock * lock; + +public: + + virtual bool init( mach_port_t port, natural_t type, + OSAsyncReference reference, + vm_size_t messageSize ); + virtual void free(); + + virtual void setNotification( IONotifier * obj ); + + virtual void reset(); + virtual bool isValid(); +}; + +class IOServiceUserNotification : public IOUserNotification +{ + OSDeclareDefaultStructors(IOServiceUserNotification) + + enum { kMaxOutstanding = 256 }; + + OSArray * newSet; + OSObject * lastEntry; + bool armed; + +public: + + virtual bool init( mach_port_t port, natural_t type, + OSAsyncReference reference ); + virtual void free(); + + static bool _handler( void * target, + void * ref, IOService * newService ); + virtual bool handler( void * ref, IOService * newService ); + + virtual OSObject * getNextObject(); +}; + +class IOServiceMessageUserNotification : public IOUserNotification +{ + OSDeclareDefaultStructors(IOServiceMessageUserNotification) + +public: + + virtual bool init( mach_port_t port, natural_t type, + OSAsyncReference reference, vm_size_t extraSize ); + virtual void free(); + + static IOReturn _handler( void * target, void * ref, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ); + virtual IOReturn handler( void * ref, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ); + + virtual OSObject * getNextObject(); +}; + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super OSIterator +OSDefineMetaClass( IOUserNotification, OSIterator ) +OSDefineAbstractStructors( IOUserNotification, OSIterator ) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOUserNotification::init( mach_port_t port, natural_t type, + OSAsyncReference reference, vm_size_t extraSize ) +{ + if( !super::init()) + return( false ); + + lock = IOLockAlloc(); + if( !lock) + return( false ); + 
+ msgSize = sizeof( PingMsg) + extraSize; + pingMsg = (PingMsg *) IOMalloc( msgSize); + if( !pingMsg) + return( false ); + + bzero( pingMsg, msgSize); + + pingMsg->msgHdr.msgh_remote_port = port; + pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS( + MACH_MSG_TYPE_COPY_SEND, + MACH_MSG_TYPE_COPY_SEND ); + pingMsg->msgHdr.msgh_size = msgSize; + pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; + + pingMsg->notifyHeader.size = extraSize; + pingMsg->notifyHeader.type = type; + bcopy( reference, pingMsg->notifyHeader.reference, sizeof(OSAsyncReference) ); + + return( true ); +} + +void IOUserNotification::free( void ) +{ + if( holdNotify) + holdNotify->remove(); + // can't be in handler now + + if( pingMsg) + IOFree( pingMsg, msgSize); + + if( lock) + IOLockFree( lock ); + + super::free(); +} + + +void IOUserNotification::setNotification( IONotifier * notify ) +{ + if( holdNotify) + holdNotify->remove(); + + holdNotify = notify; +} + +void IOUserNotification::reset() +{ + // ? +} + +bool IOUserNotification::isValid() +{ + return( true ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOUserNotification +OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOServiceUserNotification::init( mach_port_t port, natural_t type, + OSAsyncReference reference ) +{ + newSet = OSArray::withCapacity( 1 ); + if( !newSet) + return( false ); + + return( super::init( port, type, reference, 0) ); +} + +void IOServiceUserNotification::free( void ) +{ + if( lastEntry) + lastEntry->release(); + + if( newSet) + newSet->release(); + + super::free(); +} + +bool IOServiceUserNotification::_handler( void * target, + void * ref, IOService * newService ) +{ + return( ((IOServiceUserNotification *) target)->handler( ref, newService )); +} + +bool IOServiceUserNotification::handler( void * /* ref */, + IOService * newService ) +{ 
+ unsigned int count; + kern_return_t kr; + IOMachPort * machPort; + bool sendPing = false; + + IOTakeLock( lock ); + + count = newSet->getCount(); + if( count < kMaxOutstanding) { + + newSet->setObject( newService ); + if( (sendPing = (armed && (0 == count)))) + armed = false; + } + + IOUnlock( lock ); + + if( sendPing) { + if( (0 == pingMsg->msgHdr.msgh_local_port) + && (machPort = IOMachPort::portForObject( this, IKOT_IOKIT_OBJECT ) )) + pingMsg->msgHdr.msgh_local_port = machPort->port; + + kr = mach_msg_send_from_kernel( &pingMsg->msgHdr, + pingMsg->msgHdr.msgh_size); + if( KERN_SUCCESS != kr) + IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); + } + + return( true ); +} + +OSObject * IOServiceUserNotification::getNextObject() +{ + unsigned int count; + OSObject * result; + + IOTakeLock( lock ); + + if( lastEntry) + lastEntry->release(); + + count = newSet->getCount(); + if( count ) { + result = newSet->getObject( count - 1 ); + result->retain(); + newSet->removeObject( count - 1); + } else { + result = 0; + armed = true; + } + lastEntry = result; + + IOUnlock( lock ); + + return( result ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification) + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, + OSAsyncReference reference, vm_size_t extraSize ) +{ + return( super::init( port, type, reference, + sizeof(IOServiceInterestContent) + extraSize) ); +} + +void IOServiceMessageUserNotification::free( void ) +{ + super::free(); +} + +IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref, + UInt32 messageType, IOService * provider, + void * argument, vm_size_t argSize ) +{ + return( ((IOServiceMessageUserNotification *) target)->handler( + ref, messageType, provider, argument, argSize)); +} + +IOReturn 
IOServiceMessageUserNotification::handler( void * ref, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ) +{ + kern_return_t kr; + IOMachPort * machPort; + IOServiceInterestContent * data = (IOServiceInterestContent *) + pingMsg->notifyHeader.content; + + data->messageType = messageType; + if( argSize == 0) { + argSize = sizeof( messageArgument); + data->messageArgument[0] = messageArgument; + } else { + if( argSize > kIOUserNotifyMaxMessageSize) + argSize = kIOUserNotifyMaxMessageSize; + bcopy( messageArgument, data->messageArgument, argSize ); + } + pingMsg->msgHdr.msgh_size = sizeof( PingMsg) + + sizeof( IOServiceInterestContent ) + - sizeof( data->messageArgument) + + argSize; + + if( (machPort = IOMachPort::portForObject( provider, IKOT_IOKIT_OBJECT ) )) + pingMsg->msgHdr.msgh_local_port = machPort->port; + else + pingMsg->msgHdr.msgh_local_port = MACH_PORT_NULL; + + kr = mach_msg_send_from_kernel( &pingMsg->msgHdr, + pingMsg->msgHdr.msgh_size); + if( KERN_SUCCESS != kr) + IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); + + return( kIOReturnSuccess ); +} + +OSObject * IOServiceMessageUserNotification::getNextObject() +{ + return( 0 ); +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#undef super +#define super IOService +OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService ) + +void IOUserClient::initialize( void ) +{ + gIOObjectPortLock = IOLockAlloc(); + + assert( gIOObjectPortLock ); +} + +void IOUserClient::setAsyncReference(OSAsyncReference asyncRef, + mach_port_t wakePort, + void *callback, void *refcon) +{ + asyncRef[kIOAsyncReservedIndex] = (natural_t) wakePort; + asyncRef[kIOAsyncCalloutFuncIndex] = (natural_t) callback; + asyncRef[kIOAsyncCalloutRefconIndex] = (natural_t) refcon; +} + +IOReturn IOUserClient::clientHasPrivilege( void * securityToken, + const char * privilegeName ) +{ + kern_return_t kr; + security_token_t token; + mach_msg_type_number_t 
count; + + if( 0 != strcmp( privilegeName, kIOClientPrivilegeAdministrator)) + return( kIOReturnUnsupported ); + + count = TASK_SECURITY_TOKEN_COUNT; + kr = task_info( (task_t) securityToken, TASK_SECURITY_TOKEN, + (task_info_t) &token, &count ); + if( (kr == kIOReturnSuccess) + && (0 != token.val[0])) + kr = kIOReturnNotPrivileged; + + return( kr ); +} + +bool IOUserClient::initWithTask(task_t owningTask, + void * securityID, + UInt32 type ) +{ + if( getPropertyTable()) + return true; + else + return super::init(); +} + +bool IOUserClient::initWithTask(task_t owningTask, + void * securityID, + UInt32 type, + OSDictionary * properties ) +{ + bool ok; + + ok = super::init( properties ); + ok &= initWithTask( owningTask, securityID, type ); + + return( ok ); +} + +void IOUserClient::free() +{ + if( mappings) + mappings->release(); + + super::free(); +} + +IOReturn IOUserClient::clientDied( void ) +{ + return( clientClose()); +} + +IOReturn IOUserClient::clientClose( void ) +{ + return( kIOReturnUnsupported ); +} + +IOService * IOUserClient::getService( void ) +{ + return( 0 ); +} + +IOReturn IOUserClient::registerNotificationPort( + mach_port_t /* port */, + UInt32 /* type */, + UInt32 /* refCon */) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type, + semaphore_t * semaphore ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOUserClient::connectClient( IOUserClient * /* client */ ) +{ + return( kIOReturnUnsupported); +} + +IOReturn IOUserClient::clientMemoryForType( UInt32 type, + IOOptionBits * options, + IOMemoryDescriptor ** memory ) +{ + return( kIOReturnUnsupported); +} + +IOMemoryMap * IOUserClient::mapClientMemory( + IOOptionBits type, + task_t task, + IOOptionBits mapFlags = kIOMapAnywhere, + IOVirtualAddress atAddress = 0 ) +{ + IOReturn err; + IOOptionBits options = 0; + IOMemoryDescriptor * memory; + IOMemoryMap * map = 0; + + err = clientMemoryForType( (UInt32) type, &options, &memory ); 
+ + if( memory && (kIOReturnSuccess == err)) { + + options = (options & ~kIOMapUserOptionsMask) + | (mapFlags & kIOMapUserOptionsMask); + map = memory->map( task, atAddress, options ); + memory->release(); + } + + return( map ); +} + +IOReturn IOUserClient::exportObjectToClient(task_t task, + OSObject *obj, io_object_t *clientObj) +{ + mach_port_name_t name; + + name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT ); + assert( name ); + + *(mach_port_name_t *)clientObj = name; + return kIOReturnSuccess; +} + +IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */) +{ + return( 0 ); +} + +IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */) +{ + return( 0 ); +} + +IOExternalMethod * IOUserClient:: +getTargetAndMethodForIndex(IOService **targetP, UInt32 index) +{ + IOExternalMethod *method = getExternalMethodForIndex(index); + + if (method) + *targetP = (IOService *) method->object; + + return method; +} + +IOExternalAsyncMethod * IOUserClient:: +getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index) +{ + IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index); + + if (method) + *targetP = (IOService *) method->object; + + return method; +} + +IOExternalTrap * IOUserClient:: +getExternalTrapForIndex(UInt32 index) +{ + return NULL; +} + +IOExternalTrap * IOUserClient:: +getTargetAndTrapForIndex(IOService ** targetP, UInt32 index) +{ + IOExternalTrap *trap = getExternalTrapForIndex(index); + + if (trap) { + *targetP = trap->object; + } + + return trap; +} + +IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference, + IOReturn result, void *args[], UInt32 numArgs) +{ + struct ReplyMsg { + mach_msg_header_t msgHdr; + OSNotificationHeader notifyHdr; + IOAsyncCompletionContent asyncContent; + void * args[kMaxAsyncArgs]; + }; + ReplyMsg replyMsg; + mach_port_t replyPort; + kern_return_t kr; + + // If no reply port, do nothing. 
+ replyPort = (mach_port_t) reference[0]; + if(replyPort == MACH_PORT_NULL) + return kIOReturnSuccess; + + if(numArgs > kMaxAsyncArgs) + return kIOReturnMessageTooLarge; + replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,0); + replyMsg.msgHdr.msgh_size = + sizeof(replyMsg) - (kMaxAsyncArgs-numArgs)*sizeof(void *); + replyMsg.msgHdr.msgh_remote_port = replyPort; + replyMsg.msgHdr.msgh_local_port = 0; + replyMsg.msgHdr.msgh_id = kOSNotificationMessageID; + + replyMsg.notifyHdr.size = sizeof(IOAsyncCompletionContent) + + numArgs*sizeof(void *); + replyMsg.notifyHdr.type = kIOAsyncCompletionNotificationType; + bcopy( reference, replyMsg.notifyHdr.reference, sizeof(OSAsyncReference)); + + replyMsg.asyncContent.result = result; + if(numArgs > 0) + bcopy(args, replyMsg.args, sizeof(void *)*numArgs); + kr = mach_msg_send_from_kernel( &replyMsg.msgHdr, + replyMsg.msgHdr.msgh_size); + if( KERN_SUCCESS != kr) + IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); + return kr; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +#include + +static void makeMatchingCompatible( OSDictionary * dict ) +{ + const char * key; + const char * newKey; + OSObject * value; + OSString * str; + int i = 0; + + static const char * gratuitousNameChanges[] = { + "IOImports", kIOProviderClassKey, + "IOClass Names", kIOClassKey, + "IOProbe Score", kIOProbeScoreKey, + "IOKit Debug", kIOKitDebugKey, + "IONeededResources", kIOResourceMatchKey, + "IOName Match", kIONameMatchKey, + "IOPCI Match", kIOPCIMatchKey, + "IOPCI Primary Match", kIOPCIPrimaryMatchKey, + "IOPCI Secondary Match",kIOPCISecondaryMatchKey, + "IOPCI Class Match", kIOPCIClassMatchKey, + 0 + }; + + while( (key = gratuitousNameChanges[i++])) { + newKey = gratuitousNameChanges[i++]; + if( (value = dict->getObject( key)) + && (0 == dict->getObject( newKey))) { + + dict->setObject( newKey, value); + dict->removeObject( key); + + if( (str = OSDynamicCast(OSString, 
dict->getObject("CFBundleIdentifier")))) + IOLog("kext \"%s\" ", str->getCStringNoCopy()); + IOLog("must change \"%s\" to \"%s\"\n", key, newKey); + } + } +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +extern "C" { + +#define CHECK(cls,obj,out) \ + cls * out; \ + if( !(out = OSDynamicCast( cls, obj))) \ + return( kIOReturnBadArgument ) + +/* Routine io_object_get_class */ +kern_return_t is_io_object_get_class( + io_object_t object, + io_name_t className ) +{ + if( !object) + return( kIOReturnBadArgument ); + + strcpy( className, object->getMetaClass()->getClassName()); + return( kIOReturnSuccess ); +} + +/* Routine io_object_conforms_to */ +kern_return_t is_io_object_conforms_to( + io_object_t object, + io_name_t className, + boolean_t *conforms ) +{ + if( !object) + return( kIOReturnBadArgument ); + + *conforms = (0 != object->metaCast( className )); + return( kIOReturnSuccess ); +} + +/* Routine io_object_get_retain_count */ +kern_return_t is_io_object_get_retain_count( + io_object_t object, + int *retainCount ) +{ + if( !object) + return( kIOReturnBadArgument ); + + *retainCount = object->getRetainCount(); + return( kIOReturnSuccess ); +} + +/* Routine io_iterator_next */ +kern_return_t is_io_iterator_next( + io_object_t iterator, + io_object_t *object ) +{ + OSObject * obj; + + CHECK( OSIterator, iterator, iter ); + + obj = iter->getNextObject(); + if( obj) { + obj->retain(); + *object = obj; + return( kIOReturnSuccess ); + } else + return( kIOReturnNoDevice ); +} + +/* Routine io_iterator_reset */ +kern_return_t is_io_iterator_reset( + io_object_t iterator ) +{ + CHECK( OSIterator, iterator, iter ); + + iter->reset(); + + return( kIOReturnSuccess ); +} + +/* Routine io_iterator_is_valid */ +kern_return_t is_io_iterator_is_valid( + io_object_t iterator, + boolean_t *is_valid ) +{ + CHECK( OSIterator, iterator, iter ); + + *is_valid = iter->isValid(); + + return( kIOReturnSuccess ); +} + +/* Routine 
io_service_match_property_table */ +kern_return_t is_io_service_match_property_table( + io_service_t _service, + io_string_t matching, + boolean_t *matches ) +{ + CHECK( IOService, _service, service ); + + kern_return_t kr; + OSObject * obj; + OSDictionary * dict; + + obj = OSUnserializeXML( matching ); + + if( (dict = OSDynamicCast( OSDictionary, obj))) { + *matches = service->passiveMatch( dict ); + kr = kIOReturnSuccess; + } else + kr = kIOReturnBadArgument; + + if( obj) + obj->release(); + + return( kr ); +} + +/* Routine io_service_get_matching_services */ +kern_return_t is_io_service_get_matching_services( + mach_port_t master_port, + io_string_t matching, + io_iterator_t *existing ) +{ + kern_return_t kr; + OSObject * obj; + OSDictionary * dict; + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + obj = OSUnserializeXML( matching ); + + if( (dict = OSDynamicCast( OSDictionary, obj))) { + makeMatchingCompatible( dict ); // temp for binary compatibility + *existing = IOService::getMatchingServices( dict ); + kr = kIOReturnSuccess; + } else + kr = kIOReturnBadArgument; + + if( obj) + obj->release(); + + return( kr ); +} + +/* Routine io_service_add_notification */ +kern_return_t is_io_service_add_notification( + mach_port_t master_port, + io_name_t notification_type, + io_string_t matching, + mach_port_t port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + io_object_t * notification ) +{ + + IOServiceUserNotification * userNotify = 0; + IONotifier * notify = 0; + const OSSymbol * sym; + OSDictionary * dict; + IOReturn err; + unsigned long int userMsgType; + + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + do { + err = kIOReturnNoResources; + + if( !(sym = OSSymbol::withCString( notification_type ))) + err = kIOReturnNoResources; + + if( !(dict = OSDynamicCast( OSDictionary, + OSUnserializeXML( matching )))) { + err = kIOReturnBadArgument; + continue; + } + 
makeMatchingCompatible( dict ); // temp for binary compatibility + + if( (sym == gIOPublishNotification) + || (sym == gIOFirstPublishNotification)) + userMsgType = kIOServicePublishNotificationType; + else if( (sym == gIOMatchedNotification) + || (sym == gIOFirstMatchNotification)) + userMsgType = kIOServiceMatchedNotificationType; + else if( sym == gIOTerminatedNotification) + userMsgType = kIOServiceTerminatedNotificationType; + else + userMsgType = kLastIOKitNotificationType; + + userNotify = new IOServiceUserNotification; + + if( userNotify && !userNotify->init( port, userMsgType, + reference)) { + userNotify->release(); + userNotify = 0; + } + if( !userNotify) + continue; + + notify = IOService::addNotification( sym, dict, + &userNotify->_handler, userNotify ); + if( notify) { + dict = 0; + *notification = userNotify; + userNotify->setNotification( notify ); + err = kIOReturnSuccess; + } else + err = kIOReturnUnsupported; + + } while( false ); + + if( sym) + sym->release(); + if( dict) + dict->release(); + + return( err ); +} + +/* Routine io_service_add_notification_old */ +kern_return_t is_io_service_add_notification_old( + mach_port_t master_port, + io_name_t notification_type, + io_string_t matching, + mach_port_t port, + natural_t ref, + io_object_t * notification ) +{ + return( is_io_service_add_notification( master_port, notification_type, + matching, port, &ref, 1, notification )); +} + +/* Routine io_service_add_message_notification */ +kern_return_t is_io_service_add_interest_notification( + io_object_t _service, + io_name_t type_of_interest, + mach_port_t port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + io_object_t * notification ) +{ + + IOServiceMessageUserNotification * userNotify = 0; + IONotifier * notify = 0; + const OSSymbol * sym; + IOReturn err; + + CHECK( IOService, _service, service ); + + err = kIOReturnNoResources; + if( (sym = OSSymbol::withCString( type_of_interest ))) do { + + userNotify = new 
IOServiceMessageUserNotification; + + if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType, + reference, kIOUserNotifyMaxMessageSize )) { + userNotify->release(); + userNotify = 0; + } + if( !userNotify) + continue; + + notify = service->registerInterest( sym, + &userNotify->_handler, userNotify ); + if( notify) { + *notification = userNotify; + userNotify->setNotification( notify ); + err = kIOReturnSuccess; + } else + err = kIOReturnUnsupported; + + } while( false ); + + return( err ); +} + +/* Routine io_service_acknowledge_notification */ +kern_return_t is_io_service_acknowledge_notification( + io_object_t _service, + natural_t notify_ref, + natural_t response ) +{ + CHECK( IOService, _service, service ); + + return( service->acknowledgeNotification( (IONotificationRef) notify_ref, + (IOOptionBits) response )); + +} + +/* Routine io_connect_get_semaphore */ +kern_return_t is_io_connect_get_notification_semaphore( + io_connect_t connection, + natural_t notification_type, + semaphore_t *semaphore ) +{ + CHECK( IOUserClient, connection, client ); + + return( client->getNotificationSemaphore( (UInt32) notification_type, + semaphore )); +} + +/* Routine io_registry_get_root_entry */ +kern_return_t is_io_registry_get_root_entry( + mach_port_t master_port, + io_object_t *root ) +{ + IORegistryEntry * entry; + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + entry = IORegistryEntry::getRegistryRoot(); + if( entry) + entry->retain(); + *root = entry; + + return( kIOReturnSuccess ); +} + +/* Routine io_registry_create_iterator */ +kern_return_t is_io_registry_create_iterator( + mach_port_t master_port, + io_name_t plane, + int options, + io_object_t *iterator ) +{ + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + *iterator = IORegistryIterator::iterateOver( + IORegistryEntry::getPlane( plane ), options ); + + return( *iterator ? 
kIOReturnSuccess : kIOReturnBadArgument ); +} + +/* Routine io_registry_entry_create_iterator */ +kern_return_t is_io_registry_entry_create_iterator( + io_object_t registry_entry, + io_name_t plane, + int options, + io_object_t *iterator ) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + *iterator = IORegistryIterator::iterateOver( entry, + IORegistryEntry::getPlane( plane ), options ); + + return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument ); +} + +/* Routine io_registry_iterator_enter */ +kern_return_t is_io_registry_iterator_enter_entry( + io_object_t iterator ) +{ + CHECK( IORegistryIterator, iterator, iter ); + + iter->enterEntry(); + + return( kIOReturnSuccess ); +} + +/* Routine io_registry_iterator_exit */ +kern_return_t is_io_registry_iterator_exit_entry( + io_object_t iterator ) +{ + bool didIt; + + CHECK( IORegistryIterator, iterator, iter ); + + didIt = iter->exitEntry(); + + return( didIt ? kIOReturnSuccess : kIOReturnNoDevice ); +} + +/* Routine io_registry_entry_from_path */ +kern_return_t is_io_registry_entry_from_path( + mach_port_t master_port, + io_string_t path, + io_object_t *registry_entry ) +{ + IORegistryEntry * entry; + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + entry = IORegistryEntry::fromPath( path ); + + *registry_entry = entry; + + return( kIOReturnSuccess ); +} + +/* Routine io_registry_entry_in_plane */ +kern_return_t is_io_registry_entry_in_plane( + io_object_t registry_entry, + io_name_t plane, + boolean_t *inPlane ) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane )); + + return( kIOReturnSuccess ); +} + + +/* Routine io_registry_entry_get_path */ +kern_return_t is_io_registry_entry_get_path( + io_object_t registry_entry, + io_name_t plane, + io_string_t path ) +{ + int length; + CHECK( IORegistryEntry, registry_entry, entry ); + + length = sizeof( io_string_t); + if( entry->getPath( path, &length, 
IORegistryEntry::getPlane( plane ))) + return( kIOReturnSuccess ); + else + return( kIOReturnBadArgument ); +} + + +/* Routine io_registry_entry_get_name */ +kern_return_t is_io_registry_entry_get_name( + io_object_t registry_entry, + io_name_t name ) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + strncpy( name, entry->getName(), sizeof( io_name_t)); + + return( kIOReturnSuccess ); +} + +/* Routine io_registry_entry_get_name_in_plane */ +kern_return_t is_io_registry_entry_get_name_in_plane( + io_object_t registry_entry, + io_name_t plane, + io_name_t name ) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + strncpy( name, entry->getName( IORegistryEntry::getPlane( plane )), + sizeof( io_name_t)); + + return( kIOReturnSuccess ); +} + +// Create a vm_map_copy_t or kalloc'ed data for memory +// to be copied out. ipc will free after the copyout. + +static kern_return_t copyoutkdata( void * data, vm_size_t len, + io_buf_ptr_t * buf ) +{ + kern_return_t err; + vm_map_copy_t copy; + + err = vm_map_copyin( kernel_map, (vm_offset_t) data, len, + false /* src_destroy */, ©); + + assert( err == KERN_SUCCESS ); + if( err == KERN_SUCCESS ) + *buf = (char *) copy; + return( err ); +} + + +/* Routine io_registry_entry_get_properties */ +kern_return_t is_io_registry_entry_get_properties( + io_object_t registry_entry, + io_buf_ptr_t *properties, + mach_msg_type_number_t *propertiesCnt ) +{ + kern_return_t err; + vm_size_t len; + + CHECK( IORegistryEntry, registry_entry, entry ); + + OSSerialize * s = OSSerialize::withCapacity(4096); + + if( !s) + return( kIOReturnNoMemory ); + + s->clearText(); + + if( entry->serializeProperties( s )) { + len = s->getLength(); + *propertiesCnt = len; + err = copyoutkdata( s->text(), len, properties ); + + } else + err = kIOReturnUnsupported; + + s->release(); + + return( err ); +} + +/* Routine io_registry_entry_set_properties */ +kern_return_t is_io_registry_entry_set_properties +( + io_object_t registry_entry, + io_buf_ptr_t 
properties, + mach_msg_type_number_t propertiesCnt, + natural_t * result) +{ + OSObject * obj; + kern_return_t err; + IOReturn res; + vm_offset_t data; + + CHECK( IORegistryEntry, registry_entry, entry ); + + err = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) properties ); + + if( KERN_SUCCESS == err) { + + // must return success after vm_map_copyout() succeeds + obj = OSUnserializeXML( (const char *) data ); + vm_deallocate( kernel_map, data, propertiesCnt ); + + if( obj) { + res = entry->setProperties( obj ); + obj->release(); + } else + res = kIOReturnBadArgument; + } else + res = err; + + *result = res; + return( err ); +} + + +/* Routine io_registry_entry_get_property */ +kern_return_t is_io_registry_entry_get_property( + io_object_t registry_entry, + io_name_t property_name, + io_scalar_inband_t buf, + mach_msg_type_number_t *dataCnt ) +{ + OSObject * obj; + OSData * data; + OSString * str; + OSBoolean * boo; + OSNumber * off; + UInt64 offsetBytes; + unsigned int len = 0; + const void * bytes = 0; + IOReturn ret = kIOReturnSuccess; + + CHECK( IORegistryEntry, registry_entry, entry ); + + obj = entry->getProperty( property_name ); + if( !obj) + return( kIOReturnNoResources ); + + // One day OSData will be a common container base class + // until then... + if( (data = OSDynamicCast( OSData, obj ))) { + len = data->getLength(); + bytes = data->getBytesNoCopy(); + + } else if( (str = OSDynamicCast( OSString, obj ))) { + len = str->getLength() + 1; + bytes = str->getCStringNoCopy(); + + } else if( (boo = OSDynamicCast( OSBoolean, obj ))) { + len = boo->isTrue() ? sizeof("Yes") : sizeof("No"); + bytes = boo->isTrue() ? 
"Yes" : "No"; + + } else if( (off = OSDynamicCast( OSNumber, obj ))) { + offsetBytes = off->unsigned64BitValue(); + len = off->numberOfBytes(); + bytes = &offsetBytes; +#if __BIG_ENDIAN__ + bytes = (const void *) + (((UInt32) bytes) + (sizeof( UInt64) - len)); +#endif + + } else + ret = kIOReturnBadArgument; + + if( bytes) { + if( *dataCnt < len) + ret = kIOReturnIPCError; + else { + *dataCnt = len; + bcopy( bytes, buf, len ); + } + } + + return( ret ); +} + + +/* Routine io_registry_entry_get_child_iterator */ +kern_return_t is_io_registry_entry_get_child_iterator( + io_object_t registry_entry, + io_name_t plane, + io_object_t *iterator ) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + *iterator = entry->getChildIterator( + IORegistryEntry::getPlane( plane )); + + return( kIOReturnSuccess ); +} + +/* Routine io_registry_entry_get_parent_iterator */ +kern_return_t is_io_registry_entry_get_parent_iterator( + io_object_t registry_entry, + io_name_t plane, + io_object_t *iterator) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + *iterator = entry->getParentIterator( + IORegistryEntry::getPlane( plane )); + + return( kIOReturnSuccess ); +} + +/* Routine io_service_get_busy_state */ +kern_return_t is_io_service_get_busy_state( + io_object_t _service, + int *busyState ) +{ + CHECK( IOService, _service, service ); + + *busyState = service->getBusyState(); + + return( kIOReturnSuccess ); +} + +/* Routine io_service_wait_quiet */ +kern_return_t is_io_service_wait_quiet( + io_object_t _service, + mach_timespec_t wait_time ) +{ + CHECK( IOService, _service, service ); + + return( service->waitQuiet( &wait_time )); +} + +/* Routine io_service_request_probe */ +kern_return_t is_io_service_request_probe( + io_object_t _service, + int options ) +{ + CHECK( IOService, _service, service ); + + return( service->requestProbe( options )); +} + + +/* Routine io_service_open */ +kern_return_t is_io_service_open( + io_object_t _service, + task_t owningTask, + int 
connect_type, + io_object_t *connection ) +{ + IOUserClient * client; + IOReturn err; + + CHECK( IOService, _service, service ); + + err = service->newUserClient( owningTask, (void *) owningTask, + connect_type, &client ); + + if( err == kIOReturnSuccess) { + assert( OSDynamicCast(IOUserClient, client) ); + *connection = client; + } + + return( err); +} + +/* Routine io_service_close */ +kern_return_t is_io_service_close( + io_object_t connection ) +{ + CHECK( IOUserClient, connection, client ); + + client->clientClose(); + + return( kIOReturnSuccess ); +} + +/* Routine io_connect_get_service */ +kern_return_t is_io_connect_get_service( + io_object_t connection, + io_object_t *service ) +{ + IOService * theService; + + CHECK( IOUserClient, connection, client ); + + theService = client->getService(); + if( theService) + theService->retain(); + + *service = theService; + + return( theService ? kIOReturnSuccess : kIOReturnUnsupported ); +} + +/* Routine io_connect_set_notification_port */ +kern_return_t is_io_connect_set_notification_port( + io_object_t connection, + int notification_type, + mach_port_t port, + int reference) +{ + CHECK( IOUserClient, connection, client ); + + return( client->registerNotificationPort( port, notification_type, + reference )); +} + +kern_return_t is_io_connect_map_memory( + io_object_t connect, + int type, + task_t task, + vm_address_t * mapAddr, + vm_size_t * mapSize, + int flags ) +{ + IOReturn err; + IOMemoryMap * map; + + CHECK( IOUserClient, connect, client ); + + map = client->mapClientMemory( type, task, flags, *mapAddr ); + + if( map) { + *mapAddr = map->getVirtualAddress(); + if( mapSize) + *mapSize = map->getLength(); + + if( task != current_task()) { + // push a name out to the task owning the map, + // so we can clean up maps + mach_port_name_t name = IOMachPort::makeSendRightForTask( + task, map, IKOT_IOKIT_OBJECT ); + assert( name ); + + } else { + // keep it with the user client + IOLockLock( gIOObjectPortLock); + if( 0 
== client->mappings) + client->mappings = OSSet::withCapacity(2); + if( client->mappings) + client->mappings->setObject( map); + IOLockUnlock( gIOObjectPortLock); + map->release(); + } + err = kIOReturnSuccess; + + } else + err = kIOReturnBadArgument; + + return( err ); +} + +kern_return_t is_io_connect_unmap_memory( + io_object_t connect, + int type, + task_t task, + vm_address_t mapAddr ) +{ + IOReturn err; + IOOptionBits options = 0; + IOMemoryDescriptor * memory; + IOMemoryMap * map; + + CHECK( IOUserClient, connect, client ); + + err = client->clientMemoryForType( (UInt32) type, &options, &memory ); + + if( memory && (kIOReturnSuccess == err)) { + + options = (options & ~kIOMapUserOptionsMask) + | kIOMapAnywhere | kIOMapReference; + + map = memory->map( task, mapAddr, options ); + memory->release(); + if( map) { + IOLockLock( gIOObjectPortLock); + if( client->mappings) + client->mappings->removeObject( map); + IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); + IOLockUnlock( gIOObjectPortLock); + map->release(); + } else + err = kIOReturnBadArgument; + } + + return( err ); +} + + +/* Routine io_connect_add_client */ +kern_return_t is_io_connect_add_client( + io_object_t connection, + io_object_t connect_to) +{ + CHECK( IOUserClient, connection, client ); + CHECK( IOUserClient, connect_to, to ); + + return( client->connectClient( to ) ); +} + + +/* Routine io_connect_set_properties */ +kern_return_t is_io_connect_set_properties( + io_object_t connection, + io_buf_ptr_t properties, + mach_msg_type_number_t propertiesCnt, + natural_t * result) +{ + return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result )); +} + + +/* Routine io_connect_method_scalarI_scalarO */ +kern_return_t is_io_connect_method_scalarI_scalarO( + io_object_t connect, + UInt32 index, + void * input[], + IOByteCount inputCount, + void * output[], + IOByteCount * outputCount ) +{ + IOReturn err; + IOExternalMethod * method; + IOService * object; + 
IOMethod func; + + CHECK( IOUserClient, connect, client); + if( (method = client->getTargetAndMethodForIndex(&object, index))) { + do { + err = kIOReturnBadArgument; + if( kIOUCScalarIScalarO != (method->flags & kIOUCTypeMask)) + continue; + if( inputCount != method->count0) + continue; + if( *outputCount != method->count1) + continue; + + func = method->func; + + switch( inputCount) { + + case 6: + err = (object->*func)( input[0], input[1], input[2], + input[3], input[4], input[5] ); + break; + case 5: + err = (object->*func)( input[0], input[1], input[2], + input[3], input[4], + &output[0] ); + break; + case 4: + err = (object->*func)( input[0], input[1], input[2], + input[3], + &output[0], &output[1] ); + break; + case 3: + err = (object->*func)( input[0], input[1], input[2], + &output[0], &output[1], &output[2] ); + break; + case 2: + err = (object->*func)( input[0], input[1], + &output[0], &output[1], &output[2], + &output[3] ); + break; + case 1: + err = (object->*func)( input[0], + &output[0], &output[1], &output[2], + &output[3], &output[4] ); + break; + case 0: + err = (object->*func)( &output[0], &output[1], &output[2], + &output[3], &output[4], &output[5] ); + break; + + default: + IOLog("%s: Bad method table\n", client->getName()); + } + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +/* Routine io_connect_method_scalarI_structureO */ +kern_return_t is_io_connect_method_scalarI_structureO( + io_object_t connect, + UInt32 index, + void * input[], + IOByteCount inputCount, + void * output, + IOByteCount * outputCount ) +{ + IOReturn err; + IOExternalMethod * method; + IOService * object; + IOMethod func; + + CHECK( IOUserClient, connect, client); + + if( (method = client->getTargetAndMethodForIndex(&object, index)) ) { + do { + err = kIOReturnBadArgument; + if( kIOUCScalarIStructO != (method->flags & kIOUCTypeMask)) + continue; + if( inputCount != method->count0) + continue; + if( (0xffffffff != method->count1) + && 
(*outputCount != method->count1)) + continue; + + func = method->func; + + switch( inputCount) { + + case 5: + err = (object->*func)( input[0], input[1], input[2], + input[3], input[4], + output ); + break; + case 4: + err = (object->*func)( input[0], input[1], input[2], + input[3], + output, (void *)outputCount ); + break; + case 3: + err = (object->*func)( input[0], input[1], input[2], + output, (void *)outputCount, 0 ); + break; + case 2: + err = (object->*func)( input[0], input[1], + output, (void *)outputCount, 0, 0 ); + break; + case 1: + err = (object->*func)( input[0], + output, (void *)outputCount, 0, 0, 0 ); + break; + case 0: + err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", client->getName()); + } + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +/* Routine io_connect_method_scalarI_structureI */ +kern_return_t is_io_connect_method_scalarI_structureI( + io_connect_t connect, + UInt32 index, + void * input[], + IOByteCount inputCount, + UInt8 * inputStruct, + IOByteCount inputStructCount ) +{ + IOReturn err; + IOExternalMethod * method; + IOService * object; + IOMethod func; + + CHECK( IOUserClient, connect, client); + + if( (method = client->getTargetAndMethodForIndex(&object, index)) ) { + do { + err = kIOReturnBadArgument; + if( kIOUCScalarIStructI != (method->flags & kIOUCTypeMask)) + continue; + if( (0xffffffff != method->count0) + && (inputCount != method->count0)) + continue; + if( (0xffffffff != method->count1) + && (inputStructCount != method->count1)) + continue; + + func = method->func; + + switch( inputCount) { + + case 5: + err = (object->*func)( input[0], input[1], input[2], + input[3], input[4], + inputStruct ); + break; + case 4: + err = (object->*func)( input[0], input[1], input[2], + input[3], + inputStruct, (void *)inputStructCount ); + break; + case 3: + err = (object->*func)( input[0], input[1], input[2], + inputStruct, (void 
*)inputStructCount, + 0 ); + break; + case 2: + err = (object->*func)( input[0], input[1], + inputStruct, (void *)inputStructCount, + 0, 0 ); + break; + case 1: + err = (object->*func)( input[0], + inputStruct, (void *)inputStructCount, + 0, 0, 0 ); + break; + case 0: + err = (object->*func)( inputStruct, (void *)inputStructCount, + 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", client->getName()); + } + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +/* Routine io_connect_method_structureI_structureO */ +kern_return_t is_io_connect_method_structureI_structureO( + io_object_t connect, + UInt32 index, + UInt8 * input, + IOByteCount inputCount, + UInt8 * output, + IOByteCount * outputCount ) +{ + IOReturn err; + IOExternalMethod * method; + IOService * object; + IOMethod func; + + CHECK( IOUserClient, connect, client); + + if( (method = client->getTargetAndMethodForIndex(&object, index)) ) { + do { + err = kIOReturnBadArgument; + if( kIOUCStructIStructO != (method->flags & kIOUCTypeMask)) + continue; + if( (0xffffffff != method->count0) + && (inputCount != method->count0)) + continue; + if( (0xffffffff != method->count1) + && (*outputCount != method->count1)) + continue; + + func = method->func; + + if( method->count1) { + if( method->count0) { + err = (object->*func)( input, output, + (void *)inputCount, outputCount, 0, 0 ); + } else { + err = (object->*func)( output, outputCount, 0, 0, 0, 0 ); + } + } else { + err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 ); + } + + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +kern_return_t is_io_async_method_scalarI_scalarO( + io_object_t connect, + mach_port_t wakePort, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + UInt32 index, + void * input[], + IOByteCount inputCount, + void * output[], + IOByteCount * outputCount ) +{ + IOReturn err; + IOExternalAsyncMethod *method; + IOService * object; + 
IOAsyncMethod func; + + CHECK( IOUserClient, connect, client); + if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { + do { + err = kIOReturnBadArgument; + if( kIOUCScalarIScalarO != (method->flags & kIOUCTypeMask)) + continue; + if( inputCount != method->count0) + continue; + if( *outputCount != method->count1) + continue; + + reference[0] = (natural_t) wakePort; + func = method->func; + + switch( inputCount) { + + case 6: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], input[4], input[5] ); + break; + case 5: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], input[4], + &output[0] ); + break; + case 4: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], + &output[0], &output[1] ); + break; + case 3: + err = (object->*func)( reference, + input[0], input[1], input[2], + &output[0], &output[1], &output[2] ); + break; + case 2: + err = (object->*func)( reference, + input[0], input[1], + &output[0], &output[1], &output[2], + &output[3] ); + break; + case 1: + err = (object->*func)( reference, + input[0], + &output[0], &output[1], &output[2], + &output[3], &output[4] ); + break; + case 0: + err = (object->*func)( reference, + &output[0], &output[1], &output[2], + &output[3], &output[4], &output[5] ); + break; + + default: + IOLog("%s: Bad method table\n", client->getName()); + } + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +kern_return_t is_io_async_method_scalarI_structureO( + io_object_t connect, + mach_port_t wakePort, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + UInt32 index, + void * input[], + IOByteCount inputCount, + void * output, + IOByteCount * outputCount ) +{ + IOReturn err; + IOExternalAsyncMethod *method; + IOService * object; + IOAsyncMethod func; + + CHECK( IOUserClient, connect, client); + + if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { + do { + err = 
kIOReturnBadArgument; + if( kIOUCScalarIStructO != (method->flags & kIOUCTypeMask)) + continue; + if( inputCount != method->count0) + continue; + if( (0xffffffff != method->count1) + && (*outputCount != method->count1)) + continue; + + reference[0] = (natural_t) wakePort; + func = method->func; + + switch( inputCount) { + + case 5: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], input[4], + output ); + break; + case 4: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], + output, (void *)outputCount ); + break; + case 3: + err = (object->*func)( reference, + input[0], input[1], input[2], + output, (void *)outputCount, 0 ); + break; + case 2: + err = (object->*func)( reference, + input[0], input[1], + output, (void *)outputCount, 0, 0 ); + break; + case 1: + err = (object->*func)( reference, + input[0], + output, (void *)outputCount, 0, 0, 0 ); + break; + case 0: + err = (object->*func)( reference, + output, (void *)outputCount, 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", client->getName()); + } + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +kern_return_t is_io_async_method_scalarI_structureI( + io_connect_t connect, + mach_port_t wakePort, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + UInt32 index, + void * input[], + IOByteCount inputCount, + UInt8 * inputStruct, + IOByteCount inputStructCount ) +{ + IOReturn err; + IOExternalAsyncMethod *method; + IOService * object; + IOAsyncMethod func; + + CHECK( IOUserClient, connect, client); + + if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { + do { + err = kIOReturnBadArgument; + if( kIOUCScalarIStructI != (method->flags & kIOUCTypeMask)) + continue; + if( (0xffffffff != method->count0) + && (inputCount != method->count0)) + continue; + if( (0xffffffff != method->count1) + && (inputStructCount != method->count1)) + continue; + + reference[0] = 
(natural_t) wakePort; + func = method->func; + + switch( inputCount) { + + case 5: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], input[4], + inputStruct ); + break; + case 4: + err = (object->*func)( reference, + input[0], input[1], input[2], + input[3], + inputStruct, (void *)inputStructCount ); + break; + case 3: + err = (object->*func)( reference, + input[0], input[1], input[2], + inputStruct, (void *)inputStructCount, + 0 ); + break; + case 2: + err = (object->*func)( reference, + input[0], input[1], + inputStruct, (void *)inputStructCount, + 0, 0 ); + break; + case 1: + err = (object->*func)( reference, + input[0], + inputStruct, (void *)inputStructCount, + 0, 0, 0 ); + break; + case 0: + err = (object->*func)( reference, + inputStruct, (void *)inputStructCount, + 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", client->getName()); + } + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} + +kern_return_t is_io_async_method_structureI_structureO( + io_object_t connect, + mach_port_t wakePort, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + UInt32 index, + UInt8 * input, + IOByteCount inputCount, + UInt8 * output, + IOByteCount * outputCount ) +{ + IOReturn err; + IOExternalAsyncMethod *method; + IOService * object; + IOAsyncMethod func; + + CHECK( IOUserClient, connect, client); + + if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { + do { + err = kIOReturnBadArgument; + if( kIOUCStructIStructO != (method->flags & kIOUCTypeMask)) + continue; + if( (0xffffffff != method->count0) + && (inputCount != method->count0)) + continue; + if( (0xffffffff != method->count1) + && (*outputCount != method->count1)) + continue; + + reference[0] = (natural_t) wakePort; + func = method->func; + + if( method->count1) { + if( method->count0) { + err = (object->*func)( reference, + input, output, + (void *)inputCount, outputCount, 0, 0 ); + } else { + err 
= (object->*func)( reference, + output, outputCount, 0, 0, 0, 0 ); + } + } else { + err = (object->*func)( reference, + input, (void *)inputCount, 0, 0, 0, 0 ); + } + + } while( false); + + } else + err = kIOReturnUnsupported; + + return( err); +} +/* Routine io_make_matching */ +kern_return_t is_io_make_matching( + mach_port_t master_port, + UInt32 type, + IOOptionBits options, + UInt8 * input, + IOByteCount inputCount, + io_string_t matching ) +{ + OSSerialize * s; + IOReturn err = kIOReturnSuccess; + OSDictionary * dict; + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + switch( type) { + + case kIOServiceMatching: + dict = IOService::serviceMatching( gIOServiceKey ); + break; + + case kIOBSDNameMatching: + dict = IOBSDNameMatching( (const char *) input ); + break; + + case kIOOFPathMatching: + dict = IOOFPathMatching( (const char *) input, + matching, sizeof( io_string_t)); + break; + + default: + dict = 0; + } + + if( !dict) + return( kIOReturnUnsupported); + + do { + s = OSSerialize::withCapacity(4096); + if( !s) { + err = kIOReturnNoMemory; + continue; + } + s->clearText(); + if( !dict->serialize( s )) { + err = kIOReturnUnsupported; + continue; + } + + if( s->getLength() > sizeof( io_string_t)) { + err = kIOReturnNoMemory; + continue; + } else + strcpy( matching, s->text()); + + } while( false); + + if( s) + s->release(); + if( dict) + dict->release(); + + return( err); +} + +/* Routine io_catalog_send_data */ +kern_return_t is_io_catalog_send_data( + mach_port_t master_port, + int flag, + io_buf_ptr_t inData, + mach_msg_type_number_t inDataCount, + natural_t * result) +{ + OSObject * obj = 0; + vm_offset_t data; + kern_return_t kr = kIOReturnError; + + //printf("io_catalog_send_data called. flag: %d\n", flag); + + if( master_port != master_device_port) + return kIOReturnNotPrivileged; + + // FIXME: This is a hack. 
Should have own function for removeKernelLinker() + if(flag != kIOCatalogRemoveKernelLinker && ( !inData || !inDataCount) ) + return kIOReturnBadArgument; + + if (data) { + kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t)inData); + if( kr != KERN_SUCCESS) + return kr; + + // must return success after vm_map_copyout() succeeds + + if( inDataCount ) { + obj = (OSObject *)OSUnserializeXML((const char *)data); + vm_deallocate( kernel_map, data, inDataCount ); + if( !obj) { + *result = kIOReturnNoMemory; + return( KERN_SUCCESS); + } + } + } + + switch ( flag ) { + case kIOCatalogAddDrivers: + case kIOCatalogAddDriversNoMatch: { + OSArray * array; + + array = OSDynamicCast(OSArray, obj); + if ( array ) { +//-- + OSDictionary * dict; + int i = 0; + while( (dict = OSDynamicCast(OSDictionary, array->getObject(i++)))) + makeMatchingCompatible( dict ); +//-- + if ( !gIOCatalogue->addDrivers( array , + flag == kIOCatalogAddDrivers) ) { + kr = kIOReturnError; + } + } + else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogRemoveDrivers: + case kIOCatalogRemoveDriversNoMatch: { + OSDictionary * dict; + + dict = OSDynamicCast(OSDictionary, obj); + if ( dict ) { + makeMatchingCompatible( dict ); + if ( !gIOCatalogue->removeDrivers( dict, + flag == kIOCatalogRemoveDrivers ) ) { + kr = kIOReturnError; + } + } + else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogStartMatching: { + OSDictionary * dict; + + dict = OSDynamicCast(OSDictionary, obj); + if ( dict ) { + makeMatchingCompatible( dict ); + if ( !gIOCatalogue->startMatching( dict ) ) { + kr = kIOReturnError; + } + } + else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogRemoveKernelLinker: { + if (gIOCatalogue->removeKernelLinker() != KERN_SUCCESS) { + kr = kIOReturnError; + } + } + break; + + default: + kr = kIOReturnBadArgument; + break; + } + + if (obj) obj->release(); + + *result = kr; + return( KERN_SUCCESS); +} + +/* Routine io_catalog_terminate */ 
+kern_return_t is_io_catalog_terminate( + mach_port_t master_port, + int flag, + io_name_t name ) +{ + kern_return_t kr; + + if( master_port != master_device_port ) + return kIOReturnNotPrivileged; + + kr = IOUserClient::clientHasPrivilege( (void *) current_task(), + kIOClientPrivilegeAdministrator ); + if( kIOReturnSuccess != kr) + return( kr ); + + switch ( flag ) { + case kIOCatalogServiceTerminate: + OSIterator * iter; + IOService * service; + + iter = IORegistryIterator::iterateOver(gIOServicePlane, + kIORegistryIterateRecursively); + if ( !iter ) + return kIOReturnNoMemory; + + do { + iter->reset(); + while( (service = (IOService *)iter->getNextObject()) ) { + if( service->metaCast(name)) { + if ( !service->terminate( kIOServiceRequired + | kIOServiceSynchronous) ) { + kr = kIOReturnUnsupported; + break; + } + } + } + } while( !service && !iter->isValid()); + iter->release(); + break; + + case kIOCatalogModuleUnload: + case kIOCatalogModuleTerminate: + kr = gIOCatalogue->terminateDriversForModule(name, + flag == kIOCatalogModuleUnload); + break; + + default: + kr = kIOReturnBadArgument; + break; + } + + return( kr ); +} + +/* Routine io_catalog_get_data */ +kern_return_t is_io_catalog_get_data( + mach_port_t master_port, + int flag, + io_buf_ptr_t *outData, + mach_msg_type_number_t *outDataCount) +{ + kern_return_t kr = kIOReturnSuccess; + OSSerialize * s; + + if( master_port != master_device_port) + return kIOReturnNotPrivileged; + + //printf("io_catalog_get_data called. 
flag: %d\n", flag); + + s = OSSerialize::withCapacity(4096); + if ( !s ) + return kIOReturnNoMemory; + + s->clearText(); + switch ( flag ) { + case kIOCatalogGetContents: + if ( !gIOCatalogue->serialize(s) ) { + kr = kIOReturnNoMemory; + } + break; + + default: + kr = kIOReturnBadArgument; + break; + } + + if ( kr == kIOReturnSuccess ) { + vm_offset_t data; + vm_map_copy_t copy; + vm_size_t size; + + size = s->getLength(); + kr = vm_allocate(kernel_map, &data, size, true); + if ( kr == kIOReturnSuccess ) { + bcopy(s->text(), (void *)data, size); + kr = vm_map_copyin(kernel_map, data, size, true, ©); + *outData = (char *)copy; + *outDataCount = size; + } + } + + s->release(); + + return kr; +} + +/* Routine io_catalog_get_gen_count */ +kern_return_t is_io_catalog_get_gen_count( + mach_port_t master_port, + int *genCount) +{ + if( master_port != master_device_port) + return kIOReturnNotPrivileged; + + //printf("io_catalog_get_gen_count called.\n"); + + if ( !genCount ) + return kIOReturnBadArgument; + + *genCount = gIOCatalogue->getGenerationCount(); + + return kIOReturnSuccess; +} + +/* Routine io_catalog_module_loaded */ +kern_return_t is_io_catalog_module_loaded( + mach_port_t master_port, + io_name_t name) +{ + if( master_port != master_device_port) + return kIOReturnNotPrivileged; + + //printf("io_catalog_module_loaded called. 
name %s\n", name); + + if ( !name ) + return kIOReturnBadArgument; + + gIOCatalogue->moduleHasLoaded(name); + + return kIOReturnSuccess; +} + +kern_return_t is_io_catalog_reset( + mach_port_t master_port, + int flag) +{ + if( master_port != master_device_port) + return kIOReturnNotPrivileged; + + switch ( flag ) { + case kIOCatalogResetDefault: + gIOCatalogue->reset(); + break; + + default: + return kIOReturnBadArgument; + } + + return kIOReturnSuccess; +} + +kern_return_t iokit_user_client_trap(io_object_t userClientRef, UInt32 index, void *p1, void *p2, void *p3, void *p4, void *p5, void *p6) +{ + kern_return_t result = kIOReturnBadArgument; + IOUserClient *userClient; + + if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task(userClientRef)))) { + IOExternalTrap *trap; + IOService *target = NULL; + + trap = userClient->getTargetAndTrapForIndex(&target, index); + + if (trap && target) { + IOTrap func; + + func = trap->func; + + if (func) { + result = (target->*func)(p1, p2, p3, p4, p5, p6); + } + } + + userClient->release(); + } + + return result; +} + +}; /* extern "C" */ + +OSMetaClassDefineReservedUnused(IOUserClient, 0); +OSMetaClassDefineReservedUnused(IOUserClient, 1); +OSMetaClassDefineReservedUnused(IOUserClient, 2); +OSMetaClassDefineReservedUnused(IOUserClient, 3); +OSMetaClassDefineReservedUnused(IOUserClient, 4); +OSMetaClassDefineReservedUnused(IOUserClient, 5); +OSMetaClassDefineReservedUnused(IOUserClient, 6); +OSMetaClassDefineReservedUnused(IOUserClient, 7); +OSMetaClassDefineReservedUnused(IOUserClient, 8); +OSMetaClassDefineReservedUnused(IOUserClient, 9); +OSMetaClassDefineReservedUnused(IOUserClient, 10); +OSMetaClassDefineReservedUnused(IOUserClient, 11); +OSMetaClassDefineReservedUnused(IOUserClient, 12); +OSMetaClassDefineReservedUnused(IOUserClient, 13); +OSMetaClassDefineReservedUnused(IOUserClient, 14); +OSMetaClassDefineReservedUnused(IOUserClient, 15); + diff --git a/iokit/Kernel/IOWorkLoop.cpp 
b/iokit/Kernel/IOWorkLoop.cpp new file mode 100644 index 000000000..99205ee24 --- /dev/null +++ b/iokit/Kernel/IOWorkLoop.cpp @@ -0,0 +1,430 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + +HISTORY + 1998-7-13 Godfrey van der Linden(gvdl) + Created. 
+*/ +#include +#include +#include +#include +#include + +#define super OSObject + +OSDefineMetaClassAndStructors(IOWorkLoop, OSObject); + +// Block of unused functions intended for future use +OSMetaClassDefineReservedUnused(IOWorkLoop, 0); +OSMetaClassDefineReservedUnused(IOWorkLoop, 1); +OSMetaClassDefineReservedUnused(IOWorkLoop, 2); +OSMetaClassDefineReservedUnused(IOWorkLoop, 3); +OSMetaClassDefineReservedUnused(IOWorkLoop, 4); +OSMetaClassDefineReservedUnused(IOWorkLoop, 5); +OSMetaClassDefineReservedUnused(IOWorkLoop, 6); +OSMetaClassDefineReservedUnused(IOWorkLoop, 7); + +enum IOWorkLoopState { kLoopRestart = 0x1, kLoopTerminate = 0x2 }; +static inline void SETP(void *addr, unsigned int flag) + { unsigned int *num = (unsigned int *) addr; *num |= flag; } +static inline void CLRP(void *addr, unsigned int flag) + { unsigned int *num = (unsigned int *) addr; *num &= ~flag; } +static inline bool ISSETP(void *addr, unsigned int flag) + { unsigned int *num = (unsigned int *) addr; return (*num & flag) != 0; } + +#define fFlags loopRestart + +void IOWorkLoop::launchThreadMain(void *self) +{ + register thread_t mythread = current_thread(); + + // Make sure that this thread always has a kernel stack + stack_privilege(mythread); + thread_set_cont_arg((int) self); + threadMainContinuation(); +} + +bool IOWorkLoop::init() +{ + // The super init and gateLock allocation MUST be done first + if ( !super::init() ) + return false; + + if ( !(gateLock = IORecursiveLockAlloc()) ) + return false; + + if ( !(workToDoLock = IOSimpleLockAlloc()) ) + return false; + + controlG = IOCommandGate:: + commandGate(this, (IOCommandGate::Action) &IOWorkLoop::_maintRequest); + if ( !controlG ) + return false; + + IOSimpleLockInit(workToDoLock); + workToDo = false; + + // Point the controlGate at the workLoop. Usually addEventSource + // does this automatically. The problem is in this case addEventSource + // uses the control gate and it has to be bootstrapped. 
+ controlG->setWorkLoop(this); + if (addEventSource(controlG) != kIOReturnSuccess) + return false; + + workThread = IOCreateThread(launchThreadMain, (void *) this); + if (!workThread) + return false; + + return true; +} + +IOWorkLoop * +IOWorkLoop::workLoop() +{ + IOWorkLoop *me = new IOWorkLoop; + + if (me && !me->init()) { + me->free(); + return 0; + } + + return me; +} + +// Free is called twice: +// First when the atomic retainCount transitions from 1 -> 0 +// Secondly when the work loop itself is commiting hari kari +// Hence the each leg of the free must be single threaded. +void IOWorkLoop::free() +{ + if (workThread) { + IOInterruptState is; + + // If we are here then we must be trying to shut down this work loop + // in this case disable all of the event source, mark the loop for + // as terminating and wakeup the work thread itself and return + // Note: we hold the gate across the entire operation mainly for the + // benefit of our event sources so we can disable them cleanly. + closeGate(); + + disableAllEventSources(); + + is = IOSimpleLockLockDisableInterrupt(workToDoLock); + SETP(&fFlags, kLoopTerminate); + thread_wakeup_one((void *) &workToDo); + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + + openGate(); + } + else /* !workThread */ { + IOEventSource *event, *next; + + for (event = eventChain; event; event = next) { + next = event->getNext(); + event->setWorkLoop(0); + event->setNext(0); + event->release(); + } + eventChain = 0; + + // Either we have a partial initialisation to clean up + // or we the workThread itself is performing hari-kari. + // either way clean up all of our resources and return. 
+ + if (controlG) { + controlG->release(); + controlG = 0; + } + + if (workToDoLock) { + IOSimpleLockFree(workToDoLock); + workToDoLock = 0; + } + + if (gateLock) { + IORecursiveLockFree(gateLock); + gateLock = 0; + } + + super::free(); + } +} + +IOReturn IOWorkLoop::addEventSource(IOEventSource *newEvent) +{ + return controlG->runCommand((void *) mAddEvent, (void *) newEvent); +} + +IOReturn IOWorkLoop::removeEventSource(IOEventSource *toRemove) +{ + return controlG->runCommand((void *) mRemoveEvent, (void *) toRemove); +} + +void IOWorkLoop::enableAllEventSources() const +{ + IOEventSource *event; + + for (event = eventChain; event; event = event->getNext()) + event->enable(); +} + +void IOWorkLoop::disableAllEventSources() const +{ + IOEventSource *event; + + for (event = eventChain; event; event = event->getNext()) + if (event != controlG) // Don't disable the control gate + event->disable(); +} + +void IOWorkLoop::enableAllInterrupts() const +{ + IOEventSource *event; + + for (event = eventChain; event; event = event->getNext()) + if (OSDynamicCast(IOInterruptEventSource, event)) + event->enable(); +} + +void IOWorkLoop::disableAllInterrupts() const +{ + IOEventSource *event; + + for (event = eventChain; event; event = event->getNext()) + if (OSDynamicCast(IOInterruptEventSource, event)) + event->disable(); +} + +#if KDEBUG +#define IOTimeClientS() \ +do { \ + IOTimeStampStart(IODBG_WORKLOOP(IOWL_CLIENT), \ + (unsigned int) this, (unsigned int) event); \ +} while(0) + +#define IOTimeClientE() \ +do { \ + IOTimeStampEnd(IODBG_WORKLOOP(IOWL_CLIENT), \ + (unsigned int) this, (unsigned int) event); \ +} while(0) + +#define IOTimeWorkS() \ +do { \ + IOTimeStampStart(IODBG_WORKLOOP(IOWL_WORK), (unsigned int) this); \ +} while(0) + +#define IOTimeWorkE() \ +do { \ + IOTimeStampEnd(IODBG_WORKLOOP(IOWL_WORK),(unsigned int) this); \ +} while(0) + +#else /* !KDEBUG */ + +#define IOTimeClientS() +#define IOTimeClientE() +#define IOTimeWorkS() +#define IOTimeWorkE() + 
+#endif /* KDEBUG */ + +void IOWorkLoop::threadMainContinuation() +{ + IOWorkLoop* self; + self = (IOWorkLoop *) thread_get_cont_arg(); + + self->threadMain(); +} + +void IOWorkLoop::threadMain() +{ + CLRP(&fFlags, kLoopRestart); + + for (;;) { + bool more; + IOInterruptState is; + + IOTimeWorkS(); + + closeGate(); + if (ISSETP(&fFlags, kLoopTerminate)) + goto exitThread; + + do { + workToDo = more = false; + for (IOEventSource *event = eventChain; event; event = event->getNext()) { + + IOTimeClientS(); + more |= event->checkForWork(); + IOTimeClientE(); + + if (ISSETP(&fFlags, kLoopTerminate)) + goto exitThread; + else if (fFlags & kLoopRestart) { + CLRP(&fFlags, kLoopRestart); + continue; + } + } + } while (more); + + IOTimeWorkE(); + + openGate(); + + is = IOSimpleLockLockDisableInterrupt(workToDoLock); + if ( !ISSETP(&fFlags, kLoopTerminate) && !workToDo) { + assert_wait((void *) &workToDo, false); + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + +#if defined (__i386__) + thread_block(0); + continue; +#else + thread_set_cont_arg((int) this); + thread_block(&threadMainContinuation); +#endif + /* NOTREACHED */ + } + + // At this point we either have work to do or we need + // to commit suicide. 
But no matter + // Clear the simple lock and retore the interrupt state + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + if (workToDo) + continue; + else + break; + } + +exitThread: + workThread = 0; // Say we don't have a loop and free ourselves + free(); + IOExitThread(0); +} + +IOThread IOWorkLoop::getThread() const +{ + return workThread; +} + +bool IOWorkLoop::onThread() const +{ + return (IOThreadSelf() == workThread); +} + +bool IOWorkLoop::inGate() const +{ + return IORecursiveLockHaveLock(gateLock); +} + +// Internal APIs used by event sources to control the thread +void IOWorkLoop::signalWorkAvailable() +{ + if (workToDoLock) { + IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); + workToDo = true; + thread_wakeup_one((void *) &workToDo); + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + } +} + +void IOWorkLoop::openGate() +{ + IORecursiveLockUnlock(gateLock); +} + +void IOWorkLoop::closeGate() +{ + IORecursiveLockLock(gateLock); +} + +bool IOWorkLoop::tryCloseGate() +{ + return IORecursiveLockTryLock(gateLock) != 0; +} + +int IOWorkLoop::sleepGate(void *event, UInt32 interuptibleType) +{ + return IORecursiveLockSleep(gateLock, event, interuptibleType); +} + +void IOWorkLoop::wakeupGate(void *event, bool oneThread) +{ + IORecursiveLockWakeup(gateLock, event, oneThread); +} + +IOReturn IOWorkLoop::_maintRequest(void *inC, void *inD, void *, void *) +{ + maintCommandEnum command = (maintCommandEnum) (vm_address_t) inC; + IOEventSource *inEvent = (IOEventSource *) inD; + IOReturn res = kIOReturnSuccess; + + switch (command) + { + case mAddEvent: + SETP(&fFlags, kLoopRestart); + inEvent->retain(); + inEvent->setWorkLoop(this); + inEvent->setNext(0); + + if (!eventChain) + eventChain = inEvent; + else { + IOEventSource *event, *next; + + for (event = eventChain; (next = event->getNext()); event = next) + ; + event->setNext(inEvent); + } + break; + + case mRemoveEvent: + if (eventChain == inEvent) + eventChain = 
inEvent->getNext(); + else { + IOEventSource *event, *next; + + event = eventChain; + while ((next = event->getNext()) && next != inEvent) + event = next; + + if (!next) { + res = kIOReturnBadArgument; + break; + } + event->setNext(inEvent->getNext()); + } + + inEvent->setWorkLoop(0); + inEvent->setNext(0); + inEvent->release(); + SETP(&fFlags, kLoopRestart); + break; + + default: + return kIOReturnUnsupported; + } + + return res; +} diff --git a/iokit/Kernel/PMmisc.cpp b/iokit/Kernel/PMmisc.cpp new file mode 100644 index 000000000..0ab16cfbe --- /dev/null +++ b/iokit/Kernel/PMmisc.cpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +extern "C" { +extern void kprintf(const char *, ...); +} + +static char rootDomain[ ] = "IOPMrootDomain"; +static char displayDevice[ ] = "IODisplayWrangler"; +static bool rootRegistered; +static bool displayRegistered; +static IOService * root; +static IOService * display; + +void IOPMLog(const char * who,unsigned long event,unsigned long param1, unsigned long param2) +{ +// kprintf("%s %02d %08x %08x\n",who,event,param1,param2); +} + + +void IOPMRegisterDevice(const char * who, IOService * theDevice) +{ + + if ( strcmp(rootDomain,who) == 0 ) { // root power domain is registering + theDevice->youAreRoot(); + rootRegistered = true; + root = theDevice; + if ( displayRegistered ) { + root->addChild ( display ); + } + } + else{ + if ( strcmp(displayDevice,who) == 0 ) { // somebody else is registering + displayRegistered = true; // save pointer to display wrangler + display = theDevice; + } + if ( rootRegistered ) { // if not root domain, then it's + root->addChild ( theDevice ); // one of its children + } + } +} + diff --git a/iokit/Kernel/i386/IOAsmSupport.s b/iokit/Kernel/i386/IOAsmSupport.s new file mode 100644 index 000000000..4483a5d1f --- /dev/null +++ b/iokit/Kernel/i386/IOAsmSupport.s @@ -0,0 +1,39 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + + +/* + * Seemingly unused references from cpp statically initialized objects. + */ + +.globl .constructors_used +.globl .destructors_used +.data + .align 2 + .long 0x11223344 +.constructors_used: + .long 0xdeadbeef + .long 0x11223344 +.destructors_used: + .long 0xdeadbeef + .long 0x11223344 diff --git a/iokit/Kernel/i386/IOSharedLock.s b/iokit/Kernel/i386/IOSharedLock.s new file mode 100644 index 000000000..de84a9173 --- /dev/null +++ b/iokit/Kernel/i386/IOSharedLock.s @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/Kernel/ppc/IOAsmSupport.s b/iokit/Kernel/ppc/IOAsmSupport.s new file mode 100644 index 000000000..ecc34366b --- /dev/null +++ b/iokit/Kernel/ppc/IOAsmSupport.s @@ -0,0 +1,114 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + /* + * Copyright (c) 1997-1998 Apple Computer, Inc. + * + * + * HISTORY + * + * sdouglas 22 Oct 97 - first checked in from DriverServices + * sdouglas 28 Jul 98 - start IOKit + */ + +#include + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; ENTRY functionName +; +; Assembly directives to begin an exported function. 
+; +; Takes: functionName - name of the exported function +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +.macro ENTRY + .text + .align 2 + .globl $0 +$0: +.endmacro + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +/* + +OSStatus SynchronizeIO( void ) + +*/ + + ENTRY __eSynchronizeIO + + li r0, 0 + eieio + li r3, 0 + blr + +/* + +OSStatus CallTVector_NoRecover( + void * p1, void * p2, void * p3, void * p4, void * p5, void * p6, // r3-8 + LogicalAddress entry ) // r9 + +*/ + +#define PARAM_SIZE 24 +#define FM_SIZE 64 +#define FM_LR_SAVE 8 +#define FM_TOC_SAVE 20 + + ENTRY _CallTVector + +#if 1 + stw r2, FM_TOC_SAVE(r1) + lwz r0, 0(r9) + lwz r2, 4(r9) + mtspr ctr, r0 + bctr + +#else + mflr r0 + stw r0, FM_LR_SAVE(r1) + stw r2, FM_TOC_SAVE(r1) + + stwu r1, -(PARAM_SIZE+FM_SIZE)(r1) + + lwz r2, 4(r9) + lwz r0, 0(r9) + mtspr lr, r0 + mfspr r12, lr + blrl + + addi r1, r1,(PARAM_SIZE+FM_SIZE) + lwz r2, FM_TOC_SAVE(r1) + lwz r0, FM_LR_SAVE(r1) + mtlr r0 + blr +#endif + +/* + * Seemingly unused references from cpp statically initialized objects. + */ + +.globl .constructors_used +.constructors_used = 0 +.globl .destructors_used +.destructors_used = 0 diff --git a/iokit/Kernel/ppc/IODBDMA.cpp b/iokit/Kernel/ppc/IODBDMA.cpp new file mode 100644 index 000000000..5a3d9d38e --- /dev/null +++ b/iokit/Kernel/ppc/IODBDMA.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * + * HISTORY + * + * Simon Douglas 10 Nov 97 + * - first checked in, mostly from machdep/ppc/dbdma.c + * + */ + + +#include +#include + +void +IODBDMAStart( volatile IODBDMAChannelRegisters *registers, volatile IODBDMADescriptor *physicalDescPtr) +{ + + if( ((int) physicalDescPtr) & 0xf) + panic("IODBDMAStart: unaligned IODBDMADescriptor"); + + eieio(); + IOSetDBDMAInterruptSelect(registers, 0xff000000); // clear out interrupts + + IOSetDBDMAChannelControl( registers, + IOClearDBDMAChannelControlBits( kdbdmaRun | kdbdmaPause | kdbdmaFlush | kdbdmaWake | kdbdmaDead | kdbdmaActive )); + + while( IOGetDBDMAChannelStatus( registers) & kdbdmaActive) + eieio(); + + IOSetDBDMACommandPtr( registers, (unsigned int) physicalDescPtr); + + IOSetDBDMAChannelControl( registers, + IOSetDBDMAChannelControlBits( kdbdmaRun | kdbdmaWake )); + +} + +void +IODBDMAStop( volatile IODBDMAChannelRegisters *registers) +{ + + IOSetDBDMAChannelControl( registers, + IOClearDBDMAChannelControlBits( kdbdmaRun ) + | IOSetDBDMAChannelControlBits( kdbdmaFlush )); + + while( IOGetDBDMAChannelStatus( registers) & ( kdbdmaActive | kdbdmaFlush)) + eieio(); + +} + +void +IODBDMAFlush( volatile IODBDMAChannelRegisters *registers) +{ + + IOSetDBDMAChannelControl( registers, + IOSetDBDMAChannelControlBits( kdbdmaFlush )); + + while( IOGetDBDMAChannelStatus( registers) & kdbdmaFlush) + eieio(); + +} + +void +IODBDMAReset( volatile IODBDMAChannelRegisters 
*registers) +{ + + IOSetDBDMAChannelControl( registers, + IOClearDBDMAChannelControlBits( kdbdmaRun | kdbdmaPause | kdbdmaFlush | kdbdmaWake | kdbdmaDead | kdbdmaActive )); + + while( IOGetDBDMAChannelStatus( registers) & kdbdmaActive) + eieio(); + +} + +void +IODBDMAContinue( volatile IODBDMAChannelRegisters *registers) +{ + + IOSetDBDMAChannelControl( registers, + IOClearDBDMAChannelControlBits( kdbdmaPause | kdbdmaDead ) + | IOSetDBDMAChannelControlBits( kdbdmaRun | kdbdmaWake )); + +} + +void +IODBDMAPause( volatile IODBDMAChannelRegisters *registers) +{ + + IOSetDBDMAChannelControl( registers, + IOSetDBDMAChannelControlBits( kdbdmaPause )); + + while( IOGetDBDMAChannelStatus( registers) & kdbdmaActive) + eieio(); + +} + +IOReturn +IOAllocatePhysicallyContiguousMemory( + unsigned int /* size */, unsigned int /* options */, + IOVirtualAddress * /* logical */, + IOPhysicalAddress * /* physical */ ) +{ +#if 0 + IOReturn err; + vm_offset_t mem; + + if( (size > 4096) || (options)) + return( kIOReturnUnsupported); + + mem = (vm_offset_t) IOMalloc( size); + *logical = (IOVirtualAddress) mem; + + if( mem) { + err = IOPhysicalFromVirtual( IOVmTaskSelf(), mem, (vm_offset_t *) physical); + if( err) + IOFree( (char *)mem, size); + + } else { + err = kIOReturnNoMemory; + *physical = 0; + } + + return( err); +#endif /* 0 */ + return (kIOReturnUnsupported); +} + +IOReturn +IOFreePhysicallyContiguousMemory( IOVirtualAddress * logical, unsigned int size) +{ + IOFree( logical, size); + return( kIOReturnSuccess); +} diff --git a/iokit/Kernel/ppc/IOSharedLock.s b/iokit/Kernel/ppc/IOSharedLock.s new file mode 100644 index 000000000..de84a9173 --- /dev/null +++ b/iokit/Kernel/ppc/IOSharedLock.s @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/Kernel/printPlist b/iokit/Kernel/printPlist new file mode 100644 index 000000000..02b586301 --- /dev/null +++ b/iokit/Kernel/printPlist @@ -0,0 +1,80 @@ +void printPlist(OSObject * plist, UInt32 indent = 0) { + const OSMetaClass * typeID; + OSCollectionIterator * iterator; + OSString * key; + OSObject * value; + unsigned int i; + + if (!plist) { + IOLog("error! 
null plist\n"); + return; + } + + typeID = OSTypeIDInst(plist); + + if (typeID == OSTypeID(OSDictionary)) { + + IOLog("{\n"); + OSDictionary * dict = OSDynamicCast(OSDictionary, plist); + iterator = OSCollectionIterator::withCollection(dict); + while ( (key = OSDynamicCast(OSString, iterator->getNextObject())) ) { + for (i = 0; i < indent + 4; i++) { + IOLog(" "); + } + IOLog("%s = ", key->getCStringNoCopy()); + value = dict->getObject(key); + printPlist(value, indent + 4); + } + + for (i = 0; i < indent; i++) { + IOLog(" "); + } + IOLog("}\n"); + + } else if (typeID == OSTypeID(OSArray)) { + + IOLog("{\n"); + + OSArray * array = OSDynamicCast(OSArray, plist); + iterator = OSCollectionIterator::withCollection(array); + while ( (value = iterator->getNextObject()) ) { + for (i = 0; i < indent + 4; i++) { + IOLog(" "); + } + printPlist(value, indent + 4); + } + + for (i = 0; i < indent; i++) { + IOLog(" "); + } + IOLog("}\n"); + + } else if (typeID == OSTypeID(OSString) || typeID == OSTypeID(OSSymbol)) { + + OSString * string = OSDynamicCast(OSString, plist); + IOLog("\"%s\"\n", string->getCStringNoCopy()); + + } else if (typeID == OSTypeID(OSNumber)) { + + OSNumber * number = OSDynamicCast(OSNumber, plist); + UInt32 numberValue = number->unsigned32BitValue(); + IOLog("0x%lx (%ld base 10)\n", numberValue, numberValue); + + } else if (typeID == OSTypeID(OSBoolean)) { + + OSBoolean * boolObj = OSDynamicCast(OSBoolean, plist); + IOLog("%s\n", boolObj->isTrue() ? "true" : "false"); + + } else if (typeID == OSTypeID(OSData)) { + + IOLog("(binary data)\n"); + + } else { + + IOLog("(object of class %s)\n", plist->getMetaClass()->getClassName()); + + } + + return; +} + diff --git a/iokit/KernelConfigTables.cpp b/iokit/KernelConfigTables.cpp new file mode 100644 index 000000000..8070a2eaf --- /dev/null +++ b/iokit/KernelConfigTables.cpp @@ -0,0 +1,740 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/* This list is used in IOStartIOKit.cpp to declare fake kmod_info + * structs for kext dependencies that are built into the kernel. + * See the SystemKEXT project for fuller information on these + * fake or pseudo-kexts, including their compatible versions. 
+ */ +const char * gIOKernelKmods = +"{ + 'com.apple.kernel' = '1.0.0b1'; + 'com.apple.kernel.bsd' = '1.0.0b1'; + 'com.apple.kernel.iokit' = '1.0.0b1'; + 'com.apple.kernel.libkern' = '1.0.0b1'; + 'com.apple.kernel.mach' = '1.0.0b1'; + 'com.apple.iokit.IOADBFamily' = '1.0.0b1'; + 'com.apple.iokit.IOCDStorageFamily' = '1.0.0b1'; + 'com.apple.iokit.IODVDStorageFamily' = '1.0.0b1'; + 'com.apple.iokit.IOGraphicsFamily' = '1.0.0b1'; + 'com.apple.iokit.IOHIDSystem' = '1.0.0b1'; + 'com.apple.iokit.IONDRVSupport' = '1.0.0b1'; + 'com.apple.iokit.IONetworkingFamily' = '1.0.0b1'; + 'com.apple.iokit.IOPCIFamily' = '1.0.0b1'; + 'com.apple.iokit.IOStorageFamily' = '1.0.0b1'; + 'com.apple.iokit.IOSystemManagementFamily' = '1.0.0b1'; +}"; + + +const char * gIOKernelConfigTables = +"( + { + 'IOClass' = IOPanicPlatform; + 'IOProviderClass' = IOPlatformExpertDevice; + 'IOProbeScore' = '-1'; + }, + { + 'IOClass' = IOHIDSystem; + 'IOProviderClass' = IOResources; + 'IOResourceMatch' = IOKit; + 'IOMatchCategory' = IOHID; + }, + { + 'IOClass' = IOBSDConsole; + 'IOProviderClass' = IOResources; + 'IOResourceMatch' = IOBSD; + 'IOMatchCategory' = IOBSDConsole; + }, + { + 'IOClass' = IODisplayWrangler; + 'IOProviderClass' = IOResources; + 'IOResourceMatch' = IOKit; + 'IOMatchCategory' = IOGraphics; + }, + { + 'IOClass' = IOApplePartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 2000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Whole' = .true.; + }; + 'Content Mask' = 'Apple_partition_scheme'; + }, + { + 'IOClass' = IOApplePartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 2000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Content Hint' = 'CD_ROM_Mode_1'; + }; + 'Content Mask' = 'Apple_partition_scheme'; + }, + { + 'IOClass' = IOApplePartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 2000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Content Hint' = 'CD_ROM_Mode_2_Form_1'; + }; + 'Content 
Mask' = 'Apple_partition_scheme'; + }, + { + 'IOClass' = IONeXTPartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 1000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Whole' = .true.; + }; + 'Content Mask' = 'NeXT_partition_scheme'; + 'Content Table' = + { + '4.4BSD' = 'Apple_UFS'; + '4.1BSD' = 'Unknown'; + '4.2BSD' = 'Unknown'; + '4.4LFS' = 'Unknown'; + }; + }, + { + 'IOClass' = IONeXTPartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 1000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Content Hint' = 'CD_ROM_Mode_1'; + }; + 'Content Mask' = 'NeXT_partition_scheme'; + 'Content Table' = + { + '4.4BSD' = 'Apple_UFS'; + '4.1BSD' = 'Unknown'; + '4.2BSD' = 'Unknown'; + '4.4LFS' = 'Unknown'; + }; + }, + { + 'IOClass' = IONeXTPartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 1000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Content Hint' = 'Apple_Rhapsody_UFS'; + }; + 'Content Mask' = 'NeXT_partition_scheme'; + 'Content Table' = + { + '4.4BSD' = 'Apple_UFS'; + '4.1BSD' = 'Unknown'; + '4.2BSD' = 'Unknown'; + '4.4LFS' = 'Unknown'; + }; + }, + { + 'IOClass' = IOFDiskPartitionScheme; + 'IOProviderClass' = IOMedia; + 'IOProbeScore' = 3000:32; + 'IOMatchCategory' = IOStorage; + 'IOPropertyMatch' = + { + 'Whole' = .true.; + }; + 'Content Mask' = 'FDisk_partition_scheme'; + 'Content Table' = + { + '0x01' = 'DOS_FAT_12'; + '0x04' = 'DOS_FAT_16_S'; + '0x05' = 'DOS_Extended'; + '0x06' = 'DOS_FAT_16'; + '0x07' = 'Windows_NTFS'; + '0x0A' = 'Boot_Manager'; + '0x0B' = 'DOS_FAT_32'; + '0x0C' = 'Windows_FAT_32'; + '0x0E' = 'Windows_FAT_16'; + '0x0F' = 'Windows_Extended'; + '0x11' = 'DOS_FAT_12_Hidden'; + '0x14' = 'DOS_FAT_16_S_Hidden'; + '0x16' = 'DOS_FAT_16_Hidden'; + '0x17' = 'Windows_NTFS_Hidden'; + '0x1B' = 'DOS_FAT_32_Hidden'; + '0x1C' = 'Windows_FAT_32_Hidden'; + '0x1E' = 'Windows_FAT_16_Hidden'; + '0x63' = 'UNIX'; + '0x82' = 'Linux_Swap'; + '0x83' = 'Linux_Ext2FS'; + '0x84' = 
'Hibernation'; + '0x85' = 'Linux_Extended'; + '0x86' = 'Windows_FAT_16_FT'; + '0x87' = 'Windows_NTFS_FT'; + '0xA5' = 'FreeBSD'; + '0xA6' = 'OpenBSD'; + '0xA7' = 'NeXTSTEP'; + '0xA8' = 'Apple_UFS'; + '0xA9' = 'NetBSD'; + '0xAB' = 'Apple_Boot'; + '0xAF' = 'Apple_HFS'; + '0xB7' = 'BSDI'; + '0xB8' = 'BSDI_Swap'; + '0xC6' = 'Windows_FAT_16_FT_Corrupt'; + '0xC7' = 'Windows_NTFS_FT_Corrupt'; + '0xEB' = 'BeOS'; + '0xF2' = 'DOS_Secondary'; + '0xFD' = 'Linux_RAID'; + }; + }, + { + 'IOClass' = IOCDPartitionScheme; + 'IOProviderClass' = IOCDMedia; + 'IOMatchCategory' = IOStorage; + 'Content Mask' = 'CD_partition_scheme'; + 'Content Table' = + { + '0x01' = 'CD_DA'; + '0x02' = 'CD_ROM_Mode_1'; + '0x03' = 'CD_ROM_Mode_2'; + '0x04' = 'CD_ROM_Mode_2_Form_1'; + '0x05' = 'CD_ROM_Mode_2_Form_2'; + }; + }, + { + 'IOClass' = IOMediaBSDClient; + 'IOProviderClass' = IOResources; + 'IOMatchCategory' = IOMediaBSDClient; + 'IOResourceMatch' = IOBSD; + }, + { + 'IOClass' = AppleDDCDisplay; + 'IOProviderClass' = IODisplayConnect; + 'IOProbeScore' = 2000:32; + appleDDC = <00000082 00ff2140 0000008c 00043147 " + "00000096 00053140 00000098 0003314c " + "0000009a 0002314f 0000009c 00ff3159 " + "000000aa 000d494f 000000b4 0001fffc " + "000000b6 00004540 000000b8 000f454c " + "000000ba 000e454f 000000bc 00ff4559 " + "000000be 000b6140 000000c8 000a614a " + "000000cc 0009614f 000000d0 00ff6159 " + "000000d2 00ff614f 000000dc 0017ffc4 " + "000000fa 00ff814f 00000104 00ff8180 " + "00000106 0008818f 0000010c 00ff8199 " + "00000118 00ffa940 0000011a 00ffa945 " + "0000011c 00ffa94a 0000011e 00ffa94f " + "00000120 00ffa954 00000121 00ffa959 " + "00000128 00ffc140 0000012a 00ffc14f " + "0000012c 00ffc940 0000012e 00ffc94f " + "00000130 00ffd140 00000132 00ffd14f " + "000001fe 00ffd1c0 00000208 00ffd1cc>; + overrides = ( { ID = 0x06105203:32; + additions = <0000010c>; }, + { ID = 0x0610049c:32; + deletions = <000000b6>; }, + { ID = 0x0610059c:32; + deletions = <000000b6>; }, + { ID = 0x0610069c:32; + 
deletions = <000000b6>; }, + { ID = 0x0610079c:32; + deletions = <000000b6>; }, + { ID = 0x0610089c:32; + deletions = <000000b6>; }, + { ID = 0x06101092:32; + additions = <00000121>; }, + { ID = 0x0610029d:32; + additions = <0000009e>; } ); + }, + { + 'IOClass' = AppleG3SeriesDisplay; + 'IOProviderClass' = IODisplayConnect; + 'IOProbeScore' = 1500:32; + }, + { + 'IOClass' = AppleSenseDisplay; + 'IOProviderClass' = IODisplayConnect; + 'IOProbeScore' = 1000:32; + }, + { + 'IOClass' = AppleNoSenseDisplay; + 'IOProviderClass' = IODisplayConnect; + 'IOProbeScore' = 500:32; + }, + { + 'IOClass' = IOBlockStorageDriver; + 'IOProviderClass' = IOBlockStorageDevice; + 'IOPropertyMatch' = + { + 'device-type' = 'Generic'; + }; + }, + { + 'IOClass' = IOSCSIHDDrive; + 'IOProviderClass' = IOSCSIDevice; + }, + { + 'IOClass' = IOCDBlockStorageDriver; + 'IOProviderClass' = IOCDBlockStorageDevice; + 'IOPropertyMatch' = + { + 'device-type' = 'CDROM'; + }; + }, + { + 'IOClass' = IOSCSICDDrive; + 'IOProviderClass' = IOSCSIDevice; + }, + { + 'IOClass' = IODVDBlockStorageDriver; + 'IOProviderClass' = IODVDBlockStorageDevice; + 'IOPropertyMatch' = + { + 'device-type' = 'DVD'; + }; + }, + { + 'IOClass' = IOSCSIDVDDrive; + 'IOProviderClass' = IOSCSIDevice; + }, +" +#if defined(__i386__) +" + { + 'IOClass' = IOATAHDDrive; + 'IOProviderClass' = IOATADevice; + }, + { + 'IOClass' = IOATAPIHDDrive; + 'IOProviderClass' = IOATADevice; + }, + { + 'IOClass' = IOATAPICDDrive; + 'IOProviderClass' = IOATADevice; + }, + { + 'IOClass' = IOATAPIDVDDrive; + 'IOProviderClass' = IOATADevice; + }, +" +#endif +" + { + 'IOClass' = IONetworkStack; + 'IOProviderClass' = IOResources; + 'IOResourceMatch' = IOBSD; + 'IOMatchCategory' = IONetworkStack; + } +" +#ifdef PPC +" , + { + 'IOClass' = AppleCPU; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = 'cpu'; + 'IOProbeScore' = 100:32; + }, +" +#if 0 +" + { + 'IOClass' = PowerSurgePE; + 'IOProviderClass' = IOPlatformExpertDevice; + 'IONameMatch' = ('AAPL,7300', 
'AAPL,7500', 'AAPL,8500', 'AAPL,9500'); + 'IOProbeScore' = 10000:32; + }, +" +#endif +" +" +#if 0 +" + { + 'IOClass' = PowerStarPE; + 'IOProviderClass' = IOPlatformExpertDevice; + 'IONameMatch' = ('AAPL,3400/2400', 'AAPL,3500'); + 'IOProbeScore' = 10000:32; + }, +" +#endif +" + { + 'IOClass' = GossamerPE; + 'IOProviderClass' = IOPlatformExpertDevice; + 'IONameMatch' = ('AAPL,Gossamer', 'AAPL,PowerMac G3', 'AAPL,PowerBook1998', 'iMac,1', 'PowerMac1,1', 'PowerMac1,2', 'PowerBook1,1'); + 'IOProbeScore' = 10000:32; + }, + { + 'IOClass' = GossamerCPU; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = 'cpu'; + 'IOProbeScore' = 1000:32; + }, +" +#if 0 +" + { + 'IOClass' = PowerExpressPE; + 'IOProviderClass' = IOPlatformExpertDevice; + 'IONameMatch' = 'AAPL,9700'; + 'IOProbeScore' = 10000:32; + 'senses' = <00000000 00000000 00000000 00000000 " + "00000000 00000000 00000000 00000000 " + "00000000 00000000 00000000 00000000 " + "00000000 00000000 00000000 00000000 " + "00000000 00000000 00000000 00000000 " + "00000000 00000000 00000000 00000000 " + "00000000 00000000 00000001 00000001 " + "00000001 00000001 00000001 00000001 " + "00000001 00000001 00000001 00000001 " + "00000001 00000001>; + }, +" +#endif +" + { + 'IOClass' = AppleGracklePCI; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = ('grackle', 'MOT,PPC106'); + }, + { + 'IOClass' = AppleMacRiscPCI; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = ('bandit', 'uni-north'); + }, + { + 'IOClass' = AppleMacRiscAGP; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = 'uni-north'; + 'IOProbeScore' = 1000:32; + 'IOAGPFlags' = 1:32; + }, + { + 'IOClass' = AppleMacRiscVCI; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = chaos; + }, + { + 'IOClass' = IOPCI2PCIBridge; + 'IOProviderClass' = IOPCIDevice; + 'IONameMatch' = 'pci-bridge'; + }, + { + 'IOClass' = IOPCI2PCIBridge; + 'IOProviderClass' = IOPCIDevice; + 'IOPCIMatch' = '0x00261011'; + }, +" +#if 0 +" + { + 'IOClass' = GrandCentral; 
+ 'IOProviderClass' = IOPCIDevice; + 'IONameMatch' = gc; + 'IOProbeScore' = 2000:32; + }, +" +#endif +" + { + 'IOClass' = OHare; + 'IOProviderClass' = IOPCIDevice; + 'IONameMatch' = ('ohare', 'pci106b,7'); + }, + { + 'IOClass' = AppleNMI; + 'IOProviderClass' = AppleMacIODevice; + 'IONameMatch' = 'programmer-switch'; + }, + { + 'IOClass' = AppleCuda; + 'IOProviderClass' = AppleVIADevice; + 'IONameMatch' = cuda; + }," +#if 0 +" { + 'IOClass' = ApplePMU; + 'IOProviderClass' = AppleVIADevice; + 'IONameMatch' = pmu; + }," +#endif + "{ + 'IOClass' = IOPMUADBController; + 'IOProviderClass' = AppleMacIODevice; + 'IONameMatch' = adb; + }, + { + 'IOClass' = AppleNVRAM; + 'IOProviderClass' = AppleMacIODevice; + 'IONameMatch' = nvram; + }, + { + 'IOClass' = IOADBBus; + 'IOProviderClass' = IOADBController; + }, + { + 'IOClass' = AppleADBKeyboard; + 'IOProviderClass' = IOADBDevice; + 'ADB Match' = '2'; + }, + { + 'IOClass' = AppleADBButtons; + 'IOProviderClass' = IOADBDevice; + 'ADB Match' = '7'; + }, + { + 'IOClass' = AppleADBMouseType1; + 'IOProviderClass' = IOADBDevice; + 'ADB Match' = '3'; + 'IOProbeScore' = 5000:32; + }, + { + 'IOClass' = AppleADBMouseType2; + 'IOProviderClass' = IOADBDevice; + 'ADB Match' = '3'; + 'IOProbeScore' = 10000:32; + }, + { + 'IOClass' = AppleADBMouseType4; + 'IOProviderClass' = IOADBDevice; + 'ADB Match' = '3-01'; + 'IOProbeScore' = 20000:32; + 'accltpad' = <" + "0000b000" + "74706164000700000000000100010000" + "00010000000020000005000123c60001" + "00000002fe3c0003800000055719000b" + "000000082ebf001a0000000a3ff1002e" + "0000000050000005000123c600010000" + "0002de8a000410000005682c000fe000" + "00081ebf00226000000a1f680037e000" + "000080000007000123c6000100000001" + "c378000280000002ac150004a0000004" + "5402000dc00000061285001bb0000007" + "e68b002d1000000a44eb004a90000000" + "b000000900012429000100000001b37c" + "0002800000025e5f000580000003bf2c" + "000f00000004bc350017a00000061e38" + "0027b00000075d4500385000000899a2" + 
"004bb000000a91050066b0000000e000" + "000a00011855000130000001b2280002" + "f000000253690006a00000036f4a0010" + "d00000046aab001f100000054aab002d" + "500000062555003f400000071aab0051" + "c00000089aab00663000000a8aab007d" + "700000010000000b0001185500013000" + "0001b228000310000002536900071000" + "00032f4a001180000003acfb001c8800" + "00043aab0028e0000004caab00384000" + "000555550048f00000063aab005c9000" + "0007aaab00731000000a3aab008b7000" + ">; + }, + { + 'IOClass' = IONDRVFramebuffer; + 'IOProviderClass' = IOPCIDevice; + 'IONameMatch' = display; + 'IOProbeScore' = 20000:32; + 'IOMatchCategory' = IOFramebuffer; + }, + { + 'IOClass' = IONDRVFramebuffer; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = display; + 'IOProbeScore' = 20000:32; + 'IOMatchCategory' = IOFramebuffer; + }, + { + 'IOClass' = IOBootFramebuffer; + 'IOProviderClass' = IOPCIDevice; + 'IONameMatch' = display; + 'IOMatchCategory' = IOFramebuffer; + }, + { + 'IOClass' = AppleADBDisplay; + 'IOProbeScore' = 1000:32; + 'IOProviderClass' = IOADBDevice; + 'ADB Match' = '*-c0'; + modes850 = <000000dc 0000008c 0000009a 0000009e " + "000000aa 000000d2 000000d0 000000fa " + "00000106 0000010c 00000118 0000011a " + "0000011c 0000011e>; + modes750 = <000000dc 0000008c 000000aa 000000d2 " + "000000fa 00000106 00000118>; + modesStudio = <000000d2 0000008c 000000aa>; + adb2Modes = modes750; + adb3Modes = modes850; + adb4Modes = modes850; + adb5Modes = modes750; + adb6Modes = modesStudio; + }, + { + 'IOClass' = BMacEnet; + 'IOProviderClass' = AppleMacIODevice; + 'IONameMatch' = ('bmac', 'bmac+'); + }, +" +#if 0 +" + { + 'IOClass' = Sym8xxSCSIController; + 'IOProviderClass' = IOPCIDevice; + 'IONameMatch' = ('apple53C8xx', 'Apple53C875Card', 'ATTO,ExpressPCIProLVD', 'ATTO,ExpressPCIProUL2D', 'ATTO,ExpressPCIProUL3D'); + }, +" +#endif +" +" +#if 0 +" + { + 'IOClass' = MaceEnet; + 'IOProviderClass' = AppleMacIODevice; + 'IONameMatch' = mace; + }, + { + 'IOClass' = Intel82557; + 'IOProviderClass' = IOPCIDevice; 
+ 'IOPCIMatch' = '0x12298086'; + 'IODefaultMedium' = '00000020'; + 'Flow Control' = .true.; + 'Verbose' = .false.; + }, +" +#endif /* 0 */ +" + { + 'IOClass' = IOKDP; + 'IOProviderClass' = IOKernelDebugger; + 'IOMatchCategory' = IOKDP; + 'IOEnableKDP' = .true.; + 'IODriverMatch' = + { + 'IOClass' = ('BMacEnet', 'UniNEnet', 'MaceEnet'); + }; + 'IODriverNubMatch' = + { + 'built-in' = <>; + }; + } +" +#endif /* PPC */ +#ifdef i386 +" , + { + 'IOClass' = AppleI386PlatformExpert; + 'IOProviderClass' = IOPlatformExpertDevice; + 'top-level' = " + /* set of dicts to make into nubs */ + "[ + { IOName = cpu; }, + { IOName = intel-pic; }, + { IOName = intel-clock; }, + { IOName = ps2controller; }, + { IOName = pci; }, + { IOName = display; 'AAPL,boot-display' = Yes; } + ]; + }, + { + 'IOClass' = AppleI386CPU; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = cpu; + 'IOProbeScore' = 100:32; + }, + { + 'IOClass' = AppleIntelClassicPIC; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = intel-pic; + }, + { + 'IOClass' = AppleIntelClock; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = intel-clock; + }, + { + 'IOClass' = AppleI386PCI; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = pci; + }, + { + 'IOClass' = ApplePS2Controller; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = ps2controller; + }, + { + 'IOClass' = ApplePS2Keyboard; + 'IOProviderClass' = ApplePS2KeyboardDevice; + }, + { + 'IOClass' = ApplePS2Mouse; + 'IOProviderClass' = ApplePS2MouseDevice; + }, + { + 'IOClass' = IOBootFramebuffer; + 'IOProviderClass' = IOPlatformDevice; + 'IONameMatch' = display; + }, + { + 'IOClass' = AppleATAPIIX; + 'IOProviderClass' = IOPCIDevice; + 'IOPCIMatch' = '0x12308086 0x70108086 0x71118086 0x24118086 0x24218086 0x244a8086 0x244b8086'; + 'IOMatchCategory' = AppleATAPIIXChannel0; + }, + { + 'IOClass' = AppleATAPIIX; + 'IOProviderClass' = IOPCIDevice; + 'IOPCIMatch' = '0x12308086 0x70108086 0x71118086 0x24118086 0x24218086 0x244a8086 0x244b8086'; 
+ 'IOMatchCategory' = AppleATAPIIXChannel1; + }, + { + 'IOClass' = IOPCI2PCIBridge; + 'IOProviderClass' = IOPCIDevice; + 'IOPCIClassMatch' = '0x06040000&0xffff0000'; + }, + { + 'IOClass' = Intel82557; + 'IOProviderClass' = IOPCIDevice; + 'IOPCIMatch' = '0x12298086'; + 'IODefaultMedium' = '00000020'; + 'Flow Control' = .true.; + 'Verbose' = .false.; + }, + { + 'IOClass' = IOKDP; + 'IOProviderClass' = IOKernelDebugger; + 'IOMatchCategory' = IOKDP; + 'IOEnableKDP' = .true.; + 'IODriverMatch' = + { + 'IOClass' = ('Intel82557', 'DEC21x4'); + }; + 'IODriverNubMatch' = + { + }; + } +" +#endif /* i386 */ +")"; diff --git a/iokit/Makefile b/iokit/Makefile new file mode 100644 index 000000000..1657e4a11 --- /dev/null +++ b/iokit/Makefile @@ -0,0 +1,30 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = include IOKit +INSTINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS} +INSTINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS} + + +EXPINC_SUBDIRS = IOKit +EXPINC_SUBDIRS_PPC = ${EXPINC_SUBDIRS} +EXPINC_SUBDIRS_I386 = ${EXPINC_SUBDIRS} + + +SETUP_SUBDIRS = conf + +COMP_SUBDIRS = conf + +INST_SUBDIRS = User + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/Tests/TestCollections.cpp b/iokit/Tests/TestCollections.cpp new file mode 100644 index 000000000..ea7f556f0 --- /dev/null +++ b/iokit/Tests/TestCollections.cpp @@ -0,0 +1,941 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if DEBUG +#include "Tests.h" + +#include +#include +#include +#include +#include +#include + +void testArray() +{ + bool res = true; + void *spaceCheck, *spaceCheck2 , *spaceCheck3; + int i, j, count, count2; + OSObject *cache[numStrCache], *str, *sym; + OSArray *array1, *array2; + + // Do first test without memory leak tests to initialise the metaclass + array1 = OSArray::withCapacity(1); + TEST_ASSERT('A', "0a", array1); + if (array1) + array1->release(); + + // Grow the symbol pool to maximum + for (i = 0; i < numStrCache; i++) + cache[i] = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + for (i = 0; i < numStrCache; i++) + cache[i]->release(); + + // Create and destroy an array + spaceCheck = checkPointSpace(); + array1 = OSArray::withCapacity(1); + TEST_ASSERT('A', "1a", array1); + if (array1) { + TEST_ASSERT('A', "1b", !array1->getCount()); + TEST_ASSERT('A', "1c", 1 == array1->getCapacity()); + TEST_ASSERT('A', "1d", 1 == array1->getCapacityIncrement()); + TEST_ASSERT('A', "1e", 4 == array1->setCapacityIncrement(4)); + TEST_ASSERT('A', "1f", 4 == array1->getCapacityIncrement()); + TEST_ASSERT('A', "1g", 8 == array1->ensureCapacity(5)); + + spaceCheck2 = checkPointSpace(); + cache[0] = IOString::withCStringNoCopy(strCache[0]); + + spaceCheck3 = checkPointSpace(); + TEST_ASSERT('A', "1h", array1->setObject(cache[0])); + TEST_ASSERT('A', "1i", 
cache[0] == array1->getObject(0)); + cache[0]->release(); + res = res && checkSpace("(A)1j", spaceCheck3, 0); + + TEST_ASSERT('A', "1k", 1 == array1->getCount()); + array1->flushCollection(); + TEST_ASSERT('A', "1l", !array1->getCount()); + res = res && checkSpace("(A)1m", spaceCheck2, 0); + + array1->release(); + } + res = res && checkSpace("(A)1", spaceCheck, 0); + + // Check the creation of a sizable OSArray from an array of IOObjects + // Also check indexing into the array. + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) + cache[i] = OSString::withCStringNoCopy(strCache[i]); + array1 = OSArray::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('A', "2a", array1); + for (i = 0; i < numStrCache; i++) + cache[i]->release(); + if (array1) { + TEST_ASSERT('A', "2b", numStrCache == (int) array1->getCount()); + TEST_ASSERT('A', "2c", numStrCache == (int) array1->getCapacity()); + TEST_ASSERT('A', "2d", + numStrCache == (int) array1->getCapacityIncrement()); + + for (i = 0; (str = array1->getObject(i)); i++) { + if (str != cache[i]) { + verPrintf(("testArray(A) test 2e%d failed\n", i)); + res = false; + } + } + TEST_ASSERT('A', "2f", numStrCache == i); + array1->release(); + } + res = res && checkSpace("(A)2", spaceCheck, 0); + + // Test array creation from another array by both the setObject method + // and the withArray factory. 
And test __takeObject code first + // with tail removal then with head removal + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) + cache[i] = OSString::withCStringNoCopy(strCache[i]); + array1 = OSArray::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('A', "3a", array1); + for (i = 0; i < numStrCache; i++) + cache[i]->release(); + array2 = 0; + if (array1) { + array2 = OSArray::withCapacity(1); + TEST_ASSERT('A', "3b", array2); + TEST_ASSERT('A', "3c", !array2->getCount()); + TEST_ASSERT('A', "3d", array2->setObject(array1)); + TEST_ASSERT('A', "3e", array1->getCount() == array2->getCount()); + } + if (array2) { + count = 0; + TEST_ASSERT('A', "3f", numStrCache == (int) array2->getCount()); + for (i = array2->getCount(); (str = array2->__takeObject(--i)); ) { + if (str != cache[i]) { + verPrintf(("testArray(A) test 3g%d failed\n", i)); + res = false; + } + count += ((int) array2->getCount() == i); + str->release(); + } + TEST_ASSERT('A', "3h", count == numStrCache); + TEST_ASSERT('A', "3i", -1 == i); + TEST_ASSERT('A', "3j", !array2->getCount()); + + spaceCheck2 = checkPointSpace(); + array2->flushCollection(); + res = res && checkSpace("(A)3k", spaceCheck2, 0); + + array2->release(); + array2 = 0; + } + if (array1) { + array2 = OSArray::withArray(array1, numStrCache - 1); + TEST_ASSERT('A', "3l", !array2); + array2 = OSArray::withArray(array1, array1->getCount()); + TEST_ASSERT('A', "3m", array2); + array1->release(); + } + if (array2) { + count = 0; + TEST_ASSERT('A', "3o", numStrCache == (int) array2->getCount()); + for (i = 0; (str = array2->__takeObject(0)); i++) { + count += (str == cache[i]); + str->release(); + } + TEST_ASSERT('A', "3p", count == numStrCache); + TEST_ASSERT('A', "3q", !array2->getCount()); + array2->release(); + array2 = 0; + } + res = res && checkSpace("(A)3", spaceCheck, 0); + + // Test object replacement from one array to another + spaceCheck = checkPointSpace(); + array1 = 
OSArray::withCapacity(numStrCache); + TEST_ASSERT('A', "4a", array1); + if (array1) { + count = count2 = 0; + for (i = 0; i < numStrCache; i++) { + str = OSString::withCStringNoCopy(strCache[i]); + count += array1->setObject(str); + count2 += (str == array1->lastObject()); + str->release(); + } + TEST_ASSERT('A', "4b", numStrCache == (int) array1->getCount()); + TEST_ASSERT('A', "4c", count == numStrCache); + TEST_ASSERT('A', "4d", count2 == numStrCache); + } + array2 = OSArray::withCapacity(1); + TEST_ASSERT('A', "4e", array2); + if (array2) { + count = count2 = 0; + str = (OSObject *) OSSymbol::withCStringNoCopy(strCache[0]); + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += array2->setObject(sym, 0); + count2 += (str == array2->lastObject()); + sym->release(); + } + str->release(); + TEST_ASSERT('A', "4f", numStrCache == (int) array2->getCount()); + TEST_ASSERT('A', "4g", count == numStrCache); + TEST_ASSERT('A', "4h", count2 == numStrCache); + } + if (array1 && array2) { + + count = count2 = 0; + for (i = array1->getCount() - 1; (sym = array2->__takeObject(0)); i--) { + str = array1->replaceObject(sym, i); + count += (str != 0); + count2 += (sym != str); + if (str) + str->release(); + if (sym) + sym->release(); + } + TEST_ASSERT('A', "4k", numStrCache == (int) array1->getCount()); + TEST_ASSERT('A', "4l", count == numStrCache); + TEST_ASSERT('A', "4m", count2 == numStrCache); + array1->release(); + array2->release(); + } + else { + if (array1) array1->release(); + if (array2) array2->release(); + } + res = res && checkSpace("(A)4", spaceCheck, 0); + + // Test array duplicate removal + spaceCheck = checkPointSpace(); + array1 = OSArray::withCapacity(numStrCache); + TEST_ASSERT('A', "5a", array1); + if (array1) { + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += array1->setObject(sym); + sym->release(); + } + TEST_ASSERT('A', "5b", 
numStrCache == (int) array1->getCount()); + + // remove duplicates + for (i = 0; (sym = array1->getObject(i)); ) + if (sym->getRetainCount() == 1) + i++; + else { + //sym = array1->__takeObject(i); + //sym->release(); + array1->removeObject(i); + } + TEST_ASSERT('A', "5c", numStrCache != (int) array1->getCount()); + + // check to see that all symbols are really there + for (count = 0, i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + for (count2 = false, j = 0; (str = array1->getObject(j)); j++) + if (str == sym) { + count2 = true; + break; + } + count += count2; + sym->release(); + } + TEST_ASSERT('A', "5c", count == numStrCache); + array1->release(); + } + res = res && checkSpace("(S)5", spaceCheck, 0); + + if (res) + verPrintf(("testArray: All OSArray Tests passed\n")); + else + logPrintf(("testArray: Some OSArray Tests failed\n")); +} + +void testSet() +{ + bool res = true; + void *spaceCheck, *spaceCheck2 , *spaceCheck3; + int i, count, count2; + OSObject *cache[numStrCache], *str, *sym; + OSSet *set1, *set2; + OSArray *array; + + // Do first test without memory leak tests to initialise the metaclass + set1 = OSSet::withCapacity(1); + TEST_ASSERT('S', "0a", set1); + if (set1) + set1->release(); + + // Grow the symbol pool to maximum + for (i = 0; i < numStrCache; i++) + cache[i] = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + for (i = 0; i < numStrCache; i++) + cache[i]->release(); + + // Create and destroy an set + spaceCheck = checkPointSpace(); + set1 = OSSet::withCapacity(1); + TEST_ASSERT('S', "1a", set1); + if (set1) { + TEST_ASSERT('S', "1b", !set1->getCount()); + TEST_ASSERT('S', "1c", 1 == set1->getCapacity()); + TEST_ASSERT('S', "1d", 1 == set1->getCapacityIncrement()); + TEST_ASSERT('S', "1e", 4 == set1->setCapacityIncrement(4)); + TEST_ASSERT('S', "1f", 4 == set1->getCapacityIncrement()); + TEST_ASSERT('S', "1g", 8 == set1->ensureCapacity(5)); + + spaceCheck2 = checkPointSpace(); + cache[0] = 
IOString::withCStringNoCopy(strCache[0]); + + spaceCheck3 = checkPointSpace(); + TEST_ASSERT('S', "1h", set1->setObject(cache[0])); + TEST_ASSERT('S', "1i", set1->containsObject(cache[0])); + TEST_ASSERT('S', "1j", cache[0] == set1->getAnyObject()); + cache[0]->release(); + res = res && checkSpace("(S)1k", spaceCheck3, 0); + + TEST_ASSERT('S', "1l", 1 == set1->getCount()); + set1->flushCollection(); + TEST_ASSERT('S', "1m", !set1->getCount()); + res = res && checkSpace("(S)1n", spaceCheck2, 0); + + set1->release(); + } + res = res && checkSpace("(S)1", spaceCheck, 0); + + // Check the creation of a sizable OSSet from an set of IOObjects + // Also check member test of set. + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) + cache[i] = OSString::withCStringNoCopy(strCache[i]); + set1 = OSSet::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('S', "2a", set1); + for (i = 0; i < numStrCache; i++) + cache[i]->release(); + if (set1) { + TEST_ASSERT('S', "2b", numStrCache == (int) set1->getCount()); + TEST_ASSERT('S', "2c", numStrCache == (int) set1->getCapacity()); + TEST_ASSERT('S', "2d", + numStrCache == (int) set1->getCapacityIncrement()); + + count = 0; + for (i = set1->getCount(); --i >= 0; ) + count += set1->member(cache[i]); + + TEST_ASSERT('S', "2e", numStrCache == count); + set1->release(); + } + res = res && checkSpace("(S)2", spaceCheck, 0); + + // Test set creation from another set by both the setObject method + // and the withArray factory. 
And test __takeObject code first + // with tail removal then with head removal + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) + cache[i] = OSString::withCStringNoCopy(strCache[i]); + set1 = OSSet::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('S', "3a", set1); + for (i = 0; i < numStrCache; i++) + cache[i]->release(); + set2 = 0; + if (set1) { + set2 = OSSet::withCapacity(set1->getCount()); + TEST_ASSERT('S', "3b", set2); + TEST_ASSERT('S', "3c", !set2->getCount()); + TEST_ASSERT('S', "3d", set2->setObject(set1)); + TEST_ASSERT('S', "3e", set1->getCount() == set2->getCount()); + } + if (set2) { + TEST_ASSERT('S', "3f", numStrCache == (int) set2->getCount()); + count = count2 = 0; + while ( (str = set2->getAnyObject()) ) { + count += set2->__takeObject(str); + count2 += set1->member(str); + str->release(); + } + TEST_ASSERT('S', "3g", !set2->getCount()); + TEST_ASSERT('S', "3h", numStrCache == count); + TEST_ASSERT('S', "3i", numStrCache == count2); + + spaceCheck2 = checkPointSpace(); + set2->flushCollection(); + res = res && checkSpace("(S)3j", spaceCheck2, 0); + + set2->release(); + set2 = 0; + } + if (set1) { + set2 = OSSet::withSet(set1, numStrCache - 1); + TEST_ASSERT('S', "3k", !set2); + set2 = OSSet::withSet(set1, set1->getCount()); + TEST_ASSERT('S', "3l", set2); + set1->release(); + } + if (set2) { + TEST_ASSERT('S', "3m", numStrCache == (int) set2->getCount()); + i = count = count2 = 0; + while ( (str = set2->getAnyObject()) ) { + count += set2->__takeObject(str); + count2 += (cache[i++] == str); + str->release(); + } + TEST_ASSERT('S', "3n", !set2->getCount()); + TEST_ASSERT('S', "3o", numStrCache == count); + TEST_ASSERT('S', "3p", numStrCache == count2); + + set2->release(); + set2 = 0; + } + res = res && checkSpace("(S)3", spaceCheck, 0); + + // Test duplicate removal + spaceCheck = checkPointSpace(); + set2 = 0; + set1 = OSSet::withCapacity(numStrCache); + TEST_ASSERT('S', "4a", set1); + if (set1) { + count = 0; + 
for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += set1->setObject(sym); + sym->release(); + } + TEST_ASSERT('S', "4b", numStrCache != (int) set1->getCount()); + TEST_ASSERT('S', "4c", count == (int) set1->getCount()); + + count = count2 = 0; + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += set1->member(sym); + count2 += sym->getRetainCount(); + sym->release(); + } + TEST_ASSERT('S', "4d", count == numStrCache); + TEST_ASSERT('S', "4e", count2 == numStrCache * 2); + + set2 = OSSet::withSet(set1, 2 * set1->getCount()); + } + TEST_ASSERT('S', "4f", set2); + if (set2) { + set2->setObject(set1); + TEST_ASSERT('S', "4g", set1->getCount() == set2->getCount()); + set1->release(); + set2->release(); + } + res = res && checkSpace("(S)4", spaceCheck, 0); + + // Test array duplicate removal + spaceCheck = checkPointSpace(); + array = OSArray::withCapacity(numStrCache); + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += array->setObject(sym); + sym->release(); + } + set1 = OSSet::withArray(array, numStrCache); + TEST_ASSERT('S', "5a", set1); + if (set1) { + TEST_ASSERT('S', "5b", array->getCount() != set1->getCount()); + array->release(); + + count = count2 = set1->getCount(); + while ( (sym = set1->getAnyObject()) ) { + count -= set1->__takeObject(sym); + count2 -= sym->getRetainCount(); + sym->release(); + } + TEST_ASSERT('S', "5c", !count); + TEST_ASSERT('S', "5d", !count2); + set1->release(); + } + res = res && checkSpace("(S)5", spaceCheck, 0); + + if (res) + verPrintf(("testSet: All OSSet Tests passed\n")); + else + logPrintf(("testSet: Some OSSet Tests failed\n")); +} + +void testDictionary() +{ + bool res = true; + void *spaceCheck, *spaceCheck2, *spaceCheck3; + OSObject *cache[numStrCache]; + OSString *str; + const OSSymbol *symCache[numStrCache], *sym; + OSDictionary *dict1, *dict2; + 
int i, numSymbols, count1, count2; + + // Do first test without memory leak tests to initialise the metaclass + dict1 = OSDictionary::withCapacity(1); + TEST_ASSERT('D', "0a", dict1); + if (dict1) + dict1->release(); + + // Grow the symbol pool to maximum + for (i = 0; i < numStrCache; i++) + symCache[i] = OSSymbol::withCStringNoCopy(strCache[i]); + for (i = 0; i < numStrCache; i++) + symCache[i]->release(); + + // Create and destroy a dictionary + spaceCheck = checkPointSpace(); + dict1 = OSDictionary::withCapacity(1); + TEST_ASSERT('D', "1a", dict1); + if (dict1) { + TEST_ASSERT('D', "1b", !dict1->getCount()); + TEST_ASSERT('D', "1c", 1 == dict1->getCapacity()); + TEST_ASSERT('D', "1d", 1 == dict1->getCapacityIncrement()); + TEST_ASSERT('D', "1e", 4 == dict1->setCapacityIncrement(4)); + TEST_ASSERT('D', "1f", 4 == dict1->getCapacityIncrement()); + TEST_ASSERT('D', "1g", 8 == dict1->ensureCapacity(5)); + + spaceCheck2 = checkPointSpace(); + sym = OSSymbol::withCStringNoCopy(strCache[0]); + + spaceCheck3 = checkPointSpace(); + TEST_ASSERT('D', "1h", dict1->setObject((OSObject *) sym, sym)); + TEST_ASSERT('D', "1i", (OSObject *) sym == dict1->getObject(sym)); + sym->release(); + TEST_ASSERT('D', "1i", 2 == sym->getRetainCount()); + res = res && checkSpace("(D)1j", spaceCheck3, 0); + + TEST_ASSERT('D', "1k", 1 == dict1->getCount()); + dict1->flushCollection(); + TEST_ASSERT('D', "1l", !dict1->getCount()); + res = res && checkSpace("(D)1m", spaceCheck2, 0); + + dict1->release(); + } + res = res && checkSpace("(D)1", spaceCheck, 0); + + // Check the creation of a sizable OSDictionary from an array of IOObjects + // Also check indexing into the array. 
+ spaceCheck = checkPointSpace(); + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) + symCache[numSymbols++] = sym; + else + sym->release(); + } + dict1 = OSDictionary::withObjects( + (OSObject **) symCache, symCache, numSymbols, numSymbols); + TEST_ASSERT('D', "2a", dict1); + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) + count1 += (symCache[i]->getRetainCount() == 3); + TEST_ASSERT('D', "2b", count1 == numSymbols); + if (dict1) { + TEST_ASSERT('D', "2c", numSymbols == (int) dict1->getCount()); + TEST_ASSERT('D', "2d", numSymbols == (int) dict1->getCapacity()); + TEST_ASSERT('D', "2e", + numSymbols == (int) dict1->getCapacityIncrement()); + + for (i = dict1->getCount(); --i >= 0; ) { + str = (OSString *) dict1->getObject(symCache[i]); + if (str != (OSString *) symCache[i]) { + verPrintf(("testDictionary(D) test 2f%d failed\n", i)); + res = false; + } + } + dict1->release(); + } + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (symCache[i]->getRetainCount() == 1); + symCache[i]->release(); + } + TEST_ASSERT('D', "2g", count1 == numSymbols); + res = res && checkSpace("(D)2", spaceCheck, 0); + + // Check the creation of a sizable Dictionary from an array of IOStrings + // Also check searching dictionary use OSString for a key. 
+ spaceCheck = checkPointSpace(); + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) { + cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); + symCache[numSymbols] = sym; + numSymbols++; + } + else + sym->release(); + } + dict1 = OSDictionary::withObjects((OSObject **) symCache, + (OSString **) cache, + numSymbols, numSymbols); + TEST_ASSERT('D', "3a", dict1); + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (symCache[i]->getRetainCount() == 3); + count2 += (cache[i]->getRetainCount() == 1); + } + TEST_ASSERT('D', "3b", count1 == numSymbols); + TEST_ASSERT('D', "3c", count2 == numSymbols); + if (dict1) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + str = (OSString *) cache[i]; + count1 += (symCache[i] == (const OSSymbol *) dict1->getObject(str)); + count2 += (symCache[i]->getRetainCount() == 3); + } + TEST_ASSERT('D', "3d", count1 == numSymbols); + TEST_ASSERT('D', "3e", count2 == numSymbols); + + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + const char *cStr = ((OSString *) cache[i])->getCStringNoCopy(); + + count1 += (symCache[i] == (const OSSymbol *) dict1->getObject(cStr)); + count2 += (symCache[i]->getRetainCount() == 3); + } + TEST_ASSERT('D', "3f", count1 == numSymbols); + TEST_ASSERT('D', "3g", count2 == numSymbols); + + dict1->release(); + } + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (symCache[i]->getRetainCount() == 1); + count2 += (cache[i]->getRetainCount() == 1); + symCache[i]->release(); + cache[i]->release(); + } + TEST_ASSERT('D', "3h", count1 == numSymbols); + res = res && checkSpace("(D)3", spaceCheck, 0); + + // Check the creation of a small dictionary then grow it one item at a time + // Create a new dictionary from the old dictionary. + // Finally remove each item permanently. 
+ spaceCheck = checkPointSpace(); + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) { + cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); + symCache[numSymbols] = sym; + numSymbols++; + } + else + sym->release(); + } + dict2 = 0; + dict1 = OSDictionary::withCapacity(1); + TEST_ASSERT('D', "4a", dict1); + if (dict1) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + sym = symCache[i]; + count1 += ((OSObject *) sym == dict1->setObject((OSObject *) sym, + sym->getCStringNoCopy())); + count2 += (sym->getRetainCount() == 3); + } + TEST_ASSERT('D', "4b", numSymbols == (int) dict1->getCount()); + TEST_ASSERT('D', "4c", numSymbols == count1); + TEST_ASSERT('D', "4d", numSymbols == count2); + + dict2 = OSDictionary::withDictionary(dict1, numSymbols-1); + TEST_ASSERT('D', "4b", !dict2); + dict2 = OSDictionary::withDictionary(dict1, numSymbols); + } + TEST_ASSERT('D', "4e", dict2); + if (dict2) { + dict1->release(); dict1 = 0; + + TEST_ASSERT('D', "4f", numSymbols == (int) dict2->getCount()); + + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + OSObject *replacedObject; + + sym = symCache[i]; + str = (OSString *) cache[i]; + replacedObject = dict2->setObject(str, str); + count1 += ((OSString *) sym == replacedObject); + replacedObject->release(); + count2 += (sym->getRetainCount() == 2); + str->release(); + } + TEST_ASSERT('D', "4g", numSymbols == count1); + TEST_ASSERT('D', "4h", numSymbols == count2); + + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + sym = symCache[i]; + str = (OSString *) cache[i]; + count1 += (str == dict2->__takeObject(sym)); + str->release(); + count2 += (sym->getRetainCount() == 1); + sym->release(); + } + TEST_ASSERT('D', "4i", numSymbols == count1); + TEST_ASSERT('D', "4j", numSymbols == count2); + TEST_ASSERT('D', "4k", !dict2->getCount()); + dict2->release(); dict2 = 0; + } + else if (dict1) + dict1->release(); + 
res = res && checkSpace("(D)4", spaceCheck, 0); + + if (res) + verPrintf(("testDictionary: All OSDictionary Tests passed\n")); + else + logPrintf(("testDictionary: Some OSDictionary Tests failed\n")); +} + +void testIterator() +{ + bool res = true; + void *spaceCheck; + OSObject *cache[numStrCache]; + OSString *str = 0; + const OSSymbol *symCache[numStrCache], *sym; + OSDictionary *dict; + OSSet *set; + OSArray *array, *bigReturn; + OSCollectionIterator *iter1, *iter2; + int i, numSymbols, count1, count2, count3; + + // Setup symbol and string pools + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) { + cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); + symCache[numSymbols] = sym; + numSymbols++; + } + else + sym->release(); + } + + // Test the array iterator + spaceCheck = checkPointSpace(); + iter1 = iter2 = 0; + array = OSArray::withCapacity(numSymbols); + TEST_ASSERT('I', "1a", array); + if (array) { + count1 = count2 = 0; + for (i = numSymbols; --i >= 0; ) + count1 += array->setObject(cache[i], 0); + TEST_ASSERT('I', "1b", count1 == numSymbols); + + iter1 = OSCollectionIterator::withCollection(array); + iter2 = OSCollectionIterator::withCollection(array); + } + TEST_ASSERT('I', "1c", iter1); + TEST_ASSERT('I', "1d", iter2); + if (iter1 && iter2) { + count1 = count2 = count3 = 0; + for (i = 0; (str = (IOString *) iter1->getNextObject()); i++) { + bigReturn = iter2->nextEntries(); + count1 += (bigReturn->getCount() == 1); + count2 += (cache[i] == bigReturn->getObject(0)); + count3 += (cache[i] == str); + } + TEST_ASSERT('I', "1e", count1 == numSymbols); + TEST_ASSERT('I', "1f", count2 == numSymbols); + TEST_ASSERT('I', "1g", count3 == numSymbols); + TEST_ASSERT('I', "1h", iter1->valid()); + TEST_ASSERT('I', "1i", iter2->valid()); + + iter1->reset(); + str = (OSString *) array->__takeObject(0); + array->setObject(str, 0); + str->release(); + TEST_ASSERT('I', "1j", 
!iter1->getNextObject()); + TEST_ASSERT('I', "1k", !iter1->valid()); + + iter1->reset(); + count1 = count2 = count3 = 0; + for (i = 0; ; i++) { + if (i & 1) + str = (OSString *) iter1->getNextObject(); + else if ( (bigReturn = iter1->nextEntries()) ) + str = (OSString *) bigReturn->getObject(0); + else + str = 0; + + if (!str) + break; + count1 += (cache[i] == str); + } + TEST_ASSERT('I', "1l", count1 == numSymbols); + TEST_ASSERT('I', "1m", i == numSymbols); + TEST_ASSERT('I', "1n", iter1->valid()); + + TEST_ASSERT('I', "1o", 3 == array->getRetainCount()); + array->release(); + } + + if (iter1) iter1->release(); + if (iter2) iter2->release(); + res = res && checkSpace("(I)1", spaceCheck, 0); + + // Test the set iterator + spaceCheck = checkPointSpace(); + iter1 = 0; + set = OSSet::withCapacity(numSymbols); + TEST_ASSERT('I', "2a", set); + if (set) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) + count1 += set->setObject(cache[i]); + TEST_ASSERT('I', "2b", count1 == numSymbols); + + iter1 = OSCollectionIterator::withCollection(set); + iter2 = OSCollectionIterator::withCollection(set); + } + TEST_ASSERT('I', "2c", iter1); + TEST_ASSERT('I', "2d", iter2); + if (iter1 && iter2) { + count1 = count2 = count3 = 0; + for (i = 0; (str = (IOString *) iter1->getNextObject()); i++) { + bigReturn = iter2->nextEntries(); + count1 += (bigReturn->getCount() == 1); + count2 += (cache[i] == bigReturn->getObject(0)); + count3 += (cache[i] == str); + } + TEST_ASSERT('I', "2e", count1 == numSymbols); + TEST_ASSERT('I', "2f", count2 == numSymbols); + TEST_ASSERT('I', "2g", count3 == numSymbols); + TEST_ASSERT('I', "2h", iter1->valid()); + TEST_ASSERT('I', "2i", iter2->valid()); + + iter1->reset(); + count1 = count2 = count3 = 0; + for (i = 0; ; i++) { + if (i & 1) + str = (OSString *) iter1->getNextObject(); + else if ( (bigReturn = iter1->nextEntries()) ) + str = (OSString *) bigReturn->getObject(0); + else + str = 0; + + if (!str) + break; + count1 += (cache[i] == str); 
+ } + TEST_ASSERT('I', "2l", count1 == numSymbols); + TEST_ASSERT('I', "2m", i == numSymbols); + TEST_ASSERT('I', "2n", iter1->valid()); + + iter1->reset(); + str = (OSString *) set->getAnyObject(); + (void) set->__takeObject(str); + set->setObject(str); + str->release(); + TEST_ASSERT('I', "2j", !iter1->getNextObject()); + TEST_ASSERT('I', "2k", !iter1->valid()); + + TEST_ASSERT('I', "2o", 3 == set->getRetainCount()); + set->release(); + } + + if (iter1) iter1->release(); + if (iter2) iter2->release(); + res = res && checkSpace("(I)2", spaceCheck, 0); + + // Test the dictionary iterator + spaceCheck = checkPointSpace(); + iter1 = 0; + dict = OSDictionary::withCapacity(numSymbols); + TEST_ASSERT('I', "3a", dict); + if (dict) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) + count1 += (0 != dict->setObject(cache[i], symCache[i])); + TEST_ASSERT('I', "3b", count1 == numSymbols); + + iter1 = OSCollectionIterator::withCollection(dict); + iter2 = OSCollectionIterator::withCollection(dict); + } + TEST_ASSERT('I', "3c", iter1); + TEST_ASSERT('I', "3d", iter2); + if (iter1 && iter2) { + count1 = count2 = count3 = 0; + for (i = 0; (sym = (const IOSymbol *) iter1->getNextObject()); i++) { + bigReturn = iter2->nextEntries(); + count1 += (bigReturn->getCount() == 2); + count2 += (cache[i] == bigReturn->getObject(1)); + count3 += (symCache[i] == sym); + } + TEST_ASSERT('I', "3e", count1 == numSymbols); + TEST_ASSERT('I', "3f", count2 == numSymbols); + TEST_ASSERT('I', "3g", count3 == numSymbols); + TEST_ASSERT('I', "3h", iter1->valid()); + TEST_ASSERT('I', "3i", iter2->valid()); + + iter1->reset(); + count1 = count2 = count3 = 0; + i = 0; + for (i = 0; ; i++) { + if (i & 1) { + sym = (const OSSymbol *) iter1->getNextObject(); + str = 0; + } + else if ( (bigReturn = iter1->nextEntries()) ) { + sym = (const OSSymbol *) bigReturn->getObject(0); + str = (OSString *) bigReturn->getObject(1); + } + else + sym = 0; + + if (!sym) + break; + + count1 += (symCache[i] == sym); 
+ count2 += (!str || cache[i] == str); + } + TEST_ASSERT('I', "3l", count1 == numSymbols); + TEST_ASSERT('I', "3m", count2 == numSymbols); + TEST_ASSERT('I', "3n", i == numSymbols); + TEST_ASSERT('I', "3o", iter1->valid()); + + iter1->reset(); + str = (OSString *) dict->__takeObject(symCache[numSymbols-1]); + dict->setObject(str, symCache[numSymbols-1]); + str->release(); + TEST_ASSERT('I', "3j", !iter1->getNextObject()); + TEST_ASSERT('I', "3k", !iter1->valid()); + + TEST_ASSERT('I', "3p", 3 == dict->getRetainCount()); + dict->release(); + } + + if (iter1) iter1->release(); + if (iter2) iter2->release(); + res = res && checkSpace("(I)3", spaceCheck, 0); + + count1 = count2 = count3 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (1 == cache[i]->getRetainCount()); + count2 += (1 == symCache[i]->getRetainCount()); + cache[i]->release(); + symCache[i]->release(); + } + TEST_ASSERT('I', "4a", count1 == numSymbols); + TEST_ASSERT('I', "4b", count2 == numSymbols); + + if (res) + verPrintf(("testIterator: All OSCollectionIterator Tests passed\n")); + else + logPrintf(("testIterator: Some OSCollectionIterator Tests failed\n")); +} + +#endif /* DEBUG */ diff --git a/iokit/Tests/TestContainers.cpp b/iokit/Tests/TestContainers.cpp new file mode 100644 index 000000000..6641fc6a7 --- /dev/null +++ b/iokit/Tests/TestContainers.cpp @@ -0,0 +1,470 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if DEBUG +#include "Tests.h" + +#include +#include +#include + +static const char testC00[] = "The quick brown fox jumps over the lazy dog. "; +static const char testC01[] = "The quick brown fox "; +static const char testC02[] = "jumps over the "; +static const char testC03[] = "lazy dog. \n"; +static const char testC04[] = "The "; +static const char testC05[] = "quick "; +static const char testC06[] = "brown "; +static const char testC07[] = "fox "; +static const char testC08[] = "jumps "; +static const char testC09[] = "over "; +static const char testC10[] = "the "; +static const char testC11[] = "lazy "; +static const char testC12[] = "dog. 
\n"; +static const char testC13[] = "Now is the time for all good " + "men to come to the aid of the party \n"; +static const char testC14[] = "Now is the time for "; +static const char testC15[] = "all good men to come "; +static const char testC16[] = "to the aid of the party \n"; +static const char testC17[] = "Now "; +static const char testC18[] = "is "; +static const char testC19[] = "the "; +static const char testC20[] = "time "; +static const char testC21[] = "for "; +static const char testC22[] = "all "; +static const char testC23[] = "good "; +static const char testC24[] = "men "; +static const char testC25[] = "to "; +static const char testC26[] = "come "; +static const char testC27[] = "to "; +static const char testC28[] = "the "; +static const char testC29[] = "aid "; +static const char testC30[] = "of "; +static const char testC31[] = "the "; +static const char testC32[] = "party. \n"; +static const char testC33[] = "Frank Burns eats worms. \n"; +static const char testC34[] = "Frank Burns "; +static const char testC35[] = "eats worms. \n"; +static const char testC36[] = "Frank "; +static const char testC37[] = "Burns "; +static const char testC38[] = "eats "; +static const char testC39[] = "worms. \n"; +static const char testC40[] = "Tired eyes? Stiff neck? Tight shoulders? " + "Aching back? The right moves can help " + "prevent these kinds of problem. "; +static const char testC41[] = "Tired eyes? Stiff neck? "; +static const char testC42[] = "Tight shoulders? Aching back? "; +static const char testC43[] = "The right moves can help prevent "; +static const char testC44[] = "these kinds of problem. "; +static const char testC45[] = "Tired "; +static const char testC46[] = "eyes? "; +static const char testC47[] = "Stiff "; +static const char testC48[] = "neck? "; +static const char testC49[] = "Tight "; +static const char testC50[] = "shoulders? "; +static const char testC51[] = "Aching "; +static const char testC52[] = "back? 
"; +static const char testC53[] = "The "; +static const char testC54[] = "right "; +static const char testC55[] = "moves "; +static const char testC56[] = "can "; +static const char testC57[] = "help "; +static const char testC58[] = "prevent "; +static const char testC59[] = "these "; +static const char testC60[] = "kinds "; +static const char testC61[] = "of "; +static const char testC62[] = "problem. "; + +const char *strCache[] = { + testC00, testC01, testC02, testC03, testC04, testC05, testC06, testC07, + testC08, testC09, testC10, testC11, testC12, testC13, testC14, testC15, + testC16, testC17, testC18, testC19, testC20, testC21, testC22, testC23, + testC24, testC25, testC26, testC27, testC28, testC29, testC30, testC31, + testC32, testC33, testC34, testC35, testC36, testC37, testC38, testC39, + testC40, testC41, testC42, testC43, testC44, testC45, testC46, testC47, + testC48, testC49, testC50, testC51, testC52, testC53, testC54, testC55, + testC56, testC57, testC58, testC59, testC60, testC61, testC62, +}; +const int numStrCache = ((int) (sizeof(strCache)/sizeof(strCache[0]))); + +void testData() +{ +#define DATA_SIZE_1 256 +#define DATA_SIZE_2 512 +#define DATA_SIZE_3 1024 +#define DATA_SIZE_4 8192 + + OSData *test1, *test2, *test3; + void *spaceCheck; + unsigned int len; + unsigned int i; + bool res = true; + unsigned short testData[DATA_SIZE_4/sizeof(short)], *cp; + + // very first test initialises the OSMetaClass cache. 
+ test1 = OSData::withCapacity(DATA_SIZE_1); + TEST_ASSERT('d', "0a", test1); + if (test1) + test1->release(); + + for (i = 0; i < sizeof(testData)/sizeof(short); i++) + testData[i] = (unsigned short) i; + + // Check empty data allocation + spaceCheck = checkPointSpace(); + test1 = OSData::withCapacity(DATA_SIZE_1); + TEST_ASSERT('d', "1a", test1); + if (test1) { + TEST_ASSERT('d', "1b", !test1->getLength()); + TEST_ASSERT('d', "1c", test1->getCapacity() == DATA_SIZE_1); + TEST_ASSERT('d', "1d", !test1->getBytesNoCopy()); + TEST_ASSERT('d', "1e", !test1->getBytesNoCopy(10, DATA_SIZE_1 - 10)); + TEST_ASSERT('d', "1f", test1->appendBytes(spaceCheck, 0)); + TEST_ASSERT('d', "1g", !test1->getLength()); + TEST_ASSERT('d', "1h", test1->getCapacity() == DATA_SIZE_1); + TEST_ASSERT('d', "1i", !test1->getBytesNoCopy()); + test1->release(); + } + res = res && checkSpace("(d)1", spaceCheck, 0); + + // Check appending to empty data allocation + spaceCheck = checkPointSpace(); + test1 = OSData::withCapacity(DATA_SIZE_1); + TEST_ASSERT('d', "2a", test1); + if (test1) { + TEST_ASSERT('d', "2b", !test1->getLength()); + TEST_ASSERT('d', "2c", !test1->getBytesNoCopy()); + TEST_ASSERT('d', "2d", test1->appendBytes(testData, DATA_SIZE_1)); + TEST_ASSERT('d', "2e", test1->getLength() == DATA_SIZE_1); + TEST_ASSERT('d', "2f", test1->getBytesNoCopy()); + cp = (unsigned short *) test1->getBytesNoCopy(); + for (i = 0; cp && i < (DATA_SIZE_1/sizeof(short)); i++) { + TEST_ASSERT('d', "2g", *cp++ == testData[i]); + if (*cp != testData[i]) + break; + } + TEST_ASSERT('d', "2h", test1->getBytesNoCopy(10, DATA_SIZE_1-10)); + cp = (unsigned short *) test1->getBytesNoCopy(10, DATA_SIZE_1 - 10); + for (i = 5; cp && i < (DATA_SIZE_1/sizeof(short)) - 5; i++) { + TEST_ASSERT('d', "2i", *cp++ == testData[i]); + if (*cp != testData[i]) + break; + } + TEST_ASSERT('d', "2j", test1->isEqualTo(testData, DATA_SIZE_1)); + test1->release(); + } + res = res && checkSpace("(d)2", spaceCheck, 0); + + // Check data 
allocation from some constant data + spaceCheck = checkPointSpace(); + test1 = OSData::withBytes(testData, sizeof(testData)); + TEST_ASSERT('d', "3a", test1); + if (test1) { + TEST_ASSERT('d', "3b", test1->getLength() == sizeof(testData)); + TEST_ASSERT('d', "3c", test1->getCapacity() == sizeof(testData)); + TEST_ASSERT('d', "3d", test1->getBytesNoCopy()); + TEST_ASSERT('d', "3e", test1->getBytesNoCopy(10, sizeof(testData)-10)); + TEST_ASSERT('d', "3f", test1->appendBytes(spaceCheck, 0)); + TEST_ASSERT('d', "3g", test1->getLength() == sizeof(testData)); + TEST_ASSERT('d', "3h", test1->getCapacity() == sizeof(testData)); + TEST_ASSERT('d', "3i", test1->getBytesNoCopy()); + TEST_ASSERT('d', "3j", test1->getBytesNoCopy(10, sizeof(testData)-10)); + TEST_ASSERT('d', "3k", !test1->appendBytes(testData, 10)); + test1->release(); + } + res = res && checkSpace("(d)3", spaceCheck, 0); + + // Check and continious addition of more data + spaceCheck = checkPointSpace(); + test1 = OSData::withCapacity(DATA_SIZE_4); + test2 = OSData::withBytesNoCopy(testData, DATA_SIZE_3); + len = DATA_SIZE_3; + TEST_ASSERT('d', "4a", (test1 && test2)); + if (test1 && test2) { + TEST_ASSERT('d', "4b", !test1->getLength()); + for (i = 0; i < DATA_SIZE_4; i += DATA_SIZE_3) + TEST_ASSERT('d', "4c", test1->appendBytes(test2)); + TEST_ASSERT('d', "4d", !test1->appendBytes(test2)); + for (i = 0; i < DATA_SIZE_4; i += DATA_SIZE_3) { + + TEST_ASSERT('d', "4e", test2->isEqualTo( + test1->getBytesNoCopy(i, DATA_SIZE_3), + DATA_SIZE_3)); + + test3 = OSData::withData(test1, i, DATA_SIZE_3); + TEST_ASSERT('d', "4f", test3); + if (test3) { + TEST_ASSERT('d', "4g", test2->isEqualTo(test3)); + test3->release(); + } + + test3 = OSData::withData(test1, i, len); + TEST_ASSERT('d', "4i", test3); + if (test3) { + TEST_ASSERT('d', "4j", test2->isEqualTo(test3)); + test3->release(); + } + } + test1->release(); + test2->release(); + } + res = res && checkSpace("(d)3", spaceCheck, 0); + + if (res) + verPrintf(("testData: 
All OSData Tests passed\n")); + else + logPrintf(("testData: Some OSData Tests failed\n")); +#undef DATA_SIZE_4 +#undef DATA_SIZE_3 +#undef DATA_SIZE_2 +#undef DATA_SIZE_1 +} + +void testString() +{ + OSString *test1, *test2; + void *spaceCheck; + int i; + char c; + bool res = true; + + // very first test initialises the OSMetaClass cache. + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "0a", test1); + if (test1) + test1->release(); + + // Check c string allocation + spaceCheck = checkPointSpace(); + test1 = OSString::withCString(testC00); + TEST_ASSERT('s', "1a", test1); + TEST_ASSERT('s', "1b", testC00 != test1->getCStringNoCopy()); + TEST_ASSERT('s', "1c", strcmp(testC00, test1->getCStringNoCopy()) == 0); + TEST_ASSERT('s', "1d", strlen(testC00) == test1->getLength()); + TEST_ASSERT('s', "1e", test1->isEqualTo(testC00)); + TEST_ASSERT('s', "1f", !test1->isEqualTo(testC01)); + if (test1) test1->release(); + res = res && checkSpace("(s)1", spaceCheck, 0); + + // Check c string no allocation + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "2a", test1); + TEST_ASSERT('s', "2b", testC00 == test1->getCStringNoCopy()); + if (test1) test1->release(); + res = res && checkSpace("(s)2", spaceCheck, 0); + + // Check string from other string generation + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "3a", test1); + test2 = OSString::withString(test1); + TEST_ASSERT('s', "3b", test2); + TEST_ASSERT('s', "3c", test1 != test2); + TEST_ASSERT('s', "3d", test1->isEqualTo(test2)); + if (test1) test1->release(); + if (test2) test2->release(); + res = res && checkSpace("(s)3", spaceCheck, 0); + + // Check string comparison functionality no copy + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + test2 = OSString::withCStringNoCopy(testC01); + TEST_ASSERT('s', "4a", test1 && test2); + TEST_ASSERT('s', "4b", 
!test1->isEqualTo(test2)); + TEST_ASSERT('s', "4c", !test1->isEqualTo(testC01)); + TEST_ASSERT('s', "4d", test1->isEqualTo(testC00)); + if (test1) test1->release(); + if (test2) test2->release(); + res = res && checkSpace("(s)4", spaceCheck, 0); + + // Check string comparison functionality with copy + spaceCheck = checkPointSpace(); + test1 = OSString::withCString(testC00); + test2 = OSString::withCString(testC01); + TEST_ASSERT('s', "5a", test1 && test2); + TEST_ASSERT('s', "5b", !test1->isEqualTo(test2)); + TEST_ASSERT('s', "5c", !test1->isEqualTo(testC01)); + TEST_ASSERT('s', "5d", test1->isEqualTo(testC00)); + if (test1) test1->release(); + if (test2) test2->release(); + res = res && checkSpace("(s)5", spaceCheck, 0); + + // Check string inplace modifications + spaceCheck = checkPointSpace(); + test1 = OSString::withCString(testC00); + TEST_ASSERT('s', "6a", test1); + for (i = 0; (c = test1->getChar(i)); i++) + if (c != testC00[i]) { + verPrintf(("testString(s) test 6b failed\n")); res = false; + break; + } + TEST_ASSERT('s', "6c", !c); + TEST_ASSERT('s', "6d", test1->setChar(' ', 0)); + TEST_ASSERT('s', "6e", !test1->isEqualTo(testC00)); + TEST_ASSERT('s', "6f", test1->setChar('T', 0)); + TEST_ASSERT('s', "6g", !test1->setChar(' ', sizeof(testC00))); + TEST_ASSERT('s', "6h", test1->isEqualTo(testC00)); + if (test1) test1->release(); + res = res && checkSpace("(s)6", spaceCheck, 0); + + // Check const string fail inplace modifications + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "7a", test1); + for (i = 0; (c = test1->getChar(i)); i++) + if (c != testC00[i]) { + verPrintf(("testString(s) test 7b failed\n")); res = false; + break; + } + TEST_ASSERT('s', "7c", !c); + TEST_ASSERT('s', "7d", !test1->setChar(' ', 0)); + TEST_ASSERT('s', "7e", test1->isEqualTo(testC00)); + TEST_ASSERT('s', "7f", !test1->setChar(' ', sizeof(testC00))); + TEST_ASSERT('s', "7g", test1->isEqualTo(testC00)); + if (test1) 
test1->release(); + res = res && checkSpace("(s)7", spaceCheck, 0); + + if (res) + verPrintf(("testString: All OSString Tests passed\n")); + else + logPrintf(("testString: Some OSString Tests failed\n")); +} + +void testSymbol() +{ + bool res = true; + int i, j; + int countDups; + const OSSymbol *cache[numStrCache]; + void *spaceCheck; + + // very first test initialises the OSMetaClass cache. + cache[0] = IOSymbol::withCStringNoCopy(testC00); + TEST_ASSERT('u', "0a", cache[0]); + if (cache[0]) + cache[0]->release(); + + spaceCheck = checkPointSpace(); + + // Setup the symbol cache, make sure it grows the symbol unique'ing + // hash table. Also determine that the symbol is created ok and that + // it is indeed equal to the creating cString by strcmp. + for (i = 0; i < numStrCache; i++) { + cache[i] = OSSymbol::withCStringNoCopy(strCache[i]); + if (!cache[i]) { + verPrintf(("testSymbol(u) test 1a%d failed\n", i)); res = false; + } + else if (!cache[i]->isEqualTo(strCache[i])) { + verPrintf(("testSymbol(u) test 1b%d failed\n", i)); res = false; + } + } + + // The strCache does have some duplicates in it, mostly 'the'. Make + // sure that we wind them and that different cache entries really are + // different by strcmp. Fundamental to OSSymbol semantics. + countDups = 0; + for (i = 0; i < numStrCache; i++) + for (j = i+1; j < numStrCache; j++) { + if (cache[i] != cache[j] && cache[i]->isEqualTo(cache[j])) { + verPrintf(("testSymbol(u) test 2a%d,%d failed\n", i, j)); + res = false; + } + else if (cache[i] == cache[j]) { + if (cache[i]->getRetainCount() == 1) { + verPrintf(("testSymbol(u) test 2b%d,%d failed\n", i, j)); + res = false; + } + countDups++; + } + } + TEST_ASSERT('u', "2c", countDups); + + // Clear out the cache and check that the unique'ing hashtable has grown + for (i = 0; i < numStrCache; i++) { + if (cache[i]) { + cache[i]->release(); + cache[i] = 0; + } + } + // As of 1998-11-17 the hash growth is 364. 
+ res = res && checkSpace("(u)3", spaceCheck, 972); + logSpace(); + + // Check for leaks by repeating the cacheing and freeing + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) + cache[i] = OSSymbol::withCString(strCache[i]); + for (i = 0; i < numStrCache; i++) { + if (cache[i]) { + cache[i]->release(); + cache[i] = 0; + } + } + res = res && checkSpace("(u)4", spaceCheck, 0); + + // Check that the OSString based symbol constructors work + // and that they don't leak, and finally double check that while + // the cache is active the symbol semantics still work. + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + OSString *tmpStr; + + tmpStr = (i&1) + ? OSString::withCString(strCache[i]) + : OSString::withCStringNoCopy(strCache[i]); + if (tmpStr) { + cache[i] = OSSymbol::withString(tmpStr); + if (!cache[i]) { + verPrintf(("testSymbol(u) test 5a%d failed\n", i)); + res = false; + } + tmpStr->release(); + } + } + + for (i = 0; i < numStrCache; i++) { + if (cache[i]) { + const OSSymbol *tmpSymb; + + tmpSymb = OSSymbol::withCStringNoCopy(strCache[i]); + if (cache[i] != tmpSymb) { + verPrintf(("testSymbol(u) test 5b%d failed\n", i)); + res = false; + } + tmpSymb->release(); + cache[i]->release(); + cache[i] = 0; + } + else { + verPrintf(("testSymbol(u) test 5c%d failed\n", i)); + res = false; + } + } + res = res && checkSpace("(u)5", spaceCheck, 0); + + if (res) + verPrintf(("testSymbol: All OSSymbol Tests passed\n")); + else + logPrintf(("testSymbol: Some OSSymbol Tests failed\n")); +} + +#endif /* DEBUG */ diff --git a/iokit/Tests/TestDevice.cpp b/iokit/Tests/TestDevice.cpp new file mode 100644 index 000000000..effbab484 --- /dev/null +++ b/iokit/Tests/TestDevice.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if DEBUG + +#include "Tests.h" + +#include +#include +#include + +#include + +#define super OSObject + +static TestDevice *sDevice; + +static mach_timespec_t hundredMill = { 0, 100000000 }; +static semaphore_port_t completeSema; + +OSDefineMetaClassAndStructors(TestDevice, OSObject) + +kern_return_t +TestDevice::enqueueCommand(bool sleep, + TestDeviceAction act, int tag, void *dataP) +{ + return commQ->enqueueCommand(sleep, (void *) act, (void *) tag, dataP); +} + +bool TestDevice::init() +{ + if ( !super::init() ) + return false; + + workLoop = IOWorkLoop::workLoop(); + if ( !workLoop ) + return false; + + commQ = IOCommandQueue::commandQueue + (this, (IOCommandQueueAction) &rawCommandOccurred, 8); + if (!commQ || kIOReturnSuccess != workLoop->addEventSource(commQ)) + return false; + + intES = IOInterruptEventSource::interruptEventSource + (this, (IOInterruptEventAction) &interruptAction); + if (!intES || kIOReturnSuccess != workLoop->addEventSource(intES)) + return false; + + return true; +} + +void TestDevice::free() +{ + if (intES) intES->release(); + if (commQ) commQ->release(); + if (workLoop) 
workLoop->release(); + + super::free(); +} + +void +TestDevice::rawCommandOccurred + (void *field0, void *field1, void *field2, void *) +{ + (*(TestDeviceAction) field0)(this, (int) field1, field2); +} + +void +TestDevice::interruptAction(IOInterruptEventSource *, int count) +{ + logPrintf(("I(%d, %d) ", count, ++intCount)); +} + +void +TestDevice::producer1Action(int tag) +{ + logPrintf(("C1(%d) ", tag)); +} + +void +TestDevice::producer2Action(int tag, void *count) +{ + logPrintf(("C2(%d,%d) ", tag, (int) count)); + if ( !(tag % 10) ) + IOSleep(1000); +} + +void +TestDevice::alarm() +{ + intES->interruptOccurred(0, 0, 0); + IOScheduleFunc((IOThreadFunc) alarm, (void *) this, hundredMill, 1); +} + +static void producer(void *inProducerId) +{ + int producerId = (int) inProducerId; + TestDeviceAction command; + int i; + + semaphore_wait(completeSema); + + if (producerId & 1) + command = (TestDeviceAction) sDevice->producer1Action; + else + command = (TestDeviceAction) sDevice->producer2Action; + + for (i = 0; i < 5 * (producerId << 1); i++) { + sDevice->enqueueCommand + (true, command, i, (void *) (i % (producerId + 1))); + if ( !(i % (producerId + 1)) ) + /* cthread_yield() */; + logPrintf(("TestDevice(%d): %d\n", producerId, i)); + } + + logPrintf(("TestDevice: producer %d exiting\n", producerId)); + semaphore_signal(completeSema); + + IOExitThread(producerId); +} + +void testWorkLoop() +{ + int i; + + sDevice = new TestDevice; + if (!sDevice || !sDevice->init()) { + if (sDevice) sDevice->free(); + logPrintf(("TestDevice: couldn't create device instance\n")); + return; + } + + IOSleep(1000); + + IOScheduleFunc((IOThreadFunc) sDevice->alarm, sDevice, hundredMill, 1); + + IOSleep(2000); + + if (KERN_SUCCESS + != semaphore_create(kernel_task, &completeSema, SYNC_POLICY_FIFO, 4)) + return; + + IOCreateThread(producer, (void *) 4); + IOCreateThread(producer, (void *) 3); + IOCreateThread(producer, (void *) 2); + IOCreateThread(producer, (void *) 1); + + IOSleep(2000); 
+ + for (i = 0; i < 4; i++) + semaphore_wait(completeSema); + + IOUnscheduleFunc((IOThreadFunc) sDevice->alarm, sDevice); + + sDevice->free(); sDevice = 0; + + logPrintf(("TestDevice: exiting\n")); +} + +#endif /* DEBUG */ diff --git a/iokit/Tests/Tests.cpp b/iokit/Tests/Tests.cpp new file mode 100644 index 000000000..5e570cc27 --- /dev/null +++ b/iokit/Tests/Tests.cpp @@ -0,0 +1,159 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + */ + +#include +#include +#include + +#include + + +extern "C" { +extern int debug_container_malloc_size; +extern int debug_ivars_size; +} + +static void DumpTree( void ) +{ + IORegistryEntry * next; + IORegistryEntry * packages = 0; + IORegistryEntry * deblocker = 0; + IORegistryEntry * keyboard = 0; + IORegistryIterator * iter; + OSOrderedSet * all; + + IOLog("ivars %08x, containers %08x\n", + debug_ivars_size, debug_container_malloc_size); + + iter = IORegistryIterator::iterateOver( gIODTPlane ); + assert( iter ); + + all = iter->iterateAll(); + IOLog("\nCount %d\n", all->getCount() ); + all->release(); + + iter->reset(); + while( (next = iter->nextEntryRecursive())) { + if( 0 == strcmp( "packages", next->getName())) + packages = next; + if( 0 == strcmp( "deblocker", next->getName())) + deblocker = next; + if( 0 == strcmp( "keyboard", next->getName())) + keyboard = next; + } + + if( deblocker && keyboard) + deblocker->attachToParent( keyboard, gIODTPlane); + + iter->reset(); + while( (next = iter->nextEntryRecursive())) { + IOLog("%s=%d,", next->getName(), next->getDepth( gIODTPlane )); + if( 0 == strcmp( "gc", next->getName())) { + packages = next; + } + } + + IOLog("ivars %08x, containers %08x\n", + debug_ivars_size, debug_container_malloc_size); + + if( packages) + packages->detachAll( gIODTPlane); + all = iter->iterateAll(); + IOLog("del gc/, count now %d\n", all->getCount() ); + all->release(); + + iter->release(); + + IOLog("ivars %08x, containers %08x\n", + debug_ivars_size, debug_container_malloc_size); + +} + +extern "C" { +void PathTests( void ) +{ + const char * tests[] = { + "IODeviceTree:/bandit", + "IODeviceTree:/", + "IODeviceTree:/xxxx", + "IODeviceTree:/bandit/xxx", + "IODeviceTree:/bandit@F2000000", + "IODeviceTree:/bandit/gc", + "IODeviceTree:/bandit/gc/mace:17.202.42.95,\\mach_kernel", + "IODeviceTree:/bandit/@10/mesh", + "IODeviceTree:enet:17.202", + "IODeviceTree:scsi/@0:0", + 
"IODeviceTree:scsi-int", + "IODeviceTree:/bandit/gc@10/mesh", + "IODeviceTree:/bandit/gc/53c94/disk@0:6,mach_kernel", + "IOService:/", + "IOService:/ApplePlatformExpert", + "IOService:/ApplePlatformExpert/hammerhead@F8000000", + "IOService:/ApplePlatformExpert/bandit/AppleMacRiscPCI" + }; + + IORegistryEntry * entry; + char str[256]; + int len; + + for( unsigned int i = 0; i < sizeof(tests)/sizeof(tests[0]); i++) { + + len = sizeof( str ); + entry = IORegistryEntry::fromPath( tests[i], 0, str, &len ); + IOLog("\"%s\" ", tests[i] ); + if( entry) { + IOLog("found %s, tail = \"%s\"\n", entry->getName(), str ); + len = sizeof( str ); + if( entry->getPath( str, &len, + IORegistryEntry::getPlane("IODeviceTree"))) { + IOLog("path = \"%s\"\n", str); + } + entry->release(); + } else + IOLog("not found\n"); + } +} +} + +void TestsCpp( void * dtTop ) +{ + IORegistryEntry * dt; + + IOLog("\nivars %08x, containers %08x\n", + debug_ivars_size, debug_container_malloc_size); + + OSMetaClass::printInstanceCounts(); + dt = IODeviceTreeAlloc( dtTop ); + assert( dt ); + +// OSMetaClass::printInstanceCounts(); + DumpTree(); +// OSMetaClass::printInstanceCounts(); + dt->detachAll( gIODTPlane); + OSMetaClass::printInstanceCounts(); + IOLog("ivars %08x, containers %08x\n", + debug_ivars_size, debug_container_malloc_size); +} + diff --git a/iokit/Tests/Tests.h b/iokit/Tests/Tests.h new file mode 100644 index 000000000..99c6033fe --- /dev/null +++ b/iokit/Tests/Tests.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +#ifdef __cplusplus + +#define logPrintf(x) \ + do { \ + kprintf x; \ + } while (0) + +#define verPrintf(x) logPrintf(x) + +// Assumes 'bool res = true' in current scope +#define TEST_ASSERT(t, l, c) \ + do { \ + if ( !(c) ) { \ + verPrintf(("TEST (%c) test %s failed\n", t, l)); \ + res = false; \ + } \ + } while(0) + +#define logSpace() do { } while(0) +#define checkPointSpace() ((void *) 0) +#define checkSpace(l, ckp, d) ((int) 1) + +// In TestContainers.cc +extern const int numStrCache; +extern const char *strCache[]; + +extern void testString(); +extern void testSymbol(); +extern void testData(); + +// In TestCollections.cc +extern void testArray(); +extern void testSet(); +extern void testDictionary(); +extern void testIterator(); + +// In TestDevice.cc +extern void testWorkLoop(); + +#include + +class IOWorkLoop; +class IOCommandQueue; +class IOInterruptEventSource; + +class TestDevice; +typedef void (*TestDeviceAction)(TestDevice *, int, void *); + +class TestDevice : public OSObject +{ + OSDeclareDefaultStructors(TestDevice) + + IOWorkLoop *workLoop; + int intCount; + IOCommandQueue *commQ; + +public: + IOInterruptEventSource *intES; + + virtual bool init(); + virtual void free(); + + void rawCommandOccurred + (void *field0, void *field1, void *field2, void *field3); + kern_return_t enqueueCommand(bool sleep, + TestDeviceAction act, int tag, void *dataP); + + void interruptAction(IOInterruptEventSource *event, int count); + + void producer1Action(int 
tag); + void producer2Action(int tag, void *inCount); + + void alarm(); +}; + +#endif /* __cplusplus */ diff --git a/iokit/User/Makefile b/iokit/User/Makefile new file mode 100644 index 000000000..5dcb75d06 --- /dev/null +++ b/iokit/User/Makefile @@ -0,0 +1,38 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +do_all: + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + ($(MKDIR) $(COMPOBJROOT)/User; \ + cd $(COMPOBJROOT)/User; \ + ${MAKE} MAKEFILES=$(SOURCE)/Makefile.user \ + TARGET=$(TARGET) \ + do_build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + + +do_build_all: do_all + +do_install: + @echo "[ $(SOURCE) ] Starting do_install $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + ($(MKDIR) $(COMPOBJROOT)/User; \ + cd $(COMPOBJROOT)/User; \ + ${MAKE} MAKEFILES=$(SOURCE)/Makefile.user \ + TARGET=$(TARGET) \ + do_build_install \ + ); \ + echo "[ $(SOURCE) ] Returning do_install $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_install: do_install + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/User/Makefile.user b/iokit/User/Makefile.user new file mode 100644 index 000000000..b8af24563 --- /dev/null +++ b/iokit/User/Makefile.user @@ -0,0 +1,41 @@ +# +# User level code makefile +# + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTALL_DIR= /usr/lib +LIB_NAME= libIOKit +BUILD_VERS= .A 
+COMPAT_VERS= 1 +LIB_INSTALL_FLAGS= -c -m 555 -S "-S" +BUILD_NAME= $(LIB_NAME)$(BUILD_VERS).dylib +FRAMEWORK_NAME= /System/Library/Frameworks/IOKit.framework/Versions/A/IOKit + +# ALLARCHLIBS = $(foreach archlib, $(INSTALL_ARCHS), $(OBJROOT)/$(KERNEL_CONFIG)_$(archlib)/$(COMPONENT)/User/$(BUILD_NAME)) + +do_build_all: + +$(DSTROOT)$(INSTALL_DIR)/$(BUILD_NAME): + ${MKDIR} $(DSTROOT)$(INSTALL_DIR); \ + (cd $(DSTROOT)$(INSTALL_DIR); \ + if [ ! -h $(BUILD_NAME) ]; then \ + $(LN) $(FRAMEWORK_NAME) $(BUILD_NAME); \ + fi ); + +$(DSTROOT)$(INSTALL_DIR)/$(LIB_NAME).dylib: $(DSTROOT)$(INSTALL_DIR)/$(BUILD_NAME) + (cd $(DSTROOT)$(INSTALL_DIR); \ + if [ ! -h $(LIB_NAME).dylib ]; then \ + $(LN) $(BUILD_NAME) $(LIB_NAME).dylib; \ + fi ); + +do_build_install: $(DSTROOT)$(INSTALL_DIR)/$(LIB_NAME).dylib + +# include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/iokit/bsddev/IOBSDConsole.cpp b/iokit/bsddev/IOBSDConsole.cpp new file mode 100644 index 000000000..b2a82e4e9 --- /dev/null +++ b/iokit/bsddev/IOBSDConsole.cpp @@ -0,0 +1,216 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include "IOBSDConsole.h" +#include +#include + +static IOBSDConsole * gBSDConsoleInst = 0; +bool displayWranglerPublished( OSObject *, void *, IOService * ); + +#define super IOService +OSDefineMetaClassAndStructors(IOBSDConsole, IOService); + +//************************************************************************ + +bool IOBSDConsole::start(IOService * provider) +{ + OSObject * notify; + + if (!super::start(provider)) return false; + + assert( gBSDConsoleInst == 0 ); + gBSDConsoleInst = this; + + notify = addNotification( gIOPublishNotification, + serviceMatching("IOHIKeyboard"), + (IOServiceNotificationHandler) &IOBSDConsole::publishNotificationHandler, + this, 0 ); + assert( notify ); + + notify = addNotification( gIOPublishNotification, + serviceMatching("IODisplayWrangler"), + (IOServiceNotificationHandler)displayWranglerPublished, + this, 0 ); + assert( notify ); + + notify = addNotification( gIOPublishNotification, + serviceMatching("IOAudioStream"), + (IOServiceNotificationHandler) &IOBSDConsole::publishNotificationHandler, + this, this ); + assert( notify ); + + return( true ); +} + +bool IOBSDConsole::publishNotificationHandler( + IOBSDConsole * self, + void * ref, + IOService * newService ) + +{ + IOHIKeyboard * keyboard = 0; + IOService * audio = 0; + + if( ref) { + audio = OSDynamicCast(IOService, newService->metaCast("IOAudioStream")); + if (audio != 0) { + OSNumber *out; + out = OSDynamicCast(OSNumber, newService->getProperty("Out")); + if (out) { + if (out->unsigned8BitValue() == 1) { + self->fAudioOut = newService; + } + } + } + } else { + audio = 0; + keyboard = OSDynamicCast( IOHIKeyboard, newService ); + + if( keyboard && self->attach( keyboard )) { + self->arbitrateForKeyboard( keyboard ); + } + } + + if( !keyboard && !audio) + IOLog("%s: strange service notify \"%s\"\n", + self->getName(), newService->getName()); + + return true; +} + +// 
********************************************************************************** +// displayWranglerPublished +// +// The Display Wrangler has appeared. We will be calling its +// ActivityTickle method when there is user activity. +// ********************************************************************************** +bool displayWranglerPublished( OSObject * us, void * ref, IOService * yourDevice ) +{ + if ( yourDevice != NULL ) { + ((IOBSDConsole *)us)->displayManager = yourDevice; + } + return true; +} + + +//************************************************************************ +// Keyboard client stuff +//************************************************************************ + +void IOBSDConsole::arbitrateForKeyboard( IOHIKeyboard * nub ) +{ + nub->open(this, 0, + keyboardEvent, 0, updateEventFlags); + // failure can be expected if the HID system already has it +} + +IOReturn IOBSDConsole::message(UInt32 type, IOService * provider, + void * argument) +{ + IOReturn status = kIOReturnSuccess; + + switch (type) + { + case kIOMessageServiceIsTerminated: + case kIOMessageServiceIsRequestingClose: + provider->close( this ); + break; + + case kIOMessageServiceWasClosed: + arbitrateForKeyboard( (IOHIKeyboard *) provider ); + break; + + default: + status = super::message(type, provider, argument); + break; + } + + return status; +} + +extern "C" { + void cons_cinput( char c); +} +#warning REMOVE cons_cinput DECLARATION FROM HERE + +void IOBSDConsole::keyboardEvent(OSObject * target, + /* eventType */ unsigned eventType, + /* flags */ unsigned /* flags */, + /* keyCode */ unsigned /* key */, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned /* origCharCode */, + /* originalCharSet */ unsigned /* origCharSet */, + /* keyboardType */ unsigned /* keyboardType */, + /* repeat */ bool /* repeat */, + /* atTime */ AbsoluteTime /* ts */) +{ + static const char cursorCodes[] = { 'D', 'A', 'C', 'B' }; + + if ( 
((IOBSDConsole *)target)->displayManager != NULL ) { // if there is a display manager, + ((IOBSDConsole *)target)->displayManager->activityTickle(kIOPMSuperclassPolicy1); // tell it there is user activity + } + + if( eventType == NX_KEYDOWN) { + if( (charSet == NX_SYMBOLSET) + && (charCode >= 0xac) && (charCode <= 0xaf)) { + cons_cinput( '\033'); + cons_cinput( 'O'); + charCode = cursorCodes[ charCode - 0xac ]; + } + cons_cinput( charCode); + } +} + +void IOBSDConsole::updateEventFlags(OSObject * /*target*/, unsigned /*flags*/) +{ + return; +} + +//************************************************************************ +// Utility sound making stuff, callable from C +//************************************************************************ +extern "C" { + int asc_ringbell(); +} + + +bool (*playBeep)(IOService *outputStream) = 0; + +/* +* Make some sort of noise if possible +*/ + +int asc_ringbell() +{ + IOService *output; + + if (gBSDConsoleInst && playBeep && (output = gBSDConsoleInst->getAudioOut())) { + playBeep(output); + } + + return true; +} + diff --git a/iokit/bsddev/IOBSDConsole.h b/iokit/bsddev/IOBSDConsole.h new file mode 100644 index 000000000..240ff84c5 --- /dev/null +++ b/iokit/bsddev/IOBSDConsole.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _IOBSDCONSOLE_H +#define _IOBSDCONSOLE_H + +#include + +class IOHIKeyboard; + +class IOBSDConsole : public IOService +{ + OSDeclareDefaultStructors(IOBSDConsole); + +private: + IOService * fAudioOut; + + static bool publishNotificationHandler( + IOBSDConsole * self, void * ref, + IOService * newService ); + + virtual void arbitrateForKeyboard( IOHIKeyboard * kb ); + +public: + IOService * displayManager; // we send user activity tickles to the display manager + + static void keyboardEvent(OSObject * target, + /* eventType */ unsigned eventType, + /* flags */ unsigned flags, + /* keyCode */ unsigned key, + /* charCode */ unsigned charCode, + /* charSet */ unsigned charSet, + /* originalCharCode */ unsigned origCharCode, + /* originalCharSet */ unsigned origCharSet, + /* keyboardType */ unsigned keyboardType, + /* repeat */ bool repeat, + /* atTime */ AbsoluteTime ts); + + static void updateEventFlags(OSObject * target, unsigned flags); + + virtual bool start(IOService * provider); + + virtual IOReturn message(UInt32 type, IOService * provider, + void * argument); + + IOService * getAudioOut() { return fAudioOut; }; +}; + +#endif /* _IOBSDCONSOLE_H */ diff --git a/iokit/bsddev/IOKitBSDInit.cpp b/iokit/bsddev/IOKitBSDInit.cpp new file mode 100644 index 000000000..b161dbf60 --- /dev/null +++ b/iokit/bsddev/IOKitBSDInit.cpp @@ -0,0 +1,466 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern "C" { + +#include +#include + +// how long to wait for matching root device, secs +#define ROOTDEVICETIMEOUT 60 + + +kern_return_t +IOKitBSDInit( void ) +{ + IOLog("IOKitBSDInit\n"); + + IOService::publishResource("IOBSD"); + + return( kIOReturnSuccess ); +} + +OSDictionary * IOBSDNameMatching( const char * name ) +{ + OSDictionary * dict; + const OSSymbol * str = 0; + + do { + + dict = IOService::serviceMatching( gIOServiceKey ); + if( !dict) + continue; + str = OSSymbol::withCString( name ); + if( !str) + continue; + dict->setObject( kIOBSDNameKey, (OSObject *) str ); + str->release(); + + return( dict ); + + } while( false ); + + if( dict) + dict->release(); + if( str) + str->release(); + + return( 0 ); +} + +OSDictionary * IONetworkMatching( const char * path, + char * buf, int maxLen ) +{ + OSDictionary * matching = 0; + OSDictionary * dict; + OSString * str; + char * comp; + const char * skip; + int len; + + do { + + len = strlen( kIODeviceTreePlane ":" ); + maxLen -= len; + if( maxLen 
< 0) + continue; + + strcpy( buf, kIODeviceTreePlane ":" ); + comp = buf + len; + + // remove parameters following ':' from the path + skip = strchr( path, ':'); + if( !skip) + continue; + + len = skip - path; + maxLen -= len; + if( maxLen < 0) + continue; + strncpy( comp, path, len ); + comp[ len ] = 0; + + matching = IOService::serviceMatching( "IONetworkInterface" ); + if( !matching) + continue; + dict = IOService::addLocation( matching ); + if( !dict) + continue; + + str = OSString::withCString( buf ); + if( !str) + continue; + dict->setObject( kIOPathMatchKey, str ); + str->release(); + + return( matching ); + + } while( false ); + + if( matching) + matching->release(); + + return( 0 ); +} + +OSDictionary * IONetworkNamePrefixMatching( const char * prefix ) +{ + OSDictionary * matching; + OSDictionary * propDict = 0; + const OSSymbol * str = 0; + + do { + matching = IOService::serviceMatching( "IONetworkInterface" ); + if ( matching == 0 ) + continue; + + propDict = OSDictionary::withCapacity(1); + if ( propDict == 0 ) + continue; + + str = OSSymbol::withCString( prefix ); + if ( str == 0 ) + continue; + + propDict->setObject( kIOInterfaceNamePrefix, (OSObject *) str ); + str->release(); + str = 0; + + if ( matching->setObject( gIOPropertyMatchKey, + (OSObject *) propDict ) != true ) + continue; + + propDict->release(); + propDict = 0; + + return( matching ); + + } while ( false ); + + if ( matching ) matching->release(); + if ( propDict ) propDict->release(); + if ( str ) str->release(); + + return( 0 ); +} + +static bool IORegisterNetworkInterface( IONetworkInterface * netif ) +{ + IONetworkStack * stack; + + if (( stack = IONetworkStack::getNetworkStack() )) + { + stack->registerInterface( netif, netif->getNamePrefix() ); + } + + return ( netif->getProperty( kIOBSDNameKey ) != 0 ); +} + +static void IORegisterPrimaryNetworkInterface() +{ + IONetworkStack * stack; + + if (( stack = IONetworkStack::getNetworkStack() )) + { + stack->registerPrimaryInterface( 
true ); + } +} + +OSDictionary * IODiskMatching( const char * path, char * buf, int maxLen ) +{ + const char * look; + const char * alias; + char * comp; + long unit = -1; + long partition = -1; + char c; + + // scan the tail of the path for "@unit:partition" + do { + // Have to get the full path to the controller - an alias may + // tell us next to nothing, like "hd:8" + alias = IORegistryEntry::dealiasPath( &path, gIODTPlane ); + + look = path + strlen( path); + c = ':'; + while( look != path) { + if( *(--look) == c) { + if( c == ':') { + partition = strtol( look + 1, 0, 0 ); + c = '@'; + } else if( c == '@') { + unit = strtol( look + 1, 0, 16 ); + c = '/'; + } else if( c == '/') { + c = 0; + break; + } + } + + if( alias && (look == path)) { + path = alias; + look = path + strlen( path); + alias = 0; + } + } + if( c || unit == -1 || partition == -1) + continue; + + maxLen -= strlen( "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" ); + maxLen -= ( alias ? strlen( alias ) : 0 ) + (look - path); + maxLen -= strlen( "/@hhhhhhhh:dddddddddd';}" ); + + if( maxLen > 0) { + sprintf( buf, "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" ); + comp = buf + strlen( buf ); + + if( alias) { + strcpy( comp, alias ); + comp += strlen( alias ); + } + + if ( (look - path)) { + strncpy( comp, path, look - path); + comp += look - path; + } + + sprintf( comp, "/@%lx:%ld';}", unit, partition ); + } else + continue; + + return( OSDynamicCast(OSDictionary, OSUnserialize( buf, 0 )) ); + + } while( false ); + + return( 0 ); +} + +OSDictionary * IOOFPathMatching( const char * path, char * buf, int maxLen ) +{ + /* need to look up path, get device type, + call matching help based on device type */ + + return( IODiskMatching( path, buf, maxLen )); + +} + +kern_return_t IOFindBSDRoot( char * rootName, + dev_t * root, u_int32_t * oflags ) +{ + mach_timespec_t t; + IOService * service; + IORegistryEntry * regEntry; + OSDictionary * matching = 0; + OSString * iostr; + OSNumber * off; + OSData * 
data = 0; + + UInt32 flags = 0; + int minor, major; + char * rdBootVar; + enum { kMaxPathBuf = 512, kMaxBootVar = 128 }; + char * str; + const char * look = 0; + int len; + bool forceNet = false; + + static int mountAttempts = 0; + + if( mountAttempts++) + IOSleep( 5 * 1000 ); + + str = (char *) IOMalloc( kMaxPathBuf + kMaxBootVar ); + if( !str) + return( kIOReturnNoMemory ); + rdBootVar = str + kMaxPathBuf; + + if (!PE_parse_boot_arg("rd", rdBootVar ) + && !PE_parse_boot_arg("rootdev", rdBootVar )) + rdBootVar[0] = 0; + + do { + if( (regEntry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ))) { + data = (OSData *) regEntry->getProperty( "rootpath" ); + regEntry->release(); + if( data) + continue; + } + if( (regEntry = IORegistryEntry::fromPath( "/options", gIODTPlane ))) { + data = (OSData *) regEntry->getProperty( "boot-file" ); + regEntry->release(); + if( data) + continue; + } + } while( false ); + + if( data) + look = (const char *) data->getBytesNoCopy(); + + if( rdBootVar[0] == '*') { + look = rdBootVar + 1; + forceNet = false; + } else { + if( (regEntry = IORegistryEntry::fromPath( "/", gIODTPlane ))) { + forceNet = (0 != regEntry->getProperty( "net-boot" )); + regEntry->release(); + } + } + + if( look) { + // from OpenFirmware path + IOLog("From path: \"%s\", ", look); + + if( forceNet || (0 == strncmp( look, "enet", strlen( "enet" ))) ) + matching = IONetworkMatching( look, str, kMaxPathBuf ); + else + matching = IODiskMatching( look, str, kMaxPathBuf ); + } + + if( (!matching) && rdBootVar[0] ) { + // by BSD name + look = rdBootVar; + if( look[0] == '*') + look++; + + if ( strncmp( look, "en", strlen( "en" )) == 0 ) + matching = IONetworkNamePrefixMatching( "en" ); + else + matching = IOBSDNameMatching( look ); + } + + if( !matching) { + OSString * astring; + // any UFS + matching = IOService::serviceMatching( "IOMedia" ); + astring = OSString::withCStringNoCopy("Apple_UFS"); + if ( astring ) { + matching->setObject(kIOMediaContentKey, astring); + 
astring->release(); + } + } + + if( true && matching) { + OSSerialize * s = OSSerialize::withCapacity( 5 ); + + if( matching->serialize( s )) { + IOLog( "Waiting on %s\n", s->text() ); + s->release(); + } + } + + IOService::waitForService(IOService::serviceMatching("IOMediaBSDClient")); + + do { + t.tv_sec = ROOTDEVICETIMEOUT; + t.tv_nsec = 0; + matching->retain(); + service = IOService::waitForService( matching, &t ); + if( (!service) || (mountAttempts == 10)) { + PE_display_icon( 0, "noroot"); + IOLog( "Still waiting for root device\n" ); + } + } while( !service); + matching->release(); + + major = 0; + minor = 0; + + // If the IOService we matched to is a subclass of IONetworkInterface, + // then make sure it has been registered with BSD and has a BSD name + // assigned. + + if ( service + && service->metaCast( "IONetworkInterface" ) + && !IORegisterNetworkInterface( (IONetworkInterface *) service ) ) + { + service = 0; + } + IORegisterPrimaryNetworkInterface(); + + if( service) { + + len = kMaxPathBuf; + service->getPath( str, &len, gIOServicePlane ); + IOLog( "Got boot device = %s\n", str ); + + iostr = (OSString *) service->getProperty( kIOBSDNameKey ); + if( iostr) + strcpy( rootName, iostr->getCStringNoCopy() ); + off = (OSNumber *) service->getProperty( kIOBSDMajorKey ); + if( off) + major = off->unsigned32BitValue(); + off = (OSNumber *) service->getProperty( kIOBSDMinorKey ); + if( off) + minor = off->unsigned32BitValue(); + + if( service->metaCast( "IONetworkInterface" )) + flags |= 1; + + } else { + + IOLog( "Wait for root failed\n" ); + strcpy( rootName, "en0"); + flags |= 1; + } + + IOLog( "BSD root: %s", rootName ); + if( major) + IOLog(", major %d, minor %d\n", major, minor ); + else + IOLog("\n"); + + *root = makedev( major, minor ); + *oflags = flags; + + IOFree( str, kMaxPathBuf + kMaxBootVar ); + + if( gIOKitDebug & (kIOLogDTree | kIOLogServiceTree | kIOLogMemory)) { + + IOSleep(10 * 1000); +// IOService::getPlatform()->waitQuiet(); + if( 
gIOKitDebug & kIOLogDTree) { + IOLog("\nDT plane:\n"); + IOPrintPlane( gIODTPlane ); + } + if( gIOKitDebug & kIOLogServiceTree) { + IOLog("\nService plane:\n"); + IOPrintPlane( gIOServicePlane ); + } + if( gIOKitDebug & kIOLogMemory) + IOPrintMemory(); + } + + return( kIOReturnSuccess ); +} + +} /* extern "C" */ diff --git a/iokit/bsddev/IOKitBSDInit.h b/iokit/bsddev/IOKitBSDInit.h new file mode 100644 index 000000000..98606f650 --- /dev/null +++ b/iokit/bsddev/IOKitBSDInit.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifdef __cplusplus +extern "C" { +#endif + +kern_return_t +IOKitBSDInit( void ); + +#ifdef __cplusplus +} +#endif + diff --git a/iokit/conf/MASTER b/iokit/conf/MASTER new file mode 100644 index 000000000..1e7c30b2e --- /dev/null +++ b/iokit/conf/MASTER @@ -0,0 +1,58 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. 
+# +####################################################################### +# +# Master machine independent configuration file. +# +# Specific configuration files are created based on this and +# the machine specific master file using the doconf script. +# +# Any changes to the master configuration files will affect all +# other configuration files based upon it. +# +####################################################################### +# +# To build a configuration, execute "doconf ." +# Configurations are specified in the "Configurations:" section +# of the MASTER and MASTER.* files as follows: +# +# = [ ... ] +# +# Lines in the MASTER and MASTER.* files are selected based on +# the attribute selector list, found in a comment at the end of +# the line. This is a list of attributes separated by commas. +# The "!" operator selects the line if none of the attributes are +# specified. +# +# For example: +# +# selects a line if "foo" or "bar" are specified. +# selects a line if neither "foo" nor "bar" is +# specified. +# +# Lines with no attributes specified are selected for all +# configurations. +# +####################################################################### +# +# +# These are the default configurations that can be used by most sites. +# They are used internally by the Mach project. 
+# +# IOKIT = [iokitcpp debug] +# +####################################################################### +# +ident IOKIT + +options KERNOBJC # Objective-C implementation # +options IOKITCPP # C++ implementation # +options KDEBUG # kernel tracing # +#makeoptions LIBDRIVER = "libDriver_kern.o" # +#makeoptions LIBOBJC = "libkobjc.o" # + + diff --git a/iokit/conf/MASTER.i386 b/iokit/conf/MASTER.i386 new file mode 100644 index 000000000..7e7bc20b9 --- /dev/null +++ b/iokit/conf/MASTER.i386 @@ -0,0 +1,15 @@ +# +###################################################################### +# +# Standard NeXT Research Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [intel mach iokitcpp] +# PROFILE = [intel mach iokitcpp profile] +# DEBUG = [intel mach iokitcpp debug] +# +###################################################################### + +machine "i386" # +cpu "i386" # + diff --git a/iokit/conf/MASTER.ppc b/iokit/conf/MASTER.ppc new file mode 100644 index 000000000..001486d75 --- /dev/null +++ b/iokit/conf/MASTER.ppc @@ -0,0 +1,17 @@ +# +###################################################################### +# +# Standard Apple MacOS X Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [ppc mach iokitcpp] +# PROFILE = [ppc mach iokitcpp profile] +# DEBUG = [ppc mach iokitcpp debug] +# RELEASE_TRACE = [ RELEASE kdebug ] +# DEBUG_TRACE = [ DEBUG kdebug ] +# +###################################################################### + +machine "ppc" # +cpu "ppc" # + diff --git a/iokit/conf/Makefile b/iokit/conf/Makefile new file mode 100644 index 000000000..4b64836c7 --- /dev/null +++ b/iokit/conf/Makefile @@ -0,0 +1,63 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + tools + +COMP_SUBDIRS = + 
+INST_SUBDIRS = + +ifndef IOKIT_KERNEL_CONFIG +export IOKIT_KERNEL_CONFIG = $(KERNEL_CONFIG) +endif + +COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: + make build_setup + +$(COMPOBJROOT)/$(IOKIT_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ + $(SOURCE)/MASTER.$(arch_config) \ + $(SOURCE)/Makefile.template \ + $(SOURCE)/Makefile.$(arch_config) \ + $(SOURCE)/files \ + $(SOURCE)/files.$(arch_config) \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf + @echo "Running doconf for $(IOKIT_KERNEL_CONFIG)"; + (doconf_target=$(addsuffix /conf, $(TARGET)); \ + echo $${doconf_target};\ + $(MKDIR) $${doconf_target}; \ + cd $${doconf_target}; \ + rm -f $(notdir $?); \ + cp $? $${doconf_target}; \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(arch_config) -d $(TARGET)/$(IOKIT_KERNEL_CONFIG) $(IOKIT_KERNEL_CONFIG); \ + ); + +.ORDER: $(COMPOBJROOT)/$(IOKIT_KERNEL_CONFIG)/Makefile + +do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ + $(COMPOBJROOT)/$(IOKIT_KERNEL_CONFIG)/Makefile + +do_all: do_setup_conf + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(IOKIT_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + (cd $(COMPOBJROOT)/$(IOKIT_KERNEL_CONFIG); \ + next_source=$(subst conf/,,$(SOURCE)); \ + ${MAKE} MAKEFILES=$(TARGET)/$(IOKIT_KERNEL_CONFIG)/Makefile \ + SOURCE=$${next_source} \ + TARGET=$(TARGET) \ + build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(IOKIT_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/iokit/conf/Makefile.i386 b/iokit/conf/Makefile.i386 new file mode 100644 index 000000000..2f6232c14 --- /dev/null +++ b/iokit/conf/Makefile.i386 @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for i386 
+###################################################################### + +###################################################################### +#END Machine dependent Makefile fragment for i386 +###################################################################### + diff --git a/iokit/conf/Makefile.ppc b/iokit/conf/Makefile.ppc new file mode 100644 index 000000000..7786ccbd6 --- /dev/null +++ b/iokit/conf/Makefile.ppc @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for ppc +###################################################################### + +###################################################################### +#END Machine dependent Makefile fragment for ppc +###################################################################### + diff --git a/iokit/conf/Makefile.template b/iokit/conf/Makefile.template new file mode 100644 index 000000000..1debf49ee --- /dev/null +++ b/iokit/conf/Makefile.template @@ -0,0 +1,112 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# Export IDENT for sub-makefiles +# +export IDENT + +# +# XXX: INCFLAGS +# +INCFLAGS_MAKEFILE= $(INCFLAGS_POSIX) -I$(SOURCE) -I$(SOURCE)include + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +# +# XXX: CFLAGS +# +CFLAGS+= -DKERNEL -DDRIVER_PRIVATE \ + -Wall -Wno-four-char-constants -fno-common \ + -DIOMATCHDEBUG=1 -DIOALLOCDEBUG=1 -DIOASSERT=0 \ +#-DIOKITDEBUG=-1 + +SFLAGS+= -DKERNEL + +# +# Directories for mig generated files +# +COMP_SUBDIRS = + +# +# Make sure we don't remove this by accident if interrupted at the wrong +# time. 
+# +.PRECIOUS: Makefile + +VERSION_FILES= \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.major \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.minor \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.variant + +COPYRIGHT_FILES = \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright + +# +# Theses macros are filled in by the config program depending on the +# current configuration. The MACHDEP macro is replaced by the +# contents of the machine dependent makefile template and the others +# are replaced by the corresponding symbol definitions for the +# configuration. +# + +%OBJS + +%CFILES + +%MFILES + +%SFILES + +%BFILES + +%ORDERED +%MACHDEP + +# +# OBJSDEPS is the set of files (defined in the machine dependent +# template if necessary) which all objects depend on (such as an +# in-line assembler expansion filter) +# +${OBJS}: ${OBJSDEPS} + + +%LOAD + +LDOBJS = $(OBJS) + +$(COMPONENT).o: $(LDOBJS) + @echo "creating $(COMPONENT).o" + $(RM) $(RMFLAGS) vers.c + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} + ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c + @echo [ updating $(COMPONENT).o ${IOKIT_KERNEL_CONFIG} ] + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o + +do_depend: do_all + ${MD} -u Makedep -f -d `ls *.d` + + +do_all: $(COMPONENT).o + +do_build_all: do_depend + +%RULES + +-include Makedep + +include $(MakeInc_rule) + +include $(MakeInc_dir) + diff --git a/iokit/conf/copyright b/iokit/conf/copyright new file mode 100644 index 000000000..8930fb873 --- /dev/null +++ b/iokit/conf/copyright @@ -0,0 +1,6 @@ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. 
+ */ diff --git a/iokit/conf/files b/iokit/conf/files new file mode 100644 index 000000000..8d6ee281c --- /dev/null +++ b/iokit/conf/files @@ -0,0 +1,233 @@ +# options + +OPTIONS/iokitcpp optional iokitcpp +OPTIONS/kdebug optional kdebug + +# libIOKit + +iokit/Kernel/IOLib.c optional iokitcpp +iokit/Kernel/IOLocks.cpp optional iokitcpp +iokit/Kernel/IOConditionLock.cpp optional iokitcpp +iokit/Kernel/IOSyncer.cpp optional iokitcpp + +#iokit/Kernel/IORuntime.cpp optional iokitcpp +iokit/Kernel/IOStartIOKit.cpp optional iokitcpp + +iokit/Kernel/IODeviceTreeSupport.cpp optional iokitcpp + +iokit/Kernel/IORegistryEntry.cpp optional iokitcpp +iokit/Kernel/IOService.cpp optional iokitcpp +iokit/Kernel/IOServicePM.cpp optional iokitcpp +iokit/Kernel/IOPMchangeNoteList.cpp optional iokitcpp +iokit/Kernel/IOPMinformee.cpp optional iokitcpp +iokit/Kernel/IOPMinformeeList.cpp optional iokitcpp +iokit/Kernel/IOCatalogue.cpp optional iokitcpp +iokit/Kernel/IOPMPowerSource.cpp optional iokitcpp +iokit/Kernel/IOPMPowerSourceList.cpp optional iokitcpp + +iokit/Kernel/IOWorkLoop.cpp optional iokitcpp +iokit/Kernel/IOEventSource.cpp optional iokitcpp +iokit/Kernel/IOInterruptEventSource.cpp optional iokitcpp +iokit/Kernel/IOCommandGate.cpp optional iokitcpp +iokit/Kernel/IOCommand.cpp optional iokitcpp +iokit/Kernel/IOCommandPool.cpp optional iokitcpp +iokit/Kernel/IOCommandQueue.cpp optional iokitcpp +iokit/Kernel/IOFilterInterruptEventSource.cpp optional iokitcpp +iokit/Kernel/IOTimerEventSource.cpp optional iokitcpp + +iokit/Kernel/IODeviceMemory.cpp optional iokitcpp +iokit/Kernel/IOMemoryDescriptor.cpp optional iokitcpp +iokit/Kernel/IOMemoryCursor.cpp optional iokitcpp +iokit/Kernel/IOBufferMemoryDescriptor.cpp optional iokitcpp +iokit/Kernel/IOMultiMemoryDescriptor.cpp optional iokitcpp +iokit/Kernel/IORangeAllocator.cpp optional iokitcpp + +iokit/Kernel/IOPlatformExpert.cpp optional iokitcpp + +iokit/Kernel/IOCPU.cpp optional iokitcpp + +iokit/Kernel/IONVRAM.cpp optional 
iokitcpp + +iokit/Kernel/IOInterruptController.cpp optional iokitcpp + +iokit/Kernel/IOUserClient.cpp optional iokitcpp + +iokit/Kernel/IOKitDebug.cpp optional iokitcpp +iokit/Kernel/IODataQueue.cpp optional iokitcpp +# iokit/Tests/Tests.cpp optional iokitcpp +# iokit/Tests/TestDevice.cpp optional iokitcpp +# iokit/Tests/TestContainers.cpp optional iokitcpp +# iokit/Tests/TestCollections.cpp optional iokitcpp + +iokit/Kernel/IOStringFuncs.c standard + +# Property tables for kernel-linked objects + +iokit/KernelConfigTables.cpp optional iokitcpp + +# Networking + +iokit/Families/IONetworking/IOPacketQueue.cpp optional iokitcpp +iokit/Families/IONetworking/IOMbufMemoryCursor.cpp optional iokitcpp +iokit/Families/IONetworking/IOOutputQueue.cpp optional iokitcpp +iokit/Families/IONetworking/IOKernelDebugger.cpp optional iokitcpp +iokit/Families/IONetworking/IONetworkController.cpp optional iokitcpp +iokit/Families/IONetworking/IONetworkInterface.cpp optional iokitcpp +iokit/Families/IONetworking/IONetworkData.cpp optional iokitcpp +iokit/Families/IONetworking/IONetworkMedium.cpp optional iokitcpp +iokit/Families/IONetworking/IONetworkStack.cpp optional iokitcpp +iokit/Families/IONetworking/IONetworkUserClient.cpp optional iokitcpp +iokit/Families/IONetworking/IOEthernetController.cpp optional iokitcpp +iokit/Families/IONetworking/IOEthernetInterface.cpp optional iokitcpp + +# PCI support + +iokit/Families/IOPCIBus/IOPCIBridge.cpp optional iokitcpp +iokit/Families/IOPCIBus/IOPCIDevice.cpp optional iokitcpp + +# Event driver + +iokit/Families/IOHIDSystem/IOHIDevice.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDIO.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDSystem.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHIKeyboard.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHIKeyboardMapper.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHIPointing.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHITablet.cpp optional iokitcpp 
+iokit/Families/IOHIDSystem/IOHITabletPointer.cpp optional iokitcpp§ +iokit/Families/IOHIDSystem/IOHIDUserClient.cpp optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCheckReport.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDCountDescriptorItems.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonCaps.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtons.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetButtonsOnPage.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCaps.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetCollectionNodes.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetData.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextButtonInfo.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetNextUsageValueInfo.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetReportLength.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValue.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetUsageValueArray.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDGetValueCaps.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDHasUsage.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDInitReport.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDIsButtonOrValue.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDMaxUsageListLength.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDNextItem.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDOpenCloseDescriptor.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDParseDescriptor.c optional iokitcpp 
+iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPostProcessRIValue.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessCollection.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessGlobalItem.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessLocalItem.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessMainItem.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDProcessReportItem.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDPutData.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDScaleUsageValue.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetButtons.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetScaledUsageValue.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValue.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDSetUsageValueArray.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageAndPageFromIndex.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageInRange.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/HIDUsageListDifference.c optional iokitcpp +iokit/Families/IOHIDSystem/IOHIDDescriptorParser/PoolAlloc.c optional iokitcpp + +# Graphics support + +iokit/Families/IOGraphics/IOGraphicsDevice.cpp optional iokitcpp +iokit/Families/IOGraphics/IOFramebuffer.cpp optional iokitcpp +iokit/Families/IOGraphics/IOFramebufferUserClient.cpp optional iokitcpp +iokit/Families/IOGraphics/IODisplay.cpp optional iokitcpp +iokit/Families/IOGraphics/IODisplayWrangler.cpp optional iokitcpp +iokit/Families/IOGraphics/AppleDDCDisplay.cpp optional iokitcpp +iokit/Families/IOGraphics/IOBootFramebuffer.cpp optional iokitcpp +iokit/Families/IOGraphics/IOAccelerator.cpp optional iokitcpp + +# BSD shim files + 
+iokit/bsddev/IOBSDConsole.cpp optional iokitcpp +iokit/bsddev/IOKitBSDInit.cpp optional iokitcpp + +# SCSI support +iokit/Families/IOSCSIParallel/IOSCSIParallelController.cpp optional iokitcpp +iokit/Families/IOSCSIParallel/IOSCSIParallelDevice.cpp optional iokitcpp +iokit/Families/IOSCSIParallel/IOSCSIParallelCommand.cpp optional iokitcpp +iokit/Families/IOSCSIParallel/queueHelpers.cpp optional iokitcpp + +# ATA support +#iokit/Families/IOATAStandard/IOATAStandardController.cpp optional iokitcpp +#iokit/Families/IOATAStandard/IOATAStandardDevice.cpp optional iokitcpp +#iokit/Families/IOATAStandard/IOATAStandardCommand.cpp optional iokitcpp +#iokit/Families/IOATAStandard/IOATAStandardData.cpp optional iokitcpp +#iokit/Families/IOATAStandard/ATAQueueHelpers.cpp optional iokitcpp + +#iokit/Families/IOATAStandard/IOATAStandardDriver.cpp optional iokitcpp +#iokit/Families/IOATAStandard/IOATAStandardDriverPio.cpp optional iokitcpp +#iokit/Families/IOATAStandard/IOATAStandardDriverDma.cpp optional iokitcpp + +# Power Management +iokit/Drivers/platform/drvAppleRootDomain/RootDomainUserClient.cpp optional iokitcpp +iokit/Kernel/IOPowerConnection.cpp optional iokitcpp + + +# Storage Family +iokit/Families/IOStorage/IOApplePartitionScheme.cpp optional iokitcpp +iokit/Families/IOStorage/IOBlockStorageDriver.cpp optional iokitcpp +iokit/Families/IOStorage/IOFDiskPartitionScheme.cpp optional iokitcpp +iokit/Families/IOStorage/IOMedia.cpp optional iokitcpp +iokit/Families/IOStorage/IOMediaBSDClient.cpp optional iokitcpp +iokit/Families/IOStorage/IONeXTPartitionScheme.cpp optional iokitcpp +iokit/Families/IOStorage/IOPartitionScheme.cpp optional iokitcpp +iokit/Families/IOStorage/IOStorage.cpp optional iokitcpp + +# Storage Family: CDs +iokit/Families/IOCDStorage/IOCDAudioControl.cpp optional iokitcpp +iokit/Families/IOCDStorage/IOCDAudioControlUserClient.cpp optional iokitcpp +iokit/Families/IOCDStorage/IOCDBlockStorageDriver.cpp optional iokitcpp 
+iokit/Families/IOCDStorage/IOCDMedia.cpp optional iokitcpp +iokit/Families/IOCDStorage/IOCDPartitionScheme.cpp optional iokitcpp + +# Storage Family: DVDs +iokit/Families/IODVDStorage/IODVDBlockStorageDriver.cpp optional iokitcpp +iokit/Families/IODVDStorage/IODVDMedia.cpp optional iokitcpp + + +# Block Storage Family +iokit/Families/IOBlockStorage/IOBlockStorageDevice.cpp optional iokitcpp +iokit/Families/IOCDBlockStorage/IOCDBlockStorageDevice.cpp optional iokitcpp +iokit/Families/IODVDBlockStorage/IODVDBlockStorageDevice.cpp optional iokitcpp + +# Block Storage Family: SCSI +iokit/Families/IOSCSIHDDrive/IOBasicSCSI.cpp optional iokitcpp +iokit/Families/IOSCSIHDDrive/IOSCSIHDDrive.cpp optional iokitcpp +iokit/Families/IOSCSIHDDrive/IOSCSIHDDriveNub.cpp optional iokitcpp + +iokit/Families/IOSCSICDDrive/IOSCSICDDrive.cpp optional iokitcpp +iokit/Families/IOSCSICDDrive/IOSCSICDDriveNub.cpp optional iokitcpp + +iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDrive.cpp optional iokitcpp +iokit/Families/IOSCSIDVDDrive/IOSCSIDVDDriveNub.cpp optional iokitcpp + +# Block Storage Family: ATA +#iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp optional iokitcpp +#iokit/Families/IOATAHDDrive/IOATAHDDriveNub.cpp optional iokitcpp +#iokit/Families/IOATAHDDrive/IOATAHDCommand.cpp optional iokitcpp +#iokit/Families/IOATAHDDrive/IOATAHDPower.cpp optional iokitcpp + +# Block Storage Family: ATAPI +#iokit/Families/IOATAPIHDDrive/IOATAPIHDDrive.cpp optional iokitcpp +#iokit/Families/IOATAPIHDDrive/IOATAPIHDDriveNub.cpp optional iokitcpp +#iokit/Families/IOATAPIHDDrive/IOATAPIHDCommand.cpp optional iokitcpp + +#iokit/Families/IOATAPICDDrive/IOATAPICDDrive.cpp optional iokitcpp +#iokit/Families/IOATAPICDDrive/IOATAPICDDriveNub.cpp optional iokitcpp +#iokit/Families/IOATAPICDDrive/IOATAPICDCommand.cpp optional iokitcpp + +#iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDrive.cpp optional iokitcpp +#iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp optional iokitcpp 
+#iokit/Families/IOATAPIDVDDrive/IOATAPIDVDCommand.cpp optional iokitcpp + +# System Management +iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp optional iokitcpp diff --git a/iokit/conf/files.i386 b/iokit/conf/files.i386 new file mode 100644 index 000000000..bd7ca34d4 --- /dev/null +++ b/iokit/conf/files.i386 @@ -0,0 +1,72 @@ + +# Intel platform support + +iokit/Drivers/platform/drvAppleI386Generic/AppleI386PlatformExpert.cpp optional iokitcpp +iokit/Drivers/platform/drvAppleI386Generic/AppleI386CPU.cpp optional iokitcpp + +# PCI support + +iokit/Families/IOPCIBus/IOPCIDeviceI386.cpp optional iokitcpp +iokit/Drivers/pci/drvApplePCI/AppleI386PCI.cpp optional iokitcpp + +# Shared lock + +iokit/Kernel/i386/IOSharedLock.s standard +iokit/Kernel/i386/IOAsmSupport.s standard + +# PS2 support + +iokit/Drivers/platform/drvApplePS2Controller/ApplePS2Controller.cpp optional iokitcpp +iokit/Drivers/platform/drvApplePS2Controller/ApplePS2KeyboardDevice.cpp optional iokitcpp +iokit/Drivers/platform/drvApplePS2Controller/ApplePS2MouseDevice.cpp optional iokitcpp +iokit/Drivers/hidsystem/drvApplePS2Keyboard/ApplePS2Keyboard.cpp optional iokitcpp +iokit/Drivers/hidsystem/drvApplePS2Mouse/ApplePS2Mouse.cpp optional iokitcpp + + +# Interrupt Controller +iokit/Drivers/platform/drvAppleIntelClassicPIC/PIC8259.cpp optional iokitcpp + +# Real Time Clock hack +iokit/Drivers/platform/drvAppleIntelClock/IntelClock.cpp optional iokitcpp + +# Power Domains +iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp optional iokitcpp + +# Intel EtherExpress Pro driver (i82557/i82558) +iokit/Drivers/network/drvIntel82557/i82557.cpp optional iokitcpp +iokit/Drivers/network/drvIntel82557/i82557Private.cpp optional iokitcpp +iokit/Drivers/network/drvIntel82557/i82557PHY.cpp optional iokitcpp +iokit/Drivers/network/drvIntel82557/i82557eeprom.cpp optional iokitcpp + +# ATA support +iokit/Families/IOATAStandard/IOATAStandardController.cpp optional iokitcpp 
+iokit/Families/IOATAStandard/IOATAStandardDevice.cpp optional iokitcpp +iokit/Families/IOATAStandard/IOATAStandardCommand.cpp optional iokitcpp +iokit/Families/IOATAStandard/IOATAStandardData.cpp optional iokitcpp +iokit/Families/IOATAStandard/ATAQueueHelpers.cpp optional iokitcpp + +iokit/Families/IOATAStandard/IOATAStandardDriver.cpp optional iokitcpp +iokit/Families/IOATAStandard/IOATAStandardDriverPio.cpp optional iokitcpp +iokit/Families/IOATAStandard/IOATAStandardDriverDma.cpp optional iokitcpp + +# Block Storage Family: ATA +iokit/Families/IOATAHDDrive/IOATAHDDrive.cpp optional iokitcpp +iokit/Families/IOATAHDDrive/IOATAHDDriveNub.cpp optional iokitcpp +iokit/Families/IOATAHDDrive/IOATAHDCommand.cpp optional iokitcpp +iokit/Families/IOATAHDDrive/IOATAHDPower.cpp optional iokitcpp + +# Block Storage Family: ATAPI +iokit/Families/IOATAPIHDDrive/IOATAPIHDDrive.cpp optional iokitcpp +iokit/Families/IOATAPIHDDrive/IOATAPIHDDriveNub.cpp optional iokitcpp +iokit/Families/IOATAPIHDDrive/IOATAPIHDCommand.cpp optional iokitcpp + +iokit/Families/IOATAPICDDrive/IOATAPICDDrive.cpp optional iokitcpp +iokit/Families/IOATAPICDDrive/IOATAPICDDriveNub.cpp optional iokitcpp +iokit/Families/IOATAPICDDrive/IOATAPICDCommand.cpp optional iokitcpp + +iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDrive.cpp optional iokitcpp +iokit/Families/IOATAPIDVDDrive/IOATAPIDVDDriveNub.cpp optional iokitcpp +iokit/Families/IOATAPIDVDDrive/IOATAPIDVDCommand.cpp optional iokitcpp + +# ATA (Intel PIIX IDE) controller driver +iokit/Drivers/ata/drvApplePIIXATA/AppleATAPIIX.cpp optional iokitcpp diff --git a/iokit/conf/files.ppc b/iokit/conf/files.ppc new file mode 100644 index 000000000..4a56a7717 --- /dev/null +++ b/iokit/conf/files.ppc @@ -0,0 +1,107 @@ + +iokit/Kernel/ppc/IOAsmSupport.s standard +iokit/Kernel/ppc/IODBDMA.cpp optional iokitcpp +iokit/Kernel/ppc/IOSharedLock.s standard + +iokit/Families/IOADBBus/IOADBBus.cpp optional iokitcpp +iokit/Families/IOADBBus/IOADBController.cpp optional 
iokitcpp +iokit/Families/IOADBBus/IOADBControllerUserClient.cpp optional iokitcpp +iokit/Families/IOPCIBus/IOPCIDevicePPC.cpp optional iokitcpp + +# ndrv support + +iokit/Families/IONDRVSupport/IOPEFLoader.c standard +iokit/Families/IONDRVSupport/IOPEFInternals.c standard +iokit/Families/IONDRVSupport/IONDRV.cpp optional iokitcpp +iokit/Families/IONDRVSupport/IONDRVLibraries.cpp optional iokitcpp +iokit/Families/IONDRVSupport/IONDRVFramebuffer.cpp optional iokitcpp + +iokit/Drivers/hidsystem/drvAppleADBDevices/IOADBDevice.cpp optional iokitcpp +iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBMouse.cpp optional iokitcpp +iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBKeyboard.cpp optional iokitcpp +iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBDisplay.cpp optional iokitcpp +iokit/Drivers/hidsystem/drvAppleADBDevices/AppleADBButtons.cpp optional iokitcpp + +iokit/Families/IONVRAM/IONVRAMController.cpp optional iokitcpp +iokit/Drivers/platform/drvAppleNVRAM/AppleNVRAM.cpp optional iokitcpp + +#iokit/Drivers/platform/drvApplePMU/IOPMUNVRAMController.cpp optional iokitcpp + +iokit/Drivers/platform/drvAppleCuda/AppleCuda.cpp optional iokitcpp +iokit/Drivers/platform/drvAppleCuda/IOCudaADBController.cpp optional iokitcpp + +#iokit/Drivers/platform/drvApplePMU/AppleViaInterface.cpp optional iokitcpp +#iokit/Drivers/platform/drvApplePMU/ApplePMU.cpp optional iokitcpp +#iokit/Drivers/platform/drvApplePMU/ApplePMUUserClient.cpp optional iokitcpp +#iokit/Drivers/platform/drvApplePMU/IOPMURTCController.cpp optional iokitcpp +#iokit/Drivers/platform/drvApplePMU/IOPMUPwrController.cpp optional iokitcpp +#iokit/Drivers/platform/drvApplePMU/IOPMUPowerSource.cpp optional iokitcpp + +iokit/Drivers/platform/drvApplePMU/IOPMUADBController.cpp optional iokitcpp + +iokit/Drivers/pci/drvApplePCI/AppleMacRiscPCI.cpp optional iokitcpp +iokit/Drivers/pci/drvApplePCI/AppleGracklePCI.cpp optional iokitcpp + + +# Apple Platform Expert 
+iokit/Drivers/platform/drvApplePlatformExpert/ApplePlatformExpert.cpp optional iokitcpp +iokit/Drivers/platform/drvApplePlatformExpert/AppleCPU.cpp optional iokitcpp + +# Power Domains +iokit/Drivers/platform/drvAppleRootDomain/RootDomain.cpp optional iokitcpp + +# Apple Mac-IO driver +iokit/Drivers/platform/drvAppleMacIO/AppleMacIO.cpp optional iokitcpp + +# Apple NMI driver +iokit/Drivers/platform/drvAppleNMI/AppleNMI.cpp optional iokitcpp + +# Platform Experts +iokit/Drivers/platform/drvApplePowerSurgePE/PowerSurge.cpp optional disabled-iokitcpp +iokit/Drivers/platform/drvApplePowerStarPE/PowerStar.cpp optional disabled-iokitcpp +iokit/Drivers/platform/drvAppleGossamerPE/Gossamer.cpp optional iokitcpp +iokit/Drivers/platform/drvAppleGossamerPE/GossamerCPU.cpp optional iokitcpp +iokit/Drivers/platform/drvApplePowerExpressPE/PowerExpress.cpp optional iokitcpp + + +# Mac-IO drivers +iokit/Drivers/platform/drvAppleGrandCentral/GrandCentral.cpp optional disabled-iokitcpp +iokit/Drivers/platform/drvAppleOHare/OHare.cpp optional iokitcpp + + + +# BMac driver + +iokit/Drivers/network/drvPPCBMac/BMacEnetHW.cpp optional iokitcpp +iokit/Drivers/network/drvPPCBMac/BMacEnetPrivate.cpp optional iokitcpp +iokit/Drivers/network/drvPPCBMac/BMacEnet.cpp optional iokitcpp +iokit/Drivers/network/drvPPCBMac/BMacEnetMII.cpp optional iokitcpp + + +# Mace driver +iokit/Drivers/network/drvMaceEnet/MaceEnetHW.cpp optional disabled-iokitcpp +iokit/Drivers/network/drvMaceEnet/MaceEnetPrivate.cpp optional disabled-iokitcpp +iokit/Drivers/network/drvMaceEnet/MaceEnet.cpp optional disabled-iokitcpp + +# Display drivers + +iokit/Families/IOGraphics/AppleG3SeriesDisplay.cpp optional iokitcpp + + +# Intel EtherExpress Pro driver (i82557/i82558) +iokit/Drivers/network/drvIntel82557/i82557.cpp optional iokitcpp +iokit/Drivers/network/drvIntel82557/i82557Private.cpp optional iokitcpp +iokit/Drivers/network/drvIntel82557/i82557PHY.cpp optional iokitcpp 
+iokit/Drivers/network/drvIntel82557/i82557eeprom.cpp optional iokitcpp + +# Symbios 8xx SCSI Driver +#iokit/Drivers/scsi/drvSymbios8xx/Sym8xxClient.cpp optional iokitcpp +#iokit/Drivers/scsi/drvSymbios8xx/Sym8xxExecute.cpp optional iokitcpp +#iokit/Drivers/scsi/drvSymbios8xx/Sym8xxInit.cpp optional iokitcpp +#iokit/Drivers/scsi/drvSymbios8xx/Sym8xxMisc.cpp optional iokitcpp + +# ATA driver +#iokit/Drivers/ata/drvAppleUltra66ATA/AppleUltra66ATA.cpp optional iokitcpp +#iokit/Drivers/ata/drvAppleUltra33ATA/AppleUltra33ATA.cpp optional iokitcpp + + diff --git a/iokit/conf/tools/Makefile b/iokit/conf/tools/Makefile new file mode 100644 index 000000000..9df86ce8c --- /dev/null +++ b/iokit/conf/tools/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + doconf \ + newvers + +COMP_SUBDIRS = \ + doconf \ + newvers + +INST_SUBDIRS = \ + + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/conf/tools/doconf/Makefile b/iokit/conf/tools/doconf/Makefile new file mode 100644 index 000000000..2bf0b7a10 --- /dev/null +++ b/iokit/conf/tools/doconf/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include 
$(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)doconf + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/conf/tools/doconf/doconf.csh b/iokit/conf/tools/doconf/doconf.csh new file mode 100755 index 000000000..43388c11c --- /dev/null +++ b/iokit/conf/tools/doconf/doconf.csh @@ -0,0 +1,313 @@ +#!/bin/csh -f +set path = ($path .) +###################################################################### +# HISTORY +# 1-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University +# Added "-verbose" switch, so this script produces no output +# in the normal case. +# +# 10-Oct-87 Mike Accetta (mja) at Carnegie-Mellon University +# Flushed cmu_*.h and spin_locks.h +# [ V5.1(XF18) ] +# +# 6-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# Use MASTER.local and MASTER..local for generation of +# configuration files in addition to MASTER and MASTER.. 
+# +# 25-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Removed use of obsolete wb_*.h files when building the feature +# list; modified to save the previous configuration file and +# display the differences between it and the new file. +# [ V5.1(F8) ] +# +# 25-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# If there is no /etc/machine just print out a message telling +# user to use the -cpu option. I thought this script was supposed +# to work even without a /etc/machine, but it doesn't... and this +# is the easiest way out. +# +# 13-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Added "romp_fpa.h" file to extra features for the RT. +# [ V5.1(F7) ] +# +# 11-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to maintain the appropriate configuration features file +# in the "machine" directory whenever the corresponding +# configuration is generated. This replaces the old mechanism of +# storing this directly in the file since it was +# machine dependent and also precluded building programs for more +# than one configuration from the same set of sources. +# [ V5.1(F6) ] +# +# 21-Feb-87 Mike Accetta (mja) at Carnegie-Mellon University +# Fixed to require wired-in cpu type names for only those +# machines where the kernel name differs from that provided by +# /etc/machine (i.e. IBMRT => ca and SUN => sun3); updated to +# permit configuration descriptions in both machine indepedent +# and dependent master configuration files so that attributes can +# be grouped accordingly. +# [ V5.1(F3) ] +# +# 17-Jan-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to work from any directory at the same level as +# "conf"; generate configuration from both MASTER and +# MASTER. files; added -cpu switch. 
+# [ V5.1(F1) ] +# +# 18-Aug-86 Mike Accetta (mja) at Carnegie-Mellon University +# Added -make switch and changed meaning of -config; upgraded to +# allow multiple attributes per configuration and to define +# configurations in terms of these attributes within MASTER. +# +# 14-Apr-83 Mike Accetta (mja) at Carnegie-Mellon University +# Added -config switch to only run /etc/config without +# "make depend" and "make". +# +###################################################################### + +set prog=$0 +set prog=$prog:t +set nonomatch +set OBJDIR=../BUILD +if ("`/usr/bin/uname`" == "Rhapsody" ) then +set CONFIG_DIR=/usr/local/bin +else +set CONFIG_DIR=/usr/bin +endif + +unset domake +unset doconfig +unset beverbose +unset MACHINE +unset profile + +while ($#argv >= 1) + if ("$argv[1]" =~ -*) then + switch ("$argv[1]") + case "-c": + case "-config": + set doconfig + breaksw + case "-m": + case "-make": + set domake + breaksw + case "-cpu": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set MACHINE="$argv[2]" + shift + breaksw + case "-d": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set OBJDIR="$argv[2]" + shift + breaksw + case "-verbose": + set beverbose + breaksw + case "-p": + case "-profile": + set profile + breaksw + default: + echo "${prog}: ${argv[1]}: unknown switch" + exit 1 + breaksw + endsw + shift + else + break + endif +end + +if ($#argv == 0) set argv=(GENERIC) + +if (! $?MACHINE) then + if (-d /NextApps) then + set MACHINE=`hostinfo | awk '/MC680x0/ { printf("m68k") } /MC880x0/ { printf("m88k") }'` + endif +endif + +if (! $?MACHINE) then + if (-f /etc/machine) then + set MACHINE="`/etc/machine`" + else + echo "${prog}: no /etc/machine, specify machine type with -cpu" + echo "${prog}: e.g. 
${prog} -cpu VAX CONFIGURATION" + exit 1 + endif +endif + +set FEATURES_EXTRA= + +switch ("$MACHINE") + case IBMRT: + set cpu=ca + set ID=RT + set FEATURES_EXTRA="romp_dualcall.h romp_fpa.h" + breaksw + case SUN: + set cpu=sun3 + set ID=SUN3 + breaksw + default: + set cpu=`echo $MACHINE | tr A-Z a-z` + set ID=`echo $MACHINE | tr a-z A-Z` + breaksw +endsw +set FEATURES=../h/features.h +set FEATURES_H=(cs_*.h mach_*.h net_*.h\ + cputypes.h cpus.h vice.h\ + $FEATURES_EXTRA) +set MASTER_DIR=../conf +set MASTER = ${MASTER_DIR}/MASTER +set MASTER_CPU=${MASTER}.${cpu} + +set MASTER_LOCAL = ${MASTER}.local +set MASTER_CPU_LOCAL = ${MASTER_CPU}.local +if (! -f $MASTER_LOCAL) set MASTER_LOCAL = "" +if (! -f $MASTER_CPU_LOCAL) set MASTER_CPU_LOCAL = "" + +if (! -d $OBJDIR) then + echo "[ creating $OBJDIR ]" + mkdir -p $OBJDIR +endif + +foreach SYS ($argv) + set SYSID=${SYS}_${ID} + set SYSCONF=$OBJDIR/config.$SYSID + set BLDDIR=$OBJDIR + if ($?beverbose) then + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + endif + echo +$SYS \ + | \ + cat $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL - \ + $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL \ + | \ + sed -n \ + -e "/^+/{" \ + -e "s;[-+];#&;gp" \ + -e 't loop' \ + -e ': loop' \ + -e 'n' \ + -e '/^#/b loop' \ + -e '/^$/b loop' \ + -e 's;^\([^#]*\).*#[ ]*<\(.*\)>[ ]*$;\2#\1;' \ + -e 't not' \ + -e 's;\([^#]*\).*;#\1;' \ + -e 't not' \ + -e ': not' \ + -e 's;[ ]*$;;' \ + -e 's;^\!\(.*\);\1#\!;' \ + -e 'p' \ + -e 't loop' \ + -e 'b loop' \ + -e '}' \ + -e "/^[^#]/d" \ + -e 's; ; ;g' \ + -e "s;^# *\([^ ]*\)[ ]*=[ ]*\[\(.*\)\].*;\1#\2;p" \ + | \ + awk '-F#' '\ +part == 0 && $1 != "" {\ + m[$1]=m[$1] " " $2;\ + next;\ +}\ +part == 0 && $1 == "" {\ + for (i=NF;i>1;i--){\ + s=substr($i,2);\ + c[++na]=substr($i,1,1);\ + a[na]=s;\ + }\ + while (na > 0){\ + s=a[na];\ + d=c[na--];\ + if (m[s] == "") {\ + f[s]=d;\ + } else {\ + nx=split(m[s],x," ");\ + for (j=nx;j>0;j--) {\ + z=x[j];\ + a[++na]=z;\ + 
c[na]=d;\ + }\ + }\ + }\ + part=1;\ + next;\ +}\ +part != 0 {\ + if ($1 != "") {\ + n=split($1,x,",");\ + ok=0;\ + for (i=1;i<=n;i++) {\ + if (f[x[i]] == "+") {\ + ok=1;\ + }\ + }\ + if (NF > 2 && ok == 0 || NF <= 2 && ok != 0) {\ + print $2; \ + }\ + } else { \ + print $2; \ + }\ +}\ +' >$SYSCONF.new + if (-z $SYSCONF.new) then + echo "${prog}: ${$SYSID}: no such configuration in $MASTER_DIR/MASTER{,.$cpu}" + rm -f $SYSCONF.new + endif + if (! -d $BLDDIR) then + echo "[ creating $BLDDIR ]" + mkdir -p $BLDDIR + endif +# +# These paths are used by config. +# +# "builddir" is the name of the directory where kernel binaries +# are put. It is a single path element, never absolute, and is +# always relative to "objectdir". "builddir" is used by config +# solely to determine where to put files created by "config" (e.g. +# the created Makefile and *.h's.) +# +# "objectdir" is the name of the directory which will hold "builddir". +# It is a path; if relative, it is relative to the current directory +# where config is run. It's sole use is to be prepended to "builddir" +# to indicate where config-created files are to be placed (see above). +# +# "sourcedir" is the location of the sources used to build the kernel. +# It is a path; if relative, it is relative to the directory specified +# by the concatenation of "objectdir" and "builddir" (i.e. where the +# kernel binaries are put). 
+# + echo 'builddir "."' >> $SYSCONF.new + set OBJRELDIR=`relpath $OBJROOT $OBJDIR` + echo 'objectdir "'$OBJROOT'/'$OBJRELDIR'"' >> $SYSCONF.new + set SRCDIR=`dirname $SOURCE` + echo 'sourcedir "'$SRCROOT'"' >> $SYSCONF.new + if (-f $SYSCONF) then + diff $SYSCONF $SYSCONF.new + rm -f $SYSCONF.old + mv $SYSCONF $SYSCONF.old + endif + rm -f $SYSCONF + mv $SYSCONF.new $SYSCONF + if ($?doconfig) then + echo "[ configuring $SYSID ]" + if ($?profile) then + $CONFIG_DIR/config -c $MASTER_DIR -p $SYSCONF + else + $CONFIG_DIR/config -c $MASTER_DIR $SYSCONF + endif + endif + if ($?domake) then + echo "[ making $SYSID ]" + (cd $BLDDIR; make) + endif +end diff --git a/iokit/conf/tools/newvers/Makefile b/iokit/conf/tools/newvers/Makefile new file mode 100644 index 000000000..73603c753 --- /dev/null +++ b/iokit/conf/tools/newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)newvers + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ 
$(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/conf/tools/newvers/newvers.csh b/iokit/conf/tools/newvers/newvers.csh new file mode 100644 index 000000000..381446bb2 --- /dev/null +++ b/iokit/conf/tools/newvers/newvers.csh @@ -0,0 +1,34 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +v="${major}.${minor}" d=`pwd` h="rcbuilder" t=`date` w=`whoami` +if [ -z "$d" -o -z "$h" -o -z "$t" ]; then + exit 1 +fi +CONFIG=`expr "$d" : '.*/\([^/]*\)$'` +d=`expr "$d" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int ${COMPONENT}_version_major = ${major};" ; + /bin/echo "int ${COMPONENT}_version_minor = ${minor};" ; + /bin/echo "char ${COMPONENT}_version_variant[] = \"${variant}\";" ; + /bin/echo "char ${COMPONENT}_version[] = \"IOKit Component Version ${v}:\\n${t}; $w($h):$d\\n\";" ; + /bin/echo "char ${COMPONENT}_osrelease[] = \"${major}.${minor}\";" ; + /bin/echo "char ${COMPONENT}_ostype[] = \"IOKit\";" ; + /bin/echo "char ${COMPONENT}_builder[] = \"$w\";" ; +) > vers.c +if [ -s vers.suffix -o ! 
-f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/iokit/conf/version.major b/iokit/conf/version.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/iokit/conf/version.major @@ -0,0 +1 @@ +1 diff --git a/iokit/conf/version.minor b/iokit/conf/version.minor new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/iokit/conf/version.minor @@ -0,0 +1 @@ +0 diff --git a/iokit/conf/version.variant b/iokit/conf/version.variant new file mode 100644 index 000000000..e69de29bb diff --git a/iokit/include/DeviceTree.h b/iokit/include/DeviceTree.h new file mode 100644 index 000000000..ef2a616b8 --- /dev/null +++ b/iokit/include/DeviceTree.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __DEVICE_TREE__ +#define __DEVICE_TREE__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* +------------------------------------------------------------------------------------ + Foundation Types +------------------------------------------------------------------------------------ +*/ +enum { + kDTPathNameSeparator = '/' /* 0x2F */ +}; + + +/* Property Name Definitions (Property Names are C-Strings)*/ +enum { + kDTMaxPropertyNameLength = 31 /* Max length of Property Name (terminator not included) */ +}; + +typedef char DTPropertyNameBuf[32]; + + +/* Entry Name Definitions (Entry Names are C-Strings)*/ +enum { + kDTMaxEntryNameLength = 31 /* Max length of a C-String Entry Name (terminator not included) */ +}; + +/* length of DTEntryNameBuf = kDTMaxEntryNameLength +1*/ +typedef char DTEntryNameBuf[32]; + + +/* Entry*/ +typedef struct OpaqueDTEntry* DTEntry; + +/* Entry Iterator*/ +typedef struct OpaqueDTEntryIterator* DTEntryIterator; + +/* Property Iterator*/ +typedef struct OpaqueDTPropertyIterator* DTPropertyIterator; + + +/* status values*/ +enum { + kError = -1, + kIterationDone = 0, + kSuccess = 1 +}; + +/* +------------------------------------------------------------------------------------ + Device Tree Calls +------------------------------------------------------------------------------------ +*/ + +/* +------------------------------------------------------------------------------------ + Entry Handling +------------------------------------------------------------------------------------ +*/ +/* Compare two Entry's for equality. */ +extern int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); + +/* +------------------------------------------------------------------------------------ + LookUp Entry by Name +------------------------------------------------------------------------------------ +*/ +/* + Lookup Entry + Locates an entry given a specified subroot (searchPoint) and path name. 
If the + searchPoint pointer is NULL, the path name is assumed to be an absolute path + name rooted to the root of the device tree. +*/ +extern int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry); + +/* +------------------------------------------------------------------------------------ + Entry Iteration +------------------------------------------------------------------------------------ +*/ +/* + An Entry Iterator maintains three variables that are of interest to clients. + First is an "OutermostScope" which defines the outer boundry of the iteration. + This is defined by the starting entry and includes that entry plus all of it's + embedded entries. Second is a "currentScope" which is the entry the iterator is + currently in. And third is a "currentPosition" which is the last entry returned + during an iteration. + + Create Entry Iterator + Create the iterator structure. The outermostScope and currentScope of the iterator + are set to "startEntry". If "startEntry" = NULL, the outermostScope and + currentScope are set to the root entry. The currentPosition for the iterator is + set to "nil". +*/ +extern int DTCreateEntryIterator(const DTEntry startEntry, DTEntryIterator *iterator); + +/* Dispose Entry Iterator*/ +extern int DTDisposeEntryIterator(DTEntryIterator iterator); + +/* + Enter Child Entry + Move an Entry Iterator into the scope of a specified child entry. The + currentScope of the iterator is set to the entry specified in "childEntry". If + "childEntry" is nil, the currentScope is set to the entry specified by the + currentPosition of the iterator. +*/ +extern int DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); + +/* + Exit to Parent Entry + Move an Entry Iterator out of the current entry back into the scope of it's parent + entry. The currentPosition of the iterator is reset to the current entry (the + previous currentScope), so the next iteration call will continue where it left off. 
+ This position is returned in parameter "currentPosition". +*/ +extern int DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); + +/* + Iterate Entries + Iterate and return entries contained within the entry defined by the current + scope of the iterator. Entries are returned one at a time. When + int == kIterationDone, all entries have been exhausted, and the + value of nextEntry will be Nil. +*/ +extern int DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); + +/* + Restart Entry Iteration + Restart an iteration within the current scope. The iterator is reset such that + iteration of the contents of the currentScope entry can be restarted. The + outermostScope and currentScope of the iterator are unchanged. The currentPosition + for the iterator is set to "nil". +*/ +extern int DTRestartEntryIteration(DTEntryIterator iterator); + +/* +------------------------------------------------------------------------------------ + Get Property Values +------------------------------------------------------------------------------------ +*/ +/* + Get the value of the specified property for the specified entry. + + Get Property +*/ +extern int DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, int *propertySize); + +/* +------------------------------------------------------------------------------------ + Iterating Properties +------------------------------------------------------------------------------------ +*/ +/* + Create Property Iterator + Create the property iterator structure. The target entry is defined by entry. +*/ +extern int DTCreatePropertyIterator(const DTEntry entry, DTPropertyIterator *iterator); + +/* Dispose Property Iterator*/ +extern int DTDisposePropertyIterator(DTPropertyIterator iterator); + +/* + Iterate Properites + Iterate and return properties for given entry. + When int == kIterationDone, all properties have been exhausted. 
+*/ +extern int DTIterateProperties(DTPropertyIterator iterator, char **foundProperty); + +/* + Restart Property Iteration + Used to re-iterate over a list of properties. The Property Iterator is reset to + the beginning of the list of properties for an entry. +*/ +extern int DTRestartPropertyIteration(DTPropertyIterator iterator); + +#ifdef __cplusplus +} +#endif + +#endif /* __DEVICE_TREE__ */ + diff --git a/iokit/include/Makefile b/iokit/include/Makefile new file mode 100644 index 000000000..a813d3ea6 --- /dev/null +++ b/iokit/include/Makefile @@ -0,0 +1,30 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = bsddev drivers +INSTINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS} +INSTINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS} + + +EXPINC_SUBDIRS = +EXPINC_SUBDIRS_PPC = ${EXPINC_SUBDIRS} +EXPINC_SUBDIRS_I386 = ${EXPINC_SUBDIRS} + + +SETUP_SUBDIRS = + +COMP_SUBDIRS = + +INST_SUBDIRS = + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/include/architecture/i386/kernBootStruct.h b/iokit/include/architecture/i386/kernBootStruct.h new file mode 100644 index 000000000..787b62ce2 --- /dev/null +++ b/iokit/include/architecture/i386/kernBootStruct.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include diff --git a/iokit/include/architecture/i386/pio.h b/iokit/include/architecture/i386/pio.h new file mode 100644 index 000000000..408aac2bc --- /dev/null +++ b/iokit/include/architecture/i386/pio.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.2.1 1998/10/13 00:40:44 ehewitt + * Added support for Intel. 
+ * + * Revision 1.1.1.1 1998/09/22 21:05:37 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.2 1996/07/31 09:46:36 paire + * Merged with nmk20b7_shared (1.1.11.2 -> 1.1.11.1) + * [96/06/10 paire] + * + * Revision 1.1.11.2 1996/06/13 12:38:25 bernadat + * Do not use inline macros when MACH_ASSERT is configured. + * [96/05/24 bernadat] + * + * Revision 1.1.11.1 1996/05/14 13:50:23 paire + * Added new linl and loutl __inline__. + * Added conditional compilation for [l]{in|oub}[bwl]() __inline__. + * [95/11/24 paire] + * + * Revision 1.1.8.1 1994/09/23 02:00:28 ezf + * change marker to not FREE + * [1994/09/22 21:25:52 ezf] + * + * Revision 1.1.4.5 1993/08/09 19:40:41 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:45:57 dswartz] + * + * Revision 1.1.4.4 1993/06/11 15:17:37 jeffc + * CR9176 - ANSI C violations: inb/outb macros must be changed from + * ({ ... }) to inline functions, with proper type definitions. Callers + * must pass proper types to these functions: 386 I/O port addresses + * are unsigned shorts (not pointers). + * [1993/06/10 14:26:10 jeffc] + * + * Revision 1.1.4.3 1993/06/07 22:09:28 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:00:26 jeffc] + * + * Revision 1.1.4.2 1993/06/04 15:28:45 jeffc + * CR9176 - ANSI problems - + * Added casts to get macros to take caddr_t as an I/O space address. 
+ * [1993/06/04 13:45:55 jeffc] + * + * Revision 1.1 1992/09/30 02:25:51 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/05/14 16:14:20 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:13:56 mrt + * Changed to new Mach copyright + * [91/02/01 17:37:08 mrt] + * + * Revision 2.3 90/12/20 16:36:37 jeffreyh + * changes for __STDC__ + * [90/12/07 jeffreyh] + * + * Revision 2.2 90/11/26 14:48:41 rvb + * Pulled from 2.5 + * [90/11/22 10:09:38 rvb] + * + * [90/08/14 mg32] + * + * Now we know how types are factor in. + * Cleaned up a bunch: eliminated ({ for output and flushed unused + * output variables. + * [90/08/14 rvb] + * + * This is how its done in gcc: + * Created. + * [90/03/26 rvb] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +#ifndef I386_PIO_H +#define I386_PIO_H +//#include +//#include +#define MACH_ASSERT 0 + +typedef unsigned short i386_ioport_t; + +/* read a longword */ +extern unsigned long inl( + i386_ioport_t port); +/* read a shortword */ +extern unsigned short inw( + i386_ioport_t port); +/* read a byte */ +extern unsigned char inb( + i386_ioport_t port); +/* write a longword */ +extern void outl( + i386_ioport_t port, + unsigned long datum); +/* write a word */ +extern void outw( + i386_ioport_t port, + unsigned short datum); +/* write a longword */ +extern void outb( + i386_ioport_t port, + unsigned char datum); + +/* input an array of longwords */ +extern void linl( + i386_ioport_t port, + int * data, + int count); +/* output an array of longwords */ +extern void loutl( + i386_ioport_t port, + int * data, + int count); + +/* input an array of words */ +extern void linw( + i386_ioport_t port, + int * data, + int count); +/* output an array of words */ +extern void loutw( + i386_ioport_t port, + int * data, + int count); + +/* input an array of bytes */ +extern void linb( + i386_ioport_t port, + char * data, + int count); +/* output an array of bytes */ +extern void loutb( + i386_ioport_t port, + char * data, + int count); + +#if defined(__GNUC__) && (!MACH_ASSERT) +extern __inline__ unsigned long inl( + i386_ioport_t port) +{ + unsigned long datum; + __asm__ volatile("inl %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ unsigned short inw( + i386_ioport_t port) +{ + unsigned short datum; + __asm__ volatile(".byte 0x66; inl %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ unsigned char inb( + i386_ioport_t port) +{ + unsigned char datum; + __asm__ volatile("inb %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ void outl( + i386_ioport_t port, + unsigned long datum) +{ + __asm__ volatile("outl %0, %1" : : "a" (datum), "d" (port)); +} + +extern __inline__ void outw( + 
i386_ioport_t port, + unsigned short datum) +{ + __asm__ volatile(".byte 0x66; outl %0, %1" : : "a" (datum), "d" (port)); +} + +extern __inline__ void outb( + i386_ioport_t port, + unsigned char datum) +{ + __asm__ volatile("outb %0, %1" : : "a" (datum), "d" (port)); +} +#endif /* defined(__GNUC__) && (!MACH_ASSERT) */ +#endif /* I386_PIO_H */ diff --git a/iokit/include/assert.h b/iokit/include/assert.h new file mode 100644 index 000000000..9ee0085ff --- /dev/null +++ b/iokit/include/assert.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#warning include is going away use IOKit/assert.h instead + +#include diff --git a/iokit/include/bsddev/EventShmemLock.h b/iokit/include/bsddev/EventShmemLock.h new file mode 100644 index 000000000..6e7eebdef --- /dev/null +++ b/iokit/include/bsddev/EventShmemLock.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include diff --git a/iokit/include/bsddev/Makefile b/iokit/include/bsddev/Makefile new file mode 100644 index 000000000..3fc8d90bd --- /dev/null +++ b/iokit/include/bsddev/Makefile @@ -0,0 +1,41 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = machine + +INSTINC_SUBDIRS_PPC = ppc + +INSTINC_SUBDIRS_I386 = i386 + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + EventShmemLock.h \ + ev_types.h \ + event.h \ + evio.h \ + ev_keymap.h \ + evsio.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = dev + +EXPORT_MI_LIST = \ + +EXPORT_MI_DIR = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/include/bsddev/ev_keymap.h b/iokit/include/bsddev/ev_keymap.h new file mode 100644 index 000000000..f2e2b401b --- /dev/null +++ 
b/iokit/include/bsddev/ev_keymap.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/ev_types.h b/iokit/include/bsddev/ev_types.h new file mode 100644 index 000000000..fa204d704 --- /dev/null +++ b/iokit/include/bsddev/ev_types.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/event.h b/iokit/include/bsddev/event.h new file mode 100644 index 000000000..d1a7f61fb --- /dev/null +++ b/iokit/include/bsddev/event.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/evio.h b/iokit/include/bsddev/evio.h new file mode 100644 index 000000000..5db70f692 --- /dev/null +++ b/iokit/include/bsddev/evio.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/evsio.h b/iokit/include/bsddev/evsio.h new file mode 100644 index 000000000..ebdd5d8cf --- /dev/null +++ b/iokit/include/bsddev/evsio.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +/* + * Identify this driver as one that uses the new driverkit and messaging API + */ +#ifndef _NeXT_MACH_EVENT_DRIVER_ +#define _NeXT_MACH_EVENT_DRIVER_ (1) +#endif /* _NeXT_MACH_EVENT_DRIVER_ */ diff --git a/iokit/include/bsddev/i386/EventShmemLock.h b/iokit/include/bsddev/i386/EventShmemLock.h new file mode 100644 index 000000000..6d6fa7c01 --- /dev/null +++ b/iokit/include/bsddev/i386/EventShmemLock.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/i386/Makefile b/iokit/include/bsddev/i386/Makefile new file mode 100644 index 000000000..fd80bdd0b --- /dev/null +++ b/iokit/include/bsddev/i386/Makefile @@ -0,0 +1,39 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + evsio.h \ + evio.h \ + event.h \ + EventShmemLock.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = dev/i386 + +EXPORT_MD_LIST = \ + +EXPORT_MD_DIR = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/include/bsddev/i386/event.h b/iokit/include/bsddev/i386/event.h new file mode 100644 index 000000000..f40fa9d5f --- /dev/null +++ b/iokit/include/bsddev/i386/event.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/i386/evio.h b/iokit/include/bsddev/i386/evio.h new file mode 100644 index 000000000..71c52f803 --- /dev/null +++ b/iokit/include/bsddev/i386/evio.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/i386/evsio.h b/iokit/include/bsddev/i386/evsio.h new file mode 100644 index 000000000..fffabee74 --- /dev/null +++ b/iokit/include/bsddev/i386/evsio.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/machine/EventShmemLock.h b/iokit/include/bsddev/machine/EventShmemLock.h new file mode 100644 index 000000000..3d14ddf25 --- /dev/null +++ b/iokit/include/bsddev/machine/EventShmemLock.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_DEV_MACHINE_EVENTSHMEMLOCK_H_ +#define _BSD_DEV_MACHINE_EVENTSHMEMLOCK_H_ + + +#if defined (__ppc__) +#include "bsd/dev/ppc/EventShmemLock.h" +#elif defined (__i386__) +#include "bsd/dev/i386/EventShmemLock.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_DEV_MACHINE_EVENTSHMEMLOCK_H_ */ diff --git a/iokit/include/bsddev/machine/Makefile b/iokit/include/bsddev/machine/Makefile new file mode 100644 index 000000000..110ceda46 --- /dev/null +++ b/iokit/include/bsddev/machine/Makefile @@ -0,0 +1,39 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + evsio.h \ + evio.h \ + event.h \ + EventShmemLock.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = dev/machine + +EXPORT_MI_LIST = \ + +EXPORT_MI_DIR = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/include/bsddev/machine/event.h b/iokit/include/bsddev/machine/event.h new file mode 100644 index 000000000..622867427 --- /dev/null +++ b/iokit/include/bsddev/machine/event.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_DEV_MACHINE_EVENT_H_ +#define _BSD_DEV_MACHINE_EVENT_H_ + + +#if defined (__ppc__) +#include "bsd/dev/ppc/event.h" +#elif defined (__i386__) +#include "bsd/dev/i386/event.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_DEV_MACHINE_EVENT_H_ */ diff --git a/iokit/include/bsddev/machine/evio.h b/iokit/include/bsddev/machine/evio.h new file mode 100644 index 000000000..447de6893 --- /dev/null +++ b/iokit/include/bsddev/machine/evio.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_DEV_MACHINE_EVIO_H_ +#define _BSD_DEV_MACHINE_EVIO_H_ + + +#if defined (__ppc__) +#include "bsd/dev/ppc/evio.h" +#elif defined (__i386__) +#include "bsd/dev/i386/evio.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_DEV_MACHINE_EVIO_H_ */ diff --git a/iokit/include/bsddev/machine/evsio.h b/iokit/include/bsddev/machine/evsio.h new file mode 100644 index 000000000..2dfcc948c --- /dev/null +++ b/iokit/include/bsddev/machine/evsio.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _BSD_DEV_MACHINE_EVSIO_H_ +#define _BSD_DEV_MACHINE_EVSIO_H_ + + +#if defined (__ppc__) +#include "bsd/dev/ppc/evsio.h" +#elif defined (__i386__) +#include "bsd/dev/i386/evsio.h" +#else +#error architecture not supported +#endif + + +#endif /* _BSD_DEV_MACHINE_EVSIO_H_ */ diff --git a/iokit/include/bsddev/ppc/EventShmemLock.h b/iokit/include/bsddev/ppc/EventShmemLock.h new file mode 100644 index 000000000..6d6fa7c01 --- /dev/null +++ b/iokit/include/bsddev/ppc/EventShmemLock.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/ppc/Makefile b/iokit/include/bsddev/ppc/Makefile new file mode 100644 index 000000000..b952d4510 --- /dev/null +++ b/iokit/include/bsddev/ppc/Makefile @@ -0,0 +1,39 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + +INSTINC_SUBDIRS_PPC = \ + +INSTINC_SUBDIRS_I386 = \ + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + evsio.h \ + evio.h \ + event.h \ + EventShmemLock.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = dev/ppc + +EXPORT_MD_LIST = \ + +EXPORT_MD_DIR = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/include/bsddev/ppc/event.h b/iokit/include/bsddev/ppc/event.h new file mode 100644 index 000000000..f40fa9d5f --- /dev/null +++ b/iokit/include/bsddev/ppc/event.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/ppc/evio.h b/iokit/include/bsddev/ppc/evio.h new file mode 100644 index 000000000..71c52f803 --- /dev/null +++ b/iokit/include/bsddev/ppc/evio.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/bsddev/ppc/evsio.h b/iokit/include/bsddev/ppc/evsio.h new file mode 100644 index 000000000..fffabee74 --- /dev/null +++ b/iokit/include/bsddev/ppc/evsio.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + diff --git a/iokit/include/drivers/Makefile b/iokit/include/drivers/Makefile new file mode 100644 index 000000000..4b77117cc --- /dev/null +++ b/iokit/include/drivers/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + event_status_driver.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = drivers + +EXPORT_MI_LIST = \ + +EXPORT_MI_DIR = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/iokit/include/drivers/event_status_driver.h b/iokit/include/drivers/event_status_driver.h new file mode 100644 index 000000000..1a939be02 --- /dev/null +++ b/iokit/include/drivers/event_status_driver.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/****************************************************************************** + event_status_driver.h + API for the events status driver. + This file contains public API. + mpaque 11Oct91 + + Copyright 1991 NeXT Computer, Inc. + + Modified: + +******************************************************************************/ + +#ifndef _DRIVERS_EVENT_STATUS_DRIVER_ +#define _DRIVERS_EVENT_STATUS_DRIVER_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + + +/* + * Event System Handle: + * + * Information used by the system between calls to NXOpenEventSystem and + * NXCloseEventSystem. The application should not + * access any of the elements of this structure. 
+ */ +typedef mach_port_t NXEventHandle; + +/* Open and Close */ +NXEventHandle NXOpenEventStatus(void); +void NXCloseEventStatus(NXEventHandle handle); + +/* Status */ +extern NXEventSystemInfoType NXEventSystemInfo(NXEventHandle handle, + char *flavor, + int *evs_info, + unsigned int *evs_info_cnt); +/* Keyboard */ +extern void NXSetKeyRepeatInterval(NXEventHandle handle, double seconds); +extern double NXKeyRepeatInterval(NXEventHandle handle); +extern void NXSetKeyRepeatThreshold(NXEventHandle handle, double threshold); +extern double NXKeyRepeatThreshold(NXEventHandle handle); +extern NXKeyMapping *NXSetKeyMapping(NXEventHandle h, NXKeyMapping *keymap); +extern int NXKeyMappingLength(NXEventHandle handle); +extern NXKeyMapping *NXGetKeyMapping(NXEventHandle h, NXKeyMapping *keymap); +extern void NXResetKeyboard(NXEventHandle handle); + +/* Mouse */ +extern void NXSetClickTime(NXEventHandle handle, double seconds); +extern double NXClickTime(NXEventHandle handle); +extern void NXSetClickSpace(NXEventHandle handle, _NXSize_ *area); +extern void NXGetClickSpace(NXEventHandle handle, _NXSize_ *area); +extern void NXSetMouseScaling(NXEventHandle handle, NXMouseScaling *scaling); +extern void NXGetMouseScaling(NXEventHandle handle, NXMouseScaling *scaling); +#ifdef _undef +extern void NXEnableMouseButton(NXEventHandle handle, NXMouseButton button); +extern NXMouseButton NXMouseButtonEnabled(NXEventHandle handle); +#endif +extern void NXResetMouse(NXEventHandle handle); + +/* Screen Brightness and Auto-dimming */ + +extern void NXSetAutoDimThreshold(NXEventHandle handle, double seconds); +extern double NXAutoDimThreshold(NXEventHandle handle); +extern double NXAutoDimTime(NXEventHandle handle); +extern double NXIdleTime(NXEventHandle handle); +extern void NXSetAutoDimState(NXEventHandle handle, boolean_t dimmed); +extern boolean_t NXAutoDimState(NXEventHandle handle); +extern void NXSetAutoDimBrightness(NXEventHandle handle, double level); +extern double 
NXAutoDimBrightness(NXEventHandle handle); +extern void NXSetScreenBrightness(NXEventHandle handle, double level); +extern double NXScreenBrightness(NXEventHandle handle); + +/* Speaker Volume */ +#ifdef _undef +extern void NXSetCurrentVolume(NXEventHandle handle, double volume); +extern double NXCurrentVolume(NXEventHandle handle); +#endif + +/* Wait Cursor */ +#ifdef _undef +extern void NXSetWaitCursorThreshold(NXEventHandle handle, double seconds); +extern double NXWaitCursorThreshold(NXEventHandle handle); +extern void NXSetWaitCursorSustain(NXEventHandle handle, double seconds); +extern double NXWaitCursorSustain(NXEventHandle handle); +extern void NXSetWaitCursorFrameInterval(NXEventHandle handle, double seconds); +extern double NXWaitCursorFrameInterval(NXEventHandle handle); +#endif + +/* + * Generic calls. Argument values are device and architecture dependent. + * This API is provided for the convenience of special device users. Code + * which is intended to be portable across multiple platforms and architectures + * should not use the following functions. 
+ */ +#ifdef _undef +extern int NXEvSetParameterInt(NXEventHandle handle, + char *parameterName, + unsigned int *parameterArray, + unsigned int count); + +extern int NXEvSetParameterChar(NXEventHandle handle, + char *parameterName, + unsigned char *parameterArray, + unsigned int count); + +extern int NXEvGetParameterInt(NXEventHandle handle, + char *parameterName, + unsigned int maxCount, + unsigned int *parameterArray, + unsigned int *returnedCount); + +extern int NXEvGetParameterChar(NXEventHandle handle, + char *parameterName, + unsigned int maxCount, + unsigned char *parameterArray, + unsigned int *returnedCount); +#endif + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /*_DRIVERS_EVENT_STATUS_DRIVER_ */ + diff --git a/iokit/include/mach/mach.h b/iokit/include/mach/mach.h new file mode 100644 index 000000000..e5965a497 --- /dev/null +++ b/iokit/include/mach/mach.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ diff --git a/iokit/mach-o/mach_header.h b/iokit/mach-o/mach_header.h new file mode 100644 index 000000000..4d14d1380 --- /dev/null +++ b/iokit/mach-o/mach_header.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: kern/mach_header.h + * + * Definitions for accessing mach-o headers. + * + * HISTORY + * 29-Jan-92 Mike DeMoney (mike@next.com) + * Made into machine independent form from machdep/m68k/mach_header.h. + * Ifdef'ed out most of this since I couldn't find any references. 
+ */ + +#ifndef _KERN_MACH_HEADER_ +#define _KERN_MACH_HEADER_ + +#include +#include + +#if KERNEL +struct mach_header **getmachheaders(void); +vm_offset_t getlastaddr(void); + +struct segment_command *firstseg(void); +struct segment_command *firstsegfromheader(struct mach_header *header); +struct segment_command *nextseg(struct segment_command *sgp); +struct segment_command *nextsegfromheader( + struct mach_header *header, + struct segment_command *seg); +struct segment_command *getsegbyname(char *seg_name); +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name); +void *getsegdatafromheader(struct mach_header *, char *, int *); +struct section *getsectbyname(char *seg_name, char *sect_name); +struct section *getsectbynamefromheader( + struct mach_header *header, + char *seg_name, + char *sect_name); +void *getsectdatafromheader(struct mach_header *, char *, char *, int *); +struct section *firstsect(struct segment_command *sgp); +struct section *nextsect(struct segment_command *sgp, struct section *sp); +struct fvmlib_command *fvmlib(void); +struct fvmlib_command *fvmlibfromheader(struct mach_header *header); +struct segment_command *getfakefvmseg(void); +#ifdef MACH_KDB +struct symtab_command *getsectcmdsymtabfromheader(struct mach_header *); +boolean_t getsymtab(struct mach_header *, vm_offset_t *, int *, + vm_offset_t *, vm_size_t *); +#endif + +#endif /* KERNEL */ + +#endif /* _KERN_MACH_HEADER_ */ diff --git a/libkern/Makefile b/libkern/Makefile new file mode 100644 index 000000000..fdf3de1cc --- /dev/null +++ b/libkern/Makefile @@ -0,0 +1,25 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = libkern +INSTINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS} +INSTINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS} + 
+EXPINC_SUBDIRS = libkern +EXPINC_SUBDIRS_PPC = ${EXPINC_SUBDIRS} +EXPINC_SUBDIRS_I386 = ${EXPINC_SUBDIRS} + +SETUP_SUBDIRS = conf + +COMP_SUBDIRS = conf + +INST_SUBDIRS = kmod + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/libkern/c++/OSArray.cpp b/libkern/c++/OSArray.cpp new file mode 100644 index 000000000..3c56bc0d2 --- /dev/null +++ b/libkern/c++/OSArray.cpp @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOArray.m created by rsulack on Fri 12-Sep-1997 */ +/* IOArray.cpp converted to C++ by gvdl on Fri 1998-10-30 */ + + +#include +#include +#include + +#define super OSCollection + +OSDefineMetaClassAndStructors(OSArray, OSCollection) +OSMetaClassDefineReservedUnused(OSArray, 0); +OSMetaClassDefineReservedUnused(OSArray, 1); +OSMetaClassDefineReservedUnused(OSArray, 2); +OSMetaClassDefineReservedUnused(OSArray, 3); +OSMetaClassDefineReservedUnused(OSArray, 4); +OSMetaClassDefineReservedUnused(OSArray, 5); +OSMetaClassDefineReservedUnused(OSArray, 6); +OSMetaClassDefineReservedUnused(OSArray, 7); + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +bool OSArray::initWithCapacity(unsigned int inCapacity) +{ + int size; + + if (!super::init()) + return false; + + size = sizeof(const OSMetaClassBase *) * inCapacity; + array = (const OSMetaClassBase **) kalloc(size); + if (!array) + return false; + + count = 0; + capacity = inCapacity; + capacityIncrement = (inCapacity)? 
inCapacity : 16; + + bzero(array, size); + ACCUMSIZE(size); + + return this; +} + +bool OSArray::initWithObjects(const OSObject *objects[], + unsigned int theCount, + unsigned int theCapacity = 0) +{ + unsigned int capacity; + + if (!theCapacity) + capacity = theCount; + else if (theCount > theCapacity) + return false; + else + capacity = theCapacity; + + if (!objects || !initWithCapacity(capacity)) + return false; + + for ( unsigned int i = 0; i < theCount; i++ ) { + const OSMetaClassBase *newObject = *objects++; + + if (!newObject) + return false; + + array[count++] = newObject; + newObject->retain(); + } + + return true; +} + +bool OSArray::initWithArray(const OSArray *anArray, + unsigned int theCapacity = 0) +{ + if ( !anArray ) + return false; + + return initWithObjects((const OSObject **) anArray->array, + anArray->count, theCapacity); +} + +OSArray *OSArray::withCapacity(unsigned int capacity) +{ + OSArray *me = new OSArray; + + if (me && !me->initWithCapacity(capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSArray *OSArray::withObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity = 0) +{ + OSArray *me = new OSArray; + + if (me && !me->initWithObjects(objects, count, capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSArray *OSArray::withArray(const OSArray *array, + unsigned int capacity = 0) +{ + OSArray *me = new OSArray; + + if (me && !me->initWithArray(array, capacity)) { + me->free(); + return 0; + } + + return me; +} + +void OSArray::free() +{ + flushCollection(); + + if (array) { + kfree((vm_offset_t)array, sizeof(const OSMetaClassBase *) * capacity); + ACCUMSIZE( -(sizeof(const OSMetaClassBase *) * capacity) ); + } + + super::free(); +} + + +unsigned int OSArray::getCount() const { return count; } +unsigned int OSArray::getCapacity() const { return capacity; } +unsigned int OSArray::getCapacityIncrement() const { return capacityIncrement; } +unsigned int OSArray::setCapacityIncrement(unsigned 
int increment) +{ + capacityIncrement = (increment)? increment : 16; + + return capacityIncrement; +} + +unsigned int OSArray::ensureCapacity(unsigned int newCapacity) +{ + const OSMetaClassBase **newArray; + int oldSize, newSize; + + if (newCapacity <= capacity) + return capacity; + + // round up + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + newSize = sizeof(const OSMetaClassBase *) * newCapacity; + + newArray = (const OSMetaClassBase **) kalloc(newSize); + if (newArray) { + oldSize = sizeof(const OSMetaClassBase *) * capacity; + + ACCUMSIZE(newSize - oldSize); + + bcopy(array, newArray, oldSize); + bzero(&newArray[capacity], newSize - oldSize); + kfree((vm_offset_t)array, oldSize); + array = newArray; + capacity = newCapacity; + } + + return capacity; +} + +void OSArray::flushCollection() +{ + unsigned int i; + + haveUpdated(); + for (i = 0; i < count; i++) + array[i]->release(); + count = 0; +} + +bool OSArray::setObject(const OSMetaClassBase *anObject) +{ + return setObject(count, anObject); +} + +bool OSArray::setObject(unsigned int index, const OSMetaClassBase *anObject) +{ + unsigned int i; + unsigned int newCount = count + 1; + + if ((index > count) || !anObject) + return false; + + // do we need more space? + if (newCount > capacity && newCount > ensureCapacity(newCount)) + return false; + + haveUpdated(); + if (index != count) { + for (i = count; i > index; i--) + array[i] = array[i-1]; + } + array[index] = anObject; + anObject->retain(); + count++; + + return true; +} + +bool OSArray::merge(const OSArray * otherArray) +{ + unsigned int otherCount = otherArray->getCount(); + unsigned int newCount = count + otherCount; + + if (!otherCount) + return true; + + // do we need more space? 
+ if (newCount > capacity && newCount > ensureCapacity(newCount)) + return false; + + haveUpdated(); + for (unsigned int i = 0; i < otherCount; i++) { + const OSMetaClassBase *newObject = otherArray->getObject(i); + + array[count++] = newObject; + newObject->retain(); + } + + return true; +} + +void OSArray:: +replaceObject(unsigned int index, const OSMetaClassBase *anObject) +{ + const OSMetaClassBase *oldObject; + + if ((index >= count) || !anObject) + return; + + haveUpdated(); + oldObject = array[index]; + array[index] = anObject; + anObject->retain(); + + oldObject->release(); +} + +void OSArray::removeObject(unsigned int index) +{ + unsigned int i; + const OSMetaClassBase *oldObject; + + if (index >= count) + return; + + haveUpdated(); + oldObject = array[index]; + + count--; + for (i = index; i < count; i++) + array[i] = array[i+1]; + + oldObject->release(); +} + +bool OSArray::isEqualTo(const OSArray *anArray) const +{ + unsigned int i; + + if ( this == anArray ) + return true; + + if ( count != anArray->getCount() ) + return false; + + for ( i = 0; i < count; i++ ) { + if ( !array[i]->isEqualTo(anArray->getObject(i)) ) + return false; + } + + return true; +} + +bool OSArray::isEqualTo(const OSMetaClassBase *anObject) const +{ + OSArray *otherArray; + + otherArray = OSDynamicCast(OSArray, anObject); + if ( otherArray ) + return isEqualTo(otherArray); + else + return false; +} + +OSObject *OSArray::getObject(unsigned int index) const +{ + if (index >= count) + return 0; + else + return (OSObject *) array[index]; +} + +OSObject *OSArray::getLastObject() const +{ + if (count == 0) + return 0; + else + return (OSObject *) array[count - 1]; +} + +unsigned int OSArray::getNextIndexOfObject(const OSMetaClassBase * anObject, + unsigned int index) const +{ + while ((index < count) && (array[index] != anObject)) + index++; + if (index >= count) + index = (unsigned int)-1; + return index; +} + +unsigned int OSArray::iteratorSize() const +{ + return sizeof(unsigned 
int); +} + +bool OSArray::initIterator(void *inIterator) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + + *iteratorP = 0; + return true; +} + +bool OSArray::getNextObjectForIterator(void *inIterator, OSObject **ret) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; + + if (index < count) { + *ret = (OSObject *) array[index]; + return true; + } + else { + *ret = 0; + return false; + } +} + +bool OSArray::serialize(OSSerialize *s) const +{ + if (s->previouslySerialized(this)) return true; + + if (!s->addXMLStartTag(this, "array")) return false; + + for (unsigned i = 0; i < count; i++) { + if (!array[i]->serialize(s)) return false; + } + + return s->addXMLEndTag("array"); +} diff --git a/libkern/c++/OSBoolean.cpp b/libkern/c++/OSBoolean.cpp new file mode 100644 index 000000000..cd3ba2ef9 --- /dev/null +++ b/libkern/c++/OSBoolean.cpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSBoolean.cpp created by rsulack on Tue Oct 12 1999 */ + +#include +#include +#include +#include + +#define super OSObject + +OSDefineMetaClassAndStructors(OSBoolean, OSObject) +OSMetaClassDefineReservedUnused(OSBoolean, 0); +OSMetaClassDefineReservedUnused(OSBoolean, 1); +OSMetaClassDefineReservedUnused(OSBoolean, 2); +OSMetaClassDefineReservedUnused(OSBoolean, 3); +OSMetaClassDefineReservedUnused(OSBoolean, 4); +OSMetaClassDefineReservedUnused(OSBoolean, 5); +OSMetaClassDefineReservedUnused(OSBoolean, 6); +OSMetaClassDefineReservedUnused(OSBoolean, 7); + +static OSBoolean * gOSBooleanTrue = 0; +static OSBoolean * gOSBooleanFalse = 0; + +OSBoolean * const & kOSBooleanTrue = gOSBooleanTrue; +OSBoolean * const & kOSBooleanFalse = gOSBooleanFalse; + +void OSBoolean::initialize() +{ + gOSBooleanTrue = new OSBoolean; + assert(gOSBooleanTrue); + + if (!gOSBooleanTrue->init()) { + gOSBooleanTrue->OSObject::free(); + assert(false); + }; + gOSBooleanTrue->value = true; + + gOSBooleanFalse = new OSBoolean; + assert(gOSBooleanFalse); + + if (!gOSBooleanFalse->init()) { + gOSBooleanFalse->OSObject::free(); + assert(false); + }; + gOSBooleanFalse->value = false; +} + +void OSBoolean::free() +{ + /* + * An OSBoolean should never have free() called on it, since it is a shared + * object, with two non-mutable instances: kOSBooleanTrue, kOSBooleanFalse. + * There will be cases where an incorrect number of releases will cause the + * free() method to be called, however, which we must catch and ignore here. 
+ */ + assert(false); +} + +OSBoolean *OSBoolean::withBoolean(bool inValue) +{ + if (inValue) { + kOSBooleanTrue->retain(); + return kOSBooleanTrue; + } else { + kOSBooleanFalse->retain(); + return kOSBooleanFalse; + } +} + +bool OSBoolean::isTrue() const { return value; } +bool OSBoolean::isFalse() const { return !value; } +bool OSBoolean::getValue() const { return value; } + +bool OSBoolean::isEqualTo(const OSBoolean *boolean) const +{ + return (boolean == this); +} + +bool OSBoolean::isEqualTo(const OSMetaClassBase *obj) const +{ + OSBoolean * boolean; + if ((boolean = OSDynamicCast(OSBoolean, obj))) + return isEqualTo(boolean); + else + return false; +} + +bool OSBoolean::serialize(OSSerialize *s) const +{ + return s->addString(value ? "" : ""); +} diff --git a/libkern/c++/OSCPPDebug.cpp b/libkern/c++/OSCPPDebug.cpp new file mode 100644 index 000000000..5cf9649d8 --- /dev/null +++ b/libkern/c++/OSCPPDebug.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +__BEGIN_DECLS + +void OSPrintMemory( void ) +{ + + OSMetaClass::printInstanceCounts(); + + IOLog("\n" + "ivar kalloc() 0x%08x\n" + "malloc() 0x%08x\n" + "containers kalloc() 0x%08x\n" + "IOMalloc() 0x%08x\n" + "----------------------------------------\n", + debug_ivars_size, + debug_malloc_size, + debug_container_malloc_size, + debug_iomalloc_size + ); +} + +__END_DECLS + diff --git a/libkern/c++/OSCollection.cpp b/libkern/c++/OSCollection.cpp new file mode 100644 index 000000000..6a958424a --- /dev/null +++ b/libkern/c++/OSCollection.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOArray.h created by rsulack on Thu 11-Sep-1997 */ + +#include +#include + +#define super OSObject + +OSDefineMetaClassAndAbstractStructors(OSCollection, OSObject) +OSMetaClassDefineReservedUnused(OSCollection, 0); +OSMetaClassDefineReservedUnused(OSCollection, 1); +OSMetaClassDefineReservedUnused(OSCollection, 2); +OSMetaClassDefineReservedUnused(OSCollection, 3); +OSMetaClassDefineReservedUnused(OSCollection, 4); +OSMetaClassDefineReservedUnused(OSCollection, 5); +OSMetaClassDefineReservedUnused(OSCollection, 6); +OSMetaClassDefineReservedUnused(OSCollection, 7); + +bool OSCollection::init() +{ + if (!super::init()) + return false; + + updateStamp = 0; + + return true; +} diff --git a/libkern/c++/OSCollectionIterator.cpp b/libkern/c++/OSCollectionIterator.cpp new file mode 100644 index 000000000..e9a77cced --- /dev/null +++ b/libkern/c++/OSCollectionIterator.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOArray.h created by rsulack on Thu 11-Sep-1997 */ + +#include +#include +#include +#include + +#define super OSIterator + +OSDefineMetaClassAndStructors(OSCollectionIterator, OSIterator) + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +bool OSCollectionIterator::initWithCollection(const OSCollection *inColl) +{ + if ( !super::init() || !inColl) + return false; + + inColl->retain(); + collection = inColl; + collIterator = 0; + initialUpdateStamp = 0; + valid = false; + + return this; +} + +OSCollectionIterator * +OSCollectionIterator::withCollection(const OSCollection *inColl) +{ + + OSCollectionIterator *me = new OSCollectionIterator; + + if (me && !me->initWithCollection(inColl)) { + me->free(); + return 0; + } + + return me; +} + +void OSCollectionIterator::free() +{ + if (collIterator) { + kfree((vm_offset_t)collIterator, collection->iteratorSize()); + ACCUMSIZE(-(collection->iteratorSize())); + collIterator = 0; + } + + if (collection) { + collection->release(); + collection = 0; + } + + super::free(); +} + +void OSCollectionIterator::reset() +{ + valid = false; + + if (!collIterator) { + collIterator = (void *)kalloc(collection->iteratorSize()); + ACCUMSIZE(collection->iteratorSize()); + if (!collIterator) + return; + } + + if (!collection->initIterator(collIterator)) + return; + + initialUpdateStamp = collection->updateStamp; + valid = true; +} + +bool OSCollectionIterator::isValid() +{ + if (!collIterator) { + collIterator = (void *)kalloc(collection->iteratorSize()); + ACCUMSIZE(collection->iteratorSize()); + if (!collection->initIterator(collIterator)) + return false; + initialUpdateStamp = collection->updateStamp; + valid = true; + } + else if (!valid || collection->updateStamp != initialUpdateStamp) + return false; + + return true; +} + +OSObject 
*OSCollectionIterator::getNextObject() +{ + OSObject *retObj; + bool retVal; + + if (!isValid()) + return 0; + + retVal = collection->getNextObjectForIterator(collIterator, &retObj); + return (retVal)? retObj : 0; +} + diff --git a/libkern/c++/OSData.cpp b/libkern/c++/OSData.cpp new file mode 100644 index 000000000..edfa98243 --- /dev/null +++ b/libkern/c++/OSData.cpp @@ -0,0 +1,407 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOData.m created by rsulack on Thu 25-Sep-1997 */ + + +#include +#include +#include +#include + +#define super OSObject + +OSDefineMetaClassAndStructors(OSData, OSObject) +OSMetaClassDefineReservedUnused(OSData, 0); +OSMetaClassDefineReservedUnused(OSData, 1); +OSMetaClassDefineReservedUnused(OSData, 2); +OSMetaClassDefineReservedUnused(OSData, 3); +OSMetaClassDefineReservedUnused(OSData, 4); +OSMetaClassDefineReservedUnused(OSData, 5); +OSMetaClassDefineReservedUnused(OSData, 6); +OSMetaClassDefineReservedUnused(OSData, 7); + +#define EXTERNAL ((unsigned int) -1) + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +bool OSData::initWithCapacity(unsigned int inCapacity) +{ + if (!super::init()) + return false; + + if(inCapacity) { + data = (void *) kalloc(inCapacity); + if (!data) + return false; + } + + length = 0; + capacity = inCapacity; + capacityIncrement = capacity; + if(!capacityIncrement) + capacityIncrement = 16; + + ACCUMSIZE(capacity); + + return true; +} + +bool OSData::initWithBytes(const void *bytes, unsigned int inLength) +{ + if ((inLength && !bytes) || !initWithCapacity(inLength)) + return false; + + bcopy(bytes, data, inLength); + length = inLength; + + return true; +} + +bool OSData::initWithBytesNoCopy(void *bytes, unsigned int inLength) +{ + if (!super::init()) + return false; + + length = inLength; + capacity = EXTERNAL; + data = bytes; + + return true; +} + +bool OSData::initWithData(const OSData *inData) +{ + return initWithBytes(inData->data, inData->length); +} + +bool OSData::initWithData(const OSData *inData, + unsigned int start, unsigned int inLength) +{ + const void *localData = inData->getBytesNoCopy(start, inLength); + + if (localData) + return initWithBytes(localData, inLength); + else + return false; +} + +OSData *OSData::withCapacity(unsigned int 
inCapacity) +{ + OSData *me = new OSData; + + if (me && !me->initWithCapacity(inCapacity)) { + me->free(); + return 0; + } + + return me; +} + +OSData *OSData::withBytes(const void *bytes, unsigned int inLength) +{ + OSData *me = new OSData; + + if (me && !me->initWithBytes(bytes, inLength)) { + me->free(); + return 0; + } + return me; +} + +OSData *OSData::withBytesNoCopy(void *bytes, unsigned int inLength) +{ + OSData *me = new OSData; + + if (me && !me->initWithBytesNoCopy(bytes, inLength)) { + me->free(); + return 0; + } + + return me; +} + +OSData *OSData::withData(const OSData *inData) +{ + OSData *me = new OSData; + + if (me && !me->initWithData(inData)) { + me->free(); + return 0; + } + + return me; +} + +OSData *OSData::withData(const OSData *inData, + unsigned int start, unsigned int inLength) +{ + OSData *me = new OSData; + + if (me && !me->initWithData(inData, start, inLength)) { + me->free(); + return 0; + } + + return me; +} + +void OSData::free() +{ + if (capacity != EXTERNAL && data && capacity) { + kfree((vm_offset_t)data, capacity); + ACCUMSIZE( -capacity ); + } + super::free(); +} + +unsigned int OSData::getLength() const { return length; } +unsigned int OSData::getCapacity() const { return capacity; } + +unsigned int OSData::getCapacityIncrement() const +{ + return capacityIncrement; +} + +unsigned int OSData::setCapacityIncrement(unsigned increment) +{ + return capacityIncrement = increment; +} + +unsigned int OSData::ensureCapacity(unsigned int newCapacity) +{ + unsigned char * newData; + + if (newCapacity <= capacity) + return capacity; + + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + + newData = (unsigned char *) kalloc(newCapacity); + + if ( newData ) { + bzero(newData + capacity, newCapacity - capacity); + if (data) { + bcopy(data, newData, capacity); + kfree((vm_offset_t)data, capacity); + } + ACCUMSIZE( newCapacity - capacity ); + data = (void *) newData; + capacity = newCapacity; + } + + return 
capacity; +} + +bool OSData::appendBytes(const void *bytes, unsigned int inLength) +{ + unsigned int newSize; + + if (inLength == 0) + return true; + + if (capacity == EXTERNAL) + return false; + + newSize = length + inLength; + if ( (newSize > capacity) && newSize > ensureCapacity(newSize) ) + return false; + + bcopy(bytes, &((unsigned char *)data)[length], inLength); + length = newSize; + + return true; +} + +bool OSData::appendByte(unsigned char byte, unsigned int inLength) +{ + unsigned int newSize; + + if (inLength == 0) + return true; + + if (capacity == EXTERNAL) + return false; + + newSize = length + inLength; + if ( (newSize > capacity) && newSize > ensureCapacity(newSize) ) + return false; + + memset(&((unsigned char *)data)[length], byte, inLength); + length = newSize; + + return true; +} + +bool OSData::appendBytes(const OSData *other) +{ + return appendBytes(other->data, other->length); +} + +const void *OSData::getBytesNoCopy() const +{ + if (length == 0) + return 0; + else + return data; +} + +const void *OSData::getBytesNoCopy(unsigned int start, + unsigned int inLength) const +{ + const void *outData = 0; + + if (length + && start < length + && (start + inLength) <= length) + outData = (const void *) ((char *) data + start); + + return outData; +} + +bool OSData::isEqualTo(const OSData *aData) const +{ + unsigned int len; + + len = aData->length; + if ( length != len ) + return false; + + return isEqualTo(aData->data, len); +} + +bool OSData::isEqualTo(const void *someData, unsigned int inLength) const +{ + return (length >= inLength) && (bcmp(data, someData, inLength) == 0); +} + +bool OSData::isEqualTo(const OSMetaClassBase *obj) const +{ + OSData * data; + OSString * str; + + if ((data = OSDynamicCast(OSData, obj))) + return isEqualTo(data); + else if ((str = OSDynamicCast (OSString, obj))) + return isEqualTo(str); + else + return false; +} + +bool OSData::isEqualTo(const OSString *obj) const +{ + const char * aCString; + char * dataPtr; + 
unsigned int checkLen = length; + unsigned int stringLen; + + if (NULL == obj) + return false; + + stringLen = obj->getLength (); + + dataPtr = (char *)data; + + if (stringLen != checkLen) { + + // check for the fact that OSData may be a buffer that + // that includes a termination byte and will thus have + // a length of the actual string length PLUS 1. In this + // case we verify that the additional byte is a terminator + // and if so count the two lengths as being the same. + + if ( (checkLen - stringLen) == 1) { + if (dataPtr[checkLen-1] != 0) // non-zero means not a terminator and thus not likely the same + return false; + checkLen--; + } + else + return false; + } + + aCString = obj->getCStringNoCopy (); + + for ( unsigned int i=0; i < checkLen; i++ ) { + if ( *dataPtr++ != aCString[i] ) + return false; + } + + return true; +} + +//this was taken from CFPropertyList.c +static const char __CFPLDataEncodeTable[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +bool OSData::serialize(OSSerialize *s) const +{ + unsigned int i; + const unsigned char *p; + unsigned char c; + + if (s->previouslySerialized(this)) return true; + + if (!s->addXMLStartTag(this, "data")) return false; + + for (i = 0, p = (unsigned char *)data; i < length; i++, p++) { + /* 3 bytes are encoded as 4 */ + switch (i % 3) { + case 0: + c = __CFPLDataEncodeTable [ ((p[0] >> 2) & 0x3f)]; + if (!s->addChar(c)) return false; + break; + case 1: + c = __CFPLDataEncodeTable [ ((((p[-1] << 8) | p[0]) >> 4) & 0x3f)]; + if (!s->addChar(c)) return false; + break; + case 2: + c = __CFPLDataEncodeTable [ ((((p[-1] << 8) | p[0]) >> 6) & 0x3f)]; + if (!s->addChar(c)) return false; + c = __CFPLDataEncodeTable [ (p[0] & 0x3f)]; + if (!s->addChar(c)) return false; + break; + } + } + switch (i % 3) { + case 0: + break; + case 1: + c = __CFPLDataEncodeTable [ ((p[-1] << 4) & 0x30)]; + if (!s->addChar(c)) return false; + if (!s->addChar('=')) return false; + if (!s->addChar('=')) return 
false; + break; + case 2: + c = __CFPLDataEncodeTable [ ((p[-1] << 2) & 0x3c)]; + if (!s->addChar(c)) return false; + if (!s->addChar('=')) return false; + break; + } + + return s->addXMLEndTag("data"); +} diff --git a/libkern/c++/OSDictionary.cpp b/libkern/c++/OSDictionary.cpp new file mode 100644 index 000000000..04c680f92 --- /dev/null +++ b/libkern/c++/OSDictionary.cpp @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSDictionary.m created by rsulack on Fri 12-Sep-1997 */ +/* OSDictionary.cpp converted to C++ by gvdl on Fri 1998-10-30 */ +/* OSDictionary.cpp rewritten by gvdl on Fri 1998-10-30 */ + + +#include +#include +#include +#include +#include +#include + +#define super OSCollection + +OSDefineMetaClassAndStructors(OSDictionary, OSCollection) +OSMetaClassDefineReservedUnused(OSDictionary, 0); +OSMetaClassDefineReservedUnused(OSDictionary, 1); +OSMetaClassDefineReservedUnused(OSDictionary, 2); +OSMetaClassDefineReservedUnused(OSDictionary, 3); +OSMetaClassDefineReservedUnused(OSDictionary, 4); +OSMetaClassDefineReservedUnused(OSDictionary, 5); +OSMetaClassDefineReservedUnused(OSDictionary, 6); +OSMetaClassDefineReservedUnused(OSDictionary, 7); + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +bool OSDictionary::initWithCapacity(unsigned int inCapacity) +{ + if (!super::init()) + return false; + + int size = inCapacity * sizeof(dictEntry); + + dictionary = (dictEntry *) kalloc(size); + if (!dictionary) + return false; + + bzero(dictionary, size); + ACCUMSIZE(size); + + count = 0; + capacity = inCapacity; + capacityIncrement = (inCapacity)? 
inCapacity : 16; + + return true; +} + +bool OSDictionary::initWithObjects(const OSObject *objects[], + const OSSymbol *keys[], + unsigned int theCount, + unsigned int theCapacity = 0) +{ + unsigned int capacity = theCount; + + if (!objects || !keys) + return false; + + if ( theCapacity ) { + if (theCount > theCapacity) + return false; + + capacity = theCapacity; + } + + if (!initWithCapacity(capacity)) + return false; + + for (unsigned int i = 0; i < theCount; i++) { + const OSMetaClassBase *newObject = *objects++; + + if (!newObject || !keys[i] || !setObject(keys[i], newObject)) + return false; + } + + return true; +} + +bool OSDictionary::initWithObjects(const OSObject *objects[], + const OSString *keys[], + unsigned int theCount, + unsigned int theCapacity = 0) +{ + unsigned int capacity = theCount; + + if (!objects || !keys) + return false; + + if ( theCapacity ) { + if (theCount > theCapacity) + return false; + + capacity = theCapacity; + } + + if (!initWithCapacity(capacity)) + return false; + + for (unsigned int i = 0; i < theCount; i++) { + const OSSymbol *key = OSSymbol::withString(*keys++); + const OSMetaClassBase *newObject = *objects++; + + if (!key) + return false; + + if (!newObject || !setObject(key, newObject)) { + key->release(); + return false; + } + + key->release(); + } + + return true; +} + +bool OSDictionary::initWithDictionary(const OSDictionary *dict, + unsigned int theCapacity = 0) +{ + unsigned int capacity; + + if ( !dict ) + return false; + + capacity = dict->count; + + if ( theCapacity ) { + if ( dict->count > theCapacity ) + return false; + + capacity = theCapacity; + } + + if (!initWithCapacity(capacity)) + return false; + + count = dict->count; + bcopy(dict->dictionary, dictionary, count * sizeof(dictEntry)); + for (unsigned int i = 0; i < count; i++) { + dictionary[i].key->retain(); + dictionary[i].value->retain(); + } + + return true; +} + +OSDictionary *OSDictionary::withCapacity(unsigned int capacity) +{ + OSDictionary *me = new 
OSDictionary; + + if (me && !me->initWithCapacity(capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSDictionary *OSDictionary::withObjects(const OSObject *objects[], + const OSSymbol *keys[], + unsigned int count, + unsigned int capacity = 0) +{ + OSDictionary *me = new OSDictionary; + + if (me && !me->initWithObjects(objects, keys, count, capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSDictionary *OSDictionary::withObjects(const OSObject *objects[], + const OSString *keys[], + unsigned int count, + unsigned int capacity = 0) +{ + OSDictionary *me = new OSDictionary; + + if (me && !me->initWithObjects(objects, keys, count, capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSDictionary *OSDictionary::withDictionary(const OSDictionary *dict, + unsigned int capacity = 0) +{ + OSDictionary *me = new OSDictionary; + + if (me && !me->initWithDictionary(dict, capacity)) { + me->free(); + return 0; + } + + return me; +} + +void OSDictionary::free() +{ + flushCollection(); + if (dictionary) { + kfree((vm_offset_t)dictionary, capacity * sizeof(dictEntry)); + ACCUMSIZE( -(capacity * sizeof(dictEntry)) ); + } + + super::free(); +} + +unsigned int OSDictionary::getCount() const { return count; } +unsigned int OSDictionary::getCapacity() const { return capacity; } + +unsigned int OSDictionary::getCapacityIncrement() const +{ + return capacityIncrement; +} + +unsigned int OSDictionary::setCapacityIncrement(unsigned int increment) +{ + capacityIncrement = (increment)? 
increment : 16; + + return capacityIncrement; +} + +unsigned int OSDictionary::ensureCapacity(unsigned int newCapacity) +{ + dictEntry *newDict; + int oldSize, newSize; + + if (newCapacity <= capacity) + return capacity; + + // round up + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + newSize = sizeof(dictEntry) * newCapacity; + + newDict = (dictEntry *) kalloc(newSize); + if (newDict) { + oldSize = sizeof(dictEntry) * capacity; + + bcopy(dictionary, newDict, oldSize); + bzero(&newDict[capacity], newSize - oldSize); + + ACCUMSIZE(newSize - oldSize); + kfree((vm_offset_t)dictionary, oldSize); + + dictionary = newDict; + capacity = newCapacity; + } + + return capacity; +} + +void OSDictionary::flushCollection() +{ + haveUpdated(); + + for (unsigned int i = 0; i < count; i++) { + dictionary[i].key->release(); + dictionary[i].value->release(); + } + count = 0; +} + +bool OSDictionary:: +setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject) +{ + if (!anObject || !aKey) + return false; + + // if the key exists, replace the object + for (unsigned int i = 0; i < count; i++) { + if (aKey == dictionary[i].key) { + const OSMetaClassBase *oldObject = dictionary[i].value; + + anObject->retain(); + dictionary[i].value = anObject; + + haveUpdated(); + + oldObject->release(); + return true; + } + } + + // add new key, possibly extending our capacity + if (count >= capacity && count >= ensureCapacity(count+1)) + return 0; + + aKey->retain(); + anObject->retain(); + dictionary[count].key = aKey; + dictionary[count].value = anObject; + count++; + + haveUpdated(); + + return true; +} + +void OSDictionary::removeObject(const OSSymbol *aKey) +{ + if (!aKey) + return; + + // if the key exists, remove the object + for (unsigned int i = 0; i < count; i++) + if (aKey == dictionary[i].key) { + dictEntry oldEntry = dictionary[i]; + + haveUpdated(); + + count--; + for (; i < count; i++) + dictionary[i] = dictionary[i+1]; + + 
oldEntry.key->release(); + oldEntry.value->release(); + return; + } +} + + +// Returns true on success, false on an error condition. +bool OSDictionary::merge(const OSDictionary *aDictionary) +{ + const OSSymbol * sym; + OSCollectionIterator * iter; + + if ( !OSDynamicCast(OSDictionary, (OSDictionary *) aDictionary) ) + return false; + + iter = OSCollectionIterator::withCollection((OSDictionary *)aDictionary); + if ( !iter ) + return false; + + while ( (sym = (const OSSymbol *)iter->getNextObject()) ) { + const OSMetaClassBase * obj; + + obj = aDictionary->getObject(sym); + if ( !setObject(sym, obj) ) { + iter->release(); + return false; + } + } + iter->release(); + + return true; +} + +OSObject *OSDictionary::getObject(const OSSymbol *aKey) const +{ + if (!aKey) + return 0; + + // if the key exists, remove the object + for (unsigned int i = 0; i < count; i++) + if (aKey == dictionary[i].key) + return (OSObject *) dictionary[i].value; + + return 0; +} + +// Wrapper macros +#define OBJECT_WRAP_1(cmd, k) \ +{ \ + const OSSymbol *tmpKey = k; \ + OSObject *retObj = cmd(tmpKey); \ + \ + tmpKey->release(); \ + return retObj; \ +} + +#define OBJECT_WRAP_2(cmd, k, o) \ +{ \ + const OSSymbol *tmpKey = k; \ + bool ret = cmd(tmpKey, o); \ + \ + tmpKey->release(); \ + return ret; \ +} + +#define OBJECT_WRAP_3(cmd, k) \ +{ \ + const OSSymbol *tmpKey = k; \ + cmd(tmpKey); \ + tmpKey->release(); \ +} + + +bool OSDictionary::setObject(const OSString *aKey, const OSMetaClassBase *anObject) + OBJECT_WRAP_2(setObject, OSSymbol::withString(aKey), anObject) +bool OSDictionary::setObject(const char *aKey, const OSMetaClassBase *anObject) + OBJECT_WRAP_2(setObject, OSSymbol::withCString(aKey), anObject) + +OSObject *OSDictionary::getObject(const OSString *aKey) const + OBJECT_WRAP_1(getObject, OSSymbol::withString(aKey)) +OSObject *OSDictionary::getObject(const char *aKey) const + OBJECT_WRAP_1(getObject, OSSymbol::withCString(aKey)) + +void OSDictionary::removeObject(const OSString 
*aKey) + OBJECT_WRAP_3(removeObject, OSSymbol::withString(aKey)) +void OSDictionary::removeObject(const char *aKey) + OBJECT_WRAP_3(removeObject, OSSymbol::withCString(aKey)) + +bool +OSDictionary::isEqualTo(const OSDictionary *aDictionary, const OSCollection *keys) const +{ + OSCollectionIterator * iter; + unsigned int keysCount; + const OSMetaClassBase * obj1; + const OSMetaClassBase * obj2; + OSString * aKey; + bool ret; + + if ( this == aDictionary ) + return true; + + keysCount = keys->getCount(); + if ( (count < keysCount) || (aDictionary->getCount() < keysCount) ) + return false; + + iter = OSCollectionIterator::withCollection(keys); + if ( !iter ) + return false; + + ret = true; + while ( (aKey = OSDynamicCast(OSString, iter->getNextObject())) ) { + obj1 = getObject(aKey); + obj2 = aDictionary->getObject(aKey); + if ( !obj1 || !obj2 ) { + ret = false; + break; + } + + if ( !obj1->isEqualTo(obj2) ) { + ret = false; + break; + } + } + iter->release(); + + return ret; +} + +bool OSDictionary::isEqualTo(const OSDictionary *aDictionary) const +{ + unsigned int i; + const OSMetaClassBase * obj; + + if ( this == aDictionary ) + return true; + + if ( count != aDictionary->getCount() ) + return false; + + for ( i = 0; i < count; i++ ) { + obj = aDictionary->getObject(dictionary[i].key); + if ( !obj ) + return false; + + if ( !dictionary[i].value->isEqualTo(obj) ) + return false; + } + + return true; +} + +bool OSDictionary::isEqualTo(const OSMetaClassBase *anObject) const +{ + OSDictionary *dict; + + dict = OSDynamicCast(OSDictionary, anObject); + if ( dict ) + return isEqualTo(dict); + else + return false; +} + +unsigned int OSDictionary::iteratorSize() const +{ + return sizeof(unsigned int); +} + +bool OSDictionary::initIterator(void *inIterator) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + + *iteratorP = 0; + return true; +} + +bool OSDictionary::getNextObjectForIterator(void *inIterator, OSObject **ret) const +{ + unsigned int *iteratorP = 
(unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; + + if (index < count) + *ret = (OSObject *) dictionary[index].key; + else + *ret = 0; + + return (*ret != 0); +} + +bool OSDictionary::serialize(OSSerialize *s) const +{ + if (s->previouslySerialized(this)) return true; + + if (!s->addXMLStartTag(this, "dict")) return false; + + for (unsigned i = 0; i < count; i++) { + const OSSymbol *key = dictionary[i].key; + + // due the nature of the XML syntax, this must be a symbol + if (!key->metaCast("OSSymbol")) { + return false; + } + if (!s->addString("")) return false; + const char *c = key->getCStringNoCopy(); + while (*c) { + if (*c == '<') { + if (!s->addString("<")) return false; + } else if (*c == '>') { + if (!s->addString(">")) return false; + } else if (*c == '&') { + if (!s->addString("&")) return false; + } else { + if (!s->addChar(*c)) return false; + } + c++; + } + if (!s->addXMLEndTag("key")) return false; + + if (!dictionary[i].value->serialize(s)) return false; + } + + return s->addXMLEndTag("dict"); +} diff --git a/libkern/c++/OSIterator.cpp b/libkern/c++/OSIterator.cpp new file mode 100644 index 000000000..b462c88c6 --- /dev/null +++ b/libkern/c++/OSIterator.cpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#include + +OSDefineMetaClassAndAbstractStructors(OSIterator, OSObject) +OSMetaClassDefineReservedUnused(OSIterator, 0); +OSMetaClassDefineReservedUnused(OSIterator, 1); +OSMetaClassDefineReservedUnused(OSIterator, 2); +OSMetaClassDefineReservedUnused(OSIterator, 3); + diff --git a/libkern/c++/OSMetaClass.cpp b/libkern/c++/OSMetaClass.cpp new file mode 100644 index 000000000..4c5a81f39 --- /dev/null +++ b/libkern/c++/OSMetaClass.cpp @@ -0,0 +1,794 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSMetaClass.cpp created by gvdl on Fri 1998-11-17 */ + +#include +#include + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +__BEGIN_DECLS + +#include +#include +#include +#include +#include +#include +#include + +extern void OSRuntimeUnloadCPP(kmod_info_t *ki, void *); + +#if OSALLOCDEBUG +extern int debug_container_malloc_size; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif /* OSALLOCDEBUG */ + +__END_DECLS + +static enum { + kCompletedBootstrap = 0, + kNoDictionaries = 1, + kMakingDictionaries = 2 +} sBootstrapState = kNoDictionaries; + +static const int kClassCapacityIncrement = 40; +static const int kKModCapacityIncrement = 10; +static OSDictionary *sAllClassesDict, *sKModClassesDict; + +static mutex_t *loadLock; +static struct StalledData { + const char *kmodName; + OSReturn result; + unsigned int capacity; + unsigned int count; + OSMetaClass **classes; +} *sStalled; + +static unsigned int sConsiderUnloadDelay = 60; /* secs */ + +static const char OSMetaClassBasePanicMsg[] = + "OSMetaClassBase::_RESERVEDOSMetaClassBase%d called\n"; + +void OSMetaClassBase::_RESERVEDOSMetaClassBase0() + { panic(OSMetaClassBasePanicMsg, 0); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase1() + { panic(OSMetaClassBasePanicMsg, 1); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase2() + { panic(OSMetaClassBasePanicMsg, 2); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase3() + { panic(OSMetaClassBasePanicMsg, 3); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase4() + { panic(OSMetaClassBasePanicMsg, 4); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase5() + { panic(OSMetaClassBasePanicMsg, 5); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase6() + { panic(OSMetaClassBasePanicMsg, 6); } +void OSMetaClassBase::_RESERVEDOSMetaClassBase7() + { panic(OSMetaClassBasePanicMsg, 7); } + 
+OSMetaClassBase::OSMetaClassBase() +{ +} + +OSMetaClassBase::~OSMetaClassBase() +{ + void **thisVTable; + + thisVTable = (void **) this; + *thisVTable = (void *) -1UL; +} + +bool OSMetaClassBase::isEqualTo(const OSMetaClassBase *anObj) const +{ + return this == anObj; +} + +OSMetaClassBase *OSMetaClassBase::metaCast(const OSMetaClass *toMeta) const +{ + return toMeta->checkMetaCast(this); +} + +OSMetaClassBase *OSMetaClassBase::metaCast(const OSSymbol *toMetaSymb) const +{ + return OSMetaClass::checkMetaCastWithName(toMetaSymb, this); +} + +OSMetaClassBase *OSMetaClassBase::metaCast(const OSString *toMetaStr) const +{ + const OSSymbol *tempSymb = OSSymbol::withString(toMetaStr); + OSMetaClassBase *ret = 0; + if (tempSymb) { + ret = metaCast(tempSymb); + tempSymb->release(); + } + return ret; +} + +OSMetaClassBase *OSMetaClassBase::metaCast(const char *toMetaCStr) const +{ + const OSSymbol *tempSymb = OSSymbol::withCStringNoCopy(toMetaCStr); + OSMetaClassBase *ret = 0; + if (tempSymb) { + ret = metaCast(tempSymb); + tempSymb->release(); + } + return ret; +} + +class OSMetaClassMeta : public OSMetaClass +{ +public: + OSMetaClassMeta(); + OSObject *alloc() const; +}; +OSMetaClassMeta::OSMetaClassMeta() + : OSMetaClass("OSMetaClass", 0, sizeof(OSMetaClass)) + { } +OSObject *OSMetaClassMeta::alloc() const { return 0; } + +static OSMetaClassMeta sOSMetaClassMeta; + +const OSMetaClass * const OSMetaClass::metaClass = &sOSMetaClassMeta; +const OSMetaClass * OSMetaClass::getMetaClass() const + { return &sOSMetaClassMeta; } + +static const char OSMetaClassPanicMsg[] = + "OSMetaClass::_RESERVEDOSMetaClass%d called\n"; + +void OSMetaClass::_RESERVEDOSMetaClass0() + { panic(OSMetaClassPanicMsg, 0); } +void OSMetaClass::_RESERVEDOSMetaClass1() + { panic(OSMetaClassPanicMsg, 1); } +void OSMetaClass::_RESERVEDOSMetaClass2() + { panic(OSMetaClassPanicMsg, 2); } +void OSMetaClass::_RESERVEDOSMetaClass3() + { panic(OSMetaClassPanicMsg, 3); } +void 
OSMetaClass::_RESERVEDOSMetaClass4() + { panic(OSMetaClassPanicMsg, 4); } +void OSMetaClass::_RESERVEDOSMetaClass5() + { panic(OSMetaClassPanicMsg, 5); } +void OSMetaClass::_RESERVEDOSMetaClass6() + { panic(OSMetaClassPanicMsg, 6); } +void OSMetaClass::_RESERVEDOSMetaClass7() + { panic(OSMetaClassPanicMsg, 7); } + +void OSMetaClass::logError(OSReturn result) +{ + const char *msg; + + switch (result) { + case kOSMetaClassNoInit: + msg="OSMetaClass::preModLoad wasn't called, runtime internal error"; + break; + case kOSMetaClassNoDicts: + msg="Allocation failure for Metaclass internal dictionaries"; break; + case kOSMetaClassNoKModSet: + msg="Allocation failure for internal kmodule set"; break; + case kOSMetaClassNoInsKModSet: + msg="Can't insert the KMod set into the module dictionary"; break; + case kOSMetaClassDuplicateClass: + msg="Duplicate class"; break; + case kOSMetaClassNoSuper: + msg="Can't associate a class with its super class"; break; + case kOSMetaClassInstNoSuper: + msg="Instance construction, unknown super class."; break; + default: + case kOSMetaClassInternal: + msg="runtime internal error"; break; + case kOSReturnSuccess: + return; + } + printf("%s\n", msg); +} + +OSMetaClass::OSMetaClass(const char *inClassName, + const OSMetaClass *inSuperClass, + unsigned int inClassSize) +{ + instanceCount = 0; + classSize = inClassSize; + superClassLink = inSuperClass; + + className = (const OSSymbol *) inClassName; + + if (!sStalled) { + printf("OSMetaClass::preModLoad wasn't called for %s, " + "runtime internal error\n", inClassName); + } else if (!sStalled->result) { + // Grow stalled array if neccessary + if (sStalled->count >= sStalled->capacity) { + OSMetaClass **oldStalled = sStalled->classes; + int oldSize = sStalled->capacity * sizeof(OSMetaClass *); + int newSize = oldSize + + kKModCapacityIncrement * sizeof(OSMetaClass *); + + sStalled->classes = (OSMetaClass **) kalloc(newSize); + if (!sStalled->classes) { + sStalled->classes = oldStalled; + 
sStalled->result = kOSMetaClassNoTempData; + return; + } + + sStalled->capacity += kKModCapacityIncrement; + memmove(sStalled->classes, oldStalled, oldSize); + kfree((vm_offset_t)oldStalled, oldSize); + ACCUMSIZE(newSize - oldSize); + } + + sStalled->classes[sStalled->count++] = this; + } +} + +OSMetaClass::~OSMetaClass() +{ + do { + OSCollectionIterator *iter; + + if (sAllClassesDict) + sAllClassesDict->removeObject(className); + + iter = OSCollectionIterator::withCollection(sKModClassesDict); + if (!iter) + break; + + OSSymbol *iterKey; + while ( (iterKey = (OSSymbol *) iter->getNextObject()) ) { + OSSet *kmodClassSet; + kmodClassSet = (OSSet *) sKModClassesDict->getObject(iterKey); + if (kmodClassSet && kmodClassSet->containsObject(this)) { + kmodClassSet->removeObject(this); + break; + } + } + iter->release(); + } while (false); + + if (sStalled) { + unsigned int i; + + // First pass find class in stalled list + for (i = 0; i < sStalled->count; i++) + if (this == sStalled->classes[i]) + break; + + if (i < sStalled->count) { + sStalled->count--; + if (i < sStalled->count) + memmove(&sStalled->classes[i], &sStalled->classes[i+1], + (sStalled->count - i) * sizeof(OSMetaClass *)); + } + return; + } +} + +// Don't do anything as these classes must be statically allocated +void *OSMetaClass::operator new(size_t size) { return 0; } +void OSMetaClass::operator delete(void *mem, size_t size) { } +void OSMetaClass::retain() const { } +void OSMetaClass::release() const { } +void OSMetaClass::release(int when) const { }; +int OSMetaClass::getRetainCount() const { return 0; } + +const char *OSMetaClass::getClassName() const +{ + return className->getCStringNoCopy(); +} + +unsigned int OSMetaClass::getClassSize() const +{ + return classSize; +} + +void *OSMetaClass::preModLoad(const char *kmodName) +{ + if (!loadLock) { + loadLock = mutex_alloc(ETAP_IO_AHA); + _mutex_lock(loadLock); + } + else + _mutex_lock(loadLock); + + sStalled = (StalledData *) kalloc(sizeof(*sStalled)); 
+ if (sStalled) { + sStalled->classes = (OSMetaClass **) + kalloc(kKModCapacityIncrement * sizeof(OSMetaClass *)); + if (!sStalled->classes) { + kfree((vm_offset_t) sStalled, sizeof(*sStalled)); + return 0; + } + ACCUMSIZE((kKModCapacityIncrement * sizeof(OSMetaClass *)) + sizeof(*sStalled)); + + sStalled->result = kOSReturnSuccess; + sStalled->capacity = kKModCapacityIncrement; + sStalled->count = 0; + sStalled->kmodName = kmodName; + bzero(sStalled->classes, kKModCapacityIncrement * sizeof(OSMetaClass *)); + } + + return sStalled; +} + +bool OSMetaClass::checkModLoad(void *loadHandle) +{ + return sStalled && loadHandle == sStalled + && sStalled->result == kOSReturnSuccess; +} + +OSReturn OSMetaClass::postModLoad(void *loadHandle) +{ + OSReturn result = kOSReturnSuccess; + OSSet *kmodSet = 0; + + if (!sStalled || loadHandle != sStalled) { + logError(kOSMetaClassInternal); + return kOSMetaClassInternal; + } + + if (sStalled->result) + result = sStalled->result; + else switch (sBootstrapState) { + case kNoDictionaries: + sBootstrapState = kMakingDictionaries; + // No break; fall through + + case kMakingDictionaries: + sKModClassesDict = OSDictionary::withCapacity(kKModCapacityIncrement); + sAllClassesDict = OSDictionary::withCapacity(kClassCapacityIncrement); + if (!sAllClassesDict || !sKModClassesDict) { + result = kOSMetaClassNoDicts; + break; + } + // No break; fall through + + case kCompletedBootstrap: + { + unsigned int i; + + if (!sStalled->count) + break; // Nothing to do so just get out + + // First pass checking classes aren't already loaded + for (i = 0; i < sStalled->count; i++) { + OSMetaClass *me = sStalled->classes[i]; + + if (0 != sAllClassesDict->getObject((const char *) me->className)) { + printf("Class \"%s\" is duplicate\n", (const char *) me->className); + result = kOSMetaClassDuplicateClass; + break; + } + } + if (i != sStalled->count) + break; + + kmodSet = OSSet::withCapacity(sStalled->count); + if (!kmodSet) { + result = 
// Called from OSObject::free(): atomically decrement this class's live
// instance count and, when this class's last instance dies, propagate the
// decrement up the superclass chain.
// NOTE(review): comparison against 1 assumes OSDecrementAtomic returns the
// PRE-decrement value — confirm against the OSAtomic implementation.
void OSMetaClass::instanceDestructed() const
{
    if ((1 == OSDecrementAtomic((SInt32 *) &instanceCount)) && superClassLink)
        superClassLink->instanceDestructed();

    // Sanity check for over-release: a negative count means more destructs
    // than constructs were seen.  NOTE(review): this re-read of
    // instanceCount is not atomic with the decrement above.
    if( ((int) instanceCount) < 0)
        printf("%s: bad retain(%d)", getClassName(), instanceCount);
}
break; + + while ( (checkClass = (OSMetaClass *) iter->getNextObject()) ) + if (checkClass->getInstanceCount()) { + result = true; + break; + } + + iter->release(); + } while (false); + + mutex_unlock(loadLock); + + return result; +} + +void OSMetaClass::reportModInstances(const char *kmodName) +{ + OSSet *kmodClasses; + OSCollectionIterator *iter; + OSMetaClass *checkClass; + + kmodClasses = OSDynamicCast(OSSet, + sKModClassesDict->getObject(kmodName)); + if (!kmodClasses) + return; + + iter = OSCollectionIterator::withCollection(kmodClasses); + if (!iter) + return; + + while ( (checkClass = (OSMetaClass *) iter->getNextObject()) ) + if (checkClass->getInstanceCount()) { + printf("%s: %s has %d instance(s)\n", + kmodName, + checkClass->getClassName(), + checkClass->getInstanceCount()); + } + + iter->release(); +} + +static void _OSMetaClassConsiderUnloads(thread_call_param_t p0, + thread_call_param_t p1) +{ + OSSet *kmodClasses; + OSSymbol *kmodName; + OSCollectionIterator *kmods; + OSCollectionIterator *classes; + OSMetaClass *checkClass; + kmod_info_t *ki; + kern_return_t ret; + bool didUnload; + + _mutex_lock(loadLock); + + do { + + kmods = OSCollectionIterator::withCollection(sKModClassesDict); + if (!kmods) + break; + + didUnload = false; + while ( (kmodName = (OSSymbol *) kmods->getNextObject()) ) { + + ki = kmod_lookupbyname((char *)kmodName->getCStringNoCopy()); + if (!ki) + continue; + + if (ki->reference_count) + continue; + + kmodClasses = OSDynamicCast(OSSet, + sKModClassesDict->getObject(kmodName)); + classes = OSCollectionIterator::withCollection(kmodClasses); + if (!classes) + continue; + + while ((checkClass = (OSMetaClass *) classes->getNextObject()) + && (0 == checkClass->getInstanceCount())) + {} + classes->release(); + + if (0 == checkClass) { + OSRuntimeUnloadCPP(ki, 0); // call destructors + ret = kmod_destroy(host_priv_self(), ki->id); + didUnload = true; + } + + } while (false); + + kmods->release(); + + } while (didUnload); + + 
mutex_unlock(loadLock); +} + +void OSMetaClass::considerUnloads() +{ + static thread_call_t unloadCallout; + AbsoluteTime when; + + _mutex_lock(loadLock); + + if (!unloadCallout) + unloadCallout = thread_call_allocate(&_OSMetaClassConsiderUnloads, 0); + + thread_call_cancel(unloadCallout); + clock_interval_to_deadline(sConsiderUnloadDelay, 1000 * 1000 * 1000, &when); + thread_call_enter_delayed(unloadCallout, when); + + mutex_unlock(loadLock); +} + +const OSMetaClass *OSMetaClass::getMetaClassWithName(const OSSymbol *name) +{ + OSMetaClass *retMeta = 0; + + if (!name) + return 0; + + if (sAllClassesDict) + retMeta = (OSMetaClass *) sAllClassesDict->getObject(name); + + if (!retMeta && sStalled) + { + // Oh dear we have to scan the stalled list and walk the + // the stalled list manually. + const char *cName = name->getCStringNoCopy(); + unsigned int i; + + // find class in stalled list + for (i = 0; i < sStalled->count; i++) { + retMeta = sStalled->classes[i]; + if (0 == strcmp(cName, (const char *) retMeta->className)) + break; + } + + if (i < sStalled->count) + retMeta = 0; + } + + return retMeta; +} + +OSObject *OSMetaClass::allocClassWithName(const OSSymbol *name) +{ + OSObject * result; + _mutex_lock(loadLock); + + const OSMetaClass * const meta = getMetaClassWithName(name); + + if (meta) + result = meta->alloc(); + else + result = 0; + + mutex_unlock(loadLock); + + return result; +} + +OSObject *OSMetaClass::allocClassWithName(const OSString *name) +{ + const OSSymbol *tmpKey = OSSymbol::withString(name); + OSObject *result = allocClassWithName(tmpKey); + tmpKey->release(); + return result; +} + +OSObject *OSMetaClass::allocClassWithName(const char *name) +{ + const OSSymbol *tmpKey = OSSymbol::withCStringNoCopy(name); + OSObject *result = allocClassWithName(tmpKey); + tmpKey->release(); + return result; +} + + +OSMetaClassBase *OSMetaClass:: +checkMetaCastWithName(const OSSymbol *name, const OSMetaClassBase *in) +{ + OSMetaClassBase * result; + 
_mutex_lock(loadLock); + const OSMetaClass * const meta = getMetaClassWithName(name); + + if (meta) + result = meta->checkMetaCast(in); + else + result = 0; + + mutex_unlock(loadLock); + return result; +} + +OSMetaClassBase *OSMetaClass:: +checkMetaCastWithName(const OSString *name, const OSMetaClassBase *in) +{ + const OSSymbol *tmpKey = OSSymbol::withString(name); + OSMetaClassBase *result = checkMetaCastWithName(tmpKey, in); + tmpKey->release(); + return result; +} + +OSMetaClassBase *OSMetaClass:: +checkMetaCastWithName(const char *name, const OSMetaClassBase *in) +{ + const OSSymbol *tmpKey = OSSymbol::withCStringNoCopy(name); + OSMetaClassBase *result = checkMetaCastWithName(tmpKey, in); + tmpKey->release(); + return result; +} + +/* +OSMetaClass::checkMetaCast + checkMetaCast(const OSMetaClassBase *check) + +Check to see if the 'check' object has this object in it's metaclass chain. Returns check if it is indeed a kind of the current meta class, 0 otherwise. + +Generally this method is not invoked directly but is used to implement the OSMetaClassBase::metaCast member function. 
+ +See also OSMetaClassBase::metaCast + + */ +OSMetaClassBase *OSMetaClass::checkMetaCast(const OSMetaClassBase *check) const +{ + const OSMetaClass * const toMeta = this; + const OSMetaClass *fromMeta; + + for (fromMeta = check->getMetaClass(); ; fromMeta = fromMeta->superClassLink) { + if (toMeta == fromMeta) + return (OSMetaClassBase *) check; // Discard const + + if (!fromMeta->superClassLink) + break; + } + + return 0; +} + +void OSMetaClass::reservedCalled(int ind) const +{ + const char *cname = className->getCStringNoCopy(); + panic("%s::_RESERVED%s%d called\n", cname, cname, ind); +} + +const OSMetaClass *OSMetaClass::getSuperClass() const +{ + return superClassLink; +} + +unsigned int OSMetaClass::getInstanceCount() const +{ + return instanceCount; +} + +void OSMetaClass::printInstanceCounts() +{ + OSCollectionIterator *classes; + OSSymbol *className; + OSMetaClass *meta; + + classes = OSCollectionIterator::withCollection(sAllClassesDict); + if (!classes) + return; + + while( (className = (OSSymbol *)classes->getNextObject())) { + meta = (OSMetaClass *) sAllClassesDict->getObject(className); + assert(meta); + + printf("%24s count: %03d x 0x%03x = 0x%06x\n", + className->getCStringNoCopy(), + meta->getInstanceCount(), + meta->getClassSize(), + meta->getInstanceCount() * meta->getClassSize() ); + } + printf("\n"); + classes->release(); +} + +OSDictionary * OSMetaClass::getClassDictionary() +{ + return sAllClassesDict; +} + +bool OSMetaClass::serialize(OSSerialize *s) const +{ + OSDictionary * dict; + OSNumber * off; + bool ok = false; + + if (s->previouslySerialized(this)) return true; + + dict = 0;// IODictionary::withCapacity(2); + off = OSNumber::withNumber(getInstanceCount(), 32); + + if (dict) { + dict->setObject("InstanceCount", off ); + ok = dict->serialize(s); + } else if( off) + ok = off->serialize(s); + + if (dict) + dict->release(); + if (off) + off->release(); + + return ok; +} + diff --git a/libkern/c++/OSNumber.cpp b/libkern/c++/OSNumber.cpp 
new file mode 100644 index 000000000..2f94543e6 --- /dev/null +++ b/libkern/c++/OSNumber.cpp @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOOffset.m created by rsulack on Wed 17-Sep-1997 */ + +#include +#include +#include +#include + +#define sizeMask ((1ULL << (size)) - 1) + +#define super OSObject + +OSDefineMetaClassAndStructors(OSNumber, OSObject) +OSMetaClassDefineReservedUnused(OSNumber, 0); +OSMetaClassDefineReservedUnused(OSNumber, 1); +OSMetaClassDefineReservedUnused(OSNumber, 2); +OSMetaClassDefineReservedUnused(OSNumber, 3); +OSMetaClassDefineReservedUnused(OSNumber, 4); +OSMetaClassDefineReservedUnused(OSNumber, 5); +OSMetaClassDefineReservedUnused(OSNumber, 6); +OSMetaClassDefineReservedUnused(OSNumber, 7); + +bool OSNumber::init(unsigned long long inValue, unsigned int numberOfBits) +{ + if (!super::init()) + return false; + + size = numberOfBits; + value = (inValue & sizeMask); + + return true; +} + +bool OSNumber::init(const char *value, unsigned int numberOfBits) +{ + unsigned long long thisOffset; + +#ifdef q_works + sscanf(value, "%qi", thisOffset); +#else + unsigned int smallOffset; + + sscanf(value, "%i", &smallOffset); + thisOffset = smallOffset; +#endif + + return init(thisOffset, numberOfBits); +} + +void OSNumber::free() { super::free(); } + +OSNumber *OSNumber::withNumber(unsigned long long value, + unsigned int numberOfBits) +{ + OSNumber *me = new OSNumber; + + if (me && !me->init(value, numberOfBits)) { + me->free(); + return 0; + } + + return me; +} + +OSNumber *OSNumber::withNumber(const char *value, unsigned int numberOfBits) +{ + OSNumber *me = new OSNumber; + + if (me && !me->init(value, numberOfBits)) { + me->free(); + return 0; + } + + return me; +} + +unsigned int OSNumber::numberOfBits() const { return size; } + +unsigned int OSNumber::numberOfBytes() const { return (size + 7) / 8; } + + +unsigned char OSNumber::unsigned8BitValue() const +{ + return (unsigned char) value; +} + +unsigned short OSNumber::unsigned16BitValue() const +{ + return (unsigned short) value; +} + +unsigned int OSNumber::unsigned32BitValue() const 
// Serialise as an <integer size="..."> XML element whose text is the value
// in hex.  temp[32] is large enough for both the start tag ("integer
// size=\"4294967295\"" worst case) and a 64-bit hex value with "0x" prefix.
bool OSNumber::serialize(OSSerialize *s) const
{
    char temp[32];

    if (s->previouslySerialized(this)) return true;

    sprintf(temp, "integer size=\"%d\"", size);
    if (!s->addXMLStartTag(this, temp)) return false;

    //XXX sprintf(temp, "0x%qx", value);
    // The 64-bit value is printed as two 32-bit halves with %lx/%08lx,
    // presumably because %qx was unavailable here (see the XXX above).
    if ((value >> 32)) {
        sprintf(temp, "0x%lx%08lx", (unsigned long)(value >> 32),
                (unsigned long)(value & 0xFFFFFFFF));
    } else {
        sprintf(temp, "0x%lx", (unsigned long)value);
    }
    if (!s->addString(temp)) return false;

    return s->addXMLEndTag("integer");
}
// OSDefineMetaClassAndAbstractStructors(OSObject, 0);
/* Class global data: the root metaclass instance and the static links the
   OSDefineMetaClass* macros would otherwise emit (OSObject is the root, so
   superClass is 0 and the structors are written out by hand). */
OSObject::MetaClass OSObject::gMetaClass;
const OSMetaClass * const OSObject::metaClass = &OSObject::gMetaClass;
const OSMetaClass * const OSObject::superClass = 0;

/* Class member functions - Can't use defaults.
   Both constructors start the retain count at 1 (caller owns the first
   reference); the destructor is empty — teardown happens in free(). */
OSObject::OSObject() { retainCount = 1; }
OSObject::OSObject(const OSMetaClass *) { retainCount = 1; }
OSObject::~OSObject() { }
const OSMetaClass * OSObject::getMetaClass() const
    { return &gMetaClass; }
// OSObject is effectively abstract: its metaclass refuses to allocate.
OSObject *OSObject::MetaClass::alloc() const { return 0; }

/* The OSObject::MetaClass constructor: registers "OSObject" as a root
   class (superclass 0) with the metaclass runtime. */
OSObject::MetaClass::MetaClass()
    : OSMetaClass("OSObject", OSObject::superClass, sizeof(OSObject))
    { }
// Drop one reference; free the object once the count sinks to `when`.
// NOTE(review): assumes OSDecrementAtomic returns the PRE-decrement value,
// so the object is freed when the count before this release was <= when
// (release() passes when == 1, i.e. this was the last reference) — confirm
// against the OSAtomic implementation.
void OSObject::release(int when) const
{
    // Cast away const: free() must mutate/destroy the object even though
    // release() is declared const.
    if (OSDecrementAtomic((SInt32 *) &retainCount) <= when)
        ((OSObject *) this)->free();
}
meta->getClassName() : "unknown class?"; + + if (!s->addString(className)) return false; + if (!s->addString(" is not serializable")) return false; + + return s->addXMLEndTag("string"); +} + +void *OSObject::operator new(size_t size) +{ + void *mem = (void *) kalloc(size); + assert(mem); + bzero(mem, size); + + ACCUMSIZE(size); + + return mem; +} + +void OSObject::operator delete(void *mem, size_t size) +{ + kfree((vm_offset_t) mem, size); + + ACCUMSIZE(-size); +} diff --git a/libkern/c++/OSOrderedSet.cpp b/libkern/c++/OSOrderedSet.cpp new file mode 100644 index 000000000..1eccf927a --- /dev/null +++ b/libkern/c++/OSOrderedSet.cpp @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#define super OSCollection + +OSDefineMetaClassAndStructors(OSOrderedSet, OSCollection) +OSMetaClassDefineReservedUnused(OSOrderedSet, 0); +OSMetaClassDefineReservedUnused(OSOrderedSet, 1); +OSMetaClassDefineReservedUnused(OSOrderedSet, 2); +OSMetaClassDefineReservedUnused(OSOrderedSet, 3); +OSMetaClassDefineReservedUnused(OSOrderedSet, 4); +OSMetaClassDefineReservedUnused(OSOrderedSet, 5); +OSMetaClassDefineReservedUnused(OSOrderedSet, 6); +OSMetaClassDefineReservedUnused(OSOrderedSet, 7); + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +struct _Element { + const OSMetaClassBase * obj; +// unsigned int pri; +}; + + +bool OSOrderedSet:: +initWithCapacity(unsigned int inCapacity, + OSOrderFunction inOrdering, void *inOrderingRef) +{ + int size; + + if (!super::init()) + return false; + + size = sizeof(_Element) * inCapacity; + array = (_Element *) kalloc(size); + if (!array) + return false; + + count = 0; + capacity = inCapacity; + capacityIncrement = (inCapacity)? 
inCapacity : 16; + ordering = inOrdering; + orderingRef = inOrderingRef; + + bzero(array, size); + ACCUMSIZE(size); + + return this; +} + +OSOrderedSet * OSOrderedSet:: +withCapacity(unsigned int capacity, + OSOrderFunction ordering, void * orderingRef) +{ + OSOrderedSet *me = new OSOrderedSet; + + if (me && !me->initWithCapacity(capacity, ordering, orderingRef)) { + me->free(); + me = 0; + } + + return me; +} + +void OSOrderedSet::free() +{ + flushCollection(); + + if (array) { + kfree((vm_offset_t)array, sizeof(_Element) * capacity); + ACCUMSIZE( -(sizeof(_Element) * capacity) ); + } + + super::free(); +} + +unsigned int OSOrderedSet::getCount() const { return count; } +unsigned int OSOrderedSet::getCapacity() const { return capacity; } +unsigned int OSOrderedSet::getCapacityIncrement() const + { return capacityIncrement; } +unsigned int OSOrderedSet::setCapacityIncrement(unsigned int increment) +{ + capacityIncrement = (increment)? increment : 16; + return capacityIncrement; +} + +unsigned int OSOrderedSet::ensureCapacity(unsigned int newCapacity) +{ + _Element *newArray; + int oldSize, newSize; + + if (newCapacity <= capacity) + return capacity; + + // round up + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + newSize = sizeof(_Element) * newCapacity; + + newArray = (_Element *) kalloc(newSize); + if (newArray) { + oldSize = sizeof(_Element) * capacity; + + ACCUMSIZE(newSize - oldSize); + + bcopy(array, newArray, oldSize); + bzero(&newArray[capacity], newSize - oldSize); + kfree((vm_offset_t)array, oldSize); + array = newArray; + capacity = newCapacity; + } + + return capacity; +} + +void OSOrderedSet::flushCollection() +{ + unsigned int i; + + haveUpdated(); + + for (i = 0; i < count; i++) + array[i].obj->release(); + + count = 0; +} + +/* internal */ +bool OSOrderedSet::setObject(unsigned int index, const OSMetaClassBase *anObject) +{ + unsigned int i; + unsigned int newCount = count + 1; + + if ((index > count) || 
// Remove the first occurrence of anObject (pointer identity, matching the
// member() test) in a single pass: once the match is found and released,
// every subsequent element is shifted down one slot to close the gap.
// Only then is count decremented and the mutation recorded.
void OSOrderedSet::removeObject(const OSMetaClassBase *anObject)
{
    bool deleted = false;
    unsigned int i;

    for (i = 0; i < count; i++) {

        if( deleted)
            array[i-1] = array[i];          // slide remainder down
        else if( (array[i].obj == anObject)) {
            array[i].obj->release();        // balance setObject()'s retain
            deleted = true;
        }
    }

    if( deleted) {
        count--;
        haveUpdated();
    }
}
+OSObject *OSOrderedSet::getLastObject() const +{ + if( count) + return( (OSObject *) array[count-1].obj ); + else + return( 0 ); +} + +SInt32 OSOrderedSet::orderObject( const OSMetaClassBase * anObject ) +{ + return( ORDER( anObject, 0 )); +} + +void *OSOrderedSet::getOrderingRef() +{ + return orderingRef; +} + +bool OSOrderedSet::isEqualTo(const OSOrderedSet *anOrderedSet) const +{ + unsigned int i; + + if ( this == anOrderedSet ) + return true; + + if ( count != anOrderedSet->getCount() ) + return false; + + for ( i = 0; i < count; i++ ) { + if ( !array[i].obj->isEqualTo(anOrderedSet->getObject(i)) ) + return false; + } + + return true; +} + +bool OSOrderedSet::isEqualTo(const OSMetaClassBase *anObject) const +{ + OSOrderedSet *oSet; + + oSet = OSDynamicCast(OSOrderedSet, anObject); + if ( oSet ) + return isEqualTo(oSet); + else + return false; +} + +unsigned int OSOrderedSet::iteratorSize() const +{ + return( sizeof(unsigned int)); +} + +bool OSOrderedSet::initIterator(void *inIterator) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + + *iteratorP = 0; + return true; +} + +bool OSOrderedSet:: +getNextObjectForIterator(void *inIterator, OSObject **ret) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; + + if (index < count) + *ret = (OSObject *) array[index].obj; + else + *ret = 0; + + return (*ret != 0); +} + diff --git a/libkern/c++/OSRuntime.cpp b/libkern/c++/OSRuntime.cpp new file mode 100644 index 000000000..e5ddca485 --- /dev/null +++ b/libkern/c++/OSRuntime.cpp @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + */ +#include +#include +#include +#include + +#include + +__BEGIN_DECLS + +#include + +struct mach_header; + +#include +#include +#include + +#if OSALLOCDEBUG +extern int debug_iomalloc_size; +#endif + +#define MDECL(reqlen) \ +typedef union { \ + struct _mhead hdr; \ + char _m[(reqlen) + sizeof (struct _mhead)]; \ +} hdr_t; \ +hdr_t + +struct _mhead { + size_t mlen; + char dat[0]; +}; + +void *kern_os_malloc( + size_t size) +{ + MDECL(size) *mem; + size_t memsize = sizeof (*mem); + + if (size == 0) + return (0); + + mem = (hdr_t *)kalloc(memsize); + if (!mem) + return (0); + +#if OSALLOCDEBUG + debug_iomalloc_size += memsize; +#endif + + mem->hdr.mlen = memsize; + (void) memset(mem->hdr.dat, 0, size); + + return (mem->hdr.dat); +} + +void kern_os_free( + void *addr) +{ + struct _mhead *hdr; + + if (!addr) + return; + + hdr = (struct _mhead *) addr; hdr--; + +#if OSALLOCDEBUG + debug_iomalloc_size -= hdr->mlen; +#endif + +#if 0 + memset((vm_offset_t)hdr, 0xbb, hdr->mlen); +#else + kfree((vm_offset_t)hdr, hdr->mlen); +#endif +} + +void *kern_os_realloc( + void *addr, + size_t nsize) +{ + struct _mhead *ohdr; + MDECL(nsize) *nmem; + size_t nmemsize, osize; + + if (!addr) + return (kern_os_malloc(nsize)); + + ohdr = (struct _mhead *) addr; ohdr--; + osize = ohdr->mlen - sizeof (*ohdr); + if (nsize == osize) + 
return (addr); + + if (nsize == 0) { + kern_os_free(addr); + return (0); + } + + nmemsize = sizeof (*nmem); + nmem = (hdr_t *) kalloc(nmemsize); + if (!nmem){ + kern_os_free(addr); + return (0); + } + +#if OSALLOCDEBUG + debug_iomalloc_size += (nmemsize - ohdr->mlen); +#endif + + nmem->hdr.mlen = nmemsize; + if (nsize > osize) + (void) memset(&nmem->hdr.dat[osize], 0, nsize - osize); + (void) memcpy(nmem->hdr.dat, ohdr->dat, + (nsize > osize) ? osize : nsize); + kfree((vm_offset_t)ohdr, ohdr->mlen); + + return (nmem->hdr.dat); +} + +size_t kern_os_malloc_size( + void *addr) +{ + struct _mhead *hdr; + + if (!addr) + return( 0); + + hdr = (struct _mhead *) addr; hdr--; + return( hdr->mlen - sizeof (struct _mhead)); +} + +void __pure_virtual( void ) { panic(__FUNCTION__); } + +typedef void (*structor_t)(void); + +void OSRuntimeUnloadCPPForSegment(struct segment_command * segment) { + + struct section * section; + + for (section = firstsect(segment); + section != 0; + section = nextsect(segment, section)) { + + if (strcmp(section->sectname, "__destructor") == 0) { + structor_t * destructors = (structor_t *)section->addr; + + if (destructors) { + int num_destructors = section->size / sizeof(structor_t); + + for (int i = 0; i < num_destructors; i++) { + (*destructors[i])(); + } + } /* if (destructors) */ + } /* if (strcmp...) */ + } /* for (section...) 
*/ + + return; +} + +void OSRuntimeUnloadCPP(kmod_info_t *ki, void *) +{ + if (ki && ki->address) { + + struct segment_command * segment; + struct mach_header *header; + + OSSymbol::checkForPageUnload((void *) ki->address, + (void *) (ki->address + ki->size)); + + header = (struct mach_header *)ki->address; + segment = firstsegfromheader(header); + + for (segment = firstsegfromheader(header); + segment != 0; + segment = nextseg(segment)) { + + OSRuntimeUnloadCPPForSegment(segment); + } + } +} + +kern_return_t OSRuntimeFinalizeCPP(kmod_info_t *ki, void *) +{ + void *metaHandle; + + if (OSMetaClass::modHasInstance(ki->name)) { + // @@@ gvdl should have a verbose flag + printf("Can't unload %s due to -\n", ki->name); + OSMetaClass::reportModInstances(ki->name); + return kOSMetaClassHasInstances; + } + + // Tell the meta class system that we are starting to unload + metaHandle = OSMetaClass::preModLoad(ki->name); + OSRuntimeUnloadCPP(ki, 0); // Do the actual unload + (void) OSMetaClass::postModLoad(metaHandle); + + return KMOD_RETURN_SUCCESS; +} + +// Functions used by the extenTools/kmod library project +kern_return_t OSRuntimeInitializeCPP(kmod_info_t *ki, void *) +{ + struct mach_header *header; + void *metaHandle; + bool load_success; + struct segment_command * segment; + struct segment_command * failure_segment; + + if (!ki || !ki->address) + return KMOD_RETURN_FAILURE; + else + header = (struct mach_header *) ki->address; + + // Tell the meta class system that we are starting the load + metaHandle = OSMetaClass::preModLoad(ki->name); + assert(metaHandle); + if (!metaHandle) + return KMOD_RETURN_FAILURE; + + load_success = true; + failure_segment = 0; + + /* Scan the header for all sections named "__constructor", in any + * segment, and invoke the constructors within those sections. 
+ */ + for (segment = firstsegfromheader(header); + segment != 0 && load_success; + segment = nextseg(segment)) { + + struct section * section; + + /* Record the current segment in the event of a failure. + */ + failure_segment = segment; + + for (section = firstsect(segment); + section != 0 && load_success; + section = nextsect(segment, section)) { + + if (strcmp(section->sectname, "__constructor") == 0) { + structor_t * constructors = (structor_t *)section->addr; + + if (constructors) { + // FIXME: can we break here under the assumption that + // section names are unique within a segment? + + int num_constructors = section->size / sizeof(structor_t); + int hit_null_constructor = 0; + + for (int i = 0; + i < num_constructors && + OSMetaClass::checkModLoad(metaHandle); + i++) { + + if (constructors[i]) { + (*constructors[i])(); + } else if (!hit_null_constructor) { + hit_null_constructor = 1; + printf("Error! Null constructor in segment %s.\n", + section->segname); + } + } + load_success = OSMetaClass::checkModLoad(metaHandle); + + } /* if (constructors) */ + } /* if (strcmp...) */ + } /* for (section...) */ + } /* for (segment...) */ + + + // We failed so call all of the destructors + if (!load_success) { + + /* Scan the header for all sections named "__constructor", in any + * segment, and invoke the constructors within those sections. + */ + for (segment = firstsegfromheader(header); + segment != failure_segment && segment != 0; + segment = nextseg(segment)) { + + OSRuntimeUnloadCPPForSegment(segment); + + } /* for (segment...) 
*/ + } + + return OSMetaClass::postModLoad(metaHandle); +} + +static KMOD_LIB_DECL(__kernel__, 0); +void OSlibkernInit(void) +{ + vm_address_t *headerArray = (vm_address_t *) getmachheaders(); + + KMOD_INFO_NAME.address = headerArray[0]; assert(!headerArray[1]); + if (kOSReturnSuccess != OSRuntimeInitializeCPP(&KMOD_INFO_NAME, 0)) + panic("OSRuntime: C++ runtime failed to initialize"); + + OSBoolean::initialize(); +} + +__END_DECLS + +void * operator new( size_t size) +{ + void * result; + + result = (void *) kern_os_malloc( size); + if( result) + bzero( result, size); + return( result); +} + +void operator delete( void * addr) +{ + kern_os_free( addr); +} + diff --git a/libkern/c++/OSSerialize.cpp b/libkern/c++/OSSerialize.cpp new file mode 100644 index 000000000..cfaec1b9c --- /dev/null +++ b/libkern/c++/OSSerialize.cpp @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSSerialize.cpp created by rsulack on Wen 25-Nov-1998 */ + +#include +#include +#include + +#define super OSObject + +OSDefineMetaClassAndStructors(OSSerialize, OSObject) +OSMetaClassDefineReservedUnused(OSSerialize, 0); +OSMetaClassDefineReservedUnused(OSSerialize, 1); +OSMetaClassDefineReservedUnused(OSSerialize, 2); +OSMetaClassDefineReservedUnused(OSSerialize, 3); +OSMetaClassDefineReservedUnused(OSSerialize, 4); +OSMetaClassDefineReservedUnused(OSSerialize, 5); +OSMetaClassDefineReservedUnused(OSSerialize, 6); +OSMetaClassDefineReservedUnused(OSSerialize, 7); + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +char * OSSerialize::text() const +{ + return data; +} + +void OSSerialize::clearText() +{ + bzero((void *)data, capacity); + length = 1; + tag = 0; + tags->flushCollection(); +} + +bool OSSerialize::previouslySerialized(const OSMetaClassBase *o) +{ + char temp[16]; + OSString *tagString; + + // look it up + tagString = (OSString *)tags->getObject((const OSSymbol *) o); + + // does it exist? 
+ if (tagString) { + addString("getCStringNoCopy()); + addString("\"/>"); + return true; + } + + // build a tag + sprintf(temp, "%u", tag++); + tagString = OSString::withCString(temp); + + // add to tag dictionary + tags->setObject((const OSSymbol *) o, tagString);// XXX check return + tagString->release(); + + return false; +} + +bool OSSerialize::addXMLStartTag(const OSMetaClassBase *o, const char *tagString) +{ + + if (!addChar('<')) return false; + if (!addString(tagString)) return false; + if (!addString(" ID=\"")) return false; + if (!addString(((OSString *)tags->getObject((const OSSymbol *)o))->getCStringNoCopy())) + return false; + if (!addChar('\"')) return false; + if (!addChar('>')) return false; + return true; +} + +bool OSSerialize::addXMLEndTag(const char *tagString) +{ + + if (!addChar('<')) return false; + if (!addChar('/')) return false; + if (!addString(tagString)) return false; + if (!addChar('>')) return false; + return true; +} + +bool OSSerialize::addChar(const char c) +{ + // add char, possibly extending our capacity + if (length >= capacity && length >=ensureCapacity(capacity+capacityIncrement)) + return false; + + data[length - 1] = c; + length++; + + return true; +} + +bool OSSerialize::addString(const char *s) +{ + bool rc = false; + + while (*s && (rc = addChar(*s++))) ; + + return rc; +} + +bool OSSerialize::initWithCapacity(unsigned int inCapacity) +{ + if (!super::init()) + return false; + + tags = OSDictionary::withCapacity(32); + if (!tags) { + return false; + } + + tag = 0; + length = 1; + capacity = inCapacity; + capacityIncrement = (capacity)? 
capacity : 256; + + capacity = (((capacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + data = (char *) kalloc(capacity); + if (!data) { + tags->release(); + tags = 0; + return false; + } + bzero((void *)data, capacity); + + + ACCUMSIZE(capacity); + + return true; +} + +OSSerialize *OSSerialize::withCapacity(unsigned int inCapacity) +{ + OSSerialize *me = new OSSerialize; + + if (me && !me->initWithCapacity(inCapacity)) { + me->free(); + return 0; + } + + return me; +} + +unsigned int OSSerialize::getLength() const { return length; } +unsigned int OSSerialize::getCapacity() const { return capacity; } +unsigned int OSSerialize::getCapacityIncrement() const { return capacityIncrement; } +unsigned int OSSerialize::setCapacityIncrement(unsigned int increment) +{ + capacityIncrement = (increment)? increment : 256; + return capacityIncrement; +} + +unsigned int OSSerialize::ensureCapacity(unsigned int newCapacity) +{ + char *newData; + unsigned int oldCapacity; + + if (newCapacity <= capacity) + return capacity; + + // round up + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + newData = (char *) kalloc(newCapacity); + if (newData) { + oldCapacity = capacity; + + ACCUMSIZE(newCapacity - oldCapacity); + + bcopy(data, newData, oldCapacity); + bzero(&newData[capacity], newCapacity - oldCapacity); + kfree((vm_offset_t)data, oldCapacity); + data = newData; + capacity = newCapacity; + } + + return capacity; +} + +void OSSerialize::free() +{ + if (tags) + tags->release(); + + if (data) { + kfree((vm_offset_t)data, capacity); + ACCUMSIZE( -capacity ); + } + super::free(); +} + + +OSDefineMetaClassAndStructors(OSSerializer, OSObject) + +OSSerializer * OSSerializer::forTarget( void * target, + OSSerializerCallback callback, void * ref = 0 ) +{ + OSSerializer * thing; + + thing = new OSSerializer; + if( thing && !thing->init()) { + thing->release(); + thing = 0; + } + + if( thing) { + thing->target = target; + thing->ref = ref; + 
thing->callback = callback; + } + return( thing ); +} + +bool OSSerializer::serialize( OSSerialize * s ) const +{ + return( (*callback)(target, ref, s) ); +} diff --git a/libkern/c++/OSSet.cpp b/libkern/c++/OSSet.cpp new file mode 100644 index 000000000..0b86dd4c1 --- /dev/null +++ b/libkern/c++/OSSet.cpp @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSet.m created by rsulack on Thu 11-Jun-1998 */ + +#include +#include +#include + +#define super OSCollection + +OSDefineMetaClassAndStructors(OSSet, OSCollection) +OSMetaClassDefineReservedUnused(OSSet, 0); +OSMetaClassDefineReservedUnused(OSSet, 1); +OSMetaClassDefineReservedUnused(OSSet, 2); +OSMetaClassDefineReservedUnused(OSSet, 3); +OSMetaClassDefineReservedUnused(OSSet, 4); +OSMetaClassDefineReservedUnused(OSSet, 5); +OSMetaClassDefineReservedUnused(OSSet, 6); +OSMetaClassDefineReservedUnused(OSSet, 7); + +bool OSSet::initWithCapacity(unsigned int inCapacity) +{ + if ( !super::init() ) + return false; + + members = OSArray::withCapacity(inCapacity); + if (!members) + return false; + + return true; +} + +bool OSSet::initWithObjects(const OSObject *inObjects[], + unsigned int inCount, + unsigned int inCapacity = 0) +{ + unsigned int capacity = inCount; + + if ( inCapacity ) { + if ( inCount > inCapacity ) + return false; + + capacity = inCapacity; + } + + if (!inObjects || !initWithCapacity(capacity)) + return false; + + for ( unsigned int i = 0; i < inCount; i++ ) { + if (members->getCount() < inCapacity) + setObject(inObjects[i]); + else + return false; + } + + return true; +} + +bool OSSet::initWithArray(const OSArray *inArray, + unsigned int inCapacity = 0) +{ + if ( !inArray ) + return false; + + return initWithObjects((const OSObject **) inArray->array, + inArray->count, inCapacity); +} + +bool OSSet::initWithSet(const OSSet *inSet, + unsigned int inCapacity = 0) +{ + return initWithArray(inSet->members, inCapacity); +} + +OSSet *OSSet::withCapacity(unsigned int capacity) +{ + OSSet *me = new OSSet; + + if (me && !me->initWithCapacity(capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSSet *OSSet::withObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity = 0) +{ + OSSet *me = new OSSet; + + if (me && !me->initWithObjects(objects, count, capacity)) { + me->free(); + 
return 0; + } + + return me; +} + +OSSet *OSSet::withArray(const OSArray *array, + unsigned int capacity = 0) +{ + OSSet *me = new OSSet; + + if (me && !me->initWithArray(array, capacity)) { + me->free(); + return 0; + } + + return me; +} + +OSSet *OSSet::withSet(const OSSet *set, + unsigned int capacity = 0) +{ + OSSet *me = new OSSet; + + if (me && !me->initWithSet(set, capacity)) { + me->free(); + return 0; + } + + return me; +} + +void OSSet::free() +{ + if (members) + members->release(); + + super::free(); +} + +unsigned int OSSet::getCount() const +{ + return members->count; +} + +unsigned int OSSet::getCapacity() const +{ + return members->capacity; +} + +unsigned int OSSet::getCapacityIncrement() const +{ + return members->capacityIncrement; +} + +unsigned int OSSet::setCapacityIncrement(unsigned int increment) +{ + return members->setCapacityIncrement(increment); +} + +unsigned int OSSet::ensureCapacity(unsigned int newCapacity) +{ + return members->ensureCapacity(newCapacity); +} + +void OSSet::flushCollection() +{ + haveUpdated(); + members->flushCollection(); +} + +bool OSSet::setObject(const OSMetaClassBase *anObject) +{ + if (containsObject(anObject)) + return false; + else { + haveUpdated(); + return members->setObject(anObject); + } +} + +bool OSSet::merge(const OSArray *array) +{ + const OSMetaClassBase *anObject; + bool retVal = false; + + for (int i = 0; (anObject = array->getObject(i)); i++) + if (setObject(anObject)) + retVal = true; + + return retVal; +} + +bool OSSet::merge(const OSSet *set) +{ + return setObject(set->members); +} + +void OSSet::removeObject(const OSMetaClassBase *anObject) +{ + const OSMetaClassBase *probeObject; + + for (int i = 0; (probeObject = members->getObject(i)); i++) + if (probeObject == anObject) { + haveUpdated(); + members->removeObject(i); + return; + } +} + + +bool OSSet::containsObject(const OSMetaClassBase *anObject) const +{ + return anObject && member(anObject); +} + +bool OSSet::member(const 
OSMetaClassBase *anObject) const +{ + OSMetaClassBase *probeObject; + + for (int i = 0; (probeObject = members->getObject(i)); i++) + if (probeObject == anObject) + return true; + + return false; +} + +OSObject *OSSet::getAnyObject() const +{ + return members->getObject(0); +} + +bool OSSet::isEqualTo(const OSSet *aSet) const +{ + unsigned int count; + unsigned int i; + const OSMetaClassBase *obj1; + const OSMetaClassBase *obj2; + + if ( this == aSet ) + return true; + + count = members->count; + if ( count != aSet->getCount() ) + return false; + + for ( i = 0; i < count; i++ ) { + obj1 = aSet->members->getObject(i); + obj2 = members->getObject(i); + if ( !obj1 || !obj2 ) + return false; + + if ( !obj1->isEqualTo(obj2) ) + return false; + } + + return true; +} + +bool OSSet::isEqualTo(const OSMetaClassBase *anObject) const +{ + OSSet *otherSet; + + otherSet = OSDynamicCast(OSSet, anObject); + if ( otherSet ) + return isEqualTo(otherSet); + else + return false; +} + +unsigned int OSSet::iteratorSize() const +{ + return sizeof(unsigned int); +} + +bool OSSet::initIterator(void *inIterator) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + + *iteratorP = 0; + return true; +} + +bool OSSet::getNextObjectForIterator(void *inIterator, OSObject **ret) const +{ + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; + + if (index < members->count) + *ret = members->getObject(index); + else + *ret = 0; + + return (*ret != 0); +} + +bool OSSet::serialize(OSSerialize *s) const +{ + const OSMetaClassBase *o; + + if (s->previouslySerialized(this)) return true; + + if (!s->addXMLStartTag(this, "set")) return false; + + for (int i = 0; (o = members->getObject(i)); i++) { + if (!o->serialize(s)) return false; + } + + return s->addXMLEndTag("set"); +} diff --git a/libkern/c++/OSString.cpp b/libkern/c++/OSString.cpp new file mode 100644 index 000000000..4ad5133c2 --- /dev/null +++ b/libkern/c++/OSString.cpp @@ -0,0 +1,273 @@ 
+/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOString.m created by rsulack on Wed 17-Sep-1997 */ +/* IOString.cpp converted to C++ on Tue 1998-9-22 */ + + +#include +#include +#include +#include + +#define super OSObject + +OSDefineMetaClassAndStructors(OSString, OSObject) +OSMetaClassDefineReservedUnused(OSString, 0); +OSMetaClassDefineReservedUnused(OSString, 1); +OSMetaClassDefineReservedUnused(OSString, 2); +OSMetaClassDefineReservedUnused(OSString, 3); +OSMetaClassDefineReservedUnused(OSString, 4); +OSMetaClassDefineReservedUnused(OSString, 5); +OSMetaClassDefineReservedUnused(OSString, 6); +OSMetaClassDefineReservedUnused(OSString, 7); +OSMetaClassDefineReservedUnused(OSString, 8); +OSMetaClassDefineReservedUnused(OSString, 9); +OSMetaClassDefineReservedUnused(OSString, 10); +OSMetaClassDefineReservedUnused(OSString, 11); +OSMetaClassDefineReservedUnused(OSString, 12); +OSMetaClassDefineReservedUnused(OSString, 13); +OSMetaClassDefineReservedUnused(OSString, 14); +OSMetaClassDefineReservedUnused(OSString, 15); + +#if OSALLOCDEBUG +extern "C" { + extern int 
debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +bool OSString::initWithString(const OSString *aString) +{ + return initWithCString(aString->string); +} + +bool OSString::initWithCString(const char *cString) +{ + if (!cString || !super::init()) + return false; + + length = strlen(cString) + 1; + string = (char *) kalloc(length); + if (!string) + return false; + + bcopy(cString, string, length); + + ACCUMSIZE(length); + + return true; +} + +bool OSString::initWithCStringNoCopy(const char *cString) +{ + if (!cString || !super::init()) + return false; + + length = strlen(cString) + 1; + flags |= kOSStringNoCopy; + string = (char *) cString; + + return true; +} + +OSString *OSString::withString(const OSString *aString) +{ + OSString *me = new OSString; + + if (me && !me->initWithString(aString)) { + me->free(); + return 0; + } + + return me; +} + +OSString *OSString::withCString(const char *cString) +{ + OSString *me = new OSString; + + if (me && !me->initWithCString(cString)) { + me->free(); + return 0; + } + + return me; +} + +OSString *OSString::withCStringNoCopy(const char *cString) +{ + OSString *me = new OSString; + + if (me && !me->initWithCStringNoCopy(cString)) { + me->free(); + return 0; + } + + return me; +} + +/* @@@ gvdl */ +#if 0 +OSString *OSString::stringWithFormat(const char *format, ...) 
+{ +#ifndef KERNEL // mach3xxx + OSString *me; + va_list argList; + + if (!format) + return 0; + + va_start(argList, format); + me = stringWithCapacity(256); + me->length = vsnprintf(me->string, 256, format, argList); + me->length++; // we include the null in the length + if (me->Length > 256) + me->Length = 256; + va_end (argList); + + return me; +#else + return 0; +#endif +} +#endif /* 0 */ + +void OSString::free() +{ + if ( !(flags & kOSStringNoCopy) && string) { + kfree((vm_offset_t)string, (vm_size_t)length); + ACCUMSIZE(-length); + } + + super::free(); +} + +unsigned int OSString::getLength() const { return length - 1; } + +const char *OSString::getCStringNoCopy() const +{ + return string; +} + +bool OSString::setChar(char aChar, unsigned int index) +{ + if ( !(flags & kOSStringNoCopy) && index < length - 1) { + string[index] = aChar; + + return true; + } + else + return false; +} + +char OSString::getChar(unsigned int index) const +{ + if (index < length) + return string[index]; + else + return '\0'; +} + + +bool OSString::isEqualTo(const OSString *aString) const +{ + if (length != aString->length) + return false; + else + return isEqualTo((const char *) aString->string); +} + +bool OSString::isEqualTo(const char *aCString) const +{ + return strcmp(string, aCString) == 0; +} + +bool OSString::isEqualTo(const OSMetaClassBase *obj) const +{ + OSString * str; + OSData * data; + + if ((str = OSDynamicCast(OSString, obj))) + return isEqualTo(str); + else if ((data = OSDynamicCast (OSData, obj))) + return isEqualTo(data); + else + return false; +} + +bool OSString::isEqualTo(const OSData *obj) const +{ + if (NULL == obj) + return false; + + unsigned int dataLen = obj->getLength ();; + char * dataPtr = (char *) obj->getBytesNoCopy (); + + if (dataLen != length) { + + // check for the fact that OSData may be a buffer that + // that includes a termination byte and will thus have + // a length of the actual string length PLUS 1. 
In this + // case we verify that the additional byte is a terminator + // and if so count the two lengths as being the same. + + if ( (dataLen - length) == 1 ) { + if (dataPtr[dataLen-1] != 0) + return false; + dataLen--; + } + else + return false; + } + + for ( unsigned int i=0; i < dataLen; i++ ) { + if ( *dataPtr++ != string[i] ) + return false; + } + + return true; +} + +bool OSString::serialize(OSSerialize *s) const +{ + char *c = string; + + if (s->previouslySerialized(this)) return true; + + if (!s->addXMLStartTag(this, "string")) return false; + while (*c) { + if (*c == '<') { + if (!s->addString("<")) return false; + } else if (*c == '>') { + if (!s->addString(">")) return false; + } else if (*c == '&') { + if (!s->addString("&")) return false; + } else { + if (!s->addChar(*c)) return false; + } + c++; + } + + return s->addXMLEndTag("string"); +} diff --git a/libkern/c++/OSSymbol.cpp b/libkern/c++/OSSymbol.cpp new file mode 100644 index 000000000..df7465901 --- /dev/null +++ b/libkern/c++/OSSymbol.cpp @@ -0,0 +1,521 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSymbol.cpp created by gvdl on Fri 1998-11-17 */ + +#include + +__BEGIN_DECLS +#include +__END_DECLS + +#include +#include + +#define super OSString + +typedef struct { int i, j; } OSSymbolPoolState; + +#if OSALLOCDEBUG +extern "C" { + extern int debug_container_malloc_size; +}; +#define ACCUMSIZE(s) do { debug_container_malloc_size += (s); } while(0) +#else +#define ACCUMSIZE(s) +#endif + +class OSSymbolPool +{ +private: + static const unsigned int kInitBucketCount = 16; + + typedef struct { unsigned int count; OSSymbol **symbolP; } Bucket; + + Bucket *buckets; + unsigned int nBuckets; + unsigned int count; + mutex_t *poolGate; + + static inline void hashSymbol(const char *s, + unsigned int *hashP, + unsigned int *lenP) + { + unsigned int hash = 0; + unsigned int len = 0; + + /* Unroll the loop. */ + for (;;) { + if (!*s) break; len++; hash ^= *s++; + if (!*s) break; len++; hash ^= *s++ << 8; + if (!*s) break; len++; hash ^= *s++ << 16; + if (!*s) break; len++; hash ^= *s++ << 24; + } + *lenP = len; + *hashP = hash; + } + + static unsigned long log2(unsigned int x); + static unsigned long exp2ml(unsigned int x); + + void reconstructSymbols(); + +public: + static void *operator new(size_t size); + static void operator delete(void *mem, size_t size); + + OSSymbolPool() { }; + OSSymbolPool(const OSSymbolPool *old); + virtual ~OSSymbolPool(); + + bool init(); + + inline void closeGate() { mutex_lock(poolGate); }; + inline void openGate() { mutex_unlock(poolGate); }; + + OSSymbol *findSymbol(const char *cString) const; + OSSymbol *insertSymbol(OSSymbol *sym); + void removeSymbol(const char *cString); + + OSSymbolPoolState initHashState(); + OSSymbol *nextHashState(OSSymbolPoolState *stateP); +}; + +void * OSSymbolPool::operator new(size_t size) +{ + void *mem = (void *)kalloc(size); + ACCUMSIZE(size); + assert(mem); + bzero(mem, size); + + return mem; +} + +void OSSymbolPool::operator delete(void *mem, size_t size) +{ + 
kfree((vm_offset_t)mem, size); + ACCUMSIZE(-size); +} + +bool OSSymbolPool::init() +{ + count = 0; + nBuckets = exp2ml(1 + log2(kInitBucketCount)); + buckets = (Bucket *) kalloc(nBuckets * sizeof(Bucket)); + ACCUMSIZE(nBuckets * sizeof(Bucket)); + if (!buckets) + return false; + + bzero(buckets, nBuckets * sizeof(Bucket)); + + poolGate = mutex_alloc(0); + + return poolGate != 0; +} + +OSSymbolPool::OSSymbolPool(const OSSymbolPool *old) +{ + count = old->count; + nBuckets = old->nBuckets; + buckets = old->buckets; + + poolGate = 0; // Do not duplicate the poolGate +} + +OSSymbolPool::~OSSymbolPool() +{ + if (buckets) { + kfree((vm_offset_t)buckets, nBuckets * sizeof(Bucket)); + ACCUMSIZE(-(nBuckets * sizeof(Bucket))); + } + + if (poolGate) + kfree((vm_offset_t) poolGate, 36 * 4); +} + +unsigned long OSSymbolPool::log2(unsigned int x) +{ + unsigned long i; + + for (i = 0; x > 1 ; i++) + x >>= 1; + return i; +} + +unsigned long OSSymbolPool::exp2ml(unsigned int x) +{ + return (1 << x) - 1; +} + +OSSymbolPoolState OSSymbolPool::initHashState() +{ + OSSymbolPoolState newState = { nBuckets, 0 }; + return newState; +} + +OSSymbol *OSSymbolPool::nextHashState(OSSymbolPoolState *stateP) +{ + Bucket *thisBucket = &buckets[stateP->i]; + + while (!stateP->j) { + if (!stateP->i) + return 0; + stateP->i--; + thisBucket--; + stateP->j = thisBucket->count; + } + + stateP->j--; + if (thisBucket->count == 1) + return (OSSymbol *) thisBucket->symbolP; + else + return thisBucket->symbolP[stateP->j]; +} + +void OSSymbolPool::reconstructSymbols() +{ + OSSymbolPool old(this); + OSSymbol *insert; + OSSymbolPoolState state; + + nBuckets += nBuckets + 1; + count = 0; + buckets = (Bucket *) kalloc(nBuckets * sizeof(Bucket)); + ACCUMSIZE(nBuckets * sizeof(Bucket)); + /* @@@ gvdl: Zero test and panic if can't set up pool */ + bzero(buckets, nBuckets * sizeof(Bucket)); + + state = old.initHashState(); + while ( (insert = old.nextHashState(&state)) ) + insertSymbol(insert); +} + +OSSymbol 
*OSSymbolPool::findSymbol(const char *cString) const +{ + Bucket *thisBucket; + unsigned int j, inLen, hash; + OSSymbol *probeSymbol, **list; + + hashSymbol(cString, &hash, &inLen); inLen++; + thisBucket = &buckets[hash % nBuckets]; + j = thisBucket->count; + + if (!j) + return 0; + + if (j == 1) { + probeSymbol = (OSSymbol *) thisBucket->symbolP; + + if (inLen == probeSymbol->length + && (strcmp(probeSymbol->string, cString) == 0) + && (probeSymbol->getRetainCount() >= 1)) // WRONG need when + return probeSymbol; + else + return 0; + } + + for (list = thisBucket->symbolP; j--; list++) { + probeSymbol = *list; + if (inLen == probeSymbol->length + && (strcmp(probeSymbol->string, cString) == 0) + && (probeSymbol->getRetainCount() >= 1)) // WRONG need when + return probeSymbol; + } + + return 0; +} + +OSSymbol *OSSymbolPool::insertSymbol(OSSymbol *sym) +{ + const char *cString = sym->string; + Bucket *thisBucket; + unsigned int j, inLen, hash; + OSSymbol *probeSymbol, **list; + + hashSymbol(cString, &hash, &inLen); inLen++; + thisBucket = &buckets[hash % nBuckets]; + j = thisBucket->count; + + if (!j) { + thisBucket->symbolP = (OSSymbol **) sym; + thisBucket->count++; + count++; + return 0; + } + + if (j == 1) { + probeSymbol = (OSSymbol *) thisBucket->symbolP; + + if (inLen == probeSymbol->length + && strcmp(probeSymbol->string, cString) == 0) + return probeSymbol; + + list = (OSSymbol **) kalloc(2 * sizeof(OSSymbol *)); + ACCUMSIZE(2 * sizeof(OSSymbol *)); + /* @@@ gvdl: Zero test and panic if can't set up pool */ + list[0] = sym; + list[1] = probeSymbol; + thisBucket->symbolP = list; + thisBucket->count++; + count++; + if (count > nBuckets) + reconstructSymbols(); + + return 0; + } + + for (list = thisBucket->symbolP; j--; list++) { + probeSymbol = *list; + if (inLen == probeSymbol->length + && strcmp(probeSymbol->string, cString) == 0) + return probeSymbol; + } + + j = thisBucket->count++; + count++; + list = (OSSymbol **) kalloc(thisBucket->count * 
sizeof(OSSymbol *)); + ACCUMSIZE(thisBucket->count * sizeof(OSSymbol *)); + /* @@@ gvdl: Zero test and panic if can't set up pool */ + list[0] = sym; + bcopy(thisBucket->symbolP, list + 1, j * sizeof(OSSymbol *)); + kfree((vm_offset_t)thisBucket->symbolP, j * sizeof(OSSymbol *)); + ACCUMSIZE(-(j * sizeof(OSSymbol *))); + thisBucket->symbolP = list; + if (count > nBuckets) + reconstructSymbols(); + + return 0; +} + +void OSSymbolPool::removeSymbol(const char *cString) +{ + Bucket *thisBucket; + unsigned int j, inLen, hash; + OSSymbol *probeSymbol, **list; + + hashSymbol(cString, &hash, &inLen); inLen++; + thisBucket = &buckets[hash % nBuckets]; + j = thisBucket->count; + list = thisBucket->symbolP; + + if (!j) + return; + + if (j == 1) { + probeSymbol = (OSSymbol *) list; + + if (inLen == probeSymbol->length + && strcmp(probeSymbol->string, cString) == 0) { + thisBucket->symbolP = 0; + count--; + thisBucket->count--; + return; + } + return; + } + + if (j == 2) { + probeSymbol = list[0]; + if (inLen == probeSymbol->length + && strcmp(probeSymbol->string, cString) == 0) { + thisBucket->symbolP = (OSSymbol **) list[1]; + kfree((vm_offset_t)list, 2 * sizeof(OSSymbol *)); + ACCUMSIZE(-(2 * sizeof(OSSymbol *))); + count--; + thisBucket->count--; + return; + } + + probeSymbol = list[1]; + if (inLen == probeSymbol->length + && strcmp(probeSymbol->string, cString) == 0) { + thisBucket->symbolP = (OSSymbol **) list[0]; + kfree((vm_offset_t)list, 2 * sizeof(OSSymbol *)); + ACCUMSIZE(-(2 * sizeof(OSSymbol *))); + count--; + thisBucket->count--; + return; + } + return; + } + + for (; j--; list++) { + probeSymbol = *list; + if (inLen == probeSymbol->length + && strcmp(probeSymbol->string, cString) == 0) { + + list = (OSSymbol **) + kalloc((thisBucket->count-1) * sizeof(OSSymbol *)); + ACCUMSIZE((thisBucket->count-1) * sizeof(OSSymbol *)); + if (thisBucket->count-1 != j) + bcopy(thisBucket->symbolP, list, + (thisBucket->count-1-j) * sizeof(OSSymbol *)); + if (j) + 
bcopy(thisBucket->symbolP + thisBucket->count-j, + list + thisBucket->count-1-j, + j * sizeof(OSSymbol *)); + kfree((vm_offset_t)thisBucket->symbolP, thisBucket->count * sizeof(OSSymbol *)); + ACCUMSIZE(-(thisBucket->count * sizeof(OSSymbol *))); + thisBucket->symbolP = list; + count--; + thisBucket->count--; + return; + } + } +} + +/* + ********************************************************************* + * From here on we are actually implementing the OSSymbol class + ********************************************************************* + */ +OSDefineMetaClassAndStructorsWithInit(OSSymbol, OSString, + OSSymbol::initialize()) +OSMetaClassDefineReservedUnused(OSSymbol, 0); +OSMetaClassDefineReservedUnused(OSSymbol, 1); +OSMetaClassDefineReservedUnused(OSSymbol, 2); +OSMetaClassDefineReservedUnused(OSSymbol, 3); +OSMetaClassDefineReservedUnused(OSSymbol, 4); +OSMetaClassDefineReservedUnused(OSSymbol, 5); +OSMetaClassDefineReservedUnused(OSSymbol, 6); +OSMetaClassDefineReservedUnused(OSSymbol, 7); + +static OSSymbolPool *pool; + +void OSSymbol::initialize() +{ + pool = new OSSymbolPool; + assert(pool); + + if (!pool->init()) { + delete pool; + assert(false); + }; +} + +bool OSSymbol::initWithCStringNoCopy(const char *) { return false; } +bool OSSymbol::initWithCString(const char *) { return false; } +bool OSSymbol::initWithString(const OSString *) { return false; } + +const OSSymbol *OSSymbol::withString(const OSString *aString) +{ + // This string may be a OSSymbol already, cheap check. 
+ if (OSDynamicCast(OSSymbol, aString)) { + aString->retain(); + return (const OSSymbol *) aString; + } + else if (((const OSSymbol *) aString)->flags & kOSStringNoCopy) + return OSSymbol::withCStringNoCopy(aString->getCStringNoCopy()); + else + return OSSymbol::withCString(aString->getCStringNoCopy()); +} + +const OSSymbol *OSSymbol::withCString(const char *cString) +{ + pool->closeGate(); + + OSSymbol *newSymb = pool->findSymbol(cString); + if (newSymb) + newSymb->retain(); + else if ( (newSymb = new OSSymbol) ) { + if (newSymb->OSString::initWithCString(cString)) + pool->insertSymbol(newSymb); + else { + newSymb->free(); + newSymb = 0; + } + } + pool->openGate(); + + return newSymb; +} + +const OSSymbol *OSSymbol::withCStringNoCopy(const char *cString) +{ + pool->closeGate(); + + OSSymbol *newSymb = pool->findSymbol(cString); + if (newSymb) + newSymb->retain(); + else if ( (newSymb = new OSSymbol) ) { + if (newSymb->OSString::initWithCStringNoCopy(cString)) + pool->insertSymbol(newSymb); + else { + newSymb->free(); + newSymb = 0; + } + } + pool->openGate(); + + return newSymb; +} + +void OSSymbol::checkForPageUnload(void *startAddr, void *endAddr) +{ + OSSymbol *probeSymbol; + OSSymbolPoolState state; + + pool->closeGate(); + state = pool->initHashState(); + while ( (probeSymbol = pool->nextHashState(&state)) ) { + if (probeSymbol->string >= startAddr && probeSymbol->string < endAddr) { + const char *oldString = probeSymbol->string; + + probeSymbol->string = (char *) kalloc(probeSymbol->length); + ACCUMSIZE(probeSymbol->length); + bcopy(oldString, probeSymbol->string, probeSymbol->length); + probeSymbol->flags &= ~kOSStringNoCopy; + } + } + pool->openGate(); +} + +void OSSymbol::free() +{ + pool->closeGate(); + pool->removeSymbol(string); + pool->openGate(); + + super::free(); +} + +bool OSSymbol::isEqualTo(const char *aCString) const +{ + return super::isEqualTo(aCString); +} + +bool OSSymbol::isEqualTo(const OSSymbol *aSymbol) const +{ + return aSymbol == 
this; +} + +bool OSSymbol::isEqualTo(const OSMetaClassBase *obj) const +{ + OSSymbol * sym; + OSString * str; + + if ((sym = OSDynamicCast(OSSymbol, obj))) + return isEqualTo(sym); + else if ((str = OSDynamicCast(OSString, obj))) + return super::isEqualTo(str); + else + return false; +} diff --git a/libkern/c++/OSUnserialize.cpp b/libkern/c++/OSUnserialize.cpp new file mode 100644 index 000000000..bf5d57040 --- /dev/null +++ b/libkern/c++/OSUnserialize.cpp @@ -0,0 +1,1614 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* OSUnserialize.y created by rsulack on Nov 21 1998 */ + +// "classic" parser for unserializing OSContainer objects +// +// XXX - this code should really be removed! +// - the XML format is now prefered +// - this code leaks on syntax errors, the XML doesn't +// - "classic" looks, reads, ... 
much better than XML :-( +// - well except the XML is more efficent on OSData +// +// +// to build : +// bison -p OSUnserialize OSUnserialize.y +// head -50 OSUnserialize.y > OSUnserialize.cpp +// sed -e "s/stdio.h/stddef.h/" < OSUnserialize.tab.c >> OSUnserialize.cpp +// +// when changing code check in both OSUnserialize.y and OSUnserialize.cpp +// +// +// +// +// DO NOT EDIT OSUnserialize.tab.cpp! +// +// this means you! +// +// +// +// + +/* A Bison parser, made from OSUnserialize.y + by GNU Bison version 1.28 */ + +#define YYBISON 1 /* Identify Bison output. */ + +#define yyparse OSUnserializeparse +#define yylex OSUnserializelex +#define yyerror OSUnserializeerror +#define yylval OSUnserializelval +#define yychar OSUnserializechar +#define yydebug OSUnserializedebug +#define yynerrs OSUnserializenerrs +#define NUMBER 257 +#define STRING 258 +#define DATA 259 +#define BOOLEAN 260 +#define SYNTAX_ERROR 261 + +#line 54 "OSUnserialize.y" + +#include +#include +#include + +typedef struct object { + struct object *next; + struct object *prev; + void *object; + int size; // for data + union { + void *key; // for dictionary + long long offset; // for offset + } u; + +} object_t; + +static int yyparse(); +static int yyerror(char *s); +static int yylex(); + +static object_t * newObject(); +static void freeObject(object_t *o); + +static OSObject *buildOSDictionary(object_t *); +static OSObject *buildOSArray(object_t *); +static OSObject *buildOSSet(object_t *); +static OSObject *buildOSString(object_t *); +static OSObject *buildOSData(object_t *); +static OSObject *buildOSOffset(object_t *); +static OSObject *buildOSBoolean(object_t *o); + +static void rememberObject(int, object_t *); +static OSObject *retrieveObject(int); + +// temp variable to use during parsing +static object_t *o; + +// resultant object of parsed text +static OSObject *parsedObject; + +#define YYSTYPE object_t * + +extern "C" { +extern void *kern_os_malloc(size_t size); +extern void 
*kern_os_realloc(void * addr, size_t size); +extern void kern_os_free(void * addr); +} /* extern "C" */ + +#define malloc(s) kern_os_malloc(s) +#define realloc(a, s) kern_os_realloc(a, s) +#define free(a) kern_os_free(a) + +#ifndef YYSTYPE +#define YYSTYPE int +#endif +#include + +#ifndef __cplusplus +#ifndef __STDC__ +#define const +#endif +#endif + + + +#define YYFINAL 43 +#define YYFLAG -32768 +#define YYNTBASE 19 + +#define YYTRANSLATE(x) ((unsigned)(x) <= 261 ? yytranslate[x] : 31) + +static const char yytranslate[] = { 0, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 13, + 14, 2, 2, 17, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 18, 12, 2, + 11, 2, 2, 8, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 15, 2, 16, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 9, 2, 10, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 1, 3, 4, 5, 6, + 7 +}; + +#if YYDEBUG != 0 +static const short yyprhs[] = { 0, + 0, 1, 3, 5, 7, 9, 11, 13, 15, 17, + 19, 22, 26, 29, 33, 35, 38, 43, 46, 50, + 53, 57, 59, 63, 67, 69, 71 +}; + +static const short yyrhs[] = { -1, + 20, 0, 7, 0, 21, 0, 24, 0, 25, 0, + 29, 0, 28, 0, 27, 0, 30, 0, 8, 3, + 0, 20, 8, 3, 0, 9, 10, 0, 9, 22, + 10, 0, 23, 0, 22, 23, 0, 20, 11, 20, + 12, 0, 13, 14, 0, 13, 26, 14, 0, 15, + 16, 0, 15, 26, 16, 0, 20, 0, 26, 17, + 20, 0, 3, 18, 3, 0, 5, 0, 4, 0, + 6, 0 +}; + +#endif + +#if YYDEBUG != 0 +static const short yyrline[] = { 0, + 116, 117, 118, 121, 122, 123, 124, 125, 126, 127, + 128, 137, 145, 146, 
149, 150, 153, 163, 164, 167, + 168, 171, 176, 187, 195, 200, 205 +}; +#endif + + +#if YYDEBUG != 0 || defined (YYERROR_VERBOSE) + +static const char * const yytname[] = { "$","error","$undefined.","NUMBER", +"STRING","DATA","BOOLEAN","SYNTAX_ERROR","'@'","'{'","'}'","'='","';'","'('", +"')'","'['","']'","','","':'","input","object","dict","pairs","pair","array", +"set","elements","offset","data","string","boolean", NULL +}; +#endif + +static const short yyr1[] = { 0, + 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 21, 21, 22, 22, 23, 24, 24, 25, + 25, 26, 26, 27, 28, 29, 30 +}; + +static const short yyr2[] = { 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 2, 3, 2, 3, 1, 2, 4, 2, 3, 2, + 3, 1, 3, 3, 1, 1, 1 +}; + +static const short yydefact[] = { 1, + 0, 26, 25, 27, 3, 0, 0, 0, 0, 2, + 4, 5, 6, 9, 8, 7, 10, 0, 11, 13, + 0, 0, 15, 18, 22, 0, 20, 0, 0, 24, + 0, 14, 16, 19, 0, 21, 12, 0, 23, 17, + 0, 0, 0 +}; + +static const short yydefgoto[] = { 41, + 21, 11, 22, 23, 12, 13, 26, 14, 15, 16, + 17 +}; + +static const short yypact[] = { 12, + -13,-32768,-32768,-32768,-32768, 9, 33, 46, -2, 2, +-32768,-32768,-32768,-32768,-32768,-32768,-32768, 25,-32768,-32768, + 21, 59,-32768,-32768, 2, 16,-32768, 7, 31,-32768, + 72,-32768,-32768,-32768, 72,-32768,-32768, 14, 2,-32768, + 40, 44,-32768 +}; + +static const short yypgoto[] = {-32768, + 0,-32768,-32768, 23,-32768,-32768, 38,-32768,-32768,-32768, +-32768 +}; + + +#define YYLAST 87 + + +static const short yytable[] = { 10, + 1, 2, 3, 4, 18, 6, 7, 25, 25, 29, + 8, 19, 9, 27, 1, 2, 3, 4, 5, 6, + 7, 29, 36, 35, 8, 40, 9, 30, 29, 34, + 38, 31, 35, 37, 39, 1, 2, 3, 4, 42, + 6, 7, 20, 43, 33, 8, 28, 9, 1, 2, + 3, 4, 0, 6, 7, 0, 0, 0, 8, 24, + 9, 1, 2, 3, 4, 0, 6, 7, 32, 0, + 0, 8, 0, 9, 1, 2, 3, 4, 0, 6, + 7, 0, 0, 0, 8, 0, 9 +}; + +static const short yycheck[] = { 0, + 3, 4, 5, 6, 18, 8, 9, 8, 9, 8, + 13, 3, 15, 16, 3, 4, 5, 6, 7, 8, + 9, 8, 16, 17, 13, 12, 15, 3, 8, 14, + 31, 11, 17, 3, 35, 3, 4, 5, 6, 0, + 8, 9, 10, 0, 22, 13, 9, 
15, 3, 4, + 5, 6, -1, 8, 9, -1, -1, -1, 13, 14, + 15, 3, 4, 5, 6, -1, 8, 9, 10, -1, + -1, 13, -1, 15, 3, 4, 5, 6, -1, 8, + 9, -1, -1, -1, 13, -1, 15 +}; +/* -*-C-*- Note some compilers choke on comments on `#line' lines. */ +#line 3 "/usr/share/bison.simple" +/* This file comes from bison-1.28. */ + +/* Skeleton output parser for bison, + Copyright (C) 1984, 1989, 1990 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* As a special exception, when this file is copied by Bison into a + Bison output file, you may use that output file without restriction. + This special exception was added by the Free Software Foundation + in version 1.24 of Bison. */ + +/* This is the parser code that is written into each bison parser + when the %semantic_parser declaration is not specified in the grammar. + It was written by Richard Stallman by simplifying the hairy parser + used when %semantic_parser is specified. */ + +#ifndef YYSTACK_USE_ALLOCA +#ifdef alloca +#define YYSTACK_USE_ALLOCA +#else /* alloca not defined */ +#ifdef __GNUC__ +#define YYSTACK_USE_ALLOCA +#define alloca __builtin_alloca +#else /* not GNU C. 
*/ +#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi) || (defined (__sun) && defined (__i386)) +#define YYSTACK_USE_ALLOCA +#include +#else /* not sparc */ +/* We think this test detects Watcom and Microsoft C. */ +/* This used to test MSDOS, but that is a bad idea + since that symbol is in the user namespace. */ +#if (defined (_MSDOS) || defined (_MSDOS_)) && !defined (__TURBOC__) +#if 0 /* No need for malloc.h, which pollutes the namespace; + instead, just don't use alloca. */ +#include +#endif +#else /* not MSDOS, or __TURBOC__ */ +#if defined(_AIX) +/* I don't know what this was needed for, but it pollutes the namespace. + So I turned it off. rms, 2 May 1997. */ +/* #include */ + #pragma alloca +#define YYSTACK_USE_ALLOCA +#else /* not MSDOS, or __TURBOC__, or _AIX */ +#if 0 +#ifdef __hpux /* haible@ilog.fr says this works for HPUX 9.05 and up, + and on HPUX 10. Eventually we can turn this on. */ +#define YYSTACK_USE_ALLOCA +#define alloca __builtin_alloca +#endif /* __hpux */ +#endif +#endif /* not _AIX */ +#endif /* not MSDOS, or __TURBOC__ */ +#endif /* not sparc */ +#endif /* not GNU C */ +#endif /* alloca not defined */ +#endif /* YYSTACK_USE_ALLOCA not defined */ + +#ifdef YYSTACK_USE_ALLOCA +#define YYSTACK_ALLOC alloca +#else +#define YYSTACK_ALLOC malloc +#endif + +/* Note: there must be only one dollar sign in this file. + It is replaced by the list of actions, each action + as one case of the switch. */ + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY -2 +#define YYEOF 0 +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrlab1 +/* Like YYERROR except do call yyerror. + This remains here temporarily to ease the + transition to the new meaning of YYERROR, for GCC. + Once GCC version 2 has supplanted version 1, this can go. 
*/ +#define YYFAIL goto yyerrlab +#define YYRECOVERING() (!!yyerrstatus) +#define YYBACKUP(token, value) \ +do \ + if (yychar == YYEMPTY && yylen == 1) \ + { yychar = (token), yylval = (value); \ + yychar1 = YYTRANSLATE (yychar); \ + YYPOPSTACK; \ + goto yybackup; \ + } \ + else \ + { yyerror ("syntax error: cannot back up"); YYERROR; } \ +while (0) + +#define YYTERROR 1 +#define YYERRCODE 256 + +#ifndef YYPURE +#define YYLEX yylex() +#endif + +#ifdef YYPURE +#ifdef YYLSP_NEEDED +#ifdef YYLEX_PARAM +#define YYLEX yylex(&yylval, &yylloc, YYLEX_PARAM) +#else +#define YYLEX yylex(&yylval, &yylloc) +#endif +#else /* not YYLSP_NEEDED */ +#ifdef YYLEX_PARAM +#define YYLEX yylex(&yylval, YYLEX_PARAM) +#else +#define YYLEX yylex(&yylval) +#endif +#endif /* not YYLSP_NEEDED */ +#endif + +/* If nonreentrant, generate the variables here */ + +#ifndef YYPURE + +int yychar; /* the lookahead symbol */ +YYSTYPE yylval; /* the semantic value of the */ + /* lookahead symbol */ + +#ifdef YYLSP_NEEDED +YYLTYPE yylloc; /* location data for the lookahead */ + /* symbol */ +#endif + +int yynerrs; /* number of parse errors so far */ +#endif /* not YYPURE */ + +#if YYDEBUG != 0 +int yydebug; /* nonzero means print parse trace */ +/* Since this is uninitialized, it does not stop multiple parsers + from coexisting. */ +#endif + +/* YYINITDEPTH indicates the initial size of the parser's stacks */ + +#ifndef YYINITDEPTH +#define YYINITDEPTH 200 +#endif + +/* YYMAXDEPTH is the maximum size the stacks can grow to + (effective only if the built-in stack extension method is used). */ + +#if YYMAXDEPTH == 0 +#undef YYMAXDEPTH +#endif + +#ifndef YYMAXDEPTH +#define YYMAXDEPTH 10000 +#endif + +/* Define __yy_memcpy. Note that the size argument + should be passed with type unsigned int, because that is what the non-GCC + definitions require. With GCC, __builtin_memcpy takes an arg + of type size_t, but it can handle unsigned int. */ + +#if __GNUC__ > 1 /* GNU C and GNU C++ define this. 
*/ +#define __yy_memcpy(TO,FROM,COUNT) __builtin_memcpy(TO,FROM,COUNT) +#else /* not GNU C or C++ */ +#ifndef __cplusplus + +/* This is the most reliable way to avoid incompatibilities + in available built-in functions on various systems. */ +static void +__yy_memcpy (to, from, count) + char *to; + char *from; + unsigned int count; +{ + register char *f = from; + register char *t = to; + register int i = count; + + while (i-- > 0) + *t++ = *f++; +} + +#else /* __cplusplus */ + +/* This is the most reliable way to avoid incompatibilities + in available built-in functions on various systems. */ +static void +__yy_memcpy (char *to, char *from, unsigned int count) +{ + register char *t = to; + register char *f = from; + register int i = count; + + while (i-- > 0) + *t++ = *f++; +} + +#endif +#endif + +#line 217 "/usr/share/bison.simple" + +/* The user can define YYPARSE_PARAM as the name of an argument to be passed + into yyparse. The argument should have type void *. + It should actually point to an object. + Grammar actions can access the variable by casting it + to the proper pointer type. */ + +#ifdef YYPARSE_PARAM +#ifdef __cplusplus +#define YYPARSE_PARAM_ARG void *YYPARSE_PARAM +#define YYPARSE_PARAM_DECL +#else /* not __cplusplus */ +#define YYPARSE_PARAM_ARG YYPARSE_PARAM +#define YYPARSE_PARAM_DECL void *YYPARSE_PARAM; +#endif /* not __cplusplus */ +#else /* not YYPARSE_PARAM */ +#define YYPARSE_PARAM_ARG +#define YYPARSE_PARAM_DECL +#endif /* not YYPARSE_PARAM */ + +/* Prevent warning if -Wstrict-prototypes. 
*/ +#ifdef __GNUC__ +#ifdef YYPARSE_PARAM +int yyparse (void *); +#else +int yyparse (void); +#endif +#endif + +int +yyparse(YYPARSE_PARAM_ARG) + YYPARSE_PARAM_DECL +{ + register int yystate; + register int yyn; + register short *yyssp; + register YYSTYPE *yyvsp; + int yyerrstatus; /* number of tokens to shift before error messages enabled */ + int yychar1 = 0; /* lookahead token as an internal (translated) token number */ + + short yyssa[YYINITDEPTH]; /* the state stack */ + YYSTYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */ + + short *yyss = yyssa; /* refer to the stacks thru separate pointers */ + YYSTYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */ + +#ifdef YYLSP_NEEDED + YYLTYPE yylsa[YYINITDEPTH]; /* the location stack */ + YYLTYPE *yyls = yylsa; + YYLTYPE *yylsp; + +#define YYPOPSTACK (yyvsp--, yyssp--, yylsp--) +#else +#define YYPOPSTACK (yyvsp--, yyssp--) +#endif + + int yystacksize = YYINITDEPTH; + int yyfree_stacks = 0; + +#ifdef YYPURE + int yychar; + YYSTYPE yylval; + int yynerrs; +#ifdef YYLSP_NEEDED + YYLTYPE yylloc; +#endif +#endif + + YYSTYPE yyval; /* the variable used to return */ + /* semantic values from the action */ + /* routines */ + + int yylen; + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Starting parse\n"); +#endif + + yystate = 0; + yyerrstatus = 0; + yynerrs = 0; + yychar = YYEMPTY; /* Cause a token to be read. */ + + /* Initialize stack pointers. + Waste one element of value and location stack + so that they stay on the same level as the state stack. + The wasted elements are never initialized. */ + + yyssp = yyss - 1; + yyvsp = yyvs; +#ifdef YYLSP_NEEDED + yylsp = yyls; +#endif + +/* Push a new state, which is found in yystate . */ +/* In all cases, when you get here, the value and location stacks + have just been pushed. so pushing a state here evens the stacks. 
*/ +yynewstate: + + *++yyssp = yystate; + + if (yyssp >= yyss + yystacksize - 1) + { + /* Give user a chance to reallocate the stack */ + /* Use copies of these so that the &'s don't force the real ones into memory. */ + YYSTYPE *yyvs1 = yyvs; + short *yyss1 = yyss; +#ifdef YYLSP_NEEDED + YYLTYPE *yyls1 = yyls; +#endif + + /* Get the current used size of the three stacks, in elements. */ + int size = yyssp - yyss + 1; + +#ifdef yyoverflow + /* Each stack pointer address is followed by the size of + the data in use in that stack, in bytes. */ +#ifdef YYLSP_NEEDED + /* This used to be a conditional around just the two extra args, + but that might be undefined if yyoverflow is a macro. */ + yyoverflow("parser stack overflow", + &yyss1, size * sizeof (*yyssp), + &yyvs1, size * sizeof (*yyvsp), + &yyls1, size * sizeof (*yylsp), + &yystacksize); +#else + yyoverflow("parser stack overflow", + &yyss1, size * sizeof (*yyssp), + &yyvs1, size * sizeof (*yyvsp), + &yystacksize); +#endif + + yyss = yyss1; yyvs = yyvs1; +#ifdef YYLSP_NEEDED + yyls = yyls1; +#endif +#else /* no yyoverflow */ + /* Extend the stack our own way. 
*/ + if (yystacksize >= YYMAXDEPTH) + { + yyerror("parser stack overflow"); + if (yyfree_stacks) + { + free (yyss); + free (yyvs); +#ifdef YYLSP_NEEDED + free (yyls); +#endif + } + return 2; + } + yystacksize *= 2; + if (yystacksize > YYMAXDEPTH) + yystacksize = YYMAXDEPTH; +#ifndef YYSTACK_USE_ALLOCA + yyfree_stacks = 1; +#endif + yyss = (short *) YYSTACK_ALLOC (yystacksize * sizeof (*yyssp)); + __yy_memcpy ((char *)yyss, (char *)yyss1, + size * (unsigned int) sizeof (*yyssp)); + yyvs = (YYSTYPE *) YYSTACK_ALLOC (yystacksize * sizeof (*yyvsp)); + __yy_memcpy ((char *)yyvs, (char *)yyvs1, + size * (unsigned int) sizeof (*yyvsp)); +#ifdef YYLSP_NEEDED + yyls = (YYLTYPE *) YYSTACK_ALLOC (yystacksize * sizeof (*yylsp)); + __yy_memcpy ((char *)yyls, (char *)yyls1, + size * (unsigned int) sizeof (*yylsp)); +#endif +#endif /* no yyoverflow */ + + yyssp = yyss + size - 1; + yyvsp = yyvs + size - 1; +#ifdef YYLSP_NEEDED + yylsp = yyls + size - 1; +#endif + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Stack size increased to %d\n", yystacksize); +#endif + + if (yyssp >= yyss + yystacksize - 1) + YYABORT; + } + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Entering state %d\n", yystate); +#endif + + goto yybackup; + yybackup: + +/* Do appropriate processing given the current state. */ +/* Read a lookahead token if we need one and don't already have one. */ +/* yyresume: */ + + /* First try to decide what to do without reference to lookahead token. */ + + yyn = yypact[yystate]; + if (yyn == YYFLAG) + goto yydefault; + + /* Not known => get a lookahead token if don't already have one. */ + + /* yychar is either YYEMPTY or YYEOF + or a valid token in external form. */ + + if (yychar == YYEMPTY) + { +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Reading a token: "); +#endif + yychar = YYLEX; + } + + /* Convert token to internal form (in yychar1) for indexing tables with */ + + if (yychar <= 0) /* This means end of input. 
*/ + { + yychar1 = 0; + yychar = YYEOF; /* Don't call YYLEX any more */ + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Now at end of input.\n"); +#endif + } + else + { + yychar1 = YYTRANSLATE(yychar); + +#if YYDEBUG != 0 + if (yydebug) + { + fprintf (stderr, "Next token is %d (%s", yychar, yytname[yychar1]); + /* Give the individual parser a way to print the precise meaning + of a token, for further debugging info. */ +#ifdef YYPRINT + YYPRINT (stderr, yychar, yylval); +#endif + fprintf (stderr, ")\n"); + } +#endif + } + + yyn += yychar1; + if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1) + goto yydefault; + + yyn = yytable[yyn]; + + /* yyn is what to do for this token type in this state. + Negative => reduce, -yyn is rule number. + Positive => shift, yyn is new state. + New state is final state => don't bother to shift, + just return success. + 0, or most negative number => error. */ + + if (yyn < 0) + { + if (yyn == YYFLAG) + goto yyerrlab; + yyn = -yyn; + goto yyreduce; + } + else if (yyn == 0) + goto yyerrlab; + + if (yyn == YYFINAL) + YYACCEPT; + + /* Shift the lookahead token. */ + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1]); +#endif + + /* Discard the token being shifted unless it is eof. */ + if (yychar != YYEOF) + yychar = YYEMPTY; + + *++yyvsp = yylval; +#ifdef YYLSP_NEEDED + *++yylsp = yylloc; +#endif + + /* count tokens shifted since error; after three, turn off error status. */ + if (yyerrstatus) yyerrstatus--; + + yystate = yyn; + goto yynewstate; + +/* Do the default action for the current state. */ +yydefault: + + yyn = yydefact[yystate]; + if (yyn == 0) + goto yyerrlab; + +/* Do a reduction. yyn is the number of a rule to reduce with. 
*/ +yyreduce: + yylen = yyr2[yyn]; + if (yylen > 0) + yyval = yyvsp[1-yylen]; /* implement default value of the action */ + +#if YYDEBUG != 0 + if (yydebug) + { + int i; + + fprintf (stderr, "Reducing via rule %d (line %d), ", + yyn, yyrline[yyn]); + + /* Print the symbols being reduced, and their result. */ + for (i = yyprhs[yyn]; yyrhs[i] > 0; i++) + fprintf (stderr, "%s ", yytname[yyrhs[i]]); + fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]); + } +#endif + + + switch (yyn) { + +case 1: +#line 116 "OSUnserialize.y" +{ parsedObject = (OSObject *)NULL; YYACCEPT; ; + break;} +case 2: +#line 117 "OSUnserialize.y" +{ parsedObject = (OSObject *)yyvsp[0]; YYACCEPT; ; + break;} +case 3: +#line 118 "OSUnserialize.y" +{ yyerror("syntax error"); YYERROR; ; + break;} +case 4: +#line 121 "OSUnserialize.y" +{ yyval = (object_t *)buildOSDictionary(yyvsp[0]); ; + break;} +case 5: +#line 122 "OSUnserialize.y" +{ yyval = (object_t *)buildOSArray(yyvsp[0]); ; + break;} +case 6: +#line 123 "OSUnserialize.y" +{ yyval = (object_t *)buildOSSet(yyvsp[0]); ; + break;} +case 7: +#line 124 "OSUnserialize.y" +{ yyval = (object_t *)buildOSString(yyvsp[0]); ; + break;} +case 8: +#line 125 "OSUnserialize.y" +{ yyval = (object_t *)buildOSData(yyvsp[0]); ; + break;} +case 9: +#line 126 "OSUnserialize.y" +{ yyval = (object_t *)buildOSOffset(yyvsp[0]); ; + break;} +case 10: +#line 127 "OSUnserialize.y" +{ yyval = (object_t *)buildOSBoolean(yyvsp[0]); ; + break;} +case 11: +#line 128 "OSUnserialize.y" +{ yyval = (object_t *)retrieveObject(yyvsp[0]->u.offset); + if (yyval) { + ((OSObject *)yyval)->retain(); + } else { + yyerror("forward reference detected"); + YYERROR; + } + freeObject(yyvsp[0]); + ; + break;} +case 12: +#line 137 "OSUnserialize.y" +{ yyval = yyvsp[-2]; + rememberObject(yyvsp[0]->u.offset, yyvsp[-2]); + freeObject(yyvsp[0]); + ; + break;} +case 13: +#line 145 "OSUnserialize.y" +{ yyval = NULL; ; + break;} +case 14: +#line 146 "OSUnserialize.y" +{ yyval = yyvsp[-1]; ; + break;} 
+case 16: +#line 150 "OSUnserialize.y" +{ yyvsp[0]->next = yyvsp[-1]; yyvsp[-1]->prev = yyvsp[0]; yyval = yyvsp[0]; ; + break;} +case 17: +#line 153 "OSUnserialize.y" +{ yyval = newObject(); + yyval->next = NULL; + yyval->prev = NULL; + yyval->u.key = yyvsp[-3]; + yyval->object = yyvsp[-1]; + ; + break;} +case 18: +#line 163 "OSUnserialize.y" +{ yyval = NULL; ; + break;} +case 19: +#line 164 "OSUnserialize.y" +{ yyval = yyvsp[-1]; ; + break;} +case 20: +#line 167 "OSUnserialize.y" +{ yyval = NULL; ; + break;} +case 21: +#line 168 "OSUnserialize.y" +{ yyval = yyvsp[-1]; ; + break;} +case 22: +#line 171 "OSUnserialize.y" +{ yyval = newObject(); + yyval->object = yyvsp[0]; + yyval->next = NULL; + yyval->prev = NULL; + ; + break;} +case 23: +#line 176 "OSUnserialize.y" +{ o = newObject(); + o->object = yyvsp[0]; + o->next = yyvsp[-2]; + o->prev = NULL; + yyvsp[-2]->prev = o; + yyval = o; + ; + break;} +case 24: +#line 187 "OSUnserialize.y" +{ yyval = yyvsp[-2]; + yyval->size = yyvsp[0]->u.offset; + freeObject(yyvsp[0]); + ; + break;} +} + /* the action file gets copied in in place of this dollarsign */ +#line 543 "/usr/share/bison.simple" + + yyvsp -= yylen; + yyssp -= yylen; +#ifdef YYLSP_NEEDED + yylsp -= yylen; +#endif + +#if YYDEBUG != 0 + if (yydebug) + { + short *ssp1 = yyss - 1; + fprintf (stderr, "state stack now"); + while (ssp1 != yyssp) + fprintf (stderr, " %d", *++ssp1); + fprintf (stderr, "\n"); + } +#endif + + *++yyvsp = yyval; + +#ifdef YYLSP_NEEDED + yylsp++; + if (yylen == 0) + { + yylsp->first_line = yylloc.first_line; + yylsp->first_column = yylloc.first_column; + yylsp->last_line = (yylsp-1)->last_line; + yylsp->last_column = (yylsp-1)->last_column; + yylsp->text = 0; + } + else + { + yylsp->last_line = (yylsp+yylen-1)->last_line; + yylsp->last_column = (yylsp+yylen-1)->last_column; + } +#endif + + /* Now "shift" the result of the reduction. + Determine what state that goes to, + based on the state we popped back to + and the rule number reduced by. 
*/ + + yyn = yyr1[yyn]; + + yystate = yypgoto[yyn - YYNTBASE] + *yyssp; + if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp) + yystate = yytable[yystate]; + else + yystate = yydefgoto[yyn - YYNTBASE]; + + goto yynewstate; + +yyerrlab: /* here on detecting error */ + + if (! yyerrstatus) + /* If not already recovering from an error, report this error. */ + { + ++yynerrs; + +#ifdef YYERROR_VERBOSE + yyn = yypact[yystate]; + + if (yyn > YYFLAG && yyn < YYLAST) + { + int size = 0; + char *msg; + int x, count; + + count = 0; + /* Start X at -yyn if nec to avoid negative indexes in yycheck. */ + for (x = (yyn < 0 ? -yyn : 0); + x < (sizeof(yytname) / sizeof(char *)); x++) + if (yycheck[x + yyn] == x) + size += strlen(yytname[x]) + 15, count++; + msg = (char *) malloc(size + 15); + if (msg != 0) + { + strcpy(msg, "parse error"); + + if (count < 5) + { + count = 0; + for (x = (yyn < 0 ? -yyn : 0); + x < (sizeof(yytname) / sizeof(char *)); x++) + if (yycheck[x + yyn] == x) + { + strcat(msg, count == 0 ? ", expecting `" : " or `"); + strcat(msg, yytname[x]); + strcat(msg, "'"); + count++; + } + } + yyerror(msg); + free(msg); + } + else + yyerror ("parse error; also virtual memory exceeded"); + } + else +#endif /* YYERROR_VERBOSE */ + yyerror("parse error"); + } + + goto yyerrlab1; +yyerrlab1: /* here on error raised explicitly by an action */ + + if (yyerrstatus == 3) + { + /* if just tried and failed to reuse lookahead token after an error, discard it. */ + + /* return failure if at end of input */ + if (yychar == YYEOF) + YYABORT; + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1]); +#endif + + yychar = YYEMPTY; + } + + /* Else will try to reuse lookahead token + after shifting the error token. */ + + yyerrstatus = 3; /* Each real token shifted decrements this */ + + goto yyerrhandle; + +yyerrdefault: /* current state does not do anything special for the error token. 
*/ + +#if 0 + /* This is wrong; only states that explicitly want error tokens + should shift them. */ + yyn = yydefact[yystate]; /* If its default is to accept any token, ok. Otherwise pop it.*/ + if (yyn) goto yydefault; +#endif + +yyerrpop: /* pop the current state because it cannot handle the error token */ + + if (yyssp == yyss) YYABORT; + yyvsp--; + yystate = *--yyssp; +#ifdef YYLSP_NEEDED + yylsp--; +#endif + +#if YYDEBUG != 0 + if (yydebug) + { + short *ssp1 = yyss - 1; + fprintf (stderr, "Error: state stack now"); + while (ssp1 != yyssp) + fprintf (stderr, " %d", *++ssp1); + fprintf (stderr, "\n"); + } +#endif + +yyerrhandle: + + yyn = yypact[yystate]; + if (yyn == YYFLAG) + goto yyerrdefault; + + yyn += YYTERROR; + if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR) + goto yyerrdefault; + + yyn = yytable[yyn]; + if (yyn < 0) + { + if (yyn == YYFLAG) + goto yyerrpop; + yyn = -yyn; + goto yyreduce; + } + else if (yyn == 0) + goto yyerrpop; + + if (yyn == YYFINAL) + YYACCEPT; + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Shifting error token, "); +#endif + + *++yyvsp = yylval; +#ifdef YYLSP_NEEDED + *++yylsp = yylloc; +#endif + + yystate = yyn; + goto yynewstate; + + yyacceptlab: + /* YYACCEPT comes here. */ + if (yyfree_stacks) + { + free (yyss); + free (yyvs); +#ifdef YYLSP_NEEDED + free (yyls); +#endif + } + return 0; + + yyabortlab: + /* YYABORT comes here. 
*/ + if (yyfree_stacks) + { + free (yyss); + free (yyvs); +#ifdef YYLSP_NEEDED + free (yyls); +#endif + } + return 1; +} +#line 208 "OSUnserialize.y" + + +static int lineNumber = 0; +static const char *parseBuffer; +static int parseBufferIndex; + +#define currentChar() (parseBuffer[parseBufferIndex]) +#define nextChar() (parseBuffer[++parseBufferIndex]) +#define prevChar() (parseBuffer[parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) + +static char yyerror_message[128]; + +int +yyerror(char *s) /* Called by yyparse on error */ +{ + sprintf(yyerror_message, "OSUnserialize: %s near line %d\n", s, lineNumber); + return 0; +} + +int +yylex() +{ + int c; + + if (parseBufferIndex == 0) lineNumber = 1; + + top: + c = currentChar(); + + /* skip white space */ + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + + /* skip over comments */ + if (c == '#') while ((c = nextChar()) != 0 && c != '\n') {}; + + /* keep track of line number, don't return \n's */ + if (c == '\n') { + lineNumber++; + (void)nextChar(); + goto top; + } + + /* parse boolean */ + if (c == '.') { + bool boolean = false; + if (nextChar() == 't') { + if (nextChar() != 'r') return SYNTAX_ERROR; + if (nextChar() != 'u') return SYNTAX_ERROR; + if (nextChar() != 'e') return SYNTAX_ERROR; + boolean = true; + } else { + if (currentChar() != 'f') return SYNTAX_ERROR; + if (nextChar() != 'a') return SYNTAX_ERROR; + if (nextChar() != 'l') return SYNTAX_ERROR; + if (nextChar() != 's') return SYNTAX_ERROR; + if (nextChar() != 'e') return SYNTAX_ERROR; + } + if (nextChar() != '.') return SYNTAX_ERROR; + /* skip over dot */ + (void)nextChar(); + + yylval = (object_t *)boolean; + 
return BOOLEAN; + } + + /* parse unquoted string */ + if (isAlpha(c)) { + int start, length; + char * tempString; + + start = parseBufferIndex; + /* find end of string */ + while (isAlphaNumeric(c)) { + c = nextChar(); + } + length = parseBufferIndex - start; + + /* copy to null terminated buffer */ + tempString = (char *)malloc(length + 1); + if (tempString == 0) { + printf("OSUnserialize: can't alloc temp memory\n"); + return 0; + } + bcopy(&parseBuffer[start], tempString, length); + tempString[length] = 0; + yylval = (object_t *)tempString; + return STRING; + } + + /* parse quoted string */ + if (c == '"' || c == '\'') { + int start, length; + char * tempString; + char quoteChar = c; + + start = parseBufferIndex + 1; // skip quote + /* find end of string, line, buffer */ + while ((c = nextChar()) != quoteChar) { + if (c == '\\') c = nextChar(); + if (c == '\n') lineNumber++; + if (c == 0) return SYNTAX_ERROR; + } + length = parseBufferIndex - start; + /* skip over trailing quote */ + (void)nextChar(); + /* copy to null terminated buffer */ + tempString = (char *)malloc(length + 1); + if (tempString == 0) { + printf("OSUnserialize: can't alloc temp memory\n"); + return 0; + } + + int to = 0; + for (int from=start; from < parseBufferIndex; from++) { + // hack - skip over backslashes + if (parseBuffer[from] == '\\') { + length--; + continue; + } + tempString[to] = parseBuffer[from]; + to++; + } + tempString[length] = 0; + yylval = (object_t *)tempString; + return STRING; + } + + /* process numbers */ + if (isDigit (c)) + { + unsigned long long n = 0; + int base = 10; + + if (c == '0') { + c = nextChar(); + if (c == 'x') { + base = 16; + c = nextChar(); + } + } + if (base == 10) { + while(isDigit(c)) { + n = (n * base + c - '0'); + c = nextChar(); + } + } else { + while(isHexDigit(c)) { + if (isDigit(c)) { + n = (n * base + c - '0'); + } else { + n = (n * base + 0xa + c - 'a'); + } + c = nextChar(); + } + } + + yylval = newObject(); + yylval->u.offset = n; + + 
return NUMBER; + } + +#define OSDATA_ALLOC_SIZE 4096 + + /* process data */ + if (c == '<') { + unsigned char *d, *start, *lastStart; + + start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + c = nextChar(); // skip over '<' + while (c != 0 && c != '>') { + + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + if (c == '#') while ((c = nextChar()) != 0 && c != '\n') {}; + if (c == '\n') { + lineNumber++; + c = nextChar(); + continue; + } + + // get high nibble + if (!isHexDigit(c)) break; + if (isDigit(c)) { + *d = (c - '0') << 4; + } else { + *d = (0xa + (c - 'a')) << 4; + } + + // get low nibble + c = nextChar(); + if (!isHexDigit(c)) break; + if (isDigit(c)) { + *d |= c - '0'; + } else { + *d |= 0xa + (c - 'a'); + } + + d++; + if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { + int oldsize = d - start; + start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + d = lastStart = start + oldsize; + } + c = nextChar(); + } + if (c != '>' ) { + free(start); + return SYNTAX_ERROR; + } + + // got it! 
+ yylval = newObject(); + yylval->object = start; + yylval->size = d - start; + + (void)nextChar(); // skip over '>' + return DATA; + } + + + /* return single chars, move pointer to next char */ + (void)nextChar(); + return c; +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +#ifdef DEBUG +int debugUnserializeAllocCount = 0; +#endif + +object_t * +newObject() +{ +#ifdef DEBUG + debugUnserializeAllocCount++; +#endif + return (object_t *)malloc(sizeof(object_t)); +} + +void +freeObject(object_t *o) +{ +#ifdef DEBUG + debugUnserializeAllocCount--; +#endif + free(o); +} + +static OSDictionary *tags; + +static void +rememberObject(int tag, object_t *o) +{ + char key[16]; + sprintf(key, "%u", tag); + + tags->setObject(key, (OSObject *)o); +} + +static OSObject * +retrieveObject(int tag) +{ + char key[16]; + sprintf(key, "%u", tag); + + return tags->getObject(key); +} + +OSObject * +buildOSDictionary(object_t *o) +{ + object_t *temp, *last = o; + int count = 0; + + // get count and last object + while (o) { + count++; + last = o; + o = o->next; + } + o = last; + + OSDictionary *d = OSDictionary::withCapacity(count); + + while (o) { +#ifdef metaclass_stuff_worksXXX + if (((OSObject *)o->u.key)->metaCast("OSSymbol")) { + // XXX the evil frontdoor + d->setObject((OSSymbol *)o->u.key, (OSObject *)o->object); + } else { + // If it isn't a symbol, I hope it's a string! 
+ d->setObject((OSString *)o->u.key, (OSObject *)o->object); + } +#else + d->setObject((OSString *)o->u.key, (OSObject *)o->object); +#endif + ((OSObject *)o->object)->release(); + ((OSObject *)o->u.key)->release(); + temp = o; + o = o->prev; + freeObject(temp); + } + return d; +}; + +OSObject * +buildOSArray(object_t *o) +{ + object_t *temp, *last = o; + int count = 0; + + // get count and last object + while (o) { + count++; + last = o; + o = o->next; + } + o = last; + + OSArray *a = OSArray::withCapacity(count); + + while (o) { + a->setObject((OSObject *)o->object); + ((OSObject *)o->object)->release(); + temp = o; + o = o->prev; + freeObject(temp); + } + return a; +}; + +OSObject * +buildOSSet(object_t *o) +{ + OSArray *a = (OSArray *)buildOSArray(o); + OSSet *s = OSSet::withArray(a, a->getCapacity()); + + a->release(); + return s; +}; + +OSObject * +buildOSString(object_t *o) +{ + OSString *s = OSString::withCString((char *)o); + + free(o); + + return s; +}; + +OSObject * +buildOSData(object_t *o) +{ + OSData *d; + + if (o->size) { + d = OSData::withBytes(o->object, o->size); + } else { + d = OSData::withCapacity(0); + } + free(o->object); + freeObject(o); + return d; +}; + +OSObject * +buildOSOffset(object_t *o) +{ + OSNumber *off = OSNumber::withNumber(o->u.offset, o->size); + freeObject(o); + return off; +}; + +OSObject * +buildOSBoolean(object_t *o) +{ + OSBoolean *b = OSBoolean::withBoolean((bool)o); + return b; +}; + +__BEGIN_DECLS +#include +__END_DECLS + +static mutex_t *lock = 0; + +OSObject* +OSUnserialize(const char *buffer, OSString **errorString) +{ + OSObject *object; + + if (!lock) { + lock = mutex_alloc(ETAP_IO_AHA); + _mutex_lock(lock); + } else { + _mutex_lock(lock); + + } + +#ifdef DEBUG + debugUnserializeAllocCount = 0; +#endif + yyerror_message[0] = 0; //just in case + parseBuffer = buffer; + parseBufferIndex = 0; + tags = OSDictionary::withCapacity(128); + if (yyparse() == 0) { + object = parsedObject; + if (errorString) *errorString = 0; 
+ } else { + object = 0; + if (errorString) + *errorString = OSString::withCString(yyerror_message); + } + + tags->release(); +#ifdef DEBUG + if (debugUnserializeAllocCount) { + printf("OSUnserialize: allocation check failed, count = %d.\n", + debugUnserializeAllocCount); + } +#endif + mutex_unlock(lock); + + return object; +} + + +// +// +// +// +// +// DO NOT EDIT OSUnserialize.cpp! +// +// this means you! +// +// +// +// +// diff --git a/libkern/c++/OSUnserialize.y b/libkern/c++/OSUnserialize.y new file mode 100644 index 000000000..47da0c284 --- /dev/null +++ b/libkern/c++/OSUnserialize.y @@ -0,0 +1,659 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* OSUnserialize.y created by rsulack on Nov 21 1998 */ + +// "classic" parser for unserializing OSContainer objects +// +// XXX - this code should really be removed! +// - the XML format is now prefered +// - this code leaks on syntax errors, the XML doesn't +// - "classic" looks, reads, ... 
much better than XML :-( +// - well except the XML is more efficent on OSData +// +// +// to build : +// bison -p OSUnserialize OSUnserialize.y +// head -50 OSUnserialize.y > OSUnserialize.cpp +// sed -e "s/stdio.h/stddef.h/" < OSUnserialize.tab.c >> OSUnserialize.cpp +// +// when changing code check in both OSUnserialize.y and OSUnserialize.cpp +// +// +// +// +// DO NOT EDIT OSUnserialize.tab.cpp! +// +// this means you! +// +// +// +// +// + + +%{ +#include +#include +#include + +typedef struct object { + struct object *next; + struct object *prev; + void *object; + int size; // for data + union { + void *key; // for dictionary + long long offset; // for offset + } u; + +} object_t; + +static int yyparse(); +static int yyerror(char *s); +static int yylex(); + +static object_t * newObject(); +static void freeObject(object_t *o); + +static OSObject *buildOSDictionary(object_t *); +static OSObject *buildOSArray(object_t *); +static OSObject *buildOSSet(object_t *); +static OSObject *buildOSString(object_t *); +static OSObject *buildOSData(object_t *); +static OSObject *buildOSOffset(object_t *); +static OSObject *buildOSBoolean(object_t *o); + +static void rememberObject(int, object_t *); +static OSObject *retrieveObject(int); + +// temp variable to use during parsing +static object_t *o; + +// resultant object of parsed text +static OSObject *parsedObject; + +#define YYSTYPE object_t * + +extern "C" { +extern void *kern_os_malloc(size_t size); +extern void *kern_os_realloc(void * addr, size_t size); +extern void kern_os_free(void * addr); +} /* extern "C" */ + +#define malloc(s) kern_os_malloc(s) +#define realloc(a, s) kern_os_realloc(a, s) +#define free(a) kern_os_free(a) + +%} +%token NUMBER +%token STRING +%token DATA +%token BOOLEAN +%token SYNTAX_ERROR + +%% /* Grammar rules and actions follow */ + +input: /* empty */ { parsedObject = (OSObject *)NULL; YYACCEPT; } + | object { parsedObject = (OSObject *)$1; YYACCEPT; } + | SYNTAX_ERROR { yyerror("syntax 
error"); YYERROR; } + ; + +object: dict { $$ = (object_t *)buildOSDictionary($1); } + | array { $$ = (object_t *)buildOSArray($1); } + | set { $$ = (object_t *)buildOSSet($1); } + | string { $$ = (object_t *)buildOSString($1); } + | data { $$ = (object_t *)buildOSData($1); } + | offset { $$ = (object_t *)buildOSOffset($1); } + | boolean { $$ = (object_t *)buildOSBoolean($1); } + | '@' NUMBER { $$ = (object_t *)retrieveObject($2->u.offset); + if ($$) { + ((OSObject *)$$)->retain(); + } else { + yyerror("forward reference detected"); + YYERROR; + } + freeObject($2); + } + | object '@' NUMBER { $$ = $1; + rememberObject($3->u.offset, $1); + freeObject($3); + } + ; + +//------------------------------------------------------------------------------ + +dict: '{' '}' { $$ = NULL; } + | '{' pairs '}' { $$ = $2; } + ; + +pairs: pair + | pairs pair { $2->next = $1; $1->prev = $2; $$ = $2; } + ; + +pair: object '=' object ';' { $$ = newObject(); + $$->next = NULL; + $$->prev = NULL; + $$->u.key = $1; + $$->object = $3; + } + ; + +//------------------------------------------------------------------------------ + +array: '(' ')' { $$ = NULL; } + | '(' elements ')' { $$ = $2; } + ; + +set: '[' ']' { $$ = NULL; } + | '[' elements ']' { $$ = $2; } + ; + +elements: object { $$ = newObject(); + $$->object = $1; + $$->next = NULL; + $$->prev = NULL; + } + | elements ',' object { o = newObject(); + o->object = $3; + o->next = $1; + o->prev = NULL; + $1->prev = o; + $$ = o; + } + ; + +//------------------------------------------------------------------------------ + +offset: NUMBER ':' NUMBER { $$ = $1; + $$->size = $3->u.offset; + freeObject($3); + } + ; + +//------------------------------------------------------------------------------ + +data: DATA + ; + +//------------------------------------------------------------------------------ + +string: STRING + ; + +//------------------------------------------------------------------------------ + +boolean: BOOLEAN + ; + +%% + +static int 
lineNumber = 0; +static const char *parseBuffer; +static int parseBufferIndex; + +#define currentChar() (parseBuffer[parseBufferIndex]) +#define nextChar() (parseBuffer[++parseBufferIndex]) +#define prevChar() (parseBuffer[parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) + +static char yyerror_message[128]; + +int +yyerror(char *s) /* Called by yyparse on error */ +{ + sprintf(yyerror_message, "OSUnserialize: %s near line %d\n", s, lineNumber); + return 0; +} + +int +yylex() +{ + int c; + + if (parseBufferIndex == 0) lineNumber = 1; + + top: + c = currentChar(); + + /* skip white space */ + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + + /* skip over comments */ + if (c == '#') while ((c = nextChar()) != 0 && c != '\n') {}; + + /* keep track of line number, don't return \n's */ + if (c == '\n') { + lineNumber++; + (void)nextChar(); + goto top; + } + + /* parse boolean */ + if (c == '.') { + bool boolean = false; + if (nextChar() == 't') { + if (nextChar() != 'r') return SYNTAX_ERROR; + if (nextChar() != 'u') return SYNTAX_ERROR; + if (nextChar() != 'e') return SYNTAX_ERROR; + boolean = true; + } else { + if (currentChar() != 'f') return SYNTAX_ERROR; + if (nextChar() != 'a') return SYNTAX_ERROR; + if (nextChar() != 'l') return SYNTAX_ERROR; + if (nextChar() != 's') return SYNTAX_ERROR; + if (nextChar() != 'e') return SYNTAX_ERROR; + } + if (nextChar() != '.') return SYNTAX_ERROR; + /* skip over dot */ + (void)nextChar(); + + yylval = (object_t *)boolean; + return BOOLEAN; + } + + /* parse unquoted string */ + if (isAlpha(c)) { + int start, length; + char * tempString; + + start = parseBufferIndex; + /* find end of string */ 
+ while (isAlphaNumeric(c)) { + c = nextChar(); + } + length = parseBufferIndex - start; + + /* copy to null terminated buffer */ + tempString = (char *)malloc(length + 1); + if (tempString == 0) { + printf("OSUnserialize: can't alloc temp memory\n"); + return 0; + } + bcopy(&parseBuffer[start], tempString, length); + tempString[length] = 0; + yylval = (object_t *)tempString; + return STRING; + } + + /* parse quoted string */ + if (c == '"' || c == '\'') { + int start, length; + char * tempString; + char quoteChar = c; + + start = parseBufferIndex + 1; // skip quote + /* find end of string, line, buffer */ + while ((c = nextChar()) != quoteChar) { + if (c == '\\') c = nextChar(); + if (c == '\n') lineNumber++; + if (c == 0) return SYNTAX_ERROR; + } + length = parseBufferIndex - start; + /* skip over trailing quote */ + (void)nextChar(); + /* copy to null terminated buffer */ + tempString = (char *)malloc(length + 1); + if (tempString == 0) { + printf("OSUnserialize: can't alloc temp memory\n"); + return 0; + } + + int to = 0; + for (int from=start; from < parseBufferIndex; from++) { + // hack - skip over backslashes + if (parseBuffer[from] == '\\') { + length--; + continue; + } + tempString[to] = parseBuffer[from]; + to++; + } + tempString[length] = 0; + yylval = (object_t *)tempString; + return STRING; + } + + /* process numbers */ + if (isDigit (c)) + { + unsigned long long n = 0; + int base = 10; + + if (c == '0') { + c = nextChar(); + if (c == 'x') { + base = 16; + c = nextChar(); + } + } + if (base == 10) { + while(isDigit(c)) { + n = (n * base + c - '0'); + c = nextChar(); + } + } else { + while(isHexDigit(c)) { + if (isDigit(c)) { + n = (n * base + c - '0'); + } else { + n = (n * base + 0xa + c - 'a'); + } + c = nextChar(); + } + } + + yylval = newObject(); + yylval->u.offset = n; + + return NUMBER; + } + +#define OSDATA_ALLOC_SIZE 4096 + + /* process data */ + if (c == '<') { + unsigned char *d, *start, *lastStart; + + start = lastStart = d = (unsigned char 
*)malloc(OSDATA_ALLOC_SIZE); + c = nextChar(); // skip over '<' + while (c != 0 && c != '>') { + + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + if (c == '#') while ((c = nextChar()) != 0 && c != '\n') {}; + if (c == '\n') { + lineNumber++; + c = nextChar(); + continue; + } + + // get high nibble + if (!isHexDigit(c)) break; + if (isDigit(c)) { + *d = (c - '0') << 4; + } else { + *d = (0xa + (c - 'a')) << 4; + } + + // get low nibble + c = nextChar(); + if (!isHexDigit(c)) break; + if (isDigit(c)) { + *d |= c - '0'; + } else { + *d |= 0xa + (c - 'a'); + } + + d++; + if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { + int oldsize = d - start; + start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + d = lastStart = start + oldsize; + } + c = nextChar(); + } + if (c != '>' ) { + free(start); + return SYNTAX_ERROR; + } + + // got it! + yylval = newObject(); + yylval->object = start; + yylval->size = d - start; + + (void)nextChar(); // skip over '>' + return DATA; + } + + + /* return single chars, move pointer to next char */ + (void)nextChar(); + return c; +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +#ifdef DEBUG +int debugUnserializeAllocCount = 0; +#endif + +object_t * +newObject() +{ +#ifdef DEBUG + debugUnserializeAllocCount++; +#endif + return (object_t *)malloc(sizeof(object_t)); +} + +void +freeObject(object_t *o) +{ +#ifdef DEBUG + debugUnserializeAllocCount--; +#endif + free(o); +} + +static OSDictionary *tags; + +static void +rememberObject(int tag, object_t *o) +{ + char key[16]; + sprintf(key, "%u", tag); + + tags->setObject(key, (OSObject *)o); +} + +static OSObject * +retrieveObject(int tag) +{ + char key[16]; + sprintf(key, "%u", tag); + + return tags->getObject(key); +} + +OSObject * +buildOSDictionary(object_t *o) +{ + object_t *temp, *last = o; + int count = 
0; + + // get count and last object + while (o) { + count++; + last = o; + o = o->next; + } + o = last; + + OSDictionary *d = OSDictionary::withCapacity(count); + + while (o) { +#ifdef metaclass_stuff_worksXXX + if (((OSObject *)o->u.key)->metaCast("OSSymbol")) { + // XXX the evil frontdoor + d->setObject((OSSymbol *)o->u.key, (OSObject *)o->object); + } else { + // If it isn't a symbol, I hope it's a string! + d->setObject((OSString *)o->u.key, (OSObject *)o->object); + } +#else + d->setObject((OSString *)o->u.key, (OSObject *)o->object); +#endif + ((OSObject *)o->object)->release(); + ((OSObject *)o->u.key)->release(); + temp = o; + o = o->prev; + freeObject(temp); + } + return d; +}; + +OSObject * +buildOSArray(object_t *o) +{ + object_t *temp, *last = o; + int count = 0; + + // get count and last object + while (o) { + count++; + last = o; + o = o->next; + } + o = last; + + OSArray *a = OSArray::withCapacity(count); + + while (o) { + a->setObject((OSObject *)o->object); + ((OSObject *)o->object)->release(); + temp = o; + o = o->prev; + freeObject(temp); + } + return a; +}; + +OSObject * +buildOSSet(object_t *o) +{ + OSArray *a = (OSArray *)buildOSArray(o); + OSSet *s = OSSet::withArray(a, a->getCapacity()); + + a->release(); + return s; +}; + +OSObject * +buildOSString(object_t *o) +{ + OSString *s = OSString::withCString((char *)o); + + free(o); + + return s; +}; + +OSObject * +buildOSData(object_t *o) +{ + OSData *d; + + if (o->size) { + d = OSData::withBytes(o->object, o->size); + } else { + d = OSData::withCapacity(0); + } + free(o->object); + freeObject(o); + return d; +}; + +OSObject * +buildOSOffset(object_t *o) +{ + OSNumber *off = OSNumber::withNumber(o->u.offset, o->size); + freeObject(o); + return off; +}; + +OSObject * +buildOSBoolean(object_t *o) +{ + OSBoolean *b = OSBoolean::withBoolean((bool)o); + return b; +}; + +__BEGIN_DECLS +#include +__END_DECLS + +static mutex_t *lock = 0; + +OSObject* +OSUnserialize(const char *buffer, OSString 
**errorString) +{ + OSObject *object; + + if (!lock) { + lock = mutex_alloc(ETAP_IO_AHA); + _mutex_lock(lock); + } else { + _mutex_lock(lock); + + } + +#ifdef DEBUG + debugUnserializeAllocCount = 0; +#endif + yyerror_message[0] = 0; //just in case + parseBuffer = buffer; + parseBufferIndex = 0; + tags = OSDictionary::withCapacity(128); + if (yyparse() == 0) { + object = parsedObject; + if (errorString) *errorString = 0; + } else { + object = 0; + if (errorString) + *errorString = OSString::withCString(yyerror_message); + } + + tags->release(); +#ifdef DEBUG + if (debugUnserializeAllocCount) { + printf("OSUnserialize: allocation check failed, count = %d.\n", + debugUnserializeAllocCount); + } +#endif + mutex_unlock(lock); + + return object; +} + + +// +// +// +// +// +// DO NOT EDIT OSUnserialize.cpp! +// +// this means you! +// +// +// +// +// diff --git a/libkern/c++/OSUnserializeXML.cpp b/libkern/c++/OSUnserializeXML.cpp new file mode 100644 index 000000000..825cfb9e5 --- /dev/null +++ b/libkern/c++/OSUnserializeXML.cpp @@ -0,0 +1,2040 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* OSUnserializeXML.y created by rsulack on Tue Oct 12 1999 */ + +// XML parser for unserializing OSContainer objects +// +// to build : +// bison -p OSUnserializeXML OSUnserializeXML.y +// head -50 OSUnserializeXML.y > OSUnserializeXML.cpp +// sed -e "s/stdio.h/stddef.h/" < OSUnserializeXML.tab.c >> OSUnserializeXML.cpp +// +// when changing code check in both OSUnserializeXML.y and OSUnserializeXML.cpp +// +// +// +// +// +// +// +// DO NOT EDIT OSUnserializeXML.cpp! +// +// this means you! +// +// +// +// +// +// +// + + +/* A Bison parser, made from OSUnserializeXML.y + by GNU Bison version 1.28 */ + +#define YYBISON 1 /* Identify Bison output. */ + +#define yyparse OSUnserializeXMLparse +#define yylex OSUnserializeXMLlex +#define yyerror OSUnserializeXMLerror +#define yylval OSUnserializeXMLlval +#define yychar OSUnserializeXMLchar +#define yydebug OSUnserializeXMLdebug +#define yynerrs OSUnserializeXMLnerrs +#define ARRAY 257 +#define BOOLEAN 258 +#define DATA 259 +#define DICTIONARY 260 +#define IDREF 261 +#define KEY 262 +#define NUMBER 263 +#define SET 264 +#define STRING 265 +#define SYNTAX_ERROR 266 + +#line 52 "OSUnserializeXML.y" + +#include +#include +#include +#include + +typedef struct object { + struct object *next; + struct object *free; + struct object *elements; + OSObject *object; + const OSSymbol *key; // for dictionary + int size; + void *data; // for data + char *string; // for string & symbol + long long number; // for number + int idref; +} object_t; + +static int yyparse(); +static int yyerror(char *s); +static int yylex(); + +static object_t * newObject(); +static void freeObject(object_t *o); + +static object_t *buildOSDictionary(object_t *); +static object_t *buildOSArray(object_t *); +static object_t *buildOSSet(object_t *); +static object_t *buildOSString(object_t *); +static object_t *buildKey(object_t *); +static object_t *buildOSData(object_t *); +static object_t *buildOSNumber(object_t 
*); +static object_t *buildOSBoolean(object_t *o); + +static void rememberObject(int, OSObject *); +static object_t *retrieveObject(int); + +// resultant object of parsed text +static OSObject *parsedObject; + +#define YYSTYPE object_t * + +extern "C" { +extern void *kern_os_malloc(size_t size); +extern void *kern_os_realloc(void * addr, size_t size); +extern void kern_os_free(void * addr); + +//XXX shouldn't have to define these +extern long strtol(const char *, char **, int); +extern unsigned long strtoul(const char *, char **, int); + +} /* extern "C" */ + +#define malloc(s) kern_os_malloc(s) +#define realloc(a, s) kern_os_realloc(a, s) +#define free(a) kern_os_free(a) + +#ifndef YYSTYPE +#define YYSTYPE int +#endif +#include + +#ifndef __cplusplus +#ifndef __STDC__ +#define const +#endif +#endif + + + +#define YYFINAL 40 +#define YYFLAG -32768 +#define YYNTBASE 19 + +#define YYTRANSLATE(x) ((unsigned)(x) <= 266 ? yytranslate[x] : 33) + +static const char yytranslate[] = { 0, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 15, + 16, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 17, 2, 18, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 13, 2, 14, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 1, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12 +}; + +#if YYDEBUG != 0 +static const short yyprhs[] = { 0, + 0, 1, 3, 5, 7, 9, 11, 13, 15, 17, + 19, 21, 24, 28, 30, 32, 35, 38, 40, 43, + 47, 
49, 52, 56, 58, 60, 63, 65, 67, 69, + 71 +}; + +static const short yyrhs[] = { -1, + 20, 0, 12, 0, 21, 0, 25, 0, 26, 0, + 32, 0, 29, 0, 31, 0, 28, 0, 30, 0, + 13, 14, 0, 13, 22, 14, 0, 6, 0, 23, + 0, 22, 23, 0, 24, 20, 0, 8, 0, 15, + 16, 0, 15, 27, 16, 0, 3, 0, 17, 18, + 0, 17, 27, 18, 0, 10, 0, 20, 0, 27, + 20, 0, 4, 0, 5, 0, 7, 0, 9, 0, + 11, 0 +}; + +#endif + +#if YYDEBUG != 0 +static const short yyrline[] = { 0, + 123, 124, 129, 135, 136, 137, 138, 139, 140, 141, + 142, 155, 158, 161, 164, 165, 170, 178, 183, 186, + 189, 192, 195, 198, 201, 204, 211, 214, 217, 220, + 223 +}; +#endif + + +#if YYDEBUG != 0 || defined (YYERROR_VERBOSE) + +static const char * const yytname[] = { "$","error","$undefined.","ARRAY", +"BOOLEAN","DATA","DICTIONARY","IDREF","KEY","NUMBER","SET","STRING","SYNTAX_ERROR", +"'{'","'}'","'('","')'","'['","']'","input","object","dict","pairs","pair","key", +"array","set","elements","boolean","data","idref","number","string", NULL +}; +#endif + +static const short yyr1[] = { 0, + 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, + 20, 21, 21, 21, 22, 22, 23, 24, 25, 25, + 25, 26, 26, 26, 27, 27, 28, 29, 30, 31, + 32 +}; + +static const short yyr2[] = { 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 2, 3, 1, 1, 2, 2, 1, 2, 3, + 1, 2, 3, 1, 1, 2, 1, 1, 1, 1, + 1 +}; + +static const short yydefact[] = { 1, + 21, 27, 28, 14, 29, 30, 24, 31, 3, 0, + 0, 0, 2, 4, 5, 6, 10, 8, 11, 9, + 7, 18, 12, 0, 15, 0, 19, 25, 0, 22, + 0, 13, 16, 17, 20, 26, 23, 0, 0, 0 +}; + +static const short yydefgoto[] = { 38, + 28, 14, 24, 25, 26, 15, 16, 29, 17, 18, + 19, 20, 21 +}; + +static const short yypact[] = { 45, +-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768, 4, + 60, -2,-32768,-32768,-32768,-32768,-32768,-32768,-32768,-32768, +-32768,-32768,-32768, 6,-32768, 90,-32768,-32768, 75,-32768, + 29,-32768,-32768,-32768,-32768,-32768,-32768, 10, 17,-32768 +}; + +static const short yypgoto[] = {-32768, + 0,-32768,-32768, -18,-32768,-32768,-32768, 7,-32768,-32768, 
+-32768,-32768,-32768 +}; + + +#define YYLAST 107 + + +static const short yytable[] = { 13, + 1, 2, 3, 4, 5, 33, 6, 7, 8, 39, + 10, 22, 11, 22, 12, 30, 40, 23, 31, 32, + 0, 0, 0, 0, 0, 34, 0, 0, 36, 0, + 36, 1, 2, 3, 4, 5, 0, 6, 7, 8, + 0, 10, 0, 11, 0, 12, 37, 1, 2, 3, + 4, 5, 0, 6, 7, 8, 9, 10, 0, 11, + 0, 12, 1, 2, 3, 4, 5, 0, 6, 7, + 8, 0, 10, 0, 11, 27, 12, 1, 2, 3, + 4, 5, 0, 6, 7, 8, 0, 10, 0, 11, + 35, 12, 1, 2, 3, 4, 5, 0, 6, 7, + 8, 0, 10, 0, 11, 0, 12 +}; + +static const short yycheck[] = { 0, + 3, 4, 5, 6, 7, 24, 9, 10, 11, 0, + 13, 8, 15, 8, 17, 18, 0, 14, 12, 14, + -1, -1, -1, -1, -1, 26, -1, -1, 29, -1, + 31, 3, 4, 5, 6, 7, -1, 9, 10, 11, + -1, 13, -1, 15, -1, 17, 18, 3, 4, 5, + 6, 7, -1, 9, 10, 11, 12, 13, -1, 15, + -1, 17, 3, 4, 5, 6, 7, -1, 9, 10, + 11, -1, 13, -1, 15, 16, 17, 3, 4, 5, + 6, 7, -1, 9, 10, 11, -1, 13, -1, 15, + 16, 17, 3, 4, 5, 6, 7, -1, 9, 10, + 11, -1, 13, -1, 15, -1, 17 +}; +/* -*-C-*- Note some compilers choke on comments on `#line' lines. */ +#line 3 "/usr/share/bison.simple" +/* This file comes from bison-1.28. */ + +/* Skeleton output parser for bison, + Copyright (C) 1984, 1989, 1990 Free Software Foundation, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2, or (at your option) + any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. 
*/ + +/* As a special exception, when this file is copied by Bison into a + Bison output file, you may use that output file without restriction. + This special exception was added by the Free Software Foundation + in version 1.24 of Bison. */ + +/* This is the parser code that is written into each bison parser + when the %semantic_parser declaration is not specified in the grammar. + It was written by Richard Stallman by simplifying the hairy parser + used when %semantic_parser is specified. */ + +#ifndef YYSTACK_USE_ALLOCA +#ifdef alloca +#define YYSTACK_USE_ALLOCA +#else /* alloca not defined */ +#ifdef __GNUC__ +#define YYSTACK_USE_ALLOCA +#define alloca __builtin_alloca +#else /* not GNU C. */ +#if (!defined (__STDC__) && defined (sparc)) || defined (__sparc__) || defined (__sparc) || defined (__sgi) || (defined (__sun) && defined (__i386)) +#define YYSTACK_USE_ALLOCA +#include +#else /* not sparc */ +/* We think this test detects Watcom and Microsoft C. */ +/* This used to test MSDOS, but that is a bad idea + since that symbol is in the user namespace. */ +#if (defined (_MSDOS) || defined (_MSDOS_)) && !defined (__TURBOC__) +#if 0 /* No need for malloc.h, which pollutes the namespace; + instead, just don't use alloca. */ +#include +#endif +#else /* not MSDOS, or __TURBOC__ */ +#if defined(_AIX) +/* I don't know what this was needed for, but it pollutes the namespace. + So I turned it off. rms, 2 May 1997. */ +/* #include */ + #pragma alloca +#define YYSTACK_USE_ALLOCA +#else /* not MSDOS, or __TURBOC__, or _AIX */ +#if 0 +#ifdef __hpux /* haible@ilog.fr says this works for HPUX 9.05 and up, + and on HPUX 10. Eventually we can turn this on. 
*/ +#define YYSTACK_USE_ALLOCA +#define alloca __builtin_alloca +#endif /* __hpux */ +#endif +#endif /* not _AIX */ +#endif /* not MSDOS, or __TURBOC__ */ +#endif /* not sparc */ +#endif /* not GNU C */ +#endif /* alloca not defined */ +#endif /* YYSTACK_USE_ALLOCA not defined */ + +#ifdef YYSTACK_USE_ALLOCA +#define YYSTACK_ALLOC alloca +#else +#define YYSTACK_ALLOC malloc +#endif + +/* Note: there must be only one dollar sign in this file. + It is replaced by the list of actions, each action + as one case of the switch. */ + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY -2 +#define YYEOF 0 +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrlab1 +/* Like YYERROR except do call yyerror. + This remains here temporarily to ease the + transition to the new meaning of YYERROR, for GCC. + Once GCC version 2 has supplanted version 1, this can go. */ +#define YYFAIL goto yyerrlab +#define YYRECOVERING() (!!yyerrstatus) +#define YYBACKUP(token, value) \ +do \ + if (yychar == YYEMPTY && yylen == 1) \ + { yychar = (token), yylval = (value); \ + yychar1 = YYTRANSLATE (yychar); \ + YYPOPSTACK; \ + goto yybackup; \ + } \ + else \ + { yyerror ("syntax error: cannot back up"); YYERROR; } \ +while (0) + +#define YYTERROR 1 +#define YYERRCODE 256 + +#ifndef YYPURE +#define YYLEX yylex() +#endif + +#ifdef YYPURE +#ifdef YYLSP_NEEDED +#ifdef YYLEX_PARAM +#define YYLEX yylex(&yylval, &yylloc, YYLEX_PARAM) +#else +#define YYLEX yylex(&yylval, &yylloc) +#endif +#else /* not YYLSP_NEEDED */ +#ifdef YYLEX_PARAM +#define YYLEX yylex(&yylval, YYLEX_PARAM) +#else +#define YYLEX yylex(&yylval) +#endif +#endif /* not YYLSP_NEEDED */ +#endif + +/* If nonreentrant, generate the variables here */ + +#ifndef YYPURE + +int yychar; /* the lookahead symbol */ +YYSTYPE yylval; /* the semantic value of the */ + /* lookahead symbol */ + +#ifdef YYLSP_NEEDED +YYLTYPE yylloc; /* location data for the lookahead */ + /* 
symbol */ +#endif + +int yynerrs; /* number of parse errors so far */ +#endif /* not YYPURE */ + +#if YYDEBUG != 0 +int yydebug; /* nonzero means print parse trace */ +/* Since this is uninitialized, it does not stop multiple parsers + from coexisting. */ +#endif + +/* YYINITDEPTH indicates the initial size of the parser's stacks */ + +#ifndef YYINITDEPTH +#define YYINITDEPTH 200 +#endif + +/* YYMAXDEPTH is the maximum size the stacks can grow to + (effective only if the built-in stack extension method is used). */ + +#if YYMAXDEPTH == 0 +#undef YYMAXDEPTH +#endif + +#ifndef YYMAXDEPTH +#define YYMAXDEPTH 10000 +#endif + +/* Define __yy_memcpy. Note that the size argument + should be passed with type unsigned int, because that is what the non-GCC + definitions require. With GCC, __builtin_memcpy takes an arg + of type size_t, but it can handle unsigned int. */ + +#if __GNUC__ > 1 /* GNU C and GNU C++ define this. */ +#define __yy_memcpy(TO,FROM,COUNT) __builtin_memcpy(TO,FROM,COUNT) +#else /* not GNU C or C++ */ +#ifndef __cplusplus + +/* This is the most reliable way to avoid incompatibilities + in available built-in functions on various systems. */ +static void +__yy_memcpy (to, from, count) + char *to; + char *from; + unsigned int count; +{ + register char *f = from; + register char *t = to; + register int i = count; + + while (i-- > 0) + *t++ = *f++; +} + +#else /* __cplusplus */ + +/* This is the most reliable way to avoid incompatibilities + in available built-in functions on various systems. */ +static void +__yy_memcpy (char *to, char *from, unsigned int count) +{ + register char *t = to; + register char *f = from; + register int i = count; + + while (i-- > 0) + *t++ = *f++; +} + +#endif +#endif + +#line 217 "/usr/share/bison.simple" + +/* The user can define YYPARSE_PARAM as the name of an argument to be passed + into yyparse. The argument should have type void *. + It should actually point to an object. 
+ Grammar actions can access the variable by casting it + to the proper pointer type. */ + +#ifdef YYPARSE_PARAM +#ifdef __cplusplus +#define YYPARSE_PARAM_ARG void *YYPARSE_PARAM +#define YYPARSE_PARAM_DECL +#else /* not __cplusplus */ +#define YYPARSE_PARAM_ARG YYPARSE_PARAM +#define YYPARSE_PARAM_DECL void *YYPARSE_PARAM; +#endif /* not __cplusplus */ +#else /* not YYPARSE_PARAM */ +#define YYPARSE_PARAM_ARG +#define YYPARSE_PARAM_DECL +#endif /* not YYPARSE_PARAM */ + +/* Prevent warning if -Wstrict-prototypes. */ +#ifdef __GNUC__ +#ifdef YYPARSE_PARAM +int yyparse (void *); +#else +int yyparse (void); +#endif +#endif + +int +yyparse(YYPARSE_PARAM_ARG) + YYPARSE_PARAM_DECL +{ + register int yystate; + register int yyn; + register short *yyssp; + register YYSTYPE *yyvsp; + int yyerrstatus; /* number of tokens to shift before error messages enabled */ + int yychar1 = 0; /* lookahead token as an internal (translated) token number */ + + short yyssa[YYINITDEPTH]; /* the state stack */ + YYSTYPE yyvsa[YYINITDEPTH]; /* the semantic value stack */ + + short *yyss = yyssa; /* refer to the stacks thru separate pointers */ + YYSTYPE *yyvs = yyvsa; /* to allow yyoverflow to reallocate them elsewhere */ + +#ifdef YYLSP_NEEDED + YYLTYPE yylsa[YYINITDEPTH]; /* the location stack */ + YYLTYPE *yyls = yylsa; + YYLTYPE *yylsp; + +#define YYPOPSTACK (yyvsp--, yyssp--, yylsp--) +#else +#define YYPOPSTACK (yyvsp--, yyssp--) +#endif + + int yystacksize = YYINITDEPTH; + int yyfree_stacks = 0; + +#ifdef YYPURE + int yychar; + YYSTYPE yylval; + int yynerrs; +#ifdef YYLSP_NEEDED + YYLTYPE yylloc; +#endif +#endif + + YYSTYPE yyval; /* the variable used to return */ + /* semantic values from the action */ + /* routines */ + + int yylen; + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Starting parse\n"); +#endif + + yystate = 0; + yyerrstatus = 0; + yynerrs = 0; + yychar = YYEMPTY; /* Cause a token to be read. */ + + /* Initialize stack pointers. 
+ Waste one element of value and location stack + so that they stay on the same level as the state stack. + The wasted elements are never initialized. */ + + yyssp = yyss - 1; + yyvsp = yyvs; +#ifdef YYLSP_NEEDED + yylsp = yyls; +#endif + +/* Push a new state, which is found in yystate . */ +/* In all cases, when you get here, the value and location stacks + have just been pushed. so pushing a state here evens the stacks. */ +yynewstate: + + *++yyssp = yystate; + + if (yyssp >= yyss + yystacksize - 1) + { + /* Give user a chance to reallocate the stack */ + /* Use copies of these so that the &'s don't force the real ones into memory. */ + YYSTYPE *yyvs1 = yyvs; + short *yyss1 = yyss; +#ifdef YYLSP_NEEDED + YYLTYPE *yyls1 = yyls; +#endif + + /* Get the current used size of the three stacks, in elements. */ + int size = yyssp - yyss + 1; + +#ifdef yyoverflow + /* Each stack pointer address is followed by the size of + the data in use in that stack, in bytes. */ +#ifdef YYLSP_NEEDED + /* This used to be a conditional around just the two extra args, + but that might be undefined if yyoverflow is a macro. */ + yyoverflow("parser stack overflow", + &yyss1, size * sizeof (*yyssp), + &yyvs1, size * sizeof (*yyvsp), + &yyls1, size * sizeof (*yylsp), + &yystacksize); +#else + yyoverflow("parser stack overflow", + &yyss1, size * sizeof (*yyssp), + &yyvs1, size * sizeof (*yyvsp), + &yystacksize); +#endif + + yyss = yyss1; yyvs = yyvs1; +#ifdef YYLSP_NEEDED + yyls = yyls1; +#endif +#else /* no yyoverflow */ + /* Extend the stack our own way. 
*/ + if (yystacksize >= YYMAXDEPTH) + { + yyerror("parser stack overflow"); + if (yyfree_stacks) + { + free (yyss); + free (yyvs); +#ifdef YYLSP_NEEDED + free (yyls); +#endif + } + return 2; + } + yystacksize *= 2; + if (yystacksize > YYMAXDEPTH) + yystacksize = YYMAXDEPTH; +#ifndef YYSTACK_USE_ALLOCA + yyfree_stacks = 1; +#endif + yyss = (short *) YYSTACK_ALLOC (yystacksize * sizeof (*yyssp)); + __yy_memcpy ((char *)yyss, (char *)yyss1, + size * (unsigned int) sizeof (*yyssp)); + yyvs = (YYSTYPE *) YYSTACK_ALLOC (yystacksize * sizeof (*yyvsp)); + __yy_memcpy ((char *)yyvs, (char *)yyvs1, + size * (unsigned int) sizeof (*yyvsp)); +#ifdef YYLSP_NEEDED + yyls = (YYLTYPE *) YYSTACK_ALLOC (yystacksize * sizeof (*yylsp)); + __yy_memcpy ((char *)yyls, (char *)yyls1, + size * (unsigned int) sizeof (*yylsp)); +#endif +#endif /* no yyoverflow */ + + yyssp = yyss + size - 1; + yyvsp = yyvs + size - 1; +#ifdef YYLSP_NEEDED + yylsp = yyls + size - 1; +#endif + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Stack size increased to %d\n", yystacksize); +#endif + + if (yyssp >= yyss + yystacksize - 1) + YYABORT; + } + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Entering state %d\n", yystate); +#endif + + goto yybackup; + yybackup: + +/* Do appropriate processing given the current state. */ +/* Read a lookahead token if we need one and don't already have one. */ +/* yyresume: */ + + /* First try to decide what to do without reference to lookahead token. */ + + yyn = yypact[yystate]; + if (yyn == YYFLAG) + goto yydefault; + + /* Not known => get a lookahead token if don't already have one. */ + + /* yychar is either YYEMPTY or YYEOF + or a valid token in external form. */ + + if (yychar == YYEMPTY) + { +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Reading a token: "); +#endif + yychar = YYLEX; + } + + /* Convert token to internal form (in yychar1) for indexing tables with */ + + if (yychar <= 0) /* This means end of input. 
*/ + { + yychar1 = 0; + yychar = YYEOF; /* Don't call YYLEX any more */ + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Now at end of input.\n"); +#endif + } + else + { + yychar1 = YYTRANSLATE(yychar); + +#if YYDEBUG != 0 + if (yydebug) + { + fprintf (stderr, "Next token is %d (%s", yychar, yytname[yychar1]); + /* Give the individual parser a way to print the precise meaning + of a token, for further debugging info. */ +#ifdef YYPRINT + YYPRINT (stderr, yychar, yylval); +#endif + fprintf (stderr, ")\n"); + } +#endif + } + + yyn += yychar1; + if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != yychar1) + goto yydefault; + + yyn = yytable[yyn]; + + /* yyn is what to do for this token type in this state. + Negative => reduce, -yyn is rule number. + Positive => shift, yyn is new state. + New state is final state => don't bother to shift, + just return success. + 0, or most negative number => error. */ + + if (yyn < 0) + { + if (yyn == YYFLAG) + goto yyerrlab; + yyn = -yyn; + goto yyreduce; + } + else if (yyn == 0) + goto yyerrlab; + + if (yyn == YYFINAL) + YYACCEPT; + + /* Shift the lookahead token. */ + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Shifting token %d (%s), ", yychar, yytname[yychar1]); +#endif + + /* Discard the token being shifted unless it is eof. */ + if (yychar != YYEOF) + yychar = YYEMPTY; + + *++yyvsp = yylval; +#ifdef YYLSP_NEEDED + *++yylsp = yylloc; +#endif + + /* count tokens shifted since error; after three, turn off error status. */ + if (yyerrstatus) yyerrstatus--; + + yystate = yyn; + goto yynewstate; + +/* Do the default action for the current state. */ +yydefault: + + yyn = yydefact[yystate]; + if (yyn == 0) + goto yyerrlab; + +/* Do a reduction. yyn is the number of a rule to reduce with. 
*/ +yyreduce: + yylen = yyr2[yyn]; + if (yylen > 0) + yyval = yyvsp[1-yylen]; /* implement default value of the action */ + +#if YYDEBUG != 0 + if (yydebug) + { + int i; + + fprintf (stderr, "Reducing via rule %d (line %d), ", + yyn, yyrline[yyn]); + + /* Print the symbols being reduced, and their result. */ + for (i = yyprhs[yyn]; yyrhs[i] > 0; i++) + fprintf (stderr, "%s ", yytname[yyrhs[i]]); + fprintf (stderr, " -> %s\n", yytname[yyr1[yyn]]); + } +#endif + + + switch (yyn) { + +case 1: +#line 123 "OSUnserializeXML.y" +{ parsedObject = (OSObject *)NULL; YYACCEPT; ; + break;} +case 2: +#line 124 "OSUnserializeXML.y" +{ parsedObject = yyvsp[0]->object; + yyvsp[0]->object = 0; + freeObject(yyvsp[0]); + YYACCEPT; + ; + break;} +case 3: +#line 129 "OSUnserializeXML.y" +{ + yyerror("syntax error"); + YYERROR; + ; + break;} +case 4: +#line 135 "OSUnserializeXML.y" +{ yyval = buildOSDictionary(yyvsp[0]); ; + break;} +case 5: +#line 136 "OSUnserializeXML.y" +{ yyval = buildOSArray(yyvsp[0]); ; + break;} +case 6: +#line 137 "OSUnserializeXML.y" +{ yyval = buildOSSet(yyvsp[0]); ; + break;} +case 7: +#line 138 "OSUnserializeXML.y" +{ yyval = buildOSString(yyvsp[0]); ; + break;} +case 8: +#line 139 "OSUnserializeXML.y" +{ yyval = buildOSData(yyvsp[0]); ; + break;} +case 9: +#line 140 "OSUnserializeXML.y" +{ yyval = buildOSNumber(yyvsp[0]); ; + break;} +case 10: +#line 141 "OSUnserializeXML.y" +{ yyval = buildOSBoolean(yyvsp[0]); ; + break;} +case 11: +#line 142 "OSUnserializeXML.y" +{ yyval = retrieveObject(yyvsp[0]->idref); + if (yyval) { + yyval->object->retain(); + } else { + yyerror("forward reference detected"); + YYERROR; + } + freeObject(yyvsp[0]); + ; + break;} +case 12: +#line 155 "OSUnserializeXML.y" +{ yyval = yyvsp[-1]; + yyval->elements = NULL; + ; + break;} +case 13: +#line 158 "OSUnserializeXML.y" +{ yyval = yyvsp[-2]; + yyval->elements = yyvsp[-1]; + ; + break;} +case 16: +#line 165 "OSUnserializeXML.y" +{ yyval = yyvsp[0]; + yyval->next = yyvsp[-1]; + ; + 
break;} +case 17: +#line 170 "OSUnserializeXML.y" +{ yyval = yyvsp[-1]; + yyval->next = NULL; + yyval->object = yyvsp[0]->object; + yyvsp[0]->object = 0; + freeObject(yyvsp[0]); + ; + break;} +case 18: +#line 178 "OSUnserializeXML.y" +{ yyval = buildKey(yyvsp[0]); ; + break;} +case 19: +#line 183 "OSUnserializeXML.y" +{ yyval = yyvsp[-1]; + yyval->elements = NULL; + ; + break;} +case 20: +#line 186 "OSUnserializeXML.y" +{ yyval = yyvsp[-2]; + yyval->elements = yyvsp[-1]; + ; + break;} +case 22: +#line 192 "OSUnserializeXML.y" +{ yyval = yyvsp[-1]; + yyval->elements = NULL; + ; + break;} +case 23: +#line 195 "OSUnserializeXML.y" +{ yyval = yyvsp[-2]; + yyval->elements = yyvsp[-1]; + ; + break;} +case 25: +#line 201 "OSUnserializeXML.y" +{ yyval = yyvsp[0]; + yyval->next = NULL; + ; + break;} +case 26: +#line 204 "OSUnserializeXML.y" +{ yyval = yyvsp[0]; + yyval->next = yyvsp[-1]; + ; + break;} +} + /* the action file gets copied in in place of this dollarsign */ +#line 543 "/usr/share/bison.simple" + + yyvsp -= yylen; + yyssp -= yylen; +#ifdef YYLSP_NEEDED + yylsp -= yylen; +#endif + +#if YYDEBUG != 0 + if (yydebug) + { + short *ssp1 = yyss - 1; + fprintf (stderr, "state stack now"); + while (ssp1 != yyssp) + fprintf (stderr, " %d", *++ssp1); + fprintf (stderr, "\n"); + } +#endif + + *++yyvsp = yyval; + +#ifdef YYLSP_NEEDED + yylsp++; + if (yylen == 0) + { + yylsp->first_line = yylloc.first_line; + yylsp->first_column = yylloc.first_column; + yylsp->last_line = (yylsp-1)->last_line; + yylsp->last_column = (yylsp-1)->last_column; + yylsp->text = 0; + } + else + { + yylsp->last_line = (yylsp+yylen-1)->last_line; + yylsp->last_column = (yylsp+yylen-1)->last_column; + } +#endif + + /* Now "shift" the result of the reduction. + Determine what state that goes to, + based on the state we popped back to + and the rule number reduced by. 
*/ + + yyn = yyr1[yyn]; + + yystate = yypgoto[yyn - YYNTBASE] + *yyssp; + if (yystate >= 0 && yystate <= YYLAST && yycheck[yystate] == *yyssp) + yystate = yytable[yystate]; + else + yystate = yydefgoto[yyn - YYNTBASE]; + + goto yynewstate; + +yyerrlab: /* here on detecting error */ + + if (! yyerrstatus) + /* If not already recovering from an error, report this error. */ + { + ++yynerrs; + +#ifdef YYERROR_VERBOSE + yyn = yypact[yystate]; + + if (yyn > YYFLAG && yyn < YYLAST) + { + int size = 0; + char *msg; + int x, count; + + count = 0; + /* Start X at -yyn if nec to avoid negative indexes in yycheck. */ + for (x = (yyn < 0 ? -yyn : 0); + x < (sizeof(yytname) / sizeof(char *)); x++) + if (yycheck[x + yyn] == x) + size += strlen(yytname[x]) + 15, count++; + msg = (char *) malloc(size + 15); + if (msg != 0) + { + strcpy(msg, "parse error"); + + if (count < 5) + { + count = 0; + for (x = (yyn < 0 ? -yyn : 0); + x < (sizeof(yytname) / sizeof(char *)); x++) + if (yycheck[x + yyn] == x) + { + strcat(msg, count == 0 ? ", expecting `" : " or `"); + strcat(msg, yytname[x]); + strcat(msg, "'"); + count++; + } + } + yyerror(msg); + free(msg); + } + else + yyerror ("parse error; also virtual memory exceeded"); + } + else +#endif /* YYERROR_VERBOSE */ + yyerror("parse error"); + } + + goto yyerrlab1; +yyerrlab1: /* here on error raised explicitly by an action */ + + if (yyerrstatus == 3) + { + /* if just tried and failed to reuse lookahead token after an error, discard it. */ + + /* return failure if at end of input */ + if (yychar == YYEOF) + YYABORT; + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Discarding token %d (%s).\n", yychar, yytname[yychar1]); +#endif + + yychar = YYEMPTY; + } + + /* Else will try to reuse lookahead token + after shifting the error token. */ + + yyerrstatus = 3; /* Each real token shifted decrements this */ + + goto yyerrhandle; + +yyerrdefault: /* current state does not do anything special for the error token. 
*/ + +#if 0 + /* This is wrong; only states that explicitly want error tokens + should shift them. */ + yyn = yydefact[yystate]; /* If its default is to accept any token, ok. Otherwise pop it.*/ + if (yyn) goto yydefault; +#endif + +yyerrpop: /* pop the current state because it cannot handle the error token */ + + if (yyssp == yyss) YYABORT; + yyvsp--; + yystate = *--yyssp; +#ifdef YYLSP_NEEDED + yylsp--; +#endif + +#if YYDEBUG != 0 + if (yydebug) + { + short *ssp1 = yyss - 1; + fprintf (stderr, "Error: state stack now"); + while (ssp1 != yyssp) + fprintf (stderr, " %d", *++ssp1); + fprintf (stderr, "\n"); + } +#endif + +yyerrhandle: + + yyn = yypact[yystate]; + if (yyn == YYFLAG) + goto yyerrdefault; + + yyn += YYTERROR; + if (yyn < 0 || yyn > YYLAST || yycheck[yyn] != YYTERROR) + goto yyerrdefault; + + yyn = yytable[yyn]; + if (yyn < 0) + { + if (yyn == YYFLAG) + goto yyerrpop; + yyn = -yyn; + goto yyreduce; + } + else if (yyn == 0) + goto yyerrpop; + + if (yyn == YYFINAL) + YYACCEPT; + +#if YYDEBUG != 0 + if (yydebug) + fprintf(stderr, "Shifting error token, "); +#endif + + *++yyvsp = yylval; +#ifdef YYLSP_NEEDED + *++yylsp = yylloc; +#endif + + yystate = yyn; + goto yynewstate; + + yyacceptlab: + /* YYACCEPT comes here. */ + if (yyfree_stacks) + { + free (yyss); + free (yyvs); +#ifdef YYLSP_NEEDED + free (yyls); +#endif + } + return 0; + + yyabortlab: + /* YYABORT comes here. 
*/ + if (yyfree_stacks) + { + free (yyss); + free (yyvs); +#ifdef YYLSP_NEEDED + free (yyls); +#endif + } + return 1; +} +#line 226 "OSUnserializeXML.y" + + +static int lineNumber = 0; +static const char *parseBuffer; +static int parseBufferIndex; + +#define currentChar() (parseBuffer[parseBufferIndex]) +#define nextChar() (parseBuffer[++parseBufferIndex]) +#define prevChar() (parseBuffer[parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) + +static char yyerror_message[128]; + +int +yyerror(char *s) /* Called by yyparse on error */ +{ + sprintf(yyerror_message, "OSUnserializeXML: %s near line %d\n", s, lineNumber); + return 0; +} + +#define TAG_MAX_LENGTH 32 +#define TAG_MAX_ATTRIBUTES 32 +#define TAG_BAD 0 +#define TAG_START 1 +#define TAG_END 2 +#define TAG_EMPTY 3 +#define TAG_COMMENT 4 + +static int +getTag(char tag[TAG_MAX_LENGTH], + int *attributeCount, + char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH], + char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH] ) +{ + int length = 0;; + int c = currentChar(); + int tagType = TAG_START; + + *attributeCount = 0; + + if (c != '<') return TAG_BAD; + c = nextChar(); // skip '<' + + if (c == '?' 
|| c == '!') { + while ((c = nextChar()) != 0) { + if (c == '\n') lineNumber++; + if (c == '>') { + (void)nextChar(); + return TAG_COMMENT; + } + } + } + + if (c == '/') { + c = nextChar(); // skip '/' + tagType = TAG_END; + } + if (!isAlpha(c)) return TAG_BAD; + + /* find end of tag while copying it */ + while (isAlphaNumeric(c)) { + tag[length++] = c; + c = nextChar(); + if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + } + + tag[length] = 0; + +//printf("tag %s, type %d\n", tag, tagType); + + // look for attributes of the form attribute = "value" ... + while ((c != '>') && (c != '/')) { + while (isSpace(c)) c = nextChar(); + + length = 0; + while (isAlphaNumeric(c)) { + attributes[*attributeCount][length++] = c; + if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + c = nextChar(); + } + attributes[*attributeCount][length] = 0; + + while (isSpace(c)) c = nextChar(); + + if (c != '=') return TAG_BAD; + c = nextChar(); + + while (isSpace(c)) c = nextChar(); + + if (c != '"') return TAG_BAD; + c = nextChar(); + length = 0; + while (c != '"') { + values[*attributeCount][length++] = c; + if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + c = nextChar(); + } + values[*attributeCount][length] = 0; + + c = nextChar(); // skip closing quote + +//printf(" attribute '%s' = '%s', nextchar = '%c'\n", attributes[*attributeCount], values[*attributeCount], c); + + (*attributeCount)++; + if (*attributeCount >= TAG_MAX_ATTRIBUTES) return TAG_BAD; + } + + if (c == '/') { + c = nextChar(); // skip '/' + tagType = TAG_EMPTY; + } + if (c != '>') return TAG_BAD; + c = nextChar(); // skip '>' + + return tagType; +} + +static char * +getString() +{ + int c = currentChar(); + + int start, length, i, j;; + char * tempString; + + start = parseBufferIndex; + /* find end of string */ + + while (c != 0) { + if (c == '\n') lineNumber++; + if (c == '<') { + break; + } + c = nextChar(); + } + + if (c != '<') return 0; + + length = parseBufferIndex - start; + + /* copy to null terminated 
buffer */ + tempString = (char *)malloc(length + 1); + if (tempString == 0) { + printf("OSUnserializeXML: can't alloc temp memory\n"); + return 0; + } + + // copy out string in tempString + // "&" -> '&', "<" -> '<', ">" -> '>' + + i = j = 0; + while (i < length) { + c = parseBuffer[start + i++]; + if (c != '&') { + tempString[j++] = c; + } else { + if ((i+3) > length) goto error; + c = parseBuffer[start + i++]; + if (c == 'l') { + if (parseBuffer[start + i++] != 't') goto error; + if (parseBuffer[start + i++] != ';') goto error; + tempString[j++] = '<'; + continue; + } + if (c == 'g') { + if (parseBuffer[start + i++] != 't') goto error; + if (parseBuffer[start + i++] != ';') goto error; + tempString[j++] = '>'; + continue; + } + if ((i+3) > length) goto error; + if (c == 'a') { + if (parseBuffer[start + i++] != 'm') goto error; + if (parseBuffer[start + i++] != 'p') goto error; + if (parseBuffer[start + i++] != ';') goto error; + tempString[j++] = '&'; + continue; + } + goto error; + } + } + tempString[j] = 0; + +//printf("string %s\n", tempString); + + return tempString; + +error: + if (tempString) free(tempString); + return 0; +} + +static long long +getNumber() +{ + unsigned long long n = 0; + int base = 10; + int c = currentChar(); + + if (!isDigit (c)) return 0; + + if (c == '0') { + c = nextChar(); + if (c == 'x') { + base = 16; + c = nextChar(); + } + } + if (base == 10) { + while(isDigit(c)) { + n = (n * base + c - '0'); + c = nextChar(); + } + } else { + while(isHexDigit(c)) { + if (isDigit(c)) { + n = (n * base + c - '0'); + } else { + n = (n * base + 0xa + c - 'a'); + } + c = nextChar(); + } + } +//printf("number 0x%x\n", (unsigned long)n); + return n; +} + +// taken from CFXMLParsing/CFPropertyList.c + +static const signed char __CFPLDataDecodeTable[128] = { + /* 000 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 010 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 020 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 030 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* ' ' */ -1, -1, 
-1, -1, -1, -1, -1, -1, + /* '(' */ -1, -1, -1, 62, -1, -1, -1, 63, + /* '0' */ 52, 53, 54, 55, 56, 57, 58, 59, + /* '8' */ 60, 61, -1, -1, -1, 0, -1, -1, + /* '@' */ -1, 0, 1, 2, 3, 4, 5, 6, + /* 'H' */ 7, 8, 9, 10, 11, 12, 13, 14, + /* 'P' */ 15, 16, 17, 18, 19, 20, 21, 22, + /* 'X' */ 23, 24, 25, -1, -1, -1, -1, -1, + /* '`' */ -1, 26, 27, 28, 29, 30, 31, 32, + /* 'h' */ 33, 34, 35, 36, 37, 38, 39, 40, + /* 'p' */ 41, 42, 43, 44, 45, 46, 47, 48, + /* 'x' */ 49, 50, 51, -1, -1, -1, -1, -1 +}; + +#define OSDATA_ALLOC_SIZE 4096 + +static void * +getCFEncodedData(unsigned int *size) +{ + int numeq = 0, acc = 0, cntr = 0; + int tmpbufpos = 0, tmpbuflen = 0; + unsigned char *tmpbuf = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + + int c = currentChar(); + *size = 0; + + while (c != '<') { + c &= 0x7f; + if (c == 0) { + free(tmpbuf); + return 0; + } + if (c == '=') numeq++; else numeq = 0; + if (c == '\n') lineNumber++; + if (__CFPLDataDecodeTable[c] < 0) { + c = nextChar(); + continue; + } + cntr++; + acc <<= 6; + acc += __CFPLDataDecodeTable[c]; + if (0 == (cntr & 0x3)) { + if (tmpbuflen <= tmpbufpos + 2) { + tmpbuflen += OSDATA_ALLOC_SIZE; + tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); + } + tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; + if (numeq < 2) + tmpbuf[tmpbufpos++] = (acc >> 8) & 0xff; + if (numeq < 1) + tmpbuf[tmpbufpos++] = acc & 0xff; + } + c = nextChar(); + } + *size = tmpbufpos; + return tmpbuf; +} + +static void * +getHexData(unsigned int *size) +{ + int c; + unsigned char *d, *start, *lastStart; + + start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + c = currentChar(); + + while (c != '<') { + + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + if (c == '\n') { + lineNumber++; + c = nextChar(); + continue; + } + + // get high nibble + if (isDigit(c)) { + *d = (c - '0') << 4; + } else if (isAlphaDigit(c)) { + *d = (0xa + (c - 'a')) << 4; + } else { + goto error; + } + + // get low nibble + c = nextChar(); + if 
(isDigit(c)) { + *d |= c - '0'; + } else if (isAlphaDigit(c)) { + *d |= 0xa + (c - 'a'); + } else { + goto error; + } + + d++; + if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { + int oldsize = d - start; + start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + d = lastStart = start + oldsize; + } + c = nextChar(); + } + + *size = d - start; + return start; + + error: + + *size = 0; + free(start); + return 0; +} + +static int +yylex() +{ + int c; + int tagType; + char tag[TAG_MAX_LENGTH]; + int attributeCount; + char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; + char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; + + if (parseBufferIndex == 0) lineNumber = 1; + + top: + c = currentChar(); + + /* skip white space */ + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + + /* keep track of line number, don't return \n's */ + if (c == '\n') { + lineNumber++; + (void)nextChar(); + goto top; + } + + if (!c) return c; + + tagType = getTag(tag, &attributeCount, attributes, values); + if (tagType == TAG_BAD) return SYNTAX_ERROR; + if (tagType == TAG_COMMENT) goto top; + + // handle allocation and check for "ID" and "IDREF" tags up front + yylval = newObject(); + yylval->idref = -1; + for (int i=0; i < attributeCount; i++) { + if (attributes[i][0] == 'I' && attributes[i][1] == 'D') { + // check for idref's, note: we ignore the tag, for + // this to work correctly, all idrefs must be unique + // across the whole serialization + if (attributes[i][2] == 'R' && attributes[i][3] == 'E' && + attributes[i][4] == 'F' && !attributes[i][5]) { + if (tagType != TAG_EMPTY) return SYNTAX_ERROR; + yylval->idref = strtol(values[i], NULL, 0); + return IDREF; + } + // check for id's + if (!attributes[i][2]) { + yylval->idref = strtol(values[i], NULL, 0); + } else { + return SYNTAX_ERROR; + } + } + } + + switch (*tag) { + case 'a': + if (!strcmp(tag, "array")) { + if (tagType == TAG_EMPTY) { + yylval->elements = NULL; + return ARRAY; + } + return (tagType == 
TAG_START) ? '(' : ')'; + } + break; + case 'd': + if (!strcmp(tag, "dict")) { + if (tagType == TAG_EMPTY) { + yylval->elements = NULL; + return DICTIONARY; + } + return (tagType == TAG_START) ? '{' : '}'; + } + if (!strcmp(tag, "data")) { + unsigned int size; + int readable = 0; + if (tagType == TAG_EMPTY) { + yylval->data = NULL; + yylval->size = 0; + return DATA; + } + for (int i=0; i < attributeCount; i++) { + if (!strcmp(attributes[i], "format") && !strcmp(values[i], "hex")) { + readable++; + break; + } + } + // CF encoded is the default form + if (readable) { + yylval->data = getHexData(&size); + } else { + yylval->data = getCFEncodedData(&size); + } + yylval->size = size; + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { + return SYNTAX_ERROR; + } + return DATA; + } + break; + case 'f': + if (!strcmp(tag, "false")) { + if (tagType == TAG_EMPTY) { + yylval->number = 0; + return BOOLEAN; + } + } + break; + case 'i': + if (!strcmp(tag, "integer")) { + yylval->size = 64; // default + for (int i=0; i < attributeCount; i++) { + if (!strcmp(attributes[i], "size")) { + yylval->size = strtoul(values[i], NULL, 0); + } + } + if (tagType == TAG_EMPTY) { + yylval->number = 0; + return NUMBER; + } + yylval->number = getNumber(); + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "integer")) { + return SYNTAX_ERROR; + } + return NUMBER; + } + break; + case 'k': + if (!strcmp(tag, "key")) { + if (tagType == TAG_EMPTY) return SYNTAX_ERROR; + yylval->string = getString(); + if (!yylval->string) { + return SYNTAX_ERROR; + } + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) + || strcmp(tag, "key")) { + return SYNTAX_ERROR; + } + return KEY; + } + break; + case 'p': + if (!strcmp(tag, "plist")) { + freeObject(yylval); + goto top; + } + break; + case 's': + if (!strcmp(tag, "string")) { + if (tagType == TAG_EMPTY) { + yylval->string = (char *)malloc(1); + *yylval->string = 0; + return 
+// "java" like allocation: if this code hits a syntax error in the
+// middle of the parsed string we just bail with pointers hanging
+// all over the place, so this code helps keep it all together
object o=%x string=%x\n", (int)o, (int)o->string); + free(o->string); + } + + t = o; + o = o->free; + free(t); + } +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +static OSDictionary *tags; + +static void +rememberObject(int tag, OSObject *o) +{ + char key[16]; + sprintf(key, "%u", tag); + +//printf("remember key %s\n", key); + + tags->setObject(key, o); +} + +static object_t * +retrieveObject(int tag) +{ + char key[16]; + sprintf(key, "%u", tag); + +//printf("retrieve key '%s'\n", key); + + OSObject *ref = tags->getObject(key); + if (!ref) return 0; + + object_t *o = newObject(); + o->object = ref; + return o; +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +object_t * +buildOSDictionary(object_t * header) +{ + object_t *o, *t; + int count = 0; + + // get count and reverse order + o = header->elements; + header->elements = 0; + while (o) { + count++; + t = o; + o = o->next; + + t->next = header->elements; + header->elements = t; + } + + OSDictionary *d = OSDictionary::withCapacity(count); + + if (header->idref >= 0) rememberObject(header->idref, d); + + o = header->elements; + while (o) { + d->setObject(o->key, o->object); + o->object->release(); + o->object = 0; + o->key->release(); + o->key = 0; + t = o; + o = o->next; + freeObject(t); + } + o = header; + o->object = d; + return o; +}; + +object_t * +buildOSArray(object_t * header) +{ + object_t *o, *t; + int count = 0; + + // get count and reverse order + o = header->elements; + header->elements = 0; + while (o) { + count++; + t = o; + o = o->next; + + t->next = header->elements; + header->elements = t; + } + + OSArray *a = OSArray::withCapacity(count); + + if (header->idref >= 0) 
rememberObject(header->idref, a); + + o = header->elements; + while (o) { + a->setObject(o->object); + o->object->release(); + o->object = 0; + t = o; + o = o->next; + freeObject(t); + } + o = header; + o->object = a; + return o; +}; + +object_t * +buildOSSet(object_t *o) +{ + o = buildOSArray(o); + OSArray *a = (OSArray *)o->object; + + OSSet *s = OSSet::withArray(a, a->getCapacity()); + + //write over reference created in array + if (o->idref >= 0) rememberObject(o->idref, s); + + a->release(); + o->object = s; + return o; +}; + +object_t * +buildOSString(object_t *o) +{ + OSString *s = OSString::withCString(o->string); + + if (o->idref >= 0) rememberObject(o->idref, s); + + free(o->string); + o->string = 0; + o->object = s; + + return o; +}; + +object_t * +buildKey(object_t *o) +{ + const OSSymbol *s = OSSymbol::withCString(o->string); + + free(o->string); + o->string = 0; + o->key = s; + + return o; +}; + +object_t * +buildOSData(object_t *o) +{ + OSData *d; + + if (o->size) { + d = OSData::withBytes(o->data, o->size); + free(o->data); + } else { + d = OSData::withCapacity(0); + } + if (o->idref >= 0) rememberObject(o->idref, d); + + o->data = 0; + o->object = d; + return o; +}; + +object_t * +buildOSNumber(object_t *o) +{ + OSNumber *n = OSNumber::withNumber(o->number, o->size); + + if (o->idref >= 0) rememberObject(o->idref, n); + + o->object = n; + return o; +}; + +object_t * +buildOSBoolean(object_t *o) +{ + OSBoolean *b = OSBoolean::withBoolean(o->number != 0); + o->object = b; + return o; +}; + +__BEGIN_DECLS +#include +__END_DECLS + +static mutex_t *lock = 0; + +OSObject* +OSUnserializeXML(const char *buffer, OSString **errorString) +{ + OSObject *object; + + if (!lock) { + lock = mutex_alloc(ETAP_IO_AHA); + _mutex_lock(lock); + } else { + _mutex_lock(lock); + + } + + objects = 0; + freeObjects = 0; + yyerror_message[0] = 0; //just in case + parseBuffer = buffer; + parseBufferIndex = 0; + tags = OSDictionary::withCapacity(128); + if (yyparse() == 0) { + 
object = parsedObject; + if (errorString) *errorString = 0; + } else { + object = 0; + if (errorString) + *errorString = OSString::withCString(yyerror_message); + } + + cleanupObjects(); + tags->release(); + mutex_unlock(lock); + + return object; +} + + +// +// +// +// +// +// DO NOT EDIT OSUnserializeXML.cpp! +// +// this means you! +// +// +// +// +// diff --git a/libkern/c++/OSUnserializeXML.y b/libkern/c++/OSUnserializeXML.y new file mode 100644 index 000000000..918c68304 --- /dev/null +++ b/libkern/c++/OSUnserializeXML.y @@ -0,0 +1,1082 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* OSUnserializeXML.y created by rsulack on Tue Oct 12 1999 */ + +// XML parser for unserializing OSContainer objects +// +// to build : +// bison -p OSUnserializeXML OSUnserializeXML.y +// head -50 OSUnserializeXML.y > OSUnserializeXML.cpp +// sed -e "s/stdio.h/stddef.h/" < OSUnserializeXML.tab.c >> OSUnserializeXML.cpp +// +// when changing code check in both OSUnserializeXML.y and OSUnserializeXML.cpp +// +// +// +// +// +// +// +// DO NOT EDIT OSUnserializeXML.cpp! 
+// +// this means you! +// +// +// +// +// +// +// + + +%{ +#include +#include +#include +#include + +typedef struct object { + struct object *next; + struct object *free; + struct object *elements; + OSObject *object; + const OSSymbol *key; // for dictionary + int size; + void *data; // for data + char *string; // for string & symbol + long long number; // for number + int idref; +} object_t; + +static int yyparse(); +static int yyerror(char *s); +static int yylex(); + +static object_t * newObject(); +static void freeObject(object_t *o); + +static object_t *buildOSDictionary(object_t *); +static object_t *buildOSArray(object_t *); +static object_t *buildOSSet(object_t *); +static object_t *buildOSString(object_t *); +static object_t *buildKey(object_t *); +static object_t *buildOSData(object_t *); +static object_t *buildOSNumber(object_t *); +static object_t *buildOSBoolean(object_t *o); + +static void rememberObject(int, OSObject *); +static object_t *retrieveObject(int); + +// resultant object of parsed text +static OSObject *parsedObject; + +#define YYSTYPE object_t * + +extern "C" { +extern void *kern_os_malloc(size_t size); +extern void *kern_os_realloc(void * addr, size_t size); +extern void kern_os_free(void * addr); + +//XXX shouldn't have to define these +extern long strtol(const char *, char **, int); +extern unsigned long strtoul(const char *, char **, int); + +} /* extern "C" */ + +#define malloc(s) kern_os_malloc(s) +#define realloc(a, s) kern_os_realloc(a, s) +#define free(a) kern_os_free(a) + +%} +%token ARRAY +%token BOOLEAN +%token DATA +%token DICTIONARY +%token IDREF +%token KEY +%token NUMBER +%token SET +%token STRING +%token SYNTAX_ERROR +%% /* Grammar rules and actions follow */ + +input: /* empty */ { parsedObject = (OSObject *)NULL; YYACCEPT; } + | object { parsedObject = $1->object; + $1->object = 0; + freeObject($1); + YYACCEPT; + } + | SYNTAX_ERROR { + yyerror("syntax error"); + YYERROR; + } + ; + +object: dict { $$ = 
buildOSDictionary($1); } + | array { $$ = buildOSArray($1); } + | set { $$ = buildOSSet($1); } + | string { $$ = buildOSString($1); } + | data { $$ = buildOSData($1); } + | number { $$ = buildOSNumber($1); } + | boolean { $$ = buildOSBoolean($1); } + | idref { $$ = retrieveObject($1->idref); + if ($$) { + $$->object->retain(); + } else { + yyerror("forward reference detected"); + YYERROR; + } + freeObject($1); + } + ; + +//------------------------------------------------------------------------------ + +dict: '{' '}' { $$ = $1; + $$->elements = NULL; + } + | '{' pairs '}' { $$ = $1; + $$->elements = $2; + } + | DICTIONARY + ; + +pairs: pair + | pairs pair { $$ = $2; + $$->next = $1; + } + ; + +pair: key object { $$ = $1; + $$->next = NULL; + $$->object = $2->object; + $2->object = 0; + freeObject($2); + } + ; + +key: KEY { $$ = buildKey($1); } + ; + +//------------------------------------------------------------------------------ + +array: '(' ')' { $$ = $1; + $$->elements = NULL; + } + | '(' elements ')' { $$ = $1; + $$->elements = $2; + } + | ARRAY + ; + +set: '[' ']' { $$ = $1; + $$->elements = NULL; + } + | '[' elements ']' { $$ = $1; + $$->elements = $2; + } + | SET + ; + +elements: object { $$ = $1; + $$->next = NULL; + } + | elements object { $$ = $2; + $$->next = $1; + } + ; + +//------------------------------------------------------------------------------ + +boolean: BOOLEAN + ; + +data: DATA + ; + +idref: IDREF + ; + +number: NUMBER + ; + +string: STRING + ; + +%% + +static int lineNumber = 0; +static const char *parseBuffer; +static int parseBufferIndex; + +#define currentChar() (parseBuffer[parseBufferIndex]) +#define nextChar() (parseBuffer[++parseBufferIndex]) +#define prevChar() (parseBuffer[parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define 
isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) + +static char yyerror_message[128]; + +int +yyerror(char *s) /* Called by yyparse on error */ +{ + sprintf(yyerror_message, "OSUnserializeXML: %s near line %d\n", s, lineNumber); + return 0; +} + +#define TAG_MAX_LENGTH 32 +#define TAG_MAX_ATTRIBUTES 32 +#define TAG_BAD 0 +#define TAG_START 1 +#define TAG_END 2 +#define TAG_EMPTY 3 +#define TAG_COMMENT 4 + +static int +getTag(char tag[TAG_MAX_LENGTH], + int *attributeCount, + char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH], + char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH] ) +{ + int length = 0;; + int c = currentChar(); + int tagType = TAG_START; + + *attributeCount = 0; + + if (c != '<') return TAG_BAD; + c = nextChar(); // skip '<' + + if (c == '?' || c == '!') { + while ((c = nextChar()) != 0) { + if (c == '\n') lineNumber++; + if (c == '>') { + (void)nextChar(); + return TAG_COMMENT; + } + } + } + + if (c == '/') { + c = nextChar(); // skip '/' + tagType = TAG_END; + } + if (!isAlpha(c)) return TAG_BAD; + + /* find end of tag while copying it */ + while (isAlphaNumeric(c)) { + tag[length++] = c; + c = nextChar(); + if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + } + + tag[length] = 0; + +//printf("tag %s, type %d\n", tag, tagType); + + // look for attributes of the form attribute = "value" ... 
+ while ((c != '>') && (c != '/')) { + while (isSpace(c)) c = nextChar(); + + length = 0; + while (isAlphaNumeric(c)) { + attributes[*attributeCount][length++] = c; + if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + c = nextChar(); + } + attributes[*attributeCount][length] = 0; + + while (isSpace(c)) c = nextChar(); + + if (c != '=') return TAG_BAD; + c = nextChar(); + + while (isSpace(c)) c = nextChar(); + + if (c != '"') return TAG_BAD; + c = nextChar(); + length = 0; + while (c != '"') { + values[*attributeCount][length++] = c; + if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + c = nextChar(); + } + values[*attributeCount][length] = 0; + + c = nextChar(); // skip closing quote + +//printf(" attribute '%s' = '%s', nextchar = '%c'\n", attributes[*attributeCount], values[*attributeCount], c); + + (*attributeCount)++; + if (*attributeCount >= TAG_MAX_ATTRIBUTES) return TAG_BAD; + } + + if (c == '/') { + c = nextChar(); // skip '/' + tagType = TAG_EMPTY; + } + if (c != '>') return TAG_BAD; + c = nextChar(); // skip '>' + + return tagType; +} + +static char * +getString() +{ + int c = currentChar(); + + int start, length, i, j;; + char * tempString; + + start = parseBufferIndex; + /* find end of string */ + + while (c != 0) { + if (c == '\n') lineNumber++; + if (c == '<') { + break; + } + c = nextChar(); + } + + if (c != '<') return 0; + + length = parseBufferIndex - start; + + /* copy to null terminated buffer */ + tempString = (char *)malloc(length + 1); + if (tempString == 0) { + printf("OSUnserializeXML: can't alloc temp memory\n"); + return 0; + } + + // copy out string in tempString + // "&" -> '&', "<" -> '<', ">" -> '>' + + i = j = 0; + while (i < length) { + c = parseBuffer[start + i++]; + if (c != '&') { + tempString[j++] = c; + } else { + if ((i+3) > length) goto error; + c = parseBuffer[start + i++]; + if (c == 'l') { + if (parseBuffer[start + i++] != 't') goto error; + if (parseBuffer[start + i++] != ';') goto error; + tempString[j++] = '<'; + 
continue; + } + if (c == 'g') { + if (parseBuffer[start + i++] != 't') goto error; + if (parseBuffer[start + i++] != ';') goto error; + tempString[j++] = '>'; + continue; + } + if ((i+3) > length) goto error; + if (c == 'a') { + if (parseBuffer[start + i++] != 'm') goto error; + if (parseBuffer[start + i++] != 'p') goto error; + if (parseBuffer[start + i++] != ';') goto error; + tempString[j++] = '&'; + continue; + } + goto error; + } + } + tempString[j] = 0; + +//printf("string %s\n", tempString); + + return tempString; + +error: + if (tempString) free(tempString); + return 0; +} + +static long long +getNumber() +{ + unsigned long long n = 0; + int base = 10; + int c = currentChar(); + + if (!isDigit (c)) return 0; + + if (c == '0') { + c = nextChar(); + if (c == 'x') { + base = 16; + c = nextChar(); + } + } + if (base == 10) { + while(isDigit(c)) { + n = (n * base + c - '0'); + c = nextChar(); + } + } else { + while(isHexDigit(c)) { + if (isDigit(c)) { + n = (n * base + c - '0'); + } else { + n = (n * base + 0xa + c - 'a'); + } + c = nextChar(); + } + } +//printf("number 0x%x\n", (unsigned long)n); + return n; +} + +// taken from CFXMLParsing/CFPropertyList.c + +static const signed char __CFPLDataDecodeTable[128] = { + /* 000 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 010 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 020 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 030 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* ' ' */ -1, -1, -1, -1, -1, -1, -1, -1, + /* '(' */ -1, -1, -1, 62, -1, -1, -1, 63, + /* '0' */ 52, 53, 54, 55, 56, 57, 58, 59, + /* '8' */ 60, 61, -1, -1, -1, 0, -1, -1, + /* '@' */ -1, 0, 1, 2, 3, 4, 5, 6, + /* 'H' */ 7, 8, 9, 10, 11, 12, 13, 14, + /* 'P' */ 15, 16, 17, 18, 19, 20, 21, 22, + /* 'X' */ 23, 24, 25, -1, -1, -1, -1, -1, + /* '`' */ -1, 26, 27, 28, 29, 30, 31, 32, + /* 'h' */ 33, 34, 35, 36, 37, 38, 39, 40, + /* 'p' */ 41, 42, 43, 44, 45, 46, 47, 48, + /* 'x' */ 49, 50, 51, -1, -1, -1, -1, -1 +}; + +#define OSDATA_ALLOC_SIZE 4096 + +static void * 
+getCFEncodedData(unsigned int *size) +{ + int numeq = 0, acc = 0, cntr = 0; + int tmpbufpos = 0, tmpbuflen = 0; + unsigned char *tmpbuf = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + + int c = currentChar(); + *size = 0; + + while (c != '<') { + c &= 0x7f; + if (c == 0) { + free(tmpbuf); + return 0; + } + if (c == '=') numeq++; else numeq = 0; + if (c == '\n') lineNumber++; + if (__CFPLDataDecodeTable[c] < 0) { + c = nextChar(); + continue; + } + cntr++; + acc <<= 6; + acc += __CFPLDataDecodeTable[c]; + if (0 == (cntr & 0x3)) { + if (tmpbuflen <= tmpbufpos + 2) { + tmpbuflen += OSDATA_ALLOC_SIZE; + tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); + } + tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; + if (numeq < 2) + tmpbuf[tmpbufpos++] = (acc >> 8) & 0xff; + if (numeq < 1) + tmpbuf[tmpbufpos++] = acc & 0xff; + } + c = nextChar(); + } + *size = tmpbufpos; + return tmpbuf; +} + +static void * +getHexData(unsigned int *size) +{ + int c; + unsigned char *d, *start, *lastStart; + + start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); + c = currentChar(); + + while (c != '<') { + + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + if (c == '\n') { + lineNumber++; + c = nextChar(); + continue; + } + + // get high nibble + if (isDigit(c)) { + *d = (c - '0') << 4; + } else if (isAlphaDigit(c)) { + *d = (0xa + (c - 'a')) << 4; + } else { + goto error; + } + + // get low nibble + c = nextChar(); + if (isDigit(c)) { + *d |= c - '0'; + } else if (isAlphaDigit(c)) { + *d |= 0xa + (c - 'a'); + } else { + goto error; + } + + d++; + if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { + int oldsize = d - start; + start = (unsigned char *)realloc(start, oldsize + OSDATA_ALLOC_SIZE); + d = lastStart = start + oldsize; + } + c = nextChar(); + } + + *size = d - start; + return start; + + error: + + *size = 0; + free(start); + return 0; +} + +static int +yylex() +{ + int c; + int tagType; + char tag[TAG_MAX_LENGTH]; + int attributeCount; + char 
attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; + char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; + + if (parseBufferIndex == 0) lineNumber = 1; + + top: + c = currentChar(); + + /* skip white space */ + if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + + /* keep track of line number, don't return \n's */ + if (c == '\n') { + lineNumber++; + (void)nextChar(); + goto top; + } + + if (!c) return c; + + tagType = getTag(tag, &attributeCount, attributes, values); + if (tagType == TAG_BAD) return SYNTAX_ERROR; + if (tagType == TAG_COMMENT) goto top; + + // handle allocation and check for "ID" and "IDREF" tags up front + yylval = newObject(); + yylval->idref = -1; + for (int i=0; i < attributeCount; i++) { + if (attributes[i][0] == 'I' && attributes[i][1] == 'D') { + // check for idref's, note: we ignore the tag, for + // this to work correctly, all idrefs must be unique + // across the whole serialization + if (attributes[i][2] == 'R' && attributes[i][3] == 'E' && + attributes[i][4] == 'F' && !attributes[i][5]) { + if (tagType != TAG_EMPTY) return SYNTAX_ERROR; + yylval->idref = strtol(values[i], NULL, 0); + return IDREF; + } + // check for id's + if (!attributes[i][2]) { + yylval->idref = strtol(values[i], NULL, 0); + } else { + return SYNTAX_ERROR; + } + } + } + + switch (*tag) { + case 'a': + if (!strcmp(tag, "array")) { + if (tagType == TAG_EMPTY) { + yylval->elements = NULL; + return ARRAY; + } + return (tagType == TAG_START) ? '(' : ')'; + } + break; + case 'd': + if (!strcmp(tag, "dict")) { + if (tagType == TAG_EMPTY) { + yylval->elements = NULL; + return DICTIONARY; + } + return (tagType == TAG_START) ? 
'{' : '}'; + } + if (!strcmp(tag, "data")) { + unsigned int size; + int readable = 0; + if (tagType == TAG_EMPTY) { + yylval->data = NULL; + yylval->size = 0; + return DATA; + } + for (int i=0; i < attributeCount; i++) { + if (!strcmp(attributes[i], "format") && !strcmp(values[i], "hex")) { + readable++; + break; + } + } + // CF encoded is the default form + if (readable) { + yylval->data = getHexData(&size); + } else { + yylval->data = getCFEncodedData(&size); + } + yylval->size = size; + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { + return SYNTAX_ERROR; + } + return DATA; + } + break; + case 'f': + if (!strcmp(tag, "false")) { + if (tagType == TAG_EMPTY) { + yylval->number = 0; + return BOOLEAN; + } + } + break; + case 'i': + if (!strcmp(tag, "integer")) { + yylval->size = 64; // default + for (int i=0; i < attributeCount; i++) { + if (!strcmp(attributes[i], "size")) { + yylval->size = strtoul(values[i], NULL, 0); + } + } + if (tagType == TAG_EMPTY) { + yylval->number = 0; + return NUMBER; + } + yylval->number = getNumber(); + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "integer")) { + return SYNTAX_ERROR; + } + return NUMBER; + } + break; + case 'k': + if (!strcmp(tag, "key")) { + if (tagType == TAG_EMPTY) return SYNTAX_ERROR; + yylval->string = getString(); + if (!yylval->string) { + return SYNTAX_ERROR; + } + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) + || strcmp(tag, "key")) { + return SYNTAX_ERROR; + } + return KEY; + } + break; + case 'p': + if (!strcmp(tag, "plist")) { + freeObject(yylval); + goto top; + } + break; + case 's': + if (!strcmp(tag, "string")) { + if (tagType == TAG_EMPTY) { + yylval->string = (char *)malloc(1); + *yylval->string = 0; + return STRING; + } + yylval->string = getString(); + if (!yylval->string) { + return SYNTAX_ERROR; + } + if ((getTag(tag, &attributeCount, attributes, values) != TAG_END) + || strcmp(tag, "string")) { + 
return SYNTAX_ERROR; + } + return STRING; + } + if (!strcmp(tag, "set")) { + if (tagType == TAG_EMPTY) { + yylval->elements = NULL; + return SET;; + } + if (tagType == TAG_START) { + return '['; + } else { + return ']'; + } + } + break; + case 't': + if (!strcmp(tag, "true")) { + if (tagType == TAG_EMPTY) { + yylval->number = 1; + return BOOLEAN; + } + } + break; + + default: + // XXX should we ignore invalid tags? + return SYNTAX_ERROR; + break; + } + + return 0; +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +// "java" like allocation, if this code hits a syntax error in the +// the middle of the parsed string we just bail with pointers hanging +// all over place, so this code helps keeps all together + +static object_t *objects = 0; +static object_t *freeObjects = 0; + +object_t * +newObject() +{ + object_t *o; + + if (freeObjects) { + o = freeObjects; + freeObjects = freeObjects->next; + } else { + o = (object_t *)malloc(sizeof(object_t)); + bzero(o, sizeof(object_t)); + o->free = objects; + objects = o; + } + + return o; +} + +void +freeObject(object_t *o) +{ + o->next = freeObjects; + freeObjects = o; +} + +void +cleanupObjects() +{ + object_t *t, *o = objects; + + while (o) { + if (o->object) { + printf("OSUnserializeXML: releasing object o=%x object=%x\n", (int)o, (int)o->object); + o->object->release(); + } + if (o->data) { + printf("OSUnserializeXML: freeing object o=%x data=%x\n", (int)o, (int)o->data); + free(o->data); + } + if (o->key) { + printf("OSUnserializeXML: releasing object o=%x key=%x\n", (int)o, (int)o->key); + o->key->release(); + } + if (o->string) { + printf("OSUnserializeXML: freeing object o=%x string=%x\n", (int)o, (int)o->string); + free(o->string); + } + + t = o; + o = o->free; + free(t); + } +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// 
!@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +static OSDictionary *tags; + +static void +rememberObject(int tag, OSObject *o) +{ + char key[16]; + sprintf(key, "%u", tag); + +//printf("remember key %s\n", key); + + tags->setObject(key, o); +} + +static object_t * +retrieveObject(int tag) +{ + char key[16]; + sprintf(key, "%u", tag); + +//printf("retrieve key '%s'\n", key); + + OSObject *ref = tags->getObject(key); + if (!ref) return 0; + + object_t *o = newObject(); + o->object = ref; + return o; +} + +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# +// !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# + +object_t * +buildOSDictionary(object_t * header) +{ + object_t *o, *t; + int count = 0; + + // get count and reverse order + o = header->elements; + header->elements = 0; + while (o) { + count++; + t = o; + o = o->next; + + t->next = header->elements; + header->elements = t; + } + + OSDictionary *d = OSDictionary::withCapacity(count); + + if (header->idref >= 0) rememberObject(header->idref, d); + + o = header->elements; + while (o) { + d->setObject(o->key, o->object); + o->object->release(); + o->object = 0; + o->key->release(); + o->key = 0; + t = o; + o = o->next; + freeObject(t); + } + o = header; + o->object = d; + return o; +}; + +object_t * +buildOSArray(object_t * header) +{ + object_t *o, *t; + int count = 0; + + // get count and reverse order + o = header->elements; + header->elements = 0; + while (o) { + count++; + t = o; + o = o->next; + + t->next = header->elements; + header->elements = t; + } + + OSArray *a = OSArray::withCapacity(count); + + if (header->idref >= 0) rememberObject(header->idref, a); + + o = header->elements; + while (o) { + a->setObject(o->object); + o->object->release(); + o->object = 0; + t = o; + o = o->next; + freeObject(t); + } + o = header; + 
o->object = a; + return o; +}; + +object_t * +buildOSSet(object_t *o) +{ + o = buildOSArray(o); + OSArray *a = (OSArray *)o->object; + + OSSet *s = OSSet::withArray(a, a->getCapacity()); + + //write over reference created in array + if (o->idref >= 0) rememberObject(o->idref, s); + + a->release(); + o->object = s; + return o; +}; + +object_t * +buildOSString(object_t *o) +{ + OSString *s = OSString::withCString(o->string); + + if (o->idref >= 0) rememberObject(o->idref, s); + + free(o->string); + o->string = 0; + o->object = s; + + return o; +}; + +object_t * +buildKey(object_t *o) +{ + const OSSymbol *s = OSSymbol::withCString(o->string); + + free(o->string); + o->string = 0; + o->key = s; + + return o; +}; + +object_t * +buildOSData(object_t *o) +{ + OSData *d; + + if (o->size) { + d = OSData::withBytes(o->data, o->size); + free(o->data); + } else { + d = OSData::withCapacity(0); + } + if (o->idref >= 0) rememberObject(o->idref, d); + + o->data = 0; + o->object = d; + return o; +}; + +object_t * +buildOSNumber(object_t *o) +{ + OSNumber *n = OSNumber::withNumber(o->number, o->size); + + if (o->idref >= 0) rememberObject(o->idref, n); + + o->object = n; + return o; +}; + +object_t * +buildOSBoolean(object_t *o) +{ + OSBoolean *b = OSBoolean::withBoolean(o->number != 0); + o->object = b; + return o; +}; + +__BEGIN_DECLS +#include +__END_DECLS + +static mutex_t *lock = 0; + +OSObject* +OSUnserializeXML(const char *buffer, OSString **errorString) +{ + OSObject *object; + + if (!lock) { + lock = mutex_alloc(ETAP_IO_AHA); + _mutex_lock(lock); + } else { + _mutex_lock(lock); + + } + + objects = 0; + freeObjects = 0; + yyerror_message[0] = 0; //just in case + parseBuffer = buffer; + parseBufferIndex = 0; + tags = OSDictionary::withCapacity(128); + if (yyparse() == 0) { + object = parsedObject; + if (errorString) *errorString = 0; + } else { + object = 0; + if (errorString) + *errorString = OSString::withCString(yyerror_message); + } + + cleanupObjects(); + 
tags->release(); + mutex_unlock(lock); + + return object; +} + + +// +// +// +// +// +// DO NOT EDIT OSUnserializeXML.cpp! +// +// this means you! +// +// +// +// +// diff --git a/libkern/c++/Tests/TestSerialization/CustomInfo.xml b/libkern/c++/Tests/TestSerialization/CustomInfo.xml new file mode 100644 index 000000000..2398ff4ce --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/CustomInfo.xml @@ -0,0 +1,15 @@ + + + + + Name + TestSerialization + Vendor + Your-Company + Version + 0.1 + Date + October 13, 1999 + + + diff --git a/libkern/c++/Tests/TestSerialization/Makefile b/libkern/c++/Tests/TestSerialization/Makefile new file mode 100644 index 000000000..1998d67f6 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/Makefile @@ -0,0 +1,43 @@ +# +# Generated by the Apple Project Builder. +# +# NOTE: Do NOT change this file -- Project Builder maintains it. +# +# Put all of your customizations in files called Makefile.preamble +# and Makefile.postamble (both optional), and Makefile will include them. 
+# + +NAME = TestSerialization + +PROJECTVERSION = 2.8 +PROJECT_TYPE = Kernel Extension + +TOOLS = test1.kmodproj test2.kmodproj + +OTHERSRCS = Makefile.preamble Makefile Makefile.postamble\ + CustomInfo.xml + +MAKEFILEDIR = $(MAKEFILEPATH)/pb_makefiles +CODE_GEN_STYLE = DYNAMIC +MAKEFILE = kext.make +NEXTSTEP_INSTALLDIR = /System/Library/Extensions +LIBS = +DEBUG_LIBS = $(LIBS) +PROF_LIBS = $(LIBS) +BUNDLE_EXTENSION = kext + + + + +NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc +NEXTSTEP_JAVA_COMPILER = /usr/bin/javac + +include $(MAKEFILEDIR)/platform.make + +-include Makefile.preamble + +include $(MAKEFILEDIR)/$(MAKEFILE) + +-include Makefile.postamble + +-include Makefile.dependencies diff --git a/libkern/c++/Tests/TestSerialization/Makefile.postamble b/libkern/c++/Tests/TestSerialization/Makefile.postamble new file mode 100644 index 000000000..411cde671 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/Makefile.postamble @@ -0,0 +1,100 @@ +############################################################################### +# Makefile.postamble +# Copyright 1997, Apple Computer, Inc. +# +# Use this makefile, which is imported after all other makefiles, to +# override attributes for a project's Makefile environment. This allows you +# to take advantage of the environment set up by the other Makefiles. +# You can also define custom rules at the end of this file. +# +############################################################################### +# +# These variables are exported by the standard makefiles and can be +# used in any customizations you make. They are *outputs* of +# the Makefiles and should be used, not set. +# +# PRODUCTS: products to install. All of these products will be placed in +# the directory $(DSTROOT)$(INSTALLDIR) +# GLOBAL_RESOURCE_DIR: The directory to which resources are copied. +# LOCAL_RESOURCE_DIR: The directory to which localized resources are copied. +# OFILE_DIR: Directory into which .o object files are generated. 
+# DERIVED_SRC_DIR: Directory used for all other derived files +# +# ALL_CFLAGS: flags to pass when compiling .c files +# ALL_MFLAGS: flags to pass when compiling .m files +# ALL_CCFLAGS: flags to pass when compiling .cc, .cxx, and .C files +# ALL_MMFLAGS: flags to pass when compiling .mm, .mxx, and .M files +# ALL_PRECOMPFLAGS: flags to pass when precompiling .h files +# ALL_LDFLAGS: flags to pass when linking object files +# ALL_LIBTOOL_FLAGS: flags to pass when libtooling object files +# ALL_PSWFLAGS: flags to pass when processing .psw and .pswm (pswrap) files +# ALL_RPCFLAGS: flags to pass when processing .rpc (rpcgen) files +# ALL_YFLAGS: flags to pass when processing .y (yacc) files +# ALL_LFLAGS: flags to pass when processing .l (lex) files +# +# NAME: name of application, bundle, subproject, palette, etc. +# LANGUAGES: langages in which the project is written (default "English") +# English_RESOURCES: localized resources (e.g. nib's, images) of project +# GLOBAL_RESOURCES: non-localized resources of project +# +# SRCROOT: base directory in which to place the new source files +# SRCPATH: relative path from SRCROOT to present subdirectory +# +# INSTALLDIR: Directory the product will be installed into by 'install' target +# PUBLIC_HDR_INSTALLDIR: where to install public headers. Don't forget +# to prefix this with DSTROOT when you use it. +# PRIVATE_HDR_INSTALLDIR: where to install private headers. Don't forget +# to prefix this with DSTROOT when you use it. +# +# EXECUTABLE_EXT: Executable extension for the platform (i.e. .exe on Windows) +# +############################################################################### + +# Some compiler flags can be overridden here for certain build situations. 
+# +# WARNING_CFLAGS: flag used to set warning level (defaults to -Wmost) +# DEBUG_SYMBOLS_CFLAGS: debug-symbol flag passed to all builds (defaults +# to -g) +# DEBUG_BUILD_CFLAGS: flags passed during debug builds (defaults to -DDEBUG) +# OPTIMIZE_BUILD_CFLAGS: flags passed during optimized builds (defaults +# to -O) +# PROFILE_BUILD_CFLAGS: flags passed during profile builds (defaults +# to -pg -DPROFILE) +# LOCAL_DIR_INCLUDE_DIRECTIVE: flag used to add current directory to +# the include path (defaults to -I.) +# DEBUG_BUILD_LDFLAGS, OPTIMIZE_BUILD_LDFLAGS, PROFILE_BUILD_LDFLAGS: flags +# passed to ld/libtool (defaults to nothing) + + +# Library and Framework projects only: +# INSTALL_NAME_DIRECTIVE: This directive ensures that executables linked +# against the framework will run against the correct version even if +# the current version of the framework changes. You may override this +# to "" as an alternative to using the DYLD_LIBRARY_PATH during your +# development cycle, but be sure to restore it before installing. + + +# Ownership and permissions of files installed by 'install' target + +#INSTALL_AS_USER = root + # User/group ownership +#INSTALL_AS_GROUP = wheel + # (probably want to set both of these) +#INSTALL_PERMISSIONS = + # If set, 'install' chmod's executable to this + + +# Options to strip. Note: -S strips debugging symbols (executables can be stripped +# down further with -x or, if they load no bundles, with no options at all). + +#STRIPFLAGS = -S + + +######################################################################### +# Put rules to extend the behavior of the standard Makefiles here. Include them in +# the dependency tree via cvariables like AFTER_INSTALL in the Makefile.preamble. +# +# You should avoid redefining things like "install" or "app", as they are +# owned by the top-level Makefile API and no context has been set up for where +# derived files should go. 
+# diff --git a/libkern/c++/Tests/TestSerialization/Makefile.preamble b/libkern/c++/Tests/TestSerialization/Makefile.preamble new file mode 100644 index 000000000..c1624b450 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/Makefile.preamble @@ -0,0 +1,137 @@ +############################################################################### +# Makefile.preamble +# Copyright 1997, Apple Computer, Inc. +# +# Use this makefile for configuring the standard application makefiles +# associated with ProjectBuilder. It is included before the main makefile. +# In Makefile.preamble you set attributes for a project, so they are available +# to the project's makefiles. In contrast, you typically write additional rules or +# override built-in behavior in the Makefile.postamble. +# +# Each directory in a project tree (main project plus subprojects) should +# have its own Makefile.preamble and Makefile.postamble. +############################################################################### +# +# Before the main makefile is included for this project, you may set: +# +# MAKEFILEDIR: Directory in which to find $(MAKEFILE) +# MAKEFILE: Top level mechanism Makefile (e.g., app.make, bundle.make) + +# Compiler/linker flags added to the defaults: The OTHER_* variables will be +# inherited by all nested sub-projects, but the LOCAL_ versions of the same +# variables will not. Put your -I, -D, -U, and -L flags in ProjectBuilder's +# Build Attributes inspector if at all possible. To override the default flags +# that get passed to ${CC} (e.g. change -O to -O2), see Makefile.postamble. The +# variables below are *inputs* to the build process and distinct from the override +# settings done (less often) in the Makefile.postamble. +# +# OTHER_CFLAGS, LOCAL_CFLAGS: additional flags to pass to the compiler +# Note that $(OTHER_CFLAGS) and $(LOCAL_CFLAGS) are used for .h, ...c, .m, +# .cc, .cxx, .C, and .M files. There is no need to respecify the +# flags in OTHER_MFLAGS, etc. 
+# OTHER_MFLAGS, LOCAL_MFLAGS: additional flags for .m files +# OTHER_CCFLAGS, LOCAL_CCFLAGS: additional flags for .cc, .cxx, and .C files +# OTHER_MMFLAGS, LOCAL_MMFLAGS: additional flags for .mm and .M files +# OTHER_PRECOMPFLAGS, LOCAL_PRECOMPFLAGS: additional flags used when +# precompiling header files +# OTHER_LDFLAGS, LOCAL_LDFLAGS: additional flags passed to ld and libtool +# OTHER_PSWFLAGS, LOCAL_PSWFLAGS: additional flags passed to pswrap +# OTHER_RPCFLAGS, LOCAL_RPCFLAGS: additional flags passed to rpcgen +# OTHER_YFLAGS, LOCAL_YFLAGS: additional flags passed to yacc +# OTHER_LFLAGS, LOCAL_LFLAGS: additional flags passed to lex + +# These variables provide hooks enabling you to add behavior at almost every +# stage of the make: +# +# BEFORE_PREBUILD: targets to build before installing headers for a subproject +# AFTER_PREBUILD: targets to build after installing headers for a subproject +# BEFORE_BUILD_RECURSION: targets to make before building subprojects +# BEFORE_BUILD: targets to make before a build, but after subprojects +# AFTER_BUILD: targets to make after a build +# +# BEFORE_INSTALL: targets to build before installing the product +# AFTER_INSTALL: targets to build after installing the product +# BEFORE_POSTINSTALL: targets to build before postinstalling every subproject +# AFTER_POSTINSTALL: targets to build after postinstalling every subproject +# +# BEFORE_INSTALLHDRS: targets to build before installing headers for a +# subproject +# AFTER_INSTALLHDRS: targets to build after installing headers for a subproject +# BEFORE_INSTALLSRC: targets to build before installing source for a subproject +# AFTER_INSTALLSRC: targets to build after installing source for a subproject +# +# BEFORE_DEPEND: targets to build before building dependencies for a +# subproject +# AFTER_DEPEND: targets to build after building dependencies for a +# subproject +# +# AUTOMATIC_DEPENDENCY_INFO: if YES, then the dependency file is +# updated every time the project is built.
If NO, the dependency +# file is only built when the depend target is invoked. + +# Framework-related variables: +# FRAMEWORK_DLL_INSTALLDIR: On Windows platforms, this variable indicates +# where to put the framework's DLL. This variable defaults to +# $(INSTALLDIR)/../Executables + +# Library-related variables: +# PUBLIC_HEADER_DIR: Determines where public exported header files +# should be installed. Do not include $(DSTROOT) in this value -- +# it is prefixed automatically. For library projects you should +# set this to something like /Developer/Headers/$(NAME). Do not set +# this variable for framework projects unless you do not want the +# header files included in the framework. +# PRIVATE_HEADER_DIR: Determines where private exported header files +# should be installed. Do not include $(DSTROOT) in this value -- +# it is prefixed automatically. +# LIBRARY_STYLE: This may be either STATIC or DYNAMIC, and determines +# whether the libraries produced are statically linked when they +# are used or if they are dynamically loadable. This defaults to +# DYNAMIC. +# LIBRARY_DLL_INSTALLDIR: On Windows platforms, this variable indicates +# where to put the library's DLL. This variable defaults to +# $(INSTALLDIR)/../Executables +# +# INSTALL_AS_USER: owner of the intalled products (default root) +# INSTALL_AS_GROUP: group of the installed products (default wheel) +# INSTALL_PERMISSIONS: permissions of the installed product (default o+rX) +# +# OTHER_RECURSIVE_VARIABLES: The names of variables which you want to be +# passed on the command line to recursive invocations of make. Note that +# the values in OTHER_*FLAGS are inherited by subprojects automatically -- +# you do not have to (and shouldn't) add OTHER_*FLAGS to +# OTHER_RECURSIVE_VARIABLES. 
+ +# Additional headers to export beyond those in the PB.project: +# OTHER_PUBLIC_HEADERS +# OTHER_PROJECT_HEADERS +# OTHER_PRIVATE_HEADERS + +# Additional files for the project's product: <> +# OTHER_RESOURCES: (non-localized) resources for this project +# OTHER_OFILES: relocatables to be linked into this project +# OTHER_LIBS: more libraries to link against +# OTHER_PRODUCT_DEPENDS: other dependencies of this project +# OTHER_SOURCEFILES: other source files maintained by .pre/postamble +# OTHER_GARBAGE: additional files to be removed by `make clean' + +# Set this to YES if you don't want a final libtool call for a library/framework. +# BUILD_OFILES_LIST_ONLY + +# To include a version string, project source must exist in a directory named +# $(NAME).%d[.%d][.%d] and the following line must be uncommented. +# OTHER_GENERATED_OFILES = $(VERS_OFILE) + +# This definition will suppress stripping of debug symbols when an executable +# is installed. By default it is YES. +# STRIP_ON_INSTALL = NO + +# Uncomment to suppress generation of a KeyValueCoding index when installing +# frameworks (This index is used by WOB and IB to determine keys available +# for an object). Set to YES by default. +# PREINDEX_FRAMEWORK = NO + +# Change this definition to install projects somewhere other than the +# standard locations. NEXT_ROOT defaults to "C:/Apple" on Windows systems +# and "" on other systems. 
+DSTROOT = $(HOME) diff --git a/libkern/c++/Tests/TestSerialization/PB.project b/libkern/c++/Tests/TestSerialization/PB.project new file mode 100644 index 000000000..a14195b01 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/PB.project @@ -0,0 +1,17 @@ +{ + BUNDLE_EXTENSION = kext; + DYNAMIC_CODE_GEN = YES; + FILESTABLE = { + OTHER_SOURCES = (Makefile.preamble, Makefile, Makefile.postamble, CustomInfo.xml); + SUBPROJECTS = (test1.kmodproj, test2.kmodproj); + }; + LANGUAGE = English; + MAKEFILEDIR = "$(MAKEFILEPATH)/pb_makefiles"; + NEXTSTEP_BUILDTOOL = /bin/gnumake; + NEXTSTEP_INSTALLDIR = /System/Library/Extensions; + NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; + NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; + PROJECTNAME = TestSerialization; + PROJECTTYPE = "Kernel Extension"; + PROJECTVERSION = 2.8; +} diff --git a/libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist b/libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist new file mode 100644 index 000000000..22b403e4f --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/PBUserInfo/PBUserInfo_root.plist @@ -0,0 +1 @@ +{NSMACHOperatingSystem = {Archs = "18 "; Target = extension; }; } diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml b/libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml new file mode 100644 index 000000000..f3b0eeae1 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/CustomInfo.xml @@ -0,0 +1,24 @@ + + + + + Module + + Version + 0.1 + Name + test1 + File + test1 + Initialize + test1_start + Finalize + test1_stop + Target + Kernel + Format + mach-o + + + + diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile new file mode 100644 index 000000000..5b476501e --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile @@ -0,0 +1,49 @@ +# +# Generated by the Apple Project Builder. 
+# +# NOTE: Do NOT change this file -- Project Builder maintains it. +# +# Put all of your customizations in files called Makefile.preamble +# and Makefile.postamble (both optional), and Makefile will include them. +# + +NAME = test1 + +PROJECTVERSION = 2.8 +PROJECT_TYPE = Kernel Module + +CPPFILES = test1_main.cpp + +HFILES = test1_main.h + +OTHERSRCS = Makefile.preamble Makefile Makefile.postamble\ + CustomInfo.xml + +MAKEFILEDIR = $(MAKEFILEPATH)/pb_makefiles +CODE_GEN_STYLE = DYNAMIC +MAKEFILE = kmod.make +NEXTSTEP_INSTALLDIR = /System/Library/Extensions +LIBS = +DEBUG_LIBS = $(LIBS) +PROF_LIBS = $(LIBS) + + +NEXTSTEP_PB_CFLAGS = -Wno-format + + +NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc +WINDOWS_OBJCPLUS_COMPILER = $(DEVDIR)/gcc +PDO_UNIX_OBJCPLUS_COMPILER = $(NEXTDEV_BIN)/gcc +NEXTSTEP_JAVA_COMPILER = /usr/bin/javac +WINDOWS_JAVA_COMPILER = $(JDKBINDIR)/javac.exe +PDO_UNIX_JAVA_COMPILER = $(JDKBINDIR)/javac + +include $(MAKEFILEDIR)/platform.make + +-include Makefile.preamble + +include $(MAKEFILEDIR)/$(MAKEFILE) + +-include Makefile.postamble + +-include Makefile.dependencies diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble new file mode 100644 index 000000000..411cde671 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.postamble @@ -0,0 +1,100 @@ +############################################################################### +# Makefile.postamble +# Copyright 1997, Apple Computer, Inc. +# +# Use this makefile, which is imported after all other makefiles, to +# override attributes for a project's Makefile environment. This allows you +# to take advantage of the environment set up by the other Makefiles. +# You can also define custom rules at the end of this file. 
+# +############################################################################### +# +# These variables are exported by the standard makefiles and can be +# used in any customizations you make. They are *outputs* of +# the Makefiles and should be used, not set. +# +# PRODUCTS: products to install. All of these products will be placed in +# the directory $(DSTROOT)$(INSTALLDIR) +# GLOBAL_RESOURCE_DIR: The directory to which resources are copied. +# LOCAL_RESOURCE_DIR: The directory to which localized resources are copied. +# OFILE_DIR: Directory into which .o object files are generated. +# DERIVED_SRC_DIR: Directory used for all other derived files +# +# ALL_CFLAGS: flags to pass when compiling .c files +# ALL_MFLAGS: flags to pass when compiling .m files +# ALL_CCFLAGS: flags to pass when compiling .cc, .cxx, and .C files +# ALL_MMFLAGS: flags to pass when compiling .mm, .mxx, and .M files +# ALL_PRECOMPFLAGS: flags to pass when precompiling .h files +# ALL_LDFLAGS: flags to pass when linking object files +# ALL_LIBTOOL_FLAGS: flags to pass when libtooling object files +# ALL_PSWFLAGS: flags to pass when processing .psw and .pswm (pswrap) files +# ALL_RPCFLAGS: flags to pass when processing .rpc (rpcgen) files +# ALL_YFLAGS: flags to pass when processing .y (yacc) files +# ALL_LFLAGS: flags to pass when processing .l (lex) files +# +# NAME: name of application, bundle, subproject, palette, etc. +# LANGUAGES: languages in which the project is written (default "English") +# English_RESOURCES: localized resources (e.g. nib's, images) of project +# GLOBAL_RESOURCES: non-localized resources of project +# +# SRCROOT: base directory in which to place the new source files +# SRCPATH: relative path from SRCROOT to present subdirectory +# +# INSTALLDIR: Directory the product will be installed into by 'install' target +# PUBLIC_HDR_INSTALLDIR: where to install public headers. Don't forget +# to prefix this with DSTROOT when you use it.
+# PRIVATE_HDR_INSTALLDIR: where to install private headers. Don't forget +# to prefix this with DSTROOT when you use it. +# +# EXECUTABLE_EXT: Executable extension for the platform (i.e. .exe on Windows) +# +############################################################################### + +# Some compiler flags can be overridden here for certain build situations. +# +# WARNING_CFLAGS: flag used to set warning level (defaults to -Wmost) +# DEBUG_SYMBOLS_CFLAGS: debug-symbol flag passed to all builds (defaults +# to -g) +# DEBUG_BUILD_CFLAGS: flags passed during debug builds (defaults to -DDEBUG) +# OPTIMIZE_BUILD_CFLAGS: flags passed during optimized builds (defaults +# to -O) +# PROFILE_BUILD_CFLAGS: flags passed during profile builds (defaults +# to -pg -DPROFILE) +# LOCAL_DIR_INCLUDE_DIRECTIVE: flag used to add current directory to +# the include path (defaults to -I.) +# DEBUG_BUILD_LDFLAGS, OPTIMIZE_BUILD_LDFLAGS, PROFILE_BUILD_LDFLAGS: flags +# passed to ld/libtool (defaults to nothing) + + +# Library and Framework projects only: +# INSTALL_NAME_DIRECTIVE: This directive ensures that executables linked +# against the framework will run against the correct version even if +# the current version of the framework changes. You may override this +# to "" as an alternative to using the DYLD_LIBRARY_PATH during your +# development cycle, but be sure to restore it before installing. + + +# Ownership and permissions of files installed by 'install' target + +#INSTALL_AS_USER = root + # User/group ownership +#INSTALL_AS_GROUP = wheel + # (probably want to set both of these) +#INSTALL_PERMISSIONS = + # If set, 'install' chmod's executable to this + + +# Options to strip. Note: -S strips debugging symbols (executables can be stripped +# down further with -x or, if they load no bundles, with no options at all). 
+ +#STRIPFLAGS = -S + + +######################################################################### +# Put rules to extend the behavior of the standard Makefiles here. Include them in +# the dependency tree via cvariables like AFTER_INSTALL in the Makefile.preamble. +# +# You should avoid redefining things like "install" or "app", as they are +# owned by the top-level Makefile API and no context has been set up for where +# derived files should go. +# diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble new file mode 100644 index 000000000..c1624b450 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/Makefile.preamble @@ -0,0 +1,137 @@ +############################################################################### +# Makefile.preamble +# Copyright 1997, Apple Computer, Inc. +# +# Use this makefile for configuring the standard application makefiles +# associated with ProjectBuilder. It is included before the main makefile. +# In Makefile.preamble you set attributes for a project, so they are available +# to the project's makefiles. In contrast, you typically write additional rules or +# override built-in behavior in the Makefile.postamble. +# +# Each directory in a project tree (main project plus subprojects) should +# have its own Makefile.preamble and Makefile.postamble. +############################################################################### +# +# Before the main makefile is included for this project, you may set: +# +# MAKEFILEDIR: Directory in which to find $(MAKEFILE) +# MAKEFILE: Top level mechanism Makefile (e.g., app.make, bundle.make) + +# Compiler/linker flags added to the defaults: The OTHER_* variables will be +# inherited by all nested sub-projects, but the LOCAL_ versions of the same +# variables will not. Put your -I, -D, -U, and -L flags in ProjectBuilder's +# Build Attributes inspector if at all possible. 
To override the default flags +# that get passed to ${CC} (e.g. change -O to -O2), see Makefile.postamble. The +# variables below are *inputs* to the build process and distinct from the override +# settings done (less often) in the Makefile.postamble. +# +# OTHER_CFLAGS, LOCAL_CFLAGS: additional flags to pass to the compiler +# Note that $(OTHER_CFLAGS) and $(LOCAL_CFLAGS) are used for .h, ...c, .m, +# .cc, .cxx, .C, and .M files. There is no need to respecify the +# flags in OTHER_MFLAGS, etc. +# OTHER_MFLAGS, LOCAL_MFLAGS: additional flags for .m files +# OTHER_CCFLAGS, LOCAL_CCFLAGS: additional flags for .cc, .cxx, and ...C files +# OTHER_MMFLAGS, LOCAL_MMFLAGS: additional flags for .mm and .M files +# OTHER_PRECOMPFLAGS, LOCAL_PRECOMPFLAGS: additional flags used when +# precompiling header files +# OTHER_LDFLAGS, LOCAL_LDFLAGS: additional flags passed to ld and libtool +# OTHER_PSWFLAGS, LOCAL_PSWFLAGS: additional flags passed to pswrap +# OTHER_RPCFLAGS, LOCAL_RPCFLAGS: additional flags passed to rpcgen +# OTHER_YFLAGS, LOCAL_YFLAGS: additional flags passed to yacc +# OTHER_LFLAGS, LOCAL_LFLAGS: additional flags passed to lex + +# These variables provide hooks enabling you to add behavior at almost every +# stage of the make: +# +# BEFORE_PREBUILD: targets to build before installing headers for a subproject +# AFTER_PREBUILD: targets to build after installing headers for a subproject +# BEFORE_BUILD_RECURSION: targets to make before building subprojects +# BEFORE_BUILD: targets to make before a build, but after subprojects +# AFTER_BUILD: targets to make after a build +# +# BEFORE_INSTALL: targets to build before installing the product +# AFTER_INSTALL: targets to build after installing the product +# BEFORE_POSTINSTALL: targets to build before postinstalling every subproject +# AFTER_POSTINSTALL: targts to build after postinstalling every subproject +# +# BEFORE_INSTALLHDRS: targets to build before installing headers for a +# subproject +# AFTER_INSTALLHDRS: 
targets to build after installing headers for a subproject +# BEFORE_INSTALLSRC: targets to build before installing source for a subproject +# AFTER_INSTALLSRC: targets to build after installing source for a subproject +# +# BEFORE_DEPEND: targets to build before building dependencies for a +# subproject +# AFTER_DEPEND: targets to build after building dependencies for a +# subproject +# +# AUTOMATIC_DEPENDENCY_INFO: if YES, then the dependency file is +# updated every time the project is built. If NO, the dependency +# file is only built when the depend target is invoked. + +# Framework-related variables: +# FRAMEWORK_DLL_INSTALLDIR: On Windows platforms, this variable indicates +# where to put the framework's DLL. This variable defaults to +# $(INSTALLDIR)/../Executables + +# Library-related variables: +# PUBLIC_HEADER_DIR: Determines where public exported header files +# should be installed. Do not include $(DSTROOT) in this value -- +# it is prefixed automatically. For library projects you should +# set this to something like /Developer/Headers/$(NAME). Do not set +# this variable for framework projects unless you do not want the +# header files included in the framework. +# PRIVATE_HEADER_DIR: Determines where private exported header files +# should be installed. Do not include $(DSTROOT) in this value -- +# it is prefixed automatically. +# LIBRARY_STYLE: This may be either STATIC or DYNAMIC, and determines +# whether the libraries produced are statically linked when they +# are used or if they are dynamically loadable. This defaults to +# DYNAMIC. +# LIBRARY_DLL_INSTALLDIR: On Windows platforms, this variable indicates +# where to put the library's DLL. 
This variable defaults to +# $(INSTALLDIR)/../Executables +# +# INSTALL_AS_USER: owner of the installed products (default root) +# INSTALL_AS_GROUP: group of the installed products (default wheel) +# INSTALL_PERMISSIONS: permissions of the installed product (default o+rX) +# +# OTHER_RECURSIVE_VARIABLES: The names of variables which you want to be +# passed on the command line to recursive invocations of make. Note that +# the values in OTHER_*FLAGS are inherited by subprojects automatically -- +# you do not have to (and shouldn't) add OTHER_*FLAGS to +# OTHER_RECURSIVE_VARIABLES. + +# Additional headers to export beyond those in the PB.project: +# OTHER_PUBLIC_HEADERS +# OTHER_PROJECT_HEADERS +# OTHER_PRIVATE_HEADERS + +# Additional files for the project's product: <> +# OTHER_RESOURCES: (non-localized) resources for this project +# OTHER_OFILES: relocatables to be linked into this project +# OTHER_LIBS: more libraries to link against +# OTHER_PRODUCT_DEPENDS: other dependencies of this project +# OTHER_SOURCEFILES: other source files maintained by .pre/postamble +# OTHER_GARBAGE: additional files to be removed by `make clean' + +# Set this to YES if you don't want a final libtool call for a library/framework. +# BUILD_OFILES_LIST_ONLY + +# To include a version string, project source must exist in a directory named +# $(NAME).%d[.%d][.%d] and the following line must be uncommented. +# OTHER_GENERATED_OFILES = $(VERS_OFILE) + +# This definition will suppress stripping of debug symbols when an executable +# is installed. By default it is YES. +# STRIP_ON_INSTALL = NO + +# Uncomment to suppress generation of a KeyValueCoding index when installing +# frameworks (This index is used by WOB and IB to determine keys available +# for an object). Set to YES by default. +# PREINDEX_FRAMEWORK = NO + +# Change this definition to install projects somewhere other than the +# standard locations. NEXT_ROOT defaults to "C:/Apple" on Windows systems +# and "" on other systems.
+DSTROOT = $(HOME) diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project b/libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project new file mode 100644 index 000000000..771c5728e --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/PB.project @@ -0,0 +1,25 @@ +{ + DYNAMIC_CODE_GEN = NO; + FILESTABLE = { + CLASSES = (test1_main.cpp); + H_FILES = (test1_main.h); + OTHER_SOURCES = (Makefile.preamble, Makefile, Makefile.postamble, CustomInfo.xml); + }; + LANGUAGE = English; + LOCALIZABLE_FILES = {}; + MAKEFILEDIR = "$(MAKEFILEPATH)/pb_makefiles"; + NEXTSTEP_BUILDTOOL = /bin/gnumake; + NEXTSTEP_COMPILEROPTIONS = "-Wno-format"; + NEXTSTEP_INSTALLDIR = /System/Library/Extensions; + NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; + NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; + PDO_UNIX_BUILDTOOL = $NEXT_ROOT/Developer/bin/make; + PDO_UNIX_JAVA_COMPILER = "$(JDKBINDIR)/javac"; + PDO_UNIX_OBJCPLUS_COMPILER = "$(NEXTDEV_BIN)/gcc"; + PROJECTNAME = test1; + PROJECTTYPE = "Kernel Module"; + PROJECTVERSION = 2.8; + WINDOWS_BUILDTOOL = $NEXT_ROOT/Developer/Executables/make; + WINDOWS_JAVA_COMPILER = "$(JDKBINDIR)/javac.exe"; + WINDOWS_OBJCPLUS_COMPILER = "$(DEVDIR)/gcc"; +} diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.cpp b/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.cpp new file mode 100644 index 000000000..d9f86d6b2 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.cpp @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +__BEGIN_DECLS +#include +#include +#include + +kmod_start_func_t test1_start; +kmod_stop_func_t test1_stop; +__END_DECLS + +#include +#include + +char *testBuffer = " +{ string = \"this is a 'string' with spaces\"; + string2 = 'this is also a \"string\" with spaces'; + offset = 16384:32; + true = .true.; + false = .false.; + data = <0123 4567 89abcdef>; + array = (1:8, 2:16, 3:32, 4:64 ); + set = [ one, two, three, four ]; + emptydict = { }@1; + emptyarray = ( )@2; + emptyset = [ ]@3; + emptydata = < >@4; + emptydict2 = @1; + emptyarray2 = @2; + emptyset2 = @3; + emptydata2 = @4; + dict2 = { string = asdfasdf; }; + dict3 = { string = asdfasdf; }; +}@0"; + +kern_return_t +test1_start(struct kmod_info *ki, void *data) +{ + IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); + + // test unserialize + OSString *errmsg; + OSObject *d = OSUnserialize(testBuffer, &errmsg); + if (!d) { + IOLog("%s\n", errmsg->getCStringNoCopy()); + return KMOD_RETURN_SUCCESS; + } + + // test serialize + OSSerialize *s = OSSerialize::withCapacity(5); + if (!d->serialize(s)) { + IOLog("serialization failed\n"); + return KMOD_RETURN_SUCCESS; + } + + IOLog("serialized object's length = %d, capacity = %d\n", s->getLength(), s->getCapacity()); + IOLog("object unformatted = %s\n", s->text()); + + // try second time + OSObject *d2 = OSUnserializeXML(s->text(), &errmsg); + if (!d2) { + IOLog("%s\n", errmsg->getCStringNoCopy()); + return KMOD_RETURN_SUCCESS; + } + + 
IOLog("\nserialized objects compared %ssuccessfully objectwise\n\n", + d->isEqualTo(d2) ? "":"un"); + + if (d2) d2->release(); + s->release(); + if (d) d->release(); + + return KMOD_RETURN_SUCCESS; +} + +kern_return_t +test1_stop(struct kmod_info *ki, void *data) +{ + return KMOD_RETURN_SUCCESS; +} diff --git a/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h b/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h new file mode 100644 index 000000000..5c8af20b3 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test1.kmodproj/test1_main.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml b/libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml new file mode 100755 index 000000000..b7e7e2716 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2.kmodproj/CustomInfo.xml @@ -0,0 +1,24 @@ + + + + + Module + + Version + 0.1 + Name + test2 + File + test2 + Initialize + test2_start + Finalize + test2_stop + Target + Kernel + Format + mach-o + + + + diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile new file mode 100644 index 000000000..a34b54cb3 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile @@ -0,0 +1,47 @@ +# +# Generated by the Apple Project Builder. +# +# NOTE: Do NOT change this file -- Project Builder maintains it. +# +# Put all of your customizations in files called Makefile.preamble +# and Makefile.postamble (both optional), and Makefile will include them. 
+# + +NAME = test2 + +PROJECTVERSION = 2.8 +PROJECT_TYPE = Kernel Module + +CPPFILES = test2_main.cpp + +OTHERSRCS = Makefile.preamble Makefile Makefile.postamble\ + CustomInfo.xml + +MAKEFILEDIR = $(MAKEFILEPATH)/pb_makefiles +CODE_GEN_STYLE = DYNAMIC +MAKEFILE = kmod.make +NEXTSTEP_INSTALLDIR = /System/Library/Extensions +LIBS = +DEBUG_LIBS = $(LIBS) +PROF_LIBS = $(LIBS) + + +NEXTSTEP_PB_CFLAGS = -Wno-format + + +NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc +WINDOWS_OBJCPLUS_COMPILER = $(DEVDIR)/gcc +PDO_UNIX_OBJCPLUS_COMPILER = $(NEXTDEV_BIN)/gcc +NEXTSTEP_JAVA_COMPILER = /usr/bin/javac +WINDOWS_JAVA_COMPILER = $(JDKBINDIR)/javac.exe +PDO_UNIX_JAVA_COMPILER = $(JDKBINDIR)/javac + +include $(MAKEFILEDIR)/platform.make + +-include Makefile.preamble + +include $(MAKEFILEDIR)/$(MAKEFILE) + +-include Makefile.postamble + +-include Makefile.dependencies diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble new file mode 100644 index 000000000..411cde671 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.postamble @@ -0,0 +1,100 @@ +############################################################################### +# Makefile.postamble +# Copyright 1997, Apple Computer, Inc. +# +# Use this makefile, which is imported after all other makefiles, to +# override attributes for a project's Makefile environment. This allows you +# to take advantage of the environment set up by the other Makefiles. +# You can also define custom rules at the end of this file. +# +############################################################################### +# +# These variables are exported by the standard makefiles and can be +# used in any customizations you make. They are *outputs* of +# the Makefiles and should be used, not set. +# +# PRODUCTS: products to install. 
All of these products will be placed in +# the directory $(DSTROOT)$(INSTALLDIR) +# GLOBAL_RESOURCE_DIR: The directory to which resources are copied. +# LOCAL_RESOURCE_DIR: The directory to which localized resources are copied. +# OFILE_DIR: Directory into which .o object files are generated. +# DERIVED_SRC_DIR: Directory used for all other derived files +# +# ALL_CFLAGS: flags to pass when compiling .c files +# ALL_MFLAGS: flags to pass when compiling .m files +# ALL_CCFLAGS: flags to pass when compiling .cc, .cxx, and .C files +# ALL_MMFLAGS: flags to pass when compiling .mm, .mxx, and .M files +# ALL_PRECOMPFLAGS: flags to pass when precompiling .h files +# ALL_LDFLAGS: flags to pass when linking object files +# ALL_LIBTOOL_FLAGS: flags to pass when libtooling object files +# ALL_PSWFLAGS: flags to pass when processing .psw and .pswm (pswrap) files +# ALL_RPCFLAGS: flags to pass when processing .rpc (rpcgen) files +# ALL_YFLAGS: flags to pass when processing .y (yacc) files +# ALL_LFLAGS: flags to pass when processing .l (lex) files +# +# NAME: name of application, bundle, subproject, palette, etc. +# LANGUAGES: langages in which the project is written (default "English") +# English_RESOURCES: localized resources (e.g. nib's, images) of project +# GLOBAL_RESOURCES: non-localized resources of project +# +# SRCROOT: base directory in which to place the new source files +# SRCPATH: relative path from SRCROOT to present subdirectory +# +# INSTALLDIR: Directory the product will be installed into by 'install' target +# PUBLIC_HDR_INSTALLDIR: where to install public headers. Don't forget +# to prefix this with DSTROOT when you use it. +# PRIVATE_HDR_INSTALLDIR: where to install private headers. Don't forget +# to prefix this with DSTROOT when you use it. +# +# EXECUTABLE_EXT: Executable extension for the platform (i.e. 
.exe on Windows) +# +############################################################################### + +# Some compiler flags can be overridden here for certain build situations. +# +# WARNING_CFLAGS: flag used to set warning level (defaults to -Wmost) +# DEBUG_SYMBOLS_CFLAGS: debug-symbol flag passed to all builds (defaults +# to -g) +# DEBUG_BUILD_CFLAGS: flags passed during debug builds (defaults to -DDEBUG) +# OPTIMIZE_BUILD_CFLAGS: flags passed during optimized builds (defaults +# to -O) +# PROFILE_BUILD_CFLAGS: flags passed during profile builds (defaults +# to -pg -DPROFILE) +# LOCAL_DIR_INCLUDE_DIRECTIVE: flag used to add current directory to +# the include path (defaults to -I.) +# DEBUG_BUILD_LDFLAGS, OPTIMIZE_BUILD_LDFLAGS, PROFILE_BUILD_LDFLAGS: flags +# passed to ld/libtool (defaults to nothing) + + +# Library and Framework projects only: +# INSTALL_NAME_DIRECTIVE: This directive ensures that executables linked +# against the framework will run against the correct version even if +# the current version of the framework changes. You may override this +# to "" as an alternative to using the DYLD_LIBRARY_PATH during your +# development cycle, but be sure to restore it before installing. + + +# Ownership and permissions of files installed by 'install' target + +#INSTALL_AS_USER = root + # User/group ownership +#INSTALL_AS_GROUP = wheel + # (probably want to set both of these) +#INSTALL_PERMISSIONS = + # If set, 'install' chmod's executable to this + + +# Options to strip. Note: -S strips debugging symbols (executables can be stripped +# down further with -x or, if they load no bundles, with no options at all). + +#STRIPFLAGS = -S + + +######################################################################### +# Put rules to extend the behavior of the standard Makefiles here. Include them in +# the dependency tree via cvariables like AFTER_INSTALL in the Makefile.preamble. 
+# +# You should avoid redefining things like "install" or "app", as they are +# owned by the top-level Makefile API and no context has been set up for where +# derived files should go. +# diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble new file mode 100644 index 000000000..c1624b450 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2.kmodproj/Makefile.preamble @@ -0,0 +1,137 @@ +############################################################################### +# Makefile.preamble +# Copyright 1997, Apple Computer, Inc. +# +# Use this makefile for configuring the standard application makefiles +# associated with ProjectBuilder. It is included before the main makefile. +# In Makefile.preamble you set attributes for a project, so they are available +# to the project's makefiles. In contrast, you typically write additional rules or +# override built-in behavior in the Makefile.postamble. +# +# Each directory in a project tree (main project plus subprojects) should +# have its own Makefile.preamble and Makefile.postamble. +############################################################################### +# +# Before the main makefile is included for this project, you may set: +# +# MAKEFILEDIR: Directory in which to find $(MAKEFILE) +# MAKEFILE: Top level mechanism Makefile (e.g., app.make, bundle.make) + +# Compiler/linker flags added to the defaults: The OTHER_* variables will be +# inherited by all nested sub-projects, but the LOCAL_ versions of the same +# variables will not. Put your -I, -D, -U, and -L flags in ProjectBuilder's +# Build Attributes inspector if at all possible. To override the default flags +# that get passed to ${CC} (e.g. change -O to -O2), see Makefile.postamble. The +# variables below are *inputs* to the build process and distinct from the override +# settings done (less often) in the Makefile.postamble. 
+# +# OTHER_CFLAGS, LOCAL_CFLAGS: additional flags to pass to the compiler +# Note that $(OTHER_CFLAGS) and $(LOCAL_CFLAGS) are used for .h, ...c, .m, +# .cc, .cxx, .C, and .M files. There is no need to respecify the +# flags in OTHER_MFLAGS, etc. +# OTHER_MFLAGS, LOCAL_MFLAGS: additional flags for .m files +# OTHER_CCFLAGS, LOCAL_CCFLAGS: additional flags for .cc, .cxx, and ...C files +# OTHER_MMFLAGS, LOCAL_MMFLAGS: additional flags for .mm and .M files +# OTHER_PRECOMPFLAGS, LOCAL_PRECOMPFLAGS: additional flags used when +# precompiling header files +# OTHER_LDFLAGS, LOCAL_LDFLAGS: additional flags passed to ld and libtool +# OTHER_PSWFLAGS, LOCAL_PSWFLAGS: additional flags passed to pswrap +# OTHER_RPCFLAGS, LOCAL_RPCFLAGS: additional flags passed to rpcgen +# OTHER_YFLAGS, LOCAL_YFLAGS: additional flags passed to yacc +# OTHER_LFLAGS, LOCAL_LFLAGS: additional flags passed to lex + +# These variables provide hooks enabling you to add behavior at almost every +# stage of the make: +# +# BEFORE_PREBUILD: targets to build before installing headers for a subproject +# AFTER_PREBUILD: targets to build after installing headers for a subproject +# BEFORE_BUILD_RECURSION: targets to make before building subprojects +# BEFORE_BUILD: targets to make before a build, but after subprojects +# AFTER_BUILD: targets to make after a build +# +# BEFORE_INSTALL: targets to build before installing the product +# AFTER_INSTALL: targets to build after installing the product +# BEFORE_POSTINSTALL: targets to build before postinstalling every subproject +# AFTER_POSTINSTALL: targts to build after postinstalling every subproject +# +# BEFORE_INSTALLHDRS: targets to build before installing headers for a +# subproject +# AFTER_INSTALLHDRS: targets to build after installing headers for a subproject +# BEFORE_INSTALLSRC: targets to build before installing source for a subproject +# AFTER_INSTALLSRC: targets to build after installing source for a subproject +# +# BEFORE_DEPEND: targets to 
build before building dependencies for a +# subproject +# AFTER_DEPEND: targets to build after building dependencies for a +# subproject +# +# AUTOMATIC_DEPENDENCY_INFO: if YES, then the dependency file is +# updated every time the project is built. If NO, the dependency +# file is only built when the depend target is invoked. + +# Framework-related variables: +# FRAMEWORK_DLL_INSTALLDIR: On Windows platforms, this variable indicates +# where to put the framework's DLL. This variable defaults to +# $(INSTALLDIR)/../Executables + +# Library-related variables: +# PUBLIC_HEADER_DIR: Determines where public exported header files +# should be installed. Do not include $(DSTROOT) in this value -- +# it is prefixed automatically. For library projects you should +# set this to something like /Developer/Headers/$(NAME). Do not set +# this variable for framework projects unless you do not want the +# header files included in the framework. +# PRIVATE_HEADER_DIR: Determines where private exported header files +# should be installed. Do not include $(DSTROOT) in this value -- +# it is prefixed automatically. +# LIBRARY_STYLE: This may be either STATIC or DYNAMIC, and determines +# whether the libraries produced are statically linked when they +# are used or if they are dynamically loadable. This defaults to +# DYNAMIC. +# LIBRARY_DLL_INSTALLDIR: On Windows platforms, this variable indicates +# where to put the library's DLL. This variable defaults to +# $(INSTALLDIR)/../Executables +# +# INSTALL_AS_USER: owner of the intalled products (default root) +# INSTALL_AS_GROUP: group of the installed products (default wheel) +# INSTALL_PERMISSIONS: permissions of the installed product (default o+rX) +# +# OTHER_RECURSIVE_VARIABLES: The names of variables which you want to be +# passed on the command line to recursive invocations of make. 
Note that +# the values in OTHER_*FLAGS are inherited by subprojects automatically -- +# you do not have to (and shouldn't) add OTHER_*FLAGS to +# OTHER_RECURSIVE_VARIABLES. + +# Additional headers to export beyond those in the PB.project: +# OTHER_PUBLIC_HEADERS +# OTHER_PROJECT_HEADERS +# OTHER_PRIVATE_HEADERS + +# Additional files for the project's product: <> +# OTHER_RESOURCES: (non-localized) resources for this project +# OTHER_OFILES: relocatables to be linked into this project +# OTHER_LIBS: more libraries to link against +# OTHER_PRODUCT_DEPENDS: other dependencies of this project +# OTHER_SOURCEFILES: other source files maintained by .pre/postamble +# OTHER_GARBAGE: additional files to be removed by `make clean' + +# Set this to YES if you don't want a final libtool call for a library/framework. +# BUILD_OFILES_LIST_ONLY + +# To include a version string, project source must exist in a directory named +# $(NAME).%d[.%d][.%d] and the following line must be uncommented. +# OTHER_GENERATED_OFILES = $(VERS_OFILE) + +# This definition will suppress stripping of debug symbols when an executable +# is installed. By default it is YES. +# STRIP_ON_INSTALL = NO + +# Uncomment to suppress generation of a KeyValueCoding index when installing +# frameworks (This index is used by WOB and IB to determine keys available +# for an object). Set to YES by default. +# PREINDEX_FRAMEWORK = NO + +# Change this definition to install projects somewhere other than the +# standard locations. NEXT_ROOT defaults to "C:/Apple" on Windows systems +# and "" on other systems. 
+DSTROOT = $(HOME) diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project b/libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project new file mode 100644 index 000000000..36dc5a113 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2.kmodproj/PB.project @@ -0,0 +1,24 @@ +{ + DYNAMIC_CODE_GEN = NO; + FILESTABLE = { + CLASSES = (test2_main.cpp); + OTHER_SOURCES = (Makefile.preamble, Makefile, Makefile.postamble, CustomInfo.xml); + }; + LANGUAGE = English; + LOCALIZABLE_FILES = {}; + MAKEFILEDIR = "$(MAKEFILEPATH)/pb_makefiles"; + NEXTSTEP_BUILDTOOL = /bin/gnumake; + NEXTSTEP_COMPILEROPTIONS = "-Wno-format"; + NEXTSTEP_INSTALLDIR = /System/Library/Extensions; + NEXTSTEP_JAVA_COMPILER = /usr/bin/javac; + NEXTSTEP_OBJCPLUS_COMPILER = /usr/bin/cc; + PDO_UNIX_BUILDTOOL = $NEXT_ROOT/Developer/bin/make; + PDO_UNIX_JAVA_COMPILER = "$(JDKBINDIR)/javac"; + PDO_UNIX_OBJCPLUS_COMPILER = "$(NEXTDEV_BIN)/gcc"; + PROJECTNAME = test2; + PROJECTTYPE = "Kernel Module"; + PROJECTVERSION = 2.8; + WINDOWS_BUILDTOOL = $NEXT_ROOT/Developer/Executables/make; + WINDOWS_JAVA_COMPILER = "$(JDKBINDIR)/javac.exe"; + WINDOWS_OBJCPLUS_COMPILER = "$(DEVDIR)/gcc"; +} diff --git a/libkern/c++/Tests/TestSerialization/test2.kmodproj/test2_main.cpp b/libkern/c++/Tests/TestSerialization/test2.kmodproj/test2_main.cpp new file mode 100644 index 000000000..c4c735719 --- /dev/null +++ b/libkern/c++/Tests/TestSerialization/test2.kmodproj/test2_main.cpp @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +__BEGIN_DECLS +#include +#include +#include + +kmod_start_func_t test2_start; +kmod_stop_func_t test2_stop; +__END_DECLS + +#include +#include + +char *testBuffer = " + + + + + + key true + key false + + key d0 + key d1 AQ== + key d2 ASM= + key d3 ASNF + key d4 0123 4567 89abcdef + key d5 ASNFZw== + + key i0 + key i1 123456789 + key i2 0x12345678 + + key s0 + key s1 string 1 + key s2 string 2 + key <&> <&> + + key c0 + + + key a0 + + + key a1 + array string 1 + array string 2 + + + key t0 + + key t1 + set string 1 + set string 2 + + + key r1 + key r2 + key r3 + key r4 + key r5 + key r6 + + key e1 + key e2 + key e3 + key e4 + key e5 + key e6 + + + +"; + +/* + this causes the parser to return an empty string? 
it doesn't look like yyerror gets called + char *testBuffer = "" + +*/ + +kern_return_t +test2_start(struct kmod_info *ki, void *data) +{ + IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); + + // test unserialize + OSString *errmsg = 0; + OSObject *d = OSUnserializeXML(testBuffer, &errmsg); + if (!d) { + if (errmsg) + IOLog("%s\n", errmsg->getCStringNoCopy()); + else + IOLog("bogus error message\n"); + + return KMOD_RETURN_SUCCESS; + } + + // test serialize + OSSerialize *s = OSSerialize::withCapacity(5); + if (!d->serialize(s)) { + IOLog("serialization failed\n"); + return KMOD_RETURN_SUCCESS; + } + + IOLog("serialized object's length = %d, capacity = %d\n", s->getLength(), s->getCapacity()); + IOLog("object unformatted = %s\n", s->text()); + + // try second time + OSObject *d2 = OSUnserializeXML(s->text(), &errmsg); + if (!d2) { + IOLog("%s\n", errmsg->getCStringNoCopy()); + return KMOD_RETURN_SUCCESS; + } + OSSerialize *s2 = OSSerialize::withCapacity(5); + if (!d2->serialize(s2)) { + IOLog("serialization #2 failed\n"); + return KMOD_RETURN_SUCCESS; + } + + IOLog("serialized object's length = %d, capacity = %d\n", + s2->getLength(), s2->getCapacity()); + IOLog("object unformatted = %s\n", s2->text()); + + IOLog("\nserialized objects compared %ssuccessfully textually\n\n", + strcmp(s->text(), s2->text()) ? "un":""); + + IOLog("\nserialized objects compared %ssuccessfully objectwise\n\n", + d->isEqualTo(d2) ? "":"un"); + + s2->release(); + if (d2) d2->release(); + s->release(); + if (d) d->release(); + + return KMOD_RETURN_SUCCESS; +} + +kern_return_t +test2_stop(struct kmod_info *ki, void *data) +{ + return KMOD_RETURN_SUCCESS; +} diff --git a/libkern/conf/MASTER b/libkern/conf/MASTER new file mode 100644 index 000000000..3307faed8 --- /dev/null +++ b/libkern/conf/MASTER @@ -0,0 +1,55 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. 
The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +####################################################################### +# +# Master machine independent configuration file. +# +# Specific configuration files are created based on this and +# the machine specific master file using the doconf script. +# +# Any changes to the master configuration files will affect all +# other configuration files based upon it. +# +####################################################################### +# +# To build a configuration, execute "doconf ." +# Configurations are specified in the "Configurations:" section +# of the MASTER and MASTER.* files as follows: +# +# = [ ... ] +# +# Lines in the MASTER and MASTER.* files are selected based on +# the attribute selector list, found in a comment at the end of +# the line. This is a list of attributes separated by commas. +# The "!" operator selects the line if none of the attributes are +# specified. +# +# For example: +# +# selects a line if "foo" or "bar" are specified. +# selects a line if neither "foo" nor "bar" is +# specified. +# +# Lines with no attributes specified are selected for all +# configurations. +# +####################################################################### +# +# Standard Mach Research Configurations: +# -------- ---- -------- --------------- +# +# These are the default configurations that can be used by most sites. +# They are used internally by the Mach project. 
+# +# LIBKERN = [libkerncpp debug] +# +####################################################################### +# +ident LIBKERN + +options KDEBUG # kernel tracing # +options LIBKERNCPP # C++ implementation # diff --git a/libkern/conf/MASTER.i386 b/libkern/conf/MASTER.i386 new file mode 100644 index 000000000..83a06f878 --- /dev/null +++ b/libkern/conf/MASTER.i386 @@ -0,0 +1,11 @@ +###################################################################### +# +# RELEASE = [intel mach libkerncpp] +# PROFILE = [intel mach libkerncpp profile] +# DEBUG = [intel mach libkerncpp debug] +# +###################################################################### + +machine "i386" # +cpu "i386" # + diff --git a/libkern/conf/MASTER.ppc b/libkern/conf/MASTER.ppc new file mode 100644 index 000000000..a2764000e --- /dev/null +++ b/libkern/conf/MASTER.ppc @@ -0,0 +1,17 @@ +# +###################################################################### +# +# Standard Apple MacOS X Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [ppc mach libkerncpp] +# PROFILE = [ppc mach libkerncpp profile] +# DEBUG = [ppc mach libkerncpp debug] +# RELEASE_TRACE = [ RELEASE kdebug ] +# DEBUG_TRACE = [ DEBUG kdebug ] +# +###################################################################### + +machine "ppc" # +cpu "ppc" # + diff --git a/libkern/conf/Makefile b/libkern/conf/Makefile new file mode 100644 index 000000000..c744dea55 --- /dev/null +++ b/libkern/conf/Makefile @@ -0,0 +1,63 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + tools + +COMP_SUBDIRS = + +INST_SUBDIRS = + +ifndef LIBKERN_KERNEL_CONFIG +export LIBKERN_KERNEL_CONFIG = $(KERNEL_CONFIG) +endif + +COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + 
+$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: + make build_setup + +$(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ + $(SOURCE)/MASTER.$(arch_config) \ + $(SOURCE)/Makefile.template \ + $(SOURCE)/Makefile.$(arch_config) \ + $(SOURCE)/files \ + $(SOURCE)/files.$(arch_config) \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf + @echo "Running doconf for $(LIBKERN_KERNEL_CONFIG)"; + (doconf_target=$(addsuffix /conf, $(TARGET)); \ + echo $${doconf_target};\ + $(MKDIR) $${doconf_target}; \ + cd $${doconf_target}; \ + rm -f $(notdir $?); \ + cp $? $${doconf_target}; \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(arch_config) -d $(TARGET)/$(LIBKERN_KERNEL_CONFIG) $(LIBKERN_KERNEL_CONFIG); \ + ); + +.ORDER: $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile + +do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ + $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile + +do_all: do_setup_conf + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + (cd $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG); \ + next_source=$(subst conf/,,$(SOURCE)); \ + ${MAKE} MAKEFILES=$(TARGET)/$(LIBKERN_KERNEL_CONFIG)/Makefile \ + SOURCE=$${next_source} \ + TARGET=$(TARGET) \ + build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/libkern/conf/Makefile.i386 b/libkern/conf/Makefile.i386 new file mode 100644 index 000000000..b89fdd145 --- /dev/null +++ b/libkern/conf/Makefile.i386 @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for i386 +###################################################################### + + +###################################################################### +#END Machine dependent 
Makefile fragment for i386 +###################################################################### diff --git a/libkern/conf/Makefile.ppc b/libkern/conf/Makefile.ppc new file mode 100644 index 000000000..2b438f2fa --- /dev/null +++ b/libkern/conf/Makefile.ppc @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for ppc +###################################################################### + + +###################################################################### +#END Machine dependent Makefile fragment for ppc +###################################################################### diff --git a/libkern/conf/Makefile.template b/libkern/conf/Makefile.template new file mode 100644 index 000000000..0524aad2d --- /dev/null +++ b/libkern/conf/Makefile.template @@ -0,0 +1,109 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# Export IDENT for sub-makefiles +# +export IDENT + +# +# XXX: INCFLAGS +# +INCFLAGS_MAKEFILE= + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +# +# XXX: CFLAGS +# +CFLAGS+= -DKERNEL -DLIBKERN_KERNEL_PRIVATE -DOSALLOCDEBUG=1 \ + -Wall -Wno-four-char-constants -fno-common + +SFLAGS+= -DKERNEL + +# +# Directories for mig generated files +# +COMP_SUBDIRS = + +# +# Make sure we don't remove this by accident if interrupted at the wrong +# time. 
+# +.PRECIOUS: Makefile + +VERSION_FILES= \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.major \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.minor \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.variant + +COPYRIGHT_FILES = \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright + +# +# Theses macros are filled in by the config program depending on the +# current configuration. The MACHDEP macro is replaced by the +# contents of the machine dependent makefile template and the others +# are replaced by the corresponding symbol definitions for the +# configuration. +# + +%OBJS + +%CFILES + +%MFILES + +%SFILES + +%BFILES + +%ORDERED +%MACHDEP + +# +# OBJSDEPS is the set of files (defined in the machine dependent +# template if necessary) which all objects depend on (such as an +# in-line assembler expansion filter) +# +${OBJS}: ${OBJSDEPS} + + +%LOAD + +LDOBJS = $(OBJS) + +$(COMPONENT).o: $(LDOBJS) + @echo "creating $(COMPONENT).o" + $(RM) $(RMFLAGS) vers.c + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} + ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c + @echo [ updating $(COMPONENT).o ${LIBKERN_KERNEL_CONFIG} ] + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o + +do_all: $(COMPONENT).o + +do_depend: do_all + ${MD} -u Makedep -f -d `ls *.d` + +do_build_all: do_depend + +%RULES + +-include Makedep + +include $(MakeInc_rule) + +include $(MakeInc_dir) + diff --git a/libkern/conf/copyright b/libkern/conf/copyright new file mode 100644 index 000000000..3837f6c79 --- /dev/null +++ b/libkern/conf/copyright @@ -0,0 +1,6 @@ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. 
+ */ + diff --git a/libkern/conf/files b/libkern/conf/files new file mode 100644 index 000000000..2bc05b4eb --- /dev/null +++ b/libkern/conf/files @@ -0,0 +1,29 @@ +# options + +OPTIONS/libkerncpp optional libkerncpp +OPTIONS/kdebug optional kdebug + +# libkern + +libkern/gen/OSAtomicOperations.c standard + +libkern/c++/OSMetaClass.cpp optional libkerncpp +libkern/c++/OSObject.cpp optional libkerncpp + +libkern/c++/OSArray.cpp optional libkerncpp +libkern/c++/OSBoolean.cpp optional libkerncpp +libkern/c++/OSCollection.cpp optional libkerncpp +libkern/c++/OSCollectionIterator.cpp optional libkerncpp +libkern/c++/OSData.cpp optional libkerncpp +libkern/c++/OSDictionary.cpp optional libkerncpp +libkern/c++/OSIterator.cpp optional libkerncpp +libkern/c++/OSNumber.cpp optional libkerncpp +libkern/c++/OSOrderedSet.cpp optional libkerncpp +libkern/c++/OSRuntime.cpp optional libkerncpp +libkern/c++/OSSerialize.cpp optional libkerncpp +libkern/c++/OSSet.cpp optional libkerncpp +libkern/c++/OSString.cpp optional libkerncpp +libkern/c++/OSSymbol.cpp optional libkerncpp +libkern/c++/OSUnserialize.cpp optional libkerncpp +libkern/c++/OSUnserializeXML.cpp optional libkerncpp + diff --git a/libkern/conf/files.i386 b/libkern/conf/files.i386 new file mode 100644 index 000000000..70f37ed51 --- /dev/null +++ b/libkern/conf/files.i386 @@ -0,0 +1 @@ +libkern/i386/OSAtomic.s standard diff --git a/libkern/conf/files.ppc b/libkern/conf/files.ppc new file mode 100644 index 000000000..b1cd7ebc2 --- /dev/null +++ b/libkern/conf/files.ppc @@ -0,0 +1,2 @@ +libkern/ppc/OSAtomic.s standard + diff --git a/libkern/conf/tools/Makefile b/libkern/conf/tools/Makefile new file mode 100644 index 000000000..9df86ce8c --- /dev/null +++ b/libkern/conf/tools/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include 
$(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + doconf \ + newvers + +COMP_SUBDIRS = \ + doconf \ + newvers + +INST_SUBDIRS = \ + + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/conf/tools/doconf/Makefile b/libkern/conf/tools/doconf/Makefile new file mode 100644 index 000000000..2bf0b7a10 --- /dev/null +++ b/libkern/conf/tools/doconf/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)doconf + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all 
$(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/conf/tools/doconf/doconf.csh b/libkern/conf/tools/doconf/doconf.csh new file mode 100755 index 000000000..43388c11c --- /dev/null +++ b/libkern/conf/tools/doconf/doconf.csh @@ -0,0 +1,313 @@ +#!/bin/csh -f +set path = ($path .) +###################################################################### +# HISTORY +# 1-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University +# Added "-verbose" switch, so this script produces no output +# in the normal case. +# +# 10-Oct-87 Mike Accetta (mja) at Carnegie-Mellon University +# Flushed cmu_*.h and spin_locks.h +# [ V5.1(XF18) ] +# +# 6-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# Use MASTER.local and MASTER..local for generation of +# configuration files in addition to MASTER and MASTER.. +# +# 25-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Removed use of obsolete wb_*.h files when building the feature +# list; modified to save the previous configuration file and +# display the differences between it and the new file. +# [ V5.1(F8) ] +# +# 25-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# If there is no /etc/machine just print out a message telling +# user to use the -cpu option. I thought this script was supposed +# to work even without a /etc/machine, but it doesn't... and this +# is the easiest way out. +# +# 13-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Added "romp_fpa.h" file to extra features for the RT. +# [ V5.1(F7) ] +# +# 11-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to maintain the appropriate configuration features file +# in the "machine" directory whenever the corresponding +# configuration is generated. 
This replaces the old mechanism of +# storing this directly in the file since it was +# machine dependent and also precluded building programs for more +# than one configuration from the same set of sources. +# [ V5.1(F6) ] +# +# 21-Feb-87 Mike Accetta (mja) at Carnegie-Mellon University +# Fixed to require wired-in cpu type names for only those +# machines where the kernel name differs from that provided by +# /etc/machine (i.e. IBMRT => ca and SUN => sun3); updated to +# permit configuration descriptions in both machine indepedent +# and dependent master configuration files so that attributes can +# be grouped accordingly. +# [ V5.1(F3) ] +# +# 17-Jan-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to work from any directory at the same level as +# "conf"; generate configuration from both MASTER and +# MASTER. files; added -cpu switch. +# [ V5.1(F1) ] +# +# 18-Aug-86 Mike Accetta (mja) at Carnegie-Mellon University +# Added -make switch and changed meaning of -config; upgraded to +# allow multiple attributes per configuration and to define +# configurations in terms of these attributes within MASTER. +# +# 14-Apr-83 Mike Accetta (mja) at Carnegie-Mellon University +# Added -config switch to only run /etc/config without +# "make depend" and "make". 
+# +###################################################################### + +set prog=$0 +set prog=$prog:t +set nonomatch +set OBJDIR=../BUILD +if ("`/usr/bin/uname`" == "Rhapsody" ) then +set CONFIG_DIR=/usr/local/bin +else +set CONFIG_DIR=/usr/bin +endif + +unset domake +unset doconfig +unset beverbose +unset MACHINE +unset profile + +while ($#argv >= 1) + if ("$argv[1]" =~ -*) then + switch ("$argv[1]") + case "-c": + case "-config": + set doconfig + breaksw + case "-m": + case "-make": + set domake + breaksw + case "-cpu": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set MACHINE="$argv[2]" + shift + breaksw + case "-d": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set OBJDIR="$argv[2]" + shift + breaksw + case "-verbose": + set beverbose + breaksw + case "-p": + case "-profile": + set profile + breaksw + default: + echo "${prog}: ${argv[1]}: unknown switch" + exit 1 + breaksw + endsw + shift + else + break + endif +end + +if ($#argv == 0) set argv=(GENERIC) + +if (! $?MACHINE) then + if (-d /NextApps) then + set MACHINE=`hostinfo | awk '/MC680x0/ { printf("m68k") } /MC880x0/ { printf("m88k") }'` + endif +endif + +if (! $?MACHINE) then + if (-f /etc/machine) then + set MACHINE="`/etc/machine`" + else + echo "${prog}: no /etc/machine, specify machine type with -cpu" + echo "${prog}: e.g. 
${prog} -cpu VAX CONFIGURATION" + exit 1 + endif +endif + +set FEATURES_EXTRA= + +switch ("$MACHINE") + case IBMRT: + set cpu=ca + set ID=RT + set FEATURES_EXTRA="romp_dualcall.h romp_fpa.h" + breaksw + case SUN: + set cpu=sun3 + set ID=SUN3 + breaksw + default: + set cpu=`echo $MACHINE | tr A-Z a-z` + set ID=`echo $MACHINE | tr a-z A-Z` + breaksw +endsw +set FEATURES=../h/features.h +set FEATURES_H=(cs_*.h mach_*.h net_*.h\ + cputypes.h cpus.h vice.h\ + $FEATURES_EXTRA) +set MASTER_DIR=../conf +set MASTER = ${MASTER_DIR}/MASTER +set MASTER_CPU=${MASTER}.${cpu} + +set MASTER_LOCAL = ${MASTER}.local +set MASTER_CPU_LOCAL = ${MASTER_CPU}.local +if (! -f $MASTER_LOCAL) set MASTER_LOCAL = "" +if (! -f $MASTER_CPU_LOCAL) set MASTER_CPU_LOCAL = "" + +if (! -d $OBJDIR) then + echo "[ creating $OBJDIR ]" + mkdir -p $OBJDIR +endif + +foreach SYS ($argv) + set SYSID=${SYS}_${ID} + set SYSCONF=$OBJDIR/config.$SYSID + set BLDDIR=$OBJDIR + if ($?beverbose) then + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + endif + echo +$SYS \ + | \ + cat $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL - \ + $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL \ + | \ + sed -n \ + -e "/^+/{" \ + -e "s;[-+];#&;gp" \ + -e 't loop' \ + -e ': loop' \ + -e 'n' \ + -e '/^#/b loop' \ + -e '/^$/b loop' \ + -e 's;^\([^#]*\).*#[ ]*<\(.*\)>[ ]*$;\2#\1;' \ + -e 't not' \ + -e 's;\([^#]*\).*;#\1;' \ + -e 't not' \ + -e ': not' \ + -e 's;[ ]*$;;' \ + -e 's;^\!\(.*\);\1#\!;' \ + -e 'p' \ + -e 't loop' \ + -e 'b loop' \ + -e '}' \ + -e "/^[^#]/d" \ + -e 's; ; ;g' \ + -e "s;^# *\([^ ]*\)[ ]*=[ ]*\[\(.*\)\].*;\1#\2;p" \ + | \ + awk '-F#' '\ +part == 0 && $1 != "" {\ + m[$1]=m[$1] " " $2;\ + next;\ +}\ +part == 0 && $1 == "" {\ + for (i=NF;i>1;i--){\ + s=substr($i,2);\ + c[++na]=substr($i,1,1);\ + a[na]=s;\ + }\ + while (na > 0){\ + s=a[na];\ + d=c[na--];\ + if (m[s] == "") {\ + f[s]=d;\ + } else {\ + nx=split(m[s],x," ");\ + for (j=nx;j>0;j--) {\ + z=x[j];\ + a[++na]=z;\ + 
c[na]=d;\ + }\ + }\ + }\ + part=1;\ + next;\ +}\ +part != 0 {\ + if ($1 != "") {\ + n=split($1,x,",");\ + ok=0;\ + for (i=1;i<=n;i++) {\ + if (f[x[i]] == "+") {\ + ok=1;\ + }\ + }\ + if (NF > 2 && ok == 0 || NF <= 2 && ok != 0) {\ + print $2; \ + }\ + } else { \ + print $2; \ + }\ +}\ +' >$SYSCONF.new + if (-z $SYSCONF.new) then + echo "${prog}: ${$SYSID}: no such configuration in $MASTER_DIR/MASTER{,.$cpu}" + rm -f $SYSCONF.new + endif + if (! -d $BLDDIR) then + echo "[ creating $BLDDIR ]" + mkdir -p $BLDDIR + endif +# +# These paths are used by config. +# +# "builddir" is the name of the directory where kernel binaries +# are put. It is a single path element, never absolute, and is +# always relative to "objectdir". "builddir" is used by config +# solely to determine where to put files created by "config" (e.g. +# the created Makefile and *.h's.) +# +# "objectdir" is the name of the directory which will hold "builddir". +# It is a path; if relative, it is relative to the current directory +# where config is run. It's sole use is to be prepended to "builddir" +# to indicate where config-created files are to be placed (see above). +# +# "sourcedir" is the location of the sources used to build the kernel. +# It is a path; if relative, it is relative to the directory specified +# by the concatenation of "objectdir" and "builddir" (i.e. where the +# kernel binaries are put). 
+# + echo 'builddir "."' >> $SYSCONF.new + set OBJRELDIR=`relpath $OBJROOT $OBJDIR` + echo 'objectdir "'$OBJROOT'/'$OBJRELDIR'"' >> $SYSCONF.new + set SRCDIR=`dirname $SOURCE` + echo 'sourcedir "'$SRCROOT'"' >> $SYSCONF.new + if (-f $SYSCONF) then + diff $SYSCONF $SYSCONF.new + rm -f $SYSCONF.old + mv $SYSCONF $SYSCONF.old + endif + rm -f $SYSCONF + mv $SYSCONF.new $SYSCONF + if ($?doconfig) then + echo "[ configuring $SYSID ]" + if ($?profile) then + $CONFIG_DIR/config -c $MASTER_DIR -p $SYSCONF + else + $CONFIG_DIR/config -c $MASTER_DIR $SYSCONF + endif + endif + if ($?domake) then + echo "[ making $SYSID ]" + (cd $BLDDIR; make) + endif +end diff --git a/libkern/conf/tools/newvers/Makefile b/libkern/conf/tools/newvers/Makefile new file mode 100644 index 000000000..73603c753 --- /dev/null +++ b/libkern/conf/tools/newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)newvers + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo 
"[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/conf/tools/newvers/newvers.csh b/libkern/conf/tools/newvers/newvers.csh new file mode 100644 index 000000000..b462d3387 --- /dev/null +++ b/libkern/conf/tools/newvers/newvers.csh @@ -0,0 +1,34 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +v="${major}.${minor}" d=`pwd` h="rcbuilder" t=`date` w=`whoami` +if [ -z "$d" -o -z "$h" -o -z "$t" ]; then + exit 1 +fi +CONFIG=`expr "$d" : '.*/\([^/]*\)$'` +d=`expr "$d" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int ${COMPONENT}_version_major = ${major};" ; + /bin/echo "int ${COMPONENT}_version_minor = ${minor};" ; + /bin/echo "char ${COMPONENT}_version_variant[] = \"${variant}\";" ; + /bin/echo "char ${COMPONENT}_version[] = \"Common Services Component Version ${v}:\\n${t}; $w($h):$d\\n\";" ; + /bin/echo "char ${COMPONENT}_osrelease[] = \"${major}.${minor}\";" ; + /bin/echo "char ${COMPONENT}_ostype[] = \"Common Services\";" ; + /bin/echo "char ${COMPONENT}_builder[] = \"$w\";" ; +) > vers.c +if [ -s vers.suffix -o ! 
-f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/libkern/conf/version.major b/libkern/conf/version.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/libkern/conf/version.major @@ -0,0 +1 @@ +1 diff --git a/libkern/conf/version.minor b/libkern/conf/version.minor new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/libkern/conf/version.minor @@ -0,0 +1 @@ +0 diff --git a/libkern/conf/version.variant b/libkern/conf/version.variant new file mode 100644 index 000000000..e69de29bb diff --git a/libkern/gen/OSAtomicOperations.c b/libkern/gen/OSAtomicOperations.c new file mode 100644 index 000000000..735fa2ac3 --- /dev/null +++ b/libkern/gen/OSAtomicOperations.c @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +enum { + false = 0, + true = 1 +}; +#define NULL 0L + + +/* + * atomic operations + * these are _the_ atomic operations, currently cast atop CompareAndSwap, + * which is implemented in assembler. 
if we are worried about the cost of + * this layering (we shouldn't be), then all this stuff could be + * implemented in assembler, as it is in MacOS8/9 + * (derived from SuperMario/NativeLibs/IO/DriverServices/Synchronization.s, + * which I wrote for NuKernel in a previous life with a different last name...) + * + * native Boolean CompareAndSwap(UInt32 oldValue, UInt32 newValue, UInt32 * oldValuePtr); + */ + +#ifndef __ppc__ + +SInt32 OSAddAtomic(SInt32 amount, SInt32 * value) +{ + SInt32 oldValue; + SInt32 newValue; + + do { + oldValue = *value; + newValue = oldValue + amount; + } while (! OSCompareAndSwap((UInt32) oldValue, (UInt32) newValue, (UInt32 *) value)); + + return oldValue; +} + +SInt32 OSIncrementAtomic(SInt32 * value) +{ + return OSAddAtomic(1, value); +} + +SInt32 OSDecrementAtomic(SInt32 * value) +{ + return OSAddAtomic(-1, value); +} + +#endif /* !__ppc__ */ + +static UInt32 OSBitwiseAtomic(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, UInt32 * value) +{ + UInt32 oldValue; + UInt32 newValue; + + do { + oldValue = *value; + newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask; + } while (! 
OSCompareAndSwap(oldValue, newValue, value)); + + return oldValue; +} + +UInt32 OSBitAndAtomic(UInt32 mask, UInt32 * value) +{ + return OSBitwiseAtomic(mask, 0, 0, value); +} + +UInt32 OSBitOrAtomic(UInt32 mask, UInt32 * value) +{ + return OSBitwiseAtomic((UInt32) -1, mask, 0, value); +} + +UInt32 OSBitXorAtomic(UInt32 mask, UInt32 * value) +{ + return OSBitwiseAtomic((UInt32) -1, 0, mask, value); +} + + +static Boolean OSCompareAndSwap8(UInt8 oldValue8, UInt8 newValue8, UInt8 * value8) +{ + UInt32 mask = 0x000000ff; + UInt32 newbits = (UInt32) newValue8; + int shift; + UInt32 alignment = ((UInt32) value8) & (sizeof(UInt32) - 1); + UInt32 oldValue; + UInt32 newValue; + UInt32 * value; + + switch (alignment) { + default: + // assert(false); + case 0: + value = (UInt32 *) value8; + shift = 24; + break; + case 1: + value = (UInt32 *) (value8 + 1); + shift = 16; + break; + case 2: + value = (UInt32 *) (value8 + 2); + shift = 8; + break; + case 3: + value = (UInt32 *) (value8 + 3); + shift = 0; + break; + } + + mask <<= shift; + newbits <<= shift; + + oldValue = *value; + newValue = (oldValue & ~mask) | (newbits & mask); + + return OSCompareAndSwap(oldValue, newValue, value); +} + +static Boolean OSTestAndSetClear(UInt32 bit, Boolean wantSet, UInt8 * startAddress) +{ + UInt8 mask = 1; + UInt8 oldValue; + UInt8 wantValue; + + startAddress += (bit / 8); + mask <<= (7 - (bit % 8)); + wantValue = wantSet ? mask : 0; + + do { + oldValue = *startAddress; + if ((oldValue & mask) == wantValue) { + break; + } + } while (! 
OSCompareAndSwap8(oldValue, (oldValue & ~mask) | wantValue, startAddress)); + + return (oldValue & mask) == wantValue; +} + +Boolean OSTestAndSet(UInt32 bit, UInt8 * startAddress) +{ + return OSTestAndSetClear(bit, true, startAddress); +} + +Boolean OSTestAndClear(UInt32 bit, UInt8 * startAddress) +{ + return OSTestAndSetClear(bit, false, startAddress); +} + +void * OSDequeueAtomic(void ** inList, SInt32 inOffset) +{ + void * oldListHead; + void * newListHead; + + do { + oldListHead = *inList; + if (oldListHead == NULL) { + break; + } + + newListHead = *(void **) (((char *) oldListHead) + inOffset); + } while (! OSCompareAndSwap((UInt32)oldListHead, + (UInt32)newListHead, (UInt32 *)inList)); + + return oldListHead; +} + +void OSEnqueueAtomic(void ** inList, void * inNewLink, SInt32 inOffset) +{ + void * oldListHead; + void * newListHead = inNewLink; + void ** newLinkNextPtr = (void **) (((char *) inNewLink) + inOffset); + + do { + oldListHead = *inList; + *newLinkNextPtr = oldListHead; + } while (! OSCompareAndSwap((UInt32)oldListHead, (UInt32)newListHead, + (UInt32 *)inList)); +} + +/* + * silly unaligned versions + */ + +SInt8 OSIncrementAtomic8(SInt8 * value) +{ + return OSAddAtomic8(1, value); +} + +SInt8 OSDecrementAtomic8(SInt8 * value) +{ + return OSAddAtomic8(-1, value); +} + +SInt8 OSAddAtomic8(SInt32 amount, SInt8 * value) +{ + SInt8 oldValue; + SInt8 newValue; + + do { + oldValue = *value; + newValue = oldValue + amount; + } while (! OSCompareAndSwap8((UInt8) oldValue, (UInt8) newValue, (UInt8 *) value)); + + return oldValue; +} + +static UInt8 OSBitwiseAtomic8(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, UInt8 * value) +{ + UInt8 oldValue; + UInt8 newValue; + + do { + oldValue = *value; + newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask; + } while (! 
OSCompareAndSwap8(oldValue, newValue, value)); + + return oldValue; +} + +UInt8 OSBitAndAtomic8(UInt32 mask, UInt8 * value) +{ + return OSBitwiseAtomic8(mask, 0, 0, value); +} + +UInt8 OSBitOrAtomic8(UInt32 mask, UInt8 * value) +{ + return OSBitwiseAtomic8((UInt32) -1, mask, 0, value); +} + +UInt8 OSBitXorAtomic8(UInt32 mask, UInt8 * value) +{ + return OSBitwiseAtomic8((UInt32) -1, 0, mask, value); +} + + +static Boolean OSCompareAndSwap16(UInt16 oldValue16, UInt16 newValue16, UInt16 * value16) +{ + UInt32 mask = 0x0000ffff; + UInt32 newbits = (UInt32) newValue16; + int shift; + UInt32 alignment = ((UInt32) value16) & (sizeof(UInt32) - 1); + UInt32 oldValue; + UInt32 newValue; + UInt32 * value; + + if (alignment == 2) { + value = (UInt32 *) (value16 - 1); + shift = 0; + } + else { + // assert(alignment == 0); + value = (UInt32 *) value16; + shift = 16; + } + + mask <<= shift; + newbits <<= shift; + + oldValue = *value; + newValue = (oldValue & ~mask) | (newbits & mask); + + return OSCompareAndSwap(oldValue, newValue, value); +} + +SInt16 OSIncrementAtomic16(SInt16 * value) +{ + return OSAddAtomic16(1, value); +} + +SInt16 OSDecrementAtomic16(SInt16 * value) +{ + return OSAddAtomic16(-1, value); +} + +SInt16 OSAddAtomic16(SInt32 amount, SInt16 * value) +{ + SInt16 oldValue; + SInt16 newValue; + + do { + oldValue = *value; + newValue = oldValue + amount; + } while (! OSCompareAndSwap16((UInt16) oldValue, (UInt16) newValue, (UInt16 *) value)); + + return oldValue; +} + +static UInt16 OSBitwiseAtomic16(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, UInt16 * value) +{ + UInt16 oldValue; + UInt16 newValue; + + do { + oldValue = *value; + newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask; + } while (! 
OSCompareAndSwap16(oldValue, newValue, value)); + + return oldValue; +} + +UInt16 OSBitAndAtomic16(UInt32 mask, UInt16 * value) +{ + return OSBitwiseAtomic16(mask, 0, 0, value); +} + +UInt16 OSBitOrAtomic16(UInt32 mask, UInt16 * value) +{ + return OSBitwiseAtomic16((UInt32) -1, mask, 0, value); +} + +UInt16 OSBitXorAtomic16(UInt32 mask, UInt16 * value) +{ + return OSBitwiseAtomic16((UInt32) -1, 0, mask, value); +} + diff --git a/libkern/i386/OSAtomic.c b/libkern/i386/OSAtomic.c new file mode 100644 index 000000000..c5904c3ea --- /dev/null +++ b/libkern/i386/OSAtomic.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + /* + * Copyright (c) 1999 Apple Computer, Inc. + * + * + * HISTORY + * + * wgulland 23 Jul 99 - Bogus implementation! + */ +#include + +#warning Bogus OSCompareAndSwap() for i386 +int OSCompareAndSwap( UInt32 oldVal, + UInt32 newVal, UInt32 * addr ) +{ + /* + * This should be + * EAX = oldVal + * Lock:CMPXCHG addr, newVal + * But I don't know how to write that. 
+ */ + int ok; + if( (ok = (oldVal == *addr))) + *addr = newVal; + return( ok ); +} + diff --git a/libkern/i386/OSAtomic.s b/libkern/i386/OSAtomic.s new file mode 100644 index 000000000..e7f13bf55 --- /dev/null +++ b/libkern/i386/OSAtomic.s @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#;************************************************************************** +#;* Boolean OSCompareAndSwap(SInt32 oldValue, SInt32 newValue, SInt32 * ptr) * +#;************************************************************************** + + .globl _OSCompareAndSwap + +_OSCompareAndSwap: + #; this is _lame_, the project will not currently accept asm code that + #; requires anything beyond a 386, but that chip: + #; - does not support MP + #; - does not support the cmpxchgl instruction + #; - does not support the lock meta-instruction + #; so what is a poor guy to do? comment it out... 
+ pushl %edi + pushl %esi + movl 0+8+4(%esp),%eax #; oldValue + movl 4+8+4(%esp),%edi #; newValue + movl 8+8+4(%esp),%esi #; ptr + lock + cmpxchgl %edi,0(%esi) #; CAS (eax is an implicit operand) + sete %al #; did CAS succeed? (TZ=1) + andl $0x000000ff,%eax #; clear out the high bytes (has to be an easier way...) + popl %esi + popl %edi + ret + diff --git a/libkern/kmod/Makefile b/libkern/kmod/Makefile new file mode 100644 index 000000000..3bad6a04f --- /dev/null +++ b/libkern/kmod/Makefile @@ -0,0 +1,37 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMPOBJROOT = $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +do_all: + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + ($(MKDIR) $(COMPOBJROOT)/kmod; \ + cd $(COMPOBJROOT)/kmod; \ + ${MAKE} MAKEFILES=$(SOURCE)/Makefile.kmod \ + TARGET=$(TARGET) \ + do_build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +do_install: + @echo "[ $(SOURCE) ] Starting do_install $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + ($(MKDIR) $(COMPOBJROOT)/kmod; \ + cd $(COMPOBJROOT)/kmod; \ + ${MAKE} MAKEFILES=$(SOURCE)/Makefile.kmod \ + TARGET=$(TARGET) \ + do_build_install \ + ); \ + echo "[ $(SOURCE) ] Returning do_install $(COMPONENT) $(MACH_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_install: do_install + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/libkern/kmod/Makefile.kmod b/libkern/kmod/Makefile.kmod new file mode 100644 index 000000000..c49e1fe69 --- /dev/null +++ b/libkern/kmod/Makefile.kmod @@ -0,0 +1,63 @@ +# +# Kernel Module Library code makefile +# + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export 
MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTALL_DIR = $(DSTROOT)/usr/lib +KMOD_NAME = libkmod +KMODCPP_NAME = libkmodc++ +LIB_INSTALL_FLAGS = -c -m 444 -S "-S" + +CFLAGS += -Wall -Wno-four-char-constants +CFLAGS_PPC += -mlong-branch + +COMPOBJROOT = $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/kmod +INSTOBJROOT = $(OBJROOT)/$(INSTALL_TYPE)_$(ARCH_CONFIG)/$(COMPONENT)/kmod + +KMOD_CFILES = c_start.c c_stop.c +KMODCPP_CFILES = cplus_start.c cplus_stop.c + +KMOD_OFILES = $(KMOD_CFILES:.c=.o) +KMODCPP_OFILES = $(KMODCPP_CFILES:.c=.o) + +ALL_OFILES = $(KMOD_OFILES) $(KMODCPP_OFILES) + +$(ALL_OFILES): %.o : %.c + ${KCC} -c ${CFLAGS} ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} -o $(COMPOBJROOT)/$(*F).o $< + +$(COMPOBJROOT)/$(KMOD_NAME).a: $(KMOD_OFILES) + libtool -static -o $@ $^ + +$(COMPOBJROOT)/$(KMODCPP_NAME).a: $(KMODCPP_OFILES) + libtool -static -o $@ $^ + +do_build_all: $(COMPOBJROOT)/$(KMOD_NAME).a $(COMPOBJROOT)/$(KMODCPP_NAME).a + +$(INSTALL_DIR)/%.a: $(INSTOBJROOT)/%.a + @allarchs=""; \ + for onearch in $(INSTALL_ARCHS); do \ + archdir=$(OBJROOT)/$(KERNEL_CONFIG)_$$onearch/$(COMPONENT); \ + if [ -e $$archdir/kmod/$(*F).a ]; then \ + allarchs="$$allarchs $$archdir/kmod/$(*F).a"; \ + fi; \ + done; \ + $(RM) $@ || true; \ + ${MKDIR} $(INSTALL_DIR) $(SYMROOT); \ + cmd="lipo $$allarchs -create -output $(SYMROOT)/$(*F).a"; \ + echo $$cmd; eval $$cmd; \ + cmd="install $(LIB_INSTALL_FLAGS) $(SYMROOT)/$(*F).a $@"; \ + echo $$cmd; eval $$cmd + + +do_build_install: $(INSTALL_DIR)/$(KMOD_NAME).a $(INSTALL_DIR)/$(KMODCPP_NAME).a + +# include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/libkern/kmod/README b/libkern/kmod/README new file mode 100644 index 000000000..f42e502fd --- /dev/null +++ b/libkern/kmod/README @@ -0,0 +1,28 @@ +# +# Subtle combination of files and libraries 
make up the C++ runtime system for +# kernel modules. We are dependant on the KernelModule kmod.make and +# CreateKModInfo.perl scripts to be exactly instep with both this library +# module and the libkmod module as well. +# +# If you do any maintenance on any of the following files make sure great +# care is taken to keep them in Sync. +# extenTools/KernelModule.bproj/kmod.make +# extenTools/KernelModule.bproj/CreateKModInfo.perl +# IOKitUser/kmodc++/pure.c +# IOKitUser/kmodc++/cplus_start.c +# IOKitUser/kmodc++/cplus_start.c +# IOKitUser/kmodc/c_start.c +# IOKitUser/kmodc/c_stop.c +# +# The trick is that the linkline links all of the developers modules. +# If any static constructors are used .constructors_used will be left as +# an undefined symbol. This symbol is exported by the cplus_start.c routine +# which automatically brings in the appropriate C++ _start routine. However +# the actual _start symbol is only required by the kmod_info structure that +# is created and initialized by the CreateKModInfo.perl script. If no C++ +# was used the _start will be an undefined symbol that is finally satisfied +# by the c_start module in the kmod library. +# +# The linkline must look like this. +# *.o -lkmodc++ kmod_info.o -lkmod +# diff --git a/libkern/kmod/c_start.c b/libkern/kmod/c_start.c new file mode 100644 index 000000000..ad33f01f4 --- /dev/null +++ b/libkern/kmod/c_start.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + + If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. + KernelModule.bproj/kmod.make + KernelModule.bproj/CreateKModInfo.perl + KernelModule.bproj/kmodc++/pure.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc/c_start.c + KernelModule.bproj/kmodc/c_stop.c + + The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + + The linkline must look like this. + *.o -lkmodc++ kmod_info.o -lkmod + */ +#include + +// These global symbols will be defined by CreateInfo script's info.c file. 
+extern kmod_start_func_t *_realmain; + +__private_extern__ kern_return_t _start(kmod_info_t *ki, void *data) +{ + if (_realmain) + return (*_realmain)(ki, data); + else + return KERN_SUCCESS; +} diff --git a/libkern/kmod/c_stop.c b/libkern/kmod/c_stop.c new file mode 100644 index 000000000..3de526ad5 --- /dev/null +++ b/libkern/kmod/c_stop.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + + If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. 
+ KernelModule.bproj/kmod.make + KernelModule.bproj/CreateKModInfo.perl + KernelModule.bproj/kmodc++/pure.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc/c_start.c + KernelModule.bproj/kmodc/c_stop.c + + The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + + The linkline must look like this. + *.o -lkmodc++ kmod_info.o -lkmod + */ +#include + +// These global symbols will be defined by CreateInfo script's info.c file. +extern kmod_stop_func_t *_antimain; + +__private_extern__ kern_return_t _stop(kmod_info_t *ki, void *data) +{ + if (_antimain) + return (*_antimain)(ki, data); + else + return KERN_SUCCESS; +} diff --git a/libkern/kmod/cplus_start.c b/libkern/kmod/cplus_start.c new file mode 100644 index 000000000..81bc3adc0 --- /dev/null +++ b/libkern/kmod/cplus_start.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + + If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. + KernelModule.bproj/kmod.make + KernelModule.bproj/CreateKModInfo.perl + KernelModule.bproj/kmodc++/pure.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc/c_start.c + KernelModule.bproj/kmodc/c_stop.c + + The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + + The linkline must look like this. + *.o -lkmodc++ kmod_info.o -lkmod + */ +#include + +asm(".constructors_used = 0"); +asm(".private_extern .constructors_used"); + +// This global symbols will be defined by CreateInfo script's info.c file. 
+extern kmod_start_func_t *_realmain; + +// Functions defined in libkern/c++/OSRuntime.cpp +extern kern_return_t OSRuntimeInitializeCPP(kmod_info_t *ki, void *data); + +__private_extern__ kern_return_t _start(kmod_info_t *ki, void *data) +{ + kern_return_t res = OSRuntimeInitializeCPP(ki, data); + + if (!res && _realmain) + res = (*_realmain)(ki, data); + + return res; +} diff --git a/libkern/kmod/cplus_stop.c b/libkern/kmod/cplus_stop.c new file mode 100644 index 000000000..8151fb43f --- /dev/null +++ b/libkern/kmod/cplus_stop.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + + If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. 
+ KernelModule.bproj/kmod.make + KernelModule.bproj/CreateKModInfo.perl + KernelModule.bproj/kmodc++/pure.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc++/cplus_start.c + KernelModule.bproj/kmodc/c_start.c + KernelModule.bproj/kmodc/c_stop.c + + The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + + The linkline must look like this. + *.o -lkmodc++ kmod_info.o -lkmod + */ +#include + +asm(".destructors_used = 0"); +asm(".private_extern .destructors_used"); + +// Functions defined in libkern/c++/OSRuntime.cpp +extern kern_return_t OSRuntimeFinalizeCPP(kmod_info_t *ki, void *data); + +// This global symbols will be defined by CreateInfo script's info.c file. 
+extern kmod_stop_func_t *_antimain; + +__private_extern__ kern_return_t _stop(kmod_info_t *ki, void *data) +{ + kern_return_t res = OSRuntimeFinalizeCPP(ki, data); + + if (!res && _antimain) + res = (*_antimain)(ki, data); + + return res; +} diff --git a/libkern/libkern/Makefile b/libkern/libkern/Makefile new file mode 100644 index 000000000..f79a7813c --- /dev/null +++ b/libkern/libkern/Makefile @@ -0,0 +1,43 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + machine \ + c++ + +INSTINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS} \ + ppc + +INSTINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS} \ + i386 + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + OSAtomic.h \ + OSBase.h \ + OSByteOrder.h \ + OSReturn.h \ + OSTypes.h + +INSTALL_MI_LIST = OSReturn.h OSTypes.h + +INSTALL_MI_DIR = libkern + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = libkern + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/libkern/OSAtomic.h b/libkern/libkern/OSAtomic.h new file mode 100644 index 000000000..c86f099c6 --- /dev/null +++ b/libkern/libkern/OSAtomic.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSATOMIC_H +#define _OS_OSATOMIC_H + +#include + +#if defined(__cplusplus) +extern "C" { +#endif + +/*! @function OSCompareAndSwap + @abstract Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSCompareAndSwap function compares the value at the specified address with oldVal. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false. + @param oldValue The value to compare at address. + @param newValue The value to write to address if oldValue compares true. + @param address The 4-byte aligned address of the data to update atomically. + @result true if newValue was written to the address. */ + +extern Boolean OSCompareAndSwap( UInt32 oldValue, UInt32 newValue, UInt32 * address ); + +/*! @function OSAddAtomic + @abstract 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSAddAtomic function adds the specified amount to the value at the specified address and returns the result. + @param amount The amount to add. + @param address The 4-byte aligned address of the value to update atomically. 
+ @result The result of the addition. */ + +extern SInt32 OSAddAtomic(SInt32 amount, SInt32 * address); + +/*! @function OSAddAtomic16 + @abstract 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the result. + @param amount The amount to add. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the addition. */ + +extern SInt16 OSAddAtomic16(SInt32 amount, SInt16 * address); + +/*! @function OSAddAtomic8 + @abstract 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the result. + @param amount The amount to add. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the addition. */ + +extern SInt8 OSAddAtomic8(SInt32 amount, SInt8 * address); + +/*! @function OSIncrementAtomic + @abstract 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSIncrementAtomic function increments the value at the specified address by one and returns the value as it was before the change. + @param address The 4-byte aligned address of the value to update atomically. + @result The value before the increment. */ + +extern SInt32 OSIncrementAtomic(SInt32 * address); + +/*! @function OSIncrementAtomic16 + @abstract 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. 
+ @discussion The OSIncrementAtomic16 function increments the value at the specified address by one and returns the value as it was before the change. + @param address The 4-byte aligned address of the value to update atomically. + @result The value before the increment. */ + +extern SInt16 OSIncrementAtomic16(SInt16 * address); + +/*! @function OSIncrementAtomic8 + @abstract 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSIncrementAtomic8 function increments the value at the specified address by one and returns the value as it was before the change. + @param address The 4-byte aligned address of the value to update atomically. + @result The value before the increment. */ + +extern SInt8 OSIncrementAtomic8(SInt8 * address); + +/*! @function OSDecrementAtomic + @abstract 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSDecrementAtomic function decrements the value at the specified address by one and returns the value as it was before the change. + @param address The 4-byte aligned address of the value to update atomically. + @result The value before the decrement. */ + +extern SInt32 OSDecrementAtomic(SInt32 * address); + +/*! @function OSDecrementAtomic16 + @abstract 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the value as it was before the change. + @param address The 4-byte aligned address of the value to update atomically. + @result The value before the decrement. */ + +extern SInt16 OSDecrementAtomic16(SInt16 * address); + +/*! 
@function OSDecrementAtomic8 + @abstract 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the value as it was before the change. + @param address The 4-byte aligned address of the value to update atomically. + @result The value before the decrement. */ + +extern SInt8 OSDecrementAtomic8(SInt8 * address); + +/*! @function OSBitAndAtomic + @abstract 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically and with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical and. */ + +extern UInt32 OSBitAndAtomic(UInt32 mask, UInt32 * address); + +/*! @function OSBitAndAtomic16 + @abstract 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically and with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical and. */ + +extern UInt16 OSBitAndAtomic16(UInt32 mask, UInt16 * address); + +/*! @function OSBitAndAtomic8 + @abstract 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. 
+ @discussion The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically and with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical and. */ + +extern UInt8 OSBitAndAtomic8(UInt32 mask, UInt8 * address); + +/*! @function OSBitOrAtomic + @abstract 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically or with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical or. */ + +extern UInt32 OSBitOrAtomic(UInt32 mask, UInt32 * address); + +/*! @function OSBitOrAtomic16 + @abstract 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically or with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical or. */ + +extern UInt16 OSBitOrAtomic16(UInt32 mask, UInt16 * address); + +/*! @function OSBitOrAtomic8 + @abstract 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically or with the value. 
+ @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical or. */ + +extern UInt8 OSBitOrAtomic8(UInt32 mask, UInt8 * address); + +/*! @function OSBitXorAtomic + @abstract 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically or with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical xor. */ + +extern UInt32 OSBitXorAtomic(UInt32 mask, UInt32 * address); + +/*! @function OSBitXorAtomic16 + @abstract 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically or with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical xor. */ + +extern UInt16 OSBitXorAtomic16(UInt32 mask, UInt16 * address); + +/*! @function OSBitXorAtomic8 + @abstract 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the result. + @param mask The mask to logically or with the value. + @param address The 4-byte aligned address of the value to update atomically. + @result The result of the logical xor. */ + +extern UInt8 OSBitXorAtomic8(UInt32 mask, UInt8 * address); + +/*! 
@function OSTestAndSet + @abstract Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise. + @param bit The bit number in the range 0 through 7. + @param address The address of the byte to update atomically. + @result true if the bit was already set, false otherwise. */ + +extern Boolean OSTestAndSet(UInt32 bit, UInt8 * startAddress); + +/*! @function OSTestAndClear + @abstract Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise. + @param bit The bit number in the range 0 through 7. + @param address The address of the byte to update atomically. + @result true if the bit was already clear, false otherwise. */ + +extern Boolean OSTestAndClear(UInt32 bit, UInt8 * startAddress); + +/*! @function OSEnqueueAtomic + @abstract Singly linked list head insertion, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSEnqueueAtomic function places an element at the head of a single linked list, which is specified with the address of a head pointer, listHead. The element structure has a next field whose offset is specified. + @param listHead The address of a head pointer for the list . + @param element The list element to insert at the head of the list. + @param elementNextFieldOffset The byte offset into the element where a pointer to the next element in the list is stored. */ + +extern void OSEnqueueAtomic(void ** listHead, void * element, + SInt32 elementNextFieldOffset); + +/*! 
@function OSDequeueAtomic + @abstract Singly linked list element head removal, performed atomically with respect to all devices that participate in the coherency architecture of the platform. + @discussion The OSDequeueAtomic function removes an element from the head of a single linked list, which is specified with the address of a head pointer, listHead. The element structure has a next field whose offset is specified. + @param listHead The address of a head pointer for the list . + @param elementNextFieldOffset The byte offset into the element where a pointer to the next element in the list is stored. + @result A removed element, or zero if the list is empty. */ + +extern void * OSDequeueAtomic(void ** listHead, + SInt32 elementNextFieldOffset); + +/*! @function OSSynchronizeIO + @abstract The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. + @discussion The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. It executes the eieio instruction on PowerPC processors. */ + +static __inline__ void OSSynchronizeIO(void) +{ +#if defined(__ppc__) + __asm__ ("eieio"); +#endif +} + +#if defined(__cplusplus) +} +#endif + +#endif /* ! _OS_OSATOMIC_H */ + + diff --git a/libkern/libkern/OSBase.h b/libkern/libkern/OSBase.h new file mode 100644 index 000000000..cfc9c01fc --- /dev/null +++ b/libkern/libkern/OSBase.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSBASE_H +#define _OS_OSBASE_H + +#include + +#include + +#if !defined(OS_INLINE) + #if defined(__GNUC__) + #define OS_INLINE static __inline__ + #elif defined(__MWERKS__) || defined(__cplusplus) + #define OS_INLINE static inline + #endif +#endif + +#endif /* _OS_OSBASE_H */ + diff --git a/libkern/libkern/OSByteOrder.h b/libkern/libkern/OSByteOrder.h new file mode 100644 index 000000000..24ad759ca --- /dev/null +++ b/libkern/libkern/OSByteOrder.h @@ -0,0 +1,778 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSBYTEORDER_H +#define _OS_OSBYTEORDER_H + +#include + +#if defined(__ppc__) +#include +#elif defined(__i386__) +#include +#else +#include +#endif + +enum { + OSUnknownByteOrder, + OSLittleEndian, + OSBigEndian +}; + +OS_INLINE +UInt32 +OSHostByteOrder(void) { + UInt32 x = (OSBigEndian << 24) | OSLittleEndian; + return (UInt32)*((UInt8 *)&x); +} + +/* Macros for swapping constant values in the preprocessing stage. */ +#define OSSwapConstInt16(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8)) + +#define OSSwapConstInt32(x) ((OSSwapConstInt16(x) << 16) | \ + (OSSwapConstInt16((x) >> 16))) + +#define OSSwapConstInt64(x) ((OSSwapConstInt32(x) << 32) | \ + (OSSwapConstInt32((x) >> 32))) + +#if defined(__BIG_ENDIAN__) + +/* Functions for loading big endian to host endianess. */ + +OS_INLINE +UInt +OSReadBigInt( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt *)((UInt8 *)base + offset); +} + +OS_INLINE +UInt16 +OSReadBigInt16( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt16 *)((UInt8 *)base + offset); +} + +OS_INLINE +UInt32 +OSReadBigInt32( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt32 *)((UInt8 *)base + offset); +} + +OS_INLINE +UInt64 +OSReadBigInt64( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt64 *)((UInt8 *)base + offset); +} + +/* Functions for storing host endianess to big endian. 
*/ + +OS_INLINE +void +OSWriteBigInt( + volatile void * base, + UInt offset, + UInt data +) +{ + *(volatile UInt *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteBigInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + *(volatile UInt16 *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteBigInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + *(volatile UInt32 *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteBigInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + *(volatile UInt64 *)((UInt8 *)base + offset) = data; +} + +/* Functions for loading little endian to host endianess. */ + +OS_INLINE +UInt +OSReadLittleInt( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt(base, offset); +} + +OS_INLINE +UInt16 +OSReadLittleInt16( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt16(base, offset); +} + +OS_INLINE +UInt32 +OSReadLittleInt32( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt32(base, offset); +} + +OS_INLINE +UInt64 +OSReadLittleInt64( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt64(base, offset); +} + +/* Functions for storing host endianess to little endian. */ + +OS_INLINE +void +OSWriteLittleInt( + volatile void * base, + UInt offset, + UInt data +) +{ + OSWriteSwapInt(base, offset, data); +} + +OS_INLINE +void +OSWriteLittleInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + OSWriteSwapInt16(base, offset, data); +} + +OS_INLINE +void +OSWriteLittleInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + OSWriteSwapInt32(base, offset, data); +} + +OS_INLINE +void +OSWriteLittleInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + OSWriteSwapInt64(base, offset, data); +} + +/* Host endianess to big endian byte swapping macros for constants. 
*/ + +#define OSSwapHostToBigConstInt16(x) (x) +#define OSSwapHostToBigConstInt32(x) (x) +#define OSSwapHostToBigConstInt64(x) (x) + +/* Generic host endianess to big endian byte swapping functions. */ + +OS_INLINE +UInt +OSSwapHostToBigInt( + UInt data +) +{ + return data; +} + +OS_INLINE +UInt16 +OSSwapHostToBigInt16( + UInt16 data +) +{ + return data; +} + +OS_INLINE +UInt32 +OSSwapHostToBigInt32( + UInt32 data +) +{ + return data; +} + +OS_INLINE +UInt64 +OSSwapHostToBigInt64( + UInt64 data +) +{ + return data; +} + +/* Host endianess to little endian byte swapping macros for constants. */ + +#define OSSwapHostToLittleConstInt16(x) OSSwapConstInt16(x) +#define OSSwapHostToLittleConstInt32(x) OSSwapConstInt32(x) +#define OSSwapHostToLittleConstInt64(x) OSSwapConstInt64(x) + +/* Generic host endianess to little endian byte swapping functions. */ + +OS_INLINE +UInt +OSSwapHostToLittleInt( + UInt data +) +{ + return OSSwapInt(data); +} + +OS_INLINE +UInt16 +OSSwapHostToLittleInt16( + UInt16 data +) +{ + return OSSwapInt16(data); +} + +OS_INLINE +UInt32 +OSSwapHostToLittleInt32( + UInt32 data +) +{ + return OSSwapInt32(data); +} + +OS_INLINE +UInt64 +OSSwapHostToLittleInt64( + UInt64 data +) +{ + return OSSwapInt64(data); +} + +/* Big endian to host endianess byte swapping macros for constants. */ + +#define OSSwapBigToHostConstInt16(x) (x) +#define OSSwapBigToHostConstInt32(x) (x) +#define OSSwapBigToHostConstInt64(x) (x) + +/* Generic big endian to host endianess byte swapping functions. */ + +OS_INLINE +UInt +OSSwapBigToHostInt( + UInt data +) +{ + return data; +} + +OS_INLINE +UInt16 +OSSwapBigToHostInt16( + UInt16 data +) +{ + return data; +} + +OS_INLINE +UInt32 +OSSwapBigToHostInt32( + UInt32 data +) +{ + return data; +} + +OS_INLINE +UInt64 +OSSwapBigToHostInt64( + UInt64 data +) +{ + return data; +} + +/* Little endian to host endianess byte swapping macros for constants. 
*/ + +#define OSSwapLittleToHostConstInt16(x) OSSwapConstInt16(x) +#define OSSwapLittleToHostConstInt32(x) OSSwapConstInt32(x) +#define OSSwapLittleToHostConstInt64(x) OSSwapConstInt64(x) + +/* Generic little endian to host endianess byte swapping functions. */ + +OS_INLINE +UInt +OSSwapLittleToHostInt( + UInt data +) +{ + return OSSwapInt(data); +} + +OS_INLINE +UInt16 +OSSwapLittleToHostInt16( + UInt16 data +) +{ + return OSSwapInt16(data); +} + +OS_INLINE +UInt32 +OSSwapLittleToHostInt32( + UInt32 data +) +{ + return OSSwapInt32(data); +} + +OS_INLINE +UInt64 +OSSwapLittleToHostInt64( + UInt64 data +) +{ + return OSSwapInt64(data); +} + +#elif defined(__LITTLE_ENDIAN__) + +/* Functions for loading big endian to host endianess. */ + +OS_INLINE +UInt +OSReadBigInt( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt(base, offset); +} + +OS_INLINE +UInt16 +OSReadBigInt16( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt16(base, offset); +} + +OS_INLINE +UInt32 +OSReadBigInt32( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt32(base, offset); +} + +OS_INLINE +UInt64 +OSReadBigInt64( + volatile void * base, + UInt offset +) +{ + return OSReadSwapInt64(base, offset); +} + +/* Functions for storing host endianess to big endian. */ + +OS_INLINE +void +OSWriteBigInt( + volatile void * base, + UInt offset, + UInt data +) +{ + OSWriteSwapInt(base, offset, data); +} + +OS_INLINE +void +OSWriteBigInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + OSWriteSwapInt16(base, offset, data); +} + +OS_INLINE +void +OSWriteBigInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + OSWriteSwapInt32(base, offset, data); +} + +OS_INLINE +void +OSWriteBigInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + OSWriteSwapInt64(base, offset, data); +} + +/* Functions for loading little endian to host endianess. 
*/ + +OS_INLINE +UInt +OSReadLittleInt( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt *)((UInt8 *)base + offset); +} + +OS_INLINE +UInt16 +OSReadLittleInt16( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt16 *)((UInt8 *)base + offset); +} + +OS_INLINE +UInt32 +OSReadLittleInt32( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt32 *)((UInt8 *)base + offset); +} + +OS_INLINE +UInt64 +OSReadLittleInt64( + volatile void * base, + UInt offset +) +{ + return *(volatile UInt64 *)((UInt8 *)base + offset); +} + +/* Functions for storing host endianess to little endian. */ + +OS_INLINE +void +OSWriteLittleInt( + volatile void * base, + UInt offset, + UInt data +) +{ + *(volatile UInt *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteLittleInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + *(volatile UInt16 *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteLittleInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + *(volatile UInt32 *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteLittleInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + *(volatile UInt64 *)((UInt8 *)base + offset) = data; +} + +/* Host endianess to big endian byte swapping macros for constants. */ + +#define OSSwapHostToBigConstInt16(x) OSSwapConstInt16(x) +#define OSSwapHostToBigConstInt32(x) OSSwapConstInt32(x) +#define OSSwapHostToBigConstInt64(x) OSSwapConstInt64(x) + +/* Generic host endianess to big endian byte swapping functions. 
*/ + +OS_INLINE +UInt +OSSwapHostToBigInt( + UInt data +) +{ + return OSSwapInt(data); +} + +OS_INLINE +UInt16 +OSSwapHostToBigInt16( + UInt16 data +) +{ + return OSSwapInt16(data); +} + +OS_INLINE +UInt32 +OSSwapHostToBigInt32( + UInt32 data +) +{ + return OSSwapInt32(data); +} + +OS_INLINE +UInt64 +OSSwapHostToBigInt64( + UInt64 data +) +{ + return OSSwapInt64(data); +} + +/* Host endianess to little endian byte swapping macros for constants. */ + +#define OSSwapHostToLittleConstInt16(x) (x) +#define OSSwapHostToLittleConstInt32(x) (x) +#define OSSwapHostToLittleConstInt64(x) (x) + +/* Generic host endianess to little endian byte swapping functions. */ + +OS_INLINE +UInt +OSSwapHostToLittleInt( + UInt data +) +{ + return data; +} + +OS_INLINE +UInt16 +OSSwapHostToLittleInt16( + UInt16 data +) +{ + return data; +} + +OS_INLINE +UInt32 +OSSwapHostToLittleInt32( + UInt32 data +) +{ + return data; +} + +OS_INLINE +UInt64 +OSSwapHostToLittleInt64( + UInt64 data +) +{ + return data; +} + +/* Big endian to host endianess byte swapping macros for constants. */ + +#define OSSwapBigToHostConstInt16(x) OSSwapConstInt16(x) +#define OSSwapBigToHostConstInt32(x) OSSwapConstInt32(x) +#define OSSwapBigToHostConstInt64(x) OSSwapConstInt64(x) + +/* Generic big endian to host endianess byte swapping functions. */ + +OS_INLINE +UInt +OSSwapBigToHostInt( + UInt data +) +{ + return OSSwapInt(data); +} + +OS_INLINE +UInt16 +OSSwapBigToHostInt16( + UInt16 data +) +{ + return OSSwapInt16(data); +} + +OS_INLINE +UInt32 +OSSwapBigToHostInt32( + UInt32 data +) +{ + return OSSwapInt32(data); +} + +OS_INLINE +UInt64 +OSSwapBigToHostInt64( + UInt64 data +) +{ + return OSSwapInt64(data); +} + +/* Little endian to host endianess byte swapping macros for constants. */ + +#define OSSwapLittleToHostConstInt16(x) (x) +#define OSSwapLittleToHostConstInt32(x) (x) +#define OSSwapLittleToHostConstInt64(x) (x) + +/* Generic little endian to host endianess byte swapping functions. 
*/ + +OS_INLINE +UInt +OSSwapLittleToHostInt( + UInt data +) +{ + return data; +} + +OS_INLINE +UInt16 +OSSwapLittleToHostInt16( + UInt16 data +) +{ + return data; +} + +OS_INLINE +UInt32 +OSSwapLittleToHostInt32( + UInt32 data +) +{ + return data; +} + +OS_INLINE +UInt64 +OSSwapLittleToHostInt64( + UInt64 data +) +{ + return data; +} + +#else +#error Unknown endianess. +#endif + +#endif /* ! _OS_OSBYTEORDER_H */ + + diff --git a/libkern/libkern/OSReturn.h b/libkern/libkern/OSReturn.h new file mode 100644 index 000000000..5e925ac03 --- /dev/null +++ b/libkern/libkern/OSReturn.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +/* + * Core IOReturn values. Others may be family defined. 
+ */
+
+#ifndef __LIBKERN_OSRETURN_H
+#define __LIBKERN_OSRETURN_H
+
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+#include <mach/error.h>
+
+typedef kern_return_t OSReturn;
+
+#ifndef sys_libkern
+#define sys_libkern err_system(0x37)
+#endif /* sys_libkern */
+
+#define sub_libkern_common err_sub(0)
+#define sub_libkern_metaclass err_sub(1)
+#define sub_libkern_reserved err_sub(-1)
+
+#define libkern_common_err(return) \
+    (sys_libkern|sub_libkern_common|(return))
+#define libkern_metaclass_err(return) \
+    (sys_libkern|sub_libkern_metaclass|(return))
+
+#define kOSReturnSuccess KERN_SUCCESS // OK
+#define kOSReturnError libkern_common_err(1) // general error
+
+
+// OSMetaClass subsystem error's
+#define kOSMetaClassInternal libkern_metaclass_err(1) // runtime internal error
+#define kOSMetaClassHasInstances libkern_metaclass_err(2) // Can't unload outstanding instances
+#define kOSMetaClassNoInit libkern_metaclass_err(3) // kmodInitializeLoad wasn't called, runtime internal error
+#define kOSMetaClassNoTempData libkern_metaclass_err(4) // Allocation failure internal data
+#define kOSMetaClassNoDicts libkern_metaclass_err(5) // Allocation failure for Metaclass internal dictionaries
+#define kOSMetaClassNoKModSet libkern_metaclass_err(6) // Allocation failure for internal kmodule set
+#define kOSMetaClassNoInsKModSet libkern_metaclass_err(7) // Can't insert the KMod set into the module dictionary
+#define kOSMetaClassNoSuper libkern_metaclass_err(8) // Can't associate a class with its super class
+#define kOSMetaClassInstNoSuper libkern_metaclass_err(9) // During instance construction can't find a super class
+#define kOSMetaClassDuplicateClass libkern_metaclass_err(10) // Found class duplicate during module load
+
+__END_DECLS
+
+#endif /* ! __LIBKERN_OSRETURN_H */
diff --git a/libkern/libkern/OSTypes.h b/libkern/libkern/OSTypes.h
new file mode 100644
index 000000000..120db5e11
--- /dev/null
+++ b/libkern/libkern/OSTypes.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc.
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSTYPES_H +#define _OS_OSTYPES_H + +typedef unsigned int UInt; +typedef signed int SInt; + +#ifndef __MACTYPES__ /* CF MacTypes.h */ +#ifndef __TYPES__ /* guess... Mac Types.h */ + +typedef unsigned char UInt8; +typedef unsigned short UInt16; +typedef unsigned long UInt32; +typedef unsigned long long UInt64; +#if defined(__BIG_ENDIAN__) +typedef struct UnsignedWide { + UInt32 hi; + UInt32 lo; +} UnsignedWide; +#elif defined(__LITTLE_ENDIAN__) +typedef struct UnsignedWide { + UInt32 lo; + UInt32 hi; +} UnsignedWide; +#else +#error Unknown endianess. +#endif + +typedef signed char SInt8; +typedef signed short SInt16; +typedef signed long SInt32; +typedef signed long long SInt64; +#if defined(__BIG_ENDIAN__) +typedef struct wide { + SInt32 hi; + UInt32 lo; +} wide; +#elif defined(__LITTLE_ENDIAN__) +typedef struct wide { + UInt32 lo; + SInt32 hi; +} wide; +#else +#error Unknown endianess. 
+#endif + +typedef SInt32 OSStatus; +typedef UnsignedWide AbsoluteTime; +typedef UInt32 OptionBits; + +typedef unsigned char Boolean; + +#endif /* __TYPES__ */ +#endif /* __MACTYPES__ */ + + +#endif /* _OS_OSTYPES_H */ + + diff --git a/libkern/libkern/c++/Makefile b/libkern/libkern/c++/Makefile new file mode 100644 index 000000000..3c98e5543 --- /dev/null +++ b/libkern/libkern/c++/Makefile @@ -0,0 +1,53 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + OSArray.h \ + OSBoolean.h \ + OSCollection.h \ + OSCollectionIterator.h \ + OSContainers.h \ + OSCPPDebug.h \ + OSData.h \ + OSDictionary.h \ + OSIterator.h \ + OSLib.h \ + OSMetaClass.h \ + OSNumber.h \ + OSObject.h \ + OSOrderedSet.h \ + OSSerialize.h \ + OSSet.h \ + OSString.h \ + OSSymbol.h \ + OSUnserialize.h + +INSTALL_MD_LIST = + +INSTALL_MD_DIR = libkern/c++ + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = libkern/c++ + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/libkern/c++/OSArray.h b/libkern/libkern/c++/OSArray.h new file mode 100644 index 000000000..03630ed7a --- /dev/null +++ b/libkern/libkern/c++/OSArray.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOArray.h created by rsulack on Thu 11-Sep-1997 */ +/* IOArray.h converted to C++ by gvdl on Fri 1998-10-30 */ + +#ifndef _OS_OSARRAY_H +#define _OS_OSARRAY_H + +#include + +class OSSerialize; + +/*! + @class OSArray + @abstract A collection class whose instances maintain a list of object references. + @discussion + An instance of an OSArray is a mutable collection which maintains a list of references to OSMetaClassBase derived objects. Objects are referenced by index, where the index is an integer with a value of 0 to N-1 where N is the number of objects contained within the array. + + Objects placed into an array are automatically retained and objects removed or replaced are automatically released. All objects are released when the array is freed. +*/ + +class OSArray : public OSCollection +{ + friend class OSSet; + + OSDeclareDefaultStructors(OSArray) + +protected: + const OSMetaClassBase **array; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + /* + * OSCollectionIterator interfaces. + */ + virtual unsigned int iteratorSize() const; + virtual bool initIterator(void *iterator) const; + virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const; + +public: + /*! 
+ @function withCapacity + @abstract A static constructor function to create and initialize a new instance of OSArray with a given capacity. + @param capacity The initial capacity (number of references) of the OSArray instance. + @result Returns a reference to an instance of OSArray or 0 if an error occurred. + */ + static OSArray *withCapacity(unsigned int capacity); + /*! + @function withObjects + @abstract A static constructor function to create and initialize a new instance of OSArray and populates it with a list of objects provided. + @param objects A static array of references to OSMetaClassBase derived objects. + @param count The number of objects provided. + @param capacity The initial storage size of the OSArray instance. If 0, the capacity will be set to the size of count, else the capacity must be greater than or equal to count. + @result Returns a reference to a new instance of OSArray or 0 if an error occurred. + */ + static OSArray *withObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity = 0); + /*! + @function withArray + @abstract A static constructor function to create and initialize an instance of OSArray of a given capacity and populate it with the contents of the supplied OSArray object. + @param array An instance of OSArray from which the new instance will acquire its contents. + @param capacity The capacity of the new OSArray. If 0, the capacity will be set to the number of elements in the array, else the capacity must be greater than or equal to the number of elements in the array. + @result Returns a reference to a new instance of OSArray or 0 if an error occurred. + */ + static OSArray *withArray(const OSArray *array, + unsigned int capacity = 0); + + /*! + @function initWithCapacity + @abstract A member function which initializes an instance of OSArray. + @param capacity The initial capacity of the new instance of OSArray. + @result Returns true if initialization succeeded or false if not. 
+ */ + virtual bool initWithCapacity(unsigned int capacity); + /*! + @function initWithObjects + @abstract A member function which initializes an instance of OSArray and populates it with the given list of objects. + @param objects A static array containing references to OSMetaClassBase derived objects. + @param count The number of objects to added to the array. + @param capacity The initial capacity of the new instance of OSArray. If 0, the capacity will be set to the same value as the 'count' parameter, else capacity must be greater than or equal to the value of 'count'. + @result Returns a true if initialization succeeded or false if not. + */ + virtual bool initWithObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity = 0); + /*! + @function initWithArray + @abstract A member function which initializes an instance of OSArray and populates it with the contents of the supplied OSArray object. + @param anArray An instance of OSArray containing the references to objects which will be copied to the new instances of OSArray. + @param capacity The initial capacity of the new instance of OSArray. If 0, the capacity will be set to the number of elements in the array, else the capacity must be greater than or equal to the number of elements in the array. + @result Returns a true if initialization succeeded or false if not. + */ + virtual bool initWithArray(const OSArray *anArray, + unsigned int theCapacity = 0); + /*! + @function free + @abstract Deallocates and releases all resources used by the OSArray instance. Normally, this is not called directly. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + + /*! + @function getCount + @abstract A member function which returns the number of references contained within the OSArray object. + @result Returns the number of items within the OSArray object. + */ + virtual unsigned int getCount() const; + /*! 
+ @function getCapacity + @abstract A member function which returns the storage capacity of the OSArray object. + @result Returns the storage capacity of the OSArray object. + */ + virtual unsigned int getCapacity() const; + /*! + @function getCapacityIncrement + @abstract A member function which returns the size by which the array will grow. + @result Returns the size by which the array will grow. + */ + virtual unsigned int getCapacityIncrement() const; + /*! + @function setCapacityIncrement + @abstract A member function which sets the growth size of the array. + @result Returns the new growth size. + */ + virtual unsigned int setCapacityIncrement(unsigned increment); + + /*! + @function ensureCapacity + @abstract A member function which will expand the size of the collection to a given storage capacity. + @param newCapacity The new capacity for the array. + @result Returns the new capacity of the array or the previous capacity upon error. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity); + + /*! + @function flushCollection + @abstract A member function which removes and releases all items within the array. + */ + virtual void flushCollection(); + + /*! + @function setObject + @abstract A member function which appends an object onto the end of the array. + @param anObject The object to add to the OSArray instance. The object will be retained automatically. + @result Returns true if the addition of 'anObject' was successful, false if not; failure usually results from failing to allocate the necessary memory. + */ + virtual bool setObject(const OSMetaClassBase *anObject); + /*! + @function setObject + @abstract A member function which inserts an object into the array at a particular index. + @param index The index into the array to insert the object. + @param anObject The object to add to the OSArray instance. The object will be retained automatically. + @result Returns true if the addition of 'anObject' was successful, false if not. 
+ */ + virtual bool setObject(unsigned int index, const OSMetaClassBase *anObject); + + /*! + @function merge + @abstract A member function which appends the contents of an array onto the receiving array. + @param otherArray The array whose contents will be appended to the receiving array. + @result Returns true when merging was successful, false otherwise. + */ + virtual bool merge(const OSArray *otherArray); + + /*! + @function replaceObject + @abstract A member function which will replace an object in an array at a given index. The original object will be released and the new object will be retained. + @param index The index into the array at which the new object will be placed. + @param anObject The object to be placed into the array. + */ + virtual void replaceObject(unsigned int index, const OSMetaClassBase *anObject); + /*! + @function removeObject + @abstract A member function which removes an object from the array. + @param index The index of the object to be removed. + @discussion This function removes an object from the array which is located at a given index. Once removed the contents of the array will shift to fill in the vacated spot. The removed object is automatically released. + */ + virtual void removeObject(unsigned int index); + + /*! + @function isEqualTo + @abstract A member function which tests the equality of two OSArray objects. + @param anArray The array object being compared against the receiver. + @result Returns true if the two arrays are equivalent or false otherwise. + */ + virtual bool isEqualTo(const OSArray *anArray) const; + /*! + @function isEqualTo + @abstract A member function which compares the equality of the receiving array to an arbitrary object. + @param anObject The object to be compared against the receiver. + @result Returns true if the two objects are equivalent, that is they are either the same object or they are both arrays containing the same or equivalent objects, or false otherwise. 
+ */ + virtual bool isEqualTo(const OSMetaClassBase *anObject) const; + + /*! + @function getObject + @abstract A member function which returns a reference to an object located within the array at a given index. The caller should not release the returned object. + @param index The index into the array from which the reference to an object is taken. + @result Returns a reference to an object or 0 if the index is beyond the bounds of the array. + */ + virtual OSObject *getObject(unsigned int index) const; + /*! + @function getLastObject + @abstract A member function which returns a reference to the last object in the array. The caller should not release the returned object. + @result Returns a reference to the last object in the array or 0 if the array is empty. + */ + virtual OSObject *getLastObject() const; + + /*! + @function getNextIndexOfObject + @abstract A member function which returns the next array index of an object, at or beyond the supplied index. + @result Returns the next index of the object in the array or (-1) if none is found. + */ + virtual unsigned int getNextIndexOfObject(const OSMetaClassBase * anObject, + unsigned int index) const; + + /*! + @function serialize + @abstract A member function which archives the receiver. + @param s The OSSerialize object. + @result Returns true if serialization was successful, false if not. 
+ */ + virtual bool serialize(OSSerialize *s) const; + + OSMetaClassDeclareReservedUnused(OSArray, 0); + OSMetaClassDeclareReservedUnused(OSArray, 1); + OSMetaClassDeclareReservedUnused(OSArray, 2); + OSMetaClassDeclareReservedUnused(OSArray, 3); + OSMetaClassDeclareReservedUnused(OSArray, 4); + OSMetaClassDeclareReservedUnused(OSArray, 5); + OSMetaClassDeclareReservedUnused(OSArray, 6); + OSMetaClassDeclareReservedUnused(OSArray, 7); +}; + +#endif /* !_OS_OSARRAY_H */ diff --git a/libkern/libkern/c++/OSBoolean.h b/libkern/libkern/c++/OSBoolean.h new file mode 100644 index 000000000..c228492b5 --- /dev/null +++ b/libkern/libkern/c++/OSBoolean.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSBoolean.cpp created by rsulack on Tue Oct 12 1999 */ + +#ifndef _OS_OSBOOLEAN_H +#define _OS_OSBOOLEAN_H + +#include + +class OSString; + +/*! + @class OSBoolean + @abstract Container class for boolean values. 
+*/ +class OSBoolean : public OSObject +{ + OSDeclareDefaultStructors(OSBoolean) + +protected: + bool value; + +public: + static void initialize(); + + /*D + @function withBoolean + @abstract A static constructor function to create and initialize an instance of OSBoolean. + @param value A boolean value. + @result Returns an instance of OSBoolean, or 0 if an error occurred. + */ + static OSBoolean *withBoolean(bool value); + + /*D + @function free + @abstract A member function to release all resources used by the OSBoolean instance. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + + /*! + @function isTrue + @abstract A member function to test if the boolean object is true. + @result Returns true if the OSBoolean object is true, false otherwise. + */ + virtual bool isTrue() const; + /*! + @function isFalse + @abstract A member function to test if the boolean object is false. + @result Returns true if the OSBoolean object is false, false otherwise. + */ + virtual bool isFalse() const; + + /*! + @function getValue + @abstract Obtains the value of the OSBoolean object as the standard C++ type bool. + @result The value of the OSBoolean object. + */ + virtual bool getValue() const; + + /*! + @function isEqualTo + @abstract A member function to test the equality of two OSBoolean objects. + @param boolean An OSBoolean object to be compared against the receiver. + @result Returns true if the two objects are equivalent. + */ + virtual bool isEqualTo(const OSBoolean *boolean) const; + /*! + @function isEqualTo + @abstract A member function to test the equality between an arbitrary OSObject derived object and an OSBoolean object. + @param obj An OSObject derived object to be compared against the receiver. + @result Returns true if the two objects are equivalent. + */ + virtual bool isEqualTo(const OSMetaClassBase *obj) const; + + /*! + @function serialize + @abstract A member function which archives the receiver. 
+ @param s The OSSerialize object. + @result Returns true if serialization was successful, false if not. + */ + virtual bool serialize(OSSerialize *s) const; + + OSMetaClassDeclareReservedUnused(OSBoolean, 0); + OSMetaClassDeclareReservedUnused(OSBoolean, 1); + OSMetaClassDeclareReservedUnused(OSBoolean, 2); + OSMetaClassDeclareReservedUnused(OSBoolean, 3); + OSMetaClassDeclareReservedUnused(OSBoolean, 4); + OSMetaClassDeclareReservedUnused(OSBoolean, 5); + OSMetaClassDeclareReservedUnused(OSBoolean, 6); + OSMetaClassDeclareReservedUnused(OSBoolean, 7); +}; + +/*! + @defined kOSBooleanTrue + @abstract The OSBoolean constant for "true". + @discussion The OSBoolean constant for "true". The object does not need to be retained or released. Comparisons of the form (booleanObject == kOSBooleanTrue) are acceptable and would be equivalent to (booleanObject->getValue() == true). +*/ +extern OSBoolean * const & kOSBooleanTrue; + +/*! + @defined kOSBooleanFalse + @abstract The OSBoolean constant for "false". + @discussion The OSBoolean constant for "false". The object does not need to be retained or released. Comparisons of the form (booleanObject == kOSBooleanFalse) are acceptable and would be equivalent to (booleanObject->getValue() == false). +*/ +extern OSBoolean * const & kOSBooleanFalse; + +#endif /* !_OS_OSBOOLEAN_H */ diff --git a/libkern/libkern/c++/OSCPPDebug.h b/libkern/libkern/c++/OSCPPDebug.h new file mode 100644 index 000000000..8d2eeeb10 --- /dev/null +++ b/libkern/libkern/c++/OSCPPDebug.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include + +#define OSCPP_DEBUG + +#ifdef OSCPP_DEBUG + +__BEGIN_DECLS + +extern int debug_malloc_size; +extern int debug_iomalloc_size; +extern int debug_container_malloc_size; +extern int debug_ivars_size; + +void OSPrintMemory( void ); + +__END_DECLS + +#endif + diff --git a/libkern/libkern/c++/OSCollection.h b/libkern/libkern/c++/OSCollection.h new file mode 100644 index 000000000..1b3a22600 --- /dev/null +++ b/libkern/libkern/c++/OSCollection.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOCollection.h created by gvdl on Thu 1998-10-22 */ + +#ifndef _OS_OSCOLLECTION_H +#define _OS_OSCOLLECTION_H + +#include + +/*! + @class OSCollection + @abstract Abstract super class for all collections. + @discussion + OSCollection is the abstract super class for all OSObject derived collections and provides the necessary interfaces for managing storage space and iteration through a collection. +*/ +class OSCollection : public OSObject +{ + friend class OSCollectionIterator; + + OSDeclareAbstractStructors(OSCollection) + +protected: + unsigned int updateStamp; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + // Member functions used by the OSCollectionIterator class. + /* + @function iteratorSize + @abstract A pure virtual member function to return the size of the iterator context. + @result Returns an integer size for the storage space required to contain context necessary for iterating through a collection. + @discussion + This member function is called by an OSCollectionIterator object to allow it to allocate enough storage space for the iterator context. This context contains the data necessary to iterate through the collection when getNextObjectForIterator() is called. + */ + virtual unsigned int iteratorSize() const = 0; + /* + @function initIterator + @abstract Pure virtual member function to allocate and initialize the iterator context data. + @param iterator The iterator context. + @result Returns true if initialization was successful, false otherwise. + */ + virtual bool initIterator(void *iterator) const = 0; + /* + @function getNextObjectForIterator + @abstract A pure virtual member function which returns the next member of a collection. + @param iterator The iterator context. + @param ret The object returned to the caller. + @result Returns true if an object was found, false otherwise. 
+ @discussion + This is the entry point used by an OSCollectionIterator object to advance to next object in the collection. The iterator context is passed to the receiver to allow it to find the location of the current object and then advance the iterator context to the next object. + */ + virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const = 0; + + /* + @function init + @abstract A member function to initialize the OSCollection object. + @result Returns true if an object was initialized successfully, false otherwise. + @discussion + This function is used to initialize state within a newly created OSCollection object. + */ + virtual bool init(); + +public: + /* + @function haveUpdated + @abstract A member function to track of all updates to the collection. + */ + void haveUpdated() { updateStamp++; }; + + /* + @function getCount + @abstract A pure virtual member function which returns the number of objects in the collection subclass. + @results Returns the number objects in a collection. + */ + virtual unsigned int getCount() const = 0; + /* + @function getCapacity + @abstract A pure virtual member function which returns the storage space in the collection subclass. + @results Returns the number objects in a collection. + */ + virtual unsigned int getCapacity() const = 0; + /* + @function getCapacityIncrement + @abstract A pure virtual member function which returns the growth factor of the collection subclass. + @results Returns the size by which the collection subclass should grow. + */ + virtual unsigned int getCapacityIncrement() const = 0; + /* + @function setCapacityIncrement + @abstract A pure virtual member function which sets the growth factor of the collection subclass. + @param increment The new size by which the capacity of the collection should grow. + @results Returns the new capacity increment. 
+ */ + virtual unsigned int setCapacityIncrement(unsigned increment) = 0; + + /* + @function ensureCapacity + @abstract A pure virtual member function which + @param newCapacity + @result + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity) = 0; + + /* + @function flushCollection + @abstract A pure virtual member function which + */ + virtual void flushCollection() = 0; + + OSMetaClassDeclareReservedUnused(OSCollection, 0); + OSMetaClassDeclareReservedUnused(OSCollection, 1); + OSMetaClassDeclareReservedUnused(OSCollection, 2); + OSMetaClassDeclareReservedUnused(OSCollection, 3); + OSMetaClassDeclareReservedUnused(OSCollection, 4); + OSMetaClassDeclareReservedUnused(OSCollection, 5); + OSMetaClassDeclareReservedUnused(OSCollection, 6); + OSMetaClassDeclareReservedUnused(OSCollection, 7); +}; + +#endif /* !_OS_OSCOLLECTION_H */ diff --git a/libkern/libkern/c++/OSCollectionIterator.h b/libkern/libkern/c++/OSCollectionIterator.h new file mode 100644 index 000000000..387b6355d --- /dev/null +++ b/libkern/libkern/c++/OSCollectionIterator.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOCollectionIterator.h created by gvdl on Fri 1998-10-30 */ + +#ifndef _OS_OSCOLLECTIONITERATOR_H +#define _OS_OSCOLLECTIONITERATOR_H + +#include + +class OSCollection; + +/*! + @class OSCollectionIterator + @discussion + OSCollectionIterator objects provide a consistent mechanism to iterate through all OSCollection derived collections. +*/ +class OSCollectionIterator : public OSIterator +{ + OSDeclareDefaultStructors(OSCollectionIterator) + +protected: + const OSCollection *collection; + void *collIterator; + unsigned int initialUpdateStamp; + bool valid; + +public: + /*! + @function withCollection + @abstract A static constructor function which creates and initializes an instance of OSCollectionIterator for the provided collection object. + @param inColl The OSCollection derived collection object to be iterated. + @result Returns a new instance of OSCollectionIterator or 0 on failure. + */ + static OSCollectionIterator *withCollection(const OSCollection *inColl); + + /*! + @function initWithCollection + @abstract A member function to initialize the instance of OSCollectionIterator with the provided collection object. + @param inColl The OSCollection derived collection object to be iterated. + @result Returns true if the initialization was successful or false on failure. + */ + virtual bool initWithCollection(const OSCollection *inColl); + /*! + @function free + @abstract A member function to release and deallocate all resources created or used by the OSCollectionIterator object. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + + /*! + @function reset + @abstract A member function which resets the iterator to begin the next iteration from the beginning of the collection. + */ + virtual void reset(); + + /*! + @function isValid + @abstract A member function for determining if the collection was modified during iteration. + */ + virtual bool isValid(); + + /*! 
+ @function getNextObject + @abstract A member function to get the next object in the collection being iterated. + @result Returns the next object in the collection or 0 when the end has been reached. + */ + virtual OSObject *getNextObject(); +}; + +#endif /* !_OS_OSCOLLECTIONITERATOR_H */ diff --git a/libkern/libkern/c++/OSContainers.h b/libkern/libkern/c++/OSContainers.h new file mode 100644 index 000000000..725a786fb --- /dev/null +++ b/libkern/libkern/c++/OSContainers.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOContainers.h created by rsulack on Fri 26-Jun-1998 */ + +#ifndef _OS_OSCONTAINERS_H +#define _OS_OSCONTAINERS_H + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#endif /* ! _OS_OSCONTAINERS_H */ diff --git a/libkern/libkern/c++/OSData.h b/libkern/libkern/c++/OSData.h new file mode 100644 index 000000000..2406be7c0 --- /dev/null +++ b/libkern/libkern/c++/OSData.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOData.h created by rsulack on Wed 17-Sep-1997 */ +/* IOData.h converted to C++ by gvdl on Fri 1998-10-30 */ + +#ifndef _OS_OSDATA_H +#define _OS_OSDATA_H + +#include + +class OSString; + +/*! + @class OSData + @abstract A container class to manage an array of bytes. +*/ +class OSData : public OSObject +{ + OSDeclareDefaultStructors(OSData) + +protected: + void *data; + unsigned int length; + unsigned int capacity; + unsigned int capacityIncrement; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: + /*! + @function withCapacity + @abstract A static constructor function to create and initialize an empty instance of OSData with a given capacity. + @param inCapacity The initial capacity of the OSData object in bytes. + @result Returns an instance of OSData or 0 if a failure occurs. + */ + static OSData *withCapacity(unsigned int inCapacity); + /*! + @function withBytes + @abstract A static constructor function to create and initialize an instance of OSData and copies in the provided data. 
+ @param bytes A buffer of data. + @param inLength The size of the given buffer. + @result Returns an instance of OSData or 0 if a failure occurs. + */ + static OSData *withBytes(const void *bytes, unsigned int inLength); + /*! + @function withBytesNoCopy + @abstract A static constructor function to create and initialize an instance of OSData which references a buffer of data. + @param bytes A reference to a block of data. + @param inLength The size of the data block. + @result Returns an instance of OSData or 0 if a failure occurs. + */ + static OSData *withBytesNoCopy(void *bytes, unsigned int inLength); + /*! + @function withData + @abstract A static constructor function to create and initialize an instance of OSData with the data provided. + @param inData An OSData object which provides the initial data. + @result Returns an instance of OSData or 0 if a failure occurs. + */ + static OSData *withData(const OSData *inData); + /*! + @function withData + @abstract A static constructor function to create and initialize an instance of OSData with a specific range of the data provided. + @param inData An OSData object which provides the initial data. + @param start The starting index at which the data will be copied. + @param inLength The number of bytes to be copied starting at index 'start'. + @result Returns an instance of OSData or 0 if a failure occurs. + */ + static OSData *withData(const OSData *inData, + unsigned int start, unsigned int inLength); + + /*! + @function initWithBytes + @abstract A member function to initialize an instance of OSData with the provided data. + @param bytes A pointer to a block of data to be copied. + @param inLength The length of the block of data. + @result Returns true if initialization was successful, false otherwise. + */ + virtual bool initWithCapacity(unsigned int inCapacity); + /*! + @function initWithBytes + @abstract A member function to initialize an instance of OSData which references a block of data. 
+ @param bytes A reference to a block of data + @param inLength The length of the block of data. + @result Returns true if initialization was successful, false otherwise. + */ + virtual bool initWithBytes(const void *bytes, unsigned int inLength); + /*! + @function initWithBytes + @abstract A member function to initialize an instance of OSData which references a block of data. + @param bytes A reference to a block of data + @param inLength The length of the block of data. + @result Returns true if initialization was successful, false otherwise. + */ + virtual bool initWithBytesNoCopy(void *bytes, unsigned int inLength); + /*! + @function initWithData + @abstract A member function to initialize an instance of OSData with the data provided. + @param inData An OSData object which provides the data to be copied. + @result Returns true if initialization was successful, false otherwise. + */ + virtual bool initWithData(const OSData *inData); + /*! + @function initWithData + @abstract A member function to initialize an instance of OSData with a specific range of the data provided + @param inData An OSData object. + @param start The starting range of data to be copied. + @param inLength The length in bytes of the data to be copied. + @result Returns true if initialization was successful, false otherwise. + */ + virtual bool initWithData(const OSData *inData, + unsigned int start, unsigned int inLength); + /*! + @function free + @abstract A member function which releases all resources created or used by the OSData object. + @discussion Do not call this function directly, use release() instead. + */ + virtual void free(); + + /*! + @function getLength + @abstract A member function which returns the length of the internal data buffer. + @result Returns an integer value for the length of data in the object's internal data buffer. + */ + virtual unsigned int getLength() const; + /*! 
+ @function getCapacity + @abstract A member function which returns the capacity of the internal data buffer. + @result Returns an integer value for the size of the object's internal data buffer. + */ + virtual unsigned int getCapacity() const; + /*! + @function getCapacityIncrement + @abstract A member function which returns the size by which the data buffer will grow. + @result Returns the size by which the data buffer will grow. + */ + virtual unsigned int getCapacityIncrement() const; + /*! + @function setCapacityIncrement + @abstract A member function which sets the growth size of the data buffer. + @result Returns the new growth size. + */ + virtual unsigned int setCapacityIncrement(unsigned increment); + /*! + @function ensureCapacity + @abstract A member function which will expand the size of the collection to a given storage capacity. + @param newCapacity The new capacity for the data buffer. + @result Returns the new capacity of the data buffer or the previous capacity upon error. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity); + /*! + @function appendBytes + @abstract A member function which appends a buffer of data onto the end of the object's internal data buffer. + @param bytes A pointer to the block of data. + @param inLength The length of the data block. + @result Returns true if the object was able to append the new data, false otherwise. + */ + virtual bool appendBytes(const void *bytes, unsigned int inLength); + /*! + @function appendBytes + @abstract A member function which appends the data contained in an OSData object to the receiver. + @param other An OSData object. + @result Returns true if appending the new data was successful, false otherwise. + */ + virtual bool appendBytes(const OSData *other); + + /*! + @function getBytesNoCopy + @abstract A member function to return a pointer to the OSData object's internal data buffer. + @result Returns a reference to the OSData object's internal data buffer. 
+ */ + virtual const void *getBytesNoCopy() const; + /*! + @function getBytesNoCopy + @abstract Returns a reference into the OSData object's internal data buffer at particular offset and with a particular length. + @param start The offset from the base of the internal data buffer. + @param inLength The length of window. + @result Returns a pointer at a particular offset into the data buffer, or 0 if the starting offset or length are not valid. + */ + virtual const void *getBytesNoCopy(unsigned int start, + unsigned int inLength) const; + + /*! + @function isEqualTo + @abstract A member function to test the equality of two OSData objects. + @param aData The OSData object to be compared to the receiver. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSData *aData) const; + /*! + @function isEqualTo + @abstract A member function to test the equality of an arbitrary block of data with the OSData object's internal data buffer. + @param someData A pointer to a block of data. + @param inLength The length of the block of data. + @result Returns true if the two blocks of data are equivalent, false otherwise. + */ + virtual bool isEqualTo(const void *someData, unsigned int inLength) const; + /*! + @function isEqualTo + @abstract A member function to test the equality between an OSData object and an arbitrary OSObject derived object. + @param obj An OSObject derived object. + @result Returns true if the two objects are equivalent. + */ + virtual bool isEqualTo(const OSMetaClassBase *obj) const; + /*! + @function isEqualTo + @abstract A member function to test the equality between an OSData object and an OSString object. + @param obj An OSString object + @result Returns true if the two objects are equivalent. + */ + virtual bool isEqualTo(const OSString *obj) const; + /*! + @function serialize + @abstract A member function which archives the receiver. + @param s The OSSerialize object. 
+ @result Returns true if serialization was successful, false if not. + */ + virtual bool serialize(OSSerialize *s) const; + + /*! + @function appendByte + @abstract A member function which appends a buffer of constant data onto the end of the object's internal data buffer. + @param byte A byte value to replicate as the added data. + @param inCount The length of the data to add. + @result Returns true if the object was able to append the new data, false otherwise. + */ + virtual bool appendByte(unsigned char byte, unsigned int inCount); + + + OSMetaClassDeclareReservedUnused(OSData, 0); + OSMetaClassDeclareReservedUnused(OSData, 1); + OSMetaClassDeclareReservedUnused(OSData, 2); + OSMetaClassDeclareReservedUnused(OSData, 3); + OSMetaClassDeclareReservedUnused(OSData, 4); + OSMetaClassDeclareReservedUnused(OSData, 5); + OSMetaClassDeclareReservedUnused(OSData, 6); + OSMetaClassDeclareReservedUnused(OSData, 7); +}; + +#endif /* !_OS_OSDATA_H */ diff --git a/libkern/libkern/c++/OSDictionary.h b/libkern/libkern/c++/OSDictionary.h new file mode 100644 index 000000000..f42c4b1d4 --- /dev/null +++ b/libkern/libkern/c++/OSDictionary.h @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * OSDictionary.h created by rsulack on Wed 17-Sep-1997 + * OSDictionary.h converted to C++ by gvdl on Fri 1998-10-30 + */ + +#ifndef _IOKIT_IODICTIONARY_H +#define _IOKIT_IODICTIONARY_H + +#include + +class OSArray; +class OSSymbol; +class OSString; + +/*! + @class OSDictionary + @abstract A collection class whose instances maintain a list of object references. Objects in the collection are acquired with unique associative keys. + @discussion + An instance of OSDictionary is a mutable container which contains a list of OSMetaClassBase derived object references and these objects are identified and acquired by unique associative keys. When an object is placed into a dictionary, a unique identifier or key must provided to identify the object within the collection. The key then must be provided to find the object within the collection. If an object is not found within the collection, a 0 is returned. Placing an object into a dictionary for a key, which already identifies an object within that dictionary, will replace the current object with the new object. + + Objects placed into a dictionary are automatically retained and objects removed or replaced are automatically released. All objects are released when the collection is freed. +*/ +class OSDictionary : public OSCollection +{ + OSDeclareDefaultStructors(OSDictionary) + +protected: + struct dictEntry { + const OSSymbol *key; + const OSMetaClassBase *value; + }; + dictEntry *dictionary; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + // Member functions used by the OSCollectionIterator class. 
+ virtual unsigned int iteratorSize() const; + virtual bool initIterator(void *iterator) const; + virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const; + +public: + /*! + @function withCapacity + @abstract A static constructor function to create and initialize an instance of OSDictionary. + @param capacity The initial storage capacity of the dictionary object. + @result Returns an instance of OSDictionary or 0 on failure. + */ + static OSDictionary *withCapacity(unsigned int capacity); + /*! + @function withObjects + @abstract A static constructor function to create and initialize an instance of OSDictionary and populate it with objects provided. + @param objects A static array of OSMetaClassBase derived objects. + @param keys A static array of OSSymbol keys. + @param count The number of items to be placed into the dictionary. + @param capacity The initial storage capacity of the dictionary object. If 0, the capacity will be set to the size of 'count', else this value must be greater or equal to 'count'. + @result Returns an instance of OSDictionary or 0 on failure. + */ + static OSDictionary *withObjects(const OSObject *objects[], + const OSSymbol *keys[], + unsigned int count, + unsigned int capacity = 0); + /*! + @function withObjects + @abstract A static constructor function to create and initialize an instance of OSDictionary and populate it with objects provided. + @param objects A static array of OSMetaClassBase derived objects. + @param keys A static array of OSString keys. + @param count The number of items to be placed into the dictionary. + @param capacity The initial storage capacity of the dictionary object. If 0, the capacity will be set to the size of 'count', else this value must be greater or equal to 'count'. + @result Returns an instance of OSDictionary or 0 on failure. + */ + static OSDictionary *withObjects(const OSObject *objects[], + const OSString *keys[], + unsigned int count, + unsigned int capacity = 0); + /*! 
+ @function withDictionary + @abstract A static constructor function to create and initialize an instance of OSDictionary and populate it with objects from another dictionary. + @param dict A dictionary whose contents will be placed in the new instance. + @param capacity The initial storage capacity of the dictionary object. If 0, the capacity will be set to the number of elements in the dictionary object, else the capacity must be greater than or equal to the number of elements in the dictionary. + @result Returns an instance of OSDictionary or 0 on failure. + */ + static OSDictionary *withDictionary(const OSDictionary *dict, + unsigned int capacity = 0); + + /*! + @function initWithCapacity + @abstract A member function to initialize an instance of OSDictionary. + @param capacity The initial storage capacity of the dictionary object. + @result Returns true if initialization succeeded or false on failure. + */ + virtual bool initWithCapacity(unsigned int capacity); + /*! + @function initWithObjects + @abstract A member function to initialize an instance of OSDictionary and populate it with the provided objects and keys. + @param objects A static array of OSMetaClassBase derived objects to be placed into the dictionary. + @param keys A static array of OSSymbol keys which identify the corresponding objects provided in the 'objects' parameter. + @param count The number of objects provided to the dictionary. + @param capacity The initial storage capacity of the dictionary object. If 0, the capacity will be set to the size of 'count', else the capacity must be greater than or equal to the value of 'count'. + @result Returns true if initialization succeeded or false on failure. + */ + virtual bool initWithObjects(const OSObject *objects[], + const OSSymbol *keys[], + unsigned int count, + unsigned int capacity = 0); + /*! 
+ @function initWithObjects + @abstract A member function to initialize an instance of OSDictionary and populate it with the provided objects and keys. + @param objects A static array of OSMetaClassBase derived objects to be placed into the dictionary. + @param keys A static array of OSString keys which identify the corresponding objects provided in the 'objects' parameter. + @param count The number of objects provided to the dictionary. + @param capacity The initial storage capacity of the dictionary object. If 0, the capacity will be set to the size of 'count', else the capacity must be greater than or equal to the value of 'count'. + @result Returns true if initialization succeeded or false on failure. + */ + virtual bool initWithObjects(const OSObject *objects[], + const OSString *keys[], + unsigned int count, + unsigned int capacity = 0); + /*! + @function initWithDictionary + @abstract A member function to initialize an instance of OSDictionary and populate it with the contents of another dictionary. + @param dict The dictionary containing the objects to be used to populate the receiving dictionary. + @param capacity The initial storage capacity of the dictionary. If 0, the value of capacity will be set to the number of elements in the dictionary object, else the value of capacity must be greater than or equal to the number of elements in the dictionary object. + @result Returns true if initialization succeeded or false on failure. + */ + virtual bool initWithDictionary(const OSDictionary *dict, + unsigned int capacity = 0); + /*! + @function free + @abstract A member functions to deallocate and release all resources used by the OSDictionary instance. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + + /*! + @function getCount + @abstract A member function which returns the current number of objects within the collection. + @result Returns the number of objects contained within the dictionary. 
+ */ + virtual unsigned int getCount() const; + /*! + @function getCapacity + @abstract A member function which returns the storage capacity of the collection. + @result Returns the storage capacity of the dictionary. + */ + virtual unsigned int getCapacity() const; + /*! + @function getCapacityIncrement + @abstract A member function which returns the growth size for the collection. + */ + virtual unsigned int getCapacityIncrement() const; + /*! + @function setCapacityIncrement + @abstract A member function to set the growth size of the collection. + @param increment The new growth size. + @result Returns the new capacity increment. + */ + virtual unsigned int setCapacityIncrement(unsigned increment); + + /*! + @function ensureCapacity + @abstract Member function to grow the size of the collection. + @param newCapacity The new capacity for the dictionary to expand to. + @result Returns the new capacity of the dictionary or the previous capacity upon error. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity); + + /*! + @function flushCollection + @abstract A member function which removes and releases all objects within the collection. + */ + virtual void flushCollection(); + + /*! + @function setObject + @abstract A member function which places an object into the dictionary and identified by a unique key. + @param aKey A unique OSSymbol identifying the object placed within the collection. + @param anObject The object to be stored in the dictionary. It is automatically retained. + @result Returns true if the addition of an object was successful, false otherwise. + */ + virtual bool setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject); + /*! + @function setObject + @abstract A member function which places an object into the dictionary and identified by a unique key. + @param aKey A unique OSString identifying the object placed within the collection. + @param anObject The object to be stored in the dictionary. It is automatically retained. 
+ @result Returns true if the addition of an object was successful, false otherwise. + */ + virtual bool setObject(const OSString *aKey, const OSMetaClassBase *anObject); + /*! + @function setObject + @abstract A member function which places an object into the dictionary and identified by a unique key. + @param aKey A unique string identifying the object placed within the collection. + @param anObject The object to be stored in the dictionary. It is automatically retained. + @result Returns true if the addition of an object was successful, false otherwise. + */ + virtual bool setObject(const char *aKey, const OSMetaClassBase *anObject); + + /*! + @function removeObject + @abstract A member function which removes an object from the dictionary. The removed object is automatically released. + @param aKey A unique OSSymbol identifying the object to be removed from the dictionary. + */ + virtual void removeObject(const OSSymbol *aKey); + /*! + @function removeObject + @abstract A member function which removes an object from the dictionary. The removed object is automatically released. + @param aKey A unique OSString identifying the object to be removed from the dictionary. + */ + virtual void removeObject(const OSString *aKey); + /*! + @function removeObject + @abstract A member function which removes an object from the dictionary. The removed object is automatically released. + @param aKey A unique string identifying the object to be removed from the dictionary. + */ + virtual void removeObject(const char *aKey); + + /*! + @function merge + @abstract A member function which merges the contents of a dictionary into the receiver. + @param aDictionary The dictionary whose contents are to be merged with the receiver. + @result Returns true if the merger is successful, false otherwise. 
+ @discussion If there are keys in 'aDictionary' which match keys in the receiving dictionary, then the objects in the receiver are replaced by those from 'aDictionary', the replaced objects are released. + */ + virtual bool merge(const OSDictionary *aDictionary); + + /*! + @function getObject + @abstract A member function to find an object in the dictionary associated by a given key. + @param aKey The unique OSSymbol key identifying the object to be returned to caller. + @result Returns a reference to the object corresponding to the given key, or 0 if the key does not exist in the dictionary. + */ + virtual OSObject *getObject(const OSSymbol *aKey) const; + /*! + @function getObject + @abstract A member function to find an object in the dictionary associated by a given key. + @param aKey The unique OSString key identifying the object to be returned to caller. + @result Returns a reference to the object corresponding to the given key, or 0 if the key does not exist in the dictionary. + */ + virtual OSObject *getObject(const OSString *aKey) const; + /*! + @function getObject + @abstract A member function to find an object in the dictionary associated by a given key. + @param aKey The unique string identifying the object to be returned to caller. + @result Returns a reference to the object corresponding to the given key, or 0 if the key does not exist in the dictionary. + */ + virtual OSObject *getObject(const char *aKey) const; + + /*! + @function isEqualTo + @abstract A member function to test the equality of the intersections of two dictionaries. + @param aDictionary The dictionary to be compared against the receiver. + @param keys An OSArray or OSDictionary containing the keys describing the intersection for the comparison. + @result Returns true if the intersections of the two dictionaries are equal. + */ + virtual bool isEqualTo(const OSDictionary *aDictionary, const OSCollection *keys) const; + /*! 
+ @function isEqualTo + @abstract A member function to test the equality of two dictionaries. + @param aDictionary The dictionary to be compared against the receiver. + @result Returns true if the dictionaries are equal. + */ + virtual bool isEqualTo(const OSDictionary *aDictionary) const; + /*! + @function isEqualTo + @abstract A member function to test the equality between the receiver and an unknown object. + @param anObject An object to be compared against the receiver. + @result Returns true if the objects are equal. + */ + virtual bool isEqualTo(const OSMetaClassBase *anObject) const; + + /*! + @function serialize + @abstract A member function which archives the receiver. + @param s The OSSerialize object. + @result Returns true if serialization was successful, false if not. + */ + virtual bool serialize(OSSerialize *s) const; + + + OSMetaClassDeclareReservedUnused(OSDictionary, 0); + OSMetaClassDeclareReservedUnused(OSDictionary, 1); + OSMetaClassDeclareReservedUnused(OSDictionary, 2); + OSMetaClassDeclareReservedUnused(OSDictionary, 3); + OSMetaClassDeclareReservedUnused(OSDictionary, 4); + OSMetaClassDeclareReservedUnused(OSDictionary, 5); + OSMetaClassDeclareReservedUnused(OSDictionary, 6); + OSMetaClassDeclareReservedUnused(OSDictionary, 7); +}; + +#endif /* !_IOKIT_IODICTIONARY_H */ diff --git a/libkern/libkern/c++/OSIterator.h b/libkern/libkern/c++/OSIterator.h new file mode 100644 index 000000000..47fb89213 --- /dev/null +++ b/libkern/libkern/c++/OSIterator.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1998-1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSITERATOR_H +#define _OS_OSITERATOR_H + +#include + +/*! + @class OSIterator + @abstract Abstract super class for iterator classes. + @discussion + OSIterator is an abstract super class providing a consistent set of API's for subclasses. +*/ +class OSIterator : public OSObject +{ + OSDeclareAbstractStructors(OSIterator) + +public: + /*! + @function reset + @abstract A pure virtual member function to be over-ridden by the subclass which reset the iterator to the beginning of the collection. + */ + virtual void reset() = 0; + + /*! + @function isValid + @abstract A pure virtual member function to be over-ridden by the subclass which indicates a modification was made to the collection. + */ + virtual bool isValid() = 0; + + /*! + @function getNextObject + @abstract A pure virtual function to be over-ridden by the subclass which returns a reference to the current object in the collection and advances the interator to the next object. + */ + virtual OSObject *getNextObject() = 0; + + OSMetaClassDeclareReservedUnused(OSIterator, 0); + OSMetaClassDeclareReservedUnused(OSIterator, 1); + OSMetaClassDeclareReservedUnused(OSIterator, 2); + OSMetaClassDeclareReservedUnused(OSIterator, 3); +}; + +#endif /* ! 
_OS_OSITERATOR_H */ diff --git a/libkern/libkern/c++/OSLib.h b/libkern/libkern/c++/OSLib.h new file mode 100644 index 000000000..d3b597dca --- /dev/null +++ b/libkern/libkern/c++/OSLib.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSLIB_H +#define _OS_OSLIB_H + +#include + +#ifdef KERNEL +#define MACH_ASSERT 1 +#endif + +__BEGIN_DECLS + +#include +#include + +#include +#include + +__END_DECLS + +#ifndef NULL +#define NULL 0 +#endif + +#endif /* _OS_OSLIB_H */ + diff --git a/libkern/libkern/c++/OSMetaClass.h b/libkern/libkern/c++/OSMetaClass.h new file mode 100644 index 000000000..9bfd3475c --- /dev/null +++ b/libkern/libkern/c++/OSMetaClass.h @@ -0,0 +1,559 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _LIBKERN_OSMETACLASS_H +#define _LIBKERN_OSMETACLASS_H + +#include + +#include + +class OSMetaClass; +class OSObject; +class OSString; +class OSSymbol; +class OSDictionary; +class OSSerialize; + +class OSMetaClassBase +{ +public: +/*! @function OSTypeID + @abstract Given the name of a class return it's typeID + @param type Name of the desired type, eg. OSObject. + @result 'this' if object is of desired type, otherwise 0. +*/ +#define OSTypeID(type) (type::metaClass) + +/*! @function OSTypeIDInst + @abstract Given a pointer to an object return it's typeID + @param typeinst An instance of an OSObject subclass. + @result The typeID, ie. OSMetaClass *. +*/ +#define OSTypeIDInst(typeinst) ((typeinst)->getMetaClass()) + +/*! @function OSDynamicCast + @abstract Roughly analogous to (type *) inst, but check if valid first. + @discussion OSDynamicCast is an attempt to implement a rudimentary equivalent to rtti's dynamic_cast operator. Embedded-C++ doesn't allow the use of rtti. OSDynamicCast is build on the OSMetaClass mechanism. Note it is safe to call this with a 0 parameter. + @param type name of desired class name. Notice that it is assumed that you desire to cast to a pointer to an object of this type. 
Also type qualifiers, like const, are not recognized and will cause an, usually obscure, compile error. + @param inst Pointer to object that you wish to attempt to type cast. May be 0. + @result inst if object non-zero and it is of the desired type, otherwise 0. +*/ +#define OSDynamicCast(type, inst) \ + ((type *) OSMetaClassBase::safeMetaCast((inst), OSTypeID(type))) + +/*! @function OSCheckTypeInst + @abstract Is the target object a subclass of the reference object? + @param typeinst Reference instance of an object, desired type. + @param inst Instance of object to check for type compatibility. + @result false if typeinst or inst are 0 or inst is not a subclass of typeinst's class. true otherwise. +*/ +#define OSCheckTypeInst(typeinst, inst) \ + OSMetaClassBase::checkTypeInst(inst, typeinst) + + +protected: + OSMetaClassBase(); + virtual ~OSMetaClassBase(); + +private: + // Disable copy constructors of OSMetaClassBase based objects +/*! @function operator = + @abstract Disable implicit copy constructor by making private + @param src Reference to source object that isn't allowed to be copied +*/ + void operator =(OSMetaClassBase &src); + +/*! @function OSMetaClassBase + @abstract Disable implicit copy constructor by making private + @param src Reference to source object that isn't allowed to be copied +*/ + OSMetaClassBase(OSMetaClassBase &src); + +public: +/*! @function release + @abstract Primary implementation of the release mechanism. + @discussion If $link retainCount <= the when argument then call $link free(). This indirect implementation of $link release allows the developer to break reference circularity. An example of this sort of problem is a parent/child mutual reference, either the parent or child can implement: void release() { release(2); } thus breaking the cirularity. + @param when When retainCount == when then call free(). */ + virtual void release(int when) const = 0; + +/*! 
@function getRetainCount + @abstract How many times has this object been retained? + @result Current retain count +*/ + virtual int getRetainCount() const = 0; + +/*! @function retain + @abstract Retain a reference in this object. +*/ + virtual void retain() const = 0; +/*! @function release + @abstract Release a reference to this object +*/ + virtual void release() const = 0; + +/*! @function serialize + @abstract + @discussion + @param s + @result +*/ + virtual bool serialize(OSSerialize *s) const = 0; + + virtual const OSMetaClass * getMetaClass() const = 0; + +/*! @function isEqualTo + @abstract Is this == anObj? + @discussion OSMetaClassBase::isEqualTo implements this as a shallow pointer comparison. The OS container classes do a more meaningful comparison. Your mileage may vary. + @param anObj Object to compare 'this' to. + @result true if the objects are equivalent, false otherwise. +*/ + virtual bool isEqualTo(const OSMetaClassBase *anObj) const; + +/*! @function metaCast + @abstract Check to see if this object is or inherits from the given type. + @discussion This function is the guts of the OSMetaClass system. IODynamicCast, qv, is implemented using this function. + @param toMeta Pointer to a constant OSMetaClass for the desired target type. + @result 'this' if object is of desired type, otherwise 0. +*/ + OSMetaClassBase *metaCast(const OSMetaClass *toMeta) const; + + +/*! @function metaCast + @abstract See OSMetaClassBase::metaCast(const OSMetaClass *) + @param toMeta OSSymbol of the desired class' name. + @result 'this' if object is of desired type, otherwise 0. +*/ + OSMetaClassBase *metaCast(const OSSymbol *toMeta) const; + +/*! @function metaCast + @abstract See OSMetaClassBase::metaCast(const OSMetaClass *) + @param toMeta OSString of the desired class' name. + @result 'this' if object is of desired type, otherwise 0. +*/ + OSMetaClassBase *metaCast(const OSString *toMeta) const; + +/*! 
@function metaCast + @abstract See OSMetaClassBase::metaCast(const OSMetaClass *) + @param toMeta const char * C String of the desired class' name. + @result 'this' if object is of desired type, otherwise 0. +*/ + OSMetaClassBase *metaCast(const char *toMeta) const; + + // Helper inlines for runtime type preprocessor macros + static OSMetaClassBase * + safeMetaCast(const OSMetaClassBase *me, const OSMetaClass *toType) + { return (me)? me->metaCast(toType) : 0; } + + static bool + checkTypeInst(const OSMetaClassBase *inst, const OSMetaClassBase *typeinst) + { + const OSMetaClass *toType = OSTypeIDInst(typeinst); + return typeinst && inst && (0 != inst->metaCast(toType)); + } + +private: + // Virtual Padding + virtual void _RESERVEDOSMetaClassBase0(); + virtual void _RESERVEDOSMetaClassBase1(); + virtual void _RESERVEDOSMetaClassBase2(); + virtual void _RESERVEDOSMetaClassBase3(); + virtual void _RESERVEDOSMetaClassBase4(); + virtual void _RESERVEDOSMetaClassBase5(); + virtual void _RESERVEDOSMetaClassBase6(); + virtual void _RESERVEDOSMetaClassBase7(); +}; + +/*! + @class OSMetaClass : OSMetaClassBase + @abstract An instance of an OSMetaClass represents one class that the kernel's runtime type information system is aware of. +*/ +class OSMetaClass : private OSMetaClassBase +{ + +private: + static void *operator new(size_t size); + + struct ExpansionData { }; + +/*! @var reserved Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +/*! @var superClass Handle to the superclass' meta class. */ + const OSMetaClass *superClassLink; + +/*! @var className OSSymbol of the class' name. */ + const OSSymbol *className; + +/*! @var classSize How big is a single instance of this class. */ + unsigned int classSize; + +/*! @var instanceCount Roughly number of instances of the object. Used primarily as a code in use flag. */ + mutable unsigned int instanceCount; + +/*! 
@function OSMetaClass + @abstract Make the default constructor private */ + OSMetaClass(); + + // Called by postModLoad +/*! @function logError + @abstract Given an error code, log an error string using printf */ + static void logError(OSReturn result); + +/*! @function getMetaClassWithName + @abstract Lookup a meta-class in the runtime type information system + @param name Name of the desired class's meta-class. + @result pointer to a meta-class object if found, 0 otherwise. */ + + static const OSMetaClass *getMetaClassWithName(const OSSymbol *name); + +protected: +/*! @function retain + @abstract Implements the abstract method, but no dynamic allocation is allowed */ + virtual void retain() const; + +/*! @function release + @abstract Implements the abstract method, but no dynamic allocation is allowed */ + virtual void release() const; + +/*! @function release + @abstract Implements the abstract method, but no dynamic allocation is allowed + @param when ignored. */ + virtual void release(int when) const; + +/*! @function getRetainCount + @abstract Implements the abstract method, but no dynamic allocation is allowed */ + virtual int getRetainCount() const; + + virtual const OSMetaClass * getMetaClass() const; + +/*! @function OSMetaClass + @abstract Constructor for OSMetaClass objects + @discussion This constructor is protected and cannot be used to instantiate an OSMetaClass object, i.e. OSMetaClass is an abstract class. This function stores the currently constructing OSMetaClass instance away for later processing. See preModLoad and postModLoad. + @param inClassName cString of the name of the class this meta-class represents. + @param inSuperClass Pointer to the super class' meta-class. + @param inClassSize sizeof the class. */ + OSMetaClass(const char *inClassName, + const OSMetaClass *inSuperClass, + unsigned int inClassSize); + +/*! 
@function ~OSMetaClass + @abstract Destructor for OSMetaClass objects + @discussion If this function is called it means that the object code that implemented this class is actually in the process of unloading. The destructor removes all references to the subclass from the runtime type information system. */ + virtual ~OSMetaClass(); + + static void operator delete(void *mem, size_t size); + +public: + static const OSMetaClass * const metaClass; + +/*! @function preModLoad + @abstract Prepare the runtime type system for the load of a module. + @discussion Prepare the runtime type information system for the loading of all new meta-classes constructed between now and the next postModLoad. preModLoad grabs a lock so that the runtime type information system loading can be protected, the lock is released by the postModLoad function. Any OSMetaClass that is constructed between the bracketing pre and post calls will be associated with the module name. + @param kmodName globally unique cString name of the kernel module being loaded. + @result If successful, return a handle to be used in later calls, 0 otherwise. */ + static void *preModLoad(const char *kmodName); + +/*! @function failModLoad + @abstract Record an error during the loading of a kernel module. + @discussion As constructors can't return errors nor can they throw exceptions in embedded-c++ an indirect error mechanism is necessary. Check mod load returns a bool to indicate the current error state of the runtime type information system. During object construction a call to failModLoad will cause an error code to be recorded. Once an error has been set the continuing construction will be ignored until the end of the pre/post load. + @param error Code of the error. */ + static void failModLoad(OSReturn error); + +/*! @function checkModLoad + @abstract Check if the current load attempt is still OK. + @param loadHandle Handle returned when a successful call to preModLoad is made. 
+ @result true if no errors are outstanding and the system is primed to receive more objects. */ + static bool checkModLoad(void *loadHandle); + +/*! @function postModLoad + @abstract Finish postprocessing on a kernel module's meta-classes. + @discussion As the order of static object construction is undefined it is necessary to process the constructors in two phases. These phases rely on global information that is created by the preparation step, preModLoad, which also guarantees single threading between multiple modules. Phase one was the static construction of each meta-class object one by one within the context prepared by the preModLoad call. postModLoad is the second phase of processing. It links all of the super class inheritance chains up, inserts the meta-classes into the global register of classes and records for each meta-class which kernel module caused its construction. Finally it cleans up the temporary storage and releases the single threading lock and returns whatever error has been recorded during the construction phase or the post processing phase. + @param loadHandle Handle returned when a successful call to preModLoad is made. + @result Error code of the first error encountered. */ + static OSReturn postModLoad(void *loadHandle); + +/*! @function modHasInstance + @abstract Do any of the objects represented by OSMetaClass and associated with the given kernel module name have instances? + @discussion Check all meta-classes associated with the module name and check their instance counts. This function is used to check to see if a module can be unloaded. Obviously if an instance is still outstanding it isn't safe to unload the code that relies on that object. + @param kmodName cString of the kernel module name. + @result true if there are any current instances of any class in the module. +*/ + static bool modHasInstance(const char *kmodName); + +/*! @function reportModInstances + @abstract Log any object that has instances in a module. 
+ @discussion When a developer asks for a module to be unloaded but the unload fails due to outstanding instances, this function will report which classes still have instances. It is intended mostly for developers to find problems with unloading classes and will be called automatically by 'verbose' unloads. + @param kmodName cString of the kernel module name. */ + static void reportModInstances(const char *kmodName); + +/*! @function considerUnloads + @abstract Schedule module unloading. + @discussion Schedule unused modules to be unloaded; called when IOKit matching goes idle. */ + + static void considerUnloads(); + +/*! @function allocClassWithName + @abstract Lookup a meta-class in the runtime type information system and return the results of an alloc call. + @param name Name of the desired class. + @result pointer to a new object, 0 if not found or no memory. */ + static OSObject *allocClassWithName(const OSSymbol *name); + +/*! @function allocClassWithName + @abstract Lookup a meta-class in the runtime type information system and return the results of an alloc call. + @param name Name of the desired class. + @result pointer to a new object, 0 if not found or no memory. */ + static OSObject *allocClassWithName(const OSString *name); + +/*! @function allocClassWithName + @abstract Lookup a meta-class in the runtime type information system and return the results of an alloc call. + @param name Name of the desired class. + @result pointer to a new object, 0 if not found or no memory. */ + static OSObject *allocClassWithName(const char *name); + +/*! @function checkMetaCastWithName + @abstract Introspect an object's inheritance tree looking for a class of the given name. Basis of MacOSX's kernel dynamic casting mechanism. + @param name Name of the desired class or super class. + @param in object to be introspected. + @result in parameter if cast valid, 0 otherwise. 
*/ + static OSMetaClassBase * + checkMetaCastWithName(const OSSymbol *name, const OSMetaClassBase *in); + +/*! @function checkMetaCastWithName + @abstract Introspect an objects inheritance tree looking for a class of the given name. Basis of MacOSX's kernel dynamic casting mechanism. + @param name Name of the desired class or super class. + @param in object to be introspected. + @result in parameter if cast valid, 0 otherwise. */ + static OSMetaClassBase * + checkMetaCastWithName(const OSString *name, const OSMetaClassBase *in); + +/*! @function checkMetaCastWithName + @abstract Introspect an objects inheritance tree looking for a class of the given name. Basis of MacOSX's kernel dynamic casting mechanism. + @param name Name of the desired class or super class. + @param in object to be introspected. + @result in parameter if cast valid, 0 otherwise. */ + static OSMetaClassBase * + checkMetaCastWithName(const char *name, const OSMetaClassBase *in); + + +/*! @function instanceConstructed + @abstract Counts the instances of the class behind this metaclass. + @discussion Every non-abstract class that inherits from OSObject has a default constructor that calls it's own meta-class' instanceConstructed function. This constructor is defined by the OSDefineMetaClassAndStructors macro (qv) that all OSObject subclasses must use. Also if the instance count goes from 0 to 1, ie the first instance, then increment the instance count of the super class */ + void instanceConstructed() const; + +/*! @function instanceDestructed + @abstract Removes one instance of the class behind this metaclass. + @discussion OSObject's free function calls this method just before it does a 'delete this' on itself. If the instance count transitions from 1 to 0, i.e. the last object, then one instance of the superclasses is also removed. */ + void instanceDestructed() const; + + +/*! 
@function checkMetaCast + @abstract Ask an OSMetaClass instance if the given object is either an instance of it or an instance of a subclass of it. + @param check Pointer of object to introspect. + @result check parameter if cast valid, 0 otherwise. */ + OSMetaClassBase *checkMetaCast(const OSMetaClassBase *check) const; + + +/*! @function getInstanceCount + @abstract How many instances of the class have been created. + @result Count of the number of instances. */ + unsigned int getInstanceCount() const; + + +/*! @function getSuperClass + @abstract 'Get'ter for the super class. + @result Pointer to superclass, chain ends with 0 for OSObject. */ + const OSMetaClass *getSuperClass() const; + +/*! @function getClassName + @abstract 'Get'ter for class name. + @result cString of the class name. */ + const char *getClassName() const; + +/*! @function getClassSize + @abstract 'Get'ter for sizeof(class). + @result sizeof of class that this OSMetaClass instance represents. */ + unsigned int getClassSize() const; + +/*! @function alloc + @abstract Allocate an instance of the class that this OSMetaClass instance represents. + @discussion This alloc function is analogous to the old ObjC class alloc method. Typically not used by clients as the static function allocClassWithName is more generally useful. In fact that function is implemented in terms of this virtual function. All subclasses of OSMetaClass must implement this function but that is what the OSDefineMetaClassAndStructor's families of macros do for the developer automatically. + @result Pointer to a new object with a retain count of 1. */ + virtual OSObject *alloc() const = 0; + +/*! @function OSDeclareCommonStructors + @abstract Basic helper macro for the OSDeclare for Default and Abstract macros, qv. DO NOT USE. + @param className Name of class. NO QUOTES. 
*/ +#define OSDeclareCommonStructors(className) \ + private: \ + static const OSMetaClass * const superClass; \ + public: \ + static const OSMetaClass * const metaClass; \ + static class MetaClass : public OSMetaClass { \ + public: \ + MetaClass(); \ + virtual OSObject *alloc() const; \ + } gMetaClass; \ + friend class className ## ::MetaClass; \ + virtual const OSMetaClass * getMetaClass() const; \ + protected: \ + className ## (const OSMetaClass *); \ + virtual ~ ## className ## () + + +/*! @function OSDeclareDefaultStructors + @abstract One of the macro's used in the class declaration of all subclasses of OSObject, declares runtime type information data and interfaces. + @discussion Macro used in the class declaration all subclasses of OSObject, declares runtime type information data and interfaces. By convention it should be 'called' immediately after the opening brace in a class declaration. It leaves the current privacy state as 'protected:'. + @param className Name of class. NO QUOTES. */ +#define OSDeclareDefaultStructors(className) \ + OSDeclareCommonStructors(className); \ + public: \ + className ## (); \ + protected: + + +/*! @function OSDeclareAbstractStructors + @abstract One of the macro's used in the class declaration of all subclasses of OSObject, declares runtime type information data and interfaces. + @discussion This macro is used when the class being declared has one or more '= 0' pure virtual methods and thus it is illegal to create an instance of this class. It leaves the current privacy state as 'protected:'. + @param className Name of class. NO QUOTES. */ +#define OSDeclareAbstractStructors(className) \ + OSDeclareCommonStructors(className); \ + private: \ + className ## (); /* Make primary constructor private in abstract */ \ + protected: + +/*! @function OSDefineMetaClassWithInit + @abstract Basic helper macro for the OSDefineMetaClass for the default and Abstract macros, qv. DO NOT USE. + @param className Name of class. 
NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. + @param init Name of a function to call after the OSMetaClass is constructed. */ +#define OSDefineMetaClassWithInit(className, superClassName, init) \ + /* Class global data */ \ + className ## ::MetaClass className ## ::gMetaClass; \ + const OSMetaClass * const className ## ::metaClass = \ + & ## className ## ::gMetaClass; \ + const OSMetaClass * const className ## ::superClass = \ + & ## superClassName ## ::gMetaClass; \ + /* Class member functions */ \ + className ## :: ## className(const OSMetaClass *meta) \ + : superClassName ## (meta) { } \ + className ## ::~ ## className() { } \ + const OSMetaClass * className ## ::getMetaClass() const \ + { return &gMetaClass; } \ + /* The ::MetaClass constructor */ \ + className ## ::MetaClass::MetaClass() \ + : OSMetaClass(#className, className::superClass, sizeof(className)) \ + { init; } + +/*! @function OSDefineAbstractStructors + @abstract Basic helper macro for the OSDefineMetaClass for the default and Abstract macros, qv. DO NOT USE. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. */ +#define OSDefineAbstractStructors(className, superClassName) \ + OSObject * ## className ## ::MetaClass::alloc() const { return 0; } + +/*! @function OSDefineDefaultStructors + @abstract Basic helper macro for the OSDefineMetaClass for the default and Abstract macros, qv. DO NOT USE. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. */ +#define OSDefineDefaultStructors(className, superClassName) \ + OSObject * ## className ## ::MetaClass::alloc() const \ + { return new className; } \ + className ## :: ## className () : superClassName ## (&gMetaClass) \ + { gMetaClass.instanceConstructed(); } + + +/*! 
@function OSDefineMetaClassAndAbstractStructorsWithInit + @abstract Primary definition macro for all abstract classes that are subclasses of OSObject. + @discussion Define an OSMetaClass subclass and the primary constructors and destructors for a subclass of OSObject that is an abstract class. In general this 'function' is 'called' at the top of the file just before the first function is implemented for a particular class. Once the OSMetaClass has been constructed, at load time, call the init routine. NB you can not rely on the order of execution of the init routines. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. + @param init Name of a function to call after the OSMetaClass is constructed. */ +#define OSDefineMetaClassAndAbstractStructorsWithInit(className, superClassName, init) \ + OSDefineMetaClassWithInit(className, superClassName, init) \ + OSDefineAbstractStructors(className, superClassName) + +/*! @function OSDefineMetaClassAndStructorsWithInit + @abstract See OSDefineMetaClassAndStructors + @discussion Define an OSMetaClass subclass and the primary constructors and destructors for a subclass of OSObject that isn't an abstract class. In general this 'function' is 'called' at the top of the file just before the first function is implemented for a particular class. Once the OSMetaClass has been constructed, at load time, call the init routine. NB you can not rely on the order of execution of the init routines. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. + @param init Name of a function to call after the OSMetaClass is constructed. */ +#define OSDefineMetaClassAndStructorsWithInit(className, superClassName, init) \ + OSDefineMetaClassWithInit(className, superClassName, init) \ + OSDefineDefaultStructors(className, superClassName) + +/* Helpers */ +/*! 
@function OSDefineMetaClass + @abstract Define an OSMetaClass instance, used for backward compatibility only. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. */ +#define OSDefineMetaClass(className, superClassName) \ + OSDefineMetaClassWithInit(className, superClassName, ) + +/*! @function OSDefineMetaClassAndStructors + @abstract Define an OSMetaClass subclass and the runtime system routines. + @discussion Define an OSMetaClass subclass and the primary constructors and destructors for a subclass of OSObject that isn't an abstract class. In general this 'function' is 'called' at the top of the file just before the first function is implemented for a particular class. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. */ +#define OSDefineMetaClassAndStructors(className, superClassName) \ + OSDefineMetaClassAndStructorsWithInit(className, superClassName, ) + +/*! @function OSDefineMetaClassAndAbstractStructors + @abstract Define an OSMetaClass subclass and the runtime system routines. + @discussion Define an OSMetaClass subclass and the primary constructors and destructors for a subclass of OSObject that is an abstract class. In general this 'function' is 'called' at the top of the file just before the first function is implemented for a particular class. + @param className Name of class. NO QUOTES and NO MACROS. + @param superClassName Name of super class. NO QUOTES and NO MACROS. 
*/ +#define OSDefineMetaClassAndAbstractStructors(className, superClassName) \ + OSDefineMetaClassAndAbstractStructorsWithInit (className, superClassName, ) + + // Dynamic vtable patchup support routines and types + void reservedCalled(int ind) const; + +#define OSMetaClassDeclareReservedUnused(classname, index) \ + private: \ + virtual void _RESERVED ## classname ## index () + +#define OSMetaClassDeclareReservedUsed(classname, index) + +#define OSMetaClassDefineReservedUnused(classname, index) \ +void classname ## ::_RESERVED ## classname ## index () \ + { gMetaClass.reservedCalled(index); } + +#define OSMetaClassDefineReservedUsed(classname, index) + + // IOKit debug internal routines. + static void printInstanceCounts(); + static OSDictionary *getClassDictionary(); + virtual bool serialize(OSSerialize *s) const; + + // Virtual Padding functions for MetaClass's + OSMetaClassDeclareReservedUnused(OSMetaClass, 0); + OSMetaClassDeclareReservedUnused(OSMetaClass, 1); + OSMetaClassDeclareReservedUnused(OSMetaClass, 2); + OSMetaClassDeclareReservedUnused(OSMetaClass, 3); + OSMetaClassDeclareReservedUnused(OSMetaClass, 4); + OSMetaClassDeclareReservedUnused(OSMetaClass, 5); + OSMetaClassDeclareReservedUnused(OSMetaClass, 6); + OSMetaClassDeclareReservedUnused(OSMetaClass, 7); +}; + +#endif /* !_LIBKERN_OSMETACLASS_H */ diff --git a/libkern/libkern/c++/OSNumber.h b/libkern/libkern/c++/OSNumber.h new file mode 100644 index 000000000..aa32d0fcb --- /dev/null +++ b/libkern/libkern/c++/OSNumber.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOOffset.h created by rsulack on Wed 17-Sep-1997 */ +/* IOOffset.h converted to C++ by gvdl on Fri 1998-10-30 */ + +#ifndef _OS_OSNUMBER_H +#define _OS_OSNUMBER_H + +#include + +/*! + @class OSNumber + @abstract A container class for numeric values. +*/ +class OSNumber : public OSObject +{ + OSDeclareDefaultStructors(OSNumber) + +protected: + unsigned long long value; + unsigned int size; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: + /*! + @function withNumber + @abstract A static constructor function to create and initialize an instance of OSNumber with a given value. + @param value The numeric integer value. + @param numberOfBits The number of bit required to represent the value. + @result Returns an instance of OSNumber or 0 if an error occurred. + */ + static OSNumber *withNumber(unsigned long long value, + unsigned int numberOfBits); + /*! + @function withNumber + @abstract A static constructor function to create and initialize an instance of OSNumber with a given value represented as a simple c-string. + @param value A c-string representing a numeric value. + @param numberOfBits The number of bit required to represent the value. + @result Returns an instance of OSNumber or 0 if an error occurred. + */ + static OSNumber *withNumber(const char *value, unsigned int numberOfBits); + + /*! + @function init + @abstract A member function to initialize an instance of OSNumber. 
+ @param value An integer value. + @param numberOfBits The number of bit required to represent the value. + @result Returns true if instance was successfully initialized, false otherwise. + */ + virtual bool init(unsigned long long value, unsigned int numberOfBits); + /*! + @function init + @abstract A member function to initialize an instance of OSNumber. + @param value A c-string representation of a numeric value. + @param numberOfBits The number of bit required to represent the value. + @result Returns true if instance was successfully initialized, false otherwise. + */ + virtual bool init(const char *value, unsigned int numberOfBits); + /*! + @function free + @abstract Releases and deallocates resources created by the OSNumber instances. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + + /*! + @function numberOfBits + @abstract A member function which returns the number of bits used to represent the value. + @result Returns the number of bits required to represent the value. + */ + virtual unsigned int numberOfBits() const; + /*! + @function numberOfBytes + @abstract A member function which returns the number of bytes used to represent the value. + @result Returns the number of bytes required to represent the value. + */ + virtual unsigned int numberOfBytes() const; + + /*! + @function unsigned8BitValue + @abstract A member function which returns its internal value as an 8-bit value. + @result Returns the internal value as an 8-bit value. + */ + virtual unsigned char unsigned8BitValue() const; + /*! + @function unsigned16BitValue + @abstract A member function which returns its internal value as an 16-bit value. + @result Returns the internal value as an 16-bit value. + */ + virtual unsigned short unsigned16BitValue() const; + /*! + @function unsigned32BitValue + @abstract A member function which returns its internal value as an 32-bit value. + @result Returns the internal value as an 32-bit value. 
+ */ + virtual unsigned int unsigned32BitValue() const; + /*! + @function unsigned64BitValue + @abstract A member function which returns its internal value as an 64-bit value. + @result Returns the internal value as an 64-bit value. + */ + virtual unsigned long long unsigned64BitValue() const; + + /*! + @function addValue + @abstract A member function which adds an integer value to the internal numeric value of the OSNumber object. + @param value The value to be added. + */ + virtual void addValue(signed long long value); + /*! + @function setValue + @abstract Replaces the current internal numeric value of the OSNumber object by the value given. + @param value The new value for the OSNumber object. + */ + virtual void setValue(unsigned long long value); + + /*! + @function isEqualTo + @abstract A member function to test the equality of two OSNumber objects. + @param integer The OSNumber object to be compared against the receiver. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSNumber *integer) const; + /*! + @function isEqualTo + @abstract A member function to test the equality of an arbitrary OSObject derived object and an OSNumber object. + @param obj The OSObject derived object to be compared to the receiver. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSMetaClassBase *obj) const; + + /*! + @function serialize + @abstract A member function which archives the receiver. + @param s The OSSerialize object. + @result Returns true if serialization was successful, false if not. 
+ */ + virtual bool serialize(OSSerialize *s) const; + + + OSMetaClassDeclareReservedUnused(OSNumber, 0); + OSMetaClassDeclareReservedUnused(OSNumber, 1); + OSMetaClassDeclareReservedUnused(OSNumber, 2); + OSMetaClassDeclareReservedUnused(OSNumber, 3); + OSMetaClassDeclareReservedUnused(OSNumber, 4); + OSMetaClassDeclareReservedUnused(OSNumber, 5); + OSMetaClassDeclareReservedUnused(OSNumber, 6); + OSMetaClassDeclareReservedUnused(OSNumber, 7); +}; + +#endif /* !_OS_OSNUMBER_H */ diff --git a/libkern/libkern/c++/OSObject.h b/libkern/libkern/c++/OSObject.h new file mode 100644 index 000000000..a7e0505a9 --- /dev/null +++ b/libkern/libkern/c++/OSObject.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* +Copyright (c) 1998 Apple Computer, Inc. All rights reserved. +HISTORY + 1998-10-30 Godfrey van der Linden(gvdl) + Created +*/ +#ifndef _LIBKERN_OSOBJECT_H +#define _LIBKERN_OSOBJECT_H + +#include + +class OSSymbol; +class OSString; +/*! 
+ @class OSObject : OSMetaClassBase + @abstract The root base class for Mac OS X kernel and just generally all-round useful class to have around. + @discussion +Defines the minimum functionality that an object can expect. Implements reference counting, type safe object casting, allocation primitives & serialisation among other functionality. This object is an abstract base class and can not be copied, nor can it be constructed by itself. + +

Construction

+ +As Mac OS X's C++ is based upon Embedded C++ we have a problem with the typical C++ method of using constructors. Embedded C++ does not allow exceptions. This means that the standard constructors can not report a failure. Well obviously initialisation of a new object can fail so we have had to work around this language limitation. In the Mac OS X kernel we have chosen to break object construction into two phases. Phase one is the familiar C++ new operator, the only initialisation is the object has exactly one reference after creation. Once the new is called the client MUST call init and check it's return value. If the init call fails then the object MUST be immediately released. IOKit usually implements factory methods to make construction a one step process for clients. + +

Reference Counting

+ +OSObject provides reference counting services using the $link retain(), $link release(), $link release(int when) and $link free() functions. The public interface to the reference counting is retain() & release(). release() is implemented as a simple call to release(1). The actual implementation of release(when) is a little subtle. If the current reference count is less than or equal to the 'when' parameter the object will call free on itself. +
+In general a subclass is expected to only override $link free(). It may also choose to override release() if the object has a circular retain count, see $link release(int when); + +

Runtime Type Information System

+ +The Mac OS X C++ implements a basic runtime type information system using meta class information and a number of macros, $link OSDynamicCast, $link OSTypeID, $link OSTypeIDInst, $link OSCheckTypeInst and $link OSMetaClass. +*/ +class OSObject : public OSMetaClassBase +{ + OSDeclareAbstractStructors(OSObject) + +private: +/*! @var retainCount Number of references held on this instance. */ + mutable int retainCount; + +protected: + +/*! @function release + @abstract Primary implementation of the release mechanism. + @discussion If $link retainCount <= the when argument then call $link free(). This indirect implementation of $link release allows the developer to break reference circularity. An example of this sort of problem is a parent/child mutual reference, either the parent or child can implement: void release() { release(2); } thus breaking the cirularity. + @param when When retainCount == when then call free(). */ + virtual void release(int when) const; + +/*! @function init + @abstract Mac OS X kernel's primary mechanism for constructing objects. + @discussion Your responsibility as a subclass author is to override the init method of your parent. In general most of our implementations call ::init() before doing local initialisation, if the parent fails then return false immediately. If you have a failure during you local initialisation then return false. + @result OSObject::init Always returns true, but subclasses will return false on init failure. +*/ + virtual bool init(); + +/*! @function free + @abstract The last reference is gone so clean up your resources. + @discussion Release all resources held by the object, then call your parent's free(). + +

Caution: +
1> You can not assume that you have completed initialization before your free is called, so be very careful in your implementation. +
2> The implementation is OSObject::free() { delete this; } so do not call super::free() until just before you return. +
3> Free is not allowed to fail all resource must be released on completion. */ + virtual void free(); + +/*! @function operator delete + @abstract Release the 'operator new'ed memory. + @discussion Never attempt to delete an object that inherits from OSObject directly use $link release(). + @param mem pointer to block of memory + @param size size of block of memory +*/ + static void operator delete(void *mem, size_t size); + +public: + +/*! @function operator new + @abstract Allocator for all objects that inherit from OSObject + @param size number of bytes to allocate + @result returns pointer to block of memory if available, 0 otherwise. +*/ + static void *operator new(size_t size); + +/*! @function getRetainCount + @abstract How many times has this object been retained? + @result Current retain count +*/ + virtual int getRetainCount() const; + +/*! @function retain + @abstract Retain a reference in this object. +*/ + virtual void retain() const; + +/*! @function release + @abstract Release a reference to this object +*/ + virtual void release() const; + +/*! 
@function serialize + @abstract + @discussion + @param s + @result +*/ + virtual bool serialize(OSSerialize *s) const; + + // Unused Padding + OSMetaClassDeclareReservedUnused(OSObject, 0); + OSMetaClassDeclareReservedUnused(OSObject, 1); + OSMetaClassDeclareReservedUnused(OSObject, 2); + OSMetaClassDeclareReservedUnused(OSObject, 3); + OSMetaClassDeclareReservedUnused(OSObject, 4); + OSMetaClassDeclareReservedUnused(OSObject, 5); + OSMetaClassDeclareReservedUnused(OSObject, 6); + OSMetaClassDeclareReservedUnused(OSObject, 7); + OSMetaClassDeclareReservedUnused(OSObject, 8); + OSMetaClassDeclareReservedUnused(OSObject, 9); + OSMetaClassDeclareReservedUnused(OSObject, 10); + OSMetaClassDeclareReservedUnused(OSObject, 11); + OSMetaClassDeclareReservedUnused(OSObject, 12); + OSMetaClassDeclareReservedUnused(OSObject, 13); + OSMetaClassDeclareReservedUnused(OSObject, 14); + OSMetaClassDeclareReservedUnused(OSObject, 15); + OSMetaClassDeclareReservedUnused(OSObject, 16); + OSMetaClassDeclareReservedUnused(OSObject, 17); + OSMetaClassDeclareReservedUnused(OSObject, 18); + OSMetaClassDeclareReservedUnused(OSObject, 19); + OSMetaClassDeclareReservedUnused(OSObject, 20); + OSMetaClassDeclareReservedUnused(OSObject, 21); + OSMetaClassDeclareReservedUnused(OSObject, 22); + OSMetaClassDeclareReservedUnused(OSObject, 23); + OSMetaClassDeclareReservedUnused(OSObject, 24); + OSMetaClassDeclareReservedUnused(OSObject, 25); + OSMetaClassDeclareReservedUnused(OSObject, 26); + OSMetaClassDeclareReservedUnused(OSObject, 27); + OSMetaClassDeclareReservedUnused(OSObject, 28); + OSMetaClassDeclareReservedUnused(OSObject, 29); + OSMetaClassDeclareReservedUnused(OSObject, 30); + OSMetaClassDeclareReservedUnused(OSObject, 31); +}; + +#endif /* !_LIBKERN_OSOBJECT_H */ diff --git a/libkern/libkern/c++/OSOrderedSet.h b/libkern/libkern/c++/OSOrderedSet.h new file mode 100644 index 000000000..4357aee37 --- /dev/null +++ b/libkern/libkern/c++/OSOrderedSet.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) 
2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OS_OSORDEREDSET_H +#define _OS_OSORDEREDSET_H + +#include +#include + +class OSOffset; + +/*! + @class OSOrderedSet + @abstract A collection class for maintaining and sorting a set of OSMetaClassBase derived objects. + @discussion + An instance of OSOrderedSet maintains and sorts a collection of OSMetaClassBase derived objects. The sorting algorithm is supplied to the instance via the OSOrderFunction. +*/ +class OSOrderedSet : public OSCollection +{ + OSDeclareDefaultStructors(OSOrderedSet) + +public: + /*! + @typedef OSOrderFunction + @abstract The sorting function used by the collection to order objects. + @param obj1 An object from the collection. + @param obj2 An object to be compared to obj1. + @param ref The ordering context used by the sorting function as a hint for sorting. + @result Returns a comparison result of the object, a negative value if obj1 < obj2, 0 if obj1 == obj2, and a positive value if obj1 > obj2. 
+ */ + typedef SInt32 (*OSOrderFunction)(const OSMetaClassBase * obj1, + const OSMetaClassBase * obj2, + void * ref ); + +protected: + struct _Element * array; + OSOrderFunction ordering; + void * orderingRef; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +protected: + /* + * OSCollectionIterator interfaces. + */ + virtual unsigned int iteratorSize() const; + virtual bool initIterator(void *iterator) const; + virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const; + +public: + + /*! + @function withCapacity + @abstract A static constructor function for creating and initializing an instance of OSOrderedSet. + @param capacity The initial storage size in number of objects of the set. + @param orderFunc A c-style function which implements the sorting algorithm for the set. + @param orderingRef A ordering context used as a hint for ordering objects within the set. + @result Returns an instance of OSSet, or 0 if a failure occurred. + */ + static OSOrderedSet *withCapacity(unsigned int capacity, + OSOrderFunction orderFunc = 0, + void * orderingRef = 0); + + /*! + @function initWithCapacity + @abstract A member function for initializing an instance of OSOrderedSet. + @param capacity The initial storage size in number of objects of the set. + @param orderFunc A c-style function which implements the sorting algorithm for the set. + @param orderingRef A ordering context used as a hint for ordering objects within the set. + @result Returns true if initialization was successful, or false if a failure occurred. + */ + virtual bool initWithCapacity(unsigned int capacity, + OSOrderFunction orderFunc = 0, + void * orderingRef = 0); + /*! + @function free + @abstract A member function to release and deallocate any resources used by the instance of OSOrderedSet. + */ + virtual void free(); + + /*! 
+ @function getCount + @abstract A member function to return the number of objects within the collection. + @result Returns the number of items in the set. + */ + virtual unsigned int getCount() const; + /*! + @function getCapacity + @abstract A member function to return the storage capacity of the collection. + @result Returns the total storage capacity of the set. + */ + virtual unsigned int getCapacity() const; + /*! + @function getCapacityIncrement + @abstract A member function to get the size by which the collection will grow. + @result Returns the current growth size. + */ + virtual unsigned int getCapacityIncrement() const; + /*! + @function setCapacityIncrement + @abstract A member function to set the size by which the collection will grow. + @param increment The new growth factor for the set. + @result Returns new growth size. + */ + virtual unsigned int setCapacityIncrement(unsigned increment); + + /*! + @function ensureCapacity + @abstract A member function to expand the size of the collection. + @param newCapacity The new size capacity for the collection. + @result Returns new capacity of the set when successful or the old capacity on failure. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity); + + /*! + @function flushCollection + @abstract A member function to remove and release all items in the set. + */ + virtual void flushCollection(); + + /*! + @function setObject + @abstract A member function to place an OSMetaClassBase derived object into the set. The object will be automatically sorted in the set. + @param anObject The object to be placed into the collection. + @result Returns true if object was successfully added to the collection, false otherwise. + */ + virtual bool setObject(const OSMetaClassBase *anObject); + /*! + @function setFirstObject + @abstract A member function to place an OSMetaClassBase derived object order it first in the set. + @param anObject The object to be placed into the collection. 
+ @result Returns true if object was successfully added to the collection, false otherwise. + */ + virtual bool setFirstObject(const OSMetaClassBase *anObject); + /*! + @function setLastObject + @abstract A member function to place an OSMetaClassBase derived object order it last in the set. + @param anObject The object to be placed into the collection. + @result Returns true if object was successfully added to the collection, false otherwise. + */ + virtual bool setLastObject(const OSMetaClassBase *anObject); + + /*! + @function removeObject + @abstract A member function to remove and release an object in the set. + @param anObject The object to remove from the set. + */ + virtual void removeObject(const OSMetaClassBase *anObject); + + /*! + @function containsObject + @abstract A member function to query the set for the presence of a particular object. + @param anObject The object to be located. + @result Returns true if the object is present in the set, false otherwise. + */ + virtual bool containsObject(const OSMetaClassBase *anObject) const; + /*! + @function member + @abstract A member function to query the set for the presence of a particular object. + @param anObject The object to be located. + @result Returns true if the object is present in the set, false otherwise. + */ + virtual bool member(const OSMetaClassBase *anObject) const; + + /*! + @function getFirstObject + @abstract A member function to return the first object in the set. + @result Returns the object ordered first in the set or 0 if none exist. + */ + virtual OSObject *getFirstObject() const; + /*! + @function getLastObject + @abstract A member function to return the last object in the set. + @result Returns the object ordered last in the set or 0 if none exist. + */ + virtual OSObject *getLastObject() const; + + /*! + @function orderObject + @abstract A member function to return the ordering value of an object. + @param anObject The object to be queried. 
+ @result Returns the ordering value for an object. + */ + virtual SInt32 orderObject( const OSMetaClassBase * anObject ); + + /*! + @function setObject + @abstract A member function to place an object into the set at a particular index. + @param index The index in the set to place the object. + @param anObject The object to be placed into the set. + @result Returns true if the object was successfully placed into the collection, false otherwise. + */ + virtual bool setObject(unsigned int index, const OSMetaClassBase *anObject); + /*! + @function getObject + @abstract A member function to return a reference to an object at a particular index. + @param index The index into the set. + @result Returns a reference to the object at the given index, 0 if none exist at that location. + */ + virtual OSObject *getObject( unsigned int index) const; + /*! + @function getOrderingRef + @abstract A member function to return a the ordering context. + @result Returns the ordering context, or NULL if none exist. + */ + virtual void *getOrderingRef(); + + /*! + @function isEqualTo + @abstract A member function to test the equality between an OSOrderedSet object and the receiver. + @param anOrderedSet The OSOrderedSet object to be compared against the receiver. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSOrderedSet *anOrderedSet) const; + /*! + @function isEqualTo + @abstract A member function to test the equality between an arbitrary OSMetaClassBase derived object and the receiver. + @param anObject The OSMetaClassBase derived object to be compared against the receiver. + @result Returns true if the two objects are equivalent, false otherwise. 
+ */ + virtual bool isEqualTo(const OSMetaClassBase *anObject) const; + + + OSMetaClassDeclareReservedUnused(OSOrderedSet, 0); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 1); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 2); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 3); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 4); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 5); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 6); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 7); +}; + +#endif /* ! _OS_OSORDEREDSET_H */ diff --git a/libkern/libkern/c++/OSSerialize.h b/libkern/libkern/c++/OSSerialize.h new file mode 100644 index 000000000..b27811dce --- /dev/null +++ b/libkern/libkern/c++/OSSerialize.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSSerialize.h created by rsulack on Wen 25-Nov-1998 */ + +#ifndef _OS_OSSERIALIZE_H +#define _OS_OSSERIALIZE_H + +#include + +class OSSet; +class OSDictionary; + +/*! + @class OSSerialize + @abstract A class used by the OS Container classes to serialize their instance data. 
+ @discussion This class is for the most part internal to the OS Container classes and should not be used directly. Each class inherits a serialize() method from OSObject which is used to actually serialize an object. +*/ + +class OSSerialize : public OSObject +{ + OSDeclareDefaultStructors(OSSerialize) + +protected: + char *data; // container for serialized data + unsigned int length; // of serialized data (counting NULL) + unsigned int capacity; // of container + unsigned int capacityIncrement; // of container + + unsigned int tag; + OSDictionary *tags; // tags for all objects seen + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + +public: + static OSSerialize *withCapacity(unsigned int capacity); + + virtual char *text() const; + + virtual void clearText(); // using this can be a great speedup + // if you are serializing the same object + // over and over again + + // stuff to serialize your object + virtual bool previouslySerialized(const OSMetaClassBase *); + + virtual bool addXMLStartTag(const OSMetaClassBase *o, const char *tagString); + virtual bool addXMLEndTag(const char *tagString); + + virtual bool addChar(const char); + virtual bool addString(const char *); + + // stuff you should never have to use (in theory) + + virtual bool initWithCapacity(unsigned int inCapacity); + virtual unsigned int getLength() const; + virtual unsigned int getCapacity() const; + virtual unsigned int getCapacityIncrement() const; + virtual unsigned int setCapacityIncrement(unsigned increment); + virtual unsigned int ensureCapacity(unsigned int newCapacity); + virtual void free(); + + OSMetaClassDeclareReservedUnused(OSSerialize, 0); + OSMetaClassDeclareReservedUnused(OSSerialize, 1); + OSMetaClassDeclareReservedUnused(OSSerialize, 2); + OSMetaClassDeclareReservedUnused(OSSerialize, 3); + OSMetaClassDeclareReservedUnused(OSSerialize, 4); + OSMetaClassDeclareReservedUnused(OSSerialize, 5); + 
OSMetaClassDeclareReservedUnused(OSSerialize, 6); + OSMetaClassDeclareReservedUnused(OSSerialize, 7); +}; + +typedef bool (*OSSerializerCallback)(void * target, void * ref, + OSSerialize * s); + +class OSSerializer : public OSObject +{ + OSDeclareDefaultStructors(OSSerializer) + + void * target; + void * ref; + OSSerializerCallback callback; + +public: + + static OSSerializer * forTarget(void * target, + OSSerializerCallback callback, void * ref = 0); + + virtual bool serialize(OSSerialize * s) const; +}; + +#endif /* _OS_OSSERIALIZE_H */ diff --git a/libkern/libkern/c++/OSSet.h b/libkern/libkern/c++/OSSet.h new file mode 100644 index 000000000..ee2467122 --- /dev/null +++ b/libkern/libkern/c++/OSSet.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSet.h created by rsulack on Thu 11-Jun-1998 */ +/* IOSet.h converted to C++ by gvdl on Fri 1998-10-30 */ + +#ifndef _OS_OSSET_H +#define _OS_OSSET_H + +#include + +class OSArray; + +/*! + @class OSSet + @abstract A collection class for storing OSMetaClassBase derived objects. 
+ @discussion + Instances of OSSet store unique OSMetaClassBase derived objects in a non-ordered manner. +*/ +class OSSet : public OSCollection +{ + OSDeclareDefaultStructors(OSSet) + +private: + OSArray *members; + +protected: + /* + * OSCollectionIterator interfaces. + */ + virtual unsigned int iteratorSize() const; + virtual bool initIterator(void *iterator) const; + virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const; + + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + +public: + /*! + @function withCapacity + @abstract A static constructor function to create and initialize an instance of OSSet with a given capacity. + @param capacity The initial capacity of the collection. The capacity is the total number of objects that can be stored in the collection. + @result Returns an instance of OSSet or 0 on failure. + */ + static OSSet *withCapacity(unsigned int capacity); + /*! + @function withObjects + @abstract A static constructor function to create and initialize an instance of OSSet and populate it with the objects provided. + @param objects A static array of OSMetaClassBase derived objects which are used to populate the collection. + @param count The number of objects passed to the collection. + @param capacity The initial storage size of the collection. The capacity is the total number of objects that can be stored in the collection. This value must be equal to or larger than the count parameter. + @result Returns an instance of OSSet or 0 on failure. + */ + static OSSet *withObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity = 0); + /*! + @function withArray + @abstract A static constructor function to create and initialize an instance of OSSet and populate it with the objects from an OSSArray object. 
+ @param array An OSArray object containing a list of OSMetaClassBase derived objects which are used to initially populate the OSSet object. + @param capacity The initial storage size of the collection. This value must be equal to or larger than the number of objects provided by the OSArray object passed as the first parameter. + @result Returns an instance of OSSet or 0 on failure. + */ + static OSSet *withArray(const OSArray *array, + unsigned int capacity = 0); + /*! + @function withSet + @abstract A static constructor function to create an instance of OSSet and populate it with the objects from another OSSet object. + @param array An OSSet object containing OSMetaClassBase derived objects which are used to initially populate the new OSSet object. + @param capacity The initial storage size of the collection. This value must be equal to or larger than the number of objects provided by the OSSet object passed as the first parameter. + @result Returns an instance of OSSet or 0 on failure. + */ + static OSSet *withSet(const OSSet *set, + unsigned int capacity = 0); + + /*! + @function initWithCapacity + @abstract A member function to initialize an instance of OSSet with a given capacity. + @param capacity The initial storage size of the collection. + @result Returns true if initialization successful or false on failure. + */ + virtual bool initWithCapacity(unsigned int capacity); + /*! + @function initWithObjects + @abstract A member function to initialize an instance of OSSet with a given capacity and populate the collection with the objects provided. + @param object A static array containing OSMetaClassBase derived objects used to populate the collection. + @param count The number of objects provided. + @param capacity The initial storage size of the collection. This value must be equal to or larger than the 'count' parameter. + @result Returns true if initialization successful or false on failure. 
+ */ + virtual bool initWithObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity = 0); + /*! + @function initWithArray + @abstract A member function to initialize a new instance of OSSet and populate it with the contents of the OSArray object provided. + @param array The OSArray object containing OSMetaClassBase derived objects used to populate the new OSSet object. + @param capacity The initial storage capacity of the object. This value must be equal to or larger than the number of objects provided by the OSArray object passed as the first parameter. + @result Returns true if initialization successful or false on failure. + */ + virtual bool initWithArray(const OSArray *array, + unsigned int capacity = 0); + /*! + @function initWithSet + @abstract A member function to initialize a new instance of OSSet and populate it with the contents of the OSSet object provided. + @param array The OSSet object containing OSMetaClassBase derived objects used to populate the new OSSet object. + @param capacity The initial storage capacity of the object. This value must be equal to or larger than the number of objects provided by the OSSet object passed as the first parameter. + @result Returns true if initialization successful or false on failure. + @discussion This function should not be called directly, use release() instead. + */ + virtual bool initWithSet(const OSSet *set, + unsigned int capacity = 0); + /*! + @function free + @abstract A member function to release all resources created or used by the OSArray instance. + */ + virtual void free(); + + /*! + @function getCount + @abstract A member function which returns the number of objects current in the collection. + @result Returns the number of objects in the collection. + */ + virtual unsigned int getCount() const; + /*! + @function getCapacity + @abstract A member function which returns the storage capacity of the collection. + @result Returns the storage size of the collection. 
+ */ + virtual unsigned int getCapacity() const; + /*! + @function getCapacityIncrement + @abstract A member function which returns the growth factor of the collection. + @result Returns the size by which the collection will grow. + */ + virtual unsigned int getCapacityIncrement() const; + /*! + @function setCapacityIncrement + @abstract A member function which sets the growth factor of the collection. + @result Returns the new increment. + */ + virtual unsigned int setCapacityIncrement(unsigned increment); + + /*! + @function ensureCapacity + @abstract A member function to grow the size of the collection. + @param newCapacity The new capacity for the collection to expand to. + @result Returns the new capacity of the collection or the previous capacity upon error. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity); + + /*! + @function flushCollection + @abstract A member function which removes and releases all objects within the collection. + */ + virtual void flushCollection(); + + /*! + @function setObject + @abstract A member function to place objects into the collection. + @param anObject The OSMetaClassBase derived object to be placed into the collection. + @result Returns true if the object was successfully placed into the collection, false otherwise. + @discussion The object added to the collection is automatically retained. + */ + virtual bool setObject(const OSMetaClassBase *anObject); + /*! + @function merge + @abstract A member function to merge the contents of an OSArray object with set. + @param array The OSArray object which contains the objects to be merged. + @result Returns true if the contents of the OSArray were successfully merged into the receiver. + */ + virtual bool merge(const OSArray *array); + /*! + @function merge + @abstract A member function to merge the contents of an OSSet object with receiver. + @param set The OSSet object which contains the objects to be merged. 
+ @result Returns true if the contents of the OSSet were successfully merged into the receiver. + */ + virtual bool merge(const OSSet *set); + + /*! + @function removeObject + @abstract A member function to remove objects from the collection. + @param anObject The OSMetaClassBase derived object to be removed from the collection. + @discussion The object removed from the collection is automatically released. + */ + virtual void removeObject(const OSMetaClassBase * anObject); + + /*! + @function containsObject + @abstract A member function to query the collection for the presence of an object. + @param anObject The OSMetaClassBase derived object to be queried for in the collecion. + @result Returns true if the object is present within the set, false otherwise. + */ + virtual bool containsObject(const OSMetaClassBase *anObject) const; + /*! + @function member + @abstract A member function to query the collection for the presence of an object. + @param anObject The OSMetaClassBase derived object to be queried for in the collecion. + @result Returns true if the object is present within the set, false otherwise. + */ + virtual bool member(const OSMetaClassBase *anObject) const; + /*! + @function getAnyObject + @abstract A member function which returns an object from the set. + @result Returns an object if one exists within the set. + */ + virtual OSObject *getAnyObject() const; + + /*! + @function isEqualTo + @abstract A member function to test the equality between the receiver and an OSSet object. + @param aSet An OSSet object to be compared against the receiver. + @result Returns true if the objects are equivalent. + */ + virtual bool isEqualTo(const OSSet *aSet) const; + /*! + @function isEqualTo + @abstract A member function to test the equality between the receiver and an unknown object. + @param anObject An object to be compared against the receiver. + @result Returns true if the objects are equal. 
+ */ + virtual bool isEqualTo(const OSMetaClassBase *anObject) const; + + /*! + @function serialize + @abstract A member function which archives the receiver. + @param s The OSSerialize object. + @result Returns true if serialization was successful, false if not. + */ + virtual bool serialize(OSSerialize *s) const; + + + OSMetaClassDeclareReservedUnused(OSSet, 0); + OSMetaClassDeclareReservedUnused(OSSet, 1); + OSMetaClassDeclareReservedUnused(OSSet, 2); + OSMetaClassDeclareReservedUnused(OSSet, 3); + OSMetaClassDeclareReservedUnused(OSSet, 4); + OSMetaClassDeclareReservedUnused(OSSet, 5); + OSMetaClassDeclareReservedUnused(OSSet, 6); + OSMetaClassDeclareReservedUnused(OSSet, 7); +}; + +#endif /* !_OS_OSSET_H */ diff --git a/libkern/libkern/c++/OSString.h b/libkern/libkern/c++/OSString.h new file mode 100644 index 000000000..577807a86 --- /dev/null +++ b/libkern/libkern/c++/OSString.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOString.h created by rsulack on Wed 17-Sep-1997 */ +/* IOString.h converted to C++ by gvdl on Fri 1998-10-30 */ + +#ifndef _OS_OSSTRING_H +#define _OS_OSSTRING_H + +#include + +class OSData; + +enum { kOSStringNoCopy = 0x00000001 }; + +/*! + @class OSString + @abstract A container class for managing strings. + @discussion + OSString is a container class for managing arrays of characters. Strings come in two varieties, mutable and immutable. An immutable OSString string is one which was created or initialized with the "NoCopy" functions, all other strings are mutable. When modifying an immutable string, the function called to perform the action will fail. +*/ +class OSString : public OSObject +{ + OSDeclareDefaultStructors(OSString) + +protected: + unsigned int flags; + unsigned int length; + char *string; + +public: + /*! + @function withString + @abstract Static constructor function to create and initialize an instance of OSString from another OSString. + @param aString An OSString object. + @result Returns an instance of OSString or 0 on error. + */ + static OSString *withString(const OSString *aString); + /*! + @function withCString + @abstract Static constructor function to create and initialize an instance of OSString. + @param cString A simple c-string. + @result Returns an instance of OSString or 0 on error. + */ + static OSString *withCString(const char *cString); + /*! + @function withCStringNoCopy + @abstract Static constructor function to create and initialize an instance of OSString but does not copy the original c-string into container. + @param cString A simple c-string. + @result Returns an instance of OSString or 0 on error. + */ + static OSString *withCStringNoCopy(const char *cString); + + /*! + @function initWithString + @abstract Member function to initialize an instance of OSString from another OSString object. + @param aString An OSString object. + @result Returns true on success, false otherwise. 
+ */ + virtual bool initWithString(const OSString *aString); + /*! + @function initWithCString + @abstract Member function to initialize an instance of OSString with a simple c-string. + @param cString A simple c-string. + @result Returns true on success, false otherwise. + */ + virtual bool initWithCString(const char *cString); + /*! + @function initWithCStringNoCopy + @abstract Member function to initialize an instance of OSString with a simple c-string but does not copy the string into the container. + @param cString A simple c-string. + @result Returns true on success, false otherwise. + */ + virtual bool initWithCStringNoCopy(const char *cString); + /*! + @function free + @abstract Releases all resources used by the OSString object. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + + /*! + @function getLength + @abstract A member function to return the length of the string. + @result Returns the length of the string. + */ + virtual unsigned int getLength() const; + /*! + @function getChar + @abstract Returns a character at a particular index in the string object. + @param index The index into the string. + @result Returns a character. + */ + virtual char getChar(unsigned int index) const; + /*! + @function setChar + @abstract Replaces a character at a particular index in the string object. + @param index The index into the string. + @result Returns true if the character was successfully replaced or false if the string is immutable or index was beyond the bounds of the character array. + */ + virtual bool setChar(char aChar, unsigned int index); + + /*! + @function getCStringNoCopy + @abstract Returns a pointer to the internal c-string array. + @result Returns a pointer to the internal c-string array. + */ + virtual const char *getCStringNoCopy() const; + + /*! + @function isEqualTo + @abstract A member function to test the equality of two OSString objects. + @param aString An OSString object. 
+ @result Returns true if the two strings are equal, false otherwise. + */ + virtual bool isEqualTo(const OSString *aString) const; + /*! + @function isEqualTo + @abstract A member function to test the equality of c-string and the internal string array of the receiving OSString object. + @param aCString A simple c-string. + @result Returns true if the two strings are equal, false otherwise. + */ + virtual bool isEqualTo(const char *aCString) const; + /*! + @function isEqualTo + @abstract A member function to test the equality of an unknown OSObject derived object and the OSString instance. + @param obj An OSObject derived object. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSMetaClassBase *obj) const; + /*! + @function isEqualTo + @abstract A member function to test the equality of an unknown OSData object and the OSString instance. + @param obj An OSData object. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSData *obj) const; + + /*! + @function serialize + @abstract A member function which archives the receiver. + @param s The OSSerialize object. + @result Returns true if serialization was successful, false if not. 
+ */ + virtual bool serialize(OSSerialize *s) const; + + OSMetaClassDeclareReservedUnused(OSString, 0); + OSMetaClassDeclareReservedUnused(OSString, 1); + OSMetaClassDeclareReservedUnused(OSString, 2); + OSMetaClassDeclareReservedUnused(OSString, 3); + OSMetaClassDeclareReservedUnused(OSString, 4); + OSMetaClassDeclareReservedUnused(OSString, 5); + OSMetaClassDeclareReservedUnused(OSString, 6); + OSMetaClassDeclareReservedUnused(OSString, 7); + OSMetaClassDeclareReservedUnused(OSString, 8); + OSMetaClassDeclareReservedUnused(OSString, 9); + OSMetaClassDeclareReservedUnused(OSString, 10); + OSMetaClassDeclareReservedUnused(OSString, 11); + OSMetaClassDeclareReservedUnused(OSString, 12); + OSMetaClassDeclareReservedUnused(OSString, 13); + OSMetaClassDeclareReservedUnused(OSString, 14); + OSMetaClassDeclareReservedUnused(OSString, 15); +}; + +#endif /* !_OS_OSSTRING_H */ diff --git a/libkern/libkern/c++/OSSymbol.h b/libkern/libkern/c++/OSSymbol.h new file mode 100644 index 000000000..83143678d --- /dev/null +++ b/libkern/libkern/c++/OSSymbol.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* IOSymbol.h created by gvdl on Fri 1998-10-30 */ +/* IOSymbol must be created through the factory methods and thus is not subclassable. */ + +#ifndef _OS_OSSYMBOL_H +#define _OS_OSSYMBOL_H + +#include + +/*! + @class OSSymbol + @abstract A container class whose instances represent unique string values. + @discussion + An OSSymbol object represents a unique string value. When creating an OSSymbol, a string is given and an OSSymbol representing this string is created if none exist for this string. If a symbol for this string already exists, then a reference to an existing symbol is returned. +*/ +class OSSymbol : public OSString +{ + friend class OSSymbolPool; + + OSDeclareAbstractStructors(OSSymbol) + +private: + struct ExpansionData { }; + + /*! @var reserved + Reserved for future use. (Internal use only) */ + ExpansionData *reserved; + + static void initialize(); + + friend void checkModuleForSymbols(void); /* in catalogue? */ + + // The string init methods have to be removed from the inheritance. + virtual bool initWithString(const OSString *aString); + virtual bool initWithCString(const char *cString); + virtual bool initWithCStringNoCopy(const char *cString); + +protected: + /*! + @function free + @abstract A member function to release all resources created or used by the OSString object. + @discussion This function should not be called directly, use release() instead. + */ + virtual void free(); + +public: + /*! + @function withString + @abstract A static constructor function to create an OSSymbol instance from an OSString object or returns an existing OSSymbol object based on the OSString object given. + @param aString An OSString object. + @result Returns a unique OSSymbol object for the string given. + */ + static const OSSymbol *withString(const OSString *aString); + /*! 
+ @function withCString + @abstract A static constructor function to create an OSSymbol instance from a simple c-string or returns an existing OSSymbol object based on the string object given. + @param cString A c-string. + @result Returns a unique OSSymbol object for the string given. + */ + static const OSSymbol *withCString(const char *cString); + /*! + @function withCStringNoCopy + @abstract A static constructor function to create an OSSymbol instance from a simple c-string, but does not copy the string to the container. + @param cString A c-string. + @result Returns a unique OSSymbol object for the string given. + */ + static const OSSymbol *withCStringNoCopy(const char *cString); + + /*! + @function isEqualTo + @abstract A member function which tests the equality between two OSSymbol objects. Two OSSymbol objects are only equivalent when their references are identical. + @param aSymbol The OSSymbol object to be compared against the receiver. + @result Returns true if the two objects are equivalent, false otherwise. + */ + virtual bool isEqualTo(const OSSymbol *aSymbol) const; + /*! + @function isEqualTo + @abstract A member function which tests the equality between an OSSymbol object and a simple c-string. + @param aCString The c-string to be compared against the receiver. + @result Returns true if the OSSymbol's internal string representation is equivalent to the c-string it is being compared against, false otherwise. + */ + virtual bool isEqualTo(const char *aCString) const; + /*! + @function isEqualTo + @abstract A member function which tests the equality between an OSSymbol object and an arbitrary OSObject derived object. + @param obj The OSObject derived object to be compared against the receiver. + @result Returns true if the OSSymbol and the OSObject objects are equivalent. 
+ */ + virtual bool isEqualTo(const OSMetaClassBase *obj) const; + + /* OSRuntime only INTERNAL API - DO NOT USE */ + static void checkForPageUnload(void *startAddr, void *endAddr); + + + OSMetaClassDeclareReservedUnused(OSSymbol, 0); + OSMetaClassDeclareReservedUnused(OSSymbol, 1); + OSMetaClassDeclareReservedUnused(OSSymbol, 2); + OSMetaClassDeclareReservedUnused(OSSymbol, 3); + OSMetaClassDeclareReservedUnused(OSSymbol, 4); + OSMetaClassDeclareReservedUnused(OSSymbol, 5); + OSMetaClassDeclareReservedUnused(OSSymbol, 6); + OSMetaClassDeclareReservedUnused(OSSymbol, 7); +}; + +#endif /* !_OS_OSSYMBOL_H */ diff --git a/libkern/libkern/c++/OSUnserialize.h b/libkern/libkern/c++/OSUnserialize.h new file mode 100644 index 000000000..80827e652 --- /dev/null +++ b/libkern/libkern/c++/OSUnserialize.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* OSUnserialize.h created by rsulack on Mon 23-Nov-1998 */ + +#ifndef _OS_OSUNSERIALIZE_H +#define _OS_OSUNSERIALIZE_H + +class OSObject; +class OSString; + +/*! 
@function OSUnserializeXML + @abstract Recreates an OS Container object from its previously serialized OS Container class instance data. + @param buffer pointer to buffer containing XML data representing the object to be recreated. + @param errorString if this is a valid pointer and the XML parser finds an error in buffer, errorString contains text indicating the line number and type of error encountered. + @result Pointer to the recreated object, or zero on failure. */ + +extern OSObject* OSUnserializeXML(const char *buffer, OSString **errorString = 0); + +/* this should no longer be used */ +extern OSObject* OSUnserialize(const char *buffer, OSString **errorString = 0); + +#endif /* _OS_OSUNSERIALIZE_H */ diff --git a/libkern/libkern/i386/Makefile b/libkern/libkern/i386/Makefile new file mode 100644 index 000000000..1bdad11a7 --- /dev/null +++ b/libkern/libkern/i386/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + OSByteOrder.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = libkern/i386 + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = libkern/i386 + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/libkern/i386/OSByteOrder.h b/libkern/libkern/i386/OSByteOrder.h new file mode 100644 index 000000000..da3d0a459 --- /dev/null +++ b/libkern/libkern/i386/OSByteOrder.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + +#ifndef _OS_OSBYTEORDERI386_H +#define _OS_OSBYTEORDERI386_H + +#include + +/* Functions for byte reversed loads. 
*/ + +OS_INLINE +UInt16 +OSReadSwapInt16( + volatile void * base, + UInt offset +) +{ + UInt16 result; + + result = *(volatile UInt16 *)((UInt8 *)base + offset); + __asm__ volatile("xchgb %b0,%h0" + : "=q" (result) + : "0" (result)); + return result; +} + +OS_INLINE +UInt32 +OSReadSwapInt32( + volatile void * base, + UInt offset +) +{ + UInt32 result; + + result = *(volatile UInt32 *)((UInt8 *)base + offset); + __asm__ volatile("bswap %0" + : "=r" (result) + : "0" (result)); + return result; +} + +OS_INLINE +UInt64 +OSReadSwapInt64( + volatile void * base, + UInt offset +) +{ + UInt64 * inp; + union ullc { + UInt64 ull; + UInt ul[2]; + } outv; + + inp = (UInt64 *)base; + outv.ul[0] = OSReadSwapInt32(inp, offset + 4); + outv.ul[1] = OSReadSwapInt32(inp, offset); + return outv.ull; +} + +OS_INLINE +UInt +OSReadSwapInt( + volatile void * base, + UInt offset +) +{ + UInt result; + + result = *(volatile UInt *)((UInt8 *)base + offset); + __asm__ volatile("bswap %0" + : "=r" (result) + : "0" (result)); + return result; +} + +/* Functions for byte reversed stores. 
*/ + +OS_INLINE +void +OSWriteSwapInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + __asm__ volatile("xchgb %b0,%h0" + : "=q" (data) + : "0" (data)); + *(volatile UInt16 *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteSwapInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + __asm__ volatile("bswap %0" + : "=r" (data) + : "0" (data)); + *(volatile UInt32 *)((UInt8 *)base + offset) = data; +} + +OS_INLINE +void +OSWriteSwapInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + UInt64 * outp; + union ullc { + UInt64 ull; + UInt ul[2]; + } *inp; + + outp = (UInt64 *)base; + inp = (union ullc *)&data; + OSWriteSwapInt32(outp, offset, inp->ul[1]); + OSWriteSwapInt32(outp, offset + 4, inp->ul[0]); +} + +OS_INLINE +void +OSWriteSwapInt( + volatile void * base, + UInt offset, + UInt data +) +{ + __asm__ volatile("bswap %0" + : "=r" (data) + : "0" (data)); + *(volatile UInt *)((UInt8 *)base + offset) = data; +} + +/* Generic byte swapping functions. */ + +OS_INLINE +UInt16 +OSSwapInt16( + UInt16 data +) +{ + UInt16 temp = data; + return OSReadSwapInt16(&temp, 0); +} + +OS_INLINE +UInt32 +OSSwapInt32( + UInt32 data +) +{ + UInt32 temp = data; + return OSReadSwapInt32(&temp, 0); +} + +OS_INLINE +UInt64 +OSSwapInt64( + UInt64 data +) +{ + UInt64 temp = data; + return OSReadSwapInt64(&temp, 0); +} + +OS_INLINE +UInt +OSSwapInt( + UInt data +) +{ + UInt temp = data; + return OSReadSwapInt(&temp, 0); +} + +#endif /* ! 
_OS_OSBYTEORDERI386_H */ diff --git a/libkern/libkern/machine/Makefile b/libkern/libkern/machine/Makefile new file mode 100644 index 000000000..461b7c379 --- /dev/null +++ b/libkern/libkern/machine/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + OSByteOrder.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = libkern/machine + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = libkern/machine + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/libkern/machine/OSByteOrder.h b/libkern/libkern/machine/OSByteOrder.h new file mode 100644 index 000000000..dd4d67331 --- /dev/null +++ b/libkern/libkern/machine/OSByteOrder.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _OS_OSBYTEORDERMACHINE_H +#define _OS_OSBYTEORDERMACHINE_H + +#include + +/* Functions for byte reversed loads. */ + +OS_INLINE +UInt16 +OSReadSwapInt16( + volatile void * base, + UInt offset +) +{ + union sconv { + UInt16 us; + UInt8 uc[2]; + } *inp, outv; + inp = (union sconv *)((UInt8 *)base + offset); + outv.uc[0] = inp->uc[1]; + outv.uc[1] = inp->uc[0]; + return (outv.us); +} + +OS_INLINE +UInt32 +OSReadSwapInt32( + volatile void * base, + UInt offset +) +{ + union lconv { + UInt32 ul; + UInt8 uc[4]; + } *inp, outv; + inp = (union lconv *)((UInt8 *)base + offset); + outv.uc[0] = inp->uc[3]; + outv.uc[1] = inp->uc[2]; + outv.uc[2] = inp->uc[1]; + outv.uc[3] = inp->uc[0]; + return (outv.ul); +} + +OS_INLINE +UInt64 +OSReadSwapInt64( + volatile void * base, + UInt offset +) +{ + union llconv { + UInt64 ull; + UInt8 uc[8]; + } *inp, outv; + inp = (union llconv *)((UInt8 *)base + offset); + outv.uc[0] = inp->uc[7]; + outv.uc[1] = inp->uc[6]; + outv.uc[2] = inp->uc[5]; + outv.uc[3] = inp->uc[4]; + outv.uc[4] = inp->uc[3]; + outv.uc[5] = inp->uc[2]; + outv.uc[6] = inp->uc[1]; + outv.uc[7] = inp->uc[0]; + return (outv.ull); +} + +OS_INLINE +UInt +OSReadSwapInt( + volatile void * base, + UInt offset +) +{ + return (UInt)OSReadSwapInt32(base, offset); +} + +/* Functions for byte reversed stores. 
*/ + +OS_INLINE +void +OSWriteSwapInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + union sconv { + UInt16 us; + UInt8 uc[2]; + } *inp, *outp; + inp = (union sconv *)((UInt8 *)base + offset); + outp = (union sconv *)&data; + outp->uc[0] = inp->uc[1]; + outp->uc[1] = inp->uc[0]; +} + +OS_INLINE +void +OSWriteSwapInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + union lconv { + UInt32 ul; + UInt8 uc[4]; + } *inp, *outp; + inp = (union lconv *)((UInt8 *)base + offset); + outp = (union lconv *)&data; + outp->uc[0] = inp->uc[3]; + outp->uc[1] = inp->uc[2]; + outp->uc[2] = inp->uc[1]; + outp->uc[3] = inp->uc[0]; +} + +OS_INLINE +void +OSWriteSwapInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + union llconv { + UInt64 ull; + UInt8 uc[8]; + } *inp, *outp; + inp = (union llconv *)((UInt8 *)base + offset); + outp = (union llconv *)&data; + outp->uc[0] = inp->uc[7]; + outp->uc[1] = inp->uc[6]; + outp->uc[2] = inp->uc[5]; + outp->uc[3] = inp->uc[4]; + outp->uc[4] = inp->uc[3]; + outp->uc[5] = inp->uc[2]; + outp->uc[6] = inp->uc[1]; + outp->uc[7] = inp->uc[0]; +} + +OS_INLINE +void +OSWriteSwapInt( + volatile void * base, + UInt offset, + UInt data +) +{ + OSWriteSwapInt32(base, offset, (UInt32)data); +} + +/* Generic byte swapping functions. */ + +OS_INLINE +UInt16 +OSSwapInt16( + UInt16 data +) +{ + UInt16 temp = data; + return OSReadSwapInt16(&temp, 0); +} + +OS_INLINE +UInt32 +OSSwapInt32( + UInt32 data +) +{ + UInt32 temp = data; + return OSReadSwapInt32(&temp, 0); +} + +OS_INLINE +UInt64 +OSSwapInt64( + UInt64 data +) +{ + UInt64 temp = data; + return OSReadSwapInt64(&temp, 0); +} + +OS_INLINE +UInt +OSSwapInt( + UInt data +) +{ + UInt temp = data; + return OSReadSwapInt(&temp, 0); +} + +#endif /* ! 
_OS_OSBYTEORDERMACHINE_H */ diff --git a/libkern/libkern/ppc/Makefile b/libkern/libkern/ppc/Makefile new file mode 100644 index 000000000..723f47dfa --- /dev/null +++ b/libkern/libkern/ppc/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + OSByteOrder.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = libkern/ppc + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = libkern/ppc + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libkern/libkern/ppc/OSByteOrder.h b/libkern/libkern/ppc/OSByteOrder.h new file mode 100644 index 000000000..0a7e75a72 --- /dev/null +++ b/libkern/libkern/ppc/OSByteOrder.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + */ + + +#ifndef _OS_OSBYTEORDERPPC_H +#define _OS_OSBYTEORDERPPC_H + +#include + +/* Functions for byte reversed loads. */ + +OS_INLINE +UInt16 +OSReadSwapInt16( + volatile void * base, + UInt offset +) +{ + UInt16 result; + __asm__ volatile("lhbrx %0, %1, %2" + : "=r" (result) + : "b" (base), "r" (offset) + : "memory"); + return result; +} + +OS_INLINE +UInt32 +OSReadSwapInt32( + volatile void * base, + UInt offset +) +{ + UInt32 result; + __asm__ volatile("lwbrx %0, %1, %2" + : "=r" (result) + : "b" (base), "r" (offset) + : "memory"); + return result; +} + +OS_INLINE +UInt64 +OSReadSwapInt64( + volatile void * base, + UInt offset +) +{ + UInt64 * inp; + union ullc { + UInt64 ull; + UInt ul[2]; + } outv; + + inp = (UInt64 *)base; + outv.ul[0] = OSReadSwapInt32(inp, offset + 4); + outv.ul[1] = OSReadSwapInt32(inp, offset); + return outv.ull; +} + +OS_INLINE +UInt +OSReadSwapInt( + volatile void * base, + UInt offset +) +{ + UInt result; + __asm__ volatile("lwbrx %0, %1, %2" + : "=r" (result) + : "b" (base), "r" (offset) + : "memory"); + return result; +} + +/* Functions for byte reversed stores. 
*/ + +OS_INLINE +void +OSWriteSwapInt16( + volatile void * base, + UInt offset, + UInt16 data +) +{ + __asm__ volatile("sthbrx %0, %1, %2" + : + : "r" (data), "b" (base), "r" (offset) + : "memory"); +} + +OS_INLINE +void +OSWriteSwapInt32( + volatile void * base, + UInt offset, + UInt32 data +) +{ + __asm__ volatile("stwbrx %0, %1, %2" + : + : "r" (data), "b" (base), "r" (offset) + : "memory" ); +} + +OS_INLINE +void +OSWriteSwapInt64( + volatile void * base, + UInt offset, + UInt64 data +) +{ + UInt64 * outp; + union ullc { + UInt64 ull; + UInt ul[2]; + } *inp; + + outp = (UInt64 *)base; + inp = (union ullc *)&data; + OSWriteSwapInt32(outp, offset, inp->ul[1]); + OSWriteSwapInt32(outp, offset + 4, inp->ul[0]); +} + +OS_INLINE +void +OSWriteSwapInt( + volatile void * base, + UInt offset, + UInt data +) +{ + __asm__ volatile("stwbrx %0, %1, %2" + : + : "r" (data), "b" (base), "r" (offset) + : "memory" ); +} + +/* Generic byte swapping functions. */ + +OS_INLINE +UInt16 +OSSwapInt16( + UInt16 data +) +{ + UInt16 temp = data; + return OSReadSwapInt16(&temp, 0); +} + +OS_INLINE +UInt32 +OSSwapInt32( + UInt32 data +) +{ + UInt32 temp = data; + return OSReadSwapInt32(&temp, 0); +} + +OS_INLINE +UInt64 +OSSwapInt64( + UInt64 data +) +{ + UInt64 temp = data; + return OSReadSwapInt64(&temp, 0); +} + +OS_INLINE +UInt +OSSwapInt( + UInt data +) +{ + UInt temp = data; + return OSReadSwapInt(&temp, 0); +} + +#endif /* ! _OS_OSBYTEORDERPPC_H */ diff --git a/libkern/mach-o/loader.h b/libkern/mach-o/loader.h new file mode 100644 index 000000000..277b2b1e2 --- /dev/null +++ b/libkern/mach-o/loader.h @@ -0,0 +1,722 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHO_LOADER_H_ +#define _MACHO_LOADER_H_ + +/* + * This file describes the format of mach object files. + */ + +/* + * is needed here for the cpu_type_t and cpu_subtype_t types + * and contains the constants for the possible values of these types. + */ +#include + +/* + * is needed here for the vm_prot_t type and contains the + * constants that are or'ed together for the possible values of this type. + */ +#include + +/* + * is expected to define the flavors of the thread + * states and the structures of those flavors for each machine. + */ +#include + +/* + * The mach header appears at the very beginning of the object file. + */ +struct mach_header { + unsigned long magic; /* mach magic number identifier */ + cpu_type_t cputype; /* cpu specifier */ + cpu_subtype_t cpusubtype; /* machine specifier */ + unsigned long filetype; /* type of file */ + unsigned long ncmds; /* number of load commands */ + unsigned long sizeofcmds; /* the size of all the load commands */ + unsigned long flags; /* flags */ +}; + +/* Constant for the magic field of the mach_header */ +#define MH_MAGIC 0xfeedface /* the mach magic number */ +#define MH_CIGAM NXSwapInt(MH_MAGIC) + +/* + * The layout of the file depends on the filetype. For all but the MH_OBJECT + * file type the segments are padded out and aligned on a segment alignment + * boundary for efficient demand pageing. 
The MH_EXECUTE, MH_FVMLIB, MH_DYLIB, + * MH_DYLINKER and MH_BUNDLE file types also have the headers included as part + * of their first segment. + * + * The file type MH_OBJECT is a compact format intended as output of the + * assembler and input (and possibly output) of the link editor (the .o + * format). All sections are in one unnamed segment with no segment padding. + * This format is used as an executable format when the file is so small the + * segment padding greatly increases it's size. + * + * The file type MH_PRELOAD is an executable format intended for things that + * not executed under the kernel (proms, stand alones, kernels, etc). The + * format can be executed under the kernel but may demand paged it and not + * preload it before execution. + * + * A core file is in MH_CORE format and can be any in an arbritray legal + * Mach-O file. + * + * Constants for the filetype field of the mach_header + */ +#define MH_OBJECT 0x1 /* relocatable object file */ +#define MH_EXECUTE 0x2 /* demand paged executable file */ +#define MH_FVMLIB 0x3 /* fixed VM shared library file */ +#define MH_CORE 0x4 /* core file */ +#define MH_PRELOAD 0x5 /* preloaded executable file */ +#define MH_DYLIB 0x6 /* dynamicly bound shared library file*/ +#define MH_DYLINKER 0x7 /* dynamic link editor */ +#define MH_BUNDLE 0x8 /* dynamicly bound bundle file */ + +/* Constants for the flags field of the mach_header */ +#define MH_NOUNDEFS 0x1 /* the object file has no undefined + references, can be executed */ +#define MH_INCRLINK 0x2 /* the object file is the output of an + incremental link against a base file + and can't be link edited again */ +#define MH_DYLDLINK 0x4 /* the object file is input for the + dynamic linker and can't be staticly + link edited again */ +#define MH_BINDATLOAD 0x8 /* the object file's undefined + references are bound by the dynamic + linker when loaded. */ +#define MH_PREBOUND 0x10 /* the file has it's dynamic undefined + references prebound. 
*/ + +/* + * The load commands directly follow the mach_header. The total size of all + * of the commands is given by the sizeofcmds field in the mach_header. All + * load commands must have as their first two fields cmd and cmdsize. The cmd + * field is filled in with a constant for that command type. Each command type + * has a structure specifically for it. The cmdsize field is the size in bytes + * of the particular load command structure plus anything that follows it that + * is a part of the load command (i.e. section structures, strings, etc.). To + * advance to the next load command the cmdsize can be added to the offset or + * pointer of the current load command. The cmdsize MUST be a multiple of + * sizeof(long) (this is forever the maximum alignment of any load commands). + * The padded bytes must be zero. All tables in the object file must also + * follow these rules so the file can be memory mapped. Otherwise the pointers + * to these tables will not work well or at all on some machines. With all + * padding zeroed like objects will compare byte for byte. 
+ */ +struct load_command { + unsigned long cmd; /* type of load command */ + unsigned long cmdsize; /* total size of command in bytes */ +}; + +/* Constants for the cmd field of all load commands, the type */ +#define LC_SEGMENT 0x1 /* segment of this file to be mapped */ +#define LC_SYMTAB 0x2 /* link-edit stab symbol table info */ +#define LC_SYMSEG 0x3 /* link-edit gdb symbol table info (obsolete) */ +#define LC_THREAD 0x4 /* thread */ +#define LC_UNIXTHREAD 0x5 /* unix thread (includes a stack) */ +#define LC_LOADFVMLIB 0x6 /* load a specified fixed VM shared library */ +#define LC_IDFVMLIB 0x7 /* fixed VM shared library identification */ +#define LC_IDENT 0x8 /* object identification info (obsolete) */ +#define LC_FVMFILE 0x9 /* fixed VM file inclusion (internal use) */ +#define LC_PREPAGE 0xa /* prepage command (internal use) */ +#define LC_DYSYMTAB 0xb /* dynamic link-edit symbol table info */ +#define LC_LOAD_DYLIB 0xc /* load a dynamicly linked shared library */ +#define LC_ID_DYLIB 0xd /* dynamicly linked shared lib identification */ +#define LC_LOAD_DYLINKER 0xe /* load a dynamic linker */ +#define LC_ID_DYLINKER 0xf /* dynamic linker identification */ +#define LC_PREBOUND_DYLIB 0x10 /* modules prebound for a dynamicly */ + /* linked shared library */ + +/* + * A variable length string in a load command is represented by an lc_str + * union. The strings are stored just after the load command structure and + * the offset is from the start of the load command structure. The size + * of the string is reflected in the cmdsize field of the load command. + * Once again any padded bytes to bring the cmdsize field to a multiple + * of sizeof(long) must be zero. + */ +union lc_str { + unsigned long offset; /* offset to the string */ + char *ptr; /* pointer to the string */ +}; + +/* + * The segment load command indicates that a part of this file is to be + * mapped into the task's address space. 
The size of this segment in memory, + * vmsize, maybe equal to or larger than the amount to map from this file, + * filesize. The file is mapped starting at fileoff to the beginning of + * the segment in memory, vmaddr. The rest of the memory of the segment, + * if any, is allocated zero fill on demand. The segment's maximum virtual + * memory protection and initial virtual memory protection are specified + * by the maxprot and initprot fields. If the segment has sections then the + * section structures directly follow the segment command and their size is + * reflected in cmdsize. + */ +struct segment_command { + unsigned long cmd; /* LC_SEGMENT */ + unsigned long cmdsize; /* includes sizeof section structs */ + char segname[16]; /* segment name */ + unsigned long vmaddr; /* memory address of this segment */ + unsigned long vmsize; /* memory size of this segment */ + unsigned long fileoff; /* file offset of this segment */ + unsigned long filesize; /* amount to map from the file */ + vm_prot_t maxprot; /* maximum VM protection */ + vm_prot_t initprot; /* initial VM protection */ + unsigned long nsects; /* number of sections in segment */ + unsigned long flags; /* flags */ +}; + +/* Constants for the flags field of the segment_command */ +#define SG_HIGHVM 0x1 /* the file contents for this segment is for + the high part of the VM space, the low part + is zero filled (for stacks in core files) */ +#define SG_FVMLIB 0x2 /* this segment is the VM that is allocated by + a fixed VM library, for overlap checking in + the link editor */ +#define SG_NORELOC 0x4 /* this segment has nothing that was relocated + in it and nothing relocated to it, that is + it maybe safely replaced without relocation*/ + +/* + * A segment is made up of zero or more sections. Non-MH_OBJECT files have + * all of their segments with the proper sections in each, and padded to the + * specified segment alignment when produced by the link editor. 
The first + * segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header + * and load commands of the object file before it's first section. The zero + * fill sections are always last in their segment (in all formats). This + * allows the zeroed segment padding to be mapped into memory where zero fill + * sections might be. + * + * The MH_OBJECT format has all of it's sections in one segment for + * compactness. There is no padding to a specified segment boundary and the + * mach_header and load commands are not part of the segment. + * + * Sections with the same section name, sectname, going into the same segment, + * segname, are combined by the link editor. The resulting section is aligned + * to the maximum alignment of the combined sections and is the new section's + * alignment. The combined sections are aligned to their original alignment in + * the combined section. Any padded bytes to get the specified alignment are + * zeroed. + * + * The format of the relocation entries referenced by the reloff and nreloc + * fields of the section structure for mach object files is described in the + * header file . + */ +struct section { + char sectname[16]; /* name of this section */ + char segname[16]; /* segment this section goes in */ + unsigned long addr; /* memory address of this section */ + unsigned long size; /* size in bytes of this section */ + unsigned long offset; /* file offset of this section */ + unsigned long align; /* section alignment (power of 2) */ + unsigned long reloff; /* file offset of relocation entries */ + unsigned long nreloc; /* number of relocation entries */ + unsigned long flags; /* flags (section type and attributes)*/ + unsigned long reserved1; /* reserved */ + unsigned long reserved2; /* reserved */ +}; + +/* + * The flags field of a section structure is separated into two parts a section + * type and section attributes. 
The section types are mutually exclusive (it + * can only have one type) but the section attributes are not (it may have more + * than one attribute). + */ +#define SECTION_TYPE 0x000000ff /* 256 section types */ +#define SECTION_ATTRIBUTES 0xffffff00 /* 24 section attributes */ + +/* Constants for the type of a section */ +#define S_REGULAR 0x0 /* regular section */ +#define S_ZEROFILL 0x1 /* zero fill on demand section */ +#define S_CSTRING_LITERALS 0x2 /* section with only literal C strings*/ +#define S_4BYTE_LITERALS 0x3 /* section with only 4 byte literals */ +#define S_8BYTE_LITERALS 0x4 /* section with only 8 byte literals */ +#define S_LITERAL_POINTERS 0x5 /* section with only pointers to */ + /* literals */ +/* + * For the two types of symbol pointers sections and the symbol stubs section + * they have indirect symbol table entries. For each of the entries in the + * section the indirect symbol table entries, in corresponding order in the + * indirect symbol table, start at the index stored in the reserved1 field + * of the section structure. Since the indirect symbol table entries + * correspond to the entries in the section the number of indirect symbol table + * entries is inferred from the size of the section divided by the size of the + * entries in the section. For symbol pointers sections the size of the entries + * in the section is 4 bytes and for symbol stubs sections the byte size of the + * stubs is stored in the reserved2 field of the section structure. + */ +#define S_NON_LAZY_SYMBOL_POINTERS 0x6 /* section with only non-lazy + symbol pointers */ +#define S_LAZY_SYMBOL_POINTERS 0x7 /* section with only lazy symbol + pointers */ +#define S_SYMBOL_STUBS 0x8 /* section with only symbol + stubs, byte size of stub in + the reserved2 field */ +#define S_MOD_INIT_FUNC_POINTERS 0x9 /* section with only function + pointers for initialization*/ +/* + * Constants for the section attributes part of the flags field of a section + * structure. 
+ */ +#define SECTION_ATTRIBUTES_USR 0xff000000 /* User setable attributes */ +#define S_ATTR_PURE_INSTRUCTIONS 0x80000000 /* section contains only true + machine instructions */ +#define SECTION_ATTRIBUTES_SYS 0x00ffff00 /* system setable attributes */ +#define S_ATTR_SOME_INSTRUCTIONS 0x00000400 /* section contains some + machine instructions */ +#define S_ATTR_EXT_RELOC 0x00000200 /* section has external + relocation entries */ +#define S_ATTR_LOC_RELOC 0x00000100 /* section has local + relocation entries */ + + +/* + * The names of segments and sections in them are mostly meaningless to the + * link-editor. But there are few things to support traditional UNIX + * executables that require the link-editor and assembler to use some names + * agreed upon by convention. + * + * The initial protection of the "__TEXT" segment has write protection turned + * off (not writeable). + * + * The link-editor will allocate common symbols at the end of the "__common" + * section in the "__DATA" segment. It will create the section and segment + * if needed. 
+ */ + +/* The currently known segment names and the section names in those segments */ + +#define SEG_PAGEZERO "__PAGEZERO" /* the pagezero segment which has no */ + /* protections and catches NULL */ + /* references for MH_EXECUTE files */ + + +#define SEG_TEXT "__TEXT" /* the tradition UNIX text segment */ +#define SECT_TEXT "__text" /* the real text part of the text */ + /* section no headers, and no padding */ +#define SECT_FVMLIB_INIT0 "__fvmlib_init0" /* the fvmlib initialization */ + /* section */ +#define SECT_FVMLIB_INIT1 "__fvmlib_init1" /* the section following the */ + /* fvmlib initialization */ + /* section */ + +#define SEG_DATA "__DATA" /* the tradition UNIX data segment */ +#define SECT_DATA "__data" /* the real initialized data section */ + /* no padding, no bss overlap */ +#define SECT_BSS "__bss" /* the real uninitialized data section*/ + /* no padding */ +#define SECT_COMMON "__common" /* the section common symbols are */ + /* allocated in by the link editor */ + +#define SEG_OBJC "__OBJC" /* objective-C runtime segment */ +#define SECT_OBJC_SYMBOLS "__symbol_table" /* symbol table */ +#define SECT_OBJC_MODULES "__module_info" /* module information */ +#define SECT_OBJC_STRINGS "__selector_strs" /* string table */ +#define SECT_OBJC_REFS "__selector_refs" /* string table */ + +#define SEG_ICON "__ICON" /* the NeXT icon segment */ +#define SECT_ICON_HEADER "__header" /* the icon headers */ +#define SECT_ICON_TIFF "__tiff" /* the icons in tiff format */ + +#define SEG_LINKEDIT "__LINKEDIT" /* the segment containing all structs */ + /* created and maintained by the link */ + /* editor. Created with -seglinkedit */ + /* option to ld(1) for MH_EXECUTE and */ + /* FVMLIB file types only */ + +#define SEG_UNIXSTACK "__UNIXSTACK" /* the unix stack segment */ + +/* + * Fixed virtual memory shared libraries are identified by two things. The + * target pathname (the name of the library as found for execution), and the + * minor version number. 
The address of where the headers are loaded is in + * header_addr. + */ +struct fvmlib { + union lc_str name; /* library's target pathname */ + unsigned long minor_version; /* library's minor version number */ + unsigned long header_addr; /* library's header address */ +}; + +/* + * A fixed virtual shared library (filetype == MH_FVMLIB in the mach header) + * contains a fvmlib_command (cmd == LC_IDFVMLIB) to identify the library. + * An object that uses a fixed virtual shared library also contains a + * fvmlib_command (cmd == LC_LOADFVMLIB) for each library it uses. + */ +struct fvmlib_command { + unsigned long cmd; /* LC_IDFVMLIB or LC_LOADFVMLIB */ + unsigned long cmdsize; /* includes pathname string */ + struct fvmlib fvmlib; /* the library identification */ +}; + +/* + * Dynamicly linked shared libraries are identified by two things. The + * pathname (the name of the library as found for execution), and the + * compatibility version number. The pathname must match and the compatibility + * number in the user of the library must be greater than or equal to the + * library being used. The time stamp is used to record the time a library was + * built and copied into user so it can be use to determined if the library used + * at runtime is exactly the same as used to built the program. + */ +struct dylib { + union lc_str name; /* library's path name */ + unsigned long timestamp; /* library's build time stamp */ + unsigned long current_version; /* library's current version number */ + unsigned long compatibility_version;/* library's compatibility vers number*/ +}; + +/* + * A dynamicly linked shared library (filetype == MH_DYLIB in the mach header) + * contains a dylib_command (cmd == LC_ID_DYLIB) to identify the library. + * An object that uses a dynamicly linked shared library also contains a + * dylib_command (cmd == LC_LOAD_DYLIB) for each library it uses. 
+ */ +struct dylib_command { + unsigned long cmd; /* LC_ID_DYLIB or LC_LOAD_DYLIB */ + unsigned long cmdsize; /* includes pathname string */ + struct dylib dylib; /* the library identification */ +}; + +/* + * A program (filetype == MH_EXECUTE) or bundle (filetype == MH_BUNDLE) that is + * prebound to it's dynamic libraries has one of these for each library that + * the static linker used in prebinding. It contains a bit vector for the + * modules in the library. The bits indicate which modules are bound (1) and + * which are not (0) from the library. The bit for module 0 is the low bit + * of the first byte. So the bit for the Nth module is: + * (linked_modules[N/8] >> N%8) & 1 + */ +struct prebound_dylib_command { + unsigned long cmd; /* LC_PREBOUND_DYLIB */ + unsigned long cmdsize; /* includes strings */ + union lc_str name; /* library's path name */ + unsigned long nmodules; /* number of modules in library */ + union lc_str linked_modules; /* bit vector of linked modules */ +}; + +/* + * A program that uses a dynamic linker contains a dylinker_command to identify + * the name of the dynamic linker (LC_LOAD_DYLINKER). And a dynamic linker + * contains a dylinker_command to identify the dynamic linker (LC_ID_DYLINKER). + * A file can have at most one of these. + */ +struct dylinker_command { + unsigned long cmd; /* LC_ID_DYLINKER or LC_LOAD_DYLINKER */ + unsigned long cmdsize; /* includes pathname string */ + union lc_str name; /* dynamic linker's path name */ +}; + +/* + * Thread commands contain machine-specific data structures suitable for + * use in the thread state primitives. The machine specific data structures + * follow the struct thread_command as follows. + * Each flavor of machine specific data structure is preceded by an unsigned + * long constant for the flavor of that data structure, an unsigned long + * that is the count of longs of the size of the state data structure and then + * the state data structure follows. 
This triple may be repeated for many + * flavors. The constants for the flavors, counts and state data structure + * definitions are expected to be in the header file . + * These machine specific data structures sizes must be multiples of + * sizeof(long). The cmdsize reflects the total size of the thread_command + * and all of the sizes of the constants for the flavors, counts and state + * data structures. + * + * For executable objects that are unix processes there will be one + * thread_command (cmd == LC_UNIXTHREAD) created for it by the link-editor. + * This is the same as a LC_THREAD, except that a stack is automatically + * created (based on the shell's limit for the stack size). Command arguments + * and environment variables are copied onto that stack. + */ +struct thread_command { + unsigned long cmd; /* LC_THREAD or LC_UNIXTHREAD */ + unsigned long cmdsize; /* total size of this command */ + /* unsigned long flavor flavor of thread state */ + /* unsigned long count count of longs in thread state */ + /* struct XXX_thread_state state thread state for this flavor */ + /* ... */ +}; + +/* + * The symtab_command contains the offsets and sizes of the link-edit 4.3BSD + * "stab" style symbol table information as described in the header files + * and . + */ +struct symtab_command { + unsigned long cmd; /* LC_SYMTAB */ + unsigned long cmdsize; /* sizeof(struct symtab_command) */ + unsigned long symoff; /* symbol table offset */ + unsigned long nsyms; /* number of symbol table entries */ + unsigned long stroff; /* string table offset */ + unsigned long strsize; /* string table size in bytes */ +}; + +/* + * This is the second set of the symbolic information which is used to support + * the data structures for the dynamicly link editor. + * + * The original set of symbolic information in the symtab_command which contains + * the symbol and string tables must also be present when this load command is + * present. 
When this load command is present the symbol table is organized + * into three groups of symbols: + * local symbols (static and debugging symbols) - grouped by module + * defined external symbols - grouped by module (sorted by name if not lib) + * undefined external symbols (sorted by name) + * In this load command there are offsets and counts to each of the three groups + * of symbols. + * + * This load command contains a the offsets and sizes of the following new + * symbolic information tables: + * table of contents + * module table + * reference symbol table + * indirect symbol table + * The first three tables above (the table of contents, module table and + * reference symbol table) are only present if the file is a dynamicly linked + * shared library. For executable and object modules, which are files + * containing only one module, the information that would be in these three + * tables is determined as follows: + * table of contents - the defined external symbols are sorted by name + * module table - the file contains only one module so everything in the + * file is part of the module. + * reference symbol table - is the defined and undefined external symbols + * + * For dynamicly linked shared library files this load command also contains + * offsets and sizes to the pool of relocation entries for all sections + * separated into two groups: + * external relocation entries + * local relocation entries + * For executable and object modules the relocation entries continue to hang + * off the section structures. 
+ */ +struct dysymtab_command { + unsigned long cmd; /* LC_DYSYMTAB */ + unsigned long cmdsize; /* sizeof(struct dysymtab_command) */ + + /* + * The symbols indicated by symoff and nsyms of the LC_SYMTAB load command + * are grouped into the following three groups: + * local symbols (further grouped by the module they are from) + * defined external symbols (further grouped by the module they are from) + * undefined symbols + * + * The local symbols are used only for debugging. The dynamic binding + * process may have to use them to indicate to the debugger the local + * symbols for a module that is being bound. + * + * The last two groups are used by the dynamic binding process to do the + * binding (indirectly through the module table and the reference symbol + * table when this is a dynamicly linked shared library file). + */ + unsigned long ilocalsym; /* index to local symbols */ + unsigned long nlocalsym; /* number of local symbols */ + + unsigned long iextdefsym; /* index to externally defined symbols */ + unsigned long nextdefsym; /* number of externally defined symbols */ + + unsigned long iundefsym; /* index to undefined symbols */ + unsigned long nundefsym; /* number of undefined symbols */ + + /* + * For the for the dynamic binding process to find which module a symbol + * is defined in the table of contents is used (analogous to the ranlib + * structure in an archive) which maps defined external symbols to modules + * they are defined in. This exists only in a dynamicly linked shared + * library file. For executable and object modules the defined external + * symbols are sorted by name and is use as the table of contents. + */ + unsigned long tocoff; /* file offset to table of contents */ + unsigned long ntoc; /* number of entries in table of contents */ + + /* + * To support dynamic binding of "modules" (whole object files) the symbol + * table must reflect the modules that the file was created from. 
This is + * done by having a module table that has indexes and counts into the merged + * tables for each module. The module structure that these two entries + * refer to is described below. This exists only in a dynamicly linked + * shared library file. For executable and object modules the file only + * contains one module so everything in the file belongs to the module. + */ + unsigned long modtaboff; /* file offset to module table */ + unsigned long nmodtab; /* number of module table entries */ + + /* + * To support dynamic module binding the module structure for each module + * indicates the external references (defined and undefined) each module + * makes. For each module there is an offset and a count into the + * reference symbol table for the symbols that the module references. + * This exists only in a dynamicly linked shared library file. For + * executable and object modules the defined external symbols and the + * undefined external symbols indicates the external references. + */ + unsigned long extrefsymoff; /* offset to referenced symbol table */ + unsigned long nextrefsyms; /* number of referenced symbol table entries */ + + /* + * The sections that contain "symbol pointers" and "routine stubs" have + * indexes and (implied counts based on the size of the section and fixed + * size of the entry) into the "indirect symbol" table for each pointer + * and stub. For every section of these two types the index into the + * indirect symbol table is stored in the section header in the field + * reserved1. An indirect symbol table entry is simply a 32bit index into + * the symbol table to the symbol that the pointer or stub is referring to. + * The indirect symbol table is ordered to match the entries in the section. 
+ */ + unsigned long indirectsymoff; /* file offset to the indirect symbol table */ + unsigned long nindirectsyms; /* number of indirect symbol table entries */ + + /* + * To support relocating an individual module in a library file quickly the + * external relocation entries for each module in the library need to be + * accessed efficiently. Since the relocation entries can't be accessed + * through the section headers for a library file they are separated into + * groups of local and external entries further grouped by module. In this + * case the presents of this load command who's extreloff, nextrel, + * locreloff and nlocrel fields are non-zero indicates that the relocation + * entries of non-merged sections are not referenced through the section + * structures (and the reloff and nreloc fields in the section headers are + * set to zero). + * + * Since the relocation entries are not accessed through the section headers + * this requires the r_address field to be something other than a section + * offset to identify the item to be relocated. In this case r_address is + * set to the offset from the vmaddr of the first LC_SEGMENT command. + * + * The relocation entries are grouped by module and the module table + * entries have indexes and counts into them for the group of external + * relocation entries for that the module. + * + * For sections that are merged across modules there must not be any + * remaining external relocation entries for them (for merged sections + * remaining relocation entries must be local). + */ + unsigned long extreloff; /* offset to external relocation entries */ + unsigned long nextrel; /* number of external relocation entries */ + + /* + * All the local relocation entries are grouped together (they are not + * grouped by their module since they are only used if the object is moved + * from it staticly link edited address). 
+ */ + unsigned long locreloff; /* offset to local relocation entries */ + unsigned long nlocrel; /* number of local relocation entries */ + +}; + +/* + * An indirect symbol table entry is simply a 32bit index into the symbol table + * to the symbol that the pointer or stub is refering to. Unless it is for a + * non-lazy symbol pointer section for a defined symbol which strip(1) as + * removed. In which case it has the value INDIRECT_SYMBOL_LOCAL. If the + * symbol was also absolute INDIRECT_SYMBOL_ABS is or'ed with that. + */ +#define INDIRECT_SYMBOL_LOCAL 0x80000000 +#define INDIRECT_SYMBOL_ABS 0x40000000 + + +/* a table of contents entry */ +struct dylib_table_of_contents { + unsigned long symbol_index; /* the defined external symbol + (index into the symbol table) */ + unsigned long module_index; /* index into the module table this symbol + is defined in */ +}; + +/* a module table entry */ +struct dylib_module { + unsigned long module_name; /* the module name (index into string table) */ + + unsigned long iextdefsym; /* index into externally defined symbols */ + unsigned long nextdefsym; /* number of externally defined symbols */ + unsigned long irefsym; /* index into reference symbol table */ + unsigned long nrefsym; /* number of reference symbol table entries */ + unsigned long ilocalsym; /* index into symbols for local symbols */ + unsigned long nlocalsym; /* number of local symbols */ + + unsigned long iextrel; /* index into external relocation entries */ + unsigned long nextrel; /* number of external relocation entries */ + + unsigned long iinit; /* index into the init section */ + unsigned long ninit; /* number of init section entries */ + + unsigned long /* for this module address of the start of */ + objc_module_info_addr; /* the (__OBJC,__module_info) section */ + unsigned long /* for this module size of */ + objc_module_info_size; /* the (__OBJC,__module_info) section */ +}; + +/* + * The entries in the reference symbol table are used when loading 
the module + * (both by the static and dynamic link editors) and if the module is unloaded + * or replaced. Therefore all external symbols (defined and undefined) are + * listed in the module's reference table. The flags describe the type of + * reference that is being made. The constants for the flags are defined in + * as they are also used for symbol table entries. + */ +struct dylib_reference { + unsigned long isym:24, /* index into the symbol table */ + flags:8; /* flags to indicate the type of reference */ +}; + +/* + * The symseg_command contains the offset and size of the GNU style + * symbol table information as described in the header file . + * The symbol roots of the symbol segments must also be aligned properly + * in the file. So the requirement of keeping the offsets aligned to a + * multiple of a sizeof(long) translates to the length field of the symbol + * roots also being a multiple of a long. Also the padding must again be + * zeroed. (THIS IS OBSOLETE and no longer supported). + */ +struct symseg_command { + unsigned long cmd; /* LC_SYMSEG */ + unsigned long cmdsize; /* sizeof(struct symseg_command) */ + unsigned long offset; /* symbol segment offset */ + unsigned long size; /* symbol segment size in bytes */ +}; + +/* + * The ident_command contains a free format string table following the + * ident_command structure. The strings are null terminated and the size of + * the command is padded out with zero bytes to a multiple of sizeof(long). + * (THIS IS OBSOLETE and no longer supported). + */ +struct ident_command { + unsigned long cmd; /* LC_IDENT */ + unsigned long cmdsize; /* strings that follow this command */ +}; + +/* + * The fvmfile_command contains a reference to a file to be loaded at the + * specified virtual address. (Presently, this command is reserved for NeXT + * internal use. The kernel ignores this command when loading a program into + * memory). 
+ */ +struct fvmfile_command { + unsigned long cmd; /* LC_FVMFILE */ + unsigned long cmdsize; /* includes pathname string */ + union lc_str name; /* files pathname */ + unsigned long header_addr; /* files virtual address */ +}; + +#endif /*_MACHO_LOADER_H_*/ diff --git a/libkern/mach-o/mach_header.h b/libkern/mach-o/mach_header.h new file mode 100644 index 000000000..4d14d1380 --- /dev/null +++ b/libkern/mach-o/mach_header.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: kern/mach_header.h + * + * Definitions for accessing mach-o headers. + * + * HISTORY + * 29-Jan-92 Mike DeMoney (mike@next.com) + * Made into machine independent form from machdep/m68k/mach_header.h. + * Ifdef'ed out most of this since I couldn't find any references. 
+ */ + +#ifndef _KERN_MACH_HEADER_ +#define _KERN_MACH_HEADER_ + +#include +#include + +#if KERNEL +struct mach_header **getmachheaders(void); +vm_offset_t getlastaddr(void); + +struct segment_command *firstseg(void); +struct segment_command *firstsegfromheader(struct mach_header *header); +struct segment_command *nextseg(struct segment_command *sgp); +struct segment_command *nextsegfromheader( + struct mach_header *header, + struct segment_command *seg); +struct segment_command *getsegbyname(char *seg_name); +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name); +void *getsegdatafromheader(struct mach_header *, char *, int *); +struct section *getsectbyname(char *seg_name, char *sect_name); +struct section *getsectbynamefromheader( + struct mach_header *header, + char *seg_name, + char *sect_name); +void *getsectdatafromheader(struct mach_header *, char *, char *, int *); +struct section *firstsect(struct segment_command *sgp); +struct section *nextsect(struct segment_command *sgp, struct section *sp); +struct fvmlib_command *fvmlib(void); +struct fvmlib_command *fvmlibfromheader(struct mach_header *header); +struct segment_command *getfakefvmseg(void); +#ifdef MACH_KDB +struct symtab_command *getsectcmdsymtabfromheader(struct mach_header *); +boolean_t getsymtab(struct mach_header *, vm_offset_t *, int *, + vm_offset_t *, vm_size_t *); +#endif + +#endif /* KERNEL */ + +#endif /* _KERN_MACH_HEADER_ */ diff --git a/libkern/ppc/OSAtomic.s b/libkern/ppc/OSAtomic.s new file mode 100644 index 000000000..39481085d --- /dev/null +++ b/libkern/ppc/OSAtomic.s @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; ENTRY functionName
;
; Assembly directives to begin an exported function.
;
; Takes: functionName - name of the exported function
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

.macro ENTRY
	.text
	.align 2
	.globl $0
$0:
.endmacro

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

/*
int OSCompareAndSwap( UInt32 oldVal, UInt32 newVal, UInt32 * addr )
*/
; Atomically: if (*addr == oldVal) { *addr = newVal; return 1; }
; else return 0.  Built on the lwarx/stwcx. reservation pair.

	ENTRY _OSCompareAndSwap

	; NOTE(review): extra load-and-reserve before the loop, tagged
	; "CEMV10" -- presumably a chip-errata workaround; confirm
	; before removing.
	lwarx	r6, 0,r5	/* CEMV10 */
.L_CASretry:
	lwarx	r6, 0,r5	; load *addr and take a reservation
	cmpw	r6, r3		; still equal to oldVal?
	bne-	.L_CASfail	; no: fail without storing
	stwcx.	r4, 0,r5	; conditionally store newVal
	bne-	.L_CASretry	; reservation lost: start over
	isync			; don't let later ops run ahead of the swap
	li	r3, 1		; return 1 (swap happened)
	blr
.L_CASfail:
	li	r3, 0		; return 0 (no store performed)
	blr


/*
SInt32 OSDecrementAtomic(SInt32 * value)
*/
; Atomically adds -1 to *value; returns the ORIGINAL value in r3.
	ENTRY _OSDecrementAtomic
	mr	r4, r3		; r4 = value pointer
	li	r3, -1		; r3 = amount (-1)
	b	_OSAddAtomic

/*
SInt32 OSIncrementAtomic(SInt32 * value)
*/
; Atomically adds +1 to *value; returns the ORIGINAL value in r3.
; NOTE: no branch here -- deliberately falls through into
; _OSAddAtomic below with r3 = amount, r4 = pointer.

	ENTRY _OSIncrementAtomic
	mr	r4, r3		; r4 = value pointer
	li	r3, 1		; r3 = amount (+1)

/*
SInt32 OSAddAtomic(SInt32 amount, SInt32 * value)
*/
; Atomically adds `amount` to *value; returns the ORIGINAL value.

	ENTRY _OSAddAtomic

	mr	r5,r3		/* Save the increment */
	; NOTE(review): extra load-and-reserve before the loop, tagged
	; "CEMV10" -- same presumed errata workaround as above.
	lwarx	r3,0,r4		/* CEMV10 */

.L_AAretry:
	lwarx	r3, 0, r4	/* Grab the area value */
	add	r6, r3, r5	/* Add the value */
	stwcx.	r6, 0, r4	/* Try to save the new value */
	bne-	.L_AAretry	/* Didn't get it, try again... */
	blr			/* Return the original value */
extern "C" {
/*****
 * This function is used by IOCatalogue to load a kernel
 * extension.  libsa initially sets it to be a function
 * that uses libkld to load and link the extension from
 * within the kernel.  Once the root filesystem is up,
 * this gets switched to the kmod_load_extension() function,
 * which merely queues the extension for loading by the
 * kmodload utility.
 */
extern kern_return_t (*kmod_load_function)(char *extension_name);

/* Hooks installed by KLDBootstrap's constructor (defined elsewhere);
 * they record and manage startup extensions handed over by bootx.
 */
extern bool (*record_startup_extensions_function)(void);
extern bool (*add_from_mkext_function)(OSData * mkext);
extern void (*remove_startup_extension_function)(const char * name);

/****
 * IOCatalogue uses this variable to make a few decisions
 * about loading and matching drivers.
 */
extern int kernelLinkerPresent;

/* Lock guarding all of libsa's kmod bookkeeping. */
extern IOLock * kld_lock;
};


/* Bootstrap helper: its constructor/destructor run at static
 * initialization/teardown of libsa to install and remove the
 * in-kernel linker hooks declared above.
 */
class KLDBootstrap {
public:
    KLDBootstrap();
    ~KLDBootstrap();
};


/* Single static instance; construction happens when libsa is loaded. */
static KLDBootstrap bootstrap_obj;
/* The constructor creates a lock and puts entries into a dispatch
 * table for functions used to record and load kmods.
 */
KLDBootstrap::KLDBootstrap() {

    kld_lock = IOLockAlloc();   // NOTE(review): result not NULL-checked
    IOLockLock(kld_lock);

    /* Initially load extensions with the in-kernel (libkld) linker;
     * switched to a queue-for-kmodload function once the root
     * filesystem is available.
     */
    kmod_load_function = &load_kernel_extension;

    record_startup_extensions_function = &recordStartupExtensions;
    add_from_mkext_function = &addExtensionsFromArchive;
    remove_startup_extension_function = &removeStartupExtension;

    /* Advertise the in-kernel linker to IOCatalogue. */
    kernelLinkerPresent = 1;

    IOLockUnlock(kld_lock);
}

/* The destructor frees all wired memory regions held
 * by libsa's malloc package and disposes of the lock.
 */
KLDBootstrap::~KLDBootstrap() {

    OSDictionary * startupExtensions;

    IOLockLock(kld_lock);

    malloc_reset();   // release libsa's wired allocations

    /* Drop our reference on the global startup-extensions dictionary. */
    startupExtensions = getStartupExtensions();
    if (startupExtensions) startupExtensions->release();

    /* Release the lock before destroying it. */
    IOLockUnlock(kld_lock);
    IOLockFree(kld_lock);

}
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)bsearch.c 8.1 (Berkeley) 6/4/93"; +#endif /* LIBC_SCCS and not lint */ + +#include + +/* + * Perform a binary search. + * + * The code below is a bit sneaky. 
/*
 * Binary search over a sorted array of `nmemb` elements, each `size`
 * bytes, ordered by `compar`.  Returns a pointer to an element equal
 * to `key`, or NULL if none matches.
 *
 * The window-halving trick: each iteration probes the middle element
 * of the remaining window.  On a match we return immediately.  If the
 * key orders after the probe, the window base moves one element past
 * the probe and the window shrinks by one before halving; otherwise
 * the base stays put and the window is simply halved, leaving only
 * the elements below the probe.
 */
__private_extern__
void * bsearch(
    register const void *key,
    const void *base0,
    size_t nmemb,
    register size_t size,
    register int (*compar)(const void *, const void *)) {

    const char *lo = (const char *)base0;
    size_t span = nmemb;

    while (span != 0) {
        const char *probe = lo + (span >> 1) * size;
        int order = (*compar)(key, probe);

        if (order == 0)
            return ((void *)probe);

        if (order > 0) {            /* key > probe: search upper half */
            lo = probe + size;
            span--;
        }
        /* order < 0: keep lower half; base unchanged */
        span >>= 1;
    }

    return (NULL);
}
/*********************************************************************
* Global dictionary of startup extensions, keyed by bundle identifier.
* Created lazily by getStartupExtensions() below.
*********************************************************************/
static OSDictionary * gStartupExtensions = 0;

/* Returns the global startup-extensions dictionary, allocating it on
 * first use.  Returns NULL only if that first allocation fails (the
 * failure is logged).
 * NOTE(review): no locking here -- presumably callers serialize via
 * kld_lock; confirm.
 */
OSDictionary * getStartupExtensions(void) {
    if (gStartupExtensions) {
        return gStartupExtensions;
    }
    gStartupExtensions = OSDictionary::withCapacity(1);
    if (!gStartupExtensions) {
        IOLog("Error: Couldn't allocate "
            "startup extensions dictionary.\n");
        LOG_DELAY();
    }
    return gStartupExtensions;
}
/*********************************************************************
* This function checks that a driver dict has all the required
* entries and does a little bit of value checking too.
*
* Requires a "CFBundleIdentifier" string and a "CFBundleVersion"
* string that VERS_parse_string() accepts.  Failures are logged.
*
* NOTE: because of the FIXME at the bottom, this function currently
* ALWAYS returns true regardless of what it finds.
*********************************************************************/
bool validateExtensionDict(OSDictionary * extension) {

    bool result = true;
    OSString * name;        // do not release
    OSString * stringValue; // do not release
    UInt32 vers;            // parsed version; value unused beyond validation

    name = OSDynamicCast(OSString,
        extension->getObject("CFBundleIdentifier"));
    if (!name) {
        IOLog(VTYELLOW "Extension has no \"CFBundleIdentifier\" property.\n"
            VTRESET);
        LOG_DELAY();
        result = false;
        goto finish;
    }

    stringValue = OSDynamicCast(OSString,
        extension->getObject("CFBundleVersion"));
    if (!stringValue) {
        IOLog(VTYELLOW "Extension \"%s\" has no \"CFBundleVersion\" "
            "property.\n" VTRESET,
            name->getCStringNoCopy());
        LOG_DELAY();
        result = false;
        goto finish;
    }
    if (!VERS_parse_string(stringValue->getCStringNoCopy(),
        &vers)) {
        IOLog(VTYELLOW "Extension \"%s\" has an invalid "
            "\"CFBundleVersion\" property.\n" VTRESET,
            name->getCStringNoCopy());
        LOG_DELAY();
        result = false;
        goto finish;
    }


finish:
    // FIXME: Make return real result after kext conversion
    return true;

    return result;  /* unreachable until the FIXME above is addressed */
}
/*********************************************************************
* Compares two extension entries (each a dict holding a "plist"
* sub-dictionary) that must share the same CFBundleIdentifier, and
* returns whichever has the newer CFBundleVersion; the incumbent wins
* ties.  Returns NULL on any error (missing plists or properties,
* mismatched identifiers, unparseable versions).  The return value is
* a borrowed pointer to one of the two arguments -- no reference is
* taken and nothing is released.
*********************************************************************/
OSDictionary * compareExtensionVersions(
    OSDictionary * incumbent,
    OSDictionary * candidate) {

    OSDictionary * winner = NULL;

    OSDictionary * incumbentPlist = NULL;
    OSDictionary * candidatePlist = NULL;
    OSString * incumbentName = NULL;
    OSString * candidateName = NULL;
    OSString * incumbentVersionString = NULL;
    OSString * candidateVersionString = NULL;
    UInt32 incumbent_vers = 0;
    UInt32 candidate_vers = 0;

    incumbentPlist = OSDynamicCast(OSDictionary,
        incumbent->getObject("plist"));
    candidatePlist = OSDynamicCast(OSDictionary,
        candidate->getObject("plist"));

    if (!incumbentPlist || !candidatePlist) {
        IOLog("compareExtensionVersions() called with invalid "
            "extension dictionaries.\n");
        LOG_DELAY();
        winner = NULL;
        goto finish;
    }

    incumbentName = OSDynamicCast(OSString,
        incumbentPlist->getObject("CFBundleIdentifier"));
    candidateName = OSDynamicCast(OSString,
        candidatePlist->getObject("CFBundleIdentifier"));
    incumbentVersionString = OSDynamicCast(OSString,
        incumbentPlist->getObject("CFBundleVersion"));
    candidateVersionString = OSDynamicCast(OSString,
        candidatePlist->getObject("CFBundleVersion"));

    if (!incumbentName || !candidateName ||
        !incumbentVersionString || !candidateVersionString) {

        IOLog("compareExtensionVersions() called with invalid "
            "extension dictionaries.\n");
        LOG_DELAY();
        winner = NULL;
        goto finish;
    }

    /* The two entries must describe the same extension. */
    if (strcmp(incumbentName->getCStringNoCopy(),
        candidateName->getCStringNoCopy())) {

        IOLog("compareExtensionVersions() called with different "
            "extension names (%s and %s).\n",
            incumbentName->getCStringNoCopy(),
            candidateName->getCStringNoCopy());
        LOG_DELAY();
        winner = NULL;
        goto finish;
    }

    if (!VERS_parse_string(incumbentVersionString->getCStringNoCopy(),
        &incumbent_vers)) {

        IOLog(VTYELLOW "Error parsing version string for extension %s (%s)\n"
            VTRESET,
            incumbentName->getCStringNoCopy(),
            incumbentVersionString->getCStringNoCopy());
        LOG_DELAY();
        winner = NULL;
        goto finish;
    }

    if (!VERS_parse_string(candidateVersionString->getCStringNoCopy(),
        &candidate_vers)) {

        IOLog(VTYELLOW "Error parsing version string for extension %s (%s)\n"
            VTRESET,
            candidateName->getCStringNoCopy(),
            candidateVersionString->getCStringNoCopy());
        LOG_DELAY();
        winner = NULL;
        goto finish;
    }

    /* Strictly newer candidate wins; otherwise the incumbent stays. */
    if (candidate_vers > incumbent_vers) {
        IOLog(VTYELLOW "Replacing extension \"%s\" with newer version "
            "(%s -> %s).\n" VTRESET,
            incumbentName->getCStringNoCopy(),
            incumbentVersionString->getCStringNoCopy(),
            candidateVersionString->getCStringNoCopy());
        LOG_DELAY();
        winner = candidate;
        goto finish;
    } else {
        IOLog(VTYELLOW "Skipping duplicate extension \"%s\" with older/same "
            " version (%s -> %s).\n" VTRESET,
            candidateName->getCStringNoCopy(),
            candidateVersionString->getCStringNoCopy(),
            incumbentVersionString->getCStringNoCopy());
        LOG_DELAY();
        winner = incumbent;
        goto finish;
    }

finish:

    // no cleanup, how nice
    return winner;
}
\"%s\" with older/same " + " version (%s -> %s).\n" VTRESET, + candidateName->getCStringNoCopy(), + candidateVersionString->getCStringNoCopy(), + incumbentVersionString->getCStringNoCopy()); + LOG_DELAY(); + winner = incumbent; + goto finish; + } + +finish: + + // no cleanup, how nice + return winner; +} + + +/********************************************************************* +* This function merges entries in the mergeFrom dictionary into the +* mergeInto dictionary. If it returns false, the two dictionaries are +* not altered. If it returns true, then mergeInto may have new +* entries; any keys that were already present in mergeInto are +* removed from mergeFrom, so that the caller can see what was +* actually merged. +*********************************************************************/ +bool mergeExtensionDictionaries(OSDictionary * mergeInto, + OSDictionary * mergeFrom) { + + bool result = true; + OSDictionary * mergeIntoCopy = NULL; // must release + OSDictionary * mergeFromCopy = NULL; // must release + OSCollectionIterator * keyIterator = NULL; // must release + OSString * key; // don't release + + /* Add 1 to count to guarantee copy can grow (grr). + */ + mergeIntoCopy = OSDictionary::withDictionary(mergeInto, + mergeInto->getCount() + 1); + if (!mergeIntoCopy) { + IOLog("Error: Failed to copy 'into' extensions dictionary " + "for merge.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + /* Add 1 to count to guarantee copy can grow (grr). 
+ */ + mergeFromCopy = OSDictionary::withDictionary(mergeFrom, + mergeFrom->getCount() + 1); + if (!mergeFromCopy) { + IOLog("Error: Failed to copy 'from' extensions dictionary " + "for merge.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + keyIterator = OSCollectionIterator::withCollection(mergeFrom); + if (!keyIterator) { + IOLog("Error: Failed to allocate iterator for extensions.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + + /***** + * Loop through "from" dictionary, checking if the identifier already + * exists in the "into" dictionary and checking versions if it does. + */ + while ((key = OSDynamicCast(OSString, keyIterator->getNextObject()))) { + OSDictionary * incumbentExt = OSDynamicCast(OSDictionary, + mergeIntoCopy->getObject(key)); + OSDictionary * candidateExt = OSDynamicCast(OSDictionary, + mergeFrom->getObject(key)); + + if (!incumbentExt) { + if (!mergeIntoCopy->setObject(key, candidateExt)) { + + /* This is a fatal error, so bail. + */ + IOLog("mergeExtensionDictionaries(): Failed to add " + "identifier %s\n", + key->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + } else { + OSDictionary * mostRecentExtension = + compareExtensionVersions(incumbentExt, candidateExt); + + if (mostRecentExtension == incumbentExt) { + mergeFromCopy->removeObject(key); + } else if (mostRecentExtension == candidateExt) { + + if (!mergeIntoCopy->setObject(key, candidateExt)) { + + /* This is a fatal error, so bail. + */ + IOLog("mergeExtensionDictionaries(): Failed to add " + "identifier %s\n", + key->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + } else /* should be NULL */ { + + /* This is a nonfatal error, so continue doing others. 
+ */ + IOLog("mergeExtensionDictionaries(): Error comparing " + "versions of duplicate extensions %s.\n", + key->getCStringNoCopy()); + LOG_DELAY(); + continue; + } + } + } + +finish: + + /* If successful, replace the contents of the original + * dictionaries with those of the modified copies. + */ + if (result) { + mergeInto->flushCollection(); + mergeInto->merge(mergeIntoCopy); + mergeFrom->flushCollection(); + mergeFrom->merge(mergeFromCopy); + } + + if (mergeIntoCopy) mergeIntoCopy->release(); + if (mergeFromCopy) mergeFromCopy->release(); + if (keyIterator) keyIterator->release(); + + return result; +} + + +/**** + * These bits are used to parse data made available by bootx. + */ +#define BOOTX_KEXT_PREFIX "Driver-" +#define BOOTX_MULTIKEXT_PREFIX "DriversPackage-" + +typedef struct MemoryMapFileInfo { + UInt32 paddr; + UInt32 length; +} MemoryMapFileInfo; + +typedef struct BootxDriverInfo { + char *plistAddr; + long plistLength; + void *moduleAddr; + long moduleLength; +} BootxDriverInfo; + + +/********************************************************************* +* This private function reads the data for a single extension from +* the bootx memory-map's propery dict, returning a dictionary with +* keys "plist" for the extension's Info.plist as a parsed OSDictionary +* and "code" for the extension's executable code as an OSData. 
+*********************************************************************/ +OSDictionary * readExtension(OSDictionary * propertyDict, + const char * memory_map_name) { + + int error = 0; + OSData * bootxDriverDataObject = NULL; + OSDictionary * driverPlist = NULL; + OSString * driverName = NULL; + OSData * driverCode = NULL; + OSString * errorString = NULL; + OSDictionary * driverDict = NULL; + + MemoryMapFileInfo * driverInfo = 0; + BootxDriverInfo * dataBuffer; + + kmod_info_t * loaded_kmod = NULL; + + + bootxDriverDataObject = OSDynamicCast(OSData, + propertyDict->getObject(memory_map_name)); + // don't release bootxDriverDataObject + + if (!bootxDriverDataObject) { + IOLog("Error: No driver data object " + "for device tree entry \"%s\".\n", + memory_map_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + driverDict = OSDictionary::withCapacity(2); + if (!driverDict) { + IOLog("Error: Couldn't allocate dictionary " + "for device tree entry \"%s\".\n", memory_map_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + driverInfo = (MemoryMapFileInfo *) + bootxDriverDataObject->getBytesNoCopy(0, + sizeof(MemoryMapFileInfo)); + dataBuffer = (BootxDriverInfo *)ml_static_ptovirt( + driverInfo->paddr); + if (!dataBuffer) { + IOLog("Error: No data buffer " + "for device tree entry \"%s\".\n", memory_map_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + driverPlist = OSDynamicCast(OSDictionary, + OSUnserializeXML(dataBuffer->plistAddr, &errorString)); + if (!driverPlist) { + IOLog("Error: Couldn't read XML property list " + "for device tree entry \"%s\".\n", memory_map_name); + LOG_DELAY(); + if (errorString) { + IOLog("XML parse error: %s.\n", + errorString->getCStringNoCopy()); + LOG_DELAY(); + } + error = 1; + goto finish; + } + + + driverName = OSDynamicCast(OSString, + driverPlist->getObject("CFBundleIdentifier")); // do not release + if (!driverName) { + IOLog("Error: Device tree entry \"%s\" has " + "no \"CFBundleIdentifier\" property.\n", 
memory_map_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + /* Check if kmod is already loaded and is a real loadable one (has + * an address). + */ + loaded_kmod = kmod_lookupbyname(driverName->getCStringNoCopy()); + if (loaded_kmod && loaded_kmod->address) { + IOLog("Skipping new extension \"%s\"; an extension named " + "\"%s\" is already loaded.\n", + driverName->getCStringNoCopy(), + loaded_kmod->name); + LOG_DELAY(); + error = 1; + goto finish; + } + + if (!validateExtensionDict(driverPlist)) { + IOLog("Error: Failed to validate property list " + "for device tree entry \"%s\".\n", memory_map_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + driverDict->setObject("plist", driverPlist); + + /* It's perfectly okay for a KEXT to have no executable. + * Check that moduleAddr is nonzero before attempting to + * get one. + */ + if (dataBuffer->moduleAddr && dataBuffer->moduleLength) { + driverCode = OSData::withBytes(dataBuffer->moduleAddr, + dataBuffer->moduleLength); + if (!driverCode) { + IOLog("Error: Couldn't allocate data object " + "to hold code for device tree entry \"%s\".\n", + memory_map_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + if (driverCode) { + driverDict->setObject("code", driverCode); + } + } + +finish: + + /* Free the memory for this extension that was set up + * by bootx. + */ + IODTFreeLoaderInfo(memory_map_name, (void *)driverInfo->paddr, + (int)driverInfo->length); + + // do not release bootxDriverDataObject + // do not release driverName + + if (driverPlist) { + driverPlist->release(); + } + if (errorString) { + errorString->release(); + } + if (driverCode) { + driverCode->release(); + } + if (error) { + if (driverDict) { + driverDict->release(); + driverDict = NULL; + } + } + return driverDict; +} + + +/********************************************************************* +* Used to uncompress a single file entry in an mkext archive. 
+*********************************************************************/ +int uncompressFile(u_int8_t * base_address, + mkext_file * fileinfo, + /* out */ OSData ** file) { + + int result = 1; + u_int8_t * uncompressed_file = 0; // don't free; owned by OSData obj + OSData * uncompressedFile = 0; // don't release + size_t uncompressed_size = 0; + + size_t offset = OSSwapBigToHostInt32(fileinfo->offset); + size_t compsize = OSSwapBigToHostInt32(fileinfo->compsize); + size_t realsize = OSSwapBigToHostInt32(fileinfo->realsize); + time_t modifiedsecs = OSSwapBigToHostInt32(fileinfo->modifiedsecs); + + *file = 0; + + /* If these four fields are zero there's no file, but that isn't + * an error. + */ + if (offset == 0 && compsize == 0 && + realsize == 0 && modifiedsecs == 0) { + goto finish; + } + + // Add 1 for '\0' to terminate XML string! + uncompressed_file = (u_int8_t *)kalloc(realsize + 1); + if (!uncompressed_file) { + IOLog("Error: Couldn't allocate data buffer " + "to uncompress file.\n"); + LOG_DELAY(); + result = 0; + goto finish; + } + + uncompressedFile = OSData::withBytesNoCopy(uncompressed_file, + realsize + 1); + if (!uncompressedFile) { + IOLog("Error: Couldn't allocate data object " + "to uncompress file.\n"); + LOG_DELAY(); + result = 0; + goto finish; + } + + if (compsize != 0) { + uncompressed_size = decompress_lzss(uncompressed_file, + base_address + offset, + compsize); + if (uncompressed_size != realsize) { + IOLog("Error: Uncompressed file is not the length " + "recorded.\n"); + LOG_DELAY(); + result = 0; + goto finish; + } + } else { + bcopy(base_address + offset, uncompressed_file, + compsize); + } + uncompressed_file[uncompressed_size] = '\0'; + + *file = uncompressedFile; + +finish: + if (!result) { + if (uncompressedFile) { + uncompressedFile->release(); + *file = 0; + } + } + return result; +} + + +/********************************************************************* +* Does the work of pulling extensions out of an mkext archive located +* 
in memory. +*********************************************************************/ +bool extractExtensionsFromArchive(MemoryMapFileInfo * mkext_file_info, + OSDictionary * extensions) { + + bool result = true; + + u_int8_t * crc_address = 0; + u_int32_t checksum; + mkext_header * mkext_data = 0; // don't free + mkext_kext * onekext_data = 0; // don't free + mkext_file * plist_file = 0; // don't free + mkext_file * module_file = 0; // don't free + OSData * driverPlistDataObject = 0; // must release + OSDictionary * driverPlist = 0; // must release + OSData * driverCode = 0; // must release + OSDictionary * driverDict = 0; // must release + OSString * moduleName = 0; // don't release + OSString * errorString = NULL; // must release + + mkext_data = (mkext_header *)mkext_file_info->paddr; + + if (OSSwapBigToHostInt32(mkext_data->magic) != MKEXT_MAGIC || + OSSwapBigToHostInt32(mkext_data->signature) != MKEXT_SIGN) { + IOLog("Error: Extension archive has invalid magic or signature.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + if (OSSwapBigToHostInt32(mkext_data->length) != mkext_file_info->length) { + IOLog("Error: Mismatch between extension archive & " + "recorded length.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + crc_address = (u_int8_t *)&mkext_data->version; + checksum = adler32(crc_address, + (unsigned int)mkext_data + + OSSwapBigToHostInt32(mkext_data->length) - (unsigned int)crc_address); + + if (OSSwapBigToHostInt32(mkext_data->adler32) != checksum) { + IOLog("Error: Extension archive has a bad checksum.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + /* If the MKEXT archive isn't fat, check that the CPU type & subtype + * match that of the running kernel. 
+ */ + if (OSSwapBigToHostInt32(mkext_data->cputype) != (UInt32)CPU_TYPE_ANY) { + kern_return_t kresult = KERN_FAILURE; + host_basic_info_data_t hostinfo; + host_info_t hostinfo_ptr = (host_info_t)&hostinfo; + mach_msg_type_number_t count = sizeof(hostinfo)/sizeof(integer_t); + + kresult = host_info((host_t)1, HOST_BASIC_INFO, + hostinfo_ptr, &count); + if (kresult != KERN_SUCCESS) { + IOLog("Error: Couldn't get current host info.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + if ((UInt32)hostinfo.cpu_type != + OSSwapBigToHostInt32(mkext_data->cputype)) { + + IOLog("Error: Extension archive doesn't contain software " + "for this computer's CPU type.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + if (!check_cpu_subtype(OSSwapBigToHostInt32(mkext_data->cpusubtype))) { + IOLog("Error: Extension archive doesn't contain software " + "for this computer's CPU subtype.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + } + + for (unsigned int i = 0; + i < OSSwapBigToHostInt32(mkext_data->numkexts); + i++) { + + kmod_info_t * loaded_kmod = 0; + + if (driverPlistDataObject) { + driverPlistDataObject->release(); + driverPlistDataObject = NULL; + } + if (driverPlist) { + driverPlist->release(); + driverPlist = NULL; + } + if (driverCode) { + driverCode->release(); + driverCode = NULL; + } + if (driverDict) { + driverDict->release(); + driverDict = NULL; + } + if (errorString) { + errorString->release(); + errorString = NULL; + } + + onekext_data = &mkext_data->kext[i]; + plist_file = &onekext_data->plist; + module_file = &onekext_data->module; + + if (!uncompressFile((u_int8_t *)mkext_data, plist_file, + &driverPlistDataObject)) { + + IOLog("Error: couldn't uncompress plist file " + "%d from multikext archive.\n", i); + LOG_DELAY(); + result = false; + goto finish; // or just continue? 
+ } + + if (!driverPlistDataObject) { + IOLog("Error: No property list present " + "for multikext archive entry %d.\n", i); + LOG_DELAY(); + result = false; + goto finish; // or just continue? + } else { + driverPlist = OSDynamicCast(OSDictionary, + OSUnserializeXML( + (char *)driverPlistDataObject->getBytesNoCopy(), + &errorString)); + if (!driverPlist) { + IOLog("Error: Couldn't read XML property list " + "for multikext archive entry %d.\n", i); + LOG_DELAY(); + if (errorString) { + IOLog("XML parse error: %s.\n", + errorString->getCStringNoCopy()); + LOG_DELAY(); + } + result = false; + goto finish; // or just continue? + } + + if (!validateExtensionDict(driverPlist)) { + IOLog("Error: Failed to validate property list " + "for multikext archive entry %d.\n", i); + LOG_DELAY(); + result = false; + goto finish; + } + + } + + /* Get the extension's module name. This is used to record + * the extension. + */ + moduleName = OSDynamicCast(OSString, + driverPlist->getObject("CFBundleIdentifier")); // do not release + if (!moduleName) { + IOLog("Error: Multikext archive entry %d has " + "no \"CFBundleIdentifier\" property.\n", i); + LOG_DELAY(); + continue; // assume a kext config error & continue + } + + /* Check if kmod is already loaded and is a real loadable one (has + * an address). 
+ */ + loaded_kmod = kmod_lookupbyname(moduleName->getCStringNoCopy()); + if (loaded_kmod && loaded_kmod->address) { + IOLog("Skipping new extension \"%s\"; an extension named " + "\"%s\" is already loaded.\n", + moduleName->getCStringNoCopy(), + loaded_kmod->name); + continue; + } + + + driverDict = OSDictionary::withCapacity(2); + if (!driverDict) { + IOLog("Error: Couldn't allocate dictionary " + "for multikext archive entry %d.\n", i); + LOG_DELAY(); + result = false; + goto finish; + } + + driverDict->setObject("plist", driverPlist); + + if (!uncompressFile((u_int8_t *)mkext_data, module_file, + &driverCode)) { + + IOLog("Error: couldn't uncompress module file " + "%d from multikext archive.\n", i); + LOG_DELAY(); + result = false; + goto finish; // or just continue? + } + + /* It's okay for there to be no module + */ + if (driverCode) { + driverDict->setObject("code", driverCode); + } + + OSDictionary * incumbentExt = OSDynamicCast(OSDictionary, + extensions->getObject(moduleName)); + + if (!incumbentExt) { + extensions->setObject(moduleName, driverDict); + } else { + OSDictionary * mostRecentExtension = + compareExtensionVersions(incumbentExt, driverDict); + + if (mostRecentExtension == incumbentExt) { + /* Do nothing, we've got the most recent. */ + } else if (mostRecentExtension == driverDict) { + if (!extensions->setObject(moduleName, driverDict)) { + + /* This is a fatal error, so bail. + */ + IOLog("extractExtensionsFromArchive(): Failed to add " + "identifier %s\n", + moduleName->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + } else /* should be NULL */ { + + /* This is a nonfatal error, so continue. 
+ */ + IOLog("extractExtensionsFromArchive(): Error comparing " + "versions of duplicate extensions %s.\n", + moduleName->getCStringNoCopy()); + LOG_DELAY(); + continue; + } + } + } + +finish: + + if (driverPlistDataObject) driverPlistDataObject->release(); + if (driverPlist) driverPlist->release(); + if (driverCode) driverCode->release(); + if (driverDict) driverDict->release(); + if (errorString) errorString->release(); + + return result; +} + + +/********************************************************************* +* Unlike with single KEXTs, a failure to read any member of a +* multi-KEXT archive is considered a failure for all. We want to +* take no chances unpacking a single, compressed archive of drivers. +*********************************************************************/ +bool readExtensions(OSDictionary * propertyDict, + const char * memory_map_name, + OSDictionary * extensions) { + + bool result = true; + OSData * mkextDataObject = 0; // don't release + MemoryMapFileInfo * mkext_file_info = 0; // don't free + + mkextDataObject = OSDynamicCast(OSData, + propertyDict->getObject(memory_map_name)); + // don't release mkextDataObject + + if (!mkextDataObject) { + IOLog("Error: No mkext data object " + "for device tree entry \"%s\".\n", + memory_map_name); + LOG_DELAY(); + result = false; + goto finish; + } + + mkext_file_info = (MemoryMapFileInfo *)mkextDataObject->getBytesNoCopy(); + if (!mkext_file_info) { + result = false; + goto finish; + } + + result = extractExtensionsFromArchive(mkext_file_info, extensions); + +finish: + + if (!result && extensions) { + extensions->flushCollection(); + } + + IODTFreeLoaderInfo(memory_map_name, (void *)mkext_file_info->paddr, + (int)mkext_file_info->length); + + return result; +} + + +/********************************************************************* +* Adds the personalities for an extensions dictionary to the global +* IOCatalogue. 
+*********************************************************************/ +bool addPersonalities(OSDictionary * extensions) { + bool result = true; + OSCollectionIterator * keyIterator = NULL; // must release + OSString * key; // don't release + OSDictionary * driverDict = NULL; // don't release + OSDictionary * driverPlist = NULL; // don't release + OSDictionary * thisDriverPersonalities = NULL; // don't release + OSArray * allDriverPersonalities = NULL; // must release + + allDriverPersonalities = OSArray::withCapacity(1); + if (!allDriverPersonalities) { + IOLog("Error: Couldn't allocate personality dictionary.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + /* Record all personalities found so that they can be + * added to the catalogue. + * Note: Not all extensions have personalities. + */ + + keyIterator = OSCollectionIterator::withCollection(extensions); + if (!keyIterator) { + IOLog("Error: Couldn't allocate iterator to record personalities.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + while ( ( key = OSDynamicCast(OSString, + keyIterator->getNextObject() ))) { + + driverDict = OSDynamicCast(OSDictionary, + extensions->getObject(key)); + driverPlist = OSDynamicCast(OSDictionary, + driverDict->getObject("plist")); + thisDriverPersonalities = OSDynamicCast(OSDictionary, + driverPlist->getObject("IOKitPersonalities")); + + if (thisDriverPersonalities) { + OSCollectionIterator * pIterator; + OSString * key; + pIterator = OSCollectionIterator::withCollection( + thisDriverPersonalities); + if (!pIterator) { + IOLog("Error: Couldn't allocate iterator " + "to record extension personalities.\n"); + LOG_DELAY(); + continue; + } + while ( (key = OSDynamicCast(OSString, + pIterator->getNextObject())) ) { + + OSDictionary * personality = OSDynamicCast( + OSDictionary, + thisDriverPersonalities->getObject(key)); + if (personality) { + allDriverPersonalities->setObject(personality); + } + } + pIterator->release(); + } + } /* extract 
personalities */ + + + /* Add all personalities found to the IOCatalogue, + * but don't start matching. + */ + gIOCatalogue->addDrivers(allDriverPersonalities, false); + +finish: + + if (allDriverPersonalities) allDriverPersonalities->release(); + if (keyIterator) keyIterator->release(); + + return result; +} + + +/********************************************************************* +* Called from IOCatalogue to add extensions from an mkext archive. +*********************************************************************/ +bool addExtensionsFromArchive(OSData * mkextDataObject) { + bool result = true; + + OSDictionary * startupExtensions = NULL; // don't release + OSDictionary * extensions = NULL; // must release + MemoryMapFileInfo mkext_file_info; + OSCollectionIterator * keyIterator = NULL; // must release + OSString * key = NULL; // don't release + + IOLockLock(kld_lock); + + startupExtensions = getStartupExtensions(); + if (!startupExtensions) { + IOLog("Can't record extension archive; there is no " + "extensions dictionary.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + extensions = OSDictionary::withCapacity(2); + if (!extensions) { + IOLog("Error: Couldn't allocate dictionary to unpack " + "extension archive.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + mkext_file_info.paddr = (UInt32)mkextDataObject->getBytesNoCopy(); + mkext_file_info.length = mkextDataObject->getLength(); + + result = extractExtensionsFromArchive(&mkext_file_info, extensions); + if (!result) { + IOLog("Error: Failed to extract extensions from archive.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + result = mergeExtensionDictionaries(startupExtensions, extensions); + if (!result) { + IOLog("Error: Failed to merge new extensions into existing set.\n"); + LOG_DELAY(); + goto finish; + } + + result = addPersonalities(extensions); + if (!result) { + IOLog("Error: Failed to add personalities for extensions extracted " + "from archive.\n"); + LOG_DELAY(); 
+ result = false; + goto finish; + } + +finish: + + if (!result) { + IOLog("Error: Failed to record extensions from archive.\n"); + LOG_DELAY(); + } else { + keyIterator = OSCollectionIterator::withCollection( + extensions); + + if (keyIterator) { + while ( (key = OSDynamicCast(OSString, + keyIterator->getNextObject())) ) { + + IOLog("Added extension \"%s\" from archive.\n", + key->getCStringNoCopy()); + LOG_DELAY(); + } + keyIterator->release(); + } + } + + if (extensions) extensions->release(); + + IOLockUnlock(kld_lock); + + return result; +} + + +/********************************************************************* +* This function builds dictionaries for the startup extensions +* put into memory by bootx, recording each in the startup extensions +* dictionary. The dictionary format is this: +* +* { +* "plist" = (the extension's Info.plist as an OSDictionary) +* "code" = (an OSData containing the executable file) +* } +* +* This function returns true if any extensions were found and +* recorded successfully, or if there are no start extensions, +* and false if an unrecoverable error occurred. An error reading +* a single extension is not considered fatal, and this function +* will simply skip the problematic extension to try the next one. 
+*********************************************************************/ +bool recordStartupExtensions(void) { + bool result = true; + OSDictionary * startupExtensions = NULL; // must release + OSDictionary * existingExtensions = NULL; // don't release + OSDictionary * mkextExtensions = NULL; // must release + IORegistryEntry * bootxMemoryMap = NULL; // must release + OSDictionary * propertyDict = NULL; // must release + OSCollectionIterator * keyIterator = NULL; // must release + OSString * key = NULL; // don't release + + OSDictionary * newDriverDict = NULL; // must release + OSDictionary * driverPlist = NULL; // don't release + + IOLockLock(kld_lock); + + IOLog("Recording startup extensions.\n"); + LOG_DELAY(); + + existingExtensions = getStartupExtensions(); + if (!existingExtensions) { + IOLog("Error: There is no dictionary for startup extensions.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + startupExtensions = OSDictionary::withCapacity(1); + if (!startupExtensions) { + IOLog("Error: Couldn't allocate dictionary " + "to record startup extensions.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + bootxMemoryMap = + IORegistryEntry::fromPath( + "/chosen/memory-map", // path + gIODTPlane // plane + ); + // return value is retained so be sure to release it + + if (!bootxMemoryMap) { + IOLog("Error: Couldn't read booter memory map.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + propertyDict = bootxMemoryMap->dictionaryWithProperties(); + if (!propertyDict) { + IOLog("Error: Couldn't get property dictionary " + "from memory map.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + keyIterator = OSCollectionIterator::withCollection(propertyDict); + if (!keyIterator) { + IOLog("Error: Couldn't allocate iterator for driver images.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + while ( (key = OSDynamicCast(OSString, + keyIterator->getNextObject())) ) { + + /* Clear newDriverDict & mkextExtensions upon entry 
to the loop, + * handling both successful and unsuccessful iterations. + */ + if (newDriverDict) { + newDriverDict->release(); + newDriverDict = NULL; + } + if (mkextExtensions) { + mkextExtensions->release(); + mkextExtensions = NULL; + } + + const char * keyValue = key->getCStringNoCopy(); + + if ( !strncmp(keyValue, BOOTX_KEXT_PREFIX, + strlen(BOOTX_KEXT_PREFIX)) ) { + + /* Read the extension from the bootx-supplied memory. + */ + newDriverDict = readExtension(propertyDict, keyValue); + if (!newDriverDict) { + IOLog("Error: Couldn't read data " + "for device tree entry \"%s\".\n", keyValue); + LOG_DELAY(); + continue; + } + + + /* Prepare to record the extension by getting its info plist. + */ + driverPlist = OSDynamicCast(OSDictionary, + newDriverDict->getObject("plist")); + if (!driverPlist) { + IOLog("Error: Extension in device tree entry \"%s\" " + "has no property list.\n", keyValue); + LOG_DELAY(); + continue; + } + + + /* Get the extension's module name. This is used to record + * the extension. Do *not* release the moduleName. + */ + OSString * moduleName = OSDynamicCast(OSString, + driverPlist->getObject("CFBundleIdentifier")); + if (!moduleName) { + IOLog("Error: Device tree entry \"%s\" has " + "no \"CFBundleIdentifier\" property.\n", keyValue); + LOG_DELAY(); + continue; + } + + + /* All has gone well so far, so record the extension under + * its module name, checking for an existing duplicate. + * + * Do not release moduleName, as it's part of the extension's + * plist. + */ + OSDictionary * incumbentExt = OSDynamicCast(OSDictionary, + startupExtensions->getObject(moduleName)); + + if (!incumbentExt) { + startupExtensions->setObject(moduleName, newDriverDict); + } else { + OSDictionary * mostRecentExtension = + compareExtensionVersions(incumbentExt, newDriverDict); + + if (mostRecentExtension == incumbentExt) { + /* Do nothing, we've got the most recent 
*/ + } else if (mostRecentExtension == newDriverDict) { + if (!startupExtensions->setObject(moduleName, + newDriverDict)) { + + /* This is a fatal error, so bail. + */ + IOLog("recordStartupExtensions(): Failed to add " + "identifier %s\n", + moduleName->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + } else /* should be NULL */ { + + /* This is a nonfatal error, so continue. + */ + IOLog("recordStartupExtensions(): Error comparing " + "versions of duplicate extensions %s.\n", + moduleName->getCStringNoCopy()); + LOG_DELAY(); + continue; + } + } + + + } else if ( !strncmp(keyValue, BOOTX_MULTIKEXT_PREFIX, + strlen(BOOTX_MULTIKEXT_PREFIX)) ) { + + mkextExtensions = OSDictionary::withCapacity(10); + if (!mkextExtensions) { + IOLog("Error: Couldn't allocate dictionary to unpack " + "multi-extension archive.\n"); + LOG_DELAY(); + result = false; + goto finish; // allocation failure is fatal for this routine + } + if (!readExtensions(propertyDict, keyValue, mkextExtensions)) { + IOLog("Error: Couldn't unpack multi-extension archive.\n"); + LOG_DELAY(); + continue; + } else { + if (!mergeExtensionDictionaries(startupExtensions, + mkextExtensions)) { + + IOLog("Error: Failed to merge new extensions into " + "existing set.\n"); + LOG_DELAY(); + result = false; + goto finish; // merge error is fatal for this routine + } + } + } + + // Do not release key. + + } /* while ( (key = OSDynamicCast(OSString, ... */ + + if (!mergeExtensionDictionaries(existingExtensions, startupExtensions)) { + IOLog("Error: Failed to merge new extensions into existing set.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + result = addPersonalities(startupExtensions); + if (!result) { + IOLog("Error: Failed to add personalities for extensions extracted " + "from archive.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + +finish: + + // reused so clear first! 
+ if (keyIterator) { + keyIterator->release(); + keyIterator = 0; + } + + if (!result) { + IOLog("Error: Failed to record startup extensions.\n"); + LOG_DELAY(); + } else { + keyIterator = OSCollectionIterator::withCollection( + startupExtensions); + + if (keyIterator) { + while ( (key = OSDynamicCast(OSString, + keyIterator->getNextObject())) ) { + + IOLog("Found extension \"%s\".\n", + key->getCStringNoCopy()); + LOG_DELAY(); + } + keyIterator->release(); + keyIterator = 0; + } + } + + if (newDriverDict) newDriverDict->release(); + if (propertyDict) propertyDict->release(); + if (bootxMemoryMap) bootxMemoryMap->release(); + if (mkextExtensions) mkextExtensions->release(); + if (startupExtensions) startupExtensions->release(); + + IOLockUnlock(kld_lock); + return result; +} + + +/********************************************************************* +* This function removes an entry from the dictionary of startup +* extensions. It's used when an extension can't be loaded, for +* whatever reason. For drivers, this allows another matching driver +* to be loaded, so that, for example, a driver for the root device +* can be found. +*********************************************************************/ +void removeStartupExtension(const char * extensionName) { + OSDictionary * startupExtensions = NULL; // don't release + OSDictionary * extensionDict = NULL; // don't release + OSDictionary * extensionPlist = NULL; // don't release + OSDictionary * extensionPersonalities = NULL; // don't release + OSDictionary * personality = NULL; // don't release + OSCollectionIterator * keyIterator = NULL; // must release + OSString * key = NULL; // don't release + + IOLockLock(kld_lock); + + startupExtensions = getStartupExtensions(); + if (!startupExtensions) goto finish; + + + /* Find the extension's entry in the dictionary of + * startup extensions. 
+ */ + extensionDict = OSDynamicCast(OSDictionary, + startupExtensions->getObject(extensionName)); + if (!extensionDict) goto finish; + + extensionPlist = OSDynamicCast(OSDictionary, + extensionDict->getObject("plist")); + if (!extensionPlist) goto finish; + + extensionPersonalities = OSDynamicCast(OSDictionary, + extensionPlist->getObject("IOKitPersonalities")); + if (!extensionPersonalities) goto finish; + + /* If it was there, remove it from the catalogue proper + * by calling removeDrivers(). Pass true for the second + * argument to trigger a new round of matching, and + * then remove the extension from the dictionary of startup + * extensions. + */ + keyIterator = OSCollectionIterator::withCollection( + extensionPersonalities); + if (!keyIterator) { + IOLog("Error: Couldn't allocate iterator to scan " + "personalities for %s.\n", extensionName); + LOG_DELAY(); + } + + while ((key = OSDynamicCast(OSString, keyIterator->getNextObject()))) { + personality = OSDynamicCast(OSDictionary, + extensionPersonalities->getObject(key)); + + + if (personality) { + gIOCatalogue->removeDrivers(personality, true); + } + } + + startupExtensions->removeObject(extensionName); + +finish: + + if (keyIterator) keyIterator->release(); + + IOLockUnlock(kld_lock); + return; +} diff --git a/libsa/conf/MASTER b/libsa/conf/MASTER new file mode 100644 index 000000000..62e57f7ec --- /dev/null +++ b/libsa/conf/MASTER @@ -0,0 +1,55 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +####################################################################### +# +# Master machine independent configuration file. +# +# Specific configuration files are created based on this and +# the machine specific master file using the doconf script. +# +# Any changes to the master configuration files will affect all +# other configuration files based upon it. 
+# +####################################################################### +# +# To build a configuration, execute "doconf ." +# Configurations are specified in the "Configurations:" section +# of the MASTER and MASTER.* files as follows: +# +# = [ ... ] +# +# Lines in the MASTER and MASTER.* files are selected based on +# the attribute selector list, found in a comment at the end of +# the line. This is a list of attributes separated by commas. +# The "!" operator selects the line if none of the attributes are +# specified. +# +# For example: +# +# selects a line if "foo" or "bar" are specified. +# selects a line if neither "foo" nor "bar" is +# specified. +# +# Lines with no attributes specified are selected for all +# configurations. +# +####################################################################### +# +# Standard Mach Research Configurations: +# -------- ---- -------- --------------- +# +# These are the default configurations that can be used by most sites. +# They are used internally by the Mach project. 
+# +# LIBSA = [debug] +# +####################################################################### +# +ident LIBSA + +options KDEBUG # kernel tracing # + diff --git a/libsa/conf/MASTER.i386 b/libsa/conf/MASTER.i386 new file mode 100644 index 000000000..83a06f878 --- /dev/null +++ b/libsa/conf/MASTER.i386 @@ -0,0 +1,11 @@ +###################################################################### +# +# RELEASE = [intel mach libkerncpp] +# PROFILE = [intel mach libkerncpp profile] +# DEBUG = [intel mach libkerncpp debug] +# +###################################################################### + +machine "i386" # +cpu "i386" # + diff --git a/libsa/conf/MASTER.ppc b/libsa/conf/MASTER.ppc new file mode 100644 index 000000000..a2764000e --- /dev/null +++ b/libsa/conf/MASTER.ppc @@ -0,0 +1,17 @@ +# +###################################################################### +# +# Standard Apple MacOS X Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [ppc mach libkerncpp] +# PROFILE = [ppc mach libkerncpp profile] +# DEBUG = [ppc mach libkerncpp debug] +# RELEASE_TRACE = [ RELEASE kdebug ] +# DEBUG_TRACE = [ DEBUG kdebug ] +# +###################################################################### + +machine "ppc" # +cpu "ppc" # + diff --git a/libsa/conf/Makefile b/libsa/conf/Makefile new file mode 100644 index 000000000..c744dea55 --- /dev/null +++ b/libsa/conf/Makefile @@ -0,0 +1,63 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + tools + +COMP_SUBDIRS = + +INST_SUBDIRS = + +ifndef LIBKERN_KERNEL_CONFIG +export LIBKERN_KERNEL_CONFIG = $(KERNEL_CONFIG) +endif + +COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: + make build_setup + 
+$(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ + $(SOURCE)/MASTER.$(arch_config) \ + $(SOURCE)/Makefile.template \ + $(SOURCE)/Makefile.$(arch_config) \ + $(SOURCE)/files \ + $(SOURCE)/files.$(arch_config) \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf + @echo "Running doconf for $(LIBKERN_KERNEL_CONFIG)"; + (doconf_target=$(addsuffix /conf, $(TARGET)); \ + echo $${doconf_target};\ + $(MKDIR) $${doconf_target}; \ + cd $${doconf_target}; \ + rm -f $(notdir $?); \ + cp $? $${doconf_target}; \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(arch_config) -d $(TARGET)/$(LIBKERN_KERNEL_CONFIG) $(LIBKERN_KERNEL_CONFIG); \ + ); + +.ORDER: $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile + +do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ + $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG)/Makefile + +do_all: do_setup_conf + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + (cd $(COMPOBJROOT)/$(LIBKERN_KERNEL_CONFIG); \ + next_source=$(subst conf/,,$(SOURCE)); \ + ${MAKE} MAKEFILES=$(TARGET)/$(LIBKERN_KERNEL_CONFIG)/Makefile \ + SOURCE=$${next_source} \ + TARGET=$(TARGET) \ + build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(LIBKERN_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/libsa/conf/Makefile.i386 b/libsa/conf/Makefile.i386 new file mode 100644 index 000000000..b89fdd145 --- /dev/null +++ b/libsa/conf/Makefile.i386 @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for i386 +###################################################################### + + +###################################################################### +#END Machine dependent Makefile fragment for i386 
+###################################################################### diff --git a/libsa/conf/Makefile.ppc b/libsa/conf/Makefile.ppc new file mode 100644 index 000000000..2b438f2fa --- /dev/null +++ b/libsa/conf/Makefile.ppc @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for ppc +###################################################################### + + +###################################################################### +#END Machine dependent Makefile fragment for ppc +###################################################################### diff --git a/libsa/conf/Makefile.template b/libsa/conf/Makefile.template new file mode 100644 index 000000000..7725d5aac --- /dev/null +++ b/libsa/conf/Makefile.template @@ -0,0 +1,112 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# Export IDENT for sub-makefiles +# +export IDENT + +# +# XXX: INCFLAGS +# +INCFLAGS_MAKEFILE= + + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +# +# XXX: CFLAGS +# +CFLAGS+= -DKERNEL -DLIBKERN_KERNEL_PRIVATE -DDEBUG \ + -Wall -Wno-four-char-constants -fno-common + +SFLAGS+= -DKERNEL + +# +# Directories for mig generated files +# +COMP_SUBDIRS = + +# +# Make sure we don't remove this by accident if interrupted at the wrong +# time. 
+# +.PRECIOUS: Makefile + +VERSION_FILES= \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.major \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.minor \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.variant + +COPYRIGHT_FILES = \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright + +# +# Theses macros are filled in by the config program depending on the +# current configuration. The MACHDEP macro is replaced by the +# contents of the machine dependent makefile template and the others +# are replaced by the corresponding symbol definitions for the +# configuration. +# + +%OBJS + +%CFILES + +%MFILES + +%SFILES + +%BFILES + +%ORDERED +%MACHDEP + +# +# OBJSDEPS is the set of files (defined in the machine dependent +# template if necessary) which all objects depend on (such as an +# in-line assembler expansion filter) +# +${OBJS}: ${OBJSDEPS} + + +%LOAD + +LDOBJS = $(OBJS) + +$(COMPONENT).o: $(LDOBJS) + @echo "creating $(COMPONENT).o" + $(RM) $(RMFLAGS) vers.c + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} + ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c + @echo [ updating $(COMPONENT).o ${LIBKERN_KERNEL_CONFIG} ] + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o /usr/local/lib/libkld.a + $(SEG_HACK) __KLD $(COMPONENT).o -o $(COMPONENT)_kld.o + mv $(COMPONENT)_kld.o $(COMPONENT).o + +do_all: $(COMPONENT).o + +do_depend: do_all + ${MD} -u Makedep -f -d `ls *.d` + +do_build_all: do_depend + +%RULES + +-include Makedep + +include $(MakeInc_rule) + +include $(MakeInc_dir) + diff --git a/libsa/conf/copyright b/libsa/conf/copyright new file mode 100644 index 000000000..3837f6c79 --- /dev/null +++ b/libsa/conf/copyright @@ -0,0 +1,6 @@ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. 
+ */ + diff --git a/libsa/conf/files b/libsa/conf/files new file mode 100644 index 000000000..b8718a272 --- /dev/null +++ b/libsa/conf/files @@ -0,0 +1,17 @@ +# options + +# OPTIONS/libkerncpp optional libkerncpp +# OPTIONS/kdebug optional kdebug + +# libsa + +libsa/bootstrap.cpp standard +libsa/catalogue.cpp standard +libsa/kmod.cpp standard +libsa/mach.c standard +libsa/malloc.c standard +libsa/misc.c standard +libsa/mkext.c standard +libsa/sort.c standard +libsa/bsearch.c standard +libsa/vers_rsrc.c standard diff --git a/libsa/conf/files.i386 b/libsa/conf/files.i386 new file mode 100644 index 000000000..a5f24a43f --- /dev/null +++ b/libsa/conf/files.i386 @@ -0,0 +1 @@ +libsa/i386/setjmp.s standard diff --git a/libsa/conf/files.ppc b/libsa/conf/files.ppc new file mode 100644 index 000000000..79390ba2e --- /dev/null +++ b/libsa/conf/files.ppc @@ -0,0 +1,2 @@ +libsa/ppc/setjmp.s standard + diff --git a/libsa/conf/tools/Makefile b/libsa/conf/tools/Makefile new file mode 100644 index 000000000..9df86ce8c --- /dev/null +++ b/libsa/conf/tools/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + doconf \ + newvers + +COMP_SUBDIRS = \ + doconf \ + newvers + +INST_SUBDIRS = \ + + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/conf/tools/doconf/Makefile 
b/libsa/conf/tools/doconf/Makefile new file mode 100644 index 000000000..2bf0b7a10 --- /dev/null +++ b/libsa/conf/tools/doconf/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)doconf + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/conf/tools/doconf/doconf.csh b/libsa/conf/tools/doconf/doconf.csh new file mode 100755 index 000000000..43388c11c --- /dev/null +++ b/libsa/conf/tools/doconf/doconf.csh @@ -0,0 +1,313 @@ +#!/bin/csh -f +set path = ($path .) +###################################################################### +# HISTORY +# 1-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University +# Added "-verbose" switch, so this script produces no output +# in the normal case. 
+# +# 10-Oct-87 Mike Accetta (mja) at Carnegie-Mellon University +# Flushed cmu_*.h and spin_locks.h +# [ V5.1(XF18) ] +# +# 6-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# Use MASTER.local and MASTER..local for generation of +# configuration files in addition to MASTER and MASTER.. +# +# 25-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Removed use of obsolete wb_*.h files when building the feature +# list; modified to save the previous configuration file and +# display the differences between it and the new file. +# [ V5.1(F8) ] +# +# 25-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# If there is no /etc/machine just print out a message telling +# user to use the -cpu option. I thought this script was supposed +# to work even without a /etc/machine, but it doesn't... and this +# is the easiest way out. +# +# 13-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Added "romp_fpa.h" file to extra features for the RT. +# [ V5.1(F7) ] +# +# 11-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to maintain the appropriate configuration features file +# in the "machine" directory whenever the corresponding +# configuration is generated. This replaces the old mechanism of +# storing this directly in the file since it was +# machine dependent and also precluded building programs for more +# than one configuration from the same set of sources. +# [ V5.1(F6) ] +# +# 21-Feb-87 Mike Accetta (mja) at Carnegie-Mellon University +# Fixed to require wired-in cpu type names for only those +# machines where the kernel name differs from that provided by +# /etc/machine (i.e. IBMRT => ca and SUN => sun3); updated to +# permit configuration descriptions in both machine indepedent +# and dependent master configuration files so that attributes can +# be grouped accordingly. 
+# [ V5.1(F3) ] +# +# 17-Jan-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to work from any directory at the same level as +# "conf"; generate configuration from both MASTER and +# MASTER. files; added -cpu switch. +# [ V5.1(F1) ] +# +# 18-Aug-86 Mike Accetta (mja) at Carnegie-Mellon University +# Added -make switch and changed meaning of -config; upgraded to +# allow multiple attributes per configuration and to define +# configurations in terms of these attributes within MASTER. +# +# 14-Apr-83 Mike Accetta (mja) at Carnegie-Mellon University +# Added -config switch to only run /etc/config without +# "make depend" and "make". +# +###################################################################### + +set prog=$0 +set prog=$prog:t +set nonomatch +set OBJDIR=../BUILD +if ("`/usr/bin/uname`" == "Rhapsody" ) then +set CONFIG_DIR=/usr/local/bin +else +set CONFIG_DIR=/usr/bin +endif + +unset domake +unset doconfig +unset beverbose +unset MACHINE +unset profile + +while ($#argv >= 1) + if ("$argv[1]" =~ -*) then + switch ("$argv[1]") + case "-c": + case "-config": + set doconfig + breaksw + case "-m": + case "-make": + set domake + breaksw + case "-cpu": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set MACHINE="$argv[2]" + shift + breaksw + case "-d": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set OBJDIR="$argv[2]" + shift + breaksw + case "-verbose": + set beverbose + breaksw + case "-p": + case "-profile": + set profile + breaksw + default: + echo "${prog}: ${argv[1]}: unknown switch" + exit 1 + breaksw + endsw + shift + else + break + endif +end + +if ($#argv == 0) set argv=(GENERIC) + +if (! $?MACHINE) then + if (-d /NextApps) then + set MACHINE=`hostinfo | awk '/MC680x0/ { printf("m68k") } /MC880x0/ { printf("m88k") }'` + endif +endif + +if (! 
$?MACHINE) then + if (-f /etc/machine) then + set MACHINE="`/etc/machine`" + else + echo "${prog}: no /etc/machine, specify machine type with -cpu" + echo "${prog}: e.g. ${prog} -cpu VAX CONFIGURATION" + exit 1 + endif +endif + +set FEATURES_EXTRA= + +switch ("$MACHINE") + case IBMRT: + set cpu=ca + set ID=RT + set FEATURES_EXTRA="romp_dualcall.h romp_fpa.h" + breaksw + case SUN: + set cpu=sun3 + set ID=SUN3 + breaksw + default: + set cpu=`echo $MACHINE | tr A-Z a-z` + set ID=`echo $MACHINE | tr a-z A-Z` + breaksw +endsw +set FEATURES=../h/features.h +set FEATURES_H=(cs_*.h mach_*.h net_*.h\ + cputypes.h cpus.h vice.h\ + $FEATURES_EXTRA) +set MASTER_DIR=../conf +set MASTER = ${MASTER_DIR}/MASTER +set MASTER_CPU=${MASTER}.${cpu} + +set MASTER_LOCAL = ${MASTER}.local +set MASTER_CPU_LOCAL = ${MASTER_CPU}.local +if (! -f $MASTER_LOCAL) set MASTER_LOCAL = "" +if (! -f $MASTER_CPU_LOCAL) set MASTER_CPU_LOCAL = "" + +if (! -d $OBJDIR) then + echo "[ creating $OBJDIR ]" + mkdir -p $OBJDIR +endif + +foreach SYS ($argv) + set SYSID=${SYS}_${ID} + set SYSCONF=$OBJDIR/config.$SYSID + set BLDDIR=$OBJDIR + if ($?beverbose) then + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + endif + echo +$SYS \ + | \ + cat $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL - \ + $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL \ + | \ + sed -n \ + -e "/^+/{" \ + -e "s;[-+];#&;gp" \ + -e 't loop' \ + -e ': loop' \ + -e 'n' \ + -e '/^#/b loop' \ + -e '/^$/b loop' \ + -e 's;^\([^#]*\).*#[ ]*<\(.*\)>[ ]*$;\2#\1;' \ + -e 't not' \ + -e 's;\([^#]*\).*;#\1;' \ + -e 't not' \ + -e ': not' \ + -e 's;[ ]*$;;' \ + -e 's;^\!\(.*\);\1#\!;' \ + -e 'p' \ + -e 't loop' \ + -e 'b loop' \ + -e '}' \ + -e "/^[^#]/d" \ + -e 's; ; ;g' \ + -e "s;^# *\([^ ]*\)[ ]*=[ ]*\[\(.*\)\].*;\1#\2;p" \ + | \ + awk '-F#' '\ +part == 0 && $1 != "" {\ + m[$1]=m[$1] " " $2;\ + next;\ +}\ +part == 0 && $1 == "" {\ + for (i=NF;i>1;i--){\ + s=substr($i,2);\ + c[++na]=substr($i,1,1);\ + a[na]=s;\ + }\ + 
while (na > 0){\ + s=a[na];\ + d=c[na--];\ + if (m[s] == "") {\ + f[s]=d;\ + } else {\ + nx=split(m[s],x," ");\ + for (j=nx;j>0;j--) {\ + z=x[j];\ + a[++na]=z;\ + c[na]=d;\ + }\ + }\ + }\ + part=1;\ + next;\ +}\ +part != 0 {\ + if ($1 != "") {\ + n=split($1,x,",");\ + ok=0;\ + for (i=1;i<=n;i++) {\ + if (f[x[i]] == "+") {\ + ok=1;\ + }\ + }\ + if (NF > 2 && ok == 0 || NF <= 2 && ok != 0) {\ + print $2; \ + }\ + } else { \ + print $2; \ + }\ +}\ +' >$SYSCONF.new + if (-z $SYSCONF.new) then + echo "${prog}: ${$SYSID}: no such configuration in $MASTER_DIR/MASTER{,.$cpu}" + rm -f $SYSCONF.new + endif + if (! -d $BLDDIR) then + echo "[ creating $BLDDIR ]" + mkdir -p $BLDDIR + endif +# +# These paths are used by config. +# +# "builddir" is the name of the directory where kernel binaries +# are put. It is a single path element, never absolute, and is +# always relative to "objectdir". "builddir" is used by config +# solely to determine where to put files created by "config" (e.g. +# the created Makefile and *.h's.) +# +# "objectdir" is the name of the directory which will hold "builddir". +# It is a path; if relative, it is relative to the current directory +# where config is run. It's sole use is to be prepended to "builddir" +# to indicate where config-created files are to be placed (see above). +# +# "sourcedir" is the location of the sources used to build the kernel. +# It is a path; if relative, it is relative to the directory specified +# by the concatenation of "objectdir" and "builddir" (i.e. where the +# kernel binaries are put). 
+# + echo 'builddir "."' >> $SYSCONF.new + set OBJRELDIR=`relpath $OBJROOT $OBJDIR` + echo 'objectdir "'$OBJROOT'/'$OBJRELDIR'"' >> $SYSCONF.new + set SRCDIR=`dirname $SOURCE` + echo 'sourcedir "'$SRCROOT'"' >> $SYSCONF.new + if (-f $SYSCONF) then + diff $SYSCONF $SYSCONF.new + rm -f $SYSCONF.old + mv $SYSCONF $SYSCONF.old + endif + rm -f $SYSCONF + mv $SYSCONF.new $SYSCONF + if ($?doconfig) then + echo "[ configuring $SYSID ]" + if ($?profile) then + $CONFIG_DIR/config -c $MASTER_DIR -p $SYSCONF + else + $CONFIG_DIR/config -c $MASTER_DIR $SYSCONF + endif + endif + if ($?domake) then + echo "[ making $SYSID ]" + (cd $BLDDIR; make) + endif +end diff --git a/libsa/conf/tools/newvers/Makefile b/libsa/conf/tools/newvers/Makefile new file mode 100644 index 000000000..73603c753 --- /dev/null +++ b/libsa/conf/tools/newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)newvers + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ 
$(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/conf/tools/newvers/newvers.csh b/libsa/conf/tools/newvers/newvers.csh new file mode 100644 index 000000000..b462d3387 --- /dev/null +++ b/libsa/conf/tools/newvers/newvers.csh @@ -0,0 +1,34 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +v="${major}.${minor}" d=`pwd` h="rcbuilder" t=`date` w=`whoami` +if [ -z "$d" -o -z "$h" -o -z "$t" ]; then + exit 1 +fi +CONFIG=`expr "$d" : '.*/\([^/]*\)$'` +d=`expr "$d" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int ${COMPONENT}_version_major = ${major};" ; + /bin/echo "int ${COMPONENT}_version_minor = ${minor};" ; + /bin/echo "char ${COMPONENT}_version_variant[] = \"${variant}\";" ; + /bin/echo "char ${COMPONENT}_version[] = \"Common Services Component Version ${v}:\\n${t}; $w($h):$d\\n\";" ; + /bin/echo "char ${COMPONENT}_osrelease[] = \"${major}.${minor}\";" ; + /bin/echo "char ${COMPONENT}_ostype[] = \"Common Services\";" ; + /bin/echo "char ${COMPONENT}_builder[] = \"$w\";" ; +) > vers.c +if [ -s vers.suffix -o ! 
-f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/libsa/conf/version.major b/libsa/conf/version.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/libsa/conf/version.major @@ -0,0 +1 @@ +1 diff --git a/libsa/conf/version.minor b/libsa/conf/version.minor new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/libsa/conf/version.minor @@ -0,0 +1 @@ +0 diff --git a/libsa/conf/version.variant b/libsa/conf/version.variant new file mode 100644 index 000000000..e69de29bb diff --git a/libsa/i386/setjmp.s b/libsa/i386/setjmp.s new file mode 100644 index 000000000..9bbd90f4a --- /dev/null +++ b/libsa/i386/setjmp.s @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * C library -- _setjmp, _longjmp + * + * _longjmp(a,v) + * will generate a "return(v)" from + * the last call to + * _setjmp(a) + * by restoring registers from the stack, + * The previous signal state is NOT restored. + * + */ + +#include + +.private_extern _setjmp +.private_extern _longjmp + +ENTRY(setjmp) + movl 4(%esp),%ecx # fetch buffer + movl %ebx,0(%ecx) + movl %esi,4(%ecx) + movl %edi,8(%ecx) + movl %ebp,12(%ecx) # save frame pointer of caller + popl %edx + movl %esp,16(%ecx) # save stack pointer of caller + movl %edx,20(%ecx) # save pc of caller + xorl %eax,%eax + jmp *%edx + + +ENTRY(longjmp) + movl 8(%esp),%eax # return(v) + movl 4(%esp),%ecx # fetch buffer + movl 0(%ecx),%ebx + movl 4(%ecx),%esi + movl 8(%ecx),%edi + movl 12(%ecx),%ebp + movl 16(%ecx),%esp + orl %eax,%eax + jnz 0f + incl %eax +0: jmp *20(%ecx) # done, return.... diff --git a/libsa/kmod.cpp b/libsa/kmod.cpp new file mode 100644 index 000000000..df62979f5 --- /dev/null +++ b/libsa/kmod.cpp @@ -0,0 +1,1240 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include + +extern "C" { +#include +#include +#include +#include +#include +#include +#include +#include +}; + + +extern "C" { +extern load_return_t fatfile_getarch( + void * vp, // normally a (struct vnode *) + vm_offset_t data_ptr, + struct fat_arch * archret); + +extern kern_return_t +kmod_create_internal( + kmod_info_t *info, + kmod_t *id); + +extern kern_return_t +kmod_destroy_internal(kmod_t id); + +extern kern_return_t +kmod_start_or_stop( + kmod_t id, + int start, + kmod_args_t *data, + mach_msg_type_number_t *dataCount); + +extern kern_return_t kmod_retain(kmod_t id); +extern kern_return_t kmod_release(kmod_t id); + +extern void flush_dcache(vm_offset_t addr, unsigned cnt, int phys); +extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); +}; + + +IOLock * kld_lock; + + +#define LOG_DELAY() + +#define VTYELLOW "\033[33m" +#define VTRESET "\033[0m" + + +/********************************************************************* +* This function builds a uniqued, in-order list of modules that need +* to be 
loaded in order for kmod_name to be successfully loaded. This +* list ends with kmod_name itself. +*********************************************************************/ +static +OSArray * getDependencyListForKmod(char * kmod_name) { + + int error = 0; + + OSDictionary * extensionsDict; // don't release + OSDictionary * extDict; // don't release + OSDictionary * extPlist; // don't release + OSString * extName; // don't release + OSArray * dependencyList = NULL; // return value, caller releases + unsigned int i; + + /* These are used to remove duplicates from the dependency list. + */ + OSArray * originalList = NULL; // must be released + OSDictionary * encounteredNames = NULL; // must be release + + + /* Get the dictionary of startup extensions. + * This is keyed by module name. + */ + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("getDependencyListForKmod(): No extensions dictionary.\n"); + LOG_DELAY(); + error = 1; + goto finish; + } + + + /* Get the requested extension's dictionary entry and its property + * list, containing module dependencies. + */ + extDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(kmod_name)); + + if (!extDict) { + IOLog("getDependencyListForKmod(): " + "Extension \"%s\" cannot be found.\n", + kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + extPlist = OSDynamicCast(OSDictionary, extDict->getObject("plist")); + if (!extPlist) { + IOLog("getDependencyListForKmod(): " + "Extension \"%s\" has no property list.\n", + kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + + /* Verify that the retrieved entry's "CFBundleIdentifier" property exists. + * This will be added to the dependency list. 
+ */ + extName = OSDynamicCast(OSString, + extPlist->getObject("CFBundleIdentifier")); + if (!extName) { + IOLog("getDependencyListForKmod(): " + "Extension \"%s\" has no \"CFBundleIdentifier\" property.\n", + kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + dependencyList = OSArray::withCapacity(10); + if (!dependencyList) { + IOLog("getDependencyListForKmod(): " + "Couldn't allocate dependency array for extension \"%s\".\n", + kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + dependencyList->setObject(extName); + + + /* Here's a slightly tricky bit. This loop iterates through + * the dependency list until it runs off the end. Each time + * through, however, any number of dependencies can be added + * to the end of the list. Eventually some extensions won't + * have any more dependencies, no more names will be added + * to the list, and this loop will terminate. + */ + for (i = 0; i < dependencyList->getCount(); i++) { + + // None of these needs to be released, as they're all from plists. + OSString * curName; + OSDictionary * curExtDict; + OSDictionary * curExtDepDict; + OSDictionary * curExtPlist; + OSString * curDepName; + + + /* An arbitrary limit to prevent infinite loops. 
+ */ + if (i > 255) { + IOLog("getDependencyListForKmod(): " + "max dependency list length exceeded for " + "extension \"%s\".\n", + kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + curName = OSDynamicCast(OSString, dependencyList->getObject(i)); + + curExtDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(curName)); + if (!curExtDict) { + IOLog("getDependencyListForKmod(): " + "Extension \"%s\", required for extension \"%s\", " + "is not available.\n", + curName->getCStringNoCopy(), kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + curExtPlist = OSDynamicCast(OSDictionary, + curExtDict->getObject("plist")); + if (!curExtPlist) { + IOLog("getDependencyListForKmod(): " + "Extension \"%s\", required for extension \"%s\", " + "has no property list.\n", + curName->getCStringNoCopy(), kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + curExtDepDict = OSDynamicCast(OSDictionary, + curExtPlist->getObject("OSBundleLibraries")); + if (curExtDepDict) { + OSCollectionIterator * keyIterator = + OSCollectionIterator::withCollection(curExtDepDict); + + if (!keyIterator) { + IOLog("getDependencyListForKmod(): " + "Couldn't allocate iterator for extension " + "\"%s\".\n", kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + while ( (curDepName = + OSDynamicCast(OSString, + keyIterator->getNextObject())) ) { + + dependencyList->setObject(curDepName); + } + + keyIterator->release(); + } + } + + + /***** + * The dependency list now exists in the reverse order of required loads, + * and may have duplicates. Now we turn the list around and remove + * duplicates. 
+ */ + originalList = dependencyList; + dependencyList = OSArray::withCapacity(originalList->getCount()); + if (!dependencyList) { + IOLog("getDependenciesForKmod(): " + "Couldn't allocate reversal dependency list for extension " + "\"%s\".\n", kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + encounteredNames = OSDictionary::withCapacity(originalList->getCount()); + if (!encounteredNames) { + IOLog("getDependenciesForKmod(): " + "Couldn't allocate list of encountered names for extension " + "\"%s\".\n", kmod_name); + LOG_DELAY(); + error = 1; + goto finish; + } + + + /* Go backward through the original list, using the encounteredNames + * dictionary to check for duplicates. We put originalList in as the + * value because we need some non-NULL value. + */ + i = originalList->getCount(); + + if (i > 0) { + do { + i--; + + OSString * item = OSDynamicCast(OSString, + originalList->getObject(i)); + + if ( ! encounteredNames->getObject(item) ) { + encounteredNames->setObject(item, originalList); + dependencyList->setObject(item); + } + } while (i > 0); + } + + + +finish: + + if (originalList) { + originalList->release(); + } + if (encounteredNames) { + encounteredNames->release(); + } + if (error) { + if (dependencyList) { + dependencyList->release(); + dependencyList = NULL; + } + } + + return dependencyList; +} + + +/********************************************************************* +*********************************************************************/ +static bool verifyCompatibleVersions(OSArray * dependencyList) { + bool result = true; + + OSString * requestedModuleName = NULL; + + OSDictionary * extensionsDict = NULL; + int count, i; + OSString * curName = NULL; + OSDictionary * curExt = NULL; + OSDictionary * curExtPlist = NULL; + + OSBoolean * isKernelResource = NULL; + + OSDictionary * dependencies = NULL; + OSCollectionIterator * dependencyIterator = NULL; // must release + OSString * dependencyName = NULL; + OSString * curExtDependencyVersion = 
NULL; + UInt32 cur_ext_required_dependency_vers; + + OSDictionary * dependency = NULL; + OSDictionary * dependencyPlist = NULL; + + OSString * dependencyVersion = NULL; + OSString * dependencyCompatibleVersion = NULL; + UInt32 dependency_vers; + UInt32 dependency_compat_vers; + + + /* Get the dictionary of startup extensions. + * This is keyed by module name. + */ + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("verifyCompatibleVersions(): No extensions dictionary.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + + count = dependencyList->getCount(); + if (!count) { + IOLog("verifyCompatibleVersions(): " + "Invoked with no dependency list.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + requestedModuleName = OSDynamicCast(OSString, + dependencyList->getObject(count - 1)); + + for (i = count - 1; i >= 0; i--) { + + if (dependencyIterator) { + dependencyIterator->release(); + dependencyIterator = NULL; + } + + curName = OSDynamicCast(OSString, dependencyList->getObject(i)); + if (!curName) { + IOLog("verifyCompatibleVersions(): Internal error (1).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + curExt = OSDynamicCast(OSDictionary, + extensionsDict->getObject(curName)); + if (!curExt) { + IOLog("verifyCompatibleVersions(): Internal error (2).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + curExtPlist = OSDynamicCast(OSDictionary, + curExt->getObject("plist")); + if (!curExtPlist) { + IOLog("verifyCompatibleVersions(): Internal error (3).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + + /* In-kernel extensions don't need to check dependencies. 
+ */ + isKernelResource = OSDynamicCast(OSBoolean, + curExtPlist->getObject("OSKernelResource")); + if (isKernelResource && isKernelResource->isTrue()) { + continue; + } + + dependencies = OSDynamicCast(OSDictionary, + curExtPlist->getObject("OSBundleLibraries")); + if (!dependencies || dependencies->getCount() < 1) { + IOLog(VTYELLOW "verifyCompatibleVersions(): Extension \"%s\" " + "declares no dependencies.\n" VTRESET, + curName->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + + dependencyIterator = + OSCollectionIterator::withCollection(dependencies); + if (!curExtPlist) { + IOLog("verifyCompatibleVersions(): Internal error (4).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + while ((dependencyName = OSDynamicCast(OSString, + dependencyIterator->getNextObject()))) { + + curExtDependencyVersion = OSDynamicCast(OSString, + dependencies->getObject(dependencyName)); + if (!curExtDependencyVersion) { + IOLog("verifyCompatibleVersions(): Internal error (5).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + dependency = OSDynamicCast(OSDictionary, + extensionsDict->getObject(dependencyName)); + if (!dependency) { + IOLog("verifyCompatibleVersions(): Internal error (6).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + dependencyPlist = OSDynamicCast(OSDictionary, + dependency->getObject("plist")); + if (!dependencyPlist) { + IOLog("verifyCompatibleVersions(): Internal error (7).\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + dependencyVersion = OSDynamicCast(OSString, + dependencyPlist->getObject("CFBundleVersion")); + if (!curExtDependencyVersion) { + IOLog(VTYELLOW "Dependency extension \"%s\" doesn't declare a " + "version.\n" VTRESET, + dependencyName->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + + dependencyCompatibleVersion = OSDynamicCast(OSString, + dependencyPlist->getObject("OSBundleCompatibleVersion")); + if (!dependencyCompatibleVersion) { + 
IOLog(VTYELLOW "Dependency extension \"%s\" doesn't declare a " + "compatible version.\n" VTRESET, + dependencyName->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + +IOLog("\033[33m %s (needs %s, compat-current is %s-%s).\n" VTRESET, + dependencyName->getCStringNoCopy(), + curExtDependencyVersion->getCStringNoCopy(), + dependencyCompatibleVersion->getCStringNoCopy(), + dependencyVersion->getCStringNoCopy()); +LOG_DELAY(); + + if (!VERS_parse_string(curExtDependencyVersion->getCStringNoCopy(), + &cur_ext_required_dependency_vers)) { + } + if (!VERS_parse_string(dependencyVersion->getCStringNoCopy(), + &dependency_vers)) { + } + if (!VERS_parse_string(dependencyCompatibleVersion->getCStringNoCopy(), + &dependency_compat_vers)) { + } + + if (cur_ext_required_dependency_vers > dependency_vers || + cur_ext_required_dependency_vers < dependency_compat_vers) { + + IOLog(VTYELLOW "Cannot load extension \"%s\": dependencies " + "\"%s\" and \"%s\" are not of compatible versions.\n" VTRESET, + requestedModuleName->getCStringNoCopy(), + curName->getCStringNoCopy(), + dependencyName->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + } + } + +finish: + return result; +} + + +/* Used in address_for_loaded_kmod. + */ +static kmod_info_t * g_current_kmod_info = NULL; +static const char * g_current_kmod_name = NULL; + +/* Globals to pass link buffer info from + * address_for_loaded_kmod() and alloc_for_kmod() + * to load_kmod(). + * + * link_load_address is the address used to lay + * down the linked code. It gets adjusted by the + * pad between the headers size and a full page + * multiple. If an error occurs this gets set to + * zero so that the kld client code can detect + * an address or allocation error even if kld + * returns success. + * + * link_load_size is the size of the image as + * created by kld_load_from_memory(). 
link_buffer_size + * is the size of the buffer allocated for the final + * laid-down image, and is adjusted by rounding the + * load size and header size up to full-page multiples. + * + * link_buffer_address is set only by alloc_for_kmod(); + * its value is used as a check if kld_load_from_memory() + * fails so that the buffer can be deallocated. + */ +static unsigned long link_load_address = 0; +static unsigned long link_load_size = 0; +static unsigned long link_buffer_size = 0; +static unsigned long link_header_size = 0; +static unsigned long link_buffer_address = 0; + + +/********************************************************************* +* This function is registered before kmod_load_from_memory() is +* invoked to build symbol table entries for an already-loaded +* kmod. This function just checks the g_current_kmod_info variable +* to gets its load address, and futzes it by the header offset (pad). +* See lower comments for more info on load address futzing. +*********************************************************************/ +static +unsigned long address_for_loaded_kmod( + unsigned long size, + unsigned long headers_size) { + + unsigned long round_headers_size; + unsigned long headers_pad; + + if (!g_current_kmod_info) { + IOLog("address_for_loaded_kmod(): No current kmod.\n"); + LOG_DELAY(); + link_load_address = 0; // error sentinel for kld client + return 0; + } + + round_headers_size = round_page(headers_size); + headers_pad = round_headers_size - headers_size; + + link_load_address = (unsigned long)g_current_kmod_info->address + + headers_pad; + + return link_load_address; +} + + +/********************************************************************* +* This function is registered before kmod_load_from_memory() is +* invoked to actually load a new kmod. It rounds up the header and +* total sizes and vm_allocates a buffer for the kmod. 
Now, KLD doesn't +* enforce any alignment of headers or segments, and we want to make +* sure that the executable code of the kmod lies on a page boundary. +* to do so, this function figures the pad between the actual header +* size and the page-rounded header size, and returns that offset into +* the allocated buffer. After kmod_load_from_memory() returns, its +* caller will move the mach_header struct back to the beginning of the +* allocated buffer so that the kmod_info_t structure contains the +* correct address. +*********************************************************************/ +static +unsigned long alloc_for_kmod( + unsigned long size, + unsigned long headers_size) { + + vm_address_t buffer = 0; + kern_return_t k_result; + + unsigned long round_headers_size; + unsigned long round_segments_size; + unsigned long round_size; + unsigned long headers_pad; + + round_headers_size = round_page(headers_size); + round_segments_size = round_page(size - headers_size); + round_size = round_headers_size + round_segments_size; + headers_pad = round_headers_size - headers_size; + + k_result = vm_allocate(kernel_map, (vm_offset_t *)&buffer, + round_size, TRUE); + if (k_result != KERN_SUCCESS) { + IOLog("alloc_for_kmod(): Can't allocate memory.\n"); + LOG_DELAY(); + link_buffer_address = 0; // make sure it's clear + link_load_address = 0; // error sentinel for kld client + return 0; + } + + link_load_size = size; + + link_buffer_address = buffer; + link_buffer_size = round_size; + link_header_size = headers_size; // NOT rounded! + + link_load_address = link_buffer_address + headers_pad; + + return link_load_address; +} + + +/********************************************************************* +* This function reads the startup extensions dictionary to get the +* address and length of the executable data for the requested kmod. 
+*********************************************************************/ +static +int get_text_info_for_kmod(const char * kmod_name, + char ** text_address, + unsigned long * text_size) { + + // None of these needs to be released. + OSDictionary * extensionsDict; + OSDictionary * kmodDict; + OSData * driverCode; + + vm_offset_t kmod_address; + typedef union { + struct mach_header mach_header; + struct fat_header fat_header; + } kmod_header_composite; + kmod_header_composite * kmod_headers; + + + /* Get the requested kmod's info dictionary from the global + * startup extensions dictionary. + */ + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("text_address_for_kmod(): No extensions dictionary.\n"); + LOG_DELAY(); + return 0; + } + + kmodDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(kmod_name)); + if (!kmodDict) { + IOLog("text_address_for_kmod(): " + "Extension \"%s\" cannot be found.\n", kmod_name); + LOG_DELAY(); + return 0; + } + + driverCode = OSDynamicCast(OSData, kmodDict->getObject("code")); + if (!driverCode) { + IOLog("text_address_for_kmod(): " + "Extension \"%s\" has no \"code\" property.\n", + kmod_name); + LOG_DELAY(); + return 0; + } + + kmod_address = (vm_offset_t)driverCode->getBytesNoCopy(); + kmod_headers = (kmod_header_composite *)kmod_address; + + /* Now extract the appropriate code from the executable data. 
+ */ + if (kmod_headers->mach_header.magic == MH_MAGIC) { + + *text_address = (char *)kmod_address; + *text_size = driverCode->getLength(); + return 1; + + } else if (kmod_headers->fat_header.magic == FAT_MAGIC || + kmod_headers->fat_header.magic == FAT_CIGAM) { + // CIGAM is byte-swapped MAGIC + + load_return_t load_return; + struct fat_arch fatinfo; + + load_return = fatfile_getarch(NULL, kmod_address, &fatinfo); + if (load_return != LOAD_SUCCESS) { + IOLog("text_address_for_kmod(): Extension \"%s\" " + "doesn't contain code for this computer.\n", kmod_name); + LOG_DELAY(); + return 0; + } + + *text_address = (char *)(kmod_address + fatinfo.offset); + *text_size = fatinfo.size; + return 1; + + } else { + IOLog("text_address_for_kmod(): Extension \"%s\" either " + "isn't code or doesn't contain code for this computer.\n", + kmod_name); + LOG_DELAY(); + return 0; + } + + return 1; +} + + +/********************************************************************* +*********************************************************************/ +bool verify_kmod(const char * kmod_name, kmod_info_t * kmod_info) { + bool result = false; + OSDictionary * extensionsDict = NULL; // don't release + OSDictionary * kmodDict = NULL; // don't release + OSDictionary * plist = NULL; // don't release + OSString * versionString = NULL; // don't release + UInt32 plist_vers; + UInt32 kmod_vers; + + if (strncmp(kmod_name, kmod_info->name, sizeof(kmod_info->name))) { + IOLog("verify_kmod(): kmod loaded as \"%s\" has different " + "identifier \"%s\".\n", kmod_name, kmod_info->name); + LOG_DELAY(); + result = false; + goto finish; + } + + if (!VERS_parse_string(kmod_info->version, + &kmod_vers)) { + + IOLog(VTYELLOW "verify_kmod(): kmod \"%s\" has an invalid " + "version.\n" VTRESET, kmod_info->name); + LOG_DELAY(); + result = false; + goto finish; + } + + + /* Get the dictionary of startup extensions. + * This is keyed by module name. 
+ */ + extensionsDict = getStartupExtensions(); + if (!extensionsDict) { + IOLog("verify_kmod(): No extensions dictionary.\n"); + LOG_DELAY(); + result = false; + goto finish; + } + + kmodDict = OSDynamicCast(OSDictionary, + extensionsDict->getObject(kmod_name)); + if (!kmodDict) { + IOLog("verify_kmod(): Can't find record for kmod \"%s\".\n", + kmod_name); + LOG_DELAY(); + result = false; + goto finish; + } + + plist = OSDynamicCast(OSDictionary, + extensionsDict->getObject("plist")); + if (!kmodDict) { + IOLog("verify_kmod(): Kmod \"%s\" has no property list.\n", + kmod_name); + LOG_DELAY(); + result = false; + goto finish; + } + + versionString = OSDynamicCast(OSString, + extensionsDict->getObject("CFBundleVersion")); + if (!versionString) { + IOLog(VTYELLOW "verify_kmod(): Kmod \"%s\" has no \"CFBundleVersion\" " + "property.\n" VTRESET, + kmod_name); + LOG_DELAY(); + result = false; + goto finish; + } + + if (!VERS_parse_string(versionString->getCStringNoCopy(), + &plist_vers)) { + + IOLog(VTYELLOW "verify_kmod(): Property list for kmod \"%s\" has " + "an invalid version.\n" VTRESET, kmod_info->name); + LOG_DELAY(); + result = false; + goto finish; + } + + if (kmod_vers != plist_vers) { + IOLog(VTYELLOW "verify_kmod(): Kmod \"%s\" and its property list " + "claim different versions (%s & %s).\n" VTRESET, + kmod_info->name, + kmod_info->version, + versionString->getCStringNoCopy()); + LOG_DELAY(); + result = false; + goto finish; + } + + +finish: + + // FIXME: make this really return the result after conversion + return true; + + return result; +} + + +/********************************************************************* +* This function takes a dependency list containing a series of +* already-loaded module names, followed by a single name for a module +* that hasn't yet been loaded. It invokes kld_load_from_memory() to +* build symbol info for the already-loaded modules, and then finally +* loads the actually requested module. 
+*********************************************************************/ +static +kern_return_t load_kmod(OSArray * dependencyList) { + kern_return_t result = KERN_SUCCESS; + + unsigned int num_dependencies = 0; + kmod_info_t ** kmod_dependencies = NULL; + unsigned int i; + OSString * requestedKmodName; // don't release + const char * requested_kmod_name; + OSString * currentKmodName; // don't release + char * kmod_address; + unsigned long kmod_size; + struct mach_header * kmod_header; + unsigned long kld_result; + int do_kld_unload = 0; + kmod_info_t * kmod_info; + kmod_t kmod_id; + + + /* Separate the requested kmod from its dependencies. + */ + i = dependencyList->getCount(); + if (i == 0) { + IOLog("load_kmod(): Called with empty list.\n"); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } else { + i--; // make i be the index of the last entry + } + + requestedKmodName = OSDynamicCast(OSString, dependencyList->getObject(i)); + if (!requestedKmodName) { + IOLog("load_kmod(): Called with invalid list of kmod names.\n"); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + requested_kmod_name = requestedKmodName->getCStringNoCopy(); + dependencyList->removeObject(i); + + /* If the requested kmod is already loaded, there's no work to do. + */ + kmod_info = kmod_lookupbyname(requested_kmod_name); + if (kmod_info) { + // FIXME: Need to check for version mismatch if already loaded. + result = KERN_SUCCESS; + goto finish; + } + + + /* Do the KLD loads for the already-loaded modules in order to get + * their symbols. 
+ */ + kld_address_func(&address_for_loaded_kmod); + + num_dependencies = dependencyList->getCount(); + kmod_dependencies = (kmod_info_t **)kalloc(num_dependencies * + sizeof(kmod_info_t *)); + if (!kmod_dependencies) { + IOLog("load_kmod(): Failed to allocate memory for dependency array " + "during load of kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + for (i = 0; i < num_dependencies; i++) { + + currentKmodName = OSDynamicCast(OSString, + dependencyList->getObject(i)); + + if (!currentKmodName) { + IOLog("load_kmod(): Invalid dependency name at index %d for " + "kmod \"%s\".\n", i, requested_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + const char * current_kmod_name = currentKmodName->getCStringNoCopy(); + + // These globals are needed by the kld_address functions + g_current_kmod_info = kmod_lookupbyname(current_kmod_name); + g_current_kmod_name = current_kmod_name; + + if (!g_current_kmod_info) { + IOLog("load_kmod(): Missing dependency \"%s\".\n", + current_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + /* Record the current kmod as a dependency of the requested + * one. This will be used in building references after the + * load is complete. + */ + kmod_dependencies[i] = g_current_kmod_info; + + /* If the current kmod's size is zero it means that we have a + * fake in-kernel dependency. If so then don't have to arrange + * for its symbol table to be reloaded as it is + * part of the kernel's symbol table.. 
+ */ + if (!g_current_kmod_info->size) + continue; + + if (!get_text_info_for_kmod(current_kmod_name, + &kmod_address, &kmod_size)) { + + IOLog("get_text_info_for_kmod() failed for dependency kmod " + "\"%s\".\n", current_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + kld_result = kld_load_from_memory(&kmod_header, + current_kmod_name, + (char *)kmod_address, + kmod_size); + + if (kld_result) { + do_kld_unload = 1; + } + + if (!kld_result || !link_load_address) { + IOLog("kld_load_from_memory() failed for dependency kmod " + "\"%s\".\n", current_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + kld_forget_symbol("_kmod_info"); + } + + /***** + * Now that we've done all the dependencies, which should have already + * been loaded, we do the last requested module, which should not have + * already been loaded. + */ + kld_address_func(&alloc_for_kmod); + + g_current_kmod_name = requested_kmod_name; + g_current_kmod_info = 0; // there is no kmod yet + + if (!get_text_info_for_kmod(requested_kmod_name, + &kmod_address, &kmod_size)) { + IOLog("load_kmod: get_text_info_for_kmod() failed for " + "kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + kld_result = kld_load_from_memory(&kmod_header, + requested_kmod_name, + (char *)kmod_address, + kmod_size); + + if (kld_result) { + do_kld_unload = 1; + } + + if (!kld_result || !link_load_address) { + IOLog("load_kmod(): kld_load_from_memory() failed for " + "kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + + /* Copy the linked header and image into the vm_allocated buffer. + * Move each onto the appropriate page-aligned boundary as given + * by the global link_... variables. 
+ */ + bzero((char *)link_buffer_address, link_buffer_size); + // bcopy() is (from, to, length) + bcopy((char *)kmod_header, (char *)link_buffer_address, link_header_size); + bcopy((char *)kmod_header + link_header_size, + (char *)link_buffer_address + round_page(link_header_size), + link_load_size - link_header_size); + + + /* Get the kmod_info struct for the newly-loaded kmod. + */ + if (!kld_lookup("_kmod_info", (unsigned long *)&kmod_info)) { + IOLog("kld_lookup() of \"_kmod_info\" failed for " + "kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + + if (!verify_kmod(requested_kmod_name, kmod_info)) { + // verify_kmod() logs a meaningful message + result = KERN_FAILURE; + goto finish; + } + + + /* kld_lookup of _kmod_info yielded the actual linked address, + * so now that we've copied the data into its real place, + * we can set this stuff. + */ + kmod_info->address = link_buffer_address; + kmod_info->size = link_buffer_size; + kmod_info->hdr_size = round_page(link_header_size); + + /* We've written data and instructions, so *flush* the data cache + * and *invalidate* the instruction cache. + */ + flush_dcache(link_buffer_address, link_buffer_size, false); + invalidate_icache(link_buffer_address, link_buffer_size, false); + + + /* Register the new kmod with the kernel proper. + */ + if (kmod_create_internal(kmod_info, &kmod_id) != KERN_SUCCESS) { + IOLog("load_kmod(): kmod_create() failed for " + "kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + IOLog("kmod id %d successfully created at 0x%lx, size %ld.\n", + (unsigned int)kmod_id, link_buffer_address, link_buffer_size); + LOG_DELAY(); + + /* Record dependencies for the newly-loaded kmod. 
+ */ + for (i = 0; i < num_dependencies; i++) { + kmod_info_t * cur_dependency_info; + kmod_t packed_id; + cur_dependency_info = kmod_dependencies[i]; + packed_id = KMOD_PACK_IDS(kmod_id, cur_dependency_info->id); + if (kmod_retain(packed_id) != KERN_SUCCESS) { + IOLog("load_kmod(): kmod_retain() failed for " + "kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + kmod_destroy_internal(kmod_id); + result = KERN_FAILURE; + goto finish; + } + } + + /* Start the kmod (which invokes constructors for I/O Kit + * drivers. + */ + // kmod_start_or_stop(id, start?, user data, datalen) + if (kmod_start_or_stop(kmod_id, 1, 0, 0) != KERN_SUCCESS) { + IOLog("load_kmod(): kmod_start_or_stop() failed for " + "kmod \"%s\".\n", requested_kmod_name); + LOG_DELAY(); + kmod_destroy_internal(kmod_id); + result = KERN_FAILURE; + goto finish; + } + +finish: + + /* Only do a kld_unload_all() if at least one load happened. + */ + if (do_kld_unload) { + kld_unload_all(/* deallocate sets */ 1); + + } + + /* If the link failed, blow away the allocated link buffer. + */ + if (result != KERN_SUCCESS && link_buffer_address) { + vm_deallocate(kernel_map, link_buffer_address, link_buffer_size); + } + + if (kmod_dependencies) { + kfree((unsigned int)kmod_dependencies, + num_dependencies * sizeof(kmod_info_t *)); + } + + /* Reset these static global variables for the next call. + */ + g_current_kmod_name = NULL; + g_current_kmod_info = NULL; + link_buffer_address = 0; + link_load_address = 0; + link_load_size = 0; + link_buffer_size = 0; + link_header_size = 0; + + return result; +} + + +/********************************************************************* +* This is the function that IOCatalogue calls in order to load a kmod. +* It first checks whether the kmod is already loaded. If the kmod +* isn't loaded, this function builds a dependency list and calls +* load_kmod() repeatedly to guarantee that each dependency is in fact +* loaded. 
+*********************************************************************/ +__private_extern__ +kern_return_t load_kernel_extension(char * kmod_name) { + kern_return_t result = KERN_SUCCESS; + kmod_info_t * kmod_info; + OSArray * dependencyList = NULL; // must release + OSArray * curDependencyList = NULL; // must release + + + /* This must be the very first thing done by this function. + */ + IOLockLock(kld_lock); + + + /* See if the kmod is already loaded. + */ + kmod_info = kmod_lookupbyname(kmod_name); + if (kmod_info) { // NOT checked + result = KERN_SUCCESS; + goto finish; + } + + // FIXME: Need to check whether kmod is built into the kernel! + + /* It isn't loaded; build a dependency list and + * load those. + */ + unsigned int count; + unsigned int i; + dependencyList = getDependencyListForKmod(kmod_name); + if (!dependencyList) { + IOLog("load_kernel_extension(): " + "Can't get dependencies for kernel extension \"%s\".\n", + kmod_name); + LOG_DELAY(); + result = KERN_FAILURE; + goto finish; + } + + if (!verifyCompatibleVersions(dependencyList)) { + IOLog(VTYELLOW "load_kernel_extension(): " + "Version mismatch for kernel extension \"%s\".\n" VTRESET, + kmod_name); + LOG_DELAY(); +#if 0 +// FIXME: This is currently a warning only; when kexts are updated +// this will become an error. 
+ result = KERN_FAILURE; + goto finish; +#else + IOLog(VTYELLOW "Loading anyway.\n" VTRESET); +#endif 0 + } + + count = dependencyList->getCount(); + for (i = 0; i < count; i++) { + kern_return_t load_result; + OSString * curKmodName; // don't release + const char * cur_kmod_name; + + curKmodName = OSDynamicCast(OSString, + dependencyList->getObject(i)); + cur_kmod_name = curKmodName->getCStringNoCopy(); + curDependencyList = getDependencyListForKmod(cur_kmod_name); + + load_result = load_kmod(curDependencyList); + if (load_result != KERN_SUCCESS) { + IOLog("load_kernel_extension(): " + "load_kmod() failed for kmod \"%s\".\n", + cur_kmod_name); + LOG_DELAY(); + result = load_result; + goto finish; + } + curDependencyList->release(); + curDependencyList = NULL; + } + + +finish: + + if (dependencyList) { + dependencyList->release(); + dependencyList = NULL; + } + if (curDependencyList) { + curDependencyList->release(); + curDependencyList = NULL; + } + + /* This must be the very last thing done before returning. 
+ */ + IOLockUnlock(kld_lock); + + return result; +} diff --git a/libsa/libsa/Makefile b/libsa/libsa/Makefile new file mode 100644 index 000000000..7ae937a41 --- /dev/null +++ b/libsa/libsa/Makefile @@ -0,0 +1,30 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = +INSTINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS} +INSTINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS = mach +EXPINC_SUBDIRS_PPC = ${EXPINC_SUBDIRS} ppc +EXPINC_SUBDIRS_I386 = ${EXPINC_SUBDIRS} i386 + +INSTALL_MI_LIST = + +INSTALL_MI_DIR = libsa + +EXPORT_MI_LIST = mkext.h setjmp.h stdlib.h unistd.h + +EXPORT_MI_DIR = libsa + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/libsa/catalogue.h b/libsa/libsa/catalogue.h new file mode 100644 index 000000000..75efe4441 --- /dev/null +++ b/libsa/libsa/catalogue.h @@ -0,0 +1,5 @@ +bool recordStartupExtensions(void); +bool addExtensionsFromArchive(OSData * mkext); +void removeStartupExtension(const char * extensionName); + +OSDictionary * getStartupExtensions(void); diff --git a/libsa/libsa/i386/Makefile b/libsa/libsa/i386/Makefile new file mode 100644 index 000000000..f28a193d0 --- /dev/null +++ b/libsa/libsa/i386/Makefile @@ -0,0 +1,32 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +INSTALL_MD_LIST = + +INSTALL_MD_DIR = libsa/i386 + +EXPORT_MD_LIST = setjmp.h + +EXPORT_MD_DIR = libsa/i386 
+ +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/libsa/i386/setjmp.h b/libsa/libsa/i386/setjmp.h new file mode 100644 index 000000000..0ab263dbf --- /dev/null +++ b/libsa/libsa/i386/setjmp.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Setjmp/longjmp buffer for i386. + */ +#ifndef _I386_SETJMP_H_ +#define _I386_SETJMP_H_ + +typedef int jmp_buf[6]; /* ebx, esi, edi, ebp, esp, eip */ + +#endif /* _I386_SETJMP_H_ */ diff --git a/libsa/libsa/kmod.h b/libsa/libsa/kmod.h new file mode 100644 index 000000000..5b31356d8 --- /dev/null +++ b/libsa/libsa/kmod.h @@ -0,0 +1,11 @@ +#ifdef __cplusplus +extern "C" { +#endif __cplusplus + +#include + +__private_extern__ kern_return_t load_kernel_extension(char * kmod_name); + +#ifdef __cplusplus +}; +#endif __cplusplus diff --git a/libsa/libsa/mach/Makefile b/libsa/libsa/mach/Makefile new file mode 100644 index 000000000..53babf14c --- /dev/null +++ b/libsa/libsa/mach/Makefile @@ -0,0 +1,32 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +INSTALL_MI_LIST = + +INSTALL_MI_DIR = libsa/mach + +EXPORT_MI_LIST = mach.h + +EXPORT_MI_DIR = libsa/mach + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/libsa/mach/mach.h b/libsa/libsa/mach/mach.h new file mode 100644 index 000000000..8c699c2b2 --- /dev/null +++ b/libsa/libsa/mach/mach.h @@ -0,0 +1,12 @@ +#ifndef _LIBSA_MACH_MACH_H_ +#define _LIBSA_MACH_MACH_H_ + +#include +#include + +__private_extern__ 
vm_map_t mach_task_self(void); + +char *mach_error_string(kern_return_t); + + +#endif _LIBSA_MACH_MACH_H_ diff --git a/libsa/libsa/malloc.h b/libsa/libsa/malloc.h new file mode 100644 index 000000000..f0a0a4e46 --- /dev/null +++ b/libsa/libsa/malloc.h @@ -0,0 +1,42 @@ +#ifndef _LIBSA_MALLOC_H_ +#define _LIBSA_MALLOC_H_ + +#include "stdlib.h" + + +/***** + * These functions are the minimum necessary for use + * by kld and its client. + */ +void * malloc(size_t size); +void * realloc(void * address, size_t new_size); +void free(void * address); + +void malloc_reset(void); // Destroy all memory regions + + +/***** + * These functions aren't compiled into the kernel. + * Their definitions are in the files malloc_debug + * and malloc_unused, in case they're ever needed. + */ +#if 0 +void free_all(void); // "Free" all memory blocks +size_t malloc_size(void * address); +int malloc_is_valid(void * address); + +#ifdef DEBUG +size_t malloc_hiwat(void); +size_t malloc_current_usage(void); +size_t malloc_region_usage(void); +double malloc_peak_usage(void); +double malloc_min_usage(void); +size_t malloc_unused(void); +double malloc_current_efficiency(void); +void malloc_clear_hiwat(void); +void malloc_report(void); +int malloc_sanity_check(void); +#endif DEBUG +#endif 0 + +#endif defined _LIBSA_MALLOC_H_ diff --git a/libsa/libsa/mkext.h b/libsa/libsa/mkext.h new file mode 100644 index 000000000..062786d94 --- /dev/null +++ b/libsa/libsa/mkext.h @@ -0,0 +1,55 @@ +#ifndef _MKEXT_H_ +#define _MKEXT_H_ 1 + +#include +#include + +#include + +#define MKEXT_MAGIC 'MKXT' +#define MKEXT_SIGN 'MOSX' + +#define MKEXT_EXTN ".mkext" + +// All binary values are big-endian + +// If all fields are 0 then this file slot is empty +// If compsize is zero then the file isn't compressed. 
+typedef struct mkext_file { + size_t offset; // 4 bytes + size_t compsize; // 4 bytes + size_t realsize; // 4 bytes + time_t modifiedsecs; // 4 bytes +} mkext_file; + +// The plist file entry is mandatory, but module may be empty +typedef struct mkext_kext { + mkext_file plist; // 16 bytes + mkext_file module; // 16 bytes +} mkext_kext; + +typedef struct mkext_header { + u_int32_t magic; // 'MKXT' + u_int32_t signature; // 'MOSX' + u_int32_t length; + u_int32_t adler32; + u_int32_t version; // vers resource, currently '1.0.0', 0x01008000 + u_int32_t numkexts; + cpu_type_t cputype; // CPU_TYPE_ANY for fat executables + cpu_subtype_t cpusubtype; // CPU_SUBTYPE_MULITPLE for executables + mkext_kext kext[1]; // 64 bytes/entry +} mkext_header; + +__BEGIN_DECLS +__private_extern__ u_int8_t * +compress_lzss(u_int8_t *dst, u_int32_t dstlen, u_int8_t *src, u_int32_t srclen); + +__private_extern__ int +decompress_lzss(u_int8_t *dst, u_int8_t *src, u_int32_t srclen); + +__private_extern__ u_int32_t +adler32(u_int8_t *src, int32_t length); + +__END_DECLS + +#endif /* _MKEXT_H_ */ diff --git a/libsa/libsa/ppc/Makefile b/libsa/libsa/ppc/Makefile new file mode 100644 index 000000000..6f4a9d96a --- /dev/null +++ b/libsa/libsa/ppc/Makefile @@ -0,0 +1,32 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +INSTALL_MD_LIST = + +INSTALL_MD_DIR = libsa/ppc + +EXPORT_MD_LIST = setjmp.h + +EXPORT_MD_DIR = libsa/ppc + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/libsa/libsa/ppc/setjmp.h b/libsa/libsa/ppc/setjmp.h new file mode 100644 index 
000000000..54d17d51b --- /dev/null +++ b/libsa/libsa/ppc/setjmp.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_SETJMP_H_ +#define _PPC_SETJMP_H_ + +/* + * We save the following registers (marked as non-volatile in the ELF spec) + * + * r1 - stack pointer + * r13 - small data area pointer + * r14-r30 - local variables + * r31 - local variable/environment pointer + * + * cr - condition register + * lr - link register (to know where to jump back to) + * xer - fixed point exception register + * + * fpscr - floating point status and control + * f14-f31 - local variables + * + * which comes to 57 words. We round up to 64 for good measure. + */ + +typedef int jmp_buf[64]; + +#endif /* _PPC_SETJMP_H_ */ diff --git a/libsa/libsa/setjmp.h b/libsa/libsa/setjmp.h new file mode 100644 index 000000000..ed021de4e --- /dev/null +++ b/libsa/libsa/setjmp.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _LIBSA_SETJMP_H +#define _LIBSA_SETJMP_H + + +#if defined (__ppc__) +#include "ppc/setjmp.h" +#elif defined (__i386__) +#include "i386/setjmp.h" +#else +#error architecture not supported +#endif + + +__private_extern__ int setjmp( + jmp_buf jmp_buf); + +__private_extern__ void longjmp( + jmp_buf jmp_buf, + int value); + +#endif /* _LIBSA_SETJMP_H */ diff --git a/libsa/libsa/stdlib.h b/libsa/libsa/stdlib.h new file mode 100644 index 000000000..441b121ed --- /dev/null +++ b/libsa/libsa/stdlib.h @@ -0,0 +1,45 @@ +#ifndef _LIBSA_STDLIB_H_ +#define _LIBSA_STDLIB_H_ + + +#ifndef _BSD_SIZE_T_DEFINED_ +#define _BSD_SIZE_T_DEFINED_ +typedef __SIZE_TYPE__ size_t; +#endif + +#ifndef NULL +#define NULL (0) +#endif + + +__private_extern__ char *kld_basefile_name; + + +__private_extern__ void * malloc(size_t size); +__private_extern__ void free(void * address); +__private_extern__ void free_all(void); // "Free" all memory blocks +__private_extern__ void malloc_reset(void); // Destroy all memory regions +__private_extern__ void * realloc(void * address, size_t new_size); + + + +__private_extern__ void qsort( 
+ void * array, + size_t nmembers, + size_t member_size, + int (*)(const void *, const void *)); + +__private_extern__ void * bsearch( + register const void *key, + const void *base0, + size_t nmemb, + register size_t size, + register int (*compar)(const void *, const void *)); + + +/* These are defined in the kernel. + */ +extern long strtol(const char *, char **, int); +extern unsigned long strtoul(const char *, char **, int); + +#endif _LIBSA_STDLIB_H_ diff --git a/libsa/libsa/unistd.h b/libsa/libsa/unistd.h new file mode 100644 index 000000000..2dc5efd1c --- /dev/null +++ b/libsa/libsa/unistd.h @@ -0,0 +1,8 @@ +#ifndef _LIBSA_UNISTD_H_ +#define _LIBSA_UNISTD_H_ + + +#define getpagesize() PAGE_SIZE + + +#endif _LIBSA_UNISTD_H_ diff --git a/libsa/libsa/vers_rsrc.h b/libsa/libsa/vers_rsrc.h new file mode 100644 index 000000000..71ba9051e --- /dev/null +++ b/libsa/libsa/vers_rsrc.h @@ -0,0 +1,29 @@ +#ifndef _LIBSA_VERS_H_ +#define _LIBSA_VERS_H_ + +#include + +typedef union { + UInt32 vnum; + UInt8 bytes[4]; +} VERS_version; + +typedef enum { + VERS_development = 0x20, + VERS_alpha = 0x40, + VERS_beta = 0x60, + VERS_candidate = 0x70, // for interim usage only! + VERS_release = 0x80, + VERS_invalid = 0xff +} VERS_revision; + +#define BCD_combine(l, r) ( (((l) & 0xf) << 4) | ((r) & 0xf) ) +#define BCD_get_left(p) ( ((p) >> 4) & 0xf ) +#define BCD_get_right(p) ( (p) & 0xf ) + +#define BCD_illegal (0xff) // full byte, 11111111 + +int VERS_parse_string(char * vers_string, UInt32 * version_num); +int VERS_string(char * buffer, UInt32 length, UInt32 vers); + +#endif _LIBSA_VERS_H_ diff --git a/libsa/mach.c b/libsa/mach.c new file mode 100644 index 000000000..1c04135fe --- /dev/null +++ b/libsa/mach.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + + +__private_extern__ +vm_map_t mach_task_self(void) { + return kernel_map; +} + +__private_extern__ +char * mach_error_string(int errnum) { + char * string = (char *)malloc(80); + if (string) { + sprintf(string, "mach error # %d", errnum); + } + return string; +} diff --git a/libsa/mach_loader.h b/libsa/mach_loader.h new file mode 100644 index 000000000..8e9715bcc --- /dev/null +++ b/libsa/mach_loader.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1992, NeXT, Inc. + * + * File: kern/mach_loader.h + * + * Mach object file loader API. + * + * HISTORY + * 24-Aug-92 Doug Mitchell at NeXT + * Created. + */ + +#ifndef _BSD_KERN_MACH_LOADER_H_ +#define _BSD_KERN_MACH_LOADER_H_ + +#include + +#include + +typedef int load_return_t; + +typedef struct _load_result { + vm_offset_t mach_header; + vm_offset_t entry_point; + vm_offset_t user_stack; + int thread_count; + unsigned int + /* boolean_t */ unixproc :1, + dynlinker :1, + :0; +} load_result_t; + +load_return_t load_machfile( + struct vnode *vp, + struct mach_header *header, + unsigned long file_offset, + unsigned long macho_size, + load_result_t *result); + +#define LOAD_SUCCESS 0 +#define LOAD_BADARCH 1 /* CPU type/subtype not found */ +#define LOAD_BADMACHO 2 /* malformed mach-o file */ +#define LOAD_SHLIB 3 /* shlib version mismatch */ +#define LOAD_FAILURE 4 /* Miscellaneous error */ +#define LOAD_NOSPACE 5 /* No VM available */ +#define LOAD_PROTECT 6 /* protection violation */ +#define LOAD_RESOURCE 7 /* resource allocation failure */ + +#endif /* _BSD_KERN_MACH_LOADER_H_ */ diff --git a/libsa/malloc.c b/libsa/malloc.c new file mode 100644 index 000000000..d358695ea --- /dev/null +++ b/libsa/malloc.c @@ -0,0 +1,593 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +#include + +#undef CLIENT_DEBUG + + +/********************************************************************* +* I'm not sure this is really necessary.... +*********************************************************************/ +static inline size_t round_to_long(size_t size) { + return (size + sizeof(long int)) & ~(sizeof(long int) - 1); +} + + +typedef struct queue_entry queue_entry; + +/********************************************************************* +* Structure for an allocation region. Each one is created using +* kmem_alloc(), and the whole list of these is destroyed by calling +* malloc_reset(). Client blocks are allocated from a linked list of these +* regions, on a first-fit basis, and are never freed. 
+*********************************************************************/ +typedef struct malloc_region { + queue_entry links; // Uses queue.h for linked list + vm_size_t region_size; // total size w/ this bookeeping info + + queue_entry block_list; // list of allocated blocks; uses queue.h + + vm_size_t free_size; // size of unused area + void * free_address; // points at the unused area + + char buffer[0]; // beginning of useable area +} malloc_region; + + +/********************************************************************* +* Structure for a client memory block. Contains linked-list pointers, +* a size field giving the TOTAL size of the block, including this +* header, and the address of the client's block. The client block +* field is guaranteed to lie on a 16-byte boundary. +*********************************************************************/ +typedef struct malloc_block { + queue_entry links; // Uses queue.h for linked list + malloc_region * region; +#ifdef CLIENT_DEBUG + size_t request_size; +#endif CLIENT_DEBUG + size_t block_size; // total size w/ all bookeeping info + + // the client's memory block + char buffer[0] __attribute__((aligned(16))); +} malloc_block; + + +/********************************************************************* +* Private functions. +* +* malloc_create_region() +* size - The size in bytes of the region. This is rounded up +* to a multiple of the VM page size. +* Returns a pointer to the new region. +* +* malloc_free_region() +* region - The region to free. +* Returns whatever vm_deallocate() returns. +* +* malloc_create_block_in_region() +* region - The region to alloate a block from. +* size - The total size, including the header, of the block to +* allocate. +* Returns a pointer to the block, or NULL on failure. +* +* malloc_find_block() +* address - The address of the client buffer to find a block for. +* block (out) - The block header for the address. +* region (out) - The region the block was found in, or NULL. 
+*********************************************************************/ +static malloc_region * malloc_create_region(vm_size_t size); +static kern_return_t malloc_free_region(malloc_region * region); +static malloc_block * malloc_create_block_in_region( + malloc_region * region, + size_t size); +static void malloc_find_block( + void * address, + malloc_block ** block, + malloc_region ** region); +static void malloc_get_free_block( + size_t size, + malloc_block ** block, + malloc_region ** region); + + +/********************************************************************* +* Pointers to the linked list of VM-allocated regions, and a high +* water mark used in testing/debugging. +*********************************************************************/ +static queue_entry malloc_region_list = { + &malloc_region_list, // the "next" field + &malloc_region_list // the "prev" field +}; + +static queue_entry sorted_free_block_list = { + &sorted_free_block_list, + &sorted_free_block_list +}; + +#ifdef CLIENT_DEBUG +static size_t malloc_hiwater_mark = 0; +static long int num_regions = 0; + +static size_t current_block_total = 0; +static double peak_usage = 0.0; +static double min_usage = 100.0; +#endif CLIENT_DEBUG + + +/********************************************************************* +* malloc() +*********************************************************************/ +__private_extern__ +void * malloc(size_t size) { + size_t need_size; + malloc_region * cur_region = NULL; + malloc_region * use_region = NULL; + malloc_block * client_block = NULL; + void * client_buffer = NULL; + + /* Add the size of the block header to the request size. + */ + need_size = round_to_long(size + sizeof(malloc_block)); + + + /* See if there's a previously-freed block that we can reuse. + */ + malloc_get_free_block(need_size, + &client_block, &use_region); + + + /* If we found a free block that we can reuse, then reuse it. 
+ */ + if (client_block != NULL) { + + /* Remove the found block from the list of free blocks + * and tack it onto the list of allocated blocks. + */ + queue_remove(&sorted_free_block_list, client_block, malloc_block *, links); + queue_enter(&use_region->block_list, client_block, malloc_block *, links); + + client_buffer = client_block->buffer; + // Don't return here! There's bookkeeping done below. + + } else { + + /* Didn't find a freed block to reuse. */ + + /* Look for a region with enough unused space to carve out a new block. + */ + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + if (use_region == NULL && cur_region->free_size >= need_size) { + use_region = cur_region; + break; + } + } + + + /* If we haven't found a region with room, create a new one and + * put it at the end of the list of regions. + */ + if (use_region == NULL) { + use_region = malloc_create_region(need_size); + if (use_region == NULL) { + return NULL; + // FIXME: panic? + } + } + + /* Create a new block in the found/created region. + */ + client_block = malloc_create_block_in_region(use_region, need_size); + if (client_block != NULL) { + client_buffer = client_block->buffer; + // Don't return here! There's bookkeeping done below. + } + } + +#ifdef CLIENT_DEBUG + if (client_block != NULL) { + size_t region_usage = malloc_region_usage(); + double current_usage; + + current_block_total += client_block->block_size; + if (region_usage > 0) { + current_usage = (double)current_block_total / (double)malloc_region_usage(); + if (current_usage > peak_usage) { + peak_usage = current_usage; + } + + if (current_usage < min_usage) { + min_usage = current_usage; + } + } + + client_block->request_size = size; + } +#endif CLIENT_DEBUG + + return client_buffer; + +} /* malloc() */ + + +/********************************************************************* +* free() +* +* Moves a block from the allocated list to the free list. Neither +* list is kept sorted! 
+*********************************************************************/ +__private_extern__ +void free(void * address) { + malloc_region * found_region = NULL; + malloc_block * found_block = NULL; + malloc_block * cur_block = NULL; + + /* Find the block and region for the given address. + */ + malloc_find_block(address, &found_block, &found_region); + + if (found_block == NULL) { + return; + // FIXME: panic? + } + + + /* Remove the found block from the list of allocated blocks + * and tack it onto the list of free blocks. + */ + queue_remove(&found_region->block_list, found_block, malloc_block *, links); + + found_block->links.next = NULL; + queue_iterate(&sorted_free_block_list, cur_block, malloc_block *, links) { + if (cur_block->block_size > found_block->block_size) { + queue_insert_before(&sorted_free_block_list, found_block, cur_block, + malloc_block *, links); + break; + } + } + + + /* If the "next" link is still NULL, then either the list is empty or the + * freed block has to go on the end, so just tack it on. + */ + if (found_block->links.next == NULL) { + queue_enter(&sorted_free_block_list, found_block, malloc_block *, links); + } + + +#ifdef CLIENT_DEBUG + current_block_total -= found_block->block_size; +#endif CLIENT_DEBUG + + return; + +} /* free() */ + + +/********************************************************************* +* malloc_reset() +* +* Walks through the list of VM-allocated regions, destroying them +* all. Any subsequent access by clients to allocated data will cause +* a segmentation fault. +*********************************************************************/ +__private_extern__ +void malloc_reset(void) { + malloc_region * cur_region; + + while (! queue_empty(&malloc_region_list)) { + kern_return_t kern_result; + queue_remove_first(&malloc_region_list, cur_region, + malloc_region *, links); + kern_result = malloc_free_region(cur_region); + if (kern_result != KERN_SUCCESS) { + // what sort of error checking can we even do here? 
+ // printf("malloc_free_region() failed.\n"); + // panic(); + } + } + + return; + +} /* malloc_reset() */ + + +/********************************************************************* +* realloc() +* +* This function simply allocates a new block and copies the existing +* data into it. Nothing too clever here, as cleanup and efficient +* memory usage are not important in this allocator package. +*********************************************************************/ +__private_extern__ +void * realloc(void * address, size_t new_client_size) { + malloc_region * found_region = NULL; + malloc_block * found_block = NULL; + void * new_address; + size_t new_block_size; + size_t copy_bytecount; + + + malloc_find_block(address, &found_block, &found_region); + + + /* If we couldn't find the requested block, + * the caller is in error so return NULL. + */ + if (found_block == NULL) { + // printf("realloc() called with invalid block.\n"); + return NULL; + // FIXME: panic? + } + + + /* Figure out how much memory is actually needed. + */ + new_block_size = new_client_size + sizeof(malloc_block); + + + /* If the new size is <= the current size, don't bother. + */ + if (new_block_size <= found_block->block_size) { +#ifdef CLIENT_DEBUG + if (new_client_size > found_block->request_size) { + found_block->request_size = new_client_size; + } +#endif CLIENT_DEBUG + return address; + } + + + /* Create a new block of the requested size. + */ + new_address = malloc(new_client_size); + + if (new_address == NULL) { + // printf("error in realloc()\n"); + return NULL; + // FIXME: panic? + } + + + /* Copy the data from the old block to the new one. + * Make sure to copy only the lesser of the existing and + * requested new size. (Note: The code above currently + * screens out a realloc to a smaller size, but it might + * not always do that.) 
+ */ + copy_bytecount = found_block->block_size - sizeof(malloc_block); + + if (new_client_size < copy_bytecount) { + copy_bytecount = new_client_size; + } + + memcpy(new_address, address, copy_bytecount); + + + /* Free the old block. + */ + free(address); + + return (void *)new_address; + +} /* realloc() */ + + +/********************************************************************* +********************************************************************** +***** PACKAGE-INTERNAL FUNCTIONS BELOW HERE ***** +********************************************************************** +*********************************************************************/ + + + +/********************************************************************* +* malloc_create_region() +* +* Package-internal function. VM-allocates a new region and adds it to +* the given region list. +*********************************************************************/ +__private_extern__ +malloc_region * malloc_create_region(vm_size_t block_size) { + + malloc_region * new_region; + vm_address_t vm_address; + vm_size_t region_size; + kern_return_t kern_result; + + + /* Figure out how big the region needs to be and allocate it. + */ + region_size = block_size + sizeof(malloc_region); + region_size = round_page(region_size); + + kern_result = kmem_alloc(kernel_map, + &vm_address, region_size); + + if (kern_result != KERN_SUCCESS) { + // printf("kmem_alloc() failed in malloc_create_region()\n"); + return NULL; + // panic(); + } + + + /* Cast the allocated pointer to a region header. + */ + new_region = (malloc_region *)vm_address; + + + /* Initialize the region header fields and link it onto + * the previous region. 
+ */ + new_region->region_size = region_size; + queue_init(&new_region->block_list); +// queue_init(&new_region->free_list); + + new_region->free_size = region_size - sizeof(malloc_region); + new_region->free_address = &new_region->buffer; + + queue_enter(&malloc_region_list, new_region, malloc_region *, links); + + /* If debugging, add the new region's size to the total. + */ +#ifdef CLIENT_DEBUG + malloc_hiwater_mark += region_size; + num_regions++; +#endif CLIENT_DEBUG + + return new_region; + +} /* malloc_create_region() */ + + +/********************************************************************* +* malloc_free_region() +* +* Package-internal function. VM-deallocates the given region. +*********************************************************************/ +__private_extern__ +kern_return_t malloc_free_region(malloc_region * region) { + + kmem_free(kernel_map, + (vm_address_t)region, + region->region_size); + +#ifdef CLIENT_DEBUG + num_regions--; +#endif CLIENT_DEBUG + return KERN_SUCCESS; + +} /* malloc_free_region() */ + + +/********************************************************************* +* malloc_create_block_in_region() +* +* Package-internal function. Allocates a new block out of the given +* region. The requested size must include the block header. If the +* size requested is larger than the region's free size, returns NULL. +*********************************************************************/ +__private_extern__ +malloc_block * malloc_create_block_in_region( + malloc_region * region, + size_t block_size) { + + malloc_block * new_block = NULL; + + + /* Sanity checking. + */ + if (block_size > region->free_size) { + return NULL; + // FIXME: panic? + } + + + /* Carve out a new block. 
+ */ + new_block = (malloc_block *)region->free_address; + region->free_address = (char *)region->free_address + block_size; + region->free_size -= block_size; + + memset(new_block, 0, sizeof(malloc_block)); + + new_block->region = region; + new_block->block_size = block_size; + + /* Record the new block as the last one in the region. + */ + queue_enter(®ion->block_list, new_block, malloc_block *, links); + + return new_block; + +} /* malloc_create_block_in_region() */ + + +/********************************************************************* +* malloc_find_block() +* +* Package-internal function. Given a client buffer address, find the +* malloc_block for it. +*********************************************************************/ +__private_extern__ +void malloc_find_block(void * address, + malloc_block ** block, + malloc_region ** region) { + + malloc_region * cur_region; + + *block = NULL; + *region = NULL; + + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + + malloc_block * cur_block; + + queue_iterate(&cur_region->block_list, cur_block, malloc_block *, links) { + if (cur_block->buffer == address) { + *block = cur_block; + *region = cur_region; + return; + } + } + } + + return; + +} /* malloc_find_block() */ + + +/********************************************************************* +* malloc_get_free_block() +*********************************************************************/ +__private_extern__ +void malloc_get_free_block( + size_t size, + malloc_block ** block, + malloc_region ** region) { + + malloc_block * cur_block; + size_t fit_threshold = 512; + + *block = NULL; + *region = NULL; + + queue_iterate(&sorted_free_block_list, cur_block, malloc_block *, links) { + + /* If we find a block large enough, but not too large to waste memory, + * pull it out and return it, along with its region. 
+ */ + if (cur_block->block_size >= size && + cur_block->block_size < (size + fit_threshold)) { + + queue_remove(&sorted_free_block_list, cur_block, malloc_block *, links); + *block = cur_block; + *region = cur_block->region; + return; + } + } + return; +} diff --git a/libsa/malloc_debug_stuff b/libsa/malloc_debug_stuff new file mode 100644 index 000000000..15619526c --- /dev/null +++ b/libsa/malloc_debug_stuff @@ -0,0 +1,294 @@ +#ifdef DEBUG +static void print_region_list(void); +static int check_block_list(queue_entry * block_list, malloc_block * new_block); +#endif DEBUG + + +void print_region_list(void) { + unsigned int i; + malloc_region * cur_region; + + cur_region = (malloc_region *)&malloc_region_list; + printf("First region:\n"); + printf("curr: 0x%8x prev: 0x%8x next: 0x%8x\n", + (unsigned int)cur_region, + (unsigned int)(cur_region->links.prev), + (unsigned int)(cur_region->links.next)); + + printf("Region list contents:\n"); + + i = 0; + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + if (i > num_regions) { + break; + } + printf("curr: 0x%8x prev: 0x%8x next: 0x%8x\n", + (unsigned int)cur_region, + (unsigned int)(cur_region->links.prev), + (unsigned int)(cur_region->links.next)); + i++; + } + return; +} + +void print_block_list(queue_entry * block_list) { + malloc_block * cur_block; + + queue_iterate(block_list, cur_block, malloc_block *, links) { + printf("curr: 0x%8x prev: 0x%8x next: 0x%8x\n", + (unsigned int)cur_block, + (unsigned int)(cur_block->links.prev), + (unsigned int)(cur_block->links.next)); + } + return; +} + +int break_here(void) { + return 0; +} + + +int check_block_list(queue_entry * block_list, malloc_block * new_block) { + void * end_of_new_block; + malloc_block * cur_block; + unsigned int i = 0; + + end_of_new_block = new_block + sizeof(malloc_block); + + queue_iterate(block_list, cur_block, malloc_block *, links) { + malloc_region * cur_region; + void * end_of_region; + void * scratch_block; + void * 
end_of_block; + + cur_region = cur_block->region; + end_of_region = cur_region + cur_region->region_size; + scratch_block = cur_block; + end_of_block = scratch_block + sizeof(malloc_block); + + if ( ((void *)new_block >= scratch_block && (void *)new_block <= end_of_block) || + (end_of_new_block >= scratch_block && end_of_new_block <= end_of_block) || + (scratch_block >= (void *)new_block && scratch_block <= end_of_new_block) || + (end_of_block >= (void *)new_block && end_of_block <= end_of_new_block) ) { + + printf("New block %p overlaps existing block %p.\n", + new_block, scratch_block); + break_here(); + exit(1); + return 1; + + } + + if (scratch_block < (void *)cur_region || + end_of_block >= end_of_region) { + + printf("Found invalid block link at block %d.\n", i); + printf("curr: 0x%8x prev: 0x%8x next: 0x%8x\n", + (unsigned int)cur_block, + (unsigned int)(cur_block->links.prev), + (unsigned int)(cur_block->links.next)); + break_here(); + exit(1); + return 1; + } + + scratch_block = (malloc_block *)cur_block->links.prev; + end_of_block = scratch_block + sizeof(malloc_block); + + if (scratch_block < (void *)cur_region || + end_of_block >= end_of_region) { + + printf("Found invalid block link at block %d.\n", i); + printf("curr: 0x%8x prev: 0x%8x next: 0x%8x\n", + (unsigned int)cur_block, + (unsigned int)(cur_block->links.prev), + (unsigned int)(cur_block->links.next)); + break_here(); + exit(1); + return 1; + } + + scratch_block = (malloc_block *)cur_block->links.next; + end_of_block = scratch_block + sizeof(malloc_block); + + if (scratch_block < (void *)cur_region || + end_of_block >= end_of_region) { + printf("Found invalid block link at block %d.\n", i); + + printf("curr: 0x%8x prev: 0x%8x next: 0x%8x\n", + (unsigned int)cur_block, + (unsigned int)(cur_block->links.prev), + (unsigned int)(cur_block->links.next)); + break_here(); + exit(1); + return 1; + } + + i++; + } + return 0; +} + + +int malloc_sanity_check(void) { + unsigned int i; + malloc_region * 
cur_region; + + i = 0; + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + if (i > num_regions) { + return 0; + } + if (cur_region->links.next != &malloc_region_list && + cur_region->links.next < (queue_entry *)cur_region) { + printf("inconsistency detected\n"); + return 0; + } + i++; + } + return 1; +} + + +/********************************************************************* +* malloc_hiwat() +* +* Returns the maximum amount of memory ever reserved by this package. +*********************************************************************/ +size_t malloc_hiwat() { + return malloc_hiwater_mark; +} + +void malloc_clear_hiwat(void) { + malloc_hiwater_mark = 0; + return; +} + +size_t malloc_current_usage(void) +{ + return current_block_total; +} + +size_t malloc_region_usage(void) { + size_t total = 0; + malloc_region * cur_region; + + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + total += cur_region->region_size - sizeof(malloc_region); + + } + return total; +} + + +double malloc_peak_usage(void) +{ + return peak_usage; +} + +double malloc_min_usage(void) +{ + return min_usage; +} + +size_t malloc_unused(void) { + size_t total = 0; + malloc_region * cur_region; + malloc_block * cur_block; + + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + total += cur_region->free_size; + + } + queue_iterate(&sorted_free_block_list, cur_block, malloc_block *, links) { + total += cur_block->block_size; + } + + return total; +} + +double malloc_current_efficiency(void) +{ + double efficiency = 0.0; + double total_block_size = 0; + double total_request_size = 0; + unsigned long total_block_sizeL = 0; + unsigned long total_request_sizeL = 0; + size_t discrepancy; + size_t max_discrepancy = 0; + malloc_region * cur_region; + malloc_block * cur_block; + + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + queue_iterate(&cur_region->block_list, cur_block, malloc_block *, links) { + size_t 
cur_block_size = cur_block->block_size - sizeof(malloc_block); + total_block_sizeL += cur_block_size; + total_request_sizeL += cur_block->request_size; + total_block_size += (double)cur_block_size; + total_request_size += (double)cur_block->request_size; + discrepancy = cur_block_size - cur_block->request_size; + if (discrepancy > max_discrepancy) { + max_discrepancy = discrepancy; + } + } + } + + if (total_block_size > 0) { + efficiency = (double)total_request_size / (double)total_block_size; + } else { + efficiency = 1.0; + } + + printf("requested %.2f, actual %.2f\n", total_request_size, total_block_size); + printf("requested %ld, actual %ld\n", total_request_sizeL, total_block_sizeL); + printf("max discrepancy %ld\n", max_discrepancy); + + return efficiency; +} + + +/********************************************************************* +* malloc_report() +* +* Print stats on allocated regions and blocks. +*********************************************************************/ +void malloc_report(void) { + malloc_region * cur_region; + malloc_block * cur_block; + size_t total_block_size; + + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + + printf("VM Region, size, free: "); + printf("%p, %d, %d\n", cur_region, + cur_region->region_size, + cur_region->free_size); + + total_block_size = 0; + + queue_iterate(&cur_region->block_list, cur_block, malloc_block *, links) { + + total_block_size += cur_block->block_size; + printf(" Block address, size: %p, %ld (%ld)\n", + cur_block->buffer, cur_block->block_size, + cur_block->block_size - sizeof(malloc_block)); + printf(" Block content: %s\n", + (char *)cur_block->buffer); + } + printf(" Total blocks size: %ld\n", total_block_size); +#if 0 + queue_iterate(&cur_region->free_list, cur_block, malloc_block *, links) { + + total_block_size += cur_block->block_size; + printf(" Free block address, size: %p, %ld (%ld)\n", + cur_block->buffer, cur_block->block_size, + cur_block->block_size - 
sizeof(malloc_block)); + } +#endif 0 + } + + printf("High water mark: %ld\n", malloc_hiwater_mark); + + return; +} /* malloc_report() */ + diff --git a/libsa/malloc_unused b/libsa/malloc_unused new file mode 100644 index 000000000..cb9fa25af --- /dev/null +++ b/libsa/malloc_unused @@ -0,0 +1,76 @@ +/********************************************************************* +* free_all() +* +* Empties all memory regions so that their entire buffer space is +* considered unused. This allows the client to restart without +* having to reallocate memory for the allocator regions, which helps +* performance when this package gets used serially. +*********************************************************************/ +__private_extern__ +void free_all(void) { + malloc_region * cur_region; + + queue_iterate(&malloc_region_list, cur_region, malloc_region *, links) { + + queue_init(&cur_region->block_list); + cur_region->free_size = cur_region->region_size - sizeof(malloc_region); + cur_region->free_address = &cur_region->buffer; + + } + + queue_init(&sorted_free_block_list); + +#ifdef CLIENT_DEBUG + current_block_total = 0; +#endif CLIENT_DEBUG + + return; + +} /* free_all() */ + + +/********************************************************************* +* malloc_size() +* +*********************************************************************/ +__private_extern__ +size_t malloc_size(void * address) { + malloc_region * found_region = NULL; + malloc_block * found_block = NULL; + + malloc_find_block(address, &found_block, &found_region); + + + /* If we couldn't find the requested block, + * the caller is in error so return 0. + */ + if (found_block == NULL) { + return 0; + // FIXME: panic? 
+ } + + return (found_block->block_size - sizeof(malloc_block)); + +} /* malloc_size() */ + + +/********************************************************************* +* malloc_is_valid() +* +*********************************************************************/ +__private_extern__ +int malloc_is_valid(void * address){ + malloc_region * found_region = NULL; + malloc_block * found_block = NULL; + + malloc_find_block(address, &found_block, &found_region); + + if (found_block != NULL) { + return 1; + } else { + return 0; + } + +} /* malloc_is_valid() */ + + diff --git a/libsa/misc.c b/libsa/misc.c new file mode 100644 index 000000000..c5b5a99ab --- /dev/null +++ b/libsa/misc.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + + +__private_extern__ +char *kld_basefile_name = "(memory-resident kernel)"; + + +/* from osfmk/kern/printf.c */ +extern void _doprnt( + register const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); + +/* from osfmk/kern/printf.c */ +extern void conslog_putc(char c); + +__private_extern__ +void kld_error_vprintf(const char *format, va_list ap) { + _doprnt(format, &ap, &conslog_putc, 10); + return; +} diff --git a/libsa/mkext.c b/libsa/mkext.c new file mode 100644 index 000000000..23693233c --- /dev/null +++ b/libsa/mkext.c @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
/*
 * (APSL 1.1 license text continues from the previous hunk)
 * @APPLE_LICENSE_HEADER_END@
 */
#ifndef __private_extern__
#define __private_extern__ /* fallback when not built with Apple's toolchain */
#endif

/* NOTE(review): the original #include targets were stripped during
 * extraction; the KERNEL-branch headers below are a best guess — confirm
 * against the libsa sources. */
#include <sys/types.h>
#if KERNEL
#include <libsa/mkext.h>
#include <libkern/libkern.h>
#else
#include <stdlib.h>
#include <string.h>
#endif /* KERNEL */ /* fixed: `#endif KERNEL` (bare token) is not valid ISO C */


/*
 * adler32 — Adler-32 checksum of buffer[0..length-1].
 *
 * Classic two-part sum: lowHalf is 1 + sum of bytes, highHalf is the sum
 * of the running lowHalf values, both mod 65521 (largest prime < 2^16).
 * The expensive modulo is deferred to every 5000th byte; 5000 iterations
 * cannot overflow a u_int32_t accumulator (zlib uses NMAX = 5552).
 *
 * Returns (highHalf << 16) | lowHalf.
 */
__private_extern__ u_int32_t
adler32(u_int8_t *buffer, int32_t length)
{
    int32_t cnt;
    u_int32_t result, lowHalf, highHalf;

    lowHalf = 1;
    highHalf = 0;

    for (cnt = 0; cnt < length; cnt++) {
        if ((cnt % 5000) == 0) {
            lowHalf %= 65521L;
            highHalf %= 65521L;
        }

        lowHalf += buffer[cnt];
        highHalf += lowHalf;
    }

    lowHalf %= 65521L;
    highHalf %= 65521L;

    result = (highHalf << 16) | lowHalf;

    return result;
}

/**************************************************************
 LZSS.C -- A Data Compression Program
***************************************************************
 4/6/1989 Haruhiko Okumura
 Use, distribute, and modify this program freely.
 Please send me your improved versions.
 PC-VAN SCIENCE
 NIFTY-Serve PAF01022
 CompuServe 74050,1022

**************************************************************/

#define N 4096      /* size of ring buffer - must be power of 2 */
#define F 18        /* upper limit for match_length */
#define THRESHOLD 2 /* encode string into position and length
                       if match_length is greater than this */
#define NIL N       /* index for root of binary search trees */

struct encode_state {
    /*
     * left & right children & parent. These constitute binary search trees.
     * (rchild has 256 extra slots: rchild[N + c + 1] is the root of the
     * tree of strings beginning with byte value c.)
     */
    int lchild[N + 1], rchild[N + 257], parent[N + 1];

    /* ring buffer of size N, with extra F-1 bytes to aid string comparison */
    u_int8_t text_buf[N + F - 1];

    /*
     * match_length of longest match.
     * These are set by the insert_node() procedure.
     */
    int match_position, match_length;
};


/*
 * decompress_lzss — expand an Okumura-format LZSS stream.
 *
 * Each group of 8 units is prefixed by a flag byte: bit set = literal
 * byte follows; bit clear = 2-byte (position, length) pair referencing
 * the 4 KiB ring buffer, which starts filled with spaces.
 *
 * dst:    destination buffer (NOTE: no bounds check is performed — the
 *         caller must guarantee it can hold the decompressed output)
 * src:    compressed input
 * srclen: length of src in bytes
 *
 * Returns the number of bytes written to dst.
 */
__private_extern__ int
decompress_lzss(u_int8_t *dst, u_int8_t *src, u_int32_t srclen)
{
    /* ring buffer of size N, with extra F-1 bytes to aid string comparison */
    u_int8_t text_buf[N + F - 1];
    u_int8_t *dststart = dst;
    u_int8_t *srcend = src + srclen;
    int i, j, k, r, c;
    unsigned int flags;

    /* (removed two redundant re-assignments of dst/srcend that simply
     * repeated the initializers above) */
    for (i = 0; i < N - F; i++)
        text_buf[i] = ' ';
    r = N - F;
    flags = 0;
    for ( ; ; ) {
        if (((flags >>= 1) & 0x100) == 0) {
            if (src < srcend) c = *src++; else break;
            flags = c | 0xFF00;  /* uses higher byte cleverly */
        }                        /* to count eight */
        if (flags & 1) {
            /* literal: copy one byte and record it in the ring buffer */
            if (src < srcend) c = *src++; else break;
            *dst++ = c;
            text_buf[r++] = c;
            r &= (N - 1);
        } else {
            /* match: 12-bit position i, 4-bit length (biased by THRESHOLD) */
            if (src < srcend) i = *src++; else break;
            if (src < srcend) j = *src++; else break;
            i |= ((j & 0xF0) << 4);
            j = (j & 0x0F) + THRESHOLD;
            for (k = 0; k <= j; k++) {
                c = text_buf[(i + k) & (N - 1)];
                *dst++ = c;
                text_buf[r++] = c;
                r &= (N - 1);
            }
        }
    }

    return dst - dststart;
}
*/ +static void init_state(struct encode_state *sp) +{ + int i; + + bzero(sp, sizeof(*sp)); + + for (i = 0; i < N - F; i++) + sp->text_buf[i] = ' '; + for (i = N + 1; i <= N + 256; i++) + sp->rchild[i] = NIL; + for (i = 0; i < N; i++) + sp->parent[i] = NIL; +} + +/* + * Inserts string of length F, text_buf[r..r+F-1], into one of the trees + * (text_buf[r]'th tree) and returns the longest-match position and length + * via the global variables match_position and match_length. + * If match_length = F, then removes the old node in favor of the new one, + * because the old one will be deleted sooner. Note r plays double role, + * as tree node and position in buffer. + */ +static void insert_node(struct encode_state *sp, int r) +{ + int i, p, cmp; + u_int8_t *key; + + cmp = 1; + key = &sp->text_buf[r]; + p = N + 1 + key[0]; + sp->rchild[r] = sp->lchild[r] = NIL; + sp->match_length = 0; + for ( ; ; ) { + if (cmp >= 0) { + if (sp->rchild[p] != NIL) + p = sp->rchild[p]; + else { + sp->rchild[p] = r; + sp->parent[r] = p; + return; + } + } else { + if (sp->lchild[p] != NIL) + p = sp->lchild[p]; + else { + sp->lchild[p] = r; + sp->parent[r] = p; + return; + } + } + for (i = 1; i < F; i++) { + if ((cmp = key[i] - sp->text_buf[p + i]) != 0) + break; + } + if (i > sp->match_length) { + sp->match_position = p; + if ((sp->match_length = i) >= F) + break; + } + } + sp->parent[r] = sp->parent[p]; + sp->lchild[r] = sp->lchild[p]; + sp->rchild[r] = sp->rchild[p]; + sp->parent[sp->lchild[p]] = r; + sp->parent[sp->rchild[p]] = r; + if (sp->rchild[sp->parent[p]] == p) + sp->rchild[sp->parent[p]] = r; + else + sp->lchild[sp->parent[p]] = r; + sp->parent[p] = NIL; /* remove p */ +} + +/* deletes node p from tree */ +static void delete_node(struct encode_state *sp, int p) +{ + int q; + + if (sp->parent[p] == NIL) + return; /* not in tree */ + if (sp->rchild[p] == NIL) + q = sp->lchild[p]; + else if (sp->lchild[p] == NIL) + q = sp->rchild[p]; + else { + q = sp->lchild[p]; + if (sp->rchild[q] 
!= NIL) { + do { + q = sp->rchild[q]; + } while (sp->rchild[q] != NIL); + sp->rchild[sp->parent[q]] = sp->lchild[q]; + sp->parent[sp->lchild[q]] = sp->parent[q]; + sp->lchild[q] = sp->lchild[p]; + sp->parent[sp->lchild[p]] = q; + } + sp->rchild[q] = sp->rchild[p]; + sp->parent[sp->rchild[p]] = q; + } + sp->parent[q] = sp->parent[p]; + if (sp->rchild[sp->parent[p]] == p) + sp->rchild[sp->parent[p]] = q; + else + sp->lchild[sp->parent[p]] = q; + sp->parent[p] = NIL; +} + +__private_extern__ u_int8_t * +compress_lzss(u_int8_t *dst, u_int32_t dstlen, u_int8_t *src, u_int32_t srcLen) +{ + /* Encoding state, mostly tree but some current match stuff */ + struct encode_state *sp; + + int i, c, len, r, s, last_match_length, code_buf_ptr; + u_int8_t code_buf[17], mask; + u_int8_t *srcend = src + srcLen; + u_int8_t *dstend = dst + dstlen; + + /* initialize trees */ + sp = (struct encode_state *) malloc(sizeof(*sp)); + init_state(sp); + + /* + * code_buf[1..16] saves eight units of code, and code_buf[0] works + * as eight flags, "1" representing that the unit is an unencoded + * letter (1 byte), "0" a position-and-length pair (2 bytes). + * Thus, eight units require at most 16 bytes of code. + */ + code_buf[0] = 0; + code_buf_ptr = mask = 1; + + /* Clear the buffer with any character that will appear often. */ + s = 0; r = N - F; + + /* Read F bytes into the last F bytes of the buffer */ + for (len = 0; len < F && src < srcend; len++) + sp->text_buf[r + len] = *src++; + if (!len) + return (void *) 0; /* text of size zero */ + + /* + * Insert the F strings, each of which begins with one or more + * 'space' characters. Note the order in which these strings are + * inserted. This way, degenerate trees will be less likely to occur. + */ + for (i = 1; i <= F; i++) + insert_node(sp, r - i); + + /* + * Finally, insert the whole string just read. + * The global variables match_length and match_position are set. 
+ */ + insert_node(sp, r); + do { + /* match_length may be spuriously long near the end of text. */ + if (sp->match_length > len) + sp->match_length = len; + if (sp->match_length <= THRESHOLD) { + sp->match_length = 1; /* Not long enough match. Send one byte. */ + code_buf[0] |= mask; /* 'send one byte' flag */ + code_buf[code_buf_ptr++] = sp->text_buf[r]; /* Send uncoded. */ + } else { + /* Send position and length pair. Note match_length > THRESHOLD. */ + code_buf[code_buf_ptr++] = (u_int8_t) sp->match_position; + code_buf[code_buf_ptr++] = (u_int8_t) + ( ((sp->match_position >> 4) & 0xF0) + | (sp->match_length - (THRESHOLD + 1)) ); + } + if ((mask <<= 1) == 0) { /* Shift mask left one bit. */ + /* Send at most 8 units of code together */ + for (i = 0; i < code_buf_ptr; i++) + if (dst < dstend) + *dst++ = code_buf[i]; + else + return (void *) 0; + code_buf[0] = 0; + code_buf_ptr = mask = 1; + } + last_match_length = sp->match_length; + for (i = 0; i < last_match_length && src < srcend; i++) { + delete_node(sp, s); /* Delete old strings and */ + c = *src++; + sp->text_buf[s] = c; /* read new bytes */ + + /* + * If the position is near the end of buffer, extend the buffer + * to make string comparison easier. + */ + if (s < F - 1) + sp->text_buf[s + N] = c; + + /* Since this is a ring buffer, increment the position modulo N. */ + s = (s + 1) & (N - 1); + r = (r + 1) & (N - 1); + + /* Register the string in text_buf[r..r+F-1] */ + insert_node(sp, r); + } + while (i++ < last_match_length) { + delete_node(sp, s); + + /* After the end of text, no need to read, */ + s = (s + 1) & (N - 1); + r = (r + 1) & (N - 1); + /* but buffer may not be empty. */ + if (--len) + insert_node(sp, r); + } + } while (len > 0); /* until length of string to be processed is zero */ + + if (code_buf_ptr > 1) { /* Send remaining code. 
*/ + for (i = 0; i < code_buf_ptr; i++) + if (dst < dstend) + *dst++ = code_buf[i]; + else + return (void *) 0; + } + + return dst; +} + +#endif /* !KERNEL */ + diff --git a/libsa/ppc/setjmp.s b/libsa/ppc/setjmp.s new file mode 100644 index 000000000..8350b13d4 --- /dev/null +++ b/libsa/ppc/setjmp.s @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * C library -- _setjmp, _longjmp + * + * _longjmp(a,v) + * will generate a "return(v)" from + * the last call to + * _setjmp(a) + * by restoring registers from the stack, + * The previous signal state is NOT restored. 
/*
 * NOTE : MUST BE KEPT CONSISTENT WITH gdb/config/powerpc/tm-ppc-eabi.h
 * (which needs to know where to find the destination address)
 */

#include <architecture/ppc/asm_help.h> /* NOTE(review): include target lost in extraction — confirm */

.private_extern _longjmp
.private_extern _setjmp

/*
 * setjmp : ARG0 (r3) contains the address of
 *	    the structure where we are to
 *	    store the context
 *	    Uses r0 as scratch register
 *
 * NOTE : MUST BE KEPT CONSISTENT WITH gdb/config/powerpc/tm-ppc-eabi.h
 * (which needs to know where to find the destination address)
 */

ENTRY(setjmp,TAG_NO_FRAME_USED)
	/* first entry is used for r1 - stack ptr */
	stw	r13, 4(ARG0)	/* GPR context. We avoid multiple-word */
	stw	r14, 8(ARG0)	/* instructions as they're slower (?) */
	stw	r15, 12(ARG0)
	stw	r16, 16(ARG0)
	stw	r17, 20(ARG0)
	stw	r18, 24(ARG0)
	stw	r19, 28(ARG0)
	stw	r20, 32(ARG0)
	stw	r21, 36(ARG0)
	stw	r22, 40(ARG0)
	stw	r23, 44(ARG0)
	stw	r24, 48(ARG0)
	stw	r25, 52(ARG0)
	stw	r26, 56(ARG0)
	stw	r27, 60(ARG0)
	stw	r28, 64(ARG0)
	stw	r29, 68(ARG0)
	stw	r30, 72(ARG0)
	stw	r31, 76(ARG0)

	mfcr	r0
	stw	r0, 80(ARG0)	/* Condition register */

	mflr	r0
	stw	r0, 84(ARG0)	/* Link register */

	mfxer	r0
	stw	r0, 88(ARG0)	/* Fixed point exception register */

#if FLOATING_POINT_SUPPORT	/* TODO NMGS probably not needed for kern */
	mffs	f0		/* FIX(review): mffs targets an FPR */
	stfd	f0, 92(ARG0)	/* Floating point status register */

	/* FIX(review): the original stored f18/f19/f23/f24/f28/f29 at
	 * misaligned offsets (138/146/178/186/218/226) that overlapped
	 * the f20/f25/f30 slots and did not match longjmp's loads.
	 * Offsets now follow the 96 + 8*(n-14) layout longjmp restores. */
	stfd	f14, 96(ARG0)	/* Floating point context - 8 byte aligned */
	stfd	f15, 104(ARG0)
	stfd	f16, 112(ARG0)
	stfd	f17, 120(ARG0)
	stfd	f18, 128(ARG0)
	stfd	f19, 136(ARG0)
	stfd	f20, 144(ARG0)
	stfd	f21, 152(ARG0)
	stfd	f22, 160(ARG0)
	stfd	f23, 168(ARG0)
	stfd	f24, 176(ARG0)
	stfd	f25, 184(ARG0)
	stfd	f26, 192(ARG0)
	stfd	f27, 200(ARG0)
	stfd	f28, 208(ARG0)
	stfd	f29, 216(ARG0)
	stfd	f30, 224(ARG0)
	stfd	f31, 232(ARG0)

#endif

	stw	r1, 0(ARG0)	/* finally, save the stack pointer */
	li	ARG0, 0		/* setjmp must return zero */
	blr

/*
 * longjmp : ARG0 (r3) contains the address of
 *	     the structure from where we are to
 *	     restore the context.
 *	     ARG1 (r4) contains the non-zero
 *	     value that we must return to
 *	     that context.
 *	     Uses r0 as scratch register
 *
 * NOTE : MUST BE KEPT CONSISTENT WITH gdb/config/powerpc/tm-ppc-eabi.h
 * (which needs to know where to find the destination address)
 */

ENTRY(longjmp, TAG_NO_FRAME_USED)	/* TODO NMGS - need correct tag */
	lwz	r13, 4(ARG0)	/* GPR context. We avoid multiple-word */
	lwz	r14, 8(ARG0)	/* instructions as they're slower (?) */
	lwz	r15, 12(ARG0)
	lwz	r16, 16(ARG0)
	lwz	r17, 20(ARG0)
	lwz	r18, 24(ARG0)
	lwz	r19, 28(ARG0)
	lwz	r20, 32(ARG0)
	lwz	r21, 36(ARG0)
	lwz	r22, 40(ARG0)
	lwz	r23, 44(ARG0)
	lwz	r24, 48(ARG0)
	lwz	r25, 52(ARG0)
	lwz	r26, 56(ARG0)
	lwz	r27, 60(ARG0)
	lwz	r28, 64(ARG0)
	lwz	r29, 68(ARG0)
	lwz	r30, 72(ARG0)
	lwz	r31, 76(ARG0)

	lwz	r0, 80(ARG0)	/* Condition register */
	mtcr	r0		/* (r0 is the scratch register here) */

	lwz	r0, 84(ARG0)	/* Link register */
	mtlr	r0

	lwz	r0, 88(ARG0)	/* Fixed point exception register */
	mtxer	r0

#if FLOATING_POINT_SUPPORT	/* FIX(review): was #ifdef — setjmp uses #if;
				 * a 0-valued define would have saved nothing
				 * but restored garbage */
	lfd	f0, 92(ARG0)	/* Floating point status register */
	mtfsf	0xFF, f0	/* FIX(review): FPSCR is moved from an FPR */

	lfd	f14, 96(ARG0)	/* Floating point context - 8 byte aligned */
	lfd	f15, 104(ARG0)
	lfd	f16, 112(ARG0)
	lfd	f17, 120(ARG0)
	lfd	f18, 128(ARG0)
	lfd	f19, 136(ARG0)
	lfd	f20, 144(ARG0)
	lfd	f21, 152(ARG0)
	lfd	f22, 160(ARG0)
	lfd	f23, 168(ARG0)
	lfd	f24, 176(ARG0)
	lfd	f25, 184(ARG0)
	lfd	f26, 192(ARG0)
	lfd	f27, 200(ARG0)
	lfd	f28, 208(ARG0)
	lfd	f29, 216(ARG0)
	lfd	f30, 224(ARG0)
	lfd	f31, 232(ARG0)

#endif /* FLOATING_POINT_SUPPORT */


	lwz	r1, 0(ARG0)	/* finally, restore the stack pointer */

	mr.	ARG0, ARG1	/* set the return value */
	bnelr			/* return if non-zero */

	li	ARG0, 1
	blr			/* never return 0, return 1 instead */

/* ==== next patch hunk: libsa/printPlist (new file, C++ / IOKit debug helper) ==== */
// Recursively pretty-print an OSObject property-list tree via IOLog.
// Debug-only helper; IODelay throttles output so the console can keep up.
void printPlist(OSObject * plist, UInt32 indent = 0) {
    const OSMetaClass * typeID;
    OSCollectionIterator * iterator;
    OSString * key;
    OSObject * value;
    unsigned int i;

    if (!plist) {
        IOLog("error! null plist\n");
        return;
    }

    typeID = OSTypeIDInst(plist);

    if (typeID == OSTypeID(OSDictionary)) {

        IOLog("{\n");
        OSDictionary * dict = OSDynamicCast(OSDictionary, plist);
        iterator = OSCollectionIterator::withCollection(dict);
        while ( (key = OSDynamicCast(OSString, iterator->getNextObject())) ) {
            for (i = 0; i < indent + 4; i++) {
                IOLog(" ");
            }
            IOLog("%s = ", key->getCStringNoCopy());
            value = dict->getObject(key);
            printPlist(value, indent + 4);
        }
        iterator->release();  // FIX(review): withCollection() returns a
                              // retained object; it was leaked here

        for (i = 0; i < indent; i++) {
            IOLog(" ");
        }
        IOLog("}\n");

    } else if (typeID == OSTypeID(OSArray)) {

        IOLog("(\n");

        OSArray * array = OSDynamicCast(OSArray, plist);
        iterator = OSCollectionIterator::withCollection(array);
        while ( (value = iterator->getNextObject()) ) {
            for (i = 0; i < indent + 4; i++) {
                IOLog(" ");
            }
            printPlist(value, indent + 4);
        }
        iterator->release();  // FIX(review): same leak as above

        for (i = 0; i < indent; i++) {
            IOLog(" ");
        }
        IOLog(")\n");

    } else if (typeID == OSTypeID(OSString) || typeID == OSTypeID(OSSymbol)) {

        OSString * string = OSDynamicCast(OSString, plist);
        IOLog("\"%s\"\n", string->getCStringNoCopy());

    } else if (typeID == OSTypeID(OSNumber)) {

        OSNumber * number = OSDynamicCast(OSNumber, plist);
        UInt32 numberValue = number->unsigned32BitValue();
        // NOTE(review): %lx/%ld assume UInt32 == unsigned long (true on
        // 32-bit PPC/i386 kernels of this era) — confirm before reuse
        IOLog("0x%lx (%ld base 10)\n", numberValue, numberValue);

    } else if (typeID == OSTypeID(OSBoolean)) {

        OSBoolean * boolObj = OSDynamicCast(OSBoolean, plist);
        IOLog("%s\n", boolObj->isTrue() ? "true" : "false");

    } else if (typeID == OSTypeID(OSData)) {

        IOLog("(binary data)\n");

    } else {

        IOLog("(object of class %s)\n", plist->getMetaClass()->getClassName());

    }

    IODelay(150000);  // ~150 ms pause per node so the console keeps up
    return;
}

/* ==== next patch hunk: libsa/sort.c (new file) ====
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 * @APPLE_LICENSE_HEADER_START@ — Apple Public Source License 1.1;
 * full text continues in the following hunk. */
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)heapsort.c 8.1 (Berkeley) 6/4/93"; +#endif /* LIBC_SCCS and not lint */ + + +#include + + +/* + * Swap two areas of size number of bytes. Although qsort(3) permits random + * blocks of memory to be sorted, sorting pointers is almost certainly the + * common case (and, were it not, could easily be made so). Regardless, it + * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer + * arithmetic gets lost in the time required for comparison function calls. 
+ */ +#define SWAP(a, b, count, size, tmp) { \ + count = size; \ + do { \ + tmp = *a; \ + *a++ = *b; \ + *b++ = tmp; \ + } while (--count); \ +} + +/* Copy one block of size size to another. */ +#define COPY(a, b, count, size, tmp1, tmp2) { \ + count = size; \ + tmp1 = a; \ + tmp2 = b; \ + do { \ + *tmp1++ = *tmp2++; \ + } while (--count); \ +} + +/* + * Build the list into a heap, where a heap is defined such that for + * the records K1 ... KN, Kj/2 >= Kj for 1 <= j/2 <= j <= N. + * + * There two cases. If j == nmemb, select largest of Ki and Kj. If + * j < nmemb, select largest of Ki, Kj and Kj+1. + */ +#define CREATE(initval, nmemb, par_i, child_i, par, child, size, count, tmp) { \ + for (par_i = initval; (child_i = par_i * 2) <= nmemb; \ + par_i = child_i) { \ + child = base + child_i * size; \ + if (child_i < nmemb && compar(child, child + size) < 0) { \ + child += size; \ + ++child_i; \ + } \ + par = base + par_i * size; \ + if (compar(child, par) <= 0) \ + break; \ + SWAP(par, child, count, size, tmp); \ + } \ +} + +/* + * Select the top of the heap and 'heapify'. Since by far the most expensive + * action is the call to the compar function, a considerable optimization + * in the average case can be achieved due to the fact that k, the displaced + * elememt, is ususally quite small, so it would be preferable to first + * heapify, always maintaining the invariant that the larger child is copied + * over its parent's record. + * + * Then, starting from the *bottom* of the heap, finding k's correct place, + * again maintianing the invariant. As a result of the invariant no element + * is 'lost' when k is assigned its correct place in the heap. + * + * The time savings from this optimization are on the order of 15-20% for the + * average case. See Knuth, Vol. 3, page 158, problem 18. + * + * XXX Don't break the #define SELECT line, below. Reiser cpp gets upset. 
+ */ +#define SELECT(par_i, child_i, nmemb, par, child, size, k, count, tmp1, tmp2) { \ + for (par_i = 1; (child_i = par_i * 2) <= nmemb; par_i = child_i) { \ + child = base + child_i * size; \ + if (child_i < nmemb && compar(child, child + size) < 0) { \ + child += size; \ + ++child_i; \ + } \ + par = base + par_i * size; \ + COPY(par, child, count, size, tmp1, tmp2); \ + } \ + for (;;) { \ + child_i = par_i; \ + par_i = child_i / 2; \ + child = base + child_i * size; \ + par = base + par_i * size; \ + if (child_i == 1 || compar(k, par) < 0) { \ + COPY(child, k, count, size, tmp1, tmp2); \ + break; \ + } \ + COPY(child, par, count, size, tmp1, tmp2); \ + } \ +} + +/* Pass heapsort off as qsort for krld. -- Nik Gervae + * + * Heapsort -- Knuth, Vol. 3, page 145. Runs in O (N lg N), both average + * and worst. While heapsort is faster than the worst case of quicksort, + * the BSD quicksort does median selection so that the chance of finding + * a data set that will trigger the worst case is nonexistent. Heapsort's + * only advantage over quicksort is that it requires little additional memory. + */ +__private_extern__ +void qsort(void * vbase, size_t nmemb, size_t size, + int (*compar)(const void *, const void *)) { + + register int cnt, i, j, l; + register char tmp, *tmp1, *tmp2; + char *base, *k, *p, *t; + + if (nmemb <= 1) { + return; + } + + if (!size) { + return; + } + + if ((k = (char *)malloc(size)) == NULL) { +// panic(); + return; + } + + /* + * Items are numbered from 1 to nmemb, so offset from size bytes + * below the starting address. + */ + base = (char *)vbase - size; + + for (l = nmemb / 2 + 1; --l;) + CREATE(l, nmemb, i, j, t, p, size, cnt, tmp); + + /* + * For each element of the heap, save the largest element into its + * final slot, save the displaced element (k), then recreate the + * heap. 
+ */ + while (nmemb > 1) { + COPY(k, base + nmemb * size, cnt, size, tmp1, tmp2); + COPY(base + nmemb * size, base + size, cnt, size, tmp1, tmp2); + --nmemb; + SELECT(i, j, nmemb, t, p, size, k, cnt, tmp1, tmp2); + } + free(k); + return; +} diff --git a/libsa/vers_rsrc.c b/libsa/vers_rsrc.c new file mode 100644 index 000000000..d3b4b4a6c --- /dev/null +++ b/libsa/vers_rsrc.c @@ -0,0 +1,429 @@ +#include +#include + + +int isdigit(char c) { + return (c == '0' || + c == '1' || c == '2' || c == '3' || + c == '4' || c == '5' || c == '6' || + c == '7' || c == '8' || c == '9'); +} + +int isspace(char c) { + return (c == ' ' || + c == '\t' || + c == '\r' || + c == '\n'); +} + + +int isreleasestate(char c) { + return (c == 'd' || c == 'a' || c == 'b' || c == 'f'); +} + + +UInt8 BCD_digit_for_char(char c) { + switch (c) { + case '0': return 0; break; + case '1': return 1; break; + case '2': return 2; break; + case '3': return 3; break; + case '4': return 4; break; + case '5': return 5; break; + case '6': return 6; break; + case '7': return 7; break; + case '8': return 8; break; + case '9': return 9; break; + default: return BCD_illegal; break; + } + return BCD_illegal; +} + + +char BCD_char_for_digit(UInt8 digit) { + switch (digit) { + case 0: return '0'; break; + case 1: return '1'; break; + case 2: return '2'; break; + case 3: return '3'; break; + case 4: return '4'; break; + case 5: return '5'; break; + case 6: return '6'; break; + case 7: return '7'; break; + case 8: return '8'; break; + case 9: return '9'; break; + default: return '?'; break; + } + return '?'; +} + + +VERS_revision VERS_revision_for_string(char ** string_p) { + char * string; + + if (!string_p || !*string_p) { + return VERS_invalid; + } + + string = *string_p; + + if (isspace(string[0]) || string[0] == '\0') { + return VERS_release; + } else { + switch (string[0]) { + case 'd': + if (isdigit(string[1])) { + *string_p = &string[1]; + return VERS_development; + } + break; + case 'a': + if 
(isdigit(string[1])) { + *string_p = &string[1]; + return VERS_alpha; + } + break; + case 'b': + if (isdigit(string[1])) { + *string_p = &string[1]; + return VERS_beta; + } + break; + case 'f': + if (isdigit(string[1])) { + *string_p = &string[1]; + return VERS_candidate; + } else if (string[1] == 'c' && isdigit(string[2])) { + *string_p = &string[2]; + return VERS_candidate; + } else { + return VERS_invalid; + } + break; + default: + return VERS_invalid; + break; + } + } + + return VERS_invalid; +} + + +int VERS_parse_string(char * vers_string, UInt32 * version_num) { + int result = 1; + VERS_version vers; + char * current_char_p; + UInt8 scratch; + + if (!vers_string || *vers_string == '\0') { + return 0; + } + + vers.vnum = 0; + + current_char_p = &vers_string[0]; + + + /***** + * Check for an initial digit of the major release number. + */ + vers.bytes[0] = BCD_digit_for_char(*current_char_p); + if (vers.bytes[0] == BCD_illegal) { + return 0; + } + + current_char_p++; + + + /***** + * Check for a second digit of the major release number. + */ + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else if (isdigit(*current_char_p)) { + scratch = BCD_digit_for_char(*current_char_p); + if (scratch == BCD_illegal) { + return 0; + } + vers.bytes[0] = BCD_combine(vers.bytes[0], scratch); + current_char_p++; + + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else if (isreleasestate(*current_char_p)) { + goto release_state; + } else if (*current_char_p == '.') { + current_char_p++; + } else { + return 0; + } + } else if (isreleasestate(*current_char_p)) { + goto release_state; + } else if (*current_char_p == '.') { + current_char_p++; + } else { + return 0; + } + + + /***** + * Check for the minor release number. 
+ */ + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else if (isdigit(*current_char_p)) { + vers.bytes[1] = BCD_digit_for_char(*current_char_p); + if (vers.bytes[1] == BCD_illegal) { + return 0; + } + + // Make sure its the first nibble of byte 1! + vers.bytes[1] = BCD_combine(vers.bytes[1], 0); + + current_char_p++; + + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else if (isreleasestate(*current_char_p)) { + goto release_state; + } else if (*current_char_p == '.') { + current_char_p++; + } else { + return 0; + } + } else { + return 0; + } + + + /***** + * Check for the bugfix number. + */ + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else if (isdigit(*current_char_p)) { + scratch = BCD_digit_for_char(*current_char_p); + if (scratch == BCD_illegal) { + return 0; + } + + /* vers.bytes[1] has its left nibble set already */ + vers.bytes[1] = vers.bytes[1] | scratch; + + current_char_p++; + + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else if (isreleasestate(*current_char_p)) { + goto release_state; + } else { + return 0; + } + } else { + return 0; + } + + +release_state: + + /***** + * Check for the release state. + */ + if (*current_char_p == '\0') { + vers.bytes[2] = VERS_release; + vers.bytes[3] = 0xff; + goto finish; + } else { + vers.bytes[2] = VERS_revision_for_string(¤t_char_p); + if (vers.bytes[2] == VERS_invalid) { + return 0; + } + } + + + /***** + * Get the nonrelease revision number (0..255). 
+ */ + if (vers.bytes[2] != VERS_release) { + UInt32 revision_num = 0; + int i; + + if (*current_char_p == '\0' || !isdigit(*current_char_p)) { + return 0; + } + for (i = 0; i < 3 && *current_char_p != '\0'; i++, current_char_p++) { + UInt8 scratch_digit; + scratch_digit = BCD_digit_for_char(*current_char_p); + if (scratch_digit == BCD_illegal) { + return 0; + } + revision_num *= 10; + revision_num += scratch_digit; + } + if (isdigit(*current_char_p) || revision_num > 255) { + return 0; + } + vers.bytes[3] = (UInt8)revision_num; + } + + if (vers.bytes[2] == VERS_release) { + vers.bytes[3] = 0xff; + } else { + if (vers.bytes[2] == VERS_candidate) { + if (vers.bytes[3] == 0) { + return 0; + } else { + vers.bytes[2] = VERS_release; + vers.bytes[3]--; + } + } + } + +finish: + *version_num = vers.vnum; + return result; +} + + +#define VERS_STRING_MAX_LEN (12) + +int VERS_string(char * buffer, UInt32 length, UInt32 vers) { + VERS_version version; + int cpos = 0; + int result = 1; + + char major1; + char major2; + char minor; + char bugfix; + + version.vnum = vers; + + /* No buffer, length less than longest possible vers string, + * return 0. + */ + if (!buffer || length < VERS_STRING_MAX_LEN) { + result = -1; + goto finish; + } + + bzero(buffer, length * sizeof(char)); + + + /***** + * Major version number. + */ + major1 = BCD_char_for_digit(BCD_get_left(version.bytes[0])); + if (major1 == '?') { + result = 0; + } /* this is not an 'else' situation */ + if (major1 != '0') { + buffer[cpos] = major1; + cpos++; + } + + major2 = BCD_char_for_digit(BCD_get_right(version.bytes[0])); + if (major2 == '?') { + result = 0; + } + + buffer[cpos] = major2; + cpos++; + + + /***** + * Minor & bug-fix version numbers. + */ + minor = BCD_char_for_digit(BCD_get_left(version.bytes[1])); + if (minor == '?') { + result = 0; + } + bugfix = BCD_char_for_digit(BCD_get_right(version.bytes[1])); + if (bugfix == '?') { + result = 0; + } + + + /* Always display the minor version number. 
+ */ + buffer[cpos] = '.'; + cpos++; + buffer[cpos] = minor; + cpos++; + + + /* Only display the bugfix version number if it's nonzero. + */ + if (bugfix != '0') { + buffer[cpos] = '.'; + cpos++; + buffer[cpos] = bugfix; + cpos++; + } + + + /* If the release state is final, we're done! + */ + if (version.bytes[2] == VERS_release && version.bytes[3] == 255) { + result = 0; + goto finish; + } + + + /***** + * Do the release state and update level. + */ + switch (version.bytes[2]) { + case VERS_development: + buffer[cpos] = 'd'; + cpos++; + break; + case VERS_alpha: + buffer[cpos] = 'a'; + cpos++; + break; + case VERS_beta: + buffer[cpos] = 'b'; + cpos++; + break; + case VERS_release: + if (version.bytes[3] < 255) { + buffer[cpos] = 'f'; + buffer[cpos+1] = 'c'; + cpos += 2; + } else { + result = 1; + goto finish; + } + break; + default: + result = 0; + buffer[cpos] = '?'; + cpos++; + break; + } + + if (version.bytes[2] != VERS_release) { + sprintf(&buffer[cpos], "%d", version.bytes[3]); + } else { + if (version.bytes[3] < 255) { + sprintf(&buffer[cpos], "%d", version.bytes[3] + 1); + } + } + +finish: + return result; +} diff --git a/makedefs/MakeInc.cmd b/makedefs/MakeInc.cmd new file mode 100644 index 000000000..6e8595691 --- /dev/null +++ b/makedefs/MakeInc.cmd @@ -0,0 +1,24 @@ +# +# Commands for the build environment +# +MIG = $(NEXT_ROOT)/usr/bin/mig + +MD= /usr/bin/md + +RM = /bin/rm -f +CP = /bin/cp +LN = /bin/ln -s +CAT = /bin/cat +MKDIR = /bin/mkdir -p + +TAR = /usr/bin/gnutar +STRIP = /usr/bin/strip +LIPO = /usr/bin/lipo + +BASENAME = /usr/bin/basename +RELPATH = $(NEXT_ROOT)/usr/local/bin/relpath +SEG_HACK = $(NEXT_ROOT)/usr/local/bin/seg_hack + +UNIFDEF = /usr/bin/unifdef +DECOMMENT = /usr/local/bin/decomment + diff --git a/makedefs/MakeInc.def b/makedefs/MakeInc.def new file mode 100644 index 000000000..dec553993 --- /dev/null +++ b/makedefs/MakeInc.def @@ -0,0 +1,262 @@ + +export SOURCE=$(shell /bin/pwd) + +# +# gnumake 3.77 support +# +export 
export USE_APPLE_PB_SUPPORT = all

#
# Incremental Build option
#
ifndef INCR_EXPORTHDRS
ifeq ($(shell test -d $$OBJROOT/EXPORT_HDRS;echo $$?),0)
export INCR_EXPORTHDRS = TRUE
else
export INCR_EXPORTHDRS = FALSE
endif
endif

ifndef INCR_INSTALLHDRS
# FIX(review): was `$$DSTROOT/$INCDIR` — make expands `$I` (empty) leaving
# the literal "NCDIR"; use the shell variable like $$DSTROOT/$$OBJROOT above.
ifeq ($(shell test -d $$DSTROOT/$$INCDIR;echo $$?),0)
export INCR_INSTALLHDRS = TRUE
else
export INCR_INSTALLHDRS = FALSE
endif
endif

#
# Component List
#
export COMPONENT_LIST = osfmk bsd iokit pexpert libkern libsa
export COMPONENT = $(firstword $(subst /, ,$(shell $(RELPATH) $(SRCROOT) $(SOURCE))))
export COMPONENT_IMPORT_LIST = $(filter-out $(COMPONENT),$(COMPONENT_LIST))

# Architecture options
#
# supported configurations : PPC I386
#
ifdef RC_ARCHS
export ARCH_CONFIGS = $(shell echo -n $(RC_ARCHS) | tr a-z A-Z)
else
ifndef ARCH_CONFIGS
export ARCH_CONFIGS = $(shell arch | tr a-z A-Z)
endif
endif
export ARCH_CONFIG = $(firstword $(ARCH_CONFIGS))
export arch_config = $(shell echo -n $(ARCH_CONFIG) | tr A-Z a-z)


# Kernel Configuration options
#
# supported configurations : RELEASE DEBUG PROFILE
#
ifndef KERNEL_CONFIGS
export KERNEL_CONFIGS = RELEASE
endif
export KERNEL_CONFIG = $(firstword $(KERNEL_CONFIGS))

#
# Kernel Configuration to install
#
# supported install architecture : PPC I386
#
export INSTALL_TYPE = RELEASE
ifdef RC_ARCHS
export INSTALL_ARCHS = $(ARCH_CONFIGS)
else
export INSTALL_ARCHS = $(ARCH_CONFIGS)
endif
export INSTALL_ARCH = $(ARCH_CONFIG)

export INSTALL_ARCH_DEFAULT = PPC

#
# Standard defines list
#
export DEFINES = -DAPPLE -DNeXT -DKERNEL_PRIVATE -D__MACHO__=1 -Dvolatile=__volatile $(IDENT)

#
# Compiler command
#
CC = /usr/bin/cc
KCC = /usr/bin/cc

#
# Default CFLAGS
#
ifdef RC_CFLAGS
export OTHER_CFLAGS = $(subst $(addprefix -arch ,$(RC_ARCHS)),,$(RC_CFLAGS))
endif

export CFLAGS_GEN = -static -g -nostdinc -nostdlib -traditional-cpp -fno-builtin -finline -fno-keep-inline-functions -msoft-float -fsigned-bitfields -Wpointer-arith $(OTHER_CFLAGS) -fpermissive

export CFLAGS_RELEASE =
export CFLAGS_DEBUG = -fno-omit-frame-pointer
export CFLAGS_PROFILE =

export CFLAGS_PPC = -arch ppc -Dppc -DPPC -D__PPC__ -D_BIG_ENDIAN=__BIG_ENDIAN__
export CFLAGS_I386 = -arch i386 -Di386 -DI386 -D__I386__ -D_BIG_ENDIAN=__LITTLE_ENDIAN__

export CFLAGS_RELEASEPPC = -O2 -mcpu=750 -fschedule-insns
export CFLAGS_RELEASE_TRACEPPC = -O2 -mcpu=750 -fschedule-insns
export CFLAGS_DEBUGPPC = -O1 -mcpu=750 -fschedule-insns
export CFLAGS_RELEASEI386 = -O2
export CFLAGS_DEBUGI386 = -O2

export CFLAGS = $(CFLAGS_GEN) \
	$($(addsuffix $(ARCH_CONFIG),CFLAGS_)) \
	$($(addsuffix $(KERNEL_CONFIG),CFLAGS_)) \
	$($(addsuffix $(ARCH_CONFIG), $(addsuffix $(KERNEL_CONFIG),CFLAGS_))) \
	$(DEFINES)

# Default CCFLAGS
# (we do not call it CPPFLAGS as that has a special meaning in unix tradition
# and in gcc: CPPFLAGS is for C Pre-Processor flags. CCFLAGS has precedent
# in ProjectBuilder because of the .cc extension)
#
CPPFLAGS_GEN = -x c++ -fno-rtti -fno-exceptions -fcheck-new -fvtable-thunks
CPPFLAGS_PPC =
CPPFLAGS_I386 =

# FIX(review): the per-arch/per-config terms referenced CCFLAGS_* which is
# never defined (only CPPFLAGS_PPC / CPPFLAGS_I386 exist above) — the
# suffix now matches the variables actually defined.
CPPFLAGS = $(CPPFLAGS_GEN) \
	$($(addsuffix $(ARCH_CONFIG),CPPFLAGS_)) \
	$($(addsuffix $(KERNEL_CONFIG),CPPFLAGS_))

#
# Assembler command
#
AS = /usr/bin/cc
S_KCC = /usr/bin/cc

#
# Default SFLAGS
#
export SFLAGS_GEN = -static -D__ASSEMBLER__ -force_cpusubtype_ALL $(OTHER_CFLAGS)

export SFLAGS_RELEASE =
export SFLAGS_DEBUG =
export SFLAGS_PROFILE =

export SFLAGS_PPC = $(CFLAGS_PPC)
export SFLAGS_I386 = $(CFLAGS_I386)

export SFLAGS = $(SFLAGS_GEN) \
	$($(addsuffix $(ARCH_CONFIG),SFLAGS_)) \
	$($(addsuffix $(KERNEL_CONFIG),SFLAGS_)) \
	$(DEFINES)

#
# Linker command
#
LD = /usr/bin/ld

#
# Default LDFLAGS
#
export LDFLAGS_COMPONENT_GEN = -static -r

export LDFLAGS_COMPONENT_RELEASE =
export LDFLAGS_COMPONENT_DEBUG =
export LDFLAGS_COMPONENT_PROFILE =

export LDFLAGS_COMPONENT_PPC = -arch ppc
export LDFLAGS_COMPONENT_I386 = -arch i386

export LDFLAGS_COMPONENT = $(LDFLAGS_COMPONENT_GEN) \
	$($(addsuffix $(ARCH_CONFIG),LDFLAGS_COMPONENT_)) \
	$($(addsuffix $(KERNEL_CONFIG),LDFLAGS_COMPONENT_))

export LDFLAGS_KERNEL_GEN = -static -force_cpusubtype_ALL -segalign 0x1000

export LDFLAGS_KERNEL_RELEASE =
#  -noseglinkedit
export LDFLAGS_KERNEL_DEBUG =
export LDFLAGS_KERNEL_PROFILE =

export LDFLAGS_KERNEL_PPC = -arch ppc -segaddr __VECTORS 0x0 -segaddr __TEXT 0x11000 -e __start -sectalign __DATA __common 0x1000 -sectalign __DATA __bss 0x1000
export LDFLAGS_KERNEL_I386 = -arch i386 -segaddr __TEXT 0x100000 -segaddr __LINKEDIT 0x780000 -e _pstart

export LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \
	$($(addsuffix $(ARCH_CONFIG),LDFLAGS_KERNEL_)) \
	$($(addsuffix $(KERNEL_CONFIG),LDFLAGS_KERNEL_))


#
# Default runtime libraries to be linked with the kernel
#
export LD_KERNEL_LIBS = -lcc_kext

#
# Default INCFLAGS
#
export INCFLAGS_IMPORT = $(patsubst %, -I$(OBJROOT)/EXPORT_HDRS/%, $(COMPONENT_IMPORT_LIST))
export INCFLAGS_EXTERN = -I$(OBJROOT)/EXTERN_HDRS -I$(SRCROOT)/EXTERNAL_HEADERS -I$(SRCROOT)/EXTERNAL_HEADERS/bsd
export INCFLAGS_GEN = -I$(SRCROOT)/$(COMPONENT)
export INCFLAGS_POSIX = -I$(OBJROOT)/EXPORT_HDRS/bsd
export INCFLAGS_LOCAL = -I.
+ +export INCFLAGS = -nostdinc $(INCFLAGS_LOCAL) $(INCFLAGS_GEN) $(INCFLAGS_IMPORT) $(INCFLAGS_EXTERN) $(INCFLAGS_MAKEFILE) + +# +# Default MIGFLAGS +# +export MIGFLAGS = $(DEFINES) $(INCFLAGS) $($(addsuffix $(ARCH_CONFIG),CFLAGS_)) + +# +# Default VPATH +# +empty:= +space:= $(empty) $(empty) +export VPATH_IMPORT = $(subst $(space),:,$(patsubst %,$(OBJROOT)/EXPORT_HDRS/%,$(strip $(COMPONENT_IMPORT_LIST)))): +export VPATH_EXTERN = $(OBJROOT)/EXTERN_HDRS: +export VPATH_GEN = .:$(SOURCE): + +export VPATH = $(VPATH_GEN)$(VPATH_IMPORT)$(VPATH_EXTERN)$(VPATH_MAKEFILE) + +# +# Macros that control installation of kernel and it's header files +# +# install flags for header files +# +INSTALL_FLAGS = -c -m 0444 + +# +# Header file destinations +# +FRAMEDIR = System/Library/Frameworks +ifndef INCDIR + INCDIR = /usr/include +endif +ifndef LCLDIR + LCLDIR = $(FRAMEDIR)/System.framework/Versions/B/PrivateHeaders +endif + +KINCVERS = A +KINCFRAME = $(FRAMEDIR)/Kernel.framework +KINCDIR = $(KINCFRAME)/Versions/$(KINCVERS)/Headers + +# +# Compononent Header file destinations +# +EXPDIR = EXPORT_HDRS/$(COMPONENT) + +# +# Strip Flags +# +export STRIP_FLAGS_RELEASE = -S +export STRIP_FLAGS_RELEASE_TRACE = -S +export STRIP_FLAGS_DEBUG = -S +export STRIP_FLAGS_PROFILE = -S + +export STRIP_FLAGS = $($(addsuffix $(KERNEL_CONFIG),STRIP_FLAGS_)) + +# +# This must be here before any rules are possibly defined by the +# machine dependent makefile fragment so that a plain "make" command +# always works. The config program will emit an appropriate rule to +# cause "all" to depend on every kernel configuration it generates. 
+# + +default: all + diff --git a/makedefs/MakeInc.dir b/makedefs/MakeInc.dir new file mode 100644 index 000000000..efa1cd788 --- /dev/null +++ b/makedefs/MakeInc.dir @@ -0,0 +1,455 @@ +# +# Install kernel header files +# +installhdrs: exporthdrs installhdrs_mi installhdrs_md + @echo "[ $(SRCROOT) ] make installhdrs installing Kernel.framework"; \ + kincpath=$(DSTROOT)/$(KINCDIR); \ + kframepath=$(DSTROOT)/$(KINCFRAME); \ + $(MKDIR) $$kincpath; \ + chmod -R +w $$kincpath; \ + for i in $(COMPONENT_LIST); do \ + if [ -d $(OBJROOT)/EXPORT_HDRS/$$i ]; then ( \ + cd $(OBJROOT)/EXPORT_HDRS/$$i; \ + pax -ruw -s '/.*CVS.*//' . $$kincpath || true ; \ + ) fi \ + done; \ + cd $(SRCROOT)/EXTERNAL_HEADERS; \ + pax -ruw -s '/.*CVS.*//' . $$kincpath || true; \ + cd $$kframepath/Versions; \ + [ -L Current ] || $(LN) $(KINCVERS) Current; \ + cd $$kframepath; \ + [ -L Headers ] || $(LN) Versions/Current/Headers Headers; \ + find $$kframepath -type f | xargs -s 32000 chmod a-w + +# +# Install header files order +# +.ORDER: installhdrs_mi installhdrs_md + +# +# Install machine independent header files +# +installhdrs_mi: SRCROOT DSTROOT OBJROOT + @echo "[ $(SOURCE) ] make installhdrs_mi "; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + kernel_config=$(INSTALL_TYPE); \ + arch_config=$(INSTALL_ARCH_DEFAULT); \ + installinc_dir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + $(MKDIR) $${installinc_dir}; \ + (cd $${installinc_dir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}/ \ + build_installhdrs_mi \ + ); + +# +# Install machine dependent kernel header files +# +installhdrs_md: SRCROOT DSTROOT OBJROOT + @echo "[ $(SOURCE) ] make installhdrs_md "; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + kernel_config=$(INSTALL_TYPE); \ + for arch_config in $(INSTALL_ARCHS); \ + do \ + $(MKDIR) 
${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + (cd ${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}/ \ + build_installhdrs_md \ + ); \ + done; + +# +# Install machine independent kernel header files +# +do_installhdrs_mi: + +build_installhdrs_mi:: + @echo "[ $(SOURCE) ] make build_installhdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + kernel_config=$(INSTALL_TYPE); \ + arch_config=$(ARCH_CONFIG); \ + for installinc_subdir in $(INSTINC_SUBDIRS); \ + do \ + $(MKDIR) $${installinc_subdir}; \ + (cd $${installinc_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=$(SOURCE)$${installinc_subdir}/Makefile \ + SOURCE=$(SOURCE)$${installinc_subdir}/ \ + TARGET=$(TARGET)$${installinc_subdir}/ \ + build_installhdrs_mi \ + ); \ + done; \ + ${MAKE} do_installhdrs_mi; + +# +# Install machine dependent kernel header files +# +do_installhdrs_md: + +build_installhdrs_md:: + @echo "[ $(SOURCE) ] make installhdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + kernel_config=$(KERNEL_CONFIG); \ + arch_config=$(ARCH_CONFIG); \ + for installinc_subdir in $($(addprefix INSTINC_SUBDIRS_, $(ARCH_CONFIG))); \ + do \ + $(MKDIR) $${installinc_subdir}; \ + (cd $${installinc_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=$(SOURCE)$${installinc_subdir}/Makefile \ + SOURCE=$(SOURCE)$${installinc_subdir}/ \ + TARGET=$(TARGET)$${installinc_subdir}/ \ + build_installhdrs_md \ + ); \ + done; \ + ${MAKE} do_installhdrs_md; + +# +# Install kernel header files +# +exporthdrs: exporthdrs_mi exporthdrs_md + +# +# Install header files order +# +.ORDER: exporthdrs_mi exporthdrs_md + +# +# Install machine independent header files +# +do_exporthdrs_mi: + +exporthdrs_mi: SRCROOT 
DSTROOT OBJROOT + @echo "[ ${SOURCE} ] make exporthdrs_mi "; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + kernel_config=$(INSTALL_TYPE); \ + arch_config=$(INSTALL_ARCH_DEFAULT); \ + exportinc_dir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + $(MKDIR) $${exportinc_dir}; \ + (cd $${exportinc_dir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=$${exportinc_dir}/ \ + build_exporthdrs_mi \ + ); + +# +# Install machine dependent kernel header files +# +exporthdrs_md: SRCROOT DSTROOT OBJROOT + @echo "[ $(SOURCE) ] make exporthdrs_md "; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + kernel_config=$(INSTALL_TYPE); \ + for arch_config in $(ARCH_CONFIGS); \ + do \ + exportinc_dir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + $(MKDIR) $${exportinc_dir}; \ + (cd $${exportinc_dir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=$${exportinc_dir}/ \ + build_exporthdrs_md \ + ); \ + done; + +# +# Install machine independent kernel header files +# +do_exporthdrs_mi: + +build_exporthdrs_mi: + @echo "[ $(SOURCE) ] make build_exporthdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + kernel_config=$(INSTALL_TYPE); \ + arch_config=$(ARCH_CONFIG); \ + for exportinc_subdir in $(EXPINC_SUBDIRS); \ + do \ + $(MKDIR) $${exportinc_subdir}; \ + (cd $${exportinc_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=$(SOURCE)$${exportinc_subdir}/Makefile \ + SOURCE=$(SOURCE)$${exportinc_subdir}/ \ + TARGET=$(TARGET)$${exportinc_subdir}/ \ + build_exporthdrs_mi \ + ); \ + done; \ + ${MAKE} do_exporthdrs_mi; + +# +# Install machine dependent kernel header files +# +do_exporthdrs_md: + +build_exporthdrs_md: + @echo "[ $(SOURCE) ] make exporthdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + 
kernel_config=$(KERNEL_CONFIG); \ + arch_config=$(ARCH_CONFIG); \ + for exportinc_subdir in $($(addprefix EXPINC_SUBDIRS_, $(ARCH_CONFIG))); \ + do \ + $(MKDIR) $${exportinc_subdir}; \ + (cd $${exportinc_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=$(SOURCE)$${exportinc_subdir}/Makefile \ + SOURCE=$(SOURCE)$${exportinc_subdir}/ \ + TARGET=$(TARGET)$${exportinc_subdir}/ \ + build_exporthdrs_md \ + ); \ + done; \ + ${MAKE} do_exporthdrs_md; + +# +# Setup pass for all architectures for all Configuration/Architecture options +# +setup: SRCROOT DSTROOT OBJROOT + @echo "[ $(SOURCE) ] make setup"; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + for kernel_config in $(KERNEL_CONFIGS); \ + do \ + for arch_config in $(ARCH_CONFIGS); \ + do \ + setup_subdir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + $(MKDIR) $${setup_subdir}; \ + (cd $${setup_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=$${setup_subdir}/ \ + build_setup \ + ); \ + done; \ + done; + +do_build_setup: + +build_setup: + @echo "[ $(SOURCE) ] make build_setup $(COMPONENT) $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + kernel_config=$(KERNEL_CONFIG); \ + arch_config=$(ARCH_CONFIG); \ + for setup_subdir in $(SETUP_SUBDIRS) $($(addprefix SETUP_SUBDIRS_, $(ARCH_CONFIG))); \ + do \ + $(MKDIR) $${setup_subdir}; \ + (cd $${setup_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/$${setup_subdir}/Makefile \ + SOURCE=${SOURCE}/$${setup_subdir}/ \ + TARGET=${TARGET}/$${setup_subdir}/ \ + build_setup \ + ); \ + done; \ + ${MAKE} do_build_setup; + + +# +# Build all architectures for all Configuration/Architecture options +# +all: SRCROOT DSTROOT OBJROOT + @echo "[ $(SOURCE) ] make all"; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + for kernel_config in 
$(KERNEL_CONFIGS); \ + do \ + for arch_config in $(ARCH_CONFIGS); \ + do \ + build_subdir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + $(MKDIR) $${build_subdir}; \ + (cd $${build_subdir}; \ + new_component=$(firstword $(subst /, ,$(subst $(shell $(RELPATH) -d / $(SRCROOT) $(SRCROOT)),,$(addprefix $(shell $(RELPATH) -d / $(SOURCE) $(SOURCE))/,$${rel_path}))));\ + target_subdir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${new_component}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=$${target_subdir}/ \ + build_all \ + ); \ + done; \ + done; + +# +# Build all architectures for all Configuration/Architecture options +# +do_build_all: + +build_all: + @echo "[ $(SOURCE) ] make build_all $(COMPONENT) $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + kernel_config=$(KERNEL_CONFIG); \ + arch_config=$(ARCH_CONFIG); \ + for comp_subdir in $(COMP_SUBDIRS) $($(addprefix COMP_SUBDIRS_, $(ARCH_CONFIG))); \ + do \ + $(MKDIR) $${comp_subdir}; \ + (cd $${comp_subdir}; \ + new_component=$(firstword $(subst /, ,$(subst $(shell $(RELPATH) -d / $(SRCROOT) $(SRCROOT)),,$(addprefix $(shell $(RELPATH) -d / $(SOURCE) $(SOURCE))/, $${comp_subdir}))));\ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/$${comp_subdir}/Makefile \ + SOURCE=${SOURCE}$${comp_subdir}/ \ + TARGET=${OBJROOT}/$${kernel_config}_$${arch_config}/$${new_component} \ + build_all \ + ); \ + done; \ + ${MAKE} do_build_all; + +# +# Build all architectures for all Configuration/Architecture options +# +mach_kernel: SRCROOT DSTROOT OBJROOT + @echo "[ $(SOURCE) ] make mach_kernel"; \ + for kernel_config in $(KERNEL_CONFIGS); \ + do \ + for arch_config in $(ARCH_CONFIGS); \ + do \ + build_subdir=${OBJROOT}/$${kernel_config}_$${arch_config}; \ + $(MKDIR) $${build_subdir}; \ + (cd $${build_subdir}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + 
ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=$${build_subdir}/ \ + build_mach_kernel \ + ); \ + done; \ + done; + +# +# Build all architectures for all Configuration/Architecture options +# +do_build_mach_kernel: + +build_mach_kernel: + @echo "[ $(SOURCE) ] make build_mach_kernel $(COMPONENT) $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + ${MAKE} do_build_mach_kernel; + +# +# +# Install dependencies order +# +.ORDER: SRCROOT DSTROOT OBJROOT SYMROOT installhdrs exporthdrs all + +# +# Install kernel based on RC_ARCHS for all INSTALL_TYPES +# Install kernel header files based on RC_ARCHS +# +install: SRCROOT DSTROOT OBJROOT SYMROOT installhdrs all + @echo "[ $(SOURCE) ] make install"; \ + rel_path=$(shell $(RELPATH) $(SRCROOT) $(SOURCE)); \ + for kernel_config in $(INSTALL_TYPE); \ + do \ + for arch_config in $(INSTALL_ARCHS); \ + do \ + install_subdir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${rel_path}; \ + $(MKDIR) $${install_subdir}; \ + (cd $${install_subdir}; \ + new_component=$(firstword $(subst /, ,$(subst $(shell $(RELPATH) -d / $(SRCROOT) $(SRCROOT)),,$(addprefix $(shell $(RELPATH) -d / $(SOURCE) $(SOURCE))/,$${rel_path}))));\ + target_subdir=${OBJROOT}/$${kernel_config}_$${arch_config}/$${new_component}; \ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/Makefile \ + SOURCE=${SOURCE}/ \ + TARGET=$${target_subdir}/ \ + build_install \ + ); \ + done; \ + done; + +# +# Install for all architectures for all Configuration/Architecture options +# +setup_build_install: + +do_build_install: + +build_install: + @echo "[ $(SOURCE) ] make build_install $(COMPONENT) $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + ${MAKE} setup_build_install; \ + kernel_config=$(KERNEL_CONFIG); \ + arch_config=$(ARCH_CONFIG); \ + for install_subdir in $(INST_SUBDIRS); \ + do \ + $(MKDIR) $${install_subdir}; \ + (cd $${install_subdir}; \ + new_component=$(firstword $(subst /, ,$(subst 
$(shell $(RELPATH) -d / $(SRCROOT) $(SRCROOT)),,$(addprefix $(shell $(RELPATH) -d / $(SOURCE) $(SOURCE))/, $${comp_subdir}))));\ + ${MAKE} KERNEL_CONFIG=$${kernel_config} \ + ARCH_CONFIG=$${arch_config} \ + MAKEFILES=${SOURCE}/$${install_subdir}/Makefile \ + SOURCE=${SOURCE}$${install_subdir}/ \ + TARGET=${OBJROOT}/$${kernel_config}_$${arch_config}/$${new_component} \ + build_install \ + ); \ + done; \ + ${MAKE} do_build_install; + + +# +# Install source tree +# +installsrc: SRCROOT + pax -rw . ${SRCROOT} + + + +# +# Clean up source tree +# +clean: SRCROOT + +# +# Build source file list for cscope database and tags +# +cscope.files: + @echo "Building file list for cscope and tags" + @find . -name '*.h' -type f > _cscope.files 2> /dev/null + @echo bsd/ufs/ufs/ufs_readwrite.c >> _cscope.files 2> /dev/null + @for i in `echo ${ALL_SUBDIRS}`; \ + do \ + cat ${SRCROOT}/$${i}/conf/files ${SRCROOT}/$${i}/conf/files.ppc; \ + cat ${SRCROOT}/$${i}/conf/files.i386; \ + done | \ + sed -e '/^#/d' -e '/^[ ]*$$/d' -e '/^OPTIONS\//d' | \ + sed -e '1,$$s/^\([^ ]*\)[ ].*$$/\1/' >> _cscope.files 2> /dev/null + @sort < _cscope.files > cscope.files 2> /dev/null + @rm -f _cscope.files 2> /dev/null + +# +# Build cscope database +# +cscope: cscope.files + @echo "Building cscope database" + @cscope -b 2> /dev/null + +# +# Build tags +# + +tags: cscope.files + @echo "Building ctags" + @-xargs ctags -dtw < cscope.files 2> /dev/null || \ + echo "Phantom files detected!" 
2>&1 > /dev/null + @echo "Building etags" + @-cat cscope.files | etags -l auto -S - 2> /dev/null + + +SRCROOT DSTROOT OBJROOT SYMROOT: ALWAYS + @if [ -n "${$@}" ]; \ + then \ + exit 0; \ + else \ + echo Must define $@; \ + exit 1; \ + fi + +ALWAYS: + diff --git a/makedefs/MakeInc.rule b/makedefs/MakeInc.rule new file mode 100644 index 000000000..ed02f4b6d --- /dev/null +++ b/makedefs/MakeInc.rule @@ -0,0 +1,490 @@ +# +# Generic Install rules +# + +# +# gvdl: Let's optionally have a different list of local installs and +# regular installs. +# +ifndef INSTALL_MI_LCL_LIST + INSTALL_MI_LCL_LIST = $(INSTALL_MI_LIST) +endif + +ifndef INSTALL_MI_LCL_GEN_LIST + INSTALL_MI_LCL_GEN_LIST = $(INSTALL_MI_GEN_LIST) +endif + +ifndef INSTALL_MD_LCL_LIST + INSTALL_MD_LCL_LIST = $(INSTALL_MD_LIST) +endif + +ifndef INSTALL_MD_LCL_GEN_LIST + INSTALL_MD_LCL_GEN_LIST = $(INSTALL_MD_GEN_LIST) +endif + +ifeq ($(INCR_INSTALLHDRS), TRUE) + +INSTALL_MI_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR)/, $(INSTALL_MI_LIST)) +INSTALL_MI_GEN_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR)/, $(INSTALL_MI_GEN_LIST)) +INSTALL_MI_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(INSTALL_MI_LCL_LIST)) +INSTALL_MI_GEN_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(INSTALL_MI_LCL_GEN_LIST)) + +$(INSTALL_MI_INC_FILES) $(INSTALL_MI_GEN_INC_FILES): $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR); \ + filename=`$(BASENAME) $<`; \ + filename_strip=$(addsuffix .strip,$${filename}); \ + $(RM) $(RMFLAGS) $@; \ + $(MKDIR) ./incdir; \ + echo garbage > ./incdir/$${filename_strip}; \ + $(UNIFDEF) -UKERNEL_PRIVATE -UDRIVER_PRIVATE \ + $< > ./incdir/$${filename} || \ + $(DECOMMENT) ./incdir/$${filename} r > \ + ./incdir/$${filename_strip}; \ + if [ -s ./incdir/$${filename_strip} ]; \ + then ( \ + install $(INSTALL_FLAGS) ./incdir/$${filename} $(dir $@);\ + ); \ 
 else \ + echo Header file $< not exported; \ + fi; \ + $(RM) -rf ./incdir; + +$(INSTALL_MI_LCL_FILES) $(INSTALL_MI_GEN_LCL_FILES): $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR); \ + $(RM) $(RMFLAGS) $@; \ + install $(INSTALL_FLAGS) $< $(dir $@); + +INSTALL_MD_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR)/, $(INSTALL_MD_LIST)) +INSTALL_MD_GEN_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR)/, $(INSTALL_MD_GEN_LIST)) +INSTALL_MD_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(INSTALL_MD_LCL_LIST)) +INSTALL_MD_GEN_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(INSTALL_MD_LCL_GEN_LIST)) + +$(INSTALL_MD_INC_FILES) $(INSTALL_MD_GEN_INC_FILES): $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR); \ + filename=`$(BASENAME) $<`; \ + filename_strip=$(addsuffix .strip,$${filename}); \ + $(RM) $(RMFLAGS) $@; \ + $(MKDIR) ./incdir; \ + echo garbage > ./incdir/$${filename_strip}; \ + $(UNIFDEF) -UKERNEL_PRIVATE -UDRIVER_PRIVATE \ + $< > ./incdir/$${filename} || \ + $(DECOMMENT) ./incdir/$${filename} r > \ + ./incdir/$${filename_strip}; \ + if [ -s ./incdir/$${filename_strip} ]; \ + then ( \ + install $(INSTALL_FLAGS) ./incdir/$${filename} $(dir $@);\ + ); \ + else \ + echo Header file $< not exported; \ + fi; \ + $(RM) -rf ./incdir; + +$(INSTALL_MD_LCL_FILES) $(INSTALL_MD_GEN_LCL_FILES): $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR); \ + $(RM) $(RMFLAGS) $@; \ + install $(INSTALL_FLAGS) $< $(dir $@); + +setup_installhdrs_mi: + @echo "[ $(SOURCE) ] make setup_installhdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_installhdrs_mi: $(INSTALL_MI_INC_FILES) $(INSTALL_MI_GEN_INC_FILES) $(INSTALL_MI_LCL_FILES) $(INSTALL_MI_GEN_LCL_FILES) + 
@echo "[ $(SOURCE) ] make do_installhdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_installhdrs_md: + @echo "[ $(SOURCE) ] make setup_installhdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_installhdrs_md: $(INSTALL_MD_INC_FILES) $(INSTALL_MD_GEN_INC_FILES) $(INSTALL_MD_LCL_FILES) $(INSTALL_MD_GEN_LCL_FILES) + @echo "[ $(SOURCE) ] make do_installhdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +else + +INSTALL_MI_INC_FILES = $(addprefix $(SOURCE), $(INSTALL_MI_LIST)) +INSTALL_MI_GEN_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR)/, $(INSTALL_MI_GEN_LIST)) + +INSTALL_MI_LCL_FILES = $(addprefix $(SOURCE), $(INSTALL_MI_LCL_LIST)) +INSTALL_MI_GEN_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/, $(INSTALL_MI_LCL_GEN_LIST)) + + +$(INSTALL_MI_GEN_INC_FILES): $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR); \ + filename=`$(BASENAME) $<`; \ + filename_strip=$(addsuffix .strip,$${filename}); \ + $(RM) $(RMFLAGS) $@; \ + $(MKDIR) ./incdir; \ + echo garbage > ./incdir/$${filename_strip}; \ + $(UNIFDEF) -UKERNEL_PRIVATE -UDRIVER_PRIVATE \ + $< > ./incdir/$${filename} || \ + $(DECOMMENT) ./incdir/$${filename} r > \ + ./incdir/$${filename_strip}; \ + if [ -s ./incdir/$${filename_strip} ]; \ + then ( \ + install $(INSTALL_FLAGS) ./incdir/$${filename} $(dir $@);\ + ); \ + else \ + echo Header file $< not exported; \ + fi; \ + $(RM) -rf ./incdir; + +$(INSTALL_MI_GEN_LCL_FILES): $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR); \ + $(RM) $(RMFLAGS) $@; \ + install $(INSTALL_FLAGS) $< $(dir $@); + +INSTALL_MD_INC_FILES = $(addprefix $(SOURCE), $(INSTALL_MD_LIST)) +INSTALL_MD_GEN_INC_FILES = $(addprefix $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR)/, $(INSTALL_MD_GEN_LIST)) + +INSTALL_MD_LCL_FILES = $(addprefix $(SOURCE), $(INSTALL_MD_LCL_LIST)) 
+INSTALL_MD_GEN_LCL_FILES = $(addprefix $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/, $(INSTALL_MD_LCL_GEN_LIST)) + +$(INSTALL_MD_GEN_INC_FILES): $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR); \ + filename=`$(BASENAME) $<`; \ + filename_strip=$(addsuffix .strip,$${filename}); \ + $(RM) $(RMFLAGS) $@; \ + $(MKDIR) ./incdir; \ + echo garbage > ./incdir/$${filename_strip}; \ + $(UNIFDEF) -UKERNEL_PRIVATE -UDRIVER_PRIVATE \ + $< > ./incdir/$${filename} || \ + $(DECOMMENT) ./incdir/$${filename} r > \ + ./incdir/$${filename_strip}; \ + if [ -s ./incdir/$${filename_strip} ]; \ + then ( \ + install $(INSTALL_FLAGS) ./incdir/$${filename} $(dir $@);\ + ); \ + else \ + echo Header file $< not exported; \ + fi; \ + $(RM) -rf ./incdir; + +$(INSTALL_MD_GEN_LCL_FILES): $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR)/% : % + @true echo Installing $< in $(dir $@); \ + $(MKDIR) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR); \ + $(RM) $(RMFLAGS) $@; \ + install $(INSTALL_FLAGS) $< $(dir $@); + +setup_installhdrs_mi: + @echo "[ $(SOURCE) ] make setup_installhdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_installhdrs_mi: $(INSTALL_MI_GEN_INC_FILES) $(INSTALL_MI_GEN_LCL_FILES) + @echo "[ $(SOURCE) ] make do_installhdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + @if [ -n "$(strip $(INSTALL_MI_LIST))" ]; then \ + if [ -d $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR) ]; then \ + (cd $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR);$(RM) $(RMFLAGS) $(INSTALL_MI_LIST) ); \ + else \ + $(MKDIR) $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR); \ + fi; \ + $(MKDIR) ./incdir; \ + for j in $(INSTALL_MI_LIST); \ + do \ + echo garbage > ./incdir/$$j.strip; \ + $(UNIFDEF) -UKERNEL_PRIVATE -UDRIVER_PRIVATE \ + $(SOURCE)/$$j > ./incdir/$$j || \ + $(DECOMMENT) ./incdir/$$j r > \ + ./incdir/$$j.strip; \ + if [ -s ./incdir/$$j.strip ]; \ + then ( \ + install $(INSTALL_FLAGS) ./incdir/$$j $(DSTROOT)/$(INCDIR)/$(INSTALL_MI_DIR); \ + ); \ + else \ + 
echo Header file $$j not exported; \ + fi; \ + done; \ + $(RM) -rf ./incdir; \ + fi + @if [ -n "$(strip $(INSTALL_MI_LCL_LIST))" ]; then \ + if [ -d $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR) ]; then \ + (cd $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR);$(RM) $(RMFLAGS) $(INSTALL_MI_LCL_LIST) ); \ + else \ + $(MKDIR) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR); \ + fi; \ + install $(INSTALL_FLAGS) $(INSTALL_MI_LCL_FILES) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MI_DIR); \ + fi + +setup_installhdrs_md: + @echo "[ $(SOURCE) ] make setup_installhdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_installhdrs_md: $(INSTALL_MD_GEN_INC_FILES) $(INSTALL_MD_GEN_LCL_FILES) + @echo "[ $(SOURCE) ] make do_installhdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + @if [ -n "$(strip $(INSTALL_MD_LIST))" ]; then \ + if [ -d $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR) ]; then \ + (cd $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR);$(RM) $(RMFLAGS) $(INSTALL_MD_LIST) ); \ + else \ + $(MKDIR) $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR); \ + fi; \ + $(MKDIR) ./incdir; \ + for j in $(INSTALL_MD_LIST); \ + do \ + echo garbage > ./incdir/$$j.strip; \ + $(UNIFDEF) -UKERNEL_PRIVATE -UDRIVER_PRIVATE \ + $(SOURCE)/$$j > ./incdir/$$j || \ + $(DECOMMENT) ./incdir/$$j r > \ + ./incdir/$$j.strip; \ + if [ -s ./incdir/$$j.strip ]; \ + then ( \ + install $(INSTALL_FLAGS) ./incdir/$$j $(DSTROOT)/$(INCDIR)/$(INSTALL_MD_DIR); \ + ); \ + else \ + echo Header file $$j not exported; \ + fi; \ + done; \ + $(RM) -rf ./incdir; \ + fi + @if [ -n "$(strip $(INSTALL_MD_LCL_LIST))" ]; then \ + if [ -d $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR) ]; then \ + (cd $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR);$(RM) $(RMFLAGS) $(INSTALL_MD_LCL_LIST) ); \ + else \ + $(MKDIR) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR); \ + fi; \ + install $(INSTALL_FLAGS) $(INSTALL_MD_LCL_FILES) $(DSTROOT)/$(LCLDIR)/$(INSTALL_MD_DIR); \ + fi + + +endif + +# +# Generic Export rules +# +ifeq ($(INCR_EXPORTHDRS), TRUE) + +EXPORT_MI_INC_FILES = $(addprefix 
$(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/, $(EXPORT_MI_LIST)) +EXPORT_MI_GEN_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/, $(EXPORT_MI_GEN_LIST)) + +$(EXPORT_MI_INC_FILES) $(EXPORT_MI_GEN_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/% : % + @true echo Exporting $< in $(dir $@); \ + $(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR); \ + ${CP} -p $< $(dir $@); \ + + +EXPORT_MD_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/, $(EXPORT_MD_LIST)) +EXPORT_MD_GEN_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/, $(EXPORT_MD_GEN_LIST)) + +$(EXPORT_MD_INC_FILES) $(EXPORT_MD_GEN_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/% : % + @true echo Exporting $< in $(dir $@); \ + $(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR); \ + ${CP} -p $< $(dir $@); \ + +setup_exporthdrs_mi: + @echo "[ $(SOURCE) ] make setup_exporthdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_exporthdrs_mi: $(EXPORT_MI_INC_FILES) $(EXPORT_MI_GEN_INC_FILES) + @echo "[ $(SOURCE) ] make do_exporthdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_exporthdrs_md: + @echo "[ $(SOURCE) ] make setup_exporthdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_exporthdrs_md: $(EXPORT_MD_INC_FILES) $(EXPORT_MD_GEN_INC_FILES) + @echo "[ $(SOURCE) ] make do_exporthdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +else + +EXPORT_MI_INC_FILES = $(addprefix $(SOURCE), $(EXPORT_MI_LIST)) +EXPORT_MI_GEN_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/, $(EXPORT_MI_GEN_LIST)) + +$(EXPORT_MI_GEN_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR)/% : % + @true echo Exporting $< in $(dir $@); \ + $(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR); \ + ${CP} -p $< $(dir $@); \ + + +EXPORT_MD_INC_FILES = $(addprefix $(SOURCE), $(EXPORT_MD_LIST)) +EXPORT_MD_GEN_INC_FILES = $(addprefix $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/, $(EXPORT_MD_GEN_LIST)) + +$(EXPORT_MD_GEN_INC_FILES): $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR)/% : % + @true echo Exporting $< in 
$(dir $@); \ + $(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR); \ + ${CP} -p $< $(dir $@); \ + +setup_exporthdrs_mi: + @echo "[ $(SOURCE) ] make setup_exporthdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_exporthdrs_mi: $(EXPORT_MI_GEN_INC_FILES) + @echo "[ $(SOURCE) ] make do_exporthdrs_mi $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + @if [ -n "$(strip $(EXPORT_MI_LIST))" ]; then \ + if [ -d $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR) ]; then \ + (cd $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR);$(RM) $(RMFLAGS) $(EXPORT_MI_LIST) ); \ + else \ + $(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR); \ + fi; \ + ${CP} -p $(EXPORT_MI_INC_FILES) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MI_DIR); \ + fi + +setup_exporthdrs_md: + @echo "[ $(SOURCE) ] make setup_exporthdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_exporthdrs_md: $(EXPORT_MD_GEN_INC_FILES) + @echo "[ $(SOURCE) ] make do_exporthdrs_md $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + @if [ -n "$(strip $(EXPORT_MD_LIST))" ]; then \ + if [ -d $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR) ]; then \ + (cd $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR);$(RM) $(RMFLAGS) $(EXPORT_MD_LIST) ); \ + else \ + $(MKDIR) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR); \ + fi; \ + ${CP} -p $(EXPORT_MD_INC_FILES) $(OBJROOT)/$(EXPDIR)/$(EXPORT_MD_DIR); \ + fi + + +# +endif +# +# Generic Compilation rules +# + +# +# Compilation rules to generate .o from .s +# +COMP_SOBJ_FILES = $(addprefix $(TARGET)$(COMP_OBJ_DIR), $(COMP_SOBJ_LIST)) + +$(COMP_SOBJ_FILES): $(TARGET)$(COMP_OBJ_DIR)%.o : %.s + ${S_KCC} -E -MD ${SFLAGS} -DASSEMBLER $(INCFLAGS) $< > $(patsubst %.o, %.pp, ${@}); + sed '/^\#/d' $(patsubst %.o, %.pp, ${@}) > $(patsubst %.o, %.s, ${@}); + ${S_KCC} ${SFLAGS} -m${arch_config} ${_HOST_AS_FLAGS} -c $(patsubst %.o, %.s, ${@}); + ${RM} ${_RMFLAGS_} $(patsubst %.o, %.pp, ${@}) $(patsubst %.o,%.s,${@}); + +S_RULE_1A=@ls / +S_RULE_1B= ${patsubst %.o,%.s,${@}} > /dev/null +S_RULE_2= ${S_KCC} -E -MD ${SFLAGS} -DASSEMBLER $(INCFLAGS) $< \ + > $(patsubst %.o, %.pp, ${@}); \ 
+ sed '/^\#/d' $(patsubst %.o, %.pp, ${@}) > $(patsubst %.o, %.s, ${@}); +S_RULE_3= ${S_KCC} ${SFLAGS} -m${arch_config} ${_HOST_AS_FLAGS} -c $(patsubst %.o, %.s, ${@});\ + ${RM} ${_RMFLAGS_} $(patsubst %.o, %.pp, ${@}) $(patsubst %.o,%.s,${@}) + +# +# Compilation rules to generate .o from .c +# +COMP_COBJ_FILES = $(addprefix $(TARGET)$(COMP_OBJ_DIR), $(COMP_COBJ_LIST)) + +$(COMP_COBJ_FILES): $(TARGET)$(COMP_OBJ_DIR)%.o : %.c + ${KCC} -c ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} $< + +# +# Compilation rules to generate .o from .c for normal files +# +C_RULE_1A=${KCC} -c ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} +C_RULE_1B=$*.c +C_RULE_2= +C_RULE_3= +C_RULE_4= + +# +# Compilation rules to generate .o from .c for driver files +# +C_RULE_1A_D=${C_RULE_1A} +C_RULE_1B_D=${C_RULE_1B} +C_RULE_2_D=${C_RULE_2} +C_RULE_3_D=${C_RULE_3} +C_RULE_4_D=${C_RULE_4} + +# +# Compilation rules to generate .o from .m +# +M_RULE_1A=${KCC} -c ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} +M_RULE_1B=$*.m +M_RULE_2= +M_RULE_3= +M_RULE_4= + +# +# Compilation rules to generate .co from .cp or .cpo from .cpp +# The config tool slickly changes the last source filename char to 'o' +# for the object filename. 
+# +P_RULE_1A=${KCC} -o $@ -c ${CPPFLAGS} ${CFLAGS} -MD ${${join $@,_CFLAGS}} ${INCFLAGS} ${${join $@,_INCFLAGS}} +P_RULE_1B=$( $(@:.cpo=.d~) && mv $(@:.cpo=.d~) $(@:.cpo=.d) +P_RULE_3= +P_RULE_4= + +# +# Linker rule to generate a component +# +LD_COMPONENT_OBJ_FILES = $(addprefix $(TARGET)$(COMP_OBJ_DIR), $(LD_COMPONENT_OBJ_LIST)) + +COMPONENT_IMAGE_FILE = $(addprefix $(TARGET), $(COMPONENT_IMAGE)) + +$(COMPONENT_IMAGE_FILE): $(LD_COMPONENT_OBJ_FILES) + @echo "[ creating $(COMPONENT_IMAGE) ]" + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT_IMAGE_FILE) ${LD_COMPONENT_OBJ_FILES}; + (cd $(TARGET)$(COMP_OBJ_DIR); ${MD} -u Makedep -f -d `ls *.d`); + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_all: $(COMP_FILES) $(COMP_COBJ_FILES) $(COMP_SOBJ_FILES) $(COMPONENT_IMAGE_FILE) + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +ifeq ($(shell $(RELPATH) $(SRCROOT) $(SOURCE)), .) +do_build_all: do_build_mach_kernel +endif + +# +# mach_kernel building rules +# +MK_COMPONENT_OBJ_FILES = $(addprefix $(TARGET),$(foreach component,$(COMPONENT_LIST), $(addprefix $(component)/$(firstword $($(addsuffix _KERNEL_CONFIG, $(shell echo -n $(component) | tr a-z A-Z))) $(KERNEL_CONFIG))/, $(addsuffix .o, $(component))))) + +do_build_mach_kernel: + @echo "[ building mach_kernel ]" + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/kernel_newvers \ + "`${CAT} $(SRCROOT)/osfmk/conf/kernelversion.major`" \ + "`${CAT} $(SRCROOT)/osfmk/conf/kernelversion.minor`" \ + "`${CAT} $(SRCROOT)/osfmk/conf/kernelversion.variant`" + ${KCC} $(CFLAGS) $(INCLUDES) -c kernel_vers.c + $(LD) $(LDFLAGS_KERNEL) $(MK_COMPONENT_OBJ_FILES) kernel_vers.o -o $(TARGET)mach_kernel.sys $(LD_KERNEL_LIBS) + $(STRIP) $(STRIP_FLAGS) $(TARGET)mach_kernel.sys -o $(TARGET)mach_kernel +# +# Generic Install rules +# +INSTALL_FILE_FILES = $(addprefix $(DSTROOT)$(INSTALL_FILE_DIR), $(INSTALL_FILE_LIST)) + +force_file_install: + 
+$(INSTALL_FILE_FILES): $(DSTROOT)$(INSTALL_FILE_DIR)% : $(TARGET)% force_file_install + @echo Installing $< in $@; + @$(MKDIR) $(DSTROOT)$(INSTALL_FILE_DIR); \ + if [ "`echo $(INSTALL_ARCHS) | wc -w`" -eq 1 ]; then \ + $(RM) $(RMFLAGS) $@; \ + install $(INSTALL_FLAGS) $< $(dir $@); \ + else \ + if [ ! -e $@ ]; then \ + echo >empty_file; \ + lipo_arg="$(subst _empty_file, empty_file,$(foreach lipo_arch,$(shell echo -n $(INSTALL_ARCHS) | tr A-Z a-z), $(addprefix -arch , $(addsuffix _empty_file, $(lipo_arch)))))"; \ + $(LIPO) $${lipo_arg} -create -output $@; \ + $(RM) $(RMFLAGS) empty_file; \ + fi; \ + $(LIPO) $@ -replace $(shell echo -n $(ARCH_CONFIG) | tr A-Z a-z) $< -o $@; \ + fi + +INSTALL_FILESYS_FILES = $(addprefix $(SYMROOT)$(INSTALL_FILE_DIR), $(INSTALL_FILE_LIST)) + +force_filesys_install: + +$(INSTALL_FILESYS_FILES): $(SYMROOT)$(INSTALL_FILE_DIR)% : $(TARGET)%.sys force_filesys_install + @echo Installing $< in $@; + @$(MKDIR) $(SYMROOT)$(INSTALL_FILE_DIR); \ + if [ "`echo $(INSTALL_ARCHS) | wc -w`" -eq 1 ]; then \ + $(RM) $(RMFLAGS) $@; \ + install $(INSTALL_FLAGS) $< $(dir $@); \ + else \ + if [ ! 
-e $@ ]; then \ + echo >empty_file; \ + lipo_arg="$(subst _empty_file, empty_file,$(foreach lipo_arch,$(shell echo -n $(INSTALL_ARCHS) | tr A-Z a-z), $(addprefix -arch , $(addsuffix _empty_file, $(lipo_arch)))))"; \ + $(LIPO) $${lipo_arg} -create -output $@; \ + $(RM) $(RMFLAGS) empty_file; \ + fi; \ + $(LIPO) $@ -replace $(shell echo -n $(ARCH_CONFIG) | tr A-Z a-z) $< -o $@; \ + fi + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_install $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: $(INSTALL_FILESYS_FILES) $(INSTALL_FILE_FILES) + @echo "[ $(SOURCE) ] make do_build_install $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +-include Makedep + diff --git a/osfmk/.gdbinit b/osfmk/.gdbinit new file mode 100644 index 000000000..144e2b894 --- /dev/null +++ b/osfmk/.gdbinit @@ -0,0 +1,1152 @@ +# +# Kernel gdb macros +# +# These gdb macros should be useful during kernel development in +# determining what's going on in the kernel. +# +# All the convenience variables used by these macros begin with $kgm_ + +set $kgm_vers = 2 + +echo Loading Kernel GDB Macros package. Type "help kgm" for more info.\n + +define kgm +printf "These are the kernel gdb macros version %d. ", $kgm_vers +echo Type "help kgm" for more info.\n +end + +document kgm +| These are the kernel gdb macros. These gdb macros are intended to be +| used when debugging a remote kernel via the kdp protocol. 
Typically, you +| would connect to your remote target like so: +| (gdb) target remote-kdp +| (gdb) attach +| +| The following macros are available in this package: +| +| showalltasks Display a summary listing of tasks +| showallacts Display a summary listing of all activations +| showallstacks Display the kernel stacks for all activations +| showallvm Display a summary listing of all the vm maps +| showallvme Display a summary listing of all the vm map entries +| showallipc Display a summary listing of all the ipc spaces +| showallrights Display a summary listing of all the ipc rights +| showallkmods Display a summary listing of all the kernel modules +| +| showtask Display status of the specified task +| showtaskacts Display the status of all activations in the task +| showtaskstacks Display all kernel stacks for all activations in the task +| showtaskvm Display status of the specified task's vm_map +| showtaskvme Display a summary list of the task's vm_map entries +| showtaskipc Display status of the specified task's ipc space +| showtaskrights Display a summary list of the task's ipc space entries +| +| showact Display status of the specified thread activation +| showactstack Display the kernel stack for the specified activation +| +| showmap Display the status of the specified vm_map +| showmapvme Display a summary list of the specified vm_map's entries +| +| showipc Display the status of the specified ipc space +| showrights Display a summary list of all the rights in an ipc space +| +| showpid Display the status of the process identified by pid +| showproc Display the status of the process identified by a proc pointer +| +| showkmod Display information about a kernel module +| showkmodaddr Given an address, display the kernel module and offset +| +| zprint Display zone information +| +| Type "help " for more specific help on a particular macro. +| Type "show user " to see what the macro is really doing. 
+end + + +define showkmodheader + printf "kmod address size " + printf "id refs version name\n" +end + +define showkmodint + set $kgm_kmodp = (struct kmod_info *)$arg0 + printf "0x%08x ", $arg0 + printf "0x%08x ", $kgm_kmodp->address + printf "0x%08x ", $kgm_kmodp->size + printf "%3d ", $kgm_kmodp->id + printf "%5d ", $kgm_kmodp->reference_count + printf "%10s ", &$kgm_kmodp->version + printf "%s\n", &$kgm_kmodp->name +end + +set $kgm_kmodmin = 0xffffffff +set $kgm_fkmodmin = 0x00000000 +set $kgm_kmodmax = 0x00000000 +set $kgm_fkmodmax = 0xffffffff +set $kgm_pkmod = 0 +set $kgm_pkmodst = 0 +set $kgm_pkmoden = 0 +define showkmodaddr + printf "0x%x" , $arg0 + if ((unsigned int)$arg0 >= (unsigned int)$kgm_pkmodst) && ((unsigned int)$arg0 <= (unsigned int)$kgm_pkmoden) + set $kgm_off = ((unsigned int)$arg0 - (unsigned int)$kgm_pkmodst) + printf " <%s + 0x%x>", $kgm_pkmod->name, $kgm_off + else + if ((unsigned int)$arg0 <= (unsigned int)$kgm_fkmodmax) && ((unsigned int)$arg0 >= (unsigned int)$kgm_fkmodmin) + set $kgm_kmodp = (struct kmod_info *)kmod + while $kgm_kmodp + set $kgm_kmod = *$kgm_kmodp + if $kgm_kmod.address && ($kgm_kmod.address < $kgm_kmodmin) + set $kgm_kmodmin = $kgm_kmod.address + end + if ($kgm_kmod.address + $kgm_kmod.size) > $kgm_kmodmax + set $kgm_kmodmax = $kgm_kmod.address + end + set $kgm_off = ((unsigned int)$arg0 - (unsigned int)$kgm_kmod.address) + if ($kgm_kmod.address <= $arg0) && ($kgm_off <= $kgm_kmod.size) + printf " <%s + 0x%x>", $kgm_kmodp->name, $kgm_off + set $kgm_pkmod = $kgm_kmodp + set $kgm_pkmodst = $kgm_kmod.address + set $kgm_pkmoden = $kgm_pkmodst + $kgm_kmod.size + set $kgm_kmodp = 0 + else + set $kgm_kmodp = $kgm_kmod.next + end + end + if !$kgm_pkmod + set $kgm_fkmodmin = $kgm_kmodmin + set $kgm_fkmodmax = $kgm_kmodmax + end + end + end +end +document showkmodaddr +| Given an address, print the offset and name for the kmod containing it +| The following is the syntax: +| (gdb) showkmodaddr +end + +define showkmod + 
showkmodheader + showkmodint $arg0 +end +document showkmod +| Routine to print info about a kernel module +| The following is the syntax: +| (gdb) showkmod +end + +define showallkmods + showkmodheader + set $kgm_kmodp = (struct kmod_info *)kmod + while $kgm_kmodp + showkmodint $kgm_kmodp + set $kgm_kmodp = $kgm_kmodp->next + end +end +document showallkmods +| Routine to print a summary listing of all the kernel modules +| The following is the syntax: +| (gdb) showallkmods +end + +define showactheader + printf " activation " + printf "thread pri state wait_queue wait_event\n" +end + + +define showactint + printf " 0x%08x ", $arg0 + set $kgm_actp = *(Thread_Activation *)$arg0 + if $kgm_actp.thread + set $kgm_thread = *$kgm_actp.thread + printf "0x%08x ", $kgm_actp.thread + printf "%3d ", $kgm_thread.sched_pri + set $kgm_state = $kgm_thread.state + if $kgm_state & 0x80 + printf "I" + end + if $kgm_state & 0x40 + printf "P" + end + if $kgm_state & 0x20 + printf "A" + end + if $kgm_state & 0x10 + printf "H" + end + if $kgm_state & 0x08 + printf "U" + end + if $kgm_state & 0x04 + printf "R" + end + if $kgm_state & 0x02 + printf "S" + end + if $kgm_state & 0x01 + printf "W\t" + printf "0x%08x ", $kgm_thread.wait_queue + output /a $kgm_thread.wait_event + end + if $arg1 != 0 + if ($kgm_thread.kernel_stack != 0) + if ($kgm_thread.stack_privilege != 0) + printf "\n\t\tstack_privilege=0x%08x", $kgm_thread.stack_privilege + end + printf "\n\t\tkernel_stack=0x%08x", $kgm_thread.kernel_stack + set $mysp = $kgm_actp->mact.pcb.ss.r1 + set $prevsp = 0 + printf "\n\t\tstacktop=0x%08x", $mysp + while ($mysp != 0) && (($mysp & 0xf) == 0) && ($mysp < 0xb0000000) && ($mysp > $prevsp) + printf "\n\t\t0x%08x ", $mysp + set $kgm_return = *($mysp + 8) + if ($kgm_return > end) + showkmodaddr $kgm_return + else + output /a * ($mysp + 8) + end + set $prevsp = $mysp + set $mysp = * $mysp + end + printf "\n\t\tstackbottom=0x%08x", $prevsp + else + printf "\n\t\t\tcontinuation=" + output /a 
$kgm_thread.continuation + end + printf "\n" + else + printf "\n" + end + end +end + +define showact + showactheader + showactint $arg0 0 +end +document showact +| Routine to print out the state of a specific thread activation. +| The following is the syntax: +| (gdb) showact +end + + +define showactstack + showactheader + showactint $arg0 1 +end +document showactstack +| Routine to print out the stack of a specific thread activation. +| The following is the syntax: +| (gdb) showactstack +end + + +define showallacts + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + showtaskheader + showtaskint $kgm_taskp + showactheader + set $kgm_head_actp = &($kgm_taskp->thr_acts) + set $kgm_actp = (Thread_Activation *)($kgm_taskp->thr_acts.next) + while $kgm_actp != $kgm_head_actp + showactint $kgm_actp 0 + set $kgm_actp = (Thread_Activation *)($kgm_actp->thr_acts.next) + end + printf "\n" + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showallacts +| Routine to print out a summary listing of all the thread activations. +| The following is the syntax: +| (gdb) showallacts +end + + +define showallstacks + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + showtaskheader + showtaskint $kgm_taskp + set $kgm_head_actp = &($kgm_taskp->thr_acts) + set $kgm_actp = (Thread_Activation *)($kgm_taskp->thr_acts.next) + while $kgm_actp != $kgm_head_actp + showactheader + showactint $kgm_actp 1 + set $kgm_actp = (Thread_Activation *)($kgm_actp->thr_acts.next) + end + printf "\n" + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showallstacks +| Routine to print out a summary listing of all the thread kernel stacks. 
+| The following is the syntax: +| (gdb) showallstacks +end + +define showwaitqmembercount + set $kgm_waitqsubp = (wait_queue_sub_t)$arg0 + set $kgm_sublinksp = &($kgm_waitqsubp->wqs_sublinks) + set $kgm_wql = (wait_queue_link_t)$kgm_sublinksp->next + set $kgm_count = 0 + while ( (queue_entry_t)$kgm_wql != (queue_entry_t)$kgm_sublinksp) + set $kgm_waitqp = $kgm_wql->wql_element->wqe_queue + if !$kgm_found + showwaitqmemberheader + set $kgm_found = 1 + end + showwaitqmemberint $kgm_waitqp + end +end + + +define showwaitqmemberint + set $kgm_waitqp = (wait_queue_t)$arg0 + printf " 0x%08x ", $kgm_waitqp + printf "0x%08x ", $kgm_waitqp->wq_interlock + if ($kgm_waitqp->wq_fifo) + printf "Fifo" + else + printf "Prio" + end + if ($kgm_waitqp->wq_issub) + printf "S" + else + printf " " + end + printf " " + showwaitqwaitercount $kgm_waitqp + showwaitqmembercount $kgm_waitqp + printf "\n" +end + + +define showwaitqmembers + set $kgm_waitqsubp = (wait_queue_sub_t)$arg0 + set $kgm_sublinksp = &($kgm_waitqsubp->wqs_sublinks) + set $kgm_wql = (wait_queue_link_t)$kgm_sublinksp->next + set $kgm_found = 0 + while ( (queue_entry_t)$kgm_wql != (queue_entry_t)$kgm_sublinksp) + set $kgm_waitqp = $kgm_wql->wql_element->wqe_queue + if !$kgm_found + showwaitqmemberheader + set $kgm_found = 1 + end + showwaitqmemberint $kgm_waitqp + end +end + +define showwaitq + set $kgm_waitq = (wait_queue_t)$arg0 + showwaitqheader + showwaitqwaiters + if ($kgm_waitq->wq_issub) + showwaitqmembers + end +end + +define showmapheader + printf "vm_map pmap vm_size " + printf "#ents rpage hint first_free\n" +end + +define showvmeheader + printf " entry start " + printf "prot #page object offset\n" +end + +define showvmint + set $kgm_mapp = (vm_map_t)$arg0 + set $kgm_map = *$kgm_mapp + printf "0x%08x ", $arg0 + printf "0x%08x ", $kgm_map.pmap + printf "0x%08x ", $kgm_map.size + printf "%3d ", $kgm_map.hdr.nentries + printf "%5d ", $kgm_map.pmap->stats.resident_count + printf "0x%08x ", $kgm_map.hint + printf 
"0x%08x\n", $kgm_map.first_free + if $arg1 != 0 + showvmeheader + set $kgm_head_vmep = &($kgm_mapp->hdr.links) + set $kgm_vmep = $kgm_map.hdr.links.next + while (($kgm_vmep != 0) && ($kgm_vmep != $kgm_head_vmep)) + set $kgm_vme = *$kgm_vmep + printf " 0x%08x ", $kgm_vmep + printf "0x%08x ", $kgm_vme.links.start + printf "%1x", $kgm_vme.protection + printf "%1x", $kgm_vme.max_protection + if $kgm_vme.inheritance == 0x0 + printf "S" + end + if $kgm_vme.inheritance == 0x1 + printf "C" + end + if $kgm_vme.inheritance == 0x2 + printf "-" + end + if $kgm_vme.inheritance == 0x3 + printf "D" + end + if $kgm_vme.is_sub_map + printf "s " + else + if $kgm_vme.needs_copy + printf "n " + else + printf " " + end + end + printf "%5d ",($kgm_vme.links.end - $kgm_vme.links.start) >> 12 + printf "0x%08x ", $kgm_vme.object.vm_object + printf "0x%08x\n", $kgm_vme.offset + set $kgm_vmep = $kgm_vme.links.next + end + end + printf "\n" +end + + +define showmapvme + showmapheader + showvmint $arg0 1 +end +document showmapvme +| Routine to print out a summary listing of all the entries in a vm_map +| The following is the syntax: +| (gdb) showmapvme +end + + +define showmap + showmapheader + showvmint $arg0 0 +end +document showmap +| Routine to print out a summary description of a vm_map +| The following is the syntax: +| (gdb) showmap +end + +define showallvm + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + showtaskheader + showmapheader + showtaskint $kgm_taskp + showvmint $kgm_taskp->map 0 + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showallvm +| Routine to print a summary listing of all the vm maps +| The following is the syntax: +| (gdb) showallvm +end + + +define showallvme + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + showtaskheader + showmapheader + showtaskint $kgm_taskp + 
showvmint $kgm_taskp->map 1 + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showallvme +| Routine to print a summary listing of all the vm map entries +| The following is the syntax: +| (gdb) showallvme +end + + +define showipcheader + printf "ipc_space is_table table_next " + printf "flags tsize splaytree splaybase\n" +end + +define showipceheader + printf " name object " + printf "rite urefs destname destination\n" +end + +define showipceint + set $kgm_ie = *(ipc_entry_t)$arg0 + printf " 0x%08x ", $arg1 + printf "0x%08x ", $kgm_ie.ie_object + if $kgm_ie.ie_bits & 0x00100000 + printf "Dead " + printf "%5d\n", $kgm_ie.ie_bits & 0xffff + else + if $kgm_ie.ie_bits & 0x00080000 + printf "SET " + printf "%5d\n", $kgm_ie.ie_bits & 0xffff + else + if $kgm_ie.ie_bits & 0x00010000 + if $kgm_ie.ie_bits & 0x00020000 + printf " SR" + else + printf " S" + end + else + if $kgm_ie.ie_bits & 0x00020000 + printf " R" + end + end + if $kgm_ie.ie_bits & 0x00040000 + printf " O" + end + if $kgm_ie.index.request + printf "n" + else + printf " " + end + if $kgm_ie.ie_bits & 0x00800000 + printf "c" + else + printf " " + end + printf "%5d ", $kgm_ie.ie_bits & 0xffff + showportdest $kgm_ie.ie_object + end + end +end + +define showipcint + set $kgm_isp = (ipc_space_t)$arg0 + set $kgm_is = *$kgm_isp + printf "0x%08x ", $arg0 + printf "0x%08x ", $kgm_is.is_table + printf "0x%08x ", $kgm_is.is_table_next + if $kgm_is.is_growing != 0 + printf "G" + else + printf " " + end + if $kgm_is.is_fast != 0 + printf "F" + else + printf " " + end + if $kgm_is.is_active != 0 + printf "A " + else + printf " " + end + printf "%5d ", $kgm_is.is_table_size + printf "0x%08x ", $kgm_is.is_tree_total + printf "0x%08x\n", &$kgm_isp->is_tree + if $arg1 != 0 + showipceheader + set $kgm_iindex = 0 + set $kgm_iep = $kgm_is.is_table + set $kgm_destspacep = (ipc_space_t)0 + while ( $kgm_iindex < $kgm_is.is_table_size ) + set $kgm_ie = *$kgm_iep + if $kgm_ie.ie_bits & 0x001f0000 + set 
$kgm_name = (($kgm_iindex << 8)|($kgm_ie.ie_bits >> 24)) + showipceint $kgm_iep $kgm_name + end + set $kgm_iindex = $kgm_iindex + 1 + set $kgm_iep = &($kgm_is.is_table[$kgm_iindex]) + end + if $kgm_is.is_tree_total + printf "Still need to write tree traversal\n" + end + end + printf "\n" +end + + +define showipc + set $kgm_isp = (ipc_space_t)$arg0 + showipcheader + showipcint $kgm_isp 0 +end +document showipc +| Routine to print the status of the specified ipc space +| The following is the syntax: +| (gdb) showipc +end + +define showrights + set $kgm_isp = (ipc_space_t)$arg0 + showipcheader + showipcint $kgm_isp 1 +end +document showrights +| Routine to print a summary list of all the rights in a specified ipc space +| The following is the syntax: +| (gdb) showrights +end + + +define showtaskipc + set $kgm_taskp = (task_t)$arg0 + showtaskheader + showipcheader + showtaskint $kgm_taskp + showipcint $kgm_taskp->itk_space 0 +end +document showtaskipc +| Routine to print the status of the ipc space for a task +| The following is the syntax: +| (gdb) showtaskipc +end + + +define showtaskrights + set $kgm_taskp = (task_t)$arg0 + showtaskheader + showipcheader + showtaskint $kgm_taskp + showipcint $kgm_taskp->itk_space 1 +end +document showtaskrights +| Routine to print a summary listing of all the ipc rights for a task +| The following is the syntax: +| (gdb) showtaskrights +end + +define showallipc + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + showtaskheader + showipcheader + showtaskint $kgm_taskp + showipcint $kgm_taskp->itk_space 0 + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showallipc +| Routine to print a summary listing of all the ipc spaces +| The following is the syntax: +| (gdb) showallipc +end + + +define showallrights + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != 
$kgm_head_taskp + showtaskheader + showipcheader + showtaskint $kgm_taskp + showipcint $kgm_taskp->itk_space 1 + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showallrights +| Routine to print a summary listing of all the ipc rights +| The following is the syntax: +| (gdb) showallrights +end + + +define showtaskvm + set $kgm_taskp = (task_t)$arg0 + showtaskheader + showmapheader + showtaskint $kgm_taskp + showvmint $kgm_taskp->map 0 +end +document showtaskvm +| Routine to print out a summary description of a task's vm_map +| The following is the syntax: +| (gdb) showtaskvm +end + +define showtaskvme + set $kgm_taskp = (task_t)$arg0 + showtaskheader + showmapheader + showtaskint $kgm_taskp + showvmint $kgm_taskp->map 1 +end +document showtaskvme +| Routine to print out a summary listing of a task's vm_map_entries +| The following is the syntax: +| (gdb) showtaskvme +end + + +define showtaskheader + printf "task vm_map ipc_space #acts " + showprocheader +end + + +define showtaskint + set $kgm_task = *(Task *)$arg0 + printf "0x%08x ", $arg0 + printf "0x%08x ", $kgm_task.map + printf "0x%08x ", $kgm_task.itk_space + printf "%3d ", $kgm_task.thr_act_count + showprocint $kgm_task.bsd_info +end + +define showtask + showtaskheader + showtaskint $arg0 +end +document showtask +| Routine to print out info about a task. 
+| The following is the syntax: +| (gdb) showtask +end + + +define showtaskacts + showtaskheader + set $kgm_taskp = (Task *)$arg0 + showtaskint $kgm_taskp + showactheader + set $kgm_head_actp = &($kgm_taskp->thr_acts) + set $kgm_actp = (Thread_Activation *)($kgm_taskp->thr_acts.next) + while $kgm_actp != $kgm_head_actp + showactint $kgm_actp 0 + set $kgm_actp = (Thread_Activation *)($kgm_actp->thr_acts.next) + end +end +document showtaskacts +| Routine to print a summary listing of the activations in a task +| The following is the syntax: +| (gdb) showtaskacts +end + + +define showtaskstacks + showtaskheader + set $kgm_taskp = (Task *)$arg0 + showtaskint $kgm_taskp + set $kgm_head_actp = &($kgm_taskp->thr_acts) + set $kgm_actp = (Thread_Activation *)($kgm_taskp->thr_acts.next) + while $kgm_actp != $kgm_head_actp + showactheader + showactint $kgm_actp 1 + set $kgm_actp = (Thread_Activation *)($kgm_actp->thr_acts.next) + end +end +document showtaskstacks +| Routine to print a summary listing of the activations in a task and their stacks +| The following is the syntax: +| (gdb) showtaskstacks +end + + +define showalltasks + showtaskheader + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + showtaskint $kgm_taskp + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end +end +document showalltasks +| Routine to print a summary listing of all the tasks +| The following is the syntax: +| (gdb) showalltasks +end + + +define showprocheader + printf " pid proc command\n" +end + +define showprocint + set $kgm_procp = (struct proc *)$arg0 + if $kgm_procp != 0 + printf "%5d ", $kgm_procp->p_pid + printf "0x%08x ", $kgm_procp + printf "%s\n", $kgm_procp->p_comm + else + printf " *0* 0x00000000 --\n" + end +end + +define showpid + showtaskheader + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while $kgm_taskp != $kgm_head_taskp + set $kgm_procp 
= (struct proc *)$kgm_taskp->bsd_info + if (($kgm_procp != 0) && ($kgm_procp->p_pid == $arg0)) + showtaskint $kgm_taskp + set $kgm_taskp = $kgm_head_taskp + else + set $kgm_taskp = (Task *)($kgm_taskp->pset_tasks.next) + end + end +end +document showpid +| Routine to print a single process by pid +| The following is the syntax: +| (gdb) showpid +end + +define showproc + showtaskheader + set $kgm_procp = (struct proc *)$arg0 + showtaskint $kgm_procp->task $arg1 $arg2 +end + + +define kdb + set switch_debugger=1 + continue +end +document kdb +| kdb - Switch to the inline kernel debugger +| +| usage: kdb +| +| The kdb macro allows you to invoke the inline kernel debugger. +end + +define showpsetheader + printf "portset waitqueue recvname " + printf "flags refs recvname process\n" +end + +define showportheader + printf "port mqueue recvname " + printf "flags refs recvname process\n" +end + +define showportmemberheader + printf " port recvname " + printf "flags refs mqueue msgcount\n" +end + +define showkmsgheader + printf " kmsg size " + printf "disp msgid remote-port local-port\n" +end + +define showkmsgint + printf " 0x%08x ", $arg0 + set $kgm_kmsgh = ((ipc_kmsg_t)$arg0)->ikm_header + printf "0x%08x ", $kgm_kmsgh.msgh_size + if (($kgm_kmsgh.msgh_bits & 0xff) == 19) + printf "rC" + else + printf "rM" + end + if (($kgm_kmsgh.msgh_bits & 0xff00) == (19 < 8)) + printf "lC" + else + printf "lM" + end + if ($kgm_kmsgh.msgh_bits & 0xf0000000) + printf "c" + else + printf "s" + end + printf "%5d ", $kgm_kmsgh.msgh_msgid + printf "0x%08x ", $kgm_kmsgh.msgh_remote_port + printf "0x%08x\n", $kgm_kmsgh.msgh_local_port +end + + + +define showkobject + set $kgm_portp = (ipc_port_t)$arg0 + printf "0x%08x kobject(", $kgm_portp->ip_kobject + set $kgm_kotype = ($kgm_portp->ip_object.io_bits & 0x00000fff) + if ($kgm_kotype == 1) + printf "THREAD" + end + if ($kgm_kotype == 2) + printf "TASK" + end + if ($kgm_kotype == 3) + printf "HOST" + end + if ($kgm_kotype == 4) + printf 
"HOST_PRIV" + end + if ($kgm_kotype == 5) + printf "PROCESSOR" + end + if ($kgm_kotype == 6) + printf "PSET" + end + if ($kgm_kotype == 7) + printf "PSET_NAME" + end + if ($kgm_kotype == 8) + printf "TIMER" + end + if ($kgm_kotype == 9) + printf "PAGER_REQ" + end + if ($kgm_kotype == 10) + printf "DEVICE" + end + if ($kgm_kotype == 11) + printf "XMM_OBJECT" + end + if ($kgm_kotype == 12) + printf "XMM_PAGER" + end + if ($kgm_kotype == 13) + printf "XMM_KERNEL" + end + if ($kgm_kotype == 14) + printf "XMM_REPLY" + end + if ($kgm_kotype == 15) + printf "NOTDEF 15" + end + if ($kgm_kotype == 16) + printf "NOTDEF 16" + end + if ($kgm_kotype == 17) + printf "HOST_SEC" + end + if ($kgm_kotype == 18) + printf "LEDGER" + end + if ($kgm_kotype == 19) + printf "MASTER_DEV" + end + if ($kgm_kotype == 20) + printf "ACTIVATION" + end + if ($kgm_kotype == 21) + printf "SUBSYSTEM" + end + if ($kgm_kotype == 22) + printf "IO_DONE_QUE" + end + if ($kgm_kotype == 23) + printf "SEMAPHORE" + end + if ($kgm_kotype == 24) + printf "LOCK_SET" + end + if ($kgm_kotype == 25) + printf "CLOCK" + end + if ($kgm_kotype == 26) + printf "CLOCK_CTRL" + end + if ($kgm_kotype == 27) + printf "IOKIT_SPARE" + end + if ($kgm_kotype == 28) + printf "NAMED_MEM" + end + if ($kgm_kotype == 29) + printf "IOKIT_CON" + end + if ($kgm_kotype == 30) + printf "IOKIT_OBJ" + end + if ($kgm_kotype == 31) + printf "UPL" + end + printf ")\n" +end + +define showportdestproc + set $kgm_portp = (ipc_port_t)$arg0 + set $kgm_spacep = $kgm_portp->data.receiver +# check against the previous cached value - this is slow + if ($kgm_spacep != $kgm_destspacep) + set $kgm_destprocp = (struct proc *)0 + set $kgm_head_taskp = &default_pset.tasks + set $kgm_taskp = (Task *)($kgm_head_taskp->next) + while (($kgm_destprocp == 0) && ($kgm_taskp != $kgm_head_taskp)) + set $kgm_destspacep = $kgm_taskp->itk_space + if ($kgm_destspacep == $kgm_spacep) + set $kgm_destprocp = (struct proc *)$kgm_taskp->bsd_info + else + set $kgm_taskp = 
(Task *)($kgm_taskp->pset_tasks.next) + end + end + end + if $kgm_destprocp != 0 + printf "%s(%d)\n", $kgm_destprocp->p_comm, $kgm_destprocp->p_pid + else + printf "task 0x%08x\n", $kgm_taskp + end +end + +define showportdest + set $kgm_portp = (ipc_port_t)$arg0 + set $kgm_spacep = $kgm_portp->data.receiver + if ($kgm_spacep == ipc_space_kernel) + showkobject $kgm_portp + else + if ($kgm_portp->ip_object.io_bits & 0x80000000) + printf "0x%08x ", $kgm_portp->ip_object.io_receiver_name + showportdestproc $kgm_portp + else + printf "0x%08x inactive-port\n", $kgm_portp + end + end +end + +define showportmember + printf " 0x%08x ", $arg0 + set $kgm_portp = (ipc_port_t)$arg0 + printf "0x%08x ", $kgm_portp->ip_object.io_receiver_name + if ($kgm_portp->ip_object.io_bits & 0x80000000) + printf "A" + else + printf " " + end + if ($kgm_portp->ip_object.io_bits & 0x7fff0000) + printf "Set " + else + printf "Port" + end + printf "%5d ", $kgm_portp->ip_object.io_references + printf "0x%08x ", &($kgm_portp->ip_messages) + printf "0x%08x\n", $kgm_portp->ip_messages.data.port.msgcount +end + +define showportint + printf "0x%08x ", $arg0 + set $kgm_portp = (ipc_port_t)$arg0 + printf "0x%08x ", &($kgm_portp->ip_messages) + printf "0x%08x ", $kgm_portp->ip_object.io_receiver_name + if ($kgm_portp->ip_object.io_bits & 0x80000000) + printf "A" + else + printf "D" + end + printf "Port" + printf "%5d ", $kgm_portp->ip_object.io_references + set $kgm_destspacep = (ipc_space_t)0 + showportdest $kgm_portp + set $kgm_kmsgp = (ipc_kmsg_t)$kgm_portp->ip_messages.data.port.messages.ikmq_base + if $arg1 && $kgm_kmsgp + showkmsgheader + showkmsgint $kgm_kmsgp + set $kgm_kmsgheadp = $kgm_kmsgp + set $kgm_kmsgp = $kgm_kmsgp->ikm_next + while $kgm_kmsgp != $kgm_kmsgheadp + showkmsgint $kgm_kmsgp + set $kgm_kmsgp = $kgm_kmsgp->ikm_next + end + end +end + +define showpsetint + printf "0x%08x ", $arg0 + set $kgm_psetp = (ipc_pset_t)$arg0 + printf "0x%08x ", &($kgm_psetp->ips_messages) + printf "0x%08x 
", $kgm_psetp->ips_object.io_receiver_name + if ($kgm_psetp->ips_object.io_bits & 0x80000000) + printf "A" + else + printf "D" + end + printf "Set " + printf "%5d ", $kgm_psetp->ips_object.io_references + set $kgm_sublinksp = &($kgm_psetp->ips_messages.data.set_queue.wqs_sublinks) + set $kgm_wql = (wait_queue_link_t)$kgm_sublinksp->next + set $kgm_found = 0 + while ( (queue_entry_t)$kgm_wql != (queue_entry_t)$kgm_sublinksp) + set $kgm_portp = (ipc_port_t)((int)($kgm_wql->wql_element->wqe_queue) - ((int)$kgm_portoff)) + if !$kgm_found + set $kgm_destspacep = (ipc_space_t)0 + showportdest $kgm_portp + showportmemberheader + set $kgm_found = 1 + end + showportmember $kgm_portp 0 + set $kgm_wql = (wait_queue_link_t)$kgm_wql->wql_sublinks.next + end + if !$kgm_found + printf "--n/e-- --n/e--\n" + end +end + +define showpset + showpsetheader + showpsetint $arg0 1 +end + +define showport + showportheader + showportint $arg0 1 +end + +define showipcobject + set $kgm_object = (ipc_object_t)$arg0 + if ($kgm_objectp->io_bits & 0x7fff0000) + showpset $kgm_objectp + else + showport $kgm_objectp + end +end + +define showmqueue + set $kgm_mqueue = *(ipc_mqueue_t)$arg0 + set $kgm_psetoff = &(((ipc_pset_t)0)->ips_messages) + set $kgm_portoff = &(((ipc_port_t)0)->ip_messages) + if ($kgm_mqueue.data.set_queue.wqs_wait_queue.wq_issub) + set $kgm_pset = (((int)$arg0) - ((int)$kgm_psetoff)) + showpsetheader + showpsetint $kgm_pset 1 + else + showportheader + set $kgm_port = (((int)$arg0) - ((int)$kgm_portoff)) + showportint $kgm_port 1 + end +end + +define zprint_one +set $kgm_zone = (struct zone *)$arg0 + +printf "0x%08x ", $kgm_zone +printf "%8d ",$kgm_zone->count +printf "%8x ",$kgm_zone->cur_size +printf "%8x ",$kgm_zone->max_size +printf "%6d ",$kgm_zone->elem_size +printf "%8x ",$kgm_zone->alloc_size +printf "%s ",$kgm_zone->zone_name + +if ($kgm_zone->exhaustible) + printf "H" +end +if ($kgm_zone->collectable) + printf "C" +end +if ($kgm_zone->expandable) + printf "X" +end 
+printf "\n" +end + + +define zprint +printf "ZONE COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME\n" +set $kgm_zone_ptr = (struct zone *)first_zone +while ($kgm_zone_ptr != 0) + zprint_one $kgm_zone_ptr + set $kgm_zone_ptr = $kgm_zone_ptr->next_zone +end +printf "\n" +end +document zprint +| Routine to print a summary listing of all the kernel zones +| The following is the syntax: +| (gdb) zprint +end + diff --git a/osfmk/Makefile b/osfmk/Makefile new file mode 100644 index 000000000..6a4024958 --- /dev/null +++ b/osfmk/Makefile @@ -0,0 +1,62 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + mach \ + default_pager \ + device \ + mach_debug \ + profiling \ + UserNotification + +INSTINC_SUBDIRS_PPC = \ + mach \ + profiling + +INSTINC_SUBDIRS_I386 = \ + mach \ + profiling + +EXPINC_SUBDIRS = \ + mach \ + device \ + default_pager \ + mach_debug \ + profiling \ + ddb \ + kern \ + ipc \ + machine \ + UserNotification \ + vm \ + libsa + +EXPINC_SUBDIRS_PPC = \ + mach \ + ppc \ + profiling + +EXPINC_SUBDIRS_I386 = \ + mach \ + i386 \ + profiling + +SETUP_SUBDIRS = \ + conf + +COMP_SUBDIRS = \ + conf + +INST_SUBDIRS = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/UserNotification/KUNCUserNotifications.c b/osfmk/UserNotification/KUNCUserNotifications.c new file mode 100644 index 000000000..a4baa980e --- /dev/null +++ b/osfmk/UserNotification/KUNCUserNotifications.c @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#ifdef KERNEL_CF +// external +#include +#include +#endif + +/* + * DEFINES AND STRUCTURES + */ + +UNDServerRef gUNDServer = UND_SERVER_NULL; + + +struct UNDReply { + decl_mutex_data(,lock) /* UNDReply lock */ + int userLandNotificationKey; + KUNCUserNotificationCallBack callback; + boolean_t inprogress; + ipc_port_t self_port; /* Our port */ +}; + +#define UNDReply_lock(reply) mutex_lock(&reply->lock) +#define UNDReply_lock_try(reply) mutex_lock_try(&(reply)->lock) +#define UNDReply_unlock(reply) mutex_unlock(&(reply)->lock) + +void +UNDReply_deallocate( + UNDReplyRef reply) +{ + ipc_port_t port; + UNDReply_lock(reply); + port = reply->self_port; + ipc_kobject_set(port, IKO_NULL, IKOT_NONE); + reply->self_port = IP_NULL; + UNDReply_unlock(reply); + + ipc_port_dealloc_kernel(port); + kfree((vm_offset_t)reply, sizeof(struct UNDReply)); + return; +} + +/* + * UND Mig Callbacks +*/ + +kern_return_t +UNDAlertCompletedWithResult_rpc ( + UNDReplyRef reply, + int result, + xmlData_t keyRef, /* raw XML bytes */ + mach_msg_type_number_t keyLen) +{ +#ifdef KERNEL_CF + CFStringRef xmlError = NULL; + CFDictionaryRef dict = NULL; +#else + void *dict = (void *)keyRef; +#endif + + if (reply == UND_REPLY_NULL || !reply->inprogress) + return KERN_INVALID_ARGUMENT; 
+ + /* + * JMM - No C vesion of the Unserialize code in-kernel + * and no C type for a CFDictionary either. For now, + * just pass the raw keyRef through. + */ +#ifdef KERNEL_CF + if (keyRef && keyLen) { + dict = IOCFUnserialize(keyRef, NULL, NULL, &xmlError); + } + + if (xmlError) { + CFShow(xmlError); + CFRelease(xmlError); + } +#endif /* KERNEL_CF */ + + if (reply->callback) { + (reply->callback)((KUNCUserNotificationID) reply, result, dict); + } + + UNDReply_lock(reply); + reply->inprogress = FALSE; + reply->userLandNotificationKey = -1; + UNDReply_unlock(reply); + UNDReply_deallocate(reply); + return KERN_SUCCESS; +} + +/* + * Routine: UNDNotificationCreated_rpc + * + * Intermediate routine. Allows the kernel mechanism + * to be informed that the notification request IS + * being processed by the user-level daemon, and how + * to identify that request. + */ +kern_return_t +UNDNotificationCreated_rpc ( + UNDReplyRef reply, + int userLandNotificationKey) +{ + if (reply == UND_REPLY_NULL) + return KERN_INVALID_ARGUMENT; + + UNDReply_lock(reply); + if (reply->inprogress || reply->userLandNotificationKey != -1) { + UNDReply_unlock(reply); + return KERN_INVALID_ARGUMENT; + } + reply->userLandNotificationKey = userLandNotificationKey; + UNDReply_unlock(reply); + return KERN_SUCCESS; +} + +/* + * KUNC Functions +*/ + + +KUNCUserNotificationID +KUNCGetNotificationID() +{ + UNDReplyRef reply; + + reply = (UNDReplyRef) kalloc(sizeof(struct UNDReply)); + if (reply != UND_REPLY_NULL) { + reply->self_port = ipc_port_alloc_kernel(); + if (reply->self_port == IP_NULL) { + kfree((vm_offset_t)reply, sizeof(struct UNDReply)); + reply = UND_REPLY_NULL; + } else { + mutex_init(&reply->lock, ETAP_IO_UNDREPLY); + reply->userLandNotificationKey = -1; + reply->inprogress = FALSE; + ipc_kobject_set(reply->self_port, + (ipc_kobject_t)reply, + IKOT_UND_REPLY); + } + } + return (KUNCUserNotificationID) reply; +} + + +kern_return_t KUNCExecute(char executionPath[1024], int uid, int gid) +{ 
+ return UNDExecute_rpc(gUNDServer, executionPath, uid, gid); +} + +kern_return_t KUNCUserNotificationCancel( + KUNCUserNotificationID id) +{ + UNDReplyRef reply = (UNDReplyRef)id; + kern_return_t kr; + int ulkey; + + if (reply == UND_REPLY_NULL) + return KERN_INVALID_ARGUMENT; + + UNDReply_lock(reply); + if (!reply->inprogress) { + UNDReply_unlock(reply); + return KERN_INVALID_ARGUMENT; + } + + reply->inprogress = FALSE; + if (ulkey = reply->userLandNotificationKey) { + reply->userLandNotificationKey = 0; + UNDReply_unlock(reply); + kr = UNDCancelNotification_rpc(gUNDServer,ulkey); + } else { + UNDReply_unlock(reply); + kr = KERN_SUCCESS; + } + UNDReply_deallocate(reply); + return kr; +} + +kern_return_t +KUNCUserNotificationDisplayNotice( + int timeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle) +{ + kern_return_t kr; + kr = UNDDisplayNoticeSimple_rpc(gUNDServer, + timeout, + flags, + iconPath, + soundPath, + localizationPath, + alertHeader, + alertMessage, + defaultButtonTitle); + return kr; +} + +kern_return_t +KUNCUserNotificationDisplayAlert( + int timeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle, + char *alternateButtonTitle, + char *otherButtonTitle, + unsigned *responseFlags) +{ + kern_return_t kr; + + kr = UNDDisplayAlertSimple_rpc(gUNDServer, + timeout, + flags, + iconPath, + soundPath, + localizationPath, + alertHeader, + alertMessage, + defaultButtonTitle, + alternateButtonTitle, + otherButtonTitle, + responseFlags); + return kr; +} + +kern_return_t +KUNCUserNotificationDisplayFromBundle( + KUNCUserNotificationID id, + char *bundlePath, + char *fileName, + char *fileExtension, + char *messageKey, + char *tokenString, + KUNCUserNotificationCallBack callback, + int contextKey) +{ + UNDReplyRef reply = (UNDReplyRef)id; + ipc_port_t 
reply_port; + kern_return_t kr; + + if (reply == UND_REPLY_NULL) + return KERN_INVALID_ARGUMENT; + UNDReply_lock(reply); + if (reply->inprogress == TRUE || reply->userLandNotificationKey != -1) { + UNDReply_unlock(reply); + return KERN_INVALID_ARGUMENT; + } + reply->inprogress == TRUE; + reply->callback = callback; + reply_port = ipc_port_make_send(reply->self_port); + UNDReply_unlock(reply); + + kr = UNDDisplayCustomFromBundle_rpc(gUNDServer, + reply_port, + bundlePath, + fileName, + fileExtension, + messageKey, + tokenString); + return kr; +} + +/* + * Routine: convert_port_to_UNDReply + * + * MIG helper routine to convert from a mach port to a + * UNDReply object. + * + * Assumptions: + * Nothing locked. + */ +UNDReplyRef +convert_port_to_UNDReply( + ipc_port_t port) +{ + if (IP_VALID(port)) { + UNDReplyRef reply; + + ip_lock(port); + if (!ip_active(port) || (ip_kotype(port) != IKOT_UND_REPLY)) { + ip_unlock(port); + return UND_REPLY_NULL; + } + reply = (UNDReplyRef) port->ip_kobject; + assert(reply != UND_REPLY_NULL); + ip_unlock(port); + } + return UND_REPLY_NULL; +} + +/* + * User interface for setting the host UserNotification Daemon port. + */ + +kern_return_t +host_set_UNDServer( + host_priv_t host_priv, + UNDServerRef server) +{ + + if (host_priv == HOST_PRIV_NULL || server == UND_SERVER_NULL) + return KERN_INVALID_ARGUMENT; + if (gUNDServer != UND_SERVER_NULL) + ipc_port_dealloc_kernel(gUNDServer); + gUNDServer = server; + return KERN_SUCCESS; +} + +/* + * User interface for retrieving the UserNotification Daemon port. 
+ */ + +kern_return_t +host_get_UNDServer( + host_priv_t host_priv, + UNDServerRef *server) +{ + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + *server = gUNDServer; + return KERN_SUCCESS; +} + diff --git a/osfmk/UserNotification/KUNCUserNotifications.h b/osfmk/UserNotification/KUNCUserNotifications.h new file mode 100644 index 000000000..0351f1d87 --- /dev/null +++ b/osfmk/UserNotification/KUNCUserNotifications.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __USERNOTIFICATION_KUNCUSERNOTIFICATIONS_H +#define __USERNOTIFICATION_KUNCUSERNOTIFICATIONS_H + +#include + + +#include +#include +#include + +__BEGIN_DECLS + +/* + * non blocking notice call. + */ +kern_return_t +KUNCUserNotificationDisplayNotice( + int timeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle); + +/* + * ***BLOCKING*** alert call, returned int value corresponds to the + * pressed button, spin this off in a thread only, or expect your kext to block. 
+ */ +kern_return_t +KUNCUserNotificationDisplayAlert( + int timeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle, + char *alternateButtonTitle, + char *otherButtonTitle, + unsigned *responseFlags); + +/* + * Execute a userland executable with the given path, user and type + */ + +#define kOpenApplicationPath 0 /* essentially executes the path */ +#define kOpenPreferencePanel 1 /* runs the preferences with the foo.preference opened. foo.preference must exist in /System/Library/Preferences */ +#define kOpenApplication 2 /* essentially runs /usr/bin/open on the passed in application name */ + + +#define kOpenAppAsRoot 0 +#define kOpenAppAsConsoleUser 1 + +kern_return_t +KUNCExecute( + char *executionPath, + int openAsUser, + int pathExecutionType); + + +/* KUNC User Notification XML Keys + * + * These are the keys used in the xml plist file passed in to the + * KUNCUserNotitificationDisplayFrom* calls + * + * KUNC Notifications are completely dependent on CFUserNotifications in + * user land. The same restrictions apply, including the number of text fields, + * types of information displayable, etc. 
+ * + * Key Type + * Header string (header displayed on dialog) + * Icon URL string (url of the icon to display) + * Sound URL string (url of the sound to play on display) + * Localization URL string (url of bundle to retrieve localization + * info from, using Localizable.strings files) + * Message string (text of the message, can contain %@'s + * which are filled from tokenString passed in) + * OK Button Title string (title of the "main" button) + * Alternate Button Title string (title of the "alternate" button - + * usually cancel) + * Other Button Title string (title of the "other" button) + * Timeout string (numeric, int - seconds until the dialog + * goes away on it's own) + * Alert Level string (Stop, Notice, Alert, + * Blocking Message string (numeric, 1 or 0 - if 1, the dialog will + * have no buttons) + * Text Field Strings array of strings (each becomes a text field) + * Password Fields array of strings (numeric - each indicates a + * pwd field) + * Popup Button Strings array of strings (each entry becomes a popup + * button string) + * Radio Button Strings array of strings (each becomes a radio button) + * Check Box Strings array of strings (each becomes a check box) + * Selected Radio string (numeric - which radio is selected) + * Checked Boxes array of strings (numeric - each indicates a + * checked field) + * Selected Popup string (numeric - which popup entry is selected) + */ + +/* + * Bundle Calls + * + * Arguments + * + * bundleIdentifier + * path to the actual bundle (not inside of it) + * (i.e. "/System/Library/Extensions/Foo.kext") + * ***NOTE*** + * This WILL change soon to expect the CFBundleIdentifier instead of a bundle path + * fileName + * filename in bundle to retrive the xml from (i.e. 
"Messages") + * fileExtension + * if fileName has an extension, it goes here (i.e., "dict"); + * messageKey + * name of the xml key in the dictionary in the file to retrieve + * the info from (i.e., "Error Message") + * tokenString + * a string in the form of "foo@bar" where each element is + * seperated by the @ character. This string can be used to + * replace values of the form %@ in the message key in the provided + * dictionary in the xml plist + * specialKey + * user specified key for notification, use this to match return + * values with your requested notification, this value is passed + * back to the client in the callback pararmeter contextKey + */ + +typedef int KUNCUserNotificationID; + +/* + * Reponse value checking & default setting + * + * The reponse value returned in the response Flags of the + * KUNCUserNotificationCallBack can be tested against the following + * enum and 2 defines to determine the state. + */ + +enum { + kKUNCDefaultResponse = 0, + kKUNCAlternateResponse = 1, + kKUNCOtherResponse = 2, + kKUNCCancelResponse = 3 +}; + +#define KUNCCheckBoxChecked(i) (1 << (8 + i)) /* can be used for radio's too */ +#define KUNCPopUpSelection(n) (n << 24) + +/* + * Callback function for KUNCNotifications + */ +typedef void +(*KUNCUserNotificationCallBack)( + int contextKey, + int responseFlags, + void *xmlData); + +/* + * Get a notification ID + */ +KUNCUserNotificationID KUNCGetNotificationID(); + + +/* This function currently requires a bundle path, which kexts cannot currently get. In the future, the CFBundleIdentiofier of the kext will be pass in in place of the bundlePath. 
*/ + +kern_return_t +KUNCUserNotificationDisplayFromBundle( + KUNCUserNotificationID notificationID, + char *bundleIdentifier, + char *fileName, + char *fileExtension, + char *messageKey, + char *tokenString, + KUNCUserNotificationCallBack callback, + int contextKey); + + +kern_return_t +KUNCUserNotificationCancel( + KUNCUserNotificationID notification); + + +__END_DECLS + +#endif /* __USERNOTIFICATION_KUNCUSERNOTIFICATIONS_H */ diff --git a/osfmk/UserNotification/Makefile b/osfmk/UserNotification/Makefile new file mode 100644 index 000000000..240221383 --- /dev/null +++ b/osfmk/UserNotification/Makefile @@ -0,0 +1,91 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = + +INSTINC_SUBDIRS_PPC = + +INSTINC_SUBDIRS_I386 = + +EXPINC_SUBDIRS = + +EXPINC_SUBDIRS_PPC = + +EXPINC_SUBDIRS_I386 = + +MIG_TYPES = \ + UNDTypes.defs + +MIG_DEFS = \ + UNDRequest.defs \ + UNDReply.defs + +DATAFILES = \ + UNDTypes.h \ + ${MIG_TYPES} \ + ${MIG_DEFS} + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_GEN_LIST = + +INSTALL_MI_DIR = UserNotification + +EXPORT_MI_LIST = \ + KUNCUserNotifications.h \ + ${DATAFILES} + +EXPORT_MI_GEN_LIST = + +EXPORT_MI_DIR = UserNotification + +# +# Build path +# +INCFLAGS_MAKEFILE= -I.. 
+ +MIGKSFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_SERVER=1 +MIGKUFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_USER=1 -maxonstack 1024 + +MIG_KUHDRS = \ + UNDRequest.h + +MIG_KUSRC = \ + UNDRequest.c + +MIG_KSHDRS = \ + UNDReplyServer.h + +MIG_KSSRC = \ + UNDReplyServer.c + +COMP_FILES = ${MIG_KUSRC} ${MIG_KSSRC} + +${COMP_FILES} : ${MIG_TYPES} + +${MIG_KUSRC} : \ + %.c : %.defs + ${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ + -user $*.c \ + -header $*.h \ + -server /dev/null \ + -sheader /dev/null \ + $< + +${MIG_KSSRC}: \ + %Server.c : %.defs + ${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ + -user /dev/null \ + -header /dev/null \ + -server $*Server.c \ + -sheader $*Server.h \ + $< + + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/osfmk/UserNotification/UNDReply.defs b/osfmk/UserNotification/UNDReply.defs new file mode 100644 index 000000000..87bb8b6ac --- /dev/null +++ b/osfmk/UserNotification/UNDReply.defs @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + UNDReply 6200; + +#include +#include + +simpleroutine UNDAlertCompletedWithResult_rpc( + reply: UNDReplyRef; + in result: int; + in data: xmlData); + +simpleroutine UNDNotificationCreated_rpc( + reply: UNDReplyRef; + in userLandNotificationKey: int); diff --git a/osfmk/UserNotification/UNDRequest.defs b/osfmk/UserNotification/UNDRequest.defs new file mode 100644 index 000000000..e73bd0391 --- /dev/null +++ b/osfmk/UserNotification/UNDRequest.defs @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ + UNDRequest 6000; + +#include +#include + + +/* +-- Messages sent by the UserNotification Client to the Server +*/ + +/* Execution information */ +simpleroutine UNDExecute_rpc( + server: UNDServerRef; + in executionPath: UNDPath; + in uid: int; + in gid: int); + +/* Bundle notice and bundle alert */ + +simpleroutine UNDDisplayNoticeFromBundle_rpc( + server: UNDServerRef; + in reply: UNDReplyRef; + in bundlePath: UNDPath; + in fileName: UNDPath; + in fileExtension: UNDPath; + in messageKey: UNDKey); + +simpleroutine UNDDisplayAlertFromBundle_rpc( + server: UNDServerRef; + in reply: UNDReplyRef; + in bundlePath: UNDPath; + in fileName: UNDKey; + in fileExtension: UNDKey; + in messageKey: UNDKey); + +simpleroutine UNDDisplayCustomFromBundle_rpc( + server: UNDServerRef; + in reply: UNDReplyRef; + in bundlePath: UNDPath; + in fileName: UNDKey; + in fileExtension: UNDKey; + in messageKey: UNDKey; + in tokenKey: UNDPath); + +simpleroutine UNDDisplayCustomFromDictionary_rpc( + server: UNDServerRef; + in reply: UNDReplyRef; + in data: xmlData); + +simpleroutine UNDCancelNotification_rpc( + server: UNDServerRef; + in userLandNotificationKey: int); + +/* + * Just pops up a notice with a single OK button and the label and message + * specified below. As such, there is no acknowledgement from the server. + */ +simpleroutine UNDDisplayNoticeSimple_rpc( + server: UNDServerRef; + in timeout: int; + in flags: unsigned; + in iconPath:UNDLabel; + in soundPath:UNDLabel; + in localizationPath:UNDLabel; + in header: UNDLabel; + in message: UNDMessage; + in defaultButtonTitle:UNDLabel); + +/* + * A synchronous routine to display an alert. This will wait for the + * result to come back. As this can take an exceedingly long time to + * complete (and will block the calling thread for the duration) great + * care should be exercised before using this method. 
+ */ +routine UNDDisplayAlertSimple_rpc( + server: UNDServerRef; + in timeout: int; + in flags: unsigned; + in iconPath:UNDLabel; + in soundPath:UNDLabel; + in localizationPath:UNDLabel; + in header: UNDLabel; + in message: UNDMessage; + in defaultButtonTitle:UNDLabel; + in alternateButtonTitle:UNDLabel; + in otherButtonTitle:UNDLabel; + out response: int); diff --git a/osfmk/UserNotification/UNDTypes.defs b/osfmk/UserNotification/UNDTypes.defs new file mode 100644 index 000000000..8ccefc684 --- /dev/null +++ b/osfmk/UserNotification/UNDTypes.defs @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _USERNOTIFICATION_UNDTYPES_DEFS_ +#define _USERNOTIFICATION_UNDTYPES_DEFS_ + +#include + +/* + * UserNotification message types +*/ + +type UNDMessage = c_string[*:1024]; +type UNDLabel = c_string[*:128]; + +type UNDKey = c_string[*:128]; +type UNDPath = c_string[*:1024]; + +type UNDXMLDict = c_string[*:2048]; + +/* + * serialized XML data + */ +type xmlData = ^ array [] of MACH_MSG_TYPE_BYTE + ctype : xmlData_t; + + +type UNDServerRef = mach_port_t; + +type UNDReplyRef = mach_port_t +#if KERNEL_SERVER + intran: UNDReplyRef convert_port_to_UNDReply(mach_port_t) +#endif /* KERNEL_SERVER */ +#if KERNEL_USER + ctype: mach_port_t +#endif /* KERNEL_USER */ + ; + +import ; + +#endif /* _USERNOTIFICATION_UNDTYPES_DEFS_ */ diff --git a/osfmk/UserNotification/UNDTypes.h b/osfmk/UserNotification/UNDTypes.h new file mode 100644 index 000000000..f80242c42 --- /dev/null +++ b/osfmk/UserNotification/UNDTypes.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __USERNOTIFICATION_UNDTYPES_H +#define __USERNOTIFICATION_UNDTYPES_H + +#include + +typedef char *UNDMessage; +typedef char *UNDLabel; +typedef char *UNDKey; +typedef char *UNDPath; + +/* + * serialized key's, list delimiters, ... + * (sent as out-of-line data in a message) + */ +typedef const char * xmlData_t; + +#ifdef KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE + +/* + * UNDReply definition - used to dispatch UserNotification + * replies back to the in-kernel client. + */ +typedef struct UNDReply *UNDReplyRef; + +extern UNDReplyRef convert_port_to_UNDReply(mach_port_t); + +#else /* !MACH_KERNEL_PRIVATE */ + +typedef struct __UNDReply__ *UNDReplyRef; + +#endif /* !MACH_KERNEL_PRIVATE */ + +#else /* ! KERNEL_PRIVATE */ + +typedef mach_port_t UNDReplyRef; + +#endif /* ! KERNEL_PRIVATE */ + +#define UND_REPLY_NULL ((UNDReplyRef)0) +#define XML_DATA_NULL ((xmlData_t)0) + +#endif /* __USERNOTIFICATION_UNDTPES_H */ + diff --git a/osfmk/conf/MASTER b/osfmk/conf/MASTER new file mode 100644 index 000000000..025c0c4af --- /dev/null +++ b/osfmk/conf/MASTER @@ -0,0 +1,198 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +####################################################################### +# +# Master machine independent configuration file. +# +# Specific configuration files are created based on this and +# the machine specific master file using the doconf script. +# +# Any changes to the master configuration files will affect all +# other configuration files based upon it. +# +####################################################################### +# +# To build a configuration, execute "doconf ." +# Configurations are specified in the "Configurations:" section +# of the MASTER and MASTER.* files as follows: +# +# = [ ... 
] +# +# Lines in the MASTER and MASTER.* files are selected based on +# the attribute selector list, found in a comment at the end of +# the line. This is a list of attributes separated by commas. +# The "!" operator selects the line if none of the attributes are +# specified. +# +# For example: +# +# selects a line if "foo" or "bar" are specified. +# selects a line if neither "foo" nor "bar" is +# specified. +# +# Lines with no attributes specified are selected for all +# configurations. +# +####################################################################### +# +# Basic compilation options. +# +# The MACH ident is passed to every kernel file compilation as -DMACH. +# This is useful in order to #ifdef code that is intended to be used in +# a MACH kernel. +# +ident MACH +############################################################################## +# +# MACH configuration options. +# +# TASK_SWAPPER enables code that manages demand for physical memory by +# forcibly suspending tasks when the demand exceeds supply. This +# option should be on. +# +options MACH_KERNEL +options MACH_PAGEMAP +options MACH_LOAD +options MACH_RT +options THREAD_SWAPPER # +options TASK_SWAPPER # +pseudo-device test_device 1 +options ADVISORY_PAGEOUT +########################################################## +# +# conf/config.debug +# +# This defines configuration options that are normally used only during +# kernel code development and debugging. They add run-time error checks or +# statistics gathering, which will slow down the system +# +########################################################## +# +# MACH_ASSERT controls the assert() and ASSERT() macros, used to verify the +# consistency of various algorithms in the kernel. The performance impact +# of this option is significant. +# +options MACH_ASSERT # # +# +# MACH_DEBUG enables the mach_debug_server, a message interface used to +# retrieve or control various statistics. 
This interface may expose data +# structures that would not normally be allowed outside the kernel, and +# MUST NOT be enabled on a released configuration. +# Other options here enable information retrieval for specific subsystems +# +options MACH_DEBUG # # +# +options MACH_IPC_DEBUG # # +options MACH_VM_DEBUG # # +# +# MACH_MP_DEBUG control the possible dead locks that may occur by controlling +# that IPL level has been raised down to SPL0 after some calls to +# hardclock device driver. +# +options MACH_MP_DEBUG # # +# +# ZONE_DEBUG keeps track of all zalloc()ed elements to perform further +# operations on each element. +# +options ZONE_DEBUG # # +# +# XPR_DEBUG enables the gathering of data through the XPR macros inserted +# into various subsystems. This option is normally only enabled for +# specific performance or behavior studies, as the overhead in both +# code and data space is large. The data is normally retrieved through +# the kernel debugger (kdb) or by reading /dev/kmem. +# +options XPR_DEBUG # # +# +# MACH_LDEBUG controls the internal consistency checks and +# data gathering in the locking package. This also enables a debug-only +# version of simple-locks on uniprocessor machines. The code size and +# performance impact of this option is significant. +# +options MACH_LDEBUG # # + +# +# +# +options KDEBUG # kernel tracing # + +# +# MACH_COUNTERS enables code that handles various counters in the system. +# +options MACH_COUNTERS # # +# +# ETAP The Event Trace Analysis Package enables user-level tasks to monitor +# and analyze kernel events. ETAP supports three modes of tracing: +# +# 1. General event tracing: ETAP_EVENT_MONITOR +# 2. Monitored lock tracing: ETAP_LOCK_MONITOR +# 3. Cumulative lock tracing: ETAP_LOCK_ACCUMULATE +# +# Each of these trace modes are mutually exclusive. +# +# CONFIGURING ETAP: To enable the trace package, the ETAP switch +# along with *ONE* ETAP trace mode is selected. 
The selected ETAP +# mode determines the level of instrumentation built into the kernel. +# Mode 1 configures event probes through-out the system. Modes 2 & 3 +# add instumentation to the kernel lock mechanisms. +# +# ETAP (and all its trace modes) is mutually exclusive with the +# MACH_LDEBUG option. It is assumed that general lock debugging is +# completed before gathering event information. +# +# ETAP functionality is normally only enabled for event profiling and +# performance studies. Event tracing should not be enabled for release +# configurations, as the code size and performance impact of these +# options are significant. +# +# +#options ETAP # ETAP enable +#options ETAP_EVENT_MONITOR # Monitor events +#options ETAP_LOCK_MONITOR # Monitor lock behavior +#options ETAP_LOCK_ACCUMULATE # Collect cumulative lock data + +########################################################## +# +# This defines configuration options that are normally used only during +# kernel code development and performance characterization. They add run-time +# statistics gathering, which will slow down the system, +# +########################################################## +# +# MACH_PROF enables the profiling server, a message interface used to +# retrieve profiling statistics. +# +#options MACH_PROF # # +# +# MACH_IPC_STATS controls the collection of statistics in the MACH IPC +# subsystem. +# +#options MACH_IPC_STATS +# +# MACH_CO_INFO controls the collection of callout statistics. This +# information is retrieved via a mach_debug message, or though +# /dev/kmem. The runtime impact of the option is minimal. +# +#options MACH_CO_INFO +# +# MACH_CLUSTER_STATS controls the collection of various statistics concerning +# the effectiveness and behavior of the clustered pageout and pagein +# code. +# +#options MACH_CLUSTER_STATS +# +# MACH_SCTIMES enables optional code that can be used to measure the +# execution overhead of performing Mach traps with 1 through 6 +# arguments. 
+# +#options MACH_SCTIMES +# +# MACH_COUNTERS enables various code-path statistics. Most of these +# are accessed through the debugger. +# +options MACH_COUNTERS # # + diff --git a/osfmk/conf/MASTER.i386 b/osfmk/conf/MASTER.i386 new file mode 100644 index 000000000..f71300ff9 --- /dev/null +++ b/osfmk/conf/MASTER.i386 @@ -0,0 +1,103 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +###################################################################### +# +# Master Apple MacOS X configuration file +# (see the master machine independent +# configuration file for a description of the file format). +# +###################################################################### +# HISTORY +# 5-Jun-1998 Umesh Vaishampayan (umeshv@apple.com) +# Deleted obsolete history. Created tagets like RELEASEX, PROFILEX, +# DEBUGX for MacOS X. These in short term will facilitate development +# concurrent to MacOSX. 
+###################################################################### +# +# NeXT (PSEUDO-)DEVICES (select any combination) +# ex = Excelan EXOS 202 Ethernet interface +# ip = Interphase V/SMD 3200 disk controller +# od = Canon OMD-1 Optical Disk +# rd = RAM disk +# sd = SCSI disk +# sg = Generic SCSI Device +# st = SCSI tape +# fd = Floppy Disk +# en = Integrated Ethernet controller +# dsp = DSP560001 digital signal processor +# iplmeas = ipl time measurement +# nextp = NeXT Laser Printer +# sound = sound I/O +# vol = removable volume support device +# venip = virtual Ethernet/IP network interface +# zs = Serial device +# +# MULTIPROCESSOR SUPPORT (select exactly one) +# multi = support 4 processors +# uni = supports single processor +# +# SPECIAL CHARACTERISTICS (select any combination) +# gdb = GNU kernel debugger +# posix_kern = POSIX support +# +# CPU TYPE (select exactly one) +# NeXT = FIXME +# +###################################################################### +# +# Standard Apple MacOS X Configurations: +# -------- ---- -------- --------------- +# +# MOSTX = [intel pc mach medium event vol pst gdb kernobjc fixpri simple_clock mdebug kernserv driverkit uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union portal ffs cd9660 compat_43 revfs] +# RELEASEX = [MOSTX libdriver] +# PROFILEX = [RELEASE profile] +# DEBUGX = [MOSTX libdriver_g debug xpr_debug uxpr diagnostic] +# DEBUG-PROFILEX = [DEBUGX profile] +# osfmk = [intel pc mach small event vol pst gdb kernobjc fixpri simple_clock mkernserv driverkit uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union portal ffs cd9660 compat_43 revfs mk30 mk30_i386] +# RELEASE = [intel pc iokit mach_pe mach mach_kdp small event vol hd pst gdb kernobjc fixpri simple_clock mkernserv driverkit uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union portal ffs fb cd9660 compat_43 revfs mk30 mk30_i386] +# DEBUG_KDP = [intel pc iokit mach_pe mach 
mach_kdp small event vol hd pst gdb kernobjc fixpri simple_clock mkernserv driverkit uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union portal ffs fb cd9660 compat_43 revfs mk30 mk30_i386 osf_debug debug] +# DEBUG= [intel pc iokit mach_pe mach mach_kdp small event vol hd pst gdb kernobjc fixpri simple_clock mkernserv driverkit uxpr kernstack ipc_compat ipc_debug nfsclient nfsserver quota fifo fdesc union portal ffs fb cd9660 compat_43 revfs mk30 mk30_i386 osf_debug debug] +# RELEASEX = [MOSTX libdriver] +# +###################################################################### +# +machine "i386" # +cpu "i386" # + +pseudo-device cpus 2 + +# choices for platform_bus are pci at386 sqt and kkt +makeoptions OSFMK_MACHINE = "i386" # +makeoptions CCONFIGFLAGS = "-g -O -fno-omit-frame-pointer" # +makeoptions CCONFIGFLAGS = "-O3" # +makeoptions RELOC = "00100000" # +makeoptions SYMADDR = "00780000" # + +options GDB # GNU kernel debugger # +options DEBUG # general debugging code # +options SHOW_SPACE # print size of structures # +options EVENTMETER # event meter support # +options FP_EMUL # floating point emulation # +options PC_SUPPORT # virtual PC support # +options UXPR # user-level XPR package # +options STAT_TIME # time stats +config mach_kernel swap generic # + +options EVENT # + +options MACH_BSD +options IOKIT # # +options MACH_PE # # + +#options DDB # Inline debugger # +options MACH_KDP # KDP # + +# SMP +options MP_V1_1 + +# FIXME +pseudo-device com 2 diff --git a/osfmk/conf/MASTER.ppc b/osfmk/conf/MASTER.ppc new file mode 100644 index 000000000..b2f060952 --- /dev/null +++ b/osfmk/conf/MASTER.ppc @@ -0,0 +1,65 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. 
+# +###################################################################### +# +# Standard Apple MacOS X Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [ mach_bsd mach_kdp iokit mach_pe ppc mach ] +# RELEASE_TRACE = [ RELEASE kdebug ] +# DEBUG = [ RELEASE mach_kdb debug ] +# DEBUG_TRACE = [ DEBUG kdebug ] +# PROFILE = [ RELEASE ] +# +###################################################################### +# +############################################################################## +# +# Statistics and timing options. +# +# STAT_TIME indicates that this machine uses a statistical timer for gathering +# usage statistics, and has no higher resolution timer to measure actual +# intervals. +# +options STAT_TIME +#options MACH_MACHINE_ROUTINES + +# +# MACH_PROF enables code for mach profiling. +# +options MACH_PROF # # +############################################################################## +# +# Debug +# +options DEBUG # # + +machine "ppc" +cpu "ppc" +pseudo-device cpus 2 +pseudo-device scc 1 +pseudo-device vc 1 + + + +# Disabled by default, since mklinux does not need this +# unless running multiserver - the atalk stack at time of +# writing inserts a null filter! 
+#options NET_FILTER_COMPILER + +# Turn on the serial console by uncommenting the this: +#options SERIAL_CONSOLE_DEFAULT + +options MACH_KDP # # +options MACH_KDB # # +options MACH_BSD # # +options IOKIT # # +options MACH_PE # # + +# XXX for bringup, turns on mac disklabels, +# and some other nice stuff for the diskshim +options POWERMAC diff --git a/osfmk/conf/Makefile b/osfmk/conf/Makefile new file mode 100644 index 000000000..c7a6db857 --- /dev/null +++ b/osfmk/conf/Makefile @@ -0,0 +1,68 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + tools + +COMP_SUBDIRS = + +INST_SUBDIRS = + +ifndef OSFMK_KERNEL_CONFIG +export OSFMK_KERNEL_CONFIG = $(KERNEL_CONFIG) +endif + +COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: + make build_setup + +$(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG)/Makefile: $(SOURCE)/MASTER \ + $(SOURCE)/MASTER.$(arch_config) \ + $(SOURCE)/Makefile.template \ + $(SOURCE)/Makefile.$(arch_config) \ + $(SOURCE)/files \ + $(SOURCE)/files.$(arch_config) \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf + @echo "Running doconf for $(OSFMK_KERNEL_CONFIG)"; + (doconf_target=$(addsuffix /conf, $(TARGET)); \ + echo $${doconf_target};\ + $(MKDIR) $${doconf_target}; \ + cd $${doconf_target}; \ + rm -f $(notdir $?); \ + cp $? 
$${doconf_target}; \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(arch_config) -d $(TARGET)/$(OSFMK_KERNEL_CONFIG) $(OSFMK_KERNEL_CONFIG); \ + ); + +$(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG)/platforms.h: $(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG)/Makefile + (cd $(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG); \ + ${RM} $@; \ + ${LN} cputypes.h $@; \ + ) + +do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ + $(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG)/Makefile \ + $(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG)/platforms.h + +do_all: do_setup_conf + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(OSFMK_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + (cd $(COMPOBJROOT)/$(OSFMK_KERNEL_CONFIG); \ + next_source=$(subst conf/,,$(SOURCE)); \ + ${MAKE} MAKEFILES=$(TARGET)/$(OSFMK_KERNEL_CONFIG)/Makefile \ + SOURCE=$${next_source} \ + TARGET=$(TARGET) \ + build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(OSFMK_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/osfmk/conf/Makefile.i386 b/osfmk/conf/Makefile.i386 new file mode 100644 index 000000000..763f8a3e8 --- /dev/null +++ b/osfmk/conf/Makefile.i386 @@ -0,0 +1,10 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for i386 +###################################################################### + +CFLAGS+= -DAT386=1 +SFLAGS+= -DAT386=1 + +###################################################################### +#END Machine dependent Makefile fragment for i386 +###################################################################### diff --git a/osfmk/conf/Makefile.ppc b/osfmk/conf/Makefile.ppc new file mode 100644 index 000000000..ac2face46 --- /dev/null +++ b/osfmk/conf/Makefile.ppc @@ -0,0 +1,23 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for ppc 
+###################################################################### + +# +# KDB support +# + +makedis: $(SRCROOT)/osfmk/ddb/makedis.c + $(CC) -o $@ $< + +ppc_disasm.o_CFLAGS = -Dperror=db_printf -Dexit=db_error -Dmalloc=db_disasm_malloc + +ppc_disasm : $(SRCROOT)/osfmk/ppc/ppc_disasm.i makedis + ./makedis -w -h ./ppc_disasm.h $(SOURCE_DIR)/osfmk/ppc/ppc_disasm.i > ./ppc_disasm.c + +ppc_disasm.c ppc_disasm.h : ppc_disasm + +db_disasm.o : ppc_disasm.h + +###################################################################### +#END Machine dependent Makefile fragment for ppc +###################################################################### diff --git a/osfmk/conf/Makefile.template b/osfmk/conf/Makefile.template new file mode 100644 index 000000000..2d2c9abd0 --- /dev/null +++ b/osfmk/conf/Makefile.template @@ -0,0 +1,127 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# Export IDENT for sub-makefiles +# +export IDENT + +# +# XXX: INCFLAGS to include libsa prototypes +# +INCFLAGS_MAKEFILE= -I$(SOURCE)libsa + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +# +# XXX: CFLAGS +# +CFLAGS+= -DMACH_KERNEL_PRIVATE + +# +# Directories for mig generated files +# +COMP_SUBDIRS = \ + default_pager \ + device \ + mach_debug \ + mach \ + UserNotification + +COMP_SUBDIRS_I386 = \ + mach + +# +# Make sure we don't remove this by accident if interrupted at the wrong +# time. 
+# +.PRECIOUS: Makefile + +VERSION_FILES= \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.major \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.minor \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.variant + +COPYRIGHT_FILES = \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright.osf \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright.cmu + +# +# Theses macros are filled in by the config program depending on the +# current configuration. The MACHDEP macro is replaced by the +# contents of the machine dependent makefile template and the others +# are replaced by the corresponding symbol definitions for the +# configuration. +# + +%OBJS + +%CFILES + +%MFILES + +%SFILES + +%BFILES + +%ORDERED +%MACHDEP + +# +# OBJSDEPS is the set of files (defined in the machine dependent +# template if necessary) which all objects depend on (such as an +# in-line assembler expansion filter) +# +${OBJS}: ${OBJSDEPS} + + +%LOAD + +LDOBJS = $(OBJS) + +$(COMPONENT).o: $(LDOBJS) assym.s + @echo "[ creating $(COMPONENT).o ]" + $(RM) $(RMFLAGS) vers.c + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} + ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c + @echo [ updating $(COMPONENT).o ${OSFMK_KERNEL_CONFIG} ] + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o + +do_all: $(COMPONENT).o + +do_depend: do_all + ${MD} -u Makedep -f -d `ls *.d`; + +do_build_all: do_depend + +# genassym.o actually is an assembly file, +# we name it genassym.o to help with the automatic +# dependency generation + +genassym.o: $(SOURCE_DIR)/$(COMPONENT)/$(arch_config)/genassym.c + ${KCC} ${CFLAGS} -MD ${_HOST_EXTRA_CFLAGS} -S -o ${@} -c ${INCFLAGS} $< + +assym.s: genassym.o + sed -e '/#DEFINITION#/!d' -e 's/^.*#DEFINITION#//' -e 's/\$$//' genassym.o > ${@} + +${SOBJS}: assym.s + + +%RULES + +-include Makedep + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/osfmk/conf/copyright b/osfmk/conf/copyright new file mode 100644 index 000000000..b585cacab --- 
/dev/null +++ b/osfmk/conf/copyright @@ -0,0 +1,3 @@ +/* + * Copyright (c) 1988-1995,1997-1999 Apple Computer, Inc. All Rights Reserved. + */ diff --git a/osfmk/conf/copyright.cmu b/osfmk/conf/copyright.cmu new file mode 100644 index 000000000..0262cc90f --- /dev/null +++ b/osfmk/conf/copyright.cmu @@ -0,0 +1,25 @@ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ diff --git a/osfmk/conf/copyright.osf b/osfmk/conf/copyright.osf new file mode 100644 index 000000000..5aeff85c9 --- /dev/null +++ b/osfmk/conf/copyright.osf @@ -0,0 +1,49 @@ +# +# Copyright (C) 1991, 1992, 1993, 1994, 1995 +# Open Software Foundation Research Institute +# +# This software developed by the OSF Research Institute ("OSF/RI"). +# This software, both binary and source (hereafter, Software) is +# copyrighted by Open Software Foundation Research Institute +# and ownership remains with the OSF/RI. 
+# +# The OSF/RI grants you (hereafter, Licensee) a license to use the +# Software for academic, research and internal business purposes only, +# without a fee. Licensee may distribute the binary and source code (if +# released) to third parties provided that the copyright notice and this +# statement appears on all copies, no charge is associated with such +# copies, and the OSF/RI is publicly and prominently acknowledged as the +# source of this software. +# +# Licensee may make derivative works. However, if Licensee +# distributes any derivative work based on or derived from the +# Software, then Licensee will (1) notify the OSF/RI regarding its +# distribution of the derivative work, (2) clearly notify users +# that such derivative work is a modified version and not the +# original software distributed by the OSF/RI, and (3) the OSF/RI is +# publicly and prominently acknowledged as the source of this software. +# +# Any Licensee wishing to make commercial use of the Software should +# contact the OSF/RI to negotiate an appropriate +# license for such commercial use. Commercial use includes (1) +# integration of all or part of the source code into a product for +# sale or license by or on behalf of Licensee to third parties, or +# (2) distribution of the binary code or source code to third parties +# that need it to utilize a commercial product sold or licensed by or +# on behalf of Licensee. +# +# THE OSF/RI MAKES NO REPRESENTATIONS ABOUT THE SERVICEABILITY OF +# THIS SOFTWARE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT +# EXPRESS OR IMPLIED WARRANTY. THE OSF/RI SHALL NOT BE LIABLE FOR ANY +# DAMAGES SUFFERED BY THE USERS OF THIS SOFTWARE. +# +# By using or copying this Software, Licensee agrees to abide by the +# copyright law and all other applicable laws of the U.S. including, but +# not limited to, export control laws, and the terms of this license. 
+# The OSF/RI shall have the right to terminate this license immediately by +# written notice upon Licensee's breach of, or non-compliance with, any +# of its terms. Licensee may be held legally responsible for any +# copyright infringement that is caused or encouraged by Licensee's +# failure to abide by the terms of this license. +# +# Comments and questions are welcome and can be sent to ri-software@osf.org diff --git a/osfmk/conf/files b/osfmk/conf/files new file mode 100644 index 000000000..8fedad362 --- /dev/null +++ b/osfmk/conf/files @@ -0,0 +1,260 @@ +# +# @OSF_COPYRIGHT@ +# +# Mach Operating System +# Copyright (c) 1991,1990,1989 Carnegie Mellon University +# All Rights Reserved. +# +# Permission to use, copy, modify and distribute this software and its +# documentation is hereby granted, provided that both the copyright +# notice and this permission notice appear in all copies of the +# software, derivative works or modified versions, and any portions +# thereof, and that both notices appear in supporting documentation. +# +# CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" +# CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR +# ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. +# +# Carnegie Mellon requests users of this software to return to +# +# Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU +# School of Computer Science +# Carnegie Mellon University +# Pittsburgh PA 15213-3890 +# +# any improvements or extensions that they make and grant Carnegie Mellon +# the rights to redistribute these changes. +# + +# +# N.B. "kern/lock.c" is listed as "optional cpus" so that config will +# create a "cpus.h" file. 
+# + +OPTIONS/dli optional dli +OPTIONS/etap optional etap +OPTIONS/etap_lock_accumulate optional etap_lock_accumulate +OPTIONS/etap_lock_monitor optional etap_lock_monitor +OPTIONS/etap_event_monitor optional etap_event_monitor +OPTIONS/fast_idle optional fast_idle +OPTIONS/kdebug optional kdebug +OPTIONS/mach_assert optional mach_assert +OPTIONS/mach_debug optional mach_debug +OPTIONS/mach_machine_routines.h optional mach_machine_routines +# +OPTIONS/norma_vm optional norma_vm +OPTIONS/norma_task optional norma_task +OPTIONS/norma_ether optional norma_ether +OPTIONS/norma_scsi optional norma_scsi +OPTIONS/mach_cluster_stats optional mach_cluster_stats +OPTIONS/mach_counters optional mach_counters +OPTIONS/mach_host optional mach_host +OPTIONS/mach_ipc_debug optional mach_ipc_debug +OPTIONS/mach_ipc_test optional mach_ipc_test +OPTIONS/mach_kdb optional mach_kdb +OPTIONS/mach_kgdb optional mach_kgdb +OPTIONS/mach_kdp optional mach_kdp +OPTIONS/mach_kprof optional mach_kprof +OPTIONS/mach_ldebug optional mach_ldebug +OPTIONS/mach_lock_mon optional mach_lock_mon +OPTIONS/mach_mp_debug optional mach_mp_debug +OPTIONS/mach_pagemap optional mach_pagemap +OPTIONS/mach_prof optional mach_prof +OPTIONS/mach_rt optional mach_rt +OPTIONS/advisory_pageout optional advisory_pageout +# +# MACH_RT is real-time. MACH_TR is debugging. +# Unfortunate choice of letters. 
+# +OPTIONS/mach_tr optional mach_tr +OPTIONS/mach_vm_debug optional mach_vm_debug +OPTIONS/mach_page_hash_stats optional mach_page_hash_stats +OPTIONS/mig_debug optional mig_debug +OPTIONS/hw_footprint optional hw_footprint +OPTIONS/simple_clock optional simple_clock +OPTIONS/stat_time optional stat_time +OPTIONS/time_stamp optional time_stamp +OPTIONS/xpr_debug optional xpr_debug +OPTIONS/bootstrap_symbols optional bootstrap_symbols +OPTIONS/fast_tas optional fast_tas +OPTIONS/power_save optional power_save +OPTIONS/zone_debug optional zone_debug +OPTIONS/vm_cpm optional vm_cpm +OPTIONS/task_swapper optional task_swapper +OPTIONS/thread_swapper optional thread_swapper +OPTIONS/stack_usage optional stack_usage + +# Default pager and system pager files, to be moved to separate component + +osfmk/default_pager/default_pager.c standard +osfmk/default_pager/dp_backing_store.c standard +osfmk/default_pager/dp_memory_object.c standard +./default_pager/default_pager_alerts_user.c standard +./default_pager/default_pager_object_server.c standard + +# +# UserNotification files +# +./UserNotification/UNDRequest.c standard +./UserNotification/UNDReplyServer.c standard +osfmk/UserNotification/KUNCUserNotifications.c standard + +osfmk/ddb/db_access.c optional mach_kdb +osfmk/ddb/db_break.c optional mach_kdb +osfmk/ddb/db_command.c optional mach_kdb +osfmk/ddb/db_cond.c optional mach_kdb +osfmk/ddb/db_examine.c optional mach_kdb +osfmk/ddb/db_expr.c optional mach_kdb +osfmk/ddb/db_ext_symtab.c standard +osfmk/ddb/db_input.c optional mach_kdb +osfmk/ddb/db_lex.c optional mach_kdb +osfmk/ddb/db_macro.c optional mach_kdb +osfmk/ddb/db_output.c optional mach_kdb +osfmk/ddb/db_print.c optional mach_kdb +osfmk/ddb/db_run.c optional mach_kdb +osfmk/ddb/db_sym.c optional mach_kdb +osfmk/ddb/db_task_thread.c optional mach_kdb +osfmk/ddb/db_trap.c optional mach_kdb +osfmk/ddb/db_variables.c optional mach_kdb +osfmk/ddb/db_watch.c optional mach_kdb +osfmk/ddb/db_write_cmd.c optional 
mach_kdb + +osfmk/ddb/tr.c optional mach_tr +osfmk/kdp/kdp.c optional mach_kdp +osfmk/kdp/kdp_udp.c optional mach_kdp +osfmk/ipc/ipc_entry.c standard +osfmk/ipc/ipc_hash.c standard +osfmk/ipc/ipc_init.c standard +osfmk/ipc/ipc_kmsg.c standard +osfmk/ipc/ipc_mqueue.c standard +osfmk/ipc/ipc_notify.c standard +osfmk/ipc/ipc_object.c standard +osfmk/ipc/ipc_port.c standard +osfmk/ipc/ipc_pset.c standard +osfmk/ipc/ipc_right.c standard +osfmk/ipc/ipc_space.c standard +osfmk/ipc/ipc_splay.c standard +osfmk/ipc/ipc_table.c standard +osfmk/ipc/mach_debug.c standard +osfmk/ipc/mach_msg.c standard +osfmk/ipc/mach_port.c standard +osfmk/ipc/mig_log.c optional mig_debug +osfmk/kern/ast.c standard +osfmk/kern/clock.c standard +osfmk/kern/counters.c standard +osfmk/kern/cpu_data.c standard +osfmk/kern/debug.c standard +osfmk/kern/exception.c standard +osfmk/kern/etap.c standard +osfmk/kern/etap_pool.c optional etap +osfmk/kern/host.c standard +osfmk/kern/ipc_clock.c standard +osfmk/kern/ipc_host.c standard +osfmk/kern/ipc_kobject.c standard +osfmk/kern/ipc_mig.c standard +osfmk/kern/ipc_subsystem.c standard +osfmk/kern/ipc_sync.c standard +osfmk/kern/ipc_tt.c standard +osfmk/kern/kalloc.c standard +osfmk/kern/ledger.c standard +osfmk/kern/lock.c optional cpus +osfmk/kern/lock_mon.c optional mach_lock_mon +osfmk/kern/mach_clock.c standard +osfmk/kern/mach_factor.c standard +osfmk/kern/machine.c standard +osfmk/kern/mk_sp.c standard +osfmk/kern/mk_timer.c standard +osfmk/kern/profile.c standard +osfmk/kern/printf.c standard +osfmk/kern/priority.c standard +osfmk/kern/processor.c standard +osfmk/kern/queue.c standard +osfmk/kern/sched_prim.c standard +osfmk/kern/sf.c standard +osfmk/kern/spl.c standard +osfmk/kern/sscanf.c standard +osfmk/kern/startup.c standard +osfmk/kern/subsystem.c standard +osfmk/kern/sync_lock.c standard +osfmk/kern/sync_sema.c standard +osfmk/kern/syscall_emulation.c standard +osfmk/kern/syscall_subr.c standard +osfmk/kern/syscall_sw.c standard 
+osfmk/kern/task.c standard +osfmk/kern/task_policy.c standard +osfmk/kern/task_swap.c standard +osfmk/kern/thread.c standard +osfmk/kern/thread_act.c standard +osfmk/kern/thread_call.c standard +osfmk/kern/thread_policy.c standard +osfmk/kern/thread_pool.c standard +osfmk/kern/thread_swap.c standard +osfmk/kern/timer.c standard +osfmk/kern/timer_call.c standard +osfmk/kern/wait_queue.c standard +osfmk/kern/xpr.c optional xpr_debug +osfmk/kern/zalloc.c standard +osfmk/kern/bsd_kern.c optional mach_bsd +./mach/clock_server.c standard +./mach/clock_priv_server.c standard +./mach/clock_reply_user.c standard +./mach/exc_user.c standard +./mach/exc_server.c optional mach_bsd +./mach/host_priv_server.c standard +./mach/host_security_server.c standard +./mach/ledger_server.c standard +./mach/lock_set_server.c standard +./mach/mach_host_server.c standard +./mach/mach_port_server.c standard +./mach/memory_object_user.c standard +./mach/memory_object_server.c standard +./mach/memory_object_default_user.c standard +./mach/memory_object_control_server.c standard +./mach/memory_object_default_server.c standard +./mach/memory_object_name_server.c standard +./mach/upl_server.c standard +# +# JMM- right now we don't use the MIG-generated client interface +# for notifications, instead we hand create them. We need to switch +# when we can (i.e. when we can get the send-always behavior covered +# even with MIG-generated code). 
+# ./mach/notify_user.c standard +# +./mach/processor_server.c standard +./mach/processor_set_server.c standard +./mach/prof_user.c optional mach_prof +./mach/semaphore_server.c standard +./mach/task_server.c standard +./mach/thread_act_server.c standard +./mach/vm_map_server.c standard +osfmk/mach-o/mach_header.c standard +osfmk/vm/memory_object.c standard +osfmk/vm/vm_debug.c standard +osfmk/vm/vm_external.c optional mach_pagemap +osfmk/vm/vm_fault.c standard +osfmk/vm/vm_init.c standard +osfmk/vm/vm_kern.c standard +osfmk/vm/vm_map.c standard +osfmk/vm/vm_object.c standard +osfmk/vm/vm_pageout.c standard +osfmk/vm/vm_resident.c standard +osfmk/vm/vm_shared_memory_server.c standard +osfmk/vm/vm_user.c standard +osfmk/vm/bsd_vm.c optional mach_bsd + +# +# IOKit files, for a while +# +osfmk/device/iokit_rpc.c optional iokit +./device/device_server.c optional iokit +osfmk/device/device_init.c optional iokit + +# kernel module loader interface +osfmk/kern/kmod.c standard + +# +# This file should go away when we get +# libsa (it now only contains string +# funcs). 
+# +osfmk/device/subrs.c standard diff --git a/osfmk/conf/files.i386 b/osfmk/conf/files.i386 new file mode 100644 index 000000000..fbdac7ab9 --- /dev/null +++ b/osfmk/conf/files.i386 @@ -0,0 +1,96 @@ +OPTIONS/show_space optional show_space +OPTIONS/gdb optional gdb +OPTIONS/iplmeas optional iplmeas +OPTIONS/fb optional fb + + +#machdep/i386/unix_signal.c standard +#machdep/i386/unix_startup.c standard + +OPTIONS/debug optional debug + + +OPTIONS/gprof optional gprof +OPTIONS/db_machine_commands optional db_machine_commands +OPTIONS/dynamic_num_nodes optional dynamic_num_nodes +OPTIONS/vtoc_compat optional vtoc_compat +OPTIONS/fddi optional fddi +OPTIONS/mp_v1_1 optional mp_v1_1 + + + +osfmk/i386/hi_res_clock_map.c optional hi_res_clock + +osfmk/i386/pmap.c standard +osfmk/i386/read_fault.c standard + + +osfmk/ddb/db_aout.c optional mach_kdb + +osfmk/i386/bsd_i386.c optional mach_bsd +osfmk/i386/machdep_call.c optional mach_bsd + +osfmk/i386/_setjmp.s standard +osfmk/i386/ast_check.c optional ipsc386 +osfmk/i386/bcopy.s standard +osfmk/i386/bzero.s standard +osfmk/i386/cpu.c standard +osfmk/i386/cpuid.c standard +osfmk/i386/db_disasm.c optional mach_kdb +osfmk/i386/db_interface.c optional mach_kdb +osfmk/i386/db_trace.c optional mach_kdb +osfmk/i386/fpu.c standard +osfmk/i386/gcc.s standard +osfmk/i386/gdt.c standard +osfmk/i386/hardclock.c standard +osfmk/i386/i386_lock.s standard +osfmk/i386/idt.s standard +osfmk/i386/io_emulate.c standard +osfmk/i386/io_map.c standard +osfmk/i386/iopb.c standard +osfmk/i386/ktss.c standard +osfmk/i386/ldt.c standard +osfmk/i386/loose_ends.c standard +osfmk/i386/locore.s standard +osfmk/i386/start.s standard +osfmk/i386/cswitch.s standard +osfmk/i386/machine_routines.c standard +osfmk/i386/machine_routines_asm.s standard +osfmk/i386/mp_desc.c optional mp_v1_1 +osfmk/i386/ntoh.s standard +osfmk/i386/pcb.c standard +osfmk/i386/phys.c standard +osfmk/i386/rtclock.c standard +osfmk/i386/trap.c standard +osfmk/i386/user_ldt.c standard 
+ +osfmk/i386/AT386/autoconf.c standard +osfmk/i386/AT386/bbclock.c standard +osfmk/i386/AT386/conf.c standard +osfmk/i386/AT386/himem.c optional himem +osfmk/i386/AT386/model_dep.c standard +osfmk/i386/AT386/physmem.c optional physmem device-driver + +osfmk/i386/AT386/mp/mp.c optional mp_v1_1 +osfmk/i386/AT386/mp/mp_v1_1.c optional mp_v1_1 + +osfmk/i386/AT386/video_console.c standard + +osfmk/kern/etap_map.c optional etap device-driver + +osfmk/profiling/i386/profile-md.c optional gprof profiling-routine +osfmk/profiling/i386/profile-asm.s optional gprof profiling-routine +osfmk/profiling/profile-kgmon.c optional gprof profiling-routine +osfmk/profiling/profile-mk.c optional gprof profiling-routine + +osfmk/kdp/ml/i386/kdp_machdep.c optional mach_kdp + +osfmk/kdp/ml/i386/kdp_vm.c optional mach_kdp + +# +# Dummy devices for now +osfmk/OPTIONS/ln optional ln +osfmk/OPTIONS/eisa optional eisa +osfmk/OPTIONS/himem optional himem +osfmk/OPTIONS/ec optional ec +osfmk/OPTIONS/hi_res_clock optional hi_res_clock diff --git a/osfmk/conf/files.ppc b/osfmk/conf/files.ppc new file mode 100644 index 000000000..57f08299e --- /dev/null +++ b/osfmk/conf/files.ppc @@ -0,0 +1,101 @@ +# @OSF_COPYRIGHT@ +# + +OPTIONS/db_machine_commands optional db_machine_commands +OPTIONS/gprof optional gprof +OPTIONS/fpe optional fpe +OPTIONS/fddi optional fddi +OPTIONS/serial_console_default optional serial_console_default +OPTIONS/mp optional mp + +# lowmem_vectors.s must be at head of link line. 
+# template.mk treats this as a special case and makes sure +# that the file is placed at the front of the line + + +osfmk/ddb/db_aout.c optional mach_kdb +./ppc_disasm.c optional mach_kdb +osfmk/ppc/db_disasm.c optional mach_kdb +osfmk/ppc/db_asm.s optional mach_kdb +osfmk/ppc/db_interface.c optional mach_kdb +osfmk/ppc/db_trace.c optional mach_kdb +osfmk/ppc/db_low_trace.c optional mach_kdb + +osfmk/ppc/lowmem_vectors.s standard +osfmk/ppc/start.s standard +osfmk/ppc/_setjmp.s standard + +osfmk/ppc/cpu.c standard +osfmk/ppc/ppc_init.c standard +osfmk/ppc/ppc_vm_init.c standard +osfmk/ppc/bat_init.c standard +osfmk/ppc/model_dep.c standard +osfmk/ppc/mem.c standard +osfmk/ppc/pmap.c standard +osfmk/ppc/mappings.c standard +osfmk/ppc/savearea.c standard +osfmk/ppc/savearea_asm.s standard +osfmk/ppc/hw_vm.s standard +osfmk/ppc/hw_lock.s standard +osfmk/ppc/misc_asm.s standard +osfmk/ppc/status.c standard +osfmk/ppc/io_map.c standard +osfmk/ppc/trap.c standard +osfmk/ppc/alignment.c standard +osfmk/ppc/pcb.c standard +osfmk/ppc/bits.s standard +osfmk/ppc/stubs.c standard +osfmk/ppc/cswtch.s standard +osfmk/ppc/cache.s standard +osfmk/ppc/movc.s standard +osfmk/ppc/hw_exception.s standard +osfmk/ppc/bzero.s standard +osfmk/ppc/bcopy.s standard +osfmk/ppc/atomic_switch.s standard +osfmk/ppc/PseudoKernel.c standard +osfmk/ppc/misc.c standard +osfmk/ppc/interrupt.c standard +osfmk/ppc/machine_routines.c standard +osfmk/ppc/machine_routines_asm.s standard +#osfmk/ppc/Performance.s standard +osfmk/ppc/Emulate.s standard +osfmk/ppc/AltiAssist.s standard +osfmk/ppc/bsd_ppc.c optional mach_bsd +osfmk/ppc/conf.c standard +osfmk/ppc/rtclock.c standard +osfmk/ppc/Diagnostics.c standard +osfmk/ppc/PPCcalls.c standard +osfmk/ppc/vmachmon.c standard +osfmk/ppc/vmachmon_asm.s standard + +#osfmk/ppc/POWERMAC/ser16550.c standard +osfmk/ppc/POWERMAC/autoconf.c optional xxx +osfmk/ppc/POWERMAC/pci.c optional xxx +osfmk/ppc/POWERMAC/pci_probe.c optional xxx 
+osfmk/ppc/POWERMAC/pci_compat.c optional xxx + +osfmk/ppc/Firmware.s standard +osfmk/ppc/FirmwareC.c standard +osfmk/ppc/MPinterfaces.s standard +osfmk/ppc/POWERMAC/mp/MP_2p.s standard +osfmk/ppc/POWERMAC/mp/mp.c standard + +osfmk/ppc/aligned_data.s standard + +osfmk/kdp/ml/ppc/kdp_machdep.c optional mach_kdp +osfmk/kdp/ml/ppc/kdp_vm.c optional mach_kdp +osfmk/kdp/ml/ppc/kdp_misc.s optional mach_kdp +#osfmk/kdp/pe/POWERMAC/kdp_mace.c optional mach_kdp + +osfmk/ppc/serial_console.c optional scc device-driver +osfmk/ppc/POWERMAC/serial_io.c optional scc device-driver + +osfmk/ppc/POWERMAC/video_console.c optional vc device-driver +osfmk/ppc/POWERMAC/video_scroll.s optional vc device-driver + +osfmk/ppc/POWERMAC/dbdma.c standard + +# DUMMIES TO FORCE GENERATION OF .h FILES +OPTIONS/hi_res_clock optional hi_res_clock +OPTIONS/bm optional bm +OPTIONS/debug optional debug diff --git a/osfmk/conf/kernelversion.major b/osfmk/conf/kernelversion.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/osfmk/conf/kernelversion.major @@ -0,0 +1 @@ +1 diff --git a/osfmk/conf/kernelversion.minor b/osfmk/conf/kernelversion.minor new file mode 100644 index 000000000..00750edc0 --- /dev/null +++ b/osfmk/conf/kernelversion.minor @@ -0,0 +1 @@ +3 diff --git a/osfmk/conf/kernelversion.variant b/osfmk/conf/kernelversion.variant new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/osfmk/conf/kernelversion.variant @@ -0,0 +1 @@ + diff --git a/osfmk/conf/tools/Makefile b/osfmk/conf/tools/Makefile new file mode 100644 index 000000000..fdae6a573 --- /dev/null +++ b/osfmk/conf/tools/Makefile @@ -0,0 +1,38 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + doconf \ + kernel_newvers \ + newvers + +COMP_SUBDIRS = \ + doconf \ + 
kernel_newvers \ + newvers + +INST_SUBDIRS = \ + + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/conf/tools/doconf/Makefile b/osfmk/conf/tools/doconf/Makefile new file mode 100644 index 000000000..2bf0b7a10 --- /dev/null +++ b/osfmk/conf/tools/doconf/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)doconf + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + 
diff --git a/osfmk/conf/tools/doconf/doconf.csh b/osfmk/conf/tools/doconf/doconf.csh new file mode 100755 index 000000000..43388c11c --- /dev/null +++ b/osfmk/conf/tools/doconf/doconf.csh @@ -0,0 +1,313 @@ +#!/bin/csh -f +set path = ($path .) +###################################################################### +# HISTORY +# 1-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University +# Added "-verbose" switch, so this script produces no output +# in the normal case. +# +# 10-Oct-87 Mike Accetta (mja) at Carnegie-Mellon University +# Flushed cmu_*.h and spin_locks.h +# [ V5.1(XF18) ] +# +# 6-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# Use MASTER.local and MASTER..local for generation of +# configuration files in addition to MASTER and MASTER.. +# +# 25-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Removed use of obsolete wb_*.h files when building the feature +# list; modified to save the previous configuration file and +# display the differences between it and the new file. +# [ V5.1(F8) ] +# +# 25-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# If there is no /etc/machine just print out a message telling +# user to use the -cpu option. I thought this script was supposed +# to work even without a /etc/machine, but it doesn't... and this +# is the easiest way out. +# +# 13-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Added "romp_fpa.h" file to extra features for the RT. +# [ V5.1(F7) ] +# +# 11-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to maintain the appropriate configuration features file +# in the "machine" directory whenever the corresponding +# configuration is generated. This replaces the old mechanism of +# storing this directly in the file since it was +# machine dependent and also precluded building programs for more +# than one configuration from the same set of sources. 
+# [ V5.1(F6) ] +# +# 21-Feb-87 Mike Accetta (mja) at Carnegie-Mellon University +# Fixed to require wired-in cpu type names for only those +# machines where the kernel name differs from that provided by +# /etc/machine (i.e. IBMRT => ca and SUN => sun3); updated to +# permit configuration descriptions in both machine independent +# and dependent master configuration files so that attributes can +# be grouped accordingly. +# [ V5.1(F3) ] +# +# 17-Jan-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to work from any directory at the same level as +# "conf"; generate configuration from both MASTER and +# MASTER. files; added -cpu switch. +# [ V5.1(F1) ] +# +# 18-Aug-86 Mike Accetta (mja) at Carnegie-Mellon University +# Added -make switch and changed meaning of -config; upgraded to +# allow multiple attributes per configuration and to define +# configurations in terms of these attributes within MASTER. +# +# 14-Apr-83 Mike Accetta (mja) at Carnegie-Mellon University +# Added -config switch to only run /etc/config without +# "make depend" and "make". 
+# +###################################################################### + +set prog=$0 +set prog=$prog:t +set nonomatch +set OBJDIR=../BUILD +if ("`/usr/bin/uname`" == "Rhapsody" ) then +set CONFIG_DIR=/usr/local/bin +else +set CONFIG_DIR=/usr/bin +endif + +unset domake +unset doconfig +unset beverbose +unset MACHINE +unset profile + +while ($#argv >= 1) + if ("$argv[1]" =~ -*) then + switch ("$argv[1]") + case "-c": + case "-config": + set doconfig + breaksw + case "-m": + case "-make": + set domake + breaksw + case "-cpu": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set MACHINE="$argv[2]" + shift + breaksw + case "-d": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set OBJDIR="$argv[2]" + shift + breaksw + case "-verbose": + set beverbose + breaksw + case "-p": + case "-profile": + set profile + breaksw + default: + echo "${prog}: ${argv[1]}: unknown switch" + exit 1 + breaksw + endsw + shift + else + break + endif +end + +if ($#argv == 0) set argv=(GENERIC) + +if (! $?MACHINE) then + if (-d /NextApps) then + set MACHINE=`hostinfo | awk '/MC680x0/ { printf("m68k") } /MC880x0/ { printf("m88k") }'` + endif +endif + +if (! $?MACHINE) then + if (-f /etc/machine) then + set MACHINE="`/etc/machine`" + else + echo "${prog}: no /etc/machine, specify machine type with -cpu" + echo "${prog}: e.g. 
${prog} -cpu VAX CONFIGURATION" + exit 1 + endif +endif + +set FEATURES_EXTRA= + +switch ("$MACHINE") + case IBMRT: + set cpu=ca + set ID=RT + set FEATURES_EXTRA="romp_dualcall.h romp_fpa.h" + breaksw + case SUN: + set cpu=sun3 + set ID=SUN3 + breaksw + default: + set cpu=`echo $MACHINE | tr A-Z a-z` + set ID=`echo $MACHINE | tr a-z A-Z` + breaksw +endsw +set FEATURES=../h/features.h +set FEATURES_H=(cs_*.h mach_*.h net_*.h\ + cputypes.h cpus.h vice.h\ + $FEATURES_EXTRA) +set MASTER_DIR=../conf +set MASTER = ${MASTER_DIR}/MASTER +set MASTER_CPU=${MASTER}.${cpu} + +set MASTER_LOCAL = ${MASTER}.local +set MASTER_CPU_LOCAL = ${MASTER_CPU}.local +if (! -f $MASTER_LOCAL) set MASTER_LOCAL = "" +if (! -f $MASTER_CPU_LOCAL) set MASTER_CPU_LOCAL = "" + +if (! -d $OBJDIR) then + echo "[ creating $OBJDIR ]" + mkdir -p $OBJDIR +endif + +foreach SYS ($argv) + set SYSID=${SYS}_${ID} + set SYSCONF=$OBJDIR/config.$SYSID + set BLDDIR=$OBJDIR + if ($?beverbose) then + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + endif + echo +$SYS \ + | \ + cat $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL - \ + $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL \ + | \ + sed -n \ + -e "/^+/{" \ + -e "s;[-+];#&;gp" \ + -e 't loop' \ + -e ': loop' \ + -e 'n' \ + -e '/^#/b loop' \ + -e '/^$/b loop' \ + -e 's;^\([^#]*\).*#[ ]*<\(.*\)>[ ]*$;\2#\1;' \ + -e 't not' \ + -e 's;\([^#]*\).*;#\1;' \ + -e 't not' \ + -e ': not' \ + -e 's;[ ]*$;;' \ + -e 's;^\!\(.*\);\1#\!;' \ + -e 'p' \ + -e 't loop' \ + -e 'b loop' \ + -e '}' \ + -e "/^[^#]/d" \ + -e 's; ; ;g' \ + -e "s;^# *\([^ ]*\)[ ]*=[ ]*\[\(.*\)\].*;\1#\2;p" \ + | \ + awk '-F#' '\ +part == 0 && $1 != "" {\ + m[$1]=m[$1] " " $2;\ + next;\ +}\ +part == 0 && $1 == "" {\ + for (i=NF;i>1;i--){\ + s=substr($i,2);\ + c[++na]=substr($i,1,1);\ + a[na]=s;\ + }\ + while (na > 0){\ + s=a[na];\ + d=c[na--];\ + if (m[s] == "") {\ + f[s]=d;\ + } else {\ + nx=split(m[s],x," ");\ + for (j=nx;j>0;j--) {\ + z=x[j];\ + a[++na]=z;\ + 
c[na]=d;\ + }\ + }\ + }\ + part=1;\ + next;\ +}\ +part != 0 {\ + if ($1 != "") {\ + n=split($1,x,",");\ + ok=0;\ + for (i=1;i<=n;i++) {\ + if (f[x[i]] == "+") {\ + ok=1;\ + }\ + }\ + if (NF > 2 && ok == 0 || NF <= 2 && ok != 0) {\ + print $2; \ + }\ + } else { \ + print $2; \ + }\ +}\ +' >$SYSCONF.new + if (-z $SYSCONF.new) then + echo "${prog}: ${$SYSID}: no such configuration in $MASTER_DIR/MASTER{,.$cpu}" + rm -f $SYSCONF.new + endif + if (! -d $BLDDIR) then + echo "[ creating $BLDDIR ]" + mkdir -p $BLDDIR + endif +# +# These paths are used by config. +# +# "builddir" is the name of the directory where kernel binaries +# are put. It is a single path element, never absolute, and is +# always relative to "objectdir". "builddir" is used by config +# solely to determine where to put files created by "config" (e.g. +# the created Makefile and *.h's.) +# +# "objectdir" is the name of the directory which will hold "builddir". +# It is a path; if relative, it is relative to the current directory +# where config is run. It's sole use is to be prepended to "builddir" +# to indicate where config-created files are to be placed (see above). +# +# "sourcedir" is the location of the sources used to build the kernel. +# It is a path; if relative, it is relative to the directory specified +# by the concatenation of "objectdir" and "builddir" (i.e. where the +# kernel binaries are put). 
+# + echo 'builddir "."' >> $SYSCONF.new + set OBJRELDIR=`relpath $OBJROOT $OBJDIR` + echo 'objectdir "'$OBJROOT'/'$OBJRELDIR'"' >> $SYSCONF.new + set SRCDIR=`dirname $SOURCE` + echo 'sourcedir "'$SRCROOT'"' >> $SYSCONF.new + if (-f $SYSCONF) then + diff $SYSCONF $SYSCONF.new + rm -f $SYSCONF.old + mv $SYSCONF $SYSCONF.old + endif + rm -f $SYSCONF + mv $SYSCONF.new $SYSCONF + if ($?doconfig) then + echo "[ configuring $SYSID ]" + if ($?profile) then + $CONFIG_DIR/config -c $MASTER_DIR -p $SYSCONF + else + $CONFIG_DIR/config -c $MASTER_DIR $SYSCONF + endif + endif + if ($?domake) then + echo "[ making $SYSID ]" + (cd $BLDDIR; make) + endif +end diff --git a/osfmk/conf/tools/kernel_newvers/Makefile b/osfmk/conf/tools/kernel_newvers/Makefile new file mode 100644 index 000000000..7c749c5bf --- /dev/null +++ b/osfmk/conf/tools/kernel_newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/) +PROGRAM= $(DSTDIR)kernel_newvers + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: 
+ @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/conf/tools/kernel_newvers/kernel_newvers.csh b/osfmk/conf/tools/kernel_newvers/kernel_newvers.csh new file mode 100644 index 000000000..19859a46c --- /dev/null +++ b/osfmk/conf/tools/kernel_newvers/kernel_newvers.csh @@ -0,0 +1,39 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# kernel_newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +version="${major}.${minor}" +if [ -n "$variant" ]; then version="${version}.${variant}"; fi + +objdir="${OBJROOT}/${KERNEL_CONFIG}_${ARCH_CONFIG}" + time=`date` + who=`whoami` + +if [ -z "${objdir}" ] || [ -z "${time}" ]; then exit 1; fi + +CONFIG=`expr "${objdir}" : '.*/\([^/]*\)$'` +objdir=`expr "${objdir}" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int version_major = ${major};" ; + /bin/echo "int version_minor = ${minor};" ; + /bin/echo "char version_variant[] = \"${variant}\";" ; + /bin/echo "char version[] = \"Darwin Kernel Version ${version}:\\n${time}; ${who}:${objdir}\\n\\n\";" ; + /bin/echo "char osrelease[] = \"${version}\";" ; + /bin/echo "char ostype[] = \"Darwin\";" ; +) > kernel_vers.c + +if [ -s vers.suffix -o ! 
-f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/osfmk/conf/tools/newvers/Makefile b/osfmk/conf/tools/newvers/Makefile new file mode 100644 index 000000000..73603c753 --- /dev/null +++ b/osfmk/conf/tools/newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)newvers + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/conf/tools/newvers/newvers.csh b/osfmk/conf/tools/newvers/newvers.csh new file mode 100644 index 000000000..802c7ed2a --- /dev/null +++ b/osfmk/conf/tools/newvers/newvers.csh @@ -0,0 +1,33 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. 
The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +v="${major}.${minor}" d=`pwd` h="rcbuilder" t=`date` w=`whoami` +if [ -z "$d" -o -z "$h" -o -z "$t" ]; then + exit 1 +fi +CONFIG=`expr "$d" : '.*/\([^/]*\)$'` +d=`expr "$d" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int ${COMPONENT}_version_major = ${major};" ; + /bin/echo "int ${COMPONENT}_version_minor = ${minor};" ; + /bin/echo "char ${COMPONENT}_version_variant[] = \"${variant}\";" ; + /bin/echo "char ${COMPONENT}_version[] = \"Mach Component Version ${v}:\\n${t}; $w($h):$d\\n\";" ; + /bin/echo "char ${COMPONENT}_osrelease[] = \"${major}.${minor}\";" ; + /bin/echo "char ${COMPONENT}_ostype[] = \"Mach\";" ; +) > vers.c +if [ -s vers.suffix -o ! -f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/osfmk/conf/version.major b/osfmk/conf/version.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/osfmk/conf/version.major @@ -0,0 +1 @@ +1 diff --git a/osfmk/conf/version.minor b/osfmk/conf/version.minor new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/osfmk/conf/version.minor @@ -0,0 +1 @@ +0 diff --git a/osfmk/conf/version.variant b/osfmk/conf/version.variant new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/osfmk/conf/version.variant @@ -0,0 +1 @@ + diff --git a/osfmk/ddb/Makefile b/osfmk/ddb/Makefile new file mode 100644 index 000000000..266125550 --- /dev/null +++ b/osfmk/ddb/Makefile @@ -0,0 +1,29 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MIG_DEFS = \ + +MIG_HDRS = \ + +DATAFILES = \ + nlist.h stab.h \ + ${MIG_DEFS} + 
+MIGINCLUDES = \ + +EXPORT_MI_LIST = ${DATAFILES} ${_MIG_HDRS_} ${MIGINCLUDES} + +EXPORT_MI_DIR = ddb + +.ORDER: ${_MIG_HDRS_} ${MIGINCLUDES} + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/ddb/db_access.c b/osfmk/ddb/db_access.c new file mode 100644 index 000000000..75fe96d4d --- /dev/null +++ b/osfmk/ddb/db_access.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +#include +#include /* type definitions */ +#include +#include +#include +#include + + + +/* + * Access unaligned data items on aligned (longword) + * boundaries. + */ + +int db_access_level = DB_ACCESS_LEVEL; + +/* + * This table is for sign-extending things. + * Therefore its entries are signed, and yes + * they are in fact negative numbers. + * So don't put Us in it. Or Ls either. + * Otherwise there is no point having it, n'est pas ? 
+ */ +static int db_extend[sizeof(long)+1] = { /* table for sign-extending */ +#if defined(__arch64__) + 0, + 0xFFFFFFFFFFFFFF80, + 0xFFFFFFFFFFFF8000, + 0xFFFFFFFFFF800000, + 0xFFFFFFFF80000000, + 0xFFFFFF8000000000, + 0xFFFF800000000000, + 0xFF80000000000000, + 0x8000000000000000, +#else /* !defined(__arch64__) */ + 0, + 0xFFFFFF80, + 0xFFFF8000, + 0xFF800000, + 0x80000000 +#endif /* defined(__arch64__) */ +}; + +db_expr_t +db_get_task_value( + db_addr_t addr, + register int size, + boolean_t is_signed, + task_t task) +{ + char data[sizeof(db_expr_t)]; + register db_expr_t value; + register int i; + + db_read_bytes((vm_offset_t)addr, size, data, task); + + value = 0; +#if BYTE_MSF + for (i = 0; i < size; i++) +#else /* BYTE_LSF */ + for (i = size - 1; i >= 0; i--) +#endif + { + value = (value << 8) + (data[i] & 0xFF); + } + + if (size <= sizeof(int)) { + if (is_signed && (value & db_extend[size]) != 0) + value |= db_extend[size]; + } + return (value); +} + +void +db_put_task_value( + db_addr_t addr, + register int size, + register db_expr_t value, + task_t task) +{ + char data[sizeof(db_expr_t)]; + register int i; + +#if BYTE_MSF + for (i = size - 1; i >= 0; i--) +#else /* BYTE_LSF */ + for (i = 0; i < size; i++) +#endif + { + data[i] = value & 0xFF; + value >>= 8; + } + + db_write_bytes((vm_offset_t)addr, size, data, task); +} + +db_expr_t +db_get_value( + db_addr_t addr, + int size, + boolean_t is_signed) +{ + return(db_get_task_value(addr, size, is_signed, TASK_NULL)); +} + +void +db_put_value( + db_addr_t addr, + int size, + db_expr_t value) +{ + db_put_task_value(addr, size, value, TASK_NULL); +} diff --git a/osfmk/ddb/db_access.h b/osfmk/ddb/db_access.h new file mode 100644 index 000000000..89e92c114 --- /dev/null +++ b/osfmk/ddb/db_access.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 7/90 + */ +/* + * Data access functions for debugger. + */ + +#ifndef _DDB_DB_ACCESS_H_ +#define _DDB_DB_ACCESS_H_ + +#include +#include +#include + +/* implementation dependent access capability */ +#define DB_ACCESS_KERNEL 0 /* only kernel space */ +#define DB_ACCESS_CURRENT 1 /* kernel or current task space */ +#define DB_ACCESS_ANY 2 /* any space */ + +#ifndef DB_ACCESS_LEVEL +#define DB_ACCESS_LEVEL DB_ACCESS_KERNEL +#endif /* DB_ACCESS_LEVEL */ + +#ifndef DB_VALID_KERN_ADDR +#define DB_VALID_KERN_ADDR(addr) ((addr) >= VM_MIN_KERNEL_ADDRESS \ + && (addr) < VM_MAX_KERNEL_ADDRESS) +#define DB_VALID_ADDRESS(addr,user) ((user != 0) ^ DB_VALID_KERN_ADDR(addr)) +#define DB_PHYS_EQ(task1,addr1,task2,addr2) 0 +#define DB_CHECK_ACCESS(addr,size,task) db_is_current_space(task) +#endif /* DB_VALID_KERN_ADDR */ + +extern int db_access_level; + + + +/* Prototypes for functions exported by ddb/db_access.c. + */ +db_expr_t db_get_task_value( + db_addr_t addr, + register int size, + boolean_t is_signed, + task_t task); + +void db_put_task_value( + db_addr_t addr, + register int size, + register db_expr_t value, + task_t task); + +db_expr_t db_get_value( + db_addr_t addr, + int size, + boolean_t is_signed); + +void db_put_value( + db_addr_t addr, + int size, + db_expr_t value); + +#endif /* !_DDB_DB_ACCESS_H_ */ diff --git a/osfmk/ddb/db_aout.c b/osfmk/ddb/db_aout.c new file mode 100644 index 000000000..05d507670 --- /dev/null +++ b/osfmk/ddb/db_aout.c @@ -0,0 +1,955 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +/* + * Symbol table routines for a.out format files. 
+ */ +#include +#include +#include /* data types */ +#include /* For strcpy(), strcmp() */ +#include +#include /* For db_printf() */ +#include + +#ifndef DB_NO_AOUT + +#include /* a.out symbol table */ +#include + +#define private static + +private int aout_db_order_symbols(char *, char *); +private int aout_db_compare_symbols(char *, char *); +private boolean_t aout_db_is_filename(char *); +private boolean_t aout_db_eq_name(struct nlist *, char *, int); + +/* + * An a.out symbol table as loaded into the kernel debugger: + * + * symtab -> size of symbol entries, in bytes + * sp -> first symbol entry + * ... + * ep -> last symbol entry + 1 + * strtab == start of string table + * size of string table in bytes, + * including this word + * -> strings + */ + +/* + * Find pointers to the start and end of the symbol entries, + * given a pointer to the start of the symbol table. + */ +#define db_get_aout_symtab(symtab, sp, ep) \ + (sp = (struct nlist *)(((vm_offset_t *)(symtab)) + 1), \ + ep = (struct nlist *)((char *)sp + *((int *)(symtab)))) + +char *db_sorting_sym_end; + +private int +aout_db_order_symbols( + char *s1, + char *s2) +{ + struct nlist *sym1 = (struct nlist *) s1; + struct nlist *sym2 = (struct nlist *) s2; + + if (sym1->n_value != sym2->n_value) + return (sym1->n_value - sym2->n_value); + else { + return (sym1->n_un.n_name - sym2->n_un.n_name); + } +} + +private int +aout_db_compare_symbols( + char *sym1, + char *sym2) +{ + return (((struct nlist *) sym1)->n_value - + ((struct nlist *) sym2)->n_value); +} + +int db_sorting_limit = 50000; + +boolean_t +aout_db_sym_init( + char * symtab, /* pointer to start of symbol table */ + char * esymtab, /* pointer to end of string table, + for checking - may be rounded up to + integer boundary */ + char * name, + char * task_addr) /* use for this task only */ +{ + struct nlist *sym_start, *sym_end, *dbsym_start, *dbsym_end; + struct nlist *sp; + char *strtab, *dbstrtab; + int strlen; + char *estrtab, *dbestrtab; + 
unsigned long minsym = ~0; + unsigned long maxsym = 0; + boolean_t sorted; + boolean_t sorting; + extern boolean_t getsymtab(char *, + vm_offset_t *, int *, + vm_offset_t *, vm_size_t *); + int nsyms; + + + if (!getsymtab(symtab, + (vm_offset_t *)&sym_start, &nsyms, + (vm_offset_t *)&strtab, (vm_size_t *)&strlen)) { + return(FALSE); + } + sym_end = sym_start + nsyms; + estrtab = strtab + strlen; + +/* + * We haven't actually started up VM yet, so we can just steal some pages to + * make a working copy of the symbols and strings + */ + + dbsym_start = (struct nlist *)pmap_steal_memory(((unsigned int)sym_end - (unsigned int)sym_start + 4096) & -4096); /* Get space for symbols */ + dbstrtab = (char *)pmap_steal_memory(((unsigned int)estrtab - (unsigned int)strtab + 4096) & -4096); /* Get space for strings */ + + bcopy((char *)sym_start, (char *)dbsym_start, (unsigned int)sym_end - (unsigned int)sym_start); /* Copy symbols */ + bcopy(strtab, dbstrtab, (unsigned int)estrtab - (unsigned int)strtab); /* Copy strings */ + + dbsym_end = dbsym_start + nsyms; + dbestrtab = dbstrtab + strlen; + + sorting = ((dbsym_end - dbsym_start) < db_sorting_limit); + + for (sp = dbsym_start; sp < dbsym_end; sp++) { + register long strx; + strx = sp->n_un.n_strx; + if (strx != 0) { + if (strx > strlen) { + sp->n_un.n_name = 0; + continue; + } + sp->n_un.n_name = dbstrtab + strx; + } + if (sp->n_type != N_ABS) { + if (sp->n_value > 0 && sp->n_value < minsym) + minsym = sp->n_value; + if (sp->n_value > maxsym) + maxsym = sp->n_value; + } + } + + if (maxsym < minsym) + minsym = maxsym = 0; + + if (sorting) { + db_qsort((char *) dbsym_start, dbsym_end - dbsym_start, + sizeof (struct nlist), aout_db_order_symbols); + sorted = TRUE; + } else + sorted = FALSE; + + if (db_add_symbol_table(SYMTAB_AOUT, + (char*)dbsym_start, + (char*)dbsym_end, + name, + 0, + task_addr, + minsym, + maxsym, + sorted)) + { + /* Successfully added symbol table */ + + pmap_protect(kernel_pmap, + (vm_offset_t) 
dbsym_start, (vm_offset_t) dbsym_end, + VM_PROT_READ|VM_PROT_WRITE); + pmap_protect(kernel_pmap, + (vm_offset_t) dbstrtab, (vm_offset_t) dbestrtab, + VM_PROT_READ|VM_PROT_WRITE); + return TRUE; + } + return FALSE; +} + +/* + * This KLUDGE offsets the n_values of a copied symbol table + */ +void db_clone_offsetXXX(char *, long); +void +db_clone_offsetXXX(char * symtab, long offset) +{ + register struct nlist *sym_start, *sym_end, *sp; + + db_get_aout_symtab((int *)symtab, sym_start, sym_end); + + for (sp = sym_start; sp < sym_end; sp++) + if (sp->n_type != N_ABS) + sp->n_value += offset; +} +/* end KLUDGE */ + +/* + * check file name or not (check xxxx.x pattern) + */ +private boolean_t +aout_db_is_filename(char *name) +{ + while (*name) { + if (*name == '.') { + if (name[1]) + return(TRUE); + } + name++; + } + return(FALSE); +} + +/* + * special name comparison routine with a name in the symbol table entry + */ +private boolean_t +aout_db_eq_name( + struct nlist *sp, + char *name, + int incomplete) +{ + register char *s1, *s2; + + s1 = sp->n_un.n_name; + s2 = name; +#ifndef __NO_UNDERSCORES__ + if (*s1 == '_' && *s2 && *s2 != '_') + s1++; +#endif /* __NO_UNDERSCORES__ */ + while (*s2) { + if (*s1++ != *s2++) { + /* + * check .c .o file name comparison case + */ + if (*s2 == 0 && sp->n_un.n_name <= s1 - 2 + && s1[-2] == '.' && s1[-1] == 'o') + return(TRUE); + return(FALSE); + } + } + if (incomplete) + return(TRUE); + /* + * do special check for + * xxx:yyy for N_FUN + * xxx.ttt for N_DATA and N_BSS + */ + return(*s1 == 0 || (*s1 == ':' && sp->n_type == N_FUN) || + (*s1 == '.' 
&& (sp->n_type == N_DATA || sp->n_type == N_BSS))); +} + +/* + * search a symbol table with name and type + * fp(in,out): last found text file name symbol entry + */ +private struct nlist * +aout_db_search_name( + struct nlist *sp, + struct nlist *ep, + char *name, + int type, + struct nlist **fp, + int incomplete) +{ + struct nlist *file_sp = *fp; + struct nlist *found_sp = 0; + + for ( ; sp < ep; sp++) { + if (sp->n_other) + sp->n_other = 0; + if (sp->n_type == N_TEXT && aout_db_is_filename(sp->n_un.n_name)) + *fp = sp; + if (type) { + if (sp->n_type == type) { + /* dwm_debug: b26 name, mk6 added last param */ + if (aout_db_eq_name(sp, name, 0)) + return(sp); + } + if (sp->n_type == N_SO) + *fp = sp; + continue; + } + if (sp->n_type & N_STAB) + continue; + if (sp->n_un.n_name && aout_db_eq_name(sp, name, incomplete)) { + /* + * In case of qaulified search by a file, + * return it immediately with some check. + * Otherwise, search external one + */ + if (file_sp) { + if ((file_sp == *fp) || (sp->n_type & N_EXT)) + return(sp); + } else if ((sp->n_type & N_EXT) || + (incomplete && !aout_db_is_filename(sp->n_un.n_name))) + return(sp); + else + found_sp = sp; + } + } + return(found_sp); +} + +/* + * Print sorted possible completions for a symbol. + * Use n_other field to mark completion symbols in order + * to speed up sort. 
+ */ +int +aout_db_qualified_print_completion( + db_symtab_t *stab, + char *sym) +{ + struct nlist *sp; + struct nlist *sp1; + struct nlist *ep; + struct nlist *ep1; + struct nlist *fp = 0; + int symlen; + int nsym = 0; + struct nlist *cur; + struct nlist *new; + char *fname; + int func; + int line; + + sp = aout_db_search_name((struct nlist *)stab->start, + (struct nlist *)stab->end, + sym, 0, &fp, 1); + if (sp == (struct nlist *)0) + return 0; + + symlen = strlen(sym); + cur = sp; + while (cur) { + if (strncmp(cur->n_un.n_name, sym, symlen) == 0) + cur->n_other = 1; + else + cur->n_other = 2; + ep = cur; + cur = aout_db_search_name(cur + 1, (struct nlist *)stab->end, + sym, 0, &fp, 1); + } + + sp1 = sp; + for (;;) { + new = cur = sp; + while (++cur <= ep) + if (cur->n_other) { + if (sp1 == sp) + sp1 = cur; + if (strcmp(&cur->n_un.n_name[cur->n_other - 1], + &new->n_un.n_name[new->n_other - 1]) < 0) + new = cur; + else + ep1 = cur; + } + + func = line = 0; + if ((new->n_type & N_EXT) == 0) { + for (cur = new - 1; cur > (struct nlist *)stab->start; cur--) { + if (cur->n_type == N_SO || + (stab->sorted && cur->n_value < new->n_value)) + break; + if (line == 0 && + cur->n_type == N_SLINE && + cur->n_value == new->n_value) + line = cur->n_desc; + if (func == 0 && + cur->n_type == N_FUN && + cur->n_value == new->n_value) + func = 1; + } + + if (cur->n_type == N_SO) + fname = cur->n_un.n_name; + else + fname = (char *)0; + + if (line == 0 || func == 0) + for (cur = new + 1; + cur < (struct nlist *)stab->end; cur++) { + if (cur->n_type == N_SO || + (stab->sorted && cur->n_value > new->n_value)) + break; + if (line == 0 && + cur->n_type == N_SLINE && + cur->n_value == new->n_value) { + line = cur->n_desc; + if (func) + break; + } + if (func == 0 && + cur->n_type == N_FUN && + cur->n_value == new->n_value) { + func = 1; + if (line) + break; + } + } + } else { + fname = (char *)0; + for (cur = new - 1; cur > (struct nlist *)stab->start; cur--) { + if (cur->n_type == N_SO || 
+ (stab->sorted && cur->n_value < new->n_value)) + break; + if (func == 0 && + cur->n_type == N_FUN && + cur->n_value == new->n_value) + func = 1; + } + if (func == 0) + for (cur = new + 1; + cur < (struct nlist *)stab->end; cur++) { + if (cur->n_type == N_SO || + (stab->sorted && cur->n_value > new->n_value)) + break; + if (cur->n_type == N_FUN && + cur->n_value == new->n_value) { + func = 1; + break; + } + } + } + + db_sym_print_completion(stab, &new->n_un.n_name[new->n_other - 1], + func, fname, line); + nsym++; + new->n_other = 0; + + if (new == sp) { + if (sp1 == sp) + break; + sp = sp1; + } else if (new == sp1) + sp1 = sp; + + if (new == ep) + ep = ep1; + } + return nsym; +} + +/* + * search a (possibly incomplete) symbol with file, func and line qualification + */ +private int +aout_db_qualified_search( + db_symtab_t *stab, + char *file, + char *sym, + int line, + db_sym_t *ret, + char **name, + int *len) +{ + register struct nlist *sp = (struct nlist *)stab->start; + struct nlist *ep = (struct nlist *)stab->end; + struct nlist *fp = 0; + struct nlist *found_sp; + unsigned long func_top; + boolean_t in_file; + int nsym = 0; + int i; + char *p; + + if (file == 0 && sym == 0) + return(0); + if (file) { + if ((sp = aout_db_search_name(sp, ep, file, N_TEXT, &fp, 0)) == 0) + return(0); + } + if (sym) { + for (;;) { + sp = aout_db_search_name(sp, ep, sym, (line > 0)? 
N_FUN: 0, &fp, + (ret == (db_sym_t *)0)); + if (sp == 0) + return(nsym); + if (ret) + break; + + if (strncmp(sp->n_un.n_name, sym, strlen(sym)) == 0) + p = sp->n_un.n_name; + else + p = &sp->n_un.n_name[1]; + + if (*name == (char *)0) { + *name = p; + *len = strlen(p); + } else { + for (i = 0; i < *len; i++) + if ((*name)[i] != p[i]) { + *len = i; + break; + } + } + + nsym++; + sp++; + } + } + if (line > 0) { + if (file && !aout_db_eq_name(fp, file, 0)) + return(0); + found_sp = 0; + if (sp->n_type == N_FUN) { + /* + * qualfied by function name + * search backward because line number entries + * for the function are above it in this case. + */ + func_top = sp->n_value; + if (stab->sorted) { + /* symbols with the same value may have been mixed up */ + do { + sp++; + } while (sp->n_value == func_top); + } + for (sp--; sp >= (struct nlist *)stab->start; sp--) { + if (sp->n_type != N_SLINE) + continue; + if (sp->n_value < func_top) + break; + if (sp->n_desc <= line) { + if (found_sp == 0 || found_sp->n_desc < sp->n_desc) + found_sp = sp; + if (sp->n_desc == line) + break; + } + } + if (sp->n_type != N_SLINE || sp->n_value < func_top) + return(0); + } else { + /* + * qualified by only file name + * search forward in this case + */ + in_file = TRUE; + if (stab->sorted) { + /* symbols with the same value may have been mixed up */ + func_top = sp->n_value; + do { + sp--; + } while (sp->n_value == func_top); + } + for (sp++; sp < ep; sp++) { + if (sp->n_type == N_TEXT + && aout_db_is_filename(sp->n_un.n_name)) + break; /* enter into another file */ + if (sp->n_type == N_SOL) { + in_file = aout_db_eq_name(sp, file, 0); + continue; + } + if (!in_file || sp->n_type != N_SLINE) + continue; + if (sp->n_desc <= line) { + if (found_sp == 0 || found_sp->n_desc < sp->n_desc) + found_sp = sp; + if (sp->n_desc == line) + break; + } + } + } + sp = found_sp; + } + *ret = (db_sym_t) sp; + return(1); +} + +/* + * lookup symbol by name + */ +db_sym_t +aout_db_lookup( + db_symtab_t *stab, + 
char * symstr) +{ + return(db_sym_parse_and_lookup(aout_db_qualified_search, stab, symstr)); +} + +/* + * lookup (possibly incomplete) symbol by name + */ +int +aout_db_lookup_incomplete( + db_symtab_t *stab, + char * symstr, + char ** name, + int *len, + int *toadd) +{ + return(db_sym_parse_and_lookup_incomplete(aout_db_qualified_search, + stab, symstr, name, len, toadd)); +} + +/* + * Display possible completion for the symbol + */ +int +aout_db_print_completion(stab, symstr) + db_symtab_t *stab; + char * symstr; +{ + + return(db_sym_parse_and_print_completion(aout_db_qualified_print_completion, + stab, symstr)); +} + +db_sym_t +aout_db_search_symbol( + db_symtab_t *symtab, + db_addr_t off, + db_strategy_t strategy, + db_expr_t *diffp) /* in/out */ +{ + register unsigned long diff = *diffp; + register struct nlist *symp = 0; + struct nlist *sp, *ep, *cp; + boolean_t first_pass = FALSE; + + sp = (struct nlist *)symtab->start; + ep = (struct nlist *)symtab->end; + + if (symtab->sorted) { + struct nlist target; + + target.n_value = off; + target.n_un.n_name = (char *) 0; + target.n_other = (char) 0; + db_qsort_limit_search((char *) &target, (char **) &sp, (char **) &ep, + sizeof (struct nlist), aout_db_compare_symbols); + first_pass = TRUE; + } + + try_again: + for (cp = ep-1; cp >= sp; cp--) { + if (cp->n_un.n_name == 0) + continue; + if ((cp->n_type & N_STAB) != 0) + continue; + if (strategy == DB_STGY_XTRN && (cp->n_type & N_EXT) == 0) + continue; + if (off >= cp->n_value) { + if (off - cp->n_value < diff) { + diff = off - cp->n_value; + symp = cp; + if (diff == 0 && (cp->n_type & N_EXT)) + break; + } + else if (off - cp->n_value == diff) { + if (symp == 0) + symp = cp; + else if ((symp->n_type & N_EXT) == 0 && + (cp->n_type & N_EXT) != 0) + symp = cp; /* pick the external symbol */ + } + } + } + if (symp == 0) { + if (first_pass) { + first_pass = FALSE; + sp = (struct nlist *) symtab->start; + goto try_again; + } + *diffp = off; + } + else { + *diffp = diff; + } 
+ return ((db_sym_t)symp); +} + +/* + * Return the name and value for a symbol. + */ +void +aout_db_symbol_values( + db_sym_t sym, + char **namep, + db_expr_t *valuep) +{ + register struct nlist *sp; + + sp = (struct nlist *)sym; + if (namep) + *namep = sp->n_un.n_name; + if (valuep) + *valuep = sp->n_value; +} + +#define X_DB_MAX_DIFF 8 /* maximum allowable diff at the end of line */ +extern int db_search_maxoff; /* maximum acceptable offset */ + +/* + * search symbol by value + */ +db_sym_t +aout_db_search_by_addr( + db_symtab_t *stab, + db_addr_t addr, + char **file, + char **func, + int *line, + db_expr_t *diff, + int *args) +{ + struct nlist *sp, *cp; + register struct nlist *line_sp, *func_sp, *file_sp, *line_func; + unsigned long func_diff, line_diff; + boolean_t found_line = FALSE; + struct nlist *ep = (struct nlist *)stab->end; + boolean_t first_pass = FALSE; + + /* + * 92-May-16 + * Added init of these two... not sure if it's correct, but + * can't be worse than random values.... 
-- jfriedl@omron.co.jp + */ + func_diff = line_diff = /*HUGE*/0x0fffffff; + + line_sp = func_sp = file_sp = line_func = 0; + *file = *func = 0; + *line = 0; + *args = -1; + + sp = (struct nlist *)stab->start; + if (stab->sorted) { + struct nlist target; + + target.n_value = addr; + target.n_un.n_name = (char *) 0; + target.n_other = (char) 0; + db_qsort_limit_search((char *) &target, (char **) &sp, + (char **) &ep, sizeof (struct nlist), + aout_db_compare_symbols); + first_pass = TRUE; + } + + for (cp = sp; cp < ep; cp++) { + switch(cp->n_type) { + case N_SLINE: + if (cp->n_value <= addr) { + if (line_sp == 0 || line_diff >= addr - cp->n_value) { + if (line_func) + line_func = 0; + line_sp = cp; + line_diff = addr - cp->n_value; + } + } + if (cp->n_value >= addr && line_sp) + found_line = TRUE; + continue; + case N_FUN: + if ((found_line || (line_sp && line_diff < X_DB_MAX_DIFF)) + && line_func == 0) + line_func = cp; + continue; + case N_SO: + if (cp->n_value > addr) + continue; + if (file_sp == 0 || file_sp->n_value <= cp->n_value) + file_sp = cp; + continue; + case N_TEXT: + if (aout_db_is_filename(cp->n_un.n_name)) { + if (cp->n_value > addr) + continue; + if (file_sp == 0 || file_sp->n_value <= cp->n_value) + file_sp = cp; + } else if (cp->n_value <= addr && + (func_sp == 0 || func_diff > addr - cp->n_value)) { + func_sp = cp; + func_diff = addr - cp->n_value; + } + continue; + case N_TEXT|N_EXT: + if (cp->n_value <= addr && + (func_sp == 0 || func_diff >= addr - cp->n_value)) { + func_sp = cp; + func_diff = addr - cp->n_value; + if (func_diff == 0 && file_sp && func_sp && line_sp == 0) + break; + } + default: + if (stab->sorted) { + if ((cp->n_value > addr) && + (cp->n_value - addr > db_search_maxoff)) + break; + } + continue; + } + break; + } + if (first_pass && (!file_sp || !line_sp || !func_sp)) { + first_pass = FALSE; + cp = sp; + sp = (struct nlist *)stab->start; + for (; cp >= sp; cp--) { + switch(cp->n_type) { + case N_SLINE: + if (line_sp) + 
found_line = TRUE; + continue; + case N_FUN: + if ((found_line || (line_sp && line_diff < X_DB_MAX_DIFF)) + && line_func == 0) + line_func = cp; + continue; + case N_SO: + if (file_sp == 0) + file_sp = cp; + continue; + case N_TEXT: + if (aout_db_is_filename(cp->n_un.n_name)) { + if (file_sp == 0) + file_sp = cp; + } else if (func_sp == 0) { + func_sp = cp; + func_diff = addr - cp->n_value; + } + continue; + case N_TEXT|N_EXT: + if (func_sp == 0) { + func_sp = cp; + func_diff = addr - cp->n_value; + if (func_diff == 0 && file_sp && func_sp + && line_sp == 0) + break; + } + default: + if (line_sp && file_sp && + addr - cp->n_value > db_search_maxoff) + break; + continue; + } + break; + } + } +#if 0 +/* + * XXX - barbou@gr.osf.org + * I don't know if that code is useful to something, but it makes the -gline + * option of gcc useless. + */ + if (line_sp) { + if (line_func == 0 || func_sp == 0 + || line_func->n_value != func_sp->n_value) + line_sp = 0; + } +#else + if (line_sp && !found_line) { + line_sp = 0; + } +#endif + *diff = 0; + if (file_sp) { + *diff = addr - file_sp->n_value; + *file = file_sp->n_un.n_name; + } + if (line_sp) { + *diff = addr - line_sp->n_value; + *line = line_sp->n_desc; + } + if (func_sp) { + *diff = addr - func_sp->n_value; + *func = (func_sp->n_un.n_name[0] == '_')? + func_sp->n_un.n_name + 1: func_sp->n_un.n_name; + if (line_func && (line_func->n_desc & 0x4000)) + *args = line_func->n_desc & 0x3ff; + } + return((db_sym_t) func_sp); +} + +/* + * Find filename and lineno within, given the current pc. + */ +boolean_t +aout_db_line_at_pc( + db_symtab_t *stab, + db_sym_t sym, + char **file, + int *line, + db_expr_t pc) +{ + char *func; + db_expr_t diff; + boolean_t found; + int args; + + found = (aout_db_search_by_addr(stab, (unsigned)pc, file, &func, line, + &diff, &args) + != DB_SYM_NULL); + return(found && func && *file); +} + +/* + * Initialization routine for a.out files. 
+ */ +void +aout_db_init(void) +{ + extern struct mach_header _mh_execute_header; + + aout_db_sym_init((char *) &_mh_execute_header, + (char *)0, "mach", (char *)0); +} + +#endif /* DB_NO_AOUT */ diff --git a/osfmk/ddb/db_aout.h b/osfmk/ddb/db_aout.h new file mode 100644 index 000000000..28b9f4255 --- /dev/null +++ b/osfmk/ddb/db_aout.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Symbol table routines for a.out format files. 
+ */ + +#ifndef _DDB_DB_AOUT_H_ +#define _DDB_DB_AOUT_H_ + +#include /* data types */ +#include /* db_symtab_t */ + +boolean_t aout_db_sym_init( + char * symtab, + char * esymtab, + char * name, + char * task_addr); + +db_sym_t aout_db_lookup( + db_symtab_t *stab, + char * symstr); + +int aout_db_lookup_incomplete( + db_symtab_t *stab, + char * symstr, + char ** name, + int *len, + int *toadd); + +int aout_db_print_completion( + db_symtab_t *stab, + char * symstr); + +db_sym_t aout_db_search_symbol( + db_symtab_t *symtab, + db_addr_t off, + db_strategy_t strategy, + db_expr_t *diffp); /* in/out */ + +void aout_db_symbol_values( + db_sym_t sym, + char **namep, + db_expr_t *valuep); + +db_sym_t aout_db_search_by_addr( + db_symtab_t *stab, + db_addr_t addr, + char **file, + char **func, + int *line, + db_expr_t *diff, + int *args); + +boolean_t aout_db_line_at_pc( + db_symtab_t *stab, + db_sym_t sym, + char **file, + int *line, + db_expr_t pc); + +int aout_db_qualified_print_completion( + db_symtab_t *stab, + char *sym); + +void aout_db_init(void); + +#endif /* !_DDB_DB_AOUT_H_ */ diff --git a/osfmk/ddb/db_break.c b/osfmk/ddb/db_break.c new file mode 100644 index 000000000..4f7df3ec1 --- /dev/null +++ b/osfmk/ddb/db_break.c @@ -0,0 +1,997 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/29 17:35:26 mburg + * MK7.3 merger + * + * Revision 1.2.47.1 1998/02/03 09:23:57 gdt + * Merge up to MK7.3 + * [1998/02/03 09:10:14 gdt] + * + * Revision 1.2.45.1 1997/03/27 18:46:16 barbou + * ri-osc CR1557: re-enable thread-specific breakpoints. + * [1995/09/20 15:23:46 bolinger] + * [97/02/25 barbou] + * + * Revision 1.2.21.6 1996/01/09 19:15:21 devrcs + * Changed declarations of 'register foo' to 'register int foo' + * Fixed printfs which print addresses. + * [1995/12/01 21:41:51 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:02:40 jfraser] + * + * Revision 1.2.21.5 1995/04/07 18:52:54 barbou + * Allow breakpoints on non-resident pages. The breakpoint will + * actually be set when the page is paged in. + * [93/09/23 barbou] + * [95/03/08 barbou] + * + * Revision 1.2.21.4 1995/02/23 21:43:19 alanl + * Merged with DIPC2_SHARED. + * [1995/01/04 20:15:04 alanl] + * + * Revision 1.2.28.1 1994/11/04 09:52:15 dwm + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.4.5 1994/05/06 18:38:52 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Moved struct db_breakpoint from here to db_break.h. + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. 
+ * * End1.3merge + * [1994/11/04 08:49:10 dwm] + * + * Revision 1.2.21.2 1994/09/23 01:17:57 ezf + * change marker to not FREE + * [1994/09/22 21:09:19 ezf] + * + * Revision 1.2.21.1 1994/06/11 21:11:24 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:01:06 bolinger] + * + * Revision 1.2.25.2 1994/10/28 18:56:21 rwd + * Delint. + * [94/10/28 rwd] + * + * Revision 1.2.25.1 1994/08/04 01:42:15 mmp + * 23-Jun-94 Stan Smith (stans@ssd.intel.com) + * Let d * delete all breakpoints. + * [1994/06/28 13:54:00 sjs] + * + * Revision 1.2.19.2 1994/04/11 09:34:22 bernadat + * Moved db_breakpoint struct declaration to db_break.h + * [94/03/16 bernadat] + * + * Revision 1.2.19.1 1994/02/08 10:57:22 bernadat + * When setting a breakpoint, force user_space if breakpoint is + * outside kernel_space (like in the case of an emulator). + * [93/09/27 paire] + * + * Changed silly decimal display to hex (to match input conventions). + * Change from NORMA_MK14.6 [93/01/09 sjs] + * [93/07/16 bernadat] + * [94/02/07 bernadat] + * + * Revision 1.2.4.3 1993/07/27 18:26:48 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:10:54 elliston] + * + * Revision 1.2.4.2 1993/06/09 02:19:39 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:55:42 jeffc] + * + * Revision 1.2 1993/04/19 16:01:31 devrcs + * Changes from MK78: + * Removed unused variable from db_delete_cmd(). + * Added declaration for arg 'count' of db_add_thread_breakpoint(). + * [92/05/18 jfriedl] + * Fixed b/tu to b/Tu work if the specified address is valid in the + * target address space but not the current user space. Explicit + * user space breakpoints (b/u, b/Tu, etc) will no longer get + * inserted into the kernel if the specified address is invalid. 
+ * [92/04/18 danner] + * [92/12/18 bruel] + * + * Revision 1.1 1992/09/30 02:00:52 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.11.3.1 92/03/03 16:13:20 jeffreyh + * Pick up changes from TRUNK + * [92/02/26 10:58:37 jeffreyh] + * + * Revision 2.12 92/02/19 16:46:24 elf + * Removed one of the many user-unfriendlinesses. + * [92/02/10 17:48:25 af] + * + * Revision 2.11 91/11/12 11:50:24 rvb + * Fixed db_delete_cmd so that just "d" works in user space. + * [91/10/31 rpd] + * Fixed db_delete_thread_breakpoint for zero task_thd. + * [91/10/30 rpd] + * + * Revision 2.10 91/10/09 15:57:41 af + * Supported thread-oriented break points. + * [91/08/29 tak] + * + * Revision 2.9 91/07/09 23:15:39 danner + * Conditionalized db_map_addr to work right on the luna. Used a + * ifdef luna88k. This is evil, and needs to be fixed. + * [91/07/08 danner] + * + * Revision 2.2 91/04/10 22:54:50 mbj + * Grabbed 3.0 copyright/disclaimer since ddb comes from 3.0. + * [91/04/09 rvb] + * + * Revision 2.7 91/02/05 17:06:00 mrt + * Changed to new Mach copyright + * [91/01/31 16:17:01 mrt] + * + * Revision 2.6 91/01/08 15:09:03 rpd + * Added db_map_equal, db_map_current, db_map_addr. + * [90/11/10 rpd] + * + * Revision 2.5 90/11/05 14:26:32 rpd + * Initialize db_breakpoints_inserted to TRUE. + * [90/11/04 rpd] + * + * Revision 2.4 90/10/25 14:43:33 rwd + * Added map field to breakpoints. + * Added map argument to db_set_breakpoint, db_delete_breakpoint, + * db_find_breakpoint. Added db_find_breakpoint_here. + * [90/10/18 rpd] + * + * Revision 2.3 90/09/28 16:57:07 jsb + * Fixed db_breakpoint_free. + * [90/09/18 rpd] + * + * Revision 2.2 90/08/27 21:49:53 dbg + * Reflected changes in db_printsym()'s calling seq. + * [90/08/20 af] + * Clear breakpoints only if inserted. + * Reduce lint. + * [90/08/07 dbg] + * Created. 
+ * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Breakpoints. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include + + +#define NBREAKPOINTS 100 +#define NTHREAD_LIST (NBREAKPOINTS*3) + +struct db_breakpoint db_break_table[NBREAKPOINTS]; +db_breakpoint_t db_next_free_breakpoint = &db_break_table[0]; +db_breakpoint_t db_free_breakpoints = 0; +db_breakpoint_t db_breakpoint_list = 0; + +static struct db_thread_breakpoint db_thread_break_list[NTHREAD_LIST]; +static db_thread_breakpoint_t db_free_thread_break_list = 0; +static boolean_t db_thread_break_init = FALSE; +static int db_breakpoint_number = 0; + +/* Prototypes for functions local to this file. XXX -- should be static! 
+ */ +static int db_add_thread_breakpoint( + register db_breakpoint_t bkpt, + vm_offset_t task_thd, + int count, + boolean_t task_bpt); + +static int db_delete_thread_breakpoint( + register db_breakpoint_t bkpt, + vm_offset_t task_thd); + +static db_thread_breakpoint_t db_find_thread_breakpoint( + db_breakpoint_t bkpt, + thread_act_t thr_act); + +static void db_force_delete_breakpoint( + db_breakpoint_t bkpt, + vm_offset_t task_thd, + boolean_t is_task); + +db_breakpoint_t db_breakpoint_alloc(void); + +void db_breakpoint_free(register db_breakpoint_t bkpt); + +void db_delete_breakpoint( + task_t task, + db_addr_t addr, + vm_offset_t task_thd); + +void +db_delete_all_breakpoints( + task_t task); + +void db_list_breakpoints(void); + + + +db_breakpoint_t +db_breakpoint_alloc(void) +{ + register db_breakpoint_t bkpt; + + if ((bkpt = db_free_breakpoints) != 0) { + db_free_breakpoints = bkpt->link; + return (bkpt); + } + if (db_next_free_breakpoint == &db_break_table[NBREAKPOINTS]) { + db_printf("All breakpoints used.\n"); + return (0); + } + bkpt = db_next_free_breakpoint; + db_next_free_breakpoint++; + + return (bkpt); +} + +void +db_breakpoint_free(register db_breakpoint_t bkpt) +{ + bkpt->link = db_free_breakpoints; + db_free_breakpoints = bkpt; +} + +static int +db_add_thread_breakpoint( + register db_breakpoint_t bkpt, + vm_offset_t task_thd, + int count, + boolean_t task_bpt) +{ + register db_thread_breakpoint_t tp; + + if (db_thread_break_init == FALSE) { + for (tp = db_thread_break_list; + tp < &db_thread_break_list[NTHREAD_LIST-1]; tp++) + tp->tb_next = tp+1; + tp->tb_next = 0; + db_free_thread_break_list = db_thread_break_list; + db_thread_break_init = TRUE; + } + if (db_free_thread_break_list == 0) + return (-1); + tp = db_free_thread_break_list; + db_free_thread_break_list = tp->tb_next; + tp->tb_is_task = task_bpt; + tp->tb_task_thd = task_thd; + tp->tb_count = count; + tp->tb_init_count = count; + tp->tb_cond = 0; + tp->tb_number = ++db_breakpoint_number; 
+ tp->tb_next = bkpt->threads; + bkpt->threads = tp; + return(0); +} + +static int +db_delete_thread_breakpoint( + register db_breakpoint_t bkpt, + vm_offset_t task_thd) +{ + register db_thread_breakpoint_t tp; + register db_thread_breakpoint_t *tpp; + + if (task_thd == 0) { + /* delete all the thread-breakpoints */ + + for (tpp = &bkpt->threads; (tp = *tpp) != 0; tpp = &tp->tb_next) + db_cond_free(tp); + + *tpp = db_free_thread_break_list; + db_free_thread_break_list = bkpt->threads; + bkpt->threads = 0; + return 0; + } else { + /* delete the specified thread-breakpoint */ + + for (tpp = &bkpt->threads; (tp = *tpp) != 0; tpp = &tp->tb_next) + if (tp->tb_task_thd == task_thd) { + db_cond_free(tp); + *tpp = tp->tb_next; + tp->tb_next = db_free_thread_break_list; + db_free_thread_break_list = tp; + return 0; + } + + return -1; /* not found */ + } +} + +static db_thread_breakpoint_t +db_find_thread_breakpoint( + db_breakpoint_t bkpt, + thread_act_t thr_act) +{ + register db_thread_breakpoint_t tp; + register task_t task = + (thr_act == THR_ACT_NULL || thr_act->kernel_loaded) + ? 
TASK_NULL : thr_act->task; + + for (tp = bkpt->threads; tp; tp = tp->tb_next) { + if (tp->tb_is_task) { + if (tp->tb_task_thd == (vm_offset_t)task) + break; + continue; + } + if (tp->tb_task_thd == (vm_offset_t)thr_act || tp->tb_task_thd == 0) + break; + } + return(tp); +} + +db_thread_breakpoint_t +db_find_thread_breakpoint_here( + task_t task, + db_addr_t addr) +{ + db_breakpoint_t bkpt; + + bkpt = db_find_breakpoint(task, (db_addr_t)addr); + if (bkpt == 0) + return(0); + return(db_find_thread_breakpoint(bkpt, current_act())); +} + +db_thread_breakpoint_t +db_find_breakpoint_number( + int num, + db_breakpoint_t *bkptp) +{ + register db_thread_breakpoint_t tp; + register db_breakpoint_t bkpt; + + for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) { + for (tp = bkpt->threads; tp; tp = tp->tb_next) { + if (tp->tb_number == num) { + if (bkptp) + *bkptp = bkpt; + return(tp); + } + } + } + return(0); +} + +static void +db_force_delete_breakpoint( + db_breakpoint_t bkpt, + vm_offset_t task_thd, + boolean_t is_task) +{ + db_printf("deleted a stale breakpoint at "); + if (bkpt->task == TASK_NULL || db_lookup_task(bkpt->task) >= 0) + db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task); + else + db_printf("%#X", bkpt->address); + if (bkpt->task) + db_printf(" in task %X", bkpt->task); + if (task_thd) + db_printf(" for %s %X", (is_task)? 
"task": "thr_act", task_thd); + db_printf("\n"); + db_delete_thread_breakpoint(bkpt, task_thd); +} + +void +db_check_breakpoint_valid(void) +{ + register db_thread_breakpoint_t tbp, tbp_next; + register db_breakpoint_t bkpt, *bkptp; + + bkptp = &db_breakpoint_list; + for (bkpt = *bkptp; bkpt; bkpt = *bkptp) { + if (bkpt->task != TASK_NULL) { + if (db_lookup_task(bkpt->task) < 0) { + db_force_delete_breakpoint(bkpt, 0, FALSE); + *bkptp = bkpt->link; + db_breakpoint_free(bkpt); + continue; + } + } else { + for (tbp = bkpt->threads; tbp; tbp = tbp_next) { + tbp_next = tbp->tb_next; + if (tbp->tb_task_thd == 0) + continue; + if ((tbp->tb_is_task && + db_lookup_task((task_t)(tbp->tb_task_thd)) < 0) || + (!tbp->tb_is_task && + db_lookup_act((thread_act_t)(tbp->tb_task_thd)) < 0)) { + db_force_delete_breakpoint(bkpt, + tbp->tb_task_thd, tbp->tb_is_task); + } + } + if (bkpt->threads == 0) { + db_put_task_value(bkpt->address, BKPT_SIZE, + bkpt->bkpt_inst, bkpt->task); + *bkptp = bkpt->link; + db_breakpoint_free(bkpt); + continue; + } + } + bkptp = &bkpt->link; + } +} + +void +db_set_breakpoint( + task_t task, + db_addr_t addr, + int count, + thread_act_t thr_act, + boolean_t task_bpt) +{ + register db_breakpoint_t bkpt; + db_breakpoint_t alloc_bkpt = 0; + vm_offset_t task_thd; + + bkpt = db_find_breakpoint(task, addr); + if (bkpt) { + if (thr_act == THR_ACT_NULL + || db_find_thread_breakpoint(bkpt, thr_act)) { + db_printf("Already set.\n"); + return; + } + } else { + if (!DB_CHECK_ACCESS(addr, BKPT_SIZE, task)) { + if (task) { + db_printf("Warning: non-resident page for breakpoint at %lX", + addr); + db_printf(" in task %lX.\n", task); + } else { + db_printf("Cannot set breakpoint at %lX in kernel space.\n", + addr); + return; + } + } + alloc_bkpt = bkpt = db_breakpoint_alloc(); + if (bkpt == 0) { + db_printf("Too many breakpoints.\n"); + return; + } + bkpt->task = task; + bkpt->flags = (task && thr_act == THR_ACT_NULL)? 
+ (BKPT_USR_GLOBAL|BKPT_1ST_SET): 0; + bkpt->address = addr; + bkpt->threads = 0; + } + if (db_breakpoint_list == 0) + db_breakpoint_number = 0; + task_thd = (task_bpt) ? (vm_offset_t)(thr_act->task) + : (vm_offset_t)thr_act; + if (db_add_thread_breakpoint(bkpt, task_thd, count, task_bpt) < 0) { + if (alloc_bkpt) + db_breakpoint_free(alloc_bkpt); + db_printf("Too many thread_breakpoints.\n"); + } else { + db_printf("set breakpoint #%x\n", db_breakpoint_number); + if (alloc_bkpt) { + bkpt->link = db_breakpoint_list; + db_breakpoint_list = bkpt; + } + } +} + +void +db_delete_breakpoint( + task_t task, + db_addr_t addr, + vm_offset_t task_thd) +{ + register db_breakpoint_t bkpt; + register db_breakpoint_t *prev; + + for (prev = &db_breakpoint_list; (bkpt = *prev) != 0; + prev = &bkpt->link) { + if ((bkpt->task == task + || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL))) + && bkpt->address == addr) + break; + } + if (bkpt && (bkpt->flags & BKPT_SET_IN_MEM)) { + db_printf("cannot delete it now.\n"); + return; + } + if (bkpt == 0 + || db_delete_thread_breakpoint(bkpt, task_thd) < 0) { + db_printf("Not set.\n"); + return; + } + if (bkpt->threads == 0) { + *prev = bkpt->link; + db_breakpoint_free(bkpt); + } +} + +db_breakpoint_t +db_find_breakpoint( + task_t task, + db_addr_t addr) +{ + register db_breakpoint_t bkpt; + + for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) { + if ((bkpt->task == task + || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL))) + && bkpt->address == addr) + return (bkpt); + } + return (0); +} + +boolean_t +db_find_breakpoint_here( + task_t task, + db_addr_t addr) +{ + register db_breakpoint_t bkpt; + + for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) { + if ((bkpt->task == task + || (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL))) + && bkpt->address == addr) + return(TRUE); + if ((bkpt->flags & BKPT_USR_GLOBAL) == 0 && + DB_PHYS_EQ(task, addr, bkpt->task, bkpt->address)) + return (TRUE); + } + 
return(FALSE); +} + +boolean_t db_breakpoints_inserted = TRUE; + +void +db_set_breakpoints(void) +{ + register db_breakpoint_t bkpt; + register task_t task; + db_expr_t inst; + thread_act_t cur_act = current_act(); + task_t cur_task = + (cur_act && !cur_act->kernel_loaded) ? + cur_act->task : TASK_NULL; + boolean_t inserted = TRUE; + + if (!db_breakpoints_inserted) { + for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) { + if (bkpt->flags & BKPT_SET_IN_MEM) + continue; + task = bkpt->task; + if (bkpt->flags & BKPT_USR_GLOBAL) { + if ((bkpt->flags & BKPT_1ST_SET) == 0) { + if (cur_task == TASK_NULL) + continue; + task = cur_task; + } else + bkpt->flags &= ~BKPT_1ST_SET; + } + if (DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) { + inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE, + task); + if (inst == BKPT_SET(inst)) + continue; + bkpt->bkpt_inst = inst; + db_put_task_value(bkpt->address, + BKPT_SIZE, + BKPT_SET(bkpt->bkpt_inst), task); + bkpt->flags |= BKPT_SET_IN_MEM; + } else { + inserted = FALSE; + } + } + db_breakpoints_inserted = inserted; + } +} + +void +db_clear_breakpoints(void) +{ + register db_breakpoint_t bkpt, *bkptp; + register task_t task; + db_expr_t inst; + thread_act_t cur_act = current_act(); + task_t cur_task = (cur_act && !cur_act->kernel_loaded) ? 
+ cur_act->task: TASK_NULL; + + if (db_breakpoints_inserted) { + bkptp = &db_breakpoint_list; + for (bkpt = *bkptp; bkpt; bkpt = *bkptp) { + task = bkpt->task; + if (bkpt->flags & BKPT_USR_GLOBAL) { + if (cur_task == TASK_NULL) { + bkptp = &bkpt->link; + continue; + } + task = cur_task; + } + if ((bkpt->flags & BKPT_SET_IN_MEM) + && DB_CHECK_ACCESS(bkpt->address, BKPT_SIZE, task)) { + inst = db_get_task_value(bkpt->address, BKPT_SIZE, FALSE, + task); + if (inst != BKPT_SET(inst)) { + if (bkpt->flags & BKPT_USR_GLOBAL) { + bkptp = &bkpt->link; + continue; + } + db_force_delete_breakpoint(bkpt, 0, FALSE); + *bkptp = bkpt->link; + db_breakpoint_free(bkpt); + continue; + } + db_put_task_value(bkpt->address, BKPT_SIZE, + bkpt->bkpt_inst, task); + bkpt->flags &= ~BKPT_SET_IN_MEM; + } + bkptp = &bkpt->link; + } + db_breakpoints_inserted = FALSE; + } +} + +/* + * Set a temporary breakpoint. + * The instruction is changed immediately, + * so the breakpoint does not have to be on the breakpoint list. + */ +db_breakpoint_t +db_set_temp_breakpoint( + task_t task, + db_addr_t addr) +{ + register db_breakpoint_t bkpt; + + bkpt = db_breakpoint_alloc(); + if (bkpt == 0) { + db_printf("Too many breakpoints.\n"); + return 0; + } + bkpt->task = task; + bkpt->address = addr; + bkpt->flags = BKPT_TEMP; + bkpt->threads = 0; + if (db_add_thread_breakpoint(bkpt, 0, 1, FALSE) < 0) { + if (bkpt) + db_breakpoint_free(bkpt); + db_printf("Too many thread_breakpoints.\n"); + return 0; + } + bkpt->bkpt_inst = db_get_task_value(bkpt->address, BKPT_SIZE, + FALSE, task); + db_put_task_value(bkpt->address, BKPT_SIZE, + BKPT_SET(bkpt->bkpt_inst), task); + return bkpt; +} + +void +db_delete_temp_breakpoint( + task_t task, + db_breakpoint_t bkpt) +{ + db_put_task_value(bkpt->address, BKPT_SIZE, bkpt->bkpt_inst, task); + db_delete_thread_breakpoint(bkpt, 0); + db_breakpoint_free(bkpt); +} + +/* + * List breakpoints. 
+ */ +void +db_list_breakpoints(void) +{ + register db_breakpoint_t bkpt; + + if (db_breakpoint_list == 0) { + db_printf("No breakpoints set\n"); + return; + } + + db_printf(" No Space Task.Act Cnt Address(Cond)\n"); + for (bkpt = db_breakpoint_list; + bkpt != 0; + bkpt = bkpt->link) + { + register db_thread_breakpoint_t tp; + int task_id; + int act_id; + + if (bkpt->threads) { + for (tp = bkpt->threads; tp; tp = tp->tb_next) { + db_printf("%3d ", tp->tb_number); + if (bkpt->flags & BKPT_USR_GLOBAL) + db_printf("user "); + else if (bkpt->task == TASK_NULL) + db_printf("kernel "); + else if ((task_id = db_lookup_task(bkpt->task)) < 0) + db_printf("%0*X ", 2*sizeof(vm_offset_t), bkpt->task); + else + db_printf("task%-3d ", task_id); + if (tp->tb_task_thd == 0) { + db_printf("all "); + } else { + if (tp->tb_is_task) { + task_id = db_lookup_task((task_t)(tp->tb_task_thd)); + if (task_id < 0) + db_printf("%0*X ", 2*sizeof(vm_offset_t), + tp->tb_task_thd); + else + db_printf("task%03d ", task_id); + } else { + thread_act_t thd = (thread_act_t)(tp->tb_task_thd); + task_id = db_lookup_task(thd->task); + act_id = db_lookup_task_act(thd->task, thd); + if (task_id < 0 || act_id < 0) + db_printf("%0*X ", 2*sizeof(vm_offset_t), + tp->tb_task_thd); + else + db_printf("task%03d.%-3d ", task_id, act_id); + } + } + db_printf("%3d ", tp->tb_init_count); + db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task); + if (tp->tb_cond > 0) { + db_printf("("); + db_cond_print(tp); + db_printf(")"); + } + db_printf("\n"); + } + } else { + if (bkpt->task == TASK_NULL) + db_printf(" ? kernel "); + else + db_printf("%*X ", 2*sizeof(vm_offset_t), bkpt->task); + db_printf("(?) 
"); + db_task_printsym(bkpt->address, DB_STGY_PROC, bkpt->task); + db_printf("\n"); + } + } +} + +void +db_delete_all_breakpoints( + task_t task) +{ + register db_breakpoint_t bkpt; + + bkpt = db_breakpoint_list; + while ( bkpt != 0 ) { + if (bkpt->task == task || + (task != TASK_NULL && (bkpt->flags & BKPT_USR_GLOBAL))) { + db_delete_breakpoint(task, bkpt->address, 0); + bkpt = db_breakpoint_list; + } + else + bkpt = bkpt->link; + + } +} + +/* Delete breakpoint */ +void +db_delete_cmd(void) +{ + register int n; + thread_act_t thr_act; + vm_offset_t task_thd; + boolean_t user_global = FALSE; + boolean_t task_bpt = FALSE; + boolean_t user_space = FALSE; + boolean_t thd_bpt = FALSE; + db_expr_t addr; + int t; + + t = db_read_token(); + if (t == tSLASH) { + t = db_read_token(); + if (t != tIDENT) { + db_printf("Bad modifier \"%s\"\n", db_tok_string); + db_error(0); + } + user_global = db_option(db_tok_string, 'U'); + user_space = (user_global)? TRUE: db_option(db_tok_string, 'u'); + task_bpt = db_option(db_tok_string, 'T'); + thd_bpt = db_option(db_tok_string, 't'); + if (task_bpt && user_global) + db_error("Cannot specify both 'T' and 'U' option\n"); + t = db_read_token(); + } + + if ( t == tSTAR ) { + db_printf("Delete ALL breakpoints\n"); + db_delete_all_breakpoints( (task_t)task_bpt ); + return; + } + + if (t == tHASH) { + db_thread_breakpoint_t tbp; + db_breakpoint_t bkpt; + + if (db_read_token() != tNUMBER) { + db_printf("Bad break point number #%s\n", db_tok_string); + db_error(0); + } + if ((tbp = db_find_breakpoint_number(db_tok_number, &bkpt)) == 0) { + db_printf("No such break point #%d\n", db_tok_number); + db_error(0); + } + db_delete_breakpoint(bkpt->task, bkpt->address, tbp->tb_task_thd); + return; + } + db_unread_token(t); + if (!db_expression(&addr)) { + /* + * We attempt to pick up the user_space indication from db_dot, + * so that a plain "d" always works. 
+ */ + addr = (db_expr_t)db_dot; + if (!user_space && !DB_VALID_ADDRESS(addr, FALSE)) + user_space = TRUE; + } + if (!DB_VALID_ADDRESS(addr, user_space)) { + db_printf("Address %#X is not in %s space\n", addr, + (user_space)? "user": "kernel"); + db_error(0); + } + if (thd_bpt || task_bpt) { + for (n = 0; db_get_next_act(&thr_act, n); n++) { + if (thr_act == THR_ACT_NULL) + db_error("No active thr_act\n"); + if (task_bpt) { + if (thr_act->task == TASK_NULL) + db_error("No task\n"); + task_thd = (vm_offset_t) (thr_act->task); + } else + task_thd = (user_global)? 0: (vm_offset_t) thr_act; + db_delete_breakpoint(db_target_space(thr_act, user_space), + (db_addr_t)addr, task_thd); + } + } else { + db_delete_breakpoint(db_target_space(THR_ACT_NULL, user_space), + (db_addr_t)addr, 0); + } +} + +/* Set breakpoint with skip count */ +#include + +void +db_breakpoint_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + register int n; + thread_act_t thr_act; + boolean_t user_global = db_option(modif, 'U'); + boolean_t task_bpt = db_option(modif, 'T'); + boolean_t user_space; + + if (count == -1) + count = 1; +#if 0 /* CHECKME */ + if (!task_bpt && db_option(modif,'t')) + task_bpt = TRUE; +#endif + + if (task_bpt && user_global) + db_error("Cannot specify both 'T' and 'U'\n"); + user_space = (user_global)? 
TRUE: db_option(modif, 'u'); + if (user_space && db_access_level < DB_ACCESS_CURRENT) + db_error("User space break point is not supported\n"); + if ((!task_bpt || !user_space) && + !DB_VALID_ADDRESS(addr, user_space)) { + /* if the user has explicitly specified user space, + do not insert a breakpoint into the kernel */ + if (user_space) + db_error("Invalid user space address\n"); + user_space = TRUE; + db_printf("%#X is in user space\n", addr); + db_printf("kernel is from %#X to %#x\n", VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS); + } + if (db_option(modif, 't') || task_bpt) { + for (n = 0; db_get_next_act(&thr_act, n); n++) { + if (thr_act == THR_ACT_NULL) + db_error("No active thr_act\n"); + if (task_bpt && thr_act->task == TASK_NULL) + db_error("No task\n"); + if (db_access_level <= DB_ACCESS_CURRENT && user_space + && thr_act->task != db_current_space()) + db_error("Cannot set break point in inactive user space\n"); + db_set_breakpoint(db_target_space(thr_act, user_space), + (db_addr_t)addr, count, + (user_global)? THR_ACT_NULL: thr_act, + task_bpt); + } + } else { + db_set_breakpoint(db_target_space(THR_ACT_NULL, user_space), + (db_addr_t)addr, + count, THR_ACT_NULL, FALSE); + } +} + +/* list breakpoints */ +void +db_listbreak_cmd(void) +{ + db_list_breakpoints(); +} diff --git a/osfmk/ddb/db_break.h b/osfmk/ddb/db_break.h new file mode 100644 index 000000000..2882408dd --- /dev/null +++ b/osfmk/ddb/db_break.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.18.3 1995/01/06 19:10:05 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup, prototypes. + * [1994/10/14 03:39:52 dwm] + * + * Revision 1.1.18.2 1994/09/23 01:18:04 ezf + * change marker to not FREE + * [1994/09/22 21:09:24 ezf] + * + * Revision 1.1.18.1 1994/06/11 21:11:29 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:03:39 bolinger] + * + * Revision 1.1.16.1 1994/04/11 09:34:32 bernadat + * Moved db_breakpoint struct declaration from db_break.c + * to here. + * [94/03/16 bernadat] + * + * Revision 1.1.12.2 1994/03/17 22:35:24 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:25:41 dwm] + * + * Revision 1.1.12.1 1994/01/12 17:50:30 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:13:00 dwm] + * + * Revision 1.1.4.4 1993/07/27 18:26:51 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:10:59 elliston] + * + * Revision 1.1.4.3 1993/06/07 22:06:31 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:57:06 jeffc] + * + * Revision 1.1.4.2 1993/06/02 23:10:21 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 20:55:49 jeffc] + * + * Revision 1.1 1992/09/30 02:24:12 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/10/09 15:58:03 af + * Revision 2.5.3.1 91/10/05 13:05:04 jeffreyh + * Added db_thread_breakpoint structure, and added task and threads + * field to db_breakpoint structure. Some status flags were also + * added to keep track user space break point correctly. + * [91/08/29 tak] + * + * Revision 2.5.3.1 91/10/05 13:05:04 jeffreyh + * Added db_thread_breakpoint structure, and added task and threads + * field to db_breakpoint structure. Some status flags were also + * added to keep track user space break point correctly. + * [91/08/29 tak] + * + * Revision 2.5 91/05/14 15:32:35 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:06:06 mrt + * Changed to new Mach copyright + * [91/01/31 16:17:10 mrt] + * + * Revision 2.3 90/10/25 14:43:40 rwd + * Added map field to breakpoints. + * [90/10/18 rpd] + * + * Revision 2.2 90/08/27 21:50:00 dbg + * Modularized typedef names. + * [90/08/20 af] + * Add external defintions. + * [90/08/07 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +#ifndef _DDB_DB_BREAK_H_ +#define _DDB_DB_BREAK_H_ + +#include +#include +#include +#include + +/* + * thread list at the same breakpoint address + */ +struct db_thread_breakpoint { + vm_offset_t tb_task_thd; /* target task or thread */ + boolean_t tb_is_task; /* task qualified */ + short tb_number; /* breakpoint number */ + short tb_init_count; /* skip count(initial value) */ + short tb_count; /* current skip count */ + short tb_cond; /* break condition */ + struct db_thread_breakpoint *tb_next; /* next chain */ +}; +typedef struct db_thread_breakpoint *db_thread_breakpoint_t; + +/* + * Breakpoint. + */ +struct db_breakpoint { + task_t task; /* target task */ + db_addr_t address; /* set here */ + db_thread_breakpoint_t threads; /* thread */ + int flags; /* flags: */ +#define BKPT_SINGLE_STEP 0x2 /* to simulate single step */ +#define BKPT_TEMP 0x4 /* temporary */ +#define BKPT_USR_GLOBAL 0x8 /* global user space break point */ +#define BKPT_SET_IN_MEM 0x10 /* break point is set in memory */ +#define BKPT_1ST_SET 0x20 /* 1st time set of user global bkpt */ + vm_size_t bkpt_inst; /* saved instruction at bkpt */ + struct db_breakpoint *link; /* link in in-use or free chain */ +}; + +typedef struct db_breakpoint *db_breakpoint_t; + + +/* + * Prototypes for functions exported by this module. 
+ */ + +db_thread_breakpoint_t db_find_thread_breakpoint_here( + task_t task, + db_addr_t addr); + +void db_check_breakpoint_valid(void); + +void db_set_breakpoint( + task_t task, + db_addr_t addr, + int count, + thread_act_t thr_act, + boolean_t task_bpt); + +db_breakpoint_t db_find_breakpoint( + task_t task, + db_addr_t addr); + +boolean_t db_find_breakpoint_here( + task_t task, + db_addr_t addr); + +db_thread_breakpoint_t db_find_breakpoint_number( + int num, + db_breakpoint_t *bkptp); + +void db_set_breakpoints(void); + +void db_clear_breakpoints(void); + +db_breakpoint_t db_set_temp_breakpoint( + task_t task, + db_addr_t addr); + +void db_delete_temp_breakpoint( + task_t task, + db_breakpoint_t bkpt); + +void db_delete_cmd(void); + +void db_breakpoint_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_listbreak_cmd(void); + +#endif /* !_DDB_DB_BREAK_H_ */ diff --git a/osfmk/ddb/db_coff.h b/osfmk/ddb/db_coff.h new file mode 100644 index 000000000..b0cad757e --- /dev/null +++ b/osfmk/ddb/db_coff.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1995/02/23 16:34:08 alanl + * Initial file creation. + * [95/02/06 sjs] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_COFF_H_ +#define _DDB_DB_COFF_H_ + +#define DB_NO_AOUT 1 + + +/* + * Symbol table routines for COFF format files. + */ + +boolean_t coff_db_sym_init( + char * symtab, + char * esymtab, + char * name, + char * task_addr); + +db_sym_t coff_db_lookup( + db_symtab_t *stab, + char * symstr); + +int coff_db_lookup_incomplete( + db_symtab_t *stab, + char * symstr, + char ** name, + int *len, + int *toadd); + +int coff_db_print_completion( + db_symtab_t *stab, + char * symstr); + +db_sym_t coff_db_search_symbol( + db_symtab_t *symtab, + db_addr_t off, + db_strategy_t strategy, + db_expr_t *diffp); /* in/out */ + +void coff_db_symbol_values( + db_sym_t sym, + char **namep, + db_expr_t *valuep); + +db_sym_t coff_db_search_by_addr( + db_symtab_t *stab, + db_addr_t addr, + char **file, + char **func, + int *line, + db_expr_t *diff, + int *args); + +boolean_t coff_db_line_at_pc( + db_symtab_t *stab, + db_sym_t sym, + char **file, + int *line, + db_expr_t pc); + +int coff_db_qualified_print_completion( + db_symtab_t *stab, + char *sym); + +void coff_db_init(void); + +#endif /* !_DDB_DB_COFF_H_ */ diff --git a/osfmk/ddb/db_command.c b/osfmk/ddb/db_command.c new file mode 100644 index 000000000..889f447dc --- /dev/null +++ b/osfmk/ddb/db_command.c @@ -0,0 +1,713 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Command dispatcher. 
+ */ +#include +#include +#ifdef AT386 +#include +#endif /* AT386 */ + +#include +#include +#include + +#if defined(__alpha) +# include +# if KDEBUG +# include +# endif +#endif /* defined(__alpha) */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include /* For db_stack_trace_cmd(). */ +#include /* For db_show_one_zone, db_show_all_zones. */ +#include /* For db_show_all_slocks(). */ + +#if NORMA_VM +#include +#endif /* NORMA_VM */ + +/* + * Exported global variables + */ +boolean_t db_cmd_loop_done; +jmp_buf_t *db_recover = 0; +db_addr_t db_dot; +db_addr_t db_last_addr; +db_addr_t db_prev; +db_addr_t db_next; + +/* + * if 'ed' style: 'dot' is set at start of last item printed, + * and '+' points to next line. + * Otherwise: 'dot' points to next item, '..' points to last. + */ +boolean_t db_ed_style = TRUE; + +/* + * Results of command search. + */ +#define CMD_UNIQUE 0 +#define CMD_FOUND 1 +#define CMD_NONE 2 +#define CMD_AMBIGUOUS 3 +#define CMD_HELP 4 + +/* Prototypes for functions local to this file. XXX -- should be static! + */ + +void db_command( + struct db_command **last_cmdp, /* IN_OUT */ + db_expr_t *last_countp, /* IN_OUT */ + char *last_modifp, /* IN_OUT */ + struct db_command *cmd_table); + +void db_help_cmd(void); + +void db_output_prompt(void); + +void db_fncall(void); + +void db_cmd_list(struct db_command *table); + +int db_cmd_search( + char * name, + struct db_command * table, + struct db_command ** cmdp); /* out */ + +void db_command_list( + struct db_command **last_cmdp, /* IN_OUT */ + db_expr_t *last_countp, /* IN_OUT */ + char *last_modifp, /* IN_OUT */ + struct db_command *cmd_table); + + + +/* + * Search for command prefix. 
+ */ +int +db_cmd_search( + char * name, + struct db_command * table, + struct db_command ** cmdp) /* out */ +{ + struct db_command *cmd; + int result = CMD_NONE; + + for (cmd = table; cmd->name != 0; cmd++) { + register char *lp; + register char *rp; + register int c; + + lp = name; + rp = cmd->name; + while ((c = *lp) == *rp) { + if (c == 0) { + /* complete match */ + *cmdp = cmd; + return (CMD_UNIQUE); + } + lp++; + rp++; + } + if (c == 0) { + /* end of name, not end of command - + partial match */ + if (result == CMD_FOUND) { + result = CMD_AMBIGUOUS; + /* but keep looking for a full match - + this lets us match single letters */ + } + else { + *cmdp = cmd; + result = CMD_FOUND; + } + } + } + if (result == CMD_NONE) { + /* check for 'help' */ + if (!strncmp(name, "help", strlen(name))) + result = CMD_HELP; + } + return (result); +} + +void +db_cmd_list(struct db_command *table) +{ + register struct db_command *new; + register struct db_command *old; + register struct db_command *cur; + unsigned int l; + unsigned int len; + + len = 1; + for (cur = table; cur->name != 0; cur++) + if ((l = strlen(cur->name)) >= len) + len = l + 1; + + old = (struct db_command *)0; + for (;;) { + new = (struct db_command *)0; + for (cur = table; cur->name != 0; cur++) + if ((new == (struct db_command *)0 || + strcmp(cur->name, new->name) < 0) && + (old == (struct db_command *)0 || + strcmp(cur->name, old->name) > 0)) + new = cur; + if (new == (struct db_command *)0) + return; + db_reserve_output_position(len); + db_printf("%-*s", len, new->name); + old = new; + } +} + +void +db_command( + struct db_command **last_cmdp, /* IN_OUT */ + db_expr_t *last_countp, /* IN_OUT */ + char *last_modifp, /* IN_OUT */ + struct db_command *cmd_table) +{ + struct db_command *cmd; + int t; + char modif[TOK_STRING_SIZE]; + char *modifp = &modif[0]; + db_expr_t addr, count; + boolean_t have_addr; + int result; + + t = db_read_token(); + if (t == tEOL || t == tSEMI_COLON) { + /* empty line repeats last 
command, at 'next' */ + cmd = *last_cmdp; + count = *last_countp; + modifp = last_modifp; + addr = (db_expr_t)db_next; + have_addr = FALSE; + if (t == tSEMI_COLON) + db_unread_token(t); + } + else if (t == tEXCL) { + db_fncall(); + return; + } + else if (t != tIDENT) { + db_printf("?\n"); + db_flush_lex(); + return; + } + else { + /* + * Search for command + */ + while (cmd_table) { + result = db_cmd_search(db_tok_string, + cmd_table, + &cmd); + switch (result) { + case CMD_NONE: + if (db_exec_macro(db_tok_string) == 0) + return; + db_printf("No such command \"%s\"\n", db_tok_string); + db_flush_lex(); + return; + case CMD_AMBIGUOUS: + db_printf("Ambiguous\n"); + db_flush_lex(); + return; + case CMD_HELP: + db_cmd_list(cmd_table); + db_flush_lex(); + return; + default: + break; + } + if ((cmd_table = cmd->more) != 0) { + t = db_read_token(); + if (t != tIDENT) { + db_cmd_list(cmd_table); + db_flush_lex(); + return; + } + } + } + + if ((cmd->flag & CS_OWN) == 0) { + /* + * Standard syntax: + * command [/modifier] [addr] [,count] + */ + t = db_read_token(); + if (t == tSLASH) { + t = db_read_token(); + if (t != tIDENT) { + db_printf("Bad modifier \"/%s\"\n", db_tok_string); + db_flush_lex(); + return; + } + strcpy(modif, db_tok_string); + } + else { + db_unread_token(t); + modif[0] = '\0'; + } + + if (db_expression(&addr)) { + db_dot = (db_addr_t) addr; + db_last_addr = db_dot; + have_addr = TRUE; + } + else { + addr = (db_expr_t) db_dot; + have_addr = FALSE; + } + t = db_read_token(); + if (t == tCOMMA) { + if (!db_expression(&count)) { + db_printf("Count missing after ','\n"); + db_flush_lex(); + return; + } + } + else { + db_unread_token(t); + count = -1; + } + } + } + if (cmd != 0) { + /* + * Execute the command. + */ + (*cmd->fcn)(addr, have_addr, count, modifp); + + if (cmd->flag & CS_SET_DOT) { + /* + * If command changes dot, set dot to + * previous address displayed (if 'ed' style). 
+ */ + if (db_ed_style) { + db_dot = db_prev; + } + else { + db_dot = db_next; + } + } + else { + /* + * If command does not change dot, + * set 'next' location to be the same. + */ + db_next = db_dot; + } + } + *last_cmdp = cmd; + *last_countp = count; + strcpy(last_modifp, modifp); +} + +void +db_command_list( + struct db_command **last_cmdp, /* IN_OUT */ + db_expr_t *last_countp, /* IN_OUT */ + char *last_modifp, /* IN_OUT */ + struct db_command *cmd_table) +{ + do { + db_command(last_cmdp, last_countp, last_modifp, cmd_table); + db_skip_to_eol(); + } while (db_read_token() == tSEMI_COLON && db_cmd_loop_done == 0); +} + + +extern void db_system_stats(void); + +struct db_command db_show_all_cmds[] = { +#if USLOCK_DEBUG + { "slocks", (db_func) db_show_all_slocks, 0, 0 }, +#endif /* USLOCK_DEBUG */ + { "acts", db_show_all_acts, 0, 0 }, + { "spaces", db_show_all_spaces, 0, 0 }, + { "tasks", db_show_all_acts, 0, 0 }, + /* temporary alias for sanity preservation */ + { "threads", db_show_all_acts, 0, 0 }, + { "zones", db_show_all_zones, 0, 0 }, + { "vmtask", db_show_all_task_vm, 0, 0 }, + { (char *)0 } +}; + +/* XXX */ + +extern void db_show_thread_log(void); +extern void db_show_one_lock(lock_t*); +extern void db_show_etap_log(db_expr_t, int, db_expr_t, char *); + +struct db_command db_show_cmds[] = { + { "all", 0, 0, db_show_all_cmds }, + { "registers", db_show_regs, 0, 0 }, + { "variables", (db_func) db_show_variable, CS_OWN, 0 }, + { "breaks", (db_func) db_listbreak_cmd, 0, 0 }, + { "watches", (db_func) db_listwatch_cmd, 0, 0 }, + { "task", db_show_one_task, 0, 0 }, + { "act", db_show_one_act, 0, 0 }, + { "shuttle", db_show_shuttle, 0, 0 }, +#if 0 + { "thread", db_show_one_thread, 0, 0 }, +#endif + { "vmtask", db_show_one_task_vm, 0, 0 }, + { "macro", (db_func) db_show_macro, CS_OWN, 0 }, + { "runq", (db_func) db_show_runq, 0, 0 }, + { "map", (db_func) vm_map_print, 0, 0 }, + { "object", (db_func) vm_object_print, 0, 0 }, + { "page", (db_func) vm_page_print, 0, 0 
/*
 * Top-level ddb command table.  Each entry maps a command name to a
 * handler function plus parse flags (CS_OWN: command parses its own
 * arguments; CS_MORE: extra words may follow; CS_SET_DOT: command sets
 * 'dot' for repeat/next).  An entry with fcn == 0 and a non-null
 * fourth field is a prefix that dispatches into a sub-table (e.g.
 * "show" -> db_show_cmds).  Command matching allows unambiguous
 * abbreviation, so short aliases ("x", "s", "c", ...) are listed
 * explicitly where needed.
 */
struct db_command db_command_table[] = {
#if DB_MACHINE_COMMANDS
	/* this must be the first entry, if it exists;
	 * filled in later by db_machine_commands_install() */
	{ "machine",	0,				0,		0 },
#endif
	{ "print",	(db_func) db_print_cmd,		CS_OWN,		0 },
	{ "examine",	db_examine_cmd,			CS_MORE|CS_SET_DOT, 0 },
	{ "x",		db_examine_cmd,			CS_MORE|CS_SET_DOT, 0 },
	{ "xf",		db_examine_forward,		CS_SET_DOT,	0 },
	{ "xb",		db_examine_backward,		CS_SET_DOT,	0 },
	{ "search",	(db_func) db_search_cmd,	CS_OWN|CS_SET_DOT, 0 },
	{ "set",	(db_func) db_set_cmd,		CS_OWN,		0 },
	{ "write",	db_write_cmd,			CS_MORE|CS_SET_DOT, 0 },
	{ "w",		db_write_cmd,			CS_MORE|CS_SET_DOT, 0 },
	{ "delete",	(db_func) db_delete_cmd,	CS_OWN,		0 },
	{ "d",		(db_func) db_delete_cmd,	CS_OWN,		0 },
	{ "break",	db_breakpoint_cmd,		CS_MORE,	0 },
	{ "dwatch",	db_deletewatch_cmd,		CS_MORE,	0 },
	{ "watch",	db_watchpoint_cmd,		CS_MORE,	0 },
	{ "step",	db_single_step_cmd,		0,		0 },
	{ "s",		db_single_step_cmd,		0,		0 },
	{ "continue",	db_continue_cmd,		0,		0 },
	{ "c",		db_continue_cmd,		0,		0 },
	{ "gdb",	db_continue_gdb,		0,		0 },
	{ "until",	db_trace_until_call_cmd,	0,		0 },

	/* As per request of DNoveck, CR1550, leave this disabled */
#if 0	/* until CR1440 is fixed, to avoid toe-stubbing */
	{ "next",	db_trace_until_matching_cmd,	0,		0 },
#endif
	{ "match",	db_trace_until_matching_cmd,	0,		0 },
	{ "trace",	db_stack_trace_cmd,		0,		0 },
	{ "cond",	(db_func) db_cond_cmd,		CS_OWN,		0 },
	{ "call",	(db_func) db_fncall,		CS_OWN,		0 },
	{ "macro",	(db_func) db_def_macro_cmd,	CS_OWN,		0 },
	{ "dmacro",	(db_func) db_del_macro_cmd,	CS_OWN,		0 },
	{ "show",	0,	0,	db_show_cmds },	/* prefix: sub-table */
#if NCPUS > 1
	{ "cpu",	(db_func) db_switch_cpu,	0,		0 },
#endif	/* NCPUS > 1 */
	{ "reboot",	(db_func) db_reboot,		0,		0 },
#if defined(__ppc__)
	/* PowerPC-specific low-level display commands */
	{ "lt",		db_low_trace,			CS_MORE|CS_SET_DOT, 0 },
	{ "dl",		db_display_long,		CS_MORE|CS_SET_DOT, 0 },
	{ "dr",		db_display_real,		CS_MORE|CS_SET_DOT, 0 },
	{ "dv",		db_display_virtual,		CS_MORE|CS_SET_DOT, 0 },
	{ "dm",		db_display_mappings,		CS_MORE|CS_SET_DOT, 0 },
	{ "dp",		db_display_pmap,		CS_MORE,	0 },
	{ "ds",		db_display_save,		CS_MORE|CS_SET_DOT, 0 },
	{ "dx",		db_display_xregs,		CS_MORE|CS_SET_DOT, 0 },
	{ "dk",		db_display_kmod,		CS_MORE,	0 },
	{ "gs",		db_gsnoop,			CS_MORE,	0 },
#endif
	{ (char *)0, }		/* sentinel: terminates the table */
};
/*
 * Top-level interactive debugger loop: prompt, read a line, execute
 * the command list on it, repeat until a command (continue/gdb/etc.)
 * sets db_cmd_loop_done.
 *
 * Error recovery: db_recover is pointed at a local jmp_buf before each
 * iteration; db_error() longjmps back to the _setjmp below, abandoning
 * the current command and restarting the prompt.  The previous
 * db_recover is saved and restored so nested loops unwind correctly.
 */
void
db_command_loop(void)
{
	jmp_buf_t db_jmpbuf;
	jmp_buf_t *prev = db_recover;	/* restore on exit (nesting) */
	extern int db_output_line;
	extern int db_macro_level;
	extern int db_indent;

	/*
	 * Initialize 'prev' and 'next' to dot.
	 */
	db_prev = db_dot;
	db_next = db_dot;

	/* optional hook run once when the debugger is entered */
	if (ddb_display)
		(*ddb_display)();

	db_cmd_loop_done = 0;
	while (!db_cmd_loop_done) {
	    /* db_error() lands here; reset per-line output state */
	    (void) _setjmp(db_recover = &db_jmpbuf);
	    db_macro_level = 0;
	    if (db_print_position() != 0)
		db_printf("\n");
	    db_output_line = 0;
	    db_indent = 0;
	    db_reset_more();
	    db_output_prompt();

	    /* "!!" is the repeat-last-line escape passed to the lexer */
	    (void) db_read_line("!!");
	    db_command_list(&db_last_command, &db_last_count,
			    db_last_modifier, db_command_table);
	}

	db_recover = prev;
}
2 : 1); + } + else + { + if (s > (char *)1) + db_printf(s); + panic("db_error"); + } +} + + +/* + * Call random function: + * !expr(arg,arg,arg) + */ +void +db_fncall(void) +{ + db_expr_t fn_addr; +#define MAXARGS 11 + db_expr_t args[MAXARGS]; + int nargs = 0; + db_expr_t retval; + db_expr_t (*func)(db_expr_t, ...); + int t; + + if (!db_expression(&fn_addr)) { + db_printf("Bad function \"%s\"\n", db_tok_string); + db_flush_lex(); + return; + } + func = (db_expr_t (*) (db_expr_t, ...)) fn_addr; + + t = db_read_token(); + if (t == tLPAREN) { + if (db_expression(&args[0])) { + nargs++; + while ((t = db_read_token()) == tCOMMA) { + if (nargs == MAXARGS) { + db_printf("Too many arguments\n"); + db_flush_lex(); + return; + } + if (!db_expression(&args[nargs])) { + db_printf("Argument missing\n"); + db_flush_lex(); + return; + } + nargs++; + } + db_unread_token(t); + } + if (db_read_token() != tRPAREN) { + db_printf("?\n"); + db_flush_lex(); + return; + } + } + while (nargs < MAXARGS) { + args[nargs++] = 0; + } + + retval = (*func)(args[0], args[1], args[2], args[3], args[4], + args[5], args[6], args[7], args[8], args[9] ); + db_printf(" %#n\n", retval); +} + +boolean_t +db_option( + char *modif, + int option) +{ + register char *p; + + for (p = modif; *p; p++) + if (*p == option) + return(TRUE); + return(FALSE); +} diff --git a/osfmk/ddb/db_command.h b/osfmk/ddb/db_command.h new file mode 100644 index 000000000..9626a5091 --- /dev/null +++ b/osfmk/ddb/db_command.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.15.1 1997/03/27 18:46:27 barbou + * Add #include so that DB_MACHINE_COMMANDS + * can be defined. + * Move here from db_commands.c the prototype for + * db_machine_commands_install(), referenced by PARAGON/model_dep.c. + * [97/02/25 barbou] + * + * Revision 1.1.9.2 1994/09/23 01:18:19 ezf + * change marker to not FREE + * [1994/09/22 21:09:33 ezf] + * + * Revision 1.1.9.1 1994/06/11 21:11:39 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:03:50 bolinger] + * + * Revision 1.1.7.1 1994/04/11 09:34:47 bernadat + * Added db_command struct decalration. + * [94/03/17 bernadat] + * + * Revision 1.1.2.3 1993/07/27 18:26:57 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:08 elliston] + * + * Revision 1.1.2.2 1993/06/02 23:10:38 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:00 jeffc] + * + * Revision 1.1 1992/09/30 02:24:14 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/10/09 15:58:45 af + * Revision 2.5.2.1 91/10/05 13:05:30 jeffreyh + * Added db_exec_conditional_cmd(), and db_option(). + * Deleted db_skip_to_eol(). + * [91/08/29 tak] + * + * Revision 2.5.2.1 91/10/05 13:05:30 jeffreyh + * Added db_exec_conditional_cmd(), and db_option(). 
+ * Deleted db_skip_to_eol(). + * [91/08/29 tak] + * + * Revision 2.5 91/07/09 23:15:46 danner + * Grabbed up to date copyright. + * [91/07/08 danner] + * + * Revision 2.2 91/04/10 16:02:32 mbj + * Grabbed 3.0 copyright/disclaimer since ddb comes from 3.0. + * [91/04/09 rvb] + * + * Revision 2.3 91/02/05 17:06:15 mrt + * Changed to new Mach copyright + * [91/01/31 16:17:28 mrt] + * + * Revision 2.2 90/08/27 21:50:19 dbg + * Replace db_last_address_examined with db_prev, db_next. + * [90/08/22 dbg] + * Created. + * [90/08/07 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +/* + * Command loop declarations. 
/*
 * Standard command handler signature: (address, have_addr, count,
 * modifier string) — matching handlers such as db_examine_cmd().
 * Commands flagged CS_OWN bypass this and parse their own input.
 */
typedef void (*db_func)(db_expr_t, int, db_expr_t, char *);

/*
 * Command table
 */
struct db_command {
	char *	name;		/* command name */
	db_func	fcn;		/* function to call; 0 for a pure prefix
				   entry that only dispatches via 'more' */
	int	flag;		/* extra info: */
#define	CS_OWN		0x1	/* non-standard syntax */
#define	CS_MORE		0x2	/* standard syntax, but may have other
				   words at end */
#define	CS_SET_DOT	0x100	/* set dot after command */
	struct db_command *more; /* another level of command: sub-table
				    searched after 'name' matches
				    (e.g. "show" -> db_show_cmds) */
};
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.18.1 1997/03/27 18:46:29 barbou + * ri-osc CR1558: enable use of breakpoint counts even when no + * condition given. + * [1995/09/20 15:24:24 bolinger] + * [97/02/25 barbou] + * + * Revision 1.2.6.2 1996/01/09 19:15:34 devrcs + * Change 'register c' to 'register int c'. + * [1995/12/01 21:42:00 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:02:54 jfraser] + * + * Revision 1.2.6.1 1994/09/23 01:18:27 ezf + * change marker to not FREE + * [1994/09/22 21:09:37 ezf] + * + * Revision 1.2.2.4 1993/08/11 20:37:33 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:32:57 elliston] + * + * Revision 1.2.2.3 1993/07/27 18:26:59 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:12 elliston] + * + * Revision 1.2.2.2 1993/06/09 02:19:53 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:04 jeffc] + * + * Revision 1.2 1993/04/19 16:01:51 devrcs + * Changes from mk78: + * Changed errant call of db_error in db_cond_cmd() to db_printf/db_error. 
+ * [92/05/20 jfriedl] + * [93/02/02 bruel] + * + * Revision 1.1 1992/09/30 02:00:58 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 91/10/09 15:59:09 af + * Revision 2.1.3.1 91/10/05 13:05:38 jeffreyh + * Created to support conditional break point and command execution. + * [91/08/29 tak] + * + * Revision 2.1.3.1 91/10/05 13:05:38 jeffreyh + * Created to support conditional break point and command execution. + * [91/08/29 tak] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include /* For db_printf() */ + +#define DB_MAX_COND 10 /* maximum conditions to be set */ + +int db_ncond_free = DB_MAX_COND; /* free condition */ +struct db_cond { + int c_size; /* size of cond */ + char c_cond_cmd[DB_LEX_LINE_SIZE]; /* cond & cmd */ +} db_cond[DB_MAX_COND]; + +void +db_cond_free(db_thread_breakpoint_t bkpt) +{ + if (bkpt->tb_cond > 0) { + db_cond[bkpt->tb_cond-1].c_size = 0; + db_ncond_free++; + bkpt->tb_cond = 0; + } +} + +boolean_t +db_cond_check(db_thread_breakpoint_t bkpt) +{ + register struct db_cond *cp; + db_expr_t value; + int t; + jmp_buf_t db_jmpbuf; + extern jmp_buf_t *db_recover; + + if (bkpt->tb_cond <= 0) { /* no condition */ + if (--(bkpt->tb_count) > 0) + return(FALSE); + bkpt->tb_count = bkpt->tb_init_count; + return(TRUE); + } + db_dot = PC_REGS(DDB_REGS); + db_prev = db_dot; + db_next = db_dot; + if (_setjmp(db_recover = &db_jmpbuf)) { + /* + * in case of error, return true to enter interactive mode + */ + return(TRUE); + } + + /* + * switch input, and evalutate condition + */ + cp = &db_cond[bkpt->tb_cond - 1]; + db_switch_input(cp->c_cond_cmd, cp->c_size); + if (!db_expression(&value)) { + db_printf("error: condition evaluation error\n"); + return(TRUE); + } + if (value == 0 || --(bkpt->tb_count) > 0) + return(FALSE); + + /* + * execute a command list if exist + */ + bkpt->tb_count = bkpt->tb_init_count; + if ((t = db_read_token()) != tEOL) { + db_unread_token(t); + return(db_exec_cmd_nest(0, 0)); + } + return(TRUE); +} + +void +db_cond_print(db_thread_breakpoint_t bkpt) +{ + register char *p, *ep; + register struct db_cond *cp; + + if (bkpt->tb_cond <= 0) + return; + cp = &db_cond[bkpt->tb_cond-1]; + p = cp->c_cond_cmd; + ep = p + cp->c_size; + while (p < ep) { + if (*p == '\n' || *p == 0) + break; + db_putchar(*p++); + } +} + +void +db_cond_cmd(void) +{ + register int c; + register struct db_cond *cp; + register char *p; + 
db_expr_t value; + db_thread_breakpoint_t bkpt; + + if (db_read_token() != tHASH || db_read_token() != tNUMBER) { + db_printf("# expected instead of \"%s\"\n", db_tok_string); + db_error(0); + return; + } + if ((bkpt = db_find_breakpoint_number(db_tok_number, 0)) == 0) { + db_printf("No such break point #%d\n", db_tok_number); + db_error(0); + return; + } + /* + * if the break point already has a condition, free it first + */ + if (bkpt->tb_cond > 0) { + cp = &db_cond[bkpt->tb_cond - 1]; + db_cond_free(bkpt); + } else { + if (db_ncond_free <= 0) { + db_error("Too many conditions\n"); + return; + } + for (cp = db_cond; cp < &db_cond[DB_MAX_COND]; cp++) + if (cp->c_size == 0) + break; + if (cp >= &db_cond[DB_MAX_COND]) + panic("bad db_cond_free"); + } + for (c = db_read_char(); c == ' ' || c == '\t'; c = db_read_char()); + for (p = cp->c_cond_cmd; c >= 0; c = db_read_char()) + *p++ = c; + /* + * switch to saved data and call db_expression to check the condition. + * If no condition is supplied, db_expression will return false. + * In this case, clear previous condition of the break point. + * If condition is supplied, set the condition to the permanent area. + * Note: db_expression will not return here, if the condition + * expression is wrong. + */ + db_switch_input(cp->c_cond_cmd, p - cp->c_cond_cmd); + if (!db_expression(&value)) { + /* since condition is already freed, do nothing */ + db_flush_lex(); + return; + } + db_flush_lex(); + db_ncond_free--; + cp->c_size = p - cp->c_cond_cmd; + bkpt->tb_cond = (cp - db_cond) + 1; +} diff --git a/osfmk/ddb/db_cond.h b/osfmk/ddb/db_cond.h new file mode 100644 index 000000000..cb178648d --- /dev/null +++ b/osfmk/ddb/db_cond.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:18:37 ezf + * change marker to not FREE + * [1994/09/22 21:09:41 ezf] + * + * Revision 1.1.2.3 1993/09/17 21:34:31 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:07 robert] + * + * Revision 1.1.2.2 1993/07/27 18:27:04 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:18 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_COND_H_ +#define _DDB_DB_COND_H_ + +#include +#include + +/* Prototypes for functions exported by this module. + */ + +void db_cond_free(db_thread_breakpoint_t bkpt); + +boolean_t db_cond_check(db_thread_breakpoint_t bkpt); + +void db_cond_print(db_thread_breakpoint_t bkpt); + +void db_cond_cmd(void); + +#endif /* !_DDB_DB_COND_H_ */ diff --git a/osfmk/ddb/db_examine.c b/osfmk/ddb/db_examine.c new file mode 100644 index 000000000..19a46186a --- /dev/null +++ b/osfmk/ddb/db_examine.c @@ -0,0 +1,935 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:47 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/24 19:34:23 semeria + * KDP and KDB support + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.42.2 1997/09/12 17:15:15 stephen + * make x/x do zero fill right justified hex display + * [1997/09/12 16:31:04 stephen] + * + * Revision 1.2.42.1 1997/03/27 18:46:31 barbou + * Add 'p' option to the "examine" command - values in + * memory treated as addresses and rendered as sym+offset + * [1995/12/29 21:32:33 mod] + * ri-osc CR1560: make search command output address of any matching + * data it finds (so user knows it did something). + * [1995/09/20 15:24:55 bolinger] + * [97/02/25 barbou] + * + * Revision 1.2.25.5 1996/01/09 19:15:38 devrcs + * Add db_print_loc() & db_print_inst() functions. + * Make 'l' display 32 bits and new 'q' to display 64 bits. + * Allow 'u' to display unsigned decimal values (same as 'U'). 
+ * Changed declarations of 'register foo' to 'register int foo'. + * [1995/12/01 21:42:03 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:02:58 jfraser] + * + * Revision 1.2.25.4 1995/06/13 18:21:27 sjs + * Merge with flipc_shared. + * [95/05/22 sjs] + * + * Revision 1.2.30.1 1995/04/03 17:35:17 randys + * Minor change; allow a repeat count to work properly when multiple + * modifier flags are given to the ddb 'x' command. This allows, + * for instance, examination of multiple words in activations other + * than the current one. + * [95/04/03 randys] + * + * Revision 1.2.25.3 1995/01/06 19:10:09 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.6.7 1994/05/06 18:39:09 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. + * * End1.3merge + * [1994/11/04 08:49:22 dwm] + * + * Revision 1.2.25.2 1994/09/23 01:18:44 ezf + * change marker to not FREE + * [1994/09/22 21:09:44 ezf] + * + * Revision 1.2.25.1 1994/06/11 21:11:43 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:01:31 bolinger] + * + * Revision 1.2.23.1 1994/02/08 10:57:47 bernadat + * Fixed output of an examine command to have a power of 2 + * number of fields. + * [93/09/29 paire] + * + * Added dump of hexadecimal address in each line of examine command. + * Fixed beginning of line to be always located at position 0. + * [93/08/11 paire] + * [94/02/07 bernadat] + * + * Revision 1.2.21.4 1994/03/17 22:35:27 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:25:43 dwm] + * + * Revision 1.2.21.3 1994/01/12 17:50:40 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:13:08 dwm] + * + * Revision 1.2.21.2 1993/10/12 16:38:58 dwm + * Print '\n' in x/s statements. [rwd] + * [1993/10/12 16:14:41 dwm] + * + * Revision 1.2.6.5 1993/08/11 20:37:37 elliston + * Add ANSI Prototypes. CR #9523. 
+ * [1993/08/11 03:33:05 elliston] + * + * Revision 1.2.6.4 1993/08/09 19:34:42 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 15:47:32 dswartz] + * + * Revision 1.2.6.3 1993/07/27 18:27:07 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:21 elliston] + * + * Revision 1.2.6.2 1993/06/09 02:20:00 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:10 jeffc] + * + * Revision 1.2 1993/04/19 16:01:58 devrcs + * Changes from mk78: + * Added void type to functions that needed it. + * Added init to 'size' in db_search_cmd(). Removed unused variables. + * Other cleanup to quiet gcc warnings. + * [92/05/16 jfriedl] + * x/u now examines current user space. x/t still examines user + * space of the the specified thread. x/tu is redundant. + * To examine an value as unsigned decimal, use x/U. + * [92/04/18 danner] + * [93/02/02 bruel] + * + * Remember count argument when repeating commands instead of the + * default command, also apply all the formats to current address + * instead of incrementing addresses when switching to next format. + * [barbou@gr.osf.org] + * + * Support 'A' format for print 'p' command [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.1 1992/09/30 02:01:01 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.7 91/10/09 15:59:28 af + * Revision 2.6.1.1 91/10/05 13:05:49 jeffreyh + * Supported non current task space data examination and search. + * Added 'm' format and db_xcdump to print with hex and characters. + * Added db_examine_{forward, backward}. + * Changed db_print_cmd to support variable number of parameters + * including string constant. + * Included "db_access.h". + * [91/08/29 tak] + * + * Revision 2.6.1.1 91/10/05 13:05:49 jeffreyh + * Supported non current task space data examination and search. + * Added 'm' format and db_xcdump to print with hex and characters. + * Added db_examine_{forward, backward}. 
+ * Changed db_print_cmd to support variable number of parameters + * including string constant. + * Included "db_access.h". + * [91/08/29 tak] + * + * Revision 2.6 91/08/28 11:11:01 jsb + * Added 'A' flag to examine: just like 'a' (address), but prints addr + * as a procedure type, thus printing file/line info if available. + * Useful when called as 'x/Ai'. + * [91/08/13 18:14:55 jsb] + * + * Revision 2.5 91/05/14 15:33:31 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:06:20 mrt + * Changed to new Mach copyright + * [91/01/31 16:17:37 mrt] + * + * Revision 2.3 90/11/07 16:49:23 rpd + * Added db_search_cmd, db_search. + * [90/11/06 rpd] + * + * Revision 2.2 90/08/27 21:50:38 dbg + * Add 'r', 'z' to print and examine formats. + * Change calling sequence of db_disasm. + * db_examine sets db_prev and db_next instead of explicitly + * advancing dot. + * [90/08/20 dbg] + * Reflected changes in db_printsym()'s calling seq. + * [90/08/20 af] + * Reduce lint. + * [90/08/07 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +#include /* For strcpy() */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include /* For db_option() */ +#include +#include +#include +#include +#include + +#define db_act_to_task(thr_act) ((thr_act)? thr_act->task: TASK_NULL) + +char db_examine_format[TOK_STRING_SIZE] = "x"; +int db_examine_count = 1; +db_addr_t db_examine_prev_addr = 0; +thread_act_t db_examine_act = THR_ACT_NULL; + +extern int db_max_width; + + +/* Prototypes for functions local to this file. XXX -- should be static! + */ +int db_xcdump( + db_addr_t addr, + int size, + int count, + task_t task); + +int db_examine_width( + int size, + int *items, + int *remainder); + +/* + * Examine (print) data. 
+ */ +void +db_examine_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + thread_act_t thr_act; + extern char db_last_modifier[]; + + if (modif[0] != '\0') + strcpy(db_examine_format, modif); + + if (count == -1) + count = 1; + db_examine_count = count; + if (db_option(modif, 't')) { + if (modif == db_last_modifier) + thr_act = db_examine_act; + else if (!db_get_next_act(&thr_act, 0)) + return; + } else + if (db_option(modif,'u')) + thr_act = current_act(); + else + thr_act = THR_ACT_NULL; + + db_examine_act = thr_act; + db_examine((db_addr_t) addr, db_examine_format, count, + db_act_to_task(thr_act)); +} + +void +db_examine_forward( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + db_examine(db_next, db_examine_format, db_examine_count, + db_act_to_task(db_examine_act)); +} + +void +db_examine_backward( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + db_examine(db_examine_prev_addr - (db_next - db_examine_prev_addr), + db_examine_format, db_examine_count, + db_act_to_task(db_examine_act)); +} + +int +db_examine_width( + int size, + int *items, + int *remainder) +{ + int sz; + int entry; + int width; + + width = size * 2 + 1; + sz = (db_max_width - (sizeof (void *) * 2 + 4)) / width; + for (entry = 1; (entry << 1) < sz; entry <<= 1) + continue; + + sz = sizeof (void *) * 2 + 4 + entry * width; + while (sz + entry < db_max_width) { + width++; + sz += entry; + } + *remainder = (db_max_width - sz + 1) / 2; + *items = entry; + return width; +} + +void +db_examine( + db_addr_t addr, + char * fmt, /* format string */ + int count, /* repeat count */ + task_t task) +{ + int c; + db_expr_t value; + int size; + int width; + int leader; + int items; + int nitems; + char * fp; + db_addr_t next_addr; + int sz; + + db_examine_prev_addr = addr; + while (--count >= 0) { + fp = fmt; + size = sizeof(int); + width = db_examine_width(size, &items, &leader); + while ((c = *fp++) != 0) { + switch (c) { + 
case 'b': + size = sizeof(char); + width = db_examine_width(size, &items, &leader); + break; + case 'h': + size = sizeof(short); + width = db_examine_width(size, &items, &leader); + break; + case 'l': + size = sizeof(int); + width = db_examine_width(size, &items, &leader); + break; + case 'q': + size = sizeof(long); + width = db_examine_width(size, &items, &leader); + break; + case 'a': /* address */ + case 'A': /* function address */ + /* always forces a new line */ + if (db_print_position() != 0) + db_printf("\n"); + db_prev = addr; + next_addr = addr + 4; + db_task_printsym(addr, + (c == 'a')?DB_STGY_ANY:DB_STGY_PROC, + task); + db_printf(":\t"); + break; + case 'm': + db_next = db_xcdump(addr, size, count+1, task); + return; + case 't': + case 'u': + break; + default: + restart: + /* Reset next_addr in case we are printing in + multiple formats. */ + next_addr = addr; + if (db_print_position() == 0) { + /* If we hit a new symbol, print it */ + char * name; + db_addr_t off; + + db_find_task_sym_and_offset(addr,&name,&off,task); + if (off == 0) + db_printf("\r%s:\n", name); + db_printf("%#n: ", addr); + for (sz = 0; sz < leader; sz++) + db_putchar(' '); + db_prev = addr; + nitems = items; + } + + switch (c) { + case 'p': /* Addrs rendered symbolically. 
*/ + if( size == sizeof(void *) ) { + char *symName; + db_addr_t offset; + + items = 1; + value = db_get_task_value( next_addr, + sizeof(db_expr_t), FALSE, task ); + db_find_task_sym_and_offset( value, + &symName, &offset, task); + db_printf("\n\t*%8x(%8X) = %s", + next_addr, value, symName ); + if( offset ) { + db_printf("+%X", offset ); + } + next_addr += size; + } + break; + case 'r': /* signed, current radix */ + for (sz = size, next_addr = addr; + sz >= sizeof (db_expr_t); + sz -= sizeof (db_expr_t)) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, + sizeof (db_expr_t), + TRUE,task); + db_printf("%-*r", width, value); + next_addr += sizeof (db_expr_t); + } + if (sz > 0) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, sz, + TRUE, task); + db_printf("%-*R", width, value); + next_addr += sz; + } + break; +#ifdef APPLE + case 'X': /* unsigned hex */ +#endif + case 'x': /* unsigned hex */ + for (sz = size, next_addr = addr; + sz >= sizeof (db_expr_t); + sz -= sizeof (db_expr_t)) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, + sizeof (db_expr_t), + FALSE,task); +#ifdef APPLE + if ( c == 'X') + db_printf("%0*X ", 2*size, value); + else + db_printf("%-*x", width, value); +#else + db_printf("%-*x", width, value); +#endif + next_addr += sizeof (db_expr_t); + } + if (sz > 0) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, sz, + FALSE, task); +#ifdef APPLE + if ( c == 'X') + db_printf("%0*X ", 2*size, value); + else + db_printf("%-*X", width, value); +#else + db_printf("%-*X", width, value); +#endif + next_addr += sz; + } + break; + case 'z': /* signed hex */ + for (sz = size, next_addr = addr; + sz >= sizeof (db_expr_t); + sz -= sizeof (db_expr_t)) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, 
+ sizeof (db_expr_t), + TRUE, task); + db_printf("%-*z", width, value); + next_addr += sizeof (db_expr_t); + } + if (sz > 0) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr,sz, + TRUE,task); + db_printf("%-*Z", width, value); + next_addr += sz; + } + break; + case 'd': /* signed decimal */ + for (sz = size, next_addr = addr; + sz >= sizeof (db_expr_t); + sz -= sizeof (db_expr_t)) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, + sizeof (db_expr_t), + TRUE,task); + db_printf("%-*d", width, value); + next_addr += sizeof (db_expr_t); + } + if (sz > 0) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, sz, + TRUE, task); + db_printf("%-*D", width, value); + next_addr += sz; + } + break; + case 'U': /* unsigned decimal */ + case 'u': + for (sz = size, next_addr = addr; + sz >= sizeof (db_expr_t); + sz -= sizeof (db_expr_t)) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, + sizeof (db_expr_t), + FALSE,task); + db_printf("%-*u", width, value); + next_addr += sizeof (db_expr_t); + } + if (sz > 0) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, sz, + FALSE, task); + db_printf("%-*U", width, value); + next_addr += sz; + } + break; + case 'o': /* unsigned octal */ + for (sz = size, next_addr = addr; + sz >= sizeof (db_expr_t); + sz -= sizeof (db_expr_t)) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, + sizeof (db_expr_t), + FALSE,task); + db_printf("%-*o", width, value); + next_addr += sizeof (db_expr_t); + } + if (sz > 0) { + if (nitems-- == 0) { + db_putchar('\n'); + goto restart; + } + value = db_get_task_value(next_addr, sz, + FALSE, task); + db_printf("%-*o", width, value); + next_addr += sz; + } + break; + case 'c': /* character */ + for (sz = 
0, next_addr = addr; + sz < size; + sz++, next_addr++) { + value = db_get_task_value(next_addr,1, + FALSE,task); + if ((value >= ' ' && value <= '~') || + value == '\n' || + value == '\t') + db_printf("%c", value); + else + db_printf("\\%03o", value); + } + break; + case 's': /* null-terminated string */ + size = 0; + for (;;) { + value = db_get_task_value(next_addr,1, + FALSE,task); + next_addr += 1; + size++; + if (value == 0) + break; + if (value >= ' ' && value <= '~') + db_printf("%c", value); + else + db_printf("\\%03o", value); + } + break; + case 'i': /* instruction */ + next_addr = db_disasm(addr, FALSE, task); + size = next_addr - addr; + break; + case 'I': /* instruction, alternate form */ + next_addr = db_disasm(addr, TRUE, task); + size = next_addr - addr; + break; + default: + break; + } + if (db_print_position() != 0) + db_end_line(); + break; + } + } + addr = next_addr; + } + db_next = addr; +} + +/* + * Print value. + */ +char db_print_format = 'x'; + +void +db_print_cmd(void) +{ + db_expr_t value; + int t; + task_t task = TASK_NULL; + + if ((t = db_read_token()) == tSLASH) { + if (db_read_token() != tIDENT) { + db_printf("Bad modifier \"/%s\"\n", db_tok_string); + db_error(0); + /* NOTREACHED */ + } + if (db_tok_string[0]) + db_print_format = db_tok_string[0]; + if (db_option(db_tok_string, 't')) { + if (db_default_act) + task = db_default_act->task; + if (db_print_format == 't') + db_print_format = db_tok_string[1]; + } + } else + db_unread_token(t); + + for ( ; ; ) { + t = db_read_token(); + if (t == tSTRING) { + db_printf("%s", db_tok_string); + continue; + } + db_unread_token(t); + if (!db_expression(&value)) + break; + switch (db_print_format) { + case 'a': + case 'A': + db_task_printsym((db_addr_t)value, + (db_print_format == 'a') ? 
DB_STGY_ANY: + DB_STGY_PROC, + task); + break; + case 'r': + db_printf("%11r", value); + break; + case 'x': + db_printf("%08x", value); + break; + case 'z': + db_printf("%8z", value); + break; + case 'd': + db_printf("%11d", value); + break; + case 'u': + db_printf("%11u", value); + break; + case 'o': + db_printf("%16o", value); + break; + case 'c': + value = value & 0xFF; + if (value >= ' ' && value <= '~') + db_printf("%c", value); + else + db_printf("\\%03o", value); + break; + default: + db_printf("Unknown format %c\n", db_print_format); + db_print_format = 'x'; + db_error(0); + } + } +} + +void +db_print_loc( + db_addr_t loc, + task_t task) +{ + db_task_printsym(loc, DB_STGY_PROC, task); +} + +void +db_print_inst( + db_addr_t loc, + task_t task) +{ + (void) db_disasm(loc, TRUE, task); +} + +void +db_print_loc_and_inst( + db_addr_t loc, + task_t task) +{ + db_task_printsym(loc, DB_STGY_PROC, task); + db_printf(":\t"); + (void) db_disasm(loc, TRUE, task); +} + +/* + * Search for a value in memory. 
+ * Syntax: search [/bhl] addr value [mask] [,count] [thread] + */ +void +db_search_cmd(void) +{ + int t; + db_addr_t addr; + int size = 0; + db_expr_t value; + db_expr_t mask; + db_addr_t count; + thread_act_t thr_act; + boolean_t thread_flag = FALSE; + register char *p; + + t = db_read_token(); + if (t == tSLASH) { + t = db_read_token(); + if (t != tIDENT) { + bad_modifier: + db_printf("Bad modifier \"/%s\"\n", db_tok_string); + db_flush_lex(); + return; + } + + for (p = db_tok_string; *p; p++) { + switch(*p) { + case 'b': + size = sizeof(char); + break; + case 'h': + size = sizeof(short); + break; + case 'l': + size = sizeof(long); + break; + case 't': + thread_flag = TRUE; + break; + default: + goto bad_modifier; + } + } + } else { + db_unread_token(t); + size = sizeof(int); + } + + if (!db_expression((db_expr_t *) &addr)) { + db_printf("Address missing\n"); + db_flush_lex(); + return; + } + + if (!db_expression(&value)) { + db_printf("Value missing\n"); + db_flush_lex(); + return; + } + + if (!db_expression(&mask)) + mask = ~0; + + t = db_read_token(); + if (t == tCOMMA) { + if (!db_expression((db_expr_t *) &count)) { + db_printf("Count missing\n"); + db_flush_lex(); + return; + } + } else { + db_unread_token(t); + count = -1; /* effectively forever */ + } + if (thread_flag) { + if (!db_get_next_act(&thr_act, 0)) + return; + } else + thr_act = THR_ACT_NULL; + + db_search(addr, size, value, mask, count, db_act_to_task(thr_act)); +} + +void +db_search( + db_addr_t addr, + int size, + db_expr_t value, + db_expr_t mask, + unsigned int count, + task_t task) +{ + while (count-- != 0) { + db_prev = addr; + if ((db_get_task_value(addr,size,FALSE,task) & mask) == value) + break; + addr += size; + } + db_printf("0x%x: ", addr); + db_next = addr; +} + +#define DB_XCDUMP_NC 16 + +int +db_xcdump( + db_addr_t addr, + int size, + int count, + task_t task) +{ + register int i, n; + db_expr_t value; + int bcount; + db_addr_t off; + char *name; + char data[DB_XCDUMP_NC]; + + 
db_find_task_sym_and_offset(addr, &name, &off, task); + for (n = count*size; n > 0; n -= bcount) { + db_prev = addr; + if (off == 0) { + db_printf("%s:\n", name); + off = -1; + } + db_printf("%0*X:%s", 2*sizeof(db_addr_t), addr, + (size != 1) ? " " : "" ); + bcount = ((n > DB_XCDUMP_NC)? DB_XCDUMP_NC: n); + if (trunc_page(addr) != trunc_page(addr+bcount-1)) { + db_addr_t next_page_addr = trunc_page(addr+bcount-1); + if (!DB_CHECK_ACCESS(next_page_addr, sizeof(int), task)) + bcount = next_page_addr - addr; + } + db_read_bytes((vm_offset_t)addr, bcount, data, task); + for (i = 0; i < bcount && off != 0; i += size) { + if (i % 4 == 0) + db_printf(" "); + value = db_get_task_value(addr, size, FALSE, task); + db_printf("%0*x ", size*2, value); + addr += size; + db_find_task_sym_and_offset(addr, &name, &off, task); + } + db_printf("%*s", + ((DB_XCDUMP_NC-i)/size)*(size*2+1)+(DB_XCDUMP_NC-i)/4, + ""); + bcount = i; + db_printf("%s*", (size != 1)? " ": ""); + for (i = 0; i < bcount; i++) { + value = data[i]; + db_printf("%c", (value >= ' ' && value <= '~')? value: '.'); + } + db_printf("*\n"); + } + return(addr); +} diff --git a/osfmk/ddb/db_examine.h b/osfmk/ddb/db_examine.h new file mode 100644 index 000000000..e59698ed6 --- /dev/null +++ b/osfmk/ddb/db_examine.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.2 1996/01/09 19:15:43 devrcs + * Function prototypes for db_print_loc() & db_print_inst(). + * [1995/12/01 21:42:06 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:03:03 jfraser] + * + * Revision 1.1.7.1 1994/09/23 01:18:55 ezf + * change marker to not FREE + * [1994/09/22 21:09:49 ezf] + * + * Revision 1.1.2.4 1993/09/17 21:34:33 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:11 robert] + * + * Revision 1.1.2.3 1993/08/11 22:12:10 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:11 elliston] + * + * Revision 1.1.2.2 1993/07/27 18:27:12 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:28 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_EXAMINE_H_ +#define _DDB_DB_EXAMINE_H_ + +#include +#include + +/* Prototypes for functions exported by this module. 
+ */ + +void db_examine_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_examine_forward( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_examine_backward( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_examine( + db_addr_t addr, + char * fmt, /* format string */ + int count, /* repeat count */ + task_t task); + +void db_print_cmd(void); + +void db_print_loc( + db_addr_t loc, + task_t task); + +void +db_print_inst( + db_addr_t loc, + task_t task); + +void db_print_loc_and_inst( + db_addr_t loc, + task_t task); + +void db_search_cmd(void); + +void db_search( + db_addr_t addr, + int size, + db_expr_t value, + db_expr_t mask, + unsigned int count, + task_t task); + +#endif /* !_DDB_DB_EXAMINE_H_ */ diff --git a/osfmk/ddb/db_expr.c b/osfmk/ddb/db_expr.c new file mode 100644 index 000000000..d1a27e584 --- /dev/null +++ b/osfmk/ddb/db_expr.c @@ -0,0 +1,573 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.19.1 1997/03/27 18:46:35 barbou + * ri-osc CR1561: make operators "logical and", "logical or" + * lex correctly. + * [1995/09/20 15:26:38 bolinger] + * [97/02/25 barbou] + * + * Revision 1.2.10.2 1995/01/06 19:10:13 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.3.5 1994/05/06 18:39:16 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. + * * End1.3merge + * [1994/11/04 08:49:27 dwm] + * + * Revision 1.2.10.1 1994/09/23 01:19:06 ezf + * change marker to not FREE + * [1994/09/22 21:09:53 ezf] + * + * Revision 1.2.3.3 1993/07/27 18:27:15 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:36 elliston] + * + * Revision 1.2.3.2 1993/06/09 02:20:06 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:16 jeffc] + * + * Revision 1.2 1993/04/19 16:02:09 devrcs + * Allow unprefixed (0x) hexadecimal constants starting by a letter: + * unknown symbols are tentatively interpreted as hexadecimal constants, + * and ambiguities are reported. + * [93/03/24 barbou] + * + * Changes from mk78: + * Removed unused variable from db_unary(). + * [92/05/16 jfriedl] + * [93/02/02 bruel] + * + * Added string format arguments [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.1 1992/09/30 02:01:04 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 15:59:46 af + * Revision 2.4.3.1 91/10/05 13:06:04 jeffreyh + * Added relational expression etc. to support condition expression. + * Supported modifier after indirect expression to specify size, + * sign extention and non current task space indirection. + * Changed error messages to print more information. 
+ * [91/08/29 tak] + * + * Revision 2.4.3.1 91/10/05 13:06:04 jeffreyh + * Added relational expression etc. to support condition expression. + * Supported modifier after indirect expression to specify size, + * sign extention and non current task space indirection. + * Changed error messages to print more information. + * [91/08/29 tak] + * + * Revision 2.4 91/05/14 15:33:45 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:06:25 mrt + * Changed to new Mach copyright + * [91/01/31 16:17:46 mrt] + * + * Revision 2.2 90/08/27 21:50:57 dbg + * Use '..' instead of '$$' for db_prev. + * Use '+' for db_next. + * [90/08/22 dbg] + * + * Allow repeated unary operators. + * [90/08/20 dbg] + * + * Reflected back rename of db_symbol_value->db_value_of_name + * [90/08/20 af] + * Reduce lint. + * [90/08/07 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include +#include +#include + + + +/* Prototypes for functions local to this file. XXX -- should be static! + */ +boolean_t db_term(db_expr_t *valuep); +boolean_t db_unary(db_expr_t *valuep); +boolean_t db_mult_expr(db_expr_t *valuep); +boolean_t db_add_expr(db_expr_t *valuep); +boolean_t db_shift_expr(db_expr_t *valuep); +boolean_t db_logical_relation_expr(db_expr_t *valuep); +boolean_t db_logical_and_expr(db_expr_t *valuep); +boolean_t db_logical_or_expr(db_expr_t *valuep); + + +/* try to interpret unknown symbols as hexadecimal constants */ +int db_allow_unprefixed_hexa = 1; + +boolean_t +db_term(db_expr_t *valuep) +{ + int t; + boolean_t valid_symbol = FALSE; + boolean_t valid_hexa = FALSE; + + switch(t = db_read_token()) { + case tIDENT: + if (db_value_of_name(db_tok_string, valuep)) { + valid_symbol = TRUE; + } + if (db_allow_unprefixed_hexa && db_radix == 16 && + db_tok_string) { + char *cp; + int value; + + value = 0; + valid_hexa = TRUE; + for (cp = db_tok_string; *cp; cp++) { + if (*cp >= 'a' && *cp <= 'f') { + value = value * 16 + 10 + (*cp - 'a'); + } else if (*cp >= 'A' && *cp <= 'F') { + value = value * 16 + 10 + (*cp - 'A'); + } else if (*cp >= '0' && *cp <= '9') { + value = value * 16 + (*cp - '0'); + } else { + valid_hexa = FALSE; + break; + } + } + if (valid_hexa) { + if (valid_symbol) { + db_printf("Ambiguous constant %x used as a symbol\n", + value); + } else { + *valuep = (db_expr_t)value; + } + } + } + if (!valid_symbol && !valid_hexa) { + db_printf("Symbol \"%s\" not found\n", db_tok_string); + db_error(0); + /*NOTREACHED*/ + } + return (TRUE); + case tNUMBER: + *valuep = /*(db_expr_t)*/db_tok_number; + return (TRUE); + case tDOT: + *valuep = (db_expr_t)db_dot; + return (TRUE); + case tDOTDOT: + *valuep = (db_expr_t)db_prev; + return (TRUE); + case tPLUS: + *valuep = (db_expr_t) db_next; + 
return (TRUE); + case tQUOTE: + *valuep = (db_expr_t)db_last_addr; + return (TRUE); + case tDOLLAR: + if (!db_get_variable(valuep)) + return (FALSE); + return (TRUE); + case tLPAREN: + if (!db_expression(valuep)) { + db_error("Unmached ()s\n"); + /*NOTREACHED*/ + } + t = db_read_token(); + if (t != tRPAREN) { + db_printf("')' expected at \"%s...\"\n", db_tok_string); + db_error(0); + /*NOTREACHED*/ + } + return (TRUE); + case tSTRING: + { + static db_tok_offset = 0; + char *sp, *cp; + + sp = (char *)db_tok_string + db_tok_offset; + *valuep = *(int *)sp; + for (cp = sp; + *cp && cp < sp + sizeof (int); + cp++); + if (cp == sp + sizeof (int) && *cp) { + db_tok_offset += sizeof (int); + db_unread_token(t); + } else { + db_tok_offset = 0; + } + return (TRUE); + } + default: + db_unread_token(t); + return (FALSE); + } +} + +int +db_size_option( + char *modif, + boolean_t *u_option, + boolean_t *t_option) +{ + register char *p; + int size = sizeof(int); + + *u_option = FALSE; + *t_option = FALSE; + for (p = modif; *p; p++) { + switch(*p) { + case 'b': + size = sizeof(char); + break; + case 'h': + size = sizeof(short); + break; + case 'l': + size = sizeof(long); + break; + case 'u': + *u_option = TRUE; + break; + case 't': + *t_option = TRUE; + break; + } + } + return(size); +} + +boolean_t +db_unary(db_expr_t *valuep) +{ + int t; + int size; + boolean_t u_opt, t_opt; + task_t task; + extern task_t db_default_task; + + t = db_read_token(); + if (t == tMINUS) { + if (!db_unary(valuep)) { + db_error("Expression syntax error after '-'\n"); + /*NOTREACHED*/ + } + *valuep = -*valuep; + return (TRUE); + } + if (t == tSTAR) { + /* indirection */ + if (!db_unary(valuep)) { + db_error("Expression syntax error after '*'\n"); + /*NOTREACHED*/ + } + task = TASK_NULL; + size = sizeof(db_addr_t); + u_opt = FALSE; + t = db_read_token(); + if (t == tIDENT && db_tok_string[0] == ':') { + size = db_size_option(&db_tok_string[1], &u_opt, &t_opt); + if (t_opt) + task = db_default_task; + } 
else + db_unread_token(t); + *valuep = db_get_task_value((db_addr_t)*valuep, size, !u_opt, task); + return (TRUE); + } + if (t == tEXCL) { + if (!db_unary(valuep)) { + db_error("Expression syntax error after '!'\n"); + /*NOTREACHED*/ + } + *valuep = (!(*valuep)); + return (TRUE); + } + db_unread_token(t); + return (db_term(valuep)); +} + +boolean_t +db_mult_expr(db_expr_t *valuep) +{ + db_expr_t lhs, rhs; + int t; + char c; + + if (!db_unary(&lhs)) + return (FALSE); + + t = db_read_token(); + while (t == tSTAR || t == tSLASH || t == tPCT || t == tHASH + || t == tBIT_AND) { + c = db_tok_string[0]; + if (!db_term(&rhs)) { + db_printf("Expression syntax error after '%c'\n", c); + db_error(0); + /*NOTREACHED*/ + } + switch(t) { + case tSTAR: + lhs *= rhs; + break; + case tBIT_AND: + lhs &= rhs; + break; + default: + if (rhs == 0) { + db_error("Divide by 0\n"); + /*NOTREACHED*/ + } + if (t == tSLASH) + lhs /= rhs; + else if (t == tPCT) + lhs %= rhs; + else + lhs = ((lhs+rhs-1)/rhs)*rhs; + } + t = db_read_token(); + } + db_unread_token(t); + *valuep = lhs; + return (TRUE); +} + +boolean_t +db_add_expr(db_expr_t *valuep) +{ + db_expr_t lhs, rhs; + int t; + char c; + + if (!db_mult_expr(&lhs)) + return (FALSE); + + t = db_read_token(); + while (t == tPLUS || t == tMINUS || t == tBIT_OR) { + c = db_tok_string[0]; + if (!db_mult_expr(&rhs)) { + db_printf("Expression syntax error after '%c'\n", c); + db_error(0); + /*NOTREACHED*/ + } + if (t == tPLUS) + lhs += rhs; + else if (t == tMINUS) + lhs -= rhs; + else + lhs |= rhs; + t = db_read_token(); + } + db_unread_token(t); + *valuep = lhs; + return (TRUE); +} + +boolean_t +db_shift_expr(db_expr_t *valuep) +{ + db_expr_t lhs, rhs; + int t; + + if (!db_add_expr(&lhs)) + return (FALSE); + + t = db_read_token(); + while (t == tSHIFT_L || t == tSHIFT_R) { + if (!db_add_expr(&rhs)) { + db_printf("Expression syntax error after \"%s\"\n", + (t == tSHIFT_L)? 
"<<": ">>"); + db_error(0); + /*NOTREACHED*/ + } + if (rhs < 0) { + db_error("Negative shift amount\n"); + /*NOTREACHED*/ + } + if (t == tSHIFT_L) + lhs <<= rhs; + else { + /* Shift right is unsigned */ + lhs = (natural_t) lhs >> rhs; + } + t = db_read_token(); + } + db_unread_token(t); + *valuep = lhs; + return (TRUE); +} + +boolean_t +db_logical_relation_expr(db_expr_t *valuep) +{ + db_expr_t lhs, rhs; + int t; + char op[3]; + + if (!db_shift_expr(&lhs)) + return(FALSE); + + t = db_read_token(); + while (t == tLOG_EQ || t == tLOG_NOT_EQ + || t == tGREATER || t == tGREATER_EQ + || t == tLESS || t == tLESS_EQ) { + op[0] = db_tok_string[0]; + op[1] = db_tok_string[1]; + op[2] = 0; + if (!db_shift_expr(&rhs)) { + db_printf("Expression syntax error after \"%s\"\n", op); + db_error(0); + /*NOTREACHED*/ + } + switch(t) { + case tLOG_EQ: + lhs = (lhs == rhs); + break; + case tLOG_NOT_EQ: + lhs = (lhs != rhs); + break; + case tGREATER: + lhs = (lhs > rhs); + break; + case tGREATER_EQ: + lhs = (lhs >= rhs); + break; + case tLESS: + lhs = (lhs < rhs); + break; + case tLESS_EQ: + lhs = (lhs <= rhs); + break; + } + t = db_read_token(); + } + db_unread_token(t); + *valuep = lhs; + return (TRUE); +} + +boolean_t +db_logical_and_expr(db_expr_t *valuep) +{ + db_expr_t lhs, rhs; + int t; + + if (!db_logical_relation_expr(&lhs)) + return(FALSE); + + t = db_read_token(); + while (t == tLOG_AND) { + if (!db_logical_relation_expr(&rhs)) { + db_error("Expression syntax error after \"&&\"\n"); + /*NOTREACHED*/ + } + lhs = (lhs && rhs); + t = db_read_token(); + } + db_unread_token(t); + *valuep = lhs; + return (TRUE); +} + +boolean_t +db_logical_or_expr(db_expr_t *valuep) +{ + db_expr_t lhs, rhs; + int t; + + if (!db_logical_and_expr(&lhs)) + return(FALSE); + + t = db_read_token(); + while (t == tLOG_OR) { + if (!db_logical_and_expr(&rhs)) { + db_error("Expression syntax error after \"||\"\n"); + /*NOTREACHED*/ + } + lhs = (lhs || rhs); + t = db_read_token(); + } + db_unread_token(t); + 
*valuep = lhs; + return (TRUE); +} + +int +db_expression(db_expr_t *valuep) +{ + return (db_logical_or_expr(valuep)); +} diff --git a/osfmk/ddb/db_expr.h b/osfmk/ddb/db_expr.h new file mode 100644 index 000000000..a57dca83b --- /dev/null +++ b/osfmk/ddb/db_expr.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:19:18 ezf + * change marker to not FREE + * [1994/09/22 21:09:57 ezf] + * + * Revision 1.1.2.3 1993/09/17 21:34:35 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:14 robert] + * + * Revision 1.1.2.2 1993/07/27 18:27:21 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:11:42 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_EXPR_H_ +#define _DDB_DB_EXPR_H_ + +#include +#include + + +/* Prototypes for functions exported by this module. 
+ */ + +int db_size_option( + char *modif, + boolean_t *u_option, + boolean_t *t_option); + +int db_expression(db_expr_t *valuep); + +#endif /* !_DDB_DB_EXPR_H_ */ diff --git a/osfmk/ddb/db_ext_symtab.c b/osfmk/ddb/db_ext_symtab.c new file mode 100644 index 000000000..d055fbdeb --- /dev/null +++ b/osfmk/ddb/db_ext_symtab.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ +#include +#include + +#include +#include +#include +#include +#include /* prototype */ + +#if MACH_KDB && MACH_DEBUG +#include +#include +#include +#include +#include +#endif + +/* + * Loads a symbol table for an external file into the kernel debugger. + * The symbol table data is an array of characters. It is assumed that + * the caller and the kernel debugger agree on its format. + */ +kern_return_t +host_load_symbol_table( + host_priv_t host_priv, + task_t task, + char * name, + pointer_t symtab, + mach_msg_type_number_t symtab_count) +{ +#if !MACH_DEBUG || !MACH_KDB + return KERN_FAILURE; +#else + kern_return_t result; + vm_offset_t symtab_start; + vm_offset_t symtab_end; + vm_map_t map; + vm_map_copy_t symtab_copy_object; + + if (host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_ARGUMENT); + + /* + * Copy the symbol table array into the kernel. + * We make a copy of the copy object, and clear + * the old one, so that returning error will not + * deallocate the data twice. + */ + symtab_copy_object = (vm_map_copy_t) symtab; + result = vm_map_copyout( + kernel_map, + &symtab_start, + vm_map_copy_copy(symtab_copy_object)); + if (result != KERN_SUCCESS) + return (result); + + symtab_end = symtab_start + symtab_count; + + /* + * Add the symbol table. + * Do not keep a reference for the task map. 
XXX + */ + if (task == TASK_NULL) + map = VM_MAP_NULL; + else + map = task->map; + if (!X_db_sym_init((char *)symtab_start, + (char *)symtab_end, + name, + (char *)map)) + { + /* + * Not enough room for symbol table - failure. + */ + (void) vm_deallocate(kernel_map, + symtab_start, + symtab_count); + return (KERN_FAILURE); + } + + /* + * Wire down the symbol table + */ + (void) vm_map_wire(kernel_map, + symtab_start, + round_page(symtab_end), + VM_PROT_READ|VM_PROT_WRITE, FALSE); + + /* + * Discard the original copy object + */ + vm_map_copy_discard(symtab_copy_object); + + return (KERN_SUCCESS); +#endif /* MACH_DEBUG && MACH_KDB */ +} diff --git a/osfmk/ddb/db_input.c b/osfmk/ddb/db_input.c new file mode 100644 index 000000000..7283a4519 --- /dev/null +++ b/osfmk/ddb/db_input.c @@ -0,0 +1,822 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.10.2 1994/09/23 01:19:37 ezf + * change marker to not FREE + * [1994/09/22 21:10:05 ezf] + * + * Revision 1.3.10.1 1994/06/11 21:11:48 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:01:41 bolinger] + * + * Revision 1.3.8.2 1994/02/11 14:21:41 paire + * Added string.h header file for strlen declaration. + * [94/02/09 paire] + * + * Revision 1.3.8.1 1994/02/08 10:57:55 bernadat + * Added db_auto_completion variable. + * [93/08/17 paire] + * + * Added support of symbol completion by typing '\t'. + * [93/08/14 paire] + * [94/02/07 bernadat] + * + * Revision 1.3.2.4 1993/08/11 20:37:51 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:21 elliston] + * + * Revision 1.3.2.3 1993/07/27 18:27:30 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:01 elliston] + * + * Revision 1.3.2.2 1993/06/09 02:20:13 gm + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:57:14 jeffc] + * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:26 jeffc] + * + * Revision 1.3 1993/04/19 16:02:17 devrcs + * Replaced ^R (redraw) with ^L [barbou@gr.osf.org] + * + * Added ^R and ^S commands for history search commands + * ^U does not erase end of the line anymore. (only erases + * from the beginning of the line to current position). + * [barbou@gr.osf.org] + * + * ^C now erases the entire line. [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Fixed history management: Do not store repeated typed + * command. Null terminate current command in case it is a + * substring of the last command. 
+ * [92/10/02 bernadat] + * + * Revision 1.2 1992/11/25 01:04:24 robert + * integrate changes for norma_14 below + * + * Philippe Bernadat (bernadat) at gr.osf.org 02-Oct-92 + * Fixed history management: Do not store repeated typed + * command. Null terminate current command in case it is a + * substring of the last command. + * [1992/11/20 00:56:07 robert] + * + * integrate changes below for norma_14 + * [1992/11/13 19:21:34 robert] + * + * Revision 1.1 1992/09/30 02:01:08 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.7.3.2 92/09/15 17:14:26 jeffreyh + * Fixed history code. (Only one char. out of 2 was checked to + * compare to last command) + * [barbou@gr.osf.org] + * + * Revision 2.7.3.1 92/03/03 16:13:30 jeffreyh + * Pick up changes from TRUNK + * [92/02/26 10:59:36 jeffreyh] + * + * Revision 2.8 92/02/19 15:07:44 elf + * Added delete_line (Ctrl-U). + * [92/02/17 kivinen] + * + * Added command line history. Ctrl-P = previous, Ctrl-N = next. If + * DB_HISTORY_SIZE is 0 then command history is disabled. + * [92/02/17 kivinen] + * + * Revision 2.7 91/10/09 16:00:03 af + * Revision 2.6.2.1 91/10/05 13:06:12 jeffreyh + * Fixed incorrect db_lbuf_end setting. + * [91/08/29 tak] + * + * Revision 2.6.2.1 91/10/05 13:06:12 jeffreyh + * Fixed incorrect db_lbuf_end setting. + * [91/08/29 tak] + * + * Revision 2.6 91/07/09 23:15:49 danner + * Add include of machine/db_machdep.h to allow machine-specific + * overrides via defines. + * [91/07/08 danner] + * + * Revision 2.5 91/05/14 15:34:03 mrt + * Correcting copyright + * + * Revision 2.4 91/02/14 14:41:53 mrt + * Add input line editing. + * [90/11/11 dbg] + * + * Revision 2.3 91/02/05 17:06:32 mrt + * Changed to new Mach copyright + * [91/01/31 16:18:13 mrt] + * + * Revision 2.2 90/08/27 21:51:03 dbg + * Reduce lint. + * [90/08/07 dbg] + * Created. 
+ * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef DB_HISTORY_SIZE +#define DB_HISTORY_SIZE 4000 +#endif /* DB_HISTORY_SIZE */ + +/* + * Character input and editing. + */ + +/* + * We don't track output position while editing input, + * since input always ends with a new-line. We just + * reset the line position at the end. 
+ */ +char * db_lbuf_start; /* start of input line buffer */ +char * db_lbuf_end; /* end of input line buffer */ +char * db_lc; /* current character */ +char * db_le; /* one past last character */ +int db_completion; /* number of incomplete symbols matched */ +int db_auto_completion = 10; /* number of line to display without asking */ +#if DB_HISTORY_SIZE != 0 +char db_history[DB_HISTORY_SIZE]; /* start of history buffer */ +int db_history_size = DB_HISTORY_SIZE;/* size of history buffer */ +char * db_history_curr = db_history; /* start of current line */ +char * db_history_last = db_history; /* start of last line */ +char * db_history_prev = (char *) 0; /* start of previous line */ +int db_hist_unmodified = 0; /* unmodified line from history */ +int db_hist_search = 0; /* are we in hist search mode ? */ +char db_hist_search_string[DB_LEX_LINE_SIZE];/* the string to look for */ +int db_hist_ignore_dups = 0; /* don't duplicate commands in hist */ +#endif + +#define CTRL(c) ((c) & 0x1f) +#define isspace(c) ((c) == ' ' || (c) == '\t') +#define BLANK ' ' +#define BACKUP '\b' + + + +/* Prototypes for functions local to this file. XXX -- should be static! 
+ */ +void db_putstring( + char *s, + int count); + +void db_putnchars( + int c, + int count); + +void db_delete( + int n, + int bwd); + +void db_delete_line(void); + +boolean_t db_hist_substring( + char *string, + char *substring); + +boolean_t db_inputchar(int c); + +extern jmp_buf_t *db_recover; + +void +db_putstring( + char *s, + int count) +{ + while (--count >= 0) + cnputc(*s++); +} + +void +db_putnchars( + int c, + int count) +{ + while (--count >= 0) + cnputc(c); +} + +/* + * Delete N characters, forward or backward + */ +#define DEL_FWD 0 +#define DEL_BWD 1 +void +db_delete( + int n, + int bwd) +{ + register char *p; + + if (bwd) { + db_lc -= n; + db_putnchars(BACKUP, n); + } + for (p = db_lc; p < db_le-n; p++) { + *p = *(p+n); + cnputc(*p); + } + db_putnchars(BLANK, n); + db_putnchars(BACKUP, db_le - db_lc); + db_le -= n; +} + +void +db_delete_line(void) +{ + db_delete(db_le - db_lc, DEL_FWD); + db_delete(db_lc - db_lbuf_start, DEL_BWD); + db_le = db_lc = db_lbuf_start; +} + +#if DB_HISTORY_SIZE != 0 +#define INC_DB_CURR() \ + do { \ + db_history_curr++; \ + if (db_history_curr > \ + db_history + db_history_size - 1) \ + db_history_curr = db_history; \ + } while (0) +#define DEC_DB_CURR() \ + do { \ + db_history_curr--; \ + if (db_history_curr < db_history) \ + db_history_curr = db_history + \ + db_history_size - 1; \ + } while (0) +#endif + +/* returs TRUE if "substring" is a substring of "string" */ +boolean_t +db_hist_substring( + char *string, + char *substring) +{ + register char *cp1, *cp2; + + cp1 = string; + while (*cp1) + cp1++; + cp2 = substring; + while (*cp2) + cp2++; + + while (cp2 > substring) { + cp1--; cp2--; + } + + while (cp1 >= string) { + register char *cp3; + + cp2 = substring; + cp3 = cp1; + while (*cp2 && *cp2 == *cp3) { + cp2++; cp3++; + } + if (*cp2 == '\0') { + return TRUE; + } + cp1--; + } + return FALSE; +} + +/* returns TRUE at end-of-line */ +boolean_t +db_inputchar(int c) +{ + char *sym; + char *start; + char *restart; + 
jmp_buf_t db_jmpbuf; + jmp_buf_t *db_prev; + char *p; + int len; + + switch(db_completion) { + case -1: + db_putchar('\n'); + db_prev = db_recover; + if (_setjmp(db_recover = &db_jmpbuf) == 0 && + (c == 'y' || c == ' ' || c == '\t')) + db_print_completion(db_tok_string); + db_recover = db_prev; + db_completion = 0; + db_reset_more(); + db_output_prompt(); + if (db_le > db_lbuf_start) { + for (start = db_lbuf_start; start < db_le; start++) + db_putchar(*start); + db_putnchars(BACKUP, db_le - db_lc); + } + return(FALSE); + + case 0: + break; + + default: + if (c == '\t') { + db_printf("\nThere are %d possibilities. ", db_completion); + db_printf("Do you really wish to see them all [n] ? "); + db_force_whitespace(); + db_completion = -1; + db_reset_more(); + return(FALSE); + } + db_completion = 0; + break; + } + + switch (c) { + case '\t': + /* symbol completion */ + if (db_lc == db_lbuf_start || db_auto_completion == 0) + break; + if (db_le == db_lbuf_end) { + cnputc('\007'); + break; + } + start = db_lc - 1; + while (start >= db_lbuf_start && + ((*start >= 'A' && *start <= 'Z') || + (*start >= 'a' && *start <= 'z') || + (*start >= '0' && *start <= '9') || + *start == '_' || *start == ':')) + start--; + if (start == db_lc - 1) + break; + if (start > db_lbuf_start && *start == '$') { + cnputc('\007'); + break; + } + sym = db_tok_string; + restart = ++start; + do { + *sym++ = *start++; + } while (start != db_lc && + sym != db_tok_string + sizeof(db_tok_string)); + if (sym == db_tok_string + sizeof(db_tok_string)) { + cnputc('\007'); + break; + } + *sym = '\0'; + db_completion = db_lookup_incomplete(db_tok_string, + sizeof(db_tok_string)); + if (db_completion == 0) { + /* symbol unknown */ + cnputc('\007'); + break; + } + + len = strlen(db_tok_string) - (start - restart); + if (db_completion == 1 && + (db_le == db_lc || + (db_le > db_lc) && *db_lc != ' ')) + len++; + for (p = db_le - 1; p >= db_lc; p--) + *(p + len) = *p; + db_le += len; + for (sym = 
&db_tok_string[start - restart]; + *sym != '\0'; sym++) + *db_lc++ = *sym; + + if (db_completion == 1 || db_completion > db_auto_completion) { + for (sym = &db_tok_string[start - restart]; + *sym != '\0'; sym++) + cnputc(*sym); + if (db_completion == 1) { + if (db_le == db_lc || + (db_le > db_lc) && *db_lc != ' ') { + cnputc(' '); + *db_lc++ = ' '; + } + db_completion = 0; + } + db_putstring(db_lc, db_le - db_lc); + db_putnchars(BACKUP, db_le - db_lc); + } + + if (db_completion > 1) { + cnputc('\007'); + if (db_completion <= db_auto_completion) { + db_putchar('\n'); + db_print_completion(db_tok_string); + db_completion = 0; + db_reset_more(); + db_output_prompt(); + if (db_le > db_lbuf_start) { + for (start = db_lbuf_start; start < db_le; start++) + db_putchar(*start); + db_putnchars(BACKUP, db_le - db_lc); + } + } + } + break; + + case CTRL('b'): + /* back up one character */ + if (db_lc > db_lbuf_start) { + cnputc(BACKUP); + db_lc--; + } + break; + case CTRL('f'): + /* forward one character */ + if (db_lc < db_le) { + cnputc(*db_lc); + db_lc++; + } + break; + case CTRL('a'): + /* beginning of line */ + while (db_lc > db_lbuf_start) { + cnputc(BACKUP); + db_lc--; + } + break; + case CTRL('e'): + /* end of line */ + while (db_lc < db_le) { + cnputc(*db_lc); + db_lc++; + } + break; + case CTRL('h'): + case 0177: + /* erase previous character */ + if (db_lc > db_lbuf_start) + db_delete(1, DEL_BWD); + break; + case CTRL('d'): + /* erase next character */ + if (db_lc < db_le) + db_delete(1, DEL_FWD); + break; + case CTRL('k'): + /* delete to end of line */ + if (db_lc < db_le) + db_delete(db_le - db_lc, DEL_FWD); + break; + case CTRL('u'): + /* delete to beginning of line */ + if (db_lc > db_lbuf_start) + db_delete(db_lc - db_lbuf_start, DEL_BWD); + break; + case CTRL('t'): + /* twiddle last 2 characters */ + if (db_lc >= db_lbuf_start + 2) { + c = db_lc[-2]; + db_lc[-2] = db_lc[-1]; + db_lc[-1] = c; + cnputc(BACKUP); + cnputc(BACKUP); + cnputc(db_lc[-2]); + 
cnputc(db_lc[-1]); + } + break; + case CTRL('c'): + case CTRL('g'): + db_delete_line(); +#if DB_HISTORY_SIZE != 0 + db_history_curr = db_history_last; + if (c == CTRL('g') && db_hist_search) { + char *p; + for (p = db_hist_search_string, db_le = db_lbuf_start; + *p; ) { + *db_le++ = *p++; + } + db_lc = db_le; + *db_le = '\0'; + db_putstring(db_lbuf_start, db_le - db_lbuf_start); + } +#endif + break; +#if DB_HISTORY_SIZE != 0 + case CTRL('r'): + if (db_hist_search++ == 0) { + /* starting an history lookup */ + register char *cp1, *cp2; + for (cp1 = db_lbuf_start, cp2 = db_hist_search_string; + cp1 < db_le; + cp1++, cp2++) + *cp2 = *cp1; + *cp2 = '\0'; + db_hist_search++; + } + /* FALL THROUGH */ + case CTRL('p'): + { + char * old_history_curr = db_history_curr; + + if (db_hist_unmodified++ == 0) + db_hist_unmodified++; + DEC_DB_CURR(); + while (db_history_curr != db_history_last) { + DEC_DB_CURR(); + if (*db_history_curr == '\0') { + INC_DB_CURR(); + if (db_hist_search <= 1) { + if (*db_history_curr == '\0') + cnputc('\007'); + else + DEC_DB_CURR(); + break; + } + if (*db_history_curr == '\0') { + cnputc('\007'); + db_history_curr = old_history_curr; + DEC_DB_CURR(); + break; + } + if (db_history_curr != db_history_last && + db_hist_substring(db_history_curr, + db_hist_search_string)) { + DEC_DB_CURR(); + break; + } + DEC_DB_CURR(); + } + } + if (db_history_curr == db_history_last) { + cnputc('\007'); + db_history_curr = old_history_curr; + } else { + register char *p; + INC_DB_CURR(); + db_delete_line(); + for (p = db_history_curr, db_le = db_lbuf_start; + *p; ) { + *db_le++ = *p++; + if (p == db_history + db_history_size) { + p = db_history; + } + } + db_lc = db_le; + *db_le = '\0'; + db_putstring(db_lbuf_start, db_le - db_lbuf_start); + } + break; + } + case CTRL('s'): + if (db_hist_search++ == 0) { + /* starting an history lookup */ + register char *cp1, *cp2; + for (cp1 = db_lbuf_start, cp2 = db_hist_search_string; + cp1 < db_le; + cp1++, cp2++) + *cp2 = *cp1; 
+ *cp2 = '\0'; + db_hist_search++; + } + /* FALL THROUGH */ + case CTRL('n'): + { + char *old_history_curr = db_history_curr; + + if (db_hist_unmodified++ == 0) + db_hist_unmodified++; + while (db_history_curr != db_history_last) { + if (*db_history_curr == '\0') { + if (db_hist_search <= 1) + break; + INC_DB_CURR(); + if (db_history_curr != db_history_last && + db_hist_substring(db_history_curr, + db_hist_search_string)) { + DEC_DB_CURR(); + break; + } + DEC_DB_CURR(); + } + INC_DB_CURR(); + } + if (db_history_curr != db_history_last) { + INC_DB_CURR(); + if (db_history_curr != db_history_last) { + register char *p; + db_delete_line(); + for (p = db_history_curr, + db_le = db_lbuf_start; *p;) { + *db_le++ = *p++; + if (p == db_history + + db_history_size) { + p = db_history; + } + } + db_lc = db_le; + *db_le = '\0'; + db_putstring(db_lbuf_start, + db_le - db_lbuf_start); + } else { + cnputc('\007'); + db_history_curr = old_history_curr; + } + } else { + cnputc('\007'); + db_history_curr = old_history_curr; + } + break; + } +#endif + /* refresh the command line */ + case CTRL('l'): + db_putstring("^L\n", 3); + if (db_le > db_lbuf_start) { + db_putstring(db_lbuf_start, db_le - db_lbuf_start); + db_putnchars(BACKUP, db_le - db_lc); + } + break; + case '\n': + case '\r': +#if DB_HISTORY_SIZE != 0 + /* Check if it same than previous line */ + if (db_history_prev) { + register char *pp, *pc; + + /* Is it unmodified */ + for (p = db_history_prev, pc = db_lbuf_start; + pc != db_le && *p;) { + if (*p != *pc) + break; + if (++p == db_history + db_history_size) { + p = db_history; + } + if (++pc == db_history + db_history_size) { + pc = db_history; + } + } + if (!*p && pc == db_le) { + /* Repeted previous line, not saved */ + db_history_curr = db_history_last; + *db_le++ = c; + db_hist_search = 0; + db_hist_unmodified = 0; + return (TRUE); + } + } + if (db_le != db_lbuf_start && + (db_hist_unmodified == 0 || !db_hist_ignore_dups)) { + db_history_prev = db_history_last; + for 
(p = db_lbuf_start; p != db_le; p++) { + *db_history_last++ = *p; + if (db_history_last == db_history + + db_history_size) { + db_history_last = db_history; + } + } + *db_history_last++ = '\0'; + } + db_history_curr = db_history_last; +#endif + *db_le++ = c; + db_hist_search = 0; + db_hist_unmodified = 0; + return (TRUE); + default: + if (db_le == db_lbuf_end) { + cnputc('\007'); + } + else if (c >= ' ' && c <= '~') { + for (p = db_le; p > db_lc; p--) + *p = *(p-1); + *db_lc++ = c; + db_le++; + cnputc(c); + db_putstring(db_lc, db_le - db_lc); + db_putnchars(BACKUP, db_le - db_lc); + } + break; + } + if (db_hist_search) + db_hist_search--; + if (db_hist_unmodified) + db_hist_unmodified--; + return (FALSE); +} + +int +db_readline( + char * lstart, + int lsize) +{ + db_force_whitespace(); /* synch output position */ + + db_lbuf_start = lstart; + db_lbuf_end = lstart + lsize - 1; + db_lc = lstart; + db_le = lstart; + + while (!db_inputchar(cngetc())) + continue; + + db_putchar('\n'); /* synch output position */ + + *db_le = 0; + return (db_le - db_lbuf_start); +} + +void +db_check_interrupt(void) +{ + register int c; + + c = cnmaygetc(); + switch (c) { + case -1: /* no character */ + return; + + case CTRL('c'): + db_error((char *)0); + /*NOTREACHED*/ + + case CTRL('s'): + do { + c = cnmaygetc(); + if (c == CTRL('c')) + db_error((char *)0); + } while (c != CTRL('q')); + break; + + default: + /* drop on floor */ + break; + } +} diff --git a/osfmk/ddb/db_input.h b/osfmk/ddb/db_input.h new file mode 100644 index 000000000..c1a89bffd --- /dev/null +++ b/osfmk/ddb/db_input.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:19:48 ezf + * change marker to not FREE + * [1994/09/22 21:10:10 ezf] + * + * Revision 1.1.2.3 1993/09/17 21:34:37 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:17 robert] + * + * Revision 1.1.2.2 1993/07/27 18:27:36 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:08 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_INPUT_H_ +#define _DDB_DB_INPUT_H_ + +/* Prototypes for functions exported by this module. + */ + +int db_readline( + char * lstart, + int lsize); + +void db_check_interrupt(void); + +#endif /* !_DDB_DB_INPUT_H_ */ diff --git a/osfmk/ddb/db_lex.c b/osfmk/ddb/db_lex.c new file mode 100644 index 000000000..efafadfab --- /dev/null +++ b/osfmk/ddb/db_lex.c @@ -0,0 +1,571 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.3 1996/01/09 19:15:49 devrcs + * Change 'register foo' to 'register int foo'. + * [1995/12/01 21:42:12 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:03:11 jfraser] + * + * Revision 1.1.11.2 1995/01/06 19:10:21 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.1.4.6 1994/05/06 18:39:20 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * String protos. + * 64bit cleanup. + * Cleanup to quiet gcc warnings. + * * End1.3merge + * [1994/11/04 08:49:35 dwm] + * + * Revision 1.1.11.1 1994/09/23 01:19:59 ezf + * change marker to not FREE + * [1994/09/22 21:10:14 ezf] + * + * Revision 1.1.4.4 1993/08/11 20:37:55 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:26 elliston] + * + * Revision 1.1.4.3 1993/07/27 18:27:38 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:13 elliston] + * + * Revision 1.1.4.2 1993/06/02 23:11:27 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 20:56:32 jeffc] + * + * Revision 1.1 1992/09/30 02:01:10 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:00:20 af + * Revision 2.4.3.1 91/10/05 13:06:25 jeffreyh + * Added relational operator tokens and string constant etc. + * Added input switching functions for macro and conditional command. + * Moved skip_to_eol() from db_command.c and added db_last_lp to print + * skipped input data as a warning message. + * Added last input repetition support to db_read_line. + * Changed db_lex() to always set db_tok_string for error message. + * [91/08/29 tak] + * + * Revision 2.4.3.1 91/10/05 13:06:25 jeffreyh + * Added relational operator tokens and string constant etc. + * Added input switching functions for macro and conditional command. + * Moved skip_to_eol() from db_command.c and added db_last_lp to print + * skipped input data as a warning message. + * Added last input repetition support to db_read_line. + * Changed db_lex() to always set db_tok_string for error message. + * [91/08/29 tak] + * + * Revision 2.4 91/05/14 15:34:23 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:06:36 mrt + * Changed to new Mach copyright + * [91/01/31 16:18:20 mrt] + * + * Revision 2.2 90/08/27 21:51:10 dbg + * Add 'dotdot' token. + * [90/08/22 dbg] + * + * Allow backslash to quote any character into an identifier. + * Allow colon in identifier for symbol table qualification. + * [90/08/16 dbg] + * Reduce lint. + * [90/08/07 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +/* + * Lexical analyzer. + */ +#include /* For strcpy(), strncmp(), strlen() */ +#include +#include +#include +#include /* For db_printf() */ + +char db_line[DB_LEX_LINE_SIZE]; +char db_last_line[DB_LEX_LINE_SIZE]; +char *db_lp, *db_endlp; +char *db_last_lp; +int db_look_char = 0; +db_expr_t db_look_token = 0; + + +/* Prototypes for functions local to this file. XXX -- should be static! 
+ */ +void db_flush_line(void); +void db_unread_char(int c); + + +int +db_read_line(char *repeat_last) +{ + int i; + + i = db_readline(db_line, sizeof(db_line)); + if (i == 0) + return (0); /* EOI */ + if (repeat_last) { + if (strncmp(db_line, repeat_last, strlen(repeat_last)) == 0) { + strcpy(db_line, db_last_line); + db_printf("%s", db_line); + i = strlen(db_line); + } else if (db_line[0] != '\n' && db_line[0] != 0) + strcpy(db_last_line, db_line); + } + db_lp = db_line; + db_endlp = db_lp + i; + db_last_lp = db_lp; + db_look_char = 0; + db_look_token = 0; + return (i); +} + +void +db_flush_line(void) +{ + db_lp = db_line; + db_last_lp = db_lp; + db_endlp = db_line; +} + +void +db_switch_input( + char *buffer, + int size) +{ + db_lp = buffer; + db_last_lp = db_lp; + db_endlp = buffer + size; + db_look_char = 0; + db_look_token = 0; +} + +void +db_save_lex_context(register struct db_lex_context *lp) +{ + lp->l_ptr = db_lp; + lp->l_eptr = db_endlp; + lp->l_char = db_look_char; + lp->l_token = db_look_token; +} + +void +db_restore_lex_context(register struct db_lex_context *lp) +{ + db_lp = lp->l_ptr; + db_last_lp = db_lp; + db_endlp = lp->l_eptr; + db_look_char = lp->l_char; + db_look_token = lp->l_token; +} + +int +db_read_char(void) +{ + int c; + + if (db_look_char != 0) { + c = db_look_char; + db_look_char = 0; + } + else if (db_lp >= db_endlp) + c = -1; + else + c = *db_lp++; + return (c); +} + +void +db_unread_char(int c) +{ + db_look_char = c; +} + +void +db_unread_token(int t) +{ + db_look_token = t; +} + +int +db_read_token(void) +{ + int t; + + if (db_look_token) { + t = db_look_token; + db_look_token = 0; + } + else { + db_last_lp = db_lp; + if (db_look_char) + db_last_lp--; + t = db_lex(); + } + return (t); +} + +db_expr_t db_tok_number; +char db_tok_string[TOK_STRING_SIZE]; + +db_expr_t db_radix = 16; + +void +db_flush_lex(void) +{ + db_flush_line(); + db_look_char = 0; + db_look_token = 0; +} + +#define DB_DISP_SKIP 40 /* number of chars to display 
skip */ + +void +db_skip_to_eol(void) +{ + register int skip; + register int t; + register int n; + register char *p; + + t = db_read_token(); + p = db_last_lp; + for (skip = 0; t != tEOL && t != tSEMI_COLON && t != tEOF; skip++) + t = db_read_token(); + if (t == tSEMI_COLON) + db_unread_token(t); + if (skip != 0) { + while (p < db_last_lp && (*p == ' ' || *p == '\t')) + p++; + db_printf("Warning: Skipped input data \""); + for (n = 0; n < DB_DISP_SKIP && p < db_last_lp; n++) + db_printf("%c", *p++); + if (n >= DB_DISP_SKIP) + db_printf("...."); + db_printf("\"\n"); + } +} + +int +db_lex(void) +{ + register char *cp; + register int c; + + c = db_read_char(); + while (c <= ' ' || c > '~') { + if (c == '\n' || c == -1) + return (tEOL); + c = db_read_char(); + } + + cp = db_tok_string; + *cp++ = c; + + if (c >= '0' && c <= '9') { + /* number */ + int r, digit; + + if (c > '0') + r = db_radix; + else { + c = db_read_char(); + if (c == 'O' || c == 'o') + r = 8; + else if (c == 'T' || c == 't') + r = 10; + else if (c == 'X' || c == 'x') + r = 16; + else { + cp--; + r = db_radix; + db_unread_char(c); + } + c = db_read_char(); + *cp++ = c; + } + db_tok_number = 0; + for (;;) { + if (c >= '0' && c <= ((r == 8) ? 
'7' : '9')) + digit = c - '0'; + else if (r == 16 && ((c >= 'A' && c <= 'F') || + (c >= 'a' && c <= 'f'))) { + if (c >= 'a') + digit = c - 'a' + 10; + else + digit = c - 'A' + 10; + } + else + break; + db_tok_number = db_tok_number * r + digit; + c = db_read_char(); + if (cp < &db_tok_string[sizeof(db_tok_string)-1]) + *cp++ = c; + } + cp[-1] = 0; + if ((c >= '0' && c <= '9') || + (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || + (c == '_')) + { + db_printf("Bad character '%c' after number %s\n", + c, db_tok_string); + db_error(0); + db_flush_lex(); + return (tEOF); + } + db_unread_char(c); + return (tNUMBER); + } + if ((c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || + c == '_' || c == '\\' || c == ':') + { + /* identifier */ + if (c == '\\') { + c = db_read_char(); + if (c == '\n' || c == -1) + db_error("Bad '\\' at the end of line\n"); + cp[-1] = c; + } + while (1) { + c = db_read_char(); + if ((c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || + (c >= '0' && c <= '9') || + c == '_' || c == '\\' || c == ':' || c == '.') + { + if (c == '\\') { + c = db_read_char(); + if (c == '\n' || c == -1) + db_error("Bad '\\' at the end of line\n"); + } + *cp++ = c; + if (cp == db_tok_string+sizeof(db_tok_string)) { + db_error("String too long\n"); + db_flush_lex(); + return (tEOF); + } + continue; + } + else { + *cp = '\0'; + break; + } + } + db_unread_char(c); + return (tIDENT); + } + + *cp = 0; + switch (c) { + case '+': + return (tPLUS); + case '-': + return (tMINUS); + case '.': + c = db_read_char(); + if (c == '.') { + *cp++ = c; + *cp = 0; + return (tDOTDOT); + } + db_unread_char(c); + return (tDOT); + case '*': + return (tSTAR); + case '/': + return (tSLASH); + case '=': + c = db_read_char(); + if (c == '=') { + *cp++ = c; + *cp = 0; + return(tLOG_EQ); + } + db_unread_char(c); + return (tEQ); + case '%': + return (tPCT); + case '#': + return (tHASH); + case '(': + return (tLPAREN); + case ')': + return (tRPAREN); + case ',': + return (tCOMMA); + case 
'\'': + return (tQUOTE); + case '"': + /* string */ + cp = db_tok_string; + c = db_read_char(); + while (c != '"' && c > 0 && c != '\n') { + if (cp >= &db_tok_string[sizeof(db_tok_string)-1]) { + db_error("Too long string\n"); + db_flush_lex(); + return (tEOF); + } + if (c == '\\') { + c = db_read_char(); + switch(c) { + case 'n': + c = '\n'; break; + case 't': + c = '\t'; break; + case '\\': + case '"': + break; + default: + db_printf("Bad escape sequence '\\%c'\n", c); + db_error(0); + db_flush_lex(); + return (tEOF); + } + } + *cp++ = c; + c = db_read_char(); + } + *cp = 0; + if (c != '"') { + db_error("Non terminated string constant\n"); + db_flush_lex(); + return (tEOF); + } + return (tSTRING); + case '$': + return (tDOLLAR); + case '!': + c = db_read_char(); + if (c == '=') { + *cp++ = c; + *cp = 0; + return(tLOG_NOT_EQ); + } + db_unread_char(c); + return (tEXCL); + case '&': + c = db_read_char(); + if (c == '&') { + *cp++ = c; + *cp = 0; + return(tLOG_AND); + } + db_unread_char(c); + return(tBIT_AND); + case '|': + c = db_read_char(); + if (c == '|') { + *cp++ = c; + *cp = 0; + return(tLOG_OR); + } + db_unread_char(c); + return(tBIT_OR); + case '<': + c = db_read_char(); + *cp++ = c; + *cp = 0; + if (c == '<') + return (tSHIFT_L); + if (c == '=') + return (tLESS_EQ); + cp[-1] = 0; + db_unread_char(c); + return(tLESS); + break; + case '>': + c = db_read_char(); + *cp++ = c; + *cp = 0; + if (c == '>') + return (tSHIFT_R); + if (c == '=') + return (tGREATER_EQ); + cp[-1] = 0; + db_unread_char(c); + return (tGREATER); + break; + case ';': + return (tSEMI_COLON); + case '?': + return (tQUESTION); + case -1: + strcpy(db_tok_string, ""); + return (tEOF); + } + db_printf("Bad character '%c'\n", c); + db_flush_lex(); + return (tEOF); +} diff --git a/osfmk/ddb/db_lex.h b/osfmk/ddb/db_lex.h new file mode 100644 index 000000000..6280d3f7b --- /dev/null +++ b/osfmk/ddb/db_lex.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.2 1995/01/06 19:10:24 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:39:54 dwm] + * + * Revision 1.1.11.1 1994/09/23 01:20:10 ezf + * change marker to not FREE + * [1994/09/22 21:10:18 ezf] + * + * Revision 1.1.4.3 1993/07/27 18:27:40 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:19 elliston] + * + * Revision 1.1.4.2 1993/06/02 23:11:33 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:37 jeffc] + * + * Revision 1.1 1992/09/30 02:24:17 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:00:48 af + * Revision 2.4.3.1 91/10/05 13:06:34 jeffreyh + * Added db_lex_context structure and some routine declarations + * for macro and conditinal command. + * Added relational operator tokens etc. for condition expression. 
+ * Changed TOK_STRING_SIZE from 120 to 64, and defined + * DB_LEX_LINE_SIZE as 256 which was previously embedded + * in db_lex.c as 120. + * [91/08/29 tak] + * Revision 2.4.1 91/07/15 09:30:00 tak + * Added db_lex_context for macro support + * Added some lexical constants to support logical expression etc. + * [91/05/15 13:55:00 tak] + * + * Revision 2.4.3.1 91/10/05 13:06:34 jeffreyh + * Added db_lex_context structure and some routine declarations + * for macro and conditinal command. + * Added relational operator tokens etc. for condition expression. + * Changed TOK_STRING_SIZE from 120 to 64, and defined + * DB_LEX_LINE_SIZE as 256 which was previously embedded + * in db_lex.c as 120. + * [91/08/29 tak] + * + * Revision 2.4.1 91/07/15 09:30:00 tak + * Added db_lex_context for macro support + * Added some lexical constants to support logical expression etc. + * [91/05/15 13:55:00 tak] + * + * Revision 2.4 91/05/14 15:34:38 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:06:41 mrt + * Changed to new Mach copyright + * [91/01/31 16:18:28 mrt] + * + * Revision 2.2 90/08/27 21:51:16 dbg + * Add 'dotdot' token. + * [90/08/22 dbg] + * Export db_flush_lex. + * [90/08/07 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ +/* + * Lexical analyzer. + */ + +#ifndef _DDB_DB_LEX_H_ +#define _DDB_DB_LEX_H_ + +#include /* For db_expr_t */ + +#define TOK_STRING_SIZE 64 +#define DB_LEX_LINE_SIZE 256 + +struct db_lex_context { + int l_char; /* peek char */ + int l_token; /* peek token */ + char *l_ptr; /* line pointer */ + char *l_eptr; /* line end pointer */ +}; + +extern db_expr_t db_tok_number; +extern char db_tok_string[TOK_STRING_SIZE]; +extern db_expr_t db_radix; + +#define tEOF (-1) +#define tEOL 1 +#define tNUMBER 2 +#define tIDENT 3 +#define tPLUS 4 +#define tMINUS 5 +#define tDOT 6 +#define tSTAR 7 +#define tSLASH 8 +#define tEQ 9 +#define tLPAREN 10 +#define tRPAREN 11 +#define tPCT 12 +#define tHASH 13 +#define tCOMMA 14 +#define tQUOTE 15 +#define tDOLLAR 16 +#define tEXCL 17 +#define tSHIFT_L 18 +#define tSHIFT_R 19 +#define tDOTDOT 20 +#define tSEMI_COLON 21 +#define tLOG_EQ 22 +#define tLOG_NOT_EQ 23 +#define tLESS 24 +#define tLESS_EQ 25 +#define tGREATER 26 +#define tGREATER_EQ 27 +#define tBIT_AND 28 +#define tBIT_OR 29 +#define tLOG_AND 30 +#define tLOG_OR 31 +#define tSTRING 32 +#define tQUESTION 33 + +/* Prototypes for functions exported by this module. 
+ */ +int db_read_line(char *repeat_last); + +void db_switch_input( + char *buffer, + int size); + +void db_save_lex_context(struct db_lex_context *lp); + +void db_restore_lex_context(struct db_lex_context *lp); + +int db_read_char(void); + +void db_unread_token(int t); + +int db_read_token(void); + +void db_flush_lex(void); + +void db_skip_to_eol(void); + +int db_lex(void); + +#endif /* !_DDB_DB_LEX_H_ */ diff --git a/osfmk/ddb/db_macro.c b/osfmk/ddb/db_macro.c new file mode 100644 index 000000000..84e69782c --- /dev/null +++ b/osfmk/ddb/db_macro.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.10.4 1996/01/09 19:15:54 devrcs + * Change 'register foo' to 'register int foo'. + * [1995/12/01 21:42:14 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. 
+ * [1995/11/21 18:03:15 jfraser] + * + * Revision 1.2.10.3 1995/01/06 19:10:28 devrcs + * mk6 CR668 - 1.3b26 merge + * fix typing + * [1994/11/04 08:49:38 dwm] + * + * Revision 1.2.10.2 1994/09/23 01:20:19 ezf + * change marker to not FREE + * [1994/09/22 21:10:23 ezf] + * + * Revision 1.2.10.1 1994/06/11 21:11:52 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:01:51 bolinger] + * + * Revision 1.2.8.1 1994/02/08 10:58:03 bernadat + * Fixed reinitialization of db_macro_level to -1. + * Put DB_MACRO_LEVEL and DB_NARGS macros to . + * Changed name of DB_NARGS to DB_MACRO_NARGS. + * Added support of DB_VAR_SHOW. + * [93/08/12 paire] + * [94/02/07 bernadat] + * + * Revision 1.2.2.4 1993/08/11 20:37:58 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:33 elliston] + * + * Revision 1.2.2.3 1993/07/27 18:27:42 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:24 elliston] + * + * Revision 1.2.2.2 1993/06/09 02:20:18 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:40 jeffc] + * + * Revision 1.2 1993/04/19 16:02:25 devrcs + * Changes from mk78: + * Removed unused variable from db_exec_macro(). + * Added include of . + * [92/05/16 jfriedl] + * [93/02/02 bruel] + * + * Revision 1.1 1992/09/30 02:01:12 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 91/10/09 16:01:09 af + * Revision 2.1.3.1 91/10/05 13:06:40 jeffreyh + * Created for macro support. + * [91/08/29 tak] + * + * Revision 2.1.3.1 91/10/05 13:06:40 jeffreyh + * Created for macro support. + * [91/08/29 tak] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +#include +#include /* For strcmp(), strcpy() */ + +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include +#include + +/* + * debugger macro support + */ + +#define DB_NUSER_MACRO 10 /* max user macros */ + +int db_macro_free = DB_NUSER_MACRO; +struct db_user_macro { + char m_name[TOK_STRING_SIZE]; + char m_lbuf[DB_LEX_LINE_SIZE]; + int m_size; +} db_user_macro[DB_NUSER_MACRO]; + +int db_macro_level = -1; +db_expr_t db_macro_args[DB_MACRO_LEVEL][DB_MACRO_NARGS]; + + +/* Prototypes for functions local to this file. 
+ */ +static struct db_user_macro *db_lookup_macro(char *name); + + +static struct db_user_macro * +db_lookup_macro(char *name) +{ + register struct db_user_macro *mp; + + for (mp = db_user_macro; mp < &db_user_macro[DB_NUSER_MACRO]; mp++) { + if (mp->m_name[0] == 0) + continue; + if (strcmp(mp->m_name, name) == 0) + return(mp); + } + return(0); +} + +void +db_def_macro_cmd(void) +{ + register char *p; + register int c; + register struct db_user_macro *mp, *ep; + + if (db_read_token() != tIDENT) { + db_printf("Bad macro name \"%s\"\n", db_tok_string); + db_error(0); + /* NOTREACHED */ + } + if ((mp = db_lookup_macro(db_tok_string)) == 0) { + if (db_macro_free <= 0) + db_error("Too many macros\n"); + /* NOTREACHED */ + ep = &db_user_macro[DB_NUSER_MACRO]; + for (mp = db_user_macro; mp < ep && mp->m_name[0]; mp++); + if (mp >= ep) + db_error("ddb: internal error(macro)\n"); + /* NOTREACHED */ + db_macro_free--; + strcpy(mp->m_name, db_tok_string); + } + for (c = db_read_char(); c == ' ' || c == '\t'; c = db_read_char()); + for (p = mp->m_lbuf; c > 0; c = db_read_char()) + *p++ = c; + *p = 0; + mp->m_size = p - mp->m_lbuf; +} + +void +db_del_macro_cmd(void) +{ + register struct db_user_macro *mp; + + if (db_read_token() != tIDENT + || (mp = db_lookup_macro(db_tok_string)) == 0) { + db_printf("No such macro \"%s\"\n", db_tok_string); + db_error(0); + /* NOTREACHED */ + } + mp->m_name[0] = 0; + db_macro_free++; +} + +void +db_show_macro(void) +{ + register struct db_user_macro *mp; + int t; + char *name = 0; + + if ((t = db_read_token()) == tIDENT) + name = db_tok_string; + else + db_unread_token(t); + for (mp = db_user_macro; mp < &db_user_macro[DB_NUSER_MACRO]; mp++) { + if (mp->m_name[0] == 0) + continue; + if (name && strcmp(mp->m_name, name)) + continue; + db_printf("%s: %s", mp->m_name, mp->m_lbuf); + } +} + +int +db_exec_macro(char *name) +{ + register struct db_user_macro *mp; + register int n; + + if ((mp = db_lookup_macro(name)) == 0) + return(-1); + if 
(db_macro_level+1 >= DB_MACRO_LEVEL) { + db_macro_level = -1; + db_error("Too many macro nest\n"); + /* NOTREACHED */ + } + for (n = 0; + n < DB_MACRO_NARGS && + db_expression(&db_macro_args[db_macro_level+1][n]); + n++); + while (n < DB_MACRO_NARGS) + db_macro_args[db_macro_level+1][n++] = 0; + db_macro_level++; + db_exec_cmd_nest(mp->m_lbuf, mp->m_size); + db_macro_level--; + return(0); +} + +int +db_arg_variable( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap) +{ + db_expr_t value; + char *name; + db_addr_t offset; + + if (flag == DB_VAR_SHOW) { + value = db_macro_args[ap->hidden_level][ap->suffix[0]-1]; + db_printf("%#n", value); + db_find_xtrn_task_sym_and_offset(value, &name, &offset, TASK_NULL); + if (name != (char *)0 && offset <= db_maxoff && offset != value) { + db_printf("\t%s", name); + if (offset != 0) + db_printf("+%#r", offset); + } + return(0); + } + + if (ap->level != 1 || ap->suffix[0] < 1 || + ap->suffix[0] > DB_MACRO_NARGS) { + db_error("Bad $arg variable\n"); + /* NOTREACHED */ + } + if (flag == DB_VAR_GET) + *valuep = db_macro_args[db_macro_level][ap->suffix[0]-1]; + else + db_macro_args[db_macro_level][ap->suffix[0]-1] = *valuep; + return(0); +} diff --git a/osfmk/ddb/db_macro.h b/osfmk/ddb/db_macro.h new file mode 100644 index 000000000..ecd0dbed8 --- /dev/null +++ b/osfmk/ddb/db_macro.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:20:28 ezf + * change marker to not FREE + * [1994/09/22 21:10:28 ezf] + * + * Revision 1.1.2.3 1993/09/17 21:34:39 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:20 robert] + * + * Revision 1.1.2.2 1993/07/27 18:27:48 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:28 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_MACRO_H_ +#define _DDB_DB_MACRO_H_ + +#include +#include + +/* Prototypes for functions exported by this module. + */ +void db_def_macro_cmd(void); + +void db_del_macro_cmd(void); + +void db_show_macro(void); + +int db_exec_macro(char *name); + +int db_arg_variable( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap); + +#endif /* !_DDB_DB_MACRO_H_ */ diff --git a/osfmk/ddb/db_output.c b/osfmk/ddb/db_output.c new file mode 100644 index 000000000..e98eb0757 --- /dev/null +++ b/osfmk/ddb/db_output.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Printf and character output for debugger. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Character output - tracks position in line. + * To do this correctly, we should know how wide + * the output device is - then we could zero + * the line position when the output device wraps + * around to the start of the next line. + * + * Instead, we count the number of spaces printed + * since the last printing character so that we + * don't print trailing spaces. This avoids most + * of the wraparounds. + */ + +#ifndef DB_MAX_LINE +#define DB_MAX_LINE 24 /* maximum line */ +#define DB_MAX_WIDTH 132 /* maximum width */ +#endif /* DB_MAX_LINE */ + +#define DB_MIN_MAX_WIDTH 20 /* minimum max width */ +#define DB_MIN_MAX_LINE 3 /* minimum max line */ +#define CTRL(c) ((c) & 0xff) + +int db_output_position = 0; /* output column */ +int db_output_line = 0; /* output line number */ +int db_last_non_space = 0; /* last non-space character */ +int db_last_gen_return = 0; /* last character generated return */ +int db_auto_wrap = 1; /* auto wrap at end of line ? */ +int db_tab_stop_width = 8; /* how wide are tab stops? */ +#define NEXT_TAB(i) \ + ((((i) + db_tab_stop_width) / db_tab_stop_width) * db_tab_stop_width) +int db_max_line = DB_MAX_LINE; /* output max lines */ +int db_max_width = DB_MAX_WIDTH; /* output line width */ + + +/* Prototypes for functions local to this file. XXX -- should be static! + */ +static void db_more(void); +void db_advance_output_position(int new_output_position, + int blank); + + +/* + * Force pending whitespace. 
+ */ +void +db_force_whitespace(void) +{ + register int last_print, next_tab; + + last_print = db_last_non_space; + while (last_print < db_output_position) { + next_tab = NEXT_TAB(last_print); + if (next_tab <= db_output_position) { + cnputc('\t'); + last_print = next_tab; + } + else { + cnputc(' '); + last_print++; + } + } + db_last_non_space = db_output_position; +} + +void +db_reset_more() +{ + db_output_line = 0; +} + +static void +db_more(void) +{ + register char *p; + boolean_t quit_output = FALSE; + +#if defined(__alpha) + extern boolean_t kdebug_mode; + if (kdebug_mode) return; +#endif /* defined(__alpha) */ + for (p = "--db_more--"; *p; p++) + cnputc(*p); + switch(cngetc()) { + case ' ': + db_output_line = 0; + break; + case 'q': + case CTRL('c'): + db_output_line = 0; + quit_output = TRUE; + break; + default: + db_output_line--; + break; + } + p = "\b\b\b\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b\b\b\b"; + while (*p) + cnputc(*p++); + if (quit_output) { + db_error((char *) 0); + /* NOTREACHED */ + } +} + +void +db_advance_output_position(int new_output_position, + int blank) +{ + if (db_max_width >= DB_MIN_MAX_WIDTH + && new_output_position >= db_max_width) { + /* auto new line */ + if (!db_auto_wrap || blank) + cnputc('\n'); + db_output_position = 0; + db_last_non_space = 0; + db_last_gen_return = 1; + db_output_line++; + } else { + db_output_position = new_output_position; + } +} + +boolean_t +db_reserve_output_position(int increment) +{ + if (db_max_width >= DB_MIN_MAX_WIDTH + && db_output_position + increment >= db_max_width) { + /* auto new line */ + if (!db_auto_wrap || db_last_non_space != db_output_position) + cnputc('\n'); + db_output_position = 0; + db_last_non_space = 0; + db_last_gen_return = 1; + db_output_line++; + return TRUE; + } + return FALSE; +} + +/* + * Output character. Buffer whitespace. 
+ */ +void +db_putchar(char c) +{ + if (db_max_line >= DB_MIN_MAX_LINE && db_output_line >= db_max_line-1) + db_more(); + if (c > ' ' && c <= '~') { + /* + * Printing character. + * If we have spaces to print, print them first. + * Use tabs if possible. + */ + db_force_whitespace(); + cnputc(c); + db_last_gen_return = 0; + db_advance_output_position(db_output_position+1, 0); + db_last_non_space = db_output_position; + } + else if (c == '\n') { + /* Return */ + if (db_last_gen_return) { + db_last_gen_return = 0; + } else { + cnputc(c); + db_output_position = 0; + db_last_non_space = 0; + db_output_line++; + db_check_interrupt(); + } + } + else if (c == '\t') { + /* assume tabs every 8 positions */ + db_advance_output_position(NEXT_TAB(db_output_position), 1); + } + else if (c == ' ') { + /* space */ + db_advance_output_position(db_output_position+1, 1); + } + else if (c == '\007') { + /* bell */ + cnputc(c); + } + /* other characters are assumed non-printing */ +} + +/* + * Return output position + */ +int +db_print_position(void) +{ + return (db_output_position); +} + +/* + * End line if too long. + */ +void +db_end_line(void) +{ + if (db_output_position >= db_max_width-1) { + /* auto new line */ + if (!db_auto_wrap) + cnputc('\n'); + db_output_position = 0; + db_last_non_space = 0; + db_last_gen_return = 1; + db_output_line++; + } +} + +/* + * Printing + */ + +void +db_printf(char *fmt, ...) +{ + va_list listp; + +#ifdef luna88k + db_printing(); +#endif + va_start(listp, fmt); + _doprnt(fmt, &listp, db_putchar, db_radix); + va_end(listp); +} + +/* alternate name */ + +void +kdbprintf(char *fmt, ...) +{ + va_list listp; + + va_start(listp, fmt); + _doprnt(fmt, &listp, db_putchar, db_radix); + va_end(listp); +} + +int db_indent = 0; + +/* + * Printing (to console) with indentation. + */ +void +iprintf(char *fmt, ...) 
+{ + va_list listp; + register int i; + + for (i = db_indent; i > 0; ){ + if (i >= 8) { + kdbprintf("\t"); + i -= 8; + } + else { + kdbprintf(" "); + i--; + } + } + + va_start(listp, fmt); + _doprnt(fmt, &listp, db_putchar, db_radix); + va_end(listp); +} + +void +db_output_prompt(void) +{ + db_printf("db%s", (db_default_act) ? "t": ""); +#if NCPUS > 1 + db_printf("{%d}", cpu_number()); +#endif + db_printf("> "); +} + diff --git a/osfmk/ddb/db_output.h b/osfmk/ddb/db_output.h new file mode 100644 index 000000000..c1a398935 --- /dev/null +++ b/osfmk/ddb/db_output.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.12.2 1994/09/23 01:20:43 ezf + * change marker to not FREE + * [1994/09/22 21:10:36 ezf] + * + * Revision 1.2.12.1 1994/06/11 21:12:00 bolinger + * Merge up to NMK17.2. 
+ * [1994/06/11 20:03:58 bolinger] + * + * Revision 1.2.10.2 1994/03/07 16:37:44 paire + * Added definition of indent. + * [94/02/17 paire] + * + * Revision 1.2.10.1 1994/02/08 10:58:14 bernadat + * Added db_reserve_output_position + * db_reset_more + * prototypes + * [94/02/07 bernadat] + * + * Revision 1.2.2.4 1993/08/11 22:12:12 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:44 elliston] + * + * Revision 1.2.2.3 1993/07/27 18:27:52 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:35 elliston] + * + * Revision 1.2.2.2 1993/06/09 02:20:29 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:56:49 jeffc] + * + * Revision 1.2 1993/04/19 16:02:43 devrcs + * Changes from mk78: + * db_printf is void. + * [92/05/18 jfriedl] + * [93/02/03 bruel] + * + * Revision 1.1 1992/09/30 02:24:18 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 15:35:07 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:06:49 mrt + * Changed to new Mach copyright + * [91/01/31 16:18:48 mrt] + * + * Revision 2.2 90/08/27 21:51:32 dbg + * Created. + * [90/08/07 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/90 + */ + +/* + * Printing routines for kernel debugger. + */ + +#ifndef _DDB_DB_OUTPUT_H_ +#define _DDB_DB_OUTPUT_H_ + +#include + +extern int db_indent; + +/* + * Prototypes for functions exported by this module. + */ +void db_force_whitespace(void); +void db_putchar(char c); +int db_print_position(void); +void db_end_line(void); +void db_printf(char *fmt, ...); +void kdbprintf(char *fmt, ...); +void iprintf(char *fmt, ...); +boolean_t db_reserve_output_position(int len); +void db_reset_more(void); +void db_output_prompt(void); +#endif /* !_DDB_DB_OUTPUT_H_ */ diff --git a/osfmk/ddb/db_print.c b/osfmk/ddb/db_print.c new file mode 100644 index 000000000..775ce439b --- /dev/null +++ b/osfmk/ddb/db_print.c @@ -0,0 +1,1061 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Miscellaneous printing. + */ +#include + +#include /* For strlen() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for db_vm() */ + +#include +#include + +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include + +#include +#include /*** ??? fix so this can be removed ***/ + +#if TASK_SWAPPER +#include +#endif /* TASK_SWAPPER */ + +/* Prototypes for functions local to this file. XXX -- should be static! 
+ */ + +char *db_act_stat( + register thread_act_t thr_act, + char *status); + +char *db_act_swap_stat( + register thread_act_t thr_act, + char *status); + +void db_print_task( + task_t task, + int task_id, + int flag); + +void db_reset_print_entry( + void); + +void db_print_one_entry( + ipc_entry_t entry, + int index, + mach_port_name_t name, + boolean_t is_pset, + ipc_space_t space); + +int db_port_iterate( + thread_act_t thr_act, + boolean_t is_pset, + boolean_t do_output); + +ipc_port_t db_lookup_port( + thread_act_t thr_act, + int id); + +static void db_print_port_id( + int id, + ipc_port_t port, + unsigned bits, + int n); + +void db_print_act( + thread_act_t thr_act, + int act_id, + int flag); + +void db_print_space( + task_t task, + int task_id, + int flag); + +void db_print_task_vm( + task_t task, + int task_id, + boolean_t title, + char *modif); + +void db_system_stats(void); + + +void +db_show_regs( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + register struct db_variable *regp; + db_expr_t value; + db_addr_t offset; + char * name; + register int i; + struct db_var_aux_param aux_param; + task_t task = TASK_NULL; + + aux_param.modif = modif; + aux_param.thr_act = THR_ACT_NULL; + if (db_option(modif, 't')) { + if (have_addr) { + if (!db_check_act_address_valid((thread_act_t)addr)) + return; + aux_param.thr_act = (thread_act_t)addr; + } else + aux_param.thr_act = db_default_act; + if (aux_param.thr_act != THR_ACT_NULL) + task = aux_param.thr_act->task; + } + for (regp = db_regs; regp < db_eregs; regp++) { + if (regp->max_level > 1) { + db_printf("bad multi-suffixed register %s\n", regp->name); + continue; + } + aux_param.level = regp->max_level; + for (i = regp->low; i <= regp->high; i++) { + aux_param.suffix[0] = i; + db_read_write_variable(regp, &value, DB_VAR_GET, &aux_param); + if (regp->max_level > 0) + db_printf("%s%d%*s", regp->name, i, + 12-strlen(regp->name)-((i<10)?1:2), ""); + else + db_printf("%-12s", 
regp->name); + db_printf("%#*N", 2+2*sizeof(vm_offset_t), value); + db_find_xtrn_task_sym_and_offset((db_addr_t)value, &name, + &offset, task); + if (name != 0 && offset <= db_maxoff && offset != value) { + db_printf("\t%s", name); + if (offset != 0) + db_printf("+%#r", offset); + } + db_printf("\n"); + } + } +} + +#define OPTION_LONG 0x001 /* long print option */ +#define OPTION_USER 0x002 /* print ps-like stuff */ +#define OPTION_INDENT 0x100 /* print with indent */ +#define OPTION_THREAD_TITLE 0x200 /* print thread title */ +#define OPTION_TASK_TITLE 0x400 /* print thread title */ + +#ifndef DB_TASK_NAME +#define DB_TASK_NAME(task) /* no task name */ +#define DB_TASK_NAME_TITLE "" /* no task name */ +#endif /* DB_TASK_NAME */ + +#ifndef db_act_fp_used +#define db_act_fp_used(thr_act) FALSE +#endif + +char * +db_act_stat( + register thread_act_t thr_act, + char *status) +{ + register char *p = status; + + if (!thr_act->active) { + *p++ = 'D', + *p++ = 'y', + *p++ = 'i', + *p++ = 'n', + *p++ = 'g'; + *p++ = ' '; + } else if (!thr_act->thread) { + *p++ = 'E', + *p++ = 'm', + *p++ = 'p', + *p++ = 't', + *p++ = 'y'; + *p++ = ' '; + } else { + thread_t athread = thr_act->thread; + + *p++ = (athread->state & TH_RUN) ? 'R' : '.'; + *p++ = (athread->state & TH_WAIT) ? 'W' : '.'; + *p++ = (athread->state & TH_SUSP) ? 'S' : '.'; + *p++ = (athread->state & TH_SWAPPED_OUT) ? 'O' : '.'; + *p++ = (athread->state & TH_UNINT) ? 'N' : '.'; + /* show if the FPU has been used */ + *p++ = db_act_fp_used(thr_act) ? 
'F' : '.'; + } + *p++ = 0; + return(status); +} + +char * +db_act_swap_stat( + register thread_act_t thr_act, + char *status) +{ + register char *p = status; + +#if THREAD_SWAPPER + switch (thr_act->swap_state & TH_SW_STATE) { + case TH_SW_UNSWAPPABLE: + *p++ = 'U'; + break; + case TH_SW_IN: + *p++ = 'I'; + break; + case TH_SW_GOING_OUT: + *p++ = 'G'; + break; + case TH_SW_WANT_IN: + *p++ = 'W'; + break; + case TH_SW_OUT: + *p++ = 'O'; + break; + case TH_SW_COMING_IN: + *p++ = 'C'; + break; + default: + *p++ = '?'; + break; + } + *p++ = (thr_act->swap_state & TH_SW_TASK_SWAPPING) ? 'T' : '.'; +#endif /* THREAD_SWAPPER */ + *p++ = 0; + + return status; +} + +char *policy_list[] = { "TS", "RR", "??", "FF", + "??", "??", "??", "BE"}; + +void +db_print_act( + thread_act_t thr_act, + int act_id, + int flag) +{ + thread_t athread; + char status[8]; + char swap_status[3]; + char *indent = ""; + int policy; + + if (!thr_act) { + db_printf("db_print_act(NULL)!\n"); + return; + } + + athread = thr_act->thread; + if (flag & OPTION_USER) { + + if (flag & OPTION_LONG) { + if (flag & OPTION_INDENT) + indent = " "; + if (flag & OPTION_THREAD_TITLE) { + db_printf("%s ID: ACT STAT SW STACK SHUTTLE", indent); + db_printf(" SUS PRI WAIT_FUNC\n"); + } + policy = (athread ? athread->policy : 2); + db_printf("%s%3d%c %0*X %s %s %0*X %0*X %3d %3d/%s ", + indent, act_id, + (thr_act == current_act())? '#': ':', + 2*sizeof(vm_offset_t), thr_act, + db_act_stat(thr_act, status), + db_act_swap_stat(thr_act, swap_status), + 2*sizeof(vm_offset_t), (athread ?athread->kernel_stack:0), + 2*sizeof(vm_offset_t), athread, + thr_act->suspend_count, + (athread ? 
athread->sched_pri : 999), /* XXX */ + policy_list[policy-1]); + if (athread) { + /* no longer TH_SWAP, no continuation to print */ + if (athread->state & TH_WAIT) + db_task_printsym((db_addr_t)athread->wait_event, + DB_STGY_ANY, kernel_task); + } + db_printf("\n"); + } else { + if (act_id % 3 == 0) { + if (flag & OPTION_INDENT) + db_printf("\n "); + } else + db_printf(" "); + db_printf("%3d%c(%0*X,%s)", act_id, + (thr_act == current_act())? '#': ':', + 2*sizeof(vm_offset_t), thr_act, + db_act_stat(thr_act, status)); + } + } else { + if (flag & OPTION_INDENT) + db_printf(" %3d (%0*X) ", act_id, + 2*sizeof(vm_offset_t), thr_act); + else + db_printf("(%0*X) ", 2*sizeof(vm_offset_t), thr_act); + if (athread) { + db_printf("%c%c%c%c%c", + (athread->state & TH_RUN) ? 'R' : ' ', + (athread->state & TH_WAIT) ? 'W' : ' ', + (athread->state & TH_SUSP) ? 'S' : ' ', + (athread->state & TH_UNINT)? 'N' : ' ', + db_act_fp_used(thr_act) ? 'F' : ' '); + /* Obsolete TH_STACK_HANDOFF code, left for now; might enhance + * to print out safe_points instead */ + if (athread->state & TH_STACK_HANDOFF) { + if (athread->continuation) { + db_printf("("); + db_task_printsym((db_addr_t)athread->continuation, + DB_STGY_ANY, kernel_task); + db_printf(")"); + } else { + db_printf("(handoff)"); + } + } + if (athread->state & TH_WAIT) { + db_printf(" "); + db_task_printsym((db_addr_t)athread->wait_event, + DB_STGY_ANY, kernel_task); + } + } else + db_printf("Empty"); + db_printf("\n"); + } +} + +void +db_print_task( + task_t task, + int task_id, + int flag) +{ + thread_act_t thr_act; + int act_id; + char sstate; + + if (flag & OPTION_USER) { + if (flag & OPTION_TASK_TITLE) { + db_printf(" ID: TASK MAP THD RES SUS PR SW %s", + DB_TASK_NAME_TITLE); + if ((flag & OPTION_LONG) == 0) + db_printf(" ACTS"); + db_printf("\n"); + } +#if TASK_SWAPPER + switch ((int) task->swap_state) { + case TASK_SW_IN: + sstate = 'I'; + break; + case TASK_SW_OUT: + sstate = 'O'; + break; + case TASK_SW_GOING_OUT: + sstate 
= 'G'; + break; + case TASK_SW_COMING_IN: + sstate = 'C'; + break; + case TASK_SW_UNSWAPPABLE: + sstate = 'U'; + break; + default: + sstate = '?'; + break; + } +#else /* TASK_SWAPPER */ + sstate = 'I'; +#endif /* TASK_SWAPPER */ + /*** ??? fix me ***/ + db_printf("%3d: %0*X %0*X %3d %3d %3d %2d %c ", + task_id, 2*sizeof(vm_offset_t), task, + 2*sizeof(vm_offset_t), task->map, + task->thr_act_count, task->res_act_count, + task->suspend_count, + task->priority, + sstate); + DB_TASK_NAME(task); + if (flag & OPTION_LONG) { + if (flag & OPTION_TASK_TITLE) + flag |= OPTION_THREAD_TITLE; + db_printf("\n"); + } else if (task->thr_act_count <= 1) + flag &= ~OPTION_INDENT; + act_id = 0; + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + db_print_act(thr_act, act_id, flag); + flag &= ~OPTION_THREAD_TITLE; + act_id++; + } + if ((flag & OPTION_LONG) == 0) + db_printf("\n"); + } else { + if (flag & OPTION_LONG) { + if (flag & OPTION_TASK_TITLE) { + db_printf(" TASK ACT\n"); + if (task->thr_act_count > 1) + flag |= OPTION_THREAD_TITLE; + } + } + db_printf("%3d (%0*X): ", task_id, 2*sizeof(vm_offset_t), task); + if (task->thr_act_count == 0) { + db_printf("no threads\n"); + } else { + if (task->thr_act_count > 1) { + db_printf("%d threads: \n", task->thr_act_count); + flag |= OPTION_INDENT; + } else + flag &= ~OPTION_INDENT; + act_id = 0; + queue_iterate(&task->thr_acts, thr_act, + thread_act_t, thr_acts) { + db_print_act(thr_act, act_id++, flag); + flag &= ~OPTION_THREAD_TITLE; + } + } + } +} + +void +db_print_space( + task_t task, + int task_id, + int flag) +{ + ipc_space_t space; + thread_act_t act = (thread_act_t)queue_first(&task->thr_acts); + int count; + + count = 0; + space = task->itk_space; + if (act) + count = db_port_iterate(act, FALSE, FALSE); + db_printf("%3d: %08x %08x %08x %sactive %d\n", + task_id, task, space, task->map, + space->is_active? 
"":"!", count); +} + +void +db_print_task_vm( + task_t task, + int task_id, + boolean_t title, + char *modif) +{ + vm_map_t map; + pmap_t pmap; + vm_size_t size; + long resident; + long wired; + + if (title) { + db_printf("id task map pmap virtual rss pg rss mem wir pg wir mem\n"); + } + + map = task->map; + pmap = vm_map_pmap(map); + + size = db_vm_map_total_size(map); + resident = pmap->stats.resident_count; + wired = pmap->stats.wired_count; + + db_printf("%2d %08x %08x %08x %7dK %6d %6dK %6d %6dK\n", + task_id, + task, + map, + pmap, + size / 1024, + resident, (resident * PAGE_SIZE) / 1024, + wired, (wired * PAGE_SIZE) / 1024); +} + + +void +db_show_one_task_vm( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + thread_act_t thread; + task_t task; + int task_id; + + if (have_addr == FALSE) { + if ((thread = db_default_act) == THR_ACT_NULL) { + if ((thread = current_act()) == THR_ACT_NULL) { + db_printf("no thread.\n"); + return; + } + } + task = thread->task; + } else { + task = (task_t) addr; + } + + task_id = db_lookup_task(task); + if (task_id < 0) { + db_printf("0x%x is not a task_t\n", addr); + return; + } + + db_print_task_vm(task, task_id, TRUE, modif); +} + +void +db_show_all_task_vm( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + task_t task; + int task_id; + boolean_t title = TRUE; + processor_set_t pset = &default_pset; + + task_id = 0; + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + db_print_task_vm(task, task_id, title, modif); + title = FALSE; + task_id++; + } +} + +void +db_show_all_acts( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + task_t task; + int task_id; + int flag; + processor_set_t pset = &default_pset; + + flag = OPTION_TASK_TITLE|OPTION_INDENT; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + task_id = 0; + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + 
db_print_task(task, task_id, flag); + flag &= ~OPTION_TASK_TITLE; + task_id++; + if ((flag & (OPTION_LONG|OPTION_INDENT)) == OPTION_INDENT) + db_printf("\n"); + } +} + +void +db_show_one_space( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag; + int task_id; + task_t task; + + flag = OPTION_TASK_TITLE; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + if (!have_addr) { + task = db_current_task(); + if (task == TASK_NULL) { + db_error("No task\n"); + /*NOTREACHED*/ + } + } else + task = (task_t) addr; + + if ((task_id = db_lookup_task(task)) < 0) { + db_printf("bad task address 0x%x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + + db_printf(" ID: TASK SPACE MAP COUNT\n"); + db_print_space(task, task_id, flag); +} + +void +db_show_all_spaces( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + task_t task; + int task_id = 0; + int flag; + processor_set_t pset = &default_pset; + + flag = OPTION_TASK_TITLE|OPTION_INDENT; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + db_printf(" ID: TASK SPACE MAP COUNT\n"); + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + db_print_space(task, task_id, flag); + task_id++; + } +} + +db_addr_t +db_task_from_space( + ipc_space_t space, + int *task_id) +{ + task_t task; + int tid = 0; + processor_set_t pset = &default_pset; + + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + if (task->itk_space == space) { + *task_id = tid; + return (db_addr_t)task; + } + tid++; + } + *task_id = 0; + return (0); +} + +void +db_show_one_act( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag; + int act_id; + thread_act_t thr_act; + + flag = OPTION_THREAD_TITLE; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + if (!have_addr) { + thr_act = current_act(); 
+ if (thr_act == THR_ACT_NULL) { + db_error("No thr_act\n"); + /*NOTREACHED*/ + } + } else + thr_act = (thread_act_t) addr; + + if ((act_id = db_lookup_act(thr_act)) < 0) { + db_printf("bad thr_act address %#x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + + if (flag & OPTION_USER) { + db_printf("TASK%d(%0*X):\n", + db_lookup_task(thr_act->task), + 2*sizeof(vm_offset_t), thr_act->task); + db_print_act(thr_act, act_id, flag); + } else { + db_printf("task %d(%0*Xx): thr_act %d", + db_lookup_task(thr_act->task), + 2*sizeof(vm_offset_t), thr_act->task, act_id); + db_print_act(thr_act, act_id, flag); + } + if (db_option(modif, 'i') && thr_act->thread && + (thr_act->thread->state & TH_WAIT) && + thr_act->thread->kernel_stack == 0) { + + db_printf("Wait State: option 0x%x\n", + thr_act->thread->ith_option); + } +} + +void +db_show_one_task( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag; + int task_id; + task_t task; + + flag = OPTION_TASK_TITLE|OPTION_INDENT; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + if (!have_addr) { + task = db_current_task(); + if (task == TASK_NULL) { + db_error("No task\n"); + /*NOTREACHED*/ + } + } else + task = (task_t) addr; + + if ((task_id = db_lookup_task(task)) < 0) { + db_printf("bad task address 0x%x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + + db_print_task(task, task_id, flag); +} + +void +db_show_shuttle( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + thread_shuttle_t shuttle; + thread_act_t thr_act; + + if (have_addr) + shuttle = (thread_shuttle_t) addr; + else { + thr_act = current_act(); + if (thr_act == THR_ACT_NULL) { + db_error("No thr_act\n"); + /*NOTREACHED*/ + } + shuttle = thr_act->thread; + if (shuttle == THREAD_NULL) { + db_error("No shuttle associated with current thr_act\n"); + /*NOTREACHED*/ + } + } + db_printf("shuttle %x:\n", shuttle); + if (shuttle->top_act == 
THR_ACT_NULL) + db_printf(" no activations\n"); + else { + db_printf(" activations:"); + for (thr_act = shuttle->top_act; thr_act != THR_ACT_NULL; + thr_act = thr_act->lower) { + if (thr_act != shuttle->top_act) + printf(" from"); + printf(" $task%d.%d(%x)", db_lookup_task(thr_act->task), + db_lookup_act(thr_act), thr_act); + } + db_printf("\n"); + } +} + +int +db_port_kmsg_count( + ipc_port_t port) +{ + return (port->ip_messages.imq_msgcount); +} + +static int db_print_ent_cnt = 0; + +void db_reset_print_entry( + void) +{ + db_print_ent_cnt = 0; +} + +void +db_print_one_entry( + ipc_entry_t entry, + int index, + mach_port_name_t name, + boolean_t is_pset, + ipc_space_t space) +{ + ipc_port_t aport = (ipc_port_t)entry->ie_object; + ipc_entry_bits_t bits; + + bits = entry->ie_bits; + if (is_pset && !aport->ip_pset_count) + return; + if (db_print_ent_cnt && db_print_ent_cnt % 2 == 0) + db_printf("\n"); + if (!name) + db_printf("\t%s%d[%x]", + !is_pset && aport->ip_pset_count ? "pset" : "port", + index, + MACH_PORT_MAKE(index, IE_BITS_GEN(bits))); + else + db_printf("\t%s[%x]", + !is_pset && aport->ip_pset_count ? "pset" : "port", + name); + if (!is_pset) { + db_printf("(%s,%x,%d)", + (bits & MACH_PORT_TYPE_RECEIVE)? "r": + (bits & MACH_PORT_TYPE_SEND)? "s": "S", + aport, + db_port_kmsg_count(aport)); + db_print_ent_cnt++; + } + else { + db_printf("(%s,%x,set_count=%d,%d)", + (bits & MACH_PORT_TYPE_RECEIVE)? "r": + (bits & MACH_PORT_TYPE_SEND)? 
"s": "S", + aport, + aport->ip_pset_count, + db_port_kmsg_count(aport)); + db_print_ent_cnt++; + } +} + +int +db_port_iterate( + thread_act_t thr_act, + boolean_t is_pset, + boolean_t do_output) +{ + ipc_entry_t entry; + ipc_tree_entry_t tentry; + int index; + int size; + int count; + ipc_space_t space; + + count = 0; + space = thr_act->task->itk_space; + entry = space->is_table; + size = space->is_table_size; + db_reset_print_entry(); + for (index = 0; index < size; ++index, ++entry) { + if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) { + if (do_output) + db_print_one_entry(entry, + index, MACH_PORT_NULL, is_pset, space); + ++count; + } + } + for (tentry = ipc_splay_traverse_start(&space->is_tree); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { + entry = &tentry->ite_entry; + if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) { + if (do_output) + db_print_one_entry(entry, + 0, tentry->ite_name, is_pset, space); + ++count; + } + } + return (count); +} + +ipc_port_t +db_lookup_port( + thread_act_t thr_act, + int id) +{ + register ipc_space_t space; + register ipc_entry_t entry; + + if (thr_act == THR_ACT_NULL) + return(0); + space = thr_act->task->itk_space; + if (id < 0 || id >= space->is_table_size) + return(0); + entry = &space->is_table[id]; + if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) + return((ipc_port_t)entry->ie_object); + return(0); +} + +static void +db_print_port_id( + int id, + ipc_port_t port, + unsigned bits, + int n) +{ + if (n != 0 && n % 3 == 0) + db_printf("\n"); + db_printf("\tport%d(%s,%x)", id, + (bits & MACH_PORT_TYPE_RECEIVE)? "r": + (bits & MACH_PORT_TYPE_SEND)? 
"s": "S", port); +} + +void +db_show_port_id( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + thread_act_t thr_act; + + if (!have_addr) { + thr_act = current_act(); + if (thr_act == THR_ACT_NULL) { + db_error("No thr_act\n"); + /*NOTREACHED*/ + } + } else + thr_act = (thread_act_t) addr; + if (db_lookup_act(thr_act) < 0) { + db_printf("Bad thr_act address 0x%x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + if (db_port_iterate(thr_act, db_option(modif,'s'), TRUE)) + db_printf("\n"); +} + +/* + * Useful system state when the world has hung. + */ +void +db_system_stats() +{ + extern void db_sched(void); + + db_sched(); + iprintf("\n"); + db_vm(); + iprintf("\n"); + iprintf("\n"); + db_printf("current_{thread/task} 0x%x 0x%x\n", + current_thread(),current_task()); +} + +void db_show_one_runq(run_queue_t runq); + +void +db_show_runq( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + processor_set_t pset = &default_pset; + processor_t proc; + run_queue_t runq; + boolean_t showedany = FALSE; + +#if NCPUS > 1 /* This code has not been tested. 
*/ + queue_iterate(&pset->processors, proc, processor_t, processors) { + runq = &proc->runq; + if (runq->count > 0) { + db_printf("PROCESSOR %x IN SET %x\n", proc, pset); + db_show_one_runq(runq); + showedany = TRUE; + } + } +#endif /* NCPUS > 1 */ +#ifndef NCPUS +#error NCPUS undefined +#endif + runq = &pset->runq; + if (runq->count > 0) { + db_printf("PROCESSOR SET %x\n", pset); + db_show_one_runq(runq); + showedany = TRUE; + } + if (!showedany) + db_printf("No runnable threads\n"); +} + +void +db_show_one_runq( + run_queue_t runq) +{ + int i, task_id, thr_act_id; + queue_t q; + thread_act_t thr_act; + thread_t thread; + task_t task; + + printf("PRI TASK.ACTIVATION\n"); + for (i = runq->highq, q = runq->queues + i; i >= 0; i--, q--) { + if (!queue_empty(q)) { + db_printf("%3d:", i); + queue_iterate(q, thread, thread_t, links) { + thr_act = thread->top_act; + task = thr_act->task; + task_id = db_lookup_task(task); + thr_act_id = db_lookup_task_act(task, thr_act); + db_printf(" %d.%d", task_id, thr_act_id); + } + db_printf("\n"); + } + } +} diff --git a/osfmk/ddb/db_print.h b/osfmk/ddb/db_print.h new file mode 100644 index 000000000..469170e37 --- /dev/null +++ b/osfmk/ddb/db_print.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.31.1 1997/03/27 18:46:44 barbou + * ri-osc CR1566: Add db_show_one_thread() prototype. [dwm] + * [1995/08/28 15:47:07 bolinger] + * [97/02/25 barbou] + * + * Revision 1.1.16.6 1995/02/23 21:43:39 alanl + * Merge with DIPC2_SHARED. + * [1995/01/05 13:30:16 alanl] + * + * Revision 1.1.21.2 1994/12/09 22:11:02 dwm + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * * Rev 1.1.16.4 1994/10/11 16:36:02 emcmanus + * Added db_show_shuttle() and db_show_runq() prototypes. + * [1994/12/09 20:36:53 dwm] + * + * Revision 1.1.21.1 1994/11/10 06:06:47 dwm + * mk6 CR764 - s/spinlock/simple_lock/ (name change only) + * [1994/11/10 05:24:14 dwm] + * + * Revision 1.1.16.3 1994/09/23 01:21:01 ezf + * change marker to not FREE + * [1994/09/22 21:10:46 ezf] + * + * Revision 1.1.16.2 1994/09/16 15:30:07 emcmanus + * Add prototype for db_show_subsystem. + * [1994/09/16 15:29:05 emcmanus] + * + * Revision 1.1.16.1 1994/06/11 21:12:10 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:04:06 bolinger] + * + * Revision 1.1.18.2 1994/12/06 19:43:09 alanl + * Intel merge, Oct 94 code drop. 
+ * Added prototypes for db_show_{one,all}_task_vm + * [94/11/28 mmp] + * + * Revision 1.1.18.1 1994/08/05 19:35:57 mmp + * Remove duplicate prototype for db_show_port_id. + * [1994/08/05 19:31:44 mmp] + * + * Revision 1.1.10.3 1994/04/15 18:41:54 paire + * Changed db_task_from_space prototype. + * [94/03/31 paire] + * + * Revision 1.1.10.2 1994/03/07 16:37:54 paire + * Added ANSI prototype for db_port_kmsg_count routine. + * [94/02/15 paire] + * + * Revision 1.1.10.1 1994/02/08 10:58:27 bernadat + * Added db_show_one_space + * db_show_all_spaces + * db_sys + * prototypes + * [94/02/07 bernadat] + * + * Revision 1.1.2.3 1993/09/17 21:34:40 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:24 robert] + * + * Revision 1.1.2.2 1993/07/27 18:28:01 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:43 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_PRINT_H_ +#define _DDB_DB_PRINT_H_ + +#include +#include + +/* Prototypes for functions exported by this module. 
+ */ +void db_show_regs( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); + +void db_show_all_acts( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_one_act( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_one_thread( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_one_task( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_shuttle( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_port_id( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_one_task_vm( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); + +void db_show_all_task_vm( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); + +void db_show_one_space( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_all_spaces( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_sys(void); + +int db_port_kmsg_count( + ipc_port_t port); + +db_addr_t db_task_from_space( + ipc_space_t space, + int *task_id); + +void db_show_one_simple_lock( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_one_mutex( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_subsystem( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +void db_show_runq( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +#endif /* !_DDB_DB_PRINT_H_ */ diff --git a/osfmk/ddb/db_run.c b/osfmk/ddb/db_run.c new file mode 100644 index 000000000..d0898f80c --- /dev/null +++ b/osfmk/ddb/db_run.c @@ -0,0 +1,533 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Commands to run process. + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include +#include +#include + +boolean_t db_sstep_print; +int db_loop_count; +int db_call_depth; + +int db_inst_count; +int db_last_inst_count; +int db_load_count; +int db_store_count; +int db_max_inst_count = 1000; + +#ifndef db_set_single_step +void db_set_task_single_step( + register db_regs_t *regs, + task_t task); +#else +#define db_set_task_single_step(regs,task) db_set_single_step(regs) +#endif +#ifndef db_clear_single_step +void db_clear_task_single_step( + db_regs_t *regs, + task_t task); +#else +#define db_clear_task_single_step(regs,task) db_clear_single_step(regs) +#endif + +extern jmp_buf_t *db_recover; +boolean_t db_step_again(void); + +boolean_t +db_stop_at_pc( + boolean_t *is_breakpoint, + task_t task, + task_t space) +{ + register db_addr_t pc; + register db_thread_breakpoint_t bkpt; + + db_clear_task_single_step(DDB_REGS, space); + db_clear_breakpoints(); + db_clear_watchpoints(); + pc = PC_REGS(DDB_REGS); + +#ifdef FIXUP_PC_AFTER_BREAK + if (*is_breakpoint) { + /* + * Breakpoint trap. Fix up the PC if the + * machine requires it. + */ + FIXUP_PC_AFTER_BREAK + pc = PC_REGS(DDB_REGS); + } +#endif + + /* + * Now check for a breakpoint at this address. 
+ */ + bkpt = db_find_thread_breakpoint_here(space, pc); + if (bkpt) { + if (db_cond_check(bkpt)) { + *is_breakpoint = TRUE; + return (TRUE); /* stop here */ + } + } + *is_breakpoint = FALSE; + + if (db_run_mode == STEP_INVISIBLE) { + db_run_mode = STEP_CONTINUE; + return (FALSE); /* continue */ + } + if (db_run_mode == STEP_COUNT) { + return (FALSE); /* continue */ + } + if (db_run_mode == STEP_ONCE) { + if (--db_loop_count > 0) { + if (db_sstep_print) { + db_print_loc_and_inst(pc, task); + } + return (FALSE); /* continue */ + } + } + if (db_run_mode == STEP_RETURN) { + jmp_buf_t *prev; + jmp_buf_t db_jmpbuf; + /* WARNING: the following assumes an instruction fits an int */ + db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, space); + + /* continue until matching return */ + + prev = db_recover; + if (_setjmp(db_recover = &db_jmpbuf) == 0) { + if (!inst_trap_return(ins) && + (!inst_return(ins) || --db_call_depth != 0)) { + if (db_sstep_print) { + if (inst_call(ins) || inst_return(ins)) { + register int i; + + db_printf("[after %6d /%4d] ", + db_inst_count, + db_inst_count - db_last_inst_count); + db_last_inst_count = db_inst_count; + for (i = db_call_depth; --i > 0; ) + db_printf(" "); + db_print_loc_and_inst(pc, task); + db_printf("\n"); + } + } + if (inst_call(ins)) + db_call_depth++; + db_recover = prev; + if (db_step_again()) + return (FALSE); /* continue */ + } + } + db_recover = prev; + } + if (db_run_mode == STEP_CALLT) { + /* WARNING: the following assumes an instruction fits an int */ + db_expr_t ins = db_get_task_value(pc, sizeof(int), FALSE, space); + + /* continue until call or return */ + + if (!inst_call(ins) && + !inst_return(ins) && + !inst_trap_return(ins)) { + if (db_step_again()) + return (FALSE); /* continue */ + } + } + if (db_find_breakpoint_here(space, pc)) + return(FALSE); + db_run_mode = STEP_NONE; + return (TRUE); +} + +void +db_restart_at_pc( + boolean_t watchpt, + task_t task) +{ + register db_addr_t pc = PC_REGS(DDB_REGS), 
brpc; + + if ((db_run_mode == STEP_COUNT) || + (db_run_mode == STEP_RETURN) || + (db_run_mode == STEP_CALLT)) { + db_expr_t ins; + + /* + * We are about to execute this instruction, + * so count it now. + */ + + ins = db_get_task_value(pc, sizeof(int), FALSE, task); + db_inst_count++; + db_load_count += db_inst_load(ins); + db_store_count += db_inst_store(ins); +#ifdef SOFTWARE_SSTEP + /* Account for instructions in delay slots */ + brpc = next_instr_address(pc,1,task); + if ((brpc != pc) && (inst_branch(ins) || inst_call(ins))) { + /* Note: this ~assumes an instruction <= sizeof(int) */ + ins = db_get_task_value(brpc, sizeof(int), FALSE, task); + db_inst_count++; + db_load_count += db_inst_load(ins); + db_store_count += db_inst_store(ins); + } +#endif /* SOFTWARE_SSTEP */ + } + + if (db_run_mode == STEP_CONTINUE) { + if (watchpt || db_find_breakpoint_here(task, pc)) { + /* + * Step over breakpoint/watchpoint. + */ + db_run_mode = STEP_INVISIBLE; + db_set_task_single_step(DDB_REGS, task); + } else { + db_set_breakpoints(); + db_set_watchpoints(); + } + } else { + db_set_task_single_step(DDB_REGS, task); + } +} + +/* + * 'n' and 'u' commands might never return. + * Limit the maximum number of steps. + */ + +boolean_t +db_step_again(void) +{ + if (db_inst_count && !(db_inst_count%db_max_inst_count)) { + char c; + db_printf("%d instructions, continue ? (y/n) ", + db_inst_count); + c = cngetc(); + db_printf("\n"); + if(c == 'n') + return(FALSE); + } + return(TRUE); +} + +void +db_single_step( + db_regs_t *regs, + task_t task) +{ + if (db_run_mode == STEP_CONTINUE) { + db_run_mode = STEP_INVISIBLE; + db_set_task_single_step(regs, task); + } +} + +#ifdef SOFTWARE_SSTEP +/* + * Software implementation of single-stepping. + * If your machine does not have a trace mode + * similar to the vax or sun ones you can use + * this implementation, done for the mips. + * Just define the above conditional and provide + * the functions/macros defined below. 
+ * + * extern boolean_t + * inst_branch(), returns true if the instruction might branch + * extern unsigned + * branch_taken(), return the address the instruction might + * branch to + * db_getreg_val(); return the value of a user register, + * as indicated in the hardware instruction + * encoding, e.g. 8 for r8 + * + * next_instr_address(pc,bd,task) returns the address of the first + * instruction following the one at "pc", + * which is either in the taken path of + * the branch (bd==1) or not. This is + * for machines (mips) with branch delays. + * + * A single-step may involve at most 2 breakpoints - + * one for branch-not-taken and one for branch taken. + * If one of these addresses does not already have a breakpoint, + * we allocate a breakpoint and save it here. + * These breakpoints are deleted on return. + */ +db_breakpoint_t db_not_taken_bkpt = 0; +db_breakpoint_t db_taken_bkpt = 0; + +db_breakpoint_t +db_find_temp_breakpoint( + task_t task, + db_addr_t addr) +{ + if (db_taken_bkpt && (db_taken_bkpt->address == addr) && + db_taken_bkpt->task == task) + return db_taken_bkpt; + if (db_not_taken_bkpt && (db_not_taken_bkpt->address == addr) && + db_not_taken_bkpt->task == task) + return db_not_taken_bkpt; + return 0; +} + +void +db_set_task_single_step( + register db_regs_t *regs, + task_t task) +{ + db_addr_t pc = PC_REGS(regs), brpc; + register unsigned int inst; + register boolean_t unconditional; + + /* + * User was stopped at pc, e.g. the instruction + * at pc was not executed. + */ + inst = db_get_task_value(pc, sizeof(int), FALSE, task); + if (inst_branch(inst) || inst_call(inst)) { + extern db_expr_t getreg_val(); /* XXX -- need prototype! 
*/ + + brpc = branch_taken(inst, pc, getreg_val, (unsigned char*)regs); + if (brpc != pc) { /* self-branches are hopeless */ + db_taken_bkpt = db_set_temp_breakpoint(task, brpc); + } else + db_taken_bkpt = 0; + pc = next_instr_address(pc,1,task); + } else + pc = next_instr_address(pc,0,task); + + /* + * check if this control flow instruction is an + * unconditional transfer + */ + + unconditional = inst_unconditional_flow_transfer(inst); + + /* + We only set the sequential breakpoint if previous instruction was not + an unconditional change of flow of control. If the previous instruction + is an unconditional change of flow of control, setting a breakpoint in the + next sequential location may set a breakpoint in data or in another routine, + which could screw up either the program or the debugger. + (Consider, for instance, that the next sequential instruction is the + start of a routine needed by the debugger.) + */ + if (!unconditional && db_find_breakpoint_here(task, pc) == 0 && + (db_taken_bkpt == 0 || db_taken_bkpt->address != pc)) { + db_not_taken_bkpt = db_set_temp_breakpoint(task, pc); + } else + db_not_taken_bkpt = 0; +} + +void +db_clear_task_single_step( + db_regs_t *regs, + task_t task) +{ + if (db_taken_bkpt != 0) { + db_delete_temp_breakpoint(task, db_taken_bkpt); + db_taken_bkpt = 0; + } + if (db_not_taken_bkpt != 0) { + db_delete_temp_breakpoint(task, db_not_taken_bkpt); + db_not_taken_bkpt = 0; + } +} + +#endif /* SOFTWARE_SSTEP */ + +extern int db_cmd_loop_done; + +/* single-step */ +void +db_single_step_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + boolean_t print = FALSE; + + if (count == -1) + count = 1; + + if (modif[0] == 'p') + print = TRUE; + + db_run_mode = STEP_ONCE; + db_loop_count = count; + db_sstep_print = print; + db_inst_count = 0; + db_last_inst_count = 0; + db_load_count = 0; + db_store_count = 0; + + db_cmd_loop_done = 1; +} + +/* trace and print until call/return */ +void 
+db_trace_until_call_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + boolean_t print = FALSE; + + if (modif[0] == 'p') + print = TRUE; + + db_run_mode = STEP_CALLT; + db_sstep_print = print; + db_inst_count = 0; + db_last_inst_count = 0; + db_load_count = 0; + db_store_count = 0; + + db_cmd_loop_done = 1; +} + +void +db_trace_until_matching_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + boolean_t print = FALSE; + + if (modif[0] == 'p') + print = TRUE; + + db_run_mode = STEP_RETURN; + db_call_depth = 1; + db_sstep_print = print; + db_inst_count = 0; + db_last_inst_count = 0; + db_load_count = 0; + db_store_count = 0; + + db_cmd_loop_done = 1; +} + +/* continue */ +void +db_continue_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + /* + * Though "cont/c" works fairly well, it's not really robust + * enough to use in arbitrary situations, so disable it. + * (Doesn't seem cost-effective to debug and fix what ails + * it.) + */ +#if 0 + if (modif[0] == 'c') + db_run_mode = STEP_COUNT; + else + db_run_mode = STEP_CONTINUE; +#else + db_run_mode = STEP_CONTINUE; +#endif + db_inst_count = 0; + db_last_inst_count = 0; + db_load_count = 0; + db_store_count = 0; + + db_cmd_loop_done = 1; +} + +/* gdb */ +void +db_continue_gdb( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ +#if defined(__ppc__) + db_to_gdb(); +#endif + db_run_mode = STEP_CONTINUE; + db_inst_count = 0; + db_last_inst_count = 0; + db_load_count = 0; + db_store_count = 0; + + db_cmd_loop_done = 1; +} + + + +boolean_t +db_in_single_step(void) +{ + return(db_run_mode != STEP_NONE && db_run_mode != STEP_CONTINUE); +} diff --git a/osfmk/ddb/db_run.h b/osfmk/ddb/db_run.h new file mode 100644 index 000000000..a3544402d --- /dev/null +++ b/osfmk/ddb/db_run.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +/* + */ + +#ifndef _DDB_DB_RUN_H_ +#define _DDB_DB_RUN_H_ + +#include +#include +#include + + +/* Prototypes for functions exported by this module. + */ + +boolean_t db_stop_at_pc( + boolean_t *is_breakpoint, + task_t task, + task_t space); + +void db_restart_at_pc( + boolean_t watchpt, + task_t task); + +void db_single_step( + db_regs_t *regs, + task_t task); + +void db_single_step_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_trace_until_call_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_trace_until_matching_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_continue_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_continue_gdb( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +boolean_t db_in_single_step(void); + +#endif /* !_DDB_DB_RUN_H_ */ diff --git a/osfmk/ddb/db_sym.c b/osfmk/ddb/db_sym.c new file mode 100644 index 000000000..1e8bd1dbc --- /dev/null +++ b/osfmk/ddb/db_sym.c @@ -0,0 +1,1806 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.22.8 1996/07/31 09:07:24 paire + * Merged with nmk20b7_shared (1.3.47.1) + * [96/07/24 paire] + * + * Revision 1.3.47.1 1996/06/13 12:36:08 bernadat + * Do not assume anymore that VM_MIN_KERNEL_ADDRESS + * is greater or equal than VM_MAX_ADDRESS. + * [96/05/23 bernadat] + * + * Revision 1.3.22.7 1996/01/09 19:16:15 devrcs + * Added db_task_getlinenum() function. (steved) + * Make db_maxval & db_minval long int's for Alpha. + * Changed declarations of 'register foo' to 'register int foo'. + * [1995/12/01 21:42:29 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:03:41 jfraser] + * + * Revision 1.3.22.6 1995/02/28 01:58:46 dwm + * Merged with changes from 1.3.22.5 + * [1995/02/28 01:53:47 dwm] + * + * mk6 CR1120 - Merge mk6pro_shared into cnmk_shared + * remove a couple local protos, now in .h file (for better or worse) + * [1995/02/28 01:12:51 dwm] + * + * Revision 1.3.22.5 1995/02/23 21:43:43 alanl + * Move TR_INIT to model_dep.c (MACH_TR and MACH_KDB shouldn't + * be bound). + * [95/02/16 travos] + * + * Prepend a "db_" to qsort and qsort_limit_search + * (collisions with the real qsort in stdlib.h) + * [95/02/14 travos] + * + * Added X_db_init for object independent formats. + * [95/01/24 sjs] + * + * Merge with DIPC2_SHARED. + * [1995/01/05 13:32:53 alanl] + * + * Revision 1.3.30.2 1994/12/22 20:36:15 bolinger + * Fix ri-osc CR881: enable freer use of symbol table of collocated + * tasks. No point in requiring task to be named for symbols to be + * usable. Also fixed glitch in use of symtab cloning. 
+ * [1994/12/22 20:34:55 bolinger] + * + * Revision 1.3.30.1 1994/11/04 09:53:14 dwm + * mk6 CR668 - 1.3b26 merge + * add arg to *_db_search_by_addr() from mk6 + * * Revision 1.3.4.9 1994/05/13 15:57:14 tmt + * Add hooks for catching calls to uninstalled symbol tables. + * Add XXX_search_by_addr() vectors. + * * Revision 1.3.4.8 1994/05/12 21:59:00 tmt + * Fix numerous db_sym_t/char * mixups. + * Fix and enable db_qualify_ambiguous_names. + * Make dif and newdiff unsigned in symbol searches. + * * Revision 1.3.4.7 1994/05/06 18:39:52 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Fix function prototype declarations. + * Merge Alpha changes into osc1.312b source code. + * String protos. + * Handle multiple, coexisting symbol table types. + * 64bit cleanup. + * Revision 1.3.4.5 1993/10/20 18:58:55 gm + * CR9704: Removed symbol load printf. + * * End1.3merge + * [1994/11/04 08:50:02 dwm] + * + * Revision 1.3.22.5 1995/02/23 21:43:43 alanl + * Move TR_INIT to model_dep.c (MACH_TR and MACH_KDB shouldn't + * be bound). + * [95/02/16 travos] + * + * Prepend a "db_" to qsort and qsort_limit_search + * (collisions with the real qsort in stdlib.h) + * [95/02/14 travos] + * + * Added X_db_init for object independent formats. + * [95/01/24 sjs] + * + * Merge with DIPC2_SHARED. + * [1995/01/05 13:32:53 alanl] + * + * Revision 1.3.30.2 1994/12/22 20:36:15 bolinger + * Fix ri-osc CR881: enable freer use of symbol table of collocated + * tasks. No point in requiring task to be named for symbols to be + * usable. Also fixed glitch in use of symtab cloning. + * [1994/12/22 20:34:55 bolinger] + * + * Revision 1.3.30.1 1994/11/04 09:53:14 dwm + * mk6 CR668 - 1.3b26 merge + * add arg to *_db_search_by_addr() from mk6 + * * Revision 1.3.4.9 1994/05/13 15:57:14 tmt + * Add hooks for catching calls to uninstalled symbol tables. + * Add XXX_search_by_addr() vectors. + * * Revision 1.3.4.8 1994/05/12 21:59:00 tmt + * Fix numerous db_sym_t/char * mixups. 
+ * Fix and enable db_qualify_ambiguous_names. + * Make dif and newdiff unsigned in symbol searches. + * * Revision 1.3.4.7 1994/05/06 18:39:52 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Fix function prototype declarations. + * Merge Alpha changes into osc1.312b source code. + * String protos. + * Handle multiple, coexisting symbol table types. + * 64bit cleanup. + * Revision 1.3.4.5 1993/10/20 18:58:55 gm + * CR9704: Removed symbol load printf. + * * End1.3merge + * [1994/11/04 08:50:02 dwm] + * + * Revision 1.3.22.3 1994/09/23 01:21:37 ezf + * change marker to not FREE + * [1994/09/22 21:10:58 ezf] + * + * Revision 1.3.22.2 1994/06/26 22:58:24 bolinger + * Suppress symbol table range output when table is unsorted, since output + * is meaningless in this case. + * [1994/06/23 20:19:02 bolinger] + * + * Revision 1.3.22.1 1994/06/11 21:12:19 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:02:31 bolinger] + * + * Revision 1.3.17.1 1994/02/08 10:58:40 bernadat + * Check result of X_db_line_at_pc() before + * invoking db_shorten_filename(). + * [93/11/30 bernadat] + * + * Installed ddb_init() routine in a symbol-independent file to call + * symbol-dependent and machine-dependent initialization routines. + * [93/08/27 paire] + * + * Fixed db_shorten_filename() to gobble the last slash. + * Modified db_search_task_symbol_and_line() interface to return + * the number of a function arguments. + * [93/08/19 paire] + * + * Added new arguments to db_sym_print_completion() call. + * [93/08/18 paire] + * + * Added db_lookup_incomplete(), db_sym_parse_and_lookup_incomplete(), + * db_sym_print_completion() and db_completion_print() for support of + * symbol completion. + * [93/08/14 paire] + * [94/02/07 bernadat] + * + * Revision 1.3.15.4 1994/06/08 19:11:23 dswartz + * Preemption merge. 
+ * [1994/06/08 19:10:24 dswartz] + * + * Revision 1.3.20.2 1994/06/01 21:34:39 klj + * Initial preemption code base merge + * + * Revision 1.3.15.3 1994/02/10 02:28:15 bolinger + * Fix db_add_symbol_table() to increase db_maxval if highest-addressed + * symbol in new symtab is greater than its current value. + * [1994/02/09 21:42:12 bolinger] + * + * Revision 1.3.15.2 1994/02/03 21:44:23 bolinger + * Update db_maxval when a symbol table is cloned for kernel-loaded + * server. + * [1994/02/03 20:47:22 bolinger] + * + * Revision 1.3.15.1 1994/02/03 02:41:58 dwm + * Add short-term kludge to provide symbolic info on INKServer. + * [1994/02/03 02:31:17 dwm] + * + * Revision 1.3.4.4 1993/08/11 20:38:11 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:59 elliston] + * + * Revision 1.3.4.3 1993/07/27 18:28:09 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:57 elliston] + * + * Revision 1.3.4.2 1993/06/09 02:20:50 gm + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:57:31 jeffc] + * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:10 jeffc] + * + * Revision 1.3 1993/04/19 16:03:09 devrcs + * Protect db_line_at_pc() against null db_last_symtab. + * [1993/02/11 15:37:16 barbou] + * + * Changes from MK78: + * Upped MAXNOSYMTABS from 3 to 5. Now there is space for kernel, + * bootstrap, server, and emulator symbols - plus one for future + * expansion. + * [92/03/21 danner] + * Changed CHAR arg of db_eqname to UNSIGNED. + * Made arg types proper for db_line_at_pc(). + * [92/05/16 jfriedl] + * [92/12/18 bruel] + * + * Sort large symbol tables to speedup lookup. + * Improved symbol lookup (use of max_offset, dichotomic search) + * [barbou@gr.osf.org] + * + * db_add_symbol_table now takes 3 additional arguments. Machine + * dependant modules must provide them. 
[barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.2 1992/11/25 01:04:42 robert + * integrate changes below for norma_14 + * [1992/11/13 19:22:44 robert] + * + * Revision 1.1 1992/09/30 02:01:25 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.10.4.1 92/02/18 18:38:53 jeffreyh + * Added db_get_sym(). Simple interface to get symbol names + * knowing the offset. + * [91/12/20 bernadat] + * + * Do not look for symbol names if address + * is to small or to large, otherwise get + * random names like INCLUDE_VERSION+?? + * [91/06/25 bernadat] + * + * Revision 2.10 91/10/09 16:02:30 af + * Revision 2.9.2.1 91/10/05 13:07:27 jeffreyh + * Changed symbol table name qualification syntax from "xxx:yyy" + * to "xxx::yyy" to allow "file:func:line" in "yyy" part. + * "db_sym_parse_and_lookup" is also added for "yyy" part parsing. + * Replaced db_search_symbol with db_search_task_symbol, and moved + * it to "db_sym.h" as a macro. + * Added db_task_printsym, and changed db_printsym to call it. + * Added include "db_task_thread.h". + * Fixed infinite recursion of db_symbol_values. + * [91/08/29 tak] + * + * Revision 2.9.2.1 91/10/05 13:07:27 jeffreyh + * Changed symbol table name qualification syntax from "xxx:yyy" + * to "xxx::yyy" to allow "file:func:line" in "yyy" part. + * "db_sym_parse_and_lookup" is also added for "yyy" part parsing. + * Replaced db_search_symbol with db_search_task_symbol, and moved + * it to "db_sym.h" as a macro. + * Added db_task_printsym, and changed db_printsym to call it. + * Added include "db_task_thread.h". + * Fixed infinite recursion of db_symbol_values. + * [91/08/29 tak] + * + * Revision 2.9 91/07/31 17:31:14 dbg + * Add task pointer and space for string storage to symbol table + * descriptor. + * [91/07/31 dbg] + * + * Revision 2.8 91/07/09 23:16:08 danner + * Changed a printf. 
+ * [91/07/08 danner] + * + * Revision 2.7 91/05/14 15:35:54 mrt + * Correcting copyright + * + * Revision 2.6 91/03/16 14:42:40 rpd + * Changed the default db_maxoff to 4K. + * [91/03/10 rpd] + * + * Revision 2.5 91/02/05 17:07:07 mrt + * Changed to new Mach copyright + * [91/01/31 16:19:17 mrt] + * + * Revision 2.4 90/10/25 14:44:05 rwd + * Changed db_printsym to print unsigned. + * [90/10/19 rpd] + * + * Revision 2.3 90/09/09 23:19:56 rpd + * Avoid totally incorrect guesses of symbol names for small values. + * [90/08/30 17:39:48 af] + * + * Revision 2.2 90/08/27 21:52:18 dbg + * Removed nlist.h. Fixed some type declarations. + * Qualifier character is ':'. + * [90/08/20 dbg] + * Modularized symtab info into a new db_symtab_t type. + * Modified db_add_symbol_table and others accordingly. + * Defined db_sym_t, a new (opaque) type used to represent + * symbols. This should support all sort of future symtable + * formats. Functions like db_qualify take a db_sym_t now. + * New db_symbol_values() function to explode the content + * of a db_sym_t. + * db_search_symbol() replaces db_find_sym_and_offset(), which is + * now a macro defined in our (new) header file. This new + * function accepts more restrictive searches, which are + * entirely delegated to the symtab-specific code. + * Accordingly, db_printsym() accepts a strategy parameter. + * New db_line_at_pc() function. + * Renamed misleading db_eqsym into db_eqname. + * [90/08/20 10:47:06 af] + * + * Created. + * [90/07/25 dbg] + * + * Revision 2.1 90/07/26 16:43:52 dbg + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#include +#include /* For strcpy(), strcmp() */ +#include +#include /* For printf() */ +#include +#include +#include +#include /* For db_printf() */ + +#include /* vm_map_t */ + +/* + * Multiple symbol tables + * + * mach, bootstrap, name_server, default_pager, unix, 1 spare + */ +#define MAXNOSYMTABS 6 + +db_symtab_t db_symtabs[MAXNOSYMTABS] = {{0}}; +int db_nsymtab = 0; + +db_symtab_t *db_last_symtab; + +unsigned long db_maxoff = 0x4000; +extern char end; +unsigned long db_maxval = (unsigned long)&end; +natural_t db_minval = 0x1000; + +/* Prototypes for functions local to this file. XXX -- should be static! 
+ */ +static char *db_qualify( + char *sym, + register char *symtabname); + +boolean_t db_eqname( + char *src, + char *dst, + unsigned c); + +boolean_t db_symbol_is_ambiguous(char *name); + +void db_shorten_filename(char **filenamep); + +void qsort_swap( + register int *a, + register int *b, + register int size); + +void qsort_rotate( + register int *a, + register int *b, + register int *c, + register int size); + +void qsort_recur( + char *left, + char *right, + int eltsize, + int (*compfun)(char *, char *)); + +void qsort_checker( + char *table, + int nbelts, + int eltsize, + int (*compfun)(char *, char *)); + +void bubble_sort( + char *table, + int nbelts, + int eltsize, + int (*compfun)(char *, char *)); + +int no_print_completion( + db_symtab_t *stab, + char *symstr ); +int no_lookup_incomplete( + db_symtab_t *stab, + char *symstr, + char **name, + int *len, + int *toadd); + +/* + * Initialization routine for ddb. + */ +void +ddb_init(void) +{ + X_db_init(); + db_machdep_init(); +} + +/* + * Add symbol table, with given name, to list of symbol tables. + */ +boolean_t +db_add_symbol_table( + int type, + char *start, + char *end, + char *name, + char *ref, + char *map_pointer, + unsigned long minsym, + unsigned long maxsym, + boolean_t sorted) +{ + register db_symtab_t *st; + extern vm_map_t kernel_map; + + if (db_nsymtab >= MAXNOSYMTABS) + return (FALSE); + + st = &db_symtabs[db_nsymtab]; + st->type = type; + st->start = start; + st->end = end; + st->private = ref; + if (map_pointer == (char *)kernel_map || + (VM_MAX_ADDRESS <= VM_MIN_KERNEL_ADDRESS && + VM_MIN_KERNEL_ADDRESS <= minsym)) + st->map_pointer = 0; + else + st->map_pointer = map_pointer; + strcpy(st->name, name); + st->minsym = minsym; + st->maxsym = maxsym; + if (maxsym == 0) + st->sorted = FALSE; + else { + st->sorted = sorted; + if (db_maxval < maxsym + db_maxoff) + db_maxval = maxsym + db_maxoff; + } + db_nsymtab++; + + return (TRUE); +} + +/* + * db_qualify("vm_map", "ux") returns "ux::vm_map". 
+ * + * Note: return value points to static data whose content is + * overwritten by each call... but in practice this seems okay. + */ +static char * +db_qualify( + char *symname, + register char *symtabname) +{ + static char tmp[256]; + register char *s; + + s = tmp; + while (*s++ = *symtabname++) { + } + s[-1] = ':'; + *s++ = ':'; + while (*s++ = *symname++) { + } + return tmp; +} + + +boolean_t +db_eqname( + char *src, + char *dst, + unsigned c) +{ + if (!strcmp(src, dst)) + return (TRUE); + if (src[0] == c) + return (!strcmp(src+1,dst)); + return (FALSE); +} + +boolean_t +db_value_of_name( + char *name, + db_expr_t *valuep) +{ + db_sym_t sym; + + sym = db_lookup(name); + if (sym == DB_SYM_NULL) + return (FALSE); + db_symbol_values(0, sym, &name, valuep); + return (TRUE); +} + +/* + * Display list of possible completions for a symbol. + */ +void +db_print_completion( + char *symstr) +{ + register int i; + int symtab_start = 0; + int symtab_end = db_nsymtab; + register char *cp; + int nsym = 0; + char *name = (char *)0; + int len; + int toadd; + + /* + * Look for, remove, and remember any symbol table specifier. + */ + for (cp = symstr; *cp; cp++) { + if (*cp == ':' && cp[1] == ':') { + *cp = '\0'; + for (i = 0; i < db_nsymtab; i++) { + if (! strcmp(symstr, db_symtabs[i].name)) { + symtab_start = i; + symtab_end = i + 1; + break; + } + } + *cp = ':'; + if (i == db_nsymtab) + return; + symstr = cp+2; + } + } + + /* + * Look in the specified set of symbol tables. + * Return on first match. + */ + for (i = symtab_start; i < symtab_end; i++) { + if (X_db_print_completion(&db_symtabs[i], symstr)) + break; + } +} + +/* + * Lookup a (perhaps incomplete) symbol. + * If the symbol has a qualifier (e.g., ux::vm_map), + * then only the specified symbol table will be searched; + * otherwise, all symbol tables will be searched. 
+ */ +int +db_lookup_incomplete( + char *symstr, + int symlen) +{ + register int i; + int symtab_start = 0; + int symtab_end = db_nsymtab; + register char *cp; + int nsym = 0; + char *name = (char *)0; + int len; + int toadd; + + /* + * Look for, remove, and remember any symbol table specifier. + */ + for (cp = symstr; *cp; cp++) { + if (*cp == ':' && cp[1] == ':') { + *cp = '\0'; + for (i = 0; i < db_nsymtab; i++) { + if (! strcmp(symstr, db_symtabs[i].name)) { + symtab_start = i; + symtab_end = i + 1; + break; + } + } + *cp = ':'; + if (i == db_nsymtab) + return 0; + symstr = cp+2; + } + } + + /* + * Look in the specified set of symbol tables. + * Return on first match. + */ + for (i = symtab_start; i < symtab_end; i++) { + nsym = X_db_lookup_incomplete(&db_symtabs[i], symstr, + &name, &len, &toadd); + if (nsym > 0) { + if (toadd > 0) { + len = strlen(symstr); + if (len + toadd >= symlen) + return 0; + bcopy(&name[len], &symstr[len], toadd); + symstr[len + toadd] = '\0'; + } + break; + } + } + return nsym; +} + +/* + * Lookup a symbol. + * If the symbol has a qualifier (e.g., ux::vm_map), + * then only the specified symbol table will be searched; + * otherwise, all symbol tables will be searched. + */ +db_sym_t +db_lookup(char *symstr) +{ + db_sym_t sp; + register int i; + int symtab_start = 0; + int symtab_end = db_nsymtab; + register char *cp; + + /* + * Look for, remove, and remember any symbol table specifier. + */ + for (cp = symstr; *cp; cp++) { + if (*cp == ':' && cp[1] == ':') { + *cp = '\0'; + for (i = 0; i < db_nsymtab; i++) { + if (! strcmp(symstr, db_symtabs[i].name)) { + symtab_start = i; + symtab_end = i + 1; + break; + } + } + *cp = ':'; + if (i == db_nsymtab) + db_error("Invalid symbol table name\n"); + symstr = cp+2; + } + } + + /* + * Look in the specified set of symbol tables. + * Return on first match. 
+ */
+ for (i = symtab_start; i < symtab_end; i++) {
+ if (sp = X_db_lookup(&db_symtabs[i], symstr)) {
+ db_last_symtab = &db_symtabs[i];
+ return sp;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Print a symbol completion
+ */
+void
+db_sym_print_completion(
+ db_symtab_t *stab,
+ char *name,
+ int function,
+ char *fname,
+ int line)
+{
+ if (stab != db_symtabs)
+ db_printf("%s::", stab->name);
+ db_printf("%s", name); /* symbol name may contain '%' -- never use it as a format string */
+ if (function) {
+ db_putchar('(');
+ db_putchar(')');
+ }
+ if (fname) {
+ db_printf(" [static from %s", fname);
+ if (line > 0)
+ db_printf(":%d", line);
+ db_putchar(']');
+ }
+ db_putchar('\n');
+}
+
+/*
+ * Common utility routine to parse a symbol string into a file
+ * name, a (possibly incomplete) symbol name without line number.
+ * This routine is called from aout_db_print_completion if the object
+ * dependent handler supports qualified search with a file name.
+ * It parses the symbol string, and calls an object dependent routine
+ * with parsed file name and symbol name.
+ */
+int
+db_sym_parse_and_print_completion(
+ int (*func)(db_symtab_t *,
+ char *),
+ db_symtab_t *symtab,
+ char *symstr)
+{
+ register char *p;
+ register int n;
+ char *sym_name;
+ char *component[2];
+ int nsym;
+
+ /*
+ * disassemble the symbol into components: [file_name:]symbol
+ */
+ component[0] = symstr;
+ component[1] = 0;
+ for (p = symstr, n = 1; *p; p++) {
+ if (*p == ':') {
+ if (n == 2)
+ break;
+ *p = 0;
+ component[n++] = p+1;
+ }
+ }
+ if (*p == 0) {
+ if (n == 1) {
+ sym_name = component[0];
+ } else {
+ sym_name = component[1];
+ }
+ nsym = func(symtab, sym_name);
+ } else
+ nsym = 0;
+ if (n == 2)
+ component[1][-1] = ':';
+ return nsym;
+}
+
+/*
+ * Common utility routine to parse a symbol string into a file
+ * name, a (possibly incomplete) symbol name without line number.
+ * This routine is called from X_db_lookup_incomplete if the object
+ * dependent handler supports qualified search with a file name.
+ * It parses the symbol string, and call an object dependent routine + * with parsed file name and symbol name. + */ +int +db_sym_parse_and_lookup_incomplete( + int (*func)(db_symtab_t *, + char *, + char *, + int, + db_sym_t*, + char **, + int *), + db_symtab_t *symtab, + char *symstr, + char **name, + int *len, + int *toadd) +{ + register char *p; + register int n; + char *file_name = 0; + char *sym_name = 0; + char *component[2]; + int nsym = 0; + + /* + * disassemble the symbol into components: [file_name:]symbol + */ + component[0] = symstr; + component[1] = 0; + for (p = symstr, n = 1; *p; p++) { + if (*p == ':') { + if (n == 2) + break; + *p = 0; + component[n++] = p+1; + } + } + if (*p == 0) { + if (n == 1) { + file_name = 0; + sym_name = component[0]; + } else { + file_name = component[0]; + sym_name = component[1]; + } + nsym = func(symtab, file_name, sym_name, 0, (db_sym_t *)0, + name, len); + if (nsym > 0) + *toadd = *len - strlen(sym_name); + } + if (n == 2) + component[1][-1] = ':'; + return(nsym); +} + +/* + * Common utility routine to parse a symbol string into a file + * name, a symbol name and line number. + * This routine is called from aout_db_lookup if the object dependent + * handler supports qualified search with a file name or a line number. + * It parses the symbol string, and call an object dependent routine + * with parsed file name, symbol name and line number. 
+ */
+db_sym_t
+db_sym_parse_and_lookup(
+ int (*func)(db_symtab_t *, char *, char *, int,
+ db_sym_t*, char **, int *),
+ db_symtab_t *symtab,
+ char *symstr)
+{
+ register char *p;
+ register int n;
+ int n_name;
+ int line_number;
+ char *file_name = 0;
+ char *sym_name = 0;
+ char *component[3];
+ db_sym_t found = DB_SYM_NULL;
+
+ /*
+ * disassemble the symbol into components:
+ * [file_name:]symbol[:line_number]
+ */
+ component[0] = symstr;
+ component[1] = component[2] = 0;
+ for (p = symstr, n = 1; *p; p++) {
+ if (*p == ':') {
+ if (n >= 3)
+ break;
+ *p = 0;
+ component[n++] = p+1;
+ }
+ }
+ if (*p != 0)
+ goto out;
+ line_number = 0;
+ n_name = n;
+ p = component[n-1];
+ if (*p >= '0' && *p <= '9') {
+ if (n == 1)
+ goto out;
+ for (line_number = 0; *p; p++) {
+ if (*p < '0' || *p > '9')
+ goto out;
+ line_number = line_number*10 + *p - '0';
+ }
+ n_name--;
+ } else if (n >= 3)
+ goto out;
+ if (n_name == 1) {
+ for (p = component[0]; *p && *p != '.'; p++);
+ if (*p == '.') {
+ file_name = component[0];
+ sym_name = 0;
+ } else {
+ file_name = 0;
+ sym_name = component[0];
+ }
+ } else {
+ file_name = component[0];
+ sym_name = component[1];
+ }
+ (void) func(symtab, file_name, sym_name, line_number, &found,
+ (char **)0, (int *)0);
+
+out:
+ while (--n >= 1)
+ component[n][-1] = ':';
+ return(found);
+}
+
+/*
+ * Does this symbol name appear in more than one symbol table?
+ * Used by db_symbol_values to decide whether to qualify a symbol.
+ */
+boolean_t db_qualify_ambiguous_names = TRUE;
+
+boolean_t
+db_symbol_is_ambiguous(char *name)
+{
+ register int i;
+ register
+ boolean_t found_once = FALSE;
+
+ if (!db_qualify_ambiguous_names)
+ return FALSE;
+
+ for (i = 0; i < db_nsymtab; i++) {
+ if (X_db_lookup(&db_symtabs[i], name)) {
+ if (found_once)
+ return TRUE;
+ found_once = TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/*
+ * Find the closest symbol to val, and return its name
+ * and the difference between val and the symbol found.
+ */ +unsigned int db_search_maxoff = 0x4000; +db_sym_t +db_search_task_symbol( + register db_addr_t val, + db_strategy_t strategy, + db_addr_t *offp, /* better be unsigned */ + task_t task) +{ + unsigned long diff, newdiff; + register int i; + db_symtab_t *sp; + db_sym_t ret = DB_SYM_NULL, sym; + vm_map_t map_for_val; + + if (task == TASK_NULL) + task = db_current_task(); + map_for_val = (task == TASK_NULL)? VM_MAP_NULL: task->map; +again: + newdiff = diff = ~0UL; + db_last_symtab = 0; + for (sp = &db_symtabs[0], i = 0; + i < db_nsymtab; + sp++, i++) { + if (((vm_map_t)sp->map_pointer == VM_MAP_NULL || + (vm_map_t)sp->map_pointer == map_for_val) && + (sp->maxsym == 0 || + ((unsigned long) val >= sp->minsym && + (unsigned long) val <= sp->maxsym))) { + sym = X_db_search_symbol(sp, val, strategy, + (db_expr_t *)&newdiff); + if (newdiff < diff) { + db_last_symtab = sp; + diff = newdiff; + ret = sym; + if (diff <= db_search_maxoff) + break; + } + } + } + if (ret == DB_SYM_NULL && map_for_val != VM_MAP_NULL) { + map_for_val = VM_MAP_NULL; + goto again; + } + *offp = diff; + return ret; +} + +/* + * Find the closest symbol to val, and return its name + * and the difference between val and the symbol found. + * Also return the filename and linenumber if available. + */ +db_sym_t +db_search_task_symbol_and_line( + register db_addr_t val, + db_strategy_t strategy, + db_expr_t *offp, + char **filenamep, + int *linenump, + task_t task, + int *argsp) +{ + unsigned long diff, newdiff; + register int i; + db_symtab_t *sp; + db_sym_t ret = DB_SYM_NULL, sym; + vm_map_t map_for_val; + char *func; + char *filename; + int linenum; + int args; + + if (task == TASK_NULL) + task = db_current_task(); + map_for_val = (task == TASK_NULL)? 
VM_MAP_NULL: task->map; + *filenamep = (char *) 0; + *linenump = 0; + *argsp = -1; + again: + filename = (char *) 0; + linenum = 0; + newdiff = diff = ~0UL; + db_last_symtab = 0; + for (sp = &db_symtabs[0], i = 0; + i < db_nsymtab; + sp++, i++) { + if (((vm_map_t)sp->map_pointer == VM_MAP_NULL || + (vm_map_t)sp->map_pointer == map_for_val) && + (sp->maxsym == 0 || + ((unsigned long) val >= sp->minsym && + (unsigned long) val <= sp->maxsym))) { + sym = X_db_search_by_addr(sp, val, &filename, &func, + &linenum, (db_expr_t *)&newdiff, + &args); + if (sym && newdiff < diff) { + db_last_symtab = sp; + diff = newdiff; + ret = sym; + *filenamep = filename; + *linenump = linenum; + *argsp = args; + if (diff <= db_search_maxoff) + break; + } + } + } + if (ret == DB_SYM_NULL && map_for_val != VM_MAP_NULL) { + map_for_val = VM_MAP_NULL; + goto again; + } + *offp = diff; + if (*filenamep) + db_shorten_filename(filenamep); + return ret; +} + +/* + * Return name and value of a symbol + */ +void +db_symbol_values( + db_symtab_t *stab, + db_sym_t sym, + char **namep, + db_expr_t *valuep) +{ + db_expr_t value; + char *name; + + if (sym == DB_SYM_NULL) { + *namep = 0; + return; + } + if (stab == 0) + stab = db_last_symtab; + + X_db_symbol_values(stab, sym, &name, &value); + + if (db_symbol_is_ambiguous(name)) { + *namep = db_qualify(name, db_last_symtab->name); + }else { + *namep = name; + } + if (valuep) + *valuep = value; +} + + +/* + * Print a the closest symbol to value + * + * After matching the symbol according to the given strategy + * we print it in the name+offset format, provided the symbol's + * value is close enough (eg smaller than db_maxoff). + * We also attempt to print [filename:linenum] when applicable + * (eg for procedure names). + * + * If we could not find a reasonable name+offset representation, + * then we just print the value in hex. Small values might get + * bogus symbol associations, e.g. 
3 might get some absolute + * value like _INCLUDE_VERSION or something, therefore we do + * not accept symbols whose value is zero (and use plain hex). + */ + +void +db_task_printsym( + db_expr_t off, + db_strategy_t strategy, + task_t task) +{ + db_addr_t d; + char *filename; + char *name; + db_expr_t value; + int linenum; + db_sym_t cursym; + + if (off >= db_maxval || off < db_minval) { + db_printf("%#n", off); + return; + } + cursym = db_search_task_symbol(off, strategy, &d, task); + + db_symbol_values(0, cursym, &name, &value); + if (name == 0 || d >= db_maxoff || value == 0) { + db_printf("%#n", off); + return; + } + db_printf("%s", name); + if (d) + db_printf("+0x%x", d); + if (strategy == DB_STGY_PROC) { + if (db_line_at_pc(cursym, &filename, &linenum, off)) { + db_printf(" [%s", filename); + if (linenum > 0) + db_printf(":%d", linenum); + db_printf("]"); + } + } +} + +/* + * Return symbol name for a given offset and + * change the offset to be relative to this symbol. + * Very usefull for xpr, when you want to log offsets + * in a user friendly way. + */ + +char null_sym[] = ""; + +char * +db_get_sym(db_expr_t *off) +{ + db_sym_t cursym; + db_expr_t value; + char *name; + db_addr_t d; + + cursym = db_search_symbol(*off, DB_STGY_ANY, &d); + db_symbol_values(0, cursym, &name, &value); + if (name) + *off = d; + else + name = null_sym; + return(name); +} + +void +db_printsym( + db_expr_t off, + db_strategy_t strategy) +{ + db_task_printsym(off, strategy, TASK_NULL); +} + +int db_short_filename = 1; + +void +db_shorten_filename(char **filenamep) +{ + char *cp, *cp_slash; + + if (! 
*filenamep) + return; + for (cp = cp_slash = *filenamep; *cp; cp++) { + if (*cp == '/') + cp_slash = cp; + } + if (*cp_slash == '/') + *filenamep = cp_slash+1; +} + +int +db_task_getlinenum( + db_expr_t off, + task_t task) +{ + db_addr_t d; + char *filename; + char *name; + db_expr_t value; + int linenum; + db_sym_t cursym; + db_strategy_t strategy = DB_STGY_PROC; + + if (off >= db_maxval || off < db_minval) { + db_printf("%#n", off); + return(-1); + } + cursym = db_search_task_symbol(off, strategy, &d, task); + + db_symbol_values(0, cursym, &name, &value); + if (name == 0 || d >= db_maxoff || value == 0) { + return(-1); + } + if (db_line_at_pc(cursym, &filename, &linenum, off)) + return(linenum); + else + return(-1); +} + +boolean_t +db_line_at_pc( + db_sym_t sym, + char **filename, + int *linenum, + db_expr_t pc) +{ + boolean_t result; + + if (db_last_symtab == 0) + return FALSE; + if (X_db_line_at_pc( db_last_symtab, sym, filename, linenum, pc)) { + if (db_short_filename) + db_shorten_filename(filename); + result = TRUE; + } else + result = FALSE; + return(result); +} + +int qsort_check = 0; + +void +db_qsort( + char *table, + int nbelts, + int eltsize, + int (*compfun)(char *, char *)) +{ + if (nbelts <= 0 || eltsize <= 0 || compfun == 0) { + printf("qsort: invalid parameters\n"); + return; + } + qsort_recur(table, table + nbelts * eltsize, eltsize, compfun); + + if (qsort_check) + qsort_checker(table, nbelts, eltsize, compfun); +} + +void +qsort_swap( + register int *a, + register int *b, + register int size) +{ + register int temp; + char *aa, *bb; + char ctemp; + + for (; size >= sizeof (int); size -= sizeof (int), a++, b++) { + temp = *a; + *a = *b; + *b = temp; + } + aa = (char *)a; + bb = (char *)b; + for (; size > 0; size--, aa++, bb++) { + ctemp = *aa; + *aa = *bb; + *bb = ctemp; + } +} + +/* rotate the three elements to the left */ +void +qsort_rotate( + register int *a, + register int *b, + register int *c, + register int size) +{ + register int temp; 
+ char *aa, *bb, *cc; + char ctemp; + + for (; size >= sizeof (int); size -= sizeof (int), a++, b++, c++) { + temp = *a; + *a = *c; + *c = *b; + *b = temp; + } + aa = (char *)a; + bb = (char *)b; + cc = (char *)c; + for (; size > 0; size--, aa++, bb++, cc++) { + ctemp = *aa; + *aa = *cc; + *cc = *bb; + *bb = ctemp; + } +} + +void +qsort_recur( + char *left, + char *right, + int eltsize, + int (*compfun)(char *, char *)) +{ + char *i, *j; + char *sameleft, *sameright; + + top: + if (left + eltsize - 1 >= right) { + return; + } + + /* partition element (reference for "same"ness */ + sameleft = left + (((right - left) / eltsize) / 2) * eltsize; + sameright = sameleft; + + i = left; + j = right - eltsize; + + again: + while (i < sameleft) { + int comp; + + comp = (*compfun)(i, sameleft); + if (comp == 0) { + /* + * Move to the "same" partition. + */ + /* + * Shift the left part of the "same" partition to + * the left, so that "same" elements stay in their + * original order. + */ + sameleft -= eltsize; + qsort_swap((int *) i, (int *) sameleft, eltsize); + } else if (comp < 0) { + /* + * Stay in the "left" partition. + */ + i += eltsize; + } else { + /* + * Should be moved to the "right" partition. + * Wait until the next loop finds an appropriate + * place to store this element. + */ + break; + } + } + + while (j > sameright) { + int comp; + + comp = (*compfun)(sameright, j); + if (comp == 0) { + /* + * Move to the right of the "same" partition. + */ + sameright += eltsize; + qsort_swap((int *) sameright, (int *) j, eltsize); + } else if (comp > 0) { + /* + * Move to the "left" partition. + */ + if (i == sameleft) { + /* + * Unfortunately, the "left" partition + * has already been fully processed, so + * we have to shift the "same" partition + * to the right to free a "left" element. + * This is done by moving the leftest same + * to the right of the "same" partition. 
+ */ + sameright += eltsize; + qsort_rotate((int *) sameleft, (int*) sameright, + (int *) j, eltsize); + sameleft += eltsize; + i = sameleft; + } else { + /* + * Swap with the "left" partition element + * waiting to be moved to the "right" + * partition. + */ + qsort_swap((int *) i, (int *) j, eltsize); + j -= eltsize; + /* + * Go back to the 1st loop. + */ + i += eltsize; + goto again; + } + } else { + /* + * Stay in the "right" partition. + */ + j -= eltsize; + } + } + + if (i != sameleft) { + /* + * The second loop completed (the"right" partition is ok), + * but we have to go back to the first loop, and deal with + * the element waiting for a place in the "right" partition. + * Let's shift the "same" zone to the left. + */ + sameleft -= eltsize; + qsort_rotate((int *) sameright, (int *) sameleft, (int *) i, + eltsize); + sameright -= eltsize; + j = sameright; + /* + * Go back to 1st loop. + */ + goto again; + } + + /* + * The partitions are correct now. Recur on the smallest side only. + */ + if (sameleft - left >= right - (sameright + eltsize)) { + qsort_recur(sameright + eltsize, right, eltsize, compfun); + /* + * The "right" partition is now completely sorted. + * The "same" partition is OK, so... + * Ignore them, and start the loops again on the + * "left" partition. + */ + right = sameleft; + goto top; + } else { + qsort_recur(left, sameleft, eltsize, compfun); + /* + * The "left" partition is now completely sorted. + * The "same" partition is OK, so ... + * Ignore them, and start the loops again on the + * "right" partition. 
+ */ + left = sameright + eltsize; + goto top; + } +} + +void +qsort_checker( + char *table, + int nbelts, + int eltsize, + int (*compfun)(char *, char *)) +{ + char *curr, *prev, *last; + + prev = table; + curr = prev + eltsize; + last = table + (nbelts * eltsize); + + while (prev < last) { + if ((*compfun)(prev, curr) > 0) { + printf("**** qsort_checker: error between 0x%x and 0x%x!!!\n", prev, curr); + break; + } + prev = curr; + curr += eltsize; + } + printf("qsort_checker: OK\n"); +} + +int qsort_search_debug = 0; + +void +db_qsort_limit_search( + char *target, + char **start, + char **end, + int eltsize, + int (*compfun)(char *, char *)) +{ + register char *left, *right; + char *oleft, *oright, *part; + int nbiter = 0; + int comp; + + oleft = left = *start; + oright = right = *end; + part = (char *) 0; + + while (left < right) { + nbiter++; + part = left + (((right - left) / eltsize) / 2) * eltsize; + comp = (*compfun)(target, part); + if (comp > 0) { + oleft = left; + oright = right; + left = part; + if (left == oleft) + break; + if (qsort_search_debug > 1) + printf(" [ Moved left from 0x%x to 0x%x]\n", + oleft, left); + } else if (comp < 0) { + oright = right; + oleft = left; + right = part; + if (qsort_search_debug > 1) + printf(" [ Moved right from 0x%x to 0x%x]\n", + oright, right); + } else { + if (qsort_search_debug > 1) + printf(" [ FOUND! 
left=0x%x right=0x%x]\n", + left, right); + for (left = part; + left > *start && (*compfun)(left, part) == 0; + left -= eltsize); + for (right = part + eltsize; + right < *end && (*compfun)(right, part) == 0; + right += eltsize); + oright = right; + oleft = left; + break; + } + } + + if (qsort_search_debug) + printf("[ Limited from %x-%x to %x-%x in %d iters ]\n", + *start, *end, oleft, oright, nbiter); + *start = oleft; + *end = oright; +} + +void +bubble_sort( + char *table, + int nbelts, + int eltsize, + int (*compfun)(char *, char *)) +{ + boolean_t sorted; + char *end; + register char *p; + + end = table + ((nbelts-1) * eltsize); + do { + sorted = TRUE; + for (p = table; p < end; p += eltsize) { + if ((*compfun)(p, p + eltsize) > 0) { + qsort_swap((int *) p, (int *) (p + eltsize), + eltsize); + sorted = FALSE; + } + } + } while (sorted == FALSE); + + if (qsort_check) + qsort_checker(table, nbelts, eltsize, compfun); +} + +vm_offset_t vm_min_inks_addr = VM_MAX_KERNEL_ADDRESS; + +void +db_install_inks( + vm_offset_t base) +{ + /* save addr to demarcate kernel/inks boundary (1st time only) */ + if (vm_min_inks_addr == VM_MAX_KERNEL_ADDRESS) { + vm_min_inks_addr = base; + db_qualify_ambiguous_names = TRUE; + } +} + + +void +db_clone_symtabXXX( + char *clonee, /* which symtab to clone */ + char *cloner, /* in-kernel-server name */ + vm_offset_t base) /* base address of cloner */ +{ + db_symtab_t *st, *st_src; + char * memp; + vm_size_t size; + long offset; + extern vm_offset_t kalloc(vm_size_t); + extern void db_clone_offsetXXX(char *, long); + + if (db_nsymtab >= MAXNOSYMTABS) { + db_printf("db_clone_symtab: Too Many Symbol Tables\n"); + return; + } + + db_install_inks(base); + + st = &db_symtabs[db_nsymtab]; /* destination symtab */ + if ((st_src = db_symtab_cloneeXXX(clonee)) == 0) { + db_printf("db_clone_symtab: clonee (%s) not found\n", clonee); + return; + } + /* alloc new symbols */ + size = (vm_size_t)(st_src->end - st_src->private); + memp = (char 
*)kalloc( round_page(size) ); + if (!memp) { + db_printf("db_clone_symtab: no memory for symtab\n"); + return; + } + + *st = *st_src; /* bulk copy src -> dest */ + strcpy(st->name, cloner); /* new name */ + st->private = memp; /* copy symbols */ + bcopy((const char *)st_src->private, st->private, size); + st->start = memp + sizeof(int); /* fixup pointers to symtab */ + st->end = memp + *(int *)memp; + st->map_pointer = 0; /* no map because kernel-loaded */ + + /* Offset symbols, leaving strings pointing into st_src */ + offset = base - st_src->minsym; + st->minsym += offset; + st->maxsym += offset; + db_clone_offsetXXX(memp, offset); + db_nsymtab++; + + db_printf( "[ cloned symbol table for %s: range 0x%x to 0x%x %s]\n", + st->name, st->minsym, st->maxsym, + st->sorted ? "(sorted) " : ""); + db_maxval = (unsigned int)st->maxsym + db_maxoff; +} + +db_symtab_t * +db_symtab_cloneeXXX( + char *clonee) +{ + db_symtab_t *st, *st_src; + + st = &db_symtabs[db_nsymtab]; /* destination symtab */ + for (st_src = &db_symtabs[0]; st_src < st; ++st_src) + if (!strcmp(clonee, st_src->name)) + break; + return ((st_src < st) ? 
st_src : 0); +} + +/* + * Switch into symbol-table specific routines + */ + +#if !defined(__alpha) && !defined(INTEL860) +#define DB_NO_COFF +#endif + +#ifndef DB_NO_AOUT +#include +#endif + +#ifndef DB_NO_COFF +#include +#endif + +static void no_init(void) + +{ + db_printf("Non-existent code for ddb init\n"); +} + +static boolean_t no_sym_init( + char *start, + char *end, + char *name, + char *task_addr) +{ + db_printf("Non-existent code for init of symtab %s\n", name); + return FALSE; +} + +static db_sym_t no_lookup( + db_symtab_t *stab, + char *symstr) +{ + db_printf("Bogus lookup of symbol %s\n", symstr); + return DB_SYM_NULL; +} + +static db_sym_t no_search( + db_symtab_t *stab, + db_addr_t off, + db_strategy_t strategy, + db_expr_t *diffp) +{ + db_printf("Bogus search for offset %#Xn", off); + return DB_SYM_NULL; +} + +static boolean_t no_line_at_pc( + db_symtab_t *stab, + db_sym_t sym, + char **file, + int *line, + db_expr_t pc) +{ + db_printf("Bogus search for pc %#X\n", pc); + return FALSE; +} + +static void no_symbol_values( + db_sym_t sym, + char **namep, + db_expr_t *valuep) +{ + db_printf("Bogus symbol value resolution\n"); + if (namep) *namep = NULL; + if (valuep) *valuep = 0; +} + +static db_sym_t no_search_by_addr( + db_symtab_t *stab, + db_addr_t off, + char **file, + char **func, + int *line, + db_expr_t *diffp, + int *args) +{ + db_printf("Bogus search for address %#X\n", off); + return DB_SYM_NULL; +} + +int +no_print_completion( + db_symtab_t *stab, + char *symstr ) +{ + db_printf("Bogus print completion: not supported\n"); + return 0; +} + +int +no_lookup_incomplete( + db_symtab_t *stab, + char *symstr, + char **name, + int *len, + int *toadd) +{ + db_printf("Bogus lookup incomplete: not supported\n"); + return 0; +} + +#define NONE \ + { no_init, no_sym_init, no_lookup, no_search, \ + no_line_at_pc, no_symbol_values, no_search_by_addr, \ + no_print_completion, no_lookup_incomplete} + +struct db_sym_switch x_db[] = { + + /* BSD a.out format 
(really, sdb/dbx(1) symtabs) */ +#ifdef DB_NO_AOUT + NONE, +#else /* DB_NO_AOUT */ + { aout_db_init, aout_db_sym_init, aout_db_lookup, aout_db_search_symbol, + aout_db_line_at_pc, aout_db_symbol_values, aout_db_search_by_addr, + aout_db_print_completion, aout_db_lookup_incomplete}, +#endif /* DB_NO_AOUT */ + +#ifdef DB_NO_COFF + NONE, +#else /* DB_NO_COFF */ + { coff_db_init, coff_db_sym_init, coff_db_lookup, coff_db_search_symbol, + coff_db_line_at_pc, coff_db_symbol_values, coff_db_search_by_addr, + coff_db_print_completion, coff_db_lookup_incomplete }, +#endif /* DB_NO_COFF */ + + /* Machdep, not inited here */ + NONE +}; diff --git a/osfmk/ddb/db_sym.h b/osfmk/ddb/db_sym.h new file mode 100644 index 000000000..e5d19a4d9 --- /dev/null +++ b/osfmk/ddb/db_sym.h @@ -0,0 +1,488 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.20.6 1996/01/09 19:16:22 devrcs + * Add proto for db_task_getlinenum(). + * [1995/12/01 21:42:34 jfraser] + * + * Revision 1.2.20.5 1995/02/28 01:58:53 dwm + * Merged with changes from 1.2.20.4 + * [1995/02/28 01:53:54 dwm] + * + * mk6 CR1120 - Merge mk6pro_shared into cnmk_shared + * [1995/02/28 01:12:57 dwm] + * + * Revision 1.2.20.4 1995/02/23 21:43:48 alanl + * Prepend a "db_" to qsort and qsort_limit_search + * (collisions with the real qsort in stdlib.h) + * [95/02/14 travos] + * + * Expanded db_sym_switch structure to make ddb object format dependent; + * this allows us to remove all of the aout dependencies. + * [95/01/24 sjs] + * + * Revision 1.2.23.4 1994/12/22 20:36:20 bolinger + * Fix ri-osc CR881: Fixed glitch in use of symtab cloning hack. + * [1994/12/22 20:35:17 bolinger] + * + * Revision 1.2.23.3 1994/11/02 18:36:07 dwm + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup, prototypes + * fix X_db_search_by_addr macro to match prototype + * [1994/11/02 18:16:20 dwm] + * + * Revision 1.2.20.4 1995/02/23 21:43:48 alanl + * Prepend a "db_" to qsort and qsort_limit_search + * (collisions with the real qsort in stdlib.h) + * [95/02/14 travos] + * + * Expanded db_sym_switch structure to make ddb object format dependent; + * this allows us to remove all of the aout dependencies. + * [95/01/24 sjs] + * + * Revision 1.2.23.4 1994/12/22 20:36:20 bolinger + * Fix ri-osc CR881: Fixed glitch in use of symtab cloning hack. 
+ * [1994/12/22 20:35:17 bolinger] + * + * Revision 1.2.23.3 1994/11/02 18:36:07 dwm + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup, prototypes + * fix X_db_search_by_addr macro to match prototype + * [1994/11/02 18:16:20 dwm] + * + * Revision 1.2.20.2 1994/09/23 01:21:51 ezf + * change marker to not FREE + * [1994/09/22 21:11:04 ezf] + * + * Revision 1.2.20.1 1994/06/11 21:12:25 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:04:14 bolinger] + * + * Revision 1.2.14.1 1994/02/08 10:58:56 bernadat + * Added db_sym_print_completion + * db_sym_parse_and_lookup_incomplete + * db_sym_parse_and_print_completion + * db_print_completion + * db_lookup_incomplete + * ddb_init + * prototypes + * + * Changed func type to db_sym_parse_and_lookup prototype + * + * Added definition of db_maxoff. + * [93/08/12 paire] + * [94/02/07 bernadat] + * + * Revision 1.2.18.1 1994/06/08 19:11:28 dswartz + * Preemption merge. + * [1994/06/08 19:10:27 dswartz] + * + * Revision 1.2.17.2 1994/06/01 21:34:50 klj + * Initial preemption code base merge + * + * Revision 1.2.4.3 1993/07/27 18:28:12 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:02 elliston] + * + * Revision 1.2.4.2 1993/06/09 02:20:56 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:18 jeffc] + * + * Revision 1.2 1993/04/19 16:03:18 devrcs + * Added 3 new fields in db_symtab_t for sorting. + * [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.1 1992/09/30 02:24:22 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/10/09 16:02:45 af + * Revision 2.5.1.1 91/10/05 13:07:39 jeffreyh + * Added macro definitions of db_find_task_sym_and_offset(), + * db_find_xtrn_task_sym_and_offset(), db_search_symbol(). + * [91/08/29 tak] + * + * Revision 2.5.1.1 91/10/05 13:07:39 jeffreyh + * Added macro definitions of db_find_task_sym_and_offset(), + * db_find_xtrn_task_sym_and_offset(), db_search_symbol(). 
+ * [91/08/29 tak] + * + * Revision 2.5 91/07/31 17:31:49 dbg + * Add map pointer and storage for name to db_symtab_t. + * [91/07/30 16:45:08 dbg] + * + * Revision 2.4 91/05/14 15:36:08 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:07:12 mrt + * Changed to new Mach copyright + * [91/01/31 16:19:27 mrt] + * + * Revision 2.2 90/08/27 21:52:39 dbg + * Changed type of db_sym_t to char * - it's a better type for an + * opaque pointer. + * [90/08/22 dbg] + * + * Created. + * [90/08/19 af] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * Author: Alessandro Forin, Carnegie Mellon University + * Date: 8/90 + */ + +#ifndef _DDB_DB_SYM_H_ +#define _DDB_DB_SYM_H_ + +#include +#include +#include +#include + +/* + * This module can handle multiple symbol tables, + * of multiple types, at the same time + */ +#define SYMTAB_NAME_LEN 32 + +typedef struct { + int type; +#define SYMTAB_AOUT 0 +#define SYMTAB_COFF 1 +#define SYMTAB_MACHDEP 2 + char *start; /* symtab location */ + char *end; + char *private; /* optional machdep pointer */ + char *map_pointer; /* symbols are for this map only, + if not null */ + char name[SYMTAB_NAME_LEN]; + /* symtab name */ + unsigned long minsym; /* lowest symbol value */ + unsigned long maxsym; /* highest symbol value */ + boolean_t sorted; /* is this table sorted ? */ +} db_symtab_t; + +extern db_symtab_t *db_last_symtab; /* where last symbol was found */ + +/* + * Symbol representation is specific to the symtab style: + * BSD compilers use dbx' nlist, other compilers might use + * a different one + */ +typedef void * db_sym_t; /* opaque handle on symbols */ +#define DB_SYM_NULL ((db_sym_t)0) + +/* + * Non-stripped symbol tables will have duplicates, for instance + * the same string could match a parameter name, a local var, a + * global var, etc. + * We are most concern with the following matches. + */ +typedef int db_strategy_t; /* search strategy */ + +#define DB_STGY_ANY 0 /* anything goes */ +#define DB_STGY_XTRN 1 /* only external symbols */ +#define DB_STGY_PROC 2 /* only procedures */ + +extern boolean_t db_qualify_ambiguous_names; + /* if TRUE, check across symbol tables + * for multiple occurrences of a name. + * Might slow down quite a bit */ + +extern unsigned long db_maxoff; + +/* Prototypes for functions exported by this module. 
+ */ +extern boolean_t db_add_symbol_table( + int type, + char *start, + char *end, + char *name, + char *ref, + char *map_pointer, + unsigned long minsym, + unsigned long maxsym, + boolean_t sorted); + +extern void db_install_inks( + vm_offset_t base); + +extern boolean_t db_value_of_name( + char *name, + db_expr_t *valuep); + +extern db_sym_t db_lookup(char *symstr); + +extern char * db_get_sym( + db_expr_t * off); + +extern db_sym_t db_sym_parse_and_lookup( + int (*func)(db_symtab_t *, + char *, + char *, + int, + db_sym_t*, + char **, + int *), + db_symtab_t *symtab, + char *symstr); + +extern int db_sym_parse_and_lookup_incomplete( + int (*func)(db_symtab_t *, + char *, + char *, + int, + db_sym_t*, + char **, + int *), + db_symtab_t *symtab, + char *symstr, + char **name, + int *len, + int *toadd); + +extern int db_sym_parse_and_print_completion( + int (*func)(db_symtab_t *, + char *), + db_symtab_t *symtab, + char *symstr); + +extern db_sym_t db_search_task_symbol( + db_addr_t val, + db_strategy_t strategy, + db_addr_t *offp, + task_t task); + +extern db_sym_t db_search_task_symbol_and_line( + db_addr_t val, + db_strategy_t strategy, + db_expr_t *offp, + char **filenamep, + int *linenump, + task_t task, + int *argsp); + +extern void db_symbol_values( + db_symtab_t *stab, + db_sym_t sym, + char **namep, + db_expr_t *valuep); + +extern void db_task_printsym( + db_expr_t off, + db_strategy_t strategy, + task_t task); + +extern void db_printsym( + db_expr_t off, + db_strategy_t strategy); + +extern boolean_t db_line_at_pc( + db_sym_t sym, + char **filename, + int *linenum, + db_expr_t pc); + +extern void db_qsort( + char *table, + int nbelts, + int eltsize, + int (*compfun)(char *, char *)); + +extern void db_qsort_limit_search( + char *target, + char **start, + char **end, + int eltsize, + int (*compfun)(char *, char *)); + +extern void db_sym_print_completion( + db_symtab_t *stab, + char *name, + int function, + char *fname, + int line); + +extern void 
db_print_completion( + char *symstr); + +extern int db_lookup_incomplete( + char *symstr, + int symlen); + +extern void ddb_init(void); + +extern void db_machdep_init(void); + +extern void db_clone_symtabXXX(char *, char *, vm_offset_t); + +extern db_symtab_t *db_symtab_cloneeXXX(char *); + +extern db_task_getlinenum( db_expr_t, task_t); + +/* Some convenience macros. + */ +#define db_find_sym_and_offset(val,namep,offp) \ + db_symbol_values(0, db_search_symbol(val,DB_STGY_ANY,offp),namep,0) + /* find name&value given approx val */ + +#define db_find_xtrn_sym_and_offset(val,namep,offp) \ + db_symbol_values(0, db_search_symbol(val,DB_STGY_XTRN,offp),namep,0) + /* ditto, but no locals */ + +#define db_find_task_sym_and_offset(val,namep,offp,task) \ + db_symbol_values(0, db_search_task_symbol(val,DB_STGY_ANY,offp,task), \ + namep, 0) /* find name&value given approx val */ + +#define db_find_xtrn_task_sym_and_offset(val,namep,offp,task) \ + db_symbol_values(0, db_search_task_symbol(val,DB_STGY_XTRN,offp,task), \ + namep,0) /* ditto, but no locals */ + +#define db_search_symbol(val,strgy,offp) \ + db_search_task_symbol(val,strgy,offp,0) + /* find symbol in current task */ + +/* + * Symbol table switch, defines the interface + * to symbol-table specific routines. 
+ */ + +extern struct db_sym_switch { + + void (*init)(void); + + boolean_t (*sym_init)( + char *start, + char *end, + char *name, + char *task_addr + ); + + db_sym_t (*lookup)( + db_symtab_t *stab, + char *symstr + ); + db_sym_t (*search_symbol)( + db_symtab_t *stab, + db_addr_t off, + db_strategy_t strategy, + db_expr_t *diffp + ); + + boolean_t (*line_at_pc)( + db_symtab_t *stab, + db_sym_t sym, + char **file, + int *line, + db_expr_t pc + ); + + void (*symbol_values)( + db_sym_t sym, + char **namep, + db_expr_t *valuep + ); + db_sym_t (*search_by_addr)( + db_symtab_t *stab, + db_addr_t off, + char **file, + char **func, + int *line, + db_expr_t *diffp, + int *args + ); + + int (*print_completion)( + db_symtab_t *stab, + char *symstr + ); + + int (*lookup_incomplete)( + db_symtab_t *stab, + char *symstr, + char **name, + int *len, + int *toadd + ); +} x_db[]; + +#ifndef symtab_type +#define symtab_type(s) SYMTAB_AOUT +#endif + +#define X_db_init() x_db[symtab_type(s)].init() +#define X_db_sym_init(s,e,n,t) x_db[symtab_type(s)].sym_init(s,e,n,t) +#define X_db_lookup(s,n) x_db[(s)->type].lookup(s,n) +#define X_db_search_symbol(s,o,t,d) x_db[(s)->type].search_symbol(s,o,t,d) +#define X_db_line_at_pc(s,p,f,l,a) x_db[(s)->type].line_at_pc(s,p,f,l,a) +#define X_db_symbol_values(s,p,n,v) x_db[(s)->type].symbol_values(p,n,v) +#define X_db_search_by_addr(s,a,f,c,l,d,r) \ + x_db[(s)->type].search_by_addr(s,a,f,c,l,d,r) +#define X_db_print_completion(s,p) x_db[(s)->type].print_completion(s,p) +#define X_db_lookup_incomplete(s,p,n,l,t) \ + x_db[(s)->type].lookup_incomplete(s,p,n,l,t) + +#endif /* !_DDB_DB_SYM_H_ */ diff --git a/osfmk/ddb/db_task_thread.c b/osfmk/ddb/db_task_thread.c new file mode 100644 index 000000000..cf8f37ea4 --- /dev/null +++ b/osfmk/ddb/db_task_thread.c @@ -0,0 +1,418 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.16.3 1996/01/09 19:16:26 devrcs + * Make db_lookup_task_id() globally available (remove static). + * Changed declarations of 'register foo' to 'register int foo'. + * [1995/12/01 21:42:37 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:03:48 jfraser] + * + * Revision 1.1.16.2 1994/09/23 01:21:59 ezf + * change marker to not FREE + * [1994/09/22 21:11:09 ezf] + * + * Revision 1.1.16.1 1994/06/11 21:12:29 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:02:43 bolinger] + * + * Revision 1.1.14.1 1994/02/08 10:59:02 bernadat + * Added support of DB_VAR_SHOW. + * [93/08/12 paire] + * [94/02/08 bernadat] + * + * Revision 1.1.12.3 1994/03/17 22:35:35 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. 
+ * [1994/03/17 21:25:50 dwm] + * + * Revision 1.1.12.2 1994/01/17 18:08:54 dwm + * Add patchable integer force_act_lookup to force successful + * lookup, to allow stack trace on orphaned act/thread pairs. + * [1994/01/17 16:06:50 dwm] + * + * Revision 1.1.12.1 1994/01/12 17:50:52 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:13:23 dwm] + * + * Revision 1.1.3.3 1993/07/27 18:28:15 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:06 elliston] + * + * Revision 1.1.3.2 1993/06/02 23:12:39 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:24 jeffc] + * + * Revision 1.1 1992/09/30 02:01:27 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 91/10/09 16:03:04 af + * Revision 2.1.3.1 91/10/05 13:07:50 jeffreyh + * Created for task/thread handling. + * [91/08/29 tak] + * + * Revision 2.1.3.1 91/10/05 13:07:50 jeffreyh + * Created for task/thread handling. + * [91/08/29 tak] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include + +/* + * Following constants are used to prevent infinite loop of task + * or thread search due to the incorrect list. + */ +#define DB_MAX_TASKID 0x10000 /* max # of tasks */ +#define DB_MAX_THREADID 0x10000 /* max # of threads in a task */ +#define DB_MAX_PSETS 0x10000 /* max # of processor sets */ + +task_t db_default_task; /* default target task */ +thread_act_t db_default_act; /* default target thr_act */ + + + +/* Prototypes for functions local to this file. + */ +task_t db_lookup_task_id(register int task_id); + +static thread_act_t db_lookup_act_id( + task_t task, + register int thread_id); + + + +/* + * search valid task queue, and return the queue position as the task id + */ +int +db_lookup_task(task_t target_task) +{ + register task_t task; + register int task_id; + register processor_set_t pset = &default_pset; + register int npset = 0; + + task_id = 0; + if (npset++ >= DB_MAX_PSETS) + return(-1); + if (queue_first(&pset->tasks) == 0) + return(-1); + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + if (target_task == task) + return(task_id); + if (task_id++ >= DB_MAX_TASKID) + return(-1); + } + return(-1); +} + +/* + * search thread queue of the task, and return the queue position + */ +int +db_lookup_task_act( + task_t task, + thread_act_t target_act) +{ + register thread_act_t thr_act; + register int act_id; + + act_id = 0; + if (queue_first(&task->thr_acts) == 0) + return(-1); + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + 
if (target_act == thr_act) + return(act_id); + if (act_id++ >= DB_MAX_THREADID) + return(-1); + } + return(-1); +} + +/* + * search thr_act queue of every valid task, and return the queue position + * as the thread id. + */ +int +db_lookup_act(thread_act_t target_act) +{ + register int act_id; + register task_t task; + register processor_set_t pset = &default_pset; + register int ntask = 0; + register int npset = 0; + + if (npset++ >= DB_MAX_PSETS) + return(-1); + if (queue_first(&pset->tasks) == 0) + return(-1); + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + if (ntask++ > DB_MAX_TASKID) + return(-1); + if (task->thr_act_count == 0) + continue; + act_id = db_lookup_task_act(task, target_act); + if (act_id >= 0) + return(act_id); + } + return(-1); +} + +/* + * check the address is a valid thread address + */ +int force_act_lookup = 0; +boolean_t +db_check_act_address_valid(thread_act_t thr_act) +{ + if (!force_act_lookup && db_lookup_act(thr_act) < 0) { + db_printf("Bad thr_act address 0x%x\n", thr_act); + db_flush_lex(); + return(FALSE); + } else + return(TRUE); +} + +/* + * convert task_id(queue postion) to task address + */ +task_t +db_lookup_task_id(register task_id) +{ + register task_t task; + register processor_set_t pset = &default_pset; + register int npset = 0; + + if (task_id > DB_MAX_TASKID) + return(TASK_NULL); + if (npset++ >= DB_MAX_PSETS) + return(TASK_NULL); + if (queue_first(&pset->tasks) == 0) + return(TASK_NULL); + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + if (task_id-- <= 0) + return(task); + } + return(TASK_NULL); +} + +/* + * convert (task_id, act_id) pair to thr_act address + */ +static thread_act_t +db_lookup_act_id( + task_t task, + register int act_id) +{ + register thread_act_t thr_act; + + + if (act_id > DB_MAX_THREADID) + return(THR_ACT_NULL); + if (queue_first(&task->thr_acts) == 0) + return(THR_ACT_NULL); + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + if (act_id-- <= 0) + 
return(thr_act); + } + return(THR_ACT_NULL); +} + +/* + * get next parameter from a command line, and check it as a valid + * thread address + */ +boolean_t +db_get_next_act( + thread_act_t *actp, + int position) +{ + db_expr_t value; + thread_act_t thr_act; + + *actp = THR_ACT_NULL; + if (db_expression(&value)) { + thr_act = (thread_act_t) value; + if (!db_check_act_address_valid(thr_act)) { + db_flush_lex(); + return(FALSE); + } + } else if (position <= 0) { + thr_act = db_default_act; + } else + return(FALSE); + *actp = thr_act; + return(TRUE); +} + +/* + * check the default thread is still valid + * ( it is called in entering DDB session ) + */ +void +db_init_default_act(void) +{ + if (db_lookup_act(db_default_act) < 0) { + db_default_act = THR_ACT_NULL; + db_default_task = TASK_NULL; + } else + db_default_task = db_default_act->task; +} + +/* + * set or get default thread which is used when /t or :t option is specified + * in the command line + */ +int +db_set_default_act( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap) /* unused */ +{ + thread_act_t thr_act; + int task_id; + int act_id; + + if (flag == DB_VAR_SHOW) { + db_printf("%#n", db_default_act); + task_id = db_lookup_task(db_default_task); + if (task_id != -1) { + act_id = db_lookup_act(db_default_act); + if (act_id != -1) { + db_printf(" (task%d.%d)", task_id, act_id); + } + } + return(0); + } + + if (flag != DB_VAR_SET) { + *valuep = (db_expr_t) db_default_act; + return(0); + } + thr_act = (thread_act_t) *valuep; + if (thr_act != THR_ACT_NULL && !db_check_act_address_valid(thr_act)) + db_error(0); + /* NOTREACHED */ + db_default_act = thr_act; + if (thr_act) + db_default_task = thr_act->task; + return(0); +} + +/* + * convert $taskXXX[.YYY] type DDB variable to task or thread address + */ +int +db_get_task_act( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap) +{ + task_t task; + thread_act_t thr_act; + int task_id; + + if (flag == 
DB_VAR_SHOW) { + db_printf("%#n", db_default_task); + task_id = db_lookup_task(db_default_task); + if (task_id != -1) + db_printf(" (task%d)", task_id); + return(0); + } + + if (flag != DB_VAR_GET) { + db_error("Cannot set to $task variable\n"); + /* NOTREACHED */ + } + if ((task = db_lookup_task_id(ap->suffix[0])) == TASK_NULL) { + db_printf("no such task($task%d)\n", ap->suffix[0]); + db_error(0); + /* NOTREACHED */ + } + if (ap->level <= 1) { + *valuep = (db_expr_t) task; + return(0); + } + if ((thr_act = db_lookup_act_id(task, ap->suffix[1])) == THR_ACT_NULL){ + db_printf("no such thr_act($task%d.%d)\n", + ap->suffix[0], ap->suffix[1]); + db_error(0); + /* NOTREACHED */ + } + *valuep = (db_expr_t) thr_act; + return(0); +} diff --git a/osfmk/ddb/db_task_thread.h b/osfmk/ddb/db_task_thread.h new file mode 100644 index 000000000..d558ba543 --- /dev/null +++ b/osfmk/ddb/db_task_thread.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.9.1 1994/09/23 01:22:09 ezf + * change marker to not FREE + * [1994/09/22 21:11:13 ezf] + * + * Revision 1.1.7.4 1994/03/17 22:35:38 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:25:53 dwm] + * + * Revision 1.1.7.3 1994/02/03 21:44:27 bolinger + * Change a surviving current_thread() to current_act(). + * [1994/02/03 20:48:03 bolinger] + * + * Revision 1.1.7.2 1994/01/12 17:50:56 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:13:27 dwm] + * + * Revision 1.1.7.1 1994/01/05 19:28:18 bolinger + * Separate notions of "address space" and "task" (i.e., symbol table), + * via new macros db_current_space() and db_is_current_space(); also update + * db_target_space() to treat kernel-loaded tasks correctly. + * [1994/01/04 17:41:47 bolinger] + * + * Revision 1.1.2.4 1993/07/27 18:28:17 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:10 elliston] + * + * Revision 1.1.2.3 1993/06/07 22:06:58 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:57:35 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:12:46 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:32 jeffc] + * + * Revision 1.1 1992/09/30 02:24:23 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 91/10/09 16:03:18 af + * Revision 2.1.3.1 91/10/05 13:08:07 jeffreyh + * Created for task/thread handling. + * [91/08/29 tak] + * + * Revision 2.1.3.1 91/10/05 13:08:07 jeffreyh + * Created for task/thread handling. 
+ * [91/08/29 tak] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _DDB_DB_TASK_THREAD_H_ +#define _DDB_DB_TASK_THREAD_H_ + +#include +#include +#include +#include /* For db_var_aux_param_t */ + +/* + * On behalf of kernel-loaded tasks, distinguish between current task + * (=> symbol table) and current address space (=> where [e.g.] + * breakpoints are set). From ddb's perspective, kernel-loaded tasks + * can retain their own symbol tables, but share the kernel's address + * space. + */ +#define db_current_task() \ + ((current_act())? current_act()->task: TASK_NULL) +#define db_current_space() \ + ((current_act() && !current_act()->kernel_loaded)?\ + current_act()->task: TASK_NULL) +#define db_target_space(thr_act, user_space) \ + ((!(user_space) || ((thr_act) && (thr_act)->kernel_loaded))?\ + TASK_NULL: \ + (thr_act)? 
\ + (thr_act)->task: db_current_space()) +#define db_is_current_space(task) \ + ((task) == TASK_NULL || (task) == db_current_space()) + +extern task_t db_default_task; /* default target task */ +extern thread_act_t db_default_act; /* default target thr_act */ + + +/* Prototypes for functions exported by this module. + */ + +int db_lookup_act(thread_act_t target_act); + +int db_lookup_task(task_t target_task); + +int db_lookup_task_act( + task_t task, + thread_act_t target_act); + +boolean_t db_check_act_address_valid(thread_act_t thr_act); + +boolean_t db_get_next_act( + thread_act_t *actp, + int position); + +void db_init_default_act(void); + +int db_set_default_act( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap); + +int db_get_task_act( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap); + +#endif /* !_DDB_DB_TASK_THREAD_H_ */ diff --git a/osfmk/ddb/db_trap.c b/osfmk/ddb/db_trap.c new file mode 100644 index 000000000..2fcca75ba --- /dev/null +++ b/osfmk/ddb/db_trap.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Trap entry point to kernel debugger. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include +#include +#include +#include + +extern jmp_buf_t *db_recover; + +extern int db_inst_count; +extern int db_load_count; +extern int db_store_count; + +#if PARAGON860 && NCPUS > 1 +extern int db_first_cpu; +#endif + +void +db_task_trap( + int type, + int code, + boolean_t user_space) +{ + jmp_buf_t db_jmpbuf; + jmp_buf_t *prev; + boolean_t bkpt; + boolean_t watchpt; + task_t task; + task_t task_space; + + task = db_current_task(); + task_space = db_target_space(current_act(), user_space); + bkpt = IS_BREAKPOINT_TRAP(type, code); + watchpt = IS_WATCHPOINT_TRAP(type, code); + + /* + * Note: we look up PC values in an address space (task_space), + * but print symbols using a (task-specific) symbol table, found + * using task. + */ + db_init_default_act(); + db_check_breakpoint_valid(); + if (db_stop_at_pc(&bkpt, task, task_space)) { + if (db_inst_count) { + db_printf("After %d instructions (%d loads, %d stores),\n", + db_inst_count, db_load_count, db_store_count); + } + if (bkpt) + db_printf("Breakpoint at "); + else if (watchpt) + db_printf("Watchpoint at "); + else + db_printf("Stopped at "); + db_dot = PC_REGS(DDB_REGS); + + prev = db_recover; + if (_setjmp(db_recover = &db_jmpbuf) == 0) { +#if defined(__alpha) + db_print_loc(db_dot, task_space); + db_printf("\n\t"); + db_print_inst(db_dot, task_space); +#else /* !defined(__alpha) */ +#if defined(__ppc__) + db_print_loc_and_inst(db_dot, task_space); +#else /* __ppc__ */ + db_print_loc_and_inst(db_dot, task); +#endif /* __ppc__ */ +#endif /* defined(__alpha) */ + } else + db_printf("Trouble printing location %#X.\n", db_dot); + db_recover = prev; + + db_command_loop(); + } + + db_restart_at_pc(watchpt, task_space); +} diff --git a/osfmk/ddb/db_trap.h b/osfmk/ddb/db_trap.h new file mode 100644 index 000000000..60fad74b3 --- /dev/null +++ b/osfmk/ddb/db_trap.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2000 
Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:22:27 ezf + * change marker to not FREE + * [1994/09/22 21:11:20 ezf] + * + * Revision 1.1.2.4 1993/09/17 21:34:42 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:27 robert] + * + * Revision 1.1.2.3 1993/08/03 18:21:39 rod + * ANSI prototypes: prototype thread_kdb_return(). CR #9523. + * [1993/08/03 13:06:06 rod] + * + * Revision 1.1.2.2 1993/07/27 18:28:24 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:18 elliston] + * + * $EndLog$ + */ + +#ifndef _DDB_DB_TRAP_H_ +#define _DDB_DB_TRAP_H_ + +#include + + +/* Prototypes for functions exported by this module. 
+ */ + +void db_task_trap( + int type, + int code, + boolean_t user_space); + +void db_trap( + int type, + int code); + +/* Other exported prototypes + */ + +void thread_kdb_return(void); + +#endif /* !_DDB_DB_TRAP_H_ */ diff --git a/osfmk/ddb/db_variables.c b/osfmk/ddb/db_variables.c new file mode 100644 index 000000000..8e45a4a42 --- /dev/null +++ b/osfmk/ddb/db_variables.c @@ -0,0 +1,819 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.18.5 1996/01/09 19:16:34 devrcs + * Search the alternate register names if configured + * Changed declarations of 'register foo' to 'register int foo'. + * [1995/12/01 21:42:42 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:03:56 jfraser] + * + * Revision 1.2.18.4 1995/02/23 21:43:56 alanl + * Merged with DIPC2_SHARED. 
+ * [1995/01/05 13:35:55 alanl] + * + * Revision 1.2.21.1 1994/11/04 09:53:26 dwm + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.4.6 1994/05/06 18:40:13 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. + * * End1.3merge + * [1994/11/04 08:50:12 dwm] + * + * Revision 1.2.18.2 1994/09/23 01:22:35 ezf + * change marker to not FREE + * [1994/09/22 21:11:24 ezf] + * + * Revision 1.2.18.1 1994/06/11 21:12:37 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:03:04 bolinger] + * + * Revision 1.2.23.1 1994/12/06 19:43:18 alanl + * Intel merge, Oct 94 code drop. + * Added db_find_reg_name (came from db_print.c). + * [94/11/28 mmp] + * + * Revision 1.2.16.1 1994/02/08 10:59:08 bernadat + * Added completion variable. + * [93/08/17 paire] + * + * Set up new fields (hidden_xxx) of db_vars[] array that are supposed + * to be helpful to display variables depending on an internal value + * like db_macro_level for macro arguments. + * Added db_auto_wrap as new variable. + * Added "set help" for listing all available variables. + * Added db_show_variable() and db_show_one_variable() + * to print variable values. + * [93/08/12 paire] + * [94/02/08 bernadat] + * + * Revision 1.2.4.4 1993/08/11 20:38:20 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:34:13 elliston] + * + * Revision 1.2.4.3 1993/07/27 18:28:27 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:22 elliston] + * + * Revision 1.2.4.2 1993/06/09 02:21:02 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:43 jeffc] + * + * Revision 1.2 1993/04/19 16:03:25 devrcs + * Changes from mk78: + * Added void to db_read_write_variable(). + * Removed unused variable 'func' from db_set_cmd(). + * [92/05/16 jfriedl] + * [93/02/02 bruel] + * + * Print old value when changing register values. 
+ * [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.1 1992/09/30 02:01:31 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:03:59 af + * Revision 2.4.3.1 91/10/05 13:08:27 jeffreyh + * Added suffix handling and thread handling of variables. + * Added new variables: lines, task, thread, work and arg. + * Moved db_read_variable and db_write_variable to db_variables.h + * as macros, and added db_read_write_variable instead. + * Changed some error messages. + * [91/08/29 tak] + * + * Revision 2.4.3.1 91/10/05 13:08:27 jeffreyh + * Added suffix handling and thread handling of variables. + * Added new variables: lines, task, thread, work and arg. + * Moved db_read_variable and db_write_variable to db_variables.h + * as macros, and added db_read_write_variable instead. + * Changed some error messages. + * [91/08/29 tak] + * + * Revision 2.4 91/05/14 15:36:57 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:07:19 mrt + * Changed to new Mach copyright + * [91/01/31 16:19:46 mrt] + * + * Revision 2.2 90/08/27 21:53:24 dbg + * New db_read/write_variable functions. Should be used instead + * of dereferencing valuep directly, which might not be a true + * pointer if there is an fcn() access function. + * [90/08/20 af] + * + * Fix declarations. + * Check for trailing garbage after last expression on command line. + * [90/08/10 14:34:54 dbg] + * + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#include +#include /* For strcpy() */ + +#include +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ + +extern db_expr_t db_radix; +extern db_expr_t db_max_width; +extern db_expr_t db_tab_stop_width; +extern db_expr_t db_max_line; +extern db_expr_t db_auto_wrap; +extern db_expr_t db_macro_level; +extern db_expr_t db_auto_completion; + +#define DB_NWORK 32 /* number of work variable */ + +db_expr_t db_work[DB_NWORK]; /* work variable */ + +struct db_variable db_vars[] = { + { "maxoff", (db_expr_t*)&db_maxoff, FCN_NULL }, + { "autowrap", &db_auto_wrap, FCN_NULL }, + { "completion", &db_auto_completion, FCN_NULL }, + { "maxwidth", &db_max_width, FCN_NULL }, + { "radix", &db_radix, FCN_NULL }, + { "tabstops", &db_tab_stop_width, FCN_NULL }, + { "lines", &db_max_line, FCN_NULL }, + { "thr_act", 0, db_set_default_act }, + { "task", 0, db_get_task_act, + 1, 2, -1, -1 }, + { "work", &db_work[0], FCN_NULL, + 1, 1, 0, DB_NWORK-1 }, + { "arg", 0, db_arg_variable, + 1, 1, 1, DB_MACRO_NARGS, + 1, 0, DB_MACRO_LEVEL-1, (int *)&db_macro_level }, +}; +struct db_variable *db_evars = db_vars + sizeof(db_vars)/sizeof(db_vars[0]); + + + +/* Prototypes for functions local to this file. 
+ */ + +static char *db_get_suffix( + register char *suffix, + short *suffix_value); + +static boolean_t db_cmp_variable_name( + struct db_variable *vp, + char *name, + register db_var_aux_param_t ap); + +static int db_find_variable( + struct db_variable **varp, + db_var_aux_param_t ap); + +static int db_set_variable(db_expr_t value); + +void db_list_variable(void); + +static char * +db_get_suffix( + register char *suffix, + short *suffix_value) +{ + register int value; + + for (value = 0; *suffix && *suffix != '.' && *suffix != ':'; suffix++) { + if (*suffix < '0' || *suffix > '9') + return(0); + value = value*10 + *suffix - '0'; + } + *suffix_value = value; + if (*suffix == '.') + suffix++; + return(suffix); +} + +static boolean_t +db_cmp_variable_name( + struct db_variable *vp, + char *name, + register db_var_aux_param_t ap) +{ + register char *var_np, *np; + register int level; + + for (np = name, var_np = vp->name; *var_np; ) { + if (*np++ != *var_np++) + return(FALSE); + } + for (level = 0; *np && *np != ':' && level < vp->max_level; level++){ + if ((np = db_get_suffix(np, &ap->suffix[level])) == 0) + return(FALSE); + } + if ((*np && *np != ':') || level < vp->min_level + || (level > 0 && (ap->suffix[0] < vp->low + || (vp->high >= 0 && ap->suffix[0] > vp->high)))) + return(FALSE); + strcpy(ap->modif, (*np)? 
np+1: ""); + ap->thr_act = (db_option(ap->modif, 't')?db_default_act: THR_ACT_NULL); + ap->level = level; + ap->hidden_level = -1; + return(TRUE); +} + +static int +db_find_variable( + struct db_variable **varp, + db_var_aux_param_t ap) +{ + int t; + struct db_variable *vp; + + t = db_read_token(); + if (t == tIDENT) { + for (vp = db_vars; vp < db_evars; vp++) { + if (db_cmp_variable_name(vp, db_tok_string, ap)) { + *varp = vp; + return (1); + } + } + for (vp = db_regs; vp < db_eregs; vp++) { + if (db_cmp_variable_name(vp, db_tok_string, ap)) { + *varp = vp; + return (1); + } + } +#if defined(ALTERNATE_REGISTER_DEFS) + for (vp = db_altregs; vp < db_ealtregs; vp++) { + if (db_cmp_variable_name(vp, db_tok_string, ap)) { + *varp = vp; + return (1); + } + } +#endif /* defined(ALTERNATE_REGISTER_DEFS) */ + } + db_printf("Unknown variable \"$%s\"\n", db_tok_string); + db_error(0); + return (0); +} + +int +db_get_variable(db_expr_t *valuep) +{ + struct db_variable *vp; + struct db_var_aux_param aux_param; + char modif[TOK_STRING_SIZE]; + + aux_param.modif = modif; + if (!db_find_variable(&vp, &aux_param)) + return (0); + + db_read_write_variable(vp, valuep, DB_VAR_GET, &aux_param); + + return (1); +} + +static int +db_set_variable(db_expr_t value) +{ + struct db_variable *vp; + struct db_var_aux_param aux_param; + char modif[TOK_STRING_SIZE]; + + aux_param.modif = modif; + if (!db_find_variable(&vp, &aux_param)) + return (0); + + db_read_write_variable(vp, &value, DB_VAR_SET, &aux_param); + + return (1); +} + +void +db_read_write_variable( + struct db_variable *vp, + db_expr_t *valuep, + int rw_flag, + db_var_aux_param_t ap) +{ + int (*func)(struct db_variable*, db_expr_t*,int, db_var_aux_param_t) + = vp->fcn; + struct db_var_aux_param aux_param; + db_expr_t old_value; + + if (ap == 0) { + ap = &aux_param; + ap->modif = ""; + ap->level = 0; + ap->thr_act = THR_ACT_NULL; + } + if (rw_flag == DB_VAR_SET && vp->precious) + db_read_write_variable(vp, &old_value, DB_VAR_GET, 
ap); + if (func == FCN_NULL) { + if (rw_flag == DB_VAR_SET) + vp->valuep[(ap->level)? (ap->suffix[0] - vp->low): 0] = *valuep; + else + *valuep = vp->valuep[(ap->level)? (ap->suffix[0] - vp->low): 0]; + } else + (*func)(vp, valuep, rw_flag, ap); + if (rw_flag == DB_VAR_SET && vp->precious) + db_printf("\t$%s:%s<%#x>\t%#8n\t=\t%#8n\n", vp->name, + ap->modif, ap->thr_act, old_value, *valuep); +} + +void +db_list_variable(void) +{ + register struct db_variable *new; + register struct db_variable *old; + register struct db_variable *cur; + unsigned int l; + unsigned int len; + short i; + unsigned int j; + + len = 1; + for (cur = db_vars; cur < db_evars; cur++) { + if (cur->min_level > 0 || cur->max_level > 0) { + j = 3 * (cur->max_level - cur->min_level + 1) - 1; + if (cur->max_level > cur->min_level) + j += 2; + } else + j = 0; + if ((l = strlen(cur->name) + j) >= len) + len = l + 1; + } + + old = (struct db_variable *)0; + for (;;) { + new = (struct db_variable *)0; + for (cur = db_vars; cur < db_evars; cur++) + if ((new == (struct db_variable *)0 || + strcmp(cur->name, new->name) < 0) && + (old == (struct db_variable *)0 || + strcmp(cur->name, old->name) > 0)) + new = cur; + if (new == (struct db_variable *)0) + return; + db_reserve_output_position(len); + db_printf(new->name); + j = strlen(new->name); + if (new->min_level > 0) { + db_putchar('?'); + db_putchar('?'); + j += 2; + for (i = new->min_level - 1; i > 0; i--) { + db_putchar('.'); + db_putchar('?'); + db_putchar('?'); + j += 3; + } + if (new->max_level > new->min_level) { + db_putchar('['); + db_putchar('.'); + db_putchar('?'); + db_putchar('?'); + j += 4; + } + i = new->min_level + 1; + } else { + if (new->max_level > new->min_level) { + db_putchar('['); + j++; + } + i = new->min_level; + } + while (i++ < new->max_level) { + db_putchar('.'); + db_putchar('?'); + db_putchar('?'); + j += 3; + } + if (new->max_level > new->min_level) { + db_putchar(']'); + j++; + } + while (j++ < len) + db_putchar(' '); + old 
= new; + } +} + +void +db_set_cmd(void) +{ + db_expr_t value; + int t; + struct db_variable *vp; + struct db_var_aux_param aux_param; + char modif[TOK_STRING_SIZE]; + + aux_param.modif = modif; + t = db_read_token(); + if (t == tIDENT && strcmp("help", db_tok_string) == 0) { + db_list_variable(); + return; + } + if (t != tDOLLAR) { + db_error("Variable name should be prefixed with $\n"); + return; + } + if (!db_find_variable(&vp, &aux_param)) { + db_error("Unknown variable\n"); + return; + } + + t = db_read_token(); + if (t != tEQ) + db_unread_token(t); + + if (!db_expression(&value)) { + db_error("No value\n"); + return; + } + if ((t = db_read_token()) == tSEMI_COLON) + db_unread_token(t); + else if (t != tEOL) + db_error("?\n"); + + db_read_write_variable(vp, &value, DB_VAR_SET, &aux_param); +} + +void +db_show_one_variable(void) +{ + struct db_variable *cur; + unsigned int len; + unsigned int sl; + unsigned int slen; + short h; + short i; + short j; + short k; + short low; + int hidden_level; + struct db_var_aux_param aux_param; + char *p; + char *q; + char *name; + db_addr_t offset; + + for (cur = db_vars; cur < db_evars; cur++) + if (db_cmp_variable_name(cur, db_tok_string, &aux_param)) + break; + if (cur == db_evars) { + for (cur = db_vars; cur < db_evars; cur++) { + for (q = cur->name, p = db_tok_string; *q && *p == *q; p++,q++) + continue; + if (*q == '\0') + break; + } + if (cur == db_evars) { + db_error("Unknown variable\n"); + return; + } + + for (i = 0; *p && *p != ':' && i < cur->max_level; i++, p = q) + if ((q = db_get_suffix(p, &aux_param.suffix[i])) == 0) + break; + aux_param.level = i; + if ((*p && *p != ':') || + (i > 0 && (aux_param.suffix[0] < cur->low || + (cur->high >= 0 && + aux_param.suffix[0] > cur->high)))) { + db_error("Unknown variable format\n"); + return; + } + + strcpy(aux_param.modif, *p ? p + 1 : ""); + aux_param.thr_act = (db_option(aux_param.modif, 't') ? 
+ db_default_act : THR_ACT_NULL); + } + + if (cur->hidden_level) + if (*cur->hidden_levelp >= cur->hidden_low && + *cur->hidden_levelp <= cur->hidden_high) { + hidden_level = 1; + aux_param.hidden_level = h = *cur->hidden_levelp; + } else { + hidden_level = 0; + aux_param.hidden_level = h = cur->hidden_low; + slen = 1; + for (k = aux_param.level > 0 ? aux_param.suffix[0] : cur->high; + k > 9; k /= 10) + slen++; + } + else + aux_param.hidden_level = -1; + + if ((cur->min_level == 0 && !cur->hidden_level) || cur->high < 0) + j = 0; + else { + if (cur->min_level > 0) { + j = 1; + for (k = aux_param.level > 0 ? + aux_param.suffix[0] : cur->high; k > 9; k /= 10) + j++; + } else + j = 0; + if (cur->hidden_level && hidden_level == 0) { + j += 3; + for (k = aux_param.hidden_level >= 0 ? + aux_param.hidden_level : cur->hidden_high; k > 9; k /= 10) + j++; + } + } + len = strlen(cur->name) + j; + i = low = aux_param.level > 0 ? aux_param.suffix[0] : cur->low; + + for (;;) { + db_printf(cur->name); + j = strlen(cur->name); + if (cur->high >= 0) { + if (cur->min_level > 0) { + db_printf("%d", i); + j++; + for (k = i; k > 9; k /= 10) + j++; + } + if (cur->hidden_level && hidden_level == 0) { + sl = 1; + for (k = i; k > 9; k /= 10) + sl++; + while (sl++ < slen) { + db_putchar(' '); + j++; + } + db_printf("[%d]", h); + j += 3; + for (k = h; k > 9; k /= 10) + j++; + } + } + + while (j++ < len) + db_putchar(' '); + db_putchar(':'); + db_putchar(' '); + + if (cur->fcn) { + aux_param.suffix[0] = i; + (*cur->fcn)(cur, (db_expr_t *)0, DB_VAR_SHOW, &aux_param); + } else { + db_printf("%#n", *(cur->valuep + i)); + db_find_xtrn_task_sym_and_offset(*(cur->valuep + i), &name, + &offset, TASK_NULL); + if (name != (char *)0 && offset <= db_maxoff && + offset != *(cur->valuep + i)) { + db_printf("\t%s", name); + if (offset != 0) + db_printf("+%#r", offset); + } + } + db_putchar('\n'); + if (cur->high < 0) + break; + if (aux_param.level > 0 || i++ == cur->high) { + if (!cur->hidden_level || + 
hidden_level == 0 || + h++ == cur->hidden_high) + break; + aux_param.hidden_level = h; + i = low; + } + } +} + +void +db_show_variable(void) +{ + struct db_variable *cur; + unsigned int l; + unsigned int len; + unsigned int sl; + unsigned int slen; + short h; + short i; + short j; + short k; + int t; + int t1; + struct db_var_aux_param aux_param; + char *name; + db_addr_t offset; + + switch(t = db_read_token()) { + case tEOL: + case tEOF: + case tSEMI_COLON: + break; + + case tDOLLAR: + t1 = db_read_token(); + if (t1 == tIDENT) { + db_show_one_variable(); + return; + } + db_error("Not a variable name after $\n"); + db_unread_token(t); + return; + + default: + db_error("Variable name should be prefixed with $\n"); + db_unread_token(t); + return; + } + db_unread_token(t); + + slen = len = 1; + for (cur = db_vars; cur < db_evars; cur++) { + if ((cur->min_level == 0 && !cur->hidden_level) || cur->high < 0) + j = 0; + else { + if (cur->min_level > 0) { + j = 1; + for (k = cur->high; k > 9; k /= 10) + j++; + } else + j = 0; + if (cur->hidden_level && + (*cur->hidden_levelp < cur->hidden_low || + *cur->hidden_levelp > cur->hidden_high)) { + j += 3; + for (k = cur->hidden_high; k > 9; k /= 10) + j++; + } + } + if ((l = strlen(cur->name) + j) >= len) + len = l + 1; + } + + aux_param.modif = ""; + aux_param.level = 1; + aux_param.thr_act = THR_ACT_NULL; + + for (cur = db_vars; cur < db_evars; cur++) { + i = cur->low; + if (cur->hidden_level) { + if (*cur->hidden_levelp >= cur->hidden_low && + *cur->hidden_levelp <= cur->hidden_high) { + h = cur->hidden_low - 1; + aux_param.hidden_level = *cur->hidden_levelp; + } else { + h = cur->hidden_low; + aux_param.hidden_level = cur->hidden_low; + } + slen = 1; + for (k = cur->high; k > 9; k /= 10) + slen++; + } else + aux_param.hidden_level = -1; + + if (cur != db_vars && cur->high >= 0 && + (cur->min_level > 0 || cur->hidden_level)) + db_putchar('\n'); + + for (;;) { + db_printf(cur->name); + j = strlen(cur->name); + if (cur->high >= 
0) { + if (cur->min_level > 0) { + db_printf("%d", i); + j++; + for (k = i; k > 9; k /= 10) + j++; + } + if (cur->hidden_level && h >= cur->hidden_low) { + sl = 1; + for (k = i; k > 9; k /= 10) + sl++; + while (sl++ < slen) { + db_putchar(' '); + j++; + } + db_printf("[%d]", h); + j += 3; + for (k = h; k > 9; k /= 10) + j++; + } + } + while (j++ < len) + db_putchar(' '); + db_putchar(':'); + db_putchar(' '); + + if (cur->fcn) { + aux_param.suffix[0] = i; + (*cur->fcn)(cur, (db_expr_t *)0, DB_VAR_SHOW, &aux_param); + } else { + db_printf("%#n", *(cur->valuep + i)); + db_find_xtrn_task_sym_and_offset(*(cur->valuep + i), &name, + &offset, TASK_NULL); + if (name != (char *)0 && offset <= db_maxoff && + offset != *(cur->valuep + i)) { + db_printf("\t%s", name); + if (offset != 0) + db_printf("+%#r", offset); + } + } + db_putchar('\n'); + if (cur->high < 0) + break; + if (i++ == cur->high) { + if (!cur->hidden_level || h++ == cur->hidden_high) + break; + aux_param.hidden_level = h; + i = cur->low; + } + } + } +} + +/* + * given a name of a machine register, return a variable pointer to it. + */ +db_variable_t +db_find_reg_name( + char *s) +{ + register db_variable_t regp; + + if ( s == (char *)0 ) + return DB_VAR_NULL; + + for (regp = db_regs; regp < db_eregs; regp++) { + if ( strcmp( s, regp->name) == 0 ) + return regp; + } + return DB_VAR_NULL; +} diff --git a/osfmk/ddb/db_variables.h b/osfmk/ddb/db_variables.h new file mode 100644 index 000000000..d4c62d174 --- /dev/null +++ b/osfmk/ddb/db_variables.h @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.17.5 1996/01/09 19:16:39 devrcs + * Define alternate register definitions. + * [1995/12/01 21:42:46 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:04:00 jfraser] + * + * Revision 1.2.17.4 1995/02/23 21:44:00 alanl + * Merged with DIPC2_SHARED. + * [1995/01/05 13:36:23 alanl] + * + * Revision 1.2.20.2 1994/10/14 03:47:19 dwm + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:40:00 dwm] + * + * Revision 1.2.17.2 1994/09/23 01:22:42 ezf + * change marker to not FREE + * [1994/09/22 21:11:29 ezf] + * + * Revision 1.2.17.1 1994/06/11 21:12:42 bolinger + * Merge up to NMK17.2. + * [1994/06/11 20:04:23 bolinger] + * + * Revision 1.2.22.1 1994/12/06 19:43:29 alanl + * Intel merge, Oct 94 code drop. + * Define DB_VAR_NULL. + * Add prototype for db_find_reg_name. + * [94/11/23 mmp] + * + * Revision 1.2.15.1 1994/02/08 10:59:16 bernadat + * Added db_show_one_variable & db_show_variable prototypes + * + * Got DB_MACRO_LEVEL and DB_MACRO_NARGS macros from . + * Added new fields (hidden_xxx) into struct db_variable and into + * struct db_var_aux_param. + * Added DB_VAR_SHOW for showing variables. 
+ * [93/08/12 paire] + * [94/02/07 bernadat] + * + * Revision 1.2.4.3 1993/07/27 18:28:29 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:26 elliston] + * + * Revision 1.2.4.2 1993/06/09 02:21:06 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:48 jeffc] + * + * Revision 1.2 1993/04/19 16:03:36 devrcs + * New field used to display old register values with 'set' command + * [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.1 1992/09/30 02:24:26 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:04:17 af + * Revision 2.4.3.1 91/10/05 13:08:42 jeffreyh + * Added suffix related field to db_variable structure. + * Added macro definitions of db_{read,write}_variable. + * [91/08/29 tak] + * + * Revision 2.4.3.1 91/10/05 13:08:42 jeffreyh + * Added suffix related field to db_variable structure. + * Added macro definitions of db_{read,write}_variable. + * [91/08/29 tak] + * + * Revision 2.4 91/05/14 15:37:12 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:07:23 mrt + * Changed to new Mach copyright + * [91/01/31 16:19:54 mrt] + * + * Revision 2.2 90/08/27 21:53:40 dbg + * Modularized typedef name. Documented the calling sequence of + * the (optional) access function of a variable. Now the valuep + * field can be made opaque, eg be an offset that fcn() resolves. + * [90/08/20 af] + * + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#ifndef _DDB_DB_VARIABLES_H_ +#define _DDB_DB_VARIABLES_H_ + +#include +#include /* For db_expr_t */ + + +#define DB_VAR_LEVEL 3 /* maximum number of suffix level */ + +/* + * auxiliary parameters passed to a variable handler + */ +struct db_var_aux_param { + char *modif; /* option strings */ + short level; /* number of levels */ + short hidden_level; /* hidden level */ + short suffix[DB_VAR_LEVEL]; /* suffix */ + thread_act_t thr_act; /* target thr_act */ +}; + +typedef struct db_var_aux_param *db_var_aux_param_t; + + +/* + * Debugger variables. + */ +struct db_variable { + char *name; /* Name of variable */ + db_expr_t *valuep; /* pointer to value of variable */ + /* function to call when reading/writing */ + int (*fcn)(struct db_variable *,db_expr_t *,int,db_var_aux_param_t); + short min_level; /* number of minimum suffix levels */ + short max_level; /* number of maximum suffix levels */ + short low; /* low value of level 1 suffix */ + short high; /* high value of level 1 suffix */ + boolean_t hidden_level; /* is there a hidden suffix level ? */ + short hidden_low; /* low value of hidden level */ + short hidden_high; /* high value of hidden level */ + int *hidden_levelp; /* value of current hidden level */ + boolean_t precious; /* print old value when affecting ? 
*/ +#define DB_VAR_GET 0 +#define DB_VAR_SET 1 +#define DB_VAR_SHOW 2 +}; + +typedef struct db_variable *db_variable_t; + +#define DB_VAR_NULL (db_variable_t)0 + +#define FCN_NULL ((int (*)(struct db_variable *, \ + db_expr_t *, \ + int, \ + db_var_aux_param_t)) 0) + +#define DB_VAR_LEVEL 3 /* maximum number of suffix level */ +#define DB_MACRO_LEVEL 5 /* max macro nesting */ +#define DB_MACRO_NARGS 10 /* max args per macro */ + +#define db_read_variable(vp, valuep) \ + db_read_write_variable(vp, valuep, DB_VAR_GET, 0) +#define db_write_variable(vp, valuep) \ + db_read_write_variable(vp, valuep, DB_VAR_SET, 0) + + +extern struct db_variable db_vars[]; /* debugger variables */ +extern struct db_variable *db_evars; +extern struct db_variable db_regs[]; /* machine registers */ +extern struct db_variable *db_eregs; + +#if defined(ALTERNATE_REGISTER_DEFS) + +extern struct db_variable db_altregs[]; /* alternate machine regs */ +extern struct db_variable *db_ealtregs; + +#endif /* defined(ALTERNATE_REGISTER_DEFS) */ + +/* Prototypes for functions exported by this module. + */ + +int db_get_variable(db_expr_t *valuep); + +void db_read_write_variable( + struct db_variable *vp, + db_expr_t *valuep, + int rw_flag, + db_var_aux_param_t ap); + +void db_set_cmd(void); + +void db_show_one_variable(void); + +void db_show_variable(void); + +db_variable_t db_find_reg_name(char *s); + +#endif /* !_DDB_DB_VARIABLES_H_ */ diff --git a/osfmk/ddb/db_watch.c b/osfmk/ddb/db_watch.c new file mode 100644 index 000000000..513ebbb48 --- /dev/null +++ b/osfmk/ddb/db_watch.c @@ -0,0 +1,445 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.12.2 1995/01/06 19:11:06 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.1.3.5 1994/05/06 18:40:29 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. + * * End1.3merge + * [1994/11/04 08:50:16 dwm] + * + * Revision 1.1.12.1 1994/09/23 01:22:53 ezf + * change marker to not FREE + * [1994/09/22 21:11:33 ezf] + * + * Revision 1.1.10.1 1994/01/05 19:28:22 bolinger + * Be sure to count kernel-loaded tasks as part of kernel address space + * in locating watchpoints. + * [1994/01/04 17:43:33 bolinger] + * + * Revision 1.1.3.3 1993/07/27 18:28:31 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:30 elliston] + * + * Revision 1.1.3.2 1993/06/02 23:13:14 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:54 jeffc] + * + * Revision 1.1 1992/09/30 02:01:33 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.7 91/10/09 16:04:32 af + * Revision 2.6.3.1 91/10/05 13:08:50 jeffreyh + * Added user space watch point support including non current task. 
+ * Changed "map" field of db_watchpoint structure to "task" + * for a user to easily understand the target space. + * [91/08/29 tak] + * + * Revision 2.6.3.1 91/10/05 13:08:50 jeffreyh + * Added user space watch point support including non current task. + * Changed "map" field of db_watchpoint structure to "task" + * for a user to easily understand the target space. + * [91/08/29 tak] + * + * Revision 2.6 91/05/14 15:37:30 mrt + * Correcting copyright + * + * Revision 2.5 91/02/05 17:07:27 mrt + * Changed to new Mach copyright + * [91/01/31 16:20:02 mrt] + * + * Revision 2.4 91/01/08 15:09:24 rpd + * Use db_map_equal, db_map_current, db_map_addr. + * [90/11/10 rpd] + * + * Revision 2.3 90/11/05 14:26:39 rpd + * Initialize db_watchpoints_inserted to TRUE. + * [90/11/04 rpd] + * + * Revision 2.2 90/10/25 14:44:16 rwd + * Made db_watchpoint_cmd parse a size argument. + * [90/10/17 rpd] + * Generalized the watchpoint support. + * [90/10/16 rwd] + * Created. + * [90/10/16 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: Richard P. Draves, Carnegie Mellon University + * Date: 10/90 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include /* For db_single_step() */ + +/* + * Watchpoints. + */ + +boolean_t db_watchpoints_inserted = TRUE; + +#define NWATCHPOINTS 100 +struct db_watchpoint db_watch_table[NWATCHPOINTS]; +db_watchpoint_t db_next_free_watchpoint = &db_watch_table[0]; +db_watchpoint_t db_free_watchpoints = 0; +db_watchpoint_t db_watchpoint_list = 0; + +extern vm_map_t kernel_map; + + + +/* Prototypes for functions local to this file. XXX -- should be static. 
+ */ + +db_watchpoint_t db_watchpoint_alloc(void); + +void db_watchpoint_free(register db_watchpoint_t watch); + +void db_set_watchpoint( + task_t task, + db_addr_t addr, + vm_size_t size); + +void db_delete_watchpoint( + task_t task, + db_addr_t addr); + +static int db_get_task( + char *modif, + task_t *taskp, + db_addr_t addr); + +void db_list_watchpoints(void); + + + +db_watchpoint_t +db_watchpoint_alloc(void) +{ + register db_watchpoint_t watch; + + if ((watch = db_free_watchpoints) != 0) { + db_free_watchpoints = watch->link; + return (watch); + } + if (db_next_free_watchpoint == &db_watch_table[NWATCHPOINTS]) { + db_printf("All watchpoints used.\n"); + return (0); + } + watch = db_next_free_watchpoint; + db_next_free_watchpoint++; + + return (watch); +} + +void +db_watchpoint_free(register db_watchpoint_t watch) +{ + watch->link = db_free_watchpoints; + db_free_watchpoints = watch; +} + +void +db_set_watchpoint( + task_t task, + db_addr_t addr, + vm_size_t size) +{ + register db_watchpoint_t watch; + + /* + * Should we do anything fancy with overlapping regions? 
+ */ + + for (watch = db_watchpoint_list; watch != 0; watch = watch->link) { + if (watch->task == task && + (watch->loaddr == addr) && + (watch->hiaddr == addr+size)) { + db_printf("Already set.\n"); + return; + } + } + + watch = db_watchpoint_alloc(); + if (watch == 0) { + db_printf("Too many watchpoints.\n"); + return; + } + + watch->task = task; + watch->loaddr = addr; + watch->hiaddr = addr+size; + + watch->link = db_watchpoint_list; + db_watchpoint_list = watch; + + db_watchpoints_inserted = FALSE; +} + +void +db_delete_watchpoint( + task_t task, + db_addr_t addr) +{ + register db_watchpoint_t watch; + register db_watchpoint_t *prev; + + for (prev = &db_watchpoint_list; (watch = *prev) != 0; + prev = &watch->link) { + if (watch->task == task && + (watch->loaddr <= addr) && + (addr < watch->hiaddr)) { + *prev = watch->link; + db_watchpoint_free(watch); + return; + } + } + + db_printf("Not set.\n"); +} + +void +db_list_watchpoints(void) +{ + register db_watchpoint_t watch; + int task_id; + + if (db_watchpoint_list == 0) { + db_printf("No watchpoints set\n"); + return; + } + + db_printf("Space Address Size\n"); + for (watch = db_watchpoint_list; watch != 0; watch = watch->link) { + if (watch->task == TASK_NULL) + db_printf("kernel "); + else { + task_id = db_lookup_task(watch->task); + if (task_id < 0) + db_printf("%*X", 2*sizeof(vm_offset_t), watch->task); + else + db_printf("task%-3d ", task_id); + } + db_printf(" %*X %X\n", 2*sizeof(vm_offset_t), watch->loaddr, + watch->hiaddr - watch->loaddr); + } +} + +static int +db_get_task( + char *modif, + task_t *taskp, + db_addr_t addr) +{ + task_t task = TASK_NULL; + db_expr_t value; + boolean_t user_space; + + user_space = db_option(modif, 'T'); + if (user_space) { + if (db_expression(&value)) { + task = (task_t)value; + if (db_lookup_task(task) < 0) { + db_printf("bad task address %X\n", task); + return(-1); + } + } else { + task = db_default_task; + if (task == TASK_NULL) { + if ((task = db_current_task()) == 
TASK_NULL) { + db_printf("no task\n"); + return(-1); + } + } + } + } + if (!DB_VALID_ADDRESS(addr, user_space)) { + db_printf("Address %#X is not in %s space\n", addr, + (user_space)? "user": "kernel"); + return(-1); + } + *taskp = task; + return(0); +} + +/* Delete watchpoint */ +void +db_deletewatch_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + task_t task; + + if (db_get_task(modif, &task, addr) < 0) + return; + db_delete_watchpoint(task, addr); +} + +/* Set watchpoint */ +void +db_watchpoint_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + vm_size_t size; + db_expr_t value; + task_t task; + + if (db_get_task(modif, &task, addr) < 0) + return; + if (db_expression(&value)) + size = (vm_size_t) value; + else + size = sizeof(int); + db_set_watchpoint(task, addr, size); +} + +/* list watchpoints */ +void +db_listwatch_cmd(void) +{ + db_list_watchpoints(); +} + +void +db_set_watchpoints(void) +{ + register db_watchpoint_t watch; + vm_map_t map; + + if (!db_watchpoints_inserted) { + for (watch = db_watchpoint_list; watch != 0; watch = watch->link) { + map = (watch->task)? watch->task->map: kernel_map; + pmap_protect(map->pmap, + trunc_page(watch->loaddr), + round_page(watch->hiaddr), + VM_PROT_READ); + } + db_watchpoints_inserted = TRUE; + } +} + +void +db_clear_watchpoints(void) +{ + db_watchpoints_inserted = FALSE; +} + +boolean_t +db_find_watchpoint( + vm_map_t map, + db_addr_t addr, + db_regs_t *regs) +{ + register db_watchpoint_t watch; + db_watchpoint_t found = 0; + register task_t task_space; + + task_space = (vm_map_pmap(map) == kernel_pmap)? 
+ TASK_NULL: db_current_space(); + for (watch = db_watchpoint_list; watch != 0; watch = watch->link) { + if (watch->task == task_space) { + if ((watch->loaddr <= addr) && (addr < watch->hiaddr)) + return (TRUE); + else if ((trunc_page(watch->loaddr) <= addr) && + (addr < round_page(watch->hiaddr))) + found = watch; + } + } + + /* + * We didn't hit exactly on a watchpoint, but we are + * in a protected region. We want to single-step + * and then re-protect. + */ + + if (found) { + db_watchpoints_inserted = FALSE; + db_single_step(regs, task_space); + } + + return (FALSE); +} diff --git a/osfmk/ddb/db_watch.h b/osfmk/ddb/db_watch.h new file mode 100644 index 000000000..2d37eb46f --- /dev/null +++ b/osfmk/ddb/db_watch.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:23:04 ezf + * change marker to not FREE + * [1994/09/22 21:11:39 ezf] + * + * Revision 1.1.2.4 1993/07/27 18:28:34 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:34 elliston] + * + * Revision 1.1.2.3 1993/06/07 22:07:00 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:57:38 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:13:21 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:57:59 jeffc] + * + * Revision 1.1 1992/09/30 02:24:28 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:04:47 af + * Revision 2.4.3.1 91/10/05 13:09:14 jeffreyh + * Changed "map" field of db_watchpoint structure to "task", + * and also changed paramters of function declarations. + * [91/08/29 tak] + * + * Revision 2.4.3.1 91/10/05 13:09:14 jeffreyh + * Changed "map" field of db_watchpoint structure to "task", + * and also changed paramters of function declarations. + * [91/08/29 tak] + * + * Revision 2.4 91/05/14 15:37:46 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:07:31 mrt + * Changed to new Mach copyright + * [91/01/31 16:20:09 mrt] + * + * Revision 2.2 90/10/25 14:44:21 rwd + * Generalized the watchpoint support. + * [90/10/16 rwd] + * Created. + * [90/10/16 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 10/90 + */ + +#ifndef _DDB_DB_WATCH_H_ +#define _DDB_DB_WATCH_H_ + +#include +#include +#include + +/* + * Watchpoint. + */ + +typedef struct db_watchpoint { + task_t task; /* in this map */ + db_addr_t loaddr; /* from this address */ + db_addr_t hiaddr; /* to this address */ + struct db_watchpoint *link; /* link in in-use or free chain */ +} *db_watchpoint_t; + + + +/* Prototypes for functions exported by this module. 
+ */ + +void db_deletewatch_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_watchpoint_cmd( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +void db_listwatch_cmd(void); + +void db_clear_watchpoints(void); + +void db_set_watchpoints(void); + +boolean_t db_find_watchpoint( + vm_map_t map, + db_addr_t addr, + db_regs_t *regs); + +#endif /* !_DDB_DB_WATCH_H_ */ diff --git a/osfmk/ddb/db_write_cmd.c b/osfmk/ddb/db_write_cmd.c new file mode 100644 index 000000000..fdbf14268 --- /dev/null +++ b/osfmk/ddb/db_write_cmd.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.10.1 1994/09/23 01:23:15 ezf + * change marker to not FREE + * [1994/09/22 21:11:42 ezf] + * + * Revision 1.2.8.3 1994/03/17 22:35:48 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:26:02 dwm] + * + * Revision 1.2.8.2 1994/01/12 17:51:11 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:13:42 dwm] + * + * Revision 1.2.8.1 1994/01/05 19:28:25 bolinger + * Target current address space, not current "task", for writes. + * [1994/01/04 17:44:51 bolinger] + * + * Revision 1.2.2.3 1993/07/27 18:28:36 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:37 elliston] + * + * Revision 1.2.2.2 1993/06/09 02:21:11 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:58:03 jeffc] + * + * Revision 1.2 1993/04/19 16:03:43 devrcs + * Changes from mk78: + * Removed unused variable 'p' from db_write_cmd(). + * [92/05/16 jfriedl] + * Reorganized. w/u now works, instead of just w/tu. + * [92/04/18 danner] + * [93/02/02 bruel] + * + * Revision 1.1 1992/09/30 02:01:35 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/10/09 16:05:06 af + * Revision 2.5.3.1 91/10/05 13:09:25 jeffreyh + * Added user space write support including inactive task. + * [91/08/29 tak] + * + * Revision 2.5.3.1 91/10/05 13:09:25 jeffreyh + * Added user space write support including inactive task. + * [91/08/29 tak] + * + * Revision 2.5 91/05/14 15:38:04 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:07:35 mrt + * Changed to new Mach copyright + * [91/01/31 16:20:19 mrt] + * + * Revision 2.3 90/10/25 14:44:26 rwd + * Changed db_write_cmd to print unsigned. 
+ * [90/10/19 rpd] + * + * Revision 2.2 90/08/27 21:53:54 dbg + * Set db_prev and db_next instead of explicitly advancing dot. + * [90/08/22 dbg] + * Reflected changes in db_printsym()'s calling seq. + * [90/08/20 af] + * Warn user if nothing was written. + * [90/08/07 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* For db_printf() */ + +/* + * Write to file. 
+ */ +void +db_write_cmd( + db_expr_t address, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + register db_addr_t addr; + register db_expr_t old_value; + db_expr_t new_value; + register int size; + boolean_t wrote_one = FALSE; + boolean_t t_opt, u_opt; + thread_act_t thr_act; + task_t task; + + addr = (db_addr_t) address; + + size = db_size_option(modif, &u_opt, &t_opt); + + if (t_opt) + { + if (!db_get_next_act(&thr_act, 0)) + return; + task = thr_act->task; + } + else + task = db_current_space(); + + /* if user space is not explicitly specified, + look in the kernel */ + if (!u_opt) + task = TASK_NULL; + + if (!DB_VALID_ADDRESS(addr, u_opt)) { + db_printf("Bad address 0x%x\n", addr); + return; + } + + while (db_expression(&new_value)) { + old_value = db_get_task_value(addr, size, FALSE, task); + db_task_printsym(addr, DB_STGY_ANY, task); + db_printf("\t\t%#8n\t=\t%#8n\n", old_value, new_value); + db_put_task_value(addr, size, new_value, task); + addr += size; + + wrote_one = TRUE; + } + + if (!wrote_one) + db_error("Nothing written.\n"); + + db_next = addr; + db_prev = addr - size; +} diff --git a/osfmk/ddb/db_write_cmd.h b/osfmk/ddb/db_write_cmd.h new file mode 100644 index 000000000..21fa8ce65 --- /dev/null +++ b/osfmk/ddb/db_write_cmd.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:23:27 ezf + * change marker to not FREE + * [1994/09/22 21:11:46 ezf] + * + * Revision 1.1.2.3 1993/09/17 21:34:44 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:27:30 robert] + * + * Revision 1.1.2.2 1993/07/27 18:28:41 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:41 elliston] + * + * $EndLog$ + */ +#ifndef _DDB_DB_WRITE_CMD_H_ +#define _DDB_DB_WRITE_CMD_H_ + +#include + +/* Prototypes for functions exported by this module. + */ +void db_write_cmd( + db_expr_t address, + boolean_t have_addr, + db_expr_t count, + char * modif); + +#endif /* !_DDB_DB_WRITE_CMD_H_ */ diff --git a/osfmk/ddb/makedis.c b/osfmk/ddb/makedis.c new file mode 100644 index 000000000..b397eb7c4 --- /dev/null +++ b/osfmk/ddb/makedis.c @@ -0,0 +1,2383 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1997/03/27 18:46:52 barbou + * Created. + * [1997/03/27 13:58:42 barbou] + * + * $EndLog$ + */ + +/* makedis.c - make a disassembler. */ + +/* , + By Eamonn McManus , April 1995. + Copyright 1995 by Eamonn McManus. Non-commercial use is permitted. */ + +/* DESCRIPTION + + This program generates a disassembler in C from a file describing the + opcodes of the machine in question. Lines in the description file are + either comments beginning with #, or contain three fields, with the + first two being terminated by space and the third containing the rest + of the line. Long logical lines can be split onto several physical + lines by ending each one except the last with a \. A logical line + can also be split immediately after a |. Unlike \, | is considered + part of the logical line. Leading spaces on continuation lines + following either \ or | are ignored. + + Here is a concise description of the meanings of the three fields. + Examples later will make it clearer what they are used for. + + The first field of the three is a function name. This will produce + a function or array of the same name in the C output, so it should + not conflict with other identifiers or C keywords. 
By default the + function named returns a string (a (char *) in C), but if the first + field is preceded by %, the function returns an unsigned long + integer. + + The second field describes the arguments of the function. It consists + of two parts, either but not both of which may be omitted. The first + part is a string which is a bitmask describing the first argument of + the function. Each character of the string represents one bit, + with the least significant bit being the last. A character can be + 0 or 1, representing that constant value, or a letter, representing + part of a bitfield. A given bitfield consists of all of the + contiguous bits containing the same letter. Upper and lower case + letters are considered different. + + The second part of the second field is a list of parameters + describing the parameters of the function, or the parameters after + the first if the bitfield part was present. The list is contained + in parentheses () and the individual parameters are separated by + commas. Spaces are not allowed. Each parameter name is a single + letter, optionally preceded by %. The parameter is an unsigned + long integer if % is present, otherwise a string. Again, upper and + lower case parameter names are different. + + The third field describes the value of the function. If a bitmask + is present in the second field and it contains constant bits (0s or + 1s), then the third field is the value of the function only in the + case where its first argument contains matching values in those bit + positions. There can be many different lines naming the same + function but with different bitpatterns. The generated C code will + arrange to return the value corresponding to the pattern that + matches the actual first argument of the function when it is + called. This argument should not have bits set in positions beyond + those present in the bitpattern. 
+ + It is only allowed for two different lines to name the same function + if there is a bitstring in the second field. It is not allowed for + two such lines to specify exactly the same constant bit values. But + it is allowed for a line to have all the same constant bit values as + another plus some extra constant values. In this case the more + specific line applies when all of its constant bits match, and + otherwise the less specific line applies. + + Apart from the contents of the bitstring, the second field must be + identical on every line referring to a given function, and the + bitstring must always be of the same length. + + For string-valued functions, the third field is the string value. + For integer-valued functions, it is a C integer expression + generating the value. In both cases there may be several special + values: + + - A $ followed by a single letter is replaced by the value of the + argument or bitfield with that name. The value of a bitfield is + shifted as if that bitfield were in the least-significant bit + position. Thus, a single-bit field always has value 0 or 1. + + - A $ followed by the name of a function and an argument list in + parentheses () is replaced by the value returned by the function + with those arguments. An integer value cannot be inserted into a + string without being converted by a function, nor can a string + value be used in an integer expression. + + - A $ followed by a bitstring enclosed in [] is replaced by the + value of that bitstring. The bitstring has the same syntax as in + the second field, described above. Each contiguous sequence of + the same repeated letter in the bitstring is replaced by the + value of the argument or bitfield-argument with that name, + shifted into the appropriate position. + + - A list of strings, separated by |, enclosed in + {}, and followed by an integer expression enclosed in [], is + replaced by the string in the list whose number matches the value + of the expression. 
The first string in the list is numbered 0. + If there is no string corresponding to the value of the + expression, the behaviour is undefined. The strings in the list + may themselves contain $ or {} operations. + + - A \ followed by any character is replaced by that + character, without regard to any meaning it may usually have. + This is used to obtain strings containing characters such as + {, $, or \. The use of backslash to split long logical + lines takes precedence over this use, so \\ should not appear + at the end of a line. + + The third field may also be a lone colon ":", in which case the + function is assumed to be defined externally and only a function + declaration (prototype) is generated. + + + EXAMPLES + + Here are some examples from the description file for the Z80 + microprocessor. This processor has 8-bit opcodes which are + disassembled by a generated function "inst" which looks like this: + + typedef unsigned long bits; + char *inst(bits code) {...} + + The simplest sort of line in the description file is one that looks + like this: + + inst 01110110 halt + + The first field names the function, "inst". The second field + implies that that function has exactly one argument which is an + integer, and that this line specifies the value of the function + when this integer has the binary value 01110110 (hex 0x76). This + value will be the string "halt". + + A more complex line is one looking like this: + + inst 001aa111 {daa|cpl|scf|ccf}[$a] + + This line is compatible with the previous one, because it has the + same number of bits and the constant bits are different. It + specifies the value of inst when its argument looks like + 001aa111, i.e., for the binary values + 00100111, + 00101111, + 00110111, and + 00111111. The value of $a for these four values will be + respectively binary 00, 01, 10, 11, i.e., 0 to 3. The + corresponding values of the inst function will be "daa", "cpl", + "scf", and "ccf". 
+ + The description defines a helper function "reg8" like this: + + reg8 rrr {b|c|d|e|h|l|(hl)|a}[$r] + + This simply selects one of the eight strings between {} depending + on the value of the argument, which is assumed to be a three-bit + value. This could just as easily have been written: + + reg8 (%r) {b|c|d|e|h|l|(hl)|a}[$r] + + The generated C code is the same -- in each case makedis realises + that the function can be represented by an array rather than + compiling a C function. + + The reg8 function is used in lines like this one: + + inst 01rrrsss ld $reg8($r),$reg8($s) + + Thus if the argument to inst is + 01010011 + then $r is 010 (2) and $s is 011 (3). Since reg8(2) is "d" and + reg8(3) is "e", the value of inst with this argument will be the + string "ld d,e". + + Note that the opcode for "halt" given above matches this pattern, + but because the bitpattern for "halt" is more specific (has more + constant bits) it is the one chosen when the argument is 01110110. + + The description also uses an external C function "hexprint" defined + like this: + + char *hexprint(bits digits, bits n) { + char *p = dis_alloc(digits + 1); + sprintf(p, "%0*lx", (int) digits, n); + return p; + } + + The value of this function is a string containing the number n + spelt out in hex with "digits" digits. In the description + file this function is declared like this: + + hexprint (%w,%n) : + + The names of the parameters are not important in this case as long + as they are letters and are different from each other. + + The hexprint function is used in lines like this one: + + inst 11vvv111 rst $hexprint(2,$v << 3) + + If the argument to inst is + 11011111 + then $v is 011 (3) and the arguments to hexprint are 2 and (3 << 3), + i.e., 0x18. So the value of inst with this argument will be the + string "rst 18". + + Instead of writing $v << 3, it would be possible to write + $[00vvv000]. For instance when $v is binary 011, this becomes + 00011000. The leading 0s could be omitted. 
+ + The $[...] operation is particularly useful for moving bits around. + For instance, the HP PA-RISC opcodes contain bits assigned to + apparently random parts of the instruction word. One of the helper + functions in its description file looks like this: + + im21l aaaaabbccddddddddddde l'$hex($[edddddddddddbbaaaaacc00000000000]) + + So 111110011000000000001 produces 10000000000000111111100000000000. + + The $[...] operation can also be used to spell out binary constants, + since C has no syntax for this. + + + ...More to come... */ + +/* To do: + - More error detection, e.g., bitstring or arg not used in entry. + - Better error recovery -- nearly all errors are currently fatal. + - Clean up type handling, which is somewhat haphazard. It works but there + is stuff that is surely redundant. + - Make generated functions void by default, with $ prefix to indicate + string-value. In a void function, instead of returning a string (or + integer) it would be output via a user-supplied function. + - Further optimise and tidy generated code, e.g.: arrays of one-character + strings could be replaced by arrays of characters; switches with just + one case could be replaced by ifs. + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifndef LONG_BIT +#define LONG_BIT (CHAR_BIT * sizeof (long)) +#endif /* LONG_BIT */ + +#define MAXfunction 32 /* Max function name length. */ +#define MAXBITS LONG_BIT /* Max bitstring length. 
*/ +typedef unsigned long bits; +enum type {T_ERROR, T_UNKNOWN, T_INTEGER, T_STRING}; +const char *const typename[] = {"error", "unknown", "integer", "string"}; +enum walkstringop {COUNTARRAYS, DECLAREARRAYS, COMPILEARRAYS}; +char *bitstype = "unsigned long"; + +int maxfunctionname, maxargwidth; +char *progname = "makedis"; +char **global_argv; +char *filename; +char *headerfilename; +FILE *headerfile; +int lineno; +int indentation; +int debug, dump, warnings; + +/* componentbits has a 1 bit for every possible number of strings we may want + to concatenate together at some stage. A separate C function is compiled + for each such case. */ +bits componentbits; + + +struct entry; +struct arg; +struct string; +struct functioncall; +struct array; +struct bits; +struct bitsplice; + + +int main(int argc, char **argv); +int makedis(FILE *f, char *fname); +struct function *findfunction(char *function); +int parseextern(struct function *fp, FILE *f); +struct function *makefunction(char *function); +int parsebits(struct function *fp, char *bitstring, int nbits); +int parseentrybits(struct entry *ep, char *bitstring, int nbits, int issplice); +int parsecontrol(char *name, char *value); +int parseargs(struct function *fp, FILE *f, int *cp); +int parsestring(struct function *fp, char *str); +enum type makestring(struct function *fp, struct string **stringlink, + char **stringp, char *magic, enum type targettype); +int parsedollar(struct function *fp, char **stringp, struct string *sp); +int parsebitsplice(struct function *fp, char *bitstring, int nbits, + struct string *sp); +int findvariable(struct function *fp, int name, struct string *sp); +int parsefunctioncall(struct function *fp, char *start, char **stringp, + struct string *sp); +int parsearray(struct function *fp, char **stringp, struct string *sp, + enum type t); +void dumpfunctions(void); +void dumpfunction(struct function *fp); +void showentry(FILE *f, struct function *fp, struct entry *ep, bits highlight); +void 
showbits(FILE *f, struct entry *ep, int nbits, bits highlight); +void showargs(FILE *f, struct arg *ap, int fieldwidth); +void showstring(FILE *f, struct string *sp); +void showstringelement(FILE *f, struct string *sp); +void showfunctioncall(FILE *f, struct functioncall *fcp); +void showarray(FILE *f, struct array *ap); +int outputfunctions(void); +void outputidentity(FILE *f); +int outputdeclarations(void); +void outputconcats(void); +void outputconcat(int n); +void outputconcatheader(FILE *f, int n); +void findarrays(void); +int checkfixedlength(struct array *ap); +int outputfunction(struct function *fp); +void functionarray(struct function *fp); +void functionheader(FILE *f, struct function *fp); +int simplearray(struct array *ap); +void compiletype(FILE *f, enum type *tp); +int functionswitch(struct function *fp, bits mask, bits value); +int compilestring(int assignto, struct string *sp, enum type type); +int compilecheckedstring(int assignto, struct string *sp, enum type type); +void compileassign(int assignto); +void compiletemp(int tempno); +void compiletext(char *s); +int compileconcat(struct string *sp, enum type type); +int compilenull(enum type type); +int compilesimple(struct string *sp, enum type type); +int compilearrayref(struct array *ap); +int compilefunctioncall(struct string *sp); +int walkstring(struct string *sp, enum walkstringop op, int tempno); +int compilearray(struct array *ap); +void compilesimplearray(enum type *tp, char *name, int num, struct array *ap); +void declarearray(struct array *ap); +void compilebitstring(struct bits *bp); +void compilebitsplice(struct bitsplice *splicep); +int bitcount(bits x); +bits allbitsset(int nbits); +void findent(FILE *f); +void indent(void); +void *xrealloc(char *oldp, size_t size); +void *xmalloc(size_t size); +void *xstrdup(char *s); +int prematureeof(void); + + +int main(int argc, char **argv) { + int i; + FILE *f; + + global_argv = argv; + if (argc > 0) + progname = argv[0]; + for (i = 1; i < argc 
&& argv[i][0] == '-'; i++) { + switch (argv[i][1]) { + case 'h': + if (++i >= argc) + goto Usage; + headerfilename = argv[i]; break; + case 'd': + debug = 1; break; + case 'D': + dump = 1; break; + case 'w': + warnings = 1; break; + default: +Usage: + fprintf(stderr, "Usage: %s [file]\n", progname); + return 1; + } + } + if (i == argc) + return makedis(stdin, ""); + if (i + 1 != argc) + goto Usage; + if ((f = fopen(argv[i], "r")) == NULL) { + fprintf(stderr, "%s: %s: %s\n", progname, argv[i], strerror(errno)); + return 1; + } + return makedis(f, argv[i]); +} + + +int makedis(FILE *f, char *fname) { + int c, i; + char function[MAXfunction], bitstring[MAXBITS]; + static char *string = NULL; + int stringlen = 0; + struct function *fp; + + filename = fname; + lineno = 1; + /* Loop for every line in the description. */ + while (1) { + /* Ignore initial spaces and newlines. */ + while (isspace(c = getc(f))) + if (c == '\n') + lineno++; + if (c == EOF) + break; + + /* Ignore comments. # only allowed at start of line. */ + if (c == '#') { + while ((c = getc(f)) != '\n') + if (c == EOF) + return prematureeof(); + lineno++; + continue; + } + + /* Read function name, terminated by space. */ + for (i = 0; i < sizeof function && !isspace(c); i++, c = getc(f)) { + if (c == EOF) + return prematureeof(); + function[i] = c; + } + if (i >= sizeof function) { + fprintf(stderr, "%s: %s(%d): function name is too long: %.*s\n", + progname, filename, lineno, i, function); + return 1; + } + function[i] = '\0'; + + /* Skip to next field. */ + while (isspace(c) && c != '\n') + c = getc(f); + + /* If not a control statement, read bitstring and/or arguments. */ + if (function[0] == ':') + fp = 0; /* Silence gcc. */ + else { + fp = makefunction(function); + if (fp == NULL) + return 1; + + /* Read optional bitstring. 
*/ + for (i = 0; i < sizeof bitstring && isalnum(c); i++, c = getc(f)) { + if (c == EOF) + return prematureeof(); + bitstring[i] = c; + } + if (isalnum(c)) { + fprintf(stderr, "%s: %s(%d): bit string is too long: %.*s\n", + progname, filename, lineno, i, bitstring); + return 1; + } + if (parsebits(fp, bitstring, i) != 0) + return 1; + + /* Read optional arguments. */ + if (parseargs(fp, f, &c) != 0) + return 1; + + /* Skip to next field. */ + while (isspace(c) && c != '\n') + c = getc(f); + + /* : indicates an external (C) function. */ + if (c == ':') { + if (parseextern(fp, f) != 0) + return 1; + continue; + } + } + + /* Read associated text. */ + i = 0; + while (1) { + for ( ; c != '\n'; i++, c = getc(f)) { + if (c == EOF) + return prematureeof(); + if (i >= stringlen) { + stringlen = stringlen * 2 + 16; + string = xrealloc(string, stringlen); + } + string[i] = c; + } + lineno++; + if (i > 0) { + switch (string[i - 1]) { + case '\\': + i--; + /* Fall in... */ + case '|': + while (isspace(c = getc(f)) && c != '\n') ; + continue; + } + } + break; + } + if (i >= stringlen) { + stringlen = stringlen * 2 + 16; + string = xrealloc(string, stringlen); + } + string[i] = '\0'; + + /* Parse the line just read. */ + if (function[0] == ':') { + if (parsecontrol(function + 1, string) != 0) + return 1; + } else { + if (parsestring(fp, string) != 0) + return 1; + } + } + if (dump) + dumpfunctions(); + return outputfunctions(); +} + + +/* A function in the description file. nbits and nargs are -1 until the + real values are known. */ +struct function { + struct function *next; + char *name; + enum type type; + int nbits; /* Number of bits in the bitpattern, 0 if none. */ + int nargs; /* Number of (x,y,...) parameters, 0 if none. */ + char isarray; /* Will be represented by a C array. */ + int fixedlength; /* If a C array, will be a char [][N] not a char *[]. */ + struct entry *first, *last; + /* Links to the value(s) supplied. */ + struct arg *args; /* List of (x,y,...) 
names and types. */ +}; +struct function *functions; + + +/* Find the function with the given name. If not found, create a structure + for it, fill it out with a template, and return that. */ +struct function *findfunction(char *name) { + struct function *fp; + + for (fp = functions; fp != NULL; fp = fp->next) { + if (strcmp(fp->name, name) == 0) + return fp; + } + if (strlen(name) > maxfunctionname) + maxfunctionname = strlen(name); + fp = xmalloc(sizeof *fp); + fp->next = functions; + functions = fp; + fp->name = xstrdup(name); + fp->type = T_UNKNOWN; + fp->nbits = fp->nargs = -1; /* nbits will be set correctly later. */ + fp->isarray = 0; + fp->first = fp->last = NULL; + return fp; +} + + +/* Parse an external (C) function declaration. This will look something like: + malloc (%s) : + We're called just after seeing the ':'. + Return 0 if parsing is successful, 1 otherwise. */ +int parseextern(struct function *fp, FILE *f) { + int c; + + if ((c = getc(f)) != '\n') { + fprintf(stderr, + "%s: %s(%d): extern declaration should be a lone `:'\n", + progname, filename, lineno); + return 1; + } + if (fp->nbits != 0) { + fprintf(stderr, + "%s: %s(%d): extern functions should not have bitstrings\n", + progname, filename, lineno); + return 1; + } + free(fp->first); + fp->first = fp->last = NULL; + return 0; +} + + +/* A value supplied for a function (the third field in a description line). + In general there can be any number of such values, differing in the + bitpattern supplied. The mask and value fields describe the constant + bits in the bitpattern: mask indicates which bits they are and value + indicates the values of those bits. So this entry matches + ((x & mask) == value). */ +struct entry { + struct entry *next; + bits mask, value; + struct bits *bits; /* List of named bitfields. */ + struct string *string; /* Value of function when bitpattern matched. */ + char done; /* This entry has already been compiled. 
*/ +}; + + +/* We've just seen a definition of function "name". Make a structure for it + if necessary, and a template entry that will describe the value given here. + */ +struct function *makefunction(char *name) { + struct function *fp; + struct entry *ep = xmalloc(sizeof *ep); + enum type type; + + if (name[0] == '%') { + name++; + type = T_INTEGER; + } else + type = T_STRING; + fp = findfunction(name); + if (fp->type == T_UNKNOWN) + fp->type = type; + else if (fp->type != type) { + fprintf(stderr, "%s: %s(%d): function %s previously declared as %s, " + "here as %s\n", progname, filename, lineno, name, + typename[fp->type], typename[type]); + return NULL; + } + ep->next = NULL; + ep->bits = NULL; + ep->done = 0; + if (fp->first != NULL) + fp->last->next = ep; + else + fp->first = ep; + fp->last = ep; + return fp; +} + + +/* A named bitfield within the bitpattern of a function entry, or within a + $[...] bitsplice. The mask covers the bitfield and the shift says how + many 0 bits there are after the last 1 in the mask. */ +struct bits { + struct bits *next; + int shift; + bits mask; + char name; +}; + + +/* Parse the bitstring supplied for the given function. nbits says how many + bits there are; it can legitimately be 0. Return value is 0 on success. */ +int parsebits(struct function *fp, char *bitstring, int nbits) { + if (fp->nbits < 0) + fp->nbits = nbits; + else if (fp->nbits != nbits) { + fprintf(stderr, "%s: %s(%d): bit string of length %d;\n", + progname, filename, lineno, nbits); + fprintf(stderr, " function %s has bit strings of length %d\n", + fp->name, fp->nbits); + return 1; + } + return parseentrybits(fp->last, bitstring, nbits, 0); +} + + +/* Parse a bitstring that is the pattern for a function entry or that is in a + $[...] bitsplice. Put the result in ep. Return value is 0 on success. 
*/ +int parseentrybits(struct entry *ep, char *bitstring, int nbits, int issplice) { + int i, j; + char bit; + bits mask, value, entrymask; + struct bits *bp; + + mask = value = 0; + for (i = 0; i < nbits; i++) { + bit = bitstring[nbits - 1 - i]; + switch (bit) { + case '1': + value |= 1 << i; + /* Fall in... */ + case '0': + mask |= 1 << i; + continue; + } + if (!isalpha(bit)) { + fprintf(stderr, "%s: %s(%d): invalid character in bitstring: %c\n", + progname, filename, lineno, bit); + return 1; + } + if (!issplice) { + for (bp = ep->bits; bp != NULL; bp = bp->next) { + if (bp->name == bit) { + fprintf(stderr, + "%s: %s(%d): bitstring name %c used twice\n", + progname, filename, lineno, bit); + return 1; + } + } + } + entrymask = 1 << i; + for (j = i + 1; j < nbits && bitstring[nbits - 1 - j] == bit; j++) + entrymask |= 1 << j; + bp = xmalloc(sizeof *bp); + bp->shift = i; + bp->mask = entrymask; + bp->name = bit; + bp->next = ep->bits; + ep->bits = bp; + i = j - 1; + } + ep->mask = mask; + ep->value = value; + return 0; +} + + +/* Parse a control line. This looks something like: + :bitstype unsigned int + in which case we will be called with name "bitstype" and + value "unsigned int". */ +int parsecontrol(char *name, char *value) { + if (strcmp(name, "bitstype") == 0) + bitstype = xstrdup(value); + else { + fprintf(stderr, "%s: %s(%d): unrecognised control keyword %s\n", + progname, filename, lineno, name); + return 1; + } + return 0; +} + + +/* A parameter to a function, e.g., x in: + %f aaa(%x) $a + $x */ +struct arg { + struct arg *next; + enum type type; + char name; +}; + + +/* Parse the parameters (x,y,...) to a function and put the result in fp. + The entry that is being built is fp->last. cp points to the opening + (; if it does not point to a ( then there are no parameters. If + this is the first entry for the function, fp->nargs will be -1 and + we will build up an argument list. 
Otherwise, fp->nargs will be + >= 0 and we will only check that the arguments here are consistent + with what went before. Return value is 0 on success. */ +int parseargs(struct function *fp, FILE *f, int *cp) { + struct arg **arglink, *ap; + struct bits *bp; + int nargs, width; + char name; + enum type t; + + arglink = &fp->args; + width = nargs = 0; + if (*cp == '(') { + *cp = getc(f); + if (*cp != ')') { + width = 1; + while (1) { + nargs++; + width += 2; + if (fp->nargs >= 0 && nargs > fp->nargs) { + fprintf(stderr, + "%s: %s(%d): %d arg(s) instead of %d for %s\n", + progname, filename, lineno, nargs, fp->nargs, + fp->name); + return 1; + } + t = T_STRING; + if (*cp == '%') { + width++; + t = T_INTEGER; + *cp = getc(f); + } + name = *cp; + if (!isalpha(name)) { + fprintf(stderr, + "%s: %s(%d): argument should be letter: %c\n", + progname, filename, lineno, name); + return 1; + } + for (bp = fp->last->bits; bp != NULL; bp = bp->next) { + if (bp->name == name) { + fprintf(stderr, + "%s: %s(%d): %c is a bitstring and an arg\n", + progname, filename, lineno, name); + return 1; + } + } + if (fp->nargs >= 0) { + if ((*arglink)->name != name) { + fprintf(stderr, + "%s: %s(%d): arg %d of %s is %c not %c\n", + progname, filename, lineno, nargs, fp->name, + (*arglink)->name, name); + return 1; + } + if ((*arglink)->type != t) { + fprintf(stderr, + "%s: %s(%d): arg %c of %s: inconsistent type\n", + progname, filename, lineno, name, fp->name); + return 1; + } + } else { + for (ap = fp->args; ap != *arglink; ap = ap->next) { + if (ap->name == name) { + fprintf(stderr, + "%s: %s(%d): argument name %c used twice\n", + progname, filename, lineno, name); + return 1; + } + } + *arglink = xmalloc(sizeof **arglink); + (*arglink)->name = name; + (*arglink)->type = t; + } + arglink = &(*arglink)->next; + *cp = getc(f); + if (*cp == ')') + break; + if (*cp != ',') { + fprintf(stderr, + "%s: %s(%d): bad character in argument list: %c\n" + " (arguments must be single letters)\n", + 
progname, filename, lineno, *cp); + return 1; + } + *cp = getc(f); + } + } + *cp = getc(f); + } + if (fp->nargs < 0) { + fp->nargs = nargs; + width += fp->nbits; + if (width > maxargwidth) + maxargwidth = width; + } else if (fp->nargs != nargs) { + fprintf(stderr, "%s: %s(%d): argument list of length %d;\n", + progname, filename, lineno, nargs); + fprintf(stderr, " function %s has argument lists of length %d\n", + fp->name, fp->nargs); + return 1; + } + *arglink = NULL; + return 0; +} + + +/* Parse the string describing the value of this entry for our + function. Return 0 on success. */ +int parsestring(struct function *fp, char *str) { + enum type t; + + t = makestring(fp, &fp->last->string, &str, NULL, fp->type); + if (t == T_ERROR) + return 1; + if (fp->type != t && t != T_UNKNOWN) { + fprintf(stderr, "%s: %s(%d): function %s has inconsistent types\n", + progname, filename, lineno, fp->name); + return 1; + } + return 0; +} + + +/* A parsed representation of the whole string describing a value of a + function, or certain strings within that (e.g., array indices). This is a + linked list of substrings whose type is given by the type field. */ +struct string { + struct string *next; + enum elementtype { + S_TEXT, S_BITSTRING, S_BITSPLICE, S_PARAMETER, S_FUNCTIONCALL, S_ARRAY + } type; + union value { /* The fields here correspond to the enum values. */ + char *text; /* plain text */ + struct bits *bits; /* $x where x is a bitfield */ + struct bitsplice *bitsplice; /* $[...] */ + struct arg *parameter; /* $x where x is a parameter */ + struct functioncall *functioncall; /* $func(...) */ + struct array *array; /* {...}[...] */ + } value; +}; + +/* The representation of a function call $func(...) in the description of a + function value. */ +struct functioncall { + struct function *function; + struct stringlist *args; +}; + +/* The representation of an array selection {...|...}[...] in the description + of a function value. 
tempno is used when constructing a C variable name + that will contain the strings or numbers in an array. */ +struct array { + struct string *index; /* what's between [...] */ + struct stringlist *elements; /* what's between {...} */ + enum type type; /* the type of each element */ + int tempno; +}; + +/* A list of strings, being the list of arguments in a function call or the + list of elements of an array. This is a linked list of linked lists. */ +struct stringlist { + struct stringlist *next; + enum type type; + struct string *string; +}; + + +/* The following are the only characters with special meaning at the top level + of parsing of a function value. When parsing arrays or function calls, + other characters become special. */ +#define MAKESTRING_MAGIC "${"/*}*/ + + +/* Parse a function return-value string or substring and make a struct string + list for it. The string starts at *stringp and ends at a \0 or at any + character in the `magic' string other than { or $. *stringp is updated + to point to the terminating character. The parsed representation is put + at *stringlink. `fp' is the function whose return value is being parsed. + `targettype' is the expected type of the result, if known. + The return value is the actual type. 
*/ +enum type makestring(struct function *fp, struct string **stringlink, + char **stringp, char *magic, enum type targettype) { + char *p, *q; + struct string *sp, **firststringlink; + int n, components; + int parenlevel = 0; + enum type t = targettype, newt; + + if (magic == NULL) + magic = MAKESTRING_MAGIC; + p = *stringp; + firststringlink = stringlink; + components = 0; + while (*p != '\0') { + sp = xmalloc(sizeof *sp); + q = p; + n = 0; + do { + if (strchr(magic, *q) != NULL) { + if (*q != ')' || parenlevel == 0) + break; + } + switch (*q) { + case '(': + parenlevel++; break; + case ')': + parenlevel--; break; + case '\\': + if (q[1] != '\0') + q++; + break; + } + n++; + } while (*++q != '\0'); + if (n > 0) { + sp->type = S_TEXT; + sp->value.text = q = xmalloc(n + 1); + do { + if (*p == '\\') + p++; + *q++ = *p++; + } while (--n > 0); + *q = '\0'; + newt = t; + } else if (*p == '$') { + if (parsedollar(fp, &p, sp) != 0) + return T_ERROR; + switch (sp->type) { + case S_BITSTRING: + case S_BITSPLICE: + newt = T_INTEGER; + break; + case S_PARAMETER: + newt = sp->value.parameter->type; + break; + case S_FUNCTIONCALL: + newt = sp->value.functioncall->function->type; + break; + default: + fprintf(stderr, "makestring type %d\n", sp->type); + abort(); + } + } else if (*p == '{'/*}*/) { + if (parsearray(fp, &p, sp, t) != 0) + return T_ERROR; + newt = sp->value.array->type; + } else { + free(sp); + break; + } + if (t == T_UNKNOWN) + t = newt; + else if (newt != T_UNKNOWN && t != newt) { + if (stringlink == firststringlink) { + fprintf(stderr, "%s: %s(%d): expected %s type:\n", progname, + filename, lineno, typename[t]); + showstringelement(stderr, sp); + return T_ERROR; + } + *stringlink = NULL; + fprintf(stderr, "%s: %s(%d): mixed types in string:\n", + progname, filename, lineno); + showstring(stderr, *firststringlink); + fprintf(stderr, " -- %s\n", typename[t]); + showstringelement(stderr, sp); + fprintf(stderr, " -- %s\n", typename[newt]); + return T_ERROR; + } + 
*stringlink = sp; + stringlink = &sp->next; + components++; + } + *stringlink = NULL; + *stringp = p; + if (components >= MAXBITS) { + fprintf(stderr, "%s: %s(%d): excessively complicated string\n", + progname, filename, lineno); + return T_ERROR; + } + componentbits |= 1 << components; + return t; +} + + +/* Parse a $ operation at **stringp and update *stringp to point past it. + `fp' is the function whose return value is being parsed. The parsed + item will be put at *sp. Return 0 on success, nonzero on error. */ +int parsedollar(struct function *fp, char **stringp, struct string *sp) { + char *p, *start; + + p = *stringp; + assert(*p == '$'); + start = ++p; + if (*p == '[') + p++; + while (isalnum(*p) || *p == '_') + p++; + if (*start == '[') { + if (*p != ']') { + fprintf(stderr, "%s: %s(%d): missing ] or bad character in $[\n", + progname, filename, lineno); + return 1; + } + *stringp = p + 1; + return parsebitsplice(fp, start + 1, p - start - 1, sp); + } + if (p == start) { + fprintf(stderr, "%s: %s(%d): missing identifier after $\n", progname, + filename, lineno); + return 1; + } + if (p == start + 1) { + if (findvariable(fp, *start, sp) != 0) + return 1; + } else { + if (parsefunctioncall(fp, start, &p, sp) != 0) + return 1; + } + *stringp = p; + return 0; +} + + +/* The representation of a $[...] bitsplice. It is parsed into a + struct entry just as if it were a bitfield parameter, then analysed + into a chain of struct bitsplicebits. These in conjunction with + the constant portion of the struct entry will allow the bitsplice to + be compiled. Each bitsplicebits element represents either a numeric + argument to the current function, in which case it will be shifted + into place; or a bitfield name from the bitfield description of the + current function, in which case it will be shifted by the difference + between the position of the bitfield in the argument and the position + it occurs in the bitsplice. 
`shift' indicates how much to shift left + the associated value; if it is negative the value is shifted right. + For instance, in a function like this: + %oh xx00(%y) $[yyxx] + the bitsplicebits for y will have shift = 2 and value.arg pointing to y, + and those for x will have shift = -2 and value.mask = binary 1100. + As an optimisation, contiguous bitfields that are also contiguous in the + bitsplice will be combined. For instance: + %oh xxyy00 $[0xxyy0] + will compile the same code as: + %oh zzzz00 $[0zzzz0]. + As another optimisation, a bitfield that occupies the entire bitstring + for a function will be treated like a parameter in that it will not be + masked in the bitsplice. For instance: + %oh xxxxxx $[0xxxxxx0] + will compile the same code as: + %oh (%x) $[0xxxxxx0]. */ +struct bitsplice { + struct entry entry; + int nbits; + struct bitsplicebits *splice; +}; +struct bitsplicebits { + struct bitsplicebits *next; + int shift; + enum elementtype type; + union { + struct arg *arg; + bits mask; + } value; +}; + + +int parsebitsplice(struct function *fp, char *bitstring, int nbits, + struct string *sp) { + struct bitsplice *splicep; + struct bitsplicebits *bsp, *lastbsp, **bspp; + struct bits *bp; + int shift, nfrombits, ntobits; + bits allbits, b; + + splicep = xmalloc(sizeof *splicep); + splicep->nbits = nbits; + if (parseentrybits(&splicep->entry, bitstring, nbits, 1) != 0) + return 1; + bspp = &splicep->splice; + lastbsp = NULL; + for (bp = splicep->entry.bits; bp != NULL; bp = bp->next) { + if (findvariable(fp, bp->name, sp) != 0) + return 1; + shift = bp->shift; + if (sp->type == S_BITSTRING) { + nfrombits = bitcount(sp->value.bits->mask); + ntobits = bitcount(bp->mask); + if (warnings) { + if (nfrombits != ntobits) { + fprintf(stderr, "%s: %s(%d): warning: " + "bitstring $%c %ser than its place " + "in bitsplice\n", + progname, filename, lineno, bp->name, + (nfrombits > ntobits) ? 
"bigg" : "small"); + } + } + shift -= sp->value.bits->shift; + + /* See if this bitfield can be combined with a previous contiguous + bitfield. */ + if (lastbsp != NULL && lastbsp->type == S_BITSTRING + && lastbsp->shift == shift) { + lastbsp->value.mask |= sp->value.bits->mask; + continue; + } + } else { + assert(sp->type == S_PARAMETER); + if (sp->value.parameter->type != T_INTEGER) { + fprintf(stderr, + "%s: %s(%d): variable %c in $[...] should be integer\n", + progname, filename, lineno, sp->value.parameter->name); + return 1; + } + } + *bspp = bsp = xmalloc(sizeof *bsp); + bsp->type = sp->type; + bsp->shift = shift; + if (sp->type == S_PARAMETER) + bsp->value.arg = sp->value.parameter; + else + bsp->value.mask = sp->value.bits->mask; + bspp = &bsp->next; + lastbsp = bsp; + } + *bspp = NULL; + + /* Look for a spliced element that is the entire bitstring argument to + this function and therefore doesn't need to be masked. */ + allbits = allbitsset(fp->nbits); + for (bsp = splicep->splice; bsp != NULL; bsp = bsp->next) { + if (bsp->type == S_BITSTRING) { + for (b = bsp->value.mask; b != 0 && !(b & 1); b >>= 1) ; + if (b == allbits) + bsp->value.mask = 0; + } + } + sp->type = S_BITSPLICE; + sp->value.bitsplice = splicep; + return 0; +} + + +int findvariable(struct function *fp, int name, struct string *sp) { + struct bits *bp; + struct arg *ap; + + for (bp = fp->last->bits; bp != NULL; bp = bp->next) { + if (bp->name == name) { + sp->type = S_BITSTRING; + sp->value.bits = bp; + return 0; + } + } + for (ap = fp->args; ap != NULL; ap = ap->next) { + if (ap->name == name) { + sp->type = S_PARAMETER; + sp->value.parameter = ap; + return 0; + } + } + fprintf(stderr, "%s: %s(%d): undefined parameter %c\n", progname, filename, + lineno, name); + return 1; +} + + +int parsefunctioncall(struct function *fp, char *start, char **stringp, + struct string *sp) { + char *p; + struct functioncall *fcp; + struct stringlist **arglink, *arg; + enum type t; + + p = *stringp; + if 
(*p != '(') { + fprintf(stderr, "%s: %s(%d): missing ( after function %.*s\n", progname, + filename, lineno, p - start, start); + return 1; + } + sp->type = S_FUNCTIONCALL; + sp->value.functioncall = fcp = xmalloc(sizeof *fcp); + *p = '\0'; /* Ugly. */ + fcp->function = findfunction(start); + *p = '('; + arglink = &fcp->args; + if (*++p != ')') { + while (1) { + arg = xmalloc(sizeof *arg); + t = makestring(fp, &arg->string, &p, MAKESTRING_MAGIC ",)", + T_UNKNOWN); + if (t == T_ERROR) + return 1; + arg->type = t; + *arglink = arg; + arglink = &arg->next; + if (*p == ')') + break; + assert(*p == ','); + p++; + } + } + *arglink = NULL; + assert(*p == ')'); + *stringp = p + 1; + return 0; +} + + +int parsearray(struct function *fp, char **stringp, struct string *sp, + enum type t) { + char *p; + struct array *ap; + struct stringlist **elementlink, *element; + + p = *stringp; + assert(*p == '{'/*}*/); + sp->type = S_ARRAY; + sp->value.array = ap = xmalloc(sizeof *ap); + ap->tempno = -1; + elementlink = &ap->elements; + ap->type = t; + if (*++p != /*{*/'}') { + while (1) { + element = xmalloc(sizeof *element); + t = makestring(fp, &element->string, &p, + MAKESTRING_MAGIC /*{*/"|}", t); + if (t == T_ERROR) + return 1; + element->type = t; + if (ap->type == T_UNKNOWN) + ap->type = t; + else if (t != T_UNKNOWN && ap->type != t) { + fprintf(stderr, "%s: %s(%d): mixed types in array:\n", + progname, filename, lineno); + showstring(stderr, ap->elements->string); + fprintf(stderr, " -- %s\n", typename[ap->type]); + showstring(stderr, element->string); + fprintf(stderr, " -- %s\n", typename[t]); + return 1; + } + *elementlink = element; + elementlink = &element->next; + if (*p == /*{*/'}') + break; + assert(*p == '|'); + p++; + } + } + *elementlink = NULL; + assert(*p == /*{*/'}'); + if (*++p != '[') { + fprintf(stderr, "%s: %s(%d): missing [index] after array\n", + progname, filename, lineno); + return 1; + } + ++p; + t = makestring(fp, &ap->index, &p, MAKESTRING_MAGIC "]", 
T_INTEGER); + if (t == T_ERROR) + return 1; + if (t == T_STRING) { + fprintf(stderr, "%s: %s(%d): array index cannot be string:\n", + progname, filename, lineno); + showstring(stderr, ap->index); + return 1; + } + if (*p != ']') { + fprintf(stderr, "%s: %s(%d): [ without ]\n", progname, filename, + lineno); + return 1; + } + *stringp = p + 1; + return 0; +} + + +void dumpfunctions() { + struct function *fp; + + for (fp = functions; fp != NULL; fp = fp->next) + dumpfunction(fp); +} + + +void dumpfunction(struct function *fp) { + struct entry *ep; + + for (ep = fp->first; ep != NULL; ep = ep->next) + showentry(stderr, fp, ep, 0); +} + + +/* Entries are not shown exactly as they would be input, since \ would + need to be provided before some characters such as $ or {. But the + characters "|},]" pose a problem since a \ is only needed in certain + contexts and is annoying otherwise. It's not worth doing this right, + since it's only used for error messages. */ +void showentry(FILE *f, struct function *fp, struct entry *ep, bits highlight) { + if (fp->type == T_INTEGER) + putc('%', f); + fprintf(f, "%-*s ", maxfunctionname + 1, fp->name); + if (fp->nbits == 0 && fp->nargs == 0) + fprintf(f, "%-*s", maxargwidth, "()"); + else { + showbits(f, ep, fp->nbits, 0); + showargs(f, fp->args, maxargwidth - fp->nbits); + } + putc(' ', f); + showstring(f, ep->string); + putc('\n', f); + if (highlight != 0) { + fprintf(f, "%-*s ", maxfunctionname + 1, ""); + showbits(f, ep, fp->nbits, highlight); + putc('\n', f); + } +} + + +void showbits(FILE *f, struct entry *ep, int nbits, bits highlight) { + struct bits *bp; + bits i, value; + char zero, one; + + if (nbits == 0) + return; + i = 1 << (nbits - 1); + bp = ep->bits; + if (highlight) { + value = highlight; + zero = ' '; + one = '^'; + } else { + value = ep->value; + zero = '0'; + one = '1'; + } + do { + if (highlight != 0 || (ep->mask & i)) { + putc((value & i) ? 
one : zero, f); + i >>= 1; + } else { + assert(bp != NULL && (bp->mask & i)); + do { + putc(bp->name, f); + i >>= 1; + } while (bp->mask & i); + bp = bp->next; + } + } while (i != 0); +} + + +void showargs(FILE *f, struct arg *ap, int fieldwidth) { + int width; + int lastc; + int isint; + + if (ap == NULL) + width = 0; + else { + width = 1; + lastc = '('; + do { + isint = (ap->type == T_INTEGER); + fprintf(f, "%c%s%c", lastc, isint ? "%" : "", ap->name); + width += 2 + isint; + ap = ap->next; + lastc = ','; + } while (ap != NULL); + putc(')', f); + } + fprintf(f, "%-*s", fieldwidth - width, ""); +} + + +void showstring(FILE *f, struct string *sp) { + for ( ; sp != NULL; sp = sp->next) + showstringelement(f, sp); +} + + +void showstringelement(FILE *f, struct string *sp) { + struct bitsplice *bsp; + + switch (sp->type) { + case S_TEXT: + fputs(sp->value.text, f); + break; + case S_BITSTRING: + fprintf(f, "$%c", sp->value.bits->name); + break; + case S_BITSPLICE: + fprintf(f, "$["); + bsp = sp->value.bitsplice; + showbits(f, &bsp->entry, bsp->nbits, 0); + fprintf(f, "]"); + break; + case S_PARAMETER: + fprintf(f, "$%c", sp->value.parameter->name); + break; + case S_FUNCTIONCALL: + showfunctioncall(f, sp->value.functioncall); + break; + case S_ARRAY: + showarray(f, sp->value.array); + break; + default: + fprintf(stderr, "showstring case %d\n", sp->type); + abort(); + } +} + + +void showfunctioncall(FILE *f, struct functioncall *fcp) { + struct stringlist *sp; + char *last; + + fprintf(f, "$%s(", fcp->function->name); + last = ""; + for (sp = fcp->args; sp != NULL; sp = sp->next) { + fputs(last, f); + last = ","; + showstring(f, sp->string); + } + putc(')', f); +} + + +void showarray(FILE *f, struct array *ap) { + struct stringlist *sp; + char *last; + + putc('{'/*}*/, f); + last = ""; + for (sp = ap->elements; sp != NULL; sp = sp->next) { + fputs(last, f); + last = "|"; + showstring(f, sp->string); + } + fputs(/*{*/"}[", f); + showstring(f, ap->index); + putc(']', f); 
+} + + +const char commonpreamble[] = "\ +typedef %s bits;\n\ +\n\ +"; + +const char concatpreamble[] = "\ +static char *dis_buf;\n\ +static int dis_bufindex, dis_buflen;\n\ +\n\ +void *dis_alloc(size_t size)\n\ +{\n\ + void *p;\n\ + int newindex = dis_bufindex + size;\n\ + if (newindex > dis_buflen) {\n\ + dis_buflen = newindex * 4;\n\ + dis_buf = malloc(dis_buflen);\n\ + /* We can't use realloc because there might be pointers extant into\n\ + the old buffer. So we waste the memory of the old buffer. We\n\ + should soon reach an adequate buffer size and stop leaking. */\n\ + if (dis_buf == 0) {\n\ + perror(\"malloc\");\n\ + exit(1);\n\ + }\n\ + dis_bufindex = 0;\n\ + }\n\ + p = dis_buf + dis_bufindex;\n\ + dis_bufindex = newindex;\n\ + return p;\n\ +}\n\ +\n\ +void dis_done()\n\ +{\n\ + dis_bufindex = 0;\n\ +}\n\ +\n\ +"; + +const char concatdeclarations[] = "\ +#include \n\ +#include \n\ +#include \n\ +\n\ +extern void *dis_realloc(void *p, size_t size); /* User-provided. */\n\ +void *dis_alloc(size_t size);\n\ +void dis_done(void);\n\ +"; + +const char nonconcatpreamble[] = "\ +void dis_done() {}\n\ +"; + + +int outputfunctions() { + struct function *fp; + + outputidentity(stdout); + if (headerfilename != NULL) { + if ((headerfile = fopen(headerfilename, "w")) == NULL) { + fprintf(stderr, "%s: create %s: %s\n", progname, headerfilename, + strerror(errno)); + return 1; + } + outputidentity(headerfile); + fprintf(headerfile, commonpreamble, bitstype); + printf("\n#include \"%s\"\n", headerfilename); + } else + printf(commonpreamble, bitstype); + findarrays(); + if (outputdeclarations() != 0) + return 1; + outputconcats(); + for (fp = functions; fp != NULL; fp = fp->next) { + if (fp->isarray) + functionarray(fp); + } + for (fp = functions; fp != NULL; fp = fp->next) { + if (fp->first != NULL && !fp->isarray) { + if (outputfunction(fp) != 0) + return 1; + } + } + return 0; +} + + +void outputidentity(FILE *f) { + char **p; + + fprintf(f, "/*\n * This file was 
generated by:\n *"); + for (p = global_argv; *p != NULL; p++) + fprintf(f, " %s", *p); + fprintf(f, "\n */\n\n"); +} + + +int outputdeclarations() { + FILE *f = headerfile ? headerfile : stdout; + struct function *fp; + + for (fp = functions; fp != NULL; fp = fp->next) { + if (fp->type != T_UNKNOWN) { + if (fp->isarray) { + fprintf(f, "extern "); + if (fp->fixedlength > 0) + fprintf(f, "char %s[][%d]", fp->name, fp->fixedlength); + else { + compiletype(f, &fp->type); + fprintf(f, "%s[]", fp->name); + } + } else + functionheader(f, fp); + fprintf(f, ";\n"); + } + } + return 0; +} + + +void outputconcats() { + int i; + + if (componentbits & ~3) { + fputs(concatdeclarations, headerfile ? headerfile : stdout); + fputs(concatpreamble, stdout); + } else + fputs(nonconcatpreamble, stdout); + for (i = 2; i < MAXBITS; i++) { + if (componentbits & (1 << i)) + outputconcat(i); + } +} + + +void outputconcat(int n) { + int i; + char *last; + + assert(n > 1); + if (headerfile) { + outputconcatheader(headerfile, n); + fprintf(headerfile, ";\n"); + } + outputconcatheader(stdout, n); + printf("\n{\n void *p;\n int len = "); + last = ""; + for (i = 0; i < n; i++) { + printf("%sstrlen(p%d)", last, i); + last = " + "; + } + printf(";\n p = dis_alloc(len + 1);\n return "); + for (i = 1; i < n; i++) + printf("strcat("); + printf("strcpy(p, p0)"); + for (i = 1; i < n; i++) + printf(", p%d)", i); + printf(";\n}\n\n"); +} + + +void outputconcatheader(FILE *f, int n) { + int i; + char *last = ""; + + fprintf(f, "char *dis_concat%d(", n); + for (i = 0; i < n; i++) { + fprintf(f, "%schar *p%d", last, i); + last = ", "; + } + fprintf(f, ")"); +} + + +void findarrays() { + struct function *fp; + struct entry *ep; + struct string *estr, *indexstr; + struct bits *bp; + + for (fp = functions; fp != NULL; fp = fp->next) { + if (fp->nbits > 0 && fp->nargs > 0) + continue; + if (fp->nargs > 1) + continue; + ep = fp->first; + if (ep == NULL || ep->next != NULL) + continue; + estr = ep->string; + if 
(estr == NULL || estr->next != NULL || estr->type != S_ARRAY) + continue; + indexstr = estr->value.array->index; + if (indexstr->next != NULL) + continue; + if (fp->nbits > 0) { + bp = ep->bits; + if (bp == NULL || bp->next != NULL || bp->shift != 0) + continue; + if (bp->mask != allbitsset(fp->nbits)) + continue; + if (indexstr->type != S_BITSTRING || indexstr->value.bits != bp) + continue; + } else { + if (indexstr->type != S_PARAMETER + || indexstr->value.parameter != fp->args) + continue; + } + if (!simplearray(estr->value.array)) + continue; + fp->isarray = 1; + fp->fixedlength = + (fp->type == T_INTEGER) ? 0 : checkfixedlength(estr->value.array); + } +} + + +int checkfixedlength(struct array *ap) { + int len, maxlen, wasted, n; + struct stringlist *lp; + + maxlen = 0; + for (lp = ap->elements; lp != NULL; lp = lp->next) { + if (lp->string == NULL) + continue; + assert(lp->string->type == S_TEXT); + len = strlen(lp->string->value.text); + if (len > maxlen) + maxlen = len; + } + for (wasted = n = 0, lp = ap->elements; lp != NULL; n++, lp = lp->next) { + if (lp->string == NULL) + continue; + wasted += maxlen - strlen(lp->string->value.text); + } + if (wasted < n * sizeof(char *)) /* Should be target's sizeof. 
*/ + return maxlen + 1; + return 0; +} + + +int outputfunction(struct function *fp) { + printf("\n"); + functionheader(stdout, fp); + printf("\n{\n"/*}*/); + switch (functionswitch(fp, 0, 0)) { + case -1: + return 1; + case 0: + if (warnings) { + fprintf(stderr, "%s: warning: not all cases of %s covered\n", + progname, fp->name); + } + } + printf(/*{*/"}\n"); + return 0; +} + + +void functionarray(struct function *fp) { + struct array *ap; + + ap = fp->first->string->value.array; + printf("\n"); + compilesimplearray(&fp->type, fp->name, 0, ap); +} + + +void functionheader(FILE *f, struct function *fp) { + char *last; + struct arg *ap; + + compiletype(f, &fp->type); + fprintf(f, "%s(", fp->name); + last = ""; + if (fp->nbits > 0) { + fprintf(f, "bits code"); + last = ", "; + } + for (ap = fp->args; ap != NULL; ap = ap->next) { + fprintf(f, last); + compiletype(f, &ap->type); + putc(ap->name, f); + last = ", "; + } + if (*last == '\0') + fprintf(f, "void"); + putc(')', f); +} + + +int simplearray(struct array *ap) { + struct stringlist *lp; + + for (lp = ap->elements; lp != NULL; lp = lp->next) { + if (lp->string != NULL + && (lp->string->next != NULL || lp->string->type != S_TEXT)) + break; + } + return (lp == NULL); +} + + +void compiletype(FILE *f, enum type *tp) { + switch (*tp) { + case T_UNKNOWN: + *tp = T_STRING; + /* Fall in... */ + case T_STRING: + fprintf(f, "char *"); + break; + case T_INTEGER: + fprintf(f, "bits "); + break; + default: + fprintf(stderr, "compiletype type %d\n", *tp); + abort(); + } +} + + +/* Generate code for entries in function fp whose bitstring b satisfies + the constraint (b & mask) == value. Return 1 if generated switch + always does `return', 0 if not, -1 on error. + The algorithm is as follows. Scan the eligible entries to find the + largest set of bits not in the passed-in mask which always have a + constant value (are not variable). One `default' entry is allowed + all of whose bits are variable. 
For each value of the constant bits, + generate a `switch' case and invoke the function recursively with + that value included in the constraint parameters. The recursion + stops when no set of constant bits is found, perhaps because the + mask parameter has all bits set. + This algorithm could be improved. Currently it will fail if there + are input lines "xxyy", "00xx" and "yy00", each of which is default with + respect to the others. The correct behaviour would then be to select + a bit that is sometimes constant and deal with those cases first. + But this problem has not yet arisen in real life. */ +int functionswitch(struct function *fp, bits mask, bits value) { + struct entry *ep, *defaultcase; + bits allbits, constbits, missingcases; + int nhits, ncases, nconstbits, alwaysreturns; + + indentation++; + allbits = allbitsset(fp->nbits); + constbits = allbits & ~mask; + if (debug) { + findent(stderr); + fprintf(stderr, + "functionswitch(%s): (x & 0x%lx) == 0x%lx; const == 0x%lx\n", + fp->name, mask, value, constbits); + } + defaultcase = NULL; + ncases = nhits = 0; + alwaysreturns = 1; + for (ep = fp->first; ep != NULL; ep = ep->next) { + /* If this is not one of the entries under consideration, skip. */ + if (ep->done + || (ep->mask & mask) != mask || (ep->value & mask) != value) + continue; + if (debug) { + findent(stderr); + showentry(stderr, fp, ep, 0); + } + /* If this entry has no constant bits in the still-variable portion, + it's the default. 
*/ + if ((constbits & ep->mask) == 0) { + if (defaultcase != NULL) { + fprintf(stderr, + "%s: function %s: unable to distinguish between:\n", + progname, fp->name); + showentry(stderr, fp, defaultcase, 0); + showentry(stderr, fp, ep, 0); + return -1; + } + defaultcase = ep; + if (debug) { + findent(stderr); + fprintf(stderr, "^^ default case\n"); + } + } else { + if (debug && (constbits & ~ep->mask)) { + findent(stderr); + fprintf(stderr, "const now 0x%lx\n", constbits & ep->mask); + } + constbits &= ep->mask; + nhits++; + } + } + if (nhits > 0) { + indent(); + if (constbits == allbits) + printf("switch (code) {\n"/*}*/); + else + printf("switch (code & 0x%lx) {\n"/*}*/, constbits); + for (ep = fp->first; ep != NULL; ep = ep->next) { + /* If this is not one of the entries under consideration, skip. */ + if ((ep->mask & mask) != mask || (ep->value & mask) != value) + continue; + if (ep->done || ep == defaultcase) + continue; + ncases++; + indent(); + printf("case 0x%lx:\n", ep->value & constbits); + switch (functionswitch(fp, mask | constbits, + value | (ep->value & constbits))) { + case -1: + return -1; + case 0: + alwaysreturns = 0; + indentation++; indent(); indentation--; + printf("break;\n"); + } + } + indent(); + printf(/*{*/"}\n"); + } + nconstbits = bitcount(constbits); + missingcases = ((nconstbits == MAXBITS) ? 0 : 1 << nconstbits) - ncases; + if (alwaysreturns) { + switch (missingcases) { + case 0: + if (defaultcase != NULL) { + fprintf(stderr, "%s: warning: redundant entry:\n", progname); + showentry(stderr, fp, defaultcase, 0); + defaultcase = NULL; + } + break; + case 1: + if (defaultcase != NULL && nconstbits != 0) { + fprintf(stderr, + "%s: warning: variable bit(s) could be constant:\n", + progname); + showentry(stderr, fp, defaultcase, constbits); + break; + } + /* Fall in... 
*/ + default: + alwaysreturns = 0; + } + } + if (defaultcase != NULL) { + /* If defaultcase has some constant bits of its own, recursion will + check that they have the required value. */ + if ((defaultcase->mask & ~mask) == 0) { + alwaysreturns = 1; + if (compilestring(-1, defaultcase->string, fp->type) != 0) + return -1; + defaultcase->done = 1; + } else { + indentation--; + alwaysreturns = functionswitch(fp, mask, value); + indentation++; + } + } + indentation--; + return alwaysreturns; +} + + +int compilestring(int assignto, struct string *sp, enum type type) { + int tempno; + + tempno = walkstring(sp, COUNTARRAYS, assignto); + if (tempno > assignto) { + indent(); + printf("{\n"/*}*/); + indentation++; + (void) walkstring(sp, DECLAREARRAYS, assignto); + if (walkstring(sp, COMPILEARRAYS, assignto) < 0) + return 1; + } + if (compilecheckedstring(assignto, sp, type) != 0) + return 1; + if (tempno > assignto) { + indentation--; + indent(); + printf(/*{*/"}\n"); + } + return 0; +} + + +int compilecheckedstring(int assignto, struct string *sp, enum type type) { + compileassign(assignto); + if (compileconcat(sp, type) != 0) + return 1; + printf(";\n"); + return 0; +} + + +void compileassign(int assignto) { + indent(); + if (assignto < 0) + printf("return "); + else { + compiletemp(assignto); + printf(" = "); + } +} + + +void compiletemp(int tempno) { + printf("t__%d", tempno); +} + + +void compiletext(char *s) { + putchar('"'); + if (s != NULL) { + for ( ; *s != '\0'; s++) { + switch (*s) { + case '"': + case '\\': + putchar('\\'); + } + putchar(*s); + } + } + putchar('"'); +} + + +int compileconcat(struct string *sp, enum type type) { + int elements; + struct string *sp1; + char *last; + + if (sp == NULL) + return compilenull(type); + if (sp->next == NULL) + return compilesimple(sp, type); + if (type != T_INTEGER) { + for (elements = 0, sp1 = sp; sp1 != NULL; elements++, sp1 = sp1->next) ; + printf("dis_concat%d(", elements); + } + last = ""; + for (sp1 = sp; sp1 != 
NULL; sp1 = sp1->next) { + printf(last); + if (type != T_INTEGER) + last = ", "; + if (sp1->type == S_ARRAY) + compilearrayref(sp1->value.array); + else + if (compilesimple(sp1, type) != 0) + return 1; + } + if (type != T_INTEGER) + printf(")"); + return 0; +} + + +int compilenull(enum type type) { + if (type == T_INTEGER) { + fprintf(stderr, "%s: empty integer expression\n", progname); + return 1; + } + printf("\"\""); + return 0; +} + + +int compilesimple(struct string *sp, enum type type) { + if (sp == NULL) + return compilenull(type); + switch (sp->type) { + case S_TEXT: + if (type == T_INTEGER) + printf("%s", sp->value.text); + else + compiletext(sp->value.text); + break; + case S_BITSTRING: + compilebitstring(sp->value.bits); + break; + case S_BITSPLICE: + compilebitsplice(sp->value.bitsplice); + break; + case S_PARAMETER: + putchar(sp->value.parameter->name); + break; + case S_FUNCTIONCALL: + return compilefunctioncall(sp); + case S_ARRAY: + if (compilearrayref(sp->value.array) != 0) + return 1; + break; + default: + fprintf(stderr, "compilesimple case %d", sp->type); + abort(); + } + return 0; +} + + +int compilearrayref(struct array *ap) { + compiletemp(ap->tempno); + if (simplearray(ap)) { + printf("["); + if (compileconcat(ap->index, T_INTEGER) != 0) + return 1; + printf("]"); + } + return 0; +} + + +int compilefunctioncall(struct string *sp) { + struct function *fp; + struct stringlist *actualp; + struct arg *formalp; + char *last; + int nbits; + enum type formaltype; + + assert(sp->type == S_FUNCTIONCALL); + fp = sp->value.functioncall->function; + printf("%s%c", fp->name, fp->isarray ? 
'[' : '('); + last = ""; + nbits = fp->nbits; + formalp = fp->args; + actualp = sp->value.functioncall->args; + while (actualp != NULL) { + if (nbits > 0) { + nbits = 0; + formaltype = T_INTEGER; + } else { + if (formalp == NULL) { + fprintf(stderr, "%s: too many arguments to %s:\n", progname, + fp->name); + showstring(stderr, sp); + putc('\n', stderr); + return 1; + } + formaltype = formalp->type; + formalp = formalp->next; + } + if (actualp->type != T_UNKNOWN && actualp->type != formaltype) { + fprintf(stderr, "%s: argument to %s has the wrong type:\n", + progname, fp->name); + showstring(stderr, actualp->string); + putc('\n', stderr); + return 1; + } + printf(last); + last = ", "; + if (compileconcat(actualp->string, formaltype) != 0) + return 1; + actualp = actualp->next; + } + putchar(fp->isarray ? ']' : ')'); + return 0; +} + + +int walkstring(struct string *sp, enum walkstringop op, int tempno) { + struct stringlist *lp; + struct array *ap; + + for ( ; sp != NULL; sp = sp->next) { + switch (sp->type) { + case S_ARRAY: + ap = sp->value.array; + for (lp = ap->elements; lp != NULL; lp = lp->next) + tempno = walkstring(lp->string, op, tempno); + tempno = walkstring(ap->index, op, tempno); + ap->tempno = ++tempno; + switch (op) { + case DECLAREARRAYS: + if (simplearray(ap)) { + indent(); + printf("static "); + compilesimplearray(&ap->type, NULL, tempno, ap); + } else + declarearray(ap); + break; + case COMPILEARRAYS: + if (!simplearray(ap)) + if (compilearray(ap) != 0) + return -1; + break; + default: + break; + } + break; + case S_FUNCTIONCALL: + for (lp = sp->value.functioncall->args; lp != NULL; lp = lp->next) + tempno = walkstring(lp->string, op, tempno); + break; + default: + break; + } + } + return tempno; +} + + +int compilearray(struct array *ap) { + struct stringlist *ep; + int i; + + indent(); + printf("switch ("); + if (compileconcat(ap->index, T_INTEGER) != 0) + return 1; + printf(") {\n"/*}*/); + for (i = 0, ep = ap->elements; ep != NULL; i++, ep = 
ep->next) { + indent(); + printf("case %d:\n", i); + indentation++; + if (compilecheckedstring(ap->tempno, ep->string, ap->type) != 0) + return 1; + indent(); + printf("break;\n"); + indentation--; + } + indent(); + printf(/*{*/"}\n"); + return 0; +} + + +void compilesimplearray(enum type *tp, char *name, int num, struct array *ap) { + struct stringlist *lp; + int fixedlength; + + fixedlength = (*tp == T_INTEGER) ? 0 : checkfixedlength(ap); + if (fixedlength > 0) + printf("char "); + else + compiletype(stdout, tp); + if (name != NULL) + printf(name); + else + compiletemp(num); + printf("[]"); + if (fixedlength > 0) + printf("[%d]", fixedlength); + printf(" = {\n"/*}*/); + indentation++; + for (lp = ap->elements; lp != NULL; lp = lp->next) { + indent(); + compilesimple(lp->string, lp->type); + printf(",\n"); + } + indentation--; + indent(); + printf(/*{*/"};\n"); +} + + +void declarearray(struct array *ap) { + indent(); + compiletype(stdout, &ap->type); + compiletemp(ap->tempno); + printf(";\n"); +} + + +void compilebitstring(struct bits *bp) { + printf("("); + if (bp->shift != 0) + printf("("); + printf("code & 0x%lx", bp->mask); + if (bp->shift != 0) + printf(") >> %d", bp->shift); + printf(")"); +} + + +void compilebitsplice(struct bitsplice *splicep) { + struct bitsplicebits *bsp; + char *last = ""; + + printf("("); + for (bsp = splicep->splice; bsp != NULL; bsp = bsp->next) { + printf(last); + last = " | "; + if (bsp->type == S_PARAMETER) + putchar(bsp->value.arg->name); + else { + assert(bsp->type == S_BITSTRING); + if (bsp->value.mask == 0) + printf("code"); + else + printf("(code & 0x%lx)", bsp->value.mask); + } + if (bsp->shift > 0) + printf(" << %d", bsp->shift); + else if (bsp->shift < 0) + printf(" >> %d", -bsp->shift); + } + if (splicep->entry.value != 0) + printf("%s0x%lx", last, splicep->entry.value); + printf(")"); +} + + +int bitcount(bits x) { + int nbits; + + for (nbits = 0; x != 0; x >>= 1) { + if (x & 1) + nbits++; + } + return nbits; +} + + 
+bits allbitsset(int nbits) { + return (nbits == MAXBITS) ? ~0 : (1 << nbits) - 1; +} + + +void findent(FILE *f) { + int i; + + for (i = 1; i < indentation; i += 2) + putc('\t', f); + if (i == indentation) + fputs(" ", f); +} + + +void indent() { + findent(stdout); +} + + +void *xrealloc(char *oldp, size_t size) { + void *p; + + if (oldp == NULL) + p = malloc(size); + else + p = realloc(oldp, size); + if (p == NULL) { + fprintf(stderr, "%s: allocate of %d bytes failed: %s\n", progname, + (int) size, strerror(errno)); + exit(1); + } + return p; +} + + +void *xmalloc(size_t size) { + return xrealloc(NULL, size); +} + + +void *xstrdup(char *s) { + char *p; + + p = xmalloc(strlen(s) + 1); + strcpy(p, s); + return p; +} + + +int prematureeof() { + fprintf(stderr, "%s: %s(%d): premature end of file\n", progname, filename, + lineno); + return 1; +} diff --git a/osfmk/ddb/nlist.h b/osfmk/ddb/nlist.h new file mode 100644 index 000000000..54d868455 --- /dev/null +++ b/osfmk/ddb/nlist.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.2 1995/01/06 19:11:11 devrcs + * mk6 CR668 - 1.3b26 merge + * Add padding for alpha, make n_other unsigned, + * fix erroneous def of N_FN. + * [1994/10/14 03:40:03 dwm] + * + * Revision 1.1.11.1 1994/09/23 01:23:37 ezf + * change marker to not FREE + * [1994/09/22 21:11:49 ezf] + * + * Revision 1.1.4.3 1993/07/27 18:28:42 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:44 elliston] + * + * Revision 1.1.4.2 1993/06/02 23:13:34 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:58:08 jeffc] + * + * Revision 1.1 1992/09/30 02:24:29 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 15:38:20 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:07:42 mrt + * Changed to new Mach copyright + * [91/01/31 16:20:26 mrt] + * + * 11-Aug-88 David Golub (dbg) at Carnegie-Mellon University + * Added n_un, n_strx definitions for kernel debugger (from + * a.out.h). + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * nlist.h - symbol table entry structure for an a.out file + * derived from FSF's a.out.gnu.h + * + */ + +#ifndef _DDB_NLIST_H_ +#define _DDB_NLIST_H_ + +struct nlist { + union n_un { + char *n_name; /* symbol name */ + long n_strx; /* index into file string table */ + } n_un; + unsigned char n_type; /* type flag, i.e. N_TEXT etc; see below */ + unsigned char n_other; /* unused */ + short n_desc; /* see */ +#if defined(__alpha) + int n_pad; /* alignment, used to carry framesize info */ +#endif + vm_offset_t n_value; /* value of this symbol (or sdb offset) */ +}; + +/* + * Simple values for n_type. + */ +#define N_UNDF 0 /* undefined */ +#define N_ABS 2 /* absolute */ +#define N_TEXT 4 /* text */ +#define N_DATA 6 /* data */ +#define N_BSS 8 /* bss */ +#define N_FN 0x1e /* file name symbol */ +#define N_EXT 1 /* external bit, or'ed in */ +#define N_TYPE 0x1e /* mask for all the type bits */ +#define N_STAB 0xe0 /* if any of these bits set, a SDB entry */ + +#endif /* !_DDB_NLIST_H_ */ diff --git a/osfmk/ddb/orig/db_print.c b/osfmk/ddb/orig/db_print.c new file mode 100644 index 000000000..59c51daa9 --- /dev/null +++ b/osfmk/ddb/orig/db_print.c @@ -0,0 +1,1380 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/29 17:35:25 mburg + * MK7.3 merger + * + * Revision 1.2.85.1 1998/02/03 09:24:09 gdt + * Merge up to MK7.3 + * [1998/02/03 09:10:24 gdt] + * + * Revision 1.2.81.1 1997/03/27 18:46:38 barbou + * ri-osc CR1565 - clean up db_print_act, removing old !USER code + * which had gotten stale (the option made little sense here anyway). + * Added routine db_show_one_thread() to take either act/shuttle and + * do something sensible. [dwm] Also rationalize plain, /u and /l + * output for "show act", "show task" and "show all acts". + * [1995/08/28 15:47:00 bolinger] + * [97/02/25 barbou] + * + * Revision 1.2.31.13 1996/01/09 19:16:02 devrcs + * Alpha kdebug Changes: + * Correct various header spacing to account for 64-bit addresses. + * Modify db_show_all_*() functions, so the can be called from kdebug. + * ( There's no way to call with "char *modif", so added NULL check. ) + * Changed db_error() calls to DB_ERROR() macro, so we return on error + * on Alpha (we gotta return to kdebug). + * Changed declarations of 'register foo' to 'register int foo'. + * [1995/12/01 21:42:20 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. 
+ * [1995/11/21 18:03:24 jfraser] + * + * Revision 1.2.31.12 1995/10/09 17:03:30 devrcs + * Merge forward. + * [1995/08/24 20:56:42 watkins] + * + * Revision 1.2.59.1 1995/08/04 17:03:17 watkins + * Change to stack per shuttle model. + * [1995/07/19 20:26:13 watkins] + * + * Revision 1.2.31.11 1995/09/18 19:08:49 devrcs + * Merge forward. + * [1995/08/24 20:56:42 watkins] + * + * Revision 1.2.59.1 1995/08/04 17:03:17 watkins + * Change to stack per shuttle model. + * [1995/07/19 20:26:13 watkins] + * + * Revision 1.2.31.10 1995/05/19 15:43:04 bernadat + * Fixed db_print_act for empty activations. + * Let thread swapping be configurable. + * [95/05/19 bernadat] + * + * Revision 1.2.31.9 1995/05/14 18:10:25 dwm + * ri-osc CR1304 - merge (nmk19_latest - nmk19b1) diffs into mainline. + * mk6 CR938 - restore mach_msg hot path + * remove use of now-defunct fields in thread [mmp,dwm] + * [1995/05/14 17:25:05 dwm] + * + * Revision 1.2.31.8 1995/04/07 18:53:00 barbou + * VM Merge - Task Swapper. + * Renamed TH_SWAPPED to TH_STACK_HANDOFF and swap_func to continuation + * to resolve name conflict. + * From kernel/kdb/kdb_mach.c: + * Put in changes for swapping. + * [1991/11/21 20:32:15 mmp] + * [94/07/27 barbou] + * [95/03/08 barbou] + * + * Revision 1.2.31.7 1995/02/28 01:58:38 dwm + * mk6 CR1120 - Merge mk6pro_shared into cnmk_shared + * * Rev1.2.43.1 1995/01/27 22:01:26 bolinger + * * Fix ri-osc CR977: Make "show space" and "show ipc_port" give + * * accurate count of ports active in IPC space. Make "show ipc_port" + * * output task-visible port name. + * [1995/02/28 01:12:46 dwm] + * + * Revision 1.2.31.6 1995/02/23 21:43:34 alanl + * Fix db_show_one_task_vm for thread_act_ts. + * [95/01/09 rwd] + * + * Merged with DIPC2_SHARED. + * [95/01/04 alanl] + * + * Revision 1.2.31.5 1995/01/10 04:49:52 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * Fix "sh thr/ul"; no cont. to print, fix pri/policy format. 
+ * * Rev 1.2.31.4 1994/10/11 16:35:58 emcmanus + * Added "show runq" and "show shuttle". + * [1994/12/09 20:36:49 dwm] + * + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.8.6 1994/05/06 18:39:37 tmt + * Merged osc1.3dec/shared with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. + * * End1.3merge + * [1994/11/04 08:49:52 dwm] + * + * Revision 1.2.31.3 1994/09/23 01:20:51 ezf + * change marker to not FREE + * [1994/09/22 21:10:41 ezf] + * + * Revision 1.2.31.2 1994/06/14 17:21:05 bolinger + * Merge up to NMK17.2. + * [1994/06/14 17:20:35 bolinger] + * + * Revision 1.2.23.4 1994/04/15 18:41:31 paire + * Changed interface of db_task_from_space routine. + * [94/03/31 paire] + * + * Revision 1.2.23.3 1994/03/07 16:37:48 paire + * Merge with Intel R1_1 + * Change from NMK14.10 [1993/11/15 16:06:21 rwd] + * + * Enhanced pretty print routine and added db_task_from_space. + * Change from NMK14.10 [93/09/24 sjs] + * [94/02/21 paire] + * + * Exported ANSI prototype of db_port_kmsg_count routine. + * Added header file include for the declaration of db_norma_ipc routine. + * [94/02/15 paire] + * + * Revision 1.2.23.2 1994/02/11 14:21:58 paire + * Added new vm_print.h header file for db_vm declaration. + * [94/02/09 paire] + * + * Revision 1.2.23.1 1994/02/08 10:58:19 bernadat + * print out msgcount for each port in db_port_iterate + * Change from NORMA_MK14.6(August 93) [1993/07/27 12:35:17 mmp] + * + * Removed defintion of db_maxoff (got from ). + * [93/08/12 paire] + * + * Show ipc_space_remote msg counts only if NORMA_IPC is on + * [93/07/21 bernadat] + * + * Add /s option to "show ipc_port" to pick out port sets. + * Change from NORMA_MK14.6 [1993/02/17 16:29:54 dwm] + * [93/07/16 bernadat] + * [94/02/07 bernadat] + * + * Revision 1.2.20.8 1994/06/08 19:11:15 dswartz + * Preemption merge. 
+ * [1994/06/08 19:10:18 dswartz] + * + * Revision 1.2.20.7 1994/04/30 21:28:24 bolinger + * Thread control ops synchronization: now that TH_SUSP is back, + * enable ddb to show it when printing thread state. + * [1994/04/28 21:55:42 bolinger] + * + * Revision 1.2.20.6 1994/03/17 22:35:31 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:25:46 dwm] + * + * Revision 1.2.20.5 1994/01/26 15:43:37 bolinger + * Move kernel_stack from thread to activation. + * [1994/01/25 21:53:11 bolinger] + * + * Revision 1.2.20.4 1994/01/12 17:50:44 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:13:12 dwm] + * + * Revision 1.2.20.3 1993/11/18 18:11:47 dwm + * Coloc: remove continuations entirely; they are incompatible + * with migration, and their volume is obfuscatory. + * [1993/11/18 18:06:27 dwm] + * + * Revision 1.2.20.2 1993/10/12 16:38:50 dwm + * CoLoc: neuter continuations, ifdef USE_CONTINUATIONS. + * [1993/10/12 16:14:46 dwm] + * + * Revision 1.2.8.4 1993/08/11 20:38:06 elliston + * Add ANSI Prototypes. CR #9523. + * [1993/08/11 03:33:51 elliston] + * + * Revision 1.2.8.3 1993/07/27 18:27:55 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:12:39 elliston] + * + * Revision 1.2.8.2 1993/06/09 02:20:35 gm + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:57:22 jeffc] + * + * Removed a '#if MACH_FIXPRI' which somehow survived the purge. CR #9131. + * [1993/05/11 20:56:00 dswartz] + * + * Revision 1.2 1993/04/19 16:02:50 devrcs + * Added printout of thread scheduling policy to long form + * of thread display. + * [93/01/28 jat] + * + * Changes from mk78: + * Removed unused variable from db_show_regs(). + * [92/05/16 jfriedl] + * Converted some db_printsyms to db_task_printsyms. 
+ * [92/04/10 danner] + * Changed db_print_thread so that both display formats + * show the floating-point-used status of the thread. + * [92/03/16 rpd] + * [93/02/02 bruel] + * + * Revision 1.1 1992/09/30 02:01:18 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.11.3.2 92/04/08 15:43:10 jeffreyh + * Added i option to show thread. This gives wait state information. + * [92/04/08 sjs] + * + * Revision 2.11.3.1 92/03/03 16:13:34 jeffreyh + * Pick up changes from TRUNK + * [92/02/26 11:00:01 jeffreyh] + * + * Revision 2.13 92/02/20 18:34:28 elf + * Fixed typo. + * [92/02/20 elf] + * + * Revision 2.12 92/02/19 15:07:47 elf + * Added db_thread_fp_used, to avoid machine-dependent conditionals. + * [92/02/19 rpd] + * + * Added 'F' flag to db_thread_stat showing if the thread has a valid + * FPU context. Tested on i386 and pmax. + * [92/02/17 kivinen] + * + * Revision 2.11 91/11/12 11:50:32 rvb + * Added OPTION_USER ("/u") to db_show_all_threads, db_show_one_thread, + * db_show_one_task. Without it, we display old-style information. + * [91/10/31 rpd] + * + * Revision 2.10 91/10/09 16:01:48 af + * Supported "show registers" for non current thread. + * Changed display format of thread and task information. + * Changed "show thread" to print current thread information + * if no thread is specified. + * Added "show_one_task" for "show task" command. + * Added IPC port print routines for "show ipc_port" command. + * [91/08/29 tak] + * + * Revision 2.9 91/08/03 18:17:19 jsb + * In db_print_thread, if the thread is swapped and there is a + * continuation function, print the function name in parentheses + * instead of '(swapped)'. + * [91/07/04 09:59:27 jsb] + * + * Revision 2.8 91/07/31 17:30:43 dbg + * Revise scheduling state machine. + * [91/07/30 16:43:42 dbg] + * + * Revision 2.7 91/07/09 23:15:57 danner + * Fixed a few printf that should be db_printfs. 
+ * [91/07/08 danner] + * + * Revision 2.6 91/05/14 15:35:25 mrt + * Correcting copyright + * + * Revision 2.5 91/02/05 17:06:53 mrt + * Changed to new Mach copyright + * [91/01/31 16:18:56 mrt] + * + * Revision 2.4 90/10/25 14:43:54 rwd + * Changed db_show_regs to print unsigned. + * [90/10/19 rpd] + * Generalized the watchpoint support. + * [90/10/16 rwd] + * + * Revision 2.3 90/09/09 23:19:52 rpd + * Avoid totally incorrect guesses of symbol names for small values. + * [90/08/30 17:39:08 af] + * + * Revision 2.2 90/08/27 21:51:49 dbg + * Insist that 'show thread' be called with an explicit address. + * [90/08/22 dbg] + * + * Fix type for db_maxoff. + * [90/08/20 dbg] + * + * Do not dereference the "valuep" field of a variable directly, + * call the new db_read/write_variable functions instead. + * Reflected changes in symbol lookup functions. + * [90/08/20 af] + * Reduce lint. + * [90/08/10 14:33:44 dbg] + * + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +/* + * Miscellaneous printing. + */ +#include +#include + +#include /* For strlen() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for db_vm() */ + +#include +#include + +#include +#include +#include +#include +#include +#include /* For db_printf() */ +#include + +#include +#include /*** ??? fix so this can be removed ***/ + +#if TASK_SWAPPER +#include +#endif /* TASK_SWAPPER */ + +/* Prototypes for functions local to this file. XXX -- should be static! + */ + +char *db_act_stat( + register thread_act_t thr_act, + char *status); + +char *db_act_swap_stat( + register thread_act_t thr_act, + char *status); + +void db_print_task( + task_t task, + int task_id, + int flag); + +void db_reset_print_entry( + void); + +void db_print_one_entry( + ipc_entry_t entry, + int index, + mach_port_name_t name, + boolean_t is_pset); + +int db_port_iterate( + thread_act_t thr_act, + boolean_t is_pset, + boolean_t do_output); + +ipc_port_t db_lookup_port( + thread_act_t thr_act, + int id); + +static void db_print_port_id( + int id, + ipc_port_t port, + unsigned bits, + int n); + +void db_print_act( + thread_act_t thr_act, + int act_id, + int flag); + +void db_print_space( + task_t task, + int task_id, + int flag); + +void db_print_task_vm( + task_t task, + int task_id, + boolean_t title, + char *modif); + +void db_system_stats(void); + + +void +db_show_regs( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + register struct db_variable *regp; + db_expr_t 
value; + db_addr_t offset; + char * name; + register int i; + struct db_var_aux_param aux_param; + task_t task = TASK_NULL; + + aux_param.modif = modif; + aux_param.thr_act = THR_ACT_NULL; + if (db_option(modif, 't')) { + if (have_addr) { + if (!db_check_act_address_valid((thread_act_t)addr)) + return; + aux_param.thr_act = (thread_act_t)addr; + } else + aux_param.thr_act = db_default_act; + if (aux_param.thr_act != THR_ACT_NULL) + task = aux_param.thr_act->task; + } + for (regp = db_regs; regp < db_eregs; regp++) { + if (regp->max_level > 1) { + db_printf("bad multi-suffixed register %s\n", regp->name); + continue; + } + aux_param.level = regp->max_level; + for (i = regp->low; i <= regp->high; i++) { + aux_param.suffix[0] = i; + db_read_write_variable(regp, &value, DB_VAR_GET, &aux_param); + if (regp->max_level > 0) + db_printf("%s%d%*s", regp->name, i, + 12-strlen(regp->name)-((i<10)?1:2), ""); + else + db_printf("%-12s", regp->name); + db_printf("%#*N", 2+2*sizeof(vm_offset_t), value); + db_find_xtrn_task_sym_and_offset((db_addr_t)value, &name, + &offset, task); + if (name != 0 && offset <= db_maxoff && offset != value) { + db_printf("\t%s", name); + if (offset != 0) + db_printf("+%#r", offset); + } + db_printf("\n"); + } + } +} + +#define OPTION_LONG 0x001 /* long print option */ +#define OPTION_USER 0x002 /* print ps-like stuff */ +#define OPTION_INDENT 0x100 /* print with indent */ +#define OPTION_THREAD_TITLE 0x200 /* print thread title */ +#define OPTION_TASK_TITLE 0x400 /* print thread title */ + +#ifndef DB_TASK_NAME +#define DB_TASK_NAME(task) /* no task name */ +#define DB_TASK_NAME_TITLE "" /* no task name */ +#endif /* DB_TASK_NAME */ + +#ifndef db_act_fp_used +#define db_act_fp_used(thr_act) FALSE +#endif + +char * +db_act_stat( + register thread_act_t thr_act, + char *status) +{ + register char *p = status; + + if (!thr_act->active) { + *p++ = 'D', + *p++ = 'y', + *p++ = 'i', + *p++ = 'n', + *p++ = 'g'; + *p++ = ' '; + } else if (!thr_act->thread) { 
+ *p++ = 'E', + *p++ = 'm', + *p++ = 'p', + *p++ = 't', + *p++ = 'y'; + *p++ = ' '; + } else { + thread_t athread = thr_act->thread; + + *p++ = (athread->state & TH_RUN) ? 'R' : '.'; + *p++ = (athread->state & TH_WAIT) ? 'W' : '.'; + *p++ = (athread->state & TH_SUSP) ? 'S' : '.'; + *p++ = (athread->state & TH_SWAPPED_OUT) ? 'O' : '.'; + *p++ = (athread->state & TH_UNINT) ? 'N' : '.'; + /* show if the FPU has been used */ + *p++ = db_act_fp_used(thr_act) ? 'F' : '.'; + } + *p++ = 0; + return(status); +} + +char * +db_act_swap_stat( + register thread_act_t thr_act, + char *status) +{ + register char *p = status; + +#if THREAD_SWAPPER + switch (thr_act->swap_state & TH_SW_STATE) { + case TH_SW_UNSWAPPABLE: + *p++ = 'U'; + break; + case TH_SW_IN: + *p++ = 'I'; + break; + case TH_SW_GOING_OUT: + *p++ = 'G'; + break; + case TH_SW_WANT_IN: + *p++ = 'W'; + break; + case TH_SW_OUT: + *p++ = 'O'; + break; + case TH_SW_COMING_IN: + *p++ = 'C'; + break; + default: + *p++ = '?'; + break; + } + *p++ = (thr_act->swap_state & TH_SW_TASK_SWAPPING) ? 'T' : '.'; +#endif /* THREAD_SWAPPER */ + *p++ = 0; + + return status; +} + +char *policy_list[] = { "TS", "RR", "??", "FF", + "??", "??", "??", "BE"}; + +void +db_print_act( + thread_act_t thr_act, + int act_id, + int flag) +{ + thread_t athread; + char status[8]; + char swap_status[3]; + char *indent = ""; + int policy; + + if (!thr_act) { + db_printf("db_print_act(NULL)!\n"); + return; + } + + athread = thr_act->thread; + if (flag & OPTION_USER) { + + if (flag & OPTION_LONG) { + if (flag & OPTION_INDENT) + indent = " "; + if (flag & OPTION_THREAD_TITLE) { + db_printf("%s ID: ACT STAT SW STACK SHUTTLE", indent); + db_printf(" SUS PRI WAIT_FUNC\n"); + } + policy = (athread ? athread->policy : 2); + db_printf("%s%3d%c %0*X %s %s %0*X %0*X %3d %3d/%s ", + indent, act_id, + (thr_act == current_act())? 
'#': ':', + 2*sizeof(vm_offset_t), thr_act, + db_act_stat(thr_act, status), + db_act_swap_stat(thr_act, swap_status), + 2*sizeof(vm_offset_t), (athread ?athread->kernel_stack:0), + 2*sizeof(vm_offset_t), athread, + thr_act->suspend_count, + (athread ? athread->sched_pri : 999), /* XXX */ + policy_list[policy-1]); + if (athread) { + /* no longer TH_SWAP, no continuation to print */ + if (athread->state & TH_WAIT) + db_task_printsym((db_addr_t)athread->wait_event, + DB_STGY_ANY, kernel_task); + } + db_printf("\n"); + } else { + if (act_id % 3 == 0) { + if (flag & OPTION_INDENT) + db_printf("\n "); + } else + db_printf(" "); + db_printf("%3d%c(%0*X,%s)", act_id, + (thr_act == current_act())? '#': ':', + 2*sizeof(vm_offset_t), thr_act, + db_act_stat(thr_act, status)); + } + } else { + if (flag & OPTION_INDENT) + db_printf(" %3d (%0*X) ", act_id, + 2*sizeof(vm_offset_t), thr_act); + else + db_printf("(%0*X) ", 2*sizeof(vm_offset_t), thr_act); + if (athread) { + db_printf("%c%c%c%c%c", + (athread->state & TH_RUN) ? 'R' : ' ', + (athread->state & TH_WAIT) ? 'W' : ' ', + (athread->state & TH_SUSP) ? 'S' : ' ', + (athread->state & TH_UNINT)? 'N' : ' ', + db_act_fp_used(thr_act) ? 
'F' : ' '); + /* Obsolete TH_STACK_HANDOFF code, left for now; might enhance + * to print out safe_points instead */ + if (athread->state & TH_STACK_HANDOFF) { + if (athread->continuation) { + db_printf("("); + db_task_printsym((db_addr_t)athread->continuation, + DB_STGY_ANY, kernel_task); + db_printf(")"); + } else { + db_printf("(handoff)"); + } + } + if (athread->state & TH_WAIT) { + db_printf(" "); + db_task_printsym((db_addr_t)athread->wait_event, + DB_STGY_ANY, kernel_task); + } + } else + db_printf("Empty"); + db_printf("\n"); + } +} + +void +db_print_task( + task_t task, + int task_id, + int flag) +{ + thread_act_t thr_act; + int act_id; + char sstate; + + if (flag & OPTION_USER) { + if (flag & OPTION_TASK_TITLE) { + db_printf(" ID: TASK MAP THD RES SUS PR SW %s", + DB_TASK_NAME_TITLE); + if ((flag & OPTION_LONG) == 0) + db_printf(" ACTS"); + db_printf("\n"); + } +#if TASK_SWAPPER + switch ((int) task->swap_state) { + case TASK_SW_IN: + sstate = 'I'; + break; + case TASK_SW_OUT: + sstate = 'O'; + break; + case TASK_SW_GOING_OUT: + sstate = 'G'; + break; + case TASK_SW_COMING_IN: + sstate = 'C'; + break; + case TASK_SW_UNSWAPPABLE: + sstate = 'U'; + break; + default: + sstate = '?'; + break; + } +#else /* TASK_SWAPPER */ + sstate = 'I'; +#endif /* TASK_SWAPPER */ + /*** ??? 
fix me ***/ + db_printf("%3d: %0*X %0*X %3d %3d %3d %2d %c ", + task_id, 2*sizeof(vm_offset_t), task, + 2*sizeof(vm_offset_t), task->map, + task->thr_act_count, task->res_act_count, + task->suspend_count, + ((mk_sp_attributes_t)(task->sp_attributes))->priority, + sstate); + DB_TASK_NAME(task); + if (flag & OPTION_LONG) { + if (flag & OPTION_TASK_TITLE) + flag |= OPTION_THREAD_TITLE; + db_printf("\n"); + } else if (task->thr_act_count <= 1) + flag &= ~OPTION_INDENT; + act_id = 0; + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + db_print_act(thr_act, act_id, flag); + flag &= ~OPTION_THREAD_TITLE; + act_id++; + } + if ((flag & OPTION_LONG) == 0) + db_printf("\n"); + } else { + if (flag & OPTION_LONG) { + if (flag & OPTION_TASK_TITLE) { + db_printf(" TASK ACT\n"); + if (task->thr_act_count > 1) + flag |= OPTION_THREAD_TITLE; + } + } + db_printf("%3d (%0*X): ", task_id, 2*sizeof(vm_offset_t), task); + if (task->thr_act_count == 0) { + db_printf("no threads\n"); + } else { + if (task->thr_act_count > 1) { + db_printf("%d threads: \n", task->thr_act_count); + flag |= OPTION_INDENT; + } else + flag &= ~OPTION_INDENT; + act_id = 0; + queue_iterate(&task->thr_acts, thr_act, + thread_act_t, thr_acts) { + db_print_act(thr_act, act_id++, flag); + flag &= ~OPTION_THREAD_TITLE; + } + } + } +} + +void +db_print_space( + task_t task, + int task_id, + int flag) +{ + ipc_space_t space; + thread_act_t act = (thread_act_t)queue_first(&task->thr_acts); + int count; + + count = 0; + space = task->itk_space; + if (act) + count = db_port_iterate(act, FALSE, FALSE); + db_printf("%3d: %08x %08x %08x %sactive %d\n", + task_id, task, space, task->map, + space->is_active? 
"":"!", count); +} + +void +db_print_task_vm( + task_t task, + int task_id, + boolean_t title, + char *modif) +{ + vm_map_t map; + pmap_t pmap; + vm_size_t size; + long resident; + long wired; + + if (title) { + db_printf("id task map pmap virtual rss pg rss mem wir pg wir mem\n"); + } + + map = task->map; + pmap = vm_map_pmap(map); + + size = db_vm_map_total_size(map); + resident = pmap->stats.resident_count; + wired = pmap->stats.wired_count; + + db_printf("%2d %08x %08x %08x %7dK %6d %6dK %6d %6dK\n", + task_id, + task, + map, + pmap, + size / 1024, + resident, (resident * PAGE_SIZE) / 1024, + wired, (wired * PAGE_SIZE) / 1024); +} + + +void +db_show_one_task_vm( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + thread_act_t thread; + task_t task; + int task_id; + + if (have_addr == FALSE) { + if ((thread = db_default_act) == THR_ACT_NULL) { + if ((thread = current_act()) == THR_ACT_NULL) { + db_printf("no thread.\n"); + return; + } + } + task = thread->task; + } else { + task = (task_t) addr; + } + + task_id = db_lookup_task(task); + if (task_id < 0) { + db_printf("0x%x is not a task_t\n", addr); + return; + } + + db_print_task_vm(task, task_id, TRUE, modif); +} + +void +db_show_all_task_vm( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + task_t task; + int task_id; + boolean_t title = TRUE; + processor_set_t pset; + + task_id = 0; + queue_iterate(&all_psets, pset, processor_set_t, all_psets) { + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + db_print_task_vm(task, task_id, title, modif); + title = FALSE; + task_id++; + } + } +} + +void +db_show_all_acts( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + task_t task; + int task_id; + int flag; + processor_set_t pset; + + flag = OPTION_TASK_TITLE|OPTION_INDENT; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + task_id = 0; + queue_iterate(&all_psets, 
pset, processor_set_t, all_psets) { + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + db_print_task(task, task_id, flag); + flag &= ~OPTION_TASK_TITLE; + task_id++; + if ((flag & (OPTION_LONG|OPTION_INDENT)) == OPTION_INDENT) + db_printf("\n"); + } + } +} + +void +db_show_one_space( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag; + int task_id; + task_t task; + + flag = OPTION_TASK_TITLE; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + if (!have_addr) { + task = db_current_task(); + if (task == TASK_NULL) { + db_error("No task\n"); + /*NOTREACHED*/ + } + } else + task = (task_t) addr; + + if ((task_id = db_lookup_task(task)) < 0) { + db_printf("bad task address 0x%x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + + db_printf(" ID: TASK SPACE MAP COUNT\n"); + db_print_space(task, task_id, flag); +} + +void +db_show_all_spaces( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + task_t task; + int task_id = 0; + int flag; + processor_set_t pset; + + flag = OPTION_TASK_TITLE|OPTION_INDENT; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + db_printf(" ID: TASK SPACE MAP COUNT\n"); + queue_iterate(&all_psets, pset, processor_set_t, all_psets) { + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + db_print_space(task, task_id, flag); + task_id++; + } + } +} + +db_addr_t +db_task_from_space( + ipc_space_t space, + int *task_id) +{ + task_t task; + int tid = 0; + processor_set_t pset; + + queue_iterate(&all_psets, pset, processor_set_t, all_psets) { + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + if (task->itk_space == space) { + *task_id = tid; + return (db_addr_t)task; + } + tid++; + } + } + *task_id = 0; + return (0); +} + +void +db_show_one_act( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag; + int act_id; + 
thread_act_t thr_act; + + flag = OPTION_THREAD_TITLE; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + if (!have_addr) { + thr_act = current_act(); + if (thr_act == THR_ACT_NULL) { + db_error("No thr_act\n"); + /*NOTREACHED*/ + } + } else + thr_act = (thread_act_t) addr; + + if ((act_id = db_lookup_act(thr_act)) < 0) { + db_printf("bad thr_act address %#x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + + if (flag & OPTION_USER) { + db_printf("TASK%d(%0*X):\n", + db_lookup_task(thr_act->task), + 2*sizeof(vm_offset_t), thr_act->task); + db_print_act(thr_act, act_id, flag); + } else { + db_printf("task %d(%0*Xx): thr_act %d", + db_lookup_task(thr_act->task), + 2*sizeof(vm_offset_t), thr_act->task, act_id); + db_print_act(thr_act, act_id, flag); + } + if (db_option(modif, 'i') && thr_act->thread && + (thr_act->thread->state & TH_WAIT) && + thr_act->thread->kernel_stack == 0) { + + db_printf("Wait State: option 0x%x\n", + thr_act->thread->ith_option); + } +} + +void +db_show_one_task( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag; + int task_id; + task_t task; + + flag = OPTION_TASK_TITLE|OPTION_INDENT; + if (db_option(modif, 'u')) + flag |= OPTION_USER; + if (db_option(modif, 'l')) + flag |= OPTION_LONG; + + if (!have_addr) { + task = db_current_task(); + if (task == TASK_NULL) { + db_error("No task\n"); + /*NOTREACHED*/ + } + } else + task = (task_t) addr; + + if ((task_id = db_lookup_task(task)) < 0) { + db_printf("bad task address 0x%x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + + db_print_task(task, task_id, flag); +} + +void +db_show_shuttle( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + thread_shuttle_t shuttle; + thread_act_t thr_act; + + if (have_addr) + shuttle = (thread_shuttle_t) addr; + else { + thr_act = current_act(); + if (thr_act == THR_ACT_NULL) { + db_error("No thr_act\n"); + /*NOTREACHED*/ + } + shuttle = 
thr_act->thread; + if (shuttle == THREAD_NULL) { + db_error("No shuttle associated with current thr_act\n"); + /*NOTREACHED*/ + } + } + db_printf("shuttle %x:\n", shuttle); + if (shuttle->top_act == THR_ACT_NULL) + db_printf(" no activations\n"); + else { + db_printf(" activations:"); + for (thr_act = shuttle->top_act; thr_act != THR_ACT_NULL; + thr_act = thr_act->lower) { + if (thr_act != shuttle->top_act) + printf(" from"); + printf(" $task%d.%d(%x)", db_lookup_task(thr_act->task), + db_lookup_act(thr_act), thr_act); + } + db_printf("\n"); + } +} + +#define db_pset_kmsg_count(port) \ + (ipc_list_count((port)->ip_pset->ips_messages.imq_messages.ikmq_base)) + +int +db_port_kmsg_count( + ipc_port_t port) +{ + return (port->ip_pset ? db_pset_kmsg_count(port) : port->ip_msgcount); +} + +static int db_print_ent_cnt = 0; + +void db_reset_print_entry( + void) +{ + db_print_ent_cnt = 0; +} + +void +db_print_one_entry( + ipc_entry_t entry, + int index, + mach_port_t name, + boolean_t is_pset) +{ + ipc_port_t aport = (ipc_port_t)entry->ie_object; + unsigned bits = entry->ie_bits; + + if (is_pset && !aport->ip_pset) + return; + if (db_print_ent_cnt && db_print_ent_cnt % 2 == 0) + db_printf("\n"); + if (!name) + db_printf("\t%s%d[%x]", + !is_pset && aport->ip_pset ? "pset" : "port", + index, + MACH_PORT_MAKE(index, IE_BITS_GEN(bits))); + else + db_printf("\t%s[%x]", + !is_pset && aport->ip_pset ? "pset" : "port", + name); + if (!is_pset) { + db_printf("(%s,%x,%d)", + (bits & MACH_PORT_TYPE_RECEIVE)? "r": + (bits & MACH_PORT_TYPE_SEND)? "s": "S", + aport, + db_port_kmsg_count(aport)); + db_print_ent_cnt++; + } + else { + db_printf("(%s,%x,set=%x,%d)", + (bits & MACH_PORT_TYPE_RECEIVE)? "r": + (bits & MACH_PORT_TYPE_SEND)? 
"s": "S", + aport, + aport->ip_pset, + db_pset_kmsg_count(aport)); + db_print_ent_cnt++; + } +} + +int +db_port_iterate( + thread_act_t thr_act, + boolean_t is_pset, + boolean_t do_output) +{ + ipc_entry_t entry; + ipc_tree_entry_t tentry; + int index; + int size; + int count; + ipc_space_t space; + + count = 0; + space = thr_act->task->itk_space; + entry = space->is_table; + size = space->is_table_size; + db_reset_print_entry(); + for (index = 0; index < size; ++index, ++entry) { + if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) { + if (do_output) + db_print_one_entry(entry, + index, (mach_port_t)0, is_pset); + ++count; + } + } + for (tentry = ipc_splay_traverse_start(&space->is_tree); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { + entry = &tentry->ite_entry; + if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) { + if (do_output) + db_print_one_entry(entry, + 0, tentry->ite_name, is_pset); + ++count; + } + } + return (count); +} + +ipc_port_t +db_lookup_port( + thread_act_t thr_act, + int id) +{ + register ipc_space_t space; + register ipc_entry_t entry; + + if (thr_act == THR_ACT_NULL) + return(0); + space = thr_act->task->itk_space; + if (id < 0 || id >= space->is_table_size) + return(0); + entry = &space->is_table[id]; + if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) + return((ipc_port_t)entry->ie_object); + return(0); +} + +static void +db_print_port_id( + int id, + ipc_port_t port, + unsigned bits, + int n) +{ + if (n != 0 && n % 3 == 0) + db_printf("\n"); + db_printf("\tport%d(%s,%x)", id, + (bits & MACH_PORT_TYPE_RECEIVE)? "r": + (bits & MACH_PORT_TYPE_SEND)? 
"s": "S", port); +} + +void +db_show_port_id( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + thread_act_t thr_act; + + if (!have_addr) { + thr_act = current_act(); + if (thr_act == THR_ACT_NULL) { + db_error("No thr_act\n"); + /*NOTREACHED*/ + } + } else + thr_act = (thread_act_t) addr; + if (db_lookup_act(thr_act) < 0) { + db_printf("Bad thr_act address 0x%x\n", addr); + db_error(0); + /*NOTREACHED*/ + } + if (db_port_iterate(thr_act, db_option(modif,'s'), TRUE)) + db_printf("\n"); +} + +/* + * Useful system state when the world has hung. + */ +void +db_system_stats() +{ + extern void db_device(void); + extern void db_sched(void); +#if DIPC + extern void db_dipc_stats(void); + extern void db_show_kkt(void); +#endif /* DIPC */ + + db_sched(); + iprintf("\n"); + db_vm(); + iprintf("\n"); + db_device(); +#if DIPC + iprintf("\n"); + db_dipc_stats(); + iprintf("\n"); + db_show_kkt(); +#endif /* DIPC */ + iprintf("\n"); + db_printf("current_{thread/task} 0x%x 0x%x\n", + current_thread(),current_task()); +} + +void db_show_one_runq(run_queue_t runq); + +void +db_show_runq( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + processor_set_t pset; + processor_t proc; + run_queue_t runq; + boolean_t showedany = FALSE; + + queue_iterate(&all_psets, pset, processor_set_t, all_psets) { +#if NCPUS > 1 /* This code has not been tested. 
*/ + queue_iterate(&pset->processors, proc, processor_t, processors) { + runq = &proc->runq; + if (runq->count > 0) { + db_printf("PROCESSOR %x IN SET %x\n", proc, pset); + db_show_one_runq(runq); + showedany = TRUE; + } + } +#endif /* NCPUS > 1 */ +#ifndef NCPUS +#error NCPUS undefined +#endif + runq = &pset->runq; + if (runq->count > 0) { + db_printf("PROCESSOR SET %x\n", pset); + db_show_one_runq(runq); + showedany = TRUE; + } + } + if (!showedany) + db_printf("No runnable threads\n"); +} + +void +db_show_one_runq( + run_queue_t runq) +{ + int i, task_id, thr_act_id; + queue_t q; + thread_act_t thr_act; + thread_t thread; + task_t task; + + printf("PRI TASK.ACTIVATION\n"); + for (i = runq->low, q = runq->runq + i; i < NRQS; i++, q++) { + if (!queue_empty(q)) { + db_printf("%3d:", i); + queue_iterate(q, thread, thread_t, links) { + thr_act = thread->top_act; + task = thr_act->task; + task_id = db_lookup_task(task); + thr_act_id = db_lookup_task_act(task, thr_act); + db_printf(" %d.%d", task_id, thr_act_id); + } + db_printf("\n"); + } + } +} diff --git a/osfmk/ddb/stab.h b/osfmk/ddb/stab.h new file mode 100644 index 000000000..328bd6d59 --- /dev/null +++ b/osfmk/ddb/stab.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.2 1995/01/06 19:11:14 devrcs + * mk6 CR668 - 1.3b26 merge + * added N_FRAME, an extension to aout symtabs + * for machines with non-self-describing frame formats + * [1994/10/14 03:40:05 dwm] + * + * Revision 1.1.11.1 1994/09/23 01:23:47 ezf + * change marker to not FREE + * [1994/09/22 21:11:53 ezf] + * + * Revision 1.1.4.3 1993/07/27 18:28:44 elliston + * Add ANSI prototypes. CR #9523. + * [1993/07/27 18:13:49 elliston] + * + * Revision 1.1.4.2 1993/06/02 23:13:40 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:58:12 jeffc] + * + * Revision 1.1 1992/09/30 02:24:31 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 91/10/09 16:05:28 af + * Revision 2.1 91/10/05 13:02:42 jeffreyh + * Created. + * + * Revision 2.1.1.1 91/10/05 13:03:14 jeffreyh + * Initial MK63 checkin + * + * Revision 2.1.1.1 91/07/31 13:14:49 jeffreyh + * Created from BSD network release #2 + * [91/07/31 jeffreyh] + * + * + */ +/* CMU_ENDHIST */ +/*- + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stab.h 5.2 (Berkeley) 4/4/91 + */ +/* + */ + +#ifndef _DDB_DB_STAB_H_ +#define _DDB_DB_STAB_H_ + +/* + * The following are symbols used by various debuggers and by the Pascal + * compiler. Each of them must have one (or more) of the bits defined by + * the N_STAB mask set. 
+ */ + +#define N_GSYM 0x20 /* global symbol */ +#define N_FNAME 0x22 /* F77 function name */ +#define N_FUN 0x24 /* procedure name */ +#define N_STSYM 0x26 /* data segment variable */ +#define N_LCSYM 0x28 /* bss segment variable */ +#define N_MAIN 0x2a /* main function name */ +#define N_PC 0x30 /* global Pascal symbol */ +#define N_FRAME 0x34 /* stack frame descriptor */ +#define N_RSYM 0x40 /* register variable */ +#define N_SLINE 0x44 /* text segment line number */ +#define N_DSLINE 0x46 /* data segment line number */ +#define N_BSLINE 0x48 /* bss segment line number */ +#define N_SSYM 0x60 /* structure/union element */ +#define N_SO 0x64 /* main source file name */ +#define N_LSYM 0x80 /* stack variable */ +#define N_BINCL 0x82 /* include file beginning */ +#define N_SOL 0x84 /* included source file name */ +#define N_PSYM 0xa0 /* parameter variable */ +#define N_EINCL 0xa2 /* include file end */ +#define N_ENTRY 0xa4 /* alternate entry point */ +#define N_LBRAC 0xc0 /* left bracket */ +#define N_EXCL 0xc2 /* deleted include file */ +#define N_RBRAC 0xe0 /* right bracket */ +#define N_BCOMM 0xe2 /* begin common */ +#define N_ECOMM 0xe4 /* end common */ +#define N_ECOML 0xe8 /* end common (local name) */ +#define N_LENG 0xfe /* length of preceding entry */ + +#endif /* !_DDB_DB_STAB_H_ */ diff --git a/osfmk/ddb/tr.c b/osfmk/ddb/tr.c new file mode 100644 index 000000000..7edac65d5 --- /dev/null +++ b/osfmk/ddb/tr.c @@ -0,0 +1,393 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: ddb/tr.c + * Authors: Alan Langerman, Jeffrey Heller + * Date: 1992 + * + * Internal trace routines. Like old-style XPRs but + * less formatting. + */ + +#include + +#if TRACE_BUFFER +#include +#include +#include +#include +#include + +extern void fc_get(int *); + +/* + * Primitive event tracing facility for kernel debugging. Yes, + * this has some resemblance to XPRs. However, it is primarily + * intended for post-mortem analysis through ddb. + */ + +#define TRACE_MAX (4 * 1024) +#define TRACE_WINDOW 40 + +typedef struct trace_event { + char *funcname; + char *file; + char *fmt; +#if NCPUS > 1 + char cpu_number; +#endif /* NCPUS > 1 */ + unsigned int lineno; + unsigned int tag1; + unsigned int tag2; + unsigned int tag3; + unsigned int tag4; + int indent; + int timestamp[2]; /* largest needed by any clock */ +} trace_event; + +trace_event trace_buffer[TRACE_MAX]; +unsigned long trace_index; +#if NCPUS == 1 +int tr_indent = 0; +#else /* NCPUS == 1 */ +int tr_indent[NCPUS]; +int tr_limit = -1; +#endif /* NCPUS == 1 */ + +decl_simple_lock_data(,trace_lock) + +void +tr_init(void) +{ +#if NCPUS > 1 + int i; + + for(i=0;i 1 */ + + simple_lock_init(&trace_lock, ETAP_DIPC_TRACE); +} + +void +tr( + char *funcname, + char *file, + unsigned int lineno, + char *fmt, + unsigned int tag1, + unsigned int tag2, + unsigned int tag3, + unsigned int tag4) +{ + int s; + register unsigned long ti, tn; +#if NCPUS > 1 + char cpu; +#endif /* NCPUS > 1 */ + 
+#if PARAGON860 + /* + * The following loop replaces the spl_and_lock sequence that + * would normally be here, as they are too heavy weight. The + * cmpsw (compare-and-swap) call returns -1 if unsuccessful. + */ + do { + ti = trace_index; + tn = ti + 1; + if (tn >= TRACE_MAX - 1) + tn = 0; + } while (cmpsw(ti, tn, &trace_index) == -1); + fc_get(trace_buffer[ti].timestamp); +#else /* PARAGON860 */ + /* + * Until someone does a cmpsw for other platforms, do it + * the slow way + */ + s = splimp(); + simple_lock(&trace_lock); + + ti = trace_index++; + if (trace_index >= TRACE_MAX - 1) + trace_index = 0; + + simple_unlock(&trace_lock); + splx(s); + + fc_get(trace_buffer[ti].timestamp); +/* get_uniq_timestamp(trace_buffer[ti].timestamp);*/ +#endif /* PARAGON860 */ + + trace_buffer[ti].funcname = funcname; + trace_buffer[ti].file = file; + trace_buffer[ti].lineno = lineno; + trace_buffer[ti].fmt = fmt; + trace_buffer[ti].tag1 = tag1; + trace_buffer[ti].tag2 = tag2; + trace_buffer[ti].tag3 = tag3; + trace_buffer[ti].tag4 = tag4; +#if NCPUS == 1 + trace_buffer[ti].indent = tr_indent; +#else /* NCPUS == 1 */ + mp_disable_preemption(); + cpu = cpu_number(); + trace_buffer[ti].indent = tr_indent[cpu]; + trace_buffer[ti].cpu_number = cpu; + mp_enable_preemption(); +#endif /* NCPUS == 1 */ +} + +#if MACH_KDB +#include + +/* + * Forward. + */ +void show_tr( + unsigned long index, + unsigned long range, + unsigned long show_extra); + +int matches( + char *pattern, + char *target); + +void parse_tr( + unsigned long index, + unsigned long range); + +/* + * The blank array must be a bit bigger than + * MAX_BLANKS to leave room for a terminating NULL. 
+ */ +#define MAX_BLANKS 16 +char blanks[MAX_BLANKS+4]; + +void +show_tr( + unsigned long index, + unsigned long range, + unsigned long show_extra) +{ + char *filename, *cp; +#if PARAGON860 + trace_event *last_trace; +#endif /* PARAGON860 */ + unsigned int level; + int old_history; + int i; + + if (index == -1) { + index = trace_index - (TRACE_WINDOW-4); + range = TRACE_WINDOW; + } else if (index == 0) { + index = trace_index - (TRACE_WINDOW-4); + range = TRACE_WINDOW; + show_extra = 0; + } + if (index + range > TRACE_MAX) + range = TRACE_MAX - index; +#if PARAGON860 + last_trace = &trace_buffer[index-1]; +#endif /* PARAGON860 */ + level = trace_buffer[index-1].indent; + /* + * Set up the indentation buffer + */ + memset(blanks, ' ', trace_buffer[index].indent); + blanks[trace_buffer[index].indent] = '\0'; + for (i = index; i < index + range; ++i) { +#if NCPUS > 1 + if ((tr_limit != -1) && + (trace_buffer[i].cpu_number != tr_limit)) + continue; +#endif /* NCPUS > 1 */ + if (trace_buffer[i].file == (char *) 0 || + trace_buffer[i].funcname == (char *) 0 || + trace_buffer[i].lineno == 0 || + trace_buffer[i].fmt == 0) { + db_printf("[%04x%s]\n", i, + i >= trace_index ? "*" : ""); + continue; + } + + old_history = (i >= trace_index); + + /* + * Adjust the blank count if necessary + */ + if (level != trace_buffer[i].indent) { + level = trace_buffer[i].indent; + if (level >= MAX_BLANKS) + level = MAX_BLANKS; + memset(blanks, ' ', level); + blanks[level] = '\0'; + } + + for (cp = trace_buffer[i].file; *cp; ++cp) + if (*cp == '/') + filename = cp + 1; +#if NCPUS > 1 + db_printf("{%02d}",trace_buffer[i].cpu_number); +#endif /* NCPUS > 1 */ + db_printf("[%04x%s] %s%-16s", i, old_history ? 
"*" : "", + blanks, trace_buffer[i].funcname); + + if (show_extra) { + if (show_extra > 0) { + db_printf(" (%x/%8x)", + trace_buffer[i].timestamp[0], + trace_buffer[i].timestamp[1]); +#if PARAGON860 + /* + * For Paragon only, we compute and + * print out deltas on the timestamps + * accumulated in the tr buffer. One + * interesting case: it is meaningless + * to compute this delta for the last + * current entry in the log. + */ + if (old_history && + ((last_trace - trace_buffer) + < trace_index)) + db_printf("(N/A)"); + else + db_printf("(%d)", + timer_subtime( + trace_buffer[i].timestamp, + last_trace->timestamp)); +#endif /*PARAGON860*/ + db_printf(" "); + } + if (show_extra > 1) { + db_printf("(%s:%05d):\n\t", + filename, trace_buffer[i].lineno); + } + } else + db_printf(": "); + db_printf(trace_buffer[i].fmt, trace_buffer[i].tag1, + trace_buffer[i].tag2, trace_buffer[i].tag3, + trace_buffer[i].tag4); + db_printf("\n"); +#if PARAGON860 + last_trace = &trace_buffer[i]; +#endif /* PARAGON860 */ + } +} + + +int +matches( + char *pattern, + char *target) +{ + char *cp, *cp1, *cp2; + + for (cp = target; *cp; ++cp) { + for (cp2 = pattern, cp1 = cp; *cp2 && *cp1; ++cp2, ++cp1) + if (*cp2 != *cp1) + break; + if (!*cp2) + return 1; + } + return 0; +} + + +char parse_tr_buffer[100] = "KMSG"; + +void +parse_tr( + unsigned long index, + unsigned long range) +{ + int i; + char *filename, *cp; + char *string = parse_tr_buffer; + + if (index == 0) { + index = trace_index - (TRACE_WINDOW-4); + range = TRACE_WINDOW; + } + if (index + range > TRACE_MAX) + range = TRACE_MAX - index; + for (i = index; i < index + range; ++i) { +#if NCPUS > 1 + if ((tr_limit != -1) && + (trace_buffer[i].cpu_number != tr_limit)) + continue; +#endif /* NCPUS > 1 */ + if (trace_buffer[i].file == (char *) 0 || + trace_buffer[i].funcname == (char *) 0 || + trace_buffer[i].lineno == 0 || + trace_buffer[i].fmt == 0) { + db_printf("[%04x%s]\n", i, + i >= trace_index ? 
"*" : ""); + continue; + } + if (!matches(string, trace_buffer[i].fmt)) + continue; + for (cp = trace_buffer[i].file; *cp; ++cp) + if (*cp == '/') + filename = cp + 1; +#if NCPUS > 1 + db_printf("{%02d}",trace_buffer[i].cpu_number); +#endif /* NCPUS > 1 */ + db_printf("[%04x%s] %s", i, i >= trace_index ? "*" : "", + trace_buffer[i].funcname); + db_printf(": "); + db_printf(trace_buffer[i].fmt, trace_buffer[i].tag1, + trace_buffer[i].tag2, trace_buffer[i].tag3, + trace_buffer[i].tag4); + db_printf("\n"); + } +} + + +void +db_show_tr( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + int flag, level; + + flag = 0, level = 0; + if (db_option(modif, 'l')) { + flag = 1; + level = -1; + } + if (db_option(modif, 'a')) { + flag = 2; + level = -1; + } + + TR_SHOW(level, 0, flag); +} + +#endif /* MACH_KDB */ + +#endif /* TRACE_BUFFER */ diff --git a/osfmk/ddb/tr.h b/osfmk/ddb/tr.h new file mode 100644 index 000000000..4288e3de5 --- /dev/null +++ b/osfmk/ddb/tr.h @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:09 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.1 1997/03/27 18:47:01 barbou + * Merge smp_shared merges into mainline. + * [1996/09/19 13:55:17 addis] + * Make tr_indent NCPU safe. + * [95/10/09 rwd] + * Added TR_INIT() macro. + * Change from NMK16.1 [93/09/22 paire] + * [94/02/04 paire] + * [97/02/25 barbou] + * + * Revision 1.1.6.1 1995/02/23 16:34:23 alanl + * Taken from DIPC2_SHARED. Change to !FREE Copyright. + * [95/01/05 rwd] + * + * Revision 1.1.4.4 1994/08/18 01:07:26 alanl + * + Allow tracing strictly based on MACH_TR; + * don't also require MACH_ASSERT (alanl). + * + ANSI-fication: cast tr arguments (alanl). + * + Added tr_indent and macros to use it (sjs). + * [1994/08/18 01:06:09 alanl] + * + * Revision 1.1.4.3 1994/08/08 17:59:35 rwd + * Include mach_tr.h + * [94/08/08 rwd] + * + * Revision 1.1.4.2 1994/08/05 19:36:08 mmp + * Added prototype for db_show_tr. + * + * Conditionalize on MACH_TR + * [94/07/20 rwd] + * + * Revision 1.1.4.1 1994/08/04 01:43:04 mmp + * DIPC: moved from norma/ to ddb/. Updated includes. + * [1994/08/03 13:37:46 mmp] + * + * Revision 1.1.9.1 1994/03/07 16:55:24 paire + * Added ANSI prototypes. + * [94/02/15 paire] + * + * Added TR_INIT() macro. + * Change from NMK16.1 [93/09/22 paire] + * [94/02/04 paire] + * + * Revision 1.1.2.2 1993/06/02 23:57:10 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:22:08 jeffc] + * + * Revision 1.1 1992/09/30 02:34:09 robert + * Initial revision + * + * $EndLog$ + */ + +/* + * File: ddb/tr.h + * Author: Alan Langerman, Jeffrey Heller + * Date: 1992 + * + * Internal trace routines. Like old-style XPRs but + * less formatting. 
+ */ + +#include +#include + +#include + +/* + * Originally, we only wanted tracing when + * MACH_TR and MACH_ASSERT were turned on + * together. Now, there's no reason why + * MACH_TR and MACH_ASSERT can't be completely + * orthogonal. + */ +#define TRACE_BUFFER (MACH_TR) + +/* + * Log events in a circular trace buffer for future debugging. + * Events are unsigned integers. Each event has a descriptive + * message. + * + * TR_DECL must be used at the beginning of a routine using + * one of the tr calls. The macro should be passed the name + * of the function surrounded by quotation marks, e.g., + * TR_DECL("netipc_recv_intr"); + * and should be terminated with a semi-colon. The TR_DECL + * must be the *last* declaration in the variable declaration + * list, or syntax errors will be introduced when TRACE_BUFFER + * is turned off. + */ +#ifndef _DDB_TR_H_ +#define _DDB_TR_H_ + +#if TRACE_BUFFER + +#include + +#define __ui__ (unsigned int) +#define TR_INIT() tr_init() +#define TR_SHOW(a,b,c) show_tr((a),(b),(c)) +#define TR_DECL(funcname) char *__ntr_func_name__ = funcname +#define tr1(msg) \ + tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \ + 0,0,0,0) +#define tr2(msg,tag1) \ + tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \ + __ui__(tag1),0,0,0) +#define tr3(msg,tag1,tag2) \ + tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \ + __ui__(tag1),__ui__(tag2),0,0) +#define tr4(msg,tag1,tag2,tag3) \ + tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \ + __ui__(tag1),__ui__(tag2),__ui__(tag3),0) +#define tr5(msg,tag1,tag2,tag3,tag4) \ + tr(__ntr_func_name__, __FILE__, __LINE__, (msg), \ + __ui__(tag1),__ui__(tag2),__ui__(tag3),__ui__(tag4)) + +/* + * Adjust tr log indentation based on function + * call graph. 
+ */ +#if NCPUS == 1 +extern int tr_indent; +#define tr_start() tr_indent++ +#define tr_stop() tr_indent-- +#else /* NCPUS == 1 */ +extern int tr_indent[NCPUS]; +#define tr_start() tr_indent[cpu_number()]++ +#define tr_stop() (--tr_indent[cpu_number()]<0?tr_indent[cpu_number()]=0:0); +#endif /* NCPUS == 1 */ + +extern void tr_init(void); +extern void tr( + char *funcname, + char *file, + unsigned int lineno, + char *fmt, + unsigned int tag1, + unsigned int tag2, + unsigned int tag3, + unsigned int tag4); + +extern void db_show_tr( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif); + +#else /* TRACE_BUFFER */ + +#define TR_INIT() +#define TR_SHOW(a,b,c) +#define TR_DECL(funcname) +#define tr1(msg) +#define tr2(msg, tag1) +#define tr3(msg, tag1, tag2) +#define tr4(msg, tag1, tag2, tag3) +#define tr5(msg, tag1, tag2, tag3, tag4) +#define tr_start() +#define tr_stop() + +#endif /* TRACE_BUFFER */ + +#endif /* _DDB_TR_H_ */ diff --git a/osfmk/default_pager/Makefile b/osfmk/default_pager/Makefile new file mode 100644 index 000000000..d0adb5c24 --- /dev/null +++ b/osfmk/default_pager/Makefile @@ -0,0 +1,138 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MIG_TYPES = \ + default_pager_types.defs + +MIG_DEFS = \ + default_pager_alerts.defs + +MIG_USHDRS = \ + +MIG_UUHDRS = \ + + +MIGINCLUDES = ${MIG_UUHDRS} ${MIG_USHDRS} + +DATAFILES = \ + default_pager_types.h \ + ${MIG_DEFS} + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_GEN_LIST = ${MIGINCLUDES} + +INSTALL_MI_DIR = default_pager + +EXPORT_MI_LIST = \ + ${DATAFILES} + +EXPORT_MI_GEN_LIST = \ + ${MIGINCLUDES} + +EXPORT_MI_DIR = default_pager + +.ORDER: ${MIG_HDRS} ${MIGINCLUDES} + +${MIGINCLUDES} : ${MIG_TYPES} + +.ORDER: ${MIG_UUHDRS} + +${MIG_UUHDRS} : \ + %.h : %.defs + $(MIG) 
$(MIGFLAGS) \ + -server /dev/null \ + -user /dev/null \ + -header $@ \ + $< + +.ORDER: ${MIG_USHDRS} + +${MIG_USHDRS} : \ + %_server.h : %.defs + $(MIG) $(MIGFLAGS) \ + -server /dev/null \ + -user /dev/null \ + -header /dev/null \ + -sheader $@ \ + $< + +# +# Build path +# +INCFLAGS_MAKEFILE= -I.. + +MIGKSFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_SERVER=1 +MIGKUFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_USER=1 -maxonstack 1024 + +# +# MIG-generated headers that are traditionally used by kernel +# level code. +# +MIG_KUHDRS = \ + default_pager_alerts.h + +MIG_KUSRC = \ + default_pager_alerts_user.c + +MIG_KSHDRS = \ + default_pager_object.h + +MIG_KSSRC = \ + default_pager_object_server.c + +# +# JMM - +# Since there are two generated header files with the same name, one for +# install and export, the other for internal use (and they are different) +# we can't explicitly list two rules for the same target. So rules for +# generating internal headers will be handled implicitly by creating rules +# to generate the internal C sources, and the headers get created as a +# side-effect. +# +# This is all temporary scaffolding, as we are moving to a model where +# the MIG-generated code is identical in all environments. At first, it +# will contain some environment-specific ifdefs, but over time should not +# even require that as we move towards making all the environments look +# the same. 
+# +COMP_FILES = ${MIG_KUSRC} ${MIG_KSSRC} + +${COMP_FILES} : ${MIG_TYPES} + +.ORDER: ${MIG_KUSRC} + +${MIG_KUSRC} : \ + %_user.c : %.defs + ${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ + -user $*_user.c \ + -header $*.h \ + -server /dev/null \ + -sheader /dev/null \ + $< + +.ORDER: ${MIG_KSSRC} + +${MIG_KSSRC}: \ + %_server.c : %.defs + ${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ + -user /dev/null \ + -header /dev/null \ + -server $*_server.c \ + -sheader $*_server.h \ + $< + + +include $(MakeInc_rule) +include $(MakeInc_dir) + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/default_pager/Makefile.template b/osfmk/default_pager/Makefile.template new file mode 100644 index 000000000..ec89e2f12 --- /dev/null +++ b/osfmk/default_pager/Makefile.template @@ -0,0 +1,43 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MIGKSFLAGS = -DKERNEL_SERVER +MIGKUFLAGS = -DKERNEL_USER -maxonstack 1024 + +DEFAULT_PAGER_FILES = default_pager_object_server.h default_pager_object_server.c + +DEFAULT_PAGER_ALERTS = default_pager_alerts.h default_pager_alerts.c + + +OTHERS = ${DEFAULT_PAGER_FILES} ${DEFAULT_PAGER_ALERTS} + +.ORDER: ${DEFAULT_PAGER_FILES} ${DEFAULT_PAGER_ALERTS} + +${DEFAULT_PAGER_FILES}: default_pager_object.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader default_pager_object_server.h \ + -server default_pager_object_server.c \ + $< + +${DEFAULT_PAGER_ALERTS}: default_pager_alerts.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} \ + -header default_pager_alerts.h \ + -user default_pager_alerts.c \ + -sheader /dev/null \ + -server /dev/null \ + $< + + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/default_pager/default_pager.c b/osfmk/default_pager/default_pager.c new file mode 
100644 index 000000000..cbd50979e --- /dev/null +++ b/osfmk/default_pager/default_pager.c @@ -0,0 +1,976 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * $Log: default_pager.c,v $ + * Revision 1.7 2001/01/15 20:03:32 lindak + * Merged PR-2523198-116-3 into Cheetah from magee which fixes all of the + * following: + * 2430517 2445360 2511207 2513779 2523198 2581705 2585290 2595323 2596060 + * 2597427 2601360 + * + * Revision 1.6.260.1 2001/01/14 10:02:17 jmagee + * A conglomeration of fixes related to races in the termination of processes + * + * Bug #2430517 - Race condition between port death and BSD causes wait4() to fail + * Bug #2445360 - Process hung in sigsuspend waiting for SIGCHLD + * Bug #2511207 - IPC thread_act disable too "loose" + * Bug #2513779 - thread_continue is NOT a continuation routine + * Bug #2523198 - shuttleless activation during thread creation + * Bug #2581705 - 4F8: panic in task_info + * Bug #2585290 - PANIC: thread_deallocate: released last reference on map + * Bug #2595323 - Cheetah4K9: Lost user context + * Bug #2596060 - can't get mutex interlock in vm_map_deallocate / load_machfile + * Bug #2601360 - killing CrashReporter causes process hangs + * Submitted by: jmagee + * Reviewed by: Youngworth Umesh Ramesh + * + * Revision 1.6 2000/10/13 06:21:06 lindak + * Merged PR-2425995-2 into Cheetah (youngworth Need Pager and VM support for + * 64 bit address space) + * + * Revision 1.5.804.1 2000/10/12 17:29:25 youngwor + * Changes for base 64 bit data path support + * + * Revision 1.5.782.1 2000/10/12 14:02:32 youngwor + * Changes to support 64 bit data path throughout the kernel. 
+ * Bug #: 2425995 + * Submitted by: Chris Youngworth + * Reviewed by: + * + * Revision 1.5 2000/01/26 05:56:22 wsanchez + * Add APSL + * + * Revision 1.4 1999/07/20 02:55:34 lindak + * Merged PR-2291281-1 into Beaker (magee Kernel Components kobject groupings) + * + * Revision 1.3.674.1 1999/07/20 00:33:02 jmagee + * Workaround for partial EMMI components work + * + * Revision 1.3 1999/02/24 16:55:12 wsanchez + * PR-2308031 + * + * Revision 1.2.168.1 1999/02/23 20:43:52 semeria + * Component Header files phase 1 + * + * Revision 1.2 1998/12/01 00:24:41 wsanchez + * Merged in CDY_DP1 (chris: default pager) + * + * Revision 1.1.2.2 1998/11/25 21:32:17 youngwor + * fix errant comment format + * + * Revision 1.1.2.1 1998/11/24 22:39:57 youngwor + * Check-in of support for the in-kernel default pager + * + * Revision 1.1.1.1 1998/03/07 02:26:31 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.84.2 1997/03/27 18:45:15 barbou + * submit adidtional picco changes + * [1996/09/12 22:09:29 robert] + * AXP pool merge. + * [97/02/25 barbou] + * + * Revision 1.2.84.1 1996/11/29 16:54:38 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Added -v option (verbose flag) and tests before warning printfs + * [1996/07/29 12:25:54 stephen] + * + * Revision 1.2.34.16 1996/07/31 06:41:45 paire + * Merged with nmk20b7_shared (1.2.77.1) + * [96/05/30 paire] + * + * Revision 1.2.77.1 1996/04/12 06:30:58 paire + * Changed signature of default_pager_thread to (void ()(void *)). + * Replaced bzero() by memset(). + * [96/01/30 paire] + * + * Revision 1.2.34.15 1995/08/21 20:52:09 devrcs + * Initialize dpt_initialized_p element of + * default_pager_thread_tb and set it to true after thread starts + * up. Wait until all threads have signalled ready before + * telling the bootstrap process that it's ok to go ahead. 
+ * [95/07/10 randys] + * + * Revision 1.2.34.14 1995/06/12 18:44:00 dwm + * ri-osc CR1394 - allow argument from bootstrap to set cluster size, + * Usage: default_pager clsize=4 sd0b, for example + * [1995/06/12 18:40:21 dwm] + * + * Revision 1.2.34.13 1995/05/31 07:55:10 emcmanus + * Use mach_msg instead of mach_msg_overwrite_trap so that message + * operations can be interrupted without provoking a default-pager + * panic. Remote gdb does this. + * [1995/05/31 07:54:21 emcmanus] + * + * Revision 1.2.34.12 1995/05/25 20:36:39 mmp + * Removed TEMPORARILY_USE_OLD_INIT and the !TEMPORARILY_USE_OLD_INIT + * code. The change to use m_o_init was not temporary. + * [1995/05/25 19:59:17 mmp] + * + * Revision 1.2.34.11 1995/04/07 18:50:57 barbou + * Merged into mainline: + * Revision 1.2.34.10 1995/02/27 18:24:08 mmp + * Replaced m_o_notify with m_o_init; used m_o_change_attributes + * instead of m_o_establish; removed m_o_rejected. + * [1995/02/27 18:22:40 mmp] + * Revision 1.2.34.9 1995/02/23 21:15:48 alanl + * Use system_priority instead of server_priority. Fix locking + * with regards to pager_extend! + * Merged with DIPC2_SHARED. + * [1995/02/23 21:14:55 alanl] + * [95/03/08 barbou] + * + * VM-MK6 Merge. + * Started from the modified b26 file. + * Integrated the following MK6 changes: + * + * Fix ri-osc CR846: Avoid use of fixed BASEPRI_SYSTEM; use new + * host_info() interface to determine priority dynamically. + * [1994/12/23 15:39:32 bolinger] + * mk6 CR668 - 1.3b26 merge + * Correct local btodb() def; change port_to_ds() et al. to work + * with port names returned by current merged kernel. + * [1994/12/03 02:10:30 bolinger] + * mk6 CR668 - 1.3b26 merge + * Did not bring forward PAGING_MEMORY support. Did bring forward + * NORMA support -- can be deleted when proven no longer needed. + * [1994/11/10 15:32:12 bolinger] + * [95/01/10 barbou] + * [95/03/08 barbou] + * + * Revision 1.2.56.2 1995/02/13 14:40:41 barbou + * VM-MK6 Merge. 
+ * Started from the modified b26 file. + * Integrated the following MK6 changes: + * + * Fix ri-osc CR846: Avoid use of fixed BASEPRI_SYSTEM; use new + * host_info() interface to determine priority dynamically. + * [1994/12/23 15:39:32 bolinger] + * mk6 CR668 - 1.3b26 merge + * Correct local btodb() def; change port_to_ds() et al. to work + * with port names returned by current merged kernel. + * [1994/12/03 02:10:30 bolinger] + * mk6 CR668 - 1.3b26 merge + * Did not bring forward PAGING_MEMORY support. Did bring forward + * NORMA support -- can be deleted when proven no longer needed. + * [1994/11/10 15:32:12 bolinger] + * [95/01/10 barbou] + * + * Revision 1.2.46.3 1994/11/02 14:57:23 barbou + * Use new task_swappable() interface to make our task unswappable. + * [94/11/02 barbou] + * + * Revision 1.2.46.2 1994/10/10 15:28:48 barbou + * VM Merge - Default Pager Clustering. + * + * Also split this file in three: + * default_pager.c contains code that deals with threads and + * incoming messages. + * dp_memory_object.c contains memory object management code. + * dp_backing_store.c contains backing store management code. + * [94/10/10 barbou] + * + * Revision 1.2.6.23 1994/05/16 16:43:50 jph + * CR8809 -- Fix messages when paging space is exhausted. + * CR10905 -- Disallow overlapped paging areas. + * [1994/05/16 16:43:04 jph] + * + * Revision 1.2.6.22 1994/04/01 18:42:34 jph + * CR10550 -- Add backing store info interfaces. + * CR10718 -- Fix pagein error path. + * [1994/04/01 18:40:13 jph] + * + * Revision 1.2.6.21 1994/03/04 18:34:49 jeffc + * CR10636 -- delete all NMK15_COMPAT support. + * [1994/03/04 14:50:44 jeffc] + * + * Revision 1.2.6.20 1994/02/16 14:22:24 jph + * CR10554 -- Multi-page requests now handled, albeit crudely. + * Fixed leak in data_request for partial page reads. + * Tidied up code to be at least consistent. + * Fixed ASSERTIONS option and bad assert (name_refs in terminate). 
+ * [1994/02/16 14:20:47 jph] + * + * Revision 1.2.6.19 1994/02/07 22:41:25 jph + * Merged with changes from 1.2.6.18 + * [1994/02/07 22:40:25 jph] + * + * CR10433 -- Upgrade default pager. + * Add device argument capability. + * Removed defunct file_io.h reference. + * Replaced pager_{lock_init,lock,unlock,lock_try} macros. + * Moved cthreads globals to top of file from middle. + * Removed "id" field of "partition_t" - not needed. + * Added "device", "offset", "count" and "record_shift" fields + * to "partition_t" to record backing store device info. + * Removed "p_read", "p_write" and "p_private" fields from + * "partition_t" - Unneeded filesystem abstraction. + * Merge "struct dstruct" fields into the "struct dpager", + * delete "struct dstruct" and "default_pager_t". + * Added "struct bstruct" and "all_backing_store" to hold list + * of all backing store ports. + * Simplify arguments to create_paging_partition(). + * Delete part_id(), add_paging_file() and default_pager_setup() routines. + * Added backing_store_port_alloc(), log2() routine. + * Added vm_page_mask and vm_page_shift to augment vm_page_size. + * [1994/02/07 22:28:15 jph] + * + * Revision 1.2.6.18 1994/02/01 19:44:38 collins + * CR9926: Set the default pager scheduling policy to round-robin with + * a priority of BASEPRI_SYSTEM. + * [1994/02/01 14:56:05 collins] + * + * Revision 1.2.6.17 1994/01/27 17:04:21 chasb + * Expand Copyright markers + * [1994/01/27 16:32:40 chasb] + * + * Revision 1.2.6.16 1994/01/26 18:42:03 collins + * CR10474: Change any_t to void *. + * [1994/01/26 18:39:47 collins] + * + * Revision 1.2.6.15 1994/01/25 17:02:40 jeffc + * CR10107 -- Mach spec compliance - eliminate copy_call + * [1994/01/24 21:23:43 jeffc] + * + * Revision 1.2.6.14 1994/01/20 16:58:18 meissner + * CR 10468 - Make initialization have proper number of {}'s. + * [1994/01/19 19:02:57 meissner] + * + * Revision 1.2.6.13 1993/12/03 20:53:51 jvs + * Trusted pager throttling changes. 
CR 10108 + * [1993/12/03 20:53:09 jvs] + * + * Revision 1.2.6.12 1993/12/02 17:22:34 jph + * CR10254 -- Fix warning about unused ledger/ security ports. + * [1993/12/02 15:59:30 jph] + * + * Revision 1.2.6.11 1993/11/24 20:30:31 jph + * CR9801 brezak merge, ledgers, security and NMK15_COMPAT + * [1993/11/23 22:52:33 jph] + * + * New bootstrap_ports() signature. + * [1993/11/23 20:58:25 jph] + * + * Revision 1.2.6.10 1993/11/23 18:05:47 watkins + * Increment send right for object in mo_notify. + * [1993/11/23 18:04:35 watkins] + * + * Revision 1.2.6.9 1993/11/16 21:49:42 watkins + * Remove pager_name argument from memory_object_terminate + * and memory_object_create, as per spec. Remove mo_init + * and flesh out mo_notify. Extend maps for reads beyond the + * end. Add xpr traces. + * [1993/11/16 21:29:43 watkins] + * + * Revision 1.2.6.8 1993/10/20 18:50:13 gm + * CR9928: Remove bootstrap_port lookup. + * CR9990: Remove code that deletes initial stack. + * [1993/10/20 12:34:40 gm] + * + * Revision 1.2.6.7 1993/10/08 17:32:08 jeffc + * CR9508 - Delete typed IPC code + * [1993/09/28 17:27:02 jeffc] + * + * Revision 1.2.6.6 1993/10/08 16:08:14 jeffc + * CR9792 - delete obsolete memory_object_data_write message. + * [1993/10/08 15:59:49 jeffc] + * + * Revision 1.2.6.5 1993/10/05 21:57:08 watkins + * New memory object attribute interfaces comply with spec. + * [1993/10/05 21:53:27 watkins] + * + * Revision 1.2.6.4 1993/09/16 18:38:39 jeffc + * CR9792 - delete defunct EMMI interfaces + * [1993/09/15 20:02:07 jeffc] + * + * Revision 1.2.6.3 1993/08/05 17:57:08 gm + * CR9627: Moved def_pager_setup and bootstrap code here. Removed + * EXT_PAGER code. Fixed up code problems with more agressive warning + * in gcc. Added full prototype support. Changed internal interfaces + * that had unions as return values to take pointer arguments instead. + * Delete bootstrap code since their is now a separate bootstrap task. 
+ * Removed set_ras_address() since it should be provided by a machine + * dependent file on machines that need it. Changed to get priv + * ports using mach interfaces instead of argv. + * [1993/07/09 19:11:36 gm] + * + * Revision 1.2.6.2 1993/06/09 02:08:56 gm + * Conditionalize no_senders_check for untyped IPC. CR #9058. + * [1993/05/11 18:19:30 rod] + * + * Add header files to pick up definitions of Mach traps and + * wiring interfaces. + * [1993/05/14 15:37:15 jeffc] + * + * Fix ANSI C violations and warnings. + * [1993/05/13 21:05:22 jeffc] + * + * Remove dependency on own pathname. + * [1993/05/12 17:53:18 jeffc] + * + * Revision 1.2 1993/04/19 15:07:02 devrcs + * Added trailer support to untyped ipc. [travos@osf.org, fdr@osf.org] + * [1993/04/06 18:14:54 travos] + * + * Merge untyped ipc: + * Added untyped support to bootstrap_compat(). + * [1993/04/02 17:37:59 rod] + * + * Share more code when building the in kernel version + * of the pager. + * [93/03/19 bernadat] + * + * Fix memory_object_synchronize hang. + * [1993/03/15 13:21:59 david] + * + * memory_object_synchronize define twice + * [1993/03/03 15:09:30 david] + * + * remerge with 1.1.2.3 + * [1993/03/03 14:26:14 david] + * + * Add memory_object_synchronize stub + * [1993/03/03 11:04:05 david] + * + * Fixed a deadlock bug in internal pager configuration. + * [93/02/25 bernadat] + * + * moved out of mach_kernel directory + * [1993/02/27 13:56:35 david] + * + * Modified to use the same new interface (default_pager_object.defs) for both + * configurations. + * [1993/02/17 13:40:18 bruel] + * + * Added stubs for new exception interface. + * [93/02/11 bruel] + * + * Modified from mk78. + * Added the ufs_pager_option. + * [93/01/29 bruel] + * + * Yup, it works. Undefine CHECKSUM, debug and + * DEBUG_READER_CONFLICTS again. + * [92/12/03 ian] + * + * Update CHECKSUM to work with current dp_map union. + * [92/12/03 ian] + * + * Define debug CHECKSUM and DEBUG_READER_CONFLICTS. 
+ * [92/11/28 ian] + * + * Eliminated use of old memory object calls (set_attributes, data_write, data_provided). + * [92/09/25 jsb] + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.12 92/07/20 13:32:18 cmaeda + * Added private version of set_ras_address for fast_tas support. + * [92/05/11 14:31:52 cmaeda] + * + * Revision 2.11 92/05/05 10:03:46 danner + * For merge purposes, backed-out the unstable stuff. + * [92/05/04 11:12:01 af] + * + * Now we can page an object across partitions. + * Initial rough ideas about automatically extending + * paging space. + * [92/03/11 02:23:58 af] + * + * Revision 2.10 92/03/06 13:58:48 rpd + * Fixed pager_dealloc_page calls in pager_dealloc (from af). + * Removed chatty printfs. + * [92/03/06 rpd] + * + * Revision 2.9 92/03/05 15:58:35 rpd + * Changed PAGEMAP_ENTRIES from 128 to 64. From af. + * [92/03/05 rpd] + * + * Revision 2.8 92/03/03 12:12:04 rpd + * Changed to catch exception messages and handle bootstrap requests. + * Added partition_init. + * [92/03/03 rpd] + * + * Revision 2.7 92/02/25 11:22:38 elf + * Accept creation of objects bigger than any one partition, in + * anticipation of the code that will page across partitions. + * Since we are at it, also proceed with no paging partitions: + * rely on killing unlucky objects on pageouts. + * [92/02/25 af] + * + * Revision 2.6 92/02/23 23:00:31 elf + * Copyright updated, corrected history. + * [92/02/23 elf] + * + * Revision 2.5 92/02/23 22:25:35 elf + * Improved handling of big objects, fixed a deadlock in + * object relocation, improved printouts. + * Now only crash if out of memory, otherwise use the old + * code that just marked the object as in-error. + * [92/02/23 13:25:49 af] + * + * As per jsb instructions, removed all NORMA conditionals. + * Rename port names to odd values, a trivial heuristic that + * makes name conflicts even more unlikely. + * [92/02/22 af] + * + * Refined the port name conflict problem. 
Instead of renaming + * ports that we send to, just set aside the memory that we cannot + * use. When objects get deleted put back the memory in the system. + * [92/02/21 af] + * + * Added renaming of request and name ports (from af). + * [92/02/21 danner] + * + * Many changes. Now supports adding/removing paging files, it does + * not immediately panic if a paging file fills up but relocates the + * object elsewhere, it uses the precious attribute in data_supply + * to reduce paging space usage (under USE_PRECIOUS conditional, + * enabled). + * [92/02/19 17:29:54 af] + * + * Two mods: changed bitmap ops to work one int at a time rather + * than one byte at a time. This helps under load, e.g. when the + * paging file is large and busy. Second mod to use port-to-pointer + * casting in lookups, rather than hash+list searching. This not + * only helps under load (I see >600 objects on my pmax) but also + * increases parallelism a little. + * Shrunk the code size by one page in the process. + * [92/02/14 01:44:23 af] + * + * Revision 2.4 92/01/23 15:19:41 rpd + * Changed to not include mig server interfaces. + * [92/01/23 rpd] + * + * Revision 2.3 92/01/14 16:43:14 rpd + * Moved mach/default_pager_object.defs to mach/default_pager.defs. + * Revised default_pager_info etc. for their new definitions. + * Removed (now) unnecessary #define's to rename kernel functions. + * [92/01/13 rpd] + * Added page_size to default_pager_info. + * Added default_pager_object_pages. + * [92/01/03 rpd] + * + * Updated to handle name ports from memory_object_create. + * Changed to remember the name ports associated with objects. + * Changed default_pager_objects to return the name ports. + * [91/12/28 rpd] + * + * Added default_pager_objects. + * [91/12/15 rpd] + * + * Revision 2.2 92/01/03 19:56:21 dbg + * Simplify locking. + * [91/10/02 dbg] + * + * Convert to run outside of kernel. + * [91/09/04 dbg] + * + * Revision 2.17 91/08/29 13:44:27 jsb + * A couple quick changes for NORMA_VM. 
Will be fixed later. + * + * Revision 2.16 91/08/28 16:59:29 jsb + * Fixed the default values of default_pager_internal_count and + * default_pager_external_count. + * [91/08/28 rpd] + * + * Revision 2.15 91/08/28 11:09:32 jsb + * Added seqnos_memory_object_change_completed. + * From dlb: use memory_object_data_supply for pagein when buffer is + * going to be deallocated. + * From me: don't use data_supply under NORMA_VM (will be fixed). + * [91/08/26 14:30:07 jsb] + * + * Changed to process requests in parallel when possible. + * + * Don't bother keeping track of mscount. + * [91/08/16 rpd] + * Added default_pager_info. + * [91/08/15 rpd] + * + * Added sequence numbers to the memory object interface. + * Changed to use no-senders notifications. + * Changed to keep track of port rights and not use mach_port_destroy. + * Added dummy supply-completed and data-return stubs. + * [91/08/13 rpd] + * + * Revision 2.14 91/05/18 14:28:32 rpd + * Don't give privileges to threads handling external objects. + * [91/04/06 rpd] + * Enhanced to use multiple threads, for performance and to avoid + * a deadlock caused by default_pager_object_create. + * Added locking to partitions. + * Added locking to pager_port_hashtable. + * Changed pager_port_hash to something reasonable. + * [91/04/03 rpd] + * + * Revision 2.13 91/05/14 15:21:41 mrt + * Correcting copyright + * + * Revision 2.12 91/03/16 14:41:26 rpd + * Updated for new kmem_alloc interface. + * Fixed memory_object_create to zero the new pager structure. + * [91/03/03 rpd] + * Removed thread_swappable. + * [91/01/18 rpd] + * + * Revision 2.11 91/02/05 17:00:49 mrt + * Changed to new copyright + * [91/01/28 14:54:31 mrt] + * + * Revision 2.10 90/09/09 14:31:01 rpd + * Use decl_simple_lock_data. + * [90/08/30 rpd] + * + * Revision 2.9 90/08/27 21:44:51 dbg + * Add definitions of NBBY, howmany. 
+ * [90/07/16 dbg] + * + * Revision 2.8 90/06/02 14:45:22 rpd + * Changed default_pager_object_create so the out argument + * is a poly send right. + * [90/05/03 rpd] + * Removed references to keep_wired_memory. + * [90/04/29 rpd] + * Converted to new IPC. + * Removed data-request queue. + * [90/03/26 21:30:57 rpd] + * + * Revision 2.7 90/03/14 21:09:58 rwd + * Call default_pager_object_server and add + * default_pager_object_create + * [90/01/22 rwd] + * + * Revision 2.6 90/01/11 11:41:08 dbg + * Use bootstrap-task print routines. + * [89/12/20 dbg] + * + * De-lint. + * [89/12/06 dbg] + * + * Revision 2.5 89/12/08 19:52:03 rwd + * Turn off CHECKSUM + * [89/12/06 rwd] + * + * Revision 2.4 89/10/23 12:01:54 dbg + * Change pager_read_offset and pager_write_offset to return block + * number as function result. default_read()'s caller must now + * deallocate data if not the same as the data buffer passed in. + * Add register declarations and clean up loops a bit. + * [89/10/19 dbg] + * + * Oops - nothing like having your debugging code introduce bugs... + * [89/10/17 dbg] + * + * Revision 2.3 89/10/16 15:21:59 rwd + * debugging: checksum pages in each object. + * [89/10/04 dbg] + * + * Revision 2.2 89/09/08 11:22:06 dbg + * Wait for default_partition to be set. + * [89/09/01 dbg] + * + * Modified to call outside routines for read and write. + * Removed disk structure. Added part_create. + * Reorganized code. + * [89/07/11 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Default pager. + * Threads management. + * Requests handling. + */ + +#include "default_pager_internal.h" +#include +#include +#include +#include +#include + +char my_name[] = "(default pager): "; + +#if DEFAULT_PAGER_DEBUG +int debug_mask = 0; +#endif /* DEFAULT_PAGER_DEBUG */ + +/* + * Use 16 Kbyte stacks instead of the default 64K. + * Use 4 Kbyte waiting stacks instead of the default 8K. + */ + +vm_size_t cthread_stack_size = 16 *1024; +extern vm_size_t cthread_wait_stack_size; + +unsigned long long vm_page_mask; +int vm_page_shift; + +int norma_mk; + +boolean_t verbose; + +/* task_t default_pager_self; */ /* Our task port. */ +mutex_t dpt_lock; /* lock for the dpt array struct */ +default_pager_thread_t **dpt_array; + +MACH_PORT_FACE default_pager_default_set; /* Port set for "default" thread. */ +MACH_PORT_FACE default_pager_default_port;/* Port for memory_object_create. */ +MACH_PORT_FACE default_pager_internal_set; /* Port set for internal objects. */ +MACH_PORT_FACE default_pager_external_set; /* Port set for external objects. */ + +#define DEFAULT_PAGER_INTERNAL_COUNT (4) + + +/* Memory created by default_pager_object_create should mostly be resident. */ +#define DEFAULT_PAGER_EXTERNAL_COUNT (2) + +unsigned int default_pager_internal_count = DEFAULT_PAGER_INTERNAL_COUNT; +/* Number of "internal" threads. 
*/ +unsigned int default_pager_external_count = DEFAULT_PAGER_EXTERNAL_COUNT; +/* Number of "external" threads. */ + +/* + * Forward declarations. + */ +boolean_t default_pager_notify_server(mach_msg_header_t *, + mach_msg_header_t *); +boolean_t default_pager_demux_object(mach_msg_header_t *, + mach_msg_header_t *); +boolean_t default_pager_demux_default(mach_msg_header_t *, + mach_msg_header_t *); +default_pager_thread_t *start_default_pager_thread(int, boolean_t); +void default_pager(void); +void default_pager_thread(void *); +void default_pager_initialize(void); +void default_pager_set_policy(MACH_PORT_FACE); +boolean_t dp_parse_argument(char *); /* forward; */ +unsigned int d_to_i(char *); /* forward; */ + + +extern int vstruct_def_clshift; + + +/* + * Initialize and Run the default pager + */ +void +default_pager(void) +{ + int i, id; + static char here[] = "default_pager"; + mach_msg_options_t server_options; + default_pager_thread_t dpt; + kern_return_t kr; + + + + /* + * Give me space for the thread array and zero it. + */ + i = default_pager_internal_count + default_pager_external_count + 1; + dpt_array = (default_pager_thread_t **) + kalloc(i * sizeof(default_pager_thread_t *)); + memset(dpt_array, 0, i * sizeof(default_pager_thread_t *)); + + /* Setup my thread structure. */ + id = 0; + dpt.dpt_buffer = 0; + dpt.dpt_internal = FALSE; + dpt.dpt_initialized_p = TRUE; + dpt_array[0] = &dpt; + + /* + * Now we create the threads that will actually + * manage objects. 
/*
 * Base-2 logarithm for exact powers of two.
 * Counts the trailing zero bits of n; the result is meaningful only
 * when n is 2^k for some k (as the comment below notes, callers pass
 * vm_page_size, which is always a power of two).  Returns 0 for n == 0.
 */
/* simple utility: only works for 2^n */
int
local_log2(
	unsigned int	n)
{
	int	shift;

	if (n == 0)
		return 0;

	/* Strip trailing zero bits, counting as we go. */
	for (shift = 0; (n & 1) == 0; n >>= 1)
		shift++;

	return shift;
}

/*
 * Minimal decimal string-to-unsigned conversion.
 * Consumes leading decimal digits only and stops at the first
 * non-digit; no sign handling, base prefixes, or range checking.
 */
/* another simple utility, d_to_i(char*) supporting only decimal
 * and devoid of range checking; obscure name chosen deliberately
 * to avoid confusion with semantic-rich POSIX routines */
unsigned int
d_to_i(char * arg)
{
	unsigned int	value;
	char		c;

	for (value = 0; (c = *arg++) != '\0' && c >= '0' && c <= '9'; )
		value = value * 10 + (unsigned int)(c - '0');

	return(value);
}
ignored\n", av)); + return(TRUE); + } + /* else if strprefix(av,"another_argument")) { + handle_another_argument(av); + return(TRUE); + } */ + } + return(FALSE); +} + +int +start_def_pager(char *bs_device) +{ + int my_node; +/* + MACH_PORT_FACE master_device_port; +*/ + MACH_PORT_FACE security_port; +/* + MACH_PORT_FACE root_ledger_wired; + MACH_PORT_FACE root_ledger_paged; +*/ + static char here[] = "main"; + int need_dp_init = 1; + + + +/* + default_pager_host_port = ipc_port_make_send(realhost.host_priv_self); + master_device_port = ipc_port_make_send(master_device_port); + root_ledger_wired = ipc_port_make_send(root_wired_ledger_port); + root_ledger_paged = ipc_port_make_send(root_paged_ledger_port); +*/ + security_port = ipc_port_make_send(realhost.host_security_self); + + +#if NORMA_VM + norma_mk = 1; +#else + norma_mk = 0; +#endif + + + /* setup read buffers, etc */ + default_pager_initialize(); + default_pager(); +} + +/* + * Return TRUE if string 2 is a prefix of string 1. + */ +boolean_t +strprefix(register const char *s1, register const char *s2) +{ + register int c; + + while ((c = *s2++) != '\0') { + if (c != *s1++) + return (FALSE); + } + return (TRUE); +} + + +kern_return_t +default_pager_info( + MACH_PORT_FACE pager, + default_pager_info_t *infop) +{ + vm_size_t pages_total, pages_free; + + if (pager != default_pager_default_port) + return KERN_INVALID_ARGUMENT; + + bs_global_info(&pages_total, &pages_free); + + infop->dpi_total_space = ptoa(pages_total); + infop->dpi_free_space = ptoa(pages_free); + infop->dpi_page_size = vm_page_size; + + return KERN_SUCCESS; +} + + +void +default_pager_initialize() +{ + kern_return_t kr; + static char here[] = "default_pager_initialize"; + + + /* + * Exported DMM port. + */ + default_pager_default_port = ipc_port_alloc_kernel(); + + + /* + * Export pager interfaces. 
+ */ +#ifdef USER_PAGER + if ((kr = netname_check_in(name_server_port, "UserPager", + default_pager_self, + default_pager_default_port)) + != KERN_SUCCESS) { + dprintf(("netname_check_in returned 0x%x\n", kr)); + exit(1); + } +#else /* USER_PAGER */ + { + int clsize; + ipc_port_t DMM; + + DMM = ipc_port_make_send(default_pager_default_port); + clsize = (vm_page_size << vstruct_def_clshift); + kr = host_default_memory_manager(host_priv_self(), &DMM, clsize); + if ((kr != KERN_SUCCESS) || (DMM != MACH_PORT_NULL)) + Panic("default memory manager"); + + } +#endif /* USER_PAGER */ + + + /* + * Vm variables. + */ + vm_page_mask = vm_page_size - 1; + vm_page_shift = local_log2(vm_page_size); + + /* + * List of all vstructs. + */ + VSL_LOCK_INIT(); + queue_init(&vstruct_list.vsl_queue); + queue_init(&vstruct_list.vsl_leak_queue); + vstruct_list.vsl_count = 0; + + VSTATS_LOCK_INIT(&global_stats.gs_lock); + + bs_initialize(); +} + diff --git a/osfmk/default_pager/default_pager_alerts.defs b/osfmk/default_pager/default_pager_alerts.defs new file mode 100644 index 000000000..bbf143bd1 --- /dev/null +++ b/osfmk/default_pager/default_pager_alerts.defs @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: default_pager/default_pager_alerts.defs + * + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + default_pager_alerts 2295; + +#include +#include + + +simpleroutine default_pager_space_alert( + alert_port : mach_port_t; + in flags : int); + diff --git a/osfmk/default_pager/default_pager_internal.h b/osfmk/default_pager/default_pager_internal.h new file mode 100644 index 000000000..a2ac7d610 --- /dev/null +++ b/osfmk/default_pager/default_pager_internal.h @@ -0,0 +1,773 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Default pager. + * General definitions. + */ + +#ifndef _DEFAULT_PAGER_INTERNAL_H_ +#define _DEFAULT_PAGER_INTERNAL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Default option settings. + */ +#ifndef PARALLEL +#define PARALLEL 1 +#endif + +#ifndef CHECKSUM +#define CHECKSUM 0 +#endif + +#define MACH_PORT_FACE mach_port_t + +#ifndef USE_PRECIOUS +#define USE_PRECIOUS TRUE +#endif + +#ifdef USER_PAGER +#define UP(stuff) stuff +#else /* USER_PAGER */ +#define UP(stuff) +#endif /* USER_PAGER */ + +extern int norma_mk; /* is the kernel configured with NORMA ? 
*/ + +#ifndef MACH_KERNEL +extern struct mutex dprintf_lock; +#define PRINTF_LOCK_INIT() mutex_init(&dprintf_lock) +#define PRINTF_LOCK() mutex_lock(&dprintf_lock) +#define PRINTF_UNLOCK() mutex_unlock(&dprintf_lock) +#endif + +#ifndef MACH_KERNEL +#define dprintf(args) \ + do { \ + PRINTF_LOCK(); \ + printf("%s[%d]%s: ", my_name, dp_thread_id(), here); \ + printf args; \ + PRINTF_UNLOCK(); \ + } while (0) +#else +#define dprintf(args) \ + do { \ + printf("%s[KERNEL:]%s: ", my_name, here); \ + printf args; \ + } while (0) +#endif + +/* + * Debug. + */ +extern char my_name[]; + +#define DEFAULT_PAGER_DEBUG 0 + +#if DEFAULT_PAGER_DEBUG + +extern int debug_mask; +#define DEBUG_MSG_EXTERNAL 0x00000001 +#define DEBUG_MSG_INTERNAL 0x00000002 +#define DEBUG_MO_EXTERNAL 0x00000100 +#define DEBUG_MO_INTERNAL 0x00000200 +#define DEBUG_VS_EXTERNAL 0x00010000 +#define DEBUG_VS_INTERNAL 0x00020000 +#define DEBUG_BS_EXTERNAL 0x01000000 +#define DEBUG_BS_INTERNAL 0x02000000 + +#define DEBUG(level, args) \ + do { \ + if (debug_mask & (level)) \ + dprintf(args); \ + } while (0) + +#define ASSERT(expr) \ + do { \ + if (!(expr)) \ +#ifndef MACH_KERNEL + panic("%s[%d]%s: assertion failed in %s line %d: %s",\ + my_name, dp_thread_id(), here, \ + __FILE__, __LINE__, # expr); \ +#else + panic("%s[KERNEL]%s: assertion failed in %s line %d: %s",\ + my_name, here, __FILE__, __LINE__, # expr); \ +#endif + } while (0) + +#else /* DEFAULT_PAGER_DEBUG */ + +#define DEBUG(level, args) here[0] = here[0] +#define ASSERT(clause) here[0] = here[0] + +#endif /* DEFAULT_PAGER_DEBUG */ + +#ifndef MACH_KERNEL +extern char *mach_error_string(kern_return_t); +#endif + +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) + +#define PAGER_SUCCESS 0 +#define PAGER_FULL 1 +#define PAGER_ERROR 2 + +/* + * VM and IPC globals. 
+ */ +#ifdef MACH_KERNEL +#define vm_page_size page_size +extern vm_size_t page_size; +#else +extern vm_object_size_t vm_page_size; +#endif +extern unsigned long long vm_page_mask; +extern int vm_page_shift; + +#ifndef MACH_KERNEL +#define ptoa(p) ((p)*vm_page_size) +#define atop(a) ((a)/vm_page_size) +#endif +#define howmany(a,b) (((a) + (b) - 1)/(b)) + +#ifdef MACH_KERNEL +extern mutex_t dpt_lock; /* Lock for the dpt array */ +extern unsigned int default_pager_internal_count; +extern MACH_PORT_FACE default_pager_host_port; +/* extern task_t default_pager_self; */ /* dont need or want */ +extern MACH_PORT_FACE default_pager_internal_set; +extern MACH_PORT_FACE default_pager_external_set; +extern MACH_PORT_FACE default_pager_default_port; +extern MACH_PORT_FACE default_pager_default_set; +#else +extern mach_port_t default_pager_host_port; +extern task_port_t default_pager_self; +extern mach_port_t default_pager_internal_set; +extern mach_port_t default_pager_external_set; +extern mach_port_t default_pager_default_port; +extern mach_port_t default_pager_default_set; +#endif + +typedef struct default_pager_thread { +#ifndef MACH_KERNEL + cthread_t dpt_thread; /* Server thread. */ +#endif + vm_offset_t dpt_buffer; /* Read buffer. */ + boolean_t dpt_internal; /* Do we handle internal objects? */ +#ifndef MACH_KERNEL + int dpt_id; /* thread id for printf */ +#else + int checked_out; +#endif + boolean_t dpt_initialized_p; /* Thread is ready for requests. */ +} default_pager_thread_t; + +#ifdef MACH_KERNEL +extern default_pager_thread_t **dpt_array; +#endif + +/* + * Global statistics. 
+ */ +struct { + unsigned int gs_pageout_calls; /* # pageout calls */ + unsigned int gs_pagein_calls; /* # pagein calls */ + unsigned int gs_pages_in; /* # pages paged in (total) */ + unsigned int gs_pages_out; /* # pages paged out (total) */ + unsigned int gs_pages_unavail; /* # zero-fill pages */ + unsigned int gs_pages_init; /* # page init requests */ + unsigned int gs_pages_init_writes; /* # page init writes */ + VSTATS_LOCK_DECL(gs_lock) +} global_stats; +#define GSTAT(clause) VSTATS_ACTION(&global_stats.gs_lock, (clause)) + +/* + * Cluster related definitions. + * Clusters are sized in number of pages per cluster. + * Cluster sizes must be powers of two. + * + * These numbers are related to the struct vs_map, + * defined below. + */ +#define MAX_CLUSTER_SIZE 8 +#define MAX_CLUSTER_SHIFT 3 +#define NO_CLSIZE 0 + +/* + * bit map related macros + */ +#define NBBY 8 /* bits per byte XXX */ +#define BYTEMASK 0xff +#define setbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) |= 1<<((i)%NBBY)) +#define clrbit(a,i) (*(((char *)(a)) + ((i)/NBBY)) &= ~(1<<((i)%NBBY))) +#define isset(a,i) (*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) +#define isclr(a,i) ((*(((char *)(a)) + ((i)/NBBY)) & (1<<((i)%NBBY))) == 0) + +/* + * Default Pager. + * Backing Store Management. + */ + +#define BS_MAXPRI 4 +#define BS_MINPRI 0 +#define BS_NOPRI -1 +#define BS_FULLPRI -2 + +/* + * Mapping between backing store port and backing store object. 
+ */ +struct backing_store { + queue_chain_t bs_links; /* link in backing_store_list */ +#ifdef MACH_KERNEL + mutex_t bs_lock; /* lock for the structure */ +#else + struct mutex bs_lock; /* lock for the structure */ +#endif + MACH_PORT_FACE bs_port; /* backing store port */ + int bs_priority; + int bs_clsize; /* cluster size in pages */ + + /* statistics */ + unsigned int bs_pages_free; /* # unallocated pages */ + unsigned int bs_pages_total; /* # pages (total) */ + unsigned int bs_pages_in; /* # page read requests */ + unsigned int bs_pages_in_fail; /* # page read errors */ + unsigned int bs_pages_out; /* # page write requests */ + unsigned int bs_pages_out_fail; /* # page write errors */ +}; +typedef struct backing_store *backing_store_t; +#define BACKING_STORE_NULL ((backing_store_t) 0) +#define BS_STAT(bs, clause) VSTATS_ACTION(&(bs)->bs_lock, (clause)) + +#ifdef MACH_KERNEL +#define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock, ETAP_DPAGE_BS) +#else +#define BS_LOCK_INIT(bs) mutex_init(&(bs)->bs_lock) +#endif +#define BS_LOCK(bs) mutex_lock(&(bs)->bs_lock) +#define BS_UNLOCK(bs) mutex_unlock(&(bs)->bs_lock) + +struct backing_store_list_head { + queue_head_t bsl_queue; +#ifdef MACH_KERNEL + mutex_t bsl_lock; +#else + struct mutex bsl_lock; +#endif +}; +extern struct backing_store_list_head backing_store_list; + +#ifdef MACH_KERNEL +#define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock, ETAP_DPAGE_BSL) +#else +#define BSL_LOCK_INIT() mutex_init(&backing_store_list.bsl_lock) +#endif +#define BSL_LOCK() mutex_lock(&backing_store_list.bsl_lock) +#define BSL_UNLOCK() mutex_unlock(&backing_store_list.bsl_lock) + +/* + * Paging segment management. + * Controls allocation of blocks within paging area. 
+ */ +struct paging_segment { + /* device management */ + union { + MACH_PORT_FACE dev; /* Port to device */ + struct vnode *vnode; /* vnode for bs file */ + } storage_type; + unsigned int ps_segtype; /* file type or partition */ + MACH_PORT_FACE ps_device; /* Port to device */ + vm_offset_t ps_offset; /* Offset of segment within device */ + vm_offset_t ps_recnum; /* Number of device records in segment*/ + unsigned int ps_pgnum; /* Number of pages in segment */ + unsigned int ps_record_shift;/* Bit shift: pages to device records */ + + /* clusters and pages */ + unsigned int ps_clshift; /* Bit shift: clusters to pages */ + unsigned int ps_ncls; /* Number of clusters in segment */ + unsigned int ps_clcount; /* Number of free clusters */ + unsigned int ps_pgcount; /* Number of free pages */ + long ps_hint; /* Hint of where to look next. */ + + /* bitmap */ +#ifdef MACH_KERNEL + mutex_t ps_lock; /* Lock for contents of struct */ +#else + struct mutex ps_lock; /* Lock for contents of struct */ +#endif + unsigned char *ps_bmap; /* Map of used clusters */ + + /* backing store */ + backing_store_t ps_bs; /* Backing store segment belongs to */ + + boolean_t ps_going_away; /* Destroy attempt in progress */ +}; + +#define ps_vnode storage_type.vnode +#define ps_device storage_type.dev +#define PS_PARTITION 1 +#define PS_FILE 2 + +typedef struct paging_segment *paging_segment_t; + +#define PAGING_SEGMENT_NULL ((paging_segment_t) 0) + +#ifdef MACH_KERNEL +#define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock, ETAP_DPAGE_SEGMENT) +#else +#define PS_LOCK_INIT(ps) mutex_init(&(ps)->ps_lock) +#endif +#define PS_LOCK(ps) mutex_lock(&(ps)->ps_lock) +#define PS_UNLOCK(ps) mutex_unlock(&(ps)->ps_lock) + +typedef unsigned int pseg_index_t; + +#define INVALID_PSEG_INDEX ((pseg_index_t)-1) +#define NULL_PSEG_INDEX ((pseg_index_t) 0) +/* + * MAX_PSEG_INDEX value is related to struct vs_map below. + * "0" is reserved for empty map entries (no segment). 
+ */ +#define MAX_PSEG_INDEX 63 /* 0 is reserved for empty map */ +#define MAX_NUM_PAGING_SEGMENTS MAX_PSEG_INDEX + +/* paging segments array */ +extern paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS]; +#ifdef MACH_KERNEL +extern mutex_t paging_segments_lock; +#else +extern struct mutex paging_segments_lock; +#endif +extern int paging_segment_count; /* number of active paging segments */ +extern int paging_segment_max; /* highest used paging segment index */ +extern int ps_select_array[DEFAULT_PAGER_BACKING_STORE_MAXPRI+1]; + +#ifdef MACH_KERNEL +#define PSL_LOCK_INIT() mutex_init(&paging_segments_lock, ETAP_DPAGE_SEGLIST) +#else +#define PSL_LOCK_INIT() mutex_init(&paging_segments_lock) +#endif +#define PSL_LOCK() mutex_lock(&paging_segments_lock) +#define PSL_UNLOCK() mutex_unlock(&paging_segments_lock) + +/* + * Vstruct manipulation. The vstruct is the pager's internal + * representation of vm objects it manages. There is one vstruct allocated + * per vm object. + * + * The following data structures are defined for vstruct and vm object + * management. + */ + +/* + * vs_map + * A structure used only for temporary objects. It is the element + * contained in the vs_clmap structure, which contains information + * about which clusters and pages in an object are present on backing + * store (a paging file). + * Note that this structure and its associated constants may change + * with minimal impact on code. The only function which knows the + * internals of this structure is ps_clmap(). + * + * If it is necessary to change the maximum number of paging segments + * or pages in a cluster, then this structure is the one most + * affected. 
The constants and structures which *may* change are: + * MAX_CLUSTER_SIZE + * MAX_CLUSTER_SHIFT + * MAX_NUM_PAGING_SEGMENTS + * VSTRUCT_DEF_CLSHIFT + * struct vs_map and associated macros and constants (VSM_*) + * (only the macro definitions need change, the exported (inside the + * pager only) interfaces remain the same; the constants are for + * internal vs_map manipulation only). + * struct clbmap (below). + */ +struct vs_map { + unsigned int vsmap_entry:23, /* offset in paging segment */ + vsmap_psindex:8, /* paging segment */ + vsmap_error:1, + vsmap_bmap:16, + vsmap_alloc:16; +}; + +typedef struct vs_map *vs_map_t; + + +#define VSM_ENTRY_NULL 0x7fffff + +/* + * Exported macros for manipulating the vs_map structure -- + * checking status, getting and setting bits. + */ +#define VSCLSIZE(vs) (1 << (vs)->vs_clshift) +#define VSM_ISCLR(vsm) (((vsm).vsmap_entry == VSM_ENTRY_NULL) && \ + ((vsm).vsmap_error == 0)) +#define VSM_ISERR(vsm) ((vsm).vsmap_error) +#define VSM_SETCLOFF(vsm, val) ((vsm).vsmap_entry = (val)) +#define VSM_SETERR(vsm, err) ((vsm).vsmap_error = 1, \ + (vsm).vsmap_entry = (err)) +#define VSM_GETERR(vsm) ((vsm).vsmap_entry) +#define VSM_SETPG(vsm, page) ((vsm).vsmap_bmap |= (1 << (page))) +#define VSM_CLRPG(vsm, page) ((vsm).vsmap_bmap &= ~(1 << (page))) +#define VSM_SETPS(vsm, psindx) ((vsm).vsmap_psindex = (psindx)) +#define VSM_PSINDEX(vsm) ((vsm).vsmap_psindex) +#define VSM_PS(vsm) paging_segments[(vsm).vsmap_psindex] +#define VSM_BMAP(vsm) ((vsm).vsmap_bmap) +#define VSM_CLOFF(vsm) ((vsm).vsmap_entry) +#define VSM_CLR(vsm) ((vsm).vsmap_entry = VSM_ENTRY_NULL, \ + (vsm).vsmap_psindex = 0, \ + (vsm).vsmap_error = 0, \ + (vsm).vsmap_bmap = 0, \ + (vsm).vsmap_alloc = 0) +#define VSM_ALLOC(vsm) ((vsm).vsmap_alloc) +#define VSM_SETALLOC(vsm, page) ((vsm).vsmap_alloc |= (1 << (page))) +#define VSM_CLRALLOC(vsm, page) ((vsm).vsmap_alloc &= ~(1 << (page))) + +/* + * Constants and macros for dealing with vstruct maps, + * which comprise vs_map 
structures, which + * map vm objects to backing storage (paging files and clusters). + */ +#define CLMAP_THRESHOLD 512 /* bytes */ +#define CLMAP_ENTRIES (CLMAP_THRESHOLD/sizeof(struct vs_map)) +#define CLMAP_SIZE(ncls) (ncls*sizeof(struct vs_map)) + +#define INDIRECT_CLMAP_ENTRIES(ncls) (((ncls-1)/CLMAP_ENTRIES) + 1) +#define INDIRECT_CLMAP_SIZE(ncls) (INDIRECT_CLMAP_ENTRIES(ncls) * sizeof(struct vs_map *)) +#define INDIRECT_CLMAP(size) (CLMAP_SIZE(size) > CLMAP_THRESHOLD) + +#define RMAPSIZE(blocks) (howmany(blocks,NBBY)) + +#define CL_FIND 1 +#define CL_ALLOC 2 + +/* + * clmap + * + * A cluster map returned by ps_clmap. It is an abstracted cluster of + * pages. It gives the caller information about the cluster + * desired. On read it tells the caller if a cluster is mapped, and if so, + * which of its pages are valid. It should not be referenced directly, + * except by ps_clmap; macros should be used. If the number of pages + * in a cluster needs to be more than 32, then the struct clbmap must + * become larger. 
+ */ +struct clbmap { + unsigned int clb_map; +}; + +struct clmap { + paging_segment_t cl_ps; /* paging segment backing cluster */ + int cl_numpages; /* number of valid pages */ + struct clbmap cl_bmap; /* map of pages in cluster */ + int cl_error; /* cluster error value */ + struct clbmap cl_alloc; /* map of allocated pages in cluster */ +}; + +#define CLMAP_ERROR(clm) (clm).cl_error +#define CLMAP_PS(clm) (clm).cl_ps +#define CLMAP_NPGS(clm) (clm).cl_numpages +#define CLMAP_ISSET(clm,i) ((1<<(i))&((clm).cl_bmap.clb_map)) +#define CLMAP_ALLOC(clm) (clm).cl_alloc.clb_map +/* + * Shift off unused bits in a partial cluster + */ +#define CLMAP_SHIFT(clm,vs) \ + (clm)->cl_bmap.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages) +#define CLMAP_SHIFTALLOC(clm,vs) \ + (clm)->cl_alloc.clb_map >>= (VSCLSIZE(vs) - (clm)->cl_numpages) + +typedef struct vstruct_alias { + vm_offset_t name; + struct vstruct *vs; +} vstruct_alias_t; + +/* + * VM Object Structure: This is the structure used to manage pagers associated + * to VM objects.Mapping between pager port and paging object. 
+ */ + +typedef struct vstruct { + queue_chain_t vs_links; /* Link in pager-port list */ +#ifdef MACH_KERNEL + hw_lock_data_t vs_lock; /* Lock for the structure */ +#else + struct mutex vs_lock; /* Lock for the structure */ +#endif + MACH_PORT_FACE vs_mem_obj_port; /* Memory object port */ + mach_port_seqno_t vs_next_seqno; /* next sequence number to issue */ + mach_port_seqno_t vs_seqno; /* Pager port sequence number */ + MACH_PORT_FACE vs_control_port;/* Memory object's control port */ + mach_port_urefs_t vs_control_refs; /* Mem object's control port refs */ + MACH_PORT_FACE vs_object_name; /* Name port */ + mach_port_urefs_t vs_name_refs; /* Name port user-refs */ + +#ifdef MACH_KERNEL + boolean_t vs_waiting_seqno;/* to wait on seqno */ + boolean_t vs_waiting_read; /* to wait on readers */ + boolean_t vs_waiting_write;/* to wait on writers */ + boolean_t vs_waiting_refs; /* to wait on refs */ + boolean_t vs_waiting_async;/* to wait on async_pending */ +#else + event_t vs_waiting_seqno;/* to wait on seqno */ + event_t vs_waiting_read; /* to wait on readers */ + event_t vs_waiting_write;/* to wait on writers */ + event_t vs_waiting_refs; /* to wait on refs */ + event_t vs_waiting_async;/* to wait on async_pending */ +#endif + unsigned int vs_readers; /* Reads in progress */ + unsigned int vs_writers; /* Writes in progress */ + + unsigned int vs_errors; /* Pageout error count */ + + int vs_clshift; /* Bit shift: clusters to pages */ + int vs_size; /* Object size in clusters */ + int vs_indirect:1, /* Is the map indirect ? */ + vs_xfer_pending:1; /* xfering out of a seg ? 
*/ + int vs_async_pending; /* Count of pending async writes */ +#ifdef MACH_KERNEL + mutex_t vs_map_lock; /* to protect map below */ +#else + struct mutex vs_map_lock; /* to protect map below */ +#endif + union { + struct vs_map *vsu_dmap; /* Direct map of clusters */ + struct vs_map **vsu_imap; /* Indirect map of clusters */ + } vs_un; +} *vstruct_t; + +#define vs_dmap vs_un.vsu_dmap +#define vs_imap vs_un.vsu_imap +#define MEM_OBJ_CTL(vs) ((vs)->vs_control_port) + +#define VSTRUCT_NULL ((vstruct_t) 0) + +#ifdef MACH_KERNEL +#define DPT_LOCK_INIT(lock) mutex_init(&(lock), ETAP_DPAGE_VSTRUCT) +#define DPT_LOCK(lock) mutex_lock(&(lock)) +#define DPT_UNLOCK(lock) mutex_unlock(&(lock)) +#define VS_LOCK_INIT(vs) hw_lock_init(&(vs)->vs_lock) +#define VS_TRY_LOCK(vs) (VS_LOCK(vs),TRUE) +#define VS_LOCK(vs) hw_lock_lock(&(vs)->vs_lock) +#define VS_UNLOCK(vs) hw_lock_unlock(&(vs)->vs_lock) +#else +#define VS_LOCK_INIT(vs) mutex_init(&(vs)->vs_lock, ETAP_DPAGE_VSTRUCT) +#define VS_TRY_LOCK(vs) mutex_try_lock(&(vs)->vs_lock) +#define VS_LOCK(vs) mutex_lock(&(vs)->vs_lock) +#define VS_UNLOCK(vs) mutex_unlock(&(vs)->vs_lock) +#endif + +#ifdef MACH_KERNEL +#define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock, ETAP_DPAGE_VSMAP) +#else +#define VS_MAP_LOCK_INIT(vs) mutex_init(&(vs)->vs_map_lock) +#endif +#define VS_MAP_LOCK(vs) mutex_lock(&(vs)->vs_map_lock) +#ifndef MACH_KERNEL +#define VS_MAP_TRY_LOCK(vs) mutex_try_lock(&(vs)->vs_map_lock) +#else +#define VS_MAP_TRY_LOCK(vs) mutex_try(&(vs)->vs_map_lock) +#endif +#define VS_MAP_UNLOCK(vs) mutex_unlock(&(vs)->vs_map_lock) + +/* + * Data structures and variables dealing with asynchronous + * completion of paging operations. + */ +/* + * vs_async + * A structure passed to ps_write_device for asynchronous completions. + * It contains enough information to complete the write and + * inform the VM of its completion. 
+ */ +struct vs_async { + struct vs_async *vsa_next; /* pointer to next structure */ + vstruct_t vsa_vs; /* the vstruct for the object */ + vm_offset_t vsa_addr; /* the vaddr of the data moved */ + vm_offset_t vsa_offset; /* the object offset of the data */ + vm_size_t vsa_size; /* the number of bytes moved */ + paging_segment_t vsa_ps; /* the paging segment used */ + int vsa_flags; /* flags */ + int vsa_error; /* error, if there is one */ + mutex_t vsa_lock; + MACH_PORT_FACE reply_port; /* associated reply port */ +}; + +/* + * flags values. + */ +#define VSA_READ 0x0001 +#define VSA_WRITE 0x0002 +#define VSA_TRANSFER 0x0004 + +/* + * List of all vstructs. A specific vstruct is + * found directly via its port, this list is + * only used for monitoring purposes by the + * default_pager_object* calls + */ +struct vstruct_list_head { + queue_head_t vsl_queue; +#ifdef MACH_KERNEL + mutex_t vsl_lock; +#else + struct mutex vsl_lock; +#endif + int vsl_count; /* saves code */ + queue_head_t vsl_leak_queue; +}; +extern struct vstruct_list_head vstruct_list; + +#ifdef MACH_KERNEL +#define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock, ETAP_DPAGE_VSLIST) +#else +#define VSL_LOCK_INIT() mutex_init(&vstruct_list.vsl_lock) +#endif +#define VSL_LOCK() mutex_lock(&vstruct_list.vsl_lock) +#define VSL_LOCK_TRY() mutex_try(&vstruct_list.vsl_lock) +#define VSL_UNLOCK() mutex_unlock(&vstruct_list.vsl_lock) + +/* + * Create port alias for vstruct address. + * + * We assume that the last two bits of a vstruct address will be zero due to + * memory allocation restrictions, hence are available for use as a sanity + * check. 
+ */ +#ifdef MACH_KERNEL +#define ISVS 123456 +#define port_is_vs(_port_) \ + ((((struct vstruct_alias *)((_port_)->alias)) != NULL) && \ + (((struct vstruct_alias *)(_port_)->alias)->name==ISVS)) +#define port_to_vs(_port_) \ + ((struct vstruct_alias *)(_port_)->alias)->vs +#define vs_to_port(_vs_) (_vs_->vs_mem_obj_port) +#define vs_lookup(_port_, _vs_) \ + do { \ + if ((((struct vstruct_alias *)(_port_)->alias) == NULL) || \ + (((struct vstruct_alias *)(_port_)->alias)->name!=ISVS)) \ + panic("bad pager port"); \ + _vs_ = port_to_vs(_port_); \ + } while (0) +#else +#define vs_to_port(_vs_) (((vm_offset_t)(_vs_))+1) +#define port_to_vs(_port_) ((vstruct_t)(((vm_offset_t)(_port_))&~3)) +#define port_is_vs(_port_) ((((vm_offset_t)(_port_))&3) == 1) + +#define vs_lookup(_port_, _vs_) \ + do { \ + if (!MACH_PORT_VALID(_port_) || !port_is_vs(_port_) \ + || port_to_vs(_port_)->vs_mem_obj_port != (_port_)) \ + Panic("bad pager port"); \ + _vs_ = port_to_vs(_port_); \ + } while (0) +#endif + +/* + * Cross-module routines declaration. 
+ */ +#ifndef MACH_KERNEL +extern int dp_thread_id(void); +#endif +extern boolean_t device_reply_server(mach_msg_header_t *, + mach_msg_header_t *); +#ifdef MACH_KERNEL +extern void default_pager_no_senders(MACH_PORT_FACE, + mach_port_seqno_t, + mach_port_mscount_t); +#else +extern void default_pager_no_senders(memory_object_t, + mach_port_seqno_t, + mach_port_mscount_t); +#endif +extern int local_log2(unsigned int); +extern void bs_initialize(void); +extern void bs_global_info(vm_size_t *, + vm_size_t *); +extern boolean_t bs_add_device(char *, + MACH_PORT_FACE); +extern vstruct_t ps_vstruct_create(vm_size_t); +extern void ps_vstruct_dealloc(vstruct_t); +extern kern_return_t pvs_cluster_read(vstruct_t, + vm_offset_t, + vm_size_t); +extern kern_return_t vs_cluster_write(vstruct_t, + upl_t, + vm_offset_t, + vm_size_t, + boolean_t, + int); +extern vm_offset_t ps_clmap(vstruct_t, + vm_offset_t, + struct clmap *, + int, + vm_size_t, + int); +extern vm_size_t ps_vstruct_allocated_size(vstruct_t); +extern size_t ps_vstruct_allocated_pages(vstruct_t, + default_pager_page_t *, + size_t); +extern boolean_t bs_set_default_clsize(unsigned int); + +extern boolean_t verbose; + +#endif /* _DEFAULT_PAGER_INTERNAL_H_ */ diff --git a/osfmk/default_pager/default_pager_object.defs b/osfmk/default_pager/default_pager_object.defs new file mode 100644 index 000000000..43595fe6d --- /dev/null +++ b/osfmk/default_pager/default_pager_object.defs @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: mach/default_pager_object.defs + * + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + default_pager_object 2275; + +#include +#include +#include + +type vnode_ptr_t = array[1] of int; + +routine default_pager_object_create( + default_pager : mach_port_t; + out memory_object : memory_object_t = + MACH_MSG_TYPE_MAKE_SEND; + object_size : vm_size_t); + +routine default_pager_info( + default_pager : mach_port_t; + out info : default_pager_info_t); + +routine default_pager_objects( + default_pager : mach_port_t; + out objects : default_pager_object_array_t, + Dealloc; + out ports : mach_port_array_t = + array[] of mach_port_move_send_t, + Dealloc); + +routine default_pager_object_pages( + default_pager : mach_port_t; + memory_object : memory_object_name_t; + out pages : default_pager_page_array_t, + Dealloc); + +skip; /* default_pager_paging_file */ + +routine default_pager_backing_store_create( + default_pager : mach_port_t; + in priority : int; + in clsize : int; + out backing_store : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND); + +routine default_pager_backing_store_delete( + backing_store : mach_port_t); + + +#ifdef PAGE_TO_DEVICE +routine default_pager_add_segment( + backing_store : mach_port_t; + in device : mach_port_t; + in offset : recnum_t; + in count : recnum_t; + in record_size : int); +#endif + +routine default_pager_backing_store_info( + backing_store : mach_port_t; + flavor : backing_store_flavor_t; + out info : backing_store_info_t, CountInOut); + +routine default_pager_add_file( + backing_store : mach_port_t; + in vnode : vnode_ptr_t; + in record_size : int; + in size : vm_size_t); + + +routine default_pager_triggers( + default_pager : mach_port_t; + in hi_wat : int; + in lo_wat : int; + in flags : int; + in trigger_port : mach_port_t); + + diff --git a/osfmk/default_pager/default_pager_types.defs b/osfmk/default_pager/default_pager_types.defs 
new file mode 100644 index 000000000..1eb3c517f --- /dev/null +++ b/osfmk/default_pager/default_pager_types.defs @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * $Log: default_pager_types.defs,v $ + * Revision 1.3 2000/01/26 05:56:23 wsanchez + * Add APSL + * + * Revision 1.2 1998/12/01 00:24:42 wsanchez + * Merged in CDY_DP1 (chris: default pager) + * + * Revision 1.1.2.2 1998/11/25 21:32:17 youngwor + * fix errant comment format + * + * Revision 1.1.2.1 1998/11/24 22:39:58 youngwor + * Check-in of support for the in-kernel default pager + * + * Revision 1.1.1.1 1998/03/07 02:26:33 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.7.3 1995/01/11 19:30:40 devrcs + * mk6 CR668 - 1.3b26 merge + * [1994/11/10 15:34:32 bolinger] + * + * Insert OSC1_3 log. + * + * BEGIN OSC1_3 HISTORY + * + * Revision 1.2.2.6 1994/05/06 19:23:25 tmt + * Merge Alpha changes into osc1.3b19 source. 
+ * [1994/03/29 18:21:06 rmiller] + * 64 bit + * [1994/01/27 14:31:30 picco] + * + * Revision 1.2.2.5 1994/04/01 18:42:58 jph + * CR10550 -- Add backing store info interfaces. + * [1994/04/01 18:40:30 jph] + * + * END OSC1_3 HISTORY + * [1994/11/02 20:48:01 bolinger] + * + * Revision 1.2.7.1 1994/09/23 06:57:07 ezf + * change marker to not FREE + * [1994/09/23 06:54:36 ezf] + * + * Revision 1.2.2.3 1993/08/05 17:57:50 gm + * CR9627: Removed deprecated default_pager_filename_t type. + * [1993/07/09 19:20:12 gm] + * + * Revision 1.2.2.2 1993/06/09 02:11:13 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:42:07 gm] + * + * Revision 1.2 1993/04/19 16:32:44 devrcs + * Untyped ipc merge: + * Introducing new MIG syntax for Untyped IPC (via compile option + * MACH_IPC_TYPED) + * [1993/03/18 09:37:27 rod] + * + * Moved from bootstrap. + * [1993/02/17 13:45:33 bruel] + * + * Fixed comments. + * [1993/02/11 09:26:06 bruel] + * + * Created for external default pager. + * [1993/02/09 14:56:57 bruel] + * + * $EndLog$ + */ + +#ifndef _MACH_DEFAULT_PAGER_TYPES_DEFS_ +#define _MACH_DEFAULT_PAGER_TYPES_DEFS_ + +#include + + +type default_pager_info_t = struct[3] of natural_t; + +type default_pager_object_t = struct[2] of natural_t; +type default_pager_object_array_t = array[] of default_pager_object_t; + +type default_pager_page_t = struct[1] of natural_t; +type default_pager_page_array_t = array[] of default_pager_page_t; + +type backing_store_flavor_t = integer_t; +type backing_store_info_t = array[*:20] of integer_t; + +import ; + +#endif _MACH_DEFAULT_PAGER_TYPES_DEFS_ diff --git a/osfmk/default_pager/default_pager_types.h b/osfmk/default_pager/default_pager_types.h new file mode 100644 index 000000000..96b9206fb --- /dev/null +++ b/osfmk/default_pager/default_pager_types.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + + +#ifndef _MACH_DEFAULT_PAGER_TYPES_H_ +#define _MACH_DEFAULT_PAGER_TYPES_H_ + + +#include + +#ifdef MACH_KERNEL_PRIVATE + +/* + * Remember to update the mig type definitions + * in default_pager_types.defs when adding/removing fields. 
+ */ + +typedef struct default_pager_info { + vm_size_t dpi_total_space; /* size of backing store */ + vm_size_t dpi_free_space; /* how much of it is unused */ + vm_size_t dpi_page_size; /* the pager's vm page size */ +} default_pager_info_t; + +typedef integer_t *backing_store_info_t; +typedef int backing_store_flavor_t; +typedef int *vnode_ptr_t; + +#define BACKING_STORE_BASIC_INFO 1 +#define BACKING_STORE_BASIC_INFO_COUNT \ + (sizeof(struct backing_store_basic_info)/sizeof(integer_t)) +struct backing_store_basic_info { + natural_t pageout_calls; /* # pageout calls */ + natural_t pagein_calls; /* # pagein calls */ + natural_t pages_in; /* # pages paged in (total) */ + natural_t pages_out; /* # pages paged out (total) */ + natural_t pages_unavail; /* # zero-fill pages */ + natural_t pages_init; /* # page init requests */ + natural_t pages_init_writes; /* # page init writes */ + + natural_t bs_pages_total; /* # pages (total) */ + natural_t bs_pages_free; /* # unallocated pages */ + natural_t bs_pages_in; /* # page read requests */ + natural_t bs_pages_in_fail; /* # page read errors */ + natural_t bs_pages_out; /* # page write requests */ + natural_t bs_pages_out_fail; /* # page write errors */ + + integer_t bs_priority; + integer_t bs_clsize; +}; +typedef struct backing_store_basic_info *backing_store_basic_info_t; + + +typedef struct default_pager_object { + vm_offset_t dpo_object; /* object managed by the pager */ + vm_size_t dpo_size; /* backing store used for the object */ +} default_pager_object_t; + +typedef default_pager_object_t *default_pager_object_array_t; + + +typedef struct default_pager_page { + vm_offset_t dpp_offset; /* offset of the page in its object */ +} default_pager_page_t; + +typedef default_pager_page_t *default_pager_page_array_t; + +#endif /* MACH_KERNEL_PRIVATE */ + +#define DEFAULT_PAGER_BACKING_STORE_MAXPRI 4 + +#define HI_WAT_ALERT 1 +#define LO_WAT_ALERT 2 + +#endif /* _MACH_DEFAULT_PAGER_TYPES_H_ */ diff --git 
a/osfmk/default_pager/diag.h b/osfmk/default_pager/diag.h new file mode 100644 index 000000000..e197e2aa8 --- /dev/null +++ b/osfmk/default_pager/diag.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * $Log: diag.h,v $ + * Revision 1.3 2000/01/26 05:56:23 wsanchez + * Add APSL + * + * Revision 1.2 1998/12/01 00:24:42 wsanchez + * Merged in CDY_DP1 (chris: default pager) + * + * Revision 1.1.2.2 1998/11/25 21:32:17 youngwor + * fix errant comment format + * + * Revision 1.1.2.1 1998/11/24 22:39:59 youngwor + * Check-in of support for the in-kernel default pager + * + * Revision 1.1.1.1 1998/03/07 02:26:31 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.3 1995/04/07 18:51:32 barbou + * Changed panic messages format. + * [94/10/10 barbou] + * [95/03/08 barbou] + * + * Revision 1.1.6.2 1995/01/11 19:30:28 devrcs + * mk6 CR668 - 1.3b26 merge + * [1994/11/10 15:31:34 bolinger] + * + * Insert 1.3 log. 
+ * + * BEGIN OSC1_3 HISTORY + * + * Revision 1.1.2.2 1994/04/01 18:45:25 jph + * CR10550 -- Add stats macros for info interfaces. + * [1994/04/01 18:45:06 jph] + * + * Revision 1.1.2.1 1994/02/16 14:22:46 jph + * CR10554 -- Simple assert and panic macros for diagnostics. + * [1994/02/16 14:22:02 jph] + * + * END OSC1_3 HISTORY + * [1994/11/10 15:30:44 bolinger] + * + * $EndLog$ + */ + +#ifndef MACH_KERNEL +#ifdef ASSERTIONS +#define assert(cond) \ + if (!(cond)) panic("%sassertion: %s", my_name, # cond) +#endif +#ifndef ASSERTIONS +#define assert(cond) +#endif +#endif + +#ifndef MACH_KERNEL +#define Panic(aargh) panic("%s[%d]%s: %s", my_name, dp_thread_id(), here, aargh) +#else +#define Panic(aargh) panic("%s[KERNEL]%s: %s", my_name, here, aargh) +#endif + +extern char my_name[]; + +#define VSTATS_ACTION(l, stmt) \ + do { VSTATS_LOCK(l); stmt; VSTATS_UNLOCK(l); } while (0) + +#if !defined(VAGUE_STATS) || (VAGUE_STATS > 0) +#define VSTATS_LOCK_DECL(name) +#define VSTATS_LOCK(l) +#define VSTATS_UNLOCK(l) +#define VSTATS_LOCK_INIT(l) +#else +#define VSTATS_LOCK_DECL(name) struct mutex name; +#define VSTATS_LOCK(l) mutex_lock(l) +#define VSTATS_UNLOCK(l) mutex_unlock(l) +#define VSTATS_LOCK_INIT(l) mutex_init(l) +#endif /* VAGUE_STATS */ + diff --git a/osfmk/default_pager/dp_backing_store.c b/osfmk/default_pager/dp_backing_store.c new file mode 100644 index 000000000..ce90036af --- /dev/null +++ b/osfmk/default_pager/dp_backing_store.c @@ -0,0 +1,3658 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Default Pager. + * Paging File Management. 
+ */ + +#include +#include "default_pager_internal.h" +#include +#include +#include +#include +#include +#include +#include +#include +/* CDY CDY */ +#include + +/* MAXPHYS derived from bsd/bsd/ppc/param.h, we need a */ +/* universal originating in the kernel, or a formal means of exporting */ +/* from the bsd component */ + +#define MAXPHYS (64 * 1024) +int physical_transfer_cluster_count = 0; + +#define VM_SUPER_CLUSTER 0x10000 + +/* + * 0 means no shift to pages, so == 1 page/cluster. 1 would mean + * 2 pages/cluster, 2 means 4 pages/cluster, and so on. + */ +#define VSTRUCT_DEF_CLSHIFT 2 +int vstruct_def_clshift = VSTRUCT_DEF_CLSHIFT; +int default_pager_clsize = 0; + +/* statistics */ +unsigned int clustered_writes[MAX_CLUSTER_SIZE+1]; +unsigned int clustered_reads[MAX_CLUSTER_SIZE+1]; + +/* + * Globals used for asynchronous paging operations: + * vs_async_list: head of list of to-be-completed I/O ops + * async_num_queued: number of pages completed, but not yet + * processed by async thread. + * async_requests_out: number of pages of requests not completed. 
+ */ + +#if 0 +struct vs_async *vs_async_list; +int async_num_queued; +int async_requests_out; +#endif + + +#define VS_ASYNC_REUSE 1 +struct vs_async *vs_async_free_list; + +mutex_t default_pager_async_lock; /* Protects globals above */ + + +int vs_alloc_async_failed = 0; /* statistics */ +int vs_alloc_async_count = 0; /* statistics */ +struct vs_async *vs_alloc_async(void); /* forward */ +void vs_free_async(struct vs_async *vsa); /* forward */ + + +#define VS_ALLOC_ASYNC() vs_alloc_async() +#define VS_FREE_ASYNC(vsa) vs_free_async(vsa) + +#define VS_ASYNC_LOCK() mutex_lock(&default_pager_async_lock) +#define VS_ASYNC_UNLOCK() mutex_unlock(&default_pager_async_lock) +#define VS_ASYNC_LOCK_INIT() mutex_init(&default_pager_async_lock, \ + ETAP_IO_DEV_PAGEH) +#define VS_ASYNC_LOCK_ADDR() (&default_pager_async_lock) +/* + * Paging Space Hysteresis triggers and the target notification port + * + */ + +unsigned int minimum_pages_remaining = 0; +unsigned int maximum_pages_free = 0; +ipc_port_t min_pages_trigger_port = NULL; +ipc_port_t max_pages_trigger_port = NULL; + +boolean_t bs_low = FALSE; + + + +/* + * Object sizes are rounded up to the next power of 2, + * unless they are bigger than a given maximum size. + */ +vm_size_t max_doubled_size = 4 * 1024 * 1024; /* 4 meg */ + +/* + * List of all backing store and segments. + */ +struct backing_store_list_head backing_store_list; +paging_segment_t paging_segments[MAX_NUM_PAGING_SEGMENTS]; +mutex_t paging_segments_lock; +int paging_segment_max = 0; +int paging_segment_count = 0; +int ps_select_array[BS_MAXPRI+1] = { -1,-1,-1,-1,-1 }; + + +/* + * Total pages free in system + * This differs from clusters committed/avail which is a measure of the + * over commitment of paging segments to backing store. An idea which is + * likely to be deprecated. 
+ */ +unsigned int dp_pages_free = 0; +unsigned int cluster_transfer_minimum = 100; + +kern_return_t ps_write_file(paging_segment_t, upl_t, vm_offset_t, vm_offset_t, unsigned int, int); /* forward */ + +default_pager_thread_t * +get_read_buffer() +{ + int i; + + DPT_LOCK(dpt_lock); + while(TRUE) { + for (i=0; ichecked_out == FALSE) { + dpt_array[i]->checked_out = TRUE; + DPT_UNLOCK(dpt_lock); + return dpt_array[i]; + } + } + assert_wait(&dpt_array, THREAD_UNINT); + DPT_UNLOCK(dpt_lock); + thread_block((void(*)(void))0); + } +} + +void +bs_initialize(void) +{ + int i; + + /* + * List of all backing store. + */ + BSL_LOCK_INIT(); + queue_init(&backing_store_list.bsl_queue); + PSL_LOCK_INIT(); + + VS_ASYNC_LOCK_INIT(); +#if VS_ASYNC_REUSE + vs_async_free_list = NULL; +#endif /* VS_ASYNC_REUSE */ + + for (i = 0; i < MAX_CLUSTER_SIZE+1; i++) { + clustered_writes[i] = 0; + clustered_reads[i] = 0; + } + +} + +/* + * When things do not quite workout... + */ +void bs_no_paging_space(boolean_t); /* forward */ + +void +bs_no_paging_space( + boolean_t out_of_memory) +{ + static char here[] = "bs_no_paging_space"; + + if (out_of_memory) + dprintf(("*** OUT OF MEMORY ***\n")); + panic("bs_no_paging_space: NOT ENOUGH PAGING SPACE"); +} + +void bs_more_space(int); /* forward */ +void bs_commit(int); /* forward */ + +boolean_t user_warned = FALSE; +unsigned int clusters_committed = 0; +unsigned int clusters_available = 0; +unsigned int clusters_committed_peak = 0; + +void +bs_more_space( + int nclusters) +{ + BSL_LOCK(); + /* + * Account for new paging space. 
+ */ + clusters_available += nclusters; + + if (clusters_available >= clusters_committed) { + if (verbose && user_warned) { + printf("%s%s - %d excess clusters now.\n", + my_name, + "paging space is OK now", + clusters_available - clusters_committed); + user_warned = FALSE; + clusters_committed_peak = 0; + } + } else { + if (verbose && user_warned) { + printf("%s%s - still short of %d clusters.\n", + my_name, + "WARNING: paging space over-committed", + clusters_committed - clusters_available); + clusters_committed_peak -= nclusters; + } + } + BSL_UNLOCK(); + + return; +} + +void +bs_commit( + int nclusters) +{ + BSL_LOCK(); + clusters_committed += nclusters; + if (clusters_committed > clusters_available) { + if (verbose && !user_warned) { + user_warned = TRUE; + printf("%s%s - short of %d clusters.\n", + my_name, + "WARNING: paging space over-committed", + clusters_committed - clusters_available); + } + if (clusters_committed > clusters_committed_peak) { + clusters_committed_peak = clusters_committed; + } + } else { + if (verbose && user_warned) { + printf("%s%s - was short of up to %d clusters.\n", + my_name, + "paging space is OK now", + clusters_committed_peak - clusters_available); + user_warned = FALSE; + clusters_committed_peak = 0; + } + } + BSL_UNLOCK(); + + return; +} + +int default_pager_info_verbose = 1; + +void +bs_global_info( + vm_size_t *totalp, + vm_size_t *freep) +{ + vm_size_t pages_total, pages_free; + paging_segment_t ps; + int i; + static char here[] = "bs_global_info"; + + PSL_LOCK(); + pages_total = pages_free = 0; + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps == PAGING_SEGMENT_NULL) + continue; + + /* + * no need to lock: by the time this data + * gets back to any remote requestor it + * will be obsolete anyways + */ + pages_total += ps->ps_pgnum; + pages_free += ps->ps_clcount << ps->ps_clshift; + DEBUG(DEBUG_BS_INTERNAL, + ("segment #%d: %d total, %d free\n", + i, ps->ps_pgnum, ps->ps_clcount << 
ps->ps_clshift)); + } + *totalp = pages_total; + *freep = pages_free; + if (verbose && user_warned && default_pager_info_verbose) { + if (clusters_available < clusters_committed) { + printf("%s %d clusters committed, %d available.\n", + my_name, + clusters_committed, + clusters_available); + } + } + PSL_UNLOCK(); +} + +backing_store_t backing_store_alloc(void); /* forward */ + +backing_store_t +backing_store_alloc(void) +{ + backing_store_t bs; + static char here[] = "backing_store_alloc"; + + bs = (backing_store_t) kalloc(sizeof (struct backing_store)); + if (bs == BACKING_STORE_NULL) + panic("backing_store_alloc: no memory"); + + BS_LOCK_INIT(bs); + bs->bs_port = MACH_PORT_NULL; + bs->bs_priority = 0; + bs->bs_clsize = 0; + bs->bs_pages_total = 0; + bs->bs_pages_in = 0; + bs->bs_pages_in_fail = 0; + bs->bs_pages_out = 0; + bs->bs_pages_out_fail = 0; + + return bs; +} + +backing_store_t backing_store_lookup(MACH_PORT_FACE); /* forward */ + +/* Even in both the component space and external versions of this pager, */ +/* backing_store_lookup will be called from tasks in the application space */ +backing_store_t +backing_store_lookup( + MACH_PORT_FACE port) +{ + backing_store_t bs; + +/* + port is currently backed with a vs structure in the alias field + we could create an ISBS alias and a port_is_bs call but frankly + I see no reason for the test, the bs->port == port check below + will work properly on junk entries. + + if ((port == MACH_PORT_NULL) || port_is_vs(port)) +*/ + if ((port == MACH_PORT_NULL)) + return BACKING_STORE_NULL; + + BSL_LOCK(); + queue_iterate(&backing_store_list.bsl_queue, bs, backing_store_t, + bs_links) { + BS_LOCK(bs); + if (bs->bs_port == port) { + BSL_UNLOCK(); + /* Success, return it locked. 
*/ + return bs; + } + BS_UNLOCK(bs); + } + BSL_UNLOCK(); + return BACKING_STORE_NULL; +} + +void backing_store_add(backing_store_t); /* forward */ + +void +backing_store_add( + backing_store_t bs) +{ + MACH_PORT_FACE port = bs->bs_port; + MACH_PORT_FACE pset = default_pager_default_set; + kern_return_t kr = KERN_SUCCESS; + static char here[] = "backing_store_add"; + + if (kr != KERN_SUCCESS) + panic("backing_store_add: add to set"); + +} + +/* + * Set up default page shift, but only if not already + * set and argument is within range. + */ +boolean_t +bs_set_default_clsize(unsigned int npages) +{ + switch(npages){ + case 1: + case 2: + case 4: + case 8: + if (default_pager_clsize == 0) /* if not yet set */ + vstruct_def_clshift = local_log2(npages); + return(TRUE); + } + return(FALSE); +} + +int bs_get_global_clsize(int clsize); /* forward */ + +int +bs_get_global_clsize( + int clsize) +{ + int i; + MACH_PORT_FACE DMM; + kern_return_t kr; + static char here[] = "bs_get_global_clsize"; + + /* + * Only allow setting of cluster size once. If called + * with no cluster size (default), we use the compiled-in default + * for the duration. The same cluster size is used for all + * paging segments. + */ + if (default_pager_clsize == 0) { + if (norma_mk) { + /* + * On NORMA, don't use clustered paging because + * XMM can't handle it. + */ + vstruct_def_clshift = 0; + } + /* + * Keep cluster size in bit shift because it's quicker + * arithmetic, and easier to keep at a power of 2. + */ + if (clsize != NO_CLSIZE) { + for (i = 0; (1 << i) < clsize; i++); + if (i > MAX_CLUSTER_SHIFT) + i = MAX_CLUSTER_SHIFT; + vstruct_def_clshift = i; + } + default_pager_clsize = (1 << vstruct_def_clshift); + + /* + * Let the user know the new (and definitive) cluster size. + */ + if (verbose) + printf("%scluster size = %d page%s\n", + my_name, default_pager_clsize, + (default_pager_clsize == 1) ? 
"" : "s"); + /* + * Let the kernel know too, in case it hasn't used the + * default value provided in main() yet. + */ + DMM = default_pager_default_port; + clsize = default_pager_clsize * vm_page_size; /* in bytes */ + kr = host_default_memory_manager(host_priv_self(), + &DMM, + clsize); + if (kr != KERN_SUCCESS) { + panic("bs_get_global_cl_size:host_default_memory_manager"); + } + if (DMM != default_pager_default_port) { + panic("bs_get_global_cl_size:there is another default pager"); + } + } + ASSERT(default_pager_clsize > 0 && + (default_pager_clsize & (default_pager_clsize - 1)) == 0); + + return default_pager_clsize; +} + +kern_return_t +default_pager_backing_store_create( + MACH_PORT_FACE pager, + int priority, + int clsize, /* in bytes */ + MACH_PORT_FACE *backing_store) +{ + backing_store_t bs; + MACH_PORT_FACE port; + kern_return_t kr; + struct vstruct_alias *alias_struct; + static char here[] = "default_pager_backing_store_create"; + + if (pager != default_pager_default_port) + return KERN_INVALID_ARGUMENT; + + bs = backing_store_alloc(); + port = ipc_port_alloc_kernel(); + ipc_port_make_send(port); + assert (port != IP_NULL); + + DEBUG(DEBUG_BS_EXTERNAL, + ("priority=%d clsize=%d bs_port=0x%x\n", + priority, clsize, (int) backing_store)); + + alias_struct = (struct vstruct_alias *) + kalloc(sizeof (struct vstruct_alias)); + if(alias_struct != NULL) { + alias_struct->vs = (struct vstruct *)bs; + alias_struct->name = ISVS; + port->alias = (int) alias_struct; + } + else { + ipc_port_dealloc_kernel((MACH_PORT_FACE)(port)); + kfree((vm_offset_t)bs, sizeof (struct backing_store)); + return KERN_RESOURCE_SHORTAGE; + } + + bs->bs_port = port; + if (priority == DEFAULT_PAGER_BACKING_STORE_MAXPRI) + priority = BS_MAXPRI; + else if (priority == BS_NOPRI) + priority = BS_MAXPRI; + else + priority = BS_MINPRI; + bs->bs_priority = priority; + + bs->bs_clsize = bs_get_global_clsize(atop(clsize)); + + BSL_LOCK(); + queue_enter(&backing_store_list.bsl_queue, bs, 
backing_store_t, + bs_links); + BSL_UNLOCK(); + + backing_store_add(bs); + + *backing_store = port; + return KERN_SUCCESS; +} + +kern_return_t +default_pager_backing_store_info( + MACH_PORT_FACE backing_store, + backing_store_flavor_t flavour, + backing_store_info_t info, + mach_msg_type_number_t *size) +{ + backing_store_t bs; + backing_store_basic_info_t basic; + int i; + paging_segment_t ps; + + if (flavour != BACKING_STORE_BASIC_INFO || + *size < BACKING_STORE_BASIC_INFO_COUNT) + return KERN_INVALID_ARGUMENT; + + basic = (backing_store_basic_info_t)info; + *size = BACKING_STORE_BASIC_INFO_COUNT; + + VSTATS_LOCK(&global_stats.gs_lock); + basic->pageout_calls = global_stats.gs_pageout_calls; + basic->pagein_calls = global_stats.gs_pagein_calls; + basic->pages_in = global_stats.gs_pages_in; + basic->pages_out = global_stats.gs_pages_out; + basic->pages_unavail = global_stats.gs_pages_unavail; + basic->pages_init = global_stats.gs_pages_init; + basic->pages_init_writes= global_stats.gs_pages_init_writes; + VSTATS_UNLOCK(&global_stats.gs_lock); + + if ((bs = backing_store_lookup(backing_store)) == BACKING_STORE_NULL) + return KERN_INVALID_ARGUMENT; + + basic->bs_pages_total = bs->bs_pages_total; + PSL_LOCK(); + bs->bs_pages_free = 0; + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps != PAGING_SEGMENT_NULL && ps->ps_bs == bs) { + PS_LOCK(ps); + bs->bs_pages_free += ps->ps_clcount << ps->ps_clshift; + PS_UNLOCK(ps); + } + } + PSL_UNLOCK(); + basic->bs_pages_free = bs->bs_pages_free; + basic->bs_pages_in = bs->bs_pages_in; + basic->bs_pages_in_fail = bs->bs_pages_in_fail; + basic->bs_pages_out = bs->bs_pages_out; + basic->bs_pages_out_fail= bs->bs_pages_out_fail; + + basic->bs_priority = bs->bs_priority; + basic->bs_clsize = ptoa(bs->bs_clsize); /* in bytes */ + + BS_UNLOCK(bs); + + return KERN_SUCCESS; +} + +int ps_delete(paging_segment_t); /* forward */ + +int +ps_delete( + paging_segment_t ps) +{ + vstruct_t vs; + kern_return_t error 
= KERN_SUCCESS; + int vs_count; + + VSL_LOCK(); /* get the lock on the list of vs's */ + + /* The lock relationship and sequence is farily complicated */ + /* this code looks at a live list, locking and unlocking the list */ + /* as it traverses it. It depends on the locking behavior of */ + /* default_pager_no_senders. no_senders always locks the vstruct */ + /* targeted for removal before locking the vstruct list. However */ + /* it will remove that member of the list without locking its */ + /* neighbors. We can be sure when we hold a lock on a vstruct */ + /* it cannot be removed from the list but we must hold the list */ + /* lock to be sure that its pointers to its neighbors are valid. */ + /* Also, we can hold off destruction of a vstruct when the list */ + /* lock and the vs locks are not being held by bumping the */ + /* vs_async_pending count. */ + + /* we will choose instead to hold a send right */ + vs_count = vstruct_list.vsl_count; + vs = (vstruct_t) queue_first((queue_entry_t)&(vstruct_list.vsl_queue)); + if(vs == (vstruct_t)&vstruct_list) { + VSL_UNLOCK(); + return KERN_SUCCESS; + } + VS_LOCK(vs); + vs_async_wait(vs); /* wait for any pending async writes */ + if ((vs_count != 0) && (vs != NULL)) + vs->vs_async_pending += 1; /* hold parties calling */ + /* vs_async_wait */ + VS_UNLOCK(vs); + VSL_UNLOCK(); + while((vs_count != 0) && (vs != NULL)) { + /* We take the count of AMO's before beginning the */ + /* transfer of of the target segment. */ + /* We are guaranteed that the target segment cannot get */ + /* more users. We also know that queue entries are */ + /* made at the back of the list. If some of the entries */ + /* we would check disappear while we are traversing the */ + /* list then we will either check new entries which */ + /* do not have any backing store in the target segment */ + /* or re-check old entries. This might not be optimal */ + /* but it will always be correct. The alternative is to */ + /* take a snapshot of the list. 
*/ + vstruct_t next_vs; + + if(dp_pages_free < cluster_transfer_minimum) + error = KERN_FAILURE; + else { + vm_object_t transfer_object; + upl_t upl; + + transfer_object = vm_object_allocate(VM_SUPER_CLUSTER); + error = vm_fault_list_request(transfer_object, + (vm_object_offset_t)0, + VM_SUPER_CLUSTER, &upl, NULL, + 0, UPL_NO_SYNC | UPL_CLEAN_IN_PLACE + | UPL_SET_INTERNAL); + if(error == KERN_SUCCESS) { +#ifndef ubc_sync_working + uc_upl_commit(upl, NULL); + error = ps_vstruct_transfer_from_segment( + vs, ps, transfer_object); +#else + error = ps_vstruct_transfer_from_segment( + vs, ps, upl); + uc_upl_commit(upl, NULL); +#endif + vm_object_deallocate(transfer_object); + } else { + vm_object_deallocate(transfer_object); + error = KERN_FAILURE; + } + } + if(error) { + VS_LOCK(vs); + vs->vs_async_pending -= 1; /* release vs_async_wait */ + if (vs->vs_async_pending == 0) { + VS_UNLOCK(vs); + thread_wakeup(&vs->vs_waiting_async); + } else { + VS_UNLOCK(vs); + } + return KERN_FAILURE; + } + + VSL_LOCK(); + next_vs = (vstruct_t) queue_next(&(vs->vs_links)); + if((next_vs != (vstruct_t)&vstruct_list) && + (vs != next_vs) && (vs_count != 1)) { + VS_LOCK(next_vs); + vs_async_wait(next_vs); /* wait for any */ + /* pending async writes */ + next_vs->vs_async_pending += 1; /* hold parties */ + /* calling vs_async_wait */ + VS_UNLOCK(next_vs); + } + VSL_UNLOCK(); + VS_LOCK(vs); + vs->vs_async_pending -= 1; + if (vs->vs_async_pending == 0) { + VS_UNLOCK(vs); + thread_wakeup(&vs->vs_waiting_async); + } else { + VS_UNLOCK(vs); + } + if((vs == next_vs) || (next_vs == (vstruct_t)&vstruct_list)) + vs = NULL; + else + vs = next_vs; + vs_count--; + } + return KERN_SUCCESS; +} + + +kern_return_t +default_pager_backing_store_delete( + MACH_PORT_FACE backing_store) +{ + backing_store_t bs; + int i; + paging_segment_t ps; + int error; + int interim_pages_removed = 0; + kern_return_t kr; + static char here[] = "default_pager_backing_store_delete"; + + if ((bs = 
backing_store_lookup(backing_store)) == BACKING_STORE_NULL) + return KERN_INVALID_ARGUMENT; + +#if 0 + /* not implemented */ + BS_UNLOCK(bs); + return KERN_FAILURE; +#endif + + restart: + PSL_LOCK(); + error = KERN_SUCCESS; + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps != PAGING_SEGMENT_NULL && + ps->ps_bs == bs && + ! ps->ps_going_away) { + PS_LOCK(ps); + /* disable access to this segment */ + ps->ps_going_away = TRUE; + PS_UNLOCK(ps); + /* + * The "ps" segment is "off-line" now, + * we can try and delete it... + */ + if(dp_pages_free < (cluster_transfer_minimum + + ps->ps_pgcount)) { + error = KERN_FAILURE; + PSL_UNLOCK(); + } + else { + /* remove all pages associated with the */ + /* segment from the list of free pages */ + /* when transfer is through, all target */ + /* segment pages will appear to be free */ + + dp_pages_free -= ps->ps_pgcount; + interim_pages_removed += ps->ps_pgcount; + PSL_UNLOCK(); + error = ps_delete(ps); + } + if (error != KERN_SUCCESS) { + /* + * We couldn't delete the segment, + * probably because there's not enough + * virtual memory left. + * Re-enable all the segments. 
+ */ + PSL_LOCK(); + break; + } + goto restart; + } + } + + if (error != KERN_SUCCESS) { + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps != PAGING_SEGMENT_NULL && + ps->ps_bs == bs && + ps->ps_going_away) { + PS_LOCK(ps); + /* re-enable access to this segment */ + ps->ps_going_away = FALSE; + PS_UNLOCK(ps); + } + } + dp_pages_free += interim_pages_removed; + PSL_UNLOCK(); + BS_UNLOCK(bs); + return error; + } + + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps != PAGING_SEGMENT_NULL && + ps->ps_bs == bs) { + if(ps->ps_going_away) { + paging_segments[i] = PAGING_SEGMENT_NULL; + paging_segment_count--; + PS_LOCK(ps); + kfree((vm_offset_t)ps->ps_bmap, + RMAPSIZE(ps->ps_ncls)); + kfree((vm_offset_t)ps, sizeof *ps); + } + } + } + + /* Scan the entire ps array separately to make certain we find the */ + /* proper paging_segment_max */ + for (i = 0; i < MAX_NUM_PAGING_SEGMENTS; i++) { + if(paging_segments[i] != PAGING_SEGMENT_NULL) + paging_segment_max = i; + } + + PSL_UNLOCK(); + + /* + * All the segments have been deleted. + * We can remove the backing store. + */ + + /* + * Disable lookups of this backing store. + */ + if((void *)bs->bs_port->alias != NULL) + kfree((vm_offset_t) bs->bs_port->alias, + sizeof (struct vstruct_alias)); + pager_mux_hash_delete((ipc_port_t) (bs->bs_port)); + ipc_port_dealloc_kernel((ipc_port_t) (bs->bs_port)); + bs->bs_port = MACH_PORT_NULL; + BS_UNLOCK(bs); + + /* + * Remove backing store from backing_store list. + */ + BSL_LOCK(); + queue_remove(&backing_store_list.bsl_queue, bs, backing_store_t, + bs_links); + BSL_UNLOCK(); + + /* + * Free the backing store structure. 
+ */ + kfree((vm_offset_t)bs, sizeof *bs); + + return KERN_SUCCESS; +} + +int ps_enter(paging_segment_t); /* forward */ + +int +ps_enter( + paging_segment_t ps) +{ + int i; + + PSL_LOCK(); + + for (i = 0; i < MAX_NUM_PAGING_SEGMENTS; i++) { + if (paging_segments[i] == PAGING_SEGMENT_NULL) + break; + } + + if (i < MAX_NUM_PAGING_SEGMENTS) { + paging_segments[i] = ps; + if (i > paging_segment_max) + paging_segment_max = i; + paging_segment_count++; + if ((ps_select_array[ps->ps_bs->bs_priority] == BS_NOPRI) || + (ps_select_array[ps->ps_bs->bs_priority] == BS_FULLPRI)) + ps_select_array[ps->ps_bs->bs_priority] = 0; + i = 0; + } else { + PSL_UNLOCK(); + return KERN_RESOURCE_SHORTAGE; + } + + PSL_UNLOCK(); + return i; +} + +#ifdef DEVICE_PAGING +kern_return_t +default_pager_add_segment( + MACH_PORT_FACE backing_store, + MACH_PORT_FACE device, + recnum_t offset, + recnum_t count, + int record_size) +{ + backing_store_t bs; + paging_segment_t ps; + int i; + int error; + static char here[] = "default_pager_add_segment"; + + if ((bs = backing_store_lookup(backing_store)) + == BACKING_STORE_NULL) + return KERN_INVALID_ARGUMENT; + + PSL_LOCK(); + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps == PAGING_SEGMENT_NULL) + continue; + + /* + * Check for overlap on same device. 
+ */ + if (!(ps->ps_device != device + || offset >= ps->ps_offset + ps->ps_recnum + || offset + count <= ps->ps_offset)) { + PSL_UNLOCK(); + BS_UNLOCK(bs); + return KERN_INVALID_ARGUMENT; + } + } + PSL_UNLOCK(); + + /* + * Set up the paging segment + */ + ps = (paging_segment_t) kalloc(sizeof (struct paging_segment)); + if (ps == PAGING_SEGMENT_NULL) { + BS_UNLOCK(bs); + return KERN_RESOURCE_SHORTAGE; + } + + ps->ps_segtype = PS_PARTITION; + ps->ps_device = device; + ps->ps_offset = offset; + ps->ps_record_shift = local_log2(vm_page_size / record_size); + ps->ps_recnum = count; + ps->ps_pgnum = count >> ps->ps_record_shift; + + ps->ps_pgcount = ps->ps_pgnum; + ps->ps_clshift = local_log2(bs->bs_clsize); + ps->ps_clcount = ps->ps_ncls = ps->ps_pgcount >> ps->ps_clshift; + ps->ps_hint = 0; + + PS_LOCK_INIT(ps); + ps->ps_bmap = (unsigned char *) kalloc(RMAPSIZE(ps->ps_ncls)); + if (!ps->ps_bmap) { + kfree((vm_offset_t)ps, sizeof *ps); + BS_UNLOCK(bs); + return KERN_RESOURCE_SHORTAGE; + } + for (i = 0; i < ps->ps_ncls; i++) { + clrbit(ps->ps_bmap, i); + } + + ps->ps_going_away = FALSE; + ps->ps_bs = bs; + + if ((error = ps_enter(ps)) != 0) { + kfree((vm_offset_t)ps->ps_bmap, RMAPSIZE(ps->ps_ncls)); + kfree((vm_offset_t)ps, sizeof *ps); + BS_UNLOCK(bs); + return KERN_RESOURCE_SHORTAGE; + } + + bs->bs_pages_free += ps->ps_clcount << ps->ps_clshift; + bs->bs_pages_total += ps->ps_clcount << ps->ps_clshift; + BS_UNLOCK(bs); + + PSL_LOCK(); + dp_pages_free += ps->ps_pgcount; + PSL_UNLOCK(); + + bs_more_space(ps->ps_clcount); + + DEBUG(DEBUG_BS_INTERNAL, + ("device=0x%x,offset=0x%x,count=0x%x,record_size=0x%x,shift=%d,total_size=0x%x\n", + device, offset, count, record_size, + ps->ps_record_shift, ps->ps_pgnum)); + + return KERN_SUCCESS; +} + +boolean_t +bs_add_device( + char *dev_name, + MACH_PORT_FACE master) +{ + security_token_t null_security_token = { + { 0, 0 } + }; + MACH_PORT_FACE device; + int info[DEV_GET_SIZE_COUNT]; + mach_msg_type_number_t info_count; + 
MACH_PORT_FACE bs = MACH_PORT_NULL; + unsigned int rec_size; + recnum_t count; + int clsize; + MACH_PORT_FACE reply_port; + + if (ds_device_open_sync(master, MACH_PORT_NULL, D_READ | D_WRITE, + null_security_token, dev_name, &device)) + return FALSE; + + info_count = DEV_GET_SIZE_COUNT; + if (!ds_device_get_status(device, DEV_GET_SIZE, info, &info_count)) { + rec_size = info[DEV_GET_SIZE_RECORD_SIZE]; + count = info[DEV_GET_SIZE_DEVICE_SIZE] / rec_size; + clsize = bs_get_global_clsize(0); + if (!default_pager_backing_store_create( + default_pager_default_port, + DEFAULT_PAGER_BACKING_STORE_MAXPRI, + (clsize * vm_page_size), + &bs)) { + if (!default_pager_add_segment(bs, device, + 0, count, rec_size)) { + return TRUE; + } + ipc_port_release_receive(bs); + } + } + + ipc_port_release_send(device); + return FALSE; +} +#endif /* DEVICE_PAGING */ + +#if VS_ASYNC_REUSE + +struct vs_async * +vs_alloc_async(void) +{ + struct vs_async *vsa; + MACH_PORT_FACE reply_port; + kern_return_t kr; + + VS_ASYNC_LOCK(); + if (vs_async_free_list == NULL) { + VS_ASYNC_UNLOCK(); + vsa = (struct vs_async *) kalloc(sizeof (struct vs_async)); + if (vsa != NULL) { + /* + * Try allocating a reply port named after the + * address of the vs_async structure. 
+ */ + struct vstruct_alias *alias_struct; + + reply_port = ipc_port_alloc_kernel(); + alias_struct = (struct vstruct_alias *) + kalloc(sizeof (struct vstruct_alias)); + if(alias_struct != NULL) { + alias_struct->vs = (struct vstruct *)vsa; + alias_struct->name = ISVS; + reply_port->alias = (int) alias_struct; + vsa->reply_port = reply_port; + vs_alloc_async_count++; + } + else { + vs_alloc_async_failed++; + ipc_port_dealloc_kernel((MACH_PORT_FACE) + (reply_port)); + kfree((vm_offset_t)vsa, + sizeof (struct vs_async)); + vsa = NULL; + } + } + } else { + vsa = vs_async_free_list; + vs_async_free_list = vs_async_free_list->vsa_next; + VS_ASYNC_UNLOCK(); + } + + return vsa; +} + +void +vs_free_async( + struct vs_async *vsa) +{ + VS_ASYNC_LOCK(); + vsa->vsa_next = vs_async_free_list; + vs_async_free_list = vsa; + VS_ASYNC_UNLOCK(); +} + +#else /* VS_ASYNC_REUSE */ + +struct vs_async * +vs_alloc_async(void) +{ + struct vs_async *vsa; + MACH_PORT_FACE reply_port; + kern_return_t kr; + + vsa = (struct vs_async *) kalloc(sizeof (struct vs_async)); + if (vsa != NULL) { + /* + * Try allocating a reply port named after the + * address of the vs_async structure. 
+ */ + reply_port = ipc_port_alloc_kernel(); + alias_struct = (vstruct_alias *) + kalloc(sizeof (struct vstruct_alias)); + if(alias_struct != NULL) { + alias_struct->vs = reply_port; + alias_struct->name = ISVS; + reply_port->alias = (int) vsa; + vsa->reply_port = reply_port; + vs_alloc_async_count++; + } + else { + vs_alloc_async_failed++; + ipc_port_dealloc_kernel((MACH_PORT_FACE) + (reply_port)); + kfree((vm_offset_t) vsa, + sizeof (struct vs_async)); + vsa = NULL; + } + } + + return vsa; +} + +void +vs_free_async( + struct vs_async *vsa) +{ + static char here[] = "vs_free_async"; + MACH_PORT_FACE reply_port; + kern_return_t kr; + + reply_port = vsa->reply_port; + kfree((vm_offset_t) reply_port->alias, sizeof (struct vstuct_alias)); + kfree((vm_offset_t) vsa, sizeof (struct vs_async)); + pager_mux_hash_delete(reply_port); + ipc_port_dealloc_kernel((MACH_PORT_FACE) (reply_port)); +#if 0 + VS_ASYNC_LOCK(); + vs_alloc_async_count--; + VS_ASYNC_UNLOCK(); +#endif +} + +#endif /* VS_ASYNC_REUSE */ + +vstruct_t +ps_vstruct_create( + vm_size_t size) +{ + vstruct_t vs; + int i; + static char here[] = "ps_vstruct_create"; + + vs = (vstruct_t) kalloc(sizeof (struct vstruct)); + if (vs == VSTRUCT_NULL) { + return VSTRUCT_NULL; + } + + VS_LOCK_INIT(vs); + + /* + * The following fields will be provided later. 
+ */ + vs->vs_mem_obj_port = MACH_PORT_NULL; + vs->vs_seqno = 0; + vs->vs_control_port = MACH_PORT_NULL; + vs->vs_control_refs = 0; + vs->vs_object_name = MACH_PORT_NULL; + vs->vs_name_refs = 0; + +#ifdef MACH_KERNEL + vs->vs_waiting_seqno = FALSE; + vs->vs_waiting_read = FALSE; + vs->vs_waiting_write = FALSE; + vs->vs_waiting_refs = FALSE; + vs->vs_waiting_async = FALSE; +#else + mutex_init(&vs->vs_waiting_seqno, ETAP_DPAGE_VSSEQNO); + mutex_init(&vs->vs_waiting_read, ETAP_DPAGE_VSREAD); + mutex_init(&vs->vs_waiting_write, ETAP_DPAGE_VSWRITE); + mutex_init(&vs->vs_waiting_refs, ETAP_DPAGE_VSREFS); + mutex_init(&vs->vs_waiting_async, ETAP_DPAGE_VSASYNC); +#endif + + vs->vs_readers = 0; + vs->vs_writers = 0; + + vs->vs_errors = 0; + + vs->vs_clshift = local_log2(bs_get_global_clsize(0)); + vs->vs_size = ((atop(round_page(size)) - 1) >> vs->vs_clshift) + 1; + vs->vs_async_pending = 0; + + /* + * Allocate the pmap, either CLMAP_SIZE or INDIRECT_CLMAP_SIZE + * depending on the size of the memory object. + */ + if (INDIRECT_CLMAP(vs->vs_size)) { + vs->vs_imap = (struct vs_map **) + kalloc(INDIRECT_CLMAP_SIZE(vs->vs_size)); + vs->vs_indirect = TRUE; + } else { + vs->vs_dmap = (struct vs_map *) + kalloc(CLMAP_SIZE(vs->vs_size)); + vs->vs_indirect = FALSE; + } + vs->vs_xfer_pending = FALSE; + DEBUG(DEBUG_VS_INTERNAL, + ("map=0x%x, indirect=%d\n", (int) vs->vs_dmap, vs->vs_indirect)); + + /* + * Check to see that we got the space. + */ + if (!vs->vs_dmap) { + kfree((vm_offset_t)vs, sizeof *vs); + return VSTRUCT_NULL; + } + + /* + * Zero the indirect pointers, or clear the direct pointers. 
+ */ + if (vs->vs_indirect) + memset(vs->vs_imap, 0, + INDIRECT_CLMAP_SIZE(vs->vs_size)); + else + for (i = 0; i < vs->vs_size; i++) + VSM_CLR(vs->vs_dmap[i]); + + VS_MAP_LOCK_INIT(vs); + + bs_commit(vs->vs_size); + + return vs; +} + +paging_segment_t ps_select_segment(int, int *); /* forward */ + +paging_segment_t +ps_select_segment( + int shift, + int *psindex) +{ + paging_segment_t ps; + int i; + int j; + static char here[] = "ps_select_segment"; + + /* + * Optimize case where there's only one segment. + * paging_segment_max will index the one and only segment. + */ + + PSL_LOCK(); + if (paging_segment_count == 1) { + paging_segment_t lps; /* used to avoid extra PS_UNLOCK */ + + ps = paging_segments[paging_segment_max]; + *psindex = paging_segment_max; + PS_LOCK(ps); + if (ps->ps_going_away) { + /* this segment is being turned off */ + lps = PAGING_SEGMENT_NULL; + } else { + ASSERT(ps->ps_clshift >= shift); + if (ps->ps_clcount) { + ps->ps_clcount--; + dp_pages_free -= 1 << ps->ps_clshift; + if(min_pages_trigger_port && + (dp_pages_free < minimum_pages_remaining)) { + default_pager_space_alert( + min_pages_trigger_port, + HI_WAT_ALERT); + min_pages_trigger_port = NULL; + bs_low = TRUE; + } + lps = ps; + } else + lps = PAGING_SEGMENT_NULL; + } + PS_UNLOCK(ps); + PSL_UNLOCK(); + return lps; + } + + if (paging_segment_count == 0) { + PSL_UNLOCK(); + return PAGING_SEGMENT_NULL; + } + + for (i = BS_MAXPRI; + i >= BS_MINPRI; i--) { + int start_index; + + if ((ps_select_array[i] == BS_NOPRI) || + (ps_select_array[i] == BS_FULLPRI)) + continue; + start_index = ps_select_array[i]; + + if(!(paging_segments[start_index])) { + j = start_index+1; + physical_transfer_cluster_count = 0; + } + else if ((physical_transfer_cluster_count+1) == (MAXPHYS >> + (((paging_segments[start_index])->ps_clshift) + + page_shift))) { + physical_transfer_cluster_count = 0; + j = start_index + 1; + } else { + physical_transfer_cluster_count+=1; + j = start_index; + if(start_index == 0) + 
start_index = paging_segment_max; + else + start_index = start_index - 1; + } + + while (1) { + if (j > paging_segment_max) + j = 0; + if ((ps = paging_segments[j]) && + (ps->ps_bs->bs_priority == i)) { + /* + * Force the ps cluster size to be + * >= that of the vstruct. + */ + PS_LOCK(ps); + if (ps->ps_going_away) { + /* this segment is being turned off */ + } else if ((ps->ps_clcount) && + (ps->ps_clshift >= shift)) { + ps->ps_clcount--; + dp_pages_free -= 1 << ps->ps_clshift; + if(min_pages_trigger_port && + (dp_pages_free < + minimum_pages_remaining)) { + default_pager_space_alert( + min_pages_trigger_port, + HI_WAT_ALERT); + min_pages_trigger_port = NULL; + } + PS_UNLOCK(ps); + /* + * found one, quit looking. + */ + ps_select_array[i] = j; + PSL_UNLOCK(); + *psindex = j; + return ps; + } + PS_UNLOCK(ps); + } + if (j == start_index) { + /* + * none at this priority -- mark it full + */ + ps_select_array[i] = BS_FULLPRI; + break; + } + j++; + } + } + PSL_UNLOCK(); + return PAGING_SEGMENT_NULL; +} + +vm_offset_t ps_allocate_cluster(vstruct_t, int *, paging_segment_t); /*forward*/ + +vm_offset_t +ps_allocate_cluster( + vstruct_t vs, + int *psindex, + paging_segment_t use_ps) +{ + int byte_num; + int bit_num = 0; + paging_segment_t ps; + vm_offset_t cluster; + static char here[] = "ps_allocate_cluster"; + + /* + * Find best paging segment. + * ps_select_segment will decrement cluster count on ps. + * Must pass cluster shift to find the most appropriate segment. + */ + /* NOTE: The addition of paging segment delete capability threatened + * to seriously complicate the treatment of paging segments in this + * module and the ones that call it (notably ps_clmap), because of the + * difficulty in assuring that the paging segment would continue to + * exist between being unlocked and locked. 
This was + * avoided because all calls to this module are based in either + * dp_memory_object calls which rely on the vs lock, or by + * the transfer function which is part of the segment delete path. + * The transfer function which is part of paging segment delete is + * protected from multiple callers by the backing store lock. + * The paging segment delete function treats mappings to a paging + * segment on a vstruct by vstruct basis, locking the vstruct targeted + * while data is transferred to the remaining segments. This is in + * line with the view that incomplete or in-transition mappings between + * data, a vstruct, and backing store are protected by the vs lock. + * This and the ordering of the paging segment "going_away" bit setting + * protects us. + */ + if (use_ps != PAGING_SEGMENT_NULL) { + ps = use_ps; + PSL_LOCK(); + PS_LOCK(ps); + ps->ps_clcount--; + dp_pages_free -= 1 << ps->ps_clshift; + PSL_UNLOCK(); + if(min_pages_trigger_port && + (dp_pages_free < minimum_pages_remaining)) { + default_pager_space_alert( + min_pages_trigger_port, + HI_WAT_ALERT); + min_pages_trigger_port = NULL; + } + PS_UNLOCK(ps); + } else if ((ps = ps_select_segment(vs->vs_clshift, psindex)) == + PAGING_SEGMENT_NULL) { +#if 0 + bs_no_paging_space(TRUE); +#endif +#if 0 + if (verbose) +#endif + dprintf(("no space in available paging segments; " + "swapon suggested\n")); + /* the count got off maybe, reset to zero */ + dp_pages_free = 0; + if(min_pages_trigger_port) { + default_pager_space_alert( + min_pages_trigger_port, HI_WAT_ALERT); + min_pages_trigger_port = NULL; + bs_low = TRUE; + } + return (vm_offset_t) -1; + } + ASSERT(ps->ps_clcount != 0); + + /* + * Look for an available cluster. At the end of the loop, + * byte_num is the byte offset and bit_num is the bit offset of the + * first zero bit in the paging segment bitmap. 
+ */ + PS_LOCK(ps); + byte_num = ps->ps_hint; + for (; byte_num < howmany(ps->ps_ncls, NBBY); byte_num++) { + if (*(ps->ps_bmap + byte_num) != BYTEMASK) { + for (bit_num = 0; bit_num < NBBY; bit_num++) { + if (isclr((ps->ps_bmap + byte_num), bit_num)) + break; + } + ASSERT(bit_num != NBBY); + break; + } + } + ps->ps_hint = byte_num; + cluster = (byte_num*NBBY) + bit_num; + + /* Space was reserved, so this must be true */ + ASSERT(cluster < ps->ps_ncls); + + setbit(ps->ps_bmap, cluster); + PS_UNLOCK(ps); + + return cluster; +} + +void ps_deallocate_cluster(paging_segment_t, vm_offset_t); /* forward */ + +void +ps_deallocate_cluster( + paging_segment_t ps, + vm_offset_t cluster) +{ + + if (cluster >= (vm_offset_t) ps->ps_ncls) + panic("ps_deallocate_cluster: Invalid cluster number"); + + /* + * Lock the paging segment, clear the cluster's bitmap and increment the + * number of free cluster. + */ + PSL_LOCK(); + PS_LOCK(ps); + clrbit(ps->ps_bmap, cluster); + ++ps->ps_clcount; + dp_pages_free += 1 << ps->ps_clshift; + PSL_UNLOCK(); + if(max_pages_trigger_port && (dp_pages_free > maximum_pages_free)) { + default_pager_space_alert(max_pages_trigger_port, LO_WAT_ALERT); + max_pages_trigger_port = NULL; + } + + /* + * Move the hint down to the freed cluster if it is + * less than the current hint. + */ + if ((cluster/NBBY) < ps->ps_hint) { + ps->ps_hint = (cluster/NBBY); + } + + PS_UNLOCK(ps); + + /* + * If we're freeing space on a full priority, reset the array. 
 */
	PSL_LOCK();
	if (ps_select_array[ps->ps_bs->bs_priority] == BS_FULLPRI)
		ps_select_array[ps->ps_bs->bs_priority] = 0;
	PSL_UNLOCK();

	return;
}

void ps_dealloc_vsmap(struct vs_map *, vm_size_t);	/* forward */

/*
 * Release every paging-segment cluster referenced by a vs_map array.
 * Entries that are clear (never allocated) or marked in-error hold no
 * cluster and are skipped.  Caller holds the vstruct's map lock.
 */
void
ps_dealloc_vsmap(
	struct vs_map	*vsmap,
	vm_size_t	size)
{
	int i;
	for (i = 0; i < size; i++)
		if (!VSM_ISCLR(vsmap[i]) && !VSM_ISERR(vsmap[i]))
			ps_deallocate_cluster(VSM_PS(vsmap[i]),
					      VSM_CLOFF(vsmap[i]));
}

/*
 * Tear down a vstruct: free all backing-store clusters and the
 * (direct or indirect) cluster map, undo the space commitment, then
 * neutralize and release the pager port before freeing the vstruct
 * itself.
 */
void
ps_vstruct_dealloc(
	vstruct_t vs)
{
	int	i;
	spl_t	s;
	static char here[] = "ps_vstruct_dealloc";

	VS_MAP_LOCK(vs);

	/*
	 * If this is an indirect structure, then we walk through the valid
	 * (non-zero) indirect pointers and deallocate the clusters
	 * associated with each used map entry (via ps_dealloc_vsmap).
	 * When all of the clusters in an indirect block have been
	 * freed, we deallocate the block.  When all of the indirect
	 * blocks have been deallocated we deallocate the memory
	 * holding the indirect pointers.
	 */
	if (vs->vs_indirect) {
		for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) {
			if (vs->vs_imap[i] != NULL) {
				ps_dealloc_vsmap(vs->vs_imap[i], CLMAP_ENTRIES);
				kfree((vm_offset_t)vs->vs_imap[i],
				      CLMAP_THRESHOLD);
			}
		}
		kfree((vm_offset_t)vs->vs_imap,
		      INDIRECT_CLMAP_SIZE(vs->vs_size));
	} else {
		/*
		 * Direct map.  Free used clusters, then memory.
+ */ + ps_dealloc_vsmap(vs->vs_dmap, vs->vs_size); + kfree((vm_offset_t)vs->vs_dmap, CLMAP_SIZE(vs->vs_size)); + } + VS_MAP_UNLOCK(vs); + + bs_commit(- vs->vs_size); + + ip_lock(vs_to_port(vs)); + (vs_to_port(vs))->ip_destination = 0; + (vs_to_port(vs))->ip_receiver_name = MACH_PORT_NULL; + + s= splsched(); + imq_lock(&vs_to_port(vs)->ip_messages); + (vs_to_port(vs))->ip_mscount = 0; + (vs_to_port(vs))->ip_messages.imq_seqno = 0; + imq_unlock(&vs_to_port(vs)->ip_messages); + splx(s); + + ip_unlock(vs_to_port(vs)); + pager_mux_hash_delete((ipc_port_t) vs_to_port(vs)); + ipc_port_release_receive(vs_to_port(vs)); + /* + * Do this *after* deallocating the port name + */ + kfree((vm_offset_t)vs, sizeof *vs); +} + +int ps_map_extend(vstruct_t, int); /* forward */ + +int ps_map_extend( + vstruct_t vs, + int new_size) +{ + struct vs_map **new_imap; + struct vs_map *new_dmap = NULL; + int newdsize; + int i; + void *old_map = NULL; + int old_map_size = 0; + + if (vs->vs_size >= new_size) { + /* + * Someone has already done the work. + */ + return 0; + } + + /* + * If the new size extends into the indirect range, then we have one + * of two cases: we are going from indirect to indirect, or we are + * going from direct to indirect. If we are going from indirect to + * indirect, then it is possible that the new size will fit in the old + * indirect map. If this is the case, then just reset the size of the + * vstruct map and we are done. If the new size will not + * fit into the old indirect map, then we have to allocate a new + * indirect map and copy the old map pointers into this new map. + * + * If we are going from direct to indirect, then we have to allocate a + * new indirect map and copy the old direct pages into the first + * indirect page of the new map. + * NOTE: allocating memory here is dangerous, as we're in the + * pageout path. 
+ */ + if (INDIRECT_CLMAP(new_size)) { + int new_map_size = INDIRECT_CLMAP_SIZE(new_size); + + /* + * Get a new indirect map and zero it. + */ + old_map_size = INDIRECT_CLMAP_SIZE(vs->vs_size); + if (vs->vs_indirect && + (new_map_size == old_map_size)) { + bs_commit(new_size - vs->vs_size); + vs->vs_size = new_size; + return 0; + } + + new_imap = (struct vs_map **)kalloc(new_map_size); + if (new_imap == NULL) { + return -1; + } + memset(new_imap, 0, new_map_size); + + if (vs->vs_indirect) { + /* Copy old entries into new map */ + memcpy(new_imap, vs->vs_imap, old_map_size); + /* Arrange to free the old map */ + old_map = (void *) vs->vs_imap; + newdsize = 0; + } else { /* Old map was a direct map */ + /* Allocate an indirect page */ + if ((new_imap[0] = (struct vs_map *) + kalloc(CLMAP_THRESHOLD)) == NULL) { + kfree((vm_offset_t)new_imap, new_map_size); + return -1; + } + new_dmap = new_imap[0]; + newdsize = CLMAP_ENTRIES; + } + } else { + new_imap = NULL; + newdsize = new_size; + /* + * If the new map is a direct map, then the old map must + * also have been a direct map. All we have to do is + * to allocate a new direct map, copy the old entries + * into it and free the old map. 
+ */ + if ((new_dmap = (struct vs_map *) + kalloc(CLMAP_SIZE(new_size))) == NULL) { + return -1; + } + } + if (newdsize) { + + /* Free the old map */ + old_map = (void *) vs->vs_dmap; + old_map_size = CLMAP_SIZE(vs->vs_size); + + /* Copy info from the old map into the new map */ + memcpy(new_dmap, vs->vs_dmap, old_map_size); + + /* Initialize the rest of the new map */ + for (i = vs->vs_size; i < newdsize; i++) + VSM_CLR(new_dmap[i]); + } + if (new_imap) { + vs->vs_imap = new_imap; + vs->vs_indirect = TRUE; + } else + vs->vs_dmap = new_dmap; + bs_commit(new_size - vs->vs_size); + vs->vs_size = new_size; + if (old_map) + kfree((vm_offset_t)old_map, old_map_size); + return 0; +} + +vm_offset_t +ps_clmap( + vstruct_t vs, + vm_offset_t offset, + struct clmap *clmap, + int flag, + vm_size_t size, + int error) +{ + vm_offset_t cluster; /* The cluster of offset. */ + vm_offset_t newcl; /* The new cluster allocated. */ + vm_offset_t newoff; + int i; + struct vs_map *vsmap; + static char here[] = "ps_clmap"; + + VS_MAP_LOCK(vs); + + ASSERT(vs->vs_dmap); + cluster = atop(offset) >> vs->vs_clshift; + + /* + * Initialize cluster error value + */ + clmap->cl_error = 0; + + /* + * If the object has grown, extend the page map. + */ + if (cluster >= vs->vs_size) { + if (flag == CL_FIND) { + /* Do not allocate if just doing a lookup */ + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } + if (ps_map_extend(vs, cluster + 1)) { + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } + } + + /* + * Look for the desired cluster. If the map is indirect, then we + * have a two level lookup. First find the indirect block, then + * find the actual cluster. If the indirect block has not yet + * been allocated, then do so. If the cluster has not yet been + * allocated, then do so. + * + * If any of the allocations fail, then return an error. + * Don't allocate if just doing a lookup. + */ + if (vs->vs_indirect) { + long ind_block = cluster/CLMAP_ENTRIES; + + /* Is the indirect block allocated? 
*/ + vsmap = vs->vs_imap[ind_block]; + if (vsmap == NULL) { + if (flag == CL_FIND) { + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } + + /* Allocate the indirect block */ + vsmap = (struct vs_map *) kalloc(CLMAP_THRESHOLD); + if (vsmap == NULL) { + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } + /* Initialize the cluster offsets */ + for (i = 0; i < CLMAP_ENTRIES; i++) + VSM_CLR(vsmap[i]); + vs->vs_imap[ind_block] = vsmap; + } + } else + vsmap = vs->vs_dmap; + + ASSERT(vsmap); + vsmap += cluster%CLMAP_ENTRIES; + + /* + * At this point, vsmap points to the struct vs_map desired. + * + * Look in the map for the cluster, if there was an error on a + * previous write, flag it and return. If it is not yet + * allocated, then allocate it, if we're writing; if we're + * doing a lookup and the cluster's not allocated, return error. + */ + if (VSM_ISERR(*vsmap)) { + clmap->cl_error = VSM_GETERR(*vsmap); + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } else if (VSM_ISCLR(*vsmap)) { + int psindex; + + if (flag == CL_FIND) { + /* + * If there's an error and the entry is clear, then + * we've run out of swap space. Record the error + * here and return. + */ + if (error) { + VSM_SETERR(*vsmap, error); + } + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } else { + /* + * Attempt to allocate a cluster from the paging segment + */ + newcl = ps_allocate_cluster(vs, &psindex, + PAGING_SEGMENT_NULL); + if (newcl == -1) { + VS_MAP_UNLOCK(vs); + return (vm_offset_t) -1; + } + VSM_CLR(*vsmap); + VSM_SETCLOFF(*vsmap, newcl); + VSM_SETPS(*vsmap, psindex); + } + } else + newcl = VSM_CLOFF(*vsmap); + + /* + * Fill in pertinent fields of the clmap + */ + clmap->cl_ps = VSM_PS(*vsmap); + clmap->cl_numpages = VSCLSIZE(vs); + clmap->cl_bmap.clb_map = (unsigned int) VSM_BMAP(*vsmap); + + /* + * Byte offset in paging segment is byte offset to cluster plus + * byte offset within cluster. It looks ugly, but should be + * relatively quick. 
+ */ + ASSERT(trunc_page(offset) == offset); + newcl = ptoa(newcl) << vs->vs_clshift; + newoff = offset & ((1<<(vm_page_shift + vs->vs_clshift)) - 1); + if (flag == CL_ALLOC) { + /* + * set bits in the allocation bitmap according to which + * pages were requested. size is in bytes. + */ + i = atop(newoff); + while ((size > 0) && (i < VSCLSIZE(vs))) { + VSM_SETALLOC(*vsmap, i); + i++; + size -= vm_page_size; + } + } + clmap->cl_alloc.clb_map = (unsigned int) VSM_ALLOC(*vsmap); + if (newoff) { + /* + * Offset is not cluster aligned, so number of pages + * and bitmaps must be adjusted + */ + clmap->cl_numpages -= atop(newoff); + CLMAP_SHIFT(clmap, vs); + CLMAP_SHIFTALLOC(clmap, vs); + } + + /* + * + * The setting of valid bits and handling of write errors + * must be done here, while we hold the lock on the map. + * It logically should be done in ps_vs_write_complete(). + * The size and error information has been passed from + * ps_vs_write_complete(). If the size parameter is non-zero, + * then there is work to be done. If error is also non-zero, + * then the error number is recorded in the cluster and the + * entire cluster is in error. + */ + if (size && flag == CL_FIND) { + vm_offset_t off = (vm_offset_t) 0; + + if (!error) { + for (i = VSCLSIZE(vs) - clmap->cl_numpages; size > 0; + i++) { + VSM_SETPG(*vsmap, i); + size -= vm_page_size; + } + ASSERT(i <= VSCLSIZE(vs)); + } else { + BS_STAT(clmap->cl_ps->ps_bs, + clmap->cl_ps->ps_bs->bs_pages_out_fail += + atop(size)); + off = VSM_CLOFF(*vsmap); + VSM_SETERR(*vsmap, error); + } + /* + * Deallocate cluster if error, and no valid pages + * already present. 
 */
		if (off != (vm_offset_t) 0)
			ps_deallocate_cluster(clmap->cl_ps, off);
		VS_MAP_UNLOCK(vs);
		return (vm_offset_t) 0;
	} else
		VS_MAP_UNLOCK(vs);

	DEBUG(DEBUG_VS_INTERNAL,
	      ("returning 0x%X,vs=0x%X,vsmap=0x%X,flag=%d\n",
	       newcl+newoff, (int) vs, (int) vsmap, flag));
	DEBUG(DEBUG_VS_INTERNAL,
	      ("  clmap->cl_ps=0x%X,cl_numpages=%d,clbmap=0x%x,cl_alloc=%x\n",
	       (int) clmap->cl_ps, clmap->cl_numpages,
	       (int) clmap->cl_bmap.clb_map, (int) clmap->cl_alloc.clb_map));

	return (newcl + newoff);
}

void ps_clunmap(vstruct_t, vm_offset_t, vm_size_t);	/* forward */

/*
 * Unmap [offset, offset+length) of a vstruct: clear the per-page
 * "paged" and "allocated" bits in each covered map entry, and give
 * any cluster that becomes wholly unused back to its paging segment.
 * Offset and length are in bytes; takes the vstruct's map lock.
 */
void
ps_clunmap(
	vstruct_t	vs,
	vm_offset_t	offset,
	vm_size_t	length)
{
	vm_offset_t		cluster; /* The cluster number of offset */
	struct vs_map		*vsmap;
	static char here[] = "ps_clunmap";

	VS_MAP_LOCK(vs);

	/*
	 * Loop through all clusters in this range, freeing paging segment
	 * clusters and map entries as encountered.
	 */
	while (length > 0) {
		vm_offset_t	newoff;
		int		i;

		cluster = atop(offset) >> vs->vs_clshift;
		if (vs->vs_indirect)	/* indirect map */
			vsmap = vs->vs_imap[cluster/CLMAP_ENTRIES];
		else
			vsmap = vs->vs_dmap;
		if (vsmap == NULL) {
			/* Hole in the indirect map: nothing beyond here
			 * was ever allocated, so we are done. */
			VS_MAP_UNLOCK(vs);
			return;
		}
		vsmap += cluster%CLMAP_ENTRIES;
		if (VSM_ISCLR(*vsmap)) {
			/* No cluster mapped at this page; skip ahead. */
			length -= vm_page_size;
			offset += vm_page_size;
			continue;
		}
		/*
		 * We've got a valid mapping.  Clear it and deallocate
		 * paging segment cluster pages.
		 * Optimize for entire cluster clearing.
		 */
		/* NOTE: assignment (not comparison) is intentional here:
		 * newoff is the byte offset within the cluster. */
		if (newoff = (offset&((1<<(vm_page_shift+vs->vs_clshift))-1))) {
			/*
			 * Not cluster aligned.
			 */
			ASSERT(trunc_page(newoff) == newoff);
			i = atop(newoff);
		} else
			i = 0;
		/* Clear page/alloc bits up to the end of the cluster or
		 * the end of the requested range, whichever comes first. */
		while ((i < VSCLSIZE(vs)) && (length > 0)) {
			VSM_CLRPG(*vsmap, i);
			VSM_CLRALLOC(*vsmap, i);
			length -= vm_page_size;
			offset += vm_page_size;
			i++;
		}

		/*
		 * If map entry is empty, clear and deallocate cluster.
+ */ + if (!VSM_ALLOC(*vsmap)) { + ps_deallocate_cluster(VSM_PS(*vsmap), + VSM_CLOFF(*vsmap)); + VSM_CLR(*vsmap); + } + } + + VS_MAP_UNLOCK(vs); +} + +void ps_vs_write_complete(vstruct_t, vm_offset_t, vm_size_t, int); /* forward */ + +void +ps_vs_write_complete( + vstruct_t vs, + vm_offset_t offset, + vm_size_t size, + int error) +{ + struct clmap clmap; + + /* + * Get the struct vsmap for this cluster. + * Use READ, even though it was written, because the + * cluster MUST be present, unless there was an error + * in the original ps_clmap (e.g. no space), in which + * case, nothing happens. + * + * Must pass enough information to ps_clmap to allow it + * to set the vs_map structure bitmap under lock. + */ + (void) ps_clmap(vs, offset, &clmap, CL_FIND, size, error); +} + +void vs_cl_write_complete(vstruct_t, paging_segment_t, vm_offset_t, vm_offset_t, vm_size_t, boolean_t, int); /* forward */ + +void +vs_cl_write_complete( + vstruct_t vs, + paging_segment_t ps, + vm_offset_t offset, + vm_offset_t addr, + vm_size_t size, + boolean_t async, + int error) +{ + static char here[] = "vs_cl_write_complete"; + kern_return_t kr; + + if (error) { + /* + * For internal objects, the error is recorded on a + * per-cluster basis by ps_clmap() which is called + * by ps_vs_write_complete() below. + */ + dprintf(("write failed error = 0x%x\n", error)); + /* add upl_abort code here */ + } else + GSTAT(global_stats.gs_pages_out += atop(size)); + /* + * Notify the vstruct mapping code, so it can do its accounting. 
+ */ + ps_vs_write_complete(vs, offset, size, error); + + if (async) { + VS_LOCK(vs); + ASSERT(vs->vs_async_pending > 0); + vs->vs_async_pending -= size; + if (vs->vs_async_pending == 0) { + VS_UNLOCK(vs); + /* mutex_unlock(&vs->vs_waiting_async); */ + thread_wakeup(&vs->vs_waiting_async); + } else { + VS_UNLOCK(vs); + } + } +} + +#ifdef DEVICE_PAGING +kern_return_t device_write_reply(MACH_PORT_FACE, kern_return_t, io_buf_len_t); + +kern_return_t +device_write_reply( + MACH_PORT_FACE reply_port, + kern_return_t device_code, + io_buf_len_t bytes_written) +{ + struct vs_async *vsa; + static char here[] = "device_write_reply"; + + vsa = (struct vs_async *) + ((struct vstruct_alias *)(reply_port->alias))->vs; + + if (device_code == KERN_SUCCESS && bytes_written != vsa->vsa_size) { + device_code = KERN_FAILURE; + } + + vsa->vsa_error = device_code; + + + ASSERT(vsa->vsa_vs != VSTRUCT_NULL); + if(vsa->vsa_flags & VSA_TRANSFER) { + /* revisit when async disk segments redone */ + if(vsa->vsa_error) { + /* need to consider error condition. re-write data or */ + /* throw it away here. 
*/ + vm_offset_t ioaddr; + if(vm_map_copyout(kernel_map, &ioaddr, + (vm_map_copy_t)vsa->vsa_addr) != KERN_SUCCESS) + panic("vs_cluster_write: unable to copy source list\n"); + vm_deallocate(kernel_map, ioaddr, vsa->vsa_size); + } + ps_vs_write_complete(vsa->vsa_vs, vsa->vsa_offset, + vsa->vsa_size, vsa->vsa_error); + } else { + vs_cl_write_complete(vsa->vsa_vs, vsa->vsa_ps, vsa->vsa_offset, + vsa->vsa_addr, vsa->vsa_size, TRUE, + vsa->vsa_error); + } + VS_FREE_ASYNC(vsa); + + return KERN_SUCCESS; +} + +kern_return_t device_write_reply_inband(MACH_PORT_FACE, kern_return_t, io_buf_len_t); +kern_return_t +device_write_reply_inband( + MACH_PORT_FACE reply_port, + kern_return_t return_code, + io_buf_len_t bytes_written) +{ + panic("device_write_reply_inband: illegal"); + return KERN_SUCCESS; +} + +kern_return_t device_read_reply(MACH_PORT_FACE, kern_return_t, io_buf_ptr_t, mach_msg_type_number_t); +kern_return_t +device_read_reply( + MACH_PORT_FACE reply_port, + kern_return_t return_code, + io_buf_ptr_t data, + mach_msg_type_number_t dataCnt) +{ + struct vs_async *vsa; + vsa = (struct vs_async *) + ((struct vstruct_alias *)(reply_port->alias))->vs; + vsa->vsa_addr = (vm_offset_t)data; + vsa->vsa_size = (vm_size_t)dataCnt; + vsa->vsa_error = return_code; + thread_wakeup(&vsa->vsa_lock); + return KERN_SUCCESS; +} + +kern_return_t device_read_reply_inband(MACH_PORT_FACE, kern_return_t, io_buf_ptr_inband_t, mach_msg_type_number_t); +kern_return_t +device_read_reply_inband( + MACH_PORT_FACE reply_port, + kern_return_t return_code, + io_buf_ptr_inband_t data, + mach_msg_type_number_t dataCnt) +{ + panic("device_read_reply_inband: illegal"); + return KERN_SUCCESS; +} + +kern_return_t device_read_reply_overwrite(MACH_PORT_FACE, kern_return_t, io_buf_len_t); +kern_return_t +device_read_reply_overwrite( + MACH_PORT_FACE reply_port, + kern_return_t return_code, + io_buf_len_t bytes_read) +{ + panic("device_read_reply_overwrite: illegal\n"); + return KERN_SUCCESS; +} + 
+kern_return_t device_open_reply(MACH_PORT_FACE, kern_return_t, MACH_PORT_FACE); +kern_return_t +device_open_reply( + MACH_PORT_FACE reply_port, + kern_return_t return_code, + MACH_PORT_FACE device_port) +{ + panic("device_open_reply: illegal\n"); + return KERN_SUCCESS; +} + +kern_return_t ps_read_device(paging_segment_t, vm_offset_t, vm_offset_t *, unsigned int, unsigned int *, int); /* forward */ + +kern_return_t +ps_read_device( + paging_segment_t ps, + vm_offset_t offset, + vm_offset_t *bufferp, + unsigned int size, + unsigned int *residualp, + int flags) +{ + kern_return_t kr; + recnum_t dev_offset; + unsigned int bytes_wanted; + unsigned int bytes_read; + unsigned int total_read; + vm_offset_t dev_buffer; + vm_offset_t buf_ptr; + unsigned int records_read; + static char here[] = "ps_read_device"; + struct vs_async *vsa; + mutex_t vs_waiting_read_reply; + + device_t device; + vm_map_copy_t device_data = NULL; + default_pager_thread_t *dpt = NULL; + + device = dev_port_lookup(ps->ps_device); + clustered_reads[atop(size)]++; + + dev_offset = (ps->ps_offset + + (offset >> (vm_page_shift - ps->ps_record_shift))); + bytes_wanted = size; + total_read = 0; + *bufferp = (vm_offset_t)NULL; + + do { + vsa = VS_ALLOC_ASYNC(); + if (vsa) { + vsa->vsa_vs = NULL; + vsa->vsa_addr = 0; + vsa->vsa_offset = 0; + vsa->vsa_size = 0; + vsa->vsa_ps = NULL; + } + mutex_init(&vsa->vsa_lock, ETAP_DPAGE_VSSEQNO); + ip_lock(vsa->reply_port); + vsa->reply_port->ip_sorights++; + ip_reference(vsa->reply_port); + ip_unlock(vsa->reply_port); + kr = ds_device_read_common(device, + vsa->reply_port, + (mach_msg_type_name_t) + MACH_MSG_TYPE_MOVE_SEND_ONCE, + (dev_mode_t) 0, + dev_offset, + bytes_wanted, + (IO_READ | IO_CALL), + (io_buf_ptr_t *) &dev_buffer, + (mach_msg_type_number_t *) &bytes_read); + if(kr == MIG_NO_REPLY) { + assert_wait(&vsa->vsa_lock, THREAD_UNINT); + thread_block((void(*)(void))0); + + dev_buffer = vsa->vsa_addr; + bytes_read = (unsigned int)vsa->vsa_size; + kr = 
vsa->vsa_error; + } + VS_FREE_ASYNC(vsa); + if (kr != KERN_SUCCESS || bytes_read == 0) { + break; + } + total_read += bytes_read; + + /* + * If we got the entire range, use the returned dev_buffer. + */ + if (bytes_read == size) { + *bufferp = (vm_offset_t)dev_buffer; + break; + } + +#if 1 + dprintf(("read only %d bytes out of %d\n", + bytes_read, bytes_wanted)); +#endif + if(dpt == NULL) { + dpt = get_read_buffer(); + buf_ptr = dpt->dpt_buffer; + *bufferp = (vm_offset_t)buf_ptr; + } + /* + * Otherwise, copy the data into the provided buffer (*bufferp) + * and append the rest of the range as it comes in. + */ + memcpy((void *) buf_ptr, (void *) dev_buffer, bytes_read); + buf_ptr += bytes_read; + bytes_wanted -= bytes_read; + records_read = (bytes_read >> + (vm_page_shift - ps->ps_record_shift)); + dev_offset += records_read; + DEBUG(DEBUG_VS_INTERNAL, + ("calling vm_deallocate(addr=0x%X,size=0x%X)\n", + dev_buffer, bytes_read)); + if (vm_deallocate(kernel_map, dev_buffer, bytes_read) + != KERN_SUCCESS) + Panic("dealloc buf"); + } while (bytes_wanted); + + *residualp = size - total_read; + if((dev_buffer != *bufferp) && (total_read != 0)) { + vm_offset_t temp_buffer; + vm_allocate(kernel_map, &temp_buffer, total_read, TRUE); + memcpy((void *) temp_buffer, (void *) *bufferp, total_read); + if(vm_map_copyin_page_list(kernel_map, temp_buffer, total_read, + VM_MAP_COPYIN_OPT_SRC_DESTROY | + VM_MAP_COPYIN_OPT_STEAL_PAGES | + VM_MAP_COPYIN_OPT_PMAP_ENTER, + (vm_map_copy_t *)&device_data, FALSE)) + panic("ps_read_device: cannot copyin locally provided buffer\n"); + } + else if((kr == KERN_SUCCESS) && (total_read != 0) && (dev_buffer != 0)){ + if(vm_map_copyin_page_list(kernel_map, dev_buffer, bytes_read, + VM_MAP_COPYIN_OPT_SRC_DESTROY | + VM_MAP_COPYIN_OPT_STEAL_PAGES | + VM_MAP_COPYIN_OPT_PMAP_ENTER, + (vm_map_copy_t *)&device_data, FALSE)) + panic("ps_read_device: cannot copyin backing store provided buffer\n"); + } + else { + device_data = NULL; + } + *bufferp = 
(vm_offset_t)device_data; + + if(dpt != NULL) { + /* Free the receive buffer */ + dpt->checked_out = 0; + thread_wakeup(&dpt_array); + } + return KERN_SUCCESS; +} + +kern_return_t ps_write_device(paging_segment_t, vm_offset_t, vm_offset_t, unsigned int, struct vs_async *); /* forward */ + +kern_return_t +ps_write_device( + paging_segment_t ps, + vm_offset_t offset, + vm_offset_t addr, + unsigned int size, + struct vs_async *vsa) +{ + recnum_t dev_offset; + io_buf_len_t bytes_to_write, bytes_written; + recnum_t records_written; + kern_return_t kr; + MACH_PORT_FACE reply_port; + static char here[] = "ps_write_device"; + + + + clustered_writes[atop(size)]++; + + dev_offset = (ps->ps_offset + + (offset >> (vm_page_shift - ps->ps_record_shift))); + bytes_to_write = size; + + if (vsa) { + /* + * Asynchronous write. + */ + reply_port = vsa->reply_port; + ip_lock(reply_port); + reply_port->ip_sorights++; + ip_reference(reply_port); + ip_unlock(reply_port); + { + device_t device; + device = dev_port_lookup(ps->ps_device); + + vsa->vsa_addr = addr; + kr=ds_device_write_common(device, + reply_port, + (mach_msg_type_name_t) MACH_MSG_TYPE_MOVE_SEND_ONCE, + (dev_mode_t) 0, + dev_offset, + (io_buf_ptr_t) addr, + size, + (IO_WRITE | IO_CALL), + &bytes_written); + } + if ((kr != KERN_SUCCESS) && (kr != MIG_NO_REPLY)) { + if (verbose) + dprintf(("%s0x%x, addr=0x%x," + "size=0x%x,offset=0x%x\n", + "device_write_request returned ", + kr, addr, size, offset)); + BS_STAT(ps->ps_bs, + ps->ps_bs->bs_pages_out_fail += atop(size)); + /* do the completion notification to free resources */ + device_write_reply(reply_port, kr, 0); + return PAGER_ERROR; + } + } else do { + /* + * Synchronous write. 
+ */ + { + device_t device; + device = dev_port_lookup(ps->ps_device); + kr=ds_device_write_common(device, + IP_NULL, 0, + (dev_mode_t) 0, + dev_offset, + (io_buf_ptr_t) addr, + size, + (IO_WRITE | IO_SYNC | IO_KERNEL_BUF), + &bytes_written); + } + if (kr != KERN_SUCCESS) { + dprintf(("%s0x%x, addr=0x%x,size=0x%x,offset=0x%x\n", + "device_write returned ", + kr, addr, size, offset)); + BS_STAT(ps->ps_bs, + ps->ps_bs->bs_pages_out_fail += atop(size)); + return PAGER_ERROR; + } + if (bytes_written & ((vm_page_size >> ps->ps_record_shift) - 1)) + Panic("fragmented write"); + records_written = (bytes_written >> + (vm_page_shift - ps->ps_record_shift)); + dev_offset += records_written; +#if 1 + if (bytes_written != bytes_to_write) { + dprintf(("wrote only %d bytes out of %d\n", + bytes_written, bytes_to_write)); + } +#endif + bytes_to_write -= bytes_written; + addr += bytes_written; + } while (bytes_to_write > 0); + + return PAGER_SUCCESS; +} + + +#else /* !DEVICE_PAGING */ + +kern_return_t +ps_read_device( + paging_segment_t ps, + vm_offset_t offset, + vm_offset_t *bufferp, + unsigned int size, + unsigned int *residualp, + int flags) +{ + panic("ps_read_device not supported"); +} + +ps_write_device( + paging_segment_t ps, + vm_offset_t offset, + vm_offset_t addr, + unsigned int size, + struct vs_async *vsa) +{ + panic("ps_write_device not supported"); +} + +#endif /* DEVICE_PAGING */ +void pvs_object_data_provided(vstruct_t, upl_t, vm_offset_t, vm_size_t); /* forward */ + +void +pvs_object_data_provided( + vstruct_t vs, + upl_t upl, + vm_offset_t offset, + vm_size_t size) +{ + static char here[] = "pvs_object_data_provided"; + + DEBUG(DEBUG_VS_INTERNAL, + ("buffer=0x%x,offset=0x%x,size=0x%x\n", + upl, offset, size)); + + ASSERT(size > 0); + GSTAT(global_stats.gs_pages_in += atop(size)); + + +#if USE_PRECIOUS + ps_clunmap(vs, offset, size); +#endif /* USE_PRECIOUS */ + +} + +kern_return_t +pvs_cluster_read( + vstruct_t vs, + vm_offset_t offset, + vm_size_t cnt) +{ + 
vm_offset_t actual_offset; + vm_offset_t buffer; + paging_segment_t ps; + struct clmap clmap; + upl_t upl; + kern_return_t error = KERN_SUCCESS; + int size, size_wanted, i; + unsigned int residual; + unsigned int request_flags; + int unavail_size; + default_pager_thread_t *dpt; + boolean_t dealloc; + static char here[] = "pvs_cluster_read"; + + /* + * This loop will be executed once per cluster referenced. + * Typically this means once, since it's unlikely that the + * VM system will ask for anything spanning cluster boundaries. + * + * If there are holes in a cluster (in a paging segment), we stop + * reading at the hole, inform the VM of any data read, inform + * the VM of an unavailable range, then loop again, hoping to + * find valid pages later in the cluster. This continues until + * the entire range has been examined, and read, if present. + */ + +#if USE_PRECIOUS + request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_PRECIOUS; +#else + request_flags = UPL_NO_SYNC | UPL_CLEAN_IN_PLACE ; +#endif + while (cnt && (error == KERN_SUCCESS)) { + actual_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0); + + if (actual_offset == (vm_offset_t) -1) { + + /* + * Either a failure due to an error on a previous + * write or a zero fill on demand page. In either case, + * optimize to do one reply for all pages up to next + * cluster boundary. 
+ */ + unsigned int local_size, clmask, clsize; + + clmask = (vm_page_size << vs->vs_clshift) - 1; + clsize = vm_page_size << vs->vs_clshift; + clmask = clsize - 1; + local_size = clsize - (offset & clmask); + ASSERT(local_size); + local_size = MIN(local_size, cnt); + + upl_system_list_request((vm_object_t) + vs->vs_control_port->ip_kobject, + offset, local_size, local_size, + &upl, NULL, 0, request_flags); + if (clmap.cl_error) { + uc_upl_abort(upl, UPL_ABORT_ERROR); + } else { + uc_upl_abort(upl, UPL_ABORT_UNAVAILABLE); + } + + cnt -= local_size; + offset += local_size; + continue; + } + + /* + * Count up contiguous available or unavailable + * pages. + */ + ps = CLMAP_PS(clmap); + ASSERT(ps); + size = 0; + unavail_size = 0; + + for (i = 0; + (size < cnt) && (unavail_size < cnt) && + (i < CLMAP_NPGS(clmap)); i++) { + if (CLMAP_ISSET(clmap, i)) { + if (unavail_size != 0) + break; + size += vm_page_size; + BS_STAT(ps->ps_bs, + ps->ps_bs->bs_pages_in++); + } else { + if (size != 0) + break; + unavail_size += vm_page_size; + } + } + /* + * Let VM system know about holes in clusters. + */ + if (size == 0) { + ASSERT(unavail_size); + GSTAT(global_stats.gs_pages_unavail += + atop(unavail_size)); + upl_system_list_request((vm_object_t) + vs->vs_control_port->ip_kobject, + offset, unavail_size, + unavail_size, &upl, NULL, 0, + request_flags); + uc_upl_abort(upl, UPL_ABORT_UNAVAILABLE); + cnt -= unavail_size; + offset += unavail_size; + continue; + } + + upl_system_list_request((vm_object_t) + vs->vs_control_port->ip_kobject, + offset, size, size, &upl, + NULL, 0, request_flags | UPL_SET_INTERNAL); + if(ps->ps_segtype == PS_PARTITION) { +/* + error = ps_read_device(ps, actual_offset, upl, + size, &residual, 0); +*/ + } else { + error = ps_read_file(ps, upl, actual_offset, + size, &residual, 0); + } + + /* + * Adjust counts and send response to VM. Optimize for the + * common case, i.e. no error and/or partial data. 
+ * If there was an error, then we need to error the entire + * range, even if some data was successfully read. + * If there was a partial read we may supply some + * data and may error some as well. In all cases the + * VM must receive some notification for every page in the + * range. + */ + if ((error == KERN_SUCCESS) && (residual == 0)) { + /* + * Got everything we asked for, supply the data to + * the VM. Note that as a side effect of supplying + * the data, the buffer holding the supplied data is + * deallocated from the pager's address space. + */ + pvs_object_data_provided(vs, upl, offset, size); + } else { + size_wanted = size; + if (error == KERN_SUCCESS) { + if (residual == size) { + /* + * If a read operation returns no error + * and no data moved, we turn it into + * an error, assuming we're reading at + * or beyond EOF. + * Fall through and error the entire + * range. + */ + error = KERN_FAILURE; + } else { + /* + * Otherwise, we have partial read. If + * the part read is an integral number + * of pages supply it. Otherwise round + * it up to a page boundary, zero fill + * the unread part, and supply it. + * Fall through and error the remainder + * of the range, if any. + */ + int fill, lsize; + + fill = residual & ~vm_page_size; + lsize = (size - residual) + fill; + pvs_object_data_provided(vs, upl, + offset, lsize); + cnt -= lsize; + offset += lsize; + if (size -= lsize) { + error = KERN_FAILURE; + } + } + } + + /* + * If there was an error in any part of the range, tell + * the VM. Deallocate the remainder of the buffer. + * Note that error is explicitly checked again since + * it can be modified above. 
+ */ + if (error != KERN_SUCCESS) { + BS_STAT(ps->ps_bs, + ps->ps_bs->bs_pages_in_fail += + atop(size)); + } + } + cnt -= size; + offset += size; + + } /* END while (cnt && (error == 0)) */ + return error; +} + +int vs_do_async_write = 1; + +kern_return_t +vs_cluster_write( + vstruct_t vs, + upl_t internal_upl, + vm_offset_t offset, + vm_size_t cnt, + boolean_t dp_internal, + int flags) +{ + vm_offset_t actual_offset; /* Offset within paging segment */ + vm_offset_t size; + vm_offset_t transfer_size; + vm_offset_t subx_size; + int error = 0; + struct clmap clmap; + paging_segment_t ps; + struct vs_async *vsa; + vm_map_copy_t copy; + static char here[] = "vs_cluster_write"; + + upl_t upl; + upl_page_info_t *page_list; + upl_page_info_t pl[20]; + vm_offset_t mobj_base_addr; + vm_offset_t mobj_target_addr; + int mobj_size; + int page_index; + int list_size; + int cl_size; + + + ps = PAGING_SEGMENT_NULL; + + if (!dp_internal) { + int request_flags; + int super_size; + vm_offset_t upl_offset; + + cl_size = (1 << vs->vs_clshift) * vm_page_size; + + if (bs_low) { + super_size = cl_size; + request_flags = UPL_NOBLOCK | + UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | + UPL_NO_SYNC | UPL_SET_INTERNAL; + } else { + super_size = VM_SUPER_CLUSTER; + request_flags = UPL_NOBLOCK | UPL_CLEAN_IN_PLACE | + UPL_RET_ONLY_DIRTY | UPL_COPYOUT_FROM | + UPL_NO_SYNC | UPL_SET_INTERNAL; + } + + + upl_system_list_request((vm_object_t) + vs->vs_control_port->ip_kobject, + offset, cnt, super_size, + &upl, NULL, + 0, request_flags); + + mobj_base_addr = upl->offset; + list_size = upl->size; + + page_list = UPL_GET_INTERNAL_PAGE_LIST(upl); + memcpy(pl, page_list, + sizeof(upl_page_info_t) * (list_size/page_size)); + + /* Now parcel up the 64k transfer, do at most cluster size */ + /* at a time. 
*/ + upl_offset = 0; + page_index = 0; + mobj_target_addr = mobj_base_addr; + + for (transfer_size = list_size; transfer_size != 0;) { + actual_offset = ps_clmap(vs, mobj_target_addr, + &clmap, CL_ALLOC, + transfer_size < cl_size ? + transfer_size : cl_size, 0); + + if (actual_offset == (vm_offset_t) -1) { + for(;transfer_size != 0;) { + if(UPL_PAGE_PRESENT(pl, page_index)) { + uc_upl_abort_range(upl, + upl_offset, + transfer_size, + UPL_ABORT_FREE_ON_EMPTY); + break; + } + transfer_size-=page_size; + upl_offset += vm_page_size; + page_index++; + } + error = 1; + break; + } + cnt = MIN(transfer_size, + CLMAP_NPGS(clmap) * vm_page_size); + ps = CLMAP_PS(clmap); + + while (cnt > 0) { + /* attempt to send entire cluster */ + subx_size = 0; + + while (cnt > 0) { + /* do the biggest contiguous transfer of dirty */ + /* pages */ + if (UPL_DIRTY_PAGE(pl, page_index) || + UPL_PRECIOUS_PAGE(pl, page_index)){ + page_index++; + subx_size += vm_page_size; + cnt -= vm_page_size; + } else { + if (subx_size == 0) { + actual_offset += vm_page_size; + mobj_target_addr += vm_page_size; + + if(UPL_PAGE_PRESENT(pl, page_index)) { + uc_upl_commit_range(upl, + upl_offset, + vm_page_size, + TRUE, pl); + } + + upl_offset += vm_page_size; + transfer_size -= vm_page_size; + page_index++; + cnt -= vm_page_size; + } else { + break; + } + } + } + if (subx_size) { + + error = ps_write_file(ps, upl, upl_offset, + actual_offset, subx_size, flags); + if (error) { + actual_offset += subx_size; + mobj_target_addr += subx_size; + upl_offset += subx_size; + transfer_size -= subx_size; + + for(;transfer_size != 0;) { + if(UPL_PAGE_PRESENT(pl, page_index)) { + uc_upl_abort_range(upl, + upl_offset, + transfer_size, + UPL_ABORT_FREE_ON_EMPTY); + break; + } + transfer_size-=page_size; + upl_offset += vm_page_size; + page_index++; + } + break; + } + + ps_vs_write_complete(vs, mobj_target_addr, + subx_size, error); + } + actual_offset += subx_size; + mobj_target_addr += subx_size; + upl_offset += subx_size; 
+ + transfer_size -= subx_size; + subx_size = 0; + } + if (error) + break; + } + } else { + assert(cnt <= (vm_page_size << vs->vs_clshift)); + list_size = cnt; + + page_index = 0; + /* The caller provides a mapped_data which is derived */ + /* from a temporary object. The targeted pages are */ + /* guaranteed to be set at offset 0 in the mapped_data */ + /* The actual offset however must still be derived */ + /* from the offset in the vs in question */ + mobj_base_addr = offset; + mobj_target_addr = mobj_base_addr; + + for (transfer_size = list_size; transfer_size != 0;) { + actual_offset = ps_clmap(vs, mobj_target_addr, + &clmap, CL_ALLOC, + transfer_size < cl_size ? + transfer_size : cl_size, 0); + if(actual_offset == (vm_offset_t) -1) { + error = 1; + break; + } + cnt = MIN(transfer_size, + CLMAP_NPGS(clmap) * vm_page_size); + ps = CLMAP_PS(clmap); + /* Assume that the caller has given us contiguous */ + /* pages */ + if(cnt) { + error = ps_write_file(ps, internal_upl, + 0, actual_offset, + cnt, flags); + if (error) + break; + ps_vs_write_complete(vs, mobj_target_addr, + cnt, error); + } + if (error) + break; + actual_offset += cnt; + mobj_target_addr += cnt; + transfer_size -= cnt; + cnt = 0; + + if (error) + break; + } + } + if(error) + return KERN_FAILURE; + else + return KERN_SUCCESS; +} + +vm_size_t +ps_vstruct_allocated_size( + vstruct_t vs) +{ + int num_pages; + struct vs_map *vsmap; + int i, j, k; + + num_pages = 0; + if (vs->vs_indirect) { + /* loop on indirect maps */ + for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) { + vsmap = vs->vs_imap[i]; + if (vsmap == NULL) + continue; + /* loop on clusters in this indirect map */ + for (j = 0; j < CLMAP_ENTRIES; j++) { + if (VSM_ISCLR(vsmap[j]) || + VSM_ISERR(vsmap[j])) + continue; + /* loop on pages in this cluster */ + for (k = 0; k < VSCLSIZE(vs); k++) { + if ((VSM_BMAP(vsmap[j])) & (1 << k)) + num_pages++; + } + } + } + } else { + vsmap = vs->vs_dmap; + if (vsmap == NULL) + return 0; + /* loop on 
clusters in the direct map */ + for (j = 0; j < CLMAP_ENTRIES; j++) { + if (VSM_ISCLR(vsmap[j]) || + VSM_ISERR(vsmap[j])) + continue; + /* loop on pages in this cluster */ + for (k = 0; k < VSCLSIZE(vs); k++) { + if ((VSM_BMAP(vsmap[j])) & (1 << k)) + num_pages++; + } + } + } + + return ptoa(num_pages); +} + +size_t +ps_vstruct_allocated_pages( + vstruct_t vs, + default_pager_page_t *pages, + size_t pages_size) +{ + int num_pages; + struct vs_map *vsmap; + vm_offset_t offset; + int i, j, k; + + num_pages = 0; + offset = 0; + if (vs->vs_indirect) { + /* loop on indirect maps */ + for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) { + vsmap = vs->vs_imap[i]; + if (vsmap == NULL) { + offset += (vm_page_size * CLMAP_ENTRIES * + VSCLSIZE(vs)); + continue; + } + /* loop on clusters in this indirect map */ + for (j = 0; j < CLMAP_ENTRIES; j++) { + if (VSM_ISCLR(vsmap[j]) || + VSM_ISERR(vsmap[j])) { + offset += vm_page_size * VSCLSIZE(vs); + continue; + } + /* loop on pages in this cluster */ + for (k = 0; k < VSCLSIZE(vs); k++) { + if ((VSM_BMAP(vsmap[j])) & (1 << k)) { + num_pages++; + if (num_pages < pages_size) + pages++->dpp_offset = + offset; + } + offset += vm_page_size; + } + } + } + } else { + vsmap = vs->vs_dmap; + if (vsmap == NULL) + return 0; + /* loop on clusters in the direct map */ + for (j = 0; j < CLMAP_ENTRIES; j++) { + if (VSM_ISCLR(vsmap[j]) || + VSM_ISERR(vsmap[j])) { + offset += vm_page_size * VSCLSIZE(vs); + continue; + } + /* loop on pages in this cluster */ + for (k = 0; k < VSCLSIZE(vs); k++) { + if ((VSM_BMAP(vsmap[j])) & (1 << k)) { + num_pages++; + if (num_pages < pages_size) + pages++->dpp_offset = offset; + } + offset += vm_page_size; + } + } + } + + return num_pages; +} + + +kern_return_t +ps_vstruct_transfer_from_segment( + vstruct_t vs, + paging_segment_t segment, +#ifndef ubc_sync_working + vm_object_t transfer_object) +#else + upl_t upl) +#endif +{ + struct vs_map *vsmap; + struct vs_map old_vsmap; + struct vs_map new_vsmap; + 
int i, j, k; + + VS_LOCK(vs); /* block all work on this vstruct */ + /* can't allow the normal multiple write */ + /* semantic because writes may conflict */ + vs->vs_xfer_pending = TRUE; + vs_wait_for_sync_writers(vs); + vs_start_write(vs); + vs_wait_for_readers(vs); + /* we will unlock the vs to allow other writes while transferring */ + /* and will be guaranteed of the persistence of the vs struct */ + /* because the caller of ps_vstruct_transfer_from_segment bumped */ + /* vs_async_pending */ + /* OK we now have guaranteed no other parties are accessing this */ + /* vs. Now that we are also supporting simple lock versions of */ + /* vs_lock we cannot hold onto VS_LOCK as we may block below. */ + /* our purpose in holding it before was the multiple write case */ + /* we now use the boolean xfer_pending to do that. We can use */ + /* a boolean instead of a count because we have guaranteed single */ + /* file access to this code in its caller */ + VS_UNLOCK(vs); +vs_changed: + if (vs->vs_indirect) { + int vsmap_size; + int clmap_off; + /* loop on indirect maps */ + for (i = 0; i < INDIRECT_CLMAP_ENTRIES(vs->vs_size); i++) { + vsmap = vs->vs_imap[i]; + if (vsmap == NULL) + continue; + /* loop on clusters in this indirect map */ + clmap_off = (vm_page_size * CLMAP_ENTRIES * + VSCLSIZE(vs) * i); + if(i+1 == INDIRECT_CLMAP_ENTRIES(vs->vs_size)) + vsmap_size = vs->vs_size - (CLMAP_ENTRIES * i); + else + vsmap_size = CLMAP_ENTRIES; + for (j = 0; j < vsmap_size; j++) { + if (VSM_ISCLR(vsmap[j]) || + VSM_ISERR(vsmap[j]) || + (VSM_PS(vsmap[j]) != segment)) + continue; + if(vs_cluster_transfer(vs, + (vm_page_size * (j << vs->vs_clshift)) + + clmap_off, + vm_page_size << vs->vs_clshift, +#ifndef ubc_sync_working + transfer_object) +#else + upl) +#endif + != KERN_SUCCESS) { + VS_LOCK(vs); + vs->vs_xfer_pending = FALSE; + VS_UNLOCK(vs); + vs_finish_write(vs); + return KERN_FAILURE; + } + /* allow other readers/writers during transfer*/ + VS_LOCK(vs); + vs->vs_xfer_pending = 
FALSE; + VS_UNLOCK(vs); + vs_finish_write(vs); + VS_LOCK(vs); + vs->vs_xfer_pending = TRUE; + VS_UNLOCK(vs); + vs_wait_for_sync_writers(vs); + vs_start_write(vs); + vs_wait_for_readers(vs); + if (!(vs->vs_indirect)) { + goto vs_changed; + } + } + } + } else { + vsmap = vs->vs_dmap; + if (vsmap == NULL) { + VS_LOCK(vs); + vs->vs_xfer_pending = FALSE; + VS_UNLOCK(vs); + vs_finish_write(vs); + return KERN_SUCCESS; + } + /* loop on clusters in the direct map */ + for (j = 0; j < vs->vs_size; j++) { + if (VSM_ISCLR(vsmap[j]) || + VSM_ISERR(vsmap[j]) || + (VSM_PS(vsmap[j]) != segment)) + continue; + if(vs_cluster_transfer(vs, + vm_page_size * (j << vs->vs_clshift), + vm_page_size << vs->vs_clshift, +#ifndef ubc_sync_working + transfer_object) != KERN_SUCCESS) { +#else + upl) != KERN_SUCCESS) { +#endif + VS_LOCK(vs); + vs->vs_xfer_pending = FALSE; + VS_UNLOCK(vs); + vs_finish_write(vs); + return KERN_FAILURE; + } + /* allow other readers/writers during transfer*/ + VS_LOCK(vs); + vs->vs_xfer_pending = FALSE; + VS_UNLOCK(vs); + vs_finish_write(vs); + VS_LOCK(vs); + vs->vs_xfer_pending = TRUE; + VS_UNLOCK(vs); + vs_wait_for_sync_writers(vs); + vs_start_write(vs); + vs_wait_for_readers(vs); + if (vs->vs_indirect) { + goto vs_changed; + } + } + } + + VS_LOCK(vs); + vs->vs_xfer_pending = FALSE; + VS_UNLOCK(vs); + vs_finish_write(vs); + return KERN_SUCCESS; +} + + + +vs_map_t +vs_get_map_entry( + vstruct_t vs, + vm_offset_t offset) +{ + struct vs_map *vsmap; + vm_offset_t cluster; + + cluster = atop(offset) >> vs->vs_clshift; + if (vs->vs_indirect) { + long ind_block = cluster/CLMAP_ENTRIES; + + /* Is the indirect block allocated? 
*/ + vsmap = vs->vs_imap[ind_block]; + if(vsmap == (vs_map_t) NULL) + return vsmap; + } else + vsmap = vs->vs_dmap; + vsmap += cluster%CLMAP_ENTRIES; + return vsmap; +} + +kern_return_t +vs_cluster_transfer( + vstruct_t vs, + vm_offset_t offset, + vm_size_t cnt, +#ifndef ubc_sync_working + vm_object_t transfer_object) +#else + upl_t upl) +#endif +{ + vm_offset_t actual_offset; + paging_segment_t ps; + struct clmap clmap; + kern_return_t error = KERN_SUCCESS; + int size, size_wanted, i; + unsigned int residual; + int unavail_size; + default_pager_thread_t *dpt; + boolean_t dealloc; + struct vs_map *vsmap_ptr; + struct vs_map read_vsmap; + struct vs_map original_read_vsmap; + struct vs_map write_vsmap; + upl_t sync_upl; +#ifndef ubc_sync_working + upl_t upl; +#endif + + vm_offset_t ioaddr; + + static char here[] = "vs_cluster_transfer"; + + /* vs_cluster_transfer reads in the pages of a cluster and + * then writes these pages back to new backing store. The + * segment the pages are being read from is assumed to have + * been taken off-line and is no longer considered for new + * space requests. + */ + + /* + * This loop will be executed once per cluster referenced. + * Typically this means once, since it's unlikely that the + * VM system will ask for anything spanning cluster boundaries. + * + * If there are holes in a cluster (in a paging segment), we stop + * reading at the hole, then loop again, hoping to + * find valid pages later in the cluster. This continues until + * the entire range has been examined, and read, if present. The + * pages are written as they are read. If a failure occurs after + * some pages are written the unmap call at the bottom of the loop + * recovers the backing store and the old backing store remains + * in effect. 
+ */ + + /* uc_upl_map(kernel_map, upl, &ioaddr); */ + + VSM_CLR(write_vsmap); + VSM_CLR(original_read_vsmap); + /* grab the actual object's pages to sync with I/O */ + while (cnt && (error == KERN_SUCCESS)) { + vsmap_ptr = vs_get_map_entry(vs, offset); + actual_offset = ps_clmap(vs, offset, &clmap, CL_FIND, 0, 0); + + if (actual_offset == (vm_offset_t) -1) { + + /* + * Nothing left to write in this cluster at least + * set write cluster information for any previous + * write, clear for next cluster, if there is one + */ + unsigned int local_size, clmask, clsize; + + clsize = vm_page_size << vs->vs_clshift; + clmask = clsize - 1; + local_size = clsize - (offset & clmask); + ASSERT(local_size); + local_size = MIN(local_size, cnt); + + /* This cluster has no data in it beyond what may */ + /* have been found on a previous iteration through */ + /* the loop "write_vsmap" */ + *vsmap_ptr = write_vsmap; + VSM_CLR(write_vsmap); + VSM_CLR(original_read_vsmap); + + cnt -= local_size; + offset += local_size; + continue; + } + + /* + * Count up contiguous available or unavailable + * pages. 
+ */ + ps = CLMAP_PS(clmap); + ASSERT(ps); + size = 0; + unavail_size = 0; + for (i = 0; + (size < cnt) && (unavail_size < cnt) && + (i < CLMAP_NPGS(clmap)); i++) { + if (CLMAP_ISSET(clmap, i)) { + if (unavail_size != 0) + break; + size += vm_page_size; + BS_STAT(ps->ps_bs, + ps->ps_bs->bs_pages_in++); + } else { + if (size != 0) + break; + unavail_size += vm_page_size; + } + } + + if (size == 0) { + ASSERT(unavail_size); + cnt -= unavail_size; + offset += unavail_size; + if((offset & ((vm_page_size << vs->vs_clshift) - 1)) + == 0) { + /* There is no more to transfer in this + cluster + */ + *vsmap_ptr = write_vsmap; + VSM_CLR(write_vsmap); + VSM_CLR(original_read_vsmap); + } + continue; + } + + if(VSM_ISCLR(original_read_vsmap)) + original_read_vsmap = *vsmap_ptr; + + if(ps->ps_segtype == PS_PARTITION) { +/* + NEED TO BE WITH SYNC & NO COMMIT + error = ps_read_device(ps, actual_offset, &buffer, + size, &residual, flags); +*/ + } else { +#ifndef ubc_sync_working + error = vm_fault_list_request(transfer_object, +(vm_object_offset_t) (actual_offset & ((vm_page_size << vs->vs_clshift) - 1)), + size, &upl, NULL, + 0, UPL_NO_SYNC | UPL_CLEAN_IN_PLACE + | UPL_SET_INTERNAL); + if (error == KERN_SUCCESS) { + error = ps_read_file(ps, upl, actual_offset, + size, &residual, 0); + if(error) + uc_upl_commit(upl, NULL); + } + +#else + /* NEED TO BE WITH SYNC & NO COMMIT & NO RDAHEAD*/ + error = ps_read_file(ps, upl, actual_offset, + size, &residual, + (UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)); +#endif + } + + read_vsmap = *vsmap_ptr; + + + /* + * Adjust counts and put data in new BS. Optimize for the + * common case, i.e. no error and/or partial data. + * If there was an error, then we need to error the entire + * range, even if some data was successfully read. + * + */ + if ((error == KERN_SUCCESS) && (residual == 0)) { + /* + * Got everything we asked for, supply the data to + * the new BS. 
Note that as a side effect of supplying + * the data, the buffer holding the supplied data is + * deallocated from the pager's address space unless + * the write is unsuccessful. + */ + + /* note buffer will be cleaned up in all cases by */ + /* internal_cluster_write or if an error on write */ + /* the vm_map_copy_page_discard call */ + *vsmap_ptr = write_vsmap; + +#ifndef ubc_sync_working + error = vm_fault_list_request(transfer_object, +(vm_object_offset_t) (actual_offset & ((vm_page_size << vs->vs_clshift) - 1)), + size, &upl, NULL, + 0, UPL_NO_SYNC | UPL_CLEAN_IN_PLACE + | UPL_SET_INTERNAL); + if(vs_cluster_write(vs, upl, offset, + size, TRUE, 0) != KERN_SUCCESS) { + uc_upl_commit(upl, NULL); +#else + if(vs_cluster_write(vs, upl, offset, + size, TRUE, UPL_IOSYNC | UPL_NOCOMMIT ) != KERN_SUCCESS) { +#endif + error = KERN_FAILURE; + if(!(VSM_ISCLR(*vsmap_ptr))) { + /* unmap the new backing store object */ + ps_clunmap(vs, offset, size); + } + /* original vsmap */ + *vsmap_ptr = original_read_vsmap; + VSM_CLR(write_vsmap); + } else { + if((offset + size) & + ((vm_page_size << vs->vs_clshift) + - 1)) { + /* There is more to transfer in this + cluster + */ + write_vsmap = *vsmap_ptr; + *vsmap_ptr = read_vsmap; + } else { + /* discard the old backing object */ + write_vsmap = *vsmap_ptr; + *vsmap_ptr = read_vsmap; + ps_clunmap(vs, offset, size); + *vsmap_ptr = write_vsmap; + VSM_CLR(write_vsmap); + VSM_CLR(original_read_vsmap); + } + } + } else { + size_wanted = size; + if (error == KERN_SUCCESS) { + if (residual == size) { + /* + * If a read operation returns no error + * and no data moved, we turn it into + * an error, assuming we're reading at + * or beyond EOF. + * Fall through and error the entire + * range. 
+ */ + error = KERN_FAILURE; + *vsmap_ptr = write_vsmap; + if(!(VSM_ISCLR(*vsmap_ptr))) { + /* unmap the new backing store object */ + ps_clunmap(vs, offset, size); + } + *vsmap_ptr = original_read_vsmap; + VSM_CLR(write_vsmap); + continue; + } else { + /* + * Otherwise, we have partial read. + * This is also considered an error + * for the purposes of cluster transfer + */ + error = KERN_FAILURE; + *vsmap_ptr = write_vsmap; + if(!(VSM_ISCLR(*vsmap_ptr))) { + /* unmap the new backing store object */ + ps_clunmap(vs, offset, size); + } + *vsmap_ptr = original_read_vsmap; + VSM_CLR(write_vsmap); + continue; + } + } + + } + cnt -= size; + offset += size; + + } /* END while (cnt && (error == 0)) */ + if(!VSM_ISCLR(write_vsmap)) + *vsmap_ptr = write_vsmap; + + /* uc_upl_un_map(kernel_map, upl); */ + return error; +} + +kern_return_t +default_pager_add_file(MACH_PORT_FACE backing_store, + int *vp, + int record_size, + long size) +{ + backing_store_t bs; + paging_segment_t ps; + int i; + int error; + static char here[] = "default_pager_add_file"; + + if ((bs = backing_store_lookup(backing_store)) + == BACKING_STORE_NULL) + return KERN_INVALID_ARGUMENT; + + PSL_LOCK(); + for (i = 0; i <= paging_segment_max; i++) { + ps = paging_segments[i]; + if (ps == PAGING_SEGMENT_NULL) + continue; + if (ps->ps_segtype != PS_FILE) + continue; + + /* + * Check for overlap on same device. 
+ */ + if (ps->ps_vnode == (struct vnode *)vp) { + PSL_UNLOCK(); + BS_UNLOCK(bs); + return KERN_INVALID_ARGUMENT; + } + } + PSL_UNLOCK(); + + /* + * Set up the paging segment + */ + ps = (paging_segment_t) kalloc(sizeof (struct paging_segment)); + if (ps == PAGING_SEGMENT_NULL) { + BS_UNLOCK(bs); + return KERN_RESOURCE_SHORTAGE; + } + + ps->ps_segtype = PS_FILE; + ps->ps_vnode = (struct vnode *)vp; + ps->ps_offset = 0; + ps->ps_record_shift = local_log2(vm_page_size / record_size); + ps->ps_recnum = size; + ps->ps_pgnum = size >> ps->ps_record_shift; + + ps->ps_pgcount = ps->ps_pgnum; + ps->ps_clshift = local_log2(bs->bs_clsize); + ps->ps_clcount = ps->ps_ncls = ps->ps_pgcount >> ps->ps_clshift; + ps->ps_hint = 0; + + PS_LOCK_INIT(ps); + ps->ps_bmap = (unsigned char *) kalloc(RMAPSIZE(ps->ps_ncls)); + if (!ps->ps_bmap) { + kfree((vm_offset_t)ps, sizeof *ps); + BS_UNLOCK(bs); + return KERN_RESOURCE_SHORTAGE; + } + for (i = 0; i < ps->ps_ncls; i++) { + clrbit(ps->ps_bmap, i); + } + + ps->ps_going_away = FALSE; + ps->ps_bs = bs; + + if ((error = ps_enter(ps)) != 0) { + kfree((vm_offset_t)ps->ps_bmap, RMAPSIZE(ps->ps_ncls)); + kfree((vm_offset_t)ps, sizeof *ps); + BS_UNLOCK(bs); + return KERN_RESOURCE_SHORTAGE; + } + + bs->bs_pages_free += ps->ps_clcount << ps->ps_clshift; + bs->bs_pages_total += ps->ps_clcount << ps->ps_clshift; + PSL_LOCK(); + dp_pages_free += ps->ps_pgcount; + PSL_UNLOCK(); + + BS_UNLOCK(bs); + + bs_more_space(ps->ps_clcount); + + DEBUG(DEBUG_BS_INTERNAL, + ("device=0x%x,offset=0x%x,count=0x%x,record_size=0x%x,shift=%d,total_size=0x%x\n", + device, offset, size, record_size, + ps->ps_record_shift, ps->ps_pgnum)); + + return KERN_SUCCESS; +} + + + +kern_return_t ps_read_file(paging_segment_t, upl_t, vm_offset_t, unsigned int, unsigned int *, int); /* forward */ + +kern_return_t +ps_read_file( + paging_segment_t ps, + upl_t upl, + vm_offset_t offset, + unsigned int size, + unsigned int *residualp, + int flags) +{ + vm_object_offset_t f_offset; + int 
error = 0; + int result; + static char here[] = "ps_read_file"; + + + clustered_reads[atop(size)]++; + + f_offset = (vm_object_offset_t)(ps->ps_offset + offset); + + /* for transfer case we need to pass uploffset and flags */ + error = vnode_pagein(ps->ps_vnode, + upl, (vm_offset_t)0, f_offset, (vm_size_t)size, flags, NULL); + + /* The vnode_pagein semantic is somewhat at odds with the existing */ + /* device_read semantic. Partial reads are not experienced at this */ + /* level. It is up to the bit map code and cluster read code to */ + /* check that requested data locations are actually backed, and the */ + /* pagein code to either read all of the requested data or return an */ + /* error. */ + + if (error) + result = KERN_FAILURE; + else { + *residualp = 0; + result = KERN_SUCCESS; + } + return result; + +} + +kern_return_t +ps_write_file( + paging_segment_t ps, + upl_t upl, + vm_offset_t upl_offset, + vm_offset_t offset, + unsigned int size, + int flags) +{ + vm_object_offset_t f_offset; + kern_return_t result; + static char here[] = "ps_write_file"; + + int error = 0; + + clustered_writes[atop(size)]++; + f_offset = (vm_object_offset_t)(ps->ps_offset + offset); + + if (vnode_pageout(ps->ps_vnode, + upl, upl_offset, f_offset, (vm_size_t)size, flags, NULL)) + result = KERN_FAILURE; + else + result = KERN_SUCCESS; + + return result; +} + +kern_return_t +default_pager_triggers(MACH_PORT_FACE default_pager, + int hi_wat, + int lo_wat, + int flags, + MACH_PORT_FACE trigger_port) +{ + + if(flags & HI_WAT_ALERT) { + if(min_pages_trigger_port) + ipc_port_release_send(min_pages_trigger_port); + min_pages_trigger_port = trigger_port; + minimum_pages_remaining = hi_wat/vm_page_size; + bs_low = FALSE; + } + if(flags & LO_WAT_ALERT) { + if(max_pages_trigger_port) + ipc_port_release_send(max_pages_trigger_port); + max_pages_trigger_port = trigger_port; + maximum_pages_free = lo_wat/vm_page_size; + } +} diff --git a/osfmk/default_pager/dp_memory_object.c 
b/osfmk/default_pager/dp_memory_object.c new file mode 100644 index 000000000..6ab4c91f9 --- /dev/null +++ b/osfmk/default_pager/dp_memory_object.c @@ -0,0 +1,1447 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Default Pager. + * Memory Object Management. + */ + +#include "default_pager_internal.h" +#include +#include /* include for upl_t */ + + +/* + * List of all vstructs. A specific vstruct is + * found directly via its port, this list is + * only used for monitoring purposes by the + * default_pager_object* calls and by ps_delete + * when abstract memory objects must be scanned + * to remove any live storage on a segment which + * is to be removed. + */ +struct vstruct_list_head vstruct_list; + +void vstruct_list_insert(vstruct_t vs); /* forward */ + +void +vstruct_list_insert( + vstruct_t vs) +{ + VSL_LOCK(); + queue_enter(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links); + vstruct_list.vsl_count++; + VSL_UNLOCK(); +} + +void vstruct_list_delete(vstruct_t vs); /* forward */ + +void +vstruct_list_delete( + vstruct_t vs) +{ + queue_remove(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links); + vstruct_list.vsl_count--; +} + +/* + * We use the sequence numbers on requests to regulate + * our parallelism. In general, we allow multiple reads and writes + * to proceed in parallel, with the exception that reads must + * wait for previous writes to finish. (Because the kernel might + * generate a data-request for a page on the heels of a data-write + * for the same page, and we must avoid returning stale data.) + * terminate requests wait for proceeding reads and writes to finish. 
+ */ + +unsigned int default_pager_total = 0; /* debugging */ +unsigned int default_pager_wait_seqno = 0; /* debugging */ +unsigned int default_pager_wait_read = 0; /* debugging */ +unsigned int default_pager_wait_write = 0; /* debugging */ +unsigned int default_pager_wait_refs = 0; /* debugging */ + +void vs_async_wait(vstruct_t); /* forward */ + +void +vs_async_wait( + vstruct_t vs) +{ + static char here[] = "vs_async_wait"; + + ASSERT(vs->vs_async_pending >= 0); + while (vs->vs_async_pending > 0) { + vs->vs_waiting_async = TRUE; + assert_wait(&vs->vs_waiting_async, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + } + ASSERT(vs->vs_async_pending == 0); +} + +#if PARALLEL +void vs_lock(vstruct_t, mach_port_seqno_t); +void vs_unlock(vstruct_t); +void vs_start_read(vstruct_t); +void vs_wait_for_readers(vstruct_t); +void vs_finish_read(vstruct_t); +void vs_start_write(vstruct_t); +void vs_wait_for_writers(vstruct_t); +void vs_finish_write(vstruct_t); +void vs_wait_for_refs(vstruct_t); +void vs_finish_refs(vstruct_t); + +/* + * Waits for correct sequence number. Leaves pager locked. + * JMM - Sequence numbers guarantee ordering, but in a preemptible + * kernel, they are generated without locks, and so their + * generation order is undefined (and therefore unreliable). + * Since we ned to fix this anyway, and I needed to get rid + * rid of asymmetry in the interface definitions, I have + * punted this to here. + */ +void +vs_lock( + vstruct_t vs, + mach_port_seqno_t seqno) +{ + default_pager_total++; + VS_LOCK(vs); + + seqno = vs->vs_next_seqno++; + + while (vs->vs_seqno != seqno) { + default_pager_wait_seqno++; + vs->vs_waiting_seqno = TRUE; + assert_wait(&vs->vs_waiting_seqno, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + } +} + +/* + * Increments sequence number and unlocks pager. 
+ */ +void +vs_unlock(vstruct_t vs) +{ + boolean_t need_wakeups = vs->vs_waiting_seqno; + + vs->vs_waiting_seqno = FALSE; + vs->vs_seqno++; + VS_UNLOCK(vs); + if (need_wakeups) + thread_wakeup(&vs->vs_waiting_seqno); +} + +/* + * Start a read - one more reader. Pager must be locked. + */ +void +vs_start_read( + vstruct_t vs) +{ + vs->vs_readers++; +} + +/* + * Wait for readers. Unlocks and relocks pager if wait needed. + */ +void +vs_wait_for_readers( + vstruct_t vs) +{ + while (vs->vs_readers != 0) { + default_pager_wait_read++; + vs->vs_waiting_read = TRUE; + assert_wait(&vs->vs_waiting_read, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + } +} + +/* + * Finish a read. Pager is unlocked and returns unlocked. + */ +void +vs_finish_read( + vstruct_t vs) +{ + VS_LOCK(vs); + if (--vs->vs_readers == 0) { + boolean_t need_wakeups = vs->vs_waiting_read; + + vs->vs_waiting_read = FALSE; + VS_UNLOCK(vs); + if (need_wakeups) + thread_wakeup(&vs->vs_waiting_read); + } else + VS_UNLOCK(vs); +} + +/* + * Start a write - one more writer. Pager must be locked. + */ +void +vs_start_write( + vstruct_t vs) +{ + vs->vs_writers++; +} + +/* + * Wait for writers. Unlocks and relocks pager if wait needed. + */ +void +vs_wait_for_writers( + vstruct_t vs) +{ + while (vs->vs_writers != 0) { + default_pager_wait_write++; + vs->vs_waiting_write = TRUE; + assert_wait(&vs->vs_waiting_write, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + } + vs_async_wait(vs); +} + +/* This is to be used for the transfer from segment code ONLY */ +/* The transfer code holds off vs destruction by keeping the */ +/* vs_async_wait count non-zero. 
It will not ocnflict with */ +/* other writers on an async basis because it only writes on */ +/* a cluster basis into fresh (as of sync time) cluster locations */ +void +vs_wait_for_sync_writers( + vstruct_t vs) +{ + while (vs->vs_writers != 0) { + default_pager_wait_write++; + vs->vs_waiting_write = TRUE; + assert_wait(&vs->vs_waiting_write, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + } +} + + +/* + * Finish a write. Pager is unlocked and returns unlocked. + */ +void +vs_finish_write( + vstruct_t vs) +{ + VS_LOCK(vs); + if (--vs->vs_writers == 0) { + boolean_t need_wakeups = vs->vs_waiting_write; + + vs->vs_waiting_write = FALSE; + VS_UNLOCK(vs); + if (need_wakeups) + thread_wakeup(&vs->vs_waiting_write); + } else + VS_UNLOCK(vs); +} + +/* + * Wait for concurrent default_pager_objects. + * Unlocks and relocks pager if wait needed. + */ +void +vs_wait_for_refs( + vstruct_t vs) +{ + while (vs->vs_name_refs == 0) { + default_pager_wait_refs++; + vs->vs_waiting_refs = TRUE; + assert_wait(&vs->vs_waiting_refs, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + } +} + +/* + * Finished creating name refs - wake up waiters. + */ +void +vs_finish_refs( + vstruct_t vs) +{ + boolean_t need_wakeups = vs->vs_waiting_refs; + vs->vs_waiting_refs = FALSE; + if (need_wakeups) + thread_wakeup(&vs->vs_waiting_refs); +} + +#else /* PARALLEL */ + +#define vs_lock(vs,seqno) +#define vs_unlock(vs) +#define vs_start_read(vs) +#define vs_wait_for_readers(vs) +#define vs_finish_read(vs) +#define vs_start_write(vs) +#define vs_wait_for_writers(vs) +#define vs_wait_for_sync_writers(vs) +#define vs_finish_write(vs) +#define vs_wait_for_refs(vs) +#define vs_finish_refs(vs) + +#endif /* PARALLEL */ + +vstruct_t vs_object_create(vm_size_t); /* forward */ + +vstruct_t +vs_object_create( + vm_size_t size) +{ + vstruct_t vs; + static char here[] = "vs_object_create"; + + /* + * Allocate a vstruct. 
If there are any problems, then report them + * to the console. + */ + vs = ps_vstruct_create(size); + if (vs == VSTRUCT_NULL) { + dprintf(("vs_object_create: unable to allocate %s\n", + "-- either run swapon command or reboot")); + return VSTRUCT_NULL; + } + + return vs; +} + +mach_port_urefs_t default_pager_max_urefs = 10000; + +/* + * Check user reference count on memory object control port. + * Vstruct must be locked. + * Unlocks and re-locks vstruct if needs to call kernel. + */ +void vs_check_request(vstruct_t, MACH_PORT_FACE); /* forward */ + +void +vs_check_request( + vstruct_t vs, + MACH_PORT_FACE control_port) +{ + mach_port_delta_t delta; + kern_return_t kr; + static char here[] = "vs_check_request"; + + if (++vs->vs_control_refs > default_pager_max_urefs) { + delta = 1 - vs->vs_control_refs; + vs->vs_control_refs = 1; + + VS_UNLOCK(vs); + + /* + * Deallocate excess user references. + */ + + { +/* find a better interface for this, what will we use as a component */ + int i; + delta = -delta; + for(i=0; ivs_mem_obj_port; + MACH_PORT_FACE pset; + mach_port_mscount_t sync; + MACH_PORT_FACE previous; + kern_return_t kr; + static char here[] = "default_pager_add"; + + /* + * The port currently has a make-send count of zero, + * because either we just created the port or we just + * received the port in a memory_object_create request. + */ + + if (internal) { + /* possibly generate an immediate no-senders notification */ + sync = 0; + pset = default_pager_internal_set; + } else { + /* delay notification till send right is created */ + sync = 1; + pset = default_pager_external_set; + } + + ipc_port_make_sonce(mem_obj); + ip_lock(mem_obj); /* unlocked in nsrequest below */ + ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous); +} + + +/* + * Routine: dp_memory_object_create + * Purpose: + * Handle requests for memory objects from the + * kernel. 
+ * Notes: + * Because we only give out the default memory + * manager port to the kernel, we don't have to + * be so paranoid about the contents. + */ +kern_return_t +dp_memory_object_create( + MACH_PORT_FACE dmm, + MACH_PORT_FACE *new_mem_obj, + vm_size_t new_size) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + MACH_PORT_FACE pager; + static char here[] = "memory_object_create"; + + assert(dmm == default_pager_default_port); + + vs = vs_object_create(new_size); + if (vs == VSTRUCT_NULL) + return KERN_RESOURCE_SHORTAGE; + + pager = *new_mem_obj = ipc_port_alloc_kernel(); + assert (pager != IP_NULL); + (void) ipc_port_make_send(pager); + + { + struct vstruct_alias *alias_struct; + + alias_struct = (struct vstruct_alias *) + kalloc(sizeof(struct vstruct_alias)); + if(alias_struct != NULL) { + alias_struct->vs = vs; + alias_struct->name = ISVS; + pager->alias = (int) alias_struct; + } + else Panic("Out of kernel memory"); + + /* JMM - Add binding to this pager under components */ + pager_mux_hash_insert(pager, &dp_memory_object_subsystem); + vs->vs_next_seqno = 0; + pager->ip_receiver = ipc_space_kernel; + } + + /* + * Set up associations between this port + * and this default_pager structure + */ + + vs->vs_mem_obj_port = pager; + + /* + * After this, other threads might receive requests + * for this memory object or find it in the port list. 
+ */ + + vstruct_list_insert(vs); + default_pager_add(vs, TRUE); + + return KERN_SUCCESS; +} + +kern_return_t +dp_memory_object_init( + MACH_PORT_FACE mem_obj, + MACH_PORT_FACE control_port, + vm_size_t pager_page_size) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + static char here[] = "memory_object_init"; + + assert(pager_page_size == vm_page_size); + + vs_lookup(mem_obj, vs); + vs_lock(vs, seqno); + + if (vs->vs_control_port != MACH_PORT_NULL) + Panic("bad request"); + + vs->vs_control_port = control_port; + vs->vs_control_refs = 1; + vs->vs_object_name = MACH_PORT_NULL; + vs->vs_name_refs = 1; + + vs_unlock(vs); + + return KERN_SUCCESS; +} + +kern_return_t +dp_memory_object_synchronize( + MACH_PORT_FACE mem_obj, + MACH_PORT_FACE control_port, + vm_object_offset_t offset, + vm_offset_t length, + vm_sync_t flags) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + static char here[] = "memory_object_synchronize"; + + vs_lookup(mem_obj, vs); + vs_lock(vs, seqno); + vs_check_request(vs, control_port); + vs_unlock(vs); + + memory_object_synchronize_completed( + vm_object_lookup(control_port), + offset, length); + + return KERN_SUCCESS; +} + +kern_return_t +dp_memory_object_terminate( + MACH_PORT_FACE mem_obj, + MACH_PORT_FACE control_port) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + mach_port_urefs_t request_refs; + kern_return_t kr; + static char here[] = "memory_object_terminate"; + + /* + * control port is a receive right, not a send right. + */ + + vs_lookup(mem_obj, vs); + vs_lock(vs, seqno); + + /* + * Wait for read and write requests to terminate. + */ + + vs_wait_for_readers(vs); + vs_wait_for_writers(vs); + + /* + * After memory_object_terminate both memory_object_init + * and a no-senders notification are possible, so we need + * to clean up the request and name ports but leave + * the mem_obj port. + * + * A concurrent default_pager_objects might be allocating + * more references for the name port. In this case, + * we must first wait for it to finish. 
+ */ + + vs_wait_for_refs(vs); + + vs->vs_control_port = MACH_PORT_NULL; + + /* a bit of special case ugliness here. Wakeup any waiting reads */ + /* these data requests had to be removed from the seqno traffic */ + /* based on a performance bottleneck with large memory objects */ + /* the problem will right itself with the new component based */ + /* synchronous interface. The new async will be able to return */ + /* failure during its sync phase. In the mean time ... */ + + thread_wakeup(&vs->vs_waiting_write); + thread_wakeup(&vs->vs_waiting_async); + + request_refs = vs->vs_control_refs; + vs->vs_control_refs = 0; + + vs->vs_object_name = MACH_PORT_NULL; + + assert(vs->vs_name_refs != 0); + vs->vs_name_refs = 0; + + vs_unlock(vs); + + /* + * Now we deallocate our various port rights. + */ + + { + int i; + for(i=0; ialias != (int)NULL) + kfree((vm_offset_t) (control_port->alias), + sizeof(struct vstruct_alias)); + ipc_port_release_receive(control_port); + return KERN_SUCCESS; +} + +void +default_pager_no_senders( + MACH_PORT_FACE mem_obj, + mach_port_seqno_t seqno, + mach_port_mscount_t mscount) +{ + vstruct_t vs; + static char here[] = "default_pager_no_senders"; + + /* + * Because we don't give out multiple send rights + * for a memory object, there can't be a race + * between getting a no-senders notification + * and creating a new send right for the object. + * Hence we don't keep track of mscount. + */ + + vs_lookup(mem_obj, vs); + vs_lock(vs, seqno); + vs_async_wait(vs); /* wait for pending async IO */ + + /* do not delete the vs structure until the referencing pointers */ + /* in the vstruct list have been expunged */ + + /* get VSL_LOCK out of order by using TRY mechanism */ + while(!VSL_LOCK_TRY()) { + VS_UNLOCK(vs); + VSL_LOCK(); + VSL_UNLOCK(); + VS_LOCK(vs); + vs_async_wait(vs); /* wait for pending async IO */ + } + /* + * We shouldn't get a no-senders notification + * when the kernel has the object cached. 
+ */ + if (vs->vs_control_port != MACH_PORT_NULL) + Panic("bad request"); + + /* + * Unlock the pager (though there should be no one + * waiting for it). + */ + VS_UNLOCK(vs); + + /* + * Remove the memory object port association, and then + * the destroy the port itself. We must remove the object + * from the port list before deallocating the pager, + * because of default_pager_objects. + */ + vstruct_list_delete(vs); + ps_vstruct_dealloc(vs); + + /* + * Recover memory that we might have wasted because + * of name conflicts + */ + while (!queue_empty(&vstruct_list.vsl_leak_queue)) { + vs = (vstruct_t) queue_first(&vstruct_list.vsl_leak_queue); + queue_remove_first(&vstruct_list.vsl_leak_queue, vs, vstruct_t, + vs_links); + kfree((vm_offset_t) vs, sizeof *vs); + } + VSL_UNLOCK(); +} + +kern_return_t +dp_memory_object_data_request( + MACH_PORT_FACE mem_obj, + MACH_PORT_FACE reply_to, + vm_object_offset_t offset, + vm_size_t length, + vm_prot_t protection_required) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + static char here[] = "memory_object_data_request"; + + GSTAT(global_stats.gs_pagein_calls++); + + + /* CDY at this moment vs_lookup panics when presented with the wrong */ + /* port. As we are expanding this pager to support user interfaces */ + /* this should be changed to return kern_failure */ + vs_lookup(mem_obj, vs); + vs_lock(vs, seqno); + vs_check_request(vs, reply_to); + + /* We are going to relax the strict sequencing here for performance */ + /* reasons. 
We can do this because we know that the read and */ + /* write threads are different and we rely on synchronization */ + /* of read and write requests at the cache memory_object level */ + /* break out wait_for_writers, all of this goes away when */ + /* we get real control of seqno with the new component interface */ + if (vs->vs_writers != 0) { + /* you can't hold on to the seqno and go */ + /* to sleep like that */ + vs_unlock(vs); /* bump internal count of seqno */ + VS_LOCK(vs); + while (vs->vs_writers != 0) { + default_pager_wait_write++; + vs->vs_waiting_write = TRUE; + assert_wait(&vs->vs_waiting_write, THREAD_UNINT); + VS_UNLOCK(vs); + thread_block((void (*)(void))0); + VS_LOCK(vs); + vs_async_wait(vs); + } + if(vs->vs_control_port == MACH_PORT_NULL) { + VS_UNLOCK(vs); + return KERN_FAILURE; + } + vs_start_read(vs); + VS_UNLOCK(vs); + } else { + vs_start_read(vs); + vs_unlock(vs); + } + + /* + * Request must be on a page boundary and a multiple of pages. + */ + if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0) + Panic("bad alignment"); + + pvs_cluster_read(vs, (vm_offset_t)offset, length); + + vs_finish_read(vs); + + return KERN_SUCCESS; +} + +/* + * memory_object_data_initialize: check whether we already have each page, and + * write it if we do not. The implementation is far from optimized, and + * also assumes that the default_pager is single-threaded. + */ +/* It is questionable whether or not a pager should decide what is relevant */ +/* and what is not in data sent from the kernel. Data initialize has been */ +/* changed to copy back all data sent to it in preparation for its eventual */ +/* merge with data return. It is the kernel that should decide what pages */ +/* to write back. 
As of the writing of this note, this is indeed the case */ +/* the kernel writes back one page at a time through this interface */ + +kern_return_t +dp_memory_object_data_initialize( + MACH_PORT_FACE mem_obj, + MACH_PORT_FACE control_port, + vm_object_offset_t offset, + pointer_t addr, + vm_size_t data_cnt) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + static char here[] = "memory_object_data_initialize"; + +#ifdef lint + control_port++; +#endif /* lint */ + + DEBUG(DEBUG_MO_EXTERNAL, + ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n", + (int)mem_obj, (int)offset, (int)data_cnt)); + GSTAT(global_stats.gs_pages_init += atop(data_cnt)); + + vs_lookup(mem_obj, vs); + vs_lock(vs, seqno); + vs_check_request(vs, control_port); + vs_start_write(vs); + vs_unlock(vs); + + /* + * Write the data via clustered writes. vs_cluster_write will + * loop if the address range specified crosses cluster + * boundaries. + */ + vs_cluster_write(vs, 0, (vm_offset_t)offset, data_cnt, FALSE, 0); + + vs_finish_write(vs); + + return KERN_SUCCESS; +} + +kern_return_t +dp_memory_object_lock_completed( + memory_object_t mem_obj, + MACH_PORT_FACE control_port, + vm_object_offset_t offset, + vm_size_t length) +{ + mach_port_seqno_t seqno; + static char here[] = "memory_object_lock_completed"; + +#ifdef lint + mem_obj++; + seqno++; + control_port++; + offset++; + length++; +#endif /* lint */ + + Panic("illegal"); + return KERN_FAILURE; +} + +kern_return_t +dp_memory_object_data_unlock( + memory_object_t mem_obj, + MACH_PORT_FACE control_port, + vm_object_offset_t offset, + vm_size_t data_cnt, + vm_prot_t desired_access) +{ + static char here[] = "memory_object_data_unlock"; + + Panic("illegal"); + return KERN_FAILURE; +} + + +kern_return_t +dp_memory_object_supply_completed( + memory_object_t mem_obj, + MACH_PORT_FACE control_port, + vm_object_offset_t offset, + vm_size_t length, + kern_return_t result, + vm_offset_t error_offset) +{ + static char here[] = "memory_object_supply_completed"; + + 
Panic("illegal"); + return KERN_FAILURE; +} + +kern_return_t +dp_memory_object_data_return( + MACH_PORT_FACE mem_obj, + MACH_PORT_FACE control_port, + vm_object_offset_t offset, + pointer_t addr, + vm_size_t data_cnt, + boolean_t dirty, + boolean_t kernel_copy) +{ + mach_port_seqno_t seqno; + vstruct_t vs; + static char here[] = "memory_object_data_return"; + +#ifdef lint + control_port++; + dirty++; + kernel_copy++; +#endif /* lint */ + + DEBUG(DEBUG_MO_EXTERNAL, + ("mem_obj=0x%x,offset=0x%x,addr=0x%xcnt=0x%x\n", + (int)mem_obj, (int)offset, (int)addr, (int)data_cnt)); + GSTAT(global_stats.gs_pageout_calls++); + + /* This routine is called by the pageout thread. The pageout thread */ + /* cannot be blocked by read activities unless the read activities */ + /* Therefore the grant of vs lock must be done on a try versus a */ + /* blocking basis. The code below relies on the fact that the */ + /* interface is synchronous. Should this interface be again async */ + /* for some type of pager in the future the pages will have to be */ + /* returned through a separate, asynchronous path. 
*/ + + vs_lookup(mem_obj, vs); + + default_pager_total++; + if(!VS_TRY_LOCK(vs)) { + /* the call below will not be done by caller when we have */ + /* a synchronous interface */ + /* return KERN_LOCK_OWNED; */ + upl_t upl; + upl_system_list_request((vm_object_t) + vs->vs_control_port->ip_kobject, + offset, data_cnt, data_cnt, &upl, NULL, 0, + UPL_NOBLOCK | UPL_CLEAN_IN_PLACE + | UPL_NO_SYNC | UPL_COPYOUT_FROM); + uc_upl_abort(upl,0); + ipc_port_release_send(control_port); + return KERN_SUCCESS; + } + + + + if ((vs->vs_seqno != vs->vs_next_seqno++) || (vs->vs_xfer_pending)) { + upl_t upl; + vs->vs_next_seqno--; + VS_UNLOCK(vs); + /* the call below will not be done by caller when we have */ + /* a synchronous interface */ + /* return KERN_LOCK_OWNED; */ + upl_system_list_request((vm_object_t) + vs->vs_control_port->ip_kobject, + offset, data_cnt, data_cnt, &upl, NULL, 0, + UPL_NOBLOCK | UPL_CLEAN_IN_PLACE + | UPL_NO_SYNC | UPL_COPYOUT_FROM); + uc_upl_abort(upl,0); + ipc_port_release_send(control_port); + return KERN_SUCCESS; + } + + if ((data_cnt % vm_page_size) != 0) + Panic("bad alignment"); + + vs_start_write(vs); + + + vs->vs_async_pending += 1; /* protect from backing store contraction */ + + /* unroll vs_check_request to avoid re-locking vs */ + + if (++vs->vs_control_refs > default_pager_max_urefs) { + mach_port_delta_t delta; + + delta = 1 - vs->vs_control_refs; + vs->vs_control_refs = 1; + + vs_unlock(vs); + + /* + * Deallocate excess user references. 
+ */ + + { + int i; + delta = -delta; + for(i=0; ivs_async_pending -= 1; /* release vs_async_wait */ + if (vs->vs_async_pending == 0) { + VS_UNLOCK(vs); + thread_wakeup(&vs->vs_waiting_async); + } else { + VS_UNLOCK(vs); + } + + + return KERN_SUCCESS; +} + +kern_return_t +dp_memory_object_change_completed( + memory_object_t mem_obj, + memory_object_control_t memory_control, + memory_object_flavor_t flavor) +{ + static char here[] = "memory_object_change_completed"; + + Panic("illegal"); + return KERN_FAILURE; +} + +/* + * Create an external object. + */ +kern_return_t +default_pager_object_create( + MACH_PORT_FACE pager, + MACH_PORT_FACE *mem_obj, + vm_size_t size) +{ + vstruct_t vs; + MACH_PORT_FACE port; + kern_return_t result; + struct vstruct_alias *alias_struct; + static char here[] = "default_pager_object_create"; + + + if (pager != default_pager_default_port) + return KERN_INVALID_ARGUMENT; + + vs = vs_object_create(size); + + port = ipc_port_alloc_kernel(); + ipc_port_make_send(port); + /* register abstract memory object port with pager mux routine */ + /* (directs kernel internal calls to the right pager). 
*/ + alias_struct = (struct vstruct_alias *) + kalloc(sizeof(struct vstruct_alias)); + if(alias_struct != NULL) { + alias_struct->vs = vs; + alias_struct->name = ISVS; + port->alias = (int) alias_struct; + } + else Panic("Out of kernel memory"); + + /* + * Set up associations between these ports + * and this vstruct structure + */ + + vs->vs_mem_obj_port = port; + vstruct_list_insert(vs); + default_pager_add(vs, FALSE); + + *mem_obj = port; + + return KERN_SUCCESS; +} + +kern_return_t +default_pager_objects( + MACH_PORT_FACE pager, + default_pager_object_array_t *objectsp, + mach_msg_type_number_t *ocountp, + mach_port_array_t *portsp, + mach_msg_type_number_t *pcountp) +{ + vm_offset_t oaddr = 0; /* memory for objects */ + vm_size_t osize = 0; /* current size */ + default_pager_object_t * objects; + unsigned int opotential; + + vm_offset_t paddr = 0; /* memory for ports */ + vm_size_t psize = 0; /* current size */ + MACH_PORT_FACE * ports; + unsigned int ppotential; + + unsigned int actual; + unsigned int num_objects; + kern_return_t kr; + vstruct_t entry; + static char here[] = "default_pager_objects"; +/* + if (pager != default_pager_default_port) + return KERN_INVALID_ARGUMENT; +*/ + + /* start with the inline memory */ + + kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&objects, + (vm_map_copy_t) *objectsp); + + if (kr != KERN_SUCCESS) + return kr; + + osize = round_page(*ocountp * sizeof * objects); + kr = vm_map_wire(ipc_kernel_map, + trunc_page((vm_offset_t)objects), + round_page(((vm_offset_t)objects) + osize), + VM_PROT_READ|VM_PROT_WRITE, FALSE); + osize=0; + + *objectsp = objects; + /* we start with the inline space */ + + + num_objects = 0; + opotential = *ocountp; + + ports = (MACH_PORT_FACE *) *portsp; + ppotential = *pcountp; + + VSL_LOCK(); + + /* + * We will send no more than this many + */ + actual = vstruct_list.vsl_count; + VSL_UNLOCK(); + + if (opotential < actual) { + vm_offset_t newaddr; + vm_size_t newsize; + + newsize = 2 * 
round_page(actual * sizeof * objects); + + kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE); + if (kr != KERN_SUCCESS) + goto nomemory; + + oaddr = newaddr; + osize = newsize; + opotential = osize / sizeof * objects; + objects = (default_pager_object_t *)oaddr; + } + + if (ppotential < actual) { + vm_offset_t newaddr; + vm_size_t newsize; + + newsize = 2 * round_page(actual * sizeof * ports); + + kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE); + if (kr != KERN_SUCCESS) + goto nomemory; + + paddr = newaddr; + psize = newsize; + ppotential = psize / sizeof * ports; + ports = (MACH_PORT_FACE *)paddr; + } + + /* + * Now scan the list. + */ + + VSL_LOCK(); + + num_objects = 0; + queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, vs_links) { + + MACH_PORT_FACE port; + vm_size_t size; + + if ((num_objects >= opotential) || + (num_objects >= ppotential)) { + + /* + * This should be rare. In any case, + * we will only miss recent objects, + * because they are added at the end. + */ + break; + } + + /* + * Avoid interfering with normal operations + */ + if (!VS_MAP_TRY_LOCK(entry)) + goto not_this_one; + size = ps_vstruct_allocated_size(entry); + VS_MAP_UNLOCK(entry); + + VS_LOCK(entry); + + port = entry->vs_object_name; + if (port == MACH_PORT_NULL) { + + /* + * The object is waiting for no-senders + * or memory_object_init. + */ + VS_UNLOCK(entry); + goto not_this_one; + } + + /* + * We need a reference for the reply message. + * While we are unlocked, the bucket queue + * can change and the object might be terminated. + * memory_object_terminate will wait for us, + * preventing deallocation of the entry. 
+ */ + + if (--entry->vs_name_refs == 0) { + VS_UNLOCK(entry); + + /* keep the list locked, wont take long */ + + { + int i; + for(i=0; ivs_name_refs += default_pager_max_urefs; + vs_finish_refs(entry); + } + VS_UNLOCK(entry); + + /* the arrays are wired, so no deadlock worries */ + + objects[num_objects].dpo_object = (vm_offset_t) entry; + objects[num_objects].dpo_size = size; + ports [num_objects++] = port; + continue; + + not_this_one: + /* + * Do not return garbage + */ + objects[num_objects].dpo_object = (vm_offset_t) 0; + objects[num_objects].dpo_size = 0; + ports [num_objects++] = MACH_PORT_NULL; + + } + + VSL_UNLOCK(); + + /* + * Deallocate and clear unused memory. + * (Returned memory will automagically become pageable.) + */ + + if (objects == *objectsp) { + + /* + * Our returned information fit inline. + * Nothing to deallocate. + */ + *ocountp = num_objects; + } else if (actual == 0) { + (void) vm_deallocate(kernel_map, oaddr, osize); + + /* return zero items inline */ + *ocountp = 0; + } else { + vm_offset_t used; + + used = round_page(actual * sizeof * objects); + + if (used != osize) + (void) vm_deallocate(kernel_map, + oaddr + used, osize - used); + + *objectsp = objects; + *ocountp = num_objects; + } + + if (ports == (MACH_PORT_FACE *)*portsp) { + + /* + * Our returned information fit inline. + * Nothing to deallocate. 
+ */ + + *pcountp = num_objects; + } else if (actual == 0) { + (void) vm_deallocate(kernel_map, paddr, psize); + + /* return zero items inline */ + *pcountp = 0; + } else { + vm_offset_t used; + + used = round_page(actual * sizeof * ports); + + if (used != psize) + (void) vm_deallocate(kernel_map, + paddr + used, psize - used); + + *portsp = (mach_port_array_t)ports; + *pcountp = num_objects; + } + (void) vm_map_unwire(kernel_map, (vm_offset_t)objects, + *ocountp + (vm_offset_t)objects, FALSE); + (void) vm_map_copyin(kernel_map, (vm_offset_t)objects, + *ocountp, TRUE, (vm_map_copy_t *)objectsp); + + return KERN_SUCCESS; + + nomemory: + { + register int i; + for (i = 0; i < num_objects; i++) + ipc_port_dealloc_kernel(ports[i]); + } + + if (objects != *objectsp) + (void) vm_deallocate(kernel_map, oaddr, osize); + + if (ports != (MACH_PORT_FACE *)*portsp) + (void) vm_deallocate(kernel_map, paddr, psize); + + return KERN_RESOURCE_SHORTAGE; +} + +kern_return_t +default_pager_object_pages( + MACH_PORT_FACE pager, + MACH_PORT_FACE object, + default_pager_page_array_t *pagesp, + mach_msg_type_number_t *countp) +{ + vm_offset_t addr; /* memory for page offsets */ + vm_size_t size = 0; /* current memory size */ + default_pager_page_t * pages; + unsigned int potential, actual; + kern_return_t kr; + +/* + if (pager != default_pager_default_port) + return KERN_INVALID_ARGUMENT; +*/ + kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&pages, + (vm_map_copy_t) *pagesp); + + if (kr != KERN_SUCCESS) + return kr; + + size = round_page(*countp * sizeof * pages); + kr = vm_map_wire(ipc_kernel_map, + trunc_page((vm_offset_t)pages), + round_page(((vm_offset_t)pages) + size), + VM_PROT_READ|VM_PROT_WRITE, FALSE); + size=0; + + *pagesp = pages; + /* we start with the inline space */ + + addr = (vm_offset_t)pages; + potential = *countp; + + for (;;) { + vstruct_t entry; + + VSL_LOCK(); + queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, + vs_links) { + VS_LOCK(entry); + if 
(entry->vs_object_name == object) { + VSL_UNLOCK(); + goto found_object; + } + VS_UNLOCK(entry); + } + VSL_UNLOCK(); + + /* did not find the object */ + + if (pages != *pagesp) + (void) vm_deallocate(kernel_map, addr, size); + return KERN_INVALID_ARGUMENT; + + found_object: + + if (!VS_MAP_TRY_LOCK(entry)) { + /* oh well bad luck */ + int wait_result; + + VS_UNLOCK(entry); + + assert_wait_timeout( 1, THREAD_INTERRUPTIBLE); + wait_result = thread_block((void (*)(void)) 0); + if (wait_result != THREAD_TIMED_OUT) + thread_cancel_timer(); + continue; + } + + actual = ps_vstruct_allocated_pages(entry, pages, potential); + VS_MAP_UNLOCK(entry); + VS_UNLOCK(entry); + + if (actual <= potential) + break; + + /* allocate more memory */ + + if (pages != *pagesp) + (void) vm_deallocate(kernel_map, addr, size); + size = round_page(actual * sizeof * pages); + kr = vm_allocate(kernel_map, &addr, size, TRUE); + if (kr != KERN_SUCCESS) + return kr; + pages = (default_pager_page_t *)addr; + potential = size / sizeof * pages; + } + + /* + * Deallocate and clear unused memory. + * (Returned memory will automagically become pageable.) + */ + + if (pages == *pagesp) { + + /* + * Our returned information fit inline. + * Nothing to deallocate. 
+ */ + + *countp = actual; + } else if (actual == 0) { + (void) vm_deallocate(kernel_map, addr, size); + + /* return zero items inline */ + *countp = 0; + } else { + vm_offset_t used; + + used = round_page(actual * sizeof * pages); + + if (used != size) + (void) vm_deallocate(kernel_map, + addr + used, size - used); + + *pagesp = pages; + *countp = actual; + } + (void) vm_map_unwire(kernel_map, (vm_offset_t)pages, + *countp + (vm_offset_t)pages, FALSE); + (void) vm_map_copyin(kernel_map, (vm_offset_t)pages, + *countp, TRUE, (vm_map_copy_t *)pagesp); + return KERN_SUCCESS; +} diff --git a/osfmk/device/Makefile b/osfmk/device/Makefile new file mode 100644 index 000000000..f856daeed --- /dev/null +++ b/osfmk/device/Makefile @@ -0,0 +1,57 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MIG_DEFS = \ + device.defs + +MIG_HDRS = \ + +DATAFILES = \ + device_types.h device_port.h device_types.defs \ + ${MIG_DEFS} + +MIGINCLUDES = \ + +INSTALL_MI_LIST = ${DATAFILES} ${_MIG_HDRS_} ${MIGINCLUDES} + +INSTALL_MI_DIR = device + +EXPORT_MI_LIST = ${DATAFILES} ${_MIG_HDRS_} ${MIGINCLUDES} + +EXPORT_MI_DIR = device + +.ORDER: ${MIG_HDRS} ${MIGINCLUDES} + +# +# Build path +# +INCFLAGS_MAKEFILE= -I.. 
+ +MIGKSFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_SERVER=1 +MIGKUFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_USER=1 + +DEVICE_FILES = device_server.h device_server.c + +COMP_FILES = ${DEVICE_FILES} + +.ORDER: ${DEVICE_FILES} + +${DEVICE_FILES}: device.defs + ${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader device_server.h \ + -server device_server.c \ + $< + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/device/device.defs b/osfmk/device/device.defs new file mode 100644 index 000000000..1a91b69a9 --- /dev/null +++ b/osfmk/device/device.defs @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: device/device.defs + * Author: Douglas Orr + * Feb 10, 1988 + * Abstract: + * Mach device support. Mach devices are accessed through + * block and character device interfaces to the kernel. 
+ */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + iokit 2800; + +#include +#include +#include +#include + +import ; + +serverprefix is_; + +type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic + ctype: mach_port_t; + +#if IOKIT + +type io_name_t = c_string[*:128]; +type io_string_t = c_string[*:512]; +type io_struct_inband_t = array[*:4096] of char; +type io_scalar_inband_t = array[*:16] of int; +type io_async_ref_t = array[*:8] of natural_t; +type io_buf_ptr_t = ^array[] of MACH_MSG_TYPE_INTEGER_8; + +type io_object_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: io_object_t iokit_lookup_object_port(mach_port_t) + outtran: mach_port_t iokit_make_object_port(io_object_t) + destructor: iokit_remove_reference(io_object_t) +#endif /* KERNEL_SERVER */ + ; + +type io_connect_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: io_connect_t iokit_lookup_connect_port(mach_port_t) + outtran: mach_port_t iokit_make_connect_port(io_connect_t) + destructor: iokit_remove_reference(io_connect_t) +#endif /* KERNEL_SERVER */ + ; + +routine io_object_get_class( + object : io_object_t; + out className : io_name_t + ); + +routine io_object_conforms_to( + object : io_object_t; + in className : io_name_t; + out conforms : boolean_t + ); + +routine io_iterator_next( + iterator : io_object_t; + out object : io_object_t + ); + +routine io_iterator_reset( + iterator : io_object_t + ); + +routine io_service_get_matching_services( + master_port : mach_port_t; + in matching : io_string_t; + out existing : io_object_t + ); + +routine io_service_add_notification_old( + master_port : mach_port_t; + in notification_type : io_name_t; + in matching : io_string_t; + in wake_port : mach_port_make_send_t; + in reference : natural_t; + out notification : io_object_t + ); + +routine io_registry_create_iterator( + master_port : mach_port_t; + in plane : io_name_t; + in options : int; + out iterator : io_object_t + ); + +routine 
io_registry_iterator_enter_entry( + iterator : io_object_t + ); + +routine io_registry_iterator_exit_entry( + iterator : io_object_t + ); + +routine io_registry_entry_from_path( + master_port : mach_port_t; + in path : io_string_t; + out registry_entry : io_object_t + ); + +routine io_registry_entry_get_name( + registry_entry : io_object_t; + out name : io_name_t + ); + +routine io_registry_entry_get_properties( + registry_entry : io_object_t; + out properties : io_buf_ptr_t, physicalcopy + ); + +routine io_registry_entry_get_property( + registry_entry : io_object_t; + in property_name : io_name_t; + out data : io_struct_inband_t, CountInOut + ); + +routine io_registry_entry_get_child_iterator( + registry_entry : io_object_t; + in plane : io_name_t; + out iterator : io_object_t + ); + +routine io_registry_entry_get_parent_iterator( + registry_entry : io_object_t; + in plane : io_name_t; + out iterator : io_object_t + ); + +routine io_service_open( + service : io_object_t; + in owningTask : task_t; + in connect_type : int; + out connection : io_connect_t + ); + +routine io_service_close( + connection : io_connect_t + ); + +routine io_connect_get_service( + connection : io_connect_t; + out service : io_object_t + ); + +routine io_connect_set_notification_port( + connection : io_connect_t; + in notification_type : int; + in port : mach_port_make_send_t; + in reference : int + ); + +routine io_connect_map_memory( + connection : io_connect_t; + in memory_type : int; + in into_task : task_t; + inout address : vm_address_t; + inout size : vm_size_t; + in flags : int + ); + +routine io_connect_add_client( + connection : io_connect_t; + in connect_to : io_connect_t + ); + +routine io_connect_set_properties( + connection : io_connect_t; + in properties : io_buf_ptr_t, physicalcopy; + out result : natural_t + ); + + +routine io_connect_method_scalarI_scalarO( + connection : io_connect_t; + in selector : int; + in input : io_scalar_inband_t; + out output : io_scalar_inband_t, 
CountInOut + ); + +routine io_connect_method_scalarI_structureO( + connection : io_connect_t; + in selector : int; + in input : io_scalar_inband_t; + out output : io_struct_inband_t, CountInOut + ); + +routine io_connect_method_scalarI_structureI( + connection : io_connect_t; + in selector : int; + in input : io_scalar_inband_t; + in inputStruct : io_struct_inband_t + ); + +routine io_connect_method_structureI_structureO( + connection : io_connect_t; + in selector : int; + in input : io_struct_inband_t; + out output : io_struct_inband_t, CountInOut + ); + +routine io_registry_entry_get_path( + registry_entry : io_object_t; + in plane : io_name_t; + out path : io_string_t + ); + +routine io_registry_get_root_entry( + master_port : mach_port_t; + out root : io_object_t + ); + +routine io_registry_entry_set_properties( + registry_entry : io_object_t; + in properties : io_buf_ptr_t, physicalcopy; + out result : natural_t + ); + +routine io_registry_entry_in_plane( + registry_entry : io_object_t; + in plane : io_name_t; + out inPlane : boolean_t + ); + +routine io_object_get_retain_count( + object : io_object_t; + out retainCount : int + ); + +routine io_service_get_busy_state( + service : io_object_t; + out busyState : int + ); + +routine io_service_wait_quiet( + service : io_object_t; + wait_time : mach_timespec_t + ); + +routine io_registry_entry_create_iterator( + registry_entry : io_object_t; + in plane : io_name_t; + in options : int; + out iterator : io_object_t + ); + +routine io_iterator_is_valid( + iterator : io_object_t; + out is_valid : boolean_t + ); + +routine io_make_matching( + master_port : mach_port_t; + in of_type : int; + in options : int; + in input : io_struct_inband_t; + out matching : io_string_t + ); + +routine io_catalog_send_data( + master_port : mach_port_t; + in flag : int; + in inData : io_buf_ptr_t; + out result : natural_t + ); + +routine io_catalog_terminate( + master_port : mach_port_t; + in flag : int; + in name : io_name_t + ); + 
+routine io_catalog_get_data( + master_port : mach_port_t; + in flag : int; + out outData : io_buf_ptr_t + ); + +routine io_catalog_get_gen_count( + master_port : mach_port_t; + out genCount : int + ); + +routine io_catalog_module_loaded( + master_port : mach_port_t; + in name : io_name_t + ); + +routine io_catalog_reset( + master_port : mach_port_t; + in flag : int + ); + +routine io_service_request_probe( + service : io_object_t; + in options : int + ); + +routine io_registry_entry_get_name_in_plane( + registry_entry : io_object_t; + in plane : io_name_t; + out name : io_name_t + ); + +routine io_service_match_property_table( + service : io_object_t; + in matching : io_string_t; + out matches : boolean_t + ); + +routine io_async_method_scalarI_scalarO( + connection : io_connect_t; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + in selector : int; + in input : io_scalar_inband_t; + out output : io_scalar_inband_t, CountInOut + ); + +routine io_async_method_scalarI_structureO( + connection : io_connect_t; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + in selector : int; + in input : io_scalar_inband_t; + out output : io_struct_inband_t, CountInOut + ); + +routine io_async_method_scalarI_structureI( + connection : io_connect_t; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + in selector : int; + in input : io_scalar_inband_t; + in inputStruct : io_struct_inband_t + ); + +routine io_async_method_structureI_structureO( + connection : io_connect_t; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + in selector : int; + in input : io_struct_inband_t; + out output : io_struct_inband_t, CountInOut + ); + +routine io_service_add_notification( + master_port : mach_port_t; + in notification_type : io_name_t; + in matching : io_string_t; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + out notification : io_object_t + ); + +routine 
io_service_add_interest_notification( + service : io_object_t; + in type_of_interest : io_name_t; + in wake_port : mach_port_make_send_t; + in reference : io_async_ref_t; + out notification : io_object_t + ); + +routine io_service_acknowledge_notification( + service : io_object_t; + in notify_ref : natural_t; + in response : natural_t + ); + +routine io_connect_get_notification_semaphore( + connection : io_connect_t; + in notification_type : natural_t; + out semaphore : semaphore_t + ); + + +routine io_connect_unmap_memory( + connection : io_connect_t; + in memory_type : int; + in into_task : task_t; + in address : vm_address_t + ); + +#endif + + diff --git a/osfmk/device/device_init.c b/osfmk/device/device_init.c new file mode 100644 index 000000000..db8fe7a58 --- /dev/null +++ b/osfmk/device/device_init.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + * + * Initialize device service as part of kernel task. + */ +#include +#include +#include +#include +#include +#include + +ipc_port_t master_device_port; + +void +device_service_create(void) +{ + master_device_port = ipc_port_alloc_kernel(); + if (master_device_port == IP_NULL) + panic("can't allocate master device port"); + + ipc_kobject_set(master_device_port, 1, IKOT_MASTER_DEVICE); + host_set_io_master(host_priv_self(), + ipc_port_make_send(master_device_port)); + +#if 0 + ds_init(); + net_io_init(); + device_pager_init(); + datadev_init(); + + (void) kernel_thread(kernel_task, io_done_thread, (char *)0); + (void) kernel_thread(kernel_task, net_thread, (char *)0); +#endif +} diff --git a/osfmk/device/device_port.h b/osfmk/device/device_port.h new file mode 100644 index 000000000..df76f3f0a --- /dev/null +++ b/osfmk/device/device_port.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 8/89 + */ + +#ifndef _DEVICE_DEVICE_PORT_H_ +#define _DEVICE_DEVICE_PORT_H_ + +#include + +/* + * Master privileged I/O object for this host + */ +extern mach_port_t master_device_port; + +#endif /* _DEVICE_DEVICE_PORT_H_ */ diff --git a/osfmk/device/device_types.defs b/osfmk/device/device_types.defs new file mode 100644 index 000000000..9c7dd5183 --- /dev/null +++ b/osfmk/device/device_types.defs @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/89 + * + * Common definitions for device interface types. + */ + +#ifndef _DEVICE_DEVICE_TYPES_DEFS_ +#define _DEVICE_DEVICE_TYPES_DEFS_ + +/* + * Basic types + */ + +#include +#include + +type recnum_t = unsigned32; +type dev_mode_t = unsigned32; +type dev_flavor_t = unsigned32; +type dev_name_t = c_string[*:128]; +type dev_status_t = array[*:1024] of int; +type io_buf_len_t = integer_t; +type io_buf_ptr_t = ^array[] of MACH_MSG_TYPE_INTEGER_8; +type io_buf_ptr_inband_t= array[*:128] of char; +type filter_t = short; +type filter_array_t = array[*:64] of filter_t; + +type device_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: device_t dev_port_lookup(mach_port_t) + outtran: mach_port_t convert_device_to_port(device_t) + destructor: device_deallocate(device_t) +#endif /* KERNEL_SERVER */ + ; + +type io_done_queue_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: io_done_queue_t io_done_queue_port_lookup(mach_port_t) + outtran: mach_port_t convert_io_done_queue_to_port(io_done_queue_t) +#endif KERNEL_SERVER + ; + +import ; + +#endif /* _DEVICE_DEVICE_TYPES_DEFS_ */ diff --git a/osfmk/device/device_types.h b/osfmk/device/device_types.h new file mode 100644 index 000000000..1e6c2e85f --- /dev/null +++ b/osfmk/device/device_types.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 3/89 + */ + +#ifndef DEVICE_TYPES_H +#define DEVICE_TYPES_H + +/* + * Types for device interface. + */ +#include +#include + +/* + * IO buffer - out-of-line array of characters. + */ +typedef char * io_buf_ptr_t; + +/* + * Some types for IOKit. + */ + +#ifdef IOKIT + +/* must match device_types.defs */ +typedef char io_name_t[128]; +typedef char io_string_t[512]; +typedef char io_struct_inband_t[4096]; +typedef int io_scalar_inband_t[16]; +typedef natural_t io_async_ref_t[8]; + +#ifdef MACH_KERNEL + +typedef struct IOObject * io_object_t; +typedef io_object_t io_connect_t; + +extern void iokit_remove_reference( io_object_t obj ); + +extern io_object_t iokit_lookup_object_port( ipc_port_t port ); +extern io_connect_t iokit_lookup_connect_port( ipc_port_t port ); + +extern ipc_port_t iokit_make_object_port( io_object_t obj ); +extern ipc_port_t iokit_make_connect_port( io_connect_t obj ); + +#else + +#ifndef __IOKIT_PORTS_DEFINED__ +#define __IOKIT_PORTS_DEFINED__ +typedef mach_port_t io_object_t; +#endif /* __IOKIT_PORTS_DEFINED__ */ + +#endif /* MACH_KERNEL */ + +#endif /* IOKIT */ + +#endif /* DEVICE_TYPES_H */ + diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c new file mode 100644 index 000000000..637047f7e --- /dev/null +++ b/osfmk/device/iokit_rpc.c @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* spl definitions */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +#include + +#ifdef __ppc__ +#include +#include +#endif +#include + +#define EXTERN +#define MIGEXTERN + +/* + * Functions in iokit:IOUserClient.cpp + */ + +extern void iokit_add_reference( io_object_t obj ); + +extern void iokit_remove_reference( io_object_t obj ); + +extern ipc_port_t iokit_port_for_object( io_object_t obj, + ipc_kobject_type_t type ); + +extern kern_return_t iokit_client_died( io_object_t obj, + ipc_port_t port, ipc_kobject_type_t type ); + +extern kern_return_t +iokit_client_memory_for_type( + io_object_t connect, + unsigned int type, + unsigned int * flags, + vm_address_t * address, + vm_size_t * size ); + +/* + * Lookup a device by its port. + * Doesn't consume the naked send right; produces a device reference. 
+ */ +MIGEXTERN io_object_t +iokit_lookup_object_port( + ipc_port_t port) +{ + register io_object_t obj; + + if (!IP_VALID(port)) + return (NULL); + + ip_lock(port); + if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) { + obj = (io_object_t) port->ip_kobject; + iokit_add_reference( obj ); + } + else + obj = NULL; + + ip_unlock(port); + + return( obj ); +} + +MIGEXTERN io_object_t +iokit_lookup_connect_port( + ipc_port_t port) +{ + register io_object_t obj; + + if (!IP_VALID(port)) + return (NULL); + + ip_lock(port); + if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) { + obj = (io_object_t) port->ip_kobject; + iokit_add_reference( obj ); + } + else + obj = NULL; + + ip_unlock(port); + + return( obj ); +} + +EXTERN io_object_t +iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space) +{ + io_object_t obj = NULL; + + if (connectRef && MACH_PORT_VALID((mach_port_name_t)connectRef)) { + ipc_port_t port; + kern_return_t kr; + + kr = ipc_object_translate(space, (mach_port_name_t)connectRef, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port); + + if (kr == KERN_SUCCESS) { + assert(IP_VALID(port)); + + if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) { + obj = (io_object_t) port->ip_kobject; + iokit_add_reference(obj); + } + + ip_unlock(port); + } + } + + return obj; +} + +EXTERN io_object_t +iokit_lookup_connect_ref_current_task(io_object_t connectRef) +{ + return iokit_lookup_connect_ref(connectRef, current_space()); +} + +/* + * Get the port for a device. + * Consumes a device reference; produces a naked send right. 
+ */ +MIGEXTERN ipc_port_t +iokit_make_object_port( + io_object_t obj ) +{ + register ipc_port_t port; + + if( obj == NULL) + return IP_NULL; + + port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT ); + if( port) + port = ipc_port_make_send( port); + + iokit_remove_reference( obj ); + + return( port); +} + +MIGEXTERN ipc_port_t +iokit_make_connect_port( + io_object_t obj ) +{ + register ipc_port_t port; + + if( obj == NULL) + return IP_NULL; + + port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT ); + if( port) + port = ipc_port_make_send( port); + + iokit_remove_reference( obj ); + + return( port); +} + + +EXTERN ipc_port_t +iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type ); + +int gIOKitPortCount; + +EXTERN ipc_port_t +iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type ) +{ + ipc_port_t notify; + ipc_port_t port; + + do { + + /* Allocate port, keeping a reference for it. */ + port = ipc_port_alloc_kernel(); + if( port == IP_NULL) + continue; + + /* set kobject & type */ +// iokit_add_reference( obj ); + ipc_kobject_set( port, (ipc_kobject_t) obj, type); + + /* Request no-senders notifications on the port. 
*/ + notify = ipc_port_make_sonce( port); + ip_lock( port); + ipc_port_nsrequest( port, 1, notify, ¬ify); + assert( notify == IP_NULL); + gIOKitPortCount++; + + } while( FALSE); + + return( port ); +} + + +EXTERN kern_return_t +iokit_destroy_object_port( ipc_port_t port ) +{ + ipc_kobject_set( port, IKO_NULL, IKOT_NONE); + +// iokit_remove_reference( obj ); + + ipc_port_dealloc_kernel( port); + gIOKitPortCount--; + + return( KERN_SUCCESS); +} + +EXTERN mach_port_name_t +iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type ) +{ + kern_return_t kr; + ipc_port_t port; + mach_port_name_t name; + + if( obj == NULL) + return MACH_PORT_NULL; + + port = iokit_port_for_object( obj, type ); + if( port) + port = ipc_port_make_send( port); + if( port == IP_NULL) + return MACH_PORT_NULL; + + kr = ipc_object_copyout( task->itk_space, (ipc_object_t) port, + MACH_MSG_TYPE_PORT_SEND, TRUE, &name); + + if( kr != KERN_SUCCESS) + name = MACH_PORT_NULL; + + iokit_remove_reference( obj ); + + return( name ); +} + +/* + * Handle the No-More_Senders notification generated from a device port destroy. + * Since there are no longer any tasks which hold a send right to this device + * port a NMS notification has been generated. + */ + +static void +iokit_no_senders( mach_no_senders_notification_t * notification ) +{ + ipc_port_t port; + io_object_t obj = NULL; + ipc_kobject_type_t type; + + port = (ipc_port_t) notification->not_header.msgh_remote_port; + + // convert a port to io_object_t. 
+ if( IP_VALID(port)) { + ip_lock(port); + if( ip_active(port)) { + obj = (io_object_t) port->ip_kobject; + type = ip_kotype( port ); + if( (IKOT_IOKIT_OBJECT == type) + || (IKOT_IOKIT_CONNECT == type)) + iokit_add_reference( obj ); + else + obj = NULL; + } + ip_unlock(port); + + if( obj ) { + (void) iokit_client_died( obj, port, type ); + iokit_remove_reference( obj ); + } + } +} + + +EXTERN +boolean_t +iokit_notify( mach_msg_header_t * msg ) +{ + switch (msg->msgh_id) { + case MACH_NOTIFY_NO_SENDERS: + iokit_no_senders((mach_no_senders_notification_t *) msg); + return TRUE; + + case MACH_NOTIFY_PORT_DELETED: + case MACH_NOTIFY_PORT_DESTROYED: + case MACH_NOTIFY_SEND_ONCE: + case MACH_NOTIFY_DEAD_NAME: + default: + printf("iokit_notify: strange notification %ld\n", msg->msgh_id); + return FALSE; + } +} + +kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa, + vm_size_t length, unsigned int options) +{ + vm_size_t off; + vm_prot_t prot; + int memattr; + struct phys_entry *pp; + pmap_t pmap = map->pmap; + + prot = (options & kIOMapReadOnly) + ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE); + +#if __ppc__ + + switch(options & kIOMapCacheMask ) { /* What cache mode do we need? 
*/ + + case kIOMapDefaultCache: + default: + if(pp = pmap_find_physentry(pa)) { /* Find physical address */ + memattr = ((pp->pte1 & 0x00000078) >> 3); /* Use physical attributes as default */ + } + else { /* If no physical, just hard code attributes */ + memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED; + } + break; + + case kIOMapInhibitCache: + memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED; + break; + + case kIOMapWriteThruCache: + memattr = PTE_WIMG_WT_CACHED_COHERENT_GUARDED; + break; + + case kIOMapCopybackCache: + memattr = PTE_WIMG_CB_CACHED_COHERENT; + break; + } + + pmap_map_block(pmap, va, pa, length, prot, memattr, 0); /* Set up a block mapped area */ + +#else +// enter each page's physical address in the target map + for (off = 0; off < length; off += page_size) { /* Loop for the whole length */ + pmap_enter(pmap, va + off, pa + off, prot, TRUE); /* Map it in */ + } +#endif + + return( KERN_SUCCESS ); +} + +void IOGetTime( mach_timespec_t * clock_time); +void IOGetTime( mach_timespec_t * clock_time) +{ + *clock_time = clock_get_system_value(); +} diff --git a/osfmk/device/subrs.c b/osfmk/device/subrs.c new file mode 100644 index 000000000..ad8b9c34f --- /dev/null +++ b/osfmk/device/subrs.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + *(C)UNIX System Laboratories, Inc. all or some portions of this file are + *derived from material licensed to the University of California by + *American Telephone and Telegraph Co. or UNIX System Laboratories, + *Inc. and are reproduced herein with the permission of UNIX System + *Laboratories, Inc. + */ + +/* + * Mach Operating System + * Copyright (c) 1993,1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Copyright (c) 1988 Regents of the University of California. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/* + * Random device subroutines and stubs. 
+ */ + +#include +#include + +/* String routines, from CMU */ +#ifdef strcpy +#undef strcmp +#undef strncmp +#undef strcpy +#undef strncpy +#undef strlen +#endif + +/* + * Abstract: + * strcmp (s1, s2) compares the strings "s1" and "s2". + * It returns 0 if the strings are identical. It returns + * > 0 if the first character that differs in the two strings + * is larger in s1 than in s2 or if s1 is longer than s2 and + * the contents are identical up to the length of s2. + * It returns < 0 if the first differing character is smaller + * in s1 than in s2 or if s1 is shorter than s2 and the + * contents are identical upto the length of s1. + */ + +int +strcmp( + register const char *s1, + register const char *s2) +{ + register unsigned int a, b; + + do { + a = *s1++; + b = *s2++; + if (a != b) + return a-b; /* includes case when + 'a' is zero and 'b' is not zero + or vice versa */ + } while (a != '\0'); + + return 0; /* both are zero */ +} + +/* + * Abstract: + * strncmp (s1, s2, n) compares the strings "s1" and "s2" + * in exactly the same way as strcmp does. Except the + * comparison runs for at most "n" characters. + */ + +int +strncmp( + register const char *s1, + register const char *s2, + size_t n) +{ + register unsigned int a, b; + + while (n != 0) { + a = *s1++; + b = *s2++; + if (a != b) + return a-b; /* includes case when + 'a' is zero and 'b' is not zero + or vice versa */ + if (a == '\0') + return 0; /* both are zero */ + n--; + } + + return 0; +} + +/* + * Abstract: + * strcpy copies the contents of the string "from" including + * the null terminator to the string "to". A pointer to "to" + * is returned. + */ + +char * +strcpy( + register char *to, + register const char *from) +{ + register char *ret = to; + + while ((*to++ = *from++) != '\0') + continue; + + return ret; +} + + +/* + * Abstract: + * strncpy copies "count" characters from the "from" string to + * the "to" string. 
If "from" contains less than "count" characters + * "to" will be padded with null characters until exactly "count" + * characters have been written. The return value is a pointer + * to the "to" string. + */ + +char * +strncpy( + char *s1, + const char *s2, + size_t n) +{ + char *os1 = s1; + unsigned long i; + + for (i = 0; i < n;) + if ((*s1++ = *s2++) == '\0') + for (i++; i < n; i++) + *s1++ = '\0'; + else + i++; + return (os1); +} + + +#if !defined(__alpha) + +/* + * Abstract: + * strlen returns the number of characters in "string" preceeding + * the terminating null character. + */ + +size_t +strlen( + register const char *string) +{ + register const char *ret = string; + + while (*string++ != '\0') + continue; + return string - 1 - ret; +} +#endif /* !defined(__alpha) */ + +/* + * atoi: + * + * This function converts an ascii string into an integer. + * + * input : string + * output : a number + */ + +int +atoi( + u_char *cp) +{ + int number; + + for (number = 0; ('0' <= *cp) && (*cp <= '9'); cp++) + number = (number * 10) + (*cp - '0'); + + return( number ); +} + +/* + * convert an ASCII string (decimal radix) to an integer + * inputs: + * p string pointer. + * t char **, return a pointer to the cahr which terminates the + * numeric string. + * returns: + * integer value of the numeric string. + * side effect: + * pointer to terminating char. + */ + +int +atoi_term( + char *p, /* IN */ + char **t) /* OUT */ +{ + register int n; + register int f; + + n = 0; + f = 0; + for(;;p++) { + switch(*p) { + case ' ': + case '\t': + continue; + case '-': + f++; + case '+': + p++; + } + break; + } + while(*p >= '0' && *p <= '9') + n = n*10 + *p++ - '0'; + + /* return pointer to terminating character */ + if ( t ) + *t = p; + + return(f? -n: n); +} + +/* + * convert an integer to an ASCII string. + * inputs: + * num integer to be converted + * str string pointer. + * + * outputs: + * pointer to string start. 
+ */ + +char * +itoa( + int num, + char *str) +{ + char digits[11]; + register char *dp; + register char *cp = str; + + if (num == 0) { + *cp++ = '0'; + } + else { + dp = digits; + while (num) { + *dp++ = '0' + num % 10; + num /= 10; + } + while (dp != digits) { + *cp++ = *--dp; + } + } + *cp++ = '\0'; + + return str; +} + +char * +strcat( + register char *dest, + register const char *src) +{ + char *old = dest; + + while (*dest) + ++dest; + while (*dest++ = *src++) + ; + return (old); +} + diff --git a/osfmk/i386/AT386/asm_startup.h b/osfmk/i386/AT386/asm_startup.h new file mode 100644 index 000000000..c22b98ac8 --- /dev/null +++ b/osfmk/i386/AT386/asm_startup.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#ifndef __MACHO__ +/* + * Startup code for an i386 on an AT. + * Kernel is loaded starting at 1MB. + * Protected mode, paging disabled. + */ + + popl %eax + cmpl $-1,%eax /* new calling convention */ + je 0f + +/* + * Old calling convention + * + * %esp -> boottype (deprecated) + * size of extended memory (K) + * size of conventional memory (K) + * boothowto (deprecated) + * esym (if KDB set up) + */ +#define SYS_REBOOT_COMPAT 1 +#if SYS_REBOOT_COMPAT + movl %eax,PA(EXT(boottype)) +#endif + popl PA(EXT(extmem)) /* extended memory, in K */ + popl PA(EXT(cnvmem)) /* conventional memory, in K */ + popl %edx /* old boothowto */ +#if SYS_REBOOT_COMPAT +#define RB_SINGLE 0x2 +#define RB_HALT 0x8 +#define RB_ALTBOOT 0x40 + testb $(RB_SINGLE),%edx /* old RB_SINGLE flag ? */ + je 2f + incl PA(EXT(startup_single_user)) +2: testb $(RB_HALT),%edx /* old RB_HALT flag ? */ + je 2f + incl PA(EXT(halt_in_debugger)) +2: testb $(RB_ALTBOOT),%edx /* old RB_ALTBOOT flag ? 
*/ + je 2f + incl PA(EXT(cons_is_com1)) +2: +#if NCPUS > 1 + shrl $0x8,%edx + movb %edx,PA(EXT(wncpu)) /* old want ncpus flag */ +#endif +#endif + + popl %eax /* get boot_string & esym */ +#if SYS_REBOOT_COMPAT + movl %eax, %esi + lea PA(EXT(boot_string_store)), %edi + movl PA(EXT(boot_string_sz)), %ecx + cld + rep + movsb +#endif + +/* + * Move symbol table out of the way of BSS. + * + * When kernel is loaded, at the start of BSS we have: + * _edata: + * .long kern_sym_size + * .long boot_image_size + * .long load_info_size + * sym_start: + * kernel symbols + * .align ALIGN + * boot_start: + * bootstrap image + * .align ALIGN + * load_info_start: + * bootstrap load information + * + * all of which must be moved somewhere else, since it + * is sitting in the kernel BSS. In addition, the bootstrap + * image must be moved to a machine page boundary, so that we get: + * + * _edata: + * BSS + * _end: <- kern_sym_start (VA) + * kernel symbols . (kern_sym_size) + * : <- boot_start (VA) + * bootstrap image + * <- load_info_start (VA) + * load information + * <- %ebx (PA) + * + */ + lea PA(EXT(edata))+4-1,%esi /* point to symbol size word */ + andl $~0x3,%esi + movl (%esi),%edx /* get symbol size */ + + lea PA(EXT(end))+NBPG-1(%edx),%edi + /* point after BSS, add symbol */ + /* size, and round up to */ + andl $-NBPG,%edi /* machine page boundary */ + + lea -KVTOPHYS(%edi),%eax /* save virtual address */ + movl %eax,PA(EXT(boot_start)) /* of start of bootstrap */ + movl 4(%esi),%ecx /* get size of bootstrap */ + movl %ecx,PA(EXT(boot_size)) /* save size of bootstrap */ + lea -KVTOPHYS(%edi,%ecx),%eax + movl %eax,PA(EXT(load_info_start)) + /* save virtual address */ + /* of start of loader info */ + movl 8(%esi),%eax /* get size of loader info */ + movl %eax,PA(EXT(load_info_size)) + /* save size of loader info */ + addl %eax,%ecx /* get total size to move */ + + leal 12(%esi,%edx),%esi /* point to start of boot image - source */ + + leal (%edi,%ecx),%ebx /* point to new 
location of */ + /* end of bootstrap - next */ + /* available physical address */ + + lea -4(%esi,%ecx),%esi /* point to end of src - 4 */ + lea -4(%edi,%ecx),%edi /* point to end of dst - 4 */ + shrl $2,%ecx /* move by longs */ + std /* move backwards */ + rep + movsl /* move bootstrap and loader_info */ + cld /* reset direction flag */ + + movl $EXT(end),PA(EXT(kern_sym_start)) + /* save virtual address */ + /* of start of symbols */ + movl %edx,PA(EXT(kern_sym_size)) /* save symbol table size */ + testl %edx,%edx /* any symbols? */ + jz 1f /* if so: */ + + /* %esi points to start of boot-4 */ + /* == end of symbol table (source) - 4 */ + leal PA(EXT(end))-4(%edx),%edi /* point to end of dst - 4 */ + movl %edx,%ecx /* copy size */ + shrl $2,%ecx /* move by longs */ + std /* move backwards */ + rep + movsl /* move symbols */ + cld /* reset direction flag */ + + jmp 1f + +/* + * New calling convention + * + * %esp -> -1 + * size of extended memory (K) + * size of conventional memory (K) + * kern_sym_start + * kern_sym_size + * kern_args_start + * kern_args_size + * boot_sym_start + * boot_sym_size + * boot_args_start + * boot_args_size + * boot_start + * boot_size + * boot_region_desc + * boot_region_count + * boot_thread_state_flavor + * boot_thread_state + * boot_thread_state_count + * env_start + * env_size + * top of loaded memory + */ + +#define MEM_BASE 0 + +#define BOOT_TO_VIRT (MEM_BASE-(KVTOPHYS)) + .globl EXT(boot_start) + +0: + popl PA(EXT(extmem)) /* extended memory, in K */ + popl PA(EXT(cnvmem)) /* conventional memory, in K */ + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(kern_sym_start)) + popl PA(EXT(kern_sym_size)) + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(kern_args_start)) + popl PA(EXT(kern_args_size)) + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(boot_sym_start)) + popl PA(EXT(boot_sym_size)) + popl %eax + addl 
$BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(boot_args_start)) + popl PA(EXT(boot_args_size)) + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(boot_start)) + popl PA(EXT(boot_size)) + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(boot_region_desc)) + popl PA(EXT(boot_region_count)) + popl PA(EXT(boot_thread_state_flavor)) + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(boot_thread_state)) + popl PA(EXT(boot_thread_state_count)) + popl %eax + addl $BOOT_TO_VIRT,%eax /* convert to virtual address */ + movl %eax,PA(EXT(env_start)) + popl PA(EXT(env_size)) + popl %ebx /* mem top */ + addl $MEM_BASE,%ebx /* translate */ +1: +#else + cld + call PA(EXT(i386_preinit)) + movl %eax,%ebx +#endif diff --git a/osfmk/i386/AT386/autoconf.c b/osfmk/i386/AT386/autoconf.c new file mode 100644 index 000000000..7a5d3d7f6 --- /dev/null +++ b/osfmk/i386/AT386/autoconf.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* FIXME - Remove when device/ is completely pulled out of OSFMK */ +int dev_indirect_count = 0; +int dev_indirect_list = 0; diff --git a/osfmk/i386/AT386/bbclock.c b/osfmk/i386/AT386/bbclock.c new file mode 100644 index 000000000..4f69e0ba2 --- /dev/null +++ b/osfmk/i386/AT386/bbclock.c @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. + + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby +granted, provided that the above copyright notice appears in all +copies and that both the copyright notice and this permission notice +appear in supporting documentation, and that the name of Intel +not be used in advertising or publicity pertaining to distribution +of the software without specific, written prior permission. 
+ +INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, +IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, +NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* local data */ +static int month[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +extern char dectohexdec( + int n); +extern int hexdectodec( + char c); +extern int yeartoday( + int yr); +extern void rtcput( + struct rtc_st * regs); +extern int rtcget( + struct rtc_st * regs); + +#define LOCK_BBC() splclock() +#define UNLOCK_BBC(s) splx(s) + +/* + * Configure battery-backed clock. + */ +int +bbc_config(void) +{ + int BbcFlag; + struct rtc_st rtclk; + +#if NCPUS > 1 && AT386 + mp_disable_preemption(); + if (cpu_number() != master_cpu) { + mp_enable_preemption(); + return(1); + } +#endif + /* + * Setup device. + */ + outb(RTC_ADDR, RTC_A); + outb(RTC_DATA, RTC_DIV2 | RTC_RATE6); + outb(RTC_ADDR, RTC_B); + outb(RTC_DATA, RTC_HM); + + /* + * Probe the device by trying to read it. + */ + BbcFlag = (rtcget(&rtclk) ? 0 : 1); + if (BbcFlag) + printf("battery clock configured\n"); + else + printf("WARNING: Battery Clock Failure!\n"); +#if NCPUS > 1 && AT386 + mp_enable_preemption(); +#endif + return (BbcFlag); +} + +/* + * Get the current clock time. 
+ */ +kern_return_t +bbc_gettime( + mach_timespec_t *cur_time) /* OUT */ +{ + struct rtc_st rtclk; + time_t n; + int sec, min, hr, dom, mon, yr; + int i, days = 0; + spl_t s; + thread_t thread; + +#if NCPUS > 1 && AT386 + if ((thread = current_thread()) != THREAD_NULL) { + thread_bind(thread, master_processor); + mp_disable_preemption(); + if (current_processor() != master_processor) { + mp_enable_preemption(); + thread_block((void (*)) 0); + } else { + mp_enable_preemption(); + } + } +#endif + s = LOCK_BBC(); + rtcget(&rtclk); + sec = hexdectodec(rtclk.rtc_sec); + min = hexdectodec(rtclk.rtc_min); + hr = hexdectodec(rtclk.rtc_hr); + dom = hexdectodec(rtclk.rtc_dom); + mon = hexdectodec(rtclk.rtc_mon); + yr = hexdectodec(rtclk.rtc_yr); + yr = (yr < 70) ? yr+100 : yr; + n = sec + 60 * min + 3600 * hr; + n += (dom - 1) * 3600 * 24; + if (yeartoday(yr) == 366) + month[1] = 29; + for (i = mon - 2; i >= 0; i--) + days += month[i]; + month[1] = 28; + for (i = 70; i < yr; i++) + days += yeartoday(i); + n += days * 3600 * 24; + cur_time->tv_sec = n; + cur_time->tv_nsec = 0; + UNLOCK_BBC(s); + +#if NCPUS > 1 && AT386 + if (thread != THREAD_NULL) + thread_bind(thread, PROCESSOR_NULL); +#endif + return (KERN_SUCCESS); +} + +/* + * Set the current clock time. 
+ */ +kern_return_t +bbc_settime( + mach_timespec_t *new_time) +{ + struct rtc_st rtclk; + time_t n; + int diff, i, j; + spl_t s; + thread_t thread; + +#if NCPUS > 1 && AT386 + if ((thread = current_thread()) != THREAD_NULL) { + thread_bind(thread, master_processor); + mp_disable_preemption(); + if (current_processor() != master_processor) { + mp_enable_preemption(); + thread_block((void (*)) 0); + } else { + mp_enable_preemption(); + } + } +#endif + s = LOCK_BBC(); + rtcget(&rtclk); + diff = 0; + n = (new_time->tv_sec - diff) % (3600 * 24); /* hrs+mins+secs */ + rtclk.rtc_sec = dectohexdec(n%60); + n /= 60; + rtclk.rtc_min = dectohexdec(n%60); + rtclk.rtc_hr = dectohexdec(n/60); + n = (new_time->tv_sec - diff) / (3600 * 24); /* days */ + rtclk.rtc_dow = (n + 4) % 7; /* 1/1/70 is Thursday */ + for (j = 1970; n >= (i = yeartoday(j)); j++) + n -= i; + rtclk.rtc_yr = dectohexdec(j % 100); + if (yeartoday(j) == 366) + month[1] = 29; + for (i = 0; n >= month[i]; i++) + n -= month[i]; + month[1] = 28; + rtclk.rtc_mon = dectohexdec(++i); + rtclk.rtc_dom = dectohexdec(++n); + rtcput(&rtclk); + UNLOCK_BBC(s); + +#if NCPUS > 1 && AT386 + if (thread != THREAD_NULL) + thread_bind(current_thread(), PROCESSOR_NULL); +#endif + return (KERN_SUCCESS); +} + +/* + * Get clock device attributes. 
+ */ +kern_return_t +bbc_getattr( + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + if (*count != 1) + return (KERN_FAILURE); + switch (flavor) { + + case CLOCK_GET_TIME_RES: /* >0 res */ + *(clock_res_t *) attr = NSEC_PER_SEC; + break; + + case CLOCK_ALARM_CURRES: /* =0 no alarm */ + case CLOCK_ALARM_MINRES: + case CLOCK_ALARM_MAXRES: + *(clock_res_t *) attr = 0; + break; + + default: + return (KERN_INVALID_VALUE); + } + return (KERN_SUCCESS); +} + + +/* DEVICE SPECIFIC ROUTINES */ + +int +rtcget( + struct rtc_st * regs) +{ + outb(RTC_ADDR, RTC_D); + if (inb(RTC_DATA) & RTC_VRT == 0) + return (-1); + outb(RTC_ADDR, RTC_A); + while (inb(RTC_DATA) & RTC_UIP) /* busy wait */ + outb(RTC_ADDR, RTC_A); + load_rtc((unsigned char *)regs); + return (0); +} + +void +rtcput( + struct rtc_st * regs) +{ + register unsigned char x; + + outb(RTC_ADDR, RTC_B); + x = inb(RTC_DATA); + outb(RTC_ADDR, RTC_B); + outb(RTC_DATA, x | RTC_SET); + save_rtc((unsigned char *)regs); + outb(RTC_ADDR, RTC_B); + outb(RTC_DATA, x & ~RTC_SET); +} + +int +yeartoday( + int year) +{ + return((year % 4) ? 365 : + ((year % 100) ? 366 : ((year % 400) ? 365: 366))); +} + +int +hexdectodec( + char n) +{ + return ((((n >> 4) & 0x0F) * 10) + (n & 0x0F)); +} + +char +dectohexdec( + int n) +{ + return ((char)(((n / 10) << 4) & 0xF0) | ((n % 10) & 0x0F)); +} diff --git a/osfmk/i386/AT386/bbclock_entries.h b/osfmk/i386/AT386/bbclock_entries.h new file mode 100644 index 000000000..1fd8ca1d7 --- /dev/null +++ b/osfmk/i386/AT386/bbclock_entries.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +extern kern_return_t bbc_gettime( + mach_timespec_t * curtime); +extern kern_return_t bbc_settime( + mach_timespec_t * curtime); + +#define NO_SETALRM (void (*) (mach_timespec_t * alarm_time))0 diff --git a/osfmk/i386/AT386/conf.c b/osfmk/i386/AT386/conf.c new file mode 100644 index 000000000..67632a323 --- /dev/null +++ b/osfmk/i386/AT386/conf.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Device switch for i386 AT bus. + */ + +#include +#include +#include + +#include + +/* + * Clock device subsystem configuration. The clock_list[] + * table contains the clock structures for all clocks in + * the system. + */ + +extern struct clock_ops sysclk_ops, calend_ops; + +/* + * List of clock devices. + */ +struct clock clock_list[] = { + + /* SYSTEM_CLOCK */ + { &sysclk_ops, 0, 0, 0 }, + + /* CALENDAR_CLOCK */ + { &calend_ops, 0, 0, 0 }, +}; +int clock_count = sizeof(clock_list) / sizeof(clock_list[0]); diff --git a/osfmk/i386/AT386/config.h b/osfmk/i386/AT386/config.h new file mode 100644 index 000000000..44fd99948 --- /dev/null +++ b/osfmk/i386/AT386/config.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:38 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:39 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1996/04/17 17:48:30 davidp + * Needed for SVR4 device drivers + * [1996/04/11 13:03:48 davidp] + * + * Revision 1.1.1.2 1996/04/10 12:28:49 calvert + * Needed for SVR4 device drivers + * + * $EndLog$ + */ diff --git a/osfmk/i386/AT386/cram.h b/osfmk/i386/AT386/cram.h new file mode 100644 index 000000000..a56bef22f --- /dev/null +++ b/osfmk/i386/AT386/cram.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:38 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:39 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:33:48 ezf + * change marker to not FREE + * [1994/09/22 21:16:06 ezf] + * + * Revision 1.1.2.2 1993/06/02 23:19:06 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:01:43 jeffc] + * + * Revision 1.1 1992/09/30 02:26:37 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/05/14 16:22:13 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:16:50 mrt + * Changed to new Mach copyright + * [91/02/01 17:42:45 mrt] + * + * Revision 2.3 90/11/26 14:49:30 rvb + * jsb bet me to XMK34, sigh ... + * [90/11/26 rvb] + * Synched 2.5 & 3.0 at I386q (r1.5.1.3) & XMK35 (r2.3) + * [90/11/15 rvb] + * + * Revision 2.2 90/05/03 15:41:48 dbg + * First checkin. + * + * Revision 1.5.1.2 90/02/28 15:49:23 rvb + * Fix numerous typo's in Olivetti disclaimer. + * [90/02/28 rvb] + * + * Revision 1.5.1.1 90/01/08 13:31:57 rvb + * Add Olivetti copyright. + * [90/01/08 rvb] + * + * Revision 1.5 89/09/25 12:26:32 rvb + * File was provided by Intel 9/18/89. + * [89/09/23 rvb] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * cram.h + */ + +/* + * Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., + * Cupertino, California. + * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Olivetti + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. 
+ * + * OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ + +/* + * outb(CMOS_ADDR, addr); + * result = inb(CMOS_DATA); + * + * where "addr" tells what value you want to read (some are listed + * below). Interrupts should be disabled while you do this. + */ + +/* I/O ports */ + +#define CMOS_ADDR 0x70 /* port for CMOS ram address */ +#define CMOS_DATA 0x71 /* port for CMOS ram data */ + + +/* Addresses, related masks, and potential results */ + +#define CMOS_EB 0x14 /* read Equipment Byte */ +#define CM_SCRMSK 0x30 /* mask for EB query to get screen */ +#define CM_EGA_VGA 0x00 /* "not CGA or MONO" */ +#define CM_CGA_40 0x10 +#define CM_CGA_80 0x20 +#define CM_MONO_80 0x30 + diff --git a/osfmk/i386/AT386/himem.c b/osfmk/i386/AT386/himem.c new file mode 100644 index 000000000..1c13d7695 --- /dev/null +++ b/osfmk/i386/AT386/himem.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:38 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:39 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.17.7 1995/08/21 20:33:13 devrcs + * ri-osc CR1547: Fix himem buffer translation to cope with non + * page-aligned addresses. + * [1995/08/08 16:51:58 bolinger] + * + * Revision 1.3.17.6 1995/02/24 15:51:12 alanl + * DIPC: Merge from nmk17b2 to nmk18b8. + * Notes: lock package cleanup. + * [95/01/23 alanl] + * [95/02/24 alanl] + * + * Revision 1.3.17.5 1995/01/26 22:14:52 ezf + * removed extraneous CMU CR + * [1995/01/26 20:24:45 ezf] + * + * Revision 1.3.17.4 1995/01/10 04:51:04 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * * Rev 1.3.17.3 1994/10/21 18:41:39 joe + * Added ETAP support + * [1994/12/09 20:37:48 dwm] + * + * mk6 CR764 - s/spinlock/simple_lock/ (name change only) + * [1994/11/10 05:25:33 dwm] + * + * mk6 CR668 - 1.3b26 merge + * * Revision 1.3.5.8 1994/05/06 18:44:06 tmt + * Fix prototypes for new device signatures. + * * Revision 1.3.5.6 1993/12/10 18:08:15 jeffc + * CR10305 -- locking bug in himem_reserve(): change call to + * vm_page_free to VM_PAGE_FREE. + * * Revision 1.3.5.5 1993/11/19 17:56:58 jeffc + * CR10125 -- Uninitialized lock in himem_convert. Add himem_init + * CR9461 -- Locking bug in himem_convert - must retake lock after + * thread_sleep. 
+ * * End1.3merge + * [1994/11/04 09:07:39 dwm] + * + * Revision 1.3.17.1 1994/06/14 03:04:20 toshi + * Merge MK6 and NMK17 + * [1994/06/14 01:06:55 toshi] + * + * Revision 1.3.15.2 1994/06/08 21:14:24 dswartz + * Preemption merge. + * [1994/06/08 21:12:29 dswartz] + * + * Revision 1.3.15.1 1994/05/19 20:30:23 dwm + * mk6 CR 74. Locking bug in himem_reserve(): use VM_PAGE_FREE. + * mk6 CR 9461. Init hil_lock used by himem_convert(); + * retake lock after sleeping. + * [1994/05/19 20:30:07 dwm] + * + * Revision 1.3.11.1 1994/02/09 07:27:07 bernadat + * Added himem_init() for module initialization. + * [93/08/12 paire] + * + * Take back hil_lock lock on return from thread_sleep() + * [93/07/16 bernadat] + * + * Add vm_page_gobble() calls where needed. (dwm bug #542) + * Change from NORMA_MK14.6 [1993/02/09 22:24:00 dwm] + * [93/07/16 bernadat] + * [94/02/08 bernadat] + * + * Revision 1.3.5.4 1993/08/09 19:37:19 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:50:02 dswartz] + * + * Revision 1.3.5.3 1993/08/03 22:21:26 bernard + * CR#9523 - ANSI prototype fixes. + * [1993/08/03 15:34:10 bernard] + * + * Revision 1.3.5.2 1993/06/09 02:25:18 gm + * CR9157 - Find himem.h in the right place. + * [1993/05/28 17:27:23 brezak] + * + * Revision 1.3 1993/04/19 16:09:46 devrcs + * make endif tags ansi compliant/include files + * [1993/02/20 21:46:44 david] + * + * Print an appropriate message when going out of HIMEM pages. + * [93/01/26 bernadat] + * + * Revision 1.2 1992/11/25 01:07:08 robert + * integrate changes below for norma_14 + * [1992/11/13 19:28:44 robert] + * + * $EndLog$ + */ + +/* + * support of memory above 16 Megs for DMA limited to memory + * below 16 Megs. 
Copies high memory lo low memory before DMA + * write operations and does the reverse at completion time for + * DMA read operations + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +hil_t hil_head; +decl_simple_lock_data(,hil_lock) + +#if HIMEM_STATS +int himem_request; /* number of requests */ +int himem_used; /* number of times used */ +#endif /* HIMEM_STATS */ + +void +himem_init( + void) +{ + simple_lock_init(&hil_lock, ETAP_VM_HIMEM); +} + +/* + * Called by drivers, this indicates himem that this driver might need + * to allocate as many as npages pages in a single I/O DMA transfer + */ + +void +himem_reserve( + int npages) +{ + register i = 0; + vm_page_t free_head = VM_PAGE_NULL; + vm_page_t low; + hil_t hil; + spl_t ipl; + extern vm_offset_t avail_end; + + if (avail_end <= HIGH_MEM) + return; + hil = (hil_t)kalloc(npages*sizeof(struct himem_link)); + if (hil == (hil_t)0) + panic("himem_reserve: kalloc failed\n"); + + for (i=0; i < npages-1; i++) + (hil+i)->next = hil+i+1; + + /* + * This is the only way of getting low physical pages + * wtithout changing VM internals + */ + for (i=0; i != npages;) { + if ((low = vm_page_grab()) == VM_PAGE_NULL) + panic("No low memory pages for himem\n"); + vm_page_gobble(low); /* mark as consumed internally */ + if (_high_mem_page(low->phys_addr)) { + low->pageq.next = (queue_entry_t)free_head; + free_head = low; + } else { + (hil+i)->low_page = low->phys_addr; + i++; + } + } + + for (low = free_head; low; low = free_head) { + free_head = (vm_page_t) low->pageq.next; + VM_PAGE_FREE(low); + } + + ipl = splhi(); + simple_lock(&hil_lock); + (hil+npages-1)->next = hil_head; + hil_head = hil; + simple_unlock(&hil_lock); + splx(ipl); +} + +/* + * Called by driver at DMA initialization time. Converts a high memory + * physical page to a low memory one. If operation is a write, + * [phys_addr, phys_addr+length-1] is copied to new page. 
Caller must + * provide a pointer to a pointer to a himem_list. This is used to store + * all the conversions and is use at completion time to revert the pages. + * This pointer must point to a null hil_t value for the call on the first + * page of a DMA transfer. + */ + +vm_offset_t +himem_convert( + vm_offset_t phys_addr, + vm_size_t length, + int io_op, + hil_t *hil) +{ + hil_t h; + spl_t ipl; + vm_offset_t offset = phys_addr & (I386_PGBYTES - 1); + + assert (offset + length <= I386_PGBYTES); + + ipl = splhi(); + simple_lock(&hil_lock); + while (!(h = hil_head)) { + printf("WARNING: out of HIMEM pages\n"); + thread_sleep_simple_lock((event_t)&hil_head, + simple_lock_addr(hil_lock), FALSE); + simple_lock (&hil_lock); + } + hil_head = hil_head->next; + simple_unlock(&hil_lock); + splx(ipl); + + h->high_addr = phys_addr; + + if (io_op == D_WRITE) { + bcopy((char *)phystokv(phys_addr), (char *)phystokv(h->low_page + offset), + length); + h->length = 0; + } else { + h->length = length; + } + h->offset = offset; + + assert(!*hil || (*hil)->high_addr); + + h->next = *hil; + *hil = h; + return(h->low_page + offset); +} + +/* + * Called by driver at DMA completion time. Converts a list of low memory + * physical page to the original high memory one. 
If operation was read, + * [phys_addr, phys_addr+lenght-1] is copied to original page + */ + +void +himem_revert( + hil_t hil) +{ + hil_t next; + boolean_t wakeup = FALSE; + spl_t ipl; + + while(hil) { + if (hil->length) { + bcopy((char *)phystokv(hil->low_page + hil->offset), + (char *)phystokv(hil->high_addr), + hil->length); + } + hil->high_addr = 0; + hil->length = 0; + hil->offset = 0; + next = hil->next; + ipl = splhi(); + simple_lock(&hil_lock); + if (!(hil->next = hil_head)) + wakeup = TRUE; + hil_head = hil; + simple_unlock(&hil_lock); + splx(ipl); + hil = next; + } + if (wakeup) + thread_wakeup((event_t)&hil_head); +} diff --git a/osfmk/i386/AT386/himem.h b/osfmk/i386/AT386/himem.h new file mode 100644 index 000000000..c0ae26d10 --- /dev/null +++ b/osfmk/i386/AT386/himem.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:38 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:39 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.11.4 1995/12/15 10:49:49 bernadat + * cbus includes moved to busses/cbus + * [95/12/15 bernadat] + * + * Revision 1.3.11.3 1995/08/21 20:33:23 devrcs + * ri-osc CR1547: Fix himem buffer translation to cope with non + * page-aligned addresses. + * [1995/08/08 16:52:06 bolinger] + * + * Revision 1.3.11.2 1995/01/26 22:14:56 ezf + * removed extraneous CMU CR + * [1995/01/26 20:24:48 ezf] + * + * Revision 1.3.9.2 1994/06/08 21:14:27 dswartz + * Preemption merge. + * [1994/06/08 21:12:31 dswartz] + * + * Revision 1.3.9.1 1994/05/19 20:30:30 dwm + * mk6 CR 80. Add himem_init prototype. + * [1994/05/19 20:30:10 dwm] + * + * Revision 1.3.2.3 1993/08/09 19:37:21 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:50:06 dswartz] + * + * Revision 1.3.2.2 1993/06/09 02:25:24 gm + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 18:58:40 jeffc] + * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:01:57 jeffc] + * + * Revision 1.3 1993/04/19 16:09:54 devrcs + * Use free copyright + * [1993/03/03 12:12:12 bernadat] + * + * Revision 1.2 1992/11/25 01:07:16 robert + * integrate changes below for norma_14 + * [1992/11/13 19:28:57 robert] + * + * $EndLog$ + */ + +#ifndef _I386AT_HIMEM_H_ +#define _I386AT_HIMEM_H_ + +/* + * support of memory above 16 Megs for DMA limited to memory + * below 16 Megs. 
+ */ + +#include + +#define HIMEM_STATS 0 + +#if HIMEM_STATS +extern int himem_request; +extern int himem_used; +#endif /* HIMEM_STATS */ + +struct himem_link { + struct himem_link *next; + vm_offset_t high_addr; /* physical address */ + vm_offset_t low_page; /* physical page */ + vm_offset_t offset; /* offset on page */ + vm_size_t length; +}; + +typedef struct himem_link *hil_t; + + +#define HIGH_MEM ((vm_offset_t) 0xf00000) + +#define _high_mem_page(x) ((vm_offset_t)(x) >= HIGH_MEM) + + +#if HIMEM_STATS +#define high_mem_page(x) \ + (++himem_request && _high_mem_page(x) && ++himem_used) + +#else /* HIMEM_STATS */ +#define high_mem_page(x) _high_mem_page(x) +#endif /* HIMEM_STATS */ + +extern void himem_init(void); +extern void himem_reserve( + int npages); +extern vm_offset_t himem_convert( + vm_offset_t paddr, + vm_size_t len, + int op, + hil_t * hil); +extern void himem_revert( + hil_t hil); + +#endif /* _I386AT_HIMEM_H_ */ diff --git a/osfmk/i386/AT386/iso_scan_font.h b/osfmk/i386/AT386/iso_scan_font.h new file mode 100644 index 000000000..25ac9734f --- /dev/null +++ b/osfmk/i386/AT386/iso_scan_font.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * ISO Latin-1 Font + * + * Copyright (c) 2000 + * Ka-Ping Yee + * + * This font may be freely used for any purpose. + */ + +/* + * adjusted 'A' 'V' to improve their dense appearance (ie. lightened) + * adjusted 'i' 'l' to improve their flow within a word (ie. widened) + * adjusted 'E' 'F' '#' + */ + +unsigned char iso_font[256*16] = { +/* 0 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 1 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 2 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 3 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 4 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 5 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 6 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 7 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 8 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 9 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 10 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 11 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 12 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 13 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 14 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 15 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 16 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 17 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 18 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 19 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 20 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 21 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 22 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 23 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 24 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 25 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 26 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 27 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 28 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 29 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 30 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 31 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 32 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 33 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 34 */ 0x00,0x00,0x6c,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 35 */ 0x00,0x00,0x00,0x36,0x36,0x7f,0x36,0x36,0x7f,0x36,0x36,0x00,0x00,0x00,0x00,0x00, +/* 36 */ 0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x3e,0x68,0x68,0x6b,0x3e,0x08,0x08,0x00,0x00, +/* 37 */ 0x00,0x00,0x00,0x33,0x13,0x18,0x08,0x0c,0x04,0x06,0x32,0x33,0x00,0x00,0x00,0x00, +/* 38 
*/ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x6c,0x3e,0x33,0x33,0x7b,0xce,0x00,0x00,0x00,0x00, +/* 39 */ 0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 40 */ 0x00,0x00,0x30,0x18,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x18,0x30,0x00,0x00,0x00, +/* 41 */ 0x00,0x00,0x0c,0x18,0x18,0x30,0x30,0x30,0x30,0x30,0x18,0x18,0x0c,0x00,0x00,0x00, +/* 42 */ 0x00,0x00,0x00,0x00,0x00,0x36,0x1c,0x7f,0x1c,0x36,0x00,0x00,0x00,0x00,0x00,0x00, +/* 43 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00, +/* 44 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, +/* 45 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 46 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 47 */ 0x00,0x00,0x60,0x20,0x30,0x10,0x18,0x08,0x0c,0x04,0x06,0x02,0x03,0x00,0x00,0x00, +/* 48 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x6b,0x6b,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 49 */ 0x00,0x00,0x18,0x1e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 50 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x30,0x18,0x0c,0x06,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 51 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x3c,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 52 */ 0x00,0x00,0x30,0x38,0x3c,0x36,0x33,0x7f,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00, +/* 53 */ 0x00,0x00,0x7f,0x03,0x03,0x3f,0x60,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 54 */ 0x00,0x00,0x3c,0x06,0x03,0x03,0x3f,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 55 */ 0x00,0x00,0x7f,0x60,0x30,0x30,0x18,0x18,0x18,0x0c,0x0c,0x0c,0x00,0x00,0x00,0x00, +/* 56 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x3e,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 57 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7e,0x60,0x60,0x60,0x30,0x1e,0x00,0x00,0x00,0x00, +/* 58 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 59 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, +/* 
60 */ 0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00, +/* 61 */ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 62 */ 0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00, +/* 63 */ 0x00,0x00,0x3e,0x63,0x60,0x30,0x30,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 64 */ 0x00,0x00,0x3c,0x66,0x73,0x7b,0x6b,0x6b,0x7b,0x33,0x06,0x3c,0x00,0x00,0x00,0x00, +/* 65 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 66 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x3f,0x63,0x63,0x63,0x63,0x3f,0x00,0x00,0x00,0x00, +/* 67 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x00,0x00,0x00,0x00, +/* 68 */ 0x00,0x00,0x1f,0x33,0x63,0x63,0x63,0x63,0x63,0x63,0x33,0x1f,0x00,0x00,0x00,0x00, +/* 69 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 70 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, +/* 71 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x73,0x63,0x63,0x66,0x7c,0x00,0x00,0x00,0x00, +/* 72 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 73 */ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 74 */ 0x00,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00,0x00,0x00,0x00, +/* 75 */ 0x00,0x00,0x63,0x33,0x1b,0x0f,0x07,0x07,0x0f,0x1b,0x33,0x63,0x00,0x00,0x00,0x00, +/* 76 */ 0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 77 */ 0x00,0x00,0x63,0x63,0x77,0x7f,0x7f,0x6b,0x6b,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 78 */ 0x00,0x00,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, +/* 79 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 80 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, +/* 81 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x6f,0x7b,0x3e,0x30,0x60,0x00,0x00, 
+/* 82 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x1b,0x33,0x63,0x63,0x00,0x00,0x00,0x00, +/* 83 */ 0x00,0x00,0x3e,0x63,0x03,0x03,0x0e,0x38,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 84 */ 0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 85 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 86 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, +/* 87 */ 0x00,0x00,0x63,0x63,0x6b,0x6b,0x6b,0x6b,0x7f,0x36,0x36,0x36,0x00,0x00,0x00,0x00, +/* 88 */ 0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x36,0x36,0x63,0x63,0x00,0x00,0x00,0x00, +/* 89 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 90 */ 0x00,0x00,0x7f,0x30,0x30,0x18,0x18,0x0c,0x0c,0x06,0x06,0x7f,0x00,0x00,0x00,0x00, +/* 91 */ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00, +/* 92 */ 0x00,0x00,0x03,0x02,0x06,0x04,0x0c,0x08,0x18,0x10,0x30,0x20,0x60,0x00,0x00,0x00, +/* 93 */ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00, +/* 94 */ 0x00,0x08,0x1c,0x36,0x63,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 95 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00, +/* 96 */ 0x00,0x00,0x0c,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 97 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 98 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x00,0x00,0x00,0x00, +/* 99 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 100 */ 0x00,0x00,0x60,0x60,0x60,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 101 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 102 */ 0x00,0x00,0x3c,0x66,0x06,0x1f,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00, +/* 103 */ 
0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0x63,0x3e,0x00, +/* 104 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 105 */ 0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 106 */ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00, +/* 107 */ 0x00,0x00,0x03,0x03,0x03,0x63,0x33,0x1b,0x0f,0x1f,0x33,0x63,0x00,0x00,0x00,0x00, +/* 108 */ 0x00,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 109 */ 0x00,0x00,0x00,0x00,0x00,0x35,0x6b,0x6b,0x6b,0x6b,0x6b,0x6b,0x00,0x00,0x00,0x00, +/* 110 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 111 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 112 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x03,0x03,0x03,0x00, +/* 113 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0xe0,0x60,0x00, +/* 114 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, +/* 115 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x0e,0x38,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 116 */ 0x00,0x00,0x00,0x0c,0x0c,0x3e,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 117 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 118 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, +/* 119 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x6b,0x6b,0x6b,0x3e,0x36,0x36,0x00,0x00,0x00,0x00, +/* 120 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x36,0x1c,0x1c,0x1c,0x36,0x63,0x00,0x00,0x00,0x00, +/* 121 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, +/* 122 */ 0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x30,0x18,0x0c,0x06,0x7f,0x00,0x00,0x00,0x00, +/* 123 */ 0x00,0x00,0x70,0x18,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00, +/* 124 */ 
0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00, +/* 125 */ 0x00,0x00,0x0e,0x18,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00, +/* 126 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 127 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 128 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 129 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 130 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 131 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 132 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 133 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 134 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 135 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 136 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 137 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 138 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 139 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 140 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 141 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 142 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 143 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 144 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 145 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 146 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 147 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 148 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 149 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 150 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 151 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 152 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 153 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 154 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 155 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 156 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 157 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 158 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 159 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 160 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 161 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00, +/* 162 */ 0x00,0x00,0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x0b,0x6b,0x3e,0x08,0x08,0x00,0x00, +/* 163 */ 0x00,0x00,0x1c,0x36,0x06,0x06,0x1f,0x06,0x06,0x07,0x6f,0x3b,0x00,0x00,0x00,0x00, +/* 164 */ 0x00,0x00,0x00,0x00,0x66,0x3c,0x66,0x66,0x66,0x3c,0x66,0x00,0x00,0x00,0x00,0x00, +/* 165 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x7e,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, +/* 166 */ 
0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 167 */ 0x00,0x3c,0x66,0x0c,0x1e,0x33,0x63,0x66,0x3c,0x18,0x33,0x1e,0x00,0x00,0x00,0x00, +/* 168 */ 0x00,0x00,0x36,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 169 */ 0x00,0x00,0x3c,0x42,0x99,0xa5,0x85,0xa5,0x99,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, +/* 170 */ 0x00,0x1e,0x30,0x3e,0x33,0x3b,0x36,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 171 */ 0x00,0x00,0x00,0x00,0x00,0x6c,0x36,0x1b,0x1b,0x36,0x6c,0x00,0x00,0x00,0x00,0x00, +/* 172 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x60,0x60,0x00,0x00,0x00,0x00,0x00,0x00, +/* 173 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 174 */ 0x00,0x00,0x3c,0x42,0x9d,0xa5,0x9d,0xa5,0xa5,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, +/* 175 */ 0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 176 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 177 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x7e,0x00,0x00,0x00,0x00,0x00, +/* 178 */ 0x00,0x1e,0x33,0x18,0x0c,0x06,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 179 */ 0x00,0x1e,0x33,0x18,0x30,0x33,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 180 */ 0x00,0x30,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 181 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x76,0x6e,0x06,0x06,0x03,0x00, +/* 182 */ 0x00,0x00,0x7e,0x2f,0x2f,0x2f,0x2e,0x28,0x28,0x28,0x28,0x28,0x00,0x00,0x00,0x00, +/* 183 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 184 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x30,0x1e,0x00, +/* 185 */ 0x00,0x0c,0x0e,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 186 */ 0x00,0x1e,0x33,0x33,0x33,0x33,0x1e,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 187 */ 
0x00,0x00,0x00,0x00,0x00,0x1b,0x36,0x6c,0x6c,0x36,0x1b,0x00,0x00,0x00,0x00,0x00, +/* 188 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, +/* 189 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x1c,0x36,0x18,0x0c,0x3e,0x00,0x00, +/* 190 */ 0x00,0x1c,0x36,0x18,0x36,0x1c,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, +/* 191 */ 0x00,0x00,0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x06,0x06,0x03,0x63,0x3e,0x00,0x00, +/* 192 */ 0x0c,0x18,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 193 */ 0x18,0x0c,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 194 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 195 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 196 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 197 */ 0x1c,0x36,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 198 */ 0x00,0x00,0xfe,0x33,0x33,0x33,0xff,0x33,0x33,0x33,0x33,0xf3,0x00,0x00,0x00,0x00, +/* 199 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x18,0x30,0x1e,0x00, +/* 200 */ 0x0c,0x18,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 201 */ 0x18,0x0c,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 202 */ 0x08,0x14,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 203 */ 0x36,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 204 */ 0x0c,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 205 */ 0x30,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 206 */ 0x18,0x24,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 207 */ 0x66,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 208 */ 
0x00,0x00,0x1e,0x36,0x66,0x66,0x6f,0x66,0x66,0x66,0x36,0x1e,0x00,0x00,0x00,0x00, +/* 209 */ 0x6e,0x3b,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, +/* 210 */ 0x06,0x0c,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 211 */ 0x30,0x18,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 212 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 213 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 214 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 215 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0x18,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00, +/* 216 */ 0x00,0x20,0x3e,0x73,0x73,0x6b,0x6b,0x6b,0x6b,0x67,0x67,0x3e,0x02,0x00,0x00,0x00, +/* 217 */ 0x0c,0x18,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 218 */ 0x18,0x0c,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 219 */ 0x08,0x14,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 220 */ 0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 221 */ 0x30,0x18,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 222 */ 0x00,0x00,0x0f,0x06,0x3e,0x66,0x66,0x66,0x66,0x3e,0x06,0x0f,0x00,0x00,0x00,0x00, +/* 223 */ 0x00,0x00,0x1e,0x33,0x33,0x1b,0x33,0x63,0x63,0x63,0x63,0x3b,0x00,0x00,0x00,0x00, +/* 224 */ 0x00,0x0c,0x18,0x30,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 225 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 226 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 227 */ 0x00,0x00,0x6e,0x3b,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 228 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 229 */ 
0x00,0x1c,0x36,0x1c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 230 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0xdb,0xd8,0xfe,0x1b,0xdb,0x76,0x00,0x00,0x00,0x00, +/* 231 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x18,0x30,0x1e,0x00, +/* 232 */ 0x00,0x0c,0x18,0x30,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 233 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 234 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 235 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 236 */ 0x00,0x06,0x0c,0x18,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 237 */ 0x00,0x18,0x0c,0x06,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 238 */ 0x00,0x08,0x1c,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 239 */ 0x00,0x00,0x36,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 240 */ 0x00,0x00,0x2c,0x18,0x34,0x60,0x7c,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00, +/* 241 */ 0x00,0x00,0x6e,0x3b,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 242 */ 0x00,0x06,0x0c,0x18,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 243 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 244 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 245 */ 0x00,0x00,0x6e,0x3b,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 246 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 247 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, +/* 248 */ 0x00,0x00,0x00,0x00,0x20,0x3e,0x73,0x6b,0x6b,0x6b,0x67,0x3e,0x02,0x00,0x00,0x00, +/* 249 */ 0x00,0x06,0x0c,0x18,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 250 */ 
0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 251 */ 0x00,0x08,0x1c,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 252 */ 0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 253 */ 0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, +/* 254 */ 0x00,0x00,0x0f,0x06,0x06,0x3e,0x66,0x66,0x66,0x66,0x66,0x3e,0x06,0x06,0x0f,0x00, +/* 255 */ 0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00 +}; + +#define ISO_CHAR_MIN 0x00 +#define ISO_CHAR_MAX 0xFF +#define ISO_CHAR_HEIGHT 16 diff --git a/osfmk/i386/AT386/kernBootStruct.h b/osfmk/i386/AT386/kernBootStruct.h new file mode 100644 index 000000000..0978b9fe7 --- /dev/null +++ b/osfmk/i386/AT386/kernBootStruct.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * kernBootStruct.h + * What the booter leaves behind for the kernel. + */ + +/* The config table has room for 13 drivers if their config files + * are the maximum size allowed. 
+ */ +#define CONFIG_SIZE (13 * 4096) + +/* Maximum number of boot drivers supported, assuming their + * config files fit in the bootstruct. + */ +#define NDRIVERS 64 + +typedef struct { + char *address; // address where driver was loaded + int size; // entry point for driver +} driver_config_t; + +typedef struct { + unsigned short major_vers; // == 0 if not present + unsigned short minor_vers; + unsigned long cs32_base; + unsigned long cs16_base; + unsigned long ds_base; + unsigned long cs_length; + unsigned long ds_length; + unsigned long entry_offset; + union { + struct { + unsigned long mode_16 :1; + unsigned long mode_32 :1; + unsigned long idle_slows_cpu :1; + unsigned long reserved :29; + } f; + unsigned long data; + } flags; + unsigned long connected; +} APM_config_t; + +typedef struct _EISA_slot_info_t { + union { + struct { + unsigned char duplicateID :4; + unsigned char slotType :1; + unsigned char prodIDPresent :1; + unsigned char dupIDPresent :1; + } s; + unsigned char d; + } u_ID; + unsigned char configMajor; + unsigned char configMinor; + unsigned short checksum; + unsigned char numFunctions; + union { + struct { + unsigned char fnTypesPresent :1; + unsigned char memoryPresent :1; + unsigned char irqPresent :1; + unsigned char dmaPresent :1; + unsigned char portRangePresent:1; + unsigned char portInitPresent :1; + unsigned char freeFormPresent :1; + unsigned char reserved:1; + } s; + unsigned char d; + } u_resources; + unsigned char id[8]; +} EISA_slot_info_t; + +typedef struct _EISA_func_info_t { + unsigned char slot; + unsigned char function; + unsigned char reserved[2]; + unsigned char data[320]; +} EISA_func_info_t; + +#define NUM_EISA_SLOTS 64 + +typedef struct _PCI_bus_info_t { + union { + struct { + unsigned char configMethod1 :1; + unsigned char configMethod2 :1; + unsigned char :2; + unsigned char specialCycle1 :1; + unsigned char specialCycle2 :1; + } s; + unsigned char d; + } u_bus; + unsigned char maxBusNum; + unsigned char majorVersion; 
+ unsigned char minorVersion; + unsigned char BIOSPresent; +} PCI_bus_info_t; + +/* + * Video information.. + */ + +struct boot_video { + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_display; /* Display Code (if Applicable */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth */ +}; + +typedef struct boot_video boot_video; + +#define BOOT_STRING_LEN 160 + +typedef struct { + short version; + char bootString[BOOT_STRING_LEN];// string we booted with + int magicCookie; // KERNBOOTMAGIC if struct valid + int numIDEs; // how many IDE drives + int rootdev; // booters guess as to rootdev + int convmem; // conventional memory + int extmem; // extended memory + char boot_file[128]; // name of the kernel we booted + int first_addr0; // first address for kern convmem + int diskInfo[4]; // bios info for bios dev 80-83 + int graphicsMode; // did we boot in graphics mode? 
+ int kernDev; // device kernel was fetched from + int numBootDrivers; // number of drivers loaded by booter + char *configEnd; // pointer to end of config files + int kaddr; // kernel load address + int ksize; // size of kernel + void *rld_entry; // entry point for standalone rld + + driver_config_t driverConfig[NDRIVERS]; + APM_config_t apm_config; + + char _reserved[7500]; + + boot_video video; + + PCI_bus_info_t pciInfo; + + int eisaConfigFunctions; + EISA_slot_info_t eisaSlotInfo[NUM_EISA_SLOTS];// EISA slot information + + char config[CONFIG_SIZE]; // the config file contents +} KERNBOOTSTRUCT; + +#define GRAPHICS_MODE 1 +#define TEXT_MODE 0 + +#define KERNSTRUCT_ADDR ((KERNBOOTSTRUCT *)0x11000) +#define KERNBOOTMAGIC 0xa7a7a7a7 + +#ifndef EISA_CONFIG_ADDR +#define EISA_CONFIG_ADDR 0x20000 +#define EISA_CONFIG_LEN 0x10000 +#endif + +#ifndef KERNEL +extern KERNBOOTSTRUCT *kernBootStruct; +#endif diff --git a/osfmk/i386/AT386/machdep.mk b/osfmk/i386/AT386/machdep.mk new file mode 100644 index 000000000..00c31d4df --- /dev/null +++ b/osfmk/i386/AT386/machdep.mk @@ -0,0 +1,57 @@ +# +# @OSF_COPYRIGHT@ +# +# +# HISTORY +# +# Revision 1.1.1.1 1998/09/22 21:05:39 wsanchez +# Import of Mac OS X kernel (~semeria) +# +# Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez +# Import of OSF Mach kernel (~mburg) +# +# Revision 1.1.17.1 1996/11/29 16:56:53 stephen +# nmklinux_1.0b3_shared into pmk1.1 +# Export "kd.h". +# [96/01/29 barbou] +# +# Revision 1.1.8.1 1994/09/23 01:45:12 ezf +# change marker to not FREE +# [1994/09/22 21:19:36 ezf] +# +# Revision 1.1.4.2 1993/06/04 15:12:16 jeffc +# CR9193 - Added export for "machine" headers. +# Created from mach_kernel/include/i386/AT386/machdep.mk. 
+# [1993/06/04 13:40:42 jeffc] +# +# Revision 1.3 93/04/19 16:17:41 devrcs +# updated for ODE 1.2 reno make +# [1993/02/20 21:37:01 david] +# +# $EndLog$ + +AT386_DATAFILES = disk.h kd.h + +#AT386_DATAFILES = asm_startup.h \ +# atbus.h \ +# blitreg.h \ +# blituser.h \ +# blitvar.h \ +# comreg.h \ +# cram.h \ +# disk.h \ +# eisa.h \ +# himem.h \ +# fdreg.h \ +# hdreg.h \ +# i8250.h \ +# i82586.h \ +# if_3c501.h \ +# if_3c503.h \ +# if_ns8390.h \ +# if_pc586.h \ +# if_wd8003.h \ +# kd.h \ +# kd_queue.h \ +# kdsoft.h \ +# rtc.h diff --git a/osfmk/i386/AT386/misc_protos.h b/osfmk/i386/AT386/misc_protos.h new file mode 100644 index 000000000..6b6f6a2d8 --- /dev/null +++ b/osfmk/i386/AT386/misc_protos.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _AT386_MISC_PROTOS_H_ +#define _AT386_MISC_PROTOS_H_ + +#include /* for dev_t */ +#include /* for vm_offset_t */ + +/* + * i386/AT386/model_dep.c + */ + +extern void i386_init(void); +extern void machine_init(void); +extern void machine_startup(void); + +/* + * i386/AT386/kd.c + */ + +extern void cninit(void); +extern void kdreboot(void); + +/* + * i386/locore.s + */ + +extern void kdb_kintr(void); +extern void kgdb_kintr(void); + +/* + * i386/db_interface.c + */ + +extern void kdb_console(void); + +/* + * i386/bcopy.s + */ + +extern void bcopy16( + char * from, + char * to, + int count); + +typedef void (*i386_intr_t)(void); +#endif + diff --git a/osfmk/i386/AT386/model_dep.c b/osfmk/i386/AT386/model_dep.c new file mode 100644 index 000000000..1e43a8458 --- /dev/null +++ b/osfmk/i386/AT386/model_dep.c @@ -0,0 +1,667 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * File: model_dep.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Copyright (C) 1986, Avadis Tevanian, Jr., Michael Wayne Young + * + * Basic initialization for I386 - ISA bus machines. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if MACH_KDB +#include +#endif /* MACH_KDB */ +#include +#ifdef __MACHO__ +#include +#include +#include +#endif + +#if NCPUS > 1 +#include +#endif /* NCPUS */ + +#if MP_V1_1 +#include +#endif /* MP_V1_1 */ + +vm_size_t mem_size = 0; +vm_offset_t first_addr = 0; /* set by start.s - keep out of bss */ +vm_offset_t first_avail = 0;/* first after page tables */ +vm_offset_t last_addr; + +vm_offset_t avail_start, avail_end; +vm_offset_t virtual_avail, virtual_end; +vm_offset_t hole_start, hole_end; +vm_offset_t avail_next; +unsigned int avail_remaining; + +/* parameters passed from bootstrap loader */ +int cnvmem = 0; /* must be in .data section */ +int extmem = 0; + +/* FIXME!! 
REMOVE WHEN OSFMK DEVICES ARE COMPLETELY PULLED OUT */ +int dev_name_count = 0; +int dev_name_list = 0; + +#ifndef __MACHO__ +extern char edata, end; +#endif + +extern char version[]; + +int rebootflag = 0; /* exported to kdintr */ + + +void parse_arguments(void); +const char *getenv(const char *); + +#define BOOT_LINE_LENGTH 160 +char boot_string_store[BOOT_LINE_LENGTH] = {0}; +char *boot_string = (char *)0; +int boot_string_sz = BOOT_LINE_LENGTH; +int boottype = 0; + +#if __MACHO__ +#include +vm_offset_t edata, etext, end; + +extern struct mach_header _mh_execute_header; +void *sectTEXTB; int sectSizeTEXT; +void *sectDATAB; int sectSizeDATA; +void *sectOBJCB; int sectSizeOBJC; +void *sectLINKB; int sectSizeLINK; + +/* Kernel boot information */ +KERNBOOTSTRUCT kernBootStructData; +KERNBOOTSTRUCT *kernBootStruct; +#endif + +vm_offset_t kern_args_start = 0; /* kernel arguments */ +vm_size_t kern_args_size = 0; /* size of kernel arguments */ + +#ifdef __MACHO__ + +unsigned long +i386_preinit() +{ + int i; + struct segment_command *sgp; + struct section *sp; + + sgp = (struct segment_command *) getsegbyname("__DATA"); + if (sgp) { + sp = (struct section *) firstsect(sgp); + if (sp) { + do { + if (sp->flags & S_ZEROFILL) + bzero((char *) sp->addr, sp->size); + } while (sp = (struct section *)nextsect(sgp, sp)); + } + } + + + bcopy((char *) KERNSTRUCT_ADDR, (char *) &kernBootStructData, + sizeof(kernBootStructData)); + + kernBootStruct = &kernBootStructData; + + end = getlastaddr(); + + for (i = 0; i < kernBootStruct->numBootDrivers; i++) + end += kernBootStruct->driverConfig[i].size; + + end = round_page(end); + + return end; +} +#endif + +/* + * Cpu initialization. Running virtual, but without MACH VM + * set up. First C routine called. + */ +void +machine_startup(void) +{ + +#ifdef __MACHO__ + + + /* Now copy over various bits.. 
*/ + cnvmem = kernBootStruct->convmem; + extmem = kernBootStruct->extmem; + kern_args_start = (vm_offset_t) kernBootStruct->bootString; + kern_args_size = strlen(kernBootStruct->bootString); + boottype = kernBootStruct->rootdev; + + /* Now retrieve addresses for end, edata, and etext + * from MACH-O headers. + */ + + sectTEXTB = (void *) getsegdatafromheader( + &_mh_execute_header, "__TEXT", §SizeTEXT); + sectDATAB = (void *) getsegdatafromheader( + &_mh_execute_header, "__DATA", §SizeDATA); + sectOBJCB = (void *) getsegdatafromheader( + &_mh_execute_header, "__OBJC", §SizeOBJC); + sectLINKB = (void *) getsegdatafromheader( + &_mh_execute_header, "__LINKEDIT", §SizeLINK); + + etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; + edata = (vm_offset_t) sectDATAB + sectSizeDATA; +#endif + + /* + * Parse startup arguments + */ + parse_arguments(); + + disableDebugOuput = FALSE; + debug_mode = TRUE; + + printf_init(); /* Init this in case we need debugger */ + panic_init(); /* Init this in case we need debugger */ + + PE_init_platform(FALSE, kernBootStruct); + PE_init_kprintf(FALSE); + PE_init_printf(FALSE); + + /* + * Do basic VM initialization + */ + i386_init(); + + PE_init_platform(TRUE, kernBootStruct); + PE_init_kprintf(TRUE); + PE_init_printf(TRUE); + +#if MACH_KDB + + /* + * Initialize the kernel debugger. + */ + ddb_init(); + + /* + * Cause a breakpoint trap to the debugger before proceeding + * any further if the proper option bit was specified in + * the boot flags. + * + * XXX use -a switch to invoke kdb, since there's no + * boot-program switch to turn on RB_HALT! 
+ */ + + if (halt_in_debugger) { + printf("inline call to debugger(machine_startup)\n"); + Debugger("inline call"); + } +#endif /* MACH_KDB */ + TR_INIT(); + + printf(version); + + machine_slot[0].is_cpu = TRUE; + machine_slot[0].running = TRUE; +#ifdef MACH_BSD + /* FIXME */ + machine_slot[0].cpu_type = CPU_TYPE_I386; + machine_slot[0].cpu_subtype = CPU_SUBTYPE_PENTPRO; +#else + machine_slot[0].cpu_type = cpuid_cputype(0); + machine_slot[0].cpu_subtype = CPU_SUBTYPE_AT386; +#endif + + /* + * Start the system. + */ +#if NCPUS > 1 + mp_desc_init(0); +#endif /* NCPUS */ + + setup_main(); +} + + +vm_offset_t env_start = 0; /* environment */ +vm_size_t env_size = 0; /* size of environment */ + +/* + * Parse command line arguments. + */ +void +parse_arguments(void) +{ + char *p = (char *) kern_args_start; + char *endp = (char *) kern_args_start + kern_args_size - 1; + char ch; + + if (kern_args_start == 0) + return; + while (p < endp) { + if (*p++ != '-') { + while (*p++ != '\0') + ; + continue; + } + while (ch = *p++) { + switch (ch) { + case 'h': + halt_in_debugger = 1; + break; + case 'm': /* -m??: memory size Mbytes*/ + mem_size = atoi_term(p, &p)*1024*1024; + break; + case 'k': /* -k??: memory size Kbytes */ + mem_size = atoi_term(p, &p)*1024; + break; + default: +#if NCPUS > 1 && AT386 + if (ch > '0' && ch <= '9') + wncpu = ch - '0'; +#endif /* NCPUS > 1 && AT386 */ + break; + } + } + } +} + +const char * +getenv(const char *name) +{ + int len = strlen(name); + const char *p = (const char *)env_start; + const char *endp = p + env_size; + + while (p < endp) { + if (len >= endp - p) + break; + if (strncmp(name, p, len) == 0 && *(p + len) == '=') + return p + len + 1; + while (*p++) + ; + } + return NULL; +} + +extern void +calibrate_delay(void); + +/* + * Find devices. The system is alive. 
+ */ +void +machine_init(void) +{ + int unit; + const char *p; + int n; + + /* + * Adjust delay count before entering drivers + */ + + calibrate_delay(); + + /* + * Display CPU identification + */ + cpuid_cpu_display("CPU identification", 0); + cpuid_cache_display("CPU configuration", 0); + +#if MP_V1_1 + mp_v1_1_init(); +#endif /* MP_V1_1 */ + + /* + * Set up to use floating point. + */ + init_fpu(); + +#if 0 +#if NPCI > 0 + dma_zones_init(); +#endif /* NPCI > 0 */ +#endif + + /* + * Configure clock devices. + */ + clock_config(); +} + +/* + * Halt a cpu. + */ +void +halt_cpu(void) +{ + halt_all_cpus(FALSE); +} + +int reset_mem_on_reboot = 1; + +/* + * Halt the system or reboot. + */ +void +halt_all_cpus( + boolean_t reboot) +{ + if (reboot) { + /* + * Tell the BIOS not to clear and test memory. + */ + if (! reset_mem_on_reboot) + *(unsigned short *)phystokv(0x472) = 0x1234; + + kdreboot(); + } + else { + rebootflag = 1; + printf("In tight loop: hit ctl-alt-del to reboot\n"); + (void) spllo(); + } + for (;;) + continue; +} + +/* + * Basic VM initialization. + */ + +void +i386_init(void) +{ + int i,j; /* Standard index vars. */ + vm_size_t bios_hole_size; + +#ifndef __MACHO__ + /* + * Zero the BSS. + */ + + bzero((char *)&edata,(unsigned)(&end - &edata)); +#endif + + boot_string = &boot_string_store[0]; + + /* + * Initialize the pic prior to any possible call to an spl. + */ + + set_cpu_model(); + vm_set_page_size(); + + /* + * Initialize the Event Trace Analysis Package + * Static Phase: 1 of 2 + */ + etap_init_phase1(); + + /* + * Compute the memory size. + */ + +#if 1 + /* FIXME + * fdisk needs to change to use a sysctl instead of + * opening /dev/kmem and reading out the kernboot structure + */ + + first_addr = (char *)(KERNSTRUCT_ADDR) + sizeof(KERNBOOTSTRUCT); +#else +#if NCPUS > 1 + first_addr = 0x1000; +#else + /* First two pages are used to boot the other cpus. 
*/ + /* TODO - reclaim pages after all cpus have booted */ + + first_addr = 0x3000; +#endif +#endif + + /* BIOS leaves data in low memory */ + last_addr = 1024*1024 + extmem*1024; + /* extended memory starts at 1MB */ + + bios_hole_size = 1024*1024 - trunc_page((vm_offset_t)(1024 * cnvmem)); + + /* + * Initialize for pmap_free_pages and pmap_next_page. + * These guys should be page-aligned. + */ + + hole_start = trunc_page((vm_offset_t)(1024 * cnvmem)); + hole_end = round_page((vm_offset_t)first_avail); + + /* + * compute mem_size + */ + + if (mem_size != 0) { + if (mem_size < (last_addr) - bios_hole_size) + last_addr = mem_size + bios_hole_size; + } + + first_addr = round_page(first_addr); + last_addr = trunc_page(last_addr); + mem_size = last_addr - bios_hole_size; + + avail_start = first_addr; + avail_end = last_addr; + avail_next = avail_start; + + /* + * Initialize kernel physical map, mapping the + * region from loadpt to avail_start. + * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS. + */ + + +#if NCPUS > 1 && AT386 + /* + * Must Allocate interrupt stacks before kdb is called and also + * before vm is initialized. Must find out number of cpus first. + */ + /* + * Get number of cpus to boot, passed as an optional argument + * boot: mach [-sah#] # from 0 to 9 is the number of cpus to boot + */ + if (wncpu == -1) { + /* + * "-1" check above is to allow for old boot loader to pass + * wncpu through boothowto. New boot loader uses environment. 
+ */ + const char *cpus; + if ((cpus = getenv("cpus")) != NULL) { + /* only a single digit for now */ + if ((*cpus > '0') && (*cpus <= '9')) + wncpu = *cpus - '0'; + } else + wncpu = NCPUS; + } + mp_probe_cpus(); + interrupt_stack_alloc(); + +#endif /* NCPUS > 1 && AT386 */ + + pmap_bootstrap(0); + + avail_remaining = atop((avail_end - avail_start) - + (hole_end - hole_start)); +} + +unsigned int +pmap_free_pages(void) +{ + return avail_remaining; +} + +boolean_t +pmap_next_page( + vm_offset_t *addrp) +{ + if (avail_next == avail_end) + return FALSE; + + /* skip the hole */ + + if (avail_next == hole_start) + avail_next = hole_end; + + *addrp = avail_next; + avail_next += PAGE_SIZE; + avail_remaining--; + + return TRUE; +} + +boolean_t +pmap_valid_page( + vm_offset_t x) +{ + return ((avail_start <= x) && (x < avail_end)); +} + +/*XXX*/ +void fc_get(mach_timespec_t *ts); +#include +#include +extern kern_return_t sysclk_gettime( + mach_timespec_t *cur_time); +void fc_get(mach_timespec_t *ts) { + (void )sysclk_gettime(ts); +} + +void +Debugger( + const char *message) +{ + printf("Debugger called: <%s>\n", message); + + __asm__("int3"); +} + +void +display_syscall(int syscall) +{ + printf("System call happened %d\n", syscall); +} + +#if XPR_DEBUG && (NCPUS == 1 || MP_V1_1) + +extern kern_return_t sysclk_gettime_interrupts_disabled( + mach_timespec_t *cur_time); + +int xpr_time(void) +{ + mach_timespec_t time; + + sysclk_gettime_interrupts_disabled(&time); + return(time.tv_sec*1000000 + time.tv_nsec/1000); +} +#endif /* XPR_DEBUG && (NCPUS == 1 || MP_V1_1) */ + +enable_bluebox() +{ +} +disable_bluebox() +{ +} + +char * +machine_boot_info(char *buf, vm_size_t size) +{ + *buf ='\0'; + return buf; +} + diff --git a/osfmk/i386/AT386/mp/boot.h b/osfmk/i386/AT386/mp/boot.h new file mode 100644 index 000000000..e6551c15f --- /dev/null +++ b/osfmk/i386/AT386/mp/boot.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:39 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:40 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 01:45:53 ezf + * change marker to not FREE + * [1994/09/22 21:19:54 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:27:00 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:02:53 jeffc] + * + * Revision 1.2 1993/04/19 16:12:08 devrcs + * Fixed Copyrights + * [92/12/16 bernadat] + * + * Changed MP_GDT from 1200 to 1100 to save unused space. + * [92/12/08 bernadat] + * + * Revision 1.1 1992/09/30 02:27:14 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.1.3.1 92/04/30 11:57:14 bernadat + * Moved from cbus to here, applies to both Corollary + * and SystemPro + * [92/04/08 bernadat] + * + * Revision 2.1.9.1 92/02/18 18:34:14 jeffreyh + * Created + * [91/06/27 05:00:05 bernadat] + * + */ +/* CMU_ENDHIST */ + +/* + * Define where to store boot code for slaves + */ + +#define MP_BOOT 0x1000 /* address where slave boots are loaded */ +#define MP_BOOTSEG 0x100 +#define MP_GDT 0x1100 /* temporary gdt address for boot */ +#define MP_BOOTSTACK 0x800 /* stack for boot */ +#define MP_MACH_START MP_BOOTSTACK /* contains address where to jump + after boot */ diff --git a/osfmk/i386/AT386/mp/mp.c b/osfmk/i386/AT386/mp/mp.c new file mode 100644 index 000000000..df8f04a51 --- /dev/null +++ b/osfmk/i386/AT386/mp/mp.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +/* + */ + +#include +#include + +#if NCPUS > 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int cpu_int_word[NCPUS]; + +extern void cpu_interrupt(int cpu); +extern int get_ncpus(void); + +/* + * Generate a clock interrupt on next running cpu + * + * Instead of having the master processor interrupt + * all active processors, each processor in turn interrupts + * the next active one. This avoids all slave processors + * accessing the same R/W data simultaneously. + */ + +void +slave_clock(void) +{ +} + +void +i386_signal_cpus(int event) +{ +} + +/*ARGSUSED*/ +void +init_ast_check( + processor_t processor) +{ +} + +void +cause_ast_check( + processor_t processor) +{ +} + +/*ARGSUSED*/ +kern_return_t +cpu_start( + int slot_num) +{ + printf("cpu_start not implemented\n"); + return (KERN_FAILURE); +} + + +int real_ncpus; +int wncpu = -1; + +/* + * Find out how many cpus will run + */ + +void +mp_probe_cpus(void) +{ + int i; + + /* + * get real number of cpus + */ + + real_ncpus = get_ncpus(); + + if (wncpu <= 0) + wncpu = NCPUS; + + /* + * Ignore real number of cpus it if number of requested cpus + * is smaller. + * Keep it if number of requested cpu is null or larger. + */ + + if (real_ncpus < wncpu) + wncpu = real_ncpus; +#if MP_V1_1 + { + extern void validate_cpus(int); + + /* + * We do NOT have CPUS numbered contiguously. + */ + + validate_cpus(wncpu); + } +#else + for (i=0; i < wncpu; i++) + machine_slot[i].is_cpu = TRUE; +#endif +} + +/* + * invoke kdb on slave processors + */ + +void +remote_kdb(void) +{ +} + +/* + * Clear kdb interrupt + */ + +void +clear_kdb_intr(void) +{ +} +#else /* NCPUS > 1 */ +int cpu_int_word[NCPUS]; +#endif /* NCPUS > 1 */ diff --git a/osfmk/i386/AT386/mp/mp.h b/osfmk/i386/AT386/mp/mp.h new file mode 100644 index 000000000..e63593cf1 --- /dev/null +++ b/osfmk/i386/AT386/mp/mp.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +/* + */ + +#ifndef _I386AT_MP_H_ +#define _I386AT_MP_H_ + +#include +#include +#include +#include +#include + +#if NCPUS > 1 +#include +#include + +#define CPU_NUMBER(r) \ + movl EXT(lapic_id), r ; \ + movl 0(r),r ; \ + shrl $LAPIC_ID_SHIFT, r; \ + andl $LAPIC_ID_MASK, r + + +#define MP_IPL SPL6 /* software interrupt level */ + +/* word describing the reason for the interrupt, one per cpu */ + +#ifndef ASSEMBLER +#include +extern cpu_int_word[]; +extern real_ncpus; /* real number of cpus */ +extern wncpu; /* wanted number of cpus */ +decl_simple_lock_data(extern,kdb_lock) /* kdb lock */ + +extern int kdb_cpu; /* current cpu running kdb */ +extern int kdb_debug; +extern int kdb_is_slave[]; +extern int kdb_active[]; +#endif /* ASSEMBLER */ + +#define i_bit(bit, word) ((long)(*(word)) & ((long)1 << (bit))) + + +/* + * Device driver synchronization. + * + * at386_io_lock(op) and at386_io_unlock() are called + * by device drivers when accessing H/W. The underlying + * Processing is machine dependant. 
But the op argument + * to the at386_io_lock is generic + */ + +#define MP_DEV_OP_MAX 4 +#define MP_DEV_WAIT MP_DEV_OP_MAX /* Wait for the lock */ + +/* + * If the caller specifies an op value different than MP_DEV_WAIT, the + * at386_io_lock function must return true if lock was successful else + * false + */ + +#define MP_DEV_OP_START 0 /* If lock busy, register a pending start op */ +#define MP_DEV_OP_INTR 1 /* If lock busy, register a pending intr */ +#define MP_DEV_OP_TIMEO 2 /* If lock busy, register a pending timeout */ +#define MP_DEV_OP_CALLB 3 /* If lock busy, register a pending callback */ + +#else /* NCPUS > 1 */ +#define at386_io_lock_state() +#define at386_io_lock(op) (TRUE) +#define at386_io_unlock() +#if MP_V1_1 +#include +#endif /* MP_V1_1 */ +#endif /* NCPUS > 1 */ + +#if MACH_RT +#define _DISABLE_PREEMPTION(r) \ + movl $CPD_PREEMPTION_LEVEL,r ; \ + incl %gs:(r) + +#define _ENABLE_PREEMPTION(r) \ + movl $CPD_PREEMPTION_LEVEL,r ; \ + decl %gs:(r) ; \ + jne 9f ; \ + pushl %eax ; \ + pushl %ecx ; \ + pushl %edx ; \ + call EXT(kernel_preempt_check) ; \ + popl %edx ; \ + popl %ecx ; \ + popl %eax ; \ +9: + +#define _ENABLE_PREEMPTION_NO_CHECK(r) \ + movl $CPD_PREEMPTION_LEVEL,r ; \ + decl %gs:(r) + +#if MACH_ASSERT +#define DISABLE_PREEMPTION(r) \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call EXT(_disable_preemption); \ + popl %edx; \ + popl %ecx; \ + popl %eax +#define ENABLE_PREEMPTION(r) \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call EXT(_enable_preemption); \ + popl %edx; \ + popl %ecx; \ + popl %eax +#define ENABLE_PREEMPTION_NO_CHECK(r) \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call EXT(_enable_preemption_no_check); \ + popl %edx; \ + popl %ecx; \ + popl %eax +#if NCPUS > 1 +#define MP_DISABLE_PREEMPTION(r) \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call EXT(_mp_disable_preemption); \ + popl %edx; \ + popl %ecx; \ + popl %eax +#define MP_ENABLE_PREEMPTION(r) \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + 
call EXT(_mp_enable_preemption); \ + popl %edx; \ + popl %ecx; \ + popl %eax +#define MP_ENABLE_PREEMPTION_NO_CHECK(r) \ + pushl %eax; \ + pushl %ecx; \ + pushl %edx; \ + call EXT(_mp_enable_preemption_no_check); \ + popl %edx; \ + popl %ecx; \ + popl %eax +#else /* NCPUS > 1 */ +#define MP_DISABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION_NO_CHECK(r) +#endif /* NCPUS > 1 */ +#else /* MACH_ASSERT */ +#define DISABLE_PREEMPTION(r) _DISABLE_PREEMPTION(r) +#define ENABLE_PREEMPTION(r) _ENABLE_PREEMPTION(r) +#define ENABLE_PREEMPTION_NO_CHECK(r) _ENABLE_PREEMPTION_NO_CHECK(r) +#if NCPUS > 1 +#define MP_DISABLE_PREEMPTION(r) _DISABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION(r) _ENABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION_NO_CHECK(r) _ENABLE_PREEMPTION_NO_CHECK(r) +#else /* NCPUS > 1 */ +#define MP_DISABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION_NO_CHECK(r) +#endif /* NCPUS > 1 */ +#endif /* MACH_ASSERT */ + +#else /* MACH_RT */ +#define DISABLE_PREEMPTION(r) +#define ENABLE_PREEMPTION(r) +#define ENABLE_PREEMPTION_NO_CHECK(r) +#define MP_DISABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION(r) +#define MP_ENABLE_PREEMPTION_NO_CHECK(r) +#endif /* MACH_RT */ + +#endif /* _I386AT_MP_H_ */ diff --git a/osfmk/i386/AT386/mp/mp_events.h b/osfmk/i386/AT386/mp/mp_events.h new file mode 100644 index 000000000..2a2e60998 --- /dev/null +++ b/osfmk/i386/AT386/mp/mp_events.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef __AT386_MP_EVENTS__ +#define __AT386_MP_EVENTS__ + +/* Interrupt types */ + +#define MP_TLB_FLUSH 0x00 +#define MP_CLOCK 0x01 +#define MP_KDB 0x02 +#define MP_AST 0x03 +#define MP_SOFTCLOCK 0x04 +#define MP_INT_AVAIL 0x05 +#define MP_AST_URGENT 0x06 +#define MP_TLB_RELOAD 0x07 + +#ifndef ASSEMBLER +extern void i386_signal_cpus(int event); +#endif + +#endif diff --git a/osfmk/i386/AT386/mp/mp_v1_1.c b/osfmk/i386/AT386/mp/mp_v1_1.c new file mode 100644 index 000000000..51d1da78b --- /dev/null +++ b/osfmk/i386/AT386/mp/mp_v1_1.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MP_DEBUG 1 + +#if MP_DEBUG +vm_offset_t bios_start; +#endif /* MP_DEBUG */ + +unsigned int lapic_id_initdata = 0; +int lapic_id = (int)&lapic_id_initdata; +vm_offset_t lapic_start; + +void lapic_init(void); +int get_ncpus(void); +void validate_cpus(int ncpus); +void cpu_interrupt(int cpu); +void slave_boot(int cpu); + +boolean_t mp_v1_1_initialized = FALSE; + +void +mp_v1_1_init(void) +{ + /*WILL BE REMOVED IN FUTURE REVISION!!! !*/ + /* SIMPLY COMMENTED OUT FOR THE MOMENT */ + return; +} + +void +lapic_init(void) +{ +} + +void +cpu_interrupt( + int cpu) +{ +} + +#if NCPUS > 1 +void +slave_boot( + int cpu) +{ +} + +void +start_other_cpus(void) +{ +} + +void +validate_cpus(int ncpus) +{ + int i; + for(i=0;i 1 */ + +#if MACH_KDB +#include + +#define TRAP_DEBUG 0 /* Must match interrupt.s and spl.s */ + + +#if TRAP_DEBUG +#define MTRAPS 100 +struct mp_trap_hist_struct { + unsigned char type; + unsigned char data[5]; +} trap_hist[MTRAPS], *cur_trap_hist = trap_hist, + *max_trap_hist = &trap_hist[MTRAPS]; + +void db_trap_hist(void); + +/* + * SPL: + * 1: new spl + * 2: old spl + * 3: new tpr + * 4: old tpr + * INT: + * 1: int vec + * 2: old spl + * 3: new spl + * 4: post eoi tpr + * 5: exit tpr + */ + +void +db_trap_hist(void) +{ + int i,j; + for(i=0;i=cur_trap_hist)?"*":" ", + (trap_hist[i].type == 1)?"SPL":"INT"); + for(j=0;j<5;j++) + db_printf(" %02x", trap_hist[i].data[j]); + db_printf("\n"); + } + +} +#endif /* TRAP_DEBUG */ + +void db_lapic(int cpu); +unsigned int db_remote_read(int cpu, int reg); +void db_ioapic(unsigned int); +void kdb_console(void); + +void +kdb_console(void) +{ +} + +#define BOOLP(a) ((a)?' 
':'!') + +static char *DM[8] = { + "Fixed", + "Lowest Priority", + "Invalid", + "Invalid", + "NMI", + "Reset", + "Invalid", + "ExtINT"}; + +unsigned int +db_remote_read(int cpu, int reg) +{ + return -1; +} + +void +db_lapic(int cpu) +{ +} + +void +db_ioapic(unsigned int ind) +{ +} + +#endif /* MACH_KDB */ diff --git a/osfmk/i386/AT386/mp/mp_v1_1.h b/osfmk/i386/AT386/mp/mp_v1_1.h new file mode 100644 index 000000000..de330f41b --- /dev/null +++ b/osfmk/i386/AT386/mp/mp_v1_1.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MP_MP_V1_1_H_ +#define _MP_MP_V1_1_H_ + +#include +#include +#include + +struct MP_Config_EntryP { + unsigned char Entry_Type; + unsigned char Local_Apic_Id; + unsigned char Local_Apic_Version; + unsigned char CPU_Flags; + unsigned int CPU_Signature; + unsigned int Feature_Flags; + unsigned int Reserved[2]; +}; + +/* Entry types */ + +#define MP_CPU_ENTRY 0 /* Processor entry */ +#define MP_BUS_ENTRY 1 /* bus entry */ +#define MP_IO_APIC_ENTRY 2 /* I/O APIC entry */ +#define MP_IO_INT_ENTRY 3 /* I/O Interrupt assignment */ +#define MP_LOC_INT_ENTRY 4 /* Local Interrupt assignment */ + +struct MP_Config_EntryB { + unsigned char Entry_Type; + unsigned char Bus_Id; + char Ident[6]; +}; + +struct MP_Config_EntryA { + unsigned char Entry_Type; + unsigned char IO_Apic_Id; + unsigned char IO_Apic_Version; + unsigned char IO_Apic_Flags; + vm_offset_t IO_Apic_Address; +}; + +struct MP_Config_EntryI { + unsigned char Entry_Type; + unsigned char Int_Type; + unsigned short Int_Flag; + unsigned char Source_Bus; + unsigned char Source_IRQ; + unsigned char Dest_IO_Apic; + unsigned char Dest_INTIN; +}; +struct MP_Config_EntryL { + unsigned char Entry_Type; + unsigned char Int_Type; + unsigned short Int_Flag; + unsigned char Source_Bus; + unsigned char Source_IRQ; + unsigned char Dest_Local_Apic; + unsigned char Dest_INTIN; +}; + +struct MP_FPS_struct { + unsigned int Signature; + vm_offset_t Config_Ptr; + unsigned char Length; + unsigned char Spec_Rev; + unsigned char CheckSum; + unsigned char Feature[5]; +}; + +struct MP_Config_Table { + unsigned int Signature; + unsigned short Length; + unsigned char Spec_Rev; + unsigned char CheckSum; + char OEM[8]; + char PROD[12]; + vm_offset_t OEM_Ptr; + unsigned short OEM_Size; + unsigned short Entries; + vm_offset_t Local_Apic; + unsigned int Reserved; +}; + +#define IMCR_ADDRESS 0x22 +#define IMCR_DATA 0x23 +#define IMCR_SELECT 0x70 +#define IMCR_APIC_ENABLE 
0x01 + +#if 0 +extern boolean_t mp_v1_1_take_irq(int pic, + int unit, + int spl, + i386_intr_t intr); + +extern boolean_t mp_v1_1_reset_irq(int pic, + int *unit, + int *spl, + i386_intr_t *intr); + +#endif + +void mp_v1_1_init(void); +boolean_t mp_v1_1_io_lock(int, struct processor **); +void mp_v1_1_io_unlock(struct processor *); + +/* Intel default Configurations */ + +#define MP_PROPRIETARY_CONF 0 +#define MP_ISA_CONF 1 +#define MP_EISA_1_CONF 2 +#define MP_EISA_2_CONF 3 +#define MP_MCA_CONF 4 +#define MP_ISA_PCI_CONF 5 +#define MP_EISA_PCI_CONF 6 +#define MP_MCA_PCI_CONF 7 + +#if NCPUS > 1 +#define at386_io_lock_state() panic("at386_io_lock_state called") +#define at386_io_lock(x) panic("at386_io_lock called"); +#define at386_io_unlock() panic("at386_io_unlock") +#endif /* NCPUS > 1 */ + +#endif /* _MP_MP_V1_1_H_ */ diff --git a/osfmk/i386/AT386/mp/slave_boot.s b/osfmk/i386/AT386/mp/slave_boot.s new file mode 100644 index 000000000..4f55d2024 --- /dev/null +++ b/osfmk/i386/AT386/mp/slave_boot.s @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + + +#include "i386/asm.h" +#include "i386/AT386/mp/boot.h" + +#define CR0_PE_ON 0x1 +#define CR0_PE_OFF 0xfffffffe + + .file "slave_boot.s" + + .text + +#define LJMP(segment,address) \ + .byte 0xea ;\ + .long address-EXT(slave_boot_base) ;\ + .word segment + +#define LGDT(address) \ + .word 0x010f ;\ + .byte 0x15 ;\ + .long address-EXT(slave_boot_base) + +ENTRY(slave_boot_base) + /* code is loaded at 0x0:0x1000 */ + /* ljmp to the next instruction to set up %cs */ + data16 + LJMP(MP_BOOTSEG, EXT(slave_pstart)) + +ENTRY(slave_pstart) + /* set up %ds */ + mov %cs, %ax + mov %ax, %ds + + /* set up %ss and %esp */ + data16 + mov $MP_BOOTSEG, %eax + mov %ax, %ss + data16 + mov $MP_BOOTSTACK, %esp + + /*set up %es */ + mov %ax, %es + + /* change to protected mode */ + data16 + call EXT(real_to_prot) + + push MP_MACH_START + call EXT(startprog) + +/* + real_to_prot() + transfer from real mode to protected mode. +*/ + +ENTRY(real_to_prot) + /* guarantee that interrupt is disabled when in prot mode */ + cli + + /* load the gdtr */ + addr16 + data16 + LGDT(EXT(gdtr)) + + /* set the PE bit of CR0 */ + mov %cr0, %eax + + data16 + or $CR0_PE_ON, %eax + mov %eax, %cr0 + + /* make intrasegment jump to flush the processor pipeline and */ + /* reload CS register */ + data16 + LJMP(0x08, xprot) + +xprot: + /* we are in USE32 mode now */ + /* set up the protective mode segment registers : DS, SS, ES */ + mov $0x10, %eax + movw %ax, %ds + movw %ax, %ss + movw %ax, %es + + ret + +/* + startprog(phyaddr) + start the program on protected mode where phyaddr is the entry point +*/ + +ENTRY(startprog) + push %ebp + mov %esp, %ebp + + mov 0x8(%ebp), %ecx /* entry offset */ + mov $0x28, %ebx /* segment */ + push %ebx + push %ecx + + /* set up %ds and %es */ + mov $0x20, %ebx + movw %bx, %ds + movw %bx, %es + + lret + + + . 
= MP_GDT-MP_BOOT /* GDT location */ +ENTRY(Gdt) + +/* Segment Descriptor + * + * 31 24 19 16 7 0 + * ------------------------------------------------------------ + * | | |B| |A| | | |1|0|E|W|A| | + * | BASE 31..24 |G|/|0|V| LIMIT |P|DPL| TYPE | BASE 23:16 | + * | | |D| |L| 19..16| | |1|1|C|R|A| | + * ------------------------------------------------------------ + * | | | + * | BASE 15..0 | LIMIT 15..0 | + * | | | + * ------------------------------------------------------------ + */ + .word 0,0 /* 0x0 : null */ + .byte 0,0,0,0 + + .word 0xffff,MP_BOOT /* 0x8 : boot code */ + .byte 0,0x9e,0x40,0 + + .word 0xffff,MP_BOOT /* 0x10 : boot data */ + .byte 0,0x92,0x40,0 + + .word 0xffff,MP_BOOT /* 0x18 : boot code, 16 bits */ + .byte 0,0x9e,0x0,0 + + .word 0xffff,0 /* 0x20 : init data */ + .byte 0,0x92,0xcf,0 + + .word 0xffff,0 /* 0x28 : init code */ + .byte 0,0x9e,0xcf,0 + +ENTRY(gdtr) + .short 48 /* limit (8*6 segs) */ + .short MP_GDT /* base low */ + .short 0 /* base high */ + +ENTRY(slave_boot_end) + + + + + + + + + + + + + + diff --git a/osfmk/i386/AT386/physmem_entries.h b/osfmk/i386/AT386/physmem_entries.h new file mode 100644 index 000000000..ffb48fa50 --- /dev/null +++ b/osfmk/i386/AT386/physmem_entries.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:39 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1996/11/29 16:56:56 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Created. Prototypes for the "physmem" device. + * [1996/11/22 15:25:06 barbou] + * + * $EndLog$ + */ + +extern io_return_t physmem_open( + dev_t dev, + dev_mode_t flag, + io_req_t ior); +extern void physmem_close( + dev_t dev); +extern io_return_t physmem_read( + dev_t dev, + io_req_t ior); +extern io_return_t physmem_write( + dev_t dev, + io_req_t ior); +extern io_return_t physmem_getstat( + dev_t dev, + dev_flavor_t flavor, + dev_status_t data, + mach_msg_type_number_t * count); +extern io_return_t physmem_setstat( + dev_t dev, + dev_flavor_t flavor, + dev_status_t data, + mach_msg_type_number_t count); +extern vm_offset_t physmem_mmap( + dev_t dev, + vm_offset_t off, + vm_prot_t prot); +extern io_return_t phsymem_async_in( + dev_t dev, + ipc_port_t rcv_port, + int pri, + filter_t *filter, + mach_msg_type_number_t fcount, + device_t device); +extern void physmem_reset( + dev_t dev); +extern boolean_t phsymem_port_death( + dev_t dev, + ipc_port_t port); +extern io_return_t physmem_dev_info( + dev_t dev, + dev_flavor_t flavor, + char * info); diff --git a/osfmk/i386/AT386/rtc.h b/osfmk/i386/AT386/rtc.h new file mode 100644 index 
000000000..9dd18e914 --- /dev/null +++ b/osfmk/i386/AT386/rtc.h @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:39 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:40 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:47:30 ezf + * change marker to not FREE + * [1994/09/22 21:20:22 ezf] + * + * Revision 1.1.2.3 1993/08/09 19:39:04 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:51:17 dswartz] + * + * Revision 1.1.2.2 1993/06/02 23:21:32 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:03:17 jeffc] + * + * Revision 1.1 1992/09/30 02:27:20 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.7 91/05/14 16:30:03 mrt + * Correcting copyright + * + * Revision 2.6 91/03/16 14:47:03 rpd + * Fixed ioctl definitions for ANSI C. 
+ * [91/02/20 rpd] + * + * Revision 2.5 91/02/05 17:20:25 mrt + * Changed to new Mach copyright + * [91/02/01 17:47:16 mrt] + * + * Revision 2.4 90/11/26 14:51:02 rvb + * jsb bet me to XMK34, sigh ... + * [90/11/26 rvb] + * Synched 2.5 & 3.0 at I386q (r1.5.1.3) & XMK35 (r2.4) + * [90/11/15 rvb] + * + * Revision 1.5.1.2 90/07/27 11:27:06 rvb + * Fix Intel Copyright as per B. Davies authorization. + * [90/07/27 rvb] + * + * Revision 2.2 90/05/03 15:46:11 dbg + * First checkin. + * + * Revision 1.5.1.1 90/01/08 13:29:46 rvb + * Add Intel copyright. + * [90/01/08 rvb] + * + * Revision 1.5 89/09/25 12:27:37 rvb + * File was provided by Intel 9/18/89. + * [89/09/23 rvb] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. 
+ * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Intel + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. + * + * INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define RTC_ADDR 0x70 /* I/O port address for register select */ +#define RTC_DATA 0x71 /* I/O port address for data read/write */ + +/* + * Register A definitions + */ +#define RTC_A 0x0a /* register A address */ +#define RTC_UIP 0x80 /* Update in progress bit */ +#define RTC_DIV0 0x00 /* Time base of 4.194304 MHz */ +#define RTC_DIV1 0x10 /* Time base of 1.048576 MHz */ +#define RTC_DIV2 0x20 /* Time base of 32.768 KHz */ +#define RTC_RATE6 0x06 /* interrupt rate of 976.562 */ + +/* + * Register B definitions + */ +#define RTC_B 0x0b /* register B address */ +#define RTC_SET 0x80 /* stop updates for time set */ +#define RTC_PIE 0x40 /* Periodic interrupt enable */ +#define RTC_AIE 0x20 /* Alarm interrupt enable */ +#define RTC_UIE 0x10 /* Update ended interrupt enable */ +#define RTC_SQWE 0x08 /* Square wave enable */ +#define RTC_DM 0x04 /* Date mode, 1 = binary, 0 = BCD */ +#define RTC_HM 0x02 /* hour mode, 1 = 24 hour, 0 = 12 hour */ +#define RTC_DSE 0x01 /* Daylight savings enable */ + +/* + * 
Register C definitions + */ +#define RTC_C 0x0c /* register C address */ +#define RTC_IRQF 0x80 /* IRQ flag */ +#define RTC_PF 0x40 /* PF flag bit */ +#define RTC_AF 0x20 /* AF flag bit */ +#define RTC_UF 0x10 /* UF flag bit */ + +/* + * Register D definitions + */ +#define RTC_D 0x0d /* register D address */ +#define RTC_VRT 0x80 /* Valid RAM and time bit */ + +#define RTC_NREG 0x0e /* number of RTC registers */ +#define RTC_NREGP 0x0a /* number of RTC registers to set time */ + +#define RTCRTIME _IOR('c', 0x01, struct rtc_st) /* Read time from RTC */ +#define RTCSTIME _IOW('c', 0x02, struct rtc_st) /* Set time into RTC */ + +struct rtc_st { + char rtc_sec; + char rtc_asec; + char rtc_min; + char rtc_amin; + char rtc_hr; + char rtc_ahr; + char rtc_dow; + char rtc_dom; + char rtc_mon; + char rtc_yr; + char rtc_statusa; + char rtc_statusb; + char rtc_statusc; + char rtc_statusd; +}; + +/* + * this macro reads contents of real time clock to specified buffer + */ +#define load_rtc(regs) \ +{\ + register int i; \ + \ + for (i = 0; i < RTC_NREG; i++) { \ + outb(RTC_ADDR, i); \ + (regs)[i] = inb(RTC_DATA); \ + } \ +} + +/* + * this macro writes contents of specified buffer to real time clock + */ +#define save_rtc(regs) \ +{ \ + register int i; \ + for (i = 0; i < RTC_NREGP; i++) { \ + outb(RTC_ADDR, i); \ + outb(RTC_DATA, (regs)[i]);\ + } \ +} + + diff --git a/osfmk/i386/AT386/video_console.c b/osfmk/i386/AT386/video_console.c new file mode 100644 index 000000000..c795dadf6 --- /dev/null +++ b/osfmk/i386/AT386/video_console.c @@ -0,0 +1,1940 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* MACH PPC - video_console.c + * + * Original based on NetBSD's mac68k/dev/ite.c driver + * + * This driver differs in + * - MACH driver"ized" + * - Uses phys_copy and flush_cache to in several places + * for performance optimizations + * - 7x15 font + * - Black background and white (character) foreground + * - Assumes 6100/7100/8100 class of machine + * + * The original header follows... + * + * + * NetBSD: ite.c,v 1.16 1995/07/17 01:24:34 briggs Exp + * + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Utah $Hdr: ite.c 1.28 92/12/20$ + * + * @(#)ite.c 8.2 (Berkeley) 1/12/94 + */ + +/* + * ite.c + * + * The ite module handles the system console; that is, stuff printed + * by the kernel and by user programs while "desktop" and X aren't + * running. Some (very small) parts are based on hp300's 4.4 ite.c, + * hence the above copyright. 
+ * + * -- Brad and Lawrence, June 26th, 1994 + * + */ +#include +#include /* spl definitions */ +#include "iso_scan_font.h" +#include +#include +#include +#include +#include "video_console.h" + +#define CHARWIDTH 8 +#define CHARHEIGHT 16 + +#define ATTR_NONE 0 +#define ATTR_BOLD 1 +#define ATTR_UNDER 2 +#define ATTR_REVERSE 4 + +enum vt100state_e { + ESnormal, /* Nothing yet */ + ESesc, /* Got ESC */ + ESsquare, /* Got ESC [ */ + ESgetpars, /* About to get or getting the parameters */ + ESgotpars, /* Finished getting the parameters */ + ESfunckey, /* Function key */ + EShash, /* DEC-specific stuff (screen align, etc.) */ + ESsetG0, /* Specify the G0 character set */ + ESsetG1, /* Specify the G1 character set */ + ESask, + EScharsize, + ESignore /* Ignore this sequence */ +} vt100state = ESnormal; + +static struct vc_info vinfo; +#define IS_TEXT_MODE (vinfo.v_type == TEXT_MODE) + +/* Calculated in vccninit(): */ +static int vc_wrap_mode = 1, vc_relative_origin = 0; +static int vc_charset_select = 0, vc_save_charset_s = 0; +static int vc_charset[2] = { 0, 0 }; +static int vc_charset_save[2] = { 0, 0 }; + +/* VT100 state: */ +#define MAXPARS 16 +static int x = 0, y = 0, savex, savey; +static int par[MAXPARS], numpars, hanging_cursor, attr, saveattr; + +/* VT100 tab stops & scroll region */ +static char tab_stops[255]; +static int scrreg_top, scrreg_bottom; + +/* Misc */ +void vc_flush_forward_buffer(void); +void vc_store_char(unsigned char); + +/* + * For the color support (Michel Pollet) + */ +unsigned char vc_color_index_table[33] = + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }; + +unsigned long vc_color_depth_masks[4] = + { 0x000000FF, 0x00007FFF, 0x00FFFFFF }; + +unsigned long vc_colors[8][3] = { + { 0xFFFFFFFF, 0x00000000, 0x00000000 }, /* black */ + { 0x23232323, 0x7C007C00, 0x00FF0000 }, /* red */ + { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00 }, /* green */ + { 0x05050505, 0x7FE07FE0, 0x00FFFF00 }, /* yellow 
*/ + { 0xd2d2d2d2, 0x001f001f, 0x000000FF}, /* blue */ +// { 0x80808080, 0x31933193, 0x00666699 }, /* blue */ + { 0x18181818, 0x7C1F7C1F, 0x00FF00FF }, /* magenta */ + { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF }, /* cyan */ + { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF } /* white */ +}; + +unsigned long vc_color_mask = 0; +unsigned long vc_color_fore = 0; +unsigned long vc_color_back = 0; +int vc_normal_background = 1; + +/* + * For the jump scroll and buffering (Michel Pollet) + * 80*22 means on a 80*24 screen, the screen will + * scroll jump almost a full screen + * keeping only what's necessary for you to be able to read ;-) + */ +#define VC_MAX_FORWARD_SIZE (80*22) + +/* + * Delay between console updates in clock hz units, the larger the + * delay the fuller the jump-scroll buffer will be and so the faster the + * (scrolling) output. The smaller the delay, the less jerky the + * display. Heuristics show that at 10 touch-typists (Mike!) complain + */ +#define VC_CONSOLE_UPDATE_TIMEOUT 5 + +static unsigned char vc_forward_buffer[VC_MAX_FORWARD_SIZE]; +static long vc_forward_buffer_size = 0; +decl_simple_lock_data(,vc_forward_lock) + +/* Set to 1 by initialize_screen() */ +static int vc_initialized = 0; + +/* Function pointers initialized via initialize_screen() */ +static struct { + void (*initialize)(struct vc_info * vinfo_p); + void (*paintchar)(unsigned char c, int x, int y, int attrs); + void (*scrolldown)(int num); + void (*scrollup)(int num); + void (*clear_screen)(int xx, int yy, int which); + void (*show_cursor)(int x, int y); + void (*hide_cursor)(int x, int y); + void (*update_color)(int color, int fore); +} vc_ops; + +/* + * New Rendering code from Michel Pollet + */ + +#define REN_MAX_DEPTH 32 +/* that's the size for a 32 bits buffer... 
*/ +#define REN_MAX_SIZE (128L*1024) +unsigned char renderedFont[REN_MAX_SIZE]; + +/* Rendered Font Size */ +unsigned long vc_rendered_font_size = REN_MAX_SIZE; +long vc_rendered_error = 0; + +/* If the one bit table was reversed */ +short vc_one_bit_reversed = 0; + +/* Size of a character in the table (bytes) */ +int vc_rendered_char_size = 0; + +/* +# Attribute codes: +# 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed +# Text color codes: +# 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white +# Background color codes: +# 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white +*/ + +#define VC_RESET_BACKGROUND 40 +#define VC_RESET_FOREGROUND 37 + +static void vc_color_set(int color) +{ + if (vinfo.v_depth < 8) + return; + if (color >= 30 && color <= 37) { + vc_color_fore = vc_colors[color-30][vc_color_index_table[vinfo.v_depth]]; + if ( vc_ops.update_color ) vc_ops.update_color(color - 30, 1); + } + if (color >= 40 && color <= 47) { + vc_color_back = vc_colors[color-40][vc_color_index_table[vinfo.v_depth]]; + if ( vc_ops.update_color ) vc_ops.update_color(color - 40, 0); + vc_normal_background = color == 40; + } +} + +static void vc_render_font(short olddepth, short newdepth) +{ + int charIndex; /* index in ISO font */ + union { + unsigned char *charptr; + unsigned short *shortptr; + unsigned long *longptr; + } current; /* current place in rendered font, multiple types. 
*/ + + unsigned char *theChar; /* current char in iso_font */ + + if (olddepth == newdepth) + return; /* nothing to do */ + + vc_rendered_font_size = REN_MAX_SIZE; + if (newdepth == 1) { + vc_rendered_char_size = 16; + if (!vc_one_bit_reversed) { /* reverse the font for the blitter */ + int i; + for (i = 0; i < ((ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size); i++) { + if (iso_font[i]) { + unsigned char mask1 = 0x80; + unsigned char mask2 = 0x01; + unsigned char val = 0; + while (mask1) { + if (iso_font[i] & mask1) + val |= mask2; + mask1 >>= 1; + mask2 <<= 1; + } + renderedFont[i] = ~val; + } else renderedFont[i] = 0xff; + } + vc_one_bit_reversed = 1; + } + return; + } + { + long csize = newdepth / 8; /* bytes per pixel */ + vc_rendered_char_size = csize ? CHARHEIGHT * (csize * CHARWIDTH) : + /* for 2 & 4 */ CHARHEIGHT * (CHARWIDTH/(6-newdepth)); + csize = (ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size; + if (csize > vc_rendered_font_size) { + vc_rendered_error = csize; + return; + } else + vc_rendered_font_size = csize; + } + + current.charptr = renderedFont; + theChar = iso_font; + for (charIndex = ISO_CHAR_MIN; charIndex <= ISO_CHAR_MAX; charIndex++) { + int line; + for (line = 0; line < CHARHEIGHT; line++) { + unsigned char mask = 1; + do { + switch (newdepth) { + case 2: { + unsigned char value = 0; + if (*theChar & mask) value |= 0xC0; mask <<= 1; + if (*theChar & mask) value |= 0x30; mask <<= 1; + if (*theChar & mask) value |= 0x0C; mask <<= 1; + if (*theChar & mask) value |= 0x03; + value = ~value; + *current.charptr++ = value; + } + break; + case 4: + { + unsigned char value = 0; + if (*theChar & mask) value |= 0xF0; mask <<= 1; + if (*theChar & mask) value |= 0x0F; + value = ~value; + *current.charptr++ = value; + } + break; + case 8: + *current.charptr++ = (*theChar & mask) ? 0xff : 0; + break; + case 16: + *current.shortptr++ = (*theChar & mask) ? 0xFFFF : 0; + break; + + case 32: + *current.longptr++ = (*theChar & mask) ? 
0xFFFFFFFF : 0; + break; + } + mask <<= 1; + } while (mask); /* while the single bit drops to the right */ + theChar++; + } + } +} + +static void vc_paint_char1(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned char *theChar; + unsigned char *where; + int i; + + theChar = (unsigned char*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned char*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! */ + *where = *theChar++; + + where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned char val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + unsigned char mask1 = 0xC0, mask2 = 0x40; + int bit = 0; + for (bit = 0; bit < 7; bit++) { + if ((save & mask1) == mask2) + val &= ~mask2; + mask1 >>= 1; + mask2 >>= 1; + } + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + *where = val; + + where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void vc_paint_char2(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned short *theChar; + unsigned short *where; + int i; + + theChar = (unsigned short*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned short*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * 2)); + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! 
*/ + *where = *theChar++; + + where = (unsigned short*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned short val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + unsigned short mask1 = 0xF000, mask2 = 0x3000; + int bit = 0; + for (bit = 0; bit < 7; bit++) { + if ((save & mask1) == mask2) + val &= ~mask2; + mask1 >>= 2; + mask2 >>= 2; + } + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + *where = val; + + where = (unsigned short*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void vc_paint_char4(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * 4)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! 
*/ + *where = *theChar++; + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + unsigned long mask1 = 0xff000000, mask2 = 0x0F000000; + int bit = 0; + for (bit = 0; bit < 7; bit++) { + if ((save & mask1) == mask2) + val &= ~mask2; + mask1 >>= 4; + mask2 >>= 4; + } + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + *where = val; + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void vc_paint_char8c(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * CHARWIDTH)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attr? 
FLY !*/ + unsigned long *store = where; + int x; + for (x = 0; x < 2; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 2; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !(save & 0xFF000000)) + val |= 0xff000000; + if ((save & 0xFFFF0000) == 0xFF000000) + val |= 0x00FF0000; + if ((save & 0x00FFFF00) == 0x00FF0000) + val |= 0x0000FF00; + if ((save & 0x0000FFFF) == 0x0000FF00) + val |= 0x000000FF; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save & 0xff; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} +static void vc_paint_char16c(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * CHARWIDTH * 2)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! 
*/ + unsigned long *store = where; + int x; + for (x = 0; x < 4; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 4; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (save == 0xFFFF0000) val |= 0xFFFF; + else if (lastpixel && !(save & 0xFFFF0000)) + val |= 0xFFFF0000; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + + *store++ = val; + lastpixel = save & 0x7fff; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} +static void vc_paint_char32c(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * CHARWIDTH * 4)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! 
*/ + unsigned long *store = where; + int x; + for (x = 0; x < 8; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 8; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !save) + val = 0xFFFFFFFF; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +/* + * That's a plain dumb reverse of the cursor position + * It do a binary reverse, so it will not looks good when we have + * color support. we'll see that later + */ +static void reversecursor(int xx, int yy) +{ + union { + unsigned char *charptr; + unsigned short *shortptr; + unsigned long *longptr; + } where; + int line, col; + + where.longptr = (unsigned long*)(vinfo.v_baseaddr + + (y * CHARHEIGHT * vinfo.v_rowbytes) + + (x /** CHARWIDTH*/ * vinfo.v_depth)); + for (line = 0; line < CHARHEIGHT; line++) { + switch (vinfo.v_depth) { + case 1: + *where.charptr = ~*where.charptr; + break; + case 2: + *where.shortptr = ~*where.shortptr; + break; + case 4: + *where.longptr = ~*where.longptr; + break; +/* that code still exists because since characters on the screen are + * of different colors that reverse function may not work if the + * cursor is on a character that is in a different color that the + * current one. When we have buffering, things will work better. 
MP + */ +#ifdef 1 /*VC_BINARY_REVERSE*/ + case 8: + where.longptr[0] = ~where.longptr[0]; + where.longptr[1] = ~where.longptr[1]; + break; + case 16: + for (col = 0; col < 4; col++) + where.longptr[col] = ~where.longptr[col]; + break; + case 32: + for (col = 0; col < 8; col++) + where.longptr[col] = ~where.longptr[col]; + break; +#else + case 8: + for (col = 0; col < 8; col++) + where.charptr[col] = where.charptr[col] != (vc_color_fore & vc_color_mask) ? + vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; + break; + case 16: + for (col = 0; col < 8; col++) + where.shortptr[col] = where.shortptr[col] != (vc_color_fore & vc_color_mask) ? + vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; + break; + case 32: + for (col = 0; col < 8; col++) + where.longptr[col] = where.longptr[col] != (vc_color_fore & vc_color_mask) ? + vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; + break; +#endif + } + where.charptr += vinfo.v_rowbytes; + } +} + + +static void +scrollup(int num) +{ + unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; + + linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; + rowline = vinfo.v_rowbytes / 4; + rowscanline = vinfo.v_rowscanbytes / 4; + + to = (unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs); + from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + + i = (scrreg_bottom - scrreg_top) - num; + + while (i-- > 0) { + for (line = 0; line < CHARHEIGHT; line++) { + /* + * Only copy what is displayed + */ +#if 1 + bcopy((unsigned int) from, (unsigned int) to, + vinfo.v_rowscanbytes); +#else + video_scroll_up((unsigned int) from, + (unsigned int) (from+(vinfo.v_rowscanbytes/4)), + (unsigned int) to); +#endif + + from += rowline; + to += rowline; + } + } + + /* Now set the freed up lines to the background colour */ + + + to = ((unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs)) + + ((scrreg_bottom - scrreg_top - num) * linelongs); + + for (linelongs = 
CHARHEIGHT * num; linelongs-- > 0;) { + from = to; + for (i = 0; i < rowscanline; i++) + *to++ = vc_color_back; + + to = from + rowline; + } + +} + +static void +scrolldown(int num) +{ + unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; + + linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; + rowline = vinfo.v_rowbytes / 4; + rowscanline = vinfo.v_rowscanbytes / 4; + + + to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_bottom) + - (rowline - rowscanline); + from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + + i = (scrreg_bottom - scrreg_top) - num; + + while (i-- > 0) { + for (line = 0; line < CHARHEIGHT; line++) { + /* + * Only copy what is displayed + */ +#if 1 + bcopy(from-(vinfo.v_rowscanbytes/4), to, + vinfo.v_rowscanbytes); +#else + + video_scroll_down((unsigned int) from, + (unsigned int) (from-(vinfo.v_rowscanbytes/4)), + (unsigned int) to); +#endif + + from -= rowline; + to -= rowline; + } + } + + /* Now set the freed up lines to the background colour */ + + to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_top); + + for (line = CHARHEIGHT * num; line > 0; line--) { + from = to; + + for (i = 0; i < rowscanline; i++) + *(to++) = vc_color_back; + + to = from + rowline; + } + +} + + +static void +clear_line(int which) +{ + int start, end, i; + + /* + * This routine runs extremely slowly. I don't think it's + * used all that often, except for To end of line. I'll go + * back and speed this up when I speed up the whole vc + * module. 
--LK
+ */
+
+	switch (which) {
+	case 0:		/* To end of line */
+		start = x;
+		end = vinfo.v_columns-1;
+		break;
+	case 1:		/* To start of line */
+		start = 0;
+		end = x;
+		break;
+	default:
+	case 2:		/* Whole line */
+		start = 0;
+		end = vinfo.v_columns-1;
+		break;
+	}
+
+	for (i = start; i <= end; i++) {
+		vc_ops.paintchar(' ', i, y, ATTR_NONE);
+	}
+
+}
+
+/*
+ * Clear part or all of the screen in the background colour.
+ * which: 0 = cursor to end of screen, 1 = start of screen to cursor,
+ *        2 = whole screen (same codes as the VT100 'J' parameter).
+ */
+static void 
+clear_screen(int xx, int yy, int which)
+{
+	unsigned long *p, *endp, *row;
+	int linelongs, col;
+	int rowline, rowlongs;
+
+	rowline = vinfo.v_rowscanbytes / 4;	/* visible longs per scan line */
+	rowlongs = vinfo.v_rowbytes / 4;	/* total longs per scan line */
+
+	p = (unsigned long*) vinfo.v_baseaddr;	/* fixed stray ";;" */
+	endp = (unsigned long*) vinfo.v_baseaddr;
+
+	linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4;
+
+	switch (which) {
+	case 0:		/* To end of screen */
+		clear_line(0);
+		if (y < vinfo.v_rows - 1) {
+			p += (y + 1) * linelongs;
+			endp += rowlongs * vinfo.v_height;
+		}
+		break;
+	case 1:		/* To start of screen */
+		clear_line(1);
+		if (y > 1) {	/* NOTE(review): skips clearing row 0 when y==1; "y > 0" looks intended — confirm */
+			endp += (y + 1) * linelongs;
+		}
+		break;
+	case 2:		/* Whole screen */
+		endp += rowlongs * vinfo.v_height;
+		break;
+	}
+
+	for (row = p ; row < endp ; row += rowlongs) {
+		for (col = 0; col < rowline; col++)
+			*(row+col) = vc_color_back;
+	}
+
+}
+
+/* Set default tab stops every 8 columns across the whole line. */
+static void
+reset_tabs(void)
+{
+	int i;
+
+	for (i = 0; i<= vinfo.v_columns; i++) {	/* NOTE(review): writes tab_stops[v_columns]; tab_stops is char[255] — confirm v_columns < 255 */
+		tab_stops[i] = ((i % 8) == 0);
+	}
+
+}
+
+/* Restore the VT100 emulation to its power-on state (ESC c / ESC >). */
+static void
+vt100_reset(void)
+{
+	reset_tabs();
+	scrreg_top    = 0;
+	scrreg_bottom = vinfo.v_rows;
+	attr = ATTR_NONE;
+	vc_charset[0] = vc_charset[1] = 0;
+	vc_charset_select = 0;
+	vc_wrap_mode = 1;
+	vc_relative_origin = 0;
+	vc_color_set(VC_RESET_BACKGROUND);
+	vc_color_set(VC_RESET_FOREGROUND);
+
+}
+
+/* Handle one character in the ESnormal state (printable chars + C0 controls). */
+static void
+putc_normal(unsigned char ch)
+{
+	switch (ch) {
+	case '\a':		/* Beep */
+	    {
+		extern int asc_ringbell();	//In IOBSDConsole.cpp
+		int rang;
+
+		rang = asc_ringbell();
+
+		if ( !rang && !IS_TEXT_MODE ) {
+			/*
+			 * No sound hardware, invert the screen twice instead
+			 */
+			unsigned long *ptr;
+			int i, j;
+			/* XOR the screen twice */
+			for 
(i = 0; i < 2 ; i++) { + /* For each row, xor the scanbytes */ + for (ptr = (unsigned long*)vinfo.v_baseaddr; + ptr < (unsigned long*)(vinfo.v_baseaddr + + (vinfo.v_height * vinfo.v_rowbytes)); + ptr += (vinfo.v_rowbytes / + sizeof (unsigned long*))) + for (j = 0; + j < vinfo.v_rowscanbytes / + sizeof (unsigned long*); + j++) + *(ptr+j) =~*(ptr+j); + } + } + } + break; + + case 127: /* Delete */ + case '\b': /* Backspace */ + if (hanging_cursor) { + hanging_cursor = 0; + } else + if (x > 0) { + x--; + } + break; + case '\t': /* Tab */ + while (x < vinfo.v_columns && !tab_stops[++x]); + if (x >= vinfo.v_columns) + x = vinfo.v_columns-1; + break; + case 0x0b: + case 0x0c: + case '\n': /* Line feed */ + if (y >= scrreg_bottom -1 ) { + vc_ops.scrollup(1); + y = scrreg_bottom - 1; + } else { + y++; + } + /*break; Pass thru */ + case '\r': /* Carriage return */ + x = 0; + hanging_cursor = 0; + break; + case 0x0e: /* Select G1 charset (Control-N) */ + vc_charset_select = 1; + break; + case 0x0f: /* Select G0 charset (Control-O) */ + vc_charset_select = 0; + break; + case 0x18 : /* CAN : cancel */ + case 0x1A : /* like cancel */ + /* well, i do nothing here, may be later */ + break; + case '\033': /* Escape */ + vt100state = ESesc; + hanging_cursor = 0; + break; + default: + if (ch >= ' ') { + if (hanging_cursor) { + x = 0; + if (y >= scrreg_bottom -1 ) { + vc_ops.scrollup(1); + y = scrreg_bottom - 1; + } else { + y++; + } + hanging_cursor = 0; + } + vc_ops.paintchar((ch >= 0x60 && ch <= 0x7f) ? 
ch + vc_charset[vc_charset_select] + : ch, x, y, attr); + if (x == vinfo.v_columns - 1) { + hanging_cursor = vc_wrap_mode; + } else { + x++; + } + } + break; + } + +} + +static void +putc_esc(unsigned char ch) +{ + vt100state = ESnormal; + + switch (ch) { + case '[': + vt100state = ESsquare; + break; + case 'c': /* Reset terminal */ + vt100_reset(); + vc_ops.clear_screen(x, y, 2); + x = y = 0; + break; + case 'D': /* Line feed */ + case 'E': + if (y >= scrreg_bottom -1) { + vc_ops.scrollup(1); + y = scrreg_bottom - 1; + } else { + y++; + } + if (ch == 'E') x = 0; + break; + case 'H': /* Set tab stop */ + tab_stops[x] = 1; + break; + case 'M': /* Cursor up */ + if (y <= scrreg_top) { + vc_ops.scrolldown(1); + y = scrreg_top; + } else { + y--; + } + break; + case '>': + vt100_reset(); + break; + case '7': /* Save cursor */ + savex = x; + savey = y; + saveattr = attr; + vc_save_charset_s = vc_charset_select; + vc_charset_save[0] = vc_charset[0]; + vc_charset_save[1] = vc_charset[1]; + break; + case '8': /* Restore cursor */ + x = savex; + y = savey; + attr = saveattr; + vc_charset_select = vc_save_charset_s; + vc_charset[0] = vc_charset_save[0]; + vc_charset[1] = vc_charset_save[1]; + break; + case 'Z': /* return terminal ID */ + break; + case '#': /* change characters height */ + vt100state = EScharsize; + break; + case '(': + vt100state = ESsetG0; + break; + case ')': /* character set sequence */ + vt100state = ESsetG1; + break; + case '=': + break; + default: + /* Rest not supported */ + break; + } + +} + +static void +putc_askcmd(unsigned char ch) +{ + if (ch >= '0' && ch <= '9') { + par[numpars] = (10*par[numpars]) + (ch-'0'); + return; + } + vt100state = ESnormal; + + switch (par[0]) { + case 6: + vc_relative_origin = ch == 'h'; + break; + case 7: /* wrap around mode h=1, l=0*/ + vc_wrap_mode = ch == 'h'; + break; + default: + break; + } + +} + +static void +putc_charsizecmd(unsigned char ch) +{ + vt100state = ESnormal; + + switch (ch) { + case '3' : + case '4' 
: + case '5' : + case '6' : + break; + case '8' : /* fill 'E's */ + { + int xx, yy; + for (yy = 0; yy < vinfo.v_rows; yy++) + for (xx = 0; xx < vinfo.v_columns; xx++) + vc_ops.paintchar('E', xx, yy, ATTR_NONE); + } + break; + } + +} + +static void +putc_charsetcmd(int charset, unsigned char ch) +{ + vt100state = ESnormal; + + switch (ch) { + case 'A' : + case 'B' : + default: + vc_charset[charset] = 0; + break; + case '0' : /* Graphic characters */ + case '2' : + vc_charset[charset] = 0x21; + break; + } + +} + +static void +putc_gotpars(unsigned char ch) +{ + int i; + + if (ch < ' ') { + /* special case for vttest for handling cursor + movement in escape sequences */ + putc_normal(ch); + vt100state = ESgotpars; + return; + } + vt100state = ESnormal; + switch (ch) { + case 'A': /* Up */ + y -= par[0] ? par[0] : 1; + if (y < scrreg_top) + y = scrreg_top; + break; + case 'B': /* Down */ + y += par[0] ? par[0] : 1; + if (y >= scrreg_bottom) + y = scrreg_bottom - 1; + break; + case 'C': /* Right */ + x += par[0] ? par[0] : 1; + if (x >= vinfo.v_columns) + x = vinfo.v_columns-1; + break; + case 'D': /* Left */ + x -= par[0] ? par[0] : 1; + if (x < 0) + x = 0; + break; + case 'H': /* Set cursor position */ + case 'f': + x = par[1] ? par[1] - 1 : 0; + y = par[0] ? 
par[0] - 1 : 0; + if (vc_relative_origin) + y += scrreg_top; + hanging_cursor = 0; + break; + case 'X': /* clear p1 characters */ + if (numpars) { + int i; + for (i = x; i < x + par[0]; i++) + vc_ops.paintchar(' ', i, y, ATTR_NONE); + } + break; + case 'J': /* Clear part of screen */ + vc_ops.clear_screen(x, y, par[0]); + break; + case 'K': /* Clear part of line */ + clear_line(par[0]); + break; + case 'g': /* tab stops */ + switch (par[0]) { + case 1: + case 2: /* reset tab stops */ + /* reset_tabs(); */ + break; + case 3: /* Clear every tabs */ + { + int i; + + for (i = 0; i <= vinfo.v_columns; i++) + tab_stops[i] = 0; + } + break; + case 0: + tab_stops[x] = 0; + break; + } + break; + case 'm': /* Set attribute */ + for (i = 0; i < numpars; i++) { + switch (par[i]) { + case 0: + attr = ATTR_NONE; + vc_color_set(VC_RESET_BACKGROUND); + vc_color_set(VC_RESET_FOREGROUND); + break; + case 1: + attr |= ATTR_BOLD; + break; + case 4: + attr |= ATTR_UNDER; + break; + case 7: + attr |= ATTR_REVERSE; + break; + case 22: + attr &= ~ATTR_BOLD; + break; + case 24: + attr &= ~ATTR_UNDER; + break; + case 27: + attr &= ~ATTR_REVERSE; + break; + case 5: + case 25: /* blink/no blink */ + break; + default: + vc_color_set(par[i]); + break; + } + } + break; + case 'r': /* Set scroll region */ + x = y = 0; + /* ensure top < bottom, and both within limits */ + if ((numpars > 0) && (par[0] < vinfo.v_rows)) { + scrreg_top = par[0] ? 
par[0] - 1 : 0; + if (scrreg_top < 0) + scrreg_top = 0; + } else { + scrreg_top = 0; + } + if ((numpars > 1) && (par[1] <= vinfo.v_rows) && (par[1] > par[0])) { + scrreg_bottom = par[1]; + if (scrreg_bottom > vinfo.v_rows) + scrreg_bottom = vinfo.v_rows; + } else { + scrreg_bottom = vinfo.v_rows; + } + if (vc_relative_origin) + y = scrreg_top; + break; + } + +} + +static void +putc_getpars(unsigned char ch) +{ + if (ch == '?') { + vt100state = ESask; + return; + } + if (ch == '[') { + vt100state = ESnormal; + /* Not supported */ + return; + } + if (ch == ';' && numpars < MAXPARS - 1) { + numpars++; + } else + if (ch >= '0' && ch <= '9') { + par[numpars] *= 10; + par[numpars] += ch - '0'; + } else { + numpars++; + vt100state = ESgotpars; + putc_gotpars(ch); + } +} + +static void +putc_square(unsigned char ch) +{ + int i; + + for (i = 0; i < MAXPARS; i++) { + par[i] = 0; + } + + numpars = 0; + vt100state = ESgetpars; + + putc_getpars(ch); + +} + +void +vc_putchar(char ch) +{ + if (!ch) { + return; /* ignore null characters */ + } + + switch (vt100state) { + default:vt100state = ESnormal; /* FALLTHROUGH */ + case ESnormal: + putc_normal(ch); + break; + case ESesc: + putc_esc(ch); + break; + case ESsquare: + putc_square(ch); + break; + case ESgetpars: + putc_getpars(ch); + break; + case ESgotpars: + putc_gotpars(ch); + break; + case ESask: + putc_askcmd(ch); + break; + case EScharsize: + putc_charsizecmd(ch); + break; + case ESsetG0: + putc_charsetcmd(0, ch); + break; + case ESsetG1: + putc_charsetcmd(1, ch); + break; + } + + if (x >= vinfo.v_columns) { + x = vinfo.v_columns - 1; + } + if (x < 0) { + x = 0; + } + if (y >= vinfo.v_rows) { + y = vinfo.v_rows - 1; + } + if (y < 0) { + y = 0; + } + +} + +/* + * Actually draws the buffer, handle the jump scroll + */ +void vc_flush_forward_buffer(void) +{ + if (vc_forward_buffer_size) { + int start = 0; + vc_ops.hide_cursor(x, y); + do { + int i; + int plaintext = 1; + int drawlen = start; + int jump = 0; + int param = 0, 
changebackground = 0; + enum vt100state_e vtState = vt100state; + /* + * In simple words, here we're pre-parsing the text to look for + * + Newlines, for computing jump scroll + * + /\033\[[0-9;]*]m/ to continue on + * any other sequence will stop. We don't want to have cursor + * movement escape sequences while we're trying to pre-scroll + * the screen. + * We have to be extra carefull about the sequences that changes + * the background color to prevent scrolling in those + * particular cases. + * That parsing was added to speed up 'man' and 'color-ls' a + * zillion time (at least). It's worth it, trust me. + * (mail Nick Stephen for a True Performance Graph) + * Michel Pollet + */ + for (i = start; i < vc_forward_buffer_size && plaintext; i++) { + drawlen++; + switch (vtState) { + case ESnormal: + switch (vc_forward_buffer[i]) { + case '\033': + vtState = ESesc; + break; + case '\n': + jump++; + break; + } + break; + case ESesc: + switch (vc_forward_buffer[i]) { + case '[': + vtState = ESgetpars; + param = 0; + changebackground = 0; + break; + default: + plaintext = 0; + break; + } + break; + case ESgetpars: + if ((vc_forward_buffer[i] >= '0' && + vc_forward_buffer[i] <= '9') || + vc_forward_buffer[i] == ';') { + if (vc_forward_buffer[i] >= '0' && + vc_forward_buffer[i] <= '9') + param = (param*10)+(vc_forward_buffer[i]-'0'); + else { + if (param >= 40 && param <= 47) + changebackground = 1; + if (!vc_normal_background && + !param) + changebackground = 1; + param = 0; + } + break; /* continue on */ + } + vtState = ESgotpars; + /* fall */ + case ESgotpars: + switch (vc_forward_buffer[i]) { + case 'm': + vtState = ESnormal; + if (param >= 40 && param <= 47) + changebackground = 1; + if (!vc_normal_background && + !param) + changebackground = 1; + if (changebackground) { + plaintext = 0; + jump = 0; + /* REALLY don't jump */ + } + /* Yup ! 
we've got it */ + break; + default: + plaintext = 0; + break; + } + break; + default: + plaintext = 0; + break; + } + + } + + /* + * Then we look if it would be appropriate to forward jump + * the screen before drawing + */ + if (jump && (scrreg_bottom - scrreg_top) > 2) { + jump -= scrreg_bottom - y - 1; + if (jump > 0 ) { + if (jump >= scrreg_bottom - scrreg_top) + jump = scrreg_bottom - scrreg_top -1; + y -= jump; + vc_ops.scrollup(jump); + } + } + /* + * and we draw what we've found to the parser + */ + for (i = start; i < drawlen; i++) + vc_putchar(vc_forward_buffer[start++]); + /* + * Continue sending characters to the parser until we're sure we're + * back on normal characters. + */ + for (i = start; i < vc_forward_buffer_size && + vt100state != ESnormal ; i++) + vc_putchar(vc_forward_buffer[start++]); + /* Then loop again if there still things to draw */ + } while (start < vc_forward_buffer_size); + vc_forward_buffer_size = 0; + vc_ops.show_cursor(x, y); + } +} + +int +vcputc(int l, int u, int c) +{ + if ( vc_initialized ) + { + vc_store_char(c); + vc_flush_forward_buffer(); + } + return 0; +} + +/* + * Immediate character display.. kernel printf uses this. Make sure + * pre-clock printfs get flushed and that panics get fully displayed. + */ + +void cnputc(char ch) +{ + vcputc(0, 0, ch); +} + +/* + * Store characters to be drawn 'later', handle overflows + */ + +void +vc_store_char(unsigned char c) +{ + + /* Either we're really buffering stuff or we're not yet because + * the probe hasn't been done. If we're not, then we can only + * ever have a maximum of one character in the buffer waiting to + * be flushed + */ + + vc_forward_buffer[vc_forward_buffer_size++] = (unsigned char)c; + + switch (vc_forward_buffer_size) { + case 1: + /* If we're adding the first character to the buffer, + * start the timer, otherwise it is already running. 
+ */ + break; + case VC_MAX_FORWARD_SIZE: + vc_flush_forward_buffer(); + break; + default: + /* + * the character will be flushed on timeout + */ + break; + } +} + +static void +vc_initialize(struct vc_info * vinfo_p) +{ + vinfo.v_rows = vinfo.v_height / CHARHEIGHT; + vinfo.v_columns = vinfo.v_width / CHARWIDTH; + + if (vinfo.v_depth >= 8) { + vinfo.v_rowscanbytes = (vinfo.v_depth / 8) * vinfo.v_width; + } else { + vinfo.v_rowscanbytes = vinfo.v_width / (8 / vinfo.v_depth); + } + + vc_render_font(1, vinfo.v_depth); + vc_color_mask = vc_color_depth_masks[vc_color_index_table[vinfo.v_depth]]; + vt100_reset(); + switch (vinfo.v_depth) { + default: + case 1: + vc_ops.paintchar = vc_paint_char1; + break; + case 2: + vc_ops.paintchar = vc_paint_char2; + break; + case 4: + vc_ops.paintchar = vc_paint_char4; + break; + case 8: + vc_ops.paintchar = vc_paint_char8c; + break; + case 16: + vc_ops.paintchar = vc_paint_char16c; + break; + case 32: + vc_ops.paintchar = vc_paint_char32c; + break; + } +} + +void +vcattach(void) +{ + if (vinfo.v_depth >= 8) + printf("\033[31mC\033[32mO\033[33mL\033[34mO\033[35mR\033[0m "); + printf("video console at 0x%lx (%ldx%ldx%ld)\n", vinfo.v_baseaddr, + vinfo.v_width, vinfo.v_height, vinfo.v_depth); + +#if 0 // XXX - FIXME + /* + * Added for the buffering and jump scrolling + */ + /* Init our lock */ + simple_lock_init(&vc_forward_lock, ETAP_IO_TTY); + + vc_forward_buffer_enabled = 1; +#else // FIXME TOO!!! 
+ /* Init our lock */ + simple_lock_init(&vc_forward_lock, ETAP_IO_TTY); +#endif +} + + +struct vc_progress_element { + unsigned int version; + unsigned int flags; + unsigned int time; + unsigned char count; + unsigned char res[3]; + int width; + int height; + int dx; + int dy; + int transparent; + unsigned int res2[3]; + unsigned char data[0]; +}; +typedef struct vc_progress_element vc_progress_element; + +static vc_progress_element * vc_progress; +static unsigned char * vc_progress_data; +static boolean_t vc_progress_enable; +static unsigned char * vc_clut; +static unsigned int vc_progress_tick; +static boolean_t vc_graphics_mode; +static boolean_t vc_acquired; +static boolean_t vc_need_clear; + +void vc_blit_rect_8c( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned char * dst; + int line, col; + unsigned char data; + + dst = (unsigned char *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + *(dst + col) = data; + } + dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); + } + +} + +void vc_blit_rect_8m( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned char * dst; + int line, col; + unsigned int data; + + dst = (unsigned char *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + data *= 3; + *(dst + col) = ((19595 * vc_clut[data + 0] + + 38470 * vc_clut[data + 1] + + 7471 * vc_clut[data + 2] ) / 65536); + } + dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); + } +} + +void vc_blit_rect_16( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned short * dst; + int line, col; + 
unsigned int data; + + dst = (volatile unsigned short *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 2)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + data *= 3; + *(dst + col) = ( (0xf8 & (vc_clut[data + 0])) << 7) + | ( (0xf8 & (vc_clut[data + 1])) << 2) + | ( (0xf8 & (vc_clut[data + 2])) >> 3); + } + dst = (volatile unsigned short *) (((int)dst) + vinfo.v_rowbytes); + } +} + +void vc_blit_rect_32( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned int * dst; + int line, col; + unsigned int data; + + dst = (volatile unsigned int *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + data *= 3; + *(dst + col) = (vc_clut[data + 0] << 16) + | (vc_clut[data + 1] << 8) + | (vc_clut[data + 2]); + } + dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); + } +} + +void vc_blit_rect( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + switch( vinfo.v_depth) { + case 8: + vc_blit_rect_8c( x, y, width, height, transparent, dataPtr); + break; + case 16: + vc_blit_rect_16( x, y, width, height, transparent, dataPtr); + break; + case 32: + vc_blit_rect_32( x, y, width, height, transparent, dataPtr); + break; + } +} + +void vc_progress_task( void * arg ) +{ + spl_t s; + int count = (int) arg; + int x, y, width, height; + unsigned char * data; + + s = splhigh(); + simple_lock(&vc_forward_lock); + + if( vc_progress_enable) { + count++; + if( count >= vc_progress->count) + count = 0; + + width = vc_progress->width; + height = vc_progress->height; + x = vc_progress->dx; + y = vc_progress->dy; + data = vc_progress_data; + data += count * width * height; + if( 1 & vc_progress->flags) { + 
x += (vinfo.v_width / 2); + y += (vinfo.v_height / 2); /* fix: was 'x +=' — the vertical (height) offset belongs on y when centering (flag bit 1), as in vc_display_icon() below */ + } + vc_blit_rect( x, y, width, height, + vc_progress->transparent,data ); + + timeout( vc_progress_task, (void *) count, + vc_progress_tick ); + } + simple_unlock(&vc_forward_lock); + splx(s); +} + +void vc_display_icon( vc_progress_element * desc, + unsigned char * data ) +{ + int x, y, width, height; + + if( vc_acquired && vc_graphics_mode && vc_clut) { + + width = desc->width; + height = desc->height; + x = desc->dx; + y = desc->dy; + if( 1 & desc->flags) { + x += (vinfo.v_width / 2); + y += (vinfo.v_height / 2); + } + vc_blit_rect( x, y, width, height, desc->transparent, data ); + } +} + +boolean_t +vc_progress_set( boolean_t enable ) +{ + spl_t s; + + if( !vc_progress) + return( FALSE ); + + s = splhigh(); + simple_lock(&vc_forward_lock); + + if( vc_progress_enable != enable) { + vc_progress_enable = enable; + if( enable) + timeout(vc_progress_task, (void *) 0, + vc_progress_tick ); + else + untimeout( vc_progress_task, (void *) 0 ); + } + + simple_unlock(&vc_forward_lock); + splx(s); + + return( TRUE ); +} + + +boolean_t +vc_progress_initialize( vc_progress_element * desc, + unsigned char * data, + unsigned char * clut ) +{ + if( (!clut) || (!desc) || (!data)) + return( FALSE ); + vc_clut = clut; + + vc_progress = desc; + vc_progress_data = data; + vc_progress_tick = vc_progress->time * hz / 1000; + + return( TRUE ); +} + +extern int disableConsoleOutput; + +void vc_clear_screen( void ) +{ + vc_ops.hide_cursor(x, y); + vt100_reset(); + x = y = 0; + vc_ops.clear_screen(x, y, 2); + vc_ops.show_cursor(x, y); +}; + +void +initialize_screen(Boot_Video * boot_vinfo, int op) +{ + if ( boot_vinfo ) + { + vinfo.v_width = boot_vinfo->v_width; + vinfo.v_height = boot_vinfo->v_height; + vinfo.v_depth = boot_vinfo->v_depth; + vinfo.v_rowbytes = boot_vinfo->v_rowBytes; + vinfo.v_baseaddr = boot_vinfo->v_baseAddr; + vinfo.v_type = boot_vinfo->v_display; + + if ( IS_TEXT_MODE ) + { + // Text mode setup by the booter. 
+ + vc_ops.initialize = tc_initialize; + vc_ops.paintchar = tc_putchar; + vc_ops.scrolldown = tc_scrolldown; + vc_ops.scrollup = tc_scrollup; + vc_ops.clear_screen = tc_clear_screen; + vc_ops.hide_cursor = tc_hide_cursor; + vc_ops.show_cursor = tc_show_cursor; + vc_ops.update_color = tc_update_color; + } + else + { + // Graphics mode setup by the booter. + + vc_ops.initialize = vc_initialize; + vc_ops.paintchar = 0; + vc_ops.scrolldown = scrolldown; + vc_ops.scrollup = scrollup; + vc_ops.clear_screen = clear_screen; + vc_ops.hide_cursor = reversecursor; + vc_ops.show_cursor = reversecursor; + vc_ops.update_color = 0; + } + + vc_ops.initialize(&vinfo); + + // vc_clear_screen(); + + vc_initialized = 1; + } + + switch ( op ) { + + case kPEGraphicsMode: + vc_graphics_mode = TRUE; + disableConsoleOutput = TRUE; + vc_acquired = TRUE; + break; + + case kPETextMode: + vc_graphics_mode = FALSE; + disableConsoleOutput = FALSE; + vc_acquired = TRUE; + vc_clear_screen(); + break; + + case kPETextScreen: + vc_progress_set( FALSE ); + disableConsoleOutput = FALSE; + if( vc_need_clear) { + vc_need_clear = FALSE; + vc_clear_screen(); + } + break; + + case kPEEnableScreen: + if ( vc_acquired) { + if( vc_graphics_mode) + vc_progress_set( TRUE ); + else + vc_clear_screen(); + } + break; + + case kPEDisableScreen: + vc_progress_set( FALSE ); + break; + + case kPEAcquireScreen: + vc_need_clear = (FALSE == vc_acquired); + vc_acquired = TRUE; + vc_progress_set( vc_graphics_mode ); + disableConsoleOutput = vc_graphics_mode; + if( vc_need_clear && !vc_graphics_mode) { + vc_need_clear = FALSE; + vc_clear_screen(); + } + break; + + case kPEReleaseScreen: + vc_acquired = FALSE; + vc_progress_set( FALSE ); + disableConsoleOutput = TRUE; + break; + } +} diff --git a/osfmk/i386/AT386/video_console.h b/osfmk/i386/AT386/video_console.h new file mode 100644 index 000000000..ccf059eff --- /dev/null +++ b/osfmk/i386/AT386/video_console.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, 
Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __PEXPERT_VIDEO_CONSOLE_H +#define __PEXPERT_VIDEO_CONSOLE_H + +/* + * Video console properties. 
+ */ +struct vc_info { + unsigned long v_height; /* pixels */ + unsigned long v_width; /* pixels */ + unsigned long v_depth; + unsigned long v_rowbytes; + unsigned long v_baseaddr; + unsigned long v_type; + char v_name[32]; + unsigned long v_physaddr; + unsigned long v_rows; /* characters */ + unsigned long v_columns; /* characters */ + unsigned long v_rowscanbytes; /* Actualy number of bytes used for display per row */ + unsigned long v_reserved[5]; +}; + +/* + * From text_console.c + */ +extern void tc_putchar(unsigned char ch, int x, int y, int attrs); +extern void tc_scrolldown(int lines); +extern void tc_scrollup(int lines); +extern void tc_clear_screen(int x, int y, int operation); +extern void tc_show_cursor(int x, int y); +extern void tc_hide_cursor(int x, int y); +extern void tc_initialize(struct vc_info * vinfo_p); +extern void tc_update_color(int color, int fore); + +#endif /* !__PEXPERT_VIDEO_CONSOLE_H */ diff --git a/osfmk/i386/Makefile b/osfmk/i386/Makefile new file mode 100644 index 000000000..8e606d055 --- /dev/null +++ b/osfmk/i386/Makefile @@ -0,0 +1,31 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + asm.h eflags.h exec.h machlimits.h \ + endian.h trap.h soundcard.h + +EXPORT_ONLY_FILES = cpu_number.h \ + hw_lock_types.h \ + io_map_entries.h \ + lock.h \ + machine_routines.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = i386 + +EXPORT_MD_LIST = ${EXPORT_ONLY_FILES} + +EXPORT_MD_DIR = i386 + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/i386/_setjmp.s b/osfmk/i386/_setjmp.s new file mode 100644 index 000000000..cde520aea --- /dev/null +++ b/osfmk/i386/_setjmp.s @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +/* + * C library -- _setjmp, _longjmp + * + * _longjmp(a,v) + * will generate a "return(v)" from + * the last call to + * _setjmp(a) + * by restoring registers from the stack, + * The previous signal state is NOT restored. + * + */ + +#include + +ENTRY(_setjmp) + movl 4(%esp),%ecx # fetch buffer + movl %ebx,0(%ecx) + movl %esi,4(%ecx) + movl %edi,8(%ecx) + movl %ebp,12(%ecx) # save frame pointer of caller + popl %edx + movl %esp,16(%ecx) # save stack pointer of caller + movl %edx,20(%ecx) # save pc of caller + xorl %eax,%eax + jmp *%edx + +ENTRY(_longjmp) + movl 8(%esp),%eax # return(v) + movl 4(%esp),%ecx # fetch buffer + movl 0(%ecx),%ebx + movl 4(%ecx),%esi + movl 8(%ecx),%edi + movl 12(%ecx),%ebp + movl 16(%ecx),%esp + orl %eax,%eax + jnz 0f + incl %eax +0: jmp *20(%ecx) # done, return.... diff --git a/osfmk/i386/apic.h b/osfmk/i386/apic.h new file mode 100644 index 000000000..2b62c0d06 --- /dev/null +++ b/osfmk/i386/apic.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#define LAPIC_START 0xFEE00000 +#define LAPIC_SIZE 0x00000400 + +#define LAPIC_ID 0x00000020 +#define LAPIC_ID_SHIFT 24 +#define LAPIC_ID_MASK 0x0F +#define LAPIC_VERSION 0x00000030 +#define LAPIC_VERSION_MASK 0xFF +#define LAPIC_TPR 0x00000080 +#define LAPIC_TPR_MASK 0xFF +#define LAPIC_APR 0x00000090 +#define LAPIC_APR_MASK 0xFF +#define LAPIC_PPR 0x000000A0 +#define LAPIC_PPR_MASK 0xFF +#define LAPIC_EOI 0x000000B0 +#define LAPIC_REMOTE_READ 0x000000C0 +#define LAPIC_LDR 0x000000D0 +#define LAPIC_LDR_SHIFT 24 +#define LAPIC_DFR 0x000000E0 +#define LAPIC_DFR_FLAT 0xFFFFFFFF +#define LAPIC_DFR_CLUSTER 0x0FFFFFFF +#define LAPIC_DFR_SHIFT 28 +#define LAPIC_SVR 0x000000F0 +#define LAPIC_SVR_MASK 0x0FF +#define LAPIC_SVR_ENABLE 0x100 +#define LAPIC_SVR_FOCUS_OFF 0x200 +#define LAPIC_ISR_BASE 0x00000100 +#define LAPIC_TMR_BASE 0x00000180 +#define LAPIC_IRR_BASE 0x00000200 +#define LAPIC_ERROR_STATUS 0x00000280 +#define LAPIC_ICR 0x00000300 +#define LAPIC_ICR_VECTOR_MASK 0x000FF +#define LAPIC_ICR_DM_MASK 0x00700 +#define LAPIC_ICR_DM_FIXED 0x00000 +#define LAPIC_ICR_DM_LOWEST 0x00100 +#define LAPIC_ICR_DM_SMI 0x00200 +#define LAPIC_ICR_DM_REMOTE 0x00300 +#define LAPIC_ICR_DM_NMI 0x00400 +#define LAPIC_ICR_DM_INIT 0x00500 +#define LAPIC_ICR_DM_STARTUP 0x00600 +#define LAPIC_ICR_DM_LOGICAL 0x00800 +#define LAPIC_ICR_DS_PENDING 0x01000 +#define LAPIC_ICR_LEVEL_ASSERT 0x04000 +#define LAPIC_ICR_TRIGGER_LEVEL 0x08000 +#define LAPIC_ICR_RR_MASK 0x30000 +#define LAPIC_ICR_RR_INVALID 0x00000 +#define LAPIC_ICR_RR_INPROGRESS 0x10000 +#define LAPIC_ICR_RR_VALID 0x20000 +#define LAPIC_ICR_DSS_MASK 0xC0000 +#define LAPIC_ICR_DSS_DEST 0x00000 +#define LAPIC_ICR_DSS_SELF 0x40000 +#define LAPIC_ICR_DSS_ALL 0x80000 +#define LAPIC_ICR_DSS_OTHERS 0xC0000 +#define LAPIC_ICRD 0x00000310 +#define LAPIC_ICRD_DEST_SHIFT 24 +#define LAPIC_LVT_TIMER 0x00000320 +#define LAPIC_LVT_LINT0 0x00000350 +#define 
LAPIC_LVT_LINT1 0x00000360 +#define LAPIC_LVT_ERROR 0x00000370 +#define LAPIC_LVT_VECTOR_MASK 0x0000F +#define LAPIC_LVT_DM_SHIFT 8 +#define LAPIC_LVT_DM_MASK 0x00007 +#define LAPIC_LVT_DM_FIXED 0x00000 +#define LAPIC_LVT_DM_NMI 0x00400 +#define LAPIC_LVT_DM_EXTINT 0x00700 +#define LAPIC_LVT_DS_PENDING 0x01000 +#define LAPIC_LVT_IP_PLRITY_LOW 0x02000 +#define LAPIC_LVT_REMOTE_IRR 0x04000 +#define LAPIC_LVT_TM_LEVEL 0x08000 +#define LAPIC_LVT_MASKED 0x10000 +#define LAPIC_LVT_PERIODIC 0x20000 +#define LAPIC_INITIAL_COUNT_TIMER 0x00000380 +#define LAPIC_CURRENT_COUNT_TIMER 0x00000390 +#define LAPIC_TIMER_DIVIDE_CONFIG 0x000003E0 + +#define IOAPIC_START 0xFEC00000 +#define IOAPIC_SIZE 0x00000020 + +#define IOAPIC_RSELECT 0x00000000 +#define IOAPIC_RWINDOW 0x00000010 +#define IOA_R_ID 0x00 +#define IOA_R_ID_SHIFT 24 +#define IOA_R_VERSION 0x01 +#define IOA_R_VERSION_MASK 0xFF +#define IOA_R_VERSION_ME_SHIFT 16 +#define IOA_R_VERSION_ME_MASK 0xFF +#define IOA_R_REDIRECTION 0x10 +#define IOA_R_R_VECTOR_MASK 0x000FF +#define IOA_R_R_DM_MASK 0x00700 +#define IOA_R_R_DM_FIXED 0x00000 +#define IOA_R_R_DM_LOWEST 0x00100 +#define IOA_R_R_DM_NMI 0x00400 +#define IOA_R_R_DM_RESET 0x00500 +#define IOA_R_R_DM_EXTINT 0x00700 +#define IOA_R_R_DEST_LOGICAL 0x00800 +#define IOA_R_R_DS_PENDING 0x01000 +#define IOA_R_R_IP_PLRITY_LOW 0x02000 +#define IOA_R_R_TM_LEVEL 0x08000 +#define IOA_R_R_MASKED 0x10000 diff --git a/osfmk/i386/arch_types.h b/osfmk/i386/arch_types.h new file mode 100644 index 000000000..c04037860 --- /dev/null +++ b/osfmk/i386/arch_types.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _MACHINE_ARCH_TYPES_H_ +#define _MACHINE_ARCH_TYPES_H_ + + +typedef signed char bit8_t; /* signed 8-bit quantity */ +typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ + +typedef short bit16_t; /* signed 16-bit quantity */ +typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ + +typedef int bit32_t; /* signed 32-bit quantity */ +typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ + +#endif diff --git a/osfmk/i386/asm.h b/osfmk/i386/asm.h new file mode 100644 index 000000000..1bd157fb2 --- /dev/null +++ b/osfmk/i386/asm.h @@ -0,0 +1,289 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +#ifndef _I386_ASM_H_ +#define _I386_ASM_H_ + +#ifdef _KERNEL +#include +#endif /* _KERNEL */ + +#ifdef MACH_KERNEL +#include +#else /* !MACH_KERNEL */ +#define MACH_KDB 0 +#endif /* !MACH_KERNEL */ + + +#if defined(MACH_KERNEL) || defined(_KERNEL) +#include +#endif /* MACH_KERNEL || _KERNEL */ + + +#define S_ARG0 4(%esp) +#define S_ARG1 8(%esp) +#define S_ARG2 12(%esp) +#define S_ARG3 16(%esp) + +#define FRAME pushl %ebp; movl %esp, %ebp +#define EMARF leave + +#define B_ARG0 8(%ebp) +#define B_ARG1 12(%ebp) +#define B_ARG2 16(%ebp) +#define B_ARG3 20(%ebp) + +/* There is another definition of ALIGN for .c sources */ +#ifdef ASSEMBLER +#define ALIGN 2 +#endif /* ASSEMBLER */ + +#ifndef FALIGN +#define FALIGN ALIGN +#endif + +#define LB(x,n) n +#if __STDC__ +#ifndef __NO_UNDERSCORES__ +#define LCL(x) L ## x +#define EXT(x) _ ## x +#define LEXT(x) _ ## x ## : +#else +#define LCL(x) .L ## x +#define EXT(x) x +#define LEXT(x) x ## : +#endif +#define LBc(x,n) n ## : +#define LBb(x,n) n ## b +#define LBf(x,n) n ## f +#else /* __STDC__ */ +#ifndef __NO_UNDERSCORES__ +#define LCL(x) L/**/x +#define EXT(x) _/**/x +#define LEXT(x) _/**/x/**/: +#else /* __NO_UNDERSCORES__ */ +#define LCL(x) .L/**/x +#define EXT(x) x +#define LEXT(x) x/**/: +#endif /* __NO_UNDERSCORES__ */ +#define LBc(x,n) n/**/: +#define LBb(x,n) n/**/b +#define LBf(x,n) n/**/f +#endif /* __STDC__ */ + +#define SVC .byte 0x9a; .long 0; .word 0x7 + +#define RPC_SVC .byte 0x9a; .long 0; .word 0xf + +#define String .asciz +#define Value .word +#define Times(a,b) (a*b) +#define Divide(a,b) (a/b) + +#define INB inb %dx, %al +#define OUTB outb %al, %dx +#define INL inl %dx, %eax +#define OUTL outl %eax, %dx + +#define data16 .byte 0x66 +#define addr16 .byte 0x67 + +#if !GPROF +#define MCOUNT + +#elif defined(__SHARED__) +#define MCOUNT ; .data;\ + .align ALIGN;\ + LBc(x, 8) .long 0;\ + .text;\ + Gpush;\ + Gload;\ + leal Gotoff(LBb(x,8)),%edx;\ + Egaddr(%eax,_mcount_ptr);\ + Gpop;\ + call *(%eax); + +#else 
/* !GPROF, !__SHARED__ */ +#define MCOUNT ; .data;\ + .align ALIGN;\ + LBc(x, 8) .long 0;\ + .text;\ + movl $LBb(x,8),%edx;\ + call *EXT(_mcount_ptr); + +#endif /* GPROF */ + +#ifdef __ELF__ +#define ELF_FUNC(x) .type x,@function +#define ELF_DATA(x) .type x,@object +#define ELF_SIZE(x,s) .size x,s +#else +#define ELF_FUNC(x) +#define ELF_DATA(x) +#define ELF_SIZE(x,s) +#endif + +#define Entry(x) .globl EXT(x); ELF_FUNC(EXT(x)); .align FALIGN; LEXT(x) +#define ENTRY(x) Entry(x) MCOUNT +#define ENTRY2(x,y) .globl EXT(x); .globl EXT(y); \ + ELF_FUNC(EXT(x)); ELF_FUNC(EXT(y)); \ + .align FALIGN; LEXT(x); LEXT(y) \ + MCOUNT +#if __STDC__ +#define ASENTRY(x) .globl x; .align FALIGN; x ## : ELF_FUNC(x) MCOUNT +#else +#define ASENTRY(x) .globl x; .align FALIGN; x: ELF_FUNC(x) MCOUNT +#endif /* __STDC__ */ + +#define DATA(x) .globl EXT(x); ELF_DATA(EXT(x)); .align ALIGN; LEXT(x) + +#define End(x) ELF_SIZE(x,.-x) +#define END(x) End(EXT(x)) +#define ENDDATA(x) END(x) +#define Enddata(x) End(x) + +/* + * ELF shared library accessor macros. + * Gpush saves the %ebx register used for the GOT address + * Gpop pops %ebx if we need a GOT + * Gload loads %ebx with the GOT address if shared libraries are used + * Gcall calls an external function. + * Gotoff allows you to reference local labels. + * Gotoff2 allows you to reference local labels with an index reg. + * Gotoff3 allows you to reference local labels with an index reg & size. + * Gaddr loads up a register with an address of an external item. + * Gstack is the number of bytes that Gpush pushes on the stack. + * + * Varients of the above with E or L prefixes do EXT(name) or LCL(name) + * respectively. 
+ */ + +#ifndef __SHARED__ +#define Gpush +#define Gpop +#define Gload +#define Gcall(func) call func +#define Gotoff(lab) lab +#define Gotoff2(l,r) l(r) +#define Gotoff3(l,r,s) l(,r,s) +#define Gaddr(to,lab) movl $lab,to +#define Gcmp(lab,reg) cmpl $lab,reg +#define Gmemload(lab,reg) movl lab,reg +#define Gmemstore(reg,lab,tmp) movl reg,lab +#define Gstack 0 + +#else +#ifdef __ELF__ /* ELF shared libraries */ +#define Gpush pushl %ebx +#define Gpop popl %ebx +#define Gload call 9f; 9: popl %ebx; addl $_GLOBAL_OFFSET_TABLE_+[.-9b],%ebx +#define Gcall(func) call EXT(func)@PLT +#define Gotoff(lab) lab@GOTOFF(%ebx) +#define Gotoff2(l,r) l@GOTOFF(%ebx,r) +#define Gotoff3(l,r,s) l@GOTOFF(%ebx,r,s) +#define Gaddr(to,lab) movl lab@GOT(%ebx),to +#define Gcmp(lab,reg) cmpl reg,lab@GOT(%ebx) +#define Gmemload(lab,reg) movl lab@GOT(%ebx),reg; movl (reg),reg +#define Gmemstore(reg,lab,tmp) movl lab@GOT(%ebx),tmp; movl reg,(tmp) +#define Gstack 4 + +#else /* ROSE shared libraries */ +#define Gpush +#define Gpop +#define Gload +#define Gcall(func) call *9f; .data; .align ALIGN; 9: .long func; .text +#define Gotoff(lab) lab +#define Gotoff2(l,r) l(r) +#define Gotoff3(l,r,s) l(,r,s) +#define Gaddr(to,lab) movl 9f,to; .data; .align ALIGN; 9: .long lab; .text +#define Gcmp(lab,reg) cmpl reg,9f; .data; .align ALIGN; 9: .long lab; .text +#define Gmemload(lab,reg) movl 9f,reg; movl (reg),reg; .data; .align ALIGN; 9: .long lab; .text +#define Gmemstore(reg,lab,tmp) movl 9f,tmp; movl reg,(tmp); .data; .align ALIGN; 9: .long lab; .text +#define Gstack 0 +#endif /* __ELF__ */ +#endif /* __SHARED__ */ + +/* Egotoff is not provided, since external symbols should not use @GOTOFF + relocations. 
*/ +#define Egcall(func) Gcall(EXT(func)) +#define Egaddr(to,lab) Gaddr(to,EXT(lab)) +#define Egcmp(lab,reg) Gcmp(EXT(lab),reg) +#define Egmemload(lab,reg) Gmemload(EXT(lab),reg) +#define Egmemstore(reg,lab,tmp) Gmemstore(reg,EXT(lab),tmp) + +#define Lgotoff(lab) Gotoff(LCL(lab)) +#define Lgotoff2(l,r) Gotoff2(LCL(l),r) +#define Lgotoff3(l,r,s) Gotoff3(LCL(l),r,s) +#define Lgcmp(lab,reg) Gcmp(LCL(lab),reg) +#define Lgmemload(lab,reg) movl Lgotoff(lab),reg +#define Lgmemstore(reg,lab,tmp) movl reg,Lgotoff(lab) + +#ifdef ASSEMBLER +#if MACH_KDB +#include +/* + * This pseudo-assembler line is added so that there will be at least + * one N_SO entry in the symbol table to define the current file name. + */ +#endif /* MACH_KDB */ + +#else /* NOT ASSEMBLER */ + +/* These defines are here for .c files that wish to reference global symbols + * within __asm__ statements. + */ +#ifndef __NO_UNDERSCORES__ +#define CC_SYM_PREFIX "_" +#else +#define CC_SYM_PREFIX "" +#endif /* __NO_UNDERSCORES__ */ +#endif /* ASSEMBLER */ + +#endif /* _I386_ASM_H_ */ diff --git a/osfmk/i386/ast.h b/osfmk/i386/ast.h new file mode 100644 index 000000000..9f201def5 --- /dev/null +++ b/osfmk/i386/ast.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _I386_AST_H_ +#define _I386_AST_H_ + +/* + * Machine-dependent AST file for machines with no hardware AST support. + * + * For the I386, we define AST_I386_FP to handle delayed + * floating-point exceptions. The FPU may interrupt on errors + * while the user is not running (in kernel or other thread running). 
+ */ + +#define AST_I386_FP 0x80000000 +#define MACHINE_AST_PER_THREAD AST_I386_FP + + +#endif /* _I386_AST_H_ */ diff --git a/osfmk/i386/ast_check.c b/osfmk/i386/ast_check.c new file mode 100644 index 000000000..15d3cd6bc --- /dev/null +++ b/osfmk/i386/ast_check.c @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ +#include +#include + diff --git a/osfmk/i386/ast_types.h b/osfmk/i386/ast_types.h new file mode 100644 index 000000000..6ca56f471 --- /dev/null +++ b/osfmk/i386/ast_types.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _I386_AST_TYPES_H_ +#define _I386_AST_TYPES_H_ + +/* + * Data type for remote ast_check() invocation support. Currently + * not implemented. Do this first to avoid include problems. + */ +typedef int ast_check_t; + +#endif /* _I386_AST_TYPES_H_ */ diff --git a/osfmk/i386/bcopy.s b/osfmk/i386/bcopy.s new file mode 100644 index 000000000..b592689e8 --- /dev/null +++ b/osfmk/i386/bcopy.s @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#include + +/* void *memcpy((void *) to, (const void *) from, (size_t) bcount) */ + +ENTRY(memcpy) + pushl %edi + pushl %esi + movl 8+ 4(%esp),%edi /* to */ + movl %edi,%eax /* returns its first argument */ + movl 8+ 8(%esp),%esi /* from */ +memcpy_common: + movl 8+ 12(%esp),%edx /* number of bytes */ + cld +/* move longs*/ + movl %edx,%ecx + sarl $2,%ecx + rep + movsl +/* move bytes*/ + movl %edx,%ecx + andl $3,%ecx + rep + movsb + popl %esi + popl %edi + ret + +/* void bcopy((const char *) from, (char *) to, (unsigned int) count) */ + +ENTRY(bcopy_no_overwrite) + pushl %edi + pushl %esi + movl 8+ 8(%esp),%edi /* to */ + movl 8+ 4(%esp),%esi /* from */ + jmp memcpy_common + +/* bcopy16(from, to, bcount) using word moves */ + +ENTRY(bcopy16) + pushl %edi + pushl %esi + movl 8+12(%esp),%edx /* 8 for the two pushes above */ + movl 8+ 8(%esp),%edi + movl 8+ 4(%esp),%esi +/* move words */ +0: cld + movl %edx,%ecx + sarl $1,%ecx + rep + movsw +/* move bytes */ + movl %edx,%ecx + andl $1,%ecx + rep + movsb + popl %esi + popl %edi + ret + diff --git a/osfmk/i386/bsd_i386.c b/osfmk/i386/bsd_i386.c new file mode 100644 index 000000000..50c24d204 --- /dev/null +++ b/osfmk/i386/bsd_i386.c @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifdef MACH_BSD +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define USRSTACK 0xc0000000 + +kern_return_t +thread_userstack( + thread_t, + int, + thread_state_t, + unsigned int, + vm_offset_t * +); + +kern_return_t +thread_entrypoint( + thread_t, + int, + thread_state_t, + unsigned int, + vm_offset_t * +); + +struct i386_saved_state * +get_user_regs( + thread_act_t); + +void +act_thread_dup( + thread_act_t, + thread_act_t +); + +unsigned int get_msr_exportmask(void); + +unsigned int get_msr_nbits(void); + +unsigned int get_msr_rbits(void); + +/* + * thread_userstack: + * + * Return the user stack pointer from the machine + * dependent thread state info. + */ +kern_return_t +thread_userstack( + thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count, + vm_offset_t *user_stack +) +{ + struct i386_saved_state *state; + i386_thread_state_t *state25; + vm_offset_t uesp; + + /* + * Set a default. + */ + if (*user_stack == 0) + *user_stack = USRSTACK; + + switch (flavor) { + case i386_THREAD_STATE: /* FIXME */ + state25 = (i386_thread_state_t *) tstate; + *user_stack = state25->esp ? 
state25->esp : USRSTACK; + break; + + case i386_NEW_THREAD_STATE: + if (count < i386_NEW_THREAD_STATE_COUNT) + return (KERN_INVALID_ARGUMENT); + else { + state = (struct i386_saved_state *) tstate; + uesp = state->uesp; + } + + /* + * If a valid user stack is specified, use it. + */ + *user_stack = uesp ? uesp : USRSTACK; + break; + default : + return (KERN_INVALID_ARGUMENT); + } + + return (KERN_SUCCESS); +} + +kern_return_t +thread_entrypoint( + thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count, + vm_offset_t *entry_point +) +{ + struct i386_saved_state *state; + i386_thread_state_t *state25; + + /* + * Set a default. + */ + if (*entry_point == 0) + *entry_point = VM_MIN_ADDRESS; + + switch (flavor) { + case i386_THREAD_STATE: + state25 = (i386_thread_state_t *) tstate; + *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS; + break; + + case i386_NEW_THREAD_STATE: + if (count < i386_THREAD_STATE_COUNT) + return (KERN_INVALID_ARGUMENT); + else { + state = (struct i386_saved_state *) tstate; + + /* + * If a valid entry point is specified, use it. + */ + *entry_point = state->eip ? state->eip: VM_MIN_ADDRESS; + } + break; + } + + return (KERN_SUCCESS); +} + +struct i386_saved_state * +get_user_regs(thread_act_t th) +{ + if (th->mact.pcb) + return(USER_REGS(th)); + else { + printf("[get_user_regs: thread does not have pcb]"); + return NULL; + } +} + +/* + * Duplicate parent state in child + * for U**X fork. 
+ */ +void +act_thread_dup( + thread_act_t parent, + thread_act_t child +) +{ + struct i386_saved_state *parent_state, *child_state; + struct i386_machine_state *ims; + struct i386_float_state floatregs; + +#ifdef XXX + /* Save the FPU state */ + if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) { + fp_state_save(parent); + } +#endif + + if (child->mact.pcb == NULL + || parent->mact.pcb == NULL) { + panic("[thread_dup, child (%x) or parent (%x) is NULL!]", + child->mact.pcb, parent->mact.pcb); + return; + } + + /* Copy over the i386_saved_state registers */ + child->mact.pcb->iss = parent->mact.pcb->iss; + + /* Check to see if parent is using floating point + * and if so, copy the registers to the child + * FIXME - make sure this works. + */ + + if (parent->mact.pcb->ims.ifps) { + if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS) + fpu_set_state(child, &floatregs); + } + + /* FIXME - should a user specified LDT, TSS and V86 info + * be duplicated as well?? - probably not. + */ +} + +/* + * FIXME - thread_set_child + */ + +void thread_set_child(thread_act_t child, int pid); +void +thread_set_child(thread_act_t child, int pid) +{ + child->mact.pcb->iss.eax = pid; + child->mact.pcb->iss.edx = 1; + child->mact.pcb->iss.efl &= ~EFL_CF; +} + + + +/* + * Move pages from one kernel virtual address to another. + * Both addresses are assumed to reside in the Sysmap, + * and size must be a multiple of the page size. 
+ */ +void +pagemove( + register caddr_t from, + register caddr_t to, + int size) +{ + pmap_movepage((unsigned long)from, (unsigned long)to, (vm_size_t)size); +} + +/* + * System Call handling code + */ + +#define ERESTART -1 /* restart syscall */ +#define EJUSTRETURN -2 /* don't modify regs, just return */ + +struct sysent { /* system call table */ + unsigned short sy_narg; /* number of args */ + char sy_parallel; /* can execute in parallel */ + char sy_funnel; /* funnel type */ + unsigned long (*sy_call)(void *, void *, int *); /* implementing function */ +}; + +#define KERNEL_FUNNEL 1 +#define NETWORK_FUNNEL 2 + +extern funnel_t * kernel_flock; +extern funnel_t * network_flock; + +extern struct sysent sysent[]; + +void *get_bsdtask_info( + task_t); + +int set_bsduthreadargs (thread_act_t, struct i386_saved_state *, void *); + +void * get_bsduthreadarg(thread_act_t); + +void unix_syscall(struct i386_saved_state *); + +void +unix_syscall_return(int error) +{ + panic("unix_syscall_return not implemented yet!!"); +} + + +void +unix_syscall(struct i386_saved_state *regs) +{ + thread_act_t thread; + void *p, *vt; + unsigned short code; + struct sysent *callp; + int nargs, error; + int *rval; + int funnel_type; + vm_offset_t params; + extern int nsysent; + + thread = current_act(); + p = get_bsdtask_info(current_task()); + rval = (int *)get_bsduthreadrval(thread); + + //printf("[scall : eax %x]", regs->eax); + code = regs->eax; + params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int)); + callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; + if (callp == sysent) { + code = fuword(params); + params += sizeof (int); + callp = (code >= nsysent) ? 
&sysent[63] : &sysent[code]; + } + + vt = get_bsduthreadarg(thread); + + if ((nargs = (callp->sy_narg * sizeof (int))) && + (error = copyin((char *) params, (char *)vt , nargs)) != 0) { + regs->eax = error; + regs->efl |= EFL_CF; + thread_exception_return(); + /* NOTREACHED */ + } + + rval[0] = 0; + rval[1] = regs->edx; + + if(callp->sy_funnel == NETWORK_FUNNEL) { + (void) thread_funnel_set(network_flock, TRUE); + } + else { + (void) thread_funnel_set(kernel_flock, TRUE); + } + set_bsduthreadargs(thread, regs, NULL); + + if (callp->sy_narg > 8) + panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg); + + + error = (*(callp->sy_call))(p, (void *) vt, rval); + + if (error == ERESTART) { + regs->eip -= 7; + } + else if (error != EJUSTRETURN) { + if (error) { + regs->eax = error; + regs->efl |= EFL_CF; /* carry bit */ + } else { /* (not error) */ + regs->eax = rval[0]; + regs->edx = rval[1]; + regs->efl &= ~EFL_CF; + } + } + + (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); + + thread_exception_return(); + /* NOTREACHED */ +} + + +void +machdep_syscall( struct i386_saved_state *regs) +{ + int trapno, nargs; + machdep_call_t *entry; + thread_t thread; + + trapno = regs->eax; + if (trapno < 0 || trapno >= machdep_call_count) { + regs->eax = (unsigned int)kern_invalid(); + + thread_exception_return(); + /* NOTREACHED */ + } + + entry = &machdep_call_table[trapno]; + nargs = entry->nargs; + + if (nargs > 0) { + int args[nargs]; + + if (copyin((char *) regs->uesp + sizeof (int), + (char *) args, + nargs * sizeof (int))) { + + regs->eax = KERN_INVALID_ADDRESS; + + thread_exception_return(); + /* NOTREACHED */ + } + + asm volatile(" + 1: + mov (%2),%%eax; + pushl %%eax; + sub $4,%2; + dec %1; + jne 1b; + mov %3,%%eax; + call *%%eax; + mov %%eax,%0" + + : "=r" (regs->eax) + : "r" (nargs), + "r" (&args[nargs - 1]), + "g" (entry->routine) + : "ax", "cx", "dx", "sp"); + } + else + regs->eax = (unsigned int)(*entry->routine)(); + + (void) 
thread_funnel_set(current_thread()->funnel_lock, FALSE); + + thread_exception_return(); + /* NOTREACHED */ +} + + +kern_return_t +thread_set_cthread_self(int self) +{ + current_act()->mact.pcb->cthread_self = (unsigned int)self; + + return (KERN_SUCCESS); +} + +kern_return_t +thread_get_cthread_self(void) +{ + return ((kern_return_t)current_act()->mact.pcb->cthread_self); +} + +void +mach25_syscall(struct i386_saved_state *regs) +{ + printf("*** Atttempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n", + regs->eip, regs->eax, -regs->eax); + panic("FIXME!"); +} + +#endif /* MACH_BSD */ + +#undef current_thread +thread_act_t +current_thread(void) +{ + return(current_thread_fast()); +} diff --git a/osfmk/i386/bzero.s b/osfmk/i386/bzero.s new file mode 100644 index 000000000..e1d668695 --- /dev/null +++ b/osfmk/i386/bzero.s @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + /* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include + +/* + * void *memset(void * addr, int pattern, size_t length) + */ + +ENTRY(memset) + pushl %edi + movl 4+ 4(%esp),%edi /* addr */ + movb 4+ 8(%esp),%al /* pattern */ + movl 4+ 12(%esp),%edx /* length */ + movb %al,%ah + movw %ax,%cx + shll $16,%eax + movw %cx,%ax + cld +/* zero longs */ + movl %edx,%ecx + shrl $2,%ecx + rep + stosl +/* zero bytes */ + movl %edx,%ecx + andl $3,%ecx + rep + stosb + movl 4+ 4(%esp),%eax /* returns its first argument */ + popl %edi + ret + +/* + * void bzero(char * addr, unsigned int length) + */ +Entry(blkclr) +ENTRY(bzero) + pushl %edi + movl 4+ 4(%esp),%edi /* addr */ + movl 4+ 8(%esp),%edx /* length */ + xorl %eax,%eax + cld +/* zero longs */ + movl %edx,%ecx + shrl $2,%ecx + rep + stosl +/* zero bytes */ + movl %edx,%ecx + andl $3,%ecx + rep + stosb + popl %edi + ret diff --git a/osfmk/i386/cpu.c b/osfmk/i386/cpu.c new file mode 100644 index 000000000..5d6d05cae --- /dev/null +++ b/osfmk/i386/cpu.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: i386/cpu.c + * + * cpu specific routines + */ + +#include +#include +#include +#include + +/*ARGSUSED*/ +kern_return_t +cpu_control( + int slot_num, + processor_info_t info, + unsigned int count) +{ + printf("cpu_control not implemented\n"); + return (KERN_FAILURE); +} + +/*ARGSUSED*/ +kern_return_t +cpu_info_count( + processor_flavor_t flavor, + unsigned int *count) +{ + *count = 0; + return (KERN_FAILURE); +} + +/*ARGSUSED*/ +kern_return_t +cpu_info( + processor_flavor_t flavor, + int slot_num, + processor_info_t info, + unsigned int *count) +{ + printf("cpu_info not implemented\n"); + return (KERN_FAILURE); +} + +void +cpu_sleep() +{ + printf("cpu_sleep not implemented\n"); +} diff --git a/osfmk/i386/cpu_data.h b/osfmk/i386/cpu_data.h new file mode 100644 index 000000000..97fb613de --- /dev/null +++ b/osfmk/i386/cpu_data.h @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef I386_CPU_DATA +#define I386_CPU_DATA + +#include +#include + +#if defined(__GNUC__) + +#include +#include + +#if 0 +#ifndef __OPTIMIZE__ +#define extern static +#endif +#endif + +/* + * Everyone within the osfmk part of the kernel can use the fast + * inline versions of these routines. 
Everyone outside, must call + * the real thing, + */ +extern thread_t __inline__ current_thread_fast(void); +extern thread_t __inline__ current_thread_fast(void) +{ + register thread_t ct; + register int idx = (int)&((cpu_data_t *)0)->active_thread; + + __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (ct) : "r" (idx)); + + return (ct); +} + +#define current_thread() current_thread_fast() + +extern int __inline__ get_preemption_level(void); +extern void __inline__ disable_preemption(void); +extern void __inline__ enable_preemption(void); +extern void __inline__ enable_preemption_no_check(void); +extern void __inline__ mp_disable_preemption(void); +extern void __inline__ mp_enable_preemption(void); +extern void __inline__ mp_enable_preemption_no_check(void); +extern int __inline__ get_simple_lock_count(void); +extern int __inline__ get_interrupt_level(void); + +extern int __inline__ get_preemption_level(void) +{ + register int idx = (int)&((cpu_data_t *)0)->preemption_level; + register int pl; + + __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx)); + + return (pl); +} + +extern void __inline__ disable_preemption(void) +{ +#if MACH_ASSERT + extern void _disable_preemption(void); + + _disable_preemption(); +#else /* MACH_ASSERT */ + register int idx = (int)&((cpu_data_t *)0)->preemption_level; + + __asm__ volatile (" incl %%gs:(%0)" : : "r" (idx)); +#endif /* MACH_ASSERT */ +} + +extern void __inline__ enable_preemption(void) +{ +#if MACH_ASSERT + extern void _enable_preemption(void); + + assert(get_preemption_level() > 0); + _enable_preemption(); +#else /* MACH_ASSERT */ + extern void kernel_preempt_check (void); + register int idx = (int)&((cpu_data_t *)0)->preemption_level; + register void (*kpc)(void)= kernel_preempt_check; + + __asm__ volatile ("decl %%gs:(%0); jne 1f; \ + call %1; 1:" + : /* no outputs */ + : "r" (idx), "r" (kpc) + : "%eax", "%ecx", "%edx", "cc", "memory"); +#endif /* MACH_ASSERT */ +} + +extern void __inline__ 
enable_preemption_no_check(void) +{ +#if MACH_ASSERT + extern void _enable_preemption_no_check(void); + + assert(get_preemption_level() > 0); + _enable_preemption_no_check(); +#else /* MACH_ASSERT */ + register int idx = (int)&((cpu_data_t *)0)->preemption_level; + + __asm__ volatile ("decl %%gs:(%0)" + : /* no outputs */ + : "r" (idx) + : "cc", "memory"); +#endif /* MACH_ASSERT */ +} + +extern void __inline__ mp_disable_preemption(void) +{ +#if NCPUS > 1 + disable_preemption(); +#endif /* NCPUS > 1 */ +} + +extern void __inline__ mp_enable_preemption(void) +{ +#if NCPUS > 1 + enable_preemption(); +#endif /* NCPUS > 1 */ +} + +extern void __inline__ mp_enable_preemption_no_check(void) +{ +#if NCPUS > 1 + enable_preemption_no_check(); +#endif /* NCPUS > 1 */ +} + +extern int __inline__ get_simple_lock_count(void) +{ + register int idx = (int)&((cpu_data_t *)0)->simple_lock_count; + register int pl; + + __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx)); + + return (pl); +} + +extern int __inline__ get_interrupt_level(void) +{ + register int idx = (int)&((cpu_data_t *)0)->interrupt_level; + register int pl; + + __asm__ volatile (" movl %%gs:(%1),%0" : "=r" (pl) : "r" (idx)); + + return (pl); +} + +#if 0 +#ifndef __OPTIMIZE__ +#undef extern +#endif +#endif + +#else /* !defined(__GNUC__) */ + +#endif /* defined(__GNUC__) */ + +#endif /* I386_CPU_DATA */ diff --git a/osfmk/i386/cpu_number.h b/osfmk/i386/cpu_number.h new file mode 100644 index 000000000..7c15b8526 --- /dev/null +++ b/osfmk/i386/cpu_number.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Machine-dependent definitions for cpu identification. 
 */
#ifndef	_I386_CPU_NUMBER_H_
#define	_I386_CPU_NUMBER_H_

/* Out-of-line version, used when no inline definition is visible. */
extern int	cpu_number(void);

#ifdef	MACH_KERNEL_PRIVATE

/* NOTE(review): the original #include operands were lost in extraction
 * (angle-bracketed names stripped); restore them from the xnu tree. */
#include
#include

#include

#if	MP_V1_1
#include
#include

extern int	lapic_id;

/*
 * Derive the cpu number from the local APIC ID: load the mapped APIC
 * ID word, then shift and mask down to the ID field.
 */
extern __inline__ int cpu_number(void)
{
	register int	cpu;

	__asm__ volatile ("movl " CC_SYM_PREFIX "lapic_id, %0\n"
			  "	movl 0(%0), %0\n"
			  "	shrl %1, %0\n"
			  "	andl %2, %0"
		: "=r" (cpu)
		: "i" (LAPIC_ID_SHIFT), "i" (LAPIC_ID_MASK));

	return(cpu);
}
#else	/* MP_V1_1 */
/*
 * At least one corollary cpu type does not have local memory at all.
 * The only way I found to store the cpu number was in some 386/486
 * system register.  cr3 has bits 0, 1, 2 and 5, 6, 7, 8, 9, 10, 11
 * available.  Right now we use 0, 1 and 2.  So we are limited to 8 cpus.
 * For more cpus, we could use bits 5 - 11 with a shift.
 *
 * Even for other machines, like COMPAQ, this is much faster than
 * inb/outb: 4 cycles instead of 10 to 30.
 */
#if	defined(__GNUC__)
#if	NCPUS > 8
#error	cpu_number() definition only works for #cpus <= 8
#else

/* Read the cpu number stashed in the low 3 bits of %cr3. */
extern __inline__ int cpu_number(void)
{
	register int	cpu;

	__asm__ volatile ("movl %%cr3, %0\n"
			  "	andl $0x7, %0"
		: "=r" (cpu));
	return(cpu);
}
#endif
#endif	/* defined(__GNUC__) */

#endif	/* MP_V1_1 */

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* _I386_CPU_NUMBER_H_ */

/*
 * (start of osfmk/i386/cpuid.c)
 * Copyright (c) 2000 Apple Computer, Inc.  All rights reserved.
 * Apple Public Source License Version 1.1:
 * Distributed under the Apple Public Source License Version 1.1 on an
 * "AS IS" basis, WITHOUT WARRANTY OF ANY KIND; see the License for the
 * specific language governing rights and limitations.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * Values from http://einstein.et.tudelft.nl/~offerman/chiplist.html
 * (dated 18 Oct 1995)
 */

/* NOTE(review): original #include operands lost in extraction. */
#include
#include

/*
 * Generic product array (before CPUID)
 *
 * Each frequency table lists the known clock ratings (MHz) for a part,
 * terminated by 0.  The numeric 'delay' member of cpuid_product is the
 * calibration divisor used by cpuid_cpu_display() to turn delaycount
 * into MHz.
 */
unsigned int cpuid_i386_freq[] = { 12, 16, 20, 25, 33, 0 };
unsigned int cpuid_i486_freq[] = { 20, 25, 33, 50, 0 };

struct cpuid_product cpuid_generic[] = {
	{
		0, CPUID_FAMILY_386, 0,
		80, cpuid_i386_freq, "i386"
	},
	{
		0, CPUID_FAMILY_486, 0,
		240, cpuid_i486_freq, "i486"
	},
};

/*
 * INTEL product array
 */
unsigned int cpuid_i486_dx_freq[] = { 20, 25, 33, 0 };
unsigned int cpuid_i486_dx_s_freq[] = { 50, 0 };
unsigned int cpuid_i486_sx_freq[] = { 16, 20, 25, 33, 0 };
unsigned int cpuid_i486_dx2_freq[] = { 32, 40, 50, 66, 0 };
unsigned int cpuid_i486_sl_freq[] = { 25, 33, 0 };
unsigned int cpuid_i486_sx2_freq[] = { 50, 0 };
unsigned int cpuid_i486_dx2wb_freq[] = { 50, 66, 0 };
unsigned int cpuid_i486_dx4_freq[] = { 90, 100, 0 };

unsigned int cpuid_i486_dx2wb_od_freq[] = { 32, 40, 50, 66, 0 };
unsigned int cpuid_i486_dx4_od_freq[] = { 75, 99, 0 };

unsigned int cpuid_p5_freq[] = { 60, 66, 0 };
unsigned int cpuid_p54_freq[] = { 60, 66, 75, 90, 100, 120, 133, 166, 200, 0 };

unsigned int cpuid_p24t_freq[] = { 25, 33, 0 };
unsigned int cpuid_p24ct_freq[] = { 63, 83, 0 };

unsigned int cpuid_pii_freq[] = { 300, 0 };

/* (type, family, model) -> delay/frequency-table/name for Intel parts. */
struct cpuid_product cpuid_intel[] = {
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX,
		240, cpuid_i486_dx_freq, "Intel 486DX"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX_S,
		240, cpuid_i486_dx_s_freq, "Intel 486DX-S"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_SX,
		240, cpuid_i486_sx_freq, "Intel 486SX"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2,
		240, cpuid_i486_dx2_freq, "Intel 486DX2"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_SL,
		240, cpuid_i486_sl_freq, "Intel 486SL"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_SX2,
		240, cpuid_i486_sx2_freq, "Intel 486SX2"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2WB,
		240, cpuid_i486_dx2wb_freq, "Intel 486DX2WB"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_486, CPUID_MODEL_I486_DX4,
		240, cpuid_i486_dx4_freq, "Intel 486DX4"
	},
	{
		CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2,
		240, cpuid_i486_dx2_freq, "Intel 486DX2 OverDrive"
	},
	{
		CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_486, CPUID_MODEL_I486_DX2WB,
		240, cpuid_i486_dx2wb_od_freq, "Intel 486DX2WB OverDrive"
	},
	{
		CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_486, CPUID_MODEL_I486_DX4,
		240, cpuid_i486_dx4_od_freq, "Intel 486DX4 OverDrive"
	},
	{
		CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_P5, CPUID_MODEL_P24T,
		208, cpuid_p24t_freq, "Intel Pentium P24T OverDrive"
	},
	{
		CPUID_TYPE_OVERDRIVE, CPUID_FAMILY_P5, CPUID_MODEL_P54,
		207, cpuid_p24ct_freq, "Intel Pentium P24CT OverDrive"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_P5, CPUID_MODEL_P5A,
		207, cpuid_p5_freq, "Intel Pentium P5 rev A"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_P5, CPUID_MODEL_P5,
		207, cpuid_p5_freq, "Intel Pentium P5"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_P5, CPUID_MODEL_P54,
		207, cpuid_p54_freq, "Intel Pentium P54"
	},
	{
		CPUID_TYPE_OEM, CPUID_FAMILY_PPRO, CPUID_MODEL_PII,
		480, cpuid_pii_freq, "Intel Pentium II"
	}
};
unsigned int cpuid_intel_size = sizeof (cpuid_intel) / sizeof (cpuid_intel[0]);

/*
 * AMD product arrays
 */
unsigned int
cpuid_am486_dx_freq[] = { 33, 40, 0 };
unsigned int cpuid_am486_dx2_freq[] = { 50, 66, 80, 99, 0 };
unsigned int cpuid_am486_dx4_freq[] = { 99, 120, 133, 0 };
unsigned int cpuid_am486_dx4wb_freq[] = { 99, 120, 133, 0 };

/*
 * UMC product array
 */
unsigned int cpuid_u5sd_freq[] = { 25, 33, 40, 0 };
unsigned int cpuid_u5s_freq[] = { 25, 33, 40, 0 };

/*
 * Vendor ID array
 *
 * Maps a CPUID vendor string to its product table.  The empty-string
 * entry is the catch-all (generic) match; a NULL name terminates the
 * array.  NOTE(review): only the Intel and generic entries carry a
 * product table here — the AMD/UMC frequency arrays above are not
 * referenced by any table in this file.
 */
struct cpuid_name cpuid_name[] = {
	{ CPUID_VID_INTEL,
	  cpuid_intel, sizeof (cpuid_intel) / sizeof (cpuid_intel[0])
	},
	{ CPUID_VID_UMC,
	  (struct cpuid_product *)0,
	},
	{ CPUID_VID_AMD,
	  (struct cpuid_product *)0,
	},
	{ CPUID_VID_CYRIX,
	  (struct cpuid_product *)0,
	},
	{ CPUID_VID_NEXTGEN,
	  (struct cpuid_product *)0
	},
	{ "",
	  cpuid_generic, sizeof (cpuid_generic) / sizeof (cpuid_generic[0])
	},
	{ (char *)0,
	}
};

/*
 * Feature Flag values
 *
 * One name per bit of the CPUID feature dword, printed by
 * cpuid_cpu_display() for each bit set in cpuid_feature.
 */
char *cpuid_flag[] = {
	"FPU",		/* Floating point unit on-chip */
	"VME",		/* Virtual Mode Extension */
	"DE",		/* Debugging Extension */
	"PSE",		/* Page Size Extension */
	"TSC",		/* Time Stamp Counter */
	"MSR",		/* Model Specific Registers */
	"PAE",		/* Physical Address Extension */
	"MCE",		/* Machine Check Exception */
	"CX8",		/* CMPXCHG8 Instruction Supported */
	"APIC",		/* Local APIC Supported */
	"(bit 10)",
	"(bit 11)",
	"MTRR",		/* Machine Type Range Register */
	"PGE",		/* Page Global Enable */
	"MCA",		/* Machine Check Architecture */
	"CMOV",		/* Conditional Move Instruction Supported */
	"(bit 16)",
	"(bit 17)",
	"(bit 18)",
	"(bit 19)",
	"(bit 20)",
	"(bit 21)",
	"(bit 22)",
	"MMX",		/* Supports MMX instructions */
	"(bit 24)",
	"(bit 25)",
	"(bit 26)",
	"(bit 27)",
	"(bit 28)",
	"(bit 29)",
	"(bit 30)",
	"(bit 31)",
};

/*
 * Cache description array
 *
 * Descriptor byte -> human-readable description; terminated by the
 * CPUID_CACHE_NULL entry.
 */
struct cpuid_cache_desc cpuid_cache_desc[] = {
	{ CPUID_CACHE_ITLB_4K,
	  "Instruction TBL, 4K, pages 4-way set associative, 64 entries"
	},
	{ CPUID_CACHE_ITLB_4M,
	  "Instruction TBL, 4M, pages 4-way set
associative, 4 entries" + }, + { CPUID_CACHE_DTLB_4K, + "Data TBL, 4K pages, 4-way set associative, 64 entries" + }, + { CPUID_CACHE_DTLB_4M, + "Data TBL, 4M pages, 4-way set associative, 4 entries" + }, + { CPUID_CACHE_ICACHE_8K, + "Instruction L1 cache, 8K, 4-way set associative, 32byte line size" + }, + { CPUID_CACHE_DCACHE_8K, + "Data L1 cache, 8K, 2-way set associative, 32byte line size" + }, + { CPUID_CACHE_UCACHE_128K, + "Unified L2 cache, 128K, 4-way set associative, 32byte line size" + }, + { CPUID_CACHE_UCACHE_256K, + "Unified L2 cache, 256K, 4-way set associative, 32byte line size" + }, + { CPUID_CACHE_UCACHE_512K, + "Unified L2 cache, 512K, 4-way set associative, 32byte line size" + }, + { CPUID_CACHE_NULL, + (char *)0 + } +}; + +/* + * CPU identification + */ +unsigned int cpuid_value; +unsigned char cpuid_type; +unsigned char cpuid_family; +unsigned char cpuid_model; +unsigned char cpuid_stepping; +unsigned int cpuid_feature; +char cpuid_vid[CPUID_VID_SIZE + 1]; +unsigned char cpuid_cache[CPUID_CACHE_SIZE]; + +/* + * Return correct CPU_TYPE + */ +/*ARGSUSED*/ +cpu_type_t +cpuid_cputype( + int my_cpu) +{ +#ifndef MACH_BSD /* FIXME - add more family/chip types */ + switch (cpuid_family) { + case CPUID_FAMILY_PPRO: + return (CPU_TYPE_PENTIUMPRO); + case CPUID_FAMILY_P5: + return (CPU_TYPE_PENTIUM); + case CPUID_FAMILY_486: + return (CPU_TYPE_I486); + default: + break; + } +#endif + return (CPU_TYPE_I386); +} + +/* + * Display processor signature + */ +/*ARGSUSED*/ +void +cpuid_cpu_display( + char *header, + int my_cpu) +{ + struct cpuid_name *name; + unsigned int i; + unsigned int *freq; + unsigned int mhz; + unsigned int feature; + char **flag; + extern unsigned int delaycount; + + /* + * Identify vendor ID + */ + for (name = cpuid_name; name->name != (char *)0; name++) { + char *p = name->name; + char *q = cpuid_vid; + while (*p == *q && *p != 0) { + p++; + q++; + } + if (*p == '\0' && *q == '\0') + break; + } + if (name->name == (char *)0) { + 
printf("Unrecognized processor vendor id = '%s'\n", cpuid_vid); + return; + } + + /* + * Identify Product ID + */ + for (i = 0; i < name->size; i++) + if (name->product[i].type == cpuid_type && + name->product[i].family == cpuid_family && + name->product[i].model == cpuid_model) + break; + if (i == name->size) { + printf("%s processor (type = 0x%x, family = 0x%x, model = 0x%x)\n", + "Unrecognized", cpuid_type, cpuid_family, cpuid_model); + return; + } + + /* + * Look for frequency and adjust it to known values + */ + mhz = (1000 * delaycount) / name->product[i].delay; + for (freq = name->product[i].frequency; *freq != 0; freq++) + if (*freq >= mhz) + break; + if (*freq == 0) + mhz = *(freq - 1); + else if (freq == name->product[i].frequency) + mhz = *freq; + else if (*freq - mhz > mhz - *(freq - 1)) + mhz = *(freq - 1); + else if (*freq != mhz) + mhz = *freq; + + /* + * Display product and frequency + */ + printf("%s: %s at %d MHz (signature = %d/%d/%d/%d)\n", + header, name->product[i].name, mhz, cpuid_type, + cpuid_family, cpuid_model, cpuid_stepping); + + /* + * Display feature (if any) + */ + if (cpuid_feature) { + i = 0; + flag = cpuid_flag; + for (feature = cpuid_feature; feature != 0; feature >>= 1) { + if (feature & 1) + if (i == 0) { + printf("%s: %s", header, *flag); + i = 1; + } else + printf(", %s", *flag); + flag++; + } + printf("\n"); + } +} + +/* + * Display processor configuration information + */ +/*ARGSUSED*/ +void +cpuid_cache_display( + char *header, + int my_cpu) +{ + struct cpuid_cache_desc *desc; + unsigned int i; + + if (cpuid_cache[CPUID_CACHE_VALID] == 1) + for (i = 0; i < CPUID_CACHE_SIZE; i++) { + if (i != CPUID_CACHE_VALID || cpuid_cache[i] == CPUID_CACHE_NULL) + continue; + for (desc = cpuid_cache_desc; + desc->description != (char *)0; desc++) + if (desc->value == cpuid_cache[i]) + break; + if (desc->description != (char *)0) + printf("%s: %s\n", header, desc->description); + } +} diff --git a/osfmk/i386/cpuid.h b/osfmk/i386/cpuid.h 
/*
 * osfmk/i386/cpuid.h
 *
 * Copyright (c) 2000 Apple Computer, Inc.  All rights reserved.
 * Distributed under the Apple Public Source License Version 1.1 on an
 * "AS IS" basis, WITHOUT WARRANTY OF ANY KIND; see the License for the
 * specific language governing rights and limitations.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * x86 CPU identification
 *
 * TODO : Add TI/Thomson processors
 */

#ifndef	_MACHINE_CPUID_H_
#define	_MACHINE_CPUID_H_

/* CPUID leaf 0 vendor strings (12 bytes, not NUL-terminated). */
#define	CPUID_VID_SIZE		12
#define	CPUID_VID_INTEL		"GenuineIntel"
#define	CPUID_VID_UMC		"UMC UMC UMC "
#define	CPUID_VID_AMD		"AuthenticAMD"
#define	CPUID_VID_CYRIX		"CyrixInstead"
#define	CPUID_VID_NEXTGEN	"NexGenDriven"

/* Feature-flag bits of the CPUID feature dword.
 * NOTE(review): bits 0x40 (PAE) and 0x400+ gaps carry no define here;
 * confirm against the flag-name table in cpuid.c before relying on them. */
#define	CPUID_FEATURE_FPU	0x00000001	/* Floating point unit on-chip */
#define	CPUID_FEATURE_VME	0x00000002	/* Virtual Mode Extension */
#define	CPUID_FEATURE_IOB	0x00000004	/* I/O Breakpoints */
#define	CPUID_FEATURE_PSE	0x00000008	/* Page Size Extension */
#define	CPUID_FEATURE_TSC	0x00000010	/* Time Stamp Counter */
#define	CPUID_FEATURE_MSR	0x00000020	/* Model Specific Registers */
#define	CPUID_FEATURE_MCE	0x00000080	/* Machine Check Exception */
#define	CPUID_FEATURE_CX8	0x00000100	/* CMPXCHG8B */
#define	CPUID_FEATURE_APIC	0x00000200	/* On-chip APIC */
#define	CPUID_FEATURE_MTRR	0x00001000	/* Memory Type Range Register */
#define	CPUID_FEATURE_PGE	0x00002000	/* Page Global Enable */
#define	CPUID_FEATURE_MCA	0x00004000	/* Machine Check Architecture */
#define	CPUID_FEATURE_CMOV	0x00008000	/* Conditional Move Instruction */

/* Processor type field of the CPUID signature. */
#define	CPUID_TYPE_OEM		0x0	/* Original processor */
#define	CPUID_TYPE_OVERDRIVE	0x1	/* Overdrive processor */
#define	CPUID_TYPE_DUAL		0x2	/* Can be used as dual processor */
#define	CPUID_TYPE_RESERVED	0x3	/* Reserved */

/* Family field of the CPUID signature. */
#define	CPUID_FAMILY_386	0x3	/* Intel 386 (not part of CPUID) */
#define	CPUID_FAMILY_486	0x4	/* Intel 486 */
#define	CPUID_FAMILY_P5		0x5	/* Intel Pentium */
#define	CPUID_FAMILY_PPRO	0x6	/* Intel Pentium Pro */

/* Model field of the CPUID signature, per family/vendor. */
#define	CPUID_MODEL_I386_DX	0x0	/* Intel 386 (not part of CPUID) */

#define	CPUID_MODEL_I486_DX	0x0	/* Intel 486DX */
#define	CPUID_MODEL_I486_DX_S	0x1	/* Intel 486DX-S */
#define	CPUID_MODEL_I486_SX	0x2	/* Intel 486SX */
#define	CPUID_MODEL_I486_DX2	0x3	/* Intel 486DX2 */
#define	CPUID_MODEL_I486_SL	0x4	/* Intel 486SL */
#define	CPUID_MODEL_I486_SX2	0x5	/* Intel 486SX2 */
#define	CPUID_MODEL_I486_DX2WB	0x7	/* Intel 486DX2WB */
#define	CPUID_MODEL_I486_DX4	0x8	/* Intel 486DX4 */
#define	CPUID_MODEL_I486_DX4WB	0x9	/* Intel 486DX4WB */

#define	CPUID_MODEL_AM486_DX	0x1	/* AMD 486DX */
#define	CPUID_MODEL_AM486_DX2	0x3	/* AMD 486DX2 */
#define	CPUID_MODEL_AM486_DX2WB	0x7	/* AMD 486DX2WB */
#define	CPUID_MODEL_AM486_DX4	0x8	/* AMD 486DX4 */
#define	CPUID_MODEL_AM486_DX4WB	0x9	/* AMD 486DX4WB */
#define	CPUID_MODEL_AM486_5X86	0xE	/* AMD 5x86 */
#define	CPUID_MODEL_AM486_5X86WB 0xF	/* AMD 5x86WB */

#define	CPUID_MODEL_CYRIX5X86	0x9	/* CYRIX 5X86 */

#define	CPUID_MODEL_UMC5SD	0x1	/* UMC U5SD */
#define	CPUID_MODEL_UMC5S	0x2	/* UMC U5S */
#define	CPUID_MODEL_UMC486_DX2	0x3	/* UMC U486_DX2 */
#define	CPUID_MODEL_UMC486_SX2	0x5	/* UMC U486_SX2 */

#define	CPUID_MODEL_P5A		0x0	/* Intel P5 60/66 Step A */
#define	CPUID_MODEL_P5		0x1	/* Intel P5 60/66 */
#define	CPUID_MODEL_P54		0x2	/* Intel P5 75/80/100/120/133/166 */
#define	CPUID_MODEL_P24T	0x3	/* Intel P5 Overdrive 63/83 */

#define	CPUID_MODEL_P6		0x1	/* Intel P6 */
#define	CPUID_MODEL_PII		0x3	/* Intel PII */

/* Leaf-2 cache descriptor handling. */
#define	CPUID_CACHE_SIZE	16	/* Number of descriptor values */
#define	CPUID_CACHE_VALID	4	/* Index of descriptor validity */

#define	CPUID_CACHE_NULL	0x00	/* NULL */
#define	CPUID_CACHE_ITLB_4K	0x01	/* Instruction TLB, 4K pages */
#define	CPUID_CACHE_ITLB_4M	0x02	/* Instruction TLB, 4M pages */
#define	CPUID_CACHE_DTLB_4K	0x03	/* Data TLB, 4K pages */
#define	CPUID_CACHE_DTLB_4M	0x04	/* Data TLB, 4M pages */
#define	CPUID_CACHE_ICACHE_8K	0x06	/* Instruction cache, 8K */
#define	CPUID_CACHE_DCACHE_8K	0x0A	/* Data cache, 8K */
#define	CPUID_CACHE_UCACHE_128K	0x41	/* Unified cache, 128K */
#define	CPUID_CACHE_UCACHE_256K	0x42	/* Unified cache, 256K */
#define	CPUID_CACHE_UCACHE_512K	0x43	/* Unified cache, 512K */

#ifndef
 ASSEMBLER
/* NOTE(review): original #include operand lost in extraction. */
#include

/* CPU identification state, filled in at boot (defined in cpuid.c). */
extern unsigned int	cpuid_value;
extern unsigned char	cpuid_type;
extern unsigned char	cpuid_family;
extern unsigned char	cpuid_model;
extern unsigned char	cpuid_stepping;
extern unsigned int	cpuid_feature;
extern char		cpuid_vid[];
extern unsigned char	cpuid_cache[];

/*
 * Product ID arrays per vendor
 */
struct cpuid_product {
	unsigned char	type;		/* CPU type */
	unsigned char	family;		/* CPU family */
	unsigned char	model;		/* CPU model */
	unsigned int	delay;		/* 1MHz Delay (scale 1000) */
	unsigned int	*frequency;	/* Frequency array */
	char		*name;		/* Model name */
};

/*
 * Vendor ID structure
 */
struct cpuid_name {
	char			*name;		/* Vendor ID name */
	struct cpuid_product	*product;	/* product array */
	unsigned int		size;		/* #elements in product array */
};

/*
 * Cache ID description structure
 */
struct cpuid_cache_desc {
	unsigned char	value;		/* Descriptor value */
	char		*description;	/* Cache description */
};

/*
 * External declarations
 */
extern cpu_type_t	cpuid_cputype(int);
extern void		cpuid_cpu_display(char *, int);
extern void		cpuid_cache_display(char *, int);

#endif	/* ASSEMBLER */
#endif	/* _MACHINE_CPUID_H_ */

/*
 * (start of osfmk/i386/cswitch.s)
 * Copyright (c) 2000 Apple Computer, Inc.  All rights reserved.
 * Apple Public Source License Version 1.1:
 * Distributed under the Apple Public Source License Version 1.1 on an
 * "AS IS" basis, WITHOUT WARRANTY OF ANY KIND; see the License for the
 * specific language governing rights and limitations.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.  (CMU free-use permission and disclaimer notice;
 * see the original file for the full text.)
 */
/*
 */

/* NOTE(review): original #include operands lost in extraction. */
#include
#include

#include
#include
#include

#if	NCPUS > 1

#ifdef	SYMMETRY
#include
#endif

#if	AT386
#include
#endif	/* AT386 */

/* Index a 4-byte-element per-cpu array by cpu number held in 'reg'. */
#define CX(addr, reg)	addr(,reg,4)

#else	/* NCPUS == 1 */

/* Uniprocessor: cpu number is implicit, per-cpu arrays are scalars. */
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr

#endif	/* NCPUS == 1 */

/*
 * Context switch routines for i386.
 */

/*
 * Load_context(thread): first dispatch onto a thread.  Installs the
 * thread's kernel stack in the per-cpu stack pointers, restores its
 * saved callee-saved registers, and jumps to its saved PC with %eax
 * zero (no previous thread to return).
 */
Entry(Load_context)
	movl	S_ARG0,%ecx			/* get thread */
	movl	TH_KERNEL_STACK(%ecx),%ecx	/* get kernel stack */
	lea	KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%edx
						/* point to stack top */
	CPU_NUMBER(%eax)
	movl	%ecx,CX(EXT(active_stacks),%eax) /* store stack address */
	movl	%edx,CX(EXT(kernel_stack),%eax)	/* store stack top */

	movl	KSS_ESP(%ecx),%esp		/* switch stacks */
	movl	KSS_ESI(%ecx),%esi		/* restore registers */
	movl	KSS_EDI(%ecx),%edi
	movl	KSS_EBP(%ecx),%ebp
	movl	KSS_EBX(%ecx),%ebx
	xorl	%eax,%eax			/* return zero (no old thread) */
	jmp	*KSS_EIP(%ecx)			/* resume thread */

/*
 * This really only has to save registers
 * when there is no explicit continuation.
 *
 * Switch_context(old, continuation, new): saves the old thread's
 * callee-saved state on its kernel stack, records the continuation,
 * makes the new thread this cpu's active thread (via %gs-relative
 * CPD_ACTIVE_THREAD), switches stacks, updates the kernel-loaded
 * activation pointer, and resumes the new thread with the old thread
 * returned in %eax.
 */
Entry(Switch_context)
	CPU_NUMBER(%edx)
	movl	CX(EXT(active_stacks),%edx),%ecx /* get old kernel stack */

	movl	%ebx,KSS_EBX(%ecx)		/* save registers */
	movl	%ebp,KSS_EBP(%ecx)
	movl	%edi,KSS_EDI(%ecx)
	movl	%esi,KSS_ESI(%ecx)
	popl	KSS_EIP(%ecx)			/* save return PC */
	movl	%esp,KSS_ESP(%ecx)		/* save SP */

	movl	0(%esp),%eax			/* get old thread */
	movl	4(%esp),%ebx			/* get continuation */
	movl	%ebx,TH_CONTINUATION(%eax)	/* save continuation */
	movl	%ecx,TH_KERNEL_STACK(%eax)	/* save kernel stack */

	movl	8(%esp),%esi			/* get new thread */
	movl	$CPD_ACTIVE_THREAD,%ecx
	movl	%esi,%gs:(%ecx)			/* new thread is active */
	movl	TH_KERNEL_STACK(%esi),%ecx	/* get its kernel stack */
	lea	KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%ecx),%ebx
						/* point to stack top */

	movl	%ecx,CX(EXT(active_stacks),%edx) /* set current stack */
	movl	%ebx,CX(EXT(kernel_stack),%edx)	/* set stack top */

	movl	TH_TOP_ACT(%esi),%esi		/* get new_thread->top_act */
	cmpl	$0,ACT_KLOADED(%esi)		/* check kernel-loaded flag */
	je	0f
	movl	%esi,CX(EXT(active_kloaded),%edx)
	jmp	1f
0:
	movl	$0,CX(EXT(active_kloaded),%edx)
1:
	movl	KSS_ESP(%ecx),%esp		/* switch stacks */
	movl	KSS_ESI(%ecx),%esi		/* restore registers */
	movl	KSS_EDI(%ecx),%edi
	movl	KSS_EBP(%ecx),%ebp
	movl	KSS_EBX(%ecx),%ebx
	jmp	*KSS_EIP(%ecx)			/* return old thread */

/*
 * Thread_continue: entry point for a freshly dispatched continuation.
 * On entry %eax holds the thread and %ebx the continuation to call.
 */
Entry(Thread_continue)
	pushl	%eax				/* push the thread argument */
	xorl	%ebp,%ebp			/* zero frame pointer */
	call	*%ebx				/* call real continuation */

#if	NCPUS > 1
/*
 * void switch_to_shutdown_context(thread_t thread,
 *				   void (*routine)(processor_t),
 *				   processor_t processor)
 *
 * saves the kernel context of the thread,
 * switches to the interrupt stack,
 * continues the thread (with thread_continue),
 * then runs routine on the interrupt stack.
 *
 * Assumes that the thread is a kernel thread (thus
 * has no FPU state)
 */
Entry(switch_to_shutdown_context)
	CPU_NUMBER(%edx)
	movl	EXT(active_stacks)(,%edx,4),%ecx /* get old kernel stack */
	movl	%ebx,KSS_EBX(%ecx)		/* save registers */
	movl	%ebp,KSS_EBP(%ecx)
	movl	%edi,KSS_EDI(%ecx)
	movl	%esi,KSS_ESI(%ecx)
	popl	KSS_EIP(%ecx)			/* save return PC */
	movl	%esp,KSS_ESP(%ecx)		/* save SP */

	movl	0(%esp),%eax			/* get old thread */
	movl	$0,TH_CONTINUATION(%eax)	/* clear continuation */
	movl	%ecx,TH_KERNEL_STACK(%eax)	/* save old stack */
	movl	4(%esp),%ebx			/* get routine to run next */
	movl	8(%esp),%esi			/* get its argument */

	movl	CX(EXT(interrupt_stack),%edx),%ecx /* point to its intr stack */
	lea	INTSTACK_SIZE(%ecx),%esp	/* switch to it (top) */

	pushl	%eax				/* push thread */
	call	EXT(thread_dispatch)		/* reschedule thread */
	addl	$4,%esp				/* clean stack */

	pushl	%esi				/* push argument */
	call	*%ebx				/* call routine to run */
	hlt					/* (should never return) */

#endif	/* NCPUS > 1 */

	.text

	.globl	EXT(locore_end)
LEXT(locore_end)

/*
 * (start of osfmk/i386/db_disasm.c)
 * Copyright (c) 2000 Apple Computer, Inc.  All rights reserved.
 * Apple Public Source License Version 1.1:
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:36 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:37 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.8.3 1996/07/31 09:43:35 paire + * Merged with nmk20b7_shared (1.2.11.1) + * [96/06/10 paire] + * + * Revision 1.2.11.1 1996/05/14 13:49:36 paire + * Added support for new cmpxchg8b, cpuid, rdtsc, rdwmr, rsm and wrmsr + * Pentium instructions + * [95/11/23 paire] + * + * Revision 1.2.8.2 1994/09/23 01:50:45 ezf + * change marker to not FREE + * [1994/09/22 21:21:17 ezf] + * + * Revision 1.2.8.1 1994/09/16 15:26:28 emcmanus + * Only skip over GAS-inserted NOPs after branches if they are really + * NOPs; this depends at least on assembler options. + * [1994/09/16 15:26:03 emcmanus] + * + * Revision 1.2.6.3 1994/02/19 15:40:34 bolinger + * For load/store counting, mark all varieties of "call" as writing + * memory. 
+ * [1994/02/15 20:25:18 bolinger] + * + * Revision 1.2.6.2 1994/02/14 21:46:49 dwm + * Warning repair + * [1994/02/14 21:46:14 dwm] + * + * Revision 1.2.6.1 1994/02/12 23:26:05 bolinger + * Implement load/store counting for ddb "until" command. + * [1994/02/12 03:34:55 bolinger] + * + * Revision 1.2.2.3 1993/08/09 19:39:21 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:44:13 dswartz] + * + * Revision 1.2.2.2 1993/06/09 02:27:29 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:03:54 jeffc] + * + * Revision 1.2 1993/04/19 16:12:57 devrcs + * Print file names and lineno on branch instructions. + * [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Revision 1.1 1992/09/30 02:02:19 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5.3.1 92/03/03 16:14:27 jeffreyh + * Pick up changes from TRUNK + * [92/02/26 11:05:06 jeffreyh] + * + * Revision 2.6 92/01/03 20:05:00 dbg + * Add a switch to disassemble 16-bit code. + * Fix spelling of 'lods' opcodes. + * [91/10/30 dbg] + * + * Revision 2.5 91/10/09 16:05:58 af + * Supported disassemble of non current task by passing task parameter. + * [91/08/29 tak] + * + * Revision 2.4 91/05/14 16:05:04 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:11:03 mrt + * Changed to new Mach copyright + * [91/02/01 17:31:03 mrt] + * + * Revision 2.2 90/08/27 21:55:56 dbg + * Fix register operand for move to/from control/test/debug + * register instructions. Add i486 instructions. + * [90/08/27 dbg] + * + * Import db_sym.h. Print instruction displacements in + * current radix (signed). Change calling sequence of + * db_disasm. + * [90/08/21 dbg] + * Fix includes. + * [90/08/08 dbg] + * Created. + * [90/07/25 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted under the standard CMU free-use
 * permission and disclaimer notice; see the original file for the
 * full text.
 */
/*
 */

/*
 * Instruction disassembler.
 */

/* NOTE(review): original #include operands lost in extraction. */
#include
#include

#include
#include
#include

#include
#include

/* Decoded effective-address operand (filled in by db_read_address). */
struct i_addr {
	int		is_reg;	/* if reg, reg number is in 'disp' */
	int		disp;
	char *		base;
	char *		index;
	int		ss;
};

/* Forward */

extern db_addr_t	db_read_address(
				db_addr_t	loc,
				int		short_addr,
				int		regmodrm,
				struct i_addr	*addrp,
				task_t		task);
extern void		db_print_address(
				char *		seg,
				int		size,
				struct i_addr	*addrp,
				task_t		task);
extern db_addr_t	db_disasm_esc(
				db_addr_t	loc,
				int		inst,
				int		short_addr,
				int		size,
				char *		seg,
				task_t		task);

/*
 * Switch to disassemble 16-bit code.
 */
boolean_t	db_disasm_16 = FALSE;

/*
 * Size attributes
 */
#define	BYTE	0
#define	WORD	1
#define	LONG	2
#define	QUAD	3
#define	SNGL	4
#define	DBLR	5
#define	EXTR	6
#define	SDEP	7
#define	NONE	8

/*
 * Addressing modes
 */
#define	E	1	/* general effective address */
#define	Eind	2	/* indirect address (jump, call) */
#define	Ew	3	/* address, word size */
#define	Eb	4	/* address, byte size */
#define	R	5	/* register, in 'reg' field */
#define	Rw	6	/* word register, in 'reg' field */
#define	Ri	7	/* register in instruction */
#define	S	8	/* segment reg, in 'reg' field */
#define	Si	9	/* segment reg, in instruction */
#define	A	10	/* accumulator */
#define	BX	11	/* (bx) */
#define	CL	12	/* cl, for shifts */
#define	DX	13	/* dx, for IO */
#define	SI	14	/* si */
#define	DI	15	/* di */
#define	CR	16	/* control register */
#define	DR	17	/* debug register */
#define	TR	18	/* test register */
#define	I	19	/* immediate, unsigned */
#define	Is	20	/* immediate, signed */
#define	Ib	21	/* byte immediate, unsigned */
#define	Ibs	22	/* byte immediate, signed */
#define	Iw	23	/* word immediate, unsigned */
#define	Il	24	/* long immediate */
#define	O	25	/* direct address */
#define	Db	26	/* byte displacement from EIP */
#define	Dl	27	/* long displacement from EIP */
#define	o1	28	/* constant 1 */
#define	o3	29	/* constant 3 */
#define	OS	30	/* immediate offset/segment */
#define	ST	31	/* FP stack top */
#define	STI	32	/* FP stack */
#define	X	33	/* extended FP op */
#define	XA	34	/* for 'fstcw %ax' */

/* One entry per opcode byte in the tables below. */
struct inst {
	char *	i_name;		/* name */
	short	i_has_modrm;	/* has regmodrm byte */
	short	i_size;		/* operand size */
	int	i_mode;		/* addressing modes */
	char *	i_extra;	/* pointer to extra opcode table */
};

/* Pack up to three operand modes into one i_mode word (8 bits each). */
#define	op1(x)		(x)
#define	op2(x,y)	((x)|((y)<<8))
#define	op3(x,y,z)	((x)|((y)<<8)|((z)<<16))

/* Floating-point opcode entry: memory form vs register-register form. */
struct finst {
	char *	f_name;		/* name for memory instruction */
	int	f_size;		/* size for memory instruction
*/ + int f_rrmode; /* mode for rr instruction */ + char * f_rrname; /* name for rr instruction + (or pointer to table) */ +}; + +char * db_Grp6[] = { + "sldt", + "str", + "lldt", + "ltr", + "verr", + "verw", + "", + "" +}; + +char * db_Grp7[] = { + "sgdt", + "sidt", + "lgdt", + "lidt", + "smsw", + "", + "lmsw", + "invlpg" +}; + +char * db_Grp8[] = { + "", + "", + "", + "", + "bt", + "bts", + "btr", + "btc" +}; + +struct inst db_inst_0f0x[] = { +/*00*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp6 }, +/*01*/ { "", TRUE, NONE, op1(Ew), (char *)db_Grp7 }, +/*02*/ { "lar", TRUE, LONG, op2(E,R), 0 }, +/*03*/ { "lsl", TRUE, LONG, op2(E,R), 0 }, +/*04*/ { "", FALSE, NONE, 0, 0 }, +/*05*/ { "", FALSE, NONE, 0, 0 }, +/*06*/ { "clts", FALSE, NONE, 0, 0 }, +/*07*/ { "", FALSE, NONE, 0, 0 }, + +/*08*/ { "invd", FALSE, NONE, 0, 0 }, +/*09*/ { "wbinvd",FALSE, NONE, 0, 0 }, +/*0a*/ { "", FALSE, NONE, 0, 0 }, +/*0b*/ { "", FALSE, NONE, 0, 0 }, +/*0c*/ { "", FALSE, NONE, 0, 0 }, +/*0d*/ { "", FALSE, NONE, 0, 0 }, +/*0e*/ { "", FALSE, NONE, 0, 0 }, +/*0f*/ { "", FALSE, NONE, 0, 0 }, +}; + +struct inst db_inst_0f2x[] = { +/*20*/ { "mov", TRUE, LONG, op2(CR,E), 0 }, /* use E for reg */ +/*21*/ { "mov", TRUE, LONG, op2(DR,E), 0 }, /* since mod == 11 */ +/*22*/ { "mov", TRUE, LONG, op2(E,CR), 0 }, +/*23*/ { "mov", TRUE, LONG, op2(E,DR), 0 }, +/*24*/ { "mov", TRUE, LONG, op2(TR,E), 0 }, +/*25*/ { "", FALSE, NONE, 0, 0 }, +/*26*/ { "mov", TRUE, LONG, op2(E,TR), 0 }, +/*27*/ { "", FALSE, NONE, 0, 0 }, + +/*28*/ { "", FALSE, NONE, 0, 0 }, +/*29*/ { "", FALSE, NONE, 0, 0 }, +/*2a*/ { "", FALSE, NONE, 0, 0 }, +/*2b*/ { "", FALSE, NONE, 0, 0 }, +/*2c*/ { "", FALSE, NONE, 0, 0 }, +/*2d*/ { "", FALSE, NONE, 0, 0 }, +/*2e*/ { "", FALSE, NONE, 0, 0 }, +/*2f*/ { "", FALSE, NONE, 0, 0 }, +}; +struct inst db_inst_0f3x[] = { +/*30*/ { "rdtsc", FALSE, NONE, 0, 0 }, +/*31*/ { "rdmsr", FALSE, NONE, 0, 0 }, +/*32*/ { "wrmsr", FALSE, NONE, 0, 0 }, +/*33*/ { "", FALSE, NONE, 0, 0 }, +/*34*/ { "", FALSE, NONE, 
0, 0 }, +/*35*/ { "", FALSE, NONE, 0, 0 }, +/*36*/ { "", FALSE, NONE, 0, 0 }, +/*37*/ { "", FALSE, NONE, 0, 0 }, + +/*38*/ { "", FALSE, NONE, 0, 0 }, +/*39*/ { "", FALSE, NONE, 0, 0 }, +/*3a*/ { "", FALSE, NONE, 0, 0 }, +/*3b*/ { "", FALSE, NONE, 0, 0 }, +/*3c*/ { "", FALSE, NONE, 0, 0 }, +/*3d*/ { "", FALSE, NONE, 0, 0 }, +/*3e*/ { "", FALSE, NONE, 0, 0 }, +/*3f*/ { "", FALSE, NONE, 0, 0 }, +}; + +struct inst db_inst_0f8x[] = { +/*80*/ { "jo", FALSE, NONE, op1(Dl), 0 }, +/*81*/ { "jno", FALSE, NONE, op1(Dl), 0 }, +/*82*/ { "jb", FALSE, NONE, op1(Dl), 0 }, +/*83*/ { "jnb", FALSE, NONE, op1(Dl), 0 }, +/*84*/ { "jz", FALSE, NONE, op1(Dl), 0 }, +/*85*/ { "jnz", FALSE, NONE, op1(Dl), 0 }, +/*86*/ { "jbe", FALSE, NONE, op1(Dl), 0 }, +/*87*/ { "jnbe", FALSE, NONE, op1(Dl), 0 }, + +/*88*/ { "js", FALSE, NONE, op1(Dl), 0 }, +/*89*/ { "jns", FALSE, NONE, op1(Dl), 0 }, +/*8a*/ { "jp", FALSE, NONE, op1(Dl), 0 }, +/*8b*/ { "jnp", FALSE, NONE, op1(Dl), 0 }, +/*8c*/ { "jl", FALSE, NONE, op1(Dl), 0 }, +/*8d*/ { "jnl", FALSE, NONE, op1(Dl), 0 }, +/*8e*/ { "jle", FALSE, NONE, op1(Dl), 0 }, +/*8f*/ { "jnle", FALSE, NONE, op1(Dl), 0 }, +}; + +struct inst db_inst_0f9x[] = { +/*90*/ { "seto", TRUE, NONE, op1(Eb), 0 }, +/*91*/ { "setno", TRUE, NONE, op1(Eb), 0 }, +/*92*/ { "setb", TRUE, NONE, op1(Eb), 0 }, +/*93*/ { "setnb", TRUE, NONE, op1(Eb), 0 }, +/*94*/ { "setz", TRUE, NONE, op1(Eb), 0 }, +/*95*/ { "setnz", TRUE, NONE, op1(Eb), 0 }, +/*96*/ { "setbe", TRUE, NONE, op1(Eb), 0 }, +/*97*/ { "setnbe",TRUE, NONE, op1(Eb), 0 }, + +/*98*/ { "sets", TRUE, NONE, op1(Eb), 0 }, +/*99*/ { "setns", TRUE, NONE, op1(Eb), 0 }, +/*9a*/ { "setp", TRUE, NONE, op1(Eb), 0 }, +/*9b*/ { "setnp", TRUE, NONE, op1(Eb), 0 }, +/*9c*/ { "setl", TRUE, NONE, op1(Eb), 0 }, +/*9d*/ { "setnl", TRUE, NONE, op1(Eb), 0 }, +/*9e*/ { "setle", TRUE, NONE, op1(Eb), 0 }, +/*9f*/ { "setnle",TRUE, NONE, op1(Eb), 0 }, +}; + +struct inst db_inst_0fax[] = { +/*a0*/ { "push", FALSE, NONE, op1(Si), 0 }, +/*a1*/ { "pop", FALSE, 
NONE, op1(Si), 0 }, +/*a2*/ { "cpuid", FALSE, NONE, 0, 0 }, +/*a3*/ { "bt", TRUE, LONG, op2(E,R), 0 }, +/*a4*/ { "shld", TRUE, LONG, op3(Ib,E,R), 0 }, +/*a5*/ { "shld", TRUE, LONG, op3(CL,E,R), 0 }, +/*a6*/ { "", FALSE, NONE, 0, 0 }, +/*a7*/ { "", FALSE, NONE, 0, 0 }, + +/*a8*/ { "push", FALSE, NONE, op1(Si), 0 }, +/*a9*/ { "pop", FALSE, NONE, op1(Si), 0 }, +/*aa*/ { "rsm", FALSE, NONE, 0, 0 }, +/*ab*/ { "bts", TRUE, LONG, op2(E,R), 0 }, +/*ac*/ { "shrd", TRUE, LONG, op3(Ib,E,R), 0 }, +/*ad*/ { "shrd", TRUE, LONG, op3(CL,E,R), 0 }, +/*ae*/ { "", FALSE, NONE, 0, 0 }, +/*af*/ { "imul", TRUE, LONG, op2(E,R), 0 }, +}; + +struct inst db_inst_0fbx[] = { +/*b0*/ { "", FALSE, NONE, 0, 0 }, +/*b1*/ { "", FALSE, NONE, 0, 0 }, +/*b2*/ { "lss", TRUE, LONG, op2(E, R), 0 }, +/*b3*/ { "bts", TRUE, LONG, op2(R, E), 0 }, +/*b4*/ { "lfs", TRUE, LONG, op2(E, R), 0 }, +/*b5*/ { "lgs", TRUE, LONG, op2(E, R), 0 }, +/*b6*/ { "movzb", TRUE, LONG, op2(E, R), 0 }, +/*b7*/ { "movzw", TRUE, LONG, op2(E, R), 0 }, + +/*b8*/ { "", FALSE, NONE, 0, 0 }, +/*b9*/ { "", FALSE, NONE, 0, 0 }, +/*ba*/ { "", TRUE, LONG, op2(Is, E), (char *)db_Grp8 }, +/*bb*/ { "btc", TRUE, LONG, op2(R, E), 0 }, +/*bc*/ { "bsf", TRUE, LONG, op2(E, R), 0 }, +/*bd*/ { "bsr", TRUE, LONG, op2(E, R), 0 }, +/*be*/ { "movsb", TRUE, LONG, op2(E, R), 0 }, +/*bf*/ { "movsw", TRUE, LONG, op2(E, R), 0 }, +}; + +struct inst db_inst_0fcx[] = { +/*c0*/ { "xadd", TRUE, BYTE, op2(R, E), 0 }, +/*c1*/ { "xadd", TRUE, LONG, op2(R, E), 0 }, +/*c2*/ { "", FALSE, NONE, 0, 0 }, +/*c3*/ { "", FALSE, NONE, 0, 0 }, +/*c4*/ { "", FALSE, NONE, 0, 0 }, +/*c5*/ { "", FALSE, NONE, 0, 0 }, +/*c6*/ { "", FALSE, NONE, 0, 0 }, +/*c7*/ { "cmpxchg8b", FALSE, NONE, op1(E), 0 }, +/*c8*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +/*c9*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +/*ca*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +/*cb*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +/*cc*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +/*cd*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +/*ce*/ {
"bswap", FALSE, LONG, op1(Ri), 0 }, +/*cf*/ { "bswap", FALSE, LONG, op1(Ri), 0 }, +}; + +struct inst db_inst_0fdx[] = { +/*c0*/ { "cmpxchg",TRUE, BYTE, op2(R, E), 0 }, +/*c1*/ { "cmpxchg",TRUE, LONG, op2(R, E), 0 }, +/*c2*/ { "", FALSE, NONE, 0, 0 }, +/*c3*/ { "", FALSE, NONE, 0, 0 }, +/*c4*/ { "", FALSE, NONE, 0, 0 }, +/*c5*/ { "", FALSE, NONE, 0, 0 }, +/*c6*/ { "", FALSE, NONE, 0, 0 }, +/*c7*/ { "", FALSE, NONE, 0, 0 }, +/*c8*/ { "", FALSE, NONE, 0, 0 }, +/*c9*/ { "", FALSE, NONE, 0, 0 }, +/*ca*/ { "", FALSE, NONE, 0, 0 }, +/*cb*/ { "", FALSE, NONE, 0, 0 }, +/*cc*/ { "", FALSE, NONE, 0, 0 }, +/*cd*/ { "", FALSE, NONE, 0, 0 }, +/*ce*/ { "", FALSE, NONE, 0, 0 }, +/*cf*/ { "", FALSE, NONE, 0, 0 }, +}; + +struct inst *db_inst_0f[] = { + db_inst_0f0x, + 0, + db_inst_0f2x, + db_inst_0f3x, + 0, + 0, + 0, + 0, + db_inst_0f8x, + db_inst_0f9x, + db_inst_0fax, + db_inst_0fbx, + db_inst_0fcx, + db_inst_0fdx, + 0, + 0 +}; + +char * db_Esc92[] = { + "fnop", "", "", "", "", "", "", "" +}; +char * db_Esc93[] = { + "", "", "", "", "", "", "", "" +}; +char * db_Esc94[] = { + "fchs", "fabs", "", "", "ftst", "fxam", "", "" +}; +char * db_Esc95[] = { + "fld1", "fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","" +}; +char * db_Esc96[] = { + "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp", + "fincstp" +}; +char * db_Esc97[] = { + "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos" +}; + +char * db_Esca4[] = { + "", "fucompp","", "", "", "", "", "" +}; + +char * db_Escb4[] = { + "", "", "fnclex","fninit","", "", "", "" +}; + +char * db_Esce3[] = { + "", "fcompp","", "", "", "", "", "" +}; + +char * db_Escf4[] = { + "fnstsw","", "", "", "", "", "", "" +}; + +struct finst db_Esc8[] = { +/*0*/ { "fadd", SNGL, op2(STI,ST), 0 }, +/*1*/ { "fmul", SNGL, op2(STI,ST), 0 }, +/*2*/ { "fcom", SNGL, op2(STI,ST), 0 }, +/*3*/ { "fcomp", SNGL, op2(STI,ST), 0 }, +/*4*/ { "fsub", SNGL, op2(STI,ST), 0 }, +/*5*/ { "fsubr", SNGL, op2(STI,ST), 0 }, +/*6*/ { "fdiv", SNGL, 
op2(STI,ST), 0 }, +/*7*/ { "fdivr", SNGL, op2(STI,ST), 0 }, +}; + +struct finst db_Esc9[] = { +/*0*/ { "fld", SNGL, op1(STI), 0 }, +/*1*/ { "", NONE, op1(STI), "fxch" }, +/*2*/ { "fst", SNGL, op1(X), (char *)db_Esc92 }, +/*3*/ { "fstp", SNGL, op1(X), (char *)db_Esc93 }, +/*4*/ { "fldenv", NONE, op1(X), (char *)db_Esc94 }, +/*5*/ { "fldcw", NONE, op1(X), (char *)db_Esc95 }, +/*6*/ { "fnstenv",NONE, op1(X), (char *)db_Esc96 }, +/*7*/ { "fnstcw", NONE, op1(X), (char *)db_Esc97 }, +}; + +struct finst db_Esca[] = { +/*0*/ { "fiadd", WORD, 0, 0 }, +/*1*/ { "fimul", WORD, 0, 0 }, +/*2*/ { "ficom", WORD, 0, 0 }, +/*3*/ { "ficomp", WORD, 0, 0 }, +/*4*/ { "fisub", WORD, op1(X), (char *)db_Esca4 }, +/*5*/ { "fisubr", WORD, 0, 0 }, +/*6*/ { "fidiv", WORD, 0, 0 }, +/*7*/ { "fidivr", WORD, 0, 0 } +}; + +struct finst db_Escb[] = { +/*0*/ { "fild", WORD, 0, 0 }, +/*1*/ { "", NONE, 0, 0 }, +/*2*/ { "fist", WORD, 0, 0 }, +/*3*/ { "fistp", WORD, 0, 0 }, +/*4*/ { "", WORD, op1(X), (char *)db_Escb4 }, +/*5*/ { "fld", EXTR, 0, 0 }, +/*6*/ { "", WORD, 0, 0 }, +/*7*/ { "fstp", EXTR, 0, 0 }, +}; + +struct finst db_Escc[] = { +/*0*/ { "fadd", DBLR, op2(ST,STI), 0 }, +/*1*/ { "fmul", DBLR, op2(ST,STI), 0 }, +/*2*/ { "fcom", DBLR, op2(ST,STI), 0 }, +/*3*/ { "fcomp", DBLR, op2(ST,STI), 0 }, +/*4*/ { "fsub", DBLR, op2(ST,STI), "fsubr" }, +/*5*/ { "fsubr", DBLR, op2(ST,STI), "fsub" }, +/*6*/ { "fdiv", DBLR, op2(ST,STI), "fdivr" }, +/*7*/ { "fdivr", DBLR, op2(ST,STI), "fdiv" }, +}; + +struct finst db_Escd[] = { +/*0*/ { "fld", DBLR, op1(STI), "ffree" }, +/*1*/ { "", NONE, 0, 0 }, +/*2*/ { "fst", DBLR, op1(STI), 0 }, +/*3*/ { "fstp", DBLR, op1(STI), 0 }, +/*4*/ { "frstor", NONE, op1(STI), "fucom" }, +/*5*/ { "", NONE, op1(STI), "fucomp" }, +/*6*/ { "fnsave", NONE, 0, 0 }, +/*7*/ { "fnstsw", NONE, 0, 0 }, +}; + +struct finst db_Esce[] = { +/*0*/ { "fiadd", LONG, op2(ST,STI), "faddp" }, +/*1*/ { "fimul", LONG, op2(ST,STI), "fmulp" }, +/*2*/ { "ficom", LONG, 0, 0 }, +/*3*/ { "ficomp", LONG, op1(X), 
(char *)db_Esce3 }, +/*4*/ { "fisub", LONG, op2(ST,STI), "fsubrp" }, +/*5*/ { "fisubr", LONG, op2(ST,STI), "fsubp" }, +/*6*/ { "fidiv", LONG, op2(ST,STI), "fdivrp" }, +/*7*/ { "fidivr", LONG, op2(ST,STI), "fdivp" }, +}; + +struct finst db_Escf[] = { +/*0*/ { "fild", LONG, 0, 0 }, +/*1*/ { "", LONG, 0, 0 }, +/*2*/ { "fist", LONG, 0, 0 }, +/*3*/ { "fistp", LONG, 0, 0 }, +/*4*/ { "fbld", NONE, op1(XA), (char *)db_Escf4 }, +/*5*/ { "fld", QUAD, 0, 0 }, +/*6*/ { "fbstp", NONE, 0, 0 }, +/*7*/ { "fstp", QUAD, 0, 0 }, +}; + +struct finst *db_Esc_inst[] = { + db_Esc8, db_Esc9, db_Esca, db_Escb, + db_Escc, db_Escd, db_Esce, db_Escf +}; + +char * db_Grp1[] = { + "add", + "or", + "adc", + "sbb", + "and", + "sub", + "xor", + "cmp" +}; + +char * db_Grp2[] = { + "rol", + "ror", + "rcl", + "rcr", + "shl", + "shr", + "shl", + "sar" +}; + +struct inst db_Grp3[] = { + { "test", TRUE, NONE, op2(I,E), 0 }, + { "test", TRUE, NONE, op2(I,E), 0 }, + { "not", TRUE, NONE, op1(E), 0 }, + { "neg", TRUE, NONE, op1(E), 0 }, + { "mul", TRUE, NONE, op2(E,A), 0 }, + { "imul", TRUE, NONE, op2(E,A), 0 }, + { "div", TRUE, NONE, op2(E,A), 0 }, + { "idiv", TRUE, NONE, op2(E,A), 0 }, +}; + +struct inst db_Grp4[] = { + { "inc", TRUE, BYTE, op1(E), 0 }, + { "dec", TRUE, BYTE, op1(E), 0 }, + { "", TRUE, NONE, 0, 0 }, + { "", TRUE, NONE, 0, 0 }, + { "", TRUE, NONE, 0, 0 }, + { "", TRUE, NONE, 0, 0 }, + { "", TRUE, NONE, 0, 0 }, + { "", TRUE, NONE, 0, 0 } +}; + +struct inst db_Grp5[] = { + { "inc", TRUE, LONG, op1(E), 0 }, + { "dec", TRUE, LONG, op1(E), 0 }, + { "call", TRUE, NONE, op1(Eind),0 }, + { "lcall", TRUE, NONE, op1(Eind),0 }, + { "jmp", TRUE, NONE, op1(Eind),0 }, + { "ljmp", TRUE, NONE, op1(Eind),0 }, + { "push", TRUE, LONG, op1(E), 0 }, + { "", TRUE, NONE, 0, 0 } +}; + +struct inst db_inst_table[256] = { +/*00*/ { "add", TRUE, BYTE, op2(R, E), 0 }, +/*01*/ { "add", TRUE, LONG, op2(R, E), 0 }, +/*02*/ { "add", TRUE, BYTE, op2(E, R), 0 }, +/*03*/ { "add", TRUE, LONG, op2(E, R), 0 }, +/*04*/ { "add", 
FALSE, BYTE, op2(Is, A), 0 }, +/*05*/ { "add", FALSE, LONG, op2(Is, A), 0 }, +/*06*/ { "push", FALSE, NONE, op1(Si), 0 }, +/*07*/ { "pop", FALSE, NONE, op1(Si), 0 }, + +/*08*/ { "or", TRUE, BYTE, op2(R, E), 0 }, +/*09*/ { "or", TRUE, LONG, op2(R, E), 0 }, +/*0a*/ { "or", TRUE, BYTE, op2(E, R), 0 }, +/*0b*/ { "or", TRUE, LONG, op2(E, R), 0 }, +/*0c*/ { "or", FALSE, BYTE, op2(I, A), 0 }, +/*0d*/ { "or", FALSE, LONG, op2(I, A), 0 }, +/*0e*/ { "push", FALSE, NONE, op1(Si), 0 }, +/*0f*/ { "", FALSE, NONE, 0, 0 }, + +/*10*/ { "adc", TRUE, BYTE, op2(R, E), 0 }, +/*11*/ { "adc", TRUE, LONG, op2(R, E), 0 }, +/*12*/ { "adc", TRUE, BYTE, op2(E, R), 0 }, +/*13*/ { "adc", TRUE, LONG, op2(E, R), 0 }, +/*14*/ { "adc", FALSE, BYTE, op2(Is, A), 0 }, +/*15*/ { "adc", FALSE, LONG, op2(Is, A), 0 }, +/*16*/ { "push", FALSE, NONE, op1(Si), 0 }, +/*17*/ { "pop", FALSE, NONE, op1(Si), 0 }, + +/*18*/ { "sbb", TRUE, BYTE, op2(R, E), 0 }, +/*19*/ { "sbb", TRUE, LONG, op2(R, E), 0 }, +/*1a*/ { "sbb", TRUE, BYTE, op2(E, R), 0 }, +/*1b*/ { "sbb", TRUE, LONG, op2(E, R), 0 }, +/*1c*/ { "sbb", FALSE, BYTE, op2(Is, A), 0 }, +/*1d*/ { "sbb", FALSE, LONG, op2(Is, A), 0 }, +/*1e*/ { "push", FALSE, NONE, op1(Si), 0 }, +/*1f*/ { "pop", FALSE, NONE, op1(Si), 0 }, + +/*20*/ { "and", TRUE, BYTE, op2(R, E), 0 }, +/*21*/ { "and", TRUE, LONG, op2(R, E), 0 }, +/*22*/ { "and", TRUE, BYTE, op2(E, R), 0 }, +/*23*/ { "and", TRUE, LONG, op2(E, R), 0 }, +/*24*/ { "and", FALSE, BYTE, op2(I, A), 0 }, +/*25*/ { "and", FALSE, LONG, op2(I, A), 0 }, +/*26*/ { "", FALSE, NONE, 0, 0 }, +/*27*/ { "aaa", FALSE, NONE, 0, 0 }, + +/*28*/ { "sub", TRUE, BYTE, op2(R, E), 0 }, +/*29*/ { "sub", TRUE, LONG, op2(R, E), 0 }, +/*2a*/ { "sub", TRUE, BYTE, op2(E, R), 0 }, +/*2b*/ { "sub", TRUE, LONG, op2(E, R), 0 }, +/*2c*/ { "sub", FALSE, BYTE, op2(Is, A), 0 }, +/*2d*/ { "sub", FALSE, LONG, op2(Is, A), 0 }, +/*2e*/ { "", FALSE, NONE, 0, 0 }, +/*2f*/ { "das", FALSE, NONE, 0, 0 }, + +/*30*/ { "xor", TRUE, BYTE, op2(R, E), 0 }, +/*31*/ { 
"xor", TRUE, LONG, op2(R, E), 0 }, +/*32*/ { "xor", TRUE, BYTE, op2(E, R), 0 }, +/*33*/ { "xor", TRUE, LONG, op2(E, R), 0 }, +/*34*/ { "xor", FALSE, BYTE, op2(I, A), 0 }, +/*35*/ { "xor", FALSE, LONG, op2(I, A), 0 }, +/*36*/ { "", FALSE, NONE, 0, 0 }, +/*37*/ { "daa", FALSE, NONE, 0, 0 }, + +/*38*/ { "cmp", TRUE, BYTE, op2(R, E), 0 }, +/*39*/ { "cmp", TRUE, LONG, op2(R, E), 0 }, +/*3a*/ { "cmp", TRUE, BYTE, op2(E, R), 0 }, +/*3b*/ { "cmp", TRUE, LONG, op2(E, R), 0 }, +/*3c*/ { "cmp", FALSE, BYTE, op2(Is, A), 0 }, +/*3d*/ { "cmp", FALSE, LONG, op2(Is, A), 0 }, +/*3e*/ { "", FALSE, NONE, 0, 0 }, +/*3f*/ { "aas", FALSE, NONE, 0, 0 }, + +/*40*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*41*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*42*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*43*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*44*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*45*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*46*/ { "inc", FALSE, LONG, op1(Ri), 0 }, +/*47*/ { "inc", FALSE, LONG, op1(Ri), 0 }, + +/*48*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*49*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*4a*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*4b*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*4c*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*4d*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*4e*/ { "dec", FALSE, LONG, op1(Ri), 0 }, +/*4f*/ { "dec", FALSE, LONG, op1(Ri), 0 }, + +/*50*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*51*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*52*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*53*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*54*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*55*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*56*/ { "push", FALSE, LONG, op1(Ri), 0 }, +/*57*/ { "push", FALSE, LONG, op1(Ri), 0 }, + +/*58*/ { "pop", FALSE, LONG, op1(Ri), 0 }, +/*59*/ { "pop", FALSE, LONG, op1(Ri), 0 }, +/*5a*/ { "pop", FALSE, LONG, op1(Ri), 0 }, +/*5b*/ { "pop", FALSE, LONG, op1(Ri), 0 }, +/*5c*/ { "pop", FALSE, LONG, op1(Ri), 0 }, +/*5d*/ { "pop", FALSE, LONG, op1(Ri), 0 }, +/*5e*/ { "pop", FALSE, 
LONG, op1(Ri), 0 }, +/*5f*/ { "pop", FALSE, LONG, op1(Ri), 0 }, + +/*60*/ { "pusha", FALSE, LONG, 0, 0 }, +/*61*/ { "popa", FALSE, LONG, 0, 0 }, +/*62*/ { "bound", TRUE, LONG, op2(E, R), 0 }, +/*63*/ { "arpl", TRUE, NONE, op2(Ew,Rw), 0 }, + +/*64*/ { "", FALSE, NONE, 0, 0 }, +/*65*/ { "", FALSE, NONE, 0, 0 }, +/*66*/ { "", FALSE, NONE, 0, 0 }, +/*67*/ { "", FALSE, NONE, 0, 0 }, + +/*68*/ { "push", FALSE, LONG, op1(I), 0 }, +/*69*/ { "imul", TRUE, LONG, op3(I,E,R), 0 }, +/*6a*/ { "push", FALSE, LONG, op1(Ib), 0 }, +/*6b*/ { "imul", TRUE, LONG, op3(Ibs,E,R),0 }, +/*6c*/ { "ins", FALSE, BYTE, op2(DX, DI), 0 }, +/*6d*/ { "ins", FALSE, LONG, op2(DX, DI), 0 }, +/*6e*/ { "outs", FALSE, BYTE, op2(SI, DX), 0 }, +/*6f*/ { "outs", FALSE, LONG, op2(SI, DX), 0 }, + +/*70*/ { "jo", FALSE, NONE, op1(Db), 0 }, +/*71*/ { "jno", FALSE, NONE, op1(Db), 0 }, +/*72*/ { "jb", FALSE, NONE, op1(Db), 0 }, +/*73*/ { "jnb", FALSE, NONE, op1(Db), 0 }, +/*74*/ { "jz", FALSE, NONE, op1(Db), 0 }, +/*75*/ { "jnz", FALSE, NONE, op1(Db), 0 }, +/*76*/ { "jbe", FALSE, NONE, op1(Db), 0 }, +/*77*/ { "jnbe", FALSE, NONE, op1(Db), 0 }, + +/*78*/ { "js", FALSE, NONE, op1(Db), 0 }, +/*79*/ { "jns", FALSE, NONE, op1(Db), 0 }, +/*7a*/ { "jp", FALSE, NONE, op1(Db), 0 }, +/*7b*/ { "jnp", FALSE, NONE, op1(Db), 0 }, +/*7c*/ { "jl", FALSE, NONE, op1(Db), 0 }, +/*7d*/ { "jnl", FALSE, NONE, op1(Db), 0 }, +/*7e*/ { "jle", FALSE, NONE, op1(Db), 0 }, +/*7f*/ { "jnle", FALSE, NONE, op1(Db), 0 }, + +/*80*/ { "", TRUE, BYTE, op2(I, E), (char *)db_Grp1 }, +/*81*/ { "", TRUE, LONG, op2(I, E), (char *)db_Grp1 }, +/*82*/ { "", TRUE, BYTE, op2(Is,E), (char *)db_Grp1 }, +/*83*/ { "", TRUE, LONG, op2(Ibs,E), (char *)db_Grp1 }, +/*84*/ { "test", TRUE, BYTE, op2(R, E), 0 }, +/*85*/ { "test", TRUE, LONG, op2(R, E), 0 }, +/*86*/ { "xchg", TRUE, BYTE, op2(R, E), 0 }, +/*87*/ { "xchg", TRUE, LONG, op2(R, E), 0 }, + +/*88*/ { "mov", TRUE, BYTE, op2(R, E), 0 }, +/*89*/ { "mov", TRUE, LONG, op2(R, E), 0 }, +/*8a*/ { "mov", TRUE, BYTE, 
op2(E, R), 0 }, +/*8b*/ { "mov", TRUE, LONG, op2(E, R), 0 }, +/*8c*/ { "mov", TRUE, NONE, op2(S, Ew), 0 }, +/*8d*/ { "lea", TRUE, LONG, op2(E, R), 0 }, +/*8e*/ { "mov", TRUE, NONE, op2(Ew, S), 0 }, +/*8f*/ { "pop", TRUE, LONG, op1(E), 0 }, + +/*90*/ { "nop", FALSE, NONE, 0, 0 }, +/*91*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, +/*92*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, +/*93*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, +/*94*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, +/*95*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, +/*96*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, +/*97*/ { "xchg", FALSE, LONG, op2(A, Ri), 0 }, + +/*98*/ { "cbw", FALSE, SDEP, 0, "cwde" }, /* cbw/cwde */ +/*99*/ { "cwd", FALSE, SDEP, 0, "cdq" }, /* cwd/cdq */ +/*9a*/ { "lcall", FALSE, NONE, op1(OS), 0 }, +/*9b*/ { "wait", FALSE, NONE, 0, 0 }, +/*9c*/ { "pushf", FALSE, LONG, 0, 0 }, +/*9d*/ { "popf", FALSE, LONG, 0, 0 }, +/*9e*/ { "sahf", FALSE, NONE, 0, 0 }, +/*9f*/ { "lahf", FALSE, NONE, 0, 0 }, + +/*a0*/ { "mov", FALSE, BYTE, op2(O, A), 0 }, +/*a1*/ { "mov", FALSE, LONG, op2(O, A), 0 }, +/*a2*/ { "mov", FALSE, BYTE, op2(A, O), 0 }, +/*a3*/ { "mov", FALSE, LONG, op2(A, O), 0 }, +/*a4*/ { "movs", FALSE, BYTE, op2(SI,DI), 0 }, +/*a5*/ { "movs", FALSE, LONG, op2(SI,DI), 0 }, +/*a6*/ { "cmps", FALSE, BYTE, op2(SI,DI), 0 }, +/*a7*/ { "cmps", FALSE, LONG, op2(SI,DI), 0 }, + +/*a8*/ { "test", FALSE, BYTE, op2(I, A), 0 }, +/*a9*/ { "test", FALSE, LONG, op2(I, A), 0 }, +/*aa*/ { "stos", FALSE, BYTE, op1(DI), 0 }, +/*ab*/ { "stos", FALSE, LONG, op1(DI), 0 }, +/*ac*/ { "lods", FALSE, BYTE, op1(SI), 0 }, +/*ad*/ { "lods", FALSE, LONG, op1(SI), 0 }, +/*ae*/ { "scas", FALSE, BYTE, op1(SI), 0 }, +/*af*/ { "scas", FALSE, LONG, op1(SI), 0 }, + +/*b0*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, +/*b1*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, +/*b2*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, +/*b3*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, +/*b4*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, +/*b5*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 
}, +/*b6*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, +/*b7*/ { "mov", FALSE, BYTE, op2(I, Ri), 0 }, + +/*b8*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*b9*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*ba*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*bb*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*bc*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*bd*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*be*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, +/*bf*/ { "mov", FALSE, LONG, op2(I, Ri), 0 }, + +/*c0*/ { "", TRUE, BYTE, op2(Ib, E), (char *)db_Grp2 }, +/*c1*/ { "", TRUE, LONG, op2(Ib, E), (char *)db_Grp2 }, +/*c2*/ { "ret", FALSE, NONE, op1(Iw), 0 }, +/*c3*/ { "ret", FALSE, NONE, 0, 0 }, +/*c4*/ { "les", TRUE, LONG, op2(E, R), 0 }, +/*c5*/ { "lds", TRUE, LONG, op2(E, R), 0 }, +/*c6*/ { "mov", TRUE, BYTE, op2(I, E), 0 }, +/*c7*/ { "mov", TRUE, LONG, op2(I, E), 0 }, + +/*c8*/ { "enter", FALSE, NONE, op2(Ib, Iw), 0 }, +/*c9*/ { "leave", FALSE, NONE, 0, 0 }, +/*ca*/ { "lret", FALSE, NONE, op1(Iw), 0 }, +/*cb*/ { "lret", FALSE, NONE, 0, 0 }, +/*cc*/ { "int", FALSE, NONE, op1(o3), 0 }, +/*cd*/ { "int", FALSE, NONE, op1(Ib), 0 }, +/*ce*/ { "into", FALSE, NONE, 0, 0 }, +/*cf*/ { "iret", FALSE, NONE, 0, 0 }, + +/*d0*/ { "", TRUE, BYTE, op2(o1, E), (char *)db_Grp2 }, +/*d1*/ { "", TRUE, LONG, op2(o1, E), (char *)db_Grp2 }, +/*d2*/ { "", TRUE, BYTE, op2(CL, E), (char *)db_Grp2 }, +/*d3*/ { "", TRUE, LONG, op2(CL, E), (char *)db_Grp2 }, +/*d4*/ { "aam", TRUE, NONE, 0, 0 }, +/*d5*/ { "aad", TRUE, NONE, 0, 0 }, +/*d6*/ { "", FALSE, NONE, 0, 0 }, +/*d7*/ { "xlat", FALSE, BYTE, op1(BX), 0 }, + +/*d8*/ { "", TRUE, NONE, 0, (char *)db_Esc8 }, +/*d9*/ { "", TRUE, NONE, 0, (char *)db_Esc9 }, +/*da*/ { "", TRUE, NONE, 0, (char *)db_Esca }, +/*db*/ { "", TRUE, NONE, 0, (char *)db_Escb }, +/*dc*/ { "", TRUE, NONE, 0, (char *)db_Escc }, +/*dd*/ { "", TRUE, NONE, 0, (char *)db_Escd }, +/*de*/ { "", TRUE, NONE, 0, (char *)db_Esce }, +/*df*/ { "", TRUE, NONE, 0, (char *)db_Escf }, + +/*e0*/ { "loopne",FALSE, NONE, 
op1(Db), 0 }, +/*e1*/ { "loope", FALSE, NONE, op1(Db), 0 }, +/*e2*/ { "loop", FALSE, NONE, op1(Db), 0 }, +/*e3*/ { "jcxz", FALSE, SDEP, op1(Db), "jecxz" }, +/*e4*/ { "in", FALSE, BYTE, op2(Ib, A), 0 }, +/*e5*/ { "in", FALSE, LONG, op2(Ib, A) , 0 }, +/*e6*/ { "out", FALSE, BYTE, op2(A, Ib), 0 }, +/*e7*/ { "out", FALSE, LONG, op2(A, Ib) , 0 }, + +/*e8*/ { "call", FALSE, NONE, op1(Dl), 0 }, +/*e9*/ { "jmp", FALSE, NONE, op1(Dl), 0 }, +/*ea*/ { "ljmp", FALSE, NONE, op1(OS), 0 }, +/*eb*/ { "jmp", FALSE, NONE, op1(Db), 0 }, +/*ec*/ { "in", FALSE, BYTE, op2(DX, A), 0 }, +/*ed*/ { "in", FALSE, LONG, op2(DX, A) , 0 }, +/*ee*/ { "out", FALSE, BYTE, op2(A, DX), 0 }, +/*ef*/ { "out", FALSE, LONG, op2(A, DX) , 0 }, + +/*f0*/ { "", FALSE, NONE, 0, 0 }, +/*f1*/ { "", FALSE, NONE, 0, 0 }, +/*f2*/ { "", FALSE, NONE, 0, 0 }, +/*f3*/ { "", FALSE, NONE, 0, 0 }, +/*f4*/ { "hlt", FALSE, NONE, 0, 0 }, +/*f5*/ { "cmc", FALSE, NONE, 0, 0 }, +/*f6*/ { "", TRUE, BYTE, 0, (char *)db_Grp3 }, +/*f7*/ { "", TRUE, LONG, 0, (char *)db_Grp3 }, + +/*f8*/ { "clc", FALSE, NONE, 0, 0 }, +/*f9*/ { "stc", FALSE, NONE, 0, 0 }, +/*fa*/ { "cli", FALSE, NONE, 0, 0 }, +/*fb*/ { "sti", FALSE, NONE, 0, 0 }, +/*fc*/ { "cld", FALSE, NONE, 0, 0 }, +/*fd*/ { "std", FALSE, NONE, 0, 0 }, +/*fe*/ { "", TRUE, NONE, 0, (char *)db_Grp4 }, +/*ff*/ { "", TRUE, NONE, 0, (char *)db_Grp5 }, +}; + +struct inst db_bad_inst = + { "???", FALSE, NONE, 0, 0 } +; + +#define f_mod(byte) ((byte)>>6) +#define f_reg(byte) (((byte)>>3)&0x7) +#define f_rm(byte) ((byte)&0x7) + +#define sib_ss(byte) ((byte)>>6) +#define sib_index(byte) (((byte)>>3)&0x7) +#define sib_base(byte) ((byte)&0x7) + +char * db_index_reg_16[8] = { + "%bx,%si", + "%bx,%di", + "%bp,%si", + "%bp,%di", + "%si", + "%di", + "%bp", + "%bx" +}; + +char * db_reg[3][8] = { + "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh", + "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di", + "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi" +}; + +char * 
db_seg_reg[8] = { + "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "", "" +}; + +/* + * lengths for size attributes + */ +int db_lengths[] = { + 1, /* BYTE */ + 2, /* WORD */ + 4, /* LONG */ + 8, /* QUAD */ + 4, /* SNGL */ + 8, /* DBLR */ + 10, /* EXTR */ +}; + +#define get_value_inc(result, loc, size, is_signed, task) \ + result = db_get_task_value((loc), (size), (is_signed), (task)); \ + (loc) += (size); + +/* + * Read address at location and return updated location. + */ +db_addr_t +db_read_address( + db_addr_t loc, + int short_addr, + int regmodrm, + struct i_addr *addrp, /* out */ + task_t task) +{ + int mod, rm, sib, index, ss, disp; + + mod = f_mod(regmodrm); + rm = f_rm(regmodrm); + + if (mod == 3) { + addrp->is_reg = TRUE; + addrp->disp = rm; + return (loc); + } + addrp->is_reg = FALSE; + addrp->index = 0; + + if (short_addr) { + addrp->index = 0; + addrp->ss = 0; + switch (mod) { + case 0: + if (rm == 6) { + get_value_inc(disp, loc, 2, TRUE, task); + addrp->disp = disp; + addrp->base = 0; + } + else { + addrp->disp = 0; + addrp->base = db_index_reg_16[rm]; + } + break; + case 1: + get_value_inc(disp, loc, 1, TRUE, task); + addrp->disp = disp; + addrp->base = db_index_reg_16[rm]; + break; + case 2: + get_value_inc(disp, loc, 2, TRUE, task); + addrp->disp = disp; + addrp->base = db_index_reg_16[rm]; + break; + } + } + else { + if (mod != 3 && rm == 4) { + get_value_inc(sib, loc, 1, FALSE, task); + rm = sib_base(sib); + index = sib_index(sib); + if (index != 4) + addrp->index = db_reg[LONG][index]; + addrp->ss = sib_ss(sib); + } + + switch (mod) { + case 0: + if (rm == 5) { + get_value_inc(addrp->disp, loc, 4, FALSE, task); + addrp->base = 0; + } + else { + addrp->disp = 0; + addrp->base = db_reg[LONG][rm]; + } + break; + + case 1: + get_value_inc(disp, loc, 1, TRUE, task); + addrp->disp = disp; + addrp->base = db_reg[LONG][rm]; + break; + + case 2: + get_value_inc(disp, loc, 4, FALSE, task); + addrp->disp = disp; + addrp->base = db_reg[LONG][rm]; + break; + } + 
} + return (loc); +} + +void +db_print_address( + char * seg, + int size, + struct i_addr *addrp, + task_t task) +{ + if (addrp->is_reg) { + db_printf("%s", db_reg[size][addrp->disp]); + return; + } + + if (seg) { + db_printf("%s:", seg); + } + + if (addrp->base != 0 || addrp->index != 0) { + db_printf("%#n", addrp->disp); + db_printf("("); + if (addrp->base) + db_printf("%s", addrp->base); + if (addrp->index) + db_printf(",%s,%d", addrp->index, 1<<addrp->ss); + db_printf(")"); + } else + db_task_printsym((db_addr_t)addrp->disp, DB_STGY_ANY, task); +} + +/* + * Disassemble floating-point ("escape") instruction + * and return updated location. + */ +db_addr_t +db_disasm_esc( + db_addr_t loc, + int inst, + int short_addr, + int size, + char * seg, + task_t task) +{ + int regmodrm; + struct finst *fp; + int mod; + struct i_addr address; + char * name; + + get_value_inc(regmodrm, loc, 1, FALSE, task); + fp = &db_Esc_inst[inst - 0xd8][f_reg(regmodrm)]; + mod = f_mod(regmodrm); + if (mod != 3) { + /* + * Normal address modes. + */ + loc = db_read_address(loc, short_addr, regmodrm, &address, task); + db_printf(fp->f_name); + switch(fp->f_size) { + case SNGL: + db_printf("s"); + break; + case DBLR: + db_printf("l"); + break; + case EXTR: + db_printf("t"); + break; + case WORD: + db_printf("s"); + break; + case LONG: + db_printf("l"); + break; + case QUAD: + db_printf("q"); + break; + default: + break; + } + db_printf("\t"); + db_print_address(seg, BYTE, &address, task); + } + else { + /* + * 'reg-reg' - special formats + */ + switch (fp->f_rrmode) { + case op2(ST,STI): + name = (fp->f_rrname) ? fp->f_rrname : fp->f_name; + db_printf("%s\t%%st,%%st(%d)",name,f_rm(regmodrm)); + break; + case op2(STI,ST): + name = (fp->f_rrname) ? fp->f_rrname : fp->f_name; + db_printf("%s\t%%st(%d),%%st",name, f_rm(regmodrm)); + break; + case op1(STI): + name = (fp->f_rrname) ?
fp->f_rrname : fp->f_name; + db_printf("%s\t%%st(%d)",name, f_rm(regmodrm)); + break; + case op1(X): + db_printf("%s", ((char **)fp->f_rrname)[f_rm(regmodrm)]); + break; + case op1(XA): + db_printf("%s\t%%ax", + ((char **)fp->f_rrname)[f_rm(regmodrm)]); + break; + default: + db_printf(""); + break; + } + } + + return (loc); +} + +/* + * Disassemble instruction at 'loc'. 'altfmt' specifies an + * (optional) alternate format. Return address of start of + * next instruction. + */ +db_addr_t +db_disasm( + db_addr_t loc, + boolean_t altfmt, + task_t task) +{ + int inst; + int size; + int short_addr; + char * seg; + struct inst * ip; + char * i_name; + int i_size; + int i_mode; + int regmodrm; + boolean_t first; + int displ; + int prefix; + int imm; + int imm2; + int len; + struct i_addr address; + char *filename; + int linenum; + + get_value_inc(inst, loc, 1, FALSE, task); + if (db_disasm_16) { + short_addr = TRUE; + size = WORD; + } + else { + short_addr = FALSE; + size = LONG; + } + seg = 0; + + /* + * Get prefixes + */ + prefix = TRUE; + do { + switch (inst) { + case 0x66: /* data16 */ + if (size == LONG) + size = WORD; + else + size = LONG; + break; + case 0x67: + short_addr = !short_addr; + break; + case 0x26: + seg = "%es"; + break; + case 0x36: + seg = "%ss"; + break; + case 0x2e: + seg = "%cs"; + break; + case 0x3e: + seg = "%ds"; + break; + case 0x64: + seg = "%fs"; + break; + case 0x65: + seg = "%gs"; + break; + case 0xf0: + db_printf("lock "); + break; + case 0xf2: + db_printf("repne "); + break; + case 0xf3: + db_printf("repe "); /* XXX repe VS rep */ + break; + default: + prefix = FALSE; + break; + } + if (prefix) { + get_value_inc(inst, loc, 1, FALSE, task); + } + } while (prefix); + + if (inst >= 0xd8 && inst <= 0xdf) { + loc = db_disasm_esc(loc, inst, short_addr, size, seg, task); + db_printf("\n"); + return (loc); + } + + if (inst == 0x0f) { + get_value_inc(inst, loc, 1, FALSE, task); + ip = db_inst_0f[inst>>4]; + if (ip == 0) { + ip = &db_bad_inst; + } 
+ else { + ip = &ip[inst&0xf]; + } + } + else + ip = &db_inst_table[inst]; + + if (ip->i_has_modrm) { + get_value_inc(regmodrm, loc, 1, FALSE, task); + loc = db_read_address(loc, short_addr, regmodrm, &address, task); + } + + i_name = ip->i_name; + i_size = ip->i_size; + i_mode = ip->i_mode; + + if (ip->i_extra == (char *)db_Grp1 || + ip->i_extra == (char *)db_Grp2 || + ip->i_extra == (char *)db_Grp6 || + ip->i_extra == (char *)db_Grp7 || + ip->i_extra == (char *)db_Grp8) { + i_name = ((char **)ip->i_extra)[f_reg(regmodrm)]; + } + else if (ip->i_extra == (char *)db_Grp3) { + ip = (struct inst *)ip->i_extra; + ip = &ip[f_reg(regmodrm)]; + i_name = ip->i_name; + i_mode = ip->i_mode; + } + else if (ip->i_extra == (char *)db_Grp4 || + ip->i_extra == (char *)db_Grp5) { + ip = (struct inst *)ip->i_extra; + ip = &ip[f_reg(regmodrm)]; + i_name = ip->i_name; + i_mode = ip->i_mode; + i_size = ip->i_size; + } + + if (i_size == SDEP) { + if (size == WORD) + db_printf(i_name); + else + db_printf(ip->i_extra); + } + else { + db_printf(i_name); + if (i_size != NONE) { + if (i_size == BYTE) { + db_printf("b"); + size = BYTE; + } + else if (i_size == WORD) { + db_printf("w"); + size = WORD; + } + else if (size == WORD) + db_printf("w"); + else + db_printf("l"); + } + } + db_printf("\t"); + for (first = TRUE; + i_mode != 0; + i_mode >>= 8, first = FALSE) + { + if (!first) + db_printf(","); + + switch (i_mode & 0xFF) { + + case E: + db_print_address(seg, size, &address, task); + break; + + case Eind: + db_printf("*"); + db_print_address(seg, size, &address, task); + break; + + case Ew: + db_print_address(seg, WORD, &address, task); + break; + + case Eb: + db_print_address(seg, BYTE, &address, task); + break; + + case R: + db_printf("%s", db_reg[size][f_reg(regmodrm)]); + break; + + case Rw: + db_printf("%s", db_reg[WORD][f_reg(regmodrm)]); + break; + + case Ri: + db_printf("%s", db_reg[size][f_rm(inst)]); + break; + + case S: + db_printf("%s", db_seg_reg[f_reg(regmodrm)]); + break; + 
+ case Si: + db_printf("%s", db_seg_reg[f_reg(inst)]); + break; + + case A: + db_printf("%s", db_reg[size][0]); /* acc */ + break; + + case BX: + if (seg) + db_printf("%s:", seg); + db_printf("(%s)", short_addr ? "%bx" : "%ebx"); + break; + + case CL: + db_printf("%%cl"); + break; + + case DX: + db_printf("%%dx"); + break; + + case SI: + if (seg) + db_printf("%s:", seg); + db_printf("(%s)", short_addr ? "%si" : "%esi"); + break; + + case DI: + db_printf("%%es:(%s)", short_addr ? "%di" : "%edi"); + break; + + case CR: + db_printf("%%cr%d", f_reg(regmodrm)); + break; + + case DR: + db_printf("%%dr%d", f_reg(regmodrm)); + break; + + case TR: + db_printf("%%tr%d", f_reg(regmodrm)); + break; + + case I: + len = db_lengths[size]; + get_value_inc(imm, loc, len, FALSE, task);/* unsigned */ + db_printf("$%#n", imm); + break; + + case Is: + len = db_lengths[size]; + get_value_inc(imm, loc, len, TRUE, task); /* signed */ + db_printf("$%#r", imm); + break; + + case Ib: + get_value_inc(imm, loc, 1, FALSE, task); /* unsigned */ + db_printf("$%#n", imm); + break; + + case Ibs: + get_value_inc(imm, loc, 1, TRUE, task); /* signed */ + db_printf("$%#r", imm); + break; + + case Iw: + get_value_inc(imm, loc, 2, FALSE, task); /* unsigned */ + db_printf("$%#n", imm); + break; + + case Il: + get_value_inc(imm, loc, 4, FALSE, task); + db_printf("$%#n", imm); + break; + + case O: + if (short_addr) { + get_value_inc(displ, loc, 2, TRUE, task); + } + else { + get_value_inc(displ, loc, 4, TRUE, task); + } + if (seg) + db_printf("%s:%#r",seg, displ); + else + db_task_printsym((db_addr_t)displ, DB_STGY_ANY, task); + break; + + case Db: + get_value_inc(displ, loc, 1, TRUE, task); + if (short_addr) { + /* offset only affects low 16 bits */ + displ = (loc & 0xffff0000) + | ((loc + displ) & 0xffff); + } + else + displ = displ + loc; + db_task_printsym((db_addr_t)displ,DB_STGY_ANY,task); + if (db_line_at_pc(0, &filename, &linenum, displ)) { + db_printf(" [%s", filename); + if (linenum > 0) + 
db_printf(":%d", linenum); + db_printf("]"); + } + break; + + case Dl: + if (short_addr) { + get_value_inc(displ, loc, 2, TRUE, task); + /* offset only affects low 16 bits */ + displ = (loc & 0xffff0000) + | ((loc + displ) & 0xffff); + } + else { + get_value_inc(displ, loc, 4, TRUE, task); + displ = displ + loc; + } + db_task_printsym((db_addr_t)displ, DB_STGY_ANY, task); + if (db_line_at_pc(0, &filename, &linenum, displ)) { + db_printf(" [%s", filename); + if (linenum > 0) + db_printf(":%d", linenum); + db_printf("]"); + } + break; + + case o1: + db_printf("$1"); + break; + + case o3: + db_printf("$3"); + break; + + case OS: + if (short_addr) { + get_value_inc(imm, loc, 2, FALSE, task); /* offset */ + } + else { + get_value_inc(imm, loc, 4, FALSE, task); /* offset */ + } + get_value_inc(imm2, loc, 2, FALSE, task); /* segment */ + db_printf("$%#n,%#n", imm2, imm); + break; + } + } + + if (altfmt == 0 && !db_disasm_16) { + if (inst == 0xe9 || inst == 0xeb) { /* jmp, Dl or Db */ + /* + * GAS pads to longword boundary after unconditional jumps. + */ + while (loc & (4-1)) { + get_value_inc(inst, loc, 0, FALSE, task); + if (inst != 0x90) /* nop */ + break; + loc++; + } + } + } + db_printf("\n"); + return (loc); +} + +/* + * Classify instructions by whether they read or write memory. 
+ */ + +#define DBLS_LOAD 0x01 /* instruction reads from memory */ +#define DBLS_STORE 0x02 /* instruction writes to memory */ + +#define DBLS_MODRM 0x10 /* instruction uses mod r/m byte */ +#define DBLS_SECOND 0x20 /* instruction does two operations */ +#define DBLS_ESCAPE 0x40 /* escape to two-byte opcodes */ +#define DBLS_SWREG 0x80 /* need to switch on reg bits of mod r/m */ + +#define DBLS_MODS 0xf0 +#define DBLS_LMASK (DBLS_MODS|DBLS_LOAD) +#define DBLS_SMASK (DBLS_MODS|DBLS_STORE) + +char db_ldstrtab[] = { + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x02, 0x01, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x02, 0x40, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x02, 0x01, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x02, 0x01, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x01, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x11, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x12, 0x12, 0x00, 0x12, 0x11, 0x11, 0x13, 0x13, + 0x12, 0x12, 0x11, 0x11, 0x12, 0x00, 0x11, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x00, 0x02, 0x01, 0x00, 0x00, + 0x01, 0x01, 0x02, 0x02, 0x03, 0x03, 0x21, 0x21, + 0x00, 0x00, 0x02, 0x02, 0x01, 0x01, 0x01, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x13, 0x13, 0x00, 0x00, 0x01, 0x01, 0x12, 0x12, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x13, 0x13, 0x13, 0x13, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x13, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x13, +}; + +unsigned char db_ldstrtab0f[] = { + 0x80, 0x80, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, + 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, 0x12, + 0x02, 0x01, 0x00, 0x11, 0x13, 0x13, 0x00, 0x00, + 0x02, 0x01, 0x12, 0x13, 0x13, 0x13, 0x00, 0x11, + 0x00, 0x00, 0x01, 0x13, 0x01, 0x01, 0x11, 0x11, + 0x00, 0x00, 0x80, 0x13, 0x13, 0x13, 0x11, 0x11, + + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +int db_inst_swreg(boolean_t, unsigned long, unsigned char); + +/* + * Given four bytes of instruction (stored as an int, not an + * array of characters), compute if the instruction reads + * memory. 
+ */
+int
+db_inst_load(
+	unsigned long	insw)
+{
+	unsigned char	insb, bits;
+
+	insb = insw & 0xff;
+	insw >>= 8;
+	/* low nibble of the table entry holds DBLS_LOAD/DBLS_STORE,
+	   high nibble (DBLS_MODS) selects how to decode further */
+	bits = db_ldstrtab[insb];
+	if (!(bits & DBLS_LOAD))
+	    return (0);
+	while (1) {
+	    switch (bits & DBLS_MODS) {
+	    case 0:
+		/* unconditional load (e.g. implicit stack/string operand) */
+		return (1);
+	    case DBLS_MODRM:
+		/* loads only when mod r/m selects a memory operand
+		   (mod field != 11, i.e. not register-direct) */
+		insb = insw & 0xff;
+		return ((insb & 0xc0) != 0xc0);
+	    case DBLS_SECOND|DBLS_MODRM:
+		/* two memory operations, but only for a memory operand */
+		insb = insw & 0xff;
+		return ((insb & 0xc0) != 0xc0 ? 2 : 0);
+	    case DBLS_SECOND:
+		/* instruction performs two loads */
+		return (2);
+	    case DBLS_ESCAPE:
+		/* 0x0f escape: restart decode with two-byte opcode table */
+		insb = insw & 0xff;
+		insw >>= 8;
+		bits = db_ldstrtab0f[insb];
+		break;
+	    case DBLS_SWREG:
+		/* extended opcode: reg bits of mod r/m pick the operation */
+		return (db_inst_swreg(TRUE, insw, insb));
+	    default:
+		panic ("db_inst_load: unknown mod bits");
+	    }
+	}
+}
+
+/*
+ * Given four bytes of instruction (stored as an int, not an
+ * array of characters), compute if the instruction writes
+ * memory.
+ */
+int
+db_inst_store(
+	unsigned long	insw)
+{
+	unsigned char	insb, bits;
+
+	insb = insw & 0xff;
+	insw >>= 8;
+	/* same decode scheme as db_inst_load, keyed on DBLS_STORE */
+	bits = db_ldstrtab[insb];
+	if (!(bits & DBLS_STORE))
+	    return (0);
+	while (1) {
+	    switch (bits & DBLS_MODS) {
+	    case 0:
+		/* unconditional store */
+		return (1);
+	    case DBLS_MODRM:
+		/* stores only when mod r/m selects a memory operand */
+		insb = insw & 0xff;
+		return ((insb & 0xc0) != 0xc0);
+	    case DBLS_SECOND|DBLS_MODRM:
+		insb = insw & 0xff;
+		return ((insb & 0xc0) != 0xc0 ? 2 : 0);
+	    case DBLS_SECOND:
+		/* instruction performs two stores */
+		return (2);
+	    case DBLS_ESCAPE:
+		/* 0x0f escape: restart decode with two-byte opcode table */
+		insb = insw & 0xff;
+		insw >>= 8;
+		bits = db_ldstrtab0f[insb];
+		break;
+	    case DBLS_SWREG:
+		return (db_inst_swreg(FALSE, insw, insb));
+	    default:
+		panic ("db_inst_store: unknown mod bits");
+	    }
+	}
+}
+
+/*
+ * Parse a mod r/m byte to see if extended opcode reads
+ * or writes memory.
+ */ +int +db_inst_swreg( + boolean_t isload, + unsigned long insw, + unsigned char insb) +{ + unsigned char modrm = insw & 0xff; + + switch (insb) { + case 0x00: + switch (modrm & 0x38) { + case 0x00: + case 0x08: + case 0x10: + case 0x18: + return ((modrm & 0xc0) != 0xc0); + } + break; + case 0x01: + switch (modrm & 0x38) { + case 0x00: + case 0x08: + case 0x10: + case 0x18: + return ((modrm & 0xc0) != 0xc0 ? 2 : 0); + case 0x20: + case 0x30: + return ((modrm & 0xc0) != 0xc0); + } + break; + case 0xba: + if (isload) + return ((modrm & 0xc0) != 0xc0); + switch (modrm & 0x38) { + case 0x28: + case 0x30: + case 0x38: + return ((modrm & 0xc0) != 0xc0); + } + break; + } + return (0); +} diff --git a/osfmk/i386/db_gcc_aout.c b/osfmk/i386/db_gcc_aout.c new file mode 100644 index 000000000..15fd0003d --- /dev/null +++ b/osfmk/i386/db_gcc_aout.c @@ -0,0 +1,681 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * COPYRIGHT NOTICE + * + * Copyright (c) 1990, 1991, 1992, 1993 Open Software Foundation, Inc. 
+ * + * Permission is hereby granted to use, copy, modify and freely distribute + * the software in this file and its documentation for any purpose without + * fee, provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. Further, provided that the name of Open + * Software Foundation, Inc. ("OSF") not be used in advertising or + * publicity pertaining to distribution of the software without prior + * written permission from OSF. OSF makes no representations about the + * suitability of this software for any purpose. It is provided "as is" + * without express or implied warranty. + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:36 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:37 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.2.3 1994/01/28 17:23:00 chasb + * Expand Copyrights + * [1994/01/27 19:40:16 chasb] + * + * Revision 1.2.2.2 1993/06/09 02:27:36 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:04:03 jeffc] + * + * Revision 1.2 1993/04/19 16:13:10 devrcs + * pick up file_io.h from bootstrap directory + * [1993/02/27 15:01:09 david] + * + * Added new arguments and a missing one to db_add_symbol_table + * [barbou@gr.osf.org] + * [92/12/03 bernadat] + * + * Added gcc symbol table handling based on db_aout.c (Revsion 2.4) + * [91/07/31 tak] + * + * Revision 1.1 1992/09/30 02:02:23 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.1 91/07/31 13:13:51 jeffreyh + * Created. + * + * 31-Jul-91 Jeffrey Heller (tak) at Open Software Foundation + * Added gcc symbol table handling based on db_aout.c (Revsion 2.4) + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Symbol table routines for a.out format files. + */ + +#include +#include /* data types */ +#include + +#ifdef DB_GCC_AOUT + +#include /* a.out symbol table */ +#include + +/* + * An a.out symbol table as loaded into the kernel debugger: + * + * symtab -> size of symbol entries, in bytes + * sp -> first symbol entry + * ... + * ep -> last symbol entry + 1 + * strtab == start of string table + * size of string table in bytes, + * including this word + * -> strings + */ + +/* + * Find pointers to the start and end of the symbol entries, + * given a pointer to the start of the symbol table. 
+ */ +#define db_get_aout_symtab(symtab, sp, ep) \ + (sp = (struct nlist *)((symtab) + 1), \ + ep = (struct nlist *)((char *)sp + *(symtab))) + +X_db_sym_init(symtab, esymtab, name) + int * symtab; /* pointer to start of symbol table */ + char * esymtab; /* pointer to end of string table, + for checking - rounded up to integer + boundary */ + char * name; +{ + register struct nlist *sym_start, *sym_end; + register struct nlist *sp; + register char * strtab; + register int strlen; + + db_get_aout_symtab(symtab, sym_start, sym_end); + + strtab = (char *)sym_end; + strlen = *(int *)strtab; + + if (strtab + ((strlen + sizeof(int) - 1) & ~(sizeof(int)-1)) + != esymtab) + { + db_printf("[ %s symbol table not valid ]\n", name); + return; + } + + db_printf("[ preserving %#x bytes of %s symbol table ]\n", + esymtab - (char *)symtab, name); + + for (sp = sym_start; sp < sym_end; sp++) { + register int strx; + strx = sp->n_un.n_strx; + if (strx != 0) { + if (strx > strlen) { + db_printf("Bad string table index (%#x)\n", strx); + sp->n_un.n_name = 0; + continue; + } + sp->n_un.n_name = strtab + strx; + } + } + + db_add_symbol_table(sym_start, sym_end, name, (char *)symtab, + 0, 0, 0, FALSE); +} + +/* + * check file name or not (check xxxx.x pattern) + */ +boolean_t +X_db_is_filename(name) + register char *name; +{ + while (*name) { + if (*name == '.') { + if (name[1]) + return(TRUE); + } + name++; + } + return(FALSE); +} + +/* + * special name comparison routine with a name in the symbol table entry + */ +boolean_t +X_db_eq_name(sp, name) + struct nlist *sp; + char *name; +{ + register char *s1, *s2; + + s1 = sp->n_un.n_name; + s2 = name; + if (*s1 == '_' && *s2 && *s2 != '_') + s1++; + while (*s2) { + if (*s1++ != *s2++) { + /* + * check .c .o file name comparison case + */ + if (*s2 == 0 && sp->n_un.n_name <= s1 - 2 + && s1[-2] == '.' 
&& s1[-1] == 'o') + return(TRUE); + return(FALSE); + } + } + /* + * do special check for + * xxx:yyy for N_FUN + * xxx.ttt for N_DATA and N_BSS + */ + return(*s1 == 0 || (*s1 == ':' && sp->n_type == N_FUN) || + (*s1 == '.' && (sp->n_type == N_DATA || sp->n_type == N_BSS))); +} + +/* + * search a symbol table with name and type + * fp(in,out): last found text file name symbol entry + */ +struct nlist * +X_db_search_name(sp, ep, name, type, fp) + register struct nlist *sp; + struct nlist *ep; + char *name; + int type; + struct nlist **fp; +{ + struct nlist *file_sp = *fp; + struct nlist *found_sp = 0; + + for ( ; sp < ep; sp++) { + if (sp->n_type == N_TEXT && X_db_is_filename(sp->n_un.n_name)) + *fp = sp; + if (type) { + if (sp->n_type == type) { + if (X_db_eq_name(sp, name)) + return(sp); + } + if (sp->n_type == N_SO) + *fp = sp; + continue; + } + if (sp->n_type & N_STAB) + continue; + if (sp->n_un.n_name && X_db_eq_name(sp, name)) { + /* + * In case of qaulified search by a file, + * return it immediately with some check. + * Otherwise, search external one + */ + if (file_sp) { + if ((file_sp == *fp) || (sp->n_type & N_EXT)) + return(sp); + } else if (sp->n_type & N_EXT) + return(sp); + else + found_sp = sp; + } + } + return(found_sp); +} + +/* + * search a symbol with file, func and line qualification + */ +struct nlist * +X_db_qualified_search(stab, file, sym, line) + db_symtab_t *stab; + char *file; + char *sym; + int line; +{ + register struct nlist *sp = (struct nlist *)stab->start; + struct nlist *ep = (struct nlist *)stab->end; + struct nlist *fp = 0; + struct nlist *found_sp; + unsigned func_top; + boolean_t in_file; + + if (file == 0 && sym == 0) + return(0); + if (file) { + if ((sp = X_db_search_name(sp, ep, file, N_TEXT, &fp)) == 0) + return(0); + } + if (sym) { + sp = X_db_search_name(sp, ep, sym, (line > 0)? 
N_FUN: 0, &fp); + if (sp == 0) + return(0); + } + if (line > 0) { + if (file && !X_db_eq_name(fp, file)) + return(0); + found_sp = 0; + if (sp->n_type == N_FUN) { + /* + * qualfied by function name + * search backward because line number entries + * for the function are above it in this case. + */ + func_top = sp->n_value; + for (sp--; sp >= (struct nlist *)stab->start; sp--) { + if (sp->n_type != N_SLINE) + continue; + if (sp->n_value < func_top) + break; + if (sp->n_desc <= line) { + if (found_sp == 0 || found_sp->n_desc < sp->n_desc) + found_sp = sp; + if (sp->n_desc == line) + break; + } + } + if (sp->n_type != N_SLINE || sp->n_value < func_top) + return(0); + } else { + /* + * qualified by only file name + * search forward in this case + */ + in_file = TRUE; + for (sp++; sp < ep; sp++) { + if (sp->n_type == N_TEXT + && X_db_is_filename(sp->n_un.n_name)) + break; /* enter into another file */ + if (sp->n_type == N_SOL) { + in_file = X_db_eq_name(sp, file); + continue; + } + if (!in_file || sp->n_type != N_SLINE) + continue; + if (sp->n_desc <= line) { + if (found_sp == 0 || found_sp->n_desc < sp->n_desc) + found_sp = sp; + if (sp->n_desc == line) + break; + } + } + } + sp = found_sp; + } + return(sp); +} + +/* + * lookup symbol by name + */ +db_sym_t +X_db_lookup(stab, symstr) + db_symtab_t *stab; + char * symstr; +{ + register char *p; + register n; + int n_name; + int line_number; + char *file_name = 0; + char *sym_name = 0; + char *component[3]; + struct nlist *found = 0; + + /* + * disassemble component: [file_name:]symbol[:line_nubmer] + */ + component[0] = symstr; + component[1] = component[2] = 0; + for (p = symstr, n = 1; *p; p++) { + if (*p == ':') { + if (n >= 3) + break; + *p = 0; + component[n++] = p+1; + } + } + if (*p != 0) + goto out; + line_number = 0; + n_name = n; + p = component[n-1]; + if (*p >= '0' && *p <= '9') { + if (n == 1) + goto out; + for (line_number = 0; *p; p++) { + if (*p < '0' || *p > '9') + goto out; + line_number = 
line_number*10 + *p - '0'; + } + n_name--; + } else if (n >= 3) + goto out; + if (n_name == 1) { + if (X_db_is_filename(component[0])) { + file_name = component[0]; + sym_name = 0; + } else { + file_name = 0; + sym_name = component[0]; + } + } else { + file_name = component[0]; + sym_name = component[1]; + } + found = X_db_qualified_search(stab, file_name, sym_name, line_number); + +out: + while (--n > 1) + component[n][-1] = ':'; + return((db_sym_t) found); +} + +db_sym_t +X_db_search_symbol(symtab, off, strategy, diffp) + db_symtab_t * symtab; + register + db_addr_t off; + db_strategy_t strategy; + db_expr_t *diffp; /* in/out */ +{ + register unsigned int diff = *diffp; + register struct nlist *symp = 0; + register struct nlist *sp, *ep; + + sp = (struct nlist *)symtab->start; + ep = (struct nlist *)symtab->end; + + for (; sp < ep; sp++) { + if (sp->n_un.n_name == 0) + continue; + if ((sp->n_type & N_STAB) != 0) + continue; + if (off >= sp->n_value) { + if (off - sp->n_value < diff) { + diff = off - sp->n_value; + symp = sp; + if (diff == 0 && (sp->n_type & N_EXT)) + break; + } + else if (off - sp->n_value == diff) { + if (symp == 0) + symp = sp; + else if ((symp->n_type & N_EXT) == 0 && + (sp->n_type & N_EXT) != 0) + symp = sp; /* pick the external symbol */ + } + } + } + if (symp == 0) { + *diffp = off; + } + else { + *diffp = diff; + } + return ((db_sym_t)symp); +} + +/* + * Return the name and value for a symbol. 
+ */ +void +X_db_symbol_values(sym, namep, valuep) + db_sym_t sym; + char **namep; + db_expr_t *valuep; +{ + register struct nlist *sp; + + sp = (struct nlist *)sym; + if (namep) + *namep = sp->n_un.n_name; + if (valuep) + *valuep = sp->n_value; +} + +#define X_DB_MAX_DIFF 8 /* maximum allowable diff at the end of line */ + +/* + * search symbol by value + */ +X_db_search_by_addr(stab, addr, file, func, line, diff) + db_symtab_t *stab; + register unsigned addr; + char **file; + char **func; + int *line; + unsigned *diff; +{ + register struct nlist *sp; + register struct nlist *line_sp, *func_sp, *file_sp, *line_func; + register func_diff, line_diff; + boolean_t found_line = FALSE; + struct nlist *ep = (struct nlist *)stab->end; + + line_sp = func_sp = file_sp = line_func = 0; + *file = *func = 0; + *line = 0; + for (sp = (struct nlist *)stab->start; sp < ep; sp++) { + switch(sp->n_type) { + case N_SLINE: + if (sp->n_value <= addr) { + if (line_sp == 0 || line_diff >= addr - sp->n_value) { + if (line_func) + line_func = 0; + line_sp = sp; + line_diff = addr - sp->n_value; + } + } + if (sp->n_value >= addr && line_sp) + found_line = TRUE; + continue; + case N_FUN: + if ((found_line || (line_sp && line_diff < X_DB_MAX_DIFF)) + && line_func == 0) + line_func = sp; + continue; + case N_TEXT: + if (X_db_is_filename(sp->n_un.n_name)) { + if (sp->n_value > addr) + continue; + if (file_sp == 0 || file_sp->n_value < sp->n_value) + file_sp = sp; + } else if (sp->n_value <= addr && + (func_sp == 0 || func_diff > addr - sp->n_value)) { + func_sp = sp; + func_diff = addr - sp->n_value; + } + continue; + case N_TEXT|N_EXT: + if (sp->n_value <= addr && + (func_sp == 0 || func_diff >= addr - sp->n_value)) { + func_sp = sp; + func_diff = addr - sp->n_value; + if (func_diff == 0 && file_sp && func_sp) + break; + } + default: + continue; + } + break; + } + if (line_sp) { + if (line_func == 0 || func_sp == 0 + || line_func->n_value != func_sp->n_value) + line_sp = 0; + } + if (file_sp) 
{ + *diff = addr - file_sp->n_value; + *file = file_sp->n_un.n_name; + } + if (func_sp) { + *diff = addr - func_sp->n_value; + *func = (func_sp->n_un.n_name[0] == '_')? + func_sp->n_un.n_name + 1: func_sp->n_un.n_name; + } + if (line_sp) { + *diff = addr - line_sp->n_value; + *line = line_sp->n_desc; + } + return(file_sp || func_sp || line_sp); +} + +/* ARGSUSED */ +boolean_t +X_db_line_at_pc(stab, sym, file, line, pc) + db_symtab_t *stab; + db_sym_t sym; + char **file; + int *line; + db_expr_t pc; +{ + char *func; + unsigned diff; + boolean_t found; + + found = X_db_search_by_addr(stab,(unsigned)pc,file,&func,line,&diff); + return(found && func && *file); +} + +/* + * Initialization routine for a.out files. + */ +kdb_init() +{ + extern char *esym; + extern int end; + + if (esym > (char *)&end) { + X_db_sym_init((int *)&end, esym, "mach"); + } +} + +/* + * Read symbol table from file. + * (should be somewhere else) + */ +#include +#include + +read_symtab_from_file(fp, symtab_name) + struct file *fp; + char * symtab_name; +{ + vm_size_t resid; + kern_return_t result; + vm_offset_t symoff; + vm_size_t symsize; + vm_offset_t stroff; + vm_size_t strsize; + vm_size_t table_size; + vm_offset_t symtab; + + if (!get_symtab(fp, &symoff, &symsize)) { + boot_printf("[ error %d reading %s file header ]\n", + result, symtab_name); + return; + } + + stroff = symoff + symsize; + result = read_file(fp, (vm_offset_t)stroff, + (vm_offset_t)&strsize, sizeof(strsize), &resid); + if (result || resid) { + boot_printf("[ no valid symbol table present for %s ]\n", + symtab_name); + return; + } + + table_size = sizeof(int) + symsize + strsize; + table_size = (table_size + sizeof(int)-1) & ~(sizeof(int)-1); + + result = kmem_alloc_wired(kernel_map, &symtab, table_size); + if (result) { + boot_printf("[ error %d allocating space for %s symbol table ]\n", + result, symtab_name); + return; + } + + *(int *)symtab = symsize; + + result = read_file(fp, symoff, + symtab + sizeof(int), symsize, 
&resid); + if (result || resid) { + boot_printf("[ error %d reading %s symbol table ]\n", + result, symtab_name); + return; + } + + result = read_file(fp, stroff, + symtab + sizeof(int) + symsize, strsize, &resid); + if (result || resid) { + boot_printf("[ error %d reading %s string table ]\n", + result, symtab_name); + return; + } + + X_db_sym_init((int *)symtab, + (char *)(symtab + table_size), + symtab_name); + +} + +#endif /* DB_GCC_AOUT */ diff --git a/osfmk/i386/db_interface.c b/osfmk/i386/db_interface.c new file mode 100644 index 000000000..bd3a52369 --- /dev/null +++ b/osfmk/i386/db_interface.c @@ -0,0 +1,1043 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Interface to new debugger. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int db_active = 0; +int db_pass_thru[NCPUS]; +struct i386_saved_state *i386_last_saved_statep; +struct i386_saved_state i386_nested_saved_state; +unsigned i386_last_kdb_sp; + +vm_offset_t db_stacks[NCPUS]; + +extern thread_act_t db_default_act; + +#if MACH_MP_DEBUG +extern int masked_state_cnt[]; +#endif /* MACH_MP_DEBUG */ + +/* + * Enter KDB through a keyboard trap. + * We show the registers as of the keyboard interrupt + * instead of those at its call to KDB. 
+ */ +struct int_regs { + int gs; + int fs; + int edi; + int esi; + int ebp; + int ebx; + struct i386_interrupt_state *is; +}; + +extern char * trap_type[]; +extern int TRAP_TYPES; + +/* Forward */ + +extern void kdbprinttrap( + int type, + int code, + int *pc, + int sp); +extern void kdb_kentry( + struct int_regs *int_regs); +extern int db_user_to_kernel_address( + task_t task, + vm_offset_t addr, + unsigned *kaddr, + int flag); +extern void db_write_bytes_user_space( + vm_offset_t addr, + int size, + char *data, + task_t task); +extern int db_search_null( + task_t task, + unsigned *svaddr, + unsigned evaddr, + unsigned *skaddr, + int flag); +extern int kdb_enter(int); +extern void kdb_leave(void); +extern void lock_kdb(void); +extern void unlock_kdb(void); + +/* + * kdb_trap - field a TRACE or BPT trap + */ + + +extern jmp_buf_t *db_recover; +spl_t saved_ipl[NCPUS]; /* just to know what IPL was before trap */ +struct i386_saved_state *saved_state[NCPUS]; + +/* + * Translate the state saved in a task state segment into an + * exception frame. Since we "know" we always want the state + * in a ktss, we hard-wire that in, rather than indexing the gdt + * with tss_sel to derive a pointer to the desired tss. + */ +void +db_tss_to_frame( + int tss_sel, + struct i386_saved_state *regs) +{ + extern struct i386_tss ktss; + int mycpu = cpu_number(); + struct i386_tss *tss; + +#if NCPUS == 1 + tss = &ktss; /* XXX */ +#else /* NCPUS > 1 */ + tss = mp_ktss[mycpu]; /* XXX */ +#endif /* NCPUS > 1 */ + + /* + * ddb will overwrite whatever's in esp, so put esp0 elsewhere, too. 
+ */ + regs->esp = tss->esp0; + regs->efl = tss->eflags; + regs->eip = tss->eip; + regs->trapno = tss->ss0; /* XXX */ + regs->err = tss->esp0; /* XXX */ + regs->eax = tss->eax; + regs->ecx = tss->ecx; + regs->edx = tss->edx; + regs->ebx = tss->ebx; + regs->uesp = tss->esp; + regs->ebp = tss->ebp; + regs->esi = tss->esi; + regs->edi = tss->edi; + regs->es = tss->es; + regs->ss = tss->ss; + regs->cs = tss->cs; + regs->ds = tss->ds; + regs->fs = tss->fs; + regs->gs = tss->gs; +} + +/* + * Compose a call to the debugger from the saved state in regs. (No + * reason not to do this in C.) + */ +boolean_t +db_trap_from_asm( + struct i386_saved_state *regs) +{ + int code; + int type; + + type = regs->trapno; + code = regs->err; + return (kdb_trap(type, code, regs)); +} + +int +kdb_trap( + int type, + int code, + struct i386_saved_state *regs) +{ + extern char etext; + boolean_t trap_from_user; + spl_t s = splhigh(); + + switch (type) { + case T_DEBUG: /* single_step */ + { + extern int dr_addr[]; + int addr; + int status = dr6(); + + if (status & 0xf) { /* hmm hdw break */ + addr = status & 0x8 ? dr_addr[3] : + status & 0x4 ? dr_addr[2] : + status & 0x2 ? 
dr_addr[1] : + dr_addr[0]; + regs->efl |= EFL_RF; + db_single_step_cmd(addr, 0, 1, "p"); + } + } + case T_INT3: /* breakpoint */ + case T_WATCHPOINT: /* watchpoint */ + case -1: /* keyboard interrupt */ + break; + + default: + if (db_recover) { + i386_nested_saved_state = *regs; + db_printf("Caught "); + if (type < 0 || type > TRAP_TYPES) + db_printf("type %d", type); + else + db_printf("%s", trap_type[type]); + db_printf(" trap, code = %x, pc = %x\n", + code, regs->eip); + splx(s); + db_error(""); + /*NOTREACHED*/ + } + kdbprinttrap(type, code, (int *)®s->eip, regs->uesp); + } + +#if NCPUS > 1 + disable_preemption(); +#endif /* NCPUS > 1 */ + + saved_ipl[cpu_number()] = s; + saved_state[cpu_number()] = regs; + + i386_last_saved_statep = regs; + i386_last_kdb_sp = (unsigned) &type; + +#if NCPUS > 1 + if (!kdb_enter(regs->eip)) + goto kdb_exit; +#endif /* NCPUS > 1 */ + + /* Should switch to kdb's own stack here. */ + + if (!IS_USER_TRAP(regs, &etext)) { + bzero((char *)&ddb_regs, sizeof (ddb_regs)); + *(struct i386_saved_state_from_kernel *)&ddb_regs = + *(struct i386_saved_state_from_kernel *)regs; + trap_from_user = FALSE; + } + else { + ddb_regs = *regs; + trap_from_user = TRUE; + } + if (!trap_from_user) { + /* + * Kernel mode - esp and ss not saved + */ + ddb_regs.uesp = (int)®s->uesp; /* kernel stack pointer */ + ddb_regs.ss = KERNEL_DS; + } + + db_active++; + db_task_trap(type, code, trap_from_user); + db_active--; + + regs->eip = ddb_regs.eip; + regs->efl = ddb_regs.efl; + regs->eax = ddb_regs.eax; + regs->ecx = ddb_regs.ecx; + regs->edx = ddb_regs.edx; + regs->ebx = ddb_regs.ebx; + if (trap_from_user) { + /* + * user mode - saved esp and ss valid + */ + regs->uesp = ddb_regs.uesp; /* user stack pointer */ + regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */ + } + regs->ebp = ddb_regs.ebp; + regs->esi = ddb_regs.esi; + regs->edi = ddb_regs.edi; + regs->es = ddb_regs.es & 0xffff; + regs->cs = ddb_regs.cs & 0xffff; + regs->ds = ddb_regs.ds & 0xffff; + 
regs->fs = ddb_regs.fs & 0xffff; + regs->gs = ddb_regs.gs & 0xffff; + + if ((type == T_INT3) && + (db_get_task_value(regs->eip, + BKPT_SIZE, + FALSE, + db_target_space(current_act(), + trap_from_user)) + == BKPT_INST)) + regs->eip += BKPT_SIZE; + +#if NCPUS > 1 +kdb_exit: + kdb_leave(); +#endif /* NCPUS > 1 */ + + saved_state[cpu_number()] = 0; + +#if MACH_MP_DEBUG + masked_state_cnt[cpu_number()] = 0; +#endif /* MACH_MP_DEBUG */ + +#if NCPUS > 1 + enable_preemption(); +#endif /* NCPUS > 1 */ + + splx(s); + + /* Allow continue to upper layers of exception handling if + * trap was not a debugging trap. + */ + + if (trap_from_user && type != T_DEBUG && type != T_INT3 + && type != T_WATCHPOINT) + return 0; + else + return (1); +} + +/* + * Enter KDB through a keyboard trap. + * We show the registers as of the keyboard interrupt + * instead of those at its call to KDB. + */ + +spl_t kdb_oldspl; + +void +kdb_kentry( + struct int_regs *int_regs) +{ + extern char etext; + boolean_t trap_from_user; + struct i386_interrupt_state *is = int_regs->is; + struct i386_saved_state regs; + spl_t s; + + s = splhigh(); + kdb_oldspl = s; + + if (IS_USER_TRAP(is, &etext)) + { + regs.uesp = ((int *)(is+1))[0]; + regs.ss = ((int *)(is+1))[1]; + } + else { + regs.ss = KERNEL_DS; + regs.uesp= (int)(is+1); + } + regs.efl = is->efl; + regs.cs = is->cs; + regs.eip = is->eip; + regs.eax = is->eax; + regs.ecx = is->ecx; + regs.edx = is->edx; + regs.ebx = int_regs->ebx; + regs.ebp = int_regs->ebp; + regs.esi = int_regs->esi; + regs.edi = int_regs->edi; + regs.ds = is->ds; + regs.es = is->es; + regs.fs = int_regs->fs; + regs.gs = int_regs->gs; + +#if NCPUS > 1 + disable_preemption(); +#endif /* NCPUS > 1 */ + + saved_state[cpu_number()] = ®s; + +#if NCPUS > 1 + if (!kdb_enter(regs.eip)) + goto kdb_exit; +#endif /* NCPUS > 1 */ + + bcopy((char *)®s, (char *)&ddb_regs, sizeof (ddb_regs)); + trap_from_user = IS_USER_TRAP(&ddb_regs, &etext); + + db_active++; + db_task_trap(-1, 0, trap_from_user); + 
db_active--; + + if (trap_from_user) { + ((int *)(is+1))[0] = ddb_regs.uesp; + ((int *)(is+1))[1] = ddb_regs.ss & 0xffff; + } + is->efl = ddb_regs.efl; + is->cs = ddb_regs.cs & 0xffff; + is->eip = ddb_regs.eip; + is->eax = ddb_regs.eax; + is->ecx = ddb_regs.ecx; + is->edx = ddb_regs.edx; + int_regs->ebx = ddb_regs.ebx; + int_regs->ebp = ddb_regs.ebp; + int_regs->esi = ddb_regs.esi; + int_regs->edi = ddb_regs.edi; + is->ds = ddb_regs.ds & 0xffff; + is->es = ddb_regs.es & 0xffff; + int_regs->fs = ddb_regs.fs & 0xffff; + int_regs->gs = ddb_regs.gs & 0xffff; + +#if NCPUS > 1 +kdb_exit: + kdb_leave(); +#endif /* NCPUS > 1 */ + saved_state[cpu_number()] = 0; + +#if NCPUS > 1 + enable_preemption(); +#endif /* NCPUS > 1 */ + + splx(s); +} + +/* + * Print trap reason. + */ + +void +kdbprinttrap( + int type, + int code, + int *pc, + int sp) +{ + printf("kernel: "); + if (type < 0 || type > TRAP_TYPES) + db_printf("type %d", type); + else + db_printf("%s", trap_type[type]); + db_printf(" trap, code=%x eip@%x = %x esp=%x\n", + code, pc, *(int *)pc, sp); + db_run_mode = STEP_CONTINUE; +} + +int +db_user_to_kernel_address( + task_t task, + vm_offset_t addr, + unsigned *kaddr, + int flag) +{ + register pt_entry_t *ptp; + + ptp = pmap_pte(task->map->pmap, addr); + if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) { + if (flag) { + db_printf("\nno memory is assigned to address %08x\n", addr); + db_error(0); + /* NOTREACHED */ + } + return(-1); + } + *kaddr = (unsigned)ptetokv(*ptp) + (addr & (INTEL_PGBYTES-1)); + return(0); +} + +/* + * Read bytes from kernel address space for debugger. 
+ */ + +void +db_read_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task) +{ + register char *src; + register int n; + unsigned kern_addr; + + src = (char *)addr; + if (task == kernel_task || task == TASK_NULL) { + while (--size >= 0) { + if (addr++ > VM_MAX_KERNEL_ADDRESS) { + db_printf("\nbad address %x\n", addr); + db_error(0); + /* NOTREACHED */ + } + *data++ = *src++; + } + return; + } + while (size > 0) { + if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0) + return; + src = (char *)kern_addr; + n = intel_trunc_page(addr+INTEL_PGBYTES) - addr; + if (n > size) + n = size; + size -= n; + addr += n; + while (--n >= 0) + *data++ = *src++; + } +} + +/* + * Write bytes to kernel address space for debugger. + */ + +void +db_write_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task) +{ + register char *dst; + + register pt_entry_t *ptep0 = 0; + pt_entry_t oldmap0 = 0; + vm_offset_t addr1; + register pt_entry_t *ptep1 = 0; + pt_entry_t oldmap1 = 0; + extern char etext; + + if (task && task != kernel_task) { + db_write_bytes_user_space(addr, size, data, task); + return; + } + + + if (addr >= VM_MIN_KERNEL_LOADED_ADDRESS) { + db_write_bytes_user_space(addr, size, data, kernel_task); + return; + } + + if (addr >= VM_MIN_KERNEL_ADDRESS && + addr <= (vm_offset_t)&etext) + { + ptep0 = pmap_pte(kernel_pmap, addr); + oldmap0 = *ptep0; + *ptep0 |= INTEL_PTE_WRITE; + + addr1 = i386_trunc_page(addr + size - 1); + if (i386_trunc_page(addr) != addr1) { + /* data crosses a page boundary */ + + ptep1 = pmap_pte(kernel_pmap, addr1); + oldmap1 = *ptep1; + *ptep1 |= INTEL_PTE_WRITE; + } + flush_tlb(); + } + + dst = (char *)addr; + + while (--size >= 0) { + if (addr++ > VM_MAX_KERNEL_ADDRESS) { + db_printf("\nbad address %x\n", addr); + db_error(0); + /* NOTREACHED */ + } + *dst++ = *data++; + } + + if (ptep0) { + *ptep0 = oldmap0; + if (ptep1) { + *ptep1 = oldmap1; + } + flush_tlb(); + } +} + +void +db_write_bytes_user_space( + vm_offset_t 
addr, + int size, + char *data, + task_t task) +{ + register char *dst; + register int n; + unsigned kern_addr; + + while (size > 0) { + if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0) + return; + dst = (char *)kern_addr; + n = intel_trunc_page(addr+INTEL_PGBYTES) - addr; + if (n > size) + n = size; + size -= n; + addr += n; + while (--n >= 0) + *dst++ = *data++; + } +} + +boolean_t +db_check_access( + vm_offset_t addr, + int size, + task_t task) +{ + register n; + unsigned kern_addr; + + if (task == kernel_task || task == TASK_NULL) { + if (kernel_task == TASK_NULL) + return(TRUE); + task = kernel_task; + } else if (task == TASK_NULL) { + if (current_act() == THR_ACT_NULL) + return(FALSE); + task = current_act()->task; + } + while (size > 0) { + if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0) + return(FALSE); + n = intel_trunc_page(addr+INTEL_PGBYTES) - addr; + if (n > size) + n = size; + size -= n; + addr += n; + } + return(TRUE); +} + +boolean_t +db_phys_eq( + task_t task1, + vm_offset_t addr1, + task_t task2, + vm_offset_t addr2) +{ + unsigned kern_addr1, kern_addr2; + + if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1))) + return(FALSE); + if (task1 == TASK_NULL) { + if (current_act() == THR_ACT_NULL) + return(FALSE); + task1 = current_act()->task; + } + if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0 || + db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0) + return(FALSE); + return(kern_addr1 == kern_addr2); +} + +#define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS) +#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3)) + +int +db_search_null( + task_t task, + unsigned *svaddr, + unsigned evaddr, + unsigned *skaddr, + int flag) +{ + register unsigned vaddr; + register unsigned *kaddr; + + kaddr = (unsigned *)*skaddr; + for (vaddr = *svaddr; vaddr > evaddr; vaddr -= sizeof(unsigned)) { + if (vaddr % INTEL_PGBYTES == 0) { + vaddr -= sizeof(unsigned); + if 
(db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0) + return(-1); + kaddr = (unsigned *)*skaddr; + } else { + vaddr -= sizeof(unsigned); + kaddr--; + } + if ((*kaddr == 0) ^ (flag == 0)) { + *svaddr = vaddr; + *skaddr = (unsigned)kaddr; + return(0); + } + } + return(-1); +} + +void +db_task_name( + task_t task) +{ + register char *p; + register n; + unsigned vaddr, kaddr; + + vaddr = DB_USER_STACK_ADDR; + kaddr = 0; + + /* + * skip nulls at the end + */ + if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0) < 0) { + db_printf(DB_NULL_TASK_NAME); + return; + } + /* + * search start of args + */ + if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1) < 0) { + db_printf(DB_NULL_TASK_NAME); + return; + } + + n = DB_TASK_NAME_LEN-1; + p = (char *)kaddr + sizeof(unsigned); + for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0; + vaddr++, p++, n--) { + if (vaddr % INTEL_PGBYTES == 0) { + (void)db_user_to_kernel_address(task, vaddr, &kaddr, 0); + p = (char*)kaddr; + } + db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p); + } + while (n-- >= 0) /* compare with >= 0 for one more space */ + db_printf(" "); +} + +#if NCPUS == 1 + +void +db_machdep_init(void) +{ + db_stacks[0] = (vm_offset_t)(db_stack_store + + INTSTACK_SIZE - sizeof (natural_t)); + dbtss.esp0 = (int)(db_task_stack_store + + INTSTACK_SIZE - sizeof (natural_t)); + dbtss.esp = dbtss.esp0; + dbtss.eip = (int)&db_task_start; +} + +#else /* NCPUS > 1 */ + +/* + * Code used to synchronize kdb among all cpus, one active at a time, switch + * from on to another using kdb_on! 
#cpu or cpu #cpu + */ + +decl_simple_lock_data(, kdb_lock) /* kdb lock */ + +#define db_simple_lock_init(l, e) hw_lock_init(&((l)->interlock)) +#define db_simple_lock_try(l) hw_lock_try(&((l)->interlock)) +#define db_simple_unlock(l) hw_lock_unlock(&((l)->interlock)) + +int kdb_cpu = -1; /* current cpu running kdb */ +int kdb_debug = 0; +int kdb_is_slave[NCPUS]; +int kdb_active[NCPUS]; +volatile unsigned int cpus_holding_bkpts; /* counter for number of cpus holding + breakpoints (ie: cpus that did not + insert back breakpoints) */ +extern boolean_t db_breakpoints_inserted; + +void +db_machdep_init(void) +{ + int c; + + db_simple_lock_init(&kdb_lock, ETAP_MISC_KDB); + for (c = 0; c < NCPUS; ++c) { + db_stacks[c] = (vm_offset_t) (db_stack_store + + (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t)); + if (c == master_cpu) { + dbtss.esp0 = (int)(db_task_stack_store + + (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t)); + dbtss.esp = dbtss.esp0; + dbtss.eip = (int)&db_task_start; + /* + * The TSS for the debugging task on each slave CPU + * is set up in mp_desc_init(). + */ + } + } +} + +/* + * Called when entering kdb: + * Takes kdb lock. If we were called remotely (slave state) we just + * wait for kdb_cpu to be equal to cpu_number(). Otherwise enter kdb if + * not active on another cpu. + * If db_pass_thru[cpu_number()] > 0, then kdb can't stop now. 
+ */ + +int +kdb_enter(int pc) +{ + int my_cpu; + int retval; + +#if NCPUS > 1 + disable_preemption(); +#endif /* NCPUS > 1 */ + + my_cpu = cpu_number(); + + if (db_pass_thru[my_cpu]) { + retval = 0; + goto kdb_exit; + } + + kdb_active[my_cpu]++; + lock_kdb(); + + if (kdb_debug) + db_printf("kdb_enter: cpu %d, is_slave %d, kdb_cpu %d, run mode %d pc %x (%x) holds %d\n", + my_cpu, kdb_is_slave[my_cpu], kdb_cpu, + db_run_mode, pc, *(int *)pc, cpus_holding_bkpts); + if (db_breakpoints_inserted) + cpus_holding_bkpts++; + if (kdb_cpu == -1 && !kdb_is_slave[my_cpu]) { + kdb_cpu = my_cpu; + remote_kdb(); /* stop other cpus */ + retval = 1; + } else if (kdb_cpu == my_cpu) + retval = 1; + else + retval = 0; + +kdb_exit: +#if NCPUS > 1 + enable_preemption(); +#endif /* NCPUS > 1 */ + + return (retval); +} + +void +kdb_leave(void) +{ + int my_cpu; + boolean_t wait = FALSE; + +#if NCPUS > 1 + disable_preemption(); +#endif /* NCPUS > 1 */ + + my_cpu = cpu_number(); + + if (db_run_mode == STEP_CONTINUE) { + wait = TRUE; + kdb_cpu = -1; + } + if (db_breakpoints_inserted) + cpus_holding_bkpts--; + if (kdb_is_slave[my_cpu]) + kdb_is_slave[my_cpu]--; + if (kdb_debug) + db_printf("kdb_leave: cpu %d, kdb_cpu %d, run_mode %d pc %x (%x) holds %d\n", + my_cpu, kdb_cpu, db_run_mode, + ddb_regs.eip, *(int *)ddb_regs.eip, + cpus_holding_bkpts); + clear_kdb_intr(); + unlock_kdb(); + kdb_active[my_cpu]--; + +#if NCPUS > 1 + enable_preemption(); +#endif /* NCPUS > 1 */ + + if (wait) { + while(cpus_holding_bkpts); + } +} + +void +lock_kdb(void) +{ + int my_cpu; + register i; + extern void kdb_console(void); + +#if NCPUS > 1 + disable_preemption(); +#endif /* NCPUS > 1 */ + + my_cpu = cpu_number(); + + for(;;) { + kdb_console(); + if (kdb_cpu != -1 && kdb_cpu != my_cpu) { + continue; + } + if (db_simple_lock_try(&kdb_lock)) { + if (kdb_cpu == -1 || kdb_cpu == my_cpu) + break; + db_simple_unlock(&kdb_lock); + } + } + +#if NCPUS > 1 + enable_preemption(); +#endif /* NCPUS > 1 */ +} + +#if 
TIME_STAMP +extern unsigned old_time_stamp; +#endif /* TIME_STAMP */ + +void +unlock_kdb(void) +{ + db_simple_unlock(&kdb_lock); +#if TIME_STAMP + old_time_stamp = 0; +#endif /* TIME_STAMP */ +} + + +#ifdef __STDC__ +#define KDB_SAVE(type, name) extern type name; type name##_save = name +#define KDB_RESTORE(name) name = name##_save +#else /* __STDC__ */ +#define KDB_SAVE(type, name) extern type name; type name/**/_save = name +#define KDB_RESTORE(name) name = name/**/_save +#endif /* __STDC__ */ + +#define KDB_SAVE_CTXT() \ + KDB_SAVE(int, db_run_mode); \ + KDB_SAVE(boolean_t, db_sstep_print); \ + KDB_SAVE(int, db_loop_count); \ + KDB_SAVE(int, db_call_depth); \ + KDB_SAVE(int, db_inst_count); \ + KDB_SAVE(int, db_last_inst_count); \ + KDB_SAVE(int, db_load_count); \ + KDB_SAVE(int, db_store_count); \ + KDB_SAVE(boolean_t, db_cmd_loop_done); \ + KDB_SAVE(jmp_buf_t *, db_recover); \ + KDB_SAVE(db_addr_t, db_dot); \ + KDB_SAVE(db_addr_t, db_last_addr); \ + KDB_SAVE(db_addr_t, db_prev); \ + KDB_SAVE(db_addr_t, db_next); \ + KDB_SAVE(db_regs_t, ddb_regs); + +#define KDB_RESTORE_CTXT() \ + KDB_RESTORE(db_run_mode); \ + KDB_RESTORE(db_sstep_print); \ + KDB_RESTORE(db_loop_count); \ + KDB_RESTORE(db_call_depth); \ + KDB_RESTORE(db_inst_count); \ + KDB_RESTORE(db_last_inst_count); \ + KDB_RESTORE(db_load_count); \ + KDB_RESTORE(db_store_count); \ + KDB_RESTORE(db_cmd_loop_done); \ + KDB_RESTORE(db_recover); \ + KDB_RESTORE(db_dot); \ + KDB_RESTORE(db_last_addr); \ + KDB_RESTORE(db_prev); \ + KDB_RESTORE(db_next); \ + KDB_RESTORE(ddb_regs); + +/* + * switch to another cpu + */ + +void +kdb_on( + int cpu) +{ + KDB_SAVE_CTXT(); + if (cpu < 0 || cpu >= NCPUS || !kdb_active[cpu]) + return; + db_set_breakpoints(); + db_set_watchpoints(); + kdb_cpu = cpu; + unlock_kdb(); + lock_kdb(); + db_clear_breakpoints(); + db_clear_watchpoints(); + KDB_RESTORE_CTXT(); + if (kdb_cpu == -1) {/* someone continued */ + kdb_cpu = cpu_number(); + db_continue_cmd(0, 0, 0, ""); + } +} + +#endif /* 
NCPUS > 1 */ + +void db_reboot( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + boolean_t reboot = TRUE; + char *cp, c; + + cp = modif; + while ((c = *cp++) != 0) { + if (c == 'r') /* reboot */ + reboot = TRUE; + if (c == 'h') /* halt */ + reboot = FALSE; + } + halt_all_cpus(reboot); +} diff --git a/osfmk/i386/db_machdep.h b/osfmk/i386/db_machdep.h new file mode 100644 index 000000000..09acd12c9 --- /dev/null +++ b/osfmk/i386/db_machdep.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _I386_DB_MACHDEP_H_ +#define _I386_DB_MACHDEP_H_ + +/* + * Machine-dependent defines for new kernel debugger. + */ + +#include +#include +#include +#include /* for thread_status */ +#include +#include + +typedef vm_offset_t db_addr_t; /* address - unsigned */ +typedef int db_expr_t; /* expression - signed */ + +typedef struct i386_saved_state db_regs_t; +db_regs_t ddb_regs; /* register state */ +#define DDB_REGS (&ddb_regs) +extern int db_active; /* ddb is active */ + +#define PC_REGS(regs) ((db_addr_t)(regs)->eip) + +#define BKPT_INST 0xcc /* breakpoint instruction */ +#define BKPT_SIZE (1) /* size of breakpoint inst */ +#define BKPT_SET(inst) (BKPT_INST) + +#define FIXUP_PC_AFTER_BREAK ddb_regs.eip -= 1; + +#define db_clear_single_step(regs) ((regs)->efl &= ~EFL_TF) +#define db_set_single_step(regs) ((regs)->efl |= EFL_TF) + +#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_INT3) +#define IS_WATCHPOINT_TRAP(type, code) ((type) == T_WATCHPOINT) + +#define I_CALL 0xe8 +#define I_CALLI 0xff +#define I_RET 0xc3 +#define I_IRET 0xcf + +#define inst_trap_return(ins) (((ins)&0xff) == I_IRET) +#define inst_return(ins) (((ins)&0xff) == I_RET) +#define inst_call(ins) (((ins)&0xff) == I_CALL || \ + (((ins)&0xff) == I_CALLI && \ + ((ins)&0x3800) == 0x1000)) + +int db_inst_load(unsigned long); +int db_inst_store(unsigned long); + +/* access capability and access macros */ + +#define 
DB_ACCESS_LEVEL 2 /* access any space */ +#define DB_CHECK_ACCESS(addr,size,task) \ + db_check_access(addr,size,task) +#define DB_PHYS_EQ(task1,addr1,task2,addr2) \ + db_phys_eq(task1,addr1,task2,addr2) +#define DB_VALID_KERN_ADDR(addr) \ + ((addr) >= VM_MIN_KERNEL_ADDRESS && \ + (addr) < VM_MAX_KERNEL_ADDRESS) +#define DB_VALID_ADDRESS(addr,user) \ + ((!(user) && DB_VALID_KERN_ADDR(addr)) || \ + ((user) && (addr) < VM_MAX_ADDRESS)) + +/* + * Given pointer to i386_saved_state, determine if it represents + * a thread executing a) in user space, b) in the kernel, or c) + * in a kernel-loaded task. Return true for cases a) and c). + */ +#define IS_USER_TRAP(regs, etext) ((((regs)->cs & 3) != 0) || \ + (current_act() && \ + current_act()->kernel_loaded && \ + ((char *)(regs)->eip > (etext)))) + +extern boolean_t db_check_access( + vm_offset_t addr, + int size, + task_t task); +extern boolean_t db_phys_eq( + task_t task1, + vm_offset_t addr1, + task_t task2, + vm_offset_t addr2); +extern db_addr_t db_disasm( + db_addr_t loc, + boolean_t altfmt, + task_t task); +extern void db_read_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task); +extern void db_write_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task); +extern void db_stack_trace_cmd( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); +extern void db_reboot( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); + +/* macros for printing OS server dependent task name */ + +#define DB_TASK_NAME(task) db_task_name(task) +#define DB_TASK_NAME_TITLE "COMMAND " +#define DB_TASK_NAME_LEN 23 +#define DB_NULL_TASK_NAME "? 
" + +extern void db_task_name( + task_t task); + +/* macro for checking if a thread has used floating-point */ + +#define db_act_fp_used(act) (act && act->mact.pcb->ims.ifps) + +extern void db_tss_to_frame( + int tss_sel, + struct i386_saved_state *regs); +extern int kdb_trap( + int type, + int code, + struct i386_saved_state *regs); +extern boolean_t db_trap_from_asm( + struct i386_saved_state *regs); +extern int dr6(void); +extern void kdb_on( + int cpu); +extern void cnpollc( + boolean_t on); + +#endif /* _I386_DB_MACHDEP_H_ */ diff --git a/osfmk/i386/db_trace.c b/osfmk/i386/db_trace.c new file mode 100644 index 000000000..41349103f --- /dev/null +++ b/osfmk/i386/db_trace.c @@ -0,0 +1,817 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern jmp_buf_t *db_recover; +extern struct i386_saved_state *saved_state[]; + +struct i386_kernel_state ddb_null_kregs; + +/* + * Stack trace. 
+ */ + +extern vm_offset_t vm_min_inks_addr; /* set by db_clone_symtabXXX */ +#define INKSERVER(va) (((vm_offset_t)(va)) >= vm_min_inks_addr) + +#if NCPUS > 1 +extern vm_offset_t interrupt_stack[]; +#define ININTSTACK(va) \ + (((vm_offset_t)(va)) >= interrupt_stack[cpu_number()] &&\ + (((vm_offset_t)(va)) < interrupt_stack[cpu_number()] + \ + INTSTACK_SIZE)) +#else /* NCPUS > 1 */ +extern char intstack[]; +#define ININTSTACK(va) \ + (((vm_offset_t)(va)) >= (vm_offset_t)intstack && \ + (((vm_offset_t)(va)) < ((vm_offset_t)&intstack) + \ + INTSTACK_SIZE)) +#endif /* NCPUS > 1 */ + +#define INKERNELSTACK(va, th) \ + (th == THR_ACT_NULL || \ + (((vm_offset_t)(va)) >= th->thread->kernel_stack && \ + (((vm_offset_t)(va)) < th->thread->kernel_stack + \ + KERNEL_STACK_SIZE)) || \ + ININTSTACK(va)) + +struct i386_frame { + struct i386_frame *f_frame; + int f_retaddr; + int f_arg0; +}; + +#define TRAP 1 +#define INTERRUPT 2 +#define SYSCALL 3 + +db_addr_t db_user_trap_symbol_value = 0; +db_addr_t db_kernel_trap_symbol_value = 0; +db_addr_t db_interrupt_symbol_value = 0; +db_addr_t db_return_to_iret_symbol_value = 0; +db_addr_t db_syscall_symbol_value = 0; +boolean_t db_trace_symbols_found = FALSE; + +struct i386_kregs { + char *name; + int offset; +} i386_kregs[] = { + { "ebx", (int)(&((struct i386_kernel_state *)0)->k_ebx) }, + { "esp", (int)(&((struct i386_kernel_state *)0)->k_esp) }, + { "ebp", (int)(&((struct i386_kernel_state *)0)->k_ebp) }, + { "edi", (int)(&((struct i386_kernel_state *)0)->k_edi) }, + { "esi", (int)(&((struct i386_kernel_state *)0)->k_esi) }, + { "eip", (int)(&((struct i386_kernel_state *)0)->k_eip) }, + { 0 }, +}; + +/* Forward */ + +extern int * db_lookup_i386_kreg( + char *name, + int *kregp); +extern int db_i386_reg_value( + struct db_variable * vp, + db_expr_t * val, + int flag, + db_var_aux_param_t ap); +extern void db_find_trace_symbols(void); +extern int db_numargs( + struct i386_frame *fp, + task_t task); +extern void db_nextframe( + struct 
i386_frame **lfp, + struct i386_frame **fp, + db_addr_t *ip, + int frame_type, + thread_act_t thr_act); +extern int _setjmp( + jmp_buf_t * jb); + +/* + * Machine register set. + */ +struct db_variable db_regs[] = { + { "cs", (int *)&ddb_regs.cs, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "ds", (int *)&ddb_regs.ds, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "es", (int *)&ddb_regs.es, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "fs", (int *)&ddb_regs.fs, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "gs", (int *)&ddb_regs.gs, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "ss", (int *)&ddb_regs.ss, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "eax",(int *)&ddb_regs.eax, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "ecx",(int *)&ddb_regs.ecx, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "edx",(int *)&ddb_regs.edx, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "ebx",(int *)&ddb_regs.ebx, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "esp",(int *)&ddb_regs.uesp,db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "ebp",(int *)&ddb_regs.ebp, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "esi",(int *)&ddb_regs.esi, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "edi",(int *)&ddb_regs.edi, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "eip",(int *)&ddb_regs.eip, db_i386_reg_value, 0, 0, 0, 0, TRUE }, + { "efl",(int *)&ddb_regs.efl, db_i386_reg_value, 0, 0, 0, 0, TRUE }, +}; +struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); + +int * +db_lookup_i386_kreg( + char *name, + int *kregp) +{ + register struct i386_kregs *kp; + + for (kp = i386_kregs; kp->name; kp++) { + if (strcmp(name, kp->name) == 0) + return((int *)((int)kregp + kp->offset)); + } + return(0); +} + +int +db_i386_reg_value( + struct db_variable *vp, + db_expr_t *valuep, + int flag, + db_var_aux_param_t ap) +{ + extern char etext; + int *dp = 0; + db_expr_t null_reg = 0; + register thread_act_t thr_act = ap->thr_act; + extern unsigned int_stack_high; + int cpu; + + if (db_option(ap->modif, 'u')) { + if (thr_act == 
THR_ACT_NULL) { + if ((thr_act = current_act()) == THR_ACT_NULL) + db_error("no user registers\n"); + } + if (thr_act == current_act()) { + if (IS_USER_TRAP(&ddb_regs, &etext)) + dp = vp->valuep; + else if (ddb_regs.ebp < int_stack_high) + db_error("cannot get/set user registers in nested interrupt\n"); + } + } else { + if (thr_act == THR_ACT_NULL || thr_act == current_act()) { + dp = vp->valuep; + } else { + if (thr_act->thread && + !(thr_act->thread->state & TH_STACK_HANDOFF) && + thr_act->thread->kernel_stack) { + int cpu; + + for (cpu = 0; cpu < NCPUS; cpu++) { + if (machine_slot[cpu].running == TRUE && + cpu_data[cpu].active_thread == thr_act->thread && saved_state[cpu]) { + dp = (int *) (((int)saved_state[cpu]) + + (((int) vp->valuep) - + (int) &ddb_regs)); + break; + } + } + if (dp == 0 && thr_act && thr_act->thread) + dp = db_lookup_i386_kreg(vp->name, + (int *)(STACK_IKS(thr_act->thread->kernel_stack))); + if (dp == 0) + dp = &null_reg; + } else if (thr_act->thread && + (thr_act->thread->state&TH_STACK_HANDOFF)){ + /* only EIP is valid */ + if (vp->valuep == (int *) &ddb_regs.eip) { + dp = (int *)(&thr_act->thread->continuation); + } else { + dp = &null_reg; + } + } + } + } + if (dp == 0) { + int cpu; + + if (!db_option(ap->modif, 'u')) { + for (cpu = 0; cpu < NCPUS; cpu++) { + if (machine_slot[cpu].running == TRUE && + cpu_data[cpu].active_thread == thr_act->thread && saved_state[cpu]) { + dp = (int *) (((int)saved_state[cpu]) + + (((int) vp->valuep) - + (int) &ddb_regs)); + break; + } + } + } + if (dp == 0) { + if (!thr_act || thr_act->mact.pcb == 0) + db_error("no pcb\n"); + dp = (int *)((int)(&thr_act->mact.pcb->iss) + + ((int)vp->valuep - (int)&ddb_regs)); + } + } + if (flag == DB_VAR_SET) + *dp = *valuep; + else + *valuep = *dp; + return(0); +} + +void +db_find_trace_symbols(void) +{ + db_expr_t value; + boolean_t found_some; + + found_some = FALSE; + if (db_value_of_name(CC_SYM_PREFIX "user_trap", &value)) { + db_user_trap_symbol_value = (db_addr_t) 
value; + found_some = TRUE; + } + if (db_value_of_name(CC_SYM_PREFIX "kernel_trap", &value)) { + db_kernel_trap_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (db_value_of_name(CC_SYM_PREFIX "interrupt", &value)) { + db_interrupt_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (db_value_of_name(CC_SYM_PREFIX "return_to_iret", &value)) { + db_return_to_iret_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (db_value_of_name(CC_SYM_PREFIX "syscall", &value)) { + db_syscall_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (found_some) + db_trace_symbols_found = TRUE; +} + +/* + * Figure out how many arguments were passed into the frame at "fp". + */ +int db_numargs_default = 5; + +int +db_numargs( + struct i386_frame *fp, + task_t task) +{ + int *argp; + int inst; + int args; + extern char etext; + + argp = (int *)db_get_task_value((int)&fp->f_retaddr, 4, FALSE, task); + if (argp < (int *)VM_MIN_KERNEL_ADDRESS || (char *)argp > &etext) + args = db_numargs_default; + else if (!DB_CHECK_ACCESS((int)argp, 4, task)) + args = db_numargs_default; + else { + inst = db_get_task_value((int)argp, 4, FALSE, task); + if ((inst & 0xff) == 0x59) /* popl %ecx */ + args = 1; + else if ((inst & 0xffff) == 0xc483) /* addl %n, %esp */ + args = ((inst >> 16) & 0xff) / 4; + else + args = db_numargs_default; + } + return (args); +} + +struct interrupt_frame { + struct i386_frame *if_frame; /* point to next frame */ + int if_retaddr; /* return address to _interrupt */ + int if_unit; /* unit number */ + int if_spl; /* saved spl */ + int if_iretaddr; /* _return_to_{iret,iret_i} */ + int if_edx; /* old sp(iret) or saved edx(iret_i) */ + int if_ecx; /* saved ecx(iret_i) */ + int if_eax; /* saved eax(iret_i) */ + int if_eip; /* saved eip(iret_i) */ + int if_cs; /* saved cs(iret_i) */ + int if_efl; /* saved efl(iret_i) */ +}; + +/* + * Figure out the next frame up in the call stack. 
+ */ + * For trap(), we print the address of the faulting instruction and + * proceed with the calling frame. We return the ip that faulted. + * If the trap was caused by jumping through a bogus pointer, then + * the next line in the backtrace will list some random function as + * being called. It should get the argument list correct, though. + * It might be possible to dig out from the next frame up the name + * of the function that faulted, but that could get hairy. + */ +void +db_nextframe( + struct i386_frame **lfp, /* in/out */ + struct i386_frame **fp, /* in/out */ + db_addr_t *ip, /* out */ + int frame_type, /* in */ + thread_act_t thr_act) /* in */ +{ + extern char * trap_type[]; + extern int TRAP_TYPES; + + struct i386_saved_state *saved_regs; + struct interrupt_frame *ifp; + struct i386_interrupt_state *isp; + task_t task = (thr_act != THR_ACT_NULL)? thr_act->task: TASK_NULL; + + switch(frame_type) { + case TRAP: + /* + * We know that trap() has 1 argument and we know that + * it is an (struct i386_saved_state *). 
+ */ + saved_regs = (struct i386_saved_state *) + db_get_task_value((int)&((*fp)->f_arg0),4,FALSE,task); + if (saved_regs->trapno >= 0 && saved_regs->trapno < TRAP_TYPES) { + db_printf(">>>>> %s trap at ", + trap_type[saved_regs->trapno]); + } else { + db_printf(">>>>> trap (number %d) at ", + saved_regs->trapno & 0xffff); + } + db_task_printsym(saved_regs->eip, DB_STGY_PROC, task); + db_printf(" <<<<<\n"); + *fp = (struct i386_frame *)saved_regs->ebp; + *ip = (db_addr_t)saved_regs->eip; + break; + case INTERRUPT: + if (*lfp == 0) { + db_printf(">>>>> interrupt <<<<<\n"); + goto miss_frame; + } + db_printf(">>>>> interrupt at "); + ifp = (struct interrupt_frame *)(*lfp); + *fp = ifp->if_frame; + if (ifp->if_iretaddr == db_return_to_iret_symbol_value) + *ip = ((struct i386_interrupt_state *) ifp->if_edx)->eip; + else + *ip = (db_addr_t) ifp->if_eip; + db_task_printsym(*ip, DB_STGY_PROC, task); + db_printf(" <<<<<\n"); + break; + case SYSCALL: + if (thr_act != THR_ACT_NULL && thr_act->mact.pcb) { + *ip = (db_addr_t) thr_act->mact.pcb->iss.eip; + *fp = (struct i386_frame *) thr_act->mact.pcb->iss.ebp; + break; + } + /* falling down for unknown case */ + default: + miss_frame: + *ip = (db_addr_t) + db_get_task_value((int)&(*fp)->f_retaddr, 4, FALSE, task); + *lfp = *fp; + *fp = (struct i386_frame *) + db_get_task_value((int)&(*fp)->f_frame, 4, FALSE, task); + break; + } +} + +void +db_stack_trace_cmd( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + struct i386_frame *frame, *lastframe; + int *argp; + db_addr_t callpc, lastcallpc; + int frame_type; + boolean_t kernel_only = TRUE; + boolean_t trace_thread = FALSE; + boolean_t trace_all_threads = FALSE; + int thcount = 0; + char *filename; + int linenum; + task_t task; + thread_act_t th, top_act; + int user_frame; + int frame_count; + jmp_buf_t *prev; + jmp_buf_t db_jmp_buf; + queue_entry_t act_list; + + if (!db_trace_symbols_found) + db_find_trace_symbols(); + + { + register char *cp = 
modif; + register char c; + + while ((c = *cp++) != 0) { + if (c == 't') + trace_thread = TRUE; + if (c == 'T') { + trace_all_threads = TRUE; + trace_thread = TRUE; + } + if (c == 'u') + kernel_only = FALSE; + } + } + + if (trace_all_threads) { + if (!have_addr && !trace_thread) { + have_addr = TRUE; + trace_thread = TRUE; + act_list = &(current_task()->thr_acts); + addr = (db_expr_t) queue_first(act_list); + } else if (trace_thread) { + if (have_addr) { + if (!db_check_act_address_valid((thread_act_t)addr)) { + if (db_lookup_task((task_t)addr) == -1) + return; + act_list = &(((task_t)addr)->thr_acts); + addr = (db_expr_t) queue_first(act_list); + } else { + act_list = &(((thread_act_t)addr)->task->thr_acts); + thcount = db_lookup_task_act(((thread_act_t)addr)->task, + (thread_act_t)addr); + } + } else { + th = db_default_act; + if (th == THR_ACT_NULL) + th = current_act(); + if (th == THR_ACT_NULL) { + db_printf("no active thr_act\n"); + return; + } + have_addr = TRUE; + act_list = &th->task->thr_acts; + addr = (db_expr_t) queue_first(act_list); + } + } + } + + if (count == -1) + count = 65535; + + next_thread: + top_act = THR_ACT_NULL; + + user_frame = 0; + frame_count = count; + + if (!have_addr && !trace_thread) { + frame = (struct i386_frame *)ddb_regs.ebp; + callpc = (db_addr_t)ddb_regs.eip; + th = current_act(); + task = (th != THR_ACT_NULL)? 
th->task: TASK_NULL; + } else if (trace_thread) { + if (have_addr) { + th = (thread_act_t) addr; + if (!db_check_act_address_valid(th)) + return; + } else { + th = db_default_act; + if (th == THR_ACT_NULL) + th = current_act(); + if (th == THR_ACT_NULL) { + db_printf("no active thread\n"); + return; + } + } + if (trace_all_threads) + db_printf("---------- Thread 0x%x (#%d of %d) ----------\n", + addr, thcount, th->task->thr_act_count); + + next_activation: + user_frame = 0; + + task = th->task; + if (th == current_act()) { + frame = (struct i386_frame *)ddb_regs.ebp; + callpc = (db_addr_t)ddb_regs.eip; + } else { + if (th->mact.pcb == 0) { + db_printf("thread has no pcb\n"); + return; + } + if (!th->thread) { + register struct i386_saved_state *iss = + &th->mact.pcb->iss; + + db_printf("thread has no shuttle\n"); +#if 0 + frame = (struct i386_frame *) (iss->ebp); + callpc = (db_addr_t) (iss->eip); +#else + goto thread_done; +#endif + } + else if ((th->thread->state & TH_STACK_HANDOFF) || + th->thread->kernel_stack == 0) { + register struct i386_saved_state *iss = + &th->mact.pcb->iss; + + db_printf("Continuation "); + db_task_printsym((db_expr_t)th->thread->continuation, + DB_STGY_PROC, task); + db_printf("\n"); + frame = (struct i386_frame *) (iss->ebp); + callpc = (db_addr_t) (iss->eip); + } else { + int cpu; + + for (cpu = 0; cpu < NCPUS; cpu++) { + if (machine_slot[cpu].running == TRUE && + cpu_data[cpu].active_thread == th->thread && + saved_state[cpu]) { + break; + } + } + if (top_act != THR_ACT_NULL) { + /* + * Trying to get the backtrace of an activation + * which is not the top_most one in the RPC chain: + * use the activation's pcb. 
+ */ + register struct i386_saved_state *iss = + &th->mact.pcb->iss; + frame = (struct i386_frame *) (iss->ebp); + callpc = (db_addr_t) (iss->eip); + } else { + if (cpu == NCPUS) { + register struct i386_kernel_state *iks; + int r; + + iks = STACK_IKS(th->thread->kernel_stack); + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + frame = (struct i386_frame *) (iks->k_ebp); + callpc = (db_addr_t) (iks->k_eip); + } else { + /* + * The kernel stack has probably been + * paged out (swapped out activation). + */ + db_recover = prev; + if (r == 2) /* 'q' from db_more() */ + db_error(0); + db_printf("\n", + iks); + goto thread_done; + } + db_recover = prev; + } else { + db_printf(">>>>> active on cpu %d <<<<<\n", + cpu); + frame = (struct i386_frame *) + saved_state[cpu]->ebp; + callpc = (db_addr_t) saved_state[cpu]->eip; + } + } + } + } + } else { + frame = (struct i386_frame *)addr; + th = (db_default_act)? db_default_act: current_act(); + task = (th != THR_ACT_NULL)? th->task: TASK_NULL; + callpc = (db_addr_t)db_get_task_value((int)&frame->f_retaddr, + 4, + FALSE, + (user_frame) ? task : 0); + } + + if (!INKERNELSTACK((unsigned)frame, th)) { + db_printf(">>>>> user space <<<<<\n"); + if (kernel_only) + goto thread_done; + user_frame++; + } else if (INKSERVER(callpc) && INKSERVER(frame)) { + db_printf(">>>>> INKserver space <<<<<\n"); + } + + lastframe = 0; + lastcallpc = (db_addr_t) 0; + while (frame_count-- && frame != 0) { + int narg; + char * name; + db_expr_t offset; + db_addr_t call_func = 0; + int r; + + db_symbol_values(NULL, + db_search_task_symbol_and_line( + callpc, + DB_STGY_XTRN, + &offset, + &filename, + &linenum, + (user_frame) ? 
task : 0, + &narg), + &name, (db_expr_t *)&call_func); + if (user_frame == 0) { + if (call_func == db_user_trap_symbol_value || + call_func == db_kernel_trap_symbol_value) { + frame_type = TRAP; + narg = 1; + } else if (call_func == db_interrupt_symbol_value) { + frame_type = INTERRUPT; + goto next_frame; + } else if (call_func == db_syscall_symbol_value) { + frame_type = SYSCALL; + goto next_frame; + } else { + frame_type = 0; + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + if (narg < 0) + narg = db_numargs(frame, + (user_frame) ? task : 0); + db_recover = prev; + } else { + db_recover = prev; + goto thread_done; + } + } + } else { + frame_type = 0; + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + if (narg < 0) + narg = db_numargs(frame, + (user_frame) ? task : 0); + db_recover = prev; + } else { + db_recover = prev; + goto thread_done; + } + } + + if (name == 0 || offset > db_maxoff) { + db_printf("0x%x 0x%x(", frame, callpc); + offset = 0; + } else + db_printf("0x%x %s(", frame, name); + + argp = &frame->f_arg0; + while (narg > 0) { + int value; + + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + value = db_get_task_value((int)argp, + 4, + FALSE, + (user_frame) ? task : 0); + } else { + db_recover = prev; + if (r == 2) /* 'q' from db_more() */ + db_error(0); + db_printf("... 
)"); + if (offset) + db_printf("+%x", offset); + if (filename) { + db_printf(" [%s", filename); + if (linenum > 0) + db_printf(":%d", linenum); + db_printf("]"); + } + db_printf("\n"); + goto thread_done; + } + db_recover = prev; + db_printf("%x", value); + argp++; + if (--narg != 0) + db_printf(","); + } + if (narg < 0) + db_printf("..."); + db_printf(")"); + if (offset) { + db_printf("+%x", offset); + } + if (filename) { + db_printf(" [%s", filename); + if (linenum > 0) + db_printf(":%d", linenum); + db_printf("]"); + } + db_printf("\n"); + + next_frame: + lastcallpc = callpc; + db_nextframe(&lastframe, &frame, &callpc, frame_type, + (user_frame) ? th : THR_ACT_NULL); + + if (frame == 0) { + if (th->lower != THR_ACT_NULL) { + if (top_act == THR_ACT_NULL) + top_act = th; + th = th->lower; + db_printf(">>>>> next activation 0x%x ($task%d.%d) <<<<<\n", + th, + db_lookup_task(th->task), + db_lookup_task_act(th->task, th)); + goto next_activation; + } + /* end of chain */ + break; + } + if (!INKERNELSTACK(lastframe, th) || + !INKERNELSTACK((unsigned)frame, th)) + user_frame++; + if (user_frame == 1) { + db_printf(">>>>> user space <<<<<\n"); + if (kernel_only) + break; + } else if ((!INKSERVER(lastframe) || !INKSERVER(lastcallpc)) && + (INKSERVER(callpc) && INKSERVER(frame))) { + db_printf(">>>>> inkserver space <<<<<\n"); + } + if (frame <= lastframe) { + if ((INKERNELSTACK(lastframe, th) && + !INKERNELSTACK(frame, th)) || + (INKSERVER(lastframe) ^ INKSERVER(frame))) + continue; + db_printf("Bad frame pointer: 0x%x\n", frame); + break; + } + } + + thread_done: + if (trace_all_threads) { + if (top_act != THR_ACT_NULL) + th = top_act; + th = (thread_act_t) queue_next(&th->thr_acts); + if (! 
queue_end(act_list, (queue_entry_t) th)) { + db_printf("\n"); + addr = (db_expr_t) th; + thcount++; + goto next_thread; + + } + } +} diff --git a/osfmk/i386/eflags.h b/osfmk/i386/eflags.h new file mode 100644 index 000000000..bb5d56d4a --- /dev/null +++ b/osfmk/i386/eflags.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _I386_EFLAGS_H_ +#define _I386_EFLAGS_H_ + +/* + * i386 flags register + */ + +#ifndef EFL_CF /* FIXME - this is pulled from mach/i386/eflags.h */ +#define EFL_CF 0x00000001 /* carry */ +#define EFL_PF 0x00000004 /* parity of low 8 bits */ +#define EFL_AF 0x00000010 /* carry out of bit 3 */ +#define EFL_ZF 0x00000040 /* zero */ +#define EFL_SF 0x00000080 /* sign */ +#define EFL_TF 0x00000100 /* trace trap */ +#define EFL_IF 0x00000200 /* interrupt enable */ +#define EFL_DF 0x00000400 /* direction */ +#define EFL_OF 0x00000800 /* overflow */ +#define EFL_IOPL 0x00003000 /* IO privilege level: */ +#define EFL_IOPL_KERNEL 0x00000000 /* kernel */ +#define EFL_IOPL_USER 0x00003000 /* user */ +#define EFL_NT 0x00004000 /* nested task */ +#define EFL_RF 0x00010000 /* resume without tracing */ +#define EFL_VM 0x00020000 /* virtual 8086 mode */ +#define EFL_AC 0x00040000 /* alignment check */ +#define EFL_VIF 0x00080000 /* virtual interrupt flag */ +#define EFL_VIP 0x00100000 /* virtual interrupt pending */ +#define EFL_ID 0x00200000 /* cpuID instruction */ +#endif + +#define EFL_USER_SET (EFL_IF) +#define EFL_USER_CLEAR (EFL_IOPL|EFL_NT|EFL_RF) + +#endif /* _I386_EFLAGS_H_ */ diff --git a/osfmk/i386/endian.h b/osfmk/i386/endian.h new file mode 100644 index 000000000..55de05e4d --- /dev/null +++ b/osfmk/i386/endian.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _MACHINE_ENDIAN_H_ +#define _MACHINE_ENDIAN_H_ + +/* + * Definitions for byte order, + * according to byte significance from low address to high. + */ +#define LITTLE_ENDIAN 1234 /* least-significant byte first (vax) */ +#define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ + +#define BYTE_ORDER LITTLE_ENDIAN /* byte order on i386 */ +#define ENDIAN LITTLE + +/* + * Macros for network/external number representation conversion. + */ + +unsigned short ntohs(unsigned short), htons(unsigned short); +unsigned long ntohl(unsigned long), htonl(unsigned long); + +/* + * Use GNUC support to inline the byteswappers. 
+ */ + +extern __inline__ +unsigned short +ntohs(unsigned short w_int) +{ + register unsigned short w = w_int; + __asm__ volatile("xchgb %h1,%b1" : "=q" (w) : "0" (w)); + return (w); /* zero-extend for compat */ +} + +#define htons ntohs + +extern __inline__ +unsigned long +ntohl(register unsigned long value) +{ + register unsigned long l = value; + __asm__ volatile("bswap %0" : "=r" (l) : "0" (l)); + return l; +} + +#define htonl ntohl + +#define NTOHL(x) (x) = ntohl((unsigned long)x) +#define NTOHS(x) (x) = ntohs((unsigned short)x) +#define HTONL(x) (x) = htonl((unsigned long)x) +#define HTONS(x) (x) = htons((unsigned short)x) + +#endif diff --git a/osfmk/i386/exec.h b/osfmk/i386/exec.h new file mode 100644 index 000000000..72526d180 --- /dev/null +++ b/osfmk/i386/exec.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * exec stucture in an a.out file derived from FSF's + * a.out.gnu.h file. + */ + +#ifndef _EXEC_ +#define _EXEC_ 1 + +/* + * Header prepended to each a.out file. + */ +struct exec +{ +#ifdef sun + unsigned short a_machtype; /* machine type */ + unsigned short a_info; /* Use macros N_MAGIC, etc for access */ +#else /* sun */ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ +#endif /* sun */ + unsigned long a_text; /* length of text, in bytes */ + unsigned long a_data; /* length of data, in bytes */ + unsigned long a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned long a_syms; /* length of symbol table data in file, in bytes */ + unsigned long a_entry; /* start address */ + unsigned long a_trsize; /* length of relocation info for text, in bytes */ + unsigned long a_drsize; /* length of relocation info for data, in bytes */ +}; + +/* Code indicating object file or impure executable. */ +#define OMAGIC 0407 +/* Code indicating pure executable. 
*/ +#define NMAGIC 0410 +/* Code indicating demand-paged executable. */ +#define ZMAGIC 0413 + +#ifdef sun +/* Sun machine types */ + +#define M_OLDSUN2 0 /* old sun-2 executable files */ +#define M_68010 1 /* runs on either 68010 or 68020 */ +#define M_68020 2 /* runs only on 68020 */ +#endif /* sun */ + +#endif /* _EXEC_ */ diff --git a/osfmk/i386/flipc_page.h b/osfmk/i386/flipc_page.h new file mode 100644 index 000000000..542c8a446 --- /dev/null +++ b/osfmk/i386/flipc_page.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include + +/* + * Machine specific defines to allow flipc to work with pages. + * Included from flipc_usermsg.c only. + */ +#define FLIPC_PAGESIZE I386_PGBYTES +#define FLIPC_PAGERND_FN i386_round_page +#define FLIPC_BTOP i386_btop diff --git a/osfmk/i386/fpu.c b/osfmk/i386/fpu.c new file mode 100644 index 000000000..0bce98884 --- /dev/null +++ b/osfmk/i386/fpu.c @@ -0,0 +1,763 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1992-1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if 0 +#include +extern int curr_ipl; +#define ASSERT_IPL(L) \ +{ \ + if (curr_ipl != L) { \ + printf("IPL is %d, expected %d\n", curr_ipl, L); \ + panic("fpu: wrong ipl"); \ + } \ +} +#else +#define ASSERT_IPL(L) +#endif + +int fp_kind = FP_387; /* 80387 present */ +zone_t ifps_zone; /* zone for FPU save area */ + +#if NCPUS == 1 +volatile thread_act_t fp_act = THR_ACT_NULL; + /* thread whose state is in FPU */ + /* always THR_ACT_NULL if emulating FPU */ +volatile thread_act_t fp_intr_act = THR_ACT_NULL; + + +#define clear_fpu() \ + { \ + set_ts(); \ + fp_act = THR_ACT_NULL; \ + } + +#else /* NCPUS > 1 */ +#define clear_fpu() \ + { \ + set_ts(); \ + } + +#endif + +/* Forward */ + +extern void fpinit(void); +extern void fp_save( + thread_act_t thr_act); +extern void fp_load( + thread_act_t thr_act); + +/* + * Look for FPU and initialize it. + * Called on each CPU. + */ +void +init_fpu(void) +{ + unsigned short status, control; + + /* + * Check for FPU by initializing it, + * then trying to read the correct bit patterns from + * the control and status registers. + */ + set_cr0(get_cr0() & ~(CR0_EM|CR0_TS)); /* allow use of FPU */ + + fninit(); + status = fnstsw(); + fnstcw(&control); + + if ((status & 0xff) == 0 && + (control & 0x103f) == 0x3f) + { +#if 0 + /* + * We have a FPU of some sort. + * Compare -infinity against +infinity + * to check whether we have a 287 or a 387. + */ + volatile double fp_infinity, fp_one, fp_zero; + fp_one = 1.0; + fp_zero = 0.0; + fp_infinity = fp_one / fp_zero; + if (fp_infinity == -fp_infinity) { + /* + * We have an 80287. + */ + fp_kind = FP_287; + __asm__ volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */ + } + else +#endif + { + /* + * We have a 387. + */ + fp_kind = FP_387; + } + /* + * Trap wait instructions. Turn off FPU for now. 
+ */ + set_cr0(get_cr0() | CR0_TS | CR0_MP); + } + else + { + /* + * NO FPU. + */ + fp_kind = FP_NO; + set_cr0(get_cr0() | CR0_EM); + } +} + +/* + * Initialize FP handling. + */ +void +fpu_module_init(void) +{ + ifps_zone = zinit(sizeof(struct i386_fpsave_state), + THREAD_MAX * sizeof(struct i386_fpsave_state), + THREAD_CHUNK * sizeof(struct i386_fpsave_state), + "i386 fpsave state"); +} + +/* + * Free a FPU save area. + * Called only when thread terminating - no locking necessary. + */ +void +fp_free(fps) + struct i386_fpsave_state *fps; +{ +ASSERT_IPL(SPL0); +#if NCPUS == 1 + if ((fp_act != THR_ACT_NULL) && (fp_act->mact.pcb->ims.ifps == fps)) { + /* + * Make sure we don't get FPU interrupts later for + * this thread + */ + fwait(); + + /* Mark it free and disable access */ + clear_fpu(); + } +#endif /* NCPUS == 1 */ + zfree(ifps_zone, (vm_offset_t) fps); +} + +/* + * Set the floating-point state for a thread. + * If the thread is not the current thread, it is + * not running (held). Locking needed against + * concurrent fpu_set_state or fpu_get_state. + */ +kern_return_t +fpu_set_state( + thread_act_t thr_act, + struct i386_float_state *state) +{ + register pcb_t pcb; + register struct i386_fpsave_state *ifps; + register struct i386_fpsave_state *new_ifps; + +ASSERT_IPL(SPL0); + if (fp_kind == FP_NO) + return KERN_FAILURE; + + assert(thr_act != THR_ACT_NULL); + pcb = thr_act->mact.pcb; + +#if NCPUS == 1 + + /* + * If this thread`s state is in the FPU, + * discard it; we are replacing the entire + * FPU state. + */ + if (fp_act == thr_act) { + fwait(); /* wait for possible interrupt */ + clear_fpu(); /* no state in FPU */ + } +#endif + + if (state->initialized == 0) { + /* + * new FPU state is 'invalid'. + * Deallocate the fp state if it exists. + */ + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + pcb->ims.ifps = 0; + simple_unlock(&pcb->lock); + + if (ifps != 0) { + zfree(ifps_zone, (vm_offset_t) ifps); + } + } + else { + /* + * Valid state. 
Allocate the fp state if there is none. + */ + register struct i386_fp_save *user_fp_state; + register struct i386_fp_regs *user_fp_regs; + + user_fp_state = (struct i386_fp_save *) &state->hw_state[0]; + user_fp_regs = (struct i386_fp_regs *) + &state->hw_state[sizeof(struct i386_fp_save)]; + + new_ifps = 0; + Retry: + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + if (ifps == 0) { + if (new_ifps == 0) { + simple_unlock(&pcb->lock); + new_ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + goto Retry; + } + ifps = new_ifps; + new_ifps = 0; + pcb->ims.ifps = ifps; + } + + /* + * Ensure that reserved parts of the environment are 0. + */ + bzero((char *)&ifps->fp_save_state, sizeof(struct i386_fp_save)); + + ifps->fp_save_state.fp_control = user_fp_state->fp_control; + ifps->fp_save_state.fp_status = user_fp_state->fp_status; + ifps->fp_save_state.fp_tag = user_fp_state->fp_tag; + ifps->fp_save_state.fp_eip = user_fp_state->fp_eip; + ifps->fp_save_state.fp_cs = user_fp_state->fp_cs; + ifps->fp_save_state.fp_opcode = user_fp_state->fp_opcode; + ifps->fp_save_state.fp_dp = user_fp_state->fp_dp; + ifps->fp_save_state.fp_ds = user_fp_state->fp_ds; + ifps->fp_regs = *user_fp_regs; + + simple_unlock(&pcb->lock); + if (new_ifps != 0) + zfree(ifps_zone, (vm_offset_t) ifps); + } + + return KERN_SUCCESS; +} + +/* + * Get the floating-point state for a thread. + * If the thread is not the current thread, it is + * not running (held). Locking needed against + * concurrent fpu_set_state or fpu_get_state. + */ +kern_return_t +fpu_get_state( + thread_act_t thr_act, + register struct i386_float_state *state) +{ + register pcb_t pcb; + register struct i386_fpsave_state *ifps; + +ASSERT_IPL(SPL0); + if (fp_kind == FP_NO) + return KERN_FAILURE; + + assert(thr_act != THR_ACT_NULL); + pcb = thr_act->mact.pcb; + + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + if (ifps == 0) { + /* + * No valid floating-point state. 
+ */ + simple_unlock(&pcb->lock); + bzero((char *)state, sizeof(struct i386_float_state)); + return KERN_SUCCESS; + } + + /* Make sure we`ve got the latest fp state info */ + /* If the live fpu state belongs to our target */ +#if NCPUS == 1 + if (thr_act == fp_act) +#else + if (thr_act == current_act()) +#endif + { + clear_ts(); + fp_save(thr_act); + clear_fpu(); + } + + state->fpkind = fp_kind; + state->exc_status = 0; + + { + register struct i386_fp_save *user_fp_state; + register struct i386_fp_regs *user_fp_regs; + + state->initialized = ifps->fp_valid; + + user_fp_state = (struct i386_fp_save *) &state->hw_state[0]; + user_fp_regs = (struct i386_fp_regs *) + &state->hw_state[sizeof(struct i386_fp_save)]; + + /* + * Ensure that reserved parts of the environment are 0. + */ + bzero((char *)user_fp_state, sizeof(struct i386_fp_save)); + + user_fp_state->fp_control = ifps->fp_save_state.fp_control; + user_fp_state->fp_status = ifps->fp_save_state.fp_status; + user_fp_state->fp_tag = ifps->fp_save_state.fp_tag; + user_fp_state->fp_eip = ifps->fp_save_state.fp_eip; + user_fp_state->fp_cs = ifps->fp_save_state.fp_cs; + user_fp_state->fp_opcode = ifps->fp_save_state.fp_opcode; + user_fp_state->fp_dp = ifps->fp_save_state.fp_dp; + user_fp_state->fp_ds = ifps->fp_save_state.fp_ds; + *user_fp_regs = ifps->fp_regs; + } + simple_unlock(&pcb->lock); + + return KERN_SUCCESS; +} + +/* + * Initialize FPU. + * + * Raise exceptions for: + * invalid operation + * divide by zero + * overflow + * + * Use 53-bit precision. 
+ */ +void +fpinit(void) +{ + unsigned short control; + +ASSERT_IPL(SPL0); + clear_ts(); + fninit(); + fnstcw(&control); + control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */ + control |= (FPC_PC_53 | /* Set precision */ + FPC_RC_RN | /* round-to-nearest */ + FPC_ZE | /* Suppress zero-divide */ + FPC_OE | /* and overflow */ + FPC_UE | /* underflow */ + FPC_IE | /* Allow NaNQs and +-INF */ + FPC_DE | /* Allow denorms as operands */ + FPC_PE); /* No trap for precision loss */ + fldcw(control); +} + +/* + * Coprocessor not present. + */ + +void +fpnoextflt(void) +{ + /* + * Enable FPU use. + */ +ASSERT_IPL(SPL0); + clear_ts(); +#if NCPUS == 1 + + /* + * If this thread`s state is in the FPU, we are done. + */ + if (fp_act == current_act()) + return; + + /* Make sure we don't do fpsave() in fp_intr while doing fpsave() + * here if the current fpu instruction generates an error. + */ + fwait(); + /* + * If another thread`s state is in the FPU, save it. + */ + if (fp_act != THR_ACT_NULL) { + fp_save(fp_act); + } + + /* + * Give this thread the FPU. + */ + fp_act = current_act(); + +#endif /* NCPUS == 1 */ + + /* + * Load this thread`s state into the FPU. + */ + fp_load(current_act()); +} + +/* + * FPU overran end of segment. + * Re-initialize FPU. Floating point state is not valid. + */ + +void +fpextovrflt(void) +{ + register thread_act_t thr_act = current_act(); + register pcb_t pcb; + register struct i386_fpsave_state *ifps; + +#if NCPUS == 1 + + /* + * Is exception for the currently running thread? + */ + if (fp_act != thr_act) { + /* Uh oh... */ + panic("fpextovrflt"); + } +#endif + + /* + * This is a non-recoverable error. + * Invalidate the thread`s FPU state. + */ + pcb = thr_act->mact.pcb; + simple_lock(&pcb->lock); + ifps = pcb->ims.ifps; + pcb->ims.ifps = 0; + simple_unlock(&pcb->lock); + + /* + * Re-initialize the FPU. + */ + clear_ts(); + fninit(); + + /* + * And disable access. 
+ */ + clear_fpu(); + + if (ifps) + zfree(ifps_zone, (vm_offset_t) ifps); + + /* + * Raise exception. + */ + i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0); + /*NOTREACHED*/ +} + +/* + * FPU error. Called by AST. + */ + +void +fpexterrflt(void) +{ + register thread_act_t thr_act = current_act(); + +ASSERT_IPL(SPL0); +#if NCPUS == 1 + /* + * Since FPU errors only occur on ESC or WAIT instructions, + * the current thread should own the FPU. If it didn`t, + * we should have gotten the task-switched interrupt first. + */ + if (fp_act != THR_ACT_NULL) { + panic("fpexterrflt"); + return; + } + + /* + * Check if we got a context switch between the interrupt and the AST + * This can happen if the interrupt arrived after the FPU AST was + * checked. In this case, raise the exception in fp_load when this + * thread next time uses the FPU. Remember exception condition in + * fp_valid (extended boolean 2). + */ + if (fp_intr_act != thr_act) { + if (fp_intr_act == THR_ACT_NULL) { + panic("fpexterrflt: fp_intr_act == THR_ACT_NULL"); + return; + } + fp_intr_act->mact.pcb->ims.ifps->fp_valid = 2; + fp_intr_act = THR_ACT_NULL; + return; + } + fp_intr_act = THR_ACT_NULL; +#else /* NCPUS == 1 */ + /* + * Save the FPU state and turn off the FPU. + */ + fp_save(thr_act); +#endif /* NCPUS == 1 */ + + /* + * Raise FPU exception. + * Locking not needed on pcb->ims.ifps, + * since thread is running. + */ + i386_exception(EXC_ARITHMETIC, + EXC_I386_EXTERR, + thr_act->mact.pcb->ims.ifps->fp_save_state.fp_status); + /*NOTREACHED*/ +} + +/* + * Save FPU state. + * + * Locking not needed: + * . if called from fpu_get_state, pcb already locked. + * . if called from fpnoextflt or fp_intr, we are single-cpu + * . otherwise, thread is running. 
+ */ + +void +fp_save( + thread_act_t thr_act) +{ + register pcb_t pcb = thr_act->mact.pcb; + register struct i386_fpsave_state *ifps = pcb->ims.ifps; + + if (ifps != 0 && !ifps->fp_valid) { + /* registers are in FPU */ + ifps->fp_valid = TRUE; + fnsave(&ifps->fp_save_state); + } +} + +/* + * Restore FPU state from PCB. + * + * Locking not needed; always called on the current thread. + */ + +void +fp_load( + thread_act_t thr_act) +{ + register pcb_t pcb = thr_act->mact.pcb; + register struct i386_fpsave_state *ifps; + +ASSERT_IPL(SPL0); + ifps = pcb->ims.ifps; + if (ifps == 0) { + ifps = (struct i386_fpsave_state *) zalloc(ifps_zone); + bzero((char *)ifps, sizeof *ifps); + pcb->ims.ifps = ifps; + fpinit(); +#if 1 +/* + * I'm not sure this is needed. Does the fpu regenerate the interrupt in + * frstor or not? Without this code we may miss some exceptions, with it + * we might send too many exceptions. + */ + } else if (ifps->fp_valid == 2) { + /* delayed exception pending */ + + ifps->fp_valid = TRUE; + clear_fpu(); + /* + * Raise FPU exception. + * Locking not needed on pcb->ims.ifps, + * since thread is running. + */ + i386_exception(EXC_ARITHMETIC, + EXC_I386_EXTERR, + thr_act->mact.pcb->ims.ifps->fp_save_state.fp_status); + /*NOTREACHED*/ +#endif + } else { + frstor(ifps->fp_save_state); + } + ifps->fp_valid = FALSE; /* in FPU */ +} + +/* + * Allocate and initialize FP state for current thread. + * Don't load state. + * + * Locking not needed; always called on the current thread. 
+ */ +void +fp_state_alloc(void) +{ + pcb_t pcb = current_act()->mact.pcb; + struct i386_fpsave_state *ifps; + + ifps = (struct i386_fpsave_state *)zalloc(ifps_zone); + bzero((char *)ifps, sizeof *ifps); + pcb->ims.ifps = ifps; + + ifps->fp_valid = TRUE; + ifps->fp_save_state.fp_control = (0x037f + & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC)) + | (FPC_PC_53|FPC_IC_AFF); + ifps->fp_save_state.fp_status = 0; + ifps->fp_save_state.fp_tag = 0xffff; /* all empty */ +} + + +/* + * fpflush(thread_act_t) + * Flush the current act's state, if needed + * (used by thread_terminate_self to ensure fp faults + * aren't satisfied by overly general trap code in the + * context of the reaper thread) + */ +void +fpflush(thread_act_t thr_act) +{ +#if NCPUS == 1 + if (fp_act && thr_act == fp_act) { + clear_ts(); + fwait(); + clear_fpu(); + } +#else + /* not needed on MP x86s; fp not lazily evaluated */ +#endif +} + + +/* + * Handle a coprocessor error interrupt on the AT386. + * This comes in on line 5 of the slave PIC at SPL1. + */ + +void +fpintr(void) +{ + spl_t s; + thread_act_t thr_act = current_act(); + +ASSERT_IPL(SPL1); + /* + * Turn off the extended 'busy' line. + */ + outb(0xf0, 0); + + /* + * Save the FPU context to the thread using it. + */ +#if NCPUS == 1 + if (fp_act == THR_ACT_NULL) { + printf("fpintr: FPU not belonging to anyone!\n"); + clear_ts(); + fninit(); + clear_fpu(); + return; + } + + if (fp_act != thr_act) { + /* + * FPU exception is for a different thread. + * When that thread again uses the FPU an exception will be + * raised in fp_load. Remember the condition in fp_valid (== 2). 
+ */ + clear_ts(); + fp_save(fp_act); + fp_act->mact.pcb->ims.ifps->fp_valid = 2; + fninit(); + clear_fpu(); + /* leave fp_intr_act THR_ACT_NULL */ + return; + } + if (fp_intr_act != THR_ACT_NULL) + panic("fp_intr: already caught intr"); + fp_intr_act = thr_act; +#endif /* NCPUS == 1 */ + + clear_ts(); + fp_save(thr_act); + fninit(); + clear_fpu(); + + /* + * Since we are running on the interrupt stack, we must + * signal the thread to take the exception when we return + * to user mode. Use an AST to do this. + * + * Don`t set the thread`s AST field. If the thread is + * descheduled before it takes the AST, it will notice + * the FPU error when it reloads its FPU state. + */ + s = splsched(); + mp_disable_preemption(); + ast_on(AST_I386_FP); + mp_enable_preemption(); + splx(s); +} diff --git a/osfmk/i386/fpu.h b/osfmk/i386/fpu.h new file mode 100644 index 000000000..2b1f9acd7 --- /dev/null +++ b/osfmk/i386/fpu.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _I386_FPU_H_ +#define _I386_FPU_H_ + +/* + * Macro definitions for routines to manipulate the + * floating-point processor. + */ + +#include +#include +#include +#include +#include +#include + +/* + * FPU instructions. 
+ */ +#define fninit() \ + __asm__ volatile("fninit") + +#define fnstcw(control) \ + __asm__("fnstcw %0" : "=m" (*(unsigned short *)(control))) + +#define fldcw(control) \ + __asm__ volatile("fldcw %0" : : "m" (*(unsigned short *) &(control)) ) + +extern unsigned short fnstsw(void); + +extern __inline__ unsigned short fnstsw(void) +{ + unsigned short status; + __asm__ volatile("fnstsw %0" : "=ma" (status)); + return(status); +} + +#define fnclex() \ + __asm__ volatile("fnclex") + +#define fnsave(state) \ + __asm__ volatile("fnsave %0" : "=m" (*state)) + +#define frstor(state) \ + __asm__ volatile("frstor %0" : : "m" (state)) + +#define fwait() \ + __asm__("fwait"); + + +#define fpu_load_context(pcb) + +/* + * Save thread`s FPU context. + * If only one CPU, we just set the task-switched bit, + * to keep the new thread from using the coprocessor. + * If multiple CPUs, we save the entire state. + */ +#if NCPUS > 1 +#define fpu_save_context(thread) \ + { \ + register struct i386_fpsave_state *ifps; \ + ifps = (thread)->top_act->mact.pcb->ims.ifps; \ + if (ifps != 0 && !ifps->fp_valid) { \ + /* registers are in FPU - save to memory */ \ + ifps->fp_valid = TRUE; \ + fnsave(&ifps->fp_save_state); \ + } \ + set_ts(); \ + } + +#else /* NCPUS == 1 */ +#define fpu_save_context(thread) \ + { \ + set_ts(); \ + } + +#endif /* NCPUS == 1 */ + + +extern int fp_kind; + +extern void init_fpu(void); +extern void fpu_module_init(void); +extern void fp_free( + struct i386_fpsave_state * fps); +extern kern_return_t fpu_set_state( + thread_act_t thr_act, + struct i386_float_state * st); +extern kern_return_t fpu_get_state( + thread_act_t thr_act, + struct i386_float_state * st); +extern void fpnoextflt(void); +extern void fpextovrflt(void); +extern void fpexterrflt(void); +extern void fp_state_alloc(void); +extern void fpintr(void); +extern void fpflush(thread_act_t); + +#endif /* _I386_FPU_H_ */ diff --git a/osfmk/i386/gcc.s b/osfmk/i386/gcc.s new file mode 100644 index 
000000000..6537364ce --- /dev/null +++ b/osfmk/i386/gcc.s @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include + +ENTRY(__divsi3) + movl 4(%esp), %eax + cdq + idivl 8(%esp), %eax + ret + +ENTRY(__udivsi3) + movl 4(%esp), %eax + xorl %edx, %edx + divl 8(%esp), %eax + ret + diff --git a/osfmk/i386/gdb_defs.h b/osfmk/i386/gdb_defs.h new file mode 100644 index 000000000..c8942d570 --- /dev/null +++ b/osfmk/i386/gdb_defs.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _I386_GDB_DEFS_H_ +#define _I386_GDB_DEFS_H_ + +/* + * GDB DEPENDENT DEFINITIONS + * + * The following definitions match data descriptions in the gdb source file + * gdb/include/AT386/tm.h. They cannot be independently modified. 
+ */ + +typedef struct { + unsigned int eax; + unsigned int ecx; + unsigned int edx; + unsigned int ebx; + unsigned int esp; + unsigned int ebp; + unsigned int esi; + unsigned int edi; + unsigned int eip; + unsigned int efl; + unsigned int cs; + unsigned int ss; + unsigned int ds; + unsigned int es; + unsigned int fs; + unsigned int gs; + unsigned int reason; +} kgdb_regs_t; + +#define NUM_REGS 16 +#define REGISTER_BYTES (NUM_REGS * 4) + +#endif /* _I386_GDB_DEFS_H_ */ + diff --git a/osfmk/i386/gdt.c b/osfmk/i386/gdt.c new file mode 100644 index 000000000..8d9f57b57 --- /dev/null +++ b/osfmk/i386/gdt.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Global descriptor table. + */ +#include +#include +#include +#include +#include + +#ifdef MACH_BSD +extern int trap_unix_syscall(void), trap_mach25_syscall(void), + trap_machdep_syscall(void), syscall(void); +#endif + +struct fake_descriptor gdt[GDTSZ] = { +/* 0x000 */ { 0, 0, 0, 0 }, /* always NULL */ +/* 0x008 */ { LINEAR_KERNEL_ADDRESS + VM_MIN_ADDRESS, + (VM_MAX_KERNEL_ADDRESS-1-VM_MIN_KERNEL_ADDRESS)>>12, + SZ_32|SZ_G, + ACC_P|ACC_PL_K|ACC_CODE_R + }, /* kernel code */ +/* 0x010 */ { LINEAR_KERNEL_ADDRESS + VM_MIN_ADDRESS, + (VM_MAX_KERNEL_ADDRESS-1-VM_MIN_KERNEL_ADDRESS)>>12, + SZ_32|SZ_G, + ACC_P|ACC_PL_K|ACC_DATA_W + }, /* kernel data */ +/* 0x018 */ { LINEAR_KERNEL_ADDRESS + (unsigned int)ldt, + LDTSZ*sizeof(struct fake_descriptor)-1, + 0, + ACC_P|ACC_PL_K|ACC_LDT + }, /* local descriptor table */ +/* 0x020 */ { LINEAR_KERNEL_ADDRESS + (unsigned int)&ktss, + sizeof(struct i386_tss)-1, + 0, + ACC_P|ACC_PL_K|ACC_TSS + }, /* TSS for this processor */ +#ifdef MACH_BSD +/* 0x28 */ { (unsigned int) &trap_unix_syscall, + 
KERNEL_CS, + 0, /* no parameters */ + ACC_P|ACC_PL_U|ACC_CALL_GATE + }, +/* 0x30 */ { (unsigned int) &trap_mach25_syscall, + KERNEL_CS, + 0, /* no parameters */ + ACC_P|ACC_PL_U|ACC_CALL_GATE + }, +/* 0x38 */ { (unsigned int) &trap_machdep_syscall, + KERNEL_CS, + 0, /* no parameters */ + ACC_P|ACC_PL_U|ACC_CALL_GATE + }, +#else +/* 0x028 */ { 0, 0, 0, 0 }, /* per-thread LDT */ +/* 0x030 */ { 0, 0, 0, 0 }, /* per-thread TSS for IO bitmap */ +/* 0x038 */ { 0, 0, 0, 0 }, +#endif +/* 0x040 */ { 0, 0, 0, 0 }, +/* 0x048 */ { LINEAR_KERNEL_ADDRESS + (unsigned int)&cpu_data[0], + sizeof(cpu_data)-1, + SZ_32, + ACC_P|ACC_PL_K|ACC_DATA_W + }, /* per-CPU current thread address */ +#if MACH_KDB +/* 0x050 */ { LINEAR_KERNEL_ADDRESS + (unsigned int)&dbtss, + sizeof(struct i386_tss)-1, + 0, + ACC_P|ACC_PL_K|ACC_TSS + } /* TSS for this processor */ +#endif /* MACH_KDB */ +}; diff --git a/osfmk/i386/genassym.c b/osfmk/i386/genassym.c new file mode 100644 index 000000000..0acd35ea8 --- /dev/null +++ b/osfmk/i386/genassym.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#include +#include +#include +#include +#include + +/* + * Pass field offsets to assembly code. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if NCPUS > 1 +#include +#endif + +extern void kernel_preempt_check(void); +cpu_data_t cpu_data[NCPUS]; + +/* + * genassym.c is used to produce an + * assembly file which, intermingled with unuseful assembly code, + * has all the necessary definitions emitted. This assembly file is + * then postprocessed with sed to extract only these definitions + * and thus the final assyms.s is created. 
+ * + * This convoluted means is necessary since the structure alignment + * and packing may be different between the host machine and the + * target so we are forced into using the cross compiler to generate + * the values, but we cannot run anything on the target machine. + */ + +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER) + +#if 0 +#define DECLARE(SYM,VAL) \ + __asm("#DEFINITION#\t.set\t" SYM ",\t%0" : : "n" ((u_int)(VAL))) +#else +#define DECLARE(SYM,VAL) \ + __asm("#DEFINITION##define " SYM "\t%0" : : "n" ((u_int)(VAL))) +#endif + +int main( + int argc, + char ** argv); + +int +main( + int argc, + char **argv) +{ + + DECLARE("AST_URGENT", AST_URGENT); + +#if MACH_LDEBUG + /* + * XXX + */ +#define SIMPLE_LOCK_TAG 0x5353 +#define MUTEX_TAG 0x4d4d + DECLARE("TH_MUTEX_COUNT", offsetof(thread_t, mutex_count)); + DECLARE("SIMPLE_LOCK_TAG", SIMPLE_LOCK_TAG); + DECLARE("MUTEX_TAG", MUTEX_TAG); +#endif /* MACH_LDEBUG */ + DECLARE("TH_RECOVER", offsetof(thread_t, recover)); + DECLARE("TH_CONTINUATION", offsetof(thread_t, continuation)); + DECLARE("TH_TOP_ACT", offsetof(thread_t, top_act)); + DECLARE("TH_KERNEL_STACK", offsetof(thread_t, kernel_stack)); + + DECLARE("TASK_EMUL", offsetof(task_t, eml_dispatch)); + DECLARE("TASK_MACH_EXC_PORT", + offsetof(task_t, exc_actions[EXC_MACH_SYSCALL].port)); + + /* These fields are being added on demand */ + DECLARE("ACT_MACH_EXC_PORT", + offsetof(thread_act_t, exc_actions[EXC_MACH_SYSCALL].port)); + + DECLARE("ACT_THREAD", offsetof(thread_act_t, thread)); + DECLARE("ACT_TASK", offsetof(thread_act_t, task)); + DECLARE("ACT_PCB", offsetof(thread_act_t, mact.pcb)); + DECLARE("ACT_KLOADED", offsetof(thread_act_t, kernel_loaded)); + DECLARE("ACT_KLOADING", offsetof(thread_act_t, kernel_loading)); + DECLARE("ACT_LOWER", offsetof(thread_act_t, lower)); + DECLARE("ACT_MAP", offsetof(thread_act_t, map)); + + DECLARE("MAP_PMAP", offsetof(vm_map_t, pmap)); + + /* XXX Until rpc buffers move from kernel stack to activation */ 
+ DECLARE("RPC_CLIENT_BUF_SIZE", + 2 * RPC_KBUF_SIZE * sizeof(int) + + RPC_DESC_COUNT * sizeof(rpc_copy_state_data_t) + + 40 * sizeof(int)); + + DECLARE("HOST_NAME", offsetof(host_t, host_self)); + + DECLARE("DISP_MIN", offsetof(eml_dispatch_t, disp_min)); + DECLARE("DISP_COUNT", offsetof(eml_dispatch_t, disp_count)); + DECLARE("DISP_VECTOR", offsetof(eml_dispatch_t, disp_vector[0])); + +#define IKS ((size_t) (STACK_IKS(0))) + + DECLARE("KSS_EBX", IKS + offsetof(struct i386_kernel_state *, k_ebx)); + DECLARE("KSS_ESP", IKS + offsetof(struct i386_kernel_state *, k_esp)); + DECLARE("KSS_EBP", IKS + offsetof(struct i386_kernel_state *, k_ebp)); + DECLARE("KSS_EDI", IKS + offsetof(struct i386_kernel_state *, k_edi)); + DECLARE("KSS_ESI", IKS + offsetof(struct i386_kernel_state *, k_esi)); + DECLARE("KSS_EIP", IKS + offsetof(struct i386_kernel_state *, k_eip)); + + DECLARE("IKS_SIZE", sizeof(struct i386_kernel_state)); + DECLARE("IEL_SIZE", sizeof(struct i386_exception_link)); + + DECLARE("PCB_FPS", offsetof(pcb_t, ims.ifps)); + DECLARE("PCB_ISS", offsetof(pcb_t, iss)); + + DECLARE("FP_VALID", offsetof(struct i386_fpsave_state *,fp_valid)); + DECLARE("FP_SAVE_STATE", + offsetof(struct i386_fpsave_state *, fp_save_state)); + + DECLARE("R_CS", offsetof(struct i386_saved_state *, cs)); + DECLARE("R_SS", offsetof(struct i386_saved_state *, ss)); + DECLARE("R_UESP", offsetof(struct i386_saved_state *, uesp)); + DECLARE("R_EBP", offsetof(struct i386_saved_state *, ebp)); + DECLARE("R_EAX", offsetof(struct i386_saved_state *, eax)); + DECLARE("R_EBX", offsetof(struct i386_saved_state *, ebx)); + DECLARE("R_ECX", offsetof(struct i386_saved_state *, ecx)); + DECLARE("R_EDX", offsetof(struct i386_saved_state *, edx)); + DECLARE("R_ESI", offsetof(struct i386_saved_state *, esi)); + DECLARE("R_EDI", offsetof(struct i386_saved_state *, edi)); + DECLARE("R_TRAPNO", offsetof(struct i386_saved_state *, trapno)); + DECLARE("R_ERR", offsetof(struct i386_saved_state *, err)); + 
DECLARE("R_EFLAGS", offsetof(struct i386_saved_state *, efl)); + DECLARE("R_EIP", offsetof(struct i386_saved_state *, eip)); + DECLARE("R_CR2", offsetof(struct i386_saved_state *, cr2)); + DECLARE("ISS_SIZE", sizeof (struct i386_saved_state)); + + DECLARE("I_ECX", offsetof(struct i386_interrupt_state *, ecx)); + DECLARE("I_EIP", offsetof(struct i386_interrupt_state *, eip)); + DECLARE("I_CS", offsetof(struct i386_interrupt_state *, cs)); + DECLARE("I_EFL", offsetof(struct i386_interrupt_state *, efl)); + + DECLARE("NBPG", I386_PGBYTES); + DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); + DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); + DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); + DECLARE("LINEAR_KERNELBASE", LINEAR_KERNEL_ADDRESS); + DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); + + DECLARE("PDESHIFT", PDESHIFT); + DECLARE("PTESHIFT", PTESHIFT); + DECLARE("PTEMASK", PTEMASK); + + DECLARE("PTE_PFN", INTEL_PTE_PFN); + DECLARE("PTE_V", INTEL_PTE_VALID); + DECLARE("PTE_W", INTEL_PTE_WRITE); + DECLARE("PTE_INVALID", ~INTEL_PTE_VALID); + + DECLARE("IDTSZ", IDTSZ); + DECLARE("GDTSZ", GDTSZ); + DECLARE("LDTSZ", LDTSZ); + + DECLARE("KERNEL_CS", KERNEL_CS); + DECLARE("KERNEL_DS", KERNEL_DS); + DECLARE("USER_CS", USER_CS); + DECLARE("USER_DS", USER_DS); + DECLARE("KERNEL_TSS", KERNEL_TSS); + DECLARE("KERNEL_LDT", KERNEL_LDT); +#if MACH_KDB + DECLARE("DEBUG_TSS", DEBUG_TSS); +#endif /* MACH_KDB */ + + DECLARE("CPU_DATA", CPU_DATA); + DECLARE("CPD_ACTIVE_THREAD", + offsetof(cpu_data_t *, active_thread)); +#if MACH_RT + DECLARE("CPD_PREEMPTION_LEVEL", + offsetof(cpu_data_t *, preemption_level)); +#endif /* MACH_RT */ + DECLARE("CPD_INTERRUPT_LEVEL", + offsetof(cpu_data_t *, interrupt_level)); + DECLARE("CPD_SIMPLE_LOCK_COUNT", + offsetof(cpu_data_t *,simple_lock_count)); + + DECLARE("PTES_PER_PAGE", NPTES); + DECLARE("INTEL_PTE_KERNEL", INTEL_PTE_VALID|INTEL_PTE_WRITE); + + DECLARE("KERNELBASEPDE", + (LINEAR_KERNEL_ADDRESS >> PDESHIFT) * + sizeof(pt_entry_t)); + + 
DECLARE("TSS_ESP0", offsetof(struct i386_tss *, esp0)); + DECLARE("TSS_SS0", offsetof(struct i386_tss *, ss0)); + DECLARE("TSS_LDT", offsetof(struct i386_tss *, ldt)); + DECLARE("TSS_PDBR", offsetof(struct i386_tss *, cr3)); + DECLARE("TSS_LINK", offsetof(struct i386_tss *, back_link)); + + DECLARE("K_TASK_GATE", ACC_P|ACC_PL_K|ACC_TASK_GATE); + DECLARE("K_TRAP_GATE", ACC_P|ACC_PL_K|ACC_TRAP_GATE); + DECLARE("U_TRAP_GATE", ACC_P|ACC_PL_U|ACC_TRAP_GATE); + DECLARE("K_INTR_GATE", ACC_P|ACC_PL_K|ACC_INTR_GATE); + DECLARE("K_TSS", ACC_P|ACC_PL_K|ACC_TSS); + + /* + * usimple_lock fields + */ + DECLARE("USL_INTERLOCK", offsetof(usimple_lock_t, interlock)); + + DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); +#if NCPUS > 1 + DECLARE("MP_GDT", offsetof(struct mp_desc_table *, gdt[0])); + DECLARE("MP_IDT", offsetof(struct mp_desc_table *, idt[0])); +#endif /* NCPUS > 1 */ +#if !STAT_TIME + DECLARE("LOW_BITS", offsetof(struct timer *, low_bits)); + DECLARE("HIGH_BITS", offsetof(struct timer *, high_bits)); + DECLARE("HIGH_BITS_CHECK", offsetof(struct timer *, high_bits_check)); + DECLARE("TIMER_HIGH_UNIT", TIMER_HIGH_UNIT); + DECLARE("TH_SYS_TIMER", offsetof(struct timer *, system_timer)); + DECLARE("TH_USER_TIMER", offsetof(struct timer *, user_timer)); +#endif + + return (0); +} + + +/* Dummy to keep linker quiet */ +void +kernel_preempt_check(void) +{ +} diff --git a/osfmk/i386/hardclock.c b/osfmk/i386/hardclock.c new file mode 100644 index 000000000..31121460f --- /dev/null +++ b/osfmk/i386/hardclock.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Clock interrupt. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#if MACH_MP_DEBUG +#include /* for HZ */ +#endif /* MACH_MP_DEBUG */ + +extern char return_to_iret[]; + +#if TIME_STAMP && NCPUS > 1 +extern unsigned time_stamp; +unsigned old_time_stamp, time_stamp_cum, nstamps; + +/* + * If H/W provides a counter, record number of ticks and cumulated + * time stamps to know timestamps rate. + * This should go away when ALARMCLOCKS installed + */ +#define time_stamp_stat() \ + if (my_cpu == 0) \ + if (!old_time_stamp) { \ + old_time_stamp = time_stamp; \ + nstamps = 0; \ + } else { \ + nstamps++; \ + time_stamp_cum = (time_stamp - old_time_stamp); \ + } +#else /* TIME_STAMP && AT386 && NCPUS > 1 */ +#define time_stamp_stat() +#endif /* TIME_STAMP && AT386 && NCPUS > 1 */ + +#if MACH_KPROF +int masked_pc[NCPUS]; +int missed_clock[NCPUS]; +int detect_lost_tick = 0; +#endif /* MACH_KPROF */ + +#if MACH_MP_DEBUG +int masked_state_cnt[NCPUS]; +int masked_state_max = 10*HZ; +#endif /* MACH_MP_DEBUG */ + +/* + * In the interest of a fast clock interrupt service path, + * this routine should be folded into assembly language with + * a direct interrupt vector on the i386. The "pit" interrupt + * should always call the rtclock_intr() routine on the master + * processor. The return value of the rtclock_intr() routine + * indicates whether HZ rate clock processing should be + * performed. (On the Sequent, all slave processors will + * run at HZ rate). For now, we'll leave this routine in C + * (with TIME_STAMP, MACH_MP_DEBUG and MACH_KPROF code this + * routine is way too large for assembler anyway). 
+ */ + +#ifdef PARANOID_KDB +int paranoid_debugger = TRUE; +int paranoid_count = 1000; +int paranoid_current = 0; +int paranoid_cpu = 0; +#endif /* PARANOID_KDB */ + +void +hardclock(struct i386_interrupt_state *regs) /* saved registers */ +{ + int mycpu; + register unsigned pc; + register boolean_t usermode; + + mp_disable_preemption(); + mycpu = cpu_number(); + +#ifdef PARANOID_KDB + if (paranoid_cpu == mycpu && + paranoid_current++ >= paranoid_count) { + paranoid_current = 0; + if (paranoid_debugger) + Debugger("hardclock"); + } +#endif /* PARANOID_KDB */ + +#if 0 +#if MACH_MP_DEBUG + /* + * Increments counter of clock ticks handled under a masked state. + * Debugger() is called if masked state is kept during 1 sec. + * The counter is reset by splx() when ipl mask is set back to SPL0, + * and by spl0(). + */ + if (SPL_CMP_GT((old_ipl & 0xFF), SPL0)) { + if (masked_state_cnt[mycpu]++ >= masked_state_max) { + int max_save = masked_state_max; + + masked_state_cnt[mycpu] = 0; + masked_state_max = 0x7fffffff; + + if (ret_addr == return_to_iret) { + usermode = (regs->efl & EFL_VM) || + ((regs->cs & 0x03) != 0); + pc = (unsigned)regs->eip; + } else { + usermode = FALSE; + pc = (unsigned) + ((struct i386_interrupt_state *)&old_ipl)->eip; + } + printf("looping at high IPL, usermode=%d pc=0x%x\n", + usermode, pc); + Debugger(""); + + masked_state_cnt[mycpu] = 0; + masked_state_max = max_save; + } + } else + masked_state_cnt[mycpu] = 0; +#endif /* MACH_MP_DEBUG */ +#endif + +#if MACH_KPROF + /* + * If we were masked against the clock skip call + * to rtclock_intr(). When MACH_KPROF is set, the + * clock frequency of the master-cpu is confined + * to the HZ rate. + */ + if (SPL_CMP_LT(old_ipl & 0xFF, SPL7)) +#endif /* MACH_KPROF */ + /* + * The master processor executes the rtclock_intr() routine + * on every clock tick. The rtclock_intr() routine returns + * a zero value on a HZ tick boundary. 
+ */ + if (mycpu == master_cpu) { + if (rtclock_intr() != 0) { + mp_enable_preemption(); + return; + } + } + + /* + * The following code is executed at HZ rate by all processors + * in the system. This implies that the clock rate on slave + * processors must be HZ rate. + */ + + time_stamp_stat(); + +#if 0 + if (ret_addr == return_to_iret) { + /* + * A kernel-loaded task executing within itself will look like + * "kernel mode", here. This is correct with syscalls + * implemented using migrating threads, because it means that + * the time spent in the server by a client thread will be + * treated as "system" time for the client thread (and nothing + * for the server). This conforms to the CPU reporting for an + * integrated kernel. + */ +#endif + usermode = (regs->efl & EFL_VM) || ((regs->cs & 0x03) != 0); + pc = (unsigned)regs->eip; +#if 0 + } else { + usermode = FALSE; + pc = (unsigned)((struct i386_interrupt_state *)&old_ipl)->eip; + } +#endif + +#if MACH_KPROF + /* + * If we were masked against the clock, just memorize pc + * and the fact that the clock interrupt is delayed + */ + if (SPL_CMP_GE((old_ipl & 0xFF), SPL7)) { + assert(!usermode); + if (missed_clock[mycpu]++ && detect_lost_tick > 1) + Debugger("Mach_KPROF"); + masked_pc[mycpu] = pc; + } else +#endif /* MACH_KPROF */ + + hertz_tick(usermode, pc); + +#if NCPUS >1 + /* + * Instead of having the master processor interrupt + * all active processors, each processor in turn interrupts + * the next active one. This avoids all slave processors + * accessing the same R/W data simultaneously. 
+ */ + slave_clock(); +#endif /* NCPUS >1 && AT386 */ + + mp_enable_preemption(); +} + +#if MACH_KPROF +void +delayed_clock(void) +{ + int i; + int my_cpu; + + mp_disable_preemption(); + my_cpu = cpu_number(); + + if (missed_clock[my_cpu] > 1 && detect_lost_tick) + printf("hardclock: missed %d clock interrupt(s) at %x\n", + missed_clock[my_cpu]-1, masked_pc[my_cpu]); + if (my_cpu == master_cpu) { + i = rtclock_intr(); + assert(i == 0); + } + hertz_tick(0, masked_pc[my_cpu]); + missed_clock[my_cpu] = 0; + + mp_enable_preemption(); +} +#endif /* MACH_KPROF */ diff --git a/osfmk/i386/hardclock_entries.h b/osfmk/i386/hardclock_entries.h new file mode 100644 index 000000000..80001989d --- /dev/null +++ b/osfmk/i386/hardclock_entries.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:36 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:37 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.1 1994/09/23 01:54:13 ezf + * change marker to not FREE + * [1994/09/22 21:22:49 ezf] + * + * Revision 1.1.2.3 1993/09/17 21:35:16 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:28:26 robert] + * + * Revision 1.1.2.2 1993/08/09 19:39:51 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:44:52 dswartz] + * + * $EndLog$ + */ + +extern void hardclock(struct i386_interrupt_state *regs); +extern void delayed_clock(void); diff --git a/osfmk/i386/hi_res_clock.h b/osfmk/i386/hi_res_clock.h new file mode 100644 index 000000000..f9c006310 --- /dev/null +++ b/osfmk/i386/hi_res_clock.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MACHINE_HI_RES_CLOCK_H_ +#define _MACHINE_HI_RES_CLOCK_H_ + +#include +#include + +extern vm_offset_t hi_res_clk_mmap( + dev_t dev, + vm_offset_t off, + int prot); + +extern void clock_thread_386( + int loop_iterations_per_tick, + int *high_res_clock); + +#endif /* _MACHINE_HI_RES_CLOCK_H_ */ diff --git a/osfmk/i386/hi_res_clock_map.c b/osfmk/i386/hi_res_clock_map.c new file mode 100644 index 000000000..66ca626ff --- /dev/null +++ b/osfmk/i386/hi_res_clock_map.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:36 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:37 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.1 1994/09/23 01:54:44 ezf + * change marker to not FREE + * [1994/09/22 21:23:10 ezf] + * + * Revision 1.1.2.2 1993/08/24 09:39:55 rod + * Created for iX86 common high resolution clock common code. CR #9400. 
+ * [1993/08/17 11:26:08 rod] + * + * $EndLog$ + */ + +#include +#include + +extern int *high_res_clock; + +vm_offset_t +hi_res_clk_mmap( + dev_t dev, + vm_offset_t off, + int prot) +{ + if (prot & VM_PROT_WRITE) return (-1); + return (i386_btop(pmap_extract(pmap_kernel(), + (vm_offset_t) high_res_clock))); +} diff --git a/osfmk/i386/hw_lock_types.h b/osfmk/i386/hw_lock_types.h new file mode 100644 index 000000000..57ba52cbc --- /dev/null +++ b/osfmk/i386/hw_lock_types.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Machine-dependent simple locks for the i386. + */ + +#ifndef _I386_HW_LOCK_TYPES_H_ +#define _I386_HW_LOCK_TYPES_H_ + +/* + * The "hardware lock". Low-level locking primitives that + * MUST be exported by machine-dependent code; this abstraction + * must provide atomic, non-blocking mutual exclusion that + * is invulnerable to uniprocessor or SMP races, interrupts, + * traps or any other events. + * + * hw_lock_data_t machine-specific lock data structure + * hw_lock_t pointer to hw_lock_data_t + * + * An implementation must export these data types and must + * also provide routines to manipulate them (see prototypes, + * below). These routines may be external, inlined, optimized, + * or whatever, based on the kernel configuration. In the event + * that the implementation wishes to define its own prototypes, + * macros, or inline functions, it may define LOCK_HW_PROTOS + * to disable the definitions below. + * + * Mach does not expect these locks to support statistics, + * debugging, tracing or any other complexity. In certain + * configurations, Mach will build other locking constructs + * on top of this one. A correctly functioning Mach port need + * only implement these locks to be successful. However, + * greater efficiency may be gained with additional machine- + * dependent optimizations for the locking constructs defined + * later in kern/lock.h.. 
+ */ +typedef volatile char hw_lock_data_t; +typedef hw_lock_data_t *hw_lock_t; +#define hw_lock_addr(hwl) (&(hwl)) + + +#endif /* _I386_HW_LOCK_TYPES_H_ */ + diff --git a/osfmk/i386/i386_lock.s b/osfmk/i386/i386_lock.s new file mode 100644 index 000000000..db7420db5 --- /dev/null +++ b/osfmk/i386/i386_lock.s @@ -0,0 +1,814 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +#include +#include +#include +#include +#include +#include +#include + + +/* + * When performance isn't the only concern, it's + * nice to build stack frames... 
+ */ +#define BUILD_STACK_FRAMES ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB) + +#if BUILD_STACK_FRAMES + +#define L_PC 4(%ebp) +#define L_ARG0 8(%ebp) +#define L_ARG1 12(%ebp) + +#define SWT_HI -4(%ebp) +#define SWT_LO -8(%ebp) +#define MISSED -12(%ebp) + +#else /* BUILD_STACK_FRAMES */ + +#undef FRAME +#undef EMARF +#define FRAME +#define EMARF +#define L_PC (%esp) +#define L_ARG0 4(%esp) +#define L_ARG1 8(%esp) + +#endif /* BUILD_STACK_FRAMES */ + + +#define M_ILK (%edx) +#define M_LOCKED 1(%edx) +#define M_WAITERS 2(%edx) +#if MACH_LDEBUG +#define M_TYPE 4(%edx) +#define M_PC 8(%edx) +#define M_THREAD 12(%edx) +#endif /* MACH_LDEBUG */ + +#include +#if (NCPUS > 1) +#define CX(addr,reg) addr(,reg,4) +#else +#define CPU_NUMBER(reg) +#define CX(addr,reg) addr +#endif /* (NCPUS > 1) */ + +#if MACH_LDEBUG +/* + * Routines for general lock debugging. + */ +#define S_TYPE 4(%edx) +#define S_PC 8(%edx) +#define S_THREAD 12(%edx) +#define S_DURATIONH 16(%edx) +#define S_DURATIONL 20(%edx) + +/* + * Checks for expected lock types and calls "panic" on + * mismatch. Detects calls to Mutex functions with + * type simplelock and vice versa. + */ +#define CHECK_MUTEX_TYPE() \ + cmpl $MUTEX_TAG, M_TYPE ; \ + je 1f ; \ + pushl $2f ; \ + call EXT(panic) ; \ + hlt ; \ + .data ; \ +2: String "not a mutex!" ; \ + .text ; \ +1: + +#define CHECK_SIMPLE_LOCK_TYPE() \ + cmpl $SIMPLE_LOCK_TAG,S_TYPE ; \ + je 1f ; \ + pushl $2f ; \ + call EXT(panic) ; \ + hlt ; \ + .data ; \ +2: String "not a simple lock!" ; \ + .text ; \ +1: + +/* + * If one or more simplelocks are currently held by a thread, + * an attempt to acquire a mutex will cause this check to fail + * (since a mutex lock may context switch, holding a simplelock + * is not a good thing). 
+ */ +#if 0 /*MACH_RT - 11/12/99 - lion@apple.com disable check for now*/ +#define CHECK_PREEMPTION_LEVEL() \ + movl $CPD_PREEMPTION_LEVEL,%eax ; \ + cmpl $0,%gs:(%eax) ; \ + je 1f ; \ + pushl $2f ; \ + call EXT(panic) ; \ + hlt ; \ + .data ; \ +2: String "preemption_level != 0!" ; \ + .text ; \ +1: +#else /* MACH_RT */ +#define CHECK_PREEMPTION_LEVEL() +#endif /* MACH_RT */ + +#define CHECK_NO_SIMPLELOCKS() \ + movl $CPD_SIMPLE_LOCK_COUNT,%eax ; \ + cmpl $0,%gs:(%eax) ; \ + je 1f ; \ + pushl $2f ; \ + call EXT(panic) ; \ + hlt ; \ + .data ; \ +2: String "simple_locks_held!" ; \ + .text ; \ +1: + +/* + * Verifies return to the correct thread in "unlock" situations. + */ +#define CHECK_THREAD(thd) \ + movl $CPD_ACTIVE_THREAD,%eax ; \ + movl %gs:(%eax),%ecx ; \ + testl %ecx,%ecx ; \ + je 1f ; \ + cmpl %ecx,thd ; \ + je 1f ; \ + pushl $2f ; \ + call EXT(panic) ; \ + hlt ; \ + .data ; \ +2: String "wrong thread!" ; \ + .text ; \ +1: + +#define CHECK_MYLOCK(thd) \ + movl $CPD_ACTIVE_THREAD,%eax ; \ + movl %gs:(%eax),%ecx ; \ + testl %ecx,%ecx ; \ + je 1f ; \ + cmpl %ecx,thd ; \ + jne 1f ; \ + pushl $2f ; \ + call EXT(panic) ; \ + hlt ; \ + .data ; \ +2: String "mylock attempt!" ; \ + .text ; \ +1: + +#define METER_SIMPLE_LOCK_LOCK(reg) \ + pushl reg ; \ + call EXT(meter_simple_lock) ; \ + popl reg + +#define METER_SIMPLE_LOCK_UNLOCK(reg) \ + pushl reg ; \ + call EXT(meter_simple_unlock) ; \ + popl reg + +#else /* MACH_LDEBUG */ +#define CHECK_MUTEX_TYPE() +#define CHECK_SIMPLE_LOCK_TYPE +#define CHECK_THREAD(thd) +#define CHECK_PREEMPTION_LEVEL() +#define CHECK_NO_SIMPLELOCKS() +#define CHECK_MYLOCK(thd) +#define METER_SIMPLE_LOCK_LOCK(reg) +#define METER_SIMPLE_LOCK_UNLOCK(reg) +#endif /* MACH_LDEBUG */ + + +/* + * void hw_lock_init(hw_lock_t) + * + * Initialize a hardware lock. 
+ */ +ENTRY(hw_lock_init) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + xorl %eax,%eax + movb %al,0(%edx) /* clear the lock */ + EMARF + ret + +/* + * void hw_lock_lock(hw_lock_t) + * + * Acquire lock, spinning until it becomes available. + * MACH_RT: also return with preemption disabled. + */ +ENTRY(hw_lock_lock) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + +1: DISABLE_PREEMPTION(%eax) + movb $1,%cl + xchgb 0(%edx),%cl /* try to acquire the HW lock */ + testb %cl,%cl /* success? */ + jne 3f + + EMARF /* if yes, then nothing left to do */ + ret + +3: ENABLE_PREEMPTION(%eax) /* no reason we can't be preemptable now */ + + movb $1,%cl +2: testb %cl,0(%edx) /* spin checking lock value in cache */ + jne 2b /* non-zero means locked, keep spinning */ + jmp 1b /* zero means unlocked, try to grab it */ + +/* + * void hw_lock_unlock(hw_lock_t) + * + * Unconditionally release lock. + * MACH_RT: release preemption level. + */ +ENTRY(hw_lock_unlock) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + xorl %eax,%eax + xchgb 0(%edx),%al /* clear the lock... a mov instruction */ + /* ...might be cheaper and less paranoid */ + ENABLE_PREEMPTION(%eax) + EMARF + ret + +/* + * unsigned int hw_lock_try(hw_lock_t) + * MACH_RT: returns with preemption disabled on success. + */ +ENTRY(hw_lock_try) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + + DISABLE_PREEMPTION(%eax) + movb $1,%cl + xchgb 0(%edx),%cl /* try to acquire the HW lock */ + testb %cl,%cl /* success? */ + jne 1f /* if yes, let the caller know */ + + movl $1,%eax /* success */ + EMARF + ret + +1: ENABLE_PREEMPTION(%eax) /* failure: release preemption... */ + xorl %eax,%eax /* ...and return failure */ + EMARF + ret + +/* + * unsigned int hw_lock_held(hw_lock_t) + * MACH_RT: doesn't change preemption state. + * N.B. Racy, of course. 
+ */ +ENTRY(hw_lock_held) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + + movb $1,%cl + testb %cl,0(%edx) /* check lock value */ + jne 1f /* non-zero means locked */ + xorl %eax,%eax /* tell caller: lock wasn't locked */ + EMARF + ret + +1: movl $1,%eax /* tell caller: lock was locked */ + EMARF + ret + + + +#if 0 + + +ENTRY(_usimple_lock_init) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + xorl %eax,%eax + movb %al,USL_INTERLOCK(%edx) /* unlock the HW lock */ + EMARF + ret + +ENTRY(_simple_lock) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + + CHECK_SIMPLE_LOCK_TYPE() + + DISABLE_PREEMPTION(%eax) + +sl_get_hw: + movb $1,%cl + xchgb USL_INTERLOCK(%edx),%cl /* try to acquire the HW lock */ + testb %cl,%cl /* did we succeed? */ + +#if MACH_LDEBUG + je 5f + CHECK_MYLOCK(S_THREAD) + jmp sl_get_hw +5: +#else /* MACH_LDEBUG */ + jne sl_get_hw /* no, try again */ +#endif /* MACH_LDEBUG */ + +#if MACH_LDEBUG + movl L_PC,%ecx + movl %ecx,S_PC + movl $CPD_ACTIVE_THREAD,%eax + movl %gs:(%eax),%ecx + movl %ecx,S_THREAD + incl CX(EXT(simple_lock_count),%eax) +#if 0 + METER_SIMPLE_LOCK_LOCK(%edx) +#endif +#if NCPUS == 1 + pushf + pushl %edx + cli + call EXT(lock_stack_push) + popl %edx + popfl +#endif /* NCPUS == 1 */ +#endif /* MACH_LDEBUG */ + + EMARF + ret + +ENTRY(_simple_lock_try) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + + CHECK_SIMPLE_LOCK_TYPE() + + DISABLE_PREEMPTION(%eax) + + movb $1,%cl + xchgb USL_INTERLOCK(%edx),%cl /* try to acquire the HW lock */ + testb %cl,%cl /* did we succeed? 
*/ + jne 1f /* no, return failure */ + +#if MACH_LDEBUG + movl L_PC,%ecx + movl %ecx,S_PC + movl $CPD_ACTIVE_THREAD,%eax + movl %gs:(%eax),%ecx + movl %ecx,S_THREAD + incl CX(EXT(simple_lock_count),%eax) +#if 0 + METER_SIMPLE_LOCK_LOCK(%edx) +#endif +#if NCPUS == 1 + pushf + pushl %edx + cli + call EXT(lock_stack_push) + popl %edx + popfl +#endif /* NCPUS == 1 */ +#endif /* MACH_LDEBUG */ + + movl $1,%eax /* return success */ + + EMARF + ret + +1: + ENABLE_PREEMPTION(%eax) + + xorl %eax,%eax /* and return failure */ + + EMARF + ret + +ENTRY(_simple_unlock) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + + CHECK_SIMPLE_LOCK_TYPE() + CHECK_THREAD(S_THREAD) + +#if MACH_LDEBUG + xorl %eax,%eax + movl %eax,S_THREAD /* disown thread */ + MP_DISABLE_PREEMPTION(%eax) + CPU_NUMBER(%eax) + decl CX(EXT(simple_lock_count),%eax) + MP_ENABLE_PREEMPTION(%eax) +#if 0 + METER_SIMPLE_LOCK_UNLOCK(%edx) +#endif +#if NCPUS == 1 + pushf + pushl %edx + cli + call EXT(lock_stack_pop) + popl %edx + popfl +#endif /* NCPUS == 1 */ +#endif /* MACH_LDEBUG */ + + xorb %cl,%cl + xchgb USL_INTERLOCK(%edx),%cl /* unlock the HW lock */ + + ENABLE_PREEMPTION(%eax) + + EMARF + ret + +#endif /* 0 */ + + +ENTRY(mutex_init) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + xorl %eax,%eax + movb %al,M_ILK /* clear interlock */ + movb %al,M_LOCKED /* clear locked flag */ + movw %ax,M_WAITERS /* init waiter count */ + +#if MACH_LDEBUG + movl $MUTEX_TAG,M_TYPE /* set lock type */ + movl %eax,M_PC /* init caller pc */ + movl %eax,M_THREAD /* and owning thread */ +#endif +#if ETAP_LOCK_TRACE + movl L_ARG1,%ecx /* fetch event type */ + pushl %ecx /* push event type */ + pushl %edx /* push mutex address */ + call EXT(etap_mutex_init) /* init ETAP data */ + addl $8,%esp +#endif /* ETAP_LOCK_TRACE */ + + EMARF + ret + +ENTRY(_mutex_lock) + FRAME + +#if ETAP_LOCK_TRACE + subl $12,%esp /* make room for locals */ + movl $0,SWT_HI /* set wait time to zero (HI) */ + movl $0,SWT_LO /* set wait time to zero 
(LO) */ + movl $0,MISSED /* clear local miss marker */ +#endif /* ETAP_LOCK_TRACE */ + + movl L_ARG0,%edx /* fetch lock pointer */ + + CHECK_MUTEX_TYPE() + CHECK_NO_SIMPLELOCKS() + CHECK_PREEMPTION_LEVEL() + +ml_retry: + DISABLE_PREEMPTION(%eax) + +ml_get_hw: + movb $1,%cl + xchgb %cl,M_ILK + testb %cl,%cl /* did we succeed? */ + jne ml_get_hw /* no, try again */ + +/* +/ Beware of a race between this code path and the inline ASM fast-path locking +/ sequence which attempts to lock a mutex by directly setting the locked flag +/ +*/ + + movb $1,%cl + xchgb %cl,M_LOCKED /* try to set locked flag */ + testb %cl,%cl /* is the mutex locked? */ + jne ml_fail /* yes, we lose */ + +#if MACH_LDEBUG + movl L_PC,%ecx + movl %ecx,M_PC + movl $CPD_ACTIVE_THREAD,%eax + movl %gs:(%eax),%ecx + movl %ecx,M_THREAD + testl %ecx,%ecx + je 3f + incl TH_MUTEX_COUNT(%ecx) +3: +#endif + + xorb %cl,%cl + xchgb %cl,M_ILK + + ENABLE_PREEMPTION(%eax) + +#if ETAP_LOCK_TRACE + movl L_PC,%eax /* fetch pc */ + pushl SWT_LO /* push wait time (low) */ + pushl SWT_HI /* push wait time (high) */ + pushl %eax /* push pc */ + pushl %edx /* push mutex address */ + call EXT(etap_mutex_hold) /* collect hold timestamp */ + addl $16+12,%esp /* clean up stack, adjusting for locals */ +#endif /* ETAP_LOCK_TRACE */ + + EMARF + ret + +ml_fail: +#if ETAP_LOCK_TRACE + cmp $0,MISSED /* did we already take a wait timestamp? */ + jne ml_block /* yup. 
carry-on */ + pushl %edx /* push mutex address */ + call EXT(etap_mutex_miss) /* get wait timestamp */ + movl %eax,SWT_HI /* set wait time (high word) */ + movl %edx,SWT_LO /* set wait time (low word) */ + popl %edx /* clean up stack */ + movl $1,MISSED /* mark wait timestamp as taken */ +#endif /* ETAP_LOCK_TRACE */ + +ml_block: + CHECK_MYLOCK(M_THREAD) + pushl %edx /* push mutex address */ + call EXT(mutex_lock_wait) /* wait for the lock */ + addl $4,%esp + movl L_ARG0,%edx /* refetch lock pointer */ + jmp ml_retry /* and try again */ + +ENTRY(_mutex_try) + FRAME + +#if ETAP_LOCK_TRACE + subl $8,%esp /* make room for locals */ + movl $0,SWT_HI /* set wait time to zero (HI) */ + movl $0,SWT_LO /* set wait time to zero (LO) */ +#endif /* ETAP_LOCK_TRACE */ + + movl L_ARG0,%edx /* fetch lock pointer */ + + CHECK_MUTEX_TYPE() + CHECK_NO_SIMPLELOCKS() + + xorl %eax,%eax + movb $1,%al /* locked value for mutex */ + xchgb %al,M_LOCKED /* swap locked values */ + xorb $1,%al /* generate return value */ + +#if MACH_LDEBUG || ETAP_LOCK_TRACE + testl %eax,%eax /* did we succeed? 
*/ + je 2f /* no, skip */ +#endif + +#if MACH_LDEBUG + movl L_PC,%ecx + movl %ecx,M_PC + movl $CPD_ACTIVE_THREAD,%ecx + movl %gs:(%ecx),%ecx + movl %ecx,M_THREAD + testl %ecx,%ecx + je 1f + incl TH_MUTEX_COUNT(%ecx) +1: +#endif + +#if ETAP_LOCK_TRACE + movl L_PC,%eax /* fetch pc */ + pushl SWT_LO /* push wait time (low) */ + pushl SWT_HI /* push wait time (high) */ + pushl %eax /* push pc */ + pushl %edx /* push mutex address */ + call EXT(etap_mutex_hold) /* get start hold timestamp */ + addl $16,%esp /* clean up stack, adjusting for locals */ + movl $1,%eax /* put back successful return value */ +#endif /* ETAP_LOCK_TRACE */ + +#if MACH_LDEBUG || ETAP_LOCK_TRACE +2: +#if ETAP_LOCK_TRACE + addl $8,%esp /* pop stack claimed on entry */ +#endif +#endif + + EMARF + ret + +ENTRY(mutex_unlock) + FRAME + movl L_ARG0,%edx /* fetch lock pointer */ + +#if ETAP_LOCK_TRACE + pushl %edx /* push mutex address */ + call EXT(etap_mutex_unlock) /* collect ETAP data */ + popl %edx /* restore mutex address */ +#endif /* ETAP_LOCK_TRACE */ + + CHECK_MUTEX_TYPE() + CHECK_THREAD(M_THREAD) + + DISABLE_PREEMPTION(%eax) + +mu_get_hw: + movb $1,%cl + xchgb %cl,M_ILK + testb %cl,%cl /* did we succeed? */ + jne mu_get_hw /* no, try again */ + + cmpw $0,M_WAITERS /* are there any waiters? 
*/ + jne mu_wakeup /* yes, more work to do */ + +mu_doit: +#if MACH_LDEBUG + xorl %eax,%eax + movl %eax,M_THREAD /* disown thread */ + movl $CPD_ACTIVE_THREAD,%eax + movl %gs:(%eax),%ecx + testl %ecx,%ecx + je 0f + decl TH_MUTEX_COUNT(%ecx) +0: +#endif + + xorb %cl,%cl + xchgb %cl,M_LOCKED /* unlock the mutex */ + + xorb %cl,%cl + xchgb %cl,M_ILK + + ENABLE_PREEMPTION(%eax) + + EMARF + ret + +mu_wakeup: + pushl %edx /* push mutex address */ + call EXT(mutex_unlock_wakeup)/* yes, wake a thread */ + addl $4,%esp + movl L_ARG0,%edx /* refetch lock pointer */ + jmp mu_doit + +ENTRY(interlock_unlock) + FRAME + movl L_ARG0,%edx + + xorb %cl,%cl + xchgb %cl,M_ILK + + ENABLE_PREEMPTION(%eax) + + EMARF + ret + + +ENTRY(_disable_preemption) +#if MACH_RT + _DISABLE_PREEMPTION(%eax) +#endif /* MACH_RT */ + ret + +ENTRY(_enable_preemption) +#if MACH_RT +#if MACH_ASSERT + movl $CPD_PREEMPTION_LEVEL,%eax + cmpl $0,%gs:(%eax) + jg 1f + pushl %gs:(%eax) + pushl $2f + call EXT(panic) + hlt + .data +2: String "_enable_preemption: preemption_level(%d) < 0!" + .text +1: +#endif /* MACH_ASSERT */ + _ENABLE_PREEMPTION(%eax) +#endif /* MACH_RT */ + ret + +ENTRY(_enable_preemption_no_check) +#if MACH_RT +#if MACH_ASSERT + movl $CPD_PREEMPTION_LEVEL,%eax + cmpl $0,%gs:(%eax) + jg 1f + pushl $2f + call EXT(panic) + hlt + .data +2: String "_enable_preemption_no_check: preemption_level <= 0!" + .text +1: +#endif /* MACH_ASSERT */ + _ENABLE_PREEMPTION_NO_CHECK(%eax) +#endif /* MACH_RT */ + ret + + +ENTRY(_mp_disable_preemption) +#if MACH_RT && NCPUS > 1 + _DISABLE_PREEMPTION(%eax) +#endif /* MACH_RT && NCPUS > 1*/ + ret + +ENTRY(_mp_enable_preemption) +#if MACH_RT && NCPUS > 1 +#if MACH_ASSERT + movl $CPD_PREEMPTION_LEVEL,%eax + cmpl $0,%gs:(%eax) + jg 1f + pushl %gs:(%eax) + pushl $2f + call EXT(panic) + hlt + .data +2: String "_mp_enable_preemption: preemption_level (%d) <= 0!" 
+ .text +1: +#endif /* MACH_ASSERT */ + _ENABLE_PREEMPTION(%eax) +#endif /* MACH_RT && NCPUS > 1 */ + ret + +ENTRY(_mp_enable_preemption_no_check) +#if MACH_RT && NCPUS > 1 +#if MACH_ASSERT + movl $CPD_PREEMPTION_LEVEL,%eax + cmpl $0,%gs:(%eax) + jg 1f + pushl $2f + call EXT(panic) + hlt + .data +2: String "_mp_enable_preemption_no_check: preemption_level <= 0!" + .text +1: +#endif /* MACH_ASSERT */ + _ENABLE_PREEMPTION_NO_CHECK(%eax) +#endif /* MACH_RT && NCPUS > 1 */ + ret + + +ENTRY(i_bit_set) + movl S_ARG0,%edx + movl S_ARG1,%eax + lock + bts %dl,(%eax) + ret + +ENTRY(i_bit_clear) + movl S_ARG0,%edx + movl S_ARG1,%eax + lock + btr %dl,(%eax) + ret + +ENTRY(bit_lock) + movl S_ARG0,%ecx + movl S_ARG1,%eax +1: + lock + bts %ecx,(%eax) + jb 1b + ret + +ENTRY(bit_lock_try) + movl S_ARG0,%ecx + movl S_ARG1,%eax + lock + bts %ecx,(%eax) + jb bit_lock_failed + ret /* %eax better not be null ! */ +bit_lock_failed: + xorl %eax,%eax + ret + +ENTRY(bit_unlock) + movl S_ARG0,%ecx + movl S_ARG1,%eax + lock + btr %ecx,(%eax) + ret diff --git a/osfmk/i386/idt.s b/osfmk/i386/idt.s new file mode 100644 index 000000000..e68a8f4fd --- /dev/null +++ b/osfmk/i386/idt.s @@ -0,0 +1,409 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +#include +#include +#include + +/* + * Interrupt descriptor table and code vectors for it. + */ +#define IDT_BASE_ENTRY(vec,seg,type) \ + .data ;\ + .long vec ;\ + .word seg ;\ + .byte 0 ;\ + .byte type ;\ + .text + +#define IDT_ENTRY(vec,type) IDT_BASE_ENTRY(vec,KERNEL_CS,type) + +/* + * No error code. Clear error code and push trap number. + */ +#define EXCEPTION(n,name) \ + IDT_ENTRY(EXT(name),K_TRAP_GATE);\ +Entry(name) ;\ + pushl $0 ;\ + pushl $(n) ;\ + jmp EXT(alltraps) + +/* + * Interrupt from user. Clear error code and push trap number. + */ +#define EXCEP_USR(n,name) \ + IDT_ENTRY(EXT(name),U_TRAP_GATE);\ +Entry(name) ;\ + pushl $0 ;\ + pushl $(n) ;\ + jmp EXT(alltraps) + +/* + * Special interrupt code. 
+ */ +#define EXCEP_SPC(n,name) \ + IDT_ENTRY(EXT(name),K_TRAP_GATE) + +/* + * Special interrupt code from user. + */ +#define EXCEP_SPC_USR(n,name) \ + IDT_ENTRY(EXT(name),U_TRAP_GATE) + +/* + * Extra-special interrupt code. Note that no offset may be + * specified in a task gate descriptor, so name is ignored. + */ +#define EXCEP_TASK(n,name) \ + IDT_BASE_ENTRY(0,DEBUG_TSS,K_TASK_GATE) + +/* + * Error code has been pushed. Push trap number. + */ +#define EXCEP_ERR(n,name) \ + IDT_ENTRY(EXT(name),K_TRAP_GATE);\ +Entry(name) ;\ + pushl $(n) ;\ + jmp EXT(alltraps) + +/* + * Interrupt. + */ +#define INTERRUPT(n) \ + IDT_ENTRY(0f,K_INTR_GATE) ;\ +0: ;\ + pushl %eax ;\ + movl $(n),%eax ;\ + jmp EXT(all_intrs) + + .data +Entry(idt) + .text + +EXCEPTION(0x00,t_zero_div) +EXCEP_SPC(0x01,t_debug) +INTERRUPT(0x02) /* NMI */ +EXCEP_USR(0x03,t_int3) +EXCEP_USR(0x04,t_into) +EXCEP_USR(0x05,t_bounds) +EXCEPTION(0x06,t_invop) +EXCEPTION(0x07,t_nofpu) +#if MACH_KDB +EXCEP_TASK(0x08,db_task_dbl_fault) +#else +EXCEPTION(0x08,a_dbl_fault) +#endif +EXCEPTION(0x09,a_fpu_over) +EXCEPTION(0x0a,a_inv_tss) +EXCEP_SPC(0x0b,t_segnp) +#if MACH_KDB +EXCEP_TASK(0x0c,db_task_stk_fault) +#else +EXCEP_ERR(0x0c,t_stack_fault) +#endif +EXCEP_SPC(0x0d,t_gen_prot) +EXCEP_SPC(0x0e,t_page_fault) +EXCEPTION(0x0f,t_trap_0f) +EXCEPTION(0x10,t_fpu_err) +EXCEPTION(0x11,t_trap_11) +EXCEPTION(0x12,t_trap_12) +EXCEPTION(0x13,t_trap_13) +EXCEPTION(0x14,t_trap_14) +EXCEPTION(0x15,t_trap_15) +EXCEPTION(0x16,t_trap_16) +EXCEPTION(0x17,t_trap_17) +EXCEPTION(0x18,t_trap_18) +EXCEPTION(0x19,t_trap_19) +EXCEPTION(0x1a,t_trap_1a) +EXCEPTION(0x1b,t_trap_1b) +EXCEPTION(0x1c,t_trap_1c) +EXCEPTION(0x1d,t_trap_1d) +EXCEPTION(0x1e,t_trap_1e) +EXCEPTION(0x1f,t_trap_1f) + +INTERRUPT(0x20) +INTERRUPT(0x21) +INTERRUPT(0x22) +INTERRUPT(0x23) +INTERRUPT(0x24) +INTERRUPT(0x25) +INTERRUPT(0x26) +INTERRUPT(0x27) +INTERRUPT(0x28) +INTERRUPT(0x29) +INTERRUPT(0x2a) +INTERRUPT(0x2b) +INTERRUPT(0x2c) +INTERRUPT(0x2d) +INTERRUPT(0x2e) 
/* Hardware interrupt vectors 0x2f-0x7f. */
INTERRUPT(0x2f)

INTERRUPT(0x30)
INTERRUPT(0x31)
INTERRUPT(0x32)
INTERRUPT(0x33)
INTERRUPT(0x34)
INTERRUPT(0x35)
INTERRUPT(0x36)
INTERRUPT(0x37)
INTERRUPT(0x38)
INTERRUPT(0x39)
INTERRUPT(0x3a)
INTERRUPT(0x3b)
INTERRUPT(0x3c)
INTERRUPT(0x3d)
INTERRUPT(0x3e)
INTERRUPT(0x3f)

INTERRUPT(0x40)
INTERRUPT(0x41)
INTERRUPT(0x42)
INTERRUPT(0x43)
INTERRUPT(0x44)
INTERRUPT(0x45)
INTERRUPT(0x46)
INTERRUPT(0x47)
INTERRUPT(0x48)
INTERRUPT(0x49)
INTERRUPT(0x4a)
INTERRUPT(0x4b)
INTERRUPT(0x4c)
INTERRUPT(0x4d)
INTERRUPT(0x4e)
INTERRUPT(0x4f)

INTERRUPT(0x50)
INTERRUPT(0x51)
INTERRUPT(0x52)
INTERRUPT(0x53)
INTERRUPT(0x54)
INTERRUPT(0x55)
INTERRUPT(0x56)
INTERRUPT(0x57)
INTERRUPT(0x58)
INTERRUPT(0x59)
INTERRUPT(0x5a)
INTERRUPT(0x5b)
INTERRUPT(0x5c)
INTERRUPT(0x5d)
INTERRUPT(0x5e)
INTERRUPT(0x5f)

INTERRUPT(0x60)
INTERRUPT(0x61)
INTERRUPT(0x62)
INTERRUPT(0x63)
INTERRUPT(0x64)
INTERRUPT(0x65)
INTERRUPT(0x66)
INTERRUPT(0x67)
INTERRUPT(0x68)
INTERRUPT(0x69)
INTERRUPT(0x6a)
INTERRUPT(0x6b)
INTERRUPT(0x6c)
INTERRUPT(0x6d)
INTERRUPT(0x6e)
INTERRUPT(0x6f)

INTERRUPT(0x70)
INTERRUPT(0x71)
INTERRUPT(0x72)
INTERRUPT(0x73)
INTERRUPT(0x74)
INTERRUPT(0x75)
INTERRUPT(0x76)
INTERRUPT(0x77)
INTERRUPT(0x78)
INTERRUPT(0x79)
INTERRUPT(0x7a)
INTERRUPT(0x7b)
INTERRUPT(0x7c)
INTERRUPT(0x7d)
INTERRUPT(0x7e)
INTERRUPT(0x7f)

/* 0x80 is the user-callable system call gate. */
EXCEP_SPC_USR(0x80,syscall_int80)
INTERRUPT(0x81)
INTERRUPT(0x82)
INTERRUPT(0x83)
INTERRUPT(0x84)
INTERRUPT(0x85)
INTERRUPT(0x86)
INTERRUPT(0x87)
INTERRUPT(0x88)
INTERRUPT(0x89)
INTERRUPT(0x8a)
INTERRUPT(0x8b)
INTERRUPT(0x8c)
INTERRUPT(0x8d)
INTERRUPT(0x8e)
INTERRUPT(0x8f)

INTERRUPT(0x90)
INTERRUPT(0x91)
INTERRUPT(0x92)
INTERRUPT(0x93)
INTERRUPT(0x94)
INTERRUPT(0x95)
INTERRUPT(0x96)
INTERRUPT(0x97)
INTERRUPT(0x98)
INTERRUPT(0x99)
INTERRUPT(0x9a)
INTERRUPT(0x9b)
INTERRUPT(0x9c)
INTERRUPT(0x9d)
INTERRUPT(0x9e)
INTERRUPT(0x9f)

INTERRUPT(0xa0)
INTERRUPT(0xa1)
INTERRUPT(0xa2)
INTERRUPT(0xa3)
INTERRUPT(0xa4)
INTERRUPT(0xa5)
INTERRUPT(0xa6)
INTERRUPT(0xa7)
INTERRUPT(0xa8)
INTERRUPT(0xa9)
INTERRUPT(0xaa)
INTERRUPT(0xab)
INTERRUPT(0xac)
INTERRUPT(0xad)
INTERRUPT(0xae)
INTERRUPT(0xaf)

INTERRUPT(0xb0)
INTERRUPT(0xb1)
INTERRUPT(0xb2)
INTERRUPT(0xb3)
INTERRUPT(0xb4)
INTERRUPT(0xb5)
INTERRUPT(0xb6)
INTERRUPT(0xb7)
INTERRUPT(0xb8)
INTERRUPT(0xb9)
INTERRUPT(0xba)
INTERRUPT(0xbb)
INTERRUPT(0xbc)
INTERRUPT(0xbd)
INTERRUPT(0xbe)
INTERRUPT(0xbf)

INTERRUPT(0xc0)
INTERRUPT(0xc1)
INTERRUPT(0xc2)
INTERRUPT(0xc3)
INTERRUPT(0xc4)
INTERRUPT(0xc5)
INTERRUPT(0xc6)
INTERRUPT(0xc7)
INTERRUPT(0xc8)
INTERRUPT(0xc9)
INTERRUPT(0xca)
INTERRUPT(0xcb)
INTERRUPT(0xcc)
INTERRUPT(0xcd)
INTERRUPT(0xce)
INTERRUPT(0xcf)

INTERRUPT(0xd0)
INTERRUPT(0xd1)
INTERRUPT(0xd2)
INTERRUPT(0xd3)
INTERRUPT(0xd4)
INTERRUPT(0xd5)
INTERRUPT(0xd6)
INTERRUPT(0xd7)
INTERRUPT(0xd8)
INTERRUPT(0xd9)
INTERRUPT(0xda)
INTERRUPT(0xdb)
INTERRUPT(0xdc)
INTERRUPT(0xdd)
INTERRUPT(0xde)
INTERRUPT(0xdf)

INTERRUPT(0xe0)
INTERRUPT(0xe1)
INTERRUPT(0xe2)
INTERRUPT(0xe3)
INTERRUPT(0xe4)
INTERRUPT(0xe5)
INTERRUPT(0xe6)
INTERRUPT(0xe7)
INTERRUPT(0xe8)
INTERRUPT(0xe9)
INTERRUPT(0xea)
INTERRUPT(0xeb)
INTERRUPT(0xec)
INTERRUPT(0xed)
INTERRUPT(0xee)
INTERRUPT(0xef)

INTERRUPT(0xf0)
INTERRUPT(0xf1)
INTERRUPT(0xf2)
INTERRUPT(0xf3)
INTERRUPT(0xf4)
INTERRUPT(0xf5)
INTERRUPT(0xf6)
INTERRUPT(0xf7)
INTERRUPT(0xf8)
INTERRUPT(0xf9)
INTERRUPT(0xfa)
INTERRUPT(0xfb)
INTERRUPT(0xfc)
INTERRUPT(0xfd)
INTERRUPT(0xfe)
/* Highest vector is routed through the trap path for preemption. */
EXCEPTION(0xff,t_preempt)
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _INTEL_H_ +#define _INTEL_H_ + +#include +#include + +/* + * Exported functions + */ +extern kern_return_t intel_read_fault( + vm_map_t map, + vm_offset_t vaddr); + +#endif /* _INTEL_H_ */ diff --git a/osfmk/i386/io_emulate.c b/osfmk/i386/io_emulate.c new file mode 100644 index 000000000..0c955e130 --- /dev/null +++ b/osfmk/i386/io_emulate.c @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +int +emulate_io( + struct i386_saved_state *regs, + int opcode, + int io_port) +{ +#if 1 + /* At the moment, we are not allowing I/O emulation + * + * FIXME - this should probably change due to + * the Window Server's need to map I/O ports into its space. 
+ */ + + return EM_IO_ERROR; +#else + thread_t thread = current_thread(); + at386_io_lock_state(); + + if (iopl_emulate(regs, opcode, io_port)) + return EM_IO_DONE; + + if (iopb_check_mapping(thread, iopl_device)) + return EM_IO_ERROR; + + /* + * Check for send rights to the IOPL device port. + */ + if (iopl_device_port == IP_NULL) + return EM_IO_ERROR; + { + ipc_space_t space = current_space(); + mach_port_name_t name; + ipc_entry_t entry; + boolean_t has_rights = FALSE; + ipc_entry_bits_t *capability; + + is_write_lock(space); + assert(space->is_active); + + if (ipc_right_reverse(space, (ipc_object_t) iopl_device_port, + &name, &entry, &capability)) { + /* iopl_device_port is locked and active */ + if (capability[space->server_id] & MACH_PORT_TYPE_SEND) + has_rights = TRUE; + ip_unlock(iopl_device_port); + } + + is_write_unlock(space); + if (!has_rights) { + return EM_IO_ERROR; + } + } + + /* + * Map the IOPL port set into the thread. + */ + + if (i386_io_port_add(thread, iopl_device) + != KERN_SUCCESS) + return EM_IO_ERROR; + + /* + * Make the thread use its IO_TSS to get the IO permissions; + * it may not have had one before this. + */ + act_machine_switch_pcb(thread->top_act); + + return EM_IO_RETRY; +#endif +} diff --git a/osfmk/i386/io_emulate.h b/osfmk/i386/io_emulate.h new file mode 100644 index 000000000..239fd081c --- /dev/null +++ b/osfmk/i386/io_emulate.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
/*
 * osfmk/i386/io_emulate.h
 *
 * Copyright (c) 2000 Apple Computer, Inc. (APSL 1.1); portions
 * Copyright (c) 1991 Carnegie Mellon University (Mach license).
 * See the original distribution for the full license texts.
 */

#ifndef _I386_IO_EMULATE_H_
#define _I386_IO_EMULATE_H_

/*
 * Attempt to emulate a faulting I/O instruction; see io_emulate.c.
 * Returns one of the EM_IO_* codes below.
 */
extern int emulate_io(
	struct i386_saved_state	*regs,
	int			opcode,
	int			io_port);

/*
 * Return codes from IO emulation.
 */
#define EM_IO_DONE	0	/* IO instruction executed, proceed */
#define EM_IO_RETRY	1	/* IO port mapped, retry instruction */
#define EM_IO_ERROR	2	/* IO port not mapped */

#endif	/* _I386_IO_EMULATE_H_ */
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include +#include +#include +#include + +extern vm_offset_t virtual_avail; + +/* + * Allocate and map memory for devices that may need to be mapped before + * Mach VM is running. + */ +vm_offset_t +io_map(phys_addr, size) + vm_offset_t phys_addr; + vm_size_t size; +{ + vm_offset_t start; + + if (kernel_map == VM_MAP_NULL) { + /* + * VM is not initialized. Grab memory. + */ + start = virtual_avail; + virtual_avail += round_page(size); + } + else { + (void) kmem_alloc_pageable(kernel_map, &start, round_page(size)); + } + (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size), + VM_PROT_READ|VM_PROT_WRITE); + return (start); +} diff --git a/osfmk/i386/io_map_entries.h b/osfmk/i386/io_map_entries.h new file mode 100644 index 000000000..37b6f6aa0 --- /dev/null +++ b/osfmk/i386/io_map_entries.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +extern vm_offset_t io_map( + vm_offset_t phys_addr, + vm_size_t size); + diff --git a/osfmk/i386/io_port.h b/osfmk/i386/io_port.h new file mode 100644 index 000000000..4d0684917 --- /dev/null +++ b/osfmk/i386/io_port.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. 
/*
 * osfmk/i386/io_port.h
 *
 * Copyright (c) 2000 Apple Computer, Inc. (APSL 1.1); portions
 * Copyright (c) 1991 Carnegie Mellon University (Mach license).
 * See the original distribution for the full license texts.
 */

#ifndef _I386_IO_PORT_H_
#define _I386_IO_PORT_H_

/*
 * IO register definitions.
 *
 * An io_reg_t names a single x86 I/O port; the architecture addresses
 * ports 0..0xffff, which fits an unsigned short.
 */
typedef unsigned short io_reg_t;

#define IO_REG_NULL	(0x00ff)	/* reserved */

/*
 * Allocate and destroy io port sets for users to map into threads.
 * Disabled along with the iopb.c machinery they belong to.
 */
#if 0
extern void io_port_create(
	device_t	device,
	io_reg_t	*portlist);
extern void io_port_destroy(
	device_t	device);

#endif
#endif	/* _I386_IO_PORT_H_ */
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Code to manipulate IO permission bitmaps. + */ + + + +#include +#include + +#include + +#include +#include +#include +#include +#include + + +#include +#include +#include +#include + +void +iopb_init(void) +{ +} + + +void +iopb_destroy(iopb_tss_t io_tss) +{ +} + +#if 0 /* Code removed until better solution comes on board */ +/* + * A set of ports for an IO device. 
+ */ +struct io_port { + device_t device; /* Mach device */ + queue_chain_t dev_list; /* link in device list */ + queue_chain_t io_use_list; /* List of threads that use it */ + io_reg_t *io_port_list; /* list of IO ports that use it */ + /* list ends with IO_REG_NULL */ +}; +typedef struct io_port *io_port_t; + +/* + * Lookup table for device -> io_port mapping + * (a linked list - I don't expect too many) + */ +queue_head_t device_to_io_port_list; + +/* + * Cross-reference: + * all threads that have IO ports mapped + * all IO ports that have threads mapped + */ +struct io_use { + queue_chain_t psq; /* Links from port set */ + queue_chain_t tsq; /* links from tss */ + io_port_t ps; /* Port set */ + iopb_tss_t ts; /* Task segment */ +}; +typedef struct io_use *io_use_t; + +/* + * Big lock for the whole mess. + */ +decl_simple_lock_data(,iopb_lock) + +/* Forward */ + +extern void io_bitmap_init( + isa_iopb bp); +extern void io_bitmap_set( + isa_iopb bp, + io_reg_t *bit_list); +extern void io_bitmap_clear( + isa_iopb bp, + io_reg_t *bit_list); +extern io_port_t device_to_io_port_lookup( + device_t device); +extern void io_tss_init( + iopb_tss_t io_tss); + + +/* + * Initialize the package. 
+ */ +void +iopb_init(void) +{ + queue_init(&device_to_io_port_list); + simple_lock_init(&iopb_lock, ETAP_IO_IOPB); +} + +/* + * Initialize bitmap (set all bits to OFF == 1) + */ +void +io_bitmap_init( + isa_iopb bp) +{ + register unsigned char *b; + register int s; + + s = sizeof(isa_iopb); + b = bp; + + do { + *b++ = ~0; + } while (--s >= 0); +} + +/* + * Set selected bits in bitmap to ON == 0 + */ +void +io_bitmap_set( + isa_iopb bp, + io_reg_t *bit_list) +{ + io_reg_t io_bit; + + while ((io_bit = *bit_list++) != IO_REG_NULL) { + bp[io_bit>>3] &= ~(1 << (io_bit & 0x7)); + } +} + +/* + * Set selected bits in bitmap to OFF == 1 + */ +void +io_bitmap_clear( + isa_iopb bp, + io_reg_t *bit_list) +{ + io_reg_t io_bit; + + while ((io_bit = *bit_list++) != IO_REG_NULL) { + bp[io_bit>>3] |= (1 << (io_bit & 0x7)); + } +} + +/* + * Lookup an io-port set by device + */ +io_port_t +device_to_io_port_lookup( + device_t device) +{ + register io_port_t io_port; + + queue_iterate(&device_to_io_port_list, io_port, io_port_t, dev_list) { + if (io_port->device == device) { + return io_port; + } + } + return 0; +} + +/* + * [exported] + * Create an io_port set + */ +void +io_port_create( + device_t device, + io_reg_t *io_port_list) +{ + register io_port_t io_port, old_io_port; + + io_port = (io_port_t) kalloc(sizeof(struct io_port)); + + simple_lock(&iopb_lock); + if (device_to_io_port_lookup(device) != 0) { + simple_unlock(&iopb_lock); + kfree((vm_offset_t) io_port, sizeof(struct io_port)); + return; + } + + io_port->device = device; + queue_init(&io_port->io_use_list); + io_port->io_port_list = io_port_list; + + /* + * Enter in lookup list. + */ + queue_enter(&device_to_io_port_list, io_port, io_port_t, dev_list); + + simple_unlock(&iopb_lock); +} + +/* + * [exported] + * Destroy an io port set, removing any IO mappings. 
+ */ +void +io_port_destroy( + device_t device) +{ + io_port_t io_port; + io_use_t iu; + + simple_lock(&iopb_lock); + io_port = device_to_io_port_lookup(device); + if (io_port == 0) { + simple_unlock(&iopb_lock); + return; + } + + queue_iterate(&io_port->io_use_list, iu, io_use_t, psq) { + iopb_tss_t io_tss; + io_tss = iu->ts; + io_bitmap_clear(io_tss->bitmap, io_port->io_port_list); + queue_remove(&io_tss->io_port_list, iu, io_use_t, tsq); + } + queue_remove(&device_to_io_port_list, io_port, io_port_t, dev_list); + simple_unlock(&iopb_lock); + + while (!queue_empty(&io_port->io_use_list)) { + iu = (io_use_t) queue_first(&io_port->io_use_list); + queue_remove(&io_port->io_use_list, iu, io_use_t, psq); + kfree((vm_offset_t)iu, sizeof(struct io_use)); + } + + kfree((vm_offset_t)io_port, sizeof(struct io_port)); +} + +/* + * Initialize an IO TSS. + */ +void +io_tss_init( + iopb_tss_t io_tss) +{ + vm_offset_t addr = (vm_offset_t) io_tss; + vm_size_t size = (char *)&io_tss->barrier - (char *)io_tss; + + bzero((char *)&io_tss->tss, sizeof(struct i386_tss)); + io_tss->tss.io_bit_map_offset + = (char *)&io_tss->bitmap - (char *)io_tss; + io_tss->tss.ss0 = KERNEL_DS; + io_bitmap_init(io_tss->bitmap); + io_tss->barrier = ~0; + queue_init(&io_tss->io_port_list); + addr += LINEAR_KERNEL_ADDRESS; + io_tss->iopb_desc[0] = ((size-1) & 0xffff) + | ((addr & 0xffff) << 16); + io_tss->iopb_desc[1] = ((addr & 0x00ff0000) >> 16) + | ((ACC_TSS|ACC_PL_K|ACC_P) << 8) + | ((size-1) & 0x000f0000) + | (addr & 0xff000000); +} + +/* + * [exported] + * Create an IOPB_TSS + */ +iopb_tss_t +iopb_create(void) +{ + register iopb_tss_t ts; + + ts = (iopb_tss_t) kalloc(sizeof (struct iopb_tss)); + io_tss_init(ts); + return (ts); +} + +/* + * [exported] + * Destroy an IOPB_TSS + */ +void +iopb_destroy( + iopb_tss_t io_tss) +{ + io_use_t iu; + io_port_t io_port; + + simple_lock(&iopb_lock); + + queue_iterate(&io_tss->io_port_list, iu, io_use_t, tsq) { + io_port = iu->ps; + /* skip bitmap clear - entire 
bitmap will vanish */ + queue_remove(&io_port->io_use_list, iu, io_use_t, psq); + } + + simple_unlock(&iopb_lock); + + while (!queue_empty(&io_tss->io_port_list)) { + iu = (io_use_t) queue_first(&io_tss->io_port_list); + queue_remove(&io_tss->io_port_list, iu, io_use_t, tsq); + kfree((vm_offset_t)iu, sizeof(struct io_use)); + } + + kfree((vm_offset_t)io_tss, sizeof(struct iopb_tss)); +} + +/* + * Add an IO mapping to a thread. + */ +kern_return_t +i386_io_port_add( + thread_t thread, + device_t device) +{ + pcb_t pcb; + iopb_tss_t io_tss, new_io_tss; + io_port_t io_port; + io_use_t iu, old_iu; + + if (thread == THREAD_NULL + || device == DEVICE_NULL) + return KERN_INVALID_ARGUMENT; + + pcb = thread->top_act->mact.pcb; + + new_io_tss = 0; + iu = (io_use_t) kalloc(sizeof(struct io_use)); + + Retry: + simple_lock(&iopb_lock); + + /* find the io_port_t for the device */ + io_port = device_to_io_port_lookup(device); + if (io_port == 0) { + /* + * Device does not have IO ports available. + */ + simple_unlock(&iopb_lock); + if (new_io_tss) + kfree((vm_offset_t)new_io_tss, sizeof(struct iopb_tss)); + kfree((vm_offset_t) iu, sizeof(struct io_use)); + return KERN_INVALID_ARGUMENT; + } + + /* Have the IO port. */ + + /* Make sure the thread has a TSS. */ + + simple_lock(&pcb->lock); + io_tss = pcb->ims.io_tss; + if (io_tss == 0) { + if (new_io_tss == 0) { + /* + * Allocate an IO-tss. + */ + simple_unlock(&pcb->lock); + simple_unlock(&iopb_lock); + + new_io_tss = (iopb_tss_t) kalloc(sizeof(struct iopb_tss)); + io_tss_init(new_io_tss); + + goto Retry; + } + io_tss = new_io_tss; + pcb->ims.io_tss = io_tss; + new_io_tss = 0; + } + + /* + * Have io_port and io_tss. + * See whether device is already mapped. + */ + queue_iterate(&io_tss->io_port_list, old_iu, io_use_t, tsq) { + if (old_iu->ps == io_port) { + /* + * Already mapped. 
+ */ + simple_unlock(&pcb->lock); + simple_unlock(&iopb_lock); + + kfree((vm_offset_t)iu, sizeof(struct io_use)); + if (new_io_tss) + kfree((vm_offset_t)new_io_tss, sizeof(struct iopb_tss)); + return KERN_SUCCESS; + } + } + + /* + * Add mapping. + */ + iu->ps = io_port; + iu->ts = io_tss; + queue_enter(&io_port->io_use_list, iu, io_use_t, psq); + queue_enter(&io_tss->io_port_list, iu, io_use_t, tsq); + io_bitmap_set(io_tss->bitmap, io_port->io_port_list); + + simple_unlock(&pcb->lock); + simple_unlock(&iopb_lock); + + if (new_io_tss) + kfree((vm_offset_t)new_io_tss, sizeof(struct iopb_tss)); + return KERN_SUCCESS; + +} + +/* + * Remove an IO mapping from a thread. + */ +kern_return_t +i386_io_port_remove( + thread_t thread, + device_t device) +{ + pcb_t pcb; + iopb_tss_t io_tss; + io_port_t io_port; + io_use_t iu; + + if (thread == THREAD_NULL + || device == DEVICE_NULL) + return KERN_INVALID_ARGUMENT; + + pcb = thread->top_act->mact.pcb; + + simple_lock(&iopb_lock); + + /* find the io_port_t for the device */ + + io_port = device_to_io_port_lookup(device); + if (io_port == 0) { + /* + * Device does not have IO ports available. + */ + simple_unlock(&iopb_lock); + return KERN_INVALID_ARGUMENT; + } + + simple_lock(&pcb->lock); + io_tss = pcb->ims.io_tss; + if (io_tss == 0) { + simple_unlock(&pcb->lock); + simple_unlock(&iopb_lock); + return KERN_INVALID_ARGUMENT; /* not mapped */ + } + + /* + * Find the mapping. + */ + queue_iterate(&io_tss->io_port_list, iu, io_use_t, tsq) { + if (iu->ps == io_port) { + /* + * Found mapping. Remove it. + */ + io_bitmap_clear(io_tss->bitmap, io_port->io_port_list); + + queue_remove(&io_port->io_use_list, iu, io_use_t, psq); + queue_remove(&io_tss->io_port_list, iu, io_use_t, tsq); + + simple_unlock(&pcb->lock); + simple_unlock(&iopb_lock); + + kfree((vm_offset_t)iu, sizeof(struct io_use)); + + return KERN_SUCCESS; + } + } + + /* + * No mapping. 
+ */ + return KERN_INVALID_ARGUMENT; +} + +/* + * Return the IO ports mapped into a thread. + */ + +kern_return_t +i386_io_port_list(thread, list, list_count) + thread_t thread; + device_t **list; + unsigned int *list_count; +{ + register pcb_t pcb; + register iopb_tss_t io_tss; + unsigned int count, alloc_count; + device_t *devices; + vm_size_t size_needed, size; + vm_offset_t addr; + int i; + + if (thread == THREAD_NULL) + return KERN_INVALID_ARGUMENT; + + pcb = thread->top_act->mact.pcb; + + alloc_count = 16; /* a guess */ + + do { + size_needed = alloc_count * sizeof(ipc_port_t); + if (size_needed <= size) + break; + + if (size != 0) + kfree(addr, size); + + assert(size_needed > 0); + size = size_needed; + + addr = kalloc(size); + if (addr == 0) + return KERN_RESOURCE_SHORTAGE; + + devices = (device_t *)addr; + count = 0; + + simple_lock(&iopb_lock); + simple_lock(&pcb->lock); + io_tss = pcb->ims.io_tss; + if (io_tss != 0) { + register io_use_t iu; + + queue_iterate(&io_tss->io_port_list, iu, io_use_t, tsq) { + if (++count < alloc_count) { + *devices = iu->ps->device; + device_reference(*devices); + devices++; + } + } + } + simple_unlock(&pcb->lock); + simple_unlock(&iopb_lock); + } while (count > alloc_count); + + if (count == 0) { + /* + * No IO ports + */ + *list = 0; + *list_count = 0; + + if (size != 0) + kfree(addr, size); + } + else { + /* + * If we allocated too much, must copy. 
+ */ + size_needed = count * sizeof(ipc_port_t); + if (size_needed < size) { + vm_offset_t new_addr; + + new_addr = kalloc(size_needed); + if (new_addr == 0) { + for (i = 0; i < count; i++) + device_deallocate(devices[i]); + kfree(addr, size); + return KERN_RESOURCE_SHORTAGE; + } + + bcopy((char *)addr, (char *)new_addr, size_needed); + kfree(addr, size); + devices = (device_t *)new_addr; + } + + for (i = 0; i < count; i++) + ((ipc_port_t *)devices)[i] = + convert_device_to_port(devices[i]); + } + *list = devices; + *list_count = count; + + return KERN_SUCCESS; +} + +/* + * Check whether an IO device is mapped to a particular thread. + * Used to support the 'iopl' device automatic mapping. + */ +boolean_t +iopb_check_mapping( + thread_t thread, + device_t device) +{ + pcb_t pcb; + io_port_t io_port; + io_use_t iu; + + pcb = thread->top_act->mact.pcb; + + simple_lock(&iopb_lock); + + /* Find the io port for the device */ + + io_port = device_to_io_port_lookup(device); + if (io_port == 0) { + simple_unlock(&iopb_lock); + return FALSE; + } + + /* Look up the mapping in the device`s mapping list. */ + + queue_iterate(&io_port->io_use_list, iu, io_use_t, psq) { + if (iu->ts == pcb->ims.io_tss) { + /* + * Device is mapped. + */ + simple_unlock(&iopb_lock); + return TRUE; + } + } + simple_unlock(&iopb_lock); + return FALSE; +} +#endif diff --git a/osfmk/i386/iopb.h b/osfmk/i386/iopb.h new file mode 100644 index 000000000..bd9705115 --- /dev/null +++ b/osfmk/i386/iopb.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _I386_IOPB_H_ +#define _I386_IOPB_H_ + +#include +#include + +/* + * IO permission bitmap. + * + * Allows only IO ports 0 .. 0xffff: for ISA machines. 
+ */ + +#define iopb_howmany(a,b) (((a)+(b)-1)/(b)) + +#define IOPB_MAX 0xffff /* x86 allows ports 0..ffff */ +#define IOPB_BYTES (iopb_howmany(IOPB_MAX+1,8)) + +typedef unsigned char isa_iopb[IOPB_BYTES]; + +/* + * An IO permission map is a task segment with an IO permission bitmap. + */ + +struct iopb_tss { + struct i386_tss tss; /* task state segment */ + isa_iopb bitmap; /* bitmap of mapped IO ports */ + unsigned int barrier; /* bitmap barrier for CPU slop */ + queue_head_t io_port_list; /* list of mapped IO ports */ + int iopb_desc[2]; /* descriptor for this TSS */ +}; + +typedef struct iopb_tss *iopb_tss_t; + +#endif /* _I386_IOPB_H_ */ diff --git a/osfmk/i386/iopb_entries.h b/osfmk/i386/iopb_entries.h new file mode 100644 index 000000000..8b50a7eb2 --- /dev/null +++ b/osfmk/i386/iopb_entries.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#if 0 +extern boolean_t iopb_check_mapping( + thread_t thread, + device_t device); +extern kern_return_t i386_io_port_add( + thread_t thread, + device_t device); +extern kern_return_t i386_io_port_remove( + thread_t thread, + device_t device); +extern kern_return_t i386_io_port_list( + thread_t thread, + device_t ** list, + unsigned int * list_count); +extern void iopb_init(void); +extern iopb_tss_t iopb_create(void); +extern void iopb_destroy( + iopb_tss_t iopb); +#endif diff --git a/osfmk/i386/ipl.h b/osfmk/i386/ipl.h new file mode 100644 index 000000000..db59db5d1 --- /dev/null +++ b/osfmk/i386/ipl.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* +Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760 +All Rights Reserved. + +Permission to use, copy, modify, and distribute this +software and its documentation for any purpose and +without fee is hereby granted, provided that the above +copyright notice appears in all copies and that both the +copyright notice and this permission notice appear in +supporting documentation, and that the name of Prime +Computer, Inc. not be used in advertising or publicity +pertaining to distribution of the software without +specific, written prior permission. + +THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER, +INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN +NO EVENT SHALL PRIME COMPUTER, INC. 
BE LIABLE FOR ANY +SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR +OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#include + +#define SPL0 0 +#define SPL1 1 +#define SPL2 2 +#define SPL3 3 +#define SPL4 4 +#define SPL5 5 +#define SPL6 6 + +#define SPLPP 5 +#define SPLTTY 6 +#define SPLNI 6 + +#define IPLHI 8 +#define SPLHI IPLHI + +#if MACH_KPROF +#define SPL7 7 +#else +#define SPL7 IPLHI +#endif + +#define SPL_CMP_GT(a, b) ((unsigned)(a) > (unsigned)(b)) +#define SPL_CMP_LT(a, b) ((unsigned)(a) < (unsigned)(b)) +#define SPL_CMP_GE(a, b) ((unsigned)(a) >= (unsigned)(b)) +#define SPL_CMP_LE(a, b) ((unsigned)(a) <= (unsigned)(b)) + +#ifdef MACH_KERNEL +#ifndef ASSEMBLER + +#include + +extern i386_intr_t ivect[]; +extern int iunit[]; +extern unsigned char intpri[]; +#endif /* ASSEMBLER */ +#endif /* MACH_KERNEL */ diff --git a/osfmk/i386/ktss.c b/osfmk/i386/ktss.c new file mode 100644 index 000000000..23a39df8f --- /dev/null +++ b/osfmk/i386/ktss.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Kernel task state segment. + * + * We don't use the i386 task switch mechanism. We need a TSS + * only to hold the kernel stack pointer for the current thread. + * + * XXX multiprocessor?? 
+ */ +#include +#include +#include + +struct i386_tss ktss = { + 0, /* back link */ + 0, /* esp0 */ + KERNEL_DS, /* ss0 */ + 0, /* esp1 */ + 0, /* ss1 */ + 0, /* esp2 */ + 0, /* ss2 */ + 0, /* cr3 */ + 0, /* eip */ + 0, /* eflags */ + 0, /* eax */ + 0, /* ecx */ + 0, /* edx */ + 0, /* ebx */ + 0, /* esp */ + 0, /* ebp */ + 0, /* esi */ + 0, /* edi */ + 0, /* es */ + 0, /* cs */ + 0, /* ss */ + 0, /* ds */ + 0, /* fs */ + 0, /* gs */ + KERNEL_LDT, /* ldt */ + 0, /* trace_trap */ + 0x0FFF /* IO bitmap offset - + beyond end of TSS segment, + so no bitmap */ +}; + +#if MACH_KDB + +struct i386_tss dbtss = { + 0, /* back link */ + 0, /* esp0 */ + KERNEL_DS, /* ss0 */ + 0, /* esp1 */ + 0, /* ss1 */ + 0, /* esp2 */ + 0, /* ss2 */ + 0, /* cr3 */ + 0, /* eip */ + 0, /* eflags */ + 0, /* eax */ + 0, /* ecx */ + 0, /* edx */ + 0, /* ebx */ + 0, /* esp */ + 0, /* ebp */ + 0, /* esi */ + 0, /* edi */ + KERNEL_DS, /* es */ + KERNEL_CS, /* cs */ + KERNEL_DS, /* ss */ + KERNEL_DS, /* ds */ + KERNEL_DS, /* fs */ + KERNEL_DS, /* gs */ + KERNEL_LDT, /* ldt */ + 0, /* trace_trap */ + 0x0FFF /* IO bitmap offset - + beyond end of TSS segment, + so no bitmap */ +}; + +#endif /* MACH_KDB */ diff --git a/osfmk/i386/ldt.c b/osfmk/i386/ldt.c new file mode 100644 index 000000000..824610f9b --- /dev/null +++ b/osfmk/i386/ldt.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * "Local" descriptor table. At the moment, all tasks use the + * same LDT. 
+ */ +#include +#include +#include + +extern int syscall(void); +extern int mach_rpc(void); + +struct fake_descriptor ldt[LDTSZ] = { +/*007*/ { (unsigned int)&syscall, + KERNEL_CS, + 0, /* no parameters */ + ACC_P|ACC_PL_U|ACC_CALL_GATE + }, /* call gate for system calls */ +/*00F*/ { (unsigned int)&mach_rpc, + KERNEL_CS, + 0, /* no parameters */ + ACC_P|ACC_PL_U|ACC_CALL_GATE + }, /* call gate for mach rpc */ +/*017*/ { 0, + (VM_MAX_ADDRESS-VM_MIN_ADDRESS-1)>>12, + SZ_32|SZ_G, + ACC_P|ACC_PL_U|ACC_CODE_R + }, /* user code segment */ +/*01F*/ { 0, + (VM_MAX_ADDRESS-VM_MIN_ADDRESS-1)>>12, + SZ_32|SZ_G, + ACC_P|ACC_PL_U|ACC_DATA_W + }, /* user data segment */ +}; diff --git a/osfmk/i386/lock.h b/osfmk/i386/lock.h new file mode 100644 index 000000000..f68004bfb --- /dev/null +++ b/osfmk/i386/lock.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1998 Apple Computer + * All Rights Reserved + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Machine-dependent simple locks for the i386. + */ + +#ifndef _I386_LOCK_H_ +#define _I386_LOCK_H_ + +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include +#include + + +#if defined(__GNUC__) + +/* + * General bit-lock routines. + */ + +#define bit_lock(bit,l) \ + __asm__ volatile(" jmp 1f \n \ + 0: btl %0, %1 \n \ + jb 0b \n \ + 1: lock \n \ + btsl %0,%1 \n \ + jb 0b" : \ + : \ + "r" (bit), "m" (*(volatile int *)(l)) : \ + "memory"); + +#define bit_unlock(bit,l) \ + __asm__ volatile(" lock \n \ + btrl %0,%1" : \ + : \ + "r" (bit), "m" (*(volatile int *)(l))); + +/* + * Set or clear individual bits in a long word. + * The locked access is needed only to lock access + * to the word, not to individual bits. 
+ */ + +#define i_bit_set(bit,l) \ + __asm__ volatile(" lock \n \ + btsl %0,%1" : \ + : \ + "r" (bit), "m" (*(volatile int *)(l))); + +#define i_bit_clear(bit,l) \ + __asm__ volatile(" lock \n \ + btrl %0,%1" : \ + : \ + "r" (bit), "m" (*(volatile int *)(l))); + +extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word) +{ + int bit; + + __asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit) + : "m" (word), "ir" (testbit)); + return bit; +} + +extern __inline__ char xchgb(volatile char * cp, char new); + +extern __inline__ void atomic_incl(long * p, long delta); +extern __inline__ void atomic_incs(short * p, short delta); +extern __inline__ void atomic_incb(char * p, char delta); + +extern __inline__ void atomic_decl(long * p, long delta); +extern __inline__ void atomic_decs(short * p, short delta); +extern __inline__ void atomic_decb(char * p, char delta); + +extern __inline__ long atomic_getl(long * p); +extern __inline__ short atomic_gets(short * p); +extern __inline__ char atomic_getb(char * p); + +extern __inline__ void atomic_setl(long * p, long value); +extern __inline__ void atomic_sets(short * p, short value); +extern __inline__ void atomic_setb(char * p, char value); + +extern __inline__ char xchgb(volatile char * cp, char new) +{ + register char old = new; + + __asm__ volatile (" xchgb %0,%2" : + "=q" (old) : + "0" (new), "m" (*(volatile char *)cp) : "memory"); + return (old); +} + +extern __inline__ void atomic_incl(long * p, long delta) +{ +#if NEED_ATOMIC + __asm__ volatile (" lock \n \ + addl %0,%1" : \ + : \ + "r" (delta), "m" (*(volatile long *)p)); +#else /* NEED_ATOMIC */ + *p += delta; +#endif /* NEED_ATOMIC */ +} + +extern __inline__ void atomic_incs(short * p, short delta) +{ +#if NEED_ATOMIC + __asm__ volatile (" lock \n \ + addw %0,%1" : \ + : \ + "q" (delta), "m" (*(volatile short *)p)); +#else /* NEED_ATOMIC */ + *p += delta; +#endif /* NEED_ATOMIC */ +} + +extern __inline__ void atomic_incb(char * p, 
char delta) +{ +#if NEED_ATOMIC + __asm__ volatile (" lock \n \ + addb %0,%1" : \ + : \ + "q" (delta), "m" (*(volatile char *)p)); +#else /* NEED_ATOMIC */ + *p += delta; +#endif /* NEED_ATOMIC */ +} + +extern __inline__ void atomic_decl(long * p, long delta) +{ +#if NCPUS > 1 + __asm__ volatile (" lock \n \ + subl %0,%1" : \ + : \ + "r" (delta), "m" (*(volatile long *)p)); +#else /* NCPUS > 1 */ + *p -= delta; +#endif /* NCPUS > 1 */ +} + +extern __inline__ void atomic_decs(short * p, short delta) +{ +#if NEED_ATOMIC + __asm__ volatile (" lock \n \ + subw %0,%1" : \ + : \ + "q" (delta), "m" (*(volatile short *)p)); +#else /* NEED_ATOMIC */ + *p -= delta; +#endif /* NEED_ATOMIC */ +} + +extern __inline__ void atomic_decb(char * p, char delta) +{ +#if NEED_ATOMIC + __asm__ volatile (" lock \n \ + subb %0,%1" : \ + : \ + "q" (delta), "m" (*(volatile char *)p)); +#else /* NEED_ATOMIC */ + *p -= delta; +#endif /* NEED_ATOMIC */ +} + +extern __inline__ long atomic_getl(long * p) +{ + return (*p); +} + +extern __inline__ short atomic_gets(short * p) +{ + return (*p); +} + +extern __inline__ char atomic_getb(char * p) +{ + return (*p); +} + +extern __inline__ void atomic_setl(long * p, long value) +{ + *p = value; +} + +extern __inline__ void atomic_sets(short * p, short value) +{ + *p = value; +} + +extern __inline__ void atomic_setb(char * p, char value) +{ + *p = value; +} + + +#else /* !defined(__GNUC__) */ + +extern void i_bit_set( + int index, + void *addr); + +extern void i_bit_clear( + int index, + void *addr); + +extern void bit_lock( + int index, + void *addr); + +extern void bit_unlock( + int index, + void *addr); + +/* + * All other routines defined in __GNUC__ case lack + * definitions otherwise. - XXX + */ + +#endif /* !defined(__GNUC__) */ + + +#if !(USLOCK_DEBUG || USLOCK_STATS) +/* + * Take responsibility for production-quality usimple_locks. 
+ * Let the portable lock package build simple_locks in terms + * of usimple_locks, which is done efficiently with macros. + * Currently, these aren't inlined although they probably + * should be. The portable lock package is used for the + * usimple_lock prototypes and data declarations. + * + * For non-production configurations, punt entirely to the + * portable lock package. + * + * N.B. I've left in the hooks for ETAP, so we can + * compare the performance of stats-gathering on top + * of "production" locks v. stats-gathering on top + * of portable, C-based locks. + */ +#define USIMPLE_LOCK_CALLS +#endif /* !(USLOCK_DEBUG || USLOCK_STATS) */ + + +#if MACH_RT || (NCPUS > 1) || MACH_LDEBUG +#if MACH_LDEBUG || !MACH_RT +#define mutex_try(m) (!(m)->interlock && _mutex_try(m)) +#define mutex_lock(m) \ +MACRO_BEGIN \ + assert(assert_wait_possible()); \ + _mutex_lock((m)); \ +MACRO_END + +#else /* MACH_LDEBUG || !MACH_RT */ +#define mutex_try(m) (!(m)->interlock && \ + !xchgb ((volatile char *)&((m)->locked), 1)) +#define mutex_lock(m) \ +MACRO_BEGIN \ + assert(assert_wait_possible()); \ + _mutex_lock (m); \ +MACRO_END + +#endif /* MACH_LDEBUG || !MACH_RT */ +#else /* MACH_RT || (NCPUS > 1) || MACH_LDEBUG */ +#define mutex_try _mutex_try +#define mutex_lock _mutex_lock +#endif /* MACH_RT || (NCPUS > 1) || MACH_LDEBUG */ + +#else /* !MACH_KERNEL_PRIVATE */ + +#define mutex_try _mutex_try +#define mutex_lock(m) \ +MACRO_BEGIN \ + assert(assert_wait_possible()); \ + _mutex_lock((m)); \ +MACRO_END + +#endif /* !MACH_KERNEL_PRIVATE */ + +extern void kernel_preempt_check (void); + +#endif /* _I386_LOCK_H_ */ + diff --git a/osfmk/i386/locore.s b/osfmk/i386/locore.s new file mode 100644 index 000000000..152fa5a9a --- /dev/null +++ b/osfmk/i386/locore.s @@ -0,0 +1,3294 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define PREEMPT_DEBUG_LOG 0 + +#if __MACHO__ +/* Under Mach-O, etext is a variable which contains + * the last text address + */ +#define ETEXT_ADDR (EXT(etext)) +#else +/* Under ELF and other non-Mach-O formats, the address of + * etext represents the last text address + */ +#define ETEXT_ADDR $EXT(etext) +#endif + +#if NCPUS > 1 + +#define CX(addr,reg) addr(,reg,4) + +#else +#define CPU_NUMBER(reg) +#define CX(addr,reg) addr + +#endif /* NCPUS > 1 */ + + .text +locore_start: + +/* + * Fault recovery. + */ + +#ifdef __MACHO__ +#define RECOVERY_SECTION .section __VECTORS, __recover +#define RETRY_SECTION .section __VECTORS, __retries +#else +#define RECOVERY_SECTION .text +#define RETRY_SECTION .text +#endif + +#define RECOVER_TABLE_START \ + .align 2 ; \ + .globl EXT(recover_table) ;\ +LEXT(recover_table) ;\ + .text + +#define RECOVER(addr) \ + .align 2; \ + .long 9f ;\ + .long addr ;\ + .text ;\ +9: + +#define RECOVER_TABLE_END \ + .align 2 ;\ + .globl EXT(recover_table_end) ;\ +LEXT(recover_table_end) ;\ + .text + +/* + * Retry table for certain successful faults. + */ +#define RETRY_TABLE_START \ + .align 3; \ + .globl EXT(retry_table) ;\ +LEXT(retry_table) ;\ + .text + +#define RETRY(addr) \ + .align 3 ;\ + .long 9f ;\ + .long addr ;\ + .text ;\ +9: + +#define RETRY_TABLE_END \ + .align 3; \ + .globl EXT(retry_table_end) ;\ +LEXT(retry_table_end) ;\ + .text + +/* + * Allocate recovery and retry tables. + */ + RECOVERY_SECTION + RECOVER_TABLE_START + RETRY_SECTION + RETRY_TABLE_START + +/* + * Timing routines. + */ +#if STAT_TIME + +#define TIME_TRAP_UENTRY +#define TIME_TRAP_UEXIT +#define TIME_INT_ENTRY +#define TIME_INT_EXIT + +#else /* microsecond timing */ + +/* + * Microsecond timing. + * Assumes a free-running microsecond counter.
+ * no TIMER_MAX check needed. + */ + +/* + * There is only one current time-stamp per CPU, since only + * the time-stamp in the current timer is used. + * To save time, we allocate the current time-stamps here. + */ + .comm EXT(current_tstamp), 4*NCPUS + +/* + * Update time on user trap entry. + * 11 instructions (including cli on entry) + * Assumes CPU number in %edx. + * Uses %ebx, %ecx. + */ +#define TIME_TRAP_UENTRY \ + cli /* block interrupts */ ;\ + movl VA_ETC,%ebx /* get timer value */ ;\ + movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\ + movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\ + subl %ecx,%ebx /* elapsed = new-old */ ;\ + movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\ + addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\ + jns 0f /* if overflow, */ ;\ + call timer_normalize /* normalize timer */ ;\ +0: addl $(TH_SYS_TIMER-TH_USER_TIMER),%ecx ;\ + /* switch to sys timer */;\ + movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\ + sti /* allow interrupts */ + +/* + * update time on user trap exit. + * 10 instructions. + * Assumes CPU number in %edx. + * Uses %ebx, %ecx. + */ +#define TIME_TRAP_UEXIT \ + cli /* block interrupts */ ;\ + movl VA_ETC,%ebx /* get timer */ ;\ + movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\ + movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\ + subl %ecx,%ebx /* elapsed = new-old */ ;\ + movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\ + addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\ + jns 0f /* if overflow, */ ;\ + call timer_normalize /* normalize timer */ ;\ +0: addl $(TH_USER_TIMER-TH_SYS_TIMER),%ecx ;\ + /* switch to user timer */;\ + movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ + +/* + * update time on interrupt entry. + * 9 instructions. + * Assumes CPU number in %edx. + * Leaves old timer in %ebx. + * Uses %ecx. 
+ */ +#define TIME_INT_ENTRY \ + movl VA_ETC,%ecx /* get timer */ ;\ + movl CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\ + movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\ + subl %ebx,%ecx /* elapsed = new-old */ ;\ + movl CX(EXT(current_timer),%edx),%ebx /* get current timer */;\ + addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\ + leal CX(0,%edx),%ecx /* timer is 16 bytes */ ;\ + lea CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\ + movl %ecx,CX(EXT(current_timer),%edx) /* set timer */ + +/* + * update time on interrupt exit. + * 11 instructions + * Assumes CPU number in %edx, old timer in %ebx. + * Uses %eax, %ecx. + */ +#define TIME_INT_EXIT \ + movl VA_ETC,%eax /* get timer */ ;\ + movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\ + movl %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\ + subl %ecx,%eax /* elapsed = new-old */ ;\ + movl CX(EXT(current_timer),%edx),%ecx /* get current timer */;\ + addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\ + jns 0f /* if overflow, */ ;\ + call timer_normalize /* normalize timer */ ;\ +0: testb $0x80,LOW_BITS+3(%ebx) /* old timer overflow? */;\ + jz 0f /* if overflow, */ ;\ + movl %ebx,%ecx /* get old timer */ ;\ + call timer_normalize /* normalize timer */ ;\ +0: movl %ebx,CX(EXT(current_timer),%edx) /* set timer */ + + +/* + * Normalize timer in ecx. + * Preserves edx; clobbers eax. 
+ */ + .align ALIGN +timer_high_unit: + .long TIMER_HIGH_UNIT /* div has no immediate opnd */ + +timer_normalize: + pushl %edx /* save registers */ + pushl %eax + xorl %edx,%edx /* clear divisor high */ + movl LOW_BITS(%ecx),%eax /* get divisor low */ + divl timer_high_unit,%eax /* quotient in eax */ + /* remainder in edx */ + addl %eax,HIGH_BITS_CHECK(%ecx) /* add high_inc to check */ + movl %edx,LOW_BITS(%ecx) /* remainder to low_bits */ + addl %eax,HIGH_BITS(%ecx) /* add high_inc to high bits */ + popl %eax /* restore register */ + popl %edx + ret + +/* + * Switch to a new timer. + */ +Entry(timer_switch) + CPU_NUMBER(%edx) /* get this CPU */ + movl VA_ETC,%ecx /* get timer */ + movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */ + movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */ + subl %ecx,%eax /* elapsed = new - old */ + movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ + addl %eax,LOW_BITS(%ecx) /* add to low bits */ + jns 0f /* if overflow, */ + call timer_normalize /* normalize timer */ +0: + movl S_ARG0,%ecx /* get new timer */ + movl %ecx,CX(EXT(current_timer),%edx) /* set timer */ + ret + +/* + * Initialize the first timer for a CPU. + */ +Entry(start_timer) + CPU_NUMBER(%edx) /* get this CPU */ + movl VA_ETC,%ecx /* get timer */ + movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */ + movl S_ARG0,%ecx /* get timer */ + movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */ + ret + +#endif /* accurate timing */ + +/* + * Encapsulate the transfer of exception stack frames between a PCB + * and a thread stack. Since the whole point of these is to emulate + * a call or exception that changes privilege level, both macros + * assume that there is no user esp or ss stored in the source + * frame (because there was no change of privilege to generate them). + */ + +/* + * Transfer a stack frame from a thread's user stack to its PCB.
+ * We assume the thread and stack addresses have been loaded into
+ * registers (our arguments).
+ *
+ * The macro overwrites edi, esi, ecx and whatever registers hold the
+ * thread and stack addresses (which can't be one of the above three).
+ * The thread address is overwritten with the address of its saved state
+ * (where the frame winds up).
+ *
+ * Must be called on kernel stack.
+ */
+#define FRAME_STACK_TO_PCB(thread, stkp)			;\
+	movl	ACT_PCB(thread),thread	/* get act`s PCB */	;\
+	leal	PCB_ISS(thread),%edi	/* point to PCB`s saved state */;\
+	movl	%edi,thread		/* save for later */	;\
+	movl	stkp,%esi		/* point to start of frame */ ;\
+	movl	$R_UESP,%ecx					;\
+	sarl	$2,%ecx			/* word count for transfer */ ;\
+	cld				/* we`re incrementing */ ;\
+	rep							;\
+	movsl				/* transfer the frame */ ;\
+	addl	$R_UESP,stkp		/* derive true "user" esp */ ;\
+	movl	stkp,R_UESP(thread)	/* store in PCB */	;\
+	movl	$0,%ecx						;\
+	mov	%ss,%cx			/* get current ss */	;\
+	movl	%ecx,R_SS(thread)	/* store in PCB */
+
+/*
+ * Transfer a stack frame from a thread's PCB to the stack pointed
+ * to by the PCB.  We assume the thread address has been loaded into
+ * a register (our argument).
+ *
+ * The macro overwrites edi, esi, ecx and whatever register holds the
+ * thread address (which can't be one of the above three).  The
+ * thread address is overwritten with the address of its saved state
+ * (where the frame winds up).
+ *
+ * Must be called on kernel stack.
+ */
+#define FRAME_PCB_TO_STACK(thread)				;\
+	movl	ACT_PCB(thread),%esi	/* get act`s PCB */	;\
+	leal	PCB_ISS(%esi),%esi	/* point to PCB`s saved state */;\
+	movl	R_UESP(%esi),%edi	/* point to end of dest frame */;\
+	movl	ACT_MAP(thread),%ecx	/* get act's map */	;\
+	movl	MAP_PMAP(%ecx),%ecx	/* get map's pmap */	;\
+	cmpl	EXT(kernel_pmap), %ecx	/* If kernel loaded task */ ;\
+	jz	1f			/* use kernel data segment */ ;\
+	movl	$USER_DS,%cx		/* else use user data segment */;\
+	mov	%cx,%es						;\
+1:								;\
+	movl	$R_UESP,%ecx					;\
+	subl	%ecx,%edi		/* derive start of frame */ ;\
+	movl	%edi,thread		/* save for later */	;\
+	sarl	$2,%ecx			/* word count for transfer */ ;\
+	cld				/* we`re incrementing */ ;\
+	rep							;\
+	movsl				/* transfer the frame */ ;\
+	mov	%ss,%cx			/* restore kernel segments */ ;\
+	mov	%cx,%es
+
+#undef PDEBUG
+
+#ifdef PDEBUG
+
+/*
+ * Traditional (pre-ANSI) token pasting via empty comment, not ANSI ##.
+ */
+#define CAH(label)						\
+	.data							;\
+	.globl	label/**/count					;\
+label/**/count:							;\
+	.long	0						;\
+	.globl	label/**/limit					;\
+label/**/limit:							;\
+	.long	0						;\
+	.text							;\
+	addl	$1,%ss:label/**/count				;\
+	cmpl	$0,label/**/limit				;\
+	jz	label/**/exit					;\
+	pushl	%eax						;\
+label/**/loop:							;\
+	movl	%ss:label/**/count,%eax				;\
+	cmpl	%eax,%ss:label/**/limit				;\
+	je	label/**/loop					;\
+	popl	%eax						;\
+label/**/exit:
+
+#else	/* PDEBUG */
+
+#define CAH(label)
+
+#endif	/* PDEBUG */
+
+#if	MACH_KDB
+/*
+ * Last-ditch debug code to handle faults that might result
+ * from entering kernel (from collocated server) on an invalid
+ * stack.  On collocated entry, there's no hardware-initiated
+ * stack switch, so a valid stack must be in place when an
+ * exception occurs, or we may double-fault.
+ *
+ * In case of a double-fault, our only recourse is to switch
+ * hardware "tasks", so that we avoid using the current stack.
+ *
+ * The idea here is just to get the processor into the debugger,
+ * post-haste.
No attempt is made to fix up whatever error got + * us here, so presumably continuing from the debugger will + * simply land us here again -- at best. + */ +#if 0 +/* + * Note that the per-fault entry points are not currently + * functional. The only way to make them work would be to + * set up separate TSS's for each fault type, which doesn't + * currently seem worthwhile. (The offset part of a task + * gate is always ignored.) So all faults that task switch + * currently resume at db_task_start. + */ +/* + * Double fault (Murphy's point) - error code (0) on stack + */ +Entry(db_task_dbl_fault) + popl %eax + movl $(T_DOUBLE_FAULT),%ebx + jmp db_task_start +/* + * Segment not present - error code on stack + */ +Entry(db_task_seg_np) + popl %eax + movl $(T_SEGMENT_NOT_PRESENT),%ebx + jmp db_task_start +/* + * Stack fault - error code on (current) stack + */ +Entry(db_task_stk_fault) + popl %eax + movl $(T_STACK_FAULT),%ebx + jmp db_task_start +/* + * General protection fault - error code on stack + */ +Entry(db_task_gen_prot) + popl %eax + movl $(T_GENERAL_PROTECTION),%ebx + jmp db_task_start +#endif /* 0 */ +/* + * The entry point where execution resumes after last-ditch debugger task + * switch. + */ +Entry(db_task_start) + movl %esp,%edx + subl $ISS_SIZE,%edx + movl %edx,%esp /* allocate i386_saved_state on stack */ + movl %eax,R_ERR(%esp) + movl %ebx,R_TRAPNO(%esp) + pushl %edx +#if NCPUS > 1 + CPU_NUMBER(%edx) + movl CX(EXT(mp_dbtss),%edx),%edx + movl TSS_LINK(%edx),%eax +#else + movl EXT(dbtss)+TSS_LINK,%eax +#endif + pushl %eax /* pass along selector of previous TSS */ + call EXT(db_tss_to_frame) + popl %eax /* get rid of TSS selector */ + call EXT(db_trap_from_asm) + addl $0x4,%esp + /* + * And now...? + */ + iret /* ha, ha, ha... */ +#endif /* MACH_KDB */ + +/* + * Trap/interrupt entry points. 
+ * + * All traps must create the following save area on the PCB "stack": + * + * gs + * fs + * es + * ds + * edi + * esi + * ebp + * cr2 if page fault - otherwise unused + * ebx + * edx + * ecx + * eax + * trap number + * error code + * eip + * cs + * eflags + * user esp - if from user + * user ss - if from user + * es - if from V86 thread + * ds - if from V86 thread + * fs - if from V86 thread + * gs - if from V86 thread + * + */ + +/* + * General protection or segment-not-present fault. + * Check for a GP/NP fault in the kernel_return + * sequence; if there, report it as a GP/NP fault on the user's instruction. + * + * esp-> 0: trap code (NP or GP) + * 4: segment number in error + * 8 eip + * 12 cs + * 16 eflags + * 20 old registers (trap is from kernel) + */ +Entry(t_gen_prot) + pushl $(T_GENERAL_PROTECTION) /* indicate fault type */ + jmp trap_check_kernel_exit /* check for kernel exit sequence */ + +Entry(t_segnp) + pushl $(T_SEGMENT_NOT_PRESENT) + /* indicate fault type */ + +trap_check_kernel_exit: + testl $(EFL_VM),16(%esp) /* is trap from V86 mode? */ + jnz EXT(alltraps) /* isn`t kernel trap if so */ + testl $3,12(%esp) /* is trap from kernel mode? */ + jne EXT(alltraps) /* if so: */ + /* check for the kernel exit sequence */ + cmpl $EXT(kret_iret),8(%esp) /* on IRET? */ + je fault_iret + cmpl $EXT(kret_popl_ds),8(%esp) /* popping DS? */ + je fault_popl_ds + cmpl $EXT(kret_popl_es),8(%esp) /* popping ES? */ + je fault_popl_es + cmpl $EXT(kret_popl_fs),8(%esp) /* popping FS? */ + je fault_popl_fs + cmpl $EXT(kret_popl_gs),8(%esp) /* popping GS? */ + je fault_popl_gs +take_fault: /* if none of the above: */ + jmp EXT(alltraps) /* treat as normal trap. */ + +/* + * GP/NP fault on IRET: CS or SS is in error. + * All registers contain the user's values. 
+ * + * on SP is + * 0 trap number + * 4 errcode + * 8 eip + * 12 cs --> trapno + * 16 efl --> errcode + * 20 user eip + * 24 user cs + * 28 user eflags + * 32 user esp + * 36 user ss + */ +fault_iret: + movl %eax,8(%esp) /* save eax (we don`t need saved eip) */ + popl %eax /* get trap number */ + movl %eax,12-4(%esp) /* put in user trap number */ + popl %eax /* get error code */ + movl %eax,16-8(%esp) /* put in user errcode */ + popl %eax /* restore eax */ + CAH(fltir) + jmp EXT(alltraps) /* take fault */ + +/* + * Fault restoring a segment register. The user's registers are still + * saved on the stack. The offending segment register has not been + * popped. + */ +fault_popl_ds: + popl %eax /* get trap number */ + popl %edx /* get error code */ + addl $12,%esp /* pop stack to user regs */ + jmp push_es /* (DS on top of stack) */ +fault_popl_es: + popl %eax /* get trap number */ + popl %edx /* get error code */ + addl $12,%esp /* pop stack to user regs */ + jmp push_fs /* (ES on top of stack) */ +fault_popl_fs: + popl %eax /* get trap number */ + popl %edx /* get error code */ + addl $12,%esp /* pop stack to user regs */ + jmp push_gs /* (FS on top of stack) */ +fault_popl_gs: + popl %eax /* get trap number */ + popl %edx /* get error code */ + addl $12,%esp /* pop stack to user regs */ + jmp push_segregs /* (GS on top of stack) */ + +push_es: + pushl %es /* restore es, */ +push_fs: + pushl %fs /* restore fs, */ +push_gs: + pushl %gs /* restore gs. */ +push_segregs: + movl %eax,R_TRAPNO(%esp) /* set trap number */ + movl %edx,R_ERR(%esp) /* set error code */ + CAH(fltpp) + jmp trap_set_segs /* take trap */ + +/* + * Debug trap. Check for single-stepping across system call into + * kernel. If this is the case, taking the debug trap has turned + * off single-stepping - save the flags register with the trace + * bit set. + */ +Entry(t_debug) + testl $(EFL_VM),8(%esp) /* is trap from V86 mode? 
*/ + jnz 0f /* isn`t kernel trap if so */ + testl $3,4(%esp) /* is trap from kernel mode? */ + jnz 0f /* if so: */ + cmpl $syscall_entry,(%esp) /* system call entry? */ + jne 0f /* if so: */ + /* flags are sitting where syscall */ + /* wants them */ + addl $8,%esp /* remove eip/cs */ + jmp syscall_entry_2 /* continue system call entry */ + +0: pushl $0 /* otherwise: */ + pushl $(T_DEBUG) /* handle as normal */ + jmp EXT(alltraps) /* debug fault */ + +/* + * Page fault traps save cr2. + */ +Entry(t_page_fault) + pushl $(T_PAGE_FAULT) /* mark a page fault trap */ + pusha /* save the general registers */ + movl %cr2,%eax /* get the faulting address */ + movl %eax,12(%esp) /* save in esp save slot */ + jmp trap_push_segs /* continue fault */ + +/* + * All 'exceptions' enter here with: + * esp-> trap number + * error code + * old eip + * old cs + * old eflags + * old esp if trapped from user + * old ss if trapped from user + * + * NB: below use of CPU_NUMBER assumes that macro will use correct + * segment register for any kernel data accesses. + */ +Entry(alltraps) + pusha /* save the general registers */ +trap_push_segs: + pushl %ds /* save the segment registers */ + pushl %es + pushl %fs + pushl %gs + +trap_set_segs: + movl %ss,%ax + movl %ax,%ds + movl %ax,%es /* switch to kernel data seg */ + cld /* clear direction flag */ + testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */ + jnz trap_from_user /* user mode trap if so */ + testb $3,R_CS(%esp) /* user mode trap? */ + jnz trap_from_user + CPU_NUMBER(%edx) + cmpl $0,CX(EXT(active_kloaded),%edx) + je trap_from_kernel /* if clear, truly in kernel */ +#ifdef FIXME + cmpl ETEXT_ADDR,R_EIP(%esp) /* pc within kernel? */ + jb trap_from_kernel +#endif +trap_from_kloaded: + /* + * We didn't enter here "through" PCB (i.e., using ring 0 stack), + * so transfer the stack frame into the PCB explicitly, then + * start running on resulting "PCB stack". 
We have to set + * up a simulated "uesp" manually, since there's none in the + * frame. + */ + mov $CPU_DATA,%dx + mov %dx,%gs + CAH(atstart) + CPU_NUMBER(%edx) + movl CX(EXT(active_kloaded),%edx),%ebx + movl CX(EXT(kernel_stack),%edx),%eax + xchgl %esp,%eax + FRAME_STACK_TO_PCB(%ebx,%eax) + CAH(atend) + jmp EXT(take_trap) + +trap_from_user: + mov $CPU_DATA,%ax + mov %ax,%gs + + CPU_NUMBER(%edx) + TIME_TRAP_UENTRY + + movl CX(EXT(kernel_stack),%edx),%ebx + xchgl %ebx,%esp /* switch to kernel stack */ + /* user regs pointer already set */ +LEXT(take_trap) + pushl %ebx /* record register save area */ + pushl %ebx /* pass register save area to trap */ + call EXT(user_trap) /* call user trap routine */ + movl 4(%esp),%esp /* switch back to PCB stack */ + +/* + * Return from trap or system call, checking for ASTs. + * On PCB stack. + */ + +LEXT(return_from_trap) + CPU_NUMBER(%edx) + cmpl $0,CX(EXT(need_ast),%edx) + je EXT(return_to_user) /* if we need an AST: */ + + movl CX(EXT(kernel_stack),%edx),%esp + /* switch to kernel stack */ + pushl $0 /* push preemption flag */ + call EXT(i386_astintr) /* take the AST */ + addl $4,%esp /* pop preemption flag */ + popl %esp /* switch back to PCB stack (w/exc link) */ + jmp EXT(return_from_trap) /* and check again (rare) */ + /* ASTs after this point will */ + /* have to wait */ + +/* + * Arrange the checks needed for kernel-loaded (or kernel-loading) + * threads so that branch is taken in kernel-loaded case. 
+ */ +LEXT(return_to_user) + TIME_TRAP_UEXIT + CPU_NUMBER(%eax) + cmpl $0,CX(EXT(active_kloaded),%eax) + jnz EXT(return_xfer_stack) + movl $CPD_ACTIVE_THREAD,%ebx + movl %gs:(%ebx),%ebx /* get active thread */ + movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */ + cmpl $0,ACT_KLOADING(%ebx) /* check if kernel-loading */ + jnz EXT(return_kernel_loading) + +#if MACH_RT +#if MACH_ASSERT + movl $CPD_PREEMPTION_LEVEL,%ebx + cmpl $0,%gs:(%ebx) + je EXT(return_from_kernel) + int $3 +#endif /* MACH_ASSERT */ +#endif /* MACH_RT */ + +/* + * Return from kernel mode to interrupted thread. + */ + +LEXT(return_from_kernel) +LEXT(kret_popl_gs) + popl %gs /* restore segment registers */ +LEXT(kret_popl_fs) + popl %fs +LEXT(kret_popl_es) + popl %es +LEXT(kret_popl_ds) + popl %ds + popa /* restore general registers */ + addl $8,%esp /* discard trap number and error code */ + +LEXT(kret_iret) + iret /* return from interrupt */ + + +LEXT(return_xfer_stack) + /* + * If we're on PCB stack in a kernel-loaded task, we have + * to transfer saved state back to thread stack and swap + * stack pointers here, because the hardware's not going + * to do so for us. + */ + CAH(rxsstart) + CPU_NUMBER(%eax) + movl CX(EXT(kernel_stack),%eax),%esp + movl CX(EXT(active_kloaded),%eax),%eax + FRAME_PCB_TO_STACK(%eax) + movl %eax,%esp + CAH(rxsend) + jmp EXT(return_from_kernel) + +/* + * Hate to put this here, but setting up a separate swap_func for + * kernel-loaded threads no longer works, since thread executes + * "for a while" (i.e., until it reaches glue code) when first + * created, even if it's nominally suspended. Hence we can't + * transfer the PCB when the thread first resumes, because we + * haven't initialized it yet. + */ +/* + * Have to force transfer to new stack "manually". Use a string + * move to transfer all of our saved state to the stack pointed + * to by iss.uesp, then install a pointer to it as our current + * stack pointer. 
+ */ +LEXT(return_kernel_loading) + CPU_NUMBER(%eax) + movl CX(EXT(kernel_stack),%eax),%esp + movl $CPD_ACTIVE_THREAD,%ebx + movl %gs:(%ebx),%ebx /* get active thread */ + movl TH_TOP_ACT(%ebx),%ebx /* get thread->top_act */ + movl %ebx,%edx /* save for later */ + movl $0,ACT_KLOADING(%edx) /* clear kernel-loading bit */ + FRAME_PCB_TO_STACK(%ebx) + movl %ebx,%esp /* start running on new stack */ + movl $1,ACT_KLOADED(%edx) /* set kernel-loaded bit */ + movl %edx,CX(EXT(active_kloaded),%eax) /* set cached indicator */ + jmp EXT(return_from_kernel) + +/* + * Trap from kernel mode. No need to switch stacks or load segment registers. + */ +trap_from_kernel: +#if MACH_KDB || MACH_KGDB + mov $CPU_DATA,%ax + mov %ax,%gs + movl %esp,%ebx /* save current stack */ + + cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */ + jb 6f /* OK if so */ + +#if MACH_KGDB + cmpl $0,EXT(kgdb_active) /* Unexpected trap in kgdb */ + je 0f /* no */ + + pushl %esp /* Already on kgdb stack */ + cli + call EXT(kgdb_trap) + addl $4,%esp + jmp EXT(return_from_kernel) +0: /* should kgdb handle this exception? */ + cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */ + je 2f /* yes */ + cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp) /* page fault? */ + je 2f /* yes */ +1: + cli /* disable interrupts */ + CPU_NUMBER(%edx) /* get CPU number */ + movl CX(EXT(kgdb_stacks),%edx),%ebx + xchgl %ebx,%esp /* switch to kgdb stack */ + pushl %ebx /* pass old sp as an arg */ + call EXT(kgdb_from_kernel) + popl %esp /* switch back to kernel stack */ + jmp EXT(return_from_kernel) +2: +#endif /* MACH_KGDB */ + +#if MACH_KDB + cmpl $0,EXT(db_active) /* could trap be from ddb? 
*/ + je 3f /* no */ +#if NCPUS > 1 + CPU_NUMBER(%edx) /* see if this CPU is in ddb */ + cmpl $0,CX(EXT(kdb_active),%edx) + je 3f /* no */ +#endif /* NCPUS > 1 */ + pushl %esp + call EXT(db_trap_from_asm) + addl $0x4,%esp + jmp EXT(return_from_kernel) + +3: + /* + * Dilemma: don't want to switch to kernel_stack if trap + * "belongs" to ddb; don't want to switch to db_stack if + * trap "belongs" to kernel. So have to duplicate here the + * set of trap types that kernel_trap() handles. Note that + * "unexpected" page faults will not be handled by kernel_trap(). + * In this panic-worthy case, we fall into the debugger with + * kernel_stack containing the call chain that led to the + * bogus fault. + */ + movl R_TRAPNO(%esp),%edx + cmpl $(T_PAGE_FAULT),%edx + je 4f + cmpl $(T_NO_FPU),%edx + je 4f + cmpl $(T_FPU_FAULT),%edx + je 4f + cmpl $(T_FLOATING_POINT_ERROR),%edx + je 4f + cmpl $(T_PREEMPT),%edx + jne 7f +4: +#endif /* MACH_KDB */ + + CPU_NUMBER(%edx) /* get CPU number */ + cmpl CX(EXT(kernel_stack),%edx),%esp + /* if not already on kernel stack, */ + ja 5f /* check some more */ + cmpl CX(EXT(active_stacks),%edx),%esp + ja 6f /* on kernel stack: no switch */ +5: + movl CX(EXT(kernel_stack),%edx),%esp +6: + pushl %ebx /* save old stack */ + pushl %ebx /* pass as parameter */ + call EXT(kernel_trap) /* to kernel trap routine */ + addl $4,%esp /* pop parameter */ + testl %eax,%eax + jne 8f + /* + * If kernel_trap returns false, trap wasn't handled. 
+ */ +7: +#if MACH_KDB + CPU_NUMBER(%edx) + movl CX(EXT(db_stacks),%edx),%esp + pushl %ebx /* pass old stack as parameter */ + call EXT(db_trap_from_asm) +#endif /* MACH_KDB */ +#if MACH_KGDB + cli /* disable interrupts */ + CPU_NUMBER(%edx) /* get CPU number */ + movl CX(EXT(kgdb_stacks),%edx),%esp + pushl %ebx /* pass old stack as parameter */ + call EXT(kgdb_from_kernel) +#endif /* MACH_KGDB */ + addl $4,%esp /* pop parameter */ + testl %eax,%eax + jne 8f + /* + * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap + * wasn't handled. + */ + pushl %ebx /* pass old stack as parameter */ + call EXT(panic_trap) + addl $4,%esp /* pop parameter */ +8: + movl %ebx,%esp /* get old stack (from callee-saves reg) */ +#else /* MACH_KDB || MACH_KGDB */ + pushl %esp /* pass parameter */ + call EXT(kernel_trap) /* to kernel trap routine */ + addl $4,%esp /* pop parameter */ +#endif /* MACH_KDB || MACH_KGDB */ + +#if MACH_RT + CPU_NUMBER(%edx) + + movl CX(EXT(need_ast),%edx),%eax /* get pending asts */ + testl $AST_URGENT,%eax /* any urgent preemption? */ + je EXT(return_from_kernel) /* no, nothing to do */ + cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */ + je EXT(return_from_kernel) /* no, skip it */ + cmpl $T_PREEMPT,48(%esp) /* preempt request? */ + jne EXT(return_from_kernel) /* no, nothing to do */ + movl CX(EXT(kernel_stack),%edx),%eax + movl %esp,%ecx + xorl %eax,%ecx + andl $(-KERNEL_STACK_SIZE),%ecx + testl %ecx,%ecx /* are we on the kernel stack? 
*/ + jne EXT(return_from_kernel) /* no, skip it */ + +#if PREEMPT_DEBUG_LOG + pushl 28(%esp) /* stack pointer */ + pushl 24+4(%esp) /* frame pointer */ + pushl 56+8(%esp) /* stack pointer */ + pushl $0f + call EXT(log_thread_action) + addl $16, %esp + .data +0: String "trap preempt eip" + .text +#endif /* PREEMPT_DEBUG_LOG */ + + pushl $1 /* push preemption flag */ + call EXT(i386_astintr) /* take the AST */ + addl $4,%esp /* pop preemption flag */ +#endif /* MACH_RT */ + + jmp EXT(return_from_kernel) + +/* + * Called as a function, makes the current thread + * return from the kernel as if from an exception. + */ + + .globl EXT(thread_exception_return) + .globl EXT(thread_bootstrap_return) +LEXT(thread_exception_return) +LEXT(thread_bootstrap_return) + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + jmp EXT(return_from_trap) + +Entry(call_continuation) + movl S_ARG0,%eax /* get continuation */ + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + addl $(-3-IKS_SIZE),%ecx + movl %ecx,%esp /* pop the stack */ + xorl %ebp,%ebp /* zero frame pointer */ + jmp *%eax /* goto continuation */ + +#if 0 +#define LOG_INTERRUPT(info,msg) \ + pushal ; \ + pushl msg ; \ + pushl info ; \ + call EXT(log_thread_action) ; \ + add $8,%esp ; \ + popal +#define CHECK_INTERRUPT_TIME(n) \ + pushal ; \ + pushl $n ; \ + call EXT(check_thread_time) ; \ + add $4,%esp ; \ + popal +#else +#define LOG_INTERRUPT(info,msg) +#define CHECK_INTERRUPT_TIME(n) +#endif + +imsg_start: + String "interrupt start" +imsg_end: + String "interrupt end" + +/* + * All interrupts enter here. + * old %eax on stack; interrupt number in %eax. + */ +Entry(all_intrs) + pushl %ecx /* save registers */ + pushl %edx + cld /* clear direction flag */ + + cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? 
*/ + jb int_from_intstack /* if not: */ + + pushl %ds /* save segment registers */ + pushl %es + mov %ss,%dx /* switch to kernel segments */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + + CPU_NUMBER(%edx) + + movl CX(EXT(int_stack_top),%edx),%ecx + xchgl %ecx,%esp /* switch to interrupt stack */ + +#if STAT_TIME + pushl %ecx /* save pointer to old stack */ +#else + pushl %ebx /* save %ebx - out of the way */ + /* so stack looks the same */ + pushl %ecx /* save pointer to old stack */ + TIME_INT_ENTRY /* do timing */ +#endif + +#if MACH_RT + movl $CPD_PREEMPTION_LEVEL,%edx + incl %gs:(%edx) +#endif /* MACH_RT */ + + movl $CPD_INTERRUPT_LEVEL,%edx + incl %gs:(%edx) + + pushl %eax /* Push trap number */ + call EXT(PE_incoming_interrupt) /* call generic interrupt routine */ + addl $4,%esp /* Pop trap number */ + + .globl EXT(return_to_iret) +LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */ + + movl $CPD_INTERRUPT_LEVEL,%edx + decl %gs:(%edx) + +#if MACH_RT + movl $CPD_PREEMPTION_LEVEL,%edx + decl %gs:(%edx) +#endif /* MACH_RT */ + +#if STAT_TIME +#else + TIME_INT_EXIT /* do timing */ + movl 4(%esp),%ebx /* restore the extra reg we saved */ +#endif + + popl %esp /* switch back to old stack */ + + CPU_NUMBER(%edx) + movl CX(EXT(need_ast),%edx),%eax + testl %eax,%eax /* any pending asts? */ + je 1f /* no, nothing to do */ + testl $(EFL_VM),I_EFL(%esp) /* if in V86 */ + jnz ast_from_interrupt /* take it */ + testb $3,I_CS(%esp) /* user mode, */ + jnz ast_from_interrupt /* take it */ +#ifdef FIXME + cmpl ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */ + jnb ast_from_interrupt /* take it */ +#endif + +#if MACH_RT + cmpl $0,EXT(preemptable) /* kernel-mode, preemption enabled? */ + je 1f /* no, skip it */ + movl $CPD_PREEMPTION_LEVEL,%ecx + cmpl $0,%gs:(%ecx) /* preemption masked? */ + jne 1f /* yes, skip it */ + testl $AST_URGENT,%eax /* any urgent requests? 
*/ + je 1f /* no, skip it */ + cmpl $LEXT(locore_end),I_EIP(%esp) /* are we in locore code? */ + jb 1f /* yes, skip it */ + movl CX(EXT(kernel_stack),%edx),%eax + movl %esp,%ecx + xorl %eax,%ecx + andl $(-KERNEL_STACK_SIZE),%ecx + testl %ecx,%ecx /* are we on the kernel stack? */ + jne 1f /* no, skip it */ + +/* + * Take an AST from kernel space. We don't need (and don't want) + * to do as much as the case where the interrupt came from user + * space. + */ +#if PREEMPT_DEBUG_LOG + pushl $0 + pushl $0 + pushl I_EIP+8(%esp) + pushl $0f + call EXT(log_thread_action) + addl $16, %esp + .data +0: String "intr preempt eip" + .text +#endif /* PREEMPT_DEBUG_LOG */ + + sti + pushl $1 /* push preemption flag */ + call EXT(i386_astintr) /* take the AST */ + addl $4,%esp /* pop preemption flag */ +#endif /* MACH_RT */ + +1: + pop %es /* restore segment regs */ + pop %ds + pop %edx + pop %ecx + pop %eax + iret /* return to caller */ + +int_from_intstack: +#if MACH_RT + movl $CPD_PREEMPTION_LEVEL,%edx + incl %gs:(%edx) +#endif /* MACH_RT */ + + movl $CPD_INTERRUPT_LEVEL,%edx + incl %gs:(%edx) + + pushl %eax /* Push trap number */ + + call EXT(PE_incoming_interrupt) + +LEXT(return_to_iret_i) /* ( label for kdb_kintr) */ + + addl $4,%esp /* pop trap number */ + + movl $CPD_INTERRUPT_LEVEL,%edx + decl %gs:(%edx) + +#if MACH_RT + movl $CPD_PREEMPTION_LEVEL,%edx + decl %gs:(%edx) +#endif /* MACH_RT */ + + pop %edx /* must have been on kernel segs */ + pop %ecx + pop %eax /* no ASTs */ + iret + +/* + * Take an AST from an interrupt. + * On PCB stack. + * sp-> es -> edx + * ds -> ecx + * edx -> eax + * ecx -> trapno + * eax -> code + * eip + * cs + * efl + * esp + * ss + */ +ast_from_interrupt: + pop %es /* restore all registers ... 
*/ + pop %ds + popl %edx + popl %ecx + popl %eax + sti /* Reenable interrupts */ + pushl $0 /* zero code */ + pushl $0 /* zero trap number */ + pusha /* save general registers */ + push %ds /* save segment registers */ + push %es + push %fs + push %gs + mov %ss,%dx /* switch to kernel segments */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + + /* + * See if we interrupted a kernel-loaded thread executing + * in its own task. + */ + CPU_NUMBER(%edx) + testl $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */ + jnz 0f /* user mode trap if so */ + testb $3,R_CS(%esp) + jnz 0f /* user mode, back to normal */ +#ifdef FIXME + cmpl ETEXT_ADDR,R_EIP(%esp) + jb 0f /* not kernel-loaded, back to normal */ +#endif + + /* + * Transfer the current stack frame by hand into the PCB. + */ + CAH(afistart) + movl CX(EXT(active_kloaded),%edx),%eax + movl CX(EXT(kernel_stack),%edx),%ebx + xchgl %ebx,%esp + FRAME_STACK_TO_PCB(%eax,%ebx) + CAH(afiend) + TIME_TRAP_UENTRY + jmp 3f +0: + TIME_TRAP_UENTRY + + movl CX(EXT(kernel_stack),%edx),%eax + /* switch to kernel stack */ + xchgl %eax,%esp +3: + pushl %eax + pushl $0 /* push preemption flag */ + call EXT(i386_astintr) /* take the AST */ + addl $4,%esp /* pop preemption flag */ + popl %esp /* back to PCB stack */ + jmp EXT(return_from_trap) /* return */ + +#if MACH_KDB || MACH_KGDB +/* + * kdb_kintr: enter kdb from keyboard interrupt. + * Chase down the stack frames until we find one whose return + * address is the interrupt handler. 
At that point, we have: + * + * frame-> saved %ebp + * return address in interrupt handler + * ivect + * saved SPL + * return address == return_to_iret_i + * saved %edx + * saved %ecx + * saved %eax + * saved %eip + * saved %cs + * saved %efl + * + * OR: + * frame-> saved %ebp + * return address in interrupt handler + * ivect + * saved SPL + * return address == return_to_iret + * pointer to save area on old stack + * [ saved %ebx, if accurate timing ] + * + * old stack: saved %es + * saved %ds + * saved %edx + * saved %ecx + * saved %eax + * saved %eip + * saved %cs + * saved %efl + * + * Call kdb, passing it that register save area. + */ + +#if MACH_KGDB +Entry(kgdb_kintr) +#endif /* MACH_KGDB */ +#if MACH_KDB +Entry(kdb_kintr) +#endif /* MACH_KDB */ + movl %ebp,%eax /* save caller`s frame pointer */ + movl $EXT(return_to_iret),%ecx /* interrupt return address 1 */ + movl $EXT(return_to_iret_i),%edx /* interrupt return address 2 */ + +0: cmpl 16(%eax),%ecx /* does this frame return to */ + /* interrupt handler (1)? */ + je 1f + cmpl $kdb_from_iret,16(%eax) + je 1f + cmpl 16(%eax),%edx /* interrupt handler (2)? 
*/ + je 2f /* if not: */ + cmpl $kdb_from_iret_i,16(%eax) + je 2f + movl (%eax),%eax /* try next frame */ + jmp 0b + +1: movl $kdb_from_iret,16(%eax) /* returns to kernel/user stack */ + ret + +2: movl $kdb_from_iret_i,16(%eax) + /* returns to interrupt stack */ + ret + +/* + * On return from keyboard interrupt, we will execute + * kdb_from_iret_i + * if returning to an interrupt on the interrupt stack + * kdb_from_iret + * if returning to an interrupt on the user or kernel stack + */ +kdb_from_iret: + /* save regs in known locations */ +#if STAT_TIME + pushl %ebx /* caller`s %ebx is in reg */ +#else + movl 4(%esp),%eax /* get caller`s %ebx */ + pushl %eax /* push on stack */ +#endif + pushl %ebp + pushl %esi + pushl %edi + push %fs + push %gs +#if MACH_KGDB + cli + pushl %esp /* pass regs */ + call EXT(kgdb_kentry) /* to kgdb */ + addl $4,%esp /* pop parameters */ +#endif /* MACH_KGDB */ +#if MACH_KDB + pushl %esp /* pass regs */ + call EXT(kdb_kentry) /* to kdb */ + addl $4,%esp /* pop parameters */ +#endif /* MACH_KDB */ + pop %gs /* restore registers */ + pop %fs + popl %edi + popl %esi + popl %ebp +#if STAT_TIME + popl %ebx +#else + popl %eax + movl %eax,4(%esp) +#endif + jmp EXT(return_to_iret) /* normal interrupt return */ + +kdb_from_iret_i: /* on interrupt stack */ + pop %edx /* restore saved registers */ + pop %ecx + pop %eax + pushl $0 /* zero error code */ + pushl $0 /* zero trap number */ + pusha /* save general registers */ + push %ds /* save segment registers */ + push %es + push %fs + push %gs +#if MACH_KGDB + cli /* disable interrupts */ + CPU_NUMBER(%edx) /* get CPU number */ + movl CX(EXT(kgdb_stacks),%edx),%ebx + xchgl %ebx,%esp /* switch to kgdb stack */ + pushl %ebx /* pass old sp as an arg */ + call EXT(kgdb_from_kernel) + popl %esp /* switch back to interrupt stack */ +#endif /* MACH_KGDB */ +#if MACH_KDB + pushl %esp /* pass regs, */ + pushl $0 /* code, */ + pushl $-1 /* type to kdb */ + call EXT(kdb_trap) + addl $12,%esp +#endif /* 
MACH_KDB */ + pop %gs /* restore segment registers */ + pop %fs + pop %es + pop %ds + popa /* restore general registers */ + addl $8,%esp + iret + +#endif /* MACH_KDB || MACH_KGDB */ + + +/* + * Mach RPC enters through a call gate, like a system call. + */ + +Entry(mach_rpc) + pushf /* save flags as soon as possible */ + pushl %eax /* save system call number */ + pushl $0 /* clear trap number slot */ + + pusha /* save the general registers */ + pushl %ds /* and the segment registers */ + pushl %es + pushl %fs + pushl %gs + + mov %ss,%dx /* switch to kernel data segment */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + +/* + * Shuffle eflags,eip,cs into proper places + */ + + movl R_EIP(%esp),%ebx /* eflags are in EIP slot */ + movl R_CS(%esp),%ecx /* eip is in CS slot */ + movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */ + movl %ecx,R_EIP(%esp) /* fix eip */ + movl %edx,R_CS(%esp) /* fix cs */ + movl %ebx,R_EFLAGS(%esp) /* fix eflags */ + + CPU_NUMBER(%edx) + TIME_TRAP_UENTRY + + negl %eax /* get system call number */ + shll $4,%eax /* manual indexing */ + +/* + * Check here for mach_rpc from kernel-loaded task -- + * - Note that kernel-loaded task returns via real return. + * We didn't enter here "through" PCB (i.e., using ring 0 stack), + * so transfer the stack frame into the PCB explicitly, then + * start running on resulting "PCB stack". We have to set + * up a simulated "uesp" manually, since there's none in the + * frame. + */ + cmpl $0,CX(EXT(active_kloaded),%edx) + jz 2f + CAH(mrstart) + movl CX(EXT(active_kloaded),%edx),%ebx + movl CX(EXT(kernel_stack),%edx),%edx + xchgl %edx,%esp + + FRAME_STACK_TO_PCB(%ebx,%edx) + CAH(mrend) + + CPU_NUMBER(%edx) + jmp 3f + +2: + CPU_NUMBER(%edx) + movl CX(EXT(kernel_stack),%edx),%ebx + /* get current kernel stack */ + xchgl %ebx,%esp /* switch stacks - %ebx points to */ + /* user registers. 
*/ + +3: + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ +#undef RPC_TRAP_REGISTERS +#ifdef RPC_TRAP_REGISTERS + pushl R_ESI(%ebx) + pushl R_EDI(%ebx) + pushl R_ECX(%ebx) + pushl R_EDX(%ebx) +#else + movl EXT(mach_trap_table)(%eax),%ecx + /* get number of arguments */ + jecxz 2f /* skip argument copy if none */ + movl R_UESP(%ebx),%esi /* get user stack pointer */ + lea 4(%esi,%ecx,4),%esi /* skip user return address, */ + /* and point past last argument */ + /* edx holds cpu number from above */ + movl CX(EXT(active_kloaded),%edx),%edx + /* point to current thread */ + orl %edx,%edx /* if ! kernel-loaded, check addr */ + jz 4f /* else */ + mov %ds,%dx /* kernel data segment access */ + jmp 5f +4: + cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */ + ja mach_call_addr /* address error if not */ + movl $USER_DS,%edx /* user data segment access */ +5: + mov %dx,%fs + movl %esp,%edx /* save kernel ESP for error recovery */ +1: + subl $4,%esi + RECOVERY_SECTION + RECOVER(mach_call_addr_push) + pushl %fs:(%esi) /* push argument on stack */ + loop 1b /* loop for all arguments */ +#endif + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ +2: + CAH(call_call) + call *EXT(mach_trap_table)+4(%eax) + /* call procedure */ + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + movl %eax,R_EAX(%esp) /* save return value */ + jmp EXT(return_from_trap) /* return to user */ + + +/* + * Special system call entry for "int 0x80", which has the "eflags" + * register saved at the right place already. + * Fall back to the common syscall path after saving the registers. + * + * esp -> old eip + * old cs + * old eflags + * old esp if trapped from user + * old ss if trapped from user + * + * XXX: for the moment, we don't check for int 0x80 from kernel mode. 
+ */ +Entry(syscall_int80) + pushl %eax /* save system call number */ + pushl $0 /* clear trap number slot */ + + pusha /* save the general registers */ + pushl %ds /* and the segment registers */ + pushl %es + pushl %fs + pushl %gs + + mov %ss,%dx /* switch to kernel data segment */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + + jmp syscall_entry_3 + +/* + * System call enters through a call gate. Flags are not saved - + * we must shuffle stack to look like trap save area. + * + * esp-> old eip + * old cs + * old esp + * old ss + * + * eax contains system call number. + * + * NB: below use of CPU_NUMBER assumes that macro will use correct + * correct segment register for any kernel data accesses. + */ +Entry(syscall) +syscall_entry: + pushf /* save flags as soon as possible */ +syscall_entry_2: + pushl %eax /* save system call number */ + pushl $0 /* clear trap number slot */ + + pusha /* save the general registers */ + pushl %ds /* and the segment registers */ + pushl %es + pushl %fs + pushl %gs + + mov %ss,%dx /* switch to kernel data segment */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + +/* + * Shuffle eflags,eip,cs into proper places + */ + + movl R_EIP(%esp),%ebx /* eflags are in EIP slot */ + movl R_CS(%esp),%ecx /* eip is in CS slot */ + movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */ + movl %ecx,R_EIP(%esp) /* fix eip */ + movl %edx,R_CS(%esp) /* fix cs */ + movl %ebx,R_EFLAGS(%esp) /* fix eflags */ + +syscall_entry_3: + CPU_NUMBER(%edx) +/* + * Check here for syscall from kernel-loaded task -- + * We didn't enter here "through" PCB (i.e., using ring 0 stack), + * so transfer the stack frame into the PCB explicitly, then + * start running on resulting "PCB stack". We have to set + * up a simulated "uesp" manually, since there's none in the + * frame. 
+ */ + cmpl $0,CX(EXT(active_kloaded),%edx) + jz 0f + CAH(scstart) + movl CX(EXT(active_kloaded),%edx),%ebx + movl CX(EXT(kernel_stack),%edx),%edx + xchgl %edx,%esp + FRAME_STACK_TO_PCB(%ebx,%edx) + CAH(scend) + TIME_TRAP_UENTRY + CPU_NUMBER(%edx) + jmp 1f + +0: + TIME_TRAP_UENTRY + + CPU_NUMBER(%edx) + movl CX(EXT(kernel_stack),%edx),%ebx + /* get current kernel stack */ + xchgl %ebx,%esp /* switch stacks - %ebx points to */ + /* user registers. */ + /* user regs pointer already set */ + +/* + * Check for MACH or emulated system call + * Register use (from here till we begin processing call): + * eax contains system call number + * ebx points to user regs + */ +1: + movl $CPD_ACTIVE_THREAD,%edx + movl %gs:(%edx),%edx /* get active thread */ + /* point to current thread */ + movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ + movl ACT_TASK(%edx),%edx /* point to task */ + movl TASK_EMUL(%edx),%edx /* get emulation vector */ + orl %edx,%edx /* if none, */ + je syscall_native /* do native system call */ + movl %eax,%ecx /* copy system call number */ + subl DISP_MIN(%edx),%ecx /* get displacement into syscall */ + /* vector table */ + jl syscall_native /* too low - native system call */ + cmpl DISP_COUNT(%edx),%ecx /* check range */ + jnl syscall_native /* too high - native system call */ + movl DISP_VECTOR(%edx,%ecx,4),%edx + /* get the emulation vector */ + orl %edx,%edx /* emulated system call if not zero */ + jnz syscall_emul + +/* + * Native system call. 
+ * Register use on entry: + * eax contains syscall number + * ebx points to user regs + */ +syscall_native: + negl %eax /* get system call number */ + jl mach_call_range /* out of range if it was positive */ + + cmpl EXT(mach_trap_count),%eax /* check system call table bounds */ + jg mach_call_range /* error if out of range */ + shll $4,%eax /* manual indexing */ + + movl EXT(mach_trap_table)+4(%eax),%edx + /* get procedure */ + cmpl $EXT(kern_invalid),%edx /* if not "kern_invalid" */ + jne mach_syscall_native /* go on with Mach syscall */ + + movl $CPD_ACTIVE_THREAD,%edx + movl %gs:(%edx),%edx /* get active thread */ + /* point to current thread */ + movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ + movl ACT_TASK(%edx),%edx /* point to task */ + movl TASK_EMUL(%edx),%edx /* get emulation vector */ + orl %edx,%edx /* if it exists, */ + jne mach_syscall_native /* do native system call */ + shrl $4,%eax /* restore syscall number */ + jmp mach_call_range /* try it as a "server" syscall */ + +mach_syscall_native: + movl $CPD_ACTIVE_THREAD,%edx + movl %gs:(%edx),%edx /* get active thread */ + + movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ + movl ACT_MACH_EXC_PORT(%edx),%edx + movl $EXT(realhost),%ecx + movl HOST_NAME(%ecx),%ecx + cmpl %edx,%ecx /* act->mach_exc_port = host_name ? */ + je do_native_call /* -> send to kernel, do not collect $200 */ + cmpl $0,%edx /* thread->mach_exc_port = null ? */ + je try_task /* try task */ + jmp mach_syscall_exception + /* NOT REACHED */ + +try_task: + movl $CPD_ACTIVE_THREAD,%edx + movl %gs:(%edx),%edx /* get active thread */ + + movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ + movl ACT_TASK(%edx),%edx /* point to task */ + movl TASK_MACH_EXC_PORT(%edx),%edx + movl $EXT(realhost),%ecx + movl HOST_NAME(%ecx),%ecx + cmpl %edx,%ecx /* thread->mach_exc_port = host_name ? */ + je do_native_call /* -> send to kernel */ + cmpl $0,%edx /* thread->mach_exc_port = null ? 
*/ + je EXT(syscall_failed) /* try task */ + jmp mach_syscall_exception + /* NOT REACHED */ + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ +do_native_call: + movl EXT(mach_trap_table)(%eax),%ecx + /* get number of arguments */ + jecxz mach_call_call /* skip argument copy if none */ + movl R_UESP(%ebx),%esi /* get user stack pointer */ + lea 4(%esi,%ecx,4),%esi /* skip user return address, */ + /* and point past last argument */ + CPU_NUMBER(%edx) + movl CX(EXT(active_kloaded),%edx),%edx + /* point to current thread */ + orl %edx,%edx /* if kernel-loaded, skip addr check */ + jz 0f /* else */ + mov %ds,%dx /* kernel data segment access */ + jmp 1f +0: + cmpl $(VM_MAX_ADDRESS),%esi /* in user space? */ + ja mach_call_addr /* address error if not */ + movl $USER_DS,%edx /* user data segment access */ +1: + mov %dx,%fs + movl %esp,%edx /* save kernel ESP for error recovery */ +2: + subl $4,%esi + RECOVERY_SECTION + RECOVER(mach_call_addr_push) + pushl %fs:(%esi) /* push argument on stack */ + loop 2b /* loop for all arguments */ + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ +mach_call_call: + + CAH(call_call) + +#if ETAP_EVENT_MONITOR + cmpl $0x200, %eax /* is this mach_msg? 
*/ + jz make_syscall /* if yes, don't record event */ + + pushal /* Otherwise: save registers */ + pushl %eax /* push syscall number on stack*/ + call EXT(etap_machcall_probe1) /* call event begin probe */ + add $4,%esp /* restore stack */ + popal /* restore registers */ + + call *EXT(mach_trap_table)+4(%eax) /* call procedure */ + pushal + call EXT(etap_machcall_probe2) /* call event end probe */ + popal + jmp skip_syscall /* syscall already made */ +#endif /* ETAP_EVENT_MONITOR */ + +make_syscall: + call *EXT(mach_trap_table)+4(%eax) /* call procedure */ +skip_syscall: + + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + movl %eax,R_EAX(%esp) /* save return value */ + jmp EXT(return_from_trap) /* return to user */ + +/* + * Address out of range. Change to page fault. + * %esi holds failing address. + * Register use on entry: + * ebx contains user regs pointer + */ +mach_call_addr_push: + movl %edx,%esp /* clean parameters from stack */ +mach_call_addr: + movl %esi,R_CR2(%ebx) /* set fault address */ + movl $(T_PAGE_FAULT),R_TRAPNO(%ebx) + /* set page-fault trap */ + movl $(T_PF_USER),R_ERR(%ebx) + /* set error code - read user space */ + CAH(call_addr) + jmp EXT(take_trap) /* treat as a trap */ + +/* + * try sending mach system call exception to server + * Register use on entry: + * eax contains syscall number + */ +mach_syscall_exception: + push %eax /* code (syscall no.) */ + movl %esp,%edx + push $1 /* code_cnt = 1 */ + push %edx /* exception_type_t (see i/f docky) */ + push $EXC_MACH_SYSCALL /* exception */ + + CAH(exception) + call EXT(exception) + /* no return */ + +/* + * System call out of range. Treat as invalid-instruction trap. + * (? general protection?) 
+ * Register use on entry: + * eax contains syscall number + */ +mach_call_range: + movl $CPD_ACTIVE_THREAD,%edx + movl %gs:(%edx),%edx /* get active thread */ + + movl TH_TOP_ACT(%edx),%edx /* get thread->top_act */ + movl ACT_TASK(%edx),%edx /* point to task */ + movl TASK_EMUL(%edx),%edx /* get emulation vector */ + orl %edx,%edx /* if emulator, */ + jne EXT(syscall_failed) /* handle as illegal instruction */ + /* else generate syscall exception: */ + push %eax + movl %esp,%edx + push $1 /* code_cnt = 1 */ + push %edx /* exception_type_t (see i/f docky) */ + push $EXC_SYSCALL + CAH(call_range) + call EXT(exception) + /* no return */ + + .globl EXT(syscall_failed) +LEXT(syscall_failed) + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + CPU_NUMBER(%edx) + movl CX(EXT(kernel_stack),%edx),%ebx + /* get current kernel stack */ + xchgl %ebx,%esp /* switch stacks - %ebx points to */ + /* user registers. */ + /* user regs pointer already set */ + + movl $(T_INVALID_OPCODE),R_TRAPNO(%ebx) + /* set invalid-operation trap */ + movl $0,R_ERR(%ebx) /* clear error code */ + CAH(failed) + jmp EXT(take_trap) /* treat as a trap */ + +/* + * User space emulation of system calls. + * edx - user address to handle syscall + * + * User stack will become: + * uesp-> eflags + * eip + * Register use on entry: + * ebx contains user regs pointer + * edx contains emulator vector address + */ +syscall_emul: + movl R_UESP(%ebx),%edi /* get user stack pointer */ + CPU_NUMBER(%eax) + movl CX(EXT(active_kloaded),%eax),%eax + orl %eax,%eax /* if thread not kernel-loaded, */ + jz 0f /* do address checks */ + subl $8,%edi + mov %ds,%ax /* kernel data segment access */ + jmp 1f /* otherwise, skip them */ +0: + cmpl $(VM_MAX_ADDRESS),%edi /* in user space? */ + ja syscall_addr /* address error if not */ + subl $8,%edi /* push space for new arguments */ + cmpl $(VM_MIN_ADDRESS),%edi /* still in user space? 
*/ + jb syscall_addr /* error if not */ + movl $USER_DS,%ax /* user data segment access */ +1: + mov %ax,%fs + movl R_EFLAGS(%ebx),%eax /* move flags */ + RECOVERY_SECTION + RECOVER(syscall_addr) + movl %eax,%fs:0(%edi) /* to user stack */ + movl R_EIP(%ebx),%eax /* move eip */ + RECOVERY_SECTION + RECOVER(syscall_addr) + movl %eax,%fs:4(%edi) /* to user stack */ + movl %edi,R_UESP(%ebx) /* set new user stack pointer */ + movl %edx,R_EIP(%ebx) /* change return address to trap */ + movl %ebx,%esp /* back to PCB stack */ + CAH(emul) + jmp EXT(return_from_trap) /* return to user */ + + +/* + * Address error - address is in %edi. + * Register use on entry: + * ebx contains user regs pointer + */ +syscall_addr: + movl %edi,R_CR2(%ebx) /* set fault address */ + movl $(T_PAGE_FAULT),R_TRAPNO(%ebx) + /* set page-fault trap */ + movl $(T_PF_USER),R_ERR(%ebx) + /* set error code - read user space */ + CAH(addr) + jmp EXT(take_trap) /* treat as a trap */ + +/* */ +/* + * Utility routines. + */ + + +/* + * Copy from user address space. 
 * arg0:	user address
 * arg1:	kernel address
 * arg2:	byte count
 *
 * Returns 0 on success, EFAULT if the user range wraps or a
 * recoverable fault occurs during the copy.
 */
Entry(copyinmsg)
ENTRY(copyin)
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	8+S_ARG0,%esi		/* get user start address */
	movl	8+S_ARG1,%edi		/* get kernel destination address */
	movl	8+S_ARG2,%edx		/* get count */

	lea	0(%esi,%edx),%eax	/* get user end address + 1 */

	/* Walk active_thread->top_act->map->pmap; if the source task is
	 * kernel-loaded (kernel_pmap) the source is already reachable
	 * through %ds, otherwise load the user data segment. */
	movl	$CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx		/* get active thread */
	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jz	1f
	movl	$USER_DS,%cx		/* user data segment access */
					/* NOTE(review): "movl" with 16-bit
					 * %cx — assembler-dependent; verify */
	mov	%cx,%ds
1:
	cmpl	%esi,%eax
	jb	copyin_fail		/* fail if wrap-around */
	cld				/* count up */
	movl	%edx,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)		/* faults below land in copyin_fail */
	rep
	movsl				/* move longwords */
	movl	%edx,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyin_fail)
	rep
	movsb
	xorl	%eax,%eax		/* return 0 for success */
copy_ret:
	/* Shared epilogue (also used by copystr_fail below). */
	mov	%ss,%di			/* restore kernel data segment */
	mov	%di,%ds

	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyin_fail:
	movl	$EFAULT,%eax		/* return error for failure */
	jmp	copy_ret		/* pop frame and return */

/*
 * Copy string from user address space.
+ * arg0: user address + * arg1: kernel address + * arg2: max byte count + * arg3: actual byte count (OUT) + */ +Entry(copyinstr) + pushl %esi + pushl %edi /* save registers */ + + movl 8+S_ARG0,%esi /* get user start address */ + movl 8+S_ARG1,%edi /* get kernel destination address */ + movl 8+S_ARG2,%edx /* get count */ + + lea 0(%esi,%edx),%eax /* get user end address + 1 */ + + movl $CPD_ACTIVE_THREAD,%ecx + movl %gs:(%ecx),%ecx /* get active thread */ + movl TH_TOP_ACT(%ecx),%ecx /* get thread->top_act */ + movl ACT_MAP(%ecx),%ecx /* get act->map */ + movl MAP_PMAP(%ecx),%ecx /* get map->pmap */ + cmpl EXT(kernel_pmap), %ecx + jne 0f + mov %ds,%cx /* kernel data segment access */ + jmp 1f +0: + movl $USER_DS,%cx /* user data segment access */ +1: + mov %cx,%fs + xorl %eax,%eax + cmpl $0,%edx + je 4f +2: + RECOVERY_SECTION + RECOVER(copystr_fail) /* copy bytes... */ + movb %fs:(%esi),%eax + incl %esi + testl %edi,%edi /* if kernel address is ... */ + jz 3f /* not NULL */ + movb %eax,(%edi) /* copy the byte */ + incl %edi +3: + decl %edx + je 5f /* Zero count.. error out */ + cmpl $0,%eax + jne 2b /* .. a NUL found? */ + jmp 4f +5: + movl $ENAMETOOLONG,%eax /* String is too long.. */ +4: + xorl %eax,%eax /* return zero for success */ + movl 8+S_ARG3,%edi /* get OUT len ptr */ + cmpl $0,%edi + jz copystr_ret /* if null, just return */ + subl 8+S_ARG0,%esi + movl %esi,(%edi) /* else set OUT arg to xfer len */ +copystr_ret: + popl %edi /* restore registers */ + popl %esi + ret /* and return */ + +copystr_fail: + movl $EFAULT,%eax /* return error for failure */ + jmp copy_ret /* pop frame and return */ + +/* + * Copy to user address space. 
 * arg0:	kernel address
 * arg1:	user address
 * arg2:	byte count
 *
 * Returns 0 on success, EFAULT on fault.  Copies page-by-page so
 * write permission can be re-probed at each page boundary.
 */
Entry(copyoutmsg)
ENTRY(copyout)
	pushl	%esi
	pushl	%edi			/* save registers */
	pushl	%ebx

	movl	12+S_ARG0,%esi		/* get kernel start address */
	movl	12+S_ARG1,%edi		/* get user start address */
	movl	12+S_ARG2,%edx		/* get count */

	leal	0(%edi,%edx),%eax	/* get user end address + 1 */

	/* Destination segment for %es: kernel-loaded tasks write through
	 * %ds, others through the user data segment. */
	movl	$CPD_ACTIVE_THREAD,%ecx
	movl	%gs:(%ecx),%ecx		/* get active thread */
	movl	TH_TOP_ACT(%ecx),%ecx	/* get thread->top_act */
	movl	ACT_MAP(%ecx),%ecx	/* get act->map */
	movl	MAP_PMAP(%ecx),%ecx	/* get map->pmap */
	cmpl	EXT(kernel_pmap), %ecx
	jne	0f
	mov	%ds,%cx			/* else kernel data segment access */
	jmp	1f
0:
	movl	$USER_DS,%cx
					/* NOTE(review): "movl" with 16-bit
					 * %cx — assembler-dependent; verify */
1:
	mov	%cx,%es

/*
 * Check whether user address space is writable
 * before writing to it - hardware is broken.
 *
 * Skip check if "user" address is really in
 * kernel space (i.e., if it's in a kernel-loaded
 * task).
 *
 * Register usage:
 *	esi/edi	source/dest pointers for rep/mov
 *	ecx	counter for rep/mov
 *	edx	counts down from 3rd arg
 *	eax	count of bytes for each (partial) page copy
 *	ebx	shadows edi, used to adjust edx
 */
	movl	%edi,%ebx		/* copy edi for syncing up */
copyout_retry:
	/* if restarting after a partial copy, put edx back in sync, */
	addl	%ebx,%edx		/* edx -= (edi - ebx); */
	subl	%edi,%edx		/* i.e. re-add what was not copied */
	movl	%edi,%ebx		/* ebx = edi; */

	mov	%es,%cx
	cmpl	$USER_DS,%cx		/* If kernel data segment */
	jnz	0f			/* skip check */

	cmpb	$(CPUID_FAMILY_386), EXT(cpuid_family)
	ja	0f			/* >386: CPU honors R/W in ring 0 */

	/* 386 only: walk the page tables by hand to see whether the
	 * destination page is present and writable. */
	movl	%cr3,%ecx		/* point to page directory */
#if	NCPUS > 1
	andl	$(~0x7), %ecx		/* remove cpu number */
#endif	/* NCPUS > 1 */
	movl	%edi,%eax		/* get page directory bits */
	shrl	$(PDESHIFT),%eax	/* from user address */
	movl	KERNELBASE(%ecx,%eax,4),%ecx
					/* get page directory pointer */
	testl	$(PTE_V),%ecx		/* present? */
	jz	0f			/* if not, fault is OK */
	andl	$(PTE_PFN),%ecx		/* isolate page frame address */
	movl	%edi,%eax		/* get page table bits */
	shrl	$(PTESHIFT),%eax
	andl	$(PTEMASK),%eax		/* from user address */
	leal	KERNELBASE(%ecx,%eax,4),%ecx
					/* point to page table entry */
	movl	(%ecx),%eax		/* get it */
	testl	$(PTE_V),%eax		/* present? */
	jz	0f			/* if not, fault is OK */
	testl	$(PTE_W),%eax		/* writable? */
	jnz	0f			/* OK if so */
/*
 * Not writable - must fake a fault.  Turn off access to the page.
 */
	andl	$(PTE_INVALID),(%ecx)	/* turn off valid bit */
	movl	%cr3,%eax		/* invalidate TLB */
	movl	%eax,%cr3
0:
/*
 * Copy only what fits on the current destination page.
 * Check for write-fault again on the next page.
 */
	leal	NBPG(%edi),%eax		/* point to */
	andl	$(-NBPG),%eax		/* start of next page */
	subl	%edi,%eax		/* get number of bytes to that point */
	cmpl	%edx,%eax		/* bigger than count? */
	jle	1f			/* if so, */
	movl	%edx,%eax		/* use count */
1:
	cld				/* count up */
	movl	%eax,%ecx		/* move by longwords first */
	shrl	$2,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	RETRY_SECTION
	RETRY(copyout_retry)		/* write fault: re-probe this page */
	rep
	movsl
	movl	%eax,%ecx		/* now move remaining bytes */
	andl	$3,%ecx
	RECOVERY_SECTION
	RECOVER(copyout_fail)
	RETRY_SECTION
	RETRY(copyout_retry)
	rep
	movsb				/* move */
	movl	%edi,%ebx		/* copy edi for syncing up */
	subl	%eax,%edx		/* and decrement count */
	jg	copyout_retry		/* restart on next page if not done */
	xorl	%eax,%eax		/* return 0 for success */
copyout_ret:
	mov	%ss,%di			/* restore kernel segment */
	mov	%di,%es

	popl	%ebx
	popl	%edi			/* restore registers */
	popl	%esi
	ret				/* and return */

copyout_fail:
	movl	$EFAULT,%eax		/* return error for failure */
	jmp	copyout_ret		/* pop frame and return */

/*
 * FPU routines.
 */

/*
 * Initialize FPU.
+ */ +ENTRY(_fninit) + fninit + ret + +/* + * Read control word + */ +ENTRY(_fstcw) + pushl %eax /* get stack space */ + fstcw (%esp) + popl %eax + ret + +/* + * Set control word + */ +ENTRY(_fldcw) + fldcw 4(%esp) + ret + +/* + * Read status word + */ +ENTRY(_fnstsw) + xor %eax,%eax /* clear high 16 bits of eax */ + fnstsw %ax /* read FP status */ + ret + +/* + * Clear FPU exceptions + */ +ENTRY(_fnclex) + fnclex + ret + +/* + * Clear task-switched flag. + */ +ENTRY(_clts) + clts + ret + +/* + * Save complete FPU state. Save error for later. + */ +ENTRY(_fpsave) + movl 4(%esp),%eax /* get save area pointer */ + fnsave (%eax) /* save complete state, including */ + /* errors */ + ret + +/* + * Restore FPU state. + */ +ENTRY(_fprestore) + movl 4(%esp),%eax /* get save area pointer */ + frstor (%eax) /* restore complete state */ + ret + +/* + * Set cr3 + */ +ENTRY(set_cr3) +#if NCPUS > 1 + CPU_NUMBER(%eax) + orl 4(%esp), %eax +#else /* NCPUS > 1 && AT386 */ + movl 4(%esp),%eax /* get new cr3 value */ +#endif /* NCPUS > 1 && AT386 */ + /* + * Don't set PDBR to a new value (hence invalidating the + * "paging cache") if the new value matches the current one. 
+ */ + movl %cr3,%edx /* get current cr3 value */ + cmpl %eax,%edx + je 0f /* if two are equal, don't set */ + movl %eax,%cr3 /* load it (and flush cache) */ +0: + ret + +/* + * Read cr3 + */ +ENTRY(get_cr3) + movl %cr3,%eax +#if NCPUS > 1 + andl $(~0x7), %eax /* remove cpu number */ +#endif /* NCPUS > 1 && AT386 */ + ret + +/* + * Flush TLB + */ +ENTRY(flush_tlb) + movl %cr3,%eax /* flush tlb by reloading CR3 */ + movl %eax,%cr3 /* with itself */ + ret + +/* + * Read cr2 + */ +ENTRY(get_cr2) + movl %cr2,%eax + ret + +/* + * Read cr4 + */ +ENTRY(get_cr4) + .byte 0x0f,0x20,0xe0 /* movl %cr4, %eax */ + ret + +/* + * Write cr4 + */ +ENTRY(set_cr4) + movl 4(%esp), %eax + .byte 0x0f,0x22,0xe0 /* movl %eax, %cr4 */ + ret + +/* + * Read ldtr + */ +Entry(get_ldt) + xorl %eax,%eax + sldt %ax + ret + +/* + * Set ldtr + */ +Entry(set_ldt) + lldt 4(%esp) + ret + +/* + * Read task register. + */ +ENTRY(get_tr) + xorl %eax,%eax + str %ax + ret + +/* + * Set task register. Also clears busy bit of task descriptor. + */ +ENTRY(set_tr) + movl S_ARG0,%eax /* get task segment number */ + subl $8,%esp /* push space for SGDT */ + sgdt 2(%esp) /* store GDT limit and base (linear) */ + movl 4(%esp),%edx /* address GDT */ + movb $(K_TSS),5(%edx,%eax) /* fix access byte in task descriptor */ + ltr %ax /* load task register */ + addl $8,%esp /* clear stack */ + ret /* and return */ + +/* + * Set task-switched flag. 
+ */ +ENTRY(_setts) + movl %cr0,%eax /* get cr0 */ + orl $(CR0_TS),%eax /* or in TS bit */ + movl %eax,%cr0 /* set cr0 */ + ret + +/* + * io register must not be used on slaves (no AT bus) + */ +#define ILL_ON_SLAVE + + +#if MACH_ASSERT + +#define ARG0 B_ARG0 +#define ARG1 B_ARG1 +#define ARG2 B_ARG2 +#define PUSH_FRAME FRAME +#define POP_FRAME EMARF + +#else /* MACH_ASSERT */ + +#define ARG0 S_ARG0 +#define ARG1 S_ARG1 +#define ARG2 S_ARG2 +#define PUSH_FRAME +#define POP_FRAME + +#endif /* MACH_ASSERT */ + + +#if MACH_KDB || MACH_ASSERT + +/* + * Following routines are also defined as macros in i386/pio.h + * Compile then when MACH_KDB is configured so that they + * can be invoked from the debugger. + */ + +/* + * void outb(unsigned char *io_port, + * unsigned char byte) + * + * Output a byte to an IO port. + */ +ENTRY(outb) + PUSH_FRAME + ILL_ON_SLAVE + movl ARG0,%edx /* IO port address */ + movl ARG1,%eax /* data to output */ + outb %al,%dx /* send it out */ + POP_FRAME + ret + +/* + * unsigned char inb(unsigned char *io_port) + * + * Input a byte from an IO port. + */ +ENTRY(inb) + PUSH_FRAME + ILL_ON_SLAVE + movl ARG0,%edx /* IO port address */ + xor %eax,%eax /* clear high bits of register */ + inb %dx,%al /* get the byte */ + POP_FRAME + ret + +/* + * void outw(unsigned short *io_port, + * unsigned short word) + * + * Output a word to an IO port. + */ +ENTRY(outw) + PUSH_FRAME + ILL_ON_SLAVE + movl ARG0,%edx /* IO port address */ + movl ARG1,%eax /* data to output */ + outw %ax,%dx /* send it out */ + POP_FRAME + ret + +/* + * unsigned short inw(unsigned short *io_port) + * + * Input a word from an IO port. + */ +ENTRY(inw) + PUSH_FRAME + ILL_ON_SLAVE + movl ARG0,%edx /* IO port address */ + xor %eax,%eax /* clear high bits of register */ + inw %dx,%ax /* get the word */ + POP_FRAME + ret + +/* + * void outl(unsigned int *io_port, + * unsigned int byte) + * + * Output an int to an IO port. 
+ */ +ENTRY(outl) + PUSH_FRAME + ILL_ON_SLAVE + movl ARG0,%edx /* IO port address*/ + movl ARG1,%eax /* data to output */ + outl %eax,%dx /* send it out */ + POP_FRAME + ret + +/* + * unsigned int inl(unsigned int *io_port) + * + * Input an int from an IO port. + */ +ENTRY(inl) + PUSH_FRAME + ILL_ON_SLAVE + movl ARG0,%edx /* IO port address */ + inl %dx,%eax /* get the int */ + POP_FRAME + ret + +#endif /* MACH_KDB || MACH_ASSERT*/ + +/* + * void loutb(unsigned byte *io_port, + * unsigned byte *data, + * unsigned int count) + * + * Output an array of bytes to an IO port. + */ +ENTRY(loutb) +ENTRY(outsb) + PUSH_FRAME + ILL_ON_SLAVE + movl %esi,%eax /* save register */ + movl ARG0,%edx /* get io port number */ + movl ARG1,%esi /* get data address */ + movl ARG2,%ecx /* get count */ + cld /* count up */ + rep + outsb /* output */ + movl %eax,%esi /* restore register */ + POP_FRAME + ret + + +/* + * void loutw(unsigned short *io_port, + * unsigned short *data, + * unsigned int count) + * + * Output an array of shorts to an IO port. + */ +ENTRY(loutw) +ENTRY(outsw) + PUSH_FRAME + ILL_ON_SLAVE + movl %esi,%eax /* save register */ + movl ARG0,%edx /* get io port number */ + movl ARG1,%esi /* get data address */ + movl ARG2,%ecx /* get count */ + cld /* count up */ + rep + outsw /* output */ + movl %eax,%esi /* restore register */ + POP_FRAME + ret + +/* + * void loutw(unsigned short io_port, + * unsigned int *data, + * unsigned int count) + * + * Output an array of longs to an IO port. + */ +ENTRY(loutl) +ENTRY(outsl) + PUSH_FRAME + ILL_ON_SLAVE + movl %esi,%eax /* save register */ + movl ARG0,%edx /* get io port number */ + movl ARG1,%esi /* get data address */ + movl ARG2,%ecx /* get count */ + cld /* count up */ + rep + outsl /* output */ + movl %eax,%esi /* restore register */ + POP_FRAME + ret + + +/* + * void linb(unsigned char *io_port, + * unsigned char *data, + * unsigned int count) + * + * Input an array of bytes from an IO port. 
+ */ +ENTRY(linb) +ENTRY(insb) + PUSH_FRAME + ILL_ON_SLAVE + movl %edi,%eax /* save register */ + movl ARG0,%edx /* get io port number */ + movl ARG1,%edi /* get data address */ + movl ARG2,%ecx /* get count */ + cld /* count up */ + rep + insb /* input */ + movl %eax,%edi /* restore register */ + POP_FRAME + ret + + +/* + * void linw(unsigned short *io_port, + * unsigned short *data, + * unsigned int count) + * + * Input an array of shorts from an IO port. + */ +ENTRY(linw) +ENTRY(insw) + PUSH_FRAME + ILL_ON_SLAVE + movl %edi,%eax /* save register */ + movl ARG0,%edx /* get io port number */ + movl ARG1,%edi /* get data address */ + movl ARG2,%ecx /* get count */ + cld /* count up */ + rep + insw /* input */ + movl %eax,%edi /* restore register */ + POP_FRAME + ret + + +/* + * void linl(unsigned short io_port, + * unsigned int *data, + * unsigned int count) + * + * Input an array of longs from an IO port. + */ +ENTRY(linl) +ENTRY(insl) + PUSH_FRAME + ILL_ON_SLAVE + movl %edi,%eax /* save register */ + movl ARG0,%edx /* get io port number */ + movl ARG1,%edi /* get data address */ + movl ARG2,%ecx /* get count */ + cld /* count up */ + rep + insl /* input */ + movl %eax,%edi /* restore register */ + POP_FRAME + ret + + +/* + * int inst_fetch(int eip, int cs); + * + * Fetch instruction byte. Return -1 if invalid address. + */ + .globl EXT(inst_fetch) +LEXT(inst_fetch) + movl S_ARG1, %eax /* get segment */ + movw %ax,%fs /* into FS */ + movl S_ARG0, %eax /* get offset */ + RETRY_SECTION + RETRY(EXT(inst_fetch)) /* re-load FS on retry */ + RECOVERY_SECTION + RECOVER(EXT(inst_fetch_fault)) + movzbl %fs:(%eax),%eax /* load instruction byte */ + ret + +LEXT(inst_fetch_fault) + movl $-1,%eax /* return -1 if error */ + ret + + +#if MACH_KDP +/* + * kdp_copy_kmem(char *src, char *dst, int count) + * + * Similar to copyin except that both addresses are kernel addresses. 
+ */ + +ENTRY(kdp_copy_kmem) + pushl %esi + pushl %edi /* save registers */ + + movl 8+S_ARG0,%esi /* get kernel start address */ + movl 8+S_ARG1,%edi /* get kernel destination address */ + + movl 8+S_ARG2,%edx /* get count */ + + lea 0(%esi,%edx),%eax /* get kernel end address + 1 */ + + cmpl %esi,%eax + jb kdp_vm_read_fail /* fail if wrap-around */ + cld /* count up */ + movl %edx,%ecx /* move by longwords first */ + shrl $2,%ecx + RECOVERY_SECTION + RECOVER(kdp_vm_read_fail) + rep + movsl /* move longwords */ + movl %edx,%ecx /* now move remaining bytes */ + andl $3,%ecx + RECOVERY_SECTION + RECOVER(kdp_vm_read_fail) + rep + movsb +kdp_vm_read_done: + movl 8+S_ARG2,%edx /* get count */ + subl %ecx,%edx /* Return number of bytes transfered */ + movl %edx,%eax + + popl %edi /* restore registers */ + popl %esi + ret /* and return */ + +kdp_vm_read_fail: + xorl %eax,%eax /* didn't copy a thing. */ + + popl %edi + popl %esi + ret +#endif + + +/* + * Done with recovery and retry tables. + */ + RECOVERY_SECTION + RECOVER_TABLE_END + RETRY_SECTION + RETRY_TABLE_END + + + +ENTRY(dr6) + movl %db6, %eax + ret + +/* dr(address, type, len, persistence) + */ +ENTRY(dr0) + movl S_ARG0, %eax + movl %eax,EXT(dr_addr) + movl %eax, %db0 + movl $0, %ecx + jmp 0f +ENTRY(dr1) + movl S_ARG0, %eax + movl %eax,EXT(dr_addr)+1*4 + movl %eax, %db1 + movl $2, %ecx + jmp 0f +ENTRY(dr2) + movl S_ARG0, %eax + movl %eax,EXT(dr_addr)+2*4 + movl %eax, %db2 + movl $4, %ecx + jmp 0f + +ENTRY(dr3) + movl S_ARG0, %eax + movl %eax,EXT(dr_addr)+3*4 + movl %eax, %db3 + movl $6, %ecx + +0: + pushl %ebp + movl %esp, %ebp + + movl %db7, %edx + movl %edx,EXT(dr_addr)+4*4 + andl dr_msk(,%ecx,2),%edx /* clear out new entry */ + movl %edx,EXT(dr_addr)+5*4 + movzbl B_ARG3, %eax + andb $3, %al + shll %cl, %eax + orl %eax, %edx + + movzbl B_ARG1, %eax + andb $3, %al + addb $0x10, %ecx + shll %cl, %eax + orl %eax, %edx + + movzbl B_ARG2, %eax + andb $3, %al + addb $0x2, %ecx + shll %cl, %eax + orl %eax, %edx + + 
movl %edx, %db7 + movl %edx,EXT(dr_addr)+7*4 + movl %edx, %eax + leave + ret + + .data + +DATA(preemptable) /* Not on an MP (makes cpu_number() usage unsafe) */ +#if MACH_RT && (NCPUS == 1) + .long 0 /* FIXME -- Currently disabled */ +#else + .long 0 /* FIX ME -- Currently disabled */ +#endif /* MACH_RT && (NCPUS == 1) */ + +dr_msk: + .long ~0x000f0003 + .long ~0x00f0000c + .long ~0x0f000030 + .long ~0xf00000c0 +ENTRY(dr_addr) + .long 0,0,0,0 + .long 0,0,0,0 + .text + +/* + * Determine cpu model and set global cpuid_xxx variables + * + * Relies on 386 eflags bit 18 (AC) always being zero & 486 preserving it. + * Relies on 486 eflags bit 21 (ID) always being zero & 586 preserving it. + * Relies on CPUID instruction for next x86 generations + * (assumes cpuid-family-homogenous MPs; else convert to per-cpu array) + */ + +ENTRY(set_cpu_model) + FRAME + pushl %ebx /* save ebx */ + andl $~0x3,%esp /* Align stack to avoid AC fault */ + pushfl /* push EFLAGS */ + popl %eax /* pop into eax */ + movl %eax,%ecx /* Save original EFLAGS */ + xorl $(EFL_AC+EFL_ID),%eax /* toggle ID,AC bits */ + pushl %eax /* push new value */ + popfl /* through the EFLAGS register */ + pushfl /* and back */ + popl %eax /* into eax */ + movb $(CPUID_FAMILY_386),EXT(cpuid_family) + pushl %ecx /* push original EFLAGS */ + popfl /* restore EFLAGS */ + xorl %ecx,%eax /* see what changed */ + testl $EFL_AC,%eax /* test AC bit */ + jz 0f /* if AC toggled (486 or higher) */ + + movb $(CPUID_FAMILY_486),EXT(cpuid_family) + testl $EFL_ID,%eax /* test ID bit */ + jz 0f /* if ID toggled use cpuid instruction */ + + xorl %eax,%eax /* get vendor identification string */ + .word 0xA20F /* cpuid instruction */ + movl %eax,EXT(cpuid_value) /* Store high value */ + movl %ebx,EXT(cpuid_vid) /* Store byte 0-3 of Vendor ID */ + movl %edx,EXT(cpuid_vid)+4 /* Store byte 4-7 of Vendor ID */ + movl %ecx,EXT(cpuid_vid)+8 /* Store byte 8-B of Vendor ID */ + movl $1,%eax /* get processor signature */ + .word 0xA20F /* 
cpuid instruction */ + movl %edx,EXT(cpuid_feature) /* Store feature flags */ + movl %eax,%ecx /* Save original signature */ + andb $0xF,%al /* Get Stepping ID */ + movb %al,EXT(cpuid_stepping) /* Save Stepping ID */ + movl %ecx,%eax /* Get original signature */ + shrl $4,%eax /* Shift Stepping ID */ + movl %eax,%ecx /* Save original signature */ + andb $0xF,%al /* Get Model */ + movb %al,EXT(cpuid_model) /* Save Model */ + movl %ecx,%eax /* Get original signature */ + shrl $4,%eax /* Shift Stepping ID */ + movl %eax,%ecx /* Save original signature */ + andb $0xF,%al /* Get Family */ + movb %al,EXT(cpuid_family) /* Save Family */ + movl %ecx,%eax /* Get original signature */ + shrl $4,%eax /* Shift Stepping ID */ + andb $0x3,%al /* Get Type */ + movb %al,EXT(cpuid_type) /* Save Type */ + + movl EXT(cpuid_value),%eax /* Get high value */ + cmpl $2,%eax /* Test if processor configuration */ + jle 0f /* is present */ + movl $2,%eax /* get processor configuration */ + .word 0xA20F /* cpuid instruction */ + movl %eax,EXT(cpuid_cache) /* Store byte 0-3 of configuration */ + movl %ebx,EXT(cpuid_cache)+4 /* Store byte 4-7 of configuration */ + movl %ecx,EXT(cpuid_cache)+8 /* Store byte 8-B of configuration */ + movl %edx,EXT(cpuid_cache)+12 /* Store byte C-F of configuration */ +0: + popl %ebx /* restore ebx */ + EMARF + ret /* return */ + +ENTRY(get_cr0) + movl %cr0, %eax + ret + +ENTRY(set_cr0) + movl 4(%esp), %eax + movl %eax, %cr0 + ret + +#ifndef SYMMETRY + +/* + * ffs(mask) + */ +ENTRY(ffs) + bsfl S_ARG0, %eax + jz 0f + incl %eax + ret +0: xorl %eax, %eax + ret + +/* + * cpu_shutdown() + * Force reboot + */ + +null_idtr: + .word 0 + .long 0 + +Entry(cpu_shutdown) + lidt null_idtr /* disable the interrupt handler */ + xor %ecx,%ecx /* generate a divide by zero */ + div %ecx,%eax /* reboot now */ + ret /* this will "never" be executed */ + +#endif /* SYMMETRY */ + + +/* + * setbit(int bitno, int *s) - set bit in bit string + */ +ENTRY(setbit) + movl S_ARG0, %ecx /* bit 
number */ + movl S_ARG1, %eax /* address */ + btsl %ecx, (%eax) /* set bit */ + ret + +/* + * clrbit(int bitno, int *s) - clear bit in bit string + */ +ENTRY(clrbit) + movl S_ARG0, %ecx /* bit number */ + movl S_ARG1, %eax /* address */ + btrl %ecx, (%eax) /* clear bit */ + ret + +/* + * ffsbit(int *s) - find first set bit in bit string + */ +ENTRY(ffsbit) + movl S_ARG0, %ecx /* address */ + movl $0, %edx /* base offset */ +0: + bsfl (%ecx), %eax /* check argument bits */ + jnz 1f /* found bit, return */ + addl $4, %ecx /* increment address */ + addl $32, %edx /* increment offset */ + jmp 0b /* try again */ +1: + addl %edx, %eax /* return offset */ + ret + +/* + * testbit(int nr, volatile void *array) + * + * Test to see if the bit is set within the bit string + */ + +ENTRY(testbit) + movl S_ARG0,%eax /* Get the bit to test */ + movl S_ARG1,%ecx /* get the array string */ + btl %eax,(%ecx) + sbbl %eax,%eax + ret + +ENTRY(get_pc) + movl 4(%ebp),%eax + ret + +#if ETAP + +ENTRY(etap_get_pc) + movl 4(%ebp), %eax /* fetch pc of caller */ + ret + +ENTRY(tvals_to_etap) + movl S_ARG0, %eax + movl $1000000000, %ecx + mull %ecx + addl S_ARG1, %eax + adc $0, %edx + ret + +/* etap_time_t + * etap_time_sub(etap_time_t stop, etap_time_t start) + * + * 64bit subtract, returns stop - start + */ +ENTRY(etap_time_sub) + movl S_ARG0, %eax /* stop.low */ + movl S_ARG1, %edx /* stop.hi */ + subl S_ARG2, %eax /* stop.lo - start.lo */ + sbbl S_ARG3, %edx /* stop.hi - start.hi */ + ret + +#endif /* ETAP */ + +#if NCPUS > 1 + +ENTRY(minsecurity) + pushl %ebp + movl %esp,%ebp +/* + * jail: set the EIP to "jail" to block a kernel thread. + * Useful to debug synchronization problems on MPs. 
+ */ +ENTRY(jail) + jmp EXT(jail) + +#endif /* NCPUS > 1 */ + +/* + * delay(microseconds) + */ + +ENTRY(delay) + movl 4(%esp),%eax + testl %eax, %eax + jle 3f + movl EXT(delaycount), %ecx +1: + movl %ecx, %edx +2: + decl %edx + jne 2b + decl %eax + jne 1b +3: + ret + +/* + * unsigned int + * div_scale(unsigned int dividend, + * unsigned int divisor, + * unsigned int *scale) + * + * This function returns (dividend << *scale) //divisor where *scale + * is the largest possible value before overflow. This is used in + * computation where precision must be achieved in order to avoid + * floating point usage. + * + * Algorithm: + * *scale = 0; + * while (((dividend >> *scale) >= divisor)) + * (*scale)++; + * *scale = 32 - *scale; + * return ((dividend << *scale) / divisor); + */ +ENTRY(div_scale) + PUSH_FRAME + xorl %ecx, %ecx /* *scale = 0 */ + xorl %eax, %eax + movl ARG0, %edx /* get dividend */ +0: + cmpl ARG1, %edx /* if (divisor > dividend) */ + jle 1f /* goto 1f */ + addl $1, %ecx /* (*scale)++ */ + shrdl $1, %edx, %eax /* dividend >> 1 */ + shrl $1, %edx /* dividend >> 1 */ + jmp 0b /* goto 0b */ +1: + divl ARG1 /* (dividend << (32 - *scale)) / divisor */ + movl ARG2, %edx /* get scale */ + movl $32, (%edx) /* *scale = 32 */ + subl %ecx, (%edx) /* *scale -= %ecx */ + POP_FRAME + ret + +/* + * unsigned int + * mul_scale(unsigned int multiplicand, + * unsigned int multiplier, + * unsigned int *scale) + * + * This function returns ((multiplicand * multiplier) >> *scale) where + * scale is the largest possible value before overflow. This is used in + * computation where precision must be achieved in order to avoid + * floating point usage. 
+ * + * Algorithm: + * *scale = 0; + * while (overflow((multiplicand * multiplier) >> *scale)) + * (*scale)++; + * return ((multiplicand * multiplier) >> *scale); + */ +ENTRY(mul_scale) + PUSH_FRAME + xorl %ecx, %ecx /* *scale = 0 */ + movl ARG0, %eax /* get multiplicand */ + mull ARG1 /* multiplicand * multiplier */ +0: + cmpl $0, %edx /* if (!overflow()) */ + je 1f /* goto 1 */ + addl $1, %ecx /* (*scale)++ */ + shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */ + shrl $1, %edx /* (multiplicand * multiplier) >> 1 */ + jmp 0b +1: + movl ARG2, %edx /* get scale */ + movl %ecx, (%edx) /* set *scale */ + POP_FRAME + ret + +#if NCPUS > 1 +ENTRY(_cpu_number) + CPU_NUMBER(%eax) + ret +#endif /* NCPUS > 1 */ + +#ifdef MACH_BSD +/* + * BSD System call entry point.. + */ + +Entry(trap_unix_syscall) + pushf /* save flags as soon as possible */ + pushl %eax /* save system call number */ + pushl $0 /* clear trap number slot */ + + pusha /* save the general registers */ + pushl %ds /* and the segment registers */ + pushl %es + pushl %fs + pushl %gs + + mov %ss,%dx /* switch to kernel data segment */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + +/* + * Shuffle eflags,eip,cs into proper places + */ + + movl R_EIP(%esp),%ebx /* eflags are in EIP slot */ + movl R_CS(%esp),%ecx /* eip is in CS slot */ + movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */ + movl %ecx,R_EIP(%esp) /* fix eip */ + movl %edx,R_CS(%esp) /* fix cs */ + movl %ebx,R_EFLAGS(%esp) /* fix eflags */ + + CPU_NUMBER(%edx) + TIME_TRAP_UENTRY + + negl %eax /* get system call number */ + shll $4,%eax /* manual indexing */ + + CPU_NUMBER(%edx) + movl CX(EXT(kernel_stack),%edx),%ebx + /* get current kernel stack */ + xchgl %ebx,%esp /* switch stacks - %ebx points to */ + /* user registers. 
*/ + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ + CAH(call_call) + pushl %ebx /* Push the regs set onto stack */ + call EXT(unix_syscall) + popl %ebx + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + movl %eax,R_EAX(%esp) /* save return value */ + jmp EXT(return_from_trap) /* return to user */ + +/* + * Entry point for machdep system calls.. + */ + +Entry(trap_machdep_syscall) + pushf /* save flags as soon as possible */ + pushl %eax /* save system call number */ + pushl $0 /* clear trap number slot */ + + pusha /* save the general registers */ + pushl %ds /* and the segment registers */ + pushl %es + pushl %fs + pushl %gs + + mov %ss,%dx /* switch to kernel data segment */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + +/* + * Shuffle eflags,eip,cs into proper places + */ + + movl R_EIP(%esp),%ebx /* eflags are in EIP slot */ + movl R_CS(%esp),%ecx /* eip is in CS slot */ + movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */ + movl %ecx,R_EIP(%esp) /* fix eip */ + movl %edx,R_CS(%esp) /* fix cs */ + movl %ebx,R_EFLAGS(%esp) /* fix eflags */ + + CPU_NUMBER(%edx) + TIME_TRAP_UENTRY + + negl %eax /* get system call number */ + shll $4,%eax /* manual indexing */ + + CPU_NUMBER(%edx) + movl CX(EXT(kernel_stack),%edx),%ebx + /* get current kernel stack */ + xchgl %ebx,%esp /* switch stacks - %ebx points to */ + /* user registers. 
*/ + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ + CAH(call_call) + pushl %ebx + call EXT(machdep_syscall) + popl %ebx + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + movl %eax,R_EAX(%esp) /* save return value */ + jmp EXT(return_from_trap) /* return to user */ + +Entry(trap_mach25_syscall) + pushf /* save flags as soon as possible */ + pushl %eax /* save system call number */ + pushl $0 /* clear trap number slot */ + + pusha /* save the general registers */ + pushl %ds /* and the segment registers */ + pushl %es + pushl %fs + pushl %gs + + mov %ss,%dx /* switch to kernel data segment */ + mov %dx,%ds + mov %dx,%es + mov $CPU_DATA,%dx + mov %dx,%gs + +/* + * Shuffle eflags,eip,cs into proper places + */ + + movl R_EIP(%esp),%ebx /* eflags are in EIP slot */ + movl R_CS(%esp),%ecx /* eip is in CS slot */ + movl R_EFLAGS(%esp),%edx /* cs is in EFLAGS slot */ + movl %ecx,R_EIP(%esp) /* fix eip */ + movl %edx,R_CS(%esp) /* fix cs */ + movl %ebx,R_EFLAGS(%esp) /* fix eflags */ + + CPU_NUMBER(%edx) + TIME_TRAP_UENTRY + + negl %eax /* get system call number */ + shll $4,%eax /* manual indexing */ + + CPU_NUMBER(%edx) + movl CX(EXT(kernel_stack),%edx),%ebx + /* get current kernel stack */ + xchgl %ebx,%esp /* switch stacks - %ebx points to */ + /* user registers. 
*/ + +/* + * Register use on entry: + * eax contains syscall number + * ebx contains user regs pointer + */ + CAH(call_call) + pushl %ebx + call EXT(mach25_syscall) + popl %ebx + movl %esp,%ecx /* get kernel stack */ + or $(KERNEL_STACK_SIZE-1),%ecx + movl -3-IKS_SIZE(%ecx),%esp /* switch back to PCB stack */ + movl %eax,R_EAX(%esp) /* save return value */ + jmp EXT(return_from_trap) /* return to user */ + +#endif diff --git a/osfmk/i386/loose_ends.c b/osfmk/i386/loose_ends.c new file mode 100644 index 000000000..305bc37ea --- /dev/null +++ b/osfmk/i386/loose_ends.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +#include + +#include +#include +#include +#include +#include +#include + + /* + * Should be rewritten in asm anyway. + */ +/* + * ovbcopy - like bcopy, but recognizes overlapping ranges and handles + * them correctly. + */ + +void +ovbcopy( + const char *from, + char *to, + vm_size_t bytes) /* num bytes to copy */ +{ + /* Assume that bcopy copies left-to-right (low addr first). */ + if (from + bytes <= to || to + bytes <= from || to == from) + bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/ + else if (from > to) + bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */ + else { + /* to > from: overlapping, and must copy right-to-left. 
*/ + from += bytes - 1; + to += bytes - 1; + while (bytes-- > 0) + *to-- = *from--; + } +} + +void +bcopy( + const char *from, + char *to, + vm_size_t bytes) /* num bytes to copy */ +{ + ovbcopy(from, to, bytes); +} + +int bcmp( + const char *a, + const char *b, + vm_size_t len) +{ + if (len == 0) + return 0; + + do + if (*a++ != *b++) + break; + while (--len); + + return len; +} + +#if MACH_ASSERT + +/* + * Machine-dependent routine to fill in an array with up to callstack_max + * levels of return pc information. + */ +void machine_callstack( + natural_t *buf, + vm_size_t callstack_max) +{ +} + +#endif /* MACH_ASSERT */ diff --git a/osfmk/i386/mach_param.h b/osfmk/i386/mach_param.h new file mode 100644 index 000000000..7b2a7e7ce --- /dev/null +++ b/osfmk/i386/mach_param.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Machine-dependent parameters for i386. + */ + +#define HZ (100) + /* clock tick each 10 ms. */ diff --git a/osfmk/i386/machdep_call.c b/osfmk/i386/machdep_call.c new file mode 100644 index 000000000..b340b6fad --- /dev/null +++ b/osfmk/i386/machdep_call.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Machine dependent kernel calls. + * + * HISTORY + * + * 17 June 1992 ? at NeXT + * Created. + */ + +#include + +#include + +extern kern_return_t kern_invalid(); +extern kern_return_t thread_get_cthread_self(); +extern kern_return_t thread_set_cthread_self(); +extern kern_return_t PCcreate(), PCldt(), PCresume(); +extern kern_return_t PCcopyBIOSData(), PCmapBIOSRom(); +extern kern_return_t PCsizeBIOSExtData(), PCcopyBIOSExtData(); + +machdep_call_t machdep_call_table[] = { + { + thread_get_cthread_self, + 0 + }, + { + thread_set_cthread_self, + 1 + }, + { + kern_invalid, /* old th_create() */ + 0 + }, +#ifdef FIXME + { + PCcreate, + 3 + }, + { + PCldt, + 3 + }, + { + PCresume, + 0 + }, + { + PCcopyBIOSData, + 1 + }, + { + PCsizeBIOSExtData, + 0 + }, + { + PCcopyBIOSExtData, + 1 + }, + { + PCmapBIOSRom, + 3 + }, +#endif +}; + +int machdep_call_count = + (sizeof (machdep_call_table) / sizeof (machdep_call_t)); diff --git a/osfmk/i386/machdep_call.h b/osfmk/i386/machdep_call.h new file mode 100644 index 000000000..beeb51304 --- /dev/null +++ b/osfmk/i386/machdep_call.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1992 NeXT Computer, Inc. + * + * Machine dependent kernel call table defines. + * + * HISTORY + * + * 17 June 1992 ? at NeXT + * Created. + */ + +typedef kern_return_t (*machdep_call_routine_t)(); + +typedef struct { + machdep_call_routine_t routine; + int nargs; +} machdep_call_t; + +extern machdep_call_t machdep_call_table[]; +extern int machdep_call_count; diff --git a/osfmk/i386/machine_routines.c b/osfmk/i386/machine_routines.c new file mode 100644 index 000000000..95ce5958b --- /dev/null +++ b/osfmk/i386/machine_routines.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +/* IO memory map services */ + +/* Map memory map IO space */ +vm_offset_t ml_io_map( + vm_offset_t phys_addr, + vm_size_t size) +{ + return(io_map(phys_addr,size)); +} + +/* boot memory allocation */ +vm_offset_t ml_static_malloc( + vm_size_t size) +{ + return((vm_offset_t)NULL); +} + +vm_offset_t +ml_static_ptovirt( + vm_offset_t paddr) +{ + return phystokv(paddr); +} + +void +ml_static_mfree( + vm_offset_t vaddr, + vm_size_t size) +{ + return; +} + +/* virtual to physical on wired pages */ +vm_offset_t ml_vtophys( + vm_offset_t vaddr) +{ + return kvtophys(vaddr); +} + +/* Interrupt handling */ + +/* Get Interrupts Enabled */ +boolean_t ml_get_interrupts_enabled(void) +{ + unsigned long flags; + + __asm__ volatile("pushf; popl %0" : "=r" (flags)); + return (flags & EFL_IF) != 0; +} + +/* Set Interrupts Enabled */ +boolean_t ml_set_interrupts_enabled(boolean_t enable) +{ + unsigned long flags; + + __asm__ volatile("pushf; popl %0" : "=r" (flags)); + + if (enable) + __asm__ volatile("sti"); + else + __asm__ volatile("cli"); + + return (flags & EFL_IF) != 0; +} + +/* Check if running at interrupt context */ +boolean_t ml_at_interrupt_context(void) +{ + return get_interrupt_level() != 0; +} + +/* Generate a fake interrupt */ +void ml_cause_interrupt(void) +{ + panic("ml_cause_interrupt not defined yet on Intel"); +} + +/* Initialize Interrupts */ +void ml_install_interrupt_handler( + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + boolean_t current_state; + + current_state = ml_get_interrupts_enabled(); + + PE_install_interrupt_handler(nub, source, target, + (IOInterruptHandler) handler, refCon); + + (void) ml_set_interrupts_enabled(current_state); +} + +void +machine_signal_idle( + processor_t processor) +{ +} + +/* Stubs for pc tracing 
mechanism */ + +int *pc_trace_buf; +int pc_trace_cnt = 0; + +int +set_be_bit() +{ + return(0); +} + +int +clr_be_bit() +{ + return(0); +} + +int +be_tracing() +{ + return(0); +} diff --git a/osfmk/i386/machine_routines.h b/osfmk/i386/machine_routines.h new file mode 100644 index 000000000..920164a3b --- /dev/null +++ b/osfmk/i386/machine_routines.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _I386_MACHINE_ROUTINES_H_ +#define _I386_MACHINE_ROUTINES_H_ + +#include +#include +#include +#include + + +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) +/* IO memory map services */ + +/* Map memory map IO space */ +vm_offset_t ml_io_map( + vm_offset_t phys_addr, + vm_size_t size); + +/* boot memory allocation */ +vm_offset_t ml_static_malloc( + vm_size_t size); + +#endif + +vm_offset_t +ml_static_ptovirt( + vm_offset_t); + +void ml_static_mfree( + vm_offset_t, + vm_size_t); + +/* virtual to physical on wired pages */ +vm_offset_t ml_vtophys( + vm_offset_t vaddr); + +/* Interrupt handling */ + +/* Get Interrupts Enabled */ +boolean_t ml_get_interrupts_enabled(void); + +/* Set Interrupts Enabled */ +boolean_t ml_set_interrupts_enabled(boolean_t enable); + +/* Check if running at interrupt context */ +boolean_t ml_at_interrupt_context(void); + +/* Generate a fake interrupt */ +void ml_cause_interrupt(void); + +/* Initialize Interrupts */ +void ml_install_interrupt_handler( + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon); + +#ifdef MACH_KERNEL_PRIVATE +/* check pending timers */ +#define machine_clock_assist() + +void machine_idle(void); + +void machine_signal_idle( + processor_t processor); +#endif + +/* Type for the IPI Hander */ +typedef void (*ipi_handler_t)(void); + +/* Register a processor */ +kern_return_t ml_processor_register( + cpu_id_t cpu_id, + vm_offset_t start_paddr, + processor_t *processor, + ipi_handler_t *ipi_handler, + boolean_t boot_cpu); + +void ml_get_timebase(unsigned long long *timestamp); + +#endif /* _I386_MACHINE_ROUTINES_H_ */ diff --git a/osfmk/i386/machine_routines_asm.s b/osfmk/i386/machine_routines_asm.s new file mode 100644 index 000000000..2c5162fd3 --- /dev/null +++ b/osfmk/i386/machine_routines_asm.s @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +/* +** ml_get_timebase() +** +** Entry - %esp contains pointer to 64 bit structure. +** +** Exit - 64 bit structure filled in. +** +*/ +ENTRY(ml_get_timebase) + + movl S_ARG0, %ecx + + rdtsc + + movl %edx, 0(%ecx) + movl %eax, 4(%ecx) + + ret diff --git a/osfmk/i386/machine_rpc.h b/osfmk/i386/machine_rpc.h new file mode 100644 index 000000000..73e7bb253 --- /dev/null +++ b/osfmk/i386/machine_rpc.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _MACHINE_RPC_H_ +#define _MACHINE_RPC_H_ + +#if ETAP_EVENT_MONITOR +#define ETAP_EXCEPTION_PROBE(_f, _th, _ex, _sysnum) \ + if (_ex == EXC_SYSCALL) { \ + ETAP_PROBE_DATA(ETAP_P_SYSCALL_UNIX, \ + _f, \ + _th, \ + _sysnum, \ + sizeof(int)); \ + } +#else /* ETAP_EVENT_MONITOR */ +#define ETAP_EXCEPTION_PROBE(_f, _th, _ex, _sysnum) +#endif /* ETAP_EVENT_MONITOR */ + +extern void exception_return_wrapper( void ); + +#endif /* _MACHINE_RPC_H_ */ diff --git a/osfmk/i386/machlimits.h b/osfmk/i386/machlimits.h new file mode 100644 index 000000000..5aefbace2 --- /dev/null +++ b/osfmk/i386/machlimits.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Copyright (c) 1988 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * @(#)machlimits.h 7.1 (Berkeley) 2/15/89 + */ +#ifndef _MACH_MACHLIMITS_H_ +#define _MACH_MACHLIMITS_H_ + +#define CHAR_BIT 8 /* number of bits in a char */ + +#define SCHAR_MAX 127 /* max value for a signed char */ +#define SCHAR_MIN (-128) /* min value for a signed char */ + +#define UCHAR_MAX 255U /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ + +#define USHRT_MAX 65535U /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ + +#define UINT_MAX 0xFFFFFFFFU /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ + +#define ULONG_MAX UINT_MAX /* max value for an unsigned long */ +#define LONG_MAX INT_MAX /* max value for a long */ +#define LONG_MIN INT_MIN /* min value for a long */ + +/* Must be at least two, for internationalization (NLS/KJI) */ +#define MB_LEN_MAX 4 /* multibyte characters */ + +#endif /* _MACH_MACHLIMITS_H_ */ + diff --git a/osfmk/i386/machparam.h b/osfmk/i386/machparam.h new file mode 100644 index 000000000..34e24e808 --- /dev/null +++ b/osfmk/i386/machparam.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Machine-dependent SPL definitions. + * + * SPLs are true functions on i386, defined elsewhere. + */ diff --git a/osfmk/i386/misc_protos.h b/osfmk/i386/misc_protos.h new file mode 100644 index 000000000..b666379d2 --- /dev/null +++ b/osfmk/i386/misc_protos.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include + +extern void get_root_device(void); +extern void picinit(void); +extern void slave_clock(void); +extern void interrupt_processor( + int cpu); +extern void mp_probe_cpus(void); +extern void remote_kdb(void); +extern void clear_kdb_intr(void); +extern void set_cpu_model(void); +extern void cpu_shutdown(void); +extern void fix_desc( + void * desc, + int num_desc); +extern void cnpollc( + boolean_t on); +extern void form_pic_mask(void); +extern void intnull( + int unit); +extern char * i386_boot_info( + char *buf, + vm_size_t buf_len); + +extern void blkclr( + const char *from, + int nbytes); + +extern void kdb_kintr(void); +extern void kdb_console(void); + +extern unsigned long ntohl(unsigned long); + +extern unsigned int div_scale( + unsigned int dividend, + unsigned int divisor, + unsigned int *scale); + +extern unsigned int mul_scale( + unsigned int multiplicand, + unsigned int multiplier, + unsigned int *scale); diff --git a/osfmk/i386/mp_desc.c b/osfmk/i386/mp_desc.c new file mode 100644 index 000000000..dcab9ae43 --- /dev/null +++ b/osfmk/i386/mp_desc.c @@ 
-0,0 +1,314 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#include + +#if NCPUS > 1 + +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include + +/* + * The i386 needs an interrupt stack to keep the PCB stack from being + * overrun by interrupts. All interrupt stacks MUST lie at lower addresses + * than any thread`s kernel stack. + */ + +/* + * Addresses of bottom and top of interrupt stacks. + */ +vm_offset_t interrupt_stack[NCPUS]; +vm_offset_t int_stack_top[NCPUS]; + +/* + * Barrier address. + */ +vm_offset_t int_stack_high; + +/* + * First cpu`s interrupt stack. + */ +extern char intstack[]; /* bottom */ +extern char eintstack[]; /* top */ + +/* + * We allocate interrupt stacks from physical memory. + */ +extern +vm_offset_t avail_start; + +/* + * Multiprocessor i386/i486 systems use a separate copy of the + * GDT, IDT, LDT, and kernel TSS per processor. The first three + * are separate to avoid lock contention: the i386 uses locked + * memory cycles to access the descriptor tables. The TSS is + * separate since each processor needs its own kernel stack, + * and since using a TSS marks it busy. + */ + +/* + * Allocated descriptor tables. + */ +struct mp_desc_table *mp_desc_table[NCPUS] = { 0 }; + +/* + * Pointer to TSS for access in load_context. + */ +struct i386_tss *mp_ktss[NCPUS] = { 0 }; + +#if MACH_KDB +/* + * Pointer to TSS for debugger use. + */ +struct i386_tss *mp_dbtss[NCPUS] = { 0 }; +#endif /* MACH_KDB */ + +/* + * Pointer to GDT to reset the KTSS busy bit. 
 */
struct fake_descriptor *mp_gdt[NCPUS] = { 0 };
struct fake_descriptor *mp_idt[NCPUS] = { 0 };

/*
 * Allocate and initialize the per-processor descriptor tables.
 */

/*
 * Template descriptors.  Each is copied into a slave CPU's GDT and
 * then patched in place: the offset field is filled in and fix_desc()
 * converts the "fake" layout to hardware format.  Descriptor limits
 * are conventionally "size - 1".
 */
struct fake_descriptor ldt_desc_pattern = {
	(unsigned int) 0,
	LDTSZ * sizeof(struct fake_descriptor) - 1,
	0,
	ACC_P|ACC_PL_K|ACC_LDT
};
struct fake_descriptor tss_desc_pattern = {
	(unsigned int) 0,
	sizeof(struct i386_tss),	/* NOTE(review): unlike the ldt and
					 * cpudata patterns this limit is not
					 * "size - 1" — confirm intentional. */
	0,
	ACC_P|ACC_PL_K|ACC_TSS
};

struct fake_descriptor cpudata_desc_pattern = {
	(unsigned int) 0,
	sizeof(cpu_data_t)-1,
	SZ_32,
	ACC_P|ACC_PL_K|ACC_DATA_W
};

/*
 * mp_desc_init: set up descriptor tables for CPU `mycpu`.
 *
 * The master CPU reuses the statically built boot-time tables and
 * only records pointers to them; this returns 0 (no allocated table).
 * A slave CPU gets copies of the boot IDT/GDT/LDT in its
 * pre-allocated mp_desc_table (see interrupt_stack_alloc), a zeroed
 * kernel TSS, and GDT entries rewritten to point at its own LDT,
 * TSS, and cpu_data; the filled-in table is returned.
 *
 * Precondition: mp_desc_table[mycpu] must already be allocated for
 * slave CPUs (done in interrupt_stack_alloc before slaves start).
 */
struct mp_desc_table *
mp_desc_init(
	int	mycpu)
{
	register struct mp_desc_table *mpt;

	if (mycpu == master_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the TSS and GDT pointers.
		 */
		mp_ktss[mycpu] = &ktss;
#if	MACH_KDB
		mp_dbtss[mycpu] = &dbtss;
#endif	/* MACH_KDB */
		mp_gdt[mycpu] = gdt;
		mp_idt[mycpu] = idt;
		return 0;
	}
	else {
		mpt = mp_desc_table[mycpu];
		mp_ktss[mycpu] = &mpt->ktss;
		mp_gdt[mycpu] = mpt->gdt;
		mp_idt[mycpu] = mpt->idt;

		/*
		 * Copy the tables.  The boot tables serve as the
		 * prototype; per-CPU entries are patched below.
		 */
		bcopy((char *)idt,
		      (char *)mpt->idt,
		      sizeof(idt));
		bcopy((char *)gdt,
		      (char *)mpt->gdt,
		      sizeof(gdt));
		bcopy((char *)ldt,
		      (char *)mpt->ldt,
		      sizeof(ldt));
		bzero((char *)&mpt->ktss,
		      sizeof(struct i386_tss));
		bzero((char *)&cpu_data[mycpu],
		      sizeof(cpu_data_t));
#if	MACH_KDB
		mp_dbtss[mycpu] = &mpt->dbtss;
		bcopy((char *)&dbtss,
		      (char *)&mpt->dbtss,
		      sizeof(struct i386_tss));
#endif	/* MACH_KDB */

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 *
		 * Offsets are biased by LINEAR_KERNEL_ADDRESS —
		 * presumably because descriptor bases are linear,
		 * not kernel-virtual, addresses (confirm against
		 * the pmap layout).
		 */
		mpt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_LDT)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) mpt->ldt;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_LDT)], 1);

		mpt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(KERNEL_TSS)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->ktss;
		fix_desc(&mpt->gdt[sel_idx(KERNEL_TSS)], 1);

		mpt->gdt[sel_idx(CPU_DATA)] = cpudata_desc_pattern;
		mpt->gdt[sel_idx(CPU_DATA)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) &cpu_data[mycpu];
		fix_desc(&mpt->gdt[sel_idx(CPU_DATA)], 1);

#if	MACH_KDB
		mpt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
		mpt->gdt[sel_idx(DEBUG_TSS)].offset =
			LINEAR_KERNEL_ADDRESS + (unsigned int) &mpt->dbtss;
		fix_desc(&mpt->gdt[sel_idx(DEBUG_TSS)], 1);

		/* Debugger task starts on its own per-CPU stack slice. */
		mpt->dbtss.esp0 = (int)(db_task_stack_store +
			(INTSTACK_SIZE * (mycpu + 1)) - sizeof (natural_t));
		mpt->dbtss.esp = mpt->dbtss.esp0;
		mpt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

		/* Ring-0 stack segment; esp0 is set when a thread loads. */
		mpt->ktss.ss0 = KERNEL_DS;
		mpt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

		return mpt;
	}
}

/*
 * interrupt_stack_alloc: carve interrupt stacks and descriptor-table
 * memory for all slave CPUs out of physical memory (avail_start).
 *
 * Called after all CPUs have been found, but before the VM system
 * is running.  The machine array must show which CPUs exist.  The
 * master CPU keeps using the bootstrap stack (intstack/eintstack)
 * and its boot-time tables; each other present CPU gets one
 * INTSTACK_SIZE stack and one struct mp_desc_table.
 */
void
interrupt_stack_alloc(void)
{
	register int	i;
	int		cpu_count;
	vm_offset_t	stack_start;
	struct mp_desc_table *mpt;

	/*
	 * Count the number of CPUs.
	 */
	cpu_count = 0;
	for (i = 0; i < NCPUS; i++)
		if (machine_slot[i].is_cpu)
			cpu_count++;

	/*
	 * Allocate an interrupt stack for each CPU except for
	 * the master CPU (which uses the bootstrap stack).
	 * Memory is taken from avail_start, which is advanced
	 * past the (page-rounded) allocation.
	 */
	stack_start = phystokv(avail_start);
	avail_start = round_page(avail_start + INTSTACK_SIZE*(cpu_count-1));
	bzero((char *)stack_start, INTSTACK_SIZE*(cpu_count-1));

	/*
	 * Set up pointers to the top of the interrupt stack.
	 */
	for (i = 0; i < NCPUS; i++) {
		if (i == master_cpu) {
			interrupt_stack[i] = (vm_offset_t) intstack;
			int_stack_top[i] = (vm_offset_t) eintstack;
		}
		else if (machine_slot[i].is_cpu) {
			interrupt_stack[i] = stack_start;
			int_stack_top[i] = stack_start + INTSTACK_SIZE;

			stack_start += INTSTACK_SIZE;
		}
	}

	/*
	 * Allocate descriptor tables for each CPU except for
	 * the master CPU (which already has them initialized).
	 * These must exist before mp_desc_init() runs on a slave.
	 */

	mpt = (struct mp_desc_table *) phystokv(avail_start);
	avail_start = round_page((vm_offset_t)avail_start +
				 sizeof(struct mp_desc_table)*(cpu_count-1));
	for (i = 0; i < NCPUS; i++)
		if (i != master_cpu)
			mp_desc_table[i] = mpt++;


	/*
	 * Set up the barrier address.  All thread stacks MUST
	 * be above this address.
	 */
	/*
	 * intstack is at higher addess than stack_start for AT mps
	 * so int_stack_high must point at eintstack.
	 * XXX
	 * But what happens if a kernel stack gets allocated below
	 * 1 Meg ? Probably never happens, there is only 640 K available
	 * There.
	 */
	int_stack_high = (vm_offset_t) eintstack;
}

#endif	/* NCPUS > 1 */

/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _I386_MP_DESC_H_ +#define _I386_MP_DESC_H_ + +#include +#include + +/* + * Multiprocessor i386/i486 systems use a separate copy of the + * GDT, IDT, LDT, and kernel TSS per processor. The first three + * are separate to avoid lock contention: the i386 uses locked + * memory cycles to access the descriptor tables. 
The TSS is + * separate since each processor needs its own kernel stack, + * and since using a TSS marks it busy. + */ + +#include +#include + +/* + * The descriptor tables are together in a structure + * allocated one per processor (except for the boot processor). + * Note that dbtss could be conditionalized on MACH_KDB, but + * doing so increases misconfiguration risk. + */ +struct mp_desc_table { + struct fake_descriptor idt[IDTSZ]; /* IDT */ + struct fake_descriptor gdt[GDTSZ]; /* GDT */ + struct fake_descriptor ldt[LDTSZ]; /* LDT */ + struct i386_tss ktss; + struct i386_tss dbtss; +}; + +/* + * They are pointed to by a per-processor array. + */ +extern struct mp_desc_table *mp_desc_table[NCPUS]; + +/* + * The kernel TSS gets its own pointer. + */ +extern struct i386_tss *mp_ktss[NCPUS]; +#if MACH_KDB +extern struct i386_tss *mp_dbtss[NCPUS]; +#endif /* MACH_KDB */ + +/* + * So does the GDT and IDT. + */ +extern struct fake_descriptor *mp_gdt[NCPUS]; +extern struct fake_descriptor *mp_idt[NCPUS]; + + +/* + * Each CPU calls this routine to set up its descriptor tables. + */ + +extern struct mp_desc_table * mp_desc_init( + int cpu); +extern void interrupt_stack_alloc(void); + +#endif /* _I386_MP_DESC_H_ */ diff --git a/osfmk/i386/net_filter.c b/osfmk/i386/net_filter.c new file mode 100644 index 000000000..cda06b521 --- /dev/null +++ b/osfmk/i386/net_filter.c @@ -0,0 +1,1554 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1993 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
 */

/*
 * NOTE(review): the #include target below was stripped during text
 * extraction; restore it from the upstream osfmk/i386/net_filter.c.
 */
#include

/*
 * Register assignments used by the generated i386 filter code.
 * EAX and EDX are reserved for the packet data and header pointers;
 * EBX/ESI/EDI cache frequently reused filter operands.
 */
#define NET_REG_EAX 0		/* data segment register */
#define NET_REG_EDX 1		/* header segment register */
#define NET_REG_EBX 2		/* free register */
#define NET_REG_ESI 3		/* free register */
#define NET_REG_EDI 4		/* free register */
#define NET_REG_MAX 5		/* number of available registers */

/* Per-operand bookkeeping used by the register-caching optimizer. */
struct net_opt {
	filter_t	val;	/* value */
	unsigned char	reg;	/* register associated */
	unsigned char	used;	/* how many times it will be used */
};

/* Master switch: when FALSE, net_filter_alloc() emits nothing. */
boolean_t net_filter_enable = FALSE;

/*
 * Forward declarations.
 */
void net_filter_optimize(
	struct net_opt	net_o[],
	unsigned	net_len,
	int		reg[],
	unsigned	nbreg);

/*
 * Compilation of a source network filter into i386 instructions.
 *
 * Translates the NETF filter program at fpstart (fplen words) into
 * native code; *len receives the size of the generated routine.
 * Returns the generated function, or 0 when generation is disabled.
 */
filter_fct_t
net_filter_alloc(
	filter_t	*fpstart,
	unsigned int	fplen,
	unsigned int	*len)
{
	filter_t	*fp;
	unsigned int	op;
	unsigned int	arg;
	unsigned char	*p;
	filter_fct_t	top;
	unsigned char	*pend;
	unsigned char	*pend_old;
	unsigned int	loop;
	unsigned int	use_header;
	unsigned int	use_data;
	unsigned int	push_ecx;
	int		reg[NET_REG_MAX];
	struct net_opt	net_o[NET_MAX_FILTER];
	int		net_i;
	unsigned	net_j;
	unsigned	i;
	unsigned	push;
	unsigned	false_pad;
	struct net_opt	*pn;

	/*
	 * Addresses of end_true and end_false from the end of the program.
	 */
#define PEND_TRUE	(pend_old - (11 + push + false_pad))
#define PEND_FALSE	(pend_old - (4 + push))

	/*
	 * Don't produce anything if net_filter generation is not enabled.
+ */ + if (!net_filter_enable) { + *len = 0; + return ((filter_fct_t)0); + } + + /* + * called as (*filter)(data, data_count, header) + * + * %esp -> stack; + * %ecx -> arg; + * %eax -> data (if needed); + * %edx -> header (if needed); + */ + loop = 0; + p = (unsigned char *)0; + pend = 0; + use_header = 0; + use_data = 0; + net_j = 0; + false_pad = sizeof(int) - 1; + + /* + * The compiler needs 3 passes to produce the compiled net_filter: + * 0) compute without optimization the maximum size of the object + * routine (one run), + * 1) try to reduce the size of the object procedure (many runs), + * 2) produce final object code (one run). + */ + for (;;) { + if (loop == 0) + pend += 14; + + else if (loop == 1) { + if (reg[NET_REG_EBX] == -1) { + /* push and pop it */ + pend++; + push = 1; + } else + push = 0; + if (reg[NET_REG_ESI] == -1) { + /* push and pop it */ + pend++; + push++; + } + if (reg[NET_REG_EDI] == -1) { + /* push and pop it */ + pend++; + push++; + } + if (push) { + /* restore %esp */ + push += 3; + } + + if (use_data) + pend += 3; + if (use_header) + pend += 3; + pend += 8; + + } else { + *p++ = 0x55; /* pushl %ebp */ + *p++ = 0x89; /* movl %esp, %ebp */ + *p++ = 0xE5; + if (reg[NET_REG_EBX] == -1) + *p++ = 0x53; /* pushl %ebx */ + if (reg[NET_REG_ESI] == -1) + *p++ = 0x56; /* pushl %esi */ + if (reg[NET_REG_EDI] == -1) + *p++ = 0x57; /* pushl %edi */ + *p++ = 0xB9; /* movl $1, %ecx */ + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + if (use_data) { + *p++ = 0x8B; /* movl 0x8(%ebp), %eax */ + *p++ = 0x45; + *p++ = 0x08; + } + if (use_header) { + *p++ = 0x8B; /* movl 0x10(%ebp), %edx */ + *p++ = 0x55; + *p++ = 0x10; + } + } + push_ecx = 1; + net_i = -1; + + fp = fpstart; + while (fp - fpstart < fplen) + { + arg = *fp++; + op = NETF_OP(arg); + arg = NETF_ARG(arg); + + switch (arg) { + case NETF_NOPUSH: + /* + * arg = *sp++; + */ + if (push_ecx) { + push_ecx = 0; + break; + } + if (loop < 2) + pend++; + else + *p++ = 0x59; /* popl %ecx */ + 
break; + + case NETF_PUSHZERO: + /* + * arg = 0; + */ + if (loop < 2) { + if (push_ecx) { + pend++; + push_ecx = 0; + } + pend += 2; + } else { + if (push_ecx) { + *p++ = 0x51; /* pushl %ecx */ + push_ecx = 0; + } + *p++ = 0x31; /* xorl %ecx, %ecx */ + *p++ = 0xC9; + } + break; + + case NETF_PUSHLIT: + /* + * arg = *fp++; + */ + if (loop < 2) { + if (push_ecx) { + pend++; + push_ecx = 0; + } + pend += 5; + } else { + if (push_ecx) { + *p++ = 0x51; /* pushl %ecx */ + push_ecx = 0; + } + *p++ = 0xB9; /* movl *fp, %ecx */ + *p++ = *(unsigned char *)fp; + *p++ = *(((unsigned char *)fp) + 1); + *p++ = 0x0; + *p++ = 0x0; + } + fp++; + break; + + case NETF_PUSHIND: + /* + * arg = *sp++; + * if (arg >= data_count) + * return FALSE; + * arg = data_word[arg]; + */ + if (loop < 2) { + if (push_ecx) + push_ecx = 0; + else + pend++; + if (loop == 0) + use_data = 1; + if (loop == 0 || + PEND_FALSE - (pend + 5) >= 128) + pend += 14; + else + pend += 10; + break; + } + + if (push_ecx) + push_ecx = 0; + else + *p++ = 0x59; /* popl %ecx */ + *p++ = 0x39; /* cmpl 0xC(%ebp), %ecx */ + *p++ = 0x4D; + *p++ = 0x0C; + + if (PEND_FALSE - (p + 2) >= 128) { + *p++ = 0x0F; /* jle end_false */ + *p++ = 0x8E; + *(p+0) = PEND_FALSE - (p + 4); + *(p+1) = (PEND_FALSE - (p + 4)) >> 8; + *(p+2) = (PEND_FALSE - (p + 4)) >> 16; + *(p+3) = (PEND_FALSE - (p + 4)) >> 24; + p += 4; + + } else { + *p++ = 0x7E; /* jle end_false */ + *p = PEND_FALSE - (p + 1); + p++; + } + + *p++ = 0x0F; /* movzwl 0(%eax,%ecx,2), %ecx */ + *p++ = 0xB7; + *p++ = 0x4C; + *p++ = 0x48; + *p++ = 0x00; + break; + + case NETF_PUSHHDRIND: + /* + * arg = *sp++; + * if (arg >= (NET_HDW_HDR_MAX / + * sizeof(unsigned short)) + * return FALSE; + * arg = header_word[arg]; + */ + if (loop < 2) { + if (push_ecx) + push_ecx = 0; + else + pend++; + if (loop == 0) + use_header = 1; + if (loop == 0 || + PEND_FALSE - (pend + 8) >= 128) + pend += 17; + else + pend += 13; + break; + } + + if (push_ecx) + push_ecx = 0; + else + *p++ = 0x59; /* popl 
%ecx */ + *p++ = 0x81; /* cmpl %ecx, */ + *p++ = 0xF9; + *p++ = NET_HDW_HDR_MAX / + sizeof(unsigned short); + *p++ = (NET_HDW_HDR_MAX / + sizeof(unsigned short)) >> 8; + *p++ = (NET_HDW_HDR_MAX / + sizeof(unsigned short)) >> 16; + *p++ = (NET_HDW_HDR_MAX / + sizeof(unsigned short)) >> 24; + + if (PEND_FALSE - (p + 2) >= 128) { + *p++ = 0x0F; /* jge end_false */ + *p++ = 0x8D; + *(p+0) = PEND_FALSE - (p + 4); + *(p+1) = (PEND_FALSE - (p + 4)) >> 8; + *(p+2) = (PEND_FALSE - (p + 4)) >> 16; + *(p+3) = (PEND_FALSE - (p + 4)) >> 24; + p += 4; + + } else { + *p++ = 0x7D; /* jge end_false */ + *p = PEND_FALSE - (p + 1); + p++; + } + + *p++ = 0x0F; /* movzwl 0(%edx,%ecx,2), %ecx */ + *p++ = 0xB7; + *p++ = 0x4C; + *p++ = 0x4A; + *p++ = 0x00; + break; + + default: + if (arg >= NETF_PUSHSTK) { + arg -= NETF_PUSHSTK; + /* + * arg = sp[arg]; + */ + arg <<= 2; + if (loop < 2) { + if (push_ecx) { + pend++; + push_ecx = 0; + } + pend += (arg < 128) ? 4 : 7; + break; + } + + if (push_ecx) { + *p++ = 0x51; /* pushl %ecx */ + push_ecx = 0; + } + *p++ = 0x8B; /* movl arg(%esp), %ecx */ + if (arg < 128) { + *p++ = 0x4C; + *p++ = 0x24; + *p++ = arg; + } else { + *p++ = 0x8C; + *p++ = 0x24; + *p++ = arg; + *p++ = arg >> 8; + *p++ = arg >> 16; + *p++ = arg >> 24; + } + + } else if (arg >= NETF_PUSHHDR) { + arg -= NETF_PUSHHDR; + /* + * arg = header_word[arg]; + */ + arg <<= 1; + if (loop < 2) { + if (push_ecx) { + pend++; + push_ecx = 0; + } + if (loop == 0) { + use_header = 1; + net_o[net_j++].val = + arg + NETF_PUSHHDR; + } else { + net_i++; + assert(net_i < net_j); + pn = &net_o[net_i]; + assert(reg[NET_REG_EDX] + == -2); + assert(pn->used == 0 || + reg[pn->reg] + != -2); + assert(pn->val == arg + + NETF_PUSHHDR); + if (pn->used > 0 && + reg[pn->reg] >= 0 && + net_o[reg[pn->reg]] + .val == pn->val) { + pend += 2; + break; + } + } + pend += (arg < 128) ? 
5 : 8; + if (loop == 1 && pn->used > 1 && + (reg[pn->reg] < 0 || + net_o[reg[pn->reg]].val != + pn->val)) { + reg[pn->reg] = net_i; + pend += 2; + } + break; + } + + if (push_ecx) { + *p++ = 0x51; /* pushl %ecx */ + push_ecx = 0; + } + + net_i++; + assert(net_i < net_j); + pn = &net_o[net_i]; + assert(reg[NET_REG_EDX] == -2); + assert(pn->used == 0 || + reg[pn->reg] != -2); + assert(pn->val == arg + NETF_PUSHHDR); + if (pn->used > 0 && + reg[pn->reg] >= 0 && + net_o[reg[pn->reg]].val == + pn->val) { + *p++ = 0x89; + switch (pn->reg) { + case NET_REG_EAX: + /* movl %eax, %ecx */ + *p++ = 0xC1; + break; + + case NET_REG_EBX: + /* movl %ebx, %ecx */ + *p++ = 0xD9; + break; + + case NET_REG_ESI: + /* movl %esi, %ecx */ + *p++ = 0xF1; + break; + + case NET_REG_EDI: + /* movl %edi, %ecx */ + *p++ = 0xF9; + break; + } + break; + } + + *p++ = 0x0F;/* movzwl arg(%edx),%ecx */ + *p++ = 0xB7; + if (arg < 128) { + *p++ = 0x4C; + *p++ = 0x22; + *p++ = arg; + } else { + *p++ = 0x8C; + *p++ = 0x22; + *p++ = arg; + *p++ = arg >> 8; + *p++ = arg >> 16; + *p++ = arg >> 24; + } + + if (pn->used > 1 && + (reg[pn->reg] == -1 || + net_o[reg[pn->reg]].val != + pn->val)) { + reg[pn->reg] = net_i; + *p++ = 0x89; + assert(net_o[net_i].reg != + NET_REG_EDX); + switch (net_o[net_i].reg) { + case NET_REG_EAX: + /* movl %ecx, %eax */ + *p++ = 0xC8; + break; + case NET_REG_EBX: + /* movl %ecx, %ebx */ + *p++ = 0xCB; + break; + case NET_REG_ESI: + /* movl %ecx, %esi */ + *p++ = 0xCE; + break; + case NET_REG_EDI: + /* movl %ecx, %edi */ + *p++ = 0xCF; + break; + } + } + + } else { + arg -= NETF_PUSHWORD; + /* + * if (arg >= data_count) + * return FALSE; + * arg = data_word[arg]; + */ + if (loop < 2) { + if (push_ecx) { + pend++; + push_ecx = 0; + } + if (loop == 0) { + use_data = 1; + net_o[net_j++].val = + arg + NETF_PUSHWORD; + } else { + net_i++; + assert(net_i < net_j); + pn = &net_o[net_i]; + assert(reg[NET_REG_EAX] + == -2); + assert(pn->used == 0 || + reg[pn->reg] + != -2); + assert(pn->val 
== arg + + NETF_PUSHWORD); + if (pn->used > 0 && + reg[pn->reg] >= 0 && + net_o[reg[pn->reg]] + .val == pn->val) { + pend += 2; + break; + } + } + arg <<= 1; + pend += (arg < 128) ? 4 : 7; + if (loop == 0 || + (PEND_FALSE - + (pend + 2)) >= 128) + pend += 6; + else + pend += 2; + + if (arg < 128) + pend += 5; + else + pend += 8; + if (loop == 1 && pn->used > 1 && + (reg[pn->reg] < 0 || + net_o[reg[pn->reg]].val != + pn->val)) { + reg[pn->reg] = net_i; + pend += 2; + } + break; + } + + if (push_ecx) { + *p++ = 0x51; /* pushl %ecx */ + push_ecx = 0; + } + + net_i++; + assert(net_i < net_j); + pn = &net_o[net_i]; + assert(reg[NET_REG_EAX] == -2); + assert(pn->used == 0 || + reg[pn->reg] != -2); + assert(pn->val == arg + NETF_PUSHWORD); + if (pn->used > 0 && + reg[pn->reg] >= 0 && + net_o[reg[pn->reg]].val == + pn->val) { + *p++ = 0x89; + switch (pn->reg) { + case NET_REG_EDX: + /* movl %edx, %ecx */ + *p++ = 0xD1; + break; + + case NET_REG_EBX: + /* movl %ebx, %ecx */ + *p++ = 0xD9; + break; + + case NET_REG_ESI: + /* movl %esi, %ecx */ + *p++ = 0xF1; + break; + + case NET_REG_EDI: + /* movl %edi, %ecx */ + *p++ = 0xF9; + break; + } + break; + } + + /* cmpl 0xC(%ebp), */ + arg <<= 1; + if (arg < 128) { + *p++ = 0x83; + *p++ = 0x7D; + *p++ = 0x0C; + *p++ = arg; + } else { + *p++ = 0x81; + *p++ = 0x7D; + *p++ = 0x0C; + *p++ = arg; + *p++ = arg >> 8; + *p++ = arg >> 16; + *p++ = arg >> 24; + } + + if (PEND_FALSE - (p + 2) >= 128) { + *p++ = 0x0F;/* jle end_false */ + *p++ = 0x8E; + *(p+0) = PEND_FALSE - (p + 4); + *(p+1) = (PEND_FALSE - (p + 4)) + >> 8; + *(p+2) = (PEND_FALSE - (p + 4)) + >> 16; + *(p+3) = (PEND_FALSE - (p + 4)) + >> 24; + p += 4; + + } else { + *p++ = 0x7E;/* jle end_false */ + *p = PEND_FALSE - (p + 1); + p++; + } + + *p++ = 0x0F;/* movzwl arg(%eax),%ecx */ + *p++ = 0xB7; + if (arg < 128) { + *p++ = 0x4C; + *p++ = 0x20; + *p++ = arg; + } else { + *p++ = 0x8C; + *p++ = 0x20; + *p++ = arg; + *p++ = arg >> 8; + *p++ = arg >> 16; + *p++ = arg >> 24; + } + 
+ if (pn->used > 1 && + (reg[pn->reg] == -1 || + net_o[reg[pn->reg]].val != + pn->val)) { + reg[pn->reg] = net_i; + *p++ = 0x89; + assert(net_o[net_i].reg != + NET_REG_EAX); + switch (net_o[net_i].reg) { + case NET_REG_EDX: + /* movl %ecx, %edx */ + *p++ = 0xCA; + break; + case NET_REG_EBX: + /* movl %ecx, %ebx */ + *p++ = 0xCB; + break; + case NET_REG_ESI: + /* movl %ecx, %esi */ + *p++ = 0xCE; + break; + case NET_REG_EDI: + /* movl %ecx, %edi */ + *p++ = 0xCF; + break; + } + } + } + break; + } + + switch (op) { + case NETF_OP(NETF_NOP): + /* + * *--sp = arg; + */ + push_ecx = 1; + break; + + case NETF_OP(NETF_AND): + /* + * *sp &= arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0x21; /* andl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + } + break; + + case NETF_OP(NETF_OR): + /* + * *sp |= arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0x09; /* orl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + } + break; + + case NETF_OP(NETF_XOR): + /* + * sp ^= arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0x31; /* xorl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + } + break; + + case NETF_OP(NETF_EQ): + /* + * *sp = (*sp == arg); + */ + if (loop < 2) { + pend += 14; + /* + * Pad to longword boundary (cf dissas). + */ + if (i = ((pend - (unsigned char *)0) & + (sizeof(int) - 1))) + pend += (sizeof(int) - i); + pend += 7; + break; + } + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + i = ((p - (unsigned char *)top) + 11) & + (sizeof(int) - 1); + *p++ = 0x74; /* je .+9+ */ + *p++ = 0x09 + (i ? sizeof(int) - i : 0); + *p++ = 0xC7; /* movl $0, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + i = ((p - (unsigned char *)top) + 2) & + (sizeof(int) - 1); + *p++ = 0xEB; /* jmp .+7+ */ + *p++ = 0x07 + (i ? sizeof(int) - i : 0); + + /* + * Pad to longword boundary (cf dissas). 
+ */ + if (i = (p - (unsigned char *)top) & + (sizeof(int) - 1)) + while (i++ < sizeof(int)) + *p++ = 0x90; /* nop */ + *p++ = 0xC7; /* movl $1, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + break; + + case NETF_OP(NETF_NEQ): + /* + * *sp = (*sp != arg); + */ + if (loop < 2) { + pend += 14; + /* + * Pad to longword boundary (cf dissas). + */ + if (i = ((pend - (unsigned char *)0) & + (sizeof(int) - 1))) + pend += (sizeof(int) - i); + pend += 7; + break; + } + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + i = ((p - (unsigned char *)top) + 11) & + (sizeof(int) - 1); + *p++ = 0x75; /* jne .+9+ */ + *p++ = 0x09 + (i ? sizeof(int) - i : 0); + *p++ = 0xC7; /* movl $0, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + i = ((p - (unsigned char *)top) + 2) & + (sizeof(int) - 1); + *p++ = 0xEB; /* jmp .+7+ */ + *p++ = 0x07 + (i ? sizeof(int) - i : 0); + + /* + * Pad to longword boundary (cf dissas). + */ + if (i = (p - (unsigned char *)top) & + (sizeof(int) - 1)) + while (i++ < sizeof(int)) + *p++ = 0x90; /* nop */ + *p++ = 0xC7; /* movl $1, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + break; + + case NETF_OP(NETF_LT): + /* + * *sp = (*sp < arg); + */ + if (loop < 2) { + pend += 14; + /* + * Pad to longword boundary (cf dissas). + */ + if (i = ((pend - (unsigned char *)0) & + (sizeof(int) - 1))) + pend += (sizeof(int) - i); + pend += 7; + break; + } + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + i = ((p - (unsigned char *)top) + 11) & + (sizeof(int) - 1); + *p++ = 0x7C; /* jl .+9+ */ + *p++ = 0x09 + (i ? 
sizeof(int) - i : 0); + *p++ = 0xC7; /* movl $0, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + i = ((p - (unsigned char *)top) + 2) & + (sizeof(int) - 1); + *p++ = 0xEB; /* jmp .+7+ */ + *p++ = 0x07 + (i ? sizeof(int) - i : 0); + + /* + * Pad to longword boundary (cf dissas). + */ + if (i = (p - (unsigned char *)top) & + (sizeof(int) - 1)) + while (i++ < sizeof(int)) + *p++ = 0x90; /* nop */ + *p++ = 0xC7; /* movl $1, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + break; + + case NETF_OP(NETF_LE): + /* + * *sp = (*sp <= arg); + */ + if (loop < 2) { + pend += 14; + /* + * Pad to longword boundary (cf dissas). + */ + if (i = ((pend - (unsigned char *)0) & + (sizeof(int) - 1))) + pend += (sizeof(int) - i); + pend += 7; + break; + } + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + i = ((p - (unsigned char *)top) + 11) & + (sizeof(int) - 1); + *p++ = 0x7E; /* jle .+9+ */ + *p++ = 0x09 + (i ? sizeof(int) - i : 0); + *p++ = 0xC7; /* movl $0, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + i = ((p - (unsigned char *)top) + 2) & + (sizeof(int) - 1); + *p++ = 0xEB; /* jmp .+7+ */ + *p++ = 0x07 + (i ? sizeof(int) - i : 0); + + /* + * Pad to longword boundary (cf dissas). + */ + if (i = (p - (unsigned char *)top) & + (sizeof(int) - 1)) + while (i++ < sizeof(int)) + *p++ = 0x90; /* nop */ + *p++ = 0xC7; /* movl $1, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + break; + + case NETF_OP(NETF_GT): + /* + * *sp = (*sp > arg); + */ + if (loop < 2) { + pend += 14; + /* + * Pad to longword boundary (cf dissas). 
+ */ + if (i = ((pend - (unsigned char *)0) & + (sizeof(int) - 1))) + pend += (sizeof(int) - i); + pend += 7; + break; + } + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + i = ((p - (unsigned char *)top) + 11) & + (sizeof(int) - 1); + *p++ = 0x7F; /* jg .+9+ */ + *p++ = 0x09 + (i ? sizeof(int) - i : 0); + *p++ = 0xC7; /* movl $0, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + i = ((p - (unsigned char *)top) + 2) & + (sizeof(int) - 1); + *p++ = 0xEB; /* jmp .+7+ */ + *p++ = 0x07 + (i ? sizeof(int) - i : 0); + + /* + * Pad to longword boundary (cf dissas). + */ + if (i = (p - (unsigned char *)top) & + (sizeof(int) - 1)) + while (i++ < sizeof(int)) + *p++ = 0x90; /* nop */ + *p++ = 0xC7; /* movl $1, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + break; + + case NETF_OP(NETF_GE): + /* + * *sp = (*sp >= arg); + */ + if (loop < 2) { + pend += 14; + /* + * Pad to longword boundary (cf dissas). + */ + if (i = ((pend - (unsigned char *)0) & + (sizeof(int) - 1))) + pend += (sizeof(int) - i); + pend += 7; + break; + } + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + i = ((p - (unsigned char *)top) + 11) & + (sizeof(int) - 1); + *p++ = 0x7D; /* jge .+9+ */ + *p++ = 0x09 + (i ? sizeof(int) - i : 0); + *p++ = 0xC7; /* movl $0, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + i = ((p - (unsigned char *)top) + 2) & + (sizeof(int) - 1); + *p++ = 0xEB; /* jmp .+7+ */ + *p++ = 0x07 + (i ? sizeof(int) - i : 0); + + /* + * Pad to longword boundary (cf dissas). 
+ */ + if (i = (p - (unsigned char *)top) & + (sizeof(int) - 1)) + while (i++ < sizeof(int)) + *p++ = 0x90; /* nop */ + *p++ = 0xC7; /* movl $1, 0(%esp) */ + *p++ = 0x04; + *p++ = 0x24; + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + break; + + case NETF_OP(NETF_COR): + /* + * if (*sp++ == arg) + * return (TRUE); + */ + if (loop < 2) { + if (loop == 0 || + PEND_TRUE - (pend + 5) >= 128) + pend += 12; + else + pend += 8; + break; + } + + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + if (PEND_TRUE - (p + 2) >= 128) { + *p++ = 0x0F; /* je end_true */ + *p++ = 0x84; + *(p+0) = PEND_TRUE - (p + 4); + *(p+1) = (PEND_TRUE - (p + 4)) >> 8; + *(p+2) = (PEND_TRUE - (p + 4)) >> 16; + *(p+3) = (PEND_TRUE - (p + 4)) >> 24; + p += 4; + + } else { + *p++ = 0x74; /* je end_true */ + *p = PEND_TRUE - (p + 1); + p++; + } + + *p++ = 0x83; /* addl $4, %esp */ + *p++ = 0xC4; + *p++ = 0x04; + break; + + case NETF_OP(NETF_CAND): + /* + * if (*sp++ != arg) + * return (FALSE); + */ + if (loop < 2) { + if (loop == 0 || + PEND_FALSE - (pend + 5) >= 128) + pend += 12; + else + pend += 8; + break; + } + + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + if (PEND_FALSE - (p + 2) >= 128) { + *p++ = 0x0F; /* jne end_false */ + *p++ = 0x85; + *(p+0) = PEND_FALSE - (p + 4); + *(p+1) = (PEND_FALSE - (p + 4)) >> 8; + *(p+2) = (PEND_FALSE - (p + 4)) >> 16; + *(p+3) = (PEND_FALSE - (p + 4)) >> 24; + p += 4; + + } else { + *p++ = 0x75; /* jne end_false */ + *p = PEND_FALSE - (p + 1); + p++; + } + + *p++ = 0x83; /* addl $4, %esp */ + *p++ = 0xC4; + *p++ = 0x04; + break; + + case NETF_OP(NETF_CNOR): + /* + * if (*sp++ == arg) + * return (FALSE); + */ + if (loop < 2) { + if (loop == 0 || + PEND_FALSE - (pend + 5) >= 128) + pend += 12; + else + pend += 8; + break; + } + + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + if (PEND_FALSE - (p + 2) >= 128) { + *p++ = 0x0F; /* je end_false */ + *p++ = 0x84; + *(p+0) = PEND_FALSE - 
(p + 4); + *(p+1) = (PEND_FALSE - (p + 4)) >> 8; + *(p+2) = (PEND_FALSE - (p + 4)) >> 16; + *(p+3) = (PEND_FALSE - (p + 4)) >> 24; + p += 4; + } else { + *p++ = 0x74; /* je end_false */ + *p = PEND_FALSE - (p + 1); + p++; + } + + *p++ = 0x83; /* addl $4, %esp */ + *p++ = 0xC4; + *p++ = 0x04; + break; + + case NETF_OP(NETF_CNAND): + /* + * if (*sp++ != arg) + * return (TRUE); + */ + if (loop < 2) { + if (loop == 0 || + PEND_TRUE - (pend + 5) >= 128) + pend += 12; + else + pend += 8; + break; + } + + *p++ = 0x39; /* cmpl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + + if (PEND_TRUE - (p + 2) >= 128) { + *p++ = 0x0F; /* jne end_true */ + *p++ = 0x85; + *(p+0) = PEND_TRUE - (p + 4); + *(p+1) = (PEND_TRUE - (p + 4)) >> 8; + *(p+2) = (PEND_TRUE - (p + 4)) >> 16; + *(p+3) = (PEND_TRUE - (p + 4)) >> 24; + p += 4; + + } else { + *p++ = 0x75; /* jne end_true */ + *p = PEND_TRUE - (p + 1); + p++; + } + + *p++ = 0x83; /* addl $4, %esp */ + *p++ = 0xC4; + *p++ = 0x04; + break; + + case NETF_OP(NETF_LSH): + /* + * *sp <<= arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0xD3; /* sall (%esp), %cl */ + *p++ = 0x24; + *p++ = 0x24; + } + break; + + case NETF_OP(NETF_RSH): + /* + * *sp >>= arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0xD3; /* sarl (%esp), %cl */ + *p++ = 0x3C; + *p++ = 0x24; + } + break; + + case NETF_OP(NETF_ADD): + /* + * *sp += arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0x01; /* addl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + } + break; + + case NETF_OP(NETF_SUB): + /* + * *sp -= arg; + */ + if (loop < 2) + pend += 3; + else { + *p++ = 0x29; /* subl (%esp), %ecx */ + *p++ = 0x0C; + *p++ = 0x24; + } + break; + } + } + + /* + * return ((*sp) ? TRUE : FALSE); + */ + if (loop < 2) { + if (push_ecx) { + pend += 12; + push_ecx = 0; + } else + pend += 13; + /* + * Pad to longword boundary (cf dissas). + */ + i = (pend - (unsigned char *)0) & (sizeof(int) - 1); + false_pad = i ? 
sizeof(int) - i : 0; + pend += 4 + push + false_pad; + } else { + if (push_ecx) { + *p++ = 0x83; /* cmpl %ecx, $0 */ + *p++ = 0xF9; + *p++ = 0x00; + push_ecx = 0; + } else { + *p++ = 0x83; /* cmpl (%esp), $0 */ + *p++ = 0x3C; + *p++ = 0x24; + *p++ = 0x00; + } + + i = ((p - (unsigned char *)top) + 9) & + (sizeof(int) - 1); + false_pad = i ? sizeof(int) - i : 0; + *p++ = 0x74; /* je end_false */ + *p++ = 0x07 + false_pad; + + *p++ = 0xB8; /* movl $1, %eax */ + *p++ = 0x01; + *p++ = 0x00; + *p++ = 0x00; + *p++ = 0x00; + + *p++ = 0xEB; /* jmp .+2+ */ + *p++ = 0x02 + false_pad; + + /* + * Pad to longword boundary (cf dissas). + */ + for (i = 0; i < false_pad; i++) + *p++ = 0x90; /* nop */ + *p++ = 0x31; /* xorl %eax, %eax */ + *p++ = 0xC0; + if (push) { + *p++ = 0x8D; /* leal -(%ebx), %esp */ + *p++ = 0x65; + *p++ = -((push - 3) * 4); + } + if (reg[NET_REG_EDI] >= 0) + *p++ = 0x5F; /* pop %edi */ + if (reg[NET_REG_ESI] >= 0) + *p++ = 0x5E; /* pop %esi */ + if (reg[NET_REG_EBX] >= 0) + *p++ = 0x5B; /* pop %ebx */ + *p++ = 0xC9; /* leave */ + *p++ = 0xC3; /* ret */ + } + + /* + * Prepare next loop if any. + */ + if (loop == 2) + break; + + if (loop == 1 && pend == pend_old) { + loop = 2; + *len = pend - (unsigned char *)0; + top = (filter_fct_t)kalloc(*len); + p = (unsigned char *)top; + pend_old = p + (pend - (unsigned char *)0); + } else { + if (loop == 0) { + loop = 1; + /* + * Compute and optimize free registers usage. + */ + for (i = 0; i < NET_REG_MAX; i++) + reg[i] = -1; + if (use_data) + reg[NET_REG_EAX] = -2; + if (use_header) + reg[NET_REG_EDX] = -2; + net_filter_optimize(net_o, net_j, + reg, NET_REG_MAX); + } + pend_old = pend; + pend = 0; + } + for (i = 0; i < NET_REG_MAX; i++) + if (reg[i] != -2) + reg[i] = -1; + } + return (top); +} + +void +net_filter_free( + filter_fct_t fp, + unsigned int len) +{ + kfree((vm_offset_t)fp, len); +} + +/* + * Try to compute how to use (if needed) extra registers to store + * values read more than once. 
+ * + * Input : net_o is an array of used values (only .val is valid). + * net_len is the length of net_o. + * reg is an array of available registers (-2 ==> used register). + * nbreg is the maximum number of registers. + * + * Output : net_o is an array of register usage. + * .used == 0 ==> do not used any register. + * .used >= 2 ==> how many times the .reg register + * will be used. + * reg is an array of used registers. + * == -2 ==> unused or unavailable register. + * >= 0 ==> used register. + * + * N.B. This procedure is completely machine-independent and should take place + * in a file of the device directory. + */ +void +net_filter_optimize( + struct net_opt net_o[], + unsigned net_len, + int reg[], + unsigned nbreg) +{ + unsigned i; + unsigned j; + unsigned nbnet; + unsigned avail; + unsigned used; + unsigned first; + unsigned max; + unsigned last; + struct net_opt *p; + struct net_opt *q; + + avail = 0; + for (i = 0; i < nbreg; i++) + if (reg[i] != -2) + avail++; + if (avail == 0) + return; + + /* + * First step: set up used field. + */ + p = &net_o[net_len]; + while (p != net_o) { + for (q = p--; q < &net_o[net_len]; q++) + if (q->val == p->val) { + p->used = q->used + 1; + break; + } + if (q == &net_o[net_len]) + p->used = 1; + } + + /* + * Second step: choose best register and update used field. 
+ */ + if (net_len > 0) { + if (net_o[0].used == 1) + used = net_o[0].used = 0; + else { + net_o[0].reg = 0; + used = 1; + } + + for (p = &net_o[1]; p < &net_o[net_len]; p++) { + max = 0; + first = avail; + for (i = 0; i < avail; i++) { + q = p; + j = 0; + while (q-- != net_o) + if (q->used > 0 && q->reg == i) { + if (q->used == 1) + first = i; + j = 1; + break; + } + if (j == 0) + continue; + + if (q->val == p->val) { + p->reg = i; + break; + } + + if (p->used == 1) + continue; + + if (first == avail && used == avail) { + j = 1; + for (q = p+1; q->val != p->val; p++) + j++; + if (j > max) { + max = j; + last = i; + } + } + } + if (i < avail) + continue; + + if (p->used > 1) { + if (first != avail) + p->reg = first; + else if (used < avail) + p->reg = used++; + else + p->reg = last; + } else + p->used = 0; + } + } + + /* + * Third step: associate correct register number and keep max value. + */ + for (p = net_o; p < &net_o[net_len]; p++) { + if (p->used == 0) + continue; + i = first = 0; + for (;;) { + if (reg[i] != -2) { + if (first == p->reg) { + p->reg = i; + break; + } + first++; + } + i++; + } + } + + /* + * Forth step: invalidate useless registers. + */ + if (net_len == 0) { + for (i = 0; i < nbreg; i++) + if (reg[i] != -2) + reg[i] = -2; + + } else if (used < avail) { + first = 0; + for (i = 0; i < nbreg; i++) + if (reg[i] != -2) + if (first >= used) + reg[i] = -2; + else + first++; + } +} diff --git a/osfmk/i386/ntoh.h b/osfmk/i386/ntoh.h new file mode 100644 index 000000000..2f9e26ce2 --- /dev/null +++ b/osfmk/i386/ntoh.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +#ifndef _MACHINE_NTOH_H_ +#define _MACHINE_NTOH_H_ + +#ifdef BYTE_MSF +#undef BYTE_MSF /* i386 byte order is *NOT* Most Significant First */ +#endif + +extern unsigned long ntohl( + unsigned long arg); + +extern unsigned long htonl( + unsigned long arg); + +extern unsigned short ntohs( + unsigned short arg); + +extern unsigned short htons( + unsigned short arg); +#endif /* _MACHINE_NTOH_H_ */ diff --git a/osfmk/i386/ntoh.s b/osfmk/i386/ntoh.s new file mode 100644 index 000000000..47868223d --- /dev/null +++ b/osfmk/i386/ntoh.s @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:37 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 01:59:41 ezf + * change marker to not FREE + * [1994/09/22 21:25:28 ezf] + * + * Revision 1.1.2.2 1993/06/02 23:26:45 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:06:38 jeffc] + * + * Revision 1.1 1992/09/30 02:43:08 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:12:50 mrt + * Correcting copyright + * + * Revision 2.3 91/02/14 15:04:55 mrt + * Changed to new Mach copyright + * + * + * Revision 2.2 90/05/03 15:34:56 dbg + * First checkin. + * + * New a.out and coff compatible .s files. + * [89/10/16 rvb] + * + * Revision 1.3 89/02/26 12:35:37 gm0w + * Changes for cleanup. + * + * 16-Feb-89 Robert Baron (rvb) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#include + +Entry(ntohl) +ENTRY(htonl) + movl 4(%esp), %eax + rorw $8, %ax + ror $16,%eax + rorw $8, %ax + ret + + +Entry(ntohs) +ENTRY(htons) + movzwl 4(%esp), %eax + rorw $8, %ax + ret diff --git a/osfmk/i386/pcb.c b/osfmk/i386/pcb.c new file mode 100644 index 000000000..b75cf689c --- /dev/null +++ b/osfmk/i386/pcb.c @@ -0,0 +1,1473 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * NOTE(review): every #include operand below was stripped by the
 * extraction that produced this text (angle-bracketed header names lost).
 * Restore them from the pristine osfmk/i386/pcb.c -- do not guess here.
 */
#include
#include
#include
#include

#include

#include
#include
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Maps state flavor to number of words in the state:
 * indexed by the thread_flavor_t values; 0 entries are placeholders
 * for flavors that carry no fixed-size state.
 */
unsigned int state_count[] = {
	/* FLAVOR_LIST */ 0,
	i386_NEW_THREAD_STATE_COUNT,
	i386_FLOAT_STATE_COUNT,
	i386_ISA_PORT_MAP_STATE_COUNT,
	i386_V86_ASSIST_STATE_COUNT,
	i386_REGS_SEGS_STATE_COUNT,
	i386_THREAD_SYSCALL_STATE_COUNT,
	/* THREAD_STATE_NONE */ 0,
	i386_SAVED_STATE_COUNT,
};

/* Forward */

void act_machine_throughcall(thread_act_t thr_act);
/* Assembly entry points (see i386 locore/cswitch sources). */
extern thread_t Switch_context(
	thread_t	old,
	void		(*cont)(void),
	thread_t	new);
extern void Thread_continue(void);
extern void Load_context(
	thread_t	thread);

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 * (intentional no-op on i386 -- nothing to reclaim).
 */
void
consider_machine_collect()
{
}

/* Likewise a deliberate no-op on this architecture. */
void
consider_machine_adjust()
{
}


/*
 * machine_kernel_stack_init:
 *
 * Initialize a kernel stack which has already been
 * attached to its thread_activation.
 */

void
machine_kernel_stack_init(
	thread_t	thread,
	void		(*start_pos)(thread_t))
{
	thread_act_t	thr_act = thread->top_act;
	vm_offset_t	stack;

	assert(thr_act);
	stack = thread->kernel_stack;
	assert(stack);

#if	MACH_ASSERT
	if (watchacts & WA_PCB) {
		printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n",
			thread,stack,start_pos);
		printf("\tstack_iks=%x, stack_iel=%x\n",
			STACK_IKS(stack), STACK_IEL(stack));
	}
#endif	/* MACH_ASSERT */

	/*
	 * We want to run at start_pos, giving it as an argument
	 * the return value from Load_context/Switch_context.
	 * Thread_continue takes care of the mismatch between
	 * the argument-passing/return-value conventions.
	 * This function will not return normally,
	 * so we don`t have to worry about a return address.
	 */
	STACK_IKS(stack)->k_eip = (int) Thread_continue;
	STACK_IKS(stack)->k_ebx = (int) start_pos;
	STACK_IKS(stack)->k_esp = (int) STACK_IEL(stack);

	/*
	 * Point top of kernel stack to user`s registers.
	 */
	STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;
}


/* Per-CPU GDT/TSS selectors when running MP; single shared copies on UP. */
#if	NCPUS > 1
#define	curr_gdt(mycpu)		(mp_gdt[mycpu])
#define	curr_ktss(mycpu)	(mp_ktss[mycpu])
#else
#define	curr_gdt(mycpu)		(gdt)
#define	curr_ktss(mycpu)	(&ktss)
#endif

#define	gdt_desc_p(mycpu,sel) \
	((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])

/*
 * Install the hardware context (TSS kernel-stack pointer, IO permission
 * bitmap, LDT, FPU state) for new_act on the current CPU.
 * NOTE(review): preemption is disabled from cpu_number() below through the
 * LDT setup, so 'mycpu' stays valid across both scopes -- confirm that all
 * callers tolerate the mp_disable/mp_enable pairing here.
 */
void
act_machine_switch_pcb( thread_act_t new_act )
{
	pcb_t			pcb = new_act->mact.pcb;
	int			mycpu;
    {
	register iopb_tss_t	tss = pcb->ims.io_tss;
	vm_offset_t		pcb_stack_top;

	assert(new_act->thread != NULL);
	assert(new_act->thread->kernel_stack != 0);
	STACK_IEL(new_act->thread->kernel_stack)->saved_state =
		&new_act->mact.pcb->iss;

	/*
	 * Save a pointer to the top of the "kernel" stack -
	 * actually the place in the PCB where a trap into
	 * kernel mode will push the registers.
	 * The location depends on V8086 mode.  If we are
	 * not in V8086 mode, then a trap into the kernel
	 * won`t save the v86 segments, so we leave room.
	 */

	pcb_stack_top = (pcb->iss.efl & EFL_VM)
			? (int) (&pcb->iss + 1)
			: (int) (&pcb->iss.v86_segs);

	mp_disable_preemption();
	mycpu = cpu_number();

	if (tss == 0) {
	    /*
	     * No per-thread IO permissions.
	     * Use standard kernel TSS.
	     */
	    if (!(gdt_desc_p(mycpu,KERNEL_TSS)->access & ACC_TSS_BUSY))
		set_tr(KERNEL_TSS);
	    curr_ktss(mycpu)->esp0 = pcb_stack_top;
	}
	else {
	    /*
	     * Set the IO permissions.  Use this thread`s TSS.
	     */
	    *gdt_desc_p(mycpu,USER_TSS)
		= *(struct real_descriptor *)tss->iopb_desc;
	    tss->tss.esp0 = pcb_stack_top;
	    set_tr(USER_TSS);
	    /* Clear busy so KERNEL_TSS can be loaded again later. */
	    gdt_desc_p(mycpu,KERNEL_TSS)->access &= ~ ACC_TSS_BUSY;
	}
    }

    {
	register user_ldt_t	ldt = pcb->ims.ldt;
	/*
	 * Set the thread`s LDT.
	 */
	if (ldt == 0) {
	    /*
	     * Use system LDT.
	     */
	    set_ldt(KERNEL_LDT);
	}
	else {
	    /*
	     * Thread has its own LDT.
	     */
	    *gdt_desc_p(mycpu,USER_LDT) = ldt->desc;
	    set_ldt(USER_LDT);
	}
    }
	mp_enable_preemption();
	/*
	 * Load the floating-point context, if necessary.
	 */
	fpu_load_context(pcb);

}

/*
 * flush out any lazily evaluated HW state in the
 * owning thread's context, before termination.
 */
void
thread_machine_flush( thread_act_t	cur_act )
{
	fpflush(cur_act);
}

/*
 * Switch to the first thread on a CPU.
 */
void
load_context(
	thread_t		new)
{
	act_machine_switch_pcb(new->top_act);
	Load_context(new);
}

/*
 * Number of times we needed to swap an activation back in before
 * switching to it.
 */
int switch_act_swapins = 0;

/*
 * machine_switch_act
 *
 * Machine-dependent details of activation switching.  Called with
 * RPC locks held and preemption disabled.
 */
void
machine_switch_act(
	thread_t	thread,
	thread_act_t	old,
	thread_act_t	new,
	int		cpu)
{
	/*
	 * Switch the vm, ast and pcb context.
	 * Save FP registers if in use and set TS (task switch) bit.
	 */
	fpu_save_context(thread);

	active_stacks[cpu] = thread->kernel_stack;
	ast_context(new, cpu);

	PMAP_SWITCH_CONTEXT(old, new, cpu);
	act_machine_switch_pcb(new);
}

/*
 * Switch to a new thread.
 * Save the old thread`s kernel state or continuation,
 * and return it.
 */
thread_t
switch_context(
	thread_t		old,
	void			(*continuation)(void),
	thread_t		new)
{
	register thread_act_t	old_act = old->top_act,
				new_act = new->top_act;

#if MACH_RT
	assert(old_act->kernel_loaded ||
	       active_stacks[cpu_number()] == old_act->thread->kernel_stack);
	assert (get_preemption_level() == 1);
#endif
	check_simple_locks();

	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

#if	MACH_ASSERT
	if (watchacts & WA_SWITCH)
		printf("\tswitch_context(old=%x con=%x new=%x)\n",
		       old, continuation, new);
#endif	/* MACH_ASSERT */

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	{
	    int mycpu = cpu_number();

	    PMAP_SWITCH_CONTEXT(old_act, new_act, mycpu)
	}

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(new_act);
	return(Switch_context(old, continuation, new));
}

/*
 * One-time PCB subsystem initialization: FPU module and
 * IO-permission-bitmap bookkeeping.
 */
void
pcb_module_init(void)
{
	fpu_module_init();
	iopb_init();
}

/*
 * Initialize the PCB embedded in a freshly created activation:
 * take the statically allocated xxx_pcb and set user-mode segment
 * selectors and flags so the thread starts in user mode.
 */
void
pcb_init( register thread_act_t thr_act )
{
	register pcb_t pcb;

	assert(thr_act->mact.pcb == (pcb_t)0);
	pcb = thr_act->mact.pcb = &thr_act->mact.xxx_pcb;

#if	MACH_ASSERT
	if (watchacts & WA_PCB)
		printf("pcb_init(%x) pcb=%x\n", thr_act, pcb);
#endif	/* MACH_ASSERT */

	/*
	 * We can't let random values leak out to the user.
	 * (however, act_create() zeroed the entire thr_act, mact, pcb)
	 * bzero((char *) pcb, sizeof *pcb);
	 */
	simple_lock_init(&pcb->lock, ETAP_MISC_PCB);

	/*
	 * Guarantee that the bootstrapped thread will be in user
	 * mode.
	 */
	pcb->iss.cs = USER_CS;
	pcb->iss.ss = USER_DS;
	pcb->iss.ds = USER_DS;
	pcb->iss.es = USER_DS;
	pcb->iss.fs = USER_DS;
	pcb->iss.gs = USER_DS;
	pcb->iss.efl = EFL_USER_SET;
}

/*
 * Adjust saved register state for thread belonging to task
 * created with kernel_task_create().
 */
void
pcb_user_to_kernel(
	thread_act_t	thr_act)
{
	register pcb_t	pcb = thr_act->mact.pcb;

	pcb->iss.cs = KERNEL_CS;
	pcb->iss.ss = KERNEL_DS;
	pcb->iss.ds = KERNEL_DS;
	pcb->iss.es = KERNEL_DS;
	pcb->iss.fs = KERNEL_DS;
	pcb->iss.gs = CPU_DATA;	/* gs carries the per-CPU data selector */
}

/*
 * Release per-activation machine resources (IO TSS, FP save area, LDT)
 * and detach the PCB pointer.
 */
void
pcb_terminate(
	register thread_act_t	thr_act)
{
	register pcb_t	pcb = thr_act->mact.pcb;

	assert(pcb);

	if (pcb->ims.io_tss != 0)
		iopb_destroy(pcb->ims.io_tss);
	if (pcb->ims.ifps != 0)
		fp_free(pcb->ims.ifps);
	if (pcb->ims.ldt != 0)
		user_ldt_free(pcb->ims.ldt);
	thr_act->mact.pcb = (pcb_t)0;
}

/*
 * pcb_collect:
 *
 * Attempt to free excess pcb memory.
 */

void
pcb_collect(
	register thread_act_t  thr_act)
{
	/* accomplishes very little */
}

/*
 * act_machine_sv_free
 * release saveareas associated with an act.  if flag is true, release
 * user level savearea(s) too, else don't
 * (deliberate no-op on i386 -- saveareas are a PowerPC concept).
 */
void
act_machine_sv_free(thread_act_t act, int flag)
{

}

/*
 * act_machine_set_state:
 *
 *	Set the status of the specified thread.  Called with "appropriate"
 *	thread-related locks held (see act_lock_thread()), so
 *	thr_act->thread is guaranteed not to change.
+ */ + +kern_return_t +act_machine_set_state( + thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count) +{ + int kernel_act = thr_act->kernel_loading || + thr_act->kernel_loaded; + +#if MACH_ASSERT + if (watchacts & WA_STATE) + printf("act_%x act_m_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n", + current_act(), thr_act, flavor, tstate, count); +#endif /* MACH_ASSERT */ + + switch (flavor) { + case THREAD_SYSCALL_STATE: + { + register struct thread_syscall_state *state; + register struct i386_saved_state *saved_state = USER_REGS(thr_act); + + state = (struct thread_syscall_state *) tstate; + saved_state->eax = state->eax; + saved_state->edx = state->edx; + if (kernel_act) + saved_state->efl = state->efl; + else + saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET; + saved_state->eip = state->eip; + saved_state->uesp = state->esp; + break; + } + + case i386_SAVED_STATE: + { + register struct i386_saved_state *state; + register struct i386_saved_state *saved_state; + + if (count < i386_SAVED_STATE_COUNT) { + return(KERN_INVALID_ARGUMENT); + } + + state = (struct i386_saved_state *) tstate; + + saved_state = USER_REGS(thr_act); + + /* + * General registers + */ + saved_state->edi = state->edi; + saved_state->esi = state->esi; + saved_state->ebp = state->ebp; + saved_state->uesp = state->uesp; + saved_state->ebx = state->ebx; + saved_state->edx = state->edx; + saved_state->ecx = state->ecx; + saved_state->eax = state->eax; + saved_state->eip = state->eip; + if (kernel_act) + saved_state->efl = state->efl; + else + saved_state->efl = (state->efl & ~EFL_USER_CLEAR) + | EFL_USER_SET; + + /* + * Segment registers. Set differently in V8086 mode. + */ + if (state->efl & EFL_VM) { + /* + * Set V8086 mode segment registers. 
+ */ + saved_state->cs = state->cs & 0xffff; + saved_state->ss = state->ss & 0xffff; + saved_state->v86_segs.v86_ds = state->ds & 0xffff; + saved_state->v86_segs.v86_es = state->es & 0xffff; + saved_state->v86_segs.v86_fs = state->fs & 0xffff; + saved_state->v86_segs.v86_gs = state->gs & 0xffff; + + /* + * Zero protected mode segment registers. + */ + saved_state->ds = 0; + saved_state->es = 0; + saved_state->fs = 0; + saved_state->gs = 0; + + if (thr_act->mact.pcb->ims.v86s.int_table) { + /* + * Hardware assist on. + */ + thr_act->mact.pcb->ims.v86s.flags = + state->efl & (EFL_TF | EFL_IF); + } + } + else if (!kernel_act) { + /* + * 386 mode. Set segment registers for flat + * 32-bit address space. + */ + saved_state->cs = USER_CS; + saved_state->ss = USER_DS; + saved_state->ds = USER_DS; + saved_state->es = USER_DS; + saved_state->fs = USER_DS; + saved_state->gs = USER_DS; + } + else { + /* + * User setting segment registers. + * Code and stack selectors have already been + * checked. Others will be reset by 'iret' + * if they are not valid. + */ + saved_state->cs = state->cs; + saved_state->ss = state->ss; + saved_state->ds = state->ds; + saved_state->es = state->es; + saved_state->fs = state->fs; + saved_state->gs = state->gs; + } + break; + } + + case i386_NEW_THREAD_STATE: + case i386_REGS_SEGS_STATE: + { + register struct i386_new_thread_state *state; + register struct i386_saved_state *saved_state; + + if (count < i386_NEW_THREAD_STATE_COUNT) { + return(KERN_INVALID_ARGUMENT); + } + + if (flavor == i386_REGS_SEGS_STATE) { + /* + * Code and stack selectors must not be null, + * and must have user protection levels. + * Only the low 16 bits are valid. 
+ */ + state->cs &= 0xffff; + state->ss &= 0xffff; + state->ds &= 0xffff; + state->es &= 0xffff; + state->fs &= 0xffff; + state->gs &= 0xffff; + + if (!kernel_act && + (state->cs == 0 || (state->cs & SEL_PL) != SEL_PL_U + || state->ss == 0 || (state->ss & SEL_PL) != SEL_PL_U)) + return KERN_INVALID_ARGUMENT; + } + + state = (struct i386_new_thread_state *) tstate; + + saved_state = USER_REGS(thr_act); + + /* + * General registers + */ + saved_state->edi = state->edi; + saved_state->esi = state->esi; + saved_state->ebp = state->ebp; + saved_state->uesp = state->uesp; + saved_state->ebx = state->ebx; + saved_state->edx = state->edx; + saved_state->ecx = state->ecx; + saved_state->eax = state->eax; + saved_state->eip = state->eip; + if (kernel_act) + saved_state->efl = state->efl; + else + saved_state->efl = (state->efl & ~EFL_USER_CLEAR) + | EFL_USER_SET; + + /* + * Segment registers. Set differently in V8086 mode. + */ + if (state->efl & EFL_VM) { + /* + * Set V8086 mode segment registers. + */ + saved_state->cs = state->cs & 0xffff; + saved_state->ss = state->ss & 0xffff; + saved_state->v86_segs.v86_ds = state->ds & 0xffff; + saved_state->v86_segs.v86_es = state->es & 0xffff; + saved_state->v86_segs.v86_fs = state->fs & 0xffff; + saved_state->v86_segs.v86_gs = state->gs & 0xffff; + + /* + * Zero protected mode segment registers. + */ + saved_state->ds = 0; + saved_state->es = 0; + saved_state->fs = 0; + saved_state->gs = 0; + + if (thr_act->mact.pcb->ims.v86s.int_table) { + /* + * Hardware assist on. + */ + thr_act->mact.pcb->ims.v86s.flags = + state->efl & (EFL_TF | EFL_IF); + } + } + else if (flavor == i386_NEW_THREAD_STATE && !kernel_act) { + /* + * 386 mode. Set segment registers for flat + * 32-bit address space. + */ + saved_state->cs = USER_CS; + saved_state->ss = USER_DS; + saved_state->ds = USER_DS; + saved_state->es = USER_DS; + saved_state->fs = USER_DS; + saved_state->gs = USER_DS; + } + else { + /* + * User setting segment registers. 
+ * Code and stack selectors have already been + * checked. Others will be reset by 'iret' + * if they are not valid. + */ + saved_state->cs = state->cs; + saved_state->ss = state->ss; + saved_state->ds = state->ds; + saved_state->es = state->es; + saved_state->fs = state->fs; + saved_state->gs = state->gs; + } + break; + } + + case i386_FLOAT_STATE: { + + if (count < i386_FLOAT_STATE_COUNT) + return(KERN_INVALID_ARGUMENT); + + return fpu_set_state(thr_act,(struct i386_float_state*)tstate); + } + + /* + * Temporary - replace by i386_io_map + */ + case i386_ISA_PORT_MAP_STATE: { + register struct i386_isa_port_map_state *state; + register iopb_tss_t tss; + + if (count < i386_ISA_PORT_MAP_STATE_COUNT) + return(KERN_INVALID_ARGUMENT); + + break; + } + + case i386_V86_ASSIST_STATE: + { + register struct i386_v86_assist_state *state; + vm_offset_t int_table; + int int_count; + + if (count < i386_V86_ASSIST_STATE_COUNT) + return KERN_INVALID_ARGUMENT; + + state = (struct i386_v86_assist_state *) tstate; + int_table = state->int_table; + int_count = state->int_count; + + if (int_table >= VM_MAX_ADDRESS || + int_table + + int_count * sizeof(struct v86_interrupt_table) + > VM_MAX_ADDRESS) + return KERN_INVALID_ARGUMENT; + + thr_act->mact.pcb->ims.v86s.int_table = int_table; + thr_act->mact.pcb->ims.v86s.int_count = int_count; + + thr_act->mact.pcb->ims.v86s.flags = + USER_REGS(thr_act)->efl & (EFL_TF | EFL_IF); + break; + } + + case i386_THREAD_STATE: { + struct i386_saved_state *saved_state; + i386_thread_state_t *state25; + + saved_state = USER_REGS(thr_act); + state25 = (i386_thread_state_t *)tstate; + + saved_state->eax = state25->eax; + saved_state->ebx = state25->ebx; + saved_state->ecx = state25->ecx; + saved_state->edx = state25->edx; + saved_state->edi = state25->edi; + saved_state->esi = state25->esi; + saved_state->ebp = state25->ebp; + saved_state->uesp = state25->esp; + saved_state->efl = (state25->eflags & ~EFL_USER_CLEAR) + | EFL_USER_SET; + saved_state->eip 
= state25->eip; + saved_state->cs = USER_CS; /* FIXME? */ + saved_state->ss = USER_DS; + saved_state->ds = USER_DS; + saved_state->es = USER_DS; + saved_state->fs = USER_DS; + saved_state->gs = USER_DS; + } + break; + + default: + return(KERN_INVALID_ARGUMENT); + } + + return(KERN_SUCCESS); +} + +/* + * thread_getstatus: + * + * Get the status of the specified thread. + */ + + +kern_return_t +act_machine_get_state( + thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) +{ +#if MACH_ASSERT + if (watchacts & WA_STATE) + printf("act_%x act_m_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n", + current_act(), thr_act, flavor, tstate, + count, (count ? *count : 0)); +#endif /* MACH_ASSERT */ + + switch (flavor) { + + case i386_SAVED_STATE: + { + register struct i386_saved_state *state; + register struct i386_saved_state *saved_state; + + if (*count < i386_SAVED_STATE_COUNT) + return(KERN_INVALID_ARGUMENT); + + state = (struct i386_saved_state *) tstate; + saved_state = USER_REGS(thr_act); + + /* + * First, copy everything: + */ + *state = *saved_state; + + if (saved_state->efl & EFL_VM) { + /* + * V8086 mode. + */ + state->ds = saved_state->v86_segs.v86_ds & 0xffff; + state->es = saved_state->v86_segs.v86_es & 0xffff; + state->fs = saved_state->v86_segs.v86_fs & 0xffff; + state->gs = saved_state->v86_segs.v86_gs & 0xffff; + + if (thr_act->mact.pcb->ims.v86s.int_table) { + /* + * Hardware assist on + */ + if ((thr_act->mact.pcb->ims.v86s.flags & + (EFL_IF|V86_IF_PENDING)) == 0) + state->efl &= ~EFL_IF; + } + } + else { + /* + * 386 mode. 
+ */ + state->ds = saved_state->ds & 0xffff; + state->es = saved_state->es & 0xffff; + state->fs = saved_state->fs & 0xffff; + state->gs = saved_state->gs & 0xffff; + } + *count = i386_SAVED_STATE_COUNT; + break; + } + + case i386_NEW_THREAD_STATE: + case i386_REGS_SEGS_STATE: + { + register struct i386_new_thread_state *state; + register struct i386_saved_state *saved_state; + + if (*count < i386_NEW_THREAD_STATE_COUNT) + return(KERN_INVALID_ARGUMENT); + + state = (struct i386_new_thread_state *) tstate; + saved_state = USER_REGS(thr_act); + + /* + * General registers. + */ + state->edi = saved_state->edi; + state->esi = saved_state->esi; + state->ebp = saved_state->ebp; + state->ebx = saved_state->ebx; + state->edx = saved_state->edx; + state->ecx = saved_state->ecx; + state->eax = saved_state->eax; + state->eip = saved_state->eip; + state->efl = saved_state->efl; + state->uesp = saved_state->uesp; + + state->cs = saved_state->cs; + state->ss = saved_state->ss; + if (saved_state->efl & EFL_VM) { + /* + * V8086 mode. + */ + state->ds = saved_state->v86_segs.v86_ds & 0xffff; + state->es = saved_state->v86_segs.v86_es & 0xffff; + state->fs = saved_state->v86_segs.v86_fs & 0xffff; + state->gs = saved_state->v86_segs.v86_gs & 0xffff; + + if (thr_act->mact.pcb->ims.v86s.int_table) { + /* + * Hardware assist on + */ + if ((thr_act->mact.pcb->ims.v86s.flags & + (EFL_IF|V86_IF_PENDING)) == 0) + state->efl &= ~EFL_IF; + } + } + else { + /* + * 386 mode. 
+ */ + state->ds = saved_state->ds & 0xffff; + state->es = saved_state->es & 0xffff; + state->fs = saved_state->fs & 0xffff; + state->gs = saved_state->gs & 0xffff; + } + *count = i386_NEW_THREAD_STATE_COUNT; + break; + } + + case THREAD_SYSCALL_STATE: + { + register struct thread_syscall_state *state; + register struct i386_saved_state *saved_state = USER_REGS(thr_act); + + state = (struct thread_syscall_state *) tstate; + state->eax = saved_state->eax; + state->edx = saved_state->edx; + state->efl = saved_state->efl; + state->eip = saved_state->eip; + state->esp = saved_state->uesp; + *count = i386_THREAD_SYSCALL_STATE_COUNT; + break; + } + + case THREAD_STATE_FLAVOR_LIST: + if (*count < 5) + return (KERN_INVALID_ARGUMENT); + tstate[0] = i386_NEW_THREAD_STATE; + tstate[1] = i386_FLOAT_STATE; + tstate[2] = i386_ISA_PORT_MAP_STATE; + tstate[3] = i386_V86_ASSIST_STATE; + tstate[4] = THREAD_SYSCALL_STATE; + *count = 5; + break; + + case i386_FLOAT_STATE: { + + if (*count < i386_FLOAT_STATE_COUNT) + return(KERN_INVALID_ARGUMENT); + + *count = i386_FLOAT_STATE_COUNT; + return fpu_get_state(thr_act,(struct i386_float_state *)tstate); + } + + /* + * Temporary - replace by i386_io_map + */ + case i386_ISA_PORT_MAP_STATE: { + register struct i386_isa_port_map_state *state; + register iopb_tss_t tss; + + if (*count < i386_ISA_PORT_MAP_STATE_COUNT) + return(KERN_INVALID_ARGUMENT); + + state = (struct i386_isa_port_map_state *) tstate; + tss = thr_act->mact.pcb->ims.io_tss; + + if (tss == 0) { + int i; + + /* + * The thread has no ktss, so no IO permissions. + */ + + for (i = 0; i < sizeof state->pm; i++) + state->pm[i] = 0xff; + } else { + /* + * The thread has its own ktss. 
+ */ + + bcopy((char *) tss->bitmap, + (char *) state->pm, + sizeof state->pm); + } + + *count = i386_ISA_PORT_MAP_STATE_COUNT; + break; + } + + case i386_V86_ASSIST_STATE: + { + register struct i386_v86_assist_state *state; + + if (*count < i386_V86_ASSIST_STATE_COUNT) + return KERN_INVALID_ARGUMENT; + + state = (struct i386_v86_assist_state *) tstate; + state->int_table = thr_act->mact.pcb->ims.v86s.int_table; + state->int_count = thr_act->mact.pcb->ims.v86s.int_count; + + *count = i386_V86_ASSIST_STATE_COUNT; + break; + } + + case i386_THREAD_STATE: { + struct i386_saved_state *saved_state; + i386_thread_state_t *state; + + saved_state = USER_REGS(thr_act); + state = (i386_thread_state_t *)tstate; + + state->eax = saved_state->eax; + state->ebx = saved_state->ebx; + state->ecx = saved_state->ecx; + state->edx = saved_state->edx; + state->edi = saved_state->edi; + state->esi = saved_state->esi; + state->ebp = saved_state->ebp; + state->esp = saved_state->uesp; + state->eflags = saved_state->efl; + state->eip = saved_state->eip; + state->cs = saved_state->cs; + state->ss = saved_state->ss; + state->ds = saved_state->ds; + state->es = saved_state->es; + state->fs = saved_state->fs; + state->gs = saved_state->gs; + break; + } + + default: + return(KERN_INVALID_ARGUMENT); + } + + return(KERN_SUCCESS); +} + +/* + * Alter the thread`s state so that a following thread_exception_return + * will make the thread return 'retval' from a syscall. + */ +void +thread_set_syscall_return( + thread_t thread, + kern_return_t retval) +{ + thread->top_act->mact.pcb->iss.eax = retval; +} + +/* + * Initialize the machine-dependent state for a new thread. 
+ */ +kern_return_t +thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(thread_t)) +{ + MachineThrAct_t mact = &thr_act->mact; + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", + thread, thr_act, start_pos); +#endif /* MACH_ASSERT */ + + assert(thread != NULL); + assert(thr_act != NULL); + + /* + * Allocate a kernel stack per shuttle + */ + thread->kernel_stack = (int)stack_alloc(thread,start_pos); + assert(thread->kernel_stack != 0); + + /* + * Point top of kernel stack to user`s registers. + */ + STACK_IEL(thread->kernel_stack)->saved_state = &mact->pcb->iss; + + /* + * Utah code fiddles with pcb here - (we don't need to) + */ + return(KERN_SUCCESS); +} + +/* + * Machine-dependent cleanup prior to destroying a thread + */ +void +thread_machine_destroy( thread_t thread ) +{ + spl_t s; + + if (thread->kernel_stack != 0) { + s = splsched(); + stack_free(thread); + splx(s); + } +} + +/* + * This is used to set the current thr_act/thread + * when starting up a new processor + */ +void +thread_machine_set_current( thread_t thread ) +{ + register int my_cpu; + + mp_disable_preemption(); + my_cpu = cpu_number(); + + cpu_data[my_cpu].active_thread = thread; + active_kloaded[my_cpu] = + thread->top_act->kernel_loaded ? thread->top_act : THR_ACT_NULL; + + mp_enable_preemption(); +} + + +/* + * Pool of kernel activations. 
+ */ + +void act_machine_init() +{ + int i; + thread_act_t thr_act; + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("act_machine_init()\n"); +#endif /* MACH_ASSERT */ + + /* Good to verify this once */ + assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX ); + + /* + * If we start using kernel activations, + * would normally create kernel_thread_pool here, + * populating it from the act_zone + */ +} + +kern_return_t +act_machine_create(task_t task, thread_act_t thr_act) +{ + MachineThrAct_t mact = &thr_act->mact; + pcb_t pcb; + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("act_machine_create(task=%x,thr_act=%x) pcb=%x\n", + task,thr_act, &mact->xxx_pcb); +#endif /* MACH_ASSERT */ + + /* + * Clear & Init the pcb (sets up user-mode s regs) + */ + pcb_init(thr_act); + + return KERN_SUCCESS; +} + +void +act_virtual_machine_destroy(thread_act_t thr_act) +{ + return; +} + +void +act_machine_destroy(thread_act_t thr_act) +{ + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("act_machine_destroy(0x%x)\n", thr_act); +#endif /* MACH_ASSERT */ + + pcb_terminate(thr_act); +} + +void +act_machine_return(int code) +{ + thread_act_t thr_act = current_act(); + +#if MACH_ASSERT + /* + * We don't go through the locking dance here needed to + * acquire thr_act->thread safely. + */ + + if (watchacts & WA_EXIT) + printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n", + code, thr_act, thr_act->ref_count, + thr_act->thread, thr_act->thread->ref_count); +#endif /* MACH_ASSERT */ + + /* + * This code is called with nothing locked. + * It also returns with nothing locked, if it returns. + * + * This routine terminates the current thread activation. + * If this is the only activation associated with its + * thread shuttle, then the entire thread (shuttle plus + * activation) is terminated. 
+ */ + assert( code == KERN_TERMINATED ); + assert( thr_act ); + +#ifdef CALLOUT_RPC_MODEL + /* + * JMM - RPC is not going to be done with a callout/direct- + * stack manipulation mechanism. Instead we will return/ + * unwind normally as if from a continuation. + */ + act_lock_thread(thr_act); + + if (thr_act->thread->top_act != thr_act) { + /* + * this is not the top activation; + * if possible, we should clone the shuttle so that + * both the root RPC-chain and the soon-to-be-orphaned + * RPC-chain have shuttles + * + * JMM - Cloning shuttles isn't the right approach. We + * need to alert the higher up activations to return our + * shuttle (because scheduling attributes may TRUELY be + * unique and not cloneable. + */ + act_unlock_thread(thr_act); + panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED"); + } + + if (thr_act->lower != THR_ACT_NULL) { + thread_t cur_thread = current_thread(); + thread_act_t cur_act; + struct ipc_port *iplock; + + /* send it an appropriate return code */ + thr_act->lower->alerts |= SERVER_TERMINATED; + install_special_handler(thr_act->lower); + + /* Return to previous act with error code */ + act_locked_act_reference(thr_act); /* keep it around */ + act_switch_swapcheck(cur_thread, (ipc_port_t)0); + (void) switch_act(THR_ACT_NULL); + /* assert(thr_act->ref_count == 0); */ /* XXX */ + cur_act = cur_thread->top_act; + MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED; + + machine_kernel_stack_init(cur_thread, mach_rpc_return_error); + /* + * The following unlocks must be done separately since fields + * used by `act_unlock_thread()' have been cleared, meaning + * that it would not release all of the appropriate locks. + */ + iplock = thr_act->pool_port; /* remember for unlock call */ + rpc_unlock(cur_thread); + if (iplock) ip_unlock(iplock); /* must be done separately */ + act_unlock(thr_act); + act_deallocate(thr_act); /* free it */ + Load_context(cur_thread); + /*NOTREACHED*/ + + panic("act_machine_return: TALKING ZOMBIE! 
(2)"); + } + act_unlock_thread(thr_act); + +#endif /* CALLOUT_RPC_MODEL */ + + /* This is the only activation attached to the shuttle... */ + /* terminate the entire thread (shuttle plus activation) */ + + assert(thr_act->thread->top_act == thr_act); + thread_terminate_self(); + + /*NOTREACHED*/ + + panic("act_machine_return: TALKING ZOMBIE! (1)"); +} + + +/* + * Perform machine-dependent per-thread initializations + */ +void +thread_machine_init(void) +{ + pcb_module_init(); +} + +/* + * Some routines for debugging activation code + */ +static void dump_handlers(thread_act_t); +void dump_regs(thread_act_t); + +static void +dump_handlers(thread_act_t thr_act) +{ + ReturnHandler *rhp = thr_act->handlers; + int counter = 0; + + printf("\t"); + while (rhp) { + if (rhp == &thr_act->special_handler){ + if (rhp->next) + printf("[NON-Zero next ptr(%x)]", rhp->next); + printf("special_handler()->"); + break; + } + printf("hdlr_%d(%x)->",counter,rhp->handler); + rhp = rhp->next; + if (++counter > 32) { + printf("Aborting: HUGE handler chain\n"); + break; + } + } + printf("HLDR_NULL\n"); +} + +void +dump_regs(thread_act_t thr_act) +{ + if (thr_act->mact.pcb) { + register struct i386_saved_state *ssp = USER_REGS(thr_act); + /* Print out user register state */ + printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n", + ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx); + printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n", + ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp); + printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss); + } +} + +int +dump_act(thread_act_t thr_act) +{ + if (!thr_act) + return(0); + + printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n", + thr_act, thr_act->ref_count, + thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0, + thr_act->task, thr_act->task ? 
thr_act->task->ref_count : 0);
+
+	if (thr_act->pool_port) {
+	    thread_pool_t actpp = &thr_act->pool_port->ip_thread_pool;
+	    printf("\tpool(acts_p=%x, waiting=%d)  pool_next %x\n",
+		actpp->thr_acts, actpp->waiting, thr_act->thread_pool_next);
+	}else
+	    printf("\tno thread_pool\n");
+
+	printf("\talerts=%x mask=%x susp=%d user_stop=%d active=%x ast=%x\n",
+	    thr_act->alerts, thr_act->alert_mask,
+	    thr_act->suspend_count, thr_act->user_stop_count,
+	    thr_act->active, thr_act->ast);
+	printf("\thi=%x lo=%x\n", thr_act->higher, thr_act->lower);
+	printf("\tpcb=%x\n", thr_act->mact.pcb);
+
+	if (thr_act->thread && thr_act->thread->kernel_stack) {
+	    vm_offset_t stack = thr_act->thread->kernel_stack;
+
+	    printf("\tk_stk %x  eip %x ebx %x esp %x iss %x\n",
+		stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
+		STACK_IKS(stack)->k_esp, STACK_IEL(stack)->saved_state);
+	}
+
+	dump_handlers(thr_act);
+	dump_regs(thr_act);
+	return((int)thr_act);
+}
+unsigned int
+get_useraddr()
+{
+
+	thread_act_t thr_act = current_act();
+
+	if (thr_act->mact.pcb)
+		return(thr_act->mact.pcb->iss.eip);
+	else
+		return(0);
+
+}
+
+void
+thread_swapin_mach_alloc(thread_t thread)
+{
+
+	/* 386 does not have saveareas */
+
+}
+/*
+ * detach and return a kernel stack from a thread
+ */
+
+vm_offset_t
+stack_detach(thread_t thread)
+{
+  vm_offset_t stack;
+
+  KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH),
+			thread, thread->priority,
+			thread->sched_pri, 0,
+			0);
+
+  stack = thread->kernel_stack;
+  thread->kernel_stack = 0;
+  return(stack);
+}
+
+/*
+ * attach a kernel stack to a thread and initialize it
+ */
+
+void
+stack_attach(struct thread_shuttle *thread,
+	     vm_offset_t stack,
+	     void (*start_pos)(thread_t))
+{
+  struct i386_kernel_state *statep;
+  thread_act_t thr_act = thread->top_act;	/* fix: was declared uninitialized yet dereferenced below */
+
+  KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
+			thread, thread->priority,
+			thread->sched_pri, (int) start_pos,	/* fix: 'continuation' is not in scope here */
+			0);
+
+  assert(stack);
+  statep = STACK_IKS(stack);
+  thread->kernel_stack = stack;
+ + statep->k_eip = (unsigned long) Thread_continue; + statep->k_ebx = (unsigned long) start_pos; + statep->k_esp = (unsigned long) STACK_IEL(stack); + + STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss; + + return; +} + +/* + * move a stack from old to new thread + */ + +void +stack_handoff(thread_t old, + thread_t new) +{ + + vm_offset_t stack; + pmap_t new_pmap; + + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF), + thread, thread->priority, + thread->sched_pri, continuation, + 0); + + assert(new->top_act); + assert(old->top_act); + + stack = stack_detach(old); + stack_attach(new, stack, 0); + + new_pmap = new->top_act->task->map->pmap; + if (old->top_act->task->map->pmap != new_pmap) + PMAP_ACTIVATE_MAP(new->top_act->task->map, cpu_number()); + + thread_machine_set_current(new); + + active_stacks[cpu_number()] = new->kernel_stack; + + return; +} diff --git a/osfmk/i386/phys.c b/osfmk/i386/phys.c new file mode 100644 index 000000000..f5060f2b9 --- /dev/null +++ b/osfmk/i386/phys.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +#include + +#include +#include +#include +#include +#include + +/* + * pmap_zero_page zeros the specified (machine independent) page. + */ +void +pmap_zero_page( + vm_offset_t p) +{ + assert(p != vm_page_fictitious_addr); + bzero((char *)phystokv(p), PAGE_SIZE); +} + +/* + * pmap_zero_part_page + * zeros the specified (machine independent) part of a page. + */ +void +pmap_zero_part_page( + vm_offset_t p, + vm_offset_t offset, + vm_size_t len) +{ + assert(p != vm_page_fictitious_addr); + assert(offset + len <= PAGE_SIZE); + + bzero((char *)phystokv(p) + offset, len); +} + +/* + * pmap_copy_page copies the specified (machine independent) pages. 
+ */ +void +pmap_copy_page( + vm_offset_t src, + vm_offset_t dst) +{ + assert(src != vm_page_fictitious_addr); + assert(dst != vm_page_fictitious_addr); + + memcpy((void *)phystokv(dst), (void *)phystokv(src), PAGE_SIZE); +} + +/* + * pmap_copy_page copies the specified (machine independent) pages. + */ +void +pmap_copy_part_page( + vm_offset_t src, + vm_offset_t src_offset, + vm_offset_t dst, + vm_offset_t dst_offset, + vm_size_t len) +{ + assert(src != vm_page_fictitious_addr); + assert(dst != vm_page_fictitious_addr); + assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE); + assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE); + + memcpy((void *)(phystokv(dst) + dst_offset), + (void *)(phystokv(src) + src_offset), len); +} + +/* + * pmap_copy_part_lpage copies part of a virtually addressed page + * to a physically addressed page. + */ +void +pmap_copy_part_lpage( + vm_offset_t src, + vm_offset_t dst, + vm_offset_t dst_offset, + vm_size_t len) +{ + assert(src != vm_page_fictitious_addr); + assert(dst != vm_page_fictitious_addr); + assert(((dst & PAGE_MASK) + dst_offset + len) <= PAGE_SIZE); + + memcpy((void *)(phystokv(dst) + dst_offset), (void *)src, len); +} + +/* + * pmap_copy_part_rpage copies part of a physically addressed page + * to a virtually addressed page. 
+ */ +void +pmap_copy_part_rpage( + vm_offset_t src, + vm_offset_t src_offset, + vm_offset_t dst, + vm_size_t len) +{ + assert(src != vm_page_fictitious_addr); + assert(dst != vm_page_fictitious_addr); + assert(((src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE); + + memcpy((void *)dst, (void *)(phystokv(src) + src_offset), len); +} + +/* + * kvtophys(addr) + * + * Convert a kernel virtual address to a physical address + */ +vm_offset_t +kvtophys( + vm_offset_t addr) +{ + pt_entry_t *pte; + + if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL) + return 0; + return i386_trunc_page(*pte) | (addr & INTEL_OFFMASK); +} diff --git a/osfmk/i386/pic.h b/osfmk/i386/pic.h new file mode 100644 index 000000000..1c9311a52 --- /dev/null +++ b/osfmk/i386/pic.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* +Copyright (c) 1988,1989 Prime Computer, Inc. Natick, MA 01760 +All Rights Reserved. + +Permission to use, copy, modify, and distribute this +software and its documentation for any purpose and +without fee is hereby granted, provided that the above +copyright notice appears in all copies and that both the +copyright notice and this permission notice appear in +supporting documentation, and that the name of Prime +Computer, Inc. not be used in advertising or publicity +pertaining to distribution of the software without +specific, written prior permission. + +THIS SOFTWARE IS PROVIDED "AS IS", AND PRIME COMPUTER, +INC. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS +SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN +NO EVENT SHALL PRIME COMPUTER, INC. 
BE LIABLE FOR ANY +SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY +DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR +PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR +OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#ifndef _I386_PIC_H_ +#define _I386_PIC_H_ + +#include + +#define NINTR 0x10 +#define NPICS 0x02 + +/* +** The following are definitions used to locate the PICs in the system +*/ + +#define ADDR_PIC_BASE 0x20 +#define OFF_ICW 0x00 +#define OFF_OCW 0x01 +#define SIZE_PIC 0x80 + +/* +** The following banks of definitions ICW1, ICW2, ICW3, and ICW4 are used +** to define the fields of the various ICWs for initialisation of the PICs +*/ + +/* +** ICW1 +*/ + +#define ICW_TEMPLATE 0x10 + +#define LEVL_TRIGGER 0x08 +#define EDGE_TRIGGER 0x00 +#define ADDR_INTRVL4 0x04 +#define ADDR_INTRVL8 0x00 +#define SINGLE__MODE 0x02 +#define CASCADE_MODE 0x00 +#define ICW4__NEEDED 0x01 +#define NO_ICW4_NEED 0x00 + +/* +** ICW2 +*/ + +#define PICM_VECTBASE 0x40 +#define PICS_VECTBASE PICM_VECTBASE + 0x08 + +/* +** ICW3 +*/ + +#define SLAVE_ON_IR0 0x01 +#define SLAVE_ON_IR1 0x02 +#define SLAVE_ON_IR2 0x04 +#define SLAVE_ON_IR3 0x08 +#define SLAVE_ON_IR4 0x10 +#define SLAVE_ON_IR5 0x20 +#define SLAVE_ON_IR6 0x40 +#define SLAVE_ON_IR7 0x80 + +#define I_AM_SLAVE_0 0x00 +#define I_AM_SLAVE_1 0x01 +#define I_AM_SLAVE_2 0x02 +#define I_AM_SLAVE_3 0x03 +#define I_AM_SLAVE_4 0x04 +#define I_AM_SLAVE_5 0x05 +#define I_AM_SLAVE_6 0x06 +#define I_AM_SLAVE_7 0x07 + +/* +** ICW4 +*/ + +#define SNF_MODE_ENA 0x10 +#define SNF_MODE_DIS 0x00 +#define BUFFERD_MODE 0x08 +#define NONBUFD_MODE 0x00 +#define AUTO_EOI_MOD 0x02 +#define NRML_EOI_MOD 0x00 +#define I8086_EMM_MOD 0x01 +#define SET_MCS_MODE 0x00 + +/* +** OCW1 +*/ +#define PICM_MASK 0xFF +#define PICS_MASK 0xFF +/* +** OCW2 +*/ + +#define NON_SPEC_EOI 0x20 +#define SPECIFIC_EOI 0x30 +#define ROT_NON_SPEC 0x50 +#define SET_ROT_AEOI 0x40 +#define RSET_ROTAEOI 
0x00 +#define ROT_SPEC_EOI 0x70 +#define SET_PRIORITY 0x60 +#define NO_OPERATION 0x20 + +#define SEND_EOI_IR0 0x00 +#define SEND_EOI_IR1 0x01 +#define SEND_EOI_IR2 0x02 +#define SEND_EOI_IR3 0x03 +#define SEND_EOI_IR4 0x04 +#define SEND_EOI_IR5 0x05 +#define SEND_EOI_IR6 0x06 +#define SEND_EOI_IR7 0x07 + +/* +** OCW3 +*/ + +#define OCW_TEMPLATE 0x08 +#define SPECIAL_MASK 0x40 +#define MASK_MDE_SET 0x20 +#define MASK_MDE_RST 0x00 +#define POLL_COMMAND 0x04 +#define NO_POLL_CMND 0x00 +#define READ_NEXT_RD 0x02 +#define READ_IR_ONRD 0x00 +#define READ_IS_ONRD 0x01 + +#endif /* _I386_PIC_H_ */ diff --git a/osfmk/i386/pio.h b/osfmk/i386/pio.h new file mode 100644 index 000000000..309714aaf --- /dev/null +++ b/osfmk/i386/pio.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +#ifndef I386_PIO_H +#define I386_PIO_H +#include +#include + +typedef unsigned short i386_ioport_t; + +/* read a longword */ +extern unsigned long inl( + i386_ioport_t port); +/* read a shortword */ +extern unsigned short inw( + i386_ioport_t port); +/* read a byte */ +extern unsigned char inb( + i386_ioport_t port); +/* write a longword */ +extern void outl( + i386_ioport_t port, + unsigned long datum); +/* write a word */ +extern void outw( + i386_ioport_t port, + unsigned short datum); +/* write a longword */ +extern void outb( + i386_ioport_t port, + unsigned char datum); + +/* input an array of longwords */ +extern void linl( + i386_ioport_t port, + int * data, + int count); +/* output an array of longwords */ +extern void loutl( + i386_ioport_t port, + int * data, + int count); + +/* input an array of words */ +extern void linw( + i386_ioport_t port, + int * data, + int count); +/* output an array of words */ +extern void loutw( + i386_ioport_t port, + int * data, + int count); + +/* input an array of bytes */ +extern void linb( + 
i386_ioport_t port, + char * data, + int count); +/* output an array of bytes */ +extern void loutb( + i386_ioport_t port, + char * data, + int count); + +#if defined(__GNUC__) && (!MACH_ASSERT) +extern __inline__ unsigned long inl( + i386_ioport_t port) +{ + unsigned long datum; + __asm__ volatile("inl %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ unsigned short inw( + i386_ioport_t port) +{ + unsigned short datum; + __asm__ volatile(".byte 0x66; inl %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ unsigned char inb( + i386_ioport_t port) +{ + unsigned char datum; + __asm__ volatile("inb %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ void outl( + i386_ioport_t port, + unsigned long datum) +{ + __asm__ volatile("outl %0, %1" : : "a" (datum), "d" (port)); +} + +extern __inline__ void outw( + i386_ioport_t port, + unsigned short datum) +{ + __asm__ volatile(".byte 0x66; outl %0, %1" : : "a" (datum), "d" (port)); +} + +extern __inline__ void outb( + i386_ioport_t port, + unsigned char datum) +{ + __asm__ volatile("outb %0, %1" : : "a" (datum), "d" (port)); +} +#endif /* defined(__GNUC__) && (!MACH_ASSERT) */ +#endif /* I386_PIO_H */ diff --git a/osfmk/i386/pit.h b/osfmk/i386/pit.h new file mode 100644 index 000000000..ab14da5c1 --- /dev/null +++ b/osfmk/i386/pit.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. 
+ + All Rights Reserved + +Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby +granted, provided that the above copyright notice appears in all +copies and that both the copyright notice and this permission notice +appear in supporting documentation, and that the name of Intel +not be used in advertising or publicity pertaining to distribution +of the software without specific, written prior permission. + +INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, +IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, +NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#include +/* Definitions for 8254 Programmable Interrupt Timer ports on AT 386 */ +#define PITCTR0_PORT 0x40 /* counter 0 port */ +#define PITCTR1_PORT 0x41 /* counter 1 port */ +#define PITCTR2_PORT 0x42 /* counter 2 port */ +#define PITCTL_PORT 0x43 /* PIT control port */ +#define PITAUX_PORT 0x61 /* PIT auxiliary port */ +/* bits used in auxiliary control port for timer 2 */ +#define PITAUX_GATE2 0x01 /* aux port, PIT gate 2 input */ +#define PITAUX_OUT2 0x02 /* aux port, PIT clock out 2 enable */ + +/* Following are used for Timer 0 */ +#define PIT_C0 0x00 /* select counter 0 */ +#define PIT_LOADMODE 0x30 /* load least significant byte followed + * by most significant byte */ +#define PIT_NDIVMODE 0x04 /*divide by N counter */ +#define PIT_SQUAREMODE 0x06 /* square-wave mode */ + +/* Used for Timer 1. 
Used for delay calculations in countdown mode */ +#define PIT_C1 0x40 /* select counter 1 */ +#define PIT_READMODE 0x30 /* read or load least significant byte + * followed by most significant byte */ +#define PIT_RATEMODE 0x06 /* square-wave mode for USART */ + +/* + * Clock speed for the timer in hz divided by the constant HZ + * (defined in param.h) + */ +#define CLKNUM 1193167 + +#if EXL +/* added micro-timer support. --- csy */ +typedef struct time_latch { + time_t ticks; /* time in HZ since boot */ + time_t uticks; /* time in 1.25 MHZ */ +/* don't need these two for now. --- csy */ +/* time_t secs; \* seconds since boot */ +/* time_t epochsecs; \* seconds since epoch */ + } time_latch; +/* a couple in-line assembly codes for efficiency. */ +asm int intr_disable() +{ + pushfl + cli +} + +asm int intr_restore() +{ + popfl +} + +#endif /* EXL */ diff --git a/osfmk/i386/pmap.c b/osfmk/i386/pmap.c new file mode 100644 index 000000000..a4d2b70b3 --- /dev/null +++ b/osfmk/i386/pmap.c @@ -0,0 +1,2962 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: pmap.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * (These guys wrote the Vax version) + * + * Physical Map management code for Intel i386, i486, and i860. + * + * Manages physical address maps. + * + * In addition to hardware address maps, this + * module is called upon to provide software-use-only + * maps which may or may not be stored in the same + * form as hardware maps. These pseudo-maps are + * used to store intermediate results from copy + * operations to and from address spaces. + * + * Since the information managed by this module is + * also stored by the logical address mapping module, + * this module may throw away valid virtual-to-physical + * mappings at almost any time. However, invalidations + * of virtual-to-physical mappings must be done as + * requested. 
+ * + * In order to cope with hardware architectures which + * make virtual-to-physical map invalidates expensive, + * this module may delay invalidate or reduced protection + * operations until such time as they are actually + * necessary. This module is given full information as + * to which processors are currently using which maps, + * and to when physical maps must be made correct. + */ + +#include + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include /* prototyping */ +#include + +#include + +#if MACH_KDB +#include +#include +#include +#include +#endif /* MACH_KDB */ + +#include + +#if NCPUS > 1 +#include +#endif + +/* + * Forward declarations for internal functions. + */ +void pmap_expand( + pmap_t map, + vm_offset_t v); + +extern void pmap_remove_range( + pmap_t pmap, + vm_offset_t va, + pt_entry_t *spte, + pt_entry_t *epte); + +void phys_attribute_clear( + vm_offset_t phys, + int bits); + +boolean_t phys_attribute_test( + vm_offset_t phys, + int bits); + +void pmap_set_modify(vm_offset_t phys); + +void phys_attribute_set( + vm_offset_t phys, + int bits); + + +#ifndef set_dirbase +void set_dirbase(vm_offset_t dirbase); +#endif /* set_dirbase */ + +#define PA_TO_PTE(pa) (pa_to_pte((pa) - VM_MIN_KERNEL_ADDRESS)) +#define iswired(pte) ((pte) & INTEL_PTE_WIRED) + +pmap_t real_pmap[NCPUS]; + +#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry); +#define WRITE_PTE_FAST(pte_p, pte_entry) *(pte_p) = (pte_entry); + +/* + * Private data structures. + */ + +/* + * For each vm_page_t, there is a list of all currently + * valid virtual mappings of that page. An entry is + * a pv_entry_t; the list is the pv_table. 
+ */ + +typedef struct pv_entry { + struct pv_entry *next; /* next pv_entry */ + pmap_t pmap; /* pmap where mapping lies */ + vm_offset_t va; /* virtual address for mapping */ +} *pv_entry_t; + +#define PV_ENTRY_NULL ((pv_entry_t) 0) + +pv_entry_t pv_head_table; /* array of entries, one per page */ + +/* + * pv_list entries are kept on a list that can only be accessed + * with the pmap system locked (at SPLVM, not in the cpus_active set). + * The list is refilled from the pv_list_zone if it becomes empty. + */ +pv_entry_t pv_free_list; /* free list at SPLVM */ +decl_simple_lock_data(,pv_free_list_lock) + +#define PV_ALLOC(pv_e) { \ + simple_lock(&pv_free_list_lock); \ + if ((pv_e = pv_free_list) != 0) { \ + pv_free_list = pv_e->next; \ + } \ + simple_unlock(&pv_free_list_lock); \ +} + +#define PV_FREE(pv_e) { \ + simple_lock(&pv_free_list_lock); \ + pv_e->next = pv_free_list; \ + pv_free_list = pv_e; \ + simple_unlock(&pv_free_list_lock); \ +} + +zone_t pv_list_zone; /* zone of pv_entry structures */ + +/* + * Each entry in the pv_head_table is locked by a bit in the + * pv_lock_table. The lock bits are accessed by the physical + * address of the page they lock. + */ + +char *pv_lock_table; /* pointer to array of bits */ +#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE) + +/* + * First and last physical addresses that we maintain any information + * for. Initialized to zero so that pmap operations done before + * pmap_init won't touch any non-existent structures. + */ +vm_offset_t vm_first_phys = (vm_offset_t) 0; +vm_offset_t vm_last_phys = (vm_offset_t) 0; +boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ + +/* + * Index into pv_head table, its lock bits, and the modify/reference + * bits starting at vm_first_phys. 
+ */ + +#define pa_index(pa) (atop(pa - vm_first_phys)) + +#define pai_to_pvh(pai) (&pv_head_table[pai]) +#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table) +#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table) + +/* + * Array of physical page attribites for managed pages. + * One byte per physical page. + */ +char *pmap_phys_attributes; + +/* + * Physical page attributes. Copy bits from PTE definition. + */ +#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */ +#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */ + +/* + * Amount of virtual memory mapped by one + * page-directory entry. + */ +#define PDE_MAPPED_SIZE (pdetova(1)) + +/* + * We allocate page table pages directly from the VM system + * through this object. It maps physical memory. + */ +vm_object_t pmap_object = VM_OBJECT_NULL; + +/* + * Locking and TLB invalidation + */ + +/* + * Locking Protocols: + * + * There are two structures in the pmap module that need locking: + * the pmaps themselves, and the per-page pv_lists (which are locked + * by locking the pv_lock_table entry that corresponds to the pv_head + * for the list in question.) Most routines want to lock a pmap and + * then do operations in it that require pv_list locking -- however + * pmap_remove_all and pmap_copy_on_write operate on a physical page + * basis and want to do the locking in the reverse order, i.e. lock + * a pv_list and then go through all the pmaps referenced by that list. + * To protect against deadlock between these two cases, the pmap_lock + * is used. There are three different locking protocols as a result: + * + * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only + * the pmap. + * + * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read + * lock on the pmap_lock (shared read), then lock the pmap + * and finally the pv_lists as needed [i.e. pmap lock before + * pv_list lock.] + * + * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...) 
+ * Get a write lock on the pmap_lock (exclusive write); this + * also guaranteees exclusive access to the pv_lists. Lock the + * pmaps as needed. + * + * At no time may any routine hold more than one pmap lock or more than + * one pv_list lock. Because interrupt level routines can allocate + * mbufs and cause pmap_enter's, the pmap_lock and the lock on the + * kernel_pmap can only be held at splhigh. + */ + +#if NCPUS > 1 +/* + * We raise the interrupt level to splhigh, to block interprocessor + * interrupts during pmap operations. We must take the CPU out of + * the cpus_active set while interrupts are blocked. + */ +#define SPLVM(spl) { \ + spl = splhigh(); \ + mp_disable_preemption(); \ + i_bit_clear(cpu_number(), &cpus_active); \ + mp_enable_preemption(); \ +} + +#define SPLX(spl) { \ + mp_disable_preemption(); \ + i_bit_set(cpu_number(), &cpus_active); \ + mp_enable_preemption(); \ + splx(spl); \ +} + +/* + * Lock on pmap system + */ +lock_t pmap_system_lock; + +#define PMAP_READ_LOCK(pmap, spl) { \ + SPLVM(spl); \ + lock_read(&pmap_system_lock); \ + simple_lock(&(pmap)->lock); \ +} + +#define PMAP_WRITE_LOCK(spl) { \ + SPLVM(spl); \ + lock_write(&pmap_system_lock); \ +} + +#define PMAP_READ_UNLOCK(pmap, spl) { \ + simple_unlock(&(pmap)->lock); \ + lock_read_done(&pmap_system_lock); \ + SPLX(spl); \ +} + +#define PMAP_WRITE_UNLOCK(spl) { \ + lock_write_done(&pmap_system_lock); \ + SPLX(spl); \ +} + +#define PMAP_WRITE_TO_READ_LOCK(pmap) { \ + simple_lock(&(pmap)->lock); \ + lock_write_to_read(&pmap_system_lock); \ +} + +#define LOCK_PVH(index) lock_pvh_pai(index) + +#define UNLOCK_PVH(index) unlock_pvh_pai(index) + +#define PMAP_FLUSH_TLBS() \ +{ \ + flush_tlb(); \ + i386_signal_cpus(MP_TLB_FLUSH); \ +} + +#define PMAP_RELOAD_TLBS() { \ + i386_signal_cpus(MP_TLB_RELOAD); \ + set_cr3(kernel_pmap->pdirbase); \ +} + +#define PMAP_INVALIDATE_PAGE(map, addr) { \ + if (map == kernel_pmap) \ + invlpg((vm_offset_t) addr); \ + else \ + flush_tlb(); \ + 
i386_signal_cpus(MP_TLB_FLUSH); \ +} + +#else /* NCPUS > 1 */ + +#if MACH_RT +#define SPLVM(spl) { (spl) = splhigh(); } +#define SPLX(spl) splx (spl) +#else /* MACH_RT */ +#define SPLVM(spl) +#define SPLX(spl) +#endif /* MACH_RT */ + +#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl) +#define PMAP_WRITE_LOCK(spl) SPLVM(spl) +#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl) +#define PMAP_WRITE_UNLOCK(spl) SPLX(spl) +#define PMAP_WRITE_TO_READ_LOCK(pmap) + +#if MACH_RT +#define LOCK_PVH(index) disable_preemption() +#define UNLOCK_PVH(index) enable_preemption() +#else /* MACH_RT */ +#define LOCK_PVH(index) +#define UNLOCK_PVH(index) +#endif /* MACH_RT */ + +#define PMAP_FLUSH_TLBS() flush_tlb() +#define PMAP_RELOAD_TLBS() set_cr3(kernel_pmap->pdirbase) +#define PMAP_INVALIDATE_PAGE(map, addr) { \ + if (map == kernel_pmap) \ + invlpg((vm_offset_t) addr); \ + else \ + flush_tlb(); \ +} + +#endif /* NCPUS > 1 */ + +#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */ + +#if NCPUS > 1 +/* + * Structures to keep track of pending TLB invalidations + */ +cpu_set cpus_active; +cpu_set cpus_idle; +volatile boolean_t cpu_update_needed[NCPUS]; + + +#endif /* NCPUS > 1 */ + +/* + * Other useful macros. + */ +#define current_pmap() (vm_map_pmap(current_act()->map)) +#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0) + +struct pmap kernel_pmap_store; +pmap_t kernel_pmap; + +struct zone *pmap_zone; /* zone of pmap structures */ + +int pmap_debug = 0; /* flag for debugging prints */ +int ptes_per_vm_page; /* number of hardware ptes needed + to map one VM page. */ +unsigned int inuse_ptepages_count = 0; /* debugging */ + +/* + * Pmap cache. Cache is threaded through ref_count field of pmap. + * Max will eventually be constant -- variable for experimentation. 
+ */ +int pmap_cache_max = 32; +int pmap_alloc_chunk = 8; +pmap_t pmap_cache_list; +int pmap_cache_count; +decl_simple_lock_data(,pmap_cache_lock) + +extern vm_offset_t hole_start, hole_end; + +extern char end; + +/* + * Page directory for kernel. + */ +pt_entry_t *kpde = 0; /* set by start.s - keep out of bss */ + +#if DEBUG_ALIAS +#define PMAP_ALIAS_MAX 32 +struct pmap_alias { + vm_offset_t rpc; + pmap_t pmap; + vm_offset_t va; + int cookie; +#define PMAP_ALIAS_COOKIE 0xdeadbeef +} pmap_aliasbuf[PMAP_ALIAS_MAX]; +int pmap_alias_index = 0; +extern vm_offset_t get_rpc(); + +#endif /* DEBUG_ALIAS */ + +/* + * Given an offset and a map, compute the address of the + * pte. If the address is invalid with respect to the map + * then PT_ENTRY_NULL is returned (and the map may need to grow). + * + * This is only used in machine-dependent code. + */ + +pt_entry_t * +pmap_pte( + register pmap_t pmap, + register vm_offset_t addr) +{ + register pt_entry_t *ptp; + register pt_entry_t pte; + + pte = pmap->dirbase[pdenum(pmap, addr)]; + if ((pte & INTEL_PTE_VALID) == 0) + return(PT_ENTRY_NULL); + ptp = (pt_entry_t *)ptetokv(pte); + return(&ptp[ptenum(addr)]); + +} + +#define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(pmap, addr)]) + +#define DEBUG_PTE_PAGE 0 + +#if DEBUG_PTE_PAGE +void +ptep_check( + ptep_t ptep) +{ + register pt_entry_t *pte, *epte; + int ctu, ctw; + + /* check the use and wired counts */ + if (ptep == PTE_PAGE_NULL) + return; + pte = pmap_pte(ptep->pmap, ptep->va); + epte = pte + INTEL_PGBYTES/sizeof(pt_entry_t); + ctu = 0; + ctw = 0; + while (pte < epte) { + if (pte->pfn != 0) { + ctu++; + if (pte->wired) + ctw++; + } + pte += ptes_per_vm_page; + } + + if (ctu != ptep->use_count || ctw != ptep->wired_count) { + printf("use %d wired %d - actual use %d wired %d\n", + ptep->use_count, ptep->wired_count, ctu, ctw); + panic("pte count"); + } +} +#endif /* DEBUG_PTE_PAGE */ + +/* + * Map memory at initialization. 
The physical addresses being + * mapped are not managed and are never unmapped. + * + * For now, VM is already on, we only need to map the + * specified memory. + */ +vm_offset_t +pmap_map( + register vm_offset_t virt, + register vm_offset_t start, + register vm_offset_t end, + register vm_prot_t prot) +{ + register int ps; + + ps = PAGE_SIZE; + while (start < end) { + pmap_enter(kernel_pmap, virt, start, prot, FALSE); + virt += ps; + start += ps; + } + return(virt); +} + +/* + * Back-door routine for mapping kernel VM at initialization. + * Useful for mapping memory outside the range + * Sets no-cache, A, D. + * [vm_first_phys, vm_last_phys) (i.e., devices). + * Otherwise like pmap_map. + */ +vm_offset_t +pmap_map_bd( + register vm_offset_t virt, + register vm_offset_t start, + register vm_offset_t end, + vm_prot_t prot) +{ + register pt_entry_t template; + register pt_entry_t *pte; + + template = pa_to_pte(start) + | INTEL_PTE_NCACHE + | INTEL_PTE_REF + | INTEL_PTE_MOD + | INTEL_PTE_WIRED + | INTEL_PTE_VALID; + if (prot & VM_PROT_WRITE) + template |= INTEL_PTE_WRITE; + + while (start < end) { + pte = pmap_pte(kernel_pmap, virt); + if (pte == PT_ENTRY_NULL) + panic("pmap_map_bd: Invalid kernel address\n"); + WRITE_PTE_FAST(pte, template) + pte_increment_pa(template); + virt += PAGE_SIZE; + start += PAGE_SIZE; + } + + PMAP_FLUSH_TLBS(); + + return(virt); +} + +extern int cnvmem; +extern char *first_avail; +extern vm_offset_t virtual_avail, virtual_end; +extern vm_offset_t avail_start, avail_end, avail_next; + +/* + * Bootstrap the system enough to run with virtual memory. + * Map the kernel's code and data, and allocate the system page table. + * Called with mapping OFF. Page_size must already be set. 
+ * + * Parameters: + * load_start: PA where kernel was loaded + * avail_start PA of first available physical page - + * after kernel page tables + * avail_end PA of last available physical page + * virtual_avail VA of first available page - + * after kernel page tables + * virtual_end VA of last available page - + * end of kernel address space + * + * &start_text start of kernel text + * &etext end of kernel text + */ + +void +pmap_bootstrap( + vm_offset_t load_start) +{ + vm_offset_t va, tva, paddr; + pt_entry_t template; + pt_entry_t *pde, *pte, *ptend; + vm_size_t morevm; /* VM space for kernel map */ + + /* + * Set ptes_per_vm_page for general use. + */ + ptes_per_vm_page = PAGE_SIZE / INTEL_PGBYTES; + + /* + * The kernel's pmap is statically allocated so we don't + * have to use pmap_create, which is unlikely to work + * correctly at this part of the boot sequence. + */ + + kernel_pmap = &kernel_pmap_store; + +#if NCPUS > 1 + lock_init(&pmap_system_lock, + FALSE, /* NOT a sleep lock */ + ETAP_VM_PMAP_SYS, + ETAP_VM_PMAP_SYS_I); +#endif /* NCPUS > 1 */ + + simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL); + simple_lock_init(&pv_free_list_lock, ETAP_VM_PMAP_FREE); + + kernel_pmap->ref_count = 1; + + /* + * The kernel page directory has been allocated; + * its virtual address is in kpde. + * + * Enough kernel page table pages have been allocated + * to map low system memory, kernel text, kernel data/bss, + * kdb's symbols, and the page directory and page tables. + * + * No other physical memory has been allocated. + */ + + /* + * Start mapping virtual memory to physical memory, 1-1, + * at end of mapped memory. 
+ */ + + virtual_avail = phystokv(avail_start); + virtual_end = phystokv(avail_end); + + pde = kpde; + pde += pdenum(kernel_pmap, virtual_avail); + + if (pte_to_pa(*pde) == 0) { + /* This pte has not been allocated */ + pte = 0; ptend = 0; + } + else { + pte = (pt_entry_t *)ptetokv(*pde); + /* first pte of page */ + ptend = pte+NPTES; /* last pte of page */ + pte += ptenum(virtual_avail); /* point to pte that + maps first avail VA */ + pde++; /* point pde to first empty slot */ + } + + template = pa_to_pte(avail_start) + | INTEL_PTE_VALID + | INTEL_PTE_WRITE; + + for (va = virtual_avail; va < virtual_end; va += INTEL_PGBYTES) { + if (pte >= ptend) { + pte = (pt_entry_t *)phystokv(virtual_avail); + ptend = pte + NPTES; + virtual_avail = (vm_offset_t)ptend; + if (virtual_avail == hole_start) + virtual_avail = hole_end; + *pde = PA_TO_PTE((vm_offset_t) pte) + | INTEL_PTE_VALID + | INTEL_PTE_WRITE; + pde++; + } + WRITE_PTE_FAST(pte, template) + pte++; + pte_increment_pa(template); + } + + avail_start = virtual_avail - VM_MIN_KERNEL_ADDRESS; + avail_next = avail_start; + + /* + * Figure out maximum kernel address. + * Kernel virtual space is: + * - at least three times physical memory + * - at least VM_MIN_KERNEL_ADDRESS + * - limited by VM_MAX_KERNEL_ADDRESS + */ + + morevm = 3*avail_end; + if (virtual_end + morevm > VM_MAX_KERNEL_ADDRESS) + morevm = VM_MAX_KERNEL_ADDRESS - virtual_end + 1; + +/* + * startup requires additional virtual memory (for tables, buffers, + * etc.). The kd driver may also require some of that memory to + * access the graphics board. + * + */ + *(int *)&template = 0; + + /* + * Leave room for kernel-loaded servers, which have been linked at + * addresses from VM_MIN_KERNEL_LOADED_ADDRESS to + * VM_MAX_KERNEL_LOADED_ADDRESS. 
+ */ + if (virtual_end + morevm < VM_MAX_KERNEL_LOADED_ADDRESS + 1) + morevm = VM_MAX_KERNEL_LOADED_ADDRESS + 1 - virtual_end; + + + virtual_end += morevm; + for (tva = va; tva < virtual_end; tva += INTEL_PGBYTES) { + if (pte >= ptend) { + pmap_next_page(&paddr); + pte = (pt_entry_t *)phystokv(paddr); + ptend = pte + NPTES; + *pde = PA_TO_PTE((vm_offset_t) pte) + | INTEL_PTE_VALID + | INTEL_PTE_WRITE; + pde++; + } + WRITE_PTE_FAST(pte, template) + pte++; + } + + virtual_avail = va; + + /* Push the virtual avail address above hole_end */ + if (virtual_avail < hole_end) + virtual_avail = hole_end; + + /* + * c.f. comment above + * + */ + virtual_end = va + morevm; + while (pte < ptend) + *pte++ = 0; + + /* + * invalidate user virtual addresses + */ + memset((char *)kpde, + 0, + pdenum(kernel_pmap,VM_MIN_KERNEL_ADDRESS)*sizeof(pt_entry_t)); + kernel_pmap->dirbase = kpde; + printf("Kernel virtual space from 0x%x to 0x%x.\n", + VM_MIN_KERNEL_ADDRESS, virtual_end); + + avail_start = avail_next; + printf("Available physical space from 0x%x to 0x%x\n", + avail_start, avail_end); + + kernel_pmap->pdirbase = kvtophys((vm_offset_t)kernel_pmap->dirbase); + +} + +void +pmap_virtual_space( + vm_offset_t *startp, + vm_offset_t *endp) +{ + *startp = virtual_avail; + *endp = virtual_end; +} + +/* + * Initialize the pmap module. + * Called by vm_init, to initialize any structures that the pmap + * system needs to map virtual memory. + */ +void +pmap_init(void) +{ + register long npages; + vm_offset_t addr; + register vm_size_t s; + int i; + + /* + * Allocate memory for the pv_head_table and its lock bits, + * the modify bit array, and the pte_page table. 
+ */ + + npages = atop(avail_end - avail_start); + s = (vm_size_t) (sizeof(struct pv_entry) * npages + + pv_lock_table_size(npages) + + npages); + + s = round_page(s); + if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS) + panic("pmap_init"); + + memset((char *)addr, 0, s); + + /* + * Allocate the structures first to preserve word-alignment. + */ + pv_head_table = (pv_entry_t) addr; + addr = (vm_offset_t) (pv_head_table + npages); + + pv_lock_table = (char *) addr; + addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages)); + + pmap_phys_attributes = (char *) addr; + + /* + * Create the zone of physical maps, + * and of the physical-to-virtual entries. + */ + s = (vm_size_t) sizeof(struct pmap); + pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */ + s = (vm_size_t) sizeof(struct pv_entry); + pv_list_zone = zinit(s, 10000*s, 4096, "pv_list"); /* XXX */ + + /* + * Only now, when all of the data structures are allocated, + * can we set vm_first_phys and vm_last_phys. If we set them + * too soon, the kmem_alloc_wired above will try to use these + * data structures and blow up. + */ + + vm_first_phys = avail_start; + vm_last_phys = avail_end; + pmap_initialized = TRUE; + + /* + * Initializie pmap cache. + */ + pmap_cache_list = PMAP_NULL; + pmap_cache_count = 0; + simple_lock_init(&pmap_cache_lock, ETAP_VM_PMAP_CACHE); +} + + +#define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end)) + + +#define valid_page(x) (pmap_initialized && pmap_valid_page(x)) + +boolean_t +pmap_verify_free( + vm_offset_t phys) +{ + pv_entry_t pv_h; + int pai; + spl_t spl; + boolean_t result; + + assert(phys != vm_page_fictitious_addr); + if (!pmap_initialized) + return(TRUE); + + if (!pmap_valid_page(phys)) + return(FALSE); + + PMAP_WRITE_LOCK(spl); + + pai = pa_index(phys); + pv_h = pai_to_pvh(pai); + + result = (pv_h->pmap == PMAP_NULL); + PMAP_WRITE_UNLOCK(spl); + + return(result); +} + +/* + * Create and return a physical map. 
+ * + * If the size specified for the map + * is zero, the map is an actual physical + * map, and may be referenced by the + * hardware. + * + * If the size specified is non-zero, + * the map will be used in software only, and + * is bounded by that size. + */ +pmap_t +pmap_create( + vm_size_t size) +{ + register pmap_t p; + register pmap_statistics_t stats; + + /* + * A software use-only map doesn't even need a map. + */ + + if (size != 0) { + return(PMAP_NULL); + } + + /* + * Try to get cached pmap, if this fails, + * allocate a pmap struct from the pmap_zone. Then allocate + * the page descriptor table from the pd_zone. + */ + + simple_lock(&pmap_cache_lock); + while ((p = pmap_cache_list) == PMAP_NULL) { + + vm_offset_t dirbases; + register int i; + + simple_unlock(&pmap_cache_lock); + +#if NCPUS > 1 + /* + * XXX NEEDS MP DOING ALLOC logic so that if multiple processors + * XXX get here, only one allocates a chunk of pmaps. + * (for now we'll just let it go - safe but wasteful) + */ +#endif + + /* + * Allocate a chunck of pmaps. Single kmem_alloc_wired + * operation reduces kernel map fragmentation. + */ + + if (kmem_alloc_wired(kernel_map, &dirbases, + pmap_alloc_chunk * INTEL_PGBYTES) + != KERN_SUCCESS) + panic("pmap_create.1"); + + for (i = pmap_alloc_chunk; i > 0 ; i--) { + p = (pmap_t) zalloc(pmap_zone); + if (p == PMAP_NULL) + panic("pmap_create.2"); + + /* + * Initialize pmap. Don't bother with + * ref count as cache list is threaded + * through it. It'll be set on cache removal. + */ + p->dirbase = (pt_entry_t *) dirbases; + dirbases += INTEL_PGBYTES; + memcpy(p->dirbase, kpde, INTEL_PGBYTES); + p->pdirbase = kvtophys((vm_offset_t)p->dirbase); + + simple_lock_init(&p->lock, ETAP_VM_PMAP); + p->cpus_using = 0; + + /* + * Initialize statistics. 
+ */ + stats = &p->stats; + stats->resident_count = 0; + stats->wired_count = 0; + + /* + * Insert into cache + */ + simple_lock(&pmap_cache_lock); + p->ref_count = (int) pmap_cache_list; + pmap_cache_list = p; + pmap_cache_count++; + simple_unlock(&pmap_cache_lock); + } + simple_lock(&pmap_cache_lock); + } + + assert(p->stats.resident_count == 0); + assert(p->stats.wired_count == 0); + p->stats.resident_count = 0; + p->stats.wired_count = 0; + + pmap_cache_list = (pmap_t) p->ref_count; + p->ref_count = 1; + pmap_cache_count--; + simple_unlock(&pmap_cache_lock); + + return(p); +} + +/* + * Retire the given physical map from service. + * Should only be called if the map contains + * no valid mappings. + */ + +void +pmap_destroy( + register pmap_t p) +{ + register pt_entry_t *pdep; + register vm_offset_t pa; + register int c; + spl_t s; + register vm_page_t m; + + if (p == PMAP_NULL) + return; + + SPLVM(s); + simple_lock(&p->lock); + c = --p->ref_count; + if (c == 0) { + register int my_cpu; + + mp_disable_preemption(); + my_cpu = cpu_number(); + + /* + * If some cpu is not using the physical pmap pointer that it + * is supposed to be (see set_dirbase), we might be using the + * pmap that is being destroyed! Make sure we are + * physically on the right pmap: + */ + + + if (real_pmap[my_cpu] == p) { + PMAP_CPU_CLR(p, my_cpu); + real_pmap[my_cpu] = kernel_pmap; + PMAP_RELOAD_TLBS(); + } + mp_enable_preemption(); + } + simple_unlock(&p->lock); + SPLX(s); + + if (c != 0) { + return; /* still in use */ + } + + /* + * Free the memory maps, then the + * pmap structure. 
+ */ + pdep = p->dirbase; + while (pdep < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]) { + if (*pdep & INTEL_PTE_VALID) { + pa = pte_to_pa(*pdep); + vm_object_lock(pmap_object); + m = vm_page_lookup(pmap_object, pa); + if (m == VM_PAGE_NULL) + panic("pmap_destroy: pte page not in object"); + vm_page_lock_queues(); + vm_page_free(m); + inuse_ptepages_count--; + vm_object_unlock(pmap_object); + vm_page_unlock_queues(); + + /* + * Clear pdes, this might be headed for the cache. + */ + c = ptes_per_vm_page; + do { + *pdep = 0; + pdep++; + } while (--c > 0); + } + else { + pdep += ptes_per_vm_page; + } + + } + assert(p->stats.resident_count == 0); + assert(p->stats.wired_count == 0); + + /* + * Add to cache if not already full + */ + simple_lock(&pmap_cache_lock); + if (pmap_cache_count <= pmap_cache_max) { + p->ref_count = (int) pmap_cache_list; + pmap_cache_list = p; + pmap_cache_count++; + simple_unlock(&pmap_cache_lock); + } + else { + simple_unlock(&pmap_cache_lock); + kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES); + zfree(pmap_zone, (vm_offset_t) p); + } +} + +/* + * Add a reference to the specified pmap. + */ + +void +pmap_reference( + register pmap_t p) +{ + spl_t s; + + if (p != PMAP_NULL) { + SPLVM(s); + simple_lock(&p->lock); + p->ref_count++; + simple_unlock(&p->lock); + SPLX(s); + } +} + +/* + * Remove a range of hardware page-table entries. + * The entries given are the first (inclusive) + * and last (exclusive) entries for the VM pages. + * The virtual address is the va for the first pte. + * + * The pmap must be locked. + * If the pmap is not the kernel pmap, the range must lie + * entirely within one pte-page. This is NOT checked. + * Assumes that the pte-page exists. 
+ */ + +/* static */ +void +pmap_remove_range( + pmap_t pmap, + vm_offset_t va, + pt_entry_t *spte, + pt_entry_t *epte) +{ + register pt_entry_t *cpte; + int num_removed, num_unwired; + int pai; + vm_offset_t pa; + +#if DEBUG_PTE_PAGE + if (pmap != kernel_pmap) + ptep_check(get_pte_page(spte)); +#endif /* DEBUG_PTE_PAGE */ + num_removed = 0; + num_unwired = 0; + + for (cpte = spte; cpte < epte; + cpte += ptes_per_vm_page, va += PAGE_SIZE) { + + pa = pte_to_pa(*cpte); + if (pa == 0) + continue; + + num_removed++; + if (iswired(*cpte)) + num_unwired++; + + if (!valid_page(pa)) { + + /* + * Outside range of managed physical memory. + * Just remove the mappings. + */ + register int i = ptes_per_vm_page; + register pt_entry_t *lpte = cpte; + do { + *lpte = 0; + lpte++; + } while (--i > 0); + continue; + } + + pai = pa_index(pa); + LOCK_PVH(pai); + + /* + * Get the modify and reference bits. + */ + { + register int i; + register pt_entry_t *lpte; + + i = ptes_per_vm_page; + lpte = cpte; + do { + pmap_phys_attributes[pai] |= + *lpte & (PHYS_MODIFIED|PHYS_REFERENCED); + *lpte = 0; + lpte++; + } while (--i > 0); + } + + /* + * Remove the mapping from the pvlist for + * this physical page. + */ + { + register pv_entry_t pv_h, prev, cur; + + pv_h = pai_to_pvh(pai); + if (pv_h->pmap == PMAP_NULL) { + panic("pmap_remove: null pv_list!"); + } + if (pv_h->va == va && pv_h->pmap == pmap) { + /* + * Header is the pv_entry. 
Copy the next one + * to header and free the next one (we cannot + * free the header) + */ + cur = pv_h->next; + if (cur != PV_ENTRY_NULL) { + *pv_h = *cur; + PV_FREE(cur); + } + else { + pv_h->pmap = PMAP_NULL; + } + } + else { + cur = pv_h; + do { + prev = cur; + if ((cur = prev->next) == PV_ENTRY_NULL) { + panic("pmap-remove: mapping not in pv_list!"); + } + } while (cur->va != va || cur->pmap != pmap); + prev->next = cur->next; + PV_FREE(cur); + } + UNLOCK_PVH(pai); + } + } + + /* + * Update the counts + */ + assert(pmap->stats.resident_count >= num_removed); + pmap->stats.resident_count -= num_removed; + assert(pmap->stats.wired_count >= num_unwired); + pmap->stats.wired_count -= num_unwired; +} + +/* + * Remove the given range of addresses + * from the specified map. + * + * It is assumed that the start and end are properly + * rounded to the hardware page size. + */ + +void +pmap_remove( + pmap_t map, + vm_offset_t s, + vm_offset_t e) +{ + spl_t spl; + register pt_entry_t *pde; + register pt_entry_t *spte, *epte; + vm_offset_t l; + + if (map == PMAP_NULL) + return; + + PMAP_READ_LOCK(map, spl); + + pde = pmap_pde(map, s); + + while (s < e) { + l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); + if (l > e) + l = e; + if (*pde & INTEL_PTE_VALID) { + spte = (pt_entry_t *)ptetokv(*pde); + spte = &spte[ptenum(s)]; + epte = &spte[intel_btop(l-s)]; + pmap_remove_range(map, s, spte, epte); + } + s = l; + pde++; + } + + PMAP_FLUSH_TLBS(); + + PMAP_READ_UNLOCK(map, spl); +} + +/* + * Routine: pmap_page_protect + * + * Function: + * Lower the permission for all mappings to a given + * page. + */ +void +pmap_page_protect( + vm_offset_t phys, + vm_prot_t prot) +{ + pv_entry_t pv_h, prev; + register pv_entry_t pv_e; + register pt_entry_t *pte; + int pai; + register pmap_t pmap; + spl_t spl; + boolean_t remove; + + assert(phys != vm_page_fictitious_addr); + if (!valid_page(phys)) { + /* + * Not a managed page. + */ + return; + } + + /* + * Determine the new protection. 
+ */ + switch (prot) { + case VM_PROT_READ: + case VM_PROT_READ|VM_PROT_EXECUTE: + remove = FALSE; + break; + case VM_PROT_ALL: + return; /* nothing to do */ + default: + remove = TRUE; + break; + } + + /* + * Lock the pmap system first, since we will be changing + * several pmaps. + */ + + PMAP_WRITE_LOCK(spl); + + pai = pa_index(phys); + pv_h = pai_to_pvh(pai); + + /* + * Walk down PV list, changing or removing all mappings. + * We do not have to lock the pv_list because we have + * the entire pmap system locked. + */ + if (pv_h->pmap != PMAP_NULL) { + + prev = pv_e = pv_h; + do { + pmap = pv_e->pmap; + /* + * Lock the pmap to block pmap_extract and similar routines. + */ + simple_lock(&pmap->lock); + + { + register vm_offset_t va; + + va = pv_e->va; + pte = pmap_pte(pmap, va); + + /* + * Consistency checks. + */ + /* assert(*pte & INTEL_PTE_VALID); XXX */ + /* assert(pte_to_phys(*pte) == phys); */ + + /* + * Invalidate TLBs for all CPUs using this mapping. + */ + PMAP_INVALIDATE_PAGE(pmap, va); + } + + /* + * Remove the mapping if new protection is NONE + * or if write-protecting a kernel mapping. + */ + if (remove || pmap == kernel_pmap) { + /* + * Remove the mapping, collecting any modify bits. + */ + if (iswired(*pte)) + panic("pmap_remove_all removing a wired page"); + + { + register int i = ptes_per_vm_page; + + do { + pmap_phys_attributes[pai] |= + *pte & (PHYS_MODIFIED|PHYS_REFERENCED); + *pte++ = 0; + } while (--i > 0); + } + + assert(pmap->stats.resident_count >= 1); + pmap->stats.resident_count--; + + /* + * Remove the pv_entry. + */ + if (pv_e == pv_h) { + /* + * Fix up head later. + */ + pv_h->pmap = PMAP_NULL; + } + else { + /* + * Delete this entry. + */ + prev->next = pv_e->next; + PV_FREE(pv_e); + } + } + else { + /* + * Write-protect. + */ + register int i = ptes_per_vm_page; + + do { + *pte &= ~INTEL_PTE_WRITE; + pte++; + } while (--i > 0); + + /* + * Advance prev. 
+ */ + prev = pv_e; + } + + simple_unlock(&pmap->lock); + + } while ((pv_e = prev->next) != PV_ENTRY_NULL); + + /* + * If pv_head mapping was removed, fix it up. + */ + if (pv_h->pmap == PMAP_NULL) { + pv_e = pv_h->next; + if (pv_e != PV_ENTRY_NULL) { + *pv_h = *pv_e; + PV_FREE(pv_e); + } + } + } + + PMAP_WRITE_UNLOCK(spl); +} + +/* + * Set the physical protection on the + * specified range of this map as requested. + * Will not increase permissions. + */ +void +pmap_protect( + pmap_t map, + vm_offset_t s, + vm_offset_t e, + vm_prot_t prot) +{ + register pt_entry_t *pde; + register pt_entry_t *spte, *epte; + vm_offset_t l; + spl_t spl; + + + if (map == PMAP_NULL) + return; + + /* + * Determine the new protection. + */ + switch (prot) { + case VM_PROT_READ: + case VM_PROT_READ|VM_PROT_EXECUTE: + break; + case VM_PROT_READ|VM_PROT_WRITE: + case VM_PROT_ALL: + return; /* nothing to do */ + default: + pmap_remove(map, s, e); + return; + } + + /* + * If write-protecting in the kernel pmap, + * remove the mappings; the i386 ignores + * the write-permission bit in kernel mode. + * + * XXX should be #if'd for i386 + */ + + if (cpuid_family == CPUID_FAMILY_386) + if (map == kernel_pmap) { + pmap_remove(map, s, e); + return; + } + + SPLVM(spl); + simple_lock(&map->lock); + + + pde = pmap_pde(map, s); + while (s < e) { + l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); + if (l > e) + l = e; + if (*pde & INTEL_PTE_VALID) { + spte = (pt_entry_t *)ptetokv(*pde); + spte = &spte[ptenum(s)]; + epte = &spte[intel_btop(l-s)]; + + while (spte < epte) { + if (*spte & INTEL_PTE_VALID) + *spte &= ~INTEL_PTE_WRITE; + spte++; + } + } + s = l; + pde++; + } + + PMAP_FLUSH_TLBS(); + + simple_unlock(&map->lock); + SPLX(spl); +} + + + +/* + * Insert the given physical page (p) at + * the specified virtual address (v) in the + * target physical map with the protection requested. + * + * If specified, the page will be wired down, meaning + * that the related pte cannot be reclaimed. 
+ * + * NB: This is the only routine which MAY NOT lazy-evaluate + * or lose information. That is, this routine must actually + * insert this page into the given map NOW. + */ +void +pmap_enter( + register pmap_t pmap, + vm_offset_t v, + register vm_offset_t pa, + vm_prot_t prot, + boolean_t wired) +{ + register pt_entry_t *pte; + register pv_entry_t pv_h; + register int i, pai; + pv_entry_t pv_e; + pt_entry_t template; + spl_t spl; + vm_offset_t old_pa; + + XPR(0x80000000, "%x/%x: pmap_enter %x/%x/%x\n", + current_thread()->top_act, + current_thread(), + pmap, v, pa); + + assert(pa != vm_page_fictitious_addr); + if (pmap_debug) + printf("pmap(%x, %x)\n", v, pa); + if (pmap == PMAP_NULL) + return; + + if (cpuid_family == CPUID_FAMILY_386) + if (pmap == kernel_pmap && (prot & VM_PROT_WRITE) == 0 + && !wired /* hack for io_wire */ ) { + /* + * Because the 386 ignores write protection in kernel mode, + * we cannot enter a read-only kernel mapping, and must + * remove an existing mapping if changing it. + * + * XXX should be #if'd for i386 + */ + PMAP_READ_LOCK(pmap, spl); + + pte = pmap_pte(pmap, v); + if (pte != PT_ENTRY_NULL && pte_to_pa(*pte) != 0) { + /* + * Invalidate the translation buffer, + * then remove the mapping. + */ + PMAP_INVALIDATE_PAGE(pmap, v); + pmap_remove_range(pmap, v, pte, + pte + ptes_per_vm_page); + } + PMAP_READ_UNLOCK(pmap, spl); + return; + } + + /* + * Must allocate a new pvlist entry while we're unlocked; + * zalloc may cause pageout (which will lock the pmap system). + * If we determine we need a pvlist entry, we will unlock + * and allocate one. Then we will retry, throwing away + * the allocated entry later (if we no longer need it). + */ + pv_e = PV_ENTRY_NULL; +Retry: + PMAP_READ_LOCK(pmap, spl); + + /* + * Expand pmap to include this pte. Assume that + * pmap is always expanded to include enough hardware + * pages to map one VM page. 
+ */ + + while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) { + /* + * Must unlock to expand the pmap. + */ + PMAP_READ_UNLOCK(pmap, spl); + + pmap_expand(pmap, v); + + PMAP_READ_LOCK(pmap, spl); + } + /* + * Special case if the physical page is already mapped + * at this address. + */ + old_pa = pte_to_pa(*pte); + if (old_pa == pa) { + /* + * May be changing its wired attribute or protection + */ + + template = pa_to_pte(pa) | INTEL_PTE_VALID; + if (pmap != kernel_pmap) + template |= INTEL_PTE_USER; + if (prot & VM_PROT_WRITE) + template |= INTEL_PTE_WRITE; + if (wired) { + template |= INTEL_PTE_WIRED; + if (!iswired(*pte)) + pmap->stats.wired_count++; + } + else { + if (iswired(*pte)) { + assert(pmap->stats.wired_count >= 1); + pmap->stats.wired_count--; + } + } + + PMAP_INVALIDATE_PAGE(pmap, v); + + i = ptes_per_vm_page; + do { + if (*pte & INTEL_PTE_MOD) + template |= INTEL_PTE_MOD; + WRITE_PTE(pte, template) + pte++; + pte_increment_pa(template); + } while (--i > 0); + + goto Done; + } + + /* + * Outline of code from here: + * 1) If va was mapped, update TLBs, remove the mapping + * and remove old pvlist entry. + * 2) Add pvlist entry for new mapping + * 3) Enter new mapping. + * + * SHARING_FAULTS complicates this slightly in that it cannot + * replace the mapping, but must remove it (because adding the + * pvlist entry for the new mapping may remove others), and + * hence always enters the new mapping at step 3) + * + * If the old physical page is not managed step 1) is skipped + * (except for updating the TLBs), and the mapping is + * overwritten at step 3). If the new physical page is not + * managed, step 2) is skipped. + */ + + if (old_pa != (vm_offset_t) 0) { + + PMAP_INVALIDATE_PAGE(pmap, v); + +#if DEBUG_PTE_PAGE + if (pmap != kernel_pmap) + ptep_check(get_pte_page(pte)); +#endif /* DEBUG_PTE_PAGE */ + + /* + * Don't do anything to pages outside valid memory here. + * Instead convince the code that enters a new mapping + * to overwrite the old one. 
+ */ + + if (valid_page(old_pa)) { + + pai = pa_index(old_pa); + LOCK_PVH(pai); + + assert(pmap->stats.resident_count >= 1); + pmap->stats.resident_count--; + if (iswired(*pte)) { + assert(pmap->stats.wired_count >= 1); + pmap->stats.wired_count--; + } + i = ptes_per_vm_page; + do { + pmap_phys_attributes[pai] |= + *pte & (PHYS_MODIFIED|PHYS_REFERENCED); + WRITE_PTE(pte, 0) + pte++; + pte_increment_pa(template); + } while (--i > 0); + + /* + * Put pte back to beginning of page since it'll be + * used later to enter the new page. + */ + pte -= ptes_per_vm_page; + + /* + * Remove the mapping from the pvlist for + * this physical page. + */ + { + register pv_entry_t prev, cur; + + pv_h = pai_to_pvh(pai); + if (pv_h->pmap == PMAP_NULL) { + panic("pmap_enter: null pv_list!"); + } + if (pv_h->va == v && pv_h->pmap == pmap) { + /* + * Header is the pv_entry. Copy the next one + * to header and free the next one (we cannot + * free the header) + */ + cur = pv_h->next; + if (cur != PV_ENTRY_NULL) { + *pv_h = *cur; + pv_e = cur; + } + else { + pv_h->pmap = PMAP_NULL; + } + } + else { + cur = pv_h; + do { + prev = cur; + if ((cur = prev->next) == PV_ENTRY_NULL) { + panic("pmap_enter: mapping not in pv_list!"); + } + } while (cur->va != v || cur->pmap != pmap); + prev->next = cur->next; + pv_e = cur; + } + } + UNLOCK_PVH(pai); + } + else { + + /* + * old_pa is not managed. Pretend it's zero so code + * at Step 3) will enter new mapping (overwriting old + * one). Do removal part of accounting. + */ + old_pa = (vm_offset_t) 0; + assert(pmap->stats.resident_count >= 1); + pmap->stats.resident_count--; + if (iswired(*pte)) { + assert(pmap->stats.wired_count >= 1); + pmap->stats.wired_count--; + } + } + } + + if (valid_page(pa)) { + + /* + * Step 2) Enter the mapping in the PV list for this + * physical page. 
+ */ + + pai = pa_index(pa); + + +#if SHARING_FAULTS +RetryPvList: + /* + * We can return here from the sharing fault code below + * in case we removed the only entry on the pv list and thus + * must enter the new one in the list header. + */ +#endif /* SHARING_FAULTS */ + LOCK_PVH(pai); + pv_h = pai_to_pvh(pai); + + if (pv_h->pmap == PMAP_NULL) { + /* + * No mappings yet + */ + pv_h->va = v; + pv_h->pmap = pmap; + pv_h->next = PV_ENTRY_NULL; + } + else { +#if DEBUG + { + /* + * check that this mapping is not already there + * or there is no alias for this mapping in the same map + */ + pv_entry_t e = pv_h; + while (e != PV_ENTRY_NULL) { + if (e->pmap == pmap && e->va == v) + panic("pmap_enter: already in pv_list"); + e = e->next; + } + } +#endif /* DEBUG */ +#if SHARING_FAULTS + { + /* + * do sharing faults. + * if we find an entry on this pv list in the same address + * space, remove it. we know there will not be more + * than one. + */ + pv_entry_t e = pv_h; + pt_entry_t *opte; + + while (e != PV_ENTRY_NULL) { + if (e->pmap == pmap) { + /* + * Remove it, drop pv list lock first. + */ + UNLOCK_PVH(pai); + + opte = pmap_pte(pmap, e->va); + assert(opte != PT_ENTRY_NULL); + /* + * Invalidate the translation buffer, + * then remove the mapping. + */ + PMAP_INVALIDATE_PAGE(pmap, e->va); + pmap_remove_range(pmap, e->va, opte, + opte + ptes_per_vm_page); + /* + * We could have removed the head entry, + * so there could be no more entries + * and so we have to use the pv head entry. + * So, go back to the top and try the entry + * again. + */ + goto RetryPvList; + } + e = e->next; + } + + /* + * check that this mapping is not already there + */ + e = pv_h; + while (e != PV_ENTRY_NULL) { + if (e->pmap == pmap) + panic("pmap_enter: alias in pv_list"); + e = e->next; + } + } +#endif /* SHARING_FAULTS */ +#if DEBUG_ALIAS + { + /* + * check for aliases within the same address space. 
+ */ + pv_entry_t e = pv_h; + vm_offset_t rpc = get_rpc(); + + while (e != PV_ENTRY_NULL) { + if (e->pmap == pmap) { + /* + * log this entry in the alias ring buffer + * if it's not there already. + */ + struct pmap_alias *pma; + int ii, logit; + + logit = TRUE; + for (ii = 0; ii < pmap_alias_index; ii++) { + if (pmap_aliasbuf[ii].rpc == rpc) { + /* found it in the log already */ + logit = FALSE; + break; + } + } + if (logit) { + pma = &pmap_aliasbuf[pmap_alias_index]; + pma->pmap = pmap; + pma->va = v; + pma->rpc = rpc; + pma->cookie = PMAP_ALIAS_COOKIE; + if (++pmap_alias_index >= PMAP_ALIAS_MAX) + panic("pmap_enter: exhausted alias log"); + } + } + e = e->next; + } + } +#endif /* DEBUG_ALIAS */ + /* + * Add new pv_entry after header. + */ + if (pv_e == PV_ENTRY_NULL) { + PV_ALLOC(pv_e); + if (pv_e == PV_ENTRY_NULL) { + UNLOCK_PVH(pai); + PMAP_READ_UNLOCK(pmap, spl); + + /* + * Refill from zone. + */ + pv_e = (pv_entry_t) zalloc(pv_list_zone); + goto Retry; + } + } + pv_e->va = v; + pv_e->pmap = pmap; + pv_e->next = pv_h->next; + pv_h->next = pv_e; + /* + * Remember that we used the pvlist entry. + */ + pv_e = PV_ENTRY_NULL; + } + UNLOCK_PVH(pai); + } + + /* + * Step 3) Enter and count the mapping. + */ + + pmap->stats.resident_count++; + + /* + * Build a template to speed up entering - + * only the pfn changes. + */ + template = pa_to_pte(pa) | INTEL_PTE_VALID; + if (pmap != kernel_pmap) + template |= INTEL_PTE_USER; + if (prot & VM_PROT_WRITE) + template |= INTEL_PTE_WRITE; + if (wired) { + template |= INTEL_PTE_WIRED; + pmap->stats.wired_count++; + } + i = ptes_per_vm_page; + do { + WRITE_PTE(pte, template) + pte++; + pte_increment_pa(template); + } while (--i > 0); +Done: + if (pv_e != PV_ENTRY_NULL) { + PV_FREE(pv_e); + } + + PMAP_READ_UNLOCK(pmap, spl); +} + +/* + * Routine: pmap_change_wiring + * Function: Change the wiring attribute for a map/virtual-address + * pair. + * In/out conditions: + * The mapping must already exist in the pmap. 
+ */ +void +pmap_change_wiring( + register pmap_t map, + vm_offset_t v, + boolean_t wired) +{ + register pt_entry_t *pte; + register int i; + spl_t spl; + + /* + * We must grab the pmap system lock because we may + * change a pte_page queue. + */ + PMAP_READ_LOCK(map, spl); + + if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL) + panic("pmap_change_wiring: pte missing"); + + if (wired && !iswired(*pte)) { + /* + * wiring down mapping + */ + map->stats.wired_count++; + i = ptes_per_vm_page; + do { + *pte++ |= INTEL_PTE_WIRED; + } while (--i > 0); + } + else if (!wired && iswired(*pte)) { + /* + * unwiring mapping + */ + assert(map->stats.wired_count >= 1); + map->stats.wired_count--; + i = ptes_per_vm_page; + do { + *pte++ &= ~INTEL_PTE_WIRED; + } while (--i > 0); + } + + PMAP_READ_UNLOCK(map, spl); +} + +/* + * Routine: pmap_extract + * Function: + * Extract the physical page address associated + * with the given map/virtual_address pair. + */ + +vm_offset_t +pmap_extract( + register pmap_t pmap, + vm_offset_t va) +{ + register pt_entry_t *pte; + register vm_offset_t pa; + spl_t spl; + + SPLVM(spl); + simple_lock(&pmap->lock); + if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL) + pa = (vm_offset_t) 0; + else if (!(*pte & INTEL_PTE_VALID)) + pa = (vm_offset_t) 0; + else + pa = pte_to_pa(*pte) + (va & INTEL_OFFMASK); + simple_unlock(&pmap->lock); + SPLX(spl); + return(pa); +} + +/* + * Routine: pmap_expand + * + * Expands a pmap to be able to map the specified virtual address. + * + * Allocates new virtual memory for the P0 or P1 portion of the + * pmap, then re-maps the physical pages that were in the old + * pmap to be in the new pmap. + * + * Must be called with the pmap system and the pmap unlocked, + * since these must be unlocked to use vm_allocate or vm_deallocate. + * Thus it must be called in a loop that checks whether the map + * has been expanded enough. + * (We won't loop forever, since page tables aren't shrunk.) 
+ */ +void +pmap_expand( + register pmap_t map, + register vm_offset_t v) +{ + pt_entry_t *pdp; + register vm_page_t m; + register vm_offset_t pa; + register int i; + spl_t spl; + + if (map == kernel_pmap) + panic("pmap_expand"); + + /* + * We cannot allocate the pmap_object in pmap_init, + * because it is called before the zone package is up. + * Allocate it now if it is missing. + */ + if (pmap_object == VM_OBJECT_NULL) + pmap_object = vm_object_allocate(avail_end); + + /* + * Allocate a VM page for the level 2 page table entries. + */ + while ((m = vm_page_grab()) == VM_PAGE_NULL) + VM_PAGE_WAIT(); + + /* + * Map the page to its physical address so that it + * can be found later. + */ + pa = m->phys_addr; + vm_object_lock(pmap_object); + vm_page_insert(m, pmap_object, pa); + vm_page_lock_queues(); + vm_page_wire(m); + inuse_ptepages_count++; + vm_object_unlock(pmap_object); + vm_page_unlock_queues(); + + /* + * Zero the page. + */ + memset((void *)phystokv(pa), 0, PAGE_SIZE); + + PMAP_READ_LOCK(map, spl); + /* + * See if someone else expanded us first + */ + if (pmap_pte(map, v) != PT_ENTRY_NULL) { + PMAP_READ_UNLOCK(map, spl); + vm_object_lock(pmap_object); + vm_page_lock_queues(); + vm_page_free(m); + inuse_ptepages_count--; + vm_page_unlock_queues(); + vm_object_unlock(pmap_object); + return; + } + + /* + * Set the page directory entry for this page table. + * If we have allocated more than one hardware page, + * set several page directory entries. + */ + + i = ptes_per_vm_page; + pdp = &map->dirbase[pdenum(map, v) & ~(i-1)]; + do { + *pdp = pa_to_pte(pa) + | INTEL_PTE_VALID + | INTEL_PTE_USER + | INTEL_PTE_WRITE; + pdp++; + pa += INTEL_PGBYTES; + } while (--i > 0); + + PMAP_READ_UNLOCK(map, spl); + return; +} + +/* + * Copy the range specified by src_addr/len + * from the source map to the range dst_addr/len + * in the destination map. + * + * This routine is only advisory and need not do anything. 
+ */ +#if 0 +void +pmap_copy( + pmap_t dst_pmap, + pmap_t src_pmap, + vm_offset_t dst_addr, + vm_size_t len, + vm_offset_t src_addr) +{ +#ifdef lint + dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++; +#endif /* lint */ +} +#endif/* 0 */ + +int collect_ref; +int collect_unref; + +/* + * Routine: pmap_collect + * Function: + * Garbage collects the physical map system for + * pages which are no longer used. + * Success need not be guaranteed -- that is, there + * may well be pages which are not referenced, but + * others may be collected. + * Usage: + * Called by the pageout daemon when pages are scarce. + */ +void +pmap_collect( + pmap_t p) +{ + register pt_entry_t *pdp, *ptp; + pt_entry_t *eptp; + vm_offset_t pa; + int wired; + spl_t spl; + + if (p == PMAP_NULL) + return; + + if (p == kernel_pmap) + return; + + /* + * Garbage collect map. + */ + PMAP_READ_LOCK(p, spl); + PMAP_FLUSH_TLBS(); + + for (pdp = p->dirbase; + pdp < &p->dirbase[pdenum(p, LINEAR_KERNEL_ADDRESS)]; + pdp += ptes_per_vm_page) + { + if (*pdp & INTEL_PTE_VALID) + if(*pdp & INTEL_PTE_REF) { + *pdp &= ~INTEL_PTE_REF; + collect_ref++; + } else { + collect_unref++; + pa = pte_to_pa(*pdp); + ptp = (pt_entry_t *)phystokv(pa); + eptp = ptp + NPTES*ptes_per_vm_page; + + /* + * If the pte page has any wired mappings, we cannot + * free it. + */ + wired = 0; + { + register pt_entry_t *ptep; + for (ptep = ptp; ptep < eptp; ptep++) { + if (iswired(*ptep)) { + wired = 1; + break; + } + } + } + if (!wired) { + /* + * Remove the virtual addresses mapped by this pte page. + */ + pmap_remove_range(p, + pdetova(pdp - p->dirbase), + ptp, + eptp); + + /* + * Invalidate the page directory pointer. + */ + { + register int i = ptes_per_vm_page; + register pt_entry_t *pdep = pdp; + do { + *pdep++ = 0; + } while (--i > 0); + } + + PMAP_READ_UNLOCK(p, spl); + + /* + * And free the pte page itself. 
+ */ + { + register vm_page_t m; + + vm_object_lock(pmap_object); + m = vm_page_lookup(pmap_object, pa); + if (m == VM_PAGE_NULL) + panic("pmap_collect: pte page not in object"); + vm_page_lock_queues(); + vm_page_free(m); + inuse_ptepages_count--; + vm_page_unlock_queues(); + vm_object_unlock(pmap_object); + } + + PMAP_READ_LOCK(p, spl); + } + } + } + PMAP_READ_UNLOCK(p, spl); + return; + +} + +/* + * Routine: pmap_kernel + * Function: + * Returns the physical map handle for the kernel. + */ +#if 0 +pmap_t +pmap_kernel(void) +{ + return (kernel_pmap); +} +#endif/* 0 */ + +/* + * pmap_zero_page zeros the specified (machine independent) page. + * See machine/phys.c or machine/phys.s for implementation. + */ +#if 0 +void +pmap_zero_page( + register vm_offset_t phys) +{ + register int i; + + assert(phys != vm_page_fictitious_addr); + i = PAGE_SIZE / INTEL_PGBYTES; + phys = intel_pfn(phys); + + while (i--) + zero_phys(phys++); +} +#endif/* 0 */ + +/* + * pmap_copy_page copies the specified (machine independent) page. + * See machine/phys.c or machine/phys.s for implementation. + */ +#if 0 +void +pmap_copy_page( + vm_offset_t src, + vm_offset_t dst) +{ + int i; + + assert(src != vm_page_fictitious_addr); + assert(dst != vm_page_fictitious_addr); + i = PAGE_SIZE / INTEL_PGBYTES; + + while (i--) { + copy_phys(intel_pfn(src), intel_pfn(dst)); + src += INTEL_PGBYTES; + dst += INTEL_PGBYTES; + } +} +#endif/* 0 */ + +/* + * Routine: pmap_pageable + * Function: + * Make the specified pages (by pmap, offset) + * pageable (or not) as requested. + * + * A page which is not pageable may not take + * a fault; therefore, its page table entry + * must remain valid for the duration. + * + * This routine is merely advisory; pmap_enter + * will specify that these pages are to be wired + * down (or not) as appropriate. 
+ */ +void +pmap_pageable( + pmap_t pmap, + vm_offset_t start, + vm_offset_t end, + boolean_t pageable) +{ +#ifdef lint + pmap++; start++; end++; pageable++; +#endif /* lint */ +} + +/* + * Clear specified attribute bits. + */ +void +phys_attribute_clear( + vm_offset_t phys, + int bits) +{ + pv_entry_t pv_h; + register pv_entry_t pv_e; + register pt_entry_t *pte; + int pai; + register pmap_t pmap; + spl_t spl; + + assert(phys != vm_page_fictitious_addr); + if (!valid_page(phys)) { + /* + * Not a managed page. + */ + return; + } + + /* + * Lock the pmap system first, since we will be changing + * several pmaps. + */ + + PMAP_WRITE_LOCK(spl); + + pai = pa_index(phys); + pv_h = pai_to_pvh(pai); + + /* + * Walk down PV list, clearing all modify or reference bits. + * We do not have to lock the pv_list because we have + * the entire pmap system locked. + */ + if (pv_h->pmap != PMAP_NULL) { + /* + * There are some mappings. + */ + for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) { + + pmap = pv_e->pmap; + /* + * Lock the pmap to block pmap_extract and similar routines. + */ + simple_lock(&pmap->lock); + + { + register vm_offset_t va; + + va = pv_e->va; + pte = pmap_pte(pmap, va); + +#if 0 + /* + * Consistency checks. + */ + assert(*pte & INTEL_PTE_VALID); + /* assert(pte_to_phys(*pte) == phys); */ +#endif + + /* + * Invalidate TLBs for all CPUs using this mapping. + */ + PMAP_INVALIDATE_PAGE(pmap, va); + } + + /* + * Clear modify or reference bits. + */ + { + register int i = ptes_per_vm_page; + do { + *pte++ &= ~bits; + } while (--i > 0); + } + simple_unlock(&pmap->lock); + } + } + + pmap_phys_attributes[pai] &= ~bits; + + PMAP_WRITE_UNLOCK(spl); +} + +/* + * Check specified attribute bits. 
+ */ +boolean_t +phys_attribute_test( + vm_offset_t phys, + int bits) +{ + pv_entry_t pv_h; + register pv_entry_t pv_e; + register pt_entry_t *pte; + int pai; + register pmap_t pmap; + spl_t spl; + + assert(phys != vm_page_fictitious_addr); + if (!valid_page(phys)) { + /* + * Not a managed page. + */ + return (FALSE); + } + + /* + * Lock the pmap system first, since we will be checking + * several pmaps. + */ + + PMAP_WRITE_LOCK(spl); + + pai = pa_index(phys); + pv_h = pai_to_pvh(pai); + + if (pmap_phys_attributes[pai] & bits) { + PMAP_WRITE_UNLOCK(spl); + return (TRUE); + } + + /* + * Walk down PV list, checking all mappings. + * We do not have to lock the pv_list because we have + * the entire pmap system locked. + */ + if (pv_h->pmap != PMAP_NULL) { + /* + * There are some mappings. + */ + for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) { + + pmap = pv_e->pmap; + /* + * Lock the pmap to block pmap_extract and similar routines. + */ + simple_lock(&pmap->lock); + + { + register vm_offset_t va; + + va = pv_e->va; + pte = pmap_pte(pmap, va); + +#if 0 + /* + * Consistency checks. + */ + assert(*pte & INTEL_PTE_VALID); + /* assert(pte_to_phys(*pte) == phys); */ +#endif + } + + /* + * Check modify or reference bits. + */ + { + register int i = ptes_per_vm_page; + + do { + if (*pte++ & bits) { + simple_unlock(&pmap->lock); + PMAP_WRITE_UNLOCK(spl); + return (TRUE); + } + } while (--i > 0); + } + simple_unlock(&pmap->lock); + } + } + PMAP_WRITE_UNLOCK(spl); + return (FALSE); +} + +/* + * Set specified attribute bits. + */ +void +phys_attribute_set( + vm_offset_t phys, + int bits) +{ + int spl; + + assert(phys != vm_page_fictitious_addr); + if (!valid_page(phys)) { + /* + * Not a managed page. + */ + return; + } + + /* + * Lock the pmap system and set the requested bits in + * the phys attributes array. Don't need to bother with + * ptes because the test routine looks here first. 
+ */ + + PMAP_WRITE_LOCK(spl); + pmap_phys_attributes[pa_index(phys)] |= bits; + PMAP_WRITE_UNLOCK(spl); +} + +/* + * Set the modify bit on the specified physical page. + */ + +void pmap_set_modify( + register vm_offset_t phys) +{ + phys_attribute_set(phys, PHYS_MODIFIED); +} + +/* + * Clear the modify bits on the specified physical page. + */ + +void +pmap_clear_modify( + register vm_offset_t phys) +{ + phys_attribute_clear(phys, PHYS_MODIFIED); +} + +/* + * pmap_is_modified: + * + * Return whether or not the specified physical page is modified + * by any physical maps. + */ + +boolean_t +pmap_is_modified( + register vm_offset_t phys) +{ + return (phys_attribute_test(phys, PHYS_MODIFIED)); +} + +/* + * pmap_clear_reference: + * + * Clear the reference bit on the specified physical page. + */ + +void +pmap_clear_reference( + vm_offset_t phys) +{ + phys_attribute_clear(phys, PHYS_REFERENCED); +} + +/* + * pmap_is_referenced: + * + * Return whether or not the specified physical page is referenced + * by any physical maps. + */ + +boolean_t +pmap_is_referenced( + vm_offset_t phys) +{ + return (phys_attribute_test(phys, PHYS_REFERENCED)); +} + +/* + * Set the modify bit on the specified range + * of this map as requested. + * + * This optimization stands only if each time the dirty bit + * in vm_page_t is tested, it is also tested in the pmap. 
+ */ +void +pmap_modify_pages( + pmap_t map, + vm_offset_t s, + vm_offset_t e) +{ + spl_t spl; + register pt_entry_t *pde; + register pt_entry_t *spte, *epte; + vm_offset_t l; + + if (map == PMAP_NULL) + return; + + PMAP_READ_LOCK(map, spl); + + pde = pmap_pde(map, s); + while (s && s < e) { + l = (s + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE-1); + if (l > e) + l = e; + if (*pde & INTEL_PTE_VALID) { + spte = (pt_entry_t *)ptetokv(*pde); + if (l) { + spte = &spte[ptenum(s)]; + epte = &spte[intel_btop(l-s)]; + } else { + epte = &spte[intel_btop(PDE_MAPPED_SIZE)]; + spte = &spte[ptenum(s)]; + } + while (spte < epte) { + if (*spte & INTEL_PTE_VALID) { + *spte |= (INTEL_PTE_MOD | INTEL_PTE_WRITE); + } + spte++; + } + } + s = l; + pde++; + } + PMAP_FLUSH_TLBS(); + PMAP_READ_UNLOCK(map, spl); +} + + +void +invalidate_icache(vm_offset_t addr, unsigned cnt, int phys) +{ + return; +} +void +flush_dcache(vm_offset_t addr, unsigned count, int phys) +{ + return; +} + +#if NCPUS > 1 + +void inline +pmap_wait_for_clear() +{ + register int my_cpu; + spl_t s; + register pmap_t my_pmap; + + mp_disable_preemption(); + my_cpu = cpu_number(); + + + my_pmap = real_pmap[my_cpu]; + + if (!(my_pmap && pmap_in_use(my_pmap, my_cpu))) + my_pmap = kernel_pmap; + + /* + * Raise spl to splhigh (above splip) to block out pmap_extract + * from IO code (which would put this cpu back in the active + * set). + */ + s = splhigh(); + + /* + * Wait for any pmap updates in progress, on either user + * or kernel pmap. 
+ */ + while (*(volatile hw_lock_t)&my_pmap->lock.interlock || + *(volatile hw_lock_t)&kernel_pmap->lock.interlock) { + continue; + } + + splx(s); + mp_enable_preemption(); +} + +void +pmap_flush_tlb_interrupt(void) { + pmap_wait_for_clear(); + + flush_tlb(); +} + +void +pmap_reload_tlb_interrupt(void) { + pmap_wait_for_clear(); + + set_cr3(kernel_pmap->pdirbase); +} + + +#endif /* NCPUS > 1 */ + +#if MACH_KDB + +/* show phys page mappings and attributes */ + +extern void db_show_page(vm_offset_t pa); + +void +db_show_page(vm_offset_t pa) +{ + pv_entry_t pv_h; + int pai; + char attr; + + pai = pa_index(pa); + pv_h = pai_to_pvh(pai); + + attr = pmap_phys_attributes[pai]; + printf("phys page %x ", pa); + if (attr & PHYS_MODIFIED) + printf("modified, "); + if (attr & PHYS_REFERENCED) + printf("referenced, "); + if (pv_h->pmap || pv_h->next) + printf(" mapped at\n"); + else + printf(" not mapped\n"); + for (; pv_h; pv_h = pv_h->next) + if (pv_h->pmap) + printf("%x in pmap %x\n", pv_h->va, pv_h->pmap); +} + +#endif /* MACH_KDB */ + +#if MACH_KDB +void db_kvtophys(vm_offset_t); +void db_show_vaddrs(pt_entry_t *); + +/* + * print out the results of kvtophys(arg) + */ +void +db_kvtophys( + vm_offset_t vaddr) +{ + db_printf("0x%x", kvtophys(vaddr)); +} + +/* + * Walk the page tables. 
+ */ +void +db_show_vaddrs( + pt_entry_t *dirbase) +{ + pt_entry_t *ptep, *pdep, tmp; + int x, y, pdecnt, ptecnt; + + if (dirbase == 0) { + dirbase = kernel_pmap->dirbase; + } + if (dirbase == 0) { + db_printf("need a dirbase...\n"); + return; + } + dirbase = (pt_entry_t *) ((unsigned long) dirbase & ~INTEL_OFFMASK); + + db_printf("dirbase: 0x%x\n", dirbase); + + pdecnt = ptecnt = 0; + pdep = &dirbase[0]; + for (y = 0; y < NPDES; y++, pdep++) { + if (((tmp = *pdep) & INTEL_PTE_VALID) == 0) { + continue; + } + pdecnt++; + ptep = (pt_entry_t *) ((*pdep) & ~INTEL_OFFMASK); + db_printf("dir[%4d]: 0x%x\n", y, *pdep); + for (x = 0; x < NPTES; x++, ptep++) { + if (((tmp = *ptep) & INTEL_PTE_VALID) == 0) { + continue; + } + ptecnt++; + db_printf(" tab[%4d]: 0x%x, va=0x%x, pa=0x%x\n", + x, + *ptep, + (y << 22) | (x << 12), + *ptep & ~INTEL_OFFMASK); + } + } + + db_printf("total: %d tables, %d page table entries.\n", pdecnt, ptecnt); + +} +#endif /* MACH_KDB */ + +#include +#if MACH_VM_DEBUG +#include + +int +pmap_list_resident_pages( + register pmap_t pmap, + register vm_offset_t *listp, + register int space) +{ + return 0; +} +#endif /* MACH_VM_DEBUG */ + +#ifdef MACH_BSD +/* + * pmap_pagemove + * + * BSD support routine to reassign virtual addresses. 
+ */ + +void +pmap_movepage(unsigned long from, unsigned long to, vm_size_t size) +{ + spl_t spl; + pt_entry_t *pte, saved_pte; + /* Lock the kernel map */ + + + while (size > 0) { + PMAP_READ_LOCK(kernel_pmap, spl); + pte = pmap_pte(kernel_pmap, from); + if (pte == NULL) + panic("pmap_pagemove from pte NULL"); + saved_pte = *pte; + PMAP_READ_UNLOCK(kernel_pmap, spl); + + pmap_enter(kernel_pmap, to, i386_trunc_page(*pte), + VM_PROT_READ|VM_PROT_WRITE, *pte & INTEL_PTE_WIRED); + + pmap_remove(kernel_pmap, from, from+PAGE_SIZE); + + PMAP_READ_LOCK(kernel_pmap, spl); + pte = pmap_pte(kernel_pmap, to); + if (pte == NULL) + panic("pmap_pagemove 'to' pte NULL"); + + *pte = saved_pte; + PMAP_READ_UNLOCK(kernel_pmap, spl); + + from += PAGE_SIZE; + to += PAGE_SIZE; + size -= PAGE_SIZE; + } + + /* Get the processors to update the TLBs */ + PMAP_FLUSH_TLBS(); + +} + +kern_return_t bmapvideo(vm_offset_t *info); +kern_return_t bmapvideo(vm_offset_t *info) { + + extern struct vc_info vinfo; +#ifdef NOTIMPLEMENTED + (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */ +#endif + return KERN_SUCCESS; +} + +kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr); +kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { + +#ifdef NOTIMPLEMENTED + pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr); /* Map it in */ +#endif + return KERN_SUCCESS; +} + +kern_return_t bmapmapr(vm_offset_t va); +kern_return_t bmapmapr(vm_offset_t va) { + +#ifdef NOTIMPLEMENTED + mapping_remove(current_act()->task->map->pmap, va); /* Remove map */ +#endif + return KERN_SUCCESS; +} +#endif + diff --git a/osfmk/i386/pmap.h b/osfmk/i386/pmap.h new file mode 100644 index 000000000..f2a7c9511 --- /dev/null +++ b/osfmk/i386/pmap.h @@ -0,0 +1,517 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +/* + * File: pmap.h + * + * Authors: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Machine-dependent structures for the physical map module. + */ + +#ifndef _PMAP_MACHINE_ +#define _PMAP_MACHINE_ 1 + +#ifndef ASSEMBLER + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Define the generic in terms of the specific + */ + +#define INTEL_PGBYTES I386_PGBYTES +#define INTEL_PGSHIFT I386_PGSHIFT +#define intel_btop(x) i386_btop(x) +#define intel_ptob(x) i386_ptob(x) +#define intel_round_page(x) i386_round_page(x) +#define intel_trunc_page(x) i386_trunc_page(x) +#define trunc_intel_to_vm(x) trunc_i386_to_vm(x) +#define round_intel_to_vm(x) round_i386_to_vm(x) +#define vm_to_intel(x) vm_to_i386(x) + +/* + * i386/i486/i860 Page Table Entry + */ + +typedef unsigned int pt_entry_t; +#define PT_ENTRY_NULL ((pt_entry_t *) 0) + +#endif /* ASSEMBLER */ + +#define INTEL_OFFMASK 0xfff /* offset within page */ +#define PDESHIFT 22 /* page descriptor shift */ +#define PDEMASK 0x3ff /* mask for page descriptor index */ +#define PTESHIFT 12 /* page table shift */ +#define PTEMASK 0x3ff /* mask for page table index */ + +/* + * Convert kernel virtual address to linear address + */ + +#define kvtolinear(a) ((a)+LINEAR_KERNEL_ADDRESS) + +/* + * Convert address offset to page descriptor index + */ +#define pdenum(pmap, a) (((((pmap) == kernel_pmap) ? \ + kvtolinear(a) : (a)) \ + >> PDESHIFT) & PDEMASK) + +/* + * Convert page descriptor index to user virtual address + */ +#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT) + +/* + * Convert address offset to page table index + */ +#define ptenum(a) (((a) >> PTESHIFT) & PTEMASK) + +#define NPTES (intel_ptob(1)/sizeof(pt_entry_t)) +#define NPDES (intel_ptob(1)/sizeof(pt_entry_t)) + +/* + * Hardware pte bit definitions (to be used directly on the ptes + * without using the bit fields). 
 */

/* Hardware PTE bits for the i386 two-level page tables. */
#define INTEL_PTE_VALID		0x00000001
#define INTEL_PTE_WRITE		0x00000002
#define INTEL_PTE_USER		0x00000004
#define INTEL_PTE_WTHRU		0x00000008
#define INTEL_PTE_NCACHE	0x00000010
#define INTEL_PTE_REF		0x00000020
#define INTEL_PTE_MOD		0x00000040
#define INTEL_PTE_WIRED		0x00000200	/* software-defined bit */
#define INTEL_PTE_PFN		0xfffff000	/* page frame number mask */

#define	pa_to_pte(a)		((a) & INTEL_PTE_PFN)
#define	pte_to_pa(p)		((p) & INTEL_PTE_PFN)
#define	pte_increment_pa(p)	((p) += INTEL_OFFMASK+1)

/*
 * Convert page table entry to kernel virtual address
 */
#define ptetokv(a)	(phystokv(pte_to_pa(a)))

#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	vm_offset_t	pdirbase;	/* phys. address of dirbase */
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)	/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
};

/*
 * Optimization avoiding some TLB flushes when switching to
 * kernel-loaded threads.  This is effective only for i386:
 * Since user task, kernel task and kernel loaded tasks share the
 * same virtual space (with appropriate protections), any pmap
 * allows mapping kernel and kernel loaded tasks.
 *
 * The idea is to avoid switching to another pmap unnecessarily when
 * switching to a kernel-loaded task, or when switching to the kernel
 * itself.
 *
 * We store the pmap we are really using (from which we fetched the
 * dirbase value) in real_pmap[cpu_number()].
 *
 * Invariant:
 * current_pmap() == real_pmap[cpu_number()] || current_pmap() == kernel_pmap.
 */

extern struct pmap	*real_pmap[NCPUS];

/*
 * NOTE(review): the argument of the #include below was lost in
 * extraction; restore from the original source.
 */
#include
/*
 * If switching to the kernel pmap, don't incur the TLB cost of switching
 * to its page tables, since all maps include the kernel map as a subset.
 * Simply record that this CPU is logically on the kernel pmap (see
 * pmap_destroy).
 *
 * Similarly, if switching to a pmap (other than kernel_pmap that is already
 * in use, don't do anything to the hardware, to avoid a TLB flush.
 */

#if	NCPUS > 1
#define	PMAP_CPU_SET(pmap, my_cpu)	i_bit_set(my_cpu, &((pmap)->cpus_using))
#define	PMAP_CPU_CLR(pmap, my_cpu)	i_bit_clear(my_cpu, &((pmap)->cpus_using))
#else	/* NCPUS > 1 */
#define	PMAP_CPU_SET(pmap,my_cpu)	(pmap)->cpus_using = TRUE
#define	PMAP_CPU_CLR(pmap,my_cpu)	(pmap)->cpus_using = FALSE
#endif	/* NCPUS > 1 */


/*
 * Load cr3 from `mypmap` only when necessary (see the comment above),
 * maintaining real_pmap[] and the pmap's cpus_using set.
 *
 * NOTE(review): `*ppmap` is a struct pmap *, so the NULL test casts to
 * the wrong type — (vm_offset_t)NULL should presumably be
 * (struct pmap *)NULL.  Harmless on this ABI, but worth confirming.
 */
#define	set_dirbase(mypmap, my_cpu) {					\
	struct pmap	**ppmap = &real_pmap[my_cpu];			\
	vm_offset_t	pdirbase = (mypmap)->pdirbase;			\
									\
	if (*ppmap == (vm_offset_t)NULL) {				\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	} else if ((mypmap) != kernel_pmap && (mypmap) != *ppmap ) {	\
		if (*ppmap != kernel_pmap)				\
			PMAP_CPU_CLR(*ppmap, my_cpu);			\
		*ppmap = (mypmap);					\
		PMAP_CPU_SET((mypmap), my_cpu);				\
		set_cr3(pdirbase);					\
	}								\
	assert((mypmap) == *ppmap || (mypmap) == kernel_pmap);		\
}

#if	NCPUS > 1
/*
 * List of cpus that are actively using mapped memory.  Any
 * pmap update operation must wait for all cpus in this list.
 * Update operations must still be queued to cpus not in this
 * list.
 */
extern cpu_set		cpus_active;

/*
 * List of cpus that are idle, but still operating, and will want
 * to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;


/*
 *	External declarations for PMAP_ACTIVATE.
 */

extern void		process_pmap_updates(struct pmap *pmap);
extern void		pmap_update_interrupt(void);

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for i386/i486/i860.
+ */ +extern vm_offset_t (phystokv)( + vm_offset_t pa); + +extern vm_offset_t (kvtophys)( + vm_offset_t addr); + +extern pt_entry_t *pmap_pte( + struct pmap *pmap, + vm_offset_t addr); + +extern vm_offset_t pmap_map( + vm_offset_t virt, + vm_offset_t start, + vm_offset_t end, + vm_prot_t prot); + +extern vm_offset_t pmap_map_bd( + vm_offset_t virt, + vm_offset_t start, + vm_offset_t end, + vm_prot_t prot); + +extern void pmap_bootstrap( + vm_offset_t load_start); + +extern boolean_t pmap_valid_page( + vm_offset_t pa); + +extern int pmap_list_resident_pages( + struct pmap *pmap, + vm_offset_t *listp, + int space); + +extern void flush_tlb(void); +extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); +extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); + + +/* + * Macros for speed. + */ + +#if NCPUS > 1 + +#include + +/* + * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage + * fields to control TLB invalidation on other CPUS. + */ + +#define PMAP_ACTIVATE_KERNEL(my_cpu) { \ + \ + /* \ + * Let pmap updates proceed while we wait for this pmap. \ + */ \ + i_bit_clear((my_cpu), &cpus_active); \ + \ + /* \ + * Lock the pmap to put this cpu in its active set. \ + * Wait for updates here. \ + */ \ + simple_lock(&kernel_pmap->lock); \ + \ + /* \ + * Mark that this cpu is using the pmap. \ + */ \ + i_bit_set((my_cpu), &kernel_pmap->cpus_using); \ + \ + /* \ + * Mark this cpu active - IPL will be lowered by \ + * load_context(). \ + */ \ + i_bit_set((my_cpu), &cpus_active); \ + \ + simple_unlock(&kernel_pmap->lock); \ +} + +#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \ + /* \ + * Mark pmap no longer in use by this cpu even if \ + * pmap is locked against updates. 
\ + */ \ + i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \ +} + +#define PMAP_ACTIVATE_MAP(map, my_cpu) { \ + register struct pmap *tpmap; \ + \ + tpmap = vm_map_pmap(map); \ + if (tpmap == kernel_pmap) { \ + /* \ + * If this is the kernel pmap, switch to its page tables. \ + */ \ + set_dirbase(kernel_pmap, my_cpu); \ + } \ + else { \ + /* \ + * Let pmap updates proceed while we wait for this pmap. \ + */ \ + i_bit_clear((my_cpu), &cpus_active); \ + \ + /* \ + * Lock the pmap to put this cpu in its active set. \ + * Wait for updates here. \ + */ \ + simple_lock(&tpmap->lock); \ + \ + /* \ + * No need to invalidate the TLB - the entire user pmap \ + * will be invalidated by reloading dirbase. \ + */ \ + set_dirbase(tpmap, my_cpu); \ + \ + /* \ + * Mark this cpu active - IPL will be lowered by \ + * load_context(). \ + */ \ + i_bit_set((my_cpu), &cpus_active); \ + \ + simple_unlock(&tpmap->lock); \ + } \ +} + +#define PMAP_DEACTIVATE_MAP(map, my_cpu) + +#define PMAP_ACTIVATE_USER(th, my_cpu) { \ + spl_t spl; \ + \ + spl = splhigh(); \ + PMAP_ACTIVATE_MAP(th->map, my_cpu) \ + splx(spl); \ +} + +#define PMAP_DEACTIVATE_USER(th, my_cpu) { \ + spl_t spl; \ + \ + spl = splhigh(); \ + PMAP_DEACTIVATE_MAP(th->map, my_cpu) \ + splx(spl); \ +} + +#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \ + spl_t spl; \ + \ + if (old_th->map != new_th->map) { \ + spl = splhigh(); \ + PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \ + PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \ + splx(spl); \ + } \ +} + +#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ + spl_t spl; \ + \ + spl = splhigh(); \ + PMAP_DEACTIVATE_MAP(th->map, my_cpu); \ + th->map = new_map; \ + PMAP_ACTIVATE_MAP(th->map, my_cpu); \ + splx(spl); \ +} + +#if MP_V1_1 +#define set_led(cpu) +#define clear_led(cpu) +#endif /* MP_V1_1 */ + +#define MARK_CPU_IDLE(my_cpu) { \ + /* \ + * Mark this cpu idle, and remove it from the active set, \ + * since it is not actively using any pmap. 
Signal_cpus \ + * will notice that it is idle, and avoid signaling it, \ + * but will queue the update request for when the cpu \ + * becomes active. \ + */ \ + int s = splhigh(); \ + i_bit_set((my_cpu), &cpus_idle); \ + i_bit_clear((my_cpu), &cpus_active); \ + splx(s); \ + set_led(my_cpu); \ +} + +#define MARK_CPU_ACTIVE(my_cpu) { \ + \ + int s = splhigh(); \ + /* \ + * If a kernel_pmap update was requested while this cpu \ + * was idle, process it as if we got the interrupt. \ + * Before doing so, remove this cpu from the idle set. \ + * Since we do not grab any pmap locks while we flush \ + * our TLB, another cpu may start an update operation \ + * before we finish. Removing this cpu from the idle \ + * set assures that we will receive another update \ + * interrupt if this happens. \ + */ \ + i_bit_clear((my_cpu), &cpus_idle); \ + \ + /* \ + * Mark that this cpu is now active. \ + */ \ + i_bit_set((my_cpu), &cpus_active); \ + splx(s); \ + clear_led(my_cpu); \ +} + +#else /* NCPUS > 1 */ + +/* + * With only one CPU, we just have to indicate whether the pmap is + * in use. 
+ */ + +#define PMAP_ACTIVATE_KERNEL(my_cpu) { \ + kernel_pmap->cpus_using = TRUE; \ +} + +#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \ + kernel_pmap->cpus_using = FALSE; \ +} + +#define PMAP_ACTIVATE_MAP(map, my_cpu) \ + set_dirbase(vm_map_pmap(map), my_cpu) + +#define PMAP_DEACTIVATE_MAP(map, my_cpu) + +#define PMAP_ACTIVATE_USER(th, my_cpu) \ + PMAP_ACTIVATE_MAP(th->map, my_cpu) + +#define PMAP_DEACTIVATE_USER(th, my_cpu) \ + PMAP_DEACTIVATE_MAP(th->map, my_cpu) + +#define PMAP_SWITCH_CONTEXT(old_th, new_th, my_cpu) { \ + if (old_th->map != new_th->map) { \ + PMAP_DEACTIVATE_MAP(old_th->map, my_cpu); \ + PMAP_ACTIVATE_MAP(new_th->map, my_cpu); \ + } \ +} + +#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ + PMAP_DEACTIVATE_MAP(th->map, my_cpu); \ + th->map = new_map; \ + PMAP_ACTIVATE_MAP(th->map, my_cpu); \ +} + +#endif /* NCPUS > 1 */ + +#define PMAP_CONTEXT(pmap, thread) + +#define pmap_kernel_va(VA) \ + (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS)) + +#define pmap_resident_count(pmap) ((pmap)->stats.resident_count) +#define pmap_phys_address(frame) ((vm_offset_t) (intel_ptob(frame))) +#define pmap_phys_to_frame(phys) ((int) (intel_btop(phys))) +#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr) +#define pmap_attribute(pmap,addr,size,attr,value) \ + (KERN_INVALID_ADDRESS) +#endif /* ASSEMBLER */ + +#endif /* _PMAP_MACHINE_ */ diff --git a/osfmk/i386/proc_reg.h b/osfmk/i386/proc_reg.h new file mode 100644 index 000000000..ef752ca61 --- /dev/null +++ b/osfmk/i386/proc_reg.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Processor registers for i386 and i486. 
+ */ +#ifndef _I386_PROC_REG_H_ +#define _I386_PROC_REG_H_ + +/* + * Model Specific Registers + */ +#define MSR_P5_TSC 0x10 /* Time Stamp Register */ +#define MSR_P5_CESR 0x11 /* Control and Event Select Register */ +#define MSR_P5_CTR0 0x12 /* Counter #0 */ +#define MSR_P5_CTR1 0x13 /* Counter #1 */ + +#define MSR_P5_CESR_PC 0x0200 /* Pin Control */ +#define MSR_P5_CESR_CC 0x01C0 /* Counter Control mask */ +#define MSR_P5_CESR_ES 0x003F /* Event Control mask */ + +#define MSR_P5_CESR_SHIFT 16 /* Shift to get Counter 1 */ +#define MSR_P5_CESR_MASK (MSR_P5_CESR_PC|\ + MSR_P5_CESR_CC|\ + MSR_P5_CESR_ES) /* Mask Counter */ + +#define MSR_P5_CESR_CC_CLOCK 0x0100 /* Clock Counting (otherwise Event) */ +#define MSR_P5_CESR_CC_DISABLE 0x0000 /* Disable counter */ +#define MSR_P5_CESR_CC_CPL012 0x0040 /* Count if the CPL == 0, 1, 2 */ +#define MSR_P5_CESR_CC_CPL3 0x0080 /* Count if the CPL == 3 */ +#define MSR_P5_CESR_CC_CPL 0x00C0 /* Count regardless of the CPL */ + +#define MSR_P5_CESR_ES_DATA_READ 0x000000 /* Data Read */ +#define MSR_P5_CESR_ES_DATA_WRITE 0x000001 /* Data Write */ +#define MSR_P5_CESR_ES_DATA_RW 0x101000 /* Data Read or Write */ +#define MSR_P5_CESR_ES_DATA_TLB_MISS 0x000010 /* Data TLB Miss */ +#define MSR_P5_CESR_ES_DATA_READ_MISS 0x000011 /* Data Read Miss */ +#define MSR_P5_CESR_ES_DATA_WRITE_MISS 0x000100 /* Data Write Miss */ +#define MSR_P5_CESR_ES_DATA_RW_MISS 0x101001 /* Data Read or Write Miss */ +#define MSR_P5_CESR_ES_HIT_EM 0x000101 /* Write (hit) to M|E state */ +#define MSR_P5_CESR_ES_DATA_CACHE_WB 0x000110 /* Cache lines written back */ +#define MSR_P5_CESR_ES_EXTERNAL_SNOOP 0x000111 /* External Snoop */ +#define MSR_P5_CESR_ES_CACHE_SNOOP_HIT 0x001000 /* Data cache snoop hits */ +#define MSR_P5_CESR_ES_MEM_ACCESS_PIPE 0x001001 /* Mem. 
access in both pipes */ +#define MSR_P5_CESR_ES_BANK_CONFLICTS 0x001010 /* Bank conflicts */ +#define MSR_P5_CESR_ES_MISALIGNED 0x001011 /* Misaligned Memory or I/O */ +#define MSR_P5_CESR_ES_CODE_READ 0x001100 /* Code Read */ +#define MSR_P5_CESR_ES_CODE_TLB_MISS 0x001101 /* Code TLB miss */ +#define MSR_P5_CESR_ES_CODE_CACHE_MISS 0x001110 /* Code Cache miss */ +#define MSR_P5_CESR_ES_SEGMENT_LOADED 0x001111 /* Any segment reg. loaded */ +#define MSR_P5_CESR_ES_BRANCHE 0x010010 /* Branches */ +#define MSR_P5_CESR_ES_BTB_HIT 0x010011 /* BTB Hits */ +#define MSR_P5_CESR_ES_BRANCHE_BTB 0x010100 /* Taken branch or BTB Hit */ +#define MSR_P5_CESR_ES_PIPELINE_FLUSH 0x010101 /* Pipeline Flushes */ +#define MSR_P5_CESR_ES_INSTRUCTION 0x010110 /* Instruction executed */ +#define MSR_P5_CESR_ES_INSTRUCTION_V 0x010111 /* Inst. executed (v-pipe) */ +#define MSR_P5_CESR_ES_BUS_CYCLE 0x011000 /* Clocks while bus cycle */ +#define MSR_P5_CESR_ES_FULL_WRITE_BUF 0x011001 /* Clocks while full wrt buf. */ +#define MSR_P5_CESR_ES_DATA_MEM_READ 0x011010 /* Pipeline waiting for read */ +#define MSR_P5_CESR_ES_WRITE_EM 0x011011 /* Stall on write E|M state */ +#define MSR_P5_CESR_ES_LOCKED_CYCLE 0x011100 /* Locked bus cycles */ +#define MSR_P5_CESR_ES_IO_CYCLE 0x011101 /* I/O Read or Write cycles */ +#define MSR_P5_CESR_ES_NON_CACHEABLE 0x011110 /* Non-cacheable Mem. 
read */ +#define MSR_P5_CESR_ES_AGI 0x011111 /* Stall because of AGI */ +#define MSR_P5_CESR_ES_FLOP 0x100010 /* Floating Point operations */ +#define MSR_P5_CESR_ES_BREAK_DR0 0x100011 /* Breakpoint matches on DR0 */ +#define MSR_P5_CESR_ES_BREAK_DR1 0x100100 /* Breakpoint matches on DR1 */ +#define MSR_P5_CESR_ES_BREAK_DR2 0x100101 /* Breakpoint matches on DR2 */ +#define MSR_P5_CESR_ES_BREAK_DR3 0x100110 /* Breakpoint matches on DR3 */ +#define MSR_P5_CESR_ES_HARDWARE_IT 0x100111 /* Hardware interrupts */ + +/* + * CR0 + */ +#define CR0_PG 0x80000000 /* Enable paging */ +#define CR0_CD 0x40000000 /* i486: Cache disable */ +#define CR0_NW 0x20000000 /* i486: No write-through */ +#define CR0_AM 0x00040000 /* i486: Alignment check mask */ +#define CR0_WP 0x00010000 /* i486: Write-protect kernel access */ +#define CR0_NE 0x00000020 /* i486: Handle numeric exceptions */ +#define CR0_ET 0x00000010 /* Extension type is 80387 */ + /* (not official) */ +#define CR0_TS 0x00000008 /* Task switch */ +#define CR0_EM 0x00000004 /* Emulate coprocessor */ +#define CR0_MP 0x00000002 /* Monitor coprocessor */ +#define CR0_PE 0x00000001 /* Enable protected mode */ + +/* + * CR4 + */ +#define CR4_MCE 0x00000040 /* p5: Machine Check Exceptions */ +#define CR4_PSE 0x00000010 /* p5: Page Size Extensions */ +#define CR4_DE 0x00000008 /* p5: Debugging Extensions */ +#define CR4_TSD 0x00000004 /* p5: Time Stamp Disable */ +#define CR4_PVI 0x00000002 /* p5: Protected-mode Virtual Interrupts */ +#define CR4_VME 0x00000001 /* p5: Virtual-8086 Mode Extensions */ + +#ifndef ASSEMBLER +extern unsigned int get_cr0(void); +extern void set_cr0( + unsigned int value); +extern unsigned int get_cr2(void); +extern unsigned int get_cr3(void); +extern void set_cr3( + unsigned int value); +extern unsigned int get_cr4(void); +extern void set_cr4( + unsigned int value); + +#define set_ts() \ + set_cr0(get_cr0() | CR0_TS) +extern void clear_ts(void); + +extern unsigned short get_tr(void); +extern void 
set_tr( + unsigned int seg); + +extern unsigned short get_ldt(void); +extern void set_ldt( + unsigned int seg); +#ifdef __GNUC__ +extern __inline__ unsigned int get_cr0(void) +{ + register unsigned int cr0; + __asm__ volatile("mov %%cr0, %0" : "=r" (cr0)); + return(cr0); +} + +extern __inline__ void set_cr0(unsigned int value) +{ + __asm__ volatile("mov %0, %%cr0" : : "r" (value)); +} + +extern __inline__ unsigned int get_cr2(void) +{ + register unsigned int cr2; + __asm__ volatile("mov %%cr2, %0" : "=r" (cr2)); + return(cr2); +} + +#if NCPUS > 1 && AT386 +/* + * get_cr3 and set_cr3 are more complicated for the MPs. cr3 is where + * the cpu number gets stored. The MP versions live in locore.s + */ +#else /* NCPUS > 1 && AT386 */ +extern __inline__ unsigned int get_cr3(void) +{ + register unsigned int cr3; + __asm__ volatile("mov %%cr3, %0" : "=r" (cr3)); + return(cr3); +} + +extern __inline__ void set_cr3(unsigned int value) +{ + __asm__ volatile("mov %0, %%cr3" : : "r" (value)); +} +#endif /* NCPUS > 1 && AT386 */ + +extern __inline__ void clear_ts(void) +{ + __asm__ volatile("clts"); +} + +extern __inline__ unsigned short get_tr(void) +{ + unsigned short seg; + __asm__ volatile("str %0" : "=rm" (seg)); + return(seg); +} + +extern __inline__ void set_tr(unsigned int seg) +{ + __asm__ volatile("ltr %0" : : "rm" ((unsigned short)(seg))); +} + +extern __inline__ unsigned short get_ldt(void) +{ + unsigned short seg; + __asm__ volatile("sldt %0" : "=rm" (seg)); + return(seg); +} + +extern __inline__ void set_ldt(unsigned int seg) +{ + __asm__ volatile("lldt %0" : : "rm" ((unsigned short)(seg))); +} + +extern __inline__ void flush_tlb(void) +{ + unsigned long cr3_temp; + __asm__ volatile("movl %%cr3, %0; movl %0, %%cr3" : "=r" (cr3_temp) :: "memory"); +} + +extern __inline__ void invlpg(unsigned long addr) +{ + __asm__ volatile("invlpg (%0)" :: "r" (addr) : "memory"); +} +#endif /* __GNUC__ */ +#endif /* ASSEMBLER */ + +#endif /* _I386_PROC_REG_H_ */ diff --git 
a/osfmk/i386/read_fault.c b/osfmk/i386/read_fault.c new file mode 100644 index 000000000..3cd758ac5 --- /dev/null +++ b/osfmk/i386/read_fault.c @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * NOTE(review): the arguments of all #include directives below were
 * lost in extraction (angle-bracket text stripped); restore from the
 * original source before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include

#include

#include

/*
 *	Expansion of vm_fault for read fault in kernel mode.
 *	Must enter the mapping as writable, since the i386
 *	(and i860 in i386 compatability mode) ignores write
 *	protection in kernel mode.
 *
 *	Note that this routine can be called for pmap's other
 *	than the kernel_pmap, in which case it just enters
 *	a read-only mapping.  (See e.g. kernel_trap().)
 */
kern_return_t
intel_read_fault(
	vm_map_t	map,
	vm_offset_t	vaddr)
{
	vm_map_version_t	version;	/* Map version for
						   verification */
	vm_object_t		object;		/* Top-level object */
	vm_object_offset_t	offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_behavior_t		behavior;	/* Expected paging behavior */
	vm_object_offset_t	lo_offset, hi_offset;
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	boolean_t		wired;		/* Is map region wired? */
	kern_return_t		result;
	register vm_page_t	m;
	vm_map_t		pmap_map;
	vm_map_t		original_map = map;

    RetryFault:

	map = original_map;

	/*
	 *	Find the backing store object and offset into it
	 *	to begin search.
	 */
	vm_map_lock_read(map);
	result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
				      &object, &offset, &prot, &wired,
				      &behavior, &lo_offset,
				      &hi_offset, &pmap_map);

	vm_map_unlock_read(map);

	if (result != KERN_SUCCESS) {
		return (result);
	}

	/* The lookup may redirect us to a submap; keep a ref on it. */
	if(pmap_map != map) {
		vm_map_reference(pmap_map);
		vm_map_unlock_read(pmap_map);
	}

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are playing with it.
	 */
	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	vm_object_paging_begin(object);

	result = vm_fault_page(object, offset, VM_PROT_READ, FALSE,
			       THREAD_ABORTSAFE,
			       lo_offset, hi_offset, behavior,
			       &prot, &result_page, &top_page, (int *)0,
			       0, map->no_zero_fill, FALSE);

	if (result != VM_FAULT_SUCCESS) {
		vm_object_deallocate(object);
		if(pmap_map != map) {
			vm_map_deallocate(pmap_map);
		}

		/* Retryable outcomes loop back; hard errors propagate. */
		switch (result) {
			case VM_FAULT_RETRY:
				goto RetryFault;
			case VM_FAULT_INTERRUPTED:
				return (KERN_SUCCESS);
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT();
				goto RetryFault;
			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				goto RetryFault;
			case VM_FAULT_MEMORY_ERROR:
				return (KERN_MEMORY_ERROR);
		}
	}

	m = result_page;

	/*
	 *	How to clean up the result of vm_fault_page.  This
	 *	happens whether the mapping is entered or not.
	 */

#define UNLOCK_AND_DEALLOCATE				\
	MACRO_BEGIN					\
	vm_fault_cleanup(m->object, top_page);		\
	vm_object_deallocate(object);			\
	MACRO_END

	/*
	 *	What to do with the resulting page from vm_fault_page
	 *	if it doesn't get entered into the physical map:
	 */

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END

	/*
	 *	We must verify that the maps have not changed.
	 */
	vm_object_unlock(m->object);

	if ((map != original_map) || !vm_map_verify(map, &version)) {
		vm_object_t		retry_object;
		vm_object_offset_t	retry_offset;
		vm_prot_t		retry_prot;

		if (map != pmap_map) {
			vm_map_deallocate(pmap_map);
		}

		map = original_map;
		vm_map_lock_read(map);

		result = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ, &version,
				&retry_object, &retry_offset, &retry_prot,
				&wired, &behavior, &lo_offset,
				&hi_offset, &pmap_map);

		if (result != KERN_SUCCESS) {
			vm_map_unlock_read(map);
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return (result);
		}

		if (map != pmap_map) {
			vm_map_reference(pmap_map);
		}

		vm_object_unlock(retry_object);

		/* A different object/offset now backs vaddr: start over. */
		if (retry_object != object || retry_offset != offset) {
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			vm_map_unlock_read(map);
			if(pmap_map != map) {
				vm_map_unlock_read(pmap_map);
				vm_map_deallocate(pmap_map);
			}
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}
	}

	/*
	 *	Put the page in the physical map.
	 */

	PMAP_ENTER(pmap_map->pmap, vaddr, m, VM_PROT_READ, wired);

	if(pmap_map != map) {
		vm_map_unlock_read(pmap_map);
		vm_map_deallocate(pmap_map);
	}

	vm_object_lock(m->object);
	vm_page_lock_queues();
	if (!m->active && !m->inactive)
		vm_page_activate(m);
	m->reference = TRUE;
	vm_page_unlock_queues();

	vm_map_verify_done(map, &version);
	PAGE_WAKEUP_DONE(m);

	UNLOCK_AND_DEALLOCATE;

#undef	UNLOCK_AND_DEALLOCATE
#undef	RELEASE_PAGE
	return (KERN_SUCCESS);
}

diff --git a/osfmk/i386/rtclock.c b/osfmk/i386/rtclock.c
new file mode 100644
index 000000000..f7cd98e50
--- /dev/null
+++ b/osfmk/i386/rtclock.c
@@ -0,0 +1,1071 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * File: i386/rtclock.c + * Purpose: Routines for handling the machine dependent + * real-time clock. This clock is generated by + * the Intel 8254 Programmable Interval Timer. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* HZ */ +#include +#include +#include /* for kernel_map */ +#include +#include +#include +#include +#include +#include + +int sysclk_config(void); + +int sysclk_init(void); + +kern_return_t sysclk_gettime( + mach_timespec_t *cur_time); + +kern_return_t sysclk_getattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); + +kern_return_t sysclk_setattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t count); + +void sysclk_setalarm( + mach_timespec_t *alarm_time); + +extern void (*IOKitRegisterInterruptHook)(void *, int irq, int isclock); + +/* + * Lists of clock routines. 
+ */ +struct clock_ops sysclk_ops = { + sysclk_config, sysclk_init, + sysclk_gettime, 0, + sysclk_getattr, sysclk_setattr, + sysclk_setalarm, +}; + +int calend_config(void); + +int calend_init(void); + +kern_return_t calend_gettime( + mach_timespec_t *cur_time); + +kern_return_t calend_settime( + mach_timespec_t *cur_time); + +kern_return_t calend_getattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); + +struct clock_ops calend_ops = { + calend_config, calend_init, + calend_gettime, calend_settime, + calend_getattr, 0, + 0, +}; + +/* local data declarations */ +mach_timespec_t *RtcTime = (mach_timespec_t *)0; +mach_timespec_t *RtcAlrm; +clock_res_t RtcDelt; + +/* global data declarations */ +struct { + AbsoluteTime abstime; + + mach_timespec_t time; + mach_timespec_t alarm_time; /* time of next alarm */ + + mach_timespec_t calend_offset; + boolean_t calend_is_set; + + AbsoluteTime timer_deadline; + boolean_t timer_is_set; + clock_timer_func_t timer_expire; + + clock_res_t new_ires; /* pending new resolution (nano ) */ + clock_res_t intr_nsec; /* interrupt resolution (nano) */ + + decl_simple_lock_data(,lock) /* real-time clock device lock */ +} rtclock; + +unsigned int clknum; /* clks per second */ +unsigned int new_clknum; /* pending clknum */ +unsigned int time_per_clk; /* time per clk in ZHZ */ +unsigned int clks_per_int; /* clks per interrupt */ +unsigned int clks_per_int_99; +int rtc_intr_count; /* interrupt counter */ +int rtc_intr_hertz; /* interrupts per HZ */ +int rtc_intr_freq; /* interrupt frequency */ +int rtc_print_lost_tick; /* print lost tick */ + +/* + * Macros to lock/unlock real-time clock device. + */ +#define LOCK_RTC(s) \ +MACRO_BEGIN \ + (s) = splclock(); \ + simple_lock(&rtclock.lock); \ +MACRO_END + +#define UNLOCK_RTC(s) \ +MACRO_BEGIN \ + simple_unlock(&rtclock.lock); \ + splx(s); \ +MACRO_END + +/* + * i8254 control. 
+ *
+ * The i8254 is a traditional PC device with some arbitrary characteristics.
+ * Basically, it is a register that counts at a fixed rate and can be
+ * programmed to generate an interrupt every N counts. The count rate is
+ * clknum counts per second (see pit.h), historically 1193167 we believe.
+ * Various constants are computed based on this value, and we calculate
+ * them at init time for execution efficiency. To obtain sufficient
+ * accuracy, some of the calculations are most easily done in floating
+ * point and then converted to int.
+ *
+ * We want an interrupt every 10 milliseconds, approximately. The count
+ * which will do that is clks_per_int. However, that many counts is not
+ * *exactly* 10 milliseconds; it is a bit more or less depending on
+ * roundoff. The actual time per tick is calculated and saved in
+ * rtclock.intr_nsec, and it is that value which is added to the time
+ * register on each tick.
+ *
+ * The i8254 counter can be read between interrupts in order to determine
+ * the time more accurately. The counter counts down from the preset value
+ * toward 0, and we have to handle the case where the counter has been
+ * reset just before being read and before the interrupt has been serviced.
+ * Given a count since the last interrupt, the time since then is given
+ * by (count * time_per_clk). In order to minimize integer truncation,
+ * we perform this calculation in an arbitrary unit of time which maintains
+ * the maximum precision, i.e. such that one tick is 1.0e9 of these units,
+ * or close to the precision of a 32-bit int. We then divide by this unit
+ * (which doesn't lose precision) to get nanoseconds. For notation
+ * purposes, this unit is defined as ZHZ = zanoseconds per nanosecond.
+ *
+ * This sequence to do all this is in sysclk_gettime. For efficiency, this
+ * sequence also needs the value that the counter will have if it has just
+ * overflowed, so we precompute that also. 
ALSO, certain platforms + * (specifically the DEC XL5100) have been observed to have problem + * with latching the counter, and they occasionally (say, one out of + * 100,000 times) return a bogus value. Hence, the present code reads + * the counter twice and checks for a consistent pair of values. + * + * Some attributes of the rt clock can be changed, including the + * interrupt resolution. We default to the minimum resolution (10 ms), + * but allow a finer resolution to be requested. The assumed frequency + * of the clock can also be set since it appears that the actual + * frequency of real-world hardware can vary from the nominal by + * 200 ppm or more. When the frequency is set, the values above are + * recomputed and we continue without resetting or changing anything else. + */ +#define RTC_MINRES (NSEC_PER_SEC / HZ) /* nsec per tick */ +#define RTC_MAXRES (RTC_MINRES / 20) /* nsec per tick */ +#define ZANO (1000000000) +#define ZHZ (ZANO / (NSEC_PER_SEC / HZ)) +#define READ_8254(val) { \ + outb(PITCTL_PORT, PIT_C0); \ + (val) = inb(PITCTR0_PORT); \ + (val) |= inb(PITCTR0_PORT) << 8 ; } + +/* + * Calibration delay counts. + */ +unsigned int delaycount = 10; +unsigned int microdata = 50; + +/* + * Forward decl. + */ + +extern int measure_delay(int us); +void rtc_setvals( unsigned int, clock_res_t ); + +/* + * Initialize non-zero clock structure values. + */ +void +rtc_setvals( + unsigned int new_clknum, + clock_res_t new_ires + ) +{ + unsigned int timeperclk; + unsigned int scale0; + unsigned int scale1; + unsigned int res; + + clknum = new_clknum; + rtc_intr_freq = (NSEC_PER_SEC / new_ires); + rtc_intr_hertz = rtc_intr_freq / HZ; + clks_per_int = (clknum + (rtc_intr_freq / 2)) / rtc_intr_freq; + clks_per_int_99 = clks_per_int - clks_per_int/100; + + /* + * The following calculations are done with scaling integer operations + * in order that the integer results are accurate to the lsb. 
+ */ + timeperclk = div_scale(ZANO, clknum, &scale0); /* 838.105647 nsec */ + + time_per_clk = mul_scale(ZHZ, timeperclk, &scale1); /* 83810 */ + if (scale0 > scale1) + time_per_clk >>= (scale0 - scale1); + else if (scale0 < scale1) + panic("rtc_clock: time_per_clk overflow\n"); + + /* + * Notice that rtclock.intr_nsec is signed ==> use unsigned int res + */ + res = mul_scale(clks_per_int, timeperclk, &scale1); /* 10000276 */ + if (scale0 > scale1) + rtclock.intr_nsec = res >> (scale0 - scale1); + else + panic("rtc_clock: rtclock.intr_nsec overflow\n"); + + rtc_intr_count = 1; + RtcDelt = rtclock.intr_nsec/2; +} + +/* + * Configure the real-time clock device. Return success (1) + * or failure (0). + */ + +int +sysclk_config(void) +{ + int RtcFlag; + int pic; + +#if NCPUS > 1 + mp_disable_preemption(); + if (cpu_number() != master_cpu) { + mp_enable_preemption(); + return(1); + } + mp_enable_preemption(); +#endif + /* + * Setup device. + */ +#if MP_V1_1 + { + extern boolean_t mp_v1_1_initialized; + if (mp_v1_1_initialized) + pic = 2; + else + pic = 0; + } +#else + pic = 0; /* FIXME .. interrupt registration moved to AppleIntelClock */ +#endif + + + /* + * We should attempt to test the real-time clock + * device here. If it were to fail, we should panic + * the system. + */ + RtcFlag = /* test device */1; + printf("realtime clock configured\n"); + + simple_lock_init(&rtclock.lock, ETAP_NO_TRACE); + return (RtcFlag); +} + +/* + * Initialize the real-time clock device. Return success (1) + * or failure (0). Since the real-time clock is required to + * provide canonical mapped time, we allocate a page to keep + * the clock time value. In addition, various variables used + * to support the clock are initialized. Note: the clock is + * not started until rtclock_reset is called. 
+ */ +int +sysclk_init(void) +{ + vm_offset_t *vp; +#if NCPUS > 1 + mp_disable_preemption(); + if (cpu_number() != master_cpu) { + mp_enable_preemption(); + return(1); + } + mp_enable_preemption(); +#endif + + RtcTime = &rtclock.time; + rtc_setvals( CLKNUM, RTC_MINRES ); /* compute constants */ + return (1); +} + +static volatile unsigned int last_ival = 0; + +/* + * Get the clock device time. This routine is responsible + * for converting the device's machine dependent time value + * into a canonical mach_timespec_t value. + */ +kern_return_t +sysclk_gettime( + mach_timespec_t *cur_time) /* OUT */ +{ + mach_timespec_t itime = {0, 0}; + unsigned int val, val2; + int s; + + if (!RtcTime) { + /* Uninitialized */ + cur_time->tv_nsec = 0; + cur_time->tv_sec = 0; + return (KERN_SUCCESS); + } + + /* + * Inhibit interrupts. Determine the incremental + * time since the last interrupt. (This could be + * done in assembler for a bit more speed). + */ + LOCK_RTC(s); + do { + READ_8254(val); /* read clock */ + READ_8254(val2); /* read clock */ + } while ( val2 > val || val2 < val - 10 ); + if ( val > clks_per_int_99 ) { + outb( 0x0a, 0x20 ); /* see if interrupt pending */ + if ( inb( 0x20 ) & 1 ) + itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */ + } + itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ; + if ( itime.tv_nsec < last_ival ) { + if (rtc_print_lost_tick) + printf( "rtclock: missed clock interrupt.\n" ); + } + last_ival = itime.tv_nsec; + cur_time->tv_sec = rtclock.time.tv_sec; + cur_time->tv_nsec = rtclock.time.tv_nsec; + UNLOCK_RTC(s); + ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime)); + return (KERN_SUCCESS); +} + +kern_return_t +sysclk_gettime_internal( + mach_timespec_t *cur_time) /* OUT */ +{ + mach_timespec_t itime = {0, 0}; + unsigned int val, val2; + + if (!RtcTime) { + /* Uninitialized */ + cur_time->tv_nsec = 0; + cur_time->tv_sec = 0; + return (KERN_SUCCESS); + } + + /* + * Inhibit interrupts. 
Determine the incremental + * time since the last interrupt. (This could be + * done in assembler for a bit more speed). + */ + do { + READ_8254(val); /* read clock */ + READ_8254(val2); /* read clock */ + } while ( val2 > val || val2 < val - 10 ); + if ( val > clks_per_int_99 ) { + outb( 0x0a, 0x20 ); /* see if interrupt pending */ + if ( inb( 0x20 ) & 1 ) + itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */ + } + itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ; + if ( itime.tv_nsec < last_ival ) { + if (rtc_print_lost_tick) + printf( "rtclock: missed clock interrupt.\n" ); + } + last_ival = itime.tv_nsec; + cur_time->tv_sec = rtclock.time.tv_sec; + cur_time->tv_nsec = rtclock.time.tv_nsec; + ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime)); + return (KERN_SUCCESS); +} + +/* + * Get the clock device time when ALL interrupts are already disabled. + * Same as above except for turning interrupts off and on. + * This routine is responsible for converting the device's machine dependent + * time value into a canonical mach_timespec_t value. 
+ */ +void +sysclk_gettime_interrupts_disabled( + mach_timespec_t *cur_time) /* OUT */ +{ + mach_timespec_t itime = {0, 0}; + unsigned int val; + + if (!RtcTime) { + /* Uninitialized */ + cur_time->tv_nsec = 0; + cur_time->tv_sec = 0; + return; + } + + simple_lock(&rtclock.lock); + + /* + * Copy the current time knowing that we cant be interrupted + * between the two longwords and so dont need to use MTS_TO_TS + */ + READ_8254(val); /* read clock */ + if ( val > clks_per_int_99 ) { + outb( 0x0a, 0x20 ); /* see if interrupt pending */ + if ( inb( 0x20 ) & 1 ) + itime.tv_nsec = rtclock.intr_nsec; /* yes, add a tick */ + } + itime.tv_nsec += ((clks_per_int - val) * time_per_clk) / ZHZ; + if ( itime.tv_nsec < last_ival ) { + if (rtc_print_lost_tick) + printf( "rtclock: missed clock interrupt.\n" ); + } + last_ival = itime.tv_nsec; + cur_time->tv_sec = rtclock.time.tv_sec; + cur_time->tv_nsec = rtclock.time.tv_nsec; + ADD_MACH_TIMESPEC(cur_time, ((mach_timespec_t *)&itime)); + + simple_unlock(&rtclock.lock); +} + +static +natural_t +get_uptime_ticks(void) +{ + natural_t result = 0; + unsigned int val, val2; + + if (!RtcTime) + return (result); + + /* + * Inhibit interrupts. Determine the incremental + * time since the last interrupt. (This could be + * done in assembler for a bit more speed). + */ + do { + READ_8254(val); /* read clock */ + READ_8254(val2); /* read clock */ + } while (val2 > val || val2 < val - 10); + if (val > clks_per_int_99) { + outb(0x0a, 0x20); /* see if interrupt pending */ + if (inb(0x20) & 1) + result = rtclock.intr_nsec; /* yes, add a tick */ + } + result += ((clks_per_int - val) * time_per_clk) / ZHZ; + if (result < last_ival) { + if (rtc_print_lost_tick) + printf( "rtclock: missed clock interrupt.\n" ); + } + + return (result); +} + +/* + * Get clock device attributes. 
+ */ +kern_return_t +sysclk_getattr( + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + spl_t s; + + if (*count != 1) + return (KERN_FAILURE); + switch (flavor) { + + case CLOCK_GET_TIME_RES: /* >0 res */ +#if (NCPUS == 1 || (MP_V1_1 && 0)) + LOCK_RTC(s); + *(clock_res_t *) attr = 1000; + UNLOCK_RTC(s); + break; +#endif /* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */ + case CLOCK_ALARM_CURRES: /* =0 no alarm */ + LOCK_RTC(s); + *(clock_res_t *) attr = rtclock.intr_nsec; + UNLOCK_RTC(s); + break; + + case CLOCK_ALARM_MAXRES: + *(clock_res_t *) attr = RTC_MAXRES; + break; + + case CLOCK_ALARM_MINRES: + *(clock_res_t *) attr = RTC_MINRES; + break; + + default: + return (KERN_INVALID_VALUE); + } + return (KERN_SUCCESS); +} + +/* + * Set clock device attributes. + */ +kern_return_t +sysclk_setattr( + clock_flavor_t flavor, + clock_attr_t attr, /* IN */ + mach_msg_type_number_t count) /* IN */ +{ + spl_t s; + int freq; + int adj; + clock_res_t new_ires; + + if (count != 1) + return (KERN_FAILURE); + switch (flavor) { + + case CLOCK_GET_TIME_RES: + case CLOCK_ALARM_MAXRES: + case CLOCK_ALARM_MINRES: + return (KERN_FAILURE); + + case CLOCK_ALARM_CURRES: + new_ires = *(clock_res_t *) attr; + + /* + * The new resolution must be within the predetermined + * range. If the desired resolution cannot be achieved + * to within 0.1%, an error is returned. + */ + if (new_ires < RTC_MAXRES || new_ires > RTC_MINRES) + return (KERN_INVALID_VALUE); + freq = (NSEC_PER_SEC / new_ires); + adj = (((clknum % freq) * new_ires) / clknum); + if (adj > (new_ires / 1000)) + return (KERN_INVALID_VALUE); + /* + * Record the new alarm resolution which will take effect + * on the next HZ aligned clock tick. 
+ */ + LOCK_RTC(s); + if ( freq != rtc_intr_freq ) { + rtclock.new_ires = new_ires; + new_clknum = clknum; + } + UNLOCK_RTC(s); + return (KERN_SUCCESS); + + default: + return (KERN_INVALID_VALUE); + } +} + +/* + * Set next alarm time for the clock device. This call + * always resets the time to deliver an alarm for the + * clock. + */ +void +sysclk_setalarm( + mach_timespec_t *alarm_time) +{ + spl_t s; + + LOCK_RTC(s); + rtclock.alarm_time = *alarm_time; + RtcAlrm = &rtclock.alarm_time; + UNLOCK_RTC(s); +} + +/* + * Configure the calendar clock. + */ +int +calend_config(void) +{ + return bbc_config(); +} + +/* + * Initialize calendar clock. + */ +int +calend_init(void) +{ + return (1); +} + +/* + * Get the current clock time. + */ +kern_return_t +calend_gettime( + mach_timespec_t *cur_time) /* OUT */ +{ + spl_t s; + + LOCK_RTC(s); + if (!rtclock.calend_is_set) { + UNLOCK_RTC(s); + return (KERN_FAILURE); + } + + (void) sysclk_gettime_internal(cur_time); + ADD_MACH_TIMESPEC(cur_time, &rtclock.calend_offset); + UNLOCK_RTC(s); + + return (KERN_SUCCESS); +} + +/* + * Set the current clock time. + */ +kern_return_t +calend_settime( + mach_timespec_t *new_time) +{ + mach_timespec_t curr_time; + spl_t s; + + LOCK_RTC(s); + (void) sysclk_gettime_internal(&curr_time); + rtclock.calend_offset = *new_time; + SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); + rtclock.calend_is_set = TRUE; + UNLOCK_RTC(s); + + (void) bbc_settime(new_time); + + return (KERN_SUCCESS); +} + +/* + * Get clock device attributes. 
+ */ +kern_return_t +calend_getattr( + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + spl_t s; + + if (*count != 1) + return (KERN_FAILURE); + switch (flavor) { + + case CLOCK_GET_TIME_RES: /* >0 res */ +#if (NCPUS == 1 || (MP_V1_1 && 0)) + LOCK_RTC(s); + *(clock_res_t *) attr = 1000; + UNLOCK_RTC(s); + break; +#else /* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */ + LOCK_RTC(s); + *(clock_res_t *) attr = rtclock.intr_nsec; + UNLOCK_RTC(s); + break; +#endif /* (NCPUS == 1 || (MP_V1_1 && 0)) && AT386 */ + + case CLOCK_ALARM_CURRES: /* =0 no alarm */ + case CLOCK_ALARM_MINRES: + case CLOCK_ALARM_MAXRES: + *(clock_res_t *) attr = 0; + break; + + default: + return (KERN_INVALID_VALUE); + } + return (KERN_SUCCESS); +} + +void +clock_adjust_calendar( + clock_res_t nsec) +{ + spl_t s; + + LOCK_RTC(s); + if (rtclock.calend_is_set) + ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec); + UNLOCK_RTC(s); +} + +void +clock_initialize_calendar(void) +{ + mach_timespec_t bbc_time, curr_time; + spl_t s; + + if (bbc_gettime(&bbc_time) != KERN_SUCCESS) + return; + + LOCK_RTC(s); + if (!rtclock.calend_is_set) { + (void) sysclk_gettime_internal(&curr_time); + rtclock.calend_offset = bbc_time; + SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); + rtclock.calend_is_set = TRUE; + } + UNLOCK_RTC(s); +} + +mach_timespec_t +clock_get_calendar_offset(void) +{ + mach_timespec_t result = MACH_TIMESPEC_ZERO; + spl_t s; + + LOCK_RTC(s); + if (rtclock.calend_is_set) + result = rtclock.calend_offset; + UNLOCK_RTC(s); + + return (result); +} + +void +clock_timebase_info( + mach_timebase_info_t info) +{ + spl_t s; + + LOCK_RTC(s); + info->numer = info->denom = 1; + UNLOCK_RTC(s); +} + +void +clock_set_timer_deadline( + AbsoluteTime deadline) +{ + spl_t s; + + LOCK_RTC(s); + rtclock.timer_deadline = deadline; + rtclock.timer_is_set = TRUE; + UNLOCK_RTC(s); +} + +void +clock_set_timer_func( + clock_timer_func_t func) +{ + spl_t s; + + 
LOCK_RTC(s); + if (rtclock.timer_expire == NULL) + rtclock.timer_expire = func; + UNLOCK_RTC(s); +} + + + +/* + * Load the count register and start the clock. + */ +#define RTCLOCK_RESET() { \ + outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); \ + outb(PITCTR0_PORT, (clks_per_int & 0xff)); \ + outb(PITCTR0_PORT, (clks_per_int >> 8)); \ +} + +/* + * Reset the clock device. This causes the realtime clock + * device to reload its mode and count value (frequency). + * Note: the CPU should be calibrated + * before starting the clock for the first time. + */ + +void +rtclock_reset(void) +{ + int s; + +#if NCPUS > 1 && !(MP_V1_1 && 0) + mp_disable_preemption(); + if (cpu_number() != master_cpu) { + mp_enable_preemption(); + return; + } + mp_enable_preemption(); +#endif /* NCPUS > 1 && AT386 && !MP_V1_1 */ + LOCK_RTC(s); + RTCLOCK_RESET(); + UNLOCK_RTC(s); +} + +/* + * Real-time clock device interrupt. Called only on the + * master processor. Updates the clock time and upcalls + * into the higher level clock code to deliver alarms. + */ +int +rtclock_intr(void) +{ + AbsoluteTime abstime; + mach_timespec_t clock_time; + int i; + spl_t s; + + /* + * Update clock time. Do the update so that the macro + * MTS_TO_TS() for reading the mapped time works (e.g. + * update in order: mtv_csec, mtv_time.tv_nsec, mtv_time.tv_sec). + */ + LOCK_RTC(s); + i = rtclock.time.tv_nsec + rtclock.intr_nsec; + if (i < NSEC_PER_SEC) + rtclock.time.tv_nsec = i; + else { + rtclock.time.tv_nsec = i - NSEC_PER_SEC; + rtclock.time.tv_sec++; + } + /* note time now up to date */ + last_ival = 0; + + ADD_ABSOLUTETIME_TICKS(&rtclock.abstime, NSEC_PER_SEC/HZ); + abstime = rtclock.abstime; + if (rtclock.timer_is_set && + CMP_ABSOLUTETIME(&rtclock.timer_deadline, &abstime) <= 0) { + rtclock.timer_is_set = FALSE; + UNLOCK_RTC(s); + + (*rtclock.timer_expire)(abstime); + + LOCK_RTC(s); + } + + /* + * Perform alarm clock processing if needed. 
The time + * passed up is incremented by a half-interrupt tick + * to trigger alarms closest to their desired times. + * The clock_alarm_intr() routine calls sysclk_setalrm() + * before returning if later alarms are pending. + */ + + if (RtcAlrm && (RtcAlrm->tv_sec < RtcTime->tv_sec || + (RtcAlrm->tv_sec == RtcTime->tv_sec && + RtcDelt >= RtcAlrm->tv_nsec - RtcTime->tv_nsec))) { + clock_time.tv_sec = 0; + clock_time.tv_nsec = RtcDelt; + ADD_MACH_TIMESPEC (&clock_time, RtcTime); + RtcAlrm = 0; + UNLOCK_RTC(s); + /* + * Call clock_alarm_intr() without RTC-lock. + * The lock ordering is always CLOCK-lock + * before RTC-lock. + */ + clock_alarm_intr(SYSTEM_CLOCK, &clock_time); + LOCK_RTC(s); + } + + /* + * On a HZ-tick boundary: return 0 and adjust the clock + * alarm resolution (if requested). Otherwise return a + * non-zero value. + */ + if ((i = --rtc_intr_count) == 0) { + if (rtclock.new_ires) { + rtc_setvals(new_clknum, rtclock.new_ires); + RTCLOCK_RESET(); /* lock clock register */ + rtclock.new_ires = 0; + } + rtc_intr_count = rtc_intr_hertz; + } + UNLOCK_RTC(s); + return (i); +} + +void +clock_get_uptime( + AbsoluteTime *result) +{ + natural_t ticks; + spl_t s; + + LOCK_RTC(s); + ticks = get_uptime_ticks(); + *result = rtclock.abstime; + UNLOCK_RTC(s); + + ADD_ABSOLUTETIME_TICKS(result, ticks); +} + +void +clock_interval_to_deadline( + natural_t interval, + natural_t scale_factor, + AbsoluteTime *result) +{ + AbsoluteTime abstime; + + clock_get_uptime(result); + + clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); + + ADD_ABSOLUTETIME(result, &abstime); +} + +void +clock_interval_to_absolutetime_interval( + natural_t interval, + natural_t scale_factor, + AbsoluteTime *result) +{ + AbsoluteTime_to_scalar(result) = (uint64_t)interval * scale_factor; +} + +void +clock_absolutetime_interval_to_deadline( + AbsoluteTime abstime, + AbsoluteTime *result) +{ + clock_get_uptime(result); + + ADD_ABSOLUTETIME(result, &abstime); +} + +void 
+absolutetime_to_nanoseconds( + AbsoluteTime abstime, + UInt64 *result) +{ + *result = AbsoluteTime_to_scalar(&abstime); +} + +void +nanoseconds_to_absolutetime( + UInt64 nanoseconds, + AbsoluteTime *result) +{ + AbsoluteTime_to_scalar(result) = nanoseconds; +} + +/* + * measure_delay(microseconds) + * + * Measure elapsed time for delay calls + * Returns microseconds. + * + * Microseconds must not be too large since the counter (short) + * will roll over. Max is about 13 ms. Values smaller than 1 ms are ok. + * This uses the assumed frequency of the rt clock which is emperically + * accurate to only about 200 ppm. + */ + +int +measure_delay( + int us) +{ + unsigned int lsb, val; + + outb(PITCTL_PORT, PIT_C0|PIT_NDIVMODE|PIT_READMODE); + outb(PITCTR0_PORT, 0xff); /* set counter to max value */ + outb(PITCTR0_PORT, 0xff); + delay(us); + outb(PITCTL_PORT, PIT_C0); + lsb = inb(PITCTR0_PORT); + val = (inb(PITCTR0_PORT) << 8) | lsb; + val = 0xffff - val; + val *= 1000000; + val /= CLKNUM; + return(val); +} + +/* + * calibrate_delay(void) + * + * Adjust delaycount. Called from startup before clock is started + * for normal interrupt generation. 
+ */ + +void +calibrate_delay(void) +{ + unsigned val; + int prev = 0; + register int i; + + printf("adjusting delay count: %d", delaycount); + for (i=0; i<10; i++) { + prev = delaycount; + /* + * microdata must not be to large since measure_timer + * will not return accurate values if the counter (short) + * rolls over + */ + val = measure_delay(microdata); + delaycount *= microdata; + delaycount += val-1; /* round up to upper us */ + delaycount /= val; + if (delaycount <= 0) + delaycount = 1; + if (delaycount != prev) + printf(" %d", delaycount); + } + printf("\n"); +} + +#if MACH_KDB +void +test_delay(void); + +void +test_delay(void) +{ + register i; + + for (i = 0; i < 10; i++) + printf("%d, %d\n", i, measure_delay(i)); + for (i = 10; i <= 100; i+=10) + printf("%d, %d\n", i, measure_delay(i)); +} +#endif /* MACH_KDB */ diff --git a/osfmk/i386/rtclock_entries.h b/osfmk/i386/rtclock_entries.h new file mode 100644 index 000000000..151882afe --- /dev/null +++ b/osfmk/i386/rtclock_entries.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +extern int rtc_config(void); +extern int rtc_init(void); +extern kern_return_t rtc_gettime( + mach_timespec_t * curtime); +extern void rtc_gettime_interrupts_disabled( + mach_timespec_t * curtime); +extern kern_return_t rtc_settime( + mach_timespec_t * curtime); +extern kern_return_t rtc_getattr( + clock_flavor_t flavor, + clock_attr_t ttr, + mach_msg_type_number_t * count); +extern kern_return_t rtc_setattr( + clock_flavor_t flavor, + clock_attr_t ttr, + mach_msg_type_number_t count); +extern void rtc_setalrm( + mach_timespec_t * alarmtime); +extern void rtclock_reset(void); +extern int rtclock_intr(void); +extern void calibrate_delay(void); diff --git a/osfmk/i386/sched_param.h b/osfmk/i386/sched_param.h new file mode 100644 index 000000000..7aecc1b1f --- /dev/null +++ b/osfmk/i386/sched_param.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Scheduler parameters. + */ + +#ifndef _I386_SCHED_PARAM_H_ +#define _I386_SCHED_PARAM_H_ + +#endif _I386_SCHED_PARAM_H_ diff --git a/osfmk/i386/seg.h b/osfmk/i386/seg.h new file mode 100644 index 000000000..21c63306a --- /dev/null +++ b/osfmk/i386/seg.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _I386_SEG_H_ +#define _I386_SEG_H_ +#include + +/* + * i386 segmentation. + */ + +#ifndef __ASSEMBLER__ +/* + * Real segment descriptor. 
+ */ +struct real_descriptor { + unsigned int limit_low:16, /* limit 0..15 */ + base_low:16, /* base 0..15 */ + base_med:8, /* base 16..23 */ + access:8, /* access byte */ + limit_high:4, /* limit 16..19 */ + granularity:4, /* granularity */ + base_high:8; /* base 24..31 */ +}; + +struct real_gate { + unsigned int offset_low:16, /* offset 0..15 */ + selector:16, + word_count:8, + access:8, + offset_high:16; /* offset 16..31 */ +}; + +/* + * We build descriptors and gates in a 'fake' format to let the + * fields be contiguous. We shuffle them into the real format + * at runtime. + */ +struct fake_descriptor { + unsigned int offset:32; /* offset */ + unsigned int lim_or_seg:20; /* limit */ + /* or segment, for gate */ + unsigned int size_or_wdct:4; /* size/granularity */ + /* word count, for gate */ + unsigned int access:8; /* access */ +}; +#endif /*__ASSEMBLER__*/ + +#define SZ_32 0x4 /* 32-bit segment */ +#define SZ_G 0x8 /* 4K limit field */ + +#define ACC_A 0x01 /* accessed */ +#define ACC_TYPE 0x1e /* type field: */ + +#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */ + +#define ACC_LDT 0x02 /* LDT */ +#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */ +#define ACC_TASK_GATE 0x05 /* task gate */ +#define ACC_TSS 0x09 /* task segment */ +#define ACC_CALL_GATE 0x0c /* call gate */ +#define ACC_INTR_GATE 0x0e /* interrupt gate */ +#define ACC_TRAP_GATE 0x0f /* trap gate */ + +#define ACC_TSS_BUSY 0x02 /* task busy */ + +#define ACC_TYPE_USER 0x10 /* user descriptors */ + +#define ACC_DATA 0x10 /* data */ +#define ACC_DATA_W 0x12 /* data, writable */ +#define ACC_DATA_E 0x14 /* data, expand-down */ +#define ACC_DATA_EW 0x16 /* data, expand-down, + writable */ +#define ACC_CODE 0x18 /* code */ +#define ACC_CODE_R 0x1a /* code, readable */ +#define ACC_CODE_C 0x1c /* code, conforming */ +#define ACC_CODE_CR 0x1e /* code, conforming, + readable */ +#define ACC_PL 0x60 /* access rights: */ +#define ACC_PL_K 0x00 /* kernel access only */ +#define ACC_PL_U 0x60 /* 
user access */ +#define ACC_P 0x80 /* segment present */ + +/* + * Components of a selector + */ +#define SEL_LDTS 0x04 /* local selector */ +#define SEL_PL 0x03 /* privilege level: */ +#define SEL_PL_K 0x00 /* kernel selector */ +#define SEL_PL_U 0x03 /* user selector */ + +/* + * Convert selector to descriptor table index. + */ +#define sel_idx(sel) ((sel)>>3) + +/* + * User descriptors for MACH - 32-bit flat address space + */ +#define USER_SCALL 0x07 /* system call gate */ +#define USER_RPC 0x0f /* mach rpc call gate */ +#define USER_CS 0x17 /* user code segment */ +#define USER_DS 0x1f /* user data segment */ + +#define LDTSZ 4 + +/* + * Kernel descriptors for MACH - 32-bit flat address space. + */ +#define KERNEL_CS 0x08 /* kernel code */ +#define KERNEL_DS 0x10 /* kernel data */ +#define KERNEL_LDT 0x18 /* master LDT */ +#define KERNEL_TSS 0x20 /* master TSS (uniprocessor) */ +#ifdef MACH_BSD +#define BSD_SCALL_SEL 0x28 /* BSD System calls */ +#define MK25_SCALL_SEL 0x30 /* MK25 System Calls */ +#define MACHDEP_SCALL_SEL 0x38 /* Machdep SYstem calls */ +#else +#define USER_LDT 0x28 /* place for per-thread LDT */ +#define USER_TSS 0x30 /* place for per-thread TSS + that holds IO bitmap */ +#define FPE_CS 0x38 /* floating-point emulator code */ +#endif +#define USER_FPREGS 0x40 /* user-mode access to saved + floating-point registers */ +#define CPU_DATA 0x48 /* per-cpu data */ + +#ifdef MACH_BSD +#define USER_LDT 0x58 +#define USER_TSS 0x60 +#define FPE_CS 0x68 +#endif + +#if MACH_KDB +#define DEBUG_TSS 0x50 /* debug TSS (uniprocessor) */ + +#ifdef MACH_BSD +#define GDTSZ 14 +#else +#define GDTSZ 11 +#endif +#else + +#ifdef MACH_BSD +#define GDTSZ 13 +#else +#define GDTSZ 10 +#endif +#endif + +/* + * Interrupt table is always 256 entries long. 
+ */ +#define IDTSZ 256 + +#endif /* _I386_SEG_H_ */ diff --git a/osfmk/i386/setjmp.h b/osfmk/i386/setjmp.h new file mode 100644 index 000000000..0f98cd323 --- /dev/null +++ b/osfmk/i386/setjmp.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Setjmp/longjmp buffer for i386. + */ +#ifndef _I386_SETJMP_H_ +#define _I386_SETJMP_H_ + +typedef struct jmp_buf { + int jmp_buf[6]; /* ebx, esi, edi, ebp, esp, eip */ +} jmp_buf_t; + +#endif /* _I386_SETJMP_H_ */ diff --git a/osfmk/i386/setjmp.s b/osfmk/i386/setjmp.s new file mode 100644 index 000000000..5c8af20b3 --- /dev/null +++ b/osfmk/i386/setjmp.s @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ diff --git a/osfmk/i386/stab.h b/osfmk/i386/stab.h new file mode 100644 index 000000000..897f79e2c --- /dev/null +++ b/osfmk/i386/stab.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Copyright (c) 1991 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)stab.h 5.2 (Berkeley) 4/4/91 + */ +/* + */ + + +/* + * The following are symbols used by various debuggers and by the Pascal + * compiler. Each of them must have one (or more) of the bits defined by + * the N_STAB mask set. 
+ */ + +#define N_GSYM 0x20 /* global symbol */ +#define N_FNAME 0x22 /* F77 function name */ +#define N_FUN 0x24 /* procedure name */ +#define N_STSYM 0x26 /* data segment variable */ +#define N_LCSYM 0x28 /* bss segment variable */ +#define N_MAIN 0x2a /* main function name */ +#define N_PC 0x30 /* global Pascal symbol */ +#define N_RSYM 0x40 /* register variable */ +#define N_SLINE 0x44 /* text segment line number */ +#define N_DSLINE 0x46 /* data segment line number */ +#define N_BSLINE 0x48 /* bss segment line number */ +#define N_SSYM 0x60 /* structure/union element */ +#define N_SO 0x64 /* main source file name */ +#define N_LSYM 0x80 /* stack variable */ +#define N_BINCL 0x82 /* include file beginning */ +#define N_SOL 0x84 /* included source file name */ +#define N_PSYM 0xa0 /* parameter variable */ +#define N_EINCL 0xa2 /* include file end */ +#define N_ENTRY 0xa4 /* alternate entry point */ +#define N_LBRAC 0xc0 /* left bracket */ +#define N_EXCL 0xc2 /* deleted include file */ +#define N_RBRAC 0xe0 /* right bracket */ +#define N_BCOMM 0xe2 /* begin common */ +#define N_ECOMM 0xe4 /* end common */ +#define N_ECOML 0xe8 /* end common (local name) */ +#define N_LENG 0xfe /* length of preceding entry */ diff --git a/osfmk/i386/start.s b/osfmk/i386/start.s new file mode 100644 index 000000000..ac9bc3394 --- /dev/null +++ b/osfmk/i386/start.s @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include +#include + +#include +#include +#include + +#if NCPUS > 1 + +#define CX(addr,reg) addr(,reg,4) + +#else + +#define CPU_NUMBER(reg) +#define CX(addr,reg) addr + +#endif /* NCPUS > 1 */ + +#include + +/* + * GAS won't handle an intersegment jump with a relocatable offset. 
+ */ +#define LJMP(segment,address) \ + .byte 0xea ;\ + .long address ;\ + .word segment + + + +#define KVTOPHYS (-KERNELBASE) +#define KVTOLINEAR LINEAR_KERNELBASE + + +#define PA(addr) (addr)+KVTOPHYS +#define VA(addr) (addr)-KVTOPHYS + + .data + .align 2 + .globl EXT(_kick_buffer_) +EXT(_kick_buffer_): + .long 1 + .long 3 + .set .,.+16836 +/* + * Interrupt and bootup stack for initial processor. + */ + .align ALIGN + .globl EXT(intstack) +EXT(intstack): + .set ., .+INTSTACK_SIZE + .globl EXT(eintstack) +EXT(eintstack:) + +#if NCPUS == 1 + .globl EXT(int_stack_high) /* all interrupt stacks */ +EXT(int_stack_high): /* must lie below this */ + .long EXT(eintstack) /* address */ + + .globl EXT(int_stack_top) /* top of interrupt stack */ +EXT(int_stack_top): + .long EXT(eintstack) +#endif + +#if MACH_KDB +/* + * Kernel debugger stack for each processor. + */ + .align ALIGN + .globl EXT(db_stack_store) +EXT(db_stack_store): + .set ., .+(INTSTACK_SIZE*NCPUS) + +/* + * Stack for last-ditch debugger task for each processor. + */ + .align ALIGN + .globl EXT(db_task_stack_store) +EXT(db_task_stack_store): + .set ., .+(INTSTACK_SIZE*NCPUS) +#endif /* MACH_KDB */ + +/* + * per-processor kernel debugger stacks + */ + .align ALIGN + .globl EXT(kgdb_stack_store) +EXT(kgdb_stack_store): + .set ., .+(INTSTACK_SIZE*NCPUS) + + +/* + * Pointers to GDT and IDT. These contain linear addresses. + */ + .align ALIGN + .globl EXT(gdtptr) +LEXT(gdtptr) + .word Times(8,GDTSZ)-1 + .long EXT(gdt)+KVTOLINEAR + + .align ALIGN + .globl EXT(idtptr) +LEXT(idtptr) + .word Times(8,IDTSZ)-1 + .long EXT(idt)+KVTOLINEAR + +#if NCPUS > 1 + .data + /* + * start_lock is very special. We initialize the + * lock at allocation time rather than at run-time. + * Although start_lock should be an instance of a + * hw_lock, we hand-code all manipulation of the lock + * because the hw_lock code may require function calls; + * and we'd rather not introduce another dependency on + * a working stack at this point. 
+ */ + .globl EXT(start_lock) +EXT(start_lock): + .long 0 /* synchronizes processor startup */ + + .globl EXT(master_is_up) +EXT(master_is_up): + .long 0 /* 1 when OK for other processors */ + /* to start */ + .globl EXT(mp_boot_pde) +EXT(mp_boot_pde): + .long 0 +#endif /* NCPUS > 1 */ + +/* + * All CPUs start here. + * + * Environment: + * protected mode, no paging, flat 32-bit address space. + * (Code/data/stack segments have base == 0, limit == 4G) + */ + .text + .align ALIGN + .globl EXT(pstart) + .globl EXT(_start) +LEXT(_start) +LEXT(pstart) + mov $0,%ax /* fs must be zeroed; */ + mov %ax,%fs /* some bootstrappers don`t do this */ + mov %ax,%gs + +#if NCPUS > 1 + jmp 1f +0: cmpl $0,PA(EXT(start_lock)) + jne 0b +1: movb $1,%eax + xchgl %eax,PA(EXT(start_lock)) /* locked */ + testl %eax,%eax + jnz 0b + + cmpl $0,PA(EXT(master_is_up)) /* are we first? */ + jne EXT(slave_start) /* no -- system already up. */ + movl $1,PA(EXT(master_is_up)) /* others become slaves */ +#endif /* NCPUS > 1 */ + +/* + * Get startup parameters. + */ + +#include + +/* + * Build initial page table directory and page tables. + * %ebx holds first available physical address. + */ + + addl $(NBPG-1),%ebx /* round first avail physical addr */ + andl $(-NBPG),%ebx /* to machine page size */ + leal -KVTOPHYS(%ebx),%eax /* convert to virtual address */ + movl %eax,PA(EXT(kpde)) /* save as kernel page table directory */ + movl %ebx,%cr3 /* set physical address in CR3 now */ + + movl %ebx,%edi /* clear page table directory */ + movl $(PTES_PER_PAGE),%ecx /* one page of ptes */ + xorl %eax,%eax + cld + rep + stosl /* edi now points to next page */ + +/* + * Use next few pages for page tables. + */ + addl $(KERNELBASEPDE),%ebx /* point to pde for kernel base */ + movl %edi,%esi /* point to end of current pte page */ + +/* + * Enter 1-1 mappings for kernel and for kernel page tables. + */ + movl $(INTEL_PTE_KERNEL),%eax /* set up pte prototype */ +0: + cmpl %esi,%edi /* at end of pte page? 
*/ + jb 1f /* if so: */ + movl %edi,%edx /* get pte address (physical) */ + andl $(-NBPG),%edx /* mask out offset in page */ + orl $(INTEL_PTE_KERNEL),%edx /* add pte bits */ + movl %edx,(%ebx) /* set pde */ + addl $4,%ebx /* point to next pde */ + movl %edi,%esi /* point to */ + addl $(NBPG),%esi /* end of new pte page */ +1: + movl %eax,(%edi) /* set pte */ + addl $4,%edi /* advance to next pte */ + addl $(NBPG),%eax /* advance to next phys page */ + cmpl %edi,%eax /* have we mapped this pte page yet? */ + jb 0b /* loop if not */ + +/* + * Zero rest of last pte page. + */ + xor %eax,%eax /* don`t map yet */ +2: cmpl %esi,%edi /* at end of pte page? */ + jae 3f + movl %eax,(%edi) /* zero mapping */ + addl $4,%edi + jmp 2b +3: + +#if NCPUS > 1 +/* + * Grab (waste?) another page for a bootstrap page directory + * for the other CPUs. We don't want the running CPUs to see + * addresses 0..3fffff mapped 1-1. + */ + movl %edi,PA(EXT(mp_boot_pde)) /* save its physical address */ + movl $(PTES_PER_PAGE),%ecx /* and clear it */ + rep + stosl +#endif /* NCPUS > 1 */ + movl %edi,PA(EXT(first_avail)) /* save first available phys addr */ + +/* + * pmap_bootstrap will enter rest of mappings. + */ + +/* + * Fix initial descriptor tables. + */ + lea PA(EXT(idt)),%esi /* fix IDT */ + movl $(IDTSZ),%ecx + movl $(PA(fix_idt_ret)),%ebx + jmp fix_desc_common /* (cannot use stack) */ +fix_idt_ret: + + lea PA(EXT(gdt)),%esi /* fix GDT */ + movl $(GDTSZ),%ecx + movl $(PA(fix_gdt_ret)),%ebx + jmp fix_desc_common /* (cannot use stack) */ +fix_gdt_ret: + + lea PA(EXT(ldt)),%esi /* fix LDT */ + movl $(LDTSZ),%ecx + movl $(PA(fix_ldt_ret)),%ebx + jmp fix_desc_common /* (cannot use stack) */ +fix_ldt_ret: + +/* + * Turn on paging. 
+ */ + movl %cr3,%eax /* retrieve kernel PDE phys address */ + movl KERNELBASEPDE(%eax),%ecx + movl %ecx,(%eax) /* set it also as pte for location */ + /* 0..3fffff, so that the code */ + /* that enters paged mode is mapped */ + /* to identical addresses after */ + /* paged mode is enabled */ + + addl $4,%eax /* 400000..7fffff */ + movl KERNELBASEPDE(%eax),%ecx + movl %ecx,(%eax) + + movl $EXT(pag_start),%ebx /* first paged code address */ + + movl %cr0,%eax + orl $(CR0_PG),%eax /* set PG bit in CR0 */ + orl $(CR0_WP),%eax + movl %eax,%cr0 /* to enable paging */ + + jmp *%ebx /* flush prefetch queue */ + +/* + * We are now paging, and can run with correct addresses. + */ +LEXT(pag_start) + lgdt EXT(gdtptr) /* load GDT */ + lidt EXT(idtptr) /* load IDT */ + LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */ + +/* + * Master is now running with correct addresses. + */ +LEXT(vstart) + mov $(KERNEL_DS),%ax /* set kernel data segment */ + mov %ax,%ds + mov %ax,%es + mov %ax,%ss + mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */ + /* for traps to kernel */ +#if MACH_KDB + mov %ax,EXT(dbtss)+TSS_SS0 /* likewise for debug task switch */ + mov %cr3,%eax /* get PDBR into debug TSS */ + mov %eax,EXT(dbtss)+TSS_PDBR + mov $0,%eax +#endif + + movw $(KERNEL_LDT),%ax /* get LDT segment */ + lldt %ax /* load LDT */ +#if MACH_KDB + mov %ax,EXT(ktss)+TSS_LDT /* store LDT in two TSS, as well... */ + mov %ax,EXT(dbtss)+TSS_LDT /* ...matters if we switch tasks */ +#endif + movw $(KERNEL_TSS),%ax + ltr %ax /* set up KTSS */ + + mov $CPU_DATA,%ax + mov %ax,%gs + + lea EXT(eintstack),%esp /* switch to the bootup stack */ + call EXT(machine_startup) /* run C code */ + /*NOTREACHED*/ + hlt + +#if NCPUS > 1 +/* + * master_up is used by the master cpu to signify that it is done + * with the interrupt stack, etc. See the code in pstart and svstart + * that this interlocks with. 
+ */ + .align ALIGN + .globl EXT(master_up) +LEXT(master_up) + pushl %ebp /* set up */ + movl %esp,%ebp /* stack frame */ + movl $0,%ecx /* unlock start_lock */ + xchgl %ecx,EXT(start_lock) /* since we are no longer using */ + /* bootstrap stack */ + leave /* pop stack frame */ + ret + +/* + * We aren't the first. Call slave_main to initialize the processor + * and get Mach going on it. + */ + .align ALIGN + .globl EXT(slave_start) +LEXT(slave_start) + cli /* disable interrupts, so we don`t */ + /* need IDT for a while */ + movl EXT(kpde)+KVTOPHYS,%ebx /* get PDE virtual address */ + addl $(KVTOPHYS),%ebx /* convert to physical address */ + + movl PA(EXT(mp_boot_pde)),%edx /* point to the bootstrap PDE */ + movl KERNELBASEPDE(%ebx),%eax + /* point to pte for KERNELBASE */ + movl %eax,KERNELBASEPDE(%edx) + /* set in bootstrap PDE */ + movl %eax,(%edx) /* set it also as pte for location */ + /* 0..3fffff, so that the code */ + /* that enters paged mode is mapped */ + /* to identical addresses after */ + /* paged mode is enabled */ + movl %edx,%cr3 /* use bootstrap PDE to enable paging */ + + movl $EXT(spag_start),%edx /* first paged code address */ + + movl %cr0,%eax + orl $(CR0_PG),%eax /* set PG bit in CR0 */ + orl $(CR0_WP),%eax + movl %eax,%cr0 /* to enable paging */ + + jmp *%edx /* flush prefetch queue. */ + +/* + * We are now paging, and can run with correct addresses. + */ +LEXT(spag_start) + + lgdt EXT(gdtptr) /* load GDT */ + lidt EXT(idtptr) /* load IDT */ + LJMP(KERNEL_CS,EXT(svstart)) /* switch to kernel code segment */ + +/* + * Slave is now running with correct addresses. 
+ */ +LEXT(svstart) + mov $(KERNEL_DS),%ax /* set kernel data segment */ + mov %ax,%ds + mov %ax,%es + mov %ax,%ss + + movl %ebx,%cr3 /* switch to the real kernel PDE */ + + CPU_NUMBER(%eax) + movl CX(EXT(interrupt_stack),%eax),%esp /* get stack */ + addl $(INTSTACK_SIZE),%esp /* point to top */ + xorl %ebp,%ebp /* for completeness */ + + movl $0,%ecx /* unlock start_lock */ + xchgl %ecx,EXT(start_lock) /* since we are no longer using */ + /* bootstrap stack */ + +/* + * switch to the per-cpu descriptor tables + */ + + pushl %eax /* pass CPU number */ + call EXT(mp_desc_init) /* set up local table */ + /* pointer returned in %eax */ + subl $4,%esp /* get space to build pseudo-descriptors */ + + CPU_NUMBER(%eax) + movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */ + movl CX(EXT(mp_gdt),%eax),%edx + addl $KVTOLINEAR,%edx + movl %edx,2(%esp) /* point to local GDT (linear address) */ + lgdt 0(%esp) /* load new GDT */ + + movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */ + movl CX(EXT(mp_idt),%eax),%edx + addl $KVTOLINEAR,%edx + movl %edx,2(%esp) /* point to local IDT (linear address) */ + lidt 0(%esp) /* load new IDT */ + + movw $(KERNEL_LDT),%ax + lldt %ax /* load new LDT */ + + movw $(KERNEL_TSS),%ax + ltr %ax /* load new KTSS */ + + mov $CPU_DATA,%ax + mov %ax,%gs + + call EXT(slave_main) /* start MACH */ + /*NOTREACHED*/ + hlt +#endif /* NCPUS > 1 */ + +/* + * Convert a descriptor from fake to real format. 
+ * + * Calls from assembly code: + * %ebx = return address (physical) CANNOT USE STACK + * %esi = descriptor table address (physical) + * %ecx = number of descriptors + * + * Calls from C: + * 0(%esp) = return address + * 4(%esp) = descriptor table address (physical) + * 8(%esp) = number of descriptors + * + * Fake descriptor format: + * bytes 0..3 base 31..0 + * bytes 4..5 limit 15..0 + * byte 6 access byte 2 | limit 19..16 + * byte 7 access byte 1 + * + * Real descriptor format: + * bytes 0..1 limit 15..0 + * bytes 2..3 base 15..0 + * byte 4 base 23..16 + * byte 5 access byte 1 + * byte 6 access byte 2 | limit 19..16 + * byte 7 base 31..24 + * + * Fake gate format: + * bytes 0..3 offset + * bytes 4..5 selector + * byte 6 word count << 4 (to match fake descriptor) + * byte 7 access byte 1 + * + * Real gate format: + * bytes 0..1 offset 15..0 + * bytes 2..3 selector + * byte 4 word count + * byte 5 access byte 1 + * bytes 6..7 offset 31..16 + */ + .globl EXT(fix_desc) +LEXT(fix_desc) + pushl %ebp /* set up */ + movl %esp,%ebp /* stack frame */ + pushl %esi /* save registers */ + pushl %ebx + movl B_ARG0,%esi /* point to first descriptor */ + movl B_ARG1,%ecx /* get number of descriptors */ + lea 0f,%ebx /* get return address */ + jmp fix_desc_common /* call internal routine */ +0: popl %ebx /* restore registers */ + popl %esi + leave /* pop stack frame */ + ret /* return */ + +fix_desc_common: +0: + movw 6(%esi),%dx /* get access byte */ + movb %dh,%al + andb $0x14,%al + cmpb $0x04,%al /* gate or descriptor? 
*/ + je 1f + +/* descriptor */ + movl 0(%esi),%eax /* get base in eax */ + rol $16,%eax /* swap 15..0 with 31..16 */ + /* (15..0 in correct place) */ + movb %al,%dl /* combine bits 23..16 with ACC1 */ + /* in dh/dl */ + movb %ah,7(%esi) /* store bits 31..24 in correct place */ + movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */ + movl %eax,0(%esi) /* store (bytes 0..3 correct) */ + movw %dx,4(%esi) /* store bytes 4..5 */ + jmp 2f + +/* gate */ +1: + movw 4(%esi),%ax /* get selector */ + shrb $4,%dl /* shift word count to proper place */ + movw %dx,4(%esi) /* store word count / ACC1 */ + movw 2(%esi),%dx /* get offset 16..31 */ + movw %dx,6(%esi) /* store in correct place */ + movw %ax,2(%esi) /* store selector in correct place */ +2: + addl $8,%esi /* bump to next descriptor */ + loop 0b /* repeat */ + jmp *%ebx /* all done */ + +/* + * put arg in kbd leds and spin a while + * eats eax, ecx, edx + */ +#define K_RDWR 0x60 +#define K_CMD_LEDS 0xed +#define K_STATUS 0x64 +#define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */ +#define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */ + +ENTRY(set_kbd_leds) + mov S_ARG0,%cl /* save led value */ + +0: inb $(K_STATUS),%al /* get kbd status */ + testb $(K_IBUF_FULL),%al /* input busy? */ + jne 0b /* loop until not */ + + mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */ + outb %al,$(K_RDWR) /* to kbd */ + +0: inb $(K_STATUS),%al /* get kbd status */ + testb $(K_OBUF_FULL),%al /* output present? */ + je 0b /* loop if not */ + + inb $(K_RDWR),%al /* read status (and discard) */ + +0: inb $(K_STATUS),%al /* get kbd status */ + testb $(K_IBUF_FULL),%al /* input busy? 
*/ + jne 0b /* loop until not */ + + mov %cl,%al /* move led value */ + outb %al,$(K_RDWR) /* to kbd */ + + movl $10000000,%ecx /* spin */ +0: nop + nop + loop 0b /* a while */ + + ret diff --git a/osfmk/i386/task.h b/osfmk/i386/task.h new file mode 100644 index 000000000..931b12a2a --- /dev/null +++ b/osfmk/i386/task.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * No machine dependant task fields + */ + +#define MACHINE_TASK + diff --git a/osfmk/i386/thread.h b/osfmk/i386/thread.h new file mode 100644 index 000000000..0aa894348 --- /dev/null +++ b/osfmk/i386/thread.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: machine/thread.h + * + * This file contains the structure definitions for the thread + * state as applied to I386 processors. + */ + +#ifndef _I386_THREAD_H_ +#define _I386_THREAD_H_ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +/* + * i386_exception_link: + * + * This structure lives at the high end of the kernel stack. + * It points to the current thread`s user registers. + */ +struct i386_exception_link { + struct i386_saved_state *saved_state; +}; + + +/* + * On the kernel stack is: + * stack: ... 
+ * struct i386_exception_link + * struct i386_kernel_state + * stack+KERNEL_STACK_SIZE + */ + +#define STACK_IKS(stack) \ + ((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1) +#define STACK_IEL(stack) \ + ((struct i386_exception_link *)STACK_IKS(stack) - 1) + +#if NCPUS > 1 +#include +#endif + +/* + * Boot-time data for master (or only) CPU + */ +extern struct fake_descriptor idt[IDTSZ]; +extern struct fake_descriptor gdt[GDTSZ]; +extern struct fake_descriptor ldt[LDTSZ]; +extern struct i386_tss ktss; +#if MACH_KDB +extern char db_stack_store[]; +extern char db_task_stack_store[]; +extern struct i386_tss dbtss; +extern void db_task_start(void); +#endif /* MACH_KDB */ +#if NCPUS > 1 +#define curr_gdt(mycpu) (mp_gdt[mycpu]) +#define curr_ktss(mycpu) (mp_ktss[mycpu]) +#else +#define curr_gdt(mycpu) (gdt) +#define curr_ktss(mycpu) (&ktss) +#endif + +#define gdt_desc_p(mycpu,sel) \ + ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)]) + +/* + * Return address of the function that called current function, given + * address of the first parameter of current function. + */ +#define GET_RETURN_PC(addr) (*((vm_offset_t *)addr - 1)) + +/* + * Defining this indicates that MD code will supply an exception() + * routine, conformant with kern/exception.c (dependency alert!) + * but which does wonderfully fast, machine-dependent magic. 
+ */ +#define MACHINE_FAST_EXCEPTION 1 + +/* + * MD Macro to fill up global stack state, + * keeping the MD structure sizes + games private + */ +#define MACHINE_STACK_STASH(stack) \ +MACRO_BEGIN \ + mp_disable_preemption(); \ + kernel_stack[cpu_number()] = (stack) + \ + (KERNEL_STACK_SIZE - sizeof (struct i386_exception_link) \ + - sizeof (struct i386_kernel_state)), \ + active_stacks[cpu_number()] = (stack); \ + mp_enable_preemption(); \ +MACRO_END + +#endif /* _I386_THREAD_H_ */ diff --git a/osfmk/i386/thread_act.h b/osfmk/i386/thread_act.h new file mode 100644 index 000000000..66e5af0c3 --- /dev/null +++ b/osfmk/i386/thread_act.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _I386_THREAD_ACT_H_ +#define _I386_THREAD_ACT_H_ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +/* + * i386_saved_state: + * + * Has been exported to servers. See: mach/i386/thread_status.h + * + * This structure corresponds to the state of user registers + * as saved upon kernel entry. It lives in the pcb. 
+ * It is also pushed onto the stack for exceptions in the kernel. + * For performance, it is also used directly in syscall exceptions + * if the server has requested i386_THREAD_STATE flavor for the exception + * port. + * + * We define the following as an alias for the "esp" field of the + * structure, because we actually save cr2 here, not the kernel esp. + */ +#define cr2 esp + +/* + * Save area for user floating-point state. + * Allocated only when necessary. + */ + +struct i386_fpsave_state { + boolean_t fp_valid; + struct i386_fp_save fp_save_state; + struct i386_fp_regs fp_regs; +}; + +/* + * v86_assist_state: + * + * This structure provides data to simulate 8086 mode + * interrupts. It lives in the pcb. + */ + +struct v86_assist_state { + vm_offset_t int_table; + unsigned short int_count; + unsigned short flags; /* 8086 flag bits */ +}; +#define V86_IF_PENDING 0x8000 /* unused bit */ + +/* + * i386_interrupt_state: + * + * This structure describes the set of registers that must + * be pushed on the current ring-0 stack by an interrupt before + * we can switch to the interrupt stack. + */ + +struct i386_interrupt_state { + int es; + int ds; + int edx; + int ecx; + int eax; + int eip; + int cs; + int efl; +}; + +/* + * i386_kernel_state: + * + * This structure corresponds to the state of kernel registers + * as saved in a context-switch. It lives at the base of the stack. + */ + +struct i386_kernel_state { + int k_ebx; /* kernel context */ + int k_esp; + int k_ebp; + int k_edi; + int k_esi; + int k_eip; +}; + +/* + * i386_machine_state: + * + * This structure corresponds to special machine state. + * It lives in the pcb. It is not saved by default. 
+ */ + +struct i386_machine_state { + iopb_tss_t io_tss; + struct user_ldt * ldt; + struct i386_fpsave_state *ifps; + struct v86_assist_state v86s; +}; + +typedef struct pcb { + struct i386_interrupt_state iis[2]; /* interrupt and NMI */ + struct i386_saved_state iss; + struct i386_machine_state ims; +#ifdef MACH_BSD + unsigned long cthread_self; /* for use of cthread package */ +#endif + decl_simple_lock_data(,lock) +} *pcb_t; + +/* + * Maps state flavor to number of words in the state: + */ +extern unsigned int state_count[]; + + +#define USER_REGS(ThrAct) (&(ThrAct)->mact.pcb->iss) + +#define act_machine_state_ptr(ThrAct) (thread_state_t)USER_REGS(ThrAct) + + +#define is_user_thread(ThrAct) \ + ((USER_REGS(ThrAct)->efl & EFL_VM) \ + || ((USER_REGS(ThrAct)->cs & 0x03) != 0)) + +#define user_pc(ThrAct) (USER_REGS(ThrAct)->eip) +#define user_sp(ThrAct) (USER_REGS(ThrAct)->uesp) + +#define syscall_emulation_sync(task) /* do nothing */ + +typedef struct MachineThrAct { + /* + * pointer to process control block + * (actual storage may as well be here, too) + */ + struct pcb xxx_pcb; + pcb_t pcb; + +} MachineThrAct, *MachineThrAct_t; + +#endif /* _I386_THREAD_ACT_H_ */ diff --git a/osfmk/i386/timer.h b/osfmk/i386/timer.h new file mode 100644 index 000000000..713bb946a --- /dev/null +++ b/osfmk/i386/timer.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _I386_TIMER_H_ +#define _I386_TIMER_H_ + +/* + * Machine dependent timer definitions. + */ + +#include + + +#endif /* _I386_TIMER_H_ */ diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c new file mode 100644 index 000000000..cd7925ee7 --- /dev/null +++ b/osfmk/i386/trap.c @@ -0,0 +1,1296 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * Hardware trap/fault handler. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_KGDB +#include +#endif /* MACH_KGDB */ + +#include + +#if MACH_KGDB +#include +#endif /* MACH_KGDB */ + +#if MACH_KDB +#include +#include +#include +#include +#endif /* MACH_KDB */ + +#include + +#include + +/* + * Forward declarations + */ +extern void user_page_fault_continue( + kern_return_t kr); + +extern boolean_t v86_assist( + thread_t thread, + struct i386_saved_state *regs); + +extern boolean_t check_io_fault( + struct i386_saved_state *regs); + +extern int inst_fetch( + int eip, + int cs); + +void +thread_syscall_return( + kern_return_t ret) +{ + register thread_act_t thr_act = current_act(); + register struct i386_saved_state *regs = USER_REGS(thr_act); + regs->eax = ret; + thread_exception_return(); + /*NOTREACHED*/ +} + + +#if MACH_KDB +boolean_t debug_all_traps_with_kdb = FALSE; +extern struct db_watchpoint *db_watchpoint_list; +extern boolean_t db_watchpoints_inserted; +extern boolean_t db_breakpoints_inserted; + +void +thread_kdb_return(void) +{ + register thread_act_t thr_act = current_act(); + register thread_t cur_thr = current_thread(); + register struct i386_saved_state *regs = USER_REGS(thr_act); + + if (kdb_trap(regs->trapno, regs->err, regs)) { +#if MACH_LDEBUG + assert(cur_thr->mutex_count == 0); +#endif /* MACH_LDEBUG */ + check_simple_locks(); + thread_exception_return(); + /*NOTREACHED*/ + } +} +boolean_t let_ddb_vm_fault = FALSE; + +#if NCPUS > 1 +extern int kdb_active[NCPUS]; +#endif /* NCPUS > 1 */ + +#endif /* MACH_KDB */ + +void +user_page_fault_continue( + kern_return_t kr) +{ + register thread_act_t thr_act = current_act(); + register thread_t cur_thr = current_thread(); + register struct 
i386_saved_state *regs = USER_REGS(thr_act); + + if ((kr == KERN_SUCCESS) && (kr == KERN_ABORTED)) { +#if MACH_KDB + if (!db_breakpoints_inserted) { + db_set_breakpoints(); + } + if (db_watchpoint_list && + db_watchpoints_inserted && + (regs->err & T_PF_WRITE) && + db_find_watchpoint(thr_act->map, + (vm_offset_t)regs->cr2, + regs)) + kdb_trap(T_WATCHPOINT, 0, regs); +#endif /* MACH_KDB */ + thread_exception_return(); + /*NOTREACHED*/ + } + +#if MACH_KDB + if (debug_all_traps_with_kdb && + kdb_trap(regs->trapno, regs->err, regs)) { +#if MACH_LDEBUG + assert(cur_thr->mutex_count == 0); +#endif /* MACH_LDEBUG */ + check_simple_locks(); + thread_exception_return(); + /*NOTREACHED*/ + } +#endif /* MACH_KDB */ + + i386_exception(EXC_BAD_ACCESS, kr, regs->cr2); + /*NOTREACHED*/ +} + +/* + * Fault recovery in copyin/copyout routines. + */ +struct recovery { + int fault_addr; + int recover_addr; +}; + +extern struct recovery recover_table[]; +extern struct recovery recover_table_end[]; + +/* + * Recovery from Successful fault in copyout does not + * return directly - it retries the pte check, since + * the 386 ignores write protection in kernel mode. + */ +extern struct recovery retry_table[]; +extern struct recovery retry_table_end[]; + +char * trap_type[] = {TRAP_NAMES}; +int TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]); + +/* + * Trap from kernel mode. Only page-fault errors are recoverable, + * and then only in special circumstances. All other errors are + * fatal. Return value indicates if trap was handled. 
+ */ +boolean_t +kernel_trap( + register struct i386_saved_state *regs) +{ + int exc; + int code; + int subcode; + int interruptible; + register int type; + vm_map_t map; + kern_return_t result; + register thread_t thread; + thread_act_t thr_act; + etap_data_t probe_data; + pt_entry_t *pte; + extern vm_offset_t vm_last_phys; + + type = regs->trapno; + code = regs->err; + thread = current_thread(); + thr_act = current_act(); + + ETAP_DATA_LOAD(probe_data[0], regs->trapno); + ETAP_DATA_LOAD(probe_data[1], MACH_PORT_NULL); + ETAP_DATA_LOAD(probe_data[2], MACH_PORT_NULL); + ETAP_PROBE_DATA(ETAP_P_EXCEPTION, + 0, + thread, + &probe_data, + ETAP_DATA_ENTRY*3); + + switch (type) { + case T_PREEMPT: + return (TRUE); + + case T_NO_FPU: + fpnoextflt(); + return (TRUE); + + case T_FPU_FAULT: + fpextovrflt(); + return (TRUE); + + case T_FLOATING_POINT_ERROR: + fpexterrflt(); + return (TRUE); + + case T_PAGE_FAULT: + /* + * If the current map is a submap of the kernel map, + * and the address is within that map, fault on that + * map. If the same check is done in vm_fault + * (vm_map_lookup), we may deadlock on the kernel map + * lock. + */ +#if MACH_KDB + mp_disable_preemption(); + if (db_active +#if NCPUS > 1 + && kdb_active[cpu_number()] +#endif /* NCPUS > 1 */ + && !let_ddb_vm_fault) { + /* + * Force kdb to handle this one. + */ + mp_enable_preemption(); + return (FALSE); + } + mp_enable_preemption(); +#endif /* MACH_KDB */ + subcode = regs->cr2; /* get faulting address */ + + if (subcode > LINEAR_KERNEL_ADDRESS) { + map = kernel_map; + subcode -= LINEAR_KERNEL_ADDRESS; + } else if (thr_act == THR_ACT_NULL || thread == THREAD_NULL) + map = kernel_map; + else { + map = thr_act->map; + } + +#if MACH_KDB + /* + * Check for watchpoint on kernel static data. 
+ * vm_fault would fail in this case + */ + if (map == kernel_map && + db_watchpoint_list && + db_watchpoints_inserted && + (code & T_PF_WRITE) && + (vm_offset_t)subcode < vm_last_phys && + ((*(pte = pmap_pte(kernel_pmap, (vm_offset_t)subcode))) & + INTEL_PTE_WRITE) == 0) { + *pte = INTEL_PTE_VALID | INTEL_PTE_WRITE | + pa_to_pte(trunc_page((vm_offset_t)subcode) - + VM_MIN_KERNEL_ADDRESS); + result = KERN_SUCCESS; + } else +#endif /* MACH_KDB */ + { + /* + * Since the 386 ignores write protection in + * kernel mode, always try for write permission + * first. If that fails and the fault was a + * read fault, retry with read permission. + */ + if (map == kernel_map) { + register struct recovery *rp; + + interruptible = THREAD_UNINT; + for (rp = recover_table; rp < recover_table_end; rp++) { + if (regs->eip == rp->fault_addr) { + interruptible = THREAD_ABORTSAFE; + break; + } + } + } + + result = vm_fault(map, + trunc_page((vm_offset_t)subcode), + VM_PROT_READ|VM_PROT_WRITE, + FALSE, + (map == kernel_map) ? interruptible : THREAD_ABORTSAFE); + } +#if MACH_KDB + if (result == KERN_SUCCESS) { + /* Look for watchpoints */ + if (db_watchpoint_list && + db_watchpoints_inserted && + (code & T_PF_WRITE) && + db_find_watchpoint(map, + (vm_offset_t)subcode, regs)) + kdb_trap(T_WATCHPOINT, 0, regs); + } + else +#endif /* MACH_KDB */ + if ((code & T_PF_WRITE) == 0 && + result == KERN_PROTECTION_FAILURE) + { + /* + * Must expand vm_fault by hand, + * so that we can ask for read-only access + * but enter a (kernel)writable mapping. + */ + result = intel_read_fault(map, + trunc_page((vm_offset_t)subcode)); + } + + if (result == KERN_SUCCESS) { + /* + * Certain faults require that we back up + * the EIP. 
+ */ + register struct recovery *rp; + + for (rp = retry_table; rp < retry_table_end; rp++) { + if (regs->eip == rp->fault_addr) { + regs->eip = rp->recover_addr; + break; + } + } + return (TRUE); + } + + /* fall through */ + + case T_GENERAL_PROTECTION: + + /* + * If there is a failure recovery address + * for this fault, go there. + */ + { + register struct recovery *rp; + + for (rp = recover_table; + rp < recover_table_end; + rp++) { + if (regs->eip == rp->fault_addr) { + regs->eip = rp->recover_addr; + return (TRUE); + } + } + } + + /* + * Check thread recovery address also - + * v86 assist uses it. + */ + if (thread->recover) { + regs->eip = thread->recover; + thread->recover = 0; + return (TRUE); + } + + /* + * Unanticipated page-fault errors in kernel + * should not happen. + */ + /* fall through... */ + + default: + /* + * ...and return failure, so that locore can call into + * debugger. + */ +#if MACH_KDP + kdp_i386_trap(type, regs, result, regs->cr2); +#endif + return (FALSE); + } + return (TRUE); +} + +/* + * Called if both kernel_trap() and kdb_trap() fail. + */ +void +panic_trap( + register struct i386_saved_state *regs) +{ + int code; + register int type; + + type = regs->trapno; + code = regs->err; + + printf("trap type %d, code = %x, pc = %x\n", + type, code, regs->eip); + panic("trap"); +} + + +/* + * Trap from user mode. + */ +void +user_trap( + register struct i386_saved_state *regs) +{ + int exc; + int code; + int subcode; + register int type; + vm_map_t map; + vm_prot_t prot; + kern_return_t result; + register thread_act_t thr_act = current_act(); + thread_t thread = (thr_act ? thr_act->thread : THREAD_NULL); + boolean_t kernel_act = thr_act->kernel_loaded; + etap_data_t probe_data; + + if (regs->efl & EFL_VM) { + /* + * If hardware assist can handle exception, + * continue execution. 
+ */ + if (v86_assist(thread, regs)) + return; + } + + type = regs->trapno; + code = 0; + subcode = 0; + + switch (type) { + + case T_DIVIDE_ERROR: + exc = EXC_ARITHMETIC; + code = EXC_I386_DIV; + break; + + case T_DEBUG: + exc = EXC_BREAKPOINT; + code = EXC_I386_SGL; + break; + + case T_INT3: + exc = EXC_BREAKPOINT; + code = EXC_I386_BPT; + break; + + case T_OVERFLOW: + exc = EXC_ARITHMETIC; + code = EXC_I386_INTO; + break; + + case T_OUT_OF_BOUNDS: + exc = EXC_SOFTWARE; + code = EXC_I386_BOUND; + break; + + case T_INVALID_OPCODE: + exc = EXC_BAD_INSTRUCTION; + code = EXC_I386_INVOP; + break; + + case T_NO_FPU: + case 32: /* XXX */ + fpnoextflt(); + return; + + case T_FPU_FAULT: + fpextovrflt(); + return; + + case 10: /* invalid TSS == iret with NT flag set */ + exc = EXC_BAD_INSTRUCTION; + code = EXC_I386_INVTSSFLT; + subcode = regs->err & 0xffff; + break; + + case T_SEGMENT_NOT_PRESENT: + exc = EXC_BAD_INSTRUCTION; + code = EXC_I386_SEGNPFLT; + subcode = regs->err & 0xffff; + break; + + case T_STACK_FAULT: + exc = EXC_BAD_INSTRUCTION; + code = EXC_I386_STKFLT; + subcode = regs->err & 0xffff; + break; + + case T_GENERAL_PROTECTION: + if (!(regs->efl & EFL_VM)) { + if (check_io_fault(regs)) + return; + } + exc = EXC_BAD_INSTRUCTION; + code = EXC_I386_GPFLT; + subcode = regs->err & 0xffff; + break; + + case T_PAGE_FAULT: + subcode = regs->cr2; + prot = VM_PROT_READ|VM_PROT_WRITE; + if (kernel_act == FALSE) { + if (!(regs->err & T_PF_WRITE)) + prot = VM_PROT_READ; + (void) user_page_fault_continue(vm_fault(thr_act->map, + trunc_page((vm_offset_t)subcode), + prot, + FALSE, + THREAD_ABORTSAFE)); + /* NOTREACHED */ + } + else { + if (subcode > LINEAR_KERNEL_ADDRESS) { + map = kernel_map; + subcode -= LINEAR_KERNEL_ADDRESS; + } + result = vm_fault(thr_act->map, + trunc_page((vm_offset_t)subcode), + prot, + FALSE, + (map == kernel_map) ? 
THREAD_UNINT : THREAD_ABORTSAFE); + if ((result != KERN_SUCCESS) && (result != KERN_ABORTED)) { + /* + * Must expand vm_fault by hand, + * so that we can ask for read-only access + * but enter a (kernel) writable mapping. + */ + result = intel_read_fault(thr_act->map, + trunc_page((vm_offset_t)subcode)); + } + user_page_fault_continue(result); + /*NOTREACHED*/ + } + break; + + case T_FLOATING_POINT_ERROR: + fpexterrflt(); + return; + + default: +#if MACH_KGDB + Debugger("Unanticipated user trap"); + return; +#endif /* MACH_KGDB */ +#if MACH_KDB + if (kdb_trap(type, regs->err, regs)) + return; +#endif /* MACH_KDB */ + printf("user trap type %d, code = %x, pc = %x\n", + type, regs->err, regs->eip); + panic("user trap"); + return; + } + +#if MACH_KDB + if (debug_all_traps_with_kdb && + kdb_trap(type, regs->err, regs)) + return; +#endif /* MACH_KDB */ + +#if ETAP_EVENT_MONITOR + if (thread != THREAD_NULL) { + ETAP_DATA_LOAD(probe_data[0], regs->trapno); + ETAP_DATA_LOAD(probe_data[1], + thr_act->exc_actions[exc].port); + ETAP_DATA_LOAD(probe_data[2], + thr_act->task->exc_actions[exc].port); + ETAP_PROBE_DATA(ETAP_P_EXCEPTION, + 0, + thread, + &probe_data, + ETAP_DATA_ENTRY*3); + } +#endif /* ETAP_EVENT_MONITOR */ + + i386_exception(exc, code, subcode); + /*NOTREACHED*/ +} + +/* + * V86 mode assist for interrupt handling. + */ +boolean_t v86_assist_on = TRUE; +boolean_t v86_unsafe_ok = FALSE; +boolean_t v86_do_sti_cli = TRUE; +boolean_t v86_do_sti_immediate = FALSE; + +#define V86_IRET_PENDING 0x4000 + +int cli_count = 0; +int sti_count = 0; + +boolean_t +v86_assist( + thread_t thread, + register struct i386_saved_state *regs) +{ + register struct v86_assist_state *v86 = &thread->top_act->mact.pcb->ims.v86s; + +/* + * Build an 8086 address. Use only when off is known to be 16 bits. 
+ */ +#define Addr8086(seg,off) ((((seg) & 0xffff) << 4) + (off)) + +#define EFL_V86_SAFE ( EFL_OF | EFL_DF | EFL_TF \ + | EFL_SF | EFL_ZF | EFL_AF \ + | EFL_PF | EFL_CF ) + struct iret_32 { + int eip; + int cs; + int eflags; + }; + struct iret_16 { + unsigned short ip; + unsigned short cs; + unsigned short flags; + }; + union iret_struct { + struct iret_32 iret_32; + struct iret_16 iret_16; + }; + + struct int_vec { + unsigned short ip; + unsigned short cs; + }; + + if (!v86_assist_on) + return FALSE; + + /* + * If delayed STI pending, enable interrupts. + * Turn off tracing if on only to delay STI. + */ + if (v86->flags & V86_IF_PENDING) { + v86->flags &= ~V86_IF_PENDING; + v86->flags |= EFL_IF; + if ((v86->flags & EFL_TF) == 0) + regs->efl &= ~EFL_TF; + } + + if (regs->trapno == T_DEBUG) { + + if (v86->flags & EFL_TF) { + /* + * Trace flag was also set - it has priority + */ + return FALSE; /* handle as single-step */ + } + /* + * Fall through to check for interrupts. + */ + } + else if (regs->trapno == T_GENERAL_PROTECTION) { + /* + * General protection error - must be an 8086 instruction + * to emulate. + */ + register int eip; + boolean_t addr_32 = FALSE; + boolean_t data_32 = FALSE; + int io_port; + + /* + * Set up error handler for bad instruction/data + * fetches. 
+ */ + __asm__("movl $(addr_error), %0" : : "m" (thread->recover)); + + eip = regs->eip; + while (TRUE) { + unsigned char opcode; + + if (eip > 0xFFFF) { + thread->recover = 0; + return FALSE; /* GP fault: IP out of range */ + } + + opcode = *(unsigned char *)Addr8086(regs->cs,eip); + eip++; + switch (opcode) { + case 0xf0: /* lock */ + case 0xf2: /* repne */ + case 0xf3: /* repe */ + case 0x2e: /* cs */ + case 0x36: /* ss */ + case 0x3e: /* ds */ + case 0x26: /* es */ + case 0x64: /* fs */ + case 0x65: /* gs */ + /* ignore prefix */ + continue; + + case 0x66: /* data size */ + data_32 = TRUE; + continue; + + case 0x67: /* address size */ + addr_32 = TRUE; + continue; + + case 0xe4: /* inb imm */ + case 0xe5: /* inw imm */ + case 0xe6: /* outb imm */ + case 0xe7: /* outw imm */ + io_port = *(unsigned char *)Addr8086(regs->cs, eip); + eip++; + goto do_in_out; + + case 0xec: /* inb dx */ + case 0xed: /* inw dx */ + case 0xee: /* outb dx */ + case 0xef: /* outw dx */ + case 0x6c: /* insb */ + case 0x6d: /* insw */ + case 0x6e: /* outsb */ + case 0x6f: /* outsw */ + io_port = regs->edx & 0xffff; + + do_in_out: + if (!data_32) + opcode |= 0x6600; /* word IO */ + + switch (emulate_io(regs, opcode, io_port)) { + case EM_IO_DONE: + /* instruction executed */ + break; + case EM_IO_RETRY: + /* port mapped, retry instruction */ + thread->recover = 0; + return TRUE; + case EM_IO_ERROR: + /* port not mapped */ + thread->recover = 0; + return FALSE; + } + break; + + case 0xfa: /* cli */ + if (!v86_do_sti_cli) { + thread->recover = 0; + return (FALSE); + } + + v86->flags &= ~EFL_IF; + /* disable simulated interrupts */ + cli_count++; + break; + + case 0xfb: /* sti */ + if (!v86_do_sti_cli) { + thread->recover = 0; + return (FALSE); + } + + if ((v86->flags & EFL_IF) == 0) { + if (v86_do_sti_immediate) { + v86->flags |= EFL_IF; + } else { + v86->flags |= V86_IF_PENDING; + regs->efl |= EFL_TF; + } + /* single step to set IF next inst. 
*/ + } + sti_count++; + break; + + case 0x9c: /* pushf */ + { + int flags; + vm_offset_t sp; + int size; + + flags = regs->efl; + if ((v86->flags & EFL_IF) == 0) + flags &= ~EFL_IF; + + if ((v86->flags & EFL_TF) == 0) + flags &= ~EFL_TF; + else flags |= EFL_TF; + + sp = regs->uesp; + if (!addr_32) + sp &= 0xffff; + else if (sp > 0xffff) + goto stack_error; + size = (data_32) ? 4 : 2; + if (sp < size) + goto stack_error; + sp -= size; + if (copyout((char *)&flags, + (char *)Addr8086(regs->ss,sp), + size)) + goto addr_error; + if (addr_32) + regs->uesp = sp; + else + regs->uesp = (regs->uesp & 0xffff0000) | sp; + break; + } + + case 0x9d: /* popf */ + { + vm_offset_t sp; + int nflags; + + sp = regs->uesp; + if (!addr_32) + sp &= 0xffff; + else if (sp > 0xffff) + goto stack_error; + + if (data_32) { + if (sp > 0xffff - sizeof(int)) + goto stack_error; + nflags = *(int *)Addr8086(regs->ss,sp); + sp += sizeof(int); + } + else { + if (sp > 0xffff - sizeof(short)) + goto stack_error; + nflags = *(unsigned short *) + Addr8086(regs->ss,sp); + sp += sizeof(short); + } + if (addr_32) + regs->uesp = sp; + else + regs->uesp = (regs->uesp & 0xffff0000) | sp; + + if (v86->flags & V86_IRET_PENDING) { + v86->flags = nflags & (EFL_TF | EFL_IF); + v86->flags |= V86_IRET_PENDING; + } else { + v86->flags = nflags & (EFL_TF | EFL_IF); + } + regs->efl = (regs->efl & ~EFL_V86_SAFE) + | (nflags & EFL_V86_SAFE); + break; + } + case 0xcf: /* iret */ + { + vm_offset_t sp; + int nflags; + int size; + union iret_struct iret_struct; + + v86->flags &= ~V86_IRET_PENDING; + sp = regs->uesp; + if (!addr_32) + sp &= 0xffff; + else if (sp > 0xffff) + goto stack_error; + + if (data_32) { + if (sp > 0xffff - sizeof(struct iret_32)) + goto stack_error; + iret_struct.iret_32 = + *(struct iret_32 *) Addr8086(regs->ss,sp); + sp += sizeof(struct iret_32); + } + else { + if (sp > 0xffff - sizeof(struct iret_16)) + goto stack_error; + iret_struct.iret_16 = + *(struct iret_16 *) Addr8086(regs->ss,sp); + sp += 
sizeof(struct iret_16); + } + if (addr_32) + regs->uesp = sp; + else + regs->uesp = (regs->uesp & 0xffff0000) | sp; + + if (data_32) { + eip = iret_struct.iret_32.eip; + regs->cs = iret_struct.iret_32.cs & 0xffff; + nflags = iret_struct.iret_32.eflags; + } + else { + eip = iret_struct.iret_16.ip; + regs->cs = iret_struct.iret_16.cs; + nflags = iret_struct.iret_16.flags; + } + + v86->flags = nflags & (EFL_TF | EFL_IF); + regs->efl = (regs->efl & ~EFL_V86_SAFE) + | (nflags & EFL_V86_SAFE); + break; + } + default: + /* + * Instruction not emulated here. + */ + thread->recover = 0; + return FALSE; + } + break; /* exit from 'while TRUE' */ + } + regs->eip = (regs->eip & 0xffff0000 | eip); + } + else { + /* + * Not a trap we handle. + */ + thread->recover = 0; + return FALSE; + } + + if ((v86->flags & EFL_IF) && ((v86->flags & V86_IRET_PENDING)==0)) { + + struct v86_interrupt_table *int_table; + int int_count; + int vec; + int i; + + int_table = (struct v86_interrupt_table *) v86->int_table; + int_count = v86->int_count; + + vec = 0; + for (i = 0; i < int_count; int_table++, i++) { + if (!int_table->mask && int_table->count > 0) { + int_table->count--; + vec = int_table->vec; + break; + } + } + if (vec != 0) { + /* + * Take this interrupt + */ + vm_offset_t sp; + struct iret_16 iret_16; + struct int_vec int_vec; + + sp = regs->uesp & 0xffff; + if (sp < sizeof(struct iret_16)) + goto stack_error; + sp -= sizeof(struct iret_16); + iret_16.ip = regs->eip; + iret_16.cs = regs->cs; + iret_16.flags = regs->efl & 0xFFFF; + if ((v86->flags & EFL_TF) == 0) + iret_16.flags &= ~EFL_TF; + else iret_16.flags |= EFL_TF; + + (void) memcpy((char *) &int_vec, + (char *) (sizeof(struct int_vec) * vec), + sizeof (struct int_vec)); + if (copyout((char *)&iret_16, + (char *)Addr8086(regs->ss,sp), + sizeof(struct iret_16))) + goto addr_error; + regs->uesp = (regs->uesp & 0xFFFF0000) | (sp & 0xffff); + regs->eip = int_vec.ip; + regs->cs = int_vec.cs; + regs->efl &= ~EFL_TF; + v86->flags &= 
~(EFL_IF | EFL_TF); + v86->flags |= V86_IRET_PENDING; + } + } + + thread->recover = 0; + return TRUE; + + /* + * On address error, report a page fault. + * XXX report GP fault - we don`t save + * the faulting address. + */ + addr_error: + __asm__("addr_error:;"); + thread->recover = 0; + return FALSE; + + /* + * On stack address error, return stack fault (12). + */ + stack_error: + thread->recover = 0; + regs->trapno = T_STACK_FAULT; + return FALSE; +} + +/* + * Handle AST traps for i386. + * Check for delayed floating-point exception from + * AT-bus machines. + */ + +extern void log_thread_action (thread_t, char *); + +void +i386_astintr(int preemption) +{ + int mycpu; + ast_t mask = AST_ALL; + spl_t s; + thread_t self = current_thread(); + + s = splsched(); /* block interrupts to check reasons */ + mp_disable_preemption(); + mycpu = cpu_number(); + if (need_ast[mycpu] & AST_I386_FP) { + /* + * AST was for delayed floating-point exception - + * FP interrupt occured while in kernel. + * Turn off this AST reason and handle the FPU error. + */ + + ast_off(AST_I386_FP); + mp_enable_preemption(); + splx(s); + + fpexterrflt(); + } + else { + /* + * Not an FPU trap. Handle the AST. + * Interrupts are still blocked. + */ + +#ifdef XXX + if (preemption) { + + /* + * We don't want to process any AST if we were in + * kernel-mode and the current thread is in any + * funny state (waiting and/or suspended). + */ + + thread_lock (self); + + if (thread_not_preemptable(self) || self->preempt) { + ast_off(AST_URGENT); + thread_unlock (self); + mp_enable_preemption(); + splx(s); + return; + } + else mask = AST_PREEMPT; + mp_enable_preemption(); + +/* + self->preempt = TH_NOT_PREEMPTABLE; +*/ + + thread_unlock (self); + } else { + mp_enable_preemption(); + } +#else + mp_enable_preemption(); +#endif + + ast_taken(preemption, mask, s +#if FAST_IDLE + ,NO_IDLE_THREAD +#endif /* FAST_IDLE */ + ); +/* + self->preempt = TH_PREEMPTABLE; +*/ + } +} + +/* + * Handle exceptions for i386. 
+ * + * If we are an AT bus machine, we must turn off the AST for a + * delayed floating-point exception. + * + * If we are providing floating-point emulation, we may have + * to retrieve the real register values from the floating point + * emulator. + */ +void +i386_exception( + int exc, + int code, + int subcode) +{ + spl_t s; + exception_data_type_t codes[EXCEPTION_CODE_MAX]; + + /* + * Turn off delayed FPU error handling. + */ + s = splsched(); + mp_disable_preemption(); + ast_off(AST_I386_FP); + mp_enable_preemption(); + splx(s); + + codes[0] = code; /* new exception interface */ + codes[1] = subcode; + exception(exc, codes, 2); + /*NOTREACHED*/ +} + +boolean_t +check_io_fault( + struct i386_saved_state *regs) +{ + int eip, opcode, io_port; + boolean_t data_16 = FALSE; + + /* + * Get the instruction. + */ + eip = regs->eip; + + for (;;) { + opcode = inst_fetch(eip, regs->cs); + eip++; + switch (opcode) { + case 0x66: /* data-size prefix */ + data_16 = TRUE; + continue; + + case 0xf3: /* rep prefix */ + case 0x26: /* es */ + case 0x2e: /* cs */ + case 0x36: /* ss */ + case 0x3e: /* ds */ + case 0x64: /* fs */ + case 0x65: /* gs */ + continue; + + case 0xE4: /* inb imm */ + case 0xE5: /* inl imm */ + case 0xE6: /* outb imm */ + case 0xE7: /* outl imm */ + /* port is immediate byte */ + io_port = inst_fetch(eip, regs->cs); + eip++; + break; + + case 0xEC: /* inb dx */ + case 0xED: /* inl dx */ + case 0xEE: /* outb dx */ + case 0xEF: /* outl dx */ + case 0x6C: /* insb */ + case 0x6D: /* insl */ + case 0x6E: /* outsb */ + case 0x6F: /* outsl */ + /* port is in DX register */ + io_port = regs->edx & 0xFFFF; + break; + + default: + return FALSE; + } + break; + } + + if (data_16) + opcode |= 0x6600; /* word IO */ + + switch (emulate_io(regs, opcode, io_port)) { + case EM_IO_DONE: + /* instruction executed */ + regs->eip = eip; + return TRUE; + + case EM_IO_RETRY: + /* port mapped, retry instruction */ + return TRUE; + + case EM_IO_ERROR: + /* port not mapped */ + 
return FALSE; + } + return FALSE; +} + +void +kernel_preempt_check (void) +{ + mp_disable_preemption(); + if ((need_ast[cpu_number()] & AST_URGENT) && +#if NCPUS > 1 + get_interrupt_level() == 1 +#else /* NCPUS > 1 */ + get_interrupt_level() == 0 +#endif /* NCPUS > 1 */ + ) { + mp_enable_preemption_no_check(); + __asm__ volatile (" int $0xff"); + } else { + mp_enable_preemption_no_check(); + } +} + +#if MACH_KDB + +extern void db_i386_state(struct i386_saved_state *regs); + +#include + +void +db_i386_state( + struct i386_saved_state *regs) +{ + db_printf("eip %8x\n", regs->eip); + db_printf("trap %8x\n", regs->trapno); + db_printf("err %8x\n", regs->err); + db_printf("efl %8x\n", regs->efl); + db_printf("ebp %8x\n", regs->ebp); + db_printf("esp %8x\n", regs->esp); + db_printf("uesp %8x\n", regs->uesp); + db_printf("cs %8x\n", regs->cs & 0xff); + db_printf("ds %8x\n", regs->ds & 0xff); + db_printf("es %8x\n", regs->es & 0xff); + db_printf("fs %8x\n", regs->fs & 0xff); + db_printf("gs %8x\n", regs->gs & 0xff); + db_printf("ss %8x\n", regs->ss & 0xff); + db_printf("eax %8x\n", regs->eax); + db_printf("ebx %8x\n", regs->ebx); + db_printf("ecx %8x\n", regs->ecx); + db_printf("edx %8x\n", regs->edx); + db_printf("esi %8x\n", regs->esi); + db_printf("edi %8x\n", regs->edi); +} + +#endif /* MACH_KDB */ diff --git a/osfmk/i386/trap.h b/osfmk/i386/trap.h new file mode 100644 index 000000000..1b90618f3 --- /dev/null +++ b/osfmk/i386/trap.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _I386_TRAP_H_ +#define _I386_TRAP_H_ + +/* + * Hardware trap vectors for i386. 
#ifndef _I386_TRAP_H_
#define _I386_TRAP_H_

/*
 * Hardware trap vectors for i386.
 */
#define T_DIVIDE_ERROR		0
#define T_DEBUG			1
#define T_NMI			2	/* non-maskable interrupt */
#define T_INT3			3	/* int 3 instruction */
#define T_OVERFLOW		4	/* overflow test */
#define T_OUT_OF_BOUNDS		5	/* bounds check */
#define T_INVALID_OPCODE	6	/* invalid op code */
#define T_NO_FPU		7	/* no floating point */
#define T_DOUBLE_FAULT		8	/* double fault */
#define T_FPU_FAULT		9
				/* 10: invalid TSS (name only in TRAP_NAMES) */
#define T_SEGMENT_NOT_PRESENT	11
#define T_STACK_FAULT		12
#define T_GENERAL_PROTECTION	13
#define T_PAGE_FAULT		14
				/* 15: reserved */
#define T_FLOATING_POINT_ERROR	16
#define T_WATCHPOINT		17
#define T_PREEMPT		255	/* software: preemption request */

/* Printable names, indexed by vector number (0..17). */
#define TRAP_NAMES "divide error", "debug trap", "NMI", "breakpoint", \
		   "overflow", "bounds check", "invalid opcode", \
		   "no coprocessor", "double fault", "coprocessor overrun", \
		   "invalid TSS", "segment not present", "stack bounds", \
		   "general protection", "page fault", "(reserved)", \
		   "coprocessor error", "watchpoint"

/*
 * Page-fault trap codes (bits of the error code pushed by the CPU).
 */
#define T_PF_PROT		0x1	/* protection violation */
#define T_PF_WRITE		0x2	/* write access */
#define T_PF_USER		0x4	/* from user state */

#if !defined(ASSEMBLER) && defined(MACH_KERNEL)

/* NOTE(review): an #include name was elided in the source dump here --
 * presumably the header declaring struct i386_saved_state; confirm. */

extern void i386_exception(
	int	exc,
	int	code,
	int	subcode);

extern void thread_exception_return(void);

extern boolean_t kernel_trap(
	struct i386_saved_state *regs);

extern void panic_trap(
	struct i386_saved_state *regs);

extern void user_trap(
	struct i386_saved_state *regs);

extern void i386_astintr(int preemption);

#endif /* !ASSEMBLER && MACH_KERNEL */

#endif /* _I386_TRAP_H_ */
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
#ifndef _I386_TSS_H_
#define _I386_TSS_H_

/*
 * i386 Task State Segment (32-bit hardware TSS layout).
 * Field order and widths follow the architected format, so this
 * struct can be pointed at by a TSS descriptor directly.
 */
struct i386_tss {
	int	back_link;	/* segment number of previous task,
				   if nested */
	int	esp0;		/* initial stack pointer ... */
	int	ss0;		/* and segment for ring 0 */
	int	esp1;		/* initial stack pointer ... */
	int	ss1;		/* and segment for ring 1 */
	int	esp2;		/* initial stack pointer ... */
	int	ss2;		/* and segment for ring 2 */
	int	cr3;		/* CR3 - page table directory
				   physical address */
	int	eip;
	int	eflags;
	int	eax;
	int	ecx;
	int	edx;
	int	ebx;
	int	esp;		/* current stack pointer */
	int	ebp;
	int	esi;
	int	edi;
	int	es;
	int	cs;
	int	ss;		/* current stack segment */
	int	ds;
	int	fs;
	int	gs;
	int	ldt;		/* local descriptor table segment */
	unsigned short	trace_trap;		/* trap on switch to this task */
	unsigned short	io_bit_map_offset;	/* offset to start of IO
						   permission bit map */
};

#endif	/* _I386_TSS_H_ */
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * User LDT management. + * Each thread in a task may have its own LDT. 
+ */ + +#include +#include +#include + +#include + +#include +#include +#include + +char acc_type[8][3] = { + /* code stack data */ + { 0, 0, 1 }, /* data */ + { 0, 1, 1 }, /* data, writable */ + { 0, 0, 1 }, /* data, expand-down */ + { 0, 1, 1 }, /* data, writable, expand-down */ + { 1, 0, 0 }, /* code */ + { 1, 0, 1 }, /* code, readable */ + { 1, 0, 0 }, /* code, conforming */ + { 1, 0, 1 }, /* code, readable, conforming */ +}; + +extern struct fake_descriptor ldt[]; /* for system call gate */ + +#if 0 +/* Forward */ + +extern boolean_t selector_check( + thread_t thread, + int sel, + int type); + +boolean_t +selector_check( + thread_t thread, + int sel, + int type) +{ + struct user_ldt *ldt; + int access; + + ldt = thread->top_act->mact.pcb->ims.ldt; + if (ldt == 0) { + switch (type) { + case S_CODE: + return sel == USER_CS; + case S_STACK: + return sel == USER_DS; + case S_DATA: + return sel == 0 || + sel == USER_CS || + sel == USER_DS; + } + } + + if (type != S_DATA && sel == 0) + return FALSE; + if ((sel & (SEL_LDTS|SEL_PL)) != (SEL_LDTS|SEL_PL_U) + || sel > ldt->desc.limit_low) + return FALSE; + + access = ldt->ldt[sel_idx(sel)].access; + + if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER)) + != (ACC_P|ACC_PL_U|ACC_TYPE_USER)) + return FALSE; + /* present, pl == pl.user, not system */ + + return acc_type[(access & 0xe)>>1][type]; +} + +/* + * Add the descriptors to the LDT, starting with + * the descriptor for 'first_selector'. 
+ */ + +kern_return_t +i386_set_ldt( + thread_act_t thr_act, + int first_selector, + descriptor_list_t desc_list, + mach_msg_type_number_t count) +{ + user_ldt_t new_ldt, old_ldt, temp; + struct real_descriptor *dp; + int i; + int min_selector = 0; + pcb_t pcb; + vm_size_t ldt_size_needed; + int first_desc = sel_idx(first_selector); + vm_map_copy_t old_copy_object; + thread_t thread; + + if (first_desc < min_selector || first_desc > 8191) + return KERN_INVALID_ARGUMENT; + if (first_desc + count >= 8192) + return KERN_INVALID_ARGUMENT; + if (thr_act == THR_ACT_NULL) + return KERN_INVALID_ARGUMENT; + if ((thread = act_lock_thread(thr_act)) == THREAD_NULL) { + act_unlock_thread(thr_act); + return KERN_INVALID_ARGUMENT; + } + if (thread == current_thread()) + min_selector = LDTSZ; + act_unlock_thread(thr_act); + + /* + * We must copy out desc_list to the kernel map, and wire + * it down (we touch it while the PCB is locked). + * + * We make a copy of the copyin object, and clear + * out the old one, so that the MIG stub will have a + * a empty (but valid) copyin object to discard. 
+ */ + { + kern_return_t kr; + vm_offset_t dst_addr; + + old_copy_object = (vm_map_copy_t) desc_list; + + kr = vm_map_copyout(ipc_kernel_map, &dst_addr, + vm_map_copy_copy(old_copy_object)); + if (kr != KERN_SUCCESS) + return kr; + + (void) vm_map_wire(ipc_kernel_map, + trunc_page(dst_addr), + round_page(dst_addr + + count * sizeof(struct real_descriptor)), + VM_PROT_READ|VM_PROT_WRITE, FALSE); + desc_list = (descriptor_list_t) dst_addr; + } + + for (i = 0, dp = (struct real_descriptor *) desc_list; + i < count; + i++, dp++) + { + switch (dp->access & ~ACC_A) { + case 0: + case ACC_P: + /* valid empty descriptor */ + break; + case ACC_P | ACC_CALL_GATE: + /* Mach kernel call */ + *dp = *(struct real_descriptor *) + &ldt[sel_idx(USER_SCALL)]; + break; + case ACC_P | ACC_PL_U | ACC_DATA: + case ACC_P | ACC_PL_U | ACC_DATA_W: + case ACC_P | ACC_PL_U | ACC_DATA_E: + case ACC_P | ACC_PL_U | ACC_DATA_EW: + case ACC_P | ACC_PL_U | ACC_CODE: + case ACC_P | ACC_PL_U | ACC_CODE_R: + case ACC_P | ACC_PL_U | ACC_CODE_C: + case ACC_P | ACC_PL_U | ACC_CODE_CR: + case ACC_P | ACC_PL_U | ACC_CALL_GATE_16: + case ACC_P | ACC_PL_U | ACC_CALL_GATE: + break; + default: + (void) vm_map_remove(ipc_kernel_map, + (vm_offset_t) desc_list, + count * sizeof(struct real_descriptor), + VM_MAP_REMOVE_KUNWIRE); + return KERN_INVALID_ARGUMENT; + } + } + ldt_size_needed = sizeof(struct real_descriptor) + * (first_desc + count); + + pcb = thr_act->mact.pcb; + new_ldt = 0; + Retry: + simple_lock(&pcb->lock); + old_ldt = pcb->ims.ldt; + if (old_ldt == 0 || + old_ldt->desc.limit_low + 1 < ldt_size_needed) + { + /* + * No old LDT, or not big enough + */ + if (new_ldt == 0) { + simple_unlock(&pcb->lock); + + new_ldt = (user_ldt_t) kalloc(ldt_size_needed + + sizeof(struct real_descriptor)); + new_ldt->desc.limit_low = ldt_size_needed - 1; + new_ldt->desc.limit_high = 0; + new_ldt->desc.base_low = + ((vm_offset_t)&new_ldt->ldt[0]) & 0xffff; + new_ldt->desc.base_med = + (((vm_offset_t)&new_ldt->ldt[0]) >> 
16) + & 0xff; + new_ldt->desc.base_high = + ((vm_offset_t)&new_ldt->ldt[0]) >> 24; + new_ldt->desc.access = ACC_P | ACC_LDT; + new_ldt->desc.granularity = 0; + + goto Retry; + } + + /* + * Have new LDT. If there was a an old ldt, copy descriptors + * from old to new. Otherwise copy the default ldt. + */ + if (old_ldt) { + bcopy((char *)&old_ldt->ldt[0], + (char *)&new_ldt->ldt[0], + old_ldt->desc.limit_low + 1); + } + else if (thr_act == current_act()) { + struct real_descriptor template = {0, 0, 0, ACC_P, 0, 0 ,0}; + + for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) { + if (i < LDTSZ) + *dp = *(struct real_descriptor *) &ldt[i]; + else + *dp = template; + } + } + + temp = old_ldt; + old_ldt = new_ldt; /* use new LDT from now on */ + new_ldt = temp; /* discard old LDT */ + + pcb->ims.ldt = old_ldt; /* new LDT for thread */ + } + + /* + * Install new descriptors. + */ + bcopy((char *)desc_list, + (char *)&old_ldt->ldt[first_desc], + count * sizeof(struct real_descriptor)); + + simple_unlock(&pcb->lock); + + if (new_ldt) + kfree((vm_offset_t)new_ldt, + new_ldt->desc.limit_low+1+sizeof(struct real_descriptor)); + + /* + * Free the descriptor list. 
+ */ + (void) vm_map_remove(ipc_kernel_map, (vm_offset_t) desc_list, + count * sizeof(struct real_descriptor), + VM_MAP_REMOVE_KUNWIRE); + return KERN_SUCCESS; +} + +kern_return_t +i386_get_ldt( + thread_act_t thr_act, + int first_selector, + int selector_count, /* number wanted */ + descriptor_list_t *desc_list, /* in/out */ + mach_msg_type_number_t *count) /* in/out */ +{ + struct user_ldt *user_ldt; + pcb_t pcb = thr_act->mact.pcb; + int first_desc = sel_idx(first_selector); + unsigned int ldt_count; + vm_size_t ldt_size; + vm_size_t size, size_needed; + vm_offset_t addr; + thread_t thread; + + if (thr_act == THR_ACT_NULL || (thread = thr_act->thread)==THREAD_NULL) + return KERN_INVALID_ARGUMENT; + + if (first_desc < 0 || first_desc > 8191) + return KERN_INVALID_ARGUMENT; + if (first_desc + selector_count >= 8192) + return KERN_INVALID_ARGUMENT; + + addr = 0; + size = 0; + + for (;;) { + simple_lock(&pcb->lock); + user_ldt = pcb->ims.ldt; + if (user_ldt == 0) { + simple_unlock(&pcb->lock); + if (addr) + kmem_free(ipc_kernel_map, addr, size); + *count = 0; + return KERN_SUCCESS; + } + + /* + * Find how many descriptors we should return. + */ + ldt_count = (user_ldt->desc.limit_low + 1) / + sizeof (struct real_descriptor); + ldt_count -= first_desc; + if (ldt_count > selector_count) + ldt_count = selector_count; + + ldt_size = ldt_count * sizeof(struct real_descriptor); + + /* + * Do we have the memory we need? 
+ */ + if (ldt_count <= *count) + break; /* fits in-line */ + + size_needed = round_page(ldt_size); + if (size_needed <= size) + break; + + /* + * Unlock the pcb and allocate more memory + */ + simple_unlock(&pcb->lock); + + if (size != 0) + kmem_free(ipc_kernel_map, addr, size); + + size = size_needed; + + if (kmem_alloc(ipc_kernel_map, &addr, size) + != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + } + + /* + * copy out the descriptors + */ + bcopy((char *)&user_ldt->ldt[first_desc], + (char *)addr, + ldt_size); + *count = ldt_count; + simple_unlock(&pcb->lock); + + if (addr) { + vm_size_t size_used, size_left; + vm_map_copy_t memory; + + /* + * Free any unused memory beyond the end of the last page used + */ + size_used = round_page(ldt_size); + if (size_used != size) + kmem_free(ipc_kernel_map, + addr + size_used, size - size_used); + + /* + * Zero the remainder of the page being returned. + */ + size_left = size_used - ldt_size; + if (size_left > 0) + bzero((char *)addr + ldt_size, size_left); + + /* + * Unwire the memory and make it into copyin form. + */ + (void) vm_map_unwire(ipc_kernel_map, trunc_page(addr), + round_page(addr + size_used), FALSE); + (void) vm_map_copyin(ipc_kernel_map, addr, size_used, + TRUE, &memory); + *desc_list = (descriptor_list_t) memory; + } + + return KERN_SUCCESS; +} + +#endif +void +user_ldt_free( + user_ldt_t user_ldt) +{ + kfree((vm_offset_t)user_ldt, + user_ldt->desc.limit_low+1+sizeof(struct real_descriptor)); +} diff --git a/osfmk/i386/user_ldt.h b/osfmk/i386/user_ldt.h new file mode 100644 index 000000000..77768c912 --- /dev/null +++ b/osfmk/i386/user_ldt.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _I386_USER_LDT_H_ +#define _I386_USER_LDT_H_ + +/* + * User LDT management. + * + * Each thread in a task may have its own LDT. 
+ */ + +#include + +struct user_ldt { + struct real_descriptor desc; /* descriptor for self */ + struct real_descriptor ldt[1]; /* descriptor table (variable) */ +}; +typedef struct user_ldt * user_ldt_t; + +/* + * Check code/stack/data selector values against LDT if present. + */ +#define S_CODE 0 /* code segment */ +#define S_STACK 1 /* stack segment */ +#define S_DATA 2 /* data segment */ + +extern boolean_t selector_check( + thread_t thread, + int sel, + int type); +extern void user_ldt_free( + user_ldt_t ldt); + +#endif /* _I386_USER_LDT_H_ */ diff --git a/osfmk/i386/vm_tuning.h b/osfmk/i386/vm_tuning.h new file mode 100644 index 000000000..ba65481ba --- /dev/null +++ b/osfmk/i386/vm_tuning.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
/*
 * File: i386/vm_tuning.h
 *
 * VM tuning parameters for the i386 (without reference bits).
 * Intentionally empty: this configuration defines no overrides.
 */

#ifndef _I386_VM_TUNING_H_
#define _I386_VM_TUNING_H_

#endif	/* _I386_VM_TUNING_H_ */
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: xpr.h + * + * Machine dependent module for the XPR tracing facility. 
/*
 * File: xpr.h
 *
 * Machine dependent module for the XPR tracing facility.
 * Uniprocessor (or MP_V1_1) configurations take timestamps from
 * the machine-dependent xpr_time(); otherwise no timestamp is kept.
 */

/* NOTE(review): four #include names were elided in the source dump here
 * (cpu-count / MP configuration headers); confirm against the original. */

#if NCPUS == 1 || MP_V1_1
extern int xpr_time(void);
#define XPR_TIMESTAMP	xpr_time()

#else	/* NCPUS == 1 || MP_V1_1 */

#define XPR_TIMESTAMP	(0)

#endif	/* NCPUS == 1 || MP_V1_1 */
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_entry.c + * Author: Rich Draves + * Date: 1989 + * + * Primitive functions to manipulate translation entries. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#if MACH_KDB +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include + +zone_t ipc_tree_entry_zone; + + + +/* + * Forward declarations + */ +boolean_t ipc_entry_tree_collision( + ipc_space_t space, + mach_port_name_t name); + +/* + * Routine: ipc_entry_tree_collision + * Purpose: + * Checks if "name" collides with an allocated name + * in the space's tree. That is, returns TRUE + * if the splay tree contains a name with the same + * index as "name". + * Conditions: + * The space is locked (read or write) and active. 
+ */ + +boolean_t +ipc_entry_tree_collision( + ipc_space_t space, + mach_port_name_t name) +{ + mach_port_index_t index; + mach_port_name_t lower, upper; + + assert(space->is_active); + + /* + * Check if we collide with the next smaller name + * or the next larger name. + */ + + ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper); + + index = MACH_PORT_INDEX(name); + return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) || + ((upper != 0) && (MACH_PORT_INDEX(upper) == index))); +} + +/* + * Routine: ipc_entry_lookup + * Purpose: + * Searches for an entry, given its name. + * Conditions: + * The space must be read or write locked throughout. + * The space must be active. + */ + +ipc_entry_t +ipc_entry_lookup( + ipc_space_t space, + mach_port_name_t name) +{ + mach_port_index_t index; + ipc_entry_t entry; + + assert(space->is_active); + + + index = MACH_PORT_INDEX(name); + /* + * If space is fast, we assume no splay tree and name within table + * bounds, but still check generation numbers (if enabled) and + * look for null entries. 
+ */ + if (is_fast_space(space)) { + entry = &space->is_table[index]; + if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) || + IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + entry = IE_NULL; + } + else + if (index < space->is_table_size) { + entry = &space->is_table[index]; + if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name)) + if (entry->ie_bits & IE_BITS_COLLISION) { + assert(space->is_tree_total > 0); + goto tree_lookup; + } else + entry = IE_NULL; + else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + entry = IE_NULL; + } else if (space->is_tree_total == 0) + entry = IE_NULL; + else { + tree_lookup: + entry = (ipc_entry_t) + ipc_splay_tree_lookup(&space->is_tree, name); + /* with sub-space introduction, an entry may appear in */ + /* the splay tree and yet not show rights for this subspace */ + if(entry != IE_NULL) { + if(!(IE_BITS_TYPE(entry->ie_bits))) + entry = IE_NULL; + } + } + + assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits)); + return entry; +} + +/* + * Routine: ipc_entry_get + * Purpose: + * Tries to allocate an entry out of the space. + * Conditions: + * The space is write-locked and active throughout. + * An object may be locked. Will not allocate memory. + * Returns: + * KERN_SUCCESS A free entry was found. + * KERN_NO_SPACE No entry allocated. + */ + +kern_return_t +ipc_entry_get( + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp) +{ + ipc_entry_t table; + mach_port_index_t first_free; + ipc_entry_t free_entry; + + assert(space->is_active); + + { + table = space->is_table; + first_free = table->ie_next; + + if (first_free == 0) + return KERN_NO_SPACE; + + free_entry = &table[first_free]; + table->ie_next = free_entry->ie_next; + } + + /* + * Initialize the new entry. We need only + * increment the generation number and clear ie_request. 
+ */
+ {
+ mach_port_name_t new_name;
+ mach_port_gen_t gen;
+
+ gen = IE_BITS_NEW_GEN(free_entry->ie_bits);
+ free_entry->ie_bits = gen;
+ free_entry->ie_request = 0;
+
+ /*
+ * The new name can't be MACH_PORT_NULL because index
+ * is non-zero. It can't be MACH_PORT_DEAD because
+ * the table isn't allowed to grow big enough.
+ * (See comment in ipc/ipc_table.h.)
+ */
+ new_name = MACH_PORT_MAKE(first_free, gen);
+ assert(MACH_PORT_VALID(new_name));
+ *namep = new_name;
+ }
+
+ assert(free_entry->ie_object == IO_NULL);
+
+ *entryp = free_entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_entry_alloc
+ * Purpose:
+ * Allocate an entry out of the space.
+ * Conditions:
+ * The space is not locked before, but it is write-locked after
+ * if the call is successful. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS An entry was allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
+ */
+
+kern_return_t
+ipc_entry_alloc(
+ ipc_space_t space,
+ mach_port_name_t *namep,
+ ipc_entry_t *entryp)
+{
+ kern_return_t kr;
+
+ is_write_lock(space);
+
+ for (;;) {
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ kr = ipc_entry_get(space, namep, entryp);
+ if (kr == KERN_SUCCESS)
+ return kr;
+
+ /*
+ * Table is full: grow it and retry. On success the
+ * space comes back write-locked (per the grow_table
+ * contract); on failure it is left unlocked, so we
+ * can return directly.
+ */
+ kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+ }
+}
+
+/*
+ * Routine: ipc_entry_alloc_name
+ * Purpose:
+ * Allocates/finds an entry with a specific name.
+ * If an existing entry is returned, its type will be nonzero.
+ * Conditions:
+ * The space is not locked before, but it is write-locked after
+ * if the call is successful. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS Found existing entry with same name.
+ * KERN_SUCCESS Allocated a new entry.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */ + +kern_return_t +ipc_entry_alloc_name( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp) +{ + mach_port_index_t index = MACH_PORT_INDEX(name); + mach_port_gen_t gen = MACH_PORT_GEN(name); + ipc_tree_entry_t tentry = ITE_NULL; + + assert(MACH_PORT_VALID(name)); + + + is_write_lock(space); + + for (;;) { + ipc_entry_t entry; + ipc_tree_entry_t tentry2; + ipc_table_size_t its; + + if (!space->is_active) { + is_write_unlock(space); + if (tentry) ite_free(tentry); + return KERN_INVALID_TASK; + } + + /* + * If we are under the table cutoff, + * there are usually four cases: + * 1) The entry is reserved (index 0) + * 2) The entry is inuse, for the same name + * 3) The entry is inuse, for a different name + * 4) The entry is free + * For a task with a "fast" IPC space, we disallow + * cases 1) and 3), because ports cannot be renamed. + */ + if (index < space->is_table_size) { + ipc_entry_t table = space->is_table; + + entry = &table[index]; + + if (index == 0) { + assert(!IE_BITS_TYPE(entry->ie_bits)); + assert(!IE_BITS_GEN(entry->ie_bits)); + } else if (IE_BITS_TYPE(entry->ie_bits)) { + if (IE_BITS_GEN(entry->ie_bits) == gen) { + *entryp = entry; + assert(!tentry); + return KERN_SUCCESS; + } + } else { + mach_port_index_t free_index, next_index; + + /* + * Rip the entry out of the free list. + */ + + for (free_index = 0; + (next_index = table[free_index].ie_next) + != index; + free_index = next_index) + continue; + + table[free_index].ie_next = + table[next_index].ie_next; + + entry->ie_bits = gen; + entry->ie_request = 0; + *entryp = entry; + + assert(entry->ie_object == IO_NULL); + if (is_fast_space(space)) + assert(!tentry); + else if (tentry) + ite_free(tentry); + return KERN_SUCCESS; + } + } + + /* + * In a fast space, ipc_entry_alloc_name may be + * used only to add a right to a port name already + * known in this space. 
+ */ + if (is_fast_space(space)) { + is_write_unlock(space); + assert(!tentry); + return KERN_FAILURE; + } + + /* + * Before trying to allocate any memory, + * check if the entry already exists in the tree. + * This avoids spurious resource errors. + * The splay tree makes a subsequent lookup/insert + * of the same name cheap, so this costs little. + */ + + if ((space->is_tree_total > 0) && + ((tentry2 = ipc_splay_tree_lookup(&space->is_tree, name)) + != ITE_NULL)) { + assert(tentry2->ite_space == space); + assert(IE_BITS_TYPE(tentry2->ite_bits)); + + *entryp = &tentry2->ite_entry; + if (tentry) ite_free(tentry); + return KERN_SUCCESS; + } + + its = space->is_table_next; + + /* + * Check if the table should be grown. + * + * Note that if space->is_table_size == its->its_size, + * then we won't ever try to grow the table. + * + * Note that we are optimistically assuming that name + * doesn't collide with any existing names. (So if + * it were entered into the tree, is_tree_small would + * be incremented.) This is OK, because even in that + * case, we don't lose memory by growing the table. + */ + if ((space->is_table_size <= index) && + (index < its->its_size) && + (((its->its_size - space->is_table_size) * + sizeof(struct ipc_entry)) < + ((space->is_tree_small + 1) * + sizeof(struct ipc_tree_entry)))) { + kern_return_t kr; + + /* + * Can save space by growing the table. + * Because the space will be unlocked, + * we must restart. + */ + + kr = ipc_entry_grow_table(space, ITS_SIZE_NONE); + assert(kr != KERN_NO_SPACE); + if (kr != KERN_SUCCESS) { + /* space is unlocked */ + if (tentry) ite_free(tentry); + return kr; + } + + continue; + } + + /* + * If a splay-tree entry was allocated previously, + * go ahead and insert it into the tree. 
+ */ + + if (tentry != ITE_NULL) { + + space->is_tree_total++; + + if (index < space->is_table_size) { + entry = &space->is_table[index]; + entry->ie_bits |= IE_BITS_COLLISION; + } else if ((index < its->its_size) && + !ipc_entry_tree_collision(space, name)) + space->is_tree_small++; + + ipc_splay_tree_insert(&space->is_tree, name, tentry); + tentry->ite_bits = 0; + tentry->ite_request = 0; + tentry->ite_object = IO_NULL; + tentry->ite_space = space; + *entryp = &tentry->ite_entry; + return KERN_SUCCESS; + } + + /* + * Allocate a tree entry and try again. + */ + + is_write_unlock(space); + tentry = ite_alloc(); + if (tentry == ITE_NULL) + return KERN_RESOURCE_SHORTAGE; + is_write_lock(space); + } +} + +/* + * Routine: ipc_entry_dealloc + * Purpose: + * Deallocates an entry from a space. + * Conditions: + * The space must be write-locked throughout. + * The space must be active. + */ + +void +ipc_entry_dealloc( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) +{ + ipc_entry_t table; + ipc_entry_num_t size; + mach_port_index_t index; + + assert(space->is_active); + assert(entry->ie_object == IO_NULL); + assert(entry->ie_request == 0); + + index = MACH_PORT_INDEX(name); + table = space->is_table; + size = space->is_table_size; + + if (is_fast_space(space)) { + assert(index < size); + assert(entry == &table[index]); + assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name)); + assert(!(entry->ie_bits & IE_BITS_COLLISION)); + entry->ie_bits &= IE_BITS_GEN_MASK; + entry->ie_next = table->ie_next; + table->ie_next = index; + return; + } + + + if ((index < size) && (entry == &table[index])) { + assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name)); + + if (entry->ie_bits & IE_BITS_COLLISION) { + struct ipc_splay_tree small, collisions; + ipc_tree_entry_t tentry; + mach_port_name_t tname; + boolean_t pick; + ipc_entry_bits_t bits; + ipc_object_t obj; + + /* must move an entry from tree to table */ + + ipc_splay_tree_split(&space->is_tree, + 
MACH_PORT_MAKE(index+1, 0), + &collisions); + ipc_splay_tree_split(&collisions, + MACH_PORT_MAKE(index, 0), + &small); + + pick = ipc_splay_tree_pick(&collisions, + &tname, &tentry); + assert(pick); + assert(MACH_PORT_INDEX(tname) == index); + + entry->ie_object = obj = tentry->ite_object; + entry->ie_bits = tentry->ite_bits|MACH_PORT_GEN(tname); + entry->ie_request = tentry->ite_request; + + assert(tentry->ite_space == space); + + if (IE_BITS_TYPE(tentry->ite_bits)==MACH_PORT_TYPE_SEND) { + ipc_hash_global_delete(space, obj, + tname, tentry); + ipc_hash_local_insert(space, obj, + index, entry); + } + + ipc_splay_tree_delete(&collisions, tname, tentry); + + assert(space->is_tree_total > 0); + space->is_tree_total--; + + /* check if collision bit should still be on */ + + pick = ipc_splay_tree_pick(&collisions, + &tname, &tentry); + if (pick) { + entry->ie_bits |= IE_BITS_COLLISION; + ipc_splay_tree_join(&space->is_tree, + &collisions); + } + + ipc_splay_tree_join(&space->is_tree, &small); + + } else { + entry->ie_bits &= IE_BITS_GEN_MASK; + entry->ie_next = table->ie_next; + table->ie_next = index; + } + + } else { + ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry; + + assert(tentry->ite_space == space); + + ipc_splay_tree_delete(&space->is_tree, name, tentry); + + assert(space->is_tree_total > 0); + space->is_tree_total--; + + if (index < size) { + ipc_entry_t ientry = &table[index]; + + assert(ientry->ie_bits & IE_BITS_COLLISION); + + if (!ipc_entry_tree_collision(space, name)) + ientry->ie_bits &= ~IE_BITS_COLLISION; + + } else if ((index < space->is_table_next->its_size) && + !ipc_entry_tree_collision(space, name)) { + + assert(space->is_tree_small > 0); + + space->is_tree_small--; + } + } +} + +/* + * Routine: ipc_entry_grow_table + * Purpose: + * Grows the table in a space. + * Conditions: + * The space must be write-locked and active before. + * If successful, it is also returned locked. + * Allocates memory. + * Returns: + * KERN_SUCCESS Grew the table. 
+ * KERN_SUCCESS Somebody else grew the table. + * KERN_SUCCESS The space died. + * KERN_NO_SPACE Table has maximum size already. + * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table. + */ + +kern_return_t +ipc_entry_grow_table( + ipc_space_t space, + int target_size) +{ + ipc_entry_num_t osize, size, nsize, psize; + + do { + boolean_t reallocated=FALSE; + + ipc_entry_t otable, table; + ipc_table_size_t oits, its, nits; + mach_port_index_t i, free_index; + + assert(space->is_active); + + if (space->is_growing) { + /* + * Somebody else is growing the table. + * We just wait for them to finish. + */ + + assert_wait((event_t) space, THREAD_UNINT); + is_write_unlock(space); + thread_block((void (*)(void)) 0); + is_write_lock(space); + return KERN_SUCCESS; + } + + otable = space->is_table; + + its = space->is_table_next; + size = its->its_size; + + /* + * Since is_table_next points to the next natural size + * we can identify the current size entry. + */ + oits = its - 1; + osize = oits->its_size; + + /* + * If there is no target size, then the new size is simply + * specified by is_table_next. If there is a target + * size, then search for the next entry. + */ + if (target_size != ITS_SIZE_NONE) { + if (target_size <= osize) { + is_write_unlock(space); + return KERN_SUCCESS; + } + + psize = osize; + while ((psize != size) && (target_size > size)) { + psize = size; + its++; + size = its->its_size; + } + if (psize == size) { + is_write_unlock(space); + return KERN_NO_SPACE; + } + } + nits = its + 1; + nsize = nits->its_size; + + if (osize == size) { + is_write_unlock(space); + return KERN_NO_SPACE; + } + + assert((osize < size) && (size <= nsize)); + + /* + * OK, we'll attempt to grow the table. + * The realloc requires that the old table + * remain in existence. 
+ */ + + space->is_growing = TRUE; + is_write_unlock(space); + + if (it_entries_reallocable(oits)) { + table = it_entries_realloc(oits, otable, its); + reallocated=TRUE; + } + else { + table = it_entries_alloc(its); + } + + is_write_lock(space); + space->is_growing = FALSE; + + /* + * We need to do a wakeup on the space, + * to rouse waiting threads. We defer + * this until the space is unlocked, + * because we don't want them to spin. + */ + + if (table == IE_NULL) { + is_write_unlock(space); + thread_wakeup((event_t) space); + return KERN_RESOURCE_SHORTAGE; + } + + if (!space->is_active) { + /* + * The space died while it was unlocked. + */ + + is_write_unlock(space); + thread_wakeup((event_t) space); + it_entries_free(its, table); + is_write_lock(space); + return KERN_SUCCESS; + } + + assert(space->is_table == otable); + assert((space->is_table_next == its) || + (target_size != ITS_SIZE_NONE)); + assert(space->is_table_size == osize); + + space->is_table = table; + space->is_table_size = size; + space->is_table_next = nits; + + /* + * If we did a realloc, it remapped the data. + * Otherwise we copy by hand first. Then we have + * to zero the new part and the old local hash + * values. + */ + if (!reallocated) + (void) memcpy((void *) table, (const void *) otable, + osize * (sizeof(struct ipc_entry))); + + for (i = 0; i < osize; i++) + table[i].ie_index = 0; + + (void) memset((void *) (table + osize) , 0, + ((size - osize) * (sizeof(struct ipc_entry)))); + + /* + * Put old entries into the reverse hash table. 
+ */ + for (i = 0; i < osize; i++) { + ipc_entry_t entry = &table[i]; + + if (IE_BITS_TYPE(entry->ie_bits)==MACH_PORT_TYPE_SEND) { + ipc_hash_local_insert(space, entry->ie_object, + i, entry); + } + } + + /* + * If there are entries in the splay tree, + * then we have work to do: + * 1) transfer entries to the table + * 2) update is_tree_small + */ + assert(!is_fast_space(space) || space->is_tree_total == 0); + if (space->is_tree_total > 0) { + mach_port_index_t index; + boolean_t delete; + struct ipc_splay_tree ignore; + struct ipc_splay_tree move; + struct ipc_splay_tree small; + ipc_entry_num_t nosmall; + ipc_tree_entry_t tentry; + + /* + * The splay tree divides into four regions, + * based on the index of the entries: + * 1) 0 <= index < osize + * 2) osize <= index < size + * 3) size <= index < nsize + * 4) nsize <= index + * + * Entries in the first part are ignored. + * Entries in the second part, that don't + * collide, are moved into the table. + * Entries in the third part, that don't + * collide, are counted for is_tree_small. + * Entries in the fourth part are ignored. 
+ */ + + ipc_splay_tree_split(&space->is_tree, + MACH_PORT_MAKE(nsize, 0), + &small); + ipc_splay_tree_split(&small, + MACH_PORT_MAKE(size, 0), + &move); + ipc_splay_tree_split(&move, + MACH_PORT_MAKE(osize, 0), + &ignore); + + /* move entries into the table */ + + for (tentry = ipc_splay_traverse_start(&move); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&move, delete)) { + + mach_port_name_t name; + mach_port_gen_t gen; + mach_port_type_t type; + ipc_entry_bits_t bits; + ipc_object_t obj; + ipc_entry_t entry; + + name = tentry->ite_name; + gen = MACH_PORT_GEN(name); + index = MACH_PORT_INDEX(name); + + assert(tentry->ite_space == space); + assert((osize <= index) && (index < size)); + + entry = &table[index]; + bits = entry->ie_bits; + if (IE_BITS_TYPE(bits)) { + assert(IE_BITS_GEN(bits) != gen); + entry->ie_bits |= IE_BITS_COLLISION; + delete = FALSE; + continue; + } + + bits = tentry->ite_bits; + type = IE_BITS_TYPE(bits); + assert(type != MACH_PORT_TYPE_NONE); + + entry->ie_bits = bits | gen; + entry->ie_request = tentry->ite_request; + entry->ie_object = obj = tentry->ite_object; + + if (type == MACH_PORT_TYPE_SEND) { + ipc_hash_global_delete(space, obj, + name, tentry); + ipc_hash_local_insert(space, obj, + index, entry); + } + space->is_tree_total--; + delete = TRUE; + } + ipc_splay_traverse_finish(&move); + + /* count entries for is_tree_small */ + + nosmall = 0; index = 0; + for (tentry = ipc_splay_traverse_start(&small); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&small, FALSE)) { + mach_port_index_t nindex; + + nindex = MACH_PORT_INDEX(tentry->ite_name); + + if (nindex != index) { + nosmall++; + index = nindex; + } + } + ipc_splay_traverse_finish(&small); + + assert(nosmall <= (nsize - size)); + assert(nosmall <= space->is_tree_total); + space->is_tree_small = nosmall; + + /* put the splay tree back together */ + + ipc_splay_tree_join(&space->is_tree, &small); + ipc_splay_tree_join(&space->is_tree, &move); + 
ipc_splay_tree_join(&space->is_tree, &ignore); + } + + /* + * Add entries in the new part which still aren't used + * to the free list. Add them in reverse order, + * and set the generation number to -1, so that + * early allocations produce "natural" names. + */ + + free_index = table[0].ie_next; + for (i = size-1; i >= osize; --i) { + ipc_entry_t entry = &table[i]; + + if (entry->ie_bits == 0) { + entry->ie_bits = IE_BITS_GEN_MASK; + entry->ie_next = free_index; + free_index = i; + } + } + table[0].ie_next = free_index; + + /* + * Now we need to free the old table. + * If the space dies or grows while unlocked, + * then we can quit here. + */ + is_write_unlock(space); + thread_wakeup((event_t) space); + + it_entries_free(oits, otable); + is_write_lock(space); + if (!space->is_active || (space->is_table_next != nits)) + return KERN_SUCCESS; + + /* + * We might have moved enough entries from + * the splay tree into the table that + * the table can be profitably grown again. + * + * Note that if size == nsize, then + * space->is_tree_small == 0. + */ + } while ((space->is_tree_small > 0) && + (((nsize - size) * sizeof(struct ipc_entry)) < + (space->is_tree_small * sizeof(struct ipc_tree_entry)))); + + return KERN_SUCCESS; +} + + +#if MACH_KDB +#include +#define printf kdbprintf + +ipc_entry_t db_ipc_object_by_name( + task_t task, + mach_port_name_t name); + + +ipc_entry_t +db_ipc_object_by_name( + task_t task, + mach_port_name_t name) +{ + ipc_space_t space = task->itk_space; + ipc_entry_t entry; + + + entry = ipc_entry_lookup(space, name); + if(entry != IE_NULL) { + iprintf("(task 0x%x, name 0x%x) ==> object 0x%x\n", + task, name, entry->ie_object); + return (ipc_entry_t) entry->ie_object; + } + return entry; +} +#endif /* MACH_KDB */ diff --git a/osfmk/ipc/ipc_entry.h b/osfmk/ipc/ipc_entry.h new file mode 100644 index 000000000..57e393c6e --- /dev/null +++ b/osfmk/ipc/ipc_entry.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: ipc/ipc_entry.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for translation entries, which represent + * tasks' capabilities for ports and port sets. + */ + +#ifndef _IPC_IPC_ENTRY_H_ +#define _IPC_IPC_ENTRY_H_ + +#include +#include +#include +#include +#include +#include + +/* + * Spaces hold capabilities for ipc_object_t's. + * Each ipc_entry_t records a capability. Most capabilities have + * small names, and the entries are elements of a table. + * Capabilities can have large names, and a splay tree holds + * those entries. The cutoff point between the table and the tree + * is adjusted dynamically to minimize memory consumption. + * + * The ie_index field of entries in the table implements + * a ordered hash table with open addressing and linear probing. + * This hash table converts (space, object) -> name. + * It is used independently of the other fields. + * + * Free (unallocated) entries in the table have null ie_object + * fields. The ie_bits field is zero except for IE_BITS_GEN. + * The ie_next (ie_request) field links free entries into a free list. + * + * The first entry in the table (index 0) is always free. + * It is used as the head of the free list. + */ + +typedef natural_t ipc_entry_bits_t; +typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */ + +typedef struct ipc_entry { + struct ipc_object *ie_object; + ipc_entry_bits_t ie_bits; + union { + mach_port_index_t next; /* next in freelist, or... 
*/
+ ipc_table_index_t request; /* dead name request notify */
+ } index;
+ union {
+ mach_port_index_t table;
+ struct ipc_tree_entry *tree;
+ } hash;
+} *ipc_entry_t;
+
+#define IE_NULL ((ipc_entry_t) 0)
+
+#define ie_request index.request
+#define ie_next index.next
+#define ie_index hash.table
+
+/*
+ * ie_bits layout (derived from the masks below):
+ *   bits  0-15  user references   (IE_BITS_UREFS_MASK)
+ *   bits 16-20  capability type   (IE_BITS_TYPE_MASK)
+ *   bit  23     collision flag    (IE_BITS_COLLISION)
+ *   bits 24-31  generation number (IE_BITS_GEN_MASK, unless NO_PORT_GEN)
+ */
+#define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */
+#define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK)
+
+#define IE_BITS_TYPE_MASK 0x001f0000 /* 5 bits of capability type */
+#define IE_BITS_TYPE(bits) ((bits) & IE_BITS_TYPE_MASK)
+
+#define IE_BITS_COLLISION 0x00800000 /* 1 bit for collisions */
+
+
+#ifndef NO_PORT_GEN
+#define IE_BITS_GEN_MASK 0xff000000 /* 8 bits for generation */
+#define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK)
+/* NOTE(review): 0x04000000 is bit 26, not the low bit (24) of the
+ 0xff000000 field, so generations advance in steps of 4 within the
+ byte -- "low bit" below is misleading; confirm intent. */
+#define IE_BITS_GEN_ONE 0x04000000 /* low bit of generation */
+#define IE_BITS_NEW_GEN(old) (((old) + IE_BITS_GEN_ONE) & IE_BITS_GEN_MASK)
+#else
+#define IE_BITS_GEN_MASK 0
+#define IE_BITS_GEN(bits) 0
+#define IE_BITS_GEN_ONE 0
+#define IE_BITS_NEW_GEN(old) (old)
+#endif /* !NO_PORT_GEN */
+
+
+#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */
+
+typedef struct ipc_tree_entry {
+ struct ipc_entry ite_entry;
+ mach_port_name_t ite_name;
+ struct ipc_space *ite_space;
+ struct ipc_tree_entry *ite_lchild;
+ struct ipc_tree_entry *ite_rchild;
+} *ipc_tree_entry_t;
+
+#define ITE_NULL ((ipc_tree_entry_t) 0)
+
+#define ite_bits ite_entry.ie_bits
+#define ite_object ite_entry.ie_object
+#define ite_request ite_entry.ie_request
+/* ite_next reuses the embedded entry's hash.tree union member */
+#define ite_next ite_entry.hash.tree
+
+extern zone_t ipc_tree_entry_zone;
+
+#define ite_alloc() ((ipc_tree_entry_t) zalloc(ipc_tree_entry_zone))
+#define ite_free(ite) zfree(ipc_tree_entry_zone, (vm_offset_t) (ite))
+
+/*
+ * Exported interfaces
+ */
+
+/* Search for entry in a space by name */
+extern ipc_entry_t ipc_entry_lookup(
+ ipc_space_t space,
+ mach_port_name_t name);
+
+/* Allocate an entry in a space */
+extern kern_return_t ipc_entry_get(
+ ipc_space_t
space, + mach_port_name_t *namep, + ipc_entry_t *entryp); + +/* Allocate an entry in a space, growing the space if necessary */ +extern kern_return_t ipc_entry_alloc( + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp); + +/* Allocate/find an entry in a space with a specific name */ +extern kern_return_t ipc_entry_alloc_name( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp); + +/* Deallocate an entry from a space */ +extern void ipc_entry_dealloc( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); + +/* Grow the table in a space */ +extern kern_return_t ipc_entry_grow_table( + ipc_space_t space, + int target_size); + +#endif /* _IPC_IPC_ENTRY_H_ */ diff --git a/osfmk/ipc/ipc_hash.c b/osfmk/ipc/ipc_hash.c new file mode 100644 index 000000000..844501695 --- /dev/null +++ b/osfmk/ipc/ipc_hash.c @@ -0,0 +1,682 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_hash.c + * Author: Rich Draves + * Date: 1989 + * + * Entry hash table operations. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if MACH_IPC_DEBUG +#include +#include +#include +#include +#endif /* MACH_IPC_DEBUG */ + +/* + * Forward declarations + */ + +/* Lookup (space, obj) in global hash table */ +boolean_t ipc_hash_global_lookup( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_tree_entry_t *entryp); + +/* Insert an entry into the global reverse hash table */ +void ipc_hash_global_insert( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_tree_entry_t entry); + +/* Delete an entry from the local reverse hash table */ +void ipc_hash_local_delete( + ipc_space_t space, + ipc_object_t obj, + mach_port_index_t index, + ipc_entry_t entry); + +/* + * Routine: ipc_hash_lookup + * Purpose: + * Converts (space, obj) -> (name, entry). + * Returns TRUE if an entry was found. 
+ * Conditions: + * The space must be locked (read or write) throughout. + */ + +boolean_t +ipc_hash_lookup( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp) +{ + boolean_t rv; + + rv = ipc_hash_local_lookup(space, obj, namep, entryp); + if (!rv) { + assert(!is_fast_space(space) || space->is_tree_hash == 0); + if (space->is_tree_hash > 0) + rv = ipc_hash_global_lookup(space, obj, namep, + (ipc_tree_entry_t *) entryp); + } + return (rv); +} + +/* + * Routine: ipc_hash_insert + * Purpose: + * Inserts an entry into the appropriate reverse hash table, + * so that ipc_hash_lookup will find it. + * Conditions: + * The space must be write-locked. + */ + +void +ipc_hash_insert( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry) +{ + mach_port_index_t index; + + index = MACH_PORT_INDEX(name); + if ((index < space->is_table_size) && + (entry == &space->is_table[index])) + ipc_hash_local_insert(space, obj, index, entry); + else { + assert(!is_fast_space(space)); + ipc_hash_global_insert(space, obj, name, + (ipc_tree_entry_t) entry); + } +} + +/* + * Routine: ipc_hash_delete + * Purpose: + * Deletes an entry from the appropriate reverse hash table. + * Conditions: + * The space must be write-locked. + */ + +void +ipc_hash_delete( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry) +{ + mach_port_index_t index; + + index = MACH_PORT_INDEX(name); + if ((index < space->is_table_size) && + (entry == &space->is_table[index])) + ipc_hash_local_delete(space, obj, index, entry); + else { + assert(!is_fast_space(space)); + ipc_hash_global_delete(space, obj, name, + (ipc_tree_entry_t) entry); + } +} + +/* + * The global reverse hash table holds splay tree entries. + * It is a simple open-chaining hash table with singly-linked buckets. + * Each bucket is locked separately, with an exclusive lock. + * Within each bucket, move-to-front is used. 
+ */ + +typedef natural_t ipc_hash_index_t; + +ipc_hash_index_t ipc_hash_global_size; +ipc_hash_index_t ipc_hash_global_mask; + +#define IH_GLOBAL_HASH(space, obj) \ + (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \ + (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \ + ipc_hash_global_mask) + +typedef struct ipc_hash_global_bucket { + decl_mutex_data(, ihgb_lock_data) + ipc_tree_entry_t ihgb_head; +} *ipc_hash_global_bucket_t; + +#define IHGB_NULL ((ipc_hash_global_bucket_t) 0) + +#define ihgb_lock_init(ihgb) mutex_init(&(ihgb)->ihgb_lock_data, \ + ETAP_IPC_IHGB) +#define ihgb_lock(ihgb) mutex_lock(&(ihgb)->ihgb_lock_data) +#define ihgb_unlock(ihgb) mutex_unlock(&(ihgb)->ihgb_lock_data) + +ipc_hash_global_bucket_t ipc_hash_global_table; + +/* + * Routine: ipc_hash_global_lookup + * Purpose: + * Converts (space, obj) -> (name, entry). + * Looks in the global table, for splay tree entries. + * Returns TRUE if an entry was found. + * Conditions: + * The space must be locked (read or write) throughout. 
+ */
+
+boolean_t
+ipc_hash_global_lookup(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_name_t *namep,
+ ipc_tree_entry_t *entryp)
+{
+ ipc_hash_global_bucket_t bucket;
+ ipc_tree_entry_t this, *last;
+ /* NOTE(review): "this" is a legal identifier in C, but this file
+ will not compile as C++ */
+
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+ assert(!is_fast_space(space));
+ bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+ ihgb_lock(bucket);
+
+ if ((this = bucket->ihgb_head) != ITE_NULL) {
+ if ((this->ite_object == obj) &&
+ (this->ite_space == space)) {
+ /* found it at front; no need to move */
+
+ *namep = this->ite_name;
+ *entryp = this;
+ } else for (last = &this->ite_next;
+ (this = *last) != ITE_NULL;
+ last = &this->ite_next) {
+ if ((this->ite_object == obj) &&
+ (this->ite_space == space)) {
+ /* found it; move to front */
+
+ *last = this->ite_next;
+ this->ite_next = bucket->ihgb_head;
+ bucket->ihgb_head = this;
+
+ *namep = this->ite_name;
+ *entryp = this;
+ break;
+ }
+ }
+ }
+
+ ihgb_unlock(bucket);
+ /* "this" is ITE_NULL here exactly when no match was found:
+ every found path leaves it pointing at the matching entry */
+ return this != ITE_NULL;
+}
+
+/*
+ * Routine: ipc_hash_global_insert
+ * Purpose:
+ * Inserts an entry into the global reverse hash table.
+ * Conditions:
+ * The space must be write-locked.
+ */
+
+void
+ipc_hash_global_insert(
+ ipc_space_t space,
+ ipc_object_t obj,
+ mach_port_name_t name,
+ ipc_tree_entry_t entry)
+{
+ ipc_hash_global_bucket_t bucket;
+
+
+ assert(!is_fast_space(space));
+
+
+ assert(entry->ite_name == name);
+ assert(space != IS_NULL);
+ assert(entry->ite_space == space);
+ assert(obj != IO_NULL);
+ assert(entry->ite_object == obj);
+
+ space->is_tree_hash++;
+ assert(space->is_tree_hash <= space->is_tree_total);
+
+ bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+ ihgb_lock(bucket);
+
+ /* insert at front of bucket */
+
+ entry->ite_next = bucket->ihgb_head;
+ bucket->ihgb_head = entry;
+
+ ihgb_unlock(bucket);
+}
+
+/*
+ * Routine: ipc_hash_global_delete
+ * Purpose:
+ * Deletes an entry from the global reverse hash table.
+ * Conditions: + * The space must be write-locked. + */ + +void +ipc_hash_global_delete( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_tree_entry_t entry) +{ + ipc_hash_global_bucket_t bucket; + ipc_tree_entry_t this, *last; + + assert(!is_fast_space(space)); + + assert(entry->ite_name == name); + assert(space != IS_NULL); + assert(entry->ite_space == space); + assert(obj != IO_NULL); + assert(entry->ite_object == obj); + + assert(space->is_tree_hash > 0); + space->is_tree_hash--; + + bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)]; + ihgb_lock(bucket); + + for (last = &bucket->ihgb_head; + (this = *last) != ITE_NULL; + last = &this->ite_next) { + if (this == entry) { + /* found it; remove from bucket */ + + *last = this->ite_next; + break; + } + } + assert(this != ITE_NULL); + + ihgb_unlock(bucket); +} + +/* + * Each space has a local reverse hash table, which holds + * entries from the space's table. In fact, the hash table + * just uses a field (ie_index) in the table itself. + * + * The local hash table is an open-addressing hash table, + * which means that when a collision occurs, instead of + * throwing the entry into a bucket, the entry is rehashed + * to another position in the table. In this case the rehash + * is very simple: linear probing (ie, just increment the position). + * This simple rehash makes deletions tractable (they're still a pain), + * but it means that collisions tend to build up into clumps. + * + * Because at least one entry in the table (index 0) is always unused, + * there will always be room in the reverse hash table. If a table + * with n slots gets completely full, the reverse hash table will + * have one giant clump of n-1 slots and one free slot somewhere. 
+ * Because entries are only entered into the reverse table if they + * are pure send rights (not receive, send-once, port-set, + * or dead-name rights), and free entries of course aren't entered, + * I expect the reverse hash table won't get unreasonably full. + * + * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2, + * pp. 135-142.) may be desirable here. They can dramatically help + * unsuccessful lookups. But unsuccessful lookups are almost always + * followed by insertions, and those slow down somewhat. They + * also can help deletions somewhat. Successful lookups aren't affected. + * So possibly a small win; probably nothing significant. + */ + +#define IH_LOCAL_HASH(obj, size) \ + ((((mach_port_index_t) (obj)) >> 6) % (size)) + +/* + * Routine: ipc_hash_local_lookup + * Purpose: + * Converts (space, obj) -> (name, entry). + * Looks in the space's local table, for table entries. + * Returns TRUE if an entry was found. + * Conditions: + * The space must be locked (read or write) throughout. + */ + +boolean_t +ipc_hash_local_lookup( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp) +{ + ipc_entry_t table; + ipc_entry_num_t size; + mach_port_index_t hindex, index; + + assert(space != IS_NULL); + assert(obj != IO_NULL); + + table = space->is_table; + size = space->is_table_size; + hindex = IH_LOCAL_HASH(obj, size); + + /* + * Ideally, table[hindex].ie_index is the name we want. + * However, must check ie_object to verify this, + * because collisions can happen. In case of a collision, + * search farther along in the clump. 
+ */ + + while ((index = table[hindex].ie_index) != 0) { + ipc_entry_t entry = &table[index]; + + if (entry->ie_object == obj) { + *entryp = entry; + *namep = MACH_PORT_MAKE(index, + IE_BITS_GEN(entry->ie_bits)); + return TRUE; + } + + if (++hindex == size) + hindex = 0; + } + + return FALSE; +} + +/* + * Routine: ipc_hash_local_insert + * Purpose: + * Inserts an entry into the space's reverse hash table. + * Conditions: + * The space must be write-locked. + */ + +void +ipc_hash_local_insert( + ipc_space_t space, + ipc_object_t obj, + mach_port_index_t index, + ipc_entry_t entry) +{ + ipc_entry_t table; + ipc_entry_num_t size; + mach_port_index_t hindex; + + assert(index != 0); + assert(space != IS_NULL); + assert(obj != IO_NULL); + + table = space->is_table; + size = space->is_table_size; + hindex = IH_LOCAL_HASH(obj, size); + + assert(entry == &table[index]); + assert(entry->ie_object == obj); + + /* + * We want to insert at hindex, but there may be collisions. + * If a collision occurs, search for the end of the clump + * and insert there. + */ + + while (table[hindex].ie_index != 0) { + if (++hindex == size) + hindex = 0; + } + + table[hindex].ie_index = index; +} + +/* + * Routine: ipc_hash_local_delete + * Purpose: + * Deletes an entry from the space's reverse hash table. + * Conditions: + * The space must be write-locked. + */ + +void +ipc_hash_local_delete( + ipc_space_t space, + ipc_object_t obj, + mach_port_index_t index, + ipc_entry_t entry) +{ + ipc_entry_t table; + ipc_entry_num_t size; + mach_port_index_t hindex, dindex; + + assert(index != MACH_PORT_NULL); + assert(space != IS_NULL); + assert(obj != IO_NULL); + + table = space->is_table; + size = space->is_table_size; + hindex = IH_LOCAL_HASH(obj, size); + + assert(entry == &table[index]); + assert(entry->ie_object == obj); + + /* + * First check we have the right hindex for this index. + * In case of collision, we have to search farther + * along in this clump. 
+ */ + + while (table[hindex].ie_index != index) { + if (++hindex == size) + hindex = 0; + } + + /* + * Now we want to set table[hindex].ie_index = 0. + * But if we aren't the last index in a clump, + * this might cause problems for lookups of objects + * farther along in the clump that are displaced + * due to collisions. Searches for them would fail + * at hindex instead of succeeding. + * + * So we must check the clump after hindex for objects + * that are so displaced, and move one up to the new hole. + * + * hindex - index of new hole in the clump + * dindex - index we are checking for a displaced object + * + * When we move a displaced object up into the hole, + * it creates a new hole, and we have to repeat the process + * until we get to the end of the clump. + */ + + for (dindex = hindex; index != 0; hindex = dindex) { + for (;;) { + mach_port_index_t tindex; + ipc_object_t tobj; + + if (++dindex == size) + dindex = 0; + assert(dindex != hindex); + + /* are we at the end of the clump? */ + + index = table[dindex].ie_index; + if (index == 0) + break; + + /* is this a displaced object? */ + + tobj = table[index].ie_object; + assert(tobj != IO_NULL); + tindex = IH_LOCAL_HASH(tobj, size); + + if ((dindex < hindex) ? + ((dindex < tindex) && (tindex <= hindex)) : + ((dindex < tindex) || (tindex <= hindex))) + break; + } + + table[hindex].ie_index = index; + } +} + +/* + * Routine: ipc_hash_init + * Purpose: + * Initialize the reverse hash table implementation. 
+ */ + +void +ipc_hash_init(void) +{ + ipc_hash_index_t i; + + /* if not configured, initialize ipc_hash_global_size */ + + if (ipc_hash_global_size == 0) { + ipc_hash_global_size = ipc_tree_entry_max >> 8; + if (ipc_hash_global_size < 32) + ipc_hash_global_size = 32; + } + + /* make sure it is a power of two */ + + ipc_hash_global_mask = ipc_hash_global_size - 1; + if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) { + natural_t bit; + + /* round up to closest power of two */ + + for (bit = 1;; bit <<= 1) { + ipc_hash_global_mask |= bit; + ipc_hash_global_size = ipc_hash_global_mask + 1; + + if ((ipc_hash_global_size & ipc_hash_global_mask) == 0) + break; + } + } + + /* allocate ipc_hash_global_table */ + + ipc_hash_global_table = (ipc_hash_global_bucket_t) + kalloc((vm_size_t) (ipc_hash_global_size * + sizeof(struct ipc_hash_global_bucket))); + assert(ipc_hash_global_table != IHGB_NULL); + + /* and initialize it */ + + for (i = 0; i < ipc_hash_global_size; i++) { + ipc_hash_global_bucket_t bucket; + + bucket = &ipc_hash_global_table[i]; + ihgb_lock_init(bucket); + bucket->ihgb_head = ITE_NULL; + } +} + +#if MACH_IPC_DEBUG + +/* + * Routine: ipc_hash_info + * Purpose: + * Return information about the global reverse hash table. + * Fills the buffer with as much information as possible + * and returns the desired size of the buffer. + * Conditions: + * Nothing locked. The caller should provide + * possibly-pageable memory. 
+ */ + + +ipc_hash_index_t +ipc_hash_info( + hash_info_bucket_t *info, + mach_msg_type_number_t count) +{ + ipc_hash_index_t i; + + if (ipc_hash_global_size < count) + count = ipc_hash_global_size; + + for (i = 0; i < count; i++) { + ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i]; + unsigned int bucket_count = 0; + ipc_tree_entry_t entry; + + ihgb_lock(bucket); + for (entry = bucket->ihgb_head; + entry != ITE_NULL; + entry = entry->ite_next) + bucket_count++; + ihgb_unlock(bucket); + + /* don't touch pageable memory while holding locks */ + info[i].hib_count = bucket_count; + } + + return ipc_hash_global_size; +} + +#endif /* MACH_IPC_DEBUG */ diff --git a/osfmk/ipc/ipc_hash.h b/osfmk/ipc/ipc_hash.h new file mode 100644 index 000000000..2b864e5fc --- /dev/null +++ b/osfmk/ipc/ipc_hash.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_hash.h + * Author: Rich Draves + * Date: 1989 + * + * Declarations of entry hash table operations. + */ + +#ifndef _IPC_IPC_HASH_H_ +#define _IPC_IPC_HASH_H_ + +#include +#include +#include + +/* + * Exported interfaces + */ + +/* Lookup (space, obj) in the appropriate reverse hash table */ +extern boolean_t ipc_hash_lookup( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp); + +/* Insert an entry into the appropriate reverse hash table */ +extern void ipc_hash_insert( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry); + +/* Delete an entry from the appropriate reverse hash table */ +extern void ipc_hash_delete( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry); + +/* + * For use by functions that know what they're doing: + * the global primitives, for splay tree entries, + * and the local primitives, for table entries. 
+ */ + +/* Delete an entry from the global reverse hash table */ +extern void ipc_hash_global_delete( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_tree_entry_t entry); + +/* Lookup (space, obj) in local hash table */ +extern boolean_t ipc_hash_local_lookup( + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp); + +/* Inserts an entry into the local reverse hash table */ +extern void ipc_hash_local_insert( + ipc_space_t space, + ipc_object_t obj, + mach_port_index_t index, + ipc_entry_t entry); + +/* Initialize the reverse hash table implementation */ +extern void ipc_hash_init(void); + +#include + +#if MACH_IPC_DEBUG + +extern natural_t ipc_hash_info( + hash_info_bucket_t *info, + mach_msg_type_number_t count); + +#endif /* MACH_IPC_DEBUG */ + +#endif /* _IPC_IPC_HASH_H_ */ diff --git a/osfmk/ipc/ipc_init.c b/osfmk/ipc/ipc_init.c new file mode 100644 index 000000000..a32e5c24a --- /dev/null +++ b/osfmk/ipc/ipc_init.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_init.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to initialize the IPC system. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* NDR_record */ + +vm_map_t ipc_kernel_map; +vm_size_t ipc_kernel_map_size = 1024 * 1024; + +vm_map_t ipc_kernel_copy_map; +#define IPC_KERNEL_COPY_MAP_SIZE (8 * 1024 * 1024) +vm_size_t ipc_kernel_copy_map_size = IPC_KERNEL_COPY_MAP_SIZE; +vm_size_t ipc_kmsg_max_vm_space = (IPC_KERNEL_COPY_MAP_SIZE * 7)/8; + +int ipc_space_max = SPACE_MAX; +int ipc_tree_entry_max = ITE_MAX; +int ipc_port_max = PORT_MAX; +int ipc_pset_max = SET_MAX; + +extern void mig_init(void); +extern void ikm_cache_init(void); + +/* + * Routine: ipc_bootstrap + * Purpose: + * Initialization needed before the kernel task + * can be created. + */ + +void +ipc_bootstrap(void) +{ + kern_return_t kr; + + ipc_port_multiple_lock_init(); + + ipc_port_timestamp_lock_init(); + ipc_port_timestamp_data = 0; + + /* all IPC zones should be exhaustible */ + + ipc_space_zone = zinit(sizeof(struct ipc_space), + ipc_space_max * sizeof(struct ipc_space), + sizeof(struct ipc_space), + "ipc spaces"); +#if 0 + /* make it exhaustible */ + zone_change(ipc_space_zone, Z_EXHAUST, TRUE); +#endif + + ipc_tree_entry_zone = + zinit(sizeof(struct ipc_tree_entry), + ipc_tree_entry_max * sizeof(struct ipc_tree_entry), + sizeof(struct ipc_tree_entry), + "ipc tree entries"); +#if 0 + /* make it exhaustible */ + zone_change(ipc_tree_entry_zone, Z_EXHAUST, TRUE); +#endif + + /* + * populate all port(set) zones + */ + ipc_object_zones[IOT_PORT] = + zinit(sizeof(struct ipc_port), + ipc_port_max * sizeof(struct ipc_port), + sizeof(struct ipc_port), + "ipc ports"); + /* + * XXX Can't make the port zone exhaustible because the kernel + * XXX panics when port allocation for an internal object fails. 
+ *zone_change(ipc_object_zones[IOT_PORT], Z_EXHAUST, TRUE); + */ + + ipc_object_zones[IOT_PORT_SET] = + zinit(sizeof(struct ipc_pset), + ipc_pset_max * sizeof(struct ipc_pset), + sizeof(struct ipc_pset), + "ipc port sets"); + /* make it exhaustible */ + zone_change(ipc_object_zones[IOT_PORT_SET], Z_EXHAUST, TRUE); + + /* create special spaces */ + + kr = ipc_space_create_special(&ipc_space_kernel); + assert(kr == KERN_SUCCESS); + + + kr = ipc_space_create_special(&ipc_space_reply); + assert(kr == KERN_SUCCESS); + + /* initialize modules with hidden data structures */ + +#if MACH_ASSERT + ipc_port_debug_init(); +#endif + mig_init(); + ipc_table_init(); + ipc_notify_init(); + ipc_hash_init(); + ipc_kmsg_init(); + semaphore_init(); + lock_set_init(); +} + +/* + * XXX tunable, belongs in mach.message.h + */ +#define MSG_OOL_SIZE_SMALL_MAX 4096 +vm_size_t msg_ool_size_small; + +/* + * Routine: ipc_init + * Purpose: + * Final initialization of the IPC system. + */ + +void +ipc_init(void) +{ + kern_return_t retval; + vm_offset_t min, max; + extern vm_size_t kalloc_max_prerounded; + + retval = kmem_suballoc(kernel_map, &min, ipc_kernel_map_size, + TRUE, TRUE, &ipc_kernel_map); + if (retval != KERN_SUCCESS) + panic("ipc_init: kmem_suballoc of ipc_kernel_map failed"); + + retval = kmem_suballoc(kernel_map, &min, ipc_kernel_copy_map_size, + TRUE, TRUE, &ipc_kernel_copy_map); + if (retval != KERN_SUCCESS) + panic("ipc_init: kmem_suballoc of ipc_kernel_copy_map failed"); + + ipc_kernel_copy_map->no_zero_fill = TRUE; + ipc_kernel_copy_map->wait_for_space = TRUE; + + /* + * As an optimization, 'small' out of line data regions using a + * physical copy strategy are copied into kalloc'ed buffers. + * The value of 'small' is determined here. Requests kalloc() + * with sizes greater or equal to kalloc_max_prerounded may fail. 
+ */ + if (kalloc_max_prerounded <= MSG_OOL_SIZE_SMALL_MAX) { + msg_ool_size_small = kalloc_max_prerounded; + } + else { + msg_ool_size_small = MSG_OOL_SIZE_SMALL_MAX; + } + + ipc_host_init(); +} diff --git a/osfmk/ipc/ipc_init.h b/osfmk/ipc/ipc_init.h new file mode 100644 index 000000000..dbe2831f2 --- /dev/null +++ b/osfmk/ipc/ipc_init.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:15 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 02:07:56 ezf + * change marker to not FREE + * [1994/09/22 21:29:04 ezf] + * + * Revision 1.1.2.4 1993/07/22 16:16:03 rod + * Add ANSI prototypes. CR #9523. 
+ * [1993/07/22 13:29:57 rod] + * + * Revision 1.1.2.3 1993/06/07 22:10:25 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:01:24 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:31:04 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:09:31 jeffc] + * + * Revision 1.1 1992/09/30 02:28:50 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:32:45 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:21:42 mrt + * Changed to new Mach copyright + * [91/02/01 15:45:16 mrt] + * + * Revision 2.2 90/06/02 14:49:59 rpd + * Created for new IPC. + * [90/03/26 20:55:26 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_init.h + * Author: Rich Draves + * Date: 1989 + * + * Declarations of functions to initialize the IPC system. 
+ */ + +#ifndef _IPC_IPC_INIT_H_ +#define _IPC_IPC_INIT_H_ + +extern int ipc_space_max; +extern int ipc_tree_entry_max; +extern int ipc_port_max; +extern int ipc_pset_max; + +/* + * Exported interfaces + */ + +/* IPC initialization needed before creation of kernel task */ +extern void ipc_bootstrap(void); + +/* Remaining IPC initialization */ +extern void ipc_init(void); + +#endif /* _IPC_IPC_INIT_H_ */ diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c new file mode 100644 index 000000000..04932517e --- /dev/null +++ b/osfmk/ipc/ipc_kmsg.c @@ -0,0 +1,3091 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_kmsg.c + * Author: Rich Draves + * Date: 1989 + * + * Operations on kernel messages. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern vm_map_t ipc_kernel_copy_map; +extern vm_size_t ipc_kmsg_max_vm_space; +extern vm_size_t msg_ool_size_small; + +#define MSG_OOL_SIZE_SMALL msg_ool_size_small + + +/* + * Forward declarations + */ + +void ipc_kmsg_clean( + ipc_kmsg_t kmsg); + +void ipc_kmsg_clean_body( + ipc_kmsg_t kmsg, + mach_msg_type_number_t number); + +void ipc_kmsg_clean_partial( + ipc_kmsg_t kmsg, + mach_msg_type_number_t number, + vm_offset_t paddr, + vm_size_t length); + +mach_msg_return_t ipc_kmsg_copyout_body( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist); + +mach_msg_return_t ipc_kmsg_copyin_body( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map); + +void ikm_cache_init(void); +/* + * We keep a per-processor cache of kernel message buffers. + * The cache saves the overhead/locking of using kalloc/kfree. + * The per-processor cache seems to miss less than a per-thread cache, + * and it also uses less memory. Access to the cache doesn't + * require locking. + */ +#define IKM_STASH 16 /* # of cache entries per cpu */ +ipc_kmsg_t ipc_kmsg_cache[ NCPUS ][ IKM_STASH ]; +unsigned int ipc_kmsg_cache_avail[NCPUS]; + +/* + * Routine: ipc_kmsg_init + * Purpose: + * Initialize the kmsg system. For each CPU, we need to + * pre-stuff the kmsg cache. 
+ */ +void +ipc_kmsg_init() +{ + unsigned int cpu, i; + + for (cpu = 0; cpu < NCPUS; ++cpu) { + for (i = 0; i < IKM_STASH; ++i) { + ipc_kmsg_t kmsg; + + kmsg = (ipc_kmsg_t) + kalloc(ikm_plus_overhead(IKM_SAVED_MSG_SIZE)); + if (kmsg == IKM_NULL) + panic("ipc_kmsg_init"); + ikm_init(kmsg, IKM_SAVED_MSG_SIZE); + ipc_kmsg_cache[cpu][i] = kmsg; + } + ipc_kmsg_cache_avail[cpu] = IKM_STASH; + } +} + +/* + * Routine: ipc_kmsg_alloc + * Purpose: + * Allocate a kernel message structure. If we can get one from + * the cache, that is best. Otherwise, allocate a new one. + * Conditions: + * Nothing locked. + */ +ipc_kmsg_t +ipc_kmsg_alloc( + mach_msg_size_t msg_and_trailer_size) +{ + ipc_kmsg_t kmsg; + + if ((msg_and_trailer_size <= IKM_SAVED_MSG_SIZE)) { + unsigned int cpu, i; + + disable_preemption(); + cpu = cpu_number(); + if ((i = ipc_kmsg_cache_avail[cpu]) > 0) { + assert(i <= IKM_STASH); + kmsg = ipc_kmsg_cache[cpu][--i]; + ipc_kmsg_cache_avail[cpu] = i; + ikm_check_init(kmsg, IKM_SAVED_MSG_SIZE); + enable_preemption(); + return (kmsg); + } + enable_preemption(); + } + + /* round up for ikm_cache */ + if (msg_and_trailer_size < IKM_SAVED_MSG_SIZE) + msg_and_trailer_size = IKM_SAVED_MSG_SIZE; + + kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(msg_and_trailer_size)); + if (kmsg != IKM_NULL) { + ikm_init(kmsg, msg_and_trailer_size); + } + return(kmsg); +} + +/* + * Routine: ipc_kmsg_free + * Purpose: + * Free a kernel message buffer. If the kms is preallocated + * to a port, just "put it back (marked unused)." We have to + * do this with the port locked. The port may have its hold + * on our message released. In that case, we have to just + * revert the message to a traditional one and free it normally. + * Conditions: + * Nothing locked. + */ + +void +ipc_kmsg_free( + ipc_kmsg_t kmsg) +{ + mach_msg_size_t size = kmsg->ikm_size; + ipc_port_t port; + + /* + * Check to see if the message is bound to the port. If so, + * mark it not in use. 
If the port isn't already dead, then + * leave the message associated with it. Otherwise, free it + * (not to the cache). + */ + port = ikm_prealloc_inuse_port(kmsg); + if (port != IP_NULL) { + ip_lock(port); + ikm_prealloc_clear_inuse(kmsg, port); + if (ip_active(port) && (port->ip_premsg == kmsg)) { + assert(IP_PREALLOC(port)); + ip_unlock(port); + return; + } + ip_unlock(port); + goto free_it; + } + + /* + * Peek and see if it has to go back in the cache. + */ + if (kmsg->ikm_size == IKM_SAVED_MSG_SIZE && + ipc_kmsg_cache_avail[cpu_number()] < IKM_STASH) { + unsigned int cpu, i; + + disable_preemption(); + cpu = cpu_number(); + + i = ipc_kmsg_cache_avail[cpu]; + if (i < IKM_STASH) { + assert(i >= 0); + ipc_kmsg_cache[cpu][i] = kmsg; + ipc_kmsg_cache_avail[cpu] = i + 1; + enable_preemption(); + return; + } + enable_preemption(); + } + + free_it: + kfree((vm_offset_t) kmsg, ikm_plus_overhead(size)); +} + + +/* + * Routine: ipc_kmsg_enqueue + * Purpose: + * Enqueue a kmsg. + */ + +void +ipc_kmsg_enqueue( + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) +{ + ipc_kmsg_enqueue_macro(queue, kmsg); +} + +/* + * Routine: ipc_kmsg_dequeue + * Purpose: + * Dequeue and return a kmsg. + */ + +ipc_kmsg_t +ipc_kmsg_dequeue( + ipc_kmsg_queue_t queue) +{ + ipc_kmsg_t first; + + first = ipc_kmsg_queue_first(queue); + + if (first != IKM_NULL) + ipc_kmsg_rmqueue_first_macro(queue, first); + + return first; +} + +/* + * Routine: ipc_kmsg_rmqueue + * Purpose: + * Pull a kmsg out of a queue. 
+ */ + +void +ipc_kmsg_rmqueue( + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) +{ + ipc_kmsg_t next, prev; + + assert(queue->ikmq_base != IKM_NULL); + + next = kmsg->ikm_next; + prev = kmsg->ikm_prev; + + if (next == kmsg) { + assert(prev == kmsg); + assert(queue->ikmq_base == kmsg); + + queue->ikmq_base = IKM_NULL; + } else { + if (queue->ikmq_base == kmsg) + queue->ikmq_base = next; + + next->ikm_prev = prev; + prev->ikm_next = next; + } + /* XXX Temporary debug logic */ + assert(kmsg->ikm_next = IKM_BOGUS); + assert(kmsg->ikm_prev = IKM_BOGUS); +} + +/* + * Routine: ipc_kmsg_queue_next + * Purpose: + * Return the kmsg following the given kmsg. + * (Or IKM_NULL if it is the last one in the queue.) + */ + +ipc_kmsg_t +ipc_kmsg_queue_next( + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) +{ + ipc_kmsg_t next; + + assert(queue->ikmq_base != IKM_NULL); + + next = kmsg->ikm_next; + if (queue->ikmq_base == next) + next = IKM_NULL; + + return next; +} + +/* + * Routine: ipc_kmsg_destroy + * Purpose: + * Destroys a kernel message. Releases all rights, + * references, and memory held by the message. + * Frees the message. + * Conditions: + * No locks held. + */ + +void +ipc_kmsg_destroy( + ipc_kmsg_t kmsg) +{ + ipc_kmsg_queue_t queue; + boolean_t empty; + + /* + * ipc_kmsg_clean can cause more messages to be destroyed. + * Curtail recursion by queueing messages. If a message + * is already queued, then this is a recursive call. + */ + + queue = &(current_thread()->ith_messages); + empty = ipc_kmsg_queue_empty(queue); + ipc_kmsg_enqueue(queue, kmsg); + + if (empty) { + /* must leave kmsg in queue while cleaning it */ + + while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) { + ipc_kmsg_clean(kmsg); + ipc_kmsg_rmqueue(queue, kmsg); + ipc_kmsg_free(kmsg); + } + } +} + +/* + * Routine: ipc_kmsg_destroy_dest + * Purpose: + * Destroys a kernel message. Releases all rights, + * references, and memory held by the message (including + * the destination port reference. 
+ * Frees the message. + * Conditions: + * No locks held. + */ + +ipc_kmsg_destroy_dest( + ipc_kmsg_t kmsg) +{ + ipc_port_t port; + + port = kmsg->ikm_header.msgh_remote_port; + + ipc_port_release(port); + kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL; + ipc_kmsg_destroy(kmsg); +} + +/* + * Routine: ipc_kmsg_clean_body + * Purpose: + * Cleans the body of a kernel message. + * Releases all rights, references, and memory. + * + * Conditions: + * No locks held. + */ + +void +ipc_kmsg_clean_body( + ipc_kmsg_t kmsg, + mach_msg_type_number_t number) +{ + mach_msg_descriptor_t *saddr, *eaddr; + + if ( number == 0 ) + return; + + saddr = (mach_msg_descriptor_t *) + ((mach_msg_base_t *) &kmsg->ikm_header + 1); + eaddr = saddr + number; + + for ( ; saddr < eaddr; saddr++ ) { + + switch (saddr->type.type) { + + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc; + + dsc = &saddr->port; + + /* + * Destroy port rights carried in the message + */ + if (!IO_VALID((ipc_object_t) dsc->name)) + continue; + ipc_object_destroy((ipc_object_t) dsc->name, dsc->disposition); + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR : { + mach_msg_ool_descriptor_t *dsc; + + dsc = &saddr->out_of_line; + + /* + * Destroy memory carried in the message + */ + if (dsc->size == 0) { + assert(dsc->address == (void *) 0); + } else { + vm_map_copy_discard((vm_map_copy_t) dsc->address); + } + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR : { + ipc_object_t *objects; + mach_msg_type_number_t j; + mach_msg_ool_ports_descriptor_t *dsc; + + dsc = &saddr->ool_ports; + objects = (ipc_object_t *) dsc->address; + + if (dsc->count == 0) { + break; + } + + assert(objects != (ipc_object_t *) 0); + + /* destroy port rights carried in the message */ + + for (j = 0; j < dsc->count; j++) { + ipc_object_t object = objects[j]; + + if (!IO_VALID(object)) + continue; + + ipc_object_destroy(object, dsc->disposition); + } + + /* destroy memory carried in the message */ + 
+ assert(dsc->count != 0); + + kfree((vm_offset_t) dsc->address, + (vm_size_t) dsc->count * sizeof(mach_port_name_t)); + break; + } + default : { + printf("cleanup: don't understand this type of descriptor\n"); + } + } + } +} + +/* + * Routine: ipc_kmsg_clean_partial + * Purpose: + * Cleans a partially-acquired kernel message. + * number is the index of the type descriptor + * in the body of the message that contained the error. + * If dolast, the memory and port rights in this last + * type spec are also cleaned. In that case, number + * specifies the number of port rights to clean. + * Conditions: + * Nothing locked. + */ + +void +ipc_kmsg_clean_partial( + ipc_kmsg_t kmsg, + mach_msg_type_number_t number, + vm_offset_t paddr, + vm_size_t length) +{ + ipc_object_t object; + mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits; + + object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port; + assert(IO_VALID(object)); + ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits)); + + object = (ipc_object_t) kmsg->ikm_header.msgh_local_port; + if (IO_VALID(object)) + ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits)); + + if (paddr) { + (void) vm_deallocate(ipc_kernel_copy_map, paddr, length); + } + + ipc_kmsg_clean_body(kmsg, number); +} + +/* + * Routine: ipc_kmsg_clean + * Purpose: + * Cleans a kernel message. Releases all rights, + * references, and memory held by the message. + * Conditions: + * No locks held. 
 */

void
ipc_kmsg_clean(
	ipc_kmsg_t	kmsg)
{
	ipc_object_t object;
	mach_msg_bits_t mbits;

	/* Destroy the destination (remote) right, if any. */
	mbits = kmsg->ikm_header.msgh_bits;
	object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
	if (IO_VALID(object))
		ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));

	/* Destroy the reply (local) right, if any. */
	object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
	if (IO_VALID(object))
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));

	/*
	 * A complex message carries a descriptor body immediately after
	 * the header; release the rights and memory those descriptors
	 * reference as well.
	 */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count);
	}
}

/*
 *	Routine:	ipc_kmsg_set_prealloc
 *	Purpose:
 *		Assign a kmsg as a preallocated message buffer to a port.
 *	Conditions:
 *		port locked.
 */

void
ipc_kmsg_set_prealloc(
	ipc_kmsg_t		kmsg,
	ipc_port_t		port)
{
	assert(kmsg->ikm_prealloc == IP_NULL);

	/*
	 * NOTE(review): ikm_prealloc is deliberately left IP_NULL here.
	 * It appears to be set only while the buffer is actually in use
	 * (see ikm_prealloc_set_inuse in the send-from-kernel path); the
	 * port->kmsg association is recorded via IP_SET_PREALLOC instead.
	 * Confirm against the ikm_prealloc_* macros in ipc_kmsg.h.
	 */
	kmsg->ikm_prealloc = IP_NULL;
	IP_SET_PREALLOC(port, kmsg);
}

/*
 *	Routine:	ipc_kmsg_clear_prealloc
 *	Purpose:
 *		Release the assignment of a preallocated message buffer from a port.
 *	Conditions:
 *		port locked.
 */
void
ipc_kmsg_clear_prealloc(
	ipc_kmsg_t		kmsg,
	ipc_port_t		port)
{
	/* must currently be assigned to (in use by) this port */
	assert(kmsg->ikm_prealloc == port);

	kmsg->ikm_prealloc = IP_NULL;
	IP_CLEAR_PREALLOC(port, kmsg);
}

/*
 *	Routine:	ipc_kmsg_get
 *	Purpose:
 *		Allocates a kernel message buffer.
 *		Copies a user message to the message buffer.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Acquired a message buffer.
 *		MACH_SEND_MSG_TOO_SMALL	Message smaller than a header.
 *		MACH_SEND_MSG_TOO_SMALL	Message size not long-word multiple.
 *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 *		MACH_SEND_INVALID_DATA	Couldn't copy message data.
+ */ + +mach_msg_return_t +ipc_kmsg_get( + mach_msg_header_t *msg, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp) +{ + mach_msg_size_t msg_and_trailer_size; + ipc_kmsg_t kmsg; + mach_msg_format_0_trailer_t *trailer; + mach_port_name_t dest_name; + ipc_entry_t dest_entry; + ipc_port_t dest_port; + + if ((size < sizeof(mach_msg_header_t)) || (size & 3)) + return MACH_SEND_MSG_TOO_SMALL; + + msg_and_trailer_size = size + MAX_TRAILER_SIZE; + + kmsg = ipc_kmsg_alloc(msg_and_trailer_size); + + if (kmsg == IKM_NULL) + return MACH_SEND_NO_BUFFER; + + if (copyinmsg((char *) msg, (char *) &kmsg->ikm_header, size)) { + ipc_kmsg_free(kmsg); + return MACH_SEND_INVALID_DATA; + } + + kmsg->ikm_header.msgh_size = size; + + /* + * I reserve for the trailer the largest space (MAX_TRAILER_SIZE) + * However, the internal size field of the trailer (msgh_trailer_size) + * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize + * the cases where no implicit data is requested. + */ + trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t)&kmsg->ikm_header + size); + trailer->msgh_sender = current_thread()->top_act->task->sec_token; + trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; + + *kmsgp = kmsg; + return MACH_MSG_SUCCESS; +} + +/* + * Routine: ipc_kmsg_get_from_kernel + * Purpose: + * Allocates a kernel message buffer. + * Copies a kernel message to the message buffer. + * Only resource errors are allowed. + * Conditions: + * Nothing locked. + * Ports in header are ipc_port_t. + * Returns: + * MACH_MSG_SUCCESS Acquired a message buffer. + * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer. 
+ */ + +mach_msg_return_t +ipc_kmsg_get_from_kernel( + mach_msg_header_t *msg, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp) +{ + ipc_kmsg_t kmsg; + mach_msg_size_t msg_and_trailer_size; + mach_msg_format_0_trailer_t *trailer; + ipc_port_t dest_port; + + assert(size >= sizeof(mach_msg_header_t)); + assert((size & 3) == 0); + + assert(IP_VALID((ipc_port_t) msg->msgh_remote_port)); + dest_port = (ipc_port_t)msg->msgh_remote_port; + + msg_and_trailer_size = size + MAX_TRAILER_SIZE; + + /* + * See if the port has a pre-allocated kmsg for kernel + * clients. These are set up for those kernel clients + * which cannot afford to wait. + */ + if (IP_PREALLOC(dest_port)) { + ip_lock(dest_port); + if (!ip_active(dest_port)) { + ip_unlock(dest_port); + return MACH_SEND_NO_BUFFER; + } + assert(IP_PREALLOC(dest_port)); + kmsg = dest_port->ip_premsg; + if (msg_and_trailer_size > kmsg->ikm_size) { + ip_unlock(dest_port); + return MACH_SEND_TOO_LARGE; + } + if (ikm_prealloc_inuse(kmsg)) { + ip_unlock(dest_port); + return MACH_SEND_NO_BUFFER; + } + ikm_prealloc_set_inuse(kmsg, dest_port); + ip_unlock(dest_port); + } else { + kmsg = ipc_kmsg_alloc(msg_and_trailer_size); + if (kmsg == IKM_NULL) + return MACH_SEND_NO_BUFFER; + } + + (void) memcpy((void *) &kmsg->ikm_header, (const void *) msg, size); + + kmsg->ikm_header.msgh_size = size; + + /* + * I reserve for the trailer the largest space (MAX_TRAILER_SIZE) + * However, the internal size field of the trailer (msgh_trailer_size) + * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to + * optimize the cases where no implicit data is requested. + */ + trailer = (mach_msg_format_0_trailer_t *) + ((vm_offset_t)&kmsg->ikm_header + size); + trailer->msgh_sender = KERNEL_SECURITY_TOKEN; + trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; + + *kmsgp = kmsg; + return MACH_MSG_SUCCESS; +} + +/* + * Routine: ipc_kmsg_send + * Purpose: + * Send a message. 
The message holds a reference + * for the destination port in the msgh_remote_port field. + * + * If unsuccessful, the caller still has possession of + * the message and must do something with it. If successful, + * the message is queued, given to a receiver, destroyed, + * or handled directly by the kernel via mach_msg. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS The message was accepted. + * MACH_SEND_TIMED_OUT Caller still has message. + * MACH_SEND_INTERRUPTED Caller still has message. + */ +mach_msg_return_t +ipc_kmsg_send( + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t timeout) +{ + kern_return_t save_wait_result; + + ipc_port_t port; + port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port; + assert(IP_VALID(port)); + + ip_lock(port); + + if (port->ip_receiver == ipc_space_kernel) { + + /* + * We can check ip_receiver == ipc_space_kernel + * before checking that the port is active because + * ipc_port_dealloc_kernel clears ip_receiver + * before destroying a kernel port. + */ + assert(ip_active(port)); + port->ip_messages.imq_seqno++; + ip_unlock(port); + + current_task()->messages_sent++; + + /* + * Call the server routine, and get the reply message to send. + */ + kmsg = ipc_kobject_server(kmsg); + if (kmsg == IKM_NULL) + return MACH_MSG_SUCCESS; + + port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port; + assert(IP_VALID(port)); + ip_lock(port); + /* fall thru with reply - same options */ + } + + /* + * Can't deliver to a dead port. + * However, we can pretend it got sent + * and was then immediately destroyed. + */ + if (!ip_active(port)) { + /* + * We can't let ipc_kmsg_destroy deallocate + * the port right, because we might end up + * in an infinite loop trying to deliver + * a send-once notification. 
+ */ + + ip_release(port); + ip_check_unlock(port); + kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL; + ipc_kmsg_destroy(kmsg); + return MACH_MSG_SUCCESS; + } + + if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) { + ip_unlock(port); + + /* don't allow the creation of a circular loop */ + + ipc_kmsg_destroy(kmsg); + return MACH_MSG_SUCCESS; + } + + /* + * We have a valid message and a valid reference on the port. + * we can unlock the port and call mqueue_send() on it's message + * queue. + */ + ip_unlock(port); + return (ipc_mqueue_send(&port->ip_messages, kmsg, option, timeout)); +} + +/* + * Routine: ipc_kmsg_put + * Purpose: + * Copies a message buffer to a user message. + * Copies only the specified number of bytes. + * Frees the message buffer. + * Conditions: + * Nothing locked. The message buffer must have clean + * header fields. + * Returns: + * MACH_MSG_SUCCESS Copied data out of message buffer. + * MACH_RCV_INVALID_DATA Couldn't copy to user message. + */ + +mach_msg_return_t +ipc_kmsg_put( + mach_msg_header_t *msg, + ipc_kmsg_t kmsg, + mach_msg_size_t size) +{ + mach_msg_return_t mr; + + if (copyoutmsg((const char *) &kmsg->ikm_header, (char *) msg, size)) + mr = MACH_RCV_INVALID_DATA; + else + mr = MACH_MSG_SUCCESS; + + ipc_kmsg_free(kmsg); + return mr; +} + +/* + * Routine: ipc_kmsg_put_to_kernel + * Purpose: + * Copies a message buffer to a kernel message. + * Frees the message buffer. + * No errors allowed. + * Conditions: + * Nothing locked. + */ + +void +ipc_kmsg_put_to_kernel( + mach_msg_header_t *msg, + ipc_kmsg_t kmsg, + mach_msg_size_t size) +{ + (void) memcpy((void *) msg, (const void *) &kmsg->ikm_header, size); + + ipc_kmsg_free(kmsg); +} + +/* + * Routine: ipc_kmsg_copyin_header + * Purpose: + * "Copy-in" port rights in the header of a message. + * Operates atomically; if it doesn't succeed the + * message header and the space are left untouched. 
+ * If it does succeed the remote/local port fields + * contain object pointers instead of port names, + * and the bits field is updated. The destination port + * will be a valid port pointer. + * + * The notify argument implements the MACH_SEND_CANCEL option. + * If it is not MACH_PORT_NULL, it should name a receive right. + * If the processing of the destination port would generate + * a port-deleted notification (because the right for the + * destination port is destroyed and it had a request for + * a dead-name notification registered), and the port-deleted + * notification would be sent to the named receive right, + * then it isn't sent and the send-once right for the notify + * port is quietly destroyed. + * + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Successful copyin. + * MACH_SEND_INVALID_HEADER + * Illegal value in the message header bits. + * MACH_SEND_INVALID_DEST The space is dead. + * MACH_SEND_INVALID_NOTIFY + * Notify is non-null and doesn't name a receive right. + * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.) + * MACH_SEND_INVALID_DEST Can't copyin destination port. + * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.) + * MACH_SEND_INVALID_REPLY Can't copyin reply port. + * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.) + */ + +mach_msg_return_t +ipc_kmsg_copyin_header( + mach_msg_header_t *msg, + ipc_space_t space, + mach_port_name_t notify) +{ + mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER; + mach_port_name_t dest_name = (mach_port_name_t)msg->msgh_remote_port; + mach_port_name_t reply_name = (mach_port_name_t)msg->msgh_local_port; + kern_return_t kr; + + mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits); + mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits); + ipc_object_t dest_port, reply_port; + ipc_port_t dest_soright, reply_soright; + ipc_port_t notify_port; + + if ((mbits != msg->msgh_bits) || + (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) || + ((reply_type == 0) ? 
+ (reply_name != MACH_PORT_NULL) : + !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) + return MACH_SEND_INVALID_HEADER; + + reply_soright = IP_NULL; /* in case we go to invalid dest early */ + + is_write_lock(space); + if (!space->is_active) + goto invalid_dest; + + if (!MACH_PORT_VALID(dest_name)) + goto invalid_dest; + + if (notify != MACH_PORT_NULL) { + ipc_entry_t entry; + + if ((entry = ipc_entry_lookup(space, notify)) == IE_NULL) { + is_write_unlock(space); + return MACH_SEND_INVALID_NOTIFY; + } + if((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) { + is_write_unlock(space); + return MACH_SEND_INVALID_NOTIFY; + } + + notify_port = (ipc_port_t) entry->ie_object; + } + + if (dest_name == reply_name) { + ipc_entry_t entry; + mach_port_name_t name = dest_name; + + /* + * Destination and reply ports are the same! + * This is a little tedious to make atomic, because + * there are 25 combinations of dest_type/reply_type. + * However, most are easy. If either is move-sonce, + * then there must be an error. If either are + * make-send or make-sonce, then we must be looking + * at a receive right so the port can't die. + * The hard cases are the combinations of + * copy-send and make-send. + */ + + entry = ipc_entry_lookup(space, name); + if (entry == IE_NULL) + goto invalid_dest; + + assert(reply_type != 0); /* because name not null */ + + if (!ipc_right_copyin_check(space, name, entry, reply_type)) + goto invalid_reply; + + if ((dest_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) || + (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE)) { + /* + * Why must there be an error? To get a valid + * destination, this entry must name a live + * port (not a dead name or dead port). However + * a successful move-sonce will destroy a + * live entry. Therefore the other copyin, + * whatever it is, would fail. We've already + * checked for reply port errors above, + * so report a destination error. 
+ */ + + goto invalid_dest; + } else if ((dest_type == MACH_MSG_TYPE_MAKE_SEND) || + (dest_type == MACH_MSG_TYPE_MAKE_SEND_ONCE) || + (reply_type == MACH_MSG_TYPE_MAKE_SEND) || + (reply_type == MACH_MSG_TYPE_MAKE_SEND_ONCE)) { + kr = ipc_right_copyin(space, name, entry, + dest_type, FALSE, + &dest_port, &dest_soright); + if (kr != KERN_SUCCESS) + goto invalid_dest; + + /* + * Either dest or reply needs a receive right. + * We know the receive right is there, because + * of the copyin_check and copyin calls. Hence + * the port is not in danger of dying. If dest + * used the receive right, then the right needed + * by reply (and verified by copyin_check) will + * still be there. + */ + + assert(IO_VALID(dest_port)); + assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE); + assert(dest_soright == IP_NULL); + + kr = ipc_right_copyin(space, name, entry, + reply_type, TRUE, + &reply_port, &reply_soright); + + assert(kr == KERN_SUCCESS); + assert(reply_port == dest_port); + assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE); + assert(reply_soright == IP_NULL); + } else if ((dest_type == MACH_MSG_TYPE_COPY_SEND) && + (reply_type == MACH_MSG_TYPE_COPY_SEND)) { + /* + * To make this atomic, just do one copy-send, + * and dup the send right we get out. + */ + + kr = ipc_right_copyin(space, name, entry, + dest_type, FALSE, + &dest_port, &dest_soright); + if (kr != KERN_SUCCESS) + goto invalid_dest; + + assert(entry->ie_bits & MACH_PORT_TYPE_SEND); + assert(dest_soright == IP_NULL); + + /* + * It's OK if the port we got is dead now, + * so reply_port is IP_DEAD, because the msg + * won't go anywhere anyway. + */ + + reply_port = (ipc_object_t) + ipc_port_copy_send((ipc_port_t) dest_port); + reply_soright = IP_NULL; + } else if ((dest_type == MACH_MSG_TYPE_MOVE_SEND) && + (reply_type == MACH_MSG_TYPE_MOVE_SEND)) { + /* + * This is an easy case. Just use our + * handy-dandy special-purpose copyin call + * to get two send rights for the price of one. 
+ */ + + kr = ipc_right_copyin_two(space, name, entry, + &dest_port, &dest_soright); + if (kr != KERN_SUCCESS) + goto invalid_dest; + + /* the entry might need to be deallocated */ + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, name, entry); + + reply_port = dest_port; + reply_soright = IP_NULL; + } else { + ipc_port_t soright; + + assert(((dest_type == MACH_MSG_TYPE_COPY_SEND) && + (reply_type == MACH_MSG_TYPE_MOVE_SEND)) || + ((dest_type == MACH_MSG_TYPE_MOVE_SEND) && + (reply_type == MACH_MSG_TYPE_COPY_SEND))); + + /* + * To make this atomic, just do a move-send, + * and dup the send right we get out. + */ + + kr = ipc_right_copyin(space, name, entry, + MACH_MSG_TYPE_MOVE_SEND, FALSE, + &dest_port, &soright); + if (kr != KERN_SUCCESS) + goto invalid_dest; + + /* the entry might need to be deallocated */ + + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, name, entry); + + /* + * It's OK if the port we got is dead now, + * so reply_port is IP_DEAD, because the msg + * won't go anywhere anyway. + */ + + reply_port = (ipc_object_t) + ipc_port_copy_send((ipc_port_t) dest_port); + + if (dest_type == MACH_MSG_TYPE_MOVE_SEND) { + dest_soright = soright; + reply_soright = IP_NULL; + } else { + dest_soright = IP_NULL; + reply_soright = soright; + } + } + } else if (!MACH_PORT_VALID(reply_name)) { + ipc_entry_t entry; + + /* + * No reply port! This is an easy case + * to make atomic. Just copyin the destination. 
+ */ + + entry = ipc_entry_lookup(space, dest_name); + if (entry == IE_NULL) + goto invalid_dest; + + kr = ipc_right_copyin(space, dest_name, entry, + dest_type, FALSE, + &dest_port, &dest_soright); + if (kr != KERN_SUCCESS) + goto invalid_dest; + + /* the entry might need to be deallocated */ + + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, dest_name, entry); + + reply_port = (ipc_object_t) reply_name; + reply_soright = IP_NULL; + } else { + ipc_entry_t dest_entry, reply_entry; + ipc_port_t saved_reply; + + /* + * This is the tough case to make atomic. + * The difficult problem is serializing with port death. + * At the time we copyin dest_port, it must be alive. + * If reply_port is alive when we copyin it, then + * we are OK, because we serialize before the death + * of both ports. Assume reply_port is dead at copyin. + * Then if dest_port dies/died after reply_port died, + * we are OK, because we serialize between the death + * of the two ports. So the bad case is when dest_port + * dies after its copyin, reply_port dies before its + * copyin, and dest_port dies before reply_port. Then + * the copyins operated as if dest_port was alive + * and reply_port was dead, which shouldn't have happened + * because they died in the other order. + * + * Note that it is easy for a user task to tell if + * a copyin happened before or after a port died. + * For example, suppose both dest and reply are + * send-once rights (types are both move-sonce) and + * both rights have dead-name requests registered. + * If a port dies before copyin, a dead-name notification + * is generated and the dead name's urefs are incremented, + * and if the copyin happens first, a port-deleted + * notification is generated. + * + * Note that although the entries are different, + * dest_port and reply_port might still be the same. 
+ * + * JMM - The code to handle this was too expensive and, anyway, + * we intend to separate the dest lookup from the reply copyin + * by a wide margin, so the user will have to learn to deal! + * I will be making the change soon! + */ + + dest_entry = ipc_entry_lookup(space, dest_name); + if (dest_entry == IE_NULL) + goto invalid_dest; + + reply_entry = ipc_entry_lookup(space, reply_name); + if (reply_entry == IE_NULL) + goto invalid_reply; + + assert(dest_entry != reply_entry); /* names are not equal */ + assert(reply_type != 0); /* because reply_name not null */ + + if (!ipc_right_copyin_check(space, reply_name, reply_entry, + reply_type)) + goto invalid_reply; + + kr = ipc_right_copyin(space, dest_name, dest_entry, + dest_type, FALSE, + &dest_port, &dest_soright); + if (kr != KERN_SUCCESS) + goto invalid_dest; + + assert(IO_VALID(dest_port)); + + kr = ipc_right_copyin(space, reply_name, reply_entry, + reply_type, TRUE, + &reply_port, &reply_soright); + + assert(kr == KERN_SUCCESS); + + /* the entries might need to be deallocated */ + + if (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, reply_name, reply_entry); + + if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, dest_name, dest_entry); + } + + /* + * At this point, dest_port, reply_port, + * dest_soright, reply_soright are all initialized. + * Any defunct entries have been deallocated. + * The space is still write-locked, and we need to + * make the MACH_SEND_CANCEL check. The notify_port pointer + * is still usable, because the copyin code above won't ever + * deallocate a receive right, so its entry still exists + * and holds a ref. Note notify_port might even equal + * dest_port or reply_port. 
+ */ + + if ((notify != MACH_PORT_NULL) && + (dest_soright == notify_port)) { + ipc_port_release_sonce(dest_soright); + dest_soright = IP_NULL; + } + + is_write_unlock(space); + + if (dest_soright != IP_NULL) + ipc_notify_port_deleted(dest_soright, dest_name); + + if (reply_soright != IP_NULL) + ipc_notify_port_deleted(reply_soright, reply_name); + + dest_type = ipc_object_copyin_type(dest_type); + reply_type = ipc_object_copyin_type(reply_type); + + msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) | + MACH_MSGH_BITS(dest_type, reply_type)); + msg->msgh_remote_port = (ipc_port_t)dest_port; + msg->msgh_local_port = (ipc_port_t)reply_port; + + return MACH_MSG_SUCCESS; + +invalid_reply: + is_write_unlock(space); + return MACH_SEND_INVALID_REPLY; + +invalid_dest: + is_write_unlock(space); + if (reply_soright != IP_NULL) + ipc_notify_port_deleted(reply_soright, reply_name); + return MACH_SEND_INVALID_DEST; +} + +/* + * Routine: ipc_kmsg_copyin_body + * Purpose: + * "Copy-in" port rights and out-of-line memory + * in the message body. + * + * In all failure cases, the message is left holding + * no rights or memory. However, the message buffer + * is not deallocated. If successful, the message + * contains a valid destination port. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Successful copyin. + * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory. + * MACH_SEND_INVALID_RIGHT Can't copyin port right in body. + * MACH_SEND_INVALID_TYPE Bad type specification. + * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data. 
+ * MACH_SEND_INVALID_RT_OOL_SIZE OOL Buffer too large for RT + * MACH_MSG_INVALID_RT_DESCRIPTOR Dealloc and RT are incompatible + */ + +mach_msg_return_t +ipc_kmsg_copyin_body( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map) +{ + ipc_object_t dest; + mach_msg_body_t *body; + mach_msg_descriptor_t *saddr, *eaddr; + boolean_t complex; + mach_msg_return_t mr; + boolean_t use_page_lists, steal_pages; + int i; + kern_return_t kr; + vm_size_t space_needed = 0; + vm_offset_t paddr = 0; + mach_msg_descriptor_t *sstart; + vm_map_copy_t copy = VM_MAP_COPY_NULL; + + /* + * Determine if the target is a kernel port. + */ + dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port; + complex = FALSE; + use_page_lists = ipc_kobject_vm_page_list(ip_kotype((ipc_port_t)dest)); + steal_pages = ipc_kobject_vm_page_steal(ip_kotype((ipc_port_t)dest)); + + body = (mach_msg_body_t *) (&kmsg->ikm_header + 1); + saddr = (mach_msg_descriptor_t *) (body + 1); + eaddr = saddr + body->msgh_descriptor_count; + + /* make sure the message does not ask for more msg descriptors + * than the message can hold. + */ + + if (eaddr <= saddr || + eaddr > (mach_msg_descriptor_t *) (&kmsg->ikm_header + + kmsg->ikm_header.msgh_size)) { + ipc_kmsg_clean_partial(kmsg,0,0,0); + return MACH_SEND_MSG_TOO_SMALL; + } + + /* + * Make an initial pass to determine kernal VM space requirements for + * physical copies. 
+ */ + for (sstart = saddr; sstart < eaddr; sstart++) { + + if (sstart->type.type == MACH_MSG_OOL_DESCRIPTOR || + sstart->type.type == MACH_MSG_OOL_VOLATILE_DESCRIPTOR) { + + assert(!(sstart->out_of_line.copy == MACH_MSG_PHYSICAL_COPY && + (use_page_lists || steal_pages))); + + if (sstart->out_of_line.copy != MACH_MSG_PHYSICAL_COPY && + sstart->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) { + /* + * Invalid copy option + */ + ipc_kmsg_clean_partial(kmsg,0,0,0); + return MACH_SEND_INVALID_TYPE; + } + + if ((sstart->out_of_line.size >= MSG_OOL_SIZE_SMALL) && + (sstart->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) && + !(sstart->out_of_line.deallocate)) { + + /* + * Out-of-line memory descriptor, accumulate kernel + * memory requirements + */ + space_needed += round_page(sstart->out_of_line.size); + if (space_needed > ipc_kmsg_max_vm_space) { + + /* + * Per message kernel memory limit exceeded + */ + ipc_kmsg_clean_partial(kmsg,0,0,0); + return MACH_MSG_VM_KERNEL; + } + } + } + } + + /* + * Allocate space in the pageable kernel ipc copy map for all the + * ool data that is to be physically copied. Map is marked wait for + * space. + */ + if (space_needed) { + if (vm_allocate(ipc_kernel_copy_map, &paddr, space_needed, TRUE) != + KERN_SUCCESS) { + ipc_kmsg_clean_partial(kmsg,0,0,0); + return MACH_MSG_VM_KERNEL; + } + } + + /* + * handle the OOL regions and port descriptors. + * the check for complex messages was done earlier. + */ + + for (i = 0, sstart = saddr; sstart < eaddr; sstart++) { + + switch (sstart->type.type) { + + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_type_name_t name; + ipc_object_t object; + mach_msg_port_descriptor_t *dsc; + + dsc = &sstart->port; + + /* this is really the type SEND, SEND_ONCE, etc. 
*/ + name = dsc->disposition; + dsc->disposition = ipc_object_copyin_type(name); + + if (!MACH_PORT_VALID((mach_port_name_t)dsc->name)) { + complex = TRUE; + break; + } + kr = ipc_object_copyin(space, (mach_port_name_t)dsc->name, name, &object); + if (kr != KERN_SUCCESS) { + ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); + return MACH_SEND_INVALID_RIGHT; + } + if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity((ipc_port_t) object, + (ipc_port_t) dest)) { + kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + } + dsc->name = (ipc_port_t) object; + complex = TRUE; + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + vm_size_t length; + boolean_t dealloc; + vm_offset_t addr; + vm_offset_t kaddr; + mach_msg_ool_descriptor_t *dsc; + + dsc = &sstart->out_of_line; + dealloc = dsc->deallocate; + addr = (vm_offset_t) dsc->address; + + length = dsc->size; + + if (length == 0) { + dsc->address = 0; + } else if (use_page_lists) { + int options; + + /* + * Use page list copy mechanism if specified. + */ + if (steal_pages == FALSE) { + /* + * XXX Temporary Hackaround. + * XXX Because the same page + * XXX might be in more than one + * XXX out of line region, steal + * XXX (busy) pages from previous + * XXX region so that this copyin + * XXX won't block (permanently). + */ + if (copy != VM_MAP_COPY_NULL) + vm_map_copy_steal_pages(copy); + } + + /* + * Set up options for copying in page list. + * If deallocating, steal pages to prevent + * vm code from lazy evaluating deallocation. 
+ */ + options = VM_PROT_READ; + if (dealloc) { + options |= VM_MAP_COPYIN_OPT_SRC_DESTROY | + VM_MAP_COPYIN_OPT_STEAL_PAGES; + } + else if (steal_pages) { + options |= VM_MAP_COPYIN_OPT_STEAL_PAGES; + } + + if (vm_map_copyin_page_list(map, addr, length, options, + ©, FALSE) + != KERN_SUCCESS) { + + ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); + return MACH_SEND_INVALID_MEMORY; + } + + dsc->address = (void *) copy; + dsc->copy = MACH_MSG_PAGE_LIST_COPY_T; + + } else if ((length >= MSG_OOL_SIZE_SMALL) && + (dsc->copy == MACH_MSG_PHYSICAL_COPY) && !dealloc) { + + /* + * If the request is a physical copy and the source + * is not being deallocated, then allocate space + * in the kernel's pageable ipc copy map and copy + * the data in. The semantics guarantee that the + * data will have been physically copied before + * the send operation terminates. Thus if the data + * is not being deallocated, we must be prepared + * to page if the region is sufficiently large. + */ + if (copyin((const char *) addr, (char *) paddr, + length)) { + ipc_kmsg_clean_partial(kmsg, i, paddr, + space_needed); + return MACH_SEND_INVALID_MEMORY; + } + + /* + * The kernel ipc copy map is marked no_zero_fill. + * If the transfer is not a page multiple, we need + * to zero fill the balance. + */ + if (!page_aligned(length)) { + (void) memset((void *) (paddr + length), 0, + round_page(length) - length); + } + if (vm_map_copyin(ipc_kernel_copy_map, paddr, length, + TRUE, ©) != KERN_SUCCESS) { + ipc_kmsg_clean_partial(kmsg, i, paddr, + space_needed); + return MACH_MSG_VM_KERNEL; + } + dsc->address = (void *) copy; + paddr += round_page(length); + space_needed -= round_page(length); + } else { + + /* + * Make a vm_map_copy_t of the of the data. If the + * data is small, this will do an optimized physical + * copy. Otherwise, it will do a virtual copy. + * + * NOTE: A virtual copy is OK if the original is being + * deallocted, even if a physical copy was requested. 
+ */ + kr = vm_map_copyin(map, addr, length, dealloc, ©); + if (kr != KERN_SUCCESS) { + ipc_kmsg_clean_partial(kmsg,i,paddr,space_needed); + return (kr == KERN_RESOURCE_SHORTAGE) ? + MACH_MSG_VM_KERNEL : + MACH_SEND_INVALID_MEMORY; + } + dsc->address = (void *) copy; + } + complex = TRUE; + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + vm_size_t length; + vm_offset_t data; + vm_offset_t addr; + ipc_object_t *objects; + int j; + mach_msg_type_name_t name; + mach_msg_ool_ports_descriptor_t *dsc; + + dsc = &sstart->ool_ports; + addr = (vm_offset_t) dsc->address; + + /* calculate length of data in bytes, rounding up */ + length = dsc->count * sizeof(mach_port_name_t); + + if (length == 0) { + complex = TRUE; + dsc->address = (void *) 0; + break; + } + + data = kalloc(length); + + if (data == 0) { + ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); + return MACH_SEND_NO_BUFFER; + } + + if (copyinmap(map, addr, data, length)) { + kfree(data, length); + ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); + return MACH_SEND_INVALID_MEMORY; + } + + if (dsc->deallocate) { + (void) vm_deallocate(map, addr, length); + } + + dsc->address = (void *) data; + + /* this is really the type SEND, SEND_ONCE, etc. 
*/ + name = dsc->disposition; + dsc->disposition = ipc_object_copyin_type(name); + + objects = (ipc_object_t *) data; + + for ( j = 0; j < dsc->count; j++) { + mach_port_name_t port = (mach_port_name_t) objects[j]; + ipc_object_t object; + + if (!MACH_PORT_VALID(port)) + continue; + + kr = ipc_object_copyin(space, port, name, &object); + + if (kr != KERN_SUCCESS) { + int k; + + for(k = 0; k < j; k++) { + object = objects[k]; + if (!MACH_PORT_VALID(port)) + continue; + ipc_object_destroy(object, dsc->disposition); + } + kfree(data, length); + ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); + return MACH_SEND_INVALID_RIGHT; + } + + if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity( + (ipc_port_t) object, + (ipc_port_t) dest)) + kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + + objects[j] = object; + } + + complex = TRUE; + break; + } + default: { + /* + * Invalid descriptor + */ + ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed); + return MACH_SEND_INVALID_TYPE; + } + } + i++ ; + } + + if (!complex) + kmsg->ikm_header.msgh_bits &= ~MACH_MSGH_BITS_COMPLEX; + return MACH_MSG_SUCCESS; +} + + +/* + * Routine: ipc_kmsg_copyin + * Purpose: + * "Copy-in" port rights and out-of-line memory + * in the message. + * + * In all failure cases, the message is left holding + * no rights or memory. However, the message buffer + * is not deallocated. If successful, the message + * contains a valid destination port. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Successful copyin. + * MACH_SEND_INVALID_HEADER + * Illegal value in the message header bits. + * MACH_SEND_INVALID_NOTIFY Bad notify port. + * MACH_SEND_INVALID_DEST Can't copyin destination port. + * MACH_SEND_INVALID_REPLY Can't copyin reply port. + * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory. + * MACH_SEND_INVALID_RIGHT Can't copyin port right in body. + * MACH_SEND_INVALID_TYPE Bad type specification. 
 *		MACH_SEND_MSG_TOO_SMALL	Body is too small for types/data.
 */

mach_msg_return_t
ipc_kmsg_copyin(
	ipc_kmsg_t		kmsg,
	ipc_space_t		space,
	vm_map_t		map,
	mach_port_name_t	notify)
{
	mach_msg_return_t	mr;

	/* Translate the destination/reply rights in the header first. */
	mr = ipc_kmsg_copyin_header(&kmsg->ikm_header, space, notify);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	/* A simple (non-complex) message carries no rights or memory in its body. */
	if ((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0)
		return MACH_MSG_SUCCESS;

	return( ipc_kmsg_copyin_body( kmsg, space, map) );
}

/*
 *	Routine:	ipc_kmsg_copyin_from_kernel
 *	Purpose:
 *		"Copy-in" port rights and out-of-line memory
 *		in a message sent from the kernel.
 *
 *		Because the message comes from the kernel,
 *		the implementation assumes there are no errors
 *		or peculiarities in the message.
 *
 *		A detected circularity is reported by setting
 *		MACH_MSGH_BITS_CIRCULAR in the message header;
 *		despite an older comment, nothing is returned
 *		(the function is void).
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_kmsg_copyin_from_kernel(
	ipc_kmsg_t	kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header.msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
	ipc_object_t local = (ipc_object_t) kmsg->ikm_header.msgh_local_port;

	/* translate the destination and reply ports */

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local))
		ipc_object_copyin_from_kernel(local, lname);

	/*
	 *	The common case is a complex message with no reply port,
	 *	because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
		     MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite COPY_SEND to the post-copyin PORT_SEND */
		bits = (MACH_MSGH_BITS_COMPLEX |
			MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header.msgh_bits = bits;
	} else {
		/* general case: translate both dispositions through copyin_type */
		bits = (MACH_MSGH_BITS_OTHER(bits) |
			MACH_MSGH_BITS(ipc_object_copyin_type(rname),
				       ipc_object_copyin_type(lname)));

		kmsg->ikm_header.msgh_bits = bits;
		if ((bits & MACH_MSGH_BITS_COMPLEX) == 0)
			return;
	}
	{
	    mach_msg_descriptor_t	*saddr, *eaddr;
	    mach_msg_body_t		*body;

	    /* the body and its descriptors immediately follow the header */
	    body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
	    saddr = (mach_msg_descriptor_t *) (body + 1);
	    eaddr = (mach_msg_descriptor_t *) saddr + body->msgh_descriptor_count;

	    for ( ; saddr < eaddr; saddr++) {

		switch (saddr->type.type) {

		    case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_type_name_t		name;
			ipc_object_t			object;
			mach_msg_port_descriptor_t	*dsc;

			dsc = &saddr->port;

			/* this is really the type SEND, SEND_ONCE, etc. */
			name = dsc->disposition;
			object = (ipc_object_t) dsc->name;
			dsc->disposition = ipc_object_copyin_type(name);

			if (!IO_VALID(object)) {
			    break;
			}

			ipc_object_copyin_from_kernel(object, name);

			/* CDY avoid circularity when the destination is also */
			/* the kernel.  This check should be changed into an  */
			/* assert when the new kobject model is in place since*/
			/* ports will not be used in kernel to kernel chats   */

			if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) {
			   if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
			       ipc_port_check_circularity((ipc_port_t) object,
							  (ipc_port_t) remote)) {
				kmsg->ikm_header.msgh_bits |=
					MACH_MSGH_BITS_CIRCULAR;
			   }
			}
			break;
		    }
		    case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		    case MACH_MSG_OOL_DESCRIPTOR: {
			/*
			 * The sender should supply ready-made memory, i.e.
			 * a vm_map_copy_t, so we don't need to do anything.
			 */
			break;
		    }
		    case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			ipc_object_t			*objects;
			int				j;
			mach_msg_type_name_t		name;
			mach_msg_ool_ports_descriptor_t	*dsc;

			dsc = &saddr->ool_ports;

			/* this is really the type SEND, SEND_ONCE, etc. */
			name = dsc->disposition;
			dsc->disposition = ipc_object_copyin_type(name);

			objects = (ipc_object_t *) dsc->address;

			for ( j = 0; j < dsc->count; j++) {
			    ipc_object_t object = objects[j];

			    if (!IO_VALID(object))
				continue;

			    ipc_object_copyin_from_kernel(object, name);

			    /* receive rights moving toward their own destination
			     * would create a cycle; flag it in the header */
			    if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
				ipc_port_check_circularity(
						(ipc_port_t) object,
						(ipc_port_t) remote))
				kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
			}
			break;
		    }
		    default: {
#if	MACH_ASSERT
			panic("ipc_kmsg_copyin_from_kernel:  bad descriptor");
#endif	/* MACH_ASSERT */
		    }
		}
	    }
	}
}

/*
 *	Routine:	ipc_kmsg_copyout_header
 *	Purpose:
 *		"Copy-out" port rights in the header of a message.
 *		Operates atomically; if it doesn't succeed the
 *		message header and the space are left untouched.
 *		If it does succeed the remote/local port fields
 *		contain port names instead of object pointers,
 *		and the bits field is updated.
 *
 *		The notify argument implements the MACH_RCV_NOTIFY option.
 *		If it is not MACH_PORT_NULL, it should name a receive right.
 *		If the process of receiving the reply port creates a
 *		new right in the receiving task, then the new right is
 *		automatically registered for a dead-name notification,
 *		with the notify port supplying the send-once right.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied out port rights.
 *		MACH_RCV_INVALID_NOTIFY
 *			Notify is non-null and doesn't name a receive right.
 *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *			The space is dead.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *			No room in space for another name.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *			Couldn't allocate memory for the reply port.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *			Couldn't allocate memory for the dead-name request.
 */

mach_msg_return_t
ipc_kmsg_copyout_header(
	mach_msg_header_t	*msg,
	ipc_space_t		space,
	mach_port_name_t	notify)
{
	mach_msg_bits_t mbits = msg->msgh_bits;
	ipc_port_t dest = (ipc_port_t) msg->msgh_remote_port;

	assert(IP_VALID(dest));

    {
	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
	mach_port_name_t dest_name, reply_name;

	if (IP_VALID(reply)) {
		ipc_port_t notify_port;
		ipc_entry_t entry;
		kern_return_t kr;

		/*
		 *	Handling notify (for MACH_RCV_NOTIFY) is tricky.
		 *	The problem is atomically making a send-once right
		 *	from the notify port and installing it for a
		 *	dead-name request in the new entry, because this
		 *	requires two port locks (on the notify port and
		 *	the reply port).  However, we can safely make
		 *	and consume send-once rights for the notify port
		 *	as long as we hold the space locked.  This isn't
		 *	an atomicity problem, because the only way
		 *	to detect that a send-once right has been created
		 *	and then consumed if it wasn't needed is by getting
		 *	at the receive right to look at ip_sorights, and
		 *	because the space is write-locked status calls can't
		 *	lookup the notify port receive right.  When we make
		 *	the send-once right, we lock the notify port,
		 *	so any status calls in progress will be done.
		 */

		is_write_lock(space);

		/* retry loop: each failure path either exits or re-enters
		 * with the space write-locked again */
		for (;;) {
			ipc_port_request_index_t request;

			if (!space->is_active) {
				is_write_unlock(space);
				return (MACH_RCV_HEADER_ERROR|
					MACH_MSG_IPC_SPACE);
			}

			if (notify != MACH_PORT_NULL) {
				notify_port = ipc_port_lookup_notify(space,
								     notify);
				if (notify_port == IP_NULL) {
					is_write_unlock(space);
					return MACH_RCV_INVALID_NOTIFY;
				}
			} else
				notify_port = IP_NULL;

			if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
			    ipc_right_reverse(space, (ipc_object_t) reply,
					      &reply_name, &entry)) {
				/* reply port is locked and active */

				/*
				 *	We don't need the notify_port
				 *	send-once right, but we can't release
				 *	it here because reply port is locked.
				 *	Wait until after the copyout to
				 *	release the notify port right.
				 */

				assert(entry->ie_bits &
				       MACH_PORT_TYPE_SEND_RECEIVE);
				break;
			}

			ip_lock(reply);
			if (!ip_active(reply)) {
				/* reply port died; deliver MACH_PORT_DEAD */
				ip_release(reply);
				ip_check_unlock(reply);

				if (notify_port != IP_NULL)
					ipc_port_release_sonce(notify_port);

				ip_lock(dest);
				is_write_unlock(space);

				reply = IP_DEAD;
				reply_name = MACH_PORT_DEAD;
				goto copyout_dest;
			}

			reply_name = (mach_port_name_t)reply;
			kr = ipc_entry_get(space, &reply_name, &entry);
			if (kr != KERN_SUCCESS) {
				ip_unlock(reply);

				if (notify_port != IP_NULL)
					ipc_port_release_sonce(notify_port);

				/* space is locked */
				kr = ipc_entry_grow_table(space,
							  ITS_SIZE_NONE);
				if (kr != KERN_SUCCESS) {
					/* space is unlocked */

					if (kr == KERN_RESOURCE_SHORTAGE)
						return (MACH_RCV_HEADER_ERROR|
							MACH_MSG_IPC_KERNEL);
					else
						return (MACH_RCV_HEADER_ERROR|
							MACH_MSG_IPC_SPACE);
				}
				/* space is locked again; start over */

				continue;
			}
			assert(IE_BITS_TYPE(entry->ie_bits) ==
			       MACH_PORT_TYPE_NONE);
			assert(entry->ie_object == IO_NULL);

			if (notify_port == IP_NULL) {
				/* not making a dead-name request */

				entry->ie_object = (ipc_object_t) reply;
				break;
			}

			kr = ipc_port_dnrequest(reply, reply_name,
						notify_port, &request);
			if (kr != KERN_SUCCESS) {
				/* no slot for the dead-name request; grow
				 * the port's request table and retry */
				ip_unlock(reply);

				ipc_port_release_sonce(notify_port);

				ipc_entry_dealloc(space, reply_name, entry);
				is_write_unlock(space);

				ip_lock(reply);
				if (!ip_active(reply)) {
					/* will fail next time around loop */

					ip_unlock(reply);
					is_write_lock(space);
					continue;
				}

				kr = ipc_port_dngrow(reply, ITS_SIZE_NONE);
				/* port is unlocked */
				if (kr != KERN_SUCCESS)
					return (MACH_RCV_HEADER_ERROR|
						MACH_MSG_IPC_KERNEL);

				is_write_lock(space);
				continue;
			}

			notify_port = IP_NULL; /* don't release right below */

			entry->ie_object = (ipc_object_t) reply;
			entry->ie_request = request;
			break;
		}

		/* space and reply port are locked and active */

		ip_reference(reply);	/* hold onto the reply port */

		kr = ipc_right_copyout(space, reply_name, entry,
				       reply_type, TRUE, (ipc_object_t) reply);
		/* reply port is unlocked */
		assert(kr == KERN_SUCCESS);

		if (notify_port != IP_NULL)
			ipc_port_release_sonce(notify_port);

		/* dest must be locked while the space is still locked,
		 * to keep the two copyouts atomic (see comment below) */
		ip_lock(dest);
		is_write_unlock(space);
	} else {
		/*
		 *	No reply port!  This is an easy case.
		 *	We only need to have the space locked
		 *	when checking notify and when locking
		 *	the destination (to ensure atomicity).
		 */

		is_read_lock(space);
		if (!space->is_active) {
			is_read_unlock(space);
			return MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE;
		}

		if (notify != MACH_PORT_NULL) {
			ipc_entry_t entry;

			/* must check notify even though it won't be used */

			if ((entry = ipc_entry_lookup(space, notify)) == IE_NULL) {
				is_read_unlock(space);
				return MACH_RCV_INVALID_NOTIFY;
			}

			if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
				is_read_unlock(space);
				return MACH_RCV_INVALID_NOTIFY;
			}
		}

		ip_lock(dest);
		is_read_unlock(space);

		reply_name = (mach_port_name_t) reply;
	}

	/*
	 *	At this point, the space is unlocked and the destination
	 *	port is locked.  (Lock taken while space was locked.)
	 *	reply_name is taken care of; we still need dest_name.
	 *	We still hold a ref for reply (if it is valid).
	 *
	 *	If the space holds receive rights for the destination,
	 *	we return its name for the right.  Otherwise the task
	 *	managed to destroy or give away the receive right between
	 *	receiving the message and this copyout.  If the destination
	 *	is dead, return MACH_PORT_DEAD, and if the receive right
	 *	exists somewhere else (another space, in transit)
	 *	return MACH_PORT_NULL.
	 *
	 *	Making this copyout operation atomic with the previous
	 *	copyout of the reply port is a bit tricky.  If there was
	 *	no real reply port (it wasn't IP_VALID) then this isn't
	 *	an issue.  If the reply port was dead at copyout time,
	 *	then we are OK, because if dest is dead we serialize
	 *	after the death of both ports and if dest is alive
	 *	we serialize after reply died but before dest's (later) death.
	 *	So assume reply was alive when we copied it out.  If dest
	 *	is alive, then we are OK because we serialize before
	 *	the ports' deaths.  So assume dest is dead when we look at it.
	 *	If reply dies/died after dest, then we are OK because
	 *	we serialize after dest died but before reply dies.
	 *	So the hard case is when reply is alive at copyout,
	 *	dest is dead at copyout, and reply died before dest died.
	 *	In this case pretend that dest is still alive, so
	 *	we serialize while both ports are alive.
	 *
	 *	Because the space lock is held across the copyout of reply
	 *	and locking dest, the receive right for dest can't move
	 *	in or out of the space while the copyouts happen, so
	 *	that isn't an atomicity problem.  In the last hard case
	 *	above, this implies that when dest is dead that the
	 *	space couldn't have had receive rights for dest at
	 *	the time reply was copied-out, so when we pretend
	 *	that dest is still alive, we can return MACH_PORT_NULL.
	 *
	 *	If dest == reply, then we have to make it look like
	 *	either both copyouts happened before the port died,
	 *	or both happened after the port died.  This special
	 *	case works naturally if the timestamp comparison
	 *	is done correctly.
	 */

    copyout_dest:

	if (ip_active(dest)) {
		ipc_object_copyout_dest(space, (ipc_object_t) dest,
					dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		/* dest is dead; use timestamps to order the two deaths
		 * (see the long comment above) */
		ipc_port_timestamp_t timestamp;

		timestamp = dest->ip_timestamp;
		ip_release(dest);
		ip_check_unlock(dest);

		if (IP_VALID(reply)) {
			ip_lock(reply);
			if (ip_active(reply) ||
			    IP_TIMESTAMP_ORDER(timestamp,
					       reply->ip_timestamp))
				dest_name = MACH_PORT_DEAD;
			else
				dest_name = MACH_PORT_NULL;
			ip_unlock(reply);
		} else
			dest_name = MACH_PORT_DEAD;
	}

	if (IP_VALID(reply))
		ipc_port_release(reply);

	/* note: remote/local are deliberately swapped for the receiver */
	msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
			  MACH_MSGH_BITS(reply_type, dest_type));
	msg->msgh_local_port = (ipc_port_t)dest_name;
	msg->msgh_remote_port = (ipc_port_t)reply_name;
    }

	return MACH_MSG_SUCCESS;
}

/*
 *	Routine:	ipc_kmsg_copyout_object
 *	Purpose:
 *		Copy-out a port right.  Always returns a name,
 *		even for unsuccessful return codes.  Always
 *		consumes the supplied object.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The space acquired the right
 *			(name is valid) or the object is dead (MACH_PORT_DEAD).
 *		MACH_MSG_IPC_SPACE	No room in space for the right,
 *			or the space is dead.  (Name is MACH_PORT_NULL.)
 *		MACH_MSG_IPC_KERNEL	Kernel resource shortage.
 *			(Name is MACH_PORT_NULL.)
+ */ + +mach_msg_return_t +ipc_kmsg_copyout_object( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep) +{ + kern_return_t kr; + + if (!IO_VALID(object)) { + *namep = (mach_port_name_t) object; + return MACH_MSG_SUCCESS; + } + + kr = ipc_object_copyout(space, object, msgt_name, TRUE, namep); + if (kr != KERN_SUCCESS) { + ipc_object_destroy(object, msgt_name); + + if (kr == KERN_INVALID_CAPABILITY) + *namep = MACH_PORT_DEAD; + else { + *namep = MACH_PORT_NULL; + + if (kr == KERN_RESOURCE_SHORTAGE) + return MACH_MSG_IPC_KERNEL; + else + return MACH_MSG_IPC_SPACE; + } + } + + return MACH_MSG_SUCCESS; +} + +/* + * Routine: ipc_kmsg_copyout_body + * Purpose: + * "Copy-out" port rights and out-of-line memory + * in the body of a message. + * + * The error codes are a combination of special bits. + * The copyout proceeds despite errors. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Successful copyout. + * MACH_MSG_IPC_SPACE No room for port right in name space. + * MACH_MSG_VM_SPACE No room for memory in address space. + * MACH_MSG_IPC_KERNEL Resource shortage handling port right. + * MACH_MSG_VM_KERNEL Resource shortage handling memory. 
 *		MACH_MSG_INVALID_RT_DESCRIPTOR Descriptor incompatible with RT
 */

mach_msg_return_t
ipc_kmsg_copyout_body(
	ipc_kmsg_t		kmsg,
	ipc_space_t		space,
	vm_map_t		map,
	mach_msg_body_t		*slist)
{
    mach_msg_body_t		*body;
    mach_msg_descriptor_t	*saddr, *eaddr;
    /* mr accumulates error bits across all descriptors; copyout continues
     * past individual failures */
    mach_msg_return_t		mr = MACH_MSG_SUCCESS;
    kern_return_t		kr;
    vm_offset_t			data;
    mach_msg_descriptor_t	*sstart, *send;

    body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
    saddr = (mach_msg_descriptor_t *) (body + 1);
    eaddr = saddr + body->msgh_descriptor_count;

    /*
     * Do scatter list setup
     */
    if (slist != MACH_MSG_BODY_NULL) {
	sstart = (mach_msg_descriptor_t *) (slist + 1);
	send = sstart + slist->msgh_descriptor_count;
    }
    else {
	/* NOTE(review): `send` is left uninitialized here; presumably the
	 * SKIP_PORT_DESCRIPTORS/INCREMENT_SCATTER macros guard on
	 * sstart == MACH_MSG_DESCRIPTOR_NULL before touching it — confirm
	 * against the macro definitions. */
	sstart = MACH_MSG_DESCRIPTOR_NULL;
    }

    for ( ; saddr < eaddr; saddr++ ) {

	switch (saddr->type.type) {

	    case MACH_MSG_PORT_DESCRIPTOR: {
		mach_msg_port_descriptor_t *dsc;

		/*
		 * Copyout port right carried in the message
		 */
		dsc = &saddr->port;
		mr |= ipc_kmsg_copyout_object(space,
					      (ipc_object_t) dsc->name,
					      dsc->disposition,
					      (mach_port_name_t *) &dsc->name);

		break;
	    }
	    case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
	    case MACH_MSG_OOL_DESCRIPTOR : {
		vm_offset_t			rcv_addr;
		vm_offset_t			snd_addr;
		mach_msg_ool_descriptor_t	*dsc;
		mach_msg_copy_options_t		copy_option;

		SKIP_PORT_DESCRIPTORS(sstart, send);

		dsc = &saddr->out_of_line;

		/* kernel-internal copy strategies must have been resolved
		 * into a vm_map_copy_t by this point */
		assert(dsc->copy != MACH_MSG_KALLOC_COPY_T);
		assert(dsc->copy != MACH_MSG_PAGE_LIST_COPY_T);

		copy_option = dsc->copy;

		if ((snd_addr = (vm_offset_t) dsc->address) != 0) {
		    if (sstart != MACH_MSG_DESCRIPTOR_NULL &&
			sstart->out_of_line.copy == MACH_MSG_OVERWRITE) {

			/*
			 * There is an overwrite descriptor specified in the
			 * scatter list for this ool data.  The descriptor
			 * has already been verified
			 */
			rcv_addr = (vm_offset_t) sstart->out_of_line.address;
			dsc->copy = MACH_MSG_OVERWRITE;
		    } else {
			dsc->copy = MACH_MSG_ALLOCATE;
		    }

		    /*
		     * Whether the data was virtually or physically
		     * copied we have a vm_map_copy_t for it.
		     * If there's an overwrite region specified
		     * overwrite it, otherwise do a virtual copy out.
		     */
		    if (dsc->copy == MACH_MSG_OVERWRITE) {
			    kr = vm_map_copy_overwrite(map, rcv_addr,
					    (vm_map_copy_t) dsc->address, TRUE);
		    } else {
			    kr = vm_map_copyout(map, &rcv_addr,
						(vm_map_copy_t) dsc->address);
		    }
		    if (kr != KERN_SUCCESS) {
			    if (kr == KERN_RESOURCE_SHORTAGE)
				    mr |= MACH_MSG_VM_KERNEL;
			    else
				    mr |= MACH_MSG_VM_SPACE;
			    /* the copy object is consumed even on failure */
			    vm_map_copy_discard((vm_map_copy_t) dsc->address);
			    dsc->address = 0;
			    INCREMENT_SCATTER(sstart);
			    break;
		    }
		    dsc->address = (void *) rcv_addr;
		}
		INCREMENT_SCATTER(sstart);
		break;
	    }
	    case MACH_MSG_OOL_PORTS_DESCRIPTOR : {
		vm_offset_t			addr;
		mach_port_name_t		*objects;
		mach_msg_type_number_t		j;
		vm_size_t			length;
		mach_msg_ool_ports_descriptor_t	*dsc;

		SKIP_PORT_DESCRIPTORS(sstart, send);

		dsc = &saddr->ool_ports;

		length = dsc->count * sizeof(mach_port_name_t);

		if (length != 0) {
		    if (sstart != MACH_MSG_DESCRIPTOR_NULL &&
			sstart->ool_ports.copy == MACH_MSG_OVERWRITE) {

			/*
			 * There is an overwrite descriptor specified in the
			 * scatter list for this ool data.  The descriptor
			 * has already been verified
			 */
			addr = (vm_offset_t) sstart->out_of_line.address;
			dsc->copy = MACH_MSG_OVERWRITE;
		    }
		    else {

			/*
			 * Dynamically allocate the region
			 */
			int anywhere = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|
				       VM_FLAGS_ANYWHERE;

			dsc->copy = MACH_MSG_ALLOCATE;
			if ((kr = vm_allocate(map, &addr, length,
					      anywhere)) != KERN_SUCCESS) {
			    /* allocation failed: destroy the remaining
			     * rights/memory in the body and bail out */
			    ipc_kmsg_clean_body(kmsg,
						body->msgh_descriptor_count);
			    dsc->address = 0;

			    if (kr == KERN_RESOURCE_SHORTAGE){
				mr |= MACH_MSG_VM_KERNEL;
			    } else {
				mr |= MACH_MSG_VM_SPACE;
			    }
			    INCREMENT_SCATTER(sstart);
			    break;
			}
		    }
		} else {
		    INCREMENT_SCATTER(sstart);
		    break;
		}


		objects = (mach_port_name_t *) dsc->address ;

		/* copyout port rights carried in the message */

		for ( j = 0; j < dsc->count ; j++) {
		    ipc_object_t object =
			(ipc_object_t) objects[j];

		    /* replaces each object pointer with its new name in place */
		    mr |= ipc_kmsg_copyout_object(space, object,
					dsc->disposition, &objects[j]);
		}

		/* copyout to memory allocated above */

		data = (vm_offset_t) dsc->address;
		(void) copyoutmap(map, data, addr, length);
		kfree(data, length);

		dsc->address = (void *) addr;
		INCREMENT_SCATTER(sstart);
		break;
	    }
	    default : {
		panic("untyped IPC copyout body: invalid message descriptor");
	    }
	}
    }
    return mr;
}

/*
 *	Routine:	ipc_kmsg_copyout
 *	Purpose:
 *		"Copy-out" port rights and out-of-line memory
 *		in the message.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied out all rights and memory.
 *		MACH_RCV_INVALID_NOTIFY	Bad notify port.
 *			Rights and memory in the message are intact.
 *		MACH_RCV_HEADER_ERROR + special bits
 *			Rights and memory in the message are intact.
 *		MACH_RCV_BODY_ERROR + special bits
 *			The message header was successfully copied out.
 *			As much of the body was handled as possible.
+ */ + +mach_msg_return_t +ipc_kmsg_copyout( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_port_name_t notify, + mach_msg_body_t *slist) +{ + mach_msg_return_t mr; + + mr = ipc_kmsg_copyout_header(&kmsg->ikm_header, space, notify); + if (mr != MACH_MSG_SUCCESS) + return mr; + + if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mr = ipc_kmsg_copyout_body(kmsg, space, map, slist); + + if (mr != MACH_MSG_SUCCESS) + mr |= MACH_RCV_BODY_ERROR; + } + + return mr; +} + +/* + * Routine: ipc_kmsg_copyout_pseudo + * Purpose: + * Does a pseudo-copyout of the message. + * This is like a regular copyout, except + * that the ports in the header are handled + * as if they are in the body. They aren't reversed. + * + * The error codes are a combination of special bits. + * The copyout proceeds despite errors. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Successful copyout. + * MACH_MSG_IPC_SPACE No room for port right in name space. + * MACH_MSG_VM_SPACE No room for memory in address space. + * MACH_MSG_IPC_KERNEL Resource shortage handling port right. + * MACH_MSG_VM_KERNEL Resource shortage handling memory. 
+ */ + +mach_msg_return_t +ipc_kmsg_copyout_pseudo( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist) +{ + mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits; + ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port; + ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port; + mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits); + mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits); + mach_port_name_t dest_name, reply_name; + mach_msg_return_t mr; + + assert(IO_VALID(dest)); + + mr = (ipc_kmsg_copyout_object(space, dest, dest_type, &dest_name) | + ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name)); + + kmsg->ikm_header.msgh_bits = mbits &~ MACH_MSGH_BITS_CIRCULAR; + kmsg->ikm_header.msgh_remote_port = (ipc_port_t)dest_name; + kmsg->ikm_header.msgh_local_port = (ipc_port_t)reply_name; + + if (mbits & MACH_MSGH_BITS_COMPLEX) { + mr |= ipc_kmsg_copyout_body(kmsg, space, map, slist); + } + + return mr; +} + +/* + * Routine: ipc_kmsg_copyout_dest + * Purpose: + * Copies out the destination port in the message. + * Destroys all other rights and memory in the message. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_kmsg_copyout_dest( + ipc_kmsg_t kmsg, + ipc_space_t space) +{ + mach_msg_bits_t mbits; + ipc_object_t dest; + ipc_object_t reply; + mach_msg_type_name_t dest_type; + mach_msg_type_name_t reply_type; + mach_port_name_t dest_name, reply_name; + + mbits = kmsg->ikm_header.msgh_bits; + dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port; + reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port; + dest_type = MACH_MSGH_BITS_REMOTE(mbits); + reply_type = MACH_MSGH_BITS_LOCAL(mbits); + + assert(IO_VALID(dest)); + + io_lock(dest); + if (io_active(dest)) { + ipc_object_copyout_dest(space, dest, dest_type, &dest_name); + /* dest is unlocked */ + } else { + io_release(dest); + io_check_unlock(dest); + dest_name = MACH_PORT_DEAD; + } + + if (IO_VALID(reply)) { + ipc_object_destroy(reply, reply_type); + reply_name = MACH_PORT_NULL; + } else + reply_name = (mach_port_name_t) reply; + + kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) | + MACH_MSGH_BITS(reply_type, dest_type)); + kmsg->ikm_header.msgh_local_port = (ipc_port_t)dest_name; + kmsg->ikm_header.msgh_remote_port = (ipc_port_t)reply_name; + + if (mbits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_body_t *body; + + body = (mach_msg_body_t *) (&kmsg->ikm_header + 1); + ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count); + } +} +/* + * Routine: ipc_kmsg_copyin_scatter + * Purpose: + * allocate and copyin a scatter list + * Algorithm: + * The gather (kmsg) is valid since it has been copied in. + * Gather list descriptors are sequentially paired with scatter + * list descriptors, with port descriptors in either list ignored. + * Descriptors are consistent if the type fileds match and size + * of the scatter descriptor is less than or equal to the + * size of the gather descriptor. A MACH_MSG_ALLOCATE copy + * strategy in a scatter descriptor matches any size in the + * corresponding gather descriptor assuming they are the same type. + * Either list may be larger than the other. 
 *		During the
 *		subsequent copy out, excess scatter descriptors are ignored
 *		and excess gather descriptors default to dynamic allocation.
 *
 *		In the case of a size error, the scatter list is released.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		the allocated message body containing the scatter list.
 */

mach_msg_body_t *
ipc_kmsg_copyin_scatter(
	mach_msg_header_t	*msg,
	mach_msg_size_t		slist_size,
	ipc_kmsg_t		kmsg)
{
	mach_msg_body_t		*slist;
	mach_msg_body_t		*body;
	mach_msg_descriptor_t	*gstart, *gend;
	mach_msg_descriptor_t	*sstart, *send;


	/* must at least hold a header and a body */
	if (slist_size < sizeof(mach_msg_base_t))
		return MACH_MSG_BODY_NULL;

	/* the caller's size includes the header, but we only copy the body */
	slist_size -= sizeof(mach_msg_header_t);
	slist = (mach_msg_body_t *)kalloc(slist_size);
	if (slist == MACH_MSG_BODY_NULL)
		return slist;

	if (copyin((char *) (msg + 1), (char *)slist, slist_size)) {
		kfree((vm_offset_t)slist, slist_size);
		return MACH_MSG_BODY_NULL;
	}

	/* the declared descriptor count must fit in what was copied in.
	 * NOTE(review): a huge user-supplied count could overflow this
	 * multiplication on 32-bit — confirm upstream bounds on count. */
	if ((slist->msgh_descriptor_count* sizeof(mach_msg_descriptor_t)
	     + sizeof(mach_msg_size_t)) > slist_size) {
		kfree((vm_offset_t)slist, slist_size);
		return MACH_MSG_BODY_NULL;
	}

	body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
	gstart = (mach_msg_descriptor_t *) (body + 1);
	gend = gstart + body->msgh_descriptor_count;

	sstart = (mach_msg_descriptor_t *) (slist + 1);
	send = sstart + slist->msgh_descriptor_count;

	/* pair each non-port gather descriptor with the next non-port
	 * scatter descriptor and validate type/copy/size consistency */
	while (gstart < gend) {
		mach_msg_descriptor_type_t	g_type;

		/*
		 * Skip port descriptors in gather list.
		 */
		g_type = gstart->type.type;

		if (g_type != MACH_MSG_PORT_DESCRIPTOR) {

		    /*
		     * A scatter list with a 0 descriptor count is treated as an
		     * automatic size mismatch.
		     */
		    if (slist->msgh_descriptor_count == 0) {
			kfree((vm_offset_t)slist, slist_size);
			return MACH_MSG_BODY_NULL;
		    }

		    /*
		     * Skip port descriptors in scatter list.
		     */
		    while (sstart < send) {
			if (sstart->type.type != MACH_MSG_PORT_DESCRIPTOR)
			    break;
			sstart++;
		    }

		    /*
		     * No more scatter descriptors, we're done
		     */
		    if (sstart >= send) {
			break;
		    }

		    /*
		     * Check type, copy and size fields
		     */
		    if (g_type == MACH_MSG_OOL_DESCRIPTOR ||
			g_type == MACH_MSG_OOL_VOLATILE_DESCRIPTOR) {
			if (sstart->type.type != MACH_MSG_OOL_DESCRIPTOR &&
			    sstart->type.type != MACH_MSG_OOL_VOLATILE_DESCRIPTOR) {
			    kfree((vm_offset_t)slist, slist_size);
			    return MACH_MSG_BODY_NULL;
			}
			if (sstart->out_of_line.copy == MACH_MSG_OVERWRITE &&
			    gstart->out_of_line.size > sstart->out_of_line.size) {
			    kfree((vm_offset_t)slist, slist_size);
			    return MACH_MSG_BODY_NULL;
			}
		    }
		    else {
			if (sstart->type.type != MACH_MSG_OOL_PORTS_DESCRIPTOR) {
			    kfree((vm_offset_t)slist, slist_size);
			    return MACH_MSG_BODY_NULL;
			}
			if (sstart->ool_ports.copy == MACH_MSG_OVERWRITE &&
			    gstart->ool_ports.count > sstart->ool_ports.count) {
			    kfree((vm_offset_t)slist, slist_size);
			    return MACH_MSG_BODY_NULL;
			}
		    }
		    sstart++;
		}
		gstart++;
	}
	return slist;
}


/*
 *	Routine:	ipc_kmsg_free_scatter
 *	Purpose:
 *		Deallocate a scatter list.  Since we actually allocated
 *		a body without a header, and since the header was originally
 *		accounted for in slist_size, we have to adjust it down
 *		before freeing the scatter list.
 */
void
ipc_kmsg_free_scatter(
	mach_msg_body_t	*slist,
	mach_msg_size_t	slist_size)
{
	/* mirror the size adjustment made in ipc_kmsg_copyin_scatter */
	slist_size -= sizeof(mach_msg_header_t);
	kfree((vm_offset_t)slist, slist_size);
}


/*
 *	Routine:	ipc_kmsg_copyout_to_kernel
 *	Purpose:
 *		Copies out the destination and reply ports in the message.
 *		Leaves all other rights and memory in the message alone.
 *	Conditions:
 *		Nothing locked.
 *
 *	Derived from ipc_kmsg_copyout_dest.
 *	Use by mach_msg_rpc_from_kernel (which used to use copyout_dest).
 *	We really do want to save rights and memory.
+ */ + +void +ipc_kmsg_copyout_to_kernel( + ipc_kmsg_t kmsg, + ipc_space_t space) +{ + ipc_object_t dest; + ipc_object_t reply; + mach_msg_type_name_t dest_type; + mach_msg_type_name_t reply_type; + mach_port_name_t dest_name, reply_name; + + dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port; + reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port; + dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits); + reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header.msgh_bits); + + assert(IO_VALID(dest)); + + io_lock(dest); + if (io_active(dest)) { + ipc_object_copyout_dest(space, dest, dest_type, &dest_name); + /* dest is unlocked */ + } else { + io_release(dest); + io_check_unlock(dest); + dest_name = MACH_PORT_DEAD; + } + + reply_name = (mach_port_name_t) reply; + + kmsg->ikm_header.msgh_bits = + (MACH_MSGH_BITS_OTHER(kmsg->ikm_header.msgh_bits) | + MACH_MSGH_BITS(reply_type, dest_type)); + kmsg->ikm_header.msgh_local_port = (ipc_port_t)dest_name; + kmsg->ikm_header.msgh_remote_port = (ipc_port_t)reply_name; +} + +#include +#if MACH_KDB + +#include +#include +/* + * Forward declarations + */ +void ipc_msg_print_untyped( + mach_msg_body_t *body); + +char * ipc_type_name( + int type_name, + boolean_t received); + +void ipc_print_type_name( + int type_name); + +char * +msgh_bit_decode( + mach_msg_bits_t bit); + +char * +mm_copy_options_string( + mach_msg_copy_options_t option); + +void db_print_msg_uid(mach_msg_header_t *); + + +char * +ipc_type_name( + int type_name, + boolean_t received) +{ + switch (type_name) { + case MACH_MSG_TYPE_PORT_NAME: + return "port_name"; + + case MACH_MSG_TYPE_MOVE_RECEIVE: + if (received) { + return "port_receive"; + } else { + return "move_receive"; + } + + case MACH_MSG_TYPE_MOVE_SEND: + if (received) { + return "port_send"; + } else { + return "move_send"; + } + + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + if (received) { + return "port_send_once"; + } else { + return "move_send_once"; + } + + case MACH_MSG_TYPE_COPY_SEND: + 
return "copy_send"; + + case MACH_MSG_TYPE_MAKE_SEND: + return "make_send"; + + case MACH_MSG_TYPE_MAKE_SEND_ONCE: + return "make_send_once"; + + default: + return (char *) 0; + } +} + +void +ipc_print_type_name( + int type_name) +{ + char *name = ipc_type_name(type_name, TRUE); + if (name) { + printf("%s", name); + } else { + printf("type%d", type_name); + } +} + +/* + * ipc_kmsg_print [ debug ] + */ +void +ipc_kmsg_print( + ipc_kmsg_t kmsg) +{ + iprintf("kmsg=0x%x\n", kmsg); + iprintf("ikm_next=0x%x, prev=0x%x, size=%d", + kmsg->ikm_next, + kmsg->ikm_prev, + kmsg->ikm_size); + printf("\n"); + ipc_msg_print(&kmsg->ikm_header); +} + +char * +msgh_bit_decode( + mach_msg_bits_t bit) +{ + switch (bit) { + case MACH_MSGH_BITS_COMPLEX: return "complex"; + case MACH_MSGH_BITS_CIRCULAR: return "circular"; + default: return (char *) 0; + } +} + +/* + * ipc_msg_print [ debug ] + */ +void +ipc_msg_print( + mach_msg_header_t *msgh) +{ + mach_msg_bits_t mbits; + unsigned int bit, i; + char *bit_name; + int needs_comma; + + mbits = msgh->msgh_bits; + iprintf("msgh_bits=0x%x: l=0x%x,r=0x%x\n", + mbits, + MACH_MSGH_BITS_LOCAL(msgh->msgh_bits), + MACH_MSGH_BITS_REMOTE(msgh->msgh_bits)); + + mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED; + db_indent += 2; + if (mbits) + iprintf("decoded bits: "); + needs_comma = 0; + for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) { + if ((mbits & bit) == 0) + continue; + bit_name = msgh_bit_decode((mach_msg_bits_t)bit); + if (bit_name) + printf("%s%s", needs_comma ? "," : "", bit_name); + else + printf("%sunknown(0x%x),", needs_comma ? "," : "", bit); + ++needs_comma; + } + if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) { + printf("%sunused=0x%x,", needs_comma ? 
"," : "", + msgh->msgh_bits & ~MACH_MSGH_BITS_USED); + } + printf("\n"); + db_indent -= 2; + + needs_comma = 1; + if (msgh->msgh_remote_port) { + iprintf("remote=0x%x(", msgh->msgh_remote_port); + ipc_print_type_name(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits)); + printf(")"); + } else { + iprintf("remote=null"); + } + + if (msgh->msgh_local_port) { + printf("%slocal=0x%x(", needs_comma ? "," : "", + msgh->msgh_local_port); + ipc_print_type_name(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits)); + printf(")\n"); + } else { + printf("local=null\n"); + } + + iprintf("msgh_id=%d, size=%d\n", + msgh->msgh_id, + msgh->msgh_size); + + if (mbits & MACH_MSGH_BITS_COMPLEX) { + ipc_msg_print_untyped((mach_msg_body_t *) (msgh + 1)); + } +} + + +char * +mm_copy_options_string( + mach_msg_copy_options_t option) +{ + char *name; + + switch (option) { + case MACH_MSG_PHYSICAL_COPY: + name = "PHYSICAL"; + break; + case MACH_MSG_VIRTUAL_COPY: + name = "VIRTUAL"; + break; + case MACH_MSG_OVERWRITE: + name = "OVERWRITE"; + break; + case MACH_MSG_ALLOCATE: + name = "ALLOCATE"; + break; + case MACH_MSG_KALLOC_COPY_T: + name = "KALLOC_COPY_T"; + break; + case MACH_MSG_PAGE_LIST_COPY_T: + name = "PAGE_LIST_COPY_T"; + break; + default: + name = "unknown"; + break; + } + return name; +} + +void +ipc_msg_print_untyped( + mach_msg_body_t *body) +{ + mach_msg_descriptor_t *saddr, *send; + mach_msg_descriptor_type_t type; + + iprintf("%d descriptors %d: \n", body->msgh_descriptor_count); + + saddr = (mach_msg_descriptor_t *) (body + 1); + send = saddr + body->msgh_descriptor_count; + + for ( ; saddr < send; saddr++ ) { + + type = saddr->type.type; + + switch (type) { + + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc; + + dsc = &saddr->port; + iprintf("-- PORT name = 0x%x disp = ", dsc->name); + ipc_print_type_name(dsc->disposition); + printf("\n"); + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + mach_msg_ool_descriptor_t *dsc; + + dsc = 
&saddr->out_of_line; + iprintf("-- OOL%s addr = 0x%x size = 0x%x copy = %s %s\n", + type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE", + dsc->address, dsc->size, + mm_copy_options_string(dsc->copy), + dsc->deallocate ? "DEALLOC" : ""); + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR : { + mach_msg_ool_ports_descriptor_t *dsc; + + dsc = &saddr->ool_ports; + + iprintf("-- OOL_PORTS addr = 0x%x count = 0x%x ", + dsc->address, dsc->count); + printf("disp = "); + ipc_print_type_name(dsc->disposition); + printf(" copy = %s %s\n", + mm_copy_options_string(dsc->copy), + dsc->deallocate ? "DEALLOC" : ""); + break; + } + + default: { + iprintf("-- UNKNOWN DESCRIPTOR 0x%x\n", type); + break; + } + } + } +} +#endif /* MACH_KDB */ diff --git a/osfmk/ipc/ipc_kmsg.h b/osfmk/ipc/ipc_kmsg.h new file mode 100644 index 000000000..9ebb1402d --- /dev/null +++ b/osfmk/ipc/ipc_kmsg.h @@ -0,0 +1,416 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_kmsg.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for kernel messages. + */ + +#ifndef _IPC_IPC_KMSG_H_ +#define _IPC_IPC_KMSG_H_ + +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * This structure is only the header for a kmsg buffer; + * the actual buffer is normally larger. The rest of the buffer + * holds the body of the message. + * + * In a kmsg, the port fields hold pointers to ports instead + * of port names. These pointers hold references. + * + * The ikm_header.msgh_remote_port field is the destination + * of the message. 
+ */ + + +typedef struct ipc_kmsg { + struct ipc_kmsg *ikm_next; + struct ipc_kmsg *ikm_prev; + ipc_port_t ikm_prealloc; /* port we were preallocated from */ + mach_msg_size_t ikm_size; + mach_msg_header_t ikm_header; +} *ipc_kmsg_t; + +#define IKM_NULL ((ipc_kmsg_t) 0) + +#define IKM_OVERHEAD \ + (sizeof(struct ipc_kmsg) - sizeof(mach_msg_header_t)) + +#define ikm_plus_overhead(size) ((mach_msg_size_t)((size) + IKM_OVERHEAD)) +#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD)) + +/* + * XXX For debugging. + */ +#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10) + +/* + * The size of the kernel message buffers that will be cached. + * IKM_SAVED_KMSG_SIZE includes overhead; IKM_SAVED_MSG_SIZE doesn't. + */ + +#define IKM_SAVED_MSG_SIZE ikm_less_overhead(256) + +#define ikm_prealloc_inuse_port(kmsg) \ + ((kmsg)->ikm_prealloc) + +#define ikm_prealloc_inuse(kmsg) \ + ((kmsg)->ikm_prealloc != IP_NULL) + +#define ikm_prealloc_set_inuse(kmsg, port) \ +MACRO_BEGIN \ + assert(port != IP_NULL); \ + (kmsg)->ikm_prealloc = port; \ +MACRO_END + +#define ikm_prealloc_clear_inuse(kmsg, port) \ +MACRO_BEGIN \ + (kmsg)->ikm_prealloc = IP_NULL; \ +MACRO_END + + +#define ikm_init(kmsg, size) \ +MACRO_BEGIN \ + (kmsg)->ikm_size = (size); \ + (kmsg)->ikm_prealloc = IP_NULL; \ + assert((kmsg)->ikm_prev = (kmsg)->ikm_next = IKM_BOGUS); \ +MACRO_END + +#define ikm_check_init(kmsg, size) \ +MACRO_BEGIN \ + assert((kmsg)->ikm_size == (size)); \ + assert((kmsg)->ikm_prev == IKM_BOGUS); \ + assert((kmsg)->ikm_next == IKM_BOGUS); \ +MACRO_END + +struct ipc_kmsg_queue { + struct ipc_kmsg *ikmq_base; +}; + +typedef struct ipc_kmsg_queue *ipc_kmsg_queue_t; + +#define IKMQ_NULL ((ipc_kmsg_queue_t) 0) + + +/* + * Exported interfaces + */ + +#define ipc_kmsg_queue_init(queue) \ +MACRO_BEGIN \ + (queue)->ikmq_base = IKM_NULL; \ +MACRO_END + +#define ipc_kmsg_queue_empty(queue) ((queue)->ikmq_base == IKM_NULL) + +/* Enqueue a kmsg */ +extern void ipc_kmsg_enqueue( + ipc_kmsg_queue_t 
queue, + ipc_kmsg_t kmsg); + +/* Dequeue and return a kmsg */ +extern ipc_kmsg_t ipc_kmsg_dequeue( + ipc_kmsg_queue_t queue); + +/* Pull a kmsg out of a queue */ +extern void ipc_kmsg_rmqueue( + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg); + +#define ipc_kmsg_queue_first(queue) ((queue)->ikmq_base) + +/* Return the kmsg following the given kmsg */ +extern ipc_kmsg_t ipc_kmsg_queue_next( + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg); + +#define ipc_kmsg_rmqueue_first_macro(queue, kmsg) \ +MACRO_BEGIN \ + register ipc_kmsg_t _next; \ + \ + assert((queue)->ikmq_base == (kmsg)); \ + \ + _next = (kmsg)->ikm_next; \ + if (_next == (kmsg)) { \ + assert((kmsg)->ikm_prev == (kmsg)); \ + (queue)->ikmq_base = IKM_NULL; \ + } else { \ + register ipc_kmsg_t _prev = (kmsg)->ikm_prev; \ + \ + (queue)->ikmq_base = _next; \ + _next->ikm_prev = _prev; \ + _prev->ikm_next = _next; \ + } \ + /* XXX Debug paranoia ASSIGNMENTS */ \ + assert(kmsg->ikm_next = IKM_BOGUS); \ + assert(kmsg->ikm_prev = IKM_BOGUS); \ +MACRO_END + +#define ipc_kmsg_enqueue_macro(queue, kmsg) \ +MACRO_BEGIN \ + register ipc_kmsg_t _first = (queue)->ikmq_base; \ + \ + if (_first == IKM_NULL) { \ + (queue)->ikmq_base = (kmsg); \ + (kmsg)->ikm_next = (kmsg); \ + (kmsg)->ikm_prev = (kmsg); \ + } else { \ + register ipc_kmsg_t _last = _first->ikm_prev; \ + \ + (kmsg)->ikm_next = _first; \ + (kmsg)->ikm_prev = _last; \ + _first->ikm_prev = (kmsg); \ + _last->ikm_next = (kmsg); \ + } \ +MACRO_END + +/* scatter list macros */ + +#define SKIP_PORT_DESCRIPTORS(s, e) \ +MACRO_BEGIN \ + if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \ + while ((s) < (e)) { \ + if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \ + break; \ + (s)++; \ + } \ + if ((s) >= (e)) \ + (s) = MACH_MSG_DESCRIPTOR_NULL; \ + } \ +MACRO_END + +#define INCREMENT_SCATTER(s) \ +MACRO_BEGIN \ + if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \ + (s)++; \ + } \ +MACRO_END + +/* + * extern void + * ipc_kmsg_send_always(ipc_kmsg_t); + * + * Unfortunately, to avoid warnings/lint 
about unused variables + * when assertions are turned off, we need two versions of this. + */ +#if MACH_ASSERT + +#define ipc_kmsg_send_always(kmsg) \ +MACRO_BEGIN \ + mach_msg_return_t mr; \ + \ + mr = ipc_kmsg_send((kmsg), MACH_SEND_ALWAYS, \ + MACH_MSG_TIMEOUT_NONE); \ + assert(mr == MACH_MSG_SUCCESS); \ +MACRO_END + +#else /* MACH_ASSERT */ + +#define ipc_kmsg_send_always(kmsg) \ +MACRO_BEGIN \ + (void) ipc_kmsg_send((kmsg), MACH_SEND_ALWAYS, \ + MACH_MSG_TIMEOUT_NONE); \ +MACRO_END + +#endif /* MACH_ASSERT */ + +/* Allocate a kernel message */ +extern ipc_kmsg_t ipc_kmsg_alloc( + mach_msg_size_t size); + +/* Free a kernel message buffer */ +extern void ipc_kmsg_free( + ipc_kmsg_t kmsg); + +/* Destroy kernel message */ +extern void ipc_kmsg_destroy( + ipc_kmsg_t kmsg); + +/* Preallocate a kernel message buffer */ +extern void ipc_kmsg_set_prealloc( + ipc_kmsg_t kmsg, + ipc_port_t port); + +/* Clear a kernel message buffer */ +extern void ipc_kmsg_clear_prealloc( + ipc_kmsg_t kmsg, + ipc_port_t port); + +/* Allocate a kernel message buffer and copy a user message to the buffer */ +extern mach_msg_return_t ipc_kmsg_get( + mach_msg_header_t *msg, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp); + +/* Allocate a kernel message buffer and copy a kernel message to the buffer */ +extern mach_msg_return_t ipc_kmsg_get_from_kernel( + mach_msg_header_t *msg, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp); + +/* Send a message to a port */ +extern mach_msg_return_t ipc_kmsg_send( + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t timeout); + +/* Copy a kernel message buffer to a user message */ +extern mach_msg_return_t ipc_kmsg_put( + mach_msg_header_t *msg, + ipc_kmsg_t kmsg, + mach_msg_size_t size); + +/* Copy a kernel message buffer to a kernel message */ +extern void ipc_kmsg_put_to_kernel( + mach_msg_header_t *msg, + ipc_kmsg_t kmsg, + mach_msg_size_t size); + +/* Copyin port rights in the header of a message */ +extern mach_msg_return_t 
ipc_kmsg_copyin_header( + mach_msg_header_t *msg, + ipc_space_t space, + mach_port_name_t notify); + +/* Copyin port rights and out-of-line memory from a user message */ +extern mach_msg_return_t ipc_kmsg_copyin( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_port_name_t notify); + +/* Copyin port rights and out-of-line memory from a kernel message */ +extern void ipc_kmsg_copyin_from_kernel( + ipc_kmsg_t kmsg); + +/* Copyout port rights in the header of a message */ +extern mach_msg_return_t ipc_kmsg_copyout_header( + mach_msg_header_t *msg, + ipc_space_t space, + mach_port_name_t notify); + +/* Copyout a port right returning a name */ +extern mach_msg_return_t ipc_kmsg_copyout_object( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep); + +/* Copyout the header and body to a user message */ +extern mach_msg_return_t ipc_kmsg_copyout( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_port_name_t notify, + mach_msg_body_t *slist); + +/* Copyout port rights and out-of-line memory from the body of a message */ +extern mach_msg_return_t ipc_kmsg_copyout_body( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist); + +/* Copyout port rights and out-of-line memory to a user message, + not reversing the ports in the header */ +extern mach_msg_return_t ipc_kmsg_copyout_pseudo( + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist); + +/* Copyout the destination port in the message */ +extern void ipc_kmsg_copyout_dest( + ipc_kmsg_t kmsg, + ipc_space_t space); + +/* kernel's version of ipc_kmsg_copyout_dest */ +extern void ipc_kmsg_copyout_to_kernel( + ipc_kmsg_t kmsg, + ipc_space_t space); + +/* copyin a scatter list and check consistency */ +extern mach_msg_body_t *ipc_kmsg_copyin_scatter( + mach_msg_header_t *msg, + mach_msg_size_t slist_size, + ipc_kmsg_t kmsg); + +/* free a scatter list */ +extern void ipc_kmsg_free_scatter( + 
mach_msg_body_t *slist, + mach_msg_size_t slist_size); + +#include +#if MACH_KDB + +/* Do a formatted dump of a kernel message */ +extern void ipc_kmsg_print( + ipc_kmsg_t kmsg); + +/* Do a formatted dump of a user message */ +extern void ipc_msg_print( + mach_msg_header_t *msgh); + +#endif /* MACH_KDB */ + +#endif /* _IPC_IPC_KMSG_H_ */ diff --git a/osfmk/ipc/ipc_machdep.h b/osfmk/ipc/ipc_machdep.h new file mode 100644 index 000000000..f528d8913 --- /dev/null +++ b/osfmk/ipc/ipc_machdep.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:16 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1995/01/06 19:45:43 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:19:20 dwm] + * + * Revision 1.1.3.1 1994/05/06 18:47:26 tmt + * Merge this file in from the osc1.3dec tree. + * [1994/03/30 21:33:42 berube] + * Created from mk80. 
+ * [93/10/05 bruel] + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 93/01/14 17:32:59 danner + * Created. + * [92/12/10 af] + */ +/* CMU_END_HIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * At times, we need to know the size of a port in bits + */ + +/* 64 bit machines */ +#if defined(__alpha) +#define PORT_T_SIZE_IN_BITS 64 +#endif + +/* default, 32 bit machines */ +#if !defined(PORT_T_SIZE_IN_BITS) +#define PORT_T_SIZE_IN_BITS 32 +#endif + diff --git a/osfmk/ipc/ipc_mqueue.c b/osfmk/ipc/ipc_mqueue.c new file mode 100644 index 000000000..0c7502e6a --- /dev/null +++ b/osfmk/ipc/ipc_mqueue.c @@ -0,0 +1,1004 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_mqueue.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate IPC message queues. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +int ipc_mqueue_full; /* address is event for queue space */ +int ipc_mqueue_rcv; /* address is event for message arrival */ + +#define TR_ENABLE 0 + +/* + * Routine: ipc_mqueue_init + * Purpose: + * Initialize a newly-allocated message queue. + */ +void +ipc_mqueue_init( + ipc_mqueue_t mqueue, + boolean_t is_set) +{ + if (is_set) { + wait_queue_sub_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO); + } else { + wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO); + ipc_kmsg_queue_init(&mqueue->imq_messages); + mqueue->imq_seqno = 0; + mqueue->imq_msgcount = 0; + mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT; + mqueue->imq_fullwaiters = FALSE; + } +} + +/* + * Routine: ipc_mqueue_member + * Purpose: + * Indicate whether the (port) mqueue is a member of + * this portset's mqueue. We do this by checking + * whether the portset mqueue's waitq is an member of + * the port's mqueue waitq. + * Conditions: + * the portset's mqueue is not already a member + * this may block while allocating linkage structures. + */ + +boolean_t +ipc_mqueue_member( + ipc_mqueue_t port_mqueue, + ipc_mqueue_t set_mqueue) +{ + wait_queue_t port_waitq = &port_mqueue->imq_wait_queue; + wait_queue_t set_waitq = &set_mqueue->imq_wait_queue; + + return (wait_queue_member(port_waitq, set_waitq)); + +} + +/* + * Routine: ipc_mqueue_remove + * Purpose: + * Remove the association between the queue and the specified + * subordinate message queue. 
+ */ + +kern_return_t +ipc_mqueue_remove( + ipc_mqueue_t mqueue, + ipc_mqueue_t sub_mqueue) +{ + wait_queue_t mq_waitq = &mqueue->imq_wait_queue; + wait_queue_sub_t sub_waitq = &sub_mqueue->imq_set_queue; + + if (wait_queue_member(mq_waitq, sub_waitq)) { + wait_queue_unlink(mq_waitq, sub_waitq); + return KERN_SUCCESS; + } + return KERN_NOT_IN_SET; +} + +/* + * Routine: ipc_mqueue_remove_one + * Purpose: + * Find and remove one subqueue from the queue. + * Conditions: + * Will return the set mqueue that was removed + */ +void +ipc_mqueue_remove_one( + ipc_mqueue_t mqueue, + ipc_mqueue_t *sub_queuep) +{ + wait_queue_t mq_waitq = &mqueue->imq_wait_queue; + + wait_queue_unlink_one(mq_waitq, (wait_queue_sub_t *)sub_queuep); + return; +} + + +/* + * Routine: ipc_mqueue_add + * Purpose: + * Associate the portset's mqueue with the port's mqueue. + * This has to be done so that posting the port will wakeup + * a portset waiter. If there are waiters on the portset + * mqueue and messages on the port mqueue, try to match them + * up now. + * Conditions: + * May block. + */ +kern_return_t +ipc_mqueue_add( + ipc_mqueue_t port_mqueue, + ipc_mqueue_t set_mqueue) +{ + wait_queue_t port_waitq = &port_mqueue->imq_wait_queue; + wait_queue_sub_t set_waitq = &set_mqueue->imq_set_queue; + ipc_kmsg_queue_t kmsgq; + ipc_kmsg_t kmsg, next; + kern_return_t kr; + spl_t s; + + kr = wait_queue_link(port_waitq, set_waitq); + if (kr != KERN_SUCCESS) + return kr; + + /* + * Now that the set has been added to the port, there may be + * messages queued on the port and threads waiting on the set + * waitq. Lets get them together. 
+ */ + s = splsched(); + imq_lock(port_mqueue); + kmsgq = &port_mqueue->imq_messages; + for (kmsg = ipc_kmsg_queue_first(kmsgq); + kmsg != IKM_NULL; + kmsg = next) { + next = ipc_kmsg_queue_next(kmsgq, kmsg); + + for (;;) { + thread_t th; + + th = wait_queue_wakeup_identity_locked(port_waitq, + IPC_MQUEUE_RECEIVE, + THREAD_AWAKENED, + FALSE); + /* waitq/mqueue still locked, thread locked */ + + if (th == THREAD_NULL) + goto leave; + + /* + * Found a receiver. see if they can handle the message + * correctly (the message is not too large for them, or + * they didn't care to be informed that the message was + * too large). If they can't handle it, take them off + * the list and let them go back and figure it out and + * just move onto the next. + */ + if (th->ith_msize < + kmsg->ikm_header.msgh_size + + REQUESTED_TRAILER_SIZE(th->ith_option)) { + th->ith_state = MACH_RCV_TOO_LARGE; + th->ith_msize = kmsg->ikm_header.msgh_size; + if (th->ith_option & MACH_RCV_LARGE) { + /* + * let him go without message + */ + th->ith_kmsg = IKM_NULL; + th->ith_seqno = 0; + thread_unlock(th); + continue; /* find another thread */ + } + } else { + th->ith_state = MACH_MSG_SUCCESS; + } + + /* + * This thread is going to take this message, + * so give it to him. + */ + ipc_mqueue_release_msgcount(port_mqueue); + ipc_kmsg_rmqueue(kmsgq, kmsg); + th->ith_kmsg = kmsg; + th->ith_seqno = port_mqueue->imq_seqno++; + thread_unlock(th); + break; /* go to next message */ + } + + } + leave: + imq_unlock(port_mqueue); + splx(s); + return KERN_SUCCESS; +} + +/* + * Routine: ipc_mqueue_changed + * Purpose: + * Wake up receivers waiting in a message queue. + * Conditions: + * The message queue is locked. + */ + +void +ipc_mqueue_changed( + ipc_mqueue_t mqueue) +{ + wait_queue_wakeup_all_locked(&mqueue->imq_wait_queue, + IPC_MQUEUE_RECEIVE, + THREAD_RESTART, + FALSE); /* unlock waitq? */ +} + + + + +/* + * Routine: ipc_mqueue_send + * Purpose: + * Send a message to a message queue. 
The message holds a reference + * for the destination port for this message queue in the + * msgh_remote_port field. + * + * If unsuccessful, the caller still has possession of + * the message and must do something with it. If successful, + * the message is queued, given to a receiver, or destroyed. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS The message was accepted. + * MACH_SEND_TIMED_OUT Caller still has message. + * MACH_SEND_INTERRUPTED Caller still has message. + */ +mach_msg_return_t +ipc_mqueue_send( + ipc_mqueue_t mqueue, + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t timeout) +{ + int save_wait_result; + spl_t s; + + /* + * Don't block if: + * 1) We're under the queue limit. + * 2) Caller used the MACH_SEND_ALWAYS internal option. + * 3) Message is sent to a send-once right. + */ + s = splsched(); + imq_lock(mqueue); + + if (!imq_full(mqueue) || + (option & MACH_SEND_ALWAYS) || + (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) == + MACH_MSG_TYPE_PORT_SEND_ONCE)) { + mqueue->imq_msgcount++; + imq_unlock(mqueue); + splx(s); + } else { + + /* + * We have to wait for space to be granted to us. + */ + if ((option & MACH_SEND_TIMEOUT) && (timeout == 0)) { + imq_unlock(mqueue); + splx(s); + return MACH_SEND_TIMED_OUT; + } + mqueue->imq_fullwaiters = TRUE; + wait_queue_assert_wait_locked(&mqueue->imq_wait_queue, + IPC_MQUEUE_FULL, + THREAD_ABORTSAFE, + TRUE); /* unlock? 
*/ + /* wait/mqueue is unlocked */ + splx(s); + + if (option & MACH_SEND_TIMEOUT) + thread_set_timer(timeout, 1000*NSEC_PER_USEC); + + counter(c_ipc_mqueue_send_block++); + save_wait_result = thread_block((void (*)(void)) 0); + + switch (save_wait_result) { + case THREAD_TIMED_OUT: + assert(option & MACH_SEND_TIMEOUT); + return MACH_SEND_TIMED_OUT; + + case THREAD_AWAKENED: + /* we can proceed - inherited msgcount from waker */ + if (option & MACH_SEND_TIMEOUT) + thread_cancel_timer(); + break; + + case THREAD_INTERRUPTED: + if (option & MACH_SEND_TIMEOUT) + thread_cancel_timer(); + return MACH_SEND_INTERRUPTED; + + case THREAD_RESTART: + default: + panic("ipc_mqueue_send"); + } + } + + ipc_mqueue_post(mqueue, kmsg); + return MACH_MSG_SUCCESS; +} + +/* + * Routine: ipc_mqueue_release_msgcount + * Purpose: + * Release a message queue reference in the case where we + * found a waiter. + * + * Conditions: + * The message queue is locked + */ +void +ipc_mqueue_release_msgcount( + ipc_mqueue_t mqueue) +{ + assert(imq_held(mqueue)); + assert(mqueue->imq_msgcount > 0); + + mqueue->imq_msgcount--; + if (!imq_full(mqueue) && mqueue->imq_fullwaiters) { + if (wait_queue_wakeup_one_locked(&mqueue->imq_wait_queue, + IPC_MQUEUE_FULL, + THREAD_AWAKENED, + FALSE) != KERN_SUCCESS) { + mqueue->imq_fullwaiters = FALSE; + } else { + mqueue->imq_msgcount++; /* gave it away */ + } + } +} + +/* + * Routine: ipc_mqueue_post + * Purpose: + * Post a message to a waiting receiver or enqueue it. If a + * receiver is waiting, we can release our reserved space in + * the message queue. + * + * Conditions: + * If we need to queue, our space in the message queue is reserved. + */ +void +ipc_mqueue_post( + register ipc_mqueue_t mqueue, + register ipc_kmsg_t kmsg) +{ + + spl_t s; + + /* + * While the msg queue is locked, we have control of the + * kmsg, so the ref in it for the port is still good. + * + * Check for a receiver for the message. 
+ */ + s = splsched(); + imq_lock(mqueue); + for (;;) { + wait_queue_t waitq = &mqueue->imq_wait_queue; + thread_t receiver; + + receiver = wait_queue_wakeup_identity_locked(waitq, + IPC_MQUEUE_RECEIVE, + THREAD_AWAKENED, + FALSE); + /* waitq still locked, thread locked */ + + if (receiver == THREAD_NULL) { + /* + * no receivers; queue kmsg + */ + assert(mqueue->imq_msgcount > 0); + ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg); + break; + } + + /* + * We found a waiting thread. + * If the message is too large or the scatter list is too small + * the thread we wake up will get that as its status. + */ + if (receiver->ith_msize < + (kmsg->ikm_header.msgh_size) + + REQUESTED_TRAILER_SIZE(receiver->ith_option)) { + receiver->ith_msize = kmsg->ikm_header.msgh_size; + receiver->ith_state = MACH_RCV_TOO_LARGE; + } else { + receiver->ith_state = MACH_MSG_SUCCESS; + } + + /* + * If there is no problem with the upcoming receive, or the + * receiver thread didn't specifically ask for special too + * large error condition, go ahead and select it anyway. + */ + if ((receiver->ith_state == MACH_MSG_SUCCESS) || + !(receiver->ith_option & MACH_RCV_LARGE)) { + + receiver->ith_kmsg = kmsg; + receiver->ith_seqno = mqueue->imq_seqno++; + thread_unlock(receiver); + + /* we didn't need our reserved spot in the queue */ + ipc_mqueue_release_msgcount(mqueue); + break; + } + + /* + * Otherwise, this thread needs to be released to run + * and handle its error without getting the message. We + * need to go back and pick another one. + */ + receiver->ith_kmsg = IKM_NULL; + receiver->ith_seqno = 0; + thread_unlock(receiver); + } + + imq_unlock(mqueue); + splx(s); + + current_task()->messages_sent++; + return; +} + + +kern_return_t +ipc_mqueue_receive_results(void) +{ + thread_t self = current_thread(); + mach_msg_option_t option = self->ith_option; + kern_return_t saved_wait_result = self->wait_result; + kern_return_t mr; + + /* + * why did we wake up? 
+ */ + switch (saved_wait_result) { + case THREAD_TIMED_OUT: + self->ith_state = MACH_RCV_TIMED_OUT; + return; + + case THREAD_INTERRUPTED: + if (option & MACH_RCV_TIMEOUT) + thread_cancel_timer(); + self->ith_state = MACH_RCV_INTERRUPTED; + return; + + case THREAD_RESTART: + /* something bad happened to the port/set */ + if (option & MACH_RCV_TIMEOUT) + thread_cancel_timer(); + self->ith_state = MACH_RCV_PORT_CHANGED; + return; + + case THREAD_AWAKENED: + /* + * We do not need to go select a message, somebody + * handed us one (or a too-large indication). + */ + if (option & MACH_RCV_TIMEOUT) + thread_cancel_timer(); + + mr = MACH_MSG_SUCCESS; + + switch (self->ith_state) { + case MACH_RCV_SCATTER_SMALL: + case MACH_RCV_TOO_LARGE: + /* + * Somebody tried to give us a too large + * message. If we indicated that we cared, + * then they only gave us the indication, + * otherwise they gave us the indication + * AND the message anyway. + */ + if (option & MACH_RCV_LARGE) { + return; + } + + case MACH_MSG_SUCCESS: + return; + + default: + panic("ipc_mqueue_receive_results: strange ith_state"); + } + + default: + panic("ipc_mqueue_receive_results: strange wait_result"); + } +} + +void +ipc_mqueue_receive_continue(void) +{ + ipc_mqueue_receive_results(); + mach_msg_receive_continue(); /* hard-coded for now */ +} + +/* + * Routine: ipc_mqueue_receive + * Purpose: + * Receive a message from a message queue. + * + * If continuation is non-zero, then we might discard + * our kernel stack when we block. We will continue + * after unblocking by executing continuation. + * + * If resume is true, then we are resuming a receive + * operation after a blocked receive discarded our stack. + * Conditions: + * Our caller must hold a reference for the port or port set + * to which this queue belongs, to keep the queue + * from being deallocated. + * + * The kmsg is returned with clean header fields + * and with the circular bit turned off. 
+ * Returns: + * MACH_MSG_SUCCESS Message returned in kmsgp. + * MACH_RCV_TOO_LARGE Message size returned in kmsgp. + * MACH_RCV_TIMED_OUT No message obtained. + * MACH_RCV_INTERRUPTED No message obtained. + * MACH_RCV_PORT_DIED Port/set died; no message. + * MACH_RCV_PORT_CHANGED Port moved into set; no msg. + * + */ + +void +ipc_mqueue_receive( + ipc_mqueue_t mqueue, + mach_msg_option_t option, + mach_msg_size_t max_size, + mach_msg_timeout_t timeout, + int interruptible) +{ + ipc_port_t port; + mach_msg_return_t mr, mr2; + ipc_kmsg_queue_t kmsgs; + kern_return_t save_wait_result; + thread_t self; + ipc_kmsg_t *kmsgp; + mach_port_seqno_t *seqnop; + spl_t s; + + s = splsched(); + imq_lock(mqueue); + + if (imq_is_set(mqueue)) { + wait_queue_link_t wql; + ipc_mqueue_t port_mq; + queue_t q; + + q = &mqueue->imq_setlinks; + + /* + * If we are waiting on a portset mqueue, we need to see if + * any of the member ports have work for us. If so, try to + * deliver one of those messages. By holding the portset's + * mqueue lock during the search, we tie up any attempts by + * mqueue_deliver or portset membership changes that may + * cross our path. But this is a lock order violation, so we + * have to do it "softly." If we don't find a message waiting + * for us, we will assert our intention to wait while still + * holding that lock. When we release the lock, the deliver/ + * change will succeed and find us. + */ + search_set: + queue_iterate(q, wql, wait_queue_link_t, wql_sublinks) { + port_mq = (ipc_mqueue_t)wql->wql_queue; + kmsgs = &port_mq->imq_messages; + + if (!imq_lock_try(port_mq)) { + imq_unlock(mqueue); + splx(s); + delay(1); + s = splsched(); + imq_lock(mqueue); + goto search_set; /* start again at beginning - SMP */ + } + + /* + * If there is still a message to be had, we will + * try to select it (may not succeed because of size + * and options). In any case, we deliver those + * results back to the user. 
+ * + * We also move the port's linkage to the tail of the + * list for this set (fairness). Future versions will + * sort by timestamp or priority. + */ + if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) { + imq_unlock(port_mq); + continue; + } + queue_remove(q, wql, wait_queue_link_t, wql_sublinks); + queue_enter(q, wql, wait_queue_link_t, wql_sublinks); + imq_unlock(mqueue); + + ipc_mqueue_select(port_mq, option, max_size); + imq_unlock(port_mq); + splx(s); + return; + + } + + } else { + + /* + * Receive on a single port. Just try to get the messages. + */ + kmsgs = &mqueue->imq_messages; + if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) { + ipc_mqueue_select(mqueue, option, max_size); + imq_unlock(mqueue); + splx(s); + return; + } + } + + /* + * Looks like we'll have to block. The mqueue we will + * block on (whether the set's or the local port's) is + * still locked. + */ + self = current_thread(); + if (option & MACH_RCV_TIMEOUT) { + if (timeout == 0) { + imq_unlock(mqueue); + splx(s); + self->ith_state = MACH_RCV_TIMED_OUT; + return; + } + } + + self->ith_state = MACH_RCV_IN_PROGRESS; + self->ith_option = option; + self->ith_msize = max_size; + + wait_queue_assert_wait_locked(&mqueue->imq_wait_queue, + IPC_MQUEUE_RECEIVE, + interruptible, + TRUE); /* unlock? */ + /* mqueue/waitq is unlocked */ + splx(s); + + if (option & MACH_RCV_TIMEOUT) { + thread_set_timer(timeout, 1000*NSEC_PER_USEC); + } + + if (interruptible == THREAD_ABORTSAFE) { + counter(c_ipc_mqueue_receive_block_user++); + } else { + counter(c_ipc_mqueue_receive_block_kernel++); + } + +#if defined (__i386__) + thread_block((void (*)(void))0); +#else + if (self->ith_continuation) { + thread_block(ipc_mqueue_receive_continue); + } else { + thread_block((void (*)(void))0); + } +#endif + + ipc_mqueue_receive_results(); /* if we fell thru */ +} + + +/* + * Routine: ipc_mqueue_select + * Purpose: + * A receiver discovered that there was a message on the queue + * before he had to block. 
Pick the message off the queue and + * "post" it to himself. + * Conditions: + * mqueue locked. + * There is a message. + * Returns: + * MACH_MSG_SUCCESS Actually selected a message for ourselves. + * MACH_RCV_TOO_LARGE May or may not have pull it, but it is large + */ +void +ipc_mqueue_select( + ipc_mqueue_t mqueue, + mach_msg_option_t option, + mach_msg_size_t max_size) +{ + thread_t self = current_thread(); + ipc_kmsg_t kmsg; + mach_port_seqno_t seqno; + mach_msg_return_t mr; + + mr = MACH_MSG_SUCCESS; + + + /* + * Do some sanity checking of our ability to receive + * before pulling the message off the queue. + */ + kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages); + + assert(kmsg != IKM_NULL); + + if (kmsg->ikm_header.msgh_size + + REQUESTED_TRAILER_SIZE(option) > max_size) { + mr = MACH_RCV_TOO_LARGE; + } + + /* + * If we really can't receive it, but we had the + * MACH_RCV_LARGE option set, then don't take it off + * the queue, instead return the appropriate error + * (and size needed). + */ + if ((mr == MACH_RCV_TOO_LARGE) && (option & MACH_RCV_LARGE)) { + self->ith_kmsg = IKM_NULL; + self->ith_msize = kmsg->ikm_header.msgh_size; + self->ith_seqno = 0; + self->ith_state = mr; + return; + } + + ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg); + ipc_mqueue_release_msgcount(mqueue); + self->ith_seqno = mqueue->imq_seqno++; + self->ith_kmsg = kmsg; + self->ith_state = mr; + + current_task()->messages_received++; + return; +} + +/* + * Routine: ipc_mqueue_destroy + * Purpose: + * Destroy a message queue. Set any blocked senders running. + * Destroy the kmsgs in the queue. + * Conditions: + * Nothing locked. 
+ * Receivers were removed when the receive right was "changed" + */ +void +ipc_mqueue_destroy( + ipc_mqueue_t mqueue) +{ + ipc_kmsg_queue_t kmqueue; + ipc_kmsg_t kmsg; + spl_t s; + + + s = splsched(); + imq_lock(mqueue); + /* + * rouse all blocked senders + */ + mqueue->imq_fullwaiters = FALSE; + wait_queue_wakeup_all_locked(&mqueue->imq_wait_queue, + IPC_MQUEUE_FULL, + THREAD_AWAKENED, + FALSE); + + kmqueue = &mqueue->imq_messages; + + while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) { + imq_unlock(mqueue); + splx(s); + + ipc_kmsg_destroy_dest(kmsg); + + s = splsched(); + imq_lock(mqueue); + } + imq_unlock(mqueue); + splx(s); +} + +/* + * Routine: ipc_mqueue_set_qlimit + * Purpose: + * Changes a message queue limit; the maximum number + * of messages which may be queued. + * Conditions: + * Nothing locked. + */ + +void +ipc_mqueue_set_qlimit( + ipc_mqueue_t mqueue, + mach_port_msgcount_t qlimit) +{ + spl_t s; + + /* wake up senders allowed by the new qlimit */ + s = splsched(); + imq_lock(mqueue); + if (qlimit > mqueue->imq_qlimit) { + mach_port_msgcount_t i, wakeup; + + /* caution: wakeup, qlimit are unsigned */ + wakeup = qlimit - mqueue->imq_qlimit; + + for (i = 0; i < wakeup; i++) { + if (wait_queue_wakeup_one_locked(&mqueue->imq_wait_queue, + IPC_MQUEUE_FULL, + THREAD_AWAKENED, + FALSE) == KERN_NOT_WAITING) { + mqueue->imq_fullwaiters = FALSE; + break; + } + } + } + mqueue->imq_qlimit = qlimit; + imq_unlock(mqueue); + splx(s); +} + +/* + * Routine: ipc_mqueue_set_seqno + * Purpose: + * Changes an mqueue's sequence number. + * Conditions: + * Caller holds a reference to the queue's containing object. + */ +void +ipc_mqueue_set_seqno( + ipc_mqueue_t mqueue, + mach_port_seqno_t seqno) +{ + spl_t s; + + s = splsched(); + imq_lock(mqueue); + mqueue->imq_seqno = seqno; + imq_unlock(mqueue); + splx(s); +} + + +/* + * Routine: ipc_mqueue_copyin + * Purpose: + * Convert a name in a space to a message queue. + * Conditions: + * Nothing locked. 
If successful, the caller gets a ref for + * for the object. This ref ensures the continued existence of + * the queue. + * Returns: + * MACH_MSG_SUCCESS Found a message queue. + * MACH_RCV_INVALID_NAME The space is dead. + * MACH_RCV_INVALID_NAME The name doesn't denote a right. + * MACH_RCV_INVALID_NAME + * The denoted right is not receive or port set. + * MACH_RCV_IN_SET Receive right is a member of a set. + */ + +mach_msg_return_t +ipc_mqueue_copyin( + ipc_space_t space, + mach_port_name_t name, + ipc_mqueue_t *mqueuep, + ipc_object_t *objectp) +{ + ipc_entry_t entry; + ipc_object_t object; + ipc_mqueue_t mqueue; + + is_read_lock(space); + if (!space->is_active) { + is_read_unlock(space); + return MACH_RCV_INVALID_NAME; + } + + entry = ipc_entry_lookup(space, name); + if (entry == IE_NULL) { + is_read_unlock(space); + return MACH_RCV_INVALID_NAME; + } + + object = entry->ie_object; + + if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) { + ipc_port_t port; + ipc_pset_t pset; + + port = (ipc_port_t) object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + is_read_unlock(space); + mqueue = &port->ip_messages; + + } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) { + ipc_pset_t pset; + + pset = (ipc_pset_t) object; + assert(pset != IPS_NULL); + + ips_lock(pset); + assert(ips_active(pset)); + assert(pset->ips_local_name == name); + is_read_unlock(space); + + mqueue = &pset->ips_messages; + } else { + is_read_unlock(space); + return MACH_RCV_INVALID_NAME; + } + + /* + * At this point, the object is locked and active, + * the space is unlocked, and mqueue is initialized. 
+ */ + + io_reference(object); + io_unlock(object); + + *objectp = object; + *mqueuep = mqueue; + return MACH_MSG_SUCCESS; +} + diff --git a/osfmk/ipc/ipc_mqueue.h b/osfmk/ipc/ipc_mqueue.h new file mode 100644 index 000000000..85e0b1d92 --- /dev/null +++ b/osfmk/ipc/ipc_mqueue.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_mqueue.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for message queues. + */ + +#ifndef _IPC_IPC_MQUEUE_H_ +#define _IPC_IPC_MQUEUE_H_ + +#include + +#include + +#include +#include +#include + +#include +#include +#include + +typedef struct ipc_mqueue { + union { + struct { + struct wait_queue wait_queue; + struct ipc_kmsg_queue messages; + mach_port_msgcount_t msgcount; + mach_port_msgcount_t qlimit; + mach_port_seqno_t seqno; + boolean_t fullwaiters; + } port; + struct wait_queue_sub set_queue; + } data; +} *ipc_mqueue_t; + +#define IMQ_NULL ((ipc_mqueue_t) 0) + +#define imq_wait_queue data.port.wait_queue +#define imq_messages data.port.messages +#define imq_msgcount data.port.msgcount +#define imq_qlimit data.port.qlimit +#define imq_seqno data.port.seqno +#define imq_fullwaiters data.port.fullwaiters + +#define imq_set_queue data.set_queue +#define imq_setlinks data.set_queue.wqs_sublinks +#define imq_is_set(mq) wait_queue_is_sub(&(mq)->imq_set_queue) + +#define imq_lock(mq) wait_queue_lock(&(mq)->imq_wait_queue) +#define imq_lock_try(mq) wait_queue_lock_try(&(mq)->imq_wait_queue) +#define imq_unlock(mq) wait_queue_unlock(&(mq)->imq_wait_queue) +#define imq_held(mq) wait_queue_held(&(mq)->imq_wait_queue) + +#define imq_full(mq) ((mq)->imq_msgcount >= (mq)->imq_qlimit) + +extern int ipc_mqueue_full; +extern int ipc_mqueue_rcv; + +#define IPC_MQUEUE_FULL (event_t)&ipc_mqueue_full +#define IPC_MQUEUE_RECEIVE (event_t)&ipc_mqueue_rcv + +/* + * Exported interfaces + */ + +/* Initialize a newly-allocated message queue */ +extern void ipc_mqueue_init( + 
ipc_mqueue_t mqueue, + boolean_t is_set); + +/* Move messages from one queue to another */ +extern void ipc_mqueue_move( + ipc_mqueue_t dest, + ipc_mqueue_t source, + ipc_port_t port); + +/* Wake up receivers waiting in a message queue */ +extern void ipc_mqueue_changed( + ipc_mqueue_t mqueue); + +/* Send a message to a port */ +extern mach_msg_return_t ipc_mqueue_send( + ipc_mqueue_t mqueue, + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t timeout); + +/* Deliver message to message queue or waiting receiver */ +extern void ipc_mqueue_post( + ipc_mqueue_t mqueue, + ipc_kmsg_t kmsg); + +/* Receive a message from a message queue */ +extern void ipc_mqueue_receive( + ipc_mqueue_t mqueue, + mach_msg_option_t option, + mach_msg_size_t max_size, + mach_msg_timeout_t timeout, + int interruptible); + +/* Continuation routine for message receive */ +extern void ipc_mqueue_receive_continue(void); + +/* Select a message from a queue and try to post it to ourself */ +extern void ipc_mqueue_select( + ipc_mqueue_t mqueue, + mach_msg_option_t option, + mach_msg_size_t max_size); + +/* Clear a message count reservation */ +extern void ipc_mqueue_release_msgcount( + ipc_mqueue_t mqueue); + +/* Change a queue limit */ +extern void ipc_mqueue_set_qlimit( + ipc_mqueue_t mqueue, + mach_port_msgcount_t qlimit); + +/* Change a queue's sequence number */ +extern void ipc_mqueue_set_seqno( + ipc_mqueue_t mqueue, + mach_port_seqno_t seqno); + +/* Convert a name in a space to a message queue */ +extern mach_msg_return_t ipc_mqueue_copyin( + ipc_space_t space, + mach_port_name_t name, + ipc_mqueue_t *mqueuep, + ipc_object_t *objectp); + +#endif /* _IPC_IPC_MQUEUE_H_ */ diff --git a/osfmk/ipc/ipc_notify.c b/osfmk/ipc/ipc_notify.c new file mode 100644 index 000000000..4b311ded0 --- /dev/null +++ b/osfmk/ipc/ipc_notify.c @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: ipc/ipc_notify.c + * Author: Rich Draves + * Date: 1989 + * + * Notification-sending functions. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Forward declarations + */ +void ipc_notify_init_port_deleted( + mach_port_deleted_notification_t *n); + +void ipc_notify_init_port_destroyed( + mach_port_destroyed_notification_t *n); + +void ipc_notify_init_no_senders( + mach_no_senders_notification_t *n); + +void ipc_notify_init_send_once( + mach_send_once_notification_t *n); + +void ipc_notify_init_dead_name( + mach_dead_name_notification_t *n); + +mach_port_deleted_notification_t ipc_notify_port_deleted_template; +mach_port_destroyed_notification_t ipc_notify_port_destroyed_template; +mach_no_senders_notification_t ipc_notify_no_senders_template; +mach_send_once_notification_t ipc_notify_send_once_template; +mach_dead_name_notification_t ipc_notify_dead_name_template; + +/* + * Routine: ipc_notify_init_port_deleted + * Purpose: + * Initialize a template for port-deleted notifications. + */ + +void +ipc_notify_init_port_deleted( + mach_port_deleted_notification_t *n) +{ + mach_msg_header_t *m = &n->not_header; + + m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); + m->msgh_local_port = MACH_PORT_NULL; + m->msgh_remote_port = MACH_PORT_NULL; + m->msgh_id = MACH_NOTIFY_PORT_DELETED; + m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); + + n->not_port = MACH_PORT_NULL; + n->NDR = NDR_record; + n->trailer.msgh_seqno = 0; + n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; + n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; +} + +/* + * Routine: ipc_notify_init_port_destroyed + * Purpose: + * Initialize a template for port-destroyed notifications. 
+ */ + +void +ipc_notify_init_port_destroyed( + mach_port_destroyed_notification_t *n) +{ + mach_msg_header_t *m = &n->not_header; + + m->msgh_bits = MACH_MSGH_BITS_COMPLEX | + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); + m->msgh_local_port = MACH_PORT_NULL; + m->msgh_remote_port = MACH_PORT_NULL; + m->msgh_id = MACH_NOTIFY_PORT_DESTROYED; + m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); + + n->not_body.msgh_descriptor_count = 1; + n->not_port.disposition = MACH_MSG_TYPE_PORT_RECEIVE; + n->not_port.name = MACH_PORT_NULL; + n->not_port.type = MACH_MSG_PORT_DESCRIPTOR; + n->trailer.msgh_seqno = 0; + n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; + n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; +} + +/* + * Routine: ipc_notify_init_no_senders + * Purpose: + * Initialize a template for no-senders notifications. + */ + +void +ipc_notify_init_no_senders( + mach_no_senders_notification_t *n) +{ + mach_msg_header_t *m = &n->not_header; + + m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); + m->msgh_local_port = MACH_PORT_NULL; + m->msgh_remote_port = MACH_PORT_NULL; + m->msgh_id = MACH_NOTIFY_NO_SENDERS; + m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); + + n->NDR = NDR_record; + n->trailer.msgh_seqno = 0; + n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; + n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; + n->not_count = 0; +} + +/* + * Routine: ipc_notify_init_send_once + * Purpose: + * Initialize a template for send-once notifications. 
+ */ + +void +ipc_notify_init_send_once( + mach_send_once_notification_t *n) +{ + mach_msg_header_t *m = &n->not_header; + + m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); + m->msgh_local_port = MACH_PORT_NULL; + m->msgh_remote_port = MACH_PORT_NULL; + m->msgh_id = MACH_NOTIFY_SEND_ONCE; + m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); + n->trailer.msgh_seqno = 0; + n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; + n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; +} + +/* + * Routine: ipc_notify_init_dead_name + * Purpose: + * Initialize a template for dead-name notifications. + */ + +void +ipc_notify_init_dead_name( + mach_dead_name_notification_t *n) +{ + mach_msg_header_t *m = &n->not_header; + + m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0); + m->msgh_local_port = MACH_PORT_NULL; + m->msgh_remote_port = MACH_PORT_NULL; + m->msgh_id = MACH_NOTIFY_DEAD_NAME; + m->msgh_size = ((int)sizeof *n) - sizeof(mach_msg_format_0_trailer_t); + + n->not_port = MACH_PORT_NULL; + n->NDR = NDR_record; + n->trailer.msgh_seqno = 0; + n->trailer.msgh_sender = KERNEL_SECURITY_TOKEN; + n->trailer.msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + n->trailer.msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; +} + +/* + * Routine: ipc_notify_init + * Purpose: + * Initialize the notification subsystem. + */ + +void +ipc_notify_init(void) +{ + ipc_notify_init_port_deleted(&ipc_notify_port_deleted_template); + ipc_notify_init_port_destroyed(&ipc_notify_port_destroyed_template); + ipc_notify_init_no_senders(&ipc_notify_no_senders_template); + ipc_notify_init_send_once(&ipc_notify_send_once_template); + ipc_notify_init_dead_name(&ipc_notify_dead_name_template); +} + +/* + * Routine: ipc_notify_port_deleted + * Purpose: + * Send a port-deleted notification. + * Conditions: + * Nothing locked. + * Consumes a ref/soright for port. 
+ */ + +void +ipc_notify_port_deleted( + ipc_port_t port, + mach_port_name_t name) +{ + ipc_kmsg_t kmsg; + mach_port_deleted_notification_t *n; + + kmsg = ipc_kmsg_alloc(sizeof *n); + if (kmsg == IKM_NULL) { + printf("dropped port-deleted (0x%08x, 0x%x)\n", port, name); + ipc_port_release_sonce(port); + return; + } + + n = (mach_port_deleted_notification_t *) &kmsg->ikm_header; + *n = ipc_notify_port_deleted_template; + + n->not_header.msgh_remote_port = port; + n->not_port = name; + + ipc_kmsg_send_always(kmsg); +} + +/* + * Routine: ipc_notify_port_destroyed + * Purpose: + * Send a port-destroyed notification. + * Conditions: + * Nothing locked. + * Consumes a ref/soright for port. + * Consumes a ref for right, which should be a receive right + * prepped for placement into a message. (In-transit, + * or in-limbo if a circularity was detected.) + */ + +void +ipc_notify_port_destroyed( + ipc_port_t port, + ipc_port_t right) +{ + ipc_kmsg_t kmsg; + mach_port_destroyed_notification_t *n; + + kmsg = ipc_kmsg_alloc(sizeof *n); + if (kmsg == IKM_NULL) { + printf("dropped port-destroyed (0x%08x, 0x%08x)\n", + port, right); + ipc_port_release_sonce(port); + ipc_port_release_receive(right); + return; + } + + n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header; + *n = ipc_notify_port_destroyed_template; + + n->not_header.msgh_remote_port = port; + n->not_port.name = right; + + ipc_kmsg_send_always(kmsg); +} + +/* + * Routine: ipc_notify_no_senders + * Purpose: + * Send a no-senders notification. + * Conditions: + * Nothing locked. + * Consumes a ref/soright for port. 
+ */ + +void +ipc_notify_no_senders( + ipc_port_t port, + mach_port_mscount_t mscount) +{ + ipc_kmsg_t kmsg; + mach_no_senders_notification_t *n; + + kmsg = ipc_kmsg_alloc(sizeof *n); + if (kmsg == IKM_NULL) { + printf("dropped no-senders (0x%08x, %u)\n", port, mscount); + ipc_port_release_sonce(port); + return; + } + + n = (mach_no_senders_notification_t *) &kmsg->ikm_header; + *n = ipc_notify_no_senders_template; + + n->not_header.msgh_remote_port = port; + n->not_count = mscount; + + ipc_kmsg_send_always(kmsg); +} + +/* + * Routine: ipc_notify_send_once + * Purpose: + * Send a send-once notification. + * Conditions: + * Nothing locked. + * Consumes a ref/soright for port. + */ + +void +ipc_notify_send_once( + ipc_port_t port) +{ + ipc_kmsg_t kmsg; + mach_send_once_notification_t *n; + + kmsg = ipc_kmsg_alloc(sizeof *n); + if (kmsg == IKM_NULL) { + printf("dropped send-once (0x%08x)\n", port); + ipc_port_release_sonce(port); + return; + } + + n = (mach_send_once_notification_t *) &kmsg->ikm_header; + *n = ipc_notify_send_once_template; + + n->not_header.msgh_remote_port = port; + + ipc_kmsg_send_always(kmsg); +} + +/* + * Routine: ipc_notify_dead_name + * Purpose: + * Send a dead-name notification. + * Conditions: + * Nothing locked. + * Consumes a ref/soright for port. 
+ */ + +void +ipc_notify_dead_name( + ipc_port_t port, + mach_port_name_t name) +{ + ipc_kmsg_t kmsg; + mach_dead_name_notification_t *n; + + kmsg = ipc_kmsg_alloc(sizeof *n); + if (kmsg == IKM_NULL) { + printf("dropped dead-name (0x%08x, 0x%x)\n", port, name); + ipc_port_release_sonce(port); + return; + } + + n = (mach_dead_name_notification_t *) &kmsg->ikm_header; + *n = ipc_notify_dead_name_template; + + n->not_header.msgh_remote_port = port; + n->not_port = name; + + ipc_kmsg_send_always(kmsg); +} diff --git a/osfmk/ipc/ipc_notify.h b/osfmk/ipc/ipc_notify.h new file mode 100644 index 000000000..8c0bc9eb1 --- /dev/null +++ b/osfmk/ipc/ipc_notify.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_notify.h + * Author: Rich Draves + * Date: 1989 + * + * Declarations of notification-sending functions. 
+ */ + +#ifndef _IPC_IPC_NOTIFY_H_ +#define _IPC_IPC_NOTIFY_H_ + +/* + * Exported interfaces + */ + +/* Initialize the notification subsystem */ +extern void ipc_notify_init(void); + +/* Send a port-deleted notification */ +extern void ipc_notify_port_deleted( + ipc_port_t port, + mach_port_name_t name); + +/* Send a port-destroyed notification */ +extern void ipc_notify_port_destroyed( + ipc_port_t port, + ipc_port_t right); + +/* Send a no-senders notification */ +extern void ipc_notify_no_senders( + ipc_port_t port, + mach_port_mscount_t mscount); + +/* Send a send-once notification */ +extern void ipc_notify_send_once( + ipc_port_t port); + +/* Send a dead-name notification */ +extern void ipc_notify_dead_name( + ipc_port_t port, + mach_port_name_t name); + +#endif /* _IPC_IPC_NOTIFY_H_ */ diff --git a/osfmk/ipc/ipc_object.c b/osfmk/ipc/ipc_object.c new file mode 100644 index 000000000..a20d572b8 --- /dev/null +++ b/osfmk/ipc/ipc_object.c @@ -0,0 +1,1066 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_object.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate IPC objects. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +zone_t ipc_object_zones[IOT_NUMBER]; + +/* + * Routine: ipc_object_reference + * Purpose: + * Take a reference to an object. + */ + +void +ipc_object_reference( + ipc_object_t object) +{ + io_lock(object); + assert(object->io_references > 0); + io_reference(object); + io_unlock(object); +} + +/* + * Routine: ipc_object_release + * Purpose: + * Release a reference to an object. 
+ */ + +void +ipc_object_release( + ipc_object_t object) +{ + io_lock(object); + assert(object->io_references > 0); + io_release(object); + io_check_unlock(object); +} + +/* + * Routine: ipc_object_translate + * Purpose: + * Look up an object in a space. + * Conditions: + * Nothing locked before. If successful, the object + * is returned locked. The caller doesn't get a ref. + * Returns: + * KERN_SUCCESS Object returned locked. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote the correct right. + */ + +kern_return_t +ipc_object_translate( + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + ipc_object_t *objectp) +{ + ipc_entry_t entry; + ipc_object_t object; + kern_return_t kr; + + kr = ipc_right_lookup_read(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is read-locked and active */ + + if ((entry->ie_bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) { + is_read_unlock(space); + return KERN_INVALID_RIGHT; + } + + object = entry->ie_object; + assert(object != IO_NULL); + + io_lock(object); + is_read_unlock(space); + + *objectp = object; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_object_translate_two + * Purpose: + * Look up two objects in a space. + * Conditions: + * Nothing locked before. If successful, the objects + * are returned locked. The caller doesn't get a ref. + * Returns: + * KERN_SUCCESS Objects returned locked. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME A name doesn't denote a right. + * KERN_INVALID_RIGHT A name doesn't denote the correct right. 
+ */ + +kern_return_t +ipc_object_translate_two( + ipc_space_t space, + mach_port_name_t name1, + mach_port_right_t right1, + ipc_object_t *objectp1, + mach_port_name_t name2, + mach_port_right_t right2, + ipc_object_t *objectp2) +{ + ipc_entry_t entry1; + ipc_entry_t entry2; + ipc_object_t object; + kern_return_t kr; + + kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2); + if (kr != KERN_SUCCESS) + return kr; + /* space is read-locked and active */ + + if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) { + is_read_unlock(space); + return KERN_INVALID_RIGHT; + } + + if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) { + is_read_unlock(space); + return KERN_INVALID_RIGHT; + } + + object = entry1->ie_object; + assert(object != IO_NULL); + io_lock(object); + *objectp1 = object; + + object = entry2->ie_object; + assert(object != IO_NULL); + io_lock(object); + *objectp2 = object; + + is_read_unlock(space); + return KERN_SUCCESS; +} + +/* + * Routine: ipc_object_alloc_dead + * Purpose: + * Allocate a dead-name entry. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The dead name is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NO_SPACE No room for an entry in the space. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +ipc_object_alloc_dead( + ipc_space_t space, + mach_port_name_t *namep) +{ + ipc_entry_t entry; + kern_return_t kr; + + int i; + + + kr = ipc_entry_alloc(space, namep, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked */ + + /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */ + + assert(entry->ie_object == IO_NULL); + entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1; + + is_write_unlock(space); + return KERN_SUCCESS; +} + +/* + * Routine: ipc_object_alloc_dead_name + * Purpose: + * Allocate a dead-name entry, with a specific name. + * Conditions: + * Nothing locked. 
+ * Returns: + * KERN_SUCCESS The dead name is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NAME_EXISTS The name already denotes a right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +ipc_object_alloc_dead_name( + ipc_space_t space, + mach_port_name_t name) +{ + ipc_entry_t entry; + kern_return_t kr; + + int i; + + + kr = ipc_entry_alloc_name(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked */ + + if (ipc_right_inuse(space, name, entry)) + return KERN_NAME_EXISTS; + + /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */ + + assert(entry->ie_object == IO_NULL); + entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1; + + is_write_unlock(space); + return KERN_SUCCESS; +} + +/* + * Routine: ipc_object_alloc + * Purpose: + * Allocate an object. + * Conditions: + * Nothing locked. If successful, the object is returned locked. + * The caller doesn't get a reference for the object. + * Returns: + * KERN_SUCCESS The object is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NO_SPACE No room for an entry in the space. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +ipc_object_alloc( + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t *namep, + ipc_object_t *objectp) +{ + ipc_object_t object; + ipc_entry_t entry; + kern_return_t kr; + + assert(otype < IOT_NUMBER); + assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type); + assert(type != MACH_PORT_TYPE_NONE); + assert(urefs <= MACH_PORT_UREFS_MAX); + + object = io_alloc(otype); + if (object == IO_NULL) + return KERN_RESOURCE_SHORTAGE; + + if (otype == IOT_PORT) { + ipc_port_t port = (ipc_port_t)object; + + bzero((char *)port, sizeof(*port)); + } else if (otype == IOT_PORT_SET) { + ipc_pset_t pset = (ipc_pset_t)object; + + bzero((char *)pset, sizeof(*pset)); + } + + io_lock_init(object); + *namep = (mach_port_name_t)object; + kr = ipc_entry_alloc(space, namep, &entry); + if (kr != KERN_SUCCESS) { + io_free(otype, object); + return kr; + } + /* space is write-locked */ + + entry->ie_bits |= type | urefs; + entry->ie_object = object; + + io_lock(object); + is_write_unlock(space); + + object->io_references = 1; /* for entry, not caller */ + object->io_bits = io_makebits(TRUE, otype, 0); + + *objectp = object; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_object_alloc_name + * Purpose: + * Allocate an object, with a specific name. + * Conditions: + * Nothing locked. If successful, the object is returned locked. + * The caller doesn't get a reference for the object. + * Returns: + * KERN_SUCCESS The object is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NAME_EXISTS The name already denotes a right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +ipc_object_alloc_name( + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t name, + ipc_object_t *objectp) +{ + ipc_object_t object; + ipc_entry_t entry; + kern_return_t kr; + + assert(otype < IOT_NUMBER); + assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type); + assert(type != MACH_PORT_TYPE_NONE); + assert(urefs <= MACH_PORT_UREFS_MAX); + + object = io_alloc(otype); + if (object == IO_NULL) + return KERN_RESOURCE_SHORTAGE; + + if (otype == IOT_PORT) { + ipc_port_t port = (ipc_port_t)object; + + bzero((char *)port, sizeof(*port)); + } else if (otype == IOT_PORT_SET) { + ipc_pset_t pset = (ipc_pset_t)object; + + bzero((char *)pset, sizeof(*pset)); + } + + io_lock_init(object); + kr = ipc_entry_alloc_name(space, name, &entry); + if (kr != KERN_SUCCESS) { + io_free(otype, object); + return kr; + } + /* space is write-locked */ + + if (ipc_right_inuse(space, name, entry)) { + io_free(otype, object); + return KERN_NAME_EXISTS; + } + + entry->ie_bits |= type | urefs; + entry->ie_object = object; + + io_lock(object); + is_write_unlock(space); + + object->io_references = 1; /* for entry, not caller */ + object->io_bits = io_makebits(TRUE, otype, 0); + + *objectp = object; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_object_copyin_type + * Purpose: + * Convert a send type name to a received type name. 
+ */ + +mach_msg_type_name_t +ipc_object_copyin_type( + mach_msg_type_name_t msgt_name) +{ + switch (msgt_name) { + + case MACH_MSG_TYPE_MOVE_RECEIVE: + case MACH_MSG_TYPE_COPY_RECEIVE: + return MACH_MSG_TYPE_PORT_RECEIVE; + + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + case MACH_MSG_TYPE_MAKE_SEND_ONCE: + return MACH_MSG_TYPE_PORT_SEND_ONCE; + + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MAKE_SEND: + case MACH_MSG_TYPE_COPY_SEND: + return MACH_MSG_TYPE_PORT_SEND; + + default: + return MACH_MSG_TYPE_PORT_NONE; + } +} + +/* + * Routine: ipc_object_copyin + * Purpose: + * Copyin a capability from a space. + * If successful, the caller gets a ref + * for the resulting object, unless it is IO_DEAD. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Acquired an object, possibly IO_DEAD. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME Name doesn't exist in space. + * KERN_INVALID_RIGHT Name doesn't denote correct right. + */ + +kern_return_t +ipc_object_copyin( + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_object_t *objectp) +{ + ipc_entry_t entry; + ipc_port_t soright; + kern_return_t kr; + + int i; + + /* + * Could first try a read lock when doing + * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND, + * and MACH_MSG_TYPE_MAKE_SEND_ONCE. + */ + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + + kr = ipc_right_copyin(space, name, entry, + msgt_name, TRUE, + objectp, &soright); + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, name, entry); + is_write_unlock(space); + + if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) + ipc_notify_port_deleted(soright, name); + + return kr; +} + +/* + * Routine: ipc_object_copyin_from_kernel + * Purpose: + * Copyin a naked capability from the kernel. + * + * MACH_MSG_TYPE_MOVE_RECEIVE + * The receiver must be ipc_space_kernel. 
+ * Consumes the naked receive right. + * MACH_MSG_TYPE_COPY_SEND + * A naked send right must be supplied. + * The port gains a reference, and a send right + * if the port is still active. + * MACH_MSG_TYPE_MAKE_SEND + * The receiver must be ipc_space_kernel. + * The port gains a reference and a send right. + * MACH_MSG_TYPE_MOVE_SEND + * Consumes a naked send right. + * MACH_MSG_TYPE_MAKE_SEND_ONCE + * The port gains a reference and a send-once right. + * Receiver also be the caller of device subsystem, + * so no assertion. + * MACH_MSG_TYPE_MOVE_SEND_ONCE + * Consumes a naked send-once right. + * Conditions: + * Nothing locked. + */ + +void +ipc_object_copyin_from_kernel( + ipc_object_t object, + mach_msg_type_name_t msgt_name) +{ + assert(IO_VALID(object)); + + switch (msgt_name) { + case MACH_MSG_TYPE_MOVE_RECEIVE: { + ipc_port_t port = (ipc_port_t) object; + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name != MACH_PORT_NULL); + assert(port->ip_receiver == ipc_space_kernel); + + /* relevant part of ipc_port_clear_receiver */ + ipc_port_set_mscount(port, 0); + + port->ip_receiver_name = MACH_PORT_NULL; + port->ip_destination = IP_NULL; + ip_unlock(port); + break; + } + + case MACH_MSG_TYPE_COPY_SEND: { + ipc_port_t port = (ipc_port_t) object; + + ip_lock(port); + if (ip_active(port)) { + assert(port->ip_srights > 0); + port->ip_srights++; + } + ip_reference(port); + ip_unlock(port); + break; + } + + case MACH_MSG_TYPE_MAKE_SEND: { + ipc_port_t port = (ipc_port_t) object; + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name != MACH_PORT_NULL); + assert(port->ip_receiver == ipc_space_kernel); + + ip_reference(port); + port->ip_mscount++; + port->ip_srights++; + ip_unlock(port); + break; + } + + case MACH_MSG_TYPE_MOVE_SEND: + /* move naked send right into the message */ + break; + + case MACH_MSG_TYPE_MAKE_SEND_ONCE: { + ipc_port_t port = (ipc_port_t) object; + + ip_lock(port); + assert(ip_active(port)); + 
assert(port->ip_receiver_name != MACH_PORT_NULL); + + ip_reference(port); + port->ip_sorights++; + ip_unlock(port); + break; + } + + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + /* move naked send-once right into the message */ + break; + + default: + panic("ipc_object_copyin_from_kernel: strange rights"); + } +} + +/* + * Routine: ipc_object_destroy + * Purpose: + * Destroys a naked capability. + * Consumes a ref for the object. + * + * A receive right should be in limbo or in transit. + * Conditions: + * Nothing locked. + */ + +void +ipc_object_destroy( + ipc_object_t object, + mach_msg_type_name_t msgt_name) +{ + assert(IO_VALID(object)); + assert(io_otype(object) == IOT_PORT); + + switch (msgt_name) { + case MACH_MSG_TYPE_PORT_SEND: + ipc_port_release_send((ipc_port_t) object); + break; + + case MACH_MSG_TYPE_PORT_SEND_ONCE: + ipc_notify_send_once((ipc_port_t) object); + break; + + case MACH_MSG_TYPE_PORT_RECEIVE: + ipc_port_release_receive((ipc_port_t) object); + break; + + default: + panic("ipc_object_destroy: strange rights"); + } +} + +/* + * Routine: ipc_object_copyout + * Purpose: + * Copyout a capability, placing it into a space. + * If successful, consumes a ref for the object. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Copied out object, consumed ref. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_CAPABILITY The object is dead. + * KERN_NO_SPACE No room in space for another right. + * KERN_RESOURCE_SHORTAGE No memory available. + * KERN_UREFS_OVERFLOW Urefs limit exceeded + * and overflow wasn't specified. 
+ */ + +kern_return_t +ipc_object_copyout( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t *namep) +{ + mach_port_name_t name; + ipc_entry_t entry; + kern_return_t kr; + + assert(IO_VALID(object)); + assert(io_otype(object) == IOT_PORT); + + is_write_lock(space); + + for (;;) { + if (!space->is_active) { + is_write_unlock(space); + return KERN_INVALID_TASK; + } + + if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) && + ipc_right_reverse(space, object, &name, &entry)) { + /* object is locked and active */ + + assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE); + break; + } + + name = (mach_port_name_t)object; + kr = ipc_entry_get(space, &name, &entry); + if (kr != KERN_SUCCESS) { + /* unlocks/locks space, so must start again */ + + kr = ipc_entry_grow_table(space, ITS_SIZE_NONE); + if (kr != KERN_SUCCESS) + return kr; /* space is unlocked */ + + continue; + } + + assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE); + assert(entry->ie_object == IO_NULL); + + io_lock(object); + if (!io_active(object)) { + io_unlock(object); + ipc_entry_dealloc(space, name, entry); + is_write_unlock(space); + return KERN_INVALID_CAPABILITY; + } + + entry->ie_object = object; + break; + } + + /* space is write-locked and active, object is locked and active */ + + kr = ipc_right_copyout(space, name, entry, + msgt_name, overflow, object); + /* object is unlocked */ + is_write_unlock(space); + + if (kr == KERN_SUCCESS) + *namep = name; + return kr; +} + +/* + * Routine: ipc_object_copyout_name + * Purpose: + * Copyout a capability, placing it into a space. + * The specified name is used for the capability. + * If successful, consumes a ref for the object. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Copied out object, consumed ref. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_CAPABILITY The object is dead. + * KERN_RESOURCE_SHORTAGE No memory available. 
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded + * and overflow wasn't specified. + * KERN_RIGHT_EXISTS Space has rights under another name. + * KERN_NAME_EXISTS Name is already used. + */ + +kern_return_t +ipc_object_copyout_name( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t name) +{ + mach_port_name_t oname; + ipc_entry_t oentry; + ipc_entry_t entry; + kern_return_t kr; + + int i; + + assert(IO_VALID(object)); + assert(io_otype(object) == IOT_PORT); + + kr = ipc_entry_alloc_name(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + + if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) && + ipc_right_reverse(space, object, &oname, &oentry)) { + /* object is locked and active */ + + if (name != oname) { + io_unlock(object); + + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + ipc_entry_dealloc(space, name, entry); + + is_write_unlock(space); + return KERN_RIGHT_EXISTS; + } + + assert(entry == oentry); + assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE); + } else { + if (ipc_right_inuse(space, name, entry)) + return KERN_NAME_EXISTS; + + assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE); + assert(entry->ie_object == IO_NULL); + + io_lock(object); + if (!io_active(object)) { + io_unlock(object); + ipc_entry_dealloc(space, name, entry); + is_write_unlock(space); + return KERN_INVALID_CAPABILITY; + } + + entry->ie_object = object; + } + + /* space is write-locked and active, object is locked and active */ + + kr = ipc_right_copyout(space, name, entry, + msgt_name, overflow, object); + /* object is unlocked */ + is_write_unlock(space); + return kr; +} + +/* + * Routine: ipc_object_copyout_dest + * Purpose: + * Translates/consumes the destination right of a message. + * This is unlike normal copyout because the right is consumed + * in a funny way instead of being given to the receiving space. 
+ * The receiver gets his name for the port, if he has receive + * rights, otherwise MACH_PORT_NULL. + * Conditions: + * The object is locked and active. Nothing else locked. + * The object is unlocked and loses a reference. + */ + +void +ipc_object_copyout_dest( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep) +{ + mach_port_name_t name; + + assert(IO_VALID(object)); + assert(io_active(object)); + + io_release(object); + + /* + * If the space is the receiver/owner of the object, + * then we quietly consume the right and return + * the space's name for the object. Otherwise + * we destroy the right and return MACH_PORT_NULL. + */ + + switch (msgt_name) { + case MACH_MSG_TYPE_PORT_SEND: { + ipc_port_t port = (ipc_port_t) object; + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + + if (port->ip_receiver == space) + name = port->ip_receiver_name; + else + name = MACH_PORT_NULL; + + assert(port->ip_srights > 0); + if (--port->ip_srights == 0 && + port->ip_nsrequest != IP_NULL) { + nsrequest = port->ip_nsrequest; + port->ip_nsrequest = IP_NULL; + mscount = port->ip_mscount; + ip_unlock(port); + ipc_notify_no_senders(nsrequest, mscount); + } else + ip_unlock(port); + break; + } + + case MACH_MSG_TYPE_PORT_SEND_ONCE: { + ipc_port_t port = (ipc_port_t) object; + + assert(port->ip_sorights > 0); + + if (port->ip_receiver == space) { + /* quietly consume the send-once right */ + + port->ip_sorights--; + name = port->ip_receiver_name; + ip_unlock(port); + } else { + /* + * A very bizarre case. The message + * was received, but before this copyout + * happened the space lost receive rights. + * We can't quietly consume the soright + * out from underneath some other task, + * so generate a send-once notification. 
+ */ + + ip_reference(port); /* restore ref */ + ip_unlock(port); + + ipc_notify_send_once(port); + name = MACH_PORT_NULL; + } + + break; + } + + default: + panic("ipc_object_copyout_dest: strange rights"); + } + + *namep = name; +} + +/* + * Routine: ipc_object_rename + * Purpose: + * Rename an entry in a space. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Renamed the entry. + * KERN_INVALID_TASK The space was dead. + * KERN_INVALID_NAME oname didn't denote an entry. + * KERN_NAME_EXISTS nname already denoted an entry. + * KERN_RESOURCE_SHORTAGE Couldn't allocate new entry. + */ + +kern_return_t +ipc_object_rename( + ipc_space_t space, + mach_port_name_t oname, + mach_port_name_t nname) +{ + ipc_entry_t oentry, nentry; + kern_return_t kr; + + int i; + + kr = ipc_entry_alloc_name(space, nname, &nentry); + if (kr != KERN_SUCCESS) + return kr; + + /* space is write-locked and active */ + + if (ipc_right_inuse(space, nname, nentry)) { + /* space is unlocked */ + return KERN_NAME_EXISTS; + } + + /* don't let ipc_entry_lookup see the uninitialized new entry */ + + if ((oname == nname) || + ((oentry = ipc_entry_lookup(space, oname)) == IE_NULL)) { + ipc_entry_dealloc(space, nname, nentry); + is_write_unlock(space); + return KERN_INVALID_NAME; + } + + kr = ipc_right_rename(space, oname, oentry, nname, nentry); + /* space is unlocked */ + return kr; +} + +#if MACH_ASSERT +/* + * Check whether the object is a port if so, free it. But + * keep track of that fact. + */ +void +io_free( + unsigned int otype, + ipc_object_t object) +{ + ipc_port_t port; + + if (otype == IOT_PORT) { + port = (ipc_port_t) object; +#if MACH_ASSERT + ipc_port_track_dealloc(port); +#endif /* MACH_ASSERT */ + } + zfree(ipc_object_zones[otype], (vm_offset_t) object); +} +#endif /* MACH_ASSERT */ + +#include +#if MACH_KDB + +#include + +#define printf kdbprintf + +/* + * Routine: ipc_object_print + * Purpose: + * Pretty-print an object for kdb. 
+ */ + +char *ikot_print_array[IKOT_MAX_TYPE] = { + "(NONE) ", + "(THREAD) ", + "(TASK) ", + "(HOST) ", + "(HOST_PRIV) ", + "(PROCESSOR) ", + "(PSET) ", + "(PSET_NAME) ", + "(TIMER) ", + "(PAGER_REQUEST) ", + "(DEVICE) ", /* 10 */ + "(XMM_OBJECT) ", + "(XMM_PAGER) ", + "(XMM_KERNEL) ", + "(XMM_REPLY) ", + "(NOTDEF 15) ", + "(NOTDEF 16) ", + "(HOST_SECURITY) ", + "(LEDGER) ", + "(MASTER_DEVICE) ", + "(ACTIVATION) ", /* 20 */ + "(SUBSYSTEM) ", + "(IO_DONE_QUEUE) ", + "(SEMAPHORE) ", + "(LOCK_SET) ", + "(CLOCK) ", + "(CLOCK_CTRL) ", /* 26 */ + "(IOKIT_SPARE) ", /* 27 */ + "(NAMED_MEM_ENTRY) ", /* 28 */ + "(IOKIT_CONNECT) ", + "(IOKIT_OBJECT) ", /* 30 */ + "(UPL) ", + /* << new entries here */ + "(UNKNOWN) " /* magic catchall */ +}; +/* Please keep in sync with kern/ipc_kobject.h */ + +void +ipc_object_print( + ipc_object_t object) +{ + int kotype; + + iprintf("%s", io_active(object) ? "active" : "dead"); + printf(", refs=%d", object->io_references); + printf(", otype=%d", io_otype(object)); + kotype = io_kotype(object); + if (kotype >= 0 && kotype < IKOT_MAX_TYPE) + printf(", kotype=%d %s\n", io_kotype(object), + ikot_print_array[kotype]); + else + printf(", kotype=0x%x %s\n", io_kotype(object), + ikot_print_array[IKOT_UNKNOWN]); +} + +#endif /* MACH_KDB */ diff --git a/osfmk/ipc/ipc_object.h b/osfmk/ipc/ipc_object.h new file mode 100644 index 000000000..67a0e4910 --- /dev/null +++ b/osfmk/ipc/ipc_object.h @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_object.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for IPC objects, for which tasks have capabilities. 
+ */ + +#ifndef _IPC_IPC_OBJECT_H_ +#define _IPC_IPC_OBJECT_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +typedef natural_t ipc_object_refs_t; /* for ipc/ipc_object.h */ +typedef natural_t ipc_object_bits_t; +typedef natural_t ipc_object_type_t; + +/* + * There is no lock in the ipc_object; it is in the enclosing kernel + * data structure (rpc_common_data) used by both ipc_port and ipc_pset. + * The ipc_object is used to both tag and reference count these two data + * structures, and (Noto Bene!) pointers to either of these or the + * ipc_object at the head of these are freely cast back and forth; hence + * the ipc_object MUST BE FIRST in the ipc_common_data. + * + * If the RPC implementation enabled user-mode code to use kernel-level + * data structures (as ours used to), this peculiar structuring would + * avoid having anything in user code depend on the kernel configuration + * (with which lock size varies). + */ +struct ipc_object { + ipc_object_refs_t io_references; + ipc_object_bits_t io_bits; + port_name_t io_receiver_name; + struct thread_pool io_thread_pool; +#if NCPUS == 1 + usimple_lock_data_t io_lock_data; +#else + decl_mutex_data(, io_lock_data) +#endif +}; + +/* + * Legacy defines. Should use IPC_OBJECT_NULL, etc... + */ +#define IO_NULL ((ipc_object_t) 0) +#define IO_DEAD ((ipc_object_t) -1) +#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD)) + +/* + * IPC steals the high-order bits from the kotype to use + * for its own purposes. This allows IPC to record facts + * about ports that aren't otherwise obvious from the + * existing port fields. In particular, IPC can optionally + * mark a port for no more senders detection. Any change + * to IO_BITS_PORT_INFO must be coordinated with bitfield + * definitions in ipc_port.h. 
+ */ +#define IO_BITS_PORT_INFO 0x0000f000 /* stupid port tricks */ +#define IO_BITS_KOTYPE 0x00000fff /* used by the object */ +#define IO_BITS_OTYPE 0x7fff0000 /* determines a zone */ +#define IO_BITS_ACTIVE 0x80000000 /* is object alive? */ + +#define io_active(io) ((io)->io_bits & IO_BITS_ACTIVE) + +#define io_otype(io) (((io)->io_bits & IO_BITS_OTYPE) >> 16) +#define io_kotype(io) ((io)->io_bits & IO_BITS_KOTYPE) + +#define io_makebits(active, otype, kotype) \ + (((active) ? IO_BITS_ACTIVE : 0) | ((otype) << 16) | (kotype)) + +/* + * Object types: ports, port sets, kernel-loaded ports + */ +#define IOT_PORT 0 +#define IOT_PORT_SET 1 +#define IOT_NUMBER 2 /* number of types used */ + +extern zone_t ipc_object_zones[IOT_NUMBER]; + +#define io_alloc(otype) \ + ((ipc_object_t) zalloc(ipc_object_zones[(otype)])) + +#if MACH_ASSERT +/* + * Call the routine for io_free so that checking can be performed. + */ +extern void io_free( + unsigned int otype, + ipc_object_t object); + +#else /* MACH_ASSERT */ +#define io_free(otype, io) \ + zfree(ipc_object_zones[(otype)], (vm_offset_t) (io)) +#endif /* MACH_ASSERT */ + +/* + * Here we depend on the ipc_object being first within the ipc_common_data, + * which is first within the rpc_common_data, which in turn must be first + * within any kernel data structure needing to lock an ipc_object + * (ipc_port and ipc_pset). 
+ */ +#if NCPUS == 1 + +#define io_lock_init(io) \ + usimple_lock_init(&(io)-io_lock_data, ETAP_IPC_OBJECT) +#define io_lock(io) \ + usimple_lock(&(io)->io_lock_data) +#define io_lock_try(io) \ + usimple_lock_try(&(io)->io_lock_data) +#define io_unlock(io) \ + usimple_unlock(&(io)->io_lock_data) + +#else /* NCPUS == 1 */ + +#define io_lock_init(io) \ + mutex_init(&(io)->io_lock_data, ETAP_IPC_OBJECT) +#define io_lock(io) \ + mutex_lock(&(io)->io_lock_data) +#define io_lock_try(io) \ + mutex_try(&(io)->io_lock_data) +#define io_unlock(io) \ + mutex_unlock(&(io)->io_lock_data) + +#endif /* NCPUS == 1 */ + +#if NCPUS > 1 +#define _VOLATILE_ volatile +#else /* NCPUS > 1 */ +#define _VOLATILE_ +#endif /* NCPUS > 1 */ + +#define io_check_unlock(io) \ +MACRO_BEGIN \ + _VOLATILE_ ipc_object_refs_t _refs = (io)->io_references; \ + \ + io_unlock(io); \ + if (_refs == 0) \ + io_free(io_otype(io), io); \ +MACRO_END + +/* Sanity check the ref count. If it is 0, we may be doubly zfreeing. + * If it is larger than max int, it has been corrupted, probably by being + * modified into an address (this is architecture dependent, but it's + * safe to assume there cannot really be max int references). + * + * NOTE: The 0 test alone will not catch double zfreeing of ipc_port + * structs, because the io_references field is the first word of the struct, + * and zfree modifies that to point to the next free zone element. 
+ */ +#define IO_MAX_REFERENCES \ + (unsigned)(~0 ^ (1 << (sizeof(int)*BYTE_SIZE - 1))) + +#define io_reference(io) \ +MACRO_BEGIN \ + assert((io)->io_references < IO_MAX_REFERENCES); \ + (io)->io_references++; \ +MACRO_END + +#define io_release(io) \ +MACRO_BEGIN \ + assert((io)->io_references > 0 && \ + (io)->io_references <= IO_MAX_REFERENCES); \ + (io)->io_references--; \ +MACRO_END + +/* + * Exported interfaces + */ + +/* Take a reference to an object */ +extern void ipc_object_reference( + ipc_object_t object); + +/* Release a reference to an object */ +extern void ipc_object_release( + ipc_object_t object); + +/* Look up an object in a space */ +extern kern_return_t ipc_object_translate( + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + ipc_object_t *objectp); + +/* Look up two objects in a space, locking them in the order described */ +extern kern_return_t ipc_object_translate_two( + ipc_space_t space, + mach_port_name_t name1, + mach_port_right_t right1, + ipc_object_t *objectp1, + mach_port_name_t name2, + mach_port_right_t right2, + ipc_object_t *objectp2); + +/* Allocate a dead-name entry */ +extern kern_return_t +ipc_object_alloc_dead( + ipc_space_t space, + mach_port_name_t *namep); + +/* Allocate a dead-name entry, with a specific name */ +extern kern_return_t ipc_object_alloc_dead_name( + ipc_space_t space, + mach_port_name_t name); + +/* Allocate an object */ +extern kern_return_t ipc_object_alloc( + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t *namep, + ipc_object_t *objectp); + +/* Allocate an object, with a specific name */ +extern kern_return_t ipc_object_alloc_name( + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t name, + ipc_object_t *objectp); + +/* Convert a send type name to a received type name */ +extern mach_msg_type_name_t ipc_object_copyin_type( + mach_msg_type_name_t 
msgt_name); + +/* Copyin a capability from a space */ +extern kern_return_t ipc_object_copyin( + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_object_t *objectp); + +/* Copyin a naked capability from the kernel */ +extern void ipc_object_copyin_from_kernel( + ipc_object_t object, + mach_msg_type_name_t msgt_name); + +/* Destroy a naked capability */ +extern void ipc_object_destroy( + ipc_object_t object, + mach_msg_type_name_t msgt_name); + +/* Copyout a capability, placing it into a space */ +extern kern_return_t ipc_object_copyout( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t *namep); + +/* Copyout a capability with a name, placing it into a space */ +extern kern_return_t ipc_object_copyout_name( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t name); + +/* Translate/consume the destination right of a message */ +extern void ipc_object_copyout_dest( + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep); + +/* Rename an entry in a space */ +extern kern_return_t ipc_object_rename( + ipc_space_t space, + mach_port_name_t oname, + mach_port_name_t nname); + +#if MACH_KDB +/* Pretty-print an ipc object */ + +extern void ipc_object_print( + ipc_object_t object); + +#endif /* MACH_KDB */ + +#endif /* _IPC_IPC_OBJECT_H_ */ diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c new file mode 100644 index 000000000..7cd8a39f7 --- /dev/null +++ b/osfmk/ipc/ipc_port.c @@ -0,0 +1,1931 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_port.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate IPC ports. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_KDB +#include +#include +#include +#endif /* MACH_KDB */ + +#include + +decl_mutex_data(, ipc_port_multiple_lock_data) +decl_mutex_data(, ipc_port_timestamp_lock_data) +ipc_port_timestamp_t ipc_port_timestamp_data; + +#if MACH_ASSERT +void ipc_port_init_debug( + ipc_port_t port); +#endif /* MACH_ASSERT */ + +#if MACH_KDB && ZONE_DEBUG +/* Forwards */ +void print_type_ports(unsigned, unsigned); +void print_ports(void); +#endif /* MACH_KDB && ZONE_DEBUG */ + +/* + * Routine: ipc_port_timestamp + * Purpose: + * Retrieve a timestamp value. + */ + +ipc_port_timestamp_t +ipc_port_timestamp(void) +{ + ipc_port_timestamp_t timestamp; + + ipc_port_timestamp_lock(); + timestamp = ipc_port_timestamp_data++; + ipc_port_timestamp_unlock(); + + return timestamp; +} + +/* + * Routine: ipc_port_dnrequest + * Purpose: + * Try to allocate a dead-name request slot. + * If successful, returns the request index. + * Otherwise returns zero. + * Conditions: + * The port is locked and active. + * Returns: + * KERN_SUCCESS A request index was found. + * KERN_NO_SPACE No index allocated. 
+ */ + +kern_return_t +ipc_port_dnrequest( + ipc_port_t port, + mach_port_name_t name, + ipc_port_t soright, + ipc_port_request_index_t *indexp) +{ + ipc_port_request_t ipr, table; + ipc_port_request_index_t index; + + assert(ip_active(port)); + assert(name != MACH_PORT_NULL); + assert(soright != IP_NULL); + + table = port->ip_dnrequests; + if (table == IPR_NULL) + return KERN_NO_SPACE; + + index = table->ipr_next; + if (index == 0) + return KERN_NO_SPACE; + + ipr = &table[index]; + assert(ipr->ipr_name == MACH_PORT_NULL); + + table->ipr_next = ipr->ipr_next; + ipr->ipr_name = name; + ipr->ipr_soright = soright; + + *indexp = index; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_port_dngrow + * Purpose: + * Grow a port's table of dead-name requests. + * Conditions: + * The port must be locked and active. + * Nothing else locked; will allocate memory. + * Upon return the port is unlocked. + * Returns: + * KERN_SUCCESS Grew the table. + * KERN_SUCCESS Somebody else grew the table. + * KERN_SUCCESS The port died. + * KERN_RESOURCE_SHORTAGE Couldn't allocate new table. 
+ * KERN_NO_SPACE Couldn't grow to desired size + */ + +kern_return_t +ipc_port_dngrow( + ipc_port_t port, + int target_size) +{ + ipc_table_size_t its; + ipc_port_request_t otable, ntable; + + assert(ip_active(port)); + + otable = port->ip_dnrequests; + if (otable == IPR_NULL) + its = &ipc_table_dnrequests[0]; + else + its = otable->ipr_size + 1; + + if (target_size != ITS_SIZE_NONE) { + if ((otable != IPR_NULL) && + (target_size <= otable->ipr_size->its_size)) { + ip_unlock(port); + return KERN_SUCCESS; + } + while ((its->its_size) && (its->its_size < target_size)) { + its++; + } + if (its->its_size == 0) { + ip_unlock(port); + return KERN_NO_SPACE; + } + } + + ip_reference(port); + ip_unlock(port); + + if ((its->its_size == 0) || + ((ntable = it_dnrequests_alloc(its)) == IPR_NULL)) { + ipc_port_release(port); + return KERN_RESOURCE_SHORTAGE; + } + + ip_lock(port); + ip_release(port); + + /* + * Check that port is still active and that nobody else + * has slipped in and grown the table on us. Note that + * just checking port->ip_dnrequests == otable isn't + * sufficient; must check ipr_size. 
+ */ + + if (ip_active(port) && + (port->ip_dnrequests == otable) && + ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) { + ipc_table_size_t oits; + ipc_table_elems_t osize, nsize; + ipc_port_request_index_t free, i; + + /* copy old table to new table */ + + if (otable != IPR_NULL) { + oits = otable->ipr_size; + osize = oits->its_size; + free = otable->ipr_next; + + (void) memcpy((void *)(ntable + 1), + (const void *)(otable + 1), + (osize - 1) * sizeof(struct ipc_port_request)); + } else { + osize = 1; + free = 0; + } + + nsize = its->its_size; + assert(nsize > osize); + + /* add new elements to the new table's free list */ + + for (i = osize; i < nsize; i++) { + ipc_port_request_t ipr = &ntable[i]; + + ipr->ipr_name = MACH_PORT_NULL; + ipr->ipr_next = free; + free = i; + } + + ntable->ipr_next = free; + ntable->ipr_size = its; + port->ip_dnrequests = ntable; + ip_unlock(port); + + if (otable != IPR_NULL) { + it_dnrequests_free(oits, otable); + } + } else { + ip_check_unlock(port); + it_dnrequests_free(its, ntable); + } + + return KERN_SUCCESS; +} + +/* + * Routine: ipc_port_dncancel + * Purpose: + * Cancel a dead-name request and return the send-once right. + * Conditions: + * The port must locked and active. + */ + +ipc_port_t +ipc_port_dncancel( + ipc_port_t port, + mach_port_name_t name, + ipc_port_request_index_t index) +{ + ipc_port_request_t ipr, table; + ipc_port_t dnrequest; + + assert(ip_active(port)); + assert(name != MACH_PORT_NULL); + assert(index != 0); + + table = port->ip_dnrequests; + assert(table != IPR_NULL); + + ipr = &table[index]; + dnrequest = ipr->ipr_soright; + assert(ipr->ipr_name == name); + + /* return ipr to the free list inside the table */ + + ipr->ipr_name = MACH_PORT_NULL; + ipr->ipr_next = table->ipr_next; + table->ipr_next = index; + + return dnrequest; +} + +/* + * Routine: ipc_port_pdrequest + * Purpose: + * Make a port-deleted request, returning the + * previously registered send-once right. 
+ * Just cancels the previous request if notify is IP_NULL. + * Conditions: + * The port is locked and active. It is unlocked. + * Consumes a ref for notify (if non-null), and + * returns previous with a ref (if non-null). + */ + +void +ipc_port_pdrequest( + ipc_port_t port, + ipc_port_t notify, + ipc_port_t *previousp) +{ + ipc_port_t previous; + + assert(ip_active(port)); + + previous = port->ip_pdrequest; + port->ip_pdrequest = notify; + ip_unlock(port); + + *previousp = previous; +} + +/* + * Routine: ipc_port_nsrequest + * Purpose: + * Make a no-senders request, returning the + * previously registered send-once right. + * Just cancels the previous request if notify is IP_NULL. + * Conditions: + * The port is locked and active. It is unlocked. + * Consumes a ref for notify (if non-null), and + * returns previous with a ref (if non-null). + */ + +void +ipc_port_nsrequest( + ipc_port_t port, + mach_port_mscount_t sync, + ipc_port_t notify, + ipc_port_t *previousp) +{ + ipc_port_t previous; + mach_port_mscount_t mscount; + + assert(ip_active(port)); + + previous = port->ip_nsrequest; + mscount = port->ip_mscount; + + if ((port->ip_srights == 0) && (sync <= mscount) && + (notify != IP_NULL)) { + port->ip_nsrequest = IP_NULL; + ip_unlock(port); + ipc_notify_no_senders(notify, mscount); + } else { + port->ip_nsrequest = notify; + ip_unlock(port); + } + + *previousp = previous; +} + + +/* + * Routine: ipc_port_clear_receiver + * Purpose: + * Prepares a receive right for transmission/destruction. + * Conditions: + * The port is locked and active. + */ + +void +ipc_port_clear_receiver( + ipc_port_t port) +{ + spl_t s; + + assert(ip_active(port)); + + /* + * pull ourselves from any sets. + */ + if (port->ip_pset_count != 0) { + ipc_pset_remove_all(port); + port->ip_pset_count = 0; + } + + /* + * Send anyone waiting on the port's queue directly away. + * Also clear the mscount and seqno. 
+ */ + s = splsched(); + imq_lock(&port->ip_messages); + ipc_mqueue_changed(&port->ip_messages); + ipc_port_set_mscount(port, 0); + port->ip_messages.imq_seqno = 0; + imq_unlock(&port->ip_messages); + splx(s); +} + +/* + * Routine: ipc_port_init + * Purpose: + * Initializes a newly-allocated port. + * Doesn't touch the ip_object fields. + */ + +void +ipc_port_init( + ipc_port_t port, + ipc_space_t space, + mach_port_name_t name) +{ + /* port->ip_kobject doesn't have to be initialized */ + + port->ip_receiver = space; + port->ip_receiver_name = name; + + port->ip_mscount = 0; + port->ip_srights = 0; + port->ip_sorights = 0; + + port->ip_nsrequest = IP_NULL; + port->ip_pdrequest = IP_NULL; + port->ip_dnrequests = IPR_NULL; + + port->ip_pset_count = 0; + port->ip_premsg = IKM_NULL; + + thread_pool_init(&port->ip_thread_pool); + + port->ip_subsystem = RPC_SUBSYSTEM_NULL; + +#if MACH_ASSERT + ipc_port_init_debug(port); +#endif /* MACH_ASSERT */ + + ipc_mqueue_init(&port->ip_messages, FALSE /* set */); +} + +/* + * Routine: ipc_port_alloc + * Purpose: + * Allocate a port. + * Conditions: + * Nothing locked. If successful, the port is returned + * locked. (The caller doesn't have a reference.) + * Returns: + * KERN_SUCCESS The port is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NO_SPACE No room for an entry in the space. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +ipc_port_alloc( + ipc_space_t space, + mach_port_name_t *namep, + ipc_port_t *portp) +{ + ipc_port_t port; + mach_port_name_t name; + kern_return_t kr; + + kr = ipc_object_alloc(space, IOT_PORT, + MACH_PORT_TYPE_RECEIVE, 0, + &name, (ipc_object_t *) &port); + if (kr != KERN_SUCCESS) + return kr; + + /* port is locked */ + + ipc_port_init(port, space, name); + + *namep = name; + *portp = port; + + return KERN_SUCCESS; +} + +/* + * Routine: ipc_port_alloc_name + * Purpose: + * Allocate a port, with a specific name. + * Conditions: + * Nothing locked. 
If successful, the port is returned + * locked. (The caller doesn't have a reference.) + * Returns: + * KERN_SUCCESS The port is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NAME_EXISTS The name already denotes a right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +ipc_port_alloc_name( + ipc_space_t space, + mach_port_name_t name, + ipc_port_t *portp) +{ + ipc_port_t port; + kern_return_t kr; + + kr = ipc_object_alloc_name(space, IOT_PORT, + MACH_PORT_TYPE_RECEIVE, 0, + name, (ipc_object_t *) &port); + if (kr != KERN_SUCCESS) + return kr; + + /* port is locked */ + + ipc_port_init(port, space, name); + + *portp = port; + + return KERN_SUCCESS; +} + +/* + * Generate dead name notifications. Called from ipc_port_destroy. + * Port is unlocked but still has reference(s); + * dnrequests was taken from port while the port + * was locked but the port now has port->ip_dnrequests set to IPR_NULL. + */ +void +ipc_port_dnnotify( + ipc_port_t port, + ipc_port_request_t dnrequests) +{ + ipc_table_size_t its = dnrequests->ipr_size; + ipc_table_elems_t size = its->its_size; + ipc_port_request_index_t index; + + for (index = 1; index < size; index++) { + ipc_port_request_t ipr = &dnrequests[index]; + mach_port_name_t name = ipr->ipr_name; + ipc_port_t soright; + + if (name == MACH_PORT_NULL) + continue; + + soright = ipr->ipr_soright; + assert(soright != IP_NULL); + + ipc_notify_dead_name(soright, name); + } + + it_dnrequests_free(its, dnrequests); +} + +/* + * Routine: ipc_port_destroy + * Purpose: + * Destroys a port. Cleans up queued messages. + * + * If the port has a backup, it doesn't get destroyed, + * but is sent in a port-destroyed notification to the backup. + * Conditions: + * The port is locked and alive; nothing else locked. + * The caller has a reference, which is consumed. + * Afterwards, the port is unlocked and dead. 
+ */ + +void +ipc_port_destroy( + ipc_port_t port) +{ + ipc_port_t pdrequest, nsrequest; + ipc_mqueue_t mqueue; + ipc_kmsg_queue_t kmqueue; + ipc_kmsg_t kmsg; + ipc_port_request_t dnrequests; + thread_pool_t thread_pool; + + assert(ip_active(port)); + /* port->ip_receiver_name is garbage */ + /* port->ip_receiver/port->ip_destination is garbage */ + assert(port->ip_pset_count == 0); + assert(port->ip_mscount == 0); + + /* first check for a backup port */ + + pdrequest = port->ip_pdrequest; + if (pdrequest != IP_NULL) { + /* we assume the ref for pdrequest */ + port->ip_pdrequest = IP_NULL; + + /* make port be in limbo */ + port->ip_receiver_name = MACH_PORT_NULL; + port->ip_destination = IP_NULL; + ip_unlock(port); + + if (!ipc_port_check_circularity(port, pdrequest)) { + /* consumes our refs for port and pdrequest */ + ipc_notify_port_destroyed(pdrequest, port); + return; + } else { + /* consume pdrequest and destroy port */ + ipc_port_release_sonce(pdrequest); + } + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_pset_count == 0); + assert(port->ip_mscount == 0); + assert(port->ip_pdrequest == IP_NULL); + assert(port->ip_receiver_name == MACH_PORT_NULL); + assert(port->ip_destination == IP_NULL); + + /* fall through and destroy the port */ + } + + /* once port is dead, we don't need to keep it locked */ + + port->ip_object.io_bits &= ~IO_BITS_ACTIVE; + port->ip_timestamp = ipc_port_timestamp(); + + /* save for later */ + dnrequests = port->ip_dnrequests; + port->ip_dnrequests = IPR_NULL; + + /* + * If the port has a preallocated message buffer and that buffer + * is not inuse, free it. If it has and inuse one, then the kmsg + * free will detect that we freed the association and it can free it + * like a normal buffer. 
+ */ + if (IP_PREALLOC(port)) { + kmsg = port->ip_premsg; + assert(kmsg != IKM_NULL); + if (!ikm_prealloc_inuse(kmsg)) { + ikm_prealloc_clear_inuse(kmsg, port); + IP_CLEAR_PREALLOC(port, kmsg); + ipc_kmsg_free(kmsg); + } else { + assert(ikm_prealloc_inuse_port(kmsg) == port); + ikm_prealloc_clear_inuse(kmsg, port); + IP_CLEAR_PREALLOC(port, kmsg); + } + } + + ip_unlock(port); + + /* wakeup any threads waiting on this pool port for an activation */ + if ((thread_pool = &port->ip_thread_pool) != THREAD_POOL_NULL) + thread_pool_wakeup(thread_pool); + + /* throw away no-senders request */ + + nsrequest = port->ip_nsrequest; + if (nsrequest != IP_NULL) + ipc_notify_send_once(nsrequest); /* consumes ref */ + + /* destroy any queued messages */ + mqueue = &port->ip_messages; + ipc_mqueue_destroy(mqueue); + + /* generate dead-name notifications */ + if (dnrequests != IPR_NULL) { + ipc_port_dnnotify(port, dnrequests); + } + + ipc_kobject_destroy(port); + + if (port->ip_subsystem != RPC_SUBSYSTEM_NULL) { + subsystem_deallocate((subsystem_t) port->ip_kobject); + } + + /* XXXX Perhaps should verify that ip_thread_pool is empty! */ + + ipc_port_release(port); /* consume caller's ref */ +} + +/* + * Routine: ipc_port_check_circularity + * Purpose: + * Check if queueing "port" in a message for "dest" + * would create a circular group of ports and messages. + * + * If no circularity (FALSE returned), then "port" + * is changed from "in limbo" to "in transit". + * + * That is, we want to set port->ip_destination == dest, + * but guaranteeing that this doesn't create a circle + * port->ip_destination->ip_destination->... == port + * Conditions: + * No ports locked. References held for "port" and "dest". + */ + +boolean_t +ipc_port_check_circularity( + ipc_port_t port, + ipc_port_t dest) +{ + ipc_port_t base; + + assert(port != IP_NULL); + assert(dest != IP_NULL); + + if (port == dest) + return TRUE; + base = dest; + + /* + * First try a quick check that can run in parallel. 
+ * No circularity if dest is not in transit. + */ + + ip_lock(port); + if (ip_lock_try(dest)) { + if (!ip_active(dest) || + (dest->ip_receiver_name != MACH_PORT_NULL) || + (dest->ip_destination == IP_NULL)) + goto not_circular; + + /* dest is in transit; further checking necessary */ + + ip_unlock(dest); + } + ip_unlock(port); + + ipc_port_multiple_lock(); /* massive serialization */ + + /* + * Search for the end of the chain (a port not in transit), + * acquiring locks along the way. + */ + + for (;;) { + ip_lock(base); + + if (!ip_active(base) || + (base->ip_receiver_name != MACH_PORT_NULL) || + (base->ip_destination == IP_NULL)) + break; + + base = base->ip_destination; + } + + /* all ports in chain from dest to base, inclusive, are locked */ + + if (port == base) { + /* circularity detected! */ + + ipc_port_multiple_unlock(); + + /* port (== base) is in limbo */ + + assert(ip_active(port)); + assert(port->ip_receiver_name == MACH_PORT_NULL); + assert(port->ip_destination == IP_NULL); + + while (dest != IP_NULL) { + ipc_port_t next; + + /* dest is in transit or in limbo */ + + assert(ip_active(dest)); + assert(dest->ip_receiver_name == MACH_PORT_NULL); + + next = dest->ip_destination; + ip_unlock(dest); + dest = next; + } + + return TRUE; + } + + /* + * The guarantee: lock port while the entire chain is locked. + * Once port is locked, we can take a reference to dest, + * add port to the chain, and unlock everything. 
+ */ + + ip_lock(port); + ipc_port_multiple_unlock(); + + not_circular: + + /* port is in limbo */ + + assert(ip_active(port)); + assert(port->ip_receiver_name == MACH_PORT_NULL); + assert(port->ip_destination == IP_NULL); + + ip_reference(dest); + port->ip_destination = dest; + + /* now unlock chain */ + + while (port != base) { + ipc_port_t next; + + /* port is in transit */ + + assert(ip_active(port)); + assert(port->ip_receiver_name == MACH_PORT_NULL); + assert(port->ip_destination != IP_NULL); + + next = port->ip_destination; + ip_unlock(port); + port = next; + } + + /* base is not in transit */ + + assert(!ip_active(base) || + (base->ip_receiver_name != MACH_PORT_NULL) || + (base->ip_destination == IP_NULL)); + ip_unlock(base); + + return FALSE; +} + +/* + * Routine: ipc_port_lookup_notify + * Purpose: + * Make a send-once notify port from a receive right. + * Returns IP_NULL if name doesn't denote a receive right. + * Conditions: + * The space must be locked (read or write) and active. + * Being the active space, we can rely on thread server_id + * context to give us the proper server level sub-order + * within the space. + */ + +ipc_port_t +ipc_port_lookup_notify( + ipc_space_t space, + mach_port_name_t name) +{ + ipc_port_t port; + ipc_entry_t entry; + + assert(space->is_active); + + entry = ipc_entry_lookup(space, name); + if (entry == IE_NULL) + return IP_NULL; + if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) + return IP_NULL; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + + ip_reference(port); + port->ip_sorights++; + ip_unlock(port); + + return port; +} + +/* + * Routine: ipc_port_make_send + * Purpose: + * Make a naked send right from a receive right. + * Conditions: + * The port is not locked but it is active. 
+ */ + +ipc_port_t +ipc_port_make_send( + ipc_port_t port) +{ + assert(IP_VALID(port)); + + ip_lock(port); + assert(ip_active(port)); + port->ip_mscount++; + port->ip_srights++; + ip_reference(port); + ip_unlock(port); + + return port; +} + +/* + * Routine: ipc_port_copy_send + * Purpose: + * Make a naked send right from another naked send right. + * IP_NULL -> IP_NULL + * IP_DEAD -> IP_DEAD + * dead port -> IP_DEAD + * live port -> port + ref + * Conditions: + * Nothing locked except possibly a space. + */ + +ipc_port_t +ipc_port_copy_send( + ipc_port_t port) +{ + ipc_port_t sright; + + if (!IP_VALID(port)) + return port; + + ip_lock(port); + if (ip_active(port)) { + assert(port->ip_srights > 0); + + ip_reference(port); + port->ip_srights++; + sright = port; + } else + sright = IP_DEAD; + ip_unlock(port); + + return sright; +} + +/* + * Routine: ipc_port_copyout_send + * Purpose: + * Copyout a naked send right (possibly null/dead), + * or if that fails, destroy the right. + * Conditions: + * Nothing locked. + */ + +mach_port_name_t +ipc_port_copyout_send( + ipc_port_t sright, + ipc_space_t space) +{ + mach_port_name_t name; + + if (IP_VALID(sright)) { + kern_return_t kr; + + kr = ipc_object_copyout(space, (ipc_object_t) sright, + MACH_MSG_TYPE_PORT_SEND, TRUE, &name); + if (kr != KERN_SUCCESS) { + ipc_port_release_send(sright); + + if (kr == KERN_INVALID_CAPABILITY) + name = MACH_PORT_DEAD; + else + name = MACH_PORT_NULL; + } + } else + name = (mach_port_name_t) sright; + + return name; +} + +/* + * Routine: ipc_port_release_send + * Purpose: + * Release a (valid) naked send right. + * Consumes a ref for the port. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_port_release_send( + ipc_port_t port) +{ + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + + assert(IP_VALID(port)); + + ip_lock(port); + ip_release(port); + + if (!ip_active(port)) { + ip_check_unlock(port); + return; + } + + assert(port->ip_srights > 0); + + if (--port->ip_srights == 0 && + port->ip_nsrequest != IP_NULL) { + nsrequest = port->ip_nsrequest; + port->ip_nsrequest = IP_NULL; + mscount = port->ip_mscount; + ip_unlock(port); + ipc_notify_no_senders(nsrequest, mscount); + /* + * Check that there are no other locks taken, because + * [norma_]ipc_notify_no_senders routines may block. + */ + check_simple_locks(); + } else + ip_unlock(port); +} + +/* + * Routine: ipc_port_make_sonce + * Purpose: + * Make a naked send-once right from a receive right. + * Conditions: + * The port is not locked but it is active. + */ + +ipc_port_t +ipc_port_make_sonce( + ipc_port_t port) +{ + assert(IP_VALID(port)); + + ip_lock(port); + assert(ip_active(port)); + port->ip_sorights++; + ip_reference(port); + ip_unlock(port); + + return port; +} + +/* + * Routine: ipc_port_release_sonce + * Purpose: + * Release a naked send-once right. + * Consumes a ref for the port. + * + * In normal situations, this is never used. + * Send-once rights are only consumed when + * a message (possibly a send-once notification) + * is sent to them. + * Conditions: + * Nothing locked except possibly a space. + */ + +void +ipc_port_release_sonce( + ipc_port_t port) +{ + assert(IP_VALID(port)); + + ip_lock(port); + + assert(port->ip_sorights > 0); + + port->ip_sorights--; + + ip_release(port); + + if (!ip_active(port)) { + ip_check_unlock(port); + return; + } + + ip_unlock(port); +} + +/* + * Routine: ipc_port_release_receive + * Purpose: + * Release a naked (in limbo or in transit) receive right. + * Consumes a ref for the port; destroys the port. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_port_release_receive( + ipc_port_t port) +{ + ipc_port_t dest; + + assert(IP_VALID(port)); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == MACH_PORT_NULL); + dest = port->ip_destination; + + ipc_port_destroy(port); /* consumes ref, unlocks */ + + if (dest != IP_NULL) + ipc_port_release(dest); +} + +/* + * Routine: ipc_port_alloc_special + * Purpose: + * Allocate a port in a special space. + * The new port is returned with one ref. + * If unsuccessful, IP_NULL is returned. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +ipc_port_alloc_special( + ipc_space_t space) +{ + ipc_port_t port; + + port = (ipc_port_t) io_alloc(IOT_PORT); + if (port == IP_NULL) + return IP_NULL; + + bzero((char *)port, sizeof(*port)); + io_lock_init(&port->ip_object); + port->ip_references = 1; + port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0); + + ipc_port_init(port, space, 1); + + return port; +} + +/* + * Routine: ipc_port_dealloc_special + * Purpose: + * Deallocate a port in a special space. + * Consumes one ref for the port. + * Conditions: + * Nothing locked. + */ + +void +ipc_port_dealloc_special( + ipc_port_t port, + ipc_space_t space) +{ + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name != MACH_PORT_NULL); + assert(port->ip_receiver == space); + + /* + * We clear ip_receiver_name and ip_receiver to simplify + * the ipc_space_kernel check in ipc_mqueue_send. + */ + + port->ip_receiver_name = MACH_PORT_NULL; + port->ip_receiver = IS_NULL; + + /* relevant part of ipc_port_clear_receiver */ + ipc_port_set_mscount(port, 0); + port->ip_messages.imq_seqno = 0; + + ipc_port_destroy(port); +} + + +#if MACH_ASSERT +/* + * Keep a list of all allocated ports. + * Allocation is intercepted via ipc_port_init; + * deallocation is intercepted via io_free. 
+ */ +queue_head_t port_alloc_queue; +decl_mutex_data(,port_alloc_queue_lock) + +unsigned long port_count = 0; +unsigned long port_count_warning = 20000; +unsigned long port_timestamp = 0; + +void db_port_stack_trace( + ipc_port_t port); +void db_ref( + int refs); +int db_port_walk( + unsigned int verbose, + unsigned int display, + unsigned int ref_search, + unsigned int ref_target); + +/* + * Initialize global state needed for run-time + * port debugging. + */ +void +ipc_port_debug_init(void) +{ + queue_init(&port_alloc_queue); + mutex_init(&port_alloc_queue_lock, ETAP_IPC_PORT_ALLOCQ); +} + + +/* + * Initialize all of the debugging state in a port. + * Insert the port into a global list of all allocated ports. + */ +void +ipc_port_init_debug( + ipc_port_t port) +{ + unsigned int i; + + port->ip_thread = (unsigned long) current_thread(); + port->ip_timetrack = port_timestamp++; + for (i = 0; i < IP_CALLSTACK_MAX; ++i) + port->ip_callstack[i] = 0; + for (i = 0; i < IP_NSPARES; ++i) + port->ip_spares[i] = 0; + + /* + * Machine-dependent routine to fill in an + * array with up to IP_CALLSTACK_MAX levels + * of return pc information. + */ + machine_callstack(&port->ip_callstack[0], IP_CALLSTACK_MAX); + +#if 0 + mutex_lock(&port_alloc_queue_lock); + ++port_count; + if (port_count_warning > 0 && port_count >= port_count_warning) + assert(port_count < port_count_warning); + queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links); + mutex_unlock(&port_alloc_queue_lock); +#endif +} + + +/* + * Remove a port from the queue of allocated ports. + * This routine should be invoked JUST prior to + * deallocating the actual memory occupied by the port. 
+ */ +void +ipc_port_track_dealloc( + ipc_port_t port) +{ +#if 0 + mutex_lock(&port_alloc_queue_lock); + assert(port_count > 0); + --port_count; + queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links); + mutex_unlock(&port_alloc_queue_lock); +#endif +} + +#endif /* MACH_ASSERT */ + + +#if MACH_KDB + +#include +#include + +#define printf kdbprintf +extern int db_indent; + +int +db_port_queue_print( + ipc_port_t port); + +/* + * ipc_entry_print - pretty-print an ipc_entry + */ +static void ipc_entry_print(struct ipc_entry *, char *); /* forward */ + +static void ipc_entry_print(struct ipc_entry *iep, char *tag) +{ + ipc_entry_bits_t bits = iep->ie_bits; + + iprintf("%s @", tag); + printf(" 0x%x, bits=%x object=%x\n", iep, bits, iep->ie_object); + db_indent += 2; + iprintf("urefs=%x ", IE_BITS_UREFS(bits)); + printf("type=%x gen=%x\n", IE_BITS_TYPE(bits), IE_BITS_GEN(bits)); + db_indent -= 2; +} + +/* + * Routine: ipc_port_print + * Purpose: + * Pretty-print a port for kdb. + */ +int ipc_port_print_long = 0; /* set for more detail */ + +void +ipc_port_print( + ipc_port_t port, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + extern int db_indent; + db_addr_t task; + int task_id; + int nmsgs; + int verbose = 0; +#if MACH_ASSERT + int i, needs_db_indent, items_printed; +#endif /* MACH_ASSERT */ + + if (db_option(modif, 'l') || db_option(modif, 'v')) + ++verbose; + + printf("port 0x%x\n", port); + + db_indent += 2; + + ipc_object_print(&port->ip_object); + + if (ipc_port_print_long) { + iprintf("pool=0x%x", port->ip_thread_pool); + printf("\n"); + } + + if (!ip_active(port)) { + iprintf("timestamp=0x%x", port->ip_timestamp); + } else if (port->ip_receiver_name == MACH_PORT_NULL) { + iprintf("destination=0x%x (", port->ip_destination); + if (port->ip_destination != MACH_PORT_NULL && + (task = db_task_from_space(port->ip_destination-> + ip_receiver, &task_id))) + printf("task%d at 0x%x", task_id, task); + else + printf("unknown"); + printf(")"); + } 
else { + iprintf("receiver=0x%x (", port->ip_receiver); + if (port->ip_receiver == ipc_space_kernel) + printf("kernel"); + else if (port->ip_receiver == ipc_space_reply) + printf("reply"); + else if (port->ip_receiver == default_pager_space) + printf("default_pager"); + else if (task = db_task_from_space(port->ip_receiver, &task_id)) + printf("task%d at 0x%x", task_id, task); + else + printf("unknown"); + printf(")"); + } + printf(", receiver_name=0x%x\n", port->ip_receiver_name); + + iprintf("mscount=%d", port->ip_mscount); + printf(", srights=%d", port->ip_srights); + printf(", sorights=%d\n", port->ip_sorights); + + iprintf("nsrequest=0x%x", port->ip_nsrequest); + printf(", pdrequest=0x%x", port->ip_pdrequest); + printf(", dnrequests=0x%x\n", port->ip_dnrequests); + + iprintf("pset_count=0x%x", port->ip_pset_count); + printf(", seqno=%d", port->ip_messages.imq_seqno); + printf(", msgcount=%d", port->ip_messages.imq_msgcount); + printf(", qlimit=%d\n", port->ip_messages.imq_qlimit); + + iprintf("kmsgs=0x%x", port->ip_messages.imq_messages.ikmq_base); + printf(", rcvrs queue=0x%x", port->ip_messages.imq_wait_queue); + printf(", kobj=0x%x\n", port->ip_kobject); + + iprintf("premsg=0x%x", port->ip_premsg); + +#if MACH_ASSERT + /* don't bother printing callstack or queue links */ + iprintf("ip_thread=0x%x, ip_timetrack=0x%x\n", + port->ip_thread, port->ip_timetrack); + items_printed = 0; + needs_db_indent = 1; + for (i = 0; i < IP_NSPARES; ++i) { + if (port->ip_spares[i] != 0) { + if (needs_db_indent) { + iprintf(""); + needs_db_indent = 0; + } + printf("%sip_spares[%d] = %d", + items_printed ? 
", " : "", i, + port->ip_spares[i]); + if (++items_printed >= 4) { + needs_db_indent = 1; + printf("\n"); + items_printed = 0; + } + } + } +#endif /* MACH_ASSERT */ + + if (verbose) { + iprintf("kmsg queue contents:\n"); + db_indent += 2; + nmsgs = db_port_queue_print(port); + db_indent -= 2; + iprintf("...total kmsgs: %d\n", nmsgs); + } + + db_indent -=2; +} + +ipc_port_t +ipc_name_to_data( + task_t task, + mach_port_name_t name) +{ + ipc_space_t space; + ipc_entry_t entry; + + if (task == TASK_NULL) { + db_printf("port_name_to_data: task is null\n"); + return (0); + } + if ((space = task->itk_space) == 0) { + db_printf("port_name_to_data: task->itk_space is null\n"); + return (0); + } + if (!space->is_active) { + db_printf("port_name_to_data: task->itk_space not active\n"); + return (0); + } + if ((entry = ipc_entry_lookup(space, name)) == 0) { + db_printf("port_name_to_data: lookup yields zero\n"); + return (0); + } + return ((ipc_port_t)entry->ie_object); +} + +#if ZONE_DEBUG +void +print_type_ports(type, dead) + unsigned type; + unsigned dead; +{ + ipc_port_t port; + int n; + + n = 0; + for (port = (ipc_port_t)first_element(ipc_object_zones[IOT_PORT]); + port; + port = (ipc_port_t)next_element(ipc_object_zones[IOT_PORT], + (vm_offset_t)port)) + if (ip_kotype(port) == type && + (!dead || !ip_active(port))) { + if (++n % 5) + printf("0x%x\t", port); + else + printf("0x%x\n", port); + } + if (n % 5) + printf("\n"); +} + +void +print_ports(void) +{ + ipc_port_t port; + int total_port_count; + int space_null_count; + int space_kernel_count; + int space_reply_count; + int space_pager_count; + int space_other_count; + + struct { + int total_count; + int dead_count; + } port_types[IKOT_MAX_TYPE]; + + total_port_count = 0; + + bzero((char *)&port_types[0], sizeof(port_types)); + space_null_count = 0; + space_kernel_count = 0; + space_reply_count = 0; + space_pager_count = 0; + space_other_count = 0; + + for (port = (ipc_port_t)first_element(ipc_object_zones[IOT_PORT]); 
+ port; + port = (ipc_port_t)next_element(ipc_object_zones[IOT_PORT], + (vm_offset_t)port)) { + total_port_count++; + if (ip_kotype(port) >= IKOT_MAX_TYPE) { + port_types[IKOT_UNKNOWN].total_count++; + if (!io_active(&port->ip_object)) + port_types[IKOT_UNKNOWN].dead_count++; + } else { + port_types[ip_kotype(port)].total_count++; + if (!io_active(&port->ip_object)) + port_types[ip_kotype(port)].dead_count++; + } + + if (!port->ip_receiver) + space_null_count++; + else if (port->ip_receiver == ipc_space_kernel) + space_kernel_count++; + else if (port->ip_receiver == ipc_space_reply) + space_reply_count++; + else if (port->ip_receiver == default_pager_space) + space_pager_count++; + else + space_other_count++; + } + printf("\n%7d total ports\n\n", total_port_count); + +#define PRINT_ONE_PORT_TYPE(name) \ + printf("%7d %s", port_types[IKOT_##name].total_count, # name); \ + if (port_types[IKOT_##name].dead_count) \ + printf(" (%d dead ports)", port_types[IKOT_##name].dead_count);\ + printf("\n"); + + PRINT_ONE_PORT_TYPE(NONE); + PRINT_ONE_PORT_TYPE(THREAD); + PRINT_ONE_PORT_TYPE(TASK); + PRINT_ONE_PORT_TYPE(HOST); + PRINT_ONE_PORT_TYPE(HOST_PRIV); + PRINT_ONE_PORT_TYPE(PROCESSOR); + PRINT_ONE_PORT_TYPE(PSET); + PRINT_ONE_PORT_TYPE(PSET_NAME); + PRINT_ONE_PORT_TYPE(PAGING_REQUEST); + PRINT_ONE_PORT_TYPE(XMM_OBJECT); + PRINT_ONE_PORT_TYPE(DEVICE); + PRINT_ONE_PORT_TYPE(XMM_PAGER); + PRINT_ONE_PORT_TYPE(XMM_KERNEL); + PRINT_ONE_PORT_TYPE(XMM_REPLY); + PRINT_ONE_PORT_TYPE(CLOCK); + PRINT_ONE_PORT_TYPE(CLOCK_CTRL); + PRINT_ONE_PORT_TYPE(MASTER_DEVICE); + PRINT_ONE_PORT_TYPE(UNKNOWN); + printf("\nipc_space:\n\n"); + printf("NULL KERNEL REPLY PAGER OTHER\n"); + printf("%d %d %d %d %d\n", + space_null_count, + space_kernel_count, + space_reply_count, + space_pager_count, + space_other_count + ); +} + +#endif /* ZONE_DEBUG */ + + +/* + * Print out all the kmsgs in a queue. Aggregate kmsgs with + * identical message ids into a single entry. 
Count up the + * amount of inline and out-of-line data consumed by each + * and every kmsg. + * + */ + +#define KMSG_MATCH_FIELD(kmsg) ((unsigned int) kmsg->ikm_header.msgh_id) +#define DKQP_LONG(kmsg) FALSE +char *dkqp_long_format = "(%3d) <%10d> 0x%x %10d %10d\n"; +char *dkqp_format = "(%3d) <%10d> 0x%x %10d %10d\n"; + +int +db_kmsg_queue_print( + ipc_kmsg_t kmsg); +int +db_kmsg_queue_print( + ipc_kmsg_t kmsg) +{ + ipc_kmsg_t ikmsg, first_kmsg; + register int icount; + mach_msg_id_t cur_id; + unsigned int inline_total, ool_total; + int nmsgs; + + iprintf("Count msgh_id kmsg addr inline bytes ool bytes\n"); + inline_total = ool_total = (vm_size_t) 0; + cur_id = KMSG_MATCH_FIELD(kmsg); + for (icount = 0, nmsgs = 0, first_kmsg = ikmsg = kmsg; + kmsg != IKM_NULL && (kmsg != first_kmsg || nmsgs == 0); + kmsg = kmsg->ikm_next) { + ++nmsgs; + if (!(KMSG_MATCH_FIELD(kmsg) == cur_id)) { + iprintf(DKQP_LONG(kmsg) ? dkqp_long_format:dkqp_format, + icount, cur_id, ikmsg, inline_total,ool_total); + cur_id = KMSG_MATCH_FIELD(kmsg); + icount = 1; + ikmsg = kmsg; + inline_total = ool_total = 0; + } else { + icount++; + } + if (DKQP_LONG(kmsg)) + inline_total += kmsg->ikm_size; + else + inline_total += kmsg->ikm_header.msgh_size; + } + iprintf(DKQP_LONG(kmsg) ? dkqp_long_format : dkqp_format, + icount, cur_id, ikmsg, inline_total, ool_total); + return nmsgs; +} + + +/* + * Process all of the messages on a port - prints out the + * number of occurences of each message type, and the first + * kmsg with a particular msgh_id. + */ +int +db_port_queue_print( + ipc_port_t port) +{ + ipc_kmsg_t kmsg; + + if (ipc_kmsg_queue_empty(&port->ip_messages.imq_messages)) + return 0; + kmsg = ipc_kmsg_queue_first(&port->ip_messages.imq_messages); + return db_kmsg_queue_print(kmsg); +} + + +#if MACH_ASSERT +#include +#include + +#define FUNC_NULL ((void (*)) 0) +#define MAX_REFS 5 /* bins for tracking ref counts */ + +/* + * Translate port's cache of call stack pointers + * into symbolic names. 
+ */ +void +db_port_stack_trace( + ipc_port_t port) +{ + unsigned int i; + + for (i = 0; i < IP_CALLSTACK_MAX; ++i) { + iprintf("[%d] 0x%x\t", i, port->ip_callstack[i]); + if (port->ip_callstack[i] != 0 && + DB_VALID_KERN_ADDR(port->ip_callstack[i])) + db_printsym(port->ip_callstack[i], DB_STGY_PROC); + printf("\n"); + } +} + + +typedef struct port_item { + unsigned long item; + unsigned long count; +} port_item; + + +#define ITEM_MAX 400 +typedef struct port_track { + char *name; + unsigned long max; + unsigned long warning; + port_item items[ITEM_MAX]; +} port_track; + +port_track port_callers; /* match against calling addresses */ +port_track port_threads; /* match against allocating threads */ +port_track port_spaces; /* match against ipc spaces */ + +void port_track_init( + port_track *trackp, + char *name); +void port_item_add( + port_track *trackp, + unsigned long item); +void port_track_sort( + port_track *trackp); +void port_track_print( + port_track *trackp, + void (*func)(port_item *)); +void port_callers_print( + port_item *p); + +void +port_track_init( + port_track *trackp, + char *name) +{ + port_item *i; + + trackp->max = trackp->warning = 0; + trackp->name = name; + for (i = trackp->items; i < trackp->items + ITEM_MAX; ++i) + i->item = i->count = 0; +} + + +void +port_item_add( + port_track *trackp, + unsigned long item) +{ + port_item *limit, *i; + + limit = trackp->items + trackp->max; + for (i = trackp->items; i < limit; ++i) + if (i->item == item) { + i->count++; + return; + } + if (trackp->max >= ITEM_MAX) { + if (trackp->warning++ == 0) + iprintf("%s: no room\n", trackp->name); + return; + } + i->item = item; + i->count = 1; + trackp->max++; +} + + +/* + * Simple (and slow) bubble sort. 
+ */ +void +port_track_sort( + port_track *trackp) +{ + port_item *limit, *p; + port_item temp; + boolean_t unsorted; + + limit = trackp->items + trackp->max - 1; + do { + unsorted = FALSE; + for (p = trackp->items; p < limit - 1; ++p) { + if (p->count < (p+1)->count) { + temp = *p; + *p = *(p+1); + *(p+1) = temp; + unsorted = TRUE; + } + } + } while (unsorted == TRUE); +} + + +void +port_track_print( + port_track *trackp, + void (*func)(port_item *)) +{ + port_item *limit, *p; + + limit = trackp->items + trackp->max; + iprintf("%s:\n", trackp->name); + for (p = trackp->items; p < limit; ++p) { + if (func != FUNC_NULL) + (*func)(p); + else + iprintf("0x%x\t%8d\n", p->item, p->count); + } +} + + +void +port_callers_print( + port_item *p) +{ + iprintf("0x%x\t%8d\t", p->item, p->count); + db_printsym(p->item, DB_STGY_PROC); + printf("\n"); +} + + +/* + * Show all ports with a given reference count. + */ +void +db_ref( + int refs) +{ + db_port_walk(1, 1, 1, refs); +} + + +/* + * Examine all currently allocated ports. 
+ * Options: + * verbose display suspicious ports + * display print out each port encountered + * ref_search restrict examination to ports with + * a specified reference count + * ref_target reference count for ref_search + */ +int +db_port_walk( + unsigned int verbose, + unsigned int display, + unsigned int ref_search, + unsigned int ref_target) +{ + ipc_port_t port; + unsigned int ref_overflow, refs, i, ref_inactive_overflow; + unsigned int no_receiver, no_match; + unsigned int ref_counts[MAX_REFS]; + unsigned int inactive[MAX_REFS]; + unsigned int ipc_ports = 0; + unsigned int proxies = 0, principals = 0; + + iprintf("Allocated port count is %d\n", port_count); + no_receiver = no_match = ref_overflow = 0; + ref_inactive_overflow = 0; + for (i = 0; i < MAX_REFS; ++i) { + ref_counts[i] = 0; + inactive[i] = 0; + } + port_track_init(&port_callers, "port callers"); + port_track_init(&port_threads, "port threads"); + port_track_init(&port_spaces, "port spaces"); + if (ref_search) + iprintf("Walking ports of ref_count=%d.\n", ref_target); + else + iprintf("Walking all ports.\n"); + + queue_iterate(&port_alloc_queue, port, ipc_port_t, ip_port_links) { + char *port_type; + + port_type = " IPC port"; + if (ip_active(port)) + ipc_ports++; + + refs = port->ip_references; + if (ref_search && refs != ref_target) + continue; + + if (refs >= MAX_REFS) { + if (ip_active(port)) + ++ref_overflow; + else + ++ref_inactive_overflow; + } else { + if (refs == 0 && verbose) + iprintf("%s 0x%x has ref count of zero!\n", + port_type, port); + if (ip_active(port)) + ref_counts[refs]++; + else + inactive[refs]++; + } + port_item_add(&port_threads, (unsigned long) port->ip_thread); + for (i = 0; i < IP_CALLSTACK_MAX; ++i) { + if (port->ip_callstack[i] != 0 && + DB_VALID_KERN_ADDR(port->ip_callstack[i])) + port_item_add(&port_callers, + port->ip_callstack[i]); + } + if (!ip_active(port)) { + if (verbose) + iprintf("%s 0x%x, inactive, refcnt %d\n", + port_type, port, refs); + continue; + } + + 
if (port->ip_receiver_name == MACH_PORT_NULL) { + iprintf("%s 0x%x, no receiver, refcnt %d\n", + port, refs); + ++no_receiver; + continue; + } + if (port->ip_receiver == ipc_space_kernel || + port->ip_receiver == ipc_space_reply || + ipc_entry_lookup(port->ip_receiver, + port->ip_receiver_name) + != IE_NULL) { + port_item_add(&port_spaces, + (unsigned long)port->ip_receiver); + if (display) { + iprintf( "%s 0x%x time 0x%x ref_cnt %d\n", + port_type, port, + port->ip_timetrack, refs); + } + continue; + } + iprintf("%s 0x%x, rcvr 0x%x, name 0x%x, ref %d, no match\n", + port_type, port, port->ip_receiver, + port->ip_receiver_name, refs); + ++no_match; + } + iprintf("Active port type summary:\n"); + iprintf("\tlocal IPC %6d\n", ipc_ports); + iprintf("summary:\tcallers %d threads %d spaces %d\n", + port_callers.max, port_threads.max, port_spaces.max); + + iprintf("\tref_counts:\n"); + for (i = 0; i < MAX_REFS; ++i) + iprintf("\t ref_counts[%d] = %d\n", i, ref_counts[i]); + + iprintf("\t%d ports w/o receivers, %d w/o matches\n", + no_receiver, no_match); + + iprintf("\tinactives:"); + if ( ref_inactive_overflow || inactive[0] || inactive[1] || + inactive[2] || inactive[3] || inactive[4] ) + printf(" [0]=%d [1]=%d [2]=%d [3]=%d [4]=%d [5+]=%d\n", + inactive[0], inactive[1], inactive[2], + inactive[3], inactive[4], ref_inactive_overflow); + else + printf(" No inactive ports.\n"); + + port_track_sort(&port_spaces); + port_track_print(&port_spaces, FUNC_NULL); + port_track_sort(&port_threads); + port_track_print(&port_threads, FUNC_NULL); + port_track_sort(&port_callers); + port_track_print(&port_callers, port_callers_print); + return 0; +} + + +#endif /* MACH_ASSERT */ + +#endif /* MACH_KDB */ diff --git a/osfmk/ipc/ipc_port.h b/osfmk/ipc/ipc_port.h new file mode 100644 index 000000000..5bd9e5857 --- /dev/null +++ b/osfmk/ipc/ipc_port.h @@ -0,0 +1,443 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: ipc/ipc_port.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for ports. + */ + +#ifndef _IPC_IPC_PORT_H_ +#define _IPC_IPC_PORT_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* + * A receive right (port) can be in four states: + * 1) dead (not active, ip_timestamp has death time) + * 2) in a space (ip_receiver_name != 0, ip_receiver points + * to the space but doesn't hold a ref for it) + * 3) in transit (ip_receiver_name == 0, ip_destination points + * to the destination port and holds a ref for it) + * 4) in limbo (ip_receiver_name == 0, ip_destination == IP_NULL) + * + * If the port is active, and ip_receiver points to some space, + * then ip_receiver_name != 0, and that space holds receive rights. + * If the port is not active, then ip_timestamp contains a timestamp + * taken when the port was destroyed. + */ + +typedef unsigned int ipc_port_timestamp_t; + +typedef unsigned int ipc_port_flags_t; + +struct ipc_port { + + /* + * Initial sub-structure in common with ipc_pset and rpc_port + * First element is an ipc_object + */ + struct ipc_object ip_object; + + union { + struct ipc_space *receiver; + struct ipc_port *destination; + ipc_port_timestamp_t timestamp; + } data; + + ipc_kobject_t ip_kobject; + struct rpc_subsystem * ip_subsystem; + mach_port_mscount_t ip_mscount; + mach_port_rights_t ip_srights; + mach_port_rights_t ip_sorights; + + struct ipc_port *ip_nsrequest; + struct ipc_port *ip_pdrequest; + struct ipc_port_request *ip_dnrequests; + + unsigned int ip_pset_count; + struct ipc_mqueue ip_messages; + struct ipc_kmsg *ip_premsg; + +#if NORMA_VM + /* + * These fields are needed for the use of XMM. + * Few ports need this information; it should + * be kept in XMM instead (TBD). 
XXX + */ + long ip_norma_xmm_object_refs; + struct ipc_port *ip_norma_xmm_object; +#endif + +#if MACH_ASSERT +#define IP_NSPARES 10 +#define IP_CALLSTACK_MAX 10 + queue_chain_t ip_port_links; /* all allocated ports */ + natural_t ip_thread; /* who made me? thread context */ + unsigned long ip_timetrack; /* give an idea of "when" created */ + natural_t ip_callstack[IP_CALLSTACK_MAX]; /* stack trace */ + unsigned long ip_spares[IP_NSPARES]; /* for debugging */ +#endif /* MACH_ASSERT */ + int alias; +}; + + +#define ip_references ip_object.io_references +#define ip_bits ip_object.io_bits +#define ip_receiver_name ip_object.io_receiver_name + +#define ip_thread_pool ip_object.io_thread_pool + +#define ip_receiver data.receiver +#define ip_destination data.destination +#define ip_timestamp data.timestamp + +#define IP_NULL IPC_PORT_NULL +#define IP_DEAD IPC_PORT_DEAD +#define IP_VALID(port) IPC_PORT_VALID(port) + +#define ip_active(port) io_active(&(port)->ip_object) +#define ip_lock_init(port) io_lock_init(&(port)->ip_object) +#define ip_lock(port) io_lock(&(port)->ip_object) +#define ip_lock_try(port) io_lock_try(&(port)->ip_object) +#define ip_unlock(port) io_unlock(&(port)->ip_object) +#define ip_check_unlock(port) io_check_unlock(&(port)->ip_object) + +#define ip_reference(port) io_reference(&(port)->ip_object) +#define ip_release(port) io_release(&(port)->ip_object) + +#define ip_kotype(port) io_kotype(&(port)->ip_object) + +/* + * JMM - Preallocation flag + * This flag indicates that there is a message buffer preallocated for this + * port and we should use that when sending (from the kernel) rather than + * allocate a new one. This avoids deadlocks during notification message + * sends by critical system threads (which may be needed to free memory and + * therefore cannot be blocked waiting for memory themselves). 
+ */ +#define IP_BIT_PREALLOC 0x00008000 /* preallocated mesg */ +#define IP_PREALLOC(port) ((port)->ip_bits & IP_BIT_PREALLOC) + +#define IP_SET_PREALLOC(port, kmsg) \ +MACRO_BEGIN \ + (port)->ip_bits |= IP_BIT_PREALLOC; \ + (port)->ip_premsg = (kmsg); \ +MACRO_END + +#define IP_CLEAR_PREALLOC(port, kmsg) \ +MACRO_BEGIN \ + assert((port)->ip_premsg == kmsg); \ + (port)->ip_bits &= ~IP_BIT_PREALLOC; \ + (port)->ip_premsg = IKM_NULL; \ +MACRO_END + +typedef ipc_table_index_t ipc_port_request_index_t; + +typedef struct ipc_port_request { + union { + struct ipc_port *port; + ipc_port_request_index_t index; + } notify; + + union { + mach_port_name_t name; + struct ipc_table_size *size; + } name; +} *ipc_port_request_t; + +#define ipr_next notify.index +#define ipr_size name.size + +#define ipr_soright notify.port +#define ipr_name name.name + +#define IPR_NULL ((ipc_port_request_t) 0) + +/* + * Taking the ipc_port_multiple lock grants the privilege + * to lock multiple ports at once. No ports must locked + * when it is taken. + */ + +decl_mutex_data(extern,ipc_port_multiple_lock_data) + +#define ipc_port_multiple_lock_init() \ + mutex_init(&ipc_port_multiple_lock_data, ETAP_IPC_PORT_MULT) + +#define ipc_port_multiple_lock() \ + mutex_lock(&ipc_port_multiple_lock_data) + +#define ipc_port_multiple_unlock() \ + mutex_unlock(&ipc_port_multiple_lock_data) + +/* + * The port timestamp facility provides timestamps + * for port destruction. It is used to serialize + * mach_port_names with port death. 
+ */ + +decl_mutex_data(extern,ipc_port_timestamp_lock_data) +extern ipc_port_timestamp_t ipc_port_timestamp_data; + +#define ipc_port_timestamp_lock_init() \ + mutex_init(&ipc_port_timestamp_lock_data, ETAP_IPC_PORT_TIME) + +#define ipc_port_timestamp_lock() \ + mutex_lock(&ipc_port_timestamp_lock_data) + +#define ipc_port_timestamp_unlock() \ + mutex_unlock(&ipc_port_timestamp_lock_data) + +/* Retrieve a port timestamp value */ +extern ipc_port_timestamp_t ipc_port_timestamp(void); + +/* + * Compares two timestamps, and returns TRUE if one + * happened before two. Note that this formulation + * works when the timestamp wraps around at 2^32, + * as long as one and two aren't too far apart. + */ + +#define IP_TIMESTAMP_ORDER(one, two) ((int) ((one) - (two)) < 0) + +#define ipc_port_translate_receive(space, name, portp) \ + ipc_object_translate((space), (name), \ + MACH_PORT_RIGHT_RECEIVE, \ + (ipc_object_t *) (portp)) + +#define ipc_port_translate_send(space, name, portp) \ + ipc_object_translate((space), (name), \ + MACH_PORT_RIGHT_SEND, \ + (ipc_object_t *) (portp)) + +/* Allocate a dead-name request slot */ +extern kern_return_t +ipc_port_dnrequest( + ipc_port_t port, + mach_port_name_t name, + ipc_port_t soright, + ipc_port_request_index_t *indexp); + +/* Grow a port's table of dead-name requests */ +extern kern_return_t ipc_port_dngrow( + ipc_port_t port, + int target_size); + +/* Cancel a dead-name request and return the send-once right */ +extern ipc_port_t ipc_port_dncancel( + ipc_port_t port, + mach_port_name_t name, + ipc_port_request_index_t index); + +#define ipc_port_dnrename(port, index, oname, nname) \ +MACRO_BEGIN \ + ipc_port_request_t ipr, table; \ + \ + assert(ip_active(port)); \ + \ + table = port->ip_dnrequests; \ + assert(table != IPR_NULL); \ + \ + ipr = &table[index]; \ + assert(ipr->ipr_name == oname); \ + \ + ipr->ipr_name = nname; \ +MACRO_END + +/* Make a port-deleted request */ +extern void ipc_port_pdrequest( + ipc_port_t port, + 
ipc_port_t notify, + ipc_port_t *previousp); + +/* Make a no-senders request */ +extern void ipc_port_nsrequest( + ipc_port_t port, + mach_port_mscount_t sync, + ipc_port_t notify, + ipc_port_t *previousp); + +#define ipc_port_set_mscount(port, mscount) \ +MACRO_BEGIN \ + assert(ip_active(port)); \ + \ + (port)->ip_mscount = (mscount); \ +MACRO_END + +/* Prepare a receive right for transmission/destruction */ +extern void ipc_port_clear_receiver( + ipc_port_t port); + +/* Initialize a newly-allocated port */ +extern void ipc_port_init( + ipc_port_t port, + ipc_space_t space, + mach_port_name_t name); + +/* Allocate a port */ +extern kern_return_t ipc_port_alloc( + ipc_space_t space, + mach_port_name_t *namep, + ipc_port_t *portp); + +/* Allocate a port, with a specific name */ +extern kern_return_t ipc_port_alloc_name( + ipc_space_t space, + mach_port_name_t name, + ipc_port_t *portp); + +/* Generate dead name notifications */ +extern void ipc_port_dnnotify( + ipc_port_t port, + ipc_port_request_t dnrequests); + +/* Destroy a port */ +extern void ipc_port_destroy( + ipc_port_t port); + +/* Check if queueing "port" in a message for "dest" would create a circular + group of ports and messages */ +extern boolean_t +ipc_port_check_circularity( + ipc_port_t port, + ipc_port_t dest); + +/* Make a send-once notify port from a receive right */ +extern ipc_port_t ipc_port_lookup_notify( + ipc_space_t space, + mach_port_name_t name); + +/* Make a naked send right from a receive right */ +extern ipc_port_t ipc_port_make_send( + ipc_port_t port); + +/* Make a naked send right from another naked send right */ +extern ipc_port_t ipc_port_copy_send( + ipc_port_t port); + +/* Copyout a naked send right */ +extern mach_port_name_t ipc_port_copyout_send( + ipc_port_t sright, + ipc_space_t space); + +/* Release a (valid) naked send right */ +extern void ipc_port_release_send( + ipc_port_t port); + +/* Make a naked send-once right from a receive right */ +extern ipc_port_t 
ipc_port_make_sonce( + ipc_port_t port); + +/* Release a naked send-once right */ +extern void ipc_port_release_sonce( + ipc_port_t port); + +/* Release a naked (in limbo or in transit) receive right */ +extern void ipc_port_release_receive( + ipc_port_t port); + +/* Allocate a port in a special space */ +extern ipc_port_t ipc_port_alloc_special( + ipc_space_t space); + +/* Deallocate a port in a special space */ +extern void ipc_port_dealloc_special( + ipc_port_t port, + ipc_space_t space); + +#if MACH_ASSERT +/* Track low-level port deallocation */ +extern void ipc_port_track_dealloc( + ipc_port_t port); + +/* Initialize general port debugging state */ +extern void ipc_port_debug_init(void); +#endif /* MACH_ASSERT */ + +#define ipc_port_alloc_kernel() \ + ipc_port_alloc_special(ipc_space_kernel) +#define ipc_port_dealloc_kernel(port) \ + ipc_port_dealloc_special((port), ipc_space_kernel) + +#define ipc_port_alloc_reply() \ + ipc_port_alloc_special(ipc_space_reply) +#define ipc_port_dealloc_reply(port) \ + ipc_port_dealloc_special((port), ipc_space_reply) + +#define ipc_port_reference(port) \ + ipc_object_reference(&(port)->ip_object) + +#define ipc_port_release(port) \ + ipc_object_release(&(port)->ip_object) + +#endif /* _IPC_IPC_PORT_H_ */ diff --git a/osfmk/ipc/ipc_print.h b/osfmk/ipc/ipc_print.h new file mode 100644 index 000000000..f35dff5fb --- /dev/null +++ b/osfmk/ipc/ipc_print.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:16 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.3 1995/02/23 17:31:31 alanl + * DIPC: Merge from nmk17b2 to nmk18b8. + * [95/01/03 mmp] + * + * Revision 1.1.7.3 1994/11/29 01:21:22 robert + * re-submit for failed CF backup + * [1994/11/29 01:17:55 robert] + * + * Revision 1.1.7.2 1994/11/28 23:58:36 travos + * Add MACH_KDB ifdef. + * [1994/11/28 23:53:46 travos] + * + * Revision 1.1.7.1 1994/08/04 02:22:55 mmp + * NOTE: file was moved back to b11 version for dipc2_shared. + * Update prototype for ipc_port_print. + * [1994/08/03 19:26:56 mmp] + * + * Revision 1.1.8.2 1994/09/23 02:10:26 ezf + * change marker to not FREE + * [1994/09/22 21:30:09 ezf] + * + * Revision 1.1.8.1 1994/08/07 20:46:08 bolinger + * Merge up to colo_b7. + * [1994/08/01 20:59:21 bolinger] + * + * Revision 1.1.2.2 1993/08/02 16:12:25 jeffc + * CR9523 -- New file to hold prototypes for ddb print + * functions in the ipc system. 
+ * [1993/07/29 20:13:45 jeffc] + * + * $EndLog$ + */ + +#ifndef IPC_PRINT_H +#define IPC_PRINT_H + +#include +#include + +extern void ipc_pset_print( + ipc_pset_t pset); + +#include + +#if MACH_KDB +#include + +extern void ipc_port_print( + ipc_port_t port, + boolean_t have_addr, + db_expr_t count, + char *modif); + +#include + +extern void ipc_kmsg_print( + ipc_kmsg_t kmsg); + +#include + +extern void ipc_msg_print( + mach_msg_header_t *msgh); + +extern ipc_port_t ipc_name_to_data( + task_t task, + mach_port_name_t name); + +#endif /* MACH_KDB */ +#endif /* IPC_PRINT_H */ diff --git a/osfmk/ipc/ipc_pset.c b/osfmk/ipc/ipc_pset.c new file mode 100644 index 000000000..01a4cfa55 --- /dev/null +++ b/osfmk/ipc/ipc_pset.c @@ -0,0 +1,361 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_pset.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate IPC port sets. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Routine: ipc_pset_alloc + * Purpose: + * Allocate a port set. + * Conditions: + * Nothing locked. If successful, the port set is returned + * locked. (The caller doesn't have a reference.) + * Returns: + * KERN_SUCCESS The port set is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NO_SPACE No room for an entry in the space. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +ipc_pset_alloc( + ipc_space_t space, + mach_port_name_t *namep, + ipc_pset_t *psetp) +{ + ipc_pset_t pset; + mach_port_name_t name; + kern_return_t kr; + + kr = ipc_object_alloc(space, IOT_PORT_SET, + MACH_PORT_TYPE_PORT_SET, 0, + &name, (ipc_object_t *) &pset); + if (kr != KERN_SUCCESS) + return kr; + /* pset is locked */ + + pset->ips_local_name = name; + pset->ips_pset_self = pset; + ipc_mqueue_init(&pset->ips_messages, TRUE /* set */); + thread_pool_init(&pset->ips_thread_pool); + + *namep = name; + *psetp = pset; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_pset_alloc_name + * Purpose: + * Allocate a port set, with a specific name. + * Conditions: + * Nothing locked. If successful, the port set is returned + * locked. (The caller doesn't have a reference.) + * Returns: + * KERN_SUCCESS The port set is allocated. + * KERN_INVALID_TASK The space is dead. + * KERN_NAME_EXISTS The name already denotes a right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +ipc_pset_alloc_name( + ipc_space_t space, + mach_port_name_t name, + ipc_pset_t *psetp) +{ + ipc_pset_t pset; + kern_return_t kr; + + + kr = ipc_object_alloc_name(space, IOT_PORT_SET, + MACH_PORT_TYPE_PORT_SET, 0, + name, (ipc_object_t *) &pset); + if (kr != KERN_SUCCESS) + return kr; + /* pset is locked */ + + pset->ips_local_name = name; + pset->ips_pset_self = pset; + ipc_mqueue_init(&pset->ips_messages, TRUE /* set */); + thread_pool_init(&pset->ips_thread_pool); + + *psetp = pset; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_pset_member + * Purpose: + * Checks to see if a port is a member of a pset + * Conditions: + * Both port and port set are locked. + * The port must be active. + */ +boolean_t +ipc_pset_member( + ipc_pset_t pset, + ipc_port_t port) +{ + assert(ip_active(port)); + + return (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages)); +} + + +/* + * Routine: ipc_pset_add + * Purpose: + * Puts a port into a port set. 
+ * The port set gains a reference. + * Conditions: + * Both port and port set are locked and active. + * The owner of the port set is also receiver for the port. + */ + +kern_return_t +ipc_pset_add( + ipc_pset_t pset, + ipc_port_t port) +{ + assert(ips_active(pset)); + assert(ip_active(port)); + + if (ipc_pset_member(pset, port)) + return KERN_ALREADY_IN_SET; + + ips_reference(pset); + port->ip_pset_count++; + + ipc_mqueue_add(&port->ip_messages, &pset->ips_messages); + + return KERN_SUCCESS; +} + + + +/* + * Routine: ipc_pset_remove + * Purpose: + * Removes a port from a port set. + * The port set loses a reference. + * Conditions: + * Both port and port set are locked. + * The port must be active. + */ + +kern_return_t +ipc_pset_remove( + ipc_pset_t pset, + ipc_port_t port) +{ + mach_msg_return_t mr; + + assert(ip_active(port)); + + if (port->ip_pset_count == 0) + return KERN_NOT_IN_SET; + + mr = ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages); + + if (mr == MACH_MSG_SUCCESS) { + port->ip_pset_count--; + ips_release(pset); + } + return mr; +} + +/* + * Routine: ipc_pset_remove_all + * Purpose: + * Removes a port from all it's port sets. + * Each port set loses a reference. + * Conditions: + * port is locked and active. + */ + +kern_return_t +ipc_pset_remove_all( + ipc_port_t port) +{ + ipc_pset_mqueue_t pset_mqueue; + ipc_pset_t pset; + + assert(ip_active(port)); + + if (port->ip_pset_count == 0) + return KERN_NOT_IN_SET; + + /* + * Remove each port set's mqueue from the port's (one at a time). + */ + while (port->ip_pset_count > 0) { + ipc_mqueue_remove_one(&port->ip_messages, + (ipc_mqueue_t)&pset_mqueue); + assert(pset_mqueue != (ipc_pset_mqueue_t)0); + port->ip_pset_count--; + + pset = pset_mqueue->ipsm_pset; + ipc_pset_release(pset); /* locks and unlocks pset */ + + } + + assert(port->ip_pset_count == 0); + return KERN_SUCCESS; +} + + +/* + * Routine: ipc_pset_destroy + * Purpose: + * Destroys a port_set. 
+ * + * Doesn't remove members from the port set; + * that happens lazily. + * Conditions: + * The port_set is locked and alive. + * The caller has a reference, which is consumed. + * Afterwards, the port_set is unlocked and dead. + */ + +void +ipc_pset_destroy( + ipc_pset_t pset) +{ + spl_t s; + + assert(ips_active(pset)); + + pset->ips_object.io_bits &= ~IO_BITS_ACTIVE; + + s = splsched(); + imq_lock(&pset->ips_messages); + ipc_mqueue_changed(&pset->ips_messages); + imq_unlock(&pset->ips_messages); + splx(s); + + /* XXXX Perhaps ought to verify ips_thread_pool is empty */ + + ips_release(pset); /* consume the ref our caller gave us */ + ips_check_unlock(pset); +} + +#include +#if MACH_KDB + +#include + +#define printf kdbprintf + +int +ipc_list_count( + struct ipc_kmsg *base) +{ + register int count = 0; + + if (base) { + struct ipc_kmsg *kmsg = base; + + ++count; + while (kmsg && kmsg->ikm_next != base + && kmsg->ikm_next != IKM_BOGUS){ + kmsg = kmsg->ikm_next; + ++count; + } + } + return(count); +} + +/* + * Routine: ipc_pset_print + * Purpose: + * Pretty-print a port set for kdb. + */ + +void +ipc_pset_print( + ipc_pset_t pset) +{ + extern int db_indent; + + printf("pset 0x%x\n", pset); + + db_indent += 2; + + ipc_object_print(&pset->ips_object); + iprintf("local_name = 0x%x\n", pset->ips_local_name); + iprintf("%d kmsgs => 0x%x", + ipc_list_count(pset->ips_messages.imq_messages.ikmq_base), + pset->ips_messages.imq_messages.ikmq_base); + printf(",rcvrs queue= 0x%x\n", &pset->ips_messages.imq_wait_queue); + + db_indent -=2; +} + +#endif /* MACH_KDB */ diff --git a/osfmk/ipc/ipc_pset.h b/osfmk/ipc/ipc_pset.h new file mode 100644 index 000000000..0b5b568c3 --- /dev/null +++ b/osfmk/ipc/ipc_pset.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: ipc/ipc_pset.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for port sets. + */ + +#ifndef _IPC_IPC_PSET_H_ +#define _IPC_IPC_PSET_H_ + +#include +#include +#include +#include +#include +#include + +#include + +typedef struct ipc_pset { + + /* + * Initial sub-structure in common with all ipc_objects. + */ + struct ipc_object ips_object; + + struct ipc_mqueue ips_messages; + struct ipc_pset *ips_pset_self; /* self (used from ipsm) */ +} *ipc_pset_t; + +typedef struct ipc_pset_mqueue { + struct ipc_mqueue ipsm_messages; + struct ipc_pset *ipsm_pset; +} *ipc_pset_mqueue_t; + +#define ips_references ips_object.io_references +#define ips_local_name ips_object.io_receiver_name +#define ips_thread_pool ips_object.io_thread_pool + +#define IPS_NULL ((ipc_pset_t) IO_NULL) + +#define ips_active(pset) io_active(&(pset)->ips_object) +#define ips_lock(pset) io_lock(&(pset)->ips_object) +#define ips_lock_try(pset) io_lock_try(&(pset)->ips_object) +#define ips_unlock(pset) io_unlock(&(pset)->ips_object) +#define ips_check_unlock(pset) io_check_unlock(&(pset)->ips_object) +#define ips_reference(pset) io_reference(&(pset)->ips_object) +#define ips_release(pset) io_release(&(pset)->ips_object) + +/* Allocate a port set */ +extern kern_return_t ipc_pset_alloc( + ipc_space_t space, + mach_port_name_t *namep, + ipc_pset_t *psetp); + +/* Allocate a port set, with a specific name */ +extern kern_return_t ipc_pset_alloc_name( + ipc_space_t space, + mach_port_name_t name, + ipc_pset_t *psetp); + +/* Add a port to a port set */ +extern kern_return_t ipc_pset_add( + ipc_pset_t pset, + ipc_port_t port); + +/* Remove a port from a port set */ +extern kern_return_t ipc_pset_remove( + ipc_pset_t pset, + ipc_port_t port); + +/* Remove a port from all its current port sets */ +extern kern_return_t ipc_pset_remove_all( + ipc_port_t port); + +/* Destroy a port_set */ +extern void ipc_pset_destroy( + ipc_pset_t pset); + +#define ipc_pset_reference(pset) \ + 
ipc_object_reference(&(pset)->ips_object) + +#define ipc_pset_release(pset) \ + ipc_object_release(&(pset)->ips_object) + + +#if MACH_KDB +int ipc_list_count(struct ipc_kmsg *base); +#endif /* MACH_KDB */ + +#endif /* _IPC_IPC_PSET_H_ */ diff --git a/osfmk/ipc/ipc_right.c b/osfmk/ipc/ipc_right.c new file mode 100644 index 000000000..f94bcc903 --- /dev/null +++ b/osfmk/ipc/ipc_right.c @@ -0,0 +1,2150 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_right.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate IPC capabilities. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Routine: ipc_right_lookup_write + * Purpose: + * Finds an entry in a space, given the name. + * Conditions: + * Nothing locked. If successful, the space is write-locked. + * Returns: + * KERN_SUCCESS Found an entry. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME Name doesn't exist in space. + */ + +kern_return_t +ipc_right_lookup_write( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp) +{ + ipc_entry_t entry; + + assert(space != IS_NULL); + + is_write_lock(space); + + if (!space->is_active) { + is_write_unlock(space); + return KERN_INVALID_TASK; + } + + if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) { + is_write_unlock(space); + return KERN_INVALID_NAME; + } + + *entryp = entry; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_lookup_two_write + * Purpose: + * Like ipc_right_lookup except that it returns two + * entries for two different names that were looked + * up under the same space lock. + * Conditions: + * Nothing locked. If successful, the space is write-locked. + * Returns: + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME Name doesn't exist in space. 
+ */ + +kern_return_t +ipc_right_lookup_two_write( + ipc_space_t space, + mach_port_name_t name1, + ipc_entry_t *entryp1, + mach_port_name_t name2, + ipc_entry_t *entryp2) +{ + ipc_entry_t entry1; + ipc_entry_t entry2; + + assert(space != IS_NULL); + + is_write_lock(space); + + if (!space->is_active) { + is_write_unlock(space); + return KERN_INVALID_TASK; + } + + if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) { + is_write_unlock(space); + return KERN_INVALID_NAME; + } + if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) { + is_write_unlock(space); + return KERN_INVALID_NAME; + } + *entryp1 = entry1; + *entryp2 = entry2; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_reverse + * Purpose: + * Translate (space, object) -> (name, entry). + * Only finds send/receive rights. + * Returns TRUE if an entry is found; if so, + * the object is locked and active. + * Conditions: + * The space must be locked (read or write) and active. + * Nothing else locked. + */ + +boolean_t +ipc_right_reverse( + ipc_space_t space, + ipc_object_t object, + mach_port_name_t *namep, + ipc_entry_t *entryp) +{ + ipc_port_t port; + mach_port_name_t name; + ipc_entry_t entry; + + /* would switch on io_otype to handle multiple types of object */ + + assert(space->is_active); + assert(io_otype(object) == IOT_PORT); + + port = (ipc_port_t) object; + + ip_lock(port); + if (!ip_active(port)) { + ip_unlock(port); + + return FALSE; + } + + if (port->ip_receiver == space) { + name = port->ip_receiver_name; + assert(name != MACH_PORT_NULL); + + entry = ipc_entry_lookup(space, name); + + assert(entry != IE_NULL); + assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE); + assert(port == (ipc_port_t) entry->ie_object); + + *namep = name; + *entryp = entry; + return TRUE; + } + + if (ipc_hash_lookup(space, (ipc_object_t) port, namep, entryp)) { + assert((entry = *entryp) != IE_NULL); + assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND); + assert(port == (ipc_port_t) 
entry->ie_object); + + return TRUE; + } + + ip_unlock(port); + return FALSE; +} + +/* + * Routine: ipc_right_dnrequest + * Purpose: + * Make a dead-name request, returning the previously + * registered send-once right. If notify is IP_NULL, + * just cancels the previously registered request. + * + * This interacts with the IE_BITS_COMPAT, because they + * both use ie_request. If this is a compat entry, then + * previous always gets IP_NULL. If notify is IP_NULL, + * then the entry remains a compat entry. Otherwise + * the real dead-name request is registered and the entry + * is no longer a compat entry. + * Conditions: + * Nothing locked. May allocate memory. + * Only consumes/returns refs if successful. + * Returns: + * KERN_SUCCESS Made/canceled dead-name request. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME Name doesn't exist in space. + * KERN_INVALID_RIGHT Name doesn't denote port/dead rights. + * KERN_INVALID_ARGUMENT Name denotes dead name, but + * immediate is FALSE or notify is IP_NULL. + * KERN_UREFS_OVERFLOW Name denotes dead name, but + * generating immediate notif. would overflow urefs. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +ipc_right_dnrequest( + ipc_space_t space, + mach_port_name_t name, + boolean_t immediate, + ipc_port_t notify, + ipc_port_t *previousp) +{ + ipc_port_t previous; + + for (;;) { + ipc_entry_t entry; + ipc_entry_bits_t bits; + kern_return_t kr; + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + bits = entry->ie_bits; + if (bits & MACH_PORT_TYPE_PORT_RIGHTS) { + ipc_port_t port; + ipc_port_request_index_t request; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (!ipc_right_check(space, port, name, entry)) { + /* port is locked and active */ + + if (notify == IP_NULL) { + previous = ipc_right_dncancel_macro( + space, port, name, entry); + + ip_unlock(port); + is_write_unlock(space); + break; + } + + /* + * If a registered soright exists, + * want to atomically switch with it. + * If ipc_port_dncancel finds us a + * soright, then the following + * ipc_port_dnrequest will reuse + * that slot, so we are guaranteed + * not to unlock and retry. + */ + + previous = ipc_right_dncancel_macro(space, + port, name, entry); + + kr = ipc_port_dnrequest(port, name, notify, + &request); + if (kr != KERN_SUCCESS) { + assert(previous == IP_NULL); + is_write_unlock(space); + + kr = ipc_port_dngrow(port, + ITS_SIZE_NONE); + /* port is unlocked */ + if (kr != KERN_SUCCESS) + return kr; + + continue; + } + + assert(request != 0); + ip_unlock(port); + + entry->ie_request = request; + is_write_unlock(space); + break; + } else { + + /* + * Our capability bits were changed by ipc_right_check + * because it found an inactive port and removed our + * references to it (converting our entry into a dead + * one). Reload the bits (and obviously we can't use + * the port name anymore). 
+ */ + bits = entry->ie_bits; + + } + + assert(bits & MACH_PORT_TYPE_DEAD_NAME); + } + + if ((bits & MACH_PORT_TYPE_DEAD_NAME) && + immediate && (notify != IP_NULL)) { + mach_port_urefs_t urefs = IE_BITS_UREFS(bits); + + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + assert(urefs > 0); + + if (MACH_PORT_UREFS_OVERFLOW(urefs, 1)) { + is_write_unlock(space); + return KERN_UREFS_OVERFLOW; + } + + (entry->ie_bits)++; /* increment urefs */ + is_write_unlock(space); + + ipc_notify_dead_name(notify, name); + previous = IP_NULL; + break; + } + + is_write_unlock(space); + if (bits & MACH_PORT_TYPE_PORT_OR_DEAD) + return KERN_INVALID_ARGUMENT; + else + return KERN_INVALID_RIGHT; + } + + *previousp = previous; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_dncancel + * Purpose: + * Cancel a dead-name request and return the send-once right. + * Afterwards, entry->ie_request == 0. + * Conditions: + * The space must be write-locked; the port must be locked. + * The port must be active; the space doesn't have to be. + */ + +ipc_port_t +ipc_right_dncancel( + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry) +{ + ipc_port_t dnrequest; + + assert(ip_active(port)); + assert(port == (ipc_port_t) entry->ie_object); + + dnrequest = ipc_port_dncancel(port, name, entry->ie_request); + entry->ie_request = 0; + + return dnrequest; +} + +/* + * Routine: ipc_right_inuse + * Purpose: + * Check if an entry is being used. + * Returns TRUE if it is. + * Conditions: + * The space is write-locked and active. + * It is unlocked if the entry is inuse. + */ + +boolean_t +ipc_right_inuse( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) +{ + if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE) { + is_write_unlock(space); + return TRUE; + } + return FALSE; +} + +/* + * Routine: ipc_right_check + * Purpose: + * Check if the port has died. If it has, + * clean up the entry and return TRUE. 
+ * Conditions: + * The space is write-locked; the port is not locked. + * If returns FALSE, the port is also locked and active. + * Otherwise, entry is converted to a dead name, freeing + * a reference to port. + */ + +boolean_t +ipc_right_check( + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry) +{ + ipc_entry_bits_t bits; + + assert(space->is_active); + assert(port == (ipc_port_t) entry->ie_object); + + ip_lock(port); + if (ip_active(port)) + return FALSE; + ip_unlock(port); + + /* this was either a pure send right or a send-once right */ + + bits = entry->ie_bits; + assert((bits & MACH_PORT_TYPE_RECEIVE) == 0); + assert(IE_BITS_UREFS(bits) > 0); + + if (bits & MACH_PORT_TYPE_SEND) { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); + assert(IE_BITS_UREFS(bits) == 1); + } + + + ipc_port_release(port); + + /* convert entry to dead name */ + + if ((bits & MACH_PORT_TYPE_SEND) && !(bits & MACH_PORT_TYPE_RECEIVE)) + ipc_hash_delete(space, (ipc_object_t)port, name, entry); + + bits = (bits &~ IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME; + + /* + * If there was a notification request outstanding on this + * name, and since the port went dead, that notification + * must already be on its way up from the port layer. We + * don't need the index of the notification port anymore. + * + * JMM - We also add a reference to the entry since the + * notification only carries the name and NOT a reference + * (or right). This makes for pretty loose reference + * counting, since it is only happenstance that we + * detected the notification in progress like this. + * But most (all?) 
calls that try to deal with this entry + * will also come through here, so the reference gets added + * before the entry gets used eventually (I would rather it + * be explicit in the notification generation, though) + */ + if (entry->ie_request != 0) { + assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); + entry->ie_request = 0; + bits++; + } + entry->ie_bits = bits; + entry->ie_object = IO_NULL; + return TRUE; +} + +/* + * Routine: ipc_right_clean + * Purpose: + * Cleans up an entry in a dead space. + * The entry isn't deallocated or removed + * from reverse hash tables. + * Conditions: + * The space is dead and unlocked. + */ + +void +ipc_right_clean( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) +{ + ipc_entry_bits_t bits; + mach_port_type_t type; + + bits = entry->ie_bits; + type = IE_BITS_TYPE(bits); + + assert(!space->is_active); + + /* + * IE_BITS_COMPAT/ipc_right_dncancel doesn't have this + * problem, because we check that the port is active. If + * we didn't cancel IE_BITS_COMPAT, ipc_port_destroy + * would still work, but dead space refs would accumulate + * in ip_dnrequests. They would use up slots in + * ip_dnrequests and keep the spaces from being freed. 
+ */ + + switch (type) { + case MACH_PORT_TYPE_DEAD_NAME: + assert(entry->ie_request == 0); + assert(entry->ie_object == IO_NULL); + break; + + case MACH_PORT_TYPE_PORT_SET: { + ipc_pset_t pset = (ipc_pset_t) entry->ie_object; + + assert(entry->ie_request == 0); + assert(pset != IPS_NULL); + + ips_lock(pset); + assert(ips_active(pset)); + + ipc_pset_destroy(pset); /* consumes ref, unlocks */ + break; + } + + case MACH_PORT_TYPE_SEND: + case MACH_PORT_TYPE_RECEIVE: + case MACH_PORT_TYPE_SEND_RECEIVE: + case MACH_PORT_TYPE_SEND_ONCE: { + ipc_port_t port = (ipc_port_t) entry->ie_object; + ipc_port_t dnrequest; + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + + assert(port != IP_NULL); + ip_lock(port); + + if (!ip_active(port)) { + ip_release(port); + ip_check_unlock(port); + break; + } + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + + if (type & MACH_PORT_TYPE_SEND) { + assert(port->ip_srights > 0); + if (--port->ip_srights == 0 + ) { + nsrequest = port->ip_nsrequest; + if (nsrequest != IP_NULL) { + port->ip_nsrequest = IP_NULL; + mscount = port->ip_mscount; + } + } + } + + if (type & MACH_PORT_TYPE_RECEIVE) { + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + + ipc_port_clear_receiver(port); + ipc_port_destroy(port); /* consumes our ref, unlocks */ + } else if (type & MACH_PORT_TYPE_SEND_ONCE) { + assert(port->ip_sorights > 0); + ip_unlock(port); + + ipc_notify_send_once(port); /* consumes our ref */ + } else { + assert(port->ip_receiver != space); + + ip_release(port); + ip_unlock(port); /* port is active */ + } + + if (nsrequest != IP_NULL) + ipc_notify_no_senders(nsrequest, mscount); + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + default: + panic("ipc_right_clean: strange type"); + } +} + +/* + * Routine: ipc_right_destroy + * Purpose: + * Destroys an entry in a space. + * Conditions: + * The space is write-locked. + * The space must be active. 
+ * Returns: + * KERN_SUCCESS The entry was destroyed. + */ + +kern_return_t +ipc_right_destroy( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) +{ + ipc_entry_bits_t bits; + mach_port_type_t type; + + bits = entry->ie_bits; + entry->ie_bits &= ~IE_BITS_TYPE_MASK; + type = IE_BITS_TYPE(bits); + + assert(space->is_active); + + switch (type) { + case MACH_PORT_TYPE_DEAD_NAME: + assert(entry->ie_request == 0); + assert(entry->ie_object == IO_NULL); + + ipc_entry_dealloc(space, name, entry); + break; + + case MACH_PORT_TYPE_PORT_SET: { + ipc_pset_t pset = (ipc_pset_t) entry->ie_object; + + assert(entry->ie_request == 0); + assert(pset != IPS_NULL); + + entry->ie_object = IO_NULL; + /* port sets are not sharable entries on a subspace basis */ + /* so there is no need to check the subspace array here */ + ipc_entry_dealloc(space, name, entry); + + ips_lock(pset); + assert(ips_active(pset)); + + ipc_pset_destroy(pset); /* consumes ref, unlocks */ + break; + } + + case MACH_PORT_TYPE_SEND: + case MACH_PORT_TYPE_RECEIVE: + case MACH_PORT_TYPE_SEND_RECEIVE: + case MACH_PORT_TYPE_SEND_ONCE: { + ipc_port_t port = (ipc_port_t) entry->ie_object; + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + ipc_port_t dnrequest; + + assert(port != IP_NULL); + + if (type == MACH_PORT_TYPE_SEND) + ipc_hash_delete(space, (ipc_object_t) port, + name, entry); + + ip_lock(port); + + if (!ip_active(port)) { + assert((type & MACH_PORT_TYPE_RECEIVE) == 0); + ip_release(port); + ip_check_unlock(port); + + entry->ie_request = 0; + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + + break; + } + + dnrequest = ipc_right_dncancel_macro(space, port, name, entry); + + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + + if (type & MACH_PORT_TYPE_SEND) { + assert(port->ip_srights > 0); + if (--port->ip_srights == 0) { + nsrequest = port->ip_nsrequest; + if (nsrequest != IP_NULL) { + port->ip_nsrequest = IP_NULL; + mscount = 
port->ip_mscount; + } + } + } + + if (type & MACH_PORT_TYPE_RECEIVE) { + assert(ip_active(port)); + assert(port->ip_receiver == space); + + if (port->ip_subsystem) + subsystem_deallocate( + port->ip_subsystem->subsystem); + ipc_port_clear_receiver(port); + ipc_port_destroy(port); /* consumes our ref, unlocks */ + } else if (type & MACH_PORT_TYPE_SEND_ONCE) { + assert(port->ip_sorights > 0); + ip_unlock(port); + + ipc_notify_send_once(port); /* consumes our ref */ + } else { + assert(port->ip_receiver != space); + + ip_release(port); + ip_unlock(port); + } + + if (nsrequest != IP_NULL) + ipc_notify_no_senders(nsrequest, mscount); + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + default: + panic("ipc_right_destroy: strange type"); + } + + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_dealloc + * Purpose: + * Releases a send/send-once/dead-name user ref. + * Like ipc_right_delta with a delta of -1, + * but looks at the entry to determine the right. + * Conditions: + * The space is write-locked, and is unlocked upon return. + * The space must be active. + * Returns: + * KERN_SUCCESS A user ref was released. + * KERN_INVALID_RIGHT Entry has wrong type. 
+ */ + +kern_return_t +ipc_right_dealloc( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) +{ + + ipc_entry_bits_t bits; + mach_port_type_t type; + + bits = entry->ie_bits; + type = IE_BITS_TYPE(bits); + + + assert(space->is_active); + + switch (type) { + case MACH_PORT_TYPE_DEAD_NAME: { + dead_name: + + assert(IE_BITS_UREFS(bits) > 0); + assert(entry->ie_request == 0); + assert(entry->ie_object == IO_NULL); + + if (IE_BITS_UREFS(bits) == 1) { + ipc_entry_dealloc(space, name, entry); + } + else + entry->ie_bits = bits-1; /* decrement urefs */ + + is_write_unlock(space); + break; + } + + case MACH_PORT_TYPE_SEND_ONCE: { + ipc_port_t port, dnrequest; + + assert(IE_BITS_UREFS(bits) == 1); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + + bits = entry->ie_bits; + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + goto dead_name; + } + /* port is locked and active */ + + assert(port->ip_sorights > 0); + + dnrequest = ipc_right_dncancel_macro(space, port, name, entry); + ip_unlock(port); + + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + + is_write_unlock(space); + + ipc_notify_send_once(port); + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + case MACH_PORT_TYPE_SEND: { + ipc_port_t port; + ipc_port_t dnrequest = IP_NULL; + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + + + assert(IE_BITS_UREFS(bits) > 0); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + bits = entry->ie_bits; + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + goto dead_name; + } + /* port is locked and active */ + + assert(port->ip_srights > 0); + + if (IE_BITS_UREFS(bits) == 1) { + if (--port->ip_srights == 0) { + nsrequest = port->ip_nsrequest; + if (nsrequest != IP_NULL) { + port->ip_nsrequest = IP_NULL; + mscount = port->ip_mscount; + 
} + } + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + ipc_hash_delete(space, (ipc_object_t) port, + name, entry); + + ip_release(port); + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + + } else + entry->ie_bits = bits-1; /* decrement urefs */ + + /* even if dropped a ref, port is active */ + ip_unlock(port); + is_write_unlock(space); + + if (nsrequest != IP_NULL) + ipc_notify_no_senders(nsrequest, mscount); + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + case MACH_PORT_TYPE_SEND_RECEIVE: { + ipc_port_t port; + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + + assert(IE_BITS_UREFS(bits) > 0); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + assert(port->ip_srights > 0); + + if (IE_BITS_UREFS(bits) == 1) { + if (--port->ip_srights == 0) { + nsrequest = port->ip_nsrequest; + if (nsrequest != IP_NULL) { + port->ip_nsrequest = IP_NULL; + mscount = port->ip_mscount; + } + } + + entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK | + MACH_PORT_TYPE_SEND); + } else + entry->ie_bits = bits-1; /* decrement urefs */ + + ip_unlock(port); + is_write_unlock(space); + + if (nsrequest != IP_NULL) + ipc_notify_no_senders(nsrequest, mscount); + break; + } + + default: + is_write_unlock(space); + return KERN_INVALID_RIGHT; + } + + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_delta + * Purpose: + * Modifies the user-reference count for a right. + * May deallocate the right, if the count goes to zero. + * Conditions: + * The space is write-locked, and is unlocked upon return. + * The space must be active. + * Returns: + * KERN_SUCCESS Count was modified. + * KERN_INVALID_RIGHT Entry has wrong type. + * KERN_INVALID_VALUE Bad delta for the right. + * KERN_UREFS_OVERFLOW OK delta, except would overflow. 
+ */ + +kern_return_t +ipc_right_delta( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_right_t right, + mach_port_delta_t delta) +{ + ipc_entry_bits_t bits; + + bits = entry->ie_bits; + + +/* + * The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the + * switch below. It is used to keep track of those cases (in DIPC) + * where we have postponed the dropping of a port reference. Since + * the dropping of the reference could cause the port to disappear + * we postpone doing so when we are holding the space lock. + */ + + assert(space->is_active); + assert(right < MACH_PORT_RIGHT_NUMBER); + + /* Rights-specific restrictions and operations. */ + + switch (right) { + case MACH_PORT_RIGHT_PORT_SET: { + ipc_pset_t pset; + + if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) + goto invalid_right; + + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET); + assert(IE_BITS_UREFS(bits) == 0); + assert(entry->ie_request == 0); + + if (delta == 0) + goto success; + + if (delta != -1) + goto invalid_value; + + pset = (ipc_pset_t) entry->ie_object; + assert(pset != IPS_NULL); + + + + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + + + ips_lock(pset); + assert(ips_active(pset)); + is_write_unlock(space); + + ipc_pset_destroy(pset); /* consumes ref, unlocks */ + break; + } + + case MACH_PORT_RIGHT_RECEIVE: { + ipc_port_t port; + ipc_port_t dnrequest = IP_NULL; + + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + goto invalid_right; + + if (delta == 0) + goto success; + + if (delta != -1) + goto invalid_value; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + /* + * The port lock is needed for ipc_right_dncancel; + * otherwise, we wouldn't have to take the lock + * until just before dropping the space lock. 
+ */ + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + + if (bits & MACH_PORT_TYPE_SEND) { + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND_RECEIVE); + assert(IE_BITS_UREFS(bits) > 0); + assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); + assert(port->ip_srights > 0); + + /* + * The remaining send right turns into a + * dead name. Notice we don't decrement + * ip_srights, generate a no-senders notif, + * or use ipc_right_dncancel, because the + * port is destroyed "first". + */ + bits &= ~IE_BITS_TYPE_MASK; + bits |= MACH_PORT_TYPE_DEAD_NAME; + if (entry->ie_request) { + entry->ie_request = 0; + bits++; + } + entry->ie_bits = bits; + entry->ie_object = IO_NULL; + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); + assert(IE_BITS_UREFS(bits) == 0); + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + } + is_write_unlock(space); + + ipc_port_clear_receiver(port); + ipc_port_destroy(port); /* consumes ref, unlocks */ + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + case MACH_PORT_RIGHT_SEND_ONCE: { + ipc_port_t port, dnrequest; + + if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) + goto invalid_right; + + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); + assert(IE_BITS_UREFS(bits) == 1); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE)); + goto invalid_right; + } + /* port is locked and active */ + + assert(port->ip_sorights > 0); + + if ((delta > 0) || (delta < -1)) { + ip_unlock(port); + goto invalid_value; + } + + if (delta == 0) { + ip_unlock(port); + goto success; + } + + dnrequest = ipc_right_dncancel_macro(space, port, name, entry); + ip_unlock(port); + + entry->ie_object = IO_NULL; + 
ipc_entry_dealloc(space, name, entry); + + is_write_unlock(space); + + ipc_notify_send_once(port); + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + case MACH_PORT_RIGHT_DEAD_NAME: { + mach_port_urefs_t urefs; + + if (bits & MACH_PORT_TYPE_SEND_RIGHTS) { + ipc_port_t port; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (!ipc_right_check(space, port, name, entry)) { + /* port is locked and active */ + ip_unlock(port); + goto invalid_right; + } + bits = entry->ie_bits; + } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) + goto invalid_right; + + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + assert(IE_BITS_UREFS(bits) > 0); + assert(entry->ie_object == IO_NULL); + assert(entry->ie_request == 0); + + urefs = IE_BITS_UREFS(bits); + if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) + goto invalid_value; + if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) + goto urefs_overflow; + + if ((urefs + delta) == 0) { + ipc_entry_dealloc(space, name, entry); + } + else + entry->ie_bits = bits + delta; + + is_write_unlock(space); + + break; + } + + case MACH_PORT_RIGHT_SEND: { + mach_port_urefs_t urefs; + ipc_port_t port; + ipc_port_t dnrequest = IP_NULL; + ipc_port_t nsrequest = IP_NULL; + mach_port_mscount_t mscount; + + if ((bits & MACH_PORT_TYPE_SEND) == 0) + goto invalid_right; + + /* maximum urefs for send is MACH_PORT_UREFS_MAX-1 */ + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0); + goto invalid_right; + } + /* port is locked and active */ + + assert(port->ip_srights > 0); + + urefs = IE_BITS_UREFS(bits); + if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) { + ip_unlock(port); + goto invalid_value; + } + if (MACH_PORT_UREFS_OVERFLOW(urefs+1, delta)) { + ip_unlock(port); + goto urefs_overflow; + } + + if ((urefs + delta) == 0) { + if (--port->ip_srights == 0) { + nsrequest = 
port->ip_nsrequest; + if (nsrequest != IP_NULL) { + port->ip_nsrequest = IP_NULL; + mscount = port->ip_mscount; + } + } + + if (bits & MACH_PORT_TYPE_RECEIVE) { + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND_RECEIVE); + + entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK| + MACH_PORT_TYPE_SEND); + } else { + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND); + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + ipc_hash_delete(space, (ipc_object_t) port, + name, entry); + + ip_release(port); + + entry->ie_object = IO_NULL; + ipc_entry_dealloc(space, name, entry); + } + } else + entry->ie_bits = bits + delta; + + /* even if dropped a ref, port is active */ + ip_unlock(port); + is_write_unlock(space); + + if (nsrequest != IP_NULL) + ipc_notify_no_senders(nsrequest, mscount); + + if (dnrequest != IP_NULL) + ipc_notify_port_deleted(dnrequest, name); + break; + } + + default: + panic("ipc_right_delta: strange right"); + } + + return KERN_SUCCESS; + + success: + is_write_unlock(space); + return KERN_SUCCESS; + + invalid_right: + is_write_unlock(space); + return KERN_INVALID_RIGHT; + + invalid_value: + is_write_unlock(space); + return KERN_INVALID_VALUE; + + urefs_overflow: + is_write_unlock(space); + return KERN_UREFS_OVERFLOW; +} + +/* + * Routine: ipc_right_info + * Purpose: + * Retrieves information about the right. + * Conditions: + * The space is write-locked, and is unlocked upon return + * if the call is unsuccessful. The space must be active. + * Returns: + * KERN_SUCCESS Retrieved info; space still locked. 
+ */ + +kern_return_t +ipc_right_info( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_type_t *typep, + mach_port_urefs_t *urefsp) +{ + ipc_entry_bits_t bits; + mach_port_type_t type; + ipc_port_request_index_t request; + + bits = entry->ie_bits; + + if (bits & MACH_PORT_TYPE_SEND_RIGHTS) { + ipc_port_t port = (ipc_port_t) entry->ie_object; + + if (ipc_right_check(space, port, name, entry)) { + bits = entry->ie_bits; + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + } else + ip_unlock(port); + } + + type = IE_BITS_TYPE(bits); + request = entry->ie_request; + + if (request != 0) + type |= MACH_PORT_TYPE_DNREQUEST; + + *typep = type; + *urefsp = IE_BITS_UREFS(bits); + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_copyin_check + * Purpose: + * Check if a subsequent ipc_right_copyin would succeed. + * Conditions: + * The space is locked (read or write) and active. + */ + +boolean_t +ipc_right_copyin_check( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name) +{ + ipc_entry_bits_t bits; + + bits= entry->ie_bits; + assert(space->is_active); + + switch (msgt_name) { + case MACH_MSG_TYPE_MAKE_SEND: + case MACH_MSG_TYPE_MAKE_SEND_ONCE: + case MACH_MSG_TYPE_MOVE_RECEIVE: + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + return FALSE; + + break; + + case MACH_MSG_TYPE_COPY_SEND: + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: { + ipc_port_t port; + boolean_t active; + + if (bits & MACH_PORT_TYPE_DEAD_NAME) + break; + + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + return FALSE; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + ip_lock(port); + active = ip_active(port); + ip_unlock(port); + + if (!active) { + break; + } + + if (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE) { + if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) + return FALSE; + } else { + if ((bits & MACH_PORT_TYPE_SEND) == 0) + return FALSE; + } + + break; + } + + default: + 
panic("ipc_right_copyin_check: strange rights"); + } + + return TRUE; +} + +/* + * Routine: ipc_right_copyin + * Purpose: + * Copyin a capability from a space. + * If successful, the caller gets a ref + * for the resulting object, unless it is IO_DEAD, + * and possibly a send-once right which should + * be used in a port-deleted notification. + * + * If deadok is not TRUE, the copyin operation + * will fail instead of producing IO_DEAD. + * + * The entry is never deallocated (except + * when KERN_INVALID_NAME), so the caller + * should deallocate the entry if its type + * is MACH_PORT_TYPE_NONE. + * Conditions: + * The space is write-locked and active. + * Returns: + * KERN_SUCCESS Acquired an object, possibly IO_DEAD. + * KERN_INVALID_RIGHT Name doesn't denote correct right. + */ + +kern_return_t +ipc_right_copyin( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + boolean_t deadok, + ipc_object_t *objectp, + ipc_port_t *sorightp) +{ + ipc_entry_bits_t bits; + + bits = entry->ie_bits; + + assert(space->is_active); + + switch (msgt_name) { + case MACH_MSG_TYPE_MAKE_SEND: { + ipc_port_t port; + + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + goto invalid_right; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + + port->ip_mscount++; + port->ip_srights++; + ip_reference(port); + ip_unlock(port); + + *objectp = (ipc_object_t) port; + *sorightp = IP_NULL; + break; + } + + case MACH_MSG_TYPE_MAKE_SEND_ONCE: { + ipc_port_t port; + + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + goto invalid_right; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + + port->ip_sorights++; + ip_reference(port); + ip_unlock(port); + + *objectp = 
(ipc_object_t) port; + *sorightp = IP_NULL; + break; + } + + case MACH_MSG_TYPE_MOVE_RECEIVE: { + ipc_port_t port; + ipc_port_t dnrequest = IP_NULL; + + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + goto invalid_right; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + + if (bits & MACH_PORT_TYPE_SEND) { + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND_RECEIVE); + assert(IE_BITS_UREFS(bits) > 0); + assert(port->ip_srights > 0); + + ipc_hash_insert(space, (ipc_object_t) port, + name, entry); + ip_reference(port); + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); + assert(IE_BITS_UREFS(bits) == 0); + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + entry->ie_object = IO_NULL; + } + entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE; + + ipc_port_clear_receiver(port); + + port->ip_receiver_name = MACH_PORT_NULL; + port->ip_destination = IP_NULL; + ip_unlock(port); + + *objectp = (ipc_object_t) port; + *sorightp = dnrequest; + break; + } + + case MACH_MSG_TYPE_COPY_SEND: { + ipc_port_t port; + + if (bits & MACH_PORT_TYPE_DEAD_NAME) + goto copy_dead; + + /* allow for dead send-once rights */ + + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + goto invalid_right; + + assert(IE_BITS_UREFS(bits) > 0); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + bits = entry->ie_bits; + goto copy_dead; + } + /* port is locked and active */ + + if ((bits & MACH_PORT_TYPE_SEND) == 0) { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); + assert(port->ip_sorights > 0); + + ip_unlock(port); + goto invalid_right; + } + + assert(port->ip_srights > 0); + + port->ip_srights++; + ip_reference(port); + ip_unlock(port); + + *objectp = (ipc_object_t) port; + *sorightp = IP_NULL; + break; + } + + case MACH_MSG_TYPE_MOVE_SEND: { + 
ipc_port_t port; + ipc_port_t dnrequest = IP_NULL; + + if (bits & MACH_PORT_TYPE_DEAD_NAME) + goto move_dead; + + /* allow for dead send-once rights */ + + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + goto invalid_right; + + assert(IE_BITS_UREFS(bits) > 0); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + bits = entry->ie_bits; + goto move_dead; + } + /* port is locked and active */ + + if ((bits & MACH_PORT_TYPE_SEND) == 0) { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); + assert(port->ip_sorights > 0); + + ip_unlock(port); + goto invalid_right; + } + + assert(port->ip_srights > 0); + + if (IE_BITS_UREFS(bits) == 1) { + if (bits & MACH_PORT_TYPE_RECEIVE) { + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND_RECEIVE); + + ip_reference(port); + } else { + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND); + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + ipc_hash_delete(space, (ipc_object_t) port, + name, entry); + entry->ie_object = IO_NULL; + } + entry->ie_bits = bits &~ + (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND); + } else { + port->ip_srights++; + ip_reference(port); + entry->ie_bits = bits-1; /* decrement urefs */ + } + + ip_unlock(port); + + *objectp = (ipc_object_t) port; + *sorightp = dnrequest; + break; + } + + case MACH_MSG_TYPE_MOVE_SEND_ONCE: { + ipc_port_t port; + ipc_port_t dnrequest; + + if (bits & MACH_PORT_TYPE_DEAD_NAME) + goto move_dead; + + /* allow for dead send rights */ + + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + goto invalid_right; + + assert(IE_BITS_UREFS(bits) > 0); + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + bits = entry->ie_bits; + goto move_dead; + } + /* port is locked and active */ + + if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) { + assert(bits & 
MACH_PORT_TYPE_SEND); + assert(port->ip_srights > 0); + + ip_unlock(port); + goto invalid_right; + } + + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); + assert(IE_BITS_UREFS(bits) == 1); + assert(port->ip_sorights > 0); + + dnrequest = ipc_right_dncancel_macro(space, port, name, entry); + ip_unlock(port); + + entry->ie_object = IO_NULL; + entry->ie_bits = bits &~ + (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND_ONCE); + + *objectp = (ipc_object_t) port; + *sorightp = dnrequest; + break; + } + + default: + invalid_right: + return KERN_INVALID_RIGHT; + } + + return KERN_SUCCESS; + + copy_dead: + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + assert(IE_BITS_UREFS(bits) > 0); + assert(entry->ie_request == 0); + assert(entry->ie_object == 0); + + if (!deadok) + goto invalid_right; + + *objectp = IO_DEAD; + *sorightp = IP_NULL; + return KERN_SUCCESS; + + move_dead: + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + assert(IE_BITS_UREFS(bits) > 0); + assert(entry->ie_request == 0); + assert(entry->ie_object == 0); + + if (!deadok) + goto invalid_right; + + if (IE_BITS_UREFS(bits) == 1) { + bits &= ~MACH_PORT_TYPE_DEAD_NAME; + } + entry->ie_bits = bits-1; /* decrement urefs */ + + *objectp = IO_DEAD; + *sorightp = IP_NULL; + return KERN_SUCCESS; + +} + +/* + * Routine: ipc_right_copyin_undo + * Purpose: + * Undoes the effects of an ipc_right_copyin + * of a send/send-once right that is dead. + * (Object is either IO_DEAD or a dead port.) + * Conditions: + * The space is write-locked and active. 
+ */ + +void +ipc_right_copyin_undo( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + ipc_object_t object, + ipc_port_t soright) +{ + ipc_entry_bits_t bits; + + bits = entry->ie_bits; + + assert(space->is_active); + + assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || + (msgt_name == MACH_MSG_TYPE_COPY_SEND) || + (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); + + if (soright != IP_NULL) { + assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || + (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); + assert(object != IO_DEAD); + + entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) | + MACH_PORT_TYPE_DEAD_NAME | 2); + + } else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE) { + assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || + (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); + + entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) | + MACH_PORT_TYPE_DEAD_NAME | 1); + } else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME) { + assert(object == IO_DEAD); + assert(IE_BITS_UREFS(bits) > 0); + + if (msgt_name != MACH_MSG_TYPE_COPY_SEND) { + assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); + entry->ie_bits = bits+1; /* increment urefs */ + } + } else { + assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || + (msgt_name == MACH_MSG_TYPE_COPY_SEND)); + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); + assert(object != IO_DEAD); + assert(entry->ie_object == object); + assert(IE_BITS_UREFS(bits) > 0); + + if (msgt_name != MACH_MSG_TYPE_COPY_SEND) { + assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX-1); + entry->ie_bits = bits+1; /* increment urefs */ + } + + /* + * May as well convert the entry to a dead name. + * (Or if it is a compat entry, destroy it.) 
+ */ + + (void) ipc_right_check(space, (ipc_port_t) object, + name, entry); + /* object is dead so it is not locked */ + } + + /* release the reference acquired by copyin */ + + if (object != IO_DEAD) + ipc_object_release(object); +} + +/* + * Routine: ipc_right_copyin_two + * Purpose: + * Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND + * and deadok == FALSE, except that this moves two + * send rights at once. + * Conditions: + * The space is write-locked and active. + * The object is returned with two refs/send rights. + * Returns: + * KERN_SUCCESS Acquired an object. + * KERN_INVALID_RIGHT Name doesn't denote correct right. + */ + +kern_return_t +ipc_right_copyin_two( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + ipc_object_t *objectp, + ipc_port_t *sorightp) +{ + ipc_entry_bits_t bits; + mach_port_urefs_t urefs; + ipc_port_t port; + ipc_port_t dnrequest = IP_NULL; + + assert(space->is_active); + + bits = entry->ie_bits; + + if ((bits & MACH_PORT_TYPE_SEND) == 0) + goto invalid_right; + + urefs = IE_BITS_UREFS(bits); + if (urefs < 2) + goto invalid_right; + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, name, entry)) { + goto invalid_right; + } + /* port is locked and active */ + + assert(port->ip_srights > 0); + + if (urefs == 2) { + if (bits & MACH_PORT_TYPE_RECEIVE) { + assert(port->ip_receiver_name == name); + assert(port->ip_receiver == space); + assert(IE_BITS_TYPE(bits) == + MACH_PORT_TYPE_SEND_RECEIVE); + + port->ip_srights++; + ip_reference(port); + ip_reference(port); + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); + + dnrequest = ipc_right_dncancel_macro(space, port, + name, entry); + + port->ip_srights++; + ip_reference(port); + ipc_hash_delete(space, (ipc_object_t) port, + name, entry); + entry->ie_object = IO_NULL; + } + entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND); + } else { + port->ip_srights += 2; + ip_reference(port); + 
ip_reference(port); + entry->ie_bits = bits-2; /* decrement urefs */ + } + ip_unlock(port); + + *objectp = (ipc_object_t) port; + *sorightp = dnrequest; + return KERN_SUCCESS; + + invalid_right: + return KERN_INVALID_RIGHT; +} + +/* + * Routine: ipc_right_copyout + * Purpose: + * Copyout a capability to a space. + * If successful, consumes a ref for the object. + * + * Always succeeds when given a newly-allocated entry, + * because user-reference overflow isn't a possibility. + * + * If copying out the object would cause the user-reference + * count in the entry to overflow, and overflow is TRUE, + * then instead the user-reference count is left pegged + * to its maximum value and the copyout succeeds anyway. + * Conditions: + * The space is write-locked and active. + * The object is locked and active. + * The object is unlocked; the space isn't. + * Returns: + * KERN_SUCCESS Copied out capability. + * KERN_UREFS_OVERFLOW User-refs would overflow; + * guaranteed not to happen with a fresh entry + * or if overflow=TRUE was specified. 
+ */ + +kern_return_t +ipc_right_copyout( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + ipc_object_t object) +{ + ipc_entry_bits_t bits; + ipc_port_t port; + + bits = entry->ie_bits; + + assert(IO_VALID(object)); + assert(io_otype(object) == IOT_PORT); + assert(io_active(object)); + assert(entry->ie_object == object); + + port = (ipc_port_t) object; + + switch (msgt_name) { + case MACH_MSG_TYPE_PORT_SEND_ONCE: + + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); + assert(port->ip_sorights > 0); + + /* transfer send-once right and ref to entry */ + ip_unlock(port); + + entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); + break; + + case MACH_MSG_TYPE_PORT_SEND: + assert(port->ip_srights > 0); + + if (bits & MACH_PORT_TYPE_SEND) { + mach_port_urefs_t urefs = IE_BITS_UREFS(bits); + + assert(port->ip_srights > 1); + assert(urefs > 0); + assert(urefs < MACH_PORT_UREFS_MAX); + + if (urefs+1 == MACH_PORT_UREFS_MAX) { + if (overflow) { + /* leave urefs pegged to maximum */ + + port->ip_srights--; + ip_release(port); + ip_unlock(port); + return KERN_SUCCESS; + } + + ip_unlock(port); + return KERN_UREFS_OVERFLOW; + } + + port->ip_srights--; + ip_release(port); + ip_unlock(port); + } else if (bits & MACH_PORT_TYPE_RECEIVE) { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); + assert(IE_BITS_UREFS(bits) == 0); + + /* transfer send right to entry */ + ip_release(port); + ip_unlock(port); + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); + assert(IE_BITS_UREFS(bits) == 0); + + /* transfer send right and ref to entry */ + ip_unlock(port); + + /* entry is locked holding ref, so can use port */ + + ipc_hash_insert(space, (ipc_object_t) port, + name, entry); + } + + entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; + break; + + case MACH_MSG_TYPE_PORT_RECEIVE: { + ipc_port_t dest; + + assert(port->ip_mscount == 0); + assert(port->ip_receiver_name == MACH_PORT_NULL); + dest = 
port->ip_destination; + + port->ip_receiver_name = name; + port->ip_receiver = space; + + assert((bits & MACH_PORT_TYPE_RECEIVE) == 0); + + if (bits & MACH_PORT_TYPE_SEND) { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); + assert(IE_BITS_UREFS(bits) > 0); + assert(port->ip_srights > 0); + + ip_release(port); + ip_unlock(port); + + /* entry is locked holding ref, so can use port */ + + ipc_hash_delete(space, (ipc_object_t) port, + name, entry); + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); + assert(IE_BITS_UREFS(bits) == 0); + + /* transfer ref to entry */ + ip_unlock(port); + } + entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE; + + if (dest != IP_NULL) + ipc_port_release(dest); + break; + } + + default: + panic("ipc_right_copyout: strange rights"); + } + + return KERN_SUCCESS; +} + +/* + * Routine: ipc_right_rename + * Purpose: + * Transfer an entry from one name to another. + * The old entry is deallocated. + * Conditions: + * The space is write-locked and active. + * The new entry is unused. Upon return, + * the space is unlocked. + * Returns: + * KERN_SUCCESS Moved entry to new name. + */ + +kern_return_t +ipc_right_rename( + ipc_space_t space, + mach_port_name_t oname, + ipc_entry_t oentry, + mach_port_name_t nname, + ipc_entry_t nentry) +{ + ipc_port_request_index_t request = oentry->ie_request; + ipc_entry_bits_t bits = oentry->ie_bits; + ipc_object_t object = oentry->ie_object; + + assert(space->is_active); + assert(oname != nname); + + /* + * If IE_BITS_COMPAT, we can't allow the entry to be renamed + * if the port is dead. (This would foil ipc_port_destroy.) + * Instead we should fail because oentry shouldn't exist. + * Note IE_BITS_COMPAT implies ie_request != 0. 
+ */ + + if (request != 0) { + ipc_port_t port; + + assert(bits & MACH_PORT_TYPE_PORT_RIGHTS); + port = (ipc_port_t) object; + assert(port != IP_NULL); + + if (ipc_right_check(space, port, oname, oentry)) { + request = 0; + object = IO_NULL; + bits = oentry->ie_bits; + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); + assert(oentry->ie_request == 0); + } else { + /* port is locked and active */ + + ipc_port_dnrename(port, request, oname, nname); + ip_unlock(port); + oentry->ie_request = 0; + } + } + + /* initialize nentry before letting ipc_hash_insert see it */ + + assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0); + nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK; + nentry->ie_request = request; + nentry->ie_object = object; + + switch (IE_BITS_TYPE(bits)) { + case MACH_PORT_TYPE_SEND: { + ipc_port_t port; + + port = (ipc_port_t) object; + assert(port != IP_NULL); + + /* remember, there are no other share entries possible */ + /* or we can't do the rename. Therefore we do not need */ + /* to check the other subspaces */ + ipc_hash_delete(space, (ipc_object_t) port, oname, oentry); + ipc_hash_insert(space, (ipc_object_t) port, nname, nentry); + break; + } + + case MACH_PORT_TYPE_RECEIVE: + case MACH_PORT_TYPE_SEND_RECEIVE: { + ipc_port_t port; + + port = (ipc_port_t) object; + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + assert(port->ip_receiver_name == oname); + assert(port->ip_receiver == space); + + port->ip_receiver_name = nname; + ip_unlock(port); + break; + } + + case MACH_PORT_TYPE_PORT_SET: { + ipc_pset_t pset; + + pset = (ipc_pset_t) object; + assert(pset != IPS_NULL); + + ips_lock(pset); + assert(ips_active(pset)); + assert(pset->ips_local_name == oname); + + pset->ips_local_name = nname; + ips_unlock(pset); + break; + } + + case MACH_PORT_TYPE_SEND_ONCE: + case MACH_PORT_TYPE_DEAD_NAME: + break; + + default: + panic("ipc_right_rename: strange rights"); + } + + assert(oentry->ie_request == 0); + oentry->ie_object = 
IO_NULL; + ipc_entry_dealloc(space, oname, oentry); + is_write_unlock(space); + + return KERN_SUCCESS; +} diff --git a/osfmk/ipc/ipc_right.h b/osfmk/ipc/ipc_right.h new file mode 100644 index 000000000..0623ba61c --- /dev/null +++ b/osfmk/ipc/ipc_right.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_right.h + * Author: Rich Draves + * Date: 1989 + * + * Declarations of functions to manipulate IPC capabilities. + */ + +#ifndef _IPC_IPC_RIGHT_H_ +#define _IPC_IPC_RIGHT_H_ + +#include +#include +#include +#include + +#define ipc_right_lookup_read ipc_right_lookup_write +#define ipc_right_lookup_two_read ipc_right_lookup_two_write + +/* Find an entry in a space, given the name */ +extern kern_return_t ipc_right_lookup_write( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp); + +/* Find two entries in a space, given two names */ +extern kern_return_t ipc_right_lookup_two_write( + ipc_space_t space, + mach_port_name_t name1, + ipc_entry_t *entryp1, + mach_port_name_t name2, + ipc_entry_t *entryp2); + +/* Translate (space, object) -> (name, entry) */ +extern boolean_t ipc_right_reverse( + ipc_space_t space, + ipc_object_t object, + mach_port_name_t *namep, + ipc_entry_t *entryp); + +/* Make a dead-name request, returning the registered send-once right */ +extern kern_return_t ipc_right_dnrequest( + ipc_space_t space, + mach_port_name_t name, + boolean_t immediate, + ipc_port_t notify, + ipc_port_t *previousp); + +/* Cancel a dead-name request and return the send-once right */ +extern ipc_port_t ipc_right_dncancel( + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry); + +#define ipc_right_dncancel_macro(space, port, name, entry) \ + ((entry->ie_request == 0) ? 
IP_NULL : \ + ipc_right_dncancel((space), (port), (name), (entry))) + +/* Check if an entry is being used */ +extern boolean_t ipc_right_inuse( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); + +/* Check if the port has died */ +extern boolean_t ipc_right_check( + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry); + +/* Clean up an entry in a dead space */ +extern void ipc_right_clean( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); + +/* Destroy an entry in a space */ +extern kern_return_t ipc_right_destroy( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); + +/* Release a send/send-once/dead-name user reference */ +extern kern_return_t ipc_right_dealloc( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); + +/* Modify the user-reference count for a right */ +extern kern_return_t ipc_right_delta( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_right_t right, + mach_port_delta_t delta); + +/* Retrieve information about a right */ +extern kern_return_t ipc_right_info( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_type_t *typep, + mach_port_urefs_t *urefsp); + +/* Check if a subsequent ipc_right_copyin would succeed */ +extern boolean_t ipc_right_copyin_check( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name); + +/* Copyin a capability from a space */ +extern kern_return_t ipc_right_copyin( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + boolean_t deadok, + ipc_object_t *objectp, + ipc_port_t *sorightp); + +/* Undo the effects of an ipc_right_copyin */ +extern void ipc_right_copyin_undo( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + ipc_object_t object, + ipc_port_t soright); + +/* Copyin two send rights from a space */ +extern 
kern_return_t ipc_right_copyin_two( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + ipc_object_t *objectp, + ipc_port_t *sorightp); + +/* Copyout a capability to a space */ +extern kern_return_t ipc_right_copyout( + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + ipc_object_t object); + +/* Rename a capability */ +extern kern_return_t ipc_right_rename( + ipc_space_t space, + mach_port_name_t oname, + ipc_entry_t oentry, + mach_port_name_t nname, + ipc_entry_t nentry); + +#endif /* _IPC_IPC_RIGHT_H_ */ diff --git a/osfmk/ipc/ipc_space.c b/osfmk/ipc/ipc_space.c new file mode 100644 index 000000000..c5b207a22 --- /dev/null +++ b/osfmk/ipc/ipc_space.c @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_space.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate IPC capability spaces. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +zone_t ipc_space_zone; +ipc_space_t ipc_space_kernel; +ipc_space_t ipc_space_reply; +#if MACH_KDB +ipc_space_t default_pager_space; +#endif /* MACH_KDB */ + +/* + * Routine: ipc_space_reference + * Routine: ipc_space_release + * Purpose: + * Function versions of the IPC space macros. + * The "is_" cover macros can be defined to use the + * macros or the functions, as desired. + */ + +void +ipc_space_reference( + ipc_space_t space) +{ + ipc_space_reference_macro(space); +} + +void +ipc_space_release( + ipc_space_t space) +{ + ipc_space_release_macro(space); +} + +/* + * Routine: ipc_space_create + * Purpose: + * Creates a new IPC space. + * + * The new space has two references, one for the caller + * and one because it is active. 
+ * Conditions: + * Nothing locked. Allocates memory. + * Returns: + * KERN_SUCCESS Created a space. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +ipc_space_create( + ipc_table_size_t initial, + ipc_space_t *spacep) +{ + ipc_space_t space; + ipc_entry_t table; + ipc_entry_num_t new_size; + mach_port_index_t index; + + space = is_alloc(); + if (space == IS_NULL) + return KERN_RESOURCE_SHORTAGE; + + table = it_entries_alloc(initial); + if (table == IE_NULL) { + is_free(space); + return KERN_RESOURCE_SHORTAGE; + } + + new_size = initial->its_size; + memset((void *) table, 0, new_size * sizeof(struct ipc_entry)); + + /* + * Initialize the free list in the table. + * Add the entries in reverse order, and + * set the generation number to -1, so that + * initial allocations produce "natural" names. + */ + for (index = 0; index < new_size; index++) { + ipc_entry_t entry = &table[index]; + + entry->ie_bits = IE_BITS_GEN_MASK; + entry->ie_next = index+1; + } + table[new_size-1].ie_next = 0; + + is_ref_lock_init(space); + space->is_references = 2; + + is_lock_init(space); + space->is_active = TRUE; + space->is_growing = FALSE; + space->is_table = table; + space->is_table_size = new_size; + space->is_table_next = initial+1; + + ipc_splay_tree_init(&space->is_tree); + space->is_tree_total = 0; + space->is_tree_small = 0; + space->is_tree_hash = 0; + + *spacep = space; + return KERN_SUCCESS; +} + +/* + * Routine: ipc_space_create_special + * Purpose: + * Create a special space. A special space + * doesn't hold rights in the normal way. + * Instead it is place-holder for holding + * disembodied (naked) receive rights. + * See ipc_port_alloc_special/ipc_port_dealloc_special. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Created a space. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +ipc_space_create_special( + ipc_space_t *spacep) +{ + ipc_space_t space; + + space = is_alloc(); + if (space == IS_NULL) + return KERN_RESOURCE_SHORTAGE; + + is_ref_lock_init(space); + space->is_references = 1; + + is_lock_init(space); + space->is_active = FALSE; + + *spacep = space; + return KERN_SUCCESS; +} + +/* + * ipc_space_clean - remove all port references from an ipc space. + * + * In order to follow the traditional semantic, ipc_space_destroy + * will not destroy the entire port table of a shared space. Instead + * it will simply clear its own sub-space. + */ +void +ipc_space_clean( + ipc_space_t space) +{ + ipc_tree_entry_t tentry; + ipc_entry_t table; + ipc_entry_num_t size; + mach_port_index_t index; + + /* + * If somebody is trying to grow the table, + * we must wait until they finish and figure + * out the space died. + */ + is_write_lock(space); + while (space->is_growing) { + assert_wait((event_t) space, THREAD_UNINT); + is_write_unlock(space); + thread_block((void (*)(void)) 0); + is_write_lock(space); + } + + /* + * Now we can futz with it since we have the write lock. + */ +#if MACH_KDB + if (space == default_pager_space) + default_pager_space = IS_NULL; +#endif /* MACH_KDB */ + + table = space->is_table; + size = space->is_table_size; + + for (index = 0; index < size; index++) { + ipc_entry_t entry = &table[index]; + mach_port_type_t type; + + type = IE_BITS_TYPE(entry->ie_bits); + if (type != MACH_PORT_TYPE_NONE) { + mach_port_name_t name = MACH_PORT_MAKE(index, + IE_BITS_GEN(entry->ie_bits)); + ipc_right_destroy(space, name, entry); + } + } + + /* + * JMM - Now the table is cleaned out. We don't bother shrinking the + * size of the table at this point, but we probably should if it is + * really large. Lets just clean up the splay tree. 
+ */ + start_splay: + for (tentry = ipc_splay_traverse_start(&space->is_tree); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) { + int i; + mach_port_type_t type; + mach_port_name_t name = tentry->ite_name; + + type = IE_BITS_TYPE(tentry->ite_bits); + /* + * If it is a real right, then destroy it. This will have the + * side effect of removing it from the splay, so start over. + */ + if(type != MACH_PORT_TYPE_NONE) { + ipc_splay_traverse_finish(&space->is_tree); + ipc_right_destroy(space, name, &tentry->ite_entry); + goto start_splay; + } + } + ipc_splay_traverse_finish(&space->is_tree); + is_write_unlock(space); +} + + +/* + * Routine: ipc_space_destroy + * Purpose: + * Marks the space as dead and cleans up the entries. + * Does nothing if the space is already dead. + * Conditions: + * Nothing locked. + */ + +void +ipc_space_destroy( + ipc_space_t space) +{ + boolean_t active; + ipc_tree_entry_t tentry; + ipc_entry_t table; + ipc_entry_num_t size; + mach_port_index_t index; + + assert(space != IS_NULL); + + is_write_lock(space); + active = space->is_active; + space->is_active = FALSE; + is_write_unlock(space); + + if (!active) + return; + + + /* + * If somebody is trying to grow the table, + * we must wait until they finish and figure + * out the space died. + */ + is_read_lock(space); + while (space->is_growing) { + assert_wait((event_t) space, THREAD_UNINT); + is_read_unlock(space); + thread_block((void (*)(void)) 0); + is_read_lock(space); + } + + is_read_unlock(space); + /* + * Now we can futz with it unlocked. 
+ */ +#if MACH_KDB + if (space == default_pager_space) + default_pager_space = IS_NULL; +#endif /* MACH_KDB */ + + table = space->is_table; + size = space->is_table_size; + + for (index = 0; index < size; index++) { + ipc_entry_t entry = &table[index]; + mach_port_type_t type; + + type = IE_BITS_TYPE(entry->ie_bits); + if (type != MACH_PORT_TYPE_NONE) { + mach_port_name_t name; + + name = MACH_PORT_MAKE(index, + IE_BITS_GEN(entry->ie_bits)); + ipc_right_clean(space, name, entry); + } + } + + it_entries_free(space->is_table_next-1, table); + space->is_table_size = 0; + + for (tentry = ipc_splay_traverse_start(&space->is_tree); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) { + mach_port_type_t type; + mach_port_name_t name = tentry->ite_name; + + type = IE_BITS_TYPE(tentry->ite_bits); + assert(type != MACH_PORT_TYPE_NONE); + + ipc_right_clean(space, name, &tentry->ite_entry); + + if(type == MACH_PORT_TYPE_SEND) + ipc_hash_global_delete(space, tentry->ite_object, + name, tentry); + } + ipc_splay_traverse_finish(&space->is_tree); + + /* + * Because the space is now dead, + * we must release the "active" reference for it. + * Our caller still has his reference. + */ + is_release(space); +} diff --git a/osfmk/ipc/ipc_space.h b/osfmk/ipc/ipc_space.h new file mode 100644 index 000000000..33431cf9b --- /dev/null +++ b/osfmk/ipc/ipc_space.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_space.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for IPC spaces of capabilities. + */ + +#ifndef _IPC_IPC_SPACE_H_ +#define _IPC_IPC_SPACE_H_ + + +#include +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Every task has a space of IPC capabilities. 
+ * IPC operations like send and receive use this space. + * IPC kernel calls manipulate the space of the target task. + * + * Every space has a non-NULL is_table with is_table_size entries. + * A space may have a NULL is_tree. is_tree_small records the + * number of entries in the tree that, if the table were to grow + * to the next larger size, would move from the tree to the table. + * + * is_growing marks when the table is in the process of growing. + * When the table is growing, it can't be freed or grown by another + * thread, because of krealloc/kmem_realloc's requirements. + * + */ + +typedef natural_t ipc_space_refs_t; + +struct ipc_space { + decl_mutex_data(,is_ref_lock_data) + ipc_space_refs_t is_references; + + decl_mutex_data(,is_lock_data) + boolean_t is_active; /* is the space alive? */ + boolean_t is_growing; /* is the space growing? */ + ipc_entry_t is_table; /* an array of entries */ + ipc_entry_num_t is_table_size; /* current size of table */ + struct ipc_table_size *is_table_next; /* info for larger table */ + struct ipc_splay_tree is_tree; /* a splay tree of entries */ + ipc_entry_num_t is_tree_total; /* number of entries in the tree */ + ipc_entry_num_t is_tree_small; /* # of small entries in the tree */ + ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */ + boolean_t is_fast; /* for is_fast_space() */ +}; + +#define IS_NULL ((ipc_space_t) 0) + +extern zone_t ipc_space_zone; + +#define is_alloc() ((ipc_space_t) zalloc(ipc_space_zone)) +#define is_free(is) zfree(ipc_space_zone, (vm_offset_t) (is)) + +extern ipc_space_t ipc_space_kernel; +extern ipc_space_t ipc_space_reply; +#if DIPC +extern ipc_space_t ipc_space_remote; +#endif /* DIPC */ +#if DIPC || MACH_KDB +extern ipc_space_t default_pager_space; +#endif /* DIPC || MACH_KDB */ + +#define is_fast_space(is) ((is)->is_fast) + +#define is_ref_lock_init(is) mutex_init(&(is)->is_ref_lock_data, \ + ETAP_IPC_IS_REF) + +#define ipc_space_reference_macro(is) \ +MACRO_BEGIN \ + 
mutex_lock(&(is)->is_ref_lock_data); \ + assert((is)->is_references > 0); \ + (is)->is_references++; \ + mutex_unlock(&(is)->is_ref_lock_data); \ +MACRO_END + +#define ipc_space_release_macro(is) \ +MACRO_BEGIN \ + ipc_space_refs_t _refs; \ + \ + mutex_lock(&(is)->is_ref_lock_data); \ + assert((is)->is_references > 0); \ + _refs = --(is)->is_references; \ + mutex_unlock(&(is)->is_ref_lock_data); \ + \ + if (_refs == 0) \ + is_free(is); \ +MACRO_END + +#define is_lock_init(is) mutex_init(&(is)->is_lock_data, ETAP_IPC_IS) + +#define is_read_lock(is) mutex_lock(&(is)->is_lock_data) +#define is_read_unlock(is) mutex_unlock(&(is)->is_lock_data) + +#define is_write_lock(is) mutex_lock(&(is)->is_lock_data) +#define is_write_lock_try(is) mutex_try(&(is)->is_lock_data) +#define is_write_unlock(is) mutex_unlock(&(is)->is_lock_data) + +#define is_reference(is) ipc_space_reference(is) +#define is_release(is) ipc_space_release(is) + +#define is_write_to_read_lock(is) + +#define current_space_fast() (current_task_fast()->itk_space) +#define current_space() (current_space_fast()) + +#else /* !MACH_KERNEL_PRIVATE */ + +extern ipc_space_t current_space(void); + +#endif /* MACH_KERNEL_PRIVATE */ + +/* Take a reference on a space */ +extern void ipc_space_reference( + ipc_space_t space); + +/* Realase a reference on a space */ +extern void ipc_space_release( + ipc_space_t space); + + +/* Create new IPC space */ +extern kern_return_t ipc_space_create( + ipc_table_size_t initial, + ipc_space_t *spacep); + +/* Create a special IPC space */ +extern kern_return_t ipc_space_create_special( + ipc_space_t *spacep); + +/* Mark a space as dead and cleans up the entries*/ +extern void ipc_space_destroy( + ipc_space_t space); + +#endif /* _IPC_IPC_SPACE_H_ */ diff --git a/osfmk/ipc/ipc_splay.c b/osfmk/ipc/ipc_splay.c new file mode 100644 index 000000000..ca7817be2 --- /dev/null +++ b/osfmk/ipc/ipc_splay.c @@ -0,0 +1,994 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:28 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:16 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 02:11:47 ezf + * change marker to not FREE + * [1994/09/22 21:30:41 ezf] + * + * Revision 1.1.2.3 1993/07/22 16:17:25 rod + * Add ANSI prototypes. CR #9523. + * [1993/07/22 13:33:20 rod] + * + * Revision 1.1.2.2 1993/06/02 23:33:40 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:11:07 jeffc] + * + * Revision 1.1 1992/09/30 02:08:11 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:10:41 af + * Revision 2.4.2.1 91/09/16 10:16:00 rpd + * Added MACH_PORT_SMALLEST, MACH_PORT_LARGEST definitions to reduce lint. + * [91/09/02 rpd] + * + * Revision 2.4.2.1 91/09/16 10:16:00 rpd + * Added MACH_PORT_SMALLEST, MACH_PORT_LARGEST definitions to reduce lint. 
+ * [91/09/02 rpd] + * + * Revision 2.4 91/05/14 16:37:08 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:23:52 mrt + * Changed to new Mach copyright + * [91/02/01 15:51:43 mrt] + * + * Revision 2.2 90/06/02 14:51:49 rpd + * Created for new IPC. + * [90/03/26 21:03:46 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_splay.c + * Author: Rich Draves + * Date: 1989 + * + * Primitive splay tree operations. + */ + +#include +#include +#include +#include +#include + +/* + * Splay trees are self-adjusting binary search trees. + * They have the following attractive properties: + * 1) Space efficient; only two pointers per entry. + * 2) Robust performance; amortized O(log n) per operation. + * 3) Recursion not needed. + * This makes them a good fall-back data structure for those + * entries that don't fit into the lookup table. + * + * The paper by Sleator and Tarjan, JACM v. 32, no. 3, pp. 
652-686, + * describes the splaying operation. ipc_splay_prim_lookup + * and ipc_splay_prim_assemble implement the top-down splay + * described on p. 669. + * + * The tree is stored in an unassembled form. If ist_root is null, + * then the tree has no entries. Otherwise, ist_name records + * the value used for the last lookup. ist_root points to the + * middle tree obtained from the top-down splay. ist_ltree and + * ist_rtree point to left and right subtrees, whose entries + * are all smaller (larger) than those in the middle tree. + * ist_ltreep and ist_rtreep are pointers to fields in the + * left and right subtrees. ist_ltreep points to the rchild field + * of the largest entry in ltree, and ist_rtreep points to the + * lchild field of the smallest entry in rtree. The pointed-to + * fields aren't initialized. If the left (right) subtree is null, + * then ist_ltreep (ist_rtreep) points to the ist_ltree (ist_rtree) + * field in the splay structure itself. + * + * The primary advantage of the unassembled form is that repeated + * unsuccessful lookups are efficient. In particular, an unsuccessful + * lookup followed by an insert only requires one splaying operation. + * + * The traversal algorithm works via pointer inversion. + * When descending down the tree, child pointers are reversed + * to point back to the parent entry. When ascending, + * the pointers are restored to their original value. + * + * The biggest potential problem with the splay tree implementation + * is that the operations, even lookup, require an exclusive lock. + * If IPC spaces are protected with exclusive locks, then + * the splay tree doesn't require its own lock, and ist_lock/ist_unlock + * needn't do anything. If IPC spaces are protected with read/write + * locks then ist_lock/ist_unlock should provide exclusive access. + * + * If it becomes important to let lookups run in parallel, + * or if the restructuring makes lookups too expensive, then + * there is hope. 
Use a read/write lock on the splay tree. + * Keep track of the number of entries in the tree. When doing + * a lookup, first try a non-restructuring lookup with a read lock held, + * with a bound (based on log of size of the tree) on the number of + * entries to traverse. If the lookup runs up against the bound, + * then take a write lock and do a reorganizing lookup. + * This way, if lookups only access roughly balanced parts + * of the tree, then lookups run in parallel and do no restructuring. + * + * The traversal algorithm currently requires an exclusive lock. + * If that is a problem, the tree could be changed from an lchild/rchild + * representation to a leftmost child/right sibling representation. + * In conjunction with non-restructing lookups, this would let + * lookups and traversals all run in parallel. But this representation + * is more complicated and would slow down the operations. + */ + +/* + * Boundary values to hand to ipc_splay_prim_lookup: + */ + +#define MACH_PORT_SMALLEST ((mach_port_name_t) 0) +#define MACH_PORT_LARGEST ((mach_port_name_t) ~0) + +/* + * Routine: ipc_splay_prim_lookup + * Purpose: + * Searches for the node labeled name in the splay tree. + * Returns three nodes (treep, ltreep, rtreep) and + * two pointers to nodes (ltreepp, rtreepp). + * + * ipc_splay_prim_lookup splits the supplied tree into + * three subtrees, left, middle, and right, returned + * in ltreep, treep, and rtreep. + * + * If name is present in the tree, then it is at + * the root of the middle tree. Otherwise, the root + * of the middle tree is the last node traversed. + * + * ipc_splay_prim_lookup returns a pointer into + * the left subtree, to the rchild field of its + * largest node, in ltreepp. It returns a pointer + * into the right subtree, to the lchild field of its + * smallest node, in rtreepp. 
+ */ + +static void +ipc_splay_prim_lookup( + mach_port_name_t name, + ipc_tree_entry_t tree, + ipc_tree_entry_t *treep, + ipc_tree_entry_t *ltreep, + ipc_tree_entry_t **ltreepp, + ipc_tree_entry_t *rtreep, + ipc_tree_entry_t **rtreepp) +{ + mach_port_name_t tname; /* temp name */ + ipc_tree_entry_t lchild, rchild; /* temp child pointers */ + + assert(tree != ITE_NULL); + +#define link_left \ +MACRO_BEGIN \ + *ltreep = tree; \ + ltreep = &tree->ite_rchild; \ + tree = *ltreep; \ +MACRO_END + +#define link_right \ +MACRO_BEGIN \ + *rtreep = tree; \ + rtreep = &tree->ite_lchild; \ + tree = *rtreep; \ +MACRO_END + +#define rotate_left \ +MACRO_BEGIN \ + ipc_tree_entry_t temp = tree; \ + \ + tree = temp->ite_rchild; \ + temp->ite_rchild = tree->ite_lchild; \ + tree->ite_lchild = temp; \ +MACRO_END + +#define rotate_right \ +MACRO_BEGIN \ + ipc_tree_entry_t temp = tree; \ + \ + tree = temp->ite_lchild; \ + temp->ite_lchild = tree->ite_rchild; \ + tree->ite_rchild = temp; \ +MACRO_END + + while (name != (tname = tree->ite_name)) { + if (name < tname) { + /* descend to left */ + + lchild = tree->ite_lchild; + if (lchild == ITE_NULL) + break; + tname = lchild->ite_name; + + if ((name < tname) && + (lchild->ite_lchild != ITE_NULL)) + rotate_right; + link_right; + if ((name > tname) && + (lchild->ite_rchild != ITE_NULL)) + link_left; + } else { + /* descend to right */ + + rchild = tree->ite_rchild; + if (rchild == ITE_NULL) + break; + tname = rchild->ite_name; + + if ((name > tname) && + (rchild->ite_rchild != ITE_NULL)) + rotate_left; + link_left; + if ((name < tname) && + (rchild->ite_lchild != ITE_NULL)) + link_right; + } + + assert(tree != ITE_NULL); + } + + *treep = tree; + *ltreepp = ltreep; + *rtreepp = rtreep; + +#undef link_left +#undef link_right +#undef rotate_left +#undef rotate_right +} + +/* + * Routine: ipc_splay_prim_assemble + * Purpose: + * Assembles the results of ipc_splay_prim_lookup + * into a splay tree with the found node at the root. 
+ * + * ltree and rtree are by-reference so storing + * through ltreep and rtreep can change them. + */ + +static void +ipc_splay_prim_assemble( + ipc_tree_entry_t tree, + ipc_tree_entry_t *ltree, + ipc_tree_entry_t *ltreep, + ipc_tree_entry_t *rtree, + ipc_tree_entry_t *rtreep) +{ + assert(tree != ITE_NULL); + + *ltreep = tree->ite_lchild; + *rtreep = tree->ite_rchild; + + tree->ite_lchild = *ltree; + tree->ite_rchild = *rtree; +} + +/* + * Routine: ipc_splay_tree_init + * Purpose: + * Initialize a raw splay tree for use. + */ + +void +ipc_splay_tree_init( + ipc_splay_tree_t splay) +{ + splay->ist_root = ITE_NULL; +} + +/* + * Routine: ipc_splay_tree_pick + * Purpose: + * Picks and returns a random entry in a splay tree. + * Returns FALSE if the splay tree is empty. + */ + +boolean_t +ipc_splay_tree_pick( + ipc_splay_tree_t splay, + mach_port_name_t *namep, + ipc_tree_entry_t *entryp) +{ + ipc_tree_entry_t root; + + ist_lock(splay); + + root = splay->ist_root; + if (root != ITE_NULL) { + *namep = root->ite_name; + *entryp = root; + } + + ist_unlock(splay); + + return root != ITE_NULL; +} + +/* + * Routine: ipc_splay_tree_lookup + * Purpose: + * Finds an entry in a splay tree. + * Returns ITE_NULL if not found. + */ + +ipc_tree_entry_t +ipc_splay_tree_lookup( + ipc_splay_tree_t splay, + mach_port_name_t name) +{ + ipc_tree_entry_t root; + + ist_lock(splay); + + root = splay->ist_root; + if (root != ITE_NULL) { + if (splay->ist_name != name) { + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + ipc_splay_prim_lookup(name, root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + splay->ist_name = name; + splay->ist_root = root; + } + + if (name != root->ite_name) + root = ITE_NULL; + } + + ist_unlock(splay); + + return root; +} + +/* + * Routine: ipc_splay_tree_insert + * Purpose: + * Inserts a new entry into a splay tree. + * The caller supplies a new entry. 
+ * The name can't already be present in the tree. + */ + +void +ipc_splay_tree_insert( + ipc_splay_tree_t splay, + mach_port_name_t name, + ipc_tree_entry_t entry) +{ + ipc_tree_entry_t root; + + assert(entry != ITE_NULL); + + ist_lock(splay); + + root = splay->ist_root; + if (root == ITE_NULL) { + entry->ite_lchild = ITE_NULL; + entry->ite_rchild = ITE_NULL; + } else { + if (splay->ist_name != name) { + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + ipc_splay_prim_lookup(name, root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + } + + assert(root->ite_name != name); + + if (name < root->ite_name) { + assert(root->ite_lchild == ITE_NULL); + + *splay->ist_ltreep = ITE_NULL; + *splay->ist_rtreep = root; + } else { + assert(root->ite_rchild == ITE_NULL); + + *splay->ist_ltreep = root; + *splay->ist_rtreep = ITE_NULL; + } + + entry->ite_lchild = splay->ist_ltree; + entry->ite_rchild = splay->ist_rtree; + } + + entry->ite_name = name; + splay->ist_root = entry; + splay->ist_name = name; + splay->ist_ltreep = &splay->ist_ltree; + splay->ist_rtreep = &splay->ist_rtree; + + ist_unlock(splay); +} + +/* + * Routine: ipc_splay_tree_delete + * Purpose: + * Deletes an entry from a splay tree. + * The name must be present in the tree. + * Frees the entry. + * + * The "entry" argument isn't currently used. + * Other implementations might want it, though. 
+ */ + +void +ipc_splay_tree_delete( + ipc_splay_tree_t splay, + mach_port_name_t name, + ipc_tree_entry_t entry) +{ + ipc_tree_entry_t root, saved; + + ist_lock(splay); + + root = splay->ist_root; + assert(root != ITE_NULL); + + if (splay->ist_name != name) { + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + ipc_splay_prim_lookup(name, root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + } + + assert(root->ite_name == name); + assert(root == entry); + + *splay->ist_ltreep = root->ite_lchild; + *splay->ist_rtreep = root->ite_rchild; + ite_free(root); + + root = splay->ist_ltree; + saved = splay->ist_rtree; + + if (root == ITE_NULL) + root = saved; + else if (saved != ITE_NULL) { + /* + * Find the largest node in the left subtree, and splay it + * to the root. Then add the saved right subtree. + */ + + ipc_splay_prim_lookup(MACH_PORT_LARGEST, root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + + assert(root->ite_rchild == ITE_NULL); + root->ite_rchild = saved; + } + + splay->ist_root = root; + if (root != ITE_NULL) { + splay->ist_name = root->ite_name; + splay->ist_ltreep = &splay->ist_ltree; + splay->ist_rtreep = &splay->ist_rtree; + } + + ist_unlock(splay); +} + +/* + * Routine: ipc_splay_tree_split + * Purpose: + * Split a splay tree. Puts all entries smaller than "name" + * into a new tree, "small". + * + * Doesn't do locking on "small", because nobody else + * should be fiddling with the uninitialized tree. 
+ */ + +void +ipc_splay_tree_split( + ipc_splay_tree_t splay, + mach_port_name_t name, + ipc_splay_tree_t small) +{ + ipc_tree_entry_t root; + + ipc_splay_tree_init(small); + + ist_lock(splay); + + root = splay->ist_root; + if (root != ITE_NULL) { + /* lookup name, to get it (or last traversed) to the top */ + + if (splay->ist_name != name) { + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + ipc_splay_prim_lookup(name, root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + } + + if (root->ite_name < name) { + /* root goes into small */ + + *splay->ist_ltreep = root->ite_lchild; + *splay->ist_rtreep = ITE_NULL; + root->ite_lchild = splay->ist_ltree; + assert(root->ite_rchild == ITE_NULL); + + small->ist_root = root; + small->ist_name = root->ite_name; + small->ist_ltreep = &small->ist_ltree; + small->ist_rtreep = &small->ist_rtree; + + /* rtree goes into splay */ + + root = splay->ist_rtree; + splay->ist_root = root; + if (root != ITE_NULL) { + splay->ist_name = root->ite_name; + splay->ist_ltreep = &splay->ist_ltree; + splay->ist_rtreep = &splay->ist_rtree; + } + } else { + /* root stays in splay */ + + *splay->ist_ltreep = root->ite_lchild; + root->ite_lchild = ITE_NULL; + + splay->ist_root = root; + splay->ist_name = name; + splay->ist_ltreep = &splay->ist_ltree; + + /* ltree goes into small */ + + root = splay->ist_ltree; + small->ist_root = root; + if (root != ITE_NULL) { + small->ist_name = root->ite_name; + small->ist_ltreep = &small->ist_ltree; + small->ist_rtreep = &small->ist_rtree; + } + } + } + + ist_unlock(splay); +} + +/* + * Routine: ipc_splay_tree_join + * Purpose: + * Joins two splay trees. Merges the entries in "small", + * which must all be smaller than the entries in "splay", + * into "splay". 
+ */ + +void +ipc_splay_tree_join( + ipc_splay_tree_t splay, + ipc_splay_tree_t small) +{ + ipc_tree_entry_t sroot; + + /* pull entries out of small */ + + ist_lock(small); + + sroot = small->ist_root; + if (sroot != ITE_NULL) { + ipc_splay_prim_assemble(sroot, + &small->ist_ltree, small->ist_ltreep, + &small->ist_rtree, small->ist_rtreep); + small->ist_root = ITE_NULL; + } + + ist_unlock(small); + + /* put entries, if any, into splay */ + + if (sroot != ITE_NULL) { + ipc_tree_entry_t root; + + ist_lock(splay); + + root = splay->ist_root; + if (root == ITE_NULL) { + root = sroot; + } else { + /* get smallest entry in splay tree to top */ + + if (splay->ist_name != MACH_PORT_SMALLEST) { + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + ipc_splay_prim_lookup(MACH_PORT_SMALLEST, + root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + } + + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + + assert(root->ite_lchild == ITE_NULL); + assert(sroot->ite_name < root->ite_name); + root->ite_lchild = sroot; + } + + splay->ist_root = root; + splay->ist_name = root->ite_name; + splay->ist_ltreep = &splay->ist_ltree; + splay->ist_rtreep = &splay->ist_rtree; + + ist_unlock(splay); + } +} + +/* + * Routine: ipc_splay_tree_bounds + * Purpose: + * Given a name, returns the largest value present + * in the tree that is smaller than or equal to the name, + * or ~0 if no such value exists. Similarly, returns + * the smallest value present that is greater than or + * equal to the name, or 0 if no such value exists. 
+ * + * Hence, if + * lower = upper, then lower = name = upper + * and name is present in the tree + * lower = ~0 and upper = 0, + * then the tree is empty + * lower = ~0 and upper > 0, then name < upper + * and upper is smallest value in tree + * lower < ~0 and upper = 0, then lower < name + * and lower is largest value in tree + * lower < ~0 and upper > 0, then lower < name < upper + * and they are tight bounds on name + * + * (Note MACH_PORT_SMALLEST = 0 and MACH_PORT_LARGEST = ~0.) + */ + +void +ipc_splay_tree_bounds( + ipc_splay_tree_t splay, + mach_port_name_t name, + mach_port_name_t *lowerp, + mach_port_name_t *upperp) +{ + ipc_tree_entry_t root; + + ist_lock(splay); + + root = splay->ist_root; + if (root == ITE_NULL) { + *lowerp = MACH_PORT_LARGEST; + *upperp = MACH_PORT_SMALLEST; + } else { + mach_port_name_t rname; + + if (splay->ist_name != name) { + ipc_splay_prim_assemble(root, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + ipc_splay_prim_lookup(name, root, &root, + &splay->ist_ltree, &splay->ist_ltreep, + &splay->ist_rtree, &splay->ist_rtreep); + splay->ist_name = name; + splay->ist_root = root; + } + + rname = root->ite_name; + + /* + * OK, it's a hack. We convert the ltreep and rtreep + * pointers back into real entry pointers, + * so we can pick the names out of the entries. 
+ */ + + if (rname <= name) + *lowerp = rname; + else if (splay->ist_ltreep == &splay->ist_ltree) + *lowerp = MACH_PORT_LARGEST; + else { + ipc_tree_entry_t entry; + + entry = (ipc_tree_entry_t) + ((char *)splay->ist_ltreep - + ((char *)&root->ite_rchild - + (char *)root)); + *lowerp = entry->ite_name; + } + + if (rname >= name) + *upperp = rname; + else if (splay->ist_rtreep == &splay->ist_rtree) + *upperp = MACH_PORT_SMALLEST; + else { + ipc_tree_entry_t entry; + + entry = (ipc_tree_entry_t) + ((char *)splay->ist_rtreep - + ((char *)&root->ite_lchild - + (char *)root)); + *upperp = entry->ite_name; + } + } + + ist_unlock(splay); +} + +/* + * Routine: ipc_splay_traverse_start + * Routine: ipc_splay_traverse_next + * Routine: ipc_splay_traverse_finish + * Purpose: + * Perform a symmetric order traversal of a splay tree. + * Usage: + * for (entry = ipc_splay_traverse_start(splay); + * entry != ITE_NULL; + * entry = ipc_splay_traverse_next(splay, delete)) { + * do something with entry + * } + * ipc_splay_traverse_finish(splay); + * + * If "delete" is TRUE, then the current entry + * is removed from the tree and deallocated. + * + * During the traversal, the splay tree is locked. 
+ */ + +ipc_tree_entry_t +ipc_splay_traverse_start( + ipc_splay_tree_t splay) +{ + ipc_tree_entry_t current, parent; + + ist_lock(splay); + + current = splay->ist_root; + if (current != ITE_NULL) { + ipc_splay_prim_assemble(current, + &splay->ist_ltree, splay->ist_ltreep, + &splay->ist_rtree, splay->ist_rtreep); + + parent = ITE_NULL; + + while (current->ite_lchild != ITE_NULL) { + ipc_tree_entry_t next; + + next = current->ite_lchild; + current->ite_lchild = parent; + parent = current; + current = next; + } + + splay->ist_ltree = current; + splay->ist_rtree = parent; + } + + return current; +} + +ipc_tree_entry_t +ipc_splay_traverse_next( + ipc_splay_tree_t splay, + boolean_t delete) +{ + ipc_tree_entry_t current, parent; + + /* pick up where traverse_entry left off */ + + current = splay->ist_ltree; + parent = splay->ist_rtree; + assert(current != ITE_NULL); + + if (!delete) + goto traverse_right; + + /* we must delete current and patch the tree */ + + if (current->ite_lchild == ITE_NULL) { + if (current->ite_rchild == ITE_NULL) { + /* like traverse_back, but with deletion */ + + if (parent == ITE_NULL) { + ite_free(current); + + splay->ist_root = ITE_NULL; + return ITE_NULL; + } + + if (current->ite_name < parent->ite_name) { + ite_free(current); + + current = parent; + parent = current->ite_lchild; + current->ite_lchild = ITE_NULL; + goto traverse_entry; + } else { + ite_free(current); + + current = parent; + parent = current->ite_rchild; + current->ite_rchild = ITE_NULL; + goto traverse_back; + } + } else { + ipc_tree_entry_t prev; + + prev = current; + current = current->ite_rchild; + ite_free(prev); + goto traverse_left; + } + } else { + if (current->ite_rchild == ITE_NULL) { + ipc_tree_entry_t prev; + + prev = current; + current = current->ite_lchild; + ite_free(prev); + goto traverse_back; + } else { + ipc_tree_entry_t prev; + ipc_tree_entry_t ltree, rtree; + ipc_tree_entry_t *ltreep, *rtreep; + + /* replace current with largest of left children */ + + 
prev = current; + ipc_splay_prim_lookup(MACH_PORT_LARGEST, + current->ite_lchild, &current, + &ltree, &ltreep, &rtree, &rtreep); + ipc_splay_prim_assemble(current, + &ltree, ltreep, &rtree, rtreep); + + assert(current->ite_rchild == ITE_NULL); + current->ite_rchild = prev->ite_rchild; + ite_free(prev); + goto traverse_right; + } + } + /*NOTREACHED*/ + + /* + * A state machine: for each entry, we + * 1) traverse left subtree + * 2) traverse the entry + * 3) traverse right subtree + * 4) traverse back to parent + */ + + traverse_left: + if (current->ite_lchild != ITE_NULL) { + ipc_tree_entry_t next; + + next = current->ite_lchild; + current->ite_lchild = parent; + parent = current; + current = next; + goto traverse_left; + } + + traverse_entry: + splay->ist_ltree = current; + splay->ist_rtree = parent; + return current; + + traverse_right: + if (current->ite_rchild != ITE_NULL) { + ipc_tree_entry_t next; + + next = current->ite_rchild; + current->ite_rchild = parent; + parent = current; + current = next; + goto traverse_left; + } + + traverse_back: + if (parent == ITE_NULL) { + splay->ist_root = current; + return ITE_NULL; + } + + if (current->ite_name < parent->ite_name) { + ipc_tree_entry_t prev; + + prev = current; + current = parent; + parent = current->ite_lchild; + current->ite_lchild = prev; + goto traverse_entry; + } else { + ipc_tree_entry_t prev; + + prev = current; + current = parent; + parent = current->ite_rchild; + current->ite_rchild = prev; + goto traverse_back; + } +} + +void +ipc_splay_traverse_finish( + ipc_splay_tree_t splay) +{ + ipc_tree_entry_t root; + + root = splay->ist_root; + if (root != ITE_NULL) { + splay->ist_name = root->ite_name; + splay->ist_ltreep = &splay->ist_ltree; + splay->ist_rtreep = &splay->ist_rtree; + } + + ist_unlock(splay); +} + diff --git a/osfmk/ipc/ipc_splay.h b/osfmk/ipc/ipc_splay.h new file mode 100644 index 000000000..a25350d5d --- /dev/null +++ b/osfmk/ipc/ipc_splay.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000 Apple Computer, 
Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: ipc/ipc_splay.h + * Author: Rich Draves + * Date: 1989 + * + * Declarations of primitive splay tree operations. + */ + +#ifndef _IPC_IPC_SPLAY_H_ +#define _IPC_IPC_SPLAY_H_ + +#include +#include +#include +#include + +typedef struct ipc_splay_tree { + mach_port_name_t ist_name; /* name used in last lookup */ + ipc_tree_entry_t ist_root; /* root of middle tree */ + ipc_tree_entry_t ist_ltree; /* root of left tree */ + ipc_tree_entry_t *ist_ltreep; /* pointer into left tree */ + ipc_tree_entry_t ist_rtree; /* root of right tree */ + ipc_tree_entry_t *ist_rtreep; /* pointer into right tree */ +} *ipc_splay_tree_t; + +#define ist_lock(splay) /* no locking */ +#define ist_unlock(splay) /* no locking */ + +/* Initialize a raw splay tree */ +extern void ipc_splay_tree_init( + ipc_splay_tree_t splay); + +/* Pick a random entry in a splay tree */ +extern boolean_t ipc_splay_tree_pick( + ipc_splay_tree_t splay, + mach_port_name_t *namep, + ipc_tree_entry_t *entryp); + +/* Find an entry in a splay tree */ +extern ipc_tree_entry_t ipc_splay_tree_lookup( + ipc_splay_tree_t splay, + mach_port_name_t name); + +/* Insert a new entry into a splay tree */ +extern void ipc_splay_tree_insert( + ipc_splay_tree_t splay, + mach_port_name_t name, + ipc_tree_entry_t entry); + +/* Delete an entry from a splay tree */ +extern void ipc_splay_tree_delete( + ipc_splay_tree_t splay, + mach_port_name_t name, + ipc_tree_entry_t entry); + +/* Split a splay tree */ +extern void ipc_splay_tree_split( + ipc_splay_tree_t splay, + mach_port_name_t name, + ipc_splay_tree_t entry); + +/* Join two splay trees */ +extern void ipc_splay_tree_join( + ipc_splay_tree_t splay, + ipc_splay_tree_t small); + +/* Do a bounded splay tree lookup */ +extern void ipc_splay_tree_bounds( + ipc_splay_tree_t splay, + mach_port_name_t name, + mach_port_name_t *lowerp, + mach_port_name_t *upperp); + +/* Initialize a symmetric order traversal of a splay tree */ +extern ipc_tree_entry_t 
ipc_splay_traverse_start( + ipc_splay_tree_t splay); + +/* Return the next entry in a symmetric order traversal of a splay tree */ +extern ipc_tree_entry_t ipc_splay_traverse_next( + ipc_splay_tree_t splay, + boolean_t delete); + +/* Terminate a symmetric order traversal of a splay tree */ +extern void ipc_splay_traverse_finish( + ipc_splay_tree_t splay); + +#endif /* _IPC_IPC_SPLAY_H_ */ diff --git a/osfmk/ipc/ipc_table.c b/osfmk/ipc/ipc_table.c new file mode 100644 index 000000000..90d4cd9b6 --- /dev/null +++ b/osfmk/ipc/ipc_table.c @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:28 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/06/01 17:29:25 youngwor + * Added infrastructure for shared port space support + * + * Revision 1.1.1.1 1998/03/07 02:26:16 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.10.1 1994/09/23 02:12:16 ezf + * change marker to not FREE + * [1994/09/22 21:30:49 ezf] + * + * Revision 1.2.2.3 1993/07/22 16:17:30 rod + * Add ANSI prototypes. CR #9523. + * [1993/07/22 13:33:29 rod] + * + * Revision 1.2.2.2 1993/06/02 23:33:55 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:11:14 jeffc] + * + * Revision 1.2 1992/11/25 01:09:56 robert + * integrate changes below for norma_14 + * + * Philippe Bernadat (bernadat) at gr.osf.org + * Limit ipc table allocation chunks to 8 pages, otherwise + * the kernel might dead lock because of VM_PAGE_FREE_RESERVED + * limited to 15. [dlb@osf.org & barbou@gr.osf.org] + * [1992/11/13 19:31:46 robert] + * + * Revision 1.1 1992/09/30 02:08:13 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/10/09 16:11:08 af + * Revision 2.5.2.1 91/09/16 10:16:06 rpd + * Removed unused variables. + * [91/09/02 rpd] + * + * Revision 2.5.2.1 91/09/16 10:16:06 rpd + * Removed unused variables. + * [91/09/02 rpd] + * + * Revision 2.5 91/05/14 16:37:35 mrt + * Correcting copyright + * + * Revision 2.4 91/03/16 14:48:52 rpd + * Added ipc_table_realloc and ipc_table_reallocable. + * [91/03/04 rpd] + * + * Revision 2.3 91/02/05 17:24:15 mrt + * Changed to new Mach copyright + * [91/02/01 15:52:05 mrt] + * + * Revision 2.2 90/06/02 14:51:58 rpd + * Created for new IPC. + * [90/03/26 21:04:20 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_table.c + * Author: Rich Draves + * Date: 1989 + * + * Functions to manipulate tables of IPC capabilities. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Forward declarations + */ +void ipc_table_fill( + ipc_table_size_t its, + unsigned int num, + unsigned int min, + vm_size_t elemsize); + +/* + * We borrow the kalloc map, rather than creating + * yet another submap of the kernel map. 
+ */ + +extern vm_map_t kalloc_map; + +ipc_table_size_t ipc_table_entries; +unsigned int ipc_table_entries_size = 512; + +ipc_table_size_t ipc_table_dnrequests; +unsigned int ipc_table_dnrequests_size = 64; + +void +ipc_table_fill( + ipc_table_size_t its, /* array to fill */ + unsigned int num, /* size of array */ + unsigned int min, /* at least this many elements */ + vm_size_t elemsize) /* size of elements */ +{ + unsigned int index; + vm_size_t minsize = min * elemsize; + vm_size_t size; + vm_size_t incrsize; + + /* first use powers of two, up to the page size */ + + for (index = 0, size = 1; + (index < num) && (size < PAGE_SIZE); + size <<= 1) { + if (size >= minsize) { + its[index].its_size = size / elemsize; + index++; + } + } + + /* then increments of a page, then two pages, etc. */ + + for (incrsize = PAGE_SIZE; index < num;) { + unsigned int period; + + for (period = 0; + (period < 15) && (index < num); + period++, size += incrsize) { + if (size >= minsize) { + its[index].its_size = size / elemsize; + index++; + } + } + if (incrsize < (PAGE_SIZE << 3)) + incrsize <<= 1; + } +} + +void +ipc_table_init(void) +{ + ipc_table_entries = (ipc_table_size_t) + kalloc(sizeof(struct ipc_table_size) * + ipc_table_entries_size); + assert(ipc_table_entries != ITS_NULL); + + ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1, + 4, sizeof(struct ipc_entry) + + sizeof(ipc_entry_bits_t) + + sizeof(ipc_table_index_t)); + + /* the last two elements should have the same size */ + + ipc_table_entries[ipc_table_entries_size - 1].its_size = + ipc_table_entries[ipc_table_entries_size - 2].its_size; + + + ipc_table_dnrequests = (ipc_table_size_t) + kalloc(sizeof(struct ipc_table_size) * + ipc_table_dnrequests_size); + assert(ipc_table_dnrequests != ITS_NULL); + + ipc_table_fill(ipc_table_dnrequests, ipc_table_dnrequests_size - 1, + 2, sizeof(struct ipc_port_request)); + + /* the last element should have zero size */ + + ipc_table_dnrequests[ipc_table_dnrequests_size - 
1].its_size = 0; +} + +/* + * Routine: ipc_table_alloc + * Purpose: + * Allocate a table. + * Conditions: + * May block. + */ + +vm_offset_t +ipc_table_alloc( + vm_size_t size) +{ + vm_offset_t table; + + if (size < PAGE_SIZE) + table = kalloc(size); + else + if (kmem_alloc(kalloc_map, &table, size) != KERN_SUCCESS) + table = 0; + + return table; +} + +/* + * Routine: ipc_table_realloc + * Purpose: + * Reallocate a big table. + * + * The new table remaps the old table, + * so copying is not necessary. + * Conditions: + * Only works for page-size or bigger tables. + * May block. + */ + +vm_offset_t +ipc_table_realloc( + vm_size_t old_size, + vm_offset_t old_table, + vm_size_t new_size) +{ + vm_offset_t new_table; + + if (kmem_realloc(kalloc_map, old_table, old_size, + &new_table, new_size) != KERN_SUCCESS) + new_table = 0; + + return new_table; +} + +/* + * Routine: ipc_table_free + * Purpose: + * Free a table allocated with ipc_table_alloc or + * ipc_table_realloc. + * Conditions: + * May block. + */ + +void +ipc_table_free( + vm_size_t size, + vm_offset_t table) +{ + if (size < PAGE_SIZE) + kfree(table, size); + else + kmem_free(kalloc_map, table, size); +} diff --git a/osfmk/ipc/ipc_table.h b/osfmk/ipc/ipc_table.h new file mode 100644 index 000000000..0fe414ca9 --- /dev/null +++ b/osfmk/ipc/ipc_table.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:28 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/05/29 23:50:33 youngwor + * Added infrastructure for shared port space support + * + * Revision 1.1.1.1 1998/03/07 02:26:16 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.3 1995/01/06 19:46:05 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:41:41 dwm] + * + * Revision 1.1.8.2 1994/09/23 02:12:26 ezf + * change marker to not FREE + * [1994/09/22 21:30:53 ezf] + * + * Revision 1.1.8.1 1994/08/18 23:11:45 widyono + * RT IPC from RT2_SHARED + * [1994/08/18 15:49:24 widyono] + * + * Revision 1.1.6.1 1994/07/29 07:33:22 widyono + * Define default target size, ITS_SIZE_NONE + * [1994/07/28 22:27:01 widyono] + * + * Revision 1.1.2.4 1993/07/22 16:17:33 rod + * Add ANSI prototypes. CR #9523. + * [1993/07/22 13:33:33 rod] + * + * Revision 1.1.2.3 1993/06/07 22:11:46 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:02:58 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:34:02 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:11:17 jeffc] + * + * Revision 1.1 1992/09/30 02:29:14 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/05/14 16:37:52 mrt + * Correcting copyright + * + * Revision 2.4 91/03/16 14:49:01 rpd + * Added ipc_table_realloc. + * [91/03/04 rpd] + * + * Revision 2.3 91/02/05 17:24:19 mrt + * Changed to new Mach copyright + * [91/02/01 15:52:19 mrt] + * + * Revision 2.2 90/06/02 14:52:02 rpd + * Created for new IPC. + * [90/03/26 21:04:35 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_table.h + * Author: Rich Draves + * Date: 1989 + * + * Definitions for tables, used for IPC capabilities (ipc_entry_t) + * and dead-name requests (ipc_port_request_t). + */ + +#ifndef _IPC_IPC_TABLE_H_ +#define _IPC_IPC_TABLE_H_ + +#include +#include + +/* + * The is_table_next field of an ipc_space_t points to + * an ipc_table_size structure. 
These structures must + * be elements of an array, ipc_table_entries. + * + * The array must end with two elements with the same its_size value. + * Except for the terminating element, the its_size values must + * be strictly increasing. The largest (last) its_size value + * must be less than or equal to MACH_PORT_INDEX(MACH_PORT_DEAD). + * This ensures that + * 1) MACH_PORT_INDEX(MACH_PORT_DEAD) isn't a valid index + * in the table, so ipc_entry_get won't allocate it. + * 2) MACH_PORT_MAKE(index+1, 0) and MAKE_PORT_MAKE(size, 0) + * won't ever overflow. + * + * + * The ipr_size field of the first element in a table of + * dead-name requests (ipc_port_request_t) points to the + * ipc_table_size structure. The structures must be elements + * of ipc_table_dnrequests. ipc_table_dnrequests must end + * with an element with zero its_size, and except for this last + * element, the its_size values must be strictly increasing. + * + * The is_table_next field points to the ipc_table_size structure + * for the next larger size of table, not the one currently in use. + * The ipr_size field points to the currently used ipc_table_size. + */ + +typedef natural_t ipc_table_index_t; /* index into tables */ +typedef natural_t ipc_table_elems_t; /* size of tables */ + +typedef struct ipc_table_size { + ipc_table_elems_t its_size; /* number of elements in table */ +} *ipc_table_size_t; + +#define ITS_NULL ((ipc_table_size_t) 0) +#define ITS_SIZE_NONE -1 + +extern ipc_table_size_t ipc_table_entries; +extern ipc_table_size_t ipc_table_dnrequests; + +/* Initialize IPC capabilities table storage */ +extern void ipc_table_init(void); + +/* + * Note that ipc_table_alloc, ipc_table_realloc, and ipc_table_free + * all potentially use the VM system. Hence simple locks can't + * be held across them. + * + * We can't use a copying realloc, because the realloc happens + * with the data unlocked. ipc_table_realloc remaps the data, + * so it is OK. 
+ */ + +/* Allocate a table */ +extern vm_offset_t ipc_table_alloc( + vm_size_t size); + +/* Reallocate a big table */ +extern vm_offset_t ipc_table_realloc( + vm_size_t old_size, + vm_offset_t old_table, + vm_size_t new_size); + +/* Free a table */ +extern void ipc_table_free( + vm_size_t size, + vm_offset_t table); + +#define it_entries_alloc(its) \ + ((ipc_entry_t) \ + ipc_table_alloc(round_page( \ + (its)->its_size * sizeof(struct ipc_entry)))) + +#define it_entries_reallocable(its) \ + ((its)->its_size * sizeof(struct ipc_entry) \ + >= PAGE_SIZE) + +#define it_entries_realloc(its, table, nits) \ + ((ipc_entry_t) \ + ipc_table_realloc( \ + round_page((its)->its_size * sizeof(struct ipc_entry)), \ + (vm_offset_t)(table), \ + round_page((nits)->its_size * sizeof(struct ipc_entry)) \ + )) + +#define it_entries_free(its, table) \ + ipc_table_free( \ + round_page((its)->its_size * sizeof(struct ipc_entry)), \ + (vm_offset_t)(table) \ + ) + +#define it_dnrequests_alloc(its) \ + ((ipc_port_request_t) \ + ipc_table_alloc((its)->its_size * \ + sizeof(struct ipc_port_request))) + +#define it_dnrequests_free(its, table) \ + ipc_table_free((its)->its_size * \ + sizeof(struct ipc_port_request), \ + (vm_offset_t)(table)) + +#endif /* _IPC_IPC_TABLE_H_ */ diff --git a/osfmk/ipc/ipc_types.h b/osfmk/ipc/ipc_types.h new file mode 100644 index 000000000..cebbc57f7 --- /dev/null +++ b/osfmk/ipc/ipc_types.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Define Basic IPC types available to callers. + * These are not intended to be used directly, but + * are used to define other types available through + * port.h and mach_types.h for in-kernel entities. + */ +#ifndef _IPC_TYPES_H_ +#define _IPC_TYPES_H_ + +#ifdef KERNEL_PRIVATE + +#if !defined(MACH_KERNEL_PRIVATE) + +/* + * For kernel code that resides outside of mach + * we define empty structs so that everything will + * remain strongly typed, without giving out + * implementation details. + */ +struct ipc_object ; +struct ipc_space ; +struct ipc_port ; + +#endif /* !MACH_KERNEL_PRIVATE */ + +typedef struct ipc_object *ipc_object_t; +typedef struct ipc_space *ipc_space_t; +typedef struct ipc_port *ipc_port_t; + +#define IPC_OBJECT_NULL ((ipc_object_t) 0) +#define IPC_OBJECT_DEAD ((ipc_object_t)~0) +#define IPC_OBJECT_VALID(io) (((io) != IPC_OBJECT_NULL) && \ + ((io) != IPC_OBJECT_DEAD)) + +#define IPC_PORT_NULL ((ipc_port_t) 0) +#define IPC_PORT_DEAD ((ipc_port_t)~0) +#define IPC_PORT_VALID(port) (((port) != IPC_PORT_NULL) && \ + ((port) != IPC_PORT_DEAD)) + +#define IPC_SPACE_NULL ((ipc_space_t) 0) + +#endif /* KERNEL_PRIVATE */ + +#endif /* _IPC_TYPES_H_ */ diff --git a/osfmk/ipc/mach_debug.c b/osfmk/ipc/mach_debug.c new file mode 100644 index 000000000..7fe99ea81 --- /dev/null +++ b/osfmk/ipc/mach_debug.c @@ -0,0 +1,580 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: ipc/mach_debug.c + * Author: Rich Draves + * Date: 1989 + * + * Exported IPC debug calls. + */ +#include + +#include +#include +#include +#include +#include +#include +#include + +#if MACH_IPC_DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +/* + * Routine: mach_port_get_srights [kernel call] + * Purpose: + * Retrieve the number of extant send rights + * that a receive right has. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Retrieved number of send rights. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote receive rights. + */ + +kern_return_t +mach_port_get_srights( + ipc_space_t space, + mach_port_name_t name, + mach_port_rights_t *srightsp) +{ +#if !MACH_IPC_DEBUG + return KERN_FAILURE; +#else + ipc_port_t port; + kern_return_t kr; + mach_port_rights_t srights; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + srights = port->ip_srights; + ip_unlock(port); + + *srightsp = srights; + return KERN_SUCCESS; +#endif /* MACH_IPC_DEBUG */ +} + +/* + * Routine: host_ipc_hash_info + * Purpose: + * Return information about the global reverse hash table. + * Conditions: + * Nothing locked. Obeys CountInOut protocol. + * Returns: + * KERN_SUCCESS Returned information. + * KERN_INVALID_HOST The host is null. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
 + */ + +kern_return_t +host_ipc_hash_info( + host_t host, + hash_info_bucket_array_t *infop, + mach_msg_type_number_t *countp) +{ +#if !MACH_IPC_DEBUG + return KERN_FAILURE; +#else + vm_offset_t addr; + vm_size_t size; + hash_info_bucket_t *info; + unsigned int potential, actual; + kern_return_t kr; + + if (host == HOST_NULL) + return KERN_INVALID_HOST; + + /* start with in-line data */ + + info = *infop; + potential = *countp; + + for (;;) { + actual = ipc_hash_info(info, potential); + if (actual <= potential) + break; + + /* allocate more memory */ + + if (info != *infop) + kmem_free(ipc_kernel_map, addr, size); + + size = round_page(actual * sizeof *info); + kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size); + if (kr != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + + info = (hash_info_bucket_t *) addr; + potential = size/sizeof *info; + } + + if (info == *infop) { + /* data fit in-line; nothing to deallocate */ + + *countp = actual; + } else if (actual == 0) { + kmem_free(ipc_kernel_map, addr, size); + + *countp = 0; + } else { + vm_map_copy_t copy; + vm_size_t used; + + used = round_page(actual * sizeof *info); + + if (used != size) + kmem_free(ipc_kernel_map, addr + used, size - used); + + kr = vm_map_copyin(ipc_kernel_map, addr, used, + TRUE, &copy); + assert(kr == KERN_SUCCESS); + + *infop = (hash_info_bucket_t *) copy; + *countp = actual; + } + + return KERN_SUCCESS; +#endif /* MACH_IPC_DEBUG */ +} + +/* + * Routine: mach_port_space_info + * Purpose: + * Returns information about an IPC space. + * Conditions: + * Nothing locked. Obeys CountInOut protocol. + * Returns: + * KERN_SUCCESS Returned information. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +mach_port_space_info( + ipc_space_t space, + ipc_info_space_t *infop, + ipc_info_name_array_t *tablep, + mach_msg_type_number_t *tableCntp, + ipc_info_tree_name_array_t *treep, + mach_msg_type_number_t *treeCntp) +{ +#if !MACH_IPC_DEBUG + return KERN_FAILURE; +#else + ipc_info_name_t *table_info; + unsigned int table_potential, table_actual; + vm_offset_t table_addr; + vm_size_t table_size; + ipc_info_tree_name_t *tree_info; + unsigned int tree_potential, tree_actual; + vm_offset_t tree_addr; + vm_size_t tree_size; + ipc_tree_entry_t tentry; + ipc_entry_t table; + ipc_entry_num_t tsize; + mach_port_index_t index; + kern_return_t kr; + ipc_entry_bits_t *capability; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + /* start with in-line memory */ + + table_info = *tablep; + table_potential = *tableCntp; + tree_info = *treep; + tree_potential = *treeCntp; + + for (;;) { + is_read_lock(space); + if (!space->is_active) { + is_read_unlock(space); + if (table_info != *tablep) + kmem_free(ipc_kernel_map, + table_addr, table_size); + if (tree_info != *treep) + kmem_free(ipc_kernel_map, + tree_addr, tree_size); + return KERN_INVALID_TASK; + } + + table_actual = space->is_table_size; + tree_actual = space->is_tree_total; + + if ((table_actual <= table_potential) && + (tree_actual <= tree_potential)) + break; + + is_read_unlock(space); + + if (table_actual > table_potential) { + if (table_info != *tablep) + kmem_free(ipc_kernel_map, + table_addr, table_size); + + table_size = round_page(table_actual * + sizeof *table_info); + kr = kmem_alloc(ipc_kernel_map, + &table_addr, table_size); + if (kr != KERN_SUCCESS) { + if (tree_info != *treep) + kmem_free(ipc_kernel_map, + tree_addr, tree_size); + + return KERN_RESOURCE_SHORTAGE; + } + + table_info = (ipc_info_name_t *) table_addr; + table_potential = table_size/sizeof *table_info; + } + + if (tree_actual > tree_potential) { + if (tree_info != *treep) + kmem_free(ipc_kernel_map, + tree_addr, 
tree_size); + + tree_size = round_page(tree_actual * + sizeof *tree_info); + kr = kmem_alloc(ipc_kernel_map, + &tree_addr, tree_size); + if (kr != KERN_SUCCESS) { + if (table_info != *tablep) + kmem_free(ipc_kernel_map, + table_addr, table_size); + + return KERN_RESOURCE_SHORTAGE; + } + + tree_info = (ipc_info_tree_name_t *) tree_addr; + tree_potential = tree_size/sizeof *tree_info; + } + } + /* space is read-locked and active; we have enough wired memory */ + + infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD); + infop->iis_table_size = space->is_table_size; + infop->iis_table_next = space->is_table_next->its_size; + infop->iis_tree_size = space->is_tree_total; + infop->iis_tree_small = space->is_tree_small; + infop->iis_tree_hash = space->is_tree_hash; + + table = space->is_table; + tsize = space->is_table_size; + + for (index = 0; index < tsize; index++) { + ipc_info_name_t *iin = &table_info[index]; + ipc_entry_t entry = &table[index]; + ipc_entry_bits_t bits; + + bits = entry->ie_bits; + iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits)); + iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE; + iin->iin_type = IE_BITS_TYPE(bits); + iin->iin_urefs = IE_BITS_UREFS(bits); + iin->iin_object = (vm_offset_t) entry->ie_object; + iin->iin_next = entry->ie_next; + iin->iin_hash = entry->ie_index; + } + + for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0; + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { + ipc_info_tree_name_t *iitn = &tree_info[index++]; + ipc_info_name_t *iin = &iitn->iitn_name; + ipc_entry_t entry = &tentry->ite_entry; + ipc_entry_bits_t bits = entry->ie_bits; + + assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE); + + iin->iin_name = tentry->ite_name; + iin->iin_collision = (bits & IE_BITS_COLLISION) ? 
TRUE : FALSE; + iin->iin_type = IE_BITS_TYPE(bits); + iin->iin_urefs = IE_BITS_UREFS(bits); + iin->iin_object = (vm_offset_t) entry->ie_object; + iin->iin_next = entry->ie_next; + iin->iin_hash = entry->ie_index; + + if (tentry->ite_lchild == ITE_NULL) + iitn->iitn_lchild = MACH_PORT_NULL; + else + iitn->iitn_lchild = tentry->ite_lchild->ite_name; + + if (tentry->ite_rchild == ITE_NULL) + iitn->iitn_rchild = MACH_PORT_NULL; + else + iitn->iitn_rchild = tentry->ite_rchild->ite_name; + + } + ipc_splay_traverse_finish(&space->is_tree); + is_read_unlock(space); + + if (table_info == *tablep) { + /* data fit in-line; nothing to deallocate */ + + *tableCntp = table_actual; + } else if (table_actual == 0) { + kmem_free(ipc_kernel_map, table_addr, table_size); + + *tableCntp = 0; + } else { + vm_size_t size_used, rsize_used; + vm_map_copy_t copy; + + /* kmem_alloc doesn't zero memory */ + + size_used = table_actual * sizeof *table_info; + rsize_used = round_page(size_used); + + if (rsize_used != table_size) + kmem_free(ipc_kernel_map, + table_addr + rsize_used, + table_size - rsize_used); + + if (size_used != rsize_used) + bzero((char *) (table_addr + size_used), + rsize_used - size_used); + + kr = vm_map_unwire(ipc_kernel_map, table_addr, + table_addr + rsize_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used, + TRUE, ©); + assert(kr == KERN_SUCCESS); + + *tablep = (ipc_info_name_t *) copy; + *tableCntp = table_actual; + } + + if (tree_info == *treep) { + /* data fit in-line; nothing to deallocate */ + + *treeCntp = tree_actual; + } else if (tree_actual == 0) { + kmem_free(ipc_kernel_map, tree_addr, tree_size); + + *treeCntp = 0; + } else { + vm_size_t size_used, rsize_used; + vm_map_copy_t copy; + + /* kmem_alloc doesn't zero memory */ + + size_used = tree_actual * sizeof *tree_info; + rsize_used = round_page(size_used); + + if (rsize_used != tree_size) + kmem_free(ipc_kernel_map, + tree_addr + rsize_used, + 
tree_size - rsize_used); + + if (size_used != rsize_used) + bzero((char *) (tree_addr + size_used), + rsize_used - size_used); + + kr = vm_map_unwire(ipc_kernel_map, tree_addr, + tree_addr + rsize_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used, + TRUE, ©); + assert(kr == KERN_SUCCESS); + + *treep = (ipc_info_tree_name_t *) copy; + *treeCntp = tree_actual; + } + + return KERN_SUCCESS; +#endif /* MACH_IPC_DEBUG */ +} + +/* + * Routine: mach_port_dnrequest_info + * Purpose: + * Returns information about the dead-name requests + * registered with the named receive right. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Retrieved information. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote receive rights. + */ + +kern_return_t +mach_port_dnrequest_info( + ipc_space_t space, + mach_port_name_t name, + unsigned int *totalp, + unsigned int *usedp) +{ +#if !MACH_IPC_DEBUG + return KERN_FAILURE; +#else + unsigned int total, used; + ipc_port_t port; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + if (port->ip_dnrequests == IPR_NULL) { + total = 0; + used = 0; + } else { + ipc_port_request_t dnrequests = port->ip_dnrequests; + ipc_port_request_index_t index; + + total = dnrequests->ipr_size->its_size; + + for (index = 1, used = 0; + index < total; index++) { + ipc_port_request_t ipr = &dnrequests[index]; + + if (ipr->ipr_name != MACH_PORT_NULL) + used++; + } + } + ip_unlock(port); + + *totalp = total; + *usedp = used; + return KERN_SUCCESS; +#endif /* MACH_IPC_DEBUG */ +} + +/* + * Routine: mach_port_kernel_object [kernel call] + * Purpose: + * Retrieve the type and address of the kernel object + * represented by a 
 *		send or receive right.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved kernel object info.
 *		KERN_INVALID_TASK	The space is null.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right.
 *		KERN_INVALID_RIGHT	Name doesn't denote
 *					send or receive rights.
 */

kern_return_t
mach_port_kernel_object(
	ipc_space_t			space,
	mach_port_name_t		name,
	unsigned int			*typep,
	vm_offset_t			*addrp)
{
#if !MACH_IPC_DEBUG
	/* debug call not configured into this kernel */
	return KERN_FAILURE;
#else
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	/* right must carry send and/or receive to have a kernel object */
	if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = (ipc_port_t) entry->ie_object;
	assert(port != IP_NULL);

	/* take the port lock before dropping the space lock (lock ordering) */
	ip_lock(port);
	is_read_unlock(space);

	if (!ip_active(port)) {
		ip_unlock(port);
		return KERN_INVALID_RIGHT;
	}

	*typep = (unsigned int) ip_kotype(port);
	*addrp = (vm_offset_t) port->ip_kobject;
	ip_unlock(port);
	return KERN_SUCCESS;

#endif /* MACH_IPC_DEBUG */
}
diff --git a/osfmk/ipc/mach_msg.c b/osfmk/ipc/mach_msg.c
new file mode 100644
index 000000000..5c5364323
--- /dev/null
+++ b/osfmk/ipc/mach_msg.c
@@ -0,0 +1,1933 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/mach_msg.c + * Author: Rich Draves + * Date: 1989 + * + * Exported message traps. See mach/message.h. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include /* JMM - to support handoff hack */ +#include +#include + +/* + * Forward declarations + */ + +mach_msg_return_t mach_msg_send( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_timeout_t timeout, + mach_port_name_t notify); + +mach_msg_return_t mach_msg_receive( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + void (*continuation)(mach_msg_return_t), + mach_msg_size_t slist_size); + + +mach_msg_return_t msg_receive_error( + ipc_kmsg_t kmsg, + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_port_seqno_t seqno, + ipc_space_t space); + +/* the size of each trailer has to be listed here for copyout purposes */ +mach_msg_trailer_size_t trailer_size[] = { + sizeof(mach_msg_trailer_t), + sizeof(mach_msg_seqno_trailer_t), + sizeof(mach_msg_security_trailer_t) }; + +security_token_t KERNEL_SECURITY_TOKEN = KERNEL_SECURITY_TOKEN_VALUE; + +mach_msg_format_0_trailer_t trailer_template = { + /* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0, + /* mach_msg_trailer_size_t */ MACH_MSG_TRAILER_MINIMUM_SIZE, + /* mach_port_seqno_t */ 0, + /* security_token_t */ KERNEL_SECURITY_TOKEN_VALUE +}; + +/* + * Routine: mach_msg_send + * Purpose: + * Send a message. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Sent the message. + * MACH_SEND_MSG_TOO_SMALL Message smaller than a header. + * MACH_SEND_NO_BUFFER Couldn't allocate buffer. + * MACH_SEND_INVALID_DATA Couldn't copy message data. + * MACH_SEND_INVALID_HEADER + * Illegal value in the message header bits. + * MACH_SEND_INVALID_DEST The space is dead. 
 *		MACH_SEND_INVALID_NOTIFY	Bad notify port.
 *		MACH_SEND_INVALID_DEST	Can't copyin destination port.
 *		MACH_SEND_INVALID_REPLY	Can't copyin reply port.
 *		MACH_SEND_TIMED_OUT	Timeout expired without delivery.
 *		MACH_SEND_INTERRUPTED	Delivery interrupted.
 *		MACH_SEND_NO_NOTIFY	Can't allocate a msg-accepted request.
 *		MACH_SEND_WILL_NOTIFY	Msg-accepted notif. requested.
 *		MACH_SEND_NOTIFY_IN_PROGRESS
 *			This space has already forced a message to this port.
 */

mach_msg_return_t
mach_msg_send(
	mach_msg_header_t	*msg,
	mach_msg_option_t	option,
	mach_msg_size_t		send_size,
	mach_msg_timeout_t	timeout,
	mach_port_name_t	notify)
{
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	/* copy the user's message into a kernel buffer */
	mr = ipc_kmsg_get(msg, send_size, &kmsg);

	if (mr != MACH_MSG_SUCCESS)
		return mr;

	/* MACH_SEND_CANCEL requires a notify port for the copyin */
	if (option & MACH_SEND_CANCEL) {
		if (notify == MACH_PORT_NULL)
			mr = MACH_SEND_INVALID_NOTIFY;
		else
			mr = ipc_kmsg_copyin(kmsg, space, map, notify);
	} else
		mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
	if (mr != MACH_MSG_SUCCESS) {
		ipc_kmsg_free(kmsg);
		return mr;
	}

	mr = ipc_kmsg_send(kmsg, option & MACH_SEND_TIMEOUT, timeout);

	if (mr != MACH_MSG_SUCCESS) {
		/*
		 * The message was not delivered: pseudo-copyout the rights
		 * back into the caller's message so nothing is lost, and
		 * return the (possibly annotated) message to user space.
		 */
		mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL);
		(void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
	}

	return mr;
}

/*
 *	Routine:	mach_msg_receive
 *	Purpose:
 *		Receive a message.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Received a message.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right,
 *			or the denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 *		MACH_RCV_TOO_LARGE	Message wouldn't fit into buffer.
 *		MACH_RCV_TIMED_OUT	Timeout expired without a message.
 *		MACH_RCV_INTERRUPTED	Reception interrupted.
 *		MACH_RCV_PORT_DIED	Port/set died while receiving.
 *		MACH_RCV_PORT_CHANGED	Port moved into set while receiving.
 *		MACH_RCV_INVALID_DATA	Couldn't copy to user buffer.
 *		MACH_RCV_INVALID_NOTIFY	Bad notify port.
 *		MACH_RCV_HEADER_ERROR
 */

/*
 * Finish a receive that blocked: pick up the state the waker stashed in
 * the current thread (ith_* fields), copy the message out to user space,
 * and translate any failure into the right MACH_RCV_* code.  Called both
 * on the direct path from mach_msg_receive and from the continuation.
 */
mach_msg_return_t
mach_msg_receive_results(void)
{
	thread_t          self = current_thread();
	ipc_space_t       space = current_space();
	vm_map_t          map = current_map();

	/* receive state deposited in the thread by the sender/mqueue code */
	ipc_object_t      object = self->ith_object;
	mach_msg_return_t mr = self->ith_state;
	mach_msg_header_t *msg = self->ith_msg;
	mach_msg_option_t option = self->ith_option;
	ipc_kmsg_t        kmsg = self->ith_kmsg;
	mach_port_seqno_t seqno = self->ith_seqno;
	/* NOTE(review): unused here; re-read (shadowed) in the
	 * MACH_RCV_OVERWRITE branch below from the same field. */
	mach_msg_size_t   slist_size = self->ith_scatter_list_size;

	mach_msg_format_0_trailer_t *trailer;

	/* drop the ref taken on the port/pset by ipc_mqueue_copyin */
	ipc_object_release(object);

	if (mr != MACH_MSG_SUCCESS) {

		if (mr == MACH_RCV_TOO_LARGE ) {
			if (option & MACH_RCV_LARGE) {
				/*
				 * We need to inform the user-level code that it needs more
				 * space.  The value for how much space was returned in the
				 * msize save area instead of the message (which was left on
				 * the queue).
				 */
				if (copyout((char *) &self->ith_msize,
					    (char *) &msg->msgh_size,
					    sizeof(mach_msg_size_t)))
					mr = MACH_RCV_INVALID_DATA;
				goto out;
			}

			/* message was dequeued and dropped; report what we can */
			if (msg_receive_error(kmsg, msg, option, seqno, space)
			    == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		}
		goto out;
	}

	/* trailer lives just past the (rounded) message body */
	trailer = (mach_msg_format_0_trailer_t *)
		((vm_offset_t)&kmsg->ikm_header +
		 round_msg(kmsg->ikm_header.msgh_size));
	if (option & MACH_RCV_TRAILER_MASK) {
		trailer->msgh_seqno = seqno;
		trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
	}

	/*
	 * If MACH_RCV_OVERWRITE was specified, try to get the scatter
	 * list and verify it against the contents of the message.  If
	 * there is any problem with it, we will continue without it as
	 * normal.
	 */
	if (option & MACH_RCV_OVERWRITE) {
		mach_msg_size_t slist_size = self->ith_scatter_list_size;
		mach_msg_body_t *slist;

		slist = ipc_kmsg_copyin_scatter(msg, slist_size, kmsg);
		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL, slist);
		ipc_kmsg_free_scatter(slist, slist_size);
	} else {
		mr = ipc_kmsg_copyout(kmsg, space, map,
				      MACH_PORT_NULL, MACH_MSG_BODY_NULL);
	}

	if (mr != MACH_MSG_SUCCESS) {
		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
			/* header copied out OK; deliver the partial message */
			if (ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size +
			   trailer->msgh_trailer_size) == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		}
		else {
			if (msg_receive_error(kmsg, msg, option, seqno, space)
			    == MACH_RCV_INVALID_DATA)
				mr = MACH_RCV_INVALID_DATA;
		}
		goto out;
	}
	mr = ipc_kmsg_put(msg,
			  kmsg,
			  kmsg->ikm_header.msgh_size +
			  trailer->msgh_trailer_size);
 out:
	return mr;
}

/*
 * Receive a message on rcv_name: resolve the name to a message queue,
 * record the receive parameters in the current thread, block on the
 * queue, then finish via mach_msg_receive_results.
 */
mach_msg_return_t
mach_msg_receive(
	mach_msg_header_t	*msg,
	mach_msg_option_t	option,
	mach_msg_size_t		rcv_size,
	mach_port_name_t	rcv_name,
	mach_msg_timeout_t	timeout,
	void			(*continuation)(mach_msg_return_t),
	mach_msg_size_t		slist_size)
{
	thread_t self = current_thread();
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_object_t object;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	mach_msg_body_t *slist;
	mach_msg_format_0_trailer_t *trailer;

	mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
	if (mr != MACH_MSG_SUCCESS) {
		return mr;
	}
	/* hold ref for object */

	/* stash receive parameters where the waker and results code expect them */
	self->ith_msg = msg;
	self->ith_object = object;
	self->ith_msize = rcv_size;
	self->ith_option = option;
	self->ith_scatter_list_size = slist_size;
	self->ith_continuation = continuation;

	ipc_mqueue_receive(mqueue, option, rcv_size, timeout, THREAD_ABORTSAFE);
	return mach_msg_receive_results();
}

/* Continuation entry point: finish the receive after a thread switch. */
void
mach_msg_receive_continue(void)
{
	thread_t self = current_thread();

	(*self->ith_continuation)(mach_msg_receive_results());
}

/*
 * Toggle this to compile the hotpath in/out
 * If compiled in, the run-time toggle "enable_hotpath" below
 * eases testing & debugging
 */
#define ENABLE_HOTPATH 1	/* Hacked on for now */

#if	ENABLE_HOTPATH
/*
 * These counters allow tracing of hotpath behavior under test loads.
 * A couple key counters are unconditional (see below).
 */
#define	HOTPATH_DEBUG	0	/* Toggle to include lots of counters */
#if	HOTPATH_DEBUG
#define	HOT(expr)	expr

unsigned int c_mmot_FIRST = 0;			/* Unused First Counter */
unsigned int c_mmot_combined_S_R = 0;		/* hotpath candidates */
unsigned int c_mach_msg_trap_switch_fast = 0;	/* hotpath successes */
unsigned int c_mmot_kernel_send = 0;		/* kernel server */
unsigned int c_mmot_cold_000 = 0;		/* see below ... */
unsigned int c_mmot_smallsendsize = 0;
unsigned int c_mmot_oddsendsize = 0;
unsigned int c_mmot_bigsendsize = 0;
unsigned int c_mmot_copyinmsg_fail = 0;
unsigned int c_mmot_g_slow_copyin3 = 0;
unsigned int c_mmot_cold_006 = 0;
unsigned int c_mmot_cold_007 = 0;
unsigned int c_mmot_cold_008 = 0;
unsigned int c_mmot_cold_009 = 0;
unsigned int c_mmot_cold_010 = 0;
unsigned int c_mmot_cold_012 = 0;
unsigned int c_mmot_cold_013 = 0;
unsigned int c_mmot_cold_014 = 0;
unsigned int c_mmot_cold_016 = 0;
unsigned int c_mmot_cold_018 = 0;
unsigned int c_mmot_cold_019 = 0;
unsigned int c_mmot_cold_020 = 0;
unsigned int c_mmot_cold_021 = 0;
unsigned int c_mmot_cold_022 = 0;
unsigned int c_mmot_cold_023 = 0;
unsigned int c_mmot_cold_024 = 0;
unsigned int c_mmot_cold_025 = 0;
unsigned int c_mmot_cold_026 = 0;
unsigned int c_mmot_cold_027 = 0;
unsigned int c_mmot_hot_fSR_ok = 0;
unsigned int c_mmot_cold_029 = 0;
unsigned int c_mmot_cold_030 = 0;
unsigned int c_mmot_cold_031 = 0;
unsigned int c_mmot_cold_032 = 0;
unsigned int c_mmot_cold_033 = 0;
unsigned int c_mmot_bad_rcvr = 0;
unsigned int c_mmot_rcvr_swapped = 0;
unsigned int c_mmot_rcvr_locked = 0;
unsigned int c_mmot_rcvr_tswapped = 0;
unsigned int c_mmot_rcvr_freed = 0;
unsigned int c_mmot_g_slow_copyout6 = 0;
unsigned int c_mmot_g_slow_copyout5 = 0;
unsigned int c_mmot_cold_037 = 0;
unsigned int c_mmot_cold_038 = 0;
unsigned int c_mmot_cold_039 = 0;
unsigned int c_mmot_g_slow_copyout4 = 0;
unsigned int c_mmot_g_slow_copyout3 = 0;
unsigned int c_mmot_hot_ok1 = 0;
unsigned int c_mmot_hot_ok2 = 0;
unsigned int c_mmot_hot_ok3 = 0;
unsigned int c_mmot_g_slow_copyout1 = 0;
unsigned int c_mmot_g_slow_copyout2 = 0;
unsigned int c_mmot_getback_fast_copyin = 0;
unsigned int c_mmot_cold_048 = 0;
unsigned int c_mmot_getback_FastSR = 0;
unsigned int c_mmot_cold_050 = 0;
unsigned int c_mmot_cold_051 = 0;
unsigned int c_mmot_cold_052 = 0;
unsigned int c_mmot_cold_053 = 0;
unsigned int c_mmot_fastkernelreply = 0;
unsigned int c_mmot_cold_055 = 0;
unsigned int c_mmot_getback_fast_put = 0;
unsigned int c_mmot_LAST = 0;			/* End Marker - Unused */

void db_mmot_zero_counters(void);	/* forward; */
void db_mmot_show_counters(void);	/* forward; */

/*
 * NOTE(review): this walks from &c_mmot_FIRST to &c_mmot_LAST and
 * therefore assumes the linker lays these globals out contiguously in
 * declaration order — a debug-only hack, not portable C.
 */
void	/* Call from the debugger to clear all counters */
db_mmot_zero_counters(void)
{
	register unsigned int *ip = &c_mmot_FIRST;
	while (ip <= &c_mmot_LAST)
		*ip++ = 0;
}

void	/* Call from the debugger to show all counters */
db_mmot_show_counters(void)
{
#define	xx(str)	printf("%s: %d\n", # str, str);

	xx(c_mmot_combined_S_R);
	xx(c_mach_msg_trap_switch_fast);
	xx(c_mmot_kernel_send);
	xx(c_mmot_cold_000);
	xx(c_mmot_smallsendsize);
	xx(c_mmot_oddsendsize);
	xx(c_mmot_bigsendsize);
	xx(c_mmot_copyinmsg_fail);
	xx(c_mmot_g_slow_copyin3);
	xx(c_mmot_cold_006);
	xx(c_mmot_cold_007);
	xx(c_mmot_cold_008);
	xx(c_mmot_cold_009);
	xx(c_mmot_cold_010);
	xx(c_mmot_cold_012);
	xx(c_mmot_cold_013);
	xx(c_mmot_cold_014);
	xx(c_mmot_cold_016);
	xx(c_mmot_cold_018);
	xx(c_mmot_cold_019);
	xx(c_mmot_cold_020);
	xx(c_mmot_cold_021);
	xx(c_mmot_cold_022);
	xx(c_mmot_cold_023);
	xx(c_mmot_cold_024);
	xx(c_mmot_cold_025);
	xx(c_mmot_cold_026);
	xx(c_mmot_cold_027);
	xx(c_mmot_hot_fSR_ok);
	xx(c_mmot_cold_029);
	xx(c_mmot_cold_030);
	xx(c_mmot_cold_031);
	xx(c_mmot_cold_032);
	xx(c_mmot_cold_033);
	xx(c_mmot_bad_rcvr);
	xx(c_mmot_rcvr_swapped);
	xx(c_mmot_rcvr_locked);
	xx(c_mmot_rcvr_tswapped);
	xx(c_mmot_rcvr_freed);
	xx(c_mmot_g_slow_copyout6);
	xx(c_mmot_g_slow_copyout5);
	xx(c_mmot_cold_037);
	xx(c_mmot_cold_038);
	xx(c_mmot_cold_039);
	xx(c_mmot_g_slow_copyout4);
	xx(c_mmot_g_slow_copyout3);
	xx(c_mmot_g_slow_copyout1);
	xx(c_mmot_hot_ok3);
	xx(c_mmot_hot_ok2);
	xx(c_mmot_hot_ok1);
	xx(c_mmot_g_slow_copyout2);
	xx(c_mmot_getback_fast_copyin);
	xx(c_mmot_cold_048);
	xx(c_mmot_getback_FastSR);
	xx(c_mmot_cold_050);
	xx(c_mmot_cold_051);
	xx(c_mmot_cold_052);
	xx(c_mmot_cold_053);
	xx(c_mmot_fastkernelreply);
	xx(c_mmot_cold_055);
	xx(c_mmot_getback_fast_put);

#undef	xx
}

#else	/* !HOTPATH_DEBUG */

/*
 * Duplicate just these few so we can always do a quick sanity check
 */
unsigned int c_mmot_combined_S_R = 0;		/* hotpath candidates */
unsigned int c_mach_msg_trap_switch_fast = 0;	/* hotpath successes */
unsigned int c_mmot_kernel_send = 0;		/* kernel server calls */
#define	HOT(expr)	/* no optional counters */

#endif	/* !HOTPATH_DEBUG */

boolean_t enable_hotpath = TRUE;	/* Patchable, just in case ... */
#endif	/* ENABLE_HOTPATH */

/*
 *	Routine:	mach_msg_overwrite_trap [mach trap]
 *	Purpose:
 *		Possibly send a message; possibly receive a message.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		All of mach_msg_send and mach_msg_receive error codes.
+ */ + +mach_msg_return_t +mach_msg_overwrite_trap( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify, + mach_msg_header_t *rcv_msg, + mach_msg_size_t scatter_list_size) +{ + register mach_msg_header_t *hdr; + mach_msg_return_t mr = MACH_MSG_SUCCESS; + /* mask out some of the options before entering the hot path */ + mach_msg_option_t masked_option = + option & ~(MACH_SEND_TRAILER|MACH_RCV_TRAILER_MASK|MACH_RCV_LARGE); + int i; + +#if ENABLE_HOTPATH + /* BEGINNING OF HOT PATH */ + if ((masked_option == (MACH_SEND_MSG|MACH_RCV_MSG)) && enable_hotpath) { + register thread_t self = current_thread(); + register mach_msg_format_0_trailer_t *trailer; + + ipc_space_t space = current_act()->task->itk_space; + ipc_kmsg_t kmsg; + register ipc_port_t dest_port; + ipc_object_t rcv_object; + register ipc_mqueue_t rcv_mqueue; + mach_msg_size_t reply_size; + ipc_kmsg_t rcv_kmsg; + + c_mmot_combined_S_R++; + + /* + * This case is divided into ten sections, each + * with a label. There are five optimized + * sections and six unoptimized sections, which + * do the same thing but handle all possible + * cases and are slower. + * + * The five sections for an RPC are + * 1) Get request message into a buffer. + * 2) Copyin request message and rcv_name. + * (fast_copyin or slow_copyin) + * 3) Enqueue request and dequeue reply. + * (fast_send_receive or + * slow_send and slow_receive) + * 4) Copyout reply message. + * (fast_copyout or slow_copyout) + * 5) Put reply message to user's buffer. + * + * Keep the locking hierarchy firmly in mind. + * (First spaces, then ports, then port sets, + * then message queues.) Only a non-blocking + * attempt can be made to acquire locks out of + * order, or acquire two locks on the same level. 
+ * Acquiring two locks on the same level will + * fail if the objects are really the same, + * unless simple locking is disabled. This is OK, + * because then the extra unlock does nothing. + * + * There are two major reasons these RPCs can't use + * ipc_thread_switch, and use slow_send/slow_receive: + * 1) Kernel RPCs. + * 2) Servers fall behind clients, so + * client doesn't find a blocked server thread and + * server finds waiting messages and can't block. + */ + + mr = ipc_kmsg_get(msg, send_size, &kmsg); + if (mr != KERN_SUCCESS) { + return mr; + } + hdr = &kmsg->ikm_header; + trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr + + send_size); + + fast_copyin: + /* + * optimized ipc_kmsg_copyin/ipc_mqueue_copyin + * + * We have the request message data in kmsg. + * Must still do copyin, send, receive, etc. + * + * If the message isn't simple, we can't combine + * ipc_kmsg_copyin_header and ipc_mqueue_copyin, + * because copyin of the message body might + * affect rcv_name. 
+ */ + + switch (hdr->msgh_bits) { + case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, + MACH_MSG_TYPE_MAKE_SEND_ONCE): { + register ipc_entry_t table; + register ipc_entry_num_t size; + register ipc_port_t reply_port; + + /* sending a request message */ + + { + register mach_port_index_t index; + register mach_port_gen_t gen; + + { + register mach_port_name_t reply_name = + (mach_port_name_t)hdr->msgh_local_port; + + if (reply_name != rcv_name) { + HOT(c_mmot_g_slow_copyin3++); + goto slow_copyin; + } + + /* optimized ipc_entry_lookup of reply_name */ + + index = MACH_PORT_INDEX(reply_name); + gen = MACH_PORT_GEN(reply_name); + + is_read_lock(space); + assert(space->is_active); + + size = space->is_table_size; + table = space->is_table; + + { + register ipc_entry_t entry; + register ipc_entry_bits_t bits; + + if (index < size) { + entry = &table[index]; + bits = entry->ie_bits; + if (IE_BITS_GEN(bits) != gen || + (bits & IE_BITS_COLLISION)) { + entry = IE_NULL; + } + } else { + entry = IE_NULL; + } + if (entry == IE_NULL) { + entry = ipc_entry_lookup(space, reply_name); + if (entry == IE_NULL) { + HOT(c_mmot_cold_006++); + goto abort_request_copyin; + } + bits = entry->ie_bits; + } + + /* check type bit */ + + if (! 
(bits & MACH_PORT_TYPE_RECEIVE)) { + HOT(c_mmot_cold_007++); + goto abort_request_copyin; + } + + reply_port = (ipc_port_t) entry->ie_object; + assert(reply_port != IP_NULL); + } + } + } + + /* optimized ipc_entry_lookup of dest_name */ + + { + register mach_port_index_t index; + register mach_port_gen_t gen; + + { + register mach_port_name_t dest_name = + (mach_port_name_t)hdr->msgh_remote_port; + + index = MACH_PORT_INDEX(dest_name); + gen = MACH_PORT_GEN(dest_name); + + { + register ipc_entry_t entry; + register ipc_entry_bits_t bits; + + if (index < size) { + entry = &table[index]; + bits = entry->ie_bits; + if (IE_BITS_GEN(bits) != gen || + (bits & IE_BITS_COLLISION)) { + entry = IE_NULL; + } + } else { + entry = IE_NULL; + } + if (entry == IE_NULL) { + entry = ipc_entry_lookup(space, dest_name); + if (entry == IE_NULL) { + HOT(c_mmot_cold_008++); + goto abort_request_copyin; + } + bits = entry->ie_bits; + } + + /* check type bit */ + + if (! (bits & MACH_PORT_TYPE_SEND)) { + HOT(c_mmot_cold_009++); + goto abort_request_copyin; + } + + assert(IE_BITS_UREFS(bits) > 0); + + dest_port = (ipc_port_t) entry->ie_object; + assert(dest_port != IP_NULL); + } + } + } + + /* + * To do an atomic copyin, need simultaneous + * locks on both ports and the space. If + * dest_port == reply_port, and simple locking is + * enabled, then we will abort. Otherwise it's + * OK to unlock twice. 
+ */ + + ip_lock(dest_port); + if (!ip_active(dest_port) || + !ip_lock_try(reply_port)) { + ip_unlock(dest_port); + HOT(c_mmot_cold_010++); + goto abort_request_copyin; + } + is_read_unlock(space); + + assert(dest_port->ip_srights > 0); + dest_port->ip_srights++; + ip_reference(dest_port); + + assert(ip_active(reply_port)); + assert(reply_port->ip_receiver_name == + (mach_port_name_t)hdr->msgh_local_port); + assert(reply_port->ip_receiver == space); + + reply_port->ip_sorights++; + ip_reference(reply_port); + + hdr->msgh_bits = + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, + MACH_MSG_TYPE_PORT_SEND_ONCE); + hdr->msgh_remote_port = dest_port; + hdr->msgh_local_port = reply_port; + + /* make sure we can queue to the destination */ + + if (dest_port->ip_receiver == ipc_space_kernel) { + /* + * The kernel server has a reference to + * the reply port, which it hands back + * to us in the reply message. We do + * not need to keep another reference to + * it. + */ + ip_unlock(reply_port); + + assert(ip_active(dest_port)); + dest_port->ip_messages.imq_seqno++; + ip_unlock(dest_port); + goto kernel_send; + } + + if (imq_full(&dest_port->ip_messages)) { + HOT(c_mmot_cold_013++); + goto abort_request_send_receive; + } + + /* optimized ipc_mqueue_copyin */ + + rcv_object = (ipc_object_t) reply_port; + io_reference(rcv_object); + rcv_mqueue = &reply_port->ip_messages; + io_unlock(rcv_object); + HOT(c_mmot_hot_fSR_ok++); + goto fast_send_receive; + + abort_request_copyin: + is_read_unlock(space); + goto slow_copyin; + + abort_request_send_receive: + ip_unlock(dest_port); + ip_unlock(reply_port); + goto slow_send; + } + + case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): { + register ipc_entry_num_t size; + register ipc_entry_t table; + + /* sending a reply message */ + + { + register mach_port_name_t reply_name = + (mach_port_name_t)hdr->msgh_local_port; + + if (reply_name != MACH_PORT_NULL) { + HOT(c_mmot_cold_018++); + goto slow_copyin; + } + } + + is_write_lock(space); + 
assert(space->is_active); + + /* optimized ipc_entry_lookup */ + + size = space->is_table_size; + table = space->is_table; + + { + register ipc_entry_t entry; + register mach_port_gen_t gen; + register mach_port_index_t index; + ipc_table_index_t *requests; + + { + register mach_port_name_t dest_name = + (mach_port_name_t)hdr->msgh_remote_port; + + index = MACH_PORT_INDEX(dest_name); + gen = MACH_PORT_GEN(dest_name); + } + + if (index >= size) { + HOT(c_mmot_cold_019++); + goto abort_reply_dest_copyin; + } + + entry = &table[index]; + + /* check generation, collision bit, and type bit */ + + if ((entry->ie_bits & (IE_BITS_GEN_MASK| + IE_BITS_COLLISION| + MACH_PORT_TYPE_SEND_ONCE)) != + (gen | MACH_PORT_TYPE_SEND_ONCE)) { + HOT(c_mmot_cold_020++); + goto abort_reply_dest_copyin; + } + + /* optimized ipc_right_copyin */ + + assert(IE_BITS_TYPE(entry->ie_bits) == + MACH_PORT_TYPE_SEND_ONCE); + assert(IE_BITS_UREFS(entry->ie_bits) == 1); + + if (entry->ie_request != 0) { + HOT(c_mmot_cold_021++); + goto abort_reply_dest_copyin; + } + + dest_port = (ipc_port_t) entry->ie_object; + assert(dest_port != IP_NULL); + + ip_lock(dest_port); + if (!ip_active(dest_port)) { + ip_unlock(dest_port); + HOT(c_mmot_cold_022++); + goto abort_reply_dest_copyin; + } + + assert(dest_port->ip_sorights > 0); + + /* optimized ipc_entry_dealloc */ + + + entry->ie_bits = gen; + entry->ie_next = table->ie_next; + table->ie_next = index; + entry->ie_object = IO_NULL; + } + + hdr->msgh_bits = + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, + 0); + hdr->msgh_remote_port = dest_port; + + /* make sure we can queue to the destination */ + + assert(dest_port->ip_receiver != ipc_space_kernel); + + /* optimized ipc_entry_lookup/ipc_mqueue_copyin */ + + { + register ipc_entry_t entry; + register ipc_entry_bits_t bits; + + { + register mach_port_index_t index; + register mach_port_gen_t gen; + + index = MACH_PORT_INDEX(rcv_name); + gen = MACH_PORT_GEN(rcv_name); + + if (index < size) { + entry = 
&table[index]; + bits = entry->ie_bits; + if (IE_BITS_GEN(bits) != gen || + (bits & IE_BITS_COLLISION)) { + entry = IE_NULL; + } + } else { + entry = IE_NULL; + } + if (entry == IE_NULL) { + entry = ipc_entry_lookup(space, rcv_name); + if (entry == IE_NULL) { + HOT(c_mmot_cold_024++); + goto abort_reply_rcv_copyin; + } + bits = entry->ie_bits; + } + + } + + /* check type bits; looking for receive or set */ +#if 0 + /* + * JMM - The check below for messages in the receive + * mqueue is insufficient to work with port sets, since + * the messages stay in the port queues. For now, don't + * allow portsets (but receiving on portsets when sending + * a message to a send-once right is actually a very + * common case (so we should re-enable). + */ + if (bits & MACH_PORT_TYPE_PORT_SET) { + register ipc_pset_t rcv_pset; + + rcv_pset = (ipc_pset_t) entry->ie_object; + assert(rcv_pset != IPS_NULL); + + ips_lock(rcv_pset); + assert(ips_active(rcv_pset)); + + rcv_object = (ipc_object_t) rcv_pset; + rcv_mqueue = &rcv_pset->ips_messages; + } else +#endif /* 0 */ + if (bits & MACH_PORT_TYPE_RECEIVE) { + register ipc_port_t rcv_port; + + rcv_port = (ipc_port_t) entry->ie_object; + assert(rcv_port != IP_NULL); + + if (!ip_lock_try(rcv_port)) { + HOT(c_mmot_cold_025++); + goto abort_reply_rcv_copyin; + } + assert(ip_active(rcv_port)); + + if (rcv_port->ip_pset_count != 0) { + ip_unlock(rcv_port); + HOT(c_mmot_cold_026++); + goto abort_reply_rcv_copyin; + } + + rcv_object = (ipc_object_t) rcv_port; + rcv_mqueue = &rcv_port->ip_messages; + } else { + HOT(c_mmot_cold_027++); + goto abort_reply_rcv_copyin; + } + } + + is_write_unlock(space); + io_reference(rcv_object); + io_unlock(rcv_object); + HOT(c_mmot_hot_fSR_ok++); + goto fast_send_receive; + + abort_reply_dest_copyin: + is_write_unlock(space); + HOT(c_mmot_cold_029++); + goto slow_copyin; + + abort_reply_rcv_copyin: + ip_unlock(dest_port); + is_write_unlock(space); + HOT(c_mmot_cold_030++); + goto slow_send; + } + + default: + 
HOT(c_mmot_cold_031++); + goto slow_copyin; + } + /*NOTREACHED*/ + + fast_send_receive: + /* + * optimized ipc_mqueue_send/ipc_mqueue_receive + * + * Finished get/copyin of kmsg and copyin of rcv_name. + * space is unlocked, dest_port is locked, + * we can queue kmsg to dest_port, + * rcv_mqueue is set, and rcv_object holds a ref + * so the mqueue cannot go away. + * + * JMM - For now, rcv_object is just a port. Portsets + * are disabled for the time being. + */ + + assert(ip_active(dest_port)); + assert(dest_port->ip_receiver != ipc_space_kernel); + assert(!imq_full(&dest_port->ip_messages) || + (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == + MACH_MSG_TYPE_PORT_SEND_ONCE)); + assert((hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0); + + { + register ipc_mqueue_t dest_mqueue; + wait_queue_t waitq; + thread_t receiver; +#if THREAD_SWAPPER + thread_act_t rcv_act; +#endif + spl_t s; + + s = splsched(); + dest_mqueue = &dest_port->ip_messages; + waitq = &dest_mqueue->imq_wait_queue; + imq_lock(dest_mqueue); + + wait_queue_peek_locked(waitq, IPC_MQUEUE_RECEIVE, &receiver, &waitq); + /* queue still locked, thread locked - but still on q */ + + if (receiver == THREAD_NULL) { + abort_send_receive: + imq_unlock(dest_mqueue); + splx(s); + ip_unlock(dest_port); + ipc_object_release(rcv_object); + HOT(c_mmot_cold_032++); + goto slow_send; + } + + assert(receiver->wait_queue == waitq); + assert(receiver->wait_event == IPC_MQUEUE_RECEIVE); + + /* + * See if it is still running on another processor (trying to + * block itself). If so, fall off. + * + * JMM - We have an opportunity here. Since the thread is locked + * and we find it runnable, it must still be trying to get into + * thread_block on itself. We could just "hand him the message" + * and let him go (thread_go_locked()) and then fall down into a + * slow receive for ourselves. Only his RECEIVE_TOO_LARGE handling + * runs afoul of that. Clean this up! 
+ */ + if ((receiver->state & TH_RUN|TH_WAIT) != TH_WAIT) { + assert(NCPUS > 1); + HOT(c_mmot_cold_033++); + fall_off: + thread_unlock(receiver); + if (waitq != &dest_mqueue->imq_wait_queue) + wait_queue_unlock(waitq); + goto abort_send_receive; + } + + /* + * Check that the receiver can stay on the hot path. + */ + if (send_size + REQUESTED_TRAILER_SIZE(receiver->ith_option) > + receiver->ith_msize) { + /* + * The receiver can't accept the message. + */ + HOT(c_mmot_bad_rcvr++); + goto fall_off; + } + +#if THREAD_SWAPPER + /* + * Receiver looks okay -- is it swapped in? + */ + rpc_lock(receiver); + rcv_act = receiver->top_act; + if (rcv_act->swap_state != TH_SW_IN && + rcv_act->swap_state != TH_SW_UNSWAPPABLE) { + rpc_unlock(receiver); + HOT(c_mmot_rcvr_swapped++); + goto fall_off; + } + + /* + * Make sure receiver stays swapped in (if we can). + */ + if (!act_lock_try(rcv_act)) { /* out of order! */ + rpc_unlock(receiver); + HOT(c_mmot_rcvr_locked++); + goto fall_off; + } + + /* + * Check for task swapping in progress affecting + * receiver. Since rcv_act is attached to a shuttle, + * its swap_state is covered by shuttle's thread_lock() + * (sigh). + */ + if ((rcv_act->swap_state != TH_SW_IN && + rcv_act->swap_state != TH_SW_UNSWAPPABLE) || + rcv_act->ast & AST_SWAPOUT) { + act_unlock(rcv_act); + rpc_unlock(receiver); + HOT(c_mmot_rcvr_tswapped++); + goto fall_off; + } + + /* + * We don't need to make receiver unswappable here -- holding + * act_lock() of rcv_act is sufficient to prevent either thread + * or task swapping from changing its state (see swapout_scan(), + * task_swapout()). Don't release lock till receiver's state + * is consistent. Its task may then be marked for swapout, + * but that's life. + */ + rpc_unlock(receiver); + /* + * NB: act_lock(rcv_act) still held + */ +#endif /* THREAD_SWAPPER */ + + /* + * Before committing to the handoff, make sure that we are + * really going to block (i.e. there are no messages already + * queued for us. 
This violates lock ordering, so make sure + * we don't deadlock. After the trylock succeeds below, we + * may have up to 3 message queues locked: + * - the dest port mqueue + * - a portset mqueue (where waiting receiver was found) + * - finally our own rcv_mqueue + * + * JMM - Need to make this check appropriate for portsets as + * well before re-enabling them. + */ + if (!imq_lock_try(rcv_mqueue)) { + goto fall_off; + } + if (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages) != IKM_NULL) { + imq_unlock(rcv_mqueue); + HOT(c_mmot_cold_033++); + goto fall_off; + } + + /* At this point we are committed to do the "handoff". */ + c_mach_msg_trap_switch_fast++; + + /* + * JMM - Go ahead and pull the receiver from the runq. If the + * runq wasn't the one for the mqueue, unlock it. + */ + wait_queue_pull_thread_locked(waitq, + receiver, + (waitq != &dest_mqueue->imq_wait_queue)); + + /* + * Store the kmsg and seqno where the receiver can pick it up. + */ + receiver->ith_state = MACH_MSG_SUCCESS; + receiver->ith_kmsg = kmsg; + receiver->ith_seqno = dest_mqueue->imq_seqno++; + + /* + * Inline thread_go_locked + * + * JMM - Including hacked in version of setrun scheduler op + * that doesn't try to put thread on a runq. + */ + { + receiver->sp_state = MK_SP_RUNNABLE; + + receiver->state &= ~(TH_WAIT|TH_UNINT); + receiver->state |= TH_RUN; + receiver->wait_result = THREAD_AWAKENED; + } + + thread_unlock(receiver); +#if THREAD_SWAPPER + act_unlock(rcv_act); +#endif /* THREAD_SWAPPER */ + + imq_unlock(dest_mqueue); + ip_unlock(dest_port); + current_task()->messages_sent++; + + + /* + * Put self on receive port's queue. + * Also save state that the sender of + * our reply message needs to determine if it + * can hand off directly back to us. + */ + self->ith_msg = (rcv_msg) ? 
rcv_msg : msg; + self->ith_object = rcv_object; /* still holds reference */ + self->ith_msize = rcv_size; + self->ith_option = option; + self->ith_scatter_list_size = scatter_list_size; + self->ith_continuation = thread_syscall_return; + + waitq = &rcv_mqueue->imq_wait_queue; + wait_queue_assert_wait_locked(waitq, + IPC_MQUEUE_RECEIVE, + THREAD_ABORTSAFE, + TRUE); /* unlock? */ + /* rcv_mqueue is unlocked */ + + /* Inline thread_block_reason (except don't select a new + * new thread (we already have one), and don't turn off ASTs + * (we don't want two threads to hog all the CPU by handing + * off to each other). + */ + { + if (self->funnel_state & TH_FN_OWNED) { + self->funnel_state = TH_FN_REFUNNEL; + KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, self->funnel_lock, 3, 0, 0, 0); + funnel_unlock(self->funnel_lock); + + } + + machine_clock_assist(); + + thread_lock(self); + if (self->state & TH_ABORT) + clear_wait_internal(self, THREAD_INTERRUPTED); + thread_unlock(self); + + /* + * Switch directly to receiving thread, and block + * this thread as though it had called ipc_mqueue_receive. + */ +#if defined (__i386__) + thread_run(self, (void (*)(void))0, receiver); +#else + thread_run(self, ipc_mqueue_receive_continue, receiver); +#endif + + /* if we fell thru */ + if (self->funnel_state & TH_FN_REFUNNEL) { + kern_return_t wait_result2; + + wait_result2 = self->wait_result; + self->funnel_state = 0; + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 6, 0, 0, 0); + funnel_lock(self->funnel_lock); + KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 6, 0, 0, 0); + self->funnel_state = TH_FN_OWNED; + self->wait_result = wait_result2; + } + splx(s); + } + + ipc_mqueue_receive_continue(); + /* NOTREACHED */ + } + + fast_copyout: + /* + * Nothing locked and no references held, except + * we have kmsg with msgh_seqno filled in. Must + * still check against rcv_size and do + * ipc_kmsg_copyout/ipc_kmsg_put. 
+ */ + + reply_size = send_size + trailer->msgh_trailer_size; + if (rcv_size < reply_size) { + HOT(c_mmot_g_slow_copyout6++); + goto slow_copyout; + } + + /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */ + + switch (hdr->msgh_bits) { + case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, + MACH_MSG_TYPE_PORT_SEND_ONCE): { + ipc_port_t reply_port = + (ipc_port_t) hdr->msgh_local_port; + mach_port_name_t dest_name, reply_name; + + /* receiving a request message */ + + if (!IP_VALID(reply_port)) { + HOT(c_mmot_g_slow_copyout5++); + goto slow_copyout; + } + + is_write_lock(space); + assert(space->is_active); + + /* + * To do an atomic copyout, need simultaneous + * locks on both ports and the space. If + * dest_port == reply_port, and simple locking is + * enabled, then we will abort. Otherwise it's + * OK to unlock twice. + */ + + ip_lock(dest_port); + if (!ip_active(dest_port) || + !ip_lock_try(reply_port)) { + HOT(c_mmot_cold_037++); + goto abort_request_copyout; + } + + if (!ip_active(reply_port)) { + ip_unlock(reply_port); + HOT(c_mmot_cold_038++); + goto abort_request_copyout; + } + + assert(reply_port->ip_sorights > 0); + ip_unlock(reply_port); + + { + register ipc_entry_t table; + register ipc_entry_t entry; + register mach_port_index_t index; + + /* optimized ipc_entry_get */ + + table = space->is_table; + index = table->ie_next; + + if (index == 0) { + HOT(c_mmot_cold_039++); + goto abort_request_copyout; + } + + entry = &table[index]; + table->ie_next = entry->ie_next; + entry->ie_request = 0; + + { + register mach_port_gen_t gen; + + assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0); + gen = IE_BITS_NEW_GEN(entry->ie_bits); + + reply_name = MACH_PORT_MAKE(index, gen); + + /* optimized ipc_right_copyout */ + + entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1); + } + + assert(MACH_PORT_VALID(reply_name)); + entry->ie_object = (ipc_object_t) reply_port; + is_write_unlock(space); + } + + /* optimized ipc_object_copyout_dest */ + + 
assert(dest_port->ip_srights > 0); + ip_release(dest_port); + + if (dest_port->ip_receiver == space) + dest_name = dest_port->ip_receiver_name; + else + dest_name = MACH_PORT_NULL; + + if ((--dest_port->ip_srights == 0) && + (dest_port->ip_nsrequest != IP_NULL)) { + ipc_port_t nsrequest; + mach_port_mscount_t mscount; + + /* a rather rare case */ + + nsrequest = dest_port->ip_nsrequest; + mscount = dest_port->ip_mscount; + dest_port->ip_nsrequest = IP_NULL; + ip_unlock(dest_port); + ipc_notify_no_senders(nsrequest, mscount); + } else + ip_unlock(dest_port); + + hdr->msgh_bits = + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, + MACH_MSG_TYPE_PORT_SEND); + hdr->msgh_remote_port = (mach_port_t)reply_name; + hdr->msgh_local_port = (mach_port_t)dest_name; + HOT(c_mmot_hot_ok1++); + goto fast_put; + + abort_request_copyout: + ip_unlock(dest_port); + is_write_unlock(space); + HOT(c_mmot_g_slow_copyout4++); + goto slow_copyout; + } + + case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): { + register mach_port_name_t dest_name; + + /* receiving a reply message */ + + ip_lock(dest_port); + if (!ip_active(dest_port)) { + ip_unlock(dest_port); + HOT(c_mmot_g_slow_copyout3++); + goto slow_copyout; + } + + /* optimized ipc_object_copyout_dest */ + + assert(dest_port->ip_sorights > 0); + + if (dest_port->ip_receiver == space) { + ip_release(dest_port); + dest_port->ip_sorights--; + dest_name = dest_port->ip_receiver_name; + ip_unlock(dest_port); + } else { + ip_unlock(dest_port); + + ipc_notify_send_once(dest_port); + dest_name = MACH_PORT_NULL; + } + + hdr->msgh_bits = MACH_MSGH_BITS(0, + MACH_MSG_TYPE_PORT_SEND_ONCE); + hdr->msgh_remote_port = MACH_PORT_NULL; + hdr->msgh_local_port = (ipc_port_t)dest_name; + HOT(c_mmot_hot_ok2++); + goto fast_put; + } + + case MACH_MSGH_BITS_COMPLEX| + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): { + register mach_port_name_t dest_name; + + /* receiving a complex reply message */ + + ip_lock(dest_port); + if (!ip_active(dest_port)) { + 
ip_unlock(dest_port); + HOT(c_mmot_g_slow_copyout1++); + goto slow_copyout; + } + + /* optimized ipc_object_copyout_dest */ + + assert(dest_port->ip_sorights > 0); + + if (dest_port->ip_receiver == space) { + ip_release(dest_port); + dest_port->ip_sorights--; + dest_name = dest_port->ip_receiver_name; + ip_unlock(dest_port); + } else { + ip_unlock(dest_port); + + ipc_notify_send_once(dest_port); + dest_name = MACH_PORT_NULL; + } + + hdr->msgh_bits = + MACH_MSGH_BITS_COMPLEX | + MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE); + hdr->msgh_remote_port = MACH_PORT_NULL; + hdr->msgh_local_port = (mach_port_t)dest_name; + + mr = ipc_kmsg_copyout_body(kmsg, space, + current_map(), + MACH_MSG_BODY_NULL); + if (mr != MACH_MSG_SUCCESS) { + if (ipc_kmsg_put(msg, kmsg, hdr->msgh_size + + trailer->msgh_trailer_size) == + MACH_RCV_INVALID_DATA) + return MACH_RCV_INVALID_DATA; + else + return mr | MACH_RCV_BODY_ERROR; + } + HOT(c_mmot_hot_ok3++); + goto fast_put; + } + + default: + HOT(c_mmot_g_slow_copyout2++); + goto slow_copyout; + } + /*NOTREACHED*/ + + fast_put: + mr = ipc_kmsg_put(rcv_msg ? rcv_msg : msg, + kmsg, + hdr->msgh_size + trailer->msgh_trailer_size); + if (mr != MACH_MSG_SUCCESS) { + return MACH_RCV_INVALID_DATA; + } + current_task()->messages_received++; + return mr; + + + /* BEGINNING OF WARM PATH */ + + /* + * The slow path has a few non-register temporary + * variables used only for call-by-reference. + */ + + slow_copyin: + { + ipc_kmsg_t temp_kmsg; + mach_port_seqno_t temp_seqno; + ipc_object_t temp_rcv_object; + ipc_mqueue_t temp_rcv_mqueue; + register mach_port_name_t reply_name = + (mach_port_name_t)hdr->msgh_local_port; + + + /* + * We have the message data in kmsg, but + * we still need to copyin, send it, + * receive a reply, and do copyout. 
+ */ + + mr = ipc_kmsg_copyin(kmsg, space, current_map(), + MACH_PORT_NULL); + if (mr != MACH_MSG_SUCCESS) { + ipc_kmsg_free(kmsg); + return(mr); + } + + /* try to get back on optimized path */ + + if ((reply_name != rcv_name) || + (hdr->msgh_bits & MACH_MSGH_BITS_CIRCULAR)) { + HOT(c_mmot_cold_048++); + goto slow_send; + } + + dest_port = (ipc_port_t) hdr->msgh_remote_port; + assert(IP_VALID(dest_port)); + + ip_lock(dest_port); + if (!ip_active(dest_port)) { + ip_unlock(dest_port); + goto slow_send; + } + + if (dest_port->ip_receiver == ipc_space_kernel) { + dest_port->ip_messages.imq_seqno++; + ip_unlock(dest_port); + goto kernel_send; + } + + if (!imq_full(&dest_port->ip_messages) || + (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == + MACH_MSG_TYPE_PORT_SEND_ONCE)) + { + /* + * Try an optimized ipc_mqueue_copyin. + * It will work if this is a request message. + */ + + register ipc_port_t reply_port; + + reply_port = (ipc_port_t) hdr->msgh_local_port; + if (IP_VALID(reply_port)) { + if (ip_lock_try(reply_port)) { + if (ip_active(reply_port) && + reply_port->ip_receiver == space && + reply_port->ip_receiver_name == rcv_name && + reply_port->ip_pset_count == 0) + { + /* Grab a reference to the reply port. */ + rcv_object = (ipc_object_t) reply_port; + io_reference(rcv_object); + rcv_mqueue = &reply_port->ip_messages; + io_unlock(rcv_object); + HOT(c_mmot_getback_FastSR++); + goto fast_send_receive; + } + ip_unlock(reply_port); + } + } + } + + ip_unlock(dest_port); + HOT(c_mmot_cold_050++); + goto slow_send; + + kernel_send: + /* + * Special case: send message to kernel services. + * The request message has been copied into the + * kmsg. Nothing is locked. + */ + + { + register ipc_port_t reply_port; + mach_port_seqno_t local_seqno; + spl_t s; + + /* + * Perform the kernel function. + */ + c_mmot_kernel_send++; + + current_task()->messages_sent++; + + kmsg = ipc_kobject_server(kmsg); + if (kmsg == IKM_NULL) { + /* + * No reply. Take the + * slow receive path. 
+ */ + HOT(c_mmot_cold_051++); + goto slow_get_rcv_port; + } + + /* + * Check that: + * the reply port is alive + * we hold the receive right + * the name has not changed. + * the port is not in a set + * If any of these are not true, + * we cannot directly receive the reply + * message. + */ + hdr = &kmsg->ikm_header; + send_size = hdr->msgh_size; + trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr + + round_msg(send_size)); + reply_port = (ipc_port_t) hdr->msgh_remote_port; + ip_lock(reply_port); + + if ((!ip_active(reply_port)) || + (reply_port->ip_receiver != space) || + (reply_port->ip_receiver_name != rcv_name) || + (reply_port->ip_pset_count != 0)) + { + ip_unlock(reply_port); + ipc_kmsg_send_always(kmsg); + HOT(c_mmot_cold_052++); + goto slow_get_rcv_port; + } + + s = splsched(); + rcv_mqueue = &reply_port->ip_messages; + imq_lock(rcv_mqueue); + + /* keep port locked, and don`t change ref count yet */ + + /* + * If there are messages on the port + * or other threads waiting for a message, + * we cannot directly receive the reply. + */ + if (!wait_queue_empty(&rcv_mqueue->imq_wait_queue) || + (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages) != IKM_NULL)) + { + imq_unlock(rcv_mqueue); + splx(s); + ip_unlock(reply_port); + ipc_kmsg_send_always(kmsg); + HOT(c_mmot_cold_053++); + goto slow_get_rcv_port; + } + + /* + * We can directly receive this reply. + * Since there were no messages queued + * on the reply port, there should be + * no threads blocked waiting to send. + */ + dest_port = reply_port; + local_seqno = rcv_mqueue->imq_seqno++; + imq_unlock(rcv_mqueue); + splx(s); + + /* + * inline ipc_object_release. + * Port is still locked. + * Reference count was not incremented. 
+ */ + ip_check_unlock(reply_port); + + if (option & MACH_RCV_TRAILER_MASK) { + trailer->msgh_seqno = local_seqno; + trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option); + } + /* copy out the kernel reply */ + HOT(c_mmot_fastkernelreply++); + goto fast_copyout; + } + + slow_send: + /* + * Nothing is locked. We have acquired kmsg, but + * we still need to send it and receive a reply. + */ + + mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE, + MACH_MSG_TIMEOUT_NONE); + if (mr != MACH_MSG_SUCCESS) { + mr |= ipc_kmsg_copyout_pseudo(kmsg, space, + current_map(), + MACH_MSG_BODY_NULL); + + (void) ipc_kmsg_put(msg, kmsg, hdr->msgh_size); + return(mr); + } + + slow_get_rcv_port: + /* + * We have sent the message. Copy in the receive port. + */ + mr = ipc_mqueue_copyin(space, rcv_name, + &temp_rcv_mqueue, &temp_rcv_object); + if (mr != MACH_MSG_SUCCESS) { + return(mr); + } + rcv_mqueue = temp_rcv_mqueue; + rcv_object = temp_rcv_object; + /* hold ref for rcv_object */ + + slow_receive: + /* + * Now we have sent the request and copied in rcv_name, + * and hold ref for rcv_object (to keep mqueue alive). + * Just receive a reply and try to get back to fast path. 
+ */ + + self->ith_continuation = (void (*)(mach_msg_return_t))0; + ipc_mqueue_receive(rcv_mqueue, + MACH_MSG_OPTION_NONE, + MACH_MSG_SIZE_MAX, + MACH_MSG_TIMEOUT_NONE, + THREAD_ABORTSAFE); + + mr = self->ith_state; + temp_kmsg = self->ith_kmsg; + temp_seqno = self->ith_seqno; + + ipc_object_release(rcv_object); + + if (mr != MACH_MSG_SUCCESS) { + return(mr); + } + + kmsg = temp_kmsg; + hdr = &kmsg->ikm_header; + send_size = hdr->msgh_size; + trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t) hdr + + round_msg(send_size)); + if (option & MACH_RCV_TRAILER_MASK) { + trailer->msgh_seqno = temp_seqno; + trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option); + } + dest_port = (ipc_port_t) hdr->msgh_remote_port; + HOT(c_mmot_cold_055++); + goto fast_copyout; + + slow_copyout: + /* + * Nothing locked and no references held, except + * we have kmsg with msgh_seqno filled in. Must + * still check against rcv_size and do + * ipc_kmsg_copyout/ipc_kmsg_put. + */ + + reply_size = send_size + trailer->msgh_trailer_size; + if (rcv_size < reply_size) { + if (msg_receive_error(kmsg, msg, option, temp_seqno, + space) == MACH_RCV_INVALID_DATA) { + mr = MACH_RCV_INVALID_DATA; + return(mr); + } + else { + mr = MACH_RCV_TOO_LARGE; + return(mr); + } + } + + mr = ipc_kmsg_copyout(kmsg, space, current_map(), + MACH_PORT_NULL, MACH_MSG_BODY_NULL); + if (mr != MACH_MSG_SUCCESS) { + if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { + if (ipc_kmsg_put(msg, kmsg, reply_size) == + MACH_RCV_INVALID_DATA) + mr = MACH_RCV_INVALID_DATA; + } + else { + if (msg_receive_error(kmsg, msg, option, + temp_seqno, space) == MACH_RCV_INVALID_DATA) + mr = MACH_RCV_INVALID_DATA; + } + + return(mr); + } + + /* try to get back on optimized path */ + HOT(c_mmot_getback_fast_put++); + goto fast_put; + + /*NOTREACHED*/ + } + } /* END OF HOT PATH */ +#endif /* ENABLE_HOTPATH */ + + if (option & MACH_SEND_MSG) { + mr = mach_msg_send(msg, option, send_size, + timeout, notify); + if (mr != 
MACH_MSG_SUCCESS) { + return mr; + } + } + + if (option & MACH_RCV_MSG) { + mach_msg_header_t *rcv; + + /* + * 1. MACH_RCV_OVERWRITE is on, and rcv_msg is our scatter list + * and receive buffer + * 2. MACH_RCV_OVERWRITE is off, and rcv_msg might be the + * alternate receive buffer (separate send and receive buffers). + */ + if (option & MACH_RCV_OVERWRITE) + rcv = rcv_msg; + else if (rcv_msg != MACH_MSG_NULL) + rcv = rcv_msg; + else + rcv = msg; + mr = mach_msg_receive(rcv, option, rcv_size, rcv_name, + timeout, thread_syscall_return, scatter_list_size); + thread_syscall_return(mr); + } + + return MACH_MSG_SUCCESS; +} + +/* + * Routine: msg_receive_error [internal] + * Purpose: + * Builds a minimal header/trailer and copies it to + * the user message buffer. Invoked when in the case of a + * MACH_RCV_TOO_LARGE or MACH_RCV_BODY_ERROR error. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS minimal header/trailer copied + * MACH_RCV_INVALID_DATA copyout to user buffer failed + */ + +mach_msg_return_t +msg_receive_error( + ipc_kmsg_t kmsg, + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_port_seqno_t seqno, + ipc_space_t space) +{ + mach_msg_format_0_trailer_t *trailer; + + /* + * Copy out the destination port in the message. + * Destroy all other rights and memory in the message. + */ + ipc_kmsg_copyout_dest(kmsg, space); + + /* + * Build a minimal message with the requested trailer. 
+ */ + trailer = (mach_msg_format_0_trailer_t *) + ((vm_offset_t)&kmsg->ikm_header + + round_msg(sizeof(mach_msg_header_t))); + kmsg->ikm_header.msgh_size = sizeof(mach_msg_header_t); + bcopy( (char *)&trailer_template, + (char *)trailer, + sizeof(trailer_template)); + if (option & MACH_RCV_TRAILER_MASK) { + trailer->msgh_seqno = seqno; + trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option); + } + + /* + * Copy the message to user space + */ + if (ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size + + trailer->msgh_trailer_size) == MACH_RCV_INVALID_DATA) + return(MACH_RCV_INVALID_DATA); + else + return(MACH_MSG_SUCCESS); +} diff --git a/osfmk/ipc/mach_port.c b/osfmk/ipc/mach_port.c new file mode 100644 index 000000000..78afb8353 --- /dev/null +++ b/osfmk/ipc/mach_port.c @@ -0,0 +1,1790 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/mach_port.c + * Author: Rich Draves + * Date: 1989 + * + * Exported kernel calls. See mach/mach_port.defs. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Forward declarations + */ +void mach_port_names_helper( + ipc_port_timestamp_t timestamp, + ipc_entry_t entry, + mach_port_name_t name, + mach_port_name_t *names, + mach_port_type_t *types, + ipc_entry_num_t *actualp, + ipc_space_t space); + +void mach_port_gst_helper( + ipc_pset_t pset, + ipc_port_t port, + ipc_entry_num_t maxnames, + mach_port_name_t *names, + ipc_entry_num_t *actualp); + + +/* Zeroed template of qos flags */ + +static mach_port_qos_t qos_template; + +/* + * Routine: mach_port_names_helper + * Purpose: + * A helper function for mach_port_names. 
 */

void
mach_port_names_helper(
	ipc_port_timestamp_t	timestamp,
	ipc_entry_t		entry,
	mach_port_name_t	name,
	mach_port_name_t	*names,
	mach_port_type_t	*types,
	ipc_entry_num_t		*actualp,
	ipc_space_t		space)
{
	ipc_entry_bits_t bits;
	ipc_port_request_index_t request;
	mach_port_type_t type;
	ipc_entry_num_t actual;

	bits = entry->ie_bits;
	request = entry->ie_request;
	if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		ipc_port_t port;
		boolean_t died;

		port = (ipc_port_t) entry->ie_object;
		assert(port != IP_NULL);

		/*
		 * The timestamp serializes mach_port_names
		 * with ipc_port_destroy.  If the port died,
		 * but after mach_port_names started, pretend
		 * that it isn't dead.
		 */

		ip_lock(port);
		died = (!ip_active(port) &&
			IP_TIMESTAMP_ORDER(port->ip_timestamp, timestamp));
		ip_unlock(port);

		if (died) {
			/* pretend this is a dead-name entry */

			/* replace the right's type bits with DEAD_NAME */
			bits &= ~(IE_BITS_TYPE_MASK);
			bits |= MACH_PORT_TYPE_DEAD_NAME;
			/*
			 * A pending dead-name request becomes an extra
			 * user-reference; urefs occupy the low-order bits
			 * of ie_bits, so the increment bumps the count.
			 */
			if (request != 0)
				bits++;
			request = 0;
		}
	}

	type = IE_BITS_TYPE(bits);
	if (request != 0)
		type |= MACH_PORT_TYPE_DNREQUEST;

	/* append this right's name/type pair to the output arrays */
	actual = *actualp;
	names[actual] = name;
	types[actual] = type;
	*actualp = actual+1;
}

/*
 *	Routine:	mach_port_names [kernel call]
 *	Purpose:
 *		Retrieves a list of the rights present in the space,
 *		along with type information.  (Same as returned
 *		by mach_port_type.)  The names are returned in
 *		no particular order, but they (and the type info)
 *		are an accurate snapshot of the space.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Arrays of names and types returned.
 *		KERN_INVALID_TASK	The space is null.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */ + +kern_return_t +mach_port_names( + ipc_space_t space, + mach_port_name_t **namesp, + mach_msg_type_number_t *namesCnt, + mach_port_type_t **typesp, + mach_msg_type_number_t *typesCnt) +{ + ipc_entry_bits_t *capability; + ipc_tree_entry_t tentry; + ipc_entry_t table; + ipc_entry_num_t tsize; + mach_port_index_t index; + ipc_entry_num_t actual; /* this many names */ + ipc_port_timestamp_t timestamp; /* logical time of this operation */ + mach_port_name_t *names; + mach_port_type_t *types; + kern_return_t kr; + + vm_size_t size; /* size of allocated memory */ + vm_offset_t addr1; /* allocated memory, for names */ + vm_offset_t addr2; /* allocated memory, for types */ + vm_map_copy_t memory1; /* copied-in memory, for names */ + vm_map_copy_t memory2; /* copied-in memory, for types */ + + /* safe simplifying assumption */ + assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t)); + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + size = 0; + + for (;;) { + ipc_entry_num_t bound; + vm_size_t size_needed; + + is_read_lock(space); + if (!space->is_active) { + is_read_unlock(space); + if (size != 0) { + kmem_free(ipc_kernel_map, addr1, size); + kmem_free(ipc_kernel_map, addr2, size); + } + return KERN_INVALID_TASK; + } + + /* upper bound on number of names in the space */ + + bound = space->is_table_size + space->is_tree_total; + size_needed = round_page(bound * sizeof(mach_port_name_t)); + + if (size_needed <= size) + break; + + is_read_unlock(space); + + if (size != 0) { + kmem_free(ipc_kernel_map, addr1, size); + kmem_free(ipc_kernel_map, addr2, size); + } + size = size_needed; + + kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE); + if (kr != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + + kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE); + if (kr != KERN_SUCCESS) { + kmem_free(ipc_kernel_map, addr1, size); + return KERN_RESOURCE_SHORTAGE; + } + + /* can't fault while we hold locks */ + + kr = vm_map_wire(ipc_kernel_map, addr1, addr1 
+ size, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_wire(ipc_kernel_map, addr2, addr2 + size, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + assert(kr == KERN_SUCCESS); + } + /* space is read-locked and active */ + + names = (mach_port_name_t *) addr1; + types = (mach_port_type_t *) addr2; + actual = 0; + + timestamp = ipc_port_timestamp(); + + table = space->is_table; + tsize = space->is_table_size; + + for (index = 0; index < tsize; index++) { + ipc_entry_t entry = &table[index]; + ipc_entry_bits_t bits = entry->ie_bits; + + if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) { + mach_port_name_t name; + + name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits)); + mach_port_names_helper(timestamp, entry, name, names, + types, &actual, space); + } + } + + for (tentry = ipc_splay_traverse_start(&space->is_tree); + tentry != ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) { + ipc_entry_t entry = &tentry->ite_entry; + mach_port_name_t name = tentry->ite_name; + + assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE); + mach_port_names_helper(timestamp, entry, name, names, + types, &actual, space); + } + ipc_splay_traverse_finish(&space->is_tree); + is_read_unlock(space); + + if (actual == 0) { + memory1 = VM_MAP_COPY_NULL; + memory2 = VM_MAP_COPY_NULL; + + if (size != 0) { + kmem_free(ipc_kernel_map, addr1, size); + kmem_free(ipc_kernel_map, addr2, size); + } + } else { + vm_size_t size_used; + vm_size_t vm_size_used; + + size_used = actual * sizeof(mach_port_name_t); + vm_size_used = round_page(size_used); + + /* + * Make used memory pageable and get it into + * copied-in form. Free any unused memory. 
+ */ + + kr = vm_map_unwire(ipc_kernel_map, + addr1, addr1 + vm_size_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_unwire(ipc_kernel_map, + addr2, addr2 + vm_size_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, addr1, size_used, + TRUE, &memory1); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, addr2, size_used, + TRUE, &memory2); + assert(kr == KERN_SUCCESS); + + if (vm_size_used != size) { + kmem_free(ipc_kernel_map, + addr1 + vm_size_used, size - vm_size_used); + kmem_free(ipc_kernel_map, + addr2 + vm_size_used, size - vm_size_used); + } + } + + *namesp = (mach_port_name_t *) memory1; + *namesCnt = actual; + *typesp = (mach_port_type_t *) memory2; + *typesCnt = actual; + return KERN_SUCCESS; +} + +/* + * Routine: mach_port_type [kernel call] + * Purpose: + * Retrieves the type of a right in the space. + * The type is a bitwise combination of one or more + * of the following type bits: + * MACH_PORT_TYPE_SEND + * MACH_PORT_TYPE_RECEIVE + * MACH_PORT_TYPE_SEND_ONCE + * MACH_PORT_TYPE_PORT_SET + * MACH_PORT_TYPE_DEAD_NAME + * In addition, the following pseudo-type bits may be present: + * MACH_PORT_TYPE_DNREQUEST + * A dead-name notification is requested. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Type is returned. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. 
+ */ + +kern_return_t +mach_port_type( + ipc_space_t space, + mach_port_name_t name, + mach_port_type_t *typep) +{ + mach_port_urefs_t urefs; + ipc_entry_t entry; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (name == MACH_PORT_NULL) + return KERN_INVALID_NAME; + + if (name == MACH_PORT_DEAD) { + *typep = MACH_PORT_TYPE_DEAD_NAME; + return KERN_SUCCESS; + } + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + + kr = ipc_right_info(space, name, entry, typep, &urefs); + if (kr == KERN_SUCCESS) + is_write_unlock(space); + /* space is unlocked */ + return kr; +} + +/* + * Routine: mach_port_rename [kernel call] + * Purpose: + * Changes the name denoting a right, + * from oname to nname. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The right is renamed. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The oname doesn't denote a right. + * KERN_INVALID_VALUE The nname isn't a legal name. + * KERN_NAME_EXISTS The nname already denotes a right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +mach_port_rename( + ipc_space_t space, + mach_port_name_t oname, + mach_port_name_t nname) +{ + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(oname)) + return KERN_INVALID_NAME; + + if (!MACH_PORT_VALID(nname)) + return KERN_INVALID_VALUE; + + return ipc_object_rename(space, oname, nname); +} + +/* + * Routine: mach_port_allocate_name [kernel call] + * Purpose: + * Allocates a right in a space, using a specific name + * for the new right. Possible rights: + * MACH_PORT_RIGHT_RECEIVE + * MACH_PORT_RIGHT_PORT_SET + * MACH_PORT_RIGHT_DEAD_NAME + * + * A new port (allocated with MACH_PORT_RIGHT_RECEIVE) + * has no extant send or send-once rights and no queued + * messages. 
Its queue limit is MACH_PORT_QLIMIT_DEFAULT + * and its make-send count is 0. It is not a member of + * a port set. It has no registered no-senders or + * port-destroyed notification requests. + * + * A new port set has no members. + * + * A new dead name has one user reference. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The right is allocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE The name isn't a legal name. + * KERN_INVALID_VALUE "right" isn't a legal kind of right. + * KERN_NAME_EXISTS The name already denotes a right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + * + * Restrictions on name allocation: NT bits are reserved by kernel, + * must be set on any chosen name. Can't do this at all in kernel + * loaded server. + */ + +kern_return_t +mach_port_allocate_name( + ipc_space_t space, + mach_port_right_t right, + mach_port_name_t name) +{ + kern_return_t kr; + mach_port_qos_t qos = qos_template; + + qos.name = TRUE; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_VALUE; + + kr = mach_port_allocate_full (space, right, SUBSYSTEM_NULL, + &qos, &name); + return (kr); +} + +/* + * Routine: mach_port_allocate [kernel call] + * Purpose: + * Allocates a right in a space. Like mach_port_allocate_name, + * except that the implementation picks a name for the right. + * The name may be any legal name in the space that doesn't + * currently denote a right. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The right is allocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE "right" isn't a legal kind of right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + * KERN_NO_SPACE No room in space for another right. 
+ */ + +kern_return_t +mach_port_allocate( + ipc_space_t space, + mach_port_right_t right, + mach_port_name_t *namep) +{ + kern_return_t kr; + mach_port_qos_t qos = qos_template; + + kr = mach_port_allocate_full (space, right, SUBSYSTEM_NULL, + &qos, namep); + return (kr); +} + +/* + * Routine: mach_port_allocate_qos [kernel call] + * Purpose: + * Allocates a right, with qos options, in a space. Like + * mach_port_allocate_name, except that the implementation + * picks a name for the right. The name may be any legal name + * in the space that doesn't currently denote a right. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The right is allocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE "right" isn't a legal kind of right. + * KERN_INVALID_ARGUMENT The qos request was invalid. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + * KERN_NO_SPACE No room in space for another right. + */ + +kern_return_t +mach_port_allocate_qos( + ipc_space_t space, + mach_port_right_t right, + mach_port_qos_t *qosp, + mach_port_name_t *namep) +{ + kern_return_t kr; + + if (qosp->name == TRUE) + return KERN_INVALID_ARGUMENT; + kr = mach_port_allocate_full (space, right, SUBSYSTEM_NULL, + qosp, namep); + return (kr); +} + +/* + * Routine: mach_port_allocate_subsystem [kernel call] + * Purpose: + * Allocates a receive right in a space. Like + * mach_port_allocate, except that the caller specifies an + * RPC subsystem that is to be used to implement RPC's to the + * port. When possible, allocate rpc subsystem ports without + * nms, since within RPC ports are intended to be used for + * identity only (i.e. nms is painful in the distributed case + * and we don't need or want it for RPC anyway). + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The right is allocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. 
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + * KERN_NO_SPACE No room in space for another right. + * KERN_INVALID_ARGUMENT bogus subsystem + */ + +kern_return_t +mach_port_allocate_subsystem( + ipc_space_t space, + subsystem_t subsystem, + mach_port_name_t *namep) +{ + kern_return_t kr; + ipc_port_t port; + mach_port_qos_t qos = qos_template; + + kr = mach_port_allocate_full (space, + MACH_PORT_RIGHT_RECEIVE, + subsystem, &qos, namep); + return (kr); +} + +/* + * Routine: mach_port_allocate_full [kernel call] + * Purpose: + * Allocates a right in a space. Supports all of the + * special cases, such as specifying a subsystem, + * a specific name, a real-time port, etc. + * The name may be any legal name in the space that doesn't + * currently denote a right. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The right is allocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE "right" isn't a legal kind of right. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + * KERN_NO_SPACE No room in space for another right. 
+ */ + +kern_return_t +mach_port_allocate_full( + ipc_space_t space, + mach_port_right_t right, + subsystem_t subsystem, + mach_port_qos_t *qosp, + mach_port_name_t *namep) +{ + ipc_kmsg_t kmsg; + kern_return_t kr; + + if (space == IS_NULL) + return (KERN_INVALID_TASK); + + if (qosp->name) { + if (!MACH_PORT_VALID (*namep)) + return (KERN_INVALID_VALUE); + if (is_fast_space (space)) + return (KERN_FAILURE); + } + + if (subsystem != SUBSYSTEM_NULL) { + if (right != MACH_PORT_RIGHT_RECEIVE) + return (KERN_INVALID_VALUE); + } + + if (qosp->prealloc) { + mach_msg_size_t size = qosp->len + MAX_TRAILER_SIZE; + if (right != MACH_PORT_RIGHT_RECEIVE) + return (KERN_INVALID_VALUE); + kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(size)); + if (kmsg == IKM_NULL) + return (KERN_RESOURCE_SHORTAGE); + ikm_init(kmsg, size); + } + + switch (right) { + case MACH_PORT_RIGHT_RECEIVE: + { + ipc_port_t port; + + if (qosp->name) + kr = ipc_port_alloc_name(space, *namep, &port); + else + kr = ipc_port_alloc(space, namep, &port); + if (kr == KERN_SUCCESS) { + if (qosp->prealloc) + ipc_kmsg_set_prealloc(kmsg, port); + + if (subsystem != SUBSYSTEM_NULL) { + port->ip_subsystem = &subsystem->user; + subsystem_reference (subsystem); + } + ip_unlock(port); + + } else if (qosp->prealloc) + ipc_kmsg_free(kmsg); + break; + } + + case MACH_PORT_RIGHT_PORT_SET: + { + ipc_pset_t pset; + + if (qosp->name) + kr = ipc_pset_alloc_name(space, *namep, &pset); + else + kr = ipc_pset_alloc(space, namep, &pset); + if (kr == KERN_SUCCESS) + ips_unlock(pset); + break; + } + + case MACH_PORT_RIGHT_DEAD_NAME: + kr = ipc_object_alloc_dead(space, namep); + break; + + default: + kr = KERN_INVALID_VALUE; + break; + } + + return (kr); +} + +/* + * Routine: mach_port_destroy [kernel call] + * Purpose: + * Cleans up and destroys all rights denoted by a name + * in a space. 
The destruction of a receive right + * destroys the port, unless a port-destroyed request + * has been made for it; the destruction of a port-set right + * destroys the port set. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The name is destroyed. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + */ + +kern_return_t +mach_port_destroy( + ipc_space_t space, + mach_port_name_t name) +{ + ipc_entry_t entry; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_SUCCESS; + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + + kr = ipc_right_destroy(space, name, entry); + is_write_unlock(space); + return kr; +} + +/* + * Routine: mach_port_deallocate [kernel call] + * Purpose: + * Deallocates a user reference from a send right, + * send-once right, or a dead-name right. May + * deallocate the right, if this is the last uref, + * and destroy the name, if it doesn't denote + * other rights. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS The uref is deallocated. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT The right isn't correct. 
+ */ + +kern_return_t +mach_port_deallocate( + ipc_space_t space, + mach_port_name_t name) +{ + ipc_entry_t entry; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_SUCCESS; + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked */ + + kr = ipc_right_dealloc(space, name, entry); /* unlocks space */ + return kr; +} + +/* + * Routine: mach_port_get_refs [kernel call] + * Purpose: + * Retrieves the number of user references held by a right. + * Receive rights, port-set rights, and send-once rights + * always have one user reference. Returns zero if the + * name denotes a right, but not the queried right. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Number of urefs returned. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE "right" isn't a legal value. + * KERN_INVALID_NAME The name doesn't denote a right. 
+ */ + +kern_return_t +mach_port_get_refs( + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + mach_port_urefs_t *urefsp) +{ + mach_port_type_t type; + mach_port_urefs_t urefs; + ipc_entry_t entry; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (right >= MACH_PORT_RIGHT_NUMBER) + return KERN_INVALID_VALUE; + + if (!MACH_PORT_VALID(name)) { + if (right == MACH_PORT_RIGHT_SEND || + right == MACH_PORT_RIGHT_SEND_ONCE) { + *urefsp = 1; + return KERN_SUCCESS; + } + return KERN_INVALID_NAME; + } + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + + kr = ipc_right_info(space, name, entry, &type, &urefs); /* unlocks */ + if (kr != KERN_SUCCESS) + return kr; /* space is unlocked */ + is_write_unlock(space); + + if (type & MACH_PORT_TYPE(right)) + switch (right) { + case MACH_PORT_RIGHT_SEND_ONCE: + assert(urefs == 1); + /* fall-through */ + + case MACH_PORT_RIGHT_PORT_SET: + case MACH_PORT_RIGHT_RECEIVE: + *urefsp = 1; + break; + + case MACH_PORT_RIGHT_DEAD_NAME: + case MACH_PORT_RIGHT_SEND: + assert(urefs > 0); + *urefsp = urefs; + break; + + default: + panic("mach_port_get_refs: strange rights"); + } + else + *urefsp = 0; + + return kr; +} + +/* + * Routine: mach_port_mod_refs + * Purpose: + * Modifies the number of user references held by a right. + * The resulting number of user references must be non-negative. + * If it is zero, the right is deallocated. If the name + * doesn't denote other rights, it is destroyed. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Modified number of urefs. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE "right" isn't a legal value. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote specified right. + * KERN_INVALID_VALUE Impossible modification to urefs. 
+ * KERN_UREFS_OVERFLOW Urefs would overflow. + */ + +kern_return_t +mach_port_mod_refs( + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + mach_port_delta_t delta) +{ + ipc_entry_t entry; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (right >= MACH_PORT_RIGHT_NUMBER) + return KERN_INVALID_VALUE; + + if (!MACH_PORT_VALID(name)) { + if (right == MACH_PORT_RIGHT_SEND || + right == MACH_PORT_RIGHT_SEND_ONCE) + return KERN_SUCCESS; + return KERN_INVALID_NAME; + } + + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is write-locked and active */ + + kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */ + return kr; +} + + +/* + * Routine: mach_port_set_mscount [kernel call] + * Purpose: + * Changes a receive right's make-send count. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Set make-send count. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote receive rights. + */ + +kern_return_t +mach_port_set_mscount( + ipc_space_t space, + mach_port_name_t name, + mach_port_mscount_t mscount) +{ + ipc_port_t port; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + ipc_port_set_mscount(port, mscount); + + ip_unlock(port); + return KERN_SUCCESS; +} + +/* + * Routine: mach_port_set_seqno [kernel call] + * Purpose: + * Changes a receive right's sequence number. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Set sequence number. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. 
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights. + */ + +kern_return_t +mach_port_set_seqno( + ipc_space_t space, + mach_port_name_t name, + mach_port_seqno_t seqno) +{ + ipc_port_t port; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + ipc_mqueue_set_seqno(&port->ip_messages, seqno); + + ip_unlock(port); + return KERN_SUCCESS; +} + +/* + * Routine: mach_port_gst_helper + * Purpose: + * A helper function for mach_port_get_set_status. + */ + +void +mach_port_gst_helper( + ipc_pset_t pset, + ipc_port_t port, + ipc_entry_num_t maxnames, + mach_port_name_t *names, + ipc_entry_num_t *actualp) +{ + ipc_pset_t ip_pset; + mach_port_name_t name; + + assert(port != IP_NULL); + + ip_lock(port); + assert(ip_active(port)); + + name = port->ip_receiver_name; + assert(name != MACH_PORT_NULL); + + ip_unlock(port); + + if (ipc_pset_member(pset, port)) { + ipc_entry_num_t actual = *actualp; + + if (actual < maxnames) + names[actual] = name; + + *actualp = actual+1; + } +} + +/* + * Routine: mach_port_get_set_status [kernel call] + * Purpose: + * Retrieves a list of members in a port set. + * Returns the space's name for each receive right member. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Retrieved list of members. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME The name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote a port set. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +mach_port_get_set_status( + ipc_space_t space, + mach_port_name_t name, + mach_port_name_t **members, + mach_msg_type_number_t *membersCnt) +{ + ipc_entry_num_t actual; /* this many members */ + ipc_entry_num_t maxnames; /* space for this many members */ + kern_return_t kr; + + vm_size_t size; /* size of allocated memory */ + vm_offset_t addr; /* allocated memory */ + vm_map_copy_t memory; /* copied-in memory */ + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + size = PAGE_SIZE; /* initial guess */ + + for (;;) { + ipc_tree_entry_t tentry; + ipc_entry_t entry, table; + ipc_entry_num_t tsize; + mach_port_index_t index; + mach_port_name_t *names; + ipc_pset_t pset; + + kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); + if (kr != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + + /* can't fault while we hold locks */ + + kr = vm_map_wire(ipc_kernel_map, addr, addr + size, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + assert(kr == KERN_SUCCESS); + + kr = ipc_right_lookup_read(space, name, &entry); + if (kr != KERN_SUCCESS) { + kmem_free(ipc_kernel_map, addr, size); + return kr; + } + /* space is read-locked and active */ + + if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) { + is_read_unlock(space); + kmem_free(ipc_kernel_map, addr, size); + return KERN_INVALID_RIGHT; + } + + pset = (ipc_pset_t) entry->ie_object; + assert(pset != IPS_NULL); + /* the port set must be active */ + + names = (mach_port_name_t *) addr; + maxnames = size / sizeof(mach_port_name_t); + actual = 0; + + table = space->is_table; + tsize = space->is_table_size; + + for (index = 0; index < tsize; index++) { + ipc_entry_t ientry = &table[index]; + + if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) { + ipc_port_t port = + (ipc_port_t) ientry->ie_object; + + mach_port_gst_helper(pset, port, + maxnames, names, &actual); + } + } + + for (tentry = ipc_splay_traverse_start(&space->is_tree); + tentry != 
ITE_NULL; + tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) { + ipc_entry_bits_t bits = tentry->ite_bits; + + assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE); + + if (bits & MACH_PORT_TYPE_RECEIVE) { + ipc_port_t port = (ipc_port_t) tentry->ite_object; + + mach_port_gst_helper(pset, port, maxnames, + names, &actual); + } + } + ipc_splay_traverse_finish(&space->is_tree); + is_read_unlock(space); + + if (actual <= maxnames) + break; + + /* didn't have enough memory; allocate more */ + + kmem_free(ipc_kernel_map, addr, size); + size = round_page(actual * sizeof(mach_port_name_t)) + PAGE_SIZE; + } + + if (actual == 0) { + memory = VM_MAP_COPY_NULL; + + kmem_free(ipc_kernel_map, addr, size); + } else { + vm_size_t size_used; + vm_size_t vm_size_used; + + size_used = actual * sizeof(mach_port_name_t); + vm_size_used = round_page(size_used); + + /* + * Make used memory pageable and get it into + * copied-in form. Free any unused memory. + */ + + kr = vm_map_unwire(ipc_kernel_map, + addr, addr + vm_size_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, addr, size_used, + TRUE, &memory); + assert(kr == KERN_SUCCESS); + + if (vm_size_used != size) + kmem_free(ipc_kernel_map, + addr + vm_size_used, size - vm_size_used); + } + + *members = (mach_port_name_t *) memory; + *membersCnt = actual; + return KERN_SUCCESS; +} + +/* + * Routine: mach_port_move_member [kernel call] + * Purpose: + * If after is MACH_PORT_NULL, removes member + * from the port set it is in. Otherwise, adds + * member to after, removing it from any set + * it might already be in. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Moved the port. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_NAME Member didn't denote a right. + * KERN_INVALID_RIGHT Member didn't denote a receive right. + * KERN_INVALID_NAME After didn't denote a right. + * KERN_INVALID_RIGHT After didn't denote a port set right. 
+ * KERN_NOT_IN_SET + * After is MACH_PORT_NULL and Member isn't in a port set. + */ + +kern_return_t +mach_port_move_member( + ipc_space_t space, + mach_port_name_t member, + mach_port_name_t after) +{ + ipc_entry_t entry; + ipc_port_t port; + ipc_pset_t nset; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(member)) + return KERN_INVALID_RIGHT; + + if (after == MACH_PORT_DEAD) + return KERN_INVALID_RIGHT; + + kr = ipc_right_lookup_read(space, member, &entry); + if (kr != KERN_SUCCESS) + return kr; + /* space is read-locked and active */ + + if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) { + is_read_unlock(space); + return KERN_INVALID_RIGHT; + } + + port = (ipc_port_t) entry->ie_object; + assert(port != IP_NULL); + + if (after == MACH_PORT_NULL) + nset = IPS_NULL; + else { + entry = ipc_entry_lookup(space, after); + if (entry == IE_NULL) { + is_read_unlock(space); + return KERN_INVALID_NAME; + } + + if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) { + is_read_unlock(space); + return KERN_INVALID_RIGHT; + } + + nset = (ipc_pset_t) entry->ie_object; + assert(nset != IPS_NULL); + } + ip_lock(port); + ipc_pset_remove_all(port); + + if (nset != IPS_NULL) { + ips_lock(nset); + kr = ipc_pset_add(nset, port); + ips_unlock(nset); + } + ip_unlock(port); + is_read_unlock(space); + return kr; +} + +/* + * Routine: mach_port_request_notification [kernel call] + * Purpose: + * Requests a notification. The caller supplies + * a send-once right for the notification to use, + * and the call returns the previously registered + * send-once right, if any. Possible types: + * + * MACH_NOTIFY_PORT_DESTROYED + * Requests a port-destroyed notification + * for a receive right. Sync should be zero. + * MACH_NOTIFY_NO_SENDERS + * Requests a no-senders notification for a + * receive right. 
If there are currently no + * senders, sync is less than or equal to the + * current make-send count, and a send-once right + * is supplied, then an immediate no-senders + * notification is generated. + * MACH_NOTIFY_DEAD_NAME + * Requests a dead-name notification for a send + * or receive right. If the name is already a + * dead name, sync is non-zero, and a send-once + * right is supplied, then an immediate dead-name + * notification is generated. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Requested a notification. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE Bad id value. + * KERN_INVALID_NAME Name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote appropriate right. + * KERN_INVALID_CAPABILITY The notify port is dead. + * MACH_NOTIFY_PORT_DESTROYED: + * KERN_INVALID_VALUE Sync isn't zero. + * MACH_NOTIFY_DEAD_NAME: + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + * KERN_INVALID_ARGUMENT Name denotes dead name, but + * sync is zero or notify is IP_NULL. + * KERN_UREFS_OVERFLOW Name denotes dead name, but + * generating immediate notif. would overflow urefs. + */ + +kern_return_t +mach_port_request_notification( + ipc_space_t space, + mach_port_name_t name, + mach_msg_id_t id, + mach_port_mscount_t sync, + ipc_port_t notify, + ipc_port_t *previousp) +{ + kern_return_t kr; + ipc_entry_t entry; + ipc_port_t port; + + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (notify == IP_DEAD) + return KERN_INVALID_CAPABILITY; + +#if NOTYET + /* + * Requesting notifications on RPC ports is an error. 
+ */ + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) + return kr; + + port = (ipc_port_t) entry->ie_object; + + if (port->ip_subsystem != NULL) { + is_write_unlock(space); + panic("mach_port_request_notification: on RPC port!!"); + return KERN_INVALID_CAPABILITY; + } + is_write_unlock(space); +#endif /* NOTYET */ + + + switch (id) { + case MACH_NOTIFY_PORT_DESTROYED: { + ipc_port_t port, previous; + + if (sync != 0) + return KERN_INVALID_VALUE; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + ipc_port_pdrequest(port, notify, &previous); + /* port is unlocked */ + + *previousp = previous; + break; + } + + case MACH_NOTIFY_NO_SENDERS: { + ipc_port_t port; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + ipc_port_nsrequest(port, sync, notify, previousp); + /* port is unlocked */ + break; + } + + case MACH_NOTIFY_DEAD_NAME: + + if (!MACH_PORT_VALID(name)) { + /* + * Already dead. + * Should do immediate delivery check - + * will do that in the near future. + */ + return KERN_INVALID_ARGUMENT; + } + + kr = ipc_right_dnrequest(space, name, sync != 0, + notify, previousp); + if (kr != KERN_SUCCESS) + return kr; + break; + + default: + return KERN_INVALID_VALUE; + } + + return KERN_SUCCESS; +} + +/* + * Routine: mach_port_insert_right [kernel call] + * Purpose: + * Inserts a right into a space, as if the space + * voluntarily received the right in a message, + * except that the right gets the specified name. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Inserted the right. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE The name isn't a legal name. 
+ * KERN_NAME_EXISTS The name already denotes a right. + * KERN_INVALID_VALUE Message doesn't carry a port right. + * KERN_INVALID_CAPABILITY Port is null or dead. + * KERN_UREFS_OVERFLOW Urefs limit would be exceeded. + * KERN_RIGHT_EXISTS Space has rights under another name. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +mach_port_insert_right( + ipc_space_t space, + mach_port_name_t name, + ipc_port_t poly, + mach_msg_type_name_t polyPoly) +{ + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_PORT_VALID(name) || + !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly)) + return KERN_INVALID_VALUE; + + if (!IO_VALID((ipc_object_t) poly)) + return KERN_INVALID_CAPABILITY; + + return ipc_object_copyout_name(space, (ipc_object_t) poly, + polyPoly, FALSE, name); +} + +/* + * Routine: mach_port_extract_right [kernel call] + * Purpose: + * Extracts a right from a space, as if the space + * voluntarily sent the right to the caller. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Extracted the right. + * KERN_INVALID_TASK The space is null. + * KERN_INVALID_TASK The space is dead. + * KERN_INVALID_VALUE Requested type isn't a port right. + * KERN_INVALID_NAME Name doesn't denote a right. + * KERN_INVALID_RIGHT Name doesn't denote appropriate right. + */ + +kern_return_t +mach_port_extract_right( + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_port_t *poly, + mach_msg_type_name_t *polyPoly) +{ + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + if (!MACH_MSG_TYPE_PORT_ANY(msgt_name)) + return KERN_INVALID_VALUE; + + if (!MACH_PORT_VALID(name)) { + /* + * really should copy out a dead name, if it is a send or + * send-once right being copied, but instead return an + * error for now. 
+ */ + return KERN_INVALID_RIGHT; + } + + kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly); + + if (kr == KERN_SUCCESS) + *polyPoly = ipc_object_copyin_type(msgt_name); + return kr; +} + + +kern_return_t +mach_port_get_attributes( + ipc_space_t space, + mach_port_name_t name, + int flavor, + mach_port_info_t info, + mach_msg_type_number_t *count) +{ + ipc_port_t port; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + switch (flavor) { + case MACH_PORT_LIMITS_INFO: { + mach_port_limits_t *lp = (mach_port_limits_t *)info; + + if (*count < MACH_PORT_LIMITS_INFO_COUNT) + return KERN_FAILURE; + + if (!MACH_PORT_VALID(name)) { + *count = 0; + break; + } + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + lp->mpl_qlimit = port->ip_messages.imq_qlimit; + *count = MACH_PORT_LIMITS_INFO_COUNT; + ip_unlock(port); + break; + } + + case MACH_PORT_RECEIVE_STATUS: { + mach_port_status_t *statusp = (mach_port_status_t *)info; + spl_t s; + + if (*count < MACH_PORT_RECEIVE_STATUS_COUNT) + return KERN_FAILURE; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + statusp->mps_pset = port->ip_pset_count; + + s = splsched(); + imq_lock(&port->ip_messages); + statusp->mps_seqno = port->ip_messages.imq_seqno; + statusp->mps_qlimit = port->ip_messages.imq_qlimit; + statusp->mps_msgcount = port->ip_messages.imq_msgcount; + imq_unlock(&port->ip_messages); + splx(s); + + statusp->mps_mscount = port->ip_mscount; + statusp->mps_sorights = port->ip_sorights; + statusp->mps_srights = port->ip_srights > 0; + statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL; + statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL; + statusp->mps_flags = 0; + + *count = MACH_PORT_RECEIVE_STATUS_COUNT; + ip_unlock(port); + break; + } + + case 
MACH_PORT_DNREQUESTS_SIZE: { + ipc_port_request_t table; + + if (*count < MACH_PORT_DNREQUESTS_SIZE_COUNT) + return KERN_FAILURE; + + if (!MACH_PORT_VALID(name)) { + *(int *)info = 0; + break; + } + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + table = port->ip_dnrequests; + if (table == IPR_NULL) + *(int *)info = 0; + else + *(int *)info = table->ipr_size->its_size; + *count = MACH_PORT_DNREQUESTS_SIZE_COUNT; + ip_unlock(port); + break; + } + + default: + return KERN_INVALID_ARGUMENT; + /*NOTREACHED*/ + } + + return KERN_SUCCESS; +} + +kern_return_t +mach_port_set_attributes( + ipc_space_t space, + mach_port_name_t name, + int flavor, + mach_port_info_t info, + mach_msg_type_number_t count) +{ + ipc_port_t port; + kern_return_t kr; + + if (space == IS_NULL) + return KERN_INVALID_TASK; + + switch (flavor) { + + case MACH_PORT_LIMITS_INFO: { + mach_port_limits_t *mplp = (mach_port_limits_t *)info; + + if (count < MACH_PORT_LIMITS_INFO_COUNT) + return KERN_FAILURE; + + if (mplp->mpl_qlimit > MACH_PORT_QLIMIT_MAX) + return KERN_INVALID_VALUE; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + ipc_mqueue_set_qlimit(&port->ip_messages, mplp->mpl_qlimit); + ip_unlock(port); + break; + } + case MACH_PORT_DNREQUESTS_SIZE: { + if (count < MACH_PORT_DNREQUESTS_SIZE_COUNT) + return KERN_FAILURE; + + if (!MACH_PORT_VALID(name)) + return KERN_INVALID_RIGHT; + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) + return kr; + /* port is locked and active */ + + kr = ipc_port_dngrow(port, *(int *)info); + if (kr != KERN_SUCCESS) + return kr; + break; + } + default: + return KERN_INVALID_ARGUMENT; + /*NOTREACHED*/ + } + return KERN_SUCCESS; +} + +/* + * Routine: mach_port_insert_member [kernel call] + * Purpose: + * Add the 
receive right, specified by name, to
+ *		a portset.
+ *		The port cannot already be a member of the set.
+ *	Conditions:
+ *		Nothing locked.
+ *	Returns:
+ *		KERN_SUCCESS		Moved the port.
+ *		KERN_INVALID_TASK	The space is null.
+ *		KERN_INVALID_TASK	The space is dead.
+ *		KERN_INVALID_NAME	name didn't denote a right.
+ *		KERN_INVALID_RIGHT	name didn't denote a receive right.
+ *		KERN_INVALID_NAME	pset_name didn't denote a right.
+ *		KERN_INVALID_RIGHT	pset_name didn't denote a portset right.
+ *		KERN_ALREADY_IN_SET	name was already a member of pset.
+ */
+
+kern_return_t
+mach_port_insert_member(
+	ipc_space_t		space,
+	mach_port_name_t	name,
+	mach_port_name_t	psname)
+{
+	ipc_object_t obj;
+	ipc_object_t psobj;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname))
+		return KERN_INVALID_RIGHT;
+
+	/* look up both rights in one step so both objects come back locked */
+	kr = ipc_object_translate_two(space,
+		name, MACH_PORT_RIGHT_RECEIVE, &obj,
+		psname, MACH_PORT_RIGHT_PORT_SET, &psobj);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	/* obj and psobj are locked (and were locked in that order) */
+	assert(psobj != IO_NULL);
+	assert(obj != IO_NULL);
+
+	kr = ipc_pset_add((ipc_pset_t)psobj, (ipc_port_t)obj);
+	io_unlock(psobj);
+	io_unlock(obj);
+	return kr;
+}
+
+/*
+ *	Routine:	mach_port_extract_member [kernel call]
+ *	Purpose:
+ *		Remove a port from one portset that it is a member of.
+ *	Conditions:
+ *		Nothing locked.
+ *	Returns:
+ *		KERN_SUCCESS		Moved the port.
+ *		KERN_INVALID_TASK	The space is null.
+ *		KERN_INVALID_TASK	The space is dead.
+ *		KERN_INVALID_NAME	Member didn't denote a right.
+ *		KERN_INVALID_RIGHT	Member didn't denote a receive right.
+ *		KERN_INVALID_NAME	After didn't denote a right.
+ *		KERN_INVALID_RIGHT	After didn't denote a port set right.
+ *		KERN_NOT_IN_SET
+ *			After is MACH_PORT_NULL and Member isn't in a port set.
+ */
+
+kern_return_t
+mach_port_extract_member(
+	ipc_space_t		space,
+	mach_port_name_t	name,
+	mach_port_name_t	psname)
+{
+	mach_port_name_t oldname;	/* NOTE(review): never used in this routine */
+	ipc_object_t psobj;
+	ipc_object_t obj;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname))
+		return KERN_INVALID_RIGHT;
+
+	/* look up both rights in one step so both objects come back locked */
+	kr = ipc_object_translate_two(space,
+		name, MACH_PORT_RIGHT_RECEIVE, &obj,
+		psname, MACH_PORT_RIGHT_PORT_SET, &psobj);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	/* obj and psobj are both locked (and were locked in that order) */
+	assert(psobj != IO_NULL);
+	assert(obj != IO_NULL);
+
+	kr = ipc_pset_remove((ipc_pset_t)psobj, (ipc_port_t)obj);
+	io_unlock(psobj);
+	io_unlock(obj);
+	return kr;
+}
+
diff --git a/osfmk/ipc/mig_log.c b/osfmk/ipc/mig_log.c
new file mode 100644
index 000000000..9d9e85e55
--- /dev/null
+++ b/osfmk/ipc/mig_log.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:16 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:14:23 ezf + * change marker to not FREE + * [1994/09/22 21:31:33 ezf] + * + * Revision 1.2.2.4 1993/08/03 18:29:18 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 16:11:07 gm] + * + * Revision 1.2.2.3 1993/07/22 16:18:15 rod + * Add ANSI prototypes. CR #9523. + * [1993/07/22 13:34:22 rod] + * + * Revision 1.2.2.2 1993/06/09 02:33:38 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:11:41 jeffc] + * + * Revision 1.2 1993/04/19 16:23:26 devrcs + * Untyped ipc merge: + * Support for logging and tracing within the MIG stubs + * [1993/02/24 14:49:29 travos] + * + * $EndLog$ + */ + +#ifdef MACH_KERNEL +#include +#endif + +#include +#include + +int mig_tracing, mig_errors, mig_full_tracing; + +/* + * Tracing facilities for MIG generated stubs. + * + * At the moment, there is only a printf, which is + * activated through the runtime switch: + * mig_tracing to call MigEventTracer + * mig_errors to call MigEventErrors + * For this to work, MIG has to run with the -L option, + * and the mig_debug flags has to be selected + * + * In the future, it will be possible to collect infos + * on the use of MACH IPC with an application similar + * to netstat. 
+ *
+ * A new option will be generated accordingly to the
+ * kernel configuration rules, e.g
+ * #include
+ */
+
+/*
+ * Log one MIG event to the console.  Always prints the basic triple
+ * (who|what|msgh_id); the size/kernel-port-descriptor/retcode/port-count
+ * details are appended only when mig_full_tracing is set.
+ */
+void
+MigEventTracer(
+	mig_who_t		who,
+	mig_which_event_t	what,
+	mach_msg_id_t		msgh_id,
+	unsigned int		size,
+	unsigned int		kpd,
+	unsigned int		retcode,
+	unsigned int		ports,
+	unsigned int		oolports,
+	unsigned int		ool,
+	char			*file,
+	unsigned int		line)
+{
+	printf("%d|%d|%d", who, what, msgh_id);
+	if (mig_full_tracing)
+		printf(" -- sz%d|kpd%d|ret(0x%x)|p%d|o%d|op%d|%s, %d",
+			size, kpd, retcode, ports, oolports, ool, file, line);
+	printf("\n");
+}
+
+/*
+ * Log one MIG error.  For MACH_MSG_ERROR_UNKNOWN_ID "par" is an int*
+ * (the unknown msgh_id); otherwise it is treated as a C string.
+ */
+void
+MigEventErrors(
+	mig_who_t		who,
+	mig_which_error_t	what,
+	void			*par,
+	char			*file,
+	unsigned int		line)
+{
+	if (what == MACH_MSG_ERROR_UNKNOWN_ID)
+		printf("%d|%d|%d -- %s %d\n", who, what, *(int *)par, file, line);
+	else
+		printf("%d|%d|%s -- %s %d\n", who, what, (char *)par, file, line);
+}
diff --git a/osfmk/ipc/port.h b/osfmk/ipc/port.h
new file mode 100644
index 000000000..52ffaaf68
--- /dev/null
+++ b/osfmk/ipc/port.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: ipc/ipc_port.h + * Author: Rich Draves + * Date: 1989 + * + * Implementation specific complement to mach/port.h. + */ + +#ifndef _IPC_PORT_H_ +#define _IPC_PORT_H_ + +#include + +#define MACH_PORT_NGEN(name) MACH_PORT_MAKE(0, MACH_PORT_GEN(name)) + +/* + * Typedefs for code cleanliness. These must all have + * the same (unsigned) type as mach_port_name_t. 
+ */
+
+typedef mach_port_name_t mach_port_index_t;	/* index values */
+typedef mach_port_name_t mach_port_gen_t;	/* generation numbers */
+
+
+/* user references are held in a 16-bit field */
+#define	MACH_PORT_UREFS_MAX	((mach_port_urefs_t) ((1 << 16) - 1))
+
+/* true if adding (signed) delta to urefs would wrap or exceed the max */
+#define	MACH_PORT_UREFS_OVERFLOW(urefs, delta)			\
+	(((delta) > 0) &&					\
+	 ((((urefs) + (delta)) <= (urefs)) ||			\
+	  (((urefs) + (delta)) > MACH_PORT_UREFS_MAX)))
+
+/* true if subtracting would go below zero.
+   NOTE(review): -(delta) overflows if delta is the most negative
+   value of its type -- presumably callers never pass that; confirm */
+#define	MACH_PORT_UREFS_UNDERFLOW(urefs, delta)			\
+	(((delta) < 0) && (-(delta) > (urefs)))
+
+#endif	/* _IPC_PORT_H_ */
diff --git a/osfmk/kdp/kdp.c b/osfmk/kdp/kdp.c
new file mode 100644
index 000000000..a6e8abc71
--- /dev/null
+++ b/osfmk/kdp/kdp.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1993 NeXT Computer, Inc. All rights reserved.
+ *
+ * kdp.c -- Kernel Debugging Protocol.
+ *
+ */
+
+#include
+#include
+
+#include
+#include
+
+int kdp_vm_read( caddr_t, caddr_t, unsigned int);
+int kdp_vm_write( caddr_t, caddr_t, unsigned int);
+
+#define DO_ALIGN	1	/* align all packet data accesses */
+
+#define KDP_TEST_HARNESS 0
+#if KDP_TEST_HARNESS
+#define dprintf(x) kprintf x
+#else
+#define dprintf(x)
+#endif
+
+/* one handler per request code, indexed by (request - KDP_CONNECT) */
+static kdp_dispatch_t
+    dispatch_table[KDP_TERMINATION - KDP_CONNECT + 1] =
+    {
+/* 0 */	kdp_connect,
+/* 1 */	kdp_disconnect,
+/* 2 */	kdp_hostinfo,
+/* 3 */	kdp_regions,
+/* 4 */	kdp_maxbytes,
+/* 5 */	kdp_readmem,
+/* 6 */	kdp_writemem,
+/* 7 */	kdp_readregs,
+/* 8 */	kdp_writeregs,
+/* 9 */	kdp_unknown,
+/* A */	kdp_unknown,
+/* B */	kdp_suspend,
+/* C */	kdp_resumecpus,
+/* D */	kdp_unknown,
+/* E */	kdp_unknown,
+    };
+
+kdp_glob_t	kdp;
+int kdp_flag=0;
+
+/*
+ * Validate an incoming KDP request packet and dispatch it to the
+ * matching handler.  Returns TRUE if a reply was generated (pkt/len
+ * are overwritten with the reply), FALSE on a malformed packet.
+ */
+boolean_t
+kdp_packet(
+	unsigned char	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	static unsigned	aligned_pkt[1538/sizeof(unsigned)+1]; // max ether pkt
+	kdp_pkt_t	*rd = (kdp_pkt_t *)&aligned_pkt;
+	int		plen = *len;
+	unsigned int	req;
+	boolean_t	ret;
+
+#if DO_ALIGN
+	/* copy into an aligned buffer so bitfield/word accesses are safe */
+	bcopy((char *)pkt, (char *)rd, sizeof(aligned_pkt));
+#else
+	rd = (kdp_pkt_t *)pkt;
+#endif
+	if (plen < sizeof (rd->hdr) || rd->hdr.len != plen) {
+		printf("kdp_packet bad len pkt %d hdr %d\n", plen, rd->hdr.len);
+
+		return (FALSE);
+	}
+
+	if (rd->hdr.is_reply) {
+		printf("kdp_packet reply recvd req %x seq %x\n",
+			rd->hdr.request, rd->hdr.seq);
+
+		return (FALSE);
+	}
+
+	req = rd->hdr.request;
+	/* NOTE(review): req is unsigned and KDP_CONNECT is the first
+	   enumerator (0), so "req < KDP_CONNECT" is always false; only
+	   the upper bound actually filters */
+	if (req < KDP_CONNECT || req > KDP_TERMINATION) {
+		printf("kdp_packet bad request %x len %d seq %x key %x\n",
+			rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);
+
+		return (FALSE);
+	}
+
+	ret = ((*dispatch_table[req - KDP_CONNECT])(rd, len, reply_port));
+#if DO_ALIGN
+	/* copy the reply back out of the aligned buffer */
+	bcopy((char *)rd, (char *) pkt, *len);
+#endif
+	return ret;
+}
+
+/* handler for request codes with no implementation; logs and drops */
+static boolean_t
+kdp_unknown(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_pkt_t *rd = (kdp_pkt_t *)pkt;
+
+	printf("kdp_unknown 
request %x len %d seq %x key %x\n",
+		rd->hdr.request, rd->hdr.len, rd->hdr.seq, rd->hdr.key);
+
+	return (FALSE);
+}
+
+/* KDP_CONNECT: open a debugger session and record its reply ports */
+static boolean_t
+kdp_connect(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_connect_req_t	*rq = &pkt->connect_req;
+	int			plen = *len;
+	kdp_connect_reply_t	*rp = &pkt->connect_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));
+
+	if (kdp.is_conn) {
+		if (rq->hdr.seq == kdp.conn_seq)	/* duplicate request */
+			rp->error = KDPERR_NO_ERROR;
+		else
+			rp->error = KDPERR_ALREADY_CONNECTED;
+	}
+	else {
+		kdp.reply_port = rq->req_reply_port;
+		kdp.exception_port = rq->exc_note_port;
+		kdp.is_conn = TRUE;
+		kdp.conn_seq = rq->hdr.seq;
+
+		rp->error = KDPERR_NO_ERROR;
+	}
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	if (current_debugger == KDP_CUR_DB)
+		active_debugger=1;
+
+	return (TRUE);
+}
+
+/* KDP_DISCONNECT: tear down the session and clear all session state */
+static boolean_t
+kdp_disconnect(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_disconnect_req_t	*rq = &pkt->disconnect_req;
+	int			plen = *len;
+	kdp_disconnect_reply_t	*rp = &pkt->disconnect_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	if (!kdp.is_conn)
+		return (FALSE);
+
+	dprintf(("kdp_disconnect\n"));
+
+	/* grab the reply port before wiping the session state */
+	*reply_port = kdp.reply_port;
+
+	kdp.reply_port = kdp.exception_port = 0;
+	kdp.is_halted = kdp.is_conn = FALSE;
+	kdp.exception_seq = kdp.conn_seq = 0;
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	*len = rp->hdr.len;
+
+	if (current_debugger == KDP_CUR_DB)
+		active_debugger=0;
+
+	return (TRUE);
+}
+
+/* KDP_HOSTINFO: report cpu mask/type/subtype of the debugged host */
+static boolean_t
+kdp_hostinfo(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_hostinfo_req_t	*rq = &pkt->hostinfo_req;
+	int			plen = *len;
+	kdp_hostinfo_reply_t	*rp = &pkt->hostinfo_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+
kdp_machine_hostinfo(&rp->hostinfo);
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_SUSPEND: mark the target halted so it spins waiting for KDP */
+static boolean_t
+kdp_suspend(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_suspend_req_t	*rq = &pkt->suspend_req;
+	int			plen = *len;
+	kdp_suspend_reply_t	*rp = &pkt->suspend_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	dprintf(("kdp_suspend\n"));
+
+	kdp.is_halted = TRUE;
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_RESUMECPUS: clear the halted flag so execution continues */
+static boolean_t
+kdp_resumecpus(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_resumecpus_req_t	*rq = &pkt->resumecpus_req;
+	int			plen = *len;
+	kdp_resumecpus_reply_t	*rp = &pkt->resumecpus_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	dprintf(("kdp_resumecpus %x\n", rq->cpu_mask));
+
+	kdp.is_halted = FALSE;
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_WRITEMEM: copy rq->nbytes of packet data into kernel memory */
+static boolean_t
+kdp_writemem(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_writemem_req_t	*rq = &pkt->writemem_req;
+	int			plen = *len;
+	kdp_writemem_reply_t	*rp = &pkt->writemem_reply;
+	int			cnt;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	if (rq->nbytes > MAX_KDP_DATA_SIZE)
+		rp->error = KDPERR_BAD_NBYTES;
+	else {
+		dprintf(("kdp_writemem addr %x size %d\n", rq->address, rq->nbytes));
+
+		/* NOTE(review): cnt (bytes actually written) is ignored --
+		   a short write still reports KDPERR_NO_ERROR */
+		cnt = kdp_vm_write((caddr_t)rq->data, (caddr_t)rq->address, rq->nbytes);
+		rp->error = KDPERR_NO_ERROR;
+	}
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_READMEM: copy rq->nbytes of kernel memory into the reply */
+static boolean_t
+kdp_readmem(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_readmem_req_t	*rq = &pkt->readmem_req;
+	int			plen = *len;
+	kdp_readmem_reply_t	*rp = &pkt->readmem_reply;
+	int			cnt;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	if (rq->nbytes > MAX_KDP_DATA_SIZE)
+		rp->error = KDPERR_BAD_NBYTES;
+	else {
+		dprintf(("kdp_readmem addr %x size %d\n", rq->address, rq->nbytes));
+
+		/* only the bytes actually read are appended to the reply */
+		cnt = kdp_vm_read((caddr_t)rq->address, (caddr_t)rp->data, rq->nbytes);
+		rp->error = KDPERR_NO_ERROR;
+
+		rp->hdr.len += cnt;
+	}
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_MAXBYTES: report the largest read/write payload we accept */
+static boolean_t
+kdp_maxbytes(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_maxbytes_req_t	*rq = &pkt->maxbytes_req;
+	int			plen = *len;
+	kdp_maxbytes_reply_t	*rp = &pkt->maxbytes_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	dprintf(("kdp_maxbytes\n"));
+
+	rp->max_bytes = MAX_KDP_DATA_SIZE;
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/*
+ * KDP_REGIONS: describe the readable address regions.  This kernel
+ * reports a single region covering the whole address space.
+ */
+static boolean_t
+kdp_regions(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_regions_req_t	*rq = &pkt->regions_req;
+	int			plen = *len;
+	kdp_regions_reply_t	*rp = &pkt->regions_reply;
+	kdp_region_t		*r;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	dprintf(("kdp_regions\n"));
+
+	r = rp->regions;
+	rp->nregions = 0;
+
+	/* was "(vm_offset_t)r->address = 0;" -- a cast used as an lvalue,
+	 * which is a non-standard extension; assign the pointer directly */
+	r->address = (void *) 0;
+	r->nbytes = 0xffffffff;
+
+	r->protection = VM_PROT_ALL; r++; rp->nregions++;
+
+	rp->hdr.len += rp->nregions * sizeof (kdp_region_t);
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_WRITEREGS: write a register set of the given flavor on a cpu */
+static boolean_t
+kdp_writeregs(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_writeregs_req_t	*rq = &pkt->writeregs_req;
+	int			plen = *len;
+	int			size;
+	kdp_writeregs_reply_t	*rp = &pkt->writeregs_reply;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	/* register payload size = packet minus header and flavor word */
+	size = rq->hdr.len - sizeof(kdp_hdr_t) - sizeof(unsigned int);
+	rp->error = kdp_machine_write_regs(rq->cpu, rq->flavor, rq->data,
&size);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
+
+/* KDP_READREGS: read a register set of the given flavor on a cpu */
+static boolean_t
+kdp_readregs(
+	kdp_pkt_t	*pkt,
+	int		*len,
+	unsigned short	*reply_port
+)
+{
+	kdp_readregs_req_t	*rq = &pkt->readregs_req;
+	int			plen = *len;
+	kdp_readregs_reply_t	*rp = &pkt->readregs_reply;
+	int			size;
+
+	if (plen < sizeof (*rq))
+		return (FALSE);
+
+	rp->hdr.is_reply = 1;
+	rp->hdr.len = sizeof (*rp);
+
+	/* NOTE(review): size is an out-parameter set by
+	   kdp_machine_read_regs; presumably it is 0 on error
+	   (e.g. KDPERR_BADFLAVOR) -- confirm, since it is added to
+	   the reply length unconditionally below */
+	rp->error = kdp_machine_read_regs(rq->cpu, rq->flavor, rp->data, &size);
+	rp->hdr.len += size;
+
+	*reply_port = kdp.reply_port;
+	*len = rp->hdr.len;
+
+	return (TRUE);
+}
diff --git a/osfmk/kdp/kdp.h b/osfmk/kdp/kdp.h
new file mode 100644
index 000000000..a13ef5f08
--- /dev/null
+++ b/osfmk/kdp/kdp.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1993 NeXT Computer, Inc. All rights reserved.
+ *
+ * kdp.h -- exported interface to kdp module
+ *
+ */
+
+/* Raise exception in debugger.
*/ + +void +kdp_raise_exception( + unsigned int exception, + unsigned int code, + unsigned int subcode, + void *saved_state +); + +/* Reset debugger state. */ + +void +kdp_reset(void); + diff --git a/osfmk/kdp/kdp_en_debugger.h b/osfmk/kdp/kdp_en_debugger.h new file mode 100644 index 000000000..9edc1069d --- /dev/null +++ b/osfmk/kdp/kdp_en_debugger.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * Ethernet debugger header file + * + * HISTORY + * + * 29 May 97 Dieter Siegmund at Apple. + * Created. + */ + +typedef void (*kdp_send_t)(void * pkt, unsigned int pkt_len); +typedef void (*kdp_receive_t)(void * pkt, unsigned int * pkt_len, + unsigned int timeout); +void +kdp_register_send_receive(kdp_send_t send, kdp_receive_t receive); diff --git a/osfmk/kdp/kdp_internal.h b/osfmk/kdp/kdp_internal.h new file mode 100644 index 000000000..eedbcd1f0 --- /dev/null +++ b/osfmk/kdp/kdp_internal.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * Copyright (c) 1993 NeXT Computer, Inc. All rights reserved.
+ *
+ * kdp_internal.h -- internal definitions for kdp module
+ *
+ */
+
+#include
+#include
+
+/* global session state for the (single) KDP debugger connection */
+typedef struct {
+	unsigned short		reply_port;	/* UDP port for request replies */
+	unsigned int		conn_seq;	/* sequence number of the connect */
+	boolean_t		is_conn;	/* a debugger is connected */
+	void			*saved_state;	/* cpu state saved at exception */
+	unsigned short		exception_port;	/* UDP port for exception notes */
+	boolean_t		is_halted;	/* target suspended by KDP_SUSPEND */
+	unsigned char		exception_seq;
+	boolean_t		exception_ack_needed;
+} kdp_glob_t;
+
+extern kdp_glob_t	kdp;
+extern int		kdp_flag;
+
+/* signature shared by all request handlers in dispatch_table */
+typedef boolean_t
+(*kdp_dispatch_t) (
+	kdp_pkt_t *,
+	int  *,
+	unsigned short *
+);
+
+boolean_t
+kdp_packet(
+	unsigned char *,
+	int *,
+	unsigned short *
+);
+
+void
+kdp_exception(
+	unsigned char *,
+	int *,
+	unsigned short *,
+	unsigned int,
+	unsigned int,
+	unsigned int
+);
+
+boolean_t
+kdp_exception_ack(
+	unsigned char *,
+	int
+);
+
+void
+kdp_panic(
+	const char		*msg
+);
+
+void
+kdp_reset(
+	void
+);
+
+void
+kdp_reboot(
+	void
+);
+
+void
+kdp_us_spin(
+	int usec
+);
+
+int
+kdp_intr_disbl(
+	void
+);
+
+void
+kdp_intr_enbl(
+	int s
+);
+
+kdp_error_t
+kdp_machine_read_regs(
+	unsigned int cpu,
+	unsigned int flavor,
+	char 
*data, + int *size +); + +kdp_error_t +kdp_machine_write_regs( + unsigned int cpu, + unsigned int flavor, + char *data, + int *size +); + +void +kdp_machine_hostinfo( + kdp_hostinfo_t *hostinfo +); + +void +kdp_sync_cache( + void +); + + diff --git a/osfmk/kdp/kdp_private.h b/osfmk/kdp/kdp_private.h new file mode 100644 index 000000000..021758873 --- /dev/null +++ b/osfmk/kdp/kdp_private.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993 NeXT Computer, Inc. All rights reserved. 
+ * + * kdp_private.h -- private functions for kdp.c + * + */ + +static boolean_t +kdp_unknown( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_connect( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_disconnect( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_hostinfo( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_suspend( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_readregs( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_writeregs( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_regions( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_maxbytes( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_readmem( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_writemem( + kdp_pkt_t *, + int *, + unsigned short * +); + +static boolean_t +kdp_resumecpus( + kdp_pkt_t *, + int *, + unsigned short * +); + diff --git a/osfmk/kdp/kdp_protocol.h b/osfmk/kdp/kdp_protocol.h new file mode 100644 index 000000000..31fe7a3f5 --- /dev/null +++ b/osfmk/kdp/kdp_protocol.h @@ -0,0 +1,388 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* Copyright (c) 1991 by NeXT Computer, Inc. + * + * File: services/kdp.h + * + * Definition of remote debugger protocol. + * + * HISTORY + * 27-Oct-91 Mike DeMoney (mike@next.com) + * Created + */ + +#include + +/* + * Retransmit parameters + */ +#if DDEBUG_DEBUG || DEBUG_DEBUG +#define KDP_REXMIT_SECS 20 /* rexmit if no ack in 3 secs */ +#else /* DDEBUG_DEBUG || DEBUG_DEBUG */ +#define KDP_REXMIT_SECS 3 /* rexmit if no ack in 3 secs */ +#endif /* DDEBUG_DEBUG || DEBUG_DEBUG */ +#define KDP_REXMIT_TRIES 8 /* xmit 8 times, then give up */ + +/* + * (NMI) Attention Max Wait Time + * Remote will resume unless KDP requests is received within this + * many seconds after an attention (nmi) packet is sent. + */ +#define KDP_MAX_ATTN_WAIT 30 /* wait max of 30 seconds */ + +/* + * Well-known UDP port, debugger side. + * FIXME: This is what the 68K guys use, but beats me how they chose it... + */ +#define KDP_REMOTE_PORT 41139 /* pick one and register it */ + +/* + * UDP ports, KDB side. 5 port numbers are reserved for each port (request + * and exception). This allows multiple KDBs to run on one host. 
+ */ +#define UDP_HOST_COMM_BASE 41140 +#define UDP_HOST_EXCEP_BASE 41145 +#define NUM_UDP_HOST_PORTS 5 + +/* + * Requests + */ +typedef enum { + /* connection oriented requests */ + KDP_CONNECT, KDP_DISCONNECT, + + /* obtaining client info */ + KDP_HOSTINFO, KDP_REGIONS, KDP_MAXBYTES, + + /* memory access */ + KDP_READMEM, KDP_WRITEMEM, + + /* register access */ + KDP_READREGS, KDP_WRITEREGS, + + /* executable image info */ + KDP_LOAD, KDP_IMAGEPATH, + + /* execution control */ + KDP_SUSPEND, KDP_RESUMECPUS, + + /* exception and termination notification, NOT true requests */ + KDP_EXCEPTION, KDP_TERMINATION, + + /* remote reboot request */ + KDP_HOSTREBOOT +} kdp_req_t; + +/* + * Common KDP packet header + */ +typedef struct { + kdp_req_t request:7; /* request type */ + unsigned is_reply:1; /* 0 => request, 1 => reply */ + unsigned seq:8; /* sequence number within session */ + unsigned len:16; /* length of entire pkt including hdr */ + unsigned key; /* session key */ +} kdp_hdr_t; + +/* + * KDP errors + */ +typedef enum { + KDPERR_NO_ERROR = 0, + KDPERR_ALREADY_CONNECTED, + KDPERR_BAD_NBYTES, + KDPERR_BADFLAVOR /* bad flavor in w/r regs */ +} kdp_error_t; + +/* + * KDP requests and reply packet formats + */ + +/* + * KDP_CONNECT + */ +typedef struct { /* KDP_CONNECT request */ + kdp_hdr_t hdr; + unsigned short req_reply_port; /* udp port which to send replies */ + unsigned short exc_note_port; /* udp port which to send exc notes */ + char greeting[0]; /* "greetings", null-terminated */ +} kdp_connect_req_t; + +typedef struct { /* KDP_CONNECT reply */ + kdp_hdr_t hdr; + kdp_error_t error; +} kdp_connect_reply_t; + +/* + * KDP_DISCONNECT + */ +typedef struct { /* KDP_DISCONNECT request */ + kdp_hdr_t hdr; +} kdp_disconnect_req_t; + +typedef struct { /* KDP_DISCONNECT reply */ + kdp_hdr_t hdr; +} kdp_disconnect_reply_t; + +/* + * KDP_HOSTINFO + */ +typedef struct { /* KDP_HOSTINFO request */ + kdp_hdr_t hdr; +} kdp_hostinfo_req_t; + +typedef struct { + unsigned 
cpus_mask; /* bit is 1 if cpu present */ + int cpu_type; + int cpu_subtype; +} kdp_hostinfo_t; + +typedef struct { /* KDP_HOSTINFO reply */ + kdp_hdr_t hdr; + kdp_hostinfo_t hostinfo; +} kdp_hostinfo_reply_t; + +/* + * KDP_REGIONS + */ +typedef struct { /* KDP_REGIONS request */ + kdp_hdr_t hdr; +} kdp_regions_req_t; + +#define VM_PROT_VOLATILE ((vm_prot_t) 0x08) /* not cacheable */ +#define VM_PROT_SPARSE ((vm_prot_t) 0x10) /* sparse addr space */ + +typedef struct { + void *address; + unsigned nbytes; + vm_prot_t protection; +} kdp_region_t; + +typedef struct { /* KDP_REGIONS reply */ + kdp_hdr_t hdr; + unsigned nregions; + kdp_region_t regions[0]; +} kdp_regions_reply_t; + +/* + * KDP_MAXBYTES + */ +typedef struct { /* KDP_MAXBYTES request */ + kdp_hdr_t hdr; +} kdp_maxbytes_req_t; + +typedef struct { /* KDP_MAXBYTES reply */ + kdp_hdr_t hdr; + unsigned max_bytes; +} kdp_maxbytes_reply_t; + +/* + * KDP_READMEM + */ +typedef struct { /* KDP_READMEM request */ + kdp_hdr_t hdr; + void *address; + unsigned nbytes; +} kdp_readmem_req_t; + +typedef struct { /* KDP_READMEM reply */ + kdp_hdr_t hdr; + kdp_error_t error; + char data[0]; +} kdp_readmem_reply_t; + +/* + * KDP_WRITEMEM + */ +typedef struct { /* KDP_WRITEMEM request */ + kdp_hdr_t hdr; + void *address; + unsigned nbytes; + char data[0]; +} kdp_writemem_req_t; + +typedef struct { /* KDP_WRITEMEM reply */ + kdp_hdr_t hdr; + kdp_error_t error; +} kdp_writemem_reply_t; + +/* + * KDP_READREGS + */ +typedef struct { /* KDP_READREGS request */ + kdp_hdr_t hdr; + unsigned cpu; + unsigned flavor; +} kdp_readregs_req_t; + +typedef struct { /* KDP_READREGS reply */ + kdp_hdr_t hdr; + kdp_error_t error; /* could be KDPERR_BADFLAVOR */ + char data[0]; +} kdp_readregs_reply_t; + +/* + * KDP_WRITEREGS + */ +typedef struct { /* KDP_WRITEREGS request */ + kdp_hdr_t hdr; + unsigned cpu; + unsigned flavor; + char data[0]; +} kdp_writeregs_req_t; + +typedef struct { /* KDP_WRITEREGS reply */ + kdp_hdr_t hdr; + kdp_error_t 
error; +} kdp_writeregs_reply_t; + +/* + * KDP_LOAD + */ +typedef struct { /* KDP_LOAD request */ + kdp_hdr_t hdr; + char file_args[0]; +} kdp_load_req_t; + +typedef struct { /* KDP_LOAD reply */ + kdp_hdr_t hdr; + kdp_error_t error; +} kdp_load_reply_t; + +/* + * KDP_IMAGEPATH + */ +typedef struct { /* KDP_IMAGEPATH request */ + kdp_hdr_t hdr; +} kdp_imagepath_req_t; + +typedef struct { /* KDP_IMAGEPATH reply */ + kdp_hdr_t hdr; + char path[0]; +} kdp_imagepath_reply_t; + +/* + * KDP_SUSPEND + */ +typedef struct { /* KDP_SUSPEND request */ + kdp_hdr_t hdr; +} kdp_suspend_req_t; + +typedef struct { /* KDP_SUSPEND reply */ + kdp_hdr_t hdr; +} kdp_suspend_reply_t; + +/* + * KDP_RESUMECPUS + */ +typedef struct { /* KDP_RESUMECPUS request */ + kdp_hdr_t hdr; + unsigned cpu_mask; +} kdp_resumecpus_req_t; + +typedef struct { /* KDP_RESUMECPUS reply */ + kdp_hdr_t hdr; +} kdp_resumecpus_reply_t; + +/* + * Exception notifications + * (Exception notifications are not requests, and in fact travel from + * the remote debugger to the gdb agent KDB.) + */ +typedef struct { /* exc. 
info for one cpu */ + unsigned cpu; + /* + * Following info is defined as + * per + */ + unsigned exception; + unsigned code; + unsigned subcode; +} kdp_exc_info_t; + +typedef struct { /* KDP_EXCEPTION notification */ + kdp_hdr_t hdr; + unsigned n_exc_info; + kdp_exc_info_t exc_info[0]; +} kdp_exception_t; + +typedef struct { /* KDP_EXCEPTION acknowledgement */ + kdp_hdr_t hdr; +} kdp_exception_ack_t; + +/* + * Child termination messages + */ +typedef enum { + KDP_FAULT = 0, /* child took fault (internal use) */ + KDP_EXIT, /* child exited */ + KDP_POWEROFF, /* child power-off */ + KDP_REBOOT, /* child reboot */ + KDP_COMMAND_MODE /* child exit to mon command_mode */ +} kdp_termination_code_t; + +typedef struct { /* KDP_TERMINATION notification */ + kdp_hdr_t hdr; + kdp_termination_code_t term_code; + unsigned exit_code; +} kdp_termination_t; + +typedef struct { + kdp_hdr_t hdr; +} kdp_termination_ack_t; + +typedef union { + kdp_hdr_t hdr; + kdp_connect_req_t connect_req; + kdp_connect_reply_t connect_reply; + kdp_disconnect_req_t disconnect_req; + kdp_disconnect_reply_t disconnect_reply; + kdp_hostinfo_req_t hostinfo_req; + kdp_hostinfo_reply_t hostinfo_reply; + kdp_regions_req_t regions_req; + kdp_regions_reply_t regions_reply; + kdp_maxbytes_req_t maxbytes_req; + kdp_maxbytes_reply_t maxbytes_reply; + kdp_readmem_req_t readmem_req; + kdp_readmem_reply_t readmem_reply; + kdp_writemem_req_t writemem_req; + kdp_writemem_reply_t writemem_reply; + kdp_readregs_req_t readregs_req; + kdp_readregs_reply_t readregs_reply; + kdp_writeregs_req_t writeregs_req; + kdp_writeregs_reply_t writeregs_reply; + kdp_load_req_t load_req; + kdp_load_reply_t load_reply; + kdp_imagepath_req_t imagepath_req; + kdp_imagepath_reply_t imagepath_reply; + kdp_suspend_req_t suspend_req; + kdp_suspend_reply_t suspend_reply; + kdp_resumecpus_req_t resumecpus_req; + kdp_resumecpus_reply_t resumecpus_reply; + kdp_exception_t exception; + kdp_exception_ack_t exception_ack; + kdp_termination_t 
termination; + kdp_termination_ack_t termination_ack; +} kdp_pkt_t; + +#define MAX_KDP_PKT_SIZE 1200 /* max packet size */ +#define MAX_KDP_DATA_SIZE 1024 /* max r/w data per packet */ diff --git a/osfmk/kdp/kdp_udp.c b/osfmk/kdp/kdp_udp.c new file mode 100644 index 000000000..15b416c3d --- /dev/null +++ b/osfmk/kdp/kdp_udp.c @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1993 NeXT Computer, Inc. All rights reserved. + * + * kdp_udp.c -- Kernel Debugging Protocol UDP implementation. + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DO_ALIGN 1 /* align all packet data accesses */ + +extern int kdp_getc(void); + +static +u_short ip_id; /* ip packet ctr, for ids */ + +/* @(#)udp_usrreq.c 2.2 88/05/23 4.0NFSSRC SMI; from UCB 7.1 6/5/86 */ + +/* + * UDP protocol implementation. + * Per RFC 768, August, 1980. 
+ */ +#define UDP_TTL 60 /* deflt time to live for UDP packets */ +int udp_ttl=UDP_TTL; +static unsigned char exception_seq; + +static struct { + unsigned char data[KDP_MAXPACKET]; + unsigned int off, len; + boolean_t input; +} pkt, saved_reply; + +struct { + struct { + struct in_addr in; + struct ether_addr ea; + } loc; + struct { + struct in_addr in; + struct ether_addr ea; + } rmt; +} adr; + +static char +*exception_message[] = { + "Unknown", + "Memory access", /* EXC_BAD_ACCESS */ + "Failed instruction", /* EXC_BAD_INSTRUCTION */ + "Arithmetic", /* EXC_ARITHMETIC */ + "Emulation", /* EXC_EMULATION */ + "Software", /* EXC_SOFTWARE */ + "Breakpoint" /* EXC_BREAKPOINT */ +}; + +static kdp_send_t kdp_en_send_pkt = 0; +static kdp_receive_t kdp_en_recv_pkt = 0; + +static void kdp_handler( void *); + +void +kdp_register_send_receive(kdp_send_t send, kdp_receive_t receive) +{ +#define KDP_READY 0x1 + + kdp_en_send_pkt = send; + kdp_en_recv_pkt = receive; + kdp_flag |= KDP_READY; + if (current_debugger == NO_CUR_DB) + current_debugger = KDP_CUR_DB; + if (halt_in_debugger) { + kdp_call(); + halt_in_debugger=0; + } +} + +static +void +enaddr_copy( + void *src, + void *dst +) +{ + bcopy((char *)src, (char *)dst, sizeof (struct ether_addr)); +} + +static +unsigned short +ip_sum( + unsigned char *c, + unsigned int hlen +) +{ + unsigned int high, low, sum; + + high = low = 0; + while (hlen-- > 0) { + low += c[1] + c[3]; + high += c[0] + c[2]; + + c += sizeof (int); + } + + sum = (high << 8) + low; + sum = (sum >> 16) + (sum & 65535); + + return (sum > 65535 ? 
sum - 65535 : sum); +} + +static +void +kdp_reply( + unsigned short reply_port +) +{ + struct udpiphdr aligned_ui, *ui = &aligned_ui; + struct ip aligned_ip, *ip = &aligned_ip; + struct in_addr tmp_ipaddr; + struct ether_addr tmp_enaddr; + struct ether_header *eh; + + if (!pkt.input) + kdp_panic("kdp_reply"); + + pkt.off -= sizeof (struct udpiphdr); + +#if DO_ALIGN + bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); +#else + ui = (struct udpiphdr *)&pkt.data[pkt.off]; +#endif + ui->ui_next = ui->ui_prev = 0; + ui->ui_x1 = 0; + ui->ui_pr = IPPROTO_UDP; + ui->ui_len = htons((u_short)pkt.len + sizeof (struct udphdr)); + tmp_ipaddr = ui->ui_src; + ui->ui_src = ui->ui_dst; + ui->ui_dst = tmp_ipaddr; + ui->ui_sport = htons(KDP_REMOTE_PORT); + ui->ui_dport = reply_port; + ui->ui_ulen = ui->ui_len; + ui->ui_sum = 0; +#if DO_ALIGN + bcopy((char *)ui, (char *)&pkt.data[pkt.off], sizeof(*ui)); + + bcopy((char *)&pkt.data[pkt.off], (char *)ip, sizeof(*ip)); +#else + ip = (struct ip *)&pkt.data[pkt.off]; +#endif + ip->ip_len = htons(sizeof (struct udpiphdr) + pkt.len); + ip->ip_v = IPVERSION; + ip->ip_id = htons(ip_id++); + ip->ip_hl = sizeof (struct ip) >> 2; + ip->ip_ttl = udp_ttl; + ip->ip_sum = 0; + ip->ip_sum = htons(~ip_sum((unsigned char *)ip, ip->ip_hl)); +#if DO_ALIGN + bcopy((char *)ip, (char *)&pkt.data[pkt.off], sizeof(*ip)); +#endif + + pkt.len += sizeof (struct udpiphdr); + + pkt.off -= sizeof (struct ether_header); + + eh = (struct ether_header *)&pkt.data[pkt.off]; + enaddr_copy(eh->ether_shost, &tmp_enaddr); + enaddr_copy(eh->ether_dhost, eh->ether_shost); + enaddr_copy(&tmp_enaddr, eh->ether_dhost); + eh->ether_type = htons(ETHERTYPE_IP); + + pkt.len += sizeof (struct ether_header); + + // save reply for possible retransmission + bcopy((char *)&pkt, (char *)&saved_reply, sizeof(pkt)); + + (*kdp_en_send_pkt)(&pkt.data[pkt.off], pkt.len); + + // increment expected sequence number + exception_seq++; +} + +static +void +kdp_send( + unsigned short 
remote_port +) +{ + struct udpiphdr aligned_ui, *ui = &aligned_ui; + struct ip aligned_ip, *ip = &aligned_ip; + struct ether_header *eh; + + if (pkt.input) + kdp_panic("kdp_send"); + + pkt.off -= sizeof (struct udpiphdr); + +#if DO_ALIGN + bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); +#else + ui = (struct udpiphdr *)&pkt.data[pkt.off]; +#endif + ui->ui_next = ui->ui_prev = 0; + ui->ui_x1 = 0; + ui->ui_pr = IPPROTO_UDP; + ui->ui_len = htons((u_short)pkt.len + sizeof (struct udphdr)); + ui->ui_src = adr.loc.in; + ui->ui_dst = adr.rmt.in; + ui->ui_sport = htons(KDP_REMOTE_PORT); + ui->ui_dport = remote_port; + ui->ui_ulen = ui->ui_len; + ui->ui_sum = 0; +#if DO_ALIGN + bcopy((char *)ui, (char *)&pkt.data[pkt.off], sizeof(*ui)); + bcopy((char *)&pkt.data[pkt.off], (char *)ip, sizeof(*ip)); +#else + ip = (struct ip *)&pkt.data[pkt.off]; +#endif + ip->ip_len = htons(sizeof (struct udpiphdr) + pkt.len); + ip->ip_v = IPVERSION; + ip->ip_id = htons(ip_id++); + ip->ip_hl = sizeof (struct ip) >> 2; + ip->ip_ttl = udp_ttl; + ip->ip_sum = 0; + ip->ip_sum = htons(~ip_sum((unsigned char *)ip, ip->ip_hl)); +#if DO_ALIGN + bcopy((char *)ip, (char *)&pkt.data[pkt.off], sizeof(*ip)); +#endif + + pkt.len += sizeof (struct udpiphdr); + + pkt.off -= sizeof (struct ether_header); + + eh = (struct ether_header *)&pkt.data[pkt.off]; + enaddr_copy(&adr.loc.ea, eh->ether_shost); + enaddr_copy(&adr.rmt.ea, eh->ether_dhost); + eh->ether_type = htons(ETHERTYPE_IP); + + pkt.len += sizeof (struct ether_header); + + (*kdp_en_send_pkt)(&pkt.data[pkt.off], pkt.len); +} + +static +void +kdp_poll( + void +) +{ + struct ether_header *eh; + struct udpiphdr aligned_ui, *ui = &aligned_ui; + struct ip aligned_ip, *ip = &aligned_ip; + static int msg_printed; + + if (pkt.input) + kdp_panic("kdp_poll"); + + if (!kdp_en_recv_pkt || !kdp_en_send_pkt) { + if( msg_printed == 0) { + msg_printed = 1; + printf("kdp_poll: no debugger device\n"); + } + return; + } + + pkt.off = 0; + 
(*kdp_en_recv_pkt)(pkt.data, &pkt.len, 3/* ms */); + + if (pkt.len == 0) + return; + + if (pkt.len < (sizeof (struct ether_header) + sizeof (struct udpiphdr))) + return; + + eh = (struct ether_header *)&pkt.data[pkt.off]; + pkt.off += sizeof (struct ether_header); + if (ntohs(eh->ether_type) != ETHERTYPE_IP) { + return; + } + +#if DO_ALIGN + bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); + bcopy((char *)&pkt.data[pkt.off], (char *)ip, sizeof(*ip)); +#else + ui = (struct udpiphdr *)&pkt.data[pkt.off]; + ip = (struct ip *)&pkt.data[pkt.off]; +#endif + + pkt.off += sizeof (struct udpiphdr); + if (ui->ui_pr != IPPROTO_UDP) { + return; + } + + if (ip->ip_hl > (sizeof (struct ip) >> 2)) { + return; + } + + if (ntohs(ui->ui_dport) != KDP_REMOTE_PORT) { + return; + } + + if (!kdp.is_conn) { + enaddr_copy(eh->ether_dhost, &adr.loc.ea); + adr.loc.in = ui->ui_dst; + + enaddr_copy(eh->ether_shost, &adr.rmt.ea); + adr.rmt.in = ui->ui_src; + } + + /* + * Calculate kdp packet length. + */ + pkt.len = ntohs((u_short)ui->ui_ulen) - sizeof (struct udphdr); + pkt.input = TRUE; + +} + +static +void +kdp_handler( + void *saved_state +) +{ + unsigned short reply_port; + kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; + + + kdp.saved_state = saved_state; // see comment in kdp_raise_exception + + do { + while (!pkt.input) + kdp_poll(); + +#if DO_ALIGN + bcopy((char *)&pkt.data[pkt.off], (char *)hdr, sizeof(*hdr)); +#else + hdr = (kdp_hdr_t *)&pkt.data[pkt.off]; +#endif + + // ignore replies -- we're not expecting them anyway. 
+ if (hdr->is_reply) { + goto again; + } + + // check for retransmitted request + if (hdr->seq == (exception_seq - 1)) { + /* retransmit last reply */ + (*kdp_en_send_pkt)(&saved_reply.data[saved_reply.off], + saved_reply.len); + goto again; + } else if (hdr->seq != exception_seq) { + printf("kdp: bad sequence %d (want %d)\n", + hdr->seq, exception_seq); + goto again; + } + + if (kdp_packet((unsigned char*)&pkt.data[pkt.off], + (int *)&pkt.len, + (unsigned short *)&reply_port)) { + kdp_reply(reply_port); + } + +again: + pkt.input = FALSE; + } while (kdp.is_halted); +} + +static +void +kdp_connection_wait( + void +) +{ + unsigned short reply_port; + boolean_t kdp_call_kdb(); + + printf("\nWaiting for remote debugger connection.\n"); +#ifdef MACH_PE + if( 0 != kdp_getc()) +#endif + { + printf("Options..... Type\n"); + printf("------------ ----\n"); + printf("continue.... 'c'\n"); + printf("reboot...... 'r'\n"); +#if MACH_KDB + printf("enter kdb... 'k'\n"); +#endif + } + + exception_seq = 0; + do { + kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; + + while (!pkt.input) { + int c; + c = kdp_getc(); + switch(c) { + case 'c': + printf("Continuing...\n"); + return; + case 'r': + printf("Rebooting...\n"); + kdp_reboot(); + break; +#if MACH_KDB + case 'k': + printf("calling kdb...\n"); + if (kdp_call_kdb()) + return; + else + printf("not implemented...\n"); +#endif + default: + break; + } + kdp_poll(); + } + + // check for sequence number of 0 +#if DO_ALIGN + bcopy((char *)&pkt.data[pkt.off], (char *)hdr, sizeof(*hdr)); +#else + hdr = (kdp_hdr_t *)&pkt.data[pkt.off]; +#endif + if (hdr->request == KDP_HOSTREBOOT) { + kdp_reboot(); + /* should not return! 
*/ + } + if ((hdr->request == KDP_CONNECT) && + !hdr->is_reply && (hdr->seq == exception_seq)) { + if (kdp_packet((unsigned char *)&pkt.data[pkt.off], + (int *)&pkt.len, + (unsigned short *)&reply_port)) + kdp_reply(reply_port); + } + + pkt.input = FALSE; + } while (!kdp.is_conn); + + if (current_debugger == KDP_CUR_DB) + active_debugger=1; + printf("Connected to remote debugger.\n"); +} + +static +void +kdp_send_exception( + unsigned int exception, + unsigned int code, + unsigned int subcode +) +{ + unsigned short remote_port; + unsigned int timeout_count; + + timeout_count = 300; // should be about 30 seconds + do { + pkt.off = sizeof (struct ether_header) + sizeof (struct udpiphdr); + kdp_exception((unsigned char *)&pkt.data[pkt.off], + (int *)&pkt.len, + (unsigned short *)&remote_port, + (unsigned int)exception, + (unsigned int)code, + (unsigned int)subcode); + + kdp_send(remote_port); + +again: + kdp_poll(); + + if (pkt.input) { + if (!kdp_exception_ack(&pkt.data[pkt.off], pkt.len)) { + pkt.input = FALSE; + goto again; + } + } else { + pkt.input = FALSE; + goto again; + } + pkt.input = FALSE; + if (kdp.exception_ack_needed) + kdp_us_spin(100000); // 1/10 sec + + } while (kdp.exception_ack_needed && timeout_count--); + + if (kdp.exception_ack_needed) { + // give up & disconnect + printf("kdp: exception ack timeout\n"); + kdp_reset(); + } +} + +void +kdp_raise_exception( + unsigned int exception, + unsigned int code, + unsigned int subcode, + void *saved_state +) +{ + int s; + int index; + + if (saved_state == 0) + printf("kdp_raise_exception with NULL state\n"); + + index = exception; + if (exception != EXC_BREAKPOINT) { + if (exception > EXC_BREAKPOINT || exception < EXC_BAD_ACCESS) { + index = 0; + } + printf("%s exception (%x,%x,%x)\n", + exception_message[index], + exception, code, subcode); + } + + kdp_sync_cache(); + + /* XXX WMG it seems that sometimes it doesn't work to let kdp_handler + * do this. I think the client and the host can get out of sync. 
+ */ + kdp.saved_state = saved_state; + + if (pkt.input) + kdp_panic("kdp_raise_exception"); + + if (!kdp.is_conn) + kdp_connection_wait(); + else + kdp_send_exception(exception, code, subcode); + + if (kdp.is_conn) { + kdp.is_halted = TRUE; /* XXX */ + kdp_handler(saved_state); + if (!kdp.is_conn) + printf("Remote debugger disconnected.\n"); + } + + kdp_sync_cache(); +} + +void +kdp_reset(void) +{ + kdp.reply_port = kdp.exception_port = 0; + kdp.is_halted = kdp.is_conn = FALSE; + kdp.exception_seq = kdp.conn_seq = 0; +} + diff --git a/osfmk/kdp/kdp_udp.h b/osfmk/kdp/kdp_udp.h new file mode 100644 index 000000000..f8db005d4 --- /dev/null +++ b/osfmk/kdp/kdp_udp.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include /* OSSwap functions */ + +#define ETHERMTU 1500 +#define ETHERHDRSIZE 14 +#define ETHERCRC 4 +#define KDP_MAXPACKET (ETHERHDRSIZE + ETHERMTU + ETHERCRC) + +struct in_addr { + u_long s_addr; +}; + +struct ether_addr { + u_char ether_addr_octet[6]; +}; + +typedef struct ether_addr enet_addr_t; + +struct ipovly { + caddr_t ih_next, ih_prev; /* for protocol sequence q's */ + u_char ih_x1; /* (unused) */ + u_char ih_pr; /* protocol */ + short ih_len; /* protocol length */ + struct in_addr ih_src; /* source internet address */ + struct in_addr ih_dst; /* destination internet address */ +}; + +struct udphdr { + u_short uh_sport; /* source port */ + u_short uh_dport; /* destination port */ + short uh_ulen; /* udp length */ + u_short uh_sum; /* udp checksum */ +}; + +struct udpiphdr { + struct ipovly ui_i; /* overlaid ip structure */ + struct udphdr ui_u; /* udp header */ +}; +#define ui_next ui_i.ih_next +#define ui_prev ui_i.ih_prev +#define ui_x1 ui_i.ih_x1 +#define ui_pr ui_i.ih_pr +#define ui_len ui_i.ih_len +#define ui_src ui_i.ih_src +#define ui_dst ui_i.ih_dst +#define ui_sport ui_u.uh_sport +#define ui_dport ui_u.uh_dport +#define ui_ulen ui_u.uh_ulen +#define ui_sum ui_u.uh_sum + +struct ip { + union { + u_long ip_w; + struct { + unsigned int +#if _BIG_ENDIAN == __LITTLE_ENDIAN__ + ip_xhl:4, /* header length */ + ip_xv:4, /* version */ + ip_xtos:8, /* type of service */ + ip_xlen:16; /* total length */ +#endif +#if _BIG_ENDIAN == __BIG_ENDIAN__ + ip_xv:4, /* version */ + ip_xhl:4, /* header length */ + ip_xtos:8, /* type of service */ + ip_xlen:16; /* total length */ +#endif + } ip_x; + } ip_vhltl; + u_short ip_id; /* identification */ + short ip_off; /* fragment offset field */ +#define IP_DF 0x4000 /* dont fragment flag */ +#define IP_MF 0x2000 /* more fragments flag */ +#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ + u_char ip_ttl; /* time to live */ + u_char ip_p; /* protocol */ + u_short ip_sum; 
/* checksum */ + struct in_addr ip_src,ip_dst; /* source and dest address */ +}; +#define ip_v ip_vhltl.ip_x.ip_xv +#define ip_hl ip_vhltl.ip_x.ip_xhl +#define ip_tos ip_vhltl.ip_x.ip_xtos +#define ip_len ip_vhltl.ip_x.ip_xlen + +#define IPPROTO_UDP 17 +#define IPVERSION 4 + +struct ether_header { + u_char ether_dhost[6]; + u_char ether_shost[6]; + u_short ether_type; +}; + +typedef struct ether_header ether_header_t; + +#define ETHERTYPE_IP 0x0800 /* IP protocol */ + +#define ntohs(x) OSSwapBigToHostInt16(x) +#define htons(x) OSSwapHostToBigInt16(x) diff --git a/osfmk/kdp/ml/i386/kdp_machdep.c b/osfmk/kdp/ml/i386/kdp_machdep.c new file mode 100644 index 000000000..77182fb97 --- /dev/null +++ b/osfmk/kdp/ml/i386/kdp_machdep.c @@ -0,0 +1,440 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1994 NeXT Computer, Inc. All rights reserved. + * + * machdep/ppc/kdp_machdep.c + * + * Machine-dependent code for Remote Debugging Protocol + * + * March, 1997 Created. 
Umesh Vaishampayan [umeshv@NeXT.com] + * + */ + +#include +#include +#include +#include +#include + +#define KDP_TEST_HARNESS 0 +#if KDP_TEST_HARNESS +#define dprintf(x) printf x +#else +#define dprintf(x) +#endif + +void print_saved_state(void *); +void kdp_call(void); +void kdp_i386_trap(unsigned int, struct i386_saved_state *, kern_return_t, vm_offset_t); +int kdp_getc(void); + +void +kdp_exception( + unsigned char *pkt, + int *len, + unsigned short *remote_port, + unsigned int exception, + unsigned int code, + unsigned int subcode +) +{ + kdp_exception_t *rq = (kdp_exception_t *)pkt; + + rq->hdr.request = KDP_EXCEPTION; + rq->hdr.is_reply = 0; + rq->hdr.seq = kdp.exception_seq; + rq->hdr.key = 0; + rq->hdr.len = sizeof (*rq); + + rq->n_exc_info = 1; + rq->exc_info[0].cpu = 0; + rq->exc_info[0].exception = exception; + rq->exc_info[0].code = code; + rq->exc_info[0].subcode = subcode; + + rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t); + + bcopy((char *)rq, (char *)pkt, rq->hdr.len); + + kdp.exception_ack_needed = TRUE; + + *remote_port = kdp.exception_port; + *len = rq->hdr.len; +} + +boolean_t +kdp_exception_ack( + unsigned char *pkt, + int len +) +{ + kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt; + + if (len < sizeof (*rq)) + return(FALSE); + + if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) + return(FALSE); + + dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq)); + + if (rq->hdr.seq == kdp.exception_seq) { + kdp.exception_ack_needed = FALSE; + kdp.exception_seq++; + } + return(TRUE); +} + +void +kdp_getstate( + i386_thread_state_t *state +) +{ + struct i386_saved_state *saved_state; + + saved_state = (struct i386_saved_state *)kdp.saved_state; + + *state = (i386_thread_state_t) { 0 }; + state->eax = saved_state->eax; + state->ebx = saved_state->ebx; + state->ecx = saved_state->ecx; + state->edx = saved_state->edx; + state->edi = saved_state->edi; + state->esi = saved_state->esi; + state->ebp = 
saved_state->ebp; + + if ((saved_state->cs & 0x3) == 0){ /* Kernel State */ + state->esp = (unsigned int) &saved_state->uesp; + state->ss = KERNEL_DS; + } else { + state->esp = saved_state->uesp; + state->ss = saved_state->ss; + } + + state->eflags = saved_state->efl; + state->eip = saved_state->eip; + state->cs = saved_state->cs; + state->ds = saved_state->ds; + state->es = saved_state->es; + state->fs = saved_state->fs; + state->gs = saved_state->gs; +} + + +void +kdp_setstate( + i386_thread_state_t *state +) +{ + struct i386_saved_state *saved_state; + + saved_state = (struct i386_saved_state *)kdp.saved_state; + + saved_state->eax = state->eax; + saved_state->ebx = state->ebx; + saved_state->ecx = state->ecx; + saved_state->edx = state->edx; + saved_state->edi = state->edi; + saved_state->esi = state->esi; + saved_state->ebp = state->ebp; + saved_state->efl = state->eflags; +#if 0 + saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR ); + saved_state->frame.eflags |= ( EFL_IF | EFL_SET ); +#endif + saved_state->eip = state->eip; + saved_state->fs = state->fs; + saved_state->gs = state->gs; +} + + +kdp_error_t +kdp_machine_read_regs( + unsigned int cpu, + unsigned int flavor, + char *data, + int *size +) +{ + switch (flavor) { + + case i386_THREAD_STATE: + dprintf(("kdp_readregs THREAD_STATE\n")); + kdp_getstate((i386_thread_state_t *)data); + *size = sizeof (i386_thread_state_t); + return KDPERR_NO_ERROR; + + case i386_THREAD_FPSTATE: + dprintf(("kdp_readregs THREAD_FPSTATE\n")); + *(i386_thread_fpstate_t *)data = (i386_thread_fpstate_t) { 0 }; + *size = sizeof (i386_thread_fpstate_t); + return KDPERR_NO_ERROR; + + default: + dprintf(("kdp_readregs bad flavor %d\n")); + return KDPERR_BADFLAVOR; + } +} + +kdp_error_t +kdp_machine_write_regs( + unsigned int cpu, + unsigned int flavor, + char *data, + int *size +) +{ + switch (flavor) { + + case i386_THREAD_STATE: + dprintf(("kdp_writeregs THREAD_STATE\n")); + kdp_setstate((i386_thread_state_t 
*)data); + return KDPERR_NO_ERROR; + + case i386_THREAD_FPSTATE: + dprintf(("kdp_writeregs THREAD_FPSTATE\n")); + return KDPERR_NO_ERROR; + + default: + dprintf(("kdp_writeregs bad flavor %d\n")); + return KDPERR_BADFLAVOR; + } +} + + + +void +kdp_machine_hostinfo( + kdp_hostinfo_t *hostinfo +) +{ + machine_slot_t m; + int i; + + hostinfo->cpus_mask = 0; + + for (i = 0; i < machine_info.max_cpus; i++) { + m = &machine_slot[i]; + if (!m->is_cpu) + continue; + + hostinfo->cpus_mask |= (1 << i); + } + + /* FIXME?? */ + hostinfo->cpu_type = CPU_TYPE_I386; + hostinfo->cpu_subtype = CPU_SUBTYPE_486; +} + +void +kdp_panic( + const char *msg +) +{ + printf("kdp panic: %s\n", msg); + __asm__ volatile("hlt"); +} + + +void +kdp_reboot(void) +{ + kdreboot(); +} + +int +kdp_intr_disbl(void) +{ + return splhigh(); +} + +void +kdp_intr_enbl(int s) +{ + splx(s); +} + +int +kdp_getc() +{ + return cnmaygetc(); +} + +void +kdp_us_spin(int usec) +{ + extern void delay(int); + + delay(usec/100); +} + +void print_saved_state(void *state) +{ + struct i386_saved_state *saved_state; + + saved_state = state; + + printf("pc = 0x%x\n", saved_state->eip); + printf("cr3= 0x%x\n", saved_state->cr2); + printf("rp = TODO FIXME\n"); + printf("sp = 0x%x\n", saved_state->esp); + +} + +void +kdp_sync_cache() +{ + return; /* No op here. 
*/ +} + +void +kdp_call() +{ + __asm__ volatile ("int $3"); /* Let the processor do the work */ +} + + +typedef struct _cframe_t { + struct _cframe_t *prev; + unsigned caller; + unsigned args[0]; +} cframe_t; + + +#define MAX_FRAME_DELTA 65536 + +void +kdp_i386_backtrace(void *_frame, int nframes) +{ + cframe_t *frame = (cframe_t *)_frame; + int i; + + for (i=0; i VM_MAX_KERNEL_ADDRESS) { + goto invalid; + } + printf("frame %x called by %x ", + frame, frame->caller); + printf("args %x %x %x %x\n", + frame->args[0], frame->args[1], + frame->args[2], frame->args[3]); + if ((frame->prev < frame) || /* wrong direction */ + ((frame->prev - frame) > MAX_FRAME_DELTA)) { + goto invalid; + } + frame = frame->prev; + } + return; +invalid: + printf("invalid frame pointer %x\n",frame->prev); +} + +void +kdp_i386_trap( + unsigned int trapno, + struct i386_saved_state *saved_state, + kern_return_t result, + vm_offset_t va +) +{ + unsigned int exception, subcode = 0, code; + + if (trapno != T_INT3 && trapno != T_DEBUG) + printf("unexpected kernel trap %x eip %x\n", trapno, saved_state->eip); + + switch (trapno) { + + case T_DIVIDE_ERROR: + exception = EXC_ARITHMETIC; + code = EXC_I386_DIVERR; + break; + + case T_OVERFLOW: + exception = EXC_SOFTWARE; + code = EXC_I386_INTOFLT; + break; + + case T_OUT_OF_BOUNDS: + exception = EXC_ARITHMETIC; + code = EXC_I386_BOUNDFLT; + break; + + case T_INVALID_OPCODE: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_INVOPFLT; + break; + + case T_SEGMENT_NOT_PRESENT: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_SEGNPFLT; + subcode = saved_state->err; + break; + + case T_STACK_FAULT: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_STKFLT; + subcode = saved_state->err; + break; + + case T_GENERAL_PROTECTION: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_GPFLT; + subcode = saved_state->err; + break; + + case T_PAGE_FAULT: + exception = EXC_BAD_ACCESS; + code = result; + subcode = va; + break; + + case T_WATCHPOINT: + 
exception = EXC_SOFTWARE; + code = EXC_I386_ALIGNFLT; + break; + + case T_DEBUG: + case T_INT3: + exception = EXC_BREAKPOINT; + code = EXC_I386_BPTFLT; + break; + + default: + exception = EXC_BAD_INSTRUCTION; + code = trapno; + break; + } + +// kdp_i386_backtrace((void *) saved_state->ebp, 10); + + kdp_raise_exception(exception, code, subcode, saved_state); +} + +boolean_t +kdp_call_kdb( + void) +{ + return(FALSE); +} diff --git a/osfmk/kdp/ml/i386/kdp_vm.c b/osfmk/kdp/ml/i386/kdp_vm.c new file mode 100644 index 000000000..2d870d71a --- /dev/null +++ b/osfmk/kdp/ml/i386/kdp_vm.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +unsigned kdp_vm_read( caddr_t, caddr_t, unsigned); +unsigned kdp_vm_write( caddr_t, caddr_t, unsigned); +unsigned kdp_copy_kmem( caddr_t, caddr_t, unsigned); + +/* + * + */ +unsigned kdp_vm_read( + caddr_t src, + caddr_t dst, + unsigned len) +{ + return kdp_copy_kmem(src, dst, len); +} + +/* + * + */ +unsigned kdp_vm_write( + caddr_t src, + caddr_t dst, + unsigned len) +{ + return kdp_copy_kmem(src, dst, len); +} + diff --git a/osfmk/kdp/ml/ppc/kdp_asm.s b/osfmk/kdp/ml/ppc/kdp_asm.s new file mode 100644 index 000000000..4e53ce0b7 --- /dev/null +++ b/osfmk/kdp/ml/ppc/kdp_asm.s @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include + +/* void kdp_call_with_ctx(int type, struct ppc_thread_state *ssp) + * + * Switch on kdp stack and enter the debugger. On return, + * switch back to the previous stack + * + * If the kdp stack is not free, we allocate ourselves a frame below + * the current kdp frame. 
This should never occur in a perfect world. + */ + +ENTRY(kdp_call_with_ctx, TAG_NO_FRAME_USED) + + mfmsr r7 /* Get the MSR */ + mflr r0 + rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions enable bit */ + mtmsr r7 + mfsprg r8,0 /* Get the per_proc block address */ + stw r0, FM_LR_SAVE(r1) /* save lr in the current frame */ + + lwz r9, PP_DEBSTACKPTR(r8) /* get kdp stack pointer */ + cmpwi r9, 0 + bne 0f + +#ifdef LET_KDP_REENTER + mr r9, r1 /* get current stack pointer */ + subi r9, r9, FM_REDZONE + SS_SIZE +#else + bl EXT(kdp_print_backtrace) +#endif + +0: + stw r1, FM_ARG0(r9) /* Store old stack pointer */ + li r0, 0 + stw r0, PP_DEBSTACKPTR(r8) /* Mark kdp stack as busy */ + + subi r1, r9, FM_SIZE + stw r0, FM_BACKPTR(r1) + + bl EXT(kdp_trap) + + mfmsr r0 /* Get the MSR */ + addi r1, r1, FM_SIZE + rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions enable bit */ + mtmsr r0 + + mfsprg r8,0 /* Get the per_proc block address */ + + stw r1, PP_DEBSTACKPTR(r8) /* Mark gdb stack as free */ + lwz r1, FM_ARG0(r1) + lwz r0, FM_LR_SAVE(r1) + mtlr r0 + + blr + + diff --git a/osfmk/kdp/ml/ppc/kdp_machdep.c b/osfmk/kdp/ml/ppc/kdp_machdep.c new file mode 100644 index 000000000..f6cef9205 --- /dev/null +++ b/osfmk/kdp/ml/ppc/kdp_machdep.c @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1994 NeXT Computer, Inc. All rights reserved. + * + * machdep/ppc/kdp_machdep.c + * + * Machine-dependent code for Remote Debugging Protocol + * + * March, 1997 Created. Umesh Vaishampayan [umeshv@NeXT.com] + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define KDP_TEST_HARNESS 0 +#if KDP_TEST_HARNESS +#define dprintf(x) kprintf x +#else +#define dprintf(x) +#endif + +void print_saved_state(void *); +void kdp_call(void); +void kdp_trap( unsigned int, struct ppc_thread_state *); +int kdp_getc(void); +boolean_t kdp_call_kdb(void); + +void +kdp_exception( + unsigned char *pkt, + int *len, + unsigned short *remote_port, + unsigned int exception, + unsigned int code, + unsigned int subcode +) +{ + struct { + kdp_exception_t pkt; + kdp_exc_info_t exc; + } aligned_pkt; + kdp_exception_t *rq = (kdp_exception_t *)&aligned_pkt; + + bcopy((char *)pkt, (char *)rq, sizeof(*rq)); + rq->hdr.request = KDP_EXCEPTION; + rq->hdr.is_reply = 0; + rq->hdr.seq = kdp.exception_seq; + rq->hdr.key = 0; + rq->hdr.len = sizeof (*rq) + sizeof(kdp_exc_info_t); + + rq->n_exc_info = 1; + rq->exc_info[0].cpu = 0; + rq->exc_info[0].exception = exception; + rq->exc_info[0].code = code; + rq->exc_info[0].subcode = subcode; + + rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t); + + bcopy((char *)rq, (char *)pkt, rq->hdr.len); + + kdp.exception_ack_needed = TRUE; + + *remote_port = 
kdp.exception_port; + *len = rq->hdr.len; +} + +boolean_t +kdp_exception_ack( + unsigned char *pkt, + int len +) +{ + kdp_exception_ack_t aligned_pkt; + kdp_exception_ack_t *rq = (kdp_exception_ack_t *)&aligned_pkt; + + if (len < sizeof (*rq)) + return(FALSE); + + bcopy((char *)pkt, (char *)rq, sizeof(*rq)); + + if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) + return(FALSE); + + dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq)); + + if (rq->hdr.seq == kdp.exception_seq) { + kdp.exception_ack_needed = FALSE; + kdp.exception_seq++; + } + return(TRUE); +} + +static void +kdp_getintegerstate( + struct ppc_thread_state *state +) +{ + struct ppc_thread_state *saved_state; + + saved_state = kdp.saved_state; + + bzero((char *)state,sizeof (struct ppc_thread_state)) ; + + state->srr0 = saved_state->srr0; + state->srr1 = saved_state->srr1; + state->r0 = saved_state->r0; + state->r1 = saved_state->r1; + state->r2 = saved_state->r2; + state->r3 = saved_state->r3; + state->r4 = saved_state->r4; + state->r5 = saved_state->r5; + state->r6 = saved_state->r6; + state->r7 = saved_state->r7; + state->r8 = saved_state->r8; + state->r9 = saved_state->r9; + state->r10 = saved_state->r10; + state->r11 = saved_state->r11; + state->r12 = saved_state->r12; + state->r13 = saved_state->r13; + state->r14 = saved_state->r14; + state->r15 = saved_state->r15; + state->r16 = saved_state->r16; + state->r17 = saved_state->r17; + state->r18 = saved_state->r18; + state->r19 = saved_state->r19; + state->r20 = saved_state->r20; + state->r21 = saved_state->r21; + state->r22 = saved_state->r22; + state->r23 = saved_state->r23; + state->r24 = saved_state->r24; + state->r25 = saved_state->r25; + state->r26 = saved_state->r26; + state->r27 = saved_state->r27; + state->r28 = saved_state->r28; + state->r29 = saved_state->r29; + state->r30 = saved_state->r30; + state->r31 = saved_state->r31; + state->cr = saved_state->cr; + state->xer = saved_state->xer; + state->lr = 
saved_state->lr; + state->ctr = saved_state->ctr; + state->mq = saved_state->mq; /* This is BOGUS ! (601) ONLY */ +} + +kdp_error_t +kdp_machine_read_regs( + unsigned int cpu, + unsigned int flavor, + char *data, + int *size +) +{ + switch (flavor) { + + case PPC_THREAD_STATE: + dprintf(("kdp_readregs THREAD_STATE\n")); + kdp_getintegerstate((struct ppc_thread_state *)data); + *size = PPC_THREAD_STATE_COUNT * sizeof(int); + return KDPERR_NO_ERROR; + + case PPC_FLOAT_STATE: + dprintf(("kdp_readregs THREAD_FPSTATE\n")); + bzero((char *)data ,sizeof(struct ppc_float_state)); + *size = PPC_FLOAT_STATE_COUNT * sizeof(int); + return KDPERR_NO_ERROR; + + default: + dprintf(("kdp_readregs bad flavor %d\n")); + return KDPERR_BADFLAVOR; + } +} + +static void +kdp_setintegerstate( + struct ppc_thread_state *state +) +{ + struct ppc_thread_state *saved_state; + + saved_state = kdp.saved_state; + + saved_state->srr0 = state->srr0; + saved_state->srr1 = state->srr1; + saved_state->r0 = state->r0; + saved_state->r1 = state->r1; + saved_state->r2 = state->r2; + saved_state->r3 = state->r3; + saved_state->r4 = state->r4; + saved_state->r5 = state->r5; + saved_state->r6 = state->r6; + saved_state->r7 = state->r7; + saved_state->r8 = state->r8; + saved_state->r9 = state->r9; + saved_state->r10 = state->r10; + saved_state->r11 = state->r11; + saved_state->r12 = state->r12; + saved_state->r13 = state->r13; + saved_state->r14 = state->r14; + saved_state->r15 = state->r15; + saved_state->r16 = state->r16; + saved_state->r17 = state->r17; + saved_state->r18 = state->r18; + saved_state->r19 = state->r19; + saved_state->r20 = state->r20; + saved_state->r21 = state->r21; + saved_state->r22 = state->r22; + saved_state->r23 = state->r23; + saved_state->r24 = state->r24; + saved_state->r25 = state->r25; + saved_state->r26 = state->r26; + saved_state->r27 = state->r27; + saved_state->r28 = state->r28; + saved_state->r29 = state->r29; + saved_state->r30 = state->r30; + saved_state->r31 = 
state->r31; + saved_state->cr = state->cr; + saved_state->xer = state->xer; + saved_state->lr = state->lr; + saved_state->ctr = state->ctr; + saved_state->mq = state->mq; /* BOGUS! (601)ONLY */ +} + +kdp_error_t +kdp_machine_write_regs( + unsigned int cpu, + unsigned int flavor, + char *data, + int *size +) +{ + switch (flavor) { + + case PPC_THREAD_STATE: + dprintf(("kdp_writeregs THREAD_STATE\n")); + kdp_setintegerstate((struct ppc_thread_state *)data); + +#if KDP_TEST_HARNESS + DumpTheSave((struct savearea *)data); /* (TEST/DEBUG) */ +#endif + return KDPERR_NO_ERROR; + + case PPC_FLOAT_STATE: + dprintf(("kdp_writeregs THREAD_FPSTATE\n")); + return KDPERR_NO_ERROR; + + default: + dprintf(("kdp_writeregs bad flavor %d\n")); + return KDPERR_BADFLAVOR; + } +} + +void +kdp_machine_hostinfo( + kdp_hostinfo_t *hostinfo +) +{ + machine_slot_t m; + int i; + + hostinfo->cpus_mask = 0; + + for (i = 0; i < machine_info.max_cpus; i++) { + m = &machine_slot[i]; + if (!m->is_cpu) + continue; + + hostinfo->cpus_mask |= (1 << i); + if (hostinfo->cpu_type == 0) { + hostinfo->cpu_type = m->cpu_type; + hostinfo->cpu_subtype = m->cpu_subtype; + } + } +} + +void +kdp_panic( + const char *msg +) +{ + printf("kdp panic: %s\n", msg); + while(1) {} +} + + +void +kdp_reboot(void) +{ + halt_all_cpus(TRUE);; +} + +int +kdp_intr_disbl(void) +{ + return (splhigh()); +} + +void +kdp_intr_enbl(int s) +{ + splx(s); +} + +void +kdp_us_spin(int usec) +{ + extern void delay(int); + + delay(usec/100); +} + +void print_saved_state(void *state) +{ + struct ppc_thread_state *saved_state; + + saved_state = state; + + printf("pc = 0x%x\n", saved_state->srr0); + printf("msr = 0x%x\n", saved_state->srr1); + printf("rp = 0x%x\n", saved_state->lr); + printf("sp = 0x%x\n", saved_state->r1); + +} + +void +kdp_call() +{ + Debugger("inline call to debugger(machine_startup)"); +} + +/* + * table to convert system specific code to generic codes for kdb + */ +int kdp_trap_codes[] = { + EXC_BAD_ACCESS, /* 0x0000 
INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x0100 System reset */ + EXC_BAD_ACCESS, /* 0x0200 Machine check */ + EXC_BAD_ACCESS, /* 0x0300 Data access */ + EXC_BAD_ACCESS, /* 0x0400 Instruction access */ + EXC_BAD_ACCESS, /* 0x0500 External interrupt */ + EXC_BAD_ACCESS, /* 0x0600 Alignment */ + EXC_BREAKPOINT, /* 0x0700 Program - fp exc, ill/priv instr, trap */ + EXC_ARITHMETIC, /* 0x0800 Floating point disabled */ + EXC_SOFTWARE, /* 0x0900 Decrementer */ + EXC_BAD_ACCESS, /* 0x0A00 I/O controller interface */ + EXC_BAD_ACCESS, /* 0x0B00 INVALID EXCEPTION */ + EXC_SOFTWARE, /* 0x0C00 System call exception */ + EXC_BREAKPOINT, /* 0x0D00 Trace */ + EXC_SOFTWARE, /* 0x0E00 FP assist */ + EXC_SOFTWARE, /* 0x0F00 Performance monitoring */ + EXC_ARITHMETIC, /* 0x0F20 Altivec disabled */ + EXC_BAD_ACCESS, /* 0x1000 Instruction PTE miss */ + EXC_BAD_ACCESS, /* 0x1100 Data load PTE miss */ + EXC_BAD_ACCESS, /* 0x1200 Data store PTE miss */ + EXC_BREAKPOINT, /* 0x1300 Instruction bkpt */ + EXC_SOFTWARE, /* 0x1400 System management */ + EXC_BAD_ACCESS, /* 0x1500 INVALID EXCEPTION */ + EXC_ARITHMETIC, /* 0x1600 Altivec Assist */ + EXC_BAD_ACCESS, /* 0x1700 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1800 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1900 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1A00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1B00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1C00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1D00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1E00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x1F00 INVALID EXCEPTION */ + EXC_BREAKPOINT, /* 0x2000 Run Mode/Trace */ + EXC_BAD_ACCESS, /* 0x2100 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2200 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2300 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2400 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2500 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2600 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2700 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2800 
INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2900 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2A00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2B00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2C00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2D00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2E00 INVALID EXCEPTION */ + EXC_BAD_ACCESS, /* 0x2F00 INVALID EXCEPTION */ + EXC_SOFTWARE /* 0x3000 AST trap (software) */ +}; + +int +kdp_getc() +{ + return(cnmaygetc()); +} + +int kdp_backtrace; +int kdp_sr_dump; +int kdp_dabr; +int kdp_noisy; + +#define kdp_code(x) kdp_trap_codes[((x)==T_AST?0x31:(x)/T_VECTOR_SIZE)] + +void +kdp_trap( + unsigned int exception, + struct ppc_thread_state *saved_state +) +{ + unsigned int *fp; + unsigned int register sp; + struct ppc_thread_state *state; + + if (kdp_noisy) { + if (kdp_backtrace) { + printf("\nvector=%x, \n", exception/4); +#ifdef XXX + regDump(saved_state); +#endif + sp = saved_state->r1; + printf("stack backtrace - sp(%x) ", sp); + fp = (unsigned int *) *((unsigned int *)sp); + while (fp) { + printf("0x%08x ", fp[2]); + fp = (unsigned int *)*fp; + } + printf("\n"); + } +#ifdef XXX + if (kdp_sr_dump) { + dump_segment_registers(); + } +#endif + + printf("vector=%d ", exception/4); + } + + kdp_raise_exception(kdp_code(exception), 0, 0, saved_state); + + if (kdp_noisy) + printf("kdp_trap: kdp_raise_exception() ret\n"); + + if (*((int *)saved_state->srr0) == 0x7c800008) + saved_state->srr0 += 4; /* BKPT_SIZE */ + + if(saved_state->srr1 & (MASK(MSR_SE) | MASK(MSR_BE))) { /* Are we just stepping or continuing */ + db_run_mode = STEP_ONCE; /* We are stepping */ + } + else db_run_mode = STEP_CONTINUE; /* Otherwise we are continuing */ + + +#ifdef XXX + mtspr(dabr, kdp_dabr); +#endif +} + +boolean_t +kdp_call_kdb( + void) +{ + switch_debugger=1; + return(TRUE); +} + +void kdp_print_registers(struct ppc_saved_state *state) +{ + int i; + for (i=0; i<32; i++) { + if ((i % 8) == 0) + printf("\n%4d :",i); + printf(" %08x",*(&state->r0+i)); + } + 
printf("\n"); + printf("cr = 0x%08x\t\t",state->cr); + printf("xer = 0x%08x\n",state->xer); + printf("lr = 0x%08x\t\t",state->lr); + printf("ctr = 0x%08x\n",state->ctr); + printf("srr0(iar) = 0x%08x\t\t",state->srr0); + printf("srr1(msr) = 0x%08B\n",state->srr1, + "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18" + "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT"); + printf("mq = 0x%08x\t\t",state->mq); + printf("sr_copyin = 0x%08x\n",state->sr_copyin); + printf("\n"); +} + +void +kdp_print_backtrace( + unsigned int exception, + struct ppc_saved_state *saved_state) +{ + extern void kdp_print_registers(struct ppc_saved_state *); + extern void print_backtrace(struct ppc_saved_state *); + extern unsigned int debug_mode, disableDebugOuput; + + disableDebugOuput = FALSE; + debug_mode = TRUE; + printf("re-entering kdp:\n"); + printf("vector=%x, \n", exception/4); + kdp_print_registers(saved_state); + print_backtrace(saved_state); + printf("panic: We are hanging here...\n"); + while(1); +} diff --git a/osfmk/kdp/ml/ppc/kdp_misc.s b/osfmk/kdp/ml/ppc/kdp_misc.s new file mode 100644 index 000000000..7ddbf68c1 --- /dev/null +++ b/osfmk/kdp/ml/ppc/kdp_misc.s @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +.set kLog2CacheLineSize, 5 +.set kCacheLineSize, 32 + +ENTRY(kdp_flush_cache, TAG_NO_FRAME_USED) + cmpi cr0,0,r4,0 /* is this zero length? */ + add r4,r3,r4 /* calculate last byte + 1 */ + subi r4,r4,1 /* calculate last byte */ + + srwi r5,r3,kLog2CacheLineSize /* calc first cache line index */ + srwi r4,r4,kLog2CacheLineSize /* calc last cache line index */ + beq cr0, LdataToCodeDone /* done if zero length */ + + subf r4,r5,r4 /* calc diff (# lines minus 1) */ + addi r4,r4,1 /* # of cache lines to flush */ + slwi r5,r5,kLog2CacheLineSize /* calc addr of first cache line */ + + /* flush the data cache lines */ + mr r3,r5 /* starting address for loop */ + mtctr r4 /* loop count */ +LdataToCodeFlushLoop: + dcbf 0, r3 /* flush the data cache line */ + addi r3,r3,kCacheLineSize /* advance to next cache line */ + bdnz LdataToCodeFlushLoop /* loop until count is zero */ + sync /* wait until RAM is valid */ + + /* invalidate the code cache lines */ + mr r3,r5 /* starting address for loop */ + mtctr r4 /* loop count */ +LdataToCodeInvalidateLoop: + icbi 0, r3 /* invalidate code cache line */ + addi r3,r3,kCacheLineSize /* advance to next cache line */ + bdnz LdataToCodeInvalidateLoop /* loop until count is zero */ + sync /* wait until last icbi completes */ + isync /* discard prefetched instructions */ +LdataToCodeDone: + blr /* return nothing */ + +ENTRY(kdp_sync_cache, TAG_NO_FRAME_USED) + sync /* data sync */ + isync /* inst sync */ + blr /* return nothing */ + +ENTRY(kdp_xlate_off, TAG_NO_FRAME_USED) + mfmsr r3 + rlwinm r4, r3, 0, MSR_DR_BIT+1, MSR_IR_BIT-1 + mtmsr r4 + isync + blr + +ENTRY(kdp_xlate_restore, TAG_NO_FRAME_USED) + mtmsr r3 + isync + blr + diff --git a/osfmk/kdp/ml/ppc/kdp_vm.c b/osfmk/kdp/ml/ppc/kdp_vm.c new file mode 100644 index 000000000..dad44c78c --- 
/dev/null +++ b/osfmk/kdp/ml/ppc/kdp_vm.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +pmap_t kdp_pmap=0; +boolean_t kdp_trans_off=0; + +unsigned kdp_xlate_off(void); +void kdp_xlate_restore(unsigned); +void kdp_flush_cache(vm_offset_t, unsigned); +vm_offset_t kdp_vtophys(pmap_t pmap, vm_offset_t vaddr); +void kdp_bcopy( unsigned char *, unsigned char *, unsigned); +void kdp_pmemcpy( vm_offset_t , vm_offset_t, unsigned); +unsigned kdp_vm_read( caddr_t, caddr_t, unsigned); +unsigned kdp_vm_write( caddr_t, caddr_t, unsigned); + +extern vm_offset_t kvtophys(vm_offset_t); + + +/* + * + */ +vm_offset_t kdp_vtophys( + pmap_t pmap, + vm_offset_t va) +{ + register mapping *mp; + register vm_offset_t pa; + + pa = LRA(pmap->space,(void *)va); + + if (pa != 0) + return(pa); + + mp = hw_lock_phys_vir(pmap->space, va); + if((unsigned int)mp&1) { + return 0; + } + + if(!mp) { /* If it was not a normal page */ + pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page 
(returns 0 if not found) */ + return pa; /* Return physical address */ + } + + mp = hw_cpv(mp); + + if(!mp->physent) { + pa = (vm_offset_t)((mp->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); + } else { + pa = (vm_offset_t)((mp->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); + hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); + } + + return(pa); +} + +/* + * + */ +void kdp_bcopy( + unsigned char *src, + unsigned char *dst, + unsigned cnt) +{ + while (cnt--) + *dst++ = *src++; +} + +/* + * + */ +unsigned kdp_vm_read( + caddr_t src, + caddr_t dst, + unsigned len) +{ + vm_offset_t cur_virt_src, cur_virt_dst; + vm_offset_t cur_phys_src; + unsigned resid, cnt; + unsigned msr; + +#ifdef KDP_VM_READ_DEBUG + kprintf("kdp_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]); +#endif + if (kdp_trans_off) { + cur_virt_src = (vm_offset_t) ((int)src & 0x0fffffff); + cur_virt_dst = (vm_offset_t)dst; + resid = len; + + while (resid != 0) { + cur_phys_src = cur_virt_src; + cnt = ((cur_virt_src + NBPG) & (-NBPG)) - cur_virt_src; + if (cnt > resid) cnt = resid; + msr = kdp_xlate_off(); + kdp_bcopy((unsigned char *)cur_phys_src, + (unsigned char *)cur_virt_dst, cnt); + kdp_xlate_restore(msr); + cur_virt_src +=cnt; + cur_virt_dst +=cnt; + resid -= cnt; + } + } else { + cur_virt_src = (vm_offset_t)src; + cur_virt_dst = (vm_offset_t)dst; + resid = len; + + while (resid != 0) { + if (kdp_pmap) { + if ((cur_phys_src = + kdp_vtophys(kdp_pmap,trunc_page(cur_virt_src))) == 0) + goto exit; + cur_phys_src += (cur_virt_src & PAGE_MASK); + } else { + if ((cur_phys_src = kdp_vtophys(kernel_pmap,cur_virt_src)) == 0) + goto exit; + } + + cnt = ((cur_virt_src + NBPG) & (-NBPG)) - cur_virt_src; + if (cnt > resid) cnt = resid; + if (kdp_pmap) { +#ifdef KDP_VM_READ_DEBUG + kprintf("kdp_vm_read2: pmap %x, virt %x, phys %x\n", + kdp_pmap, cur_virt_src, cur_phys_src); +#endif + msr = 
kdp_xlate_off(); + kdp_bcopy((unsigned char *)cur_phys_src, + (unsigned char *)cur_virt_dst, cnt); + kdp_xlate_restore(msr); + } else { + kdp_bcopy((unsigned char *)cur_virt_src, + (unsigned char *)cur_virt_dst, cnt); + } + cur_virt_src +=cnt; + cur_virt_dst +=cnt; + resid -= cnt; + } + } +exit: +#ifdef KDP_VM_READ_DEBUG + kprintf("kdp_vm_read: ret %08X\n", len-resid); +#endif + return (len-resid); +} + +/* + * + */ +unsigned kdp_vm_write( + caddr_t src, + caddr_t dst, + unsigned len) +{ + vm_offset_t cur_virt_src, cur_virt_dst; + vm_offset_t cur_phys_src, cur_phys_dst; + unsigned resid, cnt, cnt_src, cnt_dst; + unsigned msr; + +#ifdef KDP_VM_WRITE_DEBUG + printf("kdp_vm_write: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *)src)[0], ((unsigned long *)src)[1]); +#endif + + cur_virt_src = (vm_offset_t)src; + cur_virt_dst = (vm_offset_t)dst; + resid = len; + + while (resid != 0) { + if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) + goto exit; + if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) + goto exit; + + cnt_src = ((cur_phys_src + NBPG) & (-NBPG)) - cur_phys_src; + cnt_dst = ((cur_phys_dst + NBPG) & (-NBPG)) - cur_phys_dst; + + if (cnt_src > cnt_dst) + cnt = cnt_dst; + else + cnt = cnt_src; + if (cnt > resid) + cnt = resid; + + msr = kdp_xlate_off(); + kdp_bcopy((unsigned char *)cur_virt_src, (unsigned char *)cur_phys_dst, cnt); + kdp_flush_cache(cur_phys_dst, cnt); + kdp_xlate_restore(msr); + + cur_virt_src +=cnt; + cur_virt_dst +=cnt; + resid -= cnt; + } +exit: + return (len-resid); +} + diff --git a/osfmk/kdp/pe/POWERMAC/kdp_mace.c b/osfmk/kdp/pe/POWERMAC/kdp_mace.c new file mode 100644 index 000000000..542f1257f --- /dev/null +++ b/osfmk/kdp/pe/POWERMAC/kdp_mace.c @@ -0,0 +1,672 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1997 Apple Computer, Inc. + * + * ethernet driver for mace on-board ethernet + * + * HISTORY + * + * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 + * - ripped off code from MK/LINUX, turned it into a polled-mode + * driver for the PCI (8500) class machines + * + * Dieter Siegmund (dieter@next.com) Fri Mar 21 12:41:29 PST 1997 + * - reworked to support a BSD-style interface, and to support kdb polled + * interface and interrupt-driven interface concurrently + * + * Justin Walker (justin@apple.com) Tue May 20 10:29:29 PDT 1997 + * - Added multicast support + * + * Dieter Siegmund (dieter@next.com) Thu May 29 15:02:29 PDT 1997 + * - fixed problem with sending arp packets for ip address 0.0.0.0 + * - use kdp_register_send_receive() instead of defining + * en_send_pkt/en_recv_pkt routines to avoid name space + * collisions with IOEthernetDebugger and allow these routines to be + * overridden by a driverkit-style driver + * + * Dieter Siegmund (dieter@apple.com) Tue Jun 24 18:29:15 PDT 1997 + * - don't let the adapter auto-strip 802.3 receive frames, it messes + * up 
the frame size logic + * + * Dieter Siegmund (dieter@apple.com) Tue Aug 5 16:24:52 PDT 1997 + * - handle multicast address deletion correctly + */ +#ifdef MACE_DEBUG +/* + * Caveat: MACE_DEBUG delimits some code that is getting kind of + * stale. Before blindly turning on MACE_DEBUG for your + * testing, take a look at the code enabled by it to check + * that it is reasonably sane. + */ +#endif + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "kdp_mace.h" + +struct kdp_mace_copy_desc { + int * len; + char * data; +}; +static mace_t mace; + +#define MACE_DMA_AREA_SIZE \ + (ETHER_RX_NUM_DBDMA_BUFS * ETHERNET_BUF_SIZE + PG_SIZE) +static unsigned long + mace_rx_dma_area[(MACE_DMA_AREA_SIZE + + sizeof(long))/sizeof(long)]; +static unsigned long + mace_tx_dma_area[(ETHERNET_BUF_SIZE + PG_SIZE + + sizeof(long))/sizeof(long)]; + +#ifdef MACE_DEBUG +static unsigned char testBuffer[PG_SIZE * 4]; +static unsigned char testMsg[] = "mace ethernet interface test"; +#endif + +static void polled_send_pkt(char * data, int len); +static void polled_receive_pkt(char *data, int *len, int timeout_ms); + +void kdp_mace_reset(mace_t *); +void kdp_mace_geteh(unsigned char *); +void kdp_mace_setup_dbdma(void); +boolean_t kdp_mace_init(void * baseAddresses[3], unsigned char * netAddr); +#ifdef MACE_DEBUG +static void printContiguousEtherPacket(u_char *, int); +static void send_test_packet(void); +#endif + +typedef int (*funcptr)(char *, int, void *); +int kdp_mace_recv_pkt(funcptr , void *); + +#ifdef MACE_DEBUG +static int +macAddrsEqual(unsigned char * one, unsigned char * two) +{ + int i; + + for (i = 0; i < NUM_EN_ADDR_BYTES; i++) + if (*one++ != *two++) + return 0; + return 1; +} + +static __inline__ int +isprint(unsigned char c) +{ + return (c >= 0x20 && c <= 0x7e); +} + +static void +printEtherHeader(enet_addr_t * dh, enet_addr_t * sh, u_short etype) +{ + u_char * dhost = dh->ether_addr_octet; + u_char * shost = 
sh->ether_addr_octet; + + printf("Dst: %x:%x:%x:%x:%x:%x Src: %x:%x:%x:%x:%x:%x Type: 0x%x\n", + dhost[0], dhost[1], dhost[2], dhost[3], dhost[4], dhost[5], + shost[0], shost[1], shost[2], shost[3], shost[4], shost[5], + etype); +} + +static void +printData(u_char * data_p, int n_bytes) +{ +#define CHARS_PER_LINE 16 + char line_buf[CHARS_PER_LINE + 1]; + int line_pos; + int offset; + + for (line_pos = 0, offset = 0; offset < n_bytes; offset++, data_p++) { + if (line_pos == 0) { + printf("%04d ", offset); + } + + line_buf[line_pos] = isprint(*data_p) ? *data_p : '.'; + printf(" %02x", *data_p); + line_pos++; + if (line_pos == CHARS_PER_LINE) { + line_buf[CHARS_PER_LINE] = '\0'; + printf(" %s\n", line_buf); + line_pos = 0; + } + } + if (line_pos) { /* need to finish up the line */ + for (; line_pos < CHARS_PER_LINE; line_pos++) { + printf(" "); + line_buf[line_pos] = ' '; + } + line_buf[CHARS_PER_LINE] = '\0'; + printf(" %s\n", line_buf); + } +} + +static void +printEtherPacket(enet_addr_t * dhost, enet_addr_t * shost, u_short type, + u_char * data_p, int n_bytes) +{ + printEtherHeader(dhost, shost, type); + printData(data_p, n_bytes); +} + +static void +printContiguousEtherPacket(u_char * data_p, int n_bytes) +{ + printEtherPacket((enet_addr_t *)data_p, + (enet_addr_t *)(data_p + NUM_EN_ADDR_BYTES), + *((u_short *)(data_p + (NUM_EN_ADDR_BYTES * 2))), + data_p, n_bytes); +} +#endif + + +/* + * kdp_mace_reset + * + * Reset the board.. + */ +void +kdp_mace_reset(mace_t * m) +{ + dbdma_reset(m->rv_dbdma); + dbdma_reset(m->tx_dbdma); +} + + +/* + * kdp_mace_geteh: + * + * This function gets the ethernet address (array of 6 unsigned + * bytes) from the MACE board registers. 
+ * + */ +void +kdp_mace_geteh(unsigned char *ep) +{ + int i; + unsigned char ep_temp; + + mace.ereg->iac = IAC_PHYADDR; eieio(); + + for (i = 0; i < ETHER_ADD_SIZE; i++) { + ep_temp = mace.ereg->padr; eieio(); + *ep++ = ep_temp; + } +} + +/* + * mace_seteh: + * + * This function sets the ethernet address (array of 6 unsigned + * bytes) on the MACE board. + */ +static void +mace_seteh(unsigned char *ep) +{ + int i; + unsigned char status; + + if (mace.chip_id != MACE_REVISION_A2) { + mace.ereg->iac = IAC_ADDRCHG|IAC_PHYADDR; eieio(); + + while ((status = mace.ereg->iac)) { + if ((status & IAC_ADDRCHG) == 0) { + eieio(); + break; + } + eieio(); + } + } + else { + /* start to load the address.. */ + mace.ereg->iac = IAC_PHYADDR; eieio(); + } + + for (i = 0; i < NUM_EN_ADDR_BYTES; i++) { + mace.ereg->padr = *(ep+i); eieio(); + } + return; +} + +/* + * kdp_mace_setup_dbdma + * + * Setup various dbdma pointers. + */ +void +kdp_mace_setup_dbdma() +{ + mace_t * m = &mace; + int i; + dbdma_command_t * d; + vm_offset_t address; + dbdma_regmap_t * regmap; + +#define ALIGN_MASK 0xfffffffcUL + if (m->rv_dma_area == 0) { + m->rv_dma_area = (unsigned char *) + ((((unsigned long)mace_rx_dma_area) + 3) & ALIGN_MASK); + m->rv_dma = dbdma_alloc(ETHER_RX_NUM_DBDMA_BUFS + 2); + m->tx_dma = dbdma_alloc(TX_NUM_DBDMA); + m->tx_dma_area = (unsigned char *) + ((((unsigned long)mace_tx_dma_area) + 3) & ALIGN_MASK); + } + + /* set up a ring of buffers */ + d = m->rv_dma; + for (i = 0; i < ETHER_RX_NUM_DBDMA_BUFS; i++, d++) { + address = (vm_offset_t) kvtophys((vm_offset_t)&m->rv_dma_area[i*ETHERNET_BUF_SIZE]); + DBDMA_BUILD(d, DBDMA_CMD_IN_LAST, 0, ETHERNET_BUF_SIZE, + address, DBDMA_INT_ALWAYS, + DBDMA_WAIT_NEVER, + DBDMA_BRANCH_NEVER); + } + + /* stop when we hit the end of the list */ + DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, DBDMA_INT_ALWAYS, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + d++; + + /* branch to command at "address" ie. 
element 0 of the "array" */ + DBDMA_BUILD(d, DBDMA_CMD_NOP, 0, 0, 0, DBDMA_INT_NEVER, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_ALWAYS); + address = (vm_offset_t) kvtophys((vm_offset_t)m->rv_dma); + dbdma_st4_endian(&d->d_cmddep, address); + + m->rv_head = 0; + m->rv_tail = ETHER_RX_NUM_DBDMA_BUFS; /* always contains DBDMA_CMD_STOP */ + regmap = m->rv_dbdma; + + /* stop/init/restart dma channel */ + dbdma_reset(regmap); + dbdma_reset(m->tx_dbdma); + + /* Set the wait value.. */ + dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x00)); + + /* Set the tx wait value */ + regmap = m->tx_dbdma; + dbdma_st4_endian(®map->d_wait, DBDMA_SET_CNTRL(0x20)); + + flush_dcache((vm_offset_t)m->rv_dma, + sizeof(dbdma_command_t) * (ETHER_RX_NUM_DBDMA_BUFS + 2), + FALSE); + /* start receiving */ + dbdma_start(m->rv_dbdma, m->rv_dma); +} + +#ifdef MACE_DEBUG +static void +send_test_packet() +{ + unsigned char * tp; + + bzero((char *)testBuffer, sizeof(testBuffer)); + + tp = testBuffer; + + /* send self-addressed packet */ + bcopy((char *)&mace.macaddr[0], (char *)tp, NUM_EN_ADDR_BYTES); + tp += NUM_EN_ADDR_BYTES; + bcopy((char *)&mace.macaddr[0], (char *)tp, NUM_EN_ADDR_BYTES); + tp += NUM_EN_ADDR_BYTES; + *tp++ = 0; + *tp++ = 0; + bcopy((char *)testMsg, (char *)tp, sizeof(testMsg)); + polled_send_pkt((char *)testBuffer, 80); + return; +} +#endif + +/* + * Function: kdp_mace_init + * + * Purpose: + * Called early on, initializes the adapter and readies it for + * kdb kernel debugging. 
+ */ +boolean_t +kdp_mace_init(void * baseAddresses[3], unsigned char * netAddr) +{ + unsigned char status; + mace_t * m = &mace; + struct mace_board * ereg; + int mpc = 0; + int i; + + bzero((char *)&mace, sizeof(mace)); + + /* get the ethernet registers' mapped address */ + ereg = m->ereg + = (struct mace_board *) baseAddresses[0]; + m->tx_dbdma = (dbdma_regmap_t *) baseAddresses[1]; + m->rv_dbdma = (dbdma_regmap_t *) baseAddresses[2]; + + for (i = 0; i < NUM_EN_ADDR_BYTES; i++) + m->macaddr[i] = netAddr[i]; + + /* Reset the board & AMIC.. */ + kdp_mace_reset(m); + + /* grab the MACE chip rev */ + m->chip_id = (ereg->chipid2 << 8 | ereg->chipid1); + + /* don't auto-strip for 802.3 */ + m->ereg->rcvfc &= ~(RCVFC_ASTRPRCV); + + /* set the ethernet address */ + mace_seteh(mace.macaddr); + { + unsigned char macaddr[NUM_EN_ADDR_BYTES]; + kdp_mace_geteh(macaddr); + printf("mace ethernet [%02x:%02x:%02x:%02x:%02x:%02x]\n", + macaddr[0], macaddr[1], macaddr[2], + macaddr[3], macaddr[4], macaddr[5]); + } + + /* Now clear the Multicast filter */ + if (m->chip_id != MACE_REVISION_A2) { + ereg->iac = IAC_ADDRCHG|IAC_LOGADDR; eieio(); + + while ((status = ereg->iac)) { + if ((status & IAC_ADDRCHG) == 0) + break; + eieio(); + } + eieio(); + } + else { + ereg->iac = IAC_LOGADDR; eieio(); + } + { + int i; + + for (i=0; i < 8; i++) + { ereg->ladrf = 0; + eieio(); + } + } + + /* register interrupt routines */ + kdp_mace_setup_dbdma(); + + /* Start the chip... 
*/ + m->ereg->maccc = MACCC_ENXMT|MACCC_ENRCV; eieio(); + { + volatile char ch = mace.ereg->ir; eieio(); + } + + delay(500); /* paranoia */ + mace.ereg->imr = 0xfe; eieio(); + + /* register our debugger routines */ + kdp_register_send_receive((kdp_send_t)polled_send_pkt, + (kdp_receive_t)polled_receive_pkt); + +#ifdef MACE_DEBUG + printf("Testing 1 2 3\n"); + send_test_packet(); + printf("Testing 1 2 3\n"); + send_test_packet(); + printf("Testing 1 2 3\n"); + send_test_packet(); + do { + static unsigned char buf[ETHERNET_BUF_SIZE]; + int len; + int nmpc = mace.ereg->mpc; eieio(); + + if (nmpc > mpc) { + mpc = nmpc; + printf("mpc %d\n", mpc); + } + polled_receive_pkt((char *)buf, &len, 100); + if (len > 0) { + printf("rx %d\n", len); + printContiguousEtherPacket(buf, len); + } + } while(1); +#endif + + return TRUE; +} + +#ifdef MACE_DEBUG +static void +kdp_mace_txstatus(char * msg) +{ + dbdma_regmap_t * dmap = mace.tx_dbdma; + volatile unsigned long status; + volatile unsigned long intr; + volatile unsigned long branch; + volatile unsigned long wait; + + status = dbdma_ld4_endian(&dmap->d_status); eieio(); + intr = dbdma_ld4_endian(&dmap->d_intselect); eieio(); + branch = dbdma_ld4_endian(&dmap->d_branch); eieio(); + wait = dbdma_ld4_endian(&dmap->d_wait); eieio(); + printf("(%s s=0x%x i=0x%x b=0x%x w=0x%x)", msg, status, intr, branch, + wait); + return; +} +#endif + +static void +kdp_mace_tx_dbdma(char * data, int len) +{ + unsigned long count; + dbdma_command_t * d; + unsigned long page; + + d = mace.tx_dma; + page = ((unsigned long) data) & PG_MASK; + if ((page + len) <= PG_SIZE) { /* one piece dma */ + DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, + len, + (vm_offset_t) kvtophys((vm_offset_t) data), + DBDMA_INT_NEVER, + DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); + } + else { /* two piece dma */ + count = PG_SIZE - page; + DBDMA_BUILD(d, DBDMA_CMD_OUT_MORE, DBDMA_KEY_STREAM0, + count, + (vm_offset_t)kvtophys((vm_offset_t) data), + DBDMA_INT_NEVER, + 
DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + d++; + DBDMA_BUILD(d, DBDMA_CMD_OUT_LAST, DBDMA_KEY_STREAM0, + len - count, (vm_offset_t) + kvtophys((vm_offset_t)((unsigned char *)data + count)), + DBDMA_INT_NEVER, + DBDMA_WAIT_IF_FALSE, DBDMA_BRANCH_NEVER); + } + d++; + DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, + 1, kvtophys((vm_offset_t) &mace.ereg->xmtfs),DBDMA_INT_NEVER, +// 1, &mace.ereg->xmtfs,DBDMA_INT_NEVER, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + d++; + DBDMA_BUILD(d, DBDMA_CMD_LOAD_QUAD, DBDMA_KEY_SYSTEM, + 1, kvtophys((vm_offset_t) &mace.ereg->ir), DBDMA_INT_ALWAYS, +// 1, &mace.ereg->ir, DBDMA_INT_ALWAYS, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + d++; + DBDMA_BUILD(d, DBDMA_CMD_STOP, 0, 0, 0, 0, 0, 0); + + flush_dcache((vm_offset_t)mace.tx_dma, + sizeof(dbdma_command_t) * TX_NUM_DBDMA, + FALSE); + dbdma_start(mace.tx_dbdma, mace.tx_dma); + return; + +} + +static void +waitForDBDMADone(char * msg) +{ + { + /* wait for tx dma completion */ + dbdma_regmap_t * dmap = mace.tx_dbdma; + int i; + volatile unsigned long val; + + i = 0; + do { + val = dbdma_ld4_endian(&dmap->d_status); eieio(); + delay(50); + i++; + } while ((i < 100000) && (val & DBDMA_CNTRL_ACTIVE)); + if (i == 100000) + printf("mace(%s): kdp_mace_tx_dbdma poll timed out 0x%x", msg, val); + } +} + +int +kdp_mace_recv_pkt(funcptr pktfunc, void * p) +{ + vm_offset_t address; + struct mace_board * board; + long bytes; + int done = 0; + int doContinue = 0; + mace_t * m; + unsigned long resid; + unsigned short status; + int tail; + + m = &mace; + board = m->ereg; + + /* remember where the tail was */ + tail = m->rv_tail; + for (done = 0; (done == 0) && (m->rv_head != tail);) { + dbdma_command_t * dmaHead; + + dmaHead = &m->rv_dma[m->rv_head]; + resid = dbdma_ld4_endian(&dmaHead->d_status_resid); + status = (resid >> 16); + bytes = resid & 0xffff; + bytes = ETHERNET_BUF_SIZE - bytes - 8; /* strip off FCS/CRC */ + + if ((status & DBDMA_ETHERNET_EOP) == 0) { + /* no packets are ready yet */ + 
break; + } + doContinue = 1; + /* if the packet is good, pass it up */ + if (bytes >= (ETHER_MIN_PACKET - 4)) { + char * dmaPacket; + dmaPacket = (char *)&m->rv_dma_area[m->rv_head * ETHERNET_BUF_SIZE]; + done = (*pktfunc)(dmaPacket, bytes, p); + } + /* mark the head as the new tail in the dma channel command list */ + DBDMA_BUILD(dmaHead, DBDMA_CMD_STOP, 0, 0, 0, DBDMA_INT_ALWAYS, + DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER); + flush_dcache((vm_offset_t)dmaHead, + sizeof(*dmaHead), + FALSE); + eieio(); + + /* make the tail an available dma'able entry */ + { + dbdma_command_t * dmaTail; + dmaTail = &m->rv_dma[m->rv_tail]; + address = kvtophys((vm_offset_t) + &m->rv_dma_area[m->rv_tail*ETHERNET_BUF_SIZE]); + // this command is live so write it carefully + DBDMA_ST4_ENDIAN(&dmaTail->d_address, address); + dmaTail->d_status_resid = 0; + dmaTail->d_cmddep = 0; + eieio(); + DBDMA_ST4_ENDIAN(&dmaTail->d_cmd_count, + ((DBDMA_CMD_IN_LAST) << 28) | ((0) << 24) | + ((DBDMA_INT_ALWAYS) << 20) | + ((DBDMA_BRANCH_NEVER) << 18) | ((DBDMA_WAIT_NEVER) << 16) | + (ETHERNET_BUF_SIZE)); + eieio(); + flush_dcache((vm_offset_t)dmaTail, + sizeof(*dmaTail), + FALSE); + } + /* head becomes the tail */ + m->rv_tail = m->rv_head; + + /* advance the head */ + m->rv_head++; + if (m->rv_head == (ETHER_RX_NUM_DBDMA_BUFS + 1)) + m->rv_head = 0; + } + if (doContinue) { + sync(); + dbdma_continue(m->rv_dbdma); + } + return (done); +} + +static int +kdp_mace_copy(char * pktBuf, int len, void * p) +{ + struct kdp_mace_copy_desc * cp = (struct kdp_mace_copy_desc *)p; + + bcopy((char *)pktBuf, (char *)cp->data, len); + *cp->len = len; + return (1); /* signal that we're done */ +} + +/* kdb debugger routines */ +static void +polled_send_pkt(char * data, int len) +{ + waitForDBDMADone("mace: polled_send_pkt start"); + kdp_mace_tx_dbdma(data, len); + waitForDBDMADone("mace: polled_send_pkt end"); + return; +} + +static void +polled_receive_pkt(char *data, int *len, int timeout_ms) +{ + struct 
kdp_mace_copy_desc cp; + + cp.len = len; + cp.data = data; + + timeout_ms *= 1000; + *len = 0; + while (kdp_mace_recv_pkt(kdp_mace_copy, (void *)&cp) == 0) { + if (timeout_ms <= 0) + break; + delay(50); + timeout_ms -= 50; + } + return; +} diff --git a/osfmk/kdp/pe/POWERMAC/kdp_mace.h b/osfmk/kdp/pe/POWERMAC/kdp_mace.h new file mode 100644 index 000000000..f6e978bff --- /dev/null +++ b/osfmk/kdp/pe/POWERMAC/kdp_mace.h @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. 
+ * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * MKLINUX-1.0DR2 + */ +/* + * PMach Operating System + * Copyright (c) 1995 Santa Clara University + * All Rights Reserved. + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: if_3c501.h + * Author: Philippe Bernadat + * Date: 1989 + * Copyright (c) 1989 OSF Research Institute + * + * 3COM Etherlink 3C501 Mach Ethernet driver + */ +/* + Copyright 1990 by Open Software Foundation, +Cambridge, MA. + + All Rights Reserved + + Permission to use, copy, modify, and distribute this software and +its documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appears in all copies and +that both the copyright notice and this permission notice appear in +supporting documentation, and that the name of OSF or Open Software +Foundation not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. + + OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, +IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, +NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION +WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ + + + +#define ENETPAD(n) char n[15] + +/* 0x50f0a000 */ +struct mace_board { + volatile unsigned char rcvfifo; /* 00 receive fifo */ + ENETPAD(epad0); + volatile unsigned char xmtfifo; /* 01 transmit fifo */ + ENETPAD(epad1); + volatile unsigned char xmtfc; /* 02 transmit frame control */ + ENETPAD(epad2); + volatile unsigned char xmtfs; /* 03 transmit frame status */ + ENETPAD(epad3); + volatile unsigned char xmtrc; /* 04 transmit retry count */ + ENETPAD(epad4); + volatile unsigned char rcvfc; /* 05 receive frame control -- 4 bytes */ + ENETPAD(epad5); + volatile unsigned char rcvfs; /* 06 receive frame status */ + ENETPAD(epad6); + volatile unsigned char fifofc; /* 07 fifo frame count */ + ENETPAD(epad7); + volatile unsigned char ir; /* 08 interrupt */ + ENETPAD(epad8); + volatile unsigned char imr; /* 09 interrupt mask */ + ENETPAD(epad9); + volatile unsigned char pr; /* 10 poll */ + ENETPAD(epad10); + volatile unsigned char biucc; /* 11 bus interface unit configuration control */ + ENETPAD(epad11); + volatile unsigned char fifocc; /* 12 fifo configuration control */ + ENETPAD(epad12); + volatile unsigned char maccc; /* 13 media access control configuration control */ + ENETPAD(epad13); + volatile unsigned char plscc; /* 14 physical layer signalling configuration control */ + ENETPAD(epad14); + volatile unsigned char phycc; /* 15 physical layer configuration control */ + ENETPAD(epad15); + volatile unsigned char chipid1; /* 16 chip identification LSB */ + ENETPAD(epad16); + volatile unsigned char chipid2; /* 17 chip identification MSB */ + ENETPAD(epad17); + volatile unsigned char iac; /* 18 internal address configuration */ + ENETPAD(epad18); + volatile unsigned char res1; /* 19 */ + ENETPAD(epad19); + volatile unsigned char ladrf; /* 20 logical address filter -- 8 bytes */ + ENETPAD(epad20); + volatile unsigned char padr; /* 21 physical address -- 6 bytes */ + ENETPAD(epad21); + volatile unsigned char res2; /* 22 */ + ENETPAD(epad22); + volatile unsigned 
char res3; /* 23 */ + ENETPAD(epad23); + volatile unsigned char mpc; /* 24 missed packet count */ + ENETPAD(epad24); + volatile unsigned char res4; /* 25 */ + ENETPAD(epad25); + volatile unsigned char rntpc; /* 26 runt packet count */ + ENETPAD(epad26); + volatile unsigned char rcvcc; /* 27 receive collision count */ + ENETPAD(epad27); + volatile unsigned char res5; /* 28 */ + ENETPAD(epad28); + volatile unsigned char utr; /* 29 user test */ + ENETPAD(epad29); + volatile unsigned char res6; /* 30 */ + ENETPAD(epad30); + volatile unsigned char res7; /* 31 */ + }; + +/* + * Chip Revisions.. + */ + +#define MACE_REVISION_B0 0x0940 +#define MACE_REVISION_A2 0x0941 + +/* xmtfc */ +#define XMTFC_DRTRY 0X80 +#define XMTFC_DXMTFCS 0x08 +#define XMTFC_APADXNT 0x01 + +/* xmtfs */ +#define XMTFS_XNTSV 0x80 +#define XMTFS_XMTFS 0x40 +#define XMTFS_LCOL 0x20 +#define XMTFS_MORE 0x10 +#define XMTFS_ONE 0x08 +#define XMTFS_DEFER 0x04 +#define XMTFS_LCAR 0x02 +#define XMTFS_RTRY 0x01 + +/* xmtrc */ +#define XMTRC_EXDEF 0x80 + +/* rcvfc */ +#define RCVFC_LLRCV 0x08 +#define RCVFC_M_R 0x04 +#define RCVFC_ASTRPRCV 0x01 + +/* rcvfs */ +#define RCVFS_OFLO 0x80 +#define RCVFS_CLSN 0x40 +#define RCVFS_FRAM 0x20 +#define RCVFS_FCS 0x10 +#define RCVFS_REVCNT 0x0f + +/* fifofc */ +#define FIFOCC_XFW_8 0x00 +#define FIFOCC_XFW_16 0x40 +#define FIFOCC_XFW_32 0x80 +#define FIFOCC_XFW_XX 0xc0 +#define FIFOCC_RFW_16 0x00 +#define FIFOCC_RFW_32 0x10 +#define FIFOCC_RFW_64 0x20 +#define FIFOCC_RFW_XX 0x30 +#define FIFOCC_XFWU 0x08 +#define FIFOCC_RFWU 0x04 +#define FIFOCC_XBRST 0x02 +#define FIFOCC_RBRST 0x01 + + +/* ir */ +#define IR_JAB 0x80 +#define IR_BABL 0x40 +#define IR_CERR 0x20 +#define IR_RCVCCO 0x10 +#define IR_RNTPCO 0x08 +#define IR_MPCO 0x04 +#define IR_RCVINT 0x02 +#define IR_XMTINT 0x01 + +/* imr */ +#define IMR_MJAB 0x80 +#define IMR_MBABL 0x40 +#define IMR_MCERR 0x20 +#define IMR_MRCVCCO 0x10 +#define IMR_MRNTPCO 0x08 +#define IMR_MMPCO 0x04 +#define IMR_MRCVINT 0x02 +#define 
IMR_MXMTINT 0x01 + +/* pr */ +#define PR_XMTSV 0x80 +#define PR_TDTREQ 0x40 +#define PR_RDTREQ 0x20 + +/* biucc */ +#define BIUCC_BSWP 0x40 +#define BIUCC_XMTSP04 0x00 +#define BIUCC_XMTSP16 0x10 +#define BIUCC_XMTSP64 0x20 +#define BIUCC_XMTSP112 0x30 +#define BIUCC_SWRST 0x01 + +/* fifocc */ +#define FIFOCC_XMTFW08W 0x00 +#define FIFOCC_XMTFW16W 0x40 +#define FIFOCC_XMTFW32W 0x80 + +#define FIFOCC_RCVFW16 0x00 +#define FIFOCC_RCVFW32 0x10 +#define FIFOCC_RCVFW64 0x20 + +#define FIFOCC_XMTFWU 0x08 +#define FIFOCC_RCVFWU 0x04 +#define FIFOCC_XMTBRST 0x02 +#define FIFOCC_RCVBRST 0x01 + +/* maccc */ +#define MACCC_PROM 0x80 +#define MACCC_DXMT2PD 0x40 +#define MACCC_EMBA 0x20 +#define MACCC_DRCVPA 0x08 +#define MACCC_DRCVBC 0x04 +#define MACCC_ENXMT 0x02 +#define MACCC_ENRCV 0x01 + +/* plscc */ +#define PLSCC_XMTSEL 0x08 +#define PLSCC_AUI 0x00 +#define PLSCC_TENBASE 0x02 +#define PLSCC_DAI 0x04 +#define PLSCC_GPSI 0x06 +#define PLSCC_ENPLSIO 0x01 + +/* phycc */ +#define PHYCC_LNKFL 0x80 +#define PHYCC_DLNKTST 0x40 +#define PHYCC_REVPOL 0x20 +#define PHYCC_DAPC 0x10 +#define PHYCC_LRT 0x08 +#define PHYCC_ASEL 0x04 +#define PHYCC_RWAKE 0x02 +#define PHYCC_AWAKE 0x01 + +/* iac */ +#define IAC_ADDRCHG 0x80 +#define IAC_PHYADDR 0x04 +#define IAC_LOGADDR 0x02 + +/* utr */ +#define UTR_RTRE 0x80 +#define UTR_RTRD 0x40 +#define UTR_RPA 0x20 +#define UTR_FCOLL 0x10 +#define UTR_RCVFCSE 0x08 + +#define UTR_NOLOOP 0x00 +#define UTR_EXTLOOP 0x02 +#define UTR_INLOOP 0x04 +#define UTR_INLOOP_M 0x06 + +#define ENET_PHYADDR_LEN 6 +#define ENET_HEADER 14 + +#define BFRSIZ 2048 +#define ETHER_ADD_SIZE 6 /* size of a MAC address */ +#define DSF_LOCK 1 +#define DSF_RUNNING 2 +#define MOD_ENAL 1 +#define MOD_PROM 2 + +/* + * MACE Chip revision codes + */ +#define MACERevA2 0x0941 +#define MACERevB0 0x0940 + +/* + * Defines and device state + * Dieter Siegmund (dieter@next.com) Thu Feb 27 18:25:33 PST 1997 + */ + +#define PG_SIZE 0x1000UL +#define PG_MASK (PG_SIZE - 1UL) + +#define 
ETHERMTU 1500 +#define ETHER_RX_NUM_DBDMA_BUFS 32 +#define ETHERNET_BUF_SIZE (ETHERMTU + 36) +#define ETHER_MIN_PACKET 64 +#define TX_NUM_DBDMA 6 +#define NUM_EN_ADDR_BYTES 6 + +#define DBDMA_ETHERNET_EOP 0x40 + +typedef struct mace_s { + struct mace_board * ereg; /* ethernet register set address */ + dbdma_regmap_t * tx_dbdma; + dbdma_regmap_t * rv_dbdma; + unsigned char macaddr[NUM_EN_ADDR_BYTES]; /* mac address */ + int chip_id; + dbdma_command_t *rv_dma; + dbdma_command_t *tx_dma; + unsigned char *rv_dma_area; + unsigned char *tx_dma_area; + int rv_tail; + int rv_head; +} mace_t; + + diff --git a/osfmk/kern/Makefile b/osfmk/kern/Makefile new file mode 100644 index 000000000..7c9e1f38f --- /dev/null +++ b/osfmk/kern/Makefile @@ -0,0 +1,50 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = + +EXPORT_ONLY_FILES = \ + assert.h \ + clock.h \ + cpu_number.h \ + cpu_data.h \ + debug.h \ + ipc_mig.h \ + kalloc.h \ + kern_types.h \ + lock.h \ + host.h \ + mach_param.h \ + macro_help.h \ + processor.h \ + queue.h \ + sched_prim.h \ + simple_lock.h \ + simple_lock_types.h \ + sync_lock.h \ + task.h \ + thread.h \ + thread_act.h \ + thread_call.h \ + time_out.h \ + wait_queue.h \ + zalloc.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = kern + +EXPORT_MI_LIST = ${DATAFILES} ${EXPORT_ONLY_FILES} + +EXPORT_MI_DIR = kern + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/kern/assert.h b/osfmk/kern/assert.h new file mode 100644 index 000000000..b7c9317e7 --- /dev/null +++ b/osfmk/kern/assert.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_ASSERT_H_ +#define _KERN_ASSERT_H_ + +/* assert.h 4.2 85/01/21 */ + +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#endif + +/* Assert error */ +extern void Assert( + const char *file, + int line, + const char *expression); + +#if MACH_ASSERT + +#define assert(ex) \ +MACRO_BEGIN \ + if (!(ex)) \ + Assert(__FILE__, __LINE__, # ex); \ +MACRO_END +#define assert_static(x) assert(x) + +#else /* MACH_ASSERT */ + +#define assert(ex) ((void)0) +#define assert_static(ex) + +#endif /* MACH_ASSERT */ + +#endif /* _KERN_ASSERT_H_ */ diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c new file mode 100644 index 000000000..f454fe9a3 --- /dev/null +++ b/osfmk/kern/ast.c @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * + * This file contains routines to check whether an ast is needed. + * + * ast_check() - check whether ast is needed for interrupt or context + * switch. Usually called by clock interrupt handler. 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if TASK_SWAPPER +#include +#endif /* TASK_SWAPPER */ + +volatile ast_t need_ast[NCPUS]; + +void +ast_init(void) +{ +#ifndef MACHINE_AST + register int i; + + for (i=0; itop_act; +#ifdef MACH_BSD + extern void bsd_ast(thread_act_t); + extern void bsdinit_task(void); +#endif + + mp_disable_preemption(); + mycpu = cpu_number(); + reasons = need_ast[mycpu] & mask; + need_ast[mycpu] &= ~reasons; + mp_enable_preemption(); + + ml_set_interrupts_enabled(interrupt); + + /* + * No ast for an idle thread + */ + if (self->state & TH_IDLE) + return; + + /* + * Check for preemption + */ + if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) { + reasons &= ~AST_URGENT; + if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) { + mp_disable_preemption(); + mypr = current_processor(); + if (csw_needed(self, mypr)) { + reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM); + } + mp_enable_preemption(); + } + if (reasons & (AST_BLOCK | AST_QUANTUM)) { + counter(c_ast_taken_block++); + thread_block_reason((void (*)(void))0, + (reasons & (AST_BLOCK | AST_QUANTUM))); + } + if (reasons == 0) + return; + } + +#ifdef MACH_BSD + /* + * Check for BSD hardcoded hooks + */ + if (reasons & AST_BSD) { + thread_ast_clear(act,AST_BSD); + bsd_ast(act); + } + if (reasons & AST_BSD_INIT) { + thread_ast_clear(act,AST_BSD_INIT); + bsdinit_task(); + } +#endif + +#if TASK_SWAPPER + /* must be before AST_APC */ + if (reasons & AST_SWAPOUT) { + spl_t s; + swapout_ast(); + s = splsched(); + mp_disable_preemption(); + mycpu = cpu_number(); + if (need_ast[mycpu] & AST_APC) { + /* generated in swapout_ast() to get suspended */ + reasons |= AST_APC; /* process now ... */ + need_ast[mycpu] &= ~AST_APC; /* ... 
and not later */ + } + mp_enable_preemption(); + splx(s); + } +#endif /* TASK_SWAPPER */ + + /* + * migration APC hook + */ + if (reasons & AST_APC) { + act_execute_returnhandlers(); + } + + /* + * thread_block needs to know if the thread's quantum + * expired so the thread can be put on the tail of + * run queue. One of the previous actions might well + * have woken a high-priority thread, so we also use + * csw_needed check. + */ + reasons &= (AST_BLOCK | AST_QUANTUM); + if (reasons == 0) { + mp_disable_preemption(); + mypr = current_processor(); + if (csw_needed(self, mypr)) { + reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM); + } + mp_enable_preemption(); + } + if ((reasons & (AST_BLOCK | AST_QUANTUM)) && + (wait_queue_assert_possible(self))) { + counter(c_ast_taken_block++); + /* + * JMM - SMP machines don't like blocking at a continuation + * here - why not? Could be a combination of set_state and + * suspension on the thread_create_running API? + * + * thread_block_reason(thread_exception_return, reasons); + */ + thread_block_reason((void (*)(void))0, reasons); + } +} + +void +ast_check(void) +{ + register int mycpu; + register processor_t myprocessor; + register thread_t thread = current_thread(); + spl_t s = splsched(); + + mp_disable_preemption(); + mycpu = cpu_number(); + + /* + * Check processor state for ast conditions. + */ + myprocessor = cpu_to_processor(mycpu); + switch(myprocessor->state) { + case PROCESSOR_OFF_LINE: + case PROCESSOR_IDLE: + case PROCESSOR_DISPATCHING: + /* + * No ast. + */ + break; + +#if NCPUS > 1 + case PROCESSOR_ASSIGN: + /* + * Need ast to force action thread onto processor. + * + * XXX Should check if action thread is already there. + */ + ast_on(AST_BLOCK); + break; +#endif /* NCPUS > 1 */ + + case PROCESSOR_RUNNING: + case PROCESSOR_SHUTDOWN: + /* + * Propagate thread ast to processor. If we already + * need an ast, don't look for more reasons. 
+ */ + ast_propagate(current_act()->ast); + if (ast_needed(mycpu)) + break; + + /* + * Context switch check. + */ + if (csw_needed(thread, myprocessor)) { + ast_on((myprocessor->first_quantum ? + AST_BLOCK : AST_QUANTUM)); + } + break; + + default: + panic("ast_check: Bad processor state"); + } + mp_enable_preemption(); + splx(s); +} + +/* + * JMM - Temporary exports to other components + */ +#undef ast_on +#undef ast_off + +void +ast_on(ast_t reason) +{ + boolean_t intr; + + intr = ml_set_interrupts_enabled(FALSE); + ast_on_fast(reason); + (void *)ml_set_interrupts_enabled(intr); +} + +void +ast_off(ast_t reason) +{ + ast_off_fast(reason); +} diff --git a/osfmk/kern/ast.h b/osfmk/kern/ast.h new file mode 100644 index 000000000..efda6b3a1 --- /dev/null +++ b/osfmk/kern/ast.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * kern/ast.h: Definitions for Asynchronous System Traps.
 */

#ifndef _KERN_AST_H_
#define _KERN_AST_H_

/*
 * NOTE(review): the header names on the following #include lines were
 * stripped when this patch was extracted (angle-bracket text lost).
 * Restore them from the original xnu-123.5 tree before building.
 */
#include
#include

#include
#include
#include
#include
#include
#include

/*
 * A CPU takes an AST when it is about to return to user code.
 * Instead of going back to user code, it calls ast_taken.
 * Machine-dependent code is responsible for maintaining
 * a set of reasons for an AST, and passing this set to ast_taken.
 */

/* An AST reason set: a bitmask of the AST_* values below. */
typedef unsigned int	ast_t;

/*
 * Bits for reasons
 */
#define AST_NONE	0x00
#define AST_HALT	0x01
#define AST_TERMINATE	0x02
#define AST_BLOCK	0x04
#define AST_UNUSED	0x08
#define AST_QUANTUM	0x10
#define AST_APC		0x20	/* migration APC hook */
#define AST_URGENT	0x40

/*
 * JMM - This is here temporarily. AST_BSD is used to simulate a
 * general purpose mechanism for setting asynchronous procedure calls
 * from the outside.
 */
#define AST_BSD		0x80
#define AST_BSD_INIT	0x100

#define AST_SWAPOUT	0x20000

/* Every reason bit; used to test/clear the full pending set. */
#define AST_ALL		(~AST_NONE)

/* Reasons handled by the scheduler vs. those that force a preemption check. */
#define AST_SCHEDULING	(AST_HALT | AST_TERMINATE | AST_BLOCK | AST_SWAPOUT)
#define AST_PREEMPT	(AST_BLOCK | AST_QUANTUM | AST_URGENT)

/* Per-CPU pending-AST words, indexed by cpu_number(). */
extern volatile ast_t	need_ast[NCPUS];

#ifdef	MACHINE_AST
/*
 *	machine/ast.h is responsible for defining aston and astoff.
 */
#else	/* MACHINE_AST */

/* No machine support: arming/disarming the AST trap is a no-op. */
#define aston(mycpu)
#define astoff(mycpu)

#endif	/* MACHINE_AST */

/* Initialize module */
extern void		ast_init(void);

/* Handle ASTs */
extern void		ast_taken(
				boolean_t	preemption,
				ast_t		mask,
				boolean_t	interrupt);

/* Check for pending ASTs */
extern void    		ast_check(void);

/*
 * Per-thread ASTs are reset at context-switch time.
 */
#ifndef	MACHINE_AST_PER_THREAD
#define	MACHINE_AST_PER_THREAD	0
#endif

/* Reasons carried per-activation rather than per-CPU (reloaded by ast_context). */
#define AST_PER_THREAD	(MACHINE_AST_PER_THREAD|AST_HALT|AST_TERMINATE|AST_APC|AST_BSD)

/*
 *	ast_needed, ast_on, ast_off, ast_context, and ast_propagate
 *	assume splsched.
 */

#define ast_needed(mycpu)	need_ast[mycpu]

/* Add reasons to this CPU's pending set; arm the trap if any are pending. */
#define ast_on_fast(reasons)					\
MACRO_BEGIN							\
	int	mycpu = cpu_number();				\
	if ((need_ast[mycpu] |= (reasons)) != AST_NONE)		\
		{ aston(mycpu); }				\
MACRO_END

/* Remove reasons; disarm the trap only when nothing remains pending. */
#define ast_off_fast(reasons)					\
MACRO_BEGIN							\
	int	mycpu = cpu_number();				\
	if ((need_ast[mycpu] &= ~(reasons)) == AST_NONE)	\
		{ astoff(mycpu); }				\
MACRO_END

#define ast_propagate(reasons)		ast_on(reasons)

/*
 * Merge the incoming activation's per-thread reasons into this CPU's
 * pending set (replacing the previous activation's AST_PER_THREAD bits),
 * then arm or disarm the trap accordingly.  Caller must be on `mycpu'.
 */
#define ast_context(act, mycpu)					\
MACRO_BEGIN							\
	assert(mycpu == cpu_number());				\
	if ((need_ast[mycpu] =					\
	     (need_ast[mycpu] &~ AST_PER_THREAD) | (act)->ast)	\
	    != AST_NONE)					\
		{ aston(mycpu); }				\
	else							\
		{ astoff(mycpu); }				\
MACRO_END

#define	ast_on(reason)		ast_on_fast(reason)
#define	ast_off(reason)		ast_off_fast(reason)

/* Per-activation reason manipulation (no trap arming; see note below). */
#define thread_ast_set(act, reason)	(act)->ast |= (reason)
#define thread_ast_clear(act, reason)	(act)->ast &= ~(reason)
#define thread_ast_clear_all(act)	(act)->ast = AST_NONE

/*
 *	NOTE: if thread is the current thread, thread_ast_set should
 *	be followed by ast_propagate().
 */

#ifdef	MACH_KERNEL_PRIVATE

#define ast_urgency()	(need_ast[cpu_number()] & AST_URGENT)

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* _KERN_AST_H_ */

/* ---- begin new file: osfmk/kern/bits.c ---- */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:35 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/29 17:35:55 mburg + * MK7.3 merger + * + * Revision 1.1.24.1 1998/02/03 09:27:19 gdt + * Merge up to MK7.3 + * [1998/02/03 09:12:57 gdt] + * + * Revision 1.1.21.1 1996/11/29 16:57:21 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Added explanatory note. + * [1996/04/10 16:54:46 emcmanus] + * + * Revision 1.1.22.1 1997/06/17 02:57:05 devrcs + * Added `testbit()' routine. + * [1996/03/18 15:21:50 rkc] + * + * Revision 1.1.7.3 1995/01/10 05:10:36 devrcs + * mk6 CR801 - copyright marker not FREE_ + * [1994/12/01 19:24:54 dwm] + * + * Revision 1.1.7.1 1994/06/14 16:59:49 bolinger + * Merge up to NMK17.2. + * [1994/06/14 16:53:29 bolinger] + * + * Revision 1.1.5.1 1994/04/11 09:36:31 bernadat + * Checked in NMK16_2 revision + * [94/03/15 bernadat] + * + * Revision 1.1.3.1 1993/12/23 08:53:13 bernadat + * Checked in bolinger_860ci revision. + * [93/11/29 bernadat] + * + * Revision 1.1.1.2 1993/09/12 15:44:20 bolinger + * Initial checkin of 860 modifications; MD files from NMK14.8. + * + * $EndLog$ + */ +/* + * C version of bit manipulation routines now required by kernel. + * Should be replaced with assembler versions in any real port. + * + * Note that these routines use little-endian numbering for bits (i.e., + * the bit number corresponds to the associated power-of-2). 
/* (close of the bits.c HISTORY comment begun in the previous chunk) */

#include <limits.h>	/* CHAR_BIT; the original #include naming BYTE_SIZE was
			 * garbled in this patch — CHAR_BIT is the ISO C
			 * equivalent (BYTE_SIZE == CHAR_BIT == 8). */

/*
 * Number of bits in one element of a bit string.  Kept signed so the
 * comparisons against the (signed) bit number below are not mixed-sign.
 */
#define INT_SIZE	((int)(CHAR_BIT * sizeof (int)))

/*
 * Set indicated bit in bit string.
 *
 * bitno: little-endian bit number (bit N is the 2^(N % INT_SIZE) bit of
 *        word N / INT_SIZE); must be non-negative and within the string.
 * s:     base of the caller-allocated bit string.
 */
void
setbit(int bitno, int *s)
{
	/* Walk to the word containing the bit. */
	for ( ; bitno >= INT_SIZE; bitno -= INT_SIZE, ++s)
		continue;
	/* Unsigned shift: 1 << (INT_SIZE-1) would be signed overflow (UB). */
	*s |= 1u << bitno;
}

/*
 * Clear indicated bit in bit string.  Same contract as setbit().
 */
void
clrbit(int bitno, int *s)
{
	for ( ; bitno >= INT_SIZE; bitno -= INT_SIZE, ++s)
		continue;
	*s &= ~(1u << bitno);
}

/*
 * Find first (lowest-numbered) set bit in bit string.
 *
 * NOTE: the caller must guarantee at least one bit is set; otherwise the
 * word scan walks past the end of the string (as in the original).
 */
int
ffsbit(int *s)
{
	int		offset;
	unsigned int	mask;

	/* Skip whole words that are zero. */
	for (offset = 0; !*s; offset += INT_SIZE, ++s)
		continue;
	/* Unsigned mask so the final left shift cannot overflow (UB). */
	for (mask = 1; mask; mask <<= 1, ++offset)
		if (mask & (unsigned int)*s)
			return (offset);
	/*
	 * Not reached: *s was non-zero, so some mask bit matched.
	 */
	return (0);
}

/*
 * Test if indicated bit is set in bit string.  Same contract as setbit().
 * Returns non-zero (normalized to 1) if set, 0 otherwise.
 */
int
testbit(int bitno, int *s)
{
	for ( ; bitno >= INT_SIZE; bitno -= INT_SIZE, ++s)
		continue;
	return ((*s & (int)(1u << bitno)) != 0);
}

/*
 * ---- begin new file: osfmk/kern/bsd_kern.c ----
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 */
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef thread_should_halt +#undef ipc_port_release +#undef thread_ast_set +#undef thread_ast_clear + +decl_simple_lock_data(extern,reaper_lock) +extern queue_head_t reaper_queue; + +/* BSD KERN COMPONENT INTERFACE */ + +vm_address_t bsd_init_task = 0; +char init_task_failure_data[1024]; + +thread_act_t get_firstthread(task_t); +vm_map_t get_task_map(task_t); +ipc_space_t get_task_ipcspace(task_t); +boolean_t is_kerneltask(task_t); +boolean_t is_thread_idle(thread_t); +boolean_t is_thread_running(thread_t); +thread_shuttle_t getshuttle_thread( thread_act_t); +thread_act_t getact_thread( thread_shuttle_t); +vm_offset_t get_map_min( vm_map_t); +vm_offset_t get_map_max( vm_map_t); +int get_task_userstop(task_t); +int get_thread_userstop(thread_act_t); +int inc_task_userstop(task_t); +boolean_t thread_should_abort(thread_shuttle_t); +boolean_t current_thread_aborted(void); +void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *); +void ipc_port_release(ipc_port_t); +void thread_ast_set(thread_act_t, ast_t); +void thread_ast_clear(thread_act_t, ast_t); +boolean_t is_thread_active(thread_t); +event_t get_thread_waitevent(thread_t); +kern_return_t get_thread_waitresult(thread_t); +vm_size_t get_vmmap_size(vm_map_t); +int get_vmmap_entries(vm_map_t); +int get_task_numacts(task_t); +thread_act_t get_firstthread(task_t task); +kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int); + +kern_return_t bsd_refvm_object(vm_object_t object); + + +/* + * + */ +void *get_bsdtask_info(task_t t) +{ + return(t->bsd_info); +} + +/* + * + */ +void set_bsdtask_info(task_t t,void * v) +{ + t->bsd_info=v; +} + +/* + * + */ +void *get_bsdthread_info(thread_act_t th) +{ + return(th->uthread); +} + +/* + * XXX: wait for BSD to fix signal code + * Until then, we cannot block here. 
We know the task + * can't go away, so we make sure it is still active after + * retrieving the first thread for extra safety. + */ +thread_act_t get_firstthread(task_t task) +{ + thread_act_t thr_act; + + thr_act = (thread_act_t)queue_first(&task->thr_acts); + if (thr_act == (thread_act_t)&task->thr_acts) + thr_act = THR_ACT_NULL; + if (!task->active) + return(THR_ACT_NULL); + return(thr_act); +} + +kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast) +{ + + thread_act_t inc; + thread_act_t ninc; + thread_act_t thr_act; + thread_t th; + + task_lock(task); + if (!task->active) { + task_unlock(task); + return(KERN_FAILURE); + } + + thr_act = THR_ACT_NULL; + for (inc = (thread_act_t)queue_first(&task->thr_acts); + inc != (thread_act_t)&task->thr_acts; + inc = ninc) { + th = act_lock_thread(inc); + if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) { + thr_act = inc; + break; + } + act_unlock_thread(inc); + ninc = (thread_act_t)queue_next(&inc->thr_acts); + } +out: + if (thact) + *thact = thr_act; + + if (thshut) + *thshut = thr_act? thr_act->thread: THREAD_NULL ; + if (thr_act) { + if (setast) { + thread_ast_set(thr_act, AST_BSD); + if (current_act() == thr_act) + ast_on(AST_BSD); + } + act_unlock_thread(thr_act); + } + task_unlock(task); + + if (thr_act) + return(KERN_SUCCESS); + else + return(KERN_FAILURE); +} + +/* + * + */ +vm_map_t get_task_map(task_t t) +{ + return(t->map); +} + +/* + * + */ +ipc_space_t get_task_ipcspace(task_t t) +{ + return(t->itk_space); +} + +int get_task_numacts(task_t t) +{ + return(t->thr_act_count); +} + +/* + * Reset the current task's map by taking a reference + * on the new map. The old map reference is returned. + */ +vm_map_t +swap_task_map(task_t task,vm_map_t map) +{ + vm_map_t old_map; + + vm_map_reference(map); + task_lock(task); + old_map = task->map; + task->map = map; + task_unlock(task); + return old_map; +} + +/* + * Reset the current act map. 
+ * The caller donates us a reference to the new map + * and we donote our reference to the old map to him. + */ +vm_map_t +swap_act_map(thread_act_t thr_act,vm_map_t map) +{ + vm_map_t old_map; + + act_lock(thr_act); + old_map = thr_act->map; + thr_act->map = map; + act_unlock(thr_act); + return old_map; +} + +/* + * + */ +pmap_t get_task_pmap(task_t t) +{ + return(t->map->pmap); +} + +/* + * + */ +pmap_t get_map_pmap(vm_map_t map) +{ + return(map->pmap); +} +/* + * + */ +task_t get_threadtask(thread_act_t th) +{ + return(th->task); +} + + +/* + * + */ +boolean_t is_thread_idle(thread_t th) +{ + return((th->state & TH_IDLE) == TH_IDLE); +} + +/* + * + */ +boolean_t is_thread_running(thread_t th) +{ + return((th->state & TH_RUN) == TH_RUN); +} + +/* + * + */ +thread_shuttle_t +getshuttle_thread( + thread_act_t th) +{ +#ifdef DEBUG + assert(th->thread); +#endif + return(th->thread); +} + +/* + * + */ +thread_act_t +getact_thread( + thread_shuttle_t th) +{ +#ifdef DEBUG + assert(th->top_act); +#endif + return(th->top_act); +} + +/* + * + */ +vm_offset_t +get_map_min( + vm_map_t map) +{ + return(vm_map_min(map)); +} + +/* + * + */ +vm_offset_t +get_map_max( + vm_map_t map) +{ + return(vm_map_max(map)); +} +vm_size_t +get_vmmap_size( + vm_map_t map) +{ + return(map->size); +} + +int +get_vmsubmap_entries( + vm_map_t map, + vm_object_offset_t start, + vm_object_offset_t end) +{ + int total_entries = 0; + vm_map_entry_t entry; + + vm_map_lock(map); + entry = vm_map_first_entry(map); + while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) { + entry = entry->vme_next; + } + + while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + if(entry->is_sub_map) { + total_entries += + get_vmsubmap_entries(entry->object.sub_map, + entry->offset, + entry->offset + + (entry->vme_end - entry->vme_start)); + } else { + total_entries += 1; + } + entry = entry->vme_next; + } + vm_map_unlock(map); + return(total_entries); +} + +int +get_vmmap_entries( + 
vm_map_t map) +{ + int total_entries = 0; + vm_map_entry_t entry; + + vm_map_lock(map); + entry = vm_map_first_entry(map); + + while(entry != vm_map_to_entry(map)) { + if(entry->is_sub_map) { + total_entries += + get_vmsubmap_entries(entry->object.sub_map, + entry->offset, + entry->offset + + (entry->vme_end - entry->vme_start)); + } else { + total_entries += 1; + } + entry = entry->vme_next; + } + vm_map_unlock(map); + return(total_entries); +} + +/* + * + */ +/* + * + */ +int +get_task_userstop( + task_t task) +{ + return(task->user_stop_count); +} + +/* + * + */ +int +get_thread_userstop( + thread_act_t th) +{ + return(th->user_stop_count); +} + +/* + * + */ +int +inc_task_userstop( + task_t task) +{ + int i=0; + i = task->user_stop_count; + task->user_stop_count++; + return(i); +} + + +/* + * + */ +boolean_t +thread_should_abort( + thread_shuttle_t th) +{ + return( (!th->top_act || !th->top_act->active || + th->state & TH_ABORT)); +} + +/* + * + */ +boolean_t +current_thread_aborted ( + void) +{ + thread_t th = current_thread(); + + return(!th->top_act || (th->state & TH_ABORT)); +} + +/* + * + */ +void +task_act_iterate_wth_args( + task_t task, + void (*func_callback)(thread_act_t, void *), + void *func_arg) +{ + thread_act_t inc, ninc; + + task_lock(task); + for (inc = (thread_act_t)queue_first(&task->thr_acts); + inc != (thread_act_t)&task->thr_acts; + inc = ninc) { + ninc = (thread_act_t)queue_next(&inc->thr_acts); + (void) (*func_callback)(inc, func_arg); + } + task_unlock(task); +} + +void +ipc_port_release( + ipc_port_t port) +{ + ipc_object_release(&(port)->ip_object); +} + +void +thread_ast_set( + thread_act_t act, + ast_t reason) +{ + act->ast |= reason; +} +void +thread_ast_clear( + thread_act_t act, + ast_t reason) +{ + act->ast &= ~(reason); +} + +boolean_t +is_thread_active( + thread_shuttle_t th) +{ + return(th->active); +} + +event_t +get_thread_waitevent( + thread_shuttle_t th) +{ + return(th->wait_event); +} + +kern_return_t 
+get_thread_waitresult( + thread_shuttle_t th) +{ + return(th->wait_result); +} + +kern_return_t +bsd_refvm_object(vm_object_t object) +{ + vm_object_reference(object); + return(KERN_SUCCESS); +} + diff --git a/osfmk/kern/call_entry.h b/osfmk/kern/call_entry.h new file mode 100644 index 000000000..df0f3b916 --- /dev/null +++ b/osfmk/kern/call_entry.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc. + * All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Private declarations for thread-based callouts. + * + * HISTORY + * + * 10 July 1999 (debo) + * Pulled into Mac OS X (microkernel). + * + * 3 July 1993 (debo) + * Created. 
 */

#ifndef _KERN_CALL_ENTRY_H_
#define _KERN_CALL_ENTRY_H_

#ifdef MACH_KERNEL_PRIVATE
/* NOTE(review): header name stripped in extraction (queue.h, presumably
 * — queue_chain_t is used below).  Restore before building. */
#include

/* Opaque argument type for a callout; two are passed to each callout. */
typedef void		*call_entry_param_t;
typedef void		(*call_entry_func_t)(
				call_entry_param_t	param0,
				call_entry_param_t	param1);

/*
 * One thread-based callout: queue linkage, the function and its two
 * parameters, an absolute-time deadline, and the scheduling state.
 */
typedef struct call_entry {
    queue_chain_t	q_link;		/* linkage on the callout queue */
    call_entry_func_t	func;		/* function to invoke */
    call_entry_param_t	param0;		/* first argument */
    call_entry_param_t	param1;		/* second argument */
    AbsoluteTime	deadline;	/* when to fire (delayed entries) */
    enum {
	IDLE,				/* not queued */
	PENDING,			/* queued for immediate delivery */
	DELAYED }	state;		/* queued awaiting deadline */
} call_entry_data_t;

/*
 * Initialize a call entry with its function and first parameter;
 * param1 is supplied at enqueue time.  Leaves the entry IDLE.
 */
#define	call_entry_setup(entry, pfun, p0)			\
MACRO_BEGIN							\
	(entry)->func		= (call_entry_func_t)(pfun);	\
	(entry)->param0		= (call_entry_param_t)(p0);	\
	(entry)->state		= IDLE;				\
MACRO_END

#endif /* MACH_KERNEL_PRIVATE */

#endif /* _KERN_CALL_ENTRY_H_ */

/* ---- begin new file: osfmk/kern/clock.c ---- */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * File:	kern/clock.c
 * Purpose:	Routines for the creation and use of kernel
 *		alarm clock services.
This file and the ipc + * routines in kern/ipc_clock.c constitute the + * machine-independent clock service layer. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +/* + * Exported interface + */ + +#include +#include + +/* local data declarations */ +decl_simple_lock_data(static,ClockLock) /* clock system synchronization */ +static struct zone *alarm_zone; /* zone for user alarms */ +static struct alarm *alrmfree; /* alarm free list pointer */ +static struct alarm *alrmdone; /* alarm done list pointer */ +static long alrm_seqno; /* uniquely identifies alarms */ +static thread_call_data_t alarm_deliver; + +/* backwards compatibility */ +int hz = HZ; /* GET RID OF THIS !!! */ +int tick = (1000000 / HZ); /* GET RID OF THIS !!! */ + +/* external declarations */ +extern struct clock clock_list[]; +extern int clock_count; + +/* local clock subroutines */ +static +void flush_alarms( + clock_t clock); + +static +void post_alarm( + clock_t clock, + alarm_t alarm); + +static +int check_time( + alarm_type_t alarm_type, + mach_timespec_t *alarm_time, + mach_timespec_t *clock_time); + +static +void clock_alarm_deliver( + thread_call_param_t p0, + thread_call_param_t p1); + +/* + * Macros to lock/unlock clock system. + */ +#define LOCK_CLOCK(s) \ + s = splclock(); \ + simple_lock(&ClockLock); + +#define UNLOCK_CLOCK(s) \ + simple_unlock(&ClockLock); \ + splx(s); + +/* + * Configure the clock system. (Not sure if we need this, + * as separate from clock_init()). + */ +void +clock_config(void) +{ + clock_t clock; + register int i; + + if (cpu_number() != master_cpu) + panic("clock_config"); + + /* + * Configure clock devices. 
+ */ + simple_lock_init(&ClockLock, ETAP_MISC_CLOCK); + for (i = 0; i < clock_count; i++) { + clock = &clock_list[i]; + if (clock->cl_ops) { + if ((*clock->cl_ops->c_config)() == 0) + clock->cl_ops = 0; + } + } + + /* start alarm sequence numbers at 0 */ + alrm_seqno = 0; +} + +/* + * Initialize the clock system. + */ +void +clock_init(void) +{ + clock_t clock; + register int i; + + /* + * Initialize basic clock structures. + */ + for (i = 0; i < clock_count; i++) { + clock = &clock_list[i]; + if (clock->cl_ops) + (*clock->cl_ops->c_init)(); + } +} + +/* + * Initialize the clock ipc service facility. + */ +void +clock_service_create(void) +{ + clock_t clock; + register int i; + + mk_timer_initialize(); + + /* + * Initialize ipc clock services. + */ + for (i = 0; i < clock_count; i++) { + clock = &clock_list[i]; + if (clock->cl_ops) { + ipc_clock_init(clock); + ipc_clock_enable(clock); + } + } + + /* + * Initialize clock service alarms. + */ + i = sizeof(struct alarm); + alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms"); + + /* + * Initialize the clock alarm delivery mechanism. + */ + thread_call_setup(&alarm_deliver, clock_alarm_deliver, NULL); +} + +/* + * Get the service port on a clock. + */ +kern_return_t +host_get_clock_service( + host_t host, + clock_id_t clock_id, + clock_t *clock) /* OUT */ +{ + if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) { + *clock = CLOCK_NULL; + return (KERN_INVALID_ARGUMENT); + } + + *clock = &clock_list[clock_id]; + if ((*clock)->cl_ops == 0) + return (KERN_FAILURE); + return (KERN_SUCCESS); +} + +/* + * Get the control port on a clock. 
+ */ +kern_return_t +host_get_clock_control( + host_priv_t host_priv, + clock_id_t clock_id, + clock_t *clock) /* OUT */ +{ + if (host_priv == HOST_PRIV_NULL || clock_id < 0 || clock_id >= clock_count) { + *clock = CLOCK_NULL; + return (KERN_INVALID_ARGUMENT); + } + + *clock = &clock_list[clock_id]; + if ((*clock)->cl_ops == 0) + return (KERN_FAILURE); + return (KERN_SUCCESS); +} + +/* + * Get the current clock time. + */ +kern_return_t +clock_get_time( + clock_t clock, + mach_timespec_t *cur_time) /* OUT */ +{ + if (clock == CLOCK_NULL) + return (KERN_INVALID_ARGUMENT); + return ((*clock->cl_ops->c_gettime)(cur_time)); +} + +/* + * Get clock attributes. + */ +kern_return_t +clock_get_attributes( + clock_t clock, + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + kern_return_t (*getattr)( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); + + if (clock == CLOCK_NULL) + return (KERN_INVALID_ARGUMENT); + if (getattr = clock->cl_ops->c_getattr) + return((*getattr)(flavor, attr, count)); + else + return (KERN_FAILURE); +} + +/* + * Set the current clock time. + */ +kern_return_t +clock_set_time( + clock_t clock, + mach_timespec_t new_time) +{ + mach_timespec_t *clock_time; + kern_return_t (*settime)( + mach_timespec_t *clock_time); + + if (clock == CLOCK_NULL) + return (KERN_INVALID_ARGUMENT); + if ((settime = clock->cl_ops->c_settime) == 0) + return (KERN_FAILURE); + clock_time = &new_time; + if (BAD_MACH_TIMESPEC(clock_time)) + return (KERN_INVALID_VALUE); + + /* + * Flush all outstanding alarms. + */ + flush_alarms(clock); + + /* + * Set the new time. + */ + return ((*settime)(clock_time)); +} + +/* + * Set the clock alarm resolution. 
+ */ +kern_return_t +clock_set_attributes( + clock_t clock, + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t count) +{ + kern_return_t (*setattr)( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t count); + + if (clock == CLOCK_NULL) + return (KERN_INVALID_ARGUMENT); + if (setattr = clock->cl_ops->c_setattr) + return ((*setattr)(flavor, attr, count)); + else + return (KERN_FAILURE); +} + +/* + * Setup a clock alarm. + */ +kern_return_t +clock_alarm( + clock_t clock, + alarm_type_t alarm_type, + mach_timespec_t alarm_time, + ipc_port_t alarm_port, + mach_msg_type_name_t alarm_port_type) +{ + alarm_t alarm; + mach_timespec_t clock_time; + int chkstat; + kern_return_t reply_code; + spl_t s; + + if (clock == CLOCK_NULL) + return (KERN_INVALID_ARGUMENT); + if (clock->cl_ops->c_setalrm == 0) + return (KERN_FAILURE); + if (IP_VALID(alarm_port) == 0) + return (KERN_INVALID_CAPABILITY); + + /* + * Check alarm parameters. If parameters are invalid, + * send alarm message immediately. + */ + (*clock->cl_ops->c_gettime)(&clock_time); + chkstat = check_time(alarm_type, &alarm_time, &clock_time); + if (chkstat <= 0) { + reply_code = (chkstat < 0 ? KERN_INVALID_VALUE : KERN_SUCCESS); + clock_alarm_reply(alarm_port, alarm_port_type, + reply_code, alarm_type, clock_time); + return (KERN_SUCCESS); + } + + /* + * Get alarm and add to clock alarm list. + */ + + LOCK_CLOCK(s); + if ((alarm = alrmfree) == 0) { + UNLOCK_CLOCK(s); + alarm = (alarm_t) zalloc(alarm_zone); + if (alarm == 0) + return (KERN_RESOURCE_SHORTAGE); + LOCK_CLOCK(s); + } + else + alrmfree = alarm->al_next; + + alarm->al_status = ALARM_CLOCK; + alarm->al_time = alarm_time; + alarm->al_type = alarm_type; + alarm->al_port = alarm_port; + alarm->al_port_type = alarm_port_type; + alarm->al_clock = clock; + alarm->al_seqno = alrm_seqno++; + post_alarm(clock, alarm); + UNLOCK_CLOCK(s); + + return (KERN_SUCCESS); +} + +/* + * Sleep on a clock. System trap. 
User-level libmach clock_sleep + * interface call takes a mach_timespec_t sleep_time argument which it + * converts to sleep_sec and sleep_nsec arguments which are then + * passed to clock_sleep_trap. + */ +kern_return_t +clock_sleep_trap( + mach_port_name_t clock_name, + sleep_type_t sleep_type, + int sleep_sec, + int sleep_nsec, + mach_timespec_t *wakeup_time) +{ + clock_t clock; + mach_timespec_t swtime; + kern_return_t rvalue; + + /* + * Convert the trap parameters. + */ + if (clock_name != MACH_PORT_NULL) + clock = port_name_to_clock(clock_name); + else + clock = &clock_list[SYSTEM_CLOCK]; + + swtime.tv_sec = sleep_sec; + swtime.tv_nsec = sleep_nsec; + + /* + * Call the actual clock_sleep routine. + */ + rvalue = clock_sleep_internal(clock, sleep_type, &swtime); + + /* + * Return current time as wakeup time. + */ + if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) { + copyout((char *)&swtime, (char *)wakeup_time, + sizeof(mach_timespec_t)); + } + return (rvalue); +} + +/* + * Kernel internally callable clock sleep routine. The calling + * thread is suspended until the requested sleep time is reached. + */ +kern_return_t +clock_sleep_internal( + clock_t clock, + sleep_type_t sleep_type, + mach_timespec_t *sleep_time) +{ + alarm_t alarm; + mach_timespec_t clock_time; + kern_return_t rvalue; + int chkstat; + spl_t s; + + if (clock == CLOCK_NULL) + return (KERN_INVALID_ARGUMENT); + if (clock->cl_ops->c_setalrm == 0) + return (KERN_FAILURE); + + /* + * Check sleep parameters. If parameters are invalid + * return an error, otherwise post alarm request. + */ + (*clock->cl_ops->c_gettime)(&clock_time); + + chkstat = check_time(sleep_type, sleep_time, &clock_time); + if (chkstat < 0) + return (KERN_INVALID_VALUE); + rvalue = KERN_SUCCESS; + if (chkstat > 0) { + /* + * Get alarm and add to clock alarm list. 
+ */ + + LOCK_CLOCK(s); + if ((alarm = alrmfree) == 0) { + UNLOCK_CLOCK(s); + alarm = (alarm_t) zalloc(alarm_zone); + if (alarm == 0) + return (KERN_RESOURCE_SHORTAGE); + LOCK_CLOCK(s); + } + else + alrmfree = alarm->al_next; + + alarm->al_time = *sleep_time; + alarm->al_status = ALARM_SLEEP; + post_alarm(clock, alarm); + + /* + * Wait for alarm to occur. + */ + assert_wait((event_t)alarm, THREAD_ABORTSAFE); + UNLOCK_CLOCK(s); + /* should we force spl(0) at this point? */ + thread_block((void (*)(void)) 0); + /* we should return here at ipl0 */ + + /* + * Note if alarm expired normally or whether it + * was aborted. If aborted, delete alarm from + * clock alarm list. Return alarm to free list. + */ + LOCK_CLOCK(s); + if (alarm->al_status != ALARM_DONE) { + /* This means we were interrupted and that + thread->wait_result != THREAD_AWAKENED. */ + if ((alarm->al_prev)->al_next = alarm->al_next) + (alarm->al_next)->al_prev = alarm->al_prev; + rvalue = KERN_ABORTED; + } + *sleep_time = alarm->al_time; + alarm->al_status = ALARM_FREE; + alarm->al_next = alrmfree; + alrmfree = alarm; + UNLOCK_CLOCK(s); + } + else + *sleep_time = clock_time; + + return (rvalue); +} + +/* + * CLOCK INTERRUPT SERVICE ROUTINES. + */ + +/* + * Service clock alarm interrupts. Called from machine dependent + * layer at splclock(). The clock_id argument specifies the clock, + * and the clock_time argument gives that clock's current time. + */ +void +clock_alarm_intr( + clock_id_t clock_id, + mach_timespec_t *clock_time) +{ + clock_t clock; + register alarm_t alrm1; + register alarm_t alrm2; + mach_timespec_t *alarm_time; + spl_t s; + + clock = &clock_list[clock_id]; + + /* + * Update clock alarm list. All alarms that are due are moved + * to the alarmdone list to be serviced by the alarm_thread. 
+ */ + + LOCK_CLOCK(s); + alrm1 = (alarm_t) &clock->cl_alarm; + while (alrm2 = alrm1->al_next) { + alarm_time = &alrm2->al_time; + if (CMP_MACH_TIMESPEC(alarm_time, clock_time) > 0) + break; + + /* + * Alarm has expired, so remove it from the + * clock alarm list. + */ + if (alrm1->al_next = alrm2->al_next) + (alrm1->al_next)->al_prev = alrm1; + + /* + * If a clock_sleep() alarm, wakeup the thread + * which issued the clock_sleep() call. + */ + if (alrm2->al_status == ALARM_SLEEP) { + alrm2->al_next = 0; + alrm2->al_status = ALARM_DONE; + alrm2->al_time = *clock_time; + thread_wakeup((event_t)alrm2); + } + + /* + * If a clock_alarm() alarm, place the alarm on + * the alarm done list and schedule the alarm + * delivery mechanism. + */ + else { + assert(alrm2->al_status == ALARM_CLOCK); + if (alrm2->al_next = alrmdone) + alrmdone->al_prev = alrm2; + else + thread_call_enter(&alarm_deliver); + alrm2->al_prev = (alarm_t) &alrmdone; + alrmdone = alrm2; + alrm2->al_status = ALARM_DONE; + alrm2->al_time = *clock_time; + } + } + + /* + * Setup the clock dependent layer to deliver another + * interrupt for the next pending alarm. + */ + if (alrm2) + (*clock->cl_ops->c_setalrm)(alarm_time); + UNLOCK_CLOCK(s); +} + +/* + * ALARM DELIVERY ROUTINES. + */ + +static void +clock_alarm_deliver( + thread_call_param_t p0, + thread_call_param_t p1) +{ + register alarm_t alrm; + kern_return_t code; + spl_t s; + + LOCK_CLOCK(s); + while (alrm = alrmdone) { + if (alrmdone = alrm->al_next) + alrmdone->al_prev = (alarm_t) &alrmdone; + UNLOCK_CLOCK(s); + + code = (alrm->al_status == ALARM_DONE? 
KERN_SUCCESS: KERN_ABORTED); + if (alrm->al_port != IP_NULL) { + /* Deliver message to designated port */ + if (IP_VALID(alrm->al_port)) { + clock_alarm_reply(alrm->al_port, alrm->al_port_type, code, + alrm->al_type, alrm->al_time); + } + + LOCK_CLOCK(s); + alrm->al_status = ALARM_FREE; + alrm->al_next = alrmfree; + alrmfree = alrm; + } + else + panic("clock_alarm_deliver"); + } + + UNLOCK_CLOCK(s); +} + +/* + * CLOCK PRIVATE SERVICING SUBROUTINES. + */ + +/* + * Flush all pending alarms on a clock. All alarms + * are activated and timestamped correctly, so any + * programs waiting on alarms/threads will proceed + * with accurate information. + */ +static +void +flush_alarms( + clock_t clock) +{ + register alarm_t alrm1, alrm2; + spl_t s; + + /* + * Flush all outstanding alarms. + */ + LOCK_CLOCK(s); + alrm1 = (alarm_t) &clock->cl_alarm; + while (alrm2 = alrm1->al_next) { + /* + * Remove alarm from the clock alarm list. + */ + if (alrm1->al_next = alrm2->al_next) + (alrm1->al_next)->al_prev = alrm1; + + /* + * If a clock_sleep() alarm, wakeup the thread + * which issued the clock_sleep() call. + */ + if (alrm2->al_status == ALARM_SLEEP) { + alrm2->al_next = 0; + thread_wakeup((event_t)alrm2); + } + else { + /* + * If a clock_alarm() alarm, place the alarm on + * the alarm done list and wakeup the dedicated + * kernel alarm_thread to service the alarm. + */ + assert(alrm2->al_status == ALARM_CLOCK); + if (alrm2->al_next = alrmdone) + alrmdone->al_prev = alrm2; + else + thread_wakeup((event_t)&alrmdone); + alrm2->al_prev = (alarm_t) &alrmdone; + alrmdone = alrm2; + } + } + UNLOCK_CLOCK(s); +} + +/* + * Post an alarm on a clock's active alarm list. The alarm is + * inserted in time-order into the clock's active alarm list. + * Always called from within a LOCK_CLOCK() code section. 
 */
static
void
post_alarm(
	clock_t			clock,
	alarm_t			alarm)
{
	register alarm_t	alrm1, alrm2;
	mach_timespec_t		*alarm_time;
	mach_timespec_t		*queue_time;

	/*
	 * Traverse alarm list until queue time is greater
	 * than alarm time, then insert alarm.
	 */
	alarm_time = &alarm->al_time;
	alrm1 = (alarm_t) &clock->cl_alarm;	/* list head doubles as sentinel */
	while (alrm2 = alrm1->al_next) {
		queue_time = &alrm2->al_time;
		if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0)
			break;
		alrm1 = alrm2;
	}
	/* Doubly link the new alarm between alrm1 and alrm2 (alrm2 may be 0
	 * when appending at the tail). */
	alrm1->al_next = alarm;
	alarm->al_next = alrm2;
	alarm->al_prev = alrm1;
	if (alrm2)
		alrm2->al_prev = alarm;

	/*
	 * If the inserted alarm is the 'earliest' alarm,
	 * reset the device layer alarm time accordingly.
	 */
	if (clock->cl_alarm.al_next == alarm)
		(*clock->cl_ops->c_setalrm)(alarm_time);
}

/*
 * Check the validity of 'alarm_time' and 'alarm_type'.  If either
 * argument is invalid, return a negative value.  If the 'alarm_time'
 * is now, return a 0 value.  If the 'alarm_time' is in the future,
 * return a positive value.
 *
 * Side effect: TIME_RELATIVE alarm times are converted in place to
 * absolute times by adding the current clock time.
 */
static
int
check_time(
	alarm_type_t		alarm_type,
	mach_timespec_t		*alarm_time,
	mach_timespec_t		*clock_time)
{
	int			result;

	if (BAD_ALRMTYPE(alarm_type))
		return (-1);
	if (BAD_MACH_TIMESPEC(alarm_time))
		return (-1);
	if ((alarm_type & ALRMTYPE) == TIME_RELATIVE)
		ADD_MACH_TIMESPEC(alarm_time, clock_time);

	result = CMP_MACH_TIMESPEC(alarm_time, clock_time);

	/* A time already in the past is reported as "now" (0). */
	return ((result >= 0)? result: 0);
}

/*
 * Return the current time of the system clock.
 */
mach_timespec_t
clock_get_system_value(void)
{
	clock_t				clock = &clock_list[SYSTEM_CLOCK];
	mach_timespec_t			value;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}

/*
 * Return the current time of the calendar (wall) clock.
 */
mach_timespec_t
clock_get_calendar_value(void)
{
	clock_t				clock = &clock_list[CALENDAR_CLOCK];
	mach_timespec_t			value = MACH_TIMESPEC_ZERO;

	(void) (*clock->cl_ops->c_gettime)(&value);

	return value;
}

/*
 * Set the calendar (wall) clock to the given time.
 */
void
clock_set_calendar_value(
	mach_timespec_t			value)
{
	clock_t				clock = &clock_list[CALENDAR_CLOCK];

	(void) (*clock->cl_ops->c_settime)(&value);
}

/*
 * Advance *deadline by interval.  If the result has already passed
 * (relative to abstime), re-anchor on abstime, then on the actual
 * current uptime, so the next deadline is always in the future.
 */
void
clock_deadline_for_periodic_event(
	AbsoluteTime			interval,
	AbsoluteTime			abstime,
	AbsoluteTime			*deadline)
{
	assert(AbsoluteTime_to_scalar(&interval) != 0);

	ADD_ABSOLUTETIME(deadline, &interval);

	if (	AbsoluteTime_to_scalar(deadline)	<=
			AbsoluteTime_to_scalar(&abstime)		) {
		/* Fell behind: restart the period from the caller's abstime. */
		*deadline = abstime;
		clock_get_uptime(&abstime);
		ADD_ABSOLUTETIME(deadline, &interval);

		if (	AbsoluteTime_to_scalar(deadline)	<=
				AbsoluteTime_to_scalar(&abstime)	) {
			/* Still behind: restart from the freshly-read uptime. */
			*deadline = abstime;
			ADD_ABSOLUTETIME(deadline, &interval);
		}
	}
}

/*
 * Copy timebase conversion factors out to user space.  delta and the
 * processor-to-absolute ratio are fixed at 1; absolute-to-nanoseconds
 * comes from clock_timebase_info().
 * NOTE(review): copyout return values are ignored here — a bad user
 * pointer is silently dropped; confirm this is intended.
 */
void
mk_timebase_info(
	uint32_t			*delta,
	uint32_t			*abs_to_ns_numer,
	uint32_t			*abs_to_ns_denom,
	uint32_t			*proc_to_abs_numer,
	uint32_t			*proc_to_abs_denom)
{
	mach_timebase_info_data_t	info;
	uint32_t			one = 1;

	clock_timebase_info(&info);

	copyout((void *)&one, (void *)delta, sizeof (uint32_t));

	copyout((void *)&info.numer, (void *)abs_to_ns_numer, sizeof (uint32_t));
	copyout((void *)&info.denom, (void *)abs_to_ns_denom, sizeof (uint32_t));

	copyout((void *)&one, (void *)proc_to_abs_numer, sizeof (uint32_t));
	copyout((void *)&one, (void *)proc_to_abs_denom, sizeof (uint32_t));
}

/*
 * Trap: copy the timebase info structure out to user space.
 * NOTE(review): copyout return value ignored; KERN_SUCCESS is returned
 * unconditionally — confirm this is intended.
 */
kern_return_t
mach_timebase_info(
	mach_timebase_info_t		out_info)
{
	mach_timebase_info_data_t	info;

	clock_timebase_info(&info);

	copyout((void *)&info, (void *)out_info, sizeof (info));

	return (KERN_SUCCESS);
}

kern_return_t
+mach_wait_until( + uint64_t deadline) +{ + int wait_result; + + assert_wait((event_t)&mach_wait_until, THREAD_ABORTSAFE); + thread_set_timer_deadline(scalar_to_AbsoluteTime(&deadline)); + wait_result = thread_block((void (*)) 0); + if (wait_result != THREAD_TIMED_OUT) + thread_cancel_timer(); + + return ((wait_result == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS); +} diff --git a/osfmk/kern/clock.h b/osfmk/kern/clock.h new file mode 100644 index 000000000..0ed37d707 --- /dev/null +++ b/osfmk/kern/clock.h @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: kern/clock.h + * Purpose: Data structures for the kernel alarm clock + * facility. This file is used only by kernel + * level clock facility routines. + */ + +#ifndef _KERN_CLOCK_H_ +#define _KERN_CLOCK_H_ + +#include + +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include + +/* + * Actual clock alarm structure. Used for user clock_sleep() and + * clock_alarm() calls. 
Alarms are allocated from the alarm free + * list and entered in time priority order into the active alarm + * chain of the target clock. + */ +struct alarm { + struct alarm *al_next; /* next alarm in chain */ + struct alarm *al_prev; /* previous alarm in chain */ + int al_status; /* alarm status */ + mach_timespec_t al_time; /* alarm time */ + struct { /* message alarm data */ + int type; /* alarm type */ + ipc_port_t port; /* alarm port */ + mach_msg_type_name_t + port_type; /* alarm port type */ + struct clock *clock; /* alarm clock */ + void *data; /* alarm data */ + } al_alrm; +#define al_type al_alrm.type +#define al_port al_alrm.port +#define al_port_type al_alrm.port_type +#define al_clock al_alrm.clock +#define al_data al_alrm.data + long al_seqno; /* alarm sequence number */ +}; +typedef struct alarm alarm_data_t; + +/* alarm status */ +#define ALARM_FREE 0 /* alarm is on free list */ +#define ALARM_SLEEP 1 /* active clock_sleep() */ +#define ALARM_CLOCK 2 /* active clock_alarm() */ +#define ALARM_DONE 4 /* alarm has expired */ + +/* + * Clock operations list structure. Contains vectors to machine + * dependent clock routines. The routines c_config, c_init, and + * c_gettime must be implemented for every clock device. + */ +struct clock_ops { + int (*c_config)(void); /* configuration */ + + int (*c_init)(void); /* initialize */ + + kern_return_t (*c_gettime)( /* get time */ + mach_timespec_t *cur_time); + + kern_return_t (*c_settime)( /* set time */ + mach_timespec_t *clock_time); + + kern_return_t (*c_getattr)( /* get attributes */ + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); + + kern_return_t (*c_setattr)( /* set attributes */ + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t count); + + void (*c_setalrm)( /* set next alarm */ + mach_timespec_t *alarm_time); +}; +typedef struct clock_ops *clock_ops_t; +typedef struct clock_ops clock_ops_data_t; + +/* + * Actual clock object data structure. 
Contains the machine + * dependent operations list, clock operations ports, and a + * chain of pending alarms. + */ +struct clock { + clock_ops_t cl_ops; /* operations list */ + struct ipc_port *cl_service; /* service port */ + struct ipc_port *cl_control; /* control port */ + struct { /* alarm chain head */ + struct alarm *al_next; + } cl_alarm; +}; +typedef struct clock clock_data_t; + +/* + * Configure the clock system. + */ +extern void clock_config(void); +/* + * Initialize the clock system. + */ +extern void clock_init(void); + +/* + * Initialize the clock ipc service facility. + */ +extern void clock_service_create(void); + +/* + * Service clock alarm interrupts. Called from machine dependent + * layer at splclock(). The clock_id argument specifies the clock, + * and the clock_time argument gives that clock's current time. + */ +extern void clock_alarm_intr( + clock_id_t clock_id, + mach_timespec_t *clock_time); + +extern kern_return_t clock_sleep_internal( + clock_t clock, + sleep_type_t sleep_type, + mach_timespec_t *sleep_time); + +typedef void (*clock_timer_func_t)( + AbsoluteTime timestamp); + +extern void clock_set_timer_func( + clock_timer_func_t func); + +extern void clock_set_timer_deadline( + AbsoluteTime deadline); + +extern void mk_timebase_info( + uint32_t *delta, + uint32_t *abs_to_ns_numer, + uint32_t *abs_to_ns_denom, + uint32_t *proc_to_abs_numer, + uint32_t *proc_to_abs_denom); + +#define scalar_to_AbsoluteTime(x) (*(AbsoluteTime *)(x)) + +#endif /* MACH_KERNEL_PRIVATE */ + +#define MACH_TIMESPEC_SEC_MAX (0 - 1) +#define MACH_TIMESPEC_NSEC_MAX (NSEC_PER_SEC - 1) + +#define MACH_TIMESPEC_MAX ((mach_timespec_t) { \ + MACH_TIMESPEC_SEC_MAX, \ + MACH_TIMESPEC_NSEC_MAX } ) +#define MACH_TIMESPEC_ZERO ((mach_timespec_t) { 0, 0 } ) + +#define ADD_MACH_TIMESPEC_NSEC(t1, nsec) \ + do { \ + (t1)->tv_nsec += (clock_res_t)(nsec); \ + if ((clock_res_t)(nsec) > 0 && \ + (t1)->tv_nsec >= NSEC_PER_SEC) { \ + (t1)->tv_nsec -= NSEC_PER_SEC; \ + (t1)->tv_sec 
+= 1; \ + } \ + else if ((clock_res_t)(nsec) < 0 && \ + (t1)->tv_nsec < 0) { \ + (t1)->tv_nsec += NSEC_PER_SEC; \ + (t1)->tv_sec -= 1; \ + } \ + } while (0) + +extern mach_timespec_t clock_get_system_value(void); + +extern mach_timespec_t clock_get_calendar_value(void); + +extern void clock_set_calendar_value( + mach_timespec_t value); + +extern void clock_adjust_calendar( + clock_res_t nsec); + +extern void clock_initialize_calendar(void); + +extern mach_timespec_t clock_get_calendar_offset(void); + +#define AbsoluteTime_to_scalar(x) (*(uint64_t *)(x)) + +/* t1 < = > t2 */ +#define CMP_ABSOLUTETIME(t1, t2) \ + (AbsoluteTime_to_scalar(t1) > \ + AbsoluteTime_to_scalar(t2)? (int)+1 : \ + (AbsoluteTime_to_scalar(t1) < \ + AbsoluteTime_to_scalar(t2)? (int)-1 : 0)) + +/* t1 += t2 */ +#define ADD_ABSOLUTETIME(t1, t2) \ + (AbsoluteTime_to_scalar(t1) += \ + AbsoluteTime_to_scalar(t2)) + +/* t1 -= t2 */ +#define SUB_ABSOLUTETIME(t1, t2) \ + (AbsoluteTime_to_scalar(t1) -= \ + AbsoluteTime_to_scalar(t2)) + +#define ADD_ABSOLUTETIME_TICKS(t1, ticks) \ + (AbsoluteTime_to_scalar(t1) += \ + (integer_t)(ticks)) + +extern void clock_timebase_info( + mach_timebase_info_t info); + +extern void clock_get_uptime( + AbsoluteTime *result); + +extern void clock_interval_to_deadline( + natural_t interval, + natural_t scale_factor, + AbsoluteTime *result); + +extern void clock_interval_to_absolutetime_interval( + natural_t interval, + natural_t scale_factor, + AbsoluteTime *result); + +extern void clock_absolutetime_interval_to_deadline( + AbsoluteTime abstime, + AbsoluteTime *result); + +extern void clock_deadline_for_periodic_event( + AbsoluteTime interval, + AbsoluteTime abstime, + AbsoluteTime *deadline); + +extern void clock_delay_for_interval( + natural_t interval, + natural_t scale_factor); + +extern void clock_delay_until( + AbsoluteTime deadline); + +extern void absolutetime_to_nanoseconds( + AbsoluteTime abstime, + UInt64 *result); + +extern void nanoseconds_to_absolutetime( + 
UInt64 nanoseconds, + AbsoluteTime *result); + +#endif /* _KERN_CLOCK_H_ */ diff --git a/osfmk/kern/counters.c b/osfmk/kern/counters.c new file mode 100644 index 000000000..5116219f0 --- /dev/null +++ b/osfmk/kern/counters.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:35 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.13.7 1995/02/24 15:19:11 alanl + * Merge with DIPC2_SHARED. 
+ * [1995/02/22 20:31:50 alanl] + * + * Revision 1.1.21.1 1994/11/04 10:06:28 dwm + * mk6 CR668 - 1.3b26 merge + * remove unused counters + * * Revision 1.1.2.4 1994/01/06 17:53:55 jeffc + * CR9854 -- Missing exception_raise_state counters + * CR10394 -- instrument vm_map_simplify + * * End1.3merge + * [1994/11/04 09:20:23 dwm] + * + * Revision 1.1.13.5 1994/09/23 02:15:57 ezf + * change marker to not FREE + * [1994/09/22 21:32:09 ezf] + * + * Revision 1.1.13.4 1994/09/16 06:29:22 dwm + * mk6 CR551 - remove unused SAFE_VM_FAULT pseudo-continuation, + * remove unused args from vm_page_wait, vm_fault(_page). + * Also, fix vm_page_wait counters. + * [1994/09/16 06:23:24 dwm] + * + * Revision 1.1.13.3 1994/09/10 21:45:51 bolinger + * Merge up to NMK17.3 + * [1994/09/08 19:57:27 bolinger] + * + * Revision 1.1.13.2 1994/06/21 17:28:40 dlb + * Add two vm_fault counters from latest NMK17 version. + * [94/06/17 dlb] + * + * Revision 1.1.13.1 1994/06/14 16:59:58 bolinger + * Merge up to NMK17.2. + * [1994/06/14 16:53:39 bolinger] + * + * Revision 1.1.8.2 1994/03/17 22:40:02 dwm + * dead code removal: thread swapping. + * [1994/03/17 21:29:18 dwm] + * + * Revision 1.1.8.1 1993/11/18 18:14:54 dwm + * Coloc: remove continuations entirely; + * [1993/11/18 18:09:54 dwm] + * + * Revision 1.1.2.3 1993/06/07 22:12:34 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:04:06 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:35:48 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:12:06 jeffc] + * + * Revision 1.1 1992/09/30 02:08:53 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 16:40:19 mrt + * Correcting copyright + * + * Revision 2.2 91/03/16 15:15:51 rpd + * Created. 
+ * [91/03/13 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include + +#include + +/* + * We explicitly initialize the counters to make + * them contiguous in the kernel's data space. + * This makes them easier to examine with ddb. 
+ */ + +mach_counter_t c_thread_invoke_csw = 0; +mach_counter_t c_thread_invoke_hits = 0; +mach_counter_t c_thread_invoke_misses = 0; +mach_counter_t c_thread_invoke_same = 0; +mach_counter_t c_thread_invoke_same_cont = 0; +mach_counter_t c_incoming_interrupts = 0; +mach_counter_t c_syscalls_unix = 0; +mach_counter_t c_syscalls_mach = 0; + +#if MACH_COUNTERS +mach_counter_t c_action_thread_block = 0; +mach_counter_t c_ast_taken_block = 0; +mach_counter_t c_clock_ticks = 0; +mach_counter_t c_dev_io_blocks = 0; +mach_counter_t c_dev_io_tries = 0; +mach_counter_t c_idle_thread_block = 0; +mach_counter_t c_idle_thread_handoff = 0; +mach_counter_t c_io_done_thread_block = 0; +mach_counter_t c_ipc_mqueue_receive_block_kernel = 0; +mach_counter_t c_ipc_mqueue_receive_block_user = 0; +mach_counter_t c_ipc_mqueue_send_block = 0; +mach_counter_t c_net_thread_block = 0; +mach_counter_t c_reaper_thread_block = 0; +mach_counter_t c_sched_thread_block = 0; +mach_counter_t c_stacks_current = 0; +mach_counter_t c_stacks_max = 0; +mach_counter_t c_stacks_min = 0; +mach_counter_t c_swtch_block = 0; +mach_counter_t c_swtch_pri_block = 0; +mach_counter_t c_thread_switch_block = 0; +mach_counter_t c_thread_switch_handoff = 0; +mach_counter_t c_vm_fault_page_block_backoff_kernel = 0; +mach_counter_t c_vm_fault_page_block_busy_kernel = 0; +mach_counter_t c_vm_fault_retry_on_w_prot = 0; +mach_counter_t c_vm_fault_wait_on_unlock = 0; +mach_counter_t c_vm_map_simplified_lower = 0; +mach_counter_t c_vm_map_simplified_upper = 0; +mach_counter_t c_vm_map_simplify_called = 0; +mach_counter_t c_vm_page_wait_block = 0; +mach_counter_t c_vm_pageout_block = 0; +mach_counter_t c_vm_pageout_scan_block = 0; + +/* NOTE(review): removed duplicate definitions of c_vm_fault_retry_on_w_prot and c_vm_fault_wait_on_unlock; they are now explicitly initialized in sorted order above (one definition each). */ +#endif /* MACH_COUNTERS */ diff --git a/osfmk/kern/counters.h b/osfmk/kern/counters.h new file mode 100644 index 000000000..7921e0172 --- /dev/null +++ b/osfmk/kern/counters.h @@ -0,0 +1,213 @@ +/* + * Copyright
(c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:35 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.13.7 1995/02/24 15:19:14 alanl + * Merge with DIPC2_SHARED. + * [1995/02/22 20:19:55 alanl] + * + * Revision 1.1.19.4 1994/11/04 10:16:23 dwm + * mk6 CR668 - 1.3b26 merge + * add counters, then remove unused items + * [1994/11/04 09:45:39 dwm] + * + * Revision 1.1.13.5 1994/09/23 02:16:08 ezf + * change marker to not FREE + * [1994/09/22 21:32:13 ezf] + * + * Revision 1.1.13.4 1994/09/16 06:29:25 dwm + * mk6 CR551 - remove unused SAFE_VM_FAULT pseudo-continuation, + * remove unused args from vm_page_wait, vm_fault(_page). + * Fix vm_page_wait counters, and rm thread_handoff counter. 
+ * [1994/09/16 06:23:26 dwm] + * + * Revision 1.1.13.3 1994/09/10 21:45:55 bolinger + * Merge up to NMK17.3 + * [1994/09/08 19:57:29 bolinger] + * + * Revision 1.1.13.2 1994/06/21 17:28:43 dlb + * Add two vm_fault counters from NMK17. + * [94/06/17 dlb] + * + * Revision 1.1.10.3 1994/06/15 09:12:05 paire + * Corrected spelling of c_vm_fault_wait_on_unlock variable. + * [94/06/15 paire] + * + * Revision 1.1.13.1 1994/06/14 17:00:01 bolinger + * Merge up to NMK17.2. + * [1994/06/14 16:53:41 bolinger] + * + * Revision 1.1.10.2 1994/05/30 07:37:03 bernadat + * Added new c_vm_fault_retry_on_unlock and c_vm_fault_retry_on_w_prot. + * Sorted the whole list of counters. + * [paire@gr.osf.org] + * [94/05/26 bernadat] + * + * Revision 1.1.10.1 1994/02/11 14:25:21 paire + * Added missing c_exception_raise_state_block and + * c_exception_raise_state_identity_block counters. + * Change from NMK16.1 [93/08/09 paire] + * [94/02/04 paire] + * + * Revision 1.1.2.3 1993/06/07 22:12:36 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:04:11 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:35:54 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:12:09 jeffc] + * + * Revision 1.1 1992/09/30 02:29:32 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 16:40:30 mrt + * Correcting copyright + * + * Revision 2.2 91/03/16 15:16:06 rpd + * Created. + * [91/03/13 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _KERN_COUNTERS_ +#define _KERN_COUNTERS_ + +#include + +/* + * We can count various interesting events and paths. + * + * Use counter() to change the counters, eg: + * counter(c_idle_thread_block++); + * Use counter_always() for non-conditional counters. + */ + +#define counter_always(code) code + +#if MACH_COUNTERS + +#define counter(code) counter_always(code) + +#else /* MACH_COUNTERS */ + +#define counter(code) + +#endif /* MACH_COUNTERS */ + +/* + * We define the counters with individual integers, + * instead of a big structure, so that ddb + * will know the addresses of the counters. 
+ */ + +typedef unsigned int mach_counter_t; + +extern mach_counter_t c_thread_invoke_csw; +extern mach_counter_t c_thread_invoke_same; +extern mach_counter_t c_thread_invoke_same_cont; +extern mach_counter_t c_thread_invoke_misses; +extern mach_counter_t c_thread_invoke_hits; +extern mach_counter_t c_incoming_interrupts; +extern mach_counter_t c_syscalls_unix; +extern mach_counter_t c_syscalls_mach; + +#if MACH_COUNTERS +extern mach_counter_t c_action_thread_block; +extern mach_counter_t c_ast_taken_block; +extern mach_counter_t c_clock_ticks; +extern mach_counter_t c_dev_io_blocks; +extern mach_counter_t c_dev_io_tries; +extern mach_counter_t c_idle_thread_block; +extern mach_counter_t c_idle_thread_handoff; +extern mach_counter_t c_io_done_thread_block; +extern mach_counter_t c_ipc_mqueue_receive_block_kernel; +extern mach_counter_t c_ipc_mqueue_receive_block_user; +extern mach_counter_t c_ipc_mqueue_send_block; +extern mach_counter_t c_net_thread_block; +extern mach_counter_t c_reaper_thread_block; +extern mach_counter_t c_sched_thread_block; +extern mach_counter_t c_stacks_current; +extern mach_counter_t c_stacks_max; +extern mach_counter_t c_stacks_min; +extern mach_counter_t c_swtch_block; +extern mach_counter_t c_swtch_pri_block; +extern mach_counter_t c_thread_switch_block; +extern mach_counter_t c_thread_switch_handoff; +extern mach_counter_t c_vm_fault_page_block_backoff_kernel; +extern mach_counter_t c_vm_fault_page_block_busy_kernel; +extern mach_counter_t c_vm_fault_retry_on_w_prot; +extern mach_counter_t c_vm_fault_wait_on_unlock; +extern mach_counter_t c_vm_map_simplified_lower; +extern mach_counter_t c_vm_map_simplified_upper; +extern mach_counter_t c_vm_map_simplify_called; +extern mach_counter_t c_vm_page_wait_block; +extern mach_counter_t c_vm_pageout_block; +extern mach_counter_t c_vm_pageout_scan_block; + +/* NOTE(review): removed duplicate declarations of c_vm_fault_retry_on_w_prot and c_vm_fault_wait_on_unlock; each counter is declared exactly once in the sorted list above. */ +#endif /* MACH_COUNTERS */ + +#endif /* 
_KERN_COUNTERS_ */ + diff --git a/osfmk/kern/cpu_data.c b/osfmk/kern/cpu_data.c new file mode 100644 index 000000000..9051e7b27 --- /dev/null +++ b/osfmk/kern/cpu_data.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include +#include + +#include +#include +#include +#include + +int master_cpu = 0; + +#ifdef PPC + +cpu_data_t cpu_data[NCPUS] = + { { THREAD_NULL, /* active_thread */ + 0, /* preemption_level */ + 0, /* simple_lock_count */ + 0 /* interrupt_level */ + }, }; + +#else /* PPC */ + +cpu_data_t cpu_data[NCPUS]; + +#endif /* PPC */ diff --git a/osfmk/kern/cpu_data.h b/osfmk/kern/cpu_data.h new file mode 100644 index 000000000..7ff974ca3 --- /dev/null +++ b/osfmk/kern/cpu_data.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _CPU_DATA_H_ +#define _CPU_DATA_H_ + +#ifdef MACH_KERNEL_PRIVATE +#include +#include + +typedef struct +{ + thread_t active_thread; + int preemption_level; + int simple_lock_count; + int interrupt_level; +#ifdef __I386__ + int cpu_number; /* Logical CPU number */ + int cpu_phys_number; /* Physical CPU Number */ +#endif +} cpu_data_t; + +extern cpu_data_t cpu_data[NCPUS]; + +#include + +#else /* !MACH_KERNEL_PRIVATE */ + +extern thread_t current_thread(void); +#define get_preemption_level() _get_preemption_level() +#define get_simple_lock_count() _get_simple_lock_count() +#define disable_preemption() _disable_preemption() +#define enable_preemption() _enable_preemption() +#define enable_preemption_no_check() _enable_preemption_no_check() +#define mp_disable_preemption() _mp_disable_preemption() +#define mp_enable_preemption() _mp_enable_preemption() +#define mp_enable_preemption_no_check() _mp_enable_preemption_no_check() + +#endif /* !MACH_KERNEL_PRIVATE */ + +#endif /* _CPU_DATA_H_ */ diff --git a/osfmk/kern/cpu_number.h b/osfmk/kern/cpu_number.h new file mode 100644 index 000000000..afcc656f7 --- /dev/null +++ b/osfmk/kern/cpu_number.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_CPU_NUMBER_H_ +#define _KERN_CPU_NUMBER_H_ + +#include + +/* + * Definitions for cpu identification in multi-processors. + */ + +extern int master_cpu; /* 'master' processor - keeps time */ + +#endif /* _KERN_CPU_NUMBER_H_ */ diff --git a/osfmk/kern/debug.c b/osfmk/kern/debug.c new file mode 100644 index 000000000..b0e7f043d --- /dev/null +++ b/osfmk/kern/debug.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int halt_in_debugger = 0; +unsigned int switch_debugger = 0; +unsigned int current_debugger = 0; +unsigned int active_debugger = 0; +unsigned int debug_mode=0; +unsigned int disableDebugOuput = TRUE; +unsigned int systemLogDiags = FALSE; + +int mach_assert = 1; + +const char *panicstr; +decl_simple_lock_data(,panic_lock) +int paniccpu; +volatile int panicwait; +volatile int nestedpanic= 0; +unsigned int panic_is_inited = 0; +unsigned int return_on_panic = 0; +wait_queue_t save_waits[NCPUS]; + +void +Assert( + const char *file, + int line, + const char *expression) +{ + if (!mach_assert) { + return; + } + panic("{%d} Assertion failed: file \"%s\", line %d: %s\n", + cpu_number(), file, line, expression); +} + +/* + * Carefully use the panic_lock. There's always a chance that + * somehow we'll call panic before getting to initialize the + * panic_lock -- in this case, we'll assume that the world is + * in uniprocessor mode and just avoid using the panic lock. + */ +#define PANIC_LOCK() \ +MACRO_BEGIN \ + if (panic_is_inited) \ + simple_lock(&panic_lock); \ +MACRO_END + +#define PANIC_UNLOCK() \ +MACRO_BEGIN \ + if (panic_is_inited) \ + simple_unlock(&panic_lock); \ +MACRO_END + + +void +panic_init(void) +{ + simple_lock_init(&panic_lock, ETAP_NO_TRACE); + panic_is_inited = 1; +} + +void +panic(const char *str, ...) 
+{ + va_list listp; + spl_t s; + thread_t thread; + + s = splhigh(); + + thread = current_thread(); /* Get failing thread */ + save_waits[cpu_number()] = thread->wait_queue; /* Save the old value */ + thread->wait_queue = 0; /* Clear the wait so we do not get double panics when we try locks */ + + mp_disable_preemption(); + disableDebugOuput = FALSE; + debug_mode = TRUE; +restart: + PANIC_LOCK(); + if (panicstr) { + if (cpu_number() != paniccpu) { + PANIC_UNLOCK(); + /* + * Wait until message has been printed to identify correct + * cpu that made the first panic. + */ + while (panicwait) + continue; + goto restart; + } else { + nestedpanic +=1; + PANIC_UNLOCK(); + Debugger("double panic"); + mp_enable_preemption(); + splx(s); + printf("double panic: We are hanging here...\n"); + while(1); + /* NOTREACHED */ + } + } + panicstr = str; + paniccpu = cpu_number(); + panicwait = 1; + + PANIC_UNLOCK(); + printf("panic(cpu %d): ", (unsigned) paniccpu); + va_start(listp, str); + _doprnt(str, &listp, cnputc, 0); + va_end(listp); + printf("\n"); + + /* + * Release panicwait indicator so that other cpus may call Debugger(). + */ + panicwait = 0; + Debugger("panic"); + /* + * Release panicstr so that we can handle normally other panics. + */ + PANIC_LOCK(); + panicstr = (char *)0; + PANIC_UNLOCK(); + mp_enable_preemption(); + splx(s); + thread->wait_queue = save_waits[cpu_number()]; /* Restore the wait queue */ + if (return_on_panic) + return; + printf("panic: We are hanging here...\n"); + while(1); + /* NOTREACHED */ +} + +void +log(int level, char *fmt, ...) 
+{ + va_list listp; + extern void conslog_putc(char); + +#ifdef lint + level++; +#endif /* lint */ +#ifdef MACH_BSD + disable_preemption(); + va_start(listp, fmt); + _doprnt(fmt, &listp, conslog_putc, 0); + va_end(listp); + enable_preemption(); +#endif +} diff --git a/osfmk/kern/debug.h b/osfmk/kern/debug.h new file mode 100644 index 000000000..e5c63e642 --- /dev/null +++ b/osfmk/kern/debug.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _KERN_DEBUG_H_ +#define _KERN_DEBUG_H_ + + +extern unsigned int systemLogDiags; + +#ifdef MACH_KERNEL_PRIVATE + +extern unsigned int halt_in_debugger; + +extern unsigned int switch_debugger; + +extern unsigned int current_debugger; +#define NO_CUR_DB 0x0 +#define KDP_CUR_DB 0x1 +#define KDB_CUR_DB 0x2 + +extern unsigned int active_debugger; +extern unsigned int debug_mode; +extern unsigned int disableDebugOuput; + +extern int db_run_mode; + +/* modes the system may be running in */ + +#define STEP_NONE 0 +#define STEP_ONCE 1 +#define STEP_RETURN 2 +#define STEP_CALLT 3 +#define STEP_CONTINUE 4 +#define STEP_INVISIBLE 5 +#define STEP_COUNT 6 +#define STEP_TRACE 7 /* Show all calls to functions and returns */ + +extern char *panicstr; + +extern unsigned int nestedpanic; + +#endif /* MACH_KERNEL_PRIVATE */ + +#define DB_HALT 0x1 +#define DB_PRT 0x2 +#define DB_NMI 0x4 +#define DB_KPRT 0x8 +#define DB_KDB 0x10 +#define DB_SLOG 0x20 + +#endif /* _KERN_DEBUG_H_ */ diff --git a/osfmk/kern/etap.c b/osfmk/kern/etap.c new file mode 100644 index 000000000..d1f5ad913 --- /dev/null +++ b/osfmk/kern/etap.c @@ -0,0 +1,1866 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: etap.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for kernel_map, ipc_kernel_map */ +#if ETAP_MONITOR +#include +#include +#include +#include +#include +#include +#include +#include +/*#include */ +#include +#include +#include +#include +#include +#include +#include +#endif +#if MACH_KDB +#include +#include +#include +#if 0 /* WHY?? */ +#include +#endif +#endif + +/* + * Forwards + */ + +kern_return_t +etap_get_info(host_priv_t, int*, int*, vm_offset_t*, vm_offset_t*, + int*, int*, int*, int*); + +kern_return_t +etap_mon_reconfig(host_priv_t, int); + +kern_return_t +etap_new_probe(host_priv_t, vm_address_t, vm_size_t, boolean_t, vm_address_t); + +kern_return_t +etap_trace_thread(thread_act_t, boolean_t); + +void +etap_trace_reset(int); + +void +etap_interrupt_probe(int, int); + +void +etap_machcall_probe1(int); + +void +etap_machcall_probe2(void); + +void +etap_print(void); + + +#if ETAP + +#ifndef max +#define max(x,y) ((x > y) ? 
x : y) +#endif /* max */ + +event_table_t +etap_event_table_find(etap_event_t); + +/* ======================= + * ETAP Lock definitions + * ======================= + */ + +#if ETAP_LOCK_TRACE +#define etap_lock simple_lock_no_trace +#define etap_unlock simple_unlock_no_trace +#else /* ETAP_LOCK_TRACE */ +#define etap_lock simple_lock +#define etap_unlock simple_unlock +#endif /* ETAP_LOCK_TRACE */ + +#define event_table_lock() etap_lock(&event_table_lock) +#define event_table_unlock() etap_unlock(&event_table_lock) + +#define cumulative_buffer_lock(s) \ +MACRO_BEGIN \ + s = splhigh(); \ + etap_lock(&cbuff_lock); \ +MACRO_END + +#define cumulative_buffer_unlock(s) \ +MACRO_BEGIN \ + etap_unlock(&cbuff_lock); \ + splx(s); \ +MACRO_END + + +#if ETAP_LOCK_ACCUMULATE + +/* ======================================== + * ETAP Cumulative lock trace definitions + * ======================================== + */ + +int cbuff_width = ETAP_CBUFF_WIDTH; + +/* + * Cumulative buffer declaration + * + * For both protection and mapping purposes, the cumulative + * buffer must be aligned on a page boundary. Since the cumulative + * buffer must be statically defined, page boundary alignment is not + * garenteed. Instead, the buffer is allocated with 2 extra pages. + * The cumulative buffer pointer will round up to the nearest page. + * + * This will garentee page boundary alignment. + */ + +#define TWO_PAGES 16384 /* XXX does this apply ??*/ +#define CBUFF_ALLOCATED_SIZE sizeof(struct cumulative_buffer)+TWO_PAGES + +decl_simple_lock_data (,cbuff_lock) +#if MACH_LDEBUG +simple_lock_t cbuff_locks; +#else +simple_lock_data_t cbuff_locks; +#endif +char cbuff_allocated [CBUFF_ALLOCATED_SIZE]; +cumulative_buffer_t cbuff = {0}; + +#endif /* ETAP_LOCK_ACCUMULATE */ + +#if ETAP_MONITOR + +int mbuff_entries = ETAP_MBUFF_ENTRIES; + +/* + * Create an array of pointers to monitor buffers. + * The buffers themselves are allocated at run-time. 
+ */ + +struct monitor_buffer *mbuff[NCPUS]; +#endif /* ETAP_MONITOR */ + +/* ========================== + * Event table declarations + * ========================== + */ + +decl_simple_lock_data(,event_table_lock) + +const struct event_table_entry event_table_init[] = +{ + + /*-----------------------------------------------------------------------* + * ETAP EVENT TRACE STATUS TEXT NAME DYNAMIC * + *-----------------------------------------------------------------------*/ + +#if ETAP_EVENT_MONITOR + {ETAP_P_USER_EVENT0 , ETAP_TRACE_OFF , "p_user_event0" , STATIC}, + {ETAP_P_USER_EVENT1 , ETAP_TRACE_OFF , "p_user_event1" , STATIC}, + {ETAP_P_USER_EVENT2 , ETAP_TRACE_OFF , "p_user_event2" , STATIC}, + {ETAP_P_USER_EVENT3 , ETAP_TRACE_OFF , "p_user_event3" , STATIC}, + {ETAP_P_USER_EVENT4 , ETAP_TRACE_OFF , "p_user_event4" , STATIC}, + {ETAP_P_USER_EVENT5 , ETAP_TRACE_OFF , "p_user_event5" , STATIC}, + {ETAP_P_USER_EVENT6 , ETAP_TRACE_OFF , "p_user_event6" , STATIC}, + {ETAP_P_USER_EVENT7 , ETAP_TRACE_OFF , "p_user_event7" , STATIC}, + {ETAP_P_USER_EVENT8 , ETAP_TRACE_OFF , "p_user_event8" , STATIC}, + {ETAP_P_USER_EVENT9 , ETAP_TRACE_OFF , "p_user_event9" , STATIC}, + {ETAP_P_USER_EVENT10 , ETAP_TRACE_OFF , "p_user_event10" , STATIC}, + {ETAP_P_USER_EVENT11 , ETAP_TRACE_OFF , "p_user_event11" , STATIC}, + {ETAP_P_USER_EVENT12 , ETAP_TRACE_OFF , "p_user_event12" , STATIC}, + {ETAP_P_USER_EVENT13 , ETAP_TRACE_OFF , "p_user_event13" , STATIC}, + {ETAP_P_USER_EVENT14 , ETAP_TRACE_OFF , "p_user_event14" , STATIC}, + {ETAP_P_USER_EVENT15 , ETAP_TRACE_OFF , "p_user_event15" , STATIC}, + {ETAP_P_USER_EVENT16 , ETAP_TRACE_OFF , "p_user_event16" , STATIC}, + {ETAP_P_USER_EVENT17 , ETAP_TRACE_OFF , "p_user_event17" , STATIC}, + {ETAP_P_USER_EVENT18 , ETAP_TRACE_OFF , "p_user_event18" , STATIC}, + {ETAP_P_USER_EVENT19 , ETAP_TRACE_OFF , "p_user_event19" , STATIC}, + {ETAP_P_USER_EVENT20 , ETAP_TRACE_OFF , "p_user_event20" , STATIC}, + {ETAP_P_USER_EVENT21 , ETAP_TRACE_OFF , 
"p_user_event21" , STATIC}, + {ETAP_P_USER_EVENT22 , ETAP_TRACE_OFF , "p_user_event22" , STATIC}, + {ETAP_P_USER_EVENT23 , ETAP_TRACE_OFF , "p_user_event23" , STATIC}, + {ETAP_P_USER_EVENT24 , ETAP_TRACE_OFF , "p_user_event24" , STATIC}, + {ETAP_P_USER_EVENT25 , ETAP_TRACE_OFF , "p_user_event25" , STATIC}, + {ETAP_P_USER_EVENT26 , ETAP_TRACE_OFF , "p_user_event26" , STATIC}, + {ETAP_P_USER_EVENT27 , ETAP_TRACE_OFF , "p_user_event27" , STATIC}, + {ETAP_P_USER_EVENT28 , ETAP_TRACE_OFF , "p_user_event28" , STATIC}, + {ETAP_P_USER_EVENT29 , ETAP_TRACE_OFF , "p_user_event29" , STATIC}, + {ETAP_P_USER_EVENT30 , ETAP_TRACE_OFF , "p_user_event30" , STATIC}, + {ETAP_P_USER_EVENT31 , ETAP_TRACE_OFF , "p_user_event31" , STATIC}, + {ETAP_P_SYSCALL_MACH , ETAP_TRACE_OFF , "p_syscall_mach" , STATIC}, + {ETAP_P_SYSCALL_UNIX , ETAP_TRACE_OFF , "p_syscall_unix" , STATIC}, + {ETAP_P_THREAD_LIFE , ETAP_TRACE_OFF , "p_thread_life" , STATIC}, + {ETAP_P_THREAD_CTX , ETAP_TRACE_OFF , "p_thread_ctx" , STATIC}, + {ETAP_P_RPC , ETAP_TRACE_OFF , "p_rpc" , STATIC}, + {ETAP_P_INTERRUPT , ETAP_TRACE_OFF , "p_interrupt" , STATIC}, + {ETAP_P_ACT_ABORT , ETAP_TRACE_OFF , "p_act_abort" , STATIC}, + {ETAP_P_PRIORITY , ETAP_TRACE_OFF , "p_priority" , STATIC}, + {ETAP_P_EXCEPTION , ETAP_TRACE_OFF , "p_exception" , STATIC}, + {ETAP_P_DEPRESSION , ETAP_TRACE_OFF , "p_depression" , STATIC}, + {ETAP_P_MISC , ETAP_TRACE_OFF , "p_misc" , STATIC}, + {ETAP_P_DETAP , ETAP_TRACE_OFF , "p_detap" , STATIC}, +#endif /* ETAP_EVENT_MONITOR */ + +#if ETAP_LOCK_TRACE + {ETAP_VM_BUCKET , ETAP_TRACE_OFF , "vm_bucket" , STATIC},/**/ + {ETAP_VM_HIMEM , ETAP_TRACE_OFF , "vm_himem" , STATIC}, + {ETAP_VM_MAP , ETAP_TRACE_OFF , "vm_map" , 1}, + {ETAP_VM_MAP_I , ETAP_TRACE_OFF , "vm_map_i" , 2}, + {ETAP_VM_MEMMAN , ETAP_TRACE_OFF , "vm_memman" , STATIC},/**/ + {ETAP_VM_MSYNC , ETAP_TRACE_OFF , "vm_msync" , 3}, + {ETAP_VM_OBJ , ETAP_TRACE_OFF , "vm_obj" , 4}, + {ETAP_VM_OBJ_CACHE , ETAP_TRACE_OFF , "vm_obj_cache" , 5}, + 
{ETAP_VM_PAGE_ALLOC , ETAP_TRACE_OFF , "vm_page_alloc" , STATIC},/**/ + {ETAP_VM_PAGEOUT , ETAP_TRACE_OFF , "vm_pageout" , STATIC}, + {ETAP_VM_PAGEQ , ETAP_TRACE_OFF , "vm_pageq" , STATIC}, + {ETAP_VM_PAGEQ_FREE , ETAP_TRACE_OFF , "vm_pageq_free" , STATIC}, + {ETAP_VM_PMAP , ETAP_TRACE_OFF , "vm_pmap" , 6}, + {ETAP_VM_PMAP_CACHE , ETAP_TRACE_OFF , "vm_pmap_cache" , STATIC}, + {ETAP_VM_PMAP_FREE , ETAP_TRACE_OFF , "vm_pmap_free" , STATIC}, + {ETAP_VM_PMAP_KERNEL , ETAP_TRACE_OFF , "vm_pmap_kern" , STATIC}, + {ETAP_VM_PMAP_SYS , ETAP_TRACE_OFF , "vm_pmap_sys" , 7}, + {ETAP_VM_PMAP_SYS_I , ETAP_TRACE_OFF , "vm_pmap_sys_i" , 8}, + {ETAP_VM_PMAP_UPDATE , ETAP_TRACE_OFF , "vm_pmap_update" , STATIC}, + {ETAP_VM_PREPPIN , ETAP_TRACE_OFF , "vm_preppin" , STATIC}, + {ETAP_VM_RESULT , ETAP_TRACE_OFF , "vm_result" , 9}, + {ETAP_VM_TEST , ETAP_TRACE_OFF , "vm_tes" , STATIC},/**/ + {ETAP_VM_PMAP_PHYSENTRIES, ETAP_TRACE_OFF , "vm_pmap_physentries", STATIC}, + {ETAP_VM_PMAP_SID , ETAP_TRACE_OFF , "vm_pmap_sid" , STATIC}, + {ETAP_VM_PMAP_PTE , ETAP_TRACE_OFF , "vm_pmap_pte" , STATIC}, + {ETAP_VM_PMAP_PTE_OVFLW , ETAP_TRACE_OFF , "vm_pmap_pte_ovflw", STATIC}, + {ETAP_VM_PMAP_TLB , ETAP_TRACE_OFF , "vm_pmap_tlb" , STATIC}, + + {ETAP_IPC_IHGB , ETAP_TRACE_OFF , "ipc_ihgb" , 10},/**/ + {ETAP_IPC_IS , ETAP_TRACE_OFF , "ipc_is" , 11},/**/ + {ETAP_IPC_IS_REF , ETAP_TRACE_OFF , "ipc_is_ref" , 12},/**/ + {ETAP_IPC_MQUEUE , ETAP_TRACE_OFF , "ipc_mqueue" , STATIC},/**/ + {ETAP_IPC_OBJECT , ETAP_TRACE_OFF , "ipc_object" , STATIC},/**/ + {ETAP_IPC_PORT_MULT , ETAP_TRACE_OFF , "ipc_port_mult" , 13},/**/ + {ETAP_IPC_PORT_TIME , ETAP_TRACE_OFF , "ipc_port_time" , 14},/**/ + {ETAP_IPC_RPC , ETAP_TRACE_OFF , "ipc_rpc" , 15},/**/ + {ETAP_IPC_PORT_ALLOCQ , ETAP_TRACE_OFF , "ipc_port_allocq" , STATIC},/**/ + + {ETAP_IO_AHA , ETAP_TRACE_OFF , "io_aha" , STATIC}, + {ETAP_IO_CHIP , ETAP_TRACE_OFF , "io_chip" , STATIC}, + {ETAP_IO_DEV , ETAP_TRACE_OFF , "io_dev" , 16},/**/ + {ETAP_IO_DEV_NUM , 
ETAP_TRACE_OFF , "io_dev_num" , STATIC}, + {ETAP_IO_DEV_PAGEH , ETAP_TRACE_OFF , "io_dev_pageh" , STATIC},/**/ + {ETAP_IO_DEV_PAGER , ETAP_TRACE_OFF , "io_dev_pager" , STATIC},/**/ + {ETAP_IO_DEV_PORT , ETAP_TRACE_OFF , "io_dev_port" , STATIC},/**/ + {ETAP_IO_DEV_REF , ETAP_TRACE_OFF , "io_dev_new" , 17},/**/ + {ETAP_IO_DEVINS , ETAP_TRACE_OFF , "io_devins" , STATIC}, + {ETAP_IO_DONE_LIST , ETAP_TRACE_OFF , "io_done_list" , STATIC}, + {ETAP_IO_DONE_Q , ETAP_TRACE_OFF , "io_doneq" , 18}, + {ETAP_IO_DONE_REF , ETAP_TRACE_OFF , "io_done_ref" , 19}, + {ETAP_IO_EAHA , ETAP_TRACE_OFF , "io_eaha" , STATIC}, + {ETAP_IO_HD_PROBE , ETAP_TRACE_OFF , "io_hd_probe" , STATIC}, + {ETAP_IO_IOPB , ETAP_TRACE_OFF , "io_iopb" , STATIC}, + {ETAP_IO_KDQ , ETAP_TRACE_OFF , "io_kdq" , STATIC}, + {ETAP_IO_KDTTY , ETAP_TRACE_OFF , "io_kdtty" , STATIC}, + {ETAP_IO_REQ , ETAP_TRACE_OFF , "io_req" , 20}, + {ETAP_IO_TARGET , ETAP_TRACE_OFF , "io_target" , STATIC}, + {ETAP_IO_TTY , ETAP_TRACE_OFF , "io_tty" , STATIC}, + {ETAP_IO_IOP_LOCK , ETAP_TRACE_OFF , "io_iop" , STATIC},/**/ + {ETAP_IO_DEV_NAME , ETAP_TRACE_OFF , "io_dev_name" , STATIC},/**/ + {ETAP_IO_CDLI , ETAP_TRACE_OFF , "io_cdli" , STATIC},/**/ + {ETAP_IO_HIPPI_FILTER , ETAP_TRACE_OFF , "io_hippi_filter" , STATIC},/**/ + {ETAP_IO_HIPPI_SRC , ETAP_TRACE_OFF , "io_hippi_src" , STATIC},/**/ + {ETAP_IO_HIPPI_DST , ETAP_TRACE_OFF , "io_hippi_dst" , STATIC},/**/ + {ETAP_IO_HIPPI_PKT , ETAP_TRACE_OFF , "io_hippi_pkt" , STATIC},/**/ + {ETAP_IO_NOTIFY , ETAP_TRACE_OFF , "io_notify" , STATIC},/**/ + {ETAP_IO_DATADEV , ETAP_TRACE_OFF , "io_data_device" , STATIC},/**/ + {ETAP_IO_OPEN , ETAP_TRACE_OFF , "io_open" , STATIC}, + {ETAP_IO_OPEN_I , ETAP_TRACE_OFF , "io_open_i" , STATIC}, + + {ETAP_THREAD_ACT , ETAP_TRACE_OFF , "th_act" , 21}, + {ETAP_THREAD_ACTION , ETAP_TRACE_OFF , "th_action" , STATIC}, + {ETAP_THREAD_LOCK , ETAP_TRACE_OFF , "th_lock" , 22}, + {ETAP_THREAD_LOCK_SET , ETAP_TRACE_OFF , "th_lock_set" , 23}, + {ETAP_THREAD_NEW , 
ETAP_TRACE_OFF , "th_new" , 24}, + {ETAP_THREAD_PSET , ETAP_TRACE_OFF , "th_pset" , STATIC},/**/ + {ETAP_THREAD_PSET_ALL , ETAP_TRACE_OFF , "th_pset_all" , STATIC}, + {ETAP_THREAD_PSET_RUNQ , ETAP_TRACE_OFF , "th_pset_runq" , STATIC}, + {ETAP_THREAD_PSET_IDLE , ETAP_TRACE_OFF , "th_pset_idle" , STATIC}, + {ETAP_THREAD_PSET_QUANT , ETAP_TRACE_OFF , "th_pset_quant" , STATIC}, + {ETAP_THREAD_PROC , ETAP_TRACE_OFF , "th_proc" , STATIC}, + {ETAP_THREAD_PROC_RUNQ , ETAP_TRACE_OFF , "th_proc_runq" , STATIC}, + {ETAP_THREAD_REAPER , ETAP_TRACE_OFF , "th_reaper" , STATIC}, + {ETAP_THREAD_RPC , ETAP_TRACE_OFF , "th_rpc" , 25}, + {ETAP_THREAD_SEMA , ETAP_TRACE_OFF , "th_sema" , 26}, + {ETAP_THREAD_STACK , ETAP_TRACE_OFF , "th_stack" , STATIC}, + {ETAP_THREAD_STACK_USAGE , ETAP_TRACE_OFF , "th_stack_usage" , STATIC}, + {ETAP_THREAD_TASK_NEW , ETAP_TRACE_OFF , "th_task_new" , 27}, + {ETAP_THREAD_TASK_ITK , ETAP_TRACE_OFF , "th_task_itk" , 28}, + {ETAP_THREAD_ULOCK , ETAP_TRACE_OFF , "th_ulock" , 29}, + {ETAP_THREAD_WAIT , ETAP_TRACE_OFF , "th_wait" , STATIC}, + {ETAP_THREAD_WAKE , ETAP_TRACE_OFF , "th_wake" , 30}, + {ETAP_THREAD_ACT_LIST , ETAP_TRACE_OFF , "th_act_list" , 31}, + {ETAP_THREAD_TASK_SWAP , ETAP_TRACE_OFF , "th_task_swap" , 32}, + {ETAP_THREAD_TASK_SWAPOUT, ETAP_TRACE_OFF , "th_task_swapout" , 33}, + {ETAP_THREAD_SWAPPER , ETAP_TRACE_OFF , "th_swapper" , STATIC}, + + {ETAP_NET_IFQ , ETAP_TRACE_OFF , "net_ifq" , STATIC}, + {ETAP_NET_KMSG , ETAP_TRACE_OFF , "net_kmsg" , STATIC}, + {ETAP_NET_MBUF , ETAP_TRACE_OFF , "net_mbuf" , STATIC},/**/ + {ETAP_NET_POOL , ETAP_TRACE_OFF , "net_pool" , STATIC}, + {ETAP_NET_Q , ETAP_TRACE_OFF , "net_q" , STATIC}, + {ETAP_NET_QFREE , ETAP_TRACE_OFF , "net_qfree" , STATIC}, + {ETAP_NET_RCV , ETAP_TRACE_OFF , "net_rcv" , STATIC}, + {ETAP_NET_RCV_PLIST , ETAP_TRACE_OFF , "net_rcv_plist" , STATIC},/**/ + {ETAP_NET_THREAD , ETAP_TRACE_OFF , "net_thread" , STATIC}, + + {ETAP_NORMA_XMM , ETAP_TRACE_OFF , "norma_xmm" , STATIC}, + 
{ETAP_NORMA_XMMOBJ , ETAP_TRACE_OFF , "norma_xmmobj" , STATIC}, + {ETAP_NORMA_XMMCACHE , ETAP_TRACE_OFF , "norma_xmmcache" , STATIC}, + {ETAP_NORMA_MP , ETAP_TRACE_OFF , "norma_mp" , STATIC}, + {ETAP_NORMA_VOR , ETAP_TRACE_OFF , "norma_vor" , STATIC},/**/ + {ETAP_NORMA_TASK , ETAP_TRACE_OFF , "norma_task" , 38},/**/ + + {ETAP_DIPC_CLEANUP , ETAP_TRACE_OFF , "dipc_cleanup" , STATIC},/**/ + {ETAP_DIPC_MSG_PROG , ETAP_TRACE_OFF , "dipc_msgp_prog" , STATIC},/**/ + {ETAP_DIPC_PREP_QUEUE , ETAP_TRACE_OFF , "dipc_prep_queue" , STATIC},/**/ + {ETAP_DIPC_PREP_FILL , ETAP_TRACE_OFF , "dipc_prep_fill" , STATIC},/**/ + {ETAP_DIPC_MIGRATE , ETAP_TRACE_OFF , "dipc_migrate" , STATIC},/**/ + {ETAP_DIPC_DELIVER , ETAP_TRACE_OFF , "dipc_deliver" , STATIC},/**/ + {ETAP_DIPC_RECV_SYNC , ETAP_TRACE_OFF , "dipc_recv_sync" , STATIC},/**/ + {ETAP_DIPC_RPC , ETAP_TRACE_OFF , "dipc_rpc" , STATIC},/**/ + {ETAP_DIPC_MSG_REQ , ETAP_TRACE_OFF , "dipc_msg_req" , STATIC},/**/ + {ETAP_DIPC_MSG_ORDER , ETAP_TRACE_OFF , "dipc_msg_order" , STATIC},/**/ + {ETAP_DIPC_MSG_PREPQ , ETAP_TRACE_OFF , "dipc_msg_prepq" , STATIC},/**/ + {ETAP_DIPC_MSG_FREE , ETAP_TRACE_OFF , "dipc_msg_free" , STATIC},/**/ + {ETAP_DIPC_KMSG_AST , ETAP_TRACE_OFF , "dipc_kmsg_ast" , STATIC},/**/ + {ETAP_DIPC_TEST_LOCK , ETAP_TRACE_OFF , "dipc_test_lock" , STATIC},/**/ + {ETAP_DIPC_SPINLOCK , ETAP_TRACE_OFF , "dipc_spinlock" , STATIC},/**/ + {ETAP_DIPC_TRACE , ETAP_TRACE_OFF , "dipc_trace" , STATIC},/**/ + {ETAP_DIPC_REQ_CALLBACK , ETAP_TRACE_OFF , "dipc_req_clbck" , STATIC},/**/ + {ETAP_DIPC_PORT_NAME , ETAP_TRACE_OFF , "dipc_port_name" , STATIC},/**/ + {ETAP_DIPC_RESTART_PORT , ETAP_TRACE_OFF , "dipc_restart_port", STATIC},/**/ + {ETAP_DIPC_ZERO_PAGE , ETAP_TRACE_OFF , "dipc_zero_page" , STATIC},/**/ + {ETAP_DIPC_BLOCKED_NODE , ETAP_TRACE_OFF , "dipc_blocked_node", STATIC},/**/ + {ETAP_DIPC_TIMER , ETAP_TRACE_OFF , "dipc_timer" , STATIC},/**/ + {ETAP_DIPC_SPECIAL_PORT , ETAP_TRACE_OFF , "dipc_special_port", STATIC},/**/ + + 
{ETAP_KKT_TEST_WORK , ETAP_TRACE_OFF , "kkt_test_work" , STATIC},/**/ + {ETAP_KKT_TEST_MP , ETAP_TRACE_OFF , "kkt_work_mp" , STATIC},/**/ + {ETAP_KKT_NODE , ETAP_TRACE_OFF , "kkt_node" , STATIC},/**/ + {ETAP_KKT_CHANNEL_LIST , ETAP_TRACE_OFF , "kkt_channel_list" , STATIC},/**/ + {ETAP_KKT_CHANNEL , ETAP_TRACE_OFF , "kkt_channel" , STATIC},/**/ + {ETAP_KKT_HANDLE , ETAP_TRACE_OFF , "kkt_handle" , STATIC},/**/ + {ETAP_KKT_MAP , ETAP_TRACE_OFF , "kkt_map" , STATIC},/**/ + {ETAP_KKT_RESOURCE , ETAP_TRACE_OFF , "kkt_resource" , STATIC},/**/ + + {ETAP_XKERNEL_MASTER , ETAP_TRACE_OFF , "xkernel_master" , STATIC},/**/ + {ETAP_XKERNEL_EVENT , ETAP_TRACE_OFF , "xkernel_event" , STATIC},/**/ + {ETAP_XKERNEL_ETHINPUT , ETAP_TRACE_OFF , "xkernel_input" , STATIC},/**/ + + {ETAP_MISC_AST , ETAP_TRACE_OFF , "m_ast" , STATIC}, + {ETAP_MISC_CLOCK , ETAP_TRACE_OFF , "m_clock" , STATIC}, + {ETAP_MISC_EMULATE , ETAP_TRACE_OFF , "m_emulate" , 34}, + {ETAP_MISC_EVENT , ETAP_TRACE_OFF , "m_event" , STATIC}, + {ETAP_MISC_KDB , ETAP_TRACE_OFF , "m_kdb" , STATIC}, + {ETAP_MISC_PCB , ETAP_TRACE_OFF , "m_pcb" , 35}, + {ETAP_MISC_PRINTF , ETAP_TRACE_OFF , "m_printf" , STATIC}, + {ETAP_MISC_Q , ETAP_TRACE_OFF , "m_q" , STATIC}, + {ETAP_MISC_RPC_SUBSYS , ETAP_TRACE_OFF , "m_rpc_sub" , 36}, + {ETAP_MISC_RT_CLOCK , ETAP_TRACE_OFF , "m_rt_clock" , STATIC}, + {ETAP_MISC_SD_POOL , ETAP_TRACE_OFF , "m_sd_pool" , STATIC}, + {ETAP_MISC_TIMER , ETAP_TRACE_OFF , "m_timer" , STATIC}, + {ETAP_MISC_UTIME , ETAP_TRACE_OFF , "m_utime" , STATIC}, + {ETAP_MISC_XPR , ETAP_TRACE_OFF , "m_xpr" , STATIC}, + {ETAP_MISC_ZONE , ETAP_TRACE_OFF , "m_zone" , 37}, + {ETAP_MISC_ZONE_ALL , ETAP_TRACE_OFF , "m_zone_all" , STATIC}, + {ETAP_MISC_ZONE_GET , ETAP_TRACE_OFF , "m_zone_get" , STATIC}, + {ETAP_MISC_ZONE_PTABLE , ETAP_TRACE_OFF , "m_zone_ptable" , STATIC},/**/ + {ETAP_MISC_LEDGER , ETAP_TRACE_OFF , "m_ledger" , STATIC},/**/ + {ETAP_MISC_SCSIT_TGT , ETAP_TRACE_OFF , "m_scsit_tgt_lock" , STATIC},/**/ + 
{ETAP_MISC_SCSIT_SELF , ETAP_TRACE_OFF , "m_scsit_self_lock", STATIC},/**/ + {ETAP_MISC_SPL , ETAP_TRACE_OFF , "m_spl_lock" , STATIC},/**/ + {ETAP_MISC_MASTER , ETAP_TRACE_OFF , "m_master" , STATIC},/**/ + {ETAP_MISC_FLOAT , ETAP_TRACE_OFF , "m_float" , STATIC},/**/ + {ETAP_MISC_GROUP , ETAP_TRACE_OFF , "m_group" , STATIC},/**/ + {ETAP_MISC_FLIPC , ETAP_TRACE_OFF , "m_flipc" , STATIC},/**/ + {ETAP_MISC_MP_IO , ETAP_TRACE_OFF , "m_mp_io" , STATIC},/**/ + {ETAP_MISC_KERNEL_TEST , ETAP_TRACE_OFF , "m_kernel_test" , STATIC},/**/ + + {ETAP_NO_TRACE , ETAP_TRACE_OFF , "NEVER_TRACE" , STATIC}, +#endif /* ETAP_LOCK_TRACE */ +}; + +/* + * Variable initially pointing to the event table, then to its mappable + * copy. The cast is needed to discard the `const' qualifier; without it + * gcc issues a warning. + */ +event_table_t event_table = (event_table_t) event_table_init; + +/* + * Linked list of pointers into event_table_init[] so they can be switched + * into the mappable copy when it is made. + */ +struct event_table_chain *event_table_chain; + +/* + * max number of event types in the event table + */ + +int event_table_max = sizeof(event_table_init)/sizeof(struct event_table_entry); + +const struct subs_table_entry subs_table_init[] = +{ + /*------------------------------------------* + * ETAP SUBSYSTEM TEXT NAME * + *------------------------------------------*/ + +#if ETAP_EVENT_MONITOR + {ETAP_SUBS_PROBE , "event_probes" }, +#endif /* ETAP_EVENT_MONITOR */ + +#if ETAP_LOCK_TRACE + {ETAP_SUBS_LOCK_DIPC , "lock_dipc" }, + {ETAP_SUBS_LOCK_IO , "lock_io" }, + {ETAP_SUBS_LOCK_IPC , "lock_ipc" }, + {ETAP_SUBS_LOCK_KKT , "lock_kkt" }, + {ETAP_SUBS_LOCK_MISC , "lock_misc" }, + {ETAP_SUBS_LOCK_NET , "lock_net" }, + {ETAP_SUBS_LOCK_NORMA , "lock_norma" }, + {ETAP_SUBS_LOCK_THREAD , "lock_thread" }, + {ETAP_SUBS_LOCK_VM , "lock_vm" }, + {ETAP_SUBS_LOCK_XKERNEL , "lock_xkernel" }, +#endif /* ETAP_LOCK_TRACE */ +}; + +/* + * Variable initially pointing to the subsystem table, then 
to its mappable + * copy. + */ +subs_table_t subs_table = (subs_table_t) subs_table_init; + +/* + * max number of subsystem types in the subsystem table + */ + +int subs_table_max = sizeof(subs_table_init)/sizeof(struct subs_table_entry); + +#if ETAP_MONITOR +#define MAX_NAME_SIZE 35 + +#define SYS_TABLE_MACH_TRAP 0 +#define SYS_TABLE_MACH_MESSAGE 1 +#define SYS_TABLE_UNIX_SYSCALL 2 +#define SYS_TABLE_INTERRUPT 3 +#define SYS_TABLE_EXCEPTION 4 + + +extern char *system_table_lookup (unsigned int table, + unsigned int number); + + +char *mach_trap_names[] = { +/* 0 */ "undefined", +/* 1 */ NULL, +/* 2 */ NULL, +/* 3 */ NULL, +/* 4 */ NULL, +/* 5 */ NULL, +/* 6 */ NULL, +/* 7 */ NULL, +/* 8 */ NULL, +/* 9 */ NULL, +/* 10 */ NULL, +/* 11 */ NULL, +/* 12 */ NULL, +/* 13 */ NULL, +/* 14 */ NULL, +/* 15 */ NULL, +/* 16 */ NULL, +/* 17 */ NULL, +/* 18 */ NULL, +/* 19 */ NULL, +/* 20 */ NULL, +/* 21 */ NULL, +/* 22 */ NULL, +/* 23 */ NULL, +/* 24 */ NULL, +/* 25 */ NULL, +/* 26 */ "mach_reply_port", +/* 27 */ "mach_thread_self", +/* 28 */ "mach_task_self", +/* 29 */ "mach_host_self", +/* 30 */ "vm_read_overwrite", +/* 31 */ "vm_write", +/* 32 */ "mach_msg_overwrite_trap", +/* 33 */ NULL, +/* 34 */ NULL, +#ifdef i386 +/* 35 */ "mach_rpc_trap", +/* 36 */ "mach_rpc_return_trap", +#else +/* 35 */ NULL, +/* 36 */ NULL, +#endif /* i386 */ +/* 37 */ NULL, +/* 38 */ NULL, +/* 39 */ NULL, +/* 40 */ NULL, +/* 41 */ "init_process", +/* 42 */ NULL, +/* 43 */ "map_fd", +/* 44 */ NULL, +/* 45 */ NULL, +/* 46 */ NULL, +/* 47 */ NULL, +/* 48 */ NULL, +/* 49 */ NULL, +/* 50 */ NULL, +/* 51 */ NULL, +/* 52 */ NULL, +/* 53 */ NULL, +/* 54 */ NULL, +/* 55 */ NULL, +/* 56 */ NULL, +/* 57 */ NULL, +/* 58 */ NULL, +/* 59 */ "swtch_pri", +/* 60 */ "swtch", +/* 61 */ "thread_switch", +/* 62 */ "clock_sleep_trap", +/* 63 */ NULL, +/* 64 */ NULL, +/* 65 */ NULL, +/* 66 */ NULL, +/* 67 */ NULL, +/* 68 */ NULL, +/* 69 */ NULL, +/* 70 */ NULL, +/* 71 */ NULL, +/* 72 */ NULL, +/* 73 */ NULL, +/* 74 */ 
NULL, +/* 75 */ NULL, +/* 76 */ NULL, +/* 77 */ NULL, +/* 78 */ NULL, +/* 79 */ NULL, +/* 80 */ NULL, +/* 81 */ NULL, +/* 82 */ NULL, +/* 83 */ NULL, +/* 84 */ NULL, +/* 85 */ NULL, +/* 86 */ NULL, +/* 87 */ NULL, +/* 88 */ NULL, +/* 89 */ NULL, +/* 90 */ NULL, +/* 91 */ NULL, +/* 92 */ NULL, +/* 93 */ NULL, +/* 94 */ NULL, +/* 95 */ NULL, +/* 96 */ NULL, +/* 97 */ NULL, +/* 98 */ NULL, +/* 99 */ NULL, +/* 100 */ NULL, +/* 101 */ NULL, +/* 102 */ NULL, +/* 103 */ NULL, +/* 104 */ NULL, +/* 105 */ NULL, +/* 106 */ NULL, +/* 107 */ NULL, +/* 108 */ NULL, +/* 109 */ NULL, +}; +#define N_MACH_TRAP_NAMES (sizeof mach_trap_names / sizeof mach_trap_names[0]) +#define mach_trap_name(nu) \ + (((nu) < N_MACH_TRAP_NAMES) ? mach_trap_names[nu] : NULL) + +struct table_entry { + char name[MAX_NAME_SIZE]; + u_int number; +}; + +/* + * Mach message table + * + * Note: Most mach system calls are actually implemented as messages. + */ +struct table_entry mach_message_table[] = { + subsystem_to_name_map_bootstrap, + subsystem_to_name_map_clock, + subsystem_to_name_map_clock_reply, + subsystem_to_name_map_default_pager_object, + subsystem_to_name_map_device, + subsystem_to_name_map_device_reply, + subsystem_to_name_map_device_request, + subsystem_to_name_map_exc, +/* subsystem_to_name_map_mach,*/ + subsystem_to_name_map_mach_debug, +/* subsystem_to_name_map_mach_host,*/ + subsystem_to_name_map_mach_norma, + subsystem_to_name_map_mach_port, + subsystem_to_name_map_memory_object, + subsystem_to_name_map_memory_object_default, + subsystem_to_name_map_notify, + subsystem_to_name_map_prof, + subsystem_to_name_map_sync +}; + +int mach_message_table_entries = sizeof(mach_message_table) / + sizeof(struct table_entry); + + +#endif + +/* + * ================================ + * Initialization routines for ETAP + * ================================ + */ + +/* + * ROUTINE: etap_init_phase1 [internal] + * + * FUNCTION: Event trace instrumentation initialization phase + * one of two. 
The static phase. The cumulative buffer + * is initialized. + * + * NOTES: The cumulative buffer is statically allocated and + * must be initialized before the first simple_lock_init() + * or lock_init() call is made. + * + * The first lock init call is made before dynamic allocation + * is available. Hence, phase one is executed before dynamic + * memory allocation is available. + * + */ + +void +etap_init_phase1(void) +{ +#if ETAP_LOCK_ACCUMULATE || MACH_ASSERT + int x; +#if MACH_ASSERT + boolean_t out_of_order; +#endif /* MACH_ASSERT */ +#endif /* ETAP_LOCK_ACCUMULATE || MACH_ASSERT */ + +#if ETAP_LOCK_ACCUMULATE + /* + * Initialize Cumulative Buffer + * + * Note: The cumulative buffer is statically allocated. + * This static allocation is necessary since most + * of the lock_init calls are made before dynamic + * allocation routines are available. + */ + + /* + * Align cumulative buffer pointer to a page boundary + * (so it can be maped). + */ + + bzero(&cbuff_allocated[0], CBUFF_ALLOCATED_SIZE); + cbuff = (cumulative_buffer_t) round_page(&cbuff_allocated); + + simple_lock_init(&cbuff_lock, ETAP_NO_TRACE); + + /* + * Set the starting point for cumulative buffer entry + * reservations. + * + * This value must leave enough head room in the + * cumulative buffer to contain all dynamic events. + */ + + for (x=0; x < event_table_max; x++) + if (event_table[x].dynamic > cbuff->static_start) + cbuff->static_start = event_table[x].dynamic; + + cbuff->next = cbuff->static_start; +#endif /* ETAP_LOCK_ACCUMULATE */ + + /* + * Initialize the event table lock + */ + + simple_lock_init(&event_table_lock, ETAP_NO_TRACE); + +#if MACH_ASSERT + /* + * Check that events are in numerical order so we can do a binary + * search on them. Even better would be to make event numbers be + * simple contiguous indexes into event_table[], but that would + * break the coding of subsystems in the event number. 
+ */ + out_of_order = FALSE; + for (x = 1; x < event_table_max; x++) { + if (event_table[x - 1].event > event_table[x].event) { + printf("events out of order: %s > %s\n", + event_table[x - 1].name, event_table[x].name); + out_of_order = TRUE; + } + } + if (out_of_order) + panic("etap_init_phase1"); +#endif /* MACH_ASSERT */ +} + + +/* + * ROUTINE: etap_init_phase2 [internal] + * + * FUNCTION: Event trace instrumentation initialization phase + * two of two. The dynamic phase. The monitored buffers + * are dynamically allocated and initialized. Cumulative + * dynamic entry locks are allocated and initialized. The + * start_data_pool is initialized. + * + * NOTES: Phase two is executed once dynamic memory allocation + * is available. + * + */ + +void +etap_init_phase2(void) +{ + int size; + int x; + int ret; + vm_offset_t table_copy; + struct event_table_chain *chainp; + + /* + * Make mappable copies of the event_table and the subs_table. + * These tables were originally mapped as they appear in the + * kernel image, but that meant that other kernel variables could + * end up being mapped with them, which is ugly. It also didn't + * work on the HP/PA, where pages with physical address == virtual + * do not have real pmap entries allocated and therefore can't be + * mapped elsewhere. + */ + size = sizeof event_table_init + sizeof subs_table_init; + ret = kmem_alloc(kernel_map, &table_copy, size); + if (ret != KERN_SUCCESS) + panic("ETAP: error allocating table copies"); + event_table = (event_table_t) table_copy; + subs_table = (subs_table_t) (table_copy + sizeof event_table_init); + bcopy((char *) event_table_init, (char *) event_table, + sizeof event_table_init); + bcopy((char *) subs_table_init, (char *) subs_table, + sizeof subs_table_init); + + /* Switch pointers from the old event_table to the new. 
*/ + for (chainp = event_table_chain; chainp != NULL; + chainp = chainp->event_table_link) { + x = chainp->event_tablep - event_table_init; + assert(x < event_table_max); + chainp->event_tablep = event_table + x; + } + +#if ETAP_LOCK_ACCUMULATE + + /* + * Because several dynamic locks can point to a single + * cumulative buffer entry, dynamic lock writes to the + * entry are synchronized. + * + * The spin locks are allocated here. + * + */ +#if MACH_LDEBUG + size = sizeof(simple_lock_t) * cbuff->static_start; +#else + /* + * Note: These locks are different from traditional spin locks. + * They are of type int instead of type simple_lock_t. + * We can reduce lock size this way, since no tracing will + * EVER be performed on these locks. + */ + size = sizeof(simple_lock_data_t) * cbuff->static_start; +#endif + + ret = kmem_alloc(kernel_map, (vm_offset_t *) &cbuff_locks, size); + + if (ret != KERN_SUCCESS) + panic("ETAP: error allocating cumulative write locks"); + +#if MACH_LDEBUG + for(x = 0; x < cbuff->static_start; ++x) { + simple_lock_init(&cbuff_locks[x], ETAP_NO_TRACE); + } +#else + bzero((const char *) cbuff_locks, size); +#endif + +#endif /* ETAP_LOCK_ACCUMULATE */ + + +#if ETAP_MONITOR + + /* + * monitor buffer allocation + */ + + size = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) + + sizeof(struct monitor_buffer); + + for (x=0; x < NCPUS; x++) { + ret = kmem_alloc(kernel_map, + (vm_offset_t *) &mbuff[x], + size); + + if (ret != KERN_SUCCESS) + panic ("ETAP: error allocating monitor buffer\n"); + + /* zero fill buffer */ + bzero((char *) mbuff[x], size); + } + +#endif /* ETAP_MONITOR */ + + +#if ETAP_LOCK_TRACE + + /* + * Initialize the start_data_pool + */ + + init_start_data_pool(); + +#endif /* ETAP_LOCK_TRACE */ +} + + +#if ETAP_LOCK_ACCUMULATE + +/* + * ROUTINE: etap_cbuff_reserve [internal] + * + * FUNCTION: The cumulative buffer operation which returns a pointer + * to a free entry in the cumulative buffer. + * + * NOTES: Disables interrupts. 
+ * ROUTINE: etap_event_table_find [internal]
*/ + last_before = 0; + first_after = event_table_max; + while (last_before < first_after) { + try = (last_before + first_after) >> 1; + if (event_table[try].event == event) + return (&event_table[try]); + else if (event_table[try].event < event) + last_before = try; + else + first_after = try; + } + return EVENT_TABLE_NULL; +} + +void +etap_event_table_assign(struct event_table_chain *chainp, etap_event_t event) +{ + event_table_t event_tablep; + + event_tablep = etap_event_table_find(event); + if (event_tablep == EVENT_TABLE_NULL) + printf("\nETAP: event not found in event table: %x\n", event); + else { + if (event_table == event_table_init) { + chainp->event_table_link = event_table_chain; + event_table_chain = chainp; + } + chainp->event_tablep = event_tablep; + } +} + +#endif /* ETAP */ + +/* + * + * MESSAGE: etap_get_info [exported] + * + * FUNCTION: provides the server with ETAP buffer configurations. + * + */ + +kern_return_t +etap_get_info( + host_priv_t host_priv, + int *et_entries, + int *st_entries, + vm_offset_t *et_offset, + vm_offset_t *st_offset, + int *cb_width, + int *mb_size, + int *mb_entries, + int *mb_cpus) +{ + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + +#if ETAP + *et_entries = event_table_max; + *st_entries = subs_table_max; + *et_offset = (vm_offset_t) ((char*) event_table - + trunc_page((char*) event_table)); + *st_offset = (vm_offset_t) ((char*) subs_table - + trunc_page((char*) subs_table)); +#else /* ETAP */ + *et_entries = 0; + *st_entries = 0; + *et_offset = 0; + *st_offset = 0; +#endif /* ETAP */ + +#if ETAP_LOCK_ACCUMULATE + *cb_width = cbuff_width; +#else /* ETAP_LOCK_ACCUMULATE */ + *cb_width = 0; +#endif /* ETAP_LOCK_ACCUMULATE */ + +#if ETAP_MONITOR + *mb_size = ((mbuff_entries-1) * sizeof(struct mbuff_entry)) + + sizeof(struct monitor_buffer); + *mb_entries = mbuff_entries; + *mb_cpus = NCPUS; +#else /* ETAP_MONITOR */ + *mb_size = 0; + *mb_entries = 0; + *mb_cpus = 0; +#endif /* ETAP_MONITOR */ + + 
return (KERN_SUCCESS); +} + +/* + * ROUTINE: etap_trace_event [exported] + * + * FUNCTION: The etap_trace_event system call is the user's interface to + * the ETAP kernel instrumentation. + * + * This call allows the user to enable and disable tracing modes + * on specific event types. The call also supports a reset option, + * where the cumulative buffer data and all event type tracing + * is reset to zero. When the reset option is used, a new + * interval width can also be defined using the op parameter. + * + */ + +kern_return_t +etap_trace_event ( + unsigned short mode, + unsigned short type, + boolean_t enable, + unsigned int nargs, + unsigned short args[]) +{ +#if ETAP + event_table_t event_tablep; + kern_return_t ret; + int i, args_size; + unsigned short status_mask; + unsigned short *tmp_args; + + /* + * Initialize operation + */ + + if (mode == ETAP_RESET) { + etap_trace_reset(nargs); + return (KERN_SUCCESS); + } + + status_mask = mode & type; + + /* + * Copy args array from user space to kernel space + */ + + args_size = nargs * sizeof *args; + tmp_args = (unsigned short *) kalloc(args_size); + + if (tmp_args == NULL) + return (KERN_NO_SPACE); + + if (copyin((const char *) args, (char *) tmp_args, args_size)) + return (KERN_INVALID_ADDRESS); + + /* + * Change appropriate status fields in the event table + */ + + event_table_lock(); + + for (i = 0; i < nargs; i++) { + if (tmp_args[i] != ETAP_NO_TRACE) { + event_tablep = etap_event_table_find(tmp_args[i]); + if (event_tablep == EVENT_TABLE_NULL) + break; + if (enable) + event_tablep->status |= status_mask; + else + event_tablep->status &= ~status_mask; + } + } + + ret = (i < nargs) ? 
KERN_INVALID_ARGUMENT : KERN_SUCCESS; + + event_table_unlock(); + + kfree((vm_offset_t) tmp_args, args_size); + + return (ret); + +#else /* ETAP */ + + return (KERN_FAILURE); + +#endif /* ETAP */ +} + + +#if ETAP + +/* + * ROUTINE: etap_trace_reset [internal] + * + * FUNCTION: Turns off all tracing and erases all the data accumulated + * in the cumulative buffer. If the user defined a new + * cumulative buffer interval width, it will be assigned here. + * + */ +void +etap_trace_reset(int new_interval) +{ + event_table_t scan; + int x; + register s; + + /* + * Wipe out trace fields in event table + */ + + scan = event_table; + + event_table_lock(); + + for (x=0; x < event_table_max; x++) { + scan->status = ETAP_TRACE_OFF; + scan++; + } + + event_table_unlock(); + +#if ETAP_LOCK_ACCUMULATE + + /* + * Wipe out cumulative buffer statistical fields for all entries + */ + + cumulative_buffer_lock(s); + + for (x=0; x < ETAP_CBUFF_ENTRIES; x++) { + bzero ((char *) &cbuff->entry[x].hold, + sizeof(struct cbuff_data)); + bzero ((char *) &cbuff->entry[x].wait, + sizeof(struct cbuff_data)); + bzero ((char *) &cbuff->entry[x].hold_interval[0], + sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS); + bzero ((char *) &cbuff->entry[x].wait_interval[0], + sizeof(unsigned long) * ETAP_CBUFF_IBUCKETS); + } + + /* + * Assign interval width if the user defined a new one. + */ + + if (new_interval != 0) + cbuff_width = new_interval; + + cumulative_buffer_unlock(s); + +#endif /* ETAP_LOCK_ACCUMULATE */ +} + +#endif /* ETAP */ + +/* + * ROUTINE: etap_probe [exported] + * + * FUNCTION: The etap_probe system call serves as a user-level probe, + * allowing user-level code to store event data into + * the monitored buffer(s). 
+ */ + +kern_return_t +etap_probe( + unsigned short event_type, + unsigned short event_id, + unsigned int data_size, /* total size in bytes */ + etap_data_t *data) +{ + +#if ETAP_MONITOR + + mbuff_entry_t mbuff_entryp; + int cpu; + int free; + spl_t s; + + + if (data_size > ETAP_DATA_SIZE) + return (KERN_INVALID_ARGUMENT); + + if (event_table[event_type].status == ETAP_TRACE_OFF || + event_table[event_type].event != event_type) + return (KERN_NO_ACCESS); + + mp_disable_preemption(); + cpu = cpu_number(); + s = splhigh(); + + free = mbuff[cpu]->free; + mbuff_entryp = &mbuff[cpu]->entry[free]; + + /* + * Load monitor buffer entry + */ + + ETAP_TIMESTAMP(mbuff_entryp->time); + mbuff_entryp->event = event_id; + mbuff_entryp->flags = USER_EVENT; + mbuff_entryp->instance = (u_int) current_thread(); + mbuff_entryp->pc = 0; + + if (data != ETAP_DATA_NULL) + copyin((const char *) data, + (char *) mbuff_entryp->data, + data_size); + + mbuff[cpu]->free = (free+1) % mbuff_entries; + + if (mbuff[cpu]->free == 0) + mbuff[cpu]->timestamp++; + + splx(s); + mp_enable_preemption(); + + return (KERN_SUCCESS); + +#else /* ETAP_MONITOR */ + return (KERN_FAILURE); +#endif /* ETAP_MONITOR */ +} + +/* + * ROUTINE: etap_trace_thread [exported] + * + * FUNCTION: Toggles thread's ETAP trace status bit. 
+ * Thread creation (ETAP_P_THREAD_LIFE: BEGIN) is ONLY recorded + * here since a thread's trace status is disabled by default.
+ */ +kern_return_t +etap_mon_reconfig( + host_priv_t host_priv, + int nentries) +{ +#if ETAP_EVENT_MONITOR + struct monitor_buffer *nmbuff[NCPUS], *ombuff[NCPUS]; + int s, size, osize, i, ret; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + + if (nentries <= 0) /* must be at least 1 */ + return (KERN_FAILURE); + + size = ((nentries-1) * sizeof(struct mbuff_entry)) + + sizeof(struct monitor_buffer); + + for (i = 0; i < NCPUS; ++i) { + ret = kmem_alloc(kernel_map, + (vm_offset_t *)&nmbuff[i], + size); + if (ret != KERN_SUCCESS) { + if (i > 0) { + int j; + + for (j = 0; j < i; ++j) { + kmem_free(kernel_map, + (vm_offset_t)nmbuff[j], + size); + } + } + return (ret); + } + bzero((char *) nmbuff[i], size); + } + osize = ((mbuff_entries-1) * sizeof (struct mbuff_entry)) + + sizeof (struct monitor_buffer); + + s = splhigh(); + event_table_lock(); + for (i = 0; i < NCPUS; ++i) { + ombuff[i] = mbuff[i]; + mbuff[i] = nmbuff[i]; + } + mbuff_entries = nentries; + event_table_unlock(); + splx(s); + + for (i = 0; i < NCPUS; ++i) { + kmem_free(kernel_map, + (vm_offset_t)ombuff[i], + osize); + } + return (KERN_SUCCESS); +#else + return (KERN_FAILURE); +#endif /* ETAP_MONITOR */ +} + +/* + * ROUTINE: etap_new_probe [exported] + * + * FUNCTION: Reallocates monitor probe table, adding a new entry + * + */ +kern_return_t +etap_new_probe( + host_priv_t host_priv, + vm_address_t name, + vm_size_t namlen, + boolean_t trace_on, + vm_address_t id) +{ +#if ETAP_EVENT_MONITOR + event_table_t newtable, oldtable; + unsigned short i, nid; + int s; + vm_size_t newsize = (event_table_max + 1) * + sizeof (struct event_table_entry); + boolean_t duplicate_name = FALSE; + kern_return_t ret; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + + if (namlen > EVENT_NAME_LENGTH - 1) + return (KERN_INVALID_ARGUMENT); + + if ((ret = kmem_alloc(kernel_map, (vm_address_t *)&newtable, + newsize)) != KERN_SUCCESS) + return (ret); + + bcopy((const char *)event_table, 
(char *)newtable, event_table_max * + sizeof (struct event_table_entry)); + + if (copyin((const char *)name, + (char *)&newtable[event_table_max].name, namlen)) + return (KERN_INVALID_ADDRESS); + + newtable[event_table_max].name[EVENT_NAME_LENGTH - 1] = '\0'; + newtable[event_table_max].status = trace_on; + newtable[event_table_max].dynamic = 0; + + for (nid = i = 0; i < event_table_max; ++i) { + if (strcmp((char *)newtable[event_table_max].name, + newtable[i].name) == 0) { + duplicate_name = TRUE; + printf("duplicate name\n"); + } + nid = max(nid, newtable[i].event); + } + ++nid; + + if (nid >= ETAP_NO_TRACE || duplicate_name == TRUE) { + kmem_free(kernel_map, (vm_address_t)newtable, newsize); + if (nid >= ETAP_NO_TRACE) { + printf("KERN_RESOURCE_SHORTAGE\n"); + return (KERN_RESOURCE_SHORTAGE); + } + else { + printf("KERN_NAME_EXISTS\n"); + return (KERN_NAME_EXISTS); + } + } + + newtable[event_table_max].event = nid; + + s = splhigh(); + event_table_lock(); + oldtable = event_table; + event_table = newtable; + ++event_table_max; + event_table_unlock(); + splx(s); + + if (oldtable != event_table_init) + kmem_free(kernel_map, (vm_address_t)oldtable, + (event_table_max - 1) * + sizeof (struct event_table_entry)); + + *(unsigned short *)id = nid; + + return (KERN_SUCCESS); +#else + return (KERN_FAILURE); +#endif /* ETAP_EVENT_MONITOR */ + +} +/* + * ETAP trap probe hooks + */ + +void +etap_interrupt_probe(int interrupt, int flag_setting) +{ + u_short flag; + + if (flag_setting == 1) + flag = EVENT_BEGIN; + else + flag = EVENT_END; + + ETAP_PROBE_DATA_COND(ETAP_P_INTERRUPT, + flag, + current_thread(), + &interrupt, + sizeof(int), + 1); +} + +void +etap_machcall_probe1(int syscall) +{ + ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH, + EVENT_BEGIN | SYSCALL_TRAP, + current_thread(), + &syscall, + sizeof(int)); +} + +void +etap_machcall_probe2(void) +{ + ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH, + EVENT_END | SYSCALL_TRAP, + current_thread(), + 0, + 0); +} + +static void 
print_user_event(mbuff_entry_t); +static void print_kernel_event(mbuff_entry_t, boolean_t); +static void print_lock_event(mbuff_entry_t, const char *); + +#if MACH_KDB +void db_show_etap_log(db_expr_t, boolean_t, db_expr_t, char *); +/* + * + * ROUTINE: etap_print [internal] + * + * FUNCTION: print each mbuff table (for use in debugger) + * + */ +void +db_show_etap_log( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ +#if ETAP_MONITOR + int cpu = cpu_number(), last, i, first, step, end, restart; + boolean_t show_data = FALSE; + + last = (mbuff[cpu]->free - 1) % mbuff_entries; + + if(db_option(modif, 'r')) { + first = last; + step = -1; + end = -1; + restart = mbuff_entries - 1; + } else { + first = last + 1; + step = 1; + end = mbuff_entries; + restart = 0; + } + + if(db_option(modif, 'd')) + show_data = TRUE; + + for(i = first; i != end; i += step) { + if (mbuff[cpu]->entry[i].flags & USER_EVENT) + print_user_event(&mbuff[cpu]->entry[i]); + else + print_kernel_event(&mbuff[cpu]->entry[i], show_data); + } + for(i = restart; i != first; i += step) { + if (mbuff[cpu]->entry[i].flags & USER_EVENT) + print_user_event(&mbuff[cpu]->entry[i]); + else + print_kernel_event(&mbuff[cpu]->entry[i], show_data); + } +#else + printf("ETAP event monitor not configured\n"); +#endif /* ETAP_MONITOR */ +} + +#if ETAP_MONITOR +static +void +print_user_event(mbuff_entry_t record) +{ + char *s, buf[256]; + + db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec, + record->time.tv_nsec); + switch (record->pc) + { + case ETAP_P_USER_EVENT0: s = "0"; break; + case ETAP_P_USER_EVENT1: s = "1"; break; + case ETAP_P_USER_EVENT2: s = "2"; break; + case ETAP_P_USER_EVENT3: s = "3"; break; + case ETAP_P_USER_EVENT4: s = "4"; break; + case ETAP_P_USER_EVENT5: s = "5"; break; + case ETAP_P_USER_EVENT6: s = "6"; break; + case ETAP_P_USER_EVENT7: s = "7"; break; + case ETAP_P_USER_EVENT8: s = "8"; break; + case ETAP_P_USER_EVENT9: s = "9"; break; + case 
ETAP_P_USER_EVENT10: s = "10"; break; + case ETAP_P_USER_EVENT11: s = "11"; break; + case ETAP_P_USER_EVENT12: s = "12"; break; + case ETAP_P_USER_EVENT13: s = "13"; break; + case ETAP_P_USER_EVENT14: s = "14"; break; + case ETAP_P_USER_EVENT15: s = "15"; break; + case ETAP_P_USER_EVENT16: s = "16"; break; + case ETAP_P_USER_EVENT17: s = "17"; break; + case ETAP_P_USER_EVENT18: s = "18"; break; + case ETAP_P_USER_EVENT19: s = "19"; break; + case ETAP_P_USER_EVENT20: s = "20"; break; + case ETAP_P_USER_EVENT21: s = "21"; break; + case ETAP_P_USER_EVENT22: s = "22"; break; + case ETAP_P_USER_EVENT23: s = "23"; break; + case ETAP_P_USER_EVENT24: s = "24"; break; + case ETAP_P_USER_EVENT25: s = "25"; break; + case ETAP_P_USER_EVENT26: s = "26"; break; + case ETAP_P_USER_EVENT27: s = "27"; break; + case ETAP_P_USER_EVENT28: s = "28"; break; + case ETAP_P_USER_EVENT29: s = "29"; break; + case ETAP_P_USER_EVENT30: s = "30"; break; + case ETAP_P_USER_EVENT31: s = "31"; break; + default: + sprintf(buf, "dynamic %x", record->pc); + s = buf; + break; + } + + db_printf("user probe %s: [%x] data = %x %x %x %x\n", + s, + record->event, + record->data[0], + record->data[1], + record->data[2], + record->data[3]); +} + +static +void +print_kernel_event(mbuff_entry_t record, boolean_t data) +{ + char *text_name; + int i; + + /* assume zero event means that record was never written to */ + if(record->event == 0) + return; + + db_printf("%x: %x%08x: ", record->instance, record->time.tv_sec, + record->time.tv_nsec); + + switch (record->event) { + + case ETAP_P_THREAD_LIFE : + if (record->flags & EVENT_BEGIN) + db_printf("thread created [T:%x A:%x] P:%d\n", + record->data[0], + record->data[1], + record->data[2]); + else + db_printf("thread terminated [T:%x A:%x] P:%d\n", + record->data[0], + record->data[1], + record->data[2]); + break; + + case ETAP_P_SYSCALL_MACH : + if (record->flags & SYSCALL_TRAP) + text_name = system_table_lookup(SYS_TABLE_MACH_TRAP, + record->data[0]); + else + 
text_name = system_table_lookup(SYS_TABLE_MACH_MESSAGE, + record->data[0]); + + if (record->flags & EVENT_BEGIN) + db_printf("mach enter: %s [%x]\n", + text_name, + record->data[0]); + else + db_printf("mach exit :\n"); + break; + + case ETAP_P_SYSCALL_UNIX : + text_name = system_table_lookup(SYS_TABLE_UNIX_SYSCALL, + record->data[0]); + + if (record->flags & EVENT_BEGIN) + db_printf("unix enter: %s\n", text_name); + else + db_printf("unix exit : %s\n", text_name); + break; + + case ETAP_P_THREAD_CTX : + if (record->flags & EVENT_END) + db_printf("context switch to %x ", + record->data[0]); + else /* EVENT_BEGIN */ + db_printf("context switch from %x ", + record->data[0]); + + switch (record->data[1]) { + case BLOCKED_ON_SEMAPHORE : + db_printf("R: semaphore\n"); break; + case BLOCKED_ON_LOCK : + db_printf("R: lock\n"); break; + case BLOCKED_ON_MUTEX_LOCK : + db_printf("R: mutex lock\n"); break; + case BLOCKED_ON_COMPLEX_LOCK : + db_printf("R: complex lock\n"); break; + case BLOCKED_ON_PORT_RCV : + db_printf("R: port receive\n"); break; + case BLOCKED_ON_REAPER_DONE : + db_printf("R: reaper thread done\n"); break; + case BLOCKED_ON_IDLE_DONE : + db_printf("R: idle thread done\n"); break; + case BLOCKED_ON_TERMINATION : + db_printf("R: termination\n"); break; + default : + if (record->data[2]) + db_printf("R: ast %x\n", record->data[2]); + else + db_printf("R: undefined block\n"); + }; + break; + + case ETAP_P_INTERRUPT : + if (record->flags & EVENT_BEGIN) { + text_name = system_table_lookup(SYS_TABLE_INTERRUPT, + record->data[0]); + db_printf("intr enter: %s\n", text_name); + } else + db_printf("intr exit\n"); + break; + + case ETAP_P_ACT_ABORT : + db_printf("activation abort [A %x : S %x]\n", + record->data[1], + + record->data[0]); + break; + + case ETAP_P_PRIORITY : + db_printf("priority changed for %x N:%d O:%d\n", + record->data[0], + record->data[1], + record->data[2]); + break; + + case ETAP_P_EXCEPTION : + text_name = 
system_table_lookup(SYS_TABLE_EXCEPTION, + record->data[0]); + db_printf("exception: %s\n", text_name); + break; + + case ETAP_P_DEPRESSION : + if (record->flags & EVENT_BEGIN) + db_printf("priority depressed\n"); + else { + if (record->data[0] == 0) + db_printf("priority undepressed : timed out\n"); + else + db_printf("priority undepressed : self inflicted\n"); + } + break; + + case ETAP_P_MISC : + db_printf("flags: %x data: %x %x %x %x\n", record->flags, + record->data[0], record->data[1], record->data[2], + record->data[3]); + break; + + case ETAP_P_DETAP : + printf("flags: %x rtc: %x %09x dtime: %x %09x\n", + record->flags, record->data[0], record->data[1], + record->data[2], record->data[3]); + break; + + default: + for(i = 0; event_table_init[i].event != ETAP_NO_TRACE; ++i) + if(record->event == event_table_init[i].event) { + print_lock_event(record, event_table_init[i].name); + return; + } + db_printf("Unknown event: %d\n", record->event); + break; + } + if(data) + db_printf(" Data: %08x %08x %08x %08x\n", record->data[0], + record->data[1], record->data[2], record->data[3]); +} + +void print_lock_event(mbuff_entry_t record, const char *name) +{ + char *sym1, *sym2; + db_addr_t offset1, offset2; + + db_find_sym_and_offset(record->data[0], &sym1, &offset1); + + db_printf("%15s", name); + if (record->flags & SPIN_LOCK) + printf(" spin "); + else if (record->flags & READ_LOCK) + printf(" read "); + else if (record->flags & WRITE_LOCK) + printf(" write "); + else + printf(" undef "); + + if (record->flags & ETAP_CONTENTION) { + db_printf("wait lock %s+%x\n", + sym1, offset1); + } + else if (record->flags & ETAP_DURATION) { + db_find_sym_and_offset(record->data[1], &sym2, &offset2); + db_printf("lock %x+%x unlock %x+%x\n", + sym1, offset1, sym2, offset2); + } else { + db_printf("illegal op: neither HOLD or WAIT are specified\n"); + } + +} + +char * +system_table_lookup(unsigned int table, unsigned int number) +{ + int x; + char *name = NULL; + unsigned int 
offset; + + switch (table) { + case SYS_TABLE_MACH_TRAP: + name = mach_trap_name(number >> 4); + break; + case SYS_TABLE_MACH_MESSAGE: + for (x=0; x < mach_message_table_entries; x++) { + if (mach_message_table[x].number == number) { + name = mach_message_table[x].name; + break; + } + } + break; + case SYS_TABLE_UNIX_SYSCALL: + number = -number; + name = syscall_name(number); + break; + case SYS_TABLE_INTERRUPT: + db_find_sym_and_offset((int)ivect[number], &name, &offset); + break; + case SYS_TABLE_EXCEPTION: + name = exception_name(number); + break; + } + return (name != NULL) ? name : "undefined"; +} + +#endif /* MACH_KDB */ +#endif /* ETAP_MONITOR */ diff --git a/osfmk/kern/etap_macros.h b/osfmk/kern/etap_macros.h new file mode 100644 index 000000000..7935b6276 --- /dev/null +++ b/osfmk/kern/etap_macros.h @@ -0,0 +1,440 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * The Event Trace Analysis Package + * ================================ + * + * Function: Traces micro-kernel events. 
+ * + * Macro Notes: Several macros are added throughout the lock code. + * These macros allow for convenient configuration + * and code readability. + * + * The macro prefixes determine a specific trace + * configuration operation: + * + * CUM - Cumulative trace specific operation. + * MON - Monitored trace specific operation. + * ETAP - Both a cumulative and monitored trace + * operation. + */ + + +#ifndef _KERN_ETAP_MACROS_H_ +#define _KERN_ETAP_MACROS_H_ + +#include +#include +#include +#include +#include + + +#if ETAP + +#include +#include +#include +#include + +extern void etap_init_phase1(void); +extern void etap_init_phase2(void); +extern void etap_event_table_assign(struct event_table_chain *, etap_event_t); +extern unsigned int etap_get_pc(void); +extern event_table_t event_table; +extern subs_table_t subs_table; + +/* + * Time Macros + */ + +#define ETAP_TIMESTAMP(t) rtc_gettime_interrupts_disabled(&t) +#define ETAP_TIME_SUM(t,sum_me) t += sum_me +#define ETAP_TIME_SUB(t,stop,start) \ +MACRO_BEGIN \ + (t) = (stop); \ + SUB_MACH_TIMESPEC(&(t), &(start)); \ +MACRO_END +#define ETAP_TIME_SQR(t,sqr_me) t += sqr_me*sqr_me +#define ETAP_TIME_DIV(r,n,d) r = (u_short) n/d +#define ETAP_TIME_IS_ZERO(t) ((t).tv_sec == 0) +#define ETAP_TIME_CLEAR(t) ((t).tv_sec = 0) +#define ETAP_TIME_GREATER(t1,t2) ((t1) > (t2)) + +#else /* ETAP */ + +#define etap_init_phase1() +#define etap_init_phase2() +#define etap_event_table_assign(event) +#define ETAP_TIMESTAMP(t) +#define ETAP_TIME_SUB(t,start,stop) +#define ETAP_TIME_CLEAR(t) + +#endif /* ETAP */ + + +/* + * =================================================== + * ETAP: cumulative trace specific macros + * =================================================== + */ + +#if ETAP_LOCK_ACCUMULATE + +extern cbuff_entry_t etap_cbuff_reserve(event_table_t); +#if MACH_LDEBUG +extern simple_lock_t cbuff_locks; +#else +extern simple_lock_data_t cbuff_locks; +#endif +extern int cbuff_width; + +/* + * If cumulative hold tracing is 
enabled for the event (i.e., acquired lock), + * the CUM_HOLD_ACCUMULATE macro will update the appropriate cumulative buffer + * entry with the newly collected hold data. + */ + +#define CUM_HOLD_ACCUMULATE(cp,total_time,dynamic,trace) \ +MACRO_BEGIN \ + u_short _bucket; \ + if ((cp) != CBUFF_ENTRY_NULL && ((trace) & CUM_DURATION)) { \ + if (dynamic) \ + simple_lock_no_trace(&cbuff_locks[dynamic-1]); \ + (cp)->hold.triggered++; \ + ETAP_TIME_SUM((cp)->hold.time,(total_time)); \ + ETAP_TIME_SQR((cp)->hold.time_sq,(total_time)); \ + if (ETAP_TIME_IS_ZERO((cp)->hold.min_time) || \ + ETAP_TIME_GREATER((cp)->hold.min_time,(total_time))) \ + (cp)->hold.min_time = (total_time); \ + if (ETAP_TIME_GREATER((total_time),(cp)->hold.max_time)) \ + (cp)->hold.max_time = (total_time); \ + ETAP_TIME_DIV(_bucket,(total_time),cbuff_width); \ + if (_bucket >= ETAP_CBUFF_IBUCKETS) \ + (cp)->hold_interval[ETAP_CBUFF_IBUCKETS-1]++; \ + else \ + (cp)->hold_interval[_bucket]++; \ + if (dynamic) \ + simple_unlock_no_trace(&cbuff_locks[dynamic-1]); \ + } \ +MACRO_END + +/* + * If cumulative wait tracing is enabled for the event (i.e., acquired lock), + * the CUM_WAIT_ACCUMULATE macro will update the appropriate cumulative + * buffer entry with the newly collected wait data. 
+ */ + +#define CUM_WAIT_ACCUMULATE(cp,total_time,dynamic,trace) \ +MACRO_BEGIN \ + u_short _bucket; \ + if ((cp) != CBUFF_ENTRY_NULL && ((trace) & CUM_CONTENTION)) { \ + if (dynamic) \ + simple_lock_no_trace(&cbuff_locks[dynamic-1]); \ + (cp)->wait.triggered++; \ + ETAP_TIME_SUM((cp)->wait.time,(total_time)); \ + ETAP_TIME_SQR((cp)->wait.time_sq,(total_time)); \ + if (ETAP_TIME_IS_ZERO((cp)->wait.min_time) || \ + ETAP_TIME_GREATER((cp)->wait.min_time,(total_time))) \ + (cp)->wait.min_time = (total_time); \ + if (ETAP_TIME_GREATER((total_time),(cp)->wait.max_time)) \ + (cp)->wait.max_time = (total_time); \ + ETAP_TIME_DIV(_bucket,(total_time),cbuff_width); \ + if (_bucket >= ETAP_CBUFF_IBUCKETS) \ + (cp)->wait_interval[ETAP_CBUFF_IBUCKETS-1]++; \ + else \ + (cp)->wait_interval[_bucket]++; \ + if (dynamic) \ + simple_unlock_no_trace(&cbuff_locks[dynamic-1]); \ + } \ +MACRO_END + +/* + * Initially a lock's cbuff_read pointer is set to CBUFF_ENTRY_NULL. This + * saves space in the cumulative buffer in the event that a read lock is + * not acquired. In the case that a read lock is acquired, the + * CUM_READ_ENTRY_RESERVE macro is called. Here a cumulative + * record is reserved and initialized. 
+ */ + +#define CUM_READ_ENTRY_RESERVE(l,cp,trace) \ +MACRO_BEGIN \ + if ((cp) == CBUFF_ENTRY_NULL && (trace) & ETAP_CUMULATIVE) { \ + (cp) = etap_cbuff_reserve(lock_event_table(l)); \ + if ((cp) != CBUFF_ENTRY_NULL) { \ + (cp)->event = lock_event_table(l)->event; \ + (cp)->instance = (u_int) l; \ + (cp)->kind = READ_LOCK; \ + } \ + } \ +MACRO_END + +#else /* ETAP_LOCK_ACCUMULATE */ +#define etap_cbuff_reserve(et) +#define CUM_HOLD_ACCUMULATE(cp,t,d,tr) +#define CUM_WAIT_ACCUMULATE(cp,t,d,tr) +#define CUM_READ_ENTRY_RESERVE(l,rep,tr) +#endif /* ETAP_LOCK_ACCUMULATE */ + +/* + * =============================================== + * ETAP: monitor trace specific macros + * =============================================== + */ + +#if ETAP_MONITOR +extern int mbuff_entries; +extern monitor_buffer_t mbuff[]; +#endif /* ETAP_MONITOR */ + + +#if ETAP_LOCK_MONITOR + +/* + * If monitor tracing is enabled for the lock, the + * MON_DATA_COLLECT macro will write collected lock data to + * the next slot in a cpu specific monitor buffer. Circular + * buffer maintenance is also performed here. 
+ */ + +#define MON_DATA_COLLECT(l,e,total_time,type,op,trace) \ +MACRO_BEGIN \ + mbuff_entry_t _mp; \ + int _cpu, _ent, _s; \ + if ((trace) & op) { \ + mp_disable_preemption(); \ + _cpu = cpu_number(); \ + _s = splhigh(); \ + _ent = mbuff[_cpu]->free; \ + _mp = &mbuff[_cpu]->entry[_ent]; \ + _mp->event = lock_event_table(l)->event; \ + _mp->flags = ((op) | (type)); \ + _mp->instance = (u_int) (l); \ + _mp->time = (total_time); \ + _mp->data[0] = (e)->start_pc; \ + _mp->data[1] = (e)->end_pc; \ + mbuff[_cpu]->free = (_ent+1) % mbuff_entries; \ + if (mbuff[_cpu]->free == 0) \ + mbuff[_cpu]->timestamp++; \ + splx(_s); \ + mp_enable_preemption(); \ + } \ +MACRO_END + +#define MON_CLEAR_PCS(l) \ +MACRO_BEGIN \ + (l)->start_pc = 0; \ + (l)->end_pc = 0; \ +MACRO_END + +#define MON_ASSIGN_PC(target,source,trace) \ + if ((trace) & ETAP_MONITORED) target = source + +#else /* ETAP_LOCK_MONITOR */ +#define MON_DATA_COLLECT(l,le,tt,t,o,tr) +#define MON_GET_PC(pc,tr) +#define MON_CLEAR_PCS(l) +#define MON_ASSIGN_PC(t,s,tr) +#endif /* ETAP_LOCK_MONITOR */ + + +#if ETAP_EVENT_MONITOR + +#define ETAP_PROBE_DATA_COND(_event, _flags, _thread, _data, _size, _cond) \ +MACRO_BEGIN \ + mbuff_entry_t _mp; \ + int _cpu, _ent, _s; \ + if (event_table[_event].status && (_cond)) { \ + mp_disable_preemption(); \ + _cpu = cpu_number(); \ + _s = splhigh(); \ + _ent = mbuff[_cpu]->free; \ + _mp = &mbuff[_cpu]->entry[_ent]; \ + ETAP_TIMESTAMP(_mp->time); \ + _mp->pc = etap_get_pc(); \ + _mp->event = _event; \ + _mp->flags = KERNEL_EVENT | _flags; \ + _mp->instance = (u_int) _thread; \ + bcopy((char *) _data, (char *) _mp->data, _size); \ + mbuff[_cpu]->free = (_ent+1) % mbuff_entries; \ + if (mbuff[_cpu]->free == 0) \ + mbuff[_cpu]->timestamp++; \ + splx(_s); \ + mp_enable_preemption(); \ + } \ +MACRO_END + +#define ETAP_PROBE(_event, _flags, _thread) \ + ETAP_PROBE_DATA_COND(_event, _flags, _thread, 0, 0, 1) + +#define ETAP_PROBE_DATA(_event, _flags, _thread, _data, _size) \ + 
ETAP_PROBE_DATA_COND(_event, _flags, _thread, _data, _size, \ + (_thread)->etap_trace) + +#define ETAP_DATA_LOAD(ed, x) ((ed) = (u_int) (x)) +#define ETAP_SET_REASON(_th, _reason) ((_th)->etap_reason = (_reason)) + +#else /* ETAP_EVENT_MONITOR */ +#define ETAP_PROBE(e,f,th) +#define ETAP_PROBE_DATA(e,f,th,d,s) +#define ETAP_PROBE_DATA_COND(e,f,th,d,s,c) +#define ETAP_DATA_LOAD(d,x); +#define ETAP_SET_REASON(t,r) +#endif /* ETAP_EVENT_MONITOR */ + +/* + * ================================= + * ETAP: general lock macros + * ================================= + */ + +#if ETAP_LOCK_TRACE + +#define ETAP_TOTAL_TIME(t,stop,start) \ + ETAP_TIME_SUB((t),(stop),(start)) + +#define ETAP_DURATION_TIMESTAMP(e,trace) \ +MACRO_BEGIN \ + if ((trace) & ETAP_DURATION) \ + ETAP_TIMESTAMP((e)->start_hold_time); \ +MACRO_END + +#define ETAP_COPY_START_HOLD_TIME(entry,time,trace) \ +MACRO_BEGIN \ + if ((trace) & ETAP_DURATION) \ + (entry)->start_hold_time = time; \ +MACRO_END + +#define ETAP_CONTENTION_TIMESTAMP(e,trace) \ +MACRO_BEGIN \ + if ((trace) & ETAP_CONTENTION) \ + ETAP_TIMESTAMP((e)->start_wait_time); \ +MACRO_END + +#define ETAP_STAMP(event_table,trace,dynamic) \ +MACRO_BEGIN \ + if ((event_table) != EVENT_TABLE_NULL) { \ + (dynamic) = (event_table)->dynamic; \ + (trace) = (event_table)->status; \ + } \ +MACRO_END + +#define ETAP_WHOLE_OP(l) \ + (!(ETAP_TIME_IS_ZERO((l)->u.s.start_hold_time))) +#define ETAP_DURATION_ENABLED(trace) ((trace) & ETAP_DURATION) +#define ETAP_CONTENTION_ENABLED(trace) ((trace) & ETAP_CONTENTION) + +/* + * The ETAP_CLEAR_TRACE_DATA macro sets the etap specific fields + * of the simple_lock_t structure to zero. + * + * This is always done just before a simple lock is released. 
+ */ + +#define ETAP_CLEAR_TRACE_DATA(l) \ +MACRO_BEGIN \ + ETAP_TIME_CLEAR((l)->u.s.start_hold_time); \ + MON_CLEAR_PCS((l)); \ +MACRO_END + + +/* ================================================== + * The ETAP_XXX_ENTRY macros manipulate the locks + * start_list (a linked list of start data). + * ================================================== + */ + +#define ETAP_CREATE_ENTRY(entry,trace) \ +MACRO_BEGIN \ + if ((trace) & ETAP_TRACE_ON) \ + (entry) = get_start_data_node(); \ +MACRO_END + +#define ETAP_LINK_ENTRY(l,entry,trace) \ +MACRO_BEGIN \ + if ((trace) & ETAP_TRACE_ON) { \ + (entry)->next = (l)->u.s.start_list; \ + (l)->u.s.start_list = (entry); \ + (entry)->thread_id = (u_int) current_thread(); \ + ETAP_TIME_CLEAR((entry)->start_wait_time); \ + } \ +MACRO_END + +#define ETAP_FIND_ENTRY(l,entry,trace) \ +MACRO_BEGIN \ + u_int _ct; \ + _ct = (u_int) current_thread(); \ + (entry) = (l)->u.s.start_list; \ + while ((entry) != SD_ENTRY_NULL && (entry)->thread_id != _ct) \ + (entry) = (entry)->next; \ + if ((entry) == SD_ENTRY_NULL) \ + (trace) = 0; \ +MACRO_END + +#define ETAP_UNLINK_ENTRY(l,entry) \ +MACRO_BEGIN \ + boolean_t _first = TRUE; \ + start_data_node_t _prev; \ + u_int _ct; \ + _ct = (u_int) current_thread(); \ + (entry) = (l)->u.s.start_list; \ + while ((entry) != SD_ENTRY_NULL && (entry)->thread_id != _ct){ \ + _prev = (entry); \ + (entry) = (entry)->next; \ + _first = FALSE; \ + } \ + if (entry != SD_ENTRY_NULL) { \ + if (_first) \ + (l)->u.s.start_list = (entry)->next; \ + else \ + _prev->next = (entry)->next; \ + (entry)->next = SD_ENTRY_NULL; \ + } \ +MACRO_END + +#define ETAP_DESTROY_ENTRY(entry) \ +MACRO_BEGIN \ + if ((entry) != SD_ENTRY_NULL) \ + free_start_data_node ((entry)); \ +MACRO_END + +#else /* ETAP_LOCK_TRACE */ +#define ETAP_TOTAL_TIME(t,stop,start) +#define ETAP_DURATION_TIMESTAMP(le,tr) +#define ETAP_CONTENTION_TIMESTAMP(le,tr) +#define ETAP_COPY_START_HOLD_TIME(le,t,tr) +#define ETAP_STAMP(tt,tr,d) +#define 
ETAP_DURATION_ENABLED(tr) (0) /* always fails */ +#define ETAP_CONTENTION_ENABLED(tr) (0) /* always fails */ +#define ETAP_CLEAR_TRACE_DATA(l) +#define ETAP_CREATE_ENTRY(e,tr) +#define ETAP_LINK_ENTRY(l,e,tr) +#define ETAP_FIND_ENTRY(l,e,tr) +#define ETAP_UNLINK_ENTRY(l,e) +#define ETAP_DESTROY_ENTRY(e) +#endif /* ETAP_LOCK_TRACE */ + +#endif /* _KERN_ETAP_MACROS_H_ */ diff --git a/osfmk/kern/etap_map.c b/osfmk/kern/etap_map.c new file mode 100644 index 000000000..76b125c60 --- /dev/null +++ b/osfmk/kern/etap_map.c @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1996/09/17 16:26:58 bruel + * use standalone includes only + * [1996/09/17 15:38:08 bruel] + * + * Revision 1.1.4.1 1996/02/02 12:16:40 emcmanus + * Copied from nmk20b5_shared. 
+ * [1996/02/01 16:56:11 emcmanus] + * + * Revision 1.1.2.1 1995/12/30 17:12:07 emcmanus + * Renamed from i386/etap_map.c and made this file machine-independent. + * Delete declarations of event_table and subs_table, now declared with + * different types in etap_macros.h. + * [1995/12/30 17:03:55 emcmanus] + * + * Revision 1.1.2.4 1995/10/09 17:07:21 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:48:15 joe] + * + * Revision 1.1.2.3 1995/09/18 19:10:05 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:48:15 joe] + * + * Revision 1.1.2.2 1995/01/10 04:51:59 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * tweak signatures, a la osc1.3b26 + * [1994/12/09 20:38:32 dwm] + * + * mk6 CR801 - new file for mk6_shared from cnmk_shared. + * [1994/12/01 21:11:35 dwm] + * + * Revision 1.1.2.1 1994/10/21 18:35:57 joe + * Initial ETAP submission + * [1994/10/20 19:21:39 joe] + * + * $EndLog$ + */ +/* + * File : etap_map.c + * + * Pseudo-device driver to calculate the virtual addresses + * of all mappable ETAP buffers and tables: event table, + * subsystem table, cumulative buffer and monitor buffers. 
+ * + */ +/* + * Minor device number representation: + * + * 0 = ETAP_TABLE_EVENT + * 1 = ETAP_TABLE_SUBSYSTEM + * 2 = ETAP_BUFFER_CUMULATIVE + * 3 & up = a specific monitor buffer + * + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +#if ETAP_LOCK_ACCUMULATE +extern cumulative_buffer_t cbuff; +#endif /* ETAP_LOCK_ACCUMULATE */ + +#if ETAP_MONITOR +extern monitor_buffer_t mbuff[]; +#endif /* ETAP_MONITOR */ + + +/* + * etap_map_open - Check for valid minor device + */ + +io_return_t +etap_map_open( + dev_t dev, + dev_mode_t flags, + io_req_t ior) +{ + int buffer = minor(dev); + + if (buffer >= ETAP_MAX_DEVICES) + return(D_NO_SUCH_DEVICE); + + return(D_SUCCESS); +} + +vm_offset_t +etap_map_mmap ( + dev_t dev, + vm_offset_t off, + vm_prot_t prot) +{ + int buffer = minor(dev); + vm_offset_t addr; + + /* + * Check request validity + */ + + if (prot & VM_PROT_WRITE) + return(KERN_PROTECTION_FAILURE); + + if (buffer < 0 || buffer >= ETAP_MAX_DEVICES) + return(KERN_INVALID_ARGUMENT); + + switch(buffer) { + case ETAP_TABLE_EVENT : + addr = trunc_page((char *) event_table) + off; + break; + case ETAP_TABLE_SUBSYSTEM : + addr = trunc_page((char *) subs_table) + off; + break; + case ETAP_BUFFER_CUMULATIVE : +#if ETAP_LOCK_ACCUMULATE + addr = (vm_offset_t) cbuff + off; + break; +#else /* ETAP_LOCK_ACCUMULATE */ + return(KERN_INVALID_ARGUMENT); +#endif /* ETAP_LOCK_ACCUMULATE */ + + default : +#if ETAP_MONITOR + addr = (vm_offset_t) mbuff[buffer - 3] + off; + break; +#else /* ETAP_MONITOR */ + return(KERN_INVALID_ARGUMENT); +#endif /* ETAP_MONITOR */ + + } + return machine_btop(pmap_extract(pmap_kernel(), addr)); +} diff --git a/osfmk/kern/etap_map.h b/osfmk/kern/etap_map.h new file mode 100644 index 000000000..1e6349d0f --- /dev/null +++ b/osfmk/kern/etap_map.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1996/02/02 12:16:46 emcmanus + * Copied from nmk20b5_shared. + * [1996/02/01 16:56:16 emcmanus] + * + * Revision 1.1.2.1 1995/12/30 17:12:11 emcmanus + * Renamed from i386/etap_map.h and fixed parentheses in ETAP_MAX_DEVICES. + * [1995/12/30 17:04:00 emcmanus] + * + * Revision 1.1.2.4 1995/10/09 17:07:25 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:48:18 joe] + * + * Revision 1.1.2.3 1995/09/18 19:10:09 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:48:18 joe] + * + * Revision 1.1.2.2 1995/01/10 04:52:03 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * tweak protos, a la osc1.3b26 + * [1994/12/09 20:38:34 dwm] + * + * mk6 CR801 - new file for mk6_shared from cnmk_shared. 
+ * [1994/12/01 21:11:38 dwm] + * + * Revision 1.1.2.1 1994/10/21 18:36:01 joe + * Initial ETAP submission + * [1994/10/20 19:21:40 joe] + * + * $EndLog$ + */ +/* + * File : etap_map.h + */ + +#ifndef _ETAP_MAP_H_ +#define _ETAP_MAP_H_ + +#define ETAP_MAX_DEVICES (3+NCPUS) + + +extern io_return_t etap_map_open( + dev_t dev, + dev_mode_t flags, + io_req_t ior); + +extern vm_offset_t etap_map_mmap( + dev_t dev, + vm_offset_t off, + vm_prot_t prot); + +#endif /* _ETAP_MAP_H_ */ diff --git a/osfmk/kern/etap_options.h b/osfmk/kern/etap_options.h new file mode 100644 index 000000000..4af3a4022 --- /dev/null +++ b/osfmk/kern/etap_options.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:35 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.9.2 1995/10/09 17:13:48 devrcs + * Merged in RT3_SHARED ETAP code. 
+ * [1995/09/13 18:34:10 joe] + * + * Revision 1.1.6.1 1995/05/11 20:57:18 burke + * Update ETAP changes. + * [1995/05/09 17:15:03 burke] + * + * Revision 1.1.9.1 1995/09/18 19:13:34 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:34:10 joe] + * + * Revision 1.1.6.1 1995/05/11 20:57:18 burke + * Update ETAP changes. + * [1995/05/09 17:15:03 burke] + * + * Revision 1.1.3.1 1994/12/14 18:55:51 joe + * ETAP nswc merge + * [1994/12/14 17:07:33 joe] + * + * Revision 1.1.1.2 1994/12/12 15:34:48 joe + * Initial check-in + * + * $EndLog$ + */ +/* + * ETAP build options are selected using the config.debug configuration file. + * + * ETAP options are: + * ETAP_LOCK_ACCUMULATE - Cumulative lock tracing + * ETAP_LOCK_MONITOR - Monitor lock behavior + * ETAP_EVENT_MONITOR - Monitor general events + * + * Derived options are: + * ETAP_LOCK_TRACE - Equals one if either cumulative or monitored + * lock tracing is configured (zero otherwise). + * ETAP_MONITOR - Equals one if either lock or event monitoring + * is configured (zero otherwise). + */ + +#ifndef _KERN_ETAP_OPTIONS_H_ +#define _KERN_ETAP_OPTIONS_H_ + +#ifdef ETAP_DYNAMIC_OPTIONS +#include +#include +#include +#include +#else +#define ETAP 0 +#define ETAP_LOCK_MONITOR 0 +#define ETAP_LOCK_ACCUMULATE 0 +#define ETAP_EVENT_MONITOR 0 +#endif + +#if ETAP_LOCK_MONITOR || ETAP_LOCK_ACCUMULATE +#define ETAP_LOCK_TRACE 1 +#else /* ETAP_LOCK_MONITOR || ETAP_LOCK_ACCUMULATE */ +#define ETAP_LOCK_TRACE 0 +#endif /* ETAP_LOCK_MONITOR || ETAP_LOCK_ACCUMULATE */ + +#if ETAP_LOCK_MONITOR || ETAP_EVENT_MONITOR +#define ETAP_MONITOR 1 +#else /* ETAP_LOCK_MONITOR || ETAP_EVENT_MONITOR */ +#define ETAP_MONITOR 0 +#endif /* ETAP_LOCK_MONITOR || ETAP_EVENT_MONITOR */ + +#endif /* _KERN_ETAP_OPTIONS_H_ */ diff --git a/osfmk/kern/etap_pool.c b/osfmk/kern/etap_pool.c new file mode 100644 index 000000000..2e3971ace --- /dev/null +++ b/osfmk/kern/etap_pool.c @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.12.1 1996/09/17 16:27:00 bruel + * fixed bzero prototype. + * [96/09/17 bruel] + * + * Revision 1.1.2.4 1995/10/09 17:13:51 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:34:15 joe] + * + * Revision 1.1.2.3 1995/09/18 19:13:37 devrcs + * Merged in RT3_SHARED ETAP code. + * [1995/09/13 18:34:15 joe] + * + * Revision 1.1.2.2 1995/01/10 05:11:15 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * patch up spinlock references ==> simplelock + * [1994/12/09 20:54:30 dwm] + * + * mk6 CR801 - new file for mk6_shared from cnmk_shared. + * [1994/12/01 21:11:49 dwm] + * + * Revision 1.1.2.1 1994/10/21 18:28:50 joe + * Initial ETAP submission + * [1994/10/20 19:31:33 joe] + * + * $EndLog$ + */ +/* + * File: etap_pool.c + * + * etap_pool.c contains the functions for maintenance + * of the start_data_pool. 
The start_data_pool is + * used by the ETAP package. Its primary + * objective is to provide start_data_nodes to complex + * locks so they can hold start information for read + * locks (since multiple readers can acquire a read + * lock). Each complex lock will maintain a linked + * list of these nodes. + * + * NOTES: The start_data_pool is used instead of zalloc to + * eliminate complex lock dependancies. If zalloc was used, + * then no complex locks could be used in zalloc code paths. + * This is both difficult and unrealistic, since zalloc + * allocates memory dynamically. Hence, this dependancy is + * eliminated with the use of the statically allocated + * start_data_pool. + * + */ + +#include +#include +#include +#include +#include + +#if ETAP_LOCK_TRACE + +/* + * Statically allocate the start data pool, + * header and lock. + */ + +struct start_data_node sd_pool [SD_POOL_ENTRIES]; /* static buffer */ +start_data_node_t sd_free_list; /* pointer to free node list */ +int sd_sleepers; /* number of blocked threads */ + +simple_lock_data_t sd_pool_lock; + + +/* + * Interrupts must be disabled while the + * sd_pool_lock is taken. + */ + +#define pool_lock(s) \ +MACRO_BEGIN \ + s = splhigh(); \ + simple_lock(&sd_pool_lock); \ +MACRO_END + +#define pool_unlock(s) \ +MACRO_BEGIN \ + simple_unlock(&sd_pool_lock); \ + splx(s); \ +MACRO_END + + +/* + * ROUTINE: init_start_data_pool + * + * FUNCTION: Initialize the start_data_pool: + * - create the free list chain for the max + * number of entries. 
+ * - initialize the sd_pool_lock + */ + +void +init_start_data_pool(void) +{ + int x; + + simple_lock_init(&sd_pool_lock, ETAP_MISC_SD_POOL); + + /* + * Establish free list pointer chain + */ + + for (x=0; x < SD_POOL_ENTRIES-1; x++) + sd_pool[x].next = &sd_pool[x+1]; + + sd_pool[SD_POOL_ENTRIES-1].next = SD_ENTRY_NULL; + sd_free_list = &sd_pool[0]; + sd_sleepers = 0; +} + +/* + * ROUTINE: get_start_data_node + * + * FUNCTION: Returns a free node from the start data pool + * to the caller. If none are available, the + * call will block, then try again. + */ + +start_data_node_t +get_start_data_node(void) +{ + start_data_node_t avail_node; + spl_t s; + + pool_lock(s); + + /* + * If the pool does not have any nodes available, + * block until one becomes free. + */ + + while (sd_free_list == SD_ENTRY_NULL) { + + sd_sleepers++; + assert_wait((event_t) &sd_pool[0], THREAD_UNINT); + pool_unlock(s); + + printf ("DEBUG-KERNEL: empty start_data_pool\n"); + thread_block((void (*)(void)) 0); + + pool_lock(s); + sd_sleepers--; + } + + avail_node = sd_free_list; + sd_free_list = sd_free_list->next; + + pool_unlock(s); + + bzero ((char *) avail_node, sizeof(struct start_data_node)); + avail_node->next = SD_ENTRY_NULL; + + return (avail_node); +} + +/* + * ROUTINE: free_start_data_node + * + * FUNCTION: Releases start data node back to the sd_pool, + * so that it can be used again. + */ + +void +free_start_data_node ( + start_data_node_t node) +{ + boolean_t wakeup = FALSE; + spl_t s; + + if (node == SD_ENTRY_NULL) + return; + + pool_lock(s); + + node->next = sd_free_list; + sd_free_list = node; + + if (sd_sleepers) + wakeup = TRUE; + + pool_unlock(s); + + if (wakeup) + thread_wakeup((event_t) &sd_pool[0]); +} + +#endif /* ETAP_LOCK_TRACE */ diff --git a/osfmk/kern/etap_pool.h b/osfmk/kern/etap_pool.h new file mode 100644 index 000000000..7114cd52b --- /dev/null +++ b/osfmk/kern/etap_pool.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:54 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.4 1995/10/09 17:13:55 devrcs + * Merged RT3_SHARED version into `mainline.' + * [1995/09/13 16:17:31 joe] + * + * Revision 1.1.2.3 1995/09/18 19:13:40 devrcs + * Merged RT3_SHARED version into `mainline.' + * [1995/09/13 16:17:31 joe] + * + * Revision 1.1.2.2 1995/01/10 05:11:19 devrcs + * mk6 CR801 - new file for mk6_shared from cnmk_shared. + * [1994/12/01 21:11:51 dwm] + * + * Revision 1.1.2.1 1994/10/21 18:28:53 joe + * Initial ETAP submission + * [1994/10/20 19:31:35 joe] + * + * $EndLog$ + */ +/* + * File : etap_pool.h + * + * The start_data_node structure is primarily needed to hold + * start information for read locks (since multiple readers + * can acquire a read lock). For consistency, however, the + * structure is used for write locks as well. Each complex + * lock will maintain a linked list of these structures. 
+ */ + +#ifndef _KERN_ETAP_POOL_H_ +#define _KERN_ETAP_POOL_H_ + +#include +#include +#include + +#if ETAP_LOCK_TRACE + +#include +#include +#include +#include + +struct start_data_node { + unsigned int thread_id; /* thread id */ + etap_time_t start_hold_time; /* time of last acquisition */ + etap_time_t start_wait_time; /* time of first miss */ + unsigned int start_pc; /* pc of acquiring function */ + unsigned int end_pc; /* pc of relinquishing function */ + struct start_data_node *next; /* pointer to next list entry */ +}; + +typedef struct start_data_node* start_data_node_t; + +/* + * The start_data_node pool is statically + * allocated and privatly maintained + */ + +#define SD_POOL_ENTRIES (NCPUS * 256) + +extern void init_start_data_pool(void); +extern start_data_node_t get_start_data_node(void); +extern void free_start_data_node(start_data_node_t); + +#else /* ETAP_LOCK_TRACE */ +typedef boolean_t start_data_node_t; +#define get_start_data_node() +#define free_start_start_data_node(node) +#endif /* ETAP_LOCK_TRACE */ + +#define SD_ENTRY_NULL ((start_data_node_t) 0) + +#endif /* _KERN_ETAP_POOL_H_ */ diff --git a/osfmk/kern/exception.c b/osfmk/kern/exception.c new file mode 100644 index 000000000..70c1d0e0a --- /dev/null +++ b/osfmk/kern/exception.c @@ -0,0 +1,317 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* JMM - will become exception.h */ +#include + +#if MACH_KDB +#include +#endif /* MACH_KDB */ + +#if MACH_KDB + +#include + +#if iPSC386 || iPSC860 +boolean_t debug_user_with_kdb = TRUE; +#else +boolean_t debug_user_with_kdb = FALSE; +#endif + +#endif /* MACH_KDB */ + +unsigned long c_thr_exc_raise = 0; +unsigned long c_thr_exc_raise_state = 0; +unsigned long c_thr_exc_raise_state_id = 0; +unsigned long c_tsk_exc_raise = 0; +unsigned long c_tsk_exc_raise_state = 0; +unsigned long c_tsk_exc_raise_state_id = 0; + + +/* + * Routine: exception_deliver + * Purpose: + * Make an upcall to the exception server provided. + * Conditions: + * Nothing locked and no resources held. + * Called from an exception context, so + * thread_exception_return and thread_kdb_return + * are possible. + * Returns: + * If the exception was not handled by this handler + */ +void +exception_deliver( + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt, + struct exception_action *excp, + mutex_t *mutex) +{ + thread_act_t a_self = current_act(); + ipc_port_t exc_port; + int behavior; + int flavor; + kern_return_t kr; + + /* + * Save work if we are terminating. + * Just go back to our AST handler. + */ + if (!a_self->active) + thread_exception_return(); + + /* + * Snapshot the exception action data under lock for consistency. + * Hold a reference to the port over the exception_raise_* calls + * so it can't be destroyed. This seems like overkill, but keeps + * the port from disappearing between now and when + * ipc_object_copyin_from_kernel is finally called. 
+ */ + mutex_lock(mutex); + exc_port = excp->port; + if (!IP_VALID(exc_port)) { + mutex_unlock(mutex); + return; + } + ip_lock(exc_port); + if (!ip_active(exc_port)) { + ip_unlock(exc_port); + mutex_unlock(mutex); + return; + } + ip_reference(exc_port); + exc_port->ip_srights++; + ip_unlock(exc_port); + + flavor = excp->flavor; + behavior = excp->behavior; + mutex_unlock(mutex); + + switch (behavior) { + case EXCEPTION_STATE: { + mach_msg_type_number_t state_cnt; + natural_t state[ THREAD_MACHINE_STATE_MAX ]; + + c_thr_exc_raise_state++; + state_cnt = state_count[flavor]; + kr = thread_getstatus(a_self, flavor, + (thread_state_t)state, + &state_cnt); + if (kr == KERN_SUCCESS) { + kr = exception_raise_state(exc_port, exception, + code, codeCnt, + &flavor, + state, state_cnt, + state, &state_cnt); + if (kr == MACH_MSG_SUCCESS) + kr = thread_setstatus(a_self, flavor, + (thread_state_t)state, + state_cnt); + } + + if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) + thread_exception_return(); + /*NOTREACHED*/ + return; + } + + case EXCEPTION_DEFAULT: + c_thr_exc_raise++; + kr = exception_raise(exc_port, + retrieve_act_self_fast(a_self), + retrieve_task_self_fast(a_self->task), + exception, + code, codeCnt); + + if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) + thread_exception_return(); + /*NOTREACHED*/ + return; + + case EXCEPTION_STATE_IDENTITY: { + mach_msg_type_number_t state_cnt; + natural_t state[ THREAD_MACHINE_STATE_MAX ]; + + c_thr_exc_raise_state_id++; + state_cnt = state_count[flavor]; + kr = thread_getstatus(a_self, flavor, + (thread_state_t)state, + &state_cnt); + if (kr == KERN_SUCCESS) { + kr = exception_raise_state_identity(exc_port, + retrieve_act_self_fast(a_self), + retrieve_task_self_fast(a_self->task), + exception, + code, codeCnt, + &flavor, + state, state_cnt, + state, &state_cnt); + if (kr == MACH_MSG_SUCCESS) + kr = thread_setstatus(a_self, flavor, + (thread_state_t)state, + state_cnt); + } + + if (kr == KERN_SUCCESS || kr == 
MACH_RCV_PORT_DIED) + thread_exception_return(); + /*NOTREACHED*/ + return; + } + + default: + panic ("bad exception behavior!"); + }/* switch */ +} + +/* + * Routine: exception + * Purpose: + * The current thread caught an exception. + * We make an up-call to the thread's exception server. + * Conditions: + * Nothing locked and no resources held. + * Called from an exception context, so + * thread_exception_return and thread_kdb_return + * are possible. + * Returns: + * Doesn't return. + */ +void +exception( + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt) +{ + thread_act_t thr_act; + task_t task; + host_priv_t host_priv; + struct exception_action *excp; + mutex_t *mutex; + + assert(exception != EXC_RPC_ALERT); + + if (exception == KERN_SUCCESS) + panic("exception"); + + /* + * Try to raise the exception at the activation level. + */ + thr_act = current_act(); + mutex = mutex_addr(thr_act->lock); + excp = &thr_act->exc_actions[exception]; + exception_deliver(exception, code, codeCnt, excp, mutex); + + /* + * Maybe the task level will handle it. + */ + task = current_task(); + mutex = mutex_addr(task->lock); + excp = &task->exc_actions[exception]; + exception_deliver(exception, code, codeCnt, excp, mutex); + + /* + * How about at the host level? + */ + host_priv = host_priv_self(); + mutex = mutex_addr(host_priv->lock); + excp = &host_priv->exc_actions[exception]; + exception_deliver(exception, code, codeCnt, excp, mutex); + + /* + * Nobody handled it, terminate the task. + */ + +#if MACH_KDB + if (debug_user_with_kdb) { + /* + * Debug the exception with kdb. + * If kdb handles the exception, + * then thread_kdb_return won't return. 
+ */ + db_printf("No exception server, calling kdb...\n"); + thread_kdb_return(); + } +#endif /* MACH_KDB */ + + (void) task_terminate(task); + thread_exception_return(); + /*NOTREACHED*/ +} diff --git a/osfmk/kern/exception.h b/osfmk/kern/exception.h new file mode 100644 index 000000000..360da95fc --- /dev/null +++ b/osfmk/kern/exception.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _EXCEPTION_H_ +#define _EXCEPTION_H_ + +#include +#include +#include +#include + +/* + * Common storage for exception actions. + * There are arrays of these maintained at the activation, task, and host. 
+ */ +struct exception_action { + struct ipc_port *port; /* exception port */ + thread_state_flavor_t flavor; /* state flavor to send */ + exception_behavior_t behavior; /* exception type to raise */ +}; + +/* Make an up-call to a thread's exception server */ +extern void exception( + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt); + +#endif /* _EXCEPTION_H_ */ diff --git a/osfmk/kern/host.c b/osfmk/kern/host.c new file mode 100644 index 000000000..9b8f7d7a4 --- /dev/null +++ b/osfmk/kern/host.c @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * host.c + * + * Non-ipc host functions. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if DIPC +#include +#include +#endif + +vm_statistics_data_t vm_stat[NCPUS]; + +host_data_t realhost; + +kern_return_t +host_processors( + host_priv_t host_priv, + processor_array_t *processor_list, + mach_msg_type_number_t *countp) +{ + register int i; + register processor_t *tp; + vm_offset_t addr; + unsigned int count; + + if (host_priv == HOST_PRIV_NULL) + return(KERN_INVALID_ARGUMENT); + + assert(host_priv == &realhost); + + /* + * Determine how many processors we have. + * (This number shouldn't change.) 
+ */ + + count = 0; + for (i = 0; i < NCPUS; i++) + if (machine_slot[i].is_cpu) + count++; + + if (count == 0) + panic("host_processors"); + + addr = kalloc((vm_size_t) (count * sizeof(mach_port_t))); + if (addr == 0) + return KERN_RESOURCE_SHORTAGE; + + tp = (processor_t *) addr; + for (i = 0; i < NCPUS; i++) + if (machine_slot[i].is_cpu) + *tp++ = cpu_to_processor(i); + + *countp = count; + *processor_list = (processor_array_t)addr; + + /* do the conversion that Mig should handle */ + + tp = (processor_t *) addr; + for (i = 0; i < count; i++) + ((mach_port_t *) tp)[i] = + (mach_port_t)convert_processor_to_port(tp[i]); + + return KERN_SUCCESS; +} + +kern_return_t +host_info( + host_t host, + host_flavor_t flavor, + host_info_t info, + mach_msg_type_number_t *count) +{ + + if (host == HOST_NULL) + return(KERN_INVALID_ARGUMENT); + + switch(flavor) { + + case HOST_BASIC_INFO: + { + register host_basic_info_t basic_info; + + /* + * Basic information about this host. + */ + if (*count < HOST_BASIC_INFO_COUNT) + return(KERN_FAILURE); + + basic_info = (host_basic_info_t) info; + + basic_info->max_cpus = machine_info.max_cpus; + basic_info->avail_cpus = machine_info.avail_cpus; + basic_info->memory_size = machine_info.memory_size; + basic_info->cpu_type = + machine_slot[master_processor->slot_num].cpu_type; + basic_info->cpu_subtype = + machine_slot[master_processor->slot_num].cpu_subtype; + + *count = HOST_BASIC_INFO_COUNT; + + return(KERN_SUCCESS); + } + + case HOST_SCHED_INFO: + { + register host_sched_info_t sched_info; + extern int tick; /* XXX */ + + /* + * Return scheduler information. 
+ */ + if (*count < HOST_SCHED_INFO_COUNT) + return(KERN_FAILURE); + + sched_info = (host_sched_info_t) info; + + sched_info->min_timeout = tick / 1000; /* XXX */ + sched_info->min_quantum = tick / 1000; /* XXX */ + + *count = HOST_SCHED_INFO_COUNT; + + return(KERN_SUCCESS); + } + + case HOST_RESOURCE_SIZES: + { + /* + * Return sizes of kernel data structures + */ + if (*count < HOST_RESOURCE_SIZES_COUNT) + return(KERN_FAILURE); + + /* XXX Fail until ledgers are implemented */ + return(KERN_INVALID_ARGUMENT); + } + + case HOST_PRIORITY_INFO: + { + register host_priority_info_t priority_info; + + if (*count < HOST_PRIORITY_INFO_COUNT) + return(KERN_FAILURE); + + priority_info = (host_priority_info_t) info; + + priority_info->kernel_priority = MINPRI_KERNBAND; + priority_info->system_priority = MINPRI_KERNBAND; + priority_info->server_priority = MINPRI_HIGHBAND; + priority_info->user_priority = BASEPRI_DEFAULT; + priority_info->depress_priority = DEPRESSPRI; + priority_info->idle_priority = IDLEPRI; + priority_info->minimum_priority = MINPRI_STANDARD; + priority_info->maximum_priority = MAXPRI_HIGHBAND; + + *count = HOST_PRIORITY_INFO_COUNT; + + return(KERN_SUCCESS); + } + + /* + * JMM - Temporary check to see if semaphore traps are + * supported on this machine. Sadly, just trying to call + * the traps gets your process terminated instead of + * returning an error, so we have to query during mach_init + * to see if the machine supports them. 
+ * + * KERN_INVALID_ARGUMENT - kernel has no semaphore traps + * KERN_SUCCESS - kernel has sema traps (up to semaphore_signal_wait) + * KERN_SEMAPHORE_DESTROYED - kernel has the latest semaphore traps + */ + case HOST_SEMAPHORE_TRAPS: + { + *count = 0; + return KERN_SUCCESS; + } + + default: + return(KERN_INVALID_ARGUMENT); + } +} + +kern_return_t +host_statistics( + host_t host, + host_flavor_t flavor, + host_info_t info, + mach_msg_type_number_t *count) +{ + + if (host == HOST_NULL) + return(KERN_INVALID_HOST); + + switch(flavor) { + + case HOST_LOAD_INFO: { + register host_load_info_t load_info; + extern integer_t avenrun[3], mach_factor[3]; + + if (*count < HOST_LOAD_INFO_COUNT) + return(KERN_FAILURE); + + load_info = (host_load_info_t) info; + + bcopy((char *) avenrun, + (char *) load_info->avenrun, + sizeof avenrun); + bcopy((char *) mach_factor, + (char *) load_info->mach_factor, + sizeof mach_factor); + + *count = HOST_LOAD_INFO_COUNT; + return(KERN_SUCCESS); + } + + case HOST_VM_INFO: { + register vm_statistics_t stat; + vm_statistics_data_t host_vm_stat; + extern int vm_page_free_count, vm_page_active_count, + vm_page_inactive_count, vm_page_wire_count; + + if (*count < HOST_VM_INFO_COUNT) + return(KERN_FAILURE); + + stat = &vm_stat[0]; + host_vm_stat = *stat; +#if NCPUS > 1 + { + register int i; + + for (i = 1; i < NCPUS; i++) { + stat++; + host_vm_stat.zero_fill_count += + stat->zero_fill_count; + host_vm_stat.reactivations += + stat->reactivations; + host_vm_stat.pageins += stat->pageins; + host_vm_stat.pageouts += stat->pageouts; + host_vm_stat.faults += stat->faults; + host_vm_stat.cow_faults += stat->cow_faults; + host_vm_stat.lookups += stat->lookups; + host_vm_stat.hits += stat->hits; + } + } +#endif + + stat = (vm_statistics_t) info; + + stat->free_count = vm_page_free_count; + stat->active_count = vm_page_active_count; + stat->inactive_count = vm_page_inactive_count; + stat->wire_count = vm_page_wire_count; + stat->zero_fill_count = 
host_vm_stat.zero_fill_count; + stat->reactivations = host_vm_stat.reactivations; + stat->pageins = host_vm_stat.pageins; + stat->pageouts = host_vm_stat.pageouts; + stat->faults = host_vm_stat.faults; + stat->cow_faults = host_vm_stat.cow_faults; + stat->lookups = host_vm_stat.lookups; + stat->hits = host_vm_stat.hits; + + *count = HOST_VM_INFO_COUNT; + return(KERN_SUCCESS); + } + + case HOST_CPU_LOAD_INFO: { + host_cpu_load_info_t cpu_load_info; + unsigned long ticks_value1, ticks_value2; + int i; + +#define GET_TICKS_VALUE(__cpu,__state) \ +MACRO_BEGIN \ + do { \ + ticks_value1 = *(volatile integer_t *) \ + (&machine_slot[(__cpu)].cpu_ticks[(__state)]); \ + ticks_value2 = *(volatile integer_t *) \ + (&machine_slot[(__cpu)].cpu_ticks[(__state)]); \ + } while (ticks_value1 != ticks_value2); \ + cpu_load_info->cpu_ticks[(__state)] += ticks_value1; \ +MACRO_END + + if (*count < HOST_CPU_LOAD_INFO_COUNT) + return KERN_FAILURE; + + cpu_load_info = (host_cpu_load_info_t) info; + + cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0; + cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0; + cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0; + cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0; + for (i = 0; i < NCPUS; i++) { + if (!machine_slot[i].is_cpu || + !machine_slot[i].running) + continue; + GET_TICKS_VALUE(i, CPU_STATE_USER); + GET_TICKS_VALUE(i, CPU_STATE_NICE); + GET_TICKS_VALUE(i, CPU_STATE_SYSTEM); + GET_TICKS_VALUE(i, CPU_STATE_IDLE); + } + + *count = HOST_CPU_LOAD_INFO_COUNT; + return KERN_SUCCESS; + } + + default: + return(KERN_INVALID_ARGUMENT); + } +} + +/* + * Get host statistics that require privilege. + * None for now, just call the un-privileged version. 
+ */ +kern_return_t +host_priv_statistics( + host_priv_t host_priv, + host_flavor_t flavor, + host_info_t info, + mach_msg_type_number_t *count) +{ + return(host_statistics((host_t)host_priv, flavor, info, count)); +} + + +kern_return_t +host_page_size( + host_t host, + vm_size_t *out_page_size) +{ + if (host == HOST_NULL) + return(KERN_INVALID_ARGUMENT); + + *out_page_size = PAGE_SIZE; + + return(KERN_SUCCESS); +} + +/* + * Return kernel version string (more than you ever + * wanted to know about what version of the kernel this is). + */ + +kern_return_t +host_kernel_version( + host_t host, + kernel_version_t out_version) +{ + extern char version[]; + + if (host == HOST_NULL) + return(KERN_INVALID_ARGUMENT); + + (void) strncpy(out_version, version, sizeof(kernel_version_t)); + + return(KERN_SUCCESS); +} + +/* + * host_processor_sets: + * + * List all processor sets on the host. + */ +kern_return_t +host_processor_sets( + host_priv_t host_priv, + processor_set_name_array_t *pset_list, + mach_msg_type_number_t *count) +{ + vm_offset_t addr; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + + /* + * Allocate memory. Can be pageable because it won't be + * touched while holding a lock. + */ + + addr = kalloc((vm_size_t) sizeof(mach_port_t)); + if (addr == 0) + return KERN_RESOURCE_SHORTAGE; + + /* take ref for convert_pset_name_to_port */ + pset_reference(&default_pset); + /* do the conversion that Mig should handle */ + *((ipc_port_t *) addr) = convert_pset_name_to_port(&default_pset); + + *pset_list = (processor_set_array_t)addr; + *count = 1; + + return KERN_SUCCESS; +} + +/* + * host_processor_set_priv: + * + * Return control port for given processor set. 
+ */ +kern_return_t +host_processor_set_priv( + host_priv_t host_priv, + processor_set_t pset_name, + processor_set_t *pset) +{ + if ((host_priv == HOST_PRIV_NULL) || (pset_name == PROCESSOR_SET_NULL)) { + *pset = PROCESSOR_SET_NULL; + return(KERN_INVALID_ARGUMENT); + } + + *pset = pset_name; + pset_reference(*pset); + return(KERN_SUCCESS); +} + +/* + * host_processor_info + * + * Return info about the processors on this host. It will return + * the number of processors, and the specific type of info requested + * in an OOL array. + */ +kern_return_t +host_processor_info( + host_t host, + processor_flavor_t flavor, + natural_t *proc_count, + processor_info_array_t *proc_info, + mach_msg_type_number_t *proc_info_count) +{ + int i; + int num; + int count; + vm_size_t size; + vm_offset_t addr; + kern_return_t kr; + vm_map_copy_t copy; + processor_info_t proc_data; + + if (host == HOST_NULL) + return KERN_INVALID_ARGUMENT; + + kr = processor_info_count(flavor, &count); + if (kr != KERN_SUCCESS) { + return kr; + } + + for (num = i = 0; i < NCPUS; i++) + if (machine_slot[i].is_cpu) + num++; + + size = (vm_size_t)round_page(num * count * sizeof(natural_t)); + + kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); + if (kr != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + + kr = vm_map_wire(ipc_kernel_map, addr, addr + size, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + if (kr != KERN_SUCCESS) { + kmem_free(ipc_kernel_map, addr, size); + return KERN_RESOURCE_SHORTAGE; + } + + proc_data = (processor_info_t) addr; + for (i = 0; i < NCPUS; i++) { + int count2 = count; + host_t host2; + + if (machine_slot[i].is_cpu) { + kr = processor_info(cpu_to_processor(i), + flavor, + &host2, + proc_data, + &count2); + if (kr != KERN_SUCCESS) { + kmem_free(ipc_kernel_map, addr, size); + return kr; + } + assert(count == count2); + proc_data += count; + } + } + + kr = vm_map_unwire(ipc_kernel_map, addr, addr + size, FALSE); + assert(kr == KERN_SUCCESS); + size = (vm_size_t)(num * count * 
sizeof(natural_t)); + kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, ©); + assert(kr == KERN_SUCCESS); + + *proc_count = num; + *proc_info = (processor_info_array_t) copy; + *proc_info_count = num * count; + return(KERN_SUCCESS); +} + + +/* + * host_get_io_master + * + * Return the IO master access port for this host. + */ +kern_return_t +host_get_io_master( + host_t host, + io_master_t *io_master) +{ + if (host == HOST_NULL) + return KERN_INVALID_ARGUMENT; + *io_master = ipc_port_copy_send(realhost.io_master); + return KERN_SUCCESS; +} + +#define io_master_deallocate(x) + +/* + * host_get_io_master + * + * Return the IO master access port for this host. + */ +kern_return_t +host_set_io_master( + host_priv_t host_priv, + io_master_t io_master) +{ + io_master_t old_master; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + + old_master = realhost.io_master; + realhost.io_master = io_master; + io_master_deallocate(old_master); + return KERN_SUCCESS; +} + +/* + * User interface for setting a special port. + * + * Only permits the user to set a user-owned special port + * ID, rejecting a kernel-owned special port ID. + * + * A special kernel port cannot be set up using this + * routine; use kernel_set_special_port() instead. + */ +kern_return_t +host_set_special_port( + host_priv_t host_priv, + int id, + ipc_port_t port) +{ +#if DIPC + return norma_set_special_port(host_priv, id, port); +#else + return KERN_FAILURE; +#endif +} + + +/* + * User interface for retrieving a special port. + * + * When all processing is local, this call does not block. + * If processing goes remote to discover a remote UID, + * this call blocks but not indefinitely. If the remote + * node does not exist, has panic'ed, or is booting but + * hasn't yet turned on DIPC, then we expect the transport + * to return an error. + * + * This routine always returns SUCCESS, even if there's + * no resulting port. 
+ * + * Note that there is nothing to prevent a user special + * port from disappearing after it has been discovered by + * the caller; thus, using a special port can always result + * in a "port not valid" error. + */ + +kern_return_t +host_get_special_port( + host_priv_t host_priv, + int node, + int id, + ipc_port_t *portp) +{ +#if DIPC + return norma_get_special_port(host_priv, node, id, portp); +#else + return KERN_FAILURE; +#endif +} + +host_t +host_self(void) +{ + return &realhost; +} + +host_priv_t +host_priv_self(void) +{ + return &realhost; +} + +host_security_t +host_security_self(void) +{ + return &realhost; +} + diff --git a/osfmk/kern/host.h b/osfmk/kern/host.h new file mode 100644 index 000000000..de71e5790 --- /dev/null +++ b/osfmk/kern/host.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * kern/host.h + * + * Definitions for host data structures. + * + */ + +#ifndef _KERN_HOST_H_ +#define _KERN_HOST_H_ + +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#include + +struct host { + decl_mutex_data(,lock) /* lock to protect exceptions */ + ipc_port_t host_self; + ipc_port_t host_priv_self; + ipc_port_t host_security_self; + ipc_port_t io_master; + struct exception_action exc_actions[EXC_TYPES_COUNT]; +}; + +typedef struct host host_data_t; + +extern host_data_t realhost; + +#define host_lock(host) mutex_lock(&(host)->lock) +#define host_unlock(host) mutex_unlock(&(host)->lock) + +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * Access routines for inside the kernel. 
+ */ +extern host_t host_self(void); +extern host_priv_t host_priv_self(void); +extern host_security_t host_security_self(void); + +#endif /* _KERN_HOST_H_ */ diff --git a/osfmk/kern/host_statistics.h b/osfmk/kern/host_statistics.h new file mode 100644 index 000000000..2d5da2207 --- /dev/null +++ b/osfmk/kern/host_statistics.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * kern/host_statistics.h + * + * Definitions for host VM/event statistics data structures. + * + */ + +#ifndef _KERN_HOST_STATISTICS_H_ +#define _KERN_HOST_STATISTICS_H_ + +#include +#include +#include + +extern vm_statistics_data_t vm_stat[]; + +#define VM_STAT(event) \ +MACRO_BEGIN \ + mp_disable_preemption(); \ + vm_stat[cpu_number()].event; \ + mp_enable_preemption(); \ +MACRO_END + +#endif /* _KERN_HOST_STATISTICS_H_ */ diff --git a/osfmk/kern/ipc_clock.c b/osfmk/kern/ipc_clock.c new file mode 100644 index 000000000..a8524dbe5 --- /dev/null +++ b/osfmk/kern/ipc_clock.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: kern/ipc_clock.c + * Purpose: Routines to support ipc semantics of new kernel + * alarm clock facility. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Routine: ipc_clock_init + * Purpose: + * Initialize ipc control of a clock. + */ +void +ipc_clock_init( + clock_t clock) +{ + ipc_port_t port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_clock_init"); + clock->cl_service = port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_clock_init"); + clock->cl_control = port; +} + +/* + * Routine: ipc_clock_enable + * Purpose: + * Enable ipc access to a clock. + */ +void +ipc_clock_enable( + clock_t clock) +{ + ipc_kobject_set(clock->cl_service, + (ipc_kobject_t) clock, IKOT_CLOCK); + ipc_kobject_set(clock->cl_control, + (ipc_kobject_t) clock, IKOT_CLOCK_CTRL); +} + +/* + * Routine: convert_port_to_clock + * Purpose: + * Convert from a port to a clock. + * Doesn't consume the port ref; produces a clock ref, + * which may be null. 
+ * Conditions: + * Nothing locked. + */ +clock_t +convert_port_to_clock( + ipc_port_t port) +{ + clock_t clock = CLOCK_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + ((ip_kotype(port) == IKOT_CLOCK) || + (ip_kotype(port) == IKOT_CLOCK_CTRL))) { + clock = (clock_t) port->ip_kobject; + } + ip_unlock(port); + } + return (clock); +} + +/* + * Routine: convert_port_to_clock_ctrl + * Purpose: + * Convert from a port to a clock. + * Doesn't consume the port ref; produces a clock ref, + * which may be null. + * Conditions: + * Nothing locked. + */ +clock_t +convert_port_to_clock_ctrl( + ipc_port_t port) +{ + clock_t clock = CLOCK_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_CLOCK_CTRL)) { + clock = (clock_t) port->ip_kobject; + } + ip_unlock(port); + } + return (clock); +} + +/* + * Routine: convert_clock_to_port + * Purpose: + * Convert from a clock to a port. + * Produces a naked send right which may be invalid. + * Conditions: + * Nothing locked. + */ +ipc_port_t +convert_clock_to_port( + clock_t clock) +{ + ipc_port_t port; + + port = ipc_port_make_send(clock->cl_service); + return (port); +} + +/* + * Routine: convert_clock_ctrl_to_port + * Purpose: + * Convert from a clock to a port. + * Produces a naked send right which may be invalid. + * Conditions: + * Nothing locked. + */ +ipc_port_t +convert_clock_ctrl_to_port( + clock_t clock) +{ + ipc_port_t port; + + port = ipc_port_make_send(clock->cl_control); + return (port); +} + +/* + * Routine: port_name_to_clock + * Purpose: + * Convert from a clock name to a clock pointer. 
+ */ +clock_t +port_name_to_clock( + mach_port_name_t clock_name) +{ + clock_t clock = CLOCK_NULL; + ipc_space_t space; + ipc_port_t port; + + if (clock_name == 0) + return (clock); + space = current_space(); + if (ipc_port_translate_send(space, clock_name, &port) != KERN_SUCCESS) + return (clock); + if (ip_active(port) && (ip_kotype(port) == IKOT_CLOCK)) + clock = (clock_t) port->ip_kobject; + ip_unlock(port); + return (clock); +} diff --git a/osfmk/kern/ipc_host.c b/osfmk/kern/ipc_host.c new file mode 100644 index 000000000..6cb65051b --- /dev/null +++ b/osfmk/kern/ipc_host.c @@ -0,0 +1,861 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * kern/ipc_host.c + * + * Routines to implement host ports. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Forward declarations + */ + +void +ipc_processor_terminate( + processor_t processor); + +void +ipc_processor_disable( + processor_t processor); + +boolean_t +ref_pset_port_locked( + ipc_port_t port, boolean_t matchn, processor_set_t *ppset); + +/* + * ipc_host_init: set up various things. + */ + +void ipc_host_init(void) +{ + ipc_port_t port; + int i; + + /* + * Allocate and set up the two host ports. 
+ */ + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_host_init"); + + ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST); + realhost.host_self = port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_host_init"); + + ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV); + realhost.host_priv_self = port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_host_init"); + + ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY); + realhost.host_security_self = port; + + realhost.io_master = IP_NULL; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + realhost.exc_actions[i].port = IP_NULL; + }/* for */ + + /* + * Set up ipc for default processor set. + */ + ipc_pset_init(&default_pset); + ipc_pset_enable(&default_pset); + + /* + * And for master processor + */ + ipc_processor_init(master_processor); + ipc_processor_enable(master_processor); +} + +/* + * Routine: host_self_trap [mach trap] + * Purpose: + * Give the caller send rights for his own host port. + * Conditions: + * Nothing locked. + * Returns: + * MACH_PORT_NULL if there are any resource failures + * or other errors. + */ + +mach_port_name_t +host_self_trap(void) +{ + ipc_port_t sright; + + sright = ipc_port_copy_send(current_task()->itk_host); + return ipc_port_copyout_send(sright, current_space()); +} + +/* + * ipc_processor_init: + * + * Initialize ipc access to processor by allocating port. + */ + +void +ipc_processor_init( + processor_t processor) +{ + ipc_port_t port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_processor_init"); + processor->processor_self = port; +} + +/* + * ipc_processor_enable: + * + * Enable ipc control of processor by setting port object. 
+ */ +void +ipc_processor_enable( + processor_t processor) +{ + ipc_port_t myport; + + myport = processor->processor_self; + ipc_kobject_set(myport, (ipc_kobject_t) processor, IKOT_PROCESSOR); +} + +/* + * ipc_processor_disable: + * + * Disable ipc control of processor by clearing port object. + */ +void +ipc_processor_disable( + processor_t processor) +{ + ipc_port_t myport; + + myport = processor->processor_self; + if (myport == IP_NULL) + return; + ipc_kobject_set(myport, IKO_NULL, IKOT_NONE); +} + +/* + * ipc_processor_terminate: + * + * Processor is off-line. Destroy ipc control port. + */ +void +ipc_processor_terminate( + processor_t processor) +{ + ipc_port_t myport; + spl_t s; + + s = splsched(); + processor_lock(processor); + myport = processor->processor_self; + if (myport == IP_NULL) { + processor_unlock(processor); + splx(s); + return; + } + + processor->processor_self = IP_NULL; + processor_unlock(processor); + splx(s); + + ipc_port_dealloc_kernel(myport); +} + +/* + * ipc_pset_init: + * + * Initialize ipc control of a processor set by allocating its ports. + */ + +void +ipc_pset_init( + processor_set_t pset) +{ + ipc_port_t port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_pset_init"); + pset->pset_self = port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_pset_init"); + pset->pset_name_self = port; +} + +/* + * ipc_pset_enable: + * + * Enable ipc access to a processor set. + */ +void +ipc_pset_enable( + processor_set_t pset) +{ + pset_lock(pset); + if (pset->active) { + ipc_kobject_set(pset->pset_self, + (ipc_kobject_t) pset, IKOT_PSET); + ipc_kobject_set(pset->pset_name_self, + (ipc_kobject_t) pset, IKOT_PSET_NAME); + pset->ref_count += 2; + } + pset_unlock(pset); +} + +/* + * ipc_pset_disable: + * + * Disable ipc access to a processor set by clearing the port objects. + * Caller must hold pset lock and a reference to the pset. Ok to + * just decrement pset reference count as a result. 
+ */ +void +ipc_pset_disable( + processor_set_t pset) +{ + ipc_kobject_set(pset->pset_self, IKO_NULL, IKOT_NONE); + ipc_kobject_set(pset->pset_name_self, IKO_NULL, IKOT_NONE); + pset->ref_count -= 2; +} + +/* + * ipc_pset_terminate: + * + * Processor set is dead. Deallocate the ipc control structures. + */ +void +ipc_pset_terminate( + processor_set_t pset) +{ + ipc_port_dealloc_kernel(pset->pset_self); + ipc_port_dealloc_kernel(pset->pset_name_self); +} + +/* + * processor_set_default, processor_set_default_priv: + * + * Return ports for manipulating default_processor set. MiG code + * differentiates between these two routines. + */ +kern_return_t +processor_set_default( + host_t host, + processor_set_t *pset) +{ + if (host == HOST_NULL) + return(KERN_INVALID_ARGUMENT); + + *pset = &default_pset; + pset_reference(*pset); + return(KERN_SUCCESS); +} + +/* + * Routine: convert_port_to_host + * Purpose: + * Convert from a port to a host. + * Doesn't consume the port ref; the host produced may be null. + * Conditions: + * Nothing locked. + */ + +host_t +convert_port_to_host( + ipc_port_t port) +{ + host_t host = HOST_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + ((ip_kotype(port) == IKOT_HOST) || + (ip_kotype(port) == IKOT_HOST_PRIV) + )) + host = (host_t) port->ip_kobject; + ip_unlock(port); + } + + return host; +} + +/* + * Routine: convert_port_to_host_priv + * Purpose: + * Convert from a port to a host. + * Doesn't consume the port ref; the host produced may be null. + * Conditions: + * Nothing locked. + */ + +host_t +convert_port_to_host_priv( + ipc_port_t port) +{ + host_t host = HOST_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_HOST_PRIV)) + host = (host_t) port->ip_kobject; + ip_unlock(port); + } + + return host; +} + +/* + * Routine: convert_port_to_processor + * Purpose: + * Convert from a port to a processor. 
+ * Doesn't consume the port ref; + * the processor produced may be null. + * Conditions: + * Nothing locked. + */ + +processor_t +convert_port_to_processor( + ipc_port_t port) +{ + processor_t processor = PROCESSOR_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_PROCESSOR)) + processor = (processor_t) port->ip_kobject; + ip_unlock(port); + } + + return processor; +} + +/* + * Routine: convert_port_to_pset + * Purpose: + * Convert from a port to a pset. + * Doesn't consume the port ref; produces a pset ref, + * which may be null. + * Conditions: + * Nothing locked. + */ + +processor_set_t +convert_port_to_pset( + ipc_port_t port) +{ + boolean_t r; + processor_set_t pset = PROCESSOR_SET_NULL; + + r = FALSE; + while (!r && IP_VALID(port)) { + ip_lock(port); + r = ref_pset_port_locked(port, FALSE, &pset); + /* port unlocked */ + } + return pset; +} + +/* + * Routine: convert_port_to_pset_name + * Purpose: + * Convert from a port to a pset. + * Doesn't consume the port ref; produces a pset ref, + * which may be null. + * Conditions: + * Nothing locked. 
+ */ + +processor_set_name_t +convert_port_to_pset_name( + ipc_port_t port) +{ + boolean_t r; + processor_set_t pset = PROCESSOR_SET_NULL; + + r = FALSE; + while (!r && IP_VALID(port)) { + ip_lock(port); + r = ref_pset_port_locked(port, TRUE, &pset); + /* port unlocked */ + } + return pset; +} + +boolean_t +ref_pset_port_locked(ipc_port_t port, boolean_t matchn, processor_set_t *ppset) +{ + processor_set_t pset; + + pset = PROCESSOR_SET_NULL; + if (ip_active(port) && + ((ip_kotype(port) == IKOT_PSET) || + (matchn && (ip_kotype(port) == IKOT_PSET_NAME)))) { + pset = (processor_set_t) port->ip_kobject; + if (!pset_lock_try(pset)) { + ip_unlock(port); + mutex_pause(); + return (FALSE); + } + pset->ref_count++; + pset_unlock(pset); + } + *ppset = pset; + ip_unlock(port); + return (TRUE); +} + +/* + * Routine: convert_host_to_port + * Purpose: + * Convert from a host to a port. + * Produces a naked send right which may be invalid. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +convert_host_to_port( + host_t host) +{ + ipc_port_t port; + + port = ipc_port_make_send(host->host_self); + + return port; +} + +/* + * Routine: convert_processor_to_port + * Purpose: + * Convert from a processor to a port. + * Produces a naked send right which may be invalid. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +convert_processor_to_port( + processor_t processor) +{ + ipc_port_t port; + spl_t s; + + s = splsched(); + processor_lock(processor); + + if (processor->processor_self != IP_NULL) + port = ipc_port_make_send(processor->processor_self); + else + port = IP_NULL; + + processor_unlock(processor); + splx(s); + + return port; +} + +/* + * Routine: convert_pset_to_port + * Purpose: + * Convert from a pset to a port. + * Consumes a pset ref; produces a naked send right + * which may be invalid. + * Conditions: + * Nothing locked. 
+ */ + +ipc_port_t +convert_pset_to_port( + processor_set_t pset) +{ + ipc_port_t port; + + pset_lock(pset); + if (pset->active) + port = ipc_port_make_send(pset->pset_self); + else + port = IP_NULL; + pset_unlock(pset); + + pset_deallocate(pset); + return port; +} + +/* + * Routine: convert_pset_name_to_port + * Purpose: + * Convert from a pset to a port. + * Consumes a pset ref; produces a naked send right + * which may be invalid. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +convert_pset_name_to_port( + processor_set_name_t pset) +{ + ipc_port_t port; + + pset_lock(pset); + if (pset->active) + port = ipc_port_make_send(pset->pset_name_self); + else + port = IP_NULL; + pset_unlock(pset); + + pset_deallocate(pset); + return port; +} + +/* + * Routine: convert_port_to_host_security + * Purpose: + * Convert from a port to a host security. + * Doesn't consume the port ref; the port produced may be null. + * Conditions: + * Nothing locked. + */ + +host_t +convert_port_to_host_security( + ipc_port_t port) +{ + host_t host = HOST_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_HOST_SECURITY)) + host = (host_t) port->ip_kobject; + ip_unlock(port); + } + + return host; +} + +/* + * Routine: host_set_exception_ports [kernel call] + * Purpose: + * Sets the host exception port, flavor and + * behavior for the exception types specified by the mask. + * There will be one send right per exception per valid + * port. + * Conditions: + * Nothing locked. If successful, consumes + * the supplied send right. + * Returns: + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The host_priv is not valid, + * Illegal mask bit set. 
+ * Illegal exception behavior +kern_return_t +host_set_exception_ports( + host_priv_t host_priv, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) +{ + register int i; + ipc_port_t old_port[EXC_TYPES_COUNT]; + + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_ARGUMENT; + } + + assert(host_priv == &realhost); + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + if (IP_VALID(new_port)) { + switch (new_behavior) { + case EXCEPTION_DEFAULT: + case EXCEPTION_STATE: + case EXCEPTION_STATE_IDENTITY: + break; + default: + return KERN_INVALID_ARGUMENT; + } + } + /* Cannot easily check "new_flavor", but that just means that + * the flavor in the generated exception message might be garbage: + * GIGO + */ + host_lock(host_priv); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + old_port[i] = host_priv->exc_actions[i].port; + host_priv->exc_actions[i].port = + ipc_port_copy_send(new_port); + host_priv->exc_actions[i].behavior = new_behavior; + host_priv->exc_actions[i].flavor = new_flavor; + } else + old_port[i] = IP_NULL; + }/* for */ + + /* + * Consume send rights without any lock held. + */ + host_unlock(host_priv); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + if (IP_VALID(old_port[i])) + ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ + ipc_port_release_send(new_port); + + return KERN_SUCCESS; +} + +/* + * Routine: host_get_exception_ports [kernel call] + * Purpose: + * Clones a send right for each of the host's exception + * ports specified in the mask and returns the behaviour + * and flavor of said port. + * + * Returns up to [in] CountCnt elements. + * + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Extracted a send right. + * KERN_INVALID_ARGUMENT Invalid host_priv specified, + * Invalid special port, + * Illegal mask bit set. 
+ * KERN_FAILURE The thread is dead. + */ +kern_return_t +host_get_exception_ports( + host_priv_t host_priv, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors ) +{ + register int i, + j, + count; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + assert (host_priv == &realhost); + + host_lock(host_priv); + + count = 0; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + for (j = 0; j < count; j++) { +/* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if (host_priv->exc_actions[i].port == ports[j] && + host_priv->exc_actions[i].behavior == behaviors[j] + && host_priv->exc_actions[i].flavor == flavors[j]) + { + masks[j] |= (1 << i); + break; + } + }/* for */ + if (j == count) { + masks[j] = (1 << i); + ports[j] = + ipc_port_copy_send(host_priv->exc_actions[i].port); + behaviors[j] = host_priv->exc_actions[i].behavior; + flavors[j] = host_priv->exc_actions[i].flavor; + count++; + if (count > *CountCnt) { + break; + } + } + } + }/* for */ + host_unlock(host_priv); + + *CountCnt = count; + return KERN_SUCCESS; +} + +kern_return_t +host_swap_exception_ports( + host_priv_t host_priv, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors ) +{ + register int i, + j, + count; + ipc_port_t old_port[EXC_TYPES_COUNT]; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + if 
(IP_VALID(new_port)) { + switch (new_behavior) { + case EXCEPTION_DEFAULT: + case EXCEPTION_STATE: + case EXCEPTION_STATE_IDENTITY: + break; + default: + return KERN_INVALID_ARGUMENT; + } + } + /* Cannot easily check "new_flavor", but that just means that + * the flavor in the generated exception message might be garbage: + * GIGO */ + + host_lock(host_priv); + + count = 0; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + for (j = 0; j < count; j++) { +/* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if (host_priv->exc_actions[i].port == ports[j] && + host_priv->exc_actions[i].behavior == behaviors[j] + && host_priv->exc_actions[i].flavor == flavors[j]) + { + masks[j] |= (1 << i); + break; + } + }/* for */ + if (j == count) { + masks[j] = (1 << i); + ports[j] = + ipc_port_copy_send(host_priv->exc_actions[i].port); + behaviors[j] = host_priv->exc_actions[i].behavior; + flavors[j] = host_priv->exc_actions[i].flavor; + count++; + } + old_port[i] = host_priv->exc_actions[i].port; + host_priv->exc_actions[i].port = + ipc_port_copy_send(new_port); + host_priv->exc_actions[i].behavior = new_behavior; + host_priv->exc_actions[i].flavor = new_flavor; + if (count > *CountCnt) { + break; + } + } else + old_port[i] = IP_NULL; + }/* for */ + host_unlock(host_priv); + + /* + * Consume send rights without any lock held. + */ + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + if (IP_VALID(old_port[i])) + ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ + ipc_port_release_send(new_port); + *CountCnt = count; + + return KERN_SUCCESS; +} diff --git a/osfmk/kern/ipc_host.h b/osfmk/kern/ipc_host.h new file mode 100644 index 000000000..6b4a0916b --- /dev/null +++ b/osfmk/kern/ipc_host.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_IPC_HOST_H_ +#define _KERN_IPC_HOST_H_ + +#include +#include + +/* Initialize IPC host services */ +extern void ipc_host_init(void); + +/* Initialize ipc access to processor by allocating a port */ +extern void ipc_processor_init( + processor_t processor); + +/* Enable ipc control of processor by setting port object */ +extern void ipc_processor_enable( + processor_t processor); + +/* Initialize ipc control of a processor set */ +extern void ipc_pset_init( + processor_set_t pset); + +/* Enable ipc access to a processor set */ +extern void ipc_pset_enable( + processor_set_t pset); + +/* Disable ipc access to a processor set */ +extern void ipc_pset_disable( + processor_set_t pset); + +/* Deallocate the ipc control structures for a processor set */ +extern void ipc_pset_terminate( + processor_set_t pset); + +/* Initialize ipc control of a clock */ +extern void ipc_clock_init( + clock_t clock); + +/* Enable ipc access to a clock */ +extern void ipc_clock_enable( + clock_t clock); + +/* Convert from a port to a clock */ +extern clock_t convert_port_to_clock( + ipc_port_t port); + +/* Convert from a port to a clock control */ +extern clock_t convert_port_to_clock_ctrl( + ipc_port_t port); + +/* Convert from a clock to a port */ +extern ipc_port_t convert_clock_to_port( + clock_t clock); + +/* Convert from a clock control to a port */ +extern ipc_port_t convert_clock_ctrl_to_port( + clock_t clock); + +/* Convert from a clock name to a clock pointer */ +extern clock_t port_name_to_clock( + mach_port_name_t clock_name); + +/* Convert from a port to a host */ +extern host_t convert_port_to_host( + ipc_port_t port); + +/* Convert from a port to a host privilege port */ +extern host_t convert_port_to_host_priv( + ipc_port_t port); + +/* Convert from a port to a host paging port */ +extern host_t convert_port_to_host_paging( + ipc_port_t port); + +/* Convert from a host to a port */ +extern ipc_port_t convert_host_to_port( + host_t host); + +/* 
Convert from a port to a processor */ +extern processor_t convert_port_to_processor( + ipc_port_t port); + +/* Convert from a processor to a port */ +extern ipc_port_t convert_processor_to_port( + processor_t processor); + +/* Convert from a port to a processor set */ +extern processor_set_t convert_port_to_pset( + ipc_port_t port); + +/* Convert from a port to a processor set name */ +extern processor_set_t convert_port_to_pset_name( + ipc_port_t port); + +/* Convert from a processor set to a port */ +extern ipc_port_t convert_pset_to_port( + processor_set_t processor); + +/* Convert from a processor set name to a port */ +extern ipc_port_t convert_pset_name_to_port( + processor_set_t processor); + +/* Convert from a port to a host security port */ +extern host_t convert_port_to_host_security( + ipc_port_t port); + + +#endif /* _KERN_IPC_HOST_H_ */ diff --git a/osfmk/kern/ipc_kobject.c b/osfmk/kern/ipc_kobject.c new file mode 100644 index 000000000..5eaef10b7 --- /dev/null +++ b/osfmk/kern/ipc_kobject.c @@ -0,0 +1,764 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/ipc_kobject.c + * Author: Rich Draves + * Date: 1989 + * + * Functions for letting a port represent a kernel object. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +void + def_pager_hash_insert( + ipc_port_t name_port); +void pager_mux_hash_insert( + ipc_port_t port, + rpc_subsystem_t rec); +void pager_mux_hash_delete( + ipc_port_t port); +rpc_subsystem_t pager_mux_hash_lookup( + ipc_port_t port); + + +/* + * Routine: ipc_kobject_notify + * Purpose: + * Deliver notifications to kobjects that care about them. 
+ */ +boolean_t +ipc_kobject_notify( + mach_msg_header_t *request_header, + mach_msg_header_t *reply_header); + +#include + +typedef struct { + mach_msg_id_t num; + mig_routine_t routine; + int size; +#if MACH_COUNTERS + mach_counter_t callcount; +#endif +} mig_hash_t; + +#define MAX_MIG_ENTRIES 1024 +#define MIG_HASH(x) (x) + +#ifndef max +#define max(a,b) (((a) > (b)) ? (a) : (b)) +#endif /* max */ + +mig_hash_t mig_buckets[MAX_MIG_ENTRIES]; +int mig_table_max_displ; +mach_msg_size_t mig_reply_size; + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_MACHINE_ROUTINES +#include +#endif /* MACH_MACHINE_ROUTINES */ +#if XK_PROXY +#include +#endif /* XK_PROXY */ + + +rpc_subsystem_t mig_e[] = { + (rpc_subsystem_t)&mach_port_subsystem, + (rpc_subsystem_t)&mach_host_subsystem, + (rpc_subsystem_t)&host_priv_subsystem, + (rpc_subsystem_t)&host_security_subsystem, + (rpc_subsystem_t)&clock_subsystem, + (rpc_subsystem_t)&clock_priv_subsystem, + (rpc_subsystem_t)&processor_subsystem, + (rpc_subsystem_t)&processor_set_subsystem, + (rpc_subsystem_t)&is_iokit_subsystem, + (rpc_subsystem_t)&dp_memory_object_subsystem, + (rpc_subsystem_t)&dp_memory_object_default_subsystem, + (rpc_subsystem_t)&memory_object_name_subsystem, + (rpc_subsystem_t)&default_pager_object_subsystem, + (rpc_subsystem_t)&lock_set_subsystem, + (rpc_subsystem_t)&ledger_subsystem, + (rpc_subsystem_t)&semaphore_subsystem, + (rpc_subsystem_t)&task_subsystem, + (rpc_subsystem_t)&thread_act_subsystem, + (rpc_subsystem_t)&vm_map_subsystem, + (rpc_subsystem_t)&UNDReply_subsystem, + +#if XK_PROXY + (rpc_subsystem_t)&do_uproxy_xk_uproxy_subsystem, +#endif /* XK_PROXY */ +#if MACH_MACHINE_ROUTINES + (rpc_subsystem_t)&MACHINE_SUBSYSTEM, +#endif /* MACH_MACHINE_ROUTINES */ +#if MCMSG && iPSC860 + (rpc_subsystem_t)&mcmsg_info_subsystem, +#endif /* 
MCMSG && iPSC860 */ +}; + +void +mig_init(void) +{ + register unsigned int i, n = sizeof(mig_e)/sizeof(rpc_subsystem_t); + register unsigned int howmany; + register mach_msg_id_t j, pos, nentry, range; + + for (i = 0; i < n; i++) { + range = mig_e[i]->end - mig_e[i]->start; + if (!mig_e[i]->start || range < 0) + panic("the msgh_ids in mig_e[] aren't valid!"); + mig_reply_size = max(mig_reply_size, mig_e[i]->maxsize); + + for (j = 0; j < range; j++) { + if (mig_e[i]->routine[j].stub_routine) { + /* Only put real entries in the table */ + nentry = j + mig_e[i]->start; + for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1; + mig_buckets[pos].num; + pos = ++pos % MAX_MIG_ENTRIES, howmany++) { + if (mig_buckets[pos].num == nentry) { + printf("message id = %d\n", nentry); + panic("multiple entries with the same msgh_id"); + } + if (howmany == MAX_MIG_ENTRIES) + panic("the mig dispatch table is too small"); + } + + mig_buckets[pos].num = nentry; + mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine; + if (mig_e[i]->routine[j].max_reply_msg) + mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg; + else + mig_buckets[pos].size = mig_e[i]->maxsize; + + mig_table_max_displ = max(howmany, mig_table_max_displ); + } + } + } +} + + +#define PAGER_MUX_HASH_COUNT 127 + + +struct pager_mux_entry { + queue_chain_t links; + ipc_port_t name; + rpc_subsystem_t pager_object; +}; +typedef struct pager_mux_entry *pager_mux_entry_t; + +queue_head_t pager_mux_hashtable[PAGER_MUX_HASH_COUNT]; +zone_t pager_mux_hash_zone; + +decl_mutex_data(,pager_mux_hash_lock) + +#define pager_mux_hash(name_port) \ + (((natural_t)(name_port) & 0xffffff) % PAGER_MUX_HASH_COUNT) + + +rpc_subsystem_t +pager_mux_hash_lookup( + ipc_port_t name_port) +{ + register queue_t bucket; + register pager_mux_entry_t entry; + register rpc_subsystem_t pager_object; + + bucket = &pager_mux_hashtable[pager_mux_hash(name_port)]; + + mutex_lock(&pager_mux_hash_lock); + for (entry = 
(pager_mux_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (pager_mux_entry_t)queue_next(&entry->links)) { + if (entry->name == name_port) { + pager_object = entry->pager_object; + /* don't need to reference the object, it can't disappear */ + /* pager_mux_reference(pager_object); */ + mutex_unlock(&pager_mux_hash_lock); + return (pager_object); + } + } + mutex_unlock(&pager_mux_hash_lock); + return (rpc_subsystem_t)0; +} + + +void +pager_mux_hash_init(void) +{ + register int i; + register vm_size_t size; + + size = sizeof(struct pager_mux_entry); + pager_mux_hash_zone = zinit( + size, + size * 2000, + PAGE_SIZE, + "pager mux port hash"); + for (i = 0; i < PAGER_MUX_HASH_COUNT; i++) + queue_init(&pager_mux_hashtable[i]); + mutex_init(&pager_mux_hash_lock, ETAP_IO_DEV_PAGEH); +} + + +void +pager_mux_hash_insert( + ipc_port_t name_port, + rpc_subsystem_t pager_object) +{ + register pager_mux_entry_t new_entry; + + new_entry = (pager_mux_entry_t) zalloc(pager_mux_hash_zone); + new_entry->links.prev = (queue_entry_t) 0; + new_entry->links.next = (queue_entry_t) 0; + new_entry->name = name_port; + new_entry->pager_object = pager_object; + + mutex_lock(&pager_mux_hash_lock); + queue_enter((&pager_mux_hashtable[pager_mux_hash(name_port)]), + new_entry, pager_mux_entry_t, links); + mutex_unlock(&pager_mux_hash_lock); +} + +void +pager_mux_hash_delete( + ipc_port_t name_port) +{ + register queue_t bucket; + register pager_mux_entry_t entry; + + bucket = &pager_mux_hashtable[pager_mux_hash(name_port)]; + + mutex_lock(&pager_mux_hash_lock); + for (entry = (pager_mux_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (pager_mux_entry_t)queue_next(&entry->links)) { + if (entry->name == name_port) { + queue_remove(bucket, entry, pager_mux_entry_t, links); + zfree(pager_mux_hash_zone, (vm_offset_t)entry); + break; + } + } + mutex_unlock(&pager_mux_hash_lock); +} + + +/* + * Routine: ipc_kobject_server + * Purpose: + * Handle a 
message sent to the kernel. + * Generates a reply message. + * Version for Untyped IPC. + * Conditions: + * Nothing locked. + */ + +ipc_kmsg_t +ipc_kobject_server( + ipc_kmsg_t request) +{ + mach_msg_size_t reply_size; + ipc_kmsg_t reply; + kern_return_t kr; + mig_routine_t routine; + ipc_port_t *destp; + mach_msg_format_0_trailer_t *trailer; + register mig_hash_t *ptr; + unsigned int th; + + /* Only fetch current thread if ETAP is configured */ + ETAP_DATA_LOAD(th, current_thread()); + ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH, + EVENT_BEGIN, + ((thread_t) th), + &request->ikm_header.msgh_id, + sizeof(int)); + /* + * Find out corresponding mig_hash entry if any + */ + { + register int key = request->ikm_header.msgh_id; + register int i = MIG_HASH(key); + register int max_iter = mig_table_max_displ; + + do + ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES]; + while (key != ptr->num && ptr->num && --max_iter); + + if (!ptr->routine || key != ptr->num) { + ptr = (mig_hash_t *)0; + reply_size = mig_reply_size; + } else { + reply_size = ptr->size; +#if MACH_COUNTER + ptr->callcount++; +#endif + } + } + + /* round up for trailer size */ + reply_size += MAX_TRAILER_SIZE; + reply = ipc_kmsg_alloc(reply_size); + + if (reply == IKM_NULL) { + printf("ipc_kobject_server: dropping request\n"); + ipc_kmsg_destroy(request); + return IKM_NULL; + } + + /* + * Initialize reply message. 
+ */ + { +#define InP ((mach_msg_header_t *) &request->ikm_header) +#define OutP ((mig_reply_error_t *) &reply->ikm_header) + + OutP->NDR = NDR_record; + OutP->Head.msgh_size = sizeof(mig_reply_error_t); + + OutP->Head.msgh_bits = + MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0); + OutP->Head.msgh_remote_port = InP->msgh_local_port; + OutP->Head.msgh_local_port = MACH_PORT_NULL; + OutP->Head.msgh_id = InP->msgh_id + 100; + +#undef InP +#undef OutP + } + + /* + * Find the routine to call, and call it + * to perform the kernel function + */ + { + if (ptr) { + (*ptr->routine)(&request->ikm_header, &reply->ikm_header); + kernel_task->messages_received++; + } + else { + if (!ipc_kobject_notify(&request->ikm_header, &reply->ikm_header)){ +#if MACH_IPC_TEST + printf("ipc_kobject_server: bogus kernel message, id=%d\n", + request->ikm_header.msgh_id); +#endif /* MACH_IPC_TEST */ + _MIG_MSGID_INVALID(request->ikm_header.msgh_id); + + ((mig_reply_error_t *) &reply->ikm_header)->RetCode + = MIG_BAD_ID; + } + else + kernel_task->messages_received++; + } + kernel_task->messages_sent++; + } + + /* + * Destroy destination. The following code differs from + * ipc_object_destroy in that we release the send-once + * right instead of generating a send-once notification + * (which would bring us here again, creating a loop). + * It also differs in that we only expect send or + * send-once rights, never receive rights. + * + * We set msgh_remote_port to IP_NULL so that the kmsg + * destroy routines don't try to destroy the port twice. 
+ */ + destp = (ipc_port_t *) &request->ikm_header.msgh_remote_port; + switch (MACH_MSGH_BITS_REMOTE(request->ikm_header.msgh_bits)) { + case MACH_MSG_TYPE_PORT_SEND: + ipc_port_release_send(*destp); + break; + + case MACH_MSG_TYPE_PORT_SEND_ONCE: + ipc_port_release_sonce(*destp); + break; + + default: + panic("ipc_object_destroy: strange destination rights"); + } + *destp = IP_NULL; + + if (!(reply->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) && + ((mig_reply_error_t *) &reply->ikm_header)->RetCode != KERN_SUCCESS) + kr = ((mig_reply_error_t *) &reply->ikm_header)->RetCode; + else + kr = KERN_SUCCESS; + + if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) { + /* + * The server function is responsible for the contents + * of the message. The reply port right is moved + * to the reply message, and we have deallocated + * the destination port right, so we just need + * to free the kmsg. + */ + ipc_kmsg_free(request); + + } else { + /* + * The message contents of the request are intact. + * Destroy everything except the reply port right, + * which is needed in the reply message. + */ + request->ikm_header.msgh_local_port = MACH_PORT_NULL; + ipc_kmsg_destroy(request); + } + + if (kr == MIG_NO_REPLY) { + /* + * The server function will send a reply message + * using the reply port right, which it has saved. + */ + + ipc_kmsg_free(reply); + + ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH, + EVENT_END, + ((thread_t) th), + &request->ikm_header.msgh_id, + sizeof(int)); + + return IKM_NULL; + } else if (!IP_VALID((ipc_port_t)reply->ikm_header.msgh_remote_port)) { + /* + * Can't queue the reply message if the destination + * (the reply port) isn't valid. 
+ */ + + ipc_kmsg_destroy(reply); + + ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH, + EVENT_END, + ((thread_t) th), + &request->ikm_header.msgh_id, + sizeof(int)); + + return IKM_NULL; + } + + trailer = (mach_msg_format_0_trailer_t *) + ((vm_offset_t)&reply->ikm_header + (int)reply->ikm_header.msgh_size); + trailer->msgh_sender = KERNEL_SECURITY_TOKEN; + trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; + + ETAP_PROBE_DATA(ETAP_P_SYSCALL_MACH, + EVENT_END, + ((thread_t) th), + &request->ikm_header.msgh_id, + sizeof(int)); + + return reply; +} + +/* + * Routine: ipc_kobject_set + * Purpose: + * Make a port represent a kernel object of the given type. + * The caller is responsible for handling refs for the + * kernel object, if necessary. + * Conditions: + * Nothing locked. The port must be active if setting + * a kobject linkage. Clearing a linkage is OK on an + * inactive port. + */ +void +ipc_kobject_set( + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type) +{ + ip_lock(port); + ipc_kobject_set_atomically(port, kobject, type); + ip_unlock(port); +} + +void +ipc_kobject_set_atomically( + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type) +{ + assert(type == IKOT_NONE || ip_active(port)); +#if MACH_ASSERT + port->ip_spares[2] = (port->ip_bits & IO_BITS_KOTYPE); +#endif /* MACH_ASSERT */ + port->ip_bits = (port->ip_bits &~ IO_BITS_KOTYPE) | type; + port->ip_kobject = kobject; +} + +/* + * Routine: ipc_kobject_destroy + * Purpose: + * Release any kernel object resources associated + * with the port, which is being destroyed. + * + * This should only be needed when resources are + * associated with a user's port. In the normal case, + * when the kernel is the receiver, the code calling + * ipc_port_dealloc_kernel should clean up the resources. + * Conditions: + * The port is not locked, but it is dead. 
+ */ + +void +ipc_kobject_destroy( + ipc_port_t port) +{ + switch (ip_kotype(port)) { + + case IKOT_TIMER: + mk_timer_port_destroy(port); + break; + + case IKOT_NAMED_ENTRY: + mach_destroy_memory_entry(port); + break; + + case IKOT_UPL: + mach_destroy_upl(port); + break; + + default: /* XXX (bogon) */ + vm_object_destroy(port); + break; + } +} + + +extern int vnode_pager_workaround; + +boolean_t +ipc_kobject_notify( + mach_msg_header_t *request_header, + mach_msg_header_t *reply_header) +{ + ipc_port_t port = (ipc_port_t) request_header->msgh_remote_port; + rpc_subsystem_t paging_subsystem_object; + mach_port_seqno_t seqno; + + ((mig_reply_error_t *) reply_header)->RetCode = MIG_NO_REPLY; + switch (request_header->msgh_id) { + case MACH_NOTIFY_NO_SENDERS: + if(ip_kotype(port) == IKOT_NAMED_ENTRY) { + ip_lock(port); + + /* + * Bring the sequence number and mscount in + * line with ipc_port_destroy assertion. + */ + port->ip_mscount = 0; + port->ip_messages.imq_seqno = 0; + ipc_port_destroy(port); /* releases lock */ + return TRUE; + } + paging_subsystem_object = pager_mux_hash_lookup( + (ipc_port_t)request_header->msgh_remote_port); + if(paging_subsystem_object == (rpc_subsystem_t) + &dp_memory_object_subsystem) { + default_pager_no_senders( + (ipc_port_t)request_header->msgh_remote_port, + seqno, + (mach_port_mscount_t) + ((mach_no_senders_notification_t *) + request_header)->not_count); + (ipc_port_t)reply_header->msgh_remote_port + = MACH_PORT_NULL; + return TRUE; + } + if(paging_subsystem_object == (rpc_subsystem_t) + &vnode_pager_workaround) { + vnode_pager_no_senders( + (ipc_port_t)request_header->msgh_remote_port, + (mach_port_mscount_t) + ((mach_no_senders_notification_t *) + request_header)->not_count); + (ipc_port_t)reply_header->msgh_remote_port + = MACH_PORT_NULL; + return TRUE; + } + break; + case MACH_NOTIFY_PORT_DELETED: + case MACH_NOTIFY_PORT_DESTROYED: + case MACH_NOTIFY_SEND_ONCE: + case MACH_NOTIFY_DEAD_NAME: + break; + + default: + return FALSE; 
+ } + switch (ip_kotype(port)) { + +#ifdef IOKIT + case IKOT_IOKIT_OBJECT: + case IKOT_IOKIT_CONNECT: + case IKOT_IOKIT_SPARE: + { + extern boolean_t iokit_notify( mach_msg_header_t *msg); + + return iokit_notify(request_header); + } +#endif + default: + return FALSE; + } +} + + + +#include +#if MACH_COUNTERS && MACH_KDB + +#include +#include + +#define printf kdbprintf + +extern void kobjserver_stats(void); +extern void bucket_stats_print(mig_hash_t *bucket); + +extern void kobjserver_stats_clear(void); + + +void +kobjserver_stats_clear(void) +{ + int i; + for (i = 0; i < MAX_MIG_ENTRIES; i++) { + mig_buckets[i].callcount = 0; + } +} + +void +kobjserver_stats(void) +{ + register unsigned int i, n = sizeof(mig_e)/sizeof(rpc_subsystem_t); + register unsigned int howmany; + register mach_msg_id_t j, pos, nentry, range; + + db_printf("Kobject server call counts:\n"); + for (i = 0; i < n; i++) { + db_printf(" "); + db_printsym((vm_offset_t)mig_e[i], DB_STGY_ANY); + db_printf(":\n"); + range = mig_e[i]->end - mig_e[i]->start; + if (!mig_e[i]->start || range < 0) continue; + + for (j = 0; j < range; j++) { + nentry = j + mig_e[i]->start; + for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1; + mig_buckets[pos].num; + pos = ++pos % MAX_MIG_ENTRIES, howmany++) { + if (mig_buckets[pos].num == nentry) + bucket_stats_print(&mig_buckets[pos]); + } + } + } +} + +void +bucket_stats_print(mig_hash_t *bucket) +{ + if (bucket->callcount) { + db_printf(" "); + db_printsym((vm_offset_t)bucket->routine, DB_STGY_ANY); + db_printf(" (%d):\t%d\n", bucket->num, bucket->callcount); + } +} + + +#endif /* MACH_COUNTERS && MACH_KDB */ diff --git a/osfmk/kern/ipc_kobject.h b/osfmk/kern/ipc_kobject.h new file mode 100644 index 000000000..d2a517c55 --- /dev/null +++ b/osfmk/kern/ipc_kobject.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: kern/ipc_kobject.h + * Author: Rich Draves + * Date: 1989 + * + * Declarations for letting a port represent a kernel object. + */ + +#include +#include + +#ifndef _KERN_IPC_KOBJECT_H_ +#define _KERN_IPC_KOBJECT_H_ + +#ifdef KERNEL_PRIVATE +/* + * This is the legacy in-kernel ipc-object mechanism. Over the next + * several months, this will be phased out in favor of a mechanism that + * is less Mach IPC specific, and common across in-mach, in-kernel-component, + * and user-level-component (Plugin) models. + */ +#include +#include + +typedef natural_t ipc_kobject_type_t; + +#define IKOT_NONE 0 +#define IKOT_THREAD 1 +#define IKOT_TASK 2 +#define IKOT_HOST 3 +#define IKOT_HOST_PRIV 4 +#define IKOT_PROCESSOR 5 +#define IKOT_PSET 6 +#define IKOT_PSET_NAME 7 +#define IKOT_TIMER 8 +#define IKOT_PAGING_REQUEST 9 +#define IKOT_DEVICE 10 +#define IKOT_XMM_OBJECT 11 +#define IKOT_XMM_PAGER 12 +#define IKOT_XMM_KERNEL 13 +#define IKOT_XMM_REPLY 14 +#define IKOT_UND_REPLY 15 +/* NOT DEFINED 16 */ +#define IKOT_HOST_SECURITY 17 +#define IKOT_LEDGER 18 +#define IKOT_MASTER_DEVICE 19 +#define IKOT_ACT 20 +#define IKOT_SUBSYSTEM 21 +#define IKOT_IO_DONE_QUEUE 22 +#define IKOT_SEMAPHORE 23 +#define IKOT_LOCK_SET 24 +#define IKOT_CLOCK 25 +#define IKOT_CLOCK_CTRL 26 +#define IKOT_IOKIT_SPARE 27 +#define IKOT_NAMED_ENTRY 28 +#define IKOT_IOKIT_CONNECT 29 +#define IKOT_IOKIT_OBJECT 30 +#define IKOT_UPL 31 + /* << new entries here */ +#define IKOT_UNKNOWN 32 /* magic catchall */ +#define IKOT_MAX_TYPE 33 /* # of IKOT_ types */ + +/* Please keep ipc/ipc_object.c:ikot_print_array up to date */ + +#define is_ipc_kobject(ikot) ((ikot) != IKOT_NONE) + +/* + * Define types of kernel objects that use page lists instead + * of entry lists for copyin of out of line memory. 
+ */ + +#define ipc_kobject_vm_page_list(ikot) \ + ((ikot) == IKOT_PAGING_REQUEST || (ikot) == IKOT_DEVICE) + +#define ipc_kobject_vm_page_steal(ikot) ((ikot) == IKOT_PAGING_REQUEST) + +/* Initialize kernel server dispatch table */ +extern void mig_init(void); + +/* Dispatch a kernel server function */ +extern ipc_kmsg_t ipc_kobject_server( + ipc_kmsg_t request); + +/* Make a port represent a kernel object of the given type */ +extern void ipc_kobject_set( + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type); + +extern void ipc_kobject_set_atomically( + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type); + +/* Release any kernel object resources associated with a port */ +extern void ipc_kobject_destroy( + ipc_port_t port); + +#define null_conversion(port) (port) + +#endif /* KERNEL_PRIVATE */ + +#endif /* _KERN_IPC_KOBJECT_H_ */ diff --git a/osfmk/kern/ipc_mig.c b/osfmk/kern/ipc_mig.c new file mode 100644 index 000000000..928832f10 --- /dev/null +++ b/osfmk/kern/ipc_mig.c @@ -0,0 +1,476 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Default (zeroed) template for qos */ + +static mach_port_qos_t qos_template; + +/* + * Routine: mach_msg_send_from_kernel + * Purpose: + * Send a message from the kernel. + * + * This is used by the client side of KernelUser interfaces + * to implement SimpleRoutines. Currently, this includes + * memory_object messages. + * Conditions: + * Nothing locked. + * Returns: + * MACH_MSG_SUCCESS Sent the message. + * MACH_MSG_SEND_NO_BUFFER Destination port had inuse fixed bufer + * MACH_SEND_INVALID_DEST Bad destination port. 
 */

mach_msg_return_t
mach_msg_send_from_kernel(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;

	if (!MACH_PORT_VALID((mach_port_name_t)msg->msgh_remote_port))
		return MACH_SEND_INVALID_DEST;

	/* copy the caller's buffer into a kernel message */
	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	ipc_kmsg_copyin_from_kernel(kmsg);
	/* "always" send: cannot fail with a timeout, so no cleanup path */
	ipc_kmsg_send_always(kmsg);

	return MACH_MSG_SUCCESS;
}

/*
 *	Routine:	mach_msg_rpc_from_kernel
 *	Purpose:
 *		Send a message from the kernel and receive a reply.
 *		Uses ith_rpc_reply for the reply port.
 *
 *		This is used by the client side of KernelUser interfaces
 *		to implement Routines.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Sent the message.
 *		MACH_RCV_PORT_DIED	The reply port was deallocated.
 */

mach_msg_return_t
mach_msg_rpc_from_kernel(
	mach_msg_header_t	*msg,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size)
{
	thread_t self = current_thread();
	ipc_port_t reply;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;

	assert(MACH_PORT_VALID((mach_port_name_t)msg->msgh_remote_port));
	assert(msg->msgh_local_port == MACH_PORT_NULL);

	mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
	if (mr != MACH_MSG_SUCCESS)
		return mr;

	rpc_lock(self);

	/*
	 * Lazily allocate the per-thread RPC reply port.  The lock is
	 * dropped around the allocation, so re-check ith_rpc_reply
	 * afterwards; a concurrent fill-in would be a logic error.
	 */
	reply = self->ith_rpc_reply;
	if (reply == IP_NULL) {
		rpc_unlock(self);
		reply = ipc_port_alloc_reply();
		rpc_lock(self);
		if ((reply == IP_NULL) ||
		    (self->ith_rpc_reply != IP_NULL))
			panic("mach_msg_rpc_from_kernel");
		self->ith_rpc_reply = reply;
	}

	/* insert send-once right for the reply port */
	kmsg->ikm_header.msgh_local_port = reply;
	kmsg->ikm_header.msgh_bits |=
		MACH_MSGH_BITS(0, MACH_MSG_TYPE_MAKE_SEND_ONCE);

	/* hold a port reference across the send/receive */
	ipc_port_reference(reply);
	rpc_unlock(self);

	ipc_kmsg_copyin_from_kernel(kmsg);

	ipc_kmsg_send_always(kmsg);

	/*
	 * Receive the reply, retrying if the receive is interrupted
	 * (unless the activation is being torn down or has handlers
	 * pending, in which case bail out).
	 */
	for (;;) {
		ipc_mqueue_t mqueue;

		ip_lock(reply);
		if (!ip_active(reply)) {
			ip_unlock(reply);
			ipc_port_release(reply);
			return MACH_RCV_PORT_DIED;
		}
		if (!self->top_act || !self->top_act->active) {
			ip_unlock(reply);
			ipc_port_release(reply);
			return MACH_RCV_INTERRUPTED;
		}

		assert(reply->ip_pset_count == 0);
		mqueue = &reply->ip_messages;
		ip_unlock(reply);

		self->ith_continuation = (void (*)(mach_msg_return_t))0;

		ipc_mqueue_receive(mqueue,
				   MACH_MSG_OPTION_NONE,
				   MACH_MSG_SIZE_MAX,
				   MACH_MSG_TIMEOUT_NONE,
				   THREAD_INTERRUPTIBLE);

		mr = self->ith_state;
		kmsg = self->ith_kmsg;
		seqno = self->ith_seqno;

		if (mr == MACH_MSG_SUCCESS)
		{
			break;
		}

		assert(mr == MACH_RCV_INTERRUPTED);

		if (self->top_act && self->top_act->handlers) {
			ipc_port_release(reply);
			return(mr);
		}
	}
	ipc_port_release(reply);

	/*
	 * XXXXX  Set manually for now ...
	 *	No, why even bother, since the effort is wasted?
	 *
	{ mach_msg_format_0_trailer_t *trailer = (mach_msg_format_0_trailer_t *)
		((vm_offset_t)&kmsg->ikm_header + kmsg->ikm_header.msgh_size);
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;
	}
	*****/

	/* reply too big for the caller's buffer: destroy rights, report size */
	if (rcv_size < kmsg->ikm_header.msgh_size) {
		ipc_kmsg_copyout_dest(kmsg, ipc_space_reply);
		ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header.msgh_size);
		return MACH_RCV_TOO_LARGE;
	}

	/*
	 * We want to preserve rights and memory in reply!
	 * We don't have to put them anywhere; just leave them
	 * as they are.
	 */

	ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply);
	ipc_kmsg_put_to_kernel(msg, kmsg, kmsg->ikm_header.msgh_size);
	return MACH_MSG_SUCCESS;
}


/************** These Calls are set up for kernel-loaded tasks **************/
/************** Apple does not plan on supporting that.
These **************/
/************** need to be reworked to deal with the kernel **************/
/************** proper to eliminate the kernel specific code MIG **************/
/************** must generate. **************/


/*
 *	Routine:	mach_msg
 *	Purpose:
 *		Like mach_msg_overwrite_trap except that message buffers
 *		live in kernel space.  Doesn't handle any options.
 *
 *		This is used by in-kernel server threads to make
 *		kernel calls, to receive request messages, and
 *		to send reply messages.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 */

/*
 * NOTE(review): the header comment above says "mach_msg" but the routine
 * is named mach_msg_overwrite; the rcv_msg/rcv_msg_size, timeout and
 * notify parameters are accepted but unused here.
 */
mach_msg_return_t
mach_msg_overwrite(
	mach_msg_header_t	*msg,
	mach_msg_option_t	option,
	mach_msg_size_t		send_size,
	mach_msg_size_t		rcv_size,
	mach_port_name_t	rcv_name,
	mach_msg_timeout_t	timeout,
	mach_port_name_t	notify,
	mach_msg_header_t	*rcv_msg,
	mach_msg_size_t		rcv_msg_size)
{
	ipc_space_t space = current_space();
	vm_map_t map = current_map();
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno;
	mach_msg_return_t mr;
	mach_msg_format_0_trailer_t *trailer;

	if (option & MACH_SEND_MSG) {
		mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg);
		if (mr != MACH_MSG_SUCCESS)
			/* a kernel caller handing us a bad buffer is fatal */
			panic("mach_msg");

		mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			ipc_kmsg_free(kmsg);
			return mr;
		}

		/* no timeout: retry until the send is not interrupted */
		do
			mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE,
					   MACH_MSG_TIMEOUT_NONE);
		while (mr == MACH_SEND_INTERRUPTED);
		assert(mr == MACH_MSG_SUCCESS);
	}

	if (option & MACH_RCV_MSG) {
		thread_t self = current_thread();

		do {
			ipc_object_t object;
			ipc_mqueue_t mqueue;

			mr = ipc_mqueue_copyin(space, rcv_name,
					       &mqueue, &object);
			if (mr != MACH_MSG_SUCCESS)
				return mr;
			/* hold ref for object */

			self->ith_continuation = (void (*)(mach_msg_return_t))0;
			ipc_mqueue_receive(mqueue,
					   MACH_MSG_OPTION_NONE,
					   MACH_MSG_SIZE_MAX,
					   MACH_MSG_TIMEOUT_NONE,
					   THREAD_ABORTSAFE);
			mr = self->ith_state;
			kmsg = self->ith_kmsg;
			seqno = self->ith_seqno;

			ipc_object_release(object);

		} while (mr == MACH_RCV_INTERRUPTED);
		if (mr != MACH_MSG_SUCCESS)
			return mr;

		/* trailer sits immediately after the message body */
		trailer = (mach_msg_format_0_trailer_t *)
			((vm_offset_t)&kmsg->ikm_header + kmsg->ikm_header.msgh_size);
		if (option & MACH_RCV_TRAILER_MASK) {
			trailer->msgh_seqno = seqno;
			trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(option);
		}

		if (rcv_size < (kmsg->ikm_header.msgh_size + trailer->msgh_trailer_size)) {
			/* too large: destroy non-dest rights, return header only */
			ipc_kmsg_copyout_dest(kmsg, space);
			ipc_kmsg_put_to_kernel(msg, kmsg, sizeof *msg);
			return MACH_RCV_TOO_LARGE;
		}

		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL,
				      MACH_MSG_BODY_NULL);
		if (mr != MACH_MSG_SUCCESS) {
			if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
				/* header copied out; deliver what we have */
				ipc_kmsg_put_to_kernel(msg, kmsg,
					kmsg->ikm_header.msgh_size + trailer->msgh_trailer_size);
			} else {
				ipc_kmsg_copyout_dest(kmsg, space);
				ipc_kmsg_put_to_kernel(msg, kmsg, sizeof *msg);
			}

			return mr;
		}

		ipc_kmsg_put_to_kernel(msg, kmsg,
			kmsg->ikm_header.msgh_size + trailer->msgh_trailer_size);
	}

	return MACH_MSG_SUCCESS;
}

/*
 *	Routine:	mig_get_reply_port
 *	Purpose:
 *		Called by client side interfaces living in the kernel
 *		to get a reply port.  This port is used for
 *		mach_msg() calls which are kernel calls.
 */
mach_port_t
mig_get_reply_port(void)
{
	thread_t self = current_thread();

	assert(self->ith_mig_reply == (mach_port_t)0);

	/*
	 * JMM - for now we have no real clients of this under the kernel
	 * loaded server model because we only have one of those. In order
	 * to avoid MIG changes, we just return null here - and return
	 * references to ipc_port_t's instead of names.
	 *
	 * if (self->ith_mig_reply == MACH_PORT_NULL)
	 *	self->ith_mig_reply = mach_reply_port();
	 */
	return self->ith_mig_reply;
}

/*
 *	Routine:	mig_dealloc_reply_port
 *	Purpose:
 *		Called by client side interfaces to get rid of a reply port.
+ * Shouldn't ever be called inside the kernel, because + * kernel calls shouldn't prompt Mig to call it. + */ + +void +mig_dealloc_reply_port( + mach_port_t reply_port) +{ + panic("mig_dealloc_reply_port"); +} + +/* + * Routine: mig_put_reply_port + * Purpose: + * Called by client side interfaces after each RPC to + * let the client recycle the reply port if it wishes. + */ +void +mig_put_reply_port( + mach_port_t reply_port) +{ +} + +/* + * mig_strncpy.c - by Joshua Block + * + * mig_strncp -- Bounded string copy. Does what the library routine strncpy + * OUGHT to do: Copies the (null terminated) string in src into dest, a + * buffer of length len. Assures that the copy is still null terminated + * and doesn't overflow the buffer, truncating the copy if necessary. + * + * Parameters: + * + * dest - Pointer to destination buffer. + * + * src - Pointer to source string. + * + * len - Length of destination buffer. + */ +int +mig_strncpy( + char *dest, + char *src, + int len) +{ + int i = 0; + + if (len > 0) + if (dest != NULL) { + if (src != NULL) + for (i=1; i +#include + +/* + * Define the trace points for MIG-generated calls. One traces the input parameters + * to MIG called things, another traces the outputs, and one traces bad message IDs. 
+ */ +#ifdef _MIG_TRACE_PARAMETERS_ + +#define __BeforeRcvCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#define __AfterRcvCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#define __BeforeSimpleCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#define __AfterSimpleCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#else /* !_MIG_TRACE_PARAMETERS_ */ + +#define __BeforeRcvRpc(msgid, _NAME_) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); + +#define __AfterRcvRpc(msgid, _NAME_) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); + + +#define __BeforeRcvSimple(msgid, _NAME_) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); + +#define __AfterRcvSimple(msgid, _NAME_) \ + KERNEL_DEBUG_CONSTANT(KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); + +#endif /* 
!_MIG_TRACE_PARAMETERS_ */ + +#define _MIG_MSGID_INVALID(msgid) \ + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_MSGID_INVALID, (msgid)), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)) + +/* Send a message from the kernel */ +extern mach_msg_return_t mach_msg_send_from_kernel( + mach_msg_header_t *msg, + mach_msg_size_t send_size); + + +extern mach_msg_return_t mach_msg_rpc_from_kernel( + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size); + +extern void mach_msg_receive_continue(void); + +#endif /* _IPC_MIG_H_ */ diff --git a/osfmk/kern/ipc_subsystem.c b/osfmk/kern/ipc_subsystem.c new file mode 100644 index 000000000..125a0b6f5 --- /dev/null +++ b/osfmk/kern/ipc_subsystem.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/29 17:35:56 mburg + * MK7.3 merger + * + * Revision 1.1.10.1 1998/02/03 09:28:28 gdt + * Merge up to MK7.3 + * [1998/02/03 09:13:40 gdt] + * + * Revision 1.1.8.1 1997/06/17 02:57:46 devrcs + * Added `ipc_subsystem_terminate().' + * [1997/03/18 18:25:52 rkc] + * + * Revision 1.1.5.1 1994/09/23 02:19:57 ezf + * change marker to not FREE + * [1994/09/22 21:33:39 ezf] + * + * Revision 1.1.3.1 1994/01/20 11:05:46 emcmanus + * Copied for submission. + * [1994/01/20 11:04:25 emcmanus] + * + * Revision 1.1.1.2 1994/01/13 02:40:32 condict + * IPC support for the RPC subsytem object (server co-location). + * + * $EndLog$ + */ + +/* + * File: kern/ipc_subsystem.c + * Purpose: Routines to support ipc semantics of new kernel + * RPC subsystem descriptions + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Routine: ipc_subsystem_init + * Purpose: + * Initialize ipc control of a subsystem. + */ +void +ipc_subsystem_init( + subsystem_t subsystem) +{ + ipc_port_t port; + + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) + panic("ipc_subsystem_init"); + subsystem->ipc_self = port; +} + +/* + * Routine: ipc_subsystem_enable + * Purpose: + * Enable ipc access to a subsystem. + */ +void +ipc_subsystem_enable( + subsystem_t subsystem) +{ + ipc_kobject_set(subsystem->ipc_self, + (ipc_kobject_t) subsystem, IKOT_SUBSYSTEM); +} + + +/* + * Routine: ipc_subsystem_disable + * Purpose: + * Disable IPC access to a subsystem. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_subsystem_disable( + subsystem_t subsystem) +{ + ipc_port_t kport; + + kport = subsystem->ipc_self; + if (kport != IP_NULL) + ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); +} + +/* + * Routine: ipc_subsystem_terminate + * Purpose: + * Clean up and destroy a subsystem's IPC state. + */ +void +ipc_subsystem_terminate( + subsystem_t subsystem) +{ + ipc_port_dealloc_kernel(subsystem->ipc_self); +} + + +/* + * Routine: convert_port_to_subsystem + * Purpose: + * Convert from a port to a subsystem. + * Doesn't consume the port ref; produces a subsystem ref, + * which may be null. + * Conditions: + * Nothing locked. + */ +subsystem_t +convert_port_to_subsystem( + ipc_port_t port) +{ + subsystem_t subsystem = SUBSYSTEM_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_SUBSYSTEM)) { + subsystem = (subsystem_t) port->ip_kobject; + } + ip_unlock(port); + } + return (subsystem); +} + + +/* + * Routine: convert_subsystem_to_port + * Purpose: + * Convert from a subsystem to a port. + * Produces a naked send right which may be invalid. + * Conditions: + * Nothing locked. + */ +ipc_port_t +convert_subsystem_to_port( + subsystem_t subsystem) +{ + ipc_port_t port; + + port = ipc_port_make_send(subsystem->ipc_self); + return (port); +} + diff --git a/osfmk/kern/ipc_subsystem.h b/osfmk/kern/ipc_subsystem.h new file mode 100644 index 000000000..bcd7cf284 --- /dev/null +++ b/osfmk/kern/ipc_subsystem.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1994/09/23 02:20:11 ezf + * change marker to not FREE + * [1994/09/22 21:33:42 ezf] + * + * Revision 1.1.3.1 1994/01/20 11:05:50 emcmanus + * Copied for submission. + * [1994/01/20 11:04:35 emcmanus] + * + * Revision 1.1.1.2 1994/01/13 02:41:12 condict + * Declarations for kern/ipc_subsystem.c + * + * $EndLog$ + */ + +#ifndef _KERN_IPC_SUBSYSTEM_H_ +#define _KERN_IPC_SUBSYSTEM_H_ + +#include +#include +#include +#include +#include + + +/* Initialize a subsystem's IPC state */ +extern void ipc_subsystem_init( + subsystem_t subsystem); + +/* Enable a subsystem for IPC access */ +extern void ipc_subsystem_enable( + subsystem_t subsystem); + +/* Disable IPC access to a subsystem */ +extern void ipc_subsystem_disable( + subsystem_t subsystem); + +/* Clean up and destroy a subsystem's IPC state */ +extern void ipc_subsystem_terminate( + subsystem_t subsystem); + +/* Convert from a port to a subsystem */ +extern subsystem_t convert_port_to_subsystem( + ipc_port_t port); + +/* Convert from a subsystem to a port */ +extern ipc_port_t convert_subsystem_to_port( + subsystem_t subsystem); + +#endif /* _KERN_IPC_SUBSYSTEM_H_ */ diff --git a/osfmk/kern/ipc_sync.c b/osfmk/kern/ipc_sync.c new file mode 100644 index 000000000..34c8ffc00 
--- /dev/null
+++ b/osfmk/kern/ipc_sync.c
@@ -0,0 +1,138 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

/* NOTE(review): the include targets below were lost in extraction
 * (text inside angle brackets was stripped) -- restore from SCM. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include


/*
 * Routine:	port_name_to_semaphore
 * Purpose:
 *	Look up a port name in the current space and produce a
 *	referenced semaphore, or SEMAPHORE_NULL with an error code
 *	if the name is invalid or does not denote a semaphore port.
 * Conditions:
 *	Nothing locked.
 */
kern_return_t
port_name_to_semaphore(
	mach_port_name_t	name,
	semaphore_t		*semaphorep)
{
	semaphore_t semaphore;
	ipc_port_t kern_port;
	kern_return_t kr;

	if (!MACH_PORT_VALID(name)) {
		*semaphorep = SEMAPHORE_NULL;
		return KERN_INVALID_NAME;
	}

	kr = ipc_object_translate(current_space(), name, MACH_PORT_RIGHT_SEND,
				  (ipc_object_t *) &kern_port);
	if (kr != KERN_SUCCESS) {
		*semaphorep = SEMAPHORE_NULL;
		return kr;
	}
	/* have the port locked */
	assert(IP_VALID(kern_port));

	if (!ip_active(kern_port) || (ip_kotype(kern_port) != IKOT_SEMAPHORE)) {
		ip_unlock(kern_port);
		*semaphorep = SEMAPHORE_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	/* take a semaphore reference before dropping the port lock */
	semaphore = (semaphore_t) kern_port->ip_kobject;
	assert(semaphore != SEMAPHORE_NULL);
	semaphore_reference(semaphore);
	ip_unlock(kern_port);

	*semaphorep = semaphore;
	return KERN_SUCCESS;
}

/*
 * Convert a port to a referenced semaphore; SEMAPHORE_NULL if the
 * port is not an active semaphore kobject port.  Doesn't consume
 * the port reference.
 */
semaphore_t
convert_port_to_semaphore (ipc_port_t port)
{
	semaphore_t semaphore = SEMAPHORE_NULL;

	if (IP_VALID (port)) {
		ip_lock(port);
		if (ip_active(port) && (ip_kotype(port) == IKOT_SEMAPHORE)) {
			semaphore = (semaphore_t) port->ip_kobject;
			semaphore_reference(semaphore);
		}
		ip_unlock(port);
	}

	return (semaphore);
}


/*
 * Convert a semaphore to a naked send right for its port
 * (IP_NULL for SEMAPHORE_NULL).
 */
ipc_port_t
convert_semaphore_to_port (semaphore_t semaphore)
{
	ipc_port_t port;

	if (semaphore != SEMAPHORE_NULL)
		port = ipc_port_make_send(semaphore->port);
	else
		port = IP_NULL;

	return (port);
}

/*
 * Convert a port to a referenced lock set; LOCK_SET_NULL if the
 * port is not an active lock-set kobject port.  Doesn't consume
 * the port reference.
 */
lock_set_t
convert_port_to_lock_set (ipc_port_t port)
{
	lock_set_t lock_set = LOCK_SET_NULL;

	if (IP_VALID (port)) {
		ip_lock(port);
		if (ip_active(port) && (ip_kotype(port) == IKOT_LOCK_SET)) {
			lock_set = (lock_set_t) port->ip_kobject;
			lock_set_reference(lock_set);
		}
		ip_unlock(port);
	}

	return (lock_set);
}

/*
 * Convert a lock set to a naked send right for its port
 * (IP_NULL for LOCK_SET_NULL).
 */
ipc_port_t
convert_lock_set_to_port (lock_set_t lock_set)
{
	ipc_port_t port;

	if (lock_set != LOCK_SET_NULL)
		port = ipc_port_make_send(lock_set->port);
	else
		port = IP_NULL;

	return (port);
}

diff --git a/osfmk/kern/ipc_sync.h b/osfmk/kern/ipc_sync.h
new file mode 100644
index 000000000..5d18af44d
--- /dev/null
+++ b/osfmk/kern/ipc_sync.h
@@ -0,0 +1,42 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +#ifndef _KERN_IPC_SYNC_H_ +#define _KERN_IPC_SYNC_H_ + +#include +#include +#include + +semaphore_t convert_port_to_semaphore (ipc_port_t port); +ipc_port_t convert_semaphore_to_port (semaphore_t semaphore); + +lock_set_t convert_port_to_lock_set (ipc_port_t port); +ipc_port_t convert_lock_set_to_port (lock_set_t lock_set); + +kern_return_t port_name_to_semaphore( + mach_port_name_t name, + semaphore_t *semaphore); +#endif /* _KERN_IPC_SYNC_H_ */ diff --git a/osfmk/kern/ipc_tt.c b/osfmk/kern/ipc_tt.c new file mode 100644 index 000000000..a32975e15 --- /dev/null +++ b/osfmk/kern/ipc_tt.c @@ -0,0 +1,1741 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: ipc_tt.c + * Purpose: + * Task and thread related IPC functions. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Routine: ipc_task_init + * Purpose: + * Initialize a task's IPC state. + * + * If non-null, some state will be inherited from the parent. + * The parent must be appropriately initialized. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_task_init( + task_t task, + task_t parent) +{ + ipc_space_t space; + ipc_port_t kport; + kern_return_t kr; + int i; + + + kr = ipc_space_create(&ipc_table_entries[0], &space); + if (kr != KERN_SUCCESS) + panic("ipc_task_init"); + + + kport = ipc_port_alloc_kernel(); + if (kport == IP_NULL) + panic("ipc_task_init"); + + itk_lock_init(task); + task->itk_self = kport; + task->itk_sself = ipc_port_make_send(kport); + task->itk_space = space; + space->is_fast = task->kernel_loaded; + + if (parent == TASK_NULL) { + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + task->exc_actions[i].port = IP_NULL; + }/* for */ + task->exc_actions[EXC_MACH_SYSCALL].port = + ipc_port_make_send(realhost.host_self); + task->itk_host = ipc_port_make_send(realhost.host_self); + task->itk_bootstrap = IP_NULL; + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + task->itk_registered[i] = IP_NULL; + } else { + itk_lock(parent); + assert(parent->itk_self != IP_NULL); + + /* inherit registered ports */ + + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + task->itk_registered[i] = + ipc_port_copy_send(parent->itk_registered[i]); + + /* inherit exception and bootstrap ports */ + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + task->exc_actions[i].port = + ipc_port_copy_send(parent->exc_actions[i].port); + task->exc_actions[i].flavor = + parent->exc_actions[i].flavor; + task->exc_actions[i].behavior = + parent->exc_actions[i].behavior; + }/* for */ + task->itk_host = + ipc_port_copy_send(parent->itk_host); + + task->itk_bootstrap = + ipc_port_copy_send(parent->itk_bootstrap); + + itk_unlock(parent); + } +} + +/* + * Routine: ipc_task_enable + * Purpose: + * Enable a task for IPC access. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_task_enable( + task_t task) +{ + ipc_port_t kport; + + itk_lock(task); + kport = task->itk_self; + if (kport != IP_NULL) + ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK); + itk_unlock(task); +} + +/* + * Routine: ipc_task_disable + * Purpose: + * Disable IPC access to a task. + * Conditions: + * Nothing locked. + */ + +void +ipc_task_disable( + task_t task) +{ + ipc_port_t kport; + + itk_lock(task); + kport = task->itk_self; + if (kport != IP_NULL) + ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); + itk_unlock(task); +} + +/* + * Routine: ipc_task_terminate + * Purpose: + * Clean up and destroy a task's IPC state. + * Conditions: + * Nothing locked. The task must be suspended. + * (Or the current thread must be in the task.) + */ + +void +ipc_task_terminate( + task_t task) +{ + ipc_port_t kport; + int i; + + itk_lock(task); + kport = task->itk_self; + + if (kport == IP_NULL) { + /* the task is already terminated (can this happen?) */ + itk_unlock(task); + return; + } + + task->itk_self = IP_NULL; + itk_unlock(task); + + /* release the naked send rights */ + + if (IP_VALID(task->itk_sself)) + ipc_port_release_send(task->itk_sself); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (IP_VALID(task->exc_actions[i].port)) { + ipc_port_release_send(task->exc_actions[i].port); + } + }/* for */ + if (IP_VALID(task->itk_host)) + ipc_port_release_send(task->itk_host); + + if (IP_VALID(task->itk_bootstrap)) + ipc_port_release_send(task->itk_bootstrap); + + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + if (IP_VALID(task->itk_registered[i])) + ipc_port_release_send(task->itk_registered[i]); + + ipc_port_release_send(task->wired_ledger_port); + ipc_port_release_send(task->paged_ledger_port); + + /* destroy the kernel port */ + ipc_port_dealloc_kernel(kport); +} + +/* + * Routine: ipc_thread_init + * Purpose: + * Initialize a thread's IPC state. + * Conditions: + * Nothing locked. 
+ */ + +void +ipc_thread_init( + thread_t thread) +{ + ipc_kmsg_queue_init(&thread->ith_messages); + thread->ith_mig_reply = MACH_PORT_NULL; + thread->ith_rpc_reply = IP_NULL; +} + +/* + * Routine: ipc_thread_terminate + * Purpose: + * Clean up and destroy a thread's IPC state. + * Conditions: + * Nothing locked. The thread must be suspended. + * (Or be the current thread.) + */ + +void +ipc_thread_terminate( + thread_t thread) +{ + assert(ipc_kmsg_queue_empty(&thread->ith_messages)); + + if (thread->ith_rpc_reply != IP_NULL) + ipc_port_dealloc_reply(thread->ith_rpc_reply); + thread->ith_rpc_reply = IP_NULL; +} + +/* + * Routine: ipc_thr_act_init + * Purpose: + * Initialize an thr_act's IPC state. + * Conditions: + * Nothing locked. + */ + +void +ipc_thr_act_init(task_t task, thread_act_t thr_act) +{ + ipc_port_t kport; int i; + + kport = ipc_port_alloc_kernel(); + if (kport == IP_NULL) + panic("ipc_thr_act_init"); + + thr_act->ith_self = kport; + thr_act->ith_sself = ipc_port_make_send(kport); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + thr_act->exc_actions[i].port = IP_NULL; + + thr_act->exc_actions[EXC_MACH_SYSCALL].port = + ipc_port_make_send(realhost.host_self); + + ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT); +} + +void +ipc_thr_act_disable(thread_act_t thr_act) +{ + int i; + ipc_port_t kport; + + kport = thr_act->ith_self; + + if (kport != IP_NULL) + ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); +} + +void +ipc_thr_act_terminate(thread_act_t thr_act) +{ + ipc_port_t kport; int i; + + kport = thr_act->ith_self; + + if (kport == IP_NULL) { + /* the thread is already terminated (can this happen?) 
*/ + return; + } + + thr_act->ith_self = IP_NULL; + + /* release the naked send rights */ + + if (IP_VALID(thr_act->ith_sself)) + ipc_port_release_send(thr_act->ith_sself); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (IP_VALID(thr_act->exc_actions[i].port)) + ipc_port_release_send(thr_act->exc_actions[i].port); + } + + /* destroy the kernel port */ + ipc_port_dealloc_kernel(kport); +} + +/* + * Routine: retrieve_task_self_fast + * Purpose: + * Optimized version of retrieve_task_self, + * that only works for the current task. + * + * Return a send right (possibly null/dead) + * for the task's user-visible self port. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +retrieve_task_self_fast( + register task_t task) +{ + register ipc_port_t port; + + assert(task == current_task()); + + itk_lock(task); + assert(task->itk_self != IP_NULL); + + if ((port = task->itk_sself) == task->itk_self) { + /* no interposing */ + + ip_lock(port); + assert(ip_active(port)); + ip_reference(port); + port->ip_srights++; + ip_unlock(port); + } else + port = ipc_port_copy_send(port); + itk_unlock(task); + + return port; +} + +/* + * Routine: retrieve_act_self_fast + * Purpose: + * Optimized version of retrieve_thread_self, + * that only works for the current thread. + * + * Return a send right (possibly null/dead) + * for the thread's user-visible self port. + * Conditions: + * Nothing locked. 
+ */ + +ipc_port_t +retrieve_act_self_fast(thread_act_t thr_act) +{ + register ipc_port_t port; + + assert(thr_act == current_act()); + act_lock(thr_act); + assert(thr_act->ith_self != IP_NULL); + + if ((port = thr_act->ith_sself) == thr_act->ith_self) { + /* no interposing */ + + ip_lock(port); + assert(ip_active(port)); + ip_reference(port); + port->ip_srights++; + ip_unlock(port); + } else + port = ipc_port_copy_send(port); + act_unlock(thr_act); + + return port; +} + +/* + * Routine: task_self_trap [mach trap] + * Purpose: + * Give the caller send rights for his own task port. + * Conditions: + * Nothing locked. + * Returns: + * MACH_PORT_NULL if there are any resource failures + * or other errors. + */ + +mach_port_name_t +task_self_trap(void) +{ + task_t task = current_task(); + ipc_port_t sright; + + sright = retrieve_task_self_fast(task); + return ipc_port_copyout_send(sright, task->itk_space); +} + +/* + * Routine: thread_self_trap [mach trap] + * Purpose: + * Give the caller send rights for his own thread port. + * Conditions: + * Nothing locked. + * Returns: + * MACH_PORT_NULL if there are any resource failures + * or other errors. + */ + +mach_port_name_t +thread_self_trap(void) +{ + thread_act_t thr_act = current_act(); + task_t task = thr_act->task; + ipc_port_t sright; + + sright = retrieve_act_self_fast(thr_act); + return ipc_port_copyout_send(sright, task->itk_space); +} + +/* + * Routine: mach_reply_port [mach trap] + * Purpose: + * Allocate a port for the caller. + * Conditions: + * Nothing locked. + * Returns: + * MACH_PORT_NULL if there are any resource failures + * or other errors. 
+ */ + +mach_port_name_t +mach_reply_port(void) +{ + ipc_port_t port; + mach_port_name_t name; + kern_return_t kr; + + kr = ipc_port_alloc(current_task()->itk_space, &name, &port); + if (kr == KERN_SUCCESS) + ip_unlock(port); + else + name = MACH_PORT_NULL; + + return name; +} + +/* + * Routine: task_get_special_port [kernel call] + * Purpose: + * Clones a send right for one of the task's + * special ports. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Extracted a send right. + * KERN_INVALID_ARGUMENT The task is null. + * KERN_FAILURE The task/space is dead. + * KERN_INVALID_ARGUMENT Invalid special port. + */ + +kern_return_t +task_get_special_port( + task_t task, + int which, + ipc_port_t *portp) +{ + ipc_port_t *whichp; + ipc_port_t port; + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + + switch (which) { + case TASK_KERNEL_PORT: + whichp = &task->itk_sself; + break; + + case TASK_HOST_PORT: + whichp = &task->itk_host; + break; + + case TASK_BOOTSTRAP_PORT: + whichp = &task->itk_bootstrap; + break; + + case TASK_WIRED_LEDGER_PORT: + whichp = &task->wired_ledger_port; + break; + + case TASK_PAGED_LEDGER_PORT: + whichp = &task->paged_ledger_port; + break; + + default: + return KERN_INVALID_ARGUMENT; + } + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + return KERN_FAILURE; + } + + port = ipc_port_copy_send(*whichp); + itk_unlock(task); + + *portp = port; + return KERN_SUCCESS; +} + +/* + * Routine: task_set_special_port [kernel call] + * Purpose: + * Changes one of the task's special ports, + * setting it to the supplied send right. + * Conditions: + * Nothing locked. If successful, consumes + * the supplied send right. + * Returns: + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The task is null. + * KERN_FAILURE The task/space is dead. + * KERN_INVALID_ARGUMENT Invalid special port. 
+ */ + +kern_return_t +task_set_special_port( + task_t task, + int which, + ipc_port_t port) +{ + ipc_port_t *whichp; + ipc_port_t old; + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + + switch (which) { + case TASK_KERNEL_PORT: + whichp = &task->itk_sself; + break; + + case TASK_HOST_PORT: + whichp = &task->itk_host; + break; + + case TASK_BOOTSTRAP_PORT: + whichp = &task->itk_bootstrap; + break; + + case TASK_WIRED_LEDGER_PORT: + whichp = &task->wired_ledger_port; + break; + + case TASK_PAGED_LEDGER_PORT: + whichp = &task->paged_ledger_port; + break; + + default: + return KERN_INVALID_ARGUMENT; + }/* switch */ + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + return KERN_FAILURE; + } + + old = *whichp; + *whichp = port; + itk_unlock(task); + + if (IP_VALID(old)) + ipc_port_release_send(old); + return KERN_SUCCESS; +} + + +/* + * Routine: mach_ports_register [kernel call] + * Purpose: + * Stash a handful of port send rights in the task. + * Child tasks will inherit these rights, but they + * must use mach_ports_lookup to acquire them. + * + * The rights are supplied in a (wired) kalloc'd segment. + * Rights which aren't supplied are assumed to be null. + * Conditions: + * Nothing locked. If successful, consumes + * the supplied rights and memory. + * Returns: + * KERN_SUCCESS Stashed the port rights. + * KERN_INVALID_ARGUMENT The task is null. + * KERN_INVALID_ARGUMENT The task is dead. + * KERN_INVALID_ARGUMENT Too many port rights supplied. + */ + +kern_return_t +mach_ports_register( + task_t task, + mach_port_array_t memory, + mach_msg_type_number_t portsCnt) +{ + ipc_port_t ports[TASK_PORT_REGISTER_MAX]; + int i; + + if ((task == TASK_NULL) || + (portsCnt > TASK_PORT_REGISTER_MAX)) + return KERN_INVALID_ARGUMENT; + + /* + * Pad the port rights with nulls. 
+ */ + + for (i = 0; i < portsCnt; i++) + ports[i] = memory[i]; + for (; i < TASK_PORT_REGISTER_MAX; i++) + ports[i] = IP_NULL; + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + return KERN_INVALID_ARGUMENT; + } + + /* + * Replace the old send rights with the new. + * Release the old rights after unlocking. + */ + + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { + ipc_port_t old; + + old = task->itk_registered[i]; + task->itk_registered[i] = ports[i]; + ports[i] = old; + } + + itk_unlock(task); + + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + if (IP_VALID(ports[i])) + ipc_port_release_send(ports[i]); + + /* + * Now that the operation is known to be successful, + * we can free the memory. + */ + + if (portsCnt != 0) + kfree((vm_offset_t) memory, + (vm_size_t) (portsCnt * sizeof(mach_port_t))); + + return KERN_SUCCESS; +} + +/* + * Routine: mach_ports_lookup [kernel call] + * Purpose: + * Retrieves (clones) the stashed port send rights. + * Conditions: + * Nothing locked. If successful, the caller gets + * rights and memory. + * Returns: + * KERN_SUCCESS Retrieved the send rights. + * KERN_INVALID_ARGUMENT The task is null. + * KERN_INVALID_ARGUMENT The task is dead. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. + */ + +kern_return_t +mach_ports_lookup( + task_t task, + mach_port_array_t *portsp, + mach_msg_type_number_t *portsCnt) +{ + vm_offset_t memory; + vm_size_t size; + ipc_port_t *ports; + int i; + + kern_return_t kr; + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + + size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t)); + + memory = kalloc(size); + if (memory == 0) + return KERN_RESOURCE_SHORTAGE; + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + + kfree(memory, size); + return KERN_INVALID_ARGUMENT; + } + + ports = (ipc_port_t *) memory; + + /* + * Clone port rights. Because kalloc'd memory + * is wired, we won't fault while holding the task lock. 
+ */ + + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + ports[i] = ipc_port_copy_send(task->itk_registered[i]); + + itk_unlock(task); + + *portsp = (mach_port_array_t) ports; + *portsCnt = TASK_PORT_REGISTER_MAX; + return KERN_SUCCESS; +} + +/* + * Routine: convert_port_to_locked_task + * Purpose: + * Internal helper routine to convert from a port to a locked + * task. Used by several routines that try to convert from a + * task port to a reference on some task related object. + * Conditions: + * Nothing locked, blocking OK. + */ +task_t +convert_port_to_locked_task(ipc_port_t port) +{ + while (IP_VALID(port)) { + task_t task; + + ip_lock(port); + if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) { + ip_unlock(port); + return TASK_NULL; + } + task = (task_t) port->ip_kobject; + assert(task != TASK_NULL); + + /* + * Normal lock ordering puts task_lock() before ip_lock(). + * Attempt out-of-order locking here. + */ + if (task_lock_try(task)) { + ip_unlock(port); + return(task); + } + + ip_unlock(port); + mutex_pause(); + } + return TASK_NULL; +} + +/* + * Routine: convert_port_to_task + * Purpose: + * Convert from a port to a task. + * Doesn't consume the port ref; produces a task ref, + * which may be null. + * Conditions: + * Nothing locked. + */ +task_t +convert_port_to_task( + ipc_port_t port) +{ + task_t task; + + task = convert_port_to_locked_task(port); + if (task) { + task->ref_count++; + task_unlock(task); + } + return task; +} + +/* + * Routine: convert_port_to_space + * Purpose: + * Convert from a port to a space. + * Doesn't consume the port ref; produces a space ref, + * which may be null. + * Conditions: + * Nothing locked. 
+ */ +ipc_space_t +convert_port_to_space( + ipc_port_t port) +{ + ipc_space_t space; + task_t task; + + task = convert_port_to_locked_task(port); + + if (task == TASK_NULL) + return IPC_SPACE_NULL; + + if (!task->active) { + task_unlock(task); + return IPC_SPACE_NULL; + } + + space = task->itk_space; + is_reference(space); + task_unlock(task); + return (space); +} + +upl_t +convert_port_to_upl( + ipc_port_t port) +{ + upl_t upl; + + ip_lock(port); + if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) { + ip_unlock(port); + return (upl_t)NULL; + } + upl = (upl_t) port->ip_kobject; + ip_unlock(port); + upl_lock(upl); + upl->ref_count+=1; + upl_unlock(upl); + return upl; +} + +/* + * Routine: convert_port_entry_to_map + * Purpose: + * Convert from a port specifying an entry or a task + * to a map. Doesn't consume the port ref; produces a map ref, + * which may be null. Unlike convert_port_to_map, the + * port may be task or a named entry backed. + * Conditions: + * Nothing locked. + */ + + +vm_map_t +convert_port_entry_to_map( + ipc_port_t port) +{ + task_t task; + vm_map_t map; + vm_named_entry_t named_entry; + + if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { + while(TRUE) { + ip_lock(port); + if(ip_active(port) && (ip_kotype(port) + == IKOT_NAMED_ENTRY)) { + named_entry = + (vm_named_entry_t)port->ip_kobject; + if (!(mutex_try(&(named_entry)->Lock))) { + ip_unlock(port); + mutex_pause(); + continue; + } + named_entry->ref_count++; + mutex_unlock(&(named_entry)->Lock); + ip_unlock(port); + if ((named_entry->is_sub_map) && + (named_entry->protection + & VM_PROT_WRITE)) { + map = named_entry->backing.map; + } else { + mach_destroy_memory_entry(port); + return VM_MAP_NULL; + } + vm_map_reference_swap(map); + mach_destroy_memory_entry(port); + break; + } + else + return VM_MAP_NULL; + } + } else { + task_t task; + + task = convert_port_to_locked_task(port); + + if (task == TASK_NULL) + return VM_MAP_NULL; + + if (!task->active) { + task_unlock(task); + 
return VM_MAP_NULL; + } + + map = task->map; + vm_map_reference_swap(map); + task_unlock(task); + } + + return map; +} + +/* + * Routine: convert_port_entry_to_object + * Purpose: + * Convert from a port specifying a named entry to an + * object. Doesn't consume the port ref; produces a map ref, + * which may be null. + * Conditions: + * Nothing locked. + */ + + +vm_object_t +convert_port_entry_to_object( + ipc_port_t port) +{ + vm_object_t object; + vm_named_entry_t named_entry; + + if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { + while(TRUE) { + ip_lock(port); + if(ip_active(port) && (ip_kotype(port) + == IKOT_NAMED_ENTRY)) { + named_entry = + (vm_named_entry_t)port->ip_kobject; + if (!(mutex_try(&(named_entry)->Lock))) { + ip_unlock(port); + mutex_pause(); + continue; + } + named_entry->ref_count++; + mutex_unlock(&(named_entry)->Lock); + ip_unlock(port); + if ((!named_entry->is_sub_map) && + (named_entry->protection + & VM_PROT_WRITE)) { + object = named_entry->object; + } else { + mach_destroy_memory_entry(port); + return (vm_object_t)NULL; + } + vm_object_reference(named_entry->object); + mach_destroy_memory_entry(port); + break; + } + else + return (vm_object_t)NULL; + } + } else { + return (vm_object_t)NULL; + } + + return object; +} + +/* + * Routine: convert_port_to_map + * Purpose: + * Convert from a port to a map. + * Doesn't consume the port ref; produces a map ref, + * which may be null. + * Conditions: + * Nothing locked. + */ + +vm_map_t +convert_port_to_map( + ipc_port_t port) +{ + task_t task; + vm_map_t map; + + task = convert_port_to_locked_task(port); + + if (task == TASK_NULL) + return VM_MAP_NULL; + + if (!task->active) { + task_unlock(task); + return VM_MAP_NULL; + } + + map = task->map; + vm_map_reference_swap(map); + task_unlock(task); + return map; +} + + +/* + * Routine: convert_port_to_act + * Purpose: + * Convert from a port to a thr_act. + * Doesn't consume the port ref; produces an thr_act ref, + * which may be null. 
+ * Conditions: + * Nothing locked. + */ + +thread_act_t +convert_port_to_act( ipc_port_t port ) +{ + boolean_t r; + thread_act_t thr_act = 0; + + r = FALSE; + while (!r && IP_VALID(port)) { + ip_lock(port); + r = ref_act_port_locked(port, &thr_act); + /* port unlocked */ + } + return (thr_act); +} + +boolean_t +ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act ) +{ + thread_act_t thr_act; + + thr_act = 0; + if (ip_active(port) && + (ip_kotype(port) == IKOT_ACT)) { + thr_act = (thread_act_t) port->ip_kobject; + assert(thr_act != THR_ACT_NULL); + + /* + * Normal lock ordering is act_lock(), then ip_lock(). + * Allow out-of-order locking here, using + * act_reference_act_locked() to accomodate it. + */ + if (!act_lock_try(thr_act)) { + ip_unlock(port); + mutex_pause(); + return (FALSE); + } + act_locked_act_reference(thr_act); + act_unlock(thr_act); + } + *pthr_act = thr_act; + ip_unlock(port); + return (TRUE); +} + +/* + * Routine: port_name_to_act + * Purpose: + * Convert from a port name to an act reference + * A name of MACH_PORT_NULL is valid for the null act + * Conditions: + * Nothing locked. 
+ */ +thread_act_t +port_name_to_act( + mach_port_name_t name) +{ + thread_act_t thr_act = THR_ACT_NULL; + ipc_port_t kern_port; + kern_return_t kr; + + if (MACH_PORT_VALID(name)) { + kr = ipc_object_copyin(current_space(), name, + MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *) &kern_port); + if (kr != KERN_SUCCESS) + return THR_ACT_NULL; + + thr_act = convert_port_to_act(kern_port); + + if (IP_VALID(kern_port)) + ipc_port_release_send(kern_port); + } + return thr_act; +} + +task_t +port_name_to_task( + mach_port_name_t name) +{ + ipc_port_t kern_port; + kern_return_t kr; + task_t task = TASK_NULL; + + if (MACH_PORT_VALID(name)) { + kr = ipc_object_copyin(current_space(), name, + MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *) &kern_port); + if (kr != KERN_SUCCESS) + return TASK_NULL; + + task = convert_port_to_task(kern_port); + + if (IP_VALID(kern_port)) + ipc_port_release_send(kern_port); + } + return task; +} + +/* + * Routine: convert_task_to_port + * Purpose: + * Convert from a task to a port. + * Consumes a task ref; produces a naked send right + * which may be invalid. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +convert_task_to_port( + task_t task) +{ + ipc_port_t port; + + itk_lock(task); + if (task->itk_self != IP_NULL) +#if NORMA_TASK + if (task->map == VM_MAP_NULL) + /* norma placeholder task */ + port = ipc_port_copy_send(task->itk_self); + else +#endif /* NORMA_TASK */ + port = ipc_port_make_send(task->itk_self); + else + port = IP_NULL; + itk_unlock(task); + + task_deallocate(task); + return port; +} + +/* + * Routine: convert_act_to_port + * Purpose: + * Convert from a thr_act to a port. + * Consumes an thr_act ref; produces a naked send right + * which may be invalid. + * Conditions: + * Nothing locked. 
+ */ + +ipc_port_t +convert_act_to_port(thr_act) + thread_act_t thr_act; +{ + ipc_port_t port; + + act_lock(thr_act); + if (thr_act->ith_self != IP_NULL) + port = ipc_port_make_send(thr_act->ith_self); + else + port = IP_NULL; + act_unlock(thr_act); + + act_deallocate(thr_act); + return port; +} + +/* + * Routine: space_deallocate + * Purpose: + * Deallocate a space ref produced by convert_port_to_space. + * Conditions: + * Nothing locked. + */ + +void +space_deallocate( + ipc_space_t space) +{ + if (space != IS_NULL) + is_release(space); +} + +/* + * Routine: thread/task_set_exception_ports [kernel call] + * Purpose: + * Sets the thread/task exception port, flavor and + * behavior for the exception types specified by the mask. + * There will be one send right per exception per valid + * port. + * Conditions: + * Nothing locked. If successful, consumes + * the supplied send right. + * Returns: + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The thread is null, + * Illegal mask bit set. + * Illegal exception behavior + * KERN_FAILURE The thread is dead. 
+ */ + +kern_return_t +thread_set_exception_ports( + thread_act_t thr_act, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) +{ + register int i; + ipc_port_t old_port[EXC_TYPES_COUNT]; + + if (!thr_act) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) + return KERN_INVALID_ARGUMENT; + + if (IP_VALID(new_port)) { + switch (new_behavior) { + case EXCEPTION_DEFAULT: + case EXCEPTION_STATE: + case EXCEPTION_STATE_IDENTITY: + break; + default: + return KERN_INVALID_ARGUMENT; + } + } + + /* + * Check the validity of the thread_state_flavor by calling the + * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in + * osfmk/mach/ARCHITECTURE/thread_status.h + */ + if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } + + act_lock(thr_act); + if (!thr_act->active) { + act_unlock(thr_act); + return KERN_FAILURE; + } + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + old_port[i] = thr_act->exc_actions[i].port; + thr_act->exc_actions[i].port = + ipc_port_copy_send(new_port); + thr_act->exc_actions[i].behavior = new_behavior; + thr_act->exc_actions[i].flavor = new_flavor; + } else + old_port[i] = IP_NULL; + }/* for */ + /* + * Consume send rights without any lock held. 
+ */ + act_unlock(thr_act); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + if (IP_VALID(old_port[i])) + ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ + ipc_port_release_send(new_port); + + return KERN_SUCCESS; +}/* thread_set_exception_port */ + +kern_return_t +task_set_exception_ports( + task_t task, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) +{ + register int i; + ipc_port_t old_port[EXC_TYPES_COUNT]; + + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + if (IP_VALID(new_port)) { + switch (new_behavior) { + case EXCEPTION_DEFAULT: + case EXCEPTION_STATE: + case EXCEPTION_STATE_IDENTITY: + break; + default: + return KERN_INVALID_ARGUMENT; + } + } + /* Cannot easily check "new_flavor", but that just means that + * the flavor in the generated exception message might be garbage: + * GIGO */ + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + return KERN_FAILURE; + } + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + old_port[i] = task->exc_actions[i].port; + task->exc_actions[i].port = + ipc_port_copy_send(new_port); + task->exc_actions[i].behavior = new_behavior; + task->exc_actions[i].flavor = new_flavor; + } else + old_port[i] = IP_NULL; + }/* for */ + + /* + * Consume send rights without any lock held. 
+ */ + itk_unlock(task); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + if (IP_VALID(old_port[i])) + ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ + ipc_port_release_send(new_port); + + return KERN_SUCCESS; +}/* task_set_exception_port */ + +/* + * Routine: thread/task_swap_exception_ports [kernel call] + * Purpose: + * Sets the thread/task exception port, flavor and + * behavior for the exception types specified by the + * mask. + * + * The old ports, behavior and flavors are returned + * Count specifies the array sizes on input and + * the number of returned ports etc. on output. The + * arrays must be large enough to hold all the returned + * data, MIG returnes an error otherwise. The masks + * array specifies the corresponding exception type(s). + * + * Conditions: + * Nothing locked. If successful, consumes + * the supplied send right. + * + * Returns upto [in} CountCnt elements. + * Returns: + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The thread is null, + * Illegal mask bit set. + * Illegal exception behavior + * KERN_FAILURE The thread is dead. 
+ */ + +kern_return_t +thread_swap_exception_ports( + thread_act_t thr_act, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors ) +{ + register int i, + j, + count; + ipc_port_t old_port[EXC_TYPES_COUNT]; + + if (!thr_act) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + if (IP_VALID(new_port)) { + switch (new_behavior) { + case EXCEPTION_DEFAULT: + case EXCEPTION_STATE: + case EXCEPTION_STATE_IDENTITY: + break; + default: + return KERN_INVALID_ARGUMENT; + } + } + /* Cannot easily check "new_flavor", but that just means that + * the flavor in the generated exception message might be garbage: + * GIGO */ + + act_lock(thr_act); + if (!thr_act->active) { + act_unlock(thr_act); + return KERN_FAILURE; + } + + count = 0; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + for (j = 0; j < count; j++) { +/* + * search for an identical entry, if found + * set corresponding mask for this exception. 
+ */ + if (thr_act->exc_actions[i].port == ports[j] && + thr_act->exc_actions[i].behavior ==behaviors[j] + && thr_act->exc_actions[i].flavor ==flavors[j]) + { + masks[j] |= (1 << i); + break; + } + }/* for */ + if (j == count) { + masks[j] = (1 << i); + ports[j] = + ipc_port_copy_send(thr_act->exc_actions[i].port); + + behaviors[j] = thr_act->exc_actions[i].behavior; + flavors[j] = thr_act->exc_actions[i].flavor; + count++; + } + + old_port[i] = thr_act->exc_actions[i].port; + thr_act->exc_actions[i].port = + ipc_port_copy_send(new_port); + thr_act->exc_actions[i].behavior = new_behavior; + thr_act->exc_actions[i].flavor = new_flavor; + if (count > *CountCnt) { + break; + } + } else + old_port[i] = IP_NULL; + }/* for */ + + /* + * Consume send rights without any lock held. + */ + act_unlock(thr_act); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + if (IP_VALID(old_port[i])) + ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ + ipc_port_release_send(new_port); + *CountCnt = count; + return KERN_SUCCESS; +}/* thread_swap_exception_ports */ + +kern_return_t +task_swap_exception_ports( + task_t task, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors ) +{ + register int i, + j, + count; + ipc_port_t old_port[EXC_TYPES_COUNT]; + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + if (IP_VALID(new_port)) { + switch (new_behavior) { + case EXCEPTION_DEFAULT: + case EXCEPTION_STATE: + case EXCEPTION_STATE_IDENTITY: + break; + default: + return KERN_INVALID_ARGUMENT; + } + } + /* Cannot easily check "new_flavor", but that just means that + * the flavor in the generated exception message 
might be garbage: + * GIGO */ + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + return KERN_FAILURE; + } + + count = 0; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + for (j = 0; j < count; j++) { +/* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if (task->exc_actions[i].port == ports[j] && + task->exc_actions[i].behavior == behaviors[j] + && task->exc_actions[i].flavor == flavors[j]) + { + masks[j] |= (1 << i); + break; + } + }/* for */ + if (j == count) { + masks[j] = (1 << i); + ports[j] = + ipc_port_copy_send(task->exc_actions[i].port); + behaviors[j] = task->exc_actions[i].behavior; + flavors[j] = task->exc_actions[i].flavor; + count++; + } + old_port[i] = task->exc_actions[i].port; + task->exc_actions[i].port = + ipc_port_copy_send(new_port); + task->exc_actions[i].behavior = new_behavior; + task->exc_actions[i].flavor = new_flavor; + if (count > *CountCnt) { + break; + } + } else + old_port[i] = IP_NULL; + }/* for */ + + + /* + * Consume send rights without any lock held. + */ + itk_unlock(task); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + if (IP_VALID(old_port[i])) + ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ + ipc_port_release_send(new_port); + *CountCnt = count; + + return KERN_SUCCESS; +}/* task_swap_exception_ports */ + +/* + * Routine: thread/task_get_exception_ports [kernel call] + * Purpose: + * Clones a send right for each of the thread/task's exception + * ports specified in the mask and returns the behaviour + * and flavor of said port. + * + * Returns upto [in} CountCnt elements. + * + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Extracted a send right. + * KERN_INVALID_ARGUMENT The thread is null, + * Invalid special port, + * Illegal mask bit set. + * KERN_FAILURE The thread is dead. 
+ */ + +kern_return_t +thread_get_exception_ports( + thread_act_t thr_act, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors ) +{ + register int i, + j, + count; + + if (!thr_act) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + act_lock(thr_act); + if (!thr_act->active) { + act_unlock(thr_act); + return KERN_FAILURE; + } + + count = 0; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (exception_mask & (1 << i)) { + for (j = 0; j < count; j++) { +/* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if (thr_act->exc_actions[i].port == ports[j] && + thr_act->exc_actions[i].behavior ==behaviors[j] + && thr_act->exc_actions[i].flavor == flavors[j]) + { + masks[j] |= (1 << i); + break; + } + }/* for */ + if (j == count) { + masks[j] = (1 << i); + ports[j] = + ipc_port_copy_send(thr_act->exc_actions[i].port); + behaviors[j] = thr_act->exc_actions[i].behavior; + flavors[j] = thr_act->exc_actions[i].flavor; + count++; + if (count >= *CountCnt) { + break; + } + } + } + }/* for */ + + act_unlock(thr_act); + + *CountCnt = count; + return KERN_SUCCESS; +}/* thread_get_exception_ports */ + +kern_return_t +task_get_exception_ports( + task_t task, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors ) +{ + register int i, + j, + count; + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + + if (exception_mask & ~EXC_MASK_ALL) { + return KERN_INVALID_ARGUMENT; + } + + itk_lock(task); + if (task->itk_self == IP_NULL) { + itk_unlock(task); + return KERN_FAILURE; + } + + count = 0; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; 
i++) { + if (exception_mask & (1 << i)) { + for (j = 0; j < count; j++) { +/* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if (task->exc_actions[i].port == ports[j] && + task->exc_actions[i].behavior == behaviors[j] + && task->exc_actions[i].flavor == flavors[j]) + { + masks[j] |= (1 << i); + break; + } + }/* for */ + if (j == count) { + masks[j] = (1 << i); + ports[j] = + ipc_port_copy_send(task->exc_actions[i].port); + behaviors[j] = task->exc_actions[i].behavior; + flavors[j] = task->exc_actions[i].flavor; + count++; + if (count > *CountCnt) { + break; + } + } + } + }/* for */ + + itk_unlock(task); + + *CountCnt = count; + return KERN_SUCCESS; +}/* task_get_exception_ports */ diff --git a/osfmk/kern/ipc_tt.h b/osfmk/kern/ipc_tt.h new file mode 100644 index 000000000..c34731aed --- /dev/null +++ b/osfmk/kern/ipc_tt.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_IPC_TT_H_ +#define _KERN_IPC_TT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Initialize a task's IPC state */ +extern void ipc_task_init( + task_t task, + task_t parent); + +/* Enable a task for IPC access */ +extern void ipc_task_enable( + task_t task); + +/* Disable IPC access to a task */ +extern void ipc_task_disable( + task_t task); + +/* Clean up and destroy a task's IPC state */ +extern void ipc_task_terminate( + task_t task); + +/* Initialize a thread's IPC state */ +extern void ipc_thread_init( + thread_t thread); + +/* Clean up and destroy a thread's IPC state */ +extern void ipc_thread_terminate( + thread_t thread); + +/* Return a send right for the task's user-visible self port */ +extern ipc_port_t retrieve_task_self_fast( + task_t task); + +/* Return a send right for the thread's user-visible self port */ +extern ipc_port_t retrieve_act_self_fast( + thread_act_t); + +/* Convert from a port to a task */ +extern task_t convert_port_to_task( + ipc_port_t port); + +/* Convert from a port entry port to a task */ +extern task_t convert_port_to_task( + ipc_port_t port); + +extern boolean_t ref_task_port_locked( + ipc_port_t port, task_t *ptask); + +/* Convert from a port to a space */ +extern ipc_space_t convert_port_to_space( + ipc_port_t port); + +extern boolean_t ref_space_port_locked( + ipc_port_t port, ipc_space_t *pspace); + +/* Convert from a port to a map */ +extern vm_map_t convert_port_to_map( + ipc_port_t port); + +/* Convert from a map entry port to a map */ +extern vm_map_t convert_port_entry_to_map( + ipc_port_t port); + +/* Convert from a port to a vm_object */ +extern vm_object_t convert_port_entry_to_object( + ipc_port_t port); + +/* Convert from a port to a upl_object */ +extern upl_t convert_port_to_upl( + ipc_port_t port); + +/* Convert from a port to a thread */ +extern thread_act_t convert_port_to_act( + ipc_port_t port); + 
+extern thread_act_t port_name_to_act( + mach_port_name_t port_name); + +extern boolean_t ref_act_port_locked( + ipc_port_t port, thread_act_t *pthr_act); + +/* Convert from a task to a port */ +extern ipc_port_t convert_task_to_port( + task_t task); + +/* Convert from a thread to a port */ +extern ipc_port_t convert_act_to_port( thread_act_t ); + +/* Deallocate a space ref produced by convert_port_to_space */ +extern void space_deallocate( + ipc_space_t space); + +/* Allocate a reply port */ +extern mach_port_name_t mach_reply_port(void); + +/* Initialize a thread_act's ipc mechanism */ +extern void ipc_thr_act_init(task_t, thread_act_t); + +/* Disable IPC access to a thread_act */ +extern void ipc_thr_act_disable(thread_act_t); + +/* Clean up and destroy a thread_act's IPC state */ +extern void ipc_thr_act_terminate(thread_act_t); + +#endif /* _KERN_IPC_TT_H_ */ diff --git a/osfmk/kern/kalloc.c b/osfmk/kern/kalloc.c new file mode 100644 index 000000000..3fa65de18 --- /dev/null +++ b/osfmk/kern/kalloc.c @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.19.5 1995/02/24 15:20:29 alanl + * Lock package cleanup. + * [95/02/15 alanl] + * + * Merge with DIPC2_SHARED. + * [1995/01/05 15:11:02 alanl] + * + * Revision 1.2.28.2 1994/11/10 06:12:50 dwm + * mk6 CR764 - s/spinlock/simple_lock/ (name change only) + * [1994/11/10 05:28:35 dwm] + * + * Revision 1.2.28.1 1994/11/04 10:07:40 dwm + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.2.4 1993/11/08 15:04:18 gm + * CR9710: Updated to new zinit() and zone_change() interfaces. + * * End1.3merge + * [1994/11/04 09:25:48 dwm] + * + * Revision 1.2.19.3 1994/09/23 02:20:52 ezf + * change marker to not FREE + * [1994/09/22 21:33:57 ezf] + * + * Revision 1.2.19.2 1994/06/14 18:36:36 bolinger + * NMK17.2 merge: Replace simple_lock ops. + * [1994/06/14 18:35:17 bolinger] + * + * Revision 1.2.19.1 1994/06/14 17:04:23 bolinger + * Merge up to NMK17.2. + * [1994/06/14 16:54:19 bolinger] + * + * Revision 1.2.23.3 1994/10/14 12:24:33 sjs + * Removed krealloc_spinl routine: the newer locking scheme makes it + * obsolete. + * [94/10/13 sjs] + * + * Revision 1.2.23.2 1994/08/11 14:42:46 rwd + * Post merge cleanup + * [94/08/09 rwd] + * + * Changed zcollectable to use zchange. + * [94/08/04 rwd] + * + * Revision 1.2.17.2 1994/07/08 01:58:45 alanl + * Change comment to match function name. + * [1994/07/08 01:47:59 alanl] + * + * Revision 1.2.17.1 1994/05/26 16:20:38 sjs + * Added krealloc_spinl: same as krealloc but uses spin locks. + * [94/05/25 sjs] + * + * Revision 1.2.23.1 1994/08/04 02:24:55 mmp + * Added krealloc_spinl: same as krealloc but uses spin locks. + * [94/05/25 sjs] + * + * Revision 1.2.13.1 1994/02/11 14:27:12 paire + * Changed krealloc() to make it work on a MP system. 
Added a new parameter + * which is the simple lock that should be held while modifying the memory + * area already initialized. + * Change from NMK16.1 [93/09/02 paire] + * + * Do not set debug for kalloc zones as default. It wastes + * to much space. + * Change from NMK16.1 [93/08/16 bernadat] + * [94/02/07 paire] + * + * Revision 1.2.2.3 1993/07/28 17:15:44 bernard + * CR9523 -- Prototypes. + * [1993/07/27 20:14:12 bernard] + * + * Revision 1.2.2.2 1993/06/02 23:37:46 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:12:59 jeffc] + * + * Revision 1.2 1992/12/07 21:28:42 robert + * integrate any changes below for 14.0 (branch from 13.16 base) + * + * Joseph Barrera (jsb) at Carnegie-Mellon University 11-Sep-92 + * Added krealloc. Added kalloc_max_prerounded for quicker choice between + * zalloc and kmem_alloc. Renamed MINSIZE to KALLOC_MINSIZE. + * [1992/12/06 19:47:16 robert] + * + * Revision 1.1 1992/09/30 02:09:23 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.9 91/05/14 16:43:17 mrt + * Correcting copyright + * + * Revision 2.8 91/03/16 14:50:37 rpd + * Updated for new kmem_alloc interface. + * [91/03/03 rpd] + * + * Revision 2.7 91/02/05 17:27:22 mrt + * Changed to new Mach copyright + * [91/02/01 16:14:12 mrt] + * + * Revision 2.6 90/06/19 22:59:06 rpd + * Made the big kalloc zones collectable. + * [90/06/05 rpd] + * + * Revision 2.5 90/06/02 14:54:47 rpd + * Added kalloc_max, kalloc_map_size. + * [90/03/26 22:06:39 rpd] + * + * Revision 2.4 90/01/11 11:43:13 dbg + * De-lint. + * [89/12/06 dbg] + * + * Revision 2.3 89/09/08 11:25:51 dbg + * MACH_KERNEL: remove non-MACH data types. + * [89/07/11 dbg] + * + * Revision 2.2 89/08/31 16:18:59 rwd + * First Checkin + * [89/08/23 15:41:37 rwd] + * + * Revision 2.6 89/08/02 08:03:28 jsb + * Make all kalloc zones 8 MB big. (No more kalloc panics!) 
+ * [89/08/01 14:10:17 jsb] + * + * Revision 2.4 89/04/05 13:03:10 rvb + * Guarantee a zone max of at least 100 elements or 10 pages + * which ever is greater. Afs (AllocDouble()) puts a great demand + * on the 2048 zone and used to blow away. + * [89/03/09 rvb] + * + * Revision 2.3 89/02/25 18:04:39 gm0w + * Changes for cleanup. + * + * Revision 2.2 89/01/18 02:07:04 jsb + * Give each kalloc zone a meaningful name (for panics); + * create a zone for each power of 2 between MINSIZE + * and PAGE_SIZE, instead of using (obsoleted) NQUEUES. + * [89/01/17 10:16:33 jsb] + * + * + * 13-Feb-88 John Seamons (jks) at NeXT + * Updated to use kmem routines instead of vmem routines. + * + * 21-Jun-85 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/kalloc.c + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * General kernel memory allocator. 
This allocator is designed + * to be used by the kernel to manage dynamic memory fast. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef MACH_BSD +zone_t kalloc_zone(vm_size_t); +#endif + +vm_map_t kalloc_map; +vm_size_t kalloc_map_size = 8 * 1024 * 1024; +vm_size_t kalloc_max; +vm_size_t kalloc_max_prerounded; + +unsigned int kalloc_large_inuse; +vm_size_t kalloc_large_total; +vm_size_t kalloc_large_max; + +/* + * All allocations of size less than kalloc_max are rounded to the + * next highest power of 2. This allocator is built on top of + * the zone allocator. A zone is created for each potential size + * that we are willing to get in small blocks. + * + * We assume that kalloc_max is not greater than 64K; + * thus 16 is a safe array size for k_zone and k_zone_name. + * + * Note that kalloc_max is somewhat confusingly named. + * It represents the first power of two for which no zone exists. + * kalloc_max_prerounded is the smallest allocation size, before + * rounding, for which no zone exists. + */ + +int first_k_zone = -1; +struct zone *k_zone[16]; +static char *k_zone_name[16] = { + "kalloc.1", "kalloc.2", + "kalloc.4", "kalloc.8", + "kalloc.16", "kalloc.32", + "kalloc.64", "kalloc.128", + "kalloc.256", "kalloc.512", + "kalloc.1024", "kalloc.2048", + "kalloc.4096", "kalloc.8192", + "kalloc.16384", "kalloc.32768" +}; + +/* + * Max number of elements per zone. zinit rounds things up correctly + * Doing things this way permits each zone to have a different maximum size + * based on need, rather than just guessing; it also + * means its patchable in case you're wrong! 
+ */
+unsigned long k_zone_max[16] = {
+      1024,		/*      1 Byte  */
+      1024,		/*      2 Byte  */
+      1024,		/*      4 Byte  */
+      1024,		/*      8 Byte  */
+      1024,		/*     16 Byte  */
+      4096,		/*     32 Byte  */
+      4096,		/*     64 Byte  */
+      4096,		/*    128 Byte  */
+      4096,		/*    256 Byte  */
+      1024,		/*    512 Byte  */
+      1024,		/*   1024 Byte  */
+      1024,		/*   2048 Byte  */
+      1024,		/*   4096 Byte  */
+      4096,		/*   8192 Byte  */
+      64,		/*  16384 Byte  */
+      64,		/*  32768 Byte  */
+};
+
+/*
+ *	Initialize the memory allocator.  This should be called only
+ *	once on a system wide basis (i.e. first processor to get here
+ *	does the initialization).
+ *
+ *	This initializes all of the zones.
+ */
+
+void
+kalloc_init(
+	void)
+{
+	kern_return_t retval;
+	vm_offset_t min;
+	vm_size_t size;
+	register int i;
+
+	/* Carve the kalloc submap out of the kernel map; all large
+	 * (non-zone) kallocs come from this submap. */
+	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
+			       FALSE, TRUE, &kalloc_map);
+	if (retval != KERN_SUCCESS)
+		panic("kalloc_init: kmem_suballoc failed");
+
+	/*
+	 * Ensure that zones up to size 8192 bytes exist.
+	 * This is desirable because messages are allocated
+	 * with kalloc, and messages up through size 8192 are common.
+	 * NOTE(review): the code below actually forces kalloc_max up to
+	 * at least 16K, one power of two beyond the 8192 mentioned
+	 * here -- confirm which bound is intended.
+	 */
+
+	if (PAGE_SIZE < 16*1024)
+		kalloc_max = 16*1024;
+	else
+		kalloc_max = PAGE_SIZE;
+	/* smallest pre-rounding size that is NOT served from a zone */
+	kalloc_max_prerounded = kalloc_max / 2 + 1;
+
+	/*
+	 * Allocate a zone for each size we are going to handle.
+	 * We specify non-paged memory.
+	 */
+	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
+		if (size < KALLOC_MINSIZE) {
+			k_zone[i] = 0;	/* sizes below KALLOC_MINSIZE get no zone */
+			continue;
+		}
+		if (size == KALLOC_MINSIZE) {
+			first_k_zone = i;	/* index of the smallest real zone */
+		}
+		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
+				  k_zone_name[i]);
+	}
+}
+
+vm_offset_t
+kalloc_canblock(
+	vm_size_t size,
+	boolean_t canblock)
+{
+	register int zindex;
+	register vm_size_t allocsize;
+
+	/*
+	 * If size is too large for a zone, then use kmem_alloc.
+	 * (We use kmem_alloc instead of kmem_alloc_wired so that
+	 * krealloc can use kmem_realloc.)
+ */
+
+	if (size >= kalloc_max_prerounded) {
+		vm_offset_t addr;
+
+		/* kmem_alloc could block so we return if noblock */
+		if (!canblock) {
+			return(0);
+		}
+		if (kmem_alloc(kalloc_map, &addr, size) != KERN_SUCCESS)
+			addr = 0;
+
+		if (addr) {
+			kalloc_large_inuse++;
+			kalloc_large_total += size;
+
+			if (kalloc_large_total > kalloc_large_max)
+				kalloc_large_max = kalloc_large_total;
+		}
+		return(addr);
+	}
+
+	/* compute the size of the block that we will actually allocate */
+
+	allocsize = KALLOC_MINSIZE;
+	zindex = first_k_zone;
+	while (allocsize < size) {
+		allocsize <<= 1;
+		zindex++;
+	}
+
+	/* allocate from the appropriate zone */
+
+	assert(allocsize < kalloc_max);
+	return(zalloc_canblock(k_zone[zindex], canblock));
+}
+
+vm_offset_t
+kalloc(
+	vm_size_t size)
+{
+	return( kalloc_canblock(size, TRUE) );
+}
+
+vm_offset_t
+kalloc_noblock(
+	vm_size_t size)
+{
+	return( kalloc_canblock(size, FALSE) );
+}
+
+
+/*
+ * Grow an allocation in place (zone case) or by copy (kmem case).
+ * Called with "lock" held; the lock is dropped around every blocking
+ * allocation call and re-taken before *addrp is updated.
+ */
+void
+krealloc(
+	vm_offset_t *addrp,
+	vm_size_t old_size,
+	vm_size_t new_size,
+	simple_lock_t lock)
+{
+	register int zindex;
+	register vm_size_t allocsize;
+	vm_offset_t naddr;
+
+	/* can only be used for increasing allocation size */
+
+	assert(new_size > old_size);
+
+	/* if old_size is zero, then we are simply allocating */
+
+	if (old_size == 0) {
+		simple_unlock(lock);
+		naddr = kalloc(new_size);
+		simple_lock(lock);
+		*addrp = naddr;
+		return;
+	}
+
+	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */
+
+	if (old_size >= kalloc_max_prerounded) {
+		vm_offset_t old_addr = *addrp;
+
+		old_size = round_page(old_size);
+		new_size = round_page(new_size);
+		if (new_size > old_size) {
+
+			/*
+			 * Drop the caller's lock around the (blocking)
+			 * kmem_realloc() call, matching every other
+			 * blocking path in this function.  The original
+			 * called simple_lock() below without ever having
+			 * released the lock -- a self-deadlock.
+			 */
+			simple_unlock(lock);
+			if (kmem_realloc(kalloc_map, old_addr, old_size, &naddr,
+					 new_size) != KERN_SUCCESS) {
+				panic("krealloc: kmem_realloc");
+				naddr = 0;
+			}
+
+			simple_lock(lock);
+			*addrp = naddr;
+
+			/* kmem_realloc() doesn't free the old page range;
+			 * free the ORIGINAL range.  The original code
+			 * freed *addrp, which at this point already held
+			 * the NEW address -- a use-after-free of the
+			 * block being returned to the caller. */
+			kmem_free(kalloc_map, old_addr, old_size);
+
+			kalloc_large_total += (new_size - old_size);
+
+			if (kalloc_large_total > kalloc_large_max)
+				kalloc_large_max = kalloc_large_total;
+		}
+		return;
+	}
+
+	/* compute the size of the block that we actually allocated */
+
+	allocsize = KALLOC_MINSIZE;
+	zindex = first_k_zone;
+	while (allocsize < old_size) {
+		allocsize <<= 1;
+		zindex++;
+	}
+
+	/* if new size fits in old block, then return */
+
+	if (new_size <= allocsize) {
+		return;
+	}
+
+	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */
+
+	simple_unlock(lock);
+	if (new_size >= kalloc_max_prerounded) {
+		if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
+			panic("krealloc: kmem_alloc");
+			simple_lock(lock);
+			*addrp = 0;
+			return;
+		}
+		kalloc_large_inuse++;
+		kalloc_large_total += new_size;
+
+		if (kalloc_large_total > kalloc_large_max)
+			kalloc_large_max = kalloc_large_total;
+	} else {
+		register int new_zindex;
+
+		allocsize <<= 1;
+		new_zindex = zindex + 1;
+		while (allocsize < new_size) {
+			allocsize <<= 1;
+			new_zindex++;
+		}
+		naddr = zalloc(k_zone[new_zindex]);
+	}
+	simple_lock(lock);
+
+	/* copy existing data */
+
+	bcopy((const char *)*addrp, (char *)naddr, old_size);
+
+	/* free old block, and return */
+
+	zfree(k_zone[zindex], *addrp);
+
+	/* set up new address */
+
+	*addrp = naddr;
+}
+
+
+vm_offset_t
+kget(
+	vm_size_t size)
+{
+	register int zindex;
+	register vm_size_t allocsize;
+
+	/* size must not be too large for a zone */
+
+	if (size >= kalloc_max_prerounded) {
+		/* This will never work, so we might as well panic */
+		panic("kget");
+	}
+
+	/* compute the size of the block that we will actually allocate */
+
+	allocsize = KALLOC_MINSIZE;
+	zindex = first_k_zone;
+	while (allocsize < size) {
+		allocsize <<= 1;
+		zindex++;
+	}
+
+	/* allocate from the appropriate zone */
+
+	assert(allocsize < kalloc_max);
+	return(zget(k_zone[zindex]));
+}
+
+void
+kfree(
+	vm_offset_t data,
+	vm_size_t size)
+{
+
register int zindex; + register vm_size_t freesize; + + /* if size was too large for a zone, then use kmem_free */ + + if (size >= kalloc_max_prerounded) { + kmem_free(kalloc_map, data, size); + + kalloc_large_total -= size; + kalloc_large_inuse--; + + return; + } + + /* compute the size of the block that we actually allocated from */ + + freesize = KALLOC_MINSIZE; + zindex = first_k_zone; + while (freesize < size) { + freesize <<= 1; + zindex++; + } + + /* free to the appropriate zone */ + + assert(freesize < kalloc_max); + zfree(k_zone[zindex], data); +} + +#ifdef MACH_BSD +zone_t +kalloc_zone( + vm_size_t size) +{ + register int zindex = 0; + register vm_size_t allocsize; + + /* compute the size of the block that we will actually allocate */ + + allocsize = size; + if (size <= kalloc_max) { + allocsize = KALLOC_MINSIZE; + zindex = first_k_zone; + while (allocsize < size) { + allocsize <<= 1; + zindex++; + } + return (k_zone[zindex]); + } + return (ZONE_NULL); +} +#endif + + + +kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, + vm_size_t *alloc_size, int *collectable, int *exhaustable) +{ + *count = kalloc_large_inuse; + *cur_size = kalloc_large_total; + *max_size = kalloc_large_max; + *elem_size = kalloc_large_total / kalloc_large_inuse; + *alloc_size = kalloc_large_total / kalloc_large_inuse; + *collectable = 0; + *exhaustable = 0; +} + diff --git a/osfmk/kern/kalloc.h b/osfmk/kern/kalloc.h new file mode 100644 index 000000000..df89dd686 --- /dev/null +++ b/osfmk/kern/kalloc.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_KALLOC_H_ +#define _KERN_KALLOC_H_ + + +#include + +#define KALLOC_MINSIZE 16 + +extern vm_offset_t kalloc( + vm_size_t size); + +extern vm_offset_t kalloc_noblock( + vm_size_t size); + +extern vm_offset_t kget( + vm_size_t size); + +extern void kfree( + vm_offset_t data, + vm_size_t size); + +#ifdef MACH_KERNEL_PRIVATE +#include + +extern void kalloc_init( + void); + +extern void krealloc( + vm_offset_t *addrp, + vm_size_t old_size, + vm_size_t new_size, + simple_lock_t lock); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_KALLOC_H_ */ diff --git a/osfmk/kern/kern_print.h b/osfmk/kern/kern_print.h new file mode 100644 index 000000000..2a5b7d062 --- /dev/null +++ b/osfmk/kern/kern_print.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/01/06 19:47:13 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:19:25 dwm] + * + * Revision 1.1.2.1 1993/11/22 20:14:46 jeffc + * Modularized declarations of ddb print functions. + * [1993/11/22 19:03:03 jeffc] + * + * $EndLog$ + */ + +#ifndef KERN_PRINT_H_ +#define KERN_PRINT_H_ + +#include + +extern void db_show_all_slocks(void); + + +extern void db_show_one_zone( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +extern void db_show_all_zones( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif); + +#endif /* KERN_PRINT_H_ */ diff --git a/osfmk/kern/kern_types.h b/osfmk/kern/kern_types.h new file mode 100644 index 000000000..44d86bd49 --- /dev/null +++ b/osfmk/kern/kern_types.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _KERN_KERN_TYPES_H_ +#define _KERN_KERN_TYPES_H_ + +#ifdef KERNEL_PRIVATE + +#include +#include + +#if !defined(MACH_KERNEL_PRIVATE) + +/* + * Declare empty structure definitions for export to other + * kernel components. This lets us still provide some level + * of type checking, without exposing our internal data + * structures. + */ +struct thread_shuttle ; +struct task ; +struct host ; +struct processor ; +struct processor_set ; +struct thread_activation ; +struct subsystem ; +struct semaphore ; +struct lock_set ; +struct ledger ; +struct alarm ; +struct clock ; +struct zone ; +struct wait_queue ; + +#else /* MACH_KERNEL_PRIVATE */ + +#include +typedef struct clock *clock_t; /* Internal use only */ + +#endif /* MACH_KERNEL_PRIVATE */ + +typedef struct thread_shuttle *thread_t; +typedef struct thread_shuttle *thread_shuttle_t; +typedef struct task *task_t; +typedef struct host *host_t; +typedef struct processor *processor_t; +typedef struct processor_set *processor_set_t; +typedef struct thread_activation *thread_act_t; +typedef struct subsystem *subsystem_t; +typedef struct semaphore *semaphore_t; +typedef struct lock_set *lock_set_t; +typedef struct ledger *ledger_t; +typedef struct alarm *alarm_t; +typedef struct clock *clock_serv_t; +typedef struct clock *clock_ctrl_t; +typedef struct zone *zone_t; +typedef struct wait_queue *wait_queue_t; + +typedef host_t host_priv_t; +typedef host_t host_security_t; +typedef processor_set_t processor_set_name_t; +typedef vm_offset_t ipc_kobject_t; + +typedef void *event_t; /* wait event */ +typedef void (*continuation_t)(void); /* continuation */ + +#define ZONE_NULL ((zone_t) 0) +#define IKO_NULL ((ipc_kobject_t) 0) +#define WAIT_QUEUE_NULL ((wait_queue_t) 0) +#define NO_EVENT ((event_t)0) + + +#endif /* KERNEL_PRIVATE */ + +#endif /* _KERN_KERN_TYPES_H_ */ diff --git a/osfmk/kern/kmod.c b/osfmk/kern/kmod.c new file mode 100644 index 
000000000..406028fd2 --- /dev/null +++ b/osfmk/kern/kmod.c @@ -0,0 +1,799 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 1999 Mar 29 rsulack created. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +kmod_info_t *kmod = 0; +static int kmod_index = 1; + +decl_simple_lock_data(,kmod_lock) +decl_simple_lock_data(,kmod_queue_lock) + +typedef struct cmd_queue_entry { + queue_chain_t links; + vm_address_t data; + vm_size_t size; +} cmd_queue_entry_t; + +queue_head_t kmod_cmd_queue; + +void +kmod_init() +{ + simple_lock_init(&kmod_lock, ETAP_MISC_Q); + simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q); + queue_init(&kmod_cmd_queue); +} + +kmod_info_t * +kmod_lookupbyid(kmod_t id) +{ + kmod_info_t *k = 0; + + k = kmod; + while (k) { + if (k->id == id) break; + k = k->next; + } + + return k; +} + +kmod_info_t * +kmod_lookupbyname(char * name) +{ + kmod_info_t *k = 0; + + k = kmod; + while (k) { + if (!strcmp(k->name, name)) break; + k = k->next; + } + + return k; +} + +// XXX add a nocopy flag?? + +kern_return_t +kmod_queue_cmd(vm_address_t data, vm_size_t size) +{ + kern_return_t rc; + cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry)); + if (!e) return KERN_RESOURCE_SHORTAGE; + + rc = kmem_alloc(kernel_map, &e->data, size); + if (rc != KERN_SUCCESS) { + kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry)); + return rc; + } + e->size = size; + bcopy((void *)data, (void *)e->data, size); + + simple_lock(&kmod_queue_lock); + enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e); + simple_unlock(&kmod_queue_lock); + + thread_wakeup_one((event_t)&kmod_cmd_queue); + + return KERN_SUCCESS; +} + +kern_return_t +kmod_load_extension(char *name) +{ + kmod_load_extension_cmd_t *data; + vm_size_t size; + + size = sizeof(kmod_load_extension_cmd_t); + data = (kmod_load_extension_cmd_t *)kalloc(size); + if (!data) return KERN_RESOURCE_SHORTAGE; + + data->type = KMOD_LOAD_EXTENSION_PACKET; + strncpy(data->name, name, KMOD_MAX_NAME); + + return kmod_queue_cmd((vm_address_t)data, size); +} + +kern_return_t +kmod_load_extension_with_dependencies(char *name, char **dependencies) +{ + 
kmod_load_with_dependencies_cmd_t *data; + vm_size_t size; + char **c; + int i, count = 0; + + c = dependencies; + if (c) { + while (*c) { + count++; c++; + } + } + size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1; + data = (kmod_load_with_dependencies_cmd_t *)kalloc(size); + if (!data) return KERN_RESOURCE_SHORTAGE; + + data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET; + strncpy(data->name, name, KMOD_MAX_NAME); + + c = dependencies; + for (i=0; i < count; i++) { + strncpy(data->dependencies[i], *c, KMOD_MAX_NAME); + c++; + } + data->dependencies[count][0] = 0; + + return kmod_queue_cmd((vm_address_t)data, size); +} +kern_return_t +kmod_send_generic(int type, void *generic_data, int size) +{ + kmod_generic_cmd_t *data; + + data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int)); + if (!data) return KERN_RESOURCE_SHORTAGE; + + data->type = type; + bcopy(data->data, generic_data, size); + + return kmod_queue_cmd((vm_address_t)data, size + sizeof(int)); +} + +kern_return_t +kmod_create_internal(kmod_info_t *info, kmod_t *id) +{ + kern_return_t rc; + + if (!info) return KERN_INVALID_ADDRESS; + + // double check for page alignment + if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) { + return KERN_INVALID_ADDRESS; + } + + rc = vm_map_wire(kernel_map, info->address + info->hdr_size, + info->address + info->size, VM_PROT_DEFAULT, FALSE); + if (rc != KERN_SUCCESS) { + return rc; + } + + simple_lock(&kmod_lock); + + // check to see if already loaded + if (kmod_lookupbyname(info->name)) { + simple_unlock(&kmod_lock); + rc = vm_map_unwire(kernel_map, info->address + info->hdr_size, + info->address + info->size, FALSE); + assert(rc == KERN_SUCCESS); + return KERN_INVALID_ARGUMENT; + } + + info->id = kmod_index++; + info->reference_count = 0; + + info->next = kmod; + kmod = info; + + *id = info->id; + + simple_unlock(&kmod_lock); + + printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n", + info->name, info->id, info->size / PAGE_SIZE, 
info->address, info->hdr_size); + + return KERN_SUCCESS; +} + + +kern_return_t +kmod_create(host_priv_t host_priv, + kmod_info_t *info, + kmod_t *id) +{ + if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST; + return kmod_create_internal(info, id); +} + +kern_return_t +kmod_create_fake(char *name, char *version) +{ + kmod_info_t *info; + + info = (kmod_info_t *)kalloc(sizeof(kmod_info_t)); + if (!info) { + return KERN_RESOURCE_SHORTAGE; + } + + // make de fake + info->info_version = KMOD_INFO_VERSION; + bcopy(name, info->name, KMOD_MAX_NAME); + bcopy(version, info->version, KMOD_MAX_NAME); + info->reference_count = 1; // keep it from unloading, starting, stopping + info->reference_list = 0; + info->address = info->size = info->hdr_size = 0; + info->start = info->stop = 0; + + simple_lock(&kmod_lock); + + // check to see if already "loaded" + if (kmod_lookupbyname(info->name)) { + simple_unlock(&kmod_lock); + return KERN_INVALID_ARGUMENT; + } + + info->id = kmod_index++; + + info->next = kmod; + kmod = info; + + simple_unlock(&kmod_lock); + + return KERN_SUCCESS; +} + +kern_return_t +kmod_destroy_internal(kmod_t id) +{ + kern_return_t rc; + kmod_info_t *k; + kmod_info_t *p; + + simple_lock(&kmod_lock); + + k = p = kmod; + while (k) { + if (k->id == id) { + kmod_reference_t *r, *t; + + if (k->reference_count != 0) { + simple_unlock(&kmod_lock); + return KERN_INVALID_ARGUMENT; + } + + if (k == p) { // first element + kmod = k->next; + } else { + p->next = k->next; + } + simple_unlock(&kmod_lock); + + r = k->reference_list; + while (r) { + r->info->reference_count--; + t = r; + r = r->next; + kfree((vm_offset_t)t, sizeof(struct kmod_reference)); + } + + printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n", + k->name, k->id, k->size / PAGE_SIZE, k->address); + + rc = vm_map_unwire(kernel_map, k->address + k->hdr_size, + k->address + k->size, FALSE); + assert(rc == KERN_SUCCESS); + + rc = vm_deallocate(kernel_map, k->address, k->size); + 
assert(rc == KERN_SUCCESS); + + return KERN_SUCCESS; + } + p = k; + k = k->next; + } + + simple_unlock(&kmod_lock); + + return KERN_INVALID_ARGUMENT; +} + + +kern_return_t +kmod_destroy(host_priv_t host_priv, + kmod_t id) +{ + if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST; + return kmod_destroy_internal(id); +} + + +kern_return_t +kmod_start_or_stop( + kmod_t id, + int start, + kmod_args_t *data, + mach_msg_type_number_t *dataCount) +{ + kern_return_t rc = KERN_SUCCESS; + void * user_data = 0; + kern_return_t (*func)(); + kmod_info_t *k; + + simple_lock(&kmod_lock); + + k = kmod_lookupbyid(id); + if (!k || k->reference_count) { + simple_unlock(&kmod_lock); + rc = KERN_INVALID_ARGUMENT; + goto finish; + } + + if (start) { + func = (void *)k->start; + } else { + func = (void *)k->stop; + } + + simple_unlock(&kmod_lock); + + // + // call kmod entry point + // + if (data && dataCount && *data && *dataCount) { + vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data); + } + + rc = (*func)(k, user_data); + +finish: + + if (user_data) { + (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount); + } + if (data) *data = 0; + if (dataCount) *dataCount = 0; + + return rc; +} + + +/* + * The retain and release calls take no user data, but the caller + * may have sent some in error (the MIG definition allows it). + * If this is the case, they will just return that same data + * right back to the caller (since they never touch the *data and + * *dataCount fields). 
+ */ +kern_return_t +kmod_retain(kmod_t id) +{ + kern_return_t rc = KERN_SUCCESS; + + kmod_info_t *t; // reference to + kmod_info_t *f; // reference from + kmod_reference_t *r = 0; + + r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference)); + if (!r) { + rc = KERN_RESOURCE_SHORTAGE; + goto finish; + } + + simple_lock(&kmod_lock); + + t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id)); + f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id)); + if (!t || !f) { + simple_unlock(&kmod_lock); + if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference)); + rc = KERN_INVALID_ARGUMENT; + goto finish; + } + + r->next = f->reference_list; + r->info = t; + f->reference_list = r; + t->reference_count++; + + simple_unlock(&kmod_lock); + +finish: + + return rc; +} + + +kern_return_t +kmod_release(kmod_t id) +{ + kern_return_t rc = KERN_INVALID_ARGUMENT; + + kmod_info_t *t; // reference to + kmod_info_t *f; // reference from + kmod_reference_t *r = 0; + kmod_reference_t * p; + + simple_lock(&kmod_lock); + + t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id)); + f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id)); + if (!t || !f) { + rc = KERN_INVALID_ARGUMENT; + goto finish; + } + + p = r = f->reference_list; + while (r) { + if (r->info == t) { + if (p == r) { // first element + f->reference_list = r->next; + } else { + p->next = r->next; + } + r->info->reference_count--; + + simple_unlock(&kmod_lock); + kfree((vm_offset_t)r, sizeof(struct kmod_reference)); + rc = KERN_SUCCESS; + goto finish; + } + p = r; + r = r->next; + } + + simple_unlock(&kmod_lock); + +finish: + + return rc; +} + + +kern_return_t +kmod_control(host_priv_t host_priv, + kmod_t id, + kmod_control_flavor_t flavor, + kmod_args_t *data, + mach_msg_type_number_t *dataCount) +{ + kern_return_t rc = KERN_SUCCESS; + + if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST; + + switch (flavor) { + + case KMOD_CNTL_START: + case KMOD_CNTL_STOP: + { + rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START), + data, dataCount); + break; + } + + 
case KMOD_CNTL_RETAIN: + { + rc = kmod_retain(id); + break; + } + + case KMOD_CNTL_RELEASE: + { + rc = kmod_release(id); + break; + } + + case KMOD_CNTL_GET_CMD: { + + cmd_queue_entry_t *e; + + /* + * Throw away any data the user may have sent in error. + * We must do this, because we are likely to return to + * some data for these commands (thus causing a leak of + * whatever data the user sent us in error). + */ + if (*data && *dataCount) { + vm_map_copy_discard(*data); + *data = 0; + *dataCount = 0; + } + + simple_lock(&kmod_queue_lock); + + if (queue_empty(&kmod_cmd_queue)) { + assert_wait((event_t)&kmod_cmd_queue, THREAD_ABORTSAFE); + simple_unlock(&kmod_queue_lock); + thread_block((void(*)(void))0); + simple_lock(&kmod_queue_lock); + if (queue_empty(&kmod_cmd_queue)) { + // we must have been interrupted! + simple_unlock(&kmod_queue_lock); + return KERN_ABORTED; + } + } + e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue); + + simple_unlock(&kmod_queue_lock); + + rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data); + if (rc) { + simple_lock(&kmod_queue_lock); + enqueue_head(&kmod_cmd_queue, (queue_entry_t)e); + simple_unlock(&kmod_queue_lock); + *data = 0; + *dataCount = 0; + return rc; + } + *dataCount = e->size; + + kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry)); + + break; + } + + default: + rc = KERN_INVALID_ARGUMENT; + } + + return rc; +}; + + +kern_return_t +kmod_get_info(host_t host, + kmod_info_array_t *kmods, + mach_msg_type_number_t *kmodCount) +{ + vm_offset_t data; + kmod_info_t *k, *p1; + kmod_reference_t *r, *p2; + int ref_count; + unsigned size = 0; + kern_return_t rc = KERN_SUCCESS; + + *kmods = (void *)0; + *kmodCount = 0; + +retry: + simple_lock(&kmod_lock); + size = 0; + k = kmod; + while (k) { + size += sizeof(kmod_info_t); + r = k->reference_list; + while (r) { + size +=sizeof(kmod_reference_t); + r = r->next; + } + k = k->next; + } + simple_unlock(&kmod_lock); + if (!size) return KERN_SUCCESS; + + rc = 
kmem_alloc(kernel_map, &data, size); + if (rc) return rc; + + // copy kmod into data, retry if kmod's size has changed (grown) + // the copied out data is tweeked to figure what's what at user level + // change the copied out k->next pointers to point to themselves + // change the k->reference into a count, tack the references on + // the end of the data packet in the order they are found + + simple_lock(&kmod_lock); + k = kmod; p1 = (kmod_info_t *)data; + while (k) { + if ((p1 + 1) > (kmod_info_t *)(data + size)) { + simple_unlock(&kmod_lock); + kmem_free(kernel_map, data, size); + goto retry; + } + + *p1 = *k; + if (k->next) p1->next = k; + p1++; k = k->next; + } + + p2 = (kmod_reference_t *)p1; + k = kmod; p1 = (kmod_info_t *)data; + while (k) { + r = k->reference_list; ref_count = 0; + while (r) { + if ((p2 + 1) > (kmod_reference_t *)(data + size)) { + simple_unlock(&kmod_lock); + kmem_free(kernel_map, data, size); + goto retry; + } + // note the last 'k' in the chain has its next == 0 + // since there can only be one like that, + // this case is handled by the caller + *p2 = *r; + p2++; r = r->next; ref_count++; + } + p1->reference_list = (kmod_reference_t *)ref_count; + p1++; k = k->next; + } + simple_unlock(&kmod_lock); + + rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods); + if (rc) { + kmem_free(kernel_map, data, size); + *kmods = 0; + *kmodCount = 0; + return rc; + } + *kmodCount = size; + + return KERN_SUCCESS; +} + +#include + +extern void *getsectdatafromheader(struct mach_header *mhp, + const char *segname, + const char *sectname, + int *size); + +static kern_return_t +kmod_call_funcs_in_section(struct mach_header *header, const char *sectName) +{ + typedef void (*Routine)(void); + Routine * routines; + int size, i; + + if (header->magic != MH_MAGIC) { + return KERN_INVALID_ARGUMENT; + } + + routines = (Routine *) getsectdatafromheader(header, SEG_TEXT, sectName, &size); + if (!routines) return KERN_SUCCESS; + + size /= 
sizeof(Routine); + for (i = 0; i < size; i++) { + (*routines[i])(); + } + + return KERN_SUCCESS; +} + +kern_return_t +kmod_initialize_cpp(kmod_info_t *info) +{ + return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor"); +} + +kern_return_t +kmod_finalize_cpp(kmod_info_t *info) +{ + return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor"); +} + +kern_return_t +kmod_default_start(struct kmod_info *ki, void *data) +{ + return KMOD_RETURN_SUCCESS; +} + +kern_return_t +kmod_default_stop(struct kmod_info *ki, void *data) +{ + return KMOD_RETURN_SUCCESS; +} + +#define IS_IN_BACKTRACE 0xdeadbeef +#define IS_A_DEPENDENCY 0xbeefdead + +void +kmod_dump(vm_offset_t *addr, unsigned int cnt) +{ + kmod_info_t *k; + kmod_reference_t *r; + int i, found_one = 0; + + // find backtrace addresses that are inside a kmod + for (i=0; i < cnt; i++, addr++) { + k = kmod; + while (k) { + // XXX - validate page(s) that k points to + if(pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) { /* Exit loop if page not mapped */ + printf("kmod scan stopped due to missing page: %08X\n", k); + break; + } + if ((*addr >= k->address) && (*addr < (k->address + k->size))) { + // got one, blast info_version, we don't need it at this point + k->info_version = IS_IN_BACKTRACE; + found_one++; + break; + } + k = k->next; + } + } + if (!found_one) return; + + printf("kernel modules in backtrace: "); + k = kmod; + while (k) { + if(pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) { /* Exit loop if page not mapped */ + printf("kmod scan stopped due to missing page: %08X\n", k); + break; + } + if (k->info_version == IS_IN_BACKTRACE) { + printf("%s(%s)@0x%x ", k->name, k->version, k->address); + } + k = k->next; + } + printf("\n"); + + // look for dependencies + k = kmod; found_one = 0; + while (k) { + if(pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) { /* Exit loop if page not mapped */ + printf("kmod dependency scan stopped due to missing page: %08X\n", k); 
+ break; + } + if (k->info_version == IS_IN_BACKTRACE) { + r = k->reference_list; + while (r) { + // XXX - validate page(s) that r and r->info point to + if(pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) { /* Exit loop if page not mapped */ + printf("kmod validation scan stopped due to missing page: %08X\n", r); + break; + } + if (r->info->info_version != IS_IN_BACKTRACE) { + r->info->info_version = IS_A_DEPENDENCY; + found_one++; + } + r = r->next; + } + } + k = k->next; + } + if (!found_one) goto cleanup; + + printf("kernel module dependencies: "); + k = kmod; + while (k) { + if(pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) { /* Exit loop if page not mapped */ + printf("kmod dependency print stopped due to missing page: %08X\n", k); + break; + } + if (k->info_version == IS_A_DEPENDENCY) { + printf("%s(%s)@0x%x ", k->name, k->version, k->address); + } + k = k->next; + } + printf("\n"); + + cleanup: + // in case we double panic + k = kmod; + while (k) { + if(pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) { /* Exit loop if page not mapped */ + printf("kmod dump cleanup stopped due to missing page: %08X\n", k); + break; + } + k->info_version = KMOD_INFO_VERSION; + k = k->next; + } +} diff --git a/osfmk/kern/ledger.c b/osfmk/kern/ledger.c new file mode 100644 index 000000000..e7e897c49 --- /dev/null +++ b/osfmk/kern/ledger.c @@ -0,0 +1,439 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1995/01/06 19:47:19 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:19:28 dwm] + * + * Revision 1.1.3.4 1994/05/13 20:10:01 tmt + * Changed three unsigned casts to natural_t. + * [1994/05/12 22:12:28 tmt] + * + * Revision 1.1.3.2 1993/11/30 18:26:24 jph + * CR10228 -- Typo in unlock(), ledger_ledger should be child_ledger. + * [1993/11/30 16:10:43 jph] + * + * Revision 1.1.3.1 1993/11/24 21:22:14 jph + * CR9801 brezak merge, ledgers, security and NMK15_COMPAT + * [1993/11/23 22:41:07 jph] + * + * Revision 1.1.1.4 1993/09/08 14:17:36 brezak + * Include for protos. + * + * Revision 1.1.1.3 1993/08/20 14:16:55 brezak + * Created. + * + * $EndLog$ + */ + +/* + * 8/13/93 + * + * This is a half-hearted attempt at providing the parts of the + * ledger facility to satisfy the ledger interfaces. + * + * This implementation basically leaves the (dysfunctional) ledgers + * unfunctional and are mearly here to satisfy the Mach spec interface + * reqirements. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +ledger_t root_wired_ledger; +ledger_t root_paged_ledger; + + +/* Utility routine to handle entries to a ledger */ +kern_return_t +ledger_enter( + ledger_t ledger, + ledger_item_t amount) +{ + /* Need to lock the ledger */ + ledger_lock(ledger); + + if (amount > 0) { + if (ledger->ledger_limit != LEDGER_ITEM_INFINITY && + ledger->ledger_balance + amount > ledger->ledger_limit) { + /* XXX this is where you do BAD things */ + printf("Ledger limit exceeded ! ledger=%x lim=%d balance=%d\n", + ledger, ledger->ledger_limit, + ledger->ledger_balance); + ledger_unlock(ledger); + return(KERN_RESOURCE_SHORTAGE); + } + if ((natural_t)(ledger->ledger_balance + amount) + < LEDGER_ITEM_INFINITY) + ledger->ledger_balance += amount; + else + ledger->ledger_balance = LEDGER_ITEM_INFINITY; + } + else if (amount) { + if (ledger->ledger_balance + amount > 0) + ledger->ledger_balance += amount; + else + ledger->ledger_balance = 0; + } + ledger_unlock(ledger); + return(KERN_SUCCESS); +} + +/* Utility routine to create a new ledger */ +static ledger_t +ledger_allocate( + ledger_item_t limit, + ledger_t ledger_ledger, + ledger_t ledger_parent) +{ + ledger_t ledger; + + ledger = (ledger_t)kalloc(sizeof(ledger_data_t)); + if (ledger == LEDGER_NULL) + return(LEDGER_NULL); + + ledger->ledger_self = ipc_port_alloc_kernel(); + if (ledger->ledger_self == IP_NULL) + return(LEDGER_NULL); + + ledger_lock_init(ledger); + ledger->ledger_limit = limit; + ledger->ledger_balance = 0; + ledger->ledger_service_port = MACH_PORT_NULL; + ledger->ledger_ledger = ledger_ledger; + ledger->ledger_parent = ledger_parent; + ipc_kobject_set(ledger->ledger_self, (ipc_kobject_t)ledger, + IKOT_LEDGER); + + return(ledger); +} + +/* Utility routine to destroy a ledger */ +static void +ledger_deallocate( + ledger_t ledger) +{ + /* XXX can be many send rights (copies) of this */ + 
ipc_port_dealloc_kernel(ledger->ledger_self); + + /* XXX release send right on service port */ + kfree((vm_offset_t)ledger, sizeof(*ledger)); +} + + +/* + * Inititalize the ledger facility + */ +void ledger_init(void) +{ + /* + * Allocate the root ledgers; wired and paged. + */ + root_wired_ledger = ledger_allocate(LEDGER_ITEM_INFINITY, + LEDGER_NULL, LEDGER_NULL); + if (root_wired_ledger == LEDGER_NULL) + panic("can't allocate root (wired) ledger"); + ipc_port_make_send(root_wired_ledger->ledger_self); + + root_paged_ledger = ledger_allocate(LEDGER_ITEM_INFINITY, + LEDGER_NULL, LEDGER_NULL); + if (root_paged_ledger == LEDGER_NULL) + panic("can't allocate root (paged) ledger"); + ipc_port_make_send(root_paged_ledger->ledger_self); +} + +/* + * Create a subordinate ledger + */ +kern_return_t ledger_create( + ledger_t parent_ledger, + ledger_t ledger_ledger, + ledger_t *new_ledger, + ledger_item_t transfer) +{ + if (parent_ledger == LEDGER_NULL) + return(KERN_INVALID_ARGUMENT); + + if (ledger_ledger == LEDGER_NULL) + return(KERN_INVALID_LEDGER); + + /* + * Allocate a new ledger and change the ledger_ledger for + * its space. + */ + ledger_lock(ledger_ledger); + if ((ledger_ledger->ledger_limit != LEDGER_ITEM_INFINITY) && + (ledger_ledger->ledger_balance + sizeof(ledger_data_t) > + ledger_ledger->ledger_limit)) { + ledger_unlock(ledger_ledger); + return(KERN_RESOURCE_SHORTAGE); + } + + *new_ledger = ledger_allocate(LEDGER_ITEM_INFINITY, ledger_ledger, parent_ledger); + if (*new_ledger == LEDGER_NULL) { + ledger_unlock(ledger_ledger); + return(KERN_RESOURCE_SHORTAGE); + } + + /* + * Now transfer the limit for the new ledger from the parent + */ + ledger_lock(parent_ledger); + if (parent_ledger->ledger_limit != LEDGER_ITEM_INFINITY) { + /* Would the existing balance exceed the new limit ? 
*/ + if (parent_ledger->ledger_limit - transfer < parent_ledger->ledger_balance) { + ledger_unlock(parent_ledger); + ledger_unlock(ledger_ledger); + return(KERN_RESOURCE_SHORTAGE); + } + if (parent_ledger->ledger_limit - transfer > 0) + parent_ledger->ledger_limit -= transfer; + else + parent_ledger->ledger_limit = 0; + } + (*new_ledger)->ledger_limit = transfer; + + /* Charge the ledger against the ledger_ledger */ + ledger_ledger->ledger_balance += sizeof(ledger_data_t); + ledger_unlock(parent_ledger); + + ledger_unlock(ledger_ledger); + + return(KERN_SUCCESS); +} + +/* + * Destroy a ledger + */ +kern_return_t ledger_terminate( + ledger_t ledger) +{ + if (ledger == LEDGER_NULL) + return(KERN_INVALID_ARGUMENT); + + /* You can't deallocate kernel ledgers */ + if (ledger == root_wired_ledger || + ledger == root_paged_ledger) + return(KERN_INVALID_LEDGER); + + /* Lock the ledger */ + ledger_lock(ledger); + + /* the parent ledger gets back the limit */ + ledger_lock(ledger->ledger_parent); + if (ledger->ledger_parent->ledger_limit != LEDGER_ITEM_INFINITY) { + assert((natural_t)(ledger->ledger_parent->ledger_limit + + ledger->ledger_limit) < + LEDGER_ITEM_INFINITY); + ledger->ledger_parent->ledger_limit += ledger->ledger_limit; + } + ledger_unlock(ledger->ledger_parent); + + /* + * XXX The spec says that you have to destroy all objects that + * have been created with this ledger. Nice work eh? For now + * Transfer the balance to the parent and let it worry about + * it. + */ + /* XXX the parent ledger inherits the debt ?? 
*/ + (void) ledger_enter(ledger->ledger_parent, ledger->ledger_balance); + + /* adjust the balance of the creation ledger */ + (void) ledger_enter(ledger->ledger_ledger, -sizeof(*ledger)); + + /* delete the ledger */ + ledger_deallocate(ledger); + + return(KERN_SUCCESS); +} + +/* + * Return the ledger limit and balance + */ +kern_return_t ledger_read( + ledger_t ledger, + ledger_item_t *balance, + ledger_item_t *limit) +{ + if (ledger == LEDGER_NULL) + return(KERN_INVALID_ARGUMENT); + + ledger_lock(ledger); + *balance = ledger->ledger_balance; + *limit = ledger->ledger_limit; + ledger_unlock(ledger); + + return(KERN_SUCCESS); +} + +/* + * Transfer resources from a parent ledger to a child + */ +kern_return_t ledger_transfer( + ledger_t parent_ledger, + ledger_t child_ledger, + ledger_item_t transfer) +{ +#define abs(v) ((v) > 0)?(v):-(v) + + ledger_t src, dest; + ledger_item_t amount = abs(transfer); + + if (parent_ledger == LEDGER_NULL) + return(KERN_INVALID_ARGUMENT); + + if (child_ledger == LEDGER_NULL) + return(KERN_INVALID_ARGUMENT); + + /* Must be different ledgers */ + if (parent_ledger == child_ledger) + return(KERN_INVALID_ARGUMENT); + + if (transfer == 0) + return(KERN_SUCCESS); + + ledger_lock(child_ledger); + ledger_lock(parent_ledger); + + /* XXX Should be the parent you created it from ?? */ + if (parent_ledger != child_ledger->ledger_parent) { + ledger_unlock(parent_ledger); + ledger_unlock(child_ledger); + return(KERN_INVALID_LEDGER); + } + + if (transfer > 0) { + dest = child_ledger; + src = parent_ledger; + } + else { + src = child_ledger; + dest = parent_ledger; + } + + if (src->ledger_limit != LEDGER_ITEM_INFINITY) { + /* Would the existing balance exceed the new limit ? 
*/ + if (src->ledger_limit - amount < src->ledger_balance) { + ledger_unlock(parent_ledger); + ledger_unlock(child_ledger); + return(KERN_RESOURCE_SHORTAGE); + } + if (src->ledger_limit - amount > 0) + src->ledger_limit -= amount; + else + src->ledger_limit = 0; + } + + if (dest->ledger_limit != LEDGER_ITEM_INFINITY) { + if ((natural_t)(dest->ledger_limit + amount) + < LEDGER_ITEM_INFINITY) + dest->ledger_limit += amount; + else + dest->ledger_limit = (LEDGER_ITEM_INFINITY - 1); + } + + ledger_unlock(parent_ledger); + ledger_unlock(child_ledger); + + return(KERN_SUCCESS); +#undef abs +} + +/* + * Routine: convert_port_to_ledger + * Purpose: + * Convert from a port to a ledger. + * Doesn't consume the port ref; the ledger produced may be null. + * Conditions: + * Nothing locked. + */ + +ledger_t +convert_port_to_ledger( + ipc_port_t port) +{ + ledger_t ledger = LEDGER_NULL; + + if (IP_VALID(port)) { + ip_lock(port); + if (ip_active(port) && + (ip_kotype(port) == IKOT_LEDGER)) + ledger = (ledger_t) port->ip_kobject; + ip_unlock(port); + } + + return ledger; +} + +/* + * Routine: convert_ledger_to_port + * Purpose: + * Convert from a ledger to a port. + * Produces a naked send right which may be invalid. + * Conditions: + * Nothing locked. + */ + +ipc_port_t +convert_ledger_to_port( + ledger_t ledger) +{ + ipc_port_t port; + + port = ipc_port_make_send(ledger->ledger_self); + + return port; +} + +/* + * Copy a ledger + */ +ipc_port_t +ledger_copy( + ledger_t ledger) +{ + /* XXX reference counting */ + assert(ledger); + return(ipc_port_copy_send(ledger->ledger_self)); +} diff --git a/osfmk/kern/ledger.h b/osfmk/kern/ledger.h new file mode 100644 index 000000000..9fdd5077b --- /dev/null +++ b/osfmk/kern/ledger.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _KERN_LEDGER_H_ +#define _KERN_LEDGER_H_ + +#include +#include +#include + +#define LEDGER_ITEM_INFINITY (~0) + +struct ledger { + ipc_port_t ledger_self; + ipc_port_t ledger_service_port; + ledger_item_t ledger_balance; + ledger_item_t ledger_limit; + struct ledger *ledger_ledger; + struct ledger *ledger_parent; + decl_simple_lock_data(,lock) +}; + +typedef struct ledger ledger_data_t; + +#define LEDGER_NULL ((ledger_t)0) + +#define ledger_lock(ledger) simple_lock(&(ledger)->lock) +#define ledger_unlock(ledger) simple_unlock(&(ledger)->lock) +#define ledger_lock_init(ledger) \ + simple_lock_init(&(ledger)->lock, ETAP_MISC_LEDGER) + +extern ledger_t root_wired_ledger; +extern ledger_t root_paged_ledger; + +#define root_wired_ledger_port root_wired_ledger->ledger_self +#define root_paged_ledger_port root_paged_ledger->ledger_self + +extern ipc_port_t convert_ledger_to_port(ledger_t); +extern ipc_port_t ledger_copy(ledger_t); + +extern kern_return_t ledger_enter(ledger_t, ledger_item_t); +extern void ledger_init(void); + +#endif /* _KERN_LEDGER_H_ */ diff --git 
a/osfmk/kern/lock.c b/osfmk/kern/lock.c new file mode 100644 index 000000000..383bf774b --- /dev/null +++ b/osfmk/kern/lock.c @@ -0,0 +1,2181 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: kern/lock.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Locking primitives implementation + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_KDB +#include +#include +#include +#include +#endif /* MACH_KDB */ + +#ifdef __ppc__ +#include +#include +#endif + +#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG) + +/* + * Some portions of the lock debugging code must run with + * interrupts disabled. This can be machine-dependent, + * but we don't have any good hooks for that at the moment. + * If your architecture is different, add a machine-dependent + * ifdef here for these macros. XXX + */ + +#define DISABLE_INTERRUPTS(s) s = ml_set_interrupts_enabled(FALSE) +#define ENABLE_INTERRUPTS(s) (void)ml_set_interrupts_enabled(s) + +#if NCPUS > 1 +/* Time we loop without holding the interlock. + * The former is for when we cannot sleep, the latter + * for when our thread can go to sleep (loop less) + * we shouldn't retake the interlock at all frequently + * if we cannot go to sleep, since it interferes with + * any other processors. In particular, 100 is too small + * a number for powerpc MP systems because of cache + * coherency issues and differing lock fetch times between + * the processors + */ +unsigned int lock_wait_time[2] = { (unsigned int)-1, 100 } ; +#else /* NCPUS > 1 */ + + /* + * It is silly to spin on a uni-processor as if we + * thought something magical would happen to the + * want_write bit while we are executing. 
+ */ + +unsigned int lock_wait_time[2] = { 0, 0 }; +#endif /* NCPUS > 1 */ + +/* Forwards */ + +#if MACH_KDB +void db_print_simple_lock( + simple_lock_t addr); + +void db_print_mutex( + mutex_t * addr); +#endif /* MACH_KDB */ + + +#if USLOCK_DEBUG +/* + * Perform simple lock checks. + */ +int uslock_check = 1; +int max_lock_loops = 100000000; +decl_simple_lock_data(extern , printf_lock) +decl_simple_lock_data(extern , panic_lock) +#if MACH_KDB && NCPUS > 1 +decl_simple_lock_data(extern , kdb_lock) +#endif /* MACH_KDB && NCPUS >1 */ +#endif /* USLOCK_DEBUG */ + + +/* + * We often want to know the addresses of the callers + * of the various lock routines. However, this information + * is only used for debugging and statistics. + */ +typedef void *pc_t; +#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS) +#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS) +#if ANY_LOCK_DEBUG || ETAP_LOCK_TRACE +#define OBTAIN_PC(pc,l) ((pc) = (void *) GET_RETURN_PC(&(l))) +#else /* ANY_LOCK_DEBUG || ETAP_LOCK_TRACE */ +#ifdef lint +/* + * Eliminate lint complaints about unused local pc variables. + */ +#define OBTAIN_PC(pc,l) ++pc +#else /* lint */ +#define OBTAIN_PC(pc,l) +#endif /* lint */ +#endif /* USLOCK_DEBUG || ETAP_LOCK_TRACE */ + + +/* #ifndef USIMPLE_LOCK_CALLS + * The i386 production version of usimple_locks isn't ready yet. + */ +/* + * Portable lock package implementation of usimple_locks. 
+ */ + +#if ETAP_LOCK_TRACE +#define ETAPCALL(stmt) stmt +void etap_simplelock_init(simple_lock_t, etap_event_t); +void etap_simplelock_unlock(simple_lock_t); +void etap_simplelock_hold(simple_lock_t, pc_t, etap_time_t); +etap_time_t etap_simplelock_miss(simple_lock_t); + +void etap_mutex_init(mutex_t*, etap_event_t); +void etap_mutex_unlock(mutex_t*); +void etap_mutex_hold(mutex_t*, pc_t, etap_time_t); +etap_time_t etap_mutex_miss(mutex_t*); +#else /* ETAP_LOCK_TRACE */ +#define ETAPCALL(stmt) +#endif /* ETAP_LOCK_TRACE */ + +#if USLOCK_DEBUG +#define USLDBG(stmt) stmt +void usld_lock_init(usimple_lock_t, etap_event_t); +void usld_lock_pre(usimple_lock_t, pc_t); +void usld_lock_post(usimple_lock_t, pc_t); +void usld_unlock(usimple_lock_t, pc_t); +void usld_lock_try_pre(usimple_lock_t, pc_t); +void usld_lock_try_post(usimple_lock_t, pc_t); +void usld_lock_held(usimple_lock_t); +void usld_lock_none_held(void); +int usld_lock_common_checks(usimple_lock_t, char *); +#else /* USLOCK_DEBUG */ +#define USLDBG(stmt) +#endif /* USLOCK_DEBUG */ + +/* + * Initialize a usimple_lock. + * + * No change in preemption state. + */ +void +usimple_lock_init( + usimple_lock_t l, + etap_event_t event) +{ + USLDBG(usld_lock_init(l, event)); + ETAPCALL(etap_simplelock_init((l),(event))); + hw_lock_init(&l->interlock); +} + + +/* + * Acquire a usimple_lock. + * + * Returns with preemption disabled. Note + * that the hw_lock routines are responsible for + * maintaining preemption state. 
 */
void
usimple_lock(
	usimple_lock_t	l)
{
	int i;
	pc_t	pc;
#if	ETAP_LOCK_TRACE
	etap_time_t	start_wait_time;
	int		no_miss_info = 0;
#endif	/* ETAP_LOCK_TRACE */
#if	USLOCK_DEBUG
	int	count = 0;
#endif	/* USLOCK_DEBUG */

	OBTAIN_PC(pc, l);
	USLDBG(usld_lock_pre(l, pc));
#if	ETAP_LOCK_TRACE
	ETAP_TIME_CLEAR(start_wait_time);
#endif	/* ETAP_LOCK_TRACE */

#ifdef	__ppc__
	/*
	 * On ppc, acquire with a bounded spin (hw_lock_to) so a wedged
	 * lock panics instead of hanging silently.  Note that the "{"
	 * opened by this "if" is closed by the "}" that follows the
	 * #endif below -- that closing brace is shared with the #else
	 * arm's outer while loop.
	 */
	if(!hw_lock_to(&l->interlock, LockTimeOut)) {	/* Try to get the lock with a timeout */

		panic("simple lock deadlock detection - l=%08X, cpu=%d, ret=%08X", l, cpu_number(), pc);

#else	/* __ppc__ */
	while (!hw_lock_try(&l->interlock)) {
		ETAPCALL(if (no_miss_info++ == 0)
			start_wait_time = etap_simplelock_miss(l));
		while (hw_lock_held(&l->interlock)) {
			/*
			 * Spin watching the lock value in cache,
			 * without consuming external bus cycles.
			 * On most SMP architectures, the atomic
			 * instruction(s) used by hw_lock_try
			 * cost much, much more than an ordinary
			 * memory read.
			 */
#if	USLOCK_DEBUG
			if (count++ > max_lock_loops
#if	MACH_KDB && NCPUS > 1
			    && l != &kdb_lock
#endif	/* MACH_KDB && NCPUS > 1 */
			    ) {
				/*
				 * Deadlock watchdog: we have spun too long.
				 * printf_lock is exempted so the panic/print
				 * path itself can still make progress.
				 */
				if (l == &printf_lock) {
					return;
				}
				mp_disable_preemption();
				panic("simple lock deadlock detection - l=%08X (=%08X), cpu=%d, ret=%08X",
				      l, *hw_lock_addr(l->interlock), cpu_number(), pc);
				/*
				 * NOTE(review): panic() is not expected to
				 * return, so the two statements below appear
				 * unreachable -- confirm before relying on
				 * the counter reset.
				 */
				count = 0;
				mp_enable_preemption();
			}
#endif	/* USLOCK_DEBUG */
		}
#endif	/* __ppc__ */
	}
	ETAPCALL(etap_simplelock_hold(l, pc, start_wait_time));
	USLDBG(usld_lock_post(l, pc));
}


/*
 * Release a usimple_lock.
 *
 * Returns with preemption enabled.  Note
 * that the hw_lock routines are responsible for
 * maintaining preemption state.
 */
void
usimple_unlock(
	usimple_lock_t	l)
{
	pc_t	pc;

//	checkNMI();	/* (TEST/DEBUG) */

	OBTAIN_PC(pc, l);
	USLDBG(usld_unlock(l, pc));
	ETAPCALL(etap_simplelock_unlock(l));
	hw_lock_unlock(&l->interlock);
}


/*
 * Conditionally acquire a usimple_lock.
+ * + * On success, returns with preemption disabled. + * On failure, returns with preemption in the same state + * as when first invoked. Note that the hw_lock routines + * are responsible for maintaining preemption state. + * + * XXX No stats are gathered on a miss; I preserved this + * behavior from the original assembly-language code, but + * doesn't it make sense to log misses? XXX + */ +unsigned int +usimple_lock_try( + usimple_lock_t l) +{ + pc_t pc; + unsigned int success; + etap_time_t zero_time; + + OBTAIN_PC(pc, l); + USLDBG(usld_lock_try_pre(l, pc)); + if (success = hw_lock_try(&l->interlock)) { + USLDBG(usld_lock_try_post(l, pc)); + ETAP_TIME_CLEAR(zero_time); + ETAPCALL(etap_simplelock_hold(l, pc, zero_time)); + } + return success; +} + +#if ETAP_LOCK_TRACE +void +simple_lock_no_trace( + simple_lock_t l) +{ + pc_t pc; + + OBTAIN_PC(pc, l); + USLDBG(usld_lock_pre(l, pc)); + while (!hw_lock_try(&l->interlock)) { + while (hw_lock_held(&l->interlock)) { + /* + * Spin watching the lock value in cache, + * without consuming external bus cycles. + * On most SMP architectures, the atomic + * instruction(s) used by hw_lock_try + * cost much, much more than an ordinary + * memory read. + */ + } + } + USLDBG(usld_lock_post(l, pc)); +} + +void +simple_unlock_no_trace( + simple_lock_t l) +{ + pc_t pc; + + OBTAIN_PC(pc, l); + USLDBG(usld_unlock(l, pc)); + hw_lock_unlock(&l->interlock); +} + +int +simple_lock_try_no_trace( + simple_lock_t l) +{ + pc_t pc; + unsigned int success; + + OBTAIN_PC(pc, l); + USLDBG(usld_lock_try_pre(l, pc)); + if (success = hw_lock_try(&l->interlock)) { + USLDBG(usld_lock_try_post(l, pc)); + } + return success; +} +#endif /* ETAP_LOCK_TRACE */ + + +#if USLOCK_DEBUG +/* + * Verify that the lock is locked and owned by + * the current thread. + */ +void +usimple_lock_held( + usimple_lock_t l) +{ + usld_lock_held(l); +} + + +/* + * Verify that no usimple_locks are held by + * this processor. 
Typically used in a + * trap handler when returning to user mode + * or in a path known to relinquish the processor. + */ +void +usimple_lock_none_held(void) +{ + usld_lock_none_held(); +} +#endif /* USLOCK_DEBUG */ + + +#if USLOCK_DEBUG +/* + * States of a usimple_lock. The default when initializing + * a usimple_lock is setting it up for debug checking. + */ +#define USLOCK_CHECKED 0x0001 /* lock is being checked */ +#define USLOCK_TAKEN 0x0002 /* lock has been taken */ +#define USLOCK_INIT 0xBAA0 /* lock has been initialized */ +#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED) +#define USLOCK_CHECKING(l) (uslock_check && \ + ((l)->debug.state & USLOCK_CHECKED)) + +/* + * Maintain a per-cpu stack of acquired usimple_locks. + */ +void usl_stack_push(usimple_lock_t, int); +void usl_stack_pop(usimple_lock_t, int); + +/* + * Trace activities of a particularly interesting lock. + */ +void usl_trace(usimple_lock_t, int, pc_t, const char *); + + +/* + * Initialize the debugging information contained + * in a usimple_lock. + */ +void +usld_lock_init( + usimple_lock_t l, + etap_event_t type) +{ + if (l == USIMPLE_LOCK_NULL) + panic("lock initialization: null lock pointer"); + l->lock_type = USLOCK_TAG; + l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0; + l->debug.lock_cpu = l->debug.unlock_cpu = 0; + l->debug.lock_pc = l->debug.unlock_pc = INVALID_PC; + l->debug.lock_thread = l->debug.unlock_thread = INVALID_THREAD; + l->debug.duration[0] = l->debug.duration[1] = 0; + l->debug.unlock_cpu = l->debug.unlock_cpu = 0; + l->debug.unlock_pc = l->debug.unlock_pc = INVALID_PC; + l->debug.unlock_thread = l->debug.unlock_thread = INVALID_THREAD; +} + + +/* + * These checks apply to all usimple_locks, not just + * those with USLOCK_CHECKED turned on. 
 */
int
usld_lock_common_checks(
	usimple_lock_t	l,
	char		*caller)
{
	/* Sanity checks shared by every usld_* entry point: the lock
	 * pointer must be non-null, carry the usimple-lock tag, and
	 * have been through usld_lock_init.  Returns non-zero when the
	 * per-lock USLOCK_CHECKED state (and the global uslock_check
	 * switch) say full debug checking should proceed. */
	if (l == USIMPLE_LOCK_NULL)
		panic("%s: null lock pointer", caller);
	if (l->lock_type != USLOCK_TAG)
		panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l);
	if (!(l->debug.state & USLOCK_INIT))
		panic("%s: 0x%x is not an initialized lock",
		      caller, (integer_t) l);
	return USLOCK_CHECKING(l);
}


/*
 * Debug checks on a usimple_lock just before attempting
 * to acquire it.
 */
/* ARGSUSED */
void
usld_lock_pre(
	usimple_lock_t	l,
	pc_t		pc)
{
	char	*caller = "usimple_lock";


#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_thread,
		l->debug.unlock_cpu,
		l->debug.unlock_pc,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

/*
 * Note that we have a weird case where we are getting a lock when we are
 * in the process of putting the system to sleep. We are running with no
 * current threads, therefore we can't tell if we are trying to retake a lock
 * we have or someone on the other processor has it.  Therefore we just
 * ignore this test if the locking thread is 0.
 */

	/* Recursive-acquisition check: panic if the current thread
	 * already holds this lock (skipped when lock_thread is 0 --
	 * see the sleep-path note above). */
	if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread &&
	    l->debug.lock_thread == (void *) current_thread()) {
		printf("%s: lock 0x%x already locked (at 0x%x) by",
		      caller, (integer_t) l, l->debug.lock_pc);
		printf(" current thread 0x%x (new attempt at pc 0x%x)\n",
		       l->debug.lock_thread, pc);
		/* NOTE(review): panic() is handed a non-literal string as
		 * its format argument; safe only while "caller" contains
		 * no '%' -- consider panic("%s", caller). */
		panic(caller);
	}
	mp_disable_preemption();
	usl_trace(l, cpu_number(), pc, caller);
	mp_enable_preemption();
}


/*
 * Debug checks on a usimple_lock just after acquiring it.
 *
 * Pre-emption has been disabled at this point,
 * so we are safe in using cpu_number.
 */
void
usld_lock_post(
	usimple_lock_t	l,
	pc_t		pc)
{
	register int	mycpu;
	char	*caller = "successful usimple_lock";


#if 0
	printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */
		l->debug.lock_pc,
		l->debug.lock_thread,
		l->debug.state,
		l->debug.lock_cpu,
		l->debug.unlock_thread,
		l->debug.unlock_cpu,
		l->debug.unlock_pc,
		caller);
#endif

	if (!usld_lock_common_checks(l, caller))
		return;

	/* The state must still read "initialized and not taken"; anything
	 * else means the lock was corrupted or stolen between the hardware
	 * acquisition and this bookkeeping. */
	if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED))
		panic("%s: lock 0x%x became uninitialized",
		      caller, (integer_t) l);
	if ((l->debug.state & USLOCK_TAKEN))
		panic("%s: lock 0x%x became TAKEN by someone else",
		      caller, (integer_t) l);

	/* Record ownership: owning thread, acquisition pc, and cpu. */
	mycpu = cpu_number();
	l->debug.lock_thread = (void *)current_thread();
	l->debug.state |= USLOCK_TAKEN;
	l->debug.lock_pc = pc;
	l->debug.lock_cpu = mycpu;

	usl_stack_push(l, mycpu);
	usl_trace(l, mycpu, pc, caller);
}


/*
 * Debug checks on a usimple_lock just before
 * releasing it.  Note that the caller has not
 * yet released the hardware lock.
 *
 * Preemption is still disabled, so there's
 * no problem using cpu_number.
+ */ +void +usld_unlock( + usimple_lock_t l, + pc_t pc) +{ + register int mycpu; + char *caller = "usimple_unlock"; + + +#if 0 + printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */ + l->debug.lock_pc, + l->debug.lock_thread, + l->debug.state, + l->debug.lock_cpu, + l->debug.unlock_thread, + l->debug.unlock_cpu, + l->debug.unlock_pc, + caller); +#endif + + if (!usld_lock_common_checks(l, caller)) + return; + + mycpu = cpu_number(); + + if (!(l->debug.state & USLOCK_TAKEN)) + panic("%s: lock 0x%x hasn't been taken", + caller, (integer_t) l); + if (l->debug.lock_thread != (void *) current_thread()) + panic("%s: unlocking lock 0x%x, owned by thread 0x%x", + caller, (integer_t) l, l->debug.lock_thread); + if (l->debug.lock_cpu != mycpu) { + printf("%s: unlocking lock 0x%x on cpu 0x%x", + caller, (integer_t) l, mycpu); + printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu); + panic(caller); + } + usl_trace(l, mycpu, pc, caller); + usl_stack_pop(l, mycpu); + + l->debug.unlock_thread = l->debug.lock_thread; + l->debug.lock_thread = INVALID_PC; + l->debug.state &= ~USLOCK_TAKEN; + l->debug.unlock_pc = pc; + l->debug.unlock_cpu = mycpu; +} + + +/* + * Debug checks on a usimple_lock just before + * attempting to acquire it. + * + * Preemption isn't guaranteed to be disabled. + */ +void +usld_lock_try_pre( + usimple_lock_t l, + pc_t pc) +{ + char *caller = "usimple_lock_try"; + + if (!usld_lock_common_checks(l, caller)) + return; + mp_disable_preemption(); + usl_trace(l, cpu_number(), pc, caller); + mp_enable_preemption(); +} + + +/* + * Debug checks on a usimple_lock just after + * successfully attempting to acquire it. + * + * Preemption has been disabled by the + * lock acquisition attempt, so it's safe + * to use cpu_number. 
+ */ +void +usld_lock_try_post( + usimple_lock_t l, + pc_t pc) +{ + register int mycpu; + char *caller = "successful usimple_lock_try"; + + if (!usld_lock_common_checks(l, caller)) + return; + + if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) + panic("%s: lock 0x%x became uninitialized", + caller, (integer_t) l); + if ((l->debug.state & USLOCK_TAKEN)) + panic("%s: lock 0x%x became TAKEN by someone else", + caller, (integer_t) l); + + mycpu = cpu_number(); + l->debug.lock_thread = (void *) current_thread(); + l->debug.state |= USLOCK_TAKEN; + l->debug.lock_pc = pc; + l->debug.lock_cpu = mycpu; + +#if 0 + printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */ + l->debug.lock_pc, + l->debug.lock_thread, + l->debug.state, + l->debug.lock_cpu, + l->debug.unlock_thread, + l->debug.unlock_cpu, + l->debug.unlock_pc, + caller); +#endif + + usl_stack_push(l, mycpu); + usl_trace(l, mycpu, pc, caller); +} + + +/* + * Determine whether the lock in question is owned + * by the current thread. + */ +void +usld_lock_held( + usimple_lock_t l) +{ + char *caller = "usimple_lock_held"; + + +#if 0 + printf("*** %08X %08X %04X %02X %08X %02X %08X - %s\n", /* (TEST/DEBUG) */ + l->debug.lock_pc, + l->debug.lock_thread, + l->debug.state, + l->debug.lock_cpu, + l->debug.unlock_thread, + l->debug.unlock_cpu, + l->debug.unlock_pc, + caller); +#endif + + if (!usld_lock_common_checks(l, caller)) + return; + + if (!(l->debug.state & USLOCK_TAKEN)) + panic("%s: lock 0x%x hasn't been taken", + caller, (integer_t) l); + if (l->debug.lock_thread != (void *) current_thread()) + panic("%s: lock 0x%x is owned by thread 0x%x", caller, + (integer_t) l, (integer_t) l->debug.lock_thread); + + /* + * The usimple_lock is active, so preemption + * is disabled and the current cpu should + * match the one recorded at lock acquisition time. 
+ */ + if (l->debug.lock_cpu != cpu_number()) + panic("%s: current cpu 0x%x isn't acquiring cpu 0x%x", + caller, cpu_number(), (integer_t) l->debug.lock_cpu); +} + + +/* + * Per-cpu stack of currently active usimple_locks. + * Requires spl protection so that interrupt-level + * locks plug-n-play with their thread-context friends. + */ +#define USLOCK_STACK_DEPTH 20 +usimple_lock_t uslock_stack[NCPUS][USLOCK_STACK_DEPTH]; +unsigned int uslock_stack_index[NCPUS]; +boolean_t uslock_stack_enabled = FALSE; + + +/* + * Record a usimple_lock just acquired on + * the current processor. + * + * Preemption has been disabled by lock + * acquisition, so it's safe to use the cpu number + * specified by the caller. + */ +void +usl_stack_push( + usimple_lock_t l, + int mycpu) +{ + boolean_t s; + + if (uslock_stack_enabled == FALSE) + return; + + DISABLE_INTERRUPTS(s); + assert(uslock_stack_index[mycpu] >= 0); + assert(uslock_stack_index[mycpu] < USLOCK_STACK_DEPTH); + if (uslock_stack_index[mycpu] >= USLOCK_STACK_DEPTH) { + printf("usl_stack_push (cpu 0x%x): too many locks (%d)", + mycpu, uslock_stack_index[mycpu]); + printf(" disabling stacks\n"); + uslock_stack_enabled = FALSE; + ENABLE_INTERRUPTS(s); + return; + } + uslock_stack[mycpu][uslock_stack_index[mycpu]] = l; + uslock_stack_index[mycpu]++; + ENABLE_INTERRUPTS(s); +} + + +/* + * Eliminate the entry for a usimple_lock + * that had been active on the current processor. + * + * Preemption has been disabled by lock + * acquisition, and we haven't yet actually + * released the hardware lock associated with + * this usimple_lock, so it's safe to use the + * cpu number supplied by the caller. 
 */
void
usl_stack_pop(
	usimple_lock_t	l,
	int		mycpu)
{
	unsigned int	i, index;
	boolean_t	s;

	if (uslock_stack_enabled == FALSE)
		return;

	DISABLE_INTERRUPTS(s);
	assert(uslock_stack_index[mycpu] > 0);
	assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH);
	/* Underflow is handled gracefully in non-assert builds: report
	 * it once and permanently disable stack tracking rather than
	 * corrupting the index. */
	if (uslock_stack_index[mycpu] == 0) {
		printf("usl_stack_pop (cpu 0x%x): not enough locks (%d)",
		       mycpu, uslock_stack_index[mycpu]);
		printf(" disabling stacks\n");
		uslock_stack_enabled = FALSE;
		ENABLE_INTERRUPTS(s);
		return;
	}
	/* Locks may be released out of acquisition order, so search the
	 * whole stack for l; if it isn't the top entry, the former top
	 * is moved down into the vacated slot. */
	index = --uslock_stack_index[mycpu];
	for (i = 0; i <= index; ++i) {
		if (uslock_stack[mycpu][i] == l) {
			if (i != index)
				uslock_stack[mycpu][i] =
					uslock_stack[mycpu][index];
			ENABLE_INTERRUPTS(s);
			return;
		}
	}
	ENABLE_INTERRUPTS(s);
	panic("usl_stack_pop:  can't find usimple_lock 0x%x", l);
}


/*
 * Determine whether any usimple_locks are currently held.
 *
 * Caller's preemption state is uncertain.  If
 * preemption has been disabled, this check is accurate.
 * Otherwise, this check is just a guess.  We do the best
 * we can by disabling scheduler interrupts, so at least
 * the check is accurate w.r.t. whatever cpu we're running
 * on while in this routine.
 */
void
usld_lock_none_held()
{
	register int	mycpu;
	boolean_t	s;
	unsigned int	locks_held;
	char	*caller = "usimple_lock_none_held";

	DISABLE_INTERRUPTS(s);
	mp_disable_preemption();
	mycpu = cpu_number();
	/* Snapshot this cpu's stack depth; non-zero means some
	 * usimple_lock is still held here. */
	locks_held = uslock_stack_index[mycpu];
	mp_enable_preemption();
	ENABLE_INTERRUPTS(s);
	if (locks_held > 0)
		panic("%s: no locks should be held (0x%x locks held)",
		      caller, (integer_t) locks_held);
}


/*
 * For very special cases, set traced_lock to point to a
 * specific lock of interest.  The result is a series of
 * XPRs showing lock operations on that lock.  The lock_seq
 * value is used to show the order of those operations.
+ */ +usimple_lock_t traced_lock; +unsigned int lock_seq; + +void +usl_trace( + usimple_lock_t l, + int mycpu, + pc_t pc, + const char * op_name) +{ + if (traced_lock == l) { + XPR(XPR_SLOCK, + "seq %d, cpu %d, %s @ %x\n", + (integer_t) lock_seq, (integer_t) mycpu, + (integer_t) op_name, (integer_t) pc, 0); + lock_seq++; + } +} + + + +#if MACH_KDB +#define printf kdbprintf +void db_show_all_slocks(void); +void +db_show_all_slocks(void) +{ + unsigned int i, index; + int mycpu = cpu_number(); + usimple_lock_t l; + + if (uslock_stack_enabled == FALSE) { + printf("Lock stack not enabled\n"); + return; + } + +#if 0 + if (!mach_slocks_init) + iprintf("WARNING: simple locks stack may not be accurate\n"); +#endif + assert(uslock_stack_index[mycpu] >= 0); + assert(uslock_stack_index[mycpu] <= USLOCK_STACK_DEPTH); + index = uslock_stack_index[mycpu]; + for (i = 0; i < index; ++i) { + l = uslock_stack[mycpu][i]; + iprintf("%d: ", i); + db_printsym((vm_offset_t)l, DB_STGY_ANY); + if (l->debug.lock_pc != INVALID_PC) { + printf(" locked by "); + db_printsym((int)l->debug.lock_pc, DB_STGY_PROC); + } + printf("\n"); + } +} +#endif /* MACH_KDB */ + +#endif /* USLOCK_DEBUG */ + +/* #endif USIMPLE_LOCK_CALLS */ + +/* + * Routine: lock_alloc + * Function: + * Allocate a lock for external users who cannot + * hard-code the structure definition into their + * objects. + * For now just use kalloc, but a zone is probably + * warranted. + */ +lock_t * +lock_alloc( + boolean_t can_sleep, + etap_event_t event, + etap_event_t i_event) +{ + lock_t *l; + + if ((l = (lock_t *)kalloc(sizeof(lock_t))) != 0) + lock_init(l, can_sleep, event, i_event); + return(l); +} + +/* + * Routine: lock_free + * Function: + * Free a lock allocated for external users. + * For now just use kfree, but a zone is probably + * warranted. + */ +void +lock_free( + lock_t *l) +{ + kfree((vm_offset_t)l, sizeof(lock_t)); +} + + +/* + * Routine: lock_init + * Function: + * Initialize a lock; required before use. 
+ * Note that clients declare the "struct lock" + * variables and then initialize them, rather + * than getting a new one from this module. + */ +void +lock_init( + lock_t *l, + boolean_t can_sleep, + etap_event_t event, + etap_event_t i_event) +{ + (void) memset((void *) l, 0, sizeof(lock_t)); + +#if ETAP_LOCK_TRACE + etap_event_table_assign(&l->u.event_table_chain, event); + l->u.s.start_list = SD_ENTRY_NULL; +#endif /* ETAP_LOCK_TRACE */ + + simple_lock_init(&l->interlock, i_event); + l->want_write = FALSE; + l->want_upgrade = FALSE; + l->read_count = 0; + l->can_sleep = can_sleep; + +#if ETAP_LOCK_ACCUMULATE + l->cbuff_write = etap_cbuff_reserve(lock_event_table(l)); + if (l->cbuff_write != CBUFF_ENTRY_NULL) { + l->cbuff_write->event = event; + l->cbuff_write->instance = (unsigned long) l; + l->cbuff_write->kind = WRITE_LOCK; + } + l->cbuff_read = CBUFF_ENTRY_NULL; +#endif /* ETAP_LOCK_ACCUMULATE */ +} + + +/* + * Sleep locks. These use the same data structure and algorithm + * as the spin locks, but the process sleeps while it is waiting + * for the lock. These work on uniprocessor systems. + */ + +#define DECREMENTER_TIMEOUT 1000000 + +void +lock_write( + register lock_t * l) +{ + register int i; + start_data_node_t entry = {0}; + boolean_t lock_miss = FALSE; + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_wait_time; + pc_t pc; +#if MACH_LDEBUG + int decrementer; +#endif /* MACH_LDEBUG */ + + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + ETAP_CREATE_ENTRY(entry, trace); + MON_ASSIGN_PC(entry->start_pc, pc, trace); + + simple_lock(&l->interlock); + + /* + * Link the new start_list entry + */ + ETAP_LINK_ENTRY(l, entry, trace); + +#if MACH_LDEBUG + decrementer = DECREMENTER_TIMEOUT; +#endif /* MACH_LDEBUG */ + + /* + * Try to acquire the want_write bit. 
+ */ + while (l->want_write) { + if (!lock_miss) { + ETAP_CONTENTION_TIMESTAMP(entry, trace); + lock_miss = TRUE; + } + + i = lock_wait_time[l->can_sleep ? 1 : 0]; + if (i != 0) { + simple_unlock(&l->interlock); +#if MACH_LDEBUG + if (!--decrementer) + Debugger("timeout - want_write"); +#endif /* MACH_LDEBUG */ + while (--i != 0 && l->want_write) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && l->want_write) { + l->waiting = TRUE; + ETAP_SET_REASON(current_thread(), + BLOCKED_ON_COMPLEX_LOCK); + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + l->want_write = TRUE; + + /* Wait for readers (and upgrades) to finish */ + +#if MACH_LDEBUG + decrementer = DECREMENTER_TIMEOUT; +#endif /* MACH_LDEBUG */ + while ((l->read_count != 0) || l->want_upgrade) { + if (!lock_miss) { + ETAP_CONTENTION_TIMESTAMP(entry,trace); + lock_miss = TRUE; + } + + i = lock_wait_time[l->can_sleep ? 1 : 0]; + if (i != 0) { + simple_unlock(&l->interlock); +#if MACH_LDEBUG + if (!--decrementer) + Debugger("timeout - wait for readers"); +#endif /* MACH_LDEBUG */ + while (--i != 0 && (l->read_count != 0 || + l->want_upgrade)) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && (l->read_count != 0 || l->want_upgrade)) { + l->waiting = TRUE; + ETAP_SET_REASON(current_thread(), + BLOCKED_ON_COMPLEX_LOCK); + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + + /* + * do not collect wait data if either the lock + * was free or no wait traces are enabled. 
+ */ + + if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) { + ETAP_TIMESTAMP(stop_wait_time); + ETAP_TOTAL_TIME(total_time, + stop_wait_time, + entry->start_wait_time); + CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace); + MON_DATA_COLLECT(l, + entry, + total_time, + WRITE_LOCK, + MON_CONTENTION, + trace); + } + + simple_unlock(&l->interlock); + + /* + * Set start hold time if some type of hold tracing is enabled. + * + * Note: if the stop_wait_time was already stamped, use + * it as the start_hold_time instead of doing an + * expensive bus access. + * + */ + + if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) + ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace); + else + ETAP_DURATION_TIMESTAMP(entry, trace); + +} + +void +lock_done( + register lock_t * l) +{ + boolean_t do_wakeup = FALSE; + start_data_node_t entry; + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t stop_hold_time; + etap_time_t total_time; + unsigned long lock_kind; + pc_t pc; + + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + simple_lock(&l->interlock); + + if (l->read_count != 0) { + l->read_count--; + lock_kind = READ_LOCK; + } + else + if (l->want_upgrade) { + l->want_upgrade = FALSE; + lock_kind = WRITE_LOCK; + } + else { + l->want_write = FALSE; + lock_kind = WRITE_LOCK; + } + + /* + * There is no reason to wakeup a waiting thread + * if the read-count is non-zero. Consider: + * we must be dropping a read lock + * threads are waiting only if one wants a write lock + * if there are still readers, they can't proceed + */ + + if (l->waiting && (l->read_count == 0)) { + l->waiting = FALSE; + do_wakeup = TRUE; + } + /* + * Collect hold data if hold tracing is + * enabled. + */ + + /* + * NOTE: All complex locks whose tracing was on when the + * lock was acquired will have an entry in the start_data + * list. 
+ */ + + ETAP_UNLINK_ENTRY(l,entry); + if (ETAP_DURATION_ENABLED(trace) && entry != SD_ENTRY_NULL) { + ETAP_TIMESTAMP (stop_hold_time); + ETAP_TOTAL_TIME (total_time, + stop_hold_time, + entry->start_hold_time); + + if (lock_kind & WRITE_LOCK) + CUM_HOLD_ACCUMULATE (l->cbuff_write, + total_time, + dynamic, + trace); + else { + CUM_READ_ENTRY_RESERVE(l,l->cbuff_read,trace); + CUM_HOLD_ACCUMULATE (l->cbuff_read, + total_time, + dynamic, + trace); + } + MON_ASSIGN_PC(entry->end_pc,pc,trace); + MON_DATA_COLLECT(l,entry, + total_time, + lock_kind, + MON_DURATION, + trace); + } + + simple_unlock(&l->interlock); + + ETAP_DESTROY_ENTRY(entry); + + if (do_wakeup) + thread_wakeup((event_t) l); +} + +void +lock_read( + register lock_t * l) +{ + register int i; + start_data_node_t entry = {0}; + boolean_t lock_miss = FALSE; + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_wait_time; + pc_t pc; +#if MACH_LDEBUG + int decrementer; +#endif /* MACH_LDEBUG */ + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + ETAP_CREATE_ENTRY(entry, trace); + MON_ASSIGN_PC(entry->start_pc, pc, trace); + + simple_lock(&l->interlock); + + /* + * Link the new start_list entry + */ + ETAP_LINK_ENTRY(l,entry,trace); + +#if MACH_LDEBUG + decrementer = DECREMENTER_TIMEOUT; +#endif /* MACH_LDEBUG */ + while (l->want_write || l->want_upgrade) { + if (!lock_miss) { + ETAP_CONTENTION_TIMESTAMP(entry, trace); + lock_miss = TRUE; + } + + i = lock_wait_time[l->can_sleep ? 
1 : 0]; + + if (i != 0) { + simple_unlock(&l->interlock); +#if MACH_LDEBUG + if (!--decrementer) + Debugger("timeout - wait no writers"); +#endif /* MACH_LDEBUG */ + while (--i != 0 && (l->want_write || l->want_upgrade)) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && (l->want_write || l->want_upgrade)) { + l->waiting = TRUE; + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + + l->read_count++; + + /* + * Do not collect wait data if the lock was free + * or if no wait traces are enabled. + */ + + if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) { + ETAP_TIMESTAMP(stop_wait_time); + ETAP_TOTAL_TIME(total_time, + stop_wait_time, + entry->start_wait_time); + CUM_READ_ENTRY_RESERVE(l, l->cbuff_read, trace); + CUM_WAIT_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace); + MON_DATA_COLLECT(l, + entry, + total_time, + READ_LOCK, + MON_CONTENTION, + trace); + } + simple_unlock(&l->interlock); + + /* + * Set start hold time if some type of hold tracing is enabled. + * + * Note: if the stop_wait_time was already stamped, use + * it instead of doing an expensive bus access. + * + */ + + if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) + ETAP_COPY_START_HOLD_TIME(entry, stop_wait_time, trace); + else + ETAP_DURATION_TIMESTAMP(entry,trace); +} + + +/* + * Routine: lock_read_to_write + * Function: + * Improves a read-only lock to one with + * write permission. If another reader has + * already requested an upgrade to a write lock, + * no lock is held upon return. + * + * Returns TRUE if the upgrade *failed*. 
+ */ + +boolean_t +lock_read_to_write( + register lock_t * l) +{ + register int i; + boolean_t do_wakeup = FALSE; + start_data_node_t entry = {0}; + boolean_t lock_miss = FALSE; + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_time; + pc_t pc; +#if MACH_LDEBUG + int decrementer; +#endif /* MACH_LDEBUG */ + + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + simple_lock(&l->interlock); + + l->read_count--; + + /* + * Since the read lock is lost whether the write lock + * is acquired or not, read hold data is collected here. + * This, of course, is assuming some type of hold + * tracing is enabled. + * + * Note: trace is set to zero if the entry does not exist. + */ + + ETAP_FIND_ENTRY(l, entry, trace); + + if (ETAP_DURATION_ENABLED(trace)) { + ETAP_TIMESTAMP(stop_time); + ETAP_TOTAL_TIME(total_time, stop_time, entry->start_hold_time); + CUM_HOLD_ACCUMULATE(l->cbuff_read, total_time, dynamic, trace); + MON_ASSIGN_PC(entry->end_pc, pc, trace); + MON_DATA_COLLECT(l, + entry, + total_time, + READ_LOCK, + MON_DURATION, + trace); + } + + if (l->want_upgrade) { + /* + * Someone else has requested upgrade. + * Since we've released a read lock, wake + * him up. + */ + if (l->waiting && (l->read_count == 0)) { + l->waiting = FALSE; + do_wakeup = TRUE; + } + + ETAP_UNLINK_ENTRY(l, entry); + simple_unlock(&l->interlock); + ETAP_DESTROY_ENTRY(entry); + + if (do_wakeup) + thread_wakeup((event_t) l); + return (TRUE); + } + + l->want_upgrade = TRUE; + + MON_ASSIGN_PC(entry->start_pc, pc, trace); + +#if MACH_LDEBUG + decrementer = DECREMENTER_TIMEOUT; +#endif /* MACH_LDEBUG */ + while (l->read_count != 0) { + if (!lock_miss) { + ETAP_CONTENTION_TIMESTAMP(entry, trace); + lock_miss = TRUE; + } + + i = lock_wait_time[l->can_sleep ? 
1 : 0]; + + if (i != 0) { + simple_unlock(&l->interlock); +#if MACH_LDEBUG + if (!--decrementer) + Debugger("timeout - read_count"); +#endif /* MACH_LDEBUG */ + while (--i != 0 && l->read_count != 0) + continue; + simple_lock(&l->interlock); + } + + if (l->can_sleep && l->read_count != 0) { + l->waiting = TRUE; + thread_sleep_simple_lock((event_t) l, + simple_lock_addr(l->interlock), FALSE); + simple_lock(&l->interlock); + } + } + + /* + * do not collect wait data if the lock was free + * or if no wait traces are enabled. + */ + + if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) { + ETAP_TIMESTAMP (stop_time); + ETAP_TOTAL_TIME(total_time, stop_time, entry->start_wait_time); + CUM_WAIT_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace); + MON_DATA_COLLECT(l, + entry, + total_time, + WRITE_LOCK, + MON_CONTENTION, + trace); + } + + simple_unlock(&l->interlock); + + /* + * Set start hold time if some type of hold tracing is enabled + * + * Note: if the stop_time was already stamped, use + * it as the new start_hold_time instead of doing + * an expensive VME access. + * + */ + + if (lock_miss && ETAP_CONTENTION_ENABLED(trace)) + ETAP_COPY_START_HOLD_TIME(entry, stop_time, trace); + else + ETAP_DURATION_TIMESTAMP(entry, trace); + + return (FALSE); +} + +void +lock_write_to_read( + register lock_t * l) +{ + boolean_t do_wakeup = FALSE; + start_data_node_t entry = {0}; + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t stop_hold_time; + etap_time_t total_time; + pc_t pc; + + ETAP_STAMP(lock_event_table(l), trace,dynamic); + + simple_lock(&l->interlock); + + l->read_count++; + if (l->want_upgrade) + l->want_upgrade = FALSE; + else + l->want_write = FALSE; + + if (l->waiting) { + l->waiting = FALSE; + do_wakeup = TRUE; + } + + /* + * Since we are switching from a write lock to a read lock, + * the write lock data is stored and the read lock data + * collection begins. + * + * Note: trace is set to zero if the entry does not exist. 
+ */ + + ETAP_FIND_ENTRY(l, entry, trace); + + if (ETAP_DURATION_ENABLED(trace)) { + ETAP_TIMESTAMP (stop_hold_time); + ETAP_TOTAL_TIME(total_time, stop_hold_time, entry->start_hold_time); + CUM_HOLD_ACCUMULATE(l->cbuff_write, total_time, dynamic, trace); + MON_ASSIGN_PC(entry->end_pc, pc, trace); + MON_DATA_COLLECT(l, + entry, + total_time, + WRITE_LOCK, + MON_DURATION, + trace); + } + + simple_unlock(&l->interlock); + + /* + * Set start hold time if some type of hold tracing is enabled + * + * Note: if the stop_hold_time was already stamped, use + * it as the new start_hold_time instead of doing + * an expensive bus access. + * + */ + + if (ETAP_DURATION_ENABLED(trace)) + ETAP_COPY_START_HOLD_TIME(entry, stop_hold_time, trace); + else + ETAP_DURATION_TIMESTAMP(entry, trace); + + MON_ASSIGN_PC(entry->start_pc, pc, trace); + + if (do_wakeup) + thread_wakeup((event_t) l); +} + + +#if 0 /* Unused */ +/* + * Routine: lock_try_write + * Function: + * Tries to get a write lock. + * + * Returns FALSE if the lock is not held on return. + */ + +boolean_t +lock_try_write( + register lock_t * l) +{ + start_data_node_t entry = {0}; + unsigned short trace = 0; + pc_t pc; + + ETAP_STAMP(lock_event_table(l), trace, trace); + ETAP_CREATE_ENTRY(entry, trace); + + simple_lock(&l->interlock); + + if (l->want_write || l->want_upgrade || l->read_count) { + /* + * Can't get lock. + */ + simple_unlock(&l->interlock); + ETAP_DESTROY_ENTRY(entry); + return(FALSE); + } + + /* + * Have lock. + */ + + l->want_write = TRUE; + + ETAP_LINK_ENTRY(l, entry, trace); + + simple_unlock(&l->interlock); + + MON_ASSIGN_PC(entry->start_pc, pc, trace); + ETAP_DURATION_TIMESTAMP(entry, trace); + + return(TRUE); +} + +/* + * Routine: lock_try_read + * Function: + * Tries to get a read lock. + * + * Returns FALSE if the lock is not held on return. 
+ */ + +boolean_t +lock_try_read( + register lock_t * l) +{ + start_data_node_t entry = {0}; + unsigned short trace = 0; + pc_t pc; + + ETAP_STAMP(lock_event_table(l), trace, trace); + ETAP_CREATE_ENTRY(entry, trace); + + simple_lock(&l->interlock); + + if (l->want_write || l->want_upgrade) { + simple_unlock(&l->interlock); + ETAP_DESTROY_ENTRY(entry); + return(FALSE); + } + + l->read_count++; + + ETAP_LINK_ENTRY(l, entry, trace); + + simple_unlock(&l->interlock); + + MON_ASSIGN_PC(entry->start_pc, pc, trace); + ETAP_DURATION_TIMESTAMP(entry, trace); + + return(TRUE); +} +#endif /* Unused */ + +#if MACH_KDB + +void db_show_one_lock(lock_t *); + + +void +db_show_one_lock( + lock_t *lock) +{ + db_printf("Read_count = 0x%x, %swant_upgrade, %swant_write, ", + lock->read_count, + lock->want_upgrade ? "" : "!", + lock->want_write ? "" : "!"); + db_printf("%swaiting, %scan_sleep\n", + lock->waiting ? "" : "!", lock->can_sleep ? "" : "!"); + db_printf("Interlock:\n"); + db_show_one_simple_lock((db_expr_t)simple_lock_addr(lock->interlock), + TRUE, (db_expr_t)0, (char *)0); +} +#endif /* MACH_KDB */ + +/* + * The C portion of the mutex package. These routines are only invoked + * if the optimized assembler routines can't do the work. + */ + +/* + * Routine: lock_alloc + * Function: + * Allocate a mutex for external users who cannot + * hard-code the structure definition into their + * objects. + * For now just use kalloc, but a zone is probably + * warranted. + */ +mutex_t * +mutex_alloc( + etap_event_t event) +{ + mutex_t *m; + + if ((m = (mutex_t *)kalloc(sizeof(mutex_t))) != 0) + mutex_init(m, event); + return(m); +} + +/* + * Routine: mutex_free + * Function: + * Free a mutex allocated for external users. + * For now just use kfree, but a zone is probably + * warranted. 
+ */ +void +mutex_free( + mutex_t *m) +{ + kfree((vm_offset_t)m, sizeof(mutex_t)); +} + + +/* + * mutex_lock_wait: Invoked if the assembler routine mutex_lock () fails + * because the mutex is already held by another thread. Called with the + * interlock locked and returns with the interlock unlocked. + */ + +void +mutex_lock_wait ( + mutex_t * m) +{ + m->waiters++; + ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK); + thread_sleep_interlock ((event_t) m, &m->interlock, THREAD_UNINT); +} + +/* + * mutex_unlock_wakeup: Invoked if the assembler routine mutex_unlock () + * fails because there are thread(s) waiting for this mutex. Called and + * returns with the interlock locked. + */ + +void +mutex_unlock_wakeup ( + mutex_t * m) +{ + assert(m->waiters); + m->waiters--; + thread_wakeup_one ((event_t) m); +} + +/* + * mutex_pause: Called by former callers of simple_lock_pause(). + */ + +void +mutex_pause(void) +{ + int wait_result; + + assert_wait_timeout( 1, THREAD_INTERRUPTIBLE); + ETAP_SET_REASON(current_thread(), BLOCKED_ON_MUTEX_LOCK); + wait_result = thread_block((void (*)(void))0); + if (wait_result != THREAD_TIMED_OUT) + thread_cancel_timer(); +} + +#if MACH_KDB +/* + * Routines to print out simple_locks and mutexes in a nicely-formatted + * fashion. 
+ */ + +char *simple_lock_labels = "ENTRY ILK THREAD DURATION CALLER"; +char *mutex_labels = "ENTRY LOCKED WAITERS THREAD CALLER"; + +void +db_show_one_simple_lock ( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + simple_lock_t saddr = (simple_lock_t)addr; + + if (saddr == (simple_lock_t)0 || !have_addr) { + db_error ("No simple_lock\n"); + } +#if USLOCK_DEBUG + else if (saddr->lock_type != USLOCK_TAG) + db_error ("Not a simple_lock\n"); +#endif /* USLOCK_DEBUG */ + + db_printf ("%s\n", simple_lock_labels); + db_print_simple_lock (saddr); +} + +void +db_print_simple_lock ( + simple_lock_t addr) +{ + + db_printf ("%08x %3d", addr, *hw_lock_addr(addr->interlock)); +#if USLOCK_DEBUG + db_printf (" %08x", addr->debug.lock_thread); + db_printf (" %08x ", addr->debug.duration[1]); + db_printsym ((int)addr->debug.lock_pc, DB_STGY_ANY); +#endif /* USLOCK_DEBUG */ + db_printf ("\n"); +} + +void +db_show_one_mutex ( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char * modif) +{ + mutex_t * maddr = (mutex_t *)addr; + + if (maddr == (mutex_t *)0 || !have_addr) + db_error ("No mutex\n"); +#if MACH_LDEBUG + else if (maddr->type != MUTEX_TAG) + db_error ("Not a mutex\n"); +#endif /* MACH_LDEBUG */ + + db_printf ("%s\n", mutex_labels); + db_print_mutex (maddr); +} + +void +db_print_mutex ( + mutex_t * addr) +{ + db_printf ("%08x %6d %7d", + addr, *hw_lock_addr(addr->locked), addr->waiters); +#if MACH_LDEBUG + db_printf (" %08x ", addr->thread); + db_printsym (addr->pc, DB_STGY_ANY); +#endif /* MACH_LDEBUG */ + db_printf ("\n"); +} +#endif /* MACH_KDB */ + +#if MACH_LDEBUG +extern void meter_simple_lock ( + simple_lock_t l); +extern void meter_simple_unlock ( + simple_lock_t l); +extern void cyctm05_stamp ( + unsigned long * start); +extern void cyctm05_diff ( + unsigned long * start, + unsigned long * end, + unsigned long * diff); + +#if 0 +simple_lock_data_t loser; +#endif + +void +meter_simple_lock( + simple_lock_t lp) +{ +#if 0 + 
cyctm05_stamp (lp->duration); +#endif +} + +int long_simple_lock_crash; +int long_simple_lock_time = 0x600; +/* + * This is pretty gawd-awful. XXX + */ +decl_simple_lock_data(extern,kd_tty) + +void +meter_simple_unlock( + simple_lock_t lp) +{ +#if 0 + unsigned long stime[2], etime[2], delta[2]; + + if (lp == &kd_tty) /* XXX */ + return; /* XXX */ + + stime[0] = lp->duration[0]; + stime[1] = lp->duration[1]; + + cyctm05_stamp (etime); + + if (etime[1] < stime[1]) /* XXX */ + return; /* XXX */ + + cyctm05_diff (stime, etime, delta); + + if (delta[1] >= 0x10000) /* XXX */ + return; /* XXX */ + + lp->duration[0] = delta[0]; + lp->duration[1] = delta[1]; + + if (loser.duration[1] < lp->duration[1]) + loser = *lp; + + assert (!long_simple_lock_crash || delta[1] < long_simple_lock_time); +#endif +} +#endif /* MACH_LDEBUG */ + + +#if ETAP_LOCK_TRACE + +/* + * ============================================================== + * ETAP hook when initializing a usimple_lock. May be invoked + * from the portable lock package or from an optimized machine- + * dependent implementation. 
+ * ============================================================== + */ + +void +etap_simplelock_init ( + simple_lock_t l, + etap_event_t event) +{ + ETAP_CLEAR_TRACE_DATA(l); + etap_event_table_assign(&l->u.event_table_chain, event); + +#if ETAP_LOCK_ACCUMULATE + /* reserve an entry in the cumulative buffer */ + l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l)); + /* initialize the entry if one was returned */ + if (l->cbuff_entry != CBUFF_ENTRY_NULL) { + l->cbuff_entry->event = event; + l->cbuff_entry->instance = (unsigned long) l; + l->cbuff_entry->kind = SPIN_LOCK; + } +#endif /* ETAP_LOCK_ACCUMULATE */ +} + + +void +etap_simplelock_unlock( + simple_lock_t l) +{ + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_hold_time; + pc_t pc; + + OBTAIN_PC(pc, l); + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + /* + * Calculate & collect hold time data only if + * the hold tracing was enabled throughout the + * whole operation. This prevents collection of + * bogus data caused by mid-operation trace changes. + * + */ + + if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) { + ETAP_TIMESTAMP (stop_hold_time); + ETAP_TOTAL_TIME(total_time, stop_hold_time, + l->u.s.start_hold_time); + CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace); + MON_ASSIGN_PC(l->end_pc, pc, trace); + MON_DATA_COLLECT(l, + l, + total_time, + SPIN_LOCK, + MON_DURATION, + trace); + } + ETAP_CLEAR_TRACE_DATA(l); +} + +/* ======================================================================== + * Since the the simple_lock() routine is machine dependant, it must always + * be coded in assembly. The two hook routines below are used to collect + * lock_stat data. + * ======================================================================== + */ + +/* + * ROUTINE: etap_simplelock_miss() + * + * FUNCTION: This spin lock routine is called upon the first + * spin (miss) of the lock. 
+ * + * A timestamp is taken at the beginning of the wait period, + * if wait tracing is enabled. + * + * + * PARAMETERS: + * - lock address. + * - timestamp address. + * + * RETURNS: Wait timestamp value. The timestamp value is later used + * by etap_simplelock_hold(). + * + * NOTES: This routine is NOT ALWAYS called. The lock may be free + * (never spinning). For this reason the pc is collected in + * etap_simplelock_hold(). + * + */ +etap_time_t +etap_simplelock_miss ( + simple_lock_t l) + +{ + unsigned short trace = 0; + unsigned short dynamic = 0; + etap_time_t start_miss_time; + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + if (trace & ETAP_CONTENTION) + ETAP_TIMESTAMP(start_miss_time); + + return(start_miss_time); +} + +/* + * ROUTINE: etap_simplelock_hold() + * + * FUNCTION: This spin lock routine is ALWAYS called once the lock + * is acquired. Here, the contention time is calculated and + * the start hold time is stamped. + * + * PARAMETERS: + * - lock address. + * - PC of the calling function. + * - start wait timestamp. 
+ * + */ + +void +etap_simplelock_hold ( + simple_lock_t l, + pc_t pc, + etap_time_t start_hold_time) +{ + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_hold_time; + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + MON_ASSIGN_PC(l->start_pc, pc, trace); + + /* do not collect wait data if lock was free */ + if (ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) { + ETAP_TIMESTAMP(stop_hold_time); + ETAP_TOTAL_TIME(total_time, + stop_hold_time, + start_hold_time); + CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace); + MON_DATA_COLLECT(l, + l, + total_time, + SPIN_LOCK, + MON_CONTENTION, + trace); + ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace); + } + else + ETAP_DURATION_TIMESTAMP(&l->u.s, trace); +} + +void +etap_mutex_init ( + mutex_t *l, + etap_event_t event) +{ + ETAP_CLEAR_TRACE_DATA(l); + etap_event_table_assign(&l->u.event_table_chain, event); + +#if ETAP_LOCK_ACCUMULATE + /* reserve an entry in the cumulative buffer */ + l->cbuff_entry = etap_cbuff_reserve(lock_event_table(l)); + /* initialize the entry if one was returned */ + if (l->cbuff_entry != CBUFF_ENTRY_NULL) { + l->cbuff_entry->event = event; + l->cbuff_entry->instance = (unsigned long) l; + l->cbuff_entry->kind = MUTEX_LOCK; + } +#endif /* ETAP_LOCK_ACCUMULATE */ +} + +etap_time_t +etap_mutex_miss ( + mutex_t *l) +{ + unsigned short trace = 0; + unsigned short dynamic = 0; + etap_time_t start_miss_time; + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + if (trace & ETAP_CONTENTION) + ETAP_TIMESTAMP(start_miss_time); + else + ETAP_TIME_CLEAR(start_miss_time); + + return(start_miss_time); +} + +void +etap_mutex_hold ( + mutex_t *l, + pc_t pc, + etap_time_t start_hold_time) +{ + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_hold_time; + + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + MON_ASSIGN_PC(l->start_pc, pc, trace); + + /* do not 
collect wait data if lock was free */ + if (!ETAP_TIME_IS_ZERO(start_hold_time) && (trace & ETAP_CONTENTION)) { + ETAP_TIMESTAMP(stop_hold_time); + ETAP_TOTAL_TIME(total_time, + stop_hold_time, + start_hold_time); + CUM_WAIT_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace); + MON_DATA_COLLECT(l, + l, + total_time, + MUTEX_LOCK, + MON_CONTENTION, + trace); + ETAP_COPY_START_HOLD_TIME(&l->u.s, stop_hold_time, trace); + } + else + ETAP_DURATION_TIMESTAMP(&l->u.s, trace); +} + +void +etap_mutex_unlock( + mutex_t *l) +{ + unsigned short dynamic = 0; + unsigned short trace = 0; + etap_time_t total_time; + etap_time_t stop_hold_time; + pc_t pc; + + OBTAIN_PC(pc, l); + ETAP_STAMP(lock_event_table(l), trace, dynamic); + + /* + * Calculate & collect hold time data only if + * the hold tracing was enabled throughout the + * whole operation. This prevents collection of + * bogus data caused by mid-operation trace changes. + * + */ + + if (ETAP_DURATION_ENABLED(trace) && ETAP_WHOLE_OP(l)) { + ETAP_TIMESTAMP(stop_hold_time); + ETAP_TOTAL_TIME(total_time, stop_hold_time, + l->u.s.start_hold_time); + CUM_HOLD_ACCUMULATE(l->cbuff_entry, total_time, dynamic, trace); + MON_ASSIGN_PC(l->end_pc, pc, trace); + MON_DATA_COLLECT(l, + l, + total_time, + MUTEX_LOCK, + MON_DURATION, + trace); + } + ETAP_CLEAR_TRACE_DATA(l); +} + +#endif /* ETAP_LOCK_TRACE */ diff --git a/osfmk/kern/lock.h b/osfmk/kern/lock.h new file mode 100644 index 000000000..65cd1f18f --- /dev/null +++ b/osfmk/kern/lock.h @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1998 Apple Computer + * All Rights Reserved + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: kern/lock.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Higher Level Locking primitives definitions + */ + +#ifndef _KERN_LOCK_H_ +#define _KERN_LOCK_H_ + +/* + * Configuration variables: + * + * + * MACH_LDEBUG: record pc and thread of callers, turn on + * all lock debugging. 
+ * + * + * ETAP: The Event Trace Analysis Package (ETAP) monitors + * and records micro-kernel lock behavior and general + * kernel events. ETAP supports two levels of + * tracing for locks: + * - cumulative (ETAP_LOCK_ACCUMULATE) + * - monitored (ETAP_LOCK_MONITOR) + * + * Note: If either level of tracing is configured then + * ETAP_LOCK_TRACE is automatically defined to + * equal one. + * + * Several macros are added throughout the lock code to + * allow for convenient configuration. + */ + +#include +#include +#include +#include + +/* + * The Mach lock package exports the following high-level + * lock abstractions: + * + * Lock Type Properties + * mutex blocking mutual exclusion lock, intended for + * SMP synchronization (vanishes on a uniprocessor); + * supports debugging, statistics, and pre-emption + * lock blocking synchronization permitting multiple + * simultaneous readers or a single writer; supports + * debugging and statistics but not pre-emption + * + * In general, mutex locks are preferred over all others, as the + * mutex supports pre-emption and relinquishes the processor + * upon contention. + * + */ + +/* + * A simple mutex lock. + * Do not change the order of the fields in this structure without + * changing the machine-dependent assembler routines which depend + * on them. 
+ */ +#ifdef MACH_KERNEL_PRIVATE +#include +#include +#include + +typedef struct { + hw_lock_data_t interlock; + hw_lock_data_t locked; + short waiters; +#if MACH_LDEBUG + int type; +#define MUTEX_TAG 0x4d4d + vm_offset_t pc; + vm_offset_t thread; +#endif /* MACH_LDEBUG */ +#if ETAP_LOCK_TRACE + union { /* Must be overlaid on the event_tablep */ + struct event_table_chain event_table_chain; + struct { + event_table_t event_tablep; /* ptr to event table entry */ + etap_time_t start_hold_time; /* Time of last acquistion */ + } s; + } u; +#endif /* ETAP_LOCK_TRACE */ +#if ETAP_LOCK_ACCUMULATE + cbuff_entry_t cbuff_entry; /* cumulative buffer entry */ +#endif /* ETAP_LOCK_ACCUMULATE */ +#if ETAP_LOCK_MONITOR + vm_offset_t start_pc; /* pc where lock operation began */ + vm_offset_t end_pc; /* pc where lock operation ended */ +#endif /* ETAP_LOCK_MONITOR */ +} mutex_t; + +#define decl_mutex_data(class,name) class mutex_t name; +#define mutex_addr(m) (&(m)) + +#if MACH_LDEBUG +#define mutex_held(m) (hw_lock_held(&((m)->locked)) && \ + ((m)->thread == (int)current_thread())) +#else /* MACH_LDEBUG */ +#define mutex_held(m) hw_lock_held(&((m)->locked)) +#endif /* MACH_LDEBUG */ + +#else /* MACH_KERNEL_PRIVATE */ + +typedef struct __mutex__ mutex_t; +extern boolean_t mutex_held(mutex_t*); + +#endif /* !MACH_KERNEL_PRIVATE */ + +extern mutex_t *mutex_alloc (etap_event_t); +extern void mutex_free (mutex_t*); + +extern void mutex_init (mutex_t*, etap_event_t); +extern void _mutex_lock (mutex_t*); +extern void mutex_unlock (mutex_t*); +extern boolean_t _mutex_try (mutex_t*); + +extern void mutex_lock_wait (mutex_t*); +extern void mutex_unlock_wakeup (mutex_t*); +extern void mutex_pause (void); +extern void interlock_unlock (hw_lock_t); + +/* + * The general lock structure. Provides for multiple readers, + * upgrading from read to write, and sleeping until the lock + * can be gained. 
+ * + * On some architectures, assembly language code in the 'inline' + * program fiddles the lock structures. It must be changed in + * concert with the structure layout. + * + * Only the "interlock" field is used for hardware exclusion; + * other fields are modified with normal instructions after + * acquiring the interlock bit. + */ +#ifdef MACH_KERNEL_PRIVATE +typedef struct { + decl_simple_lock_data(,interlock) /* "hardware" interlock field */ + volatile unsigned int + read_count:16, /* No. of accepted readers */ + want_upgrade:1, /* Read-to-write upgrade waiting */ + want_write:1, /* Writer is waiting, or + locked for write */ + waiting:1, /* Someone is sleeping on lock */ + can_sleep:1; /* Can attempts to lock go to sleep? */ +#if ETAP_LOCK_TRACE + union { /* Must be overlaid on the event_tablep */ + struct event_table_chain event_table_chain; + struct { + event_table_t event_tablep; /* ptr to event table entry */ + start_data_node_t start_list; /* linked list of start times + and pcs */ + } s; + } u; +#endif /* ETAP_LOCK_TRACE */ +#if ETAP_LOCK_ACCUMULATE + cbuff_entry_t cbuff_write; /* write cumulative buffer entry */ + cbuff_entry_t cbuff_read; /* read cumulative buffer entry */ +#endif /* ETAP_LOCK_ACCUMULATE */ +} lock_t; + +/* Sleep locks must work even if no multiprocessing */ + +/* + * Complex lock operations + */ + +#if ETAP +/* + * Locks have a pointer into an event_table entry that names the + * corresponding lock event and controls whether it is being traced. + * Initially this pointer is into a read-only table event_table_init[]. + * Once dynamic allocation becomes possible a modifiable copy of the table + * is allocated and pointers are set to within this copy. The pointers + * that were already in place at that point need to be switched to point + * into the copy. To do this we overlay the event_table_chain structure + * onto sufficiently-big elements of the various lock structures so we + * can sweep down this list switching the pointers. 
The assumption is + * that we will not want to enable tracing before this is done (which is + * after all during kernel bootstrap, before any user tasks are launched). + * + * This is admittedly rather ugly but so were the alternatives: + * - record the event_table pointers in a statically-allocated array + * (dynamic allocation not yet being available) -- but there were + * over 8000 of them; + * - add a new link field to each lock structure; + * - change pointers to array indices -- this adds quite a bit of + * arithmetic to every lock operation that might be traced. + */ +#define lock_event_table(lockp) ((lockp)->u.s.event_tablep) +#define lock_start_hold_time(lockp) ((lockp)->u.s.start_hold_time) +#endif /* ETAP_LOCK_TRACE */ + +extern void lock_init (lock_t*, + boolean_t, + etap_event_t, + etap_event_t); + +#else /* MACH_KERNEL_PRIVATE */ + +typedef struct __lock__ lock_t; +extern lock_t *lock_alloc(boolean_t, etap_event_t, etap_event_t); +void lock_free(lock_t *); + +#endif /* !MACH_KERNEL_PRIVATE */ + +extern void lock_write (lock_t*); +extern void lock_read (lock_t*); +extern void lock_done (lock_t*); +extern void lock_write_to_read (lock_t*); + +#define lock_read_done(l) lock_done(l) +#define lock_write_done(l) lock_done(l) + +extern boolean_t lock_read_to_write (lock_t*); /* vm_map is only user */ +extern unsigned int LockTimeOut; /* Standard lock timeout value */ + +#endif /* _KERN_LOCK_H_ */ diff --git a/osfmk/kern/lock_mon.c b/osfmk/kern/lock_mon.c new file mode 100644 index 000000000..05cfb54a3 --- /dev/null +++ b/osfmk/kern/lock_mon.c @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.19.1 1997/09/22 17:39:46 barbou + * MP+RT: protect cpu_number() usage against preemption. + * [97/09/16 barbou] + * + * Revision 1.3.15.4 1995/02/24 15:20:58 alanl + * DIPC: Merge from nmk17b2 to nmk18b8. + * Notes: major lock cleanup. Change kdb_lock and printf_lock + * references to conform with simple_lock declaration rules. + * This code is broken and non-portable; its functionality + * should be subsumed in the regular lock package. + * [95/01/16 alanl] + * + * Revision 1.3.17.2 1994/11/10 06:13:19 dwm + * mk6 CR764 - s/spinlock/simple_lock/ (name change only) + * [1994/11/10 05:28:52 dwm] + * + * Revision 1.3.17.1 1994/11/04 10:07:54 dwm + * mk6 CR668 - 1.3b26 merge + * This file is obviously UNUSED - hence broken; merged anyway + * * Revision 1.3.4.4 1994/05/06 18:50:11 tmt + * Merge in DEC Alpha changes to osc1.3b19. + * Merge Alpha changes into osc1.312b source code. + * 64bit cleanup. 
+ * * End1.3merge + * [1994/11/04 09:25:58 dwm] + * + * Revision 1.3.15.1 1994/09/23 02:21:48 ezf + * change marker to not FREE + * [1994/09/22 21:34:22 ezf] + * + * Revision 1.3.13.1 1994/06/09 14:11:30 dswartz + * Preemption merge. + * [1994/06/09 14:07:06 dswartz] + * + * Revision 1.3.4.2 1993/06/09 02:36:12 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:13:15 jeffc] + * + * Revision 1.3 1993/04/19 16:26:56 devrcs + * Fix for TIME_STAMP configuration. + * [Patrick Petit ] + * [93/02/11 bernadat] + * + * Revision 1.2 1992/11/25 01:11:05 robert + * integrate changes below for norma_14 + * + * Philippe Bernadat (bernadat) at gr.osf.org + * Moved MACH_MP_DEBUG code to kern/lock.c + * [1992/11/13 19:33:47 robert] + * + * Revision 1.1 1992/09/30 02:09:28 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.1.2.1.3.1 92/02/18 19:08:45 jeffreyh + * Created. Might need some work if used on anything but a 386. + * [92/02/11 07:56:50 bernadat] + */ +/* CMU_ENDHIST */ + +/* + * Mach Operating System + * Copyright (c) 1990 Carnegie-Mellon University + * Copyright (c) 1989 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ + +/* + */ + +/* + * Support For MP Debugging + * if MACH_MP_DEBUG is on, we use alternate locking + * routines do detect dealocks + * Support for MP lock monitoring (MACH_LOCK_MON). + * Registers use of locks, contention. 
+ * Depending on hardware also records time spent with locks held + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +decl_simple_lock_data(extern, kdb_lock) +decl_simple_lock_data(extern, printf_lock) + +#if NCPUS > 1 && MACH_LOCK_MON + +#if TIME_STAMP +extern time_stamp_t time_stamp; +#else TIME_STAMP +typedef unsigned int time_stamp_t; +#define time_stamp 0 +#endif TIME_STAMP + +#define LOCK_INFO_MAX (1024*32) +#define LOCK_INFO_HASH_COUNT 1024 +#define LOCK_INFO_PER_BUCKET (LOCK_INFO_MAX/LOCK_INFO_HASH_COUNT) + + +#define HASH_LOCK(lock) ((long)lock>>5 & (LOCK_INFO_HASH_COUNT-1)) + +struct lock_info { + unsigned int success; + unsigned int fail; + unsigned int masked; + unsigned int stack; + unsigned int time; +#if MACH_SLOCKS + simple_lock_data_t * lock; +#endif + vm_offset_t caller; +}; + +struct lock_info_bucket { + struct lock_info info[LOCK_INFO_PER_BUCKET]; +}; + +struct lock_info_bucket lock_info[LOCK_INFO_HASH_COUNT]; +struct lock_info default_lock_info; +unsigned default_lock_stack = 0; + +extern int curr_ipl[]; + + + +struct lock_info * +locate_lock_info(lock) +simple_lock_data_t ** lock; +{ + struct lock_info *li = &(lock_info[HASH_LOCK(*lock)].info[0]); + register i; + + for (i=0; i < LOCK_INFO_PER_BUCKET; i++, li++) + if (li->lock) { + if (li->lock == *lock) + return(li); + } else { + li->lock = *lock; + li->caller = *((vm_offset_t *)lock - 1); + return(li); + } + db_printf("out of lock_info slots\n"); + li = &default_lock_info; + return(li); +} + + +simple_lock(lock) +decl_simple_lock_data(, *lock) +{ + register struct lock_info *li = locate_lock_info(&lock); + + if (current_thread()) + li->stack = current_thread()->lock_stack++; + mp_disable_preemption(); + if (curr_ipl[cpu_number()]) + li->masked++; + mp_enable_preemption(); + if (_simple_lock_try(lock)) + li->success++; + else { + _simple_lock(lock); + li->fail++; + } + li->time = time_stamp - li->time; +} + +simple_lock_try(lock) 
+decl_simple_lock_data(, *lock) +{ + register struct lock_info *li = locate_lock_info(&lock); + + mp_disable_preemption(); + if (curr_ipl[cpu_number()]) + li->masked++; + mp_enable_preemption(); + if (_simple_lock_try(lock)) { + li->success++; + li->time = time_stamp - li->time; + if (current_thread()) + li->stack = current_thread()->lock_stack++; + return(1); + } else { + li->fail++; + return(0); + } +} + +simple_unlock(lock) +decl_simple_lock_data(, *lock) +{ + register time_stamp_t stamp = time_stamp; + register time_stamp_t *time = &locate_lock_info(&lock)->time; + register unsigned *lock_stack; + + *time = stamp - *time; + _simple_unlock(lock); + if (current_thread()) { + lock_stack = ¤t_thread()->lock_stack; + if (*lock_stack) + (*lock_stack)--; + } +} + +lip() { + lis(4, 1, 0); +} + +#define lock_info_sort lis + +unsigned scurval, ssum; +struct lock_info *sli; + +lock_info_sort(arg, abs, count) +{ + struct lock_info *li, mean; + int bucket = 0; + int i; + unsigned max_val; + unsigned old_val = (unsigned)-1; + struct lock_info *target_li = &lock_info[0].info[0]; + unsigned sum; + unsigned empty, total; + unsigned curval; + + printf("\nSUCCESS FAIL MASKED STACK TIME LOCK/CALLER\n"); + if (!count) + count = 8 ; + while (count && target_li) { + empty = LOCK_INFO_HASH_COUNT; + target_li = 0; + total = 0; + max_val = 0; + mean.success = 0; + mean.fail = 0; + mean.masked = 0; + mean.stack = 0; + mean.time = 0; + mean.lock = (simple_lock_data_t *) &lock_info; + mean.caller = (vm_offset_t) &lock_info; + for (bucket = 0; bucket < LOCK_INFO_HASH_COUNT; bucket++) { + li = &lock_info[bucket].info[0]; + if (li->lock) + empty--; + for (i= 0; i< LOCK_INFO_PER_BUCKET && li->lock; i++, li++) { + if (li->lock == &kdb_lock || li->lock == &printf_lock) + continue; + total++; + curval = *((int *)li + arg); + sum = li->success + li->fail; + if(!sum && !abs) + continue; + scurval = curval; + ssum = sum; + sli = li; + if (!abs) switch(arg) { + case 0: + break; + case 1: + case 2: + 
curval = (curval*100) / sum; + break; + case 3: + case 4: + curval = curval / sum; + break; + } + if (curval > max_val && curval < old_val) { + max_val = curval; + target_li = li; + } + if (curval == old_val && count != 0) { + print_lock_info(li); + count--; + } + mean.success += li->success; + mean.fail += li->fail; + mean.masked += li->masked; + mean.stack += li->stack; + mean.time += li->time; + } + } + if (target_li) + old_val = max_val; + } + db_printf("\n%d total locks, %d empty buckets", total, empty ); + if (default_lock_info.success) + db_printf(", default: %d", default_lock_info.success + default_lock_info.fail); + db_printf("\n"); + print_lock_info(&mean); +} + +#define lock_info_clear lic + +lock_info_clear() +{ + struct lock_info *li; + int bucket = 0; + int i; + for (bucket = 0; bucket < LOCK_INFO_HASH_COUNT; bucket++) { + li = &lock_info[bucket].info[0]; + for (i= 0; i< LOCK_INFO_PER_BUCKET; i++, li++) { + bzero(li, sizeof(struct lock_info)); + } + } + bzero(&default_lock_info, sizeof(struct lock_info)); +} + +print_lock_info(li) +struct lock_info *li; +{ + int off; + int sum = li->success + li->fail; + db_printf("%d %d/%d %d/%d %d/%d %d/%d ", li->success, + li->fail, (li->fail*100)/sum, + li->masked, (li->masked*100)/sum, + li->stack, li->stack/sum, + li->time, li->time/sum); + db_search_symbol(li->lock, 0, &off); + if (off < 1024) + db_printsym(li->lock, 0); + else { + db_printsym(li->caller, 0); + db_printf("(%X)", li->lock); + } + db_printf("\n"); +} + +#endif NCPUS > 1 && MACH_LOCK_MON + +#if TIME_STAMP + +/* + * Measure lock/unlock operations + */ + +time_lock(loops) +{ + decl_simple_lock_data(, lock) + register time_stamp_t stamp; + register int i; + + + if (!loops) + loops = 1000; + simple_lock_init(&lock); + stamp = time_stamp; + for (i = 0; i < loops; i++) { + simple_lock(&lock); + simple_unlock(&lock); + } + stamp = time_stamp - stamp; + db_printf("%d stamps for simple_locks\n", stamp/loops); +#if MACH_LOCK_MON + stamp = time_stamp; + for 
(i = 0; i < loops; i++) { + _simple_lock(&lock); + _simple_unlock(&lock); + } + stamp = time_stamp - stamp; + db_printf("%d stamps for _simple_locks\n", stamp/loops); +#endif MACH_LOCK_MON +} +#endif TIME_STAMP + + + + + diff --git a/osfmk/kern/mach_clock.c b/osfmk/kern/mach_clock.c new file mode 100644 index 000000000..09cba548c --- /dev/null +++ b/osfmk/kern/mach_clock.c @@ -0,0 +1,264 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: clock_prim.c + * Author: Avadis Tevanian, Jr. + * Date: 1986 + * + * Clock primitives. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* kernel_map */ +#include /* HZ */ + +#include +#include +#include + +#include + +#if STAT_TIME +#define TICKBUMP(t) timer_bump(t, (1000000/HZ)) +#else +#define TICKBUMP(t) +#endif + +boolean_t profile_kernel_services = TRUE; /* Indicates wether or not we + * account kernel services + * samples for user task */ + +/* + * Hertz rate clock interrupt servicing. Primarily used to + * update CPU statistics, recompute thread priority, and to + * do profiling + */ +void +hertz_tick( + boolean_t usermode, /* executing user code */ + natural_t pc) +{ + thread_act_t thr_act; + register int my_cpu; + register thread_t thread = current_thread(); + int state; +#if MACH_PROF +#ifdef __MACHO__ +#define ETEXT etext + extern long etext; +#else +#define ETEXT &etext + extern char etext; +#endif + boolean_t inkernel; +#endif /* MACH_PROF */ +#if GPROF + struct profile_vars *pv; + prof_uptrint_t s; +#endif + +#ifdef lint + pc++; +#endif /* lint */ + + mp_disable_preemption(); + my_cpu = cpu_number(); + + /* + * The system startup sequence initializes the clock + * before kicking off threads. 
So it's possible, + * especially when debugging, to wind up here with + * no thread to bill against. So ignore the tick. + */ + if (thread == THREAD_NULL) { + mp_enable_preemption(); + return; + } + +#if MACH_PROF + inkernel = !usermode && (pc < (unsigned int)ETEXT); +#endif /* MACH_PROF */ + + /* + * Hertz processing performed by all processors + * includes statistics gathering, state tracking, + * and quantum updating. + */ + counter(c_clock_ticks++); + +#if GPROF + pv = PROFILE_VARS(my_cpu); +#endif + + if (usermode) { + TICKBUMP(&thread->user_timer); +#if 0 + if (thread->priority < BASEPRI_DEFAULT) + state = CPU_STATE_NICE; + else +#endif + state = CPU_STATE_USER; +#if GPROF + if (pv->active) + PROF_CNT_INC(pv->stats.user_ticks); +#endif + } + else { + switch(processor_ptr[my_cpu]->state) { + + case PROCESSOR_IDLE: + TICKBUMP(&thread->system_timer); + state = CPU_STATE_IDLE; + break; + + default: + TICKBUMP(&thread->system_timer); + state = CPU_STATE_SYSTEM; + break; + } +#if GPROF + if (pv->active) { + if (state == CPU_STATE_SYSTEM) + PROF_CNT_INC(pv->stats.kernel_ticks); + else + PROF_CNT_INC(pv->stats.idle_ticks); + + if ((prof_uptrint_t)pc < _profile_vars.profil_info.lowpc) + PROF_CNT_INC(pv->stats.too_low); + else { + s = (prof_uptrint_t)pc - _profile_vars.profil_info.lowpc; + if (s < pv->profil_info.text_len) { + LHISTCOUNTER *ptr = (LHISTCOUNTER *) pv->profil_buf; + LPROF_CNT_INC(ptr[s / HISTFRACTION]); + } + else + PROF_CNT_INC(pv->stats.too_high); + } + } +#endif + } + + machine_slot[my_cpu].cpu_ticks[state]++; + thread_quantum_update(my_cpu, thread, 1, state); + + /* + * Hertz processing performed by the master-cpu + * exclusively. 
+ */ + if (my_cpu == master_cpu) { +#ifdef MACH_BSD + { + extern void bsd_hardclock( + boolean_t usermode, + natural_t pc, + int ticks); + + bsd_hardclock(usermode, pc, 1); + } +#endif /* MACH_BSD */ + } + +#if MACH_PROF + thr_act = thread->top_act; + if (thr_act->act_profiled) { + if (inkernel && thr_act->map != kernel_map) { + /* + * Non-kernel thread running in kernel + * Register user pc (mach_msg, vm_allocate ...) + */ + if (profile_kernel_services) + profile(user_pc(thr_act), thr_act->profil_buffer); + } + else + /* + * User thread and user mode or + * user (server) thread in kernel-loaded server or + * kernel thread and kernel mode + * register interrupted pc + */ + profile(pc, thr_act->profil_buffer); + } + if (kernel_task->task_profiled) { + if (inkernel && thr_act->map != kernel_map) + /* + * User thread not profiled in kernel mode, + * kernel task profiled, register kernel pc + * for kernel task + */ + profile(pc, kernel_task->profil_buffer); + } +#endif /* MACH_PROF */ + mp_enable_preemption(); +} diff --git a/osfmk/kern/mach_factor.c b/osfmk/kern/mach_factor.c new file mode 100644 index 000000000..eca59d7c6 --- /dev/null +++ b/osfmk/kern/mach_factor.c @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/mach_factor.c + * Author: Avadis Tevanian, Jr. + * Date: 1986 + * + * Compute the Mach Factor. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#if MACH_KERNEL +#include +#include +#endif /* MACH_KERNEL */ + +integer_t avenrun[3] = {0, 0, 0}; +integer_t mach_factor[3] = {0, 0, 0}; + +/* + * Values are scaled by LOAD_SCALE, defined in processor_info.h + */ +static long fract[3] = { + 800, /* (4.0/5.0) 5 second average */ + 966, /* (29.0/30.0) 30 second average */ + 983, /* (59.0/60.) 1 minute average */ +}; + +void +compute_mach_factor(void) +{ + register processor_set_t pset; + register processor_t processor; + register int ncpus; + register int nthreads; + register long factor_now = 0L; + register long average_now = 0L; + register long load_now = 0L; + + pset = &default_pset; + simple_lock(&pset->processors_lock); + if ((ncpus = pset->processor_count) > 0) { + /* + * Count number of threads. + */ + nthreads = pset->runq.count; + processor = (processor_t)queue_first(&pset->processors); + while (!queue_end(&pset->processors, (queue_entry_t)processor)) { + nthreads += processor->runq.count; + + processor = (processor_t)queue_next(&processor->processors); + } + + /* + * account for threads on cpus. + */ + nthreads += ncpus - pset->idle_count; + + /* + * The current thread (running this calculation) + * doesn't count; it's always in the default pset. + */ + if (pset == &default_pset) + nthreads -= 1; + + if (nthreads >= ncpus) + factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1); + else + factor_now = (ncpus - nthreads) * LOAD_SCALE; + + if (nthreads > ncpus) + load_now = (nthreads << SCHED_SHIFT) / ncpus; + else + load_now = 0; + + /* + * Load average and mach factor calculations for + * those that ask about these things. + */ + + average_now = (nthreads * LOAD_SCALE) / ncpus; + + pset->mach_factor = ((pset->mach_factor << 2) + factor_now) / 5; + pset->load_average = ((pset->load_average << 2) + average_now) / 5; + + /* + * sched_load is the only thing used by scheduler. 
+ */ + pset->sched_load = (pset->sched_load + load_now) >> 1; + } + else { + pset->mach_factor = pset->load_average = 0; + pset->sched_load = 0; + } + + simple_unlock(&pset->processors_lock); + + /* + * And some ugly stuff to keep w happy. + */ + { + register int i; + + for (i = 0; i < 3; i++) { + mach_factor[i] = ((mach_factor[i] * fract[i]) + + (factor_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE; + + avenrun[i] = ((avenrun[i] * fract[i]) + + (average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE; + } + } +} diff --git a/osfmk/kern/mach_param.h b/osfmk/kern/mach_param.h new file mode 100644 index 000000000..ce2e9d27c --- /dev/null +++ b/osfmk/kern/mach_param.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.9.1 1994/09/23 02:22:28 ezf + * change marker to not FREE + * [1994/09/22 21:34:35 ezf] + * + * Revision 1.1.7.1 1994/01/12 17:54:33 dwm + * Coloc: initial restructuring to follow Utah model. + * added various maxima for act/thread_pool zones + * [1994/01/12 17:29:08 dwm] + * + * Revision 1.1.3.3 1993/06/07 22:13:58 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:06:04 jeffc] + * + * Revision 1.1.3.2 1993/06/02 23:38:46 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:13:30 jeffc] + * + * Revision 1.1 1992/09/30 02:29:52 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5.2.1 92/03/03 16:20:11 jeffreyh + * 19-Feb-92 David L. Black (dlb) at Open Software Foundation + * Double object slop in PORT_MAX, allow for extra (non-task) + * ipc spaces (e.g. ipc_space_remote) in SPACE_MAX + * [92/02/26 11:54:50 jeffreyh] + * + * Revision 2.5 91/05/14 16:44:25 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:27:56 mrt + * Changed to new Mach copyright + * [91/02/01 16:15:07 mrt] + * + * Revision 2.3 90/06/02 14:55:13 rpd + * Added new IPC parameters. + * [90/03/26 22:11:55 rpd] + * + * + * Condensed history: + * Moved TASK_MAX, PORT_MAX, etc. here from mach/mach_param.h (rpd). + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/mach_param.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1986 + * + * Mach system sizing parameters + * + */ + +#ifndef _KERN_MACH_PARAM_H_ +#define _KERN_MACH_PARAM_H_ + +#define THREAD_MAX 1024 /* Max number of threads */ +#define THREAD_CHUNK 64 /* Allocation chunk */ + +#define TASK_MAX 1024 /* Max number of tasks */ +#define TASK_CHUNK 64 /* Allocation chunk */ + +#define ACT_MAX 1024 /* Max number of acts */ +#define ACT_CHUNK 64 /* Allocation chunk */ + +#define THREAD_POOL_MAX 1024 /* Max number of thread_pools */ +#define THREAD_POOL_CHUNK 64 /* Allocation chunk */ + +#define PORT_MAX ((TASK_MAX * 3 + THREAD_MAX) /* kernel */ \ + + (THREAD_MAX * 2) /* user */ \ + + 40000) /* slop for objects */ + /* Number of ports, system-wide */ + +#define SET_MAX (TASK_MAX + THREAD_MAX + 200) + /* Max number of port sets */ + +#define ITE_MAX (1 << 16) /* Max number of splay tree entries */ + +#define SPACE_MAX (TASK_MAX + 5) /* Max number of IPC spaces */ + +#define SEMAPHORE_MAX (PORT_MAX >> 1) /* Maximum 
number of semaphores */ + +#endif /* _KERN_MACH_PARAM_H_ */ diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c new file mode 100644 index 000000000..35dcb1522 --- /dev/null +++ b/osfmk/kern/machine.c @@ -0,0 +1,506 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/machine.c + * Author: Avadis Tevanian, Jr. + * Date: 1987 + * + * Support for machine independent machine abstraction. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include /*** ??? fix so this can be removed ***/ + +/* + * Exported variables: + */ + +struct machine_info machine_info; +struct machine_slot machine_slot[NCPUS]; + +static queue_head_t processor_action_queue; +static boolean_t processor_action_active; +static thread_call_t processor_action_call; +static thread_call_data_t processor_action_call_data; +decl_simple_lock_data(static,processor_action_lock) + +thread_t machine_wake_thread; + +/* Forwards */ +processor_set_t processor_request_action( + processor_t processor, + processor_set_t new_pset); + +void processor_doaction( + processor_t processor); + +void processor_doshutdown( + processor_t processor); + +/* + * cpu_up: + * + * Flag specified cpu as up and running. Called when a processor comes + * online. + */ +void +cpu_up( + int cpu) +{ + processor_t processor = cpu_to_processor(cpu); + struct machine_slot *ms; + spl_t s; + + /* + * Just twiddle our thumbs; we've got nothing better to do + * yet, anyway. 
+ */ + while (!simple_lock_try(&default_pset.processors_lock)) + continue; + + s = splsched(); + processor_lock(processor); + init_ast_check(processor); + ms = &machine_slot[cpu]; + ms->running = TRUE; + machine_info.avail_cpus++; + pset_add_processor(&default_pset, processor); + processor->state = PROCESSOR_RUNNING; + processor_unlock(processor); + splx(s); + + simple_unlock(&default_pset.processors_lock); +} + +/* + * cpu_down: + * + * Flag specified cpu as down. Called when a processor is about to + * go offline. + */ +void +cpu_down( + int cpu) +{ + processor_t processor; + struct machine_slot *ms; + spl_t s; + + processor = cpu_to_processor(cpu); + + s = splsched(); + processor_lock(processor); + ms = &machine_slot[cpu]; + ms->running = FALSE; + machine_info.avail_cpus--; + /* + * processor has already been removed from pset. + */ + processor->processor_set_next = PROCESSOR_SET_NULL; + processor->state = PROCESSOR_OFF_LINE; + processor_unlock(processor); + splx(s); +} + +kern_return_t +host_reboot( + host_priv_t host_priv, + int options) +{ + if (host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_HOST); + + assert(host_priv == &realhost); + + if (options & HOST_REBOOT_DEBUGGER) { + Debugger("Debugger"); + } + else + halt_all_cpus(!(options & HOST_REBOOT_HALT)); + + return (KERN_SUCCESS); +} + +/* + * processor_request_action: + * + * Common internals of processor_assign and processor_shutdown. + * If new_pset is null, this is a shutdown, else it's an assign + * and caller must donate a reference. + * For assign operations, it returns an old pset that must be deallocated + * if it's not NULL. + * For shutdown operations, it always returns PROCESSOR_SET_NULL. + */ +processor_set_t +processor_request_action( + processor_t processor, + processor_set_t new_pset) +{ + processor_set_t pset, old_next_pset; + + /* + * Processor must be in a processor set. Must lock its idle lock to + * get at processor state. 
+ */ + pset = processor->processor_set; + simple_lock(&pset->idle_lock); + + /* + * If the processor is dispatching, let it finish - it will set its + * state to running very soon. + */ + while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) { + simple_unlock(&pset->idle_lock); + simple_lock(&pset->idle_lock); + } + + /* + * Now lock the action queue and do the dirty work. + */ + simple_lock(&processor_action_lock); + + switch (processor->state) { + + case PROCESSOR_IDLE: + /* + * Remove from idle queue. + */ + queue_remove(&pset->idle_queue, processor, + processor_t, processor_queue); + pset->idle_count--; + + /* fall through ... */ + case PROCESSOR_RUNNING: + /* + * Put it on the action queue. + */ + queue_enter(&processor_action_queue, processor, + processor_t,processor_queue); + + /* Fall through ... */ + case PROCESSOR_ASSIGN: + /* + * And ask the action_thread to do the work. + */ + + if (new_pset == PROCESSOR_SET_NULL) { + processor->state = PROCESSOR_SHUTDOWN; + old_next_pset = PROCESSOR_SET_NULL; + } else { + processor->state = PROCESSOR_ASSIGN; + old_next_pset = processor->processor_set_next; + processor->processor_set_next = new_pset; + } + break; + + default: + printf("state: %d\n", processor->state); + panic("processor_request_action: bad state"); + } + + if (processor_action_active == FALSE) { + processor_action_active = TRUE; + simple_unlock(&processor_action_lock); + simple_unlock(&pset->idle_lock); + processor_unlock(processor); + thread_call_enter(processor_action_call); + processor_lock(processor); + } else { + simple_unlock(&processor_action_lock); + simple_unlock(&pset->idle_lock); + } + + return (old_next_pset); +} + +kern_return_t +processor_assign( + processor_t processor, + processor_set_t new_pset, + boolean_t wait) +{ +#ifdef lint + processor++; new_pset++; wait++; +#endif /* lint */ + return (KERN_FAILURE); +} + +/* + * processor_shutdown() queues a processor up for shutdown. + * Any assignment in progress is overriden. 
+ */ +kern_return_t +processor_shutdown( + processor_t processor) +{ + spl_t s; + + s = splsched(); + processor_lock(processor); + if ((processor->state == PROCESSOR_OFF_LINE) || + (processor->state == PROCESSOR_SHUTDOWN)) { + /* + * Already shutdown or being shutdown -- nothing to do. + */ + processor_unlock(processor); + splx(s); + + return (KERN_SUCCESS); + } + + (void) processor_request_action(processor, PROCESSOR_SET_NULL); + + assert_wait((event_t)processor, THREAD_UNINT); + + processor_unlock(processor); + splx(s); + + thread_block((void (*)(void)) 0); + + return (KERN_SUCCESS); +} + +/* + * processor_action() shuts down processors or changes their assignment. + */ +static void +_processor_action( + thread_call_param_t p0, + thread_call_param_t p1) +{ + register processor_t processor; + spl_t s; + + s = splsched(); + simple_lock(&processor_action_lock); + + while (!queue_empty(&processor_action_queue)) { + processor = (processor_t) queue_first(&processor_action_queue); + queue_remove(&processor_action_queue, processor, + processor_t, processor_queue); + simple_unlock(&processor_action_lock); + splx(s); + + processor_doaction(processor); + + s = splsched(); + simple_lock(&processor_action_lock); + } + + processor_action_active = FALSE; + simple_unlock(&processor_action_lock); + splx(s); +} + +void +processor_action(void) +{ + queue_init(&processor_action_queue); + simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION); + processor_action_active = FALSE; + + thread_call_setup(&processor_action_call_data, _processor_action, NULL); + processor_action_call = &processor_action_call_data; +} + +/* + * processor_doaction actually does the shutdown. The trick here + * is to schedule ourselves onto a cpu and then save our + * context back into the runqs before taking out the cpu. 
+ */ +void +processor_doaction( + processor_t processor) +{ + thread_t self = current_thread(); + processor_set_t pset; + thread_t old_thread; + spl_t s; + + /* + * Get onto the processor to shutdown + */ + thread_bind(self, processor); + thread_block((void (*)(void)) 0); + + pset = processor->processor_set; + simple_lock(&pset->processors_lock); + + if (pset->processor_count == 1) { + thread_t thread; + sched_policy_t *policy; + extern void start_cpu_thread(void); + + simple_unlock(&pset->processors_lock); + + /* + * Create the thread, and point it at the routine. + */ + thread = kernel_thread_with_priority(kernel_task, MAXPRI_KERNBAND, + start_cpu_thread, FALSE); + + disable_preemption(); + + s = splsched(); + thread_lock(thread); + thread->state |= TH_RUN; + policy = &sched_policy[thread->policy]; + (void)policy->sp_ops.sp_thread_unblock(policy, thread); + (void)rem_runq(thread); + machine_wake_thread = thread; + thread_unlock(thread); + splx(s); + + simple_lock(&pset->processors_lock); + enable_preemption(); + } + + s = splsched(); + processor_lock(processor); + + /* + * Do shutdown, make sure we live when processor dies. + */ + if (processor->state != PROCESSOR_SHUTDOWN) { + panic("action_thread -- bad processor state"); + } + + pset_remove_processor(pset, processor); + processor_unlock(processor); + simple_unlock(&pset->processors_lock); + + /* + * Clean up. + */ + thread_bind(self, PROCESSOR_NULL); + self->continuation = 0; + old_thread = switch_to_shutdown_context(self, + processor_doshutdown, processor); + thread_dispatch(old_thread); + thread_wakeup((event_t)processor); + splx(s); +} + +/* + * Actually do the processor shutdown. This is called at splsched, + * running on the processor's shutdown stack. + */ + +void +processor_doshutdown( + processor_t processor) +{ + register int cpu = processor->slot_num; + + thread_dispatch(current_thread()); + timer_switch(&kernel_timer[cpu]); + + /* + * OK, now exit this cpu. 
+ */ + PMAP_DEACTIVATE_KERNEL(cpu); + cpu_data[cpu].active_thread = THREAD_NULL; + active_kloaded[cpu] = THR_ACT_NULL; + cpu_down(cpu); + cpu_sleep(); + panic("zombie processor"); + /*NOTREACHED*/ +} + +kern_return_t +host_get_boot_info( + host_priv_t host_priv, + kernel_boot_info_t boot_info) +{ + char *src = ""; + extern char *machine_boot_info( + kernel_boot_info_t boot_info, + vm_size_t buf_len); + + if (host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_HOST); + + assert(host_priv == &realhost); + + /* + * Copy first operator string terminated by '\0' followed by + * standardized strings generated from boot string. + */ + src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX); + if (src != boot_info) + (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX); + + return (KERN_SUCCESS); +} diff --git a/osfmk/kern/machine.h b/osfmk/kern/machine.h new file mode 100644 index 000000000..5c3f211f4 --- /dev/null +++ b/osfmk/kern/machine.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _KERN_MACHINE_H_ +#define _KERN_MACHINE_H_ + +#include +#include +#include +#include + +/* + * Machine support declarations. + */ + +extern thread_t machine_wake_thread; + +extern void processor_action(void); + +extern void cpu_down( + int cpu); + +extern void cpu_up( + int cpu); + +/* + * Must be implemented in machine dependent code. + */ + +/* Initialize machine dependent ast code */ +extern void init_ast_check( + processor_t processor); + +/* Cause check for ast */ +extern void cause_ast_check( + processor_t processor); + +extern kern_return_t cpu_start( + int slot_num); + +extern kern_return_t cpu_control( + int slot_num, + processor_info_t info, + unsigned int count); + +extern thread_t switch_to_shutdown_context( + thread_t thread, + void (*doshutdown)(processor_t), + processor_t processor); + +extern kern_return_t cpu_signal( /* Signal the target CPU */ + int target, + int signal, + unsigned int p1, + unsigned int p2); + +#endif /* _KERN_MACHINE_H_ */ diff --git a/osfmk/kern/macro_help.h b/osfmk/kern/macro_help.h new file mode 100644 index 000000000..be1a48e14 --- /dev/null +++ b/osfmk/kern/macro_help.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 02:23:07 ezf + * change marker to not FREE + * [1994/09/22 21:34:48 ezf] + * + * Revision 1.1.2.3 1993/06/07 22:14:06 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:06:13 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:39:03 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:13:38 jeffc] + * + * Revision 1.1 1992/09/30 02:29:53 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 16:44:49 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:28:09 mrt + * Changed to new Mach copyright + * [91/02/01 16:15:31 mrt] + * + * Revision 2.1 89/08/03 15:53:45 rwd + * Created. + * + * Revision 2.2 88/10/18 03:36:20 mwyoung + * Added a form of return that can be used within macros that + * does not result in "statement not reached" noise. + * [88/10/17 mwyoung] + * + * Add MACRO_BEGIN, MACRO_END. + * [88/10/11 mwyoung] + * + * Created. + * [88/10/08 mwyoung] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/macro_help.h + * + * Provide help in making lint-free macro routines + * + */ + +#ifndef _KERN_MACRO_HELP_H_ +#define _KERN_MACRO_HELP_H_ + +#include + +#ifdef lint +boolean_t NEVER; +boolean_t ALWAYS; +#else /* lint */ +#define NEVER FALSE +#define ALWAYS TRUE +#endif /* lint */ + +#define MACRO_BEGIN do { +#define MACRO_END } while (NEVER) + +#define MACRO_RETURN if (ALWAYS) return + +#endif /* _KERN_MACRO_HELP_H_ */ diff --git a/osfmk/kern/misc_protos.h b/osfmk/kern/misc_protos.h new file mode 100644 index 000000000..329bc4db7 --- /dev/null +++ b/osfmk/kern/misc_protos.h @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MISC_PROTOS_H_ +#define _MISC_PROTOS_H_ + +#include +#include +#include +#include +#include +#include +#include + +/* Set a bit in a bit array */ +extern void setbit( + int which, + int *bitmap); + +/* Clear a bit in a bit array */ +extern void clrbit( + int which, + int *bitmap); + +/* Find the first set bit in a bit array */ +extern int ffsbit( + int *bitmap); +extern int ffs( + unsigned int mask); + +/* + * Test if indicated bit is set in bit string. 
+ */ +extern int testbit( + int which, + int *bitmap); + +/* Move arbitrarily-aligned data from one array to another */ +extern void bcopy( + const char *from, + char *to, + vm_size_t nbytes); + +/* Move overlapping, arbitrarily aligned data from one array to another */ +/* Not present on all ports */ +extern void ovbcopy( + const char *from, + char *to, + vm_size_t nbytes); + +extern int bcmp( + const char *a, + const char *b, + vm_size_t len); + +/* Zero an arbitrarily aligned array */ +extern void bzero( + char *from, + vm_size_t nbytes); + +/* Move arbitrarily-aligned data from a user space to kernel space */ +extern boolean_t copyin( + const char *user_addr, + char *kernel_addr, + vm_size_t nbytes); + +/* Move a NUL-terminated string from a user space to kernel space */ +extern boolean_t copyinstr( + const char *user_addr, + char *kernel_addr, + vm_size_t max, + vm_size_t *actual); + +/* Move arbitrarily-aligned data from a user space to kernel space */ +extern boolean_t copyinmsg( + const char *user_addr, + char *kernel_addr, + mach_msg_size_t nbytes); + +/* Move arbitrarily-aligned data from a kernel space to user space */ +extern boolean_t copyout( + const char *kernel_addr, + char *user_addr, + vm_size_t nbytes); + +/* Move arbitrarily-aligned data from a kernel space to user space */ +extern boolean_t copyoutmsg( + const char *kernel_addr, + char *user_addr, + mach_msg_size_t nbytes); + +extern int sscanf(const char *input, const char *fmt, ...); + +extern integer_t sprintf(char *buf, const char *fmt, ...); + +extern void printf(const char *format, ...); + +extern void printf_init(void); + +extern void panic(const char *string, ...); + +extern void panic_init(void); + +extern void log(int level, char *fmt, ...); + +void +_doprnt( + register const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); + +extern void safe_gets( + char *str, + int maxlen); + +extern void cnputcusr(char); + +extern void cnputc(char); + +extern int cngetc(void); + 
+extern int cnmaygetc(void); + +extern int _setjmp( + jmp_buf_t *jmp_buf); + +extern int _longjmp( + jmp_buf_t *jmp_buf, + int value); + +extern void bootstrap_create(void); + +extern void halt_cpu(void); + +extern void halt_all_cpus( + boolean_t reboot); + +extern void Debugger( + const char * message); + +extern void delay( + int n); + +extern char *machine_boot_info( + char *buf, + vm_size_t buf_len); + +/* + * Machine-dependent routine to fill in an array with up to callstack_max + * levels of return pc information. + */ +extern void machine_callstack( + natural_t *buf, + vm_size_t callstack_max); + +extern void consider_machine_collect(void); + +extern void norma_bootstrap(void); + +#if DIPC +extern boolean_t no_bootstrap_task(void); +extern ipc_port_t get_root_master_device_port(void); +#endif /* DIPC */ + +#endif /* _MISC_PROTOS_H_ */ diff --git a/osfmk/kern/mk_sp.c b/osfmk/kern/mk_sp.c new file mode 100644 index 000000000..2ba137750 --- /dev/null +++ b/osfmk/kern/mk_sp.c @@ -0,0 +1,1280 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +/*** + *** ??? The following lines were picked up when code was incorporated + *** into this file from `kern/syscall_subr.c.' These should be moved + *** with the code if it moves again. Otherwise, they should be trimmed, + *** based on the files included above. + ***/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/*** + *** ??? End of lines picked up when code was incorporated + *** into this file from `kern/syscall_subr.c.' + ***/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Forwards */ +void _mk_sp_thread_depress_priority( + sf_object_t policy, + mach_msg_timeout_t depress_time); + +/*** + *** ??? The next two files supply the prototypes for `thread_set_policy()' + *** and `thread_policy.' These routines cannot stay here if they are + *** exported Mach system calls. 
+ ***/ +#include +#include + +/* + * Vector containing standard scheduling policy operations + */ +sp_ops_t mk_sp_ops = { + _mk_sp_thread_update_mpri, + _mk_sp_thread_unblock, + _mk_sp_thread_done, + _mk_sp_thread_begin, + _mk_sp_thread_dispatch, + _mk_sp_thread_attach, + _mk_sp_thread_detach, + _mk_sp_thread_processor, + _mk_sp_thread_processor_set, + _mk_sp_thread_setup, + _mk_sp_swtch_pri, + _mk_sp_thread_switch, + _mk_sp_thread_depress_abort, + _mk_sp_thread_depress_timeout, + _mk_sp_thread_runnable, +}; + +/* Forwards */ +kern_return_t thread_policy_common( + thread_t thread, + int policy, + int data, + processor_set_t pset); + +/* + * Standard operations for MK Scheduling Policy + */ + +sf_return_t +_mk_sp_thread_update_mpri( + sf_object_t policy, + thread_t thread) +{ + if (thread->sched_stamp != sched_tick) + update_priority(thread); + + return(SF_SUCCESS); +} + +sf_return_t +_mk_sp_thread_unblock( + sf_object_t policy, + thread_t thread) +{ + /* indicate thread is now runnable */ + thread->sp_state = MK_SP_RUNNABLE; + + /* place thread at end of appropriate run queue */ + if (!(thread->state&TH_IDLE)) + thread_setrun(thread, TRUE, TAIL_Q); + + return(SF_SUCCESS); +} + +sf_return_t +_mk_sp_thread_done( + sf_object_t policy, + thread_t old_thread) +{ + processor_t myprocessor = cpu_to_processor(cpu_number()); + + /* + * A running thread is being taken off a processor: + * + * - update the thread's `unconsumed_quantum' field + * - update the thread's state field + */ + + old_thread->unconsumed_quantum = myprocessor->quantum; + + if (old_thread->state & TH_WAIT) + old_thread->sp_state = MK_SP_BLOCKED; + + return(SF_SUCCESS); +} + +sf_return_t +_mk_sp_thread_begin( + sf_object_t policy, + thread_t thread) +{ + + processor_t myprocessor = cpu_to_processor(cpu_number()); + processor_set_t pset; + + pset = myprocessor->processor_set; + /* + * The designated thread is about to begin execution: + * + * - update the processor's `quantum' field + */ + /* check for 
legal thread state */ + assert(thread->sp_state == MK_SP_RUNNABLE); + + if (thread->policy & (POLICY_RR|POLICY_FIFO)) + myprocessor->quantum = thread->unconsumed_quantum; + else + myprocessor->quantum = (thread->bound_processor ? + min_quantum : pset->set_quantum); + + return(SF_SUCCESS); +} + +sf_return_t +_mk_sp_thread_dispatch( + sf_object_t policy, + thread_t old_thread) +{ + if (old_thread->sp_state & MK_SP_RUNNABLE) { + if (old_thread->reason & AST_QUANTUM) { + thread_setrun(old_thread, FALSE, TAIL_Q); + old_thread->unconsumed_quantum = min_quantum; + } + else + thread_setrun(old_thread, FALSE, HEAD_Q); + } + + if (old_thread->sp_state & MK_SP_ATTACHED) { + /* indicate thread is now runnable */ + old_thread->sp_state = MK_SP_RUNNABLE; + + /* place thread at end of appropriate run queue */ + thread_setrun(old_thread, FALSE, TAIL_Q); + } + + return(SF_SUCCESS); +} + +/* + * Thread must already be locked. + */ +sf_return_t +_mk_sp_thread_attach( + sf_object_t policy, + thread_t thread) +{ + thread->sp_state = MK_SP_ATTACHED; + + thread->max_priority = thread->priority = BASEPRI_DEFAULT; + thread->depress_priority = -1; + + thread->cpu_usage = 0; + thread->sched_usage = 0; + thread->sched_stamp = 0; + + thread->unconsumed_quantum = min_quantum; + + /* Reflect this policy in thread data structure */ + thread->policy = policy->policy_id; + + return(SF_SUCCESS); +} + +/* + * Check to make sure that thread is removed from run + * queues and active execution; and clear pending + * priority depression. + * + * Thread must already be locked. 
+ */ +sf_return_t +_mk_sp_thread_detach( + sf_object_t policy, + thread_t thread) +{ + struct run_queue *rq; + + assert(thread->policy == policy->policy_id); + + /* make sure that the thread is no longer on any run queue */ + if (thread->runq != RUN_QUEUE_NULL) { + rq = rem_runq(thread); + if (rq == RUN_QUEUE_NULL) { + panic("mk_sp_thread_detach: missed thread"); + } + } + + /* clear pending priority depression */ + + if (thread->depress_priority >= 0) { + thread->priority = thread->depress_priority; + thread->depress_priority = -1; + if (thread_call_cancel(&thread->depress_timer)) + thread_call_enter(&thread->depress_timer); + } + + /* clear the thread's policy field */ + thread->policy = POLICY_NULL; + + return(SF_SUCCESS); +} + +sf_return_t +_mk_sp_thread_processor( + sf_object_t policy, + thread_t *thread, + processor_t processor) +{ + return(SF_FAILURE); +} + +sf_return_t +_mk_sp_thread_processor_set( + sf_object_t policy, + thread_t thread, + processor_set_t processor_set) +{ + pset_add_thread(processor_set, thread); + + return(SF_SUCCESS); +} + +sf_return_t +_mk_sp_thread_setup( + sf_object_t policy, + thread_t thread) +{ + /* + * Determine thread's state. (It may be an "older" thread + * that has just been associated with this policy.) + */ + if (thread->state & TH_WAIT) + thread->sp_state = MK_SP_BLOCKED; + + /* recompute priority */ + thread->sched_stamp = sched_tick; + compute_priority(thread, TRUE); + + return(SF_SUCCESS); +} + +/* + * thread_priority_internal: + * + * Kernel-internal work function for thread_priority(). Called + * with thread "properly locked" to ensure synchrony with RPC + * (see act_lock_thread()). + */ +kern_return_t +thread_priority_internal( + thread_t thread, + int priority) +{ + kern_return_t result = KERN_SUCCESS; + spl_t s; + + s = splsched(); + thread_lock(thread); + + /* + * Check for violation of max priority + */ + if (priority > thread->max_priority) + priority = thread->max_priority; + + /* + * Set priorities. 
If a depression is in progress, + * change the priority to restore. + */ + if (thread->depress_priority >= 0) + thread->depress_priority = priority; + else { + thread->priority = priority; + compute_priority(thread, TRUE); + + /* + * If the current thread has changed its + * priority let the ast code decide whether + * a different thread should run. + */ + if (thread == current_thread()) + ast_on(AST_BLOCK); + } + + thread_unlock(thread); + splx(s); + + return (result); +} + +/* + * thread_policy_common: + * + * Set scheduling policy for thread. If pset == PROCESSOR_SET_NULL, + * policy will be checked to make sure it is enabled. + */ +kern_return_t +thread_policy_common( + thread_t thread, + integer_t policy, + integer_t data, + processor_set_t pset) +{ + kern_return_t result = KERN_SUCCESS; + register int temp; + spl_t s; + + if ( thread == THREAD_NULL || + invalid_policy(policy) ) + return(KERN_INVALID_ARGUMENT); + + s = splsched(); + thread_lock(thread); + + /* + * Check if changing policy. + */ + if (policy != thread->policy) { + /* + * Changing policy. Check if new policy is allowed. + */ + if ( pset == PROCESSOR_SET_NULL && + (thread->processor_set->policies & policy) == 0 ) + result = KERN_FAILURE; + else { + if (pset != thread->processor_set) + result = KERN_FAILURE; + else { + /* + * Changing policy. Calculate new + * priority. + */ + thread->policy = policy; + compute_priority(thread, TRUE); + } + } + } + + thread_unlock(thread); + splx(s); + + return (result); +} + +/* + * thread_set_policy + * + * Set scheduling policy and parameters, both base and limit, for + * the given thread. Policy can be any policy implemented by the + * processor set, whether enabled or not. 
+ */ +kern_return_t +thread_set_policy( + thread_act_t thr_act, + processor_set_t pset, + policy_t policy, + policy_base_t base, + mach_msg_type_number_t base_count, + policy_limit_t limit, + mach_msg_type_number_t limit_count) +{ + thread_t thread; + int max, bas, dat, incr; + kern_return_t result = KERN_SUCCESS; + + if ( thr_act == THR_ACT_NULL || + pset == PROCESSOR_SET_NULL ) + return (KERN_INVALID_ARGUMENT); + + thread = act_lock_thread(thr_act); + if (thread == THREAD_NULL) { + act_unlock_thread(thr_act); + + return(KERN_INVALID_ARGUMENT); + } + + if (pset != thread->processor_set) { + act_unlock_thread(thr_act); + + return(KERN_FAILURE); + } + + switch (policy) { + + case POLICY_RR: + { + policy_rr_base_t rr_base = (policy_rr_base_t) base; + policy_rr_limit_t rr_limit = (policy_rr_limit_t) limit; + + if ( base_count != POLICY_RR_BASE_COUNT || + limit_count != POLICY_RR_LIMIT_COUNT ) { + result = KERN_INVALID_ARGUMENT; + break; + } + + dat = rr_base->quantum; + bas = rr_base->base_priority; + max = rr_limit->max_priority; + if (invalid_pri(bas) || invalid_pri(max)) { + result = KERN_INVALID_ARGUMENT; + break; + } + + break; + } + + case POLICY_FIFO: + { + policy_fifo_base_t fifo_base = (policy_fifo_base_t) base; + policy_fifo_limit_t fifo_limit = (policy_fifo_limit_t) limit; + + if ( base_count != POLICY_FIFO_BASE_COUNT || + limit_count != POLICY_FIFO_LIMIT_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + dat = 0; + bas = fifo_base->base_priority; + max = fifo_limit->max_priority; + if (invalid_pri(bas) || invalid_pri(max)) { + result = KERN_INVALID_ARGUMENT; + break; + } + + break; + } + + case POLICY_TIMESHARE: + { + policy_timeshare_base_t ts_base = (policy_timeshare_base_t) base; + policy_timeshare_limit_t ts_limit = + (policy_timeshare_limit_t) limit; + + if ( base_count != POLICY_TIMESHARE_BASE_COUNT || + limit_count != POLICY_TIMESHARE_LIMIT_COUNT ) { + result = KERN_INVALID_ARGUMENT; + break; + } + + dat = 0; + bas = ts_base->base_priority; 
+ max = ts_limit->max_priority; + if (invalid_pri(bas) || invalid_pri(max)) { + result = KERN_INVALID_ARGUMENT; + break; + } + + break; + } + + default: + result = KERN_INVALID_POLICY; + } + + if (result != KERN_SUCCESS) { + act_unlock_thread(thr_act); + + return(result); + } + + result = thread_priority_internal(thread, bas); + if (result == KERN_SUCCESS) + result = thread_policy_common(thread, policy, dat, pset); + act_unlock_thread(thr_act); + + return(result); +} + + +/* + * thread_policy + * + * Set scheduling policy and parameters, both base and limit, for + * the given thread. Policy must be a policy which is enabled for the + * processor set. Change contained threads if requested. + */ +kern_return_t +thread_policy( + thread_act_t thr_act, + policy_t policy, + policy_base_t base, + mach_msg_type_number_t count, + boolean_t set_limit) +{ + thread_t thread; + processor_set_t pset; + kern_return_t result = KERN_SUCCESS; + policy_limit_t limit; + int limcount; + policy_rr_limit_data_t rr_limit; + policy_fifo_limit_data_t fifo_limit; + policy_timeshare_limit_data_t ts_limit; + + if (thr_act == THR_ACT_NULL) + return (KERN_INVALID_ARGUMENT); + + thread = act_lock_thread(thr_act); + pset = thread->processor_set; + if ( thread == THREAD_NULL || + pset == PROCESSOR_SET_NULL ){ + act_unlock_thread(thr_act); + + return(KERN_INVALID_ARGUMENT); + } + + if ( invalid_policy(policy) || + (pset->policies & policy) == 0 ) { + act_unlock_thread(thr_act); + + return(KERN_INVALID_POLICY); + } + + if (set_limit) { + /* + * Set scheduling limits to base priority. 
+ */ + switch (policy) { + + case POLICY_RR: + { + policy_rr_base_t rr_base; + + if (count != POLICY_RR_BASE_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + limcount = POLICY_RR_LIMIT_COUNT; + rr_base = (policy_rr_base_t) base; + rr_limit.max_priority = rr_base->base_priority; + limit = (policy_limit_t) &rr_limit; + + break; + } + + case POLICY_FIFO: + { + policy_fifo_base_t fifo_base; + + if (count != POLICY_FIFO_BASE_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + limcount = POLICY_FIFO_LIMIT_COUNT; + fifo_base = (policy_fifo_base_t) base; + fifo_limit.max_priority = fifo_base->base_priority; + limit = (policy_limit_t) &fifo_limit; + + break; + } + + case POLICY_TIMESHARE: + { + policy_timeshare_base_t ts_base; + + if (count != POLICY_TIMESHARE_BASE_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + limcount = POLICY_TIMESHARE_LIMIT_COUNT; + ts_base = (policy_timeshare_base_t) base; + ts_limit.max_priority = ts_base->base_priority; + limit = (policy_limit_t) &ts_limit; + + break; + } + + default: + result = KERN_INVALID_POLICY; + break; + } + + } + else { + /* + * Use current scheduling limits. Ensure that the + * new base priority will not exceed current limits. 
+ */ + switch (policy) { + + case POLICY_RR: + { + policy_rr_base_t rr_base; + + if (count != POLICY_RR_BASE_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + limcount = POLICY_RR_LIMIT_COUNT; + rr_base = (policy_rr_base_t) base; + if (rr_base->base_priority > thread->max_priority) { + result = KERN_POLICY_LIMIT; + break; + } + + rr_limit.max_priority = thread->max_priority; + limit = (policy_limit_t) &rr_limit; + + break; + } + + case POLICY_FIFO: + { + policy_fifo_base_t fifo_base; + + if (count != POLICY_FIFO_BASE_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + limcount = POLICY_FIFO_LIMIT_COUNT; + fifo_base = (policy_fifo_base_t) base; + if (fifo_base->base_priority > thread->max_priority) { + result = KERN_POLICY_LIMIT; + break; + } + + fifo_limit.max_priority = thread->max_priority; + limit = (policy_limit_t) &fifo_limit; + + break; + } + + case POLICY_TIMESHARE: + { + policy_timeshare_base_t ts_base; + + if (count != POLICY_TIMESHARE_BASE_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + limcount = POLICY_TIMESHARE_LIMIT_COUNT; + ts_base = (policy_timeshare_base_t) base; + if (ts_base->base_priority > thread->max_priority) { + result = KERN_POLICY_LIMIT; + break; + } + + ts_limit.max_priority = thread->max_priority; + limit = (policy_limit_t) &ts_limit; + + break; + } + + default: + result = KERN_INVALID_POLICY; + break; + } + + } + + act_unlock_thread(thr_act); + + if (result == KERN_SUCCESS) + result = thread_set_policy(thr_act, pset, + policy, base, count, limit, limcount); + + return(result); +} + +/* + * Define shifts for simulating (5/8)**n + */ + +shift_data_t wait_shift[32] = { + {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7}, + {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13}, + {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18}, + {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27}}; + +/* + * do_priority_computation: + * + * Calculate new priority for thread based on 
its base priority plus + * accumulated usage. PRI_SHIFT and PRI_SHIFT_2 convert from + * usage to priorities. SCHED_SHIFT converts for the scaling + * of the sched_usage field by SCHED_SCALE. This scaling comes + * from the multiplication by sched_load (thread_timer_delta) + * in sched.h. sched_load is calculated as a scaled overload + * factor in compute_mach_factor (mach_factor.c). + */ +#ifdef PRI_SHIFT_2 +#if PRI_SHIFT_2 > 0 +#define do_priority_computation(thread, pri) \ + MACRO_BEGIN \ + (pri) = (thread)->priority /* start with base priority */ \ + - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \ + - ((thread)->sched_usage >> (PRI_SHIFT_2 + SCHED_SHIFT)); \ + if ((pri) < MINPRI_STANDARD) \ + (pri) = MINPRI_STANDARD; \ + else \ + if ((pri) > MAXPRI_STANDARD) \ + (pri) = MAXPRI_STANDARD; \ + MACRO_END +#else /* PRI_SHIFT_2 */ +#define do_priority_computation(thread, pri) \ + MACRO_BEGIN \ + (pri) = (thread)->priority /* start with base priority */ \ + - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)) \ + + ((thread)->sched_usage >> (SCHED_SHIFT - PRI_SHIFT_2)); \ + if ((pri) < MINPRI_STANDARD) \ + (pri) = MINPRI_STANDARD; \ + else \ + if ((pri) > MAXPRI_STANDARD) \ + (pri) = MAXPRI_STANDARD; \ + MACRO_END +#endif /* PRI_SHIFT_2 */ +#else /* defined(PRI_SHIFT_2) */ +#define do_priority_computation(thread, pri) \ + MACRO_BEGIN \ + (pri) = (thread)->priority /* start with base priority */ \ + - ((thread)->sched_usage >> (PRI_SHIFT + SCHED_SHIFT)); \ + if ((pri) < MINPRI_STANDARD) \ + (pri) = MINPRI_STANDARD; \ + else \ + if ((pri) > MAXPRI_STANDARD) \ + (pri) = MAXPRI_STANDARD; \ + MACRO_END +#endif /* defined(PRI_SHIFT_2) */ + +/* + * compute_priority: + * + * Compute the effective priority of the specified thread. + * The effective priority computation is as follows: + * + * Take the base priority for this thread and add + * to it an increment derived from its cpu_usage. + * + * The thread *must* be locked by the caller. 
+ */ + +void +compute_priority( + register thread_t thread, + boolean_t resched) +{ + register int pri; + + if (thread->policy == POLICY_TIMESHARE) { + do_priority_computation(thread, pri); + if (thread->depress_priority < 0) + set_pri(thread, pri, resched); + else + thread->depress_priority = pri; + } + else + set_pri(thread, thread->priority, resched); +} + +/* + * compute_my_priority: + * + * Version of compute priority for current thread or thread + * being manipulated by scheduler (going on or off a runq). + * Only used for priority updates. Policy or priority changes + * must call compute_priority above. Caller must have thread + * locked and know it is timesharing and not depressed. + */ + +void +compute_my_priority( + register thread_t thread) +{ + register int pri; + + do_priority_computation(thread, pri); + assert(thread->runq == RUN_QUEUE_NULL); + thread->sched_pri = pri; +} + +#if DEBUG +struct mk_sp_usage { + natural_t cpu_delta, sched_delta; + natural_t sched_tick, ticks; + natural_t cpu_usage, sched_usage, + aged_cpu, aged_sched; + thread_t thread; +} idled_info, loaded_info; +#endif + +/* + * update_priority + * + * Cause the priority computation of a thread that has been + * sleeping or suspended to "catch up" with the system. Thread + * *MUST* be locked by caller. If thread is running, then this + * can only be called by the thread on itself. + */ +void +update_priority( + register thread_t thread) +{ + register unsigned int ticks; + register shift_t shiftp; + + ticks = sched_tick - thread->sched_stamp; + assert(ticks != 0); + + /* + * If asleep for more than 30 seconds forget all + * cpu_usage, else catch up on missed aging. + * 5/8 ** n is approximated by the two shifts + * in the wait_shift array. 
+ */ + thread->sched_stamp += ticks; + thread_timer_delta(thread); + if (ticks > 30) { + thread->cpu_usage = 0; + thread->sched_usage = 0; + } + else { +#if DEBUG + struct mk_sp_usage *sp_usage; +#endif + + thread->cpu_usage += thread->cpu_delta; + thread->sched_usage += thread->sched_delta; + +#if DEBUG + if (thread->state & TH_IDLE) + sp_usage = &idled_info; + else + if (thread == loaded_info.thread) + sp_usage = &loaded_info; + else + sp_usage = NULL; + + if (sp_usage != NULL) { + sp_usage->cpu_delta = thread->cpu_delta; + sp_usage->sched_delta = thread->sched_delta; + sp_usage->sched_tick = thread->sched_stamp; + sp_usage->ticks = ticks; + sp_usage->cpu_usage = thread->cpu_usage; + sp_usage->sched_usage = thread->sched_usage; + sp_usage->thread = thread; + } +#endif + + shiftp = &wait_shift[ticks]; + if (shiftp->shift2 > 0) { + thread->cpu_usage = + (thread->cpu_usage >> shiftp->shift1) + + (thread->cpu_usage >> shiftp->shift2); + thread->sched_usage = + (thread->sched_usage >> shiftp->shift1) + + (thread->sched_usage >> shiftp->shift2); + } + else { + thread->cpu_usage = + (thread->cpu_usage >> shiftp->shift1) - + (thread->cpu_usage >> -(shiftp->shift2)); + thread->sched_usage = + (thread->sched_usage >> shiftp->shift1) - + (thread->sched_usage >> -(shiftp->shift2)); + } + +#if DEBUG + if (sp_usage != NULL) { + sp_usage->aged_cpu = thread->cpu_usage; + sp_usage->aged_sched = thread->sched_usage; + } +#endif + } + thread->cpu_delta = 0; + thread->sched_delta = 0; + + /* + * Recompute priority if appropriate. 
+ */ + if ( thread->policy == POLICY_TIMESHARE && + thread->depress_priority < 0 ) { + register int new_pri; + run_queue_t runq; + + do_priority_computation(thread, new_pri); + if (new_pri != thread->sched_pri) { + runq = rem_runq(thread); + thread->sched_pri = new_pri; + if (runq != RUN_QUEUE_NULL) + thread_setrun(thread, TRUE, TAIL_Q); + } + } +} + +/* + * `mk_sp_swtch_pri()' attempts to context switch (logic in + * thread_block no-ops the context switch if nothing would happen). + * A boolean is returned that indicates whether there is anything + * else runnable. + * + * This boolean can be used by a thread waiting on a + * lock or condition: If FALSE is returned, the thread is justified + * in becoming a resource hog by continuing to spin because there's + * nothing else useful that the processor could do. If TRUE is + * returned, the thread should make one more check on the + * lock and then be a good citizen and really suspend. + */ + +void +_mk_sp_swtch_pri( + sf_object_t policy, + int pri) +{ + register thread_t self = current_thread(); + extern natural_t min_quantum_ms; + +#ifdef lint + pri++; +#endif /* lint */ + + /* + * XXX need to think about depression duration. + * XXX currently using min quantum. + */ + _mk_sp_thread_depress_priority(policy, min_quantum_ms); + + thread_block((void (*)(void)) 0); + + _mk_sp_thread_depress_abort(policy, self); +} + +/* + * thread_switch_continue: + * + * Continuation routine for a thread switch. + * + * Just need to arrange the return value gets sent out correctly and that + * we cancel the timer or the depression called for by the options to the + * thread_switch call. 
+ */ +void +_mk_sp_thread_switch_continue(void) +{ + thread_t self = current_thread(); + int wait_result = self->wait_result; + int option = self->saved.swtch.option; + sf_object_t policy = self->saved.swtch.policy; + + if (option == SWITCH_OPTION_WAIT && wait_result != THREAD_TIMED_OUT) + thread_cancel_timer(); + else if (option == SWITCH_OPTION_DEPRESS) + _mk_sp_thread_depress_abort(policy, self); + thread_syscall_return(KERN_SUCCESS); +} + +/* + * thread_switch: + * + * Context switch. User may supply thread hint. + * + * Fixed priority threads that call this get what they asked for + * even if that violates priority order. + */ +kern_return_t +_mk_sp_thread_switch( + sf_object_t policy, + thread_act_t hint_act, + int option, + mach_msg_timeout_t option_time) +{ + register thread_t self = current_thread(); + register processor_t myprocessor; + int s; + + /* + * Check and use thr_act hint if appropriate. It is not + * appropriate to give a hint that shares the current shuttle. + */ + if (hint_act != THR_ACT_NULL) { + register thread_t thread = act_lock_thread(hint_act); + + if ( thread != THREAD_NULL && + thread != self && + thread->top_act == hint_act ) { + s = splsched(); + thread_lock(thread); + + /* + * Check if the thread is in the right pset. Then + * pull it off its run queue. If it + * doesn't come, then it's not eligible. + */ + if ( thread->processor_set == self->processor_set && + rem_runq(thread) != RUN_QUEUE_NULL ) { + /* + * Hah, got it!! 
+ */ + if (thread->policy & (POLICY_FIFO|POLICY_RR)) { + myprocessor = current_processor(); + + myprocessor->quantum = thread->unconsumed_quantum; + myprocessor->first_quantum = TRUE; + } + thread_unlock(thread); + + act_unlock_thread(hint_act); + act_deallocate(hint_act); + + if (option == SWITCH_OPTION_WAIT) + assert_wait_timeout(option_time, THREAD_ABORTSAFE); + else if (option == SWITCH_OPTION_DEPRESS) + _mk_sp_thread_depress_priority(policy, option_time); + + self->saved.swtch.policy = policy; + self->saved.swtch.option = option; + + thread_run(self, _mk_sp_thread_switch_continue, thread); + splx(s); + + goto out; + } + + thread_unlock(thread); + splx(s); + } + + act_unlock_thread(hint_act); + act_deallocate(hint_act); + } + + /* + * No handoff hint supplied, or hint was wrong. Call thread_block() in + * hopes of running something else. If nothing else is runnable, + * thread_block will detect this. WARNING: thread_switch with no + * option will not do anything useful if the thread calling it is the + * highest priority thread (can easily happen with a collection + * of timesharing threads). 
+ */ + mp_disable_preemption(); + myprocessor = current_processor(); + if ( option != SWITCH_OPTION_NONE || + myprocessor->processor_set->runq.count > 0 || + myprocessor->runq.count > 0 ) { + myprocessor->first_quantum = FALSE; + mp_enable_preemption(); + + if (option == SWITCH_OPTION_WAIT) + assert_wait_timeout(option_time, THREAD_ABORTSAFE); + else if (option == SWITCH_OPTION_DEPRESS) + _mk_sp_thread_depress_priority(policy, option_time); + + self->saved.swtch.policy = policy; + self->saved.swtch.option = option; + + thread_block(_mk_sp_thread_switch_continue); + } + else + mp_enable_preemption(); + +out: + if (option == SWITCH_OPTION_WAIT) + thread_cancel_timer(); + else if (option == SWITCH_OPTION_DEPRESS) + _mk_sp_thread_depress_abort(policy, self); + + return (KERN_SUCCESS); +} + +/* + * mk_sp_thread_depress_priority + * + * Depress thread's priority to lowest possible for specified period. + * Intended for use when thread wants a lock but doesn't know which + * other thread is holding it. As with thread_switch, fixed + * priority threads get exactly what they asked for. Users access + * this by the SWITCH_OPTION_DEPRESS option to thread_switch. A Time + * of zero will result in no timeout being scheduled. + */ +void +_mk_sp_thread_depress_priority( + sf_object_t policy, + mach_msg_timeout_t interval) +{ + register thread_t self = current_thread(); + AbsoluteTime deadline; + boolean_t release = FALSE; + spl_t s; + + s = splsched(); + thread_lock(self); + + if (self->policy == policy->policy_id) { + /* + * If we haven't already saved the priority to be restored + * (depress_priority), then save it. 
+ */ + if (self->depress_priority < 0) + self->depress_priority = self->priority; + else if (thread_call_cancel(&self->depress_timer)) + release = TRUE; + + self->sched_pri = self->priority = DEPRESSPRI; + + if (interval != 0) { + clock_interval_to_deadline( + interval, 1000*NSEC_PER_USEC, &deadline); + thread_call_enter_delayed(&self->depress_timer, deadline); + if (!release) + self->ref_count++; + else + release = FALSE; + } + } + + thread_unlock(self); + splx(s); + + if (release) + thread_deallocate(self); +} + +/* + * mk_sp_thread_depress_timeout: + * + * Timeout routine for priority depression. + */ +void +_mk_sp_thread_depress_timeout( + sf_object_t policy, + register thread_t thread) +{ + spl_t s; + + s = splsched(); + thread_lock(thread); + if (thread->policy == policy->policy_id) { + /* + * If we lose a race with mk_sp_thread_depress_abort, + * then depress_priority might be -1. + */ + if ( thread->depress_priority >= 0 && + !thread_call_is_delayed(&thread->depress_timer, NULL) ) { + thread->priority = thread->depress_priority; + thread->depress_priority = -1; + compute_priority(thread, FALSE); + } + else + if (thread->depress_priority == -2) { + /* + * Thread was temporarily undepressed by thread_suspend, to + * be redepressed in special_handler as it blocks. We need to + * prevent special_handler from redepressing it, since depression + * has timed out: + */ + thread->depress_priority = -1; + } + } + thread_unlock(thread); + splx(s); +} + +/* + * mk_sp_thread_depress_abort: + * + * Prematurely abort priority depression if there is one. 
+ */ +kern_return_t +_mk_sp_thread_depress_abort( + sf_object_t policy, + register thread_t thread) +{ + kern_return_t result = KERN_SUCCESS; + boolean_t release = FALSE; + spl_t s; + + s = splsched(); + thread_lock(thread); + + if (thread->policy == policy->policy_id) { + if (thread->depress_priority >= 0) { + if (thread_call_cancel(&thread->depress_timer)) + release = TRUE; + thread->priority = thread->depress_priority; + thread->depress_priority = -1; + compute_priority(thread, FALSE); + } + else + result = KERN_NOT_DEPRESSED; + } + + thread_unlock(thread); + splx(s); + + if (release) + thread_deallocate(thread); + + return (result); +} + +/* + * mk_sp_thread_runnable: + * + * Return TRUE iff policy believes thread is runnable + */ +boolean_t +_mk_sp_thread_runnable( + sf_object_t policy, + thread_t thread) +{ + return (thread->sp_state == MK_SP_RUNNABLE); +} diff --git a/osfmk/kern/mk_sp.h b/osfmk/kern/mk_sp.h new file mode 100644 index 000000000..c9ebcddd2 --- /dev/null +++ b/osfmk/kern/mk_sp.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _KERN_MK_SP_H_ +#define _KERN_MK_SP_H_ + +/* +* Include Files +*/ + +/* + * Scheduling policy operation prototypes + */ + +sf_return_t _mk_sp_init( + sf_object_t policy, + int policy_id); + +sf_return_t _mk_sp_enable_processor_set( + sf_object_t policy, + processor_set_t processor_set); + +sf_return_t _mk_sp_disable_processor_set( + sf_object_t policy, + processor_set_t processor_set); + +sf_return_t _mk_sp_enable_processor( + sf_object_t policy, + processor_t processor); + +sf_return_t _mk_sp_disable_processor( + sf_object_t policy, + processor_t processor); + +sf_return_t _mk_sp_thread_update_mpri( + sf_object_t policy, + thread_t thread); + +sf_return_t _mk_sp_thread_unblock( + sf_object_t policy, + thread_t thread); + +sf_return_t _mk_sp_thread_done( + sf_object_t policy, + thread_t old_thread); + +sf_return_t _mk_sp_thread_begin( + sf_object_t policy, + thread_t new_thread); + +sf_return_t _mk_sp_thread_dispatch( + sf_object_t policy, + thread_t old_thread); + +sf_return_t _mk_sp_thread_attach( + sf_object_t policy, + thread_t thread); + +sf_return_t _mk_sp_thread_detach( + sf_object_t policy, + thread_t thread); + +sf_return_t _mk_sp_thread_processor( + sf_object_t policy, + thread_t *thread, + processor_t processor); + +sf_return_t _mk_sp_thread_processor_set( + sf_object_t policy, + thread_t thread, + processor_set_t processor_set); + +sf_return_t _mk_sp_thread_setup( + sf_object_t policy, + thread_t thread); + +void _mk_sp_swtch_pri( + sf_object_t policy, + int pri); + +kern_return_t _mk_sp_thread_switch( + sf_object_t policy, + thread_act_t hint_act, + int option, + mach_msg_timeout_t option_time); + +kern_return_t _mk_sp_thread_depress_abort( + sf_object_t policy, + thread_t thread); + +void _mk_sp_thread_depress_timeout( + sf_object_t policy, + thread_t thread); + +boolean_t _mk_sp_thread_runnable( + sf_object_t policy, + thread_t thread); + +#define MK_SP_ATTACHED ( 
0x0001 ) +#define MK_SP_RUNNABLE ( 0x0002 ) +#define MK_SP_BLOCKED ( 0x0004 ) + +/* + * Definitions of standard scheduling operations for this policy + */ +extern sp_ops_t mk_sp_ops; + +#endif /* _KERN_MK_SP_H_ */ diff --git a/osfmk/kern/mk_timer.c b/osfmk/kern/mk_timer.c new file mode 100644 index 000000000..0f6850a13 --- /dev/null +++ b/osfmk/kern/mk_timer.c @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 29 June 2000 (debo) + * Created. 
+ */ + +#include +#include + +#include + +#include + +#include +#include + +static zone_t mk_timer_zone; + +static mach_port_qos_t mk_timer_qos = { + FALSE, TRUE, 0, sizeof (mk_timer_expire_msg_t) +}; + +static void mk_timer_expire( + void *p0, + void *p1); + +mach_port_name_t +mk_timer_create(void) +{ + mk_timer_t timer; + ipc_space_t myspace = current_space(); + mach_port_name_t name = MACH_PORT_NULL; + ipc_port_t port; + kern_return_t result; + + timer = (mk_timer_t)zalloc(mk_timer_zone); + if (timer == NULL) + return (MACH_PORT_NULL); + + result = mach_port_allocate_qos(myspace, MACH_PORT_RIGHT_RECEIVE, + &mk_timer_qos, &name); + if (result == KERN_SUCCESS) + result = ipc_port_translate_receive(myspace, name, &port); + + if (result != KERN_SUCCESS) { + zfree(mk_timer_zone, (vm_offset_t)timer); + + return (MACH_PORT_NULL); + } + + simple_lock_init(&timer->lock, ETAP_MISC_TIMER); + call_entry_setup(&timer->call_entry, mk_timer_expire, timer); + timer->is_armed = timer->is_dead = FALSE; + timer->active = 0; + + timer->port = port; + ipc_kobject_set_atomically(port, (ipc_kobject_t)timer, IKOT_TIMER); + + port->ip_srights++; + ip_reference(port); + ip_unlock(port); + + return (name); +} + +void +mk_timer_port_destroy( + ipc_port_t port) +{ + mk_timer_t timer = NULL; + + ip_lock(port); + if (ip_kotype(port) == IKOT_TIMER) { + timer = (mk_timer_t)port->ip_kobject; + assert(timer != NULL); + ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); + simple_lock(&timer->lock); + assert(timer->port == port); + } + ip_unlock(port); + + if (timer != NULL) { + if (thread_call_cancel(&timer->call_entry)) + timer->active--; + timer->is_armed = FALSE; + + timer->is_dead = TRUE; + if (timer->active == 0) { + simple_unlock(&timer->lock); + zfree(mk_timer_zone, (vm_offset_t)timer); + + ipc_port_release_send(port); + return; + } + + simple_unlock(&timer->lock); + } +} + +void +mk_timer_initialize(void) +{ + int s = sizeof (mk_timer_data_t); + + assert(!(mk_timer_zone != NULL)); + + 
mk_timer_zone = zinit(s, (4096 * s), (16 * s), "mk_timer"); +} + +static void +mk_timer_expire( + void *p0, + void *p1) +{ + AbsoluteTime time_of_posting; + mk_timer_t timer = p0; + ipc_port_t port; + + clock_get_uptime(&time_of_posting); + + simple_lock(&timer->lock); + + if (timer->active > 1) { + timer->active--; + simple_unlock(&timer->lock); + return; + } + + port = timer->port; + assert(port != IP_NULL); + + while ( timer->is_armed && + !thread_call_is_delayed(&timer->call_entry, NULL) ) { + mk_timer_expire_msg_t msg; + + timer->is_armed = FALSE; + + msg.time_of_arming = timer->time_of_arming; + msg.armed_time = timer->call_entry.deadline; + msg.time_of_posting = time_of_posting; + + simple_unlock(&timer->lock); + + msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0); + msg.header.msgh_remote_port = port; + msg.header.msgh_local_port = MACH_PORT_NULL; + msg.header.msgh_reserved = msg.header.msgh_id = 0; + + (void) mach_msg_send_from_kernel(&msg.header, sizeof (msg)); + + simple_lock(&timer->lock); + } + + if (--timer->active == 0 && timer->is_dead) { + simple_unlock(&timer->lock); + zfree(mk_timer_zone, (vm_offset_t)timer); + + ipc_port_release_send(port); + return; + } + + simple_unlock(&timer->lock); +} + +kern_return_t +mk_timer_destroy( + mach_port_name_t name) +{ + ipc_space_t myspace = current_space(); + ipc_port_t port; + kern_return_t result; + + result = ipc_port_translate_receive(myspace, name, &port); + if (result != KERN_SUCCESS) + return (result); + + if (ip_kotype(port) == IKOT_TIMER) { + ip_unlock(port); + result = mach_port_destroy(myspace, name); + } + else { + ip_unlock(port); + result = KERN_INVALID_ARGUMENT; + } + + return (result); +} + +kern_return_t +mk_timer_arm( + mach_port_name_t name, + AbsoluteTime expire_time) +{ + AbsoluteTime time_of_arming; + mk_timer_t timer; + ipc_space_t myspace = current_space(); + ipc_port_t port; + kern_return_t result; + + clock_get_uptime(&time_of_arming); + + result = 
ipc_port_translate_receive(myspace, name, &port); + if (result != KERN_SUCCESS) + return (result); + + if (ip_kotype(port) == IKOT_TIMER) { + timer = (mk_timer_t)port->ip_kobject; + assert(timer != NULL); + simple_lock(&timer->lock); + assert(timer->port == port); + ip_unlock(port); + + timer->time_of_arming = time_of_arming; + timer->is_armed = TRUE; + + if (!thread_call_enter_delayed(&timer->call_entry, expire_time)) + timer->active++; + simple_unlock(&timer->lock); + } + else { + ip_unlock(port); + result = KERN_INVALID_ARGUMENT; + } + + return (result); +} + +kern_return_t +mk_timer_cancel( + mach_port_name_t name, + AbsoluteTime *result_time) +{ + AbsoluteTime armed_time = { 0, 0 }; + mk_timer_t timer; + ipc_space_t myspace = current_space(); + ipc_port_t port; + kern_return_t result; + + result = ipc_port_translate_receive(myspace, name, &port); + if (result != KERN_SUCCESS) + return (result); + + if (ip_kotype(port) == IKOT_TIMER) { + timer = (mk_timer_t)port->ip_kobject; + assert(timer != NULL); + simple_lock(&timer->lock); + assert(timer->port == port); + ip_unlock(port); + + if (timer->is_armed) { + armed_time = timer->call_entry.deadline; + if (thread_call_cancel(&timer->call_entry)) + timer->active--; + timer->is_armed = FALSE; + } + + simple_unlock(&timer->lock); + } + else { + ip_unlock(port); + result = KERN_INVALID_ARGUMENT; + } + + if (result == KERN_SUCCESS) + if ( result_time != NULL && + copyout((void *)&armed_time, (void *)result_time, + sizeof (armed_time)) != 0 ) + result = KERN_FAILURE; + + return (result); +} diff --git a/osfmk/kern/mk_timer.h b/osfmk/kern/mk_timer.h new file mode 100644 index 000000000..3236e7562 --- /dev/null +++ b/osfmk/kern/mk_timer.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 29 June 2000 (debo) + * Created. + */ + +#ifndef _KERN_MK_TIMER_H_ +#define _KERN_MK_TIMER_H_ + +#ifdef MACH_KERNEL_PRIVATE +#include + +#include + +#include + +struct mk_timer { + decl_simple_lock_data(,lock) + call_entry_data_t call_entry; + AbsoluteTime time_of_arming; + boolean_t is_dead:1, + is_armed:1; + int active; + ipc_port_t port; +}; + +typedef struct mk_timer *mk_timer_t, mk_timer_data_t; + +void mk_timer_port_destroy( + ipc_port_t port); + +void mk_timer_initialize(void); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_MK_TIMER_H_ */ diff --git a/osfmk/kern/norma_protos.h b/osfmk/kern/norma_protos.h new file mode 100644 index 000000000..d00d2901a --- /dev/null +++ b/osfmk/kern/norma_protos.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1995/02/23 17:31:45 alanl + * DIPC: Merge from nmk17b2 to nmk18b8. + * [95/01/05 alanl] + * + * Revision 1.1.10.1 1994/12/01 20:43:40 dwm + * mk6 CR801 - copyright marker not FREE_ + * [1994/12/01 19:25:52 dwm] + * + * Revision 1.1.5.2 1994/09/10 21:47:18 bolinger + * Merge up to NMK17.3 + * [1994/09/08 19:58:04 bolinger] + * + * Revision 1.1.5.1 1994/06/21 19:43:06 dlb + * Bring forward to NMK18 + * [1994/06/17 18:58:04 dlb] + * + * Revision 1.1.2.2 1994/07/22 09:54:09 paire + * Added vm_remap_remote prototype. + * [94/07/05 paire] + * + * Revision 1.1.2.1 1994/12/06 20:11:22 alanl + * Initial revision. Moved here from kern/norma_task.h to avoid a + * name collision with the mig-generated kern/norma_task.h. + * [94/12/05 mmp] + * + * $EndLog$ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#ifndef _KERN_NORMA_PROTOS_H_ +#define _KERN_NORMA_PROTOS_H_ + +extern void task_copy_vm( + ipc_port_t host, + vm_map_t old_map, + boolean_t clone, + boolean_t kill_parent, + ipc_port_t to); + +extern kern_return_t vm_remap_remote( + ipc_port_t target_task_port, + vm_offset_t *target_address, + vm_size_t size, + vm_offset_t mask, + boolean_t anywhere, + ipc_port_t source_task_port, + vm_offset_t source_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); + +#endif /* _KERN_NORMA_PROTOS_H_ */ diff --git a/osfmk/kern/printf.c b/osfmk/kern/printf.c new file mode 100644 index 000000000..bf870083e --- /dev/null +++ b/osfmk/kern/printf.c @@ -0,0 +1,657 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Common code for printf et al. + * + * The calling routine typically takes a variable number of arguments, + * and passes the address of the first one. This implementation + * assumes a straightforward, stack implementation, aligned to the + * machine's wordsize. 
Increasing addresses are assumed to point to + * successive arguments (left-to-right), as is the case for a machine + * with a downward-growing stack with arguments pushed right-to-left. + * + * To write, for example, fprintf() using this routine, the code + * + * fprintf(fd, format, args) + * FILE *fd; + * char *format; + * { + * _doprnt(format, &args, fd); + * } + * + * would suffice. (This example does not handle the fprintf's "return + * value" correctly, but who looks at the return value of fprintf + * anyway?) + * + * This version implements the following printf features: + * + * %d decimal conversion + * %u unsigned conversion + * %x hexadecimal conversion + * %X hexadecimal conversion with capital letters + * %o octal conversion + * %c character + * %s string + * %m.n field width, precision + * %-m.n left adjustment + * %0m.n zero-padding + * %*.* width and precision taken from arguments + * + * This version does not implement %f, %e, or %g. It accepts, but + * ignores, an `l' as in %ld, %lo, %lx, and %lu, and therefore will not + * work correctly on machines for which sizeof(long) != sizeof(int). + * It does not even parse %D, %O, or %U; you should be using %ld, %o and + * %lu if you mean long conversion. + * + * As mentioned, this version does not return any reasonable value. + * + * Permission is granted to use, modify, or propagate this code as + * long as this notice is incorporated. + * + * Steve Summit 3/25/87 + */ + +/* + * Added formats for decoding device registers: + * + * printf("reg = %b", regval, "*") + * + * where is the output base expressed as a control character: + * i.e. '\10' gives octal, '\20' gives hex. Each is a sequence of + * characters, the first of which gives the bit number to be inspected + * (origin 1), and the rest (up to a control character (<= 32)) give the + * name of the register. 
Thus + * printf("reg = %b\n", 3, "\10\2BITTWO\1BITONE") + * would produce + * reg = 3 + * + * If the second character in is also a control character, it + * indicates the last bit of a bit field. In this case, printf will extract + * bits <1> to <2> and print it. Characters following the second control + * character are printed before the bit field. + * printf("reg = %b\n", 0xb, "\10\4\3FIELD1=\2BITTWO\1BITONE") + * would produce + * reg = b + * + * The %B format is like %b but the bits are numbered from the most + * significant (the bit weighted 31), which is called 1, to the least + * significant, called 32. + */ +/* + * Added for general use: + * # prefix for alternate format: + * 0x (0X) for hex + * leading 0 for octal + * + print '+' if positive + * blank print ' ' if positive + * + * z signed hexadecimal + * r signed, 'radix' + * n unsigned, 'radix' + * + * D,U,O,Z same as corresponding lower-case versions + * (compatibility) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef MACH_BSD +#include +#endif + +#ifdef __ppc__ +#include +#endif + +/* + * Forward declarations + */ +void printnum( + register unsigned int u, + register int base, + void (*putc)(char)); + + +#define isdigit(d) ((d) >= '0' && (d) <= '9') +#define Ctod(c) ((c) - '0') + +#define MAXBUF (sizeof(long int) * 8) /* enough for binary */ + +void +printnum( + register unsigned int u, /* number to print */ + register int base, + void (*putc)(char)) +{ + char buf[MAXBUF]; /* build number here */ + register char * p = &buf[MAXBUF-1]; + static char digs[] = "0123456789abcdef"; + + do { + *p-- = digs[u % base]; + u /= base; + } while (u != 0); + + while (++p != &buf[MAXBUF]) + (*putc)(*p); + +} + +boolean_t _doprnt_truncates = FALSE; + +void +_doprnt( + register const char *fmt, + va_list *argp, + /* character output routine */ + void (*putc)(char), + int radix) /* default radix - for '%r' */ +{ + int length; + int prec; + 
boolean_t ladjust; + char padc; + long n; + unsigned long u; + int plus_sign; + int sign_char; + boolean_t altfmt, truncate; + int base; + register char c; + int capitals; + + while ((c = *fmt) != '\0') { + if (c != '%') { + (*putc)(c); + fmt++; + continue; + } + + fmt++; + + length = 0; + prec = -1; + ladjust = FALSE; + padc = ' '; + plus_sign = 0; + sign_char = 0; + altfmt = FALSE; + + while (TRUE) { + c = *fmt; + if (c == '#') { + altfmt = TRUE; + } + else if (c == '-') { + ladjust = TRUE; + } + else if (c == '+') { + plus_sign = '+'; + } + else if (c == ' ') { + if (plus_sign == 0) + plus_sign = ' '; + } + else + break; + fmt++; + } + + if (c == '0') { + padc = '0'; + c = *++fmt; + } + + if (isdigit(c)) { + while(isdigit(c)) { + length = 10 * length + Ctod(c); + c = *++fmt; + } + } + else if (c == '*') { + length = va_arg(*argp, int); + c = *++fmt; + if (length < 0) { + ladjust = !ladjust; + length = -length; + } + } + + if (c == '.') { + c = *++fmt; + if (isdigit(c)) { + prec = 0; + while(isdigit(c)) { + prec = 10 * prec + Ctod(c); + c = *++fmt; + } + } + else if (c == '*') { + prec = va_arg(*argp, int); + c = *++fmt; + } + } + + if (c == 'l') + c = *++fmt; /* need it if sizeof(int) < sizeof(long) */ + + truncate = FALSE; + capitals=0; /* Assume lower case printing */ + + switch(c) { + case 'b': + case 'B': + { + register char *p; + boolean_t any; + register int i; + + u = va_arg(*argp, unsigned long); + p = va_arg(*argp, char *); + base = *p++; + printnum(u, base, putc); + + if (u == 0) + break; + + any = FALSE; + while ((i = *p++) != '\0') { + if (*fmt == 'B') + i = 33 - i; + if (*p <= 32) { + /* + * Bit field + */ + register int j; + if (any) + (*putc)(','); + else { + (*putc)('<'); + any = TRUE; + } + j = *p++; + if (*fmt == 'B') + j = 32 - j; + for (; (c = *p) > 32; p++) + (*putc)(c); + printnum((unsigned)( (u>>(j-1)) & ((2<<(i-j))-1)), + base, putc); + } + else if (u & (1<<(i-1))) { + if (any) + (*putc)(','); + else { + (*putc)('<'); + any = TRUE; + } + 
for (; (c = *p) > 32; p++) + (*putc)(c); + } + else { + for (; *p > 32; p++) + continue; + } + } + if (any) + (*putc)('>'); + break; + } + + case 'c': + c = va_arg(*argp, int); + (*putc)(c); + break; + + case 's': + { + register char *p; + register char *p2; + + if (prec == -1) + prec = 0x7fffffff; /* MAXINT */ + + p = va_arg(*argp, char *); + + if (p == (char *)0) + p = ""; + + if (length > 0 && !ladjust) { + n = 0; + p2 = p; + + for (; *p != '\0' && n < prec; p++) + n++; + + p = p2; + + while (n < length) { + (*putc)(' '); + n++; + } + } + + n = 0; + + while (*p != '\0') { + if (++n > prec || (length > 0 && n > length)) + break; + + (*putc)(*p++); + } + + if (n < length && ladjust) { + while (n < length) { + (*putc)(' '); + n++; + } + } + + break; + } + + case 'o': + truncate = _doprnt_truncates; + case 'O': + base = 8; + goto print_unsigned; + + case 'd': + truncate = _doprnt_truncates; + case 'D': + base = 10; + goto print_signed; + + case 'u': + truncate = _doprnt_truncates; + case 'U': + base = 10; + goto print_unsigned; + + case 'p': + altfmt = TRUE; + case 'x': + truncate = _doprnt_truncates; + base = 16; + goto print_unsigned; + + case 'X': + base = 16; + capitals=16; /* Print in upper case */ + goto print_unsigned; + + case 'z': + truncate = _doprnt_truncates; + base = 16; + goto print_signed; + + case 'Z': + base = 16; + capitals=16; /* Print in upper case */ + goto print_signed; + + case 'r': + truncate = _doprnt_truncates; + case 'R': + base = radix; + goto print_signed; + + case 'n': + truncate = _doprnt_truncates; + case 'N': + base = radix; + goto print_unsigned; + + print_signed: + n = va_arg(*argp, long); + if (n >= 0) { + u = n; + sign_char = plus_sign; + } + else { + u = -n; + sign_char = '-'; + } + goto print_num; + + print_unsigned: + u = va_arg(*argp, unsigned long); + goto print_num; + + print_num: + { + char buf[MAXBUF]; /* build number here */ + register char * p = &buf[MAXBUF-1]; + static char digits[] = 
"0123456789abcdef0123456789ABCDEF"; + char *prefix = 0; + + if (truncate) u = (long)((int)(u)); + + if (u != 0 && altfmt) { + if (base == 8) + prefix = "0"; + else if (base == 16) + prefix = "0x"; + } + + do { + /* Print in the correct case */ + *p-- = digits[(u % base)+capitals]; + u /= base; + } while (u != 0); + + length -= (&buf[MAXBUF-1] - p); + if (sign_char) + length--; + if (prefix) + length -= strlen((const char *) prefix); + + if (padc == ' ' && !ladjust) { + /* blank padding goes before prefix */ + while (--length >= 0) + (*putc)(' '); + } + if (sign_char) + (*putc)(sign_char); + if (prefix) + while (*prefix) + (*putc)(*prefix++); + if (padc == '0') { + /* zero padding goes after sign and prefix */ + while (--length >= 0) + (*putc)('0'); + } + while (++p != &buf[MAXBUF]) + (*putc)(*p); + + if (ladjust) { + while (--length >= 0) + (*putc)(' '); + } + break; + } + + case '\0': + fmt--; + break; + + default: + (*putc)(c); + } + fmt++; + } +} + +#if MP_PRINTF +boolean_t new_printf_cpu_number = FALSE; +#endif /* MP_PRINTF */ + + +decl_simple_lock_data(,printf_lock) +decl_mutex_data(,sprintf_lock) + +void +printf_init(void) +{ + /* + * Lock is only really needed after the first thread is created. 
+ */ + simple_lock_init(&printf_lock, ETAP_MISC_PRINTF); + mutex_init(&sprintf_lock, ETAP_MISC_PRINTF); +} + +/* derived from boot_gets */ +void +safe_gets( + char *str, + int maxlen) +{ + register char *lp; + register int c; + char *strmax = str + maxlen - 1; /* allow space for trailing 0 */ + + lp = str; + for (;;) { + c = cngetc(); + switch (c) { + case '\n': + case '\r': + printf("\n"); + *lp++ = 0; + return; + + case '\b': + case '#': + case '\177': + if (lp > str) { + printf("\b \b"); + lp--; + } + continue; + + case '@': + case 'u'&037: + lp = str; + printf("\n\r"); + continue; + + default: + if (c >= ' ' && c < '\177') { + if (lp < strmax) { + *lp++ = c; + printf("%c", c); + } + else { + printf("%c", '\007'); /* beep */ + } + } + } + } +} + +void +conslog_putc( + char c) +{ + extern unsigned int debug_mode, disableDebugOuput, disableConsoleOutput; + + if ((debug_mode && !disableDebugOuput) || !disableConsoleOutput) + cnputc(c); + +#ifdef MACH_BSD + log_putc(c); +#endif +} + +void +printf(const char *fmt, ...) +{ + va_list listp; + + disable_preemption(); + va_start(listp, fmt); + _doprnt(fmt, &listp, conslog_putc, 16); + va_end(listp); + enable_preemption(); +} + +static char *copybyte_str; + +static void +copybyte( + char byte) +{ + *copybyte_str++ = byte; + *copybyte_str = '\0'; +} + +int +sprintf(char *buf, const char *fmt, ...) +{ + va_list listp; + + va_start(listp, fmt); + mutex_lock(&sprintf_lock); + copybyte_str = buf; + _doprnt(fmt, &listp, copybyte, 16); + mutex_unlock(&sprintf_lock); + va_end(listp); + return strlen(buf); +} diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c new file mode 100644 index 000000000..d88ade25d --- /dev/null +++ b/osfmk/kern/priority.c @@ -0,0 +1,222 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: clock_prim.c + * Author: Avadis Tevanian, Jr. + * Date: 1986 + * + * Clock primitives. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /*** ??? fix so this can be removed ***/ +/*** ??? Should this file be MK SP-specific? Or is it more general purpose? ***/ + + + +/* + * USAGE_THRESHOLD is the amount by which usage must change to + * cause a priority shift that moves a thread between run queues. + */ + +#ifdef PRI_SHIFT_2 +#if PRI_SHIFT_2 > 0 +#define USAGE_THRESHOLD (((1 << PRI_SHIFT) + (1 << PRI_SHIFT_2)) << (2 + SCHED_SHIFT)) +#else /* PRI_SHIFT_2 > 0 */ +#define USAGE_THRESHOLD (((1 << PRI_SHIFT) - (1 << -(PRI_SHIFT_2))) << (2 + SCHED_SHIFT)) +#endif /* PRI_SHIFT_2 > 0 */ +#else /* PRI_SHIFT_2 */ +#define USAGE_THRESHOLD (1 << (PRI_SHIFT + 2 + SCHED_SHIFT)) +#endif /* PRI_SHIFT_2 */ + +/* + * thread_quantum_update: + * + * Recalculate the quantum and priority for a thread. + * The number of ticks that has elapsed since we were last called + * is passed as "nticks." + */ + +void +thread_quantum_update( + register int mycpu, + register thread_t thread, + int nticks, + int state) +{ + register int quantum; + register processor_t myprocessor; + register processor_set_t pset; + spl_t s; + + myprocessor = cpu_to_processor(mycpu); + pset = myprocessor->processor_set; + + /* + * Account for thread's utilization of these ticks. + * This assumes that there is *always* a current thread. + * When the processor is idle, it should be the idle thread. + */ + + /* + * Update set_quantum and calculate the current quantum. + */ + pset->set_quantum = pset->machine_quantum[ + (pset->runq.count > pset->processor_count) ? + pset->processor_count : pset->runq.count]; + + if (myprocessor->runq.count != 0) + quantum = min_quantum; + else + quantum = pset->set_quantum; + + /* + * Now recompute the priority of the thread if appropriate. 
+ */ + + { + s = splsched(); + thread_lock(thread); + + if (!(thread->policy & (POLICY_TIMESHARE|POLICY_RR|POLICY_FIFO))) { + thread_unlock(thread); + splx(s); + return; + } + + if (thread->state&TH_IDLE) { + /* Don't try to time-slice idle threads */ + myprocessor->first_quantum = TRUE; + if (thread->sched_stamp != sched_tick) + update_priority(thread); + thread_unlock(thread); + splx(s); + ast_check(); + return; + } + + myprocessor->quantum -= nticks; + /* + * Runtime quantum adjustment. Use quantum_adj_index + * to avoid synchronizing quantum expirations. + */ + if ( quantum != myprocessor->last_quantum && + pset->processor_count > 1 ) { + myprocessor->last_quantum = quantum; + simple_lock(&pset->quantum_adj_lock); + quantum = min_quantum + (pset->quantum_adj_index * + (quantum - min_quantum)) / + (pset->processor_count - 1); + if (++(pset->quantum_adj_index) >= pset->processor_count) + pset->quantum_adj_index = 0; + simple_unlock(&pset->quantum_adj_lock); + } + if (myprocessor->quantum <= 0) { + if (thread->sched_stamp != sched_tick) + update_priority(thread); + else + if ( thread->policy == POLICY_TIMESHARE && + thread->depress_priority < 0 ) { + thread_timer_delta(thread); + thread->sched_usage += thread->sched_delta; + thread->sched_delta = 0; + compute_my_priority(thread); + } + + /* + * This quantum is up, give this thread another. + */ + myprocessor->first_quantum = FALSE; + if (thread->policy == POLICY_TIMESHARE) + myprocessor->quantum += quantum; + else + myprocessor->quantum += min_quantum; + } + /* + * Recompute priority if appropriate. 
+ */ + else { + if (thread->sched_stamp != sched_tick) + update_priority(thread); + else + if ( thread->policy == POLICY_TIMESHARE && + thread->depress_priority < 0 ) { + thread_timer_delta(thread); + if (thread->sched_delta >= USAGE_THRESHOLD) { + thread->sched_usage += thread->sched_delta; + thread->sched_delta = 0; + compute_my_priority(thread); + } + } + } + + thread_unlock(thread); + splx(s); + + /* + * Check for and schedule ast if needed. + */ + ast_check(); + } +} diff --git a/osfmk/kern/processor.c b/osfmk/kern/processor.c new file mode 100644 index 000000000..4e00eef38 --- /dev/null +++ b/osfmk/kern/processor.c @@ -0,0 +1,1122 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * processor.c: processor and processor_set manipulation routines. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_HOST +#include +zone_t pset_zone; +#endif /* MACH_HOST */ + +#include +#include /*** ??? fix so this can be removed ***/ + +/* + * Exported interface + */ +#include + +/* + * Exported variables. 
+ */ +struct processor_set default_pset; +struct processor processor_array[NCPUS]; + +processor_t master_processor; +processor_t processor_ptr[NCPUS]; + +/* Forwards */ +void pset_init( + processor_set_t pset); + +void processor_init( + register processor_t pr, + int slot_num); + +void quantum_set( + processor_set_t pset); + +kern_return_t processor_set_base( + processor_set_t pset, + policy_t policy, + policy_base_t base, + boolean_t change); + +kern_return_t processor_set_limit( + processor_set_t pset, + policy_t policy, + policy_limit_t limit, + boolean_t change); + +kern_return_t processor_set_things( + processor_set_t pset, + mach_port_t **thing_list, + mach_msg_type_number_t *count, + int type); + + +/* + * Bootstrap the processor/pset system so the scheduler can run. + */ +void +pset_sys_bootstrap(void) +{ + register int i; + + pset_init(&default_pset); + for (i = 0; i < NCPUS; i++) { + /* + * Initialize processor data structures. + * Note that cpu_to_processor(i) is processor_ptr[i]. + */ + processor_ptr[i] = &processor_array[i]; + processor_init(processor_ptr[i], i); + } + master_processor = cpu_to_processor(master_cpu); + default_pset.active = TRUE; +} + +/* + * Initialize the given processor_set structure. 
+ */ + +void pset_init( + register processor_set_t pset) +{ + int i; + + /* setup run-queues */ + simple_lock_init(&pset->runq.lock, ETAP_THREAD_PSET_RUNQ); + pset->runq.count = 0; + for (i = 0; i < NRQBM; i++) { + pset->runq.bitmap[i] = 0; + } + setbit(MAXPRI - IDLEPRI, pset->runq.bitmap); + pset->runq.highq = IDLEPRI; + for (i = 0; i < NRQS; i++) { + queue_init(&(pset->runq.queues[i])); + } + + queue_init(&pset->idle_queue); + pset->idle_count = 0; + simple_lock_init(&pset->idle_lock, ETAP_THREAD_PSET_IDLE); + pset->mach_factor = pset->load_average = 0; + pset->sched_load = 0; + queue_init(&pset->processors); + pset->processor_count = 0; + simple_lock_init(&pset->processors_lock, ETAP_THREAD_PSET); + queue_init(&pset->tasks); + pset->task_count = 0; + queue_init(&pset->threads); + pset->thread_count = 0; + pset->ref_count = 1; + pset->active = FALSE; + mutex_init(&pset->lock, ETAP_THREAD_PSET); + pset->pset_self = IP_NULL; + pset->pset_name_self = IP_NULL; + pset->max_priority = MAXPRI_STANDARD; + pset->policies = POLICY_TIMESHARE | POLICY_FIFO | POLICY_RR; + pset->set_quantum = min_quantum; + + pset->quantum_adj_index = 0; + simple_lock_init(&pset->quantum_adj_lock, ETAP_THREAD_PSET_QUANT); + + for (i = 0; i <= NCPUS; i++) { + pset->machine_quantum[i] = min_quantum; + } + + pset->policy_default = POLICY_TIMESHARE; + pset->policy_limit.ts.max_priority = MAXPRI_STANDARD; + pset->policy_limit.rr.max_priority = MAXPRI_STANDARD; + pset->policy_limit.fifo.max_priority = MAXPRI_STANDARD; + pset->policy_base.ts.base_priority = BASEPRI_DEFAULT; + pset->policy_base.rr.base_priority = BASEPRI_DEFAULT; + pset->policy_base.rr.quantum = min_quantum; + pset->policy_base.fifo.base_priority = BASEPRI_DEFAULT; +} + +/* + * Initialize the given processor structure for the processor in + * the slot specified by slot_num. 
+ */ +void +processor_init( + register processor_t pr, + int slot_num) +{ + int i; + + /* setup run-queues */ + simple_lock_init(&pr->runq.lock, ETAP_THREAD_PROC_RUNQ); + pr->runq.count = 0; + for (i = 0; i < NRQBM; i++) { + pr->runq.bitmap[i] = 0; + } + setbit(MAXPRI - IDLEPRI, pr->runq.bitmap); + pr->runq.highq = IDLEPRI; + for (i = 0; i < NRQS; i++) { + queue_init(&(pr->runq.queues[i])); + } + + queue_init(&pr->processor_queue); + pr->state = PROCESSOR_OFF_LINE; + pr->next_thread = THREAD_NULL; + pr->idle_thread = THREAD_NULL; + pr->quantum = 0; + pr->first_quantum = FALSE; + pr->last_quantum = 0; + pr->processor_set = PROCESSOR_SET_NULL; + pr->processor_set_next = PROCESSOR_SET_NULL; + queue_init(&pr->processors); + simple_lock_init(&pr->lock, ETAP_THREAD_PROC); + pr->processor_self = IP_NULL; + pr->slot_num = slot_num; +} + +/* + * pset_remove_processor() removes a processor from a processor_set. + * It can only be called on the current processor. Caller must + * hold lock on current processor and processor set. + */ +void +pset_remove_processor( + processor_set_t pset, + processor_t processor) +{ + if (pset != processor->processor_set) + panic("pset_remove_processor: wrong pset"); + + queue_remove(&pset->processors, processor, processor_t, processors); + processor->processor_set = PROCESSOR_SET_NULL; + pset->processor_count--; + quantum_set(pset); +} + +/* + * pset_add_processor() adds a processor to a processor_set. + * It can only be called on the current processor. Caller must + * hold lock on curent processor and on pset. No reference counting on + * processors. Processor reference to pset is implicit. + */ +void +pset_add_processor( + processor_set_t pset, + processor_t processor) +{ + queue_enter(&pset->processors, processor, processor_t, processors); + processor->processor_set = pset; + pset->processor_count++; + quantum_set(pset); +} + +/* + * pset_remove_task() removes a task from a processor_set. + * Caller must hold locks on pset and task. 
Pset reference count + * is not decremented; caller must explicitly pset_deallocate. + */ +void +pset_remove_task( + processor_set_t pset, + task_t task) +{ + if (pset != task->processor_set) + return; + + queue_remove(&pset->tasks, task, task_t, pset_tasks); + task->processor_set = PROCESSOR_SET_NULL; + pset->task_count--; +} + +/* + * pset_add_task() adds a task to a processor_set. + * Caller must hold locks on pset and task. Pset references to + * tasks are implicit. + */ +void +pset_add_task( + processor_set_t pset, + task_t task) +{ + queue_enter(&pset->tasks, task, task_t, pset_tasks); + task->processor_set = pset; + pset->task_count++; + pset->ref_count++; +} + +/* + * pset_remove_thread() removes a thread from a processor_set. + * Caller must hold locks on pset and thread. Pset reference count + * is not decremented; caller must explicitly pset_deallocate. + */ +void +pset_remove_thread( + processor_set_t pset, + thread_t thread) +{ + queue_remove(&pset->threads, thread, thread_t, pset_threads); + thread->processor_set = PROCESSOR_SET_NULL; + pset->thread_count--; +} + +/* + * pset_add_thread() adds a thread to a processor_set. + * Caller must hold locks on pset and thread. Pset references to + * threads are implicit. + */ +void +pset_add_thread( + processor_set_t pset, + thread_t thread) +{ + queue_enter(&pset->threads, thread, thread_t, pset_threads); + thread->processor_set = pset; + pset->thread_count++; + pset->ref_count++; +} + +/* + * thread_change_psets() changes the pset of a thread. Caller must + * hold locks on both psets and thread. The old pset must be + * explicitly pset_deallocat()'ed by caller. 
+ */ +void +thread_change_psets( + thread_t thread, + processor_set_t old_pset, + processor_set_t new_pset) +{ + queue_remove(&old_pset->threads, thread, thread_t, pset_threads); + old_pset->thread_count--; + queue_enter(&new_pset->threads, thread, thread_t, pset_threads); + thread->processor_set = new_pset; + new_pset->thread_count++; + new_pset->ref_count++; +} + +/* + * pset_deallocate: + * + * Remove one reference to the processor set. Destroy processor_set + * if this was the last reference. + */ +void +pset_deallocate( + processor_set_t pset) +{ + if (pset == PROCESSOR_SET_NULL) + return; + + pset_lock(pset); + if (--pset->ref_count > 0) { + pset_unlock(pset); + return; + } + + panic("pset_deallocate: default_pset destroyed"); +} + +/* + * pset_reference: + * + * Add one reference to the processor set. + */ +void +pset_reference( + processor_set_t pset) +{ + pset_lock(pset); + pset->ref_count++; + pset_unlock(pset); +} + + +kern_return_t +processor_info_count( + processor_flavor_t flavor, + mach_msg_type_number_t *count) +{ + kern_return_t kr; + + switch (flavor) { + case PROCESSOR_BASIC_INFO: + *count = PROCESSOR_BASIC_INFO_COUNT; + return KERN_SUCCESS; + case PROCESSOR_CPU_LOAD_INFO: + *count = PROCESSOR_CPU_LOAD_INFO_COUNT; + return KERN_SUCCESS; + default: + kr = cpu_info_count(flavor, count); + return kr; + } +} + + +kern_return_t +processor_info( + register processor_t processor, + processor_flavor_t flavor, + host_t *host, + processor_info_t info, + mach_msg_type_number_t *count) +{ + register int i, slot_num, state; + register processor_basic_info_t basic_info; + register processor_cpu_load_info_t cpu_load_info; + kern_return_t kr; + + if (processor == PROCESSOR_NULL) + return(KERN_INVALID_ARGUMENT); + + slot_num = processor->slot_num; + + switch (flavor) { + + case PROCESSOR_BASIC_INFO: + { + if (*count < PROCESSOR_BASIC_INFO_COUNT) + return(KERN_FAILURE); + + basic_info = (processor_basic_info_t) info; + basic_info->cpu_type = 
machine_slot[slot_num].cpu_type; + basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype; + state = processor->state; + if (state == PROCESSOR_OFF_LINE) + basic_info->running = FALSE; + else + basic_info->running = TRUE; + basic_info->slot_num = slot_num; + if (processor == master_processor) + basic_info->is_master = TRUE; + else + basic_info->is_master = FALSE; + + *count = PROCESSOR_BASIC_INFO_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + case PROCESSOR_CPU_LOAD_INFO: + { + if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT) + return(KERN_FAILURE); + + cpu_load_info = (processor_cpu_load_info_t) info; + for (i=0;icpu_ticks[i] = machine_slot[slot_num].cpu_ticks[i]; + + *count = PROCESSOR_CPU_LOAD_INFO_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + default: + { + kr=cpu_info(flavor, slot_num, info, count); + if (kr == KERN_SUCCESS) + *host = &realhost; + return(kr); + } + } +} + +kern_return_t +processor_start( + processor_t processor) +{ + int state; + spl_t s; + kern_return_t kr; + + if (processor == PROCESSOR_NULL) + return(KERN_INVALID_ARGUMENT); + + if (processor == master_processor) + return(cpu_start(processor->slot_num)); + + s = splsched(); + processor_lock(processor); + + state = processor->state; + if (state != PROCESSOR_OFF_LINE) { + processor_unlock(processor); + splx(s); + return(KERN_FAILURE); + } + processor->state = PROCESSOR_START; + processor_unlock(processor); + splx(s); + + if (processor->next_thread == THREAD_NULL) { + thread_t thread; + extern void start_cpu_thread(void); + + thread = kernel_thread_with_priority(kernel_task, MAXPRI_KERNBAND, + start_cpu_thread, FALSE); + + s = splsched(); + thread_lock(thread); + thread_bind_locked(thread, processor); + thread_go_locked(thread, THREAD_AWAKENED); + (void)rem_runq(thread); + processor->next_thread = thread; + thread_unlock(thread); + splx(s); + } + + kr = cpu_start(processor->slot_num); + + if (kr != KERN_SUCCESS) { + s = splsched(); + processor_lock(processor); + 
processor->state = PROCESSOR_OFF_LINE; + processor_unlock(processor); + splx(s); + } + + return(kr); +} + +kern_return_t +processor_exit( + processor_t processor) +{ + if (processor == PROCESSOR_NULL) + return(KERN_INVALID_ARGUMENT); + + return(processor_shutdown(processor)); +} + +kern_return_t +processor_control( + processor_t processor, + processor_info_t info, + mach_msg_type_number_t count) +{ + if (processor == PROCESSOR_NULL) + return(KERN_INVALID_ARGUMENT); + + return(cpu_control(processor->slot_num, info, count)); +} + +/* + * Precalculate the appropriate system quanta based on load. The + * index into machine_quantum is the number of threads on the + * processor set queue. It is limited to the number of processors in + * the set. + */ + +void +quantum_set( + processor_set_t pset) +{ +#if NCPUS > 1 + register int i, ncpus; + + ncpus = pset->processor_count; + + for (i=1; i <= ncpus; i++) + pset->machine_quantum[i] = ((min_quantum * ncpus) + (i / 2)) / i ; + + pset->machine_quantum[0] = pset->machine_quantum[1]; + + i = (pset->runq.count > ncpus) ? 
ncpus : pset->runq.count; + pset->set_quantum = pset->machine_quantum[i]; +#else /* NCPUS > 1 */ + default_pset.set_quantum = min_quantum; +#endif /* NCPUS > 1 */ +} + +kern_return_t +processor_set_create( + host_t host, + processor_set_t *new_set, + processor_set_t *new_name) +{ +#ifdef lint + host++; new_set++; new_name++; +#endif /* lint */ + return(KERN_FAILURE); +} + +kern_return_t +processor_set_destroy( + processor_set_t pset) +{ +#ifdef lint + pset++; +#endif /* lint */ + return(KERN_FAILURE); +} + +kern_return_t +processor_get_assignment( + processor_t processor, + processor_set_t *pset) +{ + int state; + + state = processor->state; + if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE) + return(KERN_FAILURE); + + *pset = processor->processor_set; + pset_reference(*pset); + return(KERN_SUCCESS); +} + +kern_return_t +processor_set_info( + processor_set_t pset, + int flavor, + host_t *host, + processor_set_info_t info, + mach_msg_type_number_t *count) +{ + if (pset == PROCESSOR_SET_NULL) + return(KERN_INVALID_ARGUMENT); + + if (flavor == PROCESSOR_SET_BASIC_INFO) { + register processor_set_basic_info_t basic_info; + + if (*count < PROCESSOR_SET_BASIC_INFO_COUNT) + return(KERN_FAILURE); + + basic_info = (processor_set_basic_info_t) info; + + pset_lock(pset); + simple_lock(&pset->processors_lock); + basic_info->processor_count = pset->processor_count; + simple_unlock(&pset->processors_lock); + basic_info->default_policy = pset->policy_default; + pset_unlock(pset); + + *count = PROCESSOR_SET_BASIC_INFO_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) { + register policy_timeshare_base_t ts_base; + + if (*count < POLICY_TIMESHARE_BASE_COUNT) + return(KERN_FAILURE); + + ts_base = (policy_timeshare_base_t) info; + + pset_lock(pset); + *ts_base = pset->policy_base.ts; + pset_unlock(pset); + + *count = POLICY_TIMESHARE_BASE_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if 
(flavor == PROCESSOR_SET_FIFO_DEFAULT) { + register policy_fifo_base_t fifo_base; + + if (*count < POLICY_FIFO_BASE_COUNT) + return(KERN_FAILURE); + + fifo_base = (policy_fifo_base_t) info; + + pset_lock(pset); + *fifo_base = pset->policy_base.fifo; + pset_unlock(pset); + + *count = POLICY_FIFO_BASE_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if (flavor == PROCESSOR_SET_RR_DEFAULT) { + register policy_rr_base_t rr_base; + + if (*count < POLICY_RR_BASE_COUNT) + return(KERN_FAILURE); + + rr_base = (policy_rr_base_t) info; + + pset_lock(pset); + *rr_base = pset->policy_base.rr; + pset_unlock(pset); + + *count = POLICY_RR_BASE_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) { + register policy_timeshare_limit_t ts_limit; + + if (*count < POLICY_TIMESHARE_LIMIT_COUNT) + return(KERN_FAILURE); + + ts_limit = (policy_timeshare_limit_t) info; + + pset_lock(pset); + *ts_limit = pset->policy_limit.ts; + pset_unlock(pset); + + *count = POLICY_TIMESHARE_LIMIT_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if (flavor == PROCESSOR_SET_FIFO_LIMITS) { + register policy_fifo_limit_t fifo_limit; + + if (*count < POLICY_FIFO_LIMIT_COUNT) + return(KERN_FAILURE); + + fifo_limit = (policy_fifo_limit_t) info; + + pset_lock(pset); + *fifo_limit = pset->policy_limit.fifo; + pset_unlock(pset); + + *count = POLICY_FIFO_LIMIT_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if (flavor == PROCESSOR_SET_RR_LIMITS) { + register policy_rr_limit_t rr_limit; + + if (*count < POLICY_RR_LIMIT_COUNT) + return(KERN_FAILURE); + + rr_limit = (policy_rr_limit_t) info; + + pset_lock(pset); + *rr_limit = pset->policy_limit.rr; + pset_unlock(pset); + + *count = POLICY_RR_LIMIT_COUNT; + *host = &realhost; + return(KERN_SUCCESS); + } + else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) { + register int *enabled; + + if (*count < (sizeof(*enabled)/sizeof(int))) + return(KERN_FAILURE); + + enabled = (int 
*) info; + + pset_lock(pset); + *enabled = pset->policies; + pset_unlock(pset); + + *count = sizeof(*enabled)/sizeof(int); + *host = &realhost; + return(KERN_SUCCESS); + } + + + *host = HOST_NULL; + return(KERN_INVALID_ARGUMENT); +} + +/* + * processor_set_statistics + * + * Returns scheduling statistics for a processor set. + */ +kern_return_t +processor_set_statistics( + processor_set_t pset, + int flavor, + processor_set_info_t info, + mach_msg_type_number_t *count) +{ + if (pset == PROCESSOR_SET_NULL) + return (KERN_INVALID_PROCESSOR_SET); + + if (flavor == PROCESSOR_SET_LOAD_INFO) { + register processor_set_load_info_t load_info; + + if (*count < PROCESSOR_SET_LOAD_INFO_COUNT) + return(KERN_FAILURE); + + load_info = (processor_set_load_info_t) info; + + pset_lock(pset); + load_info->task_count = pset->task_count; + load_info->thread_count = pset->thread_count; + simple_lock(&pset->processors_lock); + load_info->mach_factor = pset->mach_factor; + load_info->load_average = pset->load_average; + simple_unlock(&pset->processors_lock); + pset_unlock(pset); + + *count = PROCESSOR_SET_LOAD_INFO_COUNT; + return(KERN_SUCCESS); + } + + return(KERN_INVALID_ARGUMENT); +} + +/* + * processor_set_max_priority: + * + * Specify max priority permitted on processor set. This affects + * newly created and assigned threads. Optionally change existing + * ones. + */ +kern_return_t +processor_set_max_priority( + processor_set_t pset, + int max_priority, + boolean_t change_threads) +{ + return (KERN_INVALID_ARGUMENT); +} + +/* + * processor_set_policy_enable: + * + * Allow indicated policy on processor set. + */ + +kern_return_t +processor_set_policy_enable( + processor_set_t pset, + int policy) +{ + return (KERN_INVALID_ARGUMENT); +} + +/* + * processor_set_policy_disable: + * + * Forbid indicated policy on processor set. Time sharing cannot + * be forbidden. 
+ */ +kern_return_t +processor_set_policy_disable( + processor_set_t pset, + int policy, + boolean_t change_threads) +{ + return (KERN_INVALID_ARGUMENT); +} + +#define THING_TASK 0 +#define THING_THREAD 1 + +/* + * processor_set_things: + * + * Common internals for processor_set_{threads,tasks} + */ +kern_return_t +processor_set_things( + processor_set_t pset, + mach_port_t **thing_list, + mach_msg_type_number_t *count, + int type) +{ + unsigned int actual; /* this many things */ + int i; + + vm_size_t size, size_needed; + vm_offset_t addr; + + if (pset == PROCESSOR_SET_NULL) + return KERN_INVALID_ARGUMENT; + + size = 0; addr = 0; + + for (;;) { + pset_lock(pset); + if (!pset->active) { + pset_unlock(pset); + return KERN_FAILURE; + } + + if (type == THING_TASK) + actual = pset->task_count; + else + actual = pset->thread_count; + + /* do we have the memory we need? */ + + size_needed = actual * sizeof(mach_port_t); + if (size_needed <= size) + break; + + /* unlock the pset and allocate more memory */ + pset_unlock(pset); + + if (size != 0) + kfree(addr, size); + + assert(size_needed > 0); + size = size_needed; + + addr = kalloc(size); + if (addr == 0) + return KERN_RESOURCE_SHORTAGE; + } + + /* OK, have memory and the processor_set is locked & active */ + + switch (type) { + case THING_TASK: { + task_t *tasks = (task_t *) addr; + task_t task; + + for (i = 0, task = (task_t) queue_first(&pset->tasks); + i < actual; + i++, task = (task_t) queue_next(&task->pset_tasks)) { + /* take ref for convert_task_to_port */ + task_reference(task); + tasks[i] = task; + } + assert(queue_end(&pset->tasks, (queue_entry_t) task)); + break; + } + + case THING_THREAD: { + thread_act_t *thr_acts = (thread_act_t *) addr; + thread_t thread; + thread_act_t thr_act; + queue_head_t *list; + + list = &pset->threads; + thread = (thread_t) queue_first(list); + i = 0; + while (i < actual && !queue_end(list, (queue_entry_t)thread)) { + thr_act = thread_lock_act(thread); + if (thr_act && 
thr_act->ref_count > 0) { + /* take ref for convert_act_to_port */ + act_locked_act_reference(thr_act); + thr_acts[i] = thr_act; + i++; + } + thread_unlock_act(thread); + thread = (thread_t) queue_next(&thread->pset_threads); + } + if (i < actual) { + actual = i; + size_needed = actual * sizeof(mach_port_t); + } + break; + } + } + + /* can unlock processor set now that we have the task/thread refs */ + pset_unlock(pset); + + if (actual == 0) { + /* no things, so return null pointer and deallocate memory */ + *thing_list = 0; + *count = 0; + + if (size != 0) + kfree(addr, size); + } else { + /* if we allocated too much, must copy */ + + if (size_needed < size) { + vm_offset_t newaddr; + + newaddr = kalloc(size_needed); + if (newaddr == 0) { + switch (type) { + case THING_TASK: { + task_t *tasks = (task_t *) addr; + + for (i = 0; i < actual; i++) + task_deallocate(tasks[i]); + break; + } + + case THING_THREAD: { + thread_t *threads = (thread_t *) addr; + + for (i = 0; i < actual; i++) + thread_deallocate(threads[i]); + break; + } + } + kfree(addr, size); + return KERN_RESOURCE_SHORTAGE; + } + + bcopy((char *) addr, (char *) newaddr, size_needed); + kfree(addr, size); + addr = newaddr; + } + + *thing_list = (mach_port_t *) addr; + *count = actual; + + /* do the conversion that Mig should handle */ + + switch (type) { + case THING_TASK: { + task_t *tasks = (task_t *) addr; + + for (i = 0; i < actual; i++) + (*thing_list)[i] = convert_task_to_port(tasks[i]); + break; + } + + case THING_THREAD: { + thread_act_t *thr_acts = (thread_act_t *) addr; + + for (i = 0; i < actual; i++) + (*thing_list)[i] = convert_act_to_port(thr_acts[i]); + break; + } + } + } + + return(KERN_SUCCESS); +} + + +/* + * processor_set_tasks: + * + * List all tasks in the processor set. 
+ */ +kern_return_t +processor_set_tasks( + processor_set_t pset, + task_array_t *task_list, + mach_msg_type_number_t *count) +{ + return(processor_set_things(pset, (mach_port_t **)task_list, count, THING_TASK)); +} + +/* + * processor_set_threads: + * + * List all threads in the processor set. + */ +kern_return_t +processor_set_threads( + processor_set_t pset, + thread_array_t *thread_list, + mach_msg_type_number_t *count) +{ + return(processor_set_things(pset, (mach_port_t **)thread_list, count, THING_THREAD)); +} + +/* + * processor_set_base: + * + * Specify per-policy base priority for a processor set. Set processor + * set default policy to the given policy. This affects newly created + * and assigned threads. Optionally change existing ones. + */ +kern_return_t +processor_set_base( + processor_set_t pset, + policy_t policy, + policy_base_t base, + boolean_t change) +{ + return (KERN_INVALID_ARGUMENT); +} + +/* + * processor_set_limit: + * + * Specify per-policy limits for a processor set. This affects + * newly created and assigned threads. Optionally change existing + * ones. + */ +kern_return_t +processor_set_limit( + processor_set_t pset, + policy_t policy, + policy_limit_t limit, + boolean_t change) +{ + return (KERN_POLICY_LIMIT); +} + +/* + * processor_set_policy_control + * + * Controls the scheduling attributes governing the processor set. + * Allows control of enabled policies, and per-policy base and limit + * priorities. + */ +kern_return_t +processor_set_policy_control( + processor_set_t pset, + int flavor, + processor_set_info_t policy_info, + mach_msg_type_number_t count, + boolean_t change) +{ + return (KERN_INVALID_ARGUMENT); +} diff --git a/osfmk/kern/processor.h b/osfmk/kern/processor.h new file mode 100644 index 000000000..c7fcd9020 --- /dev/null +++ b/osfmk/kern/processor.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +/* + * processor.h: Processor and processor-set definitions. + */ + +#ifndef _KERN_PROCESSOR_H_ +#define _KERN_PROCESSOR_H_ + +/* + * Data structures for managing processors and sets of processors. + */ +#include +#include +#include + +extern struct processor_set default_pset; +extern processor_t master_processor; + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include + +#include +#include +#include +#include +#include + +#if NCPUS > 1 +#include +#endif /* NCPUS > 1 */ + +struct processor_set { + struct run_queue runq; /* runq for this set */ + queue_head_t idle_queue; /* idle processors */ + int idle_count; /* how many ? */ + decl_simple_lock_data(,idle_lock) /* lock for above */ + queue_head_t processors; /* all processors here */ + int processor_count;/* how many ? */ + decl_simple_lock_data(,processors_lock) /* lock for above */ + queue_head_t tasks; /* tasks assigned */ + int task_count; /* how many */ + queue_head_t threads; /* threads in this set */ + int thread_count; /* how many */ + int ref_count; /* structure ref count */ + boolean_t active; /* is pset in use */ + decl_mutex_data(, lock) /* lock for everything else */ + struct ipc_port * pset_self; /* port for operations */ + struct ipc_port * pset_name_self; /* port for information */ + int max_priority; /* maximum priority */ + int policies; /* bit vector for policies */ + int set_quantum; /* current default quantum */ +#if NCPUS > 1 + int quantum_adj_index; /* runtime quantum adj. 
*/ + decl_simple_lock_data(,quantum_adj_lock) /* lock for above */ + int machine_quantum[NCPUS+1]; /* ditto */ +#endif /* NCPUS > 1 */ + integer_t mach_factor; /* mach_factor */ + integer_t load_average; /* load_average */ + long sched_load; /* load avg for scheduler */ + policy_t policy_default; /* per set default */ + policy_base_data_t policy_base; /* base attributes */ + policy_limit_data_t policy_limit; /* limit attributes */ +}; + +struct processor { + struct run_queue runq; /* local runq for this processor */ + queue_chain_t processor_queue;/* idle/assign/shutdown queue link */ + int state; /* See below */ + struct thread_shuttle + *next_thread, /* next thread to run if dispatched */ + *idle_thread; /* this processor's idle thread. */ + int quantum; /* quantum for current thread */ + boolean_t first_quantum; /* first quantum in succession */ + int last_quantum; /* last quantum assigned */ + + processor_set_t processor_set; /* processor set I belong to */ + processor_set_t processor_set_next; /* set I will belong to */ + queue_chain_t processors; /* all processors in set */ + decl_simple_lock_data(,lock) + struct ipc_port *processor_self;/* port for operations */ + int slot_num; /* machine-indep slot number */ +#if NCPUS > 1 + ast_check_t ast_check_data; /* for remote ast_check invocation */ + queue_chain_t softclock_queue;/* cpus handling softclocks */ +#endif /* NCPUS > 1 */ + /* punt id data temporarily */ +}; + +extern struct processor processor_array[NCPUS]; + +/* + * NOTE: The processor->processor_set link is needed in one of the + * scheduler's critical paths. [Figure out where to look for another + * thread to run on this processor.] It is accessed without locking. + * The following access protocol controls this field. + * + * Read from own processor - just read. + * Read from another processor - lock processor structure during read. + * Write from own processor - lock processor structure during write. + * Write from another processor - NOT PERMITTED. 
+ * + */ + +/* + * Processor state locking: + * + * Values for the processor state are defined below. If the processor + * is off-line or being shutdown, then it is only necessary to lock + * the processor to change its state. Otherwise it is only necessary + * to lock its processor set's idle_lock. Scheduler code will + * typically lock only the idle_lock, but processor manipulation code + * will often lock both. + */ + +#define PROCESSOR_OFF_LINE 0 /* Not in system */ +#define PROCESSOR_RUNNING 1 /* Running a normal thread */ +#define PROCESSOR_IDLE 2 /* idle */ +#define PROCESSOR_DISPATCHING 3 /* dispatching (idle -> running) */ +#define PROCESSOR_ASSIGN 4 /* Assignment is changing */ +#define PROCESSOR_SHUTDOWN 5 /* Being shutdown */ +#define PROCESSOR_START 6 /* Being start */ + +/* + * Use processor ptr array to find current processor's data structure. + * This replaces a multiplication (index into processor_array) with + * an array lookup and a memory reference. It also allows us to save + * space if processor numbering gets too sparse. 
+ */ + +extern processor_t processor_ptr[NCPUS]; + +#define cpu_to_processor(i) (processor_ptr[i]) + +#define current_processor() (processor_ptr[cpu_number()]) +#define current_processor_set() (current_processor()->processor_set) + +/* Compatibility -- will go away */ + +#define cpu_state(slot_num) (processor_ptr[slot_num]->state) +#define cpu_idle(slot_num) (cpu_state(slot_num) == PROCESSOR_IDLE) + +/* Useful lock macros */ + +#define pset_lock(pset) mutex_lock(&(pset)->lock) +#define pset_lock_try(pset) mutex_try(&(pset)->lock) +#define pset_unlock(pset) mutex_unlock(&(pset)->lock) + +#define processor_lock(pr) simple_lock(&(pr)->lock) +#define processor_unlock(pr) simple_unlock(&(pr)->lock) + + +extern void pset_sys_bootstrap(void); + +/* Implemented by MD layer */ + +extern void cpu_up( + int cpu); + +extern kern_return_t processor_shutdown( + processor_t processor); + +extern void pset_remove_processor( + processor_set_t pset, + processor_t processor); + +extern void pset_add_processor( + processor_set_t pset, + processor_t processor); + +extern void pset_remove_task( + processor_set_t pset, + task_t task); + +extern void pset_add_task( + processor_set_t pset, + task_t task); + +extern void pset_remove_thread( + processor_set_t pset, + thread_t thread); + +extern void pset_add_thread( + processor_set_t pset, + thread_t thread); + +extern void thread_change_psets( + thread_t thread, + processor_set_t old_pset, + processor_set_t new_pset); + +extern void pset_deallocate( + processor_set_t pset); + +extern void pset_reference( + processor_set_t pset); + +extern kern_return_t processor_assign( + processor_t processor, + processor_set_t new_pset, + boolean_t wait); + +extern kern_return_t processor_info_count( + processor_flavor_t flavor, + mach_msg_type_number_t *count); +#endif /* MACH_KERNEL_PRIVATE */ + +extern kern_return_t processor_start( + processor_t processor); + +extern kern_return_t processor_exit( + processor_t processor); + +#endif /* 
_KERN_PROCESSOR_H_ */ diff --git a/osfmk/kern/profile.c b/osfmk/kern/profile.c new file mode 100644 index 000000000..ab210613c --- /dev/null +++ b/osfmk/kern/profile.c @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ +#include + +#include +#include + +#if MACH_PROF +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +thread_t profile_thread_id = THREAD_NULL; +int profile_sample_count = 0; /* Provided for looking at from kdb. */ +extern kern_return_t task_suspend(task_t task); /* ack */ + +/* Forwards */ +prof_data_t pbuf_alloc(void); +void pbuf_free( + prof_data_t pbuf); +void profile_thread(void); +void send_last_sample_buf( + prof_data_t pbuf); + +/* + ***************************************************************************** + * profile_thread is the profile/trace kernel support thread. It is started + * by a server/user request through task_sample, or thread_sample. The profile + * thread dequeues messages and sends them to the receive_prof thread, in the + * server, via the send_samples and send_notices mig interface functions. If + * there are no messages in the queue profile thread blocks until wakened by + * profile (called in from mach_clock), or last_sample (called by thread/task_ + * sample). +*/ + +void +profile_thread(void) +{ + spl_t s; + buffer_t buf_entry; + queue_entry_t prof_queue_entry; + prof_data_t pbuf; + kern_return_t kr; + int j; + + thread_swappable(current_act(), FALSE); + + /* Initialise the queue header for the prof_queue */ + mpqueue_init(&prof_queue); + + while (TRUE) { + + /* Dequeue the first buffer. 
*/ + s = splsched(); + mpdequeue_head(&prof_queue, &prof_queue_entry); + splx(s); + + if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) { + assert_wait((event_t) profile_thread, THREAD_UNINT); + thread_block((void (*)(void)) 0); + if (current_thread()->wait_result != THREAD_AWAKENED) + break; + } else + { + int dropped; + + pbuf = buf_entry->p_prof; + kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone, + (mach_msg_type_number_t)buf_entry->p_index); + profile_sample_count += buf_entry->p_index; + if (kr != KERN_SUCCESS) + printf("send_samples(%x, %x, %d) error %x\n", + pbuf->prof_port, buf_entry->p_zone, buf_entry->p_index, kr); + dropped = buf_entry->p_dropped; + if (dropped > 0) { + printf("kernel: profile dropped %d sample%s\n", dropped, + dropped == 1 ? "" : "s"); + buf_entry->p_dropped = 0; + } + + /* Indicate you've finished the dirty job */ + buf_entry->p_full = FALSE; + if (buf_entry->p_wakeme) + thread_wakeup((event_t) &buf_entry->p_wakeme); + } + + } + /* The profile thread has been signalled to exit. Any threads waiting + for the last buffer of samples to be acknowledged should be woken + up now. */ + profile_thread_id = THREAD_NULL; + while (1) { + s = splsched(); + mpdequeue_head(&prof_queue, &prof_queue_entry); + splx(s); + if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) + break; + if (buf_entry->p_wakeme) + thread_wakeup((event_t) &buf_entry->p_wakeme); + } +#if 0 /* XXXXX */ + thread_halt_self(); +#else + panic("profile_thread(): halt_self"); +#endif /* XXXXX */ +} + +/* + ***************************************************************************** + * send_last_sample is the drain mechanism to allow partial profiled buffers + * to be sent to the receive_prof thread in the server. 
+ ***************************************************************************** +*/ + +void +send_last_sample_buf(prof_data_t pbuf) +{ + spl_t s; + buffer_t buf_entry; + + if (pbuf == NULLPROFDATA) + return; + + /* Ask for the sending of the last PC buffer. + * Make a request to the profile_thread by inserting + * the buffer in the send queue, and wake it up. + * The last buffer must be inserted at the head of the + * send queue, so the profile_thread handles it immediately. + */ + buf_entry = pbuf->prof_area + pbuf->prof_index; + buf_entry->p_prof = pbuf; + + /* + Watch out in case profile thread exits while we are about to + queue data for it. + */ + s = splsched(); + if (profile_thread_id == THREAD_NULL) + splx(s); + else { + buf_entry->p_wakeme = 1; + mpenqueue_tail(&prof_queue, &buf_entry->p_list); + thread_wakeup((event_t) profile_thread); + assert_wait((event_t) &buf_entry->p_wakeme, THREAD_ABORTSAFE); + splx(s); + thread_block((void (*)(void)) 0); + } +} + + +/* + ***************************************************************************** + * add clock tick parameters to profile/trace buffers. Called from the mach_ + * clock hertz_tick function. DCI version stores thread, sp, and pc values + * into the profile/trace buffers. MACH_PROF version just stores pc values. + ***************************************************************************** + */ + +void +profile(natural_t pc, + prof_data_t pbuf) +{ + natural_t inout_val = pc; + buffer_t buf_entry; + + if (pbuf == NULLPROFDATA) + return; + + /* Inserts the PC value in the buffer of the thread */ + set_pbuf_value(pbuf, &inout_val); + switch((int)inout_val) { + case 0: + if (profile_thread_id == THREAD_NULL) { + reset_pbuf_area(pbuf); + } + break; + case 1: + /* Normal case, value successfully inserted */ + break; + case 2 : + /* + * The value we have just inserted caused the + * buffer to be full, and ready to be sent. + * If profile_thread_id is null, the profile + * thread has been killed. 
Since this generally + * happens only when the O/S server task of which + * it is a part is killed, it is not a great loss + * to throw away the data. + */ + if (profile_thread_id == THREAD_NULL) { + reset_pbuf_area(pbuf); + break; + } + + buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index]; + buf_entry->p_prof = pbuf; + mpenqueue_tail(&prof_queue, &buf_entry->p_list); + + /* Switch to another buffer */ + reset_pbuf_area(pbuf); + + /* Wake up the profile thread */ + if (profile_thread_id != THREAD_NULL) + thread_wakeup((event_t) profile_thread); + break; + + default: + printf("profile : unexpected case\n"); + } +} + +/* + ***************************************************************************** + * pbuf_alloc creates a profile/trace buffer and assoc. zones for storing + * profiled items. + ***************************************************************************** + */ + +prof_data_t +pbuf_alloc(void) +{ + register prof_data_t pbuf; + register int i; + register natural_t *zone; + + pbuf = (prof_data_t)kalloc(sizeof(struct prof_data)); + if (!pbuf) + return(NULLPROFDATA); + pbuf->prof_port = MACH_PORT_NULL; + for (i=0; i< NB_PROF_BUFFER; i++) { + zone = (natural_t *)kalloc(SIZE_PROF_BUFFER*sizeof(natural_t)); + if (!zone) { + i--; + while (i--) + kfree((vm_offset_t)pbuf->prof_area[i].p_zone, + SIZE_PROF_BUFFER*sizeof(natural_t)); + kfree((vm_offset_t)pbuf, sizeof(struct prof_data)); + return(NULLPROFDATA); + } + pbuf->prof_area[i].p_zone = zone; + pbuf->prof_area[i].p_full = FALSE; + } + pbuf->prof_port = MACH_PORT_NULL; + return(pbuf); +} + +/* + ***************************************************************************** + * pbuf_free free memory allocated for storing profile/trace items. Called + * when a task is no longer profiled/traced. Pbuf_free tears down the memory + * alloced in pbuf_alloc. It does not check to see if the structures are valid + * since it is only called by functions in this file. 
+ ***************************************************************************** + */ +void +pbuf_free( + prof_data_t pbuf) +{ + register int i; + + if (pbuf->prof_port) + ipc_port_release_send(pbuf->prof_port); + + for(i=0; i < NB_PROF_BUFFER ; i++) + kfree((vm_offset_t)pbuf->prof_area[i].p_zone, + SIZE_PROF_BUFFER*sizeof(natural_t)); + kfree((vm_offset_t)pbuf, sizeof(struct prof_data)); +} + +#endif /* MACH_PROF */ + +/* + ***************************************************************************** + * Thread_sample is used by MACH_PROF to profile a single thread, and is only a + * stub in DCI. + ***************************************************************************** + */ + +kern_return_t +thread_sample( + thread_act_t thr_act, + ipc_port_t reply) +{ + /* + * This routine is called every time that a new thread has made + * a request for the sampling service. We must keep track of the + * correspondence between its identity (thread) and the port + * we are going to use as a reply port to send out the samples resulting + * from its execution. + */ +#if !MACH_PROF + return KERN_FAILURE; +#else + prof_data_t pbuf; + vm_offset_t vmpbuf; + + if (reply != MACH_PORT_NULL) { + if (thr_act->act_profiled) /* yuck! */ + return KERN_INVALID_ARGUMENT; + /* Start profiling this activation, do the initialization. 
*/ + pbuf = pbuf_alloc(); + if ((thr_act->profil_buffer = pbuf) == NULLPROFDATA) { + printf("thread_sample: cannot allocate pbuf\n"); + return KERN_RESOURCE_SHORTAGE; + } + else { + if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) { + printf("mach_sample_thread: cannot set pbuf_nb\n"); + return KERN_FAILURE; + } + reset_pbuf_area(pbuf); + } + pbuf->prof_port = reply; + thr_act->act_profiled = TRUE; + thr_act->act_profiled_own = TRUE; + if (profile_thread_id == THREAD_NULL) + profile_thread_id = kernel_thread(kernel_task, profile_thread); + } else { + if (!thr_act->act_profiled) + return(KERN_INVALID_ARGUMENT); + + thr_act->act_profiled = FALSE; + /* do not stop sampling if thread is not profiled by its own */ + + if (!thr_act->act_profiled_own) + return KERN_SUCCESS; + else + thr_act->act_profiled_own = FALSE; + + send_last_sample_buf(thr_act->profil_buffer); + pbuf_free(thr_act->profil_buffer); + thr_act->profil_buffer = NULLPROFDATA; + } + return KERN_SUCCESS; +#endif /* MACH_PROF */ +} + +/* + ***************************************************************************** + * Task_sample is used to profile/trace tasks - all thread within a task using + * a common profile buffer to collect items generated by the hertz_tick. For + * each task profiled a profile buffer is created that associates a reply port + * (used to send the data to a server thread), task (used for throttling), and + * a zone area (used to store profiled/traced items). + ***************************************************************************** + */ + +kern_return_t +task_sample( + task_t task, + ipc_port_t reply) +{ +#if !MACH_PROF + return KERN_FAILURE; +#else + prof_data_t pbuf=task->profil_buffer; + vm_offset_t vmpbuf; + boolean_t turnon = (reply != MACH_PORT_NULL); + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + if (turnon) /* Do we want to profile this task? 
*/ + { + pbuf = pbuf_alloc(); /* allocate a profile buffer */ + task_lock(task); + if (task->task_profiled) { /* if it is already profiled return so */ + task_unlock(task); + if (pbuf != NULLPROFDATA) + pbuf_free(pbuf); + return(KERN_INVALID_ARGUMENT); + } + if (pbuf == NULLPROFDATA) { + task_unlock(task); + return KERN_RESOURCE_SHORTAGE; /* can't allocate a buffer, quit */ + } + task->profil_buffer = pbuf; + + if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) { + pbuf_free(pbuf); + task_unlock(task); + return KERN_FAILURE; + } + reset_pbuf_area(pbuf); + pbuf->prof_port = reply; /* assoc. buffer with reply port */ + } else { /* We want to stop profiling/tracing */ + task_lock(task); + if (!task->task_profiled) { /* but this task is not being profiled */ + task_unlock(task); + return(KERN_INVALID_ARGUMENT); + } + } + + /* + * turnon = FALSE && task_profile = TRUE || + * turnon = TRUE && task_profile = FALSE + */ + + if (turnon != task->task_profiled) { + int actual, i; + thread_act_t thr_act; + + if (turnon && profile_thread_id == THREAD_NULL) /* 1st time thru? */ + profile_thread_id = /* then start profile thread. 
*/ + kernel_thread(kernel_task, profile_thread); + task->task_profiled = turnon; + actual = task->thr_act_count; + for (i = 0, thr_act = (thread_act_t)queue_first(&task->thr_acts); + i < actual; + i++, thr_act = (thread_act_t)queue_next(&thr_act->thr_acts)) { + if (!thr_act->act_profiled_own) { + thr_act->act_profiled = turnon; + if (turnon) { + thr_act->profil_buffer = task->profil_buffer; + thr_act->act_profiled = TRUE; + } else { + thr_act->act_profiled = FALSE; + thr_act->profil_buffer = NULLPROFDATA; + } + } + } + if (!turnon) { /* drain buffers and clean-up */ + send_last_sample_buf(task->profil_buffer); + pbuf_free(task->profil_buffer); + task->profil_buffer = NULLPROFDATA; + } + } + + task_unlock(task); + return KERN_SUCCESS; +#endif /* MACH_PROF */ +} + diff --git a/osfmk/kern/profile.h b/osfmk/kern/profile.h new file mode 100644 index 000000000..5124b2316 --- /dev/null +++ b/osfmk/kern/profile.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +#ifndef _KERN_PROFILE_H +#define _KERN_PROFILE_H + +#include +#include + +#define NB_PROF_BUFFER 4 /* number of buffers servicing a */ +#define SIZE_PROF_BUFFER 200 /* size of a profil buffer (in natural_t) */ + /* -> at most 1 packet every 2 secs */ + /* profiled thread */ + +struct prof_data { + struct ipc_port *prof_port; /* where to send a full buffer */ + + struct buffer { + queue_chain_t p_list; + natural_t *p_zone; /* points to the actual storage area */ + int p_index; /* next slot to be filled */ + int p_dropped; /* # dropped samples when full */ + boolean_t p_full; /* is the current buffer full ? 
*/ + struct prof_data *p_prof; /* base to get prof_port */ + char p_wakeme; /* do wakeup when sent */ + } prof_area[NB_PROF_BUFFER]; + + int prof_index; /* index of the buffer structure */ + /* currently in use */ + +}; + +typedef struct prof_data *prof_data_t; +#define NULLPROFDATA ((prof_data_t) 0) +typedef struct buffer *buffer_t; +#define NULLPBUF ((buffer_t) 0) + +/* Macros */ + +#define set_pbuf_nb(pbuf, nb) \ + (((nb) >= 0 && (nb) < NB_PROF_BUFFER) \ + ? (pbuf)->prof_index = (nb), 1 \ + : 0) + + +#define get_pbuf_nb(pbuf) \ + (pbuf)->prof_index + + +extern vm_map_t kernel_map; + +/* MACRO set_pbuf_value +** +** enters the value 'val' in the buffer 'pbuf' and returns the following +** indications: 0: means that a fatal error occured: the buffer was full +** (it hasn't been sent yet) +** 1: means that a value has been inserted successfully +** 2: means that we'v just entered the last value causing +** the current buffer to be full.(must switch to +** another buffer and signal the sender to send it) +*/ + +#if MACH_PROF + +#define set_pbuf_value(pbuf, val) \ + { \ + register buffer_t a = &((pbuf)->prof_area[(pbuf)->prof_index]); \ + register int i ;\ + register boolean_t f = a->p_full; \ + \ + if (f == TRUE ) {\ + a->p_dropped++; \ + *(val) = 0L; \ + } else { \ + i = a->p_index++; \ + a->p_zone[i] = *(val); \ + if (i == SIZE_PROF_BUFFER-1) { \ + a->p_full = TRUE; \ + *(val) = 2; \ + } \ + else \ + *(val) = 1; \ + } \ + } + +#define reset_pbuf_area(pbuf) \ + { \ + int i; \ + (pbuf)->prof_index = ((pbuf)->prof_index + 1) % NB_PROF_BUFFER; \ + i = (pbuf)->prof_index; \ + (pbuf)->prof_area[i].p_index = 0; \ + (pbuf)->prof_area[i].p_dropped = 0; \ + } + +#endif /* MACH_PROF */ + +/* +** Global variable: the head of the queue of buffers to send +** It is a queue with locks (uses macros from queue.h) and it +** is shared by hardclock() and the sender_thread() +*/ + +mpqueue_head_t prof_queue; + +extern void profile( + natural_t pc, /* program counter */ + prof_data_t 
pbuf);	/* trace/prof data area */ + +#if MACH_PROF + +#define task_prof_init(task) \ + task->task_profiled = FALSE; \ + task->profil_buffer = NULLPROFDATA; + +#define act_prof_init(thr_act, task) \ + thr_act->act_profiled = task->task_profiled; \ + thr_act->profil_buffer = task->profil_buffer; + +#define task_prof_deallocate(task) \ + if (task->profil_buffer) \ + task_sample(task, MACH_PORT_NULL); \ + +#define act_prof_deallocate(thr_act) \ + if (thr_act->act_profiled_own && thr_act->profil_buffer) \ + thread_sample(thr_act, MACH_PORT_NULL); \ + +extern kern_return_t thread_sample(thread_act_t, ipc_port_t); +extern kern_return_t task_sample(task_t, ipc_port_t); + +#else /* !MACH_PROF */ + +#define task_prof_init(task) +#define act_prof_init(thr_act, task) +#define task_prof_deallocate(task) +#define act_prof_deallocate(thr_act) + + +#endif /* !MACH_PROF */ + +#endif /* _KERN_PROFILE_H */ diff --git a/osfmk/kern/queue.c b/osfmk/kern/queue.c new file mode 100644 index 000000000..2ada9236d --- /dev/null +++ b/osfmk/kern/queue.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:33 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.10.3 1995/03/15 17:21:19 bruel + * compile only if !__GNUC__. + * [95/03/09 bruel] + * + * Revision 1.1.10.2 1995/01/06 19:48:05 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.1.3.5 1994/05/06 18:51:43 tmt + * Merge in DEC Alpha changes to osc1.3b19. + * Merge Alpha changes into osc1.312b source code. + * Remove ifdef sun around insque and remque. + * * End1.3merge + * [1994/11/04 09:29:15 dwm] + * + * Revision 1.1.10.1 1994/09/23 02:25:00 ezf + * change marker to not FREE + * [1994/09/22 21:35:34 ezf] + * + * Revision 1.1.3.3 1993/07/28 17:16:26 bernard + * CR9523 -- Prototypes. + * [1993/07/21 17:00:38 bernard] + * + * Revision 1.1.3.2 1993/06/02 23:39:41 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:13:58 jeffc] + * + * Revision 1.1 1992/09/30 02:09:52 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:45:45 mrt + * Correcting copyright + * + * Revision 2.3 91/05/08 12:48:22 dbg + * Compile queue routines on vax. + * [91/03/26 dbg] + * + * Revision 2.2 91/02/05 17:28:38 mrt + * Changed to new Mach copyright + * [91/02/01 16:16:22 mrt] + * + * Revision 2.1 89/08/03 15:51:47 rwd + * Created. + * + * 17-Mar-87 David Golub (dbg) at Carnegie-Mellon University + * Created from routines written by David L. Black. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Routines to implement queue package. + */ + +#include + +#if !defined(__GNUC__) + +/* + * Insert element at head of queue. + */ +void +enqueue_head( + register queue_t que, + register queue_entry_t elt) +{ + elt->next = que->next; + elt->prev = que; + elt->next->prev = elt; + que->next = elt; +} + +/* + * Insert element at tail of queue. + */ +void +enqueue_tail( + register queue_t que, + register queue_entry_t elt) +{ + elt->next = que; + elt->prev = que->prev; + elt->prev->next = elt; + que->prev = elt; +} + +/* + * Remove and return element at head of queue. + */ +queue_entry_t +dequeue_head( + register queue_t que) +{ + register queue_entry_t elt; + + if (que->next == que) + return((queue_entry_t)0); + + elt = que->next; + elt->next->prev = que; + que->next = elt->next; + return(elt); +} + +/* + * Remove and return element at tail of queue. 
+ */ +queue_entry_t +dequeue_tail( + register queue_t que) +{ + register queue_entry_t elt; + + if (que->prev == que) + return((queue_entry_t)0); + + elt = que->prev; + elt->prev->next = que; + que->prev = elt->prev; + return(elt); +} + +/* + * Remove arbitrary element from queue. + * Does not check whether element is on queue - the world + * will go haywire if it isn't. + */ + +/*ARGSUSED*/ +void +remqueue( + queue_t que, + register queue_entry_t elt) +{ + elt->next->prev = elt->prev; + elt->prev->next = elt->next; +} + +/* + * Routines to directly imitate the VAX hardware queue + * package. + */ +void +insque( + register queue_entry_t entry, + register queue_entry_t pred) +{ + entry->next = pred->next; + entry->prev = pred; + (pred->next)->prev = entry; + pred->next = entry; +} + +int +remque( + register queue_entry_t elt) +{ + (elt->next)->prev = elt->prev; + (elt->prev)->next = elt->next; + return((int)elt); +} + +#endif diff --git a/osfmk/kern/queue.h b/osfmk/kern/queue.h new file mode 100644 index 000000000..541eef456 --- /dev/null +++ b/osfmk/kern/queue.h @@ -0,0 +1,618 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ +/* + * File: queue.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Type definitions for generic queues. + * + */ + +#ifndef _KERN_QUEUE_H_ +#define _KERN_QUEUE_H_ + +#include +#include + +/* + * Queue of abstract objects. Queue is maintained + * within that object. + * + * Supports fast removal from within the queue. + * + * How to declare a queue of elements of type "foo_t": + * In the "*foo_t" type, you must have a field of + * type "queue_chain_t" to hold together this queue. + * There may be more than one chain through a + * "foo_t", for use by different queues. + * + * Declare the queue as a "queue_t" type. 
+ * + * Elements of the queue (of type "foo_t", that is) + * are referred to by reference, and cast to type + * "queue_entry_t" within this module. + */ + +/* + * A generic doubly-linked list (queue). + */ + +struct queue_entry { + struct queue_entry *next; /* next element */ + struct queue_entry *prev; /* previous element */ +}; + +typedef struct queue_entry *queue_t; +typedef struct queue_entry queue_head_t; +typedef struct queue_entry queue_chain_t; +typedef struct queue_entry *queue_entry_t; + +/* + * enqueue puts "elt" on the "queue". + * dequeue returns the first element in the "queue". + * remqueue removes the specified "elt" from the specified "queue". + */ + +#define enqueue(queue,elt) enqueue_tail(queue, elt) +#define dequeue(queue) dequeue_head(queue) + +#if !defined(__GNUC__) + +/* Enqueue element to head of queue */ +extern void enqueue_head( + queue_t que, + queue_entry_t elt); + +/* Enqueue element to tail of queue */ +extern void enqueue_tail( + queue_t que, + queue_entry_t elt); + +/* Dequeue element from head of queue */ +extern queue_entry_t dequeue_head( + queue_t que); + +/* Dequeue element from tail of queue */ +extern queue_entry_t dequeue_tail( + queue_t que); + +/* Dequeue element */ +extern void remqueue( + queue_t que, + queue_entry_t elt); + +/* Enqueue element after a particular elem */ +extern void insque( + queue_entry_t entry, + queue_entry_t pred); + +/* Dequeue element */ +extern int remque( + queue_entry_t elt); + +#else + +static __inline__ void +enqueue_head( + queue_t que, + queue_entry_t elt) +{ + elt->next = que->next; + elt->prev = que; + elt->next->prev = elt; + que->next = elt; +} + +static __inline__ void +enqueue_tail( + queue_t que, + queue_entry_t elt) +{ + elt->next = que; + elt->prev = que->prev; + elt->prev->next = elt; + que->prev = elt; +} + +static __inline__ queue_entry_t +dequeue_head( + queue_t que) +{ + register queue_entry_t elt = (queue_entry_t) 0; + + if (que->next != que) { + elt = que->next; + 
elt->next->prev = que; + que->next = elt->next; + } + + return (elt); +} + +static __inline__ queue_entry_t +dequeue_tail( + queue_t que) +{ + register queue_entry_t elt = (queue_entry_t) 0; + + if (que->prev != que) { + elt = que->prev; + elt->prev->next = que; + que->prev = elt->prev; + } + + return (elt); +} + +static __inline__ void +remqueue( + queue_t que, + queue_entry_t elt) +{ + elt->next->prev = elt->prev; + elt->prev->next = elt->next; +} + +static __inline__ void +insque( + queue_entry_t entry, + queue_entry_t pred) +{ + entry->next = pred->next; + entry->prev = pred; + (pred->next)->prev = entry; + pred->next = entry; +} + +static __inline__ integer_t +remque( + register queue_entry_t elt) +{ + (elt->next)->prev = elt->prev; + (elt->prev)->next = elt->next; + + return((integer_t)elt); +} + +#endif /* defined(__GNUC__) */ + +/* + * Macro: queue_init + * Function: + * Initialize the given queue. + * Header: + * void queue_init(q) + * queue_t q; \* MODIFIED *\ + */ +#define queue_init(q) \ +MACRO_BEGIN \ + (q)->next = (q);\ + (q)->prev = (q);\ +MACRO_END + +/* + * Macro: queue_first + * Function: + * Returns the first entry in the queue, + * Header: + * queue_entry_t queue_first(q) + * queue_t q; \* IN *\ + */ +#define queue_first(q) ((q)->next) + +/* + * Macro: queue_next + * Function: + * Returns the entry after an item in the queue. + * Header: + * queue_entry_t queue_next(qc) + * queue_t qc; + */ +#define queue_next(qc) ((qc)->next) + +/* + * Macro: queue_last + * Function: + * Returns the last entry in the queue. + * Header: + * queue_entry_t queue_last(q) + * queue_t q; \* IN *\ + */ +#define queue_last(q) ((q)->prev) + +/* + * Macro: queue_prev + * Function: + * Returns the entry before an item in the queue. + * Header: + * queue_entry_t queue_prev(qc) + * queue_t qc; + */ +#define queue_prev(qc) ((qc)->prev) + +/* + * Macro: queue_end + * Function: + * Tests whether a new entry is really the end of + * the queue. 
+ * Header: + * boolean_t queue_end(q, qe) + * queue_t q; + * queue_entry_t qe; + */ +#define queue_end(q, qe) ((q) == (qe)) + +/* + * Macro: queue_empty + * Function: + * Tests whether a queue is empty. + * Header: + * boolean_t queue_empty(q) + * queue_t q; + */ +#define queue_empty(q) queue_end((q), queue_first(q)) + + +/*----------------------------------------------------------------*/ +/* + * Macros that operate on generic structures. The queue + * chain may be at any location within the structure, and there + * may be more than one chain. + */ + +/* + * Macro: queue_enter + * Function: + * Insert a new element at the tail of the queue. + * Header: + * void queue_enter(q, elt, type, field) + * queue_t q; + * elt; + * is what's in our queue + * is the chain field in (*) + */ +#define queue_enter(head, elt, type, field) \ +MACRO_BEGIN \ + register queue_entry_t prev; \ + \ + prev = (head)->prev; \ + if ((head) == prev) { \ + (head)->next = (queue_entry_t) (elt); \ + } \ + else { \ + ((type)prev)->field.next = (queue_entry_t)(elt);\ + } \ + (elt)->field.prev = prev; \ + (elt)->field.next = head; \ + (head)->prev = (queue_entry_t) elt; \ +MACRO_END + +/* + * Macro: queue_enter_first + * Function: + * Insert a new element at the head of the queue. + * Header: + * void queue_enter_first(q, elt, type, field) + * queue_t q; + * elt; + * is what's in our queue + * is the chain field in (*) + */ +#define queue_enter_first(head, elt, type, field) \ +MACRO_BEGIN \ + register queue_entry_t next; \ + \ + next = (head)->next; \ + if ((head) == next) { \ + (head)->prev = (queue_entry_t) (elt); \ + } \ + else { \ + ((type)next)->field.prev = (queue_entry_t)(elt);\ + } \ + (elt)->field.next = next; \ + (elt)->field.prev = head; \ + (head)->next = (queue_entry_t) elt; \ +MACRO_END + +/* + * Macro: queue_insert_before + * Function: + * Insert a new element before a given element. 
+ * Header: + * void queue_insert_before(q, elt, cur, type, field) + * queue_t q; + * elt; + * cur; + * is what's in our queue + * is the chain field in (*) + */ +#define queue_insert_before(head, elt, cur, type, field) \ +MACRO_BEGIN \ + register queue_entry_t prev; \ + \ + if ((head) == (queue_entry_t)(cur)) { \ + (elt)->field.next = (head); \ + if ((head)->next == (head)) { /* only element */ \ + (elt)->field.prev = (head); \ + (head)->next = (queue_entry_t)(elt); \ + } else { /* last element */ \ + prev = (elt)->field.prev = (head)->prev; \ + ((type)prev)->field.next = (queue_entry_t)(elt);\ + } \ + (head)->prev = (queue_entry_t)(elt); \ + } else { \ + (elt)->field.next = (queue_entry_t)(cur); \ + if ((head)->next == (queue_entry_t)(cur)) { \ + /* first element */ \ + (elt)->field.prev = (head); \ + (head)->next = (queue_entry_t)(elt); \ + } else { /* middle element */ \ + prev = (elt)->field.prev = (cur)->field.prev; \ + ((type)prev)->field.next = (queue_entry_t)(elt);\ + } \ + (cur)->field.prev = (queue_entry_t)(elt); \ + } \ +MACRO_END + +/* + * Macro: queue_insert_after + * Function: + * Insert a new element after a given element. 
+ * Header: + * void queue_insert_after(q, elt, cur, type, field) + * queue_t q; + * elt; + * cur; + * is what's in our queue + * is the chain field in (*) + */ +#define queue_insert_after(head, elt, cur, type, field) \ +MACRO_BEGIN \ + register queue_entry_t next; \ + \ + if ((head) == (queue_entry_t)(cur)) { \ + (elt)->field.prev = (head); \ + if ((head)->next == (head)) { /* only element */ \ + (elt)->field.next = (head); \ + (head)->prev = (queue_entry_t)(elt); \ + } else { /* first element */ \ + next = (elt)->field.next = (head)->next; \ + ((type)next)->field.prev = (queue_entry_t)(elt);\ + } \ + (head)->next = (queue_entry_t)(elt); \ + } else { \ + (elt)->field.prev = (queue_entry_t)(cur); \ + if ((head)->prev == (queue_entry_t)(cur)) { \ + /* last element */ \ + (elt)->field.next = (head); \ + (head)->prev = (queue_entry_t)(elt); \ + } else { /* middle element */ \ + next = (elt)->field.next = (cur)->field.next; \ + ((type)next)->field.prev = (queue_entry_t)(elt);\ + } \ + (cur)->field.next = (queue_entry_t)(elt); \ + } \ +MACRO_END + +/* + * Macro: queue_field [internal use only] + * Function: + * Find the queue_chain_t (or queue_t) for the + * given element (thing) in the given queue (head) + */ +#define queue_field(head, thing, type, field) \ + (((head) == (thing)) ? (head) : &((type)(thing))->field) + +/* + * Macro: queue_remove + * Function: + * Remove an arbitrary item from the queue. + * Header: + * void queue_remove(q, qe, type, field) + * arguments as in queue_enter + */ +#define queue_remove(head, elt, type, field) \ +MACRO_BEGIN \ + register queue_entry_t next, prev; \ + \ + next = (elt)->field.next; \ + prev = (elt)->field.prev; \ + \ + if ((head) == next) \ + (head)->prev = prev; \ + else \ + ((type)next)->field.prev = prev; \ + \ + if ((head) == prev) \ + (head)->next = next; \ + else \ + ((type)prev)->field.next = next; \ +MACRO_END + +/* + * Macro: queue_remove_first + * Function: + * Remove and return the entry at the head of + * the queue. 
+ * Header: + * queue_remove_first(head, entry, type, field) + * entry is returned by reference + */ +#define queue_remove_first(head, entry, type, field) \ +MACRO_BEGIN \ + register queue_entry_t next; \ + \ + (entry) = (type) ((head)->next); \ + next = (entry)->field.next; \ + \ + if ((head) == next) \ + (head)->prev = (head); \ + else \ + ((type)(next))->field.prev = (head); \ + (head)->next = next; \ +MACRO_END + +/* + * Macro: queue_remove_last + * Function: + * Remove and return the entry at the tail of + * the queue. + * Header: + * queue_remove_last(head, entry, type, field) + * entry is returned by reference + */ +#define queue_remove_last(head, entry, type, field) \ +MACRO_BEGIN \ + register queue_entry_t prev; \ + \ + (entry) = (type) ((head)->prev); \ + prev = (entry)->field.prev; \ + \ + if ((head) == prev) \ + (head)->next = (head); \ + else \ + ((type)(prev))->field.next = (head); \ + (head)->prev = prev; \ +MACRO_END + +/* + * Macro: queue_assign + */ +#define queue_assign(to, from, type, field) \ +MACRO_BEGIN \ + ((type)((from)->prev))->field.next = (to); \ + ((type)((from)->next))->field.prev = (to); \ + *to = *from; \ +MACRO_END + +/* + * Macro: queue_new_head + * Function: + * rebase old queue to new queue head + * Header: + * queue_new_head(old, new, type, field) + * queue_t old; + * queue_t new; + * is what's in our queue + * is the chain field in (*) + */ +#define queue_new_head(old, new, type, field) \ +MACRO_BEGIN \ + if (!queue_empty(new)) { \ + *(new) = *(old); \ + ((type)((new)->next))->field.prev = (new); \ + ((type)((new)->prev))->field.next = (new); \ + } else { \ + queue_init(new); \ + } \ +MACRO_END + +/* + * Macro: queue_iterate + * Function: + * iterate over each item in the queue. + * Generates a 'for' loop, setting elt to + * each item in turn (by reference). 
+ * Header: + * queue_iterate(q, elt, type, field) + * queue_t q; + * elt; + * is what's in our queue + * is the chain field in (*) + */ +#define queue_iterate(head, elt, type, field) \ + for ((elt) = (type) queue_first(head); \ + !queue_end((head), (queue_entry_t)(elt)); \ + (elt) = (type) queue_next(&(elt)->field)) + + +#ifdef MACH_KERNEL_PRIVATE +/*----------------------------------------------------------------*/ +/* + * Define macros for queues with locks. + */ +struct mpqueue_head { + struct queue_entry head; /* header for queue */ + decl_simple_lock_data(, lock) /* lock for queue */ +}; + +typedef struct mpqueue_head mpqueue_head_t; + +#define round_mpq(size) (size) + +#define mpqueue_init(q) \ +MACRO_BEGIN \ + queue_init(&(q)->head); \ + simple_lock_init(&(q)->lock, ETAP_MISC_Q); \ +MACRO_END + +#define mpenqueue_tail(q, elt) \ +MACRO_BEGIN \ + simple_lock(&(q)->lock); \ + enqueue_tail(&(q)->head, elt); \ + simple_unlock(&(q)->lock); \ +MACRO_END + +#define mpdequeue_head(q, elt) \ +MACRO_BEGIN \ + simple_lock(&(q)->lock); \ + if (queue_empty(&(q)->head)) \ + *(elt) = 0; \ + else \ + *(elt) = dequeue_head(&(q)->head); \ + simple_unlock(&(q)->lock); \ +MACRO_END + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_QUEUE_H_ */ diff --git a/osfmk/kern/sched.h b/osfmk/kern/sched.h new file mode 100644 index 000000000..9c98bd104 --- /dev/null +++ b/osfmk/kern/sched.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: sched.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Header file for scheduler. + * + */ + +#ifndef _KERN_SCHED_H_ +#define _KERN_SCHED_H_ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#if STAT_TIME + +/* + * Statistical timing uses microseconds as timer units. 16 bit shift + * yields priorities. PRI_SHIFT_2 isn't needed. 
+ */ +#define PRI_SHIFT (16 - SCHED_TICK_SHIFT) + +#else /* STAT_TIME */ + +/* + * Otherwise machine provides shift(s) based on time units it uses. + */ +#include + +#endif /* STAT_TIME */ + +#define NRQS 128 /* 128 run queues per cpu */ +#define NRQBM (NRQS / 32) /* number of run queue bit maps */ + +#define MAXPRI (NRQS-1) +#define MINPRI IDLEPRI /* lowest legal priority schedulable */ +#define IDLEPRI 0 /* idle thread priority */ +#define DEPRESSPRI MINPRI /* depress priority */ + +/* + * High-level priority assignments + * + ************************************************************************* + * 127 Reserved (real-time) + * A + * + + * (32 levels) + * + + * V + * 96 Reserved (real-time) + * 95 Kernel mode only + * A + * + + * (16 levels) + * + + * V + * 80 Kernel mode only + * 79 High priority + * A + * + + * (16 levels) + * + + * V + * 64 High priority + * 63 Elevated priorities + * A + * + + * (12 levels) + * + + * V + * 52 Elevated priorities + * 51 Elevated priorities (incl. BSD +nice) + * A + * + + * (20 levels) + * + + * V + * 32 Elevated priorities (incl. BSD +nice) + * 31 Default (default base for threads) + * 30 Lowered priorities (incl. BSD -nice) + * A + * + + * (20 levels) + * + + * V + * 11 Lowered priorities (incl. 
BSD -nice) + * 10 Lowered priorities (aged pri's) + * A + * + + * (11 levels) + * + + * V + * 0 Lowered priorities (aged pri's / idle) + ************************************************************************* + */ + +#define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1) /* 96 */ + +#define MAXPRI_STANDARD (BASEPRI_REALTIME - 1) /* 95 */ + +#define MAXPRI_KERNBAND MAXPRI_STANDARD /* 95 */ +#define MINPRI_KERNBAND (MAXPRI_KERNBAND - (NRQS / 8) + 1) /* 80 */ + +#define MAXPRI_HIGHBAND (MINPRI_KERNBAND - 1) /* 79 */ +#define MINPRI_HIGHBAND (MAXPRI_HIGHBAND - (NRQS / 8) + 1) /* 64 */ + +#define MAXPRI_MAINBAND (MINPRI_HIGHBAND - 1) /* 63 */ +#define BASEPRI_DEFAULT (MAXPRI_MAINBAND - (NRQS / 4)) /* 31 */ +#define MINPRI_MAINBAND MINPRI /* 0 */ + +#define MINPRI_STANDARD MINPRI_MAINBAND /* 0 */ + +/* + * Macro to check for invalid priorities. + */ +#define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI) + +struct run_queue { + queue_head_t queues[NRQS]; /* one for each priority */ + decl_simple_lock_data(,lock) /* one lock for all queues */ + int bitmap[NRQBM]; /* run queue bitmap array */ + int highq; /* highest runnable queue */ + int count; /* # of runnable threads */ +}; + +typedef struct run_queue *run_queue_t; +#define RUN_QUEUE_NULL ((run_queue_t) 0) + +#define csw_needed(thread, processor) ( \ + ((thread)->state & TH_SUSP) || \ + ((processor)->first_quantum? \ + ((processor)->runq.highq > (thread)->sched_pri || \ + (processor)->processor_set->runq.highq > (thread)->sched_pri) : \ + ((processor)->runq.highq >= (thread)->sched_pri || \ + (processor)->processor_set->runq.highq >= (thread)->sched_pri)) ) + +/* + * Scheduler routines. 
+ */ + +/* Remove thread from its run queue */ +extern run_queue_t rem_runq( + thread_t thread); + +/* Mach factor computation (in mach_factor.c) */ +extern void compute_mach_factor(void); + +/* Update threads quantum (in priority.c) */ +extern void thread_quantum_update( + int mycpu, + thread_t thread, + int nticks, + int state); + +extern int min_quantum; /* defines max context switch rate */ + +/* + * Shift structures for holding update shifts. Actual computation + * is usage = (usage >> shift1) +/- (usage >> abs(shift2)) where the + * +/- is determined by the sign of shift 2. + */ +struct shift { + int shift1; + int shift2; +}; + +typedef struct shift *shift_t, shift_data_t; + +/* + * Age usage (1 << SCHED_TICK_SHIFT) times per second. + */ + +extern unsigned sched_tick; + +#define SCHED_TICK_SHIFT 3 + +#define SCHED_SCALE 128 +#define SCHED_SHIFT 7 + +/* + * thread_timer_delta macro takes care of both thread timers. + */ + +#define thread_timer_delta(thread) \ +MACRO_BEGIN \ + register unsigned delta; \ + \ + delta = 0; \ + TIMER_DELTA((thread)->system_timer, \ + (thread)->system_timer_save, delta); \ + TIMER_DELTA((thread)->user_timer, \ + (thread)->user_timer_save, delta); \ + (thread)->cpu_delta += delta; \ + (thread)->sched_delta += (delta * \ + (thread)->processor_set->sched_load); \ +MACRO_END + +#if SIMPLE_CLOCK +/* + * sched_usec is an exponential average of number of microseconds + * in a second for clock drift compensation. + */ + +extern int sched_usec; +#endif /* SIMPLE_CLOCK */ + +#endif /* _KERN_SCHED_H_ */ diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c new file mode 100644 index 000000000..ae49e69ca --- /dev/null +++ b/osfmk/kern/sched_prim.c @@ -0,0 +1,2678 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: sched_prim.c + * Author: Avadis Tevanian, Jr. 
+ * Date: 1986 + * + * Scheduling primitives + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /*** ??? fix so this can be removed ***/ +#include + +#if TASK_SWAPPER +#include +extern int task_swap_on; +#endif /* TASK_SWAPPER */ + +extern int hz; + +#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ +int default_preemption_rate = DEFAULT_PREEMPTION_RATE; + +#define NO_KERNEL_PREEMPT 0 +#define KERNEL_PREEMPT 1 +int kernel_preemption_mode = KERNEL_PREEMPT; + +int min_quantum; +natural_t min_quantum_ms; + +unsigned sched_tick; + +#if SIMPLE_CLOCK +int sched_usec; +#endif /* SIMPLE_CLOCK */ + +/* Forwards */ +void thread_continue(thread_t); + +void wait_queues_init(void); + +void set_pri( + thread_t thread, + int pri, + int resched); + +thread_t choose_pset_thread( + processor_t myprocessor, + processor_set_t pset); + +thread_t choose_thread( + processor_t myprocessor); + +int run_queue_enqueue( + run_queue_t runq, + thread_t thread, + boolean_t tail); + +void idle_thread_continue(void); +void do_thread_scan(void); + +void clear_wait_internal( + thread_t thread, + int result); + +#if DEBUG +void dump_run_queues( + run_queue_t rq); +void dump_run_queue_struct( + run_queue_t rq); +void dump_processor( + processor_t p); +void dump_processor_set( + processor_set_t ps); + +void checkrq( + run_queue_t rq, + char *msg); + +void thread_check( + thread_t thread, + run_queue_t runq); +#endif /*DEBUG*/ + +boolean_t thread_runnable( + thread_t thread); + +/* + * State machine + * + * states are combinations of: + * R running + * W waiting (or on wait queue) + * N non-interruptible + * O swapped out + * I being swapped in + * + * init action + * assert_wait thread_block 
clear_wait swapout swapin + * + * R RW, RWN R; setrun - - + * RN RWN RN; setrun - - + * + * RW W R - + * RWN WN RN - + * + * W R; setrun WO + * WN RN; setrun - + * + * RO - - R + * + */ + +/* + * Waiting protocols and implementation: + * + * Each thread may be waiting for exactly one event; this event + * is set using assert_wait(). That thread may be awakened either + * by performing a thread_wakeup_prim() on its event, + * or by directly waking that thread up with clear_wait(). + * + * The implementation of wait events uses a hash table. Each + * bucket is queue of threads having the same hash function + * value; the chain for the queue (linked list) is the run queue + * field. [It is not possible to be waiting and runnable at the + * same time.] + * + * Locks on both the thread and on the hash buckets govern the + * wait event field and the queue chain field. Because wakeup + * operations only have the event as an argument, the event hash + * bucket must be locked before any thread. + * + * Scheduling operations may also occur at interrupt level; therefore, + * interrupts below splsched() must be prevented when holding + * thread or hash bucket locks. + * + * The wait event hash table declarations are as follows: + */ + +#define NUMQUEUES 59 + +struct wait_queue wait_queues[NUMQUEUES]; + +#define wait_hash(event) \ + ((((int)(event) < 0)? ~(int)(event): (int)(event)) % NUMQUEUES) + +void +sched_init(void) +{ + /* + * Calculate the minimum quantum + * in ticks. + */ + if (default_preemption_rate < 1) + default_preemption_rate = DEFAULT_PREEMPTION_RATE; + min_quantum = hz / default_preemption_rate; + + /* + * Round up result (4/5) to an + * integral number of ticks. 
+ */ + if (((hz * 10) / default_preemption_rate) - (min_quantum * 10) >= 5) + min_quantum++; + if (min_quantum < 1) + min_quantum = 1; + + min_quantum_ms = (1000 / hz) * min_quantum; + + printf("scheduling quantum is %d ms\n", min_quantum_ms); + + wait_queues_init(); + pset_sys_bootstrap(); /* initialize processor mgmt. */ + processor_action(); + sched_tick = 0; +#if SIMPLE_CLOCK + sched_usec = 0; +#endif /* SIMPLE_CLOCK */ + ast_init(); + sf_init(); +} + +void +wait_queues_init(void) +{ + register int i; + + for (i = 0; i < NUMQUEUES; i++) { + wait_queue_init(&wait_queues[i], SYNC_POLICY_FIFO); + } +} + +/* + * Thread timeout routine, called when timer expires. + */ +void +thread_timer_expire( + timer_call_param_t p0, + timer_call_param_t p1) +{ + thread_t thread = p0; + spl_t s; + + s = splsched(); + wake_lock(thread); + if ( thread->wait_timer_is_set && + !timer_call_is_delayed(&thread->wait_timer, NULL) ) { + thread->wait_timer_active--; + thread->wait_timer_is_set = FALSE; + thread_lock(thread); + if (thread->active) + clear_wait_internal(thread, THREAD_TIMED_OUT); + thread_unlock(thread); + } + else + if (--thread->wait_timer_active == 0) + thread_wakeup_one(&thread->wait_timer_active); + wake_unlock(thread); + splx(s); +} + +/* + * thread_set_timer: + * + * Set a timer for the current thread, if the thread + * is ready to wait. Must be called between assert_wait() + * and thread_block(). 
+ */ +void +thread_set_timer( + natural_t interval, + natural_t scale_factor) +{ + thread_t thread = current_thread(); + AbsoluteTime deadline; + spl_t s; + + s = splsched(); + wake_lock(thread); + thread_lock(thread); + if ((thread->state & TH_WAIT) != 0) { + clock_interval_to_deadline(interval, scale_factor, &deadline); + timer_call_enter(&thread->wait_timer, deadline); + assert(!thread->wait_timer_is_set); + thread->wait_timer_active++; + thread->wait_timer_is_set = TRUE; + } + thread_unlock(thread); + wake_unlock(thread); + splx(s); +} + +void +thread_set_timer_deadline( + AbsoluteTime deadline) +{ + thread_t thread = current_thread(); + spl_t s; + + s = splsched(); + wake_lock(thread); + thread_lock(thread); + if ((thread->state & TH_WAIT) != 0) { + timer_call_enter(&thread->wait_timer, deadline); + assert(!thread->wait_timer_is_set); + thread->wait_timer_active++; + thread->wait_timer_is_set = TRUE; + } + thread_unlock(thread); + wake_unlock(thread); + splx(s); +} + +void +thread_cancel_timer(void) +{ + thread_t thread = current_thread(); + spl_t s; + + s = splsched(); + wake_lock(thread); + if (thread->wait_timer_is_set) { + if (timer_call_cancel(&thread->wait_timer)) + thread->wait_timer_active--; + thread->wait_timer_is_set = FALSE; + } + wake_unlock(thread); + splx(s); +} + +/* + * thread_depress_timeout: + * + * Timeout routine for priority depression. + */ +void +thread_depress_timeout( + thread_call_param_t p0, + thread_call_param_t p1) +{ + thread_t thread = p0; + sched_policy_t *policy; + spl_t s; + + s = splsched(); + thread_lock(thread); + policy = policy_id_to_sched_policy(thread->policy); + thread_unlock(thread); + splx(s); + + if (policy != SCHED_POLICY_NULL) + policy->sp_ops.sp_thread_depress_timeout(policy, thread); + + thread_deallocate(thread); +} + +/* + * Set up thread timeout element when thread is created. 
+ */ +void +thread_timer_setup( + thread_t thread) +{ + timer_call_setup(&thread->wait_timer, thread_timer_expire, thread); + thread->wait_timer_is_set = FALSE; + thread->wait_timer_active = 1; + thread->ref_count++; + + thread_call_setup(&thread->depress_timer, thread_depress_timeout, thread); +} + +void +thread_timer_terminate(void) +{ + thread_t thread = current_thread(); + spl_t s; + + s = splsched(); + wake_lock(thread); + if (thread->wait_timer_is_set) { + if (timer_call_cancel(&thread->wait_timer)) + thread->wait_timer_active--; + thread->wait_timer_is_set = FALSE; + } + + thread->wait_timer_active--; + + while (thread->wait_timer_active > 0) { + assert_wait((event_t)&thread->wait_timer_active, THREAD_UNINT); + wake_unlock(thread); + splx(s); + + thread_block((void (*)(void)) 0); + + s = splsched(); + wake_lock(thread); + } + + wake_unlock(thread); + splx(s); + + thread_deallocate(thread); +} + +/* + * Routine: thread_go_locked + * Purpose: + * Start a thread running. + * Conditions: + * thread lock held, IPC locks may be held. + * thread must have been pulled from wait queue under same lock hold. + */ +void +thread_go_locked( + thread_t thread, + int result) +{ + int state; + sched_policy_t *policy; + sf_return_t sfr; + + assert(thread->at_safe_point == FALSE); + assert(thread->wait_event == NO_EVENT); + assert(thread->wait_queue == WAIT_QUEUE_NULL); + + if (thread->state & TH_WAIT) { + + thread->state &= ~(TH_WAIT|TH_UNINT); + if (!(thread->state & TH_RUN)) { + thread->state |= TH_RUN; +#if THREAD_SWAPPER + if (thread->state & TH_SWAPPED_OUT) + thread_swapin(thread->top_act, FALSE); + else +#endif /* THREAD_SWAPPER */ + { + policy = &sched_policy[thread->policy]; + sfr = policy->sp_ops.sp_thread_unblock(policy, thread); + assert(sfr == SF_SUCCESS); + } + } + thread->wait_result = result; + } + + + /* + * The next few lines are a major hack. Hopefully this will get us + * around all of the scheduling framework hooha. 
We can't call + * sp_thread_unblock yet because we could still be finishing up the + * durn two stage block on another processor and thread_setrun + * could be called by s_t_u and we'll really be messed up then. + */ + /* Don't mess with this if we are still swapped out */ + if (!(thread->state & TH_SWAPPED_OUT)) + thread->sp_state = MK_SP_RUNNABLE; + +} + +void +thread_mark_wait_locked( + thread_t thread, + int interruptible) +{ + + assert(thread == current_thread()); + + thread->wait_result = -1; /* JMM - Needed for non-assert kernel */ + thread->state |= (interruptible && thread->interruptible) ? + TH_WAIT : (TH_WAIT | TH_UNINT); + thread->at_safe_point = (interruptible == THREAD_ABORTSAFE) && (thread->interruptible); + thread->sleep_stamp = sched_tick; +} + + + +/* + * Routine: assert_wait_timeout + * Purpose: + * Assert that the thread intends to block, + * waiting for a timeout (no user known event). + */ +unsigned int assert_wait_timeout_event; + +void +assert_wait_timeout( + mach_msg_timeout_t msecs, + int interruptible) +{ + spl_t s; + + assert_wait((event_t)&assert_wait_timeout_event, interruptible); + thread_set_timer(msecs, 1000*NSEC_PER_USEC); +} + +/* + * Check to see if an assert wait is possible, without actually doing one. + * This is used by debug code in locks and elsewhere to verify that it is + * always OK to block when trying to take a blocking lock (since waiting + * for the actual assert_wait to catch the case may make it hard to detect + * this case. + */ +boolean_t +assert_wait_possible(void) +{ + + thread_t thread; + extern unsigned int debug_mode; + +#if DEBUG + if(debug_mode) return TRUE; /* Always succeed in debug mode */ +#endif + + thread = current_thread(); + + return (thread == NULL || wait_queue_assert_possible(thread)); +} + +/* + * assert_wait: + * + * Assert that the current thread is about to go to + * sleep until the specified event occurs. 
+ */ +void +assert_wait( + event_t event, + int interruptible) +{ + register wait_queue_t wq; + register int index; + + assert(event != NO_EVENT); + assert(assert_wait_possible()); + + index = wait_hash(event); + wq = &wait_queues[index]; + wait_queue_assert_wait(wq, + event, + interruptible); +} + + +/* + * thread_[un]stop(thread) + * Once a thread has blocked interruptibly (via assert_wait) prevent + * it from running until thread_unstop. + * + * If someone else has already stopped the thread, wait for the + * stop to be cleared, and then stop it again. + * + * Return FALSE if interrupted. + * + * NOTE: thread_hold/thread_suspend should be called on the activation + * before calling thread_stop. TH_SUSP is only recognized when + * a thread blocks and only prevents clear_wait/thread_wakeup + * from restarting an interruptible wait. The wake_active flag is + * used to indicate that someone is waiting on the thread. + */ +boolean_t +thread_stop( + thread_t thread) +{ + spl_t s; + + s = splsched(); + wake_lock(thread); + + while (thread->state & TH_SUSP) { + thread->wake_active = TRUE; + assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE); + wake_unlock(thread); + splx(s); + + thread_block((void (*)(void)) 0); + if (current_thread()->wait_result != THREAD_AWAKENED) + return (FALSE); + + s = splsched(); + wake_lock(thread); + } + thread_lock(thread); + thread->state |= TH_SUSP; + thread_unlock(thread); + + wake_unlock(thread); + splx(s); + + return (TRUE); +} + +/* + * Clear TH_SUSP and if the thread has been stopped and is now runnable, + * put it back on the run queue. 
+ */ +void +thread_unstop( + thread_t thread) +{ + sched_policy_t *policy; + sf_return_t sfr; + spl_t s; + + s = splsched(); + wake_lock(thread); + thread_lock(thread); + + if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP/*|TH_UNINT*/)) == TH_SUSP) { + thread->state = (thread->state & ~TH_SUSP) | TH_RUN; +#if THREAD_SWAPPER + if (thread->state & TH_SWAPPED_OUT) + thread_swapin(thread->top_act, FALSE); + else +#endif /* THREAD_SWAPPER */ + { + policy = &sched_policy[thread->policy]; + sfr = policy->sp_ops.sp_thread_unblock(policy, thread); + assert(sfr == SF_SUCCESS); + } + } + else + if (thread->state & TH_SUSP) { + thread->state &= ~TH_SUSP; + + if (thread->wake_active) { + thread->wake_active = FALSE; + thread_unlock(thread); + wake_unlock(thread); + splx(s); + thread_wakeup((event_t)&thread->wake_active); + + return; + } + } + + thread_unlock(thread); + wake_unlock(thread); + splx(s); +} + +/* + * Wait for the thread's RUN bit to clear + */ +boolean_t +thread_wait( + thread_t thread) +{ + spl_t s; + + s = splsched(); + wake_lock(thread); + + while (thread->state & (TH_RUN/*|TH_UNINT*/)) { + if (thread->last_processor != PROCESSOR_NULL) + cause_ast_check(thread->last_processor); + + thread->wake_active = TRUE; + assert_wait((event_t)&thread->wake_active, THREAD_ABORTSAFE); + wake_unlock(thread); + splx(s); + + thread_block((void (*)(void))0); + if (current_thread()->wait_result != THREAD_AWAKENED) + return (FALSE); + + s = splsched(); + wake_lock(thread); + } + + wake_unlock(thread); + splx(s); + + return (TRUE); +} + + +/* + * thread_stop_wait(thread) + * Stop the thread then wait for it to block interruptibly + */ +boolean_t +thread_stop_wait( + thread_t thread) +{ + if (thread_stop(thread)) { + if (thread_wait(thread)) + return (TRUE); + + thread_unstop(thread); + } + + return (FALSE); +} + + +/* + * Routine: clear_wait_internal + * + * Clear the wait condition for the specified thread. + * Start the thread executing if that is appropriate. 
+ * Arguments: + * thread thread to awaken + * result Wakeup result the thread should see + * Conditions: + * At splsched + * the thread is locked. + */ +void +clear_wait_internal( + thread_t thread, + int result) +{ + /* + * If the thread isn't in a wait queue, just set it running. Otherwise, + * try to remove it from the queue and, if successful, then set it + * running. NEVER interrupt an uninterruptible thread. + */ + if (!((result == THREAD_INTERRUPTED) && (thread->state & TH_UNINT))) { + if (wait_queue_assert_possible(thread) || + (wait_queue_remove(thread) == KERN_SUCCESS)) { + thread_go_locked(thread, result); + } + } +} + + +/* + * clear_wait: + * + * Clear the wait condition for the specified thread. Start the thread + * executing if that is appropriate. + * + * parameters: + * thread thread to awaken + * result Wakeup result the thread should see + */ +void +clear_wait( + thread_t thread, + int result) +{ + spl_t s; + + s = splsched(); + thread_lock(thread); + clear_wait_internal(thread, result); + thread_unlock(thread); + splx(s); +} + + +/* + * thread_wakeup_prim: + * + * Common routine for thread_wakeup, thread_wakeup_with_result, + * and thread_wakeup_one. + * + */ +void +thread_wakeup_prim( + event_t event, + boolean_t one_thread, + int result) +{ + register wait_queue_t wq; + register int index; + + index = wait_hash(event); + wq = &wait_queues[index]; + if (one_thread) + wait_queue_wakeup_one(wq, event, result); + else + wait_queue_wakeup_all(wq, event, result); +} + +/* + * thread_bind: + * + * Force a thread to execute on the specified processor. + * If the thread is currently executing, it may wait until its + * time slice is up before switching onto the specified processor. + * + * A processor of PROCESSOR_NULL causes the thread to be unbound. + * xxx - DO NOT export this to users. 
+ */ +void +thread_bind( + register thread_t thread, + processor_t processor) +{ + spl_t s; + + s = splsched(); + thread_lock(thread); + thread_bind_locked(thread, processor); + thread_unlock(thread); + splx(s); +} + +/* + * Select a thread for this processor (the current processor) to run. + * May select the current thread, which must already be locked. + */ +thread_t +thread_select( + register processor_t myprocessor) +{ + register thread_t thread; + processor_set_t pset; + register run_queue_t runq = &myprocessor->runq; + boolean_t other_runnable; + sched_policy_t *policy; + + /* + * Check for other non-idle runnable threads. + */ + myprocessor->first_quantum = TRUE; + pset = myprocessor->processor_set; + thread = current_thread(); + +#if 0 /* CHECKME! */ + thread->unconsumed_quantum = myprocessor->quantum; +#endif + + simple_lock(&runq->lock); + simple_lock(&pset->runq.lock); + + other_runnable = runq->count > 0 || pset->runq.count > 0; + + if ( thread->state == TH_RUN && + (!other_runnable || + (runq->highq < thread->sched_pri && + pset->runq.highq < thread->sched_pri)) && + thread->processor_set == pset && + (thread->bound_processor == PROCESSOR_NULL || + thread->bound_processor == myprocessor) ) { + + /* I am the highest priority runnable (non-idle) thread */ + simple_unlock(&pset->runq.lock); + simple_unlock(&runq->lock); + + /* Update the thread's meta-priority */ + policy = policy_id_to_sched_policy(thread->policy); + assert(policy != SCHED_POLICY_NULL); + (void)policy->sp_ops.sp_thread_update_mpri(policy, thread); + } + else + if (other_runnable) { + simple_unlock(&pset->runq.lock); + simple_unlock(&runq->lock); + thread = choose_thread(myprocessor); + } + else { + simple_unlock(&pset->runq.lock); + simple_unlock(&runq->lock); + + /* + * Nothing is runnable, so set this processor idle if it + * was running. If it was in an assignment or shutdown, + * leave it alone. Return its idle thread. 
+ */ + simple_lock(&pset->idle_lock); + if (myprocessor->state == PROCESSOR_RUNNING) { + myprocessor->state = PROCESSOR_IDLE; + /* + * XXX Until it goes away, put master on end of queue, others + * XXX on front so master gets used last. + */ + if (myprocessor == master_processor) + queue_enter(&(pset->idle_queue), myprocessor, + processor_t, processor_queue); + else + queue_enter_first(&(pset->idle_queue), myprocessor, + processor_t, processor_queue); + + pset->idle_count++; + } + simple_unlock(&pset->idle_lock); + + thread = myprocessor->idle_thread; + } + + return (thread); +} + + +/* + * Stop running the current thread and start running the new thread. + * If continuation is non-zero, and the current thread is blocked, + * then it will resume by executing continuation on a new stack. + * Returns TRUE if the hand-off succeeds. + * The reason parameter == AST_QUANTUM if the thread blocked + * because its quantum expired. + * Assumes splsched. + */ + + +static thread_t +__current_thread(void) +{ + return (current_thread()); +} + +boolean_t +thread_invoke( + register thread_t old_thread, + register thread_t new_thread, + int reason, + void (*continuation)(void)) +{ + sched_policy_t *policy; + sf_return_t sfr; + void (*lcont)(void); + + /* + * Mark thread interruptible. + */ + thread_lock(new_thread); + new_thread->state &= ~TH_UNINT; + + if (cpu_data[cpu_number()].preemption_level != 1) + panic("thread_invoke: preemption_level %d\n", + cpu_data[cpu_number()].preemption_level); + + + assert(thread_runnable(new_thread)); + + assert(old_thread->continuation == (void (*)(void))0); + + if ((old_thread->sched_mode & TH_MODE_REALTIME) && (!old_thread->stack_privilege)) { + old_thread->stack_privilege = old_thread->kernel_stack; + } + + if (continuation != (void (*)()) 0) { + switch (new_thread->state & TH_STACK_STATE) { + case TH_STACK_HANDOFF: + + /* + * If the old thread has stack privilege, we can't give + * his stack away. 
So go and get him one and treat this + * as a traditional context switch. + */ + if (old_thread->stack_privilege == current_stack()) + goto get_new_stack; + + /* + * Make the whole handoff/dispatch atomic to match the + * non-handoff case. + */ + disable_preemption(); + + /* + * Set up ast context of new thread and switch to its timer. + */ + new_thread->state &= ~(TH_STACK_HANDOFF|TH_UNINT); + new_thread->last_processor = current_processor(); + ast_context(new_thread->top_act, cpu_number()); + timer_switch(&new_thread->system_timer); + thread_unlock(new_thread); + + old_thread->continuation = continuation; + stack_handoff(old_thread, new_thread); + + wake_lock(old_thread); + thread_lock(old_thread); + act_machine_sv_free(old_thread->top_act); + + /* + * inline thread_dispatch but don't free stack + */ + + switch (old_thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) { + sched_policy_t *policy; + sf_return_t sfr; + + case TH_RUN | TH_UNINT: + case TH_RUN: + /* + * No reason to stop. Put back on a run queue. + */ + old_thread->state |= TH_STACK_HANDOFF; + + /* Get pointer to scheduling policy "object" */ + policy = &sched_policy[old_thread->policy]; + + /* Leave enqueueing thread up to scheduling policy */ + sfr = policy->sp_ops.sp_thread_dispatch(policy, old_thread); + assert(sfr == SF_SUCCESS); + break; + + case TH_RUN | TH_WAIT | TH_UNINT: + case TH_RUN | TH_WAIT: + old_thread->sleep_stamp = sched_tick; + /* fallthrough */ + + case TH_WAIT: /* this happens! */ + /* + * Waiting + */ + old_thread->state |= TH_STACK_HANDOFF; + old_thread->state &= ~TH_RUN; + if (old_thread->state & TH_TERMINATE) + thread_reaper_enqueue(old_thread); + + if (old_thread->wake_active) { + old_thread->wake_active = FALSE; + thread_unlock(old_thread); + wake_unlock(old_thread); + thread_wakeup((event_t)&old_thread->wake_active); + wake_lock(old_thread); + thread_lock(old_thread); + } + break; + + case TH_RUN | TH_IDLE: + /* + * Drop idle thread -- it is already in + * idle_thread_array. 
+ */ + old_thread->state |= TH_STACK_HANDOFF; + break; + + default: + panic("State 0x%x \n",old_thread->state); + } + + /* Get pointer to scheduling policy "object" */ + policy = &sched_policy[old_thread->policy]; + + /* Indicate to sched policy that old thread has stopped execution */ + /*** ??? maybe use a macro -- rkc, 1/4/96 ***/ + sfr = policy->sp_ops.sp_thread_done(policy, old_thread); + assert(sfr == SF_SUCCESS); + thread_unlock(old_thread); + wake_unlock(old_thread); + thread_lock(new_thread); + + assert(thread_runnable(new_thread)); + + /* Get pointer to scheduling policy "object" */ + policy = &sched_policy[new_thread->policy]; + + /* Indicate to sched policy that new thread has started execution */ + /*** ??? maybe use a macro ***/ + sfr = policy->sp_ops.sp_thread_begin(policy, new_thread); + assert(sfr == SF_SUCCESS); + + lcont = new_thread->continuation; + new_thread->continuation = (void(*)(void))0; + + thread_unlock(new_thread); + enable_preemption(); + + counter_always(c_thread_invoke_hits++); + + if (new_thread->funnel_state & TH_FN_REFUNNEL) { + kern_return_t save_wait_result; + new_thread->funnel_state = 0; + save_wait_result = new_thread->wait_result; + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0); + //mutex_lock(new_thread->funnel_lock); + funnel_lock(new_thread->funnel_lock); + KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, new_thread->funnel_lock, 2, 0, 0, 0); + new_thread->funnel_state = TH_FN_OWNED; + new_thread->wait_result = save_wait_result; + } + (void) spllo(); + + assert(lcont); + call_continuation(lcont); + /*NOTREACHED*/ + return TRUE; + + case TH_STACK_COMING_IN: + /* + * waiting for a stack + */ + thread_swapin(new_thread); + thread_unlock(new_thread); + counter_always(c_thread_invoke_misses++); + return FALSE; + + case 0: + /* + * already has a stack - can't handoff + */ + if (new_thread == old_thread) { + + /* same thread but with continuation */ + counter(++c_thread_invoke_same); + 
thread_unlock(new_thread); + + if (old_thread->funnel_state & TH_FN_REFUNNEL) { + kern_return_t save_wait_result; + + old_thread->funnel_state = 0; + save_wait_result = old_thread->wait_result; + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0); + funnel_lock(old_thread->funnel_lock); + KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, old_thread->funnel_lock, 3, 0, 0, 0); + old_thread->funnel_state = TH_FN_OWNED; + old_thread->wait_result = save_wait_result; + } + (void) spllo(); + call_continuation(continuation); + /*NOTREACHED*/ + } + break; + } + } else { + /* + * check that the new thread has a stack + */ + if (new_thread->state & TH_STACK_STATE) { + get_new_stack: + /* has no stack. if not already waiting for one try to get one */ + if ((new_thread->state & TH_STACK_COMING_IN) || + /* not already waiting. nonblocking try to get one */ + !stack_alloc_try(new_thread, thread_continue)) + { + /* couldn't get one. schedule new thread to get a stack and + return failure so we can try another thread. */ + thread_swapin(new_thread); + thread_unlock(new_thread); + counter_always(c_thread_invoke_misses++); + return FALSE; + } + } else if (old_thread == new_thread) { + counter(++c_thread_invoke_same); + thread_unlock(new_thread); + return TRUE; + } + + /* new thread now has a stack. it has been setup to resume in + thread_continue so it can dispatch the old thread, deal with + funnelling and then go to it's true continuation point */ + } + + new_thread->state &= ~(TH_STACK_HANDOFF | TH_UNINT); + + /* + * Set up ast context of new thread and switch to its timer. + */ + new_thread->last_processor = current_processor(); + ast_context(new_thread->top_act, cpu_number()); + timer_switch(&new_thread->system_timer); + assert(thread_runnable(new_thread)); + + /* + * N.B. On return from the call to switch_context, 'old_thread' + * points at the thread that yielded to us. 
Unfortunately, at + * this point, there are no simple_locks held, so if we are preempted + * before the call to thread_dispatch blocks preemption, it is + * possible for 'old_thread' to terminate, leaving us with a + * stale thread pointer. + */ + disable_preemption(); + + thread_unlock(new_thread); + + counter_always(c_thread_invoke_csw++); + current_task()->csw++; + + + thread_lock(old_thread); + old_thread->reason = reason; + assert(old_thread->runq == RUN_QUEUE_NULL); + + if (continuation != (void (*)(void))0) + old_thread->continuation = continuation; + + /* Indicate to sched policy that old thread has stopped execution */ + policy = &sched_policy[old_thread->policy]; + /*** ??? maybe use a macro -- ***/ + sfr = policy->sp_ops.sp_thread_done(policy, old_thread); + assert(sfr == SF_SUCCESS); + thread_unlock(old_thread); + + /* + * switch_context is machine-dependent. It does the + * machine-dependent components of a context-switch, like + * changing address spaces. It updates active_threads. + */ + old_thread = switch_context(old_thread, continuation, new_thread); + + /* Now on new thread's stack. Set a local variable to refer to it. */ + new_thread = __current_thread(); + assert(old_thread != new_thread); + + assert(thread_runnable(new_thread)); + + thread_lock(new_thread); + assert(thread_runnable(new_thread)); + /* Indicate to sched policy that new thread has started execution */ + policy = &sched_policy[new_thread->policy]; + /*** ??? maybe use a macro -- rkc, 1/4/96 ***/ + sfr = policy->sp_ops.sp_thread_begin(policy, new_thread); + assert(sfr == SF_SUCCESS); + thread_unlock(new_thread); + + /* + * We're back. Now old_thread is the thread that resumed + * us, and we have to dispatch it. + */ + /* CHECKME! */ +// Code from OSF in Grenoble deleted the following fields. They were +// used in HPPA and 386 code, but not in the PPC for other than +// just setting and resetting. 
They didn't delete these lines from +// the MACH_RT builds, though, causing compile errors. I'm going +// to make a wild guess and assume we can just delete these. +#if 0 + if (old_thread->preempt == TH_NOT_PREEMPTABLE) { + /* + * Mark that we have been really preempted + */ + old_thread->preempt = TH_PREEMPTED; + } +#endif + thread_dispatch(old_thread); + enable_preemption(); + + /* if we get here and 'continuation' is set that means the + * switch_context() path returned and did not call out + * to the continuation. we will do it manually here */ + if (continuation) { + call_continuation(continuation); + /* NOTREACHED */ + } + + return TRUE; +} + +/* + * thread_continue: + * + * Called when the launching a new thread, at splsched(); + */ +void +thread_continue( + register thread_t old_thread) +{ + register thread_t self; + register void (*continuation)(); + sched_policy_t *policy; + sf_return_t sfr; + + self = current_thread(); + + /* + * We must dispatch the old thread and then + * call the current thread's continuation. + * There might not be an old thread, if we are + * the first thread to run on this processor. + */ + if (old_thread != THREAD_NULL) { + thread_dispatch(old_thread); + + thread_lock(self); + + /* Get pointer to scheduling policy "object" */ + policy = &sched_policy[self->policy]; + + /* Indicate to sched policy that new thread has started execution */ + /*** ??? maybe use a macro -- rkc, 1/4/96 ***/ + sfr = policy->sp_ops.sp_thread_begin(policy,self); + assert(sfr == SF_SUCCESS); + } else { + thread_lock(self); + } + + continuation = self->continuation; + self->continuation = (void (*)(void))0; + thread_unlock(self); + + /* + * N.B. - the following is necessary, since thread_invoke() + * inhibits preemption on entry and reenables before it + * returns. Unfortunately, the first time a newly-created + * thread executes, it magically appears here, and never + * executes the enable_preemption() call in thread_invoke(). 
+ */ + enable_preemption(); + + if (self->funnel_state & TH_FN_REFUNNEL) { + kern_return_t save_wait_result; + self->funnel_state = 0; + save_wait_result = self->wait_result; + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0); + funnel_lock(self->funnel_lock); + KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, self->funnel_lock, 4, 0, 0, 0); + self->wait_result = save_wait_result; + self->funnel_state = TH_FN_OWNED; + } + spllo(); + + assert(continuation); + (*continuation)(); + /*NOTREACHED*/ +} + +#if MACH_LDEBUG || MACH_KDB + +#define THREAD_LOG_SIZE 300 + +struct t64 { + unsigned long h; + unsigned long l; +}; + +struct { + struct t64 stamp; + thread_t thread; + long info1; + long info2; + long info3; + char * action; +} thread_log[THREAD_LOG_SIZE]; + +int thread_log_index; + +void check_thread_time(long n); + + +int check_thread_time_crash; + +#if 0 +void +check_thread_time(long us) +{ + struct t64 temp; + + if (!check_thread_time_crash) + return; + + temp = thread_log[0].stamp; + cyctm05_diff (&thread_log[1].stamp, &thread_log[0].stamp, &temp); + + if (temp.l >= us && thread_log[1].info != 0x49) /* HACK!!! 
*/ + panic ("check_thread_time"); +} +#endif + +void +log_thread_action(char * action, long info1, long info2, long info3) +{ + int i; + spl_t x; + static unsigned int tstamp; + + x = splhigh(); + + for (i = THREAD_LOG_SIZE-1; i > 0; i--) { + thread_log[i] = thread_log[i-1]; + } + + thread_log[0].stamp.h = 0; + thread_log[0].stamp.l = tstamp++; + thread_log[0].thread = current_thread(); + thread_log[0].info1 = info1; + thread_log[0].info2 = info2; + thread_log[0].info3 = info3; + thread_log[0].action = action; +/* strcpy (&thread_log[0].action[0], action);*/ + + splx(x); +} +#endif /* MACH_LDEBUG || MACH_KDB */ + +#if MACH_KDB +#include +void db_show_thread_log(void); + +void +db_show_thread_log(void) +{ + int i; + + db_printf ("%s %s %s %s %s %s\n", " Thread ", " Info1 ", " Info2 ", + " Info3 ", " Timestamp ", "Action"); + + for (i = 0; i < THREAD_LOG_SIZE; i++) { + db_printf ("%08x %08x %08x %08x %08x/%08x %s\n", + thread_log[i].thread, + thread_log[i].info1, + thread_log[i].info2, + thread_log[i].info3, + thread_log[i].stamp.h, + thread_log[i].stamp.l, + thread_log[i].action); + } +} +#endif /* MACH_KDB */ + +/* + * thread_block_reason: + * + * Block the current thread. If the thread is runnable + * then someone must have woken it up between its request + * to sleep and now. In this case, it goes back on a + * run queue. + * + * If a continuation is specified, then thread_block will + * attempt to discard the thread's kernel stack. When the + * thread resumes, it will execute the continuation function + * on a new kernel stack. 
+ */ +counter(mach_counter_t c_thread_block_calls = 0;) + +int +thread_block_reason( + void (*continuation)(void), + int reason) +{ + register thread_t thread = current_thread(); + register processor_t myprocessor; + register thread_t new_thread; + spl_t s; + + counter(++c_thread_block_calls); + + check_simple_locks(); + + machine_clock_assist(); + + s = splsched(); + + if ((thread->funnel_state & TH_FN_OWNED) && !(reason & AST_PREEMPT)) { + thread->funnel_state = TH_FN_REFUNNEL; + KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, thread->funnel_lock, 2, 0, 0, 0); + funnel_unlock(thread->funnel_lock); + } + + myprocessor = current_processor(); + + thread_lock(thread); + if (thread->state & TH_ABORT) + clear_wait_internal(thread, THREAD_INTERRUPTED); + + /* Unconditionally remove either | both */ + ast_off(AST_QUANTUM|AST_BLOCK|AST_URGENT); + + new_thread = thread_select(myprocessor); + assert(new_thread); + assert(thread_runnable(new_thread)); + thread_unlock(thread); + while (!thread_invoke(thread, new_thread, reason, continuation)) { + thread_lock(thread); + new_thread = thread_select(myprocessor); + assert(new_thread); + assert(thread_runnable(new_thread)); + thread_unlock(thread); + } + + if (thread->funnel_state & TH_FN_REFUNNEL) { + kern_return_t save_wait_result; + + save_wait_result = thread->wait_result; + thread->funnel_state = 0; + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0); + funnel_lock(thread->funnel_lock); + KERNEL_DEBUG(0x6032430 | DBG_FUNC_NONE, thread->funnel_lock, 5, 0, 0, 0); + thread->funnel_state = TH_FN_OWNED; + thread->wait_result = save_wait_result; + } + + splx(s); + + return thread->wait_result; +} + +/* + * thread_block: + * + * Now calls thread_block_reason() which forwards the + * the reason parameter to thread_invoke() so it can + * do the right thing if the thread's quantum expired. 
+ */ +int +thread_block( + void (*continuation)(void)) +{ + return thread_block_reason(continuation, 0); +} + +/* + * thread_run: + * + * Switch directly from the current thread to a specified + * thread. Both the current and new threads must be + * runnable. + * + * Assumption: + * at splsched. + */ +int +thread_run( + thread_t old_thread, + void (*continuation)(void), + thread_t new_thread) +{ + while (!thread_invoke(old_thread, new_thread, 0, continuation)) { + register processor_t myprocessor = current_processor(); + thread_lock(old_thread); + new_thread = thread_select(myprocessor); + thread_unlock(old_thread); + } + return old_thread->wait_result; +} + +/* + * Dispatches a running thread that is not on a runq. + * Called at splsched. + */ +void +thread_dispatch( + register thread_t thread) +{ + sched_policy_t *policy; + sf_return_t sfr; + + /* + * If we are discarding the thread's stack, we must do it + * before the thread has a chance to run. + */ + wake_lock(thread); + thread_lock(thread); + +#ifndef i386 + /* no continuations on i386 for now */ + if (thread->continuation != (void (*)())0) { + assert((thread->state & TH_STACK_STATE) == 0); + thread->state |= TH_STACK_HANDOFF; + stack_free(thread); + if (thread->top_act) { + act_machine_sv_free(thread->top_act); + } + } +#endif + + switch (thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_IDLE)) { + + case TH_RUN | TH_UNINT: + case TH_RUN: + /* + * No reason to stop. Put back on a run queue. + */ + /* Leave enqueueing thread up to scheduling policy */ + policy = &sched_policy[thread->policy]; + /*** ??? maybe use a macro ***/ + sfr = policy->sp_ops.sp_thread_dispatch(policy, thread); + assert(sfr == SF_SUCCESS); + break; + + case TH_RUN | TH_WAIT | TH_UNINT: + case TH_RUN | TH_WAIT: + thread->sleep_stamp = sched_tick; + /* fallthrough */ + case TH_WAIT: /* this happens! 
*/ + + /* + * Waiting + */ + thread->state &= ~TH_RUN; + if (thread->state & TH_TERMINATE) + thread_reaper_enqueue(thread); + + if (thread->wake_active) { + thread->wake_active = FALSE; + thread_unlock(thread); + wake_unlock(thread); + thread_wakeup((event_t)&thread->wake_active); + return; + } + break; + + case TH_RUN | TH_IDLE: + /* + * Drop idle thread -- it is already in + * idle_thread_array. + */ + break; + + default: + panic("State 0x%x \n",thread->state); + } + thread_unlock(thread); + wake_unlock(thread); +} + +/* + * Enqueue thread on run queue. Thread must be locked, + * and not already be on a run queue. + */ +int +run_queue_enqueue( + register run_queue_t rq, + register thread_t thread, + boolean_t tail) +{ + register int whichq; + int oldrqcount; + + whichq = thread->sched_pri; + assert(whichq >= MINPRI && whichq <= MAXPRI); + + simple_lock(&rq->lock); /* lock the run queue */ + assert(thread->runq == RUN_QUEUE_NULL); + if (tail) + enqueue_tail(&rq->queues[whichq], (queue_entry_t)thread); + else + enqueue_head(&rq->queues[whichq], (queue_entry_t)thread); + + setbit(MAXPRI - whichq, rq->bitmap); + if (whichq > rq->highq) + rq->highq = whichq; + + oldrqcount = rq->count++; + thread->runq = rq; + thread->whichq = whichq; +#if DEBUG + thread_check(thread, rq); +#endif /* DEBUG */ + simple_unlock(&rq->lock); + + return (oldrqcount); +} + +/* + * thread_setrun: + * + * Make thread runnable; dispatch directly onto an idle processor + * if possible. Else put on appropriate run queue (processor + * if bound, else processor set. Caller must have lock on thread. + * This is always called at splsched. + * The tail parameter, if TRUE || TAIL_Q, indicates that the + * thread should be placed at the tail of the runq. If + * FALSE || HEAD_Q the thread will be placed at the head of the + * appropriate runq. 
 */
void
thread_setrun(
    register thread_t   new_thread,
    boolean_t           may_preempt,
    boolean_t           tail)
{
    register processor_t        processor;
    register run_queue_t        runq;
    register processor_set_t    pset;
    thread_t                    thread;
    ast_t                       ast_flags = AST_BLOCK;

    mp_disable_preemption();

    assert(!(new_thread->state & TH_SWAPPED_OUT));
    assert(thread_runnable(new_thread));

    /*
     * Update priority if needed.
     */
    if (new_thread->sched_stamp != sched_tick)
        update_priority(new_thread);

    /*
     * FIFO/RR threads at or above (MAXPRI_KERNBAND - 2) request an
     * urgent AST (only when the kernel is built for kernel
     * preemption) so the switch is not deferred.
     */
    if (new_thread->policy & (POLICY_FIFO|POLICY_RR)) {
        if ( new_thread->sched_pri >= (MAXPRI_KERNBAND - 2) &&
             kernel_preemption_mode == KERNEL_PREEMPT )
            ast_flags |= AST_URGENT;
    }

    assert(new_thread->runq == RUN_QUEUE_NULL);

    /*
     * Try to dispatch the thread directly onto an idle processor.
     */
    if ((processor = new_thread->bound_processor) == PROCESSOR_NULL) {
        /*
         * Not bound, any processor in the processor set is ok.
         * idle_count is tested unlocked first, then re-tested under
         * the idle_lock before a processor is claimed.
         */
        pset = new_thread->processor_set;
        if (pset->idle_count > 0) {
            simple_lock(&pset->idle_lock);
            if (pset->idle_count > 0) {
                processor = (processor_t) queue_first(&pset->idle_queue);
                queue_remove(&(pset->idle_queue), processor, processor_t,
                             processor_queue);
                pset->idle_count--;
                processor->next_thread = new_thread;
                processor->state = PROCESSOR_DISPATCHING;
                simple_unlock(&pset->idle_lock);
                /* Signal only a remote idle cpu; the local cpu will
                 * notice next_thread on its own. */
                if(processor->slot_num != cpu_number())
                    machine_signal_idle(processor);
                mp_enable_preemption();
                return;
            }
            simple_unlock(&pset->idle_lock);
        }


        /*
         * Preempt check
         */
        runq = &pset->runq;
        thread = current_thread();
        processor = current_processor();
        if (    may_preempt                                 &&
                pset == processor->processor_set            &&
                thread->sched_pri < new_thread->sched_pri   ) {
            /*
             * XXX if we have a non-empty local runq or are
             * XXX running a bound thread, ought to check for
             * XXX another cpu running lower-pri thread to preempt.
             */
            /*
             * Turn off first_quantum to allow csw.
             */
            processor->first_quantum = FALSE;

            ast_on(ast_flags);
        }

        /*
         * Put us on the end of the runq, if we are not preempting
         * or the guy we are preempting.
         */
        run_queue_enqueue(runq, new_thread, tail);
    }
    else {
        /*
         * Bound, can only run on bound processor.  Have to lock
         * processor here because it may not be the current one.
         */
        if (processor->state == PROCESSOR_IDLE) {
            simple_lock(&processor->lock);
            pset = processor->processor_set;
            simple_lock(&pset->idle_lock);
            /* Re-check under the locks (double-checked idle test). */
            if (processor->state == PROCESSOR_IDLE) {
                queue_remove(&pset->idle_queue, processor,
                             processor_t, processor_queue);
                pset->idle_count--;
                processor->next_thread = new_thread;
                processor->state = PROCESSOR_DISPATCHING;
                simple_unlock(&pset->idle_lock);
                simple_unlock(&processor->lock);
                if(processor->slot_num != cpu_number())
                    machine_signal_idle(processor);
                mp_enable_preemption();
                return;
            }
            simple_unlock(&pset->idle_lock);
            simple_unlock(&processor->lock);
        }

        /*
         * Cause ast on processor if processor is on line, and the
         * currently executing thread is not bound to that processor
         * (bound threads have implicit priority over non-bound threads).
         * We also avoid sending the AST to the idle thread (if it got
         * scheduled in the window between the 'if' above and here),
         * since the idle_thread is bound.
         */
        runq = &processor->runq;
        thread = current_thread();
        if (processor == current_processor()) {
            if (    thread->bound_processor == PROCESSOR_NULL   ||
                    thread->sched_pri < new_thread->sched_pri   ) {
                processor->first_quantum = FALSE;
                ast_on(ast_flags);
            }

            run_queue_enqueue(runq, new_thread, tail);
        }
        else {
            /*
             * Remote processor: enqueue first; a return of 0 means
             * the runq was previously empty, so poke the processor
             * unless it is off line or running a thread bound to it.
             */
            thread = cpu_data[processor->slot_num].active_thread;
            if (    run_queue_enqueue(runq, new_thread, tail) == 0  &&
                    processor->state != PROCESSOR_OFF_LINE          &&
                    thread && thread->bound_processor != processor  )
                cause_ast_check(processor);
        }
    }

    mp_enable_preemption();
}

/*
 *  set_pri:
 *
 *  Set the priority of the specified thread to the specified
 *  priority.  This may cause the thread to change queues.
 *
 *  The thread *must* be locked by the caller.
 */
void
set_pri(
    thread_t    thread,
    int         pri,
    boolean_t   resched)
{
    register struct run_queue   *rq;

    /* Dequeue (if queued), change the priority, then requeue at the
     * new priority; resched additionally allows direct dispatch via
     * thread_setrun. */
    rq = rem_runq(thread);
    assert(thread->runq == RUN_QUEUE_NULL);
    thread->sched_pri = pri;
    if (rq != RUN_QUEUE_NULL) {
        if (resched)
            thread_setrun(thread, TRUE, TAIL_Q);
        else
            run_queue_enqueue(rq, thread, TAIL_Q);
    }
}

/*
 *  rem_runq:
 *
 *  Remove a thread from its run queue.
 *  The run queue that the process was on is returned
 *  (or RUN_QUEUE_NULL if not on a run queue).  Thread *must* be locked
 *  before calling this routine.  Unusual locking protocol on runq
 *  field in thread structure makes this code interesting; see thread.h.
 */
run_queue_t
rem_runq(
    thread_t    thread)
{
    register struct run_queue   *rq;

    rq = thread->runq;
    /*
     * If rq is RUN_QUEUE_NULL, the thread will stay out of the
     * run_queues because the caller locked the thread.  Otherwise
     * the thread is on a runq, but could leave.
     */
    if (rq != RUN_QUEUE_NULL) {
        simple_lock(&rq->lock);
        /* Re-check after taking the lock: another cpu may have
         * dequeued the thread in the window. */
        if (rq == thread->runq) {
            /*
             * Thread is in a runq and we have a lock on
             * that runq.
             */
#if DEBUG
            thread_check(thread, rq);
#endif  /* DEBUG */
            remqueue(&rq->queues[0], (queue_entry_t)thread);
            rq->count--;

            if (queue_empty(rq->queues + thread->sched_pri)) {
                /* update run queue status */
                if (thread->sched_pri != IDLEPRI)
                    clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
                rq->highq = MAXPRI - ffsbit(rq->bitmap);
            }
            thread->runq = RUN_QUEUE_NULL;
            simple_unlock(&rq->lock);
        }
        else {
            /*
             * The thread left the runq before we could
             * lock the runq.  It is not on a runq now, and
             * can't move again because this routine's
             * caller locked the thread.
             */
            assert(thread->runq == RUN_QUEUE_NULL);
            simple_unlock(&rq->lock);
            rq = RUN_QUEUE_NULL;
        }
    }

    return (rq);
}


/*
 *  choose_thread:
 *
 *  Choose a thread to execute.  The thread chosen is removed
 *  from its run queue.  Note that this requires only that the runq
 *  lock be held.
 *
 *  Strategy:
 *      Check processor runq first; if anything found, run it.
 *      Else check pset runq; if nothing found, return idle thread.
 *
 *  Second line of strategy is implemented by choose_pset_thread.
 *  This is only called on processor startup and when thread_block
 *  thinks there's something in the processor runq.
 */
thread_t
choose_thread(
    processor_t     myprocessor)
{
    thread_t                thread;
    register queue_t        q;
    register run_queue_t    runq;
    processor_set_t         pset;

    runq = &myprocessor->runq;
    pset = myprocessor->processor_set;

    simple_lock(&runq->lock);
    /* Use the local runq only while its best priority is at least as
     * good as the pset-wide runq's best. */
    if (runq->count > 0 && runq->highq >= pset->runq.highq) {
        q = runq->queues + runq->highq;
#if MACH_ASSERT
        if (!queue_empty(q)) {
#endif  /*MACH_ASSERT*/
            /* Open-coded dequeue of the head thread, for speed. */
            thread = (thread_t)q->next;
            ((queue_entry_t)thread)->next->prev = q;
            q->next = ((queue_entry_t)thread)->next;
            thread->runq = RUN_QUEUE_NULL;
            runq->count--;
            if (queue_empty(q)) {
                /* Level drained: clear its bitmap bit and recompute
                 * the highest occupied level. */
                if (runq->highq != IDLEPRI)
                    clrbit(MAXPRI - runq->highq, runq->bitmap);
                runq->highq = MAXPRI - ffsbit(runq->bitmap);
            }
            simple_unlock(&runq->lock);
            return (thread);
#if MACH_ASSERT
        }
        panic("choose_thread");
#endif  /*MACH_ASSERT*/
        /*NOTREACHED*/
    }

    /* Fall back to the pset runq; choose_pset_thread expects the pset
     * runq lock held and releases it. */
    simple_unlock(&runq->lock);
    simple_lock(&pset->runq.lock);
    return (choose_pset_thread(myprocessor, pset));
}


/*
 *  choose_pset_thread:  choose a thread from processor_set runq or
 *      set processor idle and choose its idle thread.
 *
 *  Caller must be at splsched and have a lock on the runq.  This
 *  lock is released by this routine.  myprocessor is always the current
 *  processor, and pset must be its processor set.
 *  This routine chooses and removes a thread from the runq if there
 *  is one (and returns it), else it sets the processor idle and
 *  returns its idle thread.
 */
thread_t
choose_pset_thread(
    register processor_t    myprocessor,
    processor_set_t         pset)
{
    register run_queue_t    runq;
    register thread_t       thread;
    register queue_t        q;

    runq = &pset->runq;
    if (runq->count > 0) {
        q = runq->queues + runq->highq;
#if MACH_ASSERT
        if (!queue_empty(q)) {
#endif  /*MACH_ASSERT*/
            /* Open-coded dequeue of the head thread at the highest
             * occupied priority level. */
            thread = (thread_t)q->next;
            ((queue_entry_t)thread)->next->prev = q;
            q->next = ((queue_entry_t)thread)->next;
            thread->runq = RUN_QUEUE_NULL;
            runq->count--;
            if (queue_empty(q)) {
                /* Level drained: clear bitmap bit, recompute highq. */
                if (runq->highq != IDLEPRI)
                    clrbit(MAXPRI - runq->highq, runq->bitmap);
                runq->highq = MAXPRI - ffsbit(runq->bitmap);
            }
            simple_unlock(&runq->lock);
            return (thread);
#if MACH_ASSERT
        }
        panic("choose_pset_thread");
#endif  /*MACH_ASSERT*/
        /*NOTREACHED*/
    }
    simple_unlock(&runq->lock);

    /*
     * Nothing is runnable, so set this processor idle if it
     * was running.  If it was in an assignment or shutdown,
     * leave it alone.  Return its idle thread.
     */
    simple_lock(&pset->idle_lock);
    if (myprocessor->state == PROCESSOR_RUNNING) {
        myprocessor->state = PROCESSOR_IDLE;
        /*
         * XXX Until it goes away, put master on end of queue, others
         * XXX on front so master gets used last.
         */
        if (myprocessor == master_processor)
            queue_enter(&(pset->idle_queue), myprocessor,
                        processor_t, processor_queue);
        else
            queue_enter_first(&(pset->idle_queue), myprocessor,
                              processor_t, processor_queue);

        pset->idle_count++;
    }
    simple_unlock(&pset->idle_lock);

    return (myprocessor->idle_thread);
}

/*
 * no_dispatch_count counts number of times processors go non-idle
 * without being dispatched.  This should be very rare.
 */
int no_dispatch_count = 0;

/*
 * This is the idle thread, which just looks for other threads
 * to execute.
 */
void
idle_thread_continue(void)
{
    register processor_t        myprocessor;
    register volatile thread_t  *threadp;
    register volatile int       *gcount;
    register volatile int       *lcount;
    register thread_t           new_thread;
    register int                state;
    register processor_set_t    pset;
    int                         mycpu;

    mycpu = cpu_number();
    myprocessor = current_processor();
    /* Volatile views of the dispatch slot and the local runq count:
     * both are written by other cpus while we spin below. */
    threadp = (volatile thread_t *) &myprocessor->next_thread;
    lcount = (volatile int *) &myprocessor->runq.count;

    for (;;) {
#ifdef  MARK_CPU_IDLE
        MARK_CPU_IDLE(mycpu);
#endif  /* MARK_CPU_IDLE */

        gcount = (volatile int *)&myprocessor->processor_set->runq.count;

        (void)splsched();
        /* Spin until a thread is handed to us (next_thread) or work
         * appears on either the pset or the local runq. */
        while ( (*threadp == (volatile thread_t)THREAD_NULL)    &&
                (*gcount == 0) && (*lcount == 0)    ) {

            /* check for ASTs while we wait */

            if (need_ast[mycpu] &~ (AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT)) {
                /* don't allow scheduling ASTs */
                need_ast[mycpu] &= ~(AST_SCHEDULING|AST_URGENT|AST_BSD|AST_BSD_INIT);
                ast_taken(FALSE, AST_ALL, TRUE);    /* back at spllo */
            }
            else
#ifdef  __ppc__
                machine_idle();
#else
                (void)spllo();
#endif
            machine_clock_assist();

            (void)splsched();
        }

#ifdef  MARK_CPU_ACTIVE
        (void)spllo();
        MARK_CPU_ACTIVE(mycpu);
        (void)splsched();
#endif  /* MARK_CPU_ACTIVE */

        /*
         * This is not a switch statement to avoid the
         * bounds checking code in the common case.
         */
        pset = myprocessor->processor_set;
        simple_lock(&pset->idle_lock);
retry:
        state = myprocessor->state;
        if (state == PROCESSOR_DISPATCHING) {
            /*
             * Common case -- cpu dispatched.
             */
            new_thread = *threadp;
            *threadp = (volatile thread_t) THREAD_NULL;
            myprocessor->state = PROCESSOR_RUNNING;
            simple_unlock(&pset->idle_lock);

            thread_lock(new_thread);
            simple_lock(&myprocessor->runq.lock);
            simple_lock(&pset->runq.lock);
            /* If either runq now holds something better than the
             * handed-off thread, requeue it at the head and block. */
            if (    myprocessor->runq.highq > new_thread->sched_pri ||
                    pset->runq.highq > new_thread->sched_pri    ) {
                simple_unlock(&pset->runq.lock);
                simple_unlock(&myprocessor->runq.lock);

                if (new_thread->bound_processor != PROCESSOR_NULL)
                    run_queue_enqueue(&myprocessor->runq, new_thread, HEAD_Q);
                else
                    run_queue_enqueue(&pset->runq, new_thread, HEAD_Q);
                thread_unlock(new_thread);

                counter(c_idle_thread_block++);
                thread_block(idle_thread_continue);
            }
            else {
                simple_unlock(&pset->runq.lock);
                simple_unlock(&myprocessor->runq.lock);

                /*
                 * set up quantum for new thread.
                 */
                if (new_thread->policy & (POLICY_RR|POLICY_FIFO))
                    myprocessor->quantum = new_thread->unconsumed_quantum;
                else
                    myprocessor->quantum = pset->set_quantum;
                thread_unlock(new_thread);

                myprocessor->first_quantum = TRUE;
                counter(c_idle_thread_handoff++);
                thread_run(myprocessor->idle_thread,
                           idle_thread_continue, new_thread);
            }
        }
        else
        if (state == PROCESSOR_IDLE) {
            if (myprocessor->state != PROCESSOR_IDLE) {
                /*
                 * Something happened, try again.
                 */
                goto retry;
            }
            /*
             * Processor was not dispatched (Rare).
             * Set it running again.
             */
            no_dispatch_count++;
            pset->idle_count--;
            queue_remove(&pset->idle_queue, myprocessor,
                         processor_t, processor_queue);
            myprocessor->state = PROCESSOR_RUNNING;
            simple_unlock(&pset->idle_lock);

            counter(c_idle_thread_block++);
            thread_block(idle_thread_continue);
        }
        else
        if (    state == PROCESSOR_ASSIGN   ||
                state == PROCESSOR_SHUTDOWN ) {
            /*
             * Changing processor sets, or going off-line.
             * Release next_thread if there is one.  Actual
             * thread to run is on a runq.
             */
            if ((new_thread = (thread_t)*threadp) != THREAD_NULL) {
                *threadp = (volatile thread_t) THREAD_NULL;
                simple_unlock(&pset->idle_lock);
                thread_lock(new_thread);
                thread_setrun(new_thread, FALSE, TAIL_Q);
                thread_unlock(new_thread);
            } else
                simple_unlock(&pset->idle_lock);

            counter(c_idle_thread_block++);
            thread_block(idle_thread_continue);
        }
        else {
            simple_unlock(&pset->idle_lock);
            printf("Bad processor state %d (Cpu %d)\n",
                cpu_state(mycpu), mycpu);
            panic("idle_thread");

        }

        (void)spllo();
    }
}

/*
 * idle_thread: entry point for a processor's idle thread.  Marks
 * itself unswappable, drops to IDLEPRI, then runs
 * idle_thread_continue forever.
 */
void
idle_thread(void)
{
    thread_t    self = current_thread();
    spl_t       s;

    stack_privilege(self);
    thread_swappable(current_act(), FALSE);

    s = splsched();
    thread_lock(self);

    self->priority = IDLEPRI;
    self->sched_pri = self->priority;

    thread_unlock(self);
    splx(s);

    counter(c_idle_thread_block++);
    thread_block((void(*)(void))0);
    idle_thread_continue();
    /*NOTREACHED*/
}

/* Tick period and next firing time for the sched_tick thread. */
static AbsoluteTime     sched_tick_interval, sched_tick_deadline;

/*
 *  sched_tick_thread
 *
 *  Update the priorities of all threads periodically.
 */
void
sched_tick_thread_continue(void)
{
    AbsoluteTime    abstime;
#if SIMPLE_CLOCK
    int             new_usec;
#endif  /* SIMPLE_CLOCK */

    clock_get_uptime(&abstime);

    sched_tick++;       /* age usage one more time */
#if SIMPLE_CLOCK
    /*
     * Compensate for clock drift.  sched_usec is an
     * exponential average of the number of microseconds in
     * a second.  It decays in the same fashion as cpu_usage.
     */
    new_usec = sched_usec_elapsed();
    sched_usec = (5*sched_usec + 3*new_usec)/8;
#endif  /* SIMPLE_CLOCK */

    /*
     * Compute the scheduler load factors.
     */
    compute_mach_factor();

    /*
     * Scan the run queues for runnable threads that need to
     * have their priorities recalculated.
 */
    do_thread_scan();

    clock_deadline_for_periodic_event(sched_tick_interval, abstime,
                                      &sched_tick_deadline);

    /* Sleep until the next tick deadline, then resume at the top of
     * this continuation. */
    assert_wait((event_t)sched_tick_thread_continue, THREAD_INTERRUPTIBLE);
    thread_set_timer_deadline(sched_tick_deadline);
    thread_block(sched_tick_thread_continue);
    /*NOTREACHED*/
}

/*
 * sched_tick_thread: entry point for the scheduler-tick kernel
 * thread.  Marks itself unswappable, raises itself to
 * MAXPRI_STANDARD, computes the tick interval, then runs
 * sched_tick_thread_continue forever.
 */
void
sched_tick_thread(void)
{
    thread_t    self = current_thread();
    natural_t   rate;
    spl_t       s;

    stack_privilege(self);
    thread_swappable(self->top_act, FALSE);

    s = splsched();
    thread_lock(self);

    self->priority = MAXPRI_STANDARD;
    self->sched_pri = self->priority;

    thread_unlock(self);
    splx(s);

    /* NOTE(review): (1000 >> SCHED_TICK_SHIFT) scaled by USEC_PER_SEC
     * presumably yields the tick period in absolute time -- confirm
     * against clock_interval_to_absolutetime_interval's units. */
    rate = (1000 >> SCHED_TICK_SHIFT);
    clock_interval_to_absolutetime_interval(rate, USEC_PER_SEC,
                                            &sched_tick_interval);
    clock_get_uptime(&sched_tick_deadline);

    thread_block(sched_tick_thread_continue);
    /*NOTREACHED*/
}

#define MAX_STUCK_THREADS   128

/*
 * do_thread_scan: scan for stuck threads.  A thread is stuck if
 * it is runnable but its priority is so low that it has not
 * run for several seconds.  Its priority should be higher, but
 * won't be until it runs and calls update_priority.  The scanner
 * finds these threads and does the updates.
 *
 * Scanner runs in two passes.  Pass one squirrels likely
 * thread ids away in an array  (takes out references for them).
 * Pass two does the priority updates.  This is necessary because
 * the run queue lock is required for the candidate scan, but
 * cannot be held during updates [set_pri will deadlock].
 *
 * Array length should be enough so that restart isn't necessary,
 * but restart logic is included.  Scans the pset runq and each
 * per-processor runq (see do_thread_scan below).
 *
 */
thread_t    stuck_threads[MAX_STUCK_THREADS];
int         stuck_count = 0;

/*
 * do_runq_scan is the guts of pass 1.  It scans a runq for
 * stuck threads.  A boolean is returned indicating whether
 * a retry is needed.
 */
boolean_t
do_runq_scan(
    run_queue_t runq)
{
    register queue_t    q;
    register thread_t   thread;
    register int        count;
    spl_t               s;
    boolean_t           result = FALSE;

    s = splsched();
    simple_lock(&runq->lock);
    if ((count = runq->count) > 0) {
        /* Walk downward from the highest occupied level until all
         * `count' queued threads have been visited. */
        q = runq->queues + runq->highq;
        while (count > 0) {
            queue_iterate(q, thread, thread_t, links) {
                if (    !(thread->state & (TH_WAIT|TH_SUSP))    &&
                        thread->policy == POLICY_TIMESHARE      ) {
                    if (thread->sched_stamp != sched_tick) {
                        /*
                         * Stuck, save its id for later.
                         */
                        if (stuck_count == MAX_STUCK_THREADS) {
                            /*
                             * !@#$% No more room.
                             */
                            simple_unlock(&runq->lock);
                            splx(s);

                            return (TRUE);
                        }

                        /*
                         * Inline version of thread_reference
                         * XXX - lock ordering problem here:
                         * thread locks should be taken before runq
                         * locks: just try and get the thread's locks
                         * and ignore this thread if we fail, we might
                         * have better luck next time.
                         */
                        if (simple_lock_try(&thread->lock)) {
                            thread->ref_count++;
                            thread_unlock(thread);
                            stuck_threads[stuck_count++] = thread;
                        }
                        else
                            result = TRUE;
                    }
                }

                count--;
            }

            q--;
        }
    }
    simple_unlock(&runq->lock);
    splx(s);

    return (result);
}

/* Debug switch: setting this FALSE disables the stuck-thread scan. */
boolean_t   thread_scan_enabled = TRUE;

/*
 * do_thread_scan: pass 1 collects candidate threads from the pset
 * runq and every per-processor runq; pass 2 updates their priorities.
 */
void
do_thread_scan(void)
{
    register boolean_t          restart_needed = FALSE;
    register thread_t           thread;
    register processor_set_t    pset = &default_pset;
    register processor_t        processor;
    spl_t                       s;

    if (!thread_scan_enabled)
        return;

    do {
        restart_needed = do_runq_scan(&pset->runq);
        if (!restart_needed) {
            simple_lock(&pset->processors_lock);
            processor = (processor_t)queue_first(&pset->processors);
            while (!queue_end(&pset->processors, (queue_entry_t)processor)) {
                if (restart_needed = do_runq_scan(&processor->runq))
                    break;

                processor = (processor_t)queue_next(&processor->processors);
            }
            simple_unlock(&pset->processors_lock);
        }

        /*
         * Ok, we now have a collection of candidates -- fix them.
 */
        while (stuck_count > 0) {
            thread = stuck_threads[--stuck_count];
            stuck_threads[stuck_count] = THREAD_NULL;
            s = splsched();
            thread_lock(thread);
            /* Re-validate under the thread lock: only still-runnable
             * timesharing threads that missed this tick get updated. */
            if (thread->policy == POLICY_TIMESHARE) {
                if (    !(thread->state & (TH_WAIT|TH_SUSP))    &&
                        thread->sched_stamp != sched_tick       )
                    update_priority(thread);
            }
            thread_unlock(thread);
            splx(s);
            /* Drop the reference taken by do_runq_scan. */
            thread_deallocate(thread);
        }

    } while (restart_needed);
}

/*
 * Just in case someone doesn't use the macro
 */
#undef thread_wakeup
void
thread_wakeup(
    event_t     x);

/* Out-of-line fallback for the thread_wakeup macro. */
void
thread_wakeup(
    event_t     x)
{
    thread_wakeup_with_result(x, THREAD_AWAKENED);
}

/* Ask the thread's scheduling policy whether the thread is runnable;
 * FALSE if the policy id cannot be resolved. */
boolean_t
thread_runnable(
    thread_t    thread)
{
    sched_policy_t  *policy;

    /* Ask sched policy if thread is runnable */
    policy = policy_id_to_sched_policy(thread->policy);

    return ((policy != SCHED_POLICY_NULL)?
                policy->sp_ops.sp_thread_runnable(policy, thread) : FALSE);
}

#if DEBUG

/* Debug dump of a processor_set structure. */
void
dump_processor_set(
    processor_set_t ps)
{
    printf("processor_set: %08x\n",ps);
    printf("idle_queue: %08x %08x, idle_count: 0x%x\n",
        ps->idle_queue.next,ps->idle_queue.prev,ps->idle_count);
    printf("processors: %08x %08x, processor_count: 0x%x\n",
        ps->processors.next,ps->processors.prev,ps->processor_count);
    printf("tasks: %08x %08x, task_count: 0x%x\n",
        ps->tasks.next,ps->tasks.prev,ps->task_count);
    printf("threads: %08x %08x, thread_count: 0x%x\n",
        ps->threads.next,ps->threads.prev,ps->thread_count);
    printf("ref_count: 0x%x, active: %x\n",
        ps->ref_count,ps->active);
    printf("pset_self: %08x, pset_name_self: %08x\n",ps->pset_self, ps->pset_name_self);
    printf("max_priority: 0x%x, policies: 0x%x, set_quantum: 0x%x\n",
        ps->max_priority, ps->policies, ps->set_quantum);
}

/* Map a processor state to a printable name (see states[] below). */
#define processor_state(s) (((s)>PROCESSOR_SHUTDOWN)?"*unknown*":states[s])

/* Debug dump of a processor structure. */
void
dump_processor(
    processor_t p)
{
    char    *states[]={"OFF_LINE","RUNNING","IDLE","DISPATCHING",
                "ASSIGN","SHUTDOWN"};

    printf("processor: %08x\n",p);

printf("processor_queue: %08x %08x\n", + p->processor_queue.next,p->processor_queue.prev); + printf("state: %8s, next_thread: %08x, idle_thread: %08x\n", + processor_state(p->state), p->next_thread, p->idle_thread); + printf("quantum: %u, first_quantum: %x, last_quantum: %u\n", + p->quantum, p->first_quantum, p->last_quantum); + printf("processor_set: %08x, processor_set_next: %08x\n", + p->processor_set, p->processor_set_next); + printf("processors: %08x %08x\n", p->processors.next,p->processors.prev); + printf("processor_self: %08x, slot_num: 0x%x\n", p->processor_self, p->slot_num); +} + +void +dump_run_queue_struct( + run_queue_t rq) +{ + char dump_buf[80]; + int i; + + for( i=0; i < NRQS; ) { + int j; + + printf("%6s",(i==0)?"runq:":""); + for( j=0; (j<8) && (i < NRQS); j++,i++ ) { + if( rq->queues[i].next == &rq->queues[i] ) + printf( " --------"); + else + printf(" %08x",rq->queues[i].next); + } + printf("\n"); + } + for( i=0; i < NRQBM; ) { + register unsigned int mask; + char *d=dump_buf; + + mask = ~0; + mask ^= (mask>>1); + + do { + *d++ = ((rq->bitmap[i]&mask)?'r':'e'); + mask >>=1; + } while( mask ); + *d = '\0'; + printf("%8s%s\n",((i==0)?"bitmap:":""),dump_buf); + i++; + } + printf("highq: 0x%x, count: %u\n", rq->highq, rq->count); +} + +void +dump_run_queues( + run_queue_t runq) +{ + register queue_t q1; + register int i; + register queue_entry_t e; + + q1 = runq->queues; + for (i = 0; i < NRQS; i++) { + if (q1->next != q1) { + int t_cnt; + + printf("[%u]",i); + for (t_cnt=0, e = q1->next; e != q1; e = e->next) { + printf("\t0x%08x",e); + if( (t_cnt = ++t_cnt%4) == 0 ) + printf("\n"); + } + if( t_cnt ) + printf("\n"); + } + /* else + printf("[%u]\t\n",i); + */ + q1++; + } +} + +void +checkrq( + run_queue_t rq, + char *msg) +{ + register queue_t q1; + register int i, j; + register queue_entry_t e; + register int highq; + + highq = NRQS; + j = 0; + q1 = rq->queues; + for (i = MAXPRI; i >= 0; i--) { + if (q1->next == q1) { + if (q1->prev != q1) { + 
panic("checkrq: empty at %s", msg); + } + } + else { + if (highq == -1) + highq = i; + + for (e = q1->next; e != q1; e = e->next) { + j++; + if (e->next->prev != e) + panic("checkrq-2 at %s", msg); + if (e->prev->next != e) + panic("checkrq-3 at %s", msg); + } + } + q1++; + } + if (j != rq->count) + panic("checkrq: count wrong at %s", msg); + if (rq->count != 0 && highq > rq->highq) + panic("checkrq: highq wrong at %s", msg); +} + +void +thread_check( + register thread_t thread, + register run_queue_t rq) +{ + register int whichq = thread->sched_pri; + register queue_entry_t queue, entry; + + if (whichq < MINPRI || whichq > MAXPRI) + panic("thread_check: bad pri"); + + if (whichq != thread->whichq) + panic("thread_check: whichq"); + + queue = &rq->queues[whichq]; + entry = queue_first(queue); + while (!queue_end(queue, entry)) { + if (entry == (queue_entry_t)thread) + return; + + entry = queue_next(entry); + } + + panic("thread_check: not found"); +} + +#endif /* DEBUG */ + +#if MACH_KDB +#include +#define printf kdbprintf +extern int db_indent; +void db_sched(void); + +void +db_sched(void) +{ + iprintf("Scheduling Statistics:\n"); + db_indent += 2; + iprintf("Thread invocations: csw %d same %d\n", + c_thread_invoke_csw, c_thread_invoke_same); +#if MACH_COUNTERS + iprintf("Thread block: calls %d\n", + c_thread_block_calls); + iprintf("Idle thread:\n\thandoff %d block %d no_dispatch %d\n", + c_idle_thread_handoff, + c_idle_thread_block, no_dispatch_count); + iprintf("Sched thread blocks: %d\n", c_sched_thread_block); +#endif /* MACH_COUNTERS */ + db_indent -= 2; +} +#endif /* MACH_KDB */ diff --git a/osfmk/kern/sched_prim.h b/osfmk/kern/sched_prim.h new file mode 100644 index 000000000..b978cb01f --- /dev/null +++ b/osfmk/kern/sched_prim.h @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: sched_prim.h + * Author: David Golub + * + * Scheduling primitive definitions file + * + */ + +#ifndef _KERN_SCHED_PRIM_H_ +#define _KERN_SCHED_PRIM_H_ + +#include +#include +#include +#include +#include +#include +#include +#include /*** ??? temp - remove me soon ***/ +#include +#include + +#ifdef MACH_KERNEL_PRIVATE + +#include +/* + * Exported interface to sched_prim.c. + * A few of these functions are actually defined in + * ipc_sched.c, for historical reasons. + */ + +/* Initialize scheduler module */ +extern void sched_init(void); + +/* + * Set up thread timeout element(s) when thread is created. + */ +extern void thread_timer_setup( + thread_t thread); + +extern void thread_timer_terminate(void); + +#define thread_bind_locked(thread, processor) \ + (thread)->bound_processor = (processor) + +/* + * Prevent a thread from restarting after it blocks interruptibly + */ +extern boolean_t thread_stop( + thread_t thread); + +/* + * wait for a thread to stop + */ +extern boolean_t thread_wait( + thread_t thread); + +/* Select a thread to run on a particular processor */ +extern thread_t thread_select( + processor_t myprocessor); + +extern void thread_go_locked( + thread_t thread, + int result); + +/* Stop old thread and run new thread */ +extern boolean_t thread_invoke( + thread_t old_thread, + thread_t new_thread, + int reason, + void (*continuation)(void)); + +/* Called when current thread is given new stack */ +extern void thread_continue( + thread_t old_thread); + +/* Switch directly to a particular thread */ +extern int thread_run( + thread_t old_thread, + void (*continuation)(void), + thread_t new_thread); + +/* Dispatch a thread not on a run queue */ +extern void thread_dispatch( + thread_t thread); + +/* Invoke continuation */ +extern void call_continuation( + void (*continuation)(void)); + +/* Compute effective priority of the specified thread */ +extern void compute_priority( + thread_t thread, + int resched); + +/* Version of 
compute_priority for current thread or
 *  thread being manipulated by scheduler.
 */
extern void     compute_my_priority(
                    thread_t    thread);

/* Periodic scheduler activity */
extern void     sched_tick_thread(void);

/* Update priority of thread that has been sleeping or suspended.
 * Used to "catch up" with the system.
 */
extern void     update_priority(
                    thread_t    thread);

/* Idle thread loop */
extern void     idle_thread(void);

/*
 *  thread_sleep_interlock:
 *
 *  Cause the current thread to wait until the specified event
 *  occurs.  The specified HW interlock is unlocked before releasing
 *  the cpu.  (This is a convenient way to sleep without manually
 *  calling assert_wait).
 */

#define thread_sleep_interlock(event, lock, interruptible)  \
MACRO_BEGIN                                                 \
    assert_wait(event, interruptible);                      \
    interlock_unlock(lock);                                 \
    thread_block((void (*)(void)) 0);                       \
MACRO_END

/*
 *  Machine-dependent code must define these functions.
 */

/* Start thread running */
extern void     thread_bootstrap_return(void);

/* Return from exception */
extern void     thread_exception_return(void);

/* Continuation return from syscall */
extern void     thread_syscall_return(
                    kern_return_t   ret);

extern thread_t switch_context(
                    thread_t    old_thread,
                    void        (*continuation)(void),
                    thread_t    new_thread);

/* Attach stack to thread */
extern void     machine_kernel_stack_init(
                    thread_t    thread,
                    void        (*start_pos)(thread_t));

extern void     load_context(
                    thread_t    thread);

extern thread_act_t switch_act(
                    thread_act_t    act);

extern void     machine_switch_act(
                    thread_t        thread,
                    thread_act_t    old,
                    thread_act_t    new,
                    int             cpu);

/*
 * These functions are either defined in kern/thread.c
 * or are defined directly by machine-dependent code.
+ */ + +/* Allocate an activation stack */ +extern vm_offset_t stack_alloc(thread_t thread, void (*start_pos)(thread_t)); + +/* Free an activation stack */ +extern void stack_free(thread_t thread); + +/* Collect excess kernel stacks */ +extern void stack_collect(void); + +extern void set_pri( + thread_t thread, + int pri, + boolean_t resched); + +/* Block current thread, indicating reason (Block or Quantum expiration) */ +extern int thread_block_reason( + void (*continuation)(void), + int reason); + +/* Make thread runnable */ +extern void thread_setrun( + thread_t thread, + boolean_t may_preempt, + boolean_t tail); +/* + * Flags for thread_setrun() + */ + +#define HEAD_Q 0 /* FALSE */ +#define TAIL_Q 1 /* TRUE */ + +/* Bind thread to a particular processor */ +extern void thread_bind( + thread_t thread, + processor_t processor); + +extern void thread_mark_wait_locked( + thread_t thread, + int interruptible); + +#endif /* MACH_KERNEL_PRIVATE */ + +/* + ****************** Only exported until BSD stops using ******************** + */ + +/* + * Cancel a stop and continue the thread if necessary. 
+ */ +extern void thread_unstop( + thread_t thread); + +/* Wake up thread directly, passing result */ +extern void clear_wait( + thread_t thread, + int result); + +/* Bind thread to a particular processor */ +extern void thread_bind( + thread_t thread, + processor_t processor); + + +/* + * ********************* PUBLIC APIs ************************************ + */ + +/* Set timer for current thread */ +extern void thread_set_timer( + natural_t interval, + natural_t scale_factor); + +extern void thread_set_timer_deadline( + AbsoluteTime deadline); + +extern void thread_cancel_timer(void); + +/* + * thread_stop a thread then wait for it to stop (both of the above) + */ +extern boolean_t thread_stop_wait( + thread_t thread); + +/* Declare thread will wait on a particular event */ +extern void assert_wait( + event_t event, + int interruptflag); + +/* Assert that the thread intends to wait for a timeout */ +extern void assert_wait_timeout( + natural_t msecs, + int interruptflags); + +/* Wake up thread (or threads) waiting on a particular event */ +extern void thread_wakeup_prim( + event_t event, + boolean_t one_thread, + int result); + +/* Block current thread (Block reason) */ +extern int thread_block( + void (*continuation)(void)); + + +/* + * Routines defined as macros + */ + +#define thread_wakeup(x) \ + thread_wakeup_prim((x), FALSE, THREAD_AWAKENED) +#define thread_wakeup_with_result(x, z) \ + thread_wakeup_prim((x), FALSE, (z)) +#define thread_wakeup_one(x) \ + thread_wakeup_prim((x), TRUE, THREAD_AWAKENED) + +/* + * thread_sleep_mutex: + * + * Cause the current thread to wait until the specified event + * occurs. The specified mutex is unlocked before releasing + * the cpu. (This is a convenient way to sleep without manually + * calling assert_wait). 
+ */ + +#define thread_sleep_mutex(event, lock, interruptible) \ +MACRO_BEGIN \ + assert_wait(event, interruptible); \ + mutex_unlock(lock); \ + thread_block((void (*)(void)) 0); \ +MACRO_END + +/* + * thread_sleep_simple_lock: + * + * Cause the current thread to wait until the specified event + * occurs. The specified simple_lock is unlocked before releasing + * the cpu. (This is a convenient way to sleep without manually + * calling assert_wait). + */ + +#define thread_sleep_simple_lock(event, lock, interruptible) \ +MACRO_BEGIN \ + assert_wait(event, interruptible); \ + simple_unlock(lock); \ + thread_block((void (*)(void)) 0); \ +MACRO_END + + +#endif /* _KERN_SCHED_PRIM_H_ */ diff --git a/osfmk/kern/sf.c b/osfmk/kern/sf.c new file mode 100644 index 000000000..bde79e5a2 --- /dev/null +++ b/osfmk/kern/sf.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include +#include +#include + +sched_policy_t sched_policy[MAX_SCHED_POLS]; + +void +sf_init(void) +{ + sched_policy[POLICY_TIMESHARE].policy_id = POLICY_TIMESHARE; + sched_policy[POLICY_RR].policy_id = POLICY_RR; + sched_policy[POLICY_FIFO].policy_id = POLICY_FIFO; + + sched_policy[POLICY_TIMESHARE].sp_ops = + sched_policy[POLICY_RR].sp_ops = + sched_policy[POLICY_FIFO].sp_ops = mk_sp_ops; +} diff --git a/osfmk/kern/sf.h b/osfmk/kern/sf.h new file mode 100644 index 000000000..f3810d0a6 --- /dev/null +++ b/osfmk/kern/sf.h @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _KERN_SF_H_ +#define _KERN_SF_H_ + +/* + * The calls most likely to change are: policy_thread_done and + * policy_thread_begin. They're the policy calls related to + * context switching. I'm not satisfied with what I have now and + * these are the forms I'm trying next. 
+ * + * I still have to merge the data type names from my different sandboxes + * and I don't really talk about locking except for the run queue locking. + * + * There is a big change for run queues: there is a single lock for an + * entire run queue array structure (instead of a lock per queue header). + * It's OK for a policy to reorganize a particular queue BUT it has to + * disable the queue header (sched_queue_disable). Since a queue header + * isn't shared by multiple policies and the framework won't touch the + * queue header if it's disabled, the policy can do anything it wants + * without taking out a global lock. + * + * The only run queue primitives provided are the really fast ones: + * insert at the head (sched_queue_preempt), insert at the tail + * and if the queue was empty check for preemption + * (sched_queue_add_preempt), just insert at the tail + * (sched_queue_add_only), and remove (sched_queue_remove). Everything + * else needs to be done by first disabling the queue header (and then + * you can do whatever you want to the queue). + * + * BTW, the convention here is: + * + * policy_xxx - calls from the framework into policies (via the + * pointers in the policy object) + * + * sched_xxx - scheduling mechanisms provided by the framework + * which can be called by policies. + * + * ---------- + * + * Initializes an instance of a scheduling policy assigning it the + * corresponding policy_id and run queue headers. 
+ * + * policy_init( + * sched_policy_object *policy, + * int policy_id, + * int minpri, maxpri); + * + * Enable/disable a scheduling policy on a processor [set] + * + * policy_enable_processor_set( + * sched_policy_object *policy, / * policy * / + * processor_set_t processor_set ); / * processor set * / + * + * policy_disable_processor_set( + * sched_policy_object *policy, + * processor_set_t processor_set); + * + * policy_enable_processor( + * sched_policy_object *policy, + * processor_t processor ); + * + * policy_disable_processor( + * sched_policy_object *policy, + * processor_t processor); + * + * Notifies the policy that the thread has become runnable + * + * policy_thread_unblock( + * sched_policy_object *policy, + * thread_t thread ) + * + * Notifies the policy that the current thread is done or + * a new thread has been selected to run + * + * policy_thread_done( + * sched_policy_object *policy, + * thread_t *old_thread ); + * + * policy_thread_begin( + * sched_policy_object *policy, + * thread_t *new_thread ); + * + * Attach/detach a thread from the scheduling policy + * + * policy_thread_attach( + * sched_policy_object *policy, + * thread_t *thread ); + * + * policy_thread_detach( + * sched_policy_object *policy, + * thread_t *thread ); + * + * Set the thread's processor [set] + * + * policy_thread_processor( + * sched_policy_object *policy, + * thread_t *thread, + * processor_t processor ); + * + * policy_thread_processor_set( + * sched_policy_object *policy, + * thread_t *thread, + * processor_set_t processor_set); + * + * Scheduling Framework Interfaces + * + * [en/dis]able particular run queue headers on a processor [set], + * + * Lock the run queues, update the mask, unlock the run queues. If + * enabling, check preemption. 
+ * + * sched_queue_enable( + * run_queue_t runq, + * sched_priority_mask *mask ); + * + * sched_queue_disable( + * run_queue_t runq, + * sched_priority_mask *mask ); + * + * Lock the run queues, insert the thread at the head, unlock the + * run queues and preempt (if possible). + * + * sched_queue_preempt( + * integer_t priority, + * thread_t thread, + * run_queue_t run_queues ); + * + * Lock the run queues, add the thread to the tail, unlock the run queues + * and preempt if appropriate. + * + * sched_queue_add_preempt( + * integer_t priority, + * thread_t thread, + * run_queue_t run_queues ); + * + * Lock the run queues, add the thread to the tail, unlock the queues + * but don't check for preemption. + * + * sched_queue_add_only( + * integer_t priority, + * thread_t thread, + * run_queue_t run_queues ); + * + * Lock the run queues, remove the entry the thread, unlock the run queues. + * + * sched_queue_remove( + * thread_t thread ); + */ + +#include +#include +#include +#include + +/* + * Type definitions and constants for MK Scheduling Framework + */ +typedef int sf_return_t; + +/* successful completion */ +#define SF_SUCCESS 0 + +/* error codes */ +#define SF_FAILURE 1 +#define SF_KERN_RESOURCE_SHORTAGE 2 + +/* Scheduler Framework Object -- i.e., a scheduling policy */ +typedef struct sf_policy *sf_object_t; + +/* + * maximum number of scheduling policies that the Scheduling Framework + * will host (picked arbitrarily) + */ +#define MAX_SCHED_POLS 10 + +/********** + * + * Scheduling Framework Interfaces + * + **********/ + +/* Initialize Framework and selected policies */ +void sf_init(void); + +/********** + * + * Scheduling Policy Interfaces + * + **********/ + +/* + * Operation list for scheduling policies. 
(Modeled after the + * device operations `.../mach_kernel/device/conf.h.') + * + * Key to some abbreviations: + * sp = scheduling policy + * sf = scheduling framework + */ +typedef struct sched_policy_ops { + /* Allow the policy to update the meta-priority of a running thread */ + sf_return_t (*sp_thread_update_mpri)( + sf_object_t policy, + thread_t thread); + + /* Notify the policy that a thread has become runnable */ + sf_return_t (*sp_thread_unblock)( + sf_object_t policy, + thread_t thread); + + /* Notify the policy that the current thread is done */ + /*** ??? Should this call take a `reason' argument? ***/ + sf_return_t (*sp_thread_done)( + sf_object_t policy, + thread_t old_thread); + + /* Notify the policy that a new thread has been selected to run */ + sf_return_t (*sp_thread_begin)( + sf_object_t policy, + thread_t new_thread); + + /* Notify the policy that an old thread is ready to be requeued */ + sf_return_t (*sp_thread_dispatch)( + sf_object_t policy, + thread_t old_thread); + + /* Attach/detach a thread from the scheduling policy */ + sf_return_t (*sp_thread_attach)( + sf_object_t policy, + thread_t thread); + + sf_return_t (*sp_thread_detach)( + sf_object_t policy, + thread_t thread); + + /* Set the thread's processor [set] */ + sf_return_t (*sp_thread_processor)( + sf_object_t policy, + thread_t *thread, + processor_t processor); + + sf_return_t (*sp_thread_processor_set)( + sf_object_t policy, + thread_t thread, + processor_set_t processor_set); + + sf_return_t (*sp_thread_setup)( + sf_object_t policy, + thread_t thread); + + /*** + *** ??? Hopefully, many of the following operations are only + *** temporary. Consequently, they haven't been forced to take + *** the same form as the others just yet. That should happen + *** for all of those that end up being permanent additions to the + *** list of standard operations. 
+ ***/ + + /* `swtch_pri()' routine -- attempt to give up processor */ + void (*sp_swtch_pri)( + sf_object_t policy, + int pri); + + /* `thread_switch()' routine -- context switch w/ optional hint */ + kern_return_t (*sp_thread_switch)( + sf_object_t policy, + thread_act_t hint_act, + int option, + mach_msg_timeout_t option_time); + + /* `thread_depress_abort()' routine -- prematurely abort depression */ + kern_return_t (*sp_thread_depress_abort)( + sf_object_t policy, + thread_t thread); + + /* `thread_depress_timeout()' routine -- timeout on depression */ + void (*sp_thread_depress_timeout)( + sf_object_t policy, + thread_t thread); + + boolean_t (*sp_thread_runnable)( + sf_object_t policy, + thread_t thread); + +} sp_ops_t; + +/********** + * + * Scheduling Policy + * + **********/ + +typedef struct sf_policy { + int policy_id; /* policy number */ + sp_ops_t sp_ops; +} sched_policy_t; + +#define SCHED_POLICY_NULL ((sched_policy_t *) 0) + +#define policy_id_to_sched_policy(policy_id) \ + (((policy_id) != POLICY_NULL)? \ + &sched_policy[(policy_id)] : SCHED_POLICY_NULL) + +extern sched_policy_t sched_policy[MAX_SCHED_POLS]; + +#endif /* _KERN_SF_H_ */ diff --git a/osfmk/kern/simple_lock.h b/osfmk/kern/simple_lock.h new file mode 100644 index 000000000..7529bb018 --- /dev/null +++ b/osfmk/kern/simple_lock.h @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1998 Apple Computer + * All Rights Reserved + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + * File: kern/simple_lock.h (derived from kern/lock.h) + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Simple Locking primitives definitions + */ + +#ifndef _SIMPLE_LOCK_H_ +#define _SIMPLE_LOCK_H_ + +/* + * Configuration variables: + * + * + * MACH_LDEBUG: record pc and thread of callers, turn on + * all lock debugging. + * + * + * ETAP: The Event Trace Analysis Package (ETAP) monitors + * and records micro-kernel lock behavior and general + * kernel events. ETAP supports two levels of + * tracing for locks: + * - cumulative (ETAP_LOCK_ACCUMULATE) + * - monitored (ETAP_LOCK_MONITOR) + * + * Note: If either level of tracing is configured then + * ETAP_LOCK_TRACE is automatically defined to + * equal one. + * + * Several macros are added throughout the lock code to + * allow for convenient configuration. + */ + +#include +#include + +#include +#include +#include + +/* + * The Mach lock package exports the following simple lock abstractions: + * + * Lock Type Properties + * hw_lock lowest level hardware abstraction; atomic, + * non-blocking, mutual exclusion; supports pre-emption + * usimple non-blocking spinning lock, available in all + * kernel configurations; may be used from thread + * and interrupt contexts; supports debugging, + * statistics and pre-emption + * simple non-blocking spinning lock, intended for SMP + * synchronization (vanishes on a uniprocessor); + * supports debugging, statistics and pre-emption + * + * NOTES TO IMPLEMENTORS: there are essentially two versions + * of the lock package. One is portable, written in C, and + * supports all of the various flavors of debugging, statistics, + * uni- versus multi-processor, pre-emption, etc. The "other" + * is whatever set of lock routines is provided by machine-dependent + * code. Presumably, the machine-dependent package is heavily + * optimized and meant for production kernels. 
+ * + * We encourage implementors to focus on highly-efficient, + * production implementations of machine-dependent lock code, + * and use the portable lock package for everything else. + */ + +#ifdef MACH_KERNEL_PRIVATE +/* + * Mach always initializes locks, even those statically + * allocated. + * + * The conditional acquisition call, hw_lock_try, + * must return non-zero on success and zero on failure. + * + * The hw_lock_held operation returns non-zero if the + * lock is set, zero if the lock is clear. This operation + * should be implemented using an ordinary memory read, + * rather than a special atomic instruction, allowing + * a processor to spin in cache waiting for the lock to + * be released without chewing up bus cycles. + */ +extern void hw_lock_init(hw_lock_t); +extern void hw_lock_lock(hw_lock_t); +extern void hw_lock_unlock(hw_lock_t); +extern unsigned int hw_lock_to(hw_lock_t, unsigned int); +extern unsigned int hw_lock_try(hw_lock_t); +extern unsigned int hw_lock_held(hw_lock_t); +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * Machine dependent atomic ops. Probably should be in their own header. 
+ */ +extern unsigned int hw_lock_bit(unsigned int *, unsigned int, unsigned int); +extern unsigned int hw_cpu_sync(unsigned int *, unsigned int); +extern unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int); +extern unsigned int hw_lock_mbits(unsigned int *, unsigned int, unsigned int, + unsigned int, unsigned int); +void hw_unlock_bit(unsigned int *, unsigned int); +extern int hw_atomic_add(int *area, int inc); +extern int hw_atomic_sub(int *area, int dec); +extern unsigned int hw_compare_and_store(unsigned int oldValue, unsigned int newValue, unsigned int *area); +extern void hw_queue_atomic(unsigned int *anchor, unsigned int *elem, unsigned int disp); +extern void hw_queue_atomic_list(unsigned int *anchor, unsigned int *first, unsigned int *last, unsigned int disp); +extern unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp); + + +/* + * The remaining locking constructs may have two versions. + * One version is machine-independent, built in C on top of the + * hw_lock construct. This version supports production, debugging + * and statistics configurations and is portable across architectures. + * + * Any particular port may override some or all of the portable + * lock package for whatever reason -- usually efficiency. + * + * The direct use of hw_locks by machine-independent Mach code + * should be rare; the preferred spinning lock is the simple_lock + * (see below). + */ + +/* + * A "simple" spin lock, providing non-blocking mutual + * exclusion and conditional acquisition. + * + * The usimple_lock exists even in uniprocessor configurations. + * A data structure is always allocated for it and the following + * operations are always defined: + * + * usimple_lock_init lock initialization (mandatory!) 
+ * usimple_lock lock acquisition + * usimple_unlock lock release + * usimple_lock_try conditional lock acquisition; + * non-zero means success + * Simple lock DEBUG interfaces + * usimple_lock_held verify lock already held by me + * usimple_lock_none_held verify no usimple locks are held + * + * The usimple_lock may be used for synchronization between + * thread context and interrupt context, or between a uniprocessor + * and an intelligent device. Obviously, it may also be used for + * multiprocessor synchronization. Its use should be rare; the + * simple_lock is the preferred spinning lock (see below). + * + * The usimple_lock supports optional lock debugging and statistics. + * + * Normally, we expect the usimple_lock data structure to be + * defined here, with its operations implemented in an efficient, + * machine-dependent way. However, any implementation may choose + * to rely on a C-based, portable version of the usimple_lock for + * debugging, statistics, and/or tracing. Three hooks are used in + * the portable lock package to allow the machine-dependent package + * to override some or all of the portable package's features. + * + * The usimple_lock also handles pre-emption. Lock acquisition + * implies disabling pre-emption, while lock release implies + * re-enabling pre-emption. Conditional lock acquisition does + * not assume success: on success, pre-emption is disabled + * but on failure the pre-emption state remains the same as + * the pre-emption state before the acquisition attempt. + */ + +/* + * Each usimple_lock has a type, used for debugging and + * statistics. This type may safely be ignored in a + * production configuration. + * + * The conditional acquisition call, usimple_lock_try, + * must return non-zero on success and zero on failure. 
+ */ +extern void usimple_lock_init(usimple_lock_t,etap_event_t); +extern void usimple_lock(usimple_lock_t); +extern void usimple_unlock(usimple_lock_t); +extern unsigned int usimple_lock_try(usimple_lock_t); +extern void usimple_lock_held(usimple_lock_t); +extern void usimple_lock_none_held(void); + + +/* + * Upon the usimple_lock we define the simple_lock, which + * exists for SMP configurations. These locks aren't needed + * in a uniprocessor configuration, so compile-time tricks + * make them disappear when NCPUS==1. (For debugging purposes, + * however, they can be enabled even on a uniprocessor.) This + * should be the "most popular" spinning lock; the usimple_lock + * and hw_lock should only be used in rare cases. + * + * IMPORTANT: simple_locks that may be shared between interrupt + * and thread context must have their use coordinated with spl. + * The spl level must alway be the same when acquiring the lock. + * Otherwise, deadlock may result. + */ + +#if MACH_KERNEL_PRIVATE +#include +#include + +#if NCPUS == 1 && !ETAP_LOCK_TRACE && !USLOCK_DEBUG +/* + * MACH_RT is a very special case: in the case that the + * machine-dependent lock package hasn't taken responsibility + * but there is no other reason to turn on locks, if MACH_RT + * is turned on locks denote critical, non-preemptable points + * in the code. + * + * Otherwise, simple_locks may be layered directly on top of + * usimple_locks. + * + * N.B. The reason that simple_lock_try may be assumed to + * succeed under MACH_RT is that the definition only is used + * when NCPUS==1 AND because simple_locks shared between thread + * and interrupt context are always acquired with elevated spl. + * Thus, it is never possible to be interrupted in a dangerous + * way while holding a simple_lock. + */ +/* + * for locks and there is no other apparent reason to turn them on. + * So make them disappear. 
+ */ +#define simple_lock_init(l,t) +#define simple_lock(l) disable_preemption() +#define simple_unlock(l) enable_preemption() +#define simple_lock_try(l) (disable_preemption(), 1) +#define simple_lock_addr(lock) ((simple_lock_t)0) +#define __slock_held_func__(l) preemption_is_disabled() +#endif /* NCPUS == 1 && !ETAP_LOCK_TRACE && !USLOCK_DEBUG */ + +#if ETAP_LOCK_TRACE +extern void simple_lock_no_trace(simple_lock_t l); +extern int simple_lock_try_no_trace(simple_lock_t l); +extern void simple_unlock_no_trace(simple_lock_t l); +#endif /* ETAP_LOCK_TRACE */ + +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * If we got to here and we still don't have simple_lock_init + * defined, then we must either be outside the osfmk component, + * running on a true SMP, or need debug. + */ +#if !defined(simple_lock_init) +#define simple_lock_init(l,t) usimple_lock_init(l,t) +#define simple_lock(l) usimple_lock(l) +#define simple_unlock(l) usimple_unlock(l) +#define simple_lock_try(l) usimple_lock_try(l) +#define simple_lock_addr(l) (&(l)) +#define __slock_held_func__(l) usimple_lock_held(l) +#endif / * !defined(simple_lock_init) */ + +#if USLOCK_DEBUG +/* + * Debug-time only: + * + verify that usimple_lock is already held by caller + * + verify that usimple_lock is NOT held by caller + * + verify that current processor owns no usimple_locks + * + * We do not provide a simple_lock_NOT_held function because + * it's impossible to verify when only MACH_RT is turned on. + * In that situation, only preemption is enabled/disabled + * around lock use, and it's impossible to tell which lock + * acquisition caused preemption to be disabled. However, + * note that it's still valid to use check_simple_locks + * when only MACH_RT is turned on -- no locks should be + * held, hence preemption should be enabled. + * Actually, the above isn't strictly true, as explicit calls + * to disable_preemption() need to be accounted for. 
+ */ +#define simple_lock_held(l) __slock_held_func__(l) +#define check_simple_locks() usimple_lock_none_held() +#else /* USLOCK_DEBUG */ +#define simple_lock_held(l) +#define check_simple_locks() +#endif /* USLOCK_DEBUG */ + +#endif /*!_SIMPLE_LOCK_H_*/ diff --git a/osfmk/kern/simple_lock_types.h b/osfmk/kern/simple_lock_types.h new file mode 100644 index 000000000..ef864cc8c --- /dev/null +++ b/osfmk/kern/simple_lock_types.h @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: kern/simple_lock_types.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Simple lock data type definitions + */ + +#ifndef _SIMPLE_LOCK_TYPES_H_ +#define _SIMPLE_LOCK_TYPES_H_ + +#include +#include +#include + +/* + * The Mach lock package exports the following simple lock abstractions: + * + * Lock Type Properties + * hw_lock lowest level hardware abstraction; atomic, + * non-blocking, mutual exclusion; supports pre-emption + * usimple non-blocking spinning lock, available in all + * kernel configurations; may be used from thread + * and interrupt contexts; supports debugging, + * statistics and pre-emption + * simple non-blocking spinning lock, intended for SMP + * synchronization (vanishes on a uniprocessor); + * supports debugging, statistics and pre-emption + * + * NOTES TO IMPLEMENTORS: there are essentially two versions + * of the lock package. One is portable, written in C, and + * supports all of the various flavors of debugging, statistics, + * uni- versus multi-processor, pre-emption, etc. The "other" + * is whatever set of lock routines is provided by machine-dependent + * code. Presumably, the machine-dependent package is heavily + * optimized and meant for production kernels. + * + * We encourage implementors to focus on highly-efficient, + * production implementations of machine-dependent lock code, + * and use the portable lock package for everything else. 
+ */ + + +/* + * All of the remaining locking constructs may have two versions. + * One version is machine-independent, built in C on top of the + * hw_lock construct. This version supports production, debugging + * and statistics configurations and is portable across architectures. + * + * Any particular port may override some or all of the portable + * lock package for whatever reason -- usually efficiency. + * + * The direct use of hw_locks by machine-independent Mach code + * should be rare; the preferred spinning lock is the simple_lock + * (see below). + */ + +/* + * A "simple" spin lock, providing non-blocking mutual + * exclusion and conditional acquisition. + * + * The usimple_lock exists even in uniprocessor configurations. + * A data structure is always allocated for it. + * + * The usimple_lock may be used for synchronization between + * thread context and interrupt context, or between a uniprocessor + * and an intelligent device. Obviously, it may also be used for + * multiprocessor synchronization. Its use should be rare; the + * simple_lock is the preferred spinning lock (see below). + * + * The usimple_lock supports optional lock debugging and statistics. + * + * The usimple_lock may be inlined or optimized in ways that + * depend on the particular machine architecture and kernel + * build configuration; e.g., processor type, number of CPUs, + * production v. debugging. + * + * Normally, we expect the usimple_lock data structure to be + * defined here, with its operations implemented in an efficient, + * machine-dependent way. However, any implementation may choose + * to rely on a C-based, portable version of the usimple_lock for + * debugging, statistics, and/or tracing. Three hooks are used in + * the portable lock package to allow the machine-dependent package + * to override some or all of the portable package's features. 
+ * + * + * The usimple_lock data structure + * can be overriden in a machine-dependent way by defining + * LOCK_USIMPLE_DATA, although we expect this to be unnecessary. + * (Note that if you choose to override LOCK_USIMPLE_DATA, you'd + * better also be prepared to override LOCK_USIMPLE_CALLS.) + * + * The usimple_lock also handles pre-emption. Lock acquisition + * implies disabling pre-emption, while lock release implies + * re-enabling pre-emption. Conditional lock acquisition does + * not assume success: on success, pre-emption is disabled + * but on failure the pre-emption state remains the same as + * the pre-emption state before the acquisition attempt. + */ + +#ifndef USIMPLE_LOCK_DATA +#define USLOCK_DEBUG_DATA 1 /* Always allocate lock debug data for now */ +#if USLOCK_DEBUG_DATA +/* + * + * + * This structure records additional information about lock state + * and recent operations. The data are carefully organized so that + * some portions of it can be examined BEFORE actually acquiring + * the lock -- for instance, the lock_thread field, to detect an + * attempt to acquire a lock already owned by the calling thread. + * All *updates* to this structure are governed by the lock to which + * this structure belongs. + * + * Note cache consistency dependency: being able to examine some + * of the fields in this structure without first acquiring a lock + * implies strongly-ordered cache coherency OR release consistency. + * Perhaps needless to say, acquisition consistency may not suffice. + * However, it's hard to imagine a scenario using acquisition + * consistency that results in using stale data from this structure. + * It would be necessary for the thread manipulating the lock to + * switch to another processor without first executing any instructions + * that would cause the needed consistency updates; basically, without + * taking a lock. Not possible in this kernel! 
+ */ +typedef struct uslock_debug { + void *lock_pc; /* pc where lock operation began */ + void *lock_thread; /* thread that acquired lock */ + unsigned long duration[2]; + unsigned short state; + unsigned char lock_cpu; + void *unlock_thread; /* last thread to release lock */ + unsigned char unlock_cpu; + void *unlock_pc; /* pc where lock operation ended */ +} uslock_debug; +#endif /* USLOCK_DEBUG_DATA */ + +typedef struct slock { + hw_lock_data_t interlock; /* must be first... see lock.c */ +#if USLOCK_DEBUG_DATA + unsigned short lock_type; /* must be second... see lock.c */ +#define USLOCK_TAG 0x5353 + uslock_debug debug; +#endif /* USLOCK_DEBUG_DATA */ +} usimple_lock_data_t, *usimple_lock_t; + +#define USIMPLE_LOCK_NULL ((usimple_lock_t) 0) + +#endif /* USIMPLE_LOCK_DATA */ + +/* + * Upon the usimple_lock we define the simple_lock, which + * exists for SMP configurations. These locks aren't needed + * in a uniprocessor configuration, so compile-time tricks + * make them disappear when NCPUS==1. (For debugging purposes, + * however, they can be enabled even on a uniprocessor.) This + * should be the "most popular" spinning lock; the usimple_lock + * and hw_lock should only be used in rare cases. + * + * IMPORTANT: simple_locks that may be shared between interrupt + * and thread context must have their use coordinated with spl. + * The spl level must alway be the same when acquiring the lock. + * Otherwise, deadlock may result. + * + * Given that, in some configurations, Mach does not need to + * allocate simple_lock data structures, users of simple_locks + * should employ the "decl_simple_lock_data" macro when allocating + * simple_locks. Note that it use should be something like + * decl_simple_lock_data(static,foo_lock) + * WITHOUT any terminating semi-colon. Because the macro expands + * to include its own semi-colon, if one is needed, it may safely + * be used multiple times at arbitrary positions within a structure. 
+ * Adding a semi-colon will cause structure definitions to fail + * when locks are turned off and a naked semi-colon is left behind. + */ + +/* + * Decide whether to allocate simple_lock data structures. + * If the machine-dependent code has turned on LOCK_SIMPLE_DATA, + * then it assumes all responsibility. Otherwise, we need + * these data structures if the configuration includes SMP or + * lock debugging or statistics. + * + * N.B. Simple locks should be declared using + * decl_simple_lock_data(class,name) + * with no trailing semi-colon. This syntax works best because + * - it correctly disappears in production uniprocessor + * configurations, leaving behind no allocated data + * structure + * - it can handle static and extern declarations: + * decl_simple_lock_data(extern,foo) extern + * decl_simple_lock_data(static,foo) static + * decl_simple_lock_data(,foo) ordinary + */ +typedef usimple_lock_data_t *simple_lock_t; + +#ifdef MACH_KERNEL_PRIVATE +#include +#include + +/* + * Turn on the uslock debug (internally to oskmk) when we are using the + * package and mach_ldebug build option is set. + */ +#if (MACH_LDEBUG) && !(defined(LOCK_SIMPLE_DATA)) +#define USLOCK_DEBUG 1 +#else +#define USLOCK_DEBUG 0 +#endif + +#if (defined(LOCK_SIMPLE_DATA) || ((NCPUS == 1) && !USLOCK_DEBUG )) +#define decl_simple_lock_data(class,name) +#endif +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * Outside the mach kernel component, and even within it on SMP or + * debug systems, simple locks are the same as usimple locks. + */ +#if !defined(decl_simple_lock_data) +typedef usimple_lock_data_t simple_lock_data_t; +#define decl_simple_lock_data(class,name) \ +class simple_lock_data_t name; +#endif /* !defined(decl_simple_lock_data) */ + +#endif /* !_SIMPLE_LOCK_TYPES_H_ */ diff --git a/osfmk/kern/spl.c b/osfmk/kern/spl.c new file mode 100644 index 000000000..15d7cf72e --- /dev/null +++ b/osfmk/kern/spl.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +/* + * spl routines + */ + +__private_extern__ spl_t +splhigh( + void) +{ + return(ml_set_interrupts_enabled(FALSE)); +} + +__private_extern__ spl_t +splsched( + void) +{ + return(ml_set_interrupts_enabled(FALSE)); +} + +__private_extern__ spl_t +splclock( + void) +{ + return(ml_set_interrupts_enabled(FALSE)); +} + +__private_extern__ void +spllo( + void) +{ + (void)ml_set_interrupts_enabled(TRUE); +} + +__private_extern__ void +splx( + spl_t l) +{ + ml_set_interrupts_enabled((boolean_t) l); +} diff --git a/osfmk/kern/spl.h b/osfmk/kern/spl.h new file mode 100644 index 000000000..2eae497c5 --- /dev/null +++ b/osfmk/kern/spl.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
#ifndef	_KERN_SPL_H_
#define	_KERN_SPL_H_

/*
 * spl_t records the state returned by the spl*() raise calls so that
 * splx() can later restore it.  In this kernel the value is a boolean
 * "interrupts were enabled" flag (see osfmk/kern/spl.c), not a
 * traditional numeric priority level.
 */
typedef unsigned spl_t;

/*
 * The declarator names are parenthesised — presumably to suppress
 * expansion of any same-named function-like macros; confirm against
 * machine-dependent headers.
 */
extern spl_t	(splhigh)(void);	/* Block all interrupts */

extern spl_t	(splsched)(void);	/* Block all scheduling activity */

extern spl_t	(splclock)(void);	/* Block clock interrupt */

extern void	(splx)(spl_t);		/* Restore previous level */

extern void	(spllo)(void);		/* Enable all interrupts */

#endif	/* _KERN_SPL_H_ */
/*
 * Minimal kernel sscanf.
 *
 * Much simpler than the C standard function: the only conversion
 * recognised is "%d" (no field width), and whitespace in the format
 * or the input is treated like any other character.  The return value
 * is the number of INPUT CHARACTERS consumed, not the number of items
 * matched as in standard sscanf.  Note that a '-' sign is consumed
 * (and counted) even when no digits follow it.
 */
int
sscanf(const char *str, const char *format, ...)
{
	const char *in = str;
	va_list ap;

	va_start(ap, format);
	while (*format != '\0') {
		if (format[0] == '%' && format[1] == 'd') {
			int negative = (*in == '-');
			int accum;
			int *out;

			if (negative)
				in++;
			if ((unsigned) (*in - '0') >= 10U)
				break;		/* no digits: stop scanning */
			/*
			 * Accumulate as a negative number so that the most
			 * negative int parses without signed overflow.
			 */
			accum = 0;
			while ((unsigned) (*in - '0') < 10U) {
				accum = accum * 10 - (*in - '0');
				in++;
			}
			out = va_arg(ap, int *);
			*out = negative ? accum : -accum;
			format += 2;		/* skip "%d" */
		} else if (*format == *in) {
			in++;			/* literal character match */
			format++;
		} else {
			break;			/* mismatch: stop scanning */
		}
	}
	va_end(ap);
	return in - str;
}
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Mach kernel startup. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __ppc__ +#include +#include +#endif + +/* Externs XXX */ +extern void rtclock_reset(void); + +/* Forwards */ +void cpu_launch_first_thread( + thread_t thread); +void start_kernel_threads(void); +void swapin_thread(); + +/* + * Running in virtual memory, on the interrupt stack. + * Does not return. Dispatches initial thread. + * + * Assumes that master_cpu is set. 
/*
 * Kernel bootstrap, phase one.
 *
 * Running in virtual memory, on the interrupt stack.  Does not
 * return: after initializing every major subsystem in dependency
 * order, it hands the CPU to the first kernel thread.
 *
 * Assumes that master_cpu is set.  The ordering of the init calls
 * below is load-bearing — each stage relies on the previous ones.
 */
void
setup_main(void)
{
	thread_t	startup_thread;

	/* Scheduler, then VM/IPC bootstrap in interleaved stages. */
	sched_init();
	vm_mem_bootstrap();
	ipc_bootstrap();
	vm_mem_init();
	ipc_init();
	pager_mux_hash_init();

	/*
	 * As soon as the virtual memory system is up, we record
	 * that this CPU is using the kernel pmap.
	 */
	PMAP_ACTIVATE_KERNEL(master_cpu);

#ifdef __ppc__
	mapping_free_prime();	/* Load up with temporary mapping blocks */
#endif

	machine_init();
	kmod_init();
	clock_init();

	init_timers();
	timer_call_initialize();

	/* Publish static machine configuration before threads exist. */
	machine_info.max_cpus = NCPUS;
	machine_info.memory_size = mem_size;
	machine_info.avail_cpus = 0;
	machine_info.major_version = KERNEL_MAJOR_VERSION;
	machine_info.minor_version = KERNEL_MINOR_VERSION;

	/*
	 * Initialize the IPC, task, and thread subsystems.
	 */
	ledger_init();
	swapper_init();
	task_init();
	act_init();
	thread_init();
	subsystem_init();

	/*
	 * Initialize the Event Trace Analysis Package.
	 * Dynamic Phase: 2 of 2
	 */
	etap_init_phase2();

	/*
	 * Create a kernel thread to start the other kernel
	 * threads.  Thread_resume (from kernel_thread) calls
	 * thread_setrun, which may look at current thread;
	 * we must avoid this, since there is no current thread.
	 */
	startup_thread = kernel_thread_with_priority(kernel_task, MAXPRI_KERNBAND,
					start_kernel_threads, FALSE);

	/*
	 * Pretend it is already running, and resume it.
	 * Since it looks as if it is running, thread_resume
	 * will not try to put it on the run queues.
	 *
	 * We can do all of this without locking, because nothing
	 * else is running yet.
	 */
	startup_thread->state = TH_RUN;
	(void) thread_resume(startup_thread->top_act);
	/*
	 * Start the thread.
	 */
	cpu_launch_first_thread(startup_thread);
	/*NOTREACHED*/
	panic("cpu_launch_first_thread returns!");
}
/*
 * Kernel bootstrap, phase two — now running in a real thread.
 * Creates the rest of the kernel threads and the bootstrap task,
 * then becomes the pageout daemon.  Does not return.
 */
void
start_kernel_threads(void)
{
	register int	i;

	/* Stay on this CPU while wiring up the per-CPU idle threads. */
	thread_bind(current_thread(), cpu_to_processor(cpu_number()));

	/*
	 * Create the idle threads and the other
	 * service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
		if (1 /*machine_slot[i].is_cpu*/) {
			processor_t		processor = cpu_to_processor(i);
			thread_t		thread;
			spl_t			s;

			thread = kernel_thread_with_priority(kernel_task,
					MAXPRI_KERNBAND, idle_thread, FALSE);
			s = splsched();
			thread_lock(thread);
			/* Pin the idle thread to its processor forever. */
			thread_bind_locked(thread, processor);
			processor->idle_thread = thread;
			thread->state |= TH_IDLE;
			thread_go_locked(thread, THREAD_AWAKENED);
			thread_unlock(thread);
			splx(s);
		}
	}

	/*
	 * Initialize the thread callout mechanism.
	 */
	thread_call_initialize();

	/*
	 * Invoke some black magic.
	 */
#if __ppc__
	mapping_adjust();
#endif

	/*
	 * Invoke the thread reaper mechanism.
	 */
	thread_reaper();

	/*
	 * Start the stack swapin thread
	 */
	kernel_thread(kernel_task, swapin_thread);

	/*
	 * Invoke the periodic scheduler mechanism.
	 */
	kernel_thread(kernel_task, sched_tick_thread);

	/*
	 * Create the clock service.
	 */
	clock_service_create();

	/*
	 * Create the device service.
	 */
	device_service_create();

	shared_file_boot_time_init();

#ifdef	IOKIT
	{
		PE_init_iokit();
	}
#endif

	/*
	 * Start the user bootstrap.
	 */

	(void) spllo();		/* Allow interruptions */

#ifdef	MACH_BSD
	{
		extern void bsd_init(void);
		bsd_init();
	}
#endif

	/* Release the CPU binding taken at the top of this function. */
	thread_bind(current_thread(), PROCESSOR_NULL);

	/*
	 * Become the pageout daemon.
	 */

	vm_pageout();
	/*NOTREACHED*/
}

/*
 * Entry point for a secondary (slave) CPU: launch the thread that was
 * staged for it, falling back to machine_wake_thread if none was.
 * Does not return.
 */
void
slave_main(void)
{
	processor_t		myprocessor = current_processor();
	thread_t		thread;

	thread = myprocessor->next_thread;
	myprocessor->next_thread = THREAD_NULL;
	if (thread == THREAD_NULL) {
		thread = machine_wake_thread;
		machine_wake_thread = THREAD_NULL;
	}
	cpu_launch_first_thread(thread);
	/*NOTREACHED*/
	panic("slave_main");
}

/*
 * Per-CPU startup — now running in a thread context.
 * Performs machine-dependent slave init and, if needed, sets up the
 * processor's IPC port; then the thread terminates itself.
 */
void
start_cpu_thread(void)
{
	processor_t	processor;

	processor = cpu_to_processor(cpu_number());

	slave_machine_init();

	if (processor->processor_self == IP_NULL) {
		ipc_processor_init(processor);
		ipc_processor_enable(processor);
	}

#if 0
	printf("start_cpu_thread done on cpu %x\n", cpu_number());
#endif
	/* TODO: Mark this processor ready to dispatch threads */

	(void) thread_terminate(current_act());
}

/*
 * Start up the first thread on a CPU.
 *
 * Brings the CPU fully online (timers, pmap, clock) and loads the
 * given thread's context; falls back to the CPU's idle thread when
 * thread is THREAD_NULL.  Does not return — load_context() switches
 * away (and re-enables preemption).
 */
void
cpu_launch_first_thread(
	thread_t	thread)
{
	register int	mycpu = cpu_number();

	/* initialize preemption disabled */
	cpu_data[mycpu].preemption_level = 1;

	cpu_up(mycpu);
	start_timer(&kernel_timer[mycpu]);

	if (thread == THREAD_NULL) {
		thread = cpu_to_processor(mycpu)->idle_thread;
		if (thread == THREAD_NULL)
			panic("cpu_launch_first_thread");
	}

	rtclock_reset();		/* start realtime clock ticking */
	PMAP_ACTIVATE_KERNEL(mycpu);

	thread_machine_set_current(thread);
	thread_lock(thread);
	thread->state &= ~TH_UNINT;	/* make the thread interruptible */
	thread_unlock(thread);
	timer_switch(&thread->system_timer);

	PMAP_ACTIVATE_USER(thread->top_act, mycpu);

	/* preemption enabled by load_context */
	load_context(thread);
	/*NOTREACHED*/
}
#ifndef	_KERN_STARTUP_H_
#define	_KERN_STARTUP_H_

/* NOTE(review): include target lost in patch extraction — presumably
 * the header providing NCPUS (cpus.h); confirm against the original. */
#include

/*
 * Kernel and machine startup declarations
 */

/* Initialize kernel (boot phase one; does not return) */
extern void	setup_main(void);

/* Initialize machine dependent stuff */
extern void	machine_init(void);

#if	NCPUS > 1

/* Secondary-CPU entry point (does not return) */
extern void	slave_main(void);

/*
 * The following must be implemented in machine dependent code.
 */

/* Slave cpu initialization */
extern void	slave_machine_init(void);

/* Start slave processors */
extern void	start_other_cpus(void);

#endif	/* NCPUS > 1 */
#endif	/* _KERN_STARTUP_H_ */
+ * [1994/12/09 21:01:02 dwm] + * + * mk6 CR668 - 1.3b26 merge + * splx is void. + * [1994/11/04 09:32:40 dwm] + * + * Revision 1.1.7.2 1994/09/23 02:27:05 ezf + * change marker to not FREE + * [1994/09/22 21:36:22 ezf] + * + * Revision 1.1.7.1 1994/09/16 15:30:10 emcmanus + * Implement "show subsystem" command. + * [1994/09/16 15:29:11 emcmanus] + * + * Revision 1.1.3.4 1994/06/02 01:53:14 bolinger + * mk6 CR125: Initialize subsystem_lock(). + * [1994/06/01 22:30:18 bolinger] + * + * Revision 1.1.3.3 1994/01/21 01:22:58 condict + * Fix too stringent error checking. Change subsys from ool to in-line. + * [1994/01/21 01:19:32 condict] + * + * Revision 1.1.3.2 1994/01/20 16:25:29 condict + * Testing bsubmit. + * [1994/01/20 16:24:32 condict] + * + * Revision 1.1.3.1 1994/01/20 11:09:26 emcmanus + * Copied for submission. + * [1994/01/20 11:08:20 emcmanus] + * + * Revision 1.1.1.4 1994/01/20 02:45:10 condict + * Make user subsystem point at containing system subsytem struct. + * + * Revision 1.1.1.3 1994/01/15 22:01:19 condict + * Validate user subsystem data, convert user ptrs to kernel ptrs. + * + * Revision 1.1.1.2 1994/01/13 02:39:58 condict + * Implementation of RPC subsystem object, for server co-location. + * + * $EndLog$ + */ +/* + * Functions to manipulate RPC subsystem descriptions. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SUBSYSTEM_MIN_SIZE 12 +#define SUBSYSTEM_MAX_SIZE (2*1024*1024) /* What value is correct? */ + +void +subsystem_init( + void) +{ + /* Nothing to do on bootstrap, at the moment. */ +} + +/* + * Routine: mach_subsystem_create + * Purpose: + * Create a new RPC subsystem. + * Conditions: + * Nothing locked. If successful, the subsystem is returned + * unlocked. (The caller has a reference.) + * Returns: + * KERN_SUCCESS The subsystem is allocated. + * KERN_INVALID_TASK The task is dead. + * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
+ */ + +kern_return_t +mach_subsystem_create( + register task_t parent_task, + user_subsystem_t user_subsys, + mach_msg_type_number_t user_subsysCount, + subsystem_t *subsystem_p) +{ + int i; + subsystem_t new_subsystem; + kern_return_t kr; + boolean_t deallocate = FALSE; + vm_size_t size; + vm_offset_t offset; + int num_routines; + boolean_t bad_arg = FALSE; + + if (parent_task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + + if (user_subsysCount < SUBSYSTEM_MIN_SIZE || + user_subsysCount > SUBSYSTEM_MAX_SIZE) + return(KERN_INVALID_ARGUMENT); + + /* + * Allocate a subsystem and initialize: + */ + + size = (vm_size_t)user_subsysCount + sizeof(struct subsystem) - + sizeof(struct rpc_subsystem); + new_subsystem = (subsystem_t) kalloc(size); + + if (new_subsystem == 0) + return(KERN_RESOURCE_SHORTAGE); + + new_subsystem->task = parent_task; + new_subsystem->ref_count = 1; /* A reference for our caller */ + new_subsystem->size = size; + subsystem_lock_init(new_subsystem); + + /* Copy the user subsystem data to a permanent place: */ + bcopy((char *)user_subsys, (char *)&(new_subsystem->user), + (int)user_subsysCount); + + /* Validate the user-specified fields of the subsystem: */ + + num_routines = new_subsystem->user.end - new_subsystem->user.start; + if (num_routines < 0 || + (char *)&new_subsystem->user.routine[num_routines] > + (char *)&new_subsystem->user + (int)user_subsysCount + ) { + kfree((vm_offset_t)new_subsystem, size); + return(KERN_INVALID_ADDRESS); + } + + /* The following is for converting the user pointers in the + * subsystem struct to kernel pointers: + */ + offset = (char *)&new_subsystem->user - + (char *)new_subsystem->user.base_addr; /* The user addr */ + + for (i = 0; i < num_routines; i++) { + routine_descriptor_t routine = &new_subsystem->user.routine[i]; + + /* If this is a "skip" routine, ignore it: */ + if (!routine->impl_routine) + continue; + + /* Convert the user arg_descr pointer to a kernel pointer: */ + routine->arg_descr = 
(routine_arg_descriptor_t) + ((char *)routine->arg_descr + offset); + + if (routine->argc > 1000000 || + routine->argc < routine->descr_count) { + bad_arg = TRUE; + break; + } + /* Validate that the arg_descr field is within the part of + * the struct that follows the routine array: */ + if ((char *)&routine->arg_descr[0] < + (char *)&new_subsystem->user.routine[num_routines] + || + (char *)&routine->arg_descr[routine->descr_count] > + (char *)&new_subsystem->user + (int)user_subsysCount + ) { + printf("Arg descr out of bounds: arg_descr=%x, &routine.num_routines=%x\n", + &routine->arg_descr[0], &new_subsystem->user.routine[num_routines]); + printf(" new_subsys->user + subsysCount = %x\n", + (char *)&new_subsystem->user + (int)user_subsysCount); +#if MACH_DEBUG && MACH_KDB + subsystem_print(new_subsystem); + /* Not all of the arg_descr pointers have necessarily + been corrected, but this just means that we print + the arg_descr from the user's input subsystem + instead of the copy we are building. 
*/ +#endif /* MACH_DEBUG && MACH_KDB */ + bad_arg = TRUE; + break; + } + } + if (bad_arg) { + kfree((vm_offset_t)new_subsystem, size); + return(KERN_INVALID_ADDRESS); + } + + /* Convert the user base address to a kernel address: */ + new_subsystem->user.base_addr = (vm_address_t)&new_subsystem->user; + + /* Make the user subsystem point at the containing system data + * structure, so we can get from a port (which points to the user + * subsystem data) to the system subsystem struct: + */ + new_subsystem->user.subsystem = new_subsystem; + + ipc_subsystem_init(new_subsystem); + + task_lock(parent_task); + if (parent_task->active) { + parent_task->subsystem_count++; + queue_enter(&parent_task->subsystem_list, new_subsystem, + subsystem_t, subsystem_list); + } else + deallocate = TRUE; + task_unlock(parent_task); + + if (deallocate) { + /* release ref we would have given our caller */ + subsystem_deallocate(new_subsystem); + return(KERN_INVALID_TASK); + } + + ipc_subsystem_enable(new_subsystem); + + *subsystem_p = new_subsystem; + return(KERN_SUCCESS); +} + +/* + * Routine: subsystem_reference + * Purpose: + * Increments the reference count on a subsystem. + * Conditions: + * Nothing is locked. + */ +void +subsystem_reference( + register subsystem_t subsystem) +{ + spl_t s; + + if (subsystem == SUBSYSTEM_NULL) + return; + + s = splsched(); + subsystem_lock(subsystem); + subsystem->ref_count++; + subsystem_unlock(subsystem); + splx(s); +} + + + +/* + * Routine: subsystem_deallocate + * Purpose: + * Decrements the reference count on a subsystem. If 0, + * destroys the subsystem. Must have no ports registered on it + * when it is destroyed. + * Conditions: + * The subsystem is locked, and + * the caller has a reference, which is consumed. 
+ */ + +void +subsystem_deallocate( + subsystem_t subsystem) +{ + task_t task; + spl_t s; + + if (subsystem == SUBSYSTEM_NULL) + return; + + s = splsched(); + subsystem_lock(subsystem); + if (--subsystem->ref_count > 0) { + subsystem_unlock(subsystem); + splx(s); + return; + } + + /* + * Count is 0, so destroy the subsystem. Need to restore the + * reference temporarily, and lock the task first: + */ + ipc_subsystem_disable(subsystem); + + subsystem->ref_count = 1; + subsystem_unlock(subsystem); + splx(s); + + task = subsystem->task; + task_lock(task); + s = splsched(); + subsystem_lock(subsystem); + + /* Check again, since we temporarily unlocked the subsystem: */ + if (--subsystem->ref_count == 0) { + + task->subsystem_count--; + queue_remove(&task->subsystem_list, subsystem, subsystem_t, + subsystem_list); + ipc_subsystem_terminate(subsystem); + subsystem_unlock(subsystem); + splx(s); + kfree((vm_offset_t) subsystem, subsystem->size); + task_unlock(task); + return; + } + + ipc_subsystem_enable(subsystem); + + subsystem_unlock(subsystem); + splx(s); + task_unlock(task); +} + + +#include +#if MACH_KDB + +#include +#include +#include +#include + +#define printf kdbprintf + +/* + * Routine: subsystem_print + * Purpose: + * Pretty-print a subsystem for kdb. 
+ */ + +void rpc_subsystem_print(rpc_subsystem_t subsys); + +void +subsystem_print( + subsystem_t subsystem) +{ + extern int db_indent; + + iprintf("subsystem 0x%x\n", subsystem); + + db_indent += 2; + + iprintf("ref %d size %x task %x port %x\n", subsystem->ref_count, + subsystem->size, subsystem->task, subsystem->ipc_self); + rpc_subsystem_print(&subsystem->user); + +/* ipc_object_print(&port->ip_object); + * iprintf("receiver=0x%x", port->ip_receiver); + * printf(", receiver_name=0x%x\n", port->ip_receiver_name); + */ + db_indent -=2; +} + +struct flagnames { + char *name; + int bit; +} arg_type_names[] = { + "port", MACH_RPC_PORT, "array", MACH_RPC_ARRAY, + "variable", MACH_RPC_VARIABLE, "in", MACH_RPC_IN, "out", MACH_RPC_OUT, + "pointer", MACH_RPC_POINTER, "phys_copy", MACH_RPC_PHYSICAL_COPY, + "virt_copy", MACH_RPC_VIRTUAL_COPY, "deallocate", MACH_RPC_DEALLOCATE, + "onstack", MACH_RPC_ONSTACK, "bounded", MACH_RPC_BOUND, +}; + +void +rpc_subsystem_print( + rpc_subsystem_t subsys) +{ + int i, num_routines; + + iprintf("rpc_subsystem 0x%x\n", subsys); + + db_indent += 2; + + num_routines = subsys->end - subsys->start; + iprintf("start %d end %d (%d routines) maxsize %x base %x\n", + subsys->start, subsys->end, num_routines, subsys->maxsize, + subsys->base_addr); + for (i = 0; i < num_routines; i++) { + routine_descriptor_t routine = subsys->routine + i; + routine_arg_descriptor_t args = routine->arg_descr; + int j, type, disposition; + struct flagnames *n; + char *sep; + + iprintf("%x #%d:", routine, subsys->start + i); + if (routine->impl_routine == 0) { + printf(" skip\n"); + continue; + } + printf("\n"); + db_indent += 2; + iprintf("impl "); + db_printsym((db_expr_t) routine->impl_routine, DB_STGY_PROC); + printf("\n"); + iprintf("stub "); + db_printsym((db_expr_t) routine->stub_routine, DB_STGY_PROC); + printf("\n"); + iprintf("argc %d descr_count %d max_reply %x\n", + routine->argc, routine->descr_count, routine->max_reply_msg); + for (j = 0; j < 
routine->descr_count; j++) { + iprintf("%x desc %d: size %d count %d offset %x type", &args[j], j, + args[j].size, args[j].count, args[j].offset); + sep = " "; + type = args[j].type; + for (n = arg_type_names; n->name != 0; n++) { + if (type & n->bit) { + printf("%s%s", sep, n->name); + sep = "|"; + type &= ~n->bit; /* Might have an alias */ + } + } +#define NAME_MASK (3 << NAME_SHIFT) /* XXX magic numbers */ +#define ACTION_MASK (3 << ACTION_SHIFT) +#define DISPOSITION_MASK (NAME_MASK | ACTION_MASK) + disposition = type & DISPOSITION_MASK; + type &= ~DISPOSITION_MASK; + if (sep[0] != '|' || type != 0) + printf("%s%x", sep, type); + switch (disposition & ACTION_MASK) { + case MACH_RPC_MOVE: printf(" move"); break; + case MACH_RPC_COPY: printf(" copy"); break; + case MACH_RPC_MAKE: printf(" make"); break; + } + switch (disposition & NAME_MASK) { + case MACH_RPC_RECEIVE: printf(" receive"); break; + case MACH_RPC_SEND: printf(" send"); break; + case MACH_RPC_SEND_ONCE: printf(" send-once"); break; + } + printf("\n"); + } + db_indent -= 2; + } + + db_indent -= 2; +} + +void +db_show_subsystem( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + if (!have_addr || addr == 0) { + db_printf("No subsystem\n"); + return; + } + if (db_option(modif, 'r')) + rpc_subsystem_print((rpc_subsystem_t) addr); + else + subsystem_print((subsystem_t) addr); +} + +#endif /* MACH_KDB || MACH_DEBUG */ diff --git a/osfmk/kern/subsystem.h b/osfmk/kern/subsystem.h new file mode 100644 index 000000000..13e0959ad --- /dev/null +++ b/osfmk/kern/subsystem.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Definitions for RPC subsystems. + */ + +#ifndef _IPC_IPC_SUBSYSTEM_H_ +#define _IPC_IPC_SUBSYSTEM_H_ + +#include +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#include + +#define subsystem_lock_init(subsys) \ + simple_lock_init(&(subsys)->lock, ETAP_MISC_RPC_SUBSYS) +#define subsystem_lock(subsys) simple_lock(&(subsys)->lock) +#define subsystem_unlock(subsys) simple_unlock(&(subsys)->lock) + +/* + * A subsystem describes a set of server routines that can be invoked by + * mach_rpc() on the ports that are registered with the subsystem. + * See struct rpc_subsystem in mach/rpc.h, for more details. + */ +struct subsystem { + /* Synchronization/destruction information */ + decl_simple_lock_data(,lock) /* Subsystem lock */ + int ref_count; /* Number of references to me */ + vm_size_t size; /* Number of bytes in this structure */ + /* including the variable length */ + /* user_susbystem description */ + /* Task information */ + task_t task; /* Task to which I belong */ + queue_chain_t subsystem_list; /* list of subsystems in task */ + + /* IPC stuff: */ + struct ipc_port *ipc_self; /* Port naming this subsystem */ + + struct rpc_subsystem user; /* MIG-generated subsystem descr */ +}; + +extern void subsystem_init(void); + +#endif /* MACH_KERNEL_PRIVATE */ + +/* Subsystem create, with 1 reference. 
*/ +extern kern_return_t mach_subsystem_create( + task_t parent_task, + user_subsystem_t user_subsys, + mach_msg_type_number_t user_subsysCount, + subsystem_t *subsystem); + +/* Take additional reference on subsystem (make sure it doesn't go away) */ +extern void subsystem_reference( + subsystem_t subsystem); + +/* Remove one reference on subsystem (it is destroyed if 0 refs remain) */ +extern void subsystem_deallocate( + subsystem_t subsystem); + +#if MACH_KDB +extern void subsystem_print( + subsystem_t subsystem); +#endif /* MACH_KDB */ + +#endif /* _IPC_IPC_SUBSYSTEM_H_ */ diff --git a/osfmk/kern/sync_lock.c b/osfmk/kern/sync_lock.c new file mode 100644 index 000000000..5849f6883 --- /dev/null +++ b/osfmk/kern/sync_lock.c @@ -0,0 +1,862 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: kern/sync_lock.c + * Author: Joseph CaraDonna + * + * Contains RT distributed lock synchronization services. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * Ulock ownership MACROS + * + * Assumes: ulock internal lock is held + */ + +#define ulock_ownership_set(ul, th) \ + MACRO_BEGIN \ + thread_act_t _th_act; \ + _th_act = (th)->top_act; \ + act_lock(_th_act); \ + enqueue (&_th_act->held_ulocks, (queue_entry_t) (ul)); \ + act_unlock(_th_act); \ + (ul)->holder = _th_act; \ + MACRO_END + +#define ulock_ownership_clear(ul) \ + MACRO_BEGIN \ + thread_act_t _th_act; \ + _th_act = (ul)->holder; \ + if (_th_act->active) { \ + act_lock(_th_act); \ + remqueue(&_th_act->held_ulocks, \ + (queue_entry_t) (ul)); \ + act_unlock(_th_act); \ + } else { \ + remqueue(&_th_act->held_ulocks, \ + (queue_entry_t) (ul)); \ + } \ + (ul)->holder = THR_ACT_NULL; \ + MACRO_END + +/* + * Lock set ownership MACROS + */ + +#define lock_set_ownership_set(ls, t) \ + MACRO_BEGIN \ + task_lock((t)); \ + enqueue_head(&(t)->lock_set_list, (queue_entry_t) (ls));\ + (t)->lock_sets_owned++; \ + task_unlock((t)); \ + (ls)->owner = (t); \ + MACRO_END + +#define lock_set_ownership_clear(ls, t) \ + MACRO_BEGIN \ + task_lock((t)); \ + remqueue(&(t)->lock_set_list, (queue_entry_t) (ls)); \ + (t)->lock_sets_owned--; \ + task_unlock((t)); \ + MACRO_END + +unsigned int lock_set_event; +#define LOCK_SET_EVENT ((event_t)&lock_set_event) + +unsigned int lock_set_handoff; +#define LOCK_SET_HANDOFF ((event_t)&lock_set_handoff) + +/* + * ROUTINE: lock_set_init [private] + * + * Initialize the lock_set subsystem. + * + * For now, we don't have anything to do here. + */ +void +lock_set_init(void) +{ + return; +} + + +/* + * ROUTINE: lock_set_create [exported] + * + * Creates a lock set. + * The port representing the lock set is returned as a parameter. 
+ */
+kern_return_t
+lock_set_create (
+	task_t		task,
+	lock_set_t	*new_lock_set,
+	int		n_ulocks,
+	int		policy)
+{
+	lock_set_t	lock_set = LOCK_SET_NULL;
+	ulock_t		ulock;
+	int		size;
+	int		x;
+
+	*new_lock_set = LOCK_SET_NULL;
+
+	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
+		return KERN_INVALID_ARGUMENT;
+
+	/*
+	 * Guard the size arithmetic below against signed-integer overflow:
+	 * a huge n_ulocks would wrap "size", making kalloc() return a
+	 * buffer too small for the ulock array and the initialization
+	 * loop below would then overrun the heap.
+	 */
+	if ((unsigned int) n_ulocks >
+	    ((~(unsigned int) 0 >> 1) - sizeof(struct lock_set)) /
+						sizeof(struct ulock))
+		return KERN_INVALID_ARGUMENT;
+
+	/* lock_set already embeds one ulock, hence (n_ulocks - 1) */
+	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
+	lock_set = (lock_set_t) kalloc (size);
+
+	if (lock_set == LOCK_SET_NULL)
+		return KERN_RESOURCE_SHORTAGE;
+
+
+	lock_set_lock_init(lock_set);
+	lock_set->n_ulocks = n_ulocks;
+	lock_set->ref_count = 1;
+
+	/*
+	 * Create and initialize the lock set port
+	 */
+	lock_set->port = ipc_port_alloc_kernel();
+	if (lock_set->port == IP_NULL) {
+		/* This will deallocate the lock set */
+		lock_set_dereference(lock_set);
+		return KERN_RESOURCE_SHORTAGE;
+	}
+
+	ipc_kobject_set (lock_set->port,
+			(ipc_kobject_t) lock_set,
+			IKOT_LOCK_SET);
+
+	/*
+	 * Initialize each ulock in the lock set
+	 */
+
+	for (x=0; x < n_ulocks; x++) {
+		ulock = (ulock_t) &lock_set->ulock_list[x];
+		ulock_lock_init(ulock);
+		ulock->lock_set = lock_set;
+		ulock->holder	= THR_ACT_NULL;
+		ulock->blocked	= FALSE;
+		ulock->unstable	= FALSE;
+		ulock->ho_wait	= FALSE;
+		wait_queue_init(&ulock->wait_queue, policy);
+	}
+
+	/* enqueue on the owning task and hand the set back active */
+	lock_set_ownership_set(lock_set, task);
+
+	lock_set->active = TRUE;
+	*new_lock_set = lock_set;
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	ROUTINE:	lock_set_destroy	[exported]
+ *
+ *	Destroys a lock set.  This call will only succeed if the
+ *	specified task is the SAME task name specified at the lock set's
+ *	creation.
+ *
+ *	NOTES:
+ *	- All threads currently blocked on the lock set's ulocks are awoken.
+ *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
+ */ +kern_return_t +lock_set_destroy (task_t task, lock_set_t lock_set) +{ + thread_t thread; + ulock_t ulock; + int i; + + if (task == TASK_NULL || lock_set == LOCK_SET_NULL) + return KERN_INVALID_ARGUMENT; + + if (lock_set->owner != task) + return KERN_INVALID_RIGHT; + + lock_set_lock(lock_set); + if (!lock_set->active) { + lock_set_unlock(lock_set); + return KERN_LOCK_SET_DESTROYED; + } + + /* + * Deactivate lock set + */ + lock_set->active = FALSE; + + /* + * If a ulock is currently held in the target lock set: + * + * 1) Wakeup all threads blocked on the ulock (if any). Threads + * may be blocked waiting normally, or waiting for a handoff. + * Blocked threads will return with KERN_LOCK_SET_DESTROYED. + * + * 2) ulock ownership is cleared. + * The thread currently holding the ulock is revoked of its + * ownership. + */ + for (i = 0; i < lock_set->n_ulocks; i++) { + ulock = &lock_set->ulock_list[i]; + + ulock_lock(ulock); + + if (ulock->accept_wait) { + ulock->accept_wait = FALSE; + wait_queue_wakeup_one(&ulock->wait_queue, + LOCK_SET_HANDOFF, + THREAD_RESTART); + } + + if (ulock->holder) { + if (ulock->blocked) { + ulock->blocked = FALSE; + wait_queue_wakeup_all(&ulock->wait_queue, + LOCK_SET_EVENT, + THREAD_RESTART); + } + if (ulock->ho_wait) { + ulock->ho_wait = FALSE; + wait_queue_wakeup_one(&ulock->wait_queue, + LOCK_SET_HANDOFF, + THREAD_RESTART); + } + ulock_ownership_clear(ulock); + } + + ulock_unlock(ulock); + } + + lock_set_unlock(lock_set); + lock_set_ownership_clear(lock_set, task); + + /* + * Deallocate + * + * Drop the lock set reference, which inturn destroys the + * lock set structure if the reference count goes to zero. 
+ */
+
+	ipc_port_dealloc_kernel(lock_set->port);
+	lock_set_dereference(lock_set);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	ROUTINE:	lock_acquire		[exported]
+ *
+ *	Acquires ulock "lock_id" in "lock_set", blocking until it becomes
+ *	available.  Returns KERN_LOCK_OWNED_SELF if the caller already
+ *	holds it, KERN_LOCK_UNSTABLE if a previous holder died while
+ *	holding it, KERN_LOCK_SET_DESTROYED if the set was torn down.
+ */
+kern_return_t
+lock_acquire (lock_set_t lock_set, int lock_id)
+{
+	ulock_t   ulock;
+
+	if (lock_set == LOCK_SET_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
+		return KERN_INVALID_ARGUMENT;
+
+ retry:
+	lock_set_lock(lock_set);
+	if (!lock_set->active) {
+		lock_set_unlock(lock_set);
+		return KERN_LOCK_SET_DESTROYED;
+	}
+
+	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
+	ulock_lock(ulock);
+	lock_set_unlock(lock_set);
+
+	/*
+	 * Block the current thread if the lock is already held.
+	 * (A stray second lock_set_unlock() was removed from this branch:
+	 * the lock set mutex is already dropped just above, so unlocking
+	 * it again here operated on a mutex we did not hold.)
+	 */
+	if (ulock->holder != THR_ACT_NULL) {
+		int wait_result;
+
+		if (ulock->holder == current_act()) {
+			ulock_unlock(ulock);
+			return KERN_LOCK_OWNED_SELF;
+		}
+
+		ulock->blocked = TRUE;
+		wait_queue_assert_wait(&ulock->wait_queue,
+				       LOCK_SET_EVENT,
+				       THREAD_ABORTSAFE);
+		ulock_unlock(ulock);
+
+		/*
+		 * Block - Wait for lock to become available.
+		 */
+		wait_result = thread_block((void (*)(void))0);
+
+		/*
+		 * Check to see why thread was woken up.  In all cases, we
+		 * already have been removed from the queue.
+		 */
+		switch (wait_result) {
+		case THREAD_AWAKENED:
+			/* lock transitioned from old locker to us */
+			/* he already made us owner */
+			/* NOTE(review): ulock->unstable is read unlocked
+			 * here, as in the original -- racy but preserved. */
+			return (ulock->unstable) ? KERN_LOCK_UNSTABLE :
+						   KERN_SUCCESS;
+
+		case THREAD_INTERRUPTED:
+			return KERN_ABORTED;
+
+		case THREAD_RESTART:
+			goto retry;  /* probably a dead lock_set */
+
+		default:
+			panic("lock_acquire\n");
+		}
+	}
+
+	/*
+	 * Assign lock ownership
+	 */
+	ulock_ownership_set(ulock, current_thread());
+	ulock_unlock(ulock);
+
+	return (ulock->unstable) ? KERN_LOCK_UNSTABLE : KERN_SUCCESS;
+}
+
+/*
+ *	ROUTINE:	lock_release		[exported]
+ *
+ *	Releases ulock "lock_id" in "lock_set" on behalf of the current
+ *	activation; the real work is done by lock_release_internal().
+ */
+kern_return_t
+lock_release (lock_set_t lock_set, int lock_id)
+{
+	ulock_t	 ulock;
+
+	if (lock_set == LOCK_SET_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
+		return KERN_INVALID_ARGUMENT;
+
+	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
+
+	return (lock_release_internal(ulock, current_act()));
+}
+
+/*
+ *	ROUTINE:	lock_try		[exported]
+ *
+ *	Non-blocking acquire of ulock "lock_id": returns immediately with
+ *	KERN_LOCK_OWNED (another thread holds it) or KERN_LOCK_OWNED_SELF
+ *	(caller holds it) instead of waiting.
+ */
+kern_return_t
+lock_try (lock_set_t lock_set, int lock_id)
+{
+	ulock_t	  ulock;
+
+
+	if (lock_set == LOCK_SET_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
+		return KERN_INVALID_ARGUMENT;
+
+
+	lock_set_lock(lock_set);
+	if (!lock_set->active) {
+		lock_set_unlock(lock_set);
+		return KERN_LOCK_SET_DESTROYED;
+	}
+
+	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
+	ulock_lock(ulock);
+	lock_set_unlock(lock_set);
+
+	/*
+	 * If the lock is already owned, we return without blocking.
+	 *
+	 * An ownership status is returned to inform the caller as to
+	 * whether it already holds the lock or another thread does.
+	 * (The stray second lock_set_unlock() that used to sit in this
+	 * branch was removed -- the lock set mutex is already dropped.)
+	 */
+	if (ulock->holder != THR_ACT_NULL) {
+		if (ulock->holder == current_act()) {
+			ulock_unlock(ulock);
+			return KERN_LOCK_OWNED_SELF;
+		}
+
+		ulock_unlock(ulock);
+		return KERN_LOCK_OWNED;
+	}
+
+	/*
+	 * Add the ulock to the lock set's held_ulocks list.
+	 */
+	ulock_ownership_set(ulock, current_thread());
+	ulock_unlock(ulock);
+
+	return (ulock->unstable) ?
KERN_LOCK_UNSTABLE : KERN_SUCCESS; +} + +kern_return_t +lock_make_stable (lock_set_t lock_set, int lock_id) +{ + ulock_t ulock; + + + if (lock_set == LOCK_SET_NULL) + return KERN_INVALID_ARGUMENT; + + if (lock_id < 0 || lock_id >= lock_set->n_ulocks) + return KERN_INVALID_ARGUMENT; + + + lock_set_lock(lock_set); + if (!lock_set->active) { + lock_set_unlock(lock_set); + return KERN_LOCK_SET_DESTROYED; + } + + ulock = (ulock_t) &lock_set->ulock_list[lock_id]; + ulock_lock(ulock); + lock_set_unlock(lock_set); + + if (ulock->holder != current_act()) { + ulock_unlock(ulock); + return KERN_INVALID_RIGHT; + } + + ulock->unstable = FALSE; + ulock_unlock(ulock); + + return KERN_SUCCESS; +} + +/* + * ROUTINE: lock_make_unstable [internal] + * + * Marks the lock as unstable. + * + * NOTES: + * - All future acquisitions of the lock will return with a + * KERN_LOCK_UNSTABLE status, until the lock is made stable again. + */ +kern_return_t +lock_make_unstable (ulock_t ulock, thread_act_t thr_act) +{ + lock_set_t lock_set; + + + lock_set = ulock->lock_set; + lock_set_lock(lock_set); + if (!lock_set->active) { + lock_set_unlock(lock_set); + return KERN_LOCK_SET_DESTROYED; + } + + ulock_lock(ulock); + lock_set_unlock(lock_set); + + if (ulock->holder != thr_act) { + ulock_unlock(ulock); + return KERN_INVALID_RIGHT; + } + + ulock->unstable = TRUE; + ulock_unlock(ulock); + + return KERN_SUCCESS; +} + +/* + * ROUTINE: lock_release_internal [internal] + * + * Releases the ulock. + * If any threads are blocked waiting for the ulock, one is woken-up. 
+ *
+ */
+kern_return_t
+lock_release_internal (ulock_t ulock, thread_act_t thr_act)
+{
+	lock_set_t	lock_set;
+
+	if ((lock_set = ulock->lock_set) == LOCK_SET_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	lock_set_lock(lock_set);
+	if (!lock_set->active) {
+		lock_set_unlock(lock_set);
+		return KERN_LOCK_SET_DESTROYED;
+	}
+	ulock_lock(ulock);
+	lock_set_unlock(lock_set);
+
+	/*
+	 * Only the holder may release.  (A stray second
+	 * lock_set_unlock() was removed from this error path: the lock
+	 * set mutex is already dropped just above, so unlocking it again
+	 * here operated on a mutex we did not hold.  The unused local
+	 * "result" was removed as well.)
+	 */
+	if (ulock->holder != thr_act) {
+		ulock_unlock(ulock);
+		return KERN_INVALID_RIGHT;
+	}
+
+	/*
+	 * If we have a hint that threads might be waiting,
+	 * try to transfer the lock ownership to a waiting thread
+	 * and wake it up.
+	 */
+	if (ulock->blocked) {
+		wait_queue_t	wq = &ulock->wait_queue;
+		thread_t	thread;
+		spl_t		s;
+
+		s = splsched();
+		wait_queue_lock(wq);
+		thread = wait_queue_wakeup_identity_locked(wq,
+							   LOCK_SET_EVENT,
+							   THREAD_AWAKENED,
+							   TRUE);
+		/* wait_queue now unlocked, thread locked */
+
+		if (thread != THREAD_NULL) {
+			/*
+			 * JMM - These ownership transfer macros have a
+			 * locking/race problem.  To keep the thread from
+			 * changing states on us (nullifying the ownership
+			 * assignment) we need to keep the thread locked
+			 * during the assignment.  But we can't because the
+			 * macros take an activation lock, which is a mutex.
+			 * Since this code was already broken before I got
+			 * here, I will leave it for now.
+			 */
+			thread_unlock(thread);
+			splx(s);
+
+			/*
+			 * Transfer ulock ownership
+			 * from the current thread to the acquisition thread.
+			 */
+			ulock_ownership_clear(ulock);
+			ulock_ownership_set(ulock, thread);
+			ulock_unlock(ulock);
+
+			return KERN_SUCCESS;
+		} else {
+			/* nobody was actually waiting; clear the hint */
+			ulock->blocked = FALSE;
+			splx(s);
+		}
+	}
+
+	/*
+	 * Disown ulock
+	 */
+	ulock_ownership_clear(ulock);
+	ulock_unlock(ulock);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ *	ROUTINE:	lock_handoff		[exported]
+ *
+ *	Hands ulock "lock_id" directly to an accepting thread, blocking
+ *	until one arrives if none is already waiting in
+ *	lock_handoff_accept().
+ */
+kern_return_t
+lock_handoff (lock_set_t lock_set, int lock_id)
+{
+	ulock_t	  ulock;
+	int	  wait_result;
+
+
+	if (lock_set == LOCK_SET_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	if (lock_id < 0 || lock_id >= lock_set->n_ulocks)
+		return KERN_INVALID_ARGUMENT;
+
+ retry:
+	lock_set_lock(lock_set);
+
+	if (!lock_set->active) {
+		lock_set_unlock(lock_set);
+		return KERN_LOCK_SET_DESTROYED;
+	}
+
+	ulock = (ulock_t) &lock_set->ulock_list[lock_id];
+	ulock_lock(ulock);
+	lock_set_unlock(lock_set);
+
+	/*
+	 * Only the holder may hand off.  (Stray second lock_set_unlock()
+	 * removed here too -- the lock set mutex is already dropped.)
+	 */
+	if (ulock->holder != current_act()) {
+		ulock_unlock(ulock);
+		return KERN_INVALID_RIGHT;
+	}
+
+	/*
+	 * If the accepting thread (the receiver) is already waiting
+	 * to accept the lock from the handoff thread (the sender),
+	 * then perform the hand-off now.
+	 */
+	if (ulock->accept_wait) {
+		wait_queue_t	wq = &ulock->wait_queue;
+		thread_t	thread;
+		spl_t		s;
+
+		/*
+		 * See who the lucky devil is, if he is still there waiting.
+		 */
+		s = splsched();
+		wait_queue_lock(wq);
+		thread = wait_queue_wakeup_identity_locked(
+				   wq,
+				   LOCK_SET_HANDOFF,
+				   THREAD_AWAKENED,
+				   TRUE);
+		/* wait queue unlocked, thread locked */
+
+		/*
+		 * Transfer lock ownership
+		 */
+		if (thread != THREAD_NULL) {
+			/*
+			 * JMM - These ownership transfer macros have a
+			 * locking/race problem.  To keep the thread from
+			 * changing states on us (nullifying the ownership
+			 * assignment) we need to keep the thread locked
+			 * during the assignment.  But we can't because the
+			 * macros take an activation lock, which is a mutex.
+			 * Since this code was already broken before I got
+			 * here, I will leave it for now.
+ */ + thread_unlock(thread); + splx(s); + + ulock_ownership_clear(ulock); + ulock_ownership_set(ulock, thread); + ulock->accept_wait = FALSE; + ulock_unlock(ulock); + return KERN_SUCCESS; + } else { + + /* + * OOPS. The accepting thread must have been aborted. + * and is racing back to clear the flag that says is + * waiting for an accept. He will clear it when we + * release the lock, so just fall thru and wait for + * the next accept thread (that's the way it is + * specified). + */ + splx(s); + } + } + + /* + * Indicate that there is a hand-off thread waiting, and then wait + * for an accepting thread. + */ + ulock->ho_wait = TRUE; + wait_queue_assert_wait(&ulock->wait_queue, + LOCK_SET_HANDOFF, + THREAD_ABORTSAFE); + ulock_unlock(ulock); + + ETAP_SET_REASON(current_thread(), BLOCKED_ON_LOCK_HANDOFF); + wait_result = thread_block((void (*)(void))0); + + /* + * If the thread was woken-up via some action other than + * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate), + * then we need to clear the ulock's handoff state. + */ + switch (wait_result) { + + case THREAD_AWAKENED: + return KERN_SUCCESS; + + case THREAD_INTERRUPTED: + ulock_lock(ulock); + assert(ulock->holder == current_act()); + ulock->ho_wait = FALSE; + ulock_unlock(ulock); + return KERN_ABORTED; + + case THREAD_RESTART: + goto retry; + + default: + panic("lock_handoff"); + } +} + +kern_return_t +lock_handoff_accept (lock_set_t lock_set, int lock_id) +{ + ulock_t ulock; + int wait_result; + + + if (lock_set == LOCK_SET_NULL) + return KERN_INVALID_ARGUMENT; + + if (lock_id < 0 || lock_id >= lock_set->n_ulocks) + return KERN_INVALID_ARGUMENT; + + retry: + lock_set_lock(lock_set); + if (!lock_set->active) { + lock_set_unlock(lock_set); + return KERN_LOCK_SET_DESTROYED; + } + + ulock = (ulock_t) &lock_set->ulock_list[lock_id]; + ulock_lock(ulock); + lock_set_unlock(lock_set); + + /* + * If there is another accepting thread that beat us, just + * return with an error. 
+ */ + if (ulock->accept_wait) { + ulock_unlock(ulock); + return KERN_ALREADY_WAITING; + } + + if (ulock->holder == current_act()) { + ulock_unlock(ulock); + return KERN_LOCK_OWNED_SELF; + } + + /* + * If the handoff thread (the sender) is already waiting to + * hand-off the lock to the accepting thread (the receiver), + * then perform the hand-off now. + */ + if (ulock->ho_wait) { + wait_queue_t wq = &ulock->wait_queue; + thread_t thread; + + /* + * See who the lucky devil is, if he is still there waiting. + */ + assert(ulock->holder != THR_ACT_NULL); + thread = ulock->holder->thread; + + if (wait_queue_wakeup_thread(wq, + LOCK_SET_HANDOFF, + thread, + THREAD_AWAKENED) == KERN_SUCCESS) { + /* + * Holder thread was still waiting to give it + * away. Take over ownership. + */ + ulock_ownership_clear(ulock); + ulock_ownership_set(ulock, current_thread()); + ulock->ho_wait = FALSE; + ulock_unlock(ulock); + return (ulock->unstable) ? KERN_LOCK_UNSTABLE : + KERN_SUCCESS; + } + + /* + * OOPS. The owner was aborted out of the handoff. + * He will clear his own flag when he gets back. + * in the meantime, we will wait as if we didn't + * even see his flag (by falling thru). + */ + } + + ulock->accept_wait = TRUE; + wait_queue_assert_wait(&ulock->wait_queue, + LOCK_SET_HANDOFF, + THREAD_ABORTSAFE); + ulock_unlock(ulock); + + ETAP_SET_REASON(current_thread(), BLOCKED_ON_LOCK_HANDOFF); + wait_result = thread_block((void (*)(void))0); + + /* + * If the thread was woken-up via some action other than + * lock_handoff_accept or lock_set_destroy (i.e. thread_terminate), + * then we need to clear the ulock's handoff state. 
+ */ + switch (wait_result) { + + case THREAD_AWAKENED: + return KERN_SUCCESS; + + case THREAD_INTERRUPTED: + ulock_lock(ulock); + ulock->accept_wait = FALSE; + ulock_unlock(ulock); + return KERN_ABORTED; + + case THREAD_RESTART: + goto retry; + + default: + panic("lock_handoff_accept"); + } +} + +/* + * Routine: lock_set_reference + * + * Take out a reference on a lock set. This keeps the data structure + * in existence (but the lock set may be deactivated). + */ +void +lock_set_reference(lock_set_t lock_set) +{ + lock_set_lock(lock_set); + lock_set->ref_count++; + lock_set_unlock(lock_set); +} + +/* + * Routine: lock_set_dereference + * + * Release a reference on a lock set. If this is the last reference, + * the lock set data structure is deallocated. + */ +void +lock_set_dereference(lock_set_t lock_set) +{ + int ref_count; + int size; + + lock_set_lock(lock_set); + ref_count = --(lock_set->ref_count); + lock_set_unlock(lock_set); + + if (ref_count == 0) { + size = sizeof(struct lock_set) + + (sizeof(struct ulock) * (lock_set->n_ulocks - 1)); + kfree((vm_offset_t) lock_set, size); + } +} diff --git a/osfmk/kern/sync_lock.h b/osfmk/kern/sync_lock.h new file mode 100644 index 000000000..040594f8c --- /dev/null +++ b/osfmk/kern/sync_lock.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: kern/sync_lock.h + * Author: Joseph CaraDonna + * + * Contains RT distributed lock synchronization service definitions. + */ + +#ifndef _KERN_SYNC_LOCK_H_ +#define _KERN_SYNC_LOCK_H_ + +#include + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include +#include +#include + +typedef struct ulock { + queue_chain_t thread_link; /* ulocks owned by a thread */ + queue_chain_t held_link; /* ulocks held in the lock set */ + queue_chain_t handoff_link; /* ulocks w/ active handoffs */ + + decl_mutex_data(,lock) /* ulock lock */ + + struct lock_set *lock_set; /* the retaining lock set */ + thread_act_t holder; /* thread_act that holds the lock */ + unsigned int /* flags */ + /* boolean_t */ blocked:1, /* did threads block waiting? */ + /* boolean_t */ unstable:1, /* unstable? (holder died) */ + /* boolean_t */ ho_wait:1, /* handoff thread waiting? */ + /* boolean_t */ accept_wait:1, /* accepting thread waiting? 
*/ + :0; /* force to long boundary */ + + struct wait_queue wait_queue; /* queue of blocked threads */ +} Ulock; + +typedef struct ulock *ulock_t; + +typedef struct lock_set { + queue_chain_t task_link; /* chain of lock sets owned by a task */ + decl_mutex_data(,lock) /* lock set lock */ + task_t owner; /* task that owns the lock set */ + ipc_port_t port; /* lock set port */ + int ref_count; /* reference count */ + + boolean_t active; /* active status */ + int n_ulocks; /* number of ulocks in the lock set */ + + struct ulock ulock_list[1]; /* ulock group list place holder */ +} Lock_Set; + +#define ULOCK_NULL ((ulock_t) 0) + +#define ULOCK_FREE 0 +#define ULOCK_HELD 1 + +#define LOCK_OPERATION_ABORTED(th) ((th)->wait_link.prev != (queue_entry_t) 0) +#define LOCK_OPERATION_COMPLETE(th) ((th)->wait_link.prev = (queue_entry_t) 0) + +/* + * Data structure internal lock macros + */ + +#define lock_set_lock_init(ls) mutex_init(&(ls)->lock, \ + ETAP_THREAD_LOCK_SET) +#define lock_set_lock(ls) mutex_lock(&(ls)->lock) +#define lock_set_unlock(ls) mutex_unlock(&(ls)->lock) + +#define ulock_lock_init(ul) mutex_init(&(ul)->lock, \ + ETAP_THREAD_ULOCK) +#define ulock_lock(ul) mutex_lock(&(ul)->lock) +#define ulock_unlock(ul) mutex_unlock(&(ul)->lock) + +extern void lock_set_init(void); + +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * Forward Declarations + */ + +extern kern_return_t lock_set_create (task_t task, + lock_set_t *new_lock_set, + int n_ulocks, + int policy); + +extern kern_return_t lock_set_destroy (task_t task, + lock_set_t lock_set); + +extern kern_return_t lock_acquire (lock_set_t lock_set, + int lock_id); + +extern kern_return_t lock_release (lock_set_t lock_set, + int lock_id); + +extern kern_return_t lock_try (lock_set_t lock_set, + int lock_id); + +extern kern_return_t lock_make_stable (lock_set_t lock_set, + int lock_id); + +extern kern_return_t lock_make_unstable (ulock_t ulock, + thread_act_t thr_act); + +extern kern_return_t lock_release_internal (ulock_t 
ulock, + thread_act_t thr_act); + +extern kern_return_t lock_handoff (lock_set_t lock_set, + int lock_id); + +extern kern_return_t lock_handoff_accept (lock_set_t lock_set, + int lock_id); + +extern void lock_set_reference (lock_set_t lock_set); +extern void lock_set_dereference (lock_set_t lock_set); + +#endif /* _KERN_SYNC_LOCK_H_ */ diff --git a/osfmk/kern/sync_sema.c b/osfmk/kern/sync_sema.c new file mode 100644 index 000000000..8fe37c371 --- /dev/null +++ b/osfmk/kern/sync_sema.c @@ -0,0 +1,976 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: kern/sync_sema.c + * Author: Joseph CaraDonna + * + * Contains RT distributed semaphore synchronization services. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned int semaphore_event; +#define SEMAPHORE_EVENT ((event_t)&semaphore_event) + +zone_t semaphore_zone; +unsigned int semaphore_max = SEMAPHORE_MAX; + +/* + * ROUTINE: semaphore_init [private] + * + * Initialize the semaphore mechanisms. + * Right now, we only need to initialize the semaphore zone. + */ +void +semaphore_init(void) +{ + semaphore_zone = zinit(sizeof(struct semaphore), + semaphore_max * sizeof(struct semaphore), + sizeof(struct semaphore), + "semaphores"); +} + +/* + * Routine: semaphore_create + * + * Creates a semaphore. + * The port representing the semaphore is returned as a parameter. + */ +kern_return_t +semaphore_create( + task_t task, + semaphore_t *new_semaphore, + int policy, + int value) +{ + semaphore_t s = SEMAPHORE_NULL; + + + + if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) { + *new_semaphore = SEMAPHORE_NULL; + return KERN_INVALID_ARGUMENT; + } + + s = (semaphore_t) zalloc (semaphore_zone); + + if (s == SEMAPHORE_NULL) { + *new_semaphore = SEMAPHORE_NULL; + return KERN_RESOURCE_SHORTAGE; + } + + wait_queue_init(&s->wait_queue, policy); /* also inits lock */ + s->count = value; + s->ref_count = 1; + + /* + * Create and initialize the semaphore port + */ + s->port = ipc_port_alloc_kernel(); + if (s->port == IP_NULL) { + /* This will deallocate the semaphore */ + semaphore_dereference(s); + *new_semaphore = SEMAPHORE_NULL; + return KERN_RESOURCE_SHORTAGE; + } + + ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE); + + /* + * Associate the new semaphore with the task by adding + * the new semaphore to the task's semaphore list. + * + * Associate the task with the new semaphore by having the + * semaphores task pointer point to the owning task's structure. 
+ */ + task_lock(task); + enqueue_head(&task->semaphore_list, (queue_entry_t) s); + task->semaphores_owned++; + s->owner = task; + s->active = TRUE; + task_unlock(task); + + *new_semaphore = s; + + return KERN_SUCCESS; +} + +/* + * Routine: semaphore_destroy + * + * Destroys a semaphore. This call will only succeed if the + * specified task is the SAME task name specified at the semaphore's + * creation. + * + * All threads currently blocked on the semaphore are awoken. These + * threads will return with the KERN_TERMINATED error. + */ +kern_return_t +semaphore_destroy( + task_t task, + semaphore_t semaphore) +{ + int old_count; + thread_t thread; + spl_t spl_level; + + + if (task == TASK_NULL || semaphore == SEMAPHORE_NULL) + return KERN_INVALID_ARGUMENT; + + /* + * Disown semaphore + */ + task_lock(task); + if (semaphore->owner != task) { + task_unlock(task); + return KERN_INVALID_ARGUMENT; + } + remqueue(&task->semaphore_list, (queue_entry_t) semaphore); + semaphore->owner = TASK_NULL; + task->semaphores_owned--; + task_unlock(task); + + spl_level = splsched(); + semaphore_lock(semaphore); + + /* + * Deactivate semaphore + */ + assert(semaphore->active); + semaphore->active = FALSE; + + /* + * Wakeup blocked threads + */ + old_count = semaphore->count; + semaphore->count = 0; + + if (old_count < 0) { + wait_queue_wakeup_all_locked(&semaphore->wait_queue, + SEMAPHORE_EVENT, + THREAD_RESTART, + TRUE); /* unlock? */ + } else { + semaphore_unlock(semaphore); + } + splx(spl_level); + + /* + * Deallocate + * + * Drop the semaphore reference, which in turn deallocates the + * semaphore structure if the reference count goes to zero. + */ + ipc_port_dealloc_kernel(semaphore->port); + semaphore_dereference(semaphore); + return KERN_SUCCESS; +} + +/* + * Routine: semaphore_signal_internal + * + * Signals the semaphore as direct. + * Assumptions: + * Semaphore is locked. 
+ */
+kern_return_t
+semaphore_signal_internal(
+	semaphore_t		semaphore,
+	thread_act_t		thread_act,
+	int			options)
+{
+	kern_return_t kr;
+	spl_t  spl_level;
+
+	/* take the semaphore's spin lock at splsched for the whole body */
+	spl_level = splsched();
+	semaphore_lock(semaphore);
+
+	if (!semaphore->active) {
+		/* semaphore was destroyed out from under us */
+		semaphore_unlock(semaphore);
+		splx(spl_level);
+		return KERN_TERMINATED;
+	}
+
+	/*
+	 * Caller named a specific thread: wake only that thread, and only
+	 * if someone is actually waiting (count < 0 means waiters exist).
+	 */
+	if (thread_act != THR_ACT_NULL) {
+		if (semaphore->count < 0) {
+			kr = wait_queue_wakeup_thread_locked(
+					&semaphore->wait_queue,
+					SEMAPHORE_EVENT,
+					thread_act->thread,
+					THREAD_AWAKENED,
+					TRUE);  /* unlock? */
+		} else {
+			semaphore_unlock(semaphore);
+			kr = KERN_NOT_WAITING;
+		}
+		splx(spl_level);
+		return kr;
+	}
+
+	/*
+	 * Signal-all: release every waiter and zero the count, or (with
+	 * no waiters) optionally pre-post a single unit.
+	 */
+	if (options & SEMAPHORE_SIGNAL_ALL) {
+		int old_count = semaphore->count;
+
+		if (old_count < 0) {
+			semaphore->count = 0;  /* always reset */
+			kr = wait_queue_wakeup_all_locked(
+					&semaphore->wait_queue,
+					SEMAPHORE_EVENT,
+					THREAD_AWAKENED,
+					TRUE);  /* unlock? */
+		} else {
+			if (options & SEMAPHORE_SIGNAL_PREPOST)
+				semaphore->count++;
+			semaphore_unlock(semaphore);
+			kr = KERN_SUCCESS;
+		}
+		splx(spl_level);
+		return kr;
+	}
+
+	/*
+	 * Default case: wake exactly one waiter if any.  FALSE keeps the
+	 * wait queue call from dropping our semaphore lock; we unlock it
+	 * ourselves on each path.
+	 */
+	if (semaphore->count < 0) {
+		if (wait_queue_wakeup_one_locked(
+					&semaphore->wait_queue,
+					SEMAPHORE_EVENT,
+					THREAD_AWAKENED,
+					FALSE) == KERN_SUCCESS) {
+			semaphore_unlock(semaphore);
+			splx(spl_level);
+			return KERN_SUCCESS;
+		} else
+			semaphore->count = 0;  /* all waiters gone */
+	}
+
+	/* nobody woken: optionally bank the signal for a future wait */
+	if (options & SEMAPHORE_SIGNAL_PREPOST) {
+		semaphore->count++;
+	}
+
+	semaphore_unlock(semaphore);
+	splx(spl_level);
+	return KERN_NOT_WAITING;
+}
+
+/*
+ *	Routine:	semaphore_signal_thread
+ *
+ *	If the specified thread_act is blocked on the semaphore, it is
+ *	woken up.  If a NULL thread_act was supplied, then any one
+ *	thread is woken up.  Otherwise the caller gets KERN_NOT_WAITING
+ *	and the semaphore is unchanged.
+ */ +kern_return_t +semaphore_signal_thread( + semaphore_t semaphore, + thread_act_t thread_act) +{ + kern_return_t ret; + + if (semaphore == SEMAPHORE_NULL) + return KERN_INVALID_ARGUMENT; + + ret = semaphore_signal_internal(semaphore, + thread_act, + SEMAPHORE_OPTION_NONE); + return ret; +} + +/* + * Routine: semaphore_signal_thread_trap + * + * Trap interface to the semaphore_signal_thread function. + */ +kern_return_t +semaphore_signal_thread_trap( + mach_port_name_t sema_name, + mach_port_name_t thread_name) +{ + + semaphore_t semaphore; + thread_act_t thread_act; + kern_return_t kr; + + /* + * MACH_PORT_NULL is not an error. It means that we want to + * select any one thread that is already waiting, but not to + * pre-post the semaphore. + */ + if (thread_name != MACH_PORT_NULL) { + thread_act = port_name_to_act(thread_name); + if (thread_act == THR_ACT_NULL) + return KERN_INVALID_ARGUMENT; + } else + thread_act = THR_ACT_NULL; + + kr = port_name_to_semaphore(sema_name, &semaphore); + if (kr != KERN_SUCCESS) { + act_deallocate(thread_act); + return kr; + } + kr = semaphore_signal_internal(semaphore, + thread_act, + SEMAPHORE_OPTION_NONE); + semaphore_dereference(semaphore); + act_deallocate(thread_act); + return kr; +} + + + +/* + * Routine: semaphore_signal + * + * Traditional (in-kernel client and MIG interface) semaphore + * signal routine. Most users will access the trap version. + * + * This interface in not defined to return info about whether + * this call found a thread waiting or not. The internal + * routines (and future external routines) do. We have to + * convert those into plain KERN_SUCCESS returns. 
+ */ +kern_return_t +semaphore_signal( + semaphore_t semaphore) +{ + kern_return_t kr; + + if (semaphore == SEMAPHORE_NULL) + return KERN_INVALID_ARGUMENT; + + kr = semaphore_signal_internal(semaphore, + THR_ACT_NULL, + SEMAPHORE_SIGNAL_PREPOST); + if (kr == KERN_NOT_WAITING) + return KERN_SUCCESS; + return kr; +} + +/* + * Routine: semaphore_signal_trap + * + * Trap interface to the semaphore_signal function. + */ +kern_return_t +semaphore_signal_trap( + mach_port_name_t sema_name) +{ + + semaphore_t semaphore; + kern_return_t kr; + + kr = port_name_to_semaphore(sema_name, &semaphore); + if (kr != KERN_SUCCESS) { + return kr; + } + kr = semaphore_signal_internal(semaphore, + THR_ACT_NULL, + SEMAPHORE_SIGNAL_PREPOST); + semaphore_dereference(semaphore); + if (kr == KERN_NOT_WAITING) + return KERN_SUCCESS; + return kr; +} + +/* + * Routine: semaphore_signal_all + * + * Awakens ALL threads currently blocked on the semaphore. + * The semaphore count returns to zero. + */ +kern_return_t +semaphore_signal_all( + semaphore_t semaphore) +{ + kern_return_t kr; + + if (semaphore == SEMAPHORE_NULL) + return KERN_INVALID_ARGUMENT; + + kr = semaphore_signal_internal(semaphore, + THR_ACT_NULL, + SEMAPHORE_SIGNAL_ALL); + if (kr == KERN_NOT_WAITING) + return KERN_SUCCESS; + return kr; +} + +/* + * Routine: semaphore_signal_all_trap + * + * Trap interface to the semaphore_signal_all function. + */ +kern_return_t +semaphore_signal_all_trap( + mach_port_name_t sema_name) +{ + + semaphore_t semaphore; + kern_return_t kr; + + kr = port_name_to_semaphore(sema_name, &semaphore); + if (kr != KERN_SUCCESS) { + return kr; + } + kr = semaphore_signal_internal(semaphore, + THR_ACT_NULL, + SEMAPHORE_SIGNAL_ALL); + semaphore_dereference(semaphore); + if (kr == KERN_NOT_WAITING) + return KERN_SUCCESS; + return kr; +} + +/* + * Routine: semaphore_convert_wait_result + * + * Generate the return code after a semaphore wait/block. 
+ * takes the wait result as an input and converts that to an
+ * appropriate result.
+ * Assumptions:
+ * A reference is held on the signal semaphore.
*/ + } + semaphore_unlock(wait_semaphore); + splx(spl_level); + + /* + * wait_semaphore is unlocked so we are free to go ahead and + * signal the signal_semaphore (if one was provided). + */ + if (signal_semaphore != SEMAPHORE_NULL) { + kern_return_t signal_kr; + + /* + * lock the signal semaphore reference we got and signal it. + * This will NOT block (we cannot block after having asserted + * our intention to wait above). + */ + signal_kr = semaphore_signal_internal(signal_semaphore, + THR_ACT_NULL, + SEMAPHORE_SIGNAL_PREPOST); + + if (signal_kr == KERN_NOT_WAITING) + signal_kr = KERN_SUCCESS; + else if (signal_kr == KERN_TERMINATED) { + /* + * Uh!Oh! The semaphore we were to signal died. + * We have to get ourselves out of the wait in + * case we get stuck here forever (it is assumed + * that the semaphore we were posting is gating + * the decision by someone else to post the + * semaphore we are waiting on). People will + * discover the other dead semaphore soon enough. + * If we got out of the wait cleanly (someone + * already posted a wakeup to us) then return that + * (most important) result. Otherwise, + * return the KERN_TERMINATED status. + */ + thread_t self = current_thread(); + + clear_wait(self, THREAD_INTERRUPTED); + kr = semaphore_convert_wait_result(self->wait_result); + if (kr == KERN_ABORTED) + kr = KERN_TERMINATED; + } + } + + /* + * If we had an error, or we didn't really need to wait we can + * return now that we have signalled the signal semaphore. + */ + if (kr != KERN_ALREADY_WAITING) + return kr; + + /* + * If it is a timed wait, go ahead and set up the timer. 
+ * appropriate semaphore continuation. This will gather the
+ * of the trap interface (to accommodate calling conventions that
+ */ +kern_return_t +semaphore_timedwait_trap( + mach_port_name_t name, + unsigned int sec, + clock_res_t nsec) +{ + semaphore_t semaphore; + mach_timespec_t wait_time; + kern_return_t kr; + + wait_time.tv_sec = sec; + wait_time.tv_nsec = nsec; + if(BAD_MACH_TIMESPEC(&wait_time)) + return KERN_INVALID_VALUE; + + kr = port_name_to_semaphore(name, &semaphore); + if (kr != KERN_SUCCESS) + return kr; + + kr = semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + &wait_time, + thread_syscall_return); + semaphore_dereference(semaphore); + return kr; +} + +/* + * Routine: semaphore_wait_signal + * + * Atomically register a wait on a semaphore and THEN signal + * another. This is the in-kernel entry point that does not + * block at a continuation and does not free a signal_semaphore + * reference. + */ +kern_return_t +semaphore_wait_signal( + semaphore_t wait_semaphore, + semaphore_t signal_semaphore) +{ + if (wait_semaphore == SEMAPHORE_NULL) + return KERN_INVALID_ARGUMENT; + + return(semaphore_wait_internal(wait_semaphore, + signal_semaphore, + (mach_timespec_t *)0, + (void(*)(kern_return_t))0)); +} + +/* + * Trap: semaphore_wait_signal_trap + * + * Atomically register a wait on a semaphore and THEN signal + * another. This is the trap version from user space. 
+ */ +kern_return_t +semaphore_wait_signal_trap( + mach_port_name_t wait_name, + mach_port_name_t signal_name) +{ + semaphore_t wait_semaphore; + semaphore_t signal_semaphore; + kern_return_t kr; + + kr = port_name_to_semaphore(signal_name, &signal_semaphore); + if (kr != KERN_SUCCESS) + return kr; + + kr = port_name_to_semaphore(wait_name, &wait_semaphore); + if (kr != KERN_SUCCESS) { + semaphore_dereference(signal_semaphore); + return kr; + } + + kr = semaphore_wait_internal(wait_semaphore, + signal_semaphore, + (mach_timespec_t *)0, + thread_syscall_return); + + semaphore_dereference(wait_semaphore); + semaphore_dereference(signal_semaphore); + return kr; +} + + +/* + * Routine: semaphore_timedwait_signal + * + * Atomically register a wait on a semaphore and THEN signal + * another. This is the in-kernel entry point that does not + * block at a continuation. + * + * A timeout of {0,0} is considered non-blocking. + */ +kern_return_t +semaphore_timedwait_signal( + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + mach_timespec_t wait_time) +{ + if (wait_semaphore == SEMAPHORE_NULL) + return KERN_INVALID_ARGUMENT; + + if(BAD_MACH_TIMESPEC(&wait_time)) + return KERN_INVALID_VALUE; + + return(semaphore_wait_internal(wait_semaphore, + signal_semaphore, + &wait_time, + (void(*)(kern_return_t))0)); +} + +/* + * Trap: semaphore_timedwait_signal_trap + * + * Atomically register a timed wait on a semaphore and THEN signal + * another. This is the trap version from user space. 
+ */ +kern_return_t +semaphore_timedwait_signal_trap( + mach_port_name_t wait_name, + mach_port_name_t signal_name, + unsigned int sec, + clock_res_t nsec) +{ + semaphore_t wait_semaphore; + semaphore_t signal_semaphore; + mach_timespec_t wait_time; + kern_return_t kr; + + wait_time.tv_sec = sec; + wait_time.tv_nsec = nsec; + if(BAD_MACH_TIMESPEC(&wait_time)) + return KERN_INVALID_VALUE; + + kr = port_name_to_semaphore(signal_name, &signal_semaphore); + if (kr != KERN_SUCCESS) + return kr; + + kr = port_name_to_semaphore(wait_name, &wait_semaphore); + if (kr != KERN_SUCCESS) { + semaphore_dereference(signal_semaphore); + return kr; + } + + kr = semaphore_wait_internal(wait_semaphore, + signal_semaphore, + &wait_time, + thread_syscall_return); + + semaphore_dereference(wait_semaphore); + semaphore_dereference(signal_semaphore); + return kr; +} + + +/* + * Routine: semaphore_reference + * + * Take out a reference on a semaphore. This keeps the data structure + * in existence (but the semaphore may be deactivated). + */ +void +semaphore_reference( + semaphore_t semaphore) +{ + spl_t spl_level; + + spl_level = splsched(); + semaphore_lock(semaphore); + + semaphore->ref_count++; + + semaphore_unlock(semaphore); + splx(spl_level); +} + +/* + * Routine: semaphore_dereference + * + * Release a reference on a semaphore. If this is the last reference, + * the semaphore data structure is deallocated. 
+ */ +void +semaphore_dereference( + semaphore_t semaphore) +{ + int ref_count; + spl_t spl_level; + + if (semaphore != NULL) { + spl_level = splsched(); + semaphore_lock(semaphore); + + ref_count = --(semaphore->ref_count); + + semaphore_unlock(semaphore); + splx(spl_level); + + if (ref_count == 0) { + assert(wait_queue_empty(&semaphore->wait_queue)); + zfree(semaphore_zone, (vm_offset_t)semaphore); + } + } +} diff --git a/osfmk/kern/sync_sema.h b/osfmk/kern/sync_sema.h new file mode 100644 index 000000000..8d50bcfa7 --- /dev/null +++ b/osfmk/kern/sync_sema.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: kern/sync_sema.h + * Author: Joseph CaraDonna + * + * Contains RT distributed semaphore synchronization service definitions. 
+ */ + +#ifndef _KERN_SYNC_SEMA_H_ +#define _KERN_SYNC_SEMA_H_ + +#include +#include + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include +#include + +typedef struct semaphore { + queue_chain_t task_link; /* chain of semaphores owned by a task */ + struct wait_queue wait_queue; /* queue of blocked threads & lock */ + task_t owner; /* task that owns semaphore */ + ipc_port_t port; /* semaphore port */ + int ref_count; /* reference count */ + int count; /* current count value */ + boolean_t active; /* active status */ +} Semaphore; + +#define semaphore_lock(semaphore) wait_queue_lock(&(semaphore)->wait_queue) +#define semaphore_unlock(semaphore) wait_queue_unlock(&(semaphore)->wait_queue) + +extern void semaphore_init(void); + +extern void semaphore_reference (semaphore_t semaphore); +extern void semaphore_dereference (semaphore_t semaphore); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_SYNC_SEMA_H_ */ diff --git a/osfmk/kern/syscall_emulation.c b/osfmk/kern/syscall_emulation.c new file mode 100644 index 000000000..843dd5a1e --- /dev/null +++ b/osfmk/kern/syscall_emulation.c @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
+ * Permission to use, copy, modify and distribute this software and its
+ */ +#define base_size (sizeof(struct eml_dispatch) - sizeof(eml_routine_t)) +#define count_to_size(count) \ + (base_size + sizeof(vm_offset_t) * (count)) + +#define size_to_count(size) \ + ( ((size) - base_size) / sizeof(vm_offset_t) ) + +/* Forwards */ +kern_return_t +task_set_emulation_vector_internal( + task_t task, + int vector_start, + emulation_vector_t emulation_vector, + mach_msg_type_number_t emulation_vector_count); + +/* + * eml_init: initialize user space emulation code + */ +void +eml_init(void) +{ +} + +/* + * eml_task_reference() [Exported] + * + * Bumps the reference count on the common emulation + * vector. + */ + +void +eml_task_reference( + task_t task, + task_t parent) +{ + register eml_dispatch_t eml; + + if (parent == TASK_NULL) + eml = EML_DISPATCH_NULL; + else + eml = parent->eml_dispatch; + + if (eml != EML_DISPATCH_NULL) { + mutex_lock(&eml->lock); + eml->ref_count++; + mutex_unlock(&eml->lock); + } + task->eml_dispatch = eml; +} + + +/* + * eml_task_deallocate() [Exported] + * + * Cleans up after the emulation code when a process exits. + */ + +void +eml_task_deallocate( + task_t task) +{ + register eml_dispatch_t eml; + + eml = task->eml_dispatch; + if (eml != EML_DISPATCH_NULL) { + int count; + + mutex_lock(&eml->lock); + count = --eml->ref_count; + mutex_unlock(&eml->lock); + + if (count == 0) + kfree((vm_offset_t)eml, count_to_size(eml->disp_count)); + + task->eml_dispatch = EML_DISPATCH_NULL; + } +} + +/* + * task_set_emulation_vector: [Server Entry] + * set a list of emulated system calls for this task. 
+ * We try to re-use the existing emulation vector
+ * There is no current emulation vector.
+ */ + + task_unlock(task); + + if (new_eml != EML_DISPATCH_NULL) + kfree((vm_offset_t)new_eml, count_to_size(new_eml->disp_count)); + + new_size = count_to_size(new_end - new_start); + new_eml = (eml_dispatch_t) kalloc(new_size); + + bzero((char *)new_eml, new_size); + mutex_init(&new_eml->lock, ETAP_MISC_EMULATE); + new_eml->ref_count = 1; + new_eml->disp_min = new_start; + new_eml->disp_count = new_end - new_start; + + continue; + } + + /* + * We have the emulation vector. + * Install the new emulation entries. + */ + bcopy((char *)&emulation_vector[0], + (char *)&cur_eml->disp_vector[vector_start - cur_eml->disp_min], + emulation_vector_count * sizeof(vm_offset_t)); + + task_unlock(task); + + /* + * Discard any old emulation vector we don`t need. + */ + if (old_eml) + kfree((vm_offset_t) old_eml, count_to_size(old_eml->disp_count)); + + return KERN_SUCCESS; +} + +/* + * task_set_emulation_vector: [Server Entry] + * + * Set the list of emulated system calls for this task. + * The list is out-of-line. + */ +kern_return_t +task_set_emulation_vector( + task_t task, + int vector_start, + emulation_vector_t emulation_vector, + mach_msg_type_number_t emulation_vector_count) +{ + kern_return_t kr; + vm_offset_t emul_vector_addr; + + if (task == TASK_NULL) + return EML_BAD_TASK; /* XXX sb KERN_INVALID_ARGUMENT */ + + /* + * XXX - barbou@gr.osf.org. + * If emulation_vector_count is NULL, discard the emulation + * vectors. + * We need a way to do that for emulator-less servers started + * from a classic server. There seems to be no way to get rid + * of or to avoid inheriting the emulation vector !? + */ + if (emulation_vector_count == 0) { + eml_task_deallocate(task); + return KERN_SUCCESS; + } + + + /* + * The emulation vector is really a vm_map_copy_t. + */ + kr = vm_map_copyout(ipc_kernel_map, &emul_vector_addr, + (vm_map_copy_t) emulation_vector); + if (kr != KERN_SUCCESS) + return kr; + + /* + * Can't fault while we hold locks. 
+ */ + kr = vm_map_wire(ipc_kernel_map, + trunc_page(emul_vector_addr), + round_page(emul_vector_addr + + emulation_vector_count * + sizeof(eml_dispatch_t)), + VM_PROT_READ|VM_PROT_WRITE, FALSE); + assert(kr == KERN_SUCCESS); + + /* + * Do the work. + */ + kr = task_set_emulation_vector_internal( + task, + vector_start, + (emulation_vector_t) emul_vector_addr, + emulation_vector_count); + assert(kr == KERN_SUCCESS); + + /* + * Discard the memory + */ + (void) kmem_free(ipc_kernel_map, + emul_vector_addr, + emulation_vector_count * sizeof(eml_dispatch_t)); + + return KERN_SUCCESS; +} + +/* + * task_get_emulation_vector: [Server Entry] + * + * Get the list of emulated system calls for this task. + * List is returned out-of-line. + */ +kern_return_t +task_get_emulation_vector( + task_t task, + int *vector_start, /* out */ + emulation_vector_t *emulation_vector, /* out */ + mach_msg_type_number_t *emulation_vector_count) /* out */ +{ + eml_dispatch_t eml; + vm_size_t vector_size, size; + vm_offset_t addr; + + if (task == TASK_NULL) + return EML_BAD_TASK; + + addr = 0; + size = 0; + + for(;;) { + vm_size_t size_needed; + + task_lock(task); + eml = task->eml_dispatch; + if (eml == EML_DISPATCH_NULL) { + task_unlock(task); + if (addr) + (void) kmem_free(ipc_kernel_map, addr, size); + *vector_start = 0; + *emulation_vector = 0; + *emulation_vector_count = 0; + return KERN_SUCCESS; + } + + /* + * Do we have the memory we need? + */ + vector_size = eml->disp_count * sizeof(vm_offset_t); + + size_needed = round_page(vector_size); + if (size_needed <= size) + break; + + /* + * If not, unlock the task and allocate more memory. 
+ */ + task_unlock(task); + + if (size != 0) + kmem_free(ipc_kernel_map, addr, size); + + size = size_needed; + if (kmem_alloc(ipc_kernel_map, &addr, size) != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + } + + /* + * Copy out the dispatch addresses + */ + *vector_start = eml->disp_min; + *emulation_vector_count = eml->disp_count; + bcopy((char *)eml->disp_vector, + (char *)addr, + vector_size); + + /* + * Unlock the task and free any memory we did not need + */ + task_unlock(task); + { + vm_size_t size_used, size_left; + vm_map_copy_t memory; + + /* + * Free any unused memory beyond the end of the last page used + */ + size_used = round_page(vector_size); + if (size_used != size) + (void) kmem_free(ipc_kernel_map, + addr + size_used, + size - size_used); + + /* + * Zero the remainder of the page being returned. + */ + size_left = size_used - vector_size; + if (size_left > 0) + bzero((char *)addr + vector_size, size_left); + + /* + * Unwire and make memory into copyin form. + */ + (void) vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE); + (void) vm_map_copyin(ipc_kernel_map, addr, vector_size, + TRUE, &memory); + + *emulation_vector = (emulation_vector_t) memory; + } + + return KERN_SUCCESS; +} + +/* + * task_set_emulation: [Server Entry] + * set up for user space emulation of syscalls within this task. + */ +kern_return_t +task_set_emulation( + task_t task, + vm_offset_t routine_entry_pt, + int routine_number) +{ + return task_set_emulation_vector_internal(task, routine_number, + &routine_entry_pt, 1); +} + + + + diff --git a/osfmk/kern/syscall_emulation.h b/osfmk/kern/syscall_emulation.h new file mode 100644 index 000000000..baf87c667 --- /dev/null +++ b/osfmk/kern/syscall_emulation.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_SYSCALL_EMULATION_H_ +#define _KERN_SYSCALL_EMULATION_H_ + +#ifndef ASSEMBLER +#include +#include +#include + +typedef vm_offset_t eml_routine_t; + +typedef struct eml_dispatch { + decl_mutex_data(,lock) /* lock for reference count */ + int ref_count; /* reference count */ + int disp_count; /* count of entries in vector */ + int disp_min; /* index of lowest entry in vector */ + eml_routine_t disp_vector[1]; /* first entry in array of dispatch */ + /* routines (array has disp_count + elements) */ +} *eml_dispatch_t; + +#define EML_ROUTINE_NULL (eml_routine_t)0 +#define EML_DISPATCH_NULL (eml_dispatch_t)0 + +#define EML_SUCCESS (0) + +#define EML_MOD (err_kern|err_sub(2)) +#define EML_BAD_TASK (EML_MOD|0x0001) +#define EML_BAD_CNT (EML_MOD|0x0002) + +/* Per-task initialization */ +extern void eml_init(void); + +/* Take reference on common task emulation vector */ +extern void eml_task_reference( + task_t new_task, + task_t parent_task); + +/* Deallocate reference on common task emulation vector */ +extern void eml_task_deallocate( + task_t task); + +#endif /* ASSEMBLER */ + +#endif /* _KERN_SYSCALL_EMULATION_H_ */ diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c new file mode 100644 index 000000000..caca3545a --- /dev/null +++ b/osfmk/kern/syscall_subr.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +/* + * swtch and swtch_pri both attempt to context switch (logic in + * thread_block no-ops the context switch if nothing would happen). 
+ * A boolean is returned that indicates whether there is anything + * else runnable. + * + * This boolean can be used by a thread waiting on a + * lock or condition: If FALSE is returned, the thread is justified + * in becoming a resource hog by continuing to spin because there's + * nothing else useful that the processor could do. If TRUE is + * returned, the thread should make one more check on the + * lock and then be a good citizen and really suspend. + */ + +#if 0 + +/* broken..do not enable */ + +swtch_continue() +{ + boolean_t retval; + register processor_t myprocessor; + + mp_disable_preemption(); + myprocessor = current_processor(); + retval = ( +#if NCPUS > 1 + myprocessor->runq.count > 0 || +#endif /*NCPUS > 1*/ + myprocessor->processor_set->runq.count > 0); + mp_enable_preemption(); + return retval; +} + +#endif + +boolean_t +swtch(void) +{ + register processor_t myprocessor; + boolean_t result; + + mp_disable_preemption(); + myprocessor = current_processor(); + if ( +#if NCPUS > 1 + myprocessor->runq.count == 0 && +#endif /* NCPUS > 1 */ + myprocessor->processor_set->runq.count == 0 ) { + mp_enable_preemption(); + + return (FALSE); + } + mp_enable_preemption(); + + counter(c_swtch_block++); + + thread_block((void (*)(void)) 0); + + mp_disable_preemption(); + myprocessor = current_processor(); + result = +#if NCPUS > 1 + myprocessor->runq.count > 0 || +#endif /*NCPUS > 1*/ + myprocessor->processor_set->runq.count > 0; + mp_enable_preemption(); + + return (result); +} + +boolean_t +swtch_pri( + int pri) +{ + thread_t self = current_thread(); + register processor_t myprocessor; + boolean_t result; + sched_policy_t *policy; + spl_t s; + + s = splsched(); + thread_lock(self); + myprocessor = current_processor(); + if ( +#if NCPUS > 1 + myprocessor->runq.count == 0 && +#endif /* NCPUS > 1 */ + myprocessor->processor_set->runq.count == 0 ) { + thread_unlock(self); + splx(s); + + return (FALSE); + } + + policy = &sched_policy[self->policy]; + 
thread_unlock(self); + splx(s); + + policy->sp_ops.sp_swtch_pri(policy, pri); + + mp_disable_preemption(); + myprocessor = current_processor(); + result = +#if NCPUS > 1 + myprocessor->runq.count > 0 || +#endif /*NCPUS > 1*/ + myprocessor->processor_set->runq.count > 0; + mp_enable_preemption(); + + return (result); +} + +/* + * thread_switch: + * + * Context switch. User may supply thread hint. + */ +kern_return_t +thread_switch( + mach_port_name_t thread_name, + int option, + mach_msg_timeout_t option_time) +{ + register thread_t self = current_thread(); + register thread_act_t hint_act = THR_ACT_NULL; + sched_policy_t *policy; + spl_t s; + + /* + * Process option. + */ + switch (option) { + + case SWITCH_OPTION_NONE: + case SWITCH_OPTION_DEPRESS: + case SWITCH_OPTION_WAIT: + break; + + default: + return (KERN_INVALID_ARGUMENT); + } + + if (thread_name != MACH_PORT_NULL) { + ipc_port_t port; + + if (ipc_port_translate_send(self->top_act->task->itk_space, + thread_name, &port) == KERN_SUCCESS) { + ip_reference(port); + ip_unlock(port); + + hint_act = convert_port_to_act(port); + ipc_port_release(port); + } + } + + s = splsched(); + thread_lock(self); + policy = &sched_policy[self->policy]; + thread_unlock(self); + splx(s); + + /* + * This is a scheduling policy-dependent operation. + * Call the routine associated with the thread's + * scheduling policy. + */ + return (policy->sp_ops. + sp_thread_switch(policy, hint_act, option, option_time)); +} diff --git a/osfmk/kern/syscall_subr.h b/osfmk/kern/syscall_subr.h new file mode 100644 index 000000000..80c86fe82 --- /dev/null +++ b/osfmk/kern/syscall_subr.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _KERN_SYSCALL_SUBR_H_ +#define _KERN_SYSCALL_SUBR_H_ + +#include +#include +#include +#include + +/* Attempt to context switch */ +extern boolean_t swtch(void); + +/* Attempt to context switch */ +extern boolean_t swtch_pri( + int pri); + +#endif /* _KERN_SYSCALL_SUBR_H_ */ diff --git a/osfmk/kern/syscall_sw.c b/osfmk/kern/syscall_sw.c new file mode 100644 index 000000000..2bfbd3b90 --- /dev/null +++ b/osfmk/kern/syscall_sw.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include + +#include + +/* Forwards */ +extern kern_return_t kern_invalid(void); +extern mach_port_name_t null_port(void); +extern kern_return_t not_implemented(void); + +/* + * To add a new entry: + * Add an "MACH_TRAP(routine, arg count)" to the table below. + * + * Add trap definition to mach/syscall_sw.h and + * recompile user library. + * + * WARNING: If you add a trap which requires more than 7 + * parameters, mach/{machine}/syscall_sw.h and {machine}/trap.c + * and/or {machine}/locore.s may need to be modified for it + * to work successfully. + * + * WARNING: Don't use numbers 0 through -9. They (along with + * the positive numbers) are reserved for Unix. + */ + +int kern_invalid_debug = 0; + +/* Include declarations of the trap functions. 
*/ + +#include +#include +#include + +#include +#include + +extern kern_return_t iokit_user_client_trap(); + +mach_trap_t mach_trap_table[] = { + MACH_TRAP(kern_invalid, 0), /* 0 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 1 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 2 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 3 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 4 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 5 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 6 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 7 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 8 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 9 */ /* Unix */ + MACH_TRAP(kern_invalid, 0), /* 10 */ + MACH_TRAP(kern_invalid, 0), /* 11 */ + MACH_TRAP(kern_invalid, 0), /* 12 */ + MACH_TRAP(kern_invalid, 0), /* 13 */ + MACH_TRAP(kern_invalid, 0), /* 14 */ + MACH_TRAP(kern_invalid, 0), /* 15 */ + MACH_TRAP(kern_invalid, 0), /* 16 */ + MACH_TRAP(kern_invalid, 0), /* 17 */ + MACH_TRAP(kern_invalid, 0), /* 18 */ + MACH_TRAP(kern_invalid, 0), /* 19 */ + MACH_TRAP(kern_invalid, 0), /* 20 */ + MACH_TRAP(kern_invalid, 0), /* 21 */ + MACH_TRAP(kern_invalid, 0), /* 22 */ + MACH_TRAP(kern_invalid, 0), /* 23 */ + MACH_TRAP(kern_invalid, 0), /* 24 */ + MACH_TRAP(kern_invalid, 0), /* 25 */ + MACH_TRAP(mach_reply_port, 0), /* 26 */ + MACH_TRAP(thread_self_trap, 0), /* 27 */ + MACH_TRAP(task_self_trap, 0), /* 28 */ + MACH_TRAP(host_self_trap, 0), /* 29 */ + MACH_TRAP(kern_invalid, 0), /* 30 */ + MACH_TRAP(kern_invalid, 0), /* 31 */ + MACH_TRAP(mach_msg_overwrite_trap, 9), /* 32 */ + MACH_TRAP(semaphore_signal_trap, 1), /* 33 */ + MACH_TRAP(semaphore_signal_all_trap, 1), /* 34 */ + MACH_TRAP(semaphore_signal_thread_trap, 2), /* 35 */ + MACH_TRAP(semaphore_wait_trap, 1), /* 36 */ + MACH_TRAP(semaphore_wait_signal_trap, 2), /* 37 */ + MACH_TRAP(semaphore_timedwait_trap, 3), /* 38 */ + MACH_TRAP(semaphore_timedwait_signal_trap, 4), /* 39 */ + MACH_TRAP(kern_invalid, 0), /* 40 */ + MACH_TRAP(init_process, 0), /* 41 */ + 
MACH_TRAP(kern_invalid, 0), /* 42 */ + MACH_TRAP(map_fd, 5), /* 43 */ + MACH_TRAP(kern_invalid, 0), /* 44 */ + MACH_TRAP(task_for_pid, 3), /* 45 */ + MACH_TRAP(pid_for_task, 2), /* 46 */ + MACH_TRAP(kern_invalid, 0), /* 47 */ + MACH_TRAP(macx_swapon, 4), /* 48 */ + MACH_TRAP(macx_swapoff, 2), /* 49 */ + MACH_TRAP(kern_invalid, 0), /* 50 */ + MACH_TRAP(macx_triggers, 4), /* 51 */ + MACH_TRAP(kern_invalid, 0), /* 52 */ + MACH_TRAP(kern_invalid, 0), /* 53 */ + MACH_TRAP(kern_invalid, 0), /* 54 */ + MACH_TRAP(kern_invalid, 0), /* 55 */ + MACH_TRAP(kern_invalid, 0), /* 56 */ + MACH_TRAP(kern_invalid, 0), /* 57 */ + MACH_TRAP(kern_invalid, 0), /* 58 */ + MACH_TRAP(swtch_pri, 1), /* 59 */ + MACH_TRAP(swtch, 0), /* 60 */ + MACH_TRAP(thread_switch, 3), /* 61 */ + MACH_TRAP(clock_sleep_trap, 5), /* 62 */ + MACH_TRAP(kern_invalid,0), /* 63 */ +/* traps 64 - 95 reserved (debo) */ + MACH_TRAP(kern_invalid,0), /* 64 */ + MACH_TRAP(kern_invalid,0), /* 65 */ + MACH_TRAP(kern_invalid,0), /* 66 */ + MACH_TRAP(kern_invalid,0), /* 67 */ + MACH_TRAP(kern_invalid,0), /* 68 */ + MACH_TRAP(kern_invalid,0), /* 69 */ + MACH_TRAP(kern_invalid,0), /* 70 */ + MACH_TRAP(kern_invalid,0), /* 71 */ + MACH_TRAP(kern_invalid,0), /* 72 */ + MACH_TRAP(kern_invalid,0), /* 73 */ + MACH_TRAP(kern_invalid,0), /* 74 */ + MACH_TRAP(kern_invalid,0), /* 75 */ + MACH_TRAP(kern_invalid,0), /* 76 */ + MACH_TRAP(kern_invalid,0), /* 77 */ + MACH_TRAP(kern_invalid,0), /* 78 */ + MACH_TRAP(kern_invalid,0), /* 79 */ + MACH_TRAP(kern_invalid,0), /* 80 */ + MACH_TRAP(kern_invalid,0), /* 81 */ + MACH_TRAP(kern_invalid,0), /* 82 */ + MACH_TRAP(kern_invalid,0), /* 83 */ + MACH_TRAP(kern_invalid,0), /* 84 */ + MACH_TRAP(kern_invalid,0), /* 85 */ + MACH_TRAP(kern_invalid,0), /* 86 */ + MACH_TRAP(kern_invalid,0), /* 87 */ + MACH_TRAP(kern_invalid,0), /* 88 */ + MACH_TRAP(mach_timebase_info, 1), /* 89 */ + MACH_TRAP(mach_wait_until, 2), /* 90 */ + MACH_TRAP(mk_timer_create, 0), /* 91 */ + MACH_TRAP(mk_timer_destroy, 1), /* 92 
*/ + MACH_TRAP(mk_timer_arm, 3), /* 93 */ + MACH_TRAP(mk_timer_cancel, 2), /* 94 */ + MACH_TRAP(mk_timebase_info, 5), /* 95 */ +/* traps 64 - 95 reserved (debo) */ + MACH_TRAP(kern_invalid,0), /* 96 */ + MACH_TRAP(kern_invalid,0), /* 97 */ + MACH_TRAP(kern_invalid,0), /* 98 */ + MACH_TRAP(kern_invalid,0), /* 99 */ +/* traps 100-107 reserved for iokit (esb) */ + MACH_TRAP(iokit_user_client_trap, 8), + /* 100 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 101 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 102 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 103 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 104 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 105 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 106 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 107 */ /* IOKit */ + MACH_TRAP(kern_invalid,0), /* 108 */ + MACH_TRAP(kern_invalid,0), /* 109 */ + MACH_TRAP(kern_invalid,0), /* 110 */ + MACH_TRAP(kern_invalid,0), /* 111 */ + MACH_TRAP(kern_invalid,0), /* 112 */ + MACH_TRAP(kern_invalid,0), /* 113 */ + MACH_TRAP(kern_invalid,0), /* 114 */ + MACH_TRAP(kern_invalid,0), /* 115 */ + MACH_TRAP(kern_invalid,0), /* 116 */ + MACH_TRAP(kern_invalid,0), /* 117 */ + MACH_TRAP(kern_invalid,0), /* 118 */ + MACH_TRAP(kern_invalid,0), /* 119 */ + MACH_TRAP(kern_invalid,0), /* 120 */ + MACH_TRAP(kern_invalid,0), /* 121 */ + MACH_TRAP(kern_invalid,0), /* 122 */ + MACH_TRAP(kern_invalid,0), /* 123 */ + MACH_TRAP(kern_invalid,0), /* 124 */ + MACH_TRAP(kern_invalid,0), /* 125 */ + MACH_TRAP(kern_invalid,0), /* 126 */ + MACH_TRAP(kern_invalid,0), /* 127 */ +}; + +int mach_trap_count = (sizeof(mach_trap_table) / sizeof(mach_trap_table[0])); + +mach_port_name_t +null_port(void) +{ + if (kern_invalid_debug) Debugger("null_port mach trap"); + return(MACH_PORT_NULL); +} + +kern_return_t +kern_invalid(void) +{ + if (kern_invalid_debug) Debugger("kern_invalid mach trap"); + return(KERN_INVALID_ARGUMENT); +} + +kern_return_t +not_implemented(void) +{ + return(MACH_SEND_INTERRUPTED); +} diff 
--git a/osfmk/kern/syscall_sw.h b/osfmk/kern/syscall_sw.h new file mode 100644 index 000000000..c8b3d31bd --- /dev/null +++ b/osfmk/kern/syscall_sw.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _KERN_SYSCALL_SW_H_ +#define _KERN_SYSCALL_SW_H_ + +#include + +/* + * mach_trap_stack indicates the trap may discard + * its kernel stack. Some architectures may need + * to save more state in the pcb for these traps. + */ + +typedef struct { + int mach_trap_arg_count; + int (*mach_trap_function)(void); + boolean_t mach_trap_stack; +#if !MACH_ASSERT + int mach_trap_unused; +#else + char* mach_trap_name; +#endif /* !MACH_ASSERT */ +} mach_trap_t; + +extern mach_trap_t mach_trap_table[]; +extern int mach_trap_count; +extern kern_return_t kern_invalid(void); + +#if !MACH_ASSERT +#define MACH_TRAP(name, arg_count) \ + { (arg_count), (int (*)(void)) (name), FALSE, 0 } +#else +#define MACH_TRAP(name, arg_count) \ + { (arg_count), (int (*)(void)) (name), FALSE, #name } +#endif /* !MACH_ASSERT */ + +#endif /* _KERN_SYSCALL_SW_H_ */ diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c new file mode 100644 index 000000000..0eaf47eaa --- /dev/null +++ b/osfmk/kern/task.c @@ -0,0 +1,1768 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: kern/task.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub, + * David Black + * + * Task management primitives implementation. + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for thread_wakeup */ +#include +#include /*** ??? 
fix so this can be removed ***/ +#include +#include +#include +#include /* for kernel_map, ipc_kernel_map */ +#include +#include +#include +#if MACH_KDB +#include +#endif /* MACH_KDB */ + +#if TASK_SWAPPER +#include +#endif /* TASK_SWAPPER */ + +/* + * Exported interfaces + */ + +#include +#include +#include + +task_t kernel_task; +zone_t task_zone; + +/* Forwards */ + +void task_hold_locked( + task_t task); +void task_wait_locked( + task_t task); +void task_release_locked( + task_t task); +void task_collect_scan(void); +void task_free( + task_t task ); +void task_synchronizer_destroy_all( + task_t task); +void task_subsystem_destroy_all( + task_t task); + +kern_return_t task_set_ledger( + task_t task, + ledger_t wired, + ledger_t paged); + +void +task_init(void) +{ + task_zone = zinit( + sizeof(struct task), + TASK_MAX * sizeof(struct task), + TASK_CHUNK * sizeof(struct task), + "tasks"); + + eml_init(); + + /* + * Create the kernel task as the first task. + * Task_create_local must assign to kernel_task as a side effect, + * for other initialization. (:-() + */ + if (task_create_local( + TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS) + panic("task_init\n"); + vm_map_deallocate(kernel_task->map); + kernel_task->map = kernel_map; + +#if MACH_ASSERT + if (watchacts & WA_TASK) + printf("task_init: kernel_task = %x map=%x\n", + kernel_task, kernel_map); +#endif /* MACH_ASSERT */ +} + +#if MACH_HOST +void +task_freeze( + task_t task) +{ + task_lock(task); + /* + * If may_assign is false, task is already being assigned, + * wait for that to finish. 
+ */ + while (task->may_assign == FALSE) { + task->assign_active = TRUE; + thread_sleep_mutex((event_t) &task->assign_active, + &task->lock, THREAD_INTERRUPTIBLE); + task_lock(task); + } + task->may_assign = FALSE; + task_unlock(task); + + return; +} + +void +task_unfreeze( + task_t task) +{ + task_lock(task); + assert(task->may_assign == FALSE); + task->may_assign = TRUE; + if (task->assign_active == TRUE) { + task->assign_active = FALSE; + thread_wakeup((event_t)&task->assign_active); + } + task_unlock(task); + + return; +} +#endif /* MACH_HOST */ + +/* + * Create a task running in the kernel address space. It may + * have its own map of size mem_size and may have ipc privileges. + */ +kern_return_t +kernel_task_create( + task_t parent_task, + vm_offset_t map_base, + vm_size_t map_size, + task_t *child_task) +{ + kern_return_t result; + task_t new_task; + vm_map_t old_map; + + /* + * Create the task. + */ + result = task_create_local(parent_task, FALSE, TRUE, &new_task); + if (result != KERN_SUCCESS) + return (result); + + /* + * Task_create_local creates the task with a user-space map. + * We attempt to replace the map and free it afterwards; else + * task_deallocate will free it (can NOT set map to null before + * task_deallocate, this impersonates a norma placeholder task). + * _Mark the memory as pageable_ -- this is what we + * want for images (like servers) loaded into the kernel. + */ + if (map_size == 0) { + vm_map_deallocate(new_task->map); + new_task->map = kernel_map; + *child_task = new_task; + } else { + old_map = new_task->map; + if ((result = kmem_suballoc(kernel_map, &map_base, + map_size, TRUE, FALSE, + &new_task->map)) != KERN_SUCCESS) { + /* + * New task created with ref count of 2 -- decrement by + * one to force task deletion. 
 */
		printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
			kernel_map, map_base, map_size);
		--new_task->ref_count;
		task_deallocate(new_task);
		return (result);
	}
	vm_map_deallocate(old_map);
	*child_task = new_task;
	}
	return (KERN_SUCCESS);
}

/*
 *	task_create:
 *
 *	Exported (MiG-visible) task creation interface.
 *	NOTE(review): the ledger arguments are accepted but unused here --
 *	the child inherits its parent's ledgers inside task_create_local();
 *	confirm this is the intended interface behavior.
 */
kern_return_t
task_create(
	task_t			parent_task,
	ledger_port_array_t	ledger_ports,
	mach_msg_type_number_t	num_ledger_ports,
	boolean_t		inherit_memory,
	task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/* Third argument FALSE: the child is not a kernel-loaded task. */
	return task_create_local(
			parent_task, inherit_memory, FALSE, child_task);
}

/*
 *	host_security_create_task_token:
 *
 *	Create a task as task_create() does, then stamp it with the
 *	caller-supplied security token.  Requires the host-security port.
 *	The ledger arguments are unused, as in task_create().
 */
kern_return_t
host_security_create_task_token(
	host_security_t		host_security,
	task_t			parent_task,
	security_token_t	sec_token,
	host_priv_t		host_priv,
	ledger_port_array_t	ledger_ports,
	mach_msg_type_number_t	num_ledger_ports,
	boolean_t		inherit_memory,
	task_t			*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_local(
			parent_task, inherit_memory, FALSE, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	/* Override the inherited token with the one supplied by the caller. */
	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      host_priv);

	if (result != KERN_SUCCESS)
		return(result);

	/* result is KERN_SUCCESS at this point. */
	return(result);
}

/*
 *	task_create_local:
 *
 *	Common creation path: allocate a task from task_zone and fully
 *	initialize it, forking the parent's address space when
 *	inherit_memory is TRUE, and inheriting the parent's processor
 *	set, scheduling policy, security token, shared region and
 *	ledgers when a parent is given.
 *
 *	NOTE(review): when parent_task is TASK_NULL, inherit_memory must
 *	be FALSE, or parent_task->map below would be dereferenced --
 *	confirm all callers honor this.
 */
kern_return_t
task_create_local(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	kernel_loaded,
	task_t		*child_task)		/* OUT */
{
	task_t		new_task;
	processor_set_t	pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
				round_page(VM_MIN_ADDRESS),
				trunc_page(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
	queue_init(&new_task->subsystem_list);
	queue_init(&new_task->thr_acts);
	new_task->suspend_count = 0;
	new_task->thr_act_count = 0;
	new_task->res_act_count = 0;
	new_task->active_act_count = 0;
	new_task->user_stop_count = 0;
	new_task->importance = 0;
	new_task->active = TRUE;
	new_task->kernel_loaded = kernel_loaded;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->syscalls_unix=0;
	new_task->csw=0;

#ifdef	MACH_BSD
	new_task->bsd_info = 0;
#endif	/* MACH_BSD */

#if	TASK_SWAPPER
	new_task->swap_state = TASK_SW_IN;
	new_task->swap_flags = 0;
	new_task->swap_ast_waiting = 0;
	new_task->swap_stamp = sched_tick;
	new_task->swap_rss = 0;
	new_task->swap_nswap = 0;
#endif	/* TASK_SWAPPER */

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */
	eml_task_reference(new_task, parent_task);

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time.seconds = 0;
	new_task->total_user_time.microseconds = 0;
	new_task->total_system_time.seconds = 0;
	new_task->total_system_time.microseconds = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->policy = parent_task->policy;

		new_task->priority = parent_task->priority;
		new_task->max_priority = parent_task->max_priority;

		new_task->sec_token = parent_task->sec_token;

		/* Child shares the parent's system shared region. */
		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region =
				parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
	}
	else {
		pset = &default_pset;

		/*
		 * kernel_task itself is still TASK_NULL only while the
		 * kernel task is being created; it gets RR/kernel-band
		 * priorities, later parentless tasks get timeshare.
		 */
		if (kernel_task == TASK_NULL) {
			new_task->policy = POLICY_RR;

			new_task->priority = MINPRI_KERNBAND;
			new_task->max_priority = MAXPRI_KERNBAND;
		}
		else {
			new_task->policy = POLICY_TIMESHARE;

			new_task->priority = BASEPRI_DEFAULT;
			new_task->max_priority = MAXPRI_HIGHBAND;
		}

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

#if	FAST_TAS
	if (inherit_memory) {
		new_task->fast_tas_base = parent_task->fast_tas_base;
		new_task->fast_tas_end = parent_task->fast_tas_end;
	} else {
		new_task->fast_tas_base = (vm_offset_t)0;
		new_task->fast_tas_end = (vm_offset_t)0;
	}
#endif	/* FAST_TAS */

	/* Task is fully built: allow IPC access to it. */
	ipc_task_enable(new_task);

#if	TASK_SWAPPER
	task_swapout_eligible(new_task);
#endif	/* TASK_SWAPPER */

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
		printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
			parent_task, inherit_memory, new_task);
#endif	/* MACH_ASSERT */

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 *
 *	task_free:
 *
 *	Called by task_deallocate when the task's reference count drops to zero.
 *	Task is locked.
 */
void
task_free(
	task_t task)
{
	processor_set_t pset;

#if	MACH_ASSERT
	assert(task != 0);
	if (watchacts & (WA_EXIT|WA_TASK))
		printf("task_free(%x(%d)) map ref %d\n", task, task->ref_count,
			task->map->ref_count);
#endif	/* MACH_ASSERT */

#if	TASK_SWAPPER
	/* task_terminate guarantees that this task is off the list */
	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif	/* TASK_SWAPPER */

	eml_task_deallocate(task);

	/*
	 * Temporarily restore the reference we dropped above, then
	 * freeze the task so that the task->processor_set field
	 * cannot change.  In the !MACH_HOST case, the logic can be
	 * simplified, since the default_pset is the only pset.
	 */
	++task->ref_count;
	task_unlock(task);
#if	MACH_HOST
	task_freeze(task);
#endif	/* MACH_HOST */

	/*
	 * Lock ordering: pset lock before task lock; re-check the
	 * reference count once both are held.
	 */
	pset = task->processor_set;
	pset_lock(pset);
	task_lock(task);
	if (--task->ref_count > 0) {
		/*
		 * A new reference appeared (probably from the pset).
		 * Back out.  Must unfreeze inline since we've already
		 * dropped our reference.
		 */
#if	MACH_HOST
		assert(task->may_assign == FALSE);
		task->may_assign = TRUE;
		if (task->assign_active == TRUE) {
			task->assign_active = FALSE;
			thread_wakeup((event_t)&task->assign_active);
		}
#endif	/* MACH_HOST */
		task_unlock(task);
		pset_unlock(pset);
		return;
	}
	pset_remove_task(pset,task);
	task_unlock(task);
	pset_unlock(pset);
	pset_deallocate(pset);

	ipc_task_terminate(task);
	shared_region_mapping_dealloc(task->system_shared_region);

	/* Kernel-loaded tasks also occupy a range of the kernel map. */
	if (task->kernel_loaded)
		vm_map_remove(kernel_map, task->map->min_offset,
			      task->map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_deallocate(task->map);
	is_release(task->itk_space);
	task_prof_deallocate(task);
	zfree(task_zone, (vm_offset_t) task);
}

/*
 *	task_deallocate:
 *
 *	Drop one reference on the task; the last reference frees it
 *	(task_free() is entered with the task locked and unlocks it).
 */
void
task_deallocate(
	task_t task)
{
	if (task != TASK_NULL) {
		int c;

		task_lock(task);
		c = --task->ref_count;
		if (c == 0)
			task_free(task);	/* unlocks task */
		else
			task_unlock(task);
	}
}

/*
 *	task_reference:
 *
 *	Take one additional reference on the task (no-op for TASK_NULL).
 */
void
task_reference(
	task_t task)
{
	if (task != TASK_NULL) {
		task_lock(task);
		task->ref_count++;
		task_unlock(task);
	}
}

/*
 *	task_reference_try:
 *
 *	Like task_reference, but refuses to block: returns FALSE if the
 *	task lock cannot be acquired immediately (or task is TASK_NULL).
 */
boolean_t
task_reference_try(
	task_t task)
{
	if (task != TASK_NULL) {
		if (task_lock_try(task)) {
			task->ref_count++;
			task_unlock(task);
			return TRUE;
		}
	}
	return FALSE;
}

/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t	task)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);
	/* Tasks with BSD state must be torn down via the BSD exit path. */
	if (task->bsd_info)
		return(KERN_FAILURE);
	return (task_terminate_internal(task));
}

/*
 *	task_terminate_internal:
 *
 *	Do the actual work of task termination: hold the task, mark it
 *	inactive, terminate each activation, and tear down the task's
 *	synchronizers, subsystems, IPC space and address space.
 */
kern_return_t
task_terminate_internal(
	task_t	task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

#if	TASK_SWAPPER
	/*
	 * If task is not resident (swapped out, or being swapped
	 * out), we want to bring it back in (this can block).
	 * NOTE: The only way that this can happen in the current
	 * system is if the task is swapped while it has a thread
	 * in exit(), and the thread does not hit a clean point
	 * to swap itself before getting here.
	 * Terminating other tasks is another way to this code, but
	 * it is not yet fully supported.
	 * The task_swapin is unconditional.  It used to be done
	 * only if the task is not resident.  Swapping in a
	 * resident task will prevent it from being swapped out
	 * while it terminates.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 * (Both locks are taken in address order to avoid deadlock
	 * with another terminator working in the opposite direction.)
	 */
	if (task == cur_task) {
		task_lock(task);
	} else if (task < cur_task) {
		task_lock(task);
		task_lock(cur_task);
	} else {
		task_lock(cur_task);
		task_lock(task);
	}

	if (!task->active || !cur_thr_act->active) {
		/*
		 * Task or current act is already being terminated.
		 * Just return an error.  If we are dying, this will
		 * just get us to our AST special handler and that
		 * will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (cur_task != task)
			task_unlock(cur_task);
		return(KERN_FAILURE);
	}
	if (cur_task != task)
		task_unlock(cur_task);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there).  Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each activation in the task.
	 *
	 * Each terminated activation will run it's special handler
	 * when its current kernel context is unwound.  That will
	 * clean up most of the thread resources.  Then it will be
	 * handed over to the reaper, who will finally remove the
	 * thread from the task list and free the structures.
	 *
	 * We can't terminate the current activation yet, because
	 * it has to wait for the others in an interruptible state.
	 * We may also block interruptibly during the rest of the
	 * cleanup.  Wait until the very last to terminate ourself.
	 *
	 * But if we have virtual machine state, we need to clean
	 * that up now, because it may be holding wirings the task's
	 * map that would get stuck in the vm_map_remove() below.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		if (thr_act != cur_thr_act)
			thread_terminate_internal(thr_act);
		else
			act_virtual_machine_destroy(thr_act);
	}
	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Deallocate all subsystems owned by the task.
	 */
	task_subsystem_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	if (!task->kernel_loaded)
		ipc_space_destroy(task->itk_space);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	/*
	 * Finally, mark ourself for termination and then
	 * deallocate the task's reference to itself.
	 */
	if (task == cur_task)
		thread_terminate(cur_thr_act);
	task_deallocate(task);

	return(KERN_SUCCESS);
}

/*
 *	task_halt - Shut the current task down (except for the current thread) in
 *		    preparation for dramatic changes to the task (probably exec).
 *		    We hold the task, terminate all other threads in the task and
 *		    wait for them to terminate, clean up the portspace, and when
 *		    all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t	task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

	/* Only the task's own thread may halt it. */
	if (task != cur_task) {
		return(KERN_INVALID_ARGUMENT);
	}

#if	TASK_SWAPPER
	/*
	 * If task is not resident (swapped out, or being swapped
	 * out), we want to bring it back in and make it unswappable.
	 * This can block, so do it early.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	task_lock(task);

	if (!task->active || !cur_thr_act->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);
		return(KERN_FAILURE);
	}

	if (task->thr_act_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other activations in the task.
		 *
		 * Each terminated activation will run it's special handler
		 * when its current kernel context is unwound.  That will
		 * clean up most of the thread resources.  Then it will be
		 * handed over to the reaper, who will finally remove the
		 * thread from the task list and free the structures.
		 *
		 * If the current thread has any virtual machine state
		 * associated with it, clean that up now before we try
		 * to clean up the task VM and port spaces.
		 */
		queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) {
			if (thr_act != cur_thr_act)
				thread_terminate_internal(thr_act);
			else
				act_virtual_machine_destroy(thr_act);
		}
		task_release_locked(task);
	}
	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Deallocate all subsystems owned by the task.
	 */
	task_subsystem_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 * (Currently disabled; see #if 0 below.)
	 */
#if 0
	if (!task->kernel_loaded)
		ipc_space_clean(task->itk_space);
#endif

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	return KERN_SUCCESS;
}

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
+ */ +void +task_hold_locked( + register task_t task) +{ + register thread_act_t thr_act; + + assert(task->active); + + task->suspend_count++; + + /* + * Iterate through all the thread_act's and hold them. + */ + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + act_lock_thread(thr_act); + thread_hold(thr_act); + act_unlock_thread(thr_act); + } +} + +/* + * task_hold: + * + * Same as the internal routine above, except that is must lock + * and verify that the task is active. This differs from task_suspend + * in that it places a kernel hold on the task rather than just a + * user-level hold. This keeps users from over resuming and setting + * it running out from under the kernel. + * + * CONDITIONS: the caller holds a reference on the task + */ +kern_return_t +task_hold(task_t task) +{ + kern_return_t kret; + + if (task == TASK_NULL) + return (KERN_INVALID_ARGUMENT); + task_lock(task); + if (!task->active) { + task_unlock(task); + return (KERN_FAILURE); + } + task_hold_locked(task); + task_unlock(task); + + return(KERN_SUCCESS); +} + +/* + * Routine: task_wait_locked + * Wait for all threads in task to stop. + * + * Conditions: + * Called with task locked, active, and held. + */ +void +task_wait_locked( + register task_t task) +{ + register thread_act_t thr_act, cur_thr_act; + + assert(task->active); + assert(task->suspend_count > 0); + + cur_thr_act = current_act(); + /* + * Iterate through all the thread's and wait for them to + * stop. Do not wait for the current thread if it is within + * the task. + */ + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + if (thr_act != cur_thr_act) { + thread_shuttle_t thr_shuttle; + + thr_shuttle = act_lock_thread(thr_act); + thread_wait(thr_shuttle); + act_unlock_thread(thr_act); + } + } +} + +/* + * task_release_locked: + * + * Release a kernel hold on a task. 
+ * + * CONDITIONS: the task is locked and active + */ +void +task_release_locked( + register task_t task) +{ + register thread_act_t thr_act; + + assert(task->active); + + task->suspend_count--; + assert(task->suspend_count >= 0); + + /* + * Iterate through all the thread_act's and hold them. + * Do not hold the current thread_act if it is within the + * task. + */ + queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { + act_lock_thread(thr_act); + thread_release(thr_act); + act_unlock_thread(thr_act); + } +} + +/* + * task_release: + * + * Same as the internal routine above, except that it must lock + * and verify that the task is active. + * + * CONDITIONS: The caller holds a reference to the task + */ +kern_return_t +task_release(task_t task) +{ + kern_return_t kret; + + if (task == TASK_NULL) + return (KERN_INVALID_ARGUMENT); + task_lock(task); + if (!task->active) { + task_unlock(task); + return (KERN_FAILURE); + } + task_release_locked(task); + task_unlock(task); + + return(KERN_SUCCESS); +} + +kern_return_t +task_threads( + task_t task, + thread_act_array_t *thr_act_list, + mach_msg_type_number_t *count) +{ + unsigned int actual; /* this many thr_acts */ + thread_act_t thr_act; + thread_act_t *thr_acts; + thread_t thread; + int i, j; + + vm_size_t size, size_needed; + vm_offset_t addr; + + if (task == TASK_NULL) + return KERN_INVALID_ARGUMENT; + + size = 0; addr = 0; + + for (;;) { + task_lock(task); + if (!task->active) { + task_unlock(task); + if (size != 0) + kfree(addr, size); + return KERN_FAILURE; + } + + actual = task->thr_act_count; + + /* do we have the memory we need? 
*/ + size_needed = actual * sizeof(mach_port_t); + if (size_needed <= size) + break; + + /* unlock the task and allocate more memory */ + task_unlock(task); + + if (size != 0) + kfree(addr, size); + + assert(size_needed > 0); + size = size_needed; + + addr = kalloc(size); + if (addr == 0) + return KERN_RESOURCE_SHORTAGE; + } + + /* OK, have memory and the task is locked & active */ + thr_acts = (thread_act_t *) addr; + + for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts); + i < actual; + i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) { + act_lock(thr_act); + if (thr_act->ref_count > 0) { + act_locked_act_reference(thr_act); + thr_acts[j++] = thr_act; + } + act_unlock(thr_act); + } + assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act)); + + actual = j; + size_needed = actual * sizeof(mach_port_t); + + /* can unlock task now that we've got the thr_act refs */ + task_unlock(task); + + if (actual == 0) { + /* no thr_acts, so return null pointer and deallocate memory */ + + *thr_act_list = 0; + *count = 0; + + if (size != 0) + kfree(addr, size); + } else { + /* if we allocated too much, must copy */ + + if (size_needed < size) { + vm_offset_t newaddr; + + newaddr = kalloc(size_needed); + if (newaddr == 0) { + for (i = 0; i < actual; i++) + act_deallocate(thr_acts[i]); + kfree(addr, size); + return KERN_RESOURCE_SHORTAGE; + } + + bcopy((char *) addr, (char *) newaddr, size_needed); + kfree(addr, size); + thr_acts = (thread_act_t *) newaddr; + } + + *thr_act_list = thr_acts; + *count = actual; + + /* do the conversion that Mig should handle */ + + for (i = 0; i < actual; i++) + ((ipc_port_t *) thr_acts)[i] = + convert_act_to_port(thr_acts[i]); + } + + return KERN_SUCCESS; +} + +/* + * Routine: task_suspend + * Implement a user-level suspension on a task. 
+ * + * Conditions: + * The caller holds a reference to the task + */ +kern_return_t +task_suspend( + register task_t task) +{ + if (task == TASK_NULL) + return (KERN_INVALID_ARGUMENT); + + task_lock(task); + if (!task->active) { + task_unlock(task); + return (KERN_FAILURE); + } + if ((task->user_stop_count)++ > 0) { + /* + * If the stop count was positive, the task is + * already stopped and we can exit. + */ + task_unlock(task); + return (KERN_SUCCESS); + } + + /* + * Put a kernel-level hold on the threads in the task (all + * user-level task suspensions added together represent a + * single kernel-level hold). We then wait for the threads + * to stop executing user code. + */ + task_hold_locked(task); + task_wait_locked(task); + task_unlock(task); + return (KERN_SUCCESS); +} + +/* + * Routine: task_resume + * Release a kernel hold on a task. + * + * Conditions: + * The caller holds a reference to the task + */ +kern_return_t +task_resume(register task_t task) +{ + register boolean_t release; + + if (task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + + release = FALSE; + task_lock(task); + if (!task->active) { + task_unlock(task); + return(KERN_FAILURE); + } + if (task->user_stop_count > 0) { + if (--(task->user_stop_count) == 0) + release = TRUE; + } + else { + task_unlock(task); + return(KERN_FAILURE); + } + + /* + * Release the task if necessary. 
+ */ + if (release) + task_release_locked(task); + + task_unlock(task); + return(KERN_SUCCESS); +} + +kern_return_t +host_security_set_task_token( + host_security_t host_security, + task_t task, + security_token_t sec_token, + host_priv_t host_priv) +{ + kern_return_t kr; + + if (task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + + if (host_security == HOST_NULL) + return(KERN_INVALID_SECURITY); + + task_lock(task); + task->sec_token = sec_token; + task_unlock(task); + + if (host_priv != HOST_PRIV_NULL) { + kr = task_set_special_port(task, + TASK_HOST_PORT, + ipc_port_make_send(realhost.host_priv_self)); + } else { + kr = task_set_special_port(task, + TASK_HOST_PORT, + ipc_port_make_send(realhost.host_self)); + } + return(kr); +} + +/* + * Utility routine to set a ledger + */ +kern_return_t +task_set_ledger( + task_t task, + ledger_t wired, + ledger_t paged) +{ + if (task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + + task_lock(task); + if (wired) { + ipc_port_release_send(task->wired_ledger_port); + task->wired_ledger_port = ledger_copy(wired); + } + if (paged) { + ipc_port_release_send(task->paged_ledger_port); + task->paged_ledger_port = ledger_copy(paged); + } + task_unlock(task); + + return(KERN_SUCCESS); +} + +/* + * This routine was added, pretty much exclusively, for registering the + * RPC glue vector for in-kernel short circuited tasks. Rather than + * removing it completely, I have only disabled that feature (which was + * the only feature at the time). It just appears that we are going to + * want to add some user data to tasks in the future (i.e. bsd info, + * task names, etc...), so I left it in the formal task interface. 
+ */ +kern_return_t +task_set_info( + task_t task, + task_flavor_t flavor, + task_info_t task_info_in, /* pointer to IN array */ + mach_msg_type_number_t task_info_count) +{ + vm_map_t map; + + if (task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + + switch (flavor) { + default: + return (KERN_INVALID_ARGUMENT); + } + return (KERN_SUCCESS); +} + +kern_return_t +task_info( + task_t task, + task_flavor_t flavor, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count) +{ + thread_t thread; + vm_map_t map; + + if (task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + + switch (flavor) { + + case TASK_BASIC_INFO: + { + register task_basic_info_t basic_info; + + if (*task_info_count < TASK_BASIC_INFO_COUNT) { + return(KERN_INVALID_ARGUMENT); + } + + basic_info = (task_basic_info_t) task_info_out; + + map = (task == kernel_task) ? kernel_map : task->map; + + basic_info->virtual_size = map->size; + basic_info->resident_size = pmap_resident_count(map->pmap) + * PAGE_SIZE; + + task_lock(task); + basic_info->policy = task->policy; + basic_info->suspend_count = task->user_stop_count; + basic_info->user_time.seconds + = task->total_user_time.seconds; + basic_info->user_time.microseconds + = task->total_user_time.microseconds; + basic_info->system_time.seconds + = task->total_system_time.seconds; + basic_info->system_time.microseconds + = task->total_system_time.microseconds; + task_unlock(task); + + *task_info_count = TASK_BASIC_INFO_COUNT; + break; + } + + case TASK_THREAD_TIMES_INFO: + { + register task_thread_times_info_t times_info; + register thread_t thread; + register thread_act_t thr_act; + + if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) { + return (KERN_INVALID_ARGUMENT); + } + + times_info = (task_thread_times_info_t) task_info_out; + times_info->user_time.seconds = 0; + times_info->user_time.microseconds = 0; + times_info->system_time.seconds = 0; + times_info->system_time.microseconds = 0; + + task_lock(task); + 
queue_iterate(&task->thr_acts, thr_act, + thread_act_t, thr_acts) + { + time_value_t user_time, system_time; + spl_t s; + + thread = act_lock_thread(thr_act); + + /* Skip empty threads and threads that have migrated + * into this task: + */ + if (!thread || thr_act->pool_port) { + act_unlock_thread(thr_act); + continue; + } + assert(thread); /* Must have thread, if no thread_pool*/ + s = splsched(); + thread_lock(thread); + + thread_read_times(thread, &user_time, &system_time); + + thread_unlock(thread); + splx(s); + act_unlock_thread(thr_act); + + time_value_add(×_info->user_time, &user_time); + time_value_add(×_info->system_time, &system_time); + } + task_unlock(task); + + *task_info_count = TASK_THREAD_TIMES_INFO_COUNT; + break; + } + + case TASK_SCHED_FIFO_INFO: + { + register policy_fifo_base_t fifo_base; + + if (*task_info_count < POLICY_FIFO_BASE_COUNT) + return(KERN_INVALID_ARGUMENT); + + fifo_base = (policy_fifo_base_t) task_info_out; + + task_lock(task); + if (task->policy != POLICY_FIFO) { + task_unlock(task); + return(KERN_INVALID_POLICY); + } + + fifo_base->base_priority = task->priority; + task_unlock(task); + + *task_info_count = POLICY_FIFO_BASE_COUNT; + break; + } + + case TASK_SCHED_RR_INFO: + { + register policy_rr_base_t rr_base; + + if (*task_info_count < POLICY_RR_BASE_COUNT) + return(KERN_INVALID_ARGUMENT); + + rr_base = (policy_rr_base_t) task_info_out; + + task_lock(task); + if (task->policy != POLICY_RR) { + task_unlock(task); + return(KERN_INVALID_POLICY); + } + + rr_base->base_priority = task->priority; + task_unlock(task); + + rr_base->quantum = (min_quantum * tick) / 1000; + + *task_info_count = POLICY_RR_BASE_COUNT; + break; + } + + case TASK_SCHED_TIMESHARE_INFO: + { + register policy_timeshare_base_t ts_base; + + if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) + return(KERN_INVALID_ARGUMENT); + + ts_base = (policy_timeshare_base_t) task_info_out; + + task_lock(task); + if (task->policy != POLICY_TIMESHARE) { + task_unlock(task); 
+ return(KERN_INVALID_POLICY); + } + + ts_base->base_priority = task->priority; + task_unlock(task); + + *task_info_count = POLICY_TIMESHARE_BASE_COUNT; + break; + } + + case TASK_SECURITY_TOKEN: + { + register security_token_t *sec_token_p; + + if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) { + return(KERN_INVALID_ARGUMENT); + } + + sec_token_p = (security_token_t *) task_info_out; + + task_lock(task); + *sec_token_p = task->sec_token; + task_unlock(task); + + *task_info_count = TASK_SECURITY_TOKEN_COUNT; + break; + } + + case TASK_SCHED_INFO: + return(KERN_INVALID_ARGUMENT); + + case TASK_EVENTS_INFO: + { + register task_events_info_t events_info; + + if (*task_info_count < TASK_EVENTS_INFO_COUNT) { + return(KERN_INVALID_ARGUMENT); + } + + events_info = (task_events_info_t) task_info_out; + + task_lock(task); + events_info->faults = task->faults; + events_info->pageins = task->pageins; + events_info->cow_faults = task->cow_faults; + events_info->messages_sent = task->messages_sent; + events_info->messages_received = task->messages_received; + events_info->syscalls_mach = task->syscalls_mach; + events_info->syscalls_unix = task->syscalls_unix; + events_info->csw = task->csw; + task_unlock(task); + + *task_info_count = TASK_EVENTS_INFO_COUNT; + break; + } + + default: + return (KERN_INVALID_ARGUMENT); + } + + return(KERN_SUCCESS); +} + +/* + * task_assign: + * + * Change the assigned processor set for the task + */ +kern_return_t +task_assign( + task_t task, + processor_set_t new_pset, + boolean_t assign_threads) +{ +#ifdef lint + task++; new_pset++; assign_threads++; +#endif /* lint */ + return(KERN_FAILURE); +} + +/* + * task_assign_default: + * + * Version of task_assign to assign to default processor set. + */ +kern_return_t +task_assign_default( + task_t task, + boolean_t assign_threads) +{ + return (task_assign(task, &default_pset, assign_threads)); +} + +/* + * task_get_assignment + * + * Return name of processor set that task is assigned to. 
+ */ +kern_return_t +task_get_assignment( + task_t task, + processor_set_t *pset) +{ + if (!task->active) + return(KERN_FAILURE); + + *pset = task->processor_set; + pset_reference(*pset); + return(KERN_SUCCESS); +} + + +/* + * task_policy + * + * Set scheduling policy and parameters, both base and limit, for + * the given task. Policy must be a policy which is enabled for the + * processor set. Change contained threads if requested. + */ +kern_return_t +task_policy( + task_t task, + policy_t policy_id, + policy_base_t base, + mach_msg_type_number_t count, + boolean_t set_limit, + boolean_t change) +{ + return(KERN_FAILURE); +} + +/* + * task_set_policy + * + * Set scheduling policy and parameters, both base and limit, for + * the given task. Policy can be any policy implemented by the + * processor set, whether enabled or not. Change contained threads + * if requested. + */ +kern_return_t +task_set_policy( + task_t task, + processor_set_t pset, + policy_t policy_id, + policy_base_t base, + mach_msg_type_number_t base_count, + policy_limit_t limit, + mach_msg_type_number_t limit_count, + boolean_t change) +{ + return(KERN_FAILURE); +} + +/* + * task_collect_scan: + * + * Attempt to free resources owned by tasks. 
 */

void
task_collect_scan(void)
{
	register task_t		task, prev_task;
	processor_set_t		pset = &default_pset;

	prev_task = TASK_NULL;

	/* Hold a pset reference so the pset survives while unlocked below. */
	pset_lock(pset);
	pset->ref_count++;
	task = (task_t) queue_first(&pset->tasks);
	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
		/*
		 * Take a task reference before dropping the pset lock, so
		 * the task (and its place in the list) stays valid while
		 * pmap_collect() runs unlocked.  The previous task's
		 * reference is only dropped afterwards.
		 */
		task_reference(task);
		pset_unlock(pset);

		pmap_collect(task->map->pmap);

		if (prev_task != TASK_NULL)
			task_deallocate(prev_task);
		prev_task = task;

		pset_lock(pset);
		task = (task_t) queue_next(&task->pset_tasks);
	}
	pset_unlock(pset);

	pset_deallocate(pset);

	if (prev_task != TASK_NULL)
		task_deallocate(prev_task);
}

/* Pageout-daemon driven task collection state. */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;		/* in ticks */

/*
 *	consider_task_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */

void
consider_task_collect(void)
{
	/*
	 * By default, don't attempt task collection more frequently
	 * than once per second.
	 *
	 * NOTE(review): the default below is (2 << SCHED_TICK_SHIFT)
	 * scheduler ticks, which may not correspond to one second --
	 * confirm against sched_tick's update rate; the comment above
	 * may be stale.
	 */

	if (task_collect_max_rate == 0)
		task_collect_max_rate = (2 << SCHED_TICK_SHIFT);

	if (task_collect_allowed &&
	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
		task_collect_last_tick = sched_tick;
		task_collect_scan();
	}
}

/*
 *	task_set_ras_pc:
 *
 *	Set the restartable-atomic-sequence (fast test-and-set) range
 *	[pc, endpc) for the task.  Only available with FAST_TAS.
 */
kern_return_t
task_set_ras_pc(
 	task_t		task,
 	vm_offset_t	pc,
 	vm_offset_t	endpc)
{
#if	FAST_TAS
	extern int fast_tas_debug;
 
	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end =  endpc;
	task_unlock(task);
	return KERN_SUCCESS;

#else	/* FAST_TAS */
#ifdef	lint
	task++;
	pc++;
	endpc++;
#endif	/* lint */
	return KERN_FAILURE;

#endif	/* FAST_TAS */
}

/*
 *	task_synchronizer_destroy_all:
 *
 *	Destroy every semaphore and lock set owned by the task;
 *	used during task termination.
 */
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 * Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 * Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 *	task_subsystem_destroy_all:
 *
 *	Release every subsystem owned by the task.
 */
void
task_subsystem_destroy_all(task_t task)
{
	subsystem_t	subsystem;

	/*
	 * Destroy owned subsystems
	 */

	while (!queue_empty(&task->subsystem_list)) {
		subsystem = (subsystem_t) queue_first(&task->subsystem_list);
		subsystem_deallocate(subsystem);
	}
}

/*
 *	task_set_port_space:
 *
 *	Set port name space of task to specified size.
 */

kern_return_t
task_set_port_space(
 	task_t		task,
 	int		table_entries)
{
	kern_return_t kr;
	
	is_write_lock(task->itk_space);
	kr = ipc_entry_grow_table(task->itk_space, table_entries);
	/*
	 * NOTE(review): the space is only unlocked here on success.
	 * This is correct only if ipc_entry_grow_table() returns with
	 * the space unlocked on failure -- confirm against that
	 * routine's locking contract.
	 */
	if (kr == KERN_SUCCESS)
		is_write_unlock(task->itk_space);
	return kr;
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	/* True for the kernel task proper and for kernel-loaded tasks. */
	if (t == kernel_task)
		return(TRUE);
	else
		return((t->kernel_loaded));
}

#undef current_task
task_t current_task()
{
	/* Out-of-line version of the current_task_fast() macro. */
	return (current_task_fast());
}
diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h
new file mode 100644
index 000000000..36d0fe040
--- /dev/null
+++ b/osfmk/kern/task.h
@@ -0,0 +1,290 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: task.h + * Author: Avadis Tevanian, Jr. + * + * This file contains the structure definitions for tasks. + * + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. 
+ * + */ + +#ifndef _KERN_TASK_H_ +#define _KERN_TASK_H_ + +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct task { + /* Synchronization/destruction information */ + decl_mutex_data(,lock) /* Task's lock */ + int ref_count; /* Number of references to me */ + boolean_t active; /* Task has not been terminated */ + boolean_t kernel_loaded; /* Created with kernel_task_create() */ + + /* Miscellaneous */ + vm_map_t map; /* Address space description */ + queue_chain_t pset_tasks; /* list of tasks assigned to pset */ + void *user_data; /* Arbitrary data settable via IPC */ + int suspend_count; /* Internal scheduling only */ + +#if TASK_SWAPPER + /* Task swapper data */ + unsigned short swap_state; /* swap state (e.g. IN/OUT) */ + unsigned short swap_flags; /* swap flags (e.g. MAKE_UNSWAPP) */ + unsigned int swap_stamp; /* when last swapped */ + unsigned long swap_rss; /* size (pages) when last swapped */ + int swap_ast_waiting; /* number of threads that have not */ + /* reached a clean point and halted */ + int swap_nswap; /* number of times this task swapped */ + queue_chain_t swapped_tasks; /* list of non-resident tasks */ +#endif /* TASK_SWAPPER */ + + /* Activations in this task */ + queue_head_t thr_acts; /* list of thread_activations */ + int thr_act_count; + int res_act_count; + int active_act_count; /* have not terminate_self yet */ + + processor_set_t processor_set; /* processor set for new threads */ +#if MACH_HOST + boolean_t may_assign; /* can assigned pset be changed? 
*/ + boolean_t assign_active; /* waiting for may_assign */ +#endif /* MACH_HOST */ + + /* User-visible scheduling information */ + integer_t user_stop_count; /* outstanding stops */ + + integer_t importance; /* system-wide importance */ + + integer_t priority; /* base priority */ + integer_t max_priority; /* maximum priority */ + + integer_t policy; /* obsolete */ + + /* Task security token */ + security_token_t sec_token; + + /* Statistics */ + time_value_t total_user_time; /* user time for dead threads */ + time_value_t total_system_time; /* system time for dead threads */ + +#if MACH_PROF + boolean_t task_profiled; /* is task being profiled ? */ + struct prof_data *profil_buffer;/* profile struct if so */ +#endif /* MACH_PROF */ + + /* IPC structures */ + decl_mutex_data(,itk_lock_data) + struct ipc_port *itk_self; /* not a right, doesn't hold ref */ + struct ipc_port *itk_sself; /* a send right */ + struct exception_action exc_actions[EXC_TYPES_COUNT]; + /* a send right each valid element */ + struct ipc_port *itk_host; /* a send right */ + struct ipc_port *itk_bootstrap; /* a send right */ + struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX]; + /* all send rights */ + + struct ipc_space *itk_space; + + /* RPC subsystem information */ + queue_head_t subsystem_list; /* list of subsystems */ + int subsystem_count;/* number of subsystems */ + + /* Synchronizer ownership information */ + queue_head_t semaphore_list; /* list of owned semaphores */ + queue_head_t lock_set_list; /* list of owned lock sets */ + int semaphores_owned; /* number of semaphores owned */ + int lock_sets_owned; /* number of lock sets owned */ + + /* User space system call emulation support */ + struct eml_dispatch *eml_dispatch; + + /* Ledgers */ + struct ipc_port *wired_ledger_port; + struct ipc_port *paged_ledger_port; + +#if NORMA_TASK + long child_node; /* if != -1, node for new children */ +#endif /* NORMA_TASK */ +#if FAST_TAS + vm_offset_t fast_tas_base; + vm_offset_t 
fast_tas_end; +#endif /* FAST_TAS */ + MACHINE_TASK + integer_t faults; /* faults counter */ + integer_t pageins; /* pageins counter */ + integer_t cow_faults; /* copy on write fault counter */ + integer_t messages_sent; /* messages sent counter */ + integer_t messages_received; /* messages received counter */ + integer_t syscalls_mach; /* mach system call counter */ + integer_t syscalls_unix; /* unix system call counter */ + integer_t csw; /* context switch counter */ +#ifdef MACH_BSD + void *bsd_info; +#endif + vm_offset_t system_shared_region; +} Task; + + +#define task_lock(task) mutex_lock(&(task)->lock) +#define task_lock_try(task) mutex_try(&(task)->lock) +#define task_unlock(task) mutex_unlock(&(task)->lock) + +#define itk_lock_init(task) mutex_init(&(task)->itk_lock_data, \ + ETAP_THREAD_TASK_ITK) +#define itk_lock(task) mutex_lock(&(task)->itk_lock_data) +#define itk_unlock(task) mutex_unlock(&(task)->itk_lock_data) + +/* + * Internal only routines + */ + +/* Initialize task module */ +extern void task_init(void); + +/* task create */ +extern kern_return_t task_create_local( + task_t parent_task, + boolean_t inherit_memory, + boolean_t kernel_loaded, + task_t *child_task); /* OUT */ + +#if MACH_HOST +/* Freeze and unfreeze task from being reassigned processor sets */ +extern void task_freeze( + task_t task); + +extern void task_unfreeze( + task_t task); +#endif /* MACH_HOST */ + +extern void consider_task_collect(void); + +#define current_task_fast() (current_act_fast()->task) +#define current_task() current_task_fast() + +#else /* !MACH_KERNEL_PRIVATE */ + +extern task_t current_task(void); + +#endif /* !MACH_KERNEL_PRIVATE */ + +extern task_t kernel_task; +extern boolean_t is_kerneltask(task_t); + +/* Temporarily hold all threads in a task */ +extern kern_return_t task_hold( + task_t task); + +/* Release temporary hold on all threads in a task */ +extern kern_return_t task_release( + task_t task); + +/* Get a task prepared for major changes */ +extern 
kern_return_t task_halt( + task_t task); + +/* Take reference on task (make sure it doesn't go away) */ +extern void task_reference(task_t task); + +/* Try to take a reference on task, return false if it would block */ +extern boolean_t task_reference_try(task_t task); + +/* Remove reference to task */ +extern void task_deallocate(task_t task); + +/* JMM - should just be temporary (implementation in bsd_kern still) */ +extern void *get_bsdtask_info(task_t); +extern void set_bsdtask_info(task_t,void *); +extern vm_map_t get_task_map(task_t); +extern vm_map_t swap_task_map(task_t, vm_map_t); +extern pmap_t get_task_pmap(task_t); +#endif /* _KERN_TASK_H_ */ diff --git a/osfmk/kern/task_policy.c b/osfmk/kern/task_policy.c new file mode 100644 index 000000000..4b9eb5fee --- /dev/null +++ b/osfmk/kern/task_policy.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 15 October 2000 (debo) + * Created. 
+ */ + +#include + +kern_return_t +task_policy_set( + task_t task, + task_policy_flavor_t flavor, + task_policy_t policy_info, + mach_msg_type_number_t count) +{ + if (task == TASK_NULL) + return (KERN_INVALID_ARGUMENT); + + switch (flavor) { + + case TASK_CATEGORY_POLICY: + { + task_category_policy_t info = (task_category_policy_t)policy_info; + + if (count < TASK_CATEGORY_POLICY_COUNT) + return (KERN_INVALID_ARGUMENT); + + switch (info->role) { + + case TASK_UNSPECIFIED: + case TASK_FOREGROUND_APPLICATION: + case TASK_BACKGROUND_APPLICATION: + case TASK_CONTROL_APPLICATION: + case TASK_GRAPHICS_SERVER: + return (KERN_SUCCESS); + + default: + return (KERN_INVALID_ARGUMENT); + } + + break; + } + + default: + return (KERN_INVALID_ARGUMENT); + } + + return (KERN_SUCCESS); +} + +kern_return_t +task_policy_get( + task_t task, + task_policy_flavor_t flavor, + task_policy_t policy_info, + mach_msg_type_number_t *count, + boolean_t *get_default) +{ + if (task == TASK_NULL) + return (KERN_INVALID_ARGUMENT); + + switch (flavor) { + + case TASK_CATEGORY_POLICY: + { + task_category_policy_t info = (task_category_policy_t)policy_info; + + if (*count < TASK_CATEGORY_POLICY_COUNT) + return (KERN_INVALID_ARGUMENT); + + info->role = TASK_UNSPECIFIED; + break; + } + + default: + return (KERN_INVALID_ARGUMENT); + } + + return (KERN_SUCCESS); +} diff --git a/osfmk/kern/task_swap.c b/osfmk/kern/task_swap.c new file mode 100644 index 000000000..6744ce3bc --- /dev/null +++ b/osfmk/kern/task_swap.c @@ -0,0 +1,1471 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: kern/task_swap.c + * + * Task residency management primitives implementation. + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* We use something from in here */ + +/* + * Note: if TASK_SWAPPER is disabled, then this file defines only + * a stub version of task_swappable(), so that the service can always + * be defined, even if swapping has been configured out of the kernel. + */ +#if TASK_SWAPPER + +/* temporary debug flags */ +#define TASK_SW_DEBUG 1 +#define TASK_SW_STATS 1 + +int task_swap_debug = 0; +int task_swap_stats = 0; +int task_swap_enable = 1; +int task_swap_on = 1; + +queue_head_t swapped_tasks; /* completely swapped out tasks */ +queue_head_t swapout_thread_q; /* threads to be swapped out */ +mutex_t task_swapper_lock; /* protects above queue */ + +#define task_swapper_lock() mutex_lock(&task_swapper_lock) +#define task_swapper_unlock() mutex_unlock(&task_swapper_lock) + +queue_head_t eligible_tasks; /* tasks eligible for swapout */ +mutex_t task_swapout_list_lock; /* protects above queue */ +#define task_swapout_lock() mutex_lock(&task_swapout_list_lock) +#define task_swapout_unlock() mutex_unlock(&task_swapout_list_lock) + +/* + * The next section of constants and globals are tunable parameters + * used in making swapping decisions. 
They may be changed dynamically + * without adversely affecting the robustness of the system; however, + * the policy will change, one way or the other. + */ + +#define SHORT_AVG_INTERVAL 5 /* in seconds */ +#define LONG_AVG_INTERVAL 30 /* in seconds */ +#define AVE_SCALE 1024 + +unsigned int short_avg_interval = SHORT_AVG_INTERVAL; +unsigned int long_avg_interval = LONG_AVG_INTERVAL; + +#ifndef MIN_SWAP_PAGEOUT_RATE +#define MIN_SWAP_PAGEOUT_RATE 10 +#endif + +/* + * The following are all stored in fixed-point representation (the actual + * value times AVE_SCALE), to allow more accurate computing of decaying + * averages. So all variables that end with "avg" must be divided by + * AVE_SCALE to convert them or compare them to ints. + */ +unsigned int vm_grab_rate_avg; +unsigned int vm_pageout_rate_avg = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE; +unsigned int vm_pageout_rate_longavg = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE; +unsigned int vm_pageout_rate_peakavg = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE; +unsigned int vm_page_free_avg; /* average free pages over short_avg_interval */ +unsigned int vm_page_free_longavg; /* avg free pages over long_avg_interval */ + +/* + * Trigger task swapping when paging activity reaches + * SWAP_HIGH_WATER_MARK per cent of the maximum paging activity ever observed. + * Turn off task swapping when paging activity goes back down to below + * SWAP_PAGEOUT_LOW_WATER_MARK per cent of the maximum. + * These numbers have been found empirically and might need some tuning... + */ +#ifndef SWAP_PAGEOUT_HIGH_WATER_MARK +#define SWAP_PAGEOUT_HIGH_WATER_MARK 30 +#endif +#ifndef SWAP_PAGEOUT_LOW_WATER_MARK +#define SWAP_PAGEOUT_LOW_WATER_MARK 10 +#endif + +#ifndef MAX_GRAB_RATE +#define MAX_GRAB_RATE ((unsigned int) -1) /* XXX no maximum */ +#endif + +/* + * swap_{start,stop}_pageout_rate start at the minimum value, then increase + * to adjust to the hardware's performance, following the paging rate peaks. 
+ */ +unsigned int swap_pageout_high_water_mark = SWAP_PAGEOUT_HIGH_WATER_MARK; +unsigned int swap_pageout_low_water_mark = SWAP_PAGEOUT_LOW_WATER_MARK; +unsigned int swap_start_pageout_rate = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE * + SWAP_PAGEOUT_HIGH_WATER_MARK / 100; +unsigned int swap_stop_pageout_rate = MIN_SWAP_PAGEOUT_RATE * AVE_SCALE * + SWAP_PAGEOUT_LOW_WATER_MARK / 100; +#if TASK_SW_DEBUG +unsigned int fixed_swap_start_pageout_rate = 0; /* only for testing purpose */ +unsigned int fixed_swap_stop_pageout_rate = 0; /* only for testing purpose */ +#endif /* TASK_SW_DEBUG */ +unsigned int max_grab_rate = MAX_GRAB_RATE; + +#ifndef MIN_SWAP_TIME +#define MIN_SWAP_TIME 1 +#endif + +int min_swap_time = MIN_SWAP_TIME; /* in seconds */ + +#ifndef MIN_RES_TIME +#define MIN_RES_TIME 6 +#endif + +int min_res_time = MIN_RES_TIME; /* in seconds */ + +#ifndef MIN_ACTIVE_TASKS +#define MIN_ACTIVE_TASKS 4 +#endif + +int min_active_tasks = MIN_ACTIVE_TASKS; + +#ifndef TASK_SWAP_CYCLE_TIME +#define TASK_SWAP_CYCLE_TIME 2 +#endif + +int task_swap_cycle_time = TASK_SWAP_CYCLE_TIME; /* in seconds */ + +int last_task_swap_cycle = 0; + +/* temporary statistics */ +int task_swapouts = 0; +int task_swapins = 0; +int task_swaprss_out = 0; /* total rss at swapout time */ +int task_swaprss_in = 0; /* total rss at swapin time */ +int task_swap_total_time = 0; /* total time spent swapped out */ +int tasks_swapped_out = 0; /* number of tasks swapped out now */ + +#ifdef TASK_SW_STATS +#define TASK_STATS_INCR(cnt) (cnt)++ +#else +#define TASK_STATS_INCR(cnt) +#endif /* TASK_SW_STATS */ + +#if TASK_SW_DEBUG +boolean_t on_swapped_list(task_t task); /* forward */ +/* + * Debug function to determine if a task is already on the + * swapped out tasks list. It also checks for tasks on the list + * that are in an illegal state (i.e. swapped in). + */ +boolean_t +on_swapped_list(task_t task) +{ + task_t ltask; + /* task_swapper_lock is locked. 
*/ + + if (queue_empty(&swapped_tasks)) { + return(FALSE); + } + ltask = (task_t)queue_first(&swapped_tasks); + while (!queue_end(&swapped_tasks, (queue_entry_t)ltask)) { + /* check for illegal state */ + if (ltask->swap_state == TASK_SW_IN) { + printf("on_swapped_list and in: 0x%X\n",ltask); + Debugger(""); + } + if (ltask == task) + return(TRUE); + ltask = (task_t)queue_next(<ask->swapped_tasks); + } + return(FALSE); +} +#endif /* TASK_SW_DEBUG */ + +/* + * task_swapper_init: [exported] + */ +void +task_swapper_init() +{ + queue_init(&swapped_tasks); + queue_init(&eligible_tasks); + queue_init(&swapout_thread_q); + mutex_init(&task_swapper_lock, ETAP_THREAD_TASK_SWAP); + mutex_init(&task_swapout_list_lock, ETAP_THREAD_TASK_SWAPOUT); + vm_page_free_avg = vm_page_free_count * AVE_SCALE; + vm_page_free_longavg = vm_page_free_count * AVE_SCALE; +} + +#endif /* TASK_SWAPPER */ + +/* + * task_swappable: [exported] + * + * Make a task swappable or non-swappable. If made non-swappable, + * it will be swapped in. + * + * Locking: task_swapout_lock is taken before task lock. + */ +kern_return_t +task_swappable( + host_priv_t host_priv, + task_t task, + boolean_t make_swappable) +{ + if (host_priv == HOST_PRIV_NULL) + return(KERN_INVALID_ARGUMENT); + + if (task == TASK_NULL) + return(KERN_INVALID_ARGUMENT); + +#if !TASK_SWAPPER + + /* + * If we don't support swapping, this call is purely advisory. 
+ */ + return(KERN_SUCCESS); + +#else /* TASK_SWAPPER */ + + task_lock(task); + if (make_swappable) { + /* make task swappable */ + if (task->swap_state == TASK_SW_UNSWAPPABLE) { + task->swap_state = TASK_SW_IN; + task_unlock(task); + task_swapout_eligible(task); + } + } else { + switch (task->swap_state) { + case TASK_SW_IN: + task->swap_state = TASK_SW_UNSWAPPABLE; + task_unlock(task); + task_swapout_ineligible(task); + break; + case TASK_SW_UNSWAPPABLE: + task_unlock(task); + break; + default: + /* + * swap_state could be TASK_SW_OUT, TASK_SW_GOING_OUT, + * or TASK_SW_COMING_IN. task_swapin handles all + * three, and its default case will catch any bad + * states. + */ + task_unlock(task); + task_swapin(task, TRUE); + break; + } + } + return(KERN_SUCCESS); + +#endif /* TASK_SWAPPER */ + +} + +#if TASK_SWAPPER + +/* + * task_swapout: + * A reference to the task must be held. + * + * Start swapping out a task by sending an AST_SWAPOUT to each thread. + * When the threads reach a clean point, they queue themselves up on the + * swapout_thread_q to be swapped out by the task_swap_swapout_thread. + * The task can be swapped in at any point in this process. + * + * A task will not be fully swapped out (i.e. its map residence count + * at zero) until all currently-swapped threads run and reach + * a clean point, at which time they will be swapped again, + * decrementing the swap_ast_waiting count on the task. + * + * Locking: no locks held upon entry and exit. + * Task_lock is held throughout this function. + */ +kern_return_t +task_swapout(task_t task) +{ + thread_act_t thr_act; + thread_t thread; + queue_head_t *list; + int s; + + task_swapout_lock(); + task_lock(task); + /* + * NOTE: look into turning these into assertions if they + * are invariants. 
+ */ + if ((task->swap_state != TASK_SW_IN) || (!task->active)) { + task_unlock(task); + task_swapout_unlock(); + return(KERN_FAILURE); + } + if (task->swap_flags & TASK_SW_ELIGIBLE) { + queue_remove(&eligible_tasks, task, task_t, swapped_tasks); + task->swap_flags &= ~TASK_SW_ELIGIBLE; + } + task_swapout_unlock(); + + /* set state to avoid races with task_swappable(FALSE) */ + task->swap_state = TASK_SW_GOING_OUT; + task->swap_rss = pmap_resident_count(task->map->pmap); + task_swaprss_out += task->swap_rss; + task->swap_ast_waiting = task->thr_act_count; + + /* + * halt all threads in this task: + * We don't need the thread list lock for traversal. + */ + list = &task->thr_acts; + thr_act = (thread_act_t) queue_first(list); + while (!queue_end(list, (queue_entry_t) thr_act)) { + boolean_t swappable; + thread_act_t ract; + + thread = act_lock_thread(thr_act); + s = splsched(); + if (!thread) + swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE); + else { + thread_lock(thread); + swappable = TRUE; + for (ract = thread->top_act; ract; ract = ract->lower) + if (ract->swap_state == TH_SW_UNSWAPPABLE) { + swappable = FALSE; + break; + } + } + if (swappable) + thread_ast_set(thr_act, AST_SWAPOUT); + if (thread) + thread_unlock(thread); + splx(s); + assert((thr_act->ast & AST_TERMINATE) == 0); + act_unlock_thread(thr_act); + thr_act = (thread_act_t) queue_next(&thr_act->thr_acts); + } + + task->swap_stamp = sched_tick; + task->swap_nswap++; + assert((task->swap_flags&TASK_SW_WANT_IN) == 0); + /* put task on the queue of swapped out tasks */ + task_swapper_lock(); +#if TASK_SW_DEBUG + if (task_swap_debug && on_swapped_list(task)) { + printf("task 0x%X already on list\n", task); + Debugger(""); + } +#endif /* TASK_SW_DEBUG */ + queue_enter(&swapped_tasks, task, task_t, swapped_tasks); + tasks_swapped_out++; + task_swapouts++; + task_swapper_unlock(); + task_unlock(task); + + return(KERN_SUCCESS); +} + +#ifdef TASK_SW_STATS +int task_sw_race_in = 0; +int 
task_sw_race_coming_in = 0; +int task_sw_race_going_out = 0; +int task_sw_before_ast = 0; +int task_sw_before_swap = 0; +int task_sw_after_swap = 0; +int task_sw_race_in_won = 0; +int task_sw_unswappable = 0; +int task_sw_act_inactive = 0; +#endif /* TASK_SW_STATS */ + +/* + * thread_swapout_enqueue is called by thread_halt_self when it + * processes AST_SWAPOUT to enqueue threads to be swapped out. + * It must be called at normal interrupt priority for the + * sake of the task_swapper_lock. + * + * There can be races with task swapin here. + * First lock task and decrement swap_ast_waiting count, and if + * it's 0, we can decrement the residence count on the task's map + * and set the task's swap state to TASK_SW_OUT. + */ +void +thread_swapout_enqueue(thread_act_t thr_act) +{ + task_t task = thr_act->task; + task_lock(task); + /* + * If the swap_state is not TASK_SW_GOING_OUT, then + * task_swapin has beaten us to this operation, and + * we have nothing to do. + */ + if (task->swap_state != TASK_SW_GOING_OUT) { + task_unlock(task); + return; + } + if (--task->swap_ast_waiting == 0) { + vm_map_t map = task->map; + task->swap_state = TASK_SW_OUT; + task_unlock(task); + mutex_lock(&map->s_lock); + vm_map_res_deallocate(map); + mutex_unlock(&map->s_lock); + } else + task_unlock(task); + + task_swapper_lock(); + act_lock(thr_act); + if (! (thr_act->swap_state & TH_SW_TASK_SWAPPING)) { + /* + * We lost a race with task_swapin(): don't enqueue. + */ + } else { + queue_enter(&swapout_thread_q, thr_act, + thread_act_t, swap_queue); + thread_wakeup((event_t)&swapout_thread_q); + } + act_unlock(thr_act); + task_swapper_unlock(); +} + +/* + * task_swap_swapout_thread: [exported] + * + * Executes as a separate kernel thread. + * Its job is to swap out threads that have been halted by AST_SWAPOUT. 
+ */ +void +task_swap_swapout_thread(void) +{ + thread_act_t thr_act; + thread_t thread, nthread; + task_t task; + int s; + + thread_swappable(current_act(), FALSE); + stack_privilege(current_thread()); + + spllo(); + + while (TRUE) { + task_swapper_lock(); + while (! queue_empty(&swapout_thread_q)) { + + queue_remove_first(&swapout_thread_q, thr_act, + thread_act_t, swap_queue); + /* + * If we're racing with task_swapin, we need + * to make it safe for it to do remque on the + * thread, so make its links point to itself. + * Allowing this ugliness is cheaper than + * making task_swapin search the entire queue. + */ + act_lock(thr_act); + queue_init((queue_t) &thr_act->swap_queue); + act_unlock(thr_act); + task_swapper_unlock(); + /* + * Wait for thread's RUN bit to be deasserted. + */ + thread = act_lock_thread(thr_act); + if (thread == THREAD_NULL) + act_unlock_thread(thr_act); + else { + boolean_t r; + + thread_reference(thread); + thread_hold(thr_act); + act_unlock_thread(thr_act); + r = thread_stop_wait(thread); + nthread = act_lock_thread(thr_act); + thread_release(thr_act); + thread_deallocate(thread); + act_unlock_thread(thr_act); + if (!r || nthread != thread) { + task_swapper_lock(); + continue; + } + } + task = thr_act->task; + task_lock(task); + /* + * we can race with swapin, which would set the + * state to TASK_SW_IN. 
+ */ + if ((task->swap_state != TASK_SW_OUT) && + (task->swap_state != TASK_SW_GOING_OUT)) { + task_unlock(task); + task_swapper_lock(); + TASK_STATS_INCR(task_sw_race_in_won); + if (thread != THREAD_NULL) + thread_unstop(thread); + continue; + } + nthread = act_lock_thread(thr_act); + if (nthread != thread || thr_act->active == FALSE) { + act_unlock_thread(thr_act); + task_unlock(task); + task_swapper_lock(); + TASK_STATS_INCR(task_sw_act_inactive); + if (thread != THREAD_NULL) + thread_unstop(thread); + continue; + } + s = splsched(); + if (thread != THREAD_NULL) + thread_lock(thread); + /* + * Thread cannot have been swapped out yet because + * TH_SW_TASK_SWAPPING was set in AST. If task_swapin + * beat us here, we either wouldn't have found it on + * the queue, or the task->swap_state would have + * changed. The synchronization is on the + * task's swap_state and the task_lock. + * The thread can't be swapped in any other way + * because its task has been swapped. + */ + assert(thr_act->swap_state & TH_SW_TASK_SWAPPING); + assert(thread == THREAD_NULL || + !(thread->state & (TH_SWAPPED_OUT|TH_RUN))); + assert((thr_act->swap_state & TH_SW_STATE) == TH_SW_IN); + /* assert(thread->state & TH_HALTED); */ + /* this also clears TH_SW_TASK_SWAPPING flag */ + thr_act->swap_state = TH_SW_GOING_OUT; + if (thread != THREAD_NULL) { + if (thread->top_act == thr_act) { + thread->state |= TH_SWAPPED_OUT; + /* + * Once we unlock the task, things can happen + * to the thread, so make sure it's consistent + * for thread_swapout. 
+ */ + } + thread->ref_count++; + thread_unlock(thread); + thread_unstop(thread); + } + splx(s); + act_locked_act_reference(thr_act); + act_unlock_thread(thr_act); + task_unlock(task); + + thread_swapout(thr_act); /* do the work */ + + if (thread != THREAD_NULL) + thread_deallocate(thread); + act_deallocate(thr_act); + task_swapper_lock(); + } + assert_wait((event_t)&swapout_thread_q, THREAD_UNINT); + task_swapper_unlock(); + thread_block((void (*)(void)) 0); + } +} + +/* + * task_swapin: + * + * Make a task resident. + * Performs all of the work to make a task resident and possibly + * non-swappable. If we race with a competing task_swapin call, + * we wait for its completion, then return. + * + * Locking: no locks held upon entry and exit. + * + * Note that TASK_SW_MAKE_UNSWAPPABLE can only be set when the + * state is TASK_SW_COMING_IN. + */ + +kern_return_t +task_swapin(task_t task, boolean_t make_unswappable) +{ + register queue_head_t *list; + register thread_act_t thr_act, next; + thread_t thread; + int s; + boolean_t swappable = TRUE; + + task_lock(task); + switch (task->swap_state) { + case TASK_SW_OUT: + { + vm_map_t map = task->map; + /* + * Task has made it all the way out, which means + * that vm_map_res_deallocate has been done; set + * state to TASK_SW_COMING_IN, then bring map + * back in. We could actually be racing with + * the thread_swapout_enqueue, which does the + * vm_map_res_deallocate, but that race is covered. + */ + task->swap_state = TASK_SW_COMING_IN; + assert(task->swap_ast_waiting == 0); + assert(map->res_count >= 0); + task_unlock(task); + mutex_lock(&map->s_lock); + vm_map_res_reference(map); + mutex_unlock(&map->s_lock); + task_lock(task); + assert(task->swap_state == TASK_SW_COMING_IN); + } + break; + + case TASK_SW_GOING_OUT: + /* + * Task isn't all the way out yet. There is + * still at least one thread not swapped, and + * vm_map_res_deallocate has not been done. 
+ */ + task->swap_state = TASK_SW_COMING_IN; + assert(task->swap_ast_waiting > 0 || + (task->swap_ast_waiting == 0 && + task->thr_act_count == 0)); + assert(task->map->res_count > 0); + TASK_STATS_INCR(task_sw_race_going_out); + break; + case TASK_SW_IN: + assert(task->map->res_count > 0); +#if TASK_SW_DEBUG + task_swapper_lock(); + if (task_swap_debug && on_swapped_list(task)) { + printf("task 0x%X on list, state is SW_IN\n", + task); + Debugger(""); + } + task_swapper_unlock(); +#endif /* TASK_SW_DEBUG */ + TASK_STATS_INCR(task_sw_race_in); + if (make_unswappable) { + task->swap_state = TASK_SW_UNSWAPPABLE; + task_unlock(task); + task_swapout_ineligible(task); + } else + task_unlock(task); + return(KERN_SUCCESS); + case TASK_SW_COMING_IN: + /* + * Raced with another task_swapin and lost; + * wait for other one to complete first + */ + assert(task->map->res_count >= 0); + /* + * set MAKE_UNSWAPPABLE so that whoever is swapping + * the task in will make it unswappable, and return + */ + if (make_unswappable) + task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE; + task->swap_flags |= TASK_SW_WANT_IN; + assert_wait((event_t)&task->swap_state, THREAD_UNINT); + task_unlock(task); + thread_block((void (*)(void)) 0); + TASK_STATS_INCR(task_sw_race_coming_in); + return(KERN_SUCCESS); + case TASK_SW_UNSWAPPABLE: + /* + * This can happen, since task_terminate + * unconditionally calls task_swapin. 
+ */ + task_unlock(task); + return(KERN_SUCCESS); + default: + panic("task_swapin bad state"); + break; + } + if (make_unswappable) + task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE; + assert(task->swap_state == TASK_SW_COMING_IN); + task_swapper_lock(); +#if TASK_SW_DEBUG + if (task_swap_debug && !on_swapped_list(task)) { + printf("task 0x%X not on list\n", task); + Debugger(""); + } +#endif /* TASK_SW_DEBUG */ + queue_remove(&swapped_tasks, task, task_t, swapped_tasks); + tasks_swapped_out--; + task_swapins++; + task_swapper_unlock(); + + /* + * Iterate through all threads for this task and + * release them, as required. They may not have been swapped + * out yet. The task remains locked throughout. + */ + list = &task->thr_acts; + thr_act = (thread_act_t) queue_first(list); + while (!queue_end(list, (queue_entry_t) thr_act)) { + boolean_t need_to_release; + next = (thread_act_t) queue_next(&thr_act->thr_acts); + /* + * Keep task_swapper_lock across thread handling + * to synchronize with task_swap_swapout_thread + */ + task_swapper_lock(); + thread = act_lock_thread(thr_act); + s = splsched(); + if (thr_act->ast & AST_SWAPOUT) { + /* thread hasn't gotten the AST yet, just clear it */ + thread_ast_clear(thr_act, AST_SWAPOUT); + need_to_release = FALSE; + TASK_STATS_INCR(task_sw_before_ast); + splx(s); + act_unlock_thread(thr_act); + } else { + /* + * If AST_SWAPOUT was cleared, then thread_hold, + * or equivalent was done. + */ + need_to_release = TRUE; + /* + * Thread has hit AST, but it may not have + * been dequeued yet, so we need to check. + * NOTE: the thread may have been dequeued, but + * has not yet been swapped (the task_swapper_lock + * has been dropped, but the thread is not yet + * locked), and the TH_SW_TASK_SWAPPING flag may + * not have been cleared. In this case, we will do + * an extra remque, which the task_swap_swapout_thread + * has made safe, and clear the flag, which is also + * checked by the t_s_s_t before doing the swapout. 
+ */ + if (thread) + thread_lock(thread); + if (thr_act->swap_state & TH_SW_TASK_SWAPPING) { + /* + * hasn't yet been dequeued for swapout, + * so clear flags and dequeue it first. + */ + thr_act->swap_state &= ~TH_SW_TASK_SWAPPING; + assert(thr_act->thread == THREAD_NULL || + !(thr_act->thread->state & + TH_SWAPPED_OUT)); + queue_remove(&swapout_thread_q, thr_act, + thread_act_t, swap_queue); + TASK_STATS_INCR(task_sw_before_swap); + } else { + TASK_STATS_INCR(task_sw_after_swap); + /* + * It's possible that the thread was + * made unswappable before hitting the + * AST, in which case it's still running. + */ + if (thr_act->swap_state == TH_SW_UNSWAPPABLE) { + need_to_release = FALSE; + TASK_STATS_INCR(task_sw_unswappable); + } + } + if (thread) + thread_unlock(thread); + splx(s); + act_unlock_thread(thr_act); + } + task_swapper_unlock(); + + /* + * thread_release will swap in the thread if it's been + * swapped out. + */ + if (need_to_release) { + act_lock_thread(thr_act); + thread_release(thr_act); + act_unlock_thread(thr_act); + } + thr_act = next; + } + + if (task->swap_flags & TASK_SW_MAKE_UNSWAPPABLE) { + task->swap_flags &= ~TASK_SW_MAKE_UNSWAPPABLE; + task->swap_state = TASK_SW_UNSWAPPABLE; + swappable = FALSE; + } else { + task->swap_state = TASK_SW_IN; + } + + task_swaprss_in += pmap_resident_count(task->map->pmap); + task_swap_total_time += sched_tick - task->swap_stamp; + /* note when task came back in */ + task->swap_stamp = sched_tick; + if (task->swap_flags & TASK_SW_WANT_IN) { + task->swap_flags &= ~TASK_SW_WANT_IN; + thread_wakeup((event_t)&task->swap_state); + } + assert((task->swap_flags & TASK_SW_ELIGIBLE) == 0); + task_unlock(task); +#if TASK_SW_DEBUG + task_swapper_lock(); + if (task_swap_debug && on_swapped_list(task)) { + printf("task 0x%X on list at end of swap in\n", task); + Debugger(""); + } + task_swapper_unlock(); +#endif /* TASK_SW_DEBUG */ + /* + * Make the task eligible to be swapped again + */ + if (swappable) + 
task_swapout_eligible(task); + return(KERN_SUCCESS); +} + +void wake_task_swapper(boolean_t now); /* forward */ + +/* + * wake_task_swapper: [exported] + * + * Wakes up task swapper if now == TRUE or if at least + * task_swap_cycle_time has elapsed since the last call. + * + * NOTE: this function is not multithreaded, so if there is + * more than one caller, it must be modified. + */ +void +wake_task_swapper(boolean_t now) +{ + /* last_task_swap_cycle may require locking */ + if (now || + (sched_tick > (last_task_swap_cycle + task_swap_cycle_time))) { + last_task_swap_cycle = sched_tick; + if (task_swap_debug) + printf("wake_task_swapper: waking swapper\n"); + thread_wakeup((event_t)&swapped_tasks); /* poke swapper */ + } +} + +task_t pick_intask(void); /* forward */ +/* + * pick_intask: + * returns a task to be swapped in, or TASK_NULL if nothing suitable is found. + * + * current algorithm: Return the task that has been swapped out the + * longest, as long as it is > min_swap_time. It will be dequeued + * if actually swapped in. + * + * NOTE:********************************************** + * task->swap_rss (the size when the task was swapped out) could be used to + * further refine the selection. Another possibility would be to look at + * the state of the thread(s) to see if the task/threads would run if they + * were swapped in. + * *************************************************** + * + * Locking: no locks held upon entry and exit. 
+ */ +task_t +pick_intask(void) +{ + register task_t task = TASK_NULL; + + task_swapper_lock(); + /* the oldest task is the first one */ + if (!queue_empty(&swapped_tasks)) { + task = (task_t) queue_first(&swapped_tasks); + assert(task != TASK_NULL); + /* Make sure it's been out min_swap_time */ + if ((sched_tick - task->swap_stamp) < min_swap_time) + task = TASK_NULL; + } + task_swapper_unlock(); + return(task); +#if 0 + /* + * This code looks at the entire list of swapped tasks, but since + * it does not yet do anything but look at time swapped, we + * can simply use the fact that the queue is ordered, and take + * the first one off the queue. + */ + task = (task_t)queue_first(&swapped_tasks); + while (!queue_end(&swapped_tasks, (queue_entry_t)task)) { + task_lock(task); + tmp_time = sched_tick - task->swap_stamp; + if (tmp_time > min_swap_time && tmp_time > time_swapped) { + target_task = task; + time_swapped = tmp_time; + } + task_unlock(task); + task = (task_t)queue_next(&task->swapped_tasks); + } + task_swapper_unlock(); + return(target_task); +#endif +} + +task_t pick_outtask(void); /* forward */ +/* + * pick_outtask: + * returns a task to be swapped out, with a reference on the task, + * or NULL if no suitable task is found. + * + * current algorithm: + * + * Examine all eligible tasks. While looking, use the first thread in + * each task as an indication of the task's activity. Count up + * "active" threads (those either runnable or sleeping). If the task + * is active (by these criteria), swapped in, and resident + * for at least min_res_time, then select the task with the largest + * number of pages in memory. If there are less + * than min_active_tasks active tasks in the system, then don't + * swap anything out (this avoids swapping out the only running task + * in the system, for example). + * + * NOTE: the task selected will not be removed from the eligible list. 
+ * This means that it will be selected again if it is not swapped + * out, where it is removed from the list. + * + * Locking: no locks held upon entry and exit. Task_swapout_lock must be + * taken before task locks. + * + * *************************************************** + * TBD: + * This algorithm only examines the first thread in the task. Currently, since + * most swappable tasks in the system are single-threaded, this generalization + * works reasonably well. However, the algorithm should be changed + * to consider all threads in the task if more multi-threaded tasks were used. + * *************************************************** + */ + +#ifdef TASK_SW_STATS +int inactive_task_count = 0; +int empty_task_count = 0; +#endif /* TASK_SW_STATS */ + +task_t +pick_outtask(void) +{ + register task_t task; + register task_t target_task = TASK_NULL; + unsigned long task_rss; + unsigned long target_rss = 0; + boolean_t wired; + boolean_t active; + int nactive = 0; + + task_swapout_lock(); + if (queue_empty(&eligible_tasks)) { + /* not likely to happen */ + task_swapout_unlock(); + return(TASK_NULL); + } + task = (task_t)queue_first(&eligible_tasks); + while (!queue_end(&eligible_tasks, (queue_entry_t)task)) { + int s; + register thread_act_t thr_act; + thread_t th; + + + task_lock(task); + /* + * Don't swap real-time tasks. + * XXX Should we enforce that or can we let really critical + * tasks use task_swappable() to make sure they never end up + * n the eligible list ? 
+ */ + if (task->policy & POLICYCLASS_FIXEDPRI) { + goto tryagain; + } + if (!task->active) { + TASK_STATS_INCR(inactive_task_count); + goto tryagain; + } + if (task->res_act_count == 0) { + TASK_STATS_INCR(empty_task_count); + goto tryagain; + } + assert(!queue_empty(&task->thr_acts)); + thr_act = (thread_act_t)queue_first(&task->thr_acts); + active = FALSE; + th = act_lock_thread(thr_act); + s = splsched(); + if (th != THREAD_NULL) + thread_lock(th); + if ((th == THREAD_NULL) || + (th->state == TH_RUN) || + (th->state & TH_WAIT)) { + /* + * thread is "active": either runnable + * or sleeping. Count it and examine + * it further below. + */ + nactive++; + active = TRUE; + } + if (th != THREAD_NULL) + thread_unlock(th); + splx(s); + act_unlock_thread(thr_act); + if (active && + (task->swap_state == TASK_SW_IN) && + ((sched_tick - task->swap_stamp) > min_res_time)) { + long rescount = pmap_resident_count(task->map->pmap); + /* + * thread must be "active", task must be swapped + * in and resident for at least min_res_time + */ +#if 0 +/* DEBUG Test round-robin strategy. Picking biggest task could cause extreme + * unfairness to such large interactive programs as xterm. Instead, pick the + * first task that has any pages resident: + */ + if (rescount > 1) { + task->ref_count++; + target_task = task; + task_unlock(task); + task_swapout_unlock(); + return(target_task); + } +#else + if (rescount > target_rss) { + /* + * task is not swapped, and it has the + * largest rss seen so far. 
+ */ + task->ref_count++; + target_rss = rescount; + assert(target_task != task); + if (target_task != TASK_NULL) + task_deallocate(target_task); + target_task = task; + } +#endif + } +tryagain: + task_unlock(task); + task = (task_t)queue_next(&task->swapped_tasks); + } + task_swapout_unlock(); + /* only swap out if there are at least min_active_tasks */ + if (nactive < min_active_tasks) { + if (target_task != TASK_NULL) { + task_deallocate(target_task); + target_task = TASK_NULL; + } + } + return(target_task); +} + +#if TASK_SW_DEBUG +void print_pid(task_t task, unsigned long n1, unsigned long n2, + const char *comp, const char *inout); /* forward */ +void +print_pid( + task_t task, + unsigned long n1, + unsigned long n2, + const char *comp, + const char *inout) +{ + long rescount; + task_lock(task); + rescount = pmap_resident_count(task->map->pmap); + task_unlock(task); + printf("task_swapper: swapped %s task %x; %d %s %d; res=%d\n", + inout, task, n1, comp, n2, rescount); +} +#endif + +/* + * task_swapper: [exported] + * + * Executes as a separate kernel thread. + */ +#define MAX_LOOP 3 +void +task_swapper(void) +{ + task_t outtask, intask; + int timeout; + int loopcnt = 0; + boolean_t start_swapping; + boolean_t stop_swapping; + int local_page_free_avg; + extern int hz; + + thread_swappable(current_act(), FALSE); + stack_privilege(current_thread()); + + spllo(); + + for (;;) { + local_page_free_avg = vm_page_free_avg; + while (TRUE) { +#if 0 + if (task_swap_debug) + printf("task_swapper: top of loop; cnt = %d\n",loopcnt); +#endif + intask = pick_intask(); + + start_swapping = ((vm_pageout_rate_avg > swap_start_pageout_rate) || + (vm_grab_rate_avg > max_grab_rate)); + stop_swapping = (vm_pageout_rate_avg < swap_stop_pageout_rate); + + /* + * If a lot of paging is going on, or another task should come + * in but memory is tight, find something to swap out and start + * it. Don't swap any task out if task swapping is disabled. 
+ * vm_page_queue_free_lock protects the vm globals. + */ + outtask = TASK_NULL; + if (start_swapping || + (!stop_swapping && intask && + ((local_page_free_avg / AVE_SCALE) < vm_page_free_target)) + ) { + if (task_swap_enable && + (outtask = pick_outtask()) && + (task_swapout(outtask) == KERN_SUCCESS)) { + unsigned long rss; +#if TASK_SW_DEBUG + if (task_swap_debug) + print_pid(outtask, local_page_free_avg / AVE_SCALE, + vm_page_free_target, "<", + "out"); +#endif + rss = outtask->swap_rss; + if (outtask->swap_nswap == 1) + rss /= 2; /* divide by 2 if never out */ + local_page_free_avg += (rss/short_avg_interval) * AVE_SCALE; + } + if (outtask != TASK_NULL) + task_deallocate(outtask); + } + + /* + * If there is an eligible task to bring in and there are at + * least vm_page_free_target free pages, swap it in. If task + * swapping has been disabled, bring the task in anyway. + */ + if (intask && ((local_page_free_avg / AVE_SCALE) >= + vm_page_free_target || + stop_swapping || !task_swap_enable)) { + if (task_swapin(intask, FALSE) == KERN_SUCCESS) { + unsigned long rss; +#if TASK_SW_DEBUG + if (task_swap_debug) + print_pid(intask, local_page_free_avg / AVE_SCALE, + vm_page_free_target, ">=", + "in"); +#endif + rss = intask->swap_rss; + if (intask->swap_nswap == 1) + rss /= 2; /* divide by 2 if never out */ + local_page_free_avg -= (rss/short_avg_interval) * AVE_SCALE; + } + } + /* + * XXX + * Here we have to decide whether to continue swapping + * in and/or out before sleeping. The decision should + * be made based on the previous action (swapin/out) and + * current system parameters, such as paging rates and + * demand. + * The function, compute_vm_averages, which does these + * calculations, depends on being called every second, + * so we can't just do the same thing. + */ + if (++loopcnt < MAX_LOOP) + continue; + + /* + * Arrange to be awakened if paging is still heavy or there are + * any tasks partially or completely swapped out. 
(Otherwise, + * the wakeup will come from the external trigger(s).) + */ + timeout = 0; + if (start_swapping) + timeout = task_swap_cycle_time; + else { + task_swapper_lock(); + if (!queue_empty(&swapped_tasks)) + timeout = min_swap_time; + task_swapper_unlock(); + } + assert_wait((event_t)&swapped_tasks, THREAD_UNINT); + if (timeout) { + if (task_swap_debug) + printf("task_swapper: set timeout of %d\n", + timeout); + thread_set_timeout(timeout, NSEC_PER_SEC); + } + if (task_swap_debug) + printf("task_swapper: blocking\n"); + thread_block((void (*)(void)) 0); + if (timeout) { + thread_cancel_timeout(current_thread()); + } + /* reset locals */ + loopcnt = 0; + local_page_free_avg = vm_page_free_avg; + } + } +} + +/* from BSD */ +#define ave(smooth, cnt, time) \ + smooth = ((time - 1) * (smooth) + ((cnt) * AVE_SCALE)) / (time) + +/* + * We estimate the system paging load in more than one metric: + * 1) the total number of calls into the function, vm_page_grab, + * which allocates all page frames for real pages. + * 2) the total number of pages paged in and out of paging files. + * This is a measure of page cleaning and faulting from backing + * store. + * + * When either metric passes a threshold, tasks are swapped out. + */ +long last_grab_count = 0; +long last_pageout_count = 0; + +/* + * compute_vm_averages: [exported] + * + * This function is to be called once a second to calculate average paging + * demand and average numbers of free pages for use by the task swapper. + * Can also be used to wake up task swapper at desired thresholds. + * + * NOTE: this function is single-threaded, and requires locking if + * ever there are multiple callers. 
+ */ +void +compute_vm_averages(void) +{ + extern unsigned long vm_page_grab_count; + long grab_count, pageout_count; + int i; + + ave(vm_page_free_avg, vm_page_free_count, short_avg_interval); + ave(vm_page_free_longavg, vm_page_free_count, long_avg_interval); + + /* + * NOTE: the vm_page_grab_count and vm_stat structure are + * under control of vm_page_queue_free_lock. We're simply reading + * memory here, and the numbers don't depend on each other, so + * no lock is taken. + */ + + grab_count = vm_page_grab_count; + pageout_count = 0; + for (i = 0; i < NCPUS; i++) { + pageout_count += vm_stat[i].pageouts; + } + + ave(vm_pageout_rate_avg, pageout_count - last_pageout_count, + short_avg_interval); + ave(vm_pageout_rate_longavg, pageout_count - last_pageout_count, + long_avg_interval); + ave(vm_grab_rate_avg, grab_count - last_grab_count, + short_avg_interval); + last_grab_count = grab_count; + last_pageout_count = pageout_count; + + /* + * Adjust swap_{start,stop}_pageout_rate to the paging rate peak. + * This is an attempt to find the optimum paging rates at which + * to trigger task swapping on or off to regulate paging activity, + * depending on the hardware capacity. + */ + if (vm_pageout_rate_avg > vm_pageout_rate_peakavg) { + unsigned int desired_max; + + vm_pageout_rate_peakavg = vm_pageout_rate_avg; + swap_start_pageout_rate = + vm_pageout_rate_peakavg * swap_pageout_high_water_mark / 100; + swap_stop_pageout_rate = + vm_pageout_rate_peakavg * swap_pageout_low_water_mark / 100; + } + +#if TASK_SW_DEBUG + /* + * For measurements, allow fixed values. 
+ */ + if (fixed_swap_start_pageout_rate) + swap_start_pageout_rate = fixed_swap_start_pageout_rate; + if (fixed_swap_stop_pageout_rate) + swap_stop_pageout_rate = fixed_swap_stop_pageout_rate; +#endif /* TASK_SW_DEBUG */ + +#if TASK_SW_DEBUG + if (task_swap_stats) + printf("vm_avgs: pageout_rate: %d %d (on/off: %d/%d); page_free: %d %d (tgt: %d)\n", + vm_pageout_rate_avg / AVE_SCALE, + vm_pageout_rate_longavg / AVE_SCALE, + swap_start_pageout_rate / AVE_SCALE, + swap_stop_pageout_rate / AVE_SCALE, + vm_page_free_avg / AVE_SCALE, + vm_page_free_longavg / AVE_SCALE, + vm_page_free_target); +#endif /* TASK_SW_DEBUG */ + + if (vm_page_free_avg / AVE_SCALE <= vm_page_free_target) { + if (task_swap_on) { + /* The following is a delicate attempt to balance the + * need for reasonably rapid response to system + * thrashing, with the equally important desire to + * prevent the onset of swapping simply because of a + * short burst of paging activity. + */ + if ((vm_pageout_rate_longavg > swap_stop_pageout_rate) && + (vm_pageout_rate_avg > swap_start_pageout_rate) || + (vm_pageout_rate_avg > vm_pageout_rate_peakavg) || + (vm_grab_rate_avg > max_grab_rate)) + wake_task_swapper(FALSE); + } + } else /* page demand is low; should consider swapin */ { + if (tasks_swapped_out != 0) + wake_task_swapper(TRUE); + } +} + +void +task_swapout_eligible(task_t task) +{ +#if TASK_SW_DEBUG + task_swapper_lock(); + if (task_swap_debug && on_swapped_list(task)) { + printf("swapout_eligible: task 0x%X on swapped list\n", task); + Debugger(""); + } + task_swapper_unlock(); +#endif + task_swapout_lock(); + task_lock(task); +#if TASK_SW_DEBUG + if (task->swap_flags & TASK_SW_ELIGIBLE) { + printf("swapout_eligible: task 0x%X already eligible\n", task); + } +#endif /* TASK_SW_DEBUG */ + if ((task->swap_state == TASK_SW_IN) && + ((task->swap_flags & TASK_SW_ELIGIBLE) == 0)) { + queue_enter(&eligible_tasks,task,task_t,swapped_tasks); + task->swap_flags |= TASK_SW_ELIGIBLE; + } + task_unlock(task); + 
task_swapout_unlock(); +} + +void +task_swapout_ineligible(task_t task) +{ +#if TASK_SW_DEBUG + task_swapper_lock(); + if (task_swap_debug && on_swapped_list(task)) { + printf("swapout_ineligible: task 0x%X on swapped list\n", task); + Debugger(""); + } + task_swapper_unlock(); +#endif + task_swapout_lock(); + task_lock(task); +#if TASK_SW_DEBUG + if (!(task->swap_flags & TASK_SW_ELIGIBLE)) + printf("swapout_ineligible: task 0x%X already inel.\n", task); +#endif /* TASK_SW_DEBUG */ + if ((task->swap_state != TASK_SW_IN) && + (task->swap_flags & TASK_SW_ELIGIBLE)) { + queue_remove(&eligible_tasks, task, task_t, swapped_tasks); + task->swap_flags &= ~TASK_SW_ELIGIBLE; + } + task_unlock(task); + task_swapout_unlock(); +} + +int task_swap_ast_aborted = 0; + +/* + * Process an AST_SWAPOUT. + */ +void +swapout_ast() +{ + spl_t s; + thread_act_t act; + thread_t thread; + + act = current_act(); + + /* + * Task is being swapped out. First mark it as suspended + * and halted, then call thread_swapout_enqueue to put + * the thread on the queue for task_swap_swapout_threads + * to swap out the thread. + */ + /* + * Don't swap unswappable threads + */ + thread = act_lock_thread(act); + s = splsched(); + if (thread) + thread_lock(thread); + if ((act->ast & AST_SWAPOUT) == 0) { + /* + * Race with task_swapin. Abort swapout. + */ + task_swap_ast_aborted++; /* not locked XXX */ + if (thread) + thread_unlock(thread); + splx(s); + act_unlock_thread(act); + } else if (act->swap_state == TH_SW_IN) { + /* + * Mark swap_state as TH_SW_TASK_SWAPPING to avoid + * race with thread swapper, which will only + * swap thread if swap_state is TH_SW_IN. + * This way, the thread can only be swapped by + * the task swapping mechanism. + */ + act->swap_state |= TH_SW_TASK_SWAPPING; + /* assert(act->suspend_count == 0); XXX ? 
*/ + if (thread) + thread_unlock(thread); + if (act->suspend_count++ == 0) /* inline thread_hold */ + install_special_handler(act); + /* self->state |= TH_HALTED; */ + thread_ast_clear(act, AST_SWAPOUT); + /* + * Initialize the swap_queue fields to allow an extra + * queue_remove() in task_swapin if we lose the race + * (task_swapin can be called before we complete + * thread_swapout_enqueue). + */ + queue_init((queue_t) &act->swap_queue); + splx(s); + act_unlock_thread(act); + /* this must be called at normal interrupt level */ + thread_swapout_enqueue(act); + } else { + /* thread isn't swappable; continue running */ + assert(act->swap_state == TH_SW_UNSWAPPABLE); + if (thread) + thread_unlock(thread); + thread_ast_clear(act, AST_SWAPOUT); + splx(s); + act_unlock_thread(act); + } +} + +#endif /* TASK_SWAPPER */ diff --git a/osfmk/kern/task_swap.h b/osfmk/kern/task_swap.h new file mode 100644 index 000000000..279caefe6 --- /dev/null +++ b/osfmk/kern/task_swap.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:56 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1995/04/07 19:02:38 barbou + * Merged into mainline. + * [95/03/09 barbou] + * + * Revision 1.1.2.2 1995/02/13 15:35:45 barbou + * Merged/ported to MK6. + * + * Revision 1.1.1.3 94/08/12 15:44:39 barbou + * VM Merge - Task Swapper. + * + * Changed host_priv_t into host_t. + * [94/07/28 barbou] + * + * Revision 1.1.1.2 1994/07/28 15:33:46 barbou + * Copied from IK. + * + * Revision 3.0.3.2 1994/01/20 19:53:01 chasb + * Remove excessively restrictive copyright notice + * [1994/01/20 17:50:40 chasb] + * + * Revision 3.0.3.1 1993/12/20 21:06:49 gupta + * Expanded C O P Y R I G H T + * [1993/12/17 22:19:22 gupta] + * + * Revision 3.0 1992/12/31 22:08:24 ede + * Initial revision for OSF/1 R1.3 + * + * Revision 1.1.4.5 1992/03/16 18:02:52 gmf + * Add TASK_SW_ELIGIBLE flag to swap_flags; prototype + * task_swapout_eligible, task_swapout_ineligible. + * [1992/02/12 22:01:48 gmf] + * + * Revision 1.1.4.4 1992/01/22 22:14:13 gmf + * Change prototype for task_swappable() to use host_priv_t + * instead of host_t. + * [1992/01/17 17:48:13 gmf] + * + * Revision 1.1.4.3 1991/12/10 17:20:55 gmf + * Add extern declaration for new thread. + * Changed TASK_SW_WAIT flag to TASK_SW_WANT_IN. + * [1991/12/10 16:19:10 gmf] + * + * Revision 1.1.4.2 1991/11/21 21:48:35 mmp + * initial task swapping code + * [1991/11/21 21:01:37 mmp] + * + * $EndLog$ + */ + +/* + * File: kern/task_swap.h + * + * Task residency management primitives declarations. 
+ */ + +#ifndef _KERN_TASK_SWAP_H_ +#define _KERN_TASK_SWAP_H_ + +#include + +/* + * swap states + */ +#define TASK_SW_UNSWAPPABLE 1 /* not swappable */ +#define TASK_SW_IN 2 /* swapped in (resident) */ +#define TASK_SW_OUT 3 /* swapped out (non-resident) */ +#define TASK_SW_COMING_IN 4 /* about to be swapped in */ +#define TASK_SW_GOING_OUT 5 /* being swapped out */ + +/* + * swap flags + */ +#define TASK_SW_MAKE_UNSWAPPABLE 0x01 /* make it unswappable */ +#define TASK_SW_WANT_IN 0x02 /* sleeping on state */ +#define TASK_SW_ELIGIBLE 0x04 /* eligible for swapping */ + +/* + * exported routines + */ +extern void task_swapper_init(void); +extern kern_return_t task_swapin( + task_t, /* task */ + boolean_t); /* make_unswappable */ +extern kern_return_t task_swapout(task_t /* task */); +extern void task_swapper(void); +extern void task_swap_swapout_thread(void); +extern void compute_vm_averages(void); +extern kern_return_t task_swappable( + host_priv_t, /* host */ + task_t, /* task */ + boolean_t); /* swappable */ +extern void task_swapout_eligible(task_t /* task */); +extern void task_swapout_ineligible(task_t /* task */); +extern void swapout_ast(void); + +#endif /* _KERN_TASK_SWAP_H_ */ diff --git a/osfmk/kern/template.mk b/osfmk/kern/template.mk new file mode 100644 index 000000000..8d40fb3bd --- /dev/null +++ b/osfmk/kern/template.mk @@ -0,0 +1,68 @@ +# +# @OSF_FREE_COPYRIGHT@ +# +# +# HISTORY +# +# Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez +# Import of Mac OS X kernel (~semeria) +# +# Revision 1.1.1.1 1998/03/07 02:25:56 wsanchez +# Import of OSF Mach kernel (~mburg) +# +# Revision 1.1.4.1 1995/02/23 17:32:24 alanl +# Taken from DIPC2_SHARED. Add -X to MIG ala norma/template.mk +# [1995/02/22 20:46:31 alanl] +# +# Revision 1.1.2.1 1994/08/04 02:26:22 mmp +# Initial revision: NORMA_TASK split out from NORMA_INTERNAL and +# moved here from norma/template.mk. +# [1994/08/03 20:29:11 mmp] +# +# $EndLog$ + +VPATH = ..:../.. 
+ +MIGFLAGS = -MD ${IDENT} -X +MIGKSFLAGS = -DKERNEL_SERVER +MIGKUFLAGS = -DKERNEL_USER + +NORMA_TASK_FILES = \ + norma_task_server.h \ + norma_task_server.c + +NORMA_TASK_USER_FILES = \ + norma_task.h \ + norma_task_user.c + +OTHERS = ${NORMA_TASK_FILES} ${NORMA_TASK_USER_FILES} + +INCFLAGS = -I.. -I../.. +MDINCFLAGS = -I.. -I../.. + +DEPENDENCIES = + +.include <${RULES_MK}> + +.ORDER: ${NORMA_TASK_FILES} + +${NORMA_TASK_FILES}: kern/norma_task.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader norma_task_server.h \ + -server norma_task_server.c \ + ${kern/norma_task.defs:P} + +.ORDER: ${NORMA_TASK_USER_FILES} + +${NORMA_TASK_USER_FILES}: kern/norma_task.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} \ + -header norma_task.h \ + -user norma_task_user.c \ + -server /dev/null \ + ${kern/norma_task.defs:P} + +.if exists(depend.mk) +.include "depend.mk" +.endif diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c new file mode 100644 index 000000000..8d4523b15 --- /dev/null +++ b/osfmk/kern/thread.c @@ -0,0 +1,1999 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/thread.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub + * Date: 1986 + * + * Thread/thread_shuttle management primitives implementation. + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. 
THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /*** ??? fix so this can be removed ***/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for MACHINE_STACK */ +#include +#include +#include + +/* + * Exported interfaces + */ + +#include +#include + +/* + * Per-Cpu stashed global state + */ +vm_offset_t active_stacks[NCPUS]; /* per-cpu active stacks */ +vm_offset_t kernel_stack[NCPUS]; /* top of active stacks */ +thread_act_t active_kloaded[NCPUS]; /* + act if kernel loaded */ + +struct zone *thread_shuttle_zone; + +queue_head_t reaper_queue; +decl_simple_lock_data(,reaper_lock) +thread_call_t thread_reaper_call; + +extern int tick; + +extern void pcb_module_init(void); + +/* private */ +static struct thread_shuttle thr_sh_template; + +#if MACH_DEBUG +#if STACK_USAGE +static void stack_init(vm_offset_t stack, unsigned int bytes); +void stack_finalize(vm_offset_t stack); +vm_size_t stack_usage(vm_offset_t stack); +#else /*STACK_USAGE*/ +#define stack_init(stack, size) +#define stack_finalize(stack) +#define stack_usage(stack) (vm_size_t)0 +#endif /*STACK_USAGE*/ + +#ifdef MACHINE_STACK +extern +#endif + void stack_statistics( + unsigned int *totalp, + vm_size_t *maxusagep); + +#define STACK_MARKER 0xdeadbeef +#if STACK_USAGE +boolean_t stack_check_usage = TRUE; +#else /* STACK_USAGE */ +boolean_t stack_check_usage = FALSE; +#endif /* STACK_USAGE */ +decl_simple_lock_data(,stack_usage_lock) 
+vm_size_t stack_max_usage = 0; +vm_size_t stack_max_use = KERNEL_STACK_SIZE - 64; +#endif /* MACH_DEBUG */ + +/* Forwards */ +void thread_collect_scan(void); + +kern_return_t thread_create_shuttle( + thread_act_t thr_act, + integer_t priority, + void (*start)(void), + thread_t *new_thread); + +extern void Load_context( + thread_t thread); + + +/* + * Machine-dependent code must define: + * thread_machine_init + * thread_machine_terminate + * thread_machine_collect + * + * The thread->pcb field is reserved for machine-dependent code. + */ + +#ifdef MACHINE_STACK +/* + * Machine-dependent code must define: + * stack_alloc_try + * stack_alloc + * stack_free + * stack_collect + * and if MACH_DEBUG: + * stack_statistics + */ +#else /* MACHINE_STACK */ +/* + * We allocate stacks from generic kernel VM. + * Machine-dependent code must define: + * machine_kernel_stack_init + * + * The stack_free_list can only be accessed at splsched, + * because stack_alloc_try/thread_invoke operate at splsched. + */ + +decl_simple_lock_data(,stack_lock_data) /* splsched only */ +#define stack_lock() simple_lock(&stack_lock_data) +#define stack_unlock() simple_unlock(&stack_lock_data) + +vm_offset_t stack_free_list; /* splsched only */ +unsigned int stack_free_max = 0; +unsigned int stack_free_count = 0; /* splsched only */ +unsigned int stack_free_limit = 1; /* patchable */ + +unsigned int stack_alloc_hits = 0; /* debugging */ +unsigned int stack_alloc_misses = 0; /* debugging */ + +unsigned int stack_alloc_total = 0; +unsigned int stack_alloc_hiwater = 0; + +/* + * The next field is at the base of the stack, + * so the low end is left unsullied. + */ + +#define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1)) + +/* + * stack_alloc: + * + * Allocate a kernel stack for an activation. + * May block. + */ +vm_offset_t +stack_alloc( + thread_t thread, + void (*start_pos)(thread_t)) +{ + vm_offset_t stack; + spl_t s; + + /* + * We first try the free list. 
It is probably empty, + * or stack_alloc_try would have succeeded, but possibly + * a stack was freed before the swapin thread got to us. + */ + + s = splsched(); + stack_lock(); + stack = stack_free_list; + if (stack != 0) { + stack_free_list = stack_next(stack); + stack_free_count--; + } + stack_unlock(); + splx(s); + + if (stack == 0) { + /* + * Kernel stacks should be naturally aligned, + * so that it is easy to find the starting/ending + * addresses of a stack given an address in the middle. + */ + + if (kmem_alloc_aligned(kernel_map, &stack, + round_page(KERNEL_STACK_SIZE)) != KERN_SUCCESS) + panic("stack_alloc"); + + stack_alloc_total++; + if (stack_alloc_total > stack_alloc_hiwater) + stack_alloc_hiwater = stack_alloc_total; + +#if MACH_DEBUG + stack_init(stack, round_page(KERNEL_STACK_SIZE)); +#endif /* MACH_DEBUG */ + + /* + * If using fractional pages, free the remainder(s) + */ + if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE)) { + vm_offset_t ptr = stack + KERNEL_STACK_SIZE; + vm_offset_t endp = stack + round_page(KERNEL_STACK_SIZE); + while (ptr < endp) { +#if MACH_DEBUG + /* + * We need to initialize just the end of the + * region. + */ + stack_init(ptr, (unsigned int) (endp - ptr)); +#endif + stack_lock(); + stack_next(stack) = stack_free_list; + stack_free_list = stack; + if (++stack_free_count > stack_free_max) + stack_free_max = stack_free_count; + stack_unlock(); + ptr += KERNEL_STACK_SIZE; + } + } + } + stack_attach(thread, stack, start_pos); + return (stack); +} + +/* + * stack_free: + * + * Free a kernel stack. + * Called at splsched. + */ + +void +stack_free( + thread_t thread) +{ + vm_offset_t stack = stack_detach(thread); + assert(stack); + if (stack != thread->stack_privilege) { + stack_lock(); + stack_next(stack) = stack_free_list; + stack_free_list = stack; + if (++stack_free_count > stack_free_max) + stack_free_max = stack_free_count; + stack_unlock(); + } +} + +/* + * stack_collect: + * + * Free excess kernel stacks. 
+ * May block. + */ + +void +stack_collect(void) +{ + register vm_offset_t stack; + spl_t s; + + /* If using fractional pages, Cannot just call kmem_free(), + * and we're too lazy to coalesce small chunks. + */ + if (KERNEL_STACK_SIZE < round_page(KERNEL_STACK_SIZE)) + return; + + s = splsched(); + stack_lock(); + while (stack_free_count > stack_free_limit) { + stack = stack_free_list; + stack_free_list = stack_next(stack); + stack_free_count--; + stack_unlock(); + splx(s); + +#if MACH_DEBUG + stack_finalize(stack); +#endif /* MACH_DEBUG */ + kmem_free(kernel_map, stack, KERNEL_STACK_SIZE); + + s = splsched(); + stack_alloc_total--; + stack_lock(); + } + stack_unlock(); + splx(s); +} + + +#if MACH_DEBUG +/* + * stack_statistics: + * + * Return statistics on cached kernel stacks. + * *maxusagep must be initialized by the caller. + */ + +void +stack_statistics( + unsigned int *totalp, + vm_size_t *maxusagep) +{ + spl_t s; + + s = splsched(); + stack_lock(); + +#if STACK_USAGE + if (stack_check_usage) { + vm_offset_t stack; + + /* + * This is pretty expensive to do at splsched, + * but it only happens when someone makes + * a debugging call, so it should be OK. 
+ */ + + for (stack = stack_free_list; stack != 0; + stack = stack_next(stack)) { + vm_size_t usage = stack_usage(stack); + + if (usage > *maxusagep) + *maxusagep = usage; + } + } +#endif /* STACK_USAGE */ + + *totalp = stack_free_count; + stack_unlock(); + splx(s); +} +#endif /* MACH_DEBUG */ + +#endif /* MACHINE_STACK */ + + +stack_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, + vm_size_t *alloc_size, int *collectable, int *exhaustable) +{ + *count = stack_alloc_total - stack_free_count; + *cur_size = KERNEL_STACK_SIZE * stack_alloc_total; + *max_size = KERNEL_STACK_SIZE * stack_alloc_hiwater; + *elem_size = KERNEL_STACK_SIZE; + *alloc_size = KERNEL_STACK_SIZE; + *collectable = 1; + *exhaustable = 0; +} + + +/* + * stack_privilege: + * + * stack_alloc_try on this thread must always succeed. + */ + +void +stack_privilege( + register thread_t thread) +{ + /* + * This implementation only works for the current thread. + */ + + if (thread != current_thread()) + panic("stack_privilege"); + + if (thread->stack_privilege == 0) + thread->stack_privilege = current_stack(); +} + +/* + * stack_alloc_try: + * + * Non-blocking attempt to allocate a kernel stack. + * Called at splsched with the thread locked. 
+ */ + +boolean_t stack_alloc_try( + thread_t thread, + void (*start_pos)(thread_t)) +{ + register vm_offset_t stack; + + if ((stack = thread->stack_privilege) == (vm_offset_t)0) { + stack_lock(); + stack = stack_free_list; + if (stack != (vm_offset_t)0) { + stack_free_list = stack_next(stack); + stack_free_count--; + } + stack_unlock(); + } + + if (stack != 0) { + stack_attach(thread, stack, start_pos); + stack_alloc_hits++; + return TRUE; + } else { + stack_alloc_misses++; + return FALSE; + } +} + +natural_t min_quantum_abstime; +extern natural_t min_quantum_ms; + +void +thread_init(void) +{ + thread_shuttle_zone = zinit( + sizeof(struct thread_shuttle), + THREAD_MAX * sizeof(struct thread_shuttle), + THREAD_CHUNK * sizeof(struct thread_shuttle), + "threads"); + + /* + * Fill in a template thread_shuttle for fast initialization. + * [Fields that must be (or are typically) reset at + * time of creation are so noted.] + */ + + /* thr_sh_template.links (none) */ + thr_sh_template.runq = RUN_QUEUE_NULL; + + + /* thr_sh_template.task (later) */ + /* thr_sh_template.thread_list (later) */ + /* thr_sh_template.pset_threads (later) */ + + /* one ref for pset, one for activation */ + thr_sh_template.ref_count = 2; + + thr_sh_template.wait_event = NO_EVENT; + thr_sh_template.wait_result = KERN_SUCCESS; + thr_sh_template.wait_queue = WAIT_QUEUE_NULL; + thr_sh_template.wake_active = FALSE; + thr_sh_template.state = TH_WAIT|TH_UNINT; + thr_sh_template.interruptible = TRUE; + thr_sh_template.continuation = (void (*)(void))0; + thr_sh_template.top_act = THR_ACT_NULL; + + thr_sh_template.importance = 0; + thr_sh_template.sched_mode = 0; + + thr_sh_template.priority = 0; + thr_sh_template.sched_pri = 0; + thr_sh_template.depress_priority = -1; + thr_sh_template.max_priority = 0; + + thr_sh_template.cpu_usage = 0; + thr_sh_template.sched_usage = 0; + thr_sh_template.sched_stamp = 0; + thr_sh_template.sleep_stamp = 0; + + thr_sh_template.policy = POLICY_NULL; + 
thr_sh_template.sp_state = 0; + thr_sh_template.unconsumed_quantum = 0; + + thr_sh_template.vm_privilege = FALSE; + + timer_init(&(thr_sh_template.user_timer)); + timer_init(&(thr_sh_template.system_timer)); + thr_sh_template.user_timer_save.low = 0; + thr_sh_template.user_timer_save.high = 0; + thr_sh_template.system_timer_save.low = 0; + thr_sh_template.system_timer_save.high = 0; + thr_sh_template.cpu_delta = 0; + thr_sh_template.sched_delta = 0; + + thr_sh_template.active = FALSE; /* reset */ + + /* thr_sh_template.processor_set (later) */ +#if NCPUS > 1 + thr_sh_template.bound_processor = PROCESSOR_NULL; +#endif /*NCPUS > 1*/ +#if MACH_HOST + thr_sh_template.may_assign = TRUE; + thr_sh_template.assign_active = FALSE; +#endif /* MACH_HOST */ + thr_sh_template.funnel_state = 0; + +#if NCPUS > 1 + /* thr_sh_template.last_processor (later) */ +#endif /* NCPUS > 1 */ + + /* + * Initialize other data structures used in + * this module. + */ + + queue_init(&reaper_queue); + simple_lock_init(&reaper_lock, ETAP_THREAD_REAPER); + thr_sh_template.funnel_lock = THR_FUNNEL_NULL; + +#ifndef MACHINE_STACK + simple_lock_init(&stack_lock_data, ETAP_THREAD_STACK); +#endif /* MACHINE_STACK */ + +#if MACH_DEBUG + simple_lock_init(&stack_usage_lock, ETAP_THREAD_STACK_USAGE); +#endif /* MACH_DEBUG */ + +#if MACH_LDEBUG + thr_sh_template.kthread = FALSE; + thr_sh_template.mutex_count = 0; +#endif /* MACH_LDEBUG */ + + { + AbsoluteTime abstime; + + clock_interval_to_absolutetime_interval( + min_quantum_ms, 1000*NSEC_PER_USEC, &abstime); + assert(abstime.hi == 0 && abstime.lo != 0); + min_quantum_abstime = abstime.lo; + } + + /* + * Initialize any machine-dependent + * per-thread structures necessary. + */ + thread_machine_init(); +} + +void +thread_reaper_enqueue( + thread_t thread) +{ + /* + * thread lock is already held, splsched() + * not necessary here. + */ + simple_lock(&reaper_lock); + + enqueue_tail(&reaper_queue, (queue_entry_t)thread); +#if 0 /* CHECKME! 
*/ + /* + * Since thread has been put in the reaper_queue, it must no longer + * be preempted (otherwise, it could be put back in a run queue). + */ + thread->preempt = TH_NOT_PREEMPTABLE; +#endif + + simple_unlock(&reaper_lock); + + thread_call_enter(thread_reaper_call); +} + + +/* + * Routine: thread_terminate_self + * + * This routine is called by a thread which has unwound from + * its current RPC and kernel contexts and found that it's + * root activation has been marked for extinction. This lets + * it clean up the last few things that can only be cleaned + * up in this context and then impale itself on the reaper + * queue. + * + * When the reaper gets the thread, it will deallocate the + * thread_act's reference on itself, which in turn will release + * its own reference on this thread. By doing things in that + * order, a thread_act will always have a valid thread - but the + * thread may persist beyond having a thread_act (but must never + * run like that). + */ +void +thread_terminate_self(void) +{ + register thread_t thread = current_thread(); + thread_act_t thr_act = thread->top_act; + task_t task = thr_act->task; + int active_acts; + spl_t s; + + /* + * We should be at the base of the inheritance chain. + */ + assert(thr_act->thread == thread); + + /* + * Check to see if this is the last active activation. By + * this we mean the last activation to call thread_terminate_self. + * If so, and the task is associated with a BSD process, we + * need to call BSD and let them clean up. + */ + task_lock(task); + active_acts = --task->active_act_count; + task_unlock(task); + if (!active_acts && task->bsd_info) + proc_exit(task->bsd_info); + +#ifdef CALLOUT_RPC_MODEL + if (thr_act->lower) { + /* + * JMM - RPC will not be using a callout/stack manipulation + * mechanism. instead we will let it return normally as if + * from a continuation. Accordingly, these need to be cleaned + * up a bit. 
+ */ + act_switch_swapcheck(thread, (ipc_port_t)0); + act_lock(thr_act); /* hierarchy violation XXX */ + (void) switch_act(THR_ACT_NULL); + assert(thr_act->ref_count == 1); /* XXX */ + /* act_deallocate(thr_act); XXX */ + prev_act = thread->top_act; + /* + * disable preemption to protect kernel stack changes + * disable_preemption(); + * MACH_RPC_RET(prev_act) = KERN_RPC_SERVER_TERMINATED; + * machine_kernel_stack_init(thread, mach_rpc_return_error); + */ + act_unlock(thr_act); + + /* + * Load_context(thread); + */ + /* NOTREACHED */ + } + +#else /* !CALLOUT_RPC_MODEL */ + + assert(!thr_act->lower); + +#endif /* CALLOUT_RPC_MODEL */ + + s = splsched(); + thread_lock(thread); + thread->active = FALSE; + thread_unlock(thread); + splx(s); + + thread_timer_terminate(); + + /* flush any lazy HW state while in own context */ + thread_machine_flush(thr_act); + + ipc_thread_terminate(thread); + + s = splsched(); + thread_lock(thread); + thread->state |= (TH_HALTED|TH_TERMINATE); + assert((thread->state & TH_UNINT) == 0); +#if 0 /* CHECKME! */ + /* + * Since thread has been put in the reaper_queue, it must no longer + * be preempted (otherwise, it could be put back in a run queue). + */ + thread->preempt = TH_NOT_PREEMPTABLE; +#endif + thread_mark_wait_locked(thread, THREAD_UNINT); + thread_unlock(thread); + /* splx(s); */ + + ETAP_SET_REASON(thread, BLOCKED_ON_TERMINATION); + thread_block((void (*)(void)) 0); + panic("the zombie walks!"); + /*NOTREACHED*/ +} + + +/* + * Create a new thread. + * Doesn't start the thread running; It first must be attached to + * an activation - then use thread_go to start it. 
+ */ +kern_return_t +thread_create_shuttle( + thread_act_t thr_act, + integer_t priority, + void (*start)(void), + thread_t *new_thread) +{ + thread_t new_shuttle; + task_t parent_task = thr_act->task; + processor_set_t pset; + kern_return_t result; + sched_policy_t *policy; + sf_return_t sfr; + int suspcnt; + + assert(!thr_act->thread); + assert(!thr_act->pool_port); + + /* + * Allocate a thread and initialize static fields + */ + new_shuttle = (thread_t)zalloc(thread_shuttle_zone); + if (new_shuttle == THREAD_NULL) + return (KERN_RESOURCE_SHORTAGE); + + *new_shuttle = thr_sh_template; + + thread_lock_init(new_shuttle); + rpc_lock_init(new_shuttle); + wake_lock_init(new_shuttle); + new_shuttle->sleep_stamp = sched_tick; + + pset = parent_task->processor_set; + if (!pset->active) { + pset = &default_pset; + } + pset_lock(pset); + + task_lock(parent_task); + + /* + * Don't need to initialize because the context switch + * code will set it before it can be used. + */ + if (!parent_task->active) { + task_unlock(parent_task); + pset_unlock(pset); + zfree(thread_shuttle_zone, (vm_offset_t) new_shuttle); + return (KERN_FAILURE); + } + + act_attach(thr_act, new_shuttle, 0); + + /* Chain the thr_act onto the task's list */ + queue_enter(&parent_task->thr_acts, thr_act, thread_act_t, thr_acts); + parent_task->thr_act_count++; + parent_task->res_act_count++; + parent_task->active_act_count++; + + /* Associate the thread with that scheduling policy */ + new_shuttle->policy = parent_task->policy; + policy = &sched_policy[new_shuttle->policy]; + sfr = policy->sp_ops.sp_thread_attach(policy, new_shuttle); + if (sfr != SF_SUCCESS) + panic("thread_create_shuttle: sp_thread_attach"); + + /* Associate the thread with the processor set */ + sfr = policy->sp_ops.sp_thread_processor_set(policy, new_shuttle, pset); + if (sfr != SF_SUCCESS) + panic("thread_create_shuttle: sp_thread_proceessor_set"); + + /* Set the thread's scheduling parameters */ + new_shuttle->max_priority = 
parent_task->max_priority; + new_shuttle->priority = (priority < 0)? parent_task->priority: priority; + if (new_shuttle->priority > new_shuttle->max_priority) + new_shuttle->priority = new_shuttle->max_priority; + sfr = policy->sp_ops.sp_thread_setup(policy, new_shuttle); + if (sfr != SF_SUCCESS) + panic("thread_create_shuttle: sp_thread_setup"); + +#if ETAP_EVENT_MONITOR + new_thread->etap_reason = 0; + new_thread->etap_trace = FALSE; +#endif /* ETAP_EVENT_MONITOR */ + + new_shuttle->active = TRUE; + thr_act->active = TRUE; + pset_unlock(pset); + + + /* + * No need to lock thr_act, since it can't be known to anyone -- + * we set its suspend_count to one more than the task suspend_count + * by calling thread_hold. + */ + thr_act->user_stop_count = 1; + for (suspcnt = thr_act->task->suspend_count + 1; suspcnt; --suspcnt) + thread_hold(thr_act); + task_unlock(parent_task); + + /* + * Thread still isn't runnable yet (our caller will do + * that). Initialize runtime-dependent fields here. + */ + result = thread_machine_create(new_shuttle, thr_act, thread_continue); + assert (result == KERN_SUCCESS); + + machine_kernel_stack_init(new_shuttle, thread_continue); + ipc_thread_init(new_shuttle); + thread_start(new_shuttle, start); + thread_timer_setup(new_shuttle); + + *new_thread = new_shuttle; + + { + long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; + + KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_DATA, 1)) | DBG_FUNC_NONE, + (vm_address_t)new_shuttle, 0,0,0,0); + + kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, + &dbg_arg4); + KERNEL_DEBUG_CONSTANT((TRACEDBG_CODE(DBG_TRACE_STRING, 1)) | DBG_FUNC_NONE, + dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); + } + + return (KERN_SUCCESS); +} + +kern_return_t +thread_create( + task_t task, + thread_act_t *new_act) +{ + thread_act_t thr_act; + thread_t thread; + kern_return_t result; + sched_policy_t *policy; + sf_return_t sfr; + spl_t s; + extern void thread_bootstrap_return(void); + + if (task == TASK_NULL) 
+ return KERN_INVALID_ARGUMENT; + + result = act_create(task, &thr_act); + if (result != KERN_SUCCESS) + return (result); + + result = thread_create_shuttle(thr_act, -1, thread_bootstrap_return, &thread); + if (result != KERN_SUCCESS) { + act_deallocate(thr_act); + return (result); + } + + if (task->kernel_loaded) + thread_user_to_kernel(thread); + + /* Start the thread running (it will immediately suspend itself). */ + s = splsched(); + thread_ast_set(thr_act, AST_APC); + thread_lock(thread); + thread_go_locked(thread, THREAD_AWAKENED); + thread_unlock(thread); + splx(s); + + *new_act = thr_act; + + return (KERN_SUCCESS); +} + +/* + * Update thread that belongs to a task created via kernel_task_create(). + */ +void +thread_user_to_kernel( + thread_t thread) +{ + /* + * Used to set special swap_func here... + */ +} + +kern_return_t +thread_create_running( + register task_t parent_task, + int flavor, + thread_state_t new_state, + mach_msg_type_number_t new_state_count, + thread_act_t *child_act) /* OUT */ +{ + register kern_return_t result; + + result = thread_create(parent_task, child_act); + if (result != KERN_SUCCESS) + return (result); + + result = act_machine_set_state(*child_act, flavor, + new_state, new_state_count); + if (result != KERN_SUCCESS) { + (void) thread_terminate(*child_act); + return (result); + } + + result = thread_resume(*child_act); + if (result != KERN_SUCCESS) { + (void) thread_terminate(*child_act); + return (result); + } + + return (result); +} + +/* + * kernel_thread: + * + * Create and kernel thread in the specified task, and + * optionally start it running. 
+ */ +thread_t +kernel_thread_with_priority( + task_t task, + integer_t priority, + void (*start)(void), + boolean_t start_running) +{ + kern_return_t result; + thread_t thread; + thread_act_t thr_act; + sched_policy_t *policy; + sf_return_t sfr; + spl_t s; + + result = act_create(task, &thr_act); + if (result != KERN_SUCCESS) { + return THREAD_NULL; + } + + result = thread_create_shuttle(thr_act, priority, start, &thread); + if (result != KERN_SUCCESS) { + act_deallocate(thr_act); + return THREAD_NULL; + } + + thread_swappable(thr_act, FALSE); + + s = splsched(); + thread_lock(thread); + + thr_act = thread->top_act; +#if MACH_LDEBUG + thread->kthread = TRUE; +#endif /* MACH_LDEBUG */ + + if (start_running) + thread_go_locked(thread, THREAD_AWAKENED); + + thread_unlock(thread); + splx(s); + + if (start_running) + thread_resume(thr_act); + + act_deallocate(thr_act); + return (thread); +} + +thread_t +kernel_thread( + task_t task, + void (*start)(void)) +{ + return kernel_thread_with_priority(task, -1, start, TRUE); +} + +unsigned int c_weird_pset_ref_exit = 0; /* pset code raced us */ + +void +thread_deallocate( + thread_t thread) +{ + task_t task; + processor_set_t pset; + sched_policy_t *policy; + sf_return_t sfr; + spl_t s; + + if (thread == THREAD_NULL) + return; + + /* + * First, check for new count > 1 (the common case). + * Only the thread needs to be locked. + */ + s = splsched(); + thread_lock(thread); + if (--thread->ref_count > 1) { + thread_unlock(thread); + splx(s); + return; + } + + /* + * Down to pset reference, lets try to clean up. + * However, the processor set may make more. Its lock + * also dominate the thread lock. So, reverse the + * order of the locks and see if its still the last + * reference; + */ + assert(thread->ref_count == 1); /* Else this is an extra dealloc! 
*/ + thread_unlock(thread); + splx(s); + +#if MACH_HOST + thread_freeze(thread); +#endif /* MACH_HOST */ + + pset = thread->processor_set; + pset_lock(pset); + + s = splsched(); + thread_lock(thread); + + if (thread->ref_count > 1) { +#if MACH_HOST + boolean_t need_wakeup = FALSE; + /* + * processor_set made extra reference. + */ + /* Inline the unfreeze */ + thread->may_assign = TRUE; + if (thread->assign_active) { + need_wakeup = TRUE; + thread->assign_active = FALSE; + } +#endif /* MACH_HOST */ + thread_unlock(thread); + splx(s); + pset_unlock(pset); +#if MACH_HOST + if (need_wakeup) + thread_wakeup((event_t)&thread->assign_active); +#endif /* MACH_HOST */ + c_weird_pset_ref_exit++; + return; + } +#if MACH_HOST + assert(thread->assign_active == FALSE); +#endif /* MACH_HOST */ + + /* + * Thread only had pset reference - we can remove it. + */ + if (thread == current_thread()) + panic("thread deallocating itself"); + + /* Detach thread (shuttle) from its sched policy */ + policy = &sched_policy[thread->policy]; + sfr = policy->sp_ops.sp_thread_detach(policy, thread); + if (sfr != SF_SUCCESS) + panic("thread_deallocate: sp_thread_detach"); + + pset_remove_thread(pset, thread); + thread->ref_count = 0; + thread_unlock(thread); /* no more references - safe */ + splx(s); + pset_unlock(pset); + + pset_deallocate(thread->processor_set); + + /* frees kernel stack & other MD resources */ + if (thread->stack_privilege && (thread->stack_privilege != thread->kernel_stack)) { + vm_offset_t stack; + int s = splsched(); + stack = thread->stack_privilege; + stack_free(thread); + thread->kernel_stack = stack; + splx(s); + } + thread->stack_privilege = 0; + thread_machine_destroy(thread); + + zfree(thread_shuttle_zone, (vm_offset_t) thread); +} + +void +thread_reference( + thread_t thread) +{ + spl_t s; + + if (thread == THREAD_NULL) + return; + + s = splsched(); + thread_lock(thread); + thread->ref_count++; + thread_unlock(thread); + splx(s); +} + +/* + * Called with 
"appropriate" thread-related locks held on + * thread and its top_act for synchrony with RPC (see + * act_lock_thread()). + */ +kern_return_t +thread_info_shuttle( + register thread_act_t thr_act, + thread_flavor_t flavor, + thread_info_t thread_info_out, /* ptr to OUT array */ + mach_msg_type_number_t *thread_info_count) /*IN/OUT*/ +{ + register thread_t thread = thr_act->thread; + int state, flags; + spl_t s; + + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); + + if (flavor == THREAD_BASIC_INFO) { + register thread_basic_info_t basic_info; + + if (*thread_info_count < THREAD_BASIC_INFO_COUNT) + return (KERN_INVALID_ARGUMENT); + + basic_info = (thread_basic_info_t) thread_info_out; + + s = splsched(); + thread_lock(thread); + + /* fill in info */ + + thread_read_times(thread, &basic_info->user_time, + &basic_info->system_time); + + if (thread->policy & (POLICY_TIMESHARE|POLICY_RR|POLICY_FIFO)) { + /* + * Update lazy-evaluated scheduler info because someone wants it. + */ + if (thread->sched_stamp != sched_tick) + update_priority(thread); + + basic_info->sleep_time = 0; + + /* + * To calculate cpu_usage, first correct for timer rate, + * then for 5/8 ageing. The correction factor [3/5] is + * (1/(5/8) - 1). + */ + basic_info->cpu_usage = (thread->cpu_usage << SCHED_TICK_SHIFT) / + (TIMER_RATE / TH_USAGE_SCALE); + basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5; +#if SIMPLE_CLOCK + /* + * Clock drift compensation. 
+ */ + basic_info->cpu_usage = + (basic_info->cpu_usage * 1000000) / sched_usec; +#endif /* SIMPLE_CLOCK */ + } + else + basic_info->sleep_time = basic_info->cpu_usage = 0; + + basic_info->policy = thread->policy; + + flags = 0; + if (thread->state & TH_SWAPPED_OUT) + flags = TH_FLAGS_SWAPPED; + else + if (thread->state & TH_IDLE) + flags = TH_FLAGS_IDLE; + + state = 0; + if (thread->state & TH_HALTED) + state = TH_STATE_HALTED; + else + if (thread->state & TH_RUN) + state = TH_STATE_RUNNING; + else + if (thread->state & TH_UNINT) + state = TH_STATE_UNINTERRUPTIBLE; + else + if (thread->state & TH_SUSP) + state = TH_STATE_STOPPED; + else + if (thread->state & TH_WAIT) + state = TH_STATE_WAITING; + + basic_info->run_state = state; + basic_info->flags = flags; + + basic_info->suspend_count = thr_act->user_stop_count; + + thread_unlock(thread); + splx(s); + + *thread_info_count = THREAD_BASIC_INFO_COUNT; + + return (KERN_SUCCESS); + } + else + if (flavor == THREAD_SCHED_TIMESHARE_INFO) { + policy_timeshare_info_t ts_info; + + if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) + return (KERN_INVALID_ARGUMENT); + + ts_info = (policy_timeshare_info_t)thread_info_out; + + s = splsched(); + thread_lock(thread); + + if (thread->policy != POLICY_TIMESHARE) { + thread_unlock(thread); + splx(s); + + return (KERN_INVALID_POLICY); + } + + ts_info->base_priority = thread->priority; + ts_info->max_priority = thread->max_priority; + ts_info->cur_priority = thread->sched_pri; + + ts_info->depressed = (thread->depress_priority >= 0); + ts_info->depress_priority = thread->depress_priority; + + thread_unlock(thread); + splx(s); + + *thread_info_count = POLICY_TIMESHARE_INFO_COUNT; + + return (KERN_SUCCESS); + } + else + if (flavor == THREAD_SCHED_FIFO_INFO) { + policy_fifo_info_t fifo_info; + + if (*thread_info_count < POLICY_FIFO_INFO_COUNT) + return (KERN_INVALID_ARGUMENT); + + fifo_info = (policy_fifo_info_t)thread_info_out; + + s = splsched(); + thread_lock(thread); + + if 
(thread->policy != POLICY_FIFO) { + thread_unlock(thread); + splx(s); + + return (KERN_INVALID_POLICY); + } + + fifo_info->base_priority = thread->priority; + fifo_info->max_priority = thread->max_priority; + + fifo_info->depressed = (thread->depress_priority >= 0); + fifo_info->depress_priority = thread->depress_priority; + + thread_unlock(thread); + splx(s); + + *thread_info_count = POLICY_FIFO_INFO_COUNT; + + return (KERN_SUCCESS); + } + else + if (flavor == THREAD_SCHED_RR_INFO) { + policy_rr_info_t rr_info; + + if (*thread_info_count < POLICY_RR_INFO_COUNT) + return (KERN_INVALID_ARGUMENT); + + rr_info = (policy_rr_info_t) thread_info_out; + + s = splsched(); + thread_lock(thread); + + if (thread->policy != POLICY_RR) { + thread_unlock(thread); + splx(s); + + return (KERN_INVALID_POLICY); + } + + rr_info->base_priority = thread->priority; + rr_info->max_priority = thread->max_priority; + rr_info->quantum = min_quantum_ms; + + rr_info->depressed = (thread->depress_priority >= 0); + rr_info->depress_priority = thread->depress_priority; + + thread_unlock(thread); + splx(s); + + *thread_info_count = POLICY_RR_INFO_COUNT; + + return (KERN_SUCCESS); + } + + return (KERN_INVALID_ARGUMENT); +} + +void +thread_doreap( + register thread_t thread) +{ + thread_act_t thr_act; + struct ipc_port *pool_port; + + + thr_act = thread_lock_act(thread); + assert(thr_act && thr_act->thread == thread); + + act_locked_act_reference(thr_act); + pool_port = thr_act->pool_port; + + /* + * Replace `act_unlock_thread()' with individual + * calls. (`act_detach()' can change fields used + * to determine which locks are held, confusing + * `act_unlock_thread()'.) 
+ */ + rpc_unlock(thread); + if (pool_port != IP_NULL) + ip_unlock(pool_port); + act_unlock(thr_act); + + /* Remove the reference held by a rooted thread */ + if (pool_port == IP_NULL) + act_deallocate(thr_act); + + /* Remove the reference held by the thread: */ + act_deallocate(thr_act); +} + +static thread_call_data_t thread_reaper_call_data; + +/* + * reaper_thread: + * + * This kernel thread runs forever looking for threads to destroy + * (when they request that they be destroyed, of course). + * + * The reaper thread will disappear in the next revision of thread + * control when it's function will be moved into thread_dispatch. + */ +static void +_thread_reaper( + thread_call_param_t p0, + thread_call_param_t p1) +{ + register thread_t thread; + spl_t s; + + s = splsched(); + simple_lock(&reaper_lock); + + while ((thread = (thread_t) dequeue_head(&reaper_queue)) != THREAD_NULL) { + simple_unlock(&reaper_lock); + + /* + * wait for run bit to clear + */ + thread_lock(thread); + if (thread->state & TH_RUN) + panic("thread reaper: TH_RUN"); + thread_unlock(thread); + splx(s); + + thread_doreap(thread); + + s = splsched(); + simple_lock(&reaper_lock); + } + + simple_unlock(&reaper_lock); + splx(s); +} + +void +thread_reaper(void) +{ + thread_call_setup(&thread_reaper_call_data, _thread_reaper, NULL); + thread_reaper_call = &thread_reaper_call_data; + + _thread_reaper(NULL, NULL); +} + +kern_return_t +thread_assign( + thread_act_t thr_act, + processor_set_t new_pset) +{ +#ifdef lint + thread++; new_pset++; +#endif /* lint */ + return(KERN_FAILURE); +} + +/* + * thread_assign_default: + * + * Special version of thread_assign for assigning threads to default + * processor set. + */ +kern_return_t +thread_assign_default( + thread_act_t thr_act) +{ + return (thread_assign(thr_act, &default_pset)); +} + +/* + * thread_get_assignment + * + * Return current assignment for this thread. 
+ */ +kern_return_t +thread_get_assignment( + thread_act_t thr_act, + processor_set_t *pset) +{ + thread_t thread; + + if (thr_act == THR_ACT_NULL) + return(KERN_INVALID_ARGUMENT); + thread = act_lock_thread(thr_act); + if (thread == THREAD_NULL) { + act_unlock_thread(thr_act); + return(KERN_INVALID_ARGUMENT); + } + *pset = thread->processor_set; + act_unlock_thread(thr_act); + pset_reference(*pset); + return(KERN_SUCCESS); +} + +/* + * thread_wire: + * + * Specify that the target thread must always be able + * to run and to allocate memory. + */ +kern_return_t +thread_wire( + host_priv_t host_priv, + thread_act_t thr_act, + boolean_t wired) +{ + spl_t s; + thread_t thread; + extern void vm_page_free_reserve(int pages); + + if (thr_act == THR_ACT_NULL || host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_ARGUMENT); + + assert(host_priv == &realhost); + + thread = act_lock_thread(thr_act); + if (thread ==THREAD_NULL) { + act_unlock_thread(thr_act); + return(KERN_INVALID_ARGUMENT); + } + + /* + * This implementation only works for the current thread. + * See stack_privilege. + */ + if (thr_act != current_act()) + return KERN_INVALID_ARGUMENT; + + s = splsched(); + thread_lock(thread); + + if (wired) { + if (thread->vm_privilege == FALSE) + vm_page_free_reserve(1); /* XXX */ + thread->vm_privilege = TRUE; + } else { + if (thread->vm_privilege == TRUE) + vm_page_free_reserve(-1); /* XXX */ + thread->vm_privilege = FALSE; + } + + thread_unlock(thread); + splx(s); + act_unlock_thread(thr_act); + + /* + * Make the thread unswappable. + */ + if (wired) + thread_swappable(thr_act, FALSE); + + return KERN_SUCCESS; +} + +/* + * thread_collect_scan: + * + * Attempt to free resources owned by threads. + */ + +void +thread_collect_scan(void) +{ + /* This code runs very quickly! 
*/ +} + +boolean_t thread_collect_allowed = TRUE; +unsigned thread_collect_last_tick = 0; +unsigned thread_collect_max_rate = 0; /* in ticks */ + +/* + * consider_thread_collect: + * + * Called by the pageout daemon when the system needs more free pages. + */ + +void +consider_thread_collect(void) +{ + /* + * By default, don't attempt thread collection more frequently + * than once a second (one scheduler tick). + */ + + if (thread_collect_max_rate == 0) + thread_collect_max_rate = 2; /* sched_tick is a 1 second resolution 2 here insures at least 1 second interval */ + + if (thread_collect_allowed && + (sched_tick > + (thread_collect_last_tick + thread_collect_max_rate))) { + thread_collect_last_tick = sched_tick; + thread_collect_scan(); + } +} + +#if MACH_DEBUG +#if STACK_USAGE + +vm_size_t +stack_usage( + register vm_offset_t stack) +{ + int i; + + for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++) + if (((unsigned int *)stack)[i] != STACK_MARKER) + break; + + return KERNEL_STACK_SIZE - i * sizeof(unsigned int); +} + +/* + * Machine-dependent code should call stack_init + * before doing its own initialization of the stack. + */ + +static void +stack_init( + register vm_offset_t stack, + unsigned int bytes) +{ + if (stack_check_usage) { + int i; + + for (i = 0; i < bytes / sizeof(unsigned int); i++) + ((unsigned int *)stack)[i] = STACK_MARKER; + } +} + +/* + * Machine-dependent code should call stack_finalize + * before releasing the stack memory. 
+ */ + +void +stack_finalize( + register vm_offset_t stack) +{ + if (stack_check_usage) { + vm_size_t used = stack_usage(stack); + + simple_lock(&stack_usage_lock); + if (used > stack_max_usage) + stack_max_usage = used; + simple_unlock(&stack_usage_lock); + if (used > stack_max_use) { + printf("stack usage = %x\n", used); + panic("stack overflow"); + } + } +} + +#endif /*STACK_USAGE*/ +#endif /* MACH_DEBUG */ + +kern_return_t +host_stack_usage( + host_t host, + vm_size_t *reservedp, + unsigned int *totalp, + vm_size_t *spacep, + vm_size_t *residentp, + vm_size_t *maxusagep, + vm_offset_t *maxstackp) +{ +#if !MACH_DEBUG + return KERN_NOT_SUPPORTED; +#else + unsigned int total; + vm_size_t maxusage; + + if (host == HOST_NULL) + return KERN_INVALID_HOST; + + simple_lock(&stack_usage_lock); + maxusage = stack_max_usage; + simple_unlock(&stack_usage_lock); + + stack_statistics(&total, &maxusage); + + *reservedp = 0; + *totalp = total; + *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE); + *maxusagep = maxusage; + *maxstackp = 0; + return KERN_SUCCESS; + +#endif /* MACH_DEBUG */ +} + +/* + * Return info on stack usage for threads in a specific processor set + */ +kern_return_t +processor_set_stack_usage( + processor_set_t pset, + unsigned int *totalp, + vm_size_t *spacep, + vm_size_t *residentp, + vm_size_t *maxusagep, + vm_offset_t *maxstackp) +{ +#if !MACH_DEBUG + return KERN_NOT_SUPPORTED; +#else + unsigned int total; + vm_size_t maxusage; + vm_offset_t maxstack; + + register thread_t *threads; + register thread_t thread; + + unsigned int actual; /* this many things */ + unsigned int i; + + vm_size_t size, size_needed; + vm_offset_t addr; + + if (pset == PROCESSOR_SET_NULL) + return KERN_INVALID_ARGUMENT; + + size = 0; addr = 0; + + for (;;) { + pset_lock(pset); + if (!pset->active) { + pset_unlock(pset); + return KERN_INVALID_ARGUMENT; + } + + actual = pset->thread_count; + + /* do we have the memory we need? 
*/ + + size_needed = actual * sizeof(thread_t); + if (size_needed <= size) + break; + + /* unlock the pset and allocate more memory */ + pset_unlock(pset); + + if (size != 0) + kfree(addr, size); + + assert(size_needed > 0); + size = size_needed; + + addr = kalloc(size); + if (addr == 0) + return KERN_RESOURCE_SHORTAGE; + } + + /* OK, have memory and the processor_set is locked & active */ + + threads = (thread_t *) addr; + for (i = 0, thread = (thread_t) queue_first(&pset->threads); + i < actual; + i++, + thread = (thread_t) queue_next(&thread->pset_threads)) { + thread_reference(thread); + threads[i] = thread; + } + assert(queue_end(&pset->threads, (queue_entry_t) thread)); + + /* can unlock processor set now that we have the thread refs */ + pset_unlock(pset); + + /* calculate maxusage and free thread references */ + + total = 0; + maxusage = 0; + maxstack = 0; + for (i = 0; i < actual; i++) { + int cpu; + thread_t thread = threads[i]; + vm_offset_t stack = 0; + + /* + * thread->kernel_stack is only accurate if the + * thread isn't swapped and is not executing. + * + * Of course, we don't have the appropriate locks + * for these shenanigans. 
+ */ + + stack = thread->kernel_stack; + + for (cpu = 0; cpu < NCPUS; cpu++) + if (cpu_data[cpu].active_thread == thread) { + stack = active_stacks[cpu]; + break; + } + + if (stack != 0) { + total++; + + if (stack_check_usage) { + vm_size_t usage = stack_usage(stack); + + if (usage > maxusage) { + maxusage = usage; + maxstack = (vm_offset_t) thread; + } + } + } + + thread_deallocate(thread); + } + + if (size != 0) + kfree(addr, size); + + *totalp = total; + *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE); + *maxusagep = maxusage; + *maxstackp = maxstack; + return KERN_SUCCESS; + +#endif /* MACH_DEBUG */ +} + +static int split_funnel_off = 0; +funnel_t * +funnel_alloc( + int type) +{ + mutex_t *m; + funnel_t * fnl; + if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0){ + bzero(fnl, sizeof(funnel_t)); + if ((m = mutex_alloc(0)) == (mutex_t *)NULL) { + kfree(fnl, sizeof(funnel_t)); + return(THR_FUNNEL_NULL); + } + fnl->fnl_mutex = m; + fnl->fnl_type = type; + } + return(fnl); +} + +void +funnel_free( + funnel_t * fnl) +{ + mutex_free(fnl->fnl_mutex); + if (fnl->fnl_oldmutex) + mutex_free(fnl->fnl_oldmutex); + kfree(fnl, sizeof(funnel_t)); +} + +void +funnel_lock( + funnel_t * fnl) +{ + mutex_t * m; + + m = fnl->fnl_mutex; +restart: + mutex_lock(m); + fnl->fnl_mtxholder = current_thread(); + if (split_funnel_off && (m != fnl->fnl_mutex)) { + mutex_unlock(m); + m = fnl->fnl_mutex; + goto restart; + } +} + +void +funnel_unlock( + funnel_t * fnl) +{ + mutex_unlock(fnl->fnl_mutex); + fnl->fnl_mtxrelease = current_thread(); +} + +funnel_t * +thread_funnel_get( + void) +{ + thread_t th = current_thread(); + + if (th->funnel_state & TH_FN_OWNED) { + return(th->funnel_lock); + } + return(THR_FUNNEL_NULL); +} + +boolean_t +thread_funnel_set( + funnel_t * fnl, + boolean_t funneled) +{ + thread_t cur_thread; + boolean_t funnel_state_prev; + boolean_t intr; + + cur_thread = current_thread(); + funnel_state_prev = ((cur_thread->funnel_state & TH_FN_OWNED) == 
TH_FN_OWNED); + + if (funnel_state_prev != funneled) { + intr = ml_set_interrupts_enabled(FALSE); + + if (funneled == TRUE) { + if (cur_thread->funnel_lock) + panic("Funnel lock called when holding one %x", cur_thread->funnel_lock); + KERNEL_DEBUG(0x6032428 | DBG_FUNC_NONE, + fnl, 1, 0, 0, 0); + funnel_lock(fnl); + KERNEL_DEBUG(0x6032434 | DBG_FUNC_NONE, + fnl, 1, 0, 0, 0); + cur_thread->funnel_state |= TH_FN_OWNED; + cur_thread->funnel_lock = fnl; + } else { + if(cur_thread->funnel_lock->fnl_mutex != fnl->fnl_mutex) + panic("Funnel unlock when not holding funnel"); + cur_thread->funnel_state &= ~TH_FN_OWNED; + KERNEL_DEBUG(0x603242c | DBG_FUNC_NONE, + fnl, 1, 0, 0, 0); + + cur_thread->funnel_lock = THR_FUNNEL_NULL; + funnel_unlock(fnl); + } + (void)ml_set_interrupts_enabled(intr); + } else { + /* if we are trying to acquire funnel recursively + * check for funnel to be held already + */ + if (funneled && (fnl->fnl_mutex != cur_thread->funnel_lock->fnl_mutex)) { + panic("thread_funnel_set: already holding a different funnel"); + } + } + return(funnel_state_prev); +} + +boolean_t +thread_funnel_merge( + funnel_t * fnl, + funnel_t * otherfnl) +{ + mutex_t * m; + mutex_t * otherm; + funnel_t * gfnl; + extern int disable_funnel; + + if ((gfnl = thread_funnel_get()) == THR_FUNNEL_NULL) + panic("thread_funnel_merge called with no funnels held"); + + if (gfnl->fnl_type != 1) + panic("thread_funnel_merge called from non kernel funnel"); + + if (gfnl != fnl) + panic("thread_funnel_merge incorrect invocation"); + + if (disable_funnel || split_funnel_off) + return (KERN_FAILURE); + + m = fnl->fnl_mutex; + otherm = otherfnl->fnl_mutex; + + /* Acquire other funnel mutex */ + mutex_lock(otherm); + split_funnel_off = 1; + disable_funnel = 1; + otherfnl->fnl_mutex = m; + otherfnl->fnl_type = fnl->fnl_type; + otherfnl->fnl_oldmutex = otherm; /* save this for future use */ + + mutex_unlock(otherm); + return(KERN_SUCCESS); +} + +void +thread_set_cont_arg(int arg) +{ + thread_t th = 
current_thread(); + th->cont_arg = arg; +} + +int +thread_get_cont_arg(void) +{ + thread_t th = current_thread(); + return(th->cont_arg); +} + +/* + * Export routines to other components for things that are done as macros + * within the osfmk component. + */ +#undef thread_should_halt +boolean_t +thread_should_halt( + thread_shuttle_t th) +{ + return(thread_should_halt_fast(th)); +} + diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h new file mode 100644 index 000000000..80127b04d --- /dev/null +++ b/osfmk/kern/thread.h @@ -0,0 +1,634 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: thread.h + * Author: Avadis Tevanian, Jr. + * + * This file contains the structure definitions for threads. + * + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. 
+ * + */ + +#ifndef _KERN_THREAD_H_ +#define _KERN_THREAD_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for current_thread */ +#include + +/* + * Logically, a thread of control consists of two parts: + * a thread_shuttle, which may migrate during an RPC, and + * a thread_activation, which remains attached to a task. + * The thread_shuttle is the larger portion of the two-part thread, + * and contains scheduling info, messaging support, accounting info, + * and links to the thread_activation within which the shuttle is + * currently operating. + * + * It might make sense to have the thread_shuttle be a proper sub-structure + * of the thread, with the thread containing links to both the shuttle and + * activation. In order to reduce the scope and complexity of source + * changes and the overhead of maintaining these linkages, we have subsumed + * the shuttle into the thread, calling it a thread_shuttle. + * + * User accesses to threads always come in via the user's thread port, + * which gets translated to a pointer to the target thread_activation. + * Kernel accesses intended to effect the entire thread, typically use + * a pointer to the thread_shuttle (current_thread()) as the target of + * their operations. This makes sense given that we have subsumed the + * shuttle into the thread_shuttle, eliminating one set of linkages. + * Operations effecting only the shuttle may use a thread_shuttle_t + * to indicate this. + * + * The current_act() macro returns a pointer to the current thread_act, while + * the current_thread() macro returns a pointer to the currently active + * thread_shuttle (representing the thread in its entirety). + */ + +/* + * Possible results of thread_block - returned in + * current_thread()->wait_result. 
+ */ +#define THREAD_AWAKENED 0 /* normal wakeup */ +#define THREAD_TIMED_OUT 1 /* timeout expired */ +#define THREAD_INTERRUPTED 2 /* interrupted by clear_wait */ +#define THREAD_RESTART 3 /* restart operation entirely */ + +/* + * Interruptible flags for assert_wait + * + */ +#define THREAD_UNINT 0 /* not interruptible */ +#define THREAD_INTERRUPTIBLE 1 /* may not be restartable */ +#define THREAD_ABORTSAFE 2 /* abortable safely */ + +#ifdef MACH_KERNEL_PRIVATE +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct { + int fnl_type; /* funnel type */ + mutex_t * fnl_mutex; /* underlying mutex for the funnel */ + void * fnl_mtxholder; /* thread (last)holdng mutex */ + void * fnl_mtxrelease; /* thread (last)releasing mutex */ + mutex_t * fnl_oldmutex; /* Mutex before collapsing split funnel */ +} funnel_t; + + +typedef struct thread_shuttle { + /* + * Beginning of thread_shuttle proper. When the thread is on + * a wait queue, these three fields are in treated as an un- + * official union with a wait_queue_element. If you change + * these, you must change that definition as well. + */ + queue_chain_t links; /* current run/wait queue links */ + run_queue_t runq; /* run queue p is on SEE BELOW */ + int whichq; /* which queue level p is on */ + +/* + * NOTE: The runq field in the thread structure has an unusual + * locking protocol. If its value is RUN_QUEUE_NULL, then it is + * locked by the thread_lock, but if its value is something else + * (i.e. a run_queue) then it is locked by that run_queue's lock. 
+ */ + + /* Thread bookkeeping */ + queue_chain_t pset_threads; /* list of all shuttles in proc set */ + + /* Self-preservation */ + decl_simple_lock_data(,lock) /* scheduling lock (thread_lock()) */ + decl_simple_lock_data(,wake_lock) /* covers wake_active (wake_lock())*/ + decl_mutex_data(,rpc_lock) /* RPC lock (rpc_lock()) */ + int ref_count; /* number of references to me */ + + vm_offset_t kernel_stack; /* accurate only if the thread is + not swapped and not executing */ + + vm_offset_t stack_privilege;/* reserved kernel stack */ + + /* Blocking information */ + int reason; /* why we blocked */ + event_t wait_event; /* event we are waiting on */ + kern_return_t wait_result; /* outcome of wait - + may be examined by this thread + WITHOUT locking */ + wait_queue_t wait_queue; /* wait queue we are currently on */ + queue_chain_t wait_link; /* event's wait queue link */ + boolean_t wake_active; /* Someone is waiting for this + thread to become suspended */ + int state; /* Thread state: */ + boolean_t preempt; /* Thread is undergoing preemption */ + boolean_t interruptible; /* Thread is interruptible */ + +#if ETAP_EVENT_MONITOR + int etap_reason; /* real reason why we blocked */ + boolean_t etap_trace; /* ETAP trace status */ +#endif /* ETAP_EVENT_MONITOR */ + +/* + * Thread states [bits or'ed] + */ +#define TH_WAIT 0x01 /* thread is queued for waiting */ +#define TH_SUSP 0x02 /* thread has been asked to stop */ +#define TH_RUN 0x04 /* thread is running or on runq */ +#define TH_UNINT 0x08 /* thread is waiting uninteruptibly */ +#define TH_HALTED 0x10 /* thread is halted at clean point ? 
*/ + +#define TH_ABORT 0x20 /* abort interruptible waits */ +#define TH_SWAPPED_OUT 0x40 /* thread is swapped out */ + +#define TH_IDLE 0x80 /* thread is an idle thread */ + +#define TH_SCHED_STATE (TH_WAIT|TH_SUSP|TH_RUN|TH_UNINT) + +#define TH_STACK_HANDOFF 0x0100 /* thread has no kernel stack */ +#define TH_STACK_COMING_IN 0x0200 /* thread is waiting for kernel stack */ +#define TH_STACK_STATE (TH_STACK_HANDOFF | TH_STACK_COMING_IN) + +#define TH_TERMINATE 0x0400 /* thread is terminating */ + + /* Stack handoff information */ + void (*continuation)(void); /* start here next time dispatched */ + int cont_arg; /* XXX continuation argument */ + + /* Scheduling information */ + integer_t importance; /* task-relative importance */ + integer_t sched_mode; /* scheduling mode bits */ +#define TH_MODE_REALTIME 0x0001 + struct { /* see mach/thread_policy.h */ + natural_t period; + natural_t computation; + natural_t constraint; + boolean_t preemptible; + } realtime; + + integer_t priority; /* base priority */ + integer_t sched_pri; /* scheduled (current) priority */ + integer_t depress_priority; /* priority to restore */ + integer_t max_priority; + + natural_t cpu_usage; /* exp. decaying cpu usage [%cpu] */ + natural_t sched_usage; /* load-weighted cpu usage [sched] */ + natural_t sched_stamp; /* last time priority was updated */ + natural_t sleep_stamp; /* last time in TH_WAIT state */ + + /* 'Obsolete' stuff that cannot be removed yet */ + integer_t policy; + integer_t sp_state; + integer_t unconsumed_quantum; + + /* VM global variables */ + boolean_t vm_privilege; /* can use reserved memory? 
*/ + vm_offset_t recover; /* page fault recovery (copyin/out) */ + + /* IPC data structures */ + + struct ipc_kmsg_queue ith_messages; + + mach_port_t ith_mig_reply; /* reply port for mig */ + mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */ + + /* Various bits of stashed state */ + union { + struct { + mach_msg_return_t state; /* receive state */ + ipc_object_t object; /* object received on */ + mach_msg_header_t *msg; /* receive buffer pointer */ + mach_msg_size_t msize; /* max size for recvd msg */ + mach_msg_option_t option; /* options for receive */ + mach_msg_size_t slist_size; /* scatter list size */ + struct ipc_kmsg *kmsg; /* received message */ + mach_port_seqno_t seqno; /* seqno of recvd message */ + void (*continuation)(mach_msg_return_t); + } receive; + struct { + struct semaphore *waitsemaphore; /* semaphore ref */ + struct semaphore *signalsemaphore; /* semaphore ref */ + int options; /* semaphore options */ + kern_return_t result; /* primary result */ + void (*continuation)(kern_return_t); + } sema; + struct { + struct sf_policy *policy; /* scheduling policy */ + int option; /* switch option */ + } swtch; + char *other; /* catch-all for other state */ + } saved; + + /* Timing data structures */ + timer_data_t user_timer; /* user mode timer */ + timer_data_t system_timer; /* system mode timer */ + timer_data_t depressed_timer;/* depressed priority timer */ + timer_save_data_t user_timer_save; /* saved user timer value */ + timer_save_data_t system_timer_save; /* saved sys timer val. */ + /*** ??? 
should the next two fields be moved to SP-specific struct?***/ + unsigned int cpu_delta; /* cpu usage since last update */ + unsigned int sched_delta; /* weighted cpu usage since update */ + + /* Timed wait expiration */ + timer_call_data_t wait_timer; + integer_t wait_timer_active; + boolean_t wait_timer_is_set; + + /* Priority depression expiration */ + thread_call_data_t depress_timer; + + /* Ast/Halt data structures */ + boolean_t active; /* how alive is the thread */ + + /* Processor data structures */ + processor_set_t processor_set; /* assigned processor set */ +#if NCPUS > 1 + processor_t bound_processor; /* bound to processor ?*/ +#endif /* NCPUS > 1 */ +#if MACH_HOST + boolean_t may_assign; /* may assignment change? */ + boolean_t assign_active; /* someone waiting for may_assign */ +#endif /* MACH_HOST */ + +#if XKMACHKERNEL + int xk_type; +#endif /* XKMACHKERNEL */ + +#if NCPUS > 1 + processor_t last_processor; /* processor this last ran on */ +#if MACH_LOCK_MON + unsigned lock_stack; /* number of locks held */ +#endif /* MACH_LOCK_MON */ +#endif /* NCPUS > 1 */ + + int at_safe_point; /* thread_abort_safely allowed */ + int funnel_state; +#define TH_FN_OWNED 0x1 /* we own the funnel lock */ +#define TH_FN_REFUNNEL 0x2 /* must reaquire funnel lock when unblocking */ + funnel_t *funnel_lock; +#if MACH_LDEBUG + /* + * Debugging: track acquired mutexes and locks. + * Because a thread can block while holding such + * synchronizers, we think of the thread as + * "owning" them. 
+ */ +#define MUTEX_STACK_DEPTH 20 +#define LOCK_STACK_DEPTH 20 + mutex_t *mutex_stack[MUTEX_STACK_DEPTH]; + lock_t *lock_stack[LOCK_STACK_DEPTH]; + unsigned int mutex_stack_index; + unsigned int lock_stack_index; + unsigned mutex_count; /* XXX to be deleted XXX */ + boolean_t kthread; /* thread is a kernel thread */ +#endif /* MACH_LDEBUG */ + + /* + * End of thread_shuttle proper + */ + + /* + * Migration and thread_activation linkage information + */ + struct thread_activation *top_act; /* "current" thr_act */ + +} Thread_Shuttle; + +#define THREAD_SHUTTLE_NULL ((thread_shuttle_t)0) + +#define ith_state saved.receive.state +#define ith_object saved.receive.object +#define ith_msg saved.receive.msg +#define ith_msize saved.receive.msize +#define ith_option saved.receive.option +#define ith_scatter_list_size saved.receive.slist_size +#define ith_continuation saved.receive.continuation +#define ith_kmsg saved.receive.kmsg +#define ith_seqno saved.receive.seqno + +#define sth_waitsemaphore saved.sema.waitsemaphore +#define sth_signalsemaphore saved.sema.signalsemaphore +#define sth_options saved.sema.options +#define sth_result saved.sema.result +#define sth_continuation saved.sema.continuation + +extern thread_act_t active_kloaded[NCPUS]; /* "" kernel-loaded acts */ +extern vm_offset_t active_stacks[NCPUS]; /* active kernel stacks */ +extern vm_offset_t kernel_stack[NCPUS]; + +#ifndef MACHINE_STACK_STASH +/* + * MD Macro to fill up global stack state, + * keeping the MD structure sizes + games private + */ +#define MACHINE_STACK_STASH(stack) \ +MACRO_BEGIN \ + mp_disable_preemption(); \ + active_stacks[cpu_number()] = (stack); \ + kernel_stack[cpu_number()] = (stack) + KERNEL_STACK_SIZE; \ + mp_enable_preemption(); \ +MACRO_END +#endif /* MACHINE_STACK_STASH */ + +/* + * Kernel-only routines + */ + +/* Initialize thread module */ +extern void thread_init(void); + +/* Take reference on thread (make sure it doesn't go away) */ +extern void thread_reference( + 
thread_t thread); + +/* Release reference on thread */ +extern void thread_deallocate( + thread_t thread); + +/* Set priority of calling thread */ +extern void thread_set_own_priority( + int priority); + +/* Start a thread at specified routine */ +#define thread_start(thread, start) \ + (thread)->continuation = (start) + + +/* Reaps threads waiting to be destroyed */ +extern void thread_reaper(void); + + +#if MACH_HOST +/* Preclude thread processor set assignment */ +extern void thread_freeze( + thread_t thread); + +/* Assign thread to a processor set */ +extern void thread_doassign( + thread_t thread, + processor_set_t new_pset, + boolean_t release_freeze); + +/* Allow thread processor set assignment */ +extern void thread_unfreeze( + thread_t thread); + +#endif /* MACH_HOST */ + +/* Ensure thread always has a kernel stack */ +extern void stack_privilege( + thread_t thread); + +extern void consider_thread_collect(void); + +/* + * Arguments to specify aggressiveness to thread halt. + * Can't have MUST_HALT and SAFELY at the same time.
+ */ +#define THREAD_HALT_NORMAL 0 +#define THREAD_HALT_MUST_HALT 1 /* no deadlock checks */ +#define THREAD_HALT_SAFELY 2 /* result must be restartable */ + +/* + * Macro-defined routines + */ + +#define thread_pcb(th) ((th)->pcb) + +#define thread_lock_init(th) \ + simple_lock_init(&(th)->lock, ETAP_THREAD_LOCK) +#define thread_lock(th) simple_lock(&(th)->lock) +#define thread_unlock(th) simple_unlock(&(th)->lock) + +#define thread_should_halt_fast(thread) \ + (!(thread)->top_act || \ + !(thread)->top_act->active || \ + (thread)->top_act->ast & (AST_HALT|AST_TERMINATE)) + +#define thread_should_halt(thread) thread_should_halt_fast(thread) + +#define rpc_lock_init(th) mutex_init(&(th)->rpc_lock, ETAP_THREAD_RPC) +#define rpc_lock(th) mutex_lock(&(th)->rpc_lock) +#define rpc_lock_try(th) mutex_try(&(th)->rpc_lock) +#define rpc_unlock(th) mutex_unlock(&(th)->rpc_lock) + +/* + * Lock to cover wake_active only; like thread_lock(), is taken + * at splsched(). Used to avoid calling into scheduler with a + * thread_lock() held. Precedes thread_lock() (and other scheduling- + * related locks) in the system lock ordering. 
+ */ +#define wake_lock_init(th) \ + simple_lock_init(&(th)->wake_lock, ETAP_THREAD_WAKE) +#define wake_lock(th) simple_lock(&(th)->wake_lock) +#define wake_unlock(th) simple_unlock(&(th)->wake_lock) + +static __inline__ vm_offset_t current_stack(void); +static __inline__ vm_offset_t +current_stack(void) +{ + vm_offset_t ret; + + mp_disable_preemption(); + ret = active_stacks[cpu_number()]; + mp_enable_preemption(); + return ret; +} + + +extern void pcb_module_init(void); + +extern void pcb_init( + thread_act_t thr_act); + +extern void pcb_terminate( + thread_act_t thr_act); + +extern void pcb_collect( + thread_act_t thr_act); + +extern void pcb_user_to_kernel( + thread_act_t thr_act); + +extern kern_return_t thread_setstatus( + thread_act_t thr_act, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t count); + +extern kern_return_t thread_getstatus( + thread_act_t thr_act, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count); + +extern boolean_t stack_alloc_try( + thread_t thread, + void (*start_pos)(thread_t)); + +/* This routine now used only internally */ +extern kern_return_t thread_info_shuttle( + thread_act_t thr_act, + thread_flavor_t flavor, + thread_info_t thread_info_out, + mach_msg_type_number_t *thread_info_count); + +extern void thread_user_to_kernel( + thread_t thread); + +/* Machine-dependent routines */ +extern void thread_machine_init(void); + +extern void thread_machine_set_current( + thread_t thread ); + +extern kern_return_t thread_machine_create( + thread_t thread, + thread_act_t thr_act, + void (*start_pos)(thread_t)); + +extern void thread_set_syscall_return( + thread_t thread, + kern_return_t retval); + +extern void thread_machine_destroy( + thread_t thread ); + +extern void thread_machine_flush( + thread_act_t thr_act); + +extern thread_t kernel_thread_with_priority( + task_t task, + integer_t priority, + void (*start)(void), + boolean_t start_running); + +extern void funnel_lock(funnel_t *); + +extern void 
funnel_unlock(funnel_t *); + +#else /* !MACH_KERNEL_PRIVATE */ + +typedef struct __funnel__ funnel_t; + +extern boolean_t thread_should_halt(thread_t); + +#endif /* !MACH_KERNEL_PRIVATE */ + +#define THR_FUNNEL_NULL (funnel_t *)0 + +extern thread_t kernel_thread( + task_t task, + void (*start)(void)); + +extern void thread_terminate_self(void); + +extern funnel_t * funnel_alloc(int); + +extern funnel_t * thread_funnel_get(void); + +extern boolean_t thread_funnel_set(funnel_t * fnl, boolean_t funneled); + +extern boolean_t thread_funnel_merge(funnel_t * fnl, funnel_t * otherfnl); + +extern void thread_set_cont_arg(int); + +extern int thread_get_cont_arg(void); + +/* JMM - These are only temporary */ +extern boolean_t is_thread_running(thread_t); /* True if TH_RUN */ +extern boolean_t is_thread_idle(thread_t); /* True if TH_IDLE */ +extern event_t get_thread_waitevent(thread_t); +extern kern_return_t get_thread_waitresult(thread_t); + +#endif /* _KERN_THREAD_H_ */ diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c new file mode 100644 index 000000000..459cf608a --- /dev/null +++ b/osfmk/kern/thread_act.c @@ -0,0 +1,2319 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Center for Software Science (CSS). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSS requests users of this software to return to css-dist@cs.utah.edu any + * improvements that they make and grant CSS redistribution rights. + * + * Author: Bryan Ford, University of Utah CSS + * + * Thread_Activation management routines + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /*** ??? fix so this can be removed ***/ +#include +#include + +/* + * Debugging printf control + */ +#if MACH_ASSERT +unsigned int watchacts = 0 /* WA_ALL */ + ; /* Do-it-yourself & patchable */ +#endif + +/* + * Track the number of times we need to swapin a thread to deallocate it. + */ +int act_free_swapin = 0; + +/* + * Forward declarations for functions local to this file. 
+ */ +kern_return_t act_abort( thread_act_t, int); +void special_handler(ReturnHandler *, thread_act_t); +void nudge(thread_act_t); +kern_return_t act_set_state_locked(thread_act_t, int, + thread_state_t, + mach_msg_type_number_t); +kern_return_t act_get_state_locked(thread_act_t, int, + thread_state_t, + mach_msg_type_number_t *); +void act_set_apc(thread_act_t); +void act_clr_apc(thread_act_t); +void act_user_to_kernel(thread_act_t); +void act_ulock_release_all(thread_act_t thr_act); + +void install_special_handler_locked(thread_act_t); + +static zone_t thr_act_zone; + +/* + * Thread interfaces accessed via a thread_activation: + */ + + +/* + * Internal routine to terminate a thread. + * Called with task locked. + */ +kern_return_t +thread_terminate_internal( + register thread_act_t thr_act) +{ + thread_t thread; + task_t task; + struct ipc_port *iplock; + kern_return_t ret; +#if NCPUS > 1 + boolean_t held; +#endif /* NCPUS > 1 */ + +#if THREAD_SWAPPER + thread_swap_disable(thr_act); +#endif /* THREAD_SWAPPER */ + + thread = act_lock_thread(thr_act); + if (!thr_act->active) { + act_unlock_thread(thr_act); + return(KERN_TERMINATED); + } + +#if NCPUS > 1 + /* + * Make sure this thread enters the kernel + */ + if (thread != current_thread()) { + thread_hold(thr_act); + act_unlock_thread(thr_act); + + if (!thread_stop_wait(thread)) { + ret = KERN_ABORTED; + (void)act_lock_thread(thr_act); + thread_release(thr_act); + act_unlock_thread(thr_act); + return (ret); + } + + held = TRUE; + (void)act_lock_thread(thr_act); + } else { + held = FALSE; + } +#endif /* NCPUS > 1 */ + + assert(thr_act->active); + act_disable_task_locked(thr_act); + ret = act_abort(thr_act,FALSE); + act_unlock_thread(thr_act); + +#if NCPUS > 1 + if (held) { + thread_unstop(thread); + (void)act_lock_thread(thr_act); + thread_release(thr_act); + act_unlock_thread(thr_act); + } +#endif /* NCPUS > 1 */ + return(ret); +} + +/* + * Terminate a thread. Called with nothing locked. + * Returns same way. 
+ */ +kern_return_t +thread_terminate( + register thread_act_t thr_act) +{ + task_t task; + kern_return_t ret; + + if (thr_act == THR_ACT_NULL) + return KERN_INVALID_ARGUMENT; + + task = thr_act->task; + if (((task == kernel_task) || (thr_act->kernel_loaded == TRUE)) + && (current_act() != thr_act)) { + return(KERN_FAILURE); + } + + /* + * Take the task lock and then call the internal routine + * that terminates a thread (it needs the task locked). + */ + task_lock(task); + ret = thread_terminate_internal(thr_act); + task_unlock(task); + + /* + * If a kernel thread is terminating itself, force an AST here. + * Kernel threads don't normally pass through the AST checking + * code - and all threads finish their own termination in the + * special handler APC. + */ + if (((thr_act->task == kernel_task) || (thr_act->kernel_loaded == TRUE)) + && (current_act() == thr_act)) { + ast_taken(FALSE, AST_APC, 0); + panic("thread_terminate(): returning from ast_taken() for %x kernel activation\n", thr_act); + } + + return ret; +} + +/* + * thread_hold: + * + * Suspend execution of the specified thread. + * This is a recursive-style suspension of the thread, a count of + * suspends is maintained. + * + * Called with thr_act locked "appropriately" for synchrony with + * RPC (see act_lock_thread()). Returns same way. + */ +void +thread_hold( + register thread_act_t thr_act) +{ + if (thr_act->suspend_count++ == 0) { + install_special_handler(thr_act); + nudge(thr_act); + } +} + +/* + * Decrement internal suspension count for thr_act, setting thread + * runnable when count falls to zero. + * + * Called with thr_act locked "appropriately" for synchrony + * with RPC (see act_lock_thread()). 
 */
void
thread_release(
	register thread_act_t	thr_act)
{
	/* Only the final release (count reaching zero) nudges the thread. */
	if( thr_act->suspend_count &&
	    (--thr_act->suspend_count == 0) )
		nudge( thr_act );
}

/*
 * thread_suspend:
 *
 * User-visible suspend.  The first user stop also takes an internal
 * suspend, installs the special handler, and (for a running remote
 * thread) waits for the target to actually stop.
 */
kern_return_t
thread_suspend(
	register thread_act_t	thr_act)
{
	thread_t	thread;

	if (thr_act == THR_ACT_NULL) {
		return(KERN_INVALID_ARGUMENT);
	}
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thr_act->user_stop_count++ == 0 &&
	    thr_act->suspend_count++ == 0 ) {
		install_special_handler(thr_act);
		if (thread &&
		    thr_act == thread->top_act && thread != current_thread()) {
			nudge(thr_act);
			act_unlock_thread(thr_act);
			/* Wait for the remote thread to come to rest. */
			(void)thread_wait(thread);
		}
		else {
			/*
			 * No need to wait for target thread
			 */
			act_unlock_thread(thr_act);
		}
	}
	else {
		/*
		 * Thread is already suspended
		 */
		act_unlock_thread(thr_act);
	}
	return(KERN_SUCCESS);
}

/*
 * thread_resume:
 *
 * Undo one thread_suspend().  Removing the last user stop also
 * drops the matching internal suspend and nudges the thread back
 * to life.
 */
kern_return_t
thread_resume(
	register thread_act_t	thr_act)
{
	register kern_return_t	ret;
	spl_t			s;	/* NOTE(review): unused local */
	thread_t		thread;

	if (thr_act == THR_ACT_NULL)
		return(KERN_INVALID_ARGUMENT);
	thread = act_lock_thread(thr_act);
	ret = KERN_SUCCESS;

	if (thr_act->active) {
		if (thr_act->user_stop_count > 0) {
			if( --thr_act->user_stop_count == 0 ) {
				--thr_act->suspend_count;
				nudge( thr_act );
			}
		}
		else
			ret = KERN_FAILURE;	/* not suspended */
	}
	else
		ret = KERN_TERMINATED;
	act_unlock_thread( thr_act );
	return ret;
}

/*
 * This routine walks toward the head of an RPC chain starting at
 * a specified thread activation. An alert bit is set and a special
 * handler is installed for each thread it encounters.
 *
 * The target thread act and thread shuttle are already locked.
 */
kern_return_t
post_alert(
	register thread_act_t	thr_act,
	unsigned		alert_bits )
{
	thread_act_t	next;
	thread_t	thread;		/* NOTE(review): unused local */

	/*
	 * Chase the chain, setting alert bits and installing
	 * special handlers for each thread act.
	 */
	/*** Not yet SMP safe ***/
	/*** Worse, where's the activation locking as the chain is walked? ***/
	for (next = thr_act; next != THR_ACT_NULL; next = next->higher) {
		next->alerts |= alert_bits;
		install_special_handler_locked(next);
	}

	return(KERN_SUCCESS);
}

/*
 * thread_depress_abort:
 *
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort(
	register thread_act_t	thr_act)
{
	register thread_t	thread;
	kern_return_t		result;
	sched_policy_t		*policy;
	spl_t			s;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	/* if activation is terminating, this operation is not meaningful */
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	/* Snapshot the scheduling policy under the thread lock... */
	s = splsched();
	thread_lock(thread);
	policy = &sched_policy[thread->policy];
	thread_unlock(thread);
	splx(s);

	/* ...then let the policy perform the actual depression abort. */
	result = policy->sp_ops.sp_thread_depress_abort(policy, thread);

	act_unlock_thread(thr_act);

	return (result);
}


/*
 * Already locked: all RPC-related locks for thr_act (see
 * act_lock_thread()).
 *
 * Abort the activation: mark it (and any RPC chain headward of it)
 * for termination and kick the shuttle out of any interruptible wait.
 */
kern_return_t
act_abort( thread_act_t thr_act, int chain_break )
{
	spl_t		spl;
	thread_t	thread;		/* NOTE(review): unused local */
	struct ipc_port	*iplock = thr_act->pool_port;
	thread_act_t	orphan;
	kern_return_t	kr;		/* NOTE(review): unused local */
	etap_data_t	probe_data;

	/* Record the abort event for ETAP tracing. */
	ETAP_DATA_LOAD(probe_data[0], thr_act);
	ETAP_DATA_LOAD(probe_data[1], thr_act->thread);
	ETAP_PROBE_DATA(ETAP_P_ACT_ABORT,
			0,
			current_thread(),
			&probe_data,
			ETAP_DATA_ENTRY*2);

	/*
	 * If the target thread activation is not the head...
	 */
	if ( thr_act->thread->top_act != thr_act ) {
		/*
		 * mark the activation for abort,
		 * update the suspend count,
		 * always install the special handler
		 */
		install_special_handler(thr_act);

#ifdef AGRESSIVE_ABORT	/* NOTE(review): historical (mis)spelling preserved */
		/* release state buffer for target's outstanding invocation */
		if (unwind_invoke_state(thr_act) != KERN_SUCCESS) {
			panic("unwind_invoke_state failure");
		}

		/* release state buffer for target's incoming invocation */
		if (thr_act->lower != THR_ACT_NULL) {
			if (unwind_invoke_state(thr_act->lower)
			    != KERN_SUCCESS) {
				panic("unwind_invoke_state failure");
			}
		}

		/* unlink target thread activation from shuttle chain */
		if ( thr_act->lower == THR_ACT_NULL ) {
			/*
			 * This is the root thread activation of the chain.
			 * Unlink the root thread act from the bottom of
			 * the chain.
			 */
			thr_act->higher->lower = THR_ACT_NULL;
		} else {
			/*
			 * This thread act is in the middle of the chain.
			 * Unlink the thread act from the middle of the chain.
			 */
			thr_act->higher->lower = thr_act->lower;
			thr_act->lower->higher = thr_act->higher;

			/* set the terminated bit for RPC return processing */
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}

		orphan = thr_act->higher;

		/* remove the activation from its thread pool */
		/* (note: this is okay for "rooted threads," too) */
		act_locked_act_set_thread_pool(thr_act, IP_NULL);

		/* (just to be thorough) release the IP lock */
		if (iplock != IP_NULL) ip_unlock(iplock);

		/* release one more reference for a rooted thread */
		if (iplock == IP_NULL) act_locked_act_deallocate(thr_act);

		/* Presumably, the only reference to this activation is
		 * now held by the caller of this routine. */
		assert(thr_act->ref_count == 1);
#else	/*AGRESSIVE_ABORT*/
		/* If there is a lower activation in the RPC chain...
		 */
		if (thr_act->lower != THR_ACT_NULL) {
			/* ...indicate the server activation was terminated */
			thr_act->lower->alerts |= SERVER_TERMINATED;
		}
		/* Mark (and process) any orphaned activations */
		orphan = thr_act->higher;
#endif	/*AGRESSIVE_ABORT*/

		/* indicate client of orphaned chain has been terminated */
		orphan->alerts |= CLIENT_TERMINATED;

		/*
		 * Set up posting of alert to headward portion of
		 * the RPC chain.
		 */
		/*** fix me -- orphan act is not locked ***/
		post_alert(orphan, ORPHANED);

		/*
		 * Get attention of head of RPC chain.
		 */
		nudge(thr_act->thread->top_act);
		return (KERN_SUCCESS);
	}

	/*
	 * If the target thread is the end of the chain, the thread
	 * has to be marked for abort and rip it out of any wait.
	 */
	spl = splsched();
	thread_lock(thr_act->thread);
	if (thr_act->thread->top_act == thr_act) {
		thr_act->thread->state |= TH_ABORT;
		/*
		 * NOTE(review): this condition is vacuously true right
		 * after the |= above; it looks like it was meant to test
		 * a wait-state bit instead -- confirm against other Mach
		 * sources before changing.
		 */
		if (thr_act->thread->state & TH_ABORT)
			clear_wait_internal(thr_act->thread, THREAD_INTERRUPTED);
		thread_unlock(thr_act->thread);
		splx(spl);
		install_special_handler(thr_act);
		nudge( thr_act );
	}
	return KERN_SUCCESS;
}

/*
 * thread_abort:
 *
 * Abort a thread other than the caller, forcing it out of any
 * interruptible wait.  Returns KERN_TERMINATED if the activation
 * is already shutting down.
 */
kern_return_t
thread_abort(
	register thread_act_t	thr_act)
{
	int		ret;
	thread_t	thread;		/* NOTE(review): assigned but never read */

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);
	/*
	 * Lock the target thread and the current thread now,
	 * in case thread_halt() ends up being called below.
	 */
	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	ret = act_abort( thr_act, FALSE );
	act_unlock_thread( thr_act );
	return ret;
}

/*
 * thread_abort_safely:
 *
 * Like thread_abort(), but only interrupts the target if it is
 * blocked at a "safe point"; otherwise returns KERN_FAILURE and
 * leaves the target undisturbed.
 */
kern_return_t
thread_abort_safely(
	register thread_act_t	thr_act)
{
	thread_t	thread;
	spl_t		s;

	if (thr_act == THR_ACT_NULL || thr_act == current_act())
		return(KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}
	if (thread->top_act != thr_act) {
		act_unlock_thread(thr_act);
		return(KERN_FAILURE);
	}
	s = splsched();
	thread_lock(thread);

	if ( thread->at_safe_point ) {
		/*
		 * It's an abortable wait, clear it, then
		 * let the thread go and return successfully.
		 */
		clear_wait_internal(thread, THREAD_INTERRUPTED);
		thread_unlock(thread);
		act_unlock_thread(thr_act);
		splx(s);
		return KERN_SUCCESS;
	}

	/*
	 * if not stopped at a safepoint, just let it go and return failure.
	 */
	thread_unlock(thread);
	act_unlock_thread(thr_act);
	splx(s);
	return KERN_FAILURE;
}

/*** backward compatibility hacks ***/
/*
 * NOTE(review): the bracketed header names on the following four
 * #include lines were lost in extraction (angle-bracket content
 * stripped); restore them from the original source file.
 */
#include
#include
#include
#include

/*
 * thread_info:
 *
 * Return information about an activation's shuttle, delegating the
 * flavor-specific work to thread_info_shuttle().
 */
kern_return_t
thread_info(
	thread_act_t		thr_act,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	register thread_t	thread;
	kern_return_t		result;

	if (thr_act == THR_ACT_NULL)
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);

		return (KERN_TERMINATED);
	}

	result = thread_info_shuttle(thr_act, flavor,
			thread_info_out, thread_info_count);

	act_unlock_thread(thr_act);

	return (result);
}

/*
 * Routine:	thread_get_special_port [kernel call]
 * Purpose:
 *	Clones a send right for one of the thread's
 *	special ports.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	KERN_SUCCESS		Extracted a send right.
 *	KERN_INVALID_ARGUMENT	The thread is null.
 *	KERN_FAILURE		The thread is dead.
 *	KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t	*whichp;
	ipc_port_t	port;
	thread_t	thread;	/* NOTE(review): assigned but never read */

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n",
			thr_act, which, portp, (portp ? *portp : 0));
#endif	/* MACH_ASSERT */

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;
	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_sself;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	/* Clone a send right to hand back to the caller. */
	port = ipc_port_copy_send(*whichp);
	act_unlock_thread(thr_act);

	*portp = port;
	return KERN_SUCCESS;
}

/*
 * Routine:	thread_set_special_port [kernel call]
 * Purpose:
 *	Changes one of the thread's special ports,
 *	setting it to the supplied send right.
 * Conditions:
 *	Nothing locked. If successful, consumes
 *	the supplied send right.
 * Returns:
 *	KERN_SUCCESS		Changed the special port.
 *	KERN_INVALID_ARGUMENT	The thread is null.
 *	KERN_FAILURE		The thread is dead.
 *	KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_act_t	thr_act,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t	*whichp;
	ipc_port_t	old;
	thread_t	thread;	/* NOTE(review): assigned but never read */

#if	MACH_ASSERT
	if (watchacts & WA_PORT)
		printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n",
			thr_act, which, port);
#endif	/* MACH_ASSERT */

	if (thr_act == 0)
		return KERN_INVALID_ARGUMENT;

	thread = act_lock_thread(thr_act);
	switch (which) {
	case THREAD_KERNEL_PORT:
		whichp = &thr_act->ith_self;
		break;

	default:
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}

	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return KERN_FAILURE;
	}

	/* Swap in the new right under the lock; release the displaced
	 * right (if valid) only after dropping the lock. */
	old = *whichp;
	*whichp = port;
	act_unlock_thread(thr_act);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}

/*
 * thread state should always be accessible by locking the thread
 * and copying it. The activation messes things up so for right
 * now if it's not the top of the chain, use a special handler to
 * get the information when the shuttle returns to the activation.
 */
kern_return_t
thread_get_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,		/* pointer to OUT array */
	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
{
	kern_return_t	ret;
	thread_t	thread, nthread;

#if	0	/* Grenoble - why??
		 */
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
#else
	if (thr_act == THR_ACT_NULL)
#endif
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	/*
	 * Hold the activation and stop its shuttle so the machine state
	 * is stable while copied.  The loop re-validates the shuttle
	 * after reacquiring the act lock, since it may have changed
	 * while the lock was dropped to perform the stop.
	 */
	thread_hold(thr_act);
	while (1) {
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_machine_get_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}

/*
 * Change thread's machine-dependent state. Called with nothing
 * locked. Returns same way.
 */
kern_return_t
thread_set_state(
	register thread_act_t	thr_act,
	int			flavor,
	thread_state_t		state,
	mach_msg_type_number_t	state_count)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

#if	0	/* Grenoble - why?? */
	if (thr_act == THR_ACT_NULL || thr_act == current_act())
#else
	if (thr_act == THR_ACT_NULL)
#endif
		return (KERN_INVALID_ARGUMENT);
	/*
	 * We have no kernel activations, so Utah's MO fails for signals etc.
	 *
	 * If we're blocked in the kernel, use non-blocking method, else
	 * pass locked thr_act+thread in to "normal" act_[gs]et_state().
	 */

	thread = act_lock_thread(thr_act);
	if (!thr_act->active) {
		act_unlock_thread(thr_act);
		return(KERN_TERMINATED);
	}

	/* Same hold/stop/re-validate dance as thread_get_state(). */
	thread_hold(thr_act);
	while (1) {
		if (!thread || thr_act != thread->top_act)
			break;
		act_unlock_thread(thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_machine_set_state(thr_act, flavor,
					state, state_count);
	if (thread && thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(thr_act);
	act_unlock_thread(thr_act);

	return(ret);
}

/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/*
 * thread_dup:
 *
 * Copy the source activation's machine state into the (held and
 * stopped) target activation.
 */
kern_return_t
thread_dup(
	thread_act_t	source_thr_act,
	thread_act_t	target_thr_act)
{
	kern_return_t	ret;
	thread_t	thread, nthread;

	if (target_thr_act == THR_ACT_NULL || target_thr_act == current_act())
		return (KERN_INVALID_ARGUMENT);

	thread = act_lock_thread(target_thr_act);
	if (!target_thr_act->active) {
		act_unlock_thread(target_thr_act);
		return(KERN_TERMINATED);
	}

	/* Hold/stop/re-validate, as in thread_get_state(). */
	thread_hold(target_thr_act);
	while (1) {
		if (!thread || target_thr_act != thread->top_act)
			break;
		act_unlock_thread(target_thr_act);
		(void)thread_stop_wait(thread);
		nthread = act_lock_thread(target_thr_act);
		if (nthread == thread)
			break;
		thread_unstop(thread);
		thread = nthread;
	}
	ret = act_thread_dup(source_thr_act, target_thr_act);
	if (thread && target_thr_act == thread->top_act)
		thread_unstop(thread);
	thread_release(target_thr_act);
	act_unlock_thread(target_thr_act);

	return(ret);
}


/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 */
kern_return_t
thread_setstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	count)
{
	kern_return_t	kr;
	thread_t	thread;

	thread = act_lock_thread(thr_act);
	assert(thread);
	assert(thread->top_act == thr_act);
	kr = act_machine_set_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return(kr);
}

/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */
kern_return_t
thread_getstatus(
	thread_act_t		thr_act,
	int			flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	kern_return_t	kr;
	thread_t	thread;

	thread = act_lock_thread(thr_act);
	assert(thread);
	assert(thread->top_act == thr_act);
	kr = act_machine_get_state(thr_act, flavor, tstate, count);
	act_unlock_thread(thr_act);
	return(kr);
}

/*
 * Kernel-internal thread_activation interfaces used outside this file:
 */

/*
 * act_init() - Initialize activation handling code
 */
void
act_init()
{
	/* All activations are carved out of a dedicated zone. */
	thr_act_zone = zinit(
			sizeof(struct thread_activation),
			ACT_MAX * sizeof(struct thread_activation), /* XXX */
			ACT_CHUNK * sizeof(struct thread_activation),
			"activations");
	act_machine_init();
}


/*
 * act_create - Create a new activation in a specific task.
 */
kern_return_t
act_create(task_t task,
	   thread_act_t *new_act)
{
	thread_act_t	thr_act;
	int		rc;	/* NOTE(review): unused local */
	vm_map_t	map;

	thr_act = (thread_act_t)zalloc(thr_act_zone);
	if (thr_act == 0)
		return(KERN_RESOURCE_SHORTAGE);

#if	MACH_ASSERT
	if (watchacts & WA_ACT_LNK)
		printf("act_create(task=%x,thr_act@%x=%x)\n",
			task, new_act, thr_act);
#endif	/* MACH_ASSERT */

	/* Start by zeroing everything; then init non-zero items only */
	bzero((char *)thr_act, sizeof(*thr_act));

#ifdef MACH_BSD
	{
		/*
		 * Take care of the uthread allocation
		 * do it early in order to make KERN_RESOURCE_SHORTAGE
		 * handling trivial
		 * uthread_alloc() will bzero the storage allocated.
		 */
		extern void *uthread_alloc(void);
		thr_act->uthread = uthread_alloc();
		if(thr_act->uthread == 0) {
			/* Put the thr_act back on the thr_act zone */
			zfree(thr_act_zone, (vm_offset_t)thr_act);
			return(KERN_RESOURCE_SHORTAGE);
		}
	}
#endif	/* MACH_BSD */

	/*
	 * Start with one reference for the caller and one for the
	 * act being alive.
	 */
	act_lock_init(thr_act);
	thr_act->ref_count = 2;

	/* Latch onto the task. */
	thr_act->task = task;
	task_reference(task);

	/* Initialize sigbufp for High-Watermark buffer allocation */
	thr_act->r_sigbufp = (routine_descriptor_t) &thr_act->r_sigbuf;
	thr_act->r_sigbuf_size = sizeof(thr_act->r_sigbuf);

#if	THREAD_SWAPPER
	thr_act->swap_state = TH_SW_IN;
#if	MACH_ASSERT
	thr_act->kernel_stack_swapped_in = TRUE;
#endif	/* MACH_ASSERT */
#endif	/* THREAD_SWAPPER */

	/* special_handler will always be last on the returnhandlers list. */
	thr_act->special_handler.next = 0;
	thr_act->special_handler.handler = special_handler;

#if	MACH_PROF
	thr_act->act_profiled = FALSE;
	thr_act->act_profiled_own = FALSE;
	thr_act->profil_buffer = NULLPROFDATA;
#endif

	/* Initialize the held_ulocks queue as empty */
	queue_init(&thr_act->held_ulocks);

	/* Inherit the profiling status of the parent task */
	act_prof_init(thr_act, task);

	ipc_thr_act_init(task, thr_act);
	act_machine_create(task, thr_act);

	/*
	 * If thr_act created in kernel-loaded task, alter its saved
	 * state to so indicate
	 */
	if (task->kernel_loaded) {
		act_user_to_kernel(thr_act);
	}

	/* Cache the task's map and take a reference to it */
	map = task->map;
	thr_act->map = map;

	/* Inline vm_map_reference cause we don't want to increment res_count */
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count > 0);
	assert(map->ref_count >= map->res_count);
#endif	/* TASK_SWAPPER */
	map->ref_count++;
	mutex_unlock(&map->s_lock);

	*new_act = thr_act;
	return KERN_SUCCESS;
}

/*
 *
 * act_free - called when an thr_act's ref_count drops to zero.
 *
 * This can only happen after the activation has been reaped, and
 * all other references to it have gone away. We can now release
 * the last critical resources, unlink the activation from the
 * task, and release the reference on the thread shuttle itself.
 *
 * Called with activation locked.
 */
#if	MACH_ASSERT
int	dangerous_bzero = 1;	/* paranoia & safety */
#endif

void
act_free(thread_act_t thr_act)
{
	task_t		task;
	thread_t	thr;
	vm_map_t	map;
	unsigned int	ref;

#if	MACH_ASSERT
	if (watchacts & WA_EXIT)
		printf("act_free(%x(%d)) thr=%x tsk=%x(%d) pport=%x%sactive\n",
			thr_act, thr_act->ref_count, thr_act->thread,
			thr_act->task,
			thr_act->task ? thr_act->task->ref_count : 0,
			thr_act->pool_port,
			thr_act->active ? " " : " !");
#endif	/* MACH_ASSERT */


#if	THREAD_SWAPPER
	assert(thr_act->kernel_stack_swapped_in);
#endif	/* THREAD_SWAPPER */

	assert(!thr_act->active);
	assert(!thr_act->pool_port);

	task = thr_act->task;
	task_lock(task);

	if (thr = thr_act->thread) {
		time_value_t	user_time, system_time;

		/* Fold the dying thread's CPU times into the task totals. */
		thread_read_times(thr, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* Unlink the thr_act from the task's thr_act list,
		 * so it doesn't appear in calls to task_threads and such.
		 * The thr_act still keeps its ref on the task, however.
		 */
		queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts);
		thr_act->thr_acts.next = NULL;
		task->thr_act_count--;

#if	THREAD_SWAPPER
		/*
		 * Thread is supposed to be unswappable by now...
		 */
		assert(thr_act->swap_state == TH_SW_UNSWAPPABLE ||
		       !thread_swap_unwire_stack);
#endif	/* THREAD_SWAPPER */

		task->res_act_count--;
		task_unlock(task);
		task_deallocate(task);
		thread_deallocate(thr);
		act_machine_destroy(thr_act);
	} else {
		/*
		 * Must have never really gotten started
		 * no unlinking from the task and no need
		 * to free the shuttle.
		 */
		task_unlock(task);
		task_deallocate(task);
	}

	sigbuf_dealloc(thr_act);
	act_prof_deallocate(thr_act);
	ipc_thr_act_terminate(thr_act);

	/*
	 * Drop the cached map reference.
	 * Inline version of vm_map_deallocate() because we
	 * don't want to decrement the map's residence count here.
	 */
	map = thr_act->map;
	mutex_lock(&map->s_lock);
#if	TASK_SWAPPER
	assert(map->res_count >= 0);
	assert(map->ref_count > map->res_count);
#endif	/* TASK_SWAPPER */
	ref = --map->ref_count;
	mutex_unlock(&map->s_lock);
	if (ref == 0)
		vm_map_destroy(map);

#ifdef MACH_BSD
	{
		/*
		 * Free uthread BEFORE the bzero.
		 * Not doing so will result in a leak.
		 */
		extern void uthread_free(void *);
		void *ut = thr_act->uthread;
		thr_act->uthread = 0;
		uthread_free(ut);
	}
#endif	/* MACH_BSD */

#if	MACH_ASSERT
	if (dangerous_bzero)	/* dangerous if we're still using it! */
		bzero((char *)thr_act, sizeof(*thr_act));
#endif	/* MACH_ASSERT */
	/* Put the thr_act back on the thr_act zone */
	zfree(thr_act_zone, (vm_offset_t)thr_act);
}


/*
 * act_attach - Attach an thr_act to the top of a thread ("push the stack").
 *
 * The thread_shuttle must be either the current one or a brand-new one.
 * Assumes the thr_act is active but not in use, also, that if it is
 * attached to an thread_pool (i.e. the thread_pool pointer is nonzero),
 * the thr_act has already been taken off the thread_pool's list.
 *
 * Already locked: thr_act plus "appropriate" thread-related locks
 * (see act_lock_thread()).
 */
void
act_attach(
	thread_act_t	thr_act,
	thread_t	thread,
	unsigned	init_alert_mask)
{
	thread_act_t	lower;

#if	MACH_ASSERT
	assert(thread == current_thread() || thread->top_act == THR_ACT_NULL);
	if (watchacts & WA_ACT_LNK)
		printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n",
			thr_act, thr_act->ref_count, thread, thread->ref_count,
			init_alert_mask);
#endif	/* MACH_ASSERT */

	/*
	 * Chain the thr_act onto the thread's thr_act stack.
	 * Set mask and auto-propagate alerts from below.
	 */
	thr_act->ref_count++;
	thr_act->thread = thread;
	thr_act->higher = THR_ACT_NULL;	/*safety*/
	thr_act->alerts = 0;
	thr_act->alert_mask = init_alert_mask;
	lower = thr_act->lower = thread->top_act;

	if (lower != THR_ACT_NULL) {
		lower->higher = thr_act;
		/* Inherit only those alerts the new act listens for. */
		thr_act->alerts = (lower->alerts & init_alert_mask);
	}

	thread->top_act = thr_act;
}

/*
 * act_detach
 *
 * Remove the current thr_act from the top of the current thread, i.e.
 * "pop the stack". Assumes already locked: thr_act plus "appropriate"
 * thread-related locks (see act_lock_thread).
 */
void
act_detach(
	thread_act_t	cur_act)
{
	thread_t	cur_thread = cur_act->thread;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_ACT_LNK))
		printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n",
			cur_act, cur_act->ref_count,
			cur_thread, cur_thread->ref_count,
			cur_act->task,
			cur_act->task ? cur_act->task->ref_count : 0);
#endif	/* MACH_ASSERT */

	/* Unlink the thr_act from the thread's thr_act stack */
	cur_thread->top_act = cur_act->lower;
	cur_act->thread = 0;
	cur_act->ref_count--;
	assert(cur_act->ref_count > 0);

	/* Return the activation to its pool, if it has one. */
	thread_pool_put_act(cur_act);

#if	MACH_ASSERT
	cur_act->lower = cur_act->higher = THR_ACT_NULL;
	if (cur_thread->top_act)
		cur_thread->top_act->higher = THR_ACT_NULL;
#endif	/* MACH_ASSERT */

	return;
}


/*
 * Synchronize a thread operation with RPC. Called with nothing
 * locked.
 * Returns with thr_act locked, plus one of four
 * combinations of other locks held:
 *	none - for new activation not yet associated with thread_pool
 *	or shuttle
 *	rpc_lock(thr_act->thread) only - for base activation (one
 *	without pool_port)
 *	ip_lock(thr_act->pool_port) only - for empty activation (one
 *	with no associated shuttle)
 *	both locks - for "active" activation (has shuttle, lives
 *	on thread_pool)
 * If thr_act has an associated shuttle, this function returns
 * its address. Otherwise it returns zero.
 */
thread_t
act_lock_thread(
	thread_act_t	thr_act)
{
	ipc_port_t	pport;

	/*
	 * Allow the shuttle cloning code (q.v., when it
	 * exists :-}) to obtain ip_lock()'s while holding
	 * an rpc_lock().
	 */
	while (1) {
		act_lock(thr_act);
		pport = thr_act->pool_port;
		if (!pport || ip_lock_try(pport)) {
			if (!thr_act->thread)
				break;
			if (rpc_lock_try(thr_act->thread))
				break;
			if (pport)
				ip_unlock(pport);
		}
		/* Deadlock-avoidance backoff: drop everything and retry. */
		act_unlock(thr_act);
		mutex_pause();
	}
	return (thr_act->thread);
}

/*
 * Unsynchronize with RPC (i.e., undo an act_lock_thread() call).
 * Called with thr_act locked, plus thread locks held that are
 * "correct" for thr_act's state. Returns with nothing locked.
 */
void
act_unlock_thread(thread_act_t thr_act)
{
	if (thr_act->thread)
		rpc_unlock(thr_act->thread);
	if (thr_act->pool_port)
		ip_unlock(thr_act->pool_port);
	act_unlock(thr_act);
}

/*
 * Synchronize with RPC given a pointer to a shuttle (instead of an
 * activation). Called with nothing locked; returns with all
 * "appropriate" thread-related locks held (see act_lock_thread()).
 */
thread_act_t
thread_lock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	while (1) {
		rpc_lock(thread);
		thr_act = thread->top_act;
		if (!thr_act)
			break;
		if (!act_lock_try(thr_act)) {
			/* Deadlock-avoidance backoff, then retry. */
			rpc_unlock(thread);
			mutex_pause();
			continue;
		}
		if (thr_act->pool_port &&
		    !ip_lock_try(thr_act->pool_port)) {
			rpc_unlock(thread);
			act_unlock(thr_act);
			mutex_pause();
			continue;
		}
		break;
	}
	return (thr_act);
}

/*
 * Unsynchronize with RPC starting from a pointer to a shuttle.
 * Called with RPC-related locks held that are appropriate to
 * shuttle's state; any activation is also locked.
 */
void
thread_unlock_act(
	thread_t	thread)
{
	thread_act_t	thr_act;

	if (thr_act = thread->top_act) {
		if (thr_act->pool_port)
			ip_unlock(thr_act->pool_port);
		act_unlock(thr_act);
	}
	rpc_unlock(thread);
}

/*
 * switch_act
 *
 * If a new activation is given, switch to it. If not,
 * switch to the lower activation (pop). Returns the old
 * activation. This is for RPC support.
 */
thread_act_t
switch_act(
	thread_act_t	act)
{
	thread_t	thread;
	thread_act_t	old, new;
	unsigned	cpu;
	spl_t		spl;	/* NOTE(review): unused local */


	disable_preemption();

	cpu = cpu_number();
	thread = current_thread();

	/*
	 * Find the old and new activation for switch.
	 */
	old = thread->top_act;

	if (act) {
		new = act;
		new->thread = thread;
	}
	else {
		new = old->lower;
	}

	assert(new != THR_ACT_NULL);
#if	THREAD_SWAPPER
	assert(new->swap_state != TH_SW_OUT &&
	       new->swap_state != TH_SW_COMING_IN);
#endif	/* THREAD_SWAPPER */

	assert(cpu_data[cpu].active_thread == thread);
	active_kloaded[cpu] = (new->kernel_loaded) ? new : 0;

	/* This is where all the work happens */
	machine_switch_act(thread, old, new, cpu);

	/*
	 * Push or pop an activation on the chain.
	 */
	if (act) {
		act_attach(new, thread, 0);
	}
	else {
		act_detach(old);
	}

	enable_preemption();

	return(old);
}

/*
 * install_special_handler
 * Install the special returnhandler that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Already locked: RPC-related locks for thr_act, but not
 * scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler(
	thread_act_t	thr_act)
{
	spl_t		spl;
	thread_t	thread = thr_act->thread;

#if	MACH_ASSERT
	if (watchacts & WA_ACT_HDLR)
		printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act);
#endif	/* MACH_ASSERT */

	/* Take the scheduling lock ourselves, then defer to the
	 * _locked variant for the real work. */
	spl = splsched();
	if (thread)
		thread_lock(thread);
	install_special_handler_locked(thr_act);
	act_set_apc(thr_act);
	if (thread)
		thread_unlock(thread);
	splx(spl);
}

/*
 * install_special_handler_locked
 * Do the work of installing the special_handler.
 *
 * Already locked: RPC-related locks for thr_act, plus the
 * scheduling lock (thread_lock()) of the associated thread.
 */
void
install_special_handler_locked(
	thread_act_t	thr_act)
{
	ReturnHandler	**rh;
	thread_t	thread = thr_act->thread;

	/* The work handler must always be the last ReturnHandler on the list,
	   because it can do tricky things like detach the thr_act. */
	for (rh = &thr_act->handlers; *rh; rh = &(*rh)->next)
		/* */ ;
	if (rh != &thr_act->special_handler.next) {
		*rh = &thr_act->special_handler;
	}
	if (thread && thr_act == thread->top_act) {
		/*
		 * Temporarily undepress, so target has
		 * a chance to do locking required to
		 * block itself in special_handler().
		 */
		if (thread->depress_priority >= 0) {
			thread->priority = thread->depress_priority;

			/*
			 * Use special value -2 to indicate need
			 * to redepress priority in special_handler
			 * as thread blocks
			 */
			thread->depress_priority = -2;
			compute_priority(thread, FALSE);
		}
	}
	act_set_apc(thr_act);
}

/*
 * JMM -
 * These two routines will be enhanced over time to call the general handler registration
 * mechanism used by special handlers and alerts. They are hack in for now to avoid
 * having to export the gory details of ASTs to the BSD code right now.
 */
extern thread_apc_handler_t bsd_ast;

/* Register the (currently sole, BSD) APC handler on an activation. */
kern_return_t
thread_apc_set(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);
	thread_ast_set(thr_act, AST_BSD);
	if (thr_act == current_act())
		ast_propagate(thr_act->ast);
	return KERN_SUCCESS;
}

/* Remove the (currently sole, BSD) APC handler from an activation. */
kern_return_t
thread_apc_clear(
	thread_act_t		thr_act,
	thread_apc_handler_t	apc)
{
	assert(apc == bsd_ast);
	thread_ast_clear(thr_act, AST_BSD);
	if (thr_act == current_act())
		ast_off(AST_BSD);
	return KERN_SUCCESS;
}

/*
 * act_set_thread_pool - Assign an activation to a specific thread_pool.
 * Fails if the activation is already assigned to another pool.
 * If thread_pool == 0, we remove the thr_act from its thread_pool.
 *
 * Called the port containing thread_pool already locked.
 * Returns the same way.
 */
kern_return_t act_set_thread_pool(
	thread_act_t	thr_act,
	ipc_port_t	pool_port)
{
	thread_pool_t	thread_pool;

#if	MACH_ASSERT
	/* NOTE(review): thread_pool is read here before it is assigned
	 * below -- the debug printf reports garbage; confirm intent. */
	if (watchacts & WA_ACT_LNK)
		printf("act_set_thread_pool: %x(%d) -> %x\n",
			thr_act, thr_act->ref_count, thread_pool);
#endif	/* MACH_ASSERT */

	if (pool_port == 0) {
		thread_act_t	*lact;

		if (thr_act->pool_port == 0)
			return KERN_SUCCESS;
		thread_pool = &thr_act->pool_port->ip_thread_pool;

		/* Unlink thr_act from the pool's singly-linked act list. */
		for (lact = &thread_pool->thr_acts; *lact;
		     lact = &((*lact)->thread_pool_next)) {
			if (thr_act == *lact) {
				*lact = thr_act->thread_pool_next;
				break;
			}
		}
		act_lock(thr_act);
		thr_act->pool_port = 0;
		thr_act->thread_pool_next = 0;
		act_unlock(thr_act);
		/* Drop the reference the pool held on the activation. */
		act_deallocate(thr_act);
		return KERN_SUCCESS;
	}
	if (thr_act->pool_port != pool_port) {
		thread_pool = &pool_port->ip_thread_pool;
		if (thr_act->pool_port != 0) {
#if	MACH_ASSERT
			if (watchacts & WA_ACT_LNK)
				printf("act_set_thread_pool found %x!\n",
					thr_act->pool_port);
#endif	/* MACH_ASSERT */
			return(KERN_FAILURE);
		}
		act_lock(thr_act);
		thr_act->pool_port = pool_port;

		/* The pool gets a ref to the activation -- have
		 * to inline operation because thr_act is already
		 * locked.
		 */
		act_locked_act_reference(thr_act);

		/* If it is available,
		 * add it to the thread_pool's available-activation list.
		 */
		if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) {
			thr_act->thread_pool_next = thread_pool->thr_acts;
			pool_port->ip_thread_pool.thr_acts = thr_act;
			if (thread_pool->waiting)
				thread_pool_wakeup(thread_pool);
		}
		act_unlock(thr_act);
	}

	return KERN_SUCCESS;
}

/*
 * act_locked_act_set_thread_pool- Assign activation to a specific thread_pool.
 * Fails if the activation is already assigned to another pool.
 * If thread_pool == 0, we remove the thr_act from its thread_pool.
 *
 * Called the port containing thread_pool already locked.
 * Also called with the thread activation locked.
+ * Returns the same way. + * + * This routine is the same as `act_set_thread_pool()' except that it does + * not call `act_deallocate(),' which unconditionally tries to obtain the + * thread activation lock. + */ +kern_return_t act_locked_act_set_thread_pool( + thread_act_t thr_act, + ipc_port_t pool_port) +{ + thread_pool_t thread_pool; + +#if MACH_ASSERT + if (watchacts & WA_ACT_LNK) + printf("act_set_thread_pool: %x(%d) -> %x\n", + thr_act, thr_act->ref_count, thread_pool); +#endif /* MACH_ASSERT */ + + if (pool_port == 0) { + thread_act_t *lact; + + if (thr_act->pool_port == 0) + return KERN_SUCCESS; + thread_pool = &thr_act->pool_port->ip_thread_pool; + + for (lact = &thread_pool->thr_acts; *lact; + lact = &((*lact)->thread_pool_next)) { + if (thr_act == *lact) { + *lact = thr_act->thread_pool_next; + break; + } + } + + thr_act->pool_port = 0; + thr_act->thread_pool_next = 0; + act_locked_act_deallocate(thr_act); + return KERN_SUCCESS; + } + if (thr_act->pool_port != pool_port) { + thread_pool = &pool_port->ip_thread_pool; + if (thr_act->pool_port != 0) { +#if MACH_ASSERT + if (watchacts & WA_ACT_LNK) + printf("act_set_thread_pool found %x!\n", + thr_act->pool_port); +#endif /* MACH_ASSERT */ + return(KERN_FAILURE); + } + thr_act->pool_port = pool_port; + + /* The pool gets a ref to the activation -- have + * to inline operation because thr_act is already + * locked. + */ + act_locked_act_reference(thr_act); + + /* If it is available, + * add it to the thread_pool's available-activation list. 
+ */ + if ((thr_act->thread == 0) && (thr_act->suspend_count == 0)) { + thr_act->thread_pool_next = thread_pool->thr_acts; + pool_port->ip_thread_pool.thr_acts = thr_act; + if (thread_pool->waiting) + thread_pool_wakeup(thread_pool); + } + } + + return KERN_SUCCESS; +} + +/* + * Activation control support routines internal to this file: + */ + +/* + * act_execute_returnhandlers() - does just what the name says + * + * This is called by system-dependent code when it detects that + * thr_act->handlers is non-null while returning into user mode. + * Activations linked onto an thread_pool always have null thr_act->handlers, + * so RPC entry paths need not check it. + */ +void act_execute_returnhandlers( + void) +{ + spl_t s; + thread_t thread; + thread_act_t thr_act = current_act(); + +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("execute_rtn_hdlrs: thr_act=%x\n", thr_act); +#endif /* MACH_ASSERT */ + + s = splsched(); + act_clr_apc(thr_act); + spllo(); + while (1) { + ReturnHandler *rh; + + /* Grab the next returnhandler */ + thread = act_lock_thread(thr_act); + (void)splsched(); + thread_lock(thread); + rh = thr_act->handlers; + if (!rh) { + thread_unlock(thread); + splx(s); + act_unlock_thread(thr_act); + return; + } + thr_act->handlers = rh->next; + thread_unlock(thread); + spllo(); + act_unlock_thread(thr_act); + +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf( (rh == &thr_act->special_handler) ? + "\tspecial_handler\n" : "\thandler=%x\n", + rh->handler); +#endif /* MACH_ASSERT */ + + /* Execute it */ + (*rh->handler)(rh, thr_act); + } +} + +/* + * special_handler_continue + * + * Continuation routine for the special handler blocks. It checks + * to see whether there has been any new suspensions. If so, it + * installs the special handler again. Otherwise, it checks to see + * if the current depression needs to be re-instated (it may have + * been temporarily removed in order to get to this point in a hurry). 
+ */ +void +special_handler_continue(void) +{ + thread_act_t cur_act = current_act(); + thread_t thread = cur_act->thread; + spl_t s; + + if (cur_act->suspend_count) + install_special_handler(cur_act); + else { + s = splsched(); + thread_lock(thread); + if (thread->depress_priority == -2) { + /* + * We were temporarily undepressed by + * install_special_handler; restore priority + * depression. + */ + thread->depress_priority = thread->priority; + thread->priority = thread->sched_pri = DEPRESSPRI; + } + thread_unlock(thread); + splx(s); + } + thread_exception_return(); +} + +/* + * special_handler - handles suspension, termination. Called + * with nothing locked. Returns (if it returns) the same way. + */ +void +special_handler( + ReturnHandler *rh, + thread_act_t cur_act) +{ + spl_t s; + thread_t lthread; + thread_t thread = act_lock_thread(cur_act); + unsigned alert_bits; + exception_data_type_t + codes[EXCEPTION_CODE_MAX]; + kern_return_t kr; + kern_return_t exc_kr; + + assert(thread != THREAD_NULL); +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("\t\tspecial_handler(thr_act=%x(%d))\n", cur_act, + (cur_act ? cur_act->ref_count : 0)); +#endif /* MACH_ASSERT */ + + s = splsched(); + + thread_lock(thread); + thread->state &= ~TH_ABORT; /* clear any aborts */ + thread_unlock(thread); + splx(s); + + /* + * If someone has killed this invocation, + * invoke the return path with a terminated exception. + */ + if (!cur_act->active) { + act_unlock_thread(cur_act); + act_machine_return(KERN_TERMINATED); + } + +#ifdef CALLOUT_RPC_MODEL + /* + * JMM - We don't intend to support this RPC model in Darwin. + * We will support inheritance through chains of activations + * on shuttles, but it will be universal and not just for RPC. + * As such, each activation will always have a base shuttle. 
+ * Our RPC model will probably even support the notion of + * alerts (thrown up the chain of activations to affect the + * work done on our behalf), but the unlinking of the shuttles + * will be completely difference because we will never have + * to clone them. + */ + + /* strip server terminated bit */ + alert_bits = cur_act->alerts & (~SERVER_TERMINATED); + + /* clear server terminated bit */ + cur_act->alerts &= ~SERVER_TERMINATED; + + if ( alert_bits ) { + /* + * currently necessary to coordinate with the exception + * code -fdr + */ + act_unlock_thread(cur_act); + + /* upcall exception/alert port */ + codes[0] = alert_bits; + + /* + * Exception makes a lot of assumptions. If there is no + * exception handler or the exception reply is broken, the + * thread will be terminated and exception will not return. If + * we decide we don't like that behavior, we need to check + * for the existence of an exception port before we call + * exception. + */ + exc_kr = exception( EXC_RPC_ALERT, codes, 1 ); + + /* clear the orphaned and time constraint indications */ + cur_act->alerts &= ~(ORPHANED | TIME_CONSTRAINT_UNSATISFIED); + + /* if this orphaned activation should be terminated... */ + if (exc_kr == KERN_RPC_TERMINATE_ORPHAN) { + /* + * ... terminate the activation + * + * This is done in two steps. First, the activation is + * disabled (prepared for termination); second, the + * `special_handler()' is executed again -- this time + * to terminate the activation. 
+ * (`act_disable_task_locked()' arranges for the + * additional execution of the `special_handler().') + */ + +#if THREAD_SWAPPER + thread_swap_disable(cur_act); +#endif /* THREAD_SWAPPER */ + + /* acquire appropriate locks */ + task_lock(cur_act->task); + act_lock_thread(cur_act); + + /* detach the activation from its task */ + kr = act_disable_task_locked(cur_act); + assert( kr == KERN_SUCCESS ); + + /* release locks */ + task_unlock(cur_act->task); + } + else { + /* acquire activation lock again (released below) */ + act_lock_thread(cur_act); + s = splsched(); + thread_lock(thread); + if (thread->depress_priority == -2) { + /* + * We were temporarily undepressed by + * install_special_handler; restore priority + * depression. + */ + thread->depress_priority = thread->priority; + thread->priority = thread->sched_pri = DEPRESSPRI; + } + thread_unlock(thread); + splx(s); + } + } +#endif /* CALLOUT_RPC_MODEL */ + + /* + * If we're suspended, go to sleep and wait for someone to wake us up. + */ + if (cur_act->suspend_count) { + if( cur_act->handlers == NULL ) { + assert_wait((event_t)&cur_act->suspend_count, + THREAD_ABORTSAFE); + act_unlock_thread(cur_act); + thread_block(special_handler_continue); + /* NOTREACHED */ + } + special_handler_continue(); + } + + act_unlock_thread(cur_act); +} + +/* + * Try to nudge a thr_act into executing its returnhandler chain. + * Ensures that the activation will execute its returnhandlers + * before it next executes any of its user-level code. + * + * Called with thr_act's act_lock() and "appropriate" thread-related + * locks held. (See act_lock_thread().) Returns same way. + */ +void +nudge(thread_act_t thr_act) +{ +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("\tact_%x: nudge(%x)\n", current_act(), thr_act); +#endif /* MACH_ASSERT */ + + /* + * Don't need to do anything at all if this thr_act isn't the topmost. + */ + if (thr_act->thread && thr_act->thread->top_act == thr_act) { + /* + * If it's suspended, wake it up. 
+ * This should nudge it even on another CPU. + */ + thread_wakeup((event_t)&thr_act->suspend_count); + } +} + +/* + * Update activation that belongs to a task created via kernel_task_create(). + */ +void +act_user_to_kernel( + thread_act_t thr_act) +{ + pcb_user_to_kernel(thr_act); + thr_act->kernel_loading = TRUE; +} + +/* + * Already locked: thr_act->task, RPC-related locks for thr_act + * + * Detach an activation from its task, and prepare it to terminate + * itself. + */ +kern_return_t +act_disable_task_locked( + thread_act_t thr_act) +{ + thread_t thread = thr_act->thread; + task_t task = thr_act->task; + +#if MACH_ASSERT + if (watchacts & WA_EXIT) { + printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive task=%x(%d)", + current_act(), thr_act, thr_act->ref_count, + (thr_act->active ? " " : " !"), + thr_act->task, thr_act->task? thr_act->task->ref_count : 0); + if (thr_act->pool_port) + printf(", pool_port %x", thr_act->pool_port); + printf("\n"); + (void) dump_act(thr_act); + } +#endif /* MACH_ASSERT */ + + /* This will allow no more control ops on this thr_act. */ + thr_act->active = 0; + ipc_thr_act_disable(thr_act); + + /* Clean-up any ulocks that are still owned by the thread + * activation (acquired but not released or handed-off). + */ + act_ulock_release_all(thr_act); + + /* When the special_handler gets executed, + * it will see the terminated condition and exit + * immediately. + */ + install_special_handler(thr_act); + + + /* If the target happens to be suspended, + * give it a nudge so it can exit. + */ + if (thr_act->suspend_count) + nudge(thr_act); + + /* Drop the thr_act reference taken for being active. + * (There is still at least one reference left: + * the one we were passed.) + * Inline the deallocate because thr_act is locked. + */ + act_locked_act_deallocate(thr_act); + + return(KERN_SUCCESS); +} + +/* + * act_alert - Register an alert from this activation. 
+ * + * Each set bit is propagated upward from (but not including) this activation, + * until the top of the chain is reached or the bit is masked. + */ +kern_return_t +act_alert(thread_act_t thr_act, unsigned alerts) +{ + thread_t thread = act_lock_thread(thr_act); + +#if MACH_ASSERT + if (watchacts & WA_ACT_LNK) + printf("act_alert %x: %x\n", thr_act, alerts); +#endif /* MACH_ASSERT */ + + if (thread) { + thread_act_t act_up = thr_act; + while ((alerts) && (act_up != thread->top_act)) { + act_up = act_up->higher; + alerts &= act_up->alert_mask; + act_up->alerts |= alerts; + } + /* + * XXXX If we reach the top, and it is blocked in glue + * code, do something to kick it. XXXX + */ + } + act_unlock_thread(thr_act); + + return KERN_SUCCESS; +} + +kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask) +{ + panic("act_alert_mask NOT YET IMPLEMENTED\n"); + return KERN_SUCCESS; +} + +typedef struct GetSetState { + struct ReturnHandler rh; + int flavor; + void *state; + int *pcount; + int result; +} GetSetState; + +/* Local Forward decls */ +kern_return_t get_set_state( + thread_act_t thr_act, int flavor, + thread_state_t state, int *pcount, + void (*handler)(ReturnHandler *rh, thread_act_t thr_act)); +void get_state_handler(ReturnHandler *rh, thread_act_t thr_act); +void set_state_handler(ReturnHandler *rh, thread_act_t thr_act); + +/* + * get_set_state(thr_act ...) + * + * General code to install g/set_state handler. + * Called with thr_act's act_lock() and "appropriate" + * thread-related locks held. (See act_lock_thread().) 
+ */ +kern_return_t +get_set_state(thread_act_t thr_act, int flavor, thread_state_t state, int *pcount, + void (*handler)(ReturnHandler *rh, thread_act_t thr_act)) +{ + GetSetState gss; + spl_t s; + + /* Initialize a small parameter structure */ + gss.rh.handler = handler; + gss.flavor = flavor; + gss.state = state; + gss.pcount = pcount; + gss.result = KERN_ABORTED; /* iff wait below is interrupted */ + + /* Add it to the thr_act's return handler list */ + gss.rh.next = thr_act->handlers; + thr_act->handlers = &gss.rh; + + s = splsched(); + act_set_apc(thr_act); + splx(s); + +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) { + printf("act_%x: get_set_state(thr_act=%x flv=%x state=%x ptr@%x=%x)", + current_act(), thr_act, flavor, state, + pcount, (pcount ? *pcount : 0)); + printf((handler == get_state_handler ? "get_state_hdlr\n" : + (handler == set_state_handler ? "set_state_hdlr\n" : + "hndler=%x\n")), handler); + } +#endif /* MACH_ASSERT */ + + assert(thr_act->thread); /* Callers must ensure these */ + assert(thr_act != current_act()); + for (;;) { + nudge(thr_act); + /* + * Wait must be interruptible to avoid deadlock (e.g.) with + * task_suspend() when caller and target of get_set_state() + * are in same task. 
+ */ + assert_wait((event_t)&gss, THREAD_ABORTSAFE); + act_unlock_thread(thr_act); + thread_block((void (*)(void))0); + if (gss.result != KERN_ABORTED) + break; + if (current_act()->handlers) + act_execute_returnhandlers(); + act_lock_thread(thr_act); + } + +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("act_%x: get_set_state returns %x\n", + current_act(), gss.result); +#endif /* MACH_ASSERT */ + + return gss.result; +} + +void +set_state_handler(ReturnHandler *rh, thread_act_t thr_act) +{ + GetSetState *gss = (GetSetState*)rh; + +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n", + current_act(), rh, thr_act); +#endif /* MACH_ASSERT */ + + gss->result = act_machine_set_state(thr_act, gss->flavor, + gss->state, *gss->pcount); + thread_wakeup((event_t)gss); +} + +void +get_state_handler(ReturnHandler *rh, thread_act_t thr_act) +{ + GetSetState *gss = (GetSetState*)rh; + +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n", + current_act(), rh, thr_act); +#endif /* MACH_ASSERT */ + + gss->result = act_machine_get_state(thr_act, gss->flavor, + gss->state, + (mach_msg_type_number_t *) gss->pcount); + thread_wakeup((event_t)gss); +} + +kern_return_t +act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state, + mach_msg_type_number_t *pcount) +{ +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n", + current_act(), thr_act, flavor, state, pcount, + (pcount? 
*pcount : 0)); +#endif /* MACH_ASSERT */ + + return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler)); +} + +kern_return_t +act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state, + mach_msg_type_number_t count) +{ +#if MACH_ASSERT + if (watchacts & WA_ACT_HDLR) + printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n", + current_act(), thr_act, flavor, state, count, count); +#endif /* MACH_ASSERT */ + + return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler)); +} + +kern_return_t +act_set_state(thread_act_t thr_act, int flavor, thread_state_t state, + mach_msg_type_number_t count) +{ + if (thr_act == THR_ACT_NULL || thr_act == current_act()) + return(KERN_INVALID_ARGUMENT); + + act_lock_thread(thr_act); + return(act_set_state_locked(thr_act, flavor, state, count)); + +} + +kern_return_t +act_get_state(thread_act_t thr_act, int flavor, thread_state_t state, + mach_msg_type_number_t *pcount) +{ + if (thr_act == THR_ACT_NULL || thr_act == current_act()) + return(KERN_INVALID_ARGUMENT); + + act_lock_thread(thr_act); + return(act_get_state_locked(thr_act, flavor, state, pcount)); +} + +/* + * These two should be called at splsched() + * Set/clear indicator to run APC (layered on ASTs) + */ +void +act_set_apc(thread_act_t thr_act) +{ + thread_ast_set(thr_act, AST_APC); + if (thr_act == current_act()) { + mp_disable_preemption(); + ast_propagate(thr_act->ast); + mp_enable_preemption(); + } +} + +void +act_clr_apc(thread_act_t thr_act) +{ + thread_ast_clear(thr_act, AST_APC); +} + +void +act_ulock_release_all(thread_act_t thr_act) +{ + ulock_t ulock; + + while (!queue_empty(&thr_act->held_ulocks)) { + ulock = (ulock_t) queue_first(&thr_act->held_ulocks); + (void) lock_make_unstable(ulock, thr_act); + (void) lock_release_internal(ulock, thr_act); + } +} + +/* + * Provide routines (for export to other components) of things that + * are implemented as macros insternally. 
+ */ +#undef current_act +thread_act_t +current_act(void) +{ + return(current_act_fast()); +} + +thread_act_t +thread_self(void) +{ + thread_act_t self = current_act_fast(); + + act_reference(self); + return self; +} + +thread_act_t +mach_thread_self(void) +{ + thread_act_t self = current_act_fast(); + + act_reference(self); + return self; +} + +#undef act_reference +void +act_reference( + thread_act_t thr_act) +{ + act_reference_fast(thr_act); +} + +#undef act_deallocate +void +act_deallocate( + thread_act_t thr_act) +{ + act_deallocate_fast(thr_act); +} + diff --git a/osfmk/kern/thread_act.h b/osfmk/kern/thread_act.h new file mode 100644 index 000000000..40fe85fcf --- /dev/null +++ b/osfmk/kern/thread_act.h @@ -0,0 +1,514 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + * + * File: thread_act.h + * + * thread activation definitions + */ +#ifndef _KERN_THREAD_ACT_H_ +#define _KERN_THREAD_ACT_H_ + +#include +#include +#include +#include +#include + + +#ifdef MACH_KERNEL_PRIVATE +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* Here is a description of the states an thread_activation may be in. + * + * An activation always has a valid task pointer, and it is always constant. + * The activation is only linked onto the task's activation list until + * the activation is terminated. + * + * An activation is in use or not, depending on whether its thread + * pointer is nonzero. If it is not in use, it is just sitting idly + * waiting to be used by a thread. The thread holds a reference on + * the activation while using it. + * + * An activation lives on an thread_pool if its pool_port pointer is nonzero. + * When in use, it can still live on an thread_pool, but it is not actually + * linked onto the thread_pool's list of available activations. In this case, + * the act will return to its thread_pool as soon as it becomes unused. 
+ * + * An activation is active until thread_terminate is called on it; + * then it is inactive, waiting for all references to be dropped. + * Future control operations on the terminated activation will fail, + * with the exception that act_yank still works if the activation is + * still on an RPC chain. A terminated activation always has null + * thread and pool_port pointers. + * + * An activation is suspended when suspend_count > 0. + * A suspended activation can live on an thread_pool, but it is not + * actually linked onto the thread_pool while suspended. + * + * Locking note: access to data relevant to scheduling state (user_stop_count, + * suspend_count, handlers, special_handler) is controlled by the combination + * of locks acquired by act_lock_thread(). That is, not only must act_lock() + * be held, but RPC through the activation must be frozen (so that the + * thread pointer doesn't change). If a shuttle is associated with the + * activation, then its thread_lock() must also be acquired to change these + * data. Regardless of whether a shuttle is present, the data must be + * altered at splsched(). + */ + +typedef struct ReturnHandler { + struct ReturnHandler *next; + void (*handler)(struct ReturnHandler *rh, + struct thread_activation *thr_act); +} ReturnHandler; + +typedef struct thread_activation { + + /*** task linkage ***/ + + /* Links for task's circular list of activations. The activation + * is only on the task's activation list while active. Must be + * first. + */ + queue_chain_t thr_acts; + + /* Indicators for whether this activation is in the midst of + * resuming or has already been resumed in a kernel-loaded + * task -- these flags are basically for quick access to + * this information. 
+ */ + boolean_t kernel_loaded; /* running in kernel-loaded task */ + boolean_t kernel_loading; /* about to run kernel-loaded */ + + /*** Machine-dependent state ***/ + struct MachineThrAct mact; + + /*** Consistency ***/ + decl_mutex_data(,lock) + decl_simple_lock_data(,sched_lock) + int ref_count; + + /* Reference to the task this activation is in. + * Constant for the life of the activation + */ + struct task *task; + vm_map_t map; /* cached current map */ + + /*** thread_pool-related stuff ***/ + /* Port containing the thread_pool this activation normally lives + * on, zero if none. The port (really the thread_pool) holds a + * reference to the activation as long as this is nonzero (even when + * the activation isn't actually on the thread_pool's list). + */ + struct ipc_port *pool_port; + + /* Link on the thread_pool's list of activations. + * The activation is only actually on the thread_pool's list + * (and hence this is valid) when not in use (thread == 0) and + * not suspended (suspend_count == 0). 
+ */ + struct thread_activation *thread_pool_next; + + /* RPC state */ + union { + struct { + rpc_subsystem_t r_subsystem; +#if 0 /* Grenoble */ + mach_rpc_id_t r_routine_num; + mach_rpc_signature_t r_sig_ptr; + mach_rpc_size_t r_sig_size; +#else + rpc_id_t r_routine_num; + rpc_signature_t r_sig_ptr; /* Stored Client Sig Ptr */ + rpc_size_t r_sig_size; /* Size of Sig stored */ + struct rpc_signature r_sigbuf; /* Static Reservation of Sig Mem */ + routine_descriptor_t r_sigbufp; /* For dynamic storage of Sig */ + vm_size_t r_sigbuf_size; /* Size of buffer allocated for sig */ +#endif + vm_offset_t r_new_argv; + vm_offset_t *r_arg_buf; + vm_offset_t r_arg_buf_data[RPC_KBUF_SIZE]; + rpc_copy_state_t r_state; + rpc_copy_state_data_t r_state_data[RPC_DESC_COUNT]; + unsigned int r_port_flags; + ipc_port_t r_local_port; + void *r_kkt_args; + } regular; + struct { + ipc_port_t r_port; + ipc_port_t r_exc_port; + int r_exc_flavor; + mach_msg_type_number_t r_ostate_cnt; + exception_data_type_t r_code[EXCEPTION_CODE_MAX]; +#if ETAP_EVENT_MONITOR + exception_type_t r_exception; +#endif + } exception; + } rpc_state; + + /*** Thread linkage ***/ + /* Shuttle using this activation, zero if not in use. The shuttle + * holds a reference on the activation while this is nonzero. + */ + struct thread_shuttle *thread; + + /* The rest in this section is only valid when thread is nonzero. */ + + /* Next higher and next lower activation on the thread's activation + * stack. For a topmost activation or the null_act, higher is + * undefined. The bottommost activation is always the null_act. + */ + struct thread_activation *higher, *lower; + + /* Alert bits pending at this activation; some of them may have + * propagated from lower activations. + */ + unsigned alerts; + + /* Mask of alert bits to be allowed to pass through from lower levels. 
+ */ + unsigned alert_mask; + +#if 0 /* Grenoble */ + /* Saved policy and priority of shuttle if changed to migrate into + * higher-priority or more real-time task. Only valid if + * saved_sched_stamp is nonzero and equal to the sched_change_stamp + * in the thread_shuttle. (Otherwise, the policy or priority has + * been explicitly changed in the meantime, and the saved values + * are invalid.) + */ + policy_t saved_policy; + integer_t saved_base_priority; + unsigned int saved_sched_change_stamp; +#endif + /*** Control information ***/ + + /* Number of outstanding suspensions on this activation. */ + int suspend_count; + + /* User-visible scheduling state */ + int user_stop_count; /* outstanding stops */ + + /* ast is needed - see ast.h */ + int ast; + +#if THREAD_SWAPPER + /* task swapper */ + int swap_state; /* swap state (or unswappable flag)*/ + queue_chain_t swap_queue; /* links on swap queues */ +#if MACH_ASSERT + boolean_t kernel_stack_swapped_in; + /* debug for thread swapping */ +#endif /* MACH_ASSERT */ +#endif /* THREAD_SWAPPER */ + + /* This is normally true, but is set to false when the + * activation is terminated. + */ + int active; + + /* Chain of return handlers to be called before the thread is + * allowed to return to this invocation + */ + ReturnHandler *handlers; + + /* A special ReturnHandler attached to the above chain to + * handle suspension and such + */ + ReturnHandler special_handler; + + /* Special ports attached to this activation */ + struct ipc_port *ith_self; /* not a right, doesn't hold ref */ + struct ipc_port *ith_sself; /* a send right */ + struct exception_action exc_actions[EXC_TYPES_COUNT]; + + /* A list of ulocks (a lock set element) currently held by the thread + */ + queue_head_t held_ulocks; + +#if MACH_PROF + /* Profiling data structures */ + boolean_t act_profiled; /* is activation being profiled? */ + boolean_t act_profiled_own; + /* is activation being profiled + * on its own ? 
*/ + struct prof_data *profil_buffer;/* prof struct if either is so */ +#endif /* MACH_PROF */ + +#ifdef MACH_BSD + void *uthread; +#endif + +} Thread_Activation; + +/* RPC state fields */ +#define r_subsystem rpc_state.regular.r_subsystem +#define r_routine_num rpc_state.regular.r_routine_num +#define r_sig_ptr rpc_state.regular.r_sig_ptr +#define r_sig_size rpc_state.regular.r_sig_size +#define r_sigbuf rpc_state.regular.r_sigbuf +#define r_sigbufp rpc_state.regular.r_sigbufp +#define r_sigbuf_size rpc_state.regular.r_sigbuf_size +#define r_new_argv rpc_state.regular.r_new_argv +#define r_arg_buf rpc_state.regular.r_arg_buf +#define r_arg_buf_data rpc_state.regular.r_arg_buf_data +#define r_state rpc_state.regular.r_state +#define r_state_data rpc_state.regular.r_state_data +#define r_port_flags rpc_state.regular.r_port_flags +#define r_local_port rpc_state.regular.r_local_port +#define r_kkt_args rpc_state.regular.r_kkt_args +#define r_port rpc_state.exception.r_port +#define r_exc_port rpc_state.exception.r_exc_port +#define r_exc_flavor rpc_state.exception.r_exc_flavor +#define r_ostate_cnt rpc_state.exception.r_ostate_cnt +#define r_code rpc_state.exception.r_code +#define r_exception rpc_state.exception.r_exception + +/* Alert bits */ +#define SERVER_TERMINATED 0x01 +#define ORPHANED 0x02 +#define CLIENT_TERMINATED 0x04 +#define TIME_CONSTRAINT_UNSATISFIED 0x08 + +#if THREAD_SWAPPER +/* + * Encapsulate the actions needed to ensure that next lower act on + * RPC chain is swapped in. Used at base spl; assumes rpc_lock() + * of thread is held; if port is non-null, assumes its ip_lock() + * is also held. + */ +#define act_switch_swapcheck(thread, port) \ +MACRO_BEGIN \ + thread_act_t __act__ = thread->top_act; \ + \ + while (__act__->lower) { \ + thread_act_t __l__ = __act__->lower; \ + \ + if (__l__->swap_state == TH_SW_IN || \ + __l__->swap_state == TH_SW_UNSWAPPABLE) \ + break; \ + /* \ + * XXX - Do we need to reference __l__? 
\ + */ \ + if (port) \ + ip_unlock(port); \ + if (!thread_swapin_blocking(__l__)) \ + panic("act_switch_swapcheck: !active"); \ + if (port) \ + ip_lock(port); \ + if (__act__->lower == __l__) \ + break; \ + } \ +MACRO_END + +#else /* !THREAD_SWAPPER */ + +#define act_switch_swapcheck(thread, port) + +#endif /* !THREAD_SWAPPER */ + +#define act_lock_init(thr_act) mutex_init(&(thr_act)->lock, ETAP_THREAD_ACT) +#define act_lock(thr_act) mutex_lock(&(thr_act)->lock) +#define act_lock_try(thr_act) mutex_try(&(thr_act)->lock) +#define act_unlock(thr_act) mutex_unlock(&(thr_act)->lock) + +/* Sanity check the ref count. If it is 0, we may be doubly zfreeing. + * If it is larger than max int, it has been corrupted, probably by being + * modified into an address (this is architecture dependent, but it's + * safe to assume there cannot really be max int references). + */ +#define ACT_MAX_REFERENCES \ + (unsigned)(~0 ^ (1 << (sizeof(int)*BYTE_SIZE - 1))) + +#define act_reference_fast(thr_act) \ + MACRO_BEGIN \ + if (thr_act) { \ + act_lock(thr_act); \ + assert((thr_act)->ref_count < ACT_MAX_REFERENCES); \ + (thr_act)->ref_count++; \ + act_unlock(thr_act); \ + } \ + MACRO_END + +#define act_reference(thr_act) act_reference_fast(thr_act) + +#define act_locked_act_reference(thr_act) \ + MACRO_BEGIN \ + if (thr_act) { \ + assert((thr_act)->ref_count < ACT_MAX_REFERENCES); \ + (thr_act)->ref_count++; \ + } \ + MACRO_END + +#define sigbuf_dealloc(thr_act) \ + if ((thr_act->r_sigbufp) && (thr_act->r_sigbuf_size > \ + sizeof(thr_act->r_sigbuf))) \ + { \ + kfree((vm_offset_t)thr_act->r_sigbufp, \ + thr_act->r_sigbuf_size); \ + thr_act->r_sigbuf_size = 0; \ + } + +#define act_deallocate_fast(thr_act) \ + MACRO_BEGIN \ + if (thr_act) { \ + int new_value; \ + act_lock(thr_act); \ + assert((thr_act)->ref_count > 0 && \ + (thr_act)->ref_count <= ACT_MAX_REFERENCES); \ + new_value = --(thr_act)->ref_count; \ + act_unlock(thr_act); \ + if (new_value == 0) \ + act_free(thr_act); \ + } \ + 
MACRO_END + +#define act_deallocate(thr_act) act_deallocate_fast(thr_act) + +#define act_locked_act_deallocate(thr_act) \ + MACRO_BEGIN \ + if (thr_act) { \ + int new_value; \ + assert((thr_act)->ref_count > 0 && \ + (thr_act)->ref_count <= ACT_MAX_REFERENCES); \ + new_value = --(thr_act)->ref_count; \ + if (new_value == 0) { \ + panic("a_l_act_deallocate: would free act"); \ + } \ + } \ + MACRO_END + + +extern void act_init(void); +extern kern_return_t act_disable_task_locked(thread_act_t); +extern void thread_release(thread_act_t); +extern kern_return_t thread_dowait(thread_act_t, boolean_t); +extern void thread_hold(thread_act_t); +extern void nudge(thread_act_t); + +extern kern_return_t act_set_thread_pool(thread_act_t, ipc_port_t); +extern kern_return_t act_locked_act_set_thread_pool(thread_act_t, ipc_port_t); +extern kern_return_t thread_get_special_port(thread_act_t, int, + ipc_port_t *); +extern kern_return_t thread_set_special_port(thread_act_t, int, + ipc_port_t); +extern thread_t act_lock_thread(thread_act_t); +extern void act_unlock_thread(thread_act_t); +extern void install_special_handler(thread_act_t); +extern thread_act_t thread_lock_act(thread_t); +extern void thread_unlock_act(thread_t); +extern void act_attach(thread_act_t, thread_t, unsigned); +extern void act_execute_returnhandlers(void); +extern void act_detach(thread_act_t); +extern void act_free(thread_act_t); + +/* machine-dependent functions */ +extern void act_machine_return(kern_return_t); +extern void act_machine_init(void); +extern kern_return_t act_machine_create(struct task *, thread_act_t); +extern void act_machine_destroy(thread_act_t); +extern kern_return_t act_machine_set_state(thread_act_t, + thread_flavor_t, thread_state_t, + mach_msg_type_number_t ); +extern kern_return_t act_machine_get_state(thread_act_t, + thread_flavor_t, thread_state_t, + mach_msg_type_number_t *); +extern void act_machine_switch_pcb(thread_act_t); +extern void act_virtual_machine_destroy(thread_act_t); + 
+extern kern_return_t act_create(task_t, thread_act_t *); +extern kern_return_t act_get_state(thread_act_t, int, thread_state_t, + mach_msg_type_number_t *); +extern kern_return_t act_set_state(thread_act_t, int, thread_state_t, + mach_msg_type_number_t); + +extern int dump_act(thread_act_t); /* debugging */ + +#define current_act_fast() (current_thread()->top_act) +#define current_act_slow() ((current_thread()) ? \ + current_act_fast() : \ + THR_ACT_NULL) + +#define current_act() current_act_slow() /* JMM - til we find the culprit */ + +#else /* !MACH_KERNEL_PRIVATE */ + +extern thread_act_t current_act(void); +extern void act_reference(thread_act_t); +extern void act_deallocate(thread_act_t); + +#endif /* !MACH_KERNEL_PRIVATE */ + +/* Exported to world */ +extern kern_return_t act_alert(thread_act_t, unsigned); +extern kern_return_t act_alert_mask(thread_act_t, unsigned ); +extern kern_return_t post_alert(thread_act_t, unsigned); + +extern kern_return_t thread_abort(thread_act_t); +extern kern_return_t thread_abort_safely(thread_act_t); +extern kern_return_t thread_resume(thread_act_t); +extern kern_return_t thread_suspend(thread_act_t); +extern kern_return_t thread_terminate(thread_act_t); + +typedef void (thread_apc_handler_t)(thread_act_t); + +extern kern_return_t thread_apc_set(thread_act_t, thread_apc_handler_t); +extern kern_return_t thread_apc_clear(thread_act_t, thread_apc_handler_t); + +extern vm_map_t swap_act_map(thread_act_t, vm_map_t); + +extern void *get_bsdthread_info(thread_act_t); +extern void set_bsdthread_info(thread_act_t, void *); +extern task_t get_threadtask(thread_act_t); + +#endif /* _KERN_THREAD_ACT_H_ */ diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c new file mode 100644 index 000000000..693c80084 --- /dev/null +++ b/osfmk/kern/thread_call.c @@ -0,0 +1,1186 @@ +/* + * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc. + * All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Thread-based callout module. + * + * HISTORY + * + * 10 July 1999 (debo) + * Pulled into Mac OS X (microkernel). + * + * 3 July 1993 (debo) + * Created. 
+ */ + +#include + +#include +#include +#include +#include + +#include +#include + +#include + +#define internal_call_num 768 + +#define thread_call_thread_min 4 + +static +thread_call_data_t + internal_call_storage[internal_call_num]; + +decl_simple_lock_data(static,thread_call_lock) + +static +timer_call_data_t + thread_call_delayed_timers[NCPUS]; + +static +queue_head_t + internal_call_free_queue, + pending_call_queue, delayed_call_queue; + +static +queue_head_t + idle_thread_queue; + +static +thread_t + activate_thread; + +static +boolean_t + activate_thread_awake; + +static struct { + int pending_num, + pending_hiwat; + int active_num, + active_hiwat; + int delayed_num, + delayed_hiwat; + int idle_thread_num; + int thread_num, + thread_hiwat, + thread_lowat; +} thread_calls; + +static boolean_t + thread_call_initialized = FALSE; + +static __inline__ thread_call_t + _internal_call_allocate(void); + +static __inline__ void +_internal_call_release( + thread_call_t call +); + +static __inline__ void +_pending_call_enqueue( + thread_call_t call +), +_pending_call_dequeue( + thread_call_t call +), +_delayed_call_enqueue( + thread_call_t call +), +_delayed_call_dequeue( + thread_call_t call +); + +static void __inline__ +_set_delayed_call_timer( + thread_call_t call +); + +static boolean_t +_remove_from_pending_queue( + thread_call_func_t func, + thread_call_param_t param0, + boolean_t remove_all +), +_remove_from_delayed_queue( + thread_call_func_t func, + thread_call_param_t param0, + boolean_t remove_all +); + +static __inline__ void + _call_thread_wake(void); + +static void + _call_thread(void), + _activate_thread(void); + +static void +_delayed_call_timer( + timer_call_param_t p0, + timer_call_param_t p1 +); + +#define qe(x) ((queue_entry_t)(x)) +#define TC(x) ((thread_call_t)(x)) + +/* + * Routine: thread_call_initialize [public] + * + * Description: Initialize this module, called + * early during system initialization. + * + * Preconditions: None. 
+ * + * Postconditions: None. + */ + +void +thread_call_initialize(void) +{ + thread_call_t call; + spl_t s; + int i; + + if (thread_call_initialized) + panic("thread_call_initialize"); + + simple_lock_init(&thread_call_lock, ETAP_MISC_TIMER); + + s = splsched(); + simple_lock(&thread_call_lock); + + queue_init(&pending_call_queue); + queue_init(&delayed_call_queue); + + queue_init(&internal_call_free_queue); + for ( + call = internal_call_storage; + call < &internal_call_storage[internal_call_num]; + call++) { + + enqueue_tail(&internal_call_free_queue, qe(call)); + } + + for (i = 0; i < NCPUS; i++) { + timer_call_setup(&thread_call_delayed_timers[i], + _delayed_call_timer, NULL); + } + + queue_init(&idle_thread_queue); + thread_calls.thread_lowat = thread_call_thread_min; + + activate_thread_awake = TRUE; + thread_call_initialized = TRUE; + + simple_unlock(&thread_call_lock); + splx(s); + + activate_thread = kernel_thread_with_priority(kernel_task, + MAXPRI_KERNBAND-2, _activate_thread, TRUE); +} + +void +thread_call_setup( + thread_call_t call, + thread_call_func_t func, + thread_call_param_t param0 +) +{ + call_entry_setup(call, func, param0); +} + +/* + * Routine: _internal_call_allocate [private, inline] + * + * Purpose: Allocate an internal callout entry. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. + */ + +static __inline__ thread_call_t +_internal_call_allocate(void) +{ + thread_call_t call; + + if (queue_empty(&internal_call_free_queue)) + panic("_internal_call_allocate"); + + call = TC(dequeue_head(&internal_call_free_queue)); + + return (call); +} + +/* + * Routine: _internal_call_release [private, inline] + * + * Purpose: Release an internal callout entry which + * is no longer pending (or delayed). + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. 
+ */ + +static __inline__ +void +_internal_call_release( + thread_call_t call +) +{ + if ( call >= internal_call_storage && + call < &internal_call_storage[internal_call_num] ) + enqueue_tail(&internal_call_free_queue, qe(call)); +} + +/* + * Routine: _pending_call_enqueue [private, inline] + * + * Purpose: Place an entry at the end of the + * pending queue, to be executed soon. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. + */ + +static __inline__ +void +_pending_call_enqueue( + thread_call_t call +) +{ + enqueue_tail(&pending_call_queue, qe(call)); + if (++thread_calls.pending_num > thread_calls.pending_hiwat) + thread_calls.pending_hiwat = thread_calls.pending_num; + + call->state = PENDING; +} + +/* + * Routine: _pending_call_dequeue [private, inline] + * + * Purpose: Remove an entry from the pending queue, + * effectively unscheduling it. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. + */ + +static __inline__ +void +_pending_call_dequeue( + thread_call_t call +) +{ + (void)remque(qe(call)); + thread_calls.pending_num--; + + call->state = IDLE; +} + +/* + * Routine: _delayed_call_enqueue [private, inline] + * + * Purpose: Place an entry on the delayed queue, + * after existing entries with an earlier + * (or identical) deadline. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. 
 */ + +static __inline__ +void +_delayed_call_enqueue( + thread_call_t call +) +{ + thread_call_t current; + + current = TC(queue_first(&delayed_call_queue)); + + while (TRUE) { + if ( queue_end(&delayed_call_queue, qe(current)) || + CMP_ABSOLUTETIME(&call->deadline, + &current->deadline) < 0 ) { + current = TC(queue_prev(qe(current))); + break; + } + + current = TC(queue_next(qe(current))); + } + + insque(qe(call), qe(current)); + if (++thread_calls.delayed_num > thread_calls.delayed_hiwat) + thread_calls.delayed_hiwat = thread_calls.delayed_num; + + call->state = DELAYED; +} + +/* + * Routine: _delayed_call_dequeue [private, inline] + * + * Purpose: Remove an entry from the delayed queue, + * effectively unscheduling it. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. + */ + +static __inline__ +void +_delayed_call_dequeue( + thread_call_t call +) +{ + (void)remque(qe(call)); + thread_calls.delayed_num--; + + call->state = IDLE; +} + +/* + * Routine: _set_delayed_call_timer [private] + * + * Purpose: Reset the timer so that it + * next expires when the entry is due. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. + */ + +static __inline__ void +_set_delayed_call_timer( + thread_call_t call +) +{ + timer_call_t timer = &thread_call_delayed_timers[cpu_number()]; + + timer_call_enter(timer, call->deadline); +} + +/* + * Routine: _remove_from_pending_queue [private] + * + * Purpose: Remove the first (or all) matching + * entries from the pending queue, + * effectively unscheduling them. + * Returns whether any matching entries + * were found. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. 
+ */ + +static +boolean_t +_remove_from_pending_queue( + thread_call_func_t func, + thread_call_param_t param0, + boolean_t remove_all +) +{ + boolean_t call_removed = FALSE; + thread_call_t call; + + call = TC(queue_first(&pending_call_queue)); + + while (!queue_end(&pending_call_queue, qe(call))) { + if ( call->func == func && + call->param0 == param0 ) { + thread_call_t next = TC(queue_next(qe(call))); + + _pending_call_dequeue(call); + + _internal_call_release(call); + + call_removed = TRUE; + if (!remove_all) + break; + + call = next; + } + else + call = TC(queue_next(qe(call))); + } + + return (call_removed); +} + +/* + * Routine: _remove_from_delayed_queue [private] + * + * Purpose: Remove the first (or all) matching + * entries from the delayed queue, + * effectively unscheduling them. + * Returns whether any matching entries + * were found. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. + */ + +static +boolean_t +_remove_from_delayed_queue( + thread_call_func_t func, + thread_call_param_t param0, + boolean_t remove_all +) +{ + boolean_t call_removed = FALSE; + thread_call_t call; + + call = TC(queue_first(&delayed_call_queue)); + + while (!queue_end(&delayed_call_queue, qe(call))) { + if ( call->func == func && + call->param0 == param0 ) { + thread_call_t next = TC(queue_next(qe(call))); + + _delayed_call_dequeue(call); + + _internal_call_release(call); + + call_removed = TRUE; + if (!remove_all) + break; + + call = next; + } + else + call = TC(queue_next(qe(call))); + } + + return (call_removed); +} + +/* + * Routine: thread_call_func [public] + * + * Purpose: Schedule a function callout. + * Guarantees { function, argument } + * uniqueness if unique_call is TRUE. + * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. 
+ */ + +void +thread_call_func( + thread_call_func_t func, + thread_call_param_t param, + boolean_t unique_call +) +{ + thread_call_t call; + int s; + + if (!thread_call_initialized) + panic("thread_call_func"); + + s = splsched(); + simple_lock(&thread_call_lock); + + call = TC(queue_first(&pending_call_queue)); + + while (unique_call && !queue_end(&pending_call_queue, qe(call))) { + if ( call->func == func && + call->param0 == param ) { + break; + } + + call = TC(queue_next(qe(call))); + } + + if (!unique_call || queue_end(&pending_call_queue, qe(call))) { + call = _internal_call_allocate(); + call->func = func; + call->param0 = param; + call->param1 = 0; + + _pending_call_enqueue(call); + + _call_thread_wake(); + } + + simple_unlock(&thread_call_lock); + splx(s); +} + +/* + * Routine: thread_call_func_delayed [public] + * + * Purpose: Schedule a function callout to + * occur at the stated time. + * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. + */ + +void +thread_call_func_delayed( + thread_call_func_t func, + thread_call_param_t param, + AbsoluteTime deadline +) +{ + thread_call_t call; + int s; + + if (!thread_call_initialized) + panic("thread_call_func_delayed"); + + s = splsched(); + simple_lock(&thread_call_lock); + + call = _internal_call_allocate(); + call->func = func; + call->param0 = param; + call->param1 = 0; + call->deadline = deadline; + + _delayed_call_enqueue(call); + + if (queue_first(&delayed_call_queue) == qe(call)) + _set_delayed_call_timer(call); + + simple_unlock(&thread_call_lock); + splx(s); +} + +/* + * Routine: thread_call_func_cancel [public] + * + * Purpose: Unschedule a function callout. + * Removes one (or all) + * { function, argument } + * instance(s) from either (or both) + * the pending and the delayed queue, + * in that order. Returns a boolean + * indicating whether any calls were + * cancelled. 
+ * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. + */ + +boolean_t +thread_call_func_cancel( + thread_call_func_t func, + thread_call_param_t param, + boolean_t cancel_all +) +{ + boolean_t result; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (cancel_all) + result = _remove_from_pending_queue(func, param, cancel_all) | + _remove_from_delayed_queue(func, param, cancel_all); + else + result = _remove_from_pending_queue(func, param, cancel_all) || + _remove_from_delayed_queue(func, param, cancel_all); + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +/* + * Routine: thread_call_allocate [public] + * + * Purpose: Allocate an external callout + * entry. + * + * Preconditions: None. + * + * Postconditions: None. + */ + +thread_call_t +thread_call_allocate( + thread_call_func_t func, + thread_call_param_t param0 +) +{ + thread_call_t call = (void *)kalloc(sizeof (thread_call_data_t)); + + call->func = func; + call->param0 = param0; + call->state = IDLE; + + return (call); +} + +/* + * Routine: thread_call_free [public] + * + * Purpose: Free an external callout + * entry. + * + * Preconditions: None. + * + * Postconditions: None. + */ + +boolean_t +thread_call_free( + thread_call_t call +) +{ + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state != IDLE) { + simple_unlock(&thread_call_lock); + splx(s); + + return (FALSE); + } + + simple_unlock(&thread_call_lock); + splx(s); + + kfree((vm_offset_t)call, sizeof (thread_call_data_t)); + + return (TRUE); +} + +/* + * Routine: thread_call_enter [public] + * + * Purpose: Schedule an external callout + * entry to occur "soon". Returns a + * boolean indicating whether the call + * had been already scheduled. + * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. 
+ */ + +boolean_t +thread_call_enter( + thread_call_t call +) +{ + boolean_t result = TRUE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state != PENDING) { + if (call->state == DELAYED) + _delayed_call_dequeue(call); + else if (call->state == IDLE) + result = FALSE; + + _pending_call_enqueue(call); + + _call_thread_wake(); + } + + call->param1 = 0; + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +boolean_t +thread_call_enter1( + thread_call_t call, + thread_call_param_t param1 +) +{ + boolean_t result = TRUE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state != PENDING) { + if (call->state == DELAYED) + _delayed_call_dequeue(call); + else if (call->state == IDLE) + result = FALSE; + + _pending_call_enqueue(call); + + _call_thread_wake(); + } + + call->param1 = param1; + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +/* + * Routine: thread_call_enter_delayed [public] + * + * Purpose: Schedule an external callout + * entry to occur at the stated time. + * Returns a boolean indicating whether + * the call had been already scheduled. + * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. 
+ */ + +boolean_t +thread_call_enter_delayed( + thread_call_t call, + AbsoluteTime deadline +) +{ + boolean_t result = TRUE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state == PENDING) + _pending_call_dequeue(call); + else if (call->state == DELAYED) + _delayed_call_dequeue(call); + else if (call->state == IDLE) + result = FALSE; + + call->param1 = 0; + call->deadline = deadline; + + _delayed_call_enqueue(call); + + if (queue_first(&delayed_call_queue) == qe(call)) + _set_delayed_call_timer(call); + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +boolean_t +thread_call_enter1_delayed( + thread_call_t call, + thread_call_param_t param1, + AbsoluteTime deadline +) +{ + boolean_t result = TRUE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state == PENDING) + _pending_call_dequeue(call); + else if (call->state == DELAYED) + _delayed_call_dequeue(call); + else if (call->state == IDLE) + result = FALSE; + + call->param1 = param1; + call->deadline = deadline; + + _delayed_call_enqueue(call); + + if (queue_first(&delayed_call_queue) == qe(call)) + _set_delayed_call_timer(call); + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +/* + * Routine: thread_call_cancel [public] + * + * Purpose: Unschedule a callout entry. + * Returns a boolean indicating + * whether the call had actually + * been scheduled. + * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. 
+ */ + +boolean_t +thread_call_cancel( + thread_call_t call +) +{ + boolean_t result = TRUE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state == PENDING) + _pending_call_dequeue(call); + else if (call->state == DELAYED) + _delayed_call_dequeue(call); + else + result = FALSE; + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +/* + * Routine: thread_call_is_delayed [public] + * + * Purpose: Returns a boolean indicating + * whether a call is currently scheduled + * to occur at a later time. Optionally + * returns the expiration time. + * + * Preconditions: Callable from an interrupt context + * below splsched. + * + * Postconditions: None. + */ + +boolean_t +thread_call_is_delayed( + thread_call_t call, + AbsoluteTime *deadline) +{ + boolean_t result = FALSE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + if (call->state == DELAYED) { + if (deadline != NULL) + *deadline = call->deadline; + result = TRUE; + } + + simple_unlock(&thread_call_lock); + splx(s); + + return (result); +} + +/* + * Routine: _call_thread_wake [private] + * + * Purpose: Wake a callout thread to service + * newly pending callout entries. May wake + * the activate thread to either wake or + * create additional callout threads. + * + * Preconditions: thread_call_lock held. + * + * Postconditions: None. 
+ */ + +static __inline__ +void +_call_thread_wake(void) +{ + thread_t thread_to_wake; + + if (!queue_empty(&idle_thread_queue)) { + queue_remove_first( + &idle_thread_queue, thread_to_wake, thread_t, wait_link); + clear_wait(thread_to_wake, THREAD_AWAKENED); + thread_calls.idle_thread_num--; + } + else + thread_to_wake = THREAD_NULL; + + if (!activate_thread_awake && + (thread_to_wake == THREAD_NULL || thread_calls.thread_num < + (thread_calls.active_num + thread_calls.pending_num))) { + clear_wait(activate_thread, THREAD_AWAKENED); + activate_thread_awake = TRUE; + } +} + +#if defined (__i386__) +#define NO_CONTINUATIONS (1) +#else +#define NO_CONTINUATIONS (0) +#endif + +/* + * Routine: _call_thread [private] + * + * Purpose: Executed by a callout thread. + * + * Preconditions: None. + * + * Postconditions: None. + */ + +static +void +_call_thread_continue(void) +{ + thread_t self = current_thread(); + +#if NO_CONTINUATIONS + loop: +#endif + (void) splsched(); + simple_lock(&thread_call_lock); + + while (thread_calls.pending_num > 0) { + thread_call_t call; + thread_call_func_t func; + thread_call_param_t param0, param1; + + call = TC(dequeue_head(&pending_call_queue)); + thread_calls.pending_num--; + + func = call->func; + param0 = call->param0; + param1 = call->param1; + + call->state = IDLE; + + _internal_call_release(call); + + if (++thread_calls.active_num > thread_calls.active_hiwat) + thread_calls.active_hiwat = thread_calls.active_num; + + if (thread_calls.pending_num > 0) + _call_thread_wake(); + + simple_unlock(&thread_call_lock); + (void) spllo(); + + (*func)(param0, param1); + + (void)thread_funnel_set(self->funnel_lock, FALSE); + + (void) splsched(); + simple_lock(&thread_call_lock); + + thread_calls.active_num--; + } + + if ((thread_calls.thread_num - thread_calls.active_num) <= + thread_calls.thread_lowat) { + queue_enter(&idle_thread_queue, self, thread_t, wait_link); + thread_calls.idle_thread_num++; + + assert_wait(&idle_thread_queue, 
THREAD_INTERRUPTIBLE); + + simple_unlock(&thread_call_lock); + (void) spllo(); + +#if NO_CONTINUATIONS + thread_block((void (*)(void)) 0); + goto loop; +#else + thread_block(_call_thread_continue); +#endif + /* NOTREACHED */ + } + + thread_calls.thread_num--; + + simple_unlock(&thread_call_lock); + (void) spllo(); + + (void) thread_terminate(self->top_act); + /* NOTREACHED */ +} + +static +void +_call_thread(void) +{ + thread_t self = current_thread(); + + stack_privilege(self); + + _call_thread_continue(); + /* NOTREACHED */ +} + +/* + * Routine: _activate_thread [private] + * + * Purpose: Executed by the activate thread. + * + * Preconditions: None. + * + * Postconditions: Never terminates. + */ + +static +void +_activate_thread_continue(void) +{ +#if NO_CONTINUATIONS + loop: +#endif + (void) splsched(); + simple_lock(&thread_call_lock); + + if (thread_calls.thread_num < + (thread_calls.active_num + thread_calls.pending_num)) { + + if (++thread_calls.thread_num > thread_calls.thread_hiwat) + thread_calls.thread_hiwat = thread_calls.thread_num; + + simple_unlock(&thread_call_lock); + (void) spllo(); + + (void) kernel_thread_with_priority(kernel_task, + MAXPRI_KERNBAND-1, _call_thread, TRUE); +#if NO_CONTINUATIONS + thread_block((void (*)(void)) 0); + goto loop; +#else + thread_block(_activate_thread_continue); +#endif + /* NOTREACHED */ + } + else if (thread_calls.pending_num > 0) { + _call_thread_wake(); + + simple_unlock(&thread_call_lock); + (void) spllo(); + +#if NO_CONTINUATIONS + thread_block((void (*)(void)) 0); + goto loop; +#else + thread_block(_activate_thread_continue); +#endif + /* NOTREACHED */ + } + + assert_wait(&activate_thread_awake, THREAD_INTERRUPTIBLE); + activate_thread_awake = FALSE; + + simple_unlock(&thread_call_lock); + (void) spllo(); + +#if NO_CONTINUATIONS + thread_block((void (*)(void)) 0); + goto loop; +#else + thread_block(_activate_thread_continue); +#endif + /* NOTREACHED */ +} + +static +void +_activate_thread(void) +{ + thread_t 
self = current_thread(); + + self->vm_privilege = TRUE; + vm_page_free_reserve(2); /* XXX */ + stack_privilege(self); + + _activate_thread_continue(); + /* NOTREACHED */ +} + +static +void +_delayed_call_timer( + timer_call_param_t p0, + timer_call_param_t p1 +) +{ + AbsoluteTime timestamp; + thread_call_t call; + boolean_t new_pending = FALSE; + int s; + + s = splsched(); + simple_lock(&thread_call_lock); + + clock_get_uptime(&timestamp); + + call = TC(queue_first(&delayed_call_queue)); + + while (!queue_end(&delayed_call_queue, qe(call))) { + if (CMP_ABSOLUTETIME(&call->deadline, &timestamp) <= 0) { + _delayed_call_dequeue(call); + + _pending_call_enqueue(call); + new_pending = TRUE; + } + else + break; + + call = TC(queue_first(&delayed_call_queue)); + } + + if (!queue_end(&delayed_call_queue, qe(call))) + _set_delayed_call_timer(call); + + if (new_pending) + _call_thread_wake(); + + simple_unlock(&thread_call_lock); + splx(s); +} diff --git a/osfmk/kern/thread_call.h b/osfmk/kern/thread_call.h new file mode 100644 index 000000000..0e63edf1c --- /dev/null +++ b/osfmk/kern/thread_call.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc. + * All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Declarations for thread-based callouts. + * + * HISTORY + * + * 10 July 1999 (debo) + * Pulled into Mac OS X (microkernel). + * + * 3 July 1993 (debo) + * Created. + */ + +#ifndef _KERN_THREAD_CALL_H_ +#define _KERN_THREAD_CALL_H_ + +#include + +#include + +typedef struct call_entry *thread_call_t; +typedef void *thread_call_param_t; +typedef void (*thread_call_func_t)( + thread_call_param_t param0, + thread_call_param_t param1); + +boolean_t +thread_call_enter( + thread_call_t call +); +boolean_t +thread_call_enter1( + thread_call_t call, + thread_call_param_t param1 +); +boolean_t +thread_call_enter_delayed( + thread_call_t call, + AbsoluteTime deadline +); +boolean_t +thread_call_enter1_delayed( + thread_call_t call, + thread_call_param_t param1, + AbsoluteTime deadline +); +boolean_t +thread_call_cancel( + thread_call_t call +); +boolean_t +thread_call_is_delayed( + thread_call_t call, + AbsoluteTime *deadline +); + +thread_call_t +thread_call_allocate( + thread_call_func_t func, + thread_call_param_t param0 +); +boolean_t +thread_call_free( + thread_call_t call +); + +/* + * This portion of the interface + * is OBSOLETE and DEPRECATED. It + * will disappear shortly. 
+ */ +void +thread_call_func( + thread_call_func_t func, + thread_call_param_t param, + boolean_t unique_call +); +void +thread_call_func_delayed( + thread_call_func_t func, + thread_call_param_t param, + AbsoluteTime deadline +); + +boolean_t +thread_call_func_cancel( + thread_call_func_t func, + thread_call_param_t param, + boolean_t cancel_all +); +/* End OBSOLETE and DEPRECATED */ + +#ifdef MACH_KERNEL_PRIVATE +#include + +typedef struct call_entry thread_call_data_t; + +void +thread_call_initialize(void); + +void +thread_call_setup( + thread_call_t call, + thread_call_func_t func, + thread_call_param_t param0 +); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_THREAD_CALL_H_ */ diff --git a/osfmk/kern/thread_policy.c b/osfmk/kern/thread_policy.c new file mode 100644 index 000000000..b04cf8f01 --- /dev/null +++ b/osfmk/kern/thread_policy.c @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 15 October 2000 (debo) + * Created. 
+ */ + +#include + +kern_return_t +thread_policy_set( + thread_act_t act, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t count) +{ + kern_return_t result = KERN_SUCCESS; + thread_t thread; + task_t task; + spl_t s; + + if (act == THR_ACT_NULL) + return (KERN_INVALID_ARGUMENT); + + act_lock(act); + task = act->task; + act_unlock(act); + + task_lock(task); + + thread = act_lock_thread(act); + if (!act->active) { + act_unlock_thread(act); + task_unlock(task); + + return (KERN_TERMINATED); + } + + if (thread == THREAD_NULL) { + act_unlock_thread(act); + task_unlock(task); + + return (KERN_NOT_SUPPORTED); + } + +#define thread_priority_set(thread, pri) \ +MACRO_BEGIN \ + if ((thread)->depress_priority >= 0) \ + (thread)->depress_priority = (pri); \ + else { \ + (thread)->priority = (pri); \ + compute_priority((thread), TRUE); \ + \ + if ((thread) == current_thread()) \ + ast_on(AST_BLOCK); \ + } \ +MACRO_END + + switch (flavor) { + + case THREAD_STANDARD_POLICY: + { + integer_t priority; + + s = splsched(); + thread_lock(thread); + + thread->sched_mode &=~ TH_MODE_REALTIME; + + thread->policy = POLICY_TIMESHARE; + + if (thread->importance > MAXPRI) + priority = MAXPRI; + else + if (thread->importance < -MAXPRI) + priority = -MAXPRI; + else + priority = thread->importance; + + priority += task->priority; + + if (priority > thread->max_priority) + priority = thread->max_priority; + else + if (priority < MINPRI) + priority = MINPRI; + + thread_priority_set(thread, priority); + + thread_unlock(thread); + splx(s); + + break; + } + + case THREAD_TIME_CONSTRAINT_POLICY: + { + thread_time_constraint_policy_t info; + + if (count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + info = (thread_time_constraint_policy_t)policy_info; + + s = splsched(); + thread_lock(thread); + + thread->sched_mode |= TH_MODE_REALTIME; + + thread->realtime.period = info->period; + thread->realtime.computation = 
info->computation; + thread->realtime.constraint = info->constraint; + thread->realtime.preemptible = info->preemptible; + + thread->policy = POLICY_RR; + + thread_priority_set(thread, BASEPRI_REALTIME); + + thread_unlock(thread); + splx(s); + + break; + } + + case THREAD_PRECEDENCE_POLICY: + { + thread_precedence_policy_t info; + + if (count < THREAD_PRECEDENCE_POLICY_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + info = (thread_precedence_policy_t)policy_info; + + s = splsched(); + thread_lock(thread); + + thread->importance = info->importance; + + if (!(thread->sched_mode & TH_MODE_REALTIME)) { + integer_t priority; + + if (thread->importance > MAXPRI) + priority = MAXPRI; + else + if (thread->importance < -MAXPRI) + priority = -MAXPRI; + else + priority = thread->importance; + + priority += task->priority; + + if (priority > thread->max_priority) + priority = thread->max_priority; + else + if (priority < MINPRI) + priority = MINPRI; + + thread_priority_set(thread, priority); + } + + thread_unlock(thread); + splx(s); + + break; + } + + default: + result = KERN_INVALID_ARGUMENT; + break; + } + + act_unlock_thread(act); + + task_unlock(task); + + return (result); +} + +kern_return_t +thread_policy_get( + thread_act_t act, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t *count, + boolean_t *get_default) +{ + kern_return_t result = KERN_SUCCESS; + thread_t thread; + spl_t s; + + if (act == THR_ACT_NULL) + return (KERN_INVALID_ARGUMENT); + + thread = act_lock_thread(act); + if (!act->active) { + act_unlock_thread(act); + + return (KERN_TERMINATED); + } + + if (thread == THREAD_NULL) { + act_unlock_thread(act); + + return (KERN_NOT_SUPPORTED); + } + + switch (flavor) { + + case THREAD_STANDARD_POLICY: + s = splsched(); + thread_lock(thread); + + if (thread->sched_mode & TH_MODE_REALTIME) + *get_default = TRUE; + + thread_unlock(thread); + splx(s); + break; + + case THREAD_TIME_CONSTRAINT_POLICY: + { + 
thread_time_constraint_policy_t info; + + if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + info = (thread_time_constraint_policy_t)policy_info; + + s = splsched(); + thread_lock(thread); + + if ((thread->sched_mode & TH_MODE_REALTIME) && !(*get_default)) { + info->period = thread->realtime.period; + info->computation = thread->realtime.computation; + info->constraint = thread->realtime.constraint; + info->preemptible = thread->realtime.preemptible; + } + else { + extern natural_t min_quantum_abstime; + + *get_default = TRUE; + + info->period = 0; + info->computation = min_quantum_abstime / 2; + info->constraint = min_quantum_abstime; + info->preemptible = TRUE; + } + + thread_unlock(thread); + splx(s); + + break; + } + + case THREAD_PRECEDENCE_POLICY: + { + thread_precedence_policy_t info; + + if (*count < THREAD_PRECEDENCE_POLICY_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + info = (thread_precedence_policy_t)policy_info; + + if (*get_default) + info->importance = 0; + else { + s = splsched(); + thread_lock(thread); + + info->importance = thread->importance; + + thread_unlock(thread); + splx(s); + } + + break; + } + + default: + result = KERN_INVALID_ARGUMENT; + break; + } + + act_unlock_thread(act); + + return (result); +} diff --git a/osfmk/kern/thread_pool.c b/osfmk/kern/thread_pool.c new file mode 100644 index 000000000..8ee6b2334 --- /dev/null +++ b/osfmk/kern/thread_pool.c @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:57 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.6 1995/08/21 20:44:57 devrcs + * Fix ri-osc CR1405: Zero act->thread_pool_next when act not on pool. + * [1995/07/25 20:19:06 bolinger] + * + * Revision 1.1.7.5 1995/01/18 18:35:00 ezf + * updated Utah CR notice + * [1995/01/18 18:30:33 ezf] + * + * Revision 1.1.7.4 1995/01/10 05:15:20 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * Comments from merged code below, as marked + * [1994/12/09 21:10:54 dwm] + * + * mk6 CR668 - 1.3b26 merge + * event_t casts + * [1994/11/04 09:39:15 dwm] + * + * Revision 1.1.7.3 1994/11/23 16:01:15 devrcs + * BEGIN comments from merge of nmk18b4 - nmk18b7 + * Cleared `handlers' field of activation when returning + * it to thread pool. + * [1994/11/23 03:48:31 burke] + * + * Added an assert to `thread_pool_put_act()' to check + * for presence of handlers when returning an activation + * to its pool. + * [1994/11/18 13:36:29 rkc] + * + * Changed `thread_pool_put_act()'s call to `act_set_thread_pool()' to + * be a call to `act_locked_act_set_thread_pool()' to obey locking + * assumptions. + * [1994/11/10 23:29:51 rkc] + * + * Cosmetic changes to thread_pool_put_act. + * [1994/11/09 21:49:57 watkins] + * + * Check out for merge. 
+ * [1994/11/09 14:16:43 watkins] + * + * Revision 1.1.9.2 1994/11/08 15:32:42 watkins + * Add thread_pool_put_act. + * END comments from merge of nmk18b4 - nmk18b7 + * [1994/11/09 14:16:33 watkins] + * + * Revision 1.1.7.2 1994/09/23 02:31:05 ezf + * change marker to not FREE + * [1994/09/22 21:38:01 ezf] + * + * Revision 1.1.7.1 1994/09/02 02:40:54 watkins + * Check for destroyed thread pool port after thread_pool_get_act + * blocks. + * [1994/09/02 02:37:46 watkins] + * + * Revision 1.1.2.8 1994/06/09 14:14:04 dswartz + * Preemption merge. + * [1994/06/09 14:08:35 dswartz] + * + * Revision 1.1.2.7 1994/06/01 19:30:10 bolinger + * mk6 CR125: Update to reflect new naming for thread_pool of + * thread_act. + * [1994/06/01 19:14:46 bolinger] + * + * Revision 1.1.2.6 1994/03/17 22:38:34 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:28:15 dwm] + * + * Revision 1.1.2.5 1994/02/09 00:42:29 dwm + * Put a variety of debugging code under MACH_ASSERT, + * to enhance PROD performance a bit. + * [1994/02/09 00:35:07 dwm] + * + * Revision 1.1.2.4 1994/02/04 03:46:25 condict + * Put if MACH_ASSERT around debugging printfs. + * [1994/02/04 03:44:10 condict] + * + * Revision 1.1.2.3 1994/01/21 23:45:15 dwm + * Thread_pools now embedded directly in port/pset. + * Adjust thread_pool_create. + * [1994/01/21 23:43:18 dwm] + * + * Revision 1.1.2.2 1994/01/14 18:42:01 bolinger + * Rename thread_pool_block() to thread_pool_get_act() [sic], to + * better reflect its function. Add leading comment and assertion + * checks. + * [1994/01/14 18:18:11 bolinger] + * + * Revision 1.1.2.1 1994/01/12 17:53:17 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:15:21 dwm] + * + * $EndLog$ + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + * + * File: thread_pool.c + * + * thread_pool management routines + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + + +/* Initialize a new EMPTY thread_pool. */ +kern_return_t +thread_pool_init(thread_pool_t new_thread_pool) +{ + assert(new_thread_pool != THREAD_POOL_NULL); + + /* Start with one reference for the caller */ + new_thread_pool->thr_acts = (struct thread_activation *)0; + return KERN_SUCCESS; +} + + +/* + * Obtain an activation from a thread pool, blocking if + * necessary. Return the activation locked, since it's + * in an inconsistent state (not in a pool, not attached + * to a thread). + * + * Called with ip_lock() held for pool_port. Returns + * the same way. + * + * If the thread pool port is destroyed while we are blocked, + * then return a null activation. Callers must check for this + * error case. 
+ */ +thread_act_t +thread_pool_get_act(ipc_port_t pool_port) +{ + thread_pool_t thread_pool = &pool_port->ip_thread_pool; + thread_act_t thr_act; + +#if MACH_ASSERT + assert(thread_pool != THREAD_POOL_NULL); + if (watchacts & WA_ACT_LNK) + printf("thread_pool_block: %x, waiting=%d\n", + thread_pool, thread_pool->waiting); +#endif + + while ((thr_act = thread_pool->thr_acts) == THR_ACT_NULL) { + if (!ip_active(pool_port)) + return THR_ACT_NULL; + thread_pool->waiting = 1; + assert_wait((event_t)thread_pool, THREAD_INTERRUPTIBLE); + ip_unlock(pool_port); + thread_block((void (*)(void)) 0); /* block self */ + ip_lock(pool_port); + } + assert(thr_act->thread == THREAD_NULL); + assert(thr_act->suspend_count == 0); + thread_pool->thr_acts = thr_act->thread_pool_next; + act_lock(thr_act); + thr_act->thread_pool_next = 0; + +#if MACH_ASSERT + if (watchacts & WA_ACT_LNK) + printf("thread_pool_block: return %x, next=%x\n", + thr_act, thread_pool->thr_acts); +#endif + return thr_act; +} + +/* + * thread_pool_put_act + * + * Return an activation to its pool. Assumes the activation + * and pool (if it exists) are locked. + */ +void +thread_pool_put_act( thread_act_t thr_act ) +{ + thread_pool_t thr_pool; + + /* + * Find the thread pool for this activation. + */ + if (thr_act->pool_port) + thr_pool = &thr_act->pool_port->ip_thread_pool; + else + thr_pool = THREAD_POOL_NULL; + + /* + * Return act to the thread_pool's list, if it is still + * alive. Otherwise, remove it from its thread_pool, which + * will deallocate it and destroy it. + */ + if (thr_act->active) { + assert(thr_pool); + thr_act->handlers = NULL; + thr_act->thread_pool_next = thr_pool->thr_acts; + thr_pool->thr_acts = thr_act; + if (thr_pool->waiting) + thread_pool_wakeup(thr_pool); + } else if (thr_pool) { + assert(thr_act->pool_port); + act_locked_act_set_thread_pool(thr_act, IP_NULL); + } + + return; +} + + +/* + * Called with ip_lock() held for port containing thread_pool. + * Returns same way. 
+ */ +void +thread_pool_wakeup(thread_pool_t thread_pool) +{ +#if MACH_ASSERT + assert(thread_pool != THREAD_POOL_NULL); + if (watchacts & WA_ACT_LNK) + printf("thread_pool_wakeup: %x, waiting=%d, head=%x\n", + thread_pool, thread_pool->waiting, thread_pool->thr_acts); +#endif /* MACH_ASSERT */ + + if (thread_pool->waiting) { + thread_wakeup((event_t)thread_pool); + thread_pool->waiting = 0; + } +} diff --git a/osfmk/kern/thread_pool.h b/osfmk/kern/thread_pool.h new file mode 100644 index 000000000..d0bf54fc8 --- /dev/null +++ b/osfmk/kern/thread_pool.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:57 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.4 1995/01/18 18:35:03 ezf + * updated Utah CR notice + * [1995/01/18 18:30:36 ezf] + * + * Revision 1.1.7.3 1995/01/10 05:15:24 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * * Rev 1.1.8.2 1994/11/08 15:33:03 watkins + * Add declaration for thread_pool_put_act. + * [1994/12/09 21:10:56 dwm] + * + * Revision 1.1.7.1 1994/09/23 02:31:15 ezf + * change marker to not FREE + * [1994/09/22 21:38:04 ezf] + * + * Revision 1.1.2.9 1994/06/09 14:14:07 dswartz + * Preemption merge. + * [1994/06/09 14:08:37 dswartz] + * + * Revision 1.1.2.8 1994/06/01 19:30:14 bolinger + * mk6 CR125: Update to reflect changes in access to thread_pool + * of a thread_act. + * [1994/06/01 19:18:25 bolinger] + * + * Revision 1.1.2.7 1994/03/17 22:38:37 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:28:18 dwm] + * + * Revision 1.1.2.6 1994/02/09 00:42:42 dwm + * Put a variety of debugging code under MACH_ASSERT, + * to enhance PROD performance a bit. + * [1994/02/09 00:35:13 dwm] + * + * Revision 1.1.2.5 1994/01/21 23:45:08 dwm + * Thread_pools now embedded directly in port/pset, + * delete refcount, modify protos. + * [1994/01/21 23:43:13 dwm] + * + * Revision 1.1.2.4 1994/01/17 19:09:32 dwm + * Fix ref/dealloc macros, missing semicolon. + * [1994/01/17 19:09:16 dwm] + * + * Revision 1.1.2.3 1994/01/17 18:08:57 dwm + * Add finer grained act tracing. + * [1994/01/17 16:06:54 dwm] + * + * Revision 1.1.2.2 1994/01/14 18:42:05 bolinger + * Update to reflect thread_pool_block() -> thread_pool_get_act() name + * change. 
+ * [1994/01/14 18:18:40 bolinger] + * + * Revision 1.1.2.1 1994/01/12 17:53:21 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:15:24 dwm] + * + * $EndLog$ + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + * + * File: thread_pool.h + * + * Defines the thread_pool: a pool of available activations. + * + */ + +#ifndef _KERN_THREAD_POOL_H_ +#define _KERN_THREAD_POOL_H_ + +#include +#include +#include + +typedef struct thread_pool { + + /* List of available activations, all active but not in use. 
*/ + struct thread_activation *thr_acts; + + /* true if somebody is waiting for an activation from this pool */ + int waiting; + +} thread_pool, *thread_pool_t; +#define THREAD_POOL_NULL ((thread_pool_t)0) + +/* Exported to kern/startup.c only */ +kern_return_t thread_pool_init(thread_pool_t new_thread_pool); + +/* Get an activation from a thread_pool, blocking if need be */ +extern struct thread_activation *thread_pool_get_act( ipc_port_t ); +extern void thread_pool_put_act( thread_act_t ); + +/* Wake up a waiter upon return to thread_pool */ +extern void thread_pool_wakeup( thread_pool_t ); + +#if MACH_ASSERT +/* + * Debugging support - "watchacts", a patchable selective trigger + */ +extern unsigned int watchacts; /* debug printf trigger */ +#define WA_SCHED 0x001 /* kern/sched_prim.c */ +#define WA_THR 0x002 /* kern/thread.c */ +#define WA_ACT_LNK 0x004 /* kern/thread_act.c act mgmt */ +#define WA_ACT_HDLR 0x008 /* kern/thread_act.c act hldrs */ +#define WA_TASK 0x010 /* kern/task.c */ +#define WA_BOOT 0x020 /* bootstrap,startup.c */ +#define WA_PCB 0x040 /* machine/pcb.c */ +#define WA_PORT 0x080 /* ports + port sets */ +#define WA_EXIT 0x100 /* exit path */ +#define WA_SWITCH 0x200 /* context switch (!!) */ +#define WA_STATE 0x400 /* get/set state (!!) */ +#define WA_ALL (~0) +#endif /* MACH_ASSERT */ + +#endif /* _KERN_THREAD_POOL_H_ */ diff --git a/osfmk/kern/thread_swap.c b/osfmk/kern/thread_swap.c new file mode 100644 index 000000000..46bb6cc9e --- /dev/null +++ b/osfmk/kern/thread_swap.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * + * File: kern/thread_swap.c + * Author: Avadis Tevanian, Jr. + * Date: 1987 + * + * Mach thread swapper: + * Swap in threads that need to be run. This is done here + * by the swapper thread since it cannot be done (in general) + * when the kernel tries to place a thread on a run queue. 
+ * + * Note: The act of swapping a thread in Mach does not mean that + * its memory gets forcibly swapped to secondary storage. The memory + * for the task corresponding to a swapped thread is paged out + * through the normal paging mechanism. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for splsched */ +#include +#include +#include + +queue_head_t swapin_queue; +decl_simple_lock_data(, swapper_lock_data) + +#define swapper_lock() simple_lock(&swapper_lock_data) +#define swapper_unlock() simple_unlock(&swapper_lock_data) + +mach_counter_t c_swapin_thread_block; + +/* + * swapper_init: [exported] + * + * Initialize the swapper module. + */ +void swapper_init() +{ + queue_init(&swapin_queue); + simple_lock_init(&swapper_lock_data, ETAP_THREAD_SWAPPER); +} + +/* + * thread_swapin: [exported] + * + * Place the specified thread in the list of threads to swapin. It + * is assumed that the thread is locked, therefore we are at splsched. + * + * We don't bother with stack_alloc_try to optimize swapin; + * our callers have already tried that route. + */ + +void thread_swapin(thread) + thread_t thread; +{ + switch (thread->state & TH_STACK_STATE) { + case TH_STACK_HANDOFF: + /* + * Swapped out - queue for swapin thread. + */ + thread->state = (thread->state & ~TH_STACK_STATE) + | TH_STACK_COMING_IN; + swapper_lock(); + enqueue_tail(&swapin_queue, (queue_entry_t) thread); + swapper_unlock(); + thread_wakeup((event_t) &swapin_queue); + break; + + case TH_STACK_COMING_IN: + /* + * Already queued for swapin thread, or being + * swapped in. + */ + break; + + default: + /* + * Already swapped in. + */ + panic("thread_swapin"); + } +} + +/* + * thread_doswapin: + * + * Swapin the specified thread, if it should be runnable, then put + * it on a run queue. No locks should be held on entry, as it is + * likely that this routine will sleep (waiting for stack allocation). 
+ */ +void thread_doswapin(thread) + register thread_t thread; +{ + spl_t s; + vm_offset_t stack; + + /* + * do machdep allocation + */ + + /* + * Allocate the kernel stack. + */ + stack = stack_alloc(thread, thread_continue); + assert(stack); + + /* + * Place on run queue. + */ + + s = splsched(); + thread_lock(thread); + thread->state &= ~(TH_STACK_HANDOFF | TH_STACK_COMING_IN); + if (thread->state & TH_RUN) + thread_setrun(thread, TRUE, FALSE); + thread_unlock(thread); + (void) splx(s); +} + +/* + * swapin_thread: [exported] + * + * This procedure executes as a kernel thread. Threads that need to + * be swapped in are swapped in by this thread. + */ +void swapin_thread_continue() +{ + for (;;) { + register thread_t thread; + spl_t s; + + s = splsched(); + swapper_lock(); + + while ((thread = (thread_t) dequeue_head(&swapin_queue)) + != THREAD_NULL) { + swapper_unlock(); + (void) splx(s); + + thread_doswapin(thread); /* may block */ + + s = splsched(); + swapper_lock(); + } + + assert_wait((event_t) &swapin_queue, THREAD_UNINT); + swapper_unlock(); + (void) splx(s); + counter(c_swapin_thread_block++); +#if defined (__i386__) + thread_block((void (*)(void)) 0); +#else + thread_block(swapin_thread_continue); +#endif + } +} + +void swapin_thread() +{ + stack_privilege(current_thread()); + current_thread()->vm_privilege = TRUE; + + swapin_thread_continue(); + /*NOTREACHED*/ +} diff --git a/osfmk/kern/thread_swap.h b/osfmk/kern/thread_swap.h new file mode 100644 index 000000000..63dde530c --- /dev/null +++ b/osfmk/kern/thread_swap.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * Copyright (c) 1988 Carnegie-Mellon University + * Copyright (c) 1987 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:57 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.7 1995/06/13 18:58:49 bolinger + * Fix ri-osc CR1391: New return type from thread_swapin_blocking(). + * [1995/06/13 18:56:52 bolinger] + * + * Revision 1.1.11.6 1995/06/05 21:46:36 dwm + * ri-osc CR1357 - ensure activation being returned to is swapped in. + * added thread_swapin_blocking [bolinger] + * [1995/06/05 21:34:08 dwm] + * + * Revision 1.1.11.5 1995/05/19 15:48:34 bernadat + * Let thread swapping be configurable. + * [95/05/19 bernadat] + * + * Revision 1.1.11.4 1995/04/07 19:04:46 barbou + * Merged into mainline. + * [95/03/09 barbou] + * + * Revision 1.1.12.2 1995/02/13 15:59:18 barbou + * Merged/ported to MK6. + * + * Revision 1.1.9.3 1994/08/12 14:22:30 barbou + * Overwritten with copy from IK. + * Old kern/thread_swap.h was renamed kern/thread_handoff.c. 
+ * Added prototype for thread_swapout and thread_swapout_enqueue. + * [94/07/28 barbou] + * + * Revision 3.0.3.2 1994/01/20 19:53:20 chasb + * Remove excessively restrictive copyright notice + * [1994/01/20 17:50:56 chasb] + * + * Revision 3.0.3.1 1993/12/20 21:07:59 gupta + * Expanded C O P Y R I G H T + * [1993/12/17 22:19:43 gupta] + * + * Revision 3.0 1992/12/31 22:08:45 ede + * Initial revision for OSF/1 R1.3 + * + * Revision 1.6.2.2 1992/01/22 22:14:42 gmf + * Added TH_SW_TASK_SWAPPING flag to swap_state. This state + * indicates that the thread is about to be swapped out by + * the task swapping mechanism, and prevents the thread + * swapper from doing it first. + * [1992/01/20 22:06:36 gmf] + * + * Revision 1.6 1991/08/15 19:16:39 devrcs + * Prototype all functions, change name to thread_swapper_init. + * [91/06/26 10:45:44 jeffc] + * + * Revision 1.5 91/06/10 16:19:07 devrcs + * Additions to allow thread to be made non-swappable on swap in, + * change thread_swapin interface. + * [91/05/30 15:56:38 jeffc] + * + * Revision 1.4 91/03/04 17:07:14 devrcs + * A small step toward ansiC: commented else/endif/elif trailers. + * [91/01/12 16:39:43 dwm] + * + * Revision 1.3 90/10/07 13:57:13 devrcs + * Added EndLog Marker. + * [90/09/28 09:59:56 gm] + * + * Revision 1.2 90/01/02 20:06:28 gm + * Fixes for first snapshot. + * + * Revision 1.1 89/10/16 19:36:28 gm + * Mach 2.5 and Encore 0.6 merge + * + * Revision 2.4 89/03/09 20:17:07 rpd + * More cleanup. + * + * Revision 2.3 89/02/25 18:10:24 gm0w + * Kernel code cleanup. + * Put entire file under #indef KERNEL. + * [89/02/15 mrt] + * + * Revision 0.0 88/01/21 dbg + * Created. + * [88/01/21 dbg] + * + * $EndLog$ + */ +/* + * File: kern/thread_swap.h + * + * Declarations of thread swap_states and swapping routines. + */ + +/* + * Swap states for threads. 
+ */ + +#ifndef _KERN_THREAD_SWAP_H_ +#define _KERN_THREAD_SWAP_H_ + +#if 1 /* USED CODE */ + +/* + * exported routines + */ + +extern void swapper_init(); +extern void thread_swapin(thread_t thread); +extern void thread_doswapin(thread_t thread); +extern void swapin_thread(); + +#define thread_swappable(act, bool) + + +#else /* UNUSED SWAPPER CODE */ +#if THREAD_SWAPPER +#define TH_SW_STATE 7 /* mask of swap state bits */ +#define TH_SW_UNSWAPPABLE 1 /* not swappable */ +#define TH_SW_IN 2 /* swapped in */ +#define TH_SW_GOING_OUT 3 /* being swapped out */ +#define TH_SW_WANT_IN 4 /* being swapped out, but should + immediately be swapped in */ +#define TH_SW_OUT 5 /* swapped out */ +#define TH_SW_COMING_IN 6 /* queued for swapin, or being + swapped in */ + +#define TH_SW_MAKE_UNSWAPPABLE 8 /*not state, command to swapin_thread */ + +/* + * This flag is only used by the task swapper. It implies that + * the thread is about to be swapped, but hasn't yet. + */ +#define TH_SW_TASK_SWAPPING 0x10 + +/* + * exported routines + */ +extern void thread_swapper_init(void); +extern void swapin_thread(void); +extern void swapout_thread(void); +extern void thread_doswapin(thread_act_t thr_act); +extern void thread_swapin(thread_act_t thr_act, + boolean_t make_unswappable); +extern boolean_t + thread_swapin_blocking(thread_act_t thr_act); +extern void thread_swapout(thread_act_t thr_act); +extern void swapout_threads(boolean_t now); +extern void thread_swapout_enqueue(thread_act_t thr_act); +extern void thread_swap_disable(thread_act_t thr_act); + +extern void thread_swappable(thread_act_t thr_act, boolean_t swappable); + +#else /* THREAD_SWAPPER */ +#define thread_swappable(thr_act, swappable) +#endif /* THREAD_SWAPPER */ + +#endif /* UNUSED SWAPPER CODE */ + +#endif /*_KERN_THREAD_SWAP_H_*/ + diff --git a/osfmk/kern/time_out.h b/osfmk/kern/time_out.h new file mode 100644 index 000000000..344d2bd97 --- /dev/null +++ b/osfmk/kern/time_out.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 
2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _KERN_TIME_OUT_H_ +#define _KERN_TIME_OUT_H_ + +/* + * Mach time-out facility. + */ + +#include +#include +#include +#include +#include + +/* + * Timers in kernel: + */ + +extern int hz; /* num of ticks per second */ +extern int tick; /* num of usec per tick */ + +typedef void (*timeout_fcn_t)(void *); + +extern void hertz_tick( + boolean_t usermode, /* executing user code */ + natural_t pc); + +/* Set timeout */ +extern void timeout( + timeout_fcn_t fcn, + void *param, + int interval); + +/* Cancel timeout */ +extern void untimeout( + timeout_fcn_t fcn, + void *param); + +#endif /* _KERN_TIME_OUT_H_ */ diff --git a/osfmk/kern/timer.c b/osfmk/kern/timer.c new file mode 100644 index 000000000..996bc6fe6 --- /dev/null +++ b/osfmk/kern/timer.c @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +timer_t current_timer[NCPUS]; +timer_data_t kernel_timer[NCPUS]; + +/* Forwards */ +void timer_grab( + timer_t timer, + timer_save_t save); + +void db_timer_grab( + timer_t timer, + timer_save_t save); + +void db_thread_read_times( + thread_t thread, + time_value_t *user_time_p, + time_value_t *system_time_p); + +/* + * init_timers initializes all non-thread timers and puts the + * service routine on the callout queue. All timers must be + * serviced by the callout routine once an hour. 
+ */ +void +init_timers(void) +{ + register int i; + register timer_t this_timer; + + /* + * Initialize all the kernel timers and start the one + * for this cpu (master) slaves start theirs later. + */ + this_timer = &kernel_timer[0]; + for ( i=0 ; ilow_bits = 0; + this_timer->high_bits = 0; + this_timer->tstamp = 0; + this_timer->high_bits_check = 0; +} + +#if STAT_TIME +#else /* STAT_TIME */ + +#ifdef MACHINE_TIMER_ROUTINES + +/* + * Machine-dependent code implements the timer routines. + */ + +#else /* MACHINE_TIMER_ROUTINES */ + +/* + * start_timer starts the given timer for this cpu. It is called + * exactly once for each cpu during the boot sequence. + */ +void +start_timer( + register timer_t timer) +{ + timer->tstamp = get_timestamp(); + mp_disable_preemption(); + current_timer[cpu_number()] = timer; + mp_enable_preemption(); +} + +/* + * time_trap_uentry does trap entry timing. Caller must lock out + * interrupts and take a timestamp. ts is a timestamp taken after + * interrupts were locked out. Must only be called if trap was + * from user mode. + */ +void +time_trap_uentry( + unsigned ts) +{ + int elapsed; + int mycpu; + timer_t mytimer; + + mp_disable_preemption(); + + /* + * Calculate elapsed time. + */ + mycpu = cpu_number(); + mytimer = current_timer[mycpu]; + elapsed = ts - mytimer->tstamp; +#ifdef TIMER_MAX + if (elapsed < 0) elapsed += TIMER_MAX; +#endif /* TIMER_MAX */ + + /* + * Update current timer. + */ + mytimer->low_bits += elapsed; + mytimer->tstamp = 0; + + if (mytimer->low_bits & TIMER_LOW_FULL) { + timer_normalize(mytimer); + } + + /* + * Record new timer. + */ + mytimer = &(current_thread()->system_timer); + current_timer[mycpu] = mytimer; + mytimer->tstamp = ts; + + mp_enable_preemption(); +} + +/* + * time_trap_uexit does trap exit timing. Caller must lock out + * interrupts and take a timestamp. ts is a timestamp taken after + * interrupts were locked out. Must only be called if returning to + * user mode. 
+ */ +void +time_trap_uexit( + unsigned ts) +{ + int elapsed; + int mycpu; + timer_t mytimer; + + mp_disable_preemption(); + + /* + * Calculate elapsed time. + */ + mycpu = cpu_number(); + mytimer = current_timer[mycpu]; + elapsed = ts - mytimer->tstamp; +#ifdef TIMER_MAX + if (elapsed < 0) elapsed += TIMER_MAX; +#endif /* TIMER_MAX */ + + /* + * Update current timer. + */ + mytimer->low_bits += elapsed; + mytimer->tstamp = 0; + + if (mytimer->low_bits & TIMER_LOW_FULL) { + timer_normalize(mytimer); /* SYSTEMMODE */ + } + + mytimer = &(current_thread()->user_timer); + + /* + * Record new timer. + */ + current_timer[mycpu] = mytimer; + mytimer->tstamp = ts; + + mp_enable_preemption(); +} + +/* + * time_int_entry does interrupt entry timing. Caller must lock out + * interrupts and take a timestamp. ts is a timestamp taken after + * interrupts were locked out. new_timer is the new timer to + * switch to. This routine returns the currently running timer, + * which MUST be pushed onto the stack by the caller, or otherwise + * saved for time_int_exit. + */ +timer_t +time_int_entry( + unsigned ts, + timer_t new_timer) +{ + int elapsed; + int mycpu; + timer_t mytimer; + + mp_disable_preemption(); + + /* + * Calculate elapsed time. + */ + mycpu = cpu_number(); + mytimer = current_timer[mycpu]; + + elapsed = ts - mytimer->tstamp; +#ifdef TIMER_MAX + if (elapsed < 0) elapsed += TIMER_MAX; +#endif /* TIMER_MAX */ + + /* + * Update current timer. + */ + mytimer->low_bits += elapsed; + mytimer->tstamp = 0; + + /* + * Switch to new timer, and save old one on stack. + */ + new_timer->tstamp = ts; + current_timer[mycpu] = new_timer; + + mp_enable_preemption(); + + return(mytimer); +} + +/* + * time_int_exit does interrupt exit timing. Caller must lock out + * interrupts and take a timestamp. ts is a timestamp taken after + * interrupts were locked out. old_timer is the timer value pushed + * onto the stack or otherwise saved after time_int_entry returned + * it. 
+ */ +void +time_int_exit( + unsigned ts, + timer_t old_timer) +{ + int elapsed; + int mycpu; + timer_t mytimer; + + mp_disable_preemption(); + + /* + * Calculate elapsed time. + */ + mycpu = cpu_number(); + mytimer = current_timer[mycpu]; + elapsed = ts - mytimer->tstamp; +#ifdef TIMER_MAX + if (elapsed < 0) elapsed += TIMER_MAX; +#endif /* TIMER_MAX */ + + /* + * Update current timer. + */ + mytimer->low_bits += elapsed; + mytimer->tstamp = 0; + + /* + * If normalization requested, do it. + */ + if (mytimer->low_bits & TIMER_LOW_FULL) { + timer_normalize(mytimer); + } + if (old_timer->low_bits & TIMER_LOW_FULL) { + timer_normalize(old_timer); + } + + /* + * Start timer that was running before interrupt. + */ + old_timer->tstamp = ts; + current_timer[mycpu] = old_timer; + + mp_enable_preemption(); +} + +/* + * timer_switch switches to a new timer. The machine + * dependent routine/macro get_timestamp must return a timestamp. + * Caller must lock out interrupts. + */ +void +timer_switch( + timer_t new_timer) +{ + int elapsed; + int mycpu; + timer_t mytimer; + unsigned ts; + + mp_disable_preemption(); + + /* + * Calculate elapsed time. + */ + mycpu = cpu_number(); + mytimer = current_timer[mycpu]; + ts = get_timestamp(); + elapsed = ts - mytimer->tstamp; +#ifdef TIMER_MAX + if (elapsed < 0) elapsed += TIMER_MAX; +#endif /* TIMER_MAX */ + + /* + * Update current timer. + */ + mytimer->low_bits += elapsed; + mytimer->tstamp = 0; + + /* + * Normalization check + */ + if (mytimer->low_bits & TIMER_LOW_FULL) { + timer_normalize(mytimer); + } + + /* + * Record new timer. + */ + current_timer[mycpu] = new_timer; + new_timer->tstamp = ts; + + mp_enable_preemption(); +} + +#endif /* MACHINE_TIMER_ROUTINES */ +#endif /* STAT_TIME */ + +/* + * timer_normalize normalizes the value of a timer. It is + * called only rarely, to make sure low_bits never overflows. 
+ */ + +void +timer_normalize( + register timer_t timer) +{ + unsigned int high_increment; + + /* + * Calculate high_increment, then write high check field first + * followed by low and high. timer_grab() reads these fields in + * reverse order so if high and high check match, we know + * that the values read are ok. + */ + + high_increment = timer->low_bits/TIMER_HIGH_UNIT; + timer->high_bits_check += high_increment; + timer->low_bits %= TIMER_HIGH_UNIT; + timer->high_bits += high_increment; +} + +/* + * timer_grab() retrieves the value of a timer. + * + * Critical scheduling code uses TIMER_DELTA macro in timer.h + * (called from thread_timer_delta in sched.h). + * + * Keep coherent with db_time_grab below. + */ + +void +timer_grab( + timer_t timer, + timer_save_t save) +{ +#if MACH_ASSERT + unsigned int passes=0; +#endif + do { + (save)->high = (timer)->high_bits; + (save)->low = (timer)->low_bits; + /* + * If the timer was normalized while we were doing this, + * the high_bits value read above and the high_bits check + * value will not match because high_bits_check is the first + * field touched by the normalization procedure, and + * high_bits is the last. + * + * Additions to timer only touch low bits and + * are therefore atomic with respect to this. + */ +#if MACH_ASSERT + passes++; + assert(passes < 10000); +#endif + } while ( (save)->high != (timer)->high_bits_check); +} + +/* + * + * Db_timer_grab(): used by db_thread_read_times. An nonblocking + * version of db_thread_get_times. Keep coherent with timer_grab + * above. + * + */ +void +db_timer_grab( + timer_t timer, + timer_save_t save) +{ + /* Don't worry about coherency */ + + (save)->high = (timer)->high_bits; + (save)->low = (timer)->low_bits; +} + + +/* + * timer_read reads the value of a timer into a time_value_t. If the + * timer was modified during the read, retry. 
The value returned + * is accurate to the last update; time accumulated by a running + * timer since its last timestamp is not included. + */ + +void +timer_read( + timer_t timer, + register time_value_t *tv) +{ + timer_save_data_t temp; + + timer_grab(timer,&temp); + /* + * Normalize the result + */ +#ifdef TIMER_ADJUST + TIMER_ADJUST(&temp); +#endif /* TIMER_ADJUST */ + tv->seconds = temp.high + temp.low/1000000; + tv->microseconds = temp.low%1000000; +} + +/* + * thread_read_times reads the user and system times from a thread. + * Time accumulated since last timestamp is not included. Should + * be called at splsched() to avoid having user and system times + * be out of step. Doesn't care if caller locked thread. + * + * Needs to be kept coherent with thread_read_times ahead. + */ +void +thread_read_times( + thread_t thread, + time_value_t *user_time_p, + time_value_t *system_time_p) +{ + timer_save_data_t temp; + register timer_t timer; + + timer = &thread->user_timer; + timer_grab(timer, &temp); + +#ifdef TIMER_ADJUST + TIMER_ADJUST(&temp); +#endif /* TIMER_ADJUST */ + user_time_p->seconds = temp.high + temp.low/1000000; + user_time_p->microseconds = temp.low % 1000000; + + timer = &thread->system_timer; + timer_grab(timer, &temp); + +#ifdef TIMER_ADJUST + TIMER_ADJUST(&temp); +#endif /* TIMER_ADJUST */ + system_time_p->seconds = temp.high + temp.low/1000000; + system_time_p->microseconds = temp.low % 1000000; +} + +/* + * Db_thread_read_times: A version of thread_read_times that + * can be called by the debugger. This version does not call + * timer_grab, which can block. Please keep it up to date with + * thread_read_times above. 
+ * + */ +void +db_thread_read_times( + thread_t thread, + time_value_t *user_time_p, + time_value_t *system_time_p) +{ + timer_save_data_t temp; + register timer_t timer; + + timer = &thread->user_timer; + db_timer_grab(timer, &temp); + +#ifdef TIMER_ADJUST + TIMER_ADJUST(&temp); +#endif /* TIMER_ADJUST */ + user_time_p->seconds = temp.high + temp.low/1000000; + user_time_p->microseconds = temp.low % 1000000; + + timer = &thread->system_timer; + db_timer_grab(timer, &temp); /* nonblocking variant; timer_grab() can spin/block */ + +#ifdef TIMER_ADJUST + TIMER_ADJUST(&temp); +#endif /* TIMER_ADJUST */ + system_time_p->seconds = temp.high + temp.low/1000000; + system_time_p->microseconds = temp.low % 1000000; +} + +/* + * timer_delta takes the difference of a saved timer value + * and the current one, and updates the saved value to current. + * The difference is returned as a function value. See + * TIMER_DELTA macro (timer.h) for optimization to this. + */ + +unsigned +timer_delta( + register timer_t timer, + timer_save_t save) +{ + timer_save_data_t new_save; + register unsigned result; + + timer_grab(timer,&new_save); + result = (new_save.high - save->high) * TIMER_HIGH_UNIT + + new_save.low - save->low; + save->high = new_save.high; + save->low = new_save.low; + return(result); +} diff --git a/osfmk/kern/timer.h b/osfmk/kern/timer.h new file mode 100644 index 000000000..c55f093da --- /dev/null +++ b/osfmk/kern/timer.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _KERN_TIMER_H_ +#define _KERN_TIMER_H_ + +#include +#include + +#include +#include + +#if STAT_TIME +/* + * Statistical timer definitions - use microseconds in timer, seconds + * in high unit field. No adjustment needed to convert to time_value_t + * as a result. Service timers once an hour. 
+ */ + +#define TIMER_RATE 1000000 +#define TIMER_HIGH_UNIT TIMER_RATE +#undef TIMER_ADJUST + +#else /* STAT_TIME */ +/* + * Machine dependent definitions based on hardware support. + */ + +#include + +#endif /* STAT_TIME */ + +/* + * Definitions for accurate timers. high_bits_check is a copy of + * high_bits that allows reader to verify that values read are ok. + */ + +struct timer { + unsigned low_bits; + unsigned high_bits; + unsigned high_bits_check; + unsigned tstamp; +}; + +typedef struct timer timer_data_t; +typedef struct timer *timer_t; + +/* + * Mask to check if low_bits is in danger of overflowing + */ + +#define TIMER_LOW_FULL 0x80000000 + +/* + * Kernel timers and current timer array. [Exported] + */ + +extern timer_t current_timer[NCPUS]; +extern timer_data_t kernel_timer[NCPUS]; + +/* + * save structure for timer readings. This is used to save timer + * readings for elapsed time computations. + */ + +struct timer_save { + unsigned low; + unsigned high; +}; + +typedef struct timer_save timer_save_data_t, *timer_save_t; + +/* + * Exported kernel interface to timers + */ + +#if STAT_TIME +#define start_timer(timer) +#define timer_switch(timer) +#else /* STAT_TIME */ +/* Start timer for this cpu */ +extern void start_timer( + timer_t timer); + +/* Switch to a new timer */ +extern void timer_switch( + timer_t new_timer); +#endif /* STAT_TIME */ + +/* Initialize timer module */ +extern void init_timers(void); + +/* + * Initializes a single timer. 
+ */ +extern void timer_init( + timer_t this_timer); + +/* Normalize timer value */ +extern void timer_normalize( + timer_t timer); + +/* Read value of timer into tv */ +extern void timer_read( + timer_t timer, + time_value_t *tv); + +/* Read thread times */ +extern void thread_read_times( + thread_t thread, + time_value_t *user_time_p, + time_value_t *system_time_p); + +/* Compute timer difference */ +extern unsigned timer_delta( + timer_t timer, + timer_save_t save); + +#if STAT_TIME +/* + * Macro to bump timer values. + */ +#define timer_bump(timer, usec) \ +MACRO_BEGIN \ + (timer)->low_bits += usec; \ + if ((timer)->low_bits & TIMER_LOW_FULL) { \ + timer_normalize(timer); \ + } \ +MACRO_END + +#else /* STAT_TIME */ +/* + * Exported hardware interface to timers + */ +/* Time trap entry */ +extern void time_trap_uentry( + unsigned ts); + +/* Time trap exit */ +extern void time_trap_uexit( + unsigned ts); + +/* Time interrupt entry */ +extern timer_t time_int_entry( + unsigned ts, + timer_t new_timer); + +/* Time interrrupt exit */ +extern void time_int_exit( + unsigned ts, + timer_t old_timer); + +#endif /* STAT_TIME */ + +/* + * TIMER_DELTA finds the difference between a timer and a saved value, + * and updates the saved value. Look at high_bits check field after + * reading low because that's the first written by a normalize + * operation; this isn't necessary for current usage because + * this macro is only used when the timer can't be normalized: + * thread is not running, or running thread calls it on itself at + * splsched(). 
+ */ + +#define TIMER_DELTA(timer, save, result) \ +MACRO_BEGIN \ + register unsigned temp; \ + \ + temp = (timer).low_bits; \ + if ((save).high != (timer).high_bits_check) { \ + result += timer_delta(&(timer), &(save)); \ + } \ + else { \ + result += temp - (save).low; \ + (save).low = temp; \ + } \ +MACRO_END + +#endif /* _KERN_TIMER_H_ */ diff --git a/osfmk/kern/timer_call.c b/osfmk/kern/timer_call.c new file mode 100644 index 000000000..e1e4bb649 --- /dev/null +++ b/osfmk/kern/timer_call.c @@ -0,0 +1,323 @@ +/* + * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc. + * All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Timer interrupt callout module. + * + * HISTORY + * + * 20 December 2000 (debo) + * Created. 
+ */ + +#include + +#include + +#include +#include + +decl_simple_lock_data(static,timer_call_lock) + +static +queue_head_t + delayed_call_queues[NCPUS]; + +static struct { + int pending_num, + pending_hiwat; + int delayed_num, + delayed_hiwat; +} timer_calls; + +static boolean_t + timer_call_initialized = FALSE; + +static void +timer_call_interrupt( + AbsoluteTime timestamp); + +#define qe(x) ((queue_entry_t)(x)) +#define TC(x) ((timer_call_t)(x)) + +void +timer_call_initialize(void) +{ + spl_t s; + int i; + + if (timer_call_initialized) + panic("timer_call_initialize"); + + simple_lock_init(&timer_call_lock, ETAP_MISC_TIMER); + + s = splclock(); + simple_lock(&timer_call_lock); + + for (i = 0; i < NCPUS; i++) + queue_init(&delayed_call_queues[i]); + + clock_set_timer_func((clock_timer_func_t)timer_call_interrupt); + + timer_call_initialized = TRUE; + + simple_unlock(&timer_call_lock); + splx(s); +} + +void +timer_call_setup( + timer_call_t call, + timer_call_func_t func, + timer_call_param_t param0) +{ + call_entry_setup(call, func, param0); +} + +static __inline__ +void +_delayed_call_enqueue( + queue_t queue, + timer_call_t call) +{ + timer_call_t current; + + current = TC(queue_first(queue)); + + while (TRUE) { + if ( queue_end(queue, qe(current)) || + CMP_ABSOLUTETIME(&call->deadline, + &current->deadline) < 0 ) { + current = TC(queue_prev(qe(current))); + break; + } + + current = TC(queue_next(qe(current))); + } + + insque(qe(call), qe(current)); + if (++timer_calls.delayed_num > timer_calls.delayed_hiwat) + timer_calls.delayed_hiwat = timer_calls.delayed_num; + + call->state = DELAYED; +} + +static __inline__ +void +_delayed_call_dequeue( + timer_call_t call) +{ + (void)remque(qe(call)); + timer_calls.delayed_num--; + + call->state = IDLE; +} + +static __inline__ +void +_pending_call_enqueue( + queue_t queue, + timer_call_t call) +{ + enqueue_tail(queue, qe(call)); + if (++timer_calls.pending_num > timer_calls.pending_hiwat) + timer_calls.pending_hiwat = 
timer_calls.pending_num; + + call->state = PENDING; +} + +static __inline__ +void +_pending_call_dequeue( + timer_call_t call) +{ + (void)remque(qe(call)); + timer_calls.pending_num--; + + call->state = IDLE; +} + +static __inline__ +void +_set_delayed_call_timer( + timer_call_t call) +{ + clock_set_timer_deadline(call->deadline); +} + +boolean_t +timer_call_enter( + timer_call_t call, + AbsoluteTime deadline) +{ + boolean_t result = TRUE; + queue_t delayed; + spl_t s; + + s = splclock(); + simple_lock(&timer_call_lock); + + if (call->state == PENDING) + _pending_call_dequeue(call); + else if (call->state == DELAYED) + _delayed_call_dequeue(call); + else if (call->state == IDLE) + result = FALSE; + + call->param1 = 0; + call->deadline = deadline; + + delayed = &delayed_call_queues[cpu_number()]; + + _delayed_call_enqueue(delayed, call); + + if (queue_first(delayed) == qe(call)) + _set_delayed_call_timer(call); + + simple_unlock(&timer_call_lock); + splx(s); + + return (result); +} + +boolean_t +timer_call_enter1( + timer_call_t call, + timer_call_param_t param1, + AbsoluteTime deadline) +{ + boolean_t result = TRUE; + queue_t delayed; + spl_t s; + + s = splclock(); + simple_lock(&timer_call_lock); + + if (call->state == PENDING) + _pending_call_dequeue(call); + else if (call->state == DELAYED) + _delayed_call_dequeue(call); + else if (call->state == IDLE) + result = FALSE; + + call->param1 = param1; + call->deadline = deadline; + + delayed = &delayed_call_queues[cpu_number()]; + + _delayed_call_enqueue(delayed, call); + + if (queue_first(delayed) == qe(call)) + _set_delayed_call_timer(call); + + simple_unlock(&timer_call_lock); + splx(s); + + return (result); +} + +boolean_t +timer_call_cancel( + timer_call_t call) +{ + boolean_t result = TRUE; + spl_t s; + + s = splclock(); + simple_lock(&timer_call_lock); + + if (call->state == PENDING) + _pending_call_dequeue(call); + else if (call->state == DELAYED) + _delayed_call_dequeue(call); + else + result = FALSE; + + 
simple_unlock(&timer_call_lock); + splx(s); + + return (result); +} + +boolean_t +timer_call_is_delayed( + timer_call_t call, + AbsoluteTime *deadline) +{ + boolean_t result = FALSE; + spl_t s; + + s = splclock(); + simple_lock(&timer_call_lock); + + if (call->state == DELAYED) { + if (deadline != NULL) + *deadline = call->deadline; + result = TRUE; + } + + simple_unlock(&timer_call_lock); + splx(s); + + return (result); +} + +static +void +timer_call_interrupt( + AbsoluteTime timestamp) +{ + timer_call_t call; + queue_t delayed = &delayed_call_queues[cpu_number()]; + + simple_lock(&timer_call_lock); + + call = TC(queue_first(delayed)); + + while (!queue_end(delayed, qe(call))) { + if (CMP_ABSOLUTETIME(&call->deadline, &timestamp) <= 0) { + timer_call_func_t func; + timer_call_param_t param0, param1; + + _delayed_call_dequeue(call); + + func = call->func; + param0 = call->param0; + param1 = call->param1; + + simple_unlock(&timer_call_lock); + + (*func)(param0, param1); + + simple_lock(&timer_call_lock); + } + else + break; + + call = TC(queue_first(delayed)); + } + + if (!queue_end(delayed, qe(call))) + _set_delayed_call_timer(call); + + simple_unlock(&timer_call_lock); +} diff --git a/osfmk/kern/timer_call.h b/osfmk/kern/timer_call.h new file mode 100644 index 000000000..b8e85808c --- /dev/null +++ b/osfmk/kern/timer_call.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 1993-1995, 1999-2000 Apple Computer, Inc. + * All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Declarations for timer interrupt callouts. + * + * HISTORY + * + * 20 December 2000 (debo) + * Created. + */ + +#ifndef _KERN_TIMER_CALL_H_ +#define _KERN_TIMER_CALL_H_ + +#include + +#include + +#ifdef MACH_KERNEL_PRIVATE + +typedef struct call_entry *timer_call_t; +typedef void *timer_call_param_t; +typedef void (*timer_call_func_t)( + timer_call_param_t param0, + timer_call_param_t param1); + +boolean_t +timer_call_enter( + timer_call_t call, + AbsoluteTime deadline); + +boolean_t +timer_call_enter1( + timer_call_t call, + timer_call_param_t param1, + AbsoluteTime deadline); + +boolean_t +timer_call_cancel( + timer_call_t call); + +boolean_t +timer_call_is_delayed( + timer_call_t call, + AbsoluteTime *deadline); + +#include + +typedef struct call_entry timer_call_data_t; + +void +timer_call_initialize(void); + +void +timer_call_setup( + timer_call_t call, + timer_call_func_t func, + timer_call_param_t param0); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_TIMER_CALL_H_ */ diff --git a/osfmk/kern/wait_queue.c b/osfmk/kern/wait_queue.c new file mode 100644 index 000000000..b71aba571 --- /dev/null +++ b/osfmk/kern/wait_queue.c @@ -0,0 +1,1093 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: wait_queue.c (adapted from sched_prim.c) + * Author: Avadis Tevanian, Jr. 
+ * Date: 1986 + * + * Primitives for manipulating wait queues: either global + * ones from sched_prim.c, or private ones associated with + * particular structures(pots, semaphores, etc..). + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +void +wait_queue_init( + wait_queue_t wq, + int policy) +{ + wq->wq_fifo = (policy == SYNC_POLICY_FIFO); + wq->wq_issub = FALSE; + queue_init(&wq->wq_queue); + hw_lock_init(&wq->wq_interlock); +} + +void +wait_queue_sub_init( + wait_queue_sub_t wqsub, + int policy) +{ + wait_queue_init(&wqsub->wqs_wait_queue, policy); + wqsub->wqs_wait_queue.wq_issub = TRUE; + queue_init(&wqsub->wqs_sublinks); +} + +void +wait_queue_link_init( + wait_queue_link_t wql) +{ + queue_init(&wql->wql_links); + queue_init(&wql->wql_sublinks); + wql->wql_queue = WAIT_QUEUE_NULL; + wql->wql_subqueue = WAIT_QUEUE_SUB_NULL; + wql->wql_event = NO_EVENT; +} + + +/* + * Routine: wait_queue_lock + * Purpose: + * Lock the wait queue. + * Conditions: + * the appropriate spl level (if any) is already raised. + */ +void +wait_queue_lock( + wait_queue_t wq) +{ +#ifdef __ppc__ + vm_offset_t pc; + + /* + * Double the standard lock timeout, because wait queues tend + * to iterate over a number of threads - locking each. If there is + * a problem with a thread lock, it normally times out at the wait + * queue level first, hiding the real problem. + */ + pc = GET_RETURN_PC(&wq); + if (!hw_lock_to(&wq->wq_interlock, LockTimeOut * 2)) { + panic("wait queue deadlock detection - wq=0x%x, cpu=%d, ret=0x%x\n", wq, cpu_number(), pc); + } +#else + hw_lock_lock(&wq->wq_interlock); +#endif +} + +/* + * Routine: wait_queue_lock_try + * Purpose: + * Try to lock the wait queue without waiting + * Conditions: + * the appropriate spl level (if any) is already raised. 
+ * Returns: + * TRUE if the lock was acquired + * FALSE if we would have needed to wait + */ +boolean_t +wait_queue_lock_try( + wait_queue_t wq) +{ + return hw_lock_try(&wq->wq_interlock); +} + +/* + * Routine: wait_queue_unlock + * Purpose: + * unlock the wait queue + * Conditions: + * The wait queue is assumed locked. + * appropriate spl level is still maintained + */ +void +wait_queue_unlock( + wait_queue_t wq) +{ + assert(hw_lock_held(&wq->wq_interlock)); + + hw_lock_unlock(&wq->wq_interlock); +} + +int _wait_queue_subordinate; /* phoney event for subordinate wait q elements */ + + +/* + * Routine: wait_queue_member_locked + * Purpose: + * Indicate if this sub queue is a member of the queue + * Conditions: + * The wait queue is locked + * The sub queue is just that, a sub queue + */ +boolean_t +wait_queue_member_locked( + wait_queue_t wq, + wait_queue_sub_t wq_sub) +{ + wait_queue_element_t wq_element; + queue_t q; + + assert(wait_queue_held(wq)); + assert(wait_queue_is_sub(wq_sub)); + + q = &wq->wq_queue; + + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + + if ((wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE)) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + + if (wql->wql_subqueue == wq_sub) + return TRUE; + } + wq_element = (wait_queue_element_t) + queue_next((queue_t) wq_element); + } + return FALSE; +} + + +/* + * Routine: wait_queue_member + * Purpose: + * Indicate if this sub queue is a member of the queue + * Conditions: + * The sub queue is just that, a sub queue + */ +boolean_t +wait_queue_member( + wait_queue_t wq, + wait_queue_sub_t wq_sub) +{ + boolean_t ret; + spl_t s; + + assert(wait_queue_is_sub(wq_sub)); + + s = splsched(); + wait_queue_lock(wq); + ret = wait_queue_member_locked(wq, wq_sub); + wait_queue_unlock(wq); + splx(s); + + return ret; +} + +/* + * Routine: wait_queue_link + * Purpose: + * Insert a subordinate wait queue into a wait queue. 
This + * requires us to link the two together using a wait_queue_link + * structure that we allocate. + * Conditions: + * The wait queue being inserted must be inited as a sub queue + * The sub waitq is not already linked + * + */ +kern_return_t +wait_queue_link( + wait_queue_t wq, + wait_queue_sub_t wq_sub) +{ + wait_queue_link_t wql; + spl_t s; + + assert(wait_queue_is_sub(wq_sub)); + assert(!wait_queue_member(wq, wq_sub)); + + wql = (wait_queue_link_t) kalloc(sizeof(struct wait_queue_link)); + if (wql == WAIT_QUEUE_LINK_NULL) + return KERN_RESOURCE_SHORTAGE; + + wait_queue_link_init(wql); + + s = splsched(); + wait_queue_lock(wq); + wqs_lock(wq_sub); + + wql->wql_queue = wq; + wql->wql_subqueue = wq_sub; + wql->wql_event = WAIT_QUEUE_SUBORDINATE; + queue_enter(&wq->wq_queue, wql, wait_queue_link_t, wql_links); + queue_enter(&wq_sub->wqs_sublinks, wql, wait_queue_link_t, wql_sublinks); + + wqs_unlock(wq_sub); + wait_queue_unlock(wq); + splx(s); + + return KERN_SUCCESS; +} + +/* + * Routine: wait_queue_unlink + * Purpose: + * Remove the linkage between a wait queue and its subordinate. 
+ * Conditions: + * The wait queue being must be a member sub queue + */ +kern_return_t +wait_queue_unlink( + wait_queue_t wq, + wait_queue_sub_t wq_sub) +{ + wait_queue_element_t wq_element; + queue_t q; + spl_t s; + + assert(wait_queue_is_sub(wq_sub)); + assert(wait_queue_member(wq, wq_sub)); + + s = splsched(); + wait_queue_lock(wq); + wqs_lock(wq_sub); + + q = &wq->wq_queue; + + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + + if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + queue_t sq; + + if (wql->wql_subqueue == wq_sub) { + sq = &wq_sub->wqs_sublinks; + queue_remove(q, wql, wait_queue_link_t, wql_links); + queue_remove(sq, wql, wait_queue_link_t, wql_sublinks); + wqs_unlock(wq_sub); + wait_queue_unlock(wq); + splx(s); + kfree((vm_offset_t)wql,sizeof(struct wait_queue_link)); + return; + } + } + + wq_element = (wait_queue_element_t) + queue_next((queue_t) wq_element); + } + panic("wait_queue_unlink"); +} + +/* + * Routine: wait_queue_unlink_one + * Purpose: + * Find and unlink one subordinate wait queue + * Conditions: + * Nothing of interest locked. 
+ */ +void +wait_queue_unlink_one( + wait_queue_t wq, + wait_queue_sub_t *wq_subp) +{ + wait_queue_element_t wq_element; + queue_t q; + spl_t s; + + s = splsched(); + wait_queue_lock(wq); + + q = &wq->wq_queue; + + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + + if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + wait_queue_sub_t wq_sub = wql->wql_subqueue; + queue_t sq; + + wqs_lock(wq_sub); + sq = &wq_sub->wqs_sublinks; + queue_remove(q, wql, wait_queue_link_t, wql_links); + queue_remove(sq, wql, wait_queue_link_t, wql_sublinks); + wqs_unlock(wq_sub); + wait_queue_unlock(wq); + splx(s); + kfree((vm_offset_t)wql,sizeof(struct wait_queue_link)); + *wq_subp = wq_sub; + return; + } + + wq_element = (wait_queue_element_t) + queue_next((queue_t) wq_element); + } + wait_queue_unlock(wq); + splx(s); + *wq_subp = WAIT_QUEUE_SUB_NULL; +} + +/* + * Routine: wait_queue_assert_wait_locked + * Purpose: + * Insert the current thread into the supplied wait queue + * waiting for a particular event to be posted to that queue. + * + * Conditions: + * The wait queue is assumed locked. + * + */ +void +wait_queue_assert_wait_locked( + wait_queue_t wq, + event_t event, + int interruptible, + boolean_t unlock) +{ + thread_t thread = current_thread(); + + thread_lock(thread); + + /* + * This is the extent to which we currently take scheduling attributes + * into account. If the thread is vm priviledged, we stick it at + * the front of the queue. Later, these queues will honor the policy + * value set at wait_queue_init time. 
+ */ + if (thread->vm_privilege) + enqueue_head(&wq->wq_queue, (queue_entry_t) thread); + else + enqueue_tail(&wq->wq_queue, (queue_entry_t) thread); + thread->wait_event = event; + thread->wait_queue = wq; + thread_mark_wait_locked(thread, interruptible); + thread_unlock(thread); + if (unlock) + wait_queue_unlock(wq); +} + +/* + * Routine: wait_queue_assert_wait + * Purpose: + * Insert the current thread into the supplied wait queue + * waiting for a particular event to be posted to that queue. + * + * Conditions: + * nothing of interest locked. + */ +void +wait_queue_assert_wait( + wait_queue_t wq, + event_t event, + int interruptible) +{ + spl_t s; + + s = splsched(); + wait_queue_lock(wq); + wait_queue_assert_wait_locked(wq, event, interruptible, TRUE); + /* wait queue unlocked */ + splx(s); +} + + +/* + * Routine: wait_queue_select_all + * Purpose: + * Select all threads off a wait queue that meet the + * supplied criteria. + * + * Conditions: + * at splsched + * wait queue locked + * wake_queue initialized and ready for insertion + * possibly recursive + * + * Returns: + * a queue of locked threads + */ +void +_wait_queue_select_all( + wait_queue_t wq, + event_t event, + queue_t wake_queue) +{ + wait_queue_element_t wq_element; + wait_queue_element_t wqe_next; + queue_t q; + + q = &wq->wq_queue; + + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + wqe_next = (wait_queue_element_t) + queue_next((queue_t) wq_element); + + /* + * We may have to recurse if this is a compound wait queue. + */ + if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + wait_queue_t sub_queue; + + /* + * We have to check the subordinate wait queue. + */ + sub_queue = (wait_queue_t)wql->wql_subqueue; + wait_queue_lock(sub_queue); + if (! 
wait_queue_empty(sub_queue)) + _wait_queue_select_all(sub_queue, event, wake_queue); + wait_queue_unlock(sub_queue); + } else { + + /* + * Otherwise, its a thread. If it is waiting on + * the event we are posting to this queue, pull + * it off the queue and stick it in out wake_queue. + */ + thread_t t = (thread_t)wq_element; + + if (t->wait_event == event) { + thread_lock(t); + remqueue(q, (queue_entry_t) t); + enqueue (wake_queue, (queue_entry_t) t); + t->wait_queue = WAIT_QUEUE_NULL; + t->wait_event = NO_EVENT; + t->at_safe_point = FALSE; + /* returned locked */ + } + } + wq_element = wqe_next; + } +} + +/* + * Routine: wait_queue_wakeup_all_locked + * Purpose: + * Wakeup some number of threads that are in the specified + * wait queue and waiting on the specified event. + * Conditions: + * wait queue already locked (may be released). + * Returns: + * KERN_SUCCESS - Threads were woken up + * KERN_NOT_WAITING - No threads were waiting pair + */ +kern_return_t +wait_queue_wakeup_all_locked( + wait_queue_t wq, + event_t event, + int result, + boolean_t unlock) +{ + queue_head_t wake_queue_head; + queue_t q = &wake_queue_head; + kern_return_t ret = KERN_NOT_WAITING; + + assert(wait_queue_held(wq)); + + queue_init(q); + + /* + * Select the threads that we will wake up. The threads + * are returned to us locked and cleanly removed from the + * wait queue. + */ + _wait_queue_select_all(wq, event, q); + if (unlock) + wait_queue_unlock(wq); + + /* + * For each thread, set it running. + */ + while (!queue_empty (q)) { + thread_t thread = (thread_t) dequeue(q); + thread_go_locked(thread, result); + thread_unlock(thread); + ret = KERN_SUCCESS; + } + return ret; +} + + +/* + * Routine: wait_queue_wakeup_all + * Purpose: + * Wakeup some number of threads that are in the specified + * wait queue and waiting on the specified event. 
+ * + * Conditions: + * Nothing locked + * + * Returns: + * KERN_SUCCESS - Threads were woken up + * KERN_NOT_WAITING - No threads were waiting pair + */ +kern_return_t +wait_queue_wakeup_all( + wait_queue_t wq, + event_t event, + int result) +{ + kern_return_t ret; + spl_t s; + + s = splsched(); + wait_queue_lock(wq); + ret = wait_queue_wakeup_all_locked(wq, event, result, TRUE); + /* lock released */ + splx(s); + + return ret; +} + +/* + * Routine: wait_queue_select_one + * Purpose: + * Select the best thread off a wait queue that meet the + * supplied criteria. + * Conditions: + * at splsched + * wait queue locked + * possibly recursive + * Returns: + * a locked thread - if one found + * Note: + * This is where the sync policy of the wait queue comes + * into effect. For now, we just assume FIFO. + */ +thread_t +_wait_queue_select_one( + wait_queue_t wq, + event_t event) +{ + wait_queue_element_t wq_element; + wait_queue_element_t wqe_next; + thread_t t = THREAD_NULL; + queue_t q; + + assert(wq->wq_fifo); + + q = &wq->wq_queue; + + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + wqe_next = (wait_queue_element_t) + queue_next((queue_t) wq_element); + + /* + * We may have to recurse if this is a compound wait queue. + */ + if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + wait_queue_t sub_queue; + + /* + * We have to check the subordinate wait queue. + */ + sub_queue = (wait_queue_t)wql->wql_subqueue; + wait_queue_lock(sub_queue); + if (! wait_queue_empty(sub_queue)) { + t = _wait_queue_select_one(sub_queue, event); + } + wait_queue_unlock(sub_queue); + if (t != THREAD_NULL) + return t; + } else { + + /* + * Otherwise, its a thread. If it is waiting on + * the event we are posting to this queue, pull + * it off the queue and stick it in out wake_queue. 
+ */ + thread_t t = (thread_t)wq_element; + + if (t->wait_event == event) { + thread_lock(t); + remqueue(q, (queue_entry_t) t); + t->wait_queue = WAIT_QUEUE_NULL; + t->wait_event = NO_EVENT; + t->at_safe_point = FALSE; + return t; /* still locked */ + } + } + wq_element = wqe_next; + } + return THREAD_NULL; +} + +/* + * Routine: wait_queue_peek_locked + * Purpose: + * Select the best thread from a wait queue that meet the + * supplied criteria, but leave it on the queue you it was + * found on. The thread, and the actual wait_queue the + * thread was found on are identified. + * Conditions: + * at splsched + * wait queue locked + * possibly recursive + * Returns: + * a locked thread - if one found + * a locked waitq - the one the thread was found on + * Note: + * Only the waitq the thread was actually found on is locked + * after this. + */ +void +wait_queue_peek_locked( + wait_queue_t wq, + event_t event, + thread_t *tp, + wait_queue_t *wqp) +{ + wait_queue_element_t wq_element; + wait_queue_element_t wqe_next; + thread_t t; + queue_t q; + + assert(wq->wq_fifo); + + *tp = THREAD_NULL; + + q = &wq->wq_queue; + + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + wqe_next = (wait_queue_element_t) + queue_next((queue_t) wq_element); + + /* + * We may have to recurse if this is a compound wait queue. + */ + if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + wait_queue_t sub_queue; + + /* + * We have to check the subordinate wait queue. + */ + sub_queue = (wait_queue_t)wql->wql_subqueue; + wait_queue_lock(sub_queue); + if (! wait_queue_empty(sub_queue)) { + wait_queue_peek_locked(sub_queue, event, tp, wqp); + } + if (*tp != THREAD_NULL) + return; /* thread and its waitq locked */ + + wait_queue_unlock(sub_queue); + } else { + + /* + * Otherwise, its a thread. 
If it is waiting on + * the event we are posting to this queue, return + * it locked, but leave it on the queue. + */ + thread_t t = (thread_t)wq_element; + + if (t->wait_event == event) { + thread_lock(t); + *tp = t; + *wqp = wq; + return; + } + } + wq_element = wqe_next; + } +} + +/* + * Routine: wait_queue_pull_thread_locked + * Purpose: + * Pull a thread that was previously "peeked" off the wait + * queue and (possibly) unlock the waitq. + * Conditions: + * at splsched + * wait queue locked + * thread locked + * Returns: + * with the thread still locked. + */ +void +wait_queue_pull_thread_locked( + wait_queue_t waitq, + thread_t thread, + boolean_t unlock) +{ + + assert(thread->wait_queue == waitq); + + remqueue(&waitq->wq_queue, (queue_entry_t)thread ); + thread->wait_queue = WAIT_QUEUE_NULL; + thread->wait_event = NO_EVENT; + thread->at_safe_point = FALSE; + if (unlock) + wait_queue_unlock(waitq); +} + + +/* + * Routine: wait_queue_select_thread + * Purpose: + * Look for a thread and remove it from the queues, if + * (and only if) the thread is waiting on the supplied + * pair. + * Conditions: + * at splsched + * wait queue locked + * possibly recursive + * Returns: + * KERN_NOT_WAITING: Thread is not waiting here. + * KERN_SUCCESS: It was, and is now removed (returned locked) + */ +kern_return_t +_wait_queue_select_thread( + wait_queue_t wq, + event_t event, + thread_t thread) +{ + wait_queue_element_t wq_element; + wait_queue_element_t wqe_next; + kern_return_t res = KERN_NOT_WAITING; + queue_t q = &wq->wq_queue; + + assert(wq->wq_fifo); + + thread_lock(thread); + if ((thread->wait_queue == wq) && (thread->wait_event == event)) { + remqueue(q, (queue_entry_t) thread); + thread->at_safe_point = FALSE; + thread->wait_event = NO_EVENT; + thread->wait_queue = WAIT_QUEUE_NULL; + /* thread still locked */ + return KERN_SUCCESS; + } + thread_unlock(thread); + + /* + * The wait_queue associated with the thread may be one of this + * wait queue's subordinates. 
Go see. If so, removing it from + * there is like removing it from here. + */ + wq_element = (wait_queue_element_t) queue_first(q); + while (!queue_end(q, (queue_entry_t)wq_element)) { + wqe_next = (wait_queue_element_t) + queue_next((queue_t) wq_element); + + if (wq_element->wqe_event == WAIT_QUEUE_SUBORDINATE) { + wait_queue_link_t wql = (wait_queue_link_t)wq_element; + wait_queue_t sub_queue; + + sub_queue = (wait_queue_t)wql->wql_subqueue; + wait_queue_lock(sub_queue); + if (! wait_queue_empty(sub_queue)) { + res = _wait_queue_select_thread(sub_queue, + event, + thread); + } + wait_queue_unlock(sub_queue); + if (res == KERN_SUCCESS) + return KERN_SUCCESS; + } + wq_element = wqe_next; + } + return res; +} + + +/* + * Routine: wait_queue_wakeup_identity_locked + * Purpose: + * Select a single thread that is most-eligible to run and set + * set it running. But return the thread locked. + * + * Conditions: + * at splsched + * wait queue locked + * possibly recursive + * Returns: + * a pointer to the locked thread that was awakened + */ +thread_t +wait_queue_wakeup_identity_locked( + wait_queue_t wq, + event_t event, + int result, + boolean_t unlock) +{ + thread_t thread; + + assert(wait_queue_held(wq)); + + thread = _wait_queue_select_one(wq, event); + if (unlock) + wait_queue_unlock(wq); + + if (thread) + thread_go_locked(thread, result); + return thread; /* still locked if not NULL */ +} + + +/* + * Routine: wait_queue_wakeup_one_locked + * Purpose: + * Select a single thread that is most-eligible to run and set + * set it runnings. + * + * Conditions: + * at splsched + * wait queue locked + * possibly recursive + * Returns: + * KERN_SUCCESS: It was, and is, now removed. 
+ * KERN_NOT_WAITING - No thread was waiting pair + */ +kern_return_t +wait_queue_wakeup_one_locked( + wait_queue_t wq, + event_t event, + int result, + boolean_t unlock) +{ + thread_t thread; + + assert(wait_queue_held(wq)); + + thread = _wait_queue_select_one(wq, event); + if (unlock) + wait_queue_unlock(wq); + + if (thread) { + thread_go_locked(thread, result); + thread_unlock(thread); + return KERN_SUCCESS; + } + + return KERN_NOT_WAITING; +} + +/* + * Routine: wait_queue_wakeup_one + * Purpose: + * Wakeup the most appropriate thread that is in the specified + * wait queue for the specified event. + * + * Conditions: + * Nothing locked + * + * Returns: + * KERN_SUCCESS - Thread was woken up + * KERN_NOT_WAITING - No thread was waiting pair + */ +kern_return_t +wait_queue_wakeup_one( + wait_queue_t wq, + event_t event, + int result) +{ + thread_t thread; + spl_t s; + + s = splsched(); + wait_queue_lock(wq); + thread = _wait_queue_select_one(wq, event); + wait_queue_unlock(wq); + + if (thread) { + thread_go_locked(thread, result); + thread_unlock(thread); + splx(s); + return KERN_SUCCESS; + } + + splx(s); + return KERN_NOT_WAITING; +} + + + +/* + * Routine: wait_queue_wakeup_thread_locked + * Purpose: + * Wakeup the particular thread that was specified if and only + * it was in this wait queue (or one of it's subordinate queues) + * and waiting on the specified event. + * + * This is much safer than just removing the thread from + * whatever wait queue it happens to be on. For instance, it + * may have already been awoken from the wait you intended to + * interrupt and waited on something else (like another + * semaphore). + * Conditions: + * at splsched + * wait queue already locked (may be released). 
+ * Returns: + * KERN_SUCCESS - the thread was found waiting and awakened + * KERN_NOT_WAITING - the thread was not waiting here + */ +kern_return_t +wait_queue_wakeup_thread_locked( + wait_queue_t wq, + event_t event, + thread_t thread, + int result, + boolean_t unlock) +{ + kern_return_t res; + + assert(wait_queue_held(wq)); + + /* + * See if the thread was still waiting there. If so, it got + * dequeued and returned locked. + */ + res = _wait_queue_select_thread(wq, event, thread); + if (unlock) + wait_queue_unlock(wq); + + if (res != KERN_SUCCESS) + return KERN_NOT_WAITING; + + thread_go_locked(thread, result); + thread_unlock(thread); + return KERN_SUCCESS; +} + +/* + * Routine: wait_queue_wakeup_thread + * Purpose: + * Wakeup the particular thread that was specified if and only + * it was in this wait queue (or one of it's subordinate queues) + * and waiting on the specified event. + * + * This is much safer than just removing the thread from + * whatever wait queue it happens to be on. For instance, it + * may have already been awoken from the wait you intended to + * interrupt and waited on something else (like another + * semaphore). + * Conditions: + * nothing of interest locked + * we need to assume spl needs to be raised + * Returns: + * KERN_SUCCESS - the thread was found waiting and awakened + * KERN_NOT_WAITING - the thread was not waiting here + */ +kern_return_t +wait_queue_wakeup_thread( + wait_queue_t wq, + event_t event, + thread_t thread, + int result) +{ + kern_return_t res; + spl_t s; + + s = splsched(); + wait_queue_lock(wq); + res = _wait_queue_select_thread(wq, event, thread); + wait_queue_unlock(wq); + + if (res == KERN_SUCCESS) { + thread_go_locked(thread, result); + thread_unlock(thread); + splx(s); + return KERN_SUCCESS; + } + splx(s); + return KERN_NOT_WAITING; +} + + +/* + * Routine: wait_queue_remove + * Purpose: + * Normal removal operations from wait queues drive from the + * wait queue to select a thread. 
However, if a thread is + * interrupted out of a wait, this routine is called to + * remove it from whatever wait queue it may be in. + * + * Conditions: + * splsched + * thread locked on entry and exit, but may be dropped. + * + * Returns: + * KERN_SUCCESS - if thread was in a wait queue + * KERN_NOT_WAITING - it was not + */ +kern_return_t +wait_queue_remove( + thread_t thread) +{ + wait_queue_t wq = thread->wait_queue; + + if (wq == WAIT_QUEUE_NULL) + return KERN_NOT_WAITING; + + /* + * have to get the locks again in the right order. + */ + thread_unlock(thread); + wait_queue_lock(wq); + thread_lock(thread); + + if (thread->wait_queue == wq) { + remqueue(&wq->wq_queue, (queue_entry_t)thread); + thread->wait_queue = WAIT_QUEUE_NULL; + thread->wait_event = NO_EVENT; + thread->at_safe_point = FALSE; + wait_queue_unlock(wq); + return KERN_SUCCESS; + } else { + wait_queue_unlock(wq); + return KERN_NOT_WAITING; /* anymore */ + } +} + diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h new file mode 100644 index 000000000..34e24b5bf --- /dev/null +++ b/osfmk/kern/wait_queue.h @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _KERN_WAIT_QUEUE_H_ +#define _KERN_WAIT_QUEUE_H_ + +#include /* for wait_queue_t */ +#include + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include + +/* + * wait_queue_t + * This is the definition of the common event wait queue + * that the scheduler APIs understand. It is used + * internally by the gerneralized event waiting mechanism + * (assert_wait), and also for items that maintain their + * own wait queues (such as ports and semaphores). + * + * It is not published to other kernel components. They + * can create wait queues by calling wait_queue_alloc. + * + * NOTE: Hardware locks are used to protect event wait + * queues since interrupt code is free to post events to + * them. + */ +typedef struct wait_queue { + hw_lock_data_t wq_interlock; /* interlock */ + unsigned int /* flags */ + /* boolean_t */ wq_fifo:1, /* fifo wakeup policy? */ + wq_issub:1, /* is waitq linked? */ + :0; /* force to long boundary */ + queue_head_t wq_queue; /* queue of elements */ +} WaitQueue; + +/* + * wait_queue_sub_t + * This is the common definition for a subordinate wait queue. + * These can be linked as members/elements of multiple regular + * wait queues. They have an additional set of linkages to + * identify the linkage structures that point to them. + */ +typedef struct wait_queue_sub { + WaitQueue wqs_wait_queue; /* our wait queue */ + queue_head_t wqs_sublinks; /* links from sub perspective */ +} WaitQueueSub; + +typedef WaitQueueSub *wait_queue_sub_t; + +#define WAIT_QUEUE_SUB_NULL ((wait_queue_sub_t)0) + + +/* + * wait_queue_element_t + * This structure describes the elements on an event wait + * queue. It is the common first fields in a thread shuttle + * and wait_queue_link_t. In that way, a wait queue can + * consist of both thread shuttle elements and links off of + * to other (subordinate) wait queues. 
+ * + * WARNING: The first three fields of the thread shuttle + * definition does not use this definition yet. Any change in + * the layout here will have to be matched with a change there. + */ +typedef struct wait_queue_element { + queue_chain_t wqe_links; /* link of elements on this queue */ + wait_queue_t wqe_queue; /* queue this element is on */ + event_t wqe_event; /* event this element is waiting for */ +} *wait_queue_element_t; + +/* + * wait_queue_link_t + * Specialized wait queue element type for linking subordinate + * event waits queues onto a wait queue. In this way, an event + * can be constructed so that any thread waiting on any number + * of associated wait queues can handle the event, while letting + * the thread only be linked on the single wait queue it blocked on. + * + * One use: ports in multiple portsets. Each thread is queued up + * on the portset that it specifically blocked on during a receive + * operation. Each port's event queue links in all the portset + * event queues of which it is a member. An IPC event post associated + * with that port may wake up any thread from any of those portsets, + * or one that was waiting locally on the port itself. 
+ */ +typedef struct wait_queue_link { + struct wait_queue_element wql_element; /* element on master */ + wait_queue_sub_t wql_subqueue; /* sub queue */ + queue_chain_t wql_sublinks; /* element on sub */ +} *wait_queue_link_t; + +#define WAIT_QUEUE_LINK_NULL ((wait_queue_link_t)0) + +#define wql_links wql_element.wqe_links +#define wql_queue wql_element.wqe_queue +#define wql_event wql_element.wqe_event + +extern int wait_queue_subordinate; +#define WAIT_QUEUE_SUBORDINATE &_wait_queue_subordinate + +extern void wait_queue_init( + wait_queue_t wait_queue, + int policy); + +extern kern_return_t wait_queue_link( + wait_queue_t wait_queue, + wait_queue_sub_t subordinate_queue); + +extern kern_return_t wait_queue_unlink( + wait_queue_t wait_queue, + wait_queue_sub_t subordinate_queue); +extern void wait_queue_unlink_one( + wait_queue_t wait_queue, + wait_queue_sub_t *subordinate_queue_pointer); + +extern boolean_t wait_queue_member_queue( + wait_queue_t wait_queue, + wait_queue_sub_t subordinate_queue); + +extern kern_return_t clear_wait_queue_internal( + thread_t thread, + int result); + +extern kern_return_t wait_queue_remove( + thread_t thread); + +#define wait_queue_assert_possible(thread) \ + ((thread)->wait_queue == WAIT_QUEUE_NULL) + + +#define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue)) + +#define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock)) + +#define wait_queue_is_sub(wqs) ((wqs)->wqs_wait_queue.wq_issub) +#define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue) +#define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue) +#define wqs_lock_try(wqs) wait_queue__try_lock(&(wqs)->wqs_wait_queue) + +/******** Decomposed interfaces (to build higher level constructs) ***********/ + +extern void wait_queue_lock( + wait_queue_t wait_queue); + +extern void wait_queue_unlock( + wait_queue_t wait_queue); + +extern boolean_t wait_queue_lock_try( + wait_queue_t wait_queue); + +/* assert intent to wait on a locked wait queue */ +extern void 
wait_queue_assert_wait_locked( + wait_queue_t wait_queue, + event_t wait_event, + int interruptible, + boolean_t unlock); + +/* peek to see which thread would be chosen for a wakeup - but keep on queue */ +extern void wait_queue_peek_locked( + wait_queue_t wait_queue, + event_t event, + thread_t *thread, + wait_queue_t *found_queue); + +/* peek to see which thread would be chosen for a wakeup - but keep on queue */ +extern void wait_queue_pull_thread_locked( + wait_queue_t wait_queue, + thread_t thread, + boolean_t unlock); + +/* wakeup all threads waiting for a particular event on locked queue */ +extern kern_return_t wait_queue_wakeup_one_locked( + wait_queue_t wait_queue, + event_t wake_event, + int result, + boolean_t unlock); + +/* wakeup one thread waiting for a particular event on locked queue */ +extern kern_return_t wait_queue_wakeup_one_locked( + wait_queue_t wait_queue, + event_t wake_event, + int result, + boolean_t unlock); + +/* return the identity of a thread that is waiting for */ +extern thread_t wait_queue_recommend_locked( + wait_queue_t wait_queue, + event_t wake_event); + +/* return identity of a thread awakened for a particular */ +extern thread_t wait_queue_wakeup_identity_locked( + wait_queue_t wait_queue, + event_t wake_event, + int result, + boolean_t unlock); + +/* wakeup thread iff its still waiting for a particular event on locked queue */ +extern kern_return_t wait_queue_wakeup_thread_locked( + wait_queue_t wait_queue, + event_t wake_event, + thread_t thread, + int result, + boolean_t unlock); + +#endif /* MACH_KERNEL_PRIVATE */ + +extern wait_queue_t wait_queue_alloc( + int policy); + +extern void wait_queue_free( + wait_queue_t wait_queue); + +/******** Standalone interfaces (not a part of a higher construct) ************/ + +/* assert intent to wait on pair */ +extern void wait_queue_assert_wait( + wait_queue_t wait_queue, + event_t wait_event, + int interruptible); + +/* wakeup the most appropriate thread waiting on pair */ +extern 
kern_return_t wait_queue_wakeup_one( + wait_queue_t wait_queue, + event_t wake_event, + int result); + +/* wakeup all the threads waiting on pair */ +extern kern_return_t wait_queue_wakeup_all( + wait_queue_t wait_queue, + event_t wake_event, + int result); + +/* wakeup a specified thread waiting iff waiting on pair */ +extern kern_return_t wait_queue_wakeup_thread( + wait_queue_t wait_queue, + event_t wake_event, + thread_t thread, + int result); + +#endif /* _KERN_WAIT_QUEUE_H_ */ diff --git a/osfmk/kern/xpr.c b/osfmk/kern/xpr.c new file mode 100644 index 000000000..2a8d85f3f --- /dev/null +++ b/osfmk/kern/xpr.c @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:57 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.46.1 1997/09/22 17:41:21 barbou + * MP+RT: protect cpu_number() usage against preemption. 
+ * [97/09/16 barbou] + * + * Revision 1.2.25.5 1996/07/31 09:56:06 paire + * Merged with nmk20b7_shared (1.2.41.1) + * [96/06/10 paire] + * + * Revision 1.2.41.1 1996/04/15 14:35:12 bernadat + * Keep interrupts disabled while accessing XPR_TIMESTAMP. + * [96/04/12 bernadat] + * + * Revision 1.2.25.4 1995/02/24 15:22:42 alanl + * DIPC: Merge from nmk17b2 to nmk18b8. + * Notes: Restore portable locks package, derived from nmk17b2. + * [95/02/07 alanl] + * + * Revision 1.2.28.3 1994/12/09 22:25:16 dwm + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * * Rev 1.2.25.2 1994/10/21 18:30:41 joe + * Added ETAP support + * [1994/12/09 21:10:59 dwm] + * + * Revision 1.2.28.2 1994/11/10 06:15:29 dwm + * mk6 CR764 - s/spinlock/simple_lock/ (name change only) + * [1994/11/10 05:58:48 dwm] + * + * Revision 1.2.28.1 1994/11/04 10:10:56 dwm + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.5.7 1994/05/06 18:54:13 tmt + * Merge in DEC Alpha changes to osc1.3b19. + * include + * 64 bit cleanup. + * * End1.3merge + * [1994/11/04 09:39:17 dwm] + * + * Revision 1.2.25.1 1994/09/23 02:32:39 ezf + * change marker to not FREE + * [1994/09/22 21:38:29 ezf] + * + * Revision 1.2.22.1 1994/06/09 14:14:11 dswartz + * Preemption merge. + * [1994/06/09 14:08:38 dswartz] + * + * Revision 1.2.5.5 1993/08/12 20:16:51 bernard + * Last pass for ANSI prototypes - CR#9523 + * [1993/08/12 15:43:24 bernard] + * + * Revision 1.2.5.4 1993/08/02 17:26:05 rod + * ANSI prototypes: zap explicit include of machine/setjmp.h. CR #9523. + * [1993/08/01 13:36:31 rod] + * + * Revision 1.2.5.3 1993/07/27 18:09:05 rod + * Add ANSI prototypes. CR #9523. + * [1993/07/27 14:33:23 rod] + * + * Revision 1.2.5.2 1993/06/09 02:39:13 gm + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:07:51 jeffc] + * + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:15:13 jeffc] + * + * Revision 1.2 1993/04/19 16:31:21 devrcs + * Added void to fcns that still needed it. + * [93/02/05 bruel] + * + * Revision 1.1 1992/09/30 02:10:39 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.9.5.2 92/03/03 16:20:57 jeffreyh + * Fix Log. + * [92/02/24 13:24:44 jeffreyh] + * + * Revision 2.9.5.1 92/02/18 19:13:03 jeffreyh + * Added an xpr_search function to which you can give + * a selection function. + * [92/02/11 08:13:23 bernadat] + * + * Revision 2.9.4.1 92/02/13 18:53:47 jeffreyh + * Added an xpr_search function to which you can give + * a selection function. + * [92/02/11 08:13:23 bernadat] + * + * Revision 2.9.3.1 92/02/11 17:19:59 jeffreyh + * Added an xpr_search function to which you can give + * a selection function. + * [92/02/11 08:13:23 bernadat] + * + * Revision 2.9.2.1 92/02/11 08:13:23 bernadat + * Added an xpr_search function to which you can give + * a selection function. + * + * + * Revision 2.9 91/10/09 16:11:50 af + * Removed xpr_save. Modified xpr_dump to make it useful + * for dumping xpr buffers in user space tasks. + * [91/09/20 rpd] + * + * Turned on xprenable by default. xprbootstrap now preserves + * the original contents of the buffer if xprenable is off. + * [91/09/18 rpd] + * + * Revision 2.8 91/08/28 11:14:56 jsb + * Fixed xprbootstrap to zero the allocate memory. + * [91/08/18 rpd] + * + * Revision 2.7 91/05/18 14:34:37 rpd + * Added xprenable and other minor changes so that the xpr buffer + * may be examined after a spontaneous reboot. + * [91/05/03 rpd] + * Fixed the initialization check in xpr. + * Fixed xpr_dump. + * [91/04/02 rpd] + * + * Revision 2.6 91/05/14 16:50:09 mrt + * Correcting copyright + * + * Revision 2.5 91/03/16 14:53:24 rpd + * Updated for new kmem_alloc interface. 
+ * [91/03/03 rpd] + * + * Revision 2.4 91/02/05 17:31:13 mrt + * Changed to new Mach copyright + * [91/02/01 16:21:17 mrt] + * + * Revision 2.3 90/09/09 14:33:04 rpd + * Use decl_simple_lock_data. + * [90/08/30 rpd] + * + * Revision 2.2 89/11/29 14:09:21 af + * Added xpr_dump() to print on console the content of the buffer, + * only valid for KDB usage. + * [89/11/12 af] + * + * MACH_KERNEL: include sys/cpu_number.h instead of machine/cpu.h. + * Clean up comments. + * [88/12/19 dbg] + * + * Revision 2.1 89/08/03 15:49:11 rwd + * Created. + * + * Revision 2.2 88/12/19 02:48:30 mwyoung + * Fix include file references. + * [88/11/22 02:17:01 mwyoung] + * + * Separate initialization into two phases. + * [88/11/22 01:13:11 mwyoung] + * + * 6-Jan-88 Michael Young (mwyoung) at Carnegie-Mellon University + * Eliminate use of arg6 in order to allow a more shapely event structure. + * + * 30-Dec-87 David Golub (dbg) at Carnegie-Mellon University + * Delinted. + * + * 7-Dec-87 Richard Sanzi (sanzi) at Carnegie-Mellon University + * Added xpr_save() routine. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/* NOTE(review): the targets of the #include directives in this file were
 * lost in extraction (angle-bracketed paths stripped) — restore them from
 * the original osfmk/kern/xpr.c before building. */
#include
/*
 * xpr silent tracing circular buffer.
 */

#include

#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * After a spontaneous reboot, it is desirable to look
 * at the old xpr buffer.  Assuming xprbootstrap allocates
 * the buffer in the same place in physical memory and
 * the reboot doesn't clear memory, this should work.
 * xprptr will be reset, but the saved value should be OK.
 * Just set xprenable false so the buffer isn't overwritten.
 */

decl_simple_lock_data(,xprlock)		/* protects xprptr (slot allocation) */

boolean_t xprenable = TRUE;	/* Enable xpr tracing */
int nxprbufs = 0;		/* Number of contiguous xprbufs allocated */
int xprflags = 0;		/* Bit mask of xpr flags enabled */
struct xprbuf *xprbase;		/* Pointer to circular buffer nxprbufs*sizeof(xprbuf)*/
struct xprbuf *xprptr;		/* Currently allocated xprbuf */
struct xprbuf *xprlast;		/* Pointer to end of circular buffer */

/*
 * Log one trace record into the circular buffer.
 *
 * msg is a printf-style format string; only the POINTER is stored, so it
 * must remain valid (formatting is deferred to xpr_dump / a post-processor).
 * arg1..arg5 are its arguments.  A no-op until xprbootstrap has run
 * (xprptr == 0) or when tracing is disabled.
 *
 * The slot is claimed at splhigh under xprlock; once claimed, the record
 * fields are filled in after dropping the lock because the slot is owned
 * exclusively by this caller.
 */
void
xpr(
	char	*msg,
	long	arg1,
	long	arg2,
	long	arg3,
	long	arg4,
	long	arg5)
{
	spl_t s;
	register struct xprbuf *x;

	/* If we aren't initialized, ignore trace request */
	if (!xprenable || (xprptr == 0))
		return;
	/* Guard against all interrupts and allocate next buffer. */

	s = splhigh();
	simple_lock(&xprlock);
	x = xprptr++;
	if (xprptr >= xprlast) {
		/* wrap around */
		xprptr = xprbase;
	}
	/* Save xprptr in allocated memory (the word just past the last slot),
	 * so the current position survives a spontaneous reboot. */
	*(struct xprbuf **)xprlast = xprptr;
	simple_unlock(&xprlock);
	/* timestamp is taken while still at splhigh (interrupts disabled) */
	x->timestamp = XPR_TIMESTAMP;
	splx(s);
	x->msg = msg;
	x->arg1 = arg1;
	x->arg2 = arg2;
	x->arg3 = arg3;
	x->arg4 = arg4;
	x->arg5 = arg5;
	mp_disable_preemption();
	x->cpuinfo = cpu_number();
	mp_enable_preemption();
}

/*
 * Allocate the trace buffer at boot.  Must run before any xpr() call;
 * tracing stays disabled until xprptr is set at the end.  One extra
 * pointer-sized word is allocated past the last slot to hold the saved
 * copy of xprptr (see xpr() above).
 */
void
xprbootstrap(void)
{
	vm_offset_t	addr;
	vm_size_t	size;
	kern_return_t	kr;

	simple_lock_init(&xprlock, ETAP_MISC_XPR);
	if (nxprbufs == 0)
		return;	/* assume XPR support not desired */

	/* leave room at the end for a saved copy of xprptr */
	size = nxprbufs * sizeof(struct xprbuf) + sizeof xprptr;

	kr = kmem_alloc_wired(kernel_map, &addr, size);
	if (kr != KERN_SUCCESS)
		panic("xprbootstrap");

	if (xprenable) {
		/*
		 * If xprenable is set (the default) then we zero
		 * the buffer so xpr_dump doesn't encounter bad pointers.
		 * If xprenable isn't set, then we preserve
		 * the original contents of the buffer.  This is useful
		 * if memory survives reboots, so xpr_dump can show
		 * the previous buffer contents.
		 */

		(void) memset((void *) addr, 0, size);
	}

	xprbase = (struct xprbuf *) addr;
	xprlast = &xprbase[nxprbufs];
	xprptr = xprbase;	/* setting xprptr enables tracing */
}

/* initial flag mask OR'ed into xprflags by xprinit(); patchable at boot */
int xprinitial = 0;

void
xprinit(void)
{
	xprflags |= xprinitial;
}

#if	MACH_KDB
/* NOTE(review): include target lost in extraction — restore from original. */
#include

/*
 * Prototypes for functions called from the debugger
 */
void
xpr_dump(
	struct xprbuf	*base,
	int		nbufs);

void
xpr_search(
	int	arg_index,
	int	value);

extern jmp_buf_t *db_recover;

/*
 * Print current content of xpr buffers (KDB's sake)
 * Use stack order to make it understandable.
 *
 * Called as "!xpr_dump" this dumps the kernel's xpr buffer.
 * Called with arguments, it can dump xpr buffers in user tasks,
 * assuming they use the same format as the kernel.
+ */ +void +xpr_dump( + struct xprbuf *base, + int nbufs) +{ + jmp_buf_t db_jmpbuf; + jmp_buf_t *prev; + struct xprbuf *last, *ptr; + register struct xprbuf *x; + int i; + spl_t s; + + if (base == 0) { + base = xprbase; + nbufs = nxprbufs; + } + + if (nbufs == 0) + return; + + if (base == xprbase) { + s = splhigh(); + simple_lock(&xprlock); + } + + last = base + nbufs; + ptr = * (struct xprbuf **) last; + + prev = db_recover; + if (_setjmp(db_recover = &db_jmpbuf) == 0) + for (x = ptr, i = 0; i < nbufs; i++) { + if (--x < base) + x = last - 1; + + if (x->msg == 0) + break; + + db_printf("<%d:%x:%x> ", x - base, x->cpuinfo, x->timestamp); + db_printf(x->msg, x->arg1,x->arg2,x->arg3,x->arg4,x->arg5); + } + db_recover = prev; + + if (base == xprbase) { + simple_unlock(&xprlock); + splx(s); + } +} + +/* + * dump xpr table with a selection criteria. + * argument number "arg_index" must equal "value" + */ + +void +xpr_search( + int arg_index, + int value) +{ + jmp_buf_t db_jmpbuf; + jmp_buf_t *prev; + register struct xprbuf *x; + spl_t s; + int n; + + if (!nxprbufs) + return; + + n = nxprbufs; + + s = splhigh(); + simple_lock(&xprlock); + + prev = db_recover; + if (_setjmp(db_recover = &db_jmpbuf) == 0) + for (x = *(struct xprbuf **)xprlast ; n--; ) { + if (--x < xprbase) + x = xprlast - 1; + + if (x->msg == 0) { + break; + } + + if (*((&x->arg1)+arg_index) != value) + continue; + + db_printf("<%d:%d:%x> ", x - xprbase, + x->cpuinfo, x->timestamp); + db_printf(x->msg, x->arg1,x->arg2,x->arg3,x->arg4,x->arg5); + } + db_recover = prev; + + simple_unlock(&xprlock); + splx(s); +} +#endif /* MACH_KDB */ diff --git a/osfmk/kern/xpr.h b/osfmk/kern/xpr.h new file mode 100644 index 000000000..9c528830a --- /dev/null +++ b/osfmk/kern/xpr.h @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:57 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.12.5 1995/02/24 15:22:46 alanl + * Add XPR definition to trace generic XMM activities. + * [95/01/31 alanl] + * + * Revision 1.1.14.3 1994/11/02 18:37:35 dwm + * mk6 CR668 - 1.3b26 merge + * Add MOR token, update XPR names for locks, vm_maps. + * now only a single XPR(...) macro, 5 args always. + * [1994/11/02 18:17:33 dwm] + * + * Revision 1.1.12.3 1994/09/23 02:32:50 ezf + * change marker to not FREE + * [1994/09/22 21:38:33 ezf] + * + * Revision 1.1.12.2 1994/09/10 21:46:57 bolinger + * Merge up to NMK17.3 + * [1994/09/08 19:57:50 bolinger] + * + * Revision 1.1.12.1 1994/06/14 17:13:10 bolinger + * Merge up to NMK17.2. + * [1994/06/14 16:55:44 bolinger] + * + * Revision 1.1.7.2 1994/05/30 07:37:07 bernadat + * Added missing ')' to XPR5. 
+ * [94/05/25 bernadat] + * + * Revision 1.1.7.1 1994/03/24 15:29:18 paire + * Set up correct XPR and XPR[1-5] macros. + * Added XPR_SIMPLE_LOCK define. + * [94/03/08 paire] + * + * Revision 1.1.2.5 1993/08/03 18:29:24 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 17:41:44 gm] + * + * Revision 1.1.2.4 1993/07/27 18:09:08 rod + * Add ANSI prototypes. CR #9523. + * [1993/07/27 10:42:04 rod] + * + * Revision 1.1.2.3 1993/06/07 22:15:39 jeffc + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:07:55 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:42:14 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:15:17 jeffc] + * + * Revision 1.1 1992/09/30 02:30:28 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 16:50:21 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:31:18 mrt + * MACH_KERNEL: removed conditionals. + * [88/12/19 dbg] + * + * Revision 2.1 89/08/03 15:57:39 rwd + * Created. + * + * Revision 2.5 88/12/19 02:51:59 mwyoung + * Added VM system tags. + * [88/11/22 mwyoung] + * + * Revision 2.4 88/08/24 02:55:54 mwyoung + * Adjusted include file references. + * [88/08/17 02:29:56 mwyoung] + * + * + * 9-Apr-88 Daniel Julin (dpj) at Carnegie-Mellon University + * Added flags for TCP and MACH_NP debugging. + * + * 6-Jan-88 Michael Young (mwyoung) at Carnegie-Mellon University + * Make the event structure smaller to make it easier to read from + * kernel debuggers. + * + * 16-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University + * MACH: made XPR_DEBUG definition conditional on MACH + * since the routines invoked under it won't link without MACH. + * [ V5.1(F7) ] + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Include file for xpr circular buffer silent tracing.
 *
 */
/*
 * If the kernel flag XPRDEBUG is set, the XPR macro is enabled.  The
 * macro should be invoked something like the following:
 *	XPR(XPR_SYSCALLS, "syscall: %d, 0x%x\n", syscallno, arg1, 0,0,0);
 * which will expand into the following code:
 *	if (xprflags & XPR_SYSCALLS)
 *		xpr("syscall: %d, 0x%x\n", syscallno, arg1, 0,0,0);
 * Xpr will log the pointer to the printf string and up to 5 arguments,
 * along with a timestamp and cpuinfo (for multi-processor systems), into
 * a circular buffer.  The actual printf processing is delayed until after
 * the buffer has been collected.  It is assumed that the text/data segments
 * of the kernel can easily be reconstructed in a post-processor which
 * performs the printf processing.
 *
 * If the XPRDEBUG compilation switch is not set, the XPR macro expands
 * to nothing.
+ */ + +#ifndef _KERN_XPR_H_ +#define _KERN_XPR_H_ + +#ifdef MACH_KERNEL +#include +#else /* MACH_KERNEL */ +#include +#endif /* MACH_KERNEL */ + +#include + +#if XPR_DEBUG + +#define XPR(flags, msg, arg1, arg2, arg3, arg4, arg5) \ +MACRO_BEGIN \ + if (xprflags & (flags)) { \ + xpr((msg), (long)(arg1), (long)(arg2), \ + (long)(arg3), (long)(arg4), (long)(arg5)); \ + } \ +MACRO_END + +extern int xprflags; + +/* + * flags for message types. + */ +#define XPR_TRAPS (1 << 1) +#define XPR_SCHED (1 << 2) +#define XPR_LOCK (1 << 3) +#define XPR_SLOCK (1 << 4) +#define XPR_PMAP (1 << 6) +#define XPR_VM_MAP (1 << 7) +#define XPR_VM_OBJECT (1 << 8) +#define XPR_VM_OBJECT_CACHE (1 << 9) +#define XPR_VM_PAGE (1 << 10) +#define XPR_VM_PAGEOUT (1 << 11) +#define XPR_MEMORY_OBJECT (1 << 12) +#define XPR_VM_FAULT (1 << 13) +#define XPR_VM_OBJECT_REP (1 << 14) +#define XPR_DEFAULT_PAGER (1 << 15) +#define XPR_INODE_PAGER (1 << 16) +#define XPR_INODE_PAGER_DATA (1 << 17) +#define XPR_XMM (1 << 18) + +#else /* XPR_DEBUG */ +#define XPR(flags, msg, arg1, arg2, arg3, arg4, arg5) +#endif /* XPR_DEBUG */ + +struct xprbuf { + char *msg; + long arg1,arg2,arg3,arg4,arg5; + int timestamp; + int cpuinfo; +}; + +/* Bootstrap XPR facility */ +extern void xprbootstrap(void); + +/* Enable XPR facility */ +extern void xprinit(void); + +/* Log an XPR message */ +extern void xpr( + char *msg, + long arg1, + long arg2, + long arg3, + long arg4, + long arg5); + +#endif /* _KERN_XPR_H_ */ diff --git a/osfmk/kern/zalloc.c b/osfmk/kern/zalloc.c new file mode 100644 index 000000000..12ed64e03 --- /dev/null +++ b/osfmk/kern/zalloc.c @@ -0,0 +1,1801 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: kern/zalloc.c + * Author: Avadis Tevanian, Jr. + * + * Zone-based memory allocator. A zone is a collection of fixed size + * data blocks for which quick allocation/deallocation is possible. 
 */
/* NOTE(review): the targets of the #include directives below were lost in
 * extraction — restore them from the original osfmk/kern/zalloc.c. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include


#if	MACH_ASSERT
/* Detect use of zone elt after freeing it by two methods:
 * (1) Range-check the free-list "next" ptr for sanity.
 * (2) Store the ptr in two different words, and compare them against
 *     each other when re-using the zone elt, to detect modifications;
 */

#if defined(__alpha)

#define is_kernel_data_addr(a)	\
	(!(a) || IS_SYS_VA(a) && !((a) & (sizeof(long)-1)))

#else /* !defined(__alpha) */

#define is_kernel_data_addr(a)	\
	(!(a) || (a) >= VM_MIN_KERNEL_ADDRESS && !((a) & 0x3))

#endif /* defined(__alpha) */

/* Should we set all words of the zone element to an illegal address
 * when it is freed, to help catch usage after freeing?  The down-side
 * is that this obscures the identity of the freed element.
 */
boolean_t zfree_clear = FALSE;

/* Push a freed element on the zone's free list.  Word 0 becomes the
 * free-list link; with zfree_clear, the remaining words are poisoned
 * with 0xdeadbeef to catch use-after-free. */
#define ADD_TO_ZONE(zone, element)					\
MACRO_BEGIN								\
	if (zfree_clear)						\
	{   int i;							\
	    for (i=1;							\
		 i < zone->elem_size/sizeof(vm_offset_t) - 1;		\
		 i++)							\
	    ((vm_offset_t *)(element))[i] = 0xdeadbeef;			\
	}								\
	((vm_offset_t *)(element))[0] = (zone)->free_elements;		\
	(zone)->free_elements = (vm_offset_t) (element);		\
	(zone)->count--;						\
MACRO_END

/* Pop the head of the zone's free list into (ret), sanity-checking the
 * stored link word for corruption first. */
#define REMOVE_FROM_ZONE(zone, ret, type)				\
MACRO_BEGIN								\
	(ret) = (type) (zone)->free_elements;				\
	if ((ret) != (type) 0) {					\
	    if (!is_kernel_data_addr(((vm_offset_t *)(ret))[0])) {	\
		panic("A freed zone element has been modified.\n");	\
	    }								\
	    (zone)->count++;						\
	    (zone)->free_elements = *((vm_offset_t *)(ret));		\
	}								\
MACRO_END
#else	/* MACH_ASSERT */

#define ADD_TO_ZONE(zone, element)					\
MACRO_BEGIN								\
		*((vm_offset_t *)(element)) = (zone)->free_elements;	\
		(zone)->free_elements = (vm_offset_t) (element);	\
		(zone)->count--;					\
MACRO_END

#define REMOVE_FROM_ZONE(zone, ret, type)				\
MACRO_BEGIN								\
	(ret) = (type) (zone)->free_elements;				\
	if ((ret) != (type) 0) {					\
		(zone)->count++;					\
		(zone)->free_elements = *((vm_offset_t *)(ret));	\
	}								\
MACRO_END

#endif	/* MACH_ASSERT */

#if	ZONE_DEBUG
/* non-zero iff the zone's active-element queue is in use */
#define zone_debug_enabled(z) z->active_zones.next
#endif	/* ZONE_DEBUG */

/*
 * Support for garbage collection of unused zone pages:
 */

struct zone_page_table_entry {
	struct zone_page_table_entry	*next;
	short	in_free_list;		/* on a GC free-page list? */
	short	alloc_count;		/* elements allocated from this page */
};

extern struct zone_page_table_entry * zone_page_table;

#define lock_zone_page_table() simple_lock(&zone_page_table_lock)
#define unlock_zone_page_table() simple_unlock(&zone_page_table_lock)

/* map an address inside zone_map to its page-table entry */
#define	zone_page(addr) \
    (&(zone_page_table[(atop(((vm_offset_t)addr) - zone_map_min_address))]))

/* Forwards */
void		zone_page_init(
				vm_offset_t	addr,
				vm_size_t	size,
				int		value);

void		zone_page_alloc(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_add_free_page_list(
				struct zone_page_table_entry	**free_list,
				vm_offset_t	addr,
				vm_size_t	size);
void		zone_page_dealloc(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_in_use(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_free(
				vm_offset_t	addr,
				vm_size_t	size);

boolean_t	zone_page_collectable(
				vm_offset_t	addr,
				vm_size_t	size);

void		zone_page_keep(
				vm_offset_t	addr,
				vm_size_t	size);

#if	ZONE_DEBUG && MACH_KDB
int		zone_count(
				zone_t	z,
				int	tail);
#endif	/* ZONE_DEBUG && MACH_KDB */

vm_map_t	zone_map = VM_MAP_NULL;

zone_t		zone_zone = ZONE_NULL;	/* the zone containing other zones */

/*
 * The VM system gives us an initial chunk of memory.
 * It has to be big enough to allocate the zone_zone
 */

vm_offset_t	zdata;
vm_size_t	zdata_size;

#define lock_zone(zone)			\
MACRO_BEGIN				\
	simple_lock(&zone->lock);	\
MACRO_END

#define unlock_zone(zone)		\
MACRO_BEGIN				\
	simple_unlock(&zone->lock);	\
MACRO_END

#define lock_zone_init(zone)				\
MACRO_BEGIN						\
	simple_lock_init(&zone->lock, ETAP_MISC_ZONE);	\
MACRO_END

#define lock_try_zone(zone)	simple_lock_try(&zone->lock)

kern_return_t		zget_space(
				vm_offset_t size,
				vm_offset_t *result);

decl_simple_lock_data(,zget_space_lock)
vm_offset_t	zalloc_next_space;	/* next free byte in the non-paged pool */
vm_offset_t	zalloc_end_of_space;	/* end of the current pool chunk */
vm_size_t	zalloc_wasted_space;	/* bytes discarded at chunk boundaries */

/*
 * Garbage collection map information
 */
decl_simple_lock_data(,	zone_page_table_lock)
struct zone_page_table_entry *	zone_page_table;
vm_offset_t			zone_map_min_address;
vm_offset_t			zone_map_max_address;
integer_t			zone_pages;

/*
 * Exclude more than one concurrent garbage collection
 */
decl_mutex_data(,		zone_gc_lock)

#define from_zone_map(addr) \
	((vm_offset_t)(addr) >= zone_map_min_address && \
	 (vm_offset_t)(addr) <  zone_map_max_address)

#define	ZONE_PAGE_USED		0
#define ZONE_PAGE_UNUSED	-1


/*
 * Protects first_zone, last_zone, num_zones,
 * and the next_zone field of zones.
 */
decl_simple_lock_data(,	all_zones_lock)
zone_t			first_zone;
zone_t			*last_zone;
int			num_zones;

/*
 * zinit initializes a new zone.  The zone data structures themselves
 * are stored in a zone, which is initially a static structure that
 * is initialized by zone_init.
 */
/*
 * Create a zone of fixed-size elements.  Returns ZONE_NULL on failure.
 * The first zone created (the "zone of zones") is carved out of the
 * non-paged pool via zget_space; later zones are allocated from it.
 */
zone_t
zinit(
	vm_size_t	size,		/* the size of an element */
	vm_size_t	max,		/* maximum memory to use */
	vm_size_t	alloc,		/* allocation size */
	char		*name)		/* a name for the zone */
{
	zone_t		z;

	if (zone_zone == ZONE_NULL) {
		if (zget_space(sizeof(struct zone), (vm_offset_t *)&z)
		    != KERN_SUCCESS)
			return(ZONE_NULL);
	} else
		z = (zone_t) zalloc(zone_zone);
	if (z == ZONE_NULL)
		return(ZONE_NULL);

	/*
	 * Round off all the parameters appropriately.
	 * (element size is rounded up to a multiple of the free-list
	 * link word so the link always fits and stays aligned)
	 */
	if (size < sizeof(z->free_elements))
		size = sizeof(z->free_elements);
	size = ((size-1) + sizeof(z->free_elements)) -
		((size-1) % sizeof(z->free_elements));
	if (alloc == 0)
		alloc = PAGE_SIZE;
	alloc = round_page(alloc);
	max = round_page(max);
	/*
	 * We look for an allocation size with least fragmentation
	 * in the range of 1 - 5 pages.  This size will be used unless
	 * the user suggestion is larger AND has less fragmentation
	 */
	{
		vm_size_t best, waste;
		unsigned int i;

		best = PAGE_SIZE;
		waste = best % size;
		for (i = 2; i <= 5; i++) {
			vm_size_t tsize, twaste;

			tsize = i * PAGE_SIZE;
			twaste = tsize % size;
			if (twaste < waste)
				best = tsize, waste = twaste;
		}
		if (alloc <= best || (alloc % size >= waste))
			alloc = best;
	}
	if (max && (max < alloc))
		max = alloc;

	z->free_elements = 0;
	z->cur_size = 0;
	z->max_size = max;
	z->elem_size = size;
	z->alloc_size = alloc;
	z->zone_name = name;
	z->count = 0;
	z->doing_alloc = FALSE;
	z->exhaustible = FALSE;
	z->collectable = TRUE;
	z->allows_foreign = FALSE;
	z->expandable = TRUE;
	z->waiting = FALSE;

#if	ZONE_DEBUG
	z->active_zones.next = z->active_zones.prev = 0;
	zone_debug_enable(z);
#endif	/* ZONE_DEBUG */
	lock_zone_init(z);

	/*
	 * Add the zone to the all-zones list.
	 */

	z->next_zone = ZONE_NULL;
	simple_lock(&all_zones_lock);
	*last_zone = z;
	last_zone = &z->next_zone;
	num_zones++;
	simple_unlock(&all_zones_lock);

	return(z);
}

/*
 * Cram the given memory into the specified zone.
 * Splits [newmem, newmem+size) into elements and pushes each on the
 * zone's free list; cur_size grows, count is net-unchanged.
 */
void
zcram(
	register zone_t		zone,
	vm_offset_t		newmem,
	vm_size_t		size)
{
	register vm_size_t	elem_size;

	/* Basic sanity checks */
	assert(zone != ZONE_NULL && newmem != (vm_offset_t)0);
	assert(!zone->collectable || zone->allows_foreign
		|| (from_zone_map(newmem) && from_zone_map(newmem+size-1)));

	elem_size = zone->elem_size;

	lock_zone(zone);
	while (size >= elem_size) {
		ADD_TO_ZONE(zone, newmem);
		if (from_zone_map(newmem))
			zone_page_alloc(newmem, elem_size);
		zone->count++;	/* compensate for ADD_TO_ZONE */
		size -= elem_size;
		newmem += elem_size;
		zone->cur_size += elem_size;
	}
	unlock_zone(zone);
}

/*
 * Contiguous space allocator for non-paged zones.  Allocates "size" amount
 * of memory from zone_map.
 */

kern_return_t
zget_space(
	vm_offset_t size,
	vm_offset_t *result)
{
	vm_offset_t	new_space = 0;
	vm_size_t	space_to_add;

	simple_lock(&zget_space_lock);
	while ((zalloc_next_space + size) > zalloc_end_of_space) {
		/*
		 * Add at least one page to allocation area.
		 */

		space_to_add = round_page(size);

		if (new_space == 0) {
			kern_return_t retval;
			/*
			 * Memory cannot be wired down while holding
			 * any locks that the pageout daemon might
			 * need to free up pages.  [Making the zget_space
			 * lock a complex lock does not help in this
			 * regard.]
			 *
			 * Unlock and allocate memory.  Because several
			 * threads might try to do this at once, don't
			 * use the memory before checking for available
			 * space again.
			 */

			simple_unlock(&zget_space_lock);

			retval = kernel_memory_allocate(zone_map, &new_space,
				space_to_add, 0, KMA_KOBJECT|KMA_NOPAGEWAIT);
			if (retval != KERN_SUCCESS)
				return(retval);
			zone_page_init(new_space, space_to_add,
							ZONE_PAGE_USED);
			simple_lock(&zget_space_lock);
			continue;
		}


		/*
		 * Memory was allocated in a previous iteration.
		 *
		 * Check whether the new region is contiguous
		 * with the old one.
		 */

		if (new_space != zalloc_end_of_space) {
			/*
			 * Throw away the remainder of the
			 * old space, and start a new one.
			 */
			zalloc_wasted_space +=
				zalloc_end_of_space - zalloc_next_space;
			zalloc_next_space = new_space;
		}

		zalloc_end_of_space = new_space + space_to_add;

		new_space = 0;
	}
	*result = zalloc_next_space;
	zalloc_next_space += size;
	simple_unlock(&zget_space_lock);

	/* another thread extended the pool first: release our chunk */
	if (new_space != 0)
		kmem_free(zone_map, new_space, space_to_add);

	return(KERN_SUCCESS);
}


/*
 * Steal memory for the zone package.  Called from
 * vm_page_bootstrap().
 */
void
zone_steal_memory(void)
{
	zdata_size = round_page(128*sizeof(struct zone));
	zdata = pmap_steal_memory(zdata_size);
}


/*
 * Fill a zone with enough memory to contain at least nelem elements.
 * Memory is obtained with kmem_alloc_wired from the kernel_map.
 * Return the number of elements actually put into the zone, which may
 * be more than the caller asked for since the memory allocation is
 * rounded up to a full page.
 */
int
zfill(
	zone_t	zone,
	int	nelem)
{
	kern_return_t	kr;
	vm_size_t	size;
	vm_offset_t	memory;
	int		nalloc;

	assert(nelem > 0);
	if (nelem <= 0)		/* redundant with the assert, which may be compiled out */
		return 0;
	size = nelem * zone->elem_size;
	size = round_page(size);
	kr = kmem_alloc_wired(kernel_map, &memory, size);
	if (kr != KERN_SUCCESS)
		return 0;

	/* memory comes from kernel_map, not zone_map: mark zone as
	 * accepting foreign memory before cramming */
	zone_change(zone, Z_FOREIGN, TRUE);
	zcram(zone, memory, size);
	nalloc = size / zone->elem_size;
	assert(nalloc >= nelem);

	return nalloc;
}

/*
 * Initialize the "zone of zones" which uses fixed memory allocated
 * earlier in memory initialization.  zone_bootstrap is called
 * before zone_init.
 */
void
zone_bootstrap(void)
{
	vm_size_t zone_zone_size;
	vm_offset_t zone_zone_space;

	simple_lock_init(&all_zones_lock, ETAP_MISC_ZONE_ALL);

	first_zone = ZONE_NULL;
	last_zone = &first_zone;
	num_zones = 0;

	simple_lock_init(&zget_space_lock, ETAP_MISC_ZONE_GET);
	zalloc_next_space = zdata;
	zalloc_end_of_space = zdata + zdata_size;
	zalloc_wasted_space = 0;

	/* assertion: nobody else called zinit before us */
	assert(zone_zone == ZONE_NULL);
	zone_zone = zinit(sizeof(struct zone), 128 * sizeof(struct zone),
			  sizeof(struct zone), "zones");
	zone_change(zone_zone, Z_COLLECT, FALSE);
	/* cram the remainder of the stolen boot memory into zone_zone.
	 * NOTE(review): zget_space return value is ignored here — it draws
	 * from the already-present pool, but verify it cannot fail. */
	zone_zone_size = zalloc_end_of_space - zalloc_next_space;
	zget_space(zone_zone_size, &zone_zone_space);
	zcram(zone_zone, zone_zone_space, zone_zone_size);
}

/*
 * Create zone_map as a kernel submap and set up the garbage-collection
 * page table that tracks per-page element counts within it.
 */
void
zone_init(
	vm_size_t max_zonemap_size)
{
	kern_return_t	retval;
	vm_offset_t	zone_min;
	vm_offset_t	zone_max;
	vm_size_t	zone_table_size;

	retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size,
			       FALSE, TRUE, &zone_map);
	if (retval != KERN_SUCCESS)
		panic("zone_init: kmem_suballoc failed");
	zone_max = zone_min + round_page(max_zonemap_size);
	/*
	 * Setup garbage collection information:
	 */
	zone_table_size = atop(zone_max - zone_min) *
				sizeof(struct zone_page_table_entry);
	if (kmem_alloc_wired(zone_map, (vm_offset_t *) &zone_page_table,
			     zone_table_size) != KERN_SUCCESS)
		panic("zone_init");
	/* the page table itself occupies the start of the submap;
	 * GC bookkeeping covers only the pages after it */
	zone_min = (vm_offset_t)zone_page_table + round_page(zone_table_size);
	zone_pages = atop(zone_max - zone_min);
	zone_map_min_address = zone_min;
	zone_map_max_address = zone_max;
	simple_lock_init(&zone_page_table_lock, ETAP_MISC_ZONE_PTABLE);
	mutex_init(&zone_gc_lock, ETAP_NO_TRACE);
	zone_page_init(zone_min, zone_max - zone_min, ZONE_PAGE_UNUSED);
}


/*
 * zalloc returns an element from the specified zone.
 *
 * Common worker for zalloc/zalloc_noblock: pops a free element, growing
 * the zone when empty.  With canblock, may sleep waiting for another
 * thread's in-progress expansion (zone->doing_alloc) or for pages
 * (VM_PAGE_WAIT), and panics on unrecoverable exhaustion; without
 * canblock, returns 0 instead of blocking or panicking.  Returns 0 for
 * an exhaustible zone at its limit.
 */
vm_offset_t
zalloc_canblock(
	register zone_t	zone,
	boolean_t	canblock)
{
	vm_offset_t	addr;
	kern_return_t	retval;

	assert(zone != ZONE_NULL);
	check_simple_locks();

	lock_zone(zone);

	REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
	while (addr == 0) {
		/*
		 * If nothing was there, try to get more
		 */
		if (zone->doing_alloc) {
			if (!canblock) {
				unlock_zone(zone);
				return(0);
			}
			/*
			 * Someone is allocating memory for this zone.
			 * Wait for it to show up, then try again.
			 */
			assert_wait((event_t)zone, THREAD_INTERRUPTIBLE);
			zone->waiting = TRUE;
			unlock_zone(zone);
			thread_block((void (*)(void)) 0);
			lock_zone(zone);
		}
		else {
			if ((zone->cur_size + zone->elem_size) >
			    zone->max_size) {
				if (zone->exhaustible)
					break;	/* return 0 below */
				if (zone->expandable) {
					/*
					 * We're willing to overflow certain
					 * zones, but not without complaining.
					 *
					 * This is best used in conjunction
					 * with the collectable flag. What we
					 * want is an assurance we can get the
					 * memory back, assuming there's no
					 * leak.
					 */
					zone->max_size += (zone->max_size >> 1);
				} else {
					unlock_zone(zone);

					if (!canblock) {
						return(0);
					}

					panic("zalloc: zone \"%s\" empty.", zone->zone_name);
				}
			}
			/* claim the right to expand; others wait on us */
			zone->doing_alloc = TRUE;
			unlock_zone(zone);

			if (zone->collectable) {
				vm_offset_t space;
				vm_size_t alloc_size;

				/* when memory is tight, grow by the minimum */
				if (vm_pool_low())
					alloc_size =
					  round_page(zone->elem_size);
				else
					alloc_size = zone->alloc_size;

				retval = kernel_memory_allocate(zone_map,
					&space, alloc_size, 0,
					KMA_KOBJECT|KMA_NOPAGEWAIT);
				if (retval == KERN_SUCCESS) {
					zone_page_init(space, alloc_size,
						ZONE_PAGE_USED);
					zcram(zone, space, alloc_size);
				} else if (retval != KERN_RESOURCE_SHORTAGE) {
					/* would like to cause a zone_gc() */

					if (!canblock) {
						return(0);
					}

					panic("zalloc");
				}
				lock_zone(zone);
				zone->doing_alloc = FALSE;
				if (zone->waiting) {
					zone->waiting = FALSE;
					thread_wakeup((event_t)zone);
				}
				REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
				if (addr == 0 &&
				    retval == KERN_RESOURCE_SHORTAGE) {
					unlock_zone(zone);

					if (!canblock) {
						return(0);
					}

					/* wait for pageout to free pages,
					 * then retry the whole loop */
					VM_PAGE_WAIT();
					lock_zone(zone);
				}
			} else {
				/* non-collectable zone: carve the single
				 * element out of the non-paged pool */
				vm_offset_t space;
				retval = zget_space(zone->elem_size, &space);

				lock_zone(zone);
				zone->doing_alloc = FALSE;
				if (zone->waiting) {
					zone->waiting = FALSE;
					thread_wakeup((event_t)zone);
				}
				if (retval == KERN_SUCCESS) {
					zone->count++;
					zone->cur_size += zone->elem_size;
#if	ZONE_DEBUG
					if (zone_debug_enabled(zone)) {
					    enqueue_tail(&zone->active_zones, (queue_entry_t)space);
					}
#endif
					unlock_zone(zone);
					zone_page_alloc(space, zone->elem_size);
#if	ZONE_DEBUG
					/* first queue_chain_t bytes hold the
					 * debug linkage; caller gets the rest */
					if (zone_debug_enabled(zone))
					    space += sizeof(queue_chain_t);
#endif
					return(space);
				}
				if (retval == KERN_RESOURCE_SHORTAGE) {
					unlock_zone(zone);

					if (!canblock) {
						return(0);
					}

					VM_PAGE_WAIT();
					lock_zone(zone);
				} else {
					if (!canblock) {
						return(0);
					}

					panic("zalloc");
				}
			}
		}
		if (addr == 0)
			REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
	}

#if	ZONE_DEBUG
	if (addr && zone_debug_enabled(zone)) {
		enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
		addr += sizeof(queue_chain_t);
	}
#endif

	unlock_zone(zone);
	return(addr);
}


/* Allocate an element, blocking if necessary (see zalloc_canblock). */
vm_offset_t
zalloc(
	register zone_t zone)
{
	return( zalloc_canblock(zone, TRUE) );
}

/* Allocate an element without ever blocking; 0 on failure. */
vm_offset_t
zalloc_noblock(
	register zone_t zone)
{
	return( zalloc_canblock(zone, FALSE) );
}


/*
 * zget returns an element from the specified zone
 * and immediately returns nothing if there is nothing there.
 *
 * This form should be used when you can not block (like when
 * processing an interrupt): it only try-locks the zone and never
 * expands it.
 */
vm_offset_t
zget(
	register zone_t	zone)
{
	register vm_offset_t	addr;

	assert( zone != ZONE_NULL );

	if (!lock_try_zone(zone))
		return ((vm_offset_t)0);

	REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
#if	ZONE_DEBUG
	if (addr && zone_debug_enabled(zone)) {
		enqueue_tail(&zone->active_zones, (queue_entry_t)addr);
		addr += sizeof(queue_chain_t);
	}
#endif	/* ZONE_DEBUG */
	unlock_zone(zone);

	return(addr);
}

/* Keep this FALSE by default.  Large memory machines run orders of magnitude
   slower in debug mode when true.
Use debugger to enable if needed */ +boolean_t zone_check = FALSE; + +void +zfree( + register zone_t zone, + vm_offset_t elem) +{ + +#if MACH_ASSERT + /* Basic sanity checks */ + if (zone == ZONE_NULL || elem == (vm_offset_t)0) + panic("zfree: NULL"); + /* zone_gc assumes zones are never freed */ + if (zone == zone_zone) + panic("zfree: freeing to zone_zone breaks zone_gc!"); + if (zone->collectable && !zone->allows_foreign && + (!from_zone_map(elem) || !from_zone_map(elem+zone->elem_size-1))) + panic("zfree: non-allocated memory in collectable zone!"); +#endif + + lock_zone(zone); +#if ZONE_DEBUG + if (zone_debug_enabled(zone)) { + queue_t tmp_elem; + + elem -= sizeof(queue_chain_t); + if (zone_check) { + /* check the zone's consistency */ + + for (tmp_elem = queue_first(&zone->active_zones); + !queue_end(tmp_elem, &zone->active_zones); + tmp_elem = queue_next(tmp_elem)) + if (elem == (vm_offset_t)tmp_elem) + break; + if (elem != (vm_offset_t)tmp_elem) + panic("zfree()ing element from wrong zone"); + } + remqueue(&zone->active_zones, (queue_t) elem); + } +#endif /* ZONE_DEBUG */ + if (zone_check) { + vm_offset_t this; + + /* check the zone's consistency */ + + for (this = zone->free_elements; + this != 0; + this = * (vm_offset_t *) this) + if (!pmap_kernel_va(this) || this == elem) + panic("zfree"); + } + /* + * If elements have one or more pages, and memory is low, + * put it directly back into circulation rather than + * back into a zone, where a non-vm_privileged task can grab it. + * This lessens the impact of a privileged task cycling reserved + * memory into a publicly accessible zone. 
+ */ + if (zone->elem_size >= PAGE_SIZE && + vm_pool_low()){ + assert( !(zone->elem_size & (zone->alloc_size-1)) ); + zone->count--; + zone->cur_size -= zone->elem_size; + zone_page_init(elem, zone->elem_size, ZONE_PAGE_UNUSED); + unlock_zone(zone); + kmem_free(zone_map, elem, zone->elem_size); + return; + } + ADD_TO_ZONE(zone, elem); + unlock_zone(zone); +} + + +/* Change a zone's flags. + * This routine must be called immediately after zinit. + */ +void +zone_change( + zone_t zone, + unsigned int item, + boolean_t value) +{ + assert( zone != ZONE_NULL ); + assert( value == TRUE || value == FALSE ); + + switch(item){ + case Z_EXHAUST: + zone->exhaustible = value; + break; + case Z_COLLECT: + zone->collectable = value; + break; + case Z_EXPAND: + zone->expandable = value; + break; + case Z_FOREIGN: + zone->allows_foreign = value; + break; +#if MACH_ASSERT + default: + panic("Zone_change: Wrong Item Type!"); + /* break; */ +#endif + } + lock_zone_init(zone); +} + +/* + * Return the expected number of free elements in the zone. + * This calculation will be incorrect if items are zfree'd that + * were never zalloc'd/zget'd. The correct way to stuff memory + * into a zone is by zcram. + */ + +integer_t +zone_free_count(zone_t zone) +{ + integer_t free_count; + + lock_zone(zone); + free_count = zone->cur_size/zone->elem_size - zone->count; + unlock_zone(zone); + + assert(free_count >= 0); + + return(free_count); +} + +/* + * zprealloc preallocates wired memory, exanding the specified + * zone to the specified size + */ +void +zprealloc( + zone_t zone, + vm_size_t size) +{ + vm_offset_t addr; + + if (size != 0) { + if (kmem_alloc_wired(zone_map, &addr, size) != KERN_SUCCESS) + panic("zprealloc"); + zone_page_init(addr, size, ZONE_PAGE_USED); + zcram(zone, addr, size); + } +} + +/* + * Zone garbage collection subroutines + * + * These routines have in common the modification of entries in the + * zone_page_table. 
The latter contains one entry for every page + * in the zone_map. + * + * For each page table entry in the given range: + * + * zone_page_collectable - test if one (in_free_list == alloc_count) + * zone_page_keep - reset in_free_list + * zone_page_in_use - decrements in_free_list + * zone_page_free - increments in_free_list + * zone_page_init - initializes in_free_list and alloc_count + * zone_page_alloc - increments alloc_count + * zone_page_dealloc - decrements alloc_count + * zone_add_free_page_list - adds the page to the free list + * + * Two counts are maintained for each page, the in_free_list count and + * alloc_count. The alloc_count is how many zone elements have been + * allocated from a page. (Note that the page could contain elements + * that span page boundaries. The count includes these elements so + * one element may be counted in two pages.) In_free_list is a count + * of how many zone elements are currently free. If in_free_list is + * equal to alloc_count then the page is eligible for garbage + * collection. + * + * Alloc_count and in_free_list are initialized to the correct values + * for a particular zone when a page is zcram'ed into a zone. Subsequent + * gets and frees of zone elements will call zone_page_in_use and + * zone_page_free which modify the in_free_list count. When the zones + * garbage collector runs it will walk through a zones free element list, + * remove the elements that reside on collectable pages, and use + * zone_add_free_page_list to create a list of pages to be collected. 
+ */ +boolean_t +zone_page_collectable( + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_collectable"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + if (zone_page_table[i].in_free_list == + zone_page_table[i].alloc_count) { + unlock_zone_page_table(); + return (TRUE); + } + } + unlock_zone_page_table(); + return (FALSE); +} + +void +zone_page_keep( + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_keep"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + zone_page_table[i].in_free_list = 0; + } + unlock_zone_page_table(); +} + +void +zone_page_in_use( + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_in_use"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + if (zone_page_table[i].in_free_list > 0) + zone_page_table[i].in_free_list--; + } + unlock_zone_page_table(); +} + +void +zone_page_free( + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_free"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + assert(zone_page_table[i].in_free_list >= 0); + zone_page_table[i].in_free_list++; + } + unlock_zone_page_table(); +} + +void +zone_page_init( + vm_offset_t addr, + vm_size_t size, + int value) +{ + natural_t i, j; + +#if MACH_ASSERT + if 
(!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_init"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + zone_page_table[i].alloc_count = value; + zone_page_table[i].in_free_list = 0; + } + unlock_zone_page_table(); +} + +void +zone_page_alloc( + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_alloc"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + /* Set alloc_count to (ZONE_PAGE_USED + 1) if + * it was previously set to ZONE_PAGE_UNUSED. + */ + if (zone_page_table[i].alloc_count == ZONE_PAGE_UNUSED) { + zone_page_table[i].alloc_count = 1; + } else { + zone_page_table[i].alloc_count++; + } + } + unlock_zone_page_table(); +} + +void +zone_page_dealloc( + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_page_dealloc"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + zone_page_table[i].alloc_count--; + } + unlock_zone_page_table(); +} + +void +zone_add_free_page_list( + struct zone_page_table_entry **free_list, + vm_offset_t addr, + vm_size_t size) +{ + natural_t i, j; + +#if MACH_ASSERT + if (!from_zone_map(addr) || !from_zone_map(addr+size-1)) + panic("zone_add_free_page_list"); +#endif + + i = atop(addr-zone_map_min_address); + j = atop((addr+size-1) - zone_map_min_address); + lock_zone_page_table(); + for (; i <= j; i++) { + if (zone_page_table[i].alloc_count == 0) { + zone_page_table[i].next = *free_list; + *free_list = &zone_page_table[i]; + zone_page_table[i].alloc_count = ZONE_PAGE_UNUSED; + zone_page_table[i].in_free_list = 0; + 
} + } + unlock_zone_page_table(); +} + + +/* This is used for walking through a zone's free element list. + */ +struct zone_free_entry { + struct zone_free_entry * next; +}; + +int reclaim_page_count = 0; + +/* Zone garbage collection + * + * zone_gc will walk through all the free elements in all the + * zones that are marked collectable looking for reclaimable + * pages. zone_gc is called by consider_zone_gc when the system + * begins to run out of memory. + */ +void +zone_gc(void) +{ + unsigned int max_zones; + zone_t z; + unsigned int i; + struct zone_page_table_entry *freep; + struct zone_page_table_entry *zone_free_page_list; + + mutex_lock(&zone_gc_lock); + + /* + * Note that this scheme of locking only to walk the zone list + * assumes that zones are never freed (checked by zfree) + */ + simple_lock(&all_zones_lock); + max_zones = num_zones; + z = first_zone; + simple_unlock(&all_zones_lock); + +#if MACH_ASSERT + lock_zone_page_table(); + for (i = 0; i < zone_pages; i++) + assert(zone_page_table[i].in_free_list == 0); + unlock_zone_page_table(); +#endif /* MACH_ASSERT */ + + zone_free_page_list = (struct zone_page_table_entry *) 0; + + for (i = 0; i < max_zones; i++, z = z->next_zone) { + struct zone_free_entry * prev; + struct zone_free_entry * elt; + struct zone_free_entry * end; + + assert(z != ZONE_NULL); + + if (!z->collectable) + continue; + + lock_zone(z); + + /* + * Do a quick feasability check before we scan the zone: + * skip unless there is likelihood of getting 1+ pages back. + */ + if ((z->cur_size - z->count * z->elem_size) <= (2*PAGE_SIZE)){ + unlock_zone(z); + continue; + } + + /* Count the free elements in each page. This loop + * requires that all in_free_list entries are zero. + * + * Exit the loop early if we need to hurry up and drop + * the lock to allow preemption - but we must fully process + * all elements we looked at so far. 
+ */ + elt = (struct zone_free_entry *)(z->free_elements); + while (!ast_urgency() && (elt != (struct zone_free_entry *)0)) { + if (from_zone_map(elt)) + zone_page_free((vm_offset_t)elt, z->elem_size); + elt = elt->next; + } + end = elt; + + /* Now determine which elements should be removed + * from the free list and, after all the elements + * on a page have been removed, add the element's + * page to a list of pages to be freed. + */ + prev = elt = (struct zone_free_entry *)(z->free_elements); + while (elt != end) { + if (!from_zone_map(elt)) { + prev = elt; + elt = elt->next; + continue; + } + if (zone_page_collectable((vm_offset_t)elt, + z->elem_size)) { + z->cur_size -= z->elem_size; + zone_page_in_use((vm_offset_t)elt, + z->elem_size); + zone_page_dealloc((vm_offset_t)elt, + z->elem_size); + zone_add_free_page_list(&zone_free_page_list, + (vm_offset_t)elt, + z->elem_size); + if (elt == prev) { + elt = elt->next; + z->free_elements =(vm_offset_t)elt; + prev = elt; + } else { + prev->next = elt->next; + elt = elt->next; + } + } else { + /* This element is not eligible for collection + * so clear in_free_list in preparation for a + * subsequent garbage collection pass. + */ + zone_page_keep((vm_offset_t)elt, z->elem_size); + prev = elt; + elt = elt->next; + } + } /* end while(elt != end) */ + + unlock_zone(z); + } + + for (freep = zone_free_page_list; freep != 0; freep = freep->next) { + vm_offset_t free_addr; + + free_addr = zone_map_min_address + + PAGE_SIZE * (freep - zone_page_table); + kmem_free(zone_map, free_addr, PAGE_SIZE); + reclaim_page_count++; + } + mutex_unlock(&zone_gc_lock); +} + +boolean_t zone_gc_allowed = TRUE; /* XXX */ +unsigned zone_gc_last_tick = 0; +unsigned zone_gc_max_rate = 0; /* in ticks */ + +/* + * consider_zone_gc: + * + * Called by the pageout daemon when the system needs more free pages. 
+ */ + +void +consider_zone_gc(void) +{ + /* + * By default, don't attempt zone GC more frequently + * than once a second (which is one scheduler tick). + */ + + if (zone_gc_max_rate == 0) + zone_gc_max_rate = 2; /* sched_tick is a 1 second resolution 2 here insures at least 1 second interval */ + + if (zone_gc_allowed && + (sched_tick > (zone_gc_last_tick + zone_gc_max_rate))) { + zone_gc_last_tick = sched_tick; + zone_gc(); + } +} + +#include +#include +#include +#include +#include +#include + +#include + +kern_return_t +host_zone_info( + host_t host, + zone_name_array_t *namesp, + mach_msg_type_number_t *namesCntp, + zone_info_array_t *infop, + mach_msg_type_number_t *infoCntp) +{ + zone_name_t *names; + vm_offset_t names_addr; + vm_size_t names_size; + zone_info_t *info; + vm_offset_t info_addr; + vm_size_t info_size; + unsigned int max_zones, i; + zone_t z; + zone_name_t *zn; + zone_info_t *zi; + kern_return_t kr; + + if (host == HOST_NULL) + return KERN_INVALID_HOST; + + /* + * We assume that zones aren't freed once allocated. + * We won't pick up any zones that are allocated later. 
+ */ + + simple_lock(&all_zones_lock); +#ifdef ppc + max_zones = num_zones + 4; +#else + max_zones = num_zones + 2; +#endif + z = first_zone; + simple_unlock(&all_zones_lock); + + if (max_zones <= *namesCntp) { + /* use in-line memory */ + + names = *namesp; + } else { + names_size = round_page(max_zones * sizeof *names); + kr = kmem_alloc_pageable(ipc_kernel_map, + &names_addr, names_size); + if (kr != KERN_SUCCESS) + return kr; + names = (zone_name_t *) names_addr; + } + + if (max_zones <= *infoCntp) { + /* use in-line memory */ + + info = *infop; + } else { + info_size = round_page(max_zones * sizeof *info); + kr = kmem_alloc_pageable(ipc_kernel_map, + &info_addr, info_size); + if (kr != KERN_SUCCESS) { + if (names != *namesp) + kmem_free(ipc_kernel_map, + names_addr, names_size); + return kr; + } + + info = (zone_info_t *) info_addr; + } + zn = &names[0]; + zi = &info[0]; + + for (i = 0; i < num_zones; i++) { + struct zone zcopy; + + assert(z != ZONE_NULL); + + lock_zone(z); + zcopy = *z; + unlock_zone(z); + + simple_lock(&all_zones_lock); + z = z->next_zone; + simple_unlock(&all_zones_lock); + + /* assuming here the name data is static */ + (void) strncpy(zn->zn_name, zcopy.zone_name, + sizeof zn->zn_name); + + zi->zi_count = zcopy.count; + zi->zi_cur_size = zcopy.cur_size; + zi->zi_max_size = zcopy.max_size; + zi->zi_elem_size = zcopy.elem_size; + zi->zi_alloc_size = zcopy.alloc_size; + zi->zi_exhaustible = zcopy.exhaustible; + zi->zi_collectable = zcopy.collectable; + + zn++; + zi++; + } + strcpy(zn->zn_name, "kernel_stacks"); + stack_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size, + &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible); + zn++; + zi++; +#ifdef ppc + strcpy(zn->zn_name, "save_areas"); + save_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size, + &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible); + zn++; + zi++; + + strcpy(zn->zn_name, "pmap_mappings"); + 
mapping_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size, + &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible); + zn++; + zi++; +#endif + strcpy(zn->zn_name, "kalloc.large"); + kalloc_fake_zone_info(&zi->zi_count, &zi->zi_cur_size, &zi->zi_max_size, &zi->zi_elem_size, + &zi->zi_alloc_size, &zi->zi_collectable, &zi->zi_exhaustible); + + if (names != *namesp) { + vm_size_t used; + vm_map_copy_t copy; + + used = max_zones * sizeof *names; + + if (used != names_size) + bzero((char *) (names_addr + used), names_size - used); + + kr = vm_map_copyin(ipc_kernel_map, names_addr, names_size, + TRUE, ©); + assert(kr == KERN_SUCCESS); + + *namesp = (zone_name_t *) copy; + } + *namesCntp = max_zones; + + if (info != *infop) { + vm_size_t used; + vm_map_copy_t copy; + + used = max_zones * sizeof *info; + + if (used != info_size) + bzero((char *) (info_addr + used), info_size - used); + + kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size, + TRUE, ©); + assert(kr == KERN_SUCCESS); + + *infop = (zone_info_t *) copy; + } + *infoCntp = max_zones; + + return KERN_SUCCESS; +} + +#if MACH_KDB +#include +#include +#include + +const char *zone_labels = +"ENTRY COUNT TOT_SZ MAX_SZ ELT_SZ ALLOC_SZ NAME"; + +/* Forwards */ +void db_print_zone( + zone_t addr); + +#if ZONE_DEBUG +void db_zone_check_active( + zone_t zone); +void db_zone_print_active( + zone_t zone); +#endif /* ZONE_DEBUG */ +void db_zone_print_free( + zone_t zone); +void +db_print_zone( + zone_t addr) +{ + struct zone zcopy; + + zcopy = *addr; + + db_printf("%8x %8x %8x %8x %6x %8x %s ", + addr, zcopy.count, zcopy.cur_size, + zcopy.max_size, zcopy.elem_size, + zcopy.alloc_size, zcopy.zone_name); + if (zcopy.exhaustible) + db_printf("H"); + if (zcopy.collectable) + db_printf("C"); + if (zcopy.expandable) + db_printf("X"); + db_printf("\n"); +} + +/*ARGSUSED*/ +void +db_show_one_zone( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + struct zone *z = 
(zone_t)addr; + + if (z == ZONE_NULL || !have_addr){ + db_error("No Zone\n"); + /*NOTREACHED*/ + } + + db_printf("%s\n", zone_labels); + db_print_zone(z); +} + +/*ARGSUSED*/ +void +db_show_all_zones( + db_expr_t addr, + int have_addr, + db_expr_t count, + char * modif) +{ + zone_t z; + unsigned total = 0; + + /* + * Don't risk hanging by unconditionally locking, + * risk of incoherent data is small (zones aren't freed). + */ + have_addr = simple_lock_try(&all_zones_lock); + count = num_zones; + z = first_zone; + if (have_addr) { + simple_unlock(&all_zones_lock); + } + + db_printf("%s\n", zone_labels); + for ( ; count > 0; count--) { + if (!z) { + db_error("Mangled Zone List\n"); + /*NOTREACHED*/ + } + db_print_zone(z); + total += z->cur_size, + + have_addr = simple_lock_try(&all_zones_lock); + z = z->next_zone; + if (have_addr) { + simple_unlock(&all_zones_lock); + } + } + db_printf("\nTotal %8x", total); + db_printf("\n\nzone_gc() has reclaimed %d pages\n", + reclaim_page_count); +} + +#if ZONE_DEBUG +void +db_zone_check_active( + zone_t zone) +{ + int count = 0; + queue_t tmp_elem; + + if (!zone_debug_enabled(zone) || !zone_check) + return; + tmp_elem = queue_first(&zone->active_zones); + while (count < zone->count) { + count++; + if (tmp_elem == 0) { + printf("unexpected zero element, zone=0x%x, count=%d\n", + zone, count); + assert(FALSE); + break; + } + if (queue_end(tmp_elem, &zone->active_zones)) { + printf("unexpected queue_end, zone=0x%x, count=%d\n", + zone, count); + assert(FALSE); + break; + } + tmp_elem = queue_next(tmp_elem); + } + if (!queue_end(tmp_elem, &zone->active_zones)) { + printf("not at queue_end, zone=0x%x, tmp_elem=0x%x\n", + zone, tmp_elem); + assert(FALSE); + } +} + +void +db_zone_print_active( + zone_t zone) +{ + int count = 0; + queue_t tmp_elem; + + if (!zone_debug_enabled(zone)) { + printf("zone 0x%x debug not enabled\n", zone); + return; + } + if (!zone_check) { + printf("zone_check FALSE\n"); + return; + } + + printf("zone 0x%x, 
active elements %d\n", zone, zone->count); + printf("active list:\n"); + tmp_elem = queue_first(&zone->active_zones); + while (count < zone->count) { + printf(" 0x%x", tmp_elem); + count++; + if ((count % 6) == 0) + printf("\n"); + if (tmp_elem == 0) { + printf("\nunexpected zero element, count=%d\n", count); + break; + } + if (queue_end(tmp_elem, &zone->active_zones)) { + printf("\nunexpected queue_end, count=%d\n", count); + break; + } + tmp_elem = queue_next(tmp_elem); + } + if (!queue_end(tmp_elem, &zone->active_zones)) + printf("\nnot at queue_end, tmp_elem=0x%x\n", tmp_elem); + else + printf("\n"); +} +#endif /* ZONE_DEBUG */ + +void +db_zone_print_free( + zone_t zone) +{ + int count = 0; + int freecount; + vm_offset_t elem; + + freecount = zone_free_count(zone); + printf("zone 0x%x, free elements %d\n", zone, freecount); + printf("free list:\n"); + elem = zone->free_elements; + while (count < freecount) { + printf(" 0x%x", elem); + count++; + if ((count % 6) == 0) + printf("\n"); + if (elem == 0) { + printf("\nunexpected zero element, count=%d\n", count); + break; + } + elem = *((vm_offset_t *)elem); + } + if (elem != 0) + printf("\nnot at end of free list, elem=0x%x\n", elem); + else + printf("\n"); +} + +#endif /* MACH_KDB */ + + +#if ZONE_DEBUG + +/* should we care about locks here ? 
*/ + +#if MACH_KDB +vm_offset_t +next_element( + zone_t z, + vm_offset_t elt) +{ + if (!zone_debug_enabled(z)) + return(0); + elt -= sizeof(queue_chain_t); + elt = (vm_offset_t) queue_next((queue_t) elt); + if ((queue_t) elt == &z->active_zones) + return(0); + elt += sizeof(queue_chain_t); + return(elt); +} + +vm_offset_t +first_element( + zone_t z) +{ + vm_offset_t elt; + + if (!zone_debug_enabled(z)) + return(0); + if (queue_empty(&z->active_zones)) + return(0); + elt = (vm_offset_t) queue_first(&z->active_zones); + elt += sizeof(queue_chain_t); + return(elt); +} + +/* + * Second arg controls how many zone elements are printed: + * 0 => none + * n, n < 0 => all + * n, n > 0 => last n on active list + */ +int +zone_count( + zone_t z, + int tail) +{ + vm_offset_t elt; + int count = 0; + boolean_t print = (tail != 0); + + if (tail < 0) + tail = z->count; + if (z->count < tail) + tail = 0; + tail = z->count - tail; + for (elt = first_element(z); elt; elt = next_element(z, elt)) { + if (print && tail <= count) + db_printf("%8x\n", elt); + count++; + } + assert(count == z->count); + return(count); +} +#endif /* MACH_KDB */ + +#define zone_in_use(z) ( z->count || z->free_elements ) + +void +zone_debug_enable( + zone_t z) +{ + if (zone_debug_enabled(z) || zone_in_use(z) || + z->alloc_size < (z->elem_size + sizeof(queue_chain_t))) + return; + queue_init(&z->active_zones); + z->elem_size += sizeof(queue_chain_t); +} + +void +zone_debug_disable( + zone_t z) +{ + if (!zone_debug_enabled(z) || zone_in_use(z)) + return; + z->elem_size -= sizeof(queue_chain_t); + z->active_zones.next = z->active_zones.prev = 0; +} +#endif /* ZONE_DEBUG */ diff --git a/osfmk/kern/zalloc.h b/osfmk/kern/zalloc.h new file mode 100644 index 000000000..ae48dfcd9 --- /dev/null +++ b/osfmk/kern/zalloc.h @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: zalloc.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + */ + +#ifndef _KERN_ZALLOC_H_ +#define _KERN_ZALLOC_H_ + +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#include +#include +#include + +/* + * A zone is a collection of fixed size blocks for which there + * is fast allocation/deallocation access. Kernel routines can + * use zones to manage data structures dynamically, creating a zone + * for each type of data structure to be managed. + * + */ + +struct zone { + int count; /* Number of elements used now */ + vm_offset_t free_elements; + vm_size_t cur_size; /* current memory utilization */ + vm_size_t max_size; /* how large can this zone grow */ + vm_size_t elem_size; /* size of an element */ + vm_size_t alloc_size; /* size used for more memory */ + char *zone_name; /* a name for the zone */ + unsigned int + /* boolean_t */ exhaustible :1, /* (F) merely return if empty? */ + /* boolean_t */ collectable :1, /* (F) garbage collect empty pages */ + /* boolean_t */ expandable :1, /* (T) expand zone (with message)? */ + /* boolean_t */ allows_foreign :1,/* (F) allow non-zalloc space */ + /* boolean_t */ doing_alloc :1, /* is zone expanding now? */ + /* boolean_t */ waiting :1; /* is thread waiting for expansion? */ + struct zone * next_zone; /* Link for all-zones list */ +#if ZONE_DEBUG + queue_head_t active_zones; /* active elements */ +#endif /* ZONE_DEBUG */ + decl_simple_lock_data(,lock) /* generic lock */ +}; + + +extern void zone_gc(void); +extern void consider_zone_gc(void); + +/* Steal memory for zone module */ +extern void zone_steal_memory(void); + +/* Bootstrap zone module (create zone zone) */ +extern void zone_bootstrap(void); + +/* Init zone module */ +extern void zone_init(vm_size_t); + +#endif /* ! 
MACH_KERNEL_PRIVATE */ + + +/* Allocate from zone */ +extern vm_offset_t zalloc( + zone_t zone); + +/* Non-blocking version of zalloc */ +extern vm_offset_t zalloc_noblock( + zone_t zone); + +/* Get from zone free list */ +extern vm_offset_t zget( + zone_t zone); + +/* Create zone */ +extern zone_t zinit( + vm_size_t size, /* the size of an element */ + vm_size_t max, /* maximum memory to use */ + vm_size_t alloc, /* allocation size */ + char *name); /* a name for the zone */ + +/* Free zone element */ +extern void zfree( + zone_t zone, + vm_offset_t elem); + +/* Fill zone with memory */ +extern void zcram( + zone_t zone, + vm_offset_t newmem, + vm_size_t size); + +/* Initially fill zone with specified number of elements */ +extern int zfill( + zone_t zone, + int nelem); +/* Change zone parameters */ +extern void zone_change( + zone_t zone, + unsigned int item, + boolean_t value); + +/* Preallocate space for zone from zone map */ +extern void zprealloc( + zone_t zone, + vm_size_t size); + +/* + * zone_free_count returns a hint as to the current number of free elements + * in the zone. By the time it returns, it may no longer be true (a new + * element might have been added, or an element removed). + * This routine may be used in conjunction with zcram and a lock to regulate + * adding memory to a non-expandable zone. + */ +extern integer_t zone_free_count(zone_t zone); + +/* + * Item definitions for zone_change: + */ +#define Z_EXHAUST 1 /* Make zone exhaustible */ +#define Z_COLLECT 2 /* Make zone collectable */ +#define Z_EXPAND 3 /* Make zone expandable */ +#define Z_FOREIGN 4 /* Allow collectable zone to contain foreign */ + /* (not allocated via zalloc) elements. 
*/ +#ifdef MACH_KERNEL_PRIVATE +#if ZONE_DEBUG +#if MACH_KDB +extern vm_offset_t next_element( + zone_t z, + vm_offset_t elt); + +extern vm_offset_t first_element( + zone_t z); +#endif /* MACH_KDB */ +extern void zone_debug_enable( + zone_t z); + +extern void zone_debug_disable( + zone_t z); +#endif /* ZONE_DEBUG */ +#endif MACH_KERNEL_PRIVATE + +#endif /* _KERN_ZALLOC_H_ */ diff --git a/osfmk/libsa/Makefile b/osfmk/libsa/Makefile new file mode 100644 index 000000000..98c0c2585 --- /dev/null +++ b/osfmk/libsa/Makefile @@ -0,0 +1,24 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + string.h + +INSTALL_MI_LIST = + +INSTALL_MI_DIR = + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/libsa/ctype.h b/osfmk/libsa/ctype.h new file mode 100644 index 000000000..152f2ab4c --- /dev/null +++ b/osfmk/libsa/ctype.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:20 bruel + * created from standalone mach servers. + * [96/09/17 bruel] + * + * $EndLog$ + */ + +#ifndef _CTYPE_H_ +#define _CTYPE_H_ + +extern int isalpha(int); +extern int isalnum(int); +extern int iscntrl(int); +extern int isdigit(int); +extern int isgraph(int); +extern int islower(int); +extern int isprint(int); +extern int ispunct(int); +extern int isspace(int); +extern int isupper(int); +extern int isxdigit(int); +extern int toupper(int); +extern int tolower(int); + +extern int isascii(int); +extern int toascii(int); + +extern int (_toupper)(int); +extern int (_tolower)(int); + +#endif /* _CTYPE_H_ */ diff --git a/osfmk/libsa/errno.h b/osfmk/libsa/errno.h new file mode 100644 index 000000000..36b9df2c6 --- /dev/null +++ b/osfmk/libsa/errno.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1997/01/31 15:46:31 emcmanus + * Merged with nmk22b1_shared. + * [1997/01/30 08:42:08 emcmanus] + * + * Revision 1.1.2.5 1996/11/29 13:04:57 emcmanus + * Added EIO for libsa_mach's getclock(). + * [1996/11/29 09:59:19 emcmanus] + * + * Revision 1.1.2.4 1996/11/08 12:02:15 emcmanus + * Replaced errno variable by a macro that calls a function defined + * either in libsa_mach or in a threads library. + * [1996/11/08 11:48:47 emcmanus] + * + * Revision 1.1.2.3 1996/10/14 13:31:46 emcmanus + * Added ETIMEDOUT. + * [1996/10/14 13:29:55 emcmanus] + * + * Revision 1.1.2.2 1996/10/03 17:53:40 emcmanus + * Added new error codes needed by libpthread.a. + * [1996/10/03 16:17:42 emcmanus] + * + * Revision 1.1.2.1 1996/09/30 10:14:32 bruel + * First revision. + * [96/09/30 bruel] + * + * $EndLog$ + */ + +/* + * ANSI C defines EDOM and ERANGE. POSIX defines the remaining values. + * We may at some stage want to surround the extra values with + * #ifdef _POSIX_SOURCE. + * By an extraordinary coincidence, nearly all the values defined here + * correspond exactly to those in OSF/1 and in Linux. Imagine that. + * The exception is ETIMEDOUT, which has different values in the two + * systems. We use the OSF/1 value here. 
+ */ + +extern int *__mach_errno_addr(void); +#define errno (*__mach_errno_addr()) + +#define ESUCCESS 0 /* Success */ +#define EPERM 1 /* Not owner */ +#define ESRCH 3 /* No such process */ +#define EIO 5 /* I/O error */ +#define ENOMEM 12 /* Not enough core */ +#define EBUSY 16 /* Mount device busy */ +#define EINVAL 22 /* Invalid argument */ +#define EDOM 33 /* Argument too large */ +#define ERANGE 34 /* Result too large */ +#define ETIMEDOUT 60 /* Connection timed out */ diff --git a/osfmk/libsa/float.h b/osfmk/libsa/float.h new file mode 100644 index 000000000..a1da5a179 --- /dev/null +++ b/osfmk/libsa/float.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/10/10 13:56:02 yp + * Created. 
+ * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _FLOAT_H_ +#define _FLOAT_H_ +# include <machine/float.h> +#endif /* _FLOAT_H_ */ diff --git a/osfmk/libsa/i386/float.h b/osfmk/libsa/i386/float.h new file mode 100644 index 000000000..e3d1c1ef1 --- /dev/null +++ b/osfmk/libsa/i386/float.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:21:00 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:19:49 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/10/10 13:56:09 yp + * Created. + * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _MACHINE_FLOAT_H_ +#define _MACHINE_FLOAT_H_ +# include <libsa/ieeefloat.h> +#endif /* _MACHINE_FLOAT_H_ */ diff --git a/osfmk/libsa/i386/math.h b/osfmk/libsa/i386/math.h new file mode 100644 index 000000000..21728795a --- /dev/null +++ b/osfmk/libsa/i386/math.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:21:00 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:19:49 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.2 1996/10/10 13:56:11 yp + * Submitted again (ODE problems). + * [96/10/10 yp] + * + * Revision 1.1.2.1 1996/10/10 09:16:45 yp + * Created. + * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _MACHINE_MATH_H_ +#define _MACHINE_MATH_H_ 1 + +#define HUGE_VAL (1.7976931348623157e+308 * 2.0) + +#endif /* _MACHINE_MATH_H_ */ diff --git a/osfmk/libsa/i386/stdarg.h b/osfmk/libsa/i386/stdarg.h new file mode 100644 index 000000000..7a9022fa6 --- /dev/null +++ b/osfmk/libsa/i386/stdarg.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:21:00 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:19:49 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:26 bruel + * created from standalone mach servers + * [1996/09/17 16:18:07 bruel] + * + * $EndLog$ + */ + +#ifndef _MACHINE_STDARG_H +#define _MACHINE_STDARG_H + +#include + +/* Amount of space required in an argument list for an arg of type TYPE. + TYPE may alternatively be an expression whose type is used. 
*/ + +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int)) + +#define va_start(AP, LASTARG) \ + (AP = ((char *) &(LASTARG) + __va_rounded_size (LASTARG))) + +void va_end (va_list); /* Defined in gnulib */ +#define va_end(AP) + +#define va_arg(AP, mode) \ + (AP += __va_rounded_size (mode), \ + *((mode *) (AP - __va_rounded_size (mode)))) + +#endif /* _MACHINE_STDARG_H */ diff --git a/osfmk/libsa/i386/types.h b/osfmk/libsa/i386/types.h new file mode 100644 index 000000000..650acf6a3 --- /dev/null +++ b/osfmk/libsa/i386/types.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:21:00 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:19:50 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:28 bruel + * created from standalone mach servers + * [1996/09/17 16:18:08 bruel] + * + * Revision 1.1.7.1 1996/04/11 13:46:28 barbou + * Self-Contained Mach Distribution: + * created. + * [95/12/28 barbou] + * [96/03/28 barbou] + * + * $EndLog$ + */ + +#ifndef _MACH_MACHINE_TYPES_H_ +#define _MACH_MACHINE_TYPES_H_ 1 + +typedef long dev_t; /* device number (major+minor) */ + +typedef signed char bit8_t; /* signed 8-bit quantity */ +typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ + +typedef short bit16_t; /* signed 16-bit quantity */ +typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ + +typedef int bit32_t; /* signed 32-bit quantity */ +typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ + +/* Only 32 bits of the "bit64_t" are significant on this 32-bit machine */ +typedef struct { int __val[2]; } bit64_t; /* signed 64-bit quantity */ +typedef struct { unsigned int __val[2]; } u_bit64_t;/* unsigned 64-bit quantity */ +#define _SIG64_BITS __val[0] /* bits of interest (32) */ + +#endif /* _MACH_MACHINE_TYPES_H_ */ + diff --git a/osfmk/libsa/i386/va_list.h b/osfmk/libsa/i386/va_list.h new file mode 100644 index 000000000..aa14743d2 --- /dev/null +++ b/osfmk/libsa/i386/va_list.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.2 1998/09/30 21:21:00 wsanchez + * Merged in IntelMerge1 (mburg: Intel support) + * + * Revision 1.1.2.1 1998/09/30 18:19:50 mburg + * Changes for Intel port + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:30 bruel + * created from standalone mach servers + * [1996/09/17 16:18:09 bruel] + * + * $EndLog$ + */ + +#ifndef _MACHINE_VALIST_H +#define _MACHINE_VALIST_H + +/* + * Four possible situations: + * - We are being included by {var,std}args.h (or anyone) before stdio.h. + * define real type. + * + * - We are being included by stdio.h before {var,std}args.h. + * define hidden type for prototypes in stdio, don't pollute namespace. + * + * - We are being included by {var,std}args.h after stdio.h. + * define real type to match hidden type. no longer use hidden type. + * + * - We are being included again after defining the real va_list. + * do nothing. 
+ * + */ + +#if !defined(_HIDDEN_VA_LIST) && !defined(_VA_LIST) +#define _VA_LIST +typedef char *va_list; + +#elif defined(_HIDDEN_VA_LIST) && !defined(_VA_LIST) +#define _VA_LIST +typedef char *__va_list; + +#elif defined(_HIDDEN_VA_LIST) && defined(_VA_LIST) +#undef _HIDDEN_VA_LIST +typedef __va_list va_list; + +#endif + +#endif /* _MACHINE_VALIST_H */ + diff --git a/osfmk/libsa/ieeefloat.h b/osfmk/libsa/ieeefloat.h new file mode 100644 index 000000000..c42f141c6 --- /dev/null +++ b/osfmk/libsa/ieeefloat.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/10/10 13:56:15 yp + * Created. + * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _IEEEFLOAT_H_ +#define _IEEEFLOAT_H_ + +/* + * Copyright (c) 1989, 1993 + * The Regents of the University of California. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)float.h 8.1 (Berkeley) 6/10/93 + */ + +#define FLT_RADIX 2 /* b */ +#define FLT_ROUNDS 1 /* FP addition rounds to nearest */ + +#define FLT_MANT_DIG 24 /* p */ +#define FLT_EPSILON 1.19209290E-07F /* b**(1-p) */ +#define FLT_DIG 6 /* floor((p-1)*log10(b))+(b == 10) */ +#define FLT_MIN_EXP -125 /* emin */ +#define FLT_MIN 1.17549435E-38F /* b**(emin-1) */ +#define FLT_MIN_10_EXP -37 /* ceil(log10(b**(emin-1))) */ +#define FLT_MAX_EXP 128 /* emax */ +#define FLT_MAX 3.40282347E+38F /* (1-b**(-p))*b**emax */ +#define FLT_MAX_10_EXP 38 /* floor(log10((1-b**(-p))*b**emax)) */ + +#define DBL_MANT_DIG 53 +#define DBL_EPSILON 2.2204460492503131E-16 +#define DBL_DIG 15 +#define DBL_MIN_EXP -1021 +#define DBL_MIN 2.225073858507201E-308 +#define DBL_MIN_10_EXP -307 +#define DBL_MAX_EXP 1024 +#define DBL_MAX 1.797693134862316E+308 +#define DBL_MAX_10_EXP 308 + +#define LDBL_MANT_DIG DBL_MANT_DIG +#define LDBL_EPSILON DBL_EPSILON +#define LDBL_DIG DBL_DIG +#define LDBL_MIN_EXP DBL_MIN_EXP +#define LDBL_MIN DBL_MIN +#define LDBL_MIN_10_EXP DBL_MIN_10_EXP +#define LDBL_MAX_EXP DBL_MAX_EXP +#define LDBL_MAX DBL_MAX +#define LDBL_MAX_10_EXP DBL_MAX_10_EXP + +#endif /* _IEEEFLOAT_H_ */ diff --git a/osfmk/libsa/machine/stdarg.h b/osfmk/libsa/machine/stdarg.h new file mode 100644 index 000000000..424b40983 --- /dev/null +++ b/osfmk/libsa/machine/stdarg.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_STDARG_H +#define _MACH_MACHINE_STDARG_H + + +#if defined (__ppc__) +#include "ppc/stdarg.h" +#elif defined (__i386__) +#include "i386/stdarg.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_STDARG_H */ diff --git a/osfmk/libsa/machine/stdarg_apple.h b/osfmk/libsa/machine/stdarg_apple.h new file mode 100644 index 000000000..fde11df56 --- /dev/null +++ b/osfmk/libsa/machine/stdarg_apple.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_STDARG_APPLE_H +#define _MACH_MACHINE_STDARG_APPLE_H + + +#if defined (__ppc__) +#include "ppc/stdarg_apple.h" +#elif defined (__i386__) +#include "i386/stdarg_apple.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_STDARG_APPLE_H */ diff --git a/osfmk/libsa/machine/types.h b/osfmk/libsa/machine/types.h new file mode 100644 index 000000000..710311b83 --- /dev/null +++ b/osfmk/libsa/machine/types.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_TYPES_H +#define _MACH_MACHINE_TYPES_H + + +#if defined (__ppc__) +#include "ppc/types.h" +#elif defined (__i386__) +#include "i386/types.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_TYPES_H */ diff --git a/osfmk/libsa/machine/va_list.h b/osfmk/libsa/machine/va_list.h new file mode 100644 index 000000000..95929edb7 --- /dev/null +++ b/osfmk/libsa/machine/va_list.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_VA_LIST_H +#define _MACH_MACHINE_VA_LIST_H + + +#if defined (__ppc__) +#include "ppc/va_list.h" +#elif defined (__i386__) +#include "i386/va_list.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_VA_LIST_H */ diff --git a/osfmk/libsa/math.h b/osfmk/libsa/math.h new file mode 100644 index 000000000..49cffc7d0 --- /dev/null +++ b/osfmk/libsa/math.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1997/01/31 15:46:32 emcmanus + * Merged with nmk22b1_shared. + * [1997/01/30 16:57:28 emcmanus] + * + * Revision 1.1.2.4 1997/01/03 10:11:22 yp + * isnan() prototype for JDK. + * [97/01/03 yp] + * + * Revision 1.1.2.3 1996/11/29 14:33:24 yp + * Added more prototypes. + * [96/11/29 yp] + * + * Revision 1.1.2.2 1996/10/10 13:56:16 yp + * Submitted again (ODE problems). + * [96/10/10 yp] + * + * Revision 1.1.2.1 1996/10/10 09:16:46 yp + * Created. 
+ * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _MATH_H_ +#define _MATH_H_ 1 + +double acos (double); +double acosh (double); +double asin (double); +double asinh (double); +double atan (double); +double atanh (double); +double atan2 (double, double); +double cbrt (double); +double ceil (double); +double copysign (double, double); +double cos (double); +double cosh (double); +double drem (double); +double exp (double); +double expm1 (double); +double fabs (double); +int finite (double); +double floor (double); +double fmod (double, double); +double frexp (double, int *); +int ilogb (double); +int isnan(double); +double ldexp (double, int); +double log (double); +double log10 (double); +double log1p (double); +double logb (double); +double modf (double, double *); +double nextafter (double, double); +double pow (double, double); +double remainder (double, double); +double rint (double); +double scalb (double, double); +double sin (double); +double sinh (double); +double sqrt (double); +double tan (double); +double tanh (double); + +#include <machine/math.h> + +#endif /* _MATH_H_ */ diff --git a/osfmk/libsa/ppc/float.h b/osfmk/libsa/ppc/float.h new file mode 100644 index 000000000..f2608b5ee --- /dev/null +++ b/osfmk/libsa/ppc/float.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/12/09 16:59:00 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 11:18:39 stephen] + * + * Revision 1.1.2.1 1996/10/10 13:56:09 yp + * Created. + * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _MACHINE_FLOAT_H_ +#define _MACHINE_FLOAT_H_ +# include +#endif /* _MACHINE_FLOAT_H_ */ diff --git a/osfmk/libsa/ppc/math.h b/osfmk/libsa/ppc/math.h new file mode 100644 index 000000000..b094a20cb --- /dev/null +++ b/osfmk/libsa/ppc/math.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/12/09 16:59:02 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 11:18:44 stephen] + * + * Revision 1.1.2.2 1996/10/10 13:56:07 yp + * Submitted again (ODE problems). + * [96/10/10 yp] + * + * Revision 1.1.2.1 1996/10/10 09:16:43 yp + * Created. + * [96/10/10 yp] + * + * $EndLog$ + */ + +#ifndef _MACHINE_MATH_H_ +#define _MACHINE_MATH_H_ 1 + +#define HUGE_VAL (1.701411733192644270e38) + +#endif /* _MACHINE_MATH_H_ */ diff --git a/osfmk/libsa/ppc/stdarg.h b/osfmk/libsa/ppc/stdarg.h new file mode 100644 index 000000000..dcb67693b --- /dev/null +++ b/osfmk/libsa/ppc/stdarg.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if defined(APPLE) || defined(NeXT) +#include +#else +#endif diff --git a/osfmk/libsa/ppc/stdarg_apple.h b/osfmk/libsa/ppc/stdarg_apple.h new file mode 100644 index 000000000..c1bcea725 --- /dev/null +++ b/osfmk/libsa/ppc/stdarg_apple.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* stdarg.h for GNU. + Note that the type used in va_arg is supposed to match the + actual type **after default promotions**. + Thus, va_arg (..., short) is not valid. 
*/ + +#ifndef _STDARG_H +#ifndef _ANSI_STDARG_H_ +#ifndef __need___va_list +#define _STDARG_H +#define _ANSI_STDARG_H_ +#endif /* not __need___va_list */ +#undef __need___va_list + +#ifdef __clipper__ +#include +#else +#ifdef __m88k__ +#include +#else +#ifdef __i860__ +#include +#else +#ifdef __hppa__ +#include +#else +#ifdef __mips__ +#include +#else +#ifdef __sparc__ +#include +#else +#ifdef __i960__ +#include +#else +#ifdef __alpha__ +#include +#else +#if defined (__H8300__) || defined (__H8300H__) +#include +#else +#if defined (__PPC__) && defined (_CALL_SYSV) +#include +#else + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +#define __GNUC_VA_LIST +#if defined(__svr4__) || defined(_AIX) || defined(_M_UNIX) || defined(__NetBSD__) +typedef char *__gnuc_va_list; +#else +typedef void *__gnuc_va_list; +#endif +#endif + +/* Define the standard macros for the user, + if this invocation was from the user program. */ +#ifdef _STDARG_H + +/* Amount of space required in an argument list for an arg of type TYPE. + TYPE may alternatively be an expression whose type is used. */ + +#if defined(sysV68) +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (short) - 1) / sizeof (short)) * sizeof (short)) +#else +#define __va_rounded_size(TYPE) \ + (((sizeof (TYPE) + sizeof (int) - 1) / sizeof (int)) * sizeof (int)) +#endif + +#define va_start(AP, LASTARG) \ + (AP = ((__gnuc_va_list) __builtin_next_arg (LASTARG))) + +#undef va_end +void va_end (__gnuc_va_list); /* Defined in libgcc.a */ +#define va_end(AP) ((void)0) + +/* We cast to void * and then to TYPE * because this avoids + a warning about increasing the alignment requirement. */ + +#if defined (__arm__) || defined (__i386__) || defined (__i860__) || defined (__ns32000__) || defined (__vax__) +/* This is for little-endian machines; small args are padded upward. 
*/ +#define va_arg(AP, TYPE) \ + (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \ + *((TYPE *) (void *) ((char *) (AP) - __va_rounded_size (TYPE)))) +#else /* big-endian */ +/* This is for big-endian machines; small args are padded downward. */ +#define va_arg(AP, TYPE) \ + (AP = (__gnuc_va_list) ((char *) (AP) + __va_rounded_size (TYPE)), \ + *((TYPE *) (void *) ((char *) (AP) \ + - ((sizeof (TYPE) < __va_rounded_size (char) \ + ? sizeof (TYPE) : __va_rounded_size (TYPE)))))) +#endif /* big-endian */ +#endif /* _STDARG_H */ + +#endif /* not powerpc with V.4 calling sequence */ +#endif /* not h8300 */ +#endif /* not alpha */ +#endif /* not i960 */ +#endif /* not sparc */ +#endif /* not mips */ +#endif /* not hppa */ +#endif /* not i860 */ +#endif /* not m88k */ +#endif /* not clipper */ + +#ifdef _STDARG_H +/* Define va_list, if desired, from __gnuc_va_list. */ +/* We deliberately do not define va_list when called from + stdio.h, because ANSI C says that stdio.h is not supposed to define + va_list. stdio.h needs to have access to that data type, + but must not use that name. It should use the name __gnuc_va_list, + which is safe because it is reserved for the implementation. */ + +#ifdef _HIDDEN_VA_LIST /* On OSF1, this means varargs.h is "half-loaded". */ +#undef _VA_LIST +#endif + +#ifdef _BSD_VA_LIST +#undef _BSD_VA_LIST +#endif + +#ifdef __svr4__ +/* SVR4.2 uses _VA_LIST for an internal alias for va_list, + so we must avoid testing it and setting it here. + SVR4 uses _VA_LIST as a flag in stdarg.h, but we should + have no conflict with that. */ +#ifndef _VA_LIST_ +#define _VA_LIST_ +#ifdef __i860__ +#ifndef _VA_LIST +#define _VA_LIST va_list +#endif +#endif /* __i860__ */ +typedef __gnuc_va_list va_list; +#endif /* _VA_LIST_ */ +#else /* not __svr4__ */ + +/* The macro _VA_LIST_ is the same thing used by this file in Ultrix. + But on BSD NET2 we must not test or define or undef it. 
+ (Note that the comments in NET 2's ansi.h + are incorrect for _VA_LIST_--see stdio.h!) */ +#if !defined (_VA_LIST_) || defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__) || defined(WINNT) +/* The macro _VA_LIST_DEFINED is used in Windows NT 3.5 */ +#ifndef _VA_LIST_DEFINED +/* The macro _VA_LIST is used in SCO Unix 3.2. */ +#ifndef _VA_LIST +/* The macro _VA_LIST_T_H is used in the Bull dpx2 */ +#ifndef _VA_LIST_T_H +typedef __gnuc_va_list va_list; +#endif /* not _VA_LIST_T_H */ +#endif /* not _VA_LIST */ +#endif /* not _VA_LIST_DEFINED */ +#if !(defined (__BSD_NET2__) || defined (____386BSD____) || defined (__bsdi__) || defined (__sequent__) || defined (__FreeBSD__)) +#define _VA_LIST_ +#endif +#ifndef _VA_LIST +#define _VA_LIST +#endif +#ifndef _VA_LIST_DEFINED +#define _VA_LIST_DEFINED +#endif +#ifndef _VA_LIST_T_H +#define _VA_LIST_T_H +#endif + +#endif /* not _VA_LIST_, except on certain systems */ + +#endif /* not __svr4__ */ + +#endif /* _STDARG_H */ + +#endif /* not _ANSI_STDARG_H_ */ +#endif /* not _STDARG_H */ diff --git a/osfmk/libsa/ppc/types.h b/osfmk/libsa/ppc/types.h new file mode 100644 index 000000000..939b82d4f --- /dev/null +++ b/osfmk/libsa/ppc/types.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/12/09 16:59:05 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 11:18:55 stephen] + * + * Revision 1.1.2.1 1996/09/17 16:56:35 bruel + * created from standalone mach servers + * [1996/09/17 16:16:17 bruel] + * + * $EndLog$ + */ + +#ifndef _MACH_MACHINE_TYPES_H_ +#define _MACH_MACHINE_TYPES_H_ 1 + +typedef long dev_t; /* device number (major+minor) */ + +typedef signed char bit8_t; /* signed 8-bit quantity */ +typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ + +typedef short bit16_t; /* signed 16-bit quantity */ +typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ + +typedef int bit32_t; /* signed 32-bit quantity */ +typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ + +/* Only 32 bits of the "bit64_t" are significant on this 32-bit machine */ +typedef struct { int __val[2]; } bit64_t; /* signed 64-bit quantity */ +typedef struct { unsigned int __val[2]; } u_bit64_t;/* unsigned 64-bit quantity */ +#define _SIG64_BITS __val[1] /* bits of interest (32) */ + +#endif /* _MACH_MACHINE_TYPES_H_ */ diff --git a/osfmk/libsa/ppc/va_list.h b/osfmk/libsa/ppc/va_list.h new file mode 100644 index 000000000..1c42bff89 --- /dev/null +++ b/osfmk/libsa/ppc/va_list.h @@ -0,0 +1,126 
@@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#if !defined(APPLE) && !defined(NeXT) +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/12/09 16:59:07 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 11:18:59 stephen] + * + * Revision 1.1.4.1 1996/04/11 14:37:05 emcmanus + * Copied from mainline.ppc. + * [1996/04/11 14:36:22 emcmanus] + * + * Revision 1.1.2.1 1995/12/28 16:37:24 barbou + * Self-Contained Mach Distribution: + * created. + * [95/12/28 barbou] + * + * $EndLog$ + */ + +/* + * Four possible situations: + * - We are being included by {var,std}args.h (or anyone) before stdio.h. + * define real type. + * + * - We are being included by stdio.h before {var,std}args.h. + * define hidden type for prototypes in stdio, don't pollute namespace. + * + * - We are being included by {var,std}args.h after stdio.h. + * define real type to match hidden type. 
no longer use hidden type. + * + * - We are being included again after defining the real va_list. + * do nothing. + * + */ + +#if !defined(_HIDDEN_VA_LIST) && !defined(_VA_LIST) + +/* Define __gnuc_va_list. */ + +#ifndef __GNUC_VA_LIST +/* + * If this is for internal libc use, don't define + * anything but __gnuc_va_list. + */ +#define __GNUC_VA_LIST +typedef struct { + char gpr; /* index into the array of 8 GPRs stored in the + register save area gpr=0 corresponds to r3, + gpr=1 to r4, etc. */ + char fpr; /* index into the array of 8 FPRs stored in the + register save area fpr=0 corresponds to f1, + fpr=1 to f2, etc. */ + char *overflow_arg_area; /* location on stack that holds the next + overflow argument */ + char *reg_save_area; /* where r3:r10 and f1:f8, if saved are stored */ +} __gnuc_va_list[1]; + +#endif /* not __GNUC_VA_LIST */ + +#define _VA_LIST +typedef struct { + char gpr; /* index into the array of 8 GPRs stored in the + register save area gpr=0 corresponds to r3, + gpr=1 to r4, etc. */ + char fpr; /* index into the array of 8 FPRs stored in the + register save area fpr=0 corresponds to f1, + fpr=1 to f2, etc. */ + char *overflow_arg_area; /* location on stack that holds the next + overflow argument */ + char *reg_save_area; /* where r3:r10 and f1:f8, if saved are stored */ +} va_list[1]; + +#elif defined(_HIDDEN_VA_LIST) && !defined(_VA_LIST) + +#define _VA_LIST +typedef struct { + char gpr; /* index into the array of 8 GPRs stored in the + register save area gpr=0 corresponds to r3, + gpr=1 to r4, etc. */ + char fpr; /* index into the array of 8 FPRs stored in the + register save area fpr=0 corresponds to f1, + fpr=1 to f2, etc. 
*/ + char *overflow_arg_area; /* location on stack that holds the next + overflow argument */ + char *reg_save_area; /* where r3:r10 and f1:f8, if saved are stored */ +} __va_list[1]; + +#elif defined(_HIDDEN_VA_LIST) && defined(_VA_LIST) + +#undef _HIDDEN_VA_LIST +typedef __va_list va_list; + +#endif + +#endif diff --git a/osfmk/libsa/stdarg.h b/osfmk/libsa/stdarg.h new file mode 100644 index 000000000..962196ed7 --- /dev/null +++ b/osfmk/libsa/stdarg.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:23 bruel + * created from standalone mach servers. 
+ * [96/09/17 bruel] + * + * $EndLog$ + */ + +#include +#include diff --git a/osfmk/libsa/stdio.h b/osfmk/libsa/stdio.h new file mode 100644 index 000000000..37ecb60a5 --- /dev/null +++ b/osfmk/libsa/stdio.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.3 1996/10/04 11:36:05 emcmanus + * Added fprintf_stderr() prototype, for use by Mach libraries and like + * that might end up being linked with either libc or libsa_mach. + * [1996/10/04 11:31:53 emcmanus] + * + * Revision 1.1.2.2 1996/10/03 17:53:45 emcmanus + * Define NULL. This is currently also (questionably) defined in stdlib.h, + * string.h, and types.h. + * [1996/10/03 16:17:55 emcmanus] + * + * Revision 1.1.2.1 1996/09/17 16:56:18 bruel + * created from standalone mach servers. 
+ * [96/09/17 bruel] + * + * $EndLog$ + */ + +#ifndef _MACH_STDIO_H_ +#define _MACH_STDIO_H_ + +#include + +#ifndef NULL +#define NULL ((void *) 0) +#endif + +extern int sprintf(char *, const char *, ...); +extern int printf(const char *, ...); +extern int vprintf(const char *, va_list ); +extern int vsprintf(char *, const char *, va_list ); + +extern int getchar(void); + +extern int fprintf_stderr(const char *, ...); + +#endif /* _MACH_STDIO_H_ */ diff --git a/osfmk/libsa/stdlib.h b/osfmk/libsa/stdlib.h new file mode 100644 index 000000000..655f8779d --- /dev/null +++ b/osfmk/libsa/stdlib.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1997/02/21 15:43:19 barbou + * Removed "size_t" definition, include "types.h" instead. 
+ * [1997/02/21 15:36:24 barbou] + * + * Revision 1.1.2.3 1996/09/30 10:14:34 bruel + * Added strtol and strtoul prototypes. + * [96/09/30 bruel] + * + * Revision 1.1.2.2 1996/09/23 15:06:22 bruel + * removed bzero and bcopy definitions. + * [96/09/23 bruel] + * + * Revision 1.1.2.1 1996/09/17 16:56:24 bruel + * created from standalone mach servers. + * [96/09/17 bruel] + * + * $EndLog$ + */ + +#ifndef _MACH_STDLIB_H_ +#define _MACH_STDLIB_H_ + +#include + +#ifndef NULL +#define NULL (void *)0 +#endif + +extern int atoi(const char *); + +extern void free(void *); +extern void *malloc(size_t); +extern void *realloc(void *, size_t); + +extern char *getenv(const char *); + +extern void exit(int); + +extern long int strtol (const char *, char **, int); +extern unsigned long int strtoul (const char *, char **, int); + +#endif /* _MACH_STDLIB_H_ */ diff --git a/osfmk/libsa/string.h b/osfmk/libsa/string.h new file mode 100644 index 000000000..486492e95 --- /dev/null +++ b/osfmk/libsa/string.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1997/02/21 15:43:21 barbou + * Removed "size_t" definition, include "types.h" instead. + * [1997/02/21 15:36:54 barbou] + * + * Revision 1.1.2.4 1996/10/10 14:13:33 emcmanus + * Added memmove() prototype. + * [1996/10/10 14:11:51 emcmanus] + * + * Revision 1.1.2.3 1996/10/07 07:20:26 paire + * Added strncat() prototype, since it is defined in libsa_mach. + * [96/10/07 paire] + * + * Revision 1.1.2.2 1996/10/04 11:36:07 emcmanus + * Added strspn() prototype, since it is defined in libsa_mach. + * [1996/10/04 11:31:57 emcmanus] + * + * Revision 1.1.2.1 1996/09/17 16:56:15 bruel + * created for standalone mach servers. + * [96/09/17 bruel] + * + * $EndLog$ + */ + +#ifndef _MACH_STRING_H_ +#define _MACH_STRING_H_ 1 + +#ifdef MACH_KERNEL_PRIVATE +#include +#else +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef NULL +#define NULL 0 +#endif + +extern void *memcpy(void *, const void *, size_t); +extern void *memmove(void *, const void *, size_t); +extern void *memset(void *, int, size_t); + +extern size_t strlen(const char *); +extern char *strcpy(char *, const char *); +extern char *strncpy(char *, const char *, size_t); +extern char *strcat(char *, const char *); +extern char *strncat(char *, const char *, size_t); +extern int strcmp(const char *, const char *); +extern int strncmp(const char *,const char *, size_t); +extern char *strchr(const char *s, int c); +extern size_t strspn(const char *, const char *); + +#ifdef __cplusplus +} +#endif + +#endif /* _MACH_STRING_H_ */ diff --git a/osfmk/libsa/sys/timers.h b/osfmk/libsa/sys/timers.h new file mode 100644 index 000000000..645aba560 --- /dev/null +++ b/osfmk/libsa/sys/timers.h @@ -0,0 +1,63 @@ +/* + * 
Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:36 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1997/01/31 15:46:34 emcmanus + * Merged with nmk22b1_shared. + * [1997/01/30 08:47:46 emcmanus] + * + * Revision 1.1.2.2 1996/11/29 13:04:58 emcmanus + * Added TIMEOFDAY and getclock() prototype. + * [1996/11/29 09:59:33 emcmanus] + * + * Revision 1.1.2.1 1996/10/14 13:31:49 emcmanus + * Created. + * [1996/10/14 13:30:09 emcmanus] + * + * $EndLog$ + */ + +#ifndef _SYS_TIMERS_H_ +#define _SYS_TIMERS_H_ + +/* POSIX . For now, we define just enough to be able to build + the pthread library, with its pthread_cond_timedwait() interface. 
*/ +struct timespec { + unsigned long tv_sec; + long tv_nsec; +}; + +#define TIMEOFDAY 1 + +extern int getclock(int, struct timespec *); + +#endif /* _SYS_TIMERS_H_ */ diff --git a/osfmk/libsa/types.h b/osfmk/libsa/types.h new file mode 100644 index 000000000..ec12bec91 --- /dev/null +++ b/osfmk/libsa/types.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:21 bruel + * created from standalone mach servers. + * [96/09/17 bruel] + * + * $EndLog$ + */ + +#ifndef _MACH_TYPES_H_ +#define _MACH_TYPES_H_ + +#include "machine/types.h" + +#ifndef _SIZE_T +#define _SIZE_T +typedef unsigned long size_t; +#endif /* _SIZE_T */ + +/* + * Common type definitions that lots of old files seem to want. 
+ */ + +typedef unsigned char u_char; /* unsigned char */ +typedef unsigned short u_short; /* unsigned short */ +typedef unsigned int u_int; /* unsigned int */ +typedef unsigned long u_long; /* unsigned long */ + +typedef struct _quad_ { + unsigned int val[2]; /* 2 32-bit values make... */ +} quad; /* an 8-byte item */ + +typedef char * caddr_t; /* address of a (signed) char */ + +typedef int time_t; /* a signed 32 */ +typedef unsigned int daddr_t; /* an unsigned 32 */ +typedef unsigned int off_t; /* another unsigned 32 */ + + +#define major(i) (((i) >> 8) & 0xFF) +#define minor(i) ((i) & 0xFF) +#define makedev(i,j) ((((i) & 0xFF) << 8) | ((j) & 0xFF)) + +#ifndef NULL +#define NULL ((void *) 0) /* the null pointer */ +#endif + +/* + * Shorthand type definitions for unsigned storage classes + */ +typedef unsigned char uchar_t; +typedef unsigned short ushort_t; +typedef unsigned int uint_t; +typedef unsigned long ulong_t; +typedef volatile unsigned char vuchar_t; +typedef volatile unsigned short vushort_t; +typedef volatile unsigned int vuint_t; +typedef volatile unsigned long vulong_t; +#endif /* _MACH_TYPES_H_ */ diff --git a/osfmk/libsa/va_list.h b/osfmk/libsa/va_list.h new file mode 100644 index 000000000..4034a0ce7 --- /dev/null +++ b/osfmk/libsa/va_list.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:35 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/09/17 16:56:17 bruel + * created from standalone mach servers. + * [96/09/17 bruel] + * + * $EndLog$ + */ + +#include + diff --git a/osfmk/mach-o/loader.h b/osfmk/mach-o/loader.h new file mode 100644 index 000000000..277b2b1e2 --- /dev/null +++ b/osfmk/mach-o/loader.h @@ -0,0 +1,722 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHO_LOADER_H_ +#define _MACHO_LOADER_H_ + +/* + * This file describes the format of mach object files. + */ + +/* + * is needed here for the cpu_type_t and cpu_subtype_t types + * and contains the constants for the possible values of these types. + */ +#include + +/* + * is needed here for the vm_prot_t type and contains the + * constants that are or'ed together for the possible values of this type. + */ +#include + +/* + * is expected to define the flavors of the thread + * states and the structures of those flavors for each machine. + */ +#include + +/* + * The mach header appears at the very beginning of the object file. + */ +struct mach_header { + unsigned long magic; /* mach magic number identifier */ + cpu_type_t cputype; /* cpu specifier */ + cpu_subtype_t cpusubtype; /* machine specifier */ + unsigned long filetype; /* type of file */ + unsigned long ncmds; /* number of load commands */ + unsigned long sizeofcmds; /* the size of all the load commands */ + unsigned long flags; /* flags */ +}; + +/* Constant for the magic field of the mach_header */ +#define MH_MAGIC 0xfeedface /* the mach magic number */ +#define MH_CIGAM NXSwapInt(MH_MAGIC) + +/* + * The layout of the file depends on the filetype. For all but the MH_OBJECT + * file type the segments are padded out and aligned on a segment alignment + * boundary for efficient demand pageing. The MH_EXECUTE, MH_FVMLIB, MH_DYLIB, + * MH_DYLINKER and MH_BUNDLE file types also have the headers included as part + * of their first segment. + * + * The file type MH_OBJECT is a compact format intended as output of the + * assembler and input (and possibly output) of the link editor (the .o + * format). All sections are in one unnamed segment with no segment padding. + * This format is used as an executable format when the file is so small the + * segment padding greatly increases it's size. 
+ * + * The file type MH_PRELOAD is an executable format intended for things that + * not executed under the kernel (proms, stand alones, kernels, etc). The + * format can be executed under the kernel but may demand paged it and not + * preload it before execution. + * + * A core file is in MH_CORE format and can be any in an arbritray legal + * Mach-O file. + * + * Constants for the filetype field of the mach_header + */ +#define MH_OBJECT 0x1 /* relocatable object file */ +#define MH_EXECUTE 0x2 /* demand paged executable file */ +#define MH_FVMLIB 0x3 /* fixed VM shared library file */ +#define MH_CORE 0x4 /* core file */ +#define MH_PRELOAD 0x5 /* preloaded executable file */ +#define MH_DYLIB 0x6 /* dynamicly bound shared library file*/ +#define MH_DYLINKER 0x7 /* dynamic link editor */ +#define MH_BUNDLE 0x8 /* dynamicly bound bundle file */ + +/* Constants for the flags field of the mach_header */ +#define MH_NOUNDEFS 0x1 /* the object file has no undefined + references, can be executed */ +#define MH_INCRLINK 0x2 /* the object file is the output of an + incremental link against a base file + and can't be link edited again */ +#define MH_DYLDLINK 0x4 /* the object file is input for the + dynamic linker and can't be staticly + link edited again */ +#define MH_BINDATLOAD 0x8 /* the object file's undefined + references are bound by the dynamic + linker when loaded. */ +#define MH_PREBOUND 0x10 /* the file has it's dynamic undefined + references prebound. */ + +/* + * The load commands directly follow the mach_header. The total size of all + * of the commands is given by the sizeofcmds field in the mach_header. All + * load commands must have as their first two fields cmd and cmdsize. The cmd + * field is filled in with a constant for that command type. Each command type + * has a structure specifically for it. 
The cmdsize field is the size in bytes + * of the particular load command structure plus anything that follows it that + * is a part of the load command (i.e. section structures, strings, etc.). To + * advance to the next load command the cmdsize can be added to the offset or + * pointer of the current load command. The cmdsize MUST be a multiple of + * sizeof(long) (this is forever the maximum alignment of any load commands). + * The padded bytes must be zero. All tables in the object file must also + * follow these rules so the file can be memory mapped. Otherwise the pointers + * to these tables will not work well or at all on some machines. With all + * padding zeroed like objects will compare byte for byte. + */ +struct load_command { + unsigned long cmd; /* type of load command */ + unsigned long cmdsize; /* total size of command in bytes */ +}; + +/* Constants for the cmd field of all load commands, the type */ +#define LC_SEGMENT 0x1 /* segment of this file to be mapped */ +#define LC_SYMTAB 0x2 /* link-edit stab symbol table info */ +#define LC_SYMSEG 0x3 /* link-edit gdb symbol table info (obsolete) */ +#define LC_THREAD 0x4 /* thread */ +#define LC_UNIXTHREAD 0x5 /* unix thread (includes a stack) */ +#define LC_LOADFVMLIB 0x6 /* load a specified fixed VM shared library */ +#define LC_IDFVMLIB 0x7 /* fixed VM shared library identification */ +#define LC_IDENT 0x8 /* object identification info (obsolete) */ +#define LC_FVMFILE 0x9 /* fixed VM file inclusion (internal use) */ +#define LC_PREPAGE 0xa /* prepage command (internal use) */ +#define LC_DYSYMTAB 0xb /* dynamic link-edit symbol table info */ +#define LC_LOAD_DYLIB 0xc /* load a dynamicly linked shared library */ +#define LC_ID_DYLIB 0xd /* dynamicly linked shared lib identification */ +#define LC_LOAD_DYLINKER 0xe /* load a dynamic linker */ +#define LC_ID_DYLINKER 0xf /* dynamic linker identification */ +#define LC_PREBOUND_DYLIB 0x10 /* modules prebound for a dynamicly */ + /* linked shared 
library */ + +/* + * A variable length string in a load command is represented by an lc_str + * union. The strings are stored just after the load command structure and + * the offset is from the start of the load command structure. The size + * of the string is reflected in the cmdsize field of the load command. + * Once again any padded bytes to bring the cmdsize field to a multiple + * of sizeof(long) must be zero. + */ +union lc_str { + unsigned long offset; /* offset to the string */ + char *ptr; /* pointer to the string */ +}; + +/* + * The segment load command indicates that a part of this file is to be + * mapped into the task's address space. The size of this segment in memory, + * vmsize, maybe equal to or larger than the amount to map from this file, + * filesize. The file is mapped starting at fileoff to the beginning of + * the segment in memory, vmaddr. The rest of the memory of the segment, + * if any, is allocated zero fill on demand. The segment's maximum virtual + * memory protection and initial virtual memory protection are specified + * by the maxprot and initprot fields. If the segment has sections then the + * section structures directly follow the segment command and their size is + * reflected in cmdsize. 
+ */ +struct segment_command { + unsigned long cmd; /* LC_SEGMENT */ + unsigned long cmdsize; /* includes sizeof section structs */ + char segname[16]; /* segment name */ + unsigned long vmaddr; /* memory address of this segment */ + unsigned long vmsize; /* memory size of this segment */ + unsigned long fileoff; /* file offset of this segment */ + unsigned long filesize; /* amount to map from the file */ + vm_prot_t maxprot; /* maximum VM protection */ + vm_prot_t initprot; /* initial VM protection */ + unsigned long nsects; /* number of sections in segment */ + unsigned long flags; /* flags */ +}; + +/* Constants for the flags field of the segment_command */ +#define SG_HIGHVM 0x1 /* the file contents for this segment is for + the high part of the VM space, the low part + is zero filled (for stacks in core files) */ +#define SG_FVMLIB 0x2 /* this segment is the VM that is allocated by + a fixed VM library, for overlap checking in + the link editor */ +#define SG_NORELOC 0x4 /* this segment has nothing that was relocated + in it and nothing relocated to it, that is + it maybe safely replaced without relocation*/ + +/* + * A segment is made up of zero or more sections. Non-MH_OBJECT files have + * all of their segments with the proper sections in each, and padded to the + * specified segment alignment when produced by the link editor. The first + * segment of a MH_EXECUTE and MH_FVMLIB format file contains the mach_header + * and load commands of the object file before it's first section. The zero + * fill sections are always last in their segment (in all formats). This + * allows the zeroed segment padding to be mapped into memory where zero fill + * sections might be. + * + * The MH_OBJECT format has all of it's sections in one segment for + * compactness. There is no padding to a specified segment boundary and the + * mach_header and load commands are not part of the segment. 
+ * + * Sections with the same section name, sectname, going into the same segment, + * segname, are combined by the link editor. The resulting section is aligned + * to the maximum alignment of the combined sections and is the new section's + * alignment. The combined sections are aligned to their original alignment in + * the combined section. Any padded bytes to get the specified alignment are + * zeroed. + * + * The format of the relocation entries referenced by the reloff and nreloc + * fields of the section structure for mach object files is described in the + * header file . + */ +struct section { + char sectname[16]; /* name of this section */ + char segname[16]; /* segment this section goes in */ + unsigned long addr; /* memory address of this section */ + unsigned long size; /* size in bytes of this section */ + unsigned long offset; /* file offset of this section */ + unsigned long align; /* section alignment (power of 2) */ + unsigned long reloff; /* file offset of relocation entries */ + unsigned long nreloc; /* number of relocation entries */ + unsigned long flags; /* flags (section type and attributes)*/ + unsigned long reserved1; /* reserved */ + unsigned long reserved2; /* reserved */ +}; + +/* + * The flags field of a section structure is separated into two parts a section + * type and section attributes. The section types are mutually exclusive (it + * can only have one type) but the section attributes are not (it may have more + * than one attribute). 
+ */ +#define SECTION_TYPE 0x000000ff /* 256 section types */ +#define SECTION_ATTRIBUTES 0xffffff00 /* 24 section attributes */ + +/* Constants for the type of a section */ +#define S_REGULAR 0x0 /* regular section */ +#define S_ZEROFILL 0x1 /* zero fill on demand section */ +#define S_CSTRING_LITERALS 0x2 /* section with only literal C strings*/ +#define S_4BYTE_LITERALS 0x3 /* section with only 4 byte literals */ +#define S_8BYTE_LITERALS 0x4 /* section with only 8 byte literals */ +#define S_LITERAL_POINTERS 0x5 /* section with only pointers to */ + /* literals */ +/* + * For the two types of symbol pointers sections and the symbol stubs section + * they have indirect symbol table entries. For each of the entries in the + * section the indirect symbol table entries, in corresponding order in the + * indirect symbol table, start at the index stored in the reserved1 field + * of the section structure. Since the indirect symbol table entries + * correspond to the entries in the section the number of indirect symbol table + * entries is inferred from the size of the section divided by the size of the + * entries in the section. For symbol pointers sections the size of the entries + * in the section is 4 bytes and for symbol stubs sections the byte size of the + * stubs is stored in the reserved2 field of the section structure. + */ +#define S_NON_LAZY_SYMBOL_POINTERS 0x6 /* section with only non-lazy + symbol pointers */ +#define S_LAZY_SYMBOL_POINTERS 0x7 /* section with only lazy symbol + pointers */ +#define S_SYMBOL_STUBS 0x8 /* section with only symbol + stubs, byte size of stub in + the reserved2 field */ +#define S_MOD_INIT_FUNC_POINTERS 0x9 /* section with only function + pointers for initialization*/ +/* + * Constants for the section attributes part of the flags field of a section + * structure. 
+ */ +#define SECTION_ATTRIBUTES_USR 0xff000000 /* User setable attributes */ +#define S_ATTR_PURE_INSTRUCTIONS 0x80000000 /* section contains only true + machine instructions */ +#define SECTION_ATTRIBUTES_SYS 0x00ffff00 /* system setable attributes */ +#define S_ATTR_SOME_INSTRUCTIONS 0x00000400 /* section contains some + machine instructions */ +#define S_ATTR_EXT_RELOC 0x00000200 /* section has external + relocation entries */ +#define S_ATTR_LOC_RELOC 0x00000100 /* section has local + relocation entries */ + + +/* + * The names of segments and sections in them are mostly meaningless to the + * link-editor. But there are few things to support traditional UNIX + * executables that require the link-editor and assembler to use some names + * agreed upon by convention. + * + * The initial protection of the "__TEXT" segment has write protection turned + * off (not writeable). + * + * The link-editor will allocate common symbols at the end of the "__common" + * section in the "__DATA" segment. It will create the section and segment + * if needed. 
+ */ + +/* The currently known segment names and the section names in those segments */ + +#define SEG_PAGEZERO "__PAGEZERO" /* the pagezero segment which has no */ + /* protections and catches NULL */ + /* references for MH_EXECUTE files */ + + +#define SEG_TEXT "__TEXT" /* the tradition UNIX text segment */ +#define SECT_TEXT "__text" /* the real text part of the text */ + /* section no headers, and no padding */ +#define SECT_FVMLIB_INIT0 "__fvmlib_init0" /* the fvmlib initialization */ + /* section */ +#define SECT_FVMLIB_INIT1 "__fvmlib_init1" /* the section following the */ + /* fvmlib initialization */ + /* section */ + +#define SEG_DATA "__DATA" /* the tradition UNIX data segment */ +#define SECT_DATA "__data" /* the real initialized data section */ + /* no padding, no bss overlap */ +#define SECT_BSS "__bss" /* the real uninitialized data section*/ + /* no padding */ +#define SECT_COMMON "__common" /* the section common symbols are */ + /* allocated in by the link editor */ + +#define SEG_OBJC "__OBJC" /* objective-C runtime segment */ +#define SECT_OBJC_SYMBOLS "__symbol_table" /* symbol table */ +#define SECT_OBJC_MODULES "__module_info" /* module information */ +#define SECT_OBJC_STRINGS "__selector_strs" /* string table */ +#define SECT_OBJC_REFS "__selector_refs" /* string table */ + +#define SEG_ICON "__ICON" /* the NeXT icon segment */ +#define SECT_ICON_HEADER "__header" /* the icon headers */ +#define SECT_ICON_TIFF "__tiff" /* the icons in tiff format */ + +#define SEG_LINKEDIT "__LINKEDIT" /* the segment containing all structs */ + /* created and maintained by the link */ + /* editor. Created with -seglinkedit */ + /* option to ld(1) for MH_EXECUTE and */ + /* FVMLIB file types only */ + +#define SEG_UNIXSTACK "__UNIXSTACK" /* the unix stack segment */ + +/* + * Fixed virtual memory shared libraries are identified by two things. The + * target pathname (the name of the library as found for execution), and the + * minor version number. 
The address of where the headers are loaded is in + * header_addr. + */ +struct fvmlib { + union lc_str name; /* library's target pathname */ + unsigned long minor_version; /* library's minor version number */ + unsigned long header_addr; /* library's header address */ +}; + +/* + * A fixed virtual shared library (filetype == MH_FVMLIB in the mach header) + * contains a fvmlib_command (cmd == LC_IDFVMLIB) to identify the library. + * An object that uses a fixed virtual shared library also contains a + * fvmlib_command (cmd == LC_LOADFVMLIB) for each library it uses. + */ +struct fvmlib_command { + unsigned long cmd; /* LC_IDFVMLIB or LC_LOADFVMLIB */ + unsigned long cmdsize; /* includes pathname string */ + struct fvmlib fvmlib; /* the library identification */ +}; + +/* + * Dynamicly linked shared libraries are identified by two things. The + * pathname (the name of the library as found for execution), and the + * compatibility version number. The pathname must match and the compatibility + * number in the user of the library must be greater than or equal to the + * library being used. The time stamp is used to record the time a library was + * built and copied into user so it can be use to determined if the library used + * at runtime is exactly the same as used to built the program. + */ +struct dylib { + union lc_str name; /* library's path name */ + unsigned long timestamp; /* library's build time stamp */ + unsigned long current_version; /* library's current version number */ + unsigned long compatibility_version;/* library's compatibility vers number*/ +}; + +/* + * A dynamicly linked shared library (filetype == MH_DYLIB in the mach header) + * contains a dylib_command (cmd == LC_ID_DYLIB) to identify the library. + * An object that uses a dynamicly linked shared library also contains a + * dylib_command (cmd == LC_LOAD_DYLIB) for each library it uses. 
+ */ +struct dylib_command { + unsigned long cmd; /* LC_ID_DYLIB or LC_LOAD_DYLIB */ + unsigned long cmdsize; /* includes pathname string */ + struct dylib dylib; /* the library identification */ +}; + +/* + * A program (filetype == MH_EXECUTE) or bundle (filetype == MH_BUNDLE) that is + * prebound to it's dynamic libraries has one of these for each library that + * the static linker used in prebinding. It contains a bit vector for the + * modules in the library. The bits indicate which modules are bound (1) and + * which are not (0) from the library. The bit for module 0 is the low bit + * of the first byte. So the bit for the Nth module is: + * (linked_modules[N/8] >> N%8) & 1 + */ +struct prebound_dylib_command { + unsigned long cmd; /* LC_PREBOUND_DYLIB */ + unsigned long cmdsize; /* includes strings */ + union lc_str name; /* library's path name */ + unsigned long nmodules; /* number of modules in library */ + union lc_str linked_modules; /* bit vector of linked modules */ +}; + +/* + * A program that uses a dynamic linker contains a dylinker_command to identify + * the name of the dynamic linker (LC_LOAD_DYLINKER). And a dynamic linker + * contains a dylinker_command to identify the dynamic linker (LC_ID_DYLINKER). + * A file can have at most one of these. + */ +struct dylinker_command { + unsigned long cmd; /* LC_ID_DYLINKER or LC_LOAD_DYLINKER */ + unsigned long cmdsize; /* includes pathname string */ + union lc_str name; /* dynamic linker's path name */ +}; + +/* + * Thread commands contain machine-specific data structures suitable for + * use in the thread state primitives. The machine specific data structures + * follow the struct thread_command as follows. + * Each flavor of machine specific data structure is preceded by an unsigned + * long constant for the flavor of that data structure, an unsigned long + * that is the count of longs of the size of the state data structure and then + * the state data structure follows. 
This triple may be repeated for many + * flavors. The constants for the flavors, counts and state data structure + * definitions are expected to be in the header file . + * These machine specific data structures sizes must be multiples of + * sizeof(long). The cmdsize reflects the total size of the thread_command + * and all of the sizes of the constants for the flavors, counts and state + * data structures. + * + * For executable objects that are unix processes there will be one + * thread_command (cmd == LC_UNIXTHREAD) created for it by the link-editor. + * This is the same as a LC_THREAD, except that a stack is automatically + * created (based on the shell's limit for the stack size). Command arguments + * and environment variables are copied onto that stack. + */ +struct thread_command { + unsigned long cmd; /* LC_THREAD or LC_UNIXTHREAD */ + unsigned long cmdsize; /* total size of this command */ + /* unsigned long flavor flavor of thread state */ + /* unsigned long count count of longs in thread state */ + /* struct XXX_thread_state state thread state for this flavor */ + /* ... */ +}; + +/* + * The symtab_command contains the offsets and sizes of the link-edit 4.3BSD + * "stab" style symbol table information as described in the header files + * and . + */ +struct symtab_command { + unsigned long cmd; /* LC_SYMTAB */ + unsigned long cmdsize; /* sizeof(struct symtab_command) */ + unsigned long symoff; /* symbol table offset */ + unsigned long nsyms; /* number of symbol table entries */ + unsigned long stroff; /* string table offset */ + unsigned long strsize; /* string table size in bytes */ +}; + +/* + * This is the second set of the symbolic information which is used to support + * the data structures for the dynamicly link editor. + * + * The original set of symbolic information in the symtab_command which contains + * the symbol and string tables must also be present when this load command is + * present. 
When this load command is present the symbol table is organized + * into three groups of symbols: + * local symbols (static and debugging symbols) - grouped by module + * defined external symbols - grouped by module (sorted by name if not lib) + * undefined external symbols (sorted by name) + * In this load command there are offsets and counts to each of the three groups + * of symbols. + * + * This load command contains a the offsets and sizes of the following new + * symbolic information tables: + * table of contents + * module table + * reference symbol table + * indirect symbol table + * The first three tables above (the table of contents, module table and + * reference symbol table) are only present if the file is a dynamicly linked + * shared library. For executable and object modules, which are files + * containing only one module, the information that would be in these three + * tables is determined as follows: + * table of contents - the defined external symbols are sorted by name + * module table - the file contains only one module so everything in the + * file is part of the module. + * reference symbol table - is the defined and undefined external symbols + * + * For dynamicly linked shared library files this load command also contains + * offsets and sizes to the pool of relocation entries for all sections + * separated into two groups: + * external relocation entries + * local relocation entries + * For executable and object modules the relocation entries continue to hang + * off the section structures. 
+ */ +struct dysymtab_command { + unsigned long cmd; /* LC_DYSYMTAB */ + unsigned long cmdsize; /* sizeof(struct dysymtab_command) */ + + /* + * The symbols indicated by symoff and nsyms of the LC_SYMTAB load command + * are grouped into the following three groups: + * local symbols (further grouped by the module they are from) + * defined external symbols (further grouped by the module they are from) + * undefined symbols + * + * The local symbols are used only for debugging. The dynamic binding + * process may have to use them to indicate to the debugger the local + * symbols for a module that is being bound. + * + * The last two groups are used by the dynamic binding process to do the + * binding (indirectly through the module table and the reference symbol + * table when this is a dynamicly linked shared library file). + */ + unsigned long ilocalsym; /* index to local symbols */ + unsigned long nlocalsym; /* number of local symbols */ + + unsigned long iextdefsym; /* index to externally defined symbols */ + unsigned long nextdefsym; /* number of externally defined symbols */ + + unsigned long iundefsym; /* index to undefined symbols */ + unsigned long nundefsym; /* number of undefined symbols */ + + /* + * For the for the dynamic binding process to find which module a symbol + * is defined in the table of contents is used (analogous to the ranlib + * structure in an archive) which maps defined external symbols to modules + * they are defined in. This exists only in a dynamicly linked shared + * library file. For executable and object modules the defined external + * symbols are sorted by name and is use as the table of contents. + */ + unsigned long tocoff; /* file offset to table of contents */ + unsigned long ntoc; /* number of entries in table of contents */ + + /* + * To support dynamic binding of "modules" (whole object files) the symbol + * table must reflect the modules that the file was created from. 
This is + * done by having a module table that has indexes and counts into the merged + * tables for each module. The module structure that these two entries + * refer to is described below. This exists only in a dynamicly linked + * shared library file. For executable and object modules the file only + * contains one module so everything in the file belongs to the module. + */ + unsigned long modtaboff; /* file offset to module table */ + unsigned long nmodtab; /* number of module table entries */ + + /* + * To support dynamic module binding the module structure for each module + * indicates the external references (defined and undefined) each module + * makes. For each module there is an offset and a count into the + * reference symbol table for the symbols that the module references. + * This exists only in a dynamicly linked shared library file. For + * executable and object modules the defined external symbols and the + * undefined external symbols indicates the external references. + */ + unsigned long extrefsymoff; /* offset to referenced symbol table */ + unsigned long nextrefsyms; /* number of referenced symbol table entries */ + + /* + * The sections that contain "symbol pointers" and "routine stubs" have + * indexes and (implied counts based on the size of the section and fixed + * size of the entry) into the "indirect symbol" table for each pointer + * and stub. For every section of these two types the index into the + * indirect symbol table is stored in the section header in the field + * reserved1. An indirect symbol table entry is simply a 32bit index into + * the symbol table to the symbol that the pointer or stub is referring to. + * The indirect symbol table is ordered to match the entries in the section. 
+ */ + unsigned long indirectsymoff; /* file offset to the indirect symbol table */ + unsigned long nindirectsyms; /* number of indirect symbol table entries */ + + /* + * To support relocating an individual module in a library file quickly the + * external relocation entries for each module in the library need to be + * accessed efficiently. Since the relocation entries can't be accessed + * through the section headers for a library file they are separated into + * groups of local and external entries further grouped by module. In this + * case the presents of this load command who's extreloff, nextrel, + * locreloff and nlocrel fields are non-zero indicates that the relocation + * entries of non-merged sections are not referenced through the section + * structures (and the reloff and nreloc fields in the section headers are + * set to zero). + * + * Since the relocation entries are not accessed through the section headers + * this requires the r_address field to be something other than a section + * offset to identify the item to be relocated. In this case r_address is + * set to the offset from the vmaddr of the first LC_SEGMENT command. + * + * The relocation entries are grouped by module and the module table + * entries have indexes and counts into them for the group of external + * relocation entries for that the module. + * + * For sections that are merged across modules there must not be any + * remaining external relocation entries for them (for merged sections + * remaining relocation entries must be local). + */ + unsigned long extreloff; /* offset to external relocation entries */ + unsigned long nextrel; /* number of external relocation entries */ + + /* + * All the local relocation entries are grouped together (they are not + * grouped by their module since they are only used if the object is moved + * from it staticly link edited address). 
+ */ + unsigned long locreloff; /* offset to local relocation entries */ + unsigned long nlocrel; /* number of local relocation entries */ + +}; + +/* + * An indirect symbol table entry is simply a 32bit index into the symbol table + * to the symbol that the pointer or stub is refering to. Unless it is for a + * non-lazy symbol pointer section for a defined symbol which strip(1) as + * removed. In which case it has the value INDIRECT_SYMBOL_LOCAL. If the + * symbol was also absolute INDIRECT_SYMBOL_ABS is or'ed with that. + */ +#define INDIRECT_SYMBOL_LOCAL 0x80000000 +#define INDIRECT_SYMBOL_ABS 0x40000000 + + +/* a table of contents entry */ +struct dylib_table_of_contents { + unsigned long symbol_index; /* the defined external symbol + (index into the symbol table) */ + unsigned long module_index; /* index into the module table this symbol + is defined in */ +}; + +/* a module table entry */ +struct dylib_module { + unsigned long module_name; /* the module name (index into string table) */ + + unsigned long iextdefsym; /* index into externally defined symbols */ + unsigned long nextdefsym; /* number of externally defined symbols */ + unsigned long irefsym; /* index into reference symbol table */ + unsigned long nrefsym; /* number of reference symbol table entries */ + unsigned long ilocalsym; /* index into symbols for local symbols */ + unsigned long nlocalsym; /* number of local symbols */ + + unsigned long iextrel; /* index into external relocation entries */ + unsigned long nextrel; /* number of external relocation entries */ + + unsigned long iinit; /* index into the init section */ + unsigned long ninit; /* number of init section entries */ + + unsigned long /* for this module address of the start of */ + objc_module_info_addr; /* the (__OBJC,__module_info) section */ + unsigned long /* for this module size of */ + objc_module_info_size; /* the (__OBJC,__module_info) section */ +}; + +/* + * The entries in the reference symbol table are used when loading 
the module + * (both by the static and dynamic link editors) and if the module is unloaded + * or replaced. Therefore all external symbols (defined and undefined) are + * listed in the module's reference table. The flags describe the type of + * reference that is being made. The constants for the flags are defined in + * as they are also used for symbol table entries. + */ +struct dylib_reference { + unsigned long isym:24, /* index into the symbol table */ + flags:8; /* flags to indicate the type of reference */ +}; + +/* + * The symseg_command contains the offset and size of the GNU style + * symbol table information as described in the header file . + * The symbol roots of the symbol segments must also be aligned properly + * in the file. So the requirement of keeping the offsets aligned to a + * multiple of a sizeof(long) translates to the length field of the symbol + * roots also being a multiple of a long. Also the padding must again be + * zeroed. (THIS IS OBSOLETE and no longer supported). + */ +struct symseg_command { + unsigned long cmd; /* LC_SYMSEG */ + unsigned long cmdsize; /* sizeof(struct symseg_command) */ + unsigned long offset; /* symbol segment offset */ + unsigned long size; /* symbol segment size in bytes */ +}; + +/* + * The ident_command contains a free format string table following the + * ident_command structure. The strings are null terminated and the size of + * the command is padded out with zero bytes to a multiple of sizeof(long). + * (THIS IS OBSOLETE and no longer supported). + */ +struct ident_command { + unsigned long cmd; /* LC_IDENT */ + unsigned long cmdsize; /* strings that follow this command */ +}; + +/* + * The fvmfile_command contains a reference to a file to be loaded at the + * specified virtual address. (Presently, this command is reserved for NeXT + * internal use. The kernel ignores this command when loading a program into + * memory). 
+ */ +struct fvmfile_command { + unsigned long cmd; /* LC_FVMFILE */ + unsigned long cmdsize; /* includes pathname string */ + union lc_str name; /* files pathname */ + unsigned long header_addr; /* files virtual address */ +}; + +#endif /*_MACHO_LOADER_H_*/ diff --git a/osfmk/mach-o/mach_header.c b/osfmk/mach-o/mach_header.c new file mode 100644 index 000000000..02e88426b --- /dev/null +++ b/osfmk/mach-o/mach_header.c @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: kern/mach_header.c + * + * Functions for accessing mach-o headers. + * + * HISTORY + * 27-MAR-97 Umesh Vaishampayan (umeshv@NeXT.com) + * Added getsegdatafromheader(); + * + * 29-Jan-92 Mike DeMoney (mike@next.com) + * Made into machine independent form from machdep/m68k/mach_header.c. + * Ifdef'ed out most of this since I couldn't find any references. 
+ */ + +#include +#include +#include + +#ifdef __MACHO__ + +extern struct mach_header _mh_execute_header; + +struct section *getsectbynamefromheader( + struct mach_header *header, + char *seg_name, + char *sect_name); +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name); + +/* + * return the last address (first avail) + */ +#ifdef MACH_BSD +__private_extern__ +#endif +vm_offset_t getlastaddr(void) +{ + struct segment_command *sgp; + vm_offset_t last_addr = 0; + struct mach_header *header = &_mh_execute_header; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if ( sgp->cmd == LC_SEGMENT) { + if (sgp->vmaddr + sgp->vmsize > last_addr) + last_addr = sgp->vmaddr + sgp->vmsize; + } + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return last_addr; +} + +#ifdef XXX_MACH_BSD +__private_extern__ +#endif +struct mach_header ** +getmachheaders(void) +{ + extern struct mach_header _mh_execute_header; + struct mach_header **tl; + + if (kmem_alloc(kernel_map, (vm_offset_t *) &tl, 2*sizeof(struct mach_header *)) != KERN_SUCCESS) + return NULL; + + tl[0] = &_mh_execute_header; + tl[1] = (struct mach_header *)0; + return tl; +} + +/* + * This routine returns the a pointer to the data for the named section in the + * named segment if it exist in the mach header passed to it. Also it returns + * the size of the section data indirectly through the pointer size. Otherwise + * it returns zero for the pointer and the size. 
+ */ +#ifdef MACH_BSD +__private_extern__ +#endif +void * +getsectdatafromheader( + struct mach_header *mhp, + char *segname, + char *sectname, + int *size) +{ + const struct section *sp; + void *result; + + sp = getsectbynamefromheader(mhp, segname, sectname); + if(sp == (struct section *)0){ + *size = 0; + return((char *)0); + } + *size = sp->size; + result = (void *)sp->addr; + return result; +} + +/* + * This routine returns the a pointer to the data for the named segment + * if it exist in the mach header passed to it. Also it returns + * the size of the segment data indirectly through the pointer size. + * Otherwise it returns zero for the pointer and the size. + */ +#ifdef MACH_BSD +__private_extern__ +#endif +void * +getsegdatafromheader( + struct mach_header *mhp, + char *segname, + int *size) +{ + const struct segment_command *sc; + void *result; + + sc = getsegbynamefromheader(mhp, segname); + if(sc == (struct segment_command *)0){ + *size = 0; + return((char *)0); + } + *size = sc->vmsize; + result = (void *)sc->vmaddr; + return result; +} + +/* + * This routine returns the section structure for the named section in the + * named segment for the mach_header pointer passed to it if it exist. + * Otherwise it returns zero. 
+ */ +#ifdef MACH_BSD +__private_extern__ +#endif +struct section * +getsectbynamefromheader( + struct mach_header *mhp, + char *segname, + char *sectname) +{ + struct segment_command *sgp; + struct section *sp; + long i, j; + + sgp = (struct segment_command *) + ((char *)mhp + sizeof(struct mach_header)); + for(i = 0; i < mhp->ncmds; i++){ + if(sgp->cmd == LC_SEGMENT) + if(strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0 || + mhp->filetype == MH_OBJECT){ + sp = (struct section *)((char *)sgp + + sizeof(struct segment_command)); + for(j = 0; j < sgp->nsects; j++){ + if(strncmp(sp->sectname, sectname, + sizeof(sp->sectname)) == 0 && + strncmp(sp->segname, segname, + sizeof(sp->segname)) == 0) + return(sp); + sp = (struct section *)((char *)sp + + sizeof(struct section)); + } + } + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return((struct section *)0); +} + +#ifdef MACH_BSD +__private_extern__ +#endif +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name) +{ + struct segment_command *sgp; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if ( sgp->cmd == LC_SEGMENT + && !strncmp(sgp->segname, seg_name, sizeof(sgp->segname))) + return sgp; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return (struct segment_command *)0; +} + + +/* + * For now at least, all the rest of this seems unused. + * NOTE: The constant in here for segment alignment is machine-dependent, + * so if you include this, define a machine dependent constant for it's + * value. 
+ */ +static struct { + struct segment_command seg; + struct section sect; +} fvm_data = { + { + LC_SEGMENT, // cmd + sizeof(fvm_data), // cmdsize + "__USER", // segname + 0, // vmaddr + 0, // vmsize + 0, // fileoff + 0, // filesize + VM_PROT_READ, // maxprot + VM_PROT_READ, // initprot, + 1, // nsects + 0 // flags + }, + { + "", // sectname + "__USER", // segname + 0, // addr + 0, // size + 0, // offset + 4, // align + 0, // reloff + 0, // nreloc + 0 // flags + } +}; + +#ifdef MACH_BSD +static +#endif +struct segment_command *fvm_seg; + +static struct fvmfile_command *fvmfilefromheader(struct mach_header *header); +static vm_offset_t getsizeofmacho(struct mach_header *header); + +/* + * Return the first segment_command in the header. + */ +#ifdef MACH_BSD +__private_extern__ +#endif +struct segment_command *firstseg(void) +{ + return firstsegfromheader(&_mh_execute_header); +} + +#ifdef MACH_BSD +__private_extern__ +#endif +struct segment_command *firstsegfromheader(struct mach_header *header) +{ + struct segment_command *sgp; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if (sgp->cmd == LC_SEGMENT) + return sgp; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + return (struct segment_command *)0; +} + +#ifdef MACH_BSD +__private_extern__ +#endif +struct segment_command *nextseg(struct segment_command *sgp) +{ + struct segment_command *this; + + this = nextsegfromheader(&_mh_execute_header, sgp); + + /* + * For the kernel's header add on the faked segment for the + * USER boot code identified by a FVMFILE_COMMAND in the mach header. 
+ */ + if (!this && sgp != fvm_seg) + this = fvm_seg; + + return this; +} + +#ifdef MACH_BSD +__private_extern__ +#endif +struct segment_command *nextsegfromheader( + struct mach_header *header, + struct segment_command *seg) +{ + struct segment_command *sgp; + int i; + + sgp = (struct segment_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++) { + if (sgp == seg) + break; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + + if (i == header->ncmds) + return (struct segment_command *)0; + + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + for (; i < header->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT) + return sgp; + sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize); + } + + return (struct segment_command *)0; +} + + +/* + * Return the address of the named Mach-O segment, or NULL. + */ +#ifdef MACH_BSD +__private_extern__ +#endif +struct segment_command *getsegbyname(char *seg_name) +{ + struct segment_command *this; + + this = getsegbynamefromheader(&_mh_execute_header, seg_name); + + /* + * For the kernel's header add on the faked segment for the + * USER boot code identified by a FVMFILE_COMMAND in the mach header. + */ + if (!this && strcmp(seg_name, fvm_seg->segname) == 0) + this = fvm_seg; + + return this; +} + +/* + * This routine returns the a pointer the section structure of the named + * section in the named segment if it exist in the mach executable it is + * linked into. Otherwise it returns zero. 
+ */ +#ifdef MACH_BSD +__private_extern__ +#endif +struct section * +getsectbyname( + char *segname, + char *sectname) +{ + return(getsectbynamefromheader( + (struct mach_header *)&_mh_execute_header, segname, sectname)); +} + +#ifdef MACH_BSD +__private_extern__ +#endif +struct section *firstsect(struct segment_command *sgp) +{ + struct section *sp; + + if (!sgp || sgp->nsects == 0) + return (struct section *)0; + + return (struct section *)(sgp+1); +} + +#ifdef MACH_BSD +__private_extern__ +#endif +struct section *nextsect(struct segment_command *sgp, struct section *sp) +{ + struct section *fsp = firstsect(sgp); + + if (sp - fsp >= sgp->nsects-1) + return (struct section *)0; + + return sp+1; +} + +static struct fvmfile_command *fvmfilefromheader(struct mach_header *header) +{ + struct fvmfile_command *fvp; + int i; + + fvp = (struct fvmfile_command *) + ((char *)header + sizeof(struct mach_header)); + for (i = 0; i < header->ncmds; i++){ + if (fvp->cmd == LC_FVMFILE) + return fvp; + fvp = (struct fvmfile_command *)((char *)fvp + fvp->cmdsize); + } + return (struct fvmfile_command *)0; +} + +/* + * Create a fake USER seg if a fvmfile_command is present. 
 */
#ifdef MACH_BSD
__private_extern__
#endif
struct segment_command *getfakefvmseg(void)
{
	struct segment_command *sgp = getsegbyname("__USER");
	struct fvmfile_command *fvp = fvmfilefromheader(&_mh_execute_header);
	struct section *sp;

	/* A __USER segment (real or previously faked) already exists. */
	if (sgp)
		return sgp;

	/* No LC_FVMFILE command, so there is nothing to fake up. */
	if (!fvp)
		return (struct segment_command *)0;

	/* Publish the static template and fill in its runtime fields. */
	fvm_seg = &fvm_data.seg;
	sgp = fvm_seg;
	sp = &fvm_data.sect;

	sgp->vmaddr = fvp->header_addr;
	sgp->vmsize = getsizeofmacho((struct mach_header *)(sgp->vmaddr));

	/*
	 * NOTE(review): sectname is a fixed-size field; strcpy overflows
	 * it if the fvmfile name is longer -- confirm the name source is
	 * bounded or use a bounded copy.
	 */
	strcpy(sp->sectname, fvp->name.ptr);
	sp->addr = sgp->vmaddr;
	sp->size = sgp->vmsize;

#if DEBUG
	printf("fake fvm seg __USER/\"%s\" at 0x%x, size 0x%x\n",
		sp->sectname, sp->addr, sp->size);
#endif /*DEBUG*/
	return sgp;
}

/*
 * Figure out the size of the data associated with a loaded mach_header:
 * the largest fileoff + filesize over all of its segments.
 */
static vm_offset_t getsizeofmacho(struct mach_header *header)
{
	struct segment_command *sgp;
	struct section *sp;		/* unused -- kept for compatibility */
	vm_offset_t last_addr;

	last_addr = 0;
	for (  sgp = firstsegfromheader(header)
	     ; sgp
	     ; sgp = nextsegfromheader(header, sgp))
	{
		if (sgp->fileoff + sgp->filesize > last_addr)
			last_addr = sgp->fileoff + sgp->filesize;
	}

	return last_addr;
}

#ifdef MACH_KDB
/*
 * This routine returns the section command for the symbol table in the
 * named segment for the mach_header pointer passed to it if it exists.
 * Otherwise it returns zero.
 */
struct symtab_command *
getsectcmdsymtabfromheader(
	struct mach_header *mhp)
{
	struct segment_command *sgp;
	struct section *sp;		/* unused -- kept for compatibility */
	long i;

	/* Walk the load commands looking for the LC_SYMTAB entry. */
	sgp = (struct segment_command *)
		((char *)mhp + sizeof(struct mach_header));
	for(i = 0; i < mhp->ncmds; i++){
		if(sgp->cmd == LC_SYMTAB)
			return((struct symtab_command *)sgp);
		sgp = (struct segment_command *)((char *)sgp + sgp->cmdsize);
	}
	return(NULL);
}

/*
 * Locate the symbol and string tables of `header'.  On success returns
 * TRUE and fills in the table addresses/sizes through the out pointers;
 * returns FALSE for a bad magic, a missing __LINKEDIT segment, a missing
 * LC_SYMTAB command, or empty tables.
 */
boolean_t getsymtab(struct mach_header *header,
			vm_offset_t *symtab,
			int *nsyms,
			vm_offset_t *strtab,
			vm_size_t *strtabsize)
{
	struct segment_command *seglink_cmd;
	struct symtab_command *symtab_cmd;

	seglink_cmd = NULL;

	if(header->magic != MH_MAGIC) {				/* Check if this is a valid header format */
		printf("Attempt to use invalid header (magic = %08X) to find symbol table\n",
			header->magic);				/* Tell them what's wrong */
		return (FALSE);					/* Bye y'all... */
	}

	seglink_cmd = getsegbynamefromheader(header,"__LINKEDIT");
	if (seglink_cmd == NULL) {
		return(FALSE);
	}

	symtab_cmd = NULL;
	symtab_cmd = getsectcmdsymtabfromheader(header);
	if (symtab_cmd == NULL)
		return(FALSE);

	*nsyms = symtab_cmd->nsyms;
	if(symtab_cmd->nsyms == 0) return (FALSE);	/* No symbols */

	*strtabsize = symtab_cmd->strsize;
	if(symtab_cmd->strsize == 0) return (FALSE);	/* Symbol length is 0 */

	/*
	 * Both tables live in __LINKEDIT; translate their file offsets to
	 * virtual addresses using the segment's vmaddr/fileoff delta.
	 */
	*symtab = seglink_cmd->vmaddr + symtab_cmd->symoff -
		seglink_cmd->fileoff;

	*strtab = seglink_cmd->vmaddr + symtab_cmd->stroff -
		seglink_cmd->fileoff;

	return(TRUE);
}
#endif

#else

/* Stub used when the real implementation above is compiled out. */
void * getsegdatafromheader( struct mach_header *mhp, char *segname, int *size)
{
	return 0;
}

#endif
diff --git a/osfmk/mach-o/mach_header.h b/osfmk/mach-o/mach_header.h
new file mode 100644
index 000000000..8cf484869
--- /dev/null
+++ b/osfmk/mach-o/mach_header.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: kern/mach_header.h + * + * Definitions for accessing mach-o headers. + * + * HISTORY + * 29-Jan-92 Mike DeMoney (mike@next.com) + * Made into machine independent form from machdep/m68k/mach_header.h. + * Ifdef'ed out most of this since I couldn't find any references. 
+ */ + +#ifndef _KERN_MACH_HEADER_ +#define _KERN_MACH_HEADER_ + +#include +#include + +#if MACH_KERNEL +struct mach_header **getmachheaders(void); +vm_offset_t getlastaddr(void); + +struct segment_command *firstseg(void); +struct segment_command *firstsegfromheader(struct mach_header *header); +struct segment_command *nextseg(struct segment_command *sgp); +struct segment_command *nextsegfromheader( + struct mach_header *header, + struct segment_command *seg); +struct segment_command *getsegbyname(char *seg_name); +struct segment_command *getsegbynamefromheader( + struct mach_header *header, + char *seg_name); +void *getsegdatafromheader(struct mach_header *, char *, int *); +struct section *getsectbyname(char *seg_name, char *sect_name); +struct section *getsectbynamefromheader( + struct mach_header *header, + char *seg_name, + char *sect_name); +void *getsectdatafromheader(struct mach_header *, char *, char *, int *); +struct section *firstsect(struct segment_command *sgp); +struct section *nextsect(struct segment_command *sgp, struct section *sp); +struct fvmlib_command *fvmlib(void); +struct fvmlib_command *fvmlibfromheader(struct mach_header *header); +struct segment_command *getfakefvmseg(void); +#ifdef MACH_KDB +struct symtab_command *getsectcmdsymtabfromheader(struct mach_header *); +boolean_t getsymtab(struct mach_header *, vm_offset_t *, int *, + vm_offset_t *, vm_size_t *); +#endif + +#endif /* KERNEL */ + +#endif /* _KERN_MACH_HEADER_ */ diff --git a/osfmk/mach/AT386/machdep.mk b/osfmk/mach/AT386/machdep.mk new file mode 100644 index 000000000..59c45cdbd --- /dev/null +++ b/osfmk/mach/AT386/machdep.mk @@ -0,0 +1,35 @@ +# +# @OSF_COPYRIGHT@ +# +# +# HISTORY +# +# Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez +# Import of Mac OS X kernel (~semeria) +# +# Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez +# Import of OSF Mach kernel (~mburg) +# +# Revision 1.1.6.1 1994/09/23 02:33:31 ezf +# change marker to not FREE +# [1994/09/22 21:38:48 ezf] +# +# Revision 
1.1.2.2 1993/08/04 19:32:26 gm +# CR9605: Add SUBDIRS to mach_kernel build process. +# [1993/08/03 13:30:04 gm] +# +# $EndLog$ + +T_M_FILES = ${MACH_I386_FILES} + +MACH_I386_FILES = mach_i386_server.c mach_i386_server.h + +.ORDER: ${MACH_I386_FILES} + +${MACH_I386_FILES}: mach/i386/mach_i386.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader mach_i386_server.h \ + -server mach_i386_server.c \ + ${mach/i386/mach_i386.defs:P} diff --git a/osfmk/mach/Makefile b/osfmk/mach/Makefile new file mode 100644 index 000000000..833dcca3c --- /dev/null +++ b/osfmk/mach/Makefile @@ -0,0 +1,309 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + machine + +INSTINC_SUBDIRS_PPC = \ + ppc + +INSTINC_SUBDIRS_I386 = \ + i386 + +EXPINC_SUBDIRS = \ + machine + +EXPINC_SUBDIRS_PPC = \ + ppc + +EXPINC_SUBDIRS_I386 = \ + i386 + +MIG_TYPES = \ + clock_types.defs \ + mach_types.defs \ + std_types.defs + +MIG_DEFS = \ + bootstrap.defs \ + clock.defs \ + clock_priv.defs \ + clock_reply.defs \ + exc.defs \ + host_priv.defs \ + host_security.defs \ + ledger.defs \ + lock_set.defs \ + mach_host.defs \ + mach_port.defs \ + notify.defs \ + processor.defs \ + processor_set.defs \ + task.defs \ + thread_act.defs \ + vm_map.defs \ + upl.defs + +MACH_PRIVATE_DEFS = \ + memory_object.defs \ + memory_object_control.defs \ + memory_object_default.defs \ + memory_object_name.defs + +# +# MIG-generated headers that are traditionally used by user +# level code. 
+# +MIG_USHDRS = \ + clock_reply_server.h \ + exc_server.h \ + memory_object_server.h \ + memory_object_default_server.h \ + notify_server.h + +MIG_UUHDRS = \ + bootstrap.h \ + clock.h \ + clock_priv.h \ + host_priv.h \ + host_security.h \ + ledger.h \ + lock_set.h \ + mach_host.h \ + mach_port.h \ + memory_object_control.h \ + memory_object_name.h \ + processor.h \ + processor_set.h \ + task.h \ + thread_act.h \ + upl.h \ + vm_map.h + +MIGINCLUDES = ${MIG_UUHDRS} ${MIG_USHDRS} + +DATAFILES = \ + boolean.h \ + boot_info.h \ + clock_types.h \ + error.h \ + exception.h \ + exception_types.h \ + host_info.h \ + host_reboot.h \ + kern_return.h \ + kmod.h \ + mach_ioctl.h \ + mach_param.h \ + mach_time.h \ + mach_traps.h \ + mach_types.h \ + machine.h \ + mach_syscalls.h \ + memory_object_types.h \ + message.h \ + mig.h \ + mig_errors.h \ + mig_log.h \ + ndr.h \ + notify.h \ + policy.h \ + port.h \ + processor_info.h \ + rpc.h \ + semaphore.h \ + shared_memory_server.h \ + std_types.h \ + sync_policy.h \ + syscall_sw.h \ + task_info.h \ + task_ledger.h \ + task_policy.h \ + task_special_ports.h \ + thread_info.h \ + thread_policy.h \ + thread_special_ports.h \ + thread_status.h \ + thread_switch.h \ + time_value.h \ + vm_attributes.h \ + vm_behavior.h \ + vm_inherit.h \ + vm_param.h \ + vm_prot.h \ + vm_region.h \ + vm_statistics.h \ + vm_sync.h \ + vm_types.h \ + ${MIG_TYPES} \ + ${MIG_DEFS} + +INSTALL_MI_LIST = \ + ${DATAFILES} + +INSTALL_MI_GEN_LIST = + +INSTALL_MI_DIR = mach + +EXPORT_MI_LIST = \ + mach_interface.h \ + etap.h etap_events.h \ + ${DATAFILES} + +EXPORT_MI_GEN_LIST = \ + ${MIGINCLUDES} + +EXPORT_MI_DIR = mach + +.ORDER: ${MIGINCLUDES} + +${MIGINCLUDES} : ${MIG_TYPES} + +.ORDER: ${MIG_UUHDRS} + +${MIG_UUHDRS} : \ + %.h : %.defs + $(MIG) $(MIGFLAGS) \ + -server /dev/null \ + -user /dev/null \ + -header $@ \ + $< + +.ORDER: ${MIG_USHDRS} + +${MIG_USHDRS} : \ + %_server.h : %.defs + $(MIG) $(MIGFLAGS) \ + -server /dev/null \ + -user /dev/null \ + -header 
/dev/null \ + -sheader $@ \ + $< + +# +# Build path +# +COMP_SUBDIRS_I386 = \ + i386 + +INCFLAGS_MAKEFILE= -I.. + +MIGKSFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_SERVER=1 +MIGKUFLAGS = -DMACH_KERNEL_PRIVATE -DKERNEL_USER=1 -maxonstack 1024 +# +# MIG-generated headers that are traditionally used by kernel +# level code. +# +MIG_KUHDRS = \ + clock_reply.h \ + exc.h \ + memory_object.h \ + memory_object_default.h \ + upl.h \ + vm_map.h + +MIG_KUSRC = \ + clock_reply_user.c \ + exc_user.c \ + memory_object_user.c \ + memory_object_default_user.c \ + upl_user.c \ + vm_map_user.c + +MIG_KSHDRS = \ + clock_server.h \ + clock_priv_server.h \ + exc_server.h \ + host_priv_server.h \ + host_security_server.h \ + ledger_server.h \ + lock_set_server.h \ + mach_host_server.h \ + mach_port_server.h \ + memory_object_server.h \ + memory_object_control_server.h \ + memory_object_default_server.h \ + memory_object_name_server.h \ + notify_server.h \ + processor_server.h \ + processor_set_server.h \ + semaphore_server.h \ + task_server.h \ + thread_act_server.h \ + upl_server.h \ + vm_map_server.h + +MIG_KSSRC = \ + clock_server.c \ + clock_priv_server.c \ + exc_server.c \ + host_priv_server.c \ + host_security_server.c \ + ledger_server.c \ + lock_set_server.c \ + mach_host_server.c \ + mach_port_server.c \ + memory_object_server.c \ + memory_object_control_server.c \ + memory_object_default_server.c \ + memory_object_name_server.c \ + notify_server.c \ + processor_server.c \ + processor_set_server.c \ + semaphore_server.c \ + task_server.c \ + thread_act_server.c \ + upl_server.c \ + vm_map_server.c + +# +# JMM - +# Since there are two generated header files with the same name, one for +# install and export, the other for internal use (and they are different) +# we can't explicitly list two rules for the same target. 
So rules for +# generating internal headers will be handled implicitly by creating rules +# to generate the internal C sources, and the headers get created as a +# side-effect. +# +# +# This is all temporary scaffolding, as we are moving to a model where +# the MIG-generated code is identical in all environments. At first, it +# will contain some environment-specific ifdefs, but over time should not +# even require that as we move towards making all the environments look +# the same. +# +COMP_FILES = ${MIG_KUSRC} ${MIG_KSSRC} + +${COMP_FILES} : ${MIG_TYPES} + +.ORDER: ${MIG_KUSRC} + +${MIG_KUSRC} : \ + %_user.c : %.defs + ${MIG} ${MIGFLAGS} ${MIGKUFLAGS} \ + -user $*_user.c \ + -header $*.h \ + -server /dev/null \ + -sheader /dev/null \ + $< + +.ORDER: ${MIG_KSSRC} + +${MIG_KSSRC}: \ + %_server.c : %.defs + ${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ + -user /dev/null \ + -header /dev/null \ + -server $*_server.c \ + -sheader $*_server.h \ + $< + + +include $(MakeInc_rule) +include $(MakeInc_dir) diff --git a/osfmk/mach/Makefile.template b/osfmk/mach/Makefile.template new file mode 100644 index 000000000..044b05036 --- /dev/null +++ b/osfmk/mach/Makefile.template @@ -0,0 +1,174 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MIGKSFLAGS = -DKERNEL_SERVER +MIGKUFLAGS = -DKERNEL_USER -maxonstack 1024 + +MACH_FILES = mach_server.h mach_server.c + +MACH_PORT_FILES =mach_port_server.h mach_port_server.c + +EXC_FILES = exc_user.h exc_user.c exc_server.c + +MEMORY_OBJECT_FILES = memory_object_user.h memory_object_user.c + +MEMORY_OBJECT_DEFAULT_FILES = memory_object_default.h memory_object_default_user.c + +PROF_FILES = prof_user.c prof.h + +MACH_HOST_FILES = mach_host_server.h mach_host_server.c + +CLOCK_FILES = clock_server.h clock_server.c + +CLOCK_REPLY_FILES = 
clock_reply.h clock_reply_user.c + +BOOTSTRAP_FILES = bootstrap_server.h bootstrap_server.c + +LEDGER_FILES = ledger_user.c ledger_server.h ledger_server.c + +SYNC_FILES = sync_server.h sync_server.c + +MACH_USER_FILES = mach_user.h mach_user.c + +OTHERS = ${MACH_FILES} ${MACH_PORT_FILES} \ + ${EXC_FILES} \ + ${MEMORY_OBJECT_FILES} ${MEMORY_OBJECT_DEFAULT_FILES} \ + ${PROF_FILES} ${MACH_HOST_FILES} ${LEDGER_FILES} \ + ${CLOCK_FILES} ${CLOCK_REPLY_FILES} ${BOOTSTRAP_FILES} \ + ${BOOTSTRAP_FILES} ${SYNC_FILES} \ + ${MACH_USER_FILES} + +.ORDER: ${MACH_FILES} + +${MACH_FILES}: mach/mach.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader mach_server.h \ + -server mach_server.c \ + $< + +.ORDER: ${MACH_PORT_FILES} + +${MACH_PORT_FILES}: mach/mach_port.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader mach_port_server.h \ + -server mach_port_server.c \ + $< + +.ORDER: ${EXC_FILES} +${EXC_FILES}: mach/exc.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} \ + -header exc_user.h \ + -user exc_user.c \ + -sheader exc_server.h \ + -server exc_server.c \ + $< + +.ORDER: ${MEMORY_OBJECT_FILES} + +${MEMORY_OBJECT_FILES}: mach/memory_object.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} -DSEQNOS \ + -header memory_object_user.h \ + -user memory_object_user.c \ + -server /dev/null \ + $< + +.ORDER: ${MEMORY_OBJECT_DEFAULT_FILES} + +${MEMORY_OBJECT_DEFAULT_FILES}: mach/memory_object_default.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} -DSEQNOS \ + -header memory_object_default.h \ + -user memory_object_default_user.c \ + -server /dev/null \ + $< + +.ORDER: ${PROF_FILES} + +${PROF_FILES}: mach/prof.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} \ + -header prof.h \ + -iheader prof_internal.h \ + -user prof_user.c \ + -server /dev/null \ + $< + +.ORDER: ${MACH_HOST_FILES} + +${MACH_HOST_FILES}: mach/mach_host.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + 
-sheader mach_host_server.h \ + -server mach_host_server.c \ + $< + +.ORDER: ${CLOCK_FILES} + +${CLOCK_FILES}: mach/clock.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader clock_server.h \ + -server clock_server.c \ + $< + +.ORDER: ${CLOCK_REPLY_FILES} +${CLOCK_REPLY_FILES}: mach/clock_reply.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKUFLAGS} \ + -header clock_reply.h \ + -user clock_reply_user.c \ + -server /dev/null \ + $< + +.ORDER: ${BOOTSTRAP_FILES} + +${BOOTSTRAP_FILES}: mach/bootstrap.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader bootstrap_server.h \ + -server bootstrap_server.c \ + $< + +.ORDER: ${LEDGER_FILES} + +${LEDGER_FILES}: mach/ledger.defs ${MACH_TYPES_DEFS} + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} ${MIGKUFLAGS} \ + -header /dev/null \ + -user ledger_user.c \ + -sheader ledger_server.h \ + -server ledger_server.c \ + $< + +.ORDER: ${SYNC_FILES} + +${SYNC_FILES}: mach/sync.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader sync_server.h \ + -server sync_server.c \ + $< + +${MACH_USER_FILES}: mach/mach.defs + ${_MIG_} -X ${_MIGFLAGS_} ${MIGKUFLAGS} \ + -header mach_user.h \ + -user mach_user.c \ + -server /dev/null \ + $< + + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/mach/alert.h b/osfmk/mach/alert.h new file mode 100644 index 000000000..801aeb6bb --- /dev/null +++ b/osfmk/mach/alert.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.2 1995/01/18 18:35:06 ezf + * updated Utah CR notice + * [1995/01/18 18:30:38 ezf] + * + * Revision 1.1.5.1 1994/09/23 02:33:53 ezf + * change marker to not FREE + * [1994/09/22 21:38:56 ezf] + * + * Revision 1.1.2.1 1994/01/12 17:56:03 dwm + * Coloc: initial restructuring to follow Utah model. + * Alert bit definitions + * [1994/01/12 17:30:19 dwm] + * + * $EndLog$ + */ +/* + * Copyright (c) 1993 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. 
+ * + */ + +#ifndef _MACH_ALERT_H_ +#define _MACH_ALERT_H_ + +#define ALERT_BITS 32 /* Minimum; more may actually be available */ + +/* Request to abort _all_ operations */ +#define ALERT_ABORT_STRONG 0x00000001 + +/* Request to abort restartable operations */ +#define ALERT_ABORT_SAFE 0x00000002 + +/* User-defined alert bits */ +#define ALERT_USER 0xffff0000 + +#endif /* _MACH_ALERT_H_ */ diff --git a/osfmk/mach/boolean.h b/osfmk/mach/boolean.h new file mode 100644 index 000000000..4b41b4462 --- /dev/null +++ b/osfmk/mach/boolean.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:34:07 ezf + * change marker to not FREE + * [1994/09/22 21:39:00 ezf] + * + * Revision 1.2.2.3 1993/08/03 18:22:11 gm + * CR9598: Remove unneeded EXPORT_BOOLEAN and KERNEL ifdefs. 
Move + * the code inside the include protection and remove the boolean_t + * casts from TRUE and FALSE. + * [1993/08/02 17:49:29 gm] + * + * Revision 1.2.2.2 1993/06/09 02:39:27 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:15:31 jeffc] + * + * Revision 1.2 1993/04/19 16:31:43 devrcs + * ansi C conformance changes + * [1993/02/02 18:52:46 david] + * + * Revision 1.1 1992/09/30 02:30:33 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 16:51:06 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:31:38 mrt + * Changed to new Mach copyright + * [91/02/01 17:16:36 mrt] + * + * Revision 2.1 89/08/03 15:59:35 rwd + * Created. + * + * Revision 2.4 89/02/25 18:12:08 gm0w + * Changes for cleanup. + * + * Revision 2.3 89/02/07 00:51:34 mwyoung + * Relocated from sys/boolean.h + * + * Revision 2.2 88/08/24 02:23:06 mwyoung + * Adjusted include file references. + * [88/08/17 02:09:46 mwyoung] + * + * + * 18-Nov-87 Avadis Tevanian (avie) at Carnegie-Mellon University + * Header file fixup, purge history. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/boolean.h + * + * Boolean data type. + * + */ + +#ifndef BOOLEAN_H_ +#define BOOLEAN_H_ + +/* + * Pick up "boolean_t" type definition + */ + +#ifndef ASSEMBLER +#include +#endif /* ASSEMBLER */ + +/* + * Define TRUE and FALSE, only if they haven't been before, + * and not if they're explicitly refused. + */ + +#ifndef NOBOOL + +#ifndef TRUE +#define TRUE 1 +#endif /* TRUE */ + +#ifndef FALSE +#define FALSE 0 +#endif /* FALSE */ + +#endif /* !defined(NOBOOL) */ + +#endif /* BOOLEAN_H_ */ diff --git a/osfmk/mach/boot_info.h b/osfmk/mach/boot_info.h new file mode 100644 index 000000000..9c9555187 --- /dev/null +++ b/osfmk/mach/boot_info.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.16.4 1996/01/09 19:21:29 devrcs + * Added bootstrap map for alpha. + * This probably should be moved to some MD include file. + * It's not really machine dependent, just a different + * way of doing things. + * [1995/12/01 19:49:04 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:08:36 jfraser] + * + * Revision 1.2.16.3 1995/01/06 19:50:04 devrcs + * mk6 CR668 - 1.3b26 merge + * Added region_desc structure; + * [1994/10/14 03:42:28 dwm] + * + * Revision 1.2.16.2 1994/09/23 02:34:18 ezf + * change marker to not FREE + * [1994/09/22 21:39:03 ezf] + * + * Revision 1.2.16.1 1994/06/13 20:49:19 dlb + * Merge MK6 and NMK17 + * [1994/06/13 20:24:22 dlb] + * + * Revision 1.2.14.1 1994/02/08 11:01:06 bernadat + * Checked in NMK16_1 changes + * [94/02/04 bernadat] + * + * Revision 1.2.12.1 1993/12/23 08:56:06 bernadat + * Added COFF_F. + * [93/11/29 bernadat] + * + * Revision 1.2.3.2 1993/06/24 16:14:07 gm + * CR9371: Moved here from default_pager. + * [1993/06/24 16:08:52 gm] + * + * Revision 1.2.3.2 1993/06/09 02:10:53 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 20:41:54 gm] + * + * Revision 1.2 1993/04/19 16:31:50 devrcs + * Added ROSE support: we need several symbol table entries since ROSE + * can have up to 3 symbol section. + * [93/03/24 bruel] + * + * Use free copyright + * [1993/03/03 12:12:37 bernadat] + * + * Fixed History Revision Comments + * [93/02/24 bernadat] + * + * Created for external default pager. + * [1993/02/09 15:40:42 bruel] + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2 92/01/03 20:19:42 dbg + * Created. 
+ * [91/09/06 dbg] + * + */ +/* CMU_ENDHIST */ +/* + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#ifndef _MACH_BOOT_INFO_H_ +#define _MACH_BOOT_INFO_H_ + +#include +#include +#include + +/* + * The boot loader uses several instances of the following structure to + * provide a description of the bootstrap task virtual address space, + * consisting of memory regions that need to be mapped from physical pages + * filled by the boot loader, or regions that need to be allocated in the + * new address space. 
+ */ +struct region_desc { + vm_offset_t addr; /* virtual address */ + vm_offset_t offset; /* offset within object */ + vm_size_t size; /* size */ + vm_prot_t prot; /* protection */ + boolean_t mapped; /* mapped or zero-filled */ +}; + +#ifdef __alpha +typedef long physaddr_t; +#define NBOOT_REGIONS 5 + +#define TEXT 0 +#define DATA 1 +#define BSS 2 +#define STACK 3 +#define SYMS 4 + +struct bootstrap_map { + physaddr_t boot_location; + long boot_size; + long boot_entry; + long boot_gp_value; + long boot_region_count; + struct region_desc boot_regions[NBOOT_REGIONS]; +}; + +#endif /* __alpha */ + +#define BOOT_INFO_COMPAT 1 +#if BOOT_INFO_COMPAT +/* + * Old compat code for makeboot produced images + */ + +/* + * Structure of Mach kernel boot file. + */ +#include + +/* + * A Mach kernel boot file consists of the Mach + * kernel image and the bootstrap image, glued + * together. + * + * The first part of the file is a normal executable + * (bootable) file: a.out, coff, or whatever. The + * text and data sizes are set to include the entire + * file. (Some machines do not allow a zero-length + * data segment). + * + * The rest of the file sits where the kernel BSS + * should be. A boot_info record describes the + * sizes of the next 3 sections. Following this + * are the kernel symbol table, the bootstrap image + * (including its symbol table), and the loader + * information for the bootstrap image. Each + * of these sections is padded to an integer (4-byte) + * boundary. + * + * When the file is loaded into memory, the kernel + * text and data segments are at their normal locations. 
+ * + * The boot_info structure appears at the start of + * the bss (at 'edata[]'): + */ + +struct boot_info { + vm_size_t sym_size; /* size of kernel symbols */ + vm_size_t boot_size; /* size of bootstrap image */ + vm_size_t load_info_size; /* size of loader information + for bootstrap image */ +}; + +/* + * The 3 sections must be moved out of BSS for the kernel to run: + * + * The kernel symbol table follows the BSS (at 'end[]'). + * + * The bootstrap image is on the first page boundary (machine page + * size) following the kernel symbol table. + * + * The loader information immediately follows the bootstrap image. + */ + +/* + * Loader information for bootstrap image: + */ + +#define AOUT_F 1 +#define ROSE_F 2 +#define COFF_F 3 + +struct loader_info { + int format; /* symbol table format (A.OUT or ROSE) */ + vm_offset_t text_start; /* text start in memory */ + vm_size_t text_size; /* text size */ + vm_offset_t text_offset; /* text offset in file */ + vm_offset_t data_start; /* data+bss start in memory */ + vm_size_t data_size; /* data size */ + vm_offset_t data_offset; /* data offset in file */ + vm_size_t bss_size; /* BSS size */ + vm_offset_t str_offset; /* strings table offset in file */ + vm_size_t str_size; /* strings table size */ + vm_offset_t sym_offset[4]; /* symbol table offset in file */ + vm_size_t sym_size[4]; /* symbol table size */ + vm_offset_t entry_1; /* 2 words for entry address */ + vm_offset_t entry_2; +} ; + +#define EX_NOT_EXECUTABLE 6000 + +#endif /* BOOT_INFO_COMPAT */ + +#endif /* _MACH_BOOT_INFO_H_ */ diff --git a/osfmk/mach/bootstrap.defs b/osfmk/mach/bootstrap.defs new file mode 100644 index 000000000..3edee9df0 --- /dev/null +++ b/osfmk/mach/bootstrap.defs @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + bootstrap 1000001; /* 999999 + 2 skips */ + +#include +#include + +ServerPrefix do_; + +/* + * Objects to references of type bootstrap_t are returned by: + * task_get_special_port(task_t,...); + * task_get_bootstrap(task_t,...); + */ + +/* + * A task can make this call on its bootstrap port + * to get its privileged ports. + */ +routine bootstrap_ports( + bootstrap : bootstrap_t; + out priv_host : mach_port_t; + out device_master : mach_port_t; + out wired_ledger : mach_port_t; + out paged_ledger : mach_port_t; + out host_security : mach_port_t); + +/* + * A task can use this call to get its argument strings. + */ +routine bootstrap_arguments( + bootstrap : bootstrap_t; + task : task_t; + out arguments : pointer_t, Dealloc); + +/* + * A task can use this call to get its environment strings. 
+ */ +routine bootstrap_environment( + bootstrap : bootstrap_t; + task : task_t; + out environment : pointer_t, Dealloc); + +/* + * A task can use this call to indicate to its bootstrapper that it + * is done with its startup processing. This call is used when the + * task is important enough so that the bootstrap process wants to + * wait for it to come up before continuing with other things. + */ +simpleroutine bootstrap_completed( + bootstrap : bootstrap_t; + task : task_t); + diff --git a/osfmk/mach/clock.defs b/osfmk/mach/clock.defs new file mode 100644 index 000000000..5c94e007e --- /dev/null +++ b/osfmk/mach/clock.defs @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: mach/clock.defs + * Purpose: Kernel clock subsystem definitions. This + * file defines the clock request interface. + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + clock 1000; + +#include +#include +#include + +/* + * References to clock objects are returned by: + * host_get_clock_service(host_t,...) 
+ * host_get_clock_control(host_priv_t,...) - Priviledged subclass + */ + +/* + * Get the clock time. + * Available to all. + */ +routine clock_get_time( + clock_serv : clock_serv_t; + out cur_time : mach_timespec_t); + +/* + * Get clock attributes. + * Available to all. + */ +routine clock_get_attributes( + clock_serv : clock_serv_t; + in flavor : clock_flavor_t; + out clock_attr : clock_attr_t, CountInOut); + +/* + * Setup a clock alarm. + * Available to all. + */ +routine clock_alarm( + clock_serv : clock_serv_t; + alarm_type : alarm_type_t; + alarm_time : mach_timespec_t; + alarm_port : clock_reply_t = + MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic); diff --git a/osfmk/mach/clock_priv.defs b/osfmk/mach/clock_priv.defs new file mode 100644 index 000000000..e9be2636f --- /dev/null +++ b/osfmk/mach/clock_priv.defs @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: mach/clock_priv.defs + * Purpose: Kernel clock subsystem definitions. This + * file defines the clock request interface. 
+ */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + clock_priv 1200; + +#include +#include +#include + +/* + * References to clock_priv objects are returned by: + * host_get_clock_control(host_priv_t,...) - Priviledged subclass + */ + +/* + * Set the clock time. + * Privileged. + */ +routine clock_set_time( + clock_ctrl : clock_ctrl_t; + new_time : mach_timespec_t); + +/* + * Set clock attributes. + * Privileged. + */ +routine clock_set_attributes( + clock_ctrl : clock_ctrl_t; + in flavor : clock_flavor_t; + in clock_attr : clock_attr_t); + diff --git a/osfmk/mach/clock_reply.defs b/osfmk/mach/clock_reply.defs new file mode 100644 index 000000000..c96663319 --- /dev/null +++ b/osfmk/mach/clock_reply.defs @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: clock_reply.defs + * Purpose: Kernel clock subsystem definitions. This + * file defines the clock reply interface. 
+ */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ + clock_reply 3125107; /* Matches up with old value */ + +#include +#include + + +/* + * Reply routine for clock_alarm. + */ +simpleroutine clock_alarm_reply( + alarm_port : clock_reply_t; + alarm_code : kern_return_t; + alarm_type : alarm_type_t; + alarm_time : mach_timespec_t); diff --git a/osfmk/mach/clock_types.defs b/osfmk/mach/clock_types.defs new file mode 100644 index 000000000..840926dc8 --- /dev/null +++ b/osfmk/mach/clock_types.defs @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: clock_types.defs + * Purpose: + * Clock kernel interface type declarations + */ + +#ifndef _MACH_CLOCK_TYPES_DEFS_ +#define _MACH_CLOCK_TYPES_DEFS_ + +#include + +type clock_serv_t = mach_port_t + ctype: clock_serv_t +#if KERNEL_SERVER + intran: clock_serv_t convert_port_to_clock(mach_port_t) + outtran: mach_port_t convert_clock_to_port(clock_serv_t) +#endif /* KERNEL_SERVER */ + ; + +type clock_ctrl_t = mach_port_t + ctype: clock_ctrl_t +#if KERNEL_SERVER + intran: clock_ctrl_t convert_port_to_clock_ctrl(mach_port_t) + outtran: mach_port_t convert_clock_ctrl_to_port(clock_ctrl_t) +#endif /* KERNEL_SERVER */ + ; + +type clock_reply_t = polymorphic|MACH_MSG_TYPE_MAKE_SEND_ONCE; + +type clock_flavor_t = int; +type clock_attr_t = array[*:1] of int; +type mach_timespec_t = struct[2] of int; +type time_t = int; +type sleep_type_t = int; +type alarm_type_t = int; +type clock_res_t = int; +type clock_id_t = int; + +import ; + +#endif /* _MACH_CLOCK_TYPES_DEFS_ */ diff --git a/osfmk/mach/clock_types.h b/osfmk/mach/clock_types.h new file mode 100644 index 000000000..701bf84c7 --- /dev/null +++ b/osfmk/mach/clock_types.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File: clock_types.h + * Purpose: Clock facility header definitions. These + * definitons are needed by both kernel and + * user-level software. + */ + +/* + * N.B. This interface has been deprecated and the contents + * of this file should be considered obsolete. + */ + +#ifndef _MACH_CLOCK_TYPES_H_ +#define _MACH_CLOCK_TYPES_H_ + +#include + +/* + * Reserved clock id values for default clocks. + */ +#define SYSTEM_CLOCK 0 /* advances monotonically and + * uniformly; set to zero at boot */ +#define CALENDAR_CLOCK 1 /* 'wall' clock; effectively + * synchronized to UTC */ + +#define REALTIME_CLOCK 0 /* obsolete; use SYSTEM or CALENDAR + * clock depending on particular + * requirements */ + +/* + * Type definitions. + */ +typedef int alarm_type_t; /* alarm time type */ +typedef int sleep_type_t; /* sleep time type */ +typedef int clock_id_t; /* clock identification type */ +typedef int clock_flavor_t; /* clock flavor type */ +typedef int *clock_attr_t; /* clock attribute type */ +typedef int clock_res_t; /* clock resolution type */ + +/* + * Attribute names. + */ +#define CLOCK_GET_TIME_RES 1 /* get_time call resolution */ +/* 2 * was map_time call resolution */ +#define CLOCK_ALARM_CURRES 3 /* current alarm resolution */ +#define CLOCK_ALARM_MINRES 4 /* minimum alarm resolution */ +#define CLOCK_ALARM_MAXRES 5 /* maximum alarm resolution */ + +/* + * Normal time specification used by the kernel clock facility. 
+ */ +struct mach_timespec { + unsigned int tv_sec; /* seconds */ + clock_res_t tv_nsec; /* nanoseconds */ +}; +typedef struct mach_timespec mach_timespec_t; + +#define NSEC_PER_USEC 1000 /* nanoseconds per microsecond */ +#define USEC_PER_SEC 1000000 /* microseconds per second */ +#define NSEC_PER_SEC 1000000000 /* nanoseconds per second */ + +#define BAD_MACH_TIMESPEC(t) \ + ((t)->tv_nsec < 0 || (t)->tv_nsec >= NSEC_PER_SEC) + +/* t1 <=> t2, also (t1 - t2) in nsec with max of +- 1 sec */ +#define CMP_MACH_TIMESPEC(t1, t2) \ + ((t1)->tv_sec > (t2)->tv_sec ? +NSEC_PER_SEC : \ + ((t1)->tv_sec < (t2)->tv_sec ? -NSEC_PER_SEC : \ + (t1)->tv_nsec - (t2)->tv_nsec)) + +/* t1 += t2 */ +#define ADD_MACH_TIMESPEC(t1, t2) \ + do { \ + if (((t1)->tv_nsec += (t2)->tv_nsec) >= NSEC_PER_SEC) { \ + (t1)->tv_nsec -= NSEC_PER_SEC; \ + (t1)->tv_sec += 1; \ + } \ + (t1)->tv_sec += (t2)->tv_sec; \ + } while (0) + +/* t1 -= t2 */ +#define SUB_MACH_TIMESPEC(t1, t2) \ + do { \ + if (((t1)->tv_nsec -= (t2)->tv_nsec) < 0) { \ + (t1)->tv_nsec += NSEC_PER_SEC; \ + (t1)->tv_sec -= 1; \ + } \ + (t1)->tv_sec -= (t2)->tv_sec; \ + } while (0) + +/* + * Alarm parameter defines. + */ +#define ALRMTYPE 0xff /* type (8-bit field) */ +#define TIME_ABSOLUTE 0x00 /* absolute time */ +#define TIME_RELATIVE 0x01 /* relative time */ + +#define BAD_ALRMTYPE(t) (((t) &~ TIME_RELATIVE) != 0) + +#endif /* _MACH_CLOCK_TYPES_H_ */ diff --git a/osfmk/mach/error.h b/osfmk/mach/error.h new file mode 100644 index 000000000..2f6d6c4cb --- /dev/null +++ b/osfmk/mach/error.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.10.2 1995/02/23 17:51:15 alanl + * Merge with DIPC2_SHARED. + * [1995/01/03 21:49:04 alanl] + * + * Revision 1.2.10.1 1994/09/23 02:35:28 ezf + * change marker to not FREE + * [1994/09/22 21:39:26 ezf] + * + * Revision 1.2.8.1 1994/08/04 02:27:36 mmp + * NOTE: file was moved back to b11 version for dipc2_shared. + * Added DIPC error system. + * [1994/05/11 17:36:37 alanl] + * + * Revision 1.2.2.3 1993/08/12 21:59:50 jvs + * Correctly prototype mach_error_fn_t typedef. 9523 + * [1993/08/12 21:57:56 jvs] + * + * Revision 1.2.2.2 1993/06/09 02:39:58 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:15:47 jeffc] + * + * Revision 1.2 1993/04/19 16:33:02 devrcs + * make endif tags ansi compliant/include files + * [1993/02/20 21:44:37 david] + * + * Revision 1.1 1992/09/30 02:30:35 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:51:24 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:31:48 mrt + * Changed to new Mach copyright + * [91/02/01 17:16:50 mrt] + * + * Revision 2.2 90/06/02 14:57:47 rpd + * Added err_mach_ipc for new IPC. 
+ * [90/03/26 22:28:42 rpd] + * + * Revision 2.1 89/08/03 16:02:07 rwd + * Created. + * + * Revision 2.4 89/02/25 18:13:18 gm0w + * Changes for cleanup. + * + * Revision 2.3 89/02/07 00:51:57 mwyoung + * Relocated from sys/error.h + * + * Revision 2.2 88/10/18 00:37:31 mwyoung + * Added {system,sub and code}_emask + * [88/10/17 17:06:58 mrt] + * + * Added {system,sub and code}_emask + * + * 12-May-88 Mary Thompson (mrt) at Carnegie Mellon + * Changed mach_error_t from unsigned int to kern_return_t + * which is a 32 bit integer regardless of machine type. + * insigned int was incompatible with old usages of mach_error. + * + * 10-May-88 Douglas Orr (dorr) at Carnegie-Mellon University + * Missing endif replaced + * + * 5-May-88 Mary Thompson (mrt) at Carnegie Mellon + * Changed typedef of mach_error_t from long to unsigned int + * to keep our Camelot users happy. Also moved the nonkernel + * function declarations from here to mach_error.h. + * + * 10-Feb-88 Douglas Orr (dorr) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/error.h + * Purpose: + * error module definitions + * + */ + +#ifndef ERROR_H_ +#define ERROR_H_ +#include + +/* + * error number layout as follows: + * + * hi lo + * | system(6) | subsystem(12) | code(14) | + */ + + +#define err_none (mach_error_t)0 +#define ERR_SUCCESS (mach_error_t)0 +#define ERR_ROUTINE_NIL (mach_error_fn_t)0 + + +#define err_system(x) (((x)&0x3f)<<26) +#define err_sub(x) (((x)&0xfff)<<14) + +#define err_get_system(err) (((err)>>26)&0x3f) +#define err_get_sub(err) (((err)>>14)&0xfff) +#define err_get_code(err) ((err)&0x3fff) + +#define system_emask (err_system(0x3f)) +#define sub_emask (err_sub(0xfff)) +#define code_emask (0x3fff) + + +/* major error systems */ +#define err_kern err_system(0x0) /* kernel */ +#define err_us err_system(0x1) /* user space library */ +#define err_server err_system(0x2) /* user space servers */ +#define err_ipc err_system(0x3) /* old ipc errors */ +#define err_mach_ipc err_system(0x4) /* mach-ipc errors */ +#define err_dipc err_system(0x7) /* distributed ipc */ +#define err_local err_system(0x3e) /* user defined errors */ +#define err_ipc_compat err_system(0x3f) /* (compatibility) mach-ipc errors */ + +#define err_max_system 0x3f + + +/* unix errors get lumped into one subsystem */ +#define unix_err(errno) (err_kern|err_sub(3)|errno) + +typedef kern_return_t mach_error_t; +typedef mach_error_t (* mach_error_fn_t)( void ); + +#endif /* ERROR_H_ */ diff --git a/osfmk/mach/etap.h b/osfmk/mach/etap.h new file mode 100644 index 000000000..a83d30cff --- /dev/null +++ b/osfmk/mach/etap.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2000 
Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File : etap.h + * + * Contains ETAP buffer and table definitions + * + */ + +#ifndef _MACH_ETAP_H_ +#define _MACH_ETAP_H_ + +#include +#include +#include +#include +#include + + +#define ETAP_CBUFF_ENTRIES 20000 +#define ETAP_CBUFF_IBUCKETS 10 +#define ETAP_CBUFF_WIDTH 80 + +#define ETAP_MBUFF_ENTRIES 28000 +#define ETAP_MBUFF_DATASIZE 4 + + +/* =================================== + * Event & Subsystem Table Definitions + * =================================== + */ + +#define EVENT_NAME_LENGTH 20 /* max event name size */ + +struct event_table_entry { + unsigned short event; /* etap event type */ + unsigned short status; /* event trace status */ + char name [EVENT_NAME_LENGTH]; /* event text name */ + unsigned short dynamic; /* dynamic ID (0=none) */ +}; + +struct subs_table_entry { + unsigned short subs; /* etap subsystem type */ + char name [EVENT_NAME_LENGTH]; /* subsystem text name */ +}; + +typedef struct event_table_entry* event_table_t; +typedef struct subs_table_entry* subs_table_t; +typedef unsigned short 
etap_event_t; + +#define EVENT_TABLE_NULL ((event_table_t) 0) + +/* ========= + * ETAP Time + * ========= + */ + +typedef mach_timespec_t etap_time_t; + +/* ============================= + * Cumulative buffer definitions + * ============================= + */ + +/* + * The cbuff_data structure contains cumulative lock + * statistical information for EITHER hold operations + * OR wait operations. + */ + +struct cbuff_data { + unsigned long triggered; /* number of event occurances */ + etap_time_t time; /* sum of event durations */ + etap_time_t time_sq; /* sum of squared durations */ + etap_time_t min_time; /* min duration of event */ + etap_time_t max_time; /* max duration of event */ +}; + +/* + * The cbuff_entry contains all trace data for an event. + * The cumulative buffer consists of these entries. + */ + +struct cbuff_entry { + etap_event_t event; /* event type */ + unsigned short kind; /* read,write,or simple */ + unsigned int instance; /* & of event struct */ + struct cbuff_data hold; /* hold trace data */ + struct cbuff_data wait; /* wait trace data */ + unsigned long hold_interval[ETAP_CBUFF_IBUCKETS]; /* hold interval array */ + unsigned long wait_interval[ETAP_CBUFF_IBUCKETS]; /* wait interval array */ +}; + +typedef struct cbuff_entry* cbuff_entry_t; + +#define CBUFF_ENTRY_NULL ((cbuff_entry_t)0) + +/* + * The cumulative buffer maintains a header which is used by + * both the kernel instrumentation and the ETAP user-utilities. 
+ */ + +struct cumulative_buffer { + unsigned long next; /* next available entry in buffer */ + unsigned short static_start; /* first static entry in buffer */ + struct cbuff_entry entry [ETAP_CBUFF_ENTRIES]; /* buffer entries */ +}; + +typedef struct cumulative_buffer* cumulative_buffer_t; + + +/* =========================== + * ETAP probe data definitions + * =========================== + */ + +typedef unsigned int etap_data_t[ETAP_MBUFF_DATASIZE]; + +#define ETAP_DATA_ENTRY sizeof(unsigned int) +#define ETAP_DATA_SIZE ETAP_DATA_ENTRY * ETAP_MBUFF_DATASIZE +#define ETAP_DATA_NULL (etap_data_t*) 0 + +/* ========================== + * Monitor buffer definitions + * ========================== + */ + +/* + * The mbuff_entry structure contains trace event instance data. + */ + +struct mbuff_entry { + unsigned short event; /* event type */ + unsigned short flags; /* event strain flags */ + unsigned int instance; /* address of event (lock, thread, etc.) */ + unsigned int pc; /* program counter */ + etap_time_t time; /* operation time */ + etap_data_t data; /* event specific data */ +}; + +typedef struct mbuff_entry* mbuff_entry_t; + +/* + * Each circular monitor buffer will contain maintanence + * information and mon_entry records. 
+ */ + +struct monitor_buffer { + unsigned long free; /* index of next available record */ + unsigned long timestamp; /* timestamp of last wrap around */ + struct mbuff_entry entry[1]; /* buffer entries (holder) */ +}; + +typedef struct monitor_buffer* monitor_buffer_t; + + +/* =================== + * Event strains/flags + * =================== + */ /* | |t|b|e|k|u|m|s|r|w| | | | | */ + /* ----------------------------- */ +#define WRITE_LOCK 0x10 /* | | | | | | | | | |1| | | | | */ +#define READ_LOCK 0x20 /* | | | | | | | | |1| | | | | | */ +#define COMPLEX_LOCK 0x30 /* | | | | | | | | |1|1| | | | | */ +#define SPIN_LOCK 0x40 /* | | | | | | | |1| | | | | | | */ +#define MUTEX_LOCK 0x80 /* | | | | | | |1| | | | | | | | */ +#define USER_EVENT 0x100 /* | | | | | |1| | | | | | | | | */ +#define KERNEL_EVENT 0x200 /* | | | | |1| | | | | | | | | | */ +#define EVENT_END 0x400 /* | | | |1| | | | | | | | | | | */ +#define EVENT_BEGIN 0x800 /* | | |1| | | | | | | | | | | | */ +#define SYSCALL_TRAP 0x1000 /* | |1| | | | | | | | | | | | | */ + + +/* ========================= + * Event trace status values + * ========================= + */ /* | | | | | | | | | | |M|M|C|C| */ + /* | | | | | | | | | | |d|c|d|c| */ + /* ----------------------------- */ +#define CUM_CONTENTION 0x1 /* | | | | | | | | | | | | | |1| */ +#define CUM_DURATION 0x2 /* | | | | | | | | | | | | |1| | */ +#define MON_CONTENTION 0x4 /* | | | | | | | | | | | |1| | | */ +#define MON_DURATION 0x8 /* | | | | | | | | | | |1| | | | */ + +#define ETAP_TRACE_ON 0xf /* | | | | | | | | | | |1|1|1|1| */ +#define ETAP_TRACE_OFF 0x0 /* | | | | | | | | | | | | | | | */ + + +/* ================== + * ETAP trace flavors + * ================== + */ + +/* Mode */ + +#define ETAP_CUMULATIVE 0x3 /* | | | | | | | | | | | | |1|1| */ +#define ETAP_MONITORED 0xc /* | | | | | | | | | | |1|1| | | */ +#define ETAP_RESET 0xf0f0 + +/* Type */ + +#define ETAP_CONTENTION 0x5 /* | | | | | | | | | | | |1| |1| */ +#define ETAP_DURATION 0xa /* 
| | | | | | | | | | |1| |1| | */ + + +/* =============================== + * Buffer/Table flavor definitions + * =============================== + */ + +#define ETAP_TABLE_EVENT 0 +#define ETAP_TABLE_SUBSYSTEM 1 +#define ETAP_BUFFER_CUMULATIVE 2 +#define ETAP_BUFFER_MONITORED 3 + +/* ========================== + * ETAP function declarations + * ========================== + */ + +extern +kern_return_t etap_trace_event( + unsigned short mode, + unsigned short type, + boolean_t enable, + unsigned int nargs, + unsigned short args[]); + +extern +kern_return_t etap_probe( + unsigned short eventno, + unsigned short event_id, + unsigned int data_size, + etap_data_t *data); + +/* ================================================================= + * convienience user probe macro - only used if DO_PROBE is #defined + * ================================================================= + */ +#ifdef DO_PROBE +#define PROBE_DATA(subsys, tag, data0, data1, data2, data3) \ + { \ + etap_data_t _mmmm; \ + _mmmm[0] = (u_int)data0; \ + _mmmm[1] = (u_int)data1; \ + _mmmm[2] = (u_int)data2; \ + _mmmm[3] = (u_int)data3; \ + etap_probe(subsys, tag, sizeof (etap_data_t), &_mmmm); \ + } +#else +#define PROBE_DATA(type, tag, data0, data1, data2, data3) +#endif /* DO_PROBE */ +#endif /* _MACH_ETAP_H_ */ diff --git a/osfmk/mach/etap_events.h b/osfmk/mach/etap_events.h new file mode 100644 index 000000000..298a465e1 --- /dev/null +++ b/osfmk/mach/etap_events.h @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * File : etap_events.h + * + * Kernel trace event definitions + * + * Notes : If new trace event or subsystem definitions are added + * to this file, the appropriate tables in kern/etap.c MUST be + * updated for the ETAP package to recognize them. + * + */ + +#ifndef _MACH_ETAP_EVENTS_H_ +#define _MACH_ETAP_EVENTS_H_ + + +/* ============================ + * ETAP Subsystem Definitions + * ============================ + */ + +#define ETAP_SUBS_PROBE 0x0000 /* must be zero */ + +#define ETAP_SUBS_LOCK_VM 0x0100 +#define ETAP_SUBS_LOCK_IPC 0x0200 +#define ETAP_SUBS_LOCK_IO 0x0300 +#define ETAP_SUBS_LOCK_THREAD 0x0400 +#define ETAP_SUBS_LOCK_NET 0x0500 +#define ETAP_SUBS_LOCK_NORMA 0x0600 +#define ETAP_SUBS_LOCK_DIPC 0x0700 +#define ETAP_SUBS_LOCK_KKT 0x0800 +#define ETAP_SUBS_LOCK_XKERNEL 0x0900 +#define ETAP_SUBS_LOCK_MISC 0x0a00 + +#define ETAP_NO_TRACE 0x0fff /* never traced */ + + +/* ======================= + * Lock Event Definitions + * ======================= + */ + +#define ETAP_VM_BUCKET (ETAP_SUBS_LOCK_VM + 1) +#define ETAP_VM_HIMEM (ETAP_SUBS_LOCK_VM + 2) +#define ETAP_VM_MAP (ETAP_SUBS_LOCK_VM + 3) +#define ETAP_VM_MAP_I (ETAP_SUBS_LOCK_VM + 4) +#define ETAP_VM_MEMMAN (ETAP_SUBS_LOCK_VM + 5) +#define ETAP_VM_MSYNC (ETAP_SUBS_LOCK_VM + 6) +#define ETAP_VM_OBJ (ETAP_SUBS_LOCK_VM + 7) +#define ETAP_VM_OBJ_CACHE (ETAP_SUBS_LOCK_VM + 8) +#define ETAP_VM_PAGE_ALLOC (ETAP_SUBS_LOCK_VM + 9) +#define ETAP_VM_PAGEOUT (ETAP_SUBS_LOCK_VM + 10) 
+#define ETAP_VM_PAGEQ (ETAP_SUBS_LOCK_VM + 11) +#define ETAP_VM_PAGEQ_FREE (ETAP_SUBS_LOCK_VM + 12) +#define ETAP_VM_PMAP (ETAP_SUBS_LOCK_VM + 13) +#define ETAP_VM_PMAP_CACHE (ETAP_SUBS_LOCK_VM + 14) +#define ETAP_VM_PMAP_FREE (ETAP_SUBS_LOCK_VM + 15) +#define ETAP_VM_PMAP_KERNEL (ETAP_SUBS_LOCK_VM + 16) +#define ETAP_VM_PMAP_SYS (ETAP_SUBS_LOCK_VM + 17) +#define ETAP_VM_PMAP_SYS_I (ETAP_SUBS_LOCK_VM + 18) +#define ETAP_VM_PMAP_UPDATE (ETAP_SUBS_LOCK_VM + 19) +#define ETAP_VM_PREPPIN (ETAP_SUBS_LOCK_VM + 20) +#define ETAP_VM_RESULT (ETAP_SUBS_LOCK_VM + 21) +#define ETAP_VM_TEST (ETAP_SUBS_LOCK_VM + 22) +#define ETAP_VM_PMAP_PHYSENTRIES (ETAP_SUBS_LOCK_VM + 23) +#define ETAP_VM_PMAP_SID (ETAP_SUBS_LOCK_VM + 24) +#define ETAP_VM_PMAP_PTE (ETAP_SUBS_LOCK_VM + 25) +#define ETAP_VM_PMAP_PTE_OVFLW (ETAP_SUBS_LOCK_VM + 26) +#define ETAP_VM_PMAP_TLB (ETAP_SUBS_LOCK_VM + 27) + +#define ETAP_IPC_IHGB (ETAP_SUBS_LOCK_IPC + 1) +#define ETAP_IPC_IS (ETAP_SUBS_LOCK_IPC + 2) +#define ETAP_IPC_IS_REF (ETAP_SUBS_LOCK_IPC + 3) +#define ETAP_IPC_MQUEUE (ETAP_SUBS_LOCK_IPC + 4) +#define ETAP_IPC_OBJECT (ETAP_SUBS_LOCK_IPC + 5) +#define ETAP_IPC_PORT_MULT (ETAP_SUBS_LOCK_IPC + 6) +#define ETAP_IPC_PORT_TIME (ETAP_SUBS_LOCK_IPC + 7) +#define ETAP_IPC_RPC (ETAP_SUBS_LOCK_IPC + 8) +#define ETAP_IPC_PORT_ALLOCQ (ETAP_SUBS_LOCK_IPC + 9) + +#define ETAP_IO_AHA (ETAP_SUBS_LOCK_IO + 1) +#define ETAP_IO_CHIP (ETAP_SUBS_LOCK_IO + 2) +#define ETAP_IO_DEV (ETAP_SUBS_LOCK_IO + 3) +#define ETAP_IO_DEV_NUM (ETAP_SUBS_LOCK_IO + 4) +#define ETAP_IO_DEV_PAGEH (ETAP_SUBS_LOCK_IO + 5) +#define ETAP_IO_DEV_PAGER (ETAP_SUBS_LOCK_IO + 6) +#define ETAP_IO_DEV_PORT (ETAP_SUBS_LOCK_IO + 7) +#define ETAP_IO_DEV_REF (ETAP_SUBS_LOCK_IO + 8) +#define ETAP_IO_DEVINS (ETAP_SUBS_LOCK_IO + 9) +#define ETAP_IO_DONE_LIST (ETAP_SUBS_LOCK_IO + 10) +#define ETAP_IO_DONE_Q (ETAP_SUBS_LOCK_IO + 11) +#define ETAP_IO_DONE_REF (ETAP_SUBS_LOCK_IO + 12) +#define ETAP_IO_EAHA (ETAP_SUBS_LOCK_IO + 13) +#define ETAP_IO_HD_PROBE 
(ETAP_SUBS_LOCK_IO + 14) +#define ETAP_IO_IHGB (ETAP_SUBS_LOCK_IO + 15) +#define ETAP_IO_IOPB (ETAP_SUBS_LOCK_IO + 16) +#define ETAP_IO_KDQ (ETAP_SUBS_LOCK_IO + 17) +#define ETAP_IO_KDTTY (ETAP_SUBS_LOCK_IO + 18) +#define ETAP_IO_REQ (ETAP_SUBS_LOCK_IO + 19) +#define ETAP_IO_TARGET (ETAP_SUBS_LOCK_IO + 20) +#define ETAP_IO_TTY (ETAP_SUBS_LOCK_IO + 21) +#define ETAP_IO_IOP_LOCK (ETAP_SUBS_LOCK_IO + 22) +#define ETAP_IO_DEV_NAME (ETAP_SUBS_LOCK_IO + 23) +#define ETAP_IO_CDLI (ETAP_SUBS_LOCK_IO + 24) +#define ETAP_IO_HIPPI_FILTER (ETAP_SUBS_LOCK_IO + 25) +#define ETAP_IO_HIPPI_SRC (ETAP_SUBS_LOCK_IO + 26) +#define ETAP_IO_HIPPI_DST (ETAP_SUBS_LOCK_IO + 27) +#define ETAP_IO_HIPPI_PKT (ETAP_SUBS_LOCK_IO + 28) +#define ETAP_IO_NOTIFY (ETAP_SUBS_LOCK_IO + 29) +#define ETAP_IO_DATADEV (ETAP_SUBS_LOCK_IO + 30) +#define ETAP_IO_OPEN (ETAP_SUBS_LOCK_IO + 31) +#define ETAP_IO_OPEN_I (ETAP_SUBS_LOCK_IO + 32) +#define ETAP_IO_UNDREPLY (ETAP_SUBS_LOCK_IO + 33) + +#define ETAP_THREAD_ACT (ETAP_SUBS_LOCK_THREAD + 1) +#define ETAP_THREAD_ACTION (ETAP_SUBS_LOCK_THREAD + 2) +#define ETAP_THREAD_LOCK (ETAP_SUBS_LOCK_THREAD + 3) +#define ETAP_THREAD_LOCK_SET (ETAP_SUBS_LOCK_THREAD + 4) +#define ETAP_THREAD_NEW (ETAP_SUBS_LOCK_THREAD + 5) +#define ETAP_THREAD_PSET (ETAP_SUBS_LOCK_THREAD + 6) +#define ETAP_THREAD_PSET_ALL (ETAP_SUBS_LOCK_THREAD + 7) +#define ETAP_THREAD_PSET_RUNQ (ETAP_SUBS_LOCK_THREAD + 8) +#define ETAP_THREAD_PSET_IDLE (ETAP_SUBS_LOCK_THREAD + 9) +#define ETAP_THREAD_PSET_QUANT (ETAP_SUBS_LOCK_THREAD + 10) +#define ETAP_THREAD_PROC (ETAP_SUBS_LOCK_THREAD + 11) +#define ETAP_THREAD_PROC_RUNQ (ETAP_SUBS_LOCK_THREAD + 12) +#define ETAP_THREAD_REAPER (ETAP_SUBS_LOCK_THREAD + 13) +#define ETAP_THREAD_RPC (ETAP_SUBS_LOCK_THREAD + 14) +#define ETAP_THREAD_REM_RPC (ETAP_SUBS_LOCK_THREAD + 15) +#define ETAP_THREAD_SEMA (ETAP_SUBS_LOCK_THREAD + 16) +#define ETAP_THREAD_STACK (ETAP_SUBS_LOCK_THREAD + 17) +#define ETAP_THREAD_STACK_USAGE (ETAP_SUBS_LOCK_THREAD + 18) +#define 
ETAP_THREAD_TASK_NEW (ETAP_SUBS_LOCK_THREAD + 19) +#define ETAP_THREAD_TASK_ITK (ETAP_SUBS_LOCK_THREAD + 20) +#define ETAP_THREAD_ULOCK (ETAP_SUBS_LOCK_THREAD + 21) +#define ETAP_THREAD_WAIT (ETAP_SUBS_LOCK_THREAD + 22) +#define ETAP_THREAD_WAKE (ETAP_SUBS_LOCK_THREAD + 23) +#define ETAP_THREAD_ACT_LIST (ETAP_SUBS_LOCK_THREAD + 24) +#define ETAP_THREAD_TASK_SWAP (ETAP_SUBS_LOCK_THREAD + 25) +#define ETAP_THREAD_TASK_SWAPOUT (ETAP_SUBS_LOCK_THREAD + 26) +#define ETAP_THREAD_SWAPPER (ETAP_SUBS_LOCK_THREAD + 27) + +#define ETAP_NET_IFQ (ETAP_SUBS_LOCK_NET + 1) +#define ETAP_NET_KMSG (ETAP_SUBS_LOCK_NET + 2) +#define ETAP_NET_MBUF (ETAP_SUBS_LOCK_NET + 3) +#define ETAP_NET_POOL (ETAP_SUBS_LOCK_NET + 4) +#define ETAP_NET_Q (ETAP_SUBS_LOCK_NET + 5) +#define ETAP_NET_QFREE (ETAP_SUBS_LOCK_NET + 6) +#define ETAP_NET_RCV (ETAP_SUBS_LOCK_NET + 7) +#define ETAP_NET_RCV_PLIST (ETAP_SUBS_LOCK_NET + 8) +#define ETAP_NET_THREAD (ETAP_SUBS_LOCK_NET + 9) + +#define ETAP_NORMA_XMM (ETAP_SUBS_LOCK_NORMA + 1) +#define ETAP_NORMA_XMMOBJ (ETAP_SUBS_LOCK_NORMA + 2) +#define ETAP_NORMA_XMMCACHE (ETAP_SUBS_LOCK_NORMA + 3) +#define ETAP_NORMA_MP (ETAP_SUBS_LOCK_NORMA + 4) +#define ETAP_NORMA_VOR (ETAP_SUBS_LOCK_NORMA + 5) +#define ETAP_NORMA_TASK (ETAP_SUBS_LOCK_NORMA + 6) + +#define ETAP_DIPC_CLEANUP (ETAP_SUBS_LOCK_DIPC + 1) +#define ETAP_DIPC_MSG_PROG (ETAP_SUBS_LOCK_DIPC + 2) +#define ETAP_DIPC_PREP_QUEUE (ETAP_SUBS_LOCK_DIPC + 3) +#define ETAP_DIPC_PREP_FILL (ETAP_SUBS_LOCK_DIPC + 4) +#define ETAP_DIPC_MIGRATE (ETAP_SUBS_LOCK_DIPC + 5) +#define ETAP_DIPC_DELIVER (ETAP_SUBS_LOCK_DIPC + 6) +#define ETAP_DIPC_RECV_SYNC (ETAP_SUBS_LOCK_DIPC + 7) +#define ETAP_DIPC_RPC (ETAP_SUBS_LOCK_DIPC + 8) +#define ETAP_DIPC_MSG_REQ (ETAP_SUBS_LOCK_DIPC + 9) +#define ETAP_DIPC_MSG_ORDER (ETAP_SUBS_LOCK_DIPC + 10) +#define ETAP_DIPC_MSG_PREPQ (ETAP_SUBS_LOCK_DIPC + 11) +#define ETAP_DIPC_MSG_FREE (ETAP_SUBS_LOCK_DIPC + 12) +#define ETAP_DIPC_KMSG_AST (ETAP_SUBS_LOCK_DIPC + 13) +#define 
ETAP_DIPC_TEST_LOCK (ETAP_SUBS_LOCK_DIPC + 14) +#define ETAP_DIPC_SPINLOCK (ETAP_SUBS_LOCK_DIPC + 15) +#define ETAP_DIPC_TRACE (ETAP_SUBS_LOCK_DIPC + 16) +#define ETAP_DIPC_REQ_CALLBACK (ETAP_SUBS_LOCK_DIPC + 17) +#define ETAP_DIPC_PORT_NAME (ETAP_SUBS_LOCK_DIPC + 18) +#define ETAP_DIPC_RESTART_PORT (ETAP_SUBS_LOCK_DIPC + 19) +#define ETAP_DIPC_ZERO_PAGE (ETAP_SUBS_LOCK_DIPC + 20) +#define ETAP_DIPC_BLOCKED_NODE (ETAP_SUBS_LOCK_DIPC + 21) +#define ETAP_DIPC_TIMER (ETAP_SUBS_LOCK_DIPC + 22) +#define ETAP_DIPC_SPECIAL_PORT (ETAP_SUBS_LOCK_DIPC + 23) + +#define ETAP_KKT_TEST_WORK (ETAP_SUBS_LOCK_KKT + 1) +#define ETAP_KKT_TEST_MP (ETAP_SUBS_LOCK_KKT + 2) +#define ETAP_KKT_NODE (ETAP_SUBS_LOCK_KKT + 3) +#define ETAP_KKT_CHANNEL_LIST (ETAP_SUBS_LOCK_KKT + 4) +#define ETAP_KKT_CHANNEL (ETAP_SUBS_LOCK_KKT + 5) +#define ETAP_KKT_HANDLE (ETAP_SUBS_LOCK_KKT + 6) +#define ETAP_KKT_MAP (ETAP_SUBS_LOCK_KKT + 7) +#define ETAP_KKT_RESOURCE (ETAP_SUBS_LOCK_KKT + 8) + +#define ETAP_XKERNEL_MASTER (ETAP_SUBS_LOCK_XKERNEL + 1) +#define ETAP_XKERNEL_EVENT (ETAP_SUBS_LOCK_XKERNEL + 2) +#define ETAP_XKERNEL_ETHINPUT (ETAP_SUBS_LOCK_XKERNEL + 3) + +#define ETAP_MISC_AST (ETAP_SUBS_LOCK_MISC + 1) +#define ETAP_MISC_CLOCK (ETAP_SUBS_LOCK_MISC + 2) +#define ETAP_MISC_EMULATE (ETAP_SUBS_LOCK_MISC + 3) +#define ETAP_MISC_EVENT (ETAP_SUBS_LOCK_MISC + 4) +#define ETAP_MISC_KDB (ETAP_SUBS_LOCK_MISC + 5) +#define ETAP_MISC_PCB (ETAP_SUBS_LOCK_MISC + 6) +#define ETAP_MISC_PRINTF (ETAP_SUBS_LOCK_MISC + 7) +#define ETAP_MISC_Q (ETAP_SUBS_LOCK_MISC + 8) +#define ETAP_MISC_RPC_SUBSYS (ETAP_SUBS_LOCK_MISC + 9) +#define ETAP_MISC_RT_CLOCK (ETAP_SUBS_LOCK_MISC + 10) +#define ETAP_MISC_SD_POOL (ETAP_SUBS_LOCK_MISC + 11) +#define ETAP_MISC_TIMER (ETAP_SUBS_LOCK_MISC + 12) +#define ETAP_MISC_UTIME (ETAP_SUBS_LOCK_MISC + 13) +#define ETAP_MISC_XPR (ETAP_SUBS_LOCK_MISC + 14) +#define ETAP_MISC_ZONE (ETAP_SUBS_LOCK_MISC + 15) +#define ETAP_MISC_ZONE_ALL (ETAP_SUBS_LOCK_MISC + 16) +#define ETAP_MISC_ZONE_GET 
(ETAP_SUBS_LOCK_MISC + 17) +#define ETAP_MISC_ZONE_PTABLE (ETAP_SUBS_LOCK_MISC + 18) +#define ETAP_MISC_LEDGER (ETAP_SUBS_LOCK_MISC + 19) +#define ETAP_MISC_SCSIT_TGT (ETAP_SUBS_LOCK_MISC + 20) +#define ETAP_MISC_SCSIT_SELF (ETAP_SUBS_LOCK_MISC + 21) +#define ETAP_MISC_SPL (ETAP_SUBS_LOCK_MISC + 22) /* i860 */ +#define ETAP_MISC_MASTER (ETAP_SUBS_LOCK_MISC + 23) /* i860 */ +#define ETAP_MISC_FLOAT (ETAP_SUBS_LOCK_MISC + 24) /* i860 */ +#define ETAP_MISC_GROUP (ETAP_SUBS_LOCK_MISC + 25) /* i860 */ +#define ETAP_MISC_FLIPC (ETAP_SUBS_LOCK_MISC + 26) +#define ETAP_MISC_MP_IO (ETAP_SUBS_LOCK_MISC + 27) +#define ETAP_MISC_KERNEL_TEST (ETAP_SUBS_LOCK_MISC + 28) +#define ETAP_MISC_TIMER_LOCK (ETAP_SUBS_LOCK_MISC + 29) +#define ETAP_MISC_POST (ETAP_SUBS_LOCK_MISC + 30) /* i860 */ +#define ETAP_MISC_KERNLOG (ETAP_SUBS_LOCK_MISC + 31) /* Alpha */ +#define ETAP_DPAGE_BS (ETAP_SUBS_LOCK_MISC + 32) /* def pager */ +#define ETAP_DPAGE_BSL (ETAP_SUBS_LOCK_MISC + 33) /* def pager */ +#define ETAP_DPAGE_SEGMENT (ETAP_SUBS_LOCK_MISC + 34) /* def pager */ +#define ETAP_DPAGE_SEGLIST (ETAP_SUBS_LOCK_MISC + 35) /* def pager */ +#define ETAP_DPAGE_VSTRUCT (ETAP_SUBS_LOCK_MISC + 36) /* def pager */ +#define ETAP_DPAGE_VSMAP (ETAP_SUBS_LOCK_MISC + 37) /* def pager */ +#define ETAP_DPAGE_VSLIST (ETAP_SUBS_LOCK_MISC + 38) /* def pager */ +#define ETAP_DPAGE_VSSEQNO (ETAP_SUBS_LOCK_MISC + 39) /* def pager */ +#define ETAP_DPAGE_VSREAD (ETAP_SUBS_LOCK_MISC + 40) /* def pager */ +#define ETAP_DPAGE_VSWRITE (ETAP_SUBS_LOCK_MISC + 41) /* def pager */ +#define ETAP_DPAGE_VSREFS (ETAP_SUBS_LOCK_MISC + 42) /* def pager */ +#define ETAP_DPAGE_VSASYNC (ETAP_SUBS_LOCK_MISC + 43) /* def pager */ + +/* ========================== + * System Probe Definitions + * ========================== + */ +/* probes 0-31 reserved for non-MK (e.g. 
users and servers) */ +#define ETAP_USER_BASE 0 +#define ETAP_USER_NEVENTS 32 + +#define ETAP_P_USER_EVENT0 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 0) +#define ETAP_P_USER_EVENT1 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 1) +#define ETAP_P_USER_EVENT2 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 2) +#define ETAP_P_USER_EVENT3 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 3) +#define ETAP_P_USER_EVENT4 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 4) +#define ETAP_P_USER_EVENT5 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 5) +#define ETAP_P_USER_EVENT6 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 6) +#define ETAP_P_USER_EVENT7 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 7) +#define ETAP_P_USER_EVENT8 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 8) +#define ETAP_P_USER_EVENT9 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 9) +#define ETAP_P_USER_EVENT10 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 10) +#define ETAP_P_USER_EVENT11 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 11) +#define ETAP_P_USER_EVENT12 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 12) +#define ETAP_P_USER_EVENT13 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 13) +#define ETAP_P_USER_EVENT14 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 14) +#define ETAP_P_USER_EVENT15 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 15) +#define ETAP_P_USER_EVENT16 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 16) +#define ETAP_P_USER_EVENT17 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 17) +#define ETAP_P_USER_EVENT18 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 18) +#define ETAP_P_USER_EVENT19 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 19) +#define ETAP_P_USER_EVENT20 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 20) +#define ETAP_P_USER_EVENT21 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 21) +#define ETAP_P_USER_EVENT22 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 22) +#define ETAP_P_USER_EVENT23 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 23) +#define ETAP_P_USER_EVENT24 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 24) +#define ETAP_P_USER_EVENT25 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 25) +#define ETAP_P_USER_EVENT26 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 26) +#define ETAP_P_USER_EVENT27 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 27) 
+#define ETAP_P_USER_EVENT28 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 28) +#define ETAP_P_USER_EVENT29 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 29) +#define ETAP_P_USER_EVENT30 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 30) +#define ETAP_P_USER_EVENT31 (ETAP_SUBS_PROBE + ETAP_USER_BASE + 31) + +/* probes 32-63 reserved for MK */ +#define ETAP_SYS_BASE 32 + +#define ETAP_P_SYSCALL_MACH (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 0) +#define ETAP_P_SYSCALL_UNIX (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 1) +#define ETAP_P_THREAD_LIFE (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 2) +#define ETAP_P_THREAD_CTX (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 3) +#define ETAP_P_RPC (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 4) +#define ETAP_P_INTERRUPT (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 5) +#define ETAP_P_ACT_ABORT (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 6) +#define ETAP_P_PRIORITY (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 7) +#define ETAP_P_EXCEPTION (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 8) +#define ETAP_P_DEPRESSION (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 9) +#define ETAP_P_MISC (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 10) +#define ETAP_P_DETAP (ETAP_SUBS_PROBE + ETAP_SYS_BASE + 11) + +/* =========================== + * ETAP Thread block reasons + * =========================== + */ + +#define BLOCKED_ON_UNDEFINED 0 +#define BLOCKED_ON_CLEAR 0 + +#define BLOCKED_ON_SEMAPHORE 1 +#define BLOCKED_ON_LOCK 2 +#define BLOCKED_ON_LOCK_HANDOFF 3 +#define BLOCKED_ON_MUTEX_LOCK 4 +#define BLOCKED_ON_COMPLEX_LOCK 5 +#define BLOCKED_ON_PORT_RCV 6 +#define BLOCKED_ON_REAPER_DONE 7 +#define BLOCKED_ON_IDLE_DONE 8 +#define BLOCKED_ON_TERMINATION 9 + +#endif /* _MACH_ETAP_EVENTS_H_ */ diff --git a/osfmk/mach/events_info.h b/osfmk/mach/events_info.h new file mode 100644 index 000000000..c6eead64e --- /dev/null +++ b/osfmk/mach/events_info.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.3 1995/01/26 22:15:39 ezf + * removed extraneous CMU CR + * [1995/01/26 20:24:56 ezf] + * + * Revision 1.1.8.2 1995/01/06 19:50:12 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:42:32 dwm] + * + * Revision 1.1.4.3 1993/09/17 21:35:27 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:28:46 robert] + * + * Revision 1.1.4.2 1993/06/04 15:13:47 jeffc + * CR9193 - MK5.0 merge. + * [1993/05/18 02:37:52 gm] + * + * Revision 3.0 92/12/31 22:12:17 ede + * Initial revision for OSF/1 R1.3 + * + * Revision 1.2 1991/06/20 12:13:09 devrcs + * Created from mach/task_info.h. + * [91/06/04 08:53:02 jeffc] + * + * $EndLog$ + */ +/* + * Machine-independent event information structures and definitions. + * + * The definitions in this file are exported to the user. 
The kernel + * will translate its internal data structures to these structures + * as appropriate. + * + * This data structure is used to track events that occur during + * thread execution, and to summarize this information for tasks. + */ + +#ifndef _MACH_EVENTS_INFO_H_ +#define _MACH_EVENTS_INFO_H_ + +struct events_info { + long faults; /* number of page faults */ + long zero_fills; /* number of zero fill pages */ + long reactivations; /* number of reactivated pages */ + long pageins; /* number of actual pageins */ + long cow_faults; /* number of copy-on-write faults */ + long messages_sent; /* number of messages sent */ + long messages_received; /* number of messages received */ +}; +typedef struct events_info events_info_data_t; +typedef struct events_info *events_info_t; +#define EVENTS_INFO_COUNT \ + (sizeof(events_info_data_t) / sizeof(long)) + +#endif /*_MACH_EVENTS_INFO_H_*/ diff --git a/osfmk/mach/exc.defs b/osfmk/mach/exc.defs new file mode 100644 index 000000000..8236168a8 --- /dev/null +++ b/osfmk/mach/exc.defs @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Abstract: + * MiG definitions file for Mach exception interface. 
+ */ + +subsystem +#if KERNEL_USER + KernelUser +#endif + exc 2401; + +#include +#include + +ServerPrefix catch_; + +type exception_data_t = array[*:2] of integer_t; +type exception_type_t = int; + +routine exception_raise( +#if KERNEL_USER + exception_port : mach_port_move_send_t; + thread : mach_port_move_send_t; + task : mach_port_move_send_t; +#else /* KERNEL_USER */ + exception_port : mach_port_t; + thread : mach_port_t; + task : mach_port_t; +#endif /* KERNEL_USER */ + exception : exception_type_t; + code : exception_data_t + ); + +routine exception_raise_state( +#if KERNEL_USER + exception_port : mach_port_move_send_t; +#else /* KERNEL_USER */ + exception_port : mach_port_t; +#endif /* KERNEL_USER */ + exception : exception_type_t; + code : exception_data_t, const; + inout flavor : int; + old_state : thread_state_t, const; + out new_state : thread_state_t); + +routine exception_raise_state_identity( +#if KERNEL_USER + exception_port : mach_port_move_send_t; + thread : mach_port_move_send_t; + task : mach_port_move_send_t; +#else /* KERNEL_USER */ + exception_port : mach_port_t; + thread : mach_port_t; + task : mach_port_t; +#endif /* KERNEL_USER */ + exception : exception_type_t; + code : exception_data_t; + inout flavor : int; + old_state : thread_state_t; + out new_state : thread_state_t); + diff --git a/osfmk/mach/exception.h b/osfmk/mach/exception.h new file mode 100644 index 000000000..6796107b2 --- /dev/null +++ b/osfmk/mach/exception.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * JMM - + * This header will eventually be MIG-generated and define the + * exception interfaces. It used to define the exception data + * types, but those have been moved to exception_types.h for + * consistency. Once this is MIG-generated, it will automatically + * drag in the types, but for compatibility in the interim, just + * pull them in manually. 
+ */ + +#ifndef _MACH_EXCEPTION_H_ +#define _MACH_EXCEPTION_H_ + +#include + +#endif /* _MACH_EXCEPTION_H_ */ diff --git a/osfmk/mach/exception_types.h b/osfmk/mach/exception_types.h new file mode 100644 index 000000000..8f341357b --- /dev/null +++ b/osfmk/mach/exception_types.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_EXCEPTION_TYPES_H_ +#define _MACH_EXCEPTION_TYPES_H_ + +#include + +/* + * Machine-independent exception definitions. + */ + + +#define EXC_BAD_ACCESS 1 /* Could not access memory */ + /* Code contains kern_return_t describing error. */ + /* Subcode contains bad memory address. */ + +#define EXC_BAD_INSTRUCTION 2 /* Instruction failed */ + /* Illegal or undefined instruction or operand */ + +#define EXC_ARITHMETIC 3 /* Arithmetic exception */ + /* Exact nature of exception is in code field */ + +#define EXC_EMULATION 4 /* Emulation instruction */ + /* Emulation support instruction encountered */ + /* Details in code and subcode fields */ + +#define EXC_SOFTWARE 5 /* Software generated exception */ + /* Exact exception is in code field. */ + /* Codes 0 - 0xFFFF reserved to hardware */ + /* Codes 0x10000 - 0x1FFFF reserved for OS emulation (Unix) */ + +#define EXC_BREAKPOINT 6 /* Trace, breakpoint, etc. */ + /* Details in code field. */ + +#define EXC_SYSCALL 7 /* System calls. */ + +#define EXC_MACH_SYSCALL 8 /* Mach system calls. */ + +#define EXC_RPC_ALERT 9 /* RPC alert */ + +/* + * Machine-independent exception behaviors + */ + +# define EXCEPTION_DEFAULT 1 +/* Send a catch_exception_raise message including the identity. + */ + +# define EXCEPTION_STATE 2 +/* Send a catch_exception_raise_state message including the + * thread state. + */ + +# define EXCEPTION_STATE_IDENTITY 3 +/* Send a catch_exception_raise_state_identity message including + * the thread identity and state. 
+ */ + +/* + * Masks for exception definitions, above + * bit zero is unused, therefore 1 word = 31 exception types + */ + +#define EXC_MASK_BAD_ACCESS (1 << EXC_BAD_ACCESS) +#define EXC_MASK_BAD_INSTRUCTION (1 << EXC_BAD_INSTRUCTION) +#define EXC_MASK_ARITHMETIC (1 << EXC_ARITHMETIC) +#define EXC_MASK_EMULATION (1 << EXC_EMULATION) +#define EXC_MASK_SOFTWARE (1 << EXC_SOFTWARE) +#define EXC_MASK_BREAKPOINT (1 << EXC_BREAKPOINT) +#define EXC_MASK_SYSCALL (1 << EXC_SYSCALL) +#define EXC_MASK_MACH_SYSCALL (1 << EXC_MACH_SYSCALL) +#define EXC_MASK_RPC_ALERT (1 << EXC_RPC_ALERT) + +#define EXC_MASK_ALL (EXC_MASK_BAD_ACCESS | \ + EXC_MASK_BAD_INSTRUCTION | \ + EXC_MASK_ARITHMETIC | \ + EXC_MASK_EMULATION | \ + EXC_MASK_SOFTWARE | \ + EXC_MASK_BREAKPOINT | \ + EXC_MASK_SYSCALL | \ + EXC_MASK_MACH_SYSCALL | \ + EXC_MASK_RPC_ALERT | \ + EXC_MASK_MACHINE) + + +#define FIRST_EXCEPTION 1 /* ZERO is illegal */ + +#ifndef ASSEMBLER +#include +#include +#include +/* + * Exported types + */ + +typedef int exception_type_t; +typedef integer_t exception_data_type_t; +typedef int exception_behavior_t; +typedef integer_t *exception_data_t; +typedef unsigned int exception_mask_t; +typedef exception_mask_t *exception_mask_array_t; +typedef exception_behavior_t *exception_behavior_array_t; +typedef thread_state_flavor_t *exception_flavor_array_t; +typedef mach_port_t *exception_port_array_t; + +#endif /* ASSEMBLER */ +#endif /* _MACH_EXCEPTION_TYPES_H_ */ diff --git a/osfmk/mach/flipc_cb.h b/osfmk/mach/flipc_cb.h new file mode 100644 index 000000000..43d811757 --- /dev/null +++ b/osfmk/mach/flipc_cb.h @@ -0,0 +1,1214 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.11.1 1996/09/17 16:34:42 bruel + * fixed types. + * [96/09/17 bruel] + * + * Revision 1.1.6.1 1995/06/13 18:20:10 sjs + * Merged from flipc_shared. + * [95/06/07 sjs] + * + * Revision 1.1.3.14 1995/05/19 00:58:14 sjs + * Added send_ready to shared area, used for fast check if there is something + * to do (and prevents the cache from getting stirred). + * [95/05/18 sjs] + * + * Revision 1.1.3.13 1995/05/16 20:46:28 randys + * Export performance valid information through performance + * structure rather than kernel configuration section. + * [95/05/16 randys] + * + * Added performance (FLIPC_PERF) config information to + * kernel_configuration section of comm buffer, so that user + * programs can find out if this information is being gathered. + * [95/05/16 randys] + * + * Revision 1.1.3.12 1995/05/15 14:26:54 randys + * Updated comments on use of acquire pointer (it's completely + * ignored if dpb is set) and added macros for testing !dpb and + * enabled at the same time. + * [95/05/11 randys] + * + * Change pme_process_ptr ==> sme_process_ptr (since it's being read + * by AIL now). 
+ * [95/05/11 randys] + * + * Added private copied of release and process pointers. + * [95/05/11 randys] + * + * Rearrange endpoint structure to separate data with importantly + * different access patterns into different cache lines. This + * involved duplicating some (effectively constant) entries, and + * creating two versions of some macros. + * [95/05/11 randys] + * + * Revision 1.1.3.11 1995/05/08 16:06:33 randys + * Added comment explaining that an endpoint bufferlist must always + * have valid buffer pointers in all of its entries, to keep + * FLIPC_endpoint_buffer_available from going off the deep end. No + * code changes. + * [95/04/18 randys] + * + * Revision 1.1.3.10 1995/04/05 21:21:52 randys + * Added a field to the buffer control structure holding the + * scheduling policy chosen for the allocations lock. + * [95/04/05 randys] + * + * Revision 1.1.3.9 1995/03/23 20:35:19 randys + * Added comments indicating duplication of declarations of + * flipc_cb_base & flipc_cb_size in this file and in flipc_usermsg.h + * Modified declaration of flipc_cb_size to be unsigned long. + * [95/03/21 randys] + * + * Revision 1.1.3.8 1995/02/23 21:32:42 randys + * Added space for kernel configuration in communications buffer + * control structure. + * [95/02/22 randys] + * + * Revision 1.1.3.7 1995/02/21 17:22:58 randys + * Re-indented code to four space indentation + * [1995/02/21 16:25:32 randys] + * + * Revision 1.1.3.6 1995/02/13 22:57:29 randys + * Replaced all of NEXT_{ACQUIRE,RELEASE,PROCESS}_PTR macros with a + * single NEXT_BUFFERLIST_PTR macro. + * [95/02/03 randys] + * + * Revision 1.1.3.5 1995/01/26 21:01:44 randys + * Add performance structure into CB. + * [1995/01/24 21:14:31 randys] + * + * Added flag in epgroup structure to note that epgroup + * has a semaphore associated with it. + * [1995/01/19 23:02:13 randys] + * + * Add a space in the comm buffer header for the null_destination + * the ME sets up for the AIL. 
Get rid of + * FLIPC_ADDRESS_ENDPOINT_PTR (it isn't used) + * [1995/01/19 20:22:30 randys] + * + * Up the comm buffer size to 1 megabyte + * [1995/01/17 22:23:27 randys] + * + * Revision 1.1.3.4 1995/01/12 21:19:01 randys + * Minor commenting changes from dlb + * [1995/01/06 18:18:12 randys] + * + * Revision 1.1.3.3 1994/12/22 16:23:57 randys + * Fixed calculation of number of buffers on an endpoint + * to take size of buffer pointers into account. + * [1994/12/21 16:19:55 randys] + * + * Revision 1.1.3.2 1994/12/20 19:01:56 randys + * Moved definition of flipc_simple_lock to flipc_cb.h + * [1994/12/20 17:34:41 randys] + * + * Added a simple lock in the comm buffer to use for the + * allocations lock, along with directions as to how + * to use it (not like a normal simple lock). + * [1994/12/20 15:27:25 randys] + * + * Added error log into communications buffer control + * structure, and changed FLIPC_ADDRESS_ENDPOINT_PTR to + * correctly compute the endpoint pointer based on the + * new ctl structure layout. + * [1994/12/19 23:47:45 randys] + * + * Added filename in comment at top of each file + * [1994/12/19 20:28:20 randys] + * + * Add version field to epgroup to check races on buffer acquire + * from epgroup. + * [1994/12/19 18:05:04 randys] + * + * Revision 1.1.3.1 1994/12/12 17:46:12 randys + * Putting initial flipc implementation under flipc_shared + * [1994/12/12 16:27:46 randys] + * + * Revision 1.1.1.2 1994/12/11 23:11:18 randys + * Initial flipc code checkin + * + * $EndLog$ + */ + +/* + * mach/flipc_cb.h + * + * This file is intended to be the data structure layout for the flipc + * communcations buffer, both for the KKT implementation and + * for the eventual paragon implementation. This file should include + * all of the information necessary for either humans or machines to + * understand the data structure layout. 
+ * + * The communications buffer is the wired section of memory used for + * communication between the flipc applications interface layer and + * the flipc message engine. No structure in it are visible to the + * user; the applications interface layer mediates all user access to + * the CB. + */ + +#ifndef _MACH_FLIPC_CB_H_ +#define _MACH_FLIPC_CB_H_ + +#include + +/* + * Flipc naming and argument ordering conventions (this applies mainly to + * user-interface.h, but seems inappropriate in a user-visible header file): + * + * All objects prefixed with "flipc"; uppercase for user-visible + * objects, lower case for internal ones. + * + * Types created with typedef will have _t suffixes. + * + * Words will be separated by '_'. + * + * Macro definitions will be all in caps. + * + * Enum members will have their initial letter (after Flipc) capitalized. + * + * + * For user-visible routines: + * + * The first word following the "flipc" will be the flipc object type that + * that routine operates on (specifically "domain", "epgroup", + * "endpoint", or "buffer"). + * + * The object named by the first word of the call will, if an argument + * to the call, be the first argument. + * + * Output variables passed as pointers in the arglist will come last. + */ + +/* + * The kinds of objects that exist in the communications buffer are: + * + * Endpoints -- Used for sending or receiving. + * Buffers -- Composed of a buffer header and buffer data. + * Endpoint groups -- Used for collecting multiple numbers of endpoints + * together for a select like operation. + */ + +/* + * We can't use general pointers inside the communications buffer, + * since the address space on either side of the interface is + * different. The places where we could use pointers are: + * + * *) From endpoint sets to endpoints. + * *) From endpoints to buffers. + * + * The kinds of pointers we could use are: + * *) Byte offset from the beginning of the comm buffer. 
This + * is simple, but has the disadvantage of allowing the user to + * play games with pointing endpoint buffer pointers into data + * space, & etc. + * *) Rigid arrays of each type of object, with the object + * "pointer" being an index into the array. This avoids the + * above problem, but complicates memory allocation (forces + * allocation to be contiguous, which may force pre-deciding + * how much space each of the above types will take). + * + * Though we appear to be going for the rigid allocation for each type + * of data structure, I'm still going to do the "simple offset" + * solution to maintain maximum flexibility into the future. + * The single exception to this is that FLIPC addresses will be composed of + * node number and endpoint number, where the endpoint number will be + * the index into the endpoint array. + */ + +typedef unsigned long flipc_cb_ptr; +/* Define a null value, which doesn't point anywhere into the CB. */ +#define FLIPC_CBPTR_NULL ((flipc_cb_ptr) -1) + +/* + * Synchronization between message engine and application. + * + * In general, it isn't reasonable to allow locking and unlocking of + * data structures between message engine and communications buffer, + * as this requires the message engine to trust arbitrary user + * threads. The solution is to arrange all data structures so that + * they may be accessed by both parties without locking. The way that + * this is usually done is that specific variables are considered to + * be owned by one of the ME or the AIL, and the other party is + * allowed to read the variable but not to modify it. With this + * arrangement, implementing things like producer/consumer circular + * queues is possible; each agent (ME or AIL) goes around the list + * doing its thing, and avoids passing the pointer showing where the + * other agent is working. + * + * Following the above, we may divide structure members into five + * classes, and define prefixes for these five classes. 
+ * + * Description Prefix + * ------------------------------- + * Private to AIL pail_ + * Private to ME pme_ + * AIL owned, read by ME sail_ + * ME owned, read by AIL sme_ + * Shared in other way shrd_ + * + * Shared variables may change their ownership based on their own + * or someone elses value (these variables may be thought of as + * being handed back and forth between the two entities) or on a + * configuration option of the structure (not handed back and forth, + * but still based on another variables value). + * + * In addition, I am going to put variables that are set at endpoint + * allocation and cleared at deallocation (but read by both sides) in + * a separate class; they are "AIL owned, read by ME" but are + * effectively constant over the synchronization protocols we care + * about. + * + * Constant after allocation const_ + * + * Note that this ignores memory consistency issues (when the two + * agents are actually on two separate processors). These issues need + * to be explored in more detail; for now suffice it to say that the + * above methods work given a sequentially consistent memory model or + * a processor consistent memory model. + * + * Also note that an optimizing compiler may reorder our memory + * accesses, playing merry hell with the inter-node synchronization + * protocols (the compiler doesn't know about the other node, after + * all). To avoid this, all structure members used for + * synchronization will be marked volatile; this will force the + * compiler to keep the order and number of accesses intact. This + * will also force the compiler *not* to optimize way accesses to + * these variables, so it is wise to explicitly load the variable into + * a temporary once if you need to do multiple computations with it, + * and store it back afterwards when you are done. 
+ */ + +/* + * Memory allocation: + * + * For maximum simplicity in the first implementation, we need to know + * at comm buffer allocation time how many endpoints, endpoint_sets, + * and buffers we will want total, until the end of time. This + * masively simplifies memory allocation; there will be a single array + * of each type of data and the communication buffer will be taken up + * by the concatenation of these arrays (with some fiddling to make + * sure that no data crosses a page boundary). + * + * For each data type there will be a free list to which pieces of + * data will be added to or removed from as needed. Each data type + * will have a pointer in it to allow it to be linked onto the free + * list. + */ + +/* + * Multiple thread access to data structures: + * + * There are several points in the communications buffer (notably + * endpoint accesses) when multiple application threads will be + * attempting operations on data structures at the same time. To + * multiplex these operations, we need a per-data structure lock. + * Lock attributes: + * *) This lock will not be kernel based, as such a lock would be + * too heavyweight to use for arbitrary sending and receiving + * operations). + * *) Because it is not kernel based, it may not be used to + * multiplex accesses from threads at different kernel + * priority levels. Deadlock would result if a low-priority + * thread gained the lock and then was prempted by a + * high-priority thread that wanted to acquire it. + * *) Architecture-dependent interfaces need to be designed to + * atomically lock and unlock this data structure. + * + * These are "simple locks" and are defined in flipc_dep.h. + */ + +/* + * Lock type. This placement (in flipc_cb.h) is a little bit of a + * hack, as it really should be defined with the machine dependent lock + * macros. But then the machine independent lock macros have problems + * because they have to include it both before and after the prototypes. 
+ * So rather than split the machine dependent stuff into multiple + * files, I'll define it here and hope that this definition works for + * whatever architectures we're on. + */ +typedef unsigned long flipc_simple_lock; + +/* + * Ownership of data structures. + * + * Please note that this is a can of worms, and that I (Randys) + * consider this (and it's interactions with endpoint group membership) + * the likeliest place for design bugs in FLIPC. Any and all should + * take this as an open invitation and challenge to find bugs in what + * follows. + * + * Rules: + * + * *) If you've disabled a structure and synched with the + * appropriate side of the ME, the ME won't touch it. + * + * *) If you've taken a send endpoint off of the send endpoint + * list and sync'd with the ME, the ME won't touch it. + * + *[The rest of this applies to the AIL only; the above rules are the + * only ones the ME respects. ] + * + * *) Within the AIL, a disabled structure is owned by: + * *) The routine that disabled it, before it is put on + * the free list. + * *) The routine that dequeued it from the free list, + * before it is enabled. + * Taking of the simple lock is not required for ownership in + * these cases. Taking of the simple lock is not required for + * the act of *enabling* the structure (you have ownership and + * are giving it away), however it is required for the act of + * disabling the structure (since it is the only valid way to + * take ownership of an enabled structure, and you can't + * modify the enabled bit without having ownership). + * + * *) The simple lock in a structure always needs to be valid, as + * simple locks may be taken while the structure is in any + * state. Simiarly, the enabled bit must always be valid, + * both because it's what the ME checks, and because it may be + * checked by the AIL while the structure is free. + * + * *) Holding the simple lock on an enabled structure imparts + * ownership of that structure. 
You are allowed to take the + * simple lock of a disabled structure, but ownership is not + * gained by doing so. + * + * *) You are allowed to read the enabled/disabled bit without + * owning the structure (if the structure is disabled, there + * may be no way to gain the ownership). + * + * *) Owning a structure allows you to do what you want with it, + * except: + * *) As mentioned above, the simple lock and + * enabled/disabled bit must always be valid. + * *) The ownership of the endpoint group related members + * of an endpoint structure is special; see below. + * *) The allocations lock must be held to manipulate the + * next send endpoint field of any endpoint. + * + * *) If an endpoint is on an endpoint group, the ownership of + * the the endpoint group related members of the structure + * (sail_endpoint_group and pail_next_eg_endpoint) go with the + * owndership of the endpoint group, not the endpoint. For + * this purpose only, membership is defined atomically as the + * sail_endpoint_group pointer being set to an endpoint group. + * Thus one may remove an endpoint from an endpoint group + * without owning the endpoint (change the sail_endpoint_group + * pointer last). One requires both locks to add an endpoint + * to an endpoint group, however. + * + * (Part of the motivation for this is that removal and + * addition of endpoints to endpoint groups requires + * modifications of pointers in other endpoint structures). + * + * *) No structure may be put on the free list if marked with any + * association to any other structure. Specifically, endpoint + * groups may have no endpoints belonging to them, and + * endpoints may not belong to an endpoint group or have + * buffers belonging to them. 
+ * + * *) One consequence of the above is that endpoint groups may + * not be marked as disabled while they have any endpoints on + * them, as freeing an endpoint requires it to be removed from + * its endpoint group, and if ownership of the endpoint group + * cannot be gained, that is impossible. + * + * *) In theory, endpoints *may* be marked disabled while they + * are still on endpoint groups. In practice, they are not. + * This is relied on by the code which frees endpoint groups, + * in a non-obvious way. Specifically, that code assumes that + * there is no way that a call to free endpoint will return + * with the endpoint still on the endpoint group. Since the + * only way for free endpoint to fail is if the endpoint is + * inactive, and since the endpoint is set inactive only after + * free endpoint (presumably a different one) confirms that it + * isn't on any endpoint group, this assumption is true. + * + * Got that? Take home lesson: don't allow endpoints to be + * marked disabled while still on endpoint groups until you + * *do* get that, and are willing to take the responsibility + * of changing it so that it works under your new scheme. + * + * *) Ownership of the freelist(s) are gained by holding the + * allocations lock for the buffer, and *only* in that way. + * No modification of freelist, send endpoint list, or send + * side ME sync bits is valid without holding the allocations + * lock. In other words, while you can read things in the + * main communications buffer control structure at will, you + * may not change them without owning the allocations lock. + * + * *) The state where a structure is disabled but off of the + * freelist may be valid as an intermediate (while an AIL + * routine is orchestrating a transition) but is not a valid + * static state. This state must not survive the return to + * application code of the thread that disabled the structure. + */ + +/* + * Flipc data buffer management. 
+ * + * A buffer (whether being used for sending or receiving) may be in + * one of three states: + * + * READY -- Buffer held by application. + * PROCESSING -- Buffer held by endpoint, unprocessed. For receive endpoints, + * this means that the buffer is empty, waiting to be filled by + * an incoming message. For send endpoints, this means tht the + * buffer is full, waiting to be sent out. + * COMPLETED -- Buffer held by the endpoint, processed. For receive + * endpoints, this means that the buffer is full, with newly + * received data in it. For send endpoints, this means that the + * buffer is empty (*), with it's data having been sent out. + * + * (*) In point of fact the data hasn't been touched, though bits + * may have been fiddled with in the header data structure. But + * it's been sent. + * FREE -- The buffer is in the pool of free buffers, and may be + * allocated to any newly created endpoint. + * + * The transition diagram between these states is relatively simple: + * + * + * release + * /-----------------\| + * +----------+ -+----------+ + * | READY | |PROCESSING|<- - - - - - + * +----------+_ +----------+ \ + * ^ |\ - - - - - - - - / | | \endpoint allocate + * | (processed) \endpoint \ + * | | \ free | + * | acquire / ------\ + * | \ | + * | / (processed) >+----------+ + * +----------+ | FREE | + * |COMPLETED |< - - - - - - - - - - +----------+ + * +----------+ endpoint allocate / ^ + * | ^- - - - - - - - - - - - - - - - - - - - - - - | + * | / + * \ endpoint free / + * ------------------------------------------------------/ + * + * (If it doesn't look simple, imagine it without the FREE state; that + * state doesn't enter into almost any buffer manipulations) + * + * For send buffers, release==send, acquire==allocate, and + * processed==the sending done by the message engine. For receive buffers, + * release==release, acquire==receive, and process==the actual + * arrival of the message handled by the messaging engine. 
+ * + * The choice of path from the PROCESSING state is an endpoint + * specific configuration option; a particular endpoint may leave a + * processed buffer on the endpoint, or it may release it back to the + * application by dropping it from the endpoint. + * + * Buffers are assigned the PROCESSING state on a newly allocated + * receive endpoint (to be ready to receive messages) and the + * COMPLETED state on a newly allocated send endpoint. + * + * The state (other than FREE) that a particular buffer is in is + * determined by its place on a circular queue of buffer pointers that + * is part of the endpoint structure. Buffers owned by the + * application (READY) are not pointed to by pointers on this queue. + * The buffer is released to the message engine by placement of a + * pointer to it on this queue. When the message engine is done + * processing the buffer, it sets a flag in the buffer header. If the + * endpoint is so configured, it then removes the buffer pointer from + * the queue; otherwise the AIL acquires the buffer (and removes the + * pointer from the queue) when it chooses. + * + * . . . . . . + * . . + * . . + * . . AIL releasing + * . . ^ + * . +-------+--/ + * . | | + * . |Buffers| + * . | to be | + * . |Sent or| + * . |Receivd| + * . | Into | ^ ME processing + * . +-------+ --/ + * . | | + * . AIL | Sent | (These buffers have a flag set to indicate + * .Acquiring| or | that they have been processed. This + * . |Filled | section is optional; the endpoint may be + * . |buffers| configured to drop buffers after processing) + * . ^ | | + * . \--+-------+ + * . . + * . . + * . . . . . . + * + * + * The AIL will refuse to acquire a buffer that has not yet been + * processed by the ME. Acquire will not work at all on endpoints + * that have been configured to drop buffers on completion. + * + * The buffer_available primitive is coded to avoid doing a + * (potentially costly) acquiring of the endpoint flipc lock. 
Since + * telling where there is a buffer available requires two operations + * (comparison of the acquire and release pointers to see if there are + * any buffers on the endpoint, and then indirection of the acquire + * pointer to see if that buffer has bee processed yet), there is a + * potential race that will admit the possibility of indirecting + * through an invalid pointer. For this reason, for the life of an + * endpoint, it is a requirement that all buffer pointers on the + * bufferlist point *somewhere* (ie. to some existing buffer), so that + * this indirection will not cause an access error. The + * buffer_available primitive may return the wrong result, but (as + * long as the incorrectness is transitory), this is acceptable. + */ + +/* Set up the states so that FLIPC_buffer_processed can just do an + & and a test. */ +typedef enum { + flipc_Free = 0x0, flipc_Processing = 0x1, + flipc_Completed = 0x2, flipc_Ready = 0x3 +} flipc_buffer_state_t; +#define FLIPC_BUFFER_PROCESSED_P(state) ((state) & 0x2) + +/* + * Data header/buffer layout. + * + * For this implementation, and probably for all time, the header + * immediately precedes the data in memory, and the mesaging engine + * will send both header and data. Our priority is message dispatch + * speed rather than raw bandwidth (this is the small message side of + * a transfer mechanism), so we don't mind that we are throwing away + * some bandwidth by taking up transferred space with header data. + * + * The data size will be the maximum size allowed by the underlying + * transport, minus the header size (available at run time). The user + * will be given a pointer to the data buffer, and will use this both + * for copying data in and out, and as an argument to the underlying + * flipc routines. The flipc routines will access appropriately. + * + * The header structure follows; the user data type will be offset and + * cast appropriately to access this. 
+ */ + +typedef struct flipc_data_buffer { + union { + FLIPC_address_t destination; /* For sending. */ + flipc_cb_ptr free; /* Link for header free list. */ + } u; + + /* ME owned if flipc_Processing, AIL owned otherwise. May not ever + assume the state flipc_Ready in an optimized implementation. */ + volatile flipc_buffer_state_t shrd_state; +} *flipc_data_buffer_t; + +/* + * Endpoint structure. + * + * An endpoint is the data structure used for communicating buffers, + * either send or receive. Note that all actual circular lists of + * buffer pointers on the endpoints are in their own array that gets + * partitioned out to the various endpoints. This is because we want + * the endpoint structures themselves to be fixed size for easy + * indexing upon receit of a message. This large scale array will be + * of size (max_buffers_per_endpoint) * (number_of_endpoints). Both + * of these values are set during the domain initialization call. + * + * Note that the pointers contained in the buffer lists are pointers to + * buffer *headers*, not to the data. + */ + +/* + * This structure is divided into four cache lines, separated by their + * usage type: + * + * *) Private data that the AIL scribbles on. + * *) Data the AIL writes (regularly) that the ME reads + * (occaisionally). The canonical example is the release pointer. + * *) Private data that the ME scribbles on. + * *) Data the ME writes (regularly) that the AIL reads (occaisionally). + * The canonical example is the process pointer. + * + * There are a couple of other categories of stuff, that can be shoehorned + * into the above: + * *) Constant data that both sides read regularly. This can be + * duplicated in the two private areas (actually, it can be + * duplicated in any two areas that stay in the cache of the + * respective processors). + * *) Stuff that is not accessed on the critical path; it can go + * almost anywhere (probably in one of the two ping-ponging + * cache lines). 
+ * *) Stuff that is read-only for a single processor goes in that + * processors private data section. + * + * Duplicate entries have a "p" or a "a" suffixed to the name to + * indicate that fact. Note that these will usually, but not always, + * be "const" variables--they may be "const" variables only from the + * critical path viewpoint. + * + * We take cache line length as being 8 * sizeof(int). + */ + +typedef struct flipc_endpoint { + + /* ===Private AIL data=== */ + /* Type of endpoint (send, recv, etc). Duplicated in private + ME section. */ + FLIPC_endpoint_type_t constda_type; + + /* This next value is two variables squeezed into a single word to + * save on memory accesses (since they are almost always read at + * the same time. The two variables are: + * + * const_drop_processed_buffers -- Should the message engine drop + * buffers after processing them (as opposed to leaving them on + * the endpoint)? + * + * sail_enabled (volatile) -- Is the endpoint enabled? This isn't + * marked constant because it is used for synchronization on + * endpoint deallocation. + * + * Note that to reduce test and branches, we these two variables + * are represented by two bits in the word (bit 0 and bit 16). It + * is illegal to have bits other than 0 and 16 set in this word. + * This assumption is used in ENABLED_AND_NOT_DPB_P, and is enforced + * in DOE_CONSTRUCT (assumed to not be performance critical) below. + * + * Duplicated in private ME section. + */ + + volatile unsigned long sailda_dpb_or_enabled; + +#define EXTRACT_DPB(dpb_or_enabled) ((dpb_or_enabled) >> 16) +#define EXTRACT_ENABLED(dpb_or_enabled) ((dpb_or_enabled) & 0xffff) +#define DISABLED_OR_DPB_P(dpb_or_enabled) ((dpb_or_enabled) ^ 0x1) +#define DOE_CONSTRUCT(dpb, enabled) \ + (((dpb) ? 0x10000 : 0) | ((enabled) ? 0x1 : 0)) + + flipc_simple_lock pail_lock; /* Simple lock for serializing + multiple thread access to + structure. AIL owned. */ + /* First element in buffer list array that is ours. 
Constant + from communications buffer initialization. */ + flipc_cb_ptr constda_my_buffer_list; + /* First element after my_buffer_list that is *not* in my buffer + list. Constant from communications buffer initialization. */ + flipc_cb_ptr constda_next_buffer_list; + + /* First location that has a valid buffer pointer in it. This may + contain a pointer to a buffer available for acquisition, or it + may contain a pointer to a buffer that is still being + processed; the buffer header or process_ptr needs to be checked + to be sure. This location is AIL owned. It is ignored by all + (including the ME and initialization code) if + drop_processed_buffers, above, is set. */ + volatile flipc_cb_ptr shrd_acquire_ptr; + + /* AIL private copy of process pointer. This hopefully means that + the AIL won't need to read the real process pointer (and fault + in a cache line) very often. */ + flipc_cb_ptr pail_process_ptr; + + unsigned int pad_pail_7; + + /* ===End of cache line===*/ + /* ===AIL writes, ME occaisionally reads=== */ + + /* Next location at which the AIL may insert a buffer pointer. */ + volatile flipc_cb_ptr sail_release_ptr; + unsigned int pad_sail_1; + unsigned int pad_sail_2; + unsigned int pad_sail_3; + unsigned int pad_sail_4; + unsigned int pad_sail_5; + unsigned int pad_sail_6; + unsigned int pad_sail_7; + + /* ===End of cache line===*/ + /* ===Private ME data=== */ + /* See above comments (in private ail section). */ + + FLIPC_endpoint_type_t constdm_type; + volatile unsigned long saildm_dpb_or_enabled; + + volatile unsigned long sme_overruns; /* For a receive endpoint, counter for + the number of messages that have + arrived when there hasn't been + space. ME owned. */ + unsigned long pail_overruns_seen; /* A count of the number of overruns + that the AIL has noted and doesn't + want to be bothered with again. + The user only sees the difference + between the previous count and this. 
*/ + + /* + * For send endpoints; linked into a list that is used by the ME + * to find stuff to do. Also used for endpoint free list. + * Null if at end of list. Not "const" because it's used as a + * synchronization variable during setup and teardown + * of send endpoints. + */ + volatile flipc_cb_ptr sail_next_send_endpoint; + + /* Constant buffer lsit pointers for ME. See private ail comments. */ + flipc_cb_ptr constdm_my_buffer_list; + flipc_cb_ptr constdm_next_buffer_list; + + /* Private ME copy of release pointer. This hopefully means that + the ME won't have to read (and fault in a cache line) the + release pointer very often. */ + + flipc_cb_ptr pme_release_ptr; + /* ===End of cache line===*/ + + /* ===ME writes, AIL occaisionally reads=== */ + /* + * For endpoint group membership. + */ + flipc_cb_ptr pail_next_eg_endpoint; /* Next endpoint in endpoint group. + AIL owned. */ + flipc_cb_ptr sail_epgroup; /* Direct pointer to endpoint group that + we are part of. FLIPC_CBPTR_NULL + if none. AIL owned. */ + + /* First location that has a buffer pointer available for + processing. If this value is equal to the release_ptr there are no + buffers available for processing. */ + volatile flipc_cb_ptr sme_process_ptr; + unsigned int pad_sme_3; + unsigned int pad_sme_4; + unsigned int pad_sme_5; + unsigned int pad_sme_6; + unsigned int pad_sme_7; + + /* ===End of cache line===*/ + /* ===END=== */ + + /* The following macros may have possible performance loss in + multiple accesses (or indirection, but a good compiler will get + around that). We need to have versions for each processor so + that the constant reads are done from the right copy. */ + + /* General bufferlist pointer increment macro, with versions + for ME and AIL. */ + +#define NEXT_BUFFERLIST_PTR(bufferlist_ptr, endpoint, suf) \ + (((bufferlist_ptr) + sizeof(flipc_data_buffer_t) \ + == ((endpoint)->const ## suf ## _next_buffer_list)) ? 
\ + ((endpoint)->const ## suf ## _my_buffer_list) : \ + (bufferlist_ptr) + sizeof(flipc_data_buffer_t)) +#define NEXT_BUFFERLIST_PTR_ME(bufferlist_ptr, endpoint) \ + NEXT_BUFFERLIST_PTR(bufferlist_ptr, endpoint, dm) +#define NEXT_BUFFERLIST_PTR_AIL(bufferlist_ptr, endpoint) \ + NEXT_BUFFERLIST_PTR(bufferlist_ptr, endpoint, da) + + /* Macros for each of "can I release onto this buffer?" "Can I + acquire from this buffer?" and "Can I process an element on + this buffer?" The first two presume they are being executed on + the main procesor, the third on the co-processor. + All have three arguments: + *) A variable which will be set to the release, acquire, or + process pointer after the macro *if* the operation is ok. + *) A temporary variable used inside the function. + *) The endpoint. + + We presume the acquire macro won't be called if drop processed + buffers is enabled; the process and release macros deal + appropriately with that issue. */ + + /* In general these macros will: + *) Not read a volatile structure member more than once. + *) If a variables owner is the other processor, these macros + will check a local copy of the variable first before checking + the other processors. + *) Will only update the local copy if the remote copy really is + different from the local one. + */ + +/* This macro implements the synchronization check; local cbptr is + the pointer owned by the local processor which we want to compare + with a pointer on the remote processor which we have a copy + of locally. Reads the remote pointer zero or one times; other + reads are as necessary. + + The algorithm is: + *) If the local copy says our pointer and the remote value aren't equal, + we're done. + *) Otherwise, check the remote copy. If it says the values aren't + equal, update the local copy. 
*/ + +#define ENDPOINT_SYNCNE_CHECK(local_cbptr, copy_rmt_cbptr, \ + rmt_cbptr, tmp_cbptr) \ + ((local_cbptr) != (copy_rmt_cbptr) \ + || ((((tmp_cbptr) = (rmt_cbptr)) != (local_cbptr)) \ + && (((copy_rmt_cbptr) = (tmp_cbptr)), 1))) + +#define ENDPOINT_ACQUIRE_OK(acquire_cbptr, tmp_cbptr, endpoint) \ + ((acquire_cbptr) = (endpoint)->shrd_acquire_ptr, \ + ENDPOINT_SYNCNE_CHECK(acquire_cbptr, (endpoint)->pail_process_ptr, \ + (endpoint)->sme_process_ptr, tmp_cbptr)) + +#define ENDPOINT_PROCESS_OK(process_cbptr, tmp_cbptr, endpoint) \ + ((process_cbptr) = (endpoint)->sme_process_ptr, \ + ENDPOINT_SYNCNE_CHECK(process_cbptr, (endpoint)->pme_release_ptr, \ + (endpoint)->sail_release_ptr, tmp_cbptr)) + +#define NODPB_ENDPOINT_RELEASE_OK(release_cbptr, tmp_cbptr, endpoint) \ + ((release_cbptr) = (endpoint)->sail_release_ptr, \ + (tmp_cbptr) = (endpoint)->shrd_acquire_ptr, \ + (NEXT_BUFFERLIST_PTR_AIL(release_cbptr, endpoint) \ + != (tmp_cbptr))) + +/* Don't use NEXT_BUFFERLIST_PTR here to save a temporary variable. */ +#define DPB_ENDPOINT_RELEASE_OK(release_cbptr, tmp_cbptr, endpoint) \ + (release_cbptr = (endpoint)->sail_release_ptr, \ + ((release_cbptr + sizeof(flipc_data_buffer_t) == \ + (endpoint)->constda_next_buffer_list) \ + ? ENDPOINT_SYNCNE_CHECK((endpoint)->constda_my_buffer_list, \ + (endpoint)->pail_process_ptr, \ + (endpoint)->sme_process_ptr, \ + tmp_cbptr) \ + : ENDPOINT_SYNCNE_CHECK(release_cbptr + sizeof(flipc_data_buffer_t), \ + (endpoint)->pail_process_ptr, \ + (endpoint)->sme_process_ptr, \ + tmp_cbptr))) + + /* This next is tricky; remember that acquire_ptr points + to an actual bufferptr on the list, whereas release_ptr does + not. This macro is only used in FLIPC_endpoint_query, and so + doesn't need to have an ME version. */ + +#define BUFFERS_ON_ENDPOINT_AIL(acquire_ptr, release_ptr, endpoint) \ + ((release_ptr) > (acquire_ptr) \ + ? 
((release_ptr) - (acquire_ptr)) / sizeof(flipc_cb_ptr) \ + : ((((release_ptr) - (endpoint)->constda_my_buffer_list) \ + + ((endpoint)->constda_next_buffer_list - acquire_ptr)) \ + / sizeof(flipc_cb_ptr))) +} *flipc_endpoint_t; + + +/* + * Endpoint groups. + * + * Used to represent a group of endpoints, for linking sending/receiving + * with semaphores & etc. Note that there needs to be a private data + * structure kept by the kernel that associates with each epgroup + * a semaphore to be used for wakeups on that endpoint set. + */ + +typedef struct flipc_epgroup { + flipc_simple_lock pail_lock; /* Lock to synchronize threads (at the + same priority level) accessing this + structure. */ + volatile unsigned long sail_enabled; /* Set if structure is active. */ + unsigned long const_semaphore_associated; /* Flag to indicate whether or not + there is a semaphore associated + with this endpoint group in the + kernel flipc routines. */ + volatile unsigned long sail_wakeup_req; /* Incremented when a thread wants to + be woken. */ + volatile unsigned long pme_wakeup_del; /* Incremented when the ME delivers a + wakeup. */ + unsigned long pail_version; /* Incremented when epgroup membership + is changed; checked when retrieving + a buffer from an epgroup. */ + unsigned long sail_msgs_per_wakeup; /* How many messages need to arrive + before the ME delivers a wakeup. */ + unsigned long pme_msgs_since_wakeup; /* How many messages have arrived + since the last wakeup. ME + owned. */ + + flipc_cb_ptr pail_first_endpoint; /* First endpoint in the group. The + other endpoints are linked along + behind him. AIL owned. */ + flipc_cb_ptr pail_free; /* Used to link this endpoint onto + the freelist. */ +} *flipc_epgroup_t; + +/* + * Communication buffer control structure. + * + * This is in the communications buffer itself. Note that any changes + * in this structure require it to be locked with the allocation lock, + * as access to this structure is shared by all threads using the CB. 
+ */ + +/* + * Individual data type layout. + * + * All we need here is a pointer to the start of each type of data + * struct, the number of those data structures in the communications + * buffer, and a pointer to the beginning of the freelist for that data + * structure. + * + * Note that the composite buffer list doesn't have a freelist associated + * with it, since each section of the buffer list is tightly bound to an + * endpoint, and is allocated and freed with that endpoint. We still + * need the start and number information, though. + */ +struct flipc_cb_type_ctl { + flipc_cb_ptr start; /* Where there array of this type of + data structure starts. */ + unsigned long number; /* How many of them we've got. */ + flipc_cb_ptr free; /* Where the beginning of the freelist + is. */ +}; + +/* + * Synchronization with message engine. + * + * At certain times (specifically during structure allocation/free or + * additions to the send list) you want to know that the messaging + * engine has picked up your changes. However, the message engine has + * (effectively) two threads, one for each of the send and receive + * sides. The mechanisms used for synchronizations with the two sides + * differ. In an eventual co-processor implementation (with a single + * thread), only the send side mechanism will be used. + * + * To request a cached state flush by the send side of the mesasging + * engine, you flip the request_sync bit and it responds by flipping + * the response_sync bit. The send ME checks this bit once every trip + * through the send endpoints. + * + * On the receive side, since receives take very little time and do + * not block (unlike sends) when we want to make sure the ME is + * holding no cached receive side state, we simply spin until we see + * that the ME receive side is no longer operating. It sets a + * variable whenever it is in the process of receiving a message. + */ + +/* + * Proper manipulation of the send endpoint list. 
+ * + * Note that synchronizing with the message engine over access to the + * send endpoint list is especially tricky. There is no problem with + * writing new values in all of the locations required to take a send + * endpoint off of the list. However, we must be very sure before + * modifying the pointer *in* the send endpoint that the ME isn't + * currently working in that send endpoint (else it could be sent off + * into the void). Two options here: + * + * *) Synchronize (using the below variables) for each send + * endpoint removed, after the removal but before the + * modification of the data in the internal structure. + * *) If we can always be sure that the send endpoint link in the + * endpoint structure has a valid value, we can simply let the + * chips fall where they may. It will be null while free, and + * have a value that points back into the send buffer list + * when reallocated. I'm not going to do this; it's sleezy + * and will partially mess up fairness based on ME send + * endpoint round-robinning. + */ + +/* + * This entire structure is protected by an kernel level lock so there + * is no conflict between threads accessing it. See flipc_kfr.c for + * details on this lock; how it is implemented and used depends on what + * kernel base we are on. + */ + +/* + * Note that the last element of this structure is variable sized, so this + * structure itself is also variable sized. + */ +typedef struct flipc_comm_buffer_ctl { + /* Kernel flipc configuration that the user must match in order to + work with this kernel. Checked as soon as the comm buffer is + mapped. */ + struct { + unsigned int real_time_primitives:1; + unsigned int message_engine_in_kernel:1; + unsigned int no_bus_locking:1; /* One way check -- if the kernel doesn't + have this and the user does, that's + an error. 
*/ + } kernel_configuration; + volatile unsigned long send_ready; /* A send(s) is ready to go */ + + /* These first three structures are constant after communications buffer + initialization. */ + unsigned long data_buffer_size; /* Size of the data buffers. */ + unsigned long local_node_address; /* Local node number. */ + FLIPC_address_t null_destination; /* Local null destination value. */ + +#if REAL_TIME_PRIMITIVES + /* The scheduling policy used by the task initializing flipc for + the allocations lock. */ + int allocations_lock_policy; +#else + /* A poor substitute for a kernel level allocations lock. + Note that this *cannot* be used as a regular simple lock; + instead, try to acquire it, call sleep(1), try again, etc. + Spinning on this lock will probably waste lots of cycles. */ + flipc_simple_lock pail_alloc_lock; +#endif + + /* All of the members of these structures except for the free pointer + are constant after initialization. The free pointer is ail owned + and private. */ + struct flipc_cb_type_ctl endpoint; + struct flipc_cb_type_ctl epgroup; + struct flipc_cb_type_ctl bufferlist; + struct flipc_cb_type_ctl data_buffer; + + /* Global synchronization with the message engine. On the KKT + implementation we need one synchronizer for each thread. */ + + /* Send side: */ + volatile unsigned long sail_request_sync; /* request_sync = !request_sync when the + AIL wants to synchronize with the + CB. */ + volatile unsigned long sme_respond_sync; /* respond_sync = !respond_sync when + the ME has noticed the sync + request. By responding to the + sync, the ME is stating that it has + no communications buffer state that + was cached previous to it noticing + the sync. */ + + /* Receive side. */ + volatile unsigned long sme_receive_in_progress; /* Set by the ME before it looks at + any data structures; cleared + afterwards. A simple spin in + the user space on this + variable will suffice, as the + time that the message + engine could be receiving + is low. 
*/ + + /* Send endpoint list starts here. */ + volatile flipc_cb_ptr sail_send_endpoint_list; /* Null if no send endpoints. + */ + + /* Keep track of whatever performance information we choose. */ + struct FLIPC_domain_performance_info performance; + + /* Keep track of various kinds of error information here. */ + struct FLIPC_domain_errors sme_error_log; + +} *flipc_comm_buffer_ctl_t; + + +/* + * The communications buffer. + * + * The only restriction on the layout of the communications buffer is + * that the buffers themselves may not cross page boundaries. So we + * will place the data buffers at the end of the communications + * buffer, and the other objects at the beginning, and there may be a + * little bit of extra space in the middle. + * + * Note that this layout may change in future versions of FLIPC. + * + * +---------------------------+ + * | flipc_comm_buffer_ctl | + * +---------------------------+ + * | | + * | Endpoints | + * | | + * +---------------------------+ + * | | + * | Endpoint Groups | + * | | + * +---------------------------+ + * | | + * | Combined Buffer Lists | + * | | + * +---------------------------+ + * | | + * | (Possible empty space) | + * | | + * +---------------------------+ + * | | + * | Data Buffers | + * | | + * +---------------------------+ + */ + +/* The number of pages that the kernel will reserve for the comm + buffer. The AIL needs to know this to know how much to map. */ +#define COMM_BUFFER_SIZE 0x100000 + +/* + * These variables are set, in a per-address space context, to the base + * and length of the communications buffer. The ME needs to do bounds + * checking to make sure it isn't overrunning anything. Note that the + * existence of these variables implies that an application will only + * open a single domain. + * + * These declarations are duplicated in flipc/flipc_usermsg.h, and + * should be kept in sync with that file. + */ +unsigned char *flipc_cb_base; +unsigned long flipc_cb_length; /* In bytes. 
*/ + +/* + * Following is a set of macros to convert back and forth between + * real address pointers and flipc_cb_ptr's for each data type. They + * rely on the flipc_cb_base being set correctly. + * + * A possible future improvement might be to have bounds checking occur + * inside these macros, but I'm not sure what I'd do if it failed. + */ + +/* Easy going one way. */ +#define FLIPC_CBPTR(ptr) \ +(((unsigned char *) (ptr)) - flipc_cb_base) + +/* Need to get the right types going the other way. */ +#define FLIPC_ENDPOINT_PTR(cb_ptr) \ +((flipc_endpoint_t) ((cb_ptr) + flipc_cb_base)) +#define FLIPC_EPGROUP_PTR(cb_ptr) \ +((flipc_epgroup_t) ((cb_ptr) + flipc_cb_base)) +#define FLIPC_DATA_BUFFER_PTR(cb_ptr) \ +((flipc_data_buffer_t) ((cb_ptr) + flipc_cb_base)) +#define FLIPC_BUFFERLIST_PTR(cb_ptr) \ +((flipc_cb_ptr *) ((cb_ptr) + flipc_cb_base)) + + +/* + * Flipc addresses. + * + * The addresses used by flipc for communication are defined in the + * user visible header file as unsigned longs. These macros pull that + * information apart for use of the FLIPC internal routines. + * + * I assume in the following that endpoints immediately follow the + * comm buffer control structure, because that makes indexing into + * them much easier. + */ + +#define FLIPC_CREATE_ADDRESS(node, endpoint_idx) \ +((node << 16) | (endpoint_idx)) +#define FLIPC_ADDRESS_NODE(addr) (((unsigned long) (addr)) >> 16) +#define FLIPC_ADDRESS_ENDPOINT(addr) (((unsigned long) (addr)) & 0xffff) + +#endif /* _MACH_FLIPC_CB_H_ */ diff --git a/osfmk/mach/flipc_debug.h b/osfmk/mach/flipc_debug.h new file mode 100644 index 000000000..693d695c5 --- /dev/null +++ b/osfmk/mach/flipc_debug.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +/* + * Really a C file, but I'd like to have this code available in both + * the kernel and the application, so I'll put it in a .h file. This + * file needs to be included only once in the AIL or ME, into a .c file + * where it will be compiled. + */ + +/* + * Since these are debug functions, it doesn't matter which processor macro + * version I use; I don't mind spoiling cache while I'm debugging. + */ + +#include +/* + * Print (using printf) all buffers in the communications buffer that + * are not on any endpoint or on the buffer freelist. Only active + * endpoints are checked. + * + * Note that no locking is done; this function implicitly assumes the + * communications buffer is in a quiescent state. It is expected that + * this function will normally be called from a debugger. + * + * As long as it is at it, this function prints buffers that are + * doubly owned (valid pointers to them from two places). + */ + +/* + * Given that these functions will normally be called from the debugger, + * there isn't any need to globally visible prototypes for them. To + * eliminate compilation warnings, we include prototypes for the functions + * here in the file. 
+ */ +static void flipcdbg_update_bufferset_bitvec(flipc_comm_buffer_ctl_t, + flipc_data_buffer_t); +void flipcdbg_print_unowned_buffers(void); +void flipcdbg_buffer_find_refs(flipc_cb_ptr buffer_cbptr); + +#ifdef __GNUC__ +__inline__ +#endif +static void +flipcdbg_update_bufferset_bitvec(flipc_comm_buffer_ctl_t cb_ctl, + flipc_data_buffer_t buffer) +{ + unsigned char *buffer_base = flipc_cb_base + cb_ctl->data_buffer.start; + int bitpos = ((((unsigned char *) buffer) - buffer_base) + / cb_ctl->data_buffer_size); + int element = bitpos / (sizeof(unsigned long) * 8); + int subbitpos = bitpos - element * sizeof(unsigned long) * 8; + + /* Is that position set already? */ + if (flipc_debug_buffer_bitvec[element] & (1 << subbitpos)) + printf("Buffer 0x%x (idx: %d, cbptr: 0x%x) is multiply referenced.\n", + buffer, bitpos, FLIPC_CBPTR(buffer)); + + /* Set it. */ + flipc_debug_buffer_bitvec[element] |= (1 << subbitpos); +} + +void +flipcdbg_print_unowned_buffers(void) +{ + flipc_comm_buffer_ctl_t cb_ctl = + (flipc_comm_buffer_ctl_t) flipc_cb_base; + int i; + unsigned long bitvec_length = ((cb_ctl->data_buffer.number + sizeof(unsigned long) * 8) + / (sizeof(unsigned int) * 8)); + flipc_data_buffer_t current_buffer; + flipc_endpoint_t current_endpoint; + flipc_cb_ptr current_cbptr; + int header_printed = 0; + + /* Clean out the bitvec. */ + for (i = 0; i < bitvec_length; i++) + flipc_debug_buffer_bitvec[i] = 0; + + /* Go through the freelist, setting bits for each buffer. */ + for (current_cbptr = cb_ctl->data_buffer.free; + current_cbptr != FLIPC_CBPTR_NULL; + current_cbptr = current_buffer->u.free) { + int bitpos; + int element, subbitpos; + + current_buffer = FLIPC_DATA_BUFFER_PTR(current_cbptr); + flipcdbg_update_bufferset_bitvec(cb_ctl, current_buffer); + } + + /* Go through all the endpoints, again setting bits for each buffer. 
*/ + for (current_endpoint = FLIPC_ENDPOINT_PTR(cb_ctl->endpoint.start); + (current_endpoint + < (FLIPC_ENDPOINT_PTR(cb_ctl->endpoint.start) + + cb_ctl->endpoint.number)); + current_endpoint++) { + if (EXTRACT_ENABLED(current_endpoint->saildm_dpb_or_enabled)) { + flipc_cb_ptr current_ptr = + (EXTRACT_DPB(current_endpoint->saildm_dpb_or_enabled) + ? current_endpoint->sme_process_ptr + : current_endpoint->shrd_acquire_ptr); + flipc_cb_ptr limit_ptr = current_endpoint->sail_release_ptr; + + while (current_ptr != limit_ptr) { + flipc_cb_ptr current_buffer_cbptr = + *FLIPC_BUFFERLIST_PTR(current_ptr); + flipc_data_buffer_t current_buffer = + FLIPC_DATA_BUFFER_PTR(current_buffer_cbptr); + + /* Mark this as set. */ + flipcdbg_update_bufferset_bitvec(cb_ctl, current_buffer); + + /* Increment the current pointer. */ + current_ptr = NEXT_BUFFERLIST_PTR_ME(current_ptr, + current_endpoint); + } + } + } + + /* Ok, we should have marked every buffer that has a reference. + Print out all the ones that done have references. */ + for (i = 0; i < bitvec_length; i++) { + int this_limit = + ((i == bitvec_length - 1) + ? cb_ctl->data_buffer.number % (sizeof(unsigned long)*8) + : sizeof(unsigned long)*8); + if (flipc_debug_buffer_bitvec[i] != (unsigned long) -1) { + int j; + for (j = 0; j < this_limit; j++) { + if (!(flipc_debug_buffer_bitvec[i] & (1 << j))) { + int buffer_bitpos = i * sizeof(unsigned long) * 8 + j; + flipc_cb_ptr buffer_cbptr = + (buffer_bitpos * cb_ctl->data_buffer_size + + cb_ctl->data_buffer.start); + flipc_data_buffer_t buffer_ptr = + FLIPC_DATA_BUFFER_PTR(buffer_cbptr); + + /* Print header if necessary. */ + if (!header_printed) { + header_printed = 1; + printf("Unreferenced buffers (ptr,idx,cbptr):"); + } + + /* Print buffer. 
*/ + printf(" (0x%x,%d,0x%x)", buffer_ptr, buffer_bitpos, + buffer_cbptr); + } + } + } + } + if (header_printed) + printf("\n"); +} + +void +flipcdbg_buffer_find_refs(flipc_cb_ptr buffer_cbptr) +{ + flipc_comm_buffer_ctl_t cb_ctl = + (flipc_comm_buffer_ctl_t) flipc_cb_base; + int found_on_freelist = 0; + int found_on_endpoints = 0; + int i; + flipc_endpoint_t current_endpoint; + + flipc_cb_ptr current_cbptr; + flipc_data_buffer_t current_buffer; + + /* Go through the freelist, looking for buffer. */ + for (i = 0, current_cbptr = cb_ctl->data_buffer.free; + current_cbptr != FLIPC_CBPTR_NULL; + i++, current_cbptr = current_buffer->u.free) { + if (current_cbptr == buffer_cbptr) { + printf("Buffer found on freelist in position %d\n", i); + found_on_freelist = 1; + } + current_buffer = FLIPC_DATA_BUFFER_PTR(current_cbptr); + if (i > cb_ctl->data_buffer.number) { + printf ("**Some form of corruption following freelist.**"); + return; + } + } + if (found_on_freelist) + printf("(Total buffers on freelist: %d/%d)\n", i, + cb_ctl->data_buffer.number); + + /* Go through all the endpoints, again looking for the buffer. */ + for (current_endpoint = FLIPC_ENDPOINT_PTR(cb_ctl->endpoint.start); + (current_endpoint + < (FLIPC_ENDPOINT_PTR(cb_ctl->endpoint.start) + + cb_ctl->endpoint.number)); + current_endpoint++) { + if (EXTRACT_ENABLED(current_endpoint->saildm_dpb_or_enabled)) { + flipc_cb_ptr current_ptr = + (EXTRACT_DPB(current_endpoint->saildm_dpb_or_enabled) + ? current_endpoint->sme_process_ptr + : current_endpoint->shrd_acquire_ptr); + flipc_cb_ptr limit_ptr = current_endpoint->sail_release_ptr; + + while (current_ptr != limit_ptr) { + current_cbptr = *FLIPC_BUFFERLIST_PTR(current_ptr); + + if (current_cbptr == buffer_cbptr) { + printf("Buffer found on endpoint 0x%x (idx: %d)\n", + current_endpoint, + (current_endpoint + - FLIPC_ENDPOINT_PTR(cb_ctl->endpoint.start))); + found_on_endpoints = 1; + } + + /* Increment the current pointer. 
*/ + current_ptr = NEXT_BUFFERLIST_PTR_ME(current_ptr, + current_endpoint); + } + } + } +} + + + diff --git a/osfmk/mach/flipc_device.h b/osfmk/mach/flipc_device.h new file mode 100644 index 000000000..413a1e1cb --- /dev/null +++ b/osfmk/mach/flipc_device.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/06/13 18:20:16 sjs + * Merged from flipc_shared. + * [95/06/07 sjs] + * + * Revision 1.1.3.4 1995/04/05 21:21:58 randys + * Added allocations_lock_policy argument to usermsg_Init_Buffer set + * status call. 
+ * [95/04/05 randys] + * + * Revision 1.1.3.3 1995/02/21 17:23:08 randys + * Re-indented code to four space indentation + * [1995/02/21 16:25:35 randys] + * + * Revision 1.1.3.2 1994/12/20 19:02:03 randys + * Added filename in comment at top of each file + * [1994/12/19 20:28:25 randys] + * + * Revision 1.1.3.1 1994/12/12 17:46:17 randys + * Putting initial flipc implementation under flipc_shared + * [1994/12/12 16:27:48 randys] + * + * Revision 1.1.1.2 1994/12/11 23:11:21 randys + * Initial flipc code checkin + * + * $EndLog$ + */ + +/* + * mach/flipc_device.h + * + * Declarations related to the device driver interface to FLIPC. + */ + +#ifndef _MACH_FLIPC_DEVICE_H_ +#define _MACH_FLIPC_DEVICE_H_ + +/* + * Definitions of constants both the ME and AIL need to know for + * communications through the device driver interface. These are the + * possible values for the top 16 bits of the flavor parameter; the + * bottom 16 bits are extra information that may be needed (eg. to + * parameterize a request for semaphore in the get status routine). + */ +typedef enum { /* Arguments. */ + /* Get status flavors. */ + usermsg_Get_Initialized_Status = 1, /* (int *init_p) */ + usermsg_Get_Epgroup_Semaphore, /* (mach_port_t *semaphore) */ + usermsg_Return_Allocations_Lock, /* (void) */ + + /* Set status flavors. 
*/ + usermsg_Init_Buffer, /* (int max_endpoints, + int max_epgroups, + int max_buffers, + int max_buffers_per_endpoint, + int allocations_lock_policy) */ + usermsg_Process_Work, /* (void) */ + usermsg_Acquire_Allocations_Lock, /* (void) */ + usermsg_Release_Allocations_Lock, /* (void) */ + usermsg_Epgroup_Associate_Semaphore /* (int epgroup_idx, mach_port_t port) */ +} usermsg_devop_t; + +#define FLIPC_DEVICE_FLAVOR(devop, param) (((devop)<<16)|(param)) + +#endif /* _MACH_FLIPC_DEVICE_H_ */ diff --git a/osfmk/mach/flipc_locks.h b/osfmk/mach/flipc_locks.h new file mode 100644 index 000000000..222bb0b79 --- /dev/null +++ b/osfmk/mach/flipc_locks.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1995/06/13 18:20:29 sjs + * Merged from flipc_shared. 
+ * [95/06/07 sjs] + * + * Revision 1.1.2.3 1995/03/09 19:42:30 rwd + * Move yield function out of macro and prototype. + * [1995/03/09 19:36:25 rwd] + * + * Revision 1.1.2.2 1995/02/21 17:23:11 randys + * Re-indented code to four space indentation + * [1995/02/21 16:25:39 randys] + * + * Revision 1.1.2.1 1994/12/20 19:02:06 randys + * Moved definition of flipc_simple_lock to flipc_cb.h + * [1994/12/20 17:34:44 randys] + * + * Separated the lock macros out into machine dependent and independent files; + * this is the machine independent file. + * [1994/12/20 16:43:38 randys] + * + * $EndLog$ + */ + +/* + * mach/flipc_locks.h + * + * The machine independent part of the flipc_simple_locks definitions. + * Most of the locks definitions is in flipc_dep.h, but what isn't + * dependent on the platform being used is here. + */ + +/* + * Note that the locks defined in this file and in flipc_dep.h are only + * for use by user level code. The reason why this file is visible to + * the kernel is that the kernel section of flipc needs to initialize + * these locks. + */ + +#ifndef _MACH_FLIPC_LOCKS_H_ +#define _MACH_FLIPC_LOCKS_H_ + +/* Get the simple lock type. */ +#include + +/* + * Lock function prototypes. This needs to be before any lock definitions + * that happen to be macros. + */ + +/* Initializes lock. Always a macro (so that kernel code can use it without + library assistance). */ +void flipc_simple_lock_init(flipc_simple_lock *lock); + +/* Returns 1 if lock gained, 0 otherwise. */ +int flipc_simple_lock_try(flipc_simple_lock *lock); + +/* Returns 1 if lock is locked, 0 otherwise. */ +int flipc_simple_lock_locked(flipc_simple_lock *lock); + +/* Releases the lock. */ +void flipc_simple_lock_release(flipc_simple_lock *lock); + +/* Take the lock. */ +void flipc_simple_lock_acquire(flipc_simple_lock *lock); + +/* Take two locks. Does not hold one while spinning on the + other. 
*/ +void flipc_simple_lock_acquire_2(flipc_simple_lock *lock1, + flipc_simple_lock *lock2); + +/* Get the machine dependent stuff. The things that need to be + * defined in a machine dependent fashion are: + * + * flipc_simple_lock_init (must be a macro) + * flipc_simple_lock_try + * flipc_simple_lock_locked + * flipc_simple_lock_release + * + * These last three don't necessarily have to be macros, but if they + * aren't definitions must be included in the machine dependent + * part of the user level library code. + */ +#include + +/* + * Set at flipc initialization time to thread_yield argument to + * FLIPC_domain_init + */ + +extern void (*flipc_simple_lock_yield_fn)(void); + +/* + * Machine independent definitions; defined in terms of above routines. + */ + +/* Take the lock. Assumes an external define saying how long to + spin, and an external function to call when we've spun too long. */ +#define flipc_simple_lock_acquire(lock) \ +do { \ + int __spin_count = 0; \ + \ + while (flipc_simple_lock_locked(lock) \ + || !flipc_simple_lock_try(lock)) \ + if (++__spin_count > LOCK_SPIN_LIMIT) { \ + (*flipc_simple_lock_yield_fn)(); \ + __spin_count = 0; \ + } \ +} while (0) + +/* Take two locks. Hold neither while spinning on the other. 
*/ +#define flipc_simple_lock_acquire_2(lock1, lock2) \ +do { \ + int __spin_count = 0; \ + \ + while (1) { \ + while (flipc_simple_lock_locked(lock1) \ + || !flipc_simple_lock_try(lock1)) \ + if (++__spin_count > LOCK_SPIN_LIMIT) { \ + (*flipc_simple_lock_yield_fn)(); \ + __spin_count = 0; \ + } \ + \ + if (flipc_simple_lock_try(lock2)) \ + break; \ + flipc_simple_lock_release(lock1); \ + \ + while (flipc_simple_lock_locked(lock2) \ + || !flipc_simple_lock_try(lock2)) \ + if (++__spin_count > LOCK_SPIN_LIMIT) { \ + (*flipc_simple_lock_yield_fn)(); \ + __spin_count = 0; \ + } \ + \ + if (flipc_simple_lock_try(lock1)) \ + break; \ + flipc_simple_lock_release(lock2); \ + } \ +} while (0) + +#endif /* _MACH_FLIPC_LOCKS_H_ */ diff --git a/osfmk/mach/flipc_types.h b/osfmk/mach/flipc_types.h new file mode 100644 index 000000000..1019cfbbc --- /dev/null +++ b/osfmk/mach/flipc_types.h @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.1 1996/09/17 16:34:35 bruel + * fixed types. + * [96/09/17 bruel] + * + * Revision 1.1.5.1 1995/06/13 18:20:20 sjs + * Merged from flipc_shared. + * [95/06/07 sjs] + * + * Revision 1.1.3.11 1995/05/23 19:55:36 randys + * Don't keep track of messages sent to a bad destination--that's + * purely a transport function now. + * [95/05/23 randys] + * + * Revision 1.1.3.10 1995/05/23 15:40:20 randys + * Added field to FLIPC_domain_errors to indicate validity of other + * fields. + * [95/05/22 randys] + * + * Revision 1.1.3.9 1995/05/16 20:46:35 randys + * Added a "performance_valid" field to the flipc performance + * structure. + * [95/05/16 randys] + * + * Revision 1.1.3.8 1995/04/05 21:22:01 randys + * Added field to domain_info struct to include allocations lock + * sched policy. + * [95/04/05 randys] + * + * Revision 1.1.3.7 1995/03/09 19:42:33 rwd + * Define SEMAPHORE_NULL (for now) and include mach_types.h instead + * of sema_types.h. + * [95/03/08 rwd] + * + * Revision 1.1.3.6 1995/02/23 21:32:52 randys + * Removed placeholder definition for locks--I don't believe that I + * use locks unless I'm on top of a real time base, in which case + * that base will define the type. + * [95/02/22 randys] + * + * Revision 1.1.3.5 1995/02/21 17:23:13 randys + * Re-indented code to four space indentation + * [1995/02/21 16:25:36 randys] + * + * Revision 1.1.3.4 1995/02/16 23:20:14 randys + * ANSIfy FLIPC_thread_yield_function. + * [95/02/14 randys] + * + * Add FLIPC_thread_yield_function type. + * [95/02/14 randys] + * + * Revision 1.1.3.3 1995/01/26 21:01:51 randys + * Added performance structure. 
+ * [1995/01/24 21:14:12 randys] + * + * Added FLIPC_epgroup_info struct + * [1995/01/24 18:30:02 randys] + * + * Create a new structure (FLIPC_endpoint_info) to return + * information about an endpoint. + * [1995/01/20 19:26:35 randys] + * + * Get rid of FLIPC_DESTINATION_NULL and add in + * FLIPC_ADDRESS_ERROR (return code from FLIPC_buffer_destination) + * [1995/01/19 20:23:24 randys] + * + * Added domain index type for specifying domain in + * init and attach calls + * [1995/01/18 16:47:25 randys] + * + * Revision 1.1.3.2 1994/12/20 19:02:09 randys + * Added error reporting structure type, and added + * room in the standard domain query for error log size. + * [1994/12/19 23:46:09 randys] + * + * Added filename in comment at top of each file + * [1994/12/19 20:28:26 randys] + * + * Support and doc for minor user interface changes for error conditions + * [1994/12/18 23:24:30 randys] + * + * Yank the semaphore type definitions back out, and include the file + * that defines those types. + * [1994/12/13 17:50:03 randys] + * + * Revision 1.1.3.1 1994/12/12 17:46:20 randys + * Put definitions of semaphore_t and SEMAPHORE_NULL back in; they aren't + * defined in user space yet. + * [1994/12/12 17:21:56 randys] + * + * Revision 1.1.1.2 1994/12/11 23:11:23 randys + * Initial flipc code checkin + * + * $EndLog$ + */ + +/* + * mach/flipc_types.h + * + * Definitions of those flipc types that need to be visible to both the AIL + * and kernel sides of flipc (which is just about everything). + */ + +#ifndef _MACH_FLIPC_TYPES_H_ +#define _MACH_FLIPC_TYPES_H_ + +#include + +/* + * Define a couple of generally useful types. + */ +#include + +#ifndef MACH_KERNEL +#define SEMAPHORE_NULL (semaphore_port_t)0 +#endif /* !defined(MACH_KERNEL) */ + +/* + * Basic flipc types; visible to both user and kernel segments of the + * flipc implementation. + */ +/* Flipc addresses. These name a node-endpoint combination for + sending. 
*/ +typedef unsigned int FLIPC_address_t; +#define FLIPC_ADDRESS_ERROR ((FLIPC_address_t) -1) + +/* Flipc endpoints. */ +typedef void *FLIPC_endpoint_t; +#define FLIPC_ENDPOINT_NULL ((FLIPC_endpoint_t) 0) + +/* Buffer pointers (returned by query functions). Users are allowed to + copy directly to/from this pointer; it points at their data. */ +typedef void *FLIPC_buffer_t; +#define FLIPC_BUFFER_NULL ((FLIPC_buffer_t) 0) + +/* Endpoint group identifiers. */ +typedef void *FLIPC_epgroup_t; +#define FLIPC_EPGROUP_NULL ((FLIPC_epgroup_t) 0) +#define FLIPC_EPGROUP_ERROR ((FLIPC_epgroup_t) -1) + +/* Domain index; argument to initialization and attach routines. */ +typedef unsigned int FLIPC_domain_index_t; + +/* Domain handle (mach port). */ +typedef mach_port_t FLIPC_domain_t; + +/* The different types an endpoint can be. FLIPC_Inactive is used when + the endpoint has not been configured and hence is on the freelist. */ +typedef enum { + FLIPC_Inactive = -1, + FLIPC_Send, + FLIPC_Receive +} FLIPC_endpoint_type_t; + +/* Structure for returning performance information about the flipc + domain; a placeholder for future entries as needed. + This information will only be valid if the kernel is configured to + keep flipc performance information. */ +typedef struct FLIPC_domain_performance_info { + unsigned long performance_valid; /* Non zero if the other information + in this structure is valid. */ + unsigned long messages_sent; /* Since last init. */ + unsigned long messages_received; /* Since last init. Includes overruns + (because they are marked in the + endpoint data structure). Doesn't + include other drops (they are + marked in other places) */ +} *FLIPC_domain_performance_info_t; + +/* Flipc yield function. */ +typedef void (*FLIPC_thread_yield_function)(void); + +/* Structure for returning information about the flipc domain. 
*/ +typedef struct FLIPC_domain_info { + int max_endpoints; + int max_epgroups; + int max_buffers; + int max_buffers_per_endpoint; + int msg_buffer_size; + FLIPC_thread_yield_function yield_fn; + int policy; /* Allocations lock sched policy. + Unused if REAL_TIME_PRIMITIVES are + not being used. */ + struct FLIPC_domain_performance_info performance; + int error_log_size; /* In bytes. */ +} *FLIPC_domain_info_t; + +/* Structure for returning information about the error state of + the flipc domain. Note that this is variable sized; the size + of the transport specific information is not known at compile + time. */ +typedef struct FLIPC_domain_errors { + int error_full_config_p; /* 1 if disabled and badtype below are + valid; 0 if only msgdrop_inactive + is valid. */ + int msgdrop_inactive; /* Messages dropped because + of the domain being locally + inactive. */ + int msgdrop_disabled; /* Messages dropped because of a + disabled endpoint. */ + int msgdrop_badtype; /* Messages dropped because they + were sent to a send endpoint. */ + + int transport_error_size; /* Size of the following array of + ints, in bytes. */ + int transport_error_info[1]; /* Really of transport_error_size. */ +} *FLIPC_domain_errors_t; + +/* Structure for returning information about endpoints. */ +typedef struct FLIPC_endpoint_info { + FLIPC_endpoint_type_t type; + unsigned int processed_buffers_dropped_p; + unsigned long number_buffers; + FLIPC_epgroup_t epgroup; +} *FLIPC_endpoint_info_t; + +typedef struct FLIPC_epgroup_info { + unsigned long msgs_per_wakeup; +} *FLIPC_epgroup_info_t; + +#endif /* _MACH_FLIPC_TYPES_H_ */ diff --git a/osfmk/mach/host_info.h b/osfmk/mach/host_info.h new file mode 100644 index 000000000..728860232 --- /dev/null +++ b/osfmk/mach/host_info.h @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.29.1 1997/10/30 15:32:29 barbou + * Added HOST_CPU_LOAD_INFO flavor for host_statistics(). + * [1997/10/30 15:22:10 barbou] + * + * Revision 1.2.18.6 1996/01/09 19:21:44 devrcs + * Changed members of struct host_priority_info to type integer_t. + * Change HOST_RESOURCE_SIZES_COUNT and HOST_PRIORITY_INFO_COUNT + * to be in integer_t units, not int. host_info expects the count + * to be the number of integer_t's to copy. + * [1995/12/01 19:49:11 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:08:49 jfraser] + * + * Revision 1.2.18.5 1995/04/07 19:05:07 barbou + * VM Merge - Task Swapper. + * Add new host_paging_info flavor. + * [91/09/22 13:20:45 jeffc] + * [94/07/28 barbou] + * [95/03/10 barbou] + * + * Revision 1.2.18.4 1995/02/24 14:48:07 alanl + * Merged with DIPC2_SHARED. 
+ * [95/01/03 alanl] + * + * Revision 1.2.22.2 1994/10/14 03:50:38 dwm + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:42:37 dwm] + * + * Revision 1.2.18.2 1994/09/23 02:36:22 ezf + * change marker to not FREE + * [1994/09/22 21:39:41 ezf] + * + * Revision 1.2.18.1 1994/08/07 20:48:45 bolinger + * Merge up to colo_b7. + * [1994/08/01 21:01:21 bolinger] + * + * Revision 1.2.15.2 1994/06/25 03:47:01 dwm + * mk6 CR98 - add flavor interface typedefs (host_flavor_t). + * [1994/06/24 21:54:43 dwm] + * + * Revision 1.2.15.1 1994/02/24 19:05:22 rwd + * Add HOST_PRIORITY_INFO + * [94/02/22 rwd] + * + * Revision 1.2.4.6 1993/10/28 17:17:10 jeffc + * CR10039 -- Add flavor interface typedefs + * [1993/10/28 13:55:48 jeffc] + * + * Revision 1.2.20.1 1994/10/03 18:47:25 sjs + * Intel update: often used KERNEL_BOOTMAGIC_MAX. + * [94/09/22 sjs] + * + * Revision 1.2.4.7 1994/01/28 18:14:07 chasb + * Expand Copyrights + * [1994/01/27 20:04:11 chasb] + * + * Revision 1.2.4.6 1993/10/28 17:17:10 jeffc + * CR10039 -- Add flavor interface typedefs + * [1993/10/28 13:55:48 jeffc] + * + * Revision 1.2.4.5 1993/07/07 15:48:51 brezak + * Add host_info() flavor HOST_RESOURCE_SIZES and attendant struct. + * [1993/07/07 15:47:32 brezak] + * + * Revision 1.2.4.4 1993/06/29 17:54:26 brezak + * Remove HOST_PROCESSOR_SLOTS host_info flavor. Use host_processot_slots(). + * [1993/06/28 20:58:50 brezak] + * + * Revision 1.2.4.3 1993/06/15 20:28:11 brezak + * HOST_LOAD_INFO is now returned by host_statistics(). + * Add HOST_VM_INFO for host_statistics(). Include . + * [1993/06/14 14:18:40 brezak] + * + * Revision 1.1.5.2 1993/06/02 23:43:38 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:15:59 jeffc] + * + * Revision 1.2 1993/04/19 16:33:24 devrcs + * ansi C conformance changes + * [1993/02/02 18:53:04 david] + * + * Revision 1.1 1992/09/30 02:30:38 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4.2.1 92/01/09 18:44:17 jsb + * From durriya@ri.osf.org: defined kernel_boot_info_t. + * [92/01/08 15:01:53 jsb] + * + * Revision 2.4 91/05/14 16:51:48 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:31:58 mrt + * Changed to new Mach copyright + * [91/02/01 17:17:13 mrt] + * + * Revision 2.2 90/06/02 14:57:58 rpd + * Added HOST_LOAD_INFO and related definitions. + * [90/04/27 rpd] + * Created for new host/processor technology. + * [90/03/26 23:50:51 rpd] + * + * Cleanup changes. + * [89/08/02 dlb] + * Add sched_info flavor to return minimum times for use by + * external schedulers. + * [89/06/08 dlb] + * Added kernel_version type definitions. + * [88/12/02 dlb] + * + * Revision 2.4 89/10/15 02:05:31 rpd + * Minor cleanups. + * + * Revision 2.3 89/10/11 17:32:15 dlb + * Include mach/machine/vm_types.h instead of mach/vm_param.h + * [89/10/11 dlb] + * + * Revision 2.2 89/10/11 14:36:55 dlb + * Add sched_info flavor to return minimum times for use by + * external schedulers. + * [89/06/08 dlb] + * + * Added kernel_version type definitions. + * [88/12/02 dlb] + * + * 30-Nov-88 David Black (dlb) at Carnegie-Mellon University + * Created. 2 flavors so far: basic info, slot numbers. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: mach/host_info.h + * + * Definitions for host_info call. + */ + +#ifndef _MACH_HOST_INFO_H_ +#define _MACH_HOST_INFO_H_ + +#include +#include +#include +#include + +/* + * Generic information structure to allow for expansion. + */ +typedef integer_t *host_info_t; /* varying array of int. */ + +#define HOST_INFO_MAX (1024) /* max array size */ +typedef integer_t host_info_data_t[HOST_INFO_MAX]; + +#define KERNEL_VERSION_MAX (512) +typedef char kernel_version_t[KERNEL_VERSION_MAX]; + +#define KERNEL_BOOT_INFO_MAX (4096) +typedef char kernel_boot_info_t[KERNEL_BOOT_INFO_MAX]; + +#define KERNEL_BOOTMAGIC_MAX (8192) + +/* + * Currently defined information. 
+ */ +/* host_info() */ +typedef integer_t host_flavor_t; +#define HOST_BASIC_INFO 1 /* basic info */ +#define HOST_SCHED_INFO 3 /* scheduling info */ +#define HOST_RESOURCE_SIZES 4 /* kernel struct sizes */ +#define HOST_PRIORITY_INFO 5 /* priority information */ +#define HOST_SEMAPHORE_TRAPS 7 /* Has semaphore traps - temporary */ + +struct host_basic_info { + integer_t max_cpus; /* max number of cpus possible */ + integer_t avail_cpus; /* number of cpus now available */ + vm_size_t memory_size; /* size of memory in bytes */ + cpu_type_t cpu_type; /* cpu type */ + cpu_subtype_t cpu_subtype; /* cpu subtype */ +}; + +typedef struct host_basic_info host_basic_info_data_t; +typedef struct host_basic_info *host_basic_info_t; +#define HOST_BASIC_INFO_COUNT \ + (sizeof(host_basic_info_data_t)/sizeof(integer_t)) + +struct host_sched_info { + integer_t min_timeout; /* minimum timeout in milliseconds */ + integer_t min_quantum; /* minimum quantum in milliseconds */ +}; + +typedef struct host_sched_info host_sched_info_data_t; +typedef struct host_sched_info *host_sched_info_t; +#define HOST_SCHED_INFO_COUNT \ + (sizeof(host_sched_info_data_t)/sizeof(integer_t)) + +struct kernel_resource_sizes { + vm_size_t task; + vm_size_t thread; + vm_size_t port; + vm_size_t memory_region; + vm_size_t memory_object; +}; + +typedef struct kernel_resource_sizes kernel_resource_sizes_data_t; +typedef struct kernel_resource_sizes *kernel_resource_sizes_t; +#define HOST_RESOURCE_SIZES_COUNT \ + (sizeof(kernel_resource_sizes_data_t)/sizeof(integer_t)) + +struct host_priority_info { + integer_t kernel_priority; + integer_t system_priority; + integer_t server_priority; + integer_t user_priority; + integer_t depress_priority; + integer_t idle_priority; + integer_t minimum_priority; + integer_t maximum_priority; +}; + +typedef struct host_priority_info host_priority_info_data_t; +typedef struct host_priority_info *host_priority_info_t; +#define HOST_PRIORITY_INFO_COUNT \ + 
(sizeof(host_priority_info_data_t)/sizeof(integer_t)) + +/* host_statistics() */ +#define HOST_LOAD_INFO 1 /* System loading stats */ +#define HOST_VM_INFO 2 /* Virtual memory stats */ +#define HOST_CPU_LOAD_INFO 3 /* CPU load stats */ + +struct host_load_info { + integer_t avenrun[3]; /* scaled by LOAD_SCALE */ + integer_t mach_factor[3]; /* scaled by LOAD_SCALE */ +}; + +typedef struct host_load_info host_load_info_data_t; +typedef struct host_load_info *host_load_info_t; +#define HOST_LOAD_INFO_COUNT \ + (sizeof(host_load_info_data_t)/sizeof(integer_t)) + +/* in */ +#define HOST_VM_INFO_COUNT \ + (sizeof(vm_statistics_data_t)/sizeof(integer_t)) + +struct host_cpu_load_info { /* number of ticks while running... */ + unsigned long cpu_ticks[CPU_STATE_MAX]; /* ... in the given mode */ +}; +typedef struct host_cpu_load_info host_cpu_load_info_data_t; +typedef struct host_cpu_load_info *host_cpu_load_info_t; +#define HOST_CPU_LOAD_INFO_COUNT \ + (sizeof (host_cpu_load_info_data_t) / sizeof (integer_t)) + +struct host_paging_info { + time_value_t sample_time; /* (TOD) time sample taken */ + time_value_t reset_time; /* (TOD) time at last reset */ + + /* Information about page queues */ + long pagesize; /* page size in bytes */ + long free_count; /* # of pages free */ + long active_count; /* # of pages active */ + long inactive_count; /* # of pages inactive */ + long wire_count; /* # of pages wired down */ + + /* Information about page faults */ + long faults; /* # of faults */ + long zero_fill_count; /* # of zero fill pages */ + long pageins; /* # of faults resulting in pageins */ + long pages_pagedin; /* # of pages paged in */ + long cow_faults; /* # of copy-on-write faults */ + long reactivations; /* # of pages reactivated */ + + /* Information about object cache performance */ + long lookups; /* object cache lookups */ + long hits; /* object cache hits */ + + /* Information about page replacement algorithm */ + long pageouts; /* # of pageout operations (clusters) */ 
+ long pages_pagedout; /* # of pages paged out */ +}; + +typedef struct host_paging_info host_paging_info_data_t; +typedef struct host_paging_info *host_paging_info_t; +#define HOST_PAGING_INFO_COUNT \ + (sizeof(host_paging_info_data_t)/sizeof(int)) + +#endif /* _MACH_HOST_INFO_H_ */ diff --git a/osfmk/mach/host_priv.defs b/osfmk/mach/host_priv.defs new file mode 100644 index 000000000..244c38753 --- /dev/null +++ b/osfmk/mach/host_priv.defs @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Matchmaker definitions file for Mach kernel interface. + */ +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + host_priv 400; + +#ifdef KERNEL_USER +userprefix r_; +#endif /* KERNEL_USER */ + +#include +#include +#include +#include + +/* + * Get boot configuration information from kernel. + */ +routine host_get_boot_info( + host_priv : host_priv_t; + out boot_info : kernel_boot_info_t); + +/* + * Reboot this host. + * Only available to privileged users. + */ +routine host_reboot( + host_priv : host_priv_t; + options : int); + + +/* + * Return privileged statistics from this host. + */ +routine host_priv_statistics( + host_priv : host_priv_t; + flavor : host_flavor_t; + out host_info_out : host_info_t, CountInOut); + +/* + * Sets the default memory manager, the port to which + * newly-created temporary memory objects are delivered. + * [See (memory_object_default)memory_object_create.] + * Also sets the default cluster size used for pagein/pageout + * to this port. + * The old memory manager port is returned. 
 + */ +routine host_default_memory_manager( + host_priv : host_priv_t; + inout default_manager : memory_object_default_t = + MACH_MSG_TYPE_MAKE_SEND; + cluster_size : vm_size_t); + + +/* + * Specify that the range of the virtual address space + * of the target task must not cause page faults for + * the indicated accesses. + * + * [ To unwire the pages, specify VM_PROT_NONE. ] + */ +routine vm_wire( + host_priv : host_priv_t; + task : vm_map_t; + address : vm_address_t; + size : vm_size_t; + access : vm_prot_t); + +/* + * Specify that the target thread must always be able + * to run and to allocate memory. + */ +routine thread_wire( + host_priv : host_priv_t; + thread : thread_act_t; + wired : boolean_t); + +/* + * Allocate zero-filled, wired, contiguous physical memory + * in the address space of the target task, either at the + * specified address, or wherever space can be found (if + * anywhere is TRUE), of the specified size. The address + * at which the allocation actually took place is returned. + * All pages will be entered into the task's pmap immediately, + * with VM_PROT_ALL. + * + * In addition to all the failure modes of its cousin, + * vm_allocate, this call may also fail if insufficient + * contiguous memory exists to satisfy the request. + * + * Memory obtained from this call should be freed the + * normal way, via vm_deallocate. + * + * N.B. This is an EXPERIMENTAL interface! + */ +routine vm_allocate_cpm( + host_priv : host_priv_t; + task : vm_map_t; + inout address : vm_address_t; + size : vm_size_t; + anywhere : boolean_t); + +/* + * Get list of processors on this host. + */ +routine host_processors( + host_priv : host_priv_t; + out processor_list : processor_array_t); + + +/* + * Get control port for a clock. + * Privileged. 
+ */ +routine host_get_clock_control( + host_priv : host_priv_t; + clock_id : clock_id_t; + out clock_ctrl : clock_ctrl_t); + + +/* + * kernel module interface + * + * see mach/kmod.h: + */ +routine kmod_create( + host_priv : host_priv_t; + info : vm_address_t; + out module : kmod_t); + +routine kmod_destroy( + host_priv : host_priv_t; + module : kmod_t); + +routine kmod_control( + host_priv : host_priv_t; + module : kmod_t; + flavor : kmod_control_flavor_t; + inout data : kmod_args_t); + +/* + * Get a given special port for a given node. + * Norma special ports are defined in norma_special_ports.h; + * examples include the master device port. + * There are a limited number of slots available for system servers. + */ +routine host_get_special_port( + host_priv : host_priv_t; + node : int; + which : int; + out port : mach_port_t); + +/* + * Set a given special port for a given node. + * See norma_get_special_port. + */ +routine host_set_special_port( + host_priv : host_priv_t; + which : int; + port : mach_port_t); + +/* + * Set an exception handler for a host on one or more exception types. + * These handlers are invoked for all threads on the host if there are + * no task or thread-specific exception handlers or those handlers returned + * an error. + */ +routine host_set_exception_ports( + host_priv : host_priv_t; + exception_mask : exception_mask_t; + new_port : mach_port_t; + behavior : exception_behavior_t; + new_flavor : thread_state_flavor_t); + + +/* + * Lookup some of the old exception handlers for a host + */ +routine host_get_exception_ports( + host_priv : host_priv_t; + exception_mask : exception_mask_t; + out masks : exception_mask_array_t; + out old_handlers : exception_handler_array_t, SameCount; + out old_behaviors : exception_behavior_array_t, SameCount; + out old_flavors : exception_flavor_array_t, SameCount); + + +/* + * Set an exception handler for a host on one or more exception types. 
+ * At the same time, return the previously defined exception handlers for + * those types. + */ +routine host_swap_exception_ports( + host_priv : host_priv_t; + exception_mask : exception_mask_t; + new_port : mach_port_t; + behavior : exception_behavior_t; + new_flavor : thread_state_flavor_t; + out masks : exception_mask_array_t; + out old_handlerss : exception_handler_array_t, SameCount; + out old_behaviors : exception_behavior_array_t, SameCount; + out old_flavors : exception_flavor_array_t, SameCount); + +/* + * Loads a symbol table for an external file into the kernel debugger. + * The symbol table data is an array of characters. It is assumed that + * the caller and the kernel debugger agree on its format. + * This call is only supported in MACH_DEBUG and MACH_KDB kernels, + * otherwise KERN_FAILURE is returned. + */ +routine host_load_symbol_table( + host : host_priv_t; + task : task_t; + name : symtab_name_t; + symtab : pointer_t); + +/* + * Make the target task swappable or unswappable. + */ +routine task_swappable( + host_priv : host_priv_t; + target_task : task_t; + make_swappable : boolean_t); + +/* + * JMM - Keep all processor_set related items at the end for easy + * removal. + */ +/* + * List all processor sets on host. + */ +routine host_processor_sets( + host_priv : host_priv_t; + out processor_sets : processor_set_name_array_t); + +/* + * Get control port for a processor set. + */ +routine host_processor_set_priv( + host_priv : host_priv_t; + set_name : processor_set_name_t; + out set : processor_set_t); +/* + * Set the dynamic_pager control port. Other entities + * can request a send right to this port to talk with + * the dynamic_pager utility, setting behavioral parameters + * within the dynamic pager and getting low/high backing store + * resource notifications. + */ +routine set_dp_control_port( + host : host_priv_t; + in control_port : mach_port_t); + +/* + * Get the dynamic_pager control port. 
This port + * allows the holder to talk directly with the dynamic + * pager utility. + */ +routine get_dp_control_port( + host : host_priv_t; + out contorl_port :mach_port_t); + +/* + * Set the UserNotification daemon access port for this host. + * If this value is already set, the kernel will discard its + * reference to the previously registered port. + */ +routine host_set_UNDServer( + host : host_priv_t; + in server : UNDServerRef); + +/* + * Get the UserNotification daemon access port for this host. + * This can then be used to communicate with that daemon, which + * in turn communicates with the User through whatever means + * available (pop-up-menus for GUI systems, text for non-GUI, etc..). + * + * Access to this port is restricted to privileged clients because + * it is a special purpose port intended for kernel clients. User + * level clients should go directly to the CFUserNotifcation services. + */ +routine host_get_UNDServer( + host : host_priv_t; + out server : UNDServerRef); + diff --git a/osfmk/mach/host_reboot.h b/osfmk/mach/host_reboot.h new file mode 100644 index 000000000..acb4f643f --- /dev/null +++ b/osfmk/mach/host_reboot.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/01/06 19:50:20 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:10 dwm] + * + * Revision 1.1.2.2 1993/10/20 19:09:29 gm + * CR9913: Replacement for flags used in host_reboot() + * calls. + * [1993/10/13 17:21:14 gm] + * + * $EndLog$ + */ + +#define HOST_REBOOT_HALT 0x8 +#define HOST_REBOOT_DEBUGGER 0x1000 diff --git a/osfmk/mach/host_security.defs b/osfmk/mach/host_security.defs new file mode 100644 index 000000000..ee9a8a470 --- /dev/null +++ b/osfmk/mach/host_security.defs @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: mach/host_security.defs + * + * Abstract: + * Mach host security operations support. 
+ */ +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + host_security 600; + +/* + * Basic types + */ +#include +#include + + +/* + * Create a new task with an explicit security token + */ +routine host_security_create_task_token( + host_security : host_security_t; + parent_task : task_t; + sec_token : security_token_t; + host : host_t; + ledgers : ledger_array_t; + inherit_memory : boolean_t; + out child_task : task_t); + +/* + * Change a task's security token + */ +routine host_security_set_task_token( + host_security : host_security_t; + target_task : task_t; + sec_token : security_token_t; + host : host_t); + diff --git a/osfmk/mach/i386/Makefile b/osfmk/mach/i386/Makefile new file mode 100644 index 000000000..c31a0f391 --- /dev/null +++ b/osfmk/mach/i386/Makefile @@ -0,0 +1,57 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +VPATH+=$(SOURCE)/../../i386: + +MIG_DEFS = mach_i386.defs + +MIGINCLUDES = mach_i386_server.h + +DATAFILES = \ + boolean.h exception.h fp_reg.h flipc_dep.h \ + processor_info.h kern_return.h mach_i386_types.h ndr_def.h syscall_sw.h \ + thread_status.h thread_state.h vm_param.h \ + vm_types.h rpc.h \ + machine_types.defs ${MIG_DEFS} + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_GEN_LIST = \ + asm.h \ + ${MIGINCLUDES} + +INSTALL_MD_DIR = mach/i386 + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_GEN_LIST = \ + asm.h \ + ${MIGINCLUDES} + +EXPORT_MD_DIR = mach/i386 + +.ORDER: ${MIG_HDRS} ${MIGINCLUDES} + +COMP_FILES = mach_i386_server.h mach_i386_server.c + +.ORDER: mach_i386_server.h mach_i386_server.c + +MIGKSFLAGS = -DKERNEL_SERVER + +mach_i386_server.h mach_i386_server.c: mach_i386.defs + ${MIG} ${MIGFLAGS} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader mach_i386_server.h \ + 
-server mach_i386_server.c \ + $< + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/mach/i386/boolean.h b/osfmk/mach/i386/boolean.h new file mode 100644 index 000000000..0ec6b1893 --- /dev/null +++ b/osfmk/mach/i386/boolean.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:36:44 ezf + * change marker to not FREE + * [1994/09/22 21:39:49 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:40:19 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:16:03 jeffc] + * + * Revision 1.2 1993/04/19 16:33:37 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:53 david] + * + * Revision 1.1 1992/09/30 02:30:40 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:51:56 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:04 mrt + * Changed to new Mach copyright + * [91/02/01 17:09:33 mrt] + * + * Revision 2.2 90/05/03 15:47:26 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:19:36 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:00:41 gm0w + * Changes for cleanup. + * + * 24-Sep-86 Michael Young (mwyoung) at Carnegie-Mellon University + * Created. + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: boolean.h + * + * Boolean type, for I386. 
+ */ + +#ifndef _MACH_I386_BOOLEAN_H_ +#define _MACH_I386_BOOLEAN_H_ + +typedef int boolean_t; + +#endif /* _MACH_I386_BOOLEAN_H_ */ diff --git a/osfmk/mach/i386/exception.h b/osfmk/mach/i386/exception.h new file mode 100644 index 000000000..b96b76def --- /dev/null +++ b/osfmk/mach/i386/exception.h @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.8.5 1995/04/07 19:05:14 barbou + * Backed out previous submission. + * [95/03/29 barbou] + * + * Revision 1.2.8.4 1995/03/15 17:19:29 bruel + * EXC_TYPES_COUNT is machine independant. + * (the machine exception type is given in the code argument). 
+ * [95/03/06 bruel] + * + * Revision 1.2.8.3 1995/01/10 05:16:18 devrcs + * mk6 CR801 - merge up from nmk18b4 to nmk18b7 + * * Rev 1.2.6.3 1994/11/08 21:53:17 rkc + * Incremented the number of exception types to reflect the addition + * of the alert exception. + * [1994/12/09 21:11:21 dwm] + * + * Revision 1.2.8.1 1994/09/23 02:36:53 ezf + * change marker to not FREE + * [1994/09/22 21:39:53 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:40:25 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:07 jeffc] + * + * Revision 1.2 1993/04/19 16:33:44 devrcs + * changes for EXC_MACH_SYSCALL + * [1993/04/05 12:06:25 david] + * + * make endif tags ansi compliant/include files + * [1993/02/20 21:44:18 david] + * + * Updated to new exception interface. + * [1992/12/23 13:05:21 david] + * + * Revision 1.1 1992/09/30 02:30:41 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:52:05 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:08 mrt + * Changed to new Mach copyright + * [91/02/01 17:09:45 mrt] + * + * Revision 2.2 90/05/03 15:47:38 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:19:42 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:00:47 gm0w + * Changes for cleanup. + * + * 31-Dec-88 Robert Baron (rvb) at Carnegie-Mellon University + * Derived from MACH2.0 vax release. + * + * 2-Nov-87 David Golub (dbg) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_I386_EXCEPTION_H_ +#define _MACH_I386_EXCEPTION_H_ + +/* + * No machine dependent types for the 80386 + */ + +#define EXC_TYPES_COUNT 10 /* incl. illegal exception 0 */ + +/* + * Codes and subcodes for 80386 exceptions. + */ + +#define EXCEPTION_CODE_MAX 2 /* currently code and subcode */ + +/* + * EXC_BAD_INSTRUCTION + */ + +#define EXC_I386_INVOP 1 + +/* + * EXC_ARITHMETIC + */ + +#define EXC_I386_DIV 1 +#define EXC_I386_INTO 2 +#define EXC_I386_NOEXT 3 +#define EXC_I386_EXTOVR 4 +#define EXC_I386_EXTERR 5 +#define EXC_I386_EMERR 6 +#define EXC_I386_BOUND 7 + +/* + * EXC_SOFTWARE + */ + +/* + * EXC_BAD_ACCESS + */ + +/* + * EXC_BREAKPOINT + */ + +#define EXC_I386_SGL 1 +#define EXC_I386_BPT 2 + +#define EXC_I386_DIVERR 0 /* divide by 0 eprror */ +#define EXC_I386_SGLSTP 1 /* single step */ +#define EXC_I386_NMIFLT 2 /* NMI */ +#define EXC_I386_BPTFLT 3 /* breakpoint fault */ +#define EXC_I386_INTOFLT 4 /* INTO overflow fault */ +#define EXC_I386_BOUNDFLT 5 /* BOUND instruction fault */ +#define EXC_I386_INVOPFLT 6 /* invalid opcode fault */ +#define EXC_I386_NOEXTFLT 7 /* extension not available fault*/ +#define EXC_I386_DBLFLT 8 /* double fault */ +#define EXC_I386_EXTOVRFLT 9 /* extension overrun fault */ +#define EXC_I386_INVTSSFLT 10 /* invalid TSS fault */ +#define EXC_I386_SEGNPFLT 11 /* segment not present fault */ +#define EXC_I386_STKFLT 12 /* stack fault */ 
+#define EXC_I386_GPFLT 13 /* general protection fault */ +#define EXC_I386_PGFLT 14 /* page fault */ +#define EXC_I386_EXTERRFLT 16 /* extension error fault */ +#define EXC_I386_ALIGNFLT 17 /* Alignment fault */ +#define EXC_I386_ENDPERR 33 /* emulated extension error flt */ +#define EXC_I386_ENOEXTFLT 32 /* emulated ext not present */ + + +/* + * machine dependent exception masks + */ +#define EXC_MASK_MACHINE 0 + + +#endif /* _MACH_I386_EXCEPTION_H_ */ diff --git a/osfmk/mach/i386/flipc_dep.h b/osfmk/mach/i386/flipc_dep.h new file mode 100644 index 000000000..7aa606390 --- /dev/null +++ b/osfmk/mach/i386/flipc_dep.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/06/13 18:20:42 sjs + * Merge from flipc_shared. 
+ * [95/06/07 sjs] + * + * Revision 1.1.3.3 1995/02/21 17:23:16 randys + * Re-indented code to four space indentation + * [1995/02/21 16:26:50 randys] + * + * Revision 1.1.3.2 1994/12/20 19:02:12 randys + * Moved definition of flipc_simple_lock to flipc_cb.h + * [1994/12/20 17:35:15 randys] + * + * Moved the machine independent macros into mach/flipc_locks.h + * [1994/12/20 16:44:14 randys] + * + * Added filename in comment at top of file + * [1994/12/19 20:29:36 randys] + * + * Fixed incorrect return of lock_try + * [1994/12/13 00:36:46 randys] + * + * Revision 1.1.3.1 1994/12/12 17:46:29 randys + * Putting initial flipc implementation under flipc_shared + * [1994/12/12 16:27:51 randys] + * + * Revision 1.1.1.2 1994/12/11 23:08:36 randys + * Initial flipc code checkin. + * + * $EndLog$ + */ + +/* + * mach/i386/flipc_dep.h + * + * This file will have all of the FLIPC implementation machine dependent + * defines that need to be visible to both kernel and AIL (eg. bus locks + * and bus synchronization primitives). + */ + +#ifndef _MACH_FLIPC_DEP_H_ +#define _MACH_FLIPC_DEP_H_ + +/* For the 386, we don't need to wrap synchronization variable writes + at all. */ +#define SYNCVAR_WRITE(statement) statement + +/* And similarly (I believe; check on this), for the 386 there isn't any + requirement for write fences. */ +#define WRITE_FENCE() + +/* + * Flipc simple lock defines. These are almost completely for the use + * of the AIL; the reason they are in this file is that they need to + * be initialized properly in the communications buffer initialization + * routine. Sigh. Note in particular that the kernel has no defined + * "simple_lock_yield_function", so it had better never expand the + * macro simple_lock_acquire. + * + * These locks may be declared by "flipc_simple_lock lock;". If they + * are instead declared by FLIPC_DECL_SIMPLE_LOCK(class,lockname) they + * may be used without initialization. 
+ */ + +#define SIMPLE_LOCK_INITIALIZER 0 +#define FLIPC_DECL_SIMPLE_LOCK(class,lockname) \ +class flipc_simple_lock (lockname) = SIMPLE_LOCK_INITIALIZER + +/* + * Lower case because they may be macros or functions. + * I'll include the function prototypes just for examples here. + */ + +#define flipc_simple_lock_init(lock) \ +do { \ + *(lock) = SIMPLE_LOCK_INITIALIZER; \ +} while (0) + +/* + * Defines of the actual routines, for gcc. + */ + +#define flipc_simple_lock_locked(lock) ((*lock) != SIMPLE_LOCK_INITIALIZER) + +#ifdef __GNUC__ + extern __inline__ int flipc_simple_lock_try(flipc_simple_lock *lock) +{ + int r; + __asm__ volatile("movl $1, %0; xchgl %0, %1" : "=&r" (r), "=m" (*lock)); + return !r; +} + +/* I don't know why this requires an ASM, but I'll follow the leader. */ +extern __inline__ void flipc_simple_lock_release(flipc_simple_lock *lock) +{ + register int t; + + __asm__ volatile("xorl %0, %0; xchgl %0, %1" : "=&r" (t), "=m" (*lock)); +} +#else /* __GNUC__ */ +/* If we aren't compiling with gcc, the above need to be functions. */ +#endif /* __GNUC__ */ + +#endif /* _MACH_FLIPC_DEP_H_ */ diff --git a/osfmk/mach/i386/fp_reg.h b/osfmk/mach/i386/fp_reg.h new file mode 100644 index 000000000..39b6c761c --- /dev/null +++ b/osfmk/mach/i386/fp_reg.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:37:03 ezf + * change marker to not FREE + * [1994/09/22 21:39:57 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:40:30 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:11 jeffc] + * + * Revision 1.2 1993/04/19 16:33:51 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:01 david] + * + * Revision 1.1 1992/09/30 02:30:43 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.1.1.1.2.1 92/03/03 16:21:23 jeffreyh + * Merged up to Trunk + * [92/02/26 jeffreyh] + * + * Revision 2.4 92/02/26 13:10:29 elf + * Added stupid alaises to make i386/fpu.c compile. RVB will fix. + * + * [92/02/26 elf] + * + * Revision 2.3 92/02/26 12:47:46 elf + * Installed from i386 directory. + * [92/02/26 danner] + * + * + * Revision 2.2 92/01/03 20:19:47 dbg + * Move this file to mach/i386. Add FP_NO..FP_387 codes for + * floating-point processor status. Error bits in control + * register are masks, not enables. + * [91/10/19 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1992-1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _I386_FP_SAVE_H_ +#define _I386_FP_SAVE_H_ +/* + * Floating point registers and status, as saved + * and restored by FP save/restore instructions. 
+ */ +struct i386_fp_save { + unsigned short fp_control; /* control */ + unsigned short fp_unused_1; + unsigned short fp_status; /* status */ + unsigned short fp_unused_2; + unsigned short fp_tag; /* register tags */ + unsigned short fp_unused_3; + unsigned int fp_eip; /* eip at failed instruction */ + unsigned short fp_cs; /* cs at failed instruction */ + unsigned short fp_opcode; /* opcode of failed instruction */ + unsigned int fp_dp; /* data address */ + unsigned short fp_ds; /* data segment */ + unsigned short fp_unused_4; +}; + +struct i386_fp_regs { + unsigned short fp_reg_word[5][8]; + /* space for 8 80-bit FP registers */ +}; + +/* + * Control register + */ +#define FPC_IE 0x0001 /* enable invalid operation + exception */ +#define FPC_IM FPC_IE +#define FPC_DE 0x0002 /* enable denormalized operation + exception */ +#define FPC_DM FPC_DE +#define FPC_ZE 0x0004 /* enable zero-divide exception */ +#define FPC_ZM FPC_ZE +#define FPC_OE 0x0008 /* enable overflow exception */ +#define FPC_OM FPC_OE +#define FPC_UE 0x0010 /* enable underflow exception */ +#define FPC_PE 0x0020 /* enable precision exception */ +#define FPC_PC 0x0300 /* precision control: */ +#define FPC_PC_24 0x0000 /* 24 bits */ +#define FPC_PC_53 0x0200 /* 53 bits */ +#define FPC_PC_64 0x0300 /* 64 bits */ +#define FPC_RC 0x0c00 /* rounding control: */ +#define FPC_RC_RN 0x0000 /* round to nearest or even */ +#define FPC_RC_RD 0x0400 /* round down */ +#define FPC_RC_RU 0x0800 /* round up */ +#define FPC_RC_CHOP 0x0c00 /* chop */ +#define FPC_IC 0x1000 /* infinity control (obsolete) */ +#define FPC_IC_PROJ 0x0000 /* projective infinity */ +#define FPC_IC_AFF 0x1000 /* affine infinity (std) */ + +/* + * Status register + */ +#define FPS_IE 0x0001 /* invalid operation */ +#define FPS_DE 0x0002 /* denormalized operand */ +#define FPS_ZE 0x0004 /* divide by zero */ +#define FPS_OE 0x0008 /* overflow */ +#define FPS_UE 0x0010 /* underflow */ +#define FPS_PE 0x0020 /* precision */ +#define FPS_SF 
0x0040 /* stack flag */ +#define FPS_ES 0x0080 /* error summary */ +#define FPS_C0 0x0100 /* condition code bit 0 */ +#define FPS_C1 0x0200 /* condition code bit 1 */ +#define FPS_C2 0x0400 /* condition code bit 2 */ +#define FPS_TOS 0x3800 /* top-of-stack pointer */ +#define FPS_TOS_SHIFT 11 +#define FPS_C3 0x4000 /* condition code bit 3 */ +#define FPS_BUSY 0x8000 /* FPU busy */ + +/* + * Kind of floating-point support provided by kernel. + */ +#define FP_NO 0 /* no floating point */ +#define FP_SOFT 1 /* software FP emulator */ +#define FP_287 2 /* 80287 */ +#define FP_387 3 /* 80387 or 80486 */ + +#endif /* _I386_FP_SAVE_H_ */ diff --git a/osfmk/mach/i386/kern_return.h b/osfmk/mach/i386/kern_return.h new file mode 100644 index 000000000..d317340c0 --- /dev/null +++ b/osfmk/mach/i386/kern_return.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:37:12 ezf + * change marker to not FREE + * [1994/09/22 21:40:01 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:40:35 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:15 jeffc] + * + * Revision 1.2 1993/04/19 16:33:58 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:09 david] + * + * Revision 1.1 1992/09/30 02:30:47 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:52:15 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:12 mrt + * Changed to new Mach copyright + * [91/02/01 17:09:54 mrt] + * + * Revision 2.2 90/05/03 15:47:51 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:19:48 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:00:54 gm0w + * Changes for cleanup. + * + * 3-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University + * Allow inclusion in assembler input. + * + * 14-Oct-85 Michael Wayne Young (mwyoung) at Carnegie-Mellon University + * Created. + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: kern_return.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Machine-dependent kernel return definitions. + */ + +#ifndef _MACH_I386_KERN_RETURN_H_ +#define _MACH_I386_KERN_RETURN_H_ + +#ifndef ASSEMBLER +typedef int kern_return_t; +#endif /* ASSEMBLER */ +#endif /* _MACH_I386_KERN_RETURN_H_ */ diff --git a/osfmk/mach/i386/mach_i386.defs b/osfmk/mach/i386/mach_i386.defs new file mode 100644 index 000000000..c29abf0b0 --- /dev/null +++ b/osfmk/mach/i386/mach_i386.defs @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.9.1 1994/09/23 02:37:21 ezf + * change marker to not FREE + * [1994/09/22 21:40:05 ezf] + * + * Revision 1.2.6.2 1994/03/17 22:38:39 dwm + * The infamous name change: thread_activation + thread_shuttle = thread. + * [1994/03/17 21:28:20 dwm] + * + * Revision 1.2.6.1 1994/01/12 17:56:07 dwm + * Coloc: initial restructuring to follow Utah model. + * [1994/01/12 17:30:21 dwm] + * + * Revision 1.2.2.3 1993/09/10 13:08:26 rod + * Zap obsolete MACH_IPC_TYPED conditional code. + * [1993/08/31 11:29:37 rod] + * + * Revision 1.2.2.2 1993/06/09 02:40:40 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:18 jeffc] + * + * Revision 1.2 1993/04/19 16:34:07 devrcs + * Merge untyped ipc: + * Introducing new MIG syntax for Untyped IPC (via compile option + * MACH_IPC_TYPED) + * [1993/02/17 23:44:54 travos] + * + * Revision 1.1 1992/09/30 02:22:34 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.2.3.1 92/03/03 16:21:26 jeffreyh + * Changes from TRUNK + * [92/02/26 11:58:37 jeffreyh] + * + * Revision 2.3 92/01/03 20:19:56 dbg + * Renamed io_map to io_port_add. Added io_port_remove, + * io_port_list, set_ldt, get_ldt. + * [91/08/20 dbg] + * + * Revision 2.2 91/07/31 17:51:57 dbg + * Created. + * [91/07/30 17:09:09 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Special functions for i386. + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + mach_i386 3800; + +#include <mach/std_types.defs> +#include <mach/mach_types.defs> +#include <device/device_types.defs> + +type device_list_t = ^array[] of device_t; + +type descriptor_t = struct[2] of int; +type descriptor_list_t = array[*] of descriptor_t; + +#undef i386 /* XXX! 
*/ +import ; + +routine i386_io_port_add( + target_act : thread_act_t; + device : device_t); + +routine i386_io_port_remove( + target_act : thread_act_t; + device : device_t); + +routine i386_io_port_list( + target_act : thread_act_t; + out device_list : device_list_t); + +routine i386_set_ldt( + target_act : thread_act_t; + first_selector : int; + desc_list : descriptor_list_t +); + +routine i386_get_ldt( + target_act : thread_act_t; + first_selector : int; + selector_count : int; + out desc_list : descriptor_list_t); diff --git a/osfmk/mach/i386/mach_i386_types.h b/osfmk/mach/i386/mach_i386_types.h new file mode 100644 index 000000000..b2023f05e --- /dev/null +++ b/osfmk/mach/i386/mach_i386_types.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 02:37:30 ezf + * change marker to not FREE + * [1994/09/22 21:40:09 ezf] + * + * Revision 1.1.2.2 1993/06/02 23:44:21 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:21 jeffc] + * + * Revision 1.1 1992/09/30 02:30:48 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.1.1.1.2.1 92/03/03 16:21:41 jeffreyh + * New file from TRUNK + * [92/02/26 11:59:15 jeffreyh] + * + * Revision 2.2 92/01/03 20:20:01 dbg + * Created. + * [91/08/20 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Type definitions for i386 interface routines. 
+ */ + +#ifndef _MACH_MACH_I386_TYPES_H_ +#define _MACH_MACH_I386_TYPES_H_ + +/* + * Array of devices. + */ +typedef device_t *device_list_t; + +/* + * i386 segment descriptor. + */ +struct descriptor { + unsigned int low_word; + unsigned int high_word; +}; + +typedef struct descriptor descriptor_t; +typedef struct descriptor *descriptor_list_t; + +#endif /* _MACH_MACH_I386_TYPES_H_ */ diff --git a/osfmk/mach/i386/machine_types.defs b/osfmk/mach/i386/machine_types.defs new file mode 100644 index 000000000..7a4258c61 --- /dev/null +++ b/osfmk/mach/i386/machine_types.defs @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/01/06 19:50:37 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/11/02 18:18:29 dwm] + * + * Revision 1.1.2.1 1994/05/06 18:54:50 tmt + * New Mig definition file to describe required machine types. + * [1994/05/05 21:08:57 tmt] + * + * $EndLog$ + */ +/* + * Header file for basic, machine-dependent data types. i386 version. + */ + +#ifndef _MACHINE_VM_TYPES_DEFS_ +#define _MACHINE_VM_TYPES_DEFS_ 1 + +/* + * A natural_t is the type for the native + * integer type, e.g. 32 or 64 or.. whatever + * register size the machine has. Unsigned, it is + * used for entities that might be either + * unsigned integers or pointers, and for + * type-casting between the two. + * For instance, the IPC system represents + * a port in user space as an integer and + * in kernel space as a pointer. + */ +type natural_t = unsigned32; + +/* + * An integer_t is the signed counterpart + * of the natural_t type. Both types are + * only supposed to be used to define + * other types in a machine-independent + * way. + */ +type integer_t = int32; + + +#if MACH_IPC_COMPAT +/* + * For the old IPC interface + */ +#define MSG_TYPE_PORT_NAME MACH_MSG_TYPE_INTEGER_32 + +#endif /* MACH_IPC_COMPAT */ + + +#endif /* _MACHINE_VM_TYPES_DEFS_ */ diff --git a/osfmk/mach/i386/ndr_def.h b/osfmk/mach/i386/ndr_def.h new file mode 100644 index 000000000..7a36022f1 --- /dev/null +++ b/osfmk/mach/i386/ndr_def.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 01:59:33 ezf + * change marker to not FREE + * [1994/09/22 21:25:24 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:29:06 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:06:33 jeffc] + * + * Revision 1.2 1993/04/19 16:15:32 devrcs + * Untyped ipc merge: + * New names for the fields - the structure isn't changed + * [1993/03/12 23:01:28 travos] + * Extended NDR record to include version number(s) + * [1993/03/05 23:09:51 travos] + * It initializes the NDR record. 
Included also by libmach + * [1993/02/17 21:58:01 travos] + * [1993/03/16 13:42:33 rod] + * + * $EndLog$ + */ + + +/* NDR record for Intel x86s */ + +#include + +NDR_record_t NDR_record = { + 0, /* mig_reserved */ + 0, /* mig_reserved */ + 0, /* mig_reserved */ + NDR_PROTOCOL_2_0, + NDR_INT_LITTLE_ENDIAN, + NDR_CHAR_ASCII, + NDR_FLOAT_IEEE, + 0, +}; diff --git a/osfmk/mach/i386/processor_info.h b/osfmk/mach/i386/processor_info.h new file mode 100644 index 000000000..8709e475a --- /dev/null +++ b/osfmk/mach/i386/processor_info.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: mach/i386/processor_info.h + * + * Data structure definitions for i386 specific processor control + */ + +#ifndef _MACH_I386_PROCESSOR_INFO_H_ +#define _MACH_I386_PROCESSOR_INFO_H_ + + +#endif /* _MACH_I386_PROCESSOR_INFO_H_ */ diff --git a/osfmk/mach/i386/rpc.h b/osfmk/mach/i386/rpc.h new file mode 100644 index 000000000..002549ae8 --- /dev/null +++ b/osfmk/mach/i386/rpc.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _MACH_I386_RPC_H_ +#define _MACH_I386_RPC_H_ + +/* + * Just temporary until all vestiges of short-circuit can be + * removed. + */ +#define CAN_SHCIRCUIT(name) (0) + +/* + * Kernel machine dependent macros for mach rpc + * + * User args (argv) begin two words above the frame pointer (past saved ebp + * and return address) on the user stack. Return code is stored in register + * ecx, by convention (must be a caller-saves register, to survive return + * from server work function). The user space instruction pointer is eip, + * and the user stack pointer is uesp. 
+ */ +#define MACH_RPC_ARGV(act) ( (char *)(USER_REGS(act)->ebp + 8) ) +#define MACH_RPC_RET(act) ( USER_REGS(act)->ecx ) +#define MACH_RPC_FUNC(act) ( USER_REGS(act)->edx ) +#define MACH_RPC_SIG(act) ( USER_REGS(act)->edi ) +#define MACH_RPC_UIP(act) ( USER_REGS(act)->eip ) +#define MACH_RPC_USP(act) ( USER_REGS(act)->uesp ) +#define MACH_RPC_RETADDR(sp) ( *((int *)sp - 1) ) + +#endif /* _MACH_I386_RPC_H_ */ diff --git a/osfmk/mach/i386/syscall_sw.h b/osfmk/mach/i386/syscall_sw.h new file mode 100644 index 000000000..4a7a17b3f --- /dev/null +++ b/osfmk/mach/i386/syscall_sw.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/29 17:36:36 mburg + * MK7.3 merger + * + * Revision 1.2.22.1 1998/02/03 09:32:55 gdt + * Merge up to MK7.3 + * [1998/02/03 09:17:02 gdt] + * + * Revision 1.2.20.1 1997/06/17 03:00:55 devrcs + * RPC Enhancements. + * Added new definition of the rpc_return_trap. 
+ * [1996/04/26 21:53:51 yfei] + * + * Revision 1.2.17.2 1996/02/16 00:07:27 yfei + * Merged NSWC based RPC enhancements into MK7_MAIN. + * + * Revision 1.2.12.2 1994/09/23 02:37:42 ezf + * change marker to not FREE + * [1994/09/22 21:40:17 ezf] + * + * Revision 1.2.12.1 1994/08/26 20:48:44 watkins + * Merge with rt2_shared. + * [1994/08/26 18:38:55 watkins] + * + * Revision 1.2.9.1 1994/07/18 22:03:32 burke + * Check-in for merge. + * [1994/07/15 21:04:49 burke] + * + * Revision 1.2.7.3 1994/07/05 14:28:23 watkins + * Merge with rpc. + * [1994/07/05 14:27:30 watkins] + * + * Revision 1.2.6.1 1994/05/18 21:18:29 watkins + * Add macro for rpc call gate. + * [1994/05/18 21:16:19 watkins] + * + * Revision 1.2.2.2 1993/06/09 02:40:45 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:24 jeffc] + * + * Revision 1.2 1993/04/19 16:34:14 devrcs + * Fixes for ANSI C + * [1993/02/26 13:35:10 sp] + * + * Revision 1.1 1992/09/30 02:30:50 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/05/14 16:52:22 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:32:17 mrt + * Changed to new Mach copyright + * [91/02/01 17:10:01 mrt] + * + * Revision 2.3 90/12/05 23:46:16 af + * Made GNU preproc happy. + * + * Revision 2.2 90/05/03 15:48:01 dbg + * Created. + * [90/04/30 16:36:25 dbg] + * + * Revision 1.3.1.1 89/12/22 22:22:03 rvb + * Use asm.h + * [89/12/22 rvb] + * + * Revision 1.3 89/03/09 20:19:53 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:01:00 gm0w + * Changes for cleanup. + * + * 31-Dec-88 Robert Baron (rvb) at Carnegie-Mellon University + * Derived from MACH2.0 vax release. + * + * 1-Sep-86 Michael Young (mwyoung) at Carnegie-Mellon University + * Created from mach_syscalls.h in the user library sources. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_I386_SYSCALL_SW_H_ +#define _MACH_I386_SYSCALL_SW_H_ + +#include + +#define MACHCALLSEL 0x07 +#define RPCCALLSEL 0x0f + +#define kernel_trap(trap_name,trap_number,number_args) \ +LEAF(_##trap_name,0) ;\ + movl $##trap_number,%eax ;\ + lcall $##MACHCALLSEL, $0 ;\ +END(_##trap_name) + +#define rpc_trap(trap_name,trap_number,number_args) \ +LEAF(_##trap_name,0) ;\ + movl $##trap_number,%eax; \ + lcall $##RPCCALLSEL, $0 ;\ +END(_##trap_name) + +#define rpc_return_trap(trap_name,trap_number,number_args) \ +LEAF(_##trap_name,0) ;\ + movl %eax, %ecx; \ + movl $##trap_number,%eax; \ + lcall $##RPCCALLSEL, $0 ;\ +END(_##trap_name) + +#endif /* _MACH_I386_SYSCALL_SW_H_ */ diff --git a/osfmk/mach/i386/thread_state.h b/osfmk/mach/i386/thread_state.h new file mode 100644 index 000000000..300462b9c --- /dev/null +++ b/osfmk/mach/i386/thread_state.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.3 1995/01/10 05:16:26 devrcs + * mk6 CR801 - copyright marker not FREE_ + * [1994/12/01 19:25:21 dwm] + * + * Revision 1.1.6.1 1994/08/07 20:48:54 bolinger + * Merge up to colo_b7. + * [1994/08/01 21:01:26 bolinger] + * + * Revision 1.1.4.1 1994/06/25 03:47:07 dwm + * mk6 CR98 - new file to hold MD THREAD_STATE_MAX + * [1994/06/24 21:54:48 dwm] + * + * $EndLog$ + */ +#ifndef _MACH_I386_THREAD_STATE_H_ +#define _MACH_I386_THREAD_STATE_H_ + +#define I386_THREAD_STATE_MAX 32 + +#if defined (__i386__) +#define THREAD_STATE_MAX I386_THREAD_STATE_MAX +#endif + +#endif /* _MACH_I386_THREAD_STATE_H_ */ diff --git a/osfmk/mach/i386/thread_status.h b/osfmk/mach/i386/thread_status.h new file mode 100644 index 000000000..e67be918c --- /dev/null +++ b/osfmk/mach/i386/thread_status.h @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: thread_status.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * This file contains the structure definitions for the thread + * state as applied to I386 processors. + */ + +#ifndef _MACH_I386_THREAD_STATUS_H_ +#define _MACH_I386_THREAD_STATUS_H_ + +#include +#include +#include /* FIXME */ +#include /* FIXME */ +/* + * i386_thread_state this is the structure that is exported + * to user threads for use in status/mutate + * calls. This structure should never + * change. + * + * i386_float_state exported to use threads for access to + * floating point registers. Try not to + * change this one, either. + * + * i386_isa_port_map_state exported to user threads to allow + * selective in/out operations + * + * i386_v86_assist_state + * + * thread_syscall_state + */ + +/* THREAD_STATE_FLAVOR_LIST 0 */ +#define i386_NEW_THREAD_STATE 1 /* used to be i386_THREAD_STATE */ +#define i386_FLOAT_STATE 2 +#define i386_ISA_PORT_MAP_STATE 3 +#define i386_V86_ASSIST_STATE 4 +#define i386_REGS_SEGS_STATE 5 +#define THREAD_SYSCALL_STATE 6 +#define THREAD_STATE_NONE 7 +#define i386_SAVED_STATE 8 + + +/* + * VALID_THREAD_STATE_FLAVOR is a platform specific macro that when passed + * an exception flavor will return if that is a defined flavor for that + * platform. The macro must be manually updated to include all of the valid + * exception flavors as defined above. + */ +#define VALID_THREAD_STATE_FLAVOR(x) \ + ((x == i386_NEW_THREAD_STATE) || \ + (x == i386_FLOAT_STATE) || \ + (x == i386_ISA_PORT_MAP_STATE) || \ + (x == i386_V86_ASSIST_STATE) || \ + (x == i386_REGS_SEGS_STATE) || \ + (x == THREAD_SYSCALL_STATE) || \ + (x == THREAD_STATE_NONE) || \ + (x == i386_SAVED_STATE)) + +/* + * This structure is used for both + * i386_THREAD_STATE and i386_REGS_SEGS_STATE. 
+ */ +struct i386_new_thread_state { + unsigned int gs; + unsigned int fs; + unsigned int es; + unsigned int ds; + unsigned int edi; + unsigned int esi; + unsigned int ebp; + unsigned int esp; + unsigned int ebx; + unsigned int edx; + unsigned int ecx; + unsigned int eax; + unsigned int eip; + unsigned int cs; + unsigned int efl; + unsigned int uesp; + unsigned int ss; +}; +#define i386_NEW_THREAD_STATE_COUNT \ + (sizeof (struct i386_new_thread_state)/sizeof(unsigned int)) + +/* + * Subset of saved state stored by processor on kernel-to-kernel + * trap. (Used by ddb to examine state guaranteed to be present + * on all traps into debugger.) + */ +struct i386_saved_state_from_kernel { + unsigned int gs; + unsigned int fs; + unsigned int es; + unsigned int ds; + unsigned int edi; + unsigned int esi; + unsigned int ebp; + unsigned int esp; /* kernel esp stored by pusha - + we save cr2 here later */ + unsigned int ebx; + unsigned int edx; + unsigned int ecx; + unsigned int eax; + unsigned int trapno; + unsigned int err; + unsigned int eip; + unsigned int cs; + unsigned int efl; +}; + +/* + * The format in which thread state is saved by Mach on this machine. 
This + * state flavor is most efficient for exception RPC's to kernel-loaded + * servers, because copying can be avoided: + */ +struct i386_saved_state { + unsigned int gs; + unsigned int fs; + unsigned int es; + unsigned int ds; + unsigned int edi; + unsigned int esi; + unsigned int ebp; + unsigned int esp; /* kernel esp stored by pusha - + we save cr2 here later */ + unsigned int ebx; + unsigned int edx; + unsigned int ecx; + unsigned int eax; + unsigned int trapno; + unsigned int err; + unsigned int eip; + unsigned int cs; + unsigned int efl; + unsigned int uesp; + unsigned int ss; + struct v86_segs { + unsigned int v86_es; /* virtual 8086 segment registers */ + unsigned int v86_ds; + unsigned int v86_fs; + unsigned int v86_gs; + } v86_segs; +#define i386_SAVED_ARGV_COUNT 7 + unsigned int argv_status; /* Boolean flag indicating whether or + * not Mach copied in the args */ + unsigned int argv[i386_SAVED_ARGV_COUNT]; + /* The return address, and the first several + * function call args from the stack, for + * efficient syscall exceptions */ +}; +#define i386_SAVED_STATE_COUNT (sizeof (struct i386_saved_state)/sizeof(unsigned int)) +#define i386_REGS_SEGS_STATE_COUNT i386_SAVED_STATE_COUNT + +/* + * Machine-independent way for servers and Mach's exception mechanism to + * choose the most efficient state flavor for exception RPC's: + */ +#define MACHINE_THREAD_STATE i386_SAVED_STATE +#define MACHINE_THREAD_STATE_COUNT i386_SAVED_STATE_COUNT + +/* + * Largest state on this machine: + * (be sure mach/machine/thread_state.h matches!) + */ +#define THREAD_MACHINE_STATE_MAX i386_SAVED_STATE_COUNT + +/* + * Floating point state. + * + * fpkind tells in what way floating point operations are supported. + * See the values for fp_kind in . + * + * If the kind is FP_NO, then calls to set the state will fail, and + * thread_getstatus will return garbage for the rest of the state. + * If "initialized" is false, then the rest of the state is garbage. 
+ * Clients can set "initialized" to false to force the coprocessor to + * be reset. + * "exc_status" is non-zero if the thread has noticed (but not + * proceeded from) a coprocessor exception. It contains the status + * word with the exception bits set. The status word in "fp_status" + * will have the exception bits turned off. If an exception bit in + * "fp_status" is turned on, then "exc_status" should be zero. This + * happens when the coprocessor exception is noticed after the system + * has context switched to some other thread. + * + * If kind is FP_387, then "state" is a i387_state. Other kinds might + * also use i387_state, but somebody will have to verify it (XXX). + * Note that the registers are ordered from top-of-stack down, not + * according to physical register number. + */ + +#define FP_STATE_BYTES \ + (sizeof (struct i386_fp_save) + sizeof (struct i386_fp_regs)) + +struct i386_float_state { + int fpkind; /* FP_NO..FP_387 (readonly) */ + int initialized; + unsigned char hw_state[FP_STATE_BYTES]; /* actual "hardware" state */ + int exc_status; /* exception status (readonly) */ +}; +#define i386_FLOAT_STATE_COUNT \ + (sizeof(struct i386_float_state)/sizeof(unsigned int)) + + +#define PORT_MAP_BITS 0x400 +struct i386_isa_port_map_state { + unsigned char pm[PORT_MAP_BITS>>3]; +}; + +#define i386_ISA_PORT_MAP_STATE_COUNT \ + (sizeof(struct i386_isa_port_map_state)/sizeof(unsigned int)) + +/* + * V8086 assist supplies a pointer to an interrupt + * descriptor table in task space. 
+ */ +struct i386_v86_assist_state { + unsigned int int_table; /* interrupt table address */ + int int_count; /* interrupt table size */ +}; + +struct v86_interrupt_table { + unsigned int count; /* count of pending interrupts */ + unsigned short mask; /* ignore this interrupt if true */ + unsigned short vec; /* vector to take */ +}; + +#define i386_V86_ASSIST_STATE_COUNT \ + (sizeof(struct i386_v86_assist_state)/sizeof(unsigned int)) + +struct thread_syscall_state { + unsigned eax; + unsigned edx; + unsigned efl; + unsigned eip; + unsigned esp; +}; + +#define i386_THREAD_SYSCALL_STATE_COUNT \ + (sizeof(struct thread_syscall_state) / sizeof(unsigned int)) + +/* + * Main thread state consists of + * general registers, segment registers, + * eip and eflags. + */ + +#define i386_THREAD_STATE -1 + +typedef struct { + unsigned int eax; + unsigned int ebx; + unsigned int ecx; + unsigned int edx; + unsigned int edi; + unsigned int esi; + unsigned int ebp; + unsigned int esp; + unsigned int ss; + unsigned int eflags; + unsigned int eip; + unsigned int cs; + unsigned int ds; + unsigned int es; + unsigned int fs; + unsigned int gs; +} i386_thread_state_t; + +#define i386_THREAD_STATE_COUNT \ + ( sizeof (i386_thread_state_t) / sizeof (int) ) + +/* + * Default segment register values. + */ + +#define USER_CODE_SELECTOR 0x0017 +#define USER_DATA_SELECTOR 0x001f +#define KERN_CODE_SELECTOR 0x0008 +#define KERN_DATA_SELECTOR 0x0010 + +/* + * Thread floating point state + * includes FPU environment as + * well as the register stack. + */ + +#define i386_THREAD_FPSTATE -2 + +typedef struct { + fp_env_t environ; + fp_stack_t stack; +} i386_thread_fpstate_t; + +#define i386_THREAD_FPSTATE_COUNT \ + ( sizeof (i386_thread_fpstate_t) / sizeof (int) ) + +/* + * Extra state that may be + * useful to exception handlers. 
+ */ + +#define i386_THREAD_EXCEPTSTATE -3 + +typedef struct { + unsigned int trapno; + err_code_t err; +} i386_thread_exceptstate_t; + +#define i386_THREAD_EXCEPTSTATE_COUNT \ + ( sizeof (i386_thread_exceptstate_t) / sizeof (int) ) + +/* + * Per-thread variable used + * to store 'self' id for cthreads. + */ + +#define i386_THREAD_CTHREADSTATE -4 + +typedef struct { + unsigned int self; +} i386_thread_cthreadstate_t; + +#define i386_THREAD_CTHREADSTATE_COUNT \ + ( sizeof (i386_thread_cthreadstate_t) / sizeof (int) ) + +#endif /* _MACH_I386_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/i386/vm_param.h b/osfmk/mach/i386/vm_param.h new file mode 100644 index 000000000..f5b0c0536 --- /dev/null +++ b/osfmk/mach/i386/vm_param.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * Copyright (c) 1994 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. 
+ * + */ + +/* + * File: vm_param.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * I386 machine dependent virtual memory parameters. + * Most of the declarations are preceeded by I386_ (or i386_) + * which is OK because only I386 specific code will be using + * them. + */ + +#ifndef _MACH_I386_VM_PARAM_H_ +#define _MACH_I386_VM_PARAM_H_ + +#define BYTE_SIZE 8 /* byte size in bits */ + +#define I386_PGBYTES 4096 /* bytes per 80386 page */ +#define I386_PGSHIFT 12 /* number of bits to shift for pages */ + +/* + * Convert bytes to pages and convert pages to bytes. + * No rounding is used. + */ + +#define i386_btop(x) (((unsigned)(x)) >> I386_PGSHIFT) +#define machine_btop(x) i386_btop(x) +#define i386_ptob(x) (((unsigned)(x)) << I386_PGSHIFT) + +/* + * Round off or truncate to the nearest page. These will work + * for either addresses or counts. (i.e. 1 byte rounds to 1 page + * bytes. + */ + +#define i386_round_page(x) ((((unsigned)(x)) + I386_PGBYTES - 1) & \ + ~(I386_PGBYTES-1)) +#define i386_trunc_page(x) (((unsigned)(x)) & ~(I386_PGBYTES-1)) + +#define VM_MIN_ADDRESS ((vm_offset_t) 0) +#define VM_MAX_ADDRESS ((vm_offset_t) 0xc0000000U) + +#define LINEAR_KERNEL_ADDRESS ((vm_offset_t) 0xc0000000) + +#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0x00000000U) +#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0x3fffffffU) + +#define VM_MIN_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0x0c000000U) +#define VM_MAX_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0x1fffffffU) + +/* FIXME - always leave like this? 
*/ +#define INTSTACK_SIZE (I386_PGBYTES*4) +#define KERNEL_STACK_SIZE (I386_PGBYTES*4) + +#if 0 /* FIXME */ + +#include +#include +#include + +#if defined(AT386) +#include +#endif + +#if !NORMA_VM +#if !TASK_SWAPPER && !THREAD_SWAPPER +#define KERNEL_STACK_SIZE (I386_PGBYTES/2) +#else +/* stack needs to be a multiple of page size to get unwired when swapped */ +#define KERNEL_STACK_SIZE (I386_PGBYTES) +#endif /* TASK || THREAD SWAPPER */ +#define INTSTACK_SIZE (I386_PGBYTES) /* interrupt stack size */ +#else /* NORMA_VM */ +#define KERNEL_STACK_SIZE (I386_PGBYTES*2) +#define INTSTACK_SIZE (I386_PGBYTES*2) /* interrupt stack size */ +#endif /* NORMA_VM */ +#endif /* MACH_KERNEL */ + +/* + * Conversion between 80386 pages and VM pages + */ + +#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p)))) +#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p)))) +#define vm_to_i386(p) (i386_btop(ptoa(p))) + +/* + * Physical memory is mapped 1-1 with virtual memory starting + * at VM_MIN_KERNEL_ADDRESS. + */ +#define phystokv(a) ((vm_offset_t)(a) + VM_MIN_KERNEL_ADDRESS) + +/* + * For 386 only, ensure that pages are installed in the + * kernel_pmap with VM_PROT_WRITE enabled. This avoids + * code in pmap_enter that disallows a read-only mapping + * in the kernel's pmap. (See ri-osc CR1387.) + * + * An entry in kernel_pmap is made only by the kernel or + * a collocated server -- by definition (;-)), the requester + * is trusted code. If it asked for read-only access, + * it won't attempt a write. We don't have to enforce the + * restriction. (Naturally, this assumes that any collocated + * server will _not_ depend on trapping write accesses to pages + * mapped read-only; this cannot be made to work in the current + * i386-inspired pmap model.) + */ + +/*#if defined(AT386) + +#define PMAP_ENTER_386_CHECK \ + if (cpuid_family == CPUID_FAMILY_386) + +#else -- FIXME? 
We're only running on Pentiums or better */ + +#define PMAP_ENTER_386_CHECK + +/*#endif*/ + +#define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \ + MACRO_BEGIN \ + vm_prot_t __prot__ = \ + (protection) & ~(page)->page_lock; \ + \ + PMAP_ENTER_386_CHECK \ + if ((pmap) == kernel_pmap) \ + __prot__ |= VM_PROT_WRITE; \ + pmap_enter( \ + (pmap), \ + (virtual_address), \ + (page)->phys_addr, \ + __prot__, \ + (wired) \ + ); \ + MACRO_END + +#endif /* _MACH_I386_VM_PARAM_H_ */ diff --git a/osfmk/mach/i386/vm_types.h b/osfmk/mach/i386/vm_types.h new file mode 100644 index 000000000..ae562ea62 --- /dev/null +++ b/osfmk/mach/i386/vm_types.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:47 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.8.2 1995/01/06 19:50:48 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:42:42 dwm] + * + * Revision 1.2.8.1 1994/09/23 02:38:01 ezf + * change marker to not FREE + * [1994/09/22 21:40:30 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:41:01 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:38 jeffc] + * + * Revision 1.2 1993/04/19 16:34:37 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:34 david] + * + * Revision 1.1 1992/09/30 02:30:57 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:53:00 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:34 mrt + * Changed to new Mach copyright + * [91/02/01 17:10:49 mrt] + * + * Revision 2.2 90/05/03 15:48:32 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:20:12 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:01:20 gm0w + * Changes for cleanup. + * + * 31-Dec-88 Robert Baron (rvb) at Carnegie-Mellon University + * Derived from MACH2.0 vax release. + * + * 23-Apr-87 Michael Young (mwyoung) at Carnegie-Mellon University + * Changed things to "unsigned int" to appease the user community :-). + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: vm_types.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Header file for VM data types. I386 version. + */ + +#ifndef _MACH_I386_VM_TYPES_H_ +#define _MACH_I386_VM_TYPES_H_ + +#ifdef ASSEMBLER +#else /* ASSEMBLER */ + +/* + * A natural_t is the type for the native + * integer type, e.g. 32 or 64 or.. whatever + * register size the machine has. Unsigned, it is + * used for entities that might be either + * unsigned integers or pointers, and for + * type-casting between the two. + * For instance, the IPC system represents + * a port in user space as an integer and + * in kernel space as a pointer. + */ +typedef unsigned int natural_t; + +/* + * An integer_t is the signed counterpart + * of the natural_t type. Both types are + * only supposed to be used to define + * other types in a machine-independent + * way. + */ +typedef int integer_t; + +#ifdef MACH_KERNEL_PRIVATE +/* + * An int32 is an integer that is at least 32 bits wide + */ +typedef int int32; +typedef unsigned int uint32; +#endif + +/* + * A vm_offset_t is a type-neutral pointer, + * e.g. an offset into a virtual memory space. + */ +typedef natural_t vm_offset_t; + +/* + * A vm_size_t is the proper type for e.g. + * expressing the difference between two + * vm_offset_t entities. 
+ */ +typedef natural_t vm_size_t; + +#endif /* ASSEMBLER */ + +/* + * If composing messages by hand (please dont) + */ + +#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32 + +#endif /* _MACH_I386_VM_TYPES_H_ */ diff --git a/osfmk/mach/kern_return.h b/osfmk/mach/kern_return.h new file mode 100644 index 000000000..c347f1e3d --- /dev/null +++ b/osfmk/mach/kern_return.h @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: h/kern_return.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Kernel return codes. + * + */ + +#ifndef _MACH_KERN_RETURN_H_ +#define _MACH_KERN_RETURN_H_ + +#include + +/* + * N.B.: If you add errors, please update + * mach_services/lib/libmach/err_kern.sub + */ + +#define KERN_SUCCESS 0 + +#define KERN_INVALID_ADDRESS 1 + /* Specified address is not currently valid. + */ + +#define KERN_PROTECTION_FAILURE 2 + /* Specified memory is valid, but does not permit the + * required forms of access. + */ + +#define KERN_NO_SPACE 3 + /* The address range specified is already in use, or + * no address range of the size specified could be + * found. + */ + +#define KERN_INVALID_ARGUMENT 4 + /* The function requested was not applicable to this + * type of argument, or an argument is invalid + */ + +#define KERN_FAILURE 5 + /* The function could not be performed. A catch-all. + */ + +#define KERN_RESOURCE_SHORTAGE 6 + /* A system resource could not be allocated to fulfill + * this request. This failure may not be permanent. + */ + +#define KERN_NOT_RECEIVER 7 + /* The task in question does not hold receive rights + * for the port argument. + */ + +#define KERN_NO_ACCESS 8 + /* Bogus access restriction. + */ + +#define KERN_MEMORY_FAILURE 9 + /* During a page fault, the target address refers to a + * memory object that has been destroyed. This + * failure is permanent. 
+ */ + +#define KERN_MEMORY_ERROR 10 + /* During a page fault, the memory object indicated + * that the data could not be returned. This failure + * may be temporary; future attempts to access this + * same data may succeed, as defined by the memory + * object. + */ + +#define KERN_ALREADY_IN_SET 11 + /* The receive right is already a member of the portset. + */ + +#define KERN_NOT_IN_SET 12 + /* The receive right is not a member of a port set. + */ + +#define KERN_NAME_EXISTS 13 + /* The name already denotes a right in the task. + */ + +#define KERN_ABORTED 14 + /* The operation was aborted. Ipc code will + * catch this and reflect it as a message error. + */ + +#define KERN_INVALID_NAME 15 + /* The name doesn't denote a right in the task. + */ + +#define KERN_INVALID_TASK 16 + /* Target task isn't an active task. + */ + +#define KERN_INVALID_RIGHT 17 + /* The name denotes a right, but not an appropriate right. + */ + +#define KERN_INVALID_VALUE 18 + /* A blatant range error. + */ + +#define KERN_UREFS_OVERFLOW 19 + /* Operation would overflow limit on user-references. + */ + +#define KERN_INVALID_CAPABILITY 20 + /* The supplied (port) capability is improper. + */ + +#define KERN_RIGHT_EXISTS 21 + /* The task already has send or receive rights + * for the port under another name. + */ + +#define KERN_INVALID_HOST 22 + /* Target host isn't actually a host. + */ + +#define KERN_MEMORY_PRESENT 23 + /* An attempt was made to supply "precious" data + * for memory that is already present in a + * memory object. 
+ */ + +#define KERN_MEMORY_DATA_MOVED 24 + /* A page was requested of a memory manager via + * memory_object_data_request for an object using + * a MEMORY_OBJECT_COPY_CALL strategy, with the + * VM_PROT_WANTS_COPY flag being used to specify + * that the page desired is for a copy of the + * object, and the memory manager has detected + * the page was pushed into a copy of the object + * while the kernel was walking the shadow chain + * from the copy to the object. This error code + * is delivered via memory_object_data_error + * and is handled by the kernel (it forces the + * kernel to restart the fault). It will not be + * seen by users. + */ + +#define KERN_MEMORY_RESTART_COPY 25 + /* A strategic copy was attempted of an object + * upon which a quicker copy is now possible. + * The caller should retry the copy using + * vm_object_copy_quickly. This error code + * is seen only by the kernel. + */ + +#define KERN_INVALID_PROCESSOR_SET 26 + /* An argument applied to assert processor set privilege + * was not a processor set control port. + */ + +#define KERN_POLICY_LIMIT 27 + /* The specified scheduling attributes exceed the thread's + * limits. + */ + +#define KERN_INVALID_POLICY 28 + /* The specified scheduling policy is not currently + * enabled for the processor set. + */ + +#define KERN_INVALID_OBJECT 29 + /* The external memory manager failed to initialize the + * memory object. + */ + +#define KERN_ALREADY_WAITING 30 + /* A thread is attempting to wait for an event for which + * there is already a waiting thread. + */ + +#define KERN_DEFAULT_SET 31 + /* An attempt was made to destroy the default processor + * set. + */ + +#define KERN_EXCEPTION_PROTECTED 32 + /* An attempt was made to fetch an exception port that is + * protected, or to abort a thread while processing a + * protected exception. + */ + +#define KERN_INVALID_LEDGER 33 + /* A ledger was required but not supplied. 
+ */ + +#define KERN_INVALID_MEMORY_CONTROL 34 + /* The port was not a memory cache control port. + */ + +#define KERN_INVALID_SECURITY 35 + /* An argument supplied to assert security privilege + * was not a host security port. + */ + +#define KERN_NOT_DEPRESSED 36 + /* thread_depress_abort was called on a thread which + * was not currently depressed. + */ + +#define KERN_TERMINATED 37 + /* Object has been terminated and is no longer available + */ + +#define KERN_LOCK_SET_DESTROYED 38 + /* Lock set has been destroyed and is no longer available. + */ + +#define KERN_LOCK_UNSTABLE 39 + /* The thread holding the lock terminated before releasing + * the lock + */ + +#define KERN_LOCK_OWNED 40 + /* The lock is already owned by another thread + */ + +#define KERN_LOCK_OWNED_SELF 41 + /* The lock is already owned by the calling thread + */ + +#define KERN_SEMAPHORE_DESTROYED 42 + /* Semaphore has been destroyed and is no longer available. + */ + +#define KERN_RPC_SERVER_TERMINATED 43 + /* Return from RPC indicating the target server was + * terminated before it successfully replied + */ + +#define KERN_RPC_TERMINATE_ORPHAN 44 + /* Terminate an orphaned activation. + */ + +#define KERN_RPC_CONTINUE_ORPHAN 45 + /* Allow an orphaned activation to continue executing. + */ + +#define KERN_NOT_SUPPORTED 46 + /* Empty thread activation (No thread linked to it) + */ + +#define KERN_NODE_DOWN 47 + /* Remote node down or inaccessible. + */ + +#define KERN_NOT_WAITING 48 + /* A signalled thread was not actually waiting. */ + +#define KERN_OPERATION_TIMED_OUT 49 + /* Some thread-oriented operation (semaphore_wait) timed out + */ + +#define KERN_RETURN_MAX 0x100 + /* Maximum return value allowable + */ + +#endif /* _MACH_KERN_RETURN_H_ */ diff --git a/osfmk/mach/kmod.h b/osfmk/mach/kmod.h new file mode 100644 index 000000000..a3007a2ff --- /dev/null +++ b/osfmk/mach/kmod.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 1999 Mar 29 rsulack created. + */ + +#ifndef _MACH_KMOD_H_ +#define _MACH_KMOD_H_ + +#include + +#define KMOD_CNTL_START 1 // call kmod's start routine +#define KMOD_CNTL_STOP 2 // call kmod's stop routine +#define KMOD_CNTL_RETAIN 3 // increase a kmod's reference count +#define KMOD_CNTL_RELEASE 4 // decrease a kmod's reference count +#define KMOD_CNTL_GET_CMD 5 // get kmod load cmd from kernel + +#define KMOD_PACK_IDS(from, to) (((unsigned long)from << 16) | (unsigned long)to) +#define KMOD_UNPACK_FROM_ID(i) ((unsigned long)i >> 16) +#define KMOD_UNPACK_TO_ID(i) ((unsigned long)i & 0xffff) + +#define KMOD_MAX_NAME 64 + +typedef int kmod_t; +typedef int kmod_control_flavor_t; +typedef void* kmod_args_t; + +typedef struct kmod_reference { + struct kmod_reference *next; + struct kmod_info *info; +} kmod_reference_t; + +/**************************************************************************************/ +/* warning any changes to this structure affect the following macros. 
*/ +/**************************************************************************************/ + +#define KMOD_RETURN_SUCCESS KERN_SUCCESS +#define KMOD_RETURN_FAILURE KERN_FAILURE + +typedef kern_return_t kmod_start_func_t(struct kmod_info *ki, void *data); +typedef kern_return_t kmod_stop_func_t(struct kmod_info *ki, void *data); + +typedef struct kmod_info { + struct kmod_info *next; + int info_version; // version of this structure + int id; + char name[KMOD_MAX_NAME]; + char version[KMOD_MAX_NAME]; + int reference_count; // # refs to this + kmod_reference_t *reference_list; // who this refs + vm_address_t address; // starting address + vm_size_t size; // total size + vm_size_t hdr_size; // unwired hdr size + kmod_start_func_t *start; + kmod_stop_func_t *stop; +} kmod_info_t; + +typedef kmod_info_t *kmod_info_array_t; + +#define KMOD_INFO_NAME kmod_info +#define KMOD_INFO_VERSION 1 + +#define KMOD_DECL(name, version) \ + static kmod_start_func_t name ## _module_start; \ + static kmod_stop_func_t name ## _module_stop; \ + kmod_info_t KMOD_INFO_NAME = { 0, KMOD_INFO_VERSION, -1, \ + { #name }, { version }, -1, 0, 0, 0, 0, \ + name ## _module_start, \ + name ## _module_stop }; + +#define KMOD_EXPLICIT_DECL(name, version, start, stop) \ + kmod_info_t KMOD_INFO_NAME = { 0, KMOD_INFO_VERSION, -1, \ + { #name }, { version }, -1, 0, 0, 0, 0, \ + start, stop }; + +// the following is useful for libaries that don't need their own start and stop functions +#define KMOD_LIB_DECL(name, version) \ + kmod_info_t KMOD_INFO_NAME = { 0, KMOD_INFO_VERSION, -1, \ + { #name }, { version }, -1, 0, 0, 0, 0, \ + kmod_default_start, \ + kmod_default_stop }; + + +// ************************************************************************************* +// kmod kernel to user commands +// ************************************************************************************* + +#define KMOD_LOAD_EXTENSION_PACKET 1 +#define KMOD_LOAD_WITH_DEPENDENCIES_PACKET 2 + +// for generic packets +#define 
KMOD_IOKIT_START_RANGE_PACKET 0x1000 +#define KMOD_IOKIT_END_RANGE_PACKET 0x1fff + +typedef struct kmod_load_extension_cmd { + int type; + char name[KMOD_MAX_NAME]; +} kmod_load_extension_cmd_t; + +typedef struct kmod_load_with_dependencies_cmd { + int type; + char name[KMOD_MAX_NAME]; + char dependencies[1][KMOD_MAX_NAME]; +} kmod_load_with_dependencies_cmd_t; + +typedef struct kmod_generic_cmd { + int type; + char data[1]; +} kmod_generic_cmd_t; + +#ifdef KERNEL_PRIVATE + +extern void kmod_init(); + +extern kern_return_t kmod_create_fake(char *name, char *version); + +extern kmod_info_t *kmod_lookupbyname(char * name); +extern kmod_info_t *kmod_lookupbyid(kmod_t id); + +extern kern_return_t kmod_load_extension(char *name); +extern kern_return_t kmod_load_extension_with_dependencies(char *name, char **dependencies); +extern kern_return_t kmod_send_generic(int type, void *data, int size); + +extern kmod_start_func_t kmod_default_start; +extern kmod_stop_func_t kmod_default_stop; + +extern kern_return_t kmod_initialize_cpp(kmod_info_t *info); +extern kern_return_t kmod_finalize_cpp(kmod_info_t *info); + +extern void kmod_dump(vm_offset_t *addr, unsigned int cnt); + +#endif /* KERNEL_PRIVATE */ + +#endif /* _MACH_KMOD_H_ */ diff --git a/osfmk/mach/ledger.defs b/osfmk/mach/ledger.defs new file mode 100644 index 000000000..ec1b94f76 --- /dev/null +++ b/osfmk/mach/ledger.defs @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + ledger 5000; + +#ifdef KERNEL_USER +userprefix r_; +#endif /* KERNEL_USER */ + +#include +#include + +/* + * Create a subordinate ledger + */ +routine ledger_create( + parent_ledger : ledger_t; + ledger_ledger : ledger_t; + out new_ledger : ledger_t; + transfer : ledger_item_t); + +/* + * Destroy a ledger + */ +routine ledger_terminate( + ledger : ledger_t); + +/* + * Transfer resources from a parent ledger to a child + */ +routine ledger_transfer( + parent_ledger : ledger_t; + child_ledger : ledger_t; + transfer : ledger_item_t); + +/* + * Return the ledger limit and balance + */ +routine ledger_read( + ledger : ledger_t; + out balance : ledger_item_t; + out limit : ledger_item_t); + diff --git a/osfmk/mach/lock_set.defs b/osfmk/mach/lock_set.defs new file mode 100644 index 000000000..da19ca388 --- /dev/null +++ b/osfmk/mach/lock_set.defs @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: mach/lock_set.defs + * Author: Joseph CaraDonna + * + * Exported kernel calls + * + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + lock_set 617000; + +#include +#include + +/* + * a lock_set_t is created and destroyed through the task object. + * lock_set_create(task,&lock_set_t,...); + * lock_set_destroy(task,lock_set_t); + */ + +routine lock_acquire( + lock_set : lock_set_t; + lock_id : int); + +routine lock_release( + lock_set : lock_set_t; + lock_id : int); + +routine lock_try( + lock_set : lock_set_t; + lock_id : int); + +routine lock_make_stable( + lock_set : lock_set_t; + lock_id : int); + +routine lock_handoff( + lock_set : lock_set_t; + lock_id : int); + +routine lock_handoff_accept( + lock_set : lock_set_t; + lock_id : int); + diff --git a/osfmk/mach/mach.h b/osfmk/mach/mach.h new file mode 100644 index 000000000..c0e4100ec --- /dev/null +++ b/osfmk/mach/mach.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. + */ +/* + * Includes all the types that a normal user + * of Mach programs should need + */ + +#ifndef _MACH_H_ +#define _MACH_H_ + +#include +#include +#include +#include + +#endif /* _MACH_H_ */ diff --git a/osfmk/mach/mach_host.defs b/osfmk/mach/mach_host.defs new file mode 100644 index 000000000..0877e01cb --- /dev/null +++ b/osfmk/mach/mach_host.defs @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: mach/mach_host.defs + * + * Abstract: + * Mach host operations support. Includes processor allocation and + * control. + */ + +#ifdef MACH_KERNEL +#include +#include +#endif /* MACH_KERNEL */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + mach_host 200; + +/* + * Basic types + */ + +#include +#include +#include +#include + +/* + * References to host objects are returned by: + * mach_host_self() - trap + */ + +/* + * Return information about this host. + */ +routine host_info( + host : host_t; + flavor : host_flavor_t; + out host_info_out : host_info_t, CountInOut); + +/* + * Get string describing current kernel version. 
+ */ +routine host_kernel_version( + host : host_t; + out kernel_version : kernel_version_t); + +/* + * Get host page size + */ +routine host_page_size( + host : host_t; + out page_size : vm_size_t); + +/* + * Allow pagers to create named entries that point to un-mapped + * abstract memory object. The named entries are generally mappable + * and can be subsetted through the mach_make_memory_entry call + */ +routine mach_memory_object_memory_entry( + host :host_t; + internal :boolean_t; + size :vm_size_t; + permission :vm_prot_t; + pager :memory_object_t; + out entry_handle :mach_port_move_send_t); + + +/* + * Get processor info for all the processors on this host. + * The returned data is an OOL array of processor info. + */ +routine host_processor_info( + host : host_t; + flavor : processor_flavor_t; + out processor_count : natural_t; + out processor_info : processor_info_array_t); + +/* + * Return host IO master access port + */ +routine host_get_io_master( + host : host_t; + out io_master : io_master_t); + +/* + * Get service port for a processor set. + * Available to all. + */ +routine host_get_clock_service( + host : host_t; + clock_id : clock_id_t; + out clock_serv : clock_serv_t); + + +routine kmod_get_info( + host : host_t; + out modules : kmod_args_t); + +/* + * Returns information about the memory allocation zones. + * Supported in all kernels.. + */ +routine host_zone_info( + host : host_t; + out names : zone_name_array_t, + Dealloc; + out info : zone_info_array_t, + Dealloc); + +/* + * Returns information about the global VP table. + * Only supported in MACH_VM_DEBUG kernels, + * otherwise returns KERN_FAILURE. + */ +routine host_virtual_physical_table_info( + host : host_t; + out info : hash_info_bucket_array_t, + Dealloc); + +/* + * Returns information about the global reverse hash table. + * This call is only valid on MACH_IPC_DEBUG kernels. + * Otherwise, KERN_FAILURE is returned. 
+ */ +routine host_ipc_hash_info( + host : host_t; + out info : hash_info_bucket_array_t, + Dealloc); + +/* + * JMM - These routines should be on the host_priv port. We need + * to verify the move before putting them there. + */ +routine enable_bluebox( + host : host_t; + in taskID : unsigned; + in TWI_TableStart : unsigned; + in Desc_TableStart : unsigned); + +routine disable_bluebox( + host : host_t); + +/* + * JMM - Keep processor_set related items at the end for easy + * removal. + */ +/* + * Get default processor set for host. + */ +routine processor_set_default( + host : host_t; + out default_set : processor_set_name_t); + +/* + * Create new processor set. Returns real port for manipulations, + * and name port for obtaining information. + */ +routine processor_set_create( + host : host_t; + out new_set : processor_set_t; + out new_name : processor_set_name_t); + +/* + * Temporary interfaces for conversion to 64 bit data path + */ + +routine mach_memory_object_memory_entry_64( + host :host_t; + internal :boolean_t; + size :memory_object_size_t; + permission :vm_prot_t; + pager :memory_object_t; + out entry_handle :mach_port_move_send_t); + +/* + * Return statistics from this host. + */ +routine host_statistics( + host_priv : host_t; + flavor : host_flavor_t; + out host_info_out : host_info_t, CountInOut); diff --git a/osfmk/mach/mach_interface.h b/osfmk/mach/mach_interface.h new file mode 100644 index 000000000..6b0a54fb7 --- /dev/null +++ b/osfmk/mach/mach_interface.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Apple Computer 1998 + * ALL Rights Reserved + */ +/* + * This file represents the interfaces that used to come + * from creating the user headers from the mach.defs file. + * Because mach.defs was decomposed, this file now just + * wraps up all the new interface headers generated from + * each of the new .defs resulting from that decomposition. + */ +#ifndef _MACH_INTERFACE_H_ +#define _MACH_INTERFACE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif /* _MACH_INTERFACE_H_ */ + + diff --git a/osfmk/mach/mach_ioctl.h b/osfmk/mach/mach_ioctl.h new file mode 100644 index 000000000..167e37bae --- /dev/null +++ b/osfmk/mach/mach_ioctl.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.2 1997/02/07 12:12:35 barbou + * Fixed the defines (some were missing the "MACH_" prefixes). + * [1997/02/07 12:06:38 barbou] + * + * Revision 1.1.4.1 1996/11/29 16:59:20 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Contents of sys/ioctl.h. Prefixed the definitions with "MACH" to avoid + * conflicts with UNIX servers. + * [96/09/18 barbou] + * [1996/11/29 16:33:15 stephen] + * + * $EndLog$ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * +CS.CMU.EDU +* School of Computer Science +* Carnegie Mellon University +* Pittsburgh PA 15213-3890 +* +* any improvements or extensions that they make and grant Carnegie Mellon rights +* to redistribute these changes. +*/ +/* + */ +/* + * Format definitions for 'ioctl' commands in device definitions. + * + * From BSD4.4. + */ + +#ifndef _MACH_MACH_IOCTL_H_ +#define _MACH_MACH_IOCTL_H_ + +/* + * Ioctl's have the command encoded in the lower word, and the size of + * any in or out parameters in the upper word. The high 3 bits of the + * upper word are used to encode the in/out status of the parameter. + */ +#define MACH_IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */ +#define MACH_IOC_VOID 0x20000000 /* no parameters */ +#define MACH_IOC_OUT 0x40000000 /* copy out parameters */ +#define MACH_IOC_IN 0x80000000 /* copy in parameters */ +#define MACH_IOC_INOUT (MACH_IOC_IN|MACH_IOC_OUT) + +#define _MACH_IOC(inout,group,num,len) \ + (inout | ((len & MACH_IOCPARM_MASK) << 16) | ((group) << 8) | (num)) +#define _MACH_IO(g,n) _MACH_IOC(MACH_IOC_VOID, (g), (n), 0) +#define _MACH_IOR(g,n,t) _MACH_IOC(MACH_IOC_OUT, (g), (n), sizeof(t)) +#define _MACH_IOW(g,n,t) _MACH_IOC(MACH_IOC_IN, (g), (n), sizeof(t)) +#define _MACH_IOWR(g,n,t) _MACH_IOC(MACH_IOC_INOUT, (g), (n), sizeof(t)) + +#ifdef MACH_KERNEL + /* + * to avoid changing the references in the micro-kernel sources... 
+ */ +#define IOCPARM_MASK MACH_IOCPARM_MASK +#define IOC_VOID MACH_IOC_VOID +#define IOC_OUT MACH_IOC_OUT +#define IOC_IN MACH_IOC_IN +#define IOC_INOUT MACH_IOC_INOUT +#define _IOC _MACH_IOC +#define _IO _MACH_IO +#define _IOR _MACH_IOR +#define _IOW _MACH_IOW +#define _IOWR _MACH_IOWR +#endif /* MACH_KERNEL */ + +#endif /* _MACH_MACH_IOCTL_H_ */ diff --git a/osfmk/mach/mach_norma.defs b/osfmk/mach/mach_norma.defs new file mode 100644 index 000000000..8e5ff7f09 --- /dev/null +++ b/osfmk/mach/mach_norma.defs @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.17.2 1994/09/23 02:38:45 ezf + * change marker to not FREE + * [1994/09/22 21:40:54 ezf] + * + * Revision 1.3.17.1 1994/06/13 20:49:36 dlb + * Merge MK6 and NMK17 + * [1994/06/13 20:47:52 dlb] + * + * Revision 1.3.11.1 1994/02/08 11:01:11 bernadat + * Checked in NMK16_1 changes + * [94/02/04 bernadat] + * + * Revision 1.3.4.2 1993/07/22 13:54:29 bernadat + * [Joe Barerra: joebar@microsoft.com] Added norma_task_teleport as + * an alternative migration mechanism. + * Change from NORMA_MK14.6 [93/03/08 sjs] + * [93/07/16 bernadat] + * + * Revision 1.3.2.2 1993/06/02 23:45:24 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:50 jeffc] + * + * Revision 1.3 1992/12/07 21:29:17 robert + * integrate any changes below for 14.0 (branch from 13.16 base) + * + * Joseph Barrera (jsb) at Carnegie-Mellon University 03-Jul-92 + * Added norma_task_clone to support task migration. + * [1992/12/06 20:25:30 robert] + * + * Revision 1.2 1992/11/25 01:13:00 robert + * fix history + * [1992/11/09 21:25:21 robert] + * + * integrate changes below for norma_14 + * [1992/11/09 16:42:52 robert] + * + * Revision 0.0 92/10/02 dwm + * Add Comment: norma_port_location_hint requires send rights on 'port'. + * + * Revision 1.1 1992/11/05 20:59:18 robert + * Initial revision + * [92/10/02 dwm] + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/12/13 13:45:12 jsb + * Moved non-exported interfaces to norma/norma_internal.defs. + * Changed name of task_create_remote to norma_task_create. + * Added comments. + * + * Revision 2.5 91/11/14 16:56:43 rpd + * Picked up mysterious norma changes. + * [91/11/14 rpd] + * + * Revision 2.4 91/08/28 11:15:08 jsb + * Added KERNEL_USER definitions. 
+ * Removed norma_set_task_server. + * Added task_create_remote, norma_copy_create. + * [91/08/15 13:28:27 jsb] + * + * Revision 2.3 91/08/03 18:19:02 jsb + * Removed norma_get_{host,host_priv,device}_port; + * Use norma_{get,set}_special_port instead. + * [91/07/25 07:51:11 jsb] + * + * Revision 2.2 91/06/06 17:07:57 jsb + * First checkin. + * [91/05/25 10:37:22 jsb] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif KERNEL_USER +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + mach_norma 555000; +#ifdef KERNEL_USER +userprefix r_; +#endif KERNEL_USER + +#include +#include + +skip; + +/* + * Specify a node upon which children tasks will be created. + * This call exists only to allow testing with unmodified servers. + * Server developers should use norma_task_create instead. + */ +routine task_set_child_node( + target_task : task_t; + child_node : int); + +/* + * THIS CALL WILL BE ELIMINATED. 
+ * Use norma_port_location_hint(,mach_task_self(),) instead. + */ +routine norma_node_self( + host : host_t; + out node : int); + +skip; + +skip; + +/* + * Create a task on the given node, possibly inheriting memory. + * Same inheritance semantics as task_create, including inheritance + * of initial ports and emulation library, except for memory: + * inheritance attributes are ignored, so that all regions appear + * in the child task, shared with the parent, until the parent + * is destroyed. (The inheritance of the regions in the child + * will, however, be set to match the parent.) + * + * This call is intended to support process migration, where the + * inheritance semantics of norma_task_create would break migrated + * programs that depended upon sharing relationships remaining + * after migration. + * + * This call is not a true task migration call, in that it does not + * migrate the port space, threads, and other non-address-space + * attributes of the task. + */ +routine norma_task_clone( + target_task : task_t; + inherit_memory : boolean_t; + child_node : int; + out child_task : task_t); + +/* + * Create a task on the given node, possibly inheriting memory. + * Same inheritance semantics as task_create, including inheritance + * of initial ports and emulation library. + * Setting child_node to node_self forces local task creation. + */ +routine norma_task_create( + target_task : task_t; + inherit_memory : boolean_t; + child_node : int; + out child_task : task_t); + +/* + * Get a given special port for a given node. + * Norma special ports are defined in norma_special_ports.h; + * examples include the master device port. + * There are a limited number of slots available for system servers. + */ +routine norma_get_special_port( + host_priv : host_priv_t; + node : int; + which : int; + out port : mach_port_t); + +/* + * Set a given special port for a given node. + * See norma_get_special_port. 
+ */ +routine norma_set_special_port( + host_priv : host_priv_t; + which : int; + port : mach_port_t); + +/* + * Just like norma_task_clone, except target_task is terminated, + * allowing useful VM optimizations. + */ +routine norma_task_teleport( + target_task : task_t; + inherit_memory : boolean_t; + child_node : int; + out child_task : task_t); + +skip; + +skip; + +/* + * Return best guess of port's current location. + * Guaranteed to be a node where the port once was. + * Guaranteed to be accurate if port has never moved. + * Can be used to determine residence node for hosts, tasks, threads, etc. + */ +routine norma_port_location_hint( + task : task_t; + port : mach_port_t; + out node : int); diff --git a/osfmk/mach/mach_param.h b/osfmk/mach/mach_param.h new file mode 100644 index 000000000..f82d2b3ff --- /dev/null +++ b/osfmk/mach/mach_param.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:45 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.16.2 1994/09/23 02:38:50 ezf + * change marker to not FREE + * [1994/09/22 21:40:58 ezf] + * + * Revision 1.2.16.1 1994/06/13 20:49:40 dlb + * Merge MK6 and NMK17 + * [1994/06/13 20:47:55 dlb] + * + * Revision 1.2.7.1 1994/03/11 15:26:48 bernadat + * Do not account exception ports as registered ports. + * [94/03/11 bernadat] + * + * Revision 1.2.2.4 1993/08/05 19:09:19 jeffc + * CR9508 - Delete dead code. Remove MACH_IPC_COMPAT + * [1993/08/03 17:09:06 jeffc] + * + * Revision 1.2.2.3 1993/08/03 18:29:29 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 18:04:55 gm] + * + * Revision 1.2.2.2 1993/06/09 02:41:29 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:53 jeffc] + * + * Revision 1.2 1993/04/19 16:35:13 devrcs + * Fixes for ANSI C + * [1993/02/26 13:30:09 sp] + * + * Updated to new exception interface. + * [1992/12/23 13:09:02 david] + * + * Revision 1.1 1992/09/30 02:31:14 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4.2.1 92/03/03 16:22:03 jeffreyh + * Changes from TRUNK + * [92/02/26 12:02:58 jeffreyh] + * + * Revision 2.5 92/01/15 13:44:51 rpd + * Changed MACH_IPC_COMPAT conditionals to default to not present. + * + * Revision 2.4 91/05/14 16:54:40 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:33:28 mrt + * Changed to new Mach copyright + * [91/02/01 17:18:01 mrt] + * + * Revision 2.2 90/06/02 14:58:21 rpd + * Created. + * [90/03/26 23:56:39 rpd] + * + * + * Condensed history: + * Moved implementation constants elsewhere (rpd). + * Added SET_MAX (rpd). + * Added KERN_MSG_SMALL_SIZE (mwyoung). + * Added PORT_BACKLOG_MAX (mwyoung). + * Added PORT_BACKLOG_MAX (mwyoung). 
+ * Added TASK_PORT_REGISTER_MAX (mwyoung). + * Created (mwyoung). + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_param.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1986 + * + * Mach system sizing parameters + */ + +#ifndef _MACH_MACH_PARAM_H_ +#define _MACH_MACH_PARAM_H_ + +/* Number of "registered" ports */ + +#define TASK_PORT_REGISTER_MAX 3 + +#endif /* _MACH_MACH_PARAM_H_ */ diff --git a/osfmk/mach/mach_port.defs b/osfmk/mach/mach_port.defs new file mode 100644 index 000000000..10c622034 --- /dev/null +++ b/osfmk/mach/mach_port.defs @@ -0,0 +1,456 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_port.defs + * Author: Rich Draves + * + * Exported kernel calls. 
+ */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + mach_port 3200; + +#include +#include +#include + +/* + * Returns the set of port and port set names + * to which the target task has access, along with + * the type (set or port) for each name. + */ + +routine mach_port_names( + task : ipc_space_t; + out names : mach_port_name_array_t; + out types : mach_port_type_array_t); + +/* + * Returns the type (set or port) for the port name + * within the target task. Also indicates whether + * there is a dead-name request for the name. + */ + +routine mach_port_type( + task : ipc_space_t; + name : mach_port_name_t; + out ptype : mach_port_type_t); + +/* + * Changes the name by which a port (or port set) is known to + * the target task. The new name can't be in use. The + * old name becomes available for recycling. + */ + +routine mach_port_rename( + task : ipc_space_t; + old_name : mach_port_name_t; + new_name : mach_port_name_t); + +/* + * Allocates the specified kind of object, with the given name. + * The right must be one of + * MACH_PORT_RIGHT_RECEIVE + * MACH_PORT_RIGHT_PORT_SET + * MACH_PORT_RIGHT_DEAD_NAME + * New port sets are empty. New ports don't have any + * send/send-once rights or queued messages. The make-send + * count is zero and their queue limit is MACH_PORT_QLIMIT_DEFAULT. + * New sets, ports, and dead names have one user reference. + */ + +routine mach_port_allocate_name( + task : ipc_space_t; + right : mach_port_right_t; + name : mach_port_name_t); + +/* + * Allocates the specified kind of object. + * The right must be one of + * MACH_PORT_RIGHT_RECEIVE + * MACH_PORT_RIGHT_PORT_SET + * MACH_PORT_RIGHT_DEAD_NAME + * Like port_allocate_name, but the kernel picks a name. + * It can use any name not associated with a right. 
+ */ + +routine mach_port_allocate( + task : ipc_space_t; + right : mach_port_right_t; + out name : mach_port_name_t); + +/* + * Destroys all rights associated with the name and makes it + * available for recycling immediately. The name can be a + * port (possibly with multiple user refs), a port set, or + * a dead name (again, with multiple user refs). + */ + +routine mach_port_destroy( + task : ipc_space_t; + name : mach_port_name_t); + +/* + * Releases one send/send-once/dead-name user ref. + * Just like mach_port_mod_refs -1, but deduces the + * correct type of right. This allows a user task + * to release a ref for a port without worrying + * about whether the port has died or not. + */ + +routine mach_port_deallocate( + task : ipc_space_t; + name : mach_port_name_t); + +/* + * A port set always has one user ref. + * A send-once right always has one user ref. + * A dead name always has one or more user refs. + * A send right always has one or more user refs. + * A receive right always has one user ref. + * The right must be one of + * MACH_PORT_RIGHT_RECEIVE + * MACH_PORT_RIGHT_PORT_SET + * MACH_PORT_RIGHT_DEAD_NAME + * MACH_PORT_RIGHT_SEND + * MACH_PORT_RIGHT_SEND_ONCE + */ + +routine mach_port_get_refs( + task : ipc_space_t; + name : mach_port_name_t; + right : mach_port_right_t; + out refs : mach_port_urefs_t); + +/* + * The delta is a signed change to the task's + * user ref count for the right. Only dead names + * and send rights can have a positive delta. + * The resulting user ref count can't be negative. + * If it is zero, the right is deallocated. + * If the name isn't a composite right, it becomes + * available for recycling. 
The right must be one of + * MACH_PORT_RIGHT_RECEIVE + * MACH_PORT_RIGHT_PORT_SET + * MACH_PORT_RIGHT_DEAD_NAME + * MACH_PORT_RIGHT_SEND + * MACH_PORT_RIGHT_SEND_ONCE + */ + +routine mach_port_mod_refs( + task : ipc_space_t; + name : mach_port_name_t; + right : mach_port_right_t; + delta : mach_port_delta_t); + +/* + * Allocates a new receive right, and associates it with the + * specified RPC subsystem. + */ +routine mach_port_allocate_subsystem( + task : ipc_space_t; + subsys : subsystem_t; + out name : mach_port_name_t); + +/* + * Only valid for receive rights. + * Sets the make-send count for the port. + */ +routine mach_port_set_mscount( + task : ipc_space_t; + name : mach_port_name_t; + mscount : mach_port_mscount_t); + +/* + * Only valid for port sets. Returns a list of + * the members. + */ + +routine mach_port_get_set_status( + task : ipc_space_t; + name : mach_port_name_t; + out members : mach_port_name_array_t); + +/* + * Puts the member port (the task must have receive rights) + * into the after port set. (Or removes it from any port set + * if after is MACH_PORT_NULL.) If the port is already in + * a set, does an atomic move. + */ + +routine mach_port_move_member( + task : ipc_space_t; + member : mach_port_name_t; + after : mach_port_name_t); + +/* + * Requests a notification from the kernel. The request + * must supply the send-once right which is used for + * the notification. If a send-once right was previously + * registered, it is returned. The msg_id must be one of + * MACH_NOTIFY_PORT_DESTROYED (receive rights) + * MACH_NOTIFY_DEAD_NAME (send/receive/send-once rights) + * MACH_NOTIFY_NO_SENDERS (receive rights) + * + * The sync value specifies whether a notification should + * get sent immediately, if appropriate. The exact meaning + * depends on the notification: + * MACH_NOTIFY_PORT_DESTROYED: must be zero. + * MACH_NOTIFY_DEAD_NAME: if non-zero, then name can be dead, + * and the notification gets sent immediately. 
+ * If zero, then name can't be dead. + * MACH_NOTIFY_NO_SENDERS: the notification gets sent + * immediately if the current mscount is greater + * than or equal to the sync value and there are no + * extant send rights. + */ + +routine mach_port_request_notification( + task : ipc_space_t; + name : mach_port_name_t; + msgid : mach_msg_id_t; + sync : mach_port_mscount_t; + notify : mach_port_send_once_t; + out previous : mach_port_send_once_t); + +/* + * Inserts the specified rights into the target task, + * using the specified name. If inserting send/receive + * rights and the task already has send/receive rights + * for the port, then the names must agree. In any case, + * the task gains a user ref for the port. + */ + +routine mach_port_insert_right( + task : ipc_space_t; + name : mach_port_name_t; + poly : mach_port_poly_t); + +/* + * Returns the specified right for the named port + * in the target task, extracting that right from + * the target task. The target task loses a user + * ref and the name may be available for recycling. + * msgt_name must be one of + * MACH_MSG_TYPE_MOVE_RECEIVE + * MACH_MSG_TYPE_COPY_SEND + * MACH_MSG_TYPE_MAKE_SEND + * MACH_MSG_TYPE_MOVE_SEND + * MACH_MSG_TYPE_MAKE_SEND_ONCE + * MACH_MSG_TYPE_MOVE_SEND_ONCE + */ + +routine mach_port_extract_right( + task : ipc_space_t; + name : mach_port_name_t; + msgt_name : mach_msg_type_name_t; + out poly : mach_port_poly_t); + +/* + * Only valid for receive rights. + * Sets the sequence number for the port. + */ + +routine mach_port_set_seqno( + task : ipc_space_t; + name : mach_port_name_t; + seqno : mach_port_seqno_t); + +/* + * Returns information about a port. 
+ */ + +routine mach_port_get_attributes( + task : ipc_space_t; + name : mach_port_name_t; + flavor : mach_port_flavor_t; + out port_info_out : mach_port_info_t, CountInOut); + +/* + * Set attributes of a port + */ + +routine mach_port_set_attributes( + task : ipc_space_t; + name : mach_port_name_t; + flavor : mach_port_flavor_t; + port_info : mach_port_info_t); + + +/* + * Allocates the specified kind of object, qos version. + * The right must be + * MACH_PORT_RIGHT_RECEIVE + * Like port_allocate_name, but the kernel picks a name. + * It can use any name not associated with a right. + */ + +routine mach_port_allocate_qos( + task : ipc_space_t; + right : mach_port_right_t; + inout qos : mach_port_qos_t; + out name : mach_port_name_t); + +/* + * Generic interface to allocation various kinds of ports. + * Should never be called directly by users (at least not + * unless they are exceedingly masochistic). + */ + +routine mach_port_allocate_full( + task : ipc_space_t; + right : mach_port_right_t; + subs : subsystem_t; + inout qos : mach_port_qos_t; + inout name : mach_port_name_t); + + +/* + * Pre-expand task port name space. + */ +routine task_set_port_space( + task : ipc_space_t; + table_entries : int); + + +/* + * Returns the exact number of extant send rights + * for the given receive right. + * This call is only valid on MACH_IPC_DEBUG kernels. + * Otherwise, KERN_FAILURE is returned. + */ +routine mach_port_get_srights( + task : ipc_space_t; + name : mach_port_name_t; + out srights : mach_port_rights_t); + + +/* + * Returns information about an IPC space. + * This call is only valid on MACH_IPC_DEBUG kernels. + * Otherwise, KERN_FAILURE is returned. + */ +routine mach_port_space_info( + task : ipc_space_t; + out info : ipc_info_space_t; + out table_info : ipc_info_name_array_t, + Dealloc; + out tree_info : ipc_info_tree_name_array_t, + Dealloc); + +/* + * Returns information about the dead-name requests + * registered with the named receive right. 
+ * This call is only valid on MACH_IPC_DEBUG kernels. + * Otherwise, KERN_FAILURE is returned. + */ +routine mach_port_dnrequest_info( + task : ipc_space_t; + name : mach_port_name_t; + out total : unsigned; /* total size of table */ + out used : unsigned); /* amount used */ + +/* + * Return the type and address of the kernel object + * that the given send/receive right represents. + * This call is only valid on MACH_IPC_DEBUG kernels. + * Otherwise, KERN_FAILURE is returned. + */ +routine mach_port_kernel_object( + task : ipc_space_t; + name : mach_port_name_t; + out object_type : unsigned; + out object_addr : vm_offset_t); + + +/* + * Inserts the specified rights into the portset identified + * by the pair. The results of passing in the + * Poly argument via the supplied disposition must yield a + * receive right. + * + * If the pair does not represent a valid portset + * KERN_INVALID_RIGHT is returned. + * + * If the passed in name argument does not represent a receive + * right, KERN_INVALID_CAPABILITY will be returned. + * + * If the port represented by the receive right is already in + * the portset, KERN_ALREADY_IN_SET is returned. + */ +routine mach_port_insert_member( + task : ipc_space_t; + name : mach_port_name_t; + pset : mach_port_name_t); + +/* + * Extracts the specified right from the named portset + * in the target task. + * the target task. The target task loses a user + * ref and the name may be available for recycling. 
+ * msgt_name must be one of + * MACH_MSG_TYPE_MOVE_RECEIVE + * MACH_MSG_TYPE_COPY_SEND + * MACH_MSG_TYPE_MAKE_SEND + * MACH_MSG_TYPE_MOVE_SEND + * MACH_MSG_TYPE_MAKE_SEND_ONCE + * MACH_MSG_TYPE_MOVE_SEND_ONCE + */ + +routine mach_port_extract_member( + task : ipc_space_t; + name : mach_port_name_t; + pset : mach_port_name_t); + diff --git a/osfmk/mach/mach_syscalls.h b/osfmk/mach/mach_syscalls.h new file mode 100644 index 000000000..ec44601d0 --- /dev/null +++ b/osfmk/mach/mach_syscalls.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MACH_MACH_SYSCALLS_H_ +#define _MACH_MACH_SYSCALLS_H_ + +#include +#include +#include +#include + +extern kern_return_t clock_sleep_trap( + mach_port_name_t clock_name, + sleep_type_t sleep_type, + int sleep_sec, + int sleep_nsec, + mach_timespec_t *wakeup_time); + +extern kern_return_t thread_switch( + mach_port_name_t thread_name, + int option, + mach_msg_timeout_t option_time); + +#endif /* _MACH_MACH_SYSCALLS_H_ */ diff --git a/osfmk/mach/mach_time.h b/osfmk/mach/mach_time.h new file mode 100644 index 000000000..ee41f9a4d --- /dev/null +++ b/osfmk/mach/mach_time.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2001 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2001 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 30 January 2001 (debo) + * Created. 
+ */ + +#ifndef _MACH_MACH_TIME_H_ +#define _MACH_MACH_TIME_H_ + +#include + +#include + +uint64_t mach_absolute_time(void); + +kern_return_t mach_wait_until( + uint64_t deadline); + +struct mach_timebase_info { + uint32_t numer; + uint32_t denom; +}; + +typedef struct mach_timebase_info *mach_timebase_info_t; +typedef struct mach_timebase_info mach_timebase_info_data_t; + +kern_return_t mach_timebase_info( + mach_timebase_info_t info); + +#endif /* _MACH_MACH_TIME_H_ */ diff --git a/osfmk/mach/mach_traps.h b/osfmk/mach/mach_traps.h new file mode 100644 index 000000000..faa547080 --- /dev/null +++ b/osfmk/mach/mach_traps.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Definitions of general Mach system traps. + * + * IPC traps are defined in . + * Kernel RPC functions are defined in . 
+ */ + +#ifndef _MACH_MACH_TRAPS_H_ +#define _MACH_MACH_TRAPS_H_ + +#include +#include +#include +#include +#include + +mach_port_name_t mach_reply_port(void); + +mach_port_name_t thread_self_trap(void); + +mach_port_name_t task_self_trap(void); + +mach_port_name_t host_self_trap(void); + +kern_return_t semaphore_signal_trap( + mach_port_name_t signal_name); + +kern_return_t semaphore_signal_all_trap( + mach_port_name_t signal_name); + +kern_return_t semaphore_signal_thread_trap( + mach_port_name_t signal_name, + mach_port_name_t thread_name); + +kern_return_t semaphore_wait_trap( + mach_port_name_t wait_name); + +kern_return_t semaphore_timedwait_trap( + mach_port_name_t wait_name, + unsigned int sec, + clock_res_t nsec); + +kern_return_t semaphore_wait_signal_trap( + mach_port_name_t wait_name, + mach_port_name_t signal_name); + +kern_return_t semaphore_timedwait_signal_trap( + mach_port_name_t wait_name, + mach_port_name_t signal_name, + unsigned int sec, + clock_res_t nsec); + +kern_return_t init_process(void); + +kern_return_t map_fd( + int fd, + vm_offset_t offset, + vm_offset_t *va, + boolean_t findspace, + vm_size_t size); + +kern_return_t task_for_pid( + mach_port_t target_tport, + int pid, + mach_port_t *t); + +kern_return_t pid_for_task( + mach_port_t t, + int *x); + +kern_return_t macx_swapon( + char *name, + int flags, + int size, + int priority); + +kern_return_t macx_swapoff( + char *name, + int flags); + +extern kern_return_t macx_triggers( + int hi_water, + int low_water, + int flags, + mach_port_t alert_port); + +#endif /* _MACH_MACH_TRAPS_H_ */ diff --git a/osfmk/mach/mach_types.defs b/osfmk/mach/mach_types.defs new file mode 100644 index 000000000..dd0e5022f --- /dev/null +++ b/osfmk/mach/mach_types.defs @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * Mach kernel interface type declarations + */ + +#ifndef _MACH_MACH_TYPES_DEFS_ +#define _MACH_MACH_TYPES_DEFS_ + + +#include + +type upl_page_info_t = struct[2] of integer_t; +type memory_object_offset_t = struct[2] of integer_t; +type memory_object_size_t = struct[2] of integer_t; + + + + +type mach_port_status_t = struct[10] of integer_t; /* obsolete */ + + /* mach_port_info_t: can hold either a + * mach_port_status_t (9 ints) or a + * mach_port_limits_t (1 int). If new flavors of + * mach_port_{get,set}_attributes are added, the size of + * this array may have to be increased. (See mach/port.h) + */ +type mach_port_flavor_t = int; +type mach_port_info_t = array[*:10] of integer_t; + +type task_t = mach_port_t +#if KERNEL_SERVER + intran: task_t convert_port_to_task(mach_port_t) + outtran: mach_port_t convert_task_to_port(task_t) + destructor: task_deallocate(task_t) +#endif /* KERNEL_SERVER */ + ; + +type thread_t = mach_port_t +#if KERNEL_SERVER + intran: thread_t convert_port_to_thread(mach_port_t) + outtran: mach_port_t convert_thread_to_port(thread_t) + destructor: thread_deallocate(thread_t) +#endif /* KERNEL_SERVER */ + ; + +type thread_act_t = mach_port_t +#if KERNEL_SERVER + intran: thread_act_t convert_port_to_act(mach_port_t) + outtran: mach_port_t convert_act_to_port(thread_act_t) + destructor: act_deallocate(thread_act_t) +#endif KERNEL_SERVER + ; + +type thread_act_consume_ref_t = mach_port_move_send_t + ctype: thread_act_t +#if KERNEL_SERVER + intran: thread_act_t convert_port_to_act(mach_port_t) + destructor: act_deallocate(thread_act_t) +#endif KERNEL_SERVER + ; + + /* thread_state_t: This inline array can hold + * a machine-dependent amount of data, defined in + * mach/machine/???? 
(currently THREAD_STATE_MAX, + * in mach/thread_state.h) + */ +#include +type thread_state_flavor_t = int; +type thread_state_t = array[*:THREAD_STATE_MAX] of natural_t; + +type task_array_t = ^array[] of task_t; +type thread_array_t = ^array[] of thread_t; +type thread_act_array_t = ^array[] of thread_act_t; +type act_params_t = array[6] of int; + +type vm_map_t = mach_port_t +#if KERNEL_SERVER + intran: vm_map_t convert_port_to_map(mach_port_t) + destructor: vm_map_deallocate(vm_map_t) +#endif /* KERNEL_SERVER */ + ; + +type vm_task_entry_t = mach_port_t + ctype: vm_map_t +#if KERNEL_SERVER + intran: vm_map_t convert_port_entry_to_map(mach_port_t) + destructor: vm_map_deallocate(vm_map_t) +#endif /* KERNEL_SERVER */ + ; + +type vm_object_entry_t = mach_port_t + ctype: vm_object_t +#if KERNEL_SERVER + intran: vm_object_t convert_port_entry_to_object(mach_port_t) + destructor: vm_object_deallocate(vm_object_t) +#endif /* KERNEL_SERVER */ + ; + +type upl_object_entry_t = mach_port_t + ctype: upl_t +#if KERNEL_SERVER + intran: upl_t convert_port_to_upl(mach_port_t) + destructor: mach_destroy_upl(upl_t) +#endif /* KERNEL_SERVER */ + ; + +type ipc_space_t = mach_port_t +#if KERNEL_SERVER + intran: ipc_space_t convert_port_to_space(mach_port_t) + destructor: space_deallocate(ipc_space_t) +#endif /* KERNEL_SERVER */ + ; + +type vm_address_t = natural_t; +type vm_offset_t = natural_t; +type vm_size_t = natural_t; +type vm_prot_t = int; +type vm_inherit_t = int; +type xxx_vm_statistics_data_t = struct[13] of integer_t; +type vm_behavior_t = int; +type vm_statistics_data_t = struct[12] of integer_t; +type vm_machine_attribute_t = int; +type vm_machine_attribute_val_t = int; +type vm_sync_t = int; + + /* thread_info_t: this inline array can hold any of: + * thread_basic_info_t (10 ints) + * policy_timeshare_info_t (5 ints) + * policy_fifo_info_t (4 ints) + * policy_rr_info_t (5 ints) + * if other thread_info flavors are added, this + * definition may need to be changed. 
(See + * mach/thread_info.h and mach/policy.h) */ +type thread_flavor_t = int; +type thread_info_t = array[*:12] of integer_t; + +type thread_policy_flavor_t = natural_t; +type thread_policy_t = array[*:16] of integer_t; + + /* task_info_t: this inline array can hold any of: + * task_basic_info_t (8 ints) + * task_events_info_t (8 ints) + * task_thread_times_info_t (4 ints) + * policy_timeshare_info_t (5 ints) + * policy_fifo_info_t (4 ints) + * policy_rr_info_t (5 ints) + * If other task_info flavors are added, this + * definition may need to be changed. (See + * mach/task_info.h and mach/policy.h) */ +type task_flavor_t = int; +type task_info_t = array[*:8] of integer_t; + +type task_policy_flavor_t = natural_t; +type task_policy_t = array[*:16] of integer_t; + +type mem_entry_name_port_t = mach_port_t +#if KERNEL_SERVER + intran: mem_entry_name_port_t null_conversion(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + + +type memory_object_t = mach_port_t +#if KERNEL_SERVER + intran: memory_object_t null_conversion(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + +type memory_object_default_t = mach_port_t +#if KERNEL_SERVER + intran: memory_object_default_t null_conversion(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + +type upl_object_t = mach_port_t +#if KERNEL_SERVER + intran: upl_object_t null_conversion(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + +type vm_object_t = mach_port_t +#if KERNEL_SERVER + intran: vm_object_t vm_object_lookup(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + +type memory_object_name_t = mach_port_t + ctype: mach_port_t + ; + +type memory_object_copy_strategy_t = int; +type memory_object_return_t = int; + +type machine_info_data_t = struct[5] of integer_t; +type machine_slot_data_t = struct[8] of integer_t; + +type host_t = mach_port_t +#if KERNEL_SERVER + intran: host_t convert_port_to_host(mach_port_t) + outtran: mach_port_t convert_host_to_port(host_t) +#endif /* KERNEL_SERVER */ + ; + +type host_priv_t = mach_port_t +#if KERNEL_SERVER 
+ intran: host_priv_t convert_port_to_host_priv(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + +type host_security_t = mach_port_t +#if KERNEL_SERVER + intran: host_security_t convert_port_to_host_security(mach_port_t) +#endif /* KERNEL_SERVER */ + ; + + /* host_info_t: variable-sized inline array that can contain: + * host_basic_info_t (5 ints) + * host_sched_info_t (2 ints) + * kernel_resource_sizes_t (5 ints) + * host_load_info_t (6 ints) + * vm_statistics_t (12 ints) + * If other host_info flavors are added, this definition may + * need to be changed. (See mach/{host_info,vm_statistics}.h) + */ +type host_flavor_t = int; +type host_info_t = array[*:12] of integer_t; + +type processor_t = mach_port_t +#if KERNEL_SERVER + intran: processor_t convert_port_to_processor(mach_port_t) + outtran: mach_port_t convert_processor_to_port(processor_t) +#endif /* KERNEL_SERVER */ + ; + +type processor_array_t = ^array[] of processor_t; + + /* processor_info_t: variable-sized inline array that can + * contain: + * processor_basic_info_t: (5 ints) + * processor_cpu_load_info_t:(4 ints) + * processor_machine_info_t :(12 ints) + * If other processor_info flavors are added, this definition + * may need to be changed. 
(See mach/processor_info.h) */ +type processor_flavor_t = int; +type processor_info_t = array[*:12] of integer_t; +type processor_info_array_t = ^array[] of integer_t; + +type processor_set_t = mach_port_t +#if KERNEL_SERVER + intran: processor_set_t convert_port_to_pset(mach_port_t) + outtran: mach_port_t convert_pset_to_port(processor_set_t) + destructor: pset_deallocate(processor_set_t) +#endif /* KERNEL_SERVER */ + ; + +type processor_set_array_t = ^array[] of processor_set_t; + +type processor_set_name_t = mach_port_t +#if KERNEL_SERVER + intran: processor_set_name_t convert_port_to_pset_name(mach_port_t) + outtran: mach_port_t convert_pset_name_to_port(processor_set_name_t) + destructor: pset_deallocate(processor_set_name_t) +#endif /* KERNEL_SERVER */ + ; + +type processor_set_name_array_t = ^array[] of processor_set_name_t; + + /* processor_set_info_t: variable-size inline array + * that can hold: + * processor_set_basic_info (5 ints) + * processor_set_load_info (4 ints) + * policy_timeshare_base_t (1 int) + * policy_fifo_base_t (1 int) + * policy_rr_base_t (1 int) + * policy_timeshare_base_t (1 int) + * policy_fifo_base_t (1 int) + * policy_rr_base_t (1 int) + * policy_t (1 int) + * If other flavors are added, this definition may + * need to be changed. 
(see mach/processor.h) */ +type processor_set_flavor_t = int; +type processor_set_info_t = array[*:5] of integer_t; + +type bootstrap_t = mach_port_t; + +type kernel_version_t = c_string[*:512]; +type kernel_boot_info_t = c_string[*:4096]; + +type time_value_t = struct[2] of integer_t; + +type user_subsystem_t = array[*:16384] of char; + +type subsystem_t = mach_port_t +#if KERNEL_SERVER + intran: subsystem_t convert_port_to_subsystem(mach_port_t) + outtran: mach_port_t convert_subsystem_to_port(subsystem_t) +#endif /* KERNEL_SERVER */ + ; + +type mach_port_qos_t = struct[2] of integer_t; + +type emulation_vector_t = ^array[] of vm_offset_t; + +type inline_existence_map_t = array[*:512] of char; + +type policy_t = int; + /* policy_info_t: variable-size inline array. Can hold: + * policy_timeshare_info_t (5 ints) + * policy_fifo_info_t (4 ints) + * policy_rr_info_t (5 ints) */ +type policy_base_t = array[*:5] of integer_t; +type policy_info_t = array[*:2] of integer_t; +type policy_limit_t = array[*:1] of integer_t; + +type ledger_t = mach_port_t +#if KERNEL_SERVER + intran: ledger_t convert_port_to_ledger(mach_port_t) + outtran: mach_port_t convert_ledger_to_port(ledger_t) +#endif /* KERNEL_SERVER */ + ; + +type ledger_array_t = ^array[] of ledger_t; +type ledger_item_t = integer_t; + +type security_token_t = MACH_MSG_TYPE_INTEGER_64; + + /* memory_object_info_t: variable-size inline array: + * memory_object_attr_info_t (5 ints) + * XXX actually it's 6 ints temporarily (object_ready!) + * memory_object_behave_info_t (4 ints) + * memory_object_perf_info_t (2 ints) + * old_memory_object_attr_info_t (3 ints) + * memory_object_norma_info_t (5 ints) + * If other flavors are added, this definition may + * need to be changed. 
(see mach/memory_object.h) */ +type memory_object_flavor_t = int; +type memory_object_info_t = array[*:6] of int; + + /* vm_region_info_t: variable-size inline array that can hold: + * vm_region_basic_info_t (8 ints) + * If other flavors are added, this definition may + * need to be changed. (see mach/vm_region.h) */ +type vm_region_flavor_t = int; +type vm_region_info_t = array[*:9] of int; +type vm_region_info_64_t = array[*:10] of int; +type vm_region_recurse_info_t = array[*:19] of int; +type vm_region_recurse_info_64_t = array[*:19] of int; + +type vm_read_entry_t = array[512] of int + ctype: vm_read_entry_t; + +type exception_mask_t = int; +type exception_behavior_t = int; + +type exception_handler_t = mach_port_t; + +type exception_handler_array_t = + array[*:32] of exception_handler_t; + +type exception_behavior_array_t = + array[*:32] of exception_behavior_t; + +type exception_flavor_array_t = + array[*:32] of thread_state_flavor_t; + +type exception_mask_array_t = + array[*:32] of exception_mask_t; + +type semaphore_t = mach_port_t +#if KERNEL_SERVER + intran: semaphore_t convert_port_to_semaphore(mach_port_t) + outtran: mach_port_t convert_semaphore_to_port(semaphore_t) + destructor: semaphore_dereference(semaphore_t) +#endif /* KERNEL_SERVER */ + ; + +type semaphore_consume_ref_t = mach_port_move_send_t + ctype: semaphore_t +#if KERNEL_SERVER + intran: semaphore_t convert_port_to_semaphore(mach_port_t) + destructor: semaphore_dereference(semaphore_t) +#endif /* KERNEL_SERVER */ + ; + +type lock_set_t = mach_port_t +#if KERNEL_SERVER + intran: lock_set_t convert_port_to_lock_set(mach_port_t) + outtran: mach_port_t convert_lock_set_to_port(lock_set_t) + destructor: lock_set_dereference(lock_set_t) +#endif /* KERNEL_SERVER */ + ; + +type upl_page_list_ptr_t = array[*:20] of upl_page_info_t; + +/* kernel module loader */ +type kmod_t = int; +type kmod_control_flavor_t = int; + +type kmod_args_t = ^array[] of MACH_MSG_TYPE_BYTE + ctype: kmod_args_t; + +type 
io_master_t = mach_port_t; +type UNDServerRef = mach_port_t; + +#ifdef KERNEL_SERVER +#ifdef MACH_KERNEL_PRIVATE +simport ; /* for null conversion */ +simport ; /* for task/thread conversion */ +simport ; /* for host/processor/pset conversions */ +simport ; /* for subsystem conversions */ +simport ; /* for lock_set and semaphore conversions */ +#endif /* MACH_KERNEL_PRIVATE */ + +simport ; /* pick up kernel-specific MIG things */ + +#endif /* KERNEL_SERVER */ + +import ; + +#endif /* _MACH_MACH_TYPES_DEFS_ */ diff --git a/osfmk/mach/mach_types.h b/osfmk/mach/mach_types.h new file mode 100644 index 000000000..0e44e2b43 --- /dev/null +++ b/osfmk/mach/mach_types.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_types.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1986 + * + * Mach external interface definitions. + * + */ + +#ifndef _MACH_MACH_TYPES_H_ +#define _MACH_MACH_TYPES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef KERNEL_PRIVATE + +#include + +/* + * If we are in the kernel, then pick up the kernel definitions for + * the basic mach types. + */ +#include + +extern ledger_t convert_port_to_ledger(ipc_port_t); /* JMM - Hack */ + +#else /* !KERNEL_PRIVATE */ + +/* + * If we are not in the kernel, then these will all be represented by + * ports at user-space. 
+ */ +typedef mach_port_t task_t; +typedef mach_port_t thread_t; +typedef mach_port_t ipc_space_t; +typedef mach_port_t host_t; +typedef mach_port_t host_priv_t; +typedef mach_port_t host_security_t; +typedef mach_port_t processor_set_t; +typedef mach_port_t processor_set_name_t; +typedef mach_port_t processor_set_control_t; +typedef mach_port_t processor_t; +typedef mach_port_t thread_act_t; +typedef mach_port_t subsystem_t; +typedef mach_port_t semaphore_t; +typedef mach_port_t lock_set_t; +typedef mach_port_t ledger_t; +typedef mach_port_t alarm_t; +typedef mach_port_t clock_serv_t; +typedef mach_port_t clock_ctrl_t; +typedef mach_port_t vm_map_t; +typedef mach_port_t vm_map_copy_t; +typedef mach_port_t vm_object_t; + +#endif /* !KERNEL_PRIVATE */ + + +/* + * JMM - These types are just hard-coded as ports for now + */ +typedef mach_port_t clock_reply_t; +typedef mach_port_t bootstrap_t; +typedef mach_port_t mem_entry_name_port_t; +typedef mach_port_t exception_handler_t; +typedef exception_handler_t *exception_handler_array_t; +typedef mach_port_t vm_object_entry_t; +typedef mach_port_t vm_task_entry_t; +typedef mach_port_t upl_object_entry_t; +typedef mach_port_t io_master_t; +typedef mach_port_t UNDServerRef; + +/* + * JMM - Mig doesn't translate the components of an array. + * For example, Mig won't use the thread_t translations + * to translate a thread_array_t argument. So, these definitions + * are not completely accurate at the moment for other kernel + * components. MIG is being fixed. 
+ */ +typedef task_t *task_array_t; +typedef thread_t *thread_array_t; +typedef processor_set_t *processor_set_array_t; +typedef processor_set_t *processor_set_name_array_t; +typedef processor_t *processor_array_t; +typedef thread_act_t *thread_act_array_t; +typedef ledger_t *ledger_array_t; + +/* + * However the real mach_types got declared, we also have to declare + * types with "port" in the name for compatability with the way OSF + * had declared the user interfaces at one point. Someday these should + * go away. + */ +typedef task_t task_port_t; +typedef task_array_t task_port_array_t; +typedef thread_t thread_port_t; +typedef thread_array_t thread_port_array_t; +typedef ipc_space_t ipc_space_port_t; +typedef host_t host_name_t; +typedef host_t host_name_port_t; +typedef processor_set_t processor_set_port_t; +typedef processor_set_t processor_set_name_port_t; +typedef processor_set_array_t processor_set_name_port_array_t; +typedef processor_set_t processor_set_control_port_t; +typedef processor_t processor_port_t; +typedef processor_array_t processor_port_array_t; +typedef thread_act_t thread_act_port_t; +typedef thread_act_array_t thread_act_port_array_t; +typedef semaphore_t semaphore_port_t; +typedef lock_set_t lock_set_port_t; +typedef ledger_t ledger_port_t; +typedef ledger_array_t ledger_port_array_t; +typedef alarm_t alarm_port_t; +typedef clock_serv_t clock_serv_port_t; +typedef clock_ctrl_t clock_ctrl_port_t; +typedef vm_map_t vm_map_port_t; +typedef vm_map_copy_t vm_map_copy_port_t; +typedef exception_handler_t exception_port_t; +typedef exception_handler_array_t exception_port_arrary_t; + +#define TASK_NULL ((task_t) 0) +#define THREAD_NULL ((thread_t) 0) +#define HOST_NULL ((host_t) 0) +#define HOST_PRIV_NULL ((host_priv_t)0) +#define HOST_SECURITY_NULL ((host_security_t)0) +#define PROCESSOR_SET_NULL ((processor_set_t) 0) +#define PROCESSOR_NULL ((processor_t) 0) +#define THR_ACT_NULL ((thread_act_t) 0) +#define SUBSYSTEM_NULL ((subsystem_t) 0) 
+#define SEMAPHORE_NULL ((semaphore_t) 0) +#define LOCK_SET_NULL ((lock_set_t) 0) +#define ALARM_NULL ((alarm_t) 0) +#define CLOCK_NULL ((clock_t) 0) +#define VM_MAP_NULL ((vm_map_t) 0) +#define VM_MAP_COPY_NULL ((vm_map_copy_t) 0) +#define VM_OBJECT_NULL ((vm_object_t) 0) +#define UND_SERVER_NULL ((UNDServerRef) 0) + +typedef integer_t ledger_item_t; +typedef vm_offset_t *emulation_vector_t; +typedef char *user_subsystem_t; + +/* + * Backwards compatibility, for those programs written + * before mach/{std,mach}_types.{defs,h} were set up. + */ +#include + +#endif /* _MACH_MACH_TYPES_H_ */ diff --git a/osfmk/mach/machine.h b/osfmk/mach/machine.h new file mode 100644 index 000000000..d54b7d1e1 --- /dev/null +++ b/osfmk/mach/machine.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* File: machine.h + * Author: Avadis Tevanian, Jr. + * Date: 1986 + * + * Machine independent machine abstraction. + */ + +#ifndef _MACH_MACHINE_H_ +#define _MACH_MACHINE_H_ + +#ifdef MACH_KERNEL_PRIVATE +#include +#endif /* MACH_KERNEL_PRIVATE */ + +#include +#include + +/* + * For each host, there is a maximum possible number of + * cpus that may be available in the system. This is the + * compile-time constant NCPUS, which is defined in cpus.h. + * + * In addition, there is a machine_slot specifier for each + * possible cpu in the system. 
+ */ + +struct machine_info { + integer_t major_version; /* kernel major version id */ + integer_t minor_version; /* kernel minor version id */ + integer_t max_cpus; /* max number of cpus compiled */ + integer_t avail_cpus; /* number actually available */ + vm_size_t memory_size; /* size of memory in bytes */ +}; + +typedef struct machine_info *machine_info_t; +typedef struct machine_info machine_info_data_t; /* bogus */ + +typedef integer_t cpu_type_t; +typedef integer_t cpu_subtype_t; + +#define CPU_STATE_MAX 4 + +#define CPU_STATE_USER 0 +#define CPU_STATE_SYSTEM 1 +#define CPU_STATE_IDLE 2 +#define CPU_STATE_NICE 3 + +struct machine_slot { +/*boolean_t*/integer_t is_cpu; /* is there a cpu in this slot? */ + cpu_type_t cpu_type; /* type of cpu */ + cpu_subtype_t cpu_subtype; /* subtype of cpu */ +/*boolean_t*/integer_t running; /* is cpu running */ + integer_t cpu_ticks[CPU_STATE_MAX]; + integer_t clock_freq; /* clock interrupt frequency */ +}; + +typedef struct machine_slot *machine_slot_t; +typedef struct machine_slot machine_slot_data_t; /* bogus */ + +#ifdef KERNEL_PRIVATE +extern struct machine_info machine_info; +extern struct machine_slot machine_slot[]; +#endif + +/* + * Machine types known by all. 
+ */ + +#define CPU_TYPE_ANY ((cpu_type_t) -1) + +#define CPU_TYPE_VAX ((cpu_type_t) 1) +/* skip ((cpu_type_t) 2) */ +/* skip ((cpu_type_t) 3) */ +/* skip ((cpu_type_t) 4) */ +/* skip ((cpu_type_t) 5) */ +#define CPU_TYPE_MC680x0 ((cpu_type_t) 6) +#define CPU_TYPE_I386 ((cpu_type_t) 7) +/* skip CPU_TYPE_MIPS ((cpu_type_t) 8) */ +/* skip ((cpu_type_t) 9) */ +#define CPU_TYPE_MC98000 ((cpu_type_t) 10) +#define CPU_TYPE_HPPA ((cpu_type_t) 11) +/* skip CPU_TYPE_ARM ((cpu_type_t) 12) */ +#define CPU_TYPE_MC88000 ((cpu_type_t) 13) +#define CPU_TYPE_SPARC ((cpu_type_t) 14) +#define CPU_TYPE_I860 ((cpu_type_t) 15) +/* skip CPU_TYPE_ALPHA ((cpu_type_t) 16) */ +/* skip ((cpu_type_t) 17) */ +#define CPU_TYPE_POWERPC ((cpu_type_t) 18) + + +/* + * Machine subtypes (these are defined here, instead of in a machine + * dependent directory, so that any program can get all definitions + * regardless of where is it compiled). + */ + +/* + * Object files that are hand-crafted to run on any + * implementation of an architecture are tagged with + * CPU_SUBTYPE_MULTIPLE. This functions essentially the same as + * the "ALL" subtype of an architecture except that it allows us + * to easily find object files that may need to be modified + * whenever a new implementation of an architecture comes out. + * + * It is the responsibility of the implementor to make sure the + * software handles unsupported implementations elegantly. + */ +#define CPU_SUBTYPE_MULTIPLE ((cpu_subtype_t) -1) +#define CPU_SUBTYPE_LITTLE_ENDIAN ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_BIG_ENDIAN ((cpu_subtype_t) 1) + +/* + * VAX subtypes (these do *not* necessary conform to the actual cpu + * ID assigned by DEC available via the SID register). 
+ */ + +#define CPU_SUBTYPE_VAX_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_VAX780 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_VAX785 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_VAX750 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_VAX730 ((cpu_subtype_t) 4) +#define CPU_SUBTYPE_UVAXI ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_UVAXII ((cpu_subtype_t) 6) +#define CPU_SUBTYPE_VAX8200 ((cpu_subtype_t) 7) +#define CPU_SUBTYPE_VAX8500 ((cpu_subtype_t) 8) +#define CPU_SUBTYPE_VAX8600 ((cpu_subtype_t) 9) +#define CPU_SUBTYPE_VAX8650 ((cpu_subtype_t) 10) +#define CPU_SUBTYPE_VAX8800 ((cpu_subtype_t) 11) +#define CPU_SUBTYPE_UVAXIII ((cpu_subtype_t) 12) + +/* + * 680x0 subtypes + * + * The subtype definitions here are unusual for historical reasons. + * NeXT used to consider 68030 code as generic 68000 code. For + * backwards compatability: + * + * CPU_SUBTYPE_MC68030 symbol has been preserved for source code + * compatability. + * + * CPU_SUBTYPE_MC680x0_ALL has been defined to be the same + * subtype as CPU_SUBTYPE_MC68030 for binary comatability. + * + * CPU_SUBTYPE_MC68030_ONLY has been added to allow new object + * files to be tagged as containing 68030-specific instructions. + */ + +#define CPU_SUBTYPE_MC680x0_ALL ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MC68030 ((cpu_subtype_t) 1) /* compat */ +#define CPU_SUBTYPE_MC68040 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_MC68030_ONLY ((cpu_subtype_t) 3) + +/* + * I386 subtypes. 
+ */ + +#define CPU_SUBTYPE_I386_ALL ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_386 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_486 ((cpu_subtype_t) 4) +#define CPU_SUBTYPE_486SX ((cpu_subtype_t) 4 + 128) +#define CPU_SUBTYPE_586 ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_INTEL(f, m) ((cpu_subtype_t) (f) + ((m) << 4)) +#define CPU_SUBTYPE_PENT CPU_SUBTYPE_INTEL(5, 0) +#define CPU_SUBTYPE_PENTPRO CPU_SUBTYPE_INTEL(6, 1) +#define CPU_SUBTYPE_PENTII_M3 CPU_SUBTYPE_INTEL(6, 3) +#define CPU_SUBTYPE_PENTII_M5 CPU_SUBTYPE_INTEL(6, 5) + +#define CPU_SUBTYPE_INTEL_FAMILY(x) ((x) & 15) +#define CPU_SUBTYPE_INTEL_FAMILY_MAX 15 + +#define CPU_SUBTYPE_INTEL_MODEL(x) ((x) >> 4) +#define CPU_SUBTYPE_INTEL_MODEL_ALL 0 + +/* + * Mips subtypes. + */ + +#define CPU_SUBTYPE_MIPS_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_MIPS_R2300 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MIPS_R2600 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_MIPS_R2800 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_MIPS_R2000a ((cpu_subtype_t) 4) /* pmax */ +#define CPU_SUBTYPE_MIPS_R2000 ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_MIPS_R3000a ((cpu_subtype_t) 6) /* 3max */ +#define CPU_SUBTYPE_MIPS_R3000 ((cpu_subtype_t) 7) + +/* + * MC98000 (PowerPC) subtypes + */ +#define CPU_SUBTYPE_MC98000_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_MC98601 ((cpu_subtype_t) 1) + +/* + * HPPA subtypes for Hewlett-Packard HP-PA family of + * risc processors. Port by NeXT to 700 series. + */ + +#define CPU_SUBTYPE_HPPA_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_HPPA_7100 ((cpu_subtype_t) 0) /* compat */ +#define CPU_SUBTYPE_HPPA_7100LC ((cpu_subtype_t) 1) + +/* + * MC88000 subtypes. 
+ */ +#define CPU_SUBTYPE_MC88000_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_MC88100 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MC88110 ((cpu_subtype_t) 2) + +/* + * SPARC subtypes + */ +#define CPU_SUBTYPE_SPARC_ALL ((cpu_subtype_t) 0) + +/* + * I860 subtypes + */ +#define CPU_SUBTYPE_I860_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_I860_860 ((cpu_subtype_t) 1) + +/* + * PowerPC subtypes + */ +#define CPU_SUBTYPE_POWERPC_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_POWERPC_601 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_POWERPC_602 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_POWERPC_603 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_POWERPC_603e ((cpu_subtype_t) 4) +#define CPU_SUBTYPE_POWERPC_603ev ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_POWERPC_604 ((cpu_subtype_t) 6) +#define CPU_SUBTYPE_POWERPC_604e ((cpu_subtype_t) 7) +#define CPU_SUBTYPE_POWERPC_620 ((cpu_subtype_t) 8) +#define CPU_SUBTYPE_POWERPC_750 ((cpu_subtype_t) 9) +#define CPU_SUBTYPE_POWERPC_7400 ((cpu_subtype_t) 10) +#define CPU_SUBTYPE_POWERPC_7450 ((cpu_subtype_t) 11) + +#endif /* _MACH_MACHINE_H_ */ diff --git a/osfmk/mach/machine/Makefile b/osfmk/mach/machine/Makefile new file mode 100644 index 000000000..830dd11e0 --- /dev/null +++ b/osfmk/mach/machine/Makefile @@ -0,0 +1,28 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + + +DATAFILES = \ + asm.h boolean.h exception.h kern_return.h ndr_def.h rpc.h \ + processor_info.h thread_state.h thread_status.h \ + vm_param.h vm_types.h machine_types.defs \ + syscall_sw.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = mach/machine + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = mach/machine + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/mach/machine/asm.h b/osfmk/mach/machine/asm.h new file mode 100644 index 
000000000..ed74cc334 --- /dev/null +++ b/osfmk/mach/machine/asm.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_ASM_H +#define _MACH_MACHINE_ASM_H + + +#if defined (__ppc__) +#include "mach/ppc/asm.h" +#elif defined (__i386__) +#include "mach/i386/asm.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_ASM_H */ diff --git a/osfmk/mach/machine/boolean.h b/osfmk/mach/machine/boolean.h new file mode 100644 index 000000000..d85824189 --- /dev/null +++ b/osfmk/mach/machine/boolean.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_BOOLEAN_H_ +#define _MACH_MACHINE_BOOLEAN_H_ + + +#if defined (__ppc__) +#include "mach/ppc/boolean.h" +#elif defined (__i386__) +#include "mach/i386/boolean.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_BOOLEAN_H_ */ diff --git a/osfmk/mach/machine/exception.h b/osfmk/mach/machine/exception.h new file mode 100644 index 000000000..e08c34a15 --- /dev/null +++ b/osfmk/mach/machine/exception.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_EXCEPTION_H_ +#define _MACH_MACHINE_EXCEPTION_H_ + + +#if defined (__ppc__) +#include "mach/ppc/exception.h" +#elif defined (__i386__) +#include "mach/i386/exception.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_EXCEPTION_H_ */ diff --git a/osfmk/mach/machine/kern_return.h b/osfmk/mach/machine/kern_return.h new file mode 100644 index 000000000..45ad20b8b --- /dev/null +++ b/osfmk/mach/machine/kern_return.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_KERN_RETURN_H_ +#define _MACH_MACHINE_KERN_RETURN_H_ + + +#if defined (__ppc__) +#include "mach/ppc/kern_return.h" +#elif defined (__i386__) +#include "mach/i386/kern_return.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_KERN_RETURN_H_ */ diff --git a/osfmk/mach/machine/machine_types.defs b/osfmk/mach/machine/machine_types.defs new file mode 100644 index 000000000..2b9ecc264 --- /dev/null +++ b/osfmk/mach/machine/machine_types.defs @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#ifndef _MACH_MACHINE_MACHINE_TYPES_DEFS
+#define _MACH_MACHINE_MACHINE_TYPES_DEFS
+
+
+#if defined (__ppc__)
+#include "mach/ppc/machine_types.defs"
+#elif defined (__i386__)
+#include "mach/i386/machine_types.defs"
+#else
+#error architecture not supported
+#endif
+
+
+#endif /* _MACH_MACHINE_MACHINE_TYPES_DEFS */ diff --git a/osfmk/mach/machine/ndr_def.h b/osfmk/mach/machine/ndr_def.h new file mode 100644 index 000000000..498d5c984 --- /dev/null +++ b/osfmk/mach/machine/ndr_def.h @@ -0,0 +1,35 @@ +/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_NDR_DEF_H +#define _MACH_MACHINE_NDR_DEF_H + + +#if defined (__ppc__) +#include "mach/ppc/ndr_def.h" +#elif defined (__i386__) +#include "mach/i386/ndr_def.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_NDR_DEF_H */ diff --git a/osfmk/mach/machine/processor_info.h b/osfmk/mach/machine/processor_info.h new file mode 100644 index 000000000..fa04db434 --- /dev/null +++ b/osfmk/mach/machine/processor_info.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_PROCESSOR_INFO_H_ +#define _MACH_MACHINE_PROCESSOR_INFO_H_ + + +#if defined (__ppc__) +#include "mach/ppc/processor_info.h" +#elif defined (__i386__) +#include "mach/i386/processor_info.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_PROCESSOR_INFO_H_ */ diff --git a/osfmk/mach/machine/rpc.h b/osfmk/mach/machine/rpc.h new file mode 100644 index 000000000..3d3f19243 --- /dev/null +++ b/osfmk/mach/machine/rpc.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_RPC_H_ +#define _MACH_MACHINE_RPC_H_ + + +#if defined (__ppc__) +#include "mach/ppc/rpc.h" +#elif defined (__i386__) +#include "mach/i386/rpc.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_RPC_H_ */ diff --git a/osfmk/mach/machine/syscall_sw.h b/osfmk/mach/machine/syscall_sw.h new file mode 100644 index 000000000..c140fcdbc --- /dev/null +++ b/osfmk/mach/machine/syscall_sw.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_SYSCALL_SW_H_ +#define _MACH_MACHINE_SYSCALL_SW_H_ + + +#if defined (__ppc__) +#include "mach/ppc/syscall_sw.h" +#elif defined (__i386__) +#include "mach/i386/syscall_sw.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_SYSCALL_SW_H_ */ diff --git a/osfmk/mach/machine/thread_state.h b/osfmk/mach/machine/thread_state.h new file mode 100644 index 000000000..286cfc29d --- /dev/null +++ b/osfmk/mach/machine/thread_state.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_THREAD_STATE_H_ +#define _MACH_MACHINE_THREAD_STATE_H_ + + +#if defined (__ppc__) +#include "mach/ppc/thread_state.h" +#elif defined (__i386__) +#include "mach/i386/thread_state.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_THREAD_STATE_H_ */ diff --git a/osfmk/mach/machine/thread_status.h b/osfmk/mach/machine/thread_status.h new file mode 100644 index 000000000..6047637e6 --- /dev/null +++ b/osfmk/mach/machine/thread_status.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_THREAD_STATUS_H_ +#define _MACH_MACHINE_THREAD_STATUS_H_ + + +#if defined (__ppc__) +#include "mach/ppc/thread_status.h" +#elif defined (__i386__) +#include "mach/i386/thread_status.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/machine/vm_param.h b/osfmk/mach/machine/vm_param.h new file mode 100644 index 000000000..beb5bc185 --- /dev/null +++ b/osfmk/mach/machine/vm_param.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_VM_PARAM_H_ +#define _MACH_MACHINE_VM_PARAM_H_ + + +#if defined (__ppc__) +#include "mach/ppc/vm_param.h" +#elif defined (__i386__) +#include "mach/i386/vm_param.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_VM_PARAM_H_ */ diff --git a/osfmk/mach/machine/vm_types.h b/osfmk/mach/machine/vm_types.h new file mode 100644 index 000000000..7b8770485 --- /dev/null +++ b/osfmk/mach/machine/vm_types.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_VM_TYPES_H_ +#define _MACH_MACHINE_VM_TYPES_H_ + + +#if defined (__ppc__) +#include "mach/ppc/vm_types.h" +#elif defined (__i386__) +#include "mach/i386/vm_types.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_VM_TYPES_H_ */ diff --git a/osfmk/mach/memory_object.defs b/osfmk/mach/memory_object.defs new file mode 100644 index 000000000..2e8ffe8f4 --- /dev/null +++ b/osfmk/mach/memory_object.defs @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */
+/*
+ */
+/*
+ * File: mach/memory_object.defs
+ *
+ * Abstract:
+ * Basic Mach external memory management interface declaration.
+ */
+
+subsystem
+#if KERNEL_USER
+ KernelUser
+#endif KERNEL_USER
+#if KERNEL_SERVER
+ KernelServer
+#endif KERNEL_SERVER
+ memory_object 2200;
+
+#ifdef MACH_KERNEL
+#include
+#endif /* MACH_KERNEL */
+
+#include
+#include
+
+#if KERNEL_SERVER
+serverprefix dp_;
+#else
+#if SEQNOS
+serverprefix seqnos_;
+serverdemux seqnos_memory_object_server;
+#endif SEQNOS
+#endif
+
+/*
+ * Initialize the specified memory object, providing
+ * a request port on which control calls can be made.
+ * [To allow the mapping of this object to be used, the
+ * memory manager must call memory_object_set_attributes,
+ * specifying the "ready" parameter as TRUE. To reject
+ * all mappings of this object, the memory manager may
+ * use memory_object_destroy.]
+ */
+simpleroutine memory_object_init(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ memory_object_page_size : vm_size_t);
+
+
+/*
+ * Indicates that the specified memory object is no longer
+ * mapped (or cached -- see memory_object_set_attributes),
+ * and that further mappings will cause another memory_object_init
+ * call to be made. No further calls will be made on
+ * the memory object by this kernel.
+ *
+ * [All rights to the control and name ports are included
+ * in this call. The memory manager should use port_deallocate
+ * to release them once they are no longer needed.]
+ */
+simpleroutine memory_object_terminate(
+ memory_object : memory_object_t =
+ MACH_MSG_TYPE_MOVE_SEND
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MOVE_RECEIVE
+ ctype: mach_port_t);
+
+/*
+ * Request data from this memory object.
 At least
+ * the specified data should be returned with at
+ * least the specified access permitted.
+ *
+ * [Reply should be memory_object_data_provided.]
+ */
+simpleroutine memory_object_data_request(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : memory_object_offset_t;
+ length : vm_size_t;
+ desired_access : vm_prot_t);
+
+/*
+ * Request that the specified portion of this
+ * memory object be unlocked to allow the specified
+ * forms of access; the kernel already has the data.
+ *
+ * [Reply should be memory_object_lock_request.]
+ */
+simpleroutine memory_object_data_unlock(
+ memory_object : memory_object_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : memory_object_offset_t;
+ length : vm_size_t;
+ desired_access : vm_prot_t);
+
+/*
+ * Indicate that a previous memory_object_lock_request has been
+ * completed. Note that this call is made on whatever
+ * port is specified in the memory_object_lock_request; that port
+ * need not be the memory object port itself.
+ *
+ * [No reply expected.]
+ */
+simpleroutine memory_object_lock_completed(
+ memory_object : memory_object_t =
+ polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
+ ctype: mach_port_t;
+#if SEQNOS
+ msgseqno seqno : mach_port_seqno_t;
+#endif SEQNOS
+ memory_control : memory_object_control_t =
+ MACH_MSG_TYPE_MAKE_SEND
+ ctype: mach_port_t;
+ offset : memory_object_offset_t;
+ length : vm_size_t);
+
+ skip;
+
+/*
+ * Return data to manager. This call is used in place of data_write
+ * for objects initialized by object_ready instead of set_attributes.
+ * This call indicates whether the returned data is dirty and whether
+ * the kernel kept a copy. Precious data remains precious if the
+ * kernel keeps a copy.
The indication that the kernel kept a copy + * is only a hint if the data is not precious; the cleaned copy may + * be discarded without further notifying the manager. + * + * [Reply should be vm_deallocate to release the data.] + */ +simpleroutine memory_object_data_return( + memory_object : memory_object_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + memory_control : memory_object_control_t = + MACH_MSG_TYPE_MAKE_SEND + ctype: mach_port_t; + offset : memory_object_offset_t; + data : pointer_t; + dirty : boolean_t; + kernel_copy : boolean_t); + + +simpleroutine memory_object_synchronize( + memory_object : memory_object_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + memory_control : memory_object_control_t = + MACH_MSG_TYPE_MAKE_SEND + ctype: mach_port_t; + offset : memory_object_offset_t; + length : vm_offset_t; + sync_flags : vm_sync_t ); + + +/* + * Confirm a successful memory_object_change attributes message. + */ +simpleroutine memory_object_change_completed( + memory_object : memory_object_t = + polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE + ctype: mach_port_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + memory_control : memory_object_control_t = + MACH_MSG_TYPE_MAKE_SEND + ctype: mach_port_t; + flavor : memory_object_flavor_t); + + + skip; diff --git a/osfmk/mach/memory_object.h b/osfmk/mach/memory_object.h new file mode 100644 index 000000000..63675bd21 --- /dev/null +++ b/osfmk/mach/memory_object.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: memory_object.h + * Author: Michael Wayne Young + * + * External memory management interface definition. + */ + +#ifndef _MACH_MEMORY_OBJECT_H_ +#define _MACH_MEMORY_OBJECT_H_ + +/* + * User-visible types used in the external memory + * management interface: + */ + +#include +#include + +typedef mach_port_t memory_object_t; + /* A memory object ... 
*/ + /* Used by the kernel to retrieve */ + /* or store data */ + +typedef mach_port_t memory_object_control_t; + /* Provided to a memory manager; ... */ + /* used to control a memory object */ + +typedef mach_port_t memory_object_name_t; + /* Used to describe the memory ... */ + /* object in vm_regions() calls */ + +typedef mach_port_t memory_object_rep_t; + /* Per-client handle for mem object */ + /* Used by user programs to specify */ + /* the object to map */ + +typedef int memory_object_copy_strategy_t; + /* How memory manager handles copy: */ +#define MEMORY_OBJECT_COPY_NONE 0 + /* ... No special support */ +#define MEMORY_OBJECT_COPY_CALL 1 + /* ... Make call on memory manager */ +#define MEMORY_OBJECT_COPY_DELAY 2 + /* ... Memory manager doesn't + * change data externally. + */ +#define MEMORY_OBJECT_COPY_TEMPORARY 3 + /* ... Memory manager doesn't + * change data externally, and + * doesn't need to see changes. + */ +#define MEMORY_OBJECT_COPY_SYMMETRIC 4 + /* ... Memory manager doesn't + * change data externally, + * doesn't need to see changes, + * and object will not be + * multiply mapped. + * + * XXX + * Not yet safe for non-kernel use. + */ + +#define MEMORY_OBJECT_COPY_INVALID 5 + /* ... An invalid copy strategy, + * for external objects which + * have not been initialized. + * Allows copy_strategy to be + * examined without also + * examining pager_ready and + * internal. + */ + +typedef int memory_object_return_t; + /* Which pages to return to manager + this time (lock_request) */ +#define MEMORY_OBJECT_RETURN_NONE 0 + /* ... don't return any. */ +#define MEMORY_OBJECT_RETURN_DIRTY 1 + /* ... only dirty pages. */ +#define MEMORY_OBJECT_RETURN_ALL 2 + /* ... dirty and precious pages. */ +#define MEMORY_OBJECT_RETURN_ANYTHING 3 + /* ... any resident page. 
*/ + +#define MEMORY_OBJECT_NULL MACH_PORT_NULL + + +/* + * Types for the memory object flavor interfaces + */ + +#define MEMORY_OBJECT_INFO_MAX (1024) +typedef int *memory_object_info_t; +typedef int memory_object_flavor_t; +typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; + + +#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 +#define MEMORY_OBJECT_PERFORMANCE_INFO 11 +#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 +#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 +#define MEMORY_OBJECT_BEHAVIOR_INFO 15 + + +struct old_memory_object_behave_info { + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; +}; + +struct memory_object_perf_info { + vm_size_t cluster_size; + boolean_t may_cache; +}; + +struct old_memory_object_attr_info { /* old attr list */ + boolean_t object_ready; + boolean_t may_cache; + memory_object_copy_strategy_t copy_strategy; +}; + +struct memory_object_attr_info { + memory_object_copy_strategy_t copy_strategy; + vm_offset_t cluster_size; + boolean_t may_cache_object; + boolean_t temporary; +}; + +struct memory_object_behave_info { + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; + boolean_t silent_overwrite; + boolean_t advisory_pageout; +}; + +typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; +typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t; + +typedef struct memory_object_behave_info *memory_object_behave_info_t; +typedef struct memory_object_behave_info memory_object_behave_info_data_t; + +typedef struct memory_object_perf_info *memory_object_perf_info_t; +typedef struct memory_object_perf_info memory_object_perf_info_data_t; + +typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t; +typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; + +typedef struct memory_object_attr_info *memory_object_attr_info_t; +typedef struct memory_object_attr_info 
memory_object_attr_info_data_t; + +#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT \ + (sizeof(old_memory_object_behave_info_data_t)/sizeof(int)) +#define MEMORY_OBJECT_BEHAVE_INFO_COUNT \ + (sizeof(memory_object_behave_info_data_t)/sizeof(int)) +#define MEMORY_OBJECT_PERF_INFO_COUNT \ + (sizeof(memory_object_perf_info_data_t)/sizeof(int)) +#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT \ + (sizeof(old_memory_object_attr_info_data_t)/sizeof(int)) +#define MEMORY_OBJECT_ATTR_INFO_COUNT \ + (sizeof(memory_object_attr_info_data_t)/sizeof(int)) + +#define invalid_memory_object_flavor(f) \ + (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ + f != MEMORY_OBJECT_PERFORMANCE_INFO && \ + f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ + f != MEMORY_OBJECT_BEHAVIOR_INFO && \ + f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) + +#endif /* _MACH_MEMORY_OBJECT_H_ */ diff --git a/osfmk/mach/memory_object_control.defs b/osfmk/mach/memory_object_control.defs new file mode 100644 index 000000000..ea4eb53b0 --- /dev/null +++ b/osfmk/mach/memory_object_control.defs @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/memory_object.defs + * + * Abstract: + * Basic Mach external memory management interface declaration. + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif KERNEL_USER +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + memory_object_control 2000; + +#ifdef MACH_KERNEL +#include +#endif /* MACH_KERNEL */ + +#include +#include + +/* + * JMM - For now we define back-and-forth paging until we get the code + * cleaned up. + */ +#define _BACK_AND_FORTH_PAGING_ + +/* + * Retrieves the attributes currently associated with + * a memory object. 
+ */ +routine memory_object_get_attributes( + memory_control : vm_object_t; + flavor : memory_object_flavor_t; + out attributes : memory_object_info_t, CountInOut); + + +simpleroutine memory_object_change_attributes( + memory_control : vm_object_t; + flavor : memory_object_flavor_t; + attributes : memory_object_info_t +#ifdef _BACK_AND_FORTH_PAGING_ + ; reply_to : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND|polymorphic +#endif + ); + +simpleroutine memory_object_synchronize_completed ( + memory_control : vm_object_t; + offset : memory_object_offset_t; + length : vm_offset_t); + +skip; + +/* + * Control use of the data associated with the given + * memory object. For each page in the given range, + * perform the following operations, in order: + * 1) restrict access to the page (disallow + * forms specified by "prot"); + * 2) write back modifications (if "should_return" + * is RETURN_DIRTY and the page is dirty, or + * "should_return" is RETURN_ALL and the page + * is either dirty or precious); and, + * 3) flush the cached copy (if "should_flush" + * is asserted). + * The set of pages is defined by a starting offset + * ("offset") and size ("size"). Only pages with the + * same page alignment as the starting offset are + * considered. + * + * A single acknowledgement is sent (to the "reply_to" + * port) when these actions are complete. + * + * There are two versions of this routine because IPC distinguishes + * between booleans and integers (a 2-valued integer is NOT a + * boolean). The new routine is backwards compatible at the C + * language interface. 
+ */ +simpleroutine memory_object_lock_request( + memory_control : vm_object_t; + offset : memory_object_offset_t; + size : memory_object_size_t; + should_return : memory_object_return_t; + flags : integer_t; + lock_value : vm_prot_t +#ifdef _BACK_AND_FORTH_PAGING_ + ; reply_to : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND|polymorphic +#endif + ); + + + skip; + +/* + */ +simpleroutine memory_object_destroy( + memory_control : vm_object_t; + reason : kern_return_t); + +/* + * Provide the data contents of a range of the given memory + * object, with the access restriction specified, optional + * precious attribute, and reply message. [Only + * whole virtual pages of data can be accepted; partial pages + * will be discarded. Data should be provided on request, but + * may be provided in advance as desired. When data already + * held by this kernel is provided again, the new data is ignored. + * The access restriction is the subset of {read, write, execute} + * which are prohibited. The kernel may not provide any data (or + * protection) consistency among pages with different virtual page + * alignments within the same object. The precious value controls + * how the kernel treats the data. If it is FALSE, the kernel treats + * its copy as a temporary and may throw it away if it hasn't been + * changed. If the precious value is TRUE, the kernel treats its + * copy as a data repository and promises to return it to the manager; + * the manager may tell the kernel to throw it away instead by flushing + * and not cleaning the data -- see memory_object_lock_request. The + * reply_to port is for a completion message; it will be + * memory_object_supply_completed.]
+ */ + + skip; + + skip; + +routine vm_object_upl_request( + object :vm_object_entry_t; + in offset :memory_object_offset_t; + in size :vm_size_t; + out upl :mach_port_move_send_t; + out page_list :upl_page_list_ptr_t, CountInOut; + in cntrl_flags :integer_t); + +routine vm_pager_upl_request( + object :vm_object_entry_t; + in offset :memory_object_offset_t; + in size :vm_size_t; + in super_size :vm_size_t; + out upl :mach_port_move_send_t; + out page_list :upl_page_list_ptr_t, CountInOut; + in cntrl_flags :integer_t); + +routine vm_upl_map( + map :vm_task_entry_t; + in upl :upl_object_t; + inout dst_addr :vm_offset_t); + +routine vm_upl_unmap( + map :vm_task_entry_t; + in upl :upl_object_t); + +routine vm_upl_abort( + upl_object :upl_object_entry_t; + in abort_cond :integer_t); + +routine vm_upl_commit( + upl_object :upl_object_entry_t; + in page_list :upl_page_list_ptr_t); + +routine vm_upl_commit_range( + upl_object :upl_object_entry_t; + offset :vm_offset_t; + size :vm_size_t; + in page_list :upl_page_list_ptr_t); diff --git a/osfmk/mach/memory_object_default.defs b/osfmk/mach/memory_object_default.defs new file mode 100644 index 000000000..e86a41357 --- /dev/null +++ b/osfmk/mach/memory_object_default.defs @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/memory_object_default.defs + * + * Abstract: + * Mach external memory management interface declaration; subset + * that is applicable to managers of kernel-created memory objects. 
+ */ + +subsystem +#if KERNEL_USER + KernelUser +#endif KERNEL_USER +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + memory_object_default 2250; + +#include +#include + +#if KERNEL_SERVER +serverprefix dp_; +#else +#if SEQNOS +serverprefix seqnos_; +serverdemux seqnos_memory_object_default_server; +#endif /* SEQNOS */ +#endif + +/* + * Pass on responsibility for the new kernel-created memory + * object. The port on which this request is that port + * (possibly a memory object itself) registered as the "default + * pager". Other arguments are as described for memory_object_init. + * [No reply required.] + */ +routine memory_object_create( + old_memory_object : memory_object_default_t = + MACH_MSG_TYPE_MOVE_SEND; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + out new_memory_object : memory_object_default_t = + MACH_MSG_TYPE_MAKE_SEND; + new_object_size : vm_size_t); + +/* + * Provide initial data contents for this region of + * the memory object. If data has already been written + * to the object, this value must be discarded; otherwise, + * this call acts identically to memory_object_data_write. + */ +simpleroutine memory_object_data_initialize( + memory_object : memory_object_default_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + memory_control_port : memory_object_control_t = + MACH_MSG_TYPE_MAKE_SEND; + offset : memory_object_offset_t; + data : pointer_t); + diff --git a/osfmk/mach/memory_object_name.defs b/osfmk/mach/memory_object_name.defs new file mode 100644 index 000000000..589a998c4 --- /dev/null +++ b/osfmk/mach/memory_object_name.defs @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_port.defs + * Author: Rich Draves + * + * Exported kernel calls. 
+ */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + memory_object_name 2600; + +#include +#include + +/* + * References to memory_object_name objects are returned by: + * vm_region(vm_map_t,...) + * + * These are used simply to compare one mapping against another + * and have no methods. + */ diff --git a/osfmk/mach/memory_object_types.h b/osfmk/mach/memory_object_types.h new file mode 100644 index 000000000..21f84e39c --- /dev/null +++ b/osfmk/mach/memory_object_types.h @@ -0,0 +1,575 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation.
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: memory_object.h + * Author: Michael Wayne Young + * + * External memory management interface definition. + */ + +#ifndef _MACH_MEMORY_OBJECT_TYPES_H_ +#define _MACH_MEMORY_OBJECT_TYPES_H_ + +/* + * User-visible types used in the external memory + * management interface: + */ + +#include +#include +#include + +#define VM_64_BIT_DATA_OBJECTS +#define SHARED_LIBRARY_SERVER_SUPPORTED +#define GLOBAL_SHARED_TEXT_SEGMENT 0x70000000 +#define GLOBAL_SHARED_DATA_SEGMENT 0x80000000 +#define GLOBAL_SHARED_SEGMENT_MASK 0xF0000000 + +typedef mach_port_t memory_object_default_t; + +typedef mach_port_t memory_object_t; + /* A memory object ... */ + /* Used by the kernel to retrieve */ + /* or store data */ + +typedef mach_port_t memory_object_control_t; + /* Provided to a memory manager; ... */ + /* used to control a memory object */ + +typedef mach_port_t memory_object_name_t; + /* Used to describe the memory ... */ + /* object in vm_regions() calls */ + +typedef mach_port_t memory_object_rep_t; + /* Per-client handle for mem object */ + /* Used by user programs to specify */ + /* the object to map */ + +typedef int memory_object_copy_strategy_t; + /* How memory manager handles copy: */ +#define MEMORY_OBJECT_COPY_NONE 0 + /* ... No special support */ +#define MEMORY_OBJECT_COPY_CALL 1 + /* ... Make call on memory manager */ +#define MEMORY_OBJECT_COPY_DELAY 2 + /* ... 
Memory manager doesn't + * change data externally. + */ +#define MEMORY_OBJECT_COPY_TEMPORARY 3 + /* ... Memory manager doesn't + * change data externally, and + * doesn't need to see changes. + */ +#define MEMORY_OBJECT_COPY_SYMMETRIC 4 + /* ... Memory manager doesn't + * change data externally, + * doesn't need to see changes, + * and object will not be + * multiply mapped. + * + * XXX + * Not yet safe for non-kernel use. + */ + +#define MEMORY_OBJECT_COPY_INVALID 5 + /* ... An invalid copy strategy, + * for external objects which + * have not been initialized. + * Allows copy_strategy to be + * examined without also + * examining pager_ready and + * internal. + */ + +typedef int memory_object_return_t; + /* Which pages to return to manager + this time (lock_request) */ +#define MEMORY_OBJECT_RETURN_NONE 0 + /* ... don't return any. */ +#define MEMORY_OBJECT_RETURN_DIRTY 1 + /* ... only dirty pages. */ +#define MEMORY_OBJECT_RETURN_ALL 2 + /* ... dirty and precious pages. */ +#define MEMORY_OBJECT_RETURN_ANYTHING 3 + /* ... any resident page. 
*/ + +#define MEMORY_OBJECT_NULL MACH_PORT_NULL + +/* + * Data lock request flags + */ + +#define MEMORY_OBJECT_DATA_FLUSH 0x1 +#define MEMORY_OBJECT_DATA_NO_CHANGE 0x2 +#define MEMORY_OBJECT_DATA_PURGE 0x4 +#define MEMORY_OBJECT_COPY_SYNC 0x8 +#define MEMORY_OBJECT_DATA_SYNC 0x10 + +/* + * Types for the memory object flavor interfaces + */ + +#define MEMORY_OBJECT_INFO_MAX (1024) +typedef int *memory_object_info_t; +typedef int memory_object_flavor_t; +typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; + + +#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 +#define MEMORY_OBJECT_PERFORMANCE_INFO 11 +#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 +#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 +#define MEMORY_OBJECT_BEHAVIOR_INFO 15 + + +struct old_memory_object_behave_info { + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; +}; + +struct memory_object_perf_info { + vm_size_t cluster_size; + boolean_t may_cache; +}; + +struct old_memory_object_attr_info { /* old attr list */ + boolean_t object_ready; + boolean_t may_cache; + memory_object_copy_strategy_t copy_strategy; +}; + +struct memory_object_attr_info { + memory_object_copy_strategy_t copy_strategy; + vm_offset_t cluster_size; + boolean_t may_cache_object; + boolean_t temporary; +}; + +struct memory_object_behave_info { + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; + boolean_t silent_overwrite; + boolean_t advisory_pageout; +}; + +typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; +typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t; + +typedef struct memory_object_behave_info *memory_object_behave_info_t; +typedef struct memory_object_behave_info memory_object_behave_info_data_t; + +typedef struct memory_object_perf_info *memory_object_perf_info_t; +typedef struct memory_object_perf_info memory_object_perf_info_data_t; + +typedef struct old_memory_object_attr_info 
*old_memory_object_attr_info_t; +typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; + +typedef struct memory_object_attr_info *memory_object_attr_info_t; +typedef struct memory_object_attr_info memory_object_attr_info_data_t; + +#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT \ + (sizeof(old_memory_object_behave_info_data_t)/sizeof(int)) +#define MEMORY_OBJECT_BEHAVE_INFO_COUNT \ + (sizeof(memory_object_behave_info_data_t)/sizeof(int)) +#define MEMORY_OBJECT_PERF_INFO_COUNT \ + (sizeof(memory_object_perf_info_data_t)/sizeof(int)) +#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT \ + (sizeof(old_memory_object_attr_info_data_t)/sizeof(int)) +#define MEMORY_OBJECT_ATTR_INFO_COUNT \ + (sizeof(memory_object_attr_info_data_t)/sizeof(int)) + +#define invalid_memory_object_flavor(f) \ + (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ + f != MEMORY_OBJECT_PERFORMANCE_INFO && \ + f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ + f != MEMORY_OBJECT_BEHAVIOR_INFO && \ + f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) + + + +/* + * Even before we have components, we do not want to export upl internal + * structure to non mach components. 
+ */ +#ifndef MACH_KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE +typedef struct { + unsigned int opaque; + } * upl_t; +#else +typedef mach_port_t upl_t; +#endif /* KERNEL_PRIVATE */ +#endif + +#define MAX_UPL_TRANSFER 64 + +struct upl_page_info { + vm_offset_t phys_addr; + unsigned int + pageout:1, /* page is to be removed on commit */ + absent:1, /* No valid data in this page */ + dirty:1, /* Page must be cleaned (O) */ + precious:1, /* must be cleaned, we have only copy */ + device:1, /* no page data, mapped dev memory */ + :0; /* force to long boundary */ +}; + +typedef struct upl_page_info upl_page_info_t; + +typedef unsigned long long memory_object_offset_t; +typedef unsigned long long memory_object_size_t; +typedef upl_page_info_t *upl_page_list_ptr_t; +typedef mach_port_t upl_object_t; + + + +/* upl invocation flags */ + +#define UPL_COPYOUT_FROM 0x1 +#define UPL_PRECIOUS 0x2 +#define UPL_NO_SYNC 0x4 +#define UPL_CLEAN_IN_PLACE 0x8 +#define UPL_NOBLOCK 0x10 +#define UPL_RET_ONLY_DIRTY 0x20 +#define UPL_SET_INTERNAL 0x40 + +/* upl abort error flags */ +#define UPL_ABORT_RESTART 0x1 +#define UPL_ABORT_UNAVAILABLE 0x2 +#define UPL_ABORT_ERROR 0x4 +#define UPL_ABORT_FREE_ON_EMPTY 0x8 +#define UPL_ABORT_DUMP_PAGES 0x10 + +/* upl pages check flags */ +#define UPL_CHECK_DIRTY 0x1 + +/* upl pagein/pageout flags */ +#define UPL_IOSYNC 0x1 +#define UPL_NOCOMMIT 0x2 +#define UPL_NORDAHEAD 0x4 + +/* upl commit flags */ +#define UPL_COMMIT_FREE_ON_EMPTY 0x1 +#define UPL_COMMIT_CLEAR_DIRTY 0x2 +#define UPL_COMMIT_SET_DIRTY 0x4 +#define UPL_COMMIT_INACTIVATE 0x8 + +/* flags for return of state from vm_map_get_upl, vm_upl address space */ +/* based call */ +#define UPL_DEV_MEMORY 0x1 +#define UPL_PHYS_CONTIG 0x2 + + +/* access macros for upl_t */ + +#define UPL_DEVICE_PAGE(upl) \ + (((upl)[(index)].phys_addr != 0) ? 
(!((upl)[0].device)) : FALSE) + +#define UPL_PAGE_PRESENT(upl, index) \ + ((upl)[(index)].phys_addr != 0) + +#define UPL_PHYS_PAGE(upl, index) \ + (((upl)[(index)].phys_addr != 0) ? \ + ((upl)[(index)].phys_addr) : (vm_offset_t)NULL) + +#define UPL_DIRTY_PAGE(upl, index) \ + (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE) + +#define UPL_PRECIOUS_PAGE(upl, index) \ + (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE) + +#define UPL_VALID_PAGE(upl, index) \ + (((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE) + +#define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \ + if ((upl)[(index)].phys_addr != 0) \ + ((upl)[(index)].pageout) = TRUE + +#define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \ + if ((upl)[(index)].phys_addr != 0) \ + ((upl)[(index)].pageout) = FALSE + + +#ifdef KERNEL_PRIVATE +/* + * iokit code doesn't include prerequisite header files, thus the + * !defined(IOKIT). But osfmk code defines IOKIT! Thus the + * defined(MACH_KERNEL). To clean this gorp up "just" fix all + * iokit & driver code to include the prereqs. + */ +#if !defined(IOKIT) || defined(MACH_KERNEL) +#include + +/* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */ + +extern vm_size_t upl_offset_to_pagelist; +extern vm_size_t upl_get_internal_pagelist_offset(); + +/* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */ +/* list request was made with the UPL_INTERNAL flag */ + +#define UPL_GET_INTERNAL_PAGE_LIST(upl) \ + ((upl_page_info_t *)((upl_offset_to_pagelist == 0) ? 
\ + (unsigned int)upl + (unsigned int)(upl_offset_to_pagelist = upl_get_internal_pagelist_offset()): \ + (unsigned int)upl + (unsigned int)upl_offset_to_pagelist)) + +extern kern_return_t vm_fault_list_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + upl_t *upl, + upl_page_info_t **user_page_list, + int page_list_count, + int cntrol_flags); + +extern kern_return_t upl_system_list_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + vm_size_t super_size, + upl_t *upl, + upl_page_info_t **user_page_list, + int page_list_count, + int cntrol_flags); + +extern kern_return_t upl_map( + vm_map_t map, + upl_t upl, + vm_offset_t *dst_addr); + +extern kern_return_t upl_un_map( + vm_map_t map, + upl_t upl); + +extern kern_return_t upl_commit_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + boolean_t free_on_empty, + upl_page_info_t *page_list); + +extern kern_return_t upl_commit( + upl_t upl, + upl_page_info_t *page_list); + +extern upl_t upl_create( + boolean_t internal); + +extern void upl_destroy( + upl_t page_list); + +extern kern_return_t upl_abort( + upl_t page_list, + int error); + +extern kern_return_t upl_abort_range( + upl_t page_list, + vm_offset_t offset, + vm_size_t size, + int error); + +extern void upl_set_dirty( + upl_t upl); + +extern void upl_clear_dirty( + upl_t upl); + + + +extern kern_return_t memory_object_page_op( + vm_object_t object, + vm_object_offset_t offset, + int ops, + vm_offset_t *phys_entry, + int *flags); + +extern kern_return_t memory_object_release_name( + vm_object_t object, + int flags); + +extern kern_return_t vm_map_get_upl( + vm_map_t map, + vm_offset_t offset, + vm_size_t *upl_size, + upl_t *upl, + upl_page_info_t **page_list, + int *count, + int *flags, + int force_data_sync); + +extern kern_return_t vm_region_clone( + ipc_port_t src_region, + ipc_port_t dst_region); + +extern kern_return_t vm_map_region_replace( + vm_map_t target_map, + ipc_port_t old_region, + 
ipc_port_t new_region, + vm_offset_t start, + vm_offset_t end); + + + + +#ifndef MACH_KERNEL_PRIVATE + +/* address space shared region descriptor */ + +typedef void *shared_region_mapping_t; +typedef void *vm_named_entry_t; + +extern kern_return_t memory_object_destroy_named( + vm_object_t object, + kern_return_t reason); + +extern kern_return_t memory_object_lock_request_named( + vm_object_t object, + vm_object_offset_t offset, + memory_object_size_t size, + memory_object_return_t should_return, + int flags, + int prot, + ipc_port_t reply_to); + +extern kern_return_t memory_object_change_attributes_named( + vm_object_t object, + memory_object_flavor_t flavor, + memory_object_info_t attributes, + int count, + int reply_to, + int reply_to_type); + +extern kern_return_t memory_object_create_named( + ipc_port_t port, + vm_size_t size, + vm_object_t *object_ptr); + +/* +extern kern_return_t vm_get_shared_region( + task_t task, + shared_region_mapping_t *shared_region); + +extern kern_return_t vm_set_shared_region( + task_t task, + shared_region_mapping_t shared_region); +*/ + +extern kern_return_t shared_region_mapping_info( + shared_region_mapping_t shared_region, + ipc_port_t *text_region, + vm_size_t *text_size, + ipc_port_t *data_region, + vm_size_t *data_size, + vm_offset_t *region_mappings, + vm_offset_t *client_base, + vm_offset_t *alternate_base, + vm_offset_t *alternate_next, + int *flags, + shared_region_mapping_t *next); + +extern kern_return_t shared_region_mapping_create( + ipc_port_t text_region, + vm_size_t text_size, + ipc_port_t data_region, + vm_size_t data_size, + vm_offset_t region_mappings, + vm_offset_t client_base, + shared_region_mapping_t *shared_region); + +extern kern_return_t shared_region_mapping_ref( + shared_region_mapping_t shared_region); + +extern kern_return_t shared_region_mapping_dealloc( + shared_region_mapping_t shared_region); + +extern kern_return_t +shared_region_object_chain_attach( + shared_region_mapping_t target_region, + 
shared_region_mapping_t object_chain); + + +#endif /* !MACH_KERNEL_PRIVATE */ + + +/* + * Flags for the UPL page ops routine. This routine is not exported + * out of the kernel at the moment and so the defs live here. + */ + + +#define UPL_POP_DIRTY 0x1 +#define UPL_POP_PAGEOUT 0x2 +#define UPL_POP_PRECIOUS 0x4 +#define UPL_POP_ABSENT 0x8 +#define UPL_POP_BUSY 0x10 + +#define UPL_POP_DUMP 0x20000000 +#define UPL_POP_SET 0x40000000 +#define UPL_POP_CLR 0x80000000 + +/* + * Used to support options on memory_object_release_name call + */ + +#define MEMORY_OBJECT_TERMINATE_IDLE 0x1 +#define MEMORY_OBJECT_RESPECT_CACHE 0x2 +#define MEMORY_OBJECT_RELEASE_NO_OP 0x4 + + +#endif /* !defined(IOKIT) || defined(MACH_KERNEL) */ +#endif /* KERNEL_PRIVATE */ + + + +#endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */ diff --git a/osfmk/mach/message.h b/osfmk/mach/message.h new file mode 100644 index 000000000..003373bc8 --- /dev/null +++ b/osfmk/mach/message.h @@ -0,0 +1,590 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/message.h + * + * Mach IPC message and primitive function definitions. + */ + +#ifndef _MACH_MESSAGE_H_ +#define _MACH_MESSAGE_H_ + +#ifdef MACH_KERNEL +/* Have to have MIG parameter check for kernel */ +#define TypeCheck 1 +#define _MIG_KERNEL_SPECIFIC_CODE_ 1 +#endif /* MACH_KERNEL */ + +#include +#include + + +/* + * The timeout mechanism uses mach_msg_timeout_t values, + * passed by value. The timeout units are milliseconds. + * It is controlled with the MACH_SEND_TIMEOUT + * and MACH_RCV_TIMEOUT options. + */ + +typedef natural_t mach_msg_timeout_t; + +/* + * The value to be used when there is no timeout. + * (No MACH_SEND_TIMEOUT/MACH_RCV_TIMEOUT option.) + */ + +#define MACH_MSG_TIMEOUT_NONE ((mach_msg_timeout_t) 0) + +/* + * The kernel uses MACH_MSGH_BITS_COMPLEX as a hint. 
If it isn't on, it + * assumes the body of the message doesn't contain port rights or OOL + * data. The field is set in received messages. A user task must + * use caution in interpreting the body of a message if the bit isn't + * on, because the mach_msg_type's in the body might "lie" about the + * contents. If the bit isn't on, but the mach_msg_types + * in the body specify rights or OOL data, the behavior is undefined. + * (Ie, an error may or may not be produced.) + * + * The value of MACH_MSGH_BITS_REMOTE determines the interpretation + * of the msgh_remote_port field. It is handled like a msgt_name. + * + * The value of MACH_MSGH_BITS_LOCAL determines the interpretation + * of the msgh_local_port field. It is handled like a msgt_name. + * + * MACH_MSGH_BITS() combines two MACH_MSG_TYPE_* values, for the remote + * and local fields, into a single value suitable for msgh_bits. + * + * MACH_MSGH_BITS_CIRCULAR should be zero; it is used internally. + * + * The unused bits should be zero and are reserved for the kernel + * or for future interface expansion. + */ + +#define MACH_MSGH_BITS_ZERO 0x00000000 +#define MACH_MSGH_BITS_REMOTE_MASK 0x000000ff +#define MACH_MSGH_BITS_LOCAL_MASK 0x0000ff00 +#define MACH_MSGH_BITS_COMPLEX 0x80000000U +#define MACH_MSGH_BITS_USER 0x8000ffffU + +#define MACH_MSGH_BITS_CIRCULAR 0x40000000 /* internal use only */ +#define MACH_MSGH_BITS_USED 0xc000ffffU + +#define MACH_MSGH_BITS_PORTS_MASK \ + (MACH_MSGH_BITS_REMOTE_MASK|MACH_MSGH_BITS_LOCAL_MASK) + +#define MACH_MSGH_BITS(remote, local) \ + ((remote) | ((local) << 8)) +#define MACH_MSGH_BITS_REMOTE(bits) \ + ((bits) & MACH_MSGH_BITS_REMOTE_MASK) +#define MACH_MSGH_BITS_LOCAL(bits) \ + (((bits) & MACH_MSGH_BITS_LOCAL_MASK) >> 8) +#define MACH_MSGH_BITS_PORTS(bits) \ + ((bits) & MACH_MSGH_BITS_PORTS_MASK) +#define MACH_MSGH_BITS_OTHER(bits) \ + ((bits) &~ MACH_MSGH_BITS_PORTS_MASK) + +/* + * Every message starts with a message header. 
+ * Following the message header are zero or more pairs of + * type descriptors (mach_msg_type_t/mach_msg_type_long_t) and + * data values. The size of the message must be specified in bytes, + * and includes the message header, type descriptors, inline + * data, and inline pointer for out-of-line data. + * + * The msgh_remote_port field specifies the destination of the message. + * It must specify a valid send or send-once right for a port. + * + * The msgh_local_port field specifies a "reply port". Normally, + * This field carries a send-once right that the receiver will use + * to reply to the message. It may carry the values MACH_PORT_NULL, + * MACH_PORT_DEAD, a send-once right, or a send right. + * + * The msgh_seqno field carries a sequence number associated with the + * received-from port. A port's sequence number is incremented every + * time a message is received from it. In sent messages, the field's + * value is ignored. + * + * The msgh_id field is uninterpreted by the message primitives. + * It normally carries information specifying the format + * or meaning of the message. 
+ */ + +typedef unsigned int mach_msg_bits_t; +typedef natural_t mach_msg_size_t; +typedef integer_t mach_msg_id_t; + + +#define MACH_MSG_SIZE_NULL (mach_msg_size_t *) 0 + +typedef unsigned int mach_msg_type_name_t; + +#define MACH_MSG_TYPE_MOVE_RECEIVE 16 /* Must hold receive rights */ +#define MACH_MSG_TYPE_MOVE_SEND 17 /* Must hold send rights */ +#define MACH_MSG_TYPE_MOVE_SEND_ONCE 18 /* Must hold sendonce rights */ +#define MACH_MSG_TYPE_COPY_SEND 19 /* Must hold send rights */ +#define MACH_MSG_TYPE_MAKE_SEND 20 /* Must hold receive rights */ +#define MACH_MSG_TYPE_MAKE_SEND_ONCE 21 /* Must hold receive rights */ +#define MACH_MSG_TYPE_COPY_RECEIVE 22 /* Must hold receive rights */ + +typedef unsigned int mach_msg_copy_options_t; + +#define MACH_MSG_PHYSICAL_COPY 0 +#define MACH_MSG_VIRTUAL_COPY 1 +#define MACH_MSG_ALLOCATE 2 +#define MACH_MSG_OVERWRITE 3 +#ifdef MACH_KERNEL +#define MACH_MSG_KALLOC_COPY_T 4 +#define MACH_MSG_PAGE_LIST_COPY_T 5 +#endif /* MACH_KERNEL */ + +typedef unsigned int mach_msg_descriptor_type_t; + +#define MACH_MSG_PORT_DESCRIPTOR 0 +#define MACH_MSG_OOL_DESCRIPTOR 1 +#define MACH_MSG_OOL_PORTS_DESCRIPTOR 2 +#define MACH_MSG_OOL_VOLATILE_DESCRIPTOR 3 + + +typedef struct +{ + void* pad1; + mach_msg_size_t pad2; + unsigned int pad3 : 24; + mach_msg_descriptor_type_t type : 8; +} mach_msg_type_descriptor_t; + +typedef struct +{ + mach_port_t name; + mach_msg_size_t pad1; + unsigned int pad2 : 16; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; +} mach_msg_port_descriptor_t; + +typedef struct +{ + void* address; + mach_msg_size_t size; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + unsigned int pad1: 8; + mach_msg_descriptor_type_t type: 8; +} mach_msg_ool_descriptor_t; + +typedef struct +{ + void* address; + mach_msg_size_t count; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; +} 
mach_msg_ool_ports_descriptor_t; + +typedef union +{ + mach_msg_port_descriptor_t port; + mach_msg_ool_descriptor_t out_of_line; + mach_msg_ool_ports_descriptor_t ool_ports; + mach_msg_type_descriptor_t type; +} mach_msg_descriptor_t; + +typedef struct +{ + mach_msg_size_t msgh_descriptor_count; +} mach_msg_body_t; + +#define MACH_MSG_BODY_NULL (mach_msg_body_t *) 0 +#define MACH_MSG_DESCRIPTOR_NULL (mach_msg_descriptor_t *) 0 + +typedef struct +{ + mach_msg_bits_t msgh_bits; + mach_msg_size_t msgh_size; + mach_port_t msgh_remote_port; + mach_port_t msgh_local_port; + mach_msg_size_t msgh_reserved; + mach_msg_id_t msgh_id; +} mach_msg_header_t; + +#define MACH_MSG_NULL (mach_msg_header_t *) 0 + +typedef struct +{ + mach_msg_header_t header; + mach_msg_body_t body; +} mach_msg_base_t; + +typedef unsigned int mach_msg_trailer_type_t; + +#define MACH_MSG_TRAILER_FORMAT_0 0 + +typedef unsigned int mach_msg_trailer_size_t; + +typedef struct +{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; +} mach_msg_trailer_t; + +typedef struct +{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; +} mach_msg_seqno_trailer_t; + +typedef struct +{ + unsigned int val[2]; +} security_token_t; + +typedef struct +{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; +} mach_msg_security_trailer_t; + +typedef mach_msg_security_trailer_t mach_msg_format_0_trailer_t; + +#define MACH_MSG_TRAILER_FORMAT_0_SIZE sizeof(mach_msg_format_0_trailer_t) +#define MACH_MSG_TRAILER_MINIMUM_SIZE sizeof(mach_msg_trailer_t) +#define MAX_TRAILER_SIZE MACH_MSG_TRAILER_FORMAT_0_SIZE + +#define KERNEL_SECURITY_TOKEN_VALUE { {0, 1} } +extern security_token_t KERNEL_SECURITY_TOKEN; + +typedef integer_t mach_msg_options_t; + +typedef struct +{ + mach_msg_header_t header; +} mach_msg_empty_send_t; + 
+typedef struct +{ + mach_msg_header_t header; + mach_msg_trailer_t trailer; +} mach_msg_empty_rcv_t; + +typedef union +{ + mach_msg_empty_send_t send; + mach_msg_empty_rcv_t rcv; +} mach_msg_empty_t; + +/* utility to round the message size - will become machine dependent */ +#define round_msg(x) (((mach_msg_size_t)(x) + sizeof (natural_t) - 1) & \ + ~(sizeof (natural_t) - 1)) + +/* + * There is no fixed upper bound to the size of Mach messages. + */ + +#define MACH_MSG_SIZE_MAX ((mach_msg_size_t) ~0) + +/* + * Compatibility definitions, for code written + * when there was a msgh_kind instead of msgh_seqno. + */ + +#define MACH_MSGH_KIND_NORMAL 0x00000000 +#if 0 +/* code using this is likely to break, so better not to have it defined */ +#define MACH_MSGH_KIND_NOTIFICATION 0x00000001 +#endif +#define msgh_kind msgh_seqno +#define mach_msg_kind_t mach_port_seqno_t + +/* + * The msgt_number field specifies the number of data elements. + * The msgt_size field specifies the size of each data element, in bits. + * The msgt_name field specifies the type of each data element. + * If msgt_inline is TRUE, the data follows the type descriptor + * in the body of the message. If msgt_inline is FALSE, then a pointer + * to the data should follow the type descriptor, and the data is + * sent out-of-line. In this case, if msgt_deallocate is TRUE, + * then the out-of-line data is moved (instead of copied) into the message. + * If msgt_longform is TRUE, then the type descriptor is actually + * a mach_msg_type_long_t. + * + * The actual amount of inline data following the descriptor must + * be a multiple of the word size. For out-of-line data, this is a + * pointer. For inline data, the supplied data size (calculated + * from msgt_number/msgt_size) is rounded up. This guarantees + * that type descriptors always fall on word boundaries. + * + * For port rights, msgt_size must be 8*sizeof(mach_port_t). + * If the data is inline, msgt_deallocate should be FALSE. 
+ * The msgt_unused bit should be zero. + * The msgt_name, msgt_size, msgt_number fields in + * a mach_msg_type_long_t should be zero. + */ + +typedef natural_t mach_msg_type_size_t; +typedef natural_t mach_msg_type_number_t; + +/* + * Values received/carried in messages. Tells the receiver what + * sort of port right he now has. + * + * MACH_MSG_TYPE_PORT_NAME is used to transfer a port name + * which should remain uninterpreted by the kernel. (Port rights + * are not transferred, just the port name.) + */ + +#define MACH_MSG_TYPE_PORT_NONE 0 + +#define MACH_MSG_TYPE_PORT_NAME 15 +#define MACH_MSG_TYPE_PORT_RECEIVE MACH_MSG_TYPE_MOVE_RECEIVE +#define MACH_MSG_TYPE_PORT_SEND MACH_MSG_TYPE_MOVE_SEND +#define MACH_MSG_TYPE_PORT_SEND_ONCE MACH_MSG_TYPE_MOVE_SEND_ONCE + +#define MACH_MSG_TYPE_LAST 22 /* Last assigned */ + +/* + * A dummy value. Mostly used to indicate that the actual value + * will be filled in later, dynamically. + */ + +#define MACH_MSG_TYPE_POLYMORPHIC ((mach_msg_type_name_t) -1) + +/* + * Is a given item a port type? 
+ */ + +#define MACH_MSG_TYPE_PORT_ANY(x) \ + (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \ + ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE)) + +#define MACH_MSG_TYPE_PORT_ANY_SEND(x) \ + (((x) >= MACH_MSG_TYPE_MOVE_SEND) && \ + ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE)) + +#define MACH_MSG_TYPE_PORT_ANY_RIGHT(x) \ + (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \ + ((x) <= MACH_MSG_TYPE_MOVE_SEND_ONCE)) + +typedef integer_t mach_msg_option_t; + +#define MACH_MSG_OPTION_NONE 0x00000000 + +#define MACH_SEND_MSG 0x00000001 +#define MACH_RCV_MSG 0x00000002 +#define MACH_RCV_LARGE 0x00000004 + +#define MACH_SEND_TIMEOUT 0x00000010 +#define MACH_SEND_INTERRUPT 0x00000040 /* libmach implements */ +#define MACH_SEND_CANCEL 0x00000080 +#define MACH_SEND_ALWAYS 0x00010000 /* internal use only */ +#define MACH_SEND_TRAILER 0x00020000 + +#define MACH_RCV_TIMEOUT 0x00000100 +#define MACH_RCV_NOTIFY 0x00000200 +#define MACH_RCV_INTERRUPT 0x00000400 /* libmach implements */ +#define MACH_RCV_OVERWRITE 0x00001000 + +/* + * NOTE: a 0x00------ RCV mask implies to ask for + * a MACH_MSG_TRAILER_FORMAT_0 with 0 Elements, + * which is equivalent to a mach_msg_trailer_t. + */ +#define MACH_RCV_TRAILER_NULL 0 +#define MACH_RCV_TRAILER_SEQNO 1 +#define MACH_RCV_TRAILER_SENDER 2 + +#define MACH_RCV_TRAILER_TYPE(x) (((x) & 0xf) << 28) +#define MACH_RCV_TRAILER_ELEMENTS(x) (((x) & 0xf) << 24) +#define MACH_RCV_TRAILER_MASK ((0xff << 24)) + +extern mach_msg_trailer_size_t trailer_size[]; + +#define GET_RCV_ELEMENTS(y) (((y) >> 24) & 0xf) +#define REQUESTED_TRAILER_SIZE(y) (trailer_size[GET_RCV_ELEMENTS(y)]) + +/* + * Much code assumes that mach_msg_return_t == kern_return_t. + * This definition is useful for descriptive purposes. + * + * See for the format of error codes. + * IPC errors are system 4. Send errors are subsystem 0; + * receive errors are subsystem 1. The code field is always non-zero. + * The high bits of the code field communicate extra information + * for some error codes. 
MACH_MSG_MASK masks off these special bits. + */ + +typedef kern_return_t mach_msg_return_t; + +#define MACH_MSG_SUCCESS 0x00000000 + + +#define MACH_MSG_MASK 0x00003e00 + /* All special error code bits defined below. */ +#define MACH_MSG_IPC_SPACE 0x00002000 + /* No room in IPC name space for another capability name. */ +#define MACH_MSG_VM_SPACE 0x00001000 + /* No room in VM address space for out-of-line memory. */ +#define MACH_MSG_IPC_KERNEL 0x00000800 + /* Kernel resource shortage handling an IPC capability. */ +#define MACH_MSG_VM_KERNEL 0x00000400 + /* Kernel resource shortage handling out-of-line memory. */ + +#define MACH_SEND_IN_PROGRESS 0x10000001 + /* Thread is waiting to send. (Internal use only.) */ +#define MACH_SEND_INVALID_DATA 0x10000002 + /* Bogus in-line data. */ +#define MACH_SEND_INVALID_DEST 0x10000003 + /* Bogus destination port. */ +#define MACH_SEND_TIMED_OUT 0x10000004 + /* Message not sent before timeout expired. */ +#define MACH_SEND_INTERRUPTED 0x10000007 + /* Software interrupt. */ +#define MACH_SEND_MSG_TOO_SMALL 0x10000008 + /* Data doesn't contain a complete message. */ +#define MACH_SEND_INVALID_REPLY 0x10000009 + /* Bogus reply port. */ +#define MACH_SEND_INVALID_RIGHT 0x1000000a + /* Bogus port rights in the message body. */ +#define MACH_SEND_INVALID_NOTIFY 0x1000000b + /* Bogus notify port argument. */ +#define MACH_SEND_INVALID_MEMORY 0x1000000c + /* Invalid out-of-line memory pointer. */ +#define MACH_SEND_NO_BUFFER 0x1000000d + /* No message buffer is available. */ +#define MACH_SEND_TOO_LARGE 0x1000000e + /* Send is too large for port */ +#define MACH_SEND_INVALID_TYPE 0x1000000f + /* Invalid msg-type specification. */ +#define MACH_SEND_INVALID_HEADER 0x10000010 + /* A field in the header had a bad value. */ +#define MACH_SEND_INVALID_TRAILER 0x10000011 + /* The trailer to be sent does not match kernel format. 
*/ +#define MACH_SEND_INVALID_RT_OOL_SIZE 0x10000015 + /* The OOL buffer size is too large for RT behavior */ + +#define MACH_RCV_IN_PROGRESS 0x10004001 + /* Thread is waiting for receive. (Internal use only.) */ +#define MACH_RCV_INVALID_NAME 0x10004002 + /* Bogus name for receive port/port-set. */ +#define MACH_RCV_TIMED_OUT 0x10004003 + /* Didn't get a message within the timeout value. */ +#define MACH_RCV_TOO_LARGE 0x10004004 + /* Message buffer is not large enough for inline data. */ +#define MACH_RCV_INTERRUPTED 0x10004005 + /* Software interrupt. */ +#define MACH_RCV_PORT_CHANGED 0x10004006 + /* Port moved into a set during the receive. */ +#define MACH_RCV_INVALID_NOTIFY 0x10004007 + /* Bogus notify port argument. */ +#define MACH_RCV_INVALID_DATA 0x10004008 + /* Bogus message buffer for inline data. */ +#define MACH_RCV_PORT_DIED 0x10004009 + /* Port/set was sent away/died during receive. */ +#define MACH_RCV_IN_SET 0x1000400a + /* Port is a member of a port set. */ +#define MACH_RCV_HEADER_ERROR 0x1000400b + /* Error receiving message header. See special bits. */ +#define MACH_RCV_BODY_ERROR 0x1000400c + /* Error receiving message body. See special bits. */ +#define MACH_RCV_INVALID_TYPE 0x1000400d + /* Invalid msg-type specification in scatter list. */ +#define MACH_RCV_SCATTER_SMALL 0x1000400e + /* Out-of-line overwrite region is not large enough */ +#define MACH_RCV_INVALID_TRAILER 0x1000400f + /* trailer type or number of trailer elements not supported */ +#define MACH_RCV_IN_PROGRESS_TIMED 0x10004011 + /* Waiting for receive with timeout. (Internal use only.) 
*/ + +extern mach_msg_return_t mach_msg_overwrite_trap( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify, + mach_msg_header_t *rcv_msg, + mach_msg_size_t rcv_limit); + +extern mach_msg_return_t mach_msg_overwrite( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify, + mach_msg_header_t *rcv_msg, + mach_msg_size_t rcv_limit); + +extern mach_msg_return_t mach_msg_trap( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify); + +extern mach_msg_return_t mach_msg( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify); + +#endif /* _MACH_MESSAGE_H_ */ diff --git a/osfmk/mach/mig.h b/osfmk/mach/mig.h new file mode 100644 index 000000000..501d6a8dc --- /dev/null +++ b/osfmk/mach/mig.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach MIG Subsystem Interfaces + */ + +#ifndef _MACH_MIG_H_ +#define _MACH_MIG_H_ + +#include +#include +#include + +/* + * Definition for MIG-generated server stub routines. These routines + * unpack the request message, call the server procedure, and pack the + * reply message. + */ +typedef void (*mig_stub_routine_t) (mach_msg_header_t *InHeadP, + mach_msg_header_t *OutHeadP); + +typedef mig_stub_routine_t mig_routine_t; + +/* + * Definition for server implementation routines. This is the routine + * called by the MIG-generated server stub routine. + */ +typedef kern_return_t (*mig_impl_routine_t)(void); + +typedef mig_impl_routine_t entry_function_t; + +#endif /* _MACH_MIG_H_ */ diff --git a/osfmk/mach/mig_errors.h b/osfmk/mach/mig_errors.h new file mode 100644 index 000000000..6a08cffa9 --- /dev/null +++ b/osfmk/mach/mig_errors.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Mach Interface Generator errors + * + */ + +#ifndef _MACH_MIG_ERRORS_H_ +#define _MACH_MIG_ERRORS_H_ + +#include +#include +#include +#include + +/* + * These error codes should be specified as system 4, subsytem 2. + * But alas backwards compatibility makes that impossible. 
+ * The problem is old clients of new servers (eg, the kernel) + * which get strange large error codes when there is a Mig problem + * in the server. Unfortunately, the IPC system doesn't have + * the knowledge to convert the codes in this situation. + */ + +#define MIG_TYPE_ERROR -300 /* client type check failure */ +#define MIG_REPLY_MISMATCH -301 /* wrong reply message ID */ +#define MIG_REMOTE_ERROR -302 /* server detected error */ +#define MIG_BAD_ID -303 /* bad request message ID */ +#define MIG_BAD_ARGUMENTS -304 /* server type check failure */ +#define MIG_NO_REPLY -305 /* no reply should be sent */ +#define MIG_EXCEPTION -306 /* server raised exception */ +#define MIG_ARRAY_TOO_LARGE -307 /* array not large enough */ +#define MIG_SERVER_DIED -308 /* server died */ +#define MIG_TRAILER_ERROR -309 /* trailer has an unknown format */ + +#include + +typedef struct { + mach_msg_header_t Head; + NDR_record_t NDR; + kern_return_t RetCode; +} mig_reply_error_t; + +typedef struct mig_symtab { + char *ms_routine_name; + int ms_routine_number; + void (*ms_routine)(void); /* Since the functions in the + * symbol table have unknown + * signatures, this is the best + * we can do... 
+ */ +} mig_symtab_t; + +/* Client side reply port allocate */ +extern mach_port_t mig_get_reply_port(void); + +/* Client side reply port deallocate */ +extern void mig_dealloc_reply_port(mach_port_t reply_port); + +/* Client side reply port "deallocation" */ +extern void mig_put_reply_port(mach_port_t reply_port); + +/* Allocate memory for out-of-stack mig structures */ +extern char *mig_user_allocate(vm_size_t size); + +/* Deallocate memory used for out-of-stack mig structures */ +extern void mig_user_deallocate(char *data, vm_size_t size); + +/* Bounded string copy */ +extern int mig_strncpy( + char *dest, + char *src, + int len); + +#endif /* _MACH_MIG_ERRORS_H_ */ diff --git a/osfmk/mach/mig_log.h b/osfmk/mach/mig_log.h new file mode 100644 index 000000000..923c0b51e --- /dev/null +++ b/osfmk/mach/mig_log.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:40:32 ezf + * change marker to not FREE + * [1994/09/22 21:41:53 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:42:27 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:17:28 jeffc] + * + * Revision 1.2 1993/04/19 16:36:32 devrcs + * Merge untyped ipc: + * Support for logging and tracing within the MIG stubs + * [1993/02/24 14:47:01 travos] + * [1993/03/16 13:19:16 rod] + * + * $EndLog$ + */ + +#ifndef _mig_log_ +#define _mig_log_ + +typedef enum { + MACH_MSG_LOG_USER, + MACH_MSG_LOG_SERVER +} mig_who_t; + +typedef enum { + MACH_MSG_REQUEST_BEING_SENT, + MACH_MSG_REQUEST_BEING_RCVD, + MACH_MSG_REPLY_BEING_SENT, + MACH_MSG_REPLY_BEING_RCVD +} mig_which_event_t; + +typedef enum { + MACH_MSG_ERROR_WHILE_PARSING, + MACH_MSG_ERROR_UNKNOWN_ID +} mig_which_error_t; + +extern void MigEventTracer +#if defined(__STDC__) +( + mig_who_t who, + mig_which_event_t what, + mach_msg_id_t msgh_id, + unsigned int size, + unsigned int kpd, + unsigned int retcode, + unsigned int ports, + unsigned int oolports, + unsigned int ool, + char *file, + unsigned int line +); +#else /* !defined(__STDC__) */ +(); +#endif /* !defined(__STDC__) */ + +extern void MigEventErrors +#if defined(__STDC__) +( + mig_who_t who, + mig_which_error_t what, + void *par, + char *file, + unsigned int line +); +#else /* !defined(__STDC__) */ +(); +#endif /* !defined(__STDC__) */ + +extern int mig_errors; +extern int mig_tracing; + +#define LOG_ERRORS if (mig_errors) MigEventErrors +#define LOG_TRACE if (mig_tracing) MigEventTracer + +#endif /* _mig_log_ */ + diff --git a/osfmk/mach/mk_timer.h b/osfmk/mach/mk_timer.h new file mode 100644 index 000000000..bb2e8bd6c --- /dev/null +++ 
b/osfmk/mach/mk_timer.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 31 August 2000 (debo) + * Created. + */ + +#ifndef _MACH_MK_TIMER_H_ +#define _MACH_MK_TIMER_H_ + +#include + +#include + +mach_port_name_t mk_timer_create(void); + +kern_return_t mk_timer_destroy( + mach_port_name_t name); + +kern_return_t mk_timer_arm( + mach_port_name_t name, + AbsoluteTime expire_time); + +kern_return_t mk_timer_cancel( + mach_port_name_t name, + AbsoluteTime *result_time); + +struct mk_timer_expire_msg { + mach_msg_header_t header; + AbsoluteTime time_of_arming; + AbsoluteTime armed_time; + AbsoluteTime time_of_posting; +}; + +typedef struct mk_timer_expire_msg mk_timer_expire_msg_t; + +#endif /* _MACH_MK_TIMER_H_ */ diff --git a/osfmk/mach/mk_traps.h b/osfmk/mach/mk_traps.h new file mode 100644 index 000000000..c5159b834 --- /dev/null +++ b/osfmk/mach/mk_traps.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 20 October 2000 (debo) + * Created. + */ + +#ifndef _MACH_MK_TRAPS_H_ +#define _MACH_MK_TRAPS_H_ + +#endif /* _MACH_MK_TRAPS_H_ */ diff --git a/osfmk/mach/msg_type.h b/osfmk/mach/msg_type.h new file mode 100644 index 000000000..00d56bbb2 --- /dev/null +++ b/osfmk/mach/msg_type.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:40:41 ezf + * change marker to not FREE + * [1994/09/22 21:41:56 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:42:32 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:17:31 jeffc] + * + * Revision 1.2 1993/04/19 16:37:56 devrcs + * ansi C conformance changes + * [1993/02/02 18:53:54 david] + * + * Revision 1.1 1992/09/30 02:31:51 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 16:58:02 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:35:10 mrt + * Changed to new Mach copyright + * [91/02/01 17:17:32 mrt] + * + * Revision 2.1 89/08/03 16:03:38 rwd + * Created. + * + * Revision 2.3 89/02/25 18:39:26 gm0w + * Changes for cleanup. + * + * 4-Mar-87 Michael Young (mwyoung) at Carnegie-Mellon University + * Added MSG_TYPE_RPC. + * + * 22-Dec-86 Mary Thompson + * defined MSG_TYPE_CAMELOT, and MSG_TYPE_ENCRYPTED + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * This file defines user msg types that may be ored into + * the msg_type field in a msg header. Values 0-5 are reserved + * for use by the kernel and are defined in message.h. + * + */ + +#ifndef MSG_TYPE_H_ +#define MSG_TYPE_H_ + +#define MSG_TYPE_CAMELOT (1 << 6) +#define MSG_TYPE_ENCRYPTED (1 << 7) +#define MSG_TYPE_RPC (1 << 8) /* Reply expected */ + +#include + +#endif /* MSG_TYPE_H_ */ diff --git a/osfmk/mach/ndr.h b/osfmk/mach/ndr.h new file mode 100644 index 000000000..56e982d9a --- /dev/null +++ b/osfmk/mach/ndr.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:40:51 ezf + * change marker to not FREE + * [1994/09/22 21:42:00 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:42:37 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:17:34 jeffc] + * + * Revision 1.2 1993/04/19 16:38:03 devrcs + * Merge untyped ipc: + * New names for the fields - the structure isn't changed + * [1993/03/12 23:01:38 travos] + * Extended NDR record to include version number(s) + * [1993/03/05 23:10:21 travos] + * a new NDR structure + * 1993/02/13 00:47:46 travos] + * Created. [travos@osf.org] + * [1993/01/27 11:21:44 rod] + * [1993/03/16 13:23:15 rod] + * + * $EndLog$ + */ + +#ifndef _NDR_H_ +#define _NDR_H_ + +typedef struct { + unsigned char mig_vers; + unsigned char if_vers; + unsigned char reserved1; + unsigned char mig_encoding; + unsigned char int_rep; + unsigned char char_rep; + unsigned char float_rep; + unsigned char reserved2; +} NDR_record_t; + +/* + * MIG supported protocols for Network Data Representation + */ +#define NDR_PROTOCOL_2_0 0 + +/* + * NDR 2.0 format flag type definition and values. 
+ */ +#define NDR_INT_BIG_ENDIAN 0 +#define NDR_INT_LITTLE_ENDIAN 1 +#define NDR_FLOAT_IEEE 0 +#define NDR_FLOAT_VAX 1 +#define NDR_FLOAT_CRAY 2 +#define NDR_FLOAT_IBM 3 +#define NDR_CHAR_ASCII 0 +#define NDR_CHAR_EBCDIC 1 + +extern NDR_record_t NDR_record; + +#endif /* _NDR_H_ */ diff --git a/osfmk/mach/norma_special_ports.h b/osfmk/mach/norma_special_ports.h new file mode 100644 index 000000000..e0d33258e --- /dev/null +++ b/osfmk/mach/norma_special_ports.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/norma_special_ports.h + * + * Defines codes for remote access to special ports. These are NOT + * port identifiers - they are only used for the norma_get_special_port + * and norma_set_special_port routines. + */ + +#ifndef _MACH_NORMA_SPECIAL_PORTS_H_ +#define _MACH_NORMA_SPECIAL_PORTS_H_ + +#define MAX_SPECIAL_KERNEL_ID 10 +#define MAX_SPECIAL_ID 40 + +/* + * Provided by kernel + */ +#define NORMA_DEVICE_PORT 1 +#define NORMA_HOST_PORT 2 +#define NORMA_HOST_PRIV_PORT 3 + +/* + * Not provided by kernel + */ +#define NORMA_NAMESERVER_PORT (1 + MAX_SPECIAL_KERNEL_ID) + +/* + * Definitions for ease of use. + * + * In the get call, the host parameter can be any host, but will generally + * be the local node host port. In the set call, the host must the per-node + * host port for the node being affected. 
+ */ + +#define norma_get_device_port(host, node, port) \ + (norma_get_special_port((host), (node), NORMA_DEVICE_PORT, (port))) + +#define norma_set_device_port(host, port) \ + (norma_set_special_port((host), NORMA_DEVICE_PORT, (port))) + +#define norma_get_host_port(host, node, port) \ + (norma_get_special_port((host), (node), NORMA_HOST_PORT, (port))) + +#define norma_set_host_port(host, port) \ + (norma_set_special_port((host), NORMA_HOST_PORT, (port))) + +#define norma_get_host_priv_port(host, node, port) \ + (norma_get_special_port((host), (node), NORMA_HOST_PRIV_PORT, (port))) + +#define norma_set_host_priv_port(host, port) \ + (norma_set_special_port((host), NORMA_HOST_PRIV_PORT, (port))) + +#define norma_get_nameserver_port(host, node, port) \ + (norma_get_special_port((host), (node), NORMA_NAMESERVER_PORT, (port))) + +#define norma_set_nameserver_port(host, port) \ + (norma_set_special_port((host), NORMA_NAMESERVER_PORT, (port))) + +#endif /* _MACH_NORMA_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/norma_task.defs b/osfmk/mach/norma_task.defs new file mode 100644 index 000000000..462442bd3 --- /dev/null +++ b/osfmk/mach/norma_task.defs @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 02:41:09 ezf + * change marker to not FREE + * [1994/09/22 21:42:07 ezf] + * + * Revision 1.1.2.2 1993/06/02 23:47:12 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:17:42 jeffc] + * + * Revision 1.1 1992/09/30 02:22:57 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* CMU_ENDHIST */ diff --git a/osfmk/mach/notify.defs b/osfmk/mach/notify.defs new file mode 100644 index 000000000..7845a868e --- /dev/null +++ b/osfmk/mach/notify.defs @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +subsystem notify 64; + +#include + +#if SEQNOS +serverprefix do_seqnos_; +serverdemux seqnos_notify_server; +#else SEQNOS +serverprefix do_; +serverdemux notify_server; +#endif SEQNOS + +type notify_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE + ctype: mach_port_t; + +/* MACH_NOTIFY_FIRST: 0100 */ +skip; + +/* MACH_NOTIFY_PORT_DELETED: 0101 */ +simpleroutine mach_notify_port_deleted( + notify : notify_port_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + name : mach_port_name_t); + +skip; /* was MACH_NOTIFY_MSG_ACCEPTED: 0102 */ + +skip; /* was NOTIFY_OWNERSHIP_RIGHTS: 0103 */ + +skip; /* was NOTIFY_RECEIVE_RIGHTS: 0104 */ + +/* MACH_NOTIFY_PORT_DESTROYED: 0105 */ +simpleroutine mach_notify_port_destroyed( + notify : notify_port_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + rights : mach_port_receive_t); + +/* MACH_NOTIFY_NO_SENDERS: 0106 */ +simpleroutine mach_notify_no_senders( + notify : notify_port_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + mscount : mach_port_mscount_t); + +/* MACH_NOTIFY_SEND_ONCE: 0107 */ +simpleroutine mach_notify_send_once( + notify : notify_port_t +#if SEQNOS +; msgseqno seqno : mach_port_seqno_t +#endif SEQNOS + ); + +/* MACH_NOTIFY_DEAD_NAME: 0110 */ +simpleroutine mach_notify_dead_name( + notify : notify_port_t; +#if SEQNOS + msgseqno seqno : mach_port_seqno_t; +#endif SEQNOS + name : mach_port_name_t); diff --git a/osfmk/mach/notify.h b/osfmk/mach/notify.h new file mode 100644 index 000000000..259ab0942 --- /dev/null +++ b/osfmk/mach/notify.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.12.2 1996/01/09 19:22:05 devrcs + * Made not_count in mach_no_senders_notification_t + * a mach_msg_type_number_t. + * [1995/12/01 19:49:21 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:09:14 jfraser] + * + * Revision 1.2.12.1 1994/09/23 02:41:27 ezf + * change marker to not FREE + * [1994/09/22 21:42:15 ezf] + * + * Revision 1.2.6.5 1993/09/09 16:07:46 jeffc + * CR9745 - delete message accepted notifications + * [1993/09/03 22:15:11 jeffc] + * + * Revision 1.2.6.4 1993/08/05 19:09:35 jeffc + * CR9508 - Delete dead code. Remove MACH_IPC_TYPED + * [1993/08/03 20:18:41 jeffc] + * + * CR9508 - Delete dead code. Remove MACH_IPC_COMPAT + * [1993/08/03 17:09:19 jeffc] + * + * Revision 1.2.6.3 1993/08/03 18:29:46 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 18:24:51 gm] + * + * Revision 1.2.6.2 1993/06/09 02:42:49 gm + * Fix untyped notifications. CR #8969 + * [1993/04/27 11:29:30 rod] + * + * Revision 1.2 1993/04/19 16:38:19 devrcs + * Added trailer support to untyped ipc. 
[travos@osf.org, fdr@osf.org] + * [1993/04/06 18:28:00 travos] + * Merge untyped ipc: + * Remove the NDR format label from messages with no untyped data + * [1993/03/12 22:50:02 travos] + * changed msgh_body to not_body in the notification message structures. + * [1993/02/25 21:50:38 fdr] + * New definitions for notifications (via compile option MACH_IPC_TYPED) + * [1993/02/24 19:25:42 travos] + * + * ansi C conformance changes + * [1993/02/02 18:54:03 david] + * Revision 1.1 1992/09/30 02:31:55 robert + * Initial revision + * [1993/02/02 19:05:08 david] + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4.2.1 92/03/03 16:22:23 jeffreyh + * Changes form TRUNK + * [92/02/26 12:12:10 jeffreyh] + * + * Revision 2.5 92/01/15 13:44:41 rpd + * Changed MACH_IPC_COMPAT conditionals to default to not present. + * + * Revision 2.4 91/05/14 16:58:21 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:35:18 mrt + * Changed to new Mach copyright + * [91/02/01 17:20:02 mrt] + * + * Revision 2.2 90/06/02 14:59:32 rpd + * Converted to new IPC. + * [90/03/26 22:38:14 rpd] + * + * Revision 2.7.7.1 90/02/20 22:24:32 rpd + * Revised for new IPC. + * [90/02/19 23:38:57 rpd] + * + * + * Condensed history: + * Moved ownership rights under MACH_IPC_XXXHACK (rpd). + * Added NOTIFY_PORT_DESTROYED (rpd). + * Added notification message structure definition (mwyoung). + * Created, based on Accent values (mwyoung). + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/notify.h + * + * Kernel notification message definitions. + */ + +#ifndef _MACH_NOTIFY_H_ +#define _MACH_NOTIFY_H_ + +#include +#include +#include + +/* + * An alternative specification of the notification interface + * may be found in mach/notify.defs. + */ + +#define MACH_NOTIFY_FIRST 0100 +#define MACH_NOTIFY_PORT_DELETED (MACH_NOTIFY_FIRST + 001 ) + /* A send or send-once right was deleted. 
*/ +#define MACH_NOTIFY_PORT_DESTROYED (MACH_NOTIFY_FIRST + 005) + /* A receive right was (would have been) deallocated */ +#define MACH_NOTIFY_NO_SENDERS (MACH_NOTIFY_FIRST + 006) + /* Receive right has no extant send rights */ +#define MACH_NOTIFY_SEND_ONCE (MACH_NOTIFY_FIRST + 007) + /* An extant send-once right died */ +#define MACH_NOTIFY_DEAD_NAME (MACH_NOTIFY_FIRST + 010) + /* Send or send-once right died, leaving a dead-name */ +#define MACH_NOTIFY_LAST (MACH_NOTIFY_FIRST + 015) + +typedef struct { + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ + mach_msg_format_0_trailer_t trailer; +} mach_port_deleted_notification_t; + +typedef struct { + mach_msg_header_t not_header; + mach_msg_body_t not_body; + mach_msg_port_descriptor_t not_port;/* MACH_MSG_TYPE_PORT_RECEIVE */ + mach_msg_format_0_trailer_t trailer; +} mach_port_destroyed_notification_t; + +typedef struct { + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_msg_type_number_t not_count; + mach_msg_format_0_trailer_t trailer; +} mach_no_senders_notification_t; + +typedef struct { + mach_msg_header_t not_header; + mach_msg_format_0_trailer_t trailer; +} mach_send_once_notification_t; + +typedef struct { + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ + mach_msg_format_0_trailer_t trailer; +} mach_dead_name_notification_t; + +#endif /* _MACH_NOTIFY_H_ */ diff --git a/osfmk/mach/policy.h b/osfmk/mach/policy.h new file mode 100644 index 000000000..fdc6f0ecf --- /dev/null +++ b/osfmk/mach/policy.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_POLICY_H_ +#define _MACH_POLICY_H_ + +/* + * mach/policy.h + * + * Definitions for scheduing policy. + * + * N.B. The interfaces defined here are all obsolete!! + */ + +#include +#include + +/* + * Policy definitions. 
Policies should be powers of 2, + * but cannot be or'd together other than to test for a + * policy 'class'. + */ +#define POLICY_NULL 0 /* none */ +#define POLICY_TIMESHARE 1 /* timesharing */ +#define POLICY_RR 2 /* fixed round robin */ +#define POLICY_FIFO 4 /* fixed fifo */ + +#define __NEW_SCHEDULING_FRAMEWORK__ + +/* + * Check if policy is of 'class' fixed-priority. + */ +#define POLICYCLASS_FIXEDPRI (POLICY_RR | POLICY_FIFO) + +/* + * Check if policy is valid. + */ +#define invalid_policy(policy) \ + ((policy) != POLICY_TIMESHARE && \ + (policy) != POLICY_RR && \ + (policy) != POLICY_FIFO) + + +/* + * New scheduling control interface + */ +typedef int policy_t; +typedef integer_t *policy_info_t; +typedef integer_t *policy_base_t; +typedef integer_t *policy_limit_t; + + +/* + * Types for TIMESHARE policy + */ +struct policy_timeshare_base { + integer_t base_priority; +}; +struct policy_timeshare_limit { + integer_t max_priority; +}; +struct policy_timeshare_info { + integer_t max_priority; + integer_t base_priority; + integer_t cur_priority; + boolean_t depressed; + integer_t depress_priority; +}; + +typedef struct policy_timeshare_base *policy_timeshare_base_t; +typedef struct policy_timeshare_limit *policy_timeshare_limit_t; +typedef struct policy_timeshare_info *policy_timeshare_info_t; + +typedef struct policy_timeshare_base policy_timeshare_base_data_t; +typedef struct policy_timeshare_limit policy_timeshare_limit_data_t; +typedef struct policy_timeshare_info policy_timeshare_info_data_t; + + +#define POLICY_TIMESHARE_BASE_COUNT \ + (sizeof(struct policy_timeshare_base)/sizeof(integer_t)) +#define POLICY_TIMESHARE_LIMIT_COUNT \ + (sizeof(struct policy_timeshare_limit)/sizeof(integer_t)) +#define POLICY_TIMESHARE_INFO_COUNT \ + (sizeof(struct policy_timeshare_info)/sizeof(integer_t)) + + +/* + * Types for the ROUND ROBIN (RR) policy + */ +struct policy_rr_base { + integer_t base_priority; + integer_t quantum; +}; +struct policy_rr_limit { + integer_t 
max_priority; +}; +struct policy_rr_info { + integer_t max_priority; + integer_t base_priority; + integer_t quantum; + boolean_t depressed; + integer_t depress_priority; +}; + +typedef struct policy_rr_base *policy_rr_base_t; +typedef struct policy_rr_limit *policy_rr_limit_t; +typedef struct policy_rr_info *policy_rr_info_t; + +typedef struct policy_rr_base policy_rr_base_data_t; +typedef struct policy_rr_limit policy_rr_limit_data_t; +typedef struct policy_rr_info policy_rr_info_data_t; + +#define POLICY_RR_BASE_COUNT \ + (sizeof(struct policy_rr_base)/sizeof(integer_t)) +#define POLICY_RR_LIMIT_COUNT \ + (sizeof(struct policy_rr_limit)/sizeof(integer_t)) +#define POLICY_RR_INFO_COUNT \ + (sizeof(struct policy_rr_info)/sizeof(integer_t)) + + +/* + * Types for the FIRST-IN-FIRST-OUT (FIFO) policy + */ +struct policy_fifo_base { + integer_t base_priority; +}; +struct policy_fifo_limit { + integer_t max_priority; +}; +struct policy_fifo_info { + integer_t max_priority; + integer_t base_priority; + boolean_t depressed; + integer_t depress_priority; +}; + +typedef struct policy_fifo_base *policy_fifo_base_t; +typedef struct policy_fifo_limit *policy_fifo_limit_t; +typedef struct policy_fifo_info *policy_fifo_info_t; + +typedef struct policy_fifo_base policy_fifo_base_data_t; +typedef struct policy_fifo_limit policy_fifo_limit_data_t; +typedef struct policy_fifo_info policy_fifo_info_data_t; + +#define POLICY_FIFO_BASE_COUNT \ + (sizeof(struct policy_fifo_base)/sizeof(integer_t)) +#define POLICY_FIFO_LIMIT_COUNT \ + (sizeof(struct policy_fifo_limit)/sizeof(integer_t)) +#define POLICY_FIFO_INFO_COUNT \ + (sizeof(struct policy_fifo_info)/sizeof(integer_t)) + +/* + * Aggregate policy types + */ + +struct policy_bases { + policy_timeshare_base_data_t ts; + policy_rr_base_data_t rr; + policy_fifo_base_data_t fifo; +}; + +struct policy_limits { + policy_timeshare_limit_data_t ts; + policy_rr_limit_data_t rr; + policy_fifo_limit_data_t fifo; +}; + +struct policy_infos { + 
policy_timeshare_info_data_t ts; + policy_rr_info_data_t rr; + policy_fifo_info_data_t fifo; +}; + +typedef struct policy_bases policy_base_data_t; +typedef struct policy_limits policy_limit_data_t; +typedef struct policy_infos policy_info_data_t; + +#endif /* _MACH_POLICY_H_ */ diff --git a/osfmk/mach/port.h b/osfmk/mach/port.h new file mode 100644 index 000000000..42c182000 --- /dev/null +++ b/osfmk/mach/port.h @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/port.h + * + * Definition of a port + * + * [The basic port_t type should probably be machine-dependent, + * as it must be represented by a 32-bit integer.] + */ + +#ifndef _MACH_PORT_H_ +#define _MACH_PORT_H_ + +#include +#include + +/* + * A port_name_t is a 32 bit value which represents a name of a + * port right within some ipc space. This is a constant definition + * everywhere. + * + * The type port_t represents a reference added or deleted to a + * port right. + * + * At user space, this is represented by returning the name of + * the right(s) that got altered within the user's ipc space. + * So a port_t is the same type as a port_name_t there. + * + * Since there is no right space for the kernel proper (all rights + * are naked rights) these rights are represented by passing a + * pointer to the specific ipc_object_t subclass (typically + * ipc_port_t) that got altered/is to be altered. + */ +typedef natural_t port_name_t; +typedef port_name_t *port_name_array_t; + +#ifdef KERNEL_PRIVATE + +#include +typedef ipc_port_t port_t; + +#else /* ! KERNEL_PRIVATE */ + +typedef port_name_t port_t; + +#endif /* KERNEL_PRIVATE */ + +/* + * PORT_NULL is a legal value that can be carried in messages. + * It indicates the absence of any port or port rights. (A port + * argument keeps the message from being "simple", even if the + * value is PORT_NULL.) The value PORT_DEAD is also a legal + * value that can be carried in messages. 
It indicates + * that a port right was present, but it died. + */ +#define PORT_NULL ((port_t) 0) +#define PORT_DEAD ((port_t) ~0) +#define PORT_VALID(name) \ + (((port_t)(name) != PORT_NULL) && \ + ((port_t)(name) != PORT_DEAD)) + +/* + * Mach 3.0 renamed everything to have mach_ in front of it. + * Do that mapping here, so we have the types and macros in + * both formats. + */ +typedef port_t mach_port_t; +typedef port_t *mach_port_array_t; +typedef port_name_t mach_port_name_t; +typedef mach_port_name_t *mach_port_name_array_t; + +#define MACH_PORT_NULL 0 /* intentional loose typing */ +#define MACH_PORT_DEAD ((mach_port_name_t) ~0) +#define MACH_PORT_VALID(name) \ + (((name) != MACH_PORT_NULL) && \ + ((name) != MACH_PORT_DEAD)) + +/* + * mach_port_name_t must be an unsigned type. Port values + * have two parts, a generation number and an index. + * These macros encapsulate all knowledge of how + * a mach_port_name_t is laid out. They are made visible + * to user tasks so that packages to map from a mach_port_name_t + * to associated user data can discount the generation + * nuber (if desired) in doing the mapping. + * + * Within the kernel, ipc/ipc_entry.c implicitly assumes + * when it uses the splay tree functions that the generation + * number is in the low bits, so that names are ordered first + * by index and then by generation. If the size of generation + * numbers changes, be sure to update IE_BITS_GEN_MASK and + * friends in ipc/ipc_entry.h. + */ +#ifndef NO_PORT_GEN +#define MACH_PORT_INDEX(name) ((name) >> 8) +#define MACH_PORT_GEN(name) (((name) & 0xff) << 24) +#define MACH_PORT_MAKE(index, gen) \ + (((index) << 8) | (gen) >> 24) +#else +#define MACH_PORT_INDEX(name) (name) +#define MACH_PORT_GEN(name) (0) +#define MACH_PORT_MAKE(index, gen) (index) +#endif /* !NO_PORT_GEN */ + +/* + * These are the different rights a task may have. 
+ * The MACH_PORT_RIGHT_* definitions are used as arguments + * to mach_port_allocate, mach_port_get_refs, etc, to specify + * a particular right to act upon. The mach_port_names and + * mach_port_type calls return bitmasks using the MACH_PORT_TYPE_* + * definitions. This is because a single name may denote + * multiple rights. + */ + +typedef natural_t mach_port_right_t; + +#define MACH_PORT_RIGHT_SEND ((mach_port_right_t) 0) +#define MACH_PORT_RIGHT_RECEIVE ((mach_port_right_t) 1) +#define MACH_PORT_RIGHT_SEND_ONCE ((mach_port_right_t) 2) +#define MACH_PORT_RIGHT_PORT_SET ((mach_port_right_t) 3) +#define MACH_PORT_RIGHT_DEAD_NAME ((mach_port_right_t) 4) +#define MACH_PORT_RIGHT_NUMBER ((mach_port_right_t) 5) + +typedef natural_t mach_port_type_t; +typedef mach_port_type_t *mach_port_type_array_t; + +#define MACH_PORT_TYPE(right) \ + ((mach_port_type_t)(((mach_port_type_t) 1) \ + << ((right) + ((mach_port_right_t) 16)))) +#define MACH_PORT_TYPE_NONE ((mach_port_type_t) 0L) +#define MACH_PORT_TYPE_SEND MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND) +#define MACH_PORT_TYPE_RECEIVE MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE) +#define MACH_PORT_TYPE_SEND_ONCE MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND_ONCE) +#define MACH_PORT_TYPE_PORT_SET MACH_PORT_TYPE(MACH_PORT_RIGHT_PORT_SET) +#define MACH_PORT_TYPE_DEAD_NAME MACH_PORT_TYPE(MACH_PORT_RIGHT_DEAD_NAME) + +/* Convenient combinations. */ + +#define MACH_PORT_TYPE_SEND_RECEIVE \ + (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_RECEIVE) +#define MACH_PORT_TYPE_SEND_RIGHTS \ + (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE) +#define MACH_PORT_TYPE_PORT_RIGHTS \ + (MACH_PORT_TYPE_SEND_RIGHTS|MACH_PORT_TYPE_RECEIVE) +#define MACH_PORT_TYPE_PORT_OR_DEAD \ + (MACH_PORT_TYPE_PORT_RIGHTS|MACH_PORT_TYPE_DEAD_NAME) +#define MACH_PORT_TYPE_ALL_RIGHTS \ + (MACH_PORT_TYPE_PORT_OR_DEAD|MACH_PORT_TYPE_PORT_SET) + +/* Dummy type bits that mach_port_type/mach_port_names can return. 
*/ + +#define MACH_PORT_TYPE_DNREQUEST 0x80000000 + +/* User-references for capabilities. */ + +typedef natural_t mach_port_urefs_t; +typedef integer_t mach_port_delta_t; /* change in urefs */ + +/* Attributes of ports. (See mach_port_get_receive_status.) */ + +typedef natural_t mach_port_seqno_t; /* sequence number */ +typedef natural_t mach_port_mscount_t; /* make-send count */ +typedef natural_t mach_port_msgcount_t; /* number of msgs */ +typedef natural_t mach_port_rights_t; /* number of rights */ + +/* + * A port may have NMS detection enabled, in which case + * it tracks outstanding send rights. Otherwise, there + * is no information available about outstanding srights. + * The return values are deliberately chosen to match + * the old boolean (0=FALSE=no srights, 1=TRUE=srights, + * 2=xxx=no information available). + */ +#define MACH_PORT_SRIGHTS_NONE 0 /* NMS: no srights */ +#define MACH_PORT_SRIGHTS_PRESENT 1 /* NMS: srights */ +#define MACH_PORT_SRIGHTS_NO_INFO 2 /* no NMS */ +typedef unsigned int mach_port_srights_t; /* status of send rights */ + +typedef struct mach_port_status { + mach_port_name_t mps_pset; /* containing port set */ + mach_port_seqno_t mps_seqno; /* sequence number */ + mach_port_mscount_t mps_mscount; /* make-send count */ + mach_port_msgcount_t mps_qlimit; /* queue limit */ + mach_port_msgcount_t mps_msgcount; /* number in the queue */ + mach_port_rights_t mps_sorights; /* how many send-once rights */ + boolean_t mps_srights; /* do send rights exist? */ + boolean_t mps_pdrequest; /* port-deleted requested? */ + boolean_t mps_nsrequest; /* no-senders requested? 
*/ + unsigned int mps_flags; /* port flags */ +} mach_port_status_t; + +#define MACH_PORT_QLIMIT_DEFAULT ((mach_port_msgcount_t) 5) +#define MACH_PORT_QLIMIT_MAX ((mach_port_msgcount_t) 16) + +typedef struct mach_port_limits { + mach_port_msgcount_t mpl_qlimit; /* number of msgs */ +} mach_port_limits_t; + +typedef integer_t *mach_port_info_t; /* varying array of natural_t */ + +/* Flavors for mach_port_get/set_attributes() */ +typedef int mach_port_flavor_t; +#define MACH_PORT_LIMITS_INFO 1 /* uses mach_port_limits_t */ +#define MACH_PORT_RECEIVE_STATUS 2 /* uses mach_port_status_t */ +#define MACH_PORT_DNREQUESTS_SIZE 3 /* info is int */ + +#define MACH_PORT_LIMITS_INFO_COUNT \ + (sizeof(mach_port_limits_t)/sizeof(natural_t)) +#define MACH_PORT_RECEIVE_STATUS_COUNT \ + (sizeof(mach_port_status_t)/sizeof(natural_t)) +#define MACH_PORT_DNREQUESTS_SIZE_COUNT 1 + +/* + * Structure used to pass information about port allocation requests. + * Must be padded to 64-bits total length. + */ + +typedef struct mach_port_qos { + boolean_t name:1; /* name given */ + boolean_t prealloc:1; /* prealloced message */ + boolean_t pad1:30; + natural_t len; +} mach_port_qos_t; + +#endif /* _MACH_PORT_H_ */ diff --git a/osfmk/mach/ppc/Makefile b/osfmk/mach/ppc/Makefile new file mode 100644 index 000000000..2479b84ca --- /dev/null +++ b/osfmk/mach/ppc/Makefile @@ -0,0 +1,35 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +VPATH+=$(SOURCE)/../../ppc: + +DATAFILES = \ + boolean.h exception.h kern_return.h ndr_def.h \ + processor_info.h rpc.h thread_state.h thread_status.h \ + vm_param.h vm_types.h machine_types.defs \ + syscall_sw.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_GEN_LIST = \ + asm.h + +INSTALL_MD_DIR = mach/ppc + +EXPORT_MD_LIST = ${DATAFILES} + 
+EXPORT_MD_GEN_LIST = \ + asm.h + +EXPORT_MD_DIR = mach/ppc + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/mach/ppc/boolean.h b/osfmk/mach/ppc/boolean.h new file mode 100644 index 000000000..8e5a91443 --- /dev/null +++ b/osfmk/mach/ppc/boolean.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.1 1996/12/09 16:50:03 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 10:50:49 stephen] + * + * Revision 1.1.6.1 1996/04/11 11:19:44 emcmanus + * Copied from mainline.ppc. 
+ * [1996/04/10 16:56:37 emcmanus] + * + * Revision 1.1.4.1 1995/11/23 17:36:42 stephen + * first powerpc checkin to mainline.ppc + * [1995/11/23 16:44:33 stephen] + * + * Revision 1.1.2.1 1995/08/25 06:49:32 stephen + * Initial checkin of files for PowerPC port + * [1995/08/23 16:27:03 stephen] + * + * Initial checkin of files for PowerPC port + * [1995/08/23 15:03:41 stephen] + * + * Revision 1.2.6.1 1994/09/23 02:36:44 ezf + * change marker to not FREE + * [1994/09/22 21:39:49 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:40:19 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:16:03 jeffc] + * + * Revision 1.2 1993/04/19 16:33:37 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:53 david] + * + * Revision 1.1 1992/09/30 02:30:40 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:51:56 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:04 mrt + * Changed to new Mach copyright + * [91/02/01 17:09:33 mrt] + * + * Revision 2.2 90/05/03 15:47:26 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:19:36 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:00:41 gm0w + * Changes for cleanup. + * + * 24-Sep-86 Michael Young (mwyoung) at Carnegie-Mellon University + * Created. + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: boolean.h + * + * Boolean type, for ppc. + */ + +#ifndef _MACH_PPC_BOOLEAN_H_ +#define _MACH_PPC_BOOLEAN_H_ + +typedef int boolean_t; + +#endif /* _MACH_PPC_BOOLEAN_H_ */ diff --git a/osfmk/mach/ppc/exception.h b/osfmk/mach/ppc/exception.h new file mode 100644 index 000000000..fbb1dadd4 --- /dev/null +++ b/osfmk/mach/ppc/exception.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Copyright (c) 1990, 1991, 1992, The University of Utah and + * the Center for Software Science at the University of Utah (CSS). + * All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the Center + * for Software Science at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSS requests users of this software to return to css-dist@cs.utah.edu any + * improvements that they make and grant CSS redistribution rights. + * + * Utah $Hdr: $ + */ + +#ifndef _MACH_PPC_EXCEPTION_H_ +#define _MACH_PPC_EXCEPTION_H_ + + +/* STATUS - TODO - this is just transcribed from hp stuff */ + +#define EXC_TYPES_COUNT 10 /* incl. 
illegal exception 0 */ + +#define EXCEPTION_CODE_MAX 2 /* elements in vector (code+subcode) */ +/* + * EXC_BAD_INSTRUCTION + */ + +#define EXC_PPC_INVALID_SYSCALL 1 /* invalid syscall number */ +#define EXC_PPC_UNIPL_INST 2 /* unimplemented instruction */ +#define EXC_PPC_PRIVINST 3 /* privileged instruction */ +#define EXC_PPC_PRIVREG 4 /* privileged register */ +#define EXC_PPC_TRACE 5 /* trace/single-step */ + +/* + * EXC_BAD_ACCESS + * Note: do not conflict with kern_return_t values returned by vm_fault + */ + +#define EXC_PPC_VM_PROT_READ 0x101 /* error reading syscall args */ +#define EXC_PPC_BADSPACE 0x102 /* bad space referenced */ +#define EXC_PPC_UNALIGNED 0x103 /* unaligned data reference */ + +/* + * EXC_ARITHMETIC + */ + +#define EXC_PPC_OVERFLOW 1 /* integer overflow */ +#define EXC_PPC_ZERO_DIVIDE 2 /* integer divide by zero */ +#define EXC_PPC_FLT_INEXACT 3 /* IEEE inexact exception */ +#define EXC_PPC_FLT_ZERO_DIVIDE 4 /* IEEE zero divide */ +#define EXC_PPC_FLT_UNDERFLOW 5 /* IEEE floating underflow */ +#define EXC_PPC_FLT_OVERFLOW 6 /* IEEE floating overflow */ +#define EXC_PPC_FLT_NOT_A_NUMBER 7 /* IEEE not a number */ + +/* + * EXC_PPC_NOEMULATION should go away when we add software emulation + * for floating point. Right now we don't support this. + */ + +#define EXC_PPC_NOEMULATION 8 /* no floating point emulation */ +#define EXC_PPC_ALTIVECASSIST 9 /* Altivec Denorm Assist */ + +/* + * EXC_SOFTWARE + */ +#define EXC_PPC_TRAP 1 /* Program trap */ +#define EXC_PPC_MIGRATE 0x10100 /* Time to bolt */ + + +/* + * EXC_BREAKPOINT + */ + +#define EXC_PPC_BREAKPOINT EXC_PPC_TRAP /* breakpoint trap */ + +/* + * machine dependent exception masks + */ +#define EXC_MASK_MACHINE 0 + +#endif /* _MACH_PPC_EXCEPTION_H_ */ diff --git a/osfmk/mach/ppc/kern_return.h b/osfmk/mach/ppc/kern_return.h new file mode 100644 index 000000000..e14be8d22 --- /dev/null +++ b/osfmk/mach/ppc/kern_return.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.1 1996/12/09 16:50:07 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 10:50:59 stephen] + * + * Revision 1.1.6.1 1996/04/11 11:19:54 emcmanus + * Copied from mainline.ppc. + * [1996/04/10 16:56:46 emcmanus] + * + * Revision 1.1.4.1 1995/11/23 17:36:50 stephen + * first powerpc checkin to mainline.ppc + * [1995/11/23 16:44:51 stephen] + * + * Revision 1.1.2.1 1995/08/25 06:49:40 stephen + * Initial checkin of files for PowerPC port + * [1995/08/23 16:28:33 stephen] + * + * Initial checkin of files for PowerPC port + * [1995/08/23 15:04:02 stephen] + * + * Revision 1.2.6.1 1994/09/23 02:37:12 ezf + * change marker to not FREE + * [1994/09/22 21:40:01 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:40:35 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:16:15 jeffc] + * + * Revision 1.2 1993/04/19 16:33:58 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:09 david] + * + * Revision 1.1 1992/09/30 02:30:47 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:52:15 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:12 mrt + * Changed to new Mach copyright + * [91/02/01 17:09:54 mrt] + * + * Revision 2.2 90/05/03 15:47:51 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:19:48 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:00:54 gm0w + * Changes for cleanup. + * + * 3-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University + * Allow inclusion in assembler input. + * + * 14-Oct-85 Michael Wayne Young (mwyoung) at Carnegie-Mellon University + * Created. + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +/* + * File: kern_return.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Machine-dependent kernel return definitions. + */ + +#ifndef _MACH_PPC_KERN_RETURN_H_ +#define _MACH_PPC_KERN_RETURN_H_ + +#ifndef ASSEMBLER +typedef int kern_return_t; +#endif /* ASSEMBLER */ +#endif /* _MACH_PPC_KERN_RETURN_H_ */ diff --git a/osfmk/mach/ppc/machine_types.defs b/osfmk/mach/ppc/machine_types.defs new file mode 100644 index 000000000..59c1c916e --- /dev/null +++ b/osfmk/mach/ppc/machine_types.defs @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.1 1996/12/09 16:50:08 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 10:51:04 stephen] + * + * Revision 1.1.6.1 1996/04/11 11:19:58 emcmanus + * Copied from mainline.ppc. 
+ * [1996/04/10 16:56:50 emcmanus] + * + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/11/02 18:18:29 dwm] + * + * Revision 1.1.4.1 1995/11/23 17:36:53 stephen + * first powerpc checkin to mainline.ppc + * [1995/11/23 16:45:00 stephen] + * + * Revision 1.1.2.1 1995/08/25 06:49:44 stephen + * Initial checkin of files for PowerPC port + * [1995/08/23 15:04:13 stephen] + * + * Revision 1.1.2.1 1994/05/06 18:54:50 tmt + * New Mig definition file to describe required machine types. + * [1994/05/05 21:08:57 tmt] + * + * $EndLog$ + */ +/* + * Header file for basic, machine-dependent data types. ppc version. + */ + +#ifndef _MACHINE_VM_TYPES_DEFS_ +#define _MACHINE_VM_TYPES_DEFS_ 1 + +/* + * A natural_t is the type for the native + * integer type, e.g. 32 or 64 or.. whatever + * register size the machine has. Unsigned, it is + * used for entities that might be either + * unsigned integers or pointers, and for + * type-casting between the two. + * For instance, the IPC system represents + * a port in user space as an integer and + * in kernel space as a pointer. + */ +type natural_t = unsigned32; + +/* + * An integer_t is the signed counterpart + * of the natural_t type. Both types are + * only supposed to be used to define + * other types in a machine-independent + * way. + */ +type integer_t = int32; + + +#if MACH_IPC_COMPAT +/* + * For the old IPC interface + */ +#define MSG_TYPE_PORT_NAME MACH_MSG_TYPE_INTEGER_32 + +#endif /* MACH_IPC_COMPAT */ + + +#endif /* _MACHINE_VM_TYPES_DEFS_ */ diff --git a/osfmk/mach/ppc/ndr_def.h b/osfmk/mach/ppc/ndr_def.h new file mode 100644 index 000000000..40e82519c --- /dev/null +++ b/osfmk/mach/ppc/ndr_def.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:02 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.1 1996/12/09 16:55:41 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 11:10:10 stephen] + * + * Revision 1.1.6.1 1996/04/11 09:10:43 emcmanus + * Copied from mainline.ppc. + * [1996/04/10 17:09:22 emcmanus] + * + * Revision 1.1.4.1 1995/11/23 17:39:22 stephen + * first powerpc checkin to mainline.ppc + * [1995/11/23 16:53:16 stephen] + * + * Revision 1.1.2.1 1995/08/25 06:35:32 stephen + * Initial checkin of files for PowerPC port + * [1995/08/23 15:13:31 stephen] + * + * $EndLog$ + */ + +#include + +NDR_record_t NDR_record = { + 0, /* mig_reserved */ + 0, /* mig_reserved */ + 0, /* mig_reserved */ + NDR_PROTOCOL_2_0, + NDR_INT_BIG_ENDIAN, + NDR_CHAR_ASCII, + NDR_FLOAT_IEEE, + 0, +}; diff --git a/osfmk/mach/ppc/processor_info.h b/osfmk/mach/ppc/processor_info.h new file mode 100644 index 000000000..15219f087 --- /dev/null +++ b/osfmk/mach/ppc/processor_info.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: mach/ppc/processor_info.h + * + * Data structure definitions for ppc specific processor control + */ + + +#ifndef _MACH_PPC_PROCESSOR_INFO_H_ +#define _MACH_PPC_PROCESSOR_INFO_H_ + +#include + +/* processor_control command operations */ +#define PROCESSOR_PM_SET_REGS 1 /* Set Performance Monitor Registers */ +#define PROCESSOR_PM_SET_MMCR 2 /* Set Monitor Mode Controls Registers */ +#define PROCESSOR_PM_CLR_PMC 3 /* Clear Performance Monitor Counter Registers */ + +/* + * Performance Monitor Register structures + */ + +typedef union { + unsigned int word; + struct { + unsigned int dis : 1; + unsigned int dp : 1; + unsigned int du : 1; + unsigned int dms : 1; + unsigned int dmr : 1; + unsigned int reserved3 : 1; /* enint */ + unsigned int reserved4 : 1; /* discount */ + unsigned int reserved5 : 2; /* rtcselect */ + unsigned int reserved6 : 1; /* intonbittrans */ + unsigned int threshold : 6; + unsigned int reserved7 : 1; /* pmc1intcontrol */ + unsigned int reserved8 : 1; /* pmcintcontrol */ + unsigned int reserved9 : 1; /* pmctrigger */ + unsigned int pmc1select : 7; + unsigned int 
pmc2select : 6; + }bits; +}mmcr0_t; + +typedef union { + unsigned int word; + struct { + unsigned int pmc3select : 5; + unsigned int pmc4select : 5; + unsigned int reserved : 22; + }bits; +}mmcr1_t; + +typedef union { + unsigned int word; + struct { + unsigned int threshmult : 1; + unsigned int reserved : 31; + }bits; +}mmcr2_t; + +typedef union { + unsigned int word; + struct { + unsigned int ov : 1; /* overflow value */ + unsigned int cv : 31; /* countervalue */ + }bits; +}pmcn_t; + + + +/* Processor Performance Monitor Registers definitions */ + +struct processor_pm_regs { + union { + mmcr0_t mmcr0; + mmcr1_t mmcr1; + mmcr2_t mmcr2; + }u; + pmcn_t pmc[2]; +}; + +typedef struct processor_pm_regs processor_pm_regs_data_t; +typedef struct processor_pm_regs *processor_pm_regs_t; +#define PROCESSOR_PM_REGS_COUNT \ + (sizeof(processor_pm_regs_data_t) / sizeof (unsigned int)) + +#define PROCESSOR_PM_REGS_COUNT_POWERPC_604 \ + (PROCESSOR_PM_REGS_COUNT * 1 ) + +#define PROCESSOR_PM_REGS_COUNT_POWERPC_604e \ + (PROCESSOR_PM_REGS_COUNT * 2 ) + +#define PROCESSOR_PM_REGS_COUNT_POWERPC_750 \ + (PROCESSOR_PM_REGS_COUNT * 2 ) + +#define PROCESSOR_PM_REGS_COUNT_POWERPC_7400 \ + (PROCESSOR_PM_REGS_COUNT * 3 ) + +typedef unsigned int processor_temperature_data_t; +typedef unsigned int *processor_temperature_t; + +#define PROCESSOR_TEMPERATURE_COUNT 1 + +union processor_control_data { + processor_pm_regs_data_t cmd_pm_regs[3]; +}; + +struct processor_control_cmd { + integer_t cmd_op; + cpu_type_t cmd_cpu_type; + cpu_subtype_t cmd_cpu_subtype; + union processor_control_data u; +}; + +typedef struct processor_control_cmd processor_control_cmd_data_t; +typedef struct processor_control_cmd *processor_control_cmd_t; +#define cmd_pm_regs u.cmd_pm_regs; +#define cmd_pm_ctls u.cmd_pm_ctls; + +#define PROCESSOR_CONTROL_CMD_COUNT \ + (((sizeof(processor_control_cmd_data_t)) - \ + (sizeof(union processor_control_data))) / sizeof (integer_t)) + + /* x should be a processor_pm_regs_t */ 
+#define PERFMON_MMCR0(x) ((x)[0].u.mmcr0.word) +#define PERFMON_PMC1(x) ((x)[0].pmc[0].word) +#define PERFMON_PMC2(x) ((x)[0].pmc[1].word) +#define PERFMON_MMCR1(x) ((x)[1].u.mmcr1.word) +#define PERFMON_PMC3(x) ((x)[1].pmc[0].word) +#define PERFMON_PMC4(x) ((x)[1].pmc[1].word) +#define PERFMON_MMCR2(x) ((x)[2].u.mmcr2.word) + +#define PERFMON_DIS(x) ((x)[0].u.mmcr0.bits.dis) +#define PERFMON_DP(x) ((x)[0].u.mmcr0.bits.dp) +#define PERFMON_DU(x) ((x)[0].u.mmcr0.bits.du) +#define PERFMON_DMS(x) ((x)[0].u.mmcr0.bits.dms) +#define PERFMON_DMR(x) ((x)[0].u.mmcr0.bits.dmr) +#define PERFMON_THRESHOLD(x) ((x)[0].u.mmcr0.bits.threshold) +#define PERFMON_PMC1SELECT(x) ((x)[0].u.mmcr0.bits.pmc1select) +#define PERFMON_PMC2SELECT(x) ((x)[0].u.mmcr0.bits.pmc2select) +#define PERFMON_PMC3SELECT(x) ((x)[1].u.mmcr1.bits.pmc3select) +#define PERFMON_PMC4SELECT(x) ((x)[1].u.mmcr1.bits.pmc4select) +#define PERFMON_THRESHMULT(x) ((x)[2].u.mmcr2.bits.threshmult) +#define PERFMON_PMC1_CV(x) ((x)[0].u.pmc[0].bits.cv) +#define PERFMON_PMC2_CV(x) ((x)[0].u.pmc[1].bits.cv) +#define PERFMON_PMC3_CV(x) ((x)[1].u.pmc[0].bits.cv) +#define PERFMON_PMC4_CV(x) ((x)[1].u.pmc[1].bits.cv) + +#endif /* _MACH_PPC_PROCESSOR_INFO_H_ */ + diff --git a/osfmk/mach/ppc/rpc.h b/osfmk/mach/ppc/rpc.h new file mode 100644 index 000000000..f0283ec31 --- /dev/null +++ b/osfmk/mach/ppc/rpc.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +#ifndef _MACH_PPC_RPC_H_ +#define _MACH_PPC_RPC_H_ + +#include + +/* + * Just temporary until all vestiges of short-curcuiting can be removed. + */ +#define CAN_SHCIRCUIT(name) (0) + +/* + * Note, these don't quite work for PowerPC, because there are different + * ABIs that lay the parameters out some in registers and some in memory + * with slightly different results. We need to change MIG to assure a + * consistent layout regardless of ABI. + */ +#define MACH_RPC_ARGV(act) (char*)(USER_REGS(act)->r3) +#define MACH_RPC_RET(act) ( USER_REGS(act)->lr ) +#define MACH_RPC_UIP(act) ( USER_REGS(act)->srr0 ) +#define MACH_RPC_USP(act) ( USER_REGS(act)->r1 ) +/* FIXME!! */ +#define MACH_RPC_FUNC(act) ( USER_REGS(act)->r2 ) +#define MACH_RPC_SIG(act) ( USER_REGS(act)->r2 ) + +#endif /* _MACH_PPC_RPC_H_ */ + + + + + + + + + + + diff --git a/osfmk/mach/ppc/syscall_sw.h b/osfmk/mach/ppc/syscall_sw.h new file mode 100644 index 000000000..d6c7a7234 --- /dev/null +++ b/osfmk/mach/ppc/syscall_sw.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MACH_PPC_SYSCALL_SW_H_ +#define _MACH_PPC_SYSCALL_SW_H_ + +#include + +#define kernel_trap(trap_name,trap_number,number_args) \ +ENTRY(trap_name, TAG_NO_FRAME_USED) @\ + li r0, trap_number @\ + sc @\ + blr + +#define rpc_trap(trap_name,trap_number,number_args) \ +ENTRY(trap_name, TAG_NO_FRAME_USED) @\ + li r0, trap_number @\ + sc @\ + blr + + /* CHECKME! What is this supposed to do? */ +#define rpc_return_trap(trap_name,trap_number,number_args) \ +ENTRY(trap_name, TAG_NO_FRAME_USED) @\ + li r0, trap_number @\ + sc @\ + blr + +#define ppc_trap(trap_name,trap_number) \ +ENTRY(trap_name, TAG_NO_FRAME_USED) @\ + li r0, trap_number @\ + sc @\ + blr + +/* + * Put any definitions for PPC-only system calls in here (only if + * this file is being included from the one that instantiates the + * mach system calls). 
+ * + * Note: PPC-only system calls are in the 0x6000 to 0x6FFF range + */ +#ifdef _MACH_SYSCALL_SW_H_ +ppc_trap(diagCall,0x6000) +ppc_trap(vmm_get_version,0x6001) +ppc_trap(vmm_get_features,0x6002) +ppc_trap(vmm_init_context,0x6003) +ppc_trap(vmm_dispatch,0x6004) +ppc_trap(bb_enable_bluebox,0x6005) +ppc_trap(bb_disable_bluebox,0x6006) +ppc_trap(bb_settaskenv,0x6007) +#endif /* _MACH_SYSCALL_SW_H_ */ + +#endif /* _MACH_PPC_SYSCALL_SW_H_ */ diff --git a/osfmk/mach/ppc/thread_state.h b/osfmk/mach/ppc/thread_state.h new file mode 100644 index 000000000..515d0d730 --- /dev/null +++ b/osfmk/mach/ppc/thread_state.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _MACH_PPC_THREAD_STATE_H_ +#define _MACH_PPC_THREAD_STATE_H_ + +/* Size of maximum exported thread state in words */ +#define PPC_THREAD_STATE_MAX (144) /* Size of biggest state possible */ + +#if defined (__ppc__) +#define THREAD_STATE_MAX PPC_THREAD_STATE_MAX +#endif + +#endif /* _MACH_PPC_THREAD_STATE_H_ */ diff --git a/osfmk/mach/ppc/thread_status.h b/osfmk/mach/ppc/thread_status.h new file mode 100644 index 000000000..6dd8ba95d --- /dev/null +++ b/osfmk/mach/ppc/thread_status.h @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MACH_PPC_THREAD_STATUS_H_ +#define _MACH_PPC_THREAD_STATUS_H_ + +/* + * ppc_thread_state is the structure that is exported to user threads for + * use in status/mutate calls. This structure should never change. 
+ * + */ + +#define PPC_THREAD_STATE 1 +#define PPC_FLOAT_STATE 2 +#define PPC_EXCEPTION_STATE 3 +#define PPC_VECTOR_STATE 4 +#define THREAD_STATE_NONE 7 + +/* + * VALID_THREAD_STATE_FLAVOR is a platform specific macro that when passed + * an exception flavor will return whether that is a defined flavor for + * that platform. + * The macro must be manually updated to include all of the valid exception + * flavors as defined above. + */ +#define VALID_THREAD_STATE_FLAVOR(x) \ + ((x == PPC_THREAD_STATE) || \ + (x == PPC_FLOAT_STATE) || \ + (x == PPC_EXCEPTION_STATE) || \ + (x == PPC_VECTOR_STATE) || \ + (x == THREAD_STATE_NONE)) + +typedef struct ppc_thread_state { + unsigned int srr0; /* Instruction address register (PC) */ + unsigned int srr1; /* Machine state register (supervisor) */ + unsigned int r0; + unsigned int r1; + unsigned int r2; + unsigned int r3; + unsigned int r4; + unsigned int r5; + unsigned int r6; + unsigned int r7; + unsigned int r8; + unsigned int r9; + unsigned int r10; + unsigned int r11; + unsigned int r12; + unsigned int r13; + unsigned int r14; + unsigned int r15; + unsigned int r16; + unsigned int r17; + unsigned int r18; + unsigned int r19; + unsigned int r20; + unsigned int r21; + unsigned int r22; + unsigned int r23; + unsigned int r24; + unsigned int r25; + unsigned int r26; + unsigned int r27; + unsigned int r28; + unsigned int r29; + unsigned int r30; + unsigned int r31; + + unsigned int cr; /* Condition register */ + unsigned int xer; /* User's integer exception register */ + unsigned int lr; /* Link register */ + unsigned int ctr; /* Count register */ + unsigned int mq; /* MQ register (601 only) */ + + unsigned int vrsave; /* Vector Save Register */ +} ppc_thread_state_t; + +/* This structure should be double-word aligned for performance */ + +typedef struct ppc_float_state { + double fpregs[32]; + + unsigned int fpscr_pad; /* fpscr is 64 bits, 32 bits of rubbish */ + unsigned int fpscr; /* floating point status register */ +} 
ppc_float_state_t; + +typedef struct ppc_vector_state { + unsigned long save_vr[32][4]; + unsigned long save_vscr[4]; + unsigned int save_pad5[4]; + unsigned int save_vrvalid; /* VRs that have been saved */ + unsigned int save_pad6[7]; +} ppc_vector_state_t; + +/* + * saved state structure + * + * This structure corresponds to the state of the user registers as saved + * on the stack upon kernel entry (saved in pcb). On interrupts and exceptions + * we save all registers. On system calls we only save the registers not + * saved by the caller. + * + */ + +typedef struct ppc_saved_state { + unsigned int srr0; /* Instruction address register (PC) */ + unsigned int srr1; /* Machine state register (supervisor) */ + unsigned int r0; + unsigned int r1; + unsigned int r2; + unsigned int r3; + unsigned int r4; + unsigned int r5; + unsigned int r6; + unsigned int r7; + unsigned int r8; + unsigned int r9; + unsigned int r10; + unsigned int r11; + unsigned int r12; + unsigned int r13; + unsigned int r14; + unsigned int r15; + unsigned int r16; + unsigned int r17; + unsigned int r18; + unsigned int r19; + unsigned int r20; + unsigned int r21; + unsigned int r22; + unsigned int r23; + unsigned int r24; + unsigned int r25; + unsigned int r26; + unsigned int r27; + unsigned int r28; + unsigned int r29; + unsigned int r30; + unsigned int r31; + + unsigned int cr; /* Condition register */ + unsigned int xer; /* User's integer exception register */ + unsigned int lr; /* Link register */ + unsigned int ctr; /* Count register */ + unsigned int mq; /* MQ register (601 only) */ + unsigned int vrsave; /* Vector Register Save */ + +/* These are extra. Remove them from the count */ + + unsigned int sr_copyin; /* SR_COPYIN is used for remapping */ + unsigned int pad2[7]; /* struct alignment */ +} ppc_saved_state_t; + +/* + * ppc_exception_state + * + * This structure corresponds to some additional state of the user + * registers as saved in the PCB upon kernel entry. 
They are only + * available if an exception is passed out of the kernel, and even + * then not all are guaranteed to be updated. + * + * Some padding is included in this structure which allows space for + * servers to store temporary values if need be, to maintain binary + * compatiblity. + */ + +typedef struct ppc_exception_state { + unsigned long dar; /* Fault registers for coredump */ + unsigned long dsisr; + unsigned long exception;/* number of powerpc exception taken */ + unsigned long pad0; /* align to 16 bytes */ + + unsigned long pad1[4]; /* space in PCB "just in case" */ +} ppc_exception_state_t; + +/* + * Save State Flags + */ + +#define PPC_THREAD_STATE_COUNT \ + (sizeof(struct ppc_thread_state) / sizeof(int)) + +#define PPC_EXCEPTION_STATE_COUNT \ + (sizeof(struct ppc_exception_state) / sizeof(int)) + +#define PPC_FLOAT_STATE_COUNT \ + (sizeof(struct ppc_float_state) / sizeof(int)) + +#define PPC_VECTOR_STATE_COUNT \ + (sizeof(struct ppc_vector_state) / sizeof(int)) + +/* + * Machine-independent way for servers and Mach's exception mechanism to + * choose the most efficient state flavor for exception RPC's: + */ +#define MACHINE_THREAD_STATE PPC_THREAD_STATE +#define MACHINE_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT + +/* + * Largest state on this machine: + */ +#define THREAD_MACHINE_STATE_MAX PPC_VECTOR_STATE_COUNT + +#endif /* _MACH_PPC_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/ppc/vm_param.h b/osfmk/mach/ppc/vm_param.h new file mode 100644 index 000000000..459bbfd24 --- /dev/null +++ b/osfmk/mach/ppc/vm_param.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _MACH_PPC_VM_PARAM_H_ +#define _MACH_PPC_VM_PARAM_H_ + +#define BYTE_SIZE 8 /* byte size in bits */ + +#define PPC_PGBYTES 4096 /* bytes per ppc page */ +#define PPC_PGSHIFT 12 /* number of bits to shift for pages */ + +#define VM_MIN_ADDRESS ((vm_offset_t) 0) +#define VM_MAX_ADDRESS ((vm_offset_t) 0xfffff000U) + +#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0x00001000) + +/* We map the kernel using only SR0,SR1,SR2,SR3 leaving segments alone */ +#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0x3fffffff) + +#define USER_STACK_END ((vm_offset_t) 0xffff0000U) + +#define ppc_round_page(x) ((((unsigned)(x)) + PPC_PGBYTES - 1) & \ + ~(PPC_PGBYTES-1)) +#define ppc_trunc_page(x) (((unsigned)(x)) & ~(PPC_PGBYTES-1)) + + +#define KERNEL_STACK_SIZE (4 * PPC_PGBYTES) +#define INTSTACK_SIZE (5 * PPC_PGBYTES) + +#endif /* _PPC_VM_PARAM_H_ */ diff --git a/osfmk/mach/ppc/vm_types.h b/osfmk/mach/ppc/vm_types.h new file mode 100644 index 000000000..855c1da16 --- /dev/null +++ b/osfmk/mach/ppc/vm_types.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.1 1996/12/09 16:50:19 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 10:51:34 stephen] + * + * Revision 1.1.6.1 1996/04/11 11:20:26 emcmanus + * Copied from mainline.ppc. + * [1996/04/10 16:57:16 emcmanus] + * + * Revision 1.1.4.1 1995/11/23 17:37:13 stephen + * first powerpc checkin to mainline.ppc + * [1995/11/23 16:45:52 stephen] + * + * Revision 1.1.2.1 1995/08/25 06:50:05 stephen + * Initial checkin of files for PowerPC port + * [1995/08/23 15:05:11 stephen] + * + * Revision 1.2.8.2 1995/01/06 19:50:48 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:42:42 dwm] + * + * Revision 1.2.8.1 1994/09/23 02:38:01 ezf + * change marker to not FREE + * [1994/09/22 21:40:30 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:41:01 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:16:38 jeffc] + * + * Revision 1.2 1993/04/19 16:34:37 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:34 david] + * + * Revision 1.1 1992/09/30 02:30:57 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 16:53:00 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:32:34 mrt + * Changed to new Mach copyright + * [91/02/01 17:10:49 mrt] + * + * Revision 2.2 90/05/03 15:48:32 dbg + * First checkin. + * + * Revision 1.3 89/03/09 20:20:12 rpd + * More cleanup. + * + * Revision 1.2 89/02/26 13:01:20 gm0w + * Changes for cleanup. + * + * 31-Dec-88 Robert Baron (rvb) at Carnegie-Mellon University + * Derived from MACH2.0 vax release. + * + * 23-Apr-87 Michael Young (mwyoung) at Carnegie-Mellon University + * Changed things to "unsigned int" to appease the user community :-). + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +/* + * File: vm_types.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Header file for VM data types. I386 version. + */ + +#ifndef _MACH_PPC_VM_TYPES_H_ +#define _MACH_PPC_VM_TYPES_H_ + +#ifndef ASSEMBLER + +/* + * A natural_t is the type for the native + * integer type, e.g. 32 or 64 or.. whatever + * register size the machine has. Unsigned, it is + * used for entities that might be either + * unsigned integers or pointers, and for + * type-casting between the two. + * For instance, the IPC system represents + * a port in user space as an integer and + * in kernel space as a pointer. + */ +typedef unsigned int natural_t; + +/* + * An integer_t is the signed counterpart + * of the natural_t type. Both types are + * only supposed to be used to define + * other types in a machine-independent + * way. + */ +typedef int integer_t; + +#ifdef MACH_KERNEL_PRIVATE +/* + * An int32 is an integer that is at least 32 bits wide + */ +typedef int int32; +typedef unsigned int uint32; +#endif + +/* + * A vm_offset_t is a type-neutral pointer, + * e.g. an offset into a virtual memory space. + */ +typedef natural_t vm_offset_t; + +/* + * A vm_size_t is the proper type for e.g. + * expressing the difference between two + * vm_offset_t entities. + */ +typedef natural_t vm_size_t; +typedef unsigned long long vm_double_size_t; + +/* + * space_t is used in the pmap system + */ +typedef unsigned int space_t; + +#endif /* ndef ASSEMBLER */ + +/* + * If composing messages by hand (please dont) + */ + +#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32 + +#endif /* _MACH_PPC_VM_TYPES_H_ */ diff --git a/osfmk/mach/processor.defs b/osfmk/mach/processor.defs new file mode 100644 index 000000000..be0f2ae0e --- /dev/null +++ b/osfmk/mach/processor.defs @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: mach/mach_port.defs + * Author: Rich Draves + * + * Exported kernel calls. + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + processor 3000; + +#include +#include + +/* + * References to processor objects are returned by: + * host_processors(host_priv_t,...); + * +/* + * Start processor. + */ +routine processor_start( + processor : processor_t); + +/* + * Exit processor -- may not be restartable. + */ + +routine processor_exit( + processor : processor_t); + +/* + * Return information about this processor. + */ +routine processor_info( + processor : processor_t; + flavor : processor_flavor_t; + out host : host_t; + out processor_info_out: processor_info_t, CountInOut); + + +/* + * Do something machine-dependent to processor. + */ +routine processor_control( + processor : processor_t; + processor_cmd : processor_info_t); + +/* + * JMM - Keep processor_set related stuff at the end because + * they likely will be removed. + */ + +/* + * Assign processor to processor set. + */ +routine processor_assign( + processor : processor_t; + new_set : processor_set_t; + wait : boolean_t); + +/* + * Get current assignment for processor. + */ +routine processor_get_assignment( + processor : processor_t; + out assigned_set : processor_set_name_t); diff --git a/osfmk/mach/processor_info.h b/osfmk/mach/processor_info.h new file mode 100644 index 000000000..17824334b --- /dev/null +++ b/osfmk/mach/processor_info.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: mach/processor_info.h + * Author: David L. Black + * Date: 1988 + * + * Data structure definitions for processor_info, processor_set_info + */ + +#ifndef _MACH_PROCESSOR_INFO_H_ +#define _MACH_PROCESSOR_INFO_H_ + +#include +#include + +/* + * Generic information structure to allow for expansion. + */ +typedef integer_t *processor_info_t; /* varying array of int. 
*/ +typedef integer_t *processor_info_array_t; /* varying array of int */ + +#define PROCESSOR_INFO_MAX (1024) /* max array size */ +typedef integer_t processor_info_data_t[PROCESSOR_INFO_MAX]; + + +typedef integer_t *processor_set_info_t; /* varying array of int. */ + +#define PROCESSOR_SET_INFO_MAX (1024) /* max array size */ +typedef integer_t processor_set_info_data_t[PROCESSOR_SET_INFO_MAX]; + +/* + * Currently defined information. + */ +typedef int processor_flavor_t; +#define PROCESSOR_BASIC_INFO 1 /* basic information */ +#define PROCESSOR_CPU_LOAD_INFO 2 /* cpu load information */ +#define PROCESSOR_PM_REGS_INFO 0x10000001 /* performance monitor register info */ +#define PROCESSOR_TEMPERATURE 0x10000002 /* Processor core temperature */ + +struct processor_basic_info { + cpu_type_t cpu_type; /* type of cpu */ + cpu_subtype_t cpu_subtype; /* subtype of cpu */ + boolean_t running; /* is processor running */ + int slot_num; /* slot number */ + boolean_t is_master; /* is this the master processor */ +}; + +typedef struct processor_basic_info processor_basic_info_data_t; +typedef struct processor_basic_info *processor_basic_info_t; +#define PROCESSOR_BASIC_INFO_COUNT \ + (sizeof(processor_basic_info_data_t)/sizeof(natural_t)) + +struct processor_cpu_load_info { /* number of ticks while running... */ + unsigned long cpu_ticks[CPU_STATE_MAX]; /* ... in the given mode */ +}; + +typedef struct processor_cpu_load_info processor_cpu_load_info_data_t; +typedef struct processor_cpu_load_info *processor_cpu_load_info_t; +#define PROCESSOR_CPU_LOAD_INFO_COUNT \ + (sizeof(processor_cpu_load_info_data_t)/sizeof(natural_t)) + +/* + * Scaling factor for load_average, mach_factor. 
+ */ +#define LOAD_SCALE 1000 + +typedef int processor_set_flavor_t; +#define PROCESSOR_SET_BASIC_INFO 5 /* basic information */ + +struct processor_set_basic_info { + int processor_count; /* How many processors */ + int default_policy; /* When others not enabled */ +}; + +typedef struct processor_set_basic_info processor_set_basic_info_data_t; +typedef struct processor_set_basic_info *processor_set_basic_info_t; +#define PROCESSOR_SET_BASIC_INFO_COUNT \ + (sizeof(processor_set_basic_info_data_t)/sizeof(natural_t)) + +#define PROCESSOR_SET_LOAD_INFO 4 /* scheduling statistics */ + +struct processor_set_load_info { + int task_count; /* How many tasks */ + int thread_count; /* How many threads */ + integer_t load_average; /* Scaled */ + integer_t mach_factor; /* Scaled */ +}; + +typedef struct processor_set_load_info processor_set_load_info_data_t; +typedef struct processor_set_load_info *processor_set_load_info_t; +#define PROCESSOR_SET_LOAD_INFO_COUNT \ + (sizeof(processor_set_load_info_data_t)/sizeof(natural_t)) + + +/* + * New scheduling control interface + */ +#define PROCESSOR_SET_ENABLED_POLICIES 3 +#define PROCESSOR_SET_ENABLED_POLICIES_COUNT \ + (sizeof(policy_t)/sizeof(natural_t)) + +#define PROCESSOR_SET_TIMESHARE_DEFAULT 10 +#define PROCESSOR_SET_TIMESHARE_LIMITS 11 + +#define PROCESSOR_SET_RR_DEFAULT 20 +#define PROCESSOR_SET_RR_LIMITS 21 + +#define PROCESSOR_SET_FIFO_DEFAULT 30 +#define PROCESSOR_SET_FIFO_LIMITS 31 + +#endif /* _MACH_PROCESSOR_INFO_H_ */ diff --git a/osfmk/mach/processor_set.defs b/osfmk/mach/processor_set.defs new file mode 100644 index 000000000..8b01834b5 --- /dev/null +++ b/osfmk/mach/processor_set.defs @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_port.defs + * Author: Rich Draves + * + * Exported kernel calls. 
+ */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + processor_set 4000; + +#include +#include + +/* + * Return scheduling statistics for a processor set. + */ +routine processor_set_statistics( + pset : processor_set_name_t; + flavor : processor_set_flavor_t; + out info_out : processor_set_info_t, CountInOut); + + +/* + * Destroy processor set. + */ +routine processor_set_destroy( + set : processor_set_t); + + +/* + * Set max priority for processor_set. + */ +routine processor_set_max_priority( + processor_set : processor_set_t; + max_priority : int; + change_threads : boolean_t); + +/* + * Enable policy for processor set + */ +routine processor_set_policy_enable( + processor_set : processor_set_t; + policy : int); + +/* + * Disable policy for processor set + */ +routine processor_set_policy_disable( + processor_set : processor_set_t; + policy : int; + change_threads : boolean_t); + +/* + * List all tasks in processor set. + */ +routine processor_set_tasks( + processor_set : processor_set_t; + out task_list : task_array_t); + +/* + * List all threads in processor set. + */ +routine processor_set_threads( + processor_set : processor_set_t; + out thread_list : thread_act_array_t); + +/* + * Controls the scheduling attributes governing the processor set. + * Allows control of enabled policies, and per-policy base and limit + * priorities. + */ +routine processor_set_policy_control( + pset : processor_set_t; + flavor : processor_set_flavor_t; + policy_info : processor_set_info_t; + change : boolean_t); + + +/* + * Debug Info + * This call is only valid on MACH_DEBUG kernels. + * Otherwise, KERN_FAILURE is returned. + */ +routine processor_set_stack_usage( + pset : processor_set_t; + out total : unsigned; + out space : vm_size_t; + out resident : vm_size_t; + out maxusage : vm_size_t; + out maxstack : vm_offset_t); + +/* + * Get information about processor set. 
+ */ +routine processor_set_info( + set_name : processor_set_name_t; + flavor : int; + out host : host_t; + out info_out : processor_set_info_t, CountInOut); + diff --git a/osfmk/mach/prof.defs b/osfmk/mach/prof.defs new file mode 100644 index 000000000..9c140a6b8 --- /dev/null +++ b/osfmk/mach/prof.defs @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.12.3 1996/01/09 19:22:20 devrcs + * Change notices: msgoption options from int to mach_msg_options_t. + * [1995/12/01 19:49:49 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. 
+ * [1995/11/21 18:09:30 jfraser] + * + * Revision 1.2.12.2 1995/01/06 19:51:41 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.2.4 1994/03/24 21:23:54 hidden + * added send_notices to support DCI profile/event tracing + * * End1.3merge + * [1994/11/02 18:32:19 dwm] + * + * Revision 1.2.12.1 1994/09/23 02:41:58 ezf + * change marker to not FREE + * [1994/09/22 21:42:31 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:43:11 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:01 jeffc] + * + * Revision 1.2 1993/04/19 16:38:55 devrcs + * [0014] Change subsystem number as current shared with exc.defs reply messages. + * [1992/12/23 13:11:20 david] + * + * Revision 1.1 1992/09/30 02:23:01 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.1.2.1 92/09/15 17:25:42 jeffreyh + * Created + * [92/07/17 bernadat] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * Abstract: + * MiG definitions file for Mach Profiling interface. + * receive_samples(); + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif KERNEL_USER + prof 2450; /* exc.defs from 2400 - 2449 */ + +#include +#include + +ServerPrefix receive_; +UserPrefix send_; + +type sample_array_t = array[*:1024] of unsigned; + +simpleroutine samples( + reply_port : mach_port_t; + samples : sample_array_t); + +simpleroutine notices( + reply_port : mach_port_t; + samples : sample_array_t; + msgoption options : mach_msg_options_t); + + + + diff --git a/osfmk/mach/prof_types.h b/osfmk/mach/prof_types.h new file mode 100644 index 000000000..e7096ddc4 --- /dev/null +++ b/osfmk/mach/prof_types.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.7.2 1995/01/26 22:15:46 ezf + * corrected CR + * [1995/01/26 21:16:02 ezf] + * + * Revision 1.2.3.2 1993/06/09 02:43:16 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:04 jeffc] + * + * Revision 1.2 1993/04/19 16:39:03 devrcs + * ansi C conformance changes + * [1993/02/02 18:54:26 david] + * + * Revision 1.1 1992/09/30 02:32:04 robert + * Initial revision + * + * $EndLog$ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _MACH_PROF_TYPES_H +#define _MACH_PROF_TYPES_H + +#define SAMPLE_MAX 256 /* Max array size */ +typedef unsigned sample_array_t[SAMPLE_MAX]; + +#endif /* _MACH_PROF_TYPES_H */ diff --git a/osfmk/mach/rpc.h b/osfmk/mach/rpc.h new file mode 100644 index 000000000..2794e4dd9 --- /dev/null +++ b/osfmk/mach/rpc.h @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach RPC Subsystem Interfaces + */ + +#ifndef _MACH_RPC_H_ +#define _MACH_RPC_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * The various bits of the type field of the routine_arg_descriptor + */ + +/* The basic types */ + +#define TYPE_SHIFT 0 +#define MACH_RPC_PORT (1 << TYPE_SHIFT) +#define MACH_RPC_ARRAY (1 << (TYPE_SHIFT + 1)) +#define MACH_RPC_VARIABLE (1 << (TYPE_SHIFT + 2)) +#define LAST_TYPE_BIT (TYPE_SHIFT+3) + +/* XXX Port arrays need not be variable arrays, as assumed below. Fixme. 
*/ +#define MACH_RPC_ARRAY_FIX (MACH_RPC_ARRAY) +#define MACH_RPC_ARRAY_FIXED (MACH_RPC_ARRAY) +#define MACH_RPC_ARRAY_VAR (MACH_RPC_ARRAY | MACH_RPC_VARIABLE) +#define MACH_RPC_ARRAY_VARIABLE (MACH_RPC_ARRAY | MACH_RPC_VARIABLE) +#define MACH_RPC_PORT_ARRAY (MACH_RPC_PORT | MACH_RPC_ARRAY_VAR) + +/* Argument direction bits */ + +#define DIRECT_SHIFT LAST_TYPE_BIT +#define DIRECTION_SHIFT LAST_TYPE_BIT +#define MACH_RPC_IN (1 << DIRECTION_SHIFT) +#define MACH_RPC_OUT (1 << (DIRECTION_SHIFT + 1)) +#define LAST_DIRECT_BIT (DIRECTION_SHIFT + 2) +#define LAST_DIRECTION_BIT (DIRECTION_SHIFT + 2) + +#define MACH_RPC_INOUT (MACH_RPC_IN | MACH_RPC_OUT) + +/* Persist and pointer bit */ + +#define POINTER_SHIFT LAST_DIRECTION_BIT +#define MACH_RPC_POINTER (1 << POINTER_SHIFT) +#define LAST_POINTER_BIT (POINTER_SHIFT + 1) + +/* Port disposition bits */ + +#define NAME_SHIFT LAST_POINTER_BIT +#define MACH_RPC_RECEIVE (1 << NAME_SHIFT) +#define MACH_RPC_SEND (2 << NAME_SHIFT) +#define MACH_RPC_SEND_ONCE (3 << NAME_SHIFT) +#define LAST_NAME_BIT (NAME_SHIFT + 2) + +#define ACTION_SHIFT LAST_NAME_BIT +#define MACH_RPC_MOVE (1 << ACTION_SHIFT) +#define MACH_RPC_COPY (2 << ACTION_SHIFT) +#define MACH_RPC_MAKE (3 << ACTION_SHIFT) +#define LAST_ACTION_BIT (ACTION_SHIFT + 2) + +#define MACH_RPC_MOVE_RECEIVE (MACH_RPC_MOVE | MACH_RPC_RECEIVE) +#define MACH_RPC_MOVE_SEND (MACH_RPC_MOVE | MACH_RPC_SEND) +#define MACH_RPC_COPY_SEND (MACH_RPC_COPY | MACH_RPC_SEND) +#define MACH_RPC_MAKE_SEND (MACH_RPC_MAKE | MACH_RPC_SEND) +#define MACH_RPC_MOVE_SEND_ONCE (MACH_RPC_MOVE | MACH_RPC_SEND_ONCE) +#define MACH_RPC_MAKE_SEND_ONCE (MACH_RPC_MAKE | MACH_RPC_SEND_ONCE) + +/* Hint for virtual vs. physical copy */ + +#define OPTION_SHIFT LAST_ACTION_BIT +#define MACH_RPC_PHYSICAL_COPY (1 << OPTION_SHIFT) +#define MACH_RPC_VIRTUAL_COPY (1 << (OPTION_SHIFT + 1)) +#define LAST_OPTION_BIT (OPTION_SHIFT + 2) + +/* Deallocate? 
*/ + +#define DEALLOCATE_SHIFT LAST_OPTION_BIT +#define MACH_RPC_DEALLOCATE (1 << DEALLOCATE_SHIFT) +#define LAST_DEALLOCATE_BIT (DEALLOCATE_SHIFT + 1) + +/* Argument is already on the stack */ + +#define ONSTACK_SHIFT LAST_DEALLOCATE_BIT +#define MACH_RPC_ONSTACK (1 << ONSTACK_SHIFT) +#define LAST_ONSTACK_BIT (ONSTACK_SHIFT + 1) + +/* Is variable array bounded? Derived from type and arg.size */ + +#define BOUND_SHIFT LAST_ONSTACK_BIT +#define MACH_RPC_BOUND (1 << BOUND_SHIFT) +#define MACH_RPC_UNBOUND (0) +#define BOUND MACH_RPC_BOUND +#define UNBND MACH_RPC_UNBOUND +#define LAST_BOUND_BIT (BOUND_SHIFT + 1) + +/* + * Basic mach rpc types. + */ +typedef unsigned int routine_arg_type; +typedef unsigned int routine_arg_offset; +typedef unsigned int routine_arg_size; + +/* + * Definitions for a signature's argument and routine descriptor's. + */ +struct routine_arg_descriptor { + routine_arg_type type; /* Port, Array, etc. */ + routine_arg_size size; /* element size in bytes */ + routine_arg_size count; /* number of elements */ + routine_arg_offset offset; /* Offset in list of routine args */ +}; +typedef struct routine_arg_descriptor *routine_arg_descriptor_t; + +struct routine_descriptor { + mig_impl_routine_t impl_routine; /* Server work func pointer */ + mig_stub_routine_t stub_routine; /* Unmarshalling func pointer */ + unsigned int argc; /* Number of argument words */ + unsigned int descr_count; /* Number of complex argument */ + /* descriptors */ + struct routine_arg_descriptor * + arg_descr; /* Pointer to beginning of */ + /* the arg_descr array */ + unsigned int max_reply_msg; /* Max size for reply msg */ +}; +typedef struct routine_descriptor *routine_descriptor_t; + +#define DESCR_SIZE(x) ((x)->descr_count * sizeof(struct routine_arg_descriptor)) + +struct rpc_signature { + struct routine_descriptor rd; + struct routine_arg_descriptor rad[1]; +}; + +#ifdef MACH_KERNEL_PRIVATE + +typedef struct rpc_signature *rpc_signature_t; + +#endif + +#define 
RPC_SIGBUF_SIZE 8 +/* + * A subsystem describes a set of server routines that can be invoked by + * mach_rpc() on the ports that are registered with the subsystem. For + * each routine, the routine number is given, along with the + * address of the implementation function in the server and a + * description of the arguments of the routine (it's "signature"). + * + * This structure definition is only a template for what is really a + * variable-length structure (generated by MIG for each subsystem). + * The actual structures do not always have one entry in the routine + * array, and also have a varying number of entries in the arg_descr + * array. Each routine has an array of zero or more arg descriptors + * one for each complex arg. These arrays are all catenated together + * to form the arg_descr field of the subsystem struct. The + * arg_descr field of each routine entry points to a unique sub-sequence + * within this catenated array. The goal is to keep everything + * contiguous. + */ +struct rpc_subsystem { + struct subsystem *subsystem; /* Reserved for system use */ + + mach_msg_id_t start; /* Min routine number */ + mach_msg_id_t end; /* Max routine number + 1 */ + unsigned int maxsize; /* Max mach_msg size */ + vm_address_t base_addr; /* Address of this struct in user */ + + struct routine_descriptor /* Array of routine descriptors */ + routine[1 /* Actually, (start-end+1) */ + ]; + + struct routine_arg_descriptor + arg_descriptor[1 /* Actually, the sum of the descr_ */ + ]; /* count fields for all routines */ +}; +typedef struct rpc_subsystem *rpc_subsystem_t; + +#define RPC_SUBSYSTEM_NULL ((rpc_subsystem_t) 0) + +/* + * New RPC declarations + * + * First pass at definitions and types for the new rpc service. + * This is subject to revision. 
+ */ + +/* + * RPC macros + */ + +#define RPC_MASK(shift,last) \ + ( ((1 << ((last)-(shift)))-1) << (shift) ) + +#define RPC_FIELD(field,shift,last) \ + ( (field) & (((1 << ((last)-(shift)))-1) << (shift)) ) + +#define RPC_BOUND(dsc) \ + (((RPC_FIELD((dsc).type,TYPE_SHIFT+1,TYPE_SHIFT+3) == \ + MACH_RPC_ARRAY_VARIABLE) && (dsc).count != 0) ? MACH_RPC_BOUND : 0) + +#define ROUNDUP2(x,n) ((((unsigned)(x)) + (n) - 1) & ~((n)-1)) +#define ROUNDWORD(x) ROUNDUP2(x,sizeof(int)) + +/* + * RPC errors + * + * Display and process errors of different severity, from just for + * information only to fatal (panic). Error code colors indicate how + * difficult it is for the subsystem to handle the error correctly. + * The implication is that, for example, early versions of the code may + * not be handling code red errors properly. The code should use this + * facility instead of regular printf's. + */ + +#define MACH_RPC_DEBUG 1 + +#define ERR_INFO 1 /* purely informational */ +#define ERR_GREEN 2 /* easily handled error */ +#define ERR_YELLOW 3 /* medium difficult error */ +#define ERR_RED 4 /* difficult to handle error */ +#define ERR_FATAL 5 /* unrecoverable error, panic */ + +#if MACH_RPC_DEBUG > 1 +#define rpc_error(E,S) \ + printf("RPC error "); \ + rpc_error_show_severity(S); \ + printf("in file \"%s\", line %d: ", __FILE__, __LINE__); \ + printf E ; \ + printf("\n"); \ + rpc_error_severity(S) +#else +#define rpc_error(E,S) \ + if ((S) == ERR_FATAL || (S) == ERR_RED) { \ + printf("RPC error "); \ + rpc_error_show_severity(S); \ + printf("in file \"%s\", line %d: ", __FILE__, __LINE__); \ + printf E ; \ + printf("\n"); \ + rpc_error_severity(S); \ + } +#endif /* MACH_RPC_DEBUG */ + +/* + * RPC buffer size and break points + * + * These values define the rpc buffer size on the kernel stack, + * and break point values for switching to virtual copy (cow). + * This should be in a machine dependent include file. All sizes + * are in word (sizeof(int)) units. 
+ */ + +#define RPC_KBUF_SIZE 16 /* kernel stack buffer size (ints) */ +#define RPC_COW_SIZE 1024 /* size where COW is a win (ints) */ +#define RPC_DESC_COUNT 4 /* default descriptor count */ + + +/* + * RPC copy state + * + * Record the rpc copy state for arrays, so we can unwind our state + * during error processing. There is one entry per complex (signatured) + * argument. The first entry is marked COPY_TYPE_ALLOC_KRN if this record + * itself was kalloc'd because the number of complex arg descriptors + * exceeded the default value (RPC_DESC_COUNT). This is not a conflict + * since the first argument is always the destination port, never an array. + */ + +#define COPY_TYPE_NO_COPY 0 /* nothing special */ +#define COPY_TYPE_ON_KSTACK 1 /* array is on kernel stack */ +#define COPY_TYPE_ON_SSTACK 2 /* array is on server stack */ +#define COPY_TYPE_VIRTUAL_IN 3 /* vm_map_copyin part of cow */ +#define COPY_TYPE_VIRTUAL_OUT_SVR 4 /* map cpyout svr part of cow */ +#define COPY_TYPE_VIRTUAL_OUT_CLN 5 /* map cpyout cln part of cow */ +#define COPY_TYPE_ALLOC_KRN 6 /* kernel kalloc'd for array */ +#define COPY_TYPE_ALLOC_SVR 7 /* vm_alloc'd in server space */ +#define COPY_TYPE_ALLOC_CLN 8 /* vm_alloc'd in client space */ +#define COPY_TYPE_PORT 9 /* plain port translated */ +#define COPY_TYPE_PORT_ARRAY 10 /* port array translated */ + + +/* + * RPC types + */ +typedef int rpc_id_t; +typedef int rpc_return_t; +typedef unsigned int rpc_size_t; +typedef unsigned int rpc_offset_t; + +struct rpc_copy_state { + unsigned copy_type; /* what kind of copy */ + vm_offset_t alloc_addr; /* address to free */ +}; +typedef struct rpc_copy_state *rpc_copy_state_t; +typedef struct rpc_copy_state rpc_copy_state_data_t; + +typedef boolean_t (*copyfunc_t)(const char *, char *, vm_size_t); + + +/* + * RPC function declarations + */ + +#ifdef CALLOUT_RPC_MODEL + +extern +void rpc_bootstrap( void ); + +extern +void rpc_remote_bootstrap( void ); + +extern +rpc_return_t mach_rpc_trap( + 
mach_port_name_t dest_port, + rpc_id_t routine_num, + rpc_signature_t signature_ptr, + rpc_size_t signature_size ); + +extern +rpc_return_t mach_rpc_return_trap( void ); + +extern +rpc_return_t mach_rpc_return_error( void ); + +void mach_rpc_return_wrapper( void ); + +void rpc_upcall( + vm_offset_t stack, + vm_offset_t new_stack, + vm_offset_t server_func, + int return_code ); + +void rpc_error_severity( int severity ); +void rpc_error_show_severity( int severity ); +unsigned int name_rpc_to_ipc( unsigned int action ); + +void clean_port_array( + ipc_object_t * array, + unsigned count, + unsigned cooked, + unsigned direct ); + +void unwind_rpc_state( + routine_descriptor_t routine, + rpc_copy_state_t state, + int * arg_buf ); + +kern_return_t unwind_invoke_state( + thread_act_t thr_act ); + +kern_return_t rpc_invke_args_in( + routine_descriptor_t routine, + rpc_copy_state_t state, + int * arg_buf, + copyfunc_t infunc ); + +kern_return_t rpc_invke_args_out( + routine_descriptor_t routine, + rpc_copy_state_t state, + int * arg_buf, + int ** new_sp, + copyfunc_t outfunc ); + +kern_return_t rpc_reply_args_in( + routine_descriptor_t routine, + rpc_copy_state_t state, + int * svr_buf, + copyfunc_t infunc ); + +kern_return_t rpc_reply_args_out( + routine_descriptor_t routine, + rpc_copy_state_t state, + int * svr_buf, + int * cln_buf, + copyfunc_t outfunc ); + +#endif /* CALLOUT_RPC_MODEL */ + +/* + * libmach helper functions: + */ +extern rpc_subsystem_t mach_subsystem_join( + rpc_subsystem_t, + rpc_subsystem_t, + unsigned int *, + void *(* )(int)); + +#endif /* _MACH_RPC_H_ */ + + diff --git a/osfmk/mach/semaphore.defs b/osfmk/mach/semaphore.defs new file mode 100644 index 000000000..468b8c94f --- /dev/null +++ b/osfmk/mach/semaphore.defs @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: mach/semaphore.defs + * Author: Joseph CaraDonna + * + * Exported kernel calls + * + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + semaphore 617200; + +#include +#include +#include + +/* + * a semaphore_t is created and destroyed through the task object. 
+ * semaphore_create(task_t,&semaphore_t,...); + * semaphore_destroy(task_t,semaphore_t); + */ + +routine semaphore_signal( + semaphore : semaphore_t); + +routine semaphore_signal_all( + semaphore : semaphore_t); + +routine semaphore_wait( + semaphore : semaphore_t); + + +routine semaphore_signal_thread( + semaphore : semaphore_t; + thread : thread_act_t); + +routine semaphore_timedwait( + semaphore : semaphore_t; + wait_time : mach_timespec_t); + +routine semaphore_wait_signal( + wait_semaphore : semaphore_t; + signal_semaphore: semaphore_t); + +routine semaphore_timedwait_signal( + wait_semaphore : semaphore_t; + signal_semaphore: semaphore_t; + wait_time : mach_timespec_t); diff --git a/osfmk/mach/semaphore.h b/osfmk/mach/semaphore.h new file mode 100644 index 000000000..fe21aa9fb --- /dev/null +++ b/osfmk/mach/semaphore.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _MACH_SEMAPHORE_H_ +#define _MACH_SEMAPHORE_H_ + +#include +#include +#include +#include + +#define SEMAPHORE_OPTION_NONE 0x00000000 + +#define SEMAPHORE_SIGNAL 0x00000001 +#define SEMAPHORE_WAIT 0x00000002 +#define SEMAPHORE_WAIT_ON_SIGNAL 0x00000008 + +#define SEMAPHORE_SIGNAL_TIMEOUT 0x00000010 +#define SEMAPHORE_SIGNAL_ALL 0x00000020 +#define SEMAPHORE_SIGNAL_INTERRUPT 0x00000040 /* libmach implements */ +#define SEMAPHORE_SIGNAL_PREPOST 0x00000080 + +#define SEMAPHORE_WAIT_TIMEOUT 0x00000100 +#define SEMAPHORE_WAIT_INTERRUPT 0x00000400 /* libmach implements */ + +#define SEMAPHORE_TIMEOUT_NOBLOCK 0x00100000 +#define SEMAPHORE_TIMEOUT_RELATIVE 0x00200000 + +#define SEMAPHORE_USE_SAVED_RESULT 0x01000000 /* internal use only */ +#define SEMAPHORE_SIGNAL_RELEASE 0x02000000 /* internal use only */ + + +/* + * Forward Declarations + * + * The semaphore creation and deallocation routines are + * defined with the Mach task APIs in . + * + * kern_return_t semaphore_create(task_t task, + * semaphore_t *new_semaphore, + * sync_policy_t policy, + * int value); + * + * kern_return_t semaphore_destroy(task_t task, + * semaphore_t semaphore); + */ + +extern kern_return_t semaphore_signal (semaphore_t semaphore); +extern kern_return_t semaphore_signal_all (semaphore_t semaphore); +extern kern_return_t semaphore_signal_thread (semaphore_t semaphore, + thread_act_t thread_act); + +extern kern_return_t semaphore_wait (semaphore_t semaphore); +extern kern_return_t semaphore_timedwait (semaphore_t semaphore, + mach_timespec_t wait_time); + +extern kern_return_t semaphore_wait_signal (semaphore_t wait_semaphore, + semaphore_t signal_semaphore); + +extern kern_return_t semaphore_timedwait_signal(semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + mach_timespec_t wait_time); + +extern kern_return_t semaphore_operator (int options, + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + thread_act_t thread, + 
mach_timespec_t wait_time); + +#endif /* _MACH_SEMAPHORE_H_ */ diff --git a/osfmk/mach/shared_memory_server.h b/osfmk/mach/shared_memory_server.h new file mode 100644 index 000000000..8d324489e --- /dev/null +++ b/osfmk/mach/shared_memory_server.h @@ -0,0 +1,224 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * File: kern/shared_memory_server.h + * + * protos and struct definitions for shared library + * server and interface + */ +#ifndef _SHARED_MEMORY_SERVER_H_ +#define _SHARED_MEMORY_SERVER_H_ + +#define SHARED_TEXT_REGION_SIZE 0x10000000 +#define SHARED_DATA_REGION_SIZE 0x10000000 +/* + * Note: the two masks below are useful because the assumption is + * made that these shared regions will always be mapped on natural boundaries + * i.e. 
if the size is 0x10000000 the object can be mapped at + * 0x20000000, or 0x30000000, but not 0x1000000 + */ +#define SHARED_TEXT_REGION_MASK 0xFFFFFFF +#define SHARED_DATA_REGION_MASK 0xFFFFFFF + +#define SHARED_ALTERNATE_LOAD_BASE 0x9000000 + +#include +#ifndef MACH_KERNEL +#include +#else +#include +#endif + +#ifdef MACH_KERNEL_PRIVATE + +#include +#include + +extern ipc_port_t shared_text_region_handle; +extern ipc_port_t shared_data_region_handle; +#else /* MACH_KERNEL_PRIVATE */ + +#ifdef KERNEL_PRIVATE +extern mach_port_t shared_text_region_handle; +extern mach_port_t shared_data_region_handle; +#endif +#endif /* MACH_KERNEL_PRIVATE*/ + +#ifdef KERNEL_PRIVATE + +extern vm_offset_t shared_file_mapping_array; + + +struct shared_region_task_mappings { + ipc_port_t text_region; + vm_size_t text_size; + ipc_port_t data_region; + vm_size_t data_size; + vm_offset_t region_mappings; + vm_offset_t client_base; + vm_offset_t alternate_base; + vm_offset_t alternate_next; + int flags; + vm_offset_t self; +}; + +#define SHARED_REGION_SYSTEM 0x1 +#define SHARED_REGION_FULL 0x2 + +typedef struct shared_region_task_mappings *shared_region_task_mappings_t; +#endif /* KERNEL_PRIVATE */ + + +#define SHARED_LIB_ALIAS 0x10 + + +/* flags field aliases for copyin_shared_file and load_shared_file */ + +/* IN */ +#define ALTERNATE_LOAD_SITE 0x1 +#define NEW_LOCAL_SHARED_REGIONS 0x2 + +/* OUT */ +#define SF_PREV_LOADED 0x1 + + +#define load_file_hash(file_object, size) \ + ((((natural_t)file_object) & 0xffffff) % size) + +#define VM_PROT_COW 0x8 /* must not interfere with normal prot assignments */ +#define VM_PROT_ZF 0x10 /* must not interfere with normal prot assignments */ + +struct sf_mapping { + vm_offset_t mapping_offset; + vm_size_t size; + vm_offset_t file_offset; + vm_prot_t protection; /* read/write/execute/COW/ZF */ + vm_offset_t cksum; +}; + +typedef struct sf_mapping sf_mapping_t; + + +#ifdef MACH_KERNEL_PRIVATE + +struct loaded_mapping { + vm_offset_t mapping_offset; + 
vm_size_t size; + vm_offset_t file_offset; + vm_prot_t protection; /* read/write/execute/COW/ZF */ + + struct loaded_mapping *next; +}; + +typedef struct loaded_mapping loaded_mapping_t; + +struct load_struct { + queue_chain_t links; + shared_region_mapping_t regions_instance; + int depth; + int file_object; + vm_offset_t base_address; + int mapping_cnt; + loaded_mapping_t *mappings; +}; + +#endif /* MACH_KERNEL_PRIVATE */ + +typedef struct load_struct load_struct_t; +typedef struct load_struct *load_struct_ptr_t; + +#ifdef MACH_KERNEL_PRIVATE + +struct load_file_ele { + union { + sf_mapping_t mapping; + load_struct_t element; + } u; +}; + +struct shared_file_info { + mutex_t lock; /* lock for the structure */ + queue_head_t *hash; /* for later perf enhance */ + int hash_size; + boolean_t hash_init; +}; + +typedef struct shared_file_info shared_file_info_t; + +extern kern_return_t +copyin_shared_file( + vm_offset_t mapped_file, + vm_size_t mapped_file_size, + vm_offset_t *base_address, + int map_cnt, + sf_mapping_t *mappings, + vm_object_t file_object, + shared_region_task_mappings_t shared_region, + int *flags); + +extern kern_return_t +shared_file_init( + ipc_port_t *shared_text_region_handle, + vm_size_t text_region_size, + ipc_port_t *shared_data_region_handle, + vm_size_t data_region_size, + vm_offset_t *shared_file_mapping_array); + +extern load_struct_t * +lsf_hash_lookup( + queue_head_t *hash_table, + void *file_object, + int size, + boolean_t alternate, + shared_region_task_mappings_t sm_info); + +extern load_struct_t * +lsf_hash_delete( + void *file_object, + vm_offset_t base_offset, + shared_region_task_mappings_t sm_info); + +extern void +lsf_hash_insert( + load_struct_t *entry, + shared_region_task_mappings_t sm_info); + +extern kern_return_t +lsf_load( + vm_offset_t mapped_file, + vm_size_t mapped_file_size, + vm_offset_t *base_address, + sf_mapping_t *mappings, + int map_cnt, + void *file_object, + int flags, + shared_region_task_mappings_t sm_info); 
+ +extern void +lsf_unload( + void *file_object, + vm_offset_t base_offset, + shared_region_task_mappings_t sm_info); + +#endif /* MACH_KERNEL_PRIVATE */ +#endif /* _SHARED_MEMORY_SERVER_H_ */ diff --git a/osfmk/mach/std_types.defs b/osfmk/mach/std_types.defs new file mode 100644 index 000000000..adf6056bd --- /dev/null +++ b/osfmk/mach/std_types.defs @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Mach kernel standard interface type declarations + */ + +#ifndef _MACH_STD_TYPES_DEFS_ +#define _MACH_STD_TYPES_DEFS_ + +type char = MACH_MSG_TYPE_CHAR; +type short = MACH_MSG_TYPE_INTEGER_16; +type int = MACH_MSG_TYPE_INTEGER_32; +type int32 = MACH_MSG_TYPE_INTEGER_32; +type boolean_t = MACH_MSG_TYPE_BOOLEAN; +type unsigned = MACH_MSG_TYPE_INTEGER_32; +type unsigned32 = MACH_MSG_TYPE_INTEGER_32; +type int64 = MACH_MSG_TYPE_INTEGER_64; +type unsigned64 = MACH_MSG_TYPE_INTEGER_64; +#include + +type kern_return_t = int; + +type pointer_t = ^array[] of MACH_MSG_TYPE_BYTE + ctype: vm_offset_t; + + +type mach_port_t = MACH_MSG_TYPE_COPY_SEND; +type mach_port_array_t = array[] of mach_port_t; + +type mach_port_name_t = MACH_MSG_TYPE_PORT_NAME + ctype: mach_port_name_t; +type mach_port_name_array_t = array[] of mach_port_name_t + ctype: mach_port_name_array_t; + +type mach_port_right_t = natural_t; + +type mach_port_type_t = natural_t; +type mach_port_type_array_t = array[] of mach_port_type_t; + +type mach_port_urefs_t = natural_t; +type mach_port_delta_t = integer_t; +type mach_port_seqno_t = natural_t; +type mach_port_mscount_t = unsigned; +type mach_port_msgcount_t = unsigned; +type mach_port_rights_t = unsigned; +type mach_msg_id_t = integer_t; +type mach_msg_type_name_t = unsigned; +type mach_msg_options_t = integer_t; + +type mach_port_move_receive_t = MACH_MSG_TYPE_MOVE_RECEIVE + ctype: mach_port_t; +type mach_port_copy_send_t = MACH_MSG_TYPE_COPY_SEND + ctype: 
mach_port_t; +type mach_port_make_send_t = MACH_MSG_TYPE_MAKE_SEND + ctype: mach_port_t; +type mach_port_move_send_t = MACH_MSG_TYPE_MOVE_SEND + ctype: mach_port_t; +type mach_port_make_send_once_t = MACH_MSG_TYPE_MAKE_SEND_ONCE + ctype: mach_port_t; +type mach_port_move_send_once_t = MACH_MSG_TYPE_MOVE_SEND_ONCE + ctype: mach_port_t; + +type mach_port_receive_t = MACH_MSG_TYPE_PORT_RECEIVE + ctype: mach_port_t; +type mach_port_send_t = MACH_MSG_TYPE_PORT_SEND + ctype: mach_port_t; +type mach_port_send_once_t = MACH_MSG_TYPE_PORT_SEND_ONCE + ctype: mach_port_t; + +type mach_port_poly_t = polymorphic + ctype: mach_port_t; + +import ; + +#endif _MACH_STD_TYPES_DEFS_ diff --git a/osfmk/mach/std_types.h b/osfmk/mach/std_types.h new file mode 100644 index 000000000..864dc86f6 --- /dev/null +++ b/osfmk/mach/std_types.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Mach standard external interface type definitions. + * + */ + +#ifndef STD_TYPES_H_ +#define STD_TYPES_H_ + +#include +#include +#include +#include + +#ifdef MACH_KERNEL_PRIVATE +#include +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* STD_TYPES_H_ */ diff --git a/osfmk/mach/sync.defs b/osfmk/mach/sync.defs new file mode 100644 index 000000000..fb011c404 --- /dev/null +++ b/osfmk/mach/sync.defs @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * File: mach/sync.defs + * Author: Joseph CaraDonna + * + * Exported kernel calls + * + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + sync 617000; + +#include +#include +#include + +#if KERNEL_SERVER +import ; +import ; +import ; +#endif /* KERNEL_SERVER */ + + +type semaphore_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: semaphore_t convert_port_to_semaphore(mach_port_t) + outtran: mach_port_t convert_semaphore_to_port(semaphore_t) + destructor: semaphore_dereference(semaphore_t) +#endif /* KERNEL_SERVER */ + ; + +type semaphore_consume_ref_t = mach_port_move_send_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: semaphore_t convert_port_to_semaphore(mach_port_t) + destructor: semaphore_dereference(semaphore_t) +#endif /* KERNEL_SERVER */ + ; + +type lock_set_t = mach_port_t + ctype: mach_port_t +#if KERNEL_SERVER + intran: lock_set_t convert_port_to_lock_set(mach_port_t) + outtran: mach_port_t convert_lock_set_to_port(lock_set_t) + destructor: lock_set_dereference(lock_set_t) +#endif /* KERNEL_SERVER */ + ; + + +routine semaphore_create( + task : task_t; + out semaphore : semaphore_t; + policy : int; + value : int); + +routine semaphore_destroy( + task : task_t; + semaphore : semaphore_consume_ref_t); + +routine semaphore_signal( + semaphore : semaphore_t); + +routine semaphore_signal_all( + semaphore : semaphore_t); + +routine semaphore_wait( + semaphore : semaphore_t); + 
+routine lock_set_create( + task : task_t; + out new_lock_set : lock_set_t; + n_ulocks : int; + policy : int); + +routine lock_set_destroy( + task : task_t; + lock_set : lock_set_t); + +routine lock_acquire( + lock_set : lock_set_t; + lock_id : int); + +routine lock_release( + lock_set : lock_set_t; + lock_id : int); + +routine lock_try( + lock_set : lock_set_t; + lock_id : int); + +routine lock_make_stable( + lock_set : lock_set_t; + lock_id : int); + +routine lock_handoff( + lock_set : lock_set_t; + lock_id : int); + +routine lock_handoff_accept( + lock_set : lock_set_t; + lock_id : int); + +routine semaphore_signal_thread( + semaphore : semaphore_t; + thread : thread_act_t); + +routine semaphore_timedwait( + semaphore : semaphore_t; + wait_time : mach_timespec_t); diff --git a/osfmk/mach/sync_policy.h b/osfmk/mach/sync_policy.h new file mode 100644 index 000000000..913ae11bb --- /dev/null +++ b/osfmk/mach/sync_policy.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _SYNC_POLICY_H_ +#define _SYNC_POLICY_H_ + +typedef int sync_policy_t; + +#define SYNC_POLICY_FIFO 0 +#define SYNC_POLICY_FIXED_PRIORITY 1 +#define SYNC_POLICY_MAX 1 + +#endif /*_SYNC_POLICY_H_*/ diff --git a/osfmk/mach/syscall_sw.h b/osfmk/mach/syscall_sw.h new file mode 100644 index 000000000..6ec412ae9 --- /dev/null +++ b/osfmk/mach/syscall_sw.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#ifndef _MACH_SYSCALL_SW_H_ +#define _MACH_SYSCALL_SW_H_ + +/* + * The machine-dependent "syscall_sw.h" file should + * define a macro for + * kernel_trap(trap_name, trap_number, arg_count) + * which will expand into assembly code for the + * trap. + * + * N.B.: When adding calls, do not put spaces in the macros. + */ + +#include + +/* + * These trap numbers should be taken from the + * table in . + */ + +kernel_trap(mach_reply_port,-26,0) +kernel_trap(thread_self_trap,-27,0) +kernel_trap(task_self_trap,-28,0) +kernel_trap(host_self_trap,-29,0) +kernel_trap(mach_msg_overwrite_trap,-32,9) +kernel_trap(semaphore_signal_trap, -33, 1) +kernel_trap(semaphore_signal_all_trap, -34, 1) +kernel_trap(semaphore_signal_thread_trap, -35, 2) +kernel_trap(semaphore_wait_trap,-36,1) +kernel_trap(semaphore_wait_signal_trap,-37,2) +kernel_trap(semaphore_timedwait_trap,-38,3) +kernel_trap(semaphore_timedwait_signal_trap,-39,4) + +kernel_trap(init_process,-41,0) +kernel_trap(map_fd,-43,5) +kernel_trap(task_for_pid,-45,3) +kernel_trap(pid_for_task,-46,2) +kernel_trap(macx_swapon,-48, 4) +kernel_trap(macx_swapoff,-49, 2) +kernel_trap(macx_triggers,-51, 4) + +kernel_trap(swtch_pri,-59,1) +kernel_trap(swtch,-60,0) +kernel_trap(syscall_thread_switch,-61,3) +kernel_trap(clock_sleep_trap,-62,5) + +kernel_trap(mach_timebase_info,-89,1) +kernel_trap(mach_wait_until,-90,2) +kernel_trap(mk_wait_until,-90,2) +kernel_trap(mk_timer_create,-91,0) +kernel_trap(mk_timer_destroy,-92,1) 
+kernel_trap(mk_timer_arm,-93,3) +kernel_trap(mk_timer_cancel,-94,2) + +kernel_trap(MKGetTimeBaseInfo,-95,5) + +#endif /* _MACH_SYSCALL_SW_H_ */ diff --git a/osfmk/mach/task.defs b/osfmk/mach/task.defs new file mode 100644 index 000000000..599cfada0 --- /dev/null +++ b/osfmk/mach/task.defs @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_port.defs + * Author: Rich Draves + * + * Exported kernel calls. + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + task 3400; + +#include +#include +/* + * Create a new task with an empty set of IPC rights, + * and having an address space constructed from the + * target task (or empty, if inherit_memory is FALSE). + */ +routine task_create( + target_task : task_t; + ledgers : ledger_array_t; + inherit_memory : boolean_t; + out child_task : task_t); + +/* + * Destroy the target task, causing all of its threads + * to be destroyed, all of its IPC rights to be deallocated, + * and all of its address space to be deallocated. + */ +routine task_terminate( + target_task : task_t); + +/* + * Returns the set of threads belonging to the target task. + */ +routine task_threads( + target_task : task_t; + out act_list : thread_act_array_t); + +/* + * Stash a handful of ports for the target task; child + * tasks inherit this stash at task_create time. + */ +routine mach_ports_register( + target_task : task_t; + init_port_set : mach_port_array_t = + ^array[] of mach_port_t); + +/* + * Retrieve the stashed ports for the target task. + */ +routine mach_ports_lookup( + target_task : task_t; + out init_port_set : mach_port_array_t = + ^array[] of mach_port_t); + +/* + * Returns information about the target task. + */ +routine task_info( + target_task : task_t; + flavor : task_flavor_t; + out task_info_out : task_info_t, CountInOut); + +/* + * Set task information. 
+ */ +routine task_set_info( + target_task : task_t; + flavor : task_flavor_t; + task_info_in : task_info_t); + +/* + * Increment the suspend count for the target task. + * No threads within a task may run when the suspend + * count for that task is non-zero. + */ +routine task_suspend( + target_task : task_t); + + +/* + * Decrement the suspend count for the target task, + * if the count is currently non-zero. If the resulting + * suspend count is zero, then threads within the task + * that also have non-zero suspend counts may execute. + */ +routine task_resume( + target_task : task_t); + +/* + * Returns the current value of the selected special port + * associated with the target task. + */ +routine task_get_special_port( + task : task_t; + which_port : int; + out special_port : mach_port_t); + +/* + * Set one of the special ports associated with the + * target task. + */ +routine task_set_special_port( + task : task_t; + which_port : int; + special_port : mach_port_t); + +/* + * Create a new thread within the target task, returning + * the port representing the first thr_act in that new thread. The + * initial execution state of the thread is undefined. + */ +routine thread_create( + parent_task : task_t; + out child_act : thread_act_t); + +/* + * Create a new thread within the target task, returning + * the port representing that new thread. The new thread + * is not suspended; its initial execution state is given + * by flavor and new_state. Returns the port representing + * the new thread. + */ +routine thread_create_running( + parent_task : task_t; + flavor : thread_state_flavor_t; + new_state : thread_state_t; + out child_act : thread_act_t); + +/* + * Set an exception handler for a task on one or more exception types. + * These handlers are invoked for all threads in the task if there are + * no thread-specific exception handlers or those handlers returned an + * error. 
+ */ +routine task_set_exception_ports( + task : task_t; + exception_mask : exception_mask_t; + new_port : mach_port_t; + behavior : exception_behavior_t; + new_flavor : thread_state_flavor_t); + + +/* + * Lookup some of the old exception handlers for a task + */ +routine task_get_exception_ports( + task : task_t; + exception_mask : exception_mask_t; + out masks : exception_mask_array_t; + out old_handlers : exception_handler_array_t, SameCount; + out old_behaviors : exception_behavior_array_t, SameCount; + out old_flavors : exception_flavor_array_t, SameCount); + + +/* + * Set an exception handler for a thread on one or more exception types. + * At the same time, return the previously defined exception handlers for + * those types. + */ +routine task_swap_exception_ports( + task : task_t; + exception_mask : exception_mask_t; + new_port : mach_port_t; + behavior : exception_behavior_t; + new_flavor : thread_state_flavor_t; + out masks : exception_mask_array_t; + out old_handlerss : exception_handler_array_t, SameCount; + out old_behaviors : exception_behavior_array_t, SameCount; + out old_flavors : exception_flavor_array_t, SameCount); + +/* + * Create and destroy lock_set and semaphore synchronizers on a + * per-task basis (i.e. the task owns them). + */ +routine lock_set_create( + task : task_t; + out new_lock_set : lock_set_t; + n_ulocks : int; + policy : int); + +routine lock_set_destroy( + task : task_t; + lock_set : lock_set_t); + +routine semaphore_create( + task : task_t; + out semaphore : semaphore_t; + policy : int; + value : int); + +routine semaphore_destroy( + task : task_t; + semaphore : semaphore_consume_ref_t); + +/* + * Set/get policy information for a task. 
+ * (Approved Mac OS X microkernel interface) + */ + +routine task_policy_set( + task : task_t; + flavor : task_policy_flavor_t; + policy_info : task_policy_t); + +routine task_policy_get( + task : task_t; + flavor : task_policy_flavor_t; +out policy_info : task_policy_t, CountInOut; +inout get_default : boolean_t); + +/* + * Task profiling. + */ +routine task_sample( + task : task_t; + reply : mach_port_make_send_t); + +/* + * JMM - Everything from here down is likely to go away soon + */ +/* + * OBSOLETE interface. + */ +routine task_policy( + task : task_t; + policy : policy_t; + base : policy_base_t; + set_limit : boolean_t; + change : boolean_t); + + +/* + * Establish a user-level handler for the specified + * system call. + */ +routine task_set_emulation( + target_port : task_t; + routine_entry_pt: vm_address_t; + routine_number : int); + +/* + * Get user-level handler entry points for all + * emulated system calls. + */ +routine task_get_emulation_vector( + task : task_t; + out vector_start : int; + out emulation_vector: emulation_vector_t); + +/* + * Establish user-level handlers for the specified + * system calls. Non-emulated system calls are specified + * with emulation_vector[i] == EML_ROUTINE_NULL. + */ +routine task_set_emulation_vector( + task : task_t; + vector_start : int; + emulation_vector: emulation_vector_t); + + +/* + * Establish restart pc for interrupted atomic sequences. + */ +routine task_set_ras_pc( + target_task : task_t; + basepc : vm_address_t; + boundspc : vm_address_t); + + +/* + * JMM - Want to eliminate kernel tasks and processor_set so + * keep them at the end. + */ +/* + * Create a new task in the kernel's address space with + * an empty set of IPC rights, with a map allocated from + * the kernel's map starting at map_base of length map_size. + */ +routine kernel_task_create( + target_task : task_t; + map_base : vm_offset_t; + map_size : vm_size_t; + out child_task : task_t); + +/* + * Assign task to processor set. 
+ */ +routine task_assign( + task : task_t; + new_set : processor_set_t; + assign_threads : boolean_t); + +/* + * Assign task to default set. + */ +routine task_assign_default( + task : task_t; + assign_threads : boolean_t); + +/* + * Get current assignment for task. + */ +routine task_get_assignment( + task : task_t; + out assigned_set : processor_set_name_t); + +/* + * OBSOLETE interface. + */ +routine task_set_policy( + task : task_t; + pset : processor_set_t; + policy : policy_t; + base : policy_base_t; + limit : policy_limit_t; + change : boolean_t); + +/* + * Registers the caller-specified RPC subsystem as a new object. + */ +routine mach_subsystem_create( + task : task_t; + user_subsys : user_subsystem_t; + out subsys : subsystem_t); diff --git a/osfmk/mach/task_info.h b/osfmk/mach/task_info.h new file mode 100644 index 000000000..c42cbd84f --- /dev/null +++ b/osfmk/mach/task_info.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.2 1998/04/29 17:36:54 mburg + * MK7.3 merger + * + * Revision 1.2.31.1 1998/02/03 09:33:56 gdt + * Merge up to MK7.3 + * [1998/02/03 09:17:49 gdt] + * + * Revision 1.2.29.1 1997/06/17 03:01:26 devrcs + * Added `TASK_SCHED_INFO.' + * [1996/03/18 15:24:59 rkc] + * + * Revision 1.2.21.3 1995/01/06 19:51:51 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup, ledgers, security, flavors. + * [1994/10/14 03:43:10 dwm] + * + * Revision 1.2.21.2 1994/09/23 02:42:46 ezf + * change marker to not FREE + * [1994/09/22 21:42:56 ezf] + * + * Revision 1.2.21.1 1994/08/07 20:50:07 bolinger + * Merge up to colo_b7. + * [1994/08/01 21:02:06 bolinger] + * + * Revision 1.2.17.4 1994/06/25 03:47:20 dwm + * mk6 CR98 - add flavor interface typedefs (task_flavor_t). + * [1994/06/24 21:54:58 dwm] + * + * Revision 1.2.17.3 1994/05/02 21:36:04 dwm + * Remove nmk15_compat support. + * [1994/05/02 21:09:10 dwm] + * + * Revision 1.2.17.2 1994/01/14 18:42:23 bolinger + * Add TASK_USER_DATA flavor of task_info() (and task_set_info()). + * [1994/01/14 18:20:52 bolinger] + * + * Revision 1.2.17.1 1994/01/12 17:57:26 dwm + * Fix "ifdef" NMK15_COMPAT to "if" + * [1994/01/12 17:31:13 dwm] + * + * Revision 1.2.3.5 1993/08/03 18:29:52 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 18:33:57 gm] + * + * Revision 1.2.3.4 1993/07/08 19:04:52 watkins + * New version of task_basic_info structure; old version + * is now under nmk15_compat. + * [1993/07/07 21:04:11 watkins] + * + * Revision 1.2.3.3 1993/06/29 21:55:50 watkins + * New definitions for scheduling control interfaces. + * [1993/06/29 20:50:59 watkins] + * + * Revision 1.2.3.2 1993/06/09 02:43:32 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:18:18 jeffc] + * + * Revision 1.2 1993/04/19 16:39:27 devrcs + * ansi C conformance changes + * [1993/02/02 18:54:59 david] + * + * Revision 1.1 1992/09/30 02:32:09 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:00:41 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:36:25 mrt + * Changed to new Mach copyright + * [91/02/01 17:21:17 mrt] + * + * Revision 2.2 90/05/03 15:48:36 dbg + * Added TASK_THREAD_TIMES_INFO flavor. + * [90/04/03 dbg] + * + * Revision 2.1 89/08/03 16:04:49 rwd + * Created. + * + * Revision 2.3 89/02/25 18:41:06 gm0w + * Changes for cleanup. + * + * 15-Jan-88 David Golub (dbg) at Carnegie-Mellon University + * Created, based on old task_statistics. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * Machine-independent task information structures and definitions. + * + * The definitions in this file are exported to the user. 
The kernel + * will translate its internal data structures to these structures + * as appropriate. + * + */ + +#ifndef TASK_INFO_H_ +#define TASK_INFO_H_ + +#include +#include +#include + +/* + * Generic information structure to allow for expansion. + */ +typedef natural_t task_flavor_t; +typedef integer_t *task_info_t; /* varying array of int */ + +#define TASK_INFO_MAX (1024) /* maximum array size */ +typedef integer_t task_info_data_t[TASK_INFO_MAX]; + +/* + * Currently defined information structures. + */ + +#define TASK_BASIC_INFO 4 /* basic information */ + +struct task_basic_info { + integer_t suspend_count; /* suspend count for task */ + vm_size_t virtual_size; /* number of virtual pages */ + vm_size_t resident_size; /* number of resident pages */ + time_value_t user_time; /* total user run time for + terminated threads */ + time_value_t system_time; /* total system run time for + terminated threads */ + policy_t policy; /* default policy for new threads */ +}; + +typedef struct task_basic_info task_basic_info_data_t; +typedef struct task_basic_info *task_basic_info_t; +#define TASK_BASIC_INFO_COUNT \ + (sizeof(task_basic_info_data_t) / sizeof(natural_t)) + + +#define TASK_EVENTS_INFO 2 /* various event counts */ + +struct task_events_info { + integer_t faults; /* number of page faults */ + integer_t pageins; /* number of actual pageins */ + integer_t cow_faults; /* number of copy-on-write faults */ + integer_t messages_sent; /* number of messages sent */ + integer_t messages_received; /* number of messages received */ + integer_t syscalls_mach; /* number of mach system calls */ + integer_t syscalls_unix; /* number of unix system calls */ + integer_t csw; /* number of context switches */ +}; +typedef struct task_events_info task_events_info_data_t; +typedef struct task_events_info *task_events_info_t; +#define TASK_EVENTS_INFO_COUNT \ + (sizeof(task_events_info_data_t) / sizeof(natural_t)) + +#define TASK_THREAD_TIMES_INFO 3 /* total times for live threads 
- + only accurate if suspended */ + +struct task_thread_times_info { + time_value_t user_time; /* total user run time for + live threads */ + time_value_t system_time; /* total system run time for + live threads */ +}; + +typedef struct task_thread_times_info task_thread_times_info_data_t; +typedef struct task_thread_times_info *task_thread_times_info_t; +#define TASK_THREAD_TIMES_INFO_COUNT \ + (sizeof(task_thread_times_info_data_t) / sizeof(natural_t)) + +#define TASK_SCHED_TIMESHARE_INFO 10 +#define TASK_SCHED_RR_INFO 11 +#define TASK_SCHED_FIFO_INFO 12 + +#define TASK_SECURITY_TOKEN 13 +#define TASK_SECURITY_TOKEN_COUNT \ + (sizeof(security_token_t) / sizeof(natural_t)) + +#define TASK_SCHED_INFO 14 + +#endif /* TASK_INFO_H_ */ diff --git a/osfmk/mach/task_ledger.h b/osfmk/mach/task_ledger.h new file mode 100644 index 000000000..874776fab --- /dev/null +++ b/osfmk/mach/task_ledger.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.2 1995/01/06 19:51:54 devrcs + * mk6 CR668 - 1.3b26 merge + * [1994/10/14 03:43:13 dwm] + * + * Revision 1.1.8.1 1994/09/23 02:42:55 ezf + * change marker to not FREE + * [1994/09/22 21:43:00 ezf] + * + * Revision 1.1.4.3 1993/09/17 21:35:29 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:28:49 robert] + * + * Revision 1.1.4.2 1993/06/04 15:13:57 jeffc + * CR9193 - MK5.0 merge. + * [1993/05/18 02:38:04 gm] + * + * Revision 3.0.2.2 1993/05/15 15:42:19 jph + * Merge MK5.0: change LEDGER_REAL_ITEMS to be LEDGER_N_ITEMS. + * [1993/05/15 15:21:21 jph] + * + * Revision 3.0 1992/12/31 22:13:53 ede + * Initial revision for OSF/1 R1.3 + * + * Revision 1.2 1991/08/15 19:16:53 devrcs + * Ledgers: indices for task_ledger exported routines. + * [91/07/18 11:04:31 dwm] + * + * $EndLog$ + */ + +/* + * Definitions for task ledger line items + */ +#define ITEM_THREADS 0 /* number of threads */ +#define ITEM_TASKS 1 /* number of tasks */ + +#define ITEM_VM 2 /* virtual space (bytes)*/ + +#define LEDGER_N_ITEMS 3 /* Total line items */ + +#define LEDGER_UNLIMITED 0 /* ignored item.maximum */ diff --git a/osfmk/mach/task_policy.h b/osfmk/mach/task_policy.h new file mode 100644 index 000000000..9a57a45fd --- /dev/null +++ b/osfmk/mach/task_policy.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 10 October 2000 (debo) + * Created. + * + * 30 November 2000 (debo) + * Final resolution of review feedback. + */ + +#ifndef _MACH_TASK_POLICY_H_ +#define _MACH_TASK_POLICY_H_ + +#include + +/* + * These are the calls for accessing the policy parameters + * of a particular task. + * + * The extra 'get_default' parameter to the second call is + * IN/OUT as follows: + * 1) if asserted on the way in it indicates that the default + * values should be returned, not the ones currently set, in + * this case 'get_default' will always be asserted on return; + * 2) if unasserted on the way in, the current settings are + * desired and if still unasserted on return, then the info + * returned reflects the current settings, otherwise if + * 'get_default' returns asserted, it means that there are no + * current settings due to other parameters taking precedence, + * and the default ones are being returned instead. 
+ */ + +typedef natural_t task_policy_flavor_t; +typedef integer_t *task_policy_t; + +/* +kern_return_t task_policy_set( + task_t task, + task_policy_flavor_t flavor, + task_policy_t policy_info, + mach_msg_type_number_t count); + +kern_return_t task_policy_get( + task_t task, + task_policy_flavor_t flavor, + task_policy_t policy_info, + mach_msg_type_number_t *count, + boolean_t *get_default); +*/ + +/* + * Defined flavors. + */ +/* + * TASK_CATEGORY_POLICY: + * + * This provides information to the kernel about the role + * of the task in the system. + * + * Parameters: + * + * role: Enumerated as follows: + * + * TASK_UNSPECIFIED is the default, since the role is not + * inherited from the parent. + * + * TASK_FOREGROUND_APPLICATION should be assigned when the + * task is a normal UI application in the foreground from + * the HI point of view. + * **N.B. There may be more than one of these at a given time. + * + * TASK_BACKGROUND_APPLICATION should be assigned when the + * task is a normal UI application in the background from + * the HI point of view. + * + * TASK_CONTROL_APPLICATION should be assigned to the unique + * UI application which implements the pop-up application dialog. + * There can only be one task at a time with this designation, + * which is assigned FCFS. + * + * TASK_GRAPHICS_SERVER should be assigned to the graphics + * management (window) server. There can only be one task at + * a time with this designation, which is assigned FCFS. 
+ */ + +#define TASK_CATEGORY_POLICY 1 + +struct task_category_policy { + enum { + TASK_UNSPECIFIED = 0, + TASK_FOREGROUND_APPLICATION, + TASK_BACKGROUND_APPLICATION, + TASK_CONTROL_APPLICATION, + TASK_GRAPHICS_SERVER, + } role; +}; + +typedef struct task_category_policy task_category_policy_data_t; +typedef struct task_category_policy *task_category_policy_t; + +#define TASK_CATEGORY_POLICY_COUNT \ + (sizeof (task_category_policy_data_t) / sizeof (integer_t)) + +#endif /* _MACH_TASK_POLICY_H_ */ diff --git a/osfmk/mach/task_special_ports.h b/osfmk/mach/task_special_ports.h new file mode 100644 index 000000000..f658d5b43 --- /dev/null +++ b/osfmk/mach/task_special_ports.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.7.2 1995/01/06 19:51:58 devrcs + * mk6 CR668 - 1.3b26 merge + * [1994/10/14 03:43:15 dwm] + * + * Revision 1.2.7.1 1994/09/23 02:43:04 ezf + * change marker to not FREE + * [1994/09/22 21:43:04 ezf] + * + * Revision 1.2.2.5 1993/09/03 15:53:54 jeffc + * CR9255 - Remove MACH_EXC_COMPAT + * [1993/08/26 15:10:56 jeffc] + * + * Revision 1.2.2.4 1993/08/05 19:09:45 jeffc + * CR9508 - Delete dead code. Remove MACH_IPC_COMPAT + * [1993/08/03 17:09:30 jeffc] + * + * Revision 1.2.2.3 1993/08/03 19:05:13 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * CR9600: Add task_special_port_t typedef. + * [1993/08/02 18:34:37 gm] + * + * Revision 1.2.2.2 1993/06/09 02:43:37 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:21 jeffc] + * + * Revision 1.2 1993/04/19 16:39:36 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:14 david] + * + * Revision 1.1 1992/09/30 02:32:11 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4.2.1 92/03/03 16:22:36 jeffreyh + * Changes from TRUNK + * [92/02/26 12:20:27 jeffreyh] + * + * Revision 2.5 92/01/15 13:44:54 rpd + * Changed MACH_IPC_COMPAT conditionals to default to not present. + * + * Revision 2.4 91/05/14 17:00:57 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:36:29 mrt + * Changed to new Mach copyright + * [91/02/01 17:21:29 mrt] + * + * Revision 2.2 90/06/02 15:00:03 rpd + * Converted to new IPC. + * [90/03/26 22:40:08 rpd] + * + * Revision 2.1 89/08/03 16:06:01 rwd + * Created. + * + * Revision 2.3 89/02/25 18:41:12 gm0w + * Changes for cleanup. + * + * 17-Jan-88 David Golub (dbg) at Carnegie-Mellon University + * Created. 
+ * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/task_special_ports.h + * + * Defines codes for special_purpose task ports. These are NOT + * port identifiers - they are only used for the task_get_special_port + * and task_set_special_port routines. + * + */ + +#ifndef _MACH_TASK_SPECIAL_PORTS_H_ +#define _MACH_TASK_SPECIAL_PORTS_H_ + +typedef int task_special_port_t; + +#define TASK_KERNEL_PORT 1 /* Represents task to the outside + world.*/ + +#define TASK_HOST_PORT 2 /* The host (priv) port for task. */ + +#define TASK_BOOTSTRAP_PORT 4 /* Bootstrap environment for task. */ + +#define TASK_WIRED_LEDGER_PORT 5 /* Wired resource ledger for task. */ + +#define TASK_PAGED_LEDGER_PORT 6 /* Paged resource ledger for task. 
*/ + +/* + * Definitions for ease of use + */ + +#define task_get_kernel_port(task, port) \ + (task_get_special_port((task), TASK_KERNEL_PORT, (port))) + +#define task_set_kernel_port(task, port) \ + (task_set_special_port((task), TASK_KERNEL_PORT, (port))) + +#define task_get_host_port(task, port) \ + (task_get_special_port((task), TASK_HOST_PORT, (port))) + +#define task_set_host_port(task, port) \ + (task_set_special_port((task), TASK_HOST_PORT, (port))) + +#define task_get_bootstrap_port(task, port) \ + (task_get_special_port((task), TASK_BOOTSTRAP_PORT, (port))) + +#define task_set_bootstrap_port(task, port) \ + (task_set_special_port((task), TASK_BOOTSTRAP_PORT, (port))) + +#define task_get_wired_ledger_port(task, port) \ + (task_get_special_port((task), TASK_WIRED_LEDGER_PORT, (port))) + +#define task_set_wired_ledger_port(task, port) \ + (task_set_special_port((task), TASK_WIRED_LEDGER_PORT, (port))) + +#define task_get_paged_ledger_port(task, port) \ + (task_get_special_port((task), TASK_PAGED_LEDGER_PORT, (port))) + +#define task_set_paged_ledger_port(task, port) \ + (task_set_special_port((task), TASK_PAGED_LEDGER_PORT, (port))) + +#endif /* _MACH_TASK_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/thread_act.defs b/osfmk/mach/thread_act.defs new file mode 100644 index 000000000..2637408a4 --- /dev/null +++ b/osfmk/mach/thread_act.defs @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/mach_port.defs + * Author: Rich Draves + * + * Exported kernel calls. + */ + +subsystem +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + thread_act 3600; + +#include +#include + +/* + * Destroy the target thread. + * + * JMM - For legacy reasons this consumes a reference to the + * target thread. 
This will have to change in the future because + * we want the interfaces to be able to be defined in more standard + * IDLs and transports, and most of them do not support the notion + * of reference ownership transfers (just sharing). + */ +routine thread_terminate( + target_act : thread_act_consume_ref_t); + + +/* + * Return the selected state information for the target + * thr_act. If the thr_act is currently executing, the results + * may be stale. [Flavor THREAD_STATE_FLAVOR_LIST provides a + * list of valid flavors for the target thread.] + */ +routine act_get_state( + target_act : thread_act_t; + flavor : int; + out old_state : thread_state_t, CountInOut); + +/* + * Set the selected state information for the target thread. + * If the thread is currently executing, the state change + * may be ill-defined. + */ +routine act_set_state( + target_act : thread_act_t; + flavor : int; + new_state : thread_state_t); + +/* + * Backward compatible old-style thread routines. + * These have different semantics than the new activation versions. + * + * Return the selected state information for the target + * thread. If the thread is currently executing, the results + * may be stale. [Flavor THREAD_STATE_FLAVOR_LIST provides a + * list of valid flavors for the target thr_act.] + */ +routine thread_get_state( + target_act : thread_act_t; + flavor : thread_state_flavor_t; + out old_state : thread_state_t, CountInOut); + +/* + * Set the selected state information for the target thread. + * If the thread is currently executing, the state change + * may be ill-defined. + */ +routine thread_set_state( + target_act : thread_act_t; + flavor : thread_state_flavor_t; + new_state : thread_state_t); + +/* + * Increment the suspend count for the target thread. + * Once this call has completed, the thread will not + * execute any further user or meta- instructions. 
+ * Once suspended, a thread may not execute again until + * its suspend count is zero, and the suspend count + * for its task is also zero. + */ +routine thread_suspend( + target_act : thread_act_t); + +/* + * Decrement the suspend count for the target thread, + * if that count is not already zero. + */ +routine thread_resume( + target_act : thread_act_t); + +/* + * Cause any user or meta- instructions currently being + * executed by the target thread to be aborted. [Meta- + * instructions consist of the basic traps for IPC + * (e.g., msg_send, msg_receive) and self-identification + * (e.g., task_self, thread_self, thread_reply). Calls + * described by MiG interfaces are not meta-instructions + * themselves.] + */ +routine thread_abort( + target_act : thread_act_t); + +/* + * Cause any user or meta- instructions currently being + * executed by the target thread to be aborted so that + * they are transparently restartable. This call fails + * if the abort would result in a non-restartable condition. + * Retry is the caller's responsibility. [Meta- + * instructions consist of the basic traps for IPC + * (e.g., msg_send, msg_receive) and self-identification + * (e.g., task_self, thread_self, thread_reply). Calls + * described by MiG interfaces are not meta-instructions + * themselves.] + */ +routine thread_abort_safely( + target_act : thread_act_t); + + +routine thread_depress_abort( + thread : thread_act_t); + + +/* + * Returns the current value of the selected special port + * associated with the target thread. + */ +routine thread_get_special_port( + thr_act : thread_act_t; + which_port : int; + out special_port : mach_port_t); + +/* + * Set one of the special ports associated with the + * target thread. + */ +routine thread_set_special_port( + thr_act : thread_act_t; + which_port : int; + special_port : mach_port_t); + +/* + * Returns information about the target thread. 
+ */ +routine thread_info( + target_act : thread_act_t; + flavor : thread_flavor_t; + out thread_info_out : thread_info_t, CountInOut); + +/* + * Set an exception handler for a thread on one or more exception types + */ +routine thread_set_exception_ports( + thread : thread_act_t; + exception_mask : exception_mask_t; + new_port : mach_port_t; + behavior : exception_behavior_t; + new_flavor : thread_state_flavor_t); + +/* + * Lookup some of the old exception handlers for a thread + */ +routine thread_get_exception_ports( + thread : thread_act_t; + exception_mask : exception_mask_t; + out masks : exception_mask_array_t; + out old_handlers : exception_handler_array_t, SameCount; + out old_behaviors : exception_behavior_array_t, SameCount; + out old_flavors : exception_flavor_array_t, SameCount); + +/* + * Set an exception handler for a thread on one or more exception types. + * At the same time, return the previously defined exception handlers for + * those types. + */ +routine thread_swap_exception_ports( + thread : thread_act_t; + exception_mask : exception_mask_t; + new_port : mach_port_t; + behavior : exception_behavior_t; + new_flavor : thread_state_flavor_t; + out masks : exception_mask_array_t; + out old_handlers : exception_handler_array_t, SameCount; + out old_behaviors : exception_behavior_array_t, SameCount; + out old_flavors : exception_flavor_array_t, SameCount); + +/* + * OBSOLETE interface. + */ +routine thread_policy( + thr_act : thread_act_t; + policy : policy_t; + base : policy_base_t; + set_limit : boolean_t); + +/* + * Set/get policy information for a thread. 
+ * (Approved Mac OS X microkernel interface) + */ + +routine thread_policy_set( + thread : thread_act_t; + flavor : thread_policy_flavor_t; + policy_info : thread_policy_t); + +routine thread_policy_get( + thread : thread_act_t; + flavor : thread_policy_flavor_t; +out policy_info : thread_policy_t, CountInOut; +inout get_default : boolean_t); + +/* + * Thread profiling + * This call is only valid for MACH_PROF compiled kernels. + * Otherwise, KERN_FAILURE is returned. + */ +routine thread_sample( + thread : thread_act_t; + reply : mach_port_make_send_t); + +/* + * JMM - Keep etap and processor_set related things at the end + * because they are likely to be removed. + */ +/* + * Sets the ETAP trace status of the target thread. + */ +routine etap_trace_thread( + target_act : thread_act_t; + trace_status : boolean_t); + +/* + * Assign thread to processor set. + */ +routine thread_assign( + thread : thread_act_t; + new_set : processor_set_t); + +/* + * Assign thread to default set. + */ +routine thread_assign_default( + thread : thread_act_t); + +/* + * Get current assignment for thread. + */ +routine thread_get_assignment( + thread : thread_act_t; + out assigned_set : processor_set_name_t); + + +/* + * OBSOLETE interface. + */ +routine thread_set_policy( + thr_act : thread_act_t; + pset : processor_set_t; + policy : policy_t; + base : policy_base_t; + limit : policy_limit_t); diff --git a/osfmk/mach/thread_info.h b/osfmk/mach/thread_info.h new file mode 100644 index 000000000..e55723193 --- /dev/null +++ b/osfmk/mach/thread_info.h @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.17.3 1995/01/06 19:52:05 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup, flavor typedefs + * [1994/10/14 03:43:17 dwm] + * + * Revision 1.2.17.2 1994/09/23 02:43:16 ezf + * change marker to not FREE + * [1994/09/22 21:43:12 ezf] + * + * Revision 1.2.17.1 1994/08/07 20:50:11 bolinger + * Merge up to colo_b7. + * [1994/08/01 21:02:09 bolinger] + * + * Revision 1.2.13.3 1994/06/25 03:47:23 dwm + * mk6 CR98 - add flavor interface typedefs. + * [1994/06/24 21:55:01 dwm] + * + * Revision 1.2.13.2 1994/05/02 21:36:08 dwm + * Remove nmk15_compat support. + * [1994/05/02 21:09:13 dwm] + * + * Revision 1.2.13.1 1994/01/12 17:57:31 dwm + * Fix "ifdef" NMK15_COMPAT to "if" + * [1994/01/12 17:31:16 dwm] + * + * Revision 1.2.3.5 1993/08/03 18:29:54 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 18:56:17 gm] + * + * Revision 1.2.3.4 1993/07/08 19:04:54 watkins + * New version of thread_basic_info structure; old version + * is now under nmk15_compat. + * [1993/07/07 21:04:15 watkins] + * + * Revision 1.2.3.3 1993/06/29 21:55:52 watkins + * New definitions for scheduling control interfaces. 
+ * [1993/06/29 20:51:04 watkins] + * + * Revision 1.2.3.2 1993/06/09 02:43:43 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:25 jeffc] + * + * Revision 1.2 1993/04/19 16:39:43 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:07 david] + * + * Revision 1.1 1992/09/30 02:32:13 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:01:06 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:36:34 mrt + * Changed to new Mach copyright + * [91/02/01 17:21:39 mrt] + * + * Revision 2.2 90/06/02 15:00:08 rpd + * Updated for new scheduling info. + * [90/03/26 22:40:55 rpd] + * + * Revision 2.1 89/08/03 16:06:07 rwd + * Created. + * + * Revision 2.4 89/02/25 18:41:18 gm0w + * Changes for cleanup. + * + * 4-Mar-88 David Black (dlb) at Carnegie-Mellon University + * Added TH_USAGE_SCALE for cpu_usage field. + * + * 15-Jan-88 David Golub (dbg) at Carnegie-Mellon University + * Changed to generic interface (variable-length array) to allow + * for expansion. Renamed to thread_info. + * + * 1-Jun-87 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/thread_info + * + * Thread information structure and definitions. + * + * The defintions in this file are exported to the user. The kernel + * will translate its internal data structures to these structures + * as appropriate. + * + */ + +#ifndef _MACH_THREAD_INFO_H_ +#define _MACH_THREAD_INFO_H_ + +#include +#include +#include +#include + +/* + * Generic information structure to allow for expansion. + */ +typedef natural_t thread_flavor_t; +typedef integer_t *thread_info_t; /* varying array of int */ + +#define THREAD_INFO_MAX (1024) /* maximum array size */ +typedef integer_t thread_info_data_t[THREAD_INFO_MAX]; + +/* + * Currently defined information. + */ +#define THREAD_BASIC_INFO 3 /* basic information */ + +struct thread_basic_info { + time_value_t user_time; /* user run time */ + time_value_t system_time; /* system run time */ + integer_t cpu_usage; /* scaled cpu usage percentage */ + policy_t policy; /* scheduling policy in effect */ + integer_t run_state; /* run state (see below) */ + integer_t flags; /* various flags (see below) */ + integer_t suspend_count; /* suspend count for thread */ + integer_t sleep_time; /* number of seconds that thread + has been sleeping */ +}; + +typedef struct thread_basic_info thread_basic_info_data_t; +typedef struct thread_basic_info *thread_basic_info_t; +#define THREAD_BASIC_INFO_COUNT \ + (sizeof(thread_basic_info_data_t) / sizeof(natural_t)) + +/* + * Scale factor for usage field. + */ + +#define TH_USAGE_SCALE 1000 + +/* + * Thread run states (state field). 
+ */ + +#define TH_STATE_RUNNING 1 /* thread is running normally */ +#define TH_STATE_STOPPED 2 /* thread is stopped */ +#define TH_STATE_WAITING 3 /* thread is waiting normally */ +#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible + wait */ +#define TH_STATE_HALTED 5 /* thread is halted at a + clean point */ + +/* + * Thread flags (flags field). + */ +#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */ +#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */ + +#define THREAD_SCHED_TIMESHARE_INFO 10 +#define THREAD_SCHED_RR_INFO 11 +#define THREAD_SCHED_FIFO_INFO 12 + +#endif /* _MACH_THREAD_INFO_H_ */ diff --git a/osfmk/mach/thread_policy.h b/osfmk/mach/thread_policy.h new file mode 100644 index 000000000..3f2e29cad --- /dev/null +++ b/osfmk/mach/thread_policy.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * + * 10 October 2000 (debo) + * Created. + * + * 30 November 2000 (debo) + * Final resolution of review feedback. 
+ */ + +#ifndef _MACH_THREAD_POLICY_H_ +#define _MACH_THREAD_POLICY_H_ + +#include + +/* + * These are the calls for accessing the policy parameters + * of a particular thread. + * + * The extra 'get_default' parameter to the second call is + * IN/OUT as follows: + * 1) if asserted on the way in it indicates that the default + * values should be returned, not the ones currently set, in + * this case 'get_default' will always be asserted on return; + * 2) if unasserted on the way in, the current settings are + * desired and if still unasserted on return, then the info + * returned reflects the current settings, otherwise if + * 'get_default' returns asserted, it means that there are no + * current settings due to other parameters taking precedence, + * and the default ones are being returned instead. + */ + +typedef natural_t thread_policy_flavor_t; +typedef integer_t *thread_policy_t; + +/* +kern_return_t thread_policy_set( + thread_act_t thread, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t count); + +kern_return_t thread_policy_get( + thread_act_t thread, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t *count, + boolean_t *get_default); +*/ + +/* + * Defined flavors. + */ +/* + * THREAD_STANDARD_POLICY: + * + * This is the standard [fair] scheduling mode, assigned to new + * threads. Specifying it explicitly is not typically required, + * but may be used to return a thread to the default mode setting. 
+ * + * Parameters: + * [none] + */ + +#define THREAD_STANDARD_POLICY 1 + +struct thread_standard_policy { + /* no data */ +}; + +typedef struct thread_standard_policy thread_standard_policy_data_t; +typedef struct thread_standard_policy *thread_standard_policy_t; + +#define THREAD_STANDARD_POLICY_COUNT \ + (sizeof (thread_standard_policy_data_t) / sizeof (integer_t)) + +/* + * THREAD_TIME_CONSTRAINT_POLICY: + * + * This scheduling mode is for threads which have real time + * constraints on their execution. + * + * Parameters: + * + * period: This is the nominal amount of time between separate + * processing arrivals, specified in absolute time units. A + * value of 0 indicates that there is no inherent periodicity in + * the computation. + * + * computation: This is the nominal amount of computation + * time needed during a separate processing arrival, specified + * in absolute time units. + * + * constraint: This is the maximum amount of real time that + * may elapse from the start of a separate processing arrival + * to the end of computation for logically correct functioning, + * specified in absolute time units. Must be (>= computation). + * Note that latency = (constraint - computation). + * + * preemptible: This indicates that the computation may be + * interrupted, subject to the constraint specified above. + */ + +#define THREAD_TIME_CONSTRAINT_POLICY 2 + +struct thread_time_constraint_policy { + natural_t period; + natural_t computation; + natural_t constraint; + boolean_t preemptible; +}; + +typedef struct thread_time_constraint_policy \ + thread_time_constraint_policy_data_t; +typedef struct thread_time_constraint_policy \ + *thread_time_constraint_policy_t; + +#define THREAD_TIME_CONSTRAINT_POLICY_COUNT \ + (sizeof (thread_time_constraint_policy_data_t) / sizeof (integer_t)) + +/* + * THREAD_PRECEDENCE_POLICY: + * + * This may be used to indicate the relative value of the + * computation compared to the other threads in the task. 
+ * + * Parameters: + * + * importance: The importance is specified as a signed value. + */ + +#define THREAD_PRECEDENCE_POLICY 3 + +struct thread_precedence_policy { + integer_t importance; +}; + +typedef struct thread_precedence_policy thread_precedence_policy_data_t; +typedef struct thread_precedence_policy *thread_precedence_policy_t; + +#define THREAD_PRECEDENCE_POLICY_COUNT \ + (sizeof (thread_precedence_policy_data_t) / sizeof (integer_t)) + +#endif /* _MACH_THREAD_POLICY_H_ */ diff --git a/osfmk/mach/thread_special_ports.h b/osfmk/mach/thread_special_ports.h new file mode 100644 index 000000000..da895b69a --- /dev/null +++ b/osfmk/mach/thread_special_ports.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 02:43:25 ezf + * change marker to not FREE + * [1994/09/22 21:43:16 ezf] + * + * Revision 1.2.2.5 1993/09/03 15:53:56 jeffc + * CR9255 - Remove MACH_EXC_COMPAT + * [1993/08/26 15:57:00 jeffc] + * + * Revision 1.2.2.4 1993/08/05 19:09:47 jeffc + * CR9508 - Delete dead code. Remove MACH_IPC_COMPAT + * [1993/08/03 17:09:33 jeffc] + * + * Revision 1.2.2.3 1993/08/03 18:29:57 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 18:56:38 gm] + * + * Revision 1.2.2.2 1993/06/09 02:43:48 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:28 jeffc] + * + * Revision 1.2 1993/04/19 16:39:50 devrcs + * make endif tags ansi compliant/include files + * [1993/02/20 21:45:05 david] + * + * Revision 1.1 1992/09/30 02:32:15 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4.2.1 92/03/03 16:22:38 jeffreyh + * Changes from TRUNK + * [92/02/26 12:20:46 jeffreyh] + * + * Revision 2.5 92/01/15 13:44:57 rpd + * Changed MACH_IPC_COMPAT conditionals to default to not present. + * + * Revision 2.4 91/05/14 17:01:15 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:36:38 mrt + * Changed to new Mach copyright + * [91/02/01 17:21:48 mrt] + * + * Revision 2.2 90/06/02 15:00:15 rpd + * Converted to new IPC. + * [90/03/26 22:41:20 rpd] + * + * Revision 2.1 89/08/03 16:06:13 rwd + * Created. + * + * Revision 2.3 89/02/25 18:41:23 gm0w + * Changes for cleanup. + * + * 17-Jan-88 David Golub (dbg) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/thread_special_ports.h + * + * Defines codes for special_purpose thread ports. These are NOT + * port identifiers - they are only used for the thread_get_special_port + * and thread_set_special_port routines. + * + */ + +#ifndef _MACH_THREAD_SPECIAL_PORTS_H_ +#define _MACH_THREAD_SPECIAL_PORTS_H_ + +#define THREAD_KERNEL_PORT 1 /* Represents the thread to the outside + world.*/ + +/* + * Definitions for ease of use + */ + +#define thread_get_kernel_port(thread, port) \ + (thread_get_special_port((thread), THREAD_KERNEL_PORT, (port))) + +#define thread_set_kernel_port(thread, port) \ + (thread_set_special_port((thread), THREAD_KERNEL_PORT, (port))) + +#endif /* _MACH_THREAD_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/thread_status.h b/osfmk/mach/thread_status.h new file mode 100644 index 000000000..1e2940d48 --- /dev/null +++ b/osfmk/mach/thread_status.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * Revision 1.2.14.4 1995/01/06 19:52:10 devrcs + * mk6 CR668 - 1.3b26 merge + * [1994/10/14 03:43:20 dwm] + * + * Revision 1.2.14.3 1994/09/23 02:43:32 ezf + * change marker to not FREE + * [1994/09/22 21:43:20 ezf] + * + * Revision 1.2.14.2 1994/08/07 20:50:16 bolinger + * Merge up to colo_b7. + * [1994/08/01 21:02:13 bolinger] + * + * Revision 1.2.14.1 1994/06/26 22:59:09 bolinger + * Temporary patch to enable thread state large enough to suit 860. + * [1994/06/26 22:55:25 bolinger] + * + * Revision 1.2.11.2 1994/06/25 03:47:26 dwm + * mk6 CR98 - use new MD THREAD_STATE_MAX + * [1994/06/24 21:55:03 dwm] + * + * Revision 1.2.11.1 1993/12/10 19:37:05 dwm + * Re-hack of workaround: KERNEL_STACK_SIZE back to 1 page; + * lower THREAD_STATE_MAX to 64 ints instead. + * [1993/12/10 19:36:37 dwm] + * + * Revision 1.2.3.3 1993/08/03 18:54:25 gm + * CR9600: Change thread_state_flavor_t typedef from unsigned int to int. + * [1993/08/02 18:57:55 gm] + * + * Revision 1.2.3.2 1993/06/09 02:43:53 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:18:31 jeffc] + * + * Revision 1.2 1993/04/19 16:39:58 devrcs + * ansi C conformance changes + * [1993/02/02 18:54:42 david] + * + * Add new thread_state types. [sp@gr.osf.org] + * [1992/12/23 13:12:08 david] + * + * Revision 1.1 1992/09/30 02:32:17 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 17:01:22 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:36:42 mrt + * Changed to new Mach copyright + * [91/02/01 17:21:56 mrt] + * + * Revision 2.1 89/08/03 16:06:18 rwd + * Created. + * + * Revision 2.4 89/02/25 18:41:29 gm0w + * Changes for cleanup. + * + * Revision 2.3 89/02/07 00:53:47 mwyoung + * Relocated from mach/thread_status.h + * + * Revision 2.2 88/08/25 18:21:12 mwyoung + * Adjusted include file references. + * [88/08/16 04:16:13 mwyoung] + * + * Add THREAD_STATE_FLAVOR_LIST; remove old stuff. + * [88/08/11 18:49:48 mwyoung] + * + * + * 15-Jan-88 David Golub (dbg) at Carnegie-Mellon University + * Replaced with variable-length array for flexibile interface. + * + * 28-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University + * Latest hacks to keep MiG happy wrt refarrays. + * + * 27-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/thread_status.h + * Author: Avadis Tevanian, Jr. + * + * This file contains the structure definitions for the user-visible + * thread state. This thread state is examined with the thread_get_state + * kernel call and may be changed with the thread_set_state kernel call. + * + */ + +#ifndef THREAD_STATUS_H_ +#define THREAD_STATUS_H_ + +/* + * The actual structure that comprises the thread state is defined + * in the machine dependent module. + */ +#include +#include +#include + +/* + * Generic definition for machine-dependent thread status. + */ + +typedef natural_t *thread_state_t; /* Variable-length array */ + +/* THREAD_STATE_MAX is now defined in */ +typedef int thread_state_data_t[THREAD_STATE_MAX]; + +#define THREAD_STATE_FLAVOR_LIST 0 /* List of valid flavors */ + +typedef int thread_state_flavor_t; +typedef thread_state_flavor_t *thread_state_flavor_array_t; + +#endif /* THREAD_STATUS_H_ */ diff --git a/osfmk/mach/thread_switch.h b/osfmk/mach/thread_switch.h new file mode 100644 index 000000000..e03383287 --- /dev/null +++ b/osfmk/mach/thread_switch.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_THREAD_SWITCH_H_ +#define _MACH_THREAD_SWITCH_H_ + +#include <mach/kern_return.h> +#include <mach/port.h> +#include <mach/message.h> + +/* + * Constant definitions for thread_switch trap. 
+ */ + +#define SWITCH_OPTION_NONE 0 +#define SWITCH_OPTION_DEPRESS 1 +#define SWITCH_OPTION_WAIT 2 + +#define valid_switch_option(opt) (0 <= (opt) && (opt) <= 2) + +extern kern_return_t thread_switch( + mach_port_name_t thread_name, + int option, + mach_msg_timeout_t option_time); + +#endif /* _MACH_THREAD_SWITCH_H_ */ diff --git a/osfmk/mach/time_value.h b/osfmk/mach/time_value.h new file mode 100644 index 000000000..c4a1aea7a --- /dev/null +++ b/osfmk/mach/time_value.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.11.2 1995/01/06 19:52:17 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:43:25 dwm] + * + * Revision 1.2.11.1 1994/09/23 02:43:49 ezf + * change marker to not FREE + * [1994/09/22 21:43:27 ezf] + * + * Revision 1.2.3.2 1993/06/09 02:44:03 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:38 jeffc] + * + * Revision 1.2 1993/04/19 16:40:14 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:30 david] + * + * Revision 1.1 1992/09/30 02:32:21 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/18 14:35:13 rpd + * Added mapped_time_value_t. + * [91/03/21 rpd] + * + * Revision 2.3 91/05/14 17:01:40 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:36:49 mrt + * Changed to new Mach copyright + * [91/02/01 17:22:07 mrt] + * + * Revision 2.1 89/08/03 16:06:24 rwd + * Created. + * + * Revision 2.4 89/02/25 18:41:34 gm0w + * Changes for cleanup. + * + * Revision 2.3 89/02/07 00:53:58 mwyoung + * Relocated from sys/time_value.h + * + * Revision 2.2 89/01/31 01:21:58 rpd + * TIME_MICROS_MAX should be 1 Million, not 10 Million. + * [88/10/12 dlb] + * + * 4-Jan-88 David Golub (dbg) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef TIME_VALUE_H_ +#define TIME_VALUE_H_ + +#include <mach/machine/vm_types.h> + +/* + * Time value returned by kernel. + */ + +struct time_value { + integer_t seconds; + integer_t microseconds; +}; +typedef struct time_value time_value_t; + +/* + * Macros to manipulate time values. Assume that time values + * are normalized (microseconds <= 999999). + */ +#define TIME_MICROS_MAX (1000000) + +#define time_value_add_usec(val, micros) { \ + if (((val)->microseconds += (micros)) \ + >= TIME_MICROS_MAX) { \ + (val)->microseconds -= TIME_MICROS_MAX; \ + (val)->seconds++; \ + } \ +} + +#define time_value_add(result, addend) { \ + (result)->microseconds += (addend)->microseconds; \ + (result)->seconds += (addend)->seconds; \ + if ((result)->microseconds >= TIME_MICROS_MAX) { \ + (result)->microseconds -= TIME_MICROS_MAX; \ + (result)->seconds++; \ + } \ +} + +/* + * Time value available through the mapped-time interface. 
+ * Read this mapped value with + * do { + * secs = mtime->seconds; + * usecs = mtime->microseconds; + * } while (secs != mtime->check_seconds); + */ + +typedef struct mapped_time_value { + integer_t seconds; + integer_t microseconds; + integer_t check_seconds; +} mapped_time_value_t; + +#endif /* TIME_VALUE_H_ */ diff --git a/osfmk/mach/upl.defs b/osfmk/mach/upl.defs new file mode 100644 index 000000000..f642ad3e8 --- /dev/null +++ b/osfmk/mach/upl.defs @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/upl.defs + * + * Abstract: + * Basic Mach external memory management interface declaration. + */ + +subsystem +#if KERNEL_USER + KernelUser +#endif KERNEL_USER +#if KERNEL_SERVER + KernelServer +#endif KERNEL_SERVER + upl 2050; + +#ifdef MACH_KERNEL +#include +#endif /* MACH_KERNEL */ + +#include +#include + + +routine vm_object_upl_request( + object :vm_object_entry_t; + in offset :memory_object_offset_t; + in size :vm_size_t; + out upl :mach_port_move_send_t; + out page_list :upl_page_list_ptr_t, CountInOut; + in cntrl_flags :integer_t); + +routine vm_pager_upl_request( + object :vm_object_entry_t; + in offset :memory_object_offset_t; + in size :vm_size_t; + in super_size :vm_size_t; + out upl :mach_port_move_send_t; + out page_list :upl_page_list_ptr_t, CountInOut; + in cntrl_flags :integer_t); + +routine vm_upl_abort( + upl_object :upl_object_entry_t; + in abort_cond :integer_t); + +routine vm_upl_abort_range( + upl_object :upl_object_entry_t; + offset :vm_offset_t; + size :vm_size_t; + in abort_cond :integer_t); + +routine vm_upl_commit( + upl_object :upl_object_entry_t; + in page_list :upl_page_list_ptr_t); + +routine vm_upl_commit_range( + upl_object :upl_object_entry_t; + offset :vm_offset_t; + size :vm_size_t; + in cntrl_flags :integer_t; + in page_list :upl_page_list_ptr_t); diff --git a/osfmk/mach/vm_attributes.h b/osfmk/mach/vm_attributes.h new file mode 
100644 index 000000000..6782a5324 --- /dev/null +++ b/osfmk/mach/vm_attributes.h @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.9.2 1998/02/02 09:22:22 gdt + * Add new function "MATTR_VAL_GET_INFO" to get shared/resident information + * about a page. This lets Linux display better statistics (e.g. 'top'). + * [1998/02/02 09:21:30 gdt] + * + * Revision 1.2.9.1 1997/09/12 17:16:06 stephen + * Add new MATTR_VAL_CACHE_SYNC which + * syncs I+D caches without necessarily + * flushing them. + * [1997/09/12 16:32:45 stephen] + * + * Revision 1.2.6.1 1994/09/23 02:43:58 ezf + * change marker to not FREE + * [1994/09/22 21:43:31 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:44:08 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:18:41 jeffc] + * + * Revision 1.2 1993/04/19 16:40:21 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:22 david] + * + * Revision 1.1 1992/09/30 02:32:22 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:02:37 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:37:24 mrt + * Changed to new Mach copyright + * [91/02/01 17:22:17 mrt] + * + * Revision 2.2 90/01/22 23:05:53 af + * Created. + * [89/12/08 af] + * + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/vm_attributes.h + * Author: Alessandro Forin + * + * Virtual memory attributes definitions. + * + * These definitions are in addition to the machine-independent + * ones (e.g. protection), and are only selectively supported + * on specific machine architectures. 
+ * + */ + +#ifndef VM_ATTRIBUTES_H_ +#define VM_ATTRIBUTES_H_ + +/* + * Types of machine-dependent attributes + */ +typedef unsigned int vm_machine_attribute_t; + +#define MATTR_CACHE 1 /* cachability */ +#define MATTR_MIGRATE 2 /* migrability */ +#define MATTR_REPLICATE 4 /* replicability */ + +/* + * Values for the above, e.g. operations on attribute + */ +typedef int vm_machine_attribute_val_t; + +#define MATTR_VAL_OFF 0 /* (generic) turn attribute off */ +#define MATTR_VAL_ON 1 /* (generic) turn attribute on */ +#define MATTR_VAL_GET 2 /* (generic) return current value */ + +#define MATTR_VAL_CACHE_FLUSH 6 /* flush from all caches */ +#define MATTR_VAL_DCACHE_FLUSH 7 /* flush from data caches */ +#define MATTR_VAL_ICACHE_FLUSH 8 /* flush from instruction caches */ +#define MATTR_VAL_CACHE_SYNC 9 /* sync I+D caches */ + +#define MATTR_VAL_GET_INFO 10 /* get page info (stats) */ + +#endif /* VM_ATTRIBUTES_H_ */ diff --git a/osfmk/mach/vm_behavior.h b/osfmk/mach/vm_behavior.h new file mode 100644 index 000000000..1fe81758c --- /dev/null +++ b/osfmk/mach/vm_behavior.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.10.2 1994/09/23 02:44:05 ezf + * change marker to not FREE + * [1994/09/22 21:43:34 ezf] + * + * Revision 1.1.10.1 1994/08/07 20:50:20 bolinger + * Import colo_shared revision into NMK18. + * [1994/08/02 16:41:42 bolinger] + * + * Revision 1.1.8.1 1994/07/08 20:09:11 dwm + * mk6 CR227 - bring vm_behavior constants up to spec. + * [1994/07/08 20:02:37 dwm] + * + * Revision 1.1.4.3 1993/09/17 21:35:31 robert + * change marker to OSF_FREE_COPYRIGHT + * [1993/09/17 21:28:53 robert] + * + * Revision 1.1.4.2 1993/06/04 15:14:02 jeffc + * CR9193 - MK5.0 merge. + * [1993/06/04 13:54:38 jeffc] + * + * Revision 3.0 92/12/31 22:14:09 ede + * Initial revision for OSF/1 R1.3 + * + * $EndLog$ + */ +/* + * File: mach/vm_behavior.h + * + * Virtual memory map behavior definitions. + * + */ + +#ifndef _MACH_VM_BEHAVIOR_H_ +#define _MACH_VM_BEHAVIOR_H_ + +/* + * Types defined: + * + * vm_behavior_t behavior codes. + */ + +typedef int vm_behavior_t; + +/* + * Enumeration of valid values for vm_behavior_t. + * These describe expected page reference behavior for + * for a given range of virtual memory. 
For implementation + * details see vm/vm_fault.c + */ + + +#define VM_BEHAVIOR_DEFAULT ((vm_behavior_t) 0) /* default */ +#define VM_BEHAVIOR_RANDOM ((vm_behavior_t) 1) /* random */ +#define VM_BEHAVIOR_SEQUENTIAL ((vm_behavior_t) 2) /* forward sequential */ +#define VM_BEHAVIOR_RSEQNTL ((vm_behavior_t) 3) /* reverse sequential */ + +#endif /*_MACH_VM_BEHAVIOR_H_*/ diff --git a/osfmk/mach/vm_inherit.h b/osfmk/mach/vm_inherit.h new file mode 100644 index 000000000..944c852b5 --- /dev/null +++ b/osfmk/mach/vm_inherit.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.9.1 1994/09/23 02:44:12 ezf + * change marker to not FREE + * [1994/09/22 21:43:38 ezf] + * + * Revision 1.2.6.1 1994/02/17 16:25:09 rwd + * Add VM_INHERIT_LAST_VALID + * [94/02/16 rwd] + * + * Revision 1.2.2.2 1993/06/09 02:44:13 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:44 jeffc] + * + * Revision 1.2 1993/04/19 16:40:30 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:45 david] + * + * Revision 1.1 1992/09/30 02:32:24 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 17:02:47 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:37:31 mrt + * Changed to new Mach copyright + * [91/02/01 17:22:24 mrt] + * + * Revision 2.1 89/08/03 16:06:30 rwd + * Created. + * + * Revision 2.3 89/02/25 18:42:18 gm0w + * Changes for cleanup. + * + * 16-Sep-85 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/vm_inherit.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Virtual memory map inheritance definitions. + * + */ + +#ifndef VM_INHERIT_H_ +#define VM_INHERIT_H_ + +/* + * Types defined: + * + * vm_inherit_t inheritance codes. + */ + +typedef unsigned int vm_inherit_t; /* might want to change this */ + +/* + * Enumeration of valid values for vm_inherit_t. + */ + +#define VM_INHERIT_SHARE ((vm_inherit_t) 0) /* share with child */ +#define VM_INHERIT_COPY ((vm_inherit_t) 1) /* copy into child */ +#define VM_INHERIT_NONE ((vm_inherit_t) 2) /* absent from child */ +#define VM_INHERIT_DONATE_COPY ((vm_inherit_t) 3) /* copy and delete */ + +#define VM_INHERIT_DEFAULT VM_INHERIT_COPY +#define VM_INHERIT_LAST_VALID VM_INHERIT_NONE + +#endif /* VM_INHERIT_H_ */ diff --git a/osfmk/mach/vm_map.defs b/osfmk/mach/vm_map.defs new file mode 100644 index 000000000..682d98577 --- /dev/null +++ b/osfmk/mach/vm_map.defs @@ -0,0 +1,426 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * @OSF_FREE_COPYRIGHT@
+ */
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ *	File:	mach/vm_map.defs
+ *	Author:	Rich Draves
+ *
+ *	Exported kernel calls.
+ */
+
+subsystem
+#if	KERNEL_SERVER
+	  KernelServer
+#endif	KERNEL_SERVER
+		       vm_map 3800;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+#include <mach_debug/mach_debug_types.defs>
+
+/*
+ *	Returns information about the contents of the virtual
+ *	address space of the target task at the specified
+ *	address.
The returned protection, inheritance, sharing + * and memory object values apply to the entire range described + * by the address range returned; the memory object offset + * corresponds to the beginning of the address range. + * [If the specified address is not allocated, the next + * highest address range is described. If no addresses beyond + * the one specified are allocated, the call returns KERN_NO_SPACE.] + */ +routine vm_region( + target_task : vm_map_t; + inout address : vm_address_t; + out size : vm_size_t; + flavor : vm_region_flavor_t; + out info : vm_region_info_t, CountInOut; + out object_name : memory_object_name_t = + MACH_MSG_TYPE_MOVE_SEND + ctype: mach_port_t); + +/* + * Allocate zero-filled memory in the address space + * of the target task, either at the specified address, + * or wherever space can be found (if anywhere is TRUE), + * of the specified size. The address at which the + * allocation actually took place is returned. + */ +routine vm_allocate( + target_task : vm_task_entry_t; + inout address : vm_address_t; + size : vm_size_t; + flags : int); + +/* + * Deallocate the specified range from the virtual + * address space of the target task. + */ +routine vm_deallocate( + target_task : vm_task_entry_t; + address : vm_address_t; + size : vm_size_t); + +/* + * Set the current or maximum protection attribute + * for the specified range of the virtual address + * space of the target task. The current protection + * limits the memory access rights of threads within + * the task; the maximum protection limits the accesses + * that may be given in the current protection. + * Protections are specified as a set of {read, write, execute} + * *permissions*. + */ +routine vm_protect( + target_task : vm_task_entry_t; + address : vm_address_t; + size : vm_size_t; + set_maximum : boolean_t; + new_protection : vm_prot_t); + +/* + * Set the inheritance attribute for the specified range + * of the virtual address space of the target task. 
+ * The inheritance value is one of {none, copy, share}, and + * specifies how the child address space should acquire + * this memory at the time of a task_create call. + */ +routine vm_inherit( + target_task : vm_task_entry_t; + address : vm_address_t; + size : vm_size_t; + new_inheritance : vm_inherit_t); + +/* + * Returns the contents of the specified range of the + * virtual address space of the target task. [The + * range must be aligned on a virtual page boundary, + * and must be a multiple of pages in extent. The + * protection on the specified range must permit reading.] + */ +routine vm_read( + target_task : vm_map_t; + address : vm_address_t; + size : vm_size_t; + out data : pointer_t); + +/* + * List corrollary to vm_read, returns mapped contents of specified + * ranges within target address space. + */ +routine vm_read_list( + target_task : vm_map_t; + inout data_list : vm_read_entry_t; + count : natural_t); + +/* + * Writes the contents of the specified range of the + * virtual address space of the target task. [The + * range must be aligned on a virtual page boundary, + * and must be a multiple of pages in extent. The + * protection on the specified range must permit writing.] + */ +routine vm_write( + target_task : vm_map_t; + address : vm_address_t; + data : pointer_t); + +/* + * Copy the contents of the source range of the virtual + * address space of the target task to the destination + * range in that same address space. [Both of the + * ranges must be aligned on a virtual page boundary, + * and must be multiples of pages in extent. The + * protection on the source range must permit reading, + * and the protection on the destination range must + * permit writing.] + */ +routine vm_copy( + target_task : vm_map_t; + source_address : vm_address_t; + size : vm_size_t; + dest_address : vm_address_t); + +/* + * Returns the contents of the specified range of the + * virtual address space of the target task. 
[There + * are no alignment restrictions, and the results will + * overwrite the area pointed to by data - which must + * already exist. The protection on the specified range + * must permit reading.] + */ +routine vm_read_overwrite( + target_task : vm_map_t; + address : vm_address_t; + size : vm_size_t; + data : vm_address_t; + out outsize : vm_size_t); + + +routine vm_msync( + target_task : vm_map_t; + address : vm_address_t; + size : vm_size_t; + sync_flags : vm_sync_t ); + +/* + * Set the paging behavior attribute for the specified range + * of the virtual address space of the target task. + * The behavior value is one of {default, random, forward + * sequential, reverse sequential} and indicates the expected + * page reference pattern for the specified range. + */ +routine vm_behavior_set( + target_task : vm_map_t; + address : vm_address_t; + size : vm_size_t; + new_behavior : vm_behavior_t); + + +/* + * Map a user-defined memory object into the virtual address + * space of the target task. If desired (anywhere is TRUE), + * the kernel will find a suitable address range of the + * specified size; else, the specific address will be allocated. + * + * The beginning address of the range will be aligned on a virtual + * page boundary, be at or beyond the address specified, and + * meet the mask requirements (bits turned on in the mask must not + * be turned on in the result); the size of the range, in bytes, + * will be rounded up to an integral number of virtual pages. + * + * The memory in the resulting range will be associated with the + * specified memory object, with the beginning of the memory range + * referring to the specified offset into the memory object. + * + * The mapping will take the current and maximum protections and + * the inheritance attributes specified; see the vm_protect and + * vm_inherit calls for a description of these attributes. 
+ * + * If desired (copy is TRUE), the memory range will be filled + * with a copy of the data from the memory object; this copy will + * be private to this mapping in this target task. Otherwise, + * the memory in this mapping will be shared with other mappings + * of the same memory object at the same offset (in this task or + * in other tasks). [The Mach kernel only enforces shared memory + * consistency among mappings on one host with similar page alignments. + * The user-defined memory manager for this object is responsible + * for further consistency.] + */ +routine vm_map( + target_task : vm_task_entry_t; + inout address : vm_address_t; + size : vm_size_t; + mask : vm_address_t; + flags : int; + memory_object : memory_object_t; + offset : vm_offset_t; + copy : boolean_t; + cur_protection : vm_prot_t; + max_protection : vm_prot_t; + inheritance : vm_inherit_t); + +/* + * Set/Get special properties of memory associated + * to some virtual address range, such as cachability, + * migrability, replicability. Machine-dependent. + */ +routine vm_machine_attribute( + target_task : vm_map_t; + address : vm_address_t; + size : vm_size_t; + attribute : vm_machine_attribute_t; + inout value : vm_machine_attribute_val_t); + +/* + * Map portion of a task's address space. + */ +routine vm_remap( + target_task : vm_map_t; + inout target_address : vm_address_t; + size : vm_size_t; + mask : vm_address_t; + anywhere : boolean_t; + src_task : vm_map_t; + src_address : vm_address_t; + copy : boolean_t; + out cur_protection : vm_prot_t; + out max_protection : vm_prot_t; + inheritance : vm_inherit_t); + +/* + * Require that all future virtual memory allocation + * allocates wired memory. Setting must_wire to FALSE + * disables the wired future feature. + */ +routine task_wire( + target_task : vm_map_t; + must_wire : boolean_t); + + +/* + * Allow application level processes to create named entries which + * correspond to mapped portions of their address space. 
These named
+ * entries can then be manipulated, shared with other processes in
+ * other address spaces and ultimately mapped in other address spaces
+ */
+
+routine mach_make_memory_entry(
+		target_task	:vm_map_t;
+	inout	size		:vm_size_t;
+		offset		:vm_offset_t;
+		permission	:vm_prot_t;
+	out	object_handle	:mach_port_move_send_t;
+		parent_entry	:mem_entry_name_port_t);
+
+/*
+ *	Give the caller information on the given location in a virtual
+ *	address space.  If a page is mapped return ref and dirty info.
+ */
+routine vm_map_page_query(
+		target_map	:vm_map_t;
+		offset		:vm_offset_t;
+	out	disposition	:integer_t;
+	out	ref_count	:integer_t);
+
+/*
+ *	Returns information about a region of memory.
+ *	Includes info about the chain of objects rooted at that region.
+ *	Only available in MACH_VM_DEBUG compiled kernels,
+ *	otherwise returns KERN_FAILURE.
+ */
+routine mach_vm_region_info(
+		task		: vm_map_t;
+		address		: vm_address_t;
+	out	region		: vm_info_region_t;
+	out	objects		: vm_info_object_array_t);
+
+routine vm_mapped_pages_info(
+		task		: vm_map_t;
+	out	pages		: page_address_array_t);
+
+/*
+ *	Allow application level processes to create named entries which
+ *	are backed by sub-maps which describe regions of address space.
+ *	These regions of space can have objects mapped into them and
+ *	in turn, can be mapped into target address spaces
+ */
+
+
+routine vm_region_object_create(
+		target_task	:vm_map_t;
+	in	size		:vm_size_t;
+	out	region_object	:mach_port_move_send_t);
+
+/*
+ * A recursive form of vm_region which probes submaps within the
+ * address space.
+ */ +routine vm_region_recurse( + target_task : vm_map_t; + inout address : vm_address_t; + out size : vm_size_t; + inout nesting_depth : natural_t; + out info : vm_region_recurse_info_t,CountInOut); + + +/* + * The routines below are temporary, meant for transitional use + * as their counterparts are moved from 32 to 64 bit data path + */ + + +routine vm_region_recurse_64( + target_task : vm_map_t; + inout address : vm_address_t; + out size : vm_size_t; + inout nesting_depth : natural_t; + out info : vm_region_recurse_info_64_t,CountInOut); + +routine mach_vm_region_info_64( + task : vm_map_t; + address : vm_address_t; + out region : vm_info_region_64_t; + out objects : vm_info_object_array_t); + +routine vm_region_64( + target_task : vm_map_t; + inout address : vm_address_t; + out size : vm_size_t; + flavor : vm_region_flavor_t; + out info : vm_region_info_64_t, CountInOut; + out object_name : memory_object_name_t = + MACH_MSG_TYPE_MOVE_SEND + ctype: mach_port_t); + +routine mach_make_memory_entry_64( + target_task :vm_map_t; + inout size :memory_object_size_t; + offset :memory_object_offset_t; + permission :vm_prot_t; + out object_handle :mach_port_move_send_t; + parent_entry :mem_entry_name_port_t); + + + +routine vm_map_64( + target_task : vm_task_entry_t; + inout address : vm_address_t; + size : vm_size_t; + mask : vm_address_t; + flags : int; + memory_object : memory_object_t; + offset : memory_object_offset_t; + copy : boolean_t; + cur_protection : vm_prot_t; + max_protection : vm_prot_t; + inheritance : vm_inherit_t); + diff --git a/osfmk/mach/vm_param.h b/osfmk/mach/vm_param.h new file mode 100644 index 000000000..aba1ed3ef --- /dev/null +++ b/osfmk/mach/vm_param.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/vm_param.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Machine independent virtual memory parameters. 
+ *
+ */
+
+#ifndef	_MACH_VM_PARAM_H_
+#define _MACH_VM_PARAM_H_
+
+#ifndef	KERNEL_PRIVATE
+
+#error YOU HAVE MADE A MISTAKE BY INCLUDING THIS FILE;
+#error
+#error THIS FILE SHOULD NOT BE VISIBLE TO USER PROGRAMS.
+#error
+#error USE <mach/machine/vm_param.h> TO GET MACHINE-DEPENDENT ADDRESS
+#error SPACE AND PAGE SIZE ITEMS.
+#error
+#error USE <mach/machine/vm_types.h> TO GET TYPE DECLARATIONS USED IN
+#error THE MACH KERNEL INTERFACE.
+#error
+#error IN ALL PROBABILITY, YOU SHOULD GET ALL OF THE TYPES USED IN THE
+#error INTERFACE FROM <mach/machine/vm_types.h>
+
+#endif	/* KERNEL_PRIVATE */
+
+#include <mach/machine/vm_param.h>
+#include <mach/machine/vm_types.h>
+
+/*
+ *	The machine independent pages are referred to as PAGES.  A page
+ *	is some number of hardware pages, depending on the target machine.
+ */
+
+/*
+ *	All references to the size of a page should be done with PAGE_SIZE
+ *	or PAGE_SHIFT.  The fact they are variables is hidden here so that
+ *	we can easily make them constant if we so desire.
+ */
+
+/*
+ *	Regardless whether it is implemented with a constant or a variable,
+ *	the PAGE_SIZE is assumed to be a power of two throughout the
+ *	virtual memory system implementation.
+ */
+
+#ifndef	PAGE_SIZE_FIXED
+extern	vm_size_t	page_size;
+extern	vm_size_t	page_mask;
+extern	int		page_shift;
+
+#define PAGE_SIZE	page_size	/* pagesize in addr units */
+#define PAGE_SHIFT	page_shift	/* number of bits to shift for pages */
+#define PAGE_MASK	page_mask	/* mask for off in page */
+
+#define PAGE_SIZE_64 (unsigned long long)page_size /* pagesize in addr units */
+#define PAGE_MASK_64 (unsigned long long)page_mask /* mask for off in page */
+#else	/* PAGE_SIZE_FIXED */
+#define	PAGE_SIZE	4096
+#define	PAGE_SHIFT	12
+#define	PAGE_MASK	(PAGE_SIZE-1)
+#define PAGE_SIZE_64	(unsigned long long)4096
+#define PAGE_MASK_64	(PAGE_SIZE_64-1)
+#endif	/* PAGE_SIZE_FIXED */
+
+#ifndef	ASSEMBLER
+/*
+ *	Convert addresses to pages and vice versa.
+ *	No rounding is used.
+ */ + +#define atop(x) (((natural_t)(x)) >> PAGE_SHIFT) +#define ptoa(x) ((vm_offset_t)((x) << PAGE_SHIFT)) + +/* + * Round off or truncate to the nearest page. These will work + * for either addresses or counts. (i.e. 1 byte rounds to 1 page + * bytes. + */ + +#define round_page(x) ((vm_offset_t)((((vm_offset_t)(x)) + PAGE_MASK) & ~PAGE_MASK)) +#define trunc_page(x) ((vm_offset_t)(((vm_offset_t)(x)) & ~PAGE_MASK)) + +#define round_page_64(x) ((unsigned long long)((((unsigned long long)(x)) + PAGE_MASK_64) & ~PAGE_MASK_64)) +#define trunc_page_64(x) ((unsigned long long)(((unsigned long long)(x)) & ~PAGE_MASK_64)) + +/* + * Determine whether an address is page-aligned, or a count is + * an exact page multiple. + */ + +#define page_aligned(x) ((((vm_object_offset_t) (x)) & PAGE_MASK) == 0) + +extern vm_size_t mem_size; /* size of physical memory (bytes) */ + +#endif /* ASSEMBLER */ +#endif /* _MACH_VM_PARAM_H_ */ diff --git a/osfmk/mach/vm_prot.h b/osfmk/mach/vm_prot.h new file mode 100644 index 000000000..9c6678214 --- /dev/null +++ b/osfmk/mach/vm_prot.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.3.8.1 1994/09/23 02:44:31 ezf + * change marker to not FREE + * [1994/09/22 21:43:46 ezf] + * + * Revision 1.3.2.2 1993/06/09 02:44:23 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:51 jeffc] + * + * Revision 1.3 1993/04/19 16:40:48 devrcs + * make endif tags ansi compliant/include files + * [1993/02/20 21:45:25 david] + * + * Revision 1.2 1992/12/07 21:29:28 robert + * integrate any changes below for 14.0 (branch from 13.16 base) + * + * Joseph Barrera (jsb) at Carnegie-Mellon University 05-Aug-92 + * Added VM_PROT_WANTS_COPY to solve copy-call race condition. + * [1992/12/06 20:25:47 robert] + * + * Revision 1.1 1992/09/30 02:32:28 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4.3.1 92/03/03 16:22:41 jeffreyh + * [David L. Black 92/02/22 17:03:43 dlb@osf.org] + * Add no change protection value for memory_object_lock_request. + * + * Revision 2.4 91/05/14 17:03:00 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:37:38 mrt + * Changed to new Mach copyright + * [91/02/01 17:22:39 mrt] + * + * Revision 2.2 90/01/22 23:05:57 af + * Removed execute permission from default protection. + * On the only machine that cares for execute permission (mips) + * this is an expensive liability: it requires keeping + * Icache consistent memory that never contains code. + * [89/12/15 af] + * + * Revision 2.1 89/08/03 16:06:47 rwd + * Created. + * + * Revision 2.3 89/02/25 18:42:29 gm0w + * Changes for cleanup. + * + * 6-Jun-85 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. 
+ * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/vm_prot.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Virtual memory protection definitions. + * + */ + +#ifndef VM_PROT_H_ +#define VM_PROT_H_ + +/* + * Types defined: + * + * vm_prot_t VM protection values. + */ + +typedef int vm_prot_t; + +/* + * Protection values, defined as bits within the vm_prot_t type + */ + +#define VM_PROT_NONE ((vm_prot_t) 0x00) + +#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */ +#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */ +#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */ + +/* + * The default protection for newly-created virtual memory + */ + +#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE) + +/* + * The maximum privileges possible, for parameter checking. + */ + +#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) + +/* + * An invalid protection value. 
+ * Used only by memory_object_lock_request to indicate no change + * to page locks. Using -1 here is a bad idea because it + * looks like VM_PROT_ALL and then some. + */ +#define VM_PROT_NO_CHANGE ((vm_prot_t) 0x08) + +/* + * When a caller finds that he cannot obtain write permission on a + * mapped entry, the following flag can be used. The entry will + * be made "needs copy" effectively copying the object (using COW), + * and write permission will be added to the maximum protections + * for the associated entry. + */ +#define VM_PROT_COPY ((vm_prot_t) 0x10) + + +/* + * Another invalid protection value. + * Used only by memory_object_data_request upon an object + * which has specified a copy_call copy strategy. It is used + * when the kernel wants a page belonging to a copy of the + * object, and is only asking the object as a result of + * following a shadow chain. This solves the race between pages + * being pushed up by the memory manager and the kernel + * walking down the shadow chain. + */ +#define VM_PROT_WANTS_COPY ((vm_prot_t) 0x10) + +#endif /* VM_PROT_H_ */ diff --git a/osfmk/mach/vm_region.h b/osfmk/mach/vm_region.h new file mode 100644 index 000000000..2f46e4e0d --- /dev/null +++ b/osfmk/mach/vm_region.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/01/16 17:22:27 bolinger + * Import files unchanged from osc1.3b11 into cnmk_shared. + * [1995/01/16 17:20:37 bolinger] + * + * Revision 1.1.3.2 1993/10/05 22:23:22 watkins + * Merge forward. + * [1993/10/05 22:05:05 watkins] + * + * Revision 1.1.1.2 1993/09/28 19:42:50 watkins + * Created to comply with spec. 
+ *
+ * $EndLog$
+ */
+/*
+ *	File:	mach/vm_region.h
+ *
+ *	Define the attributes of a task's memory region
+ *
+ */
+
+#ifndef	_MACH_VM_REGION_H_
+#define	_MACH_VM_REGION_H_
+
+#include <mach/boolean.h>
+#include <mach/vm_prot.h>
+#include <mach/vm_inherit.h>
+#include <mach/vm_behavior.h>
+
+/*
+ *	Types defined:
+ *
+ *	vm_region_info_t	memory region attributes
+ */
+
+#define VM_REGION_INFO_MAX	(1024)
+typedef int	*vm_region_info_t;
+typedef int	*vm_region_info_64_t;
+typedef int	*vm_region_recurse_info_t;
+typedef int	*vm_region_recurse_info_64_t;
+typedef int	 vm_region_flavor_t;
+typedef int	 vm_region_info_data_t[VM_REGION_INFO_MAX];
+
+#define VM_REGION_BASIC_INFO	10
+
+struct vm_region_basic_info_64 {
+	vm_prot_t		protection;
+	vm_prot_t		max_protection;
+	vm_inherit_t		inheritance;
+	boolean_t		shared;
+	boolean_t		reserved;
+	vm_object_offset_t	offset;
+	vm_behavior_t		behavior;
+	unsigned short		user_wired_count;
+};
+
+typedef struct vm_region_basic_info_64	*vm_region_basic_info_64_t;
+typedef struct vm_region_basic_info_64	 vm_region_basic_info_data_64_t;
+
+#define VM_REGION_BASIC_INFO_COUNT_64	\
+	(sizeof(vm_region_basic_info_data_64_t)/sizeof(int))
+
+
+struct vm_region_basic_info {
+	vm_prot_t		protection;
+	vm_prot_t		max_protection;
+	vm_inherit_t		inheritance;
+	boolean_t		shared;
+	boolean_t		reserved;
+#ifdef soon
+	vm_object_offset_t	offset;
+#else
+	vm_offset_t		offset;
+#endif
+	vm_behavior_t		behavior;
+	unsigned short		user_wired_count;
+};
+
+typedef struct vm_region_basic_info	*vm_region_basic_info_t;
+typedef struct vm_region_basic_info	 vm_region_basic_info_data_t;
+
+#define VM_REGION_BASIC_INFO_COUNT	\
+	(sizeof(vm_region_basic_info_data_t)/sizeof(int))
+
+
+
+#define VM_REGION_EXTENDED_INFO	11
+
+#define SM_COW             1
+#define SM_PRIVATE         2
+#define SM_EMPTY           3
+#define SM_SHARED          4
+#define SM_TRUESHARED      5
+#define SM_PRIVATE_ALIASED 6
+#define SM_SHARED_ALIASED  7
+
+/*
+ * For submap info,  the SM flags above are overlayed when a submap
+ * is encountered.
The field denotes whether or not machine level mapping + * information is being shared. PTE's etc. When such sharing is taking + * place the value returned is SM_TRUESHARED otherwise SM_PRIVATE is passed + * back. + */ + + +struct vm_region_extended_info { + vm_prot_t protection; + unsigned int user_tag; + unsigned int pages_resident; + unsigned int pages_shared_now_private; + unsigned int pages_swapped_out; + unsigned int pages_referenced; + unsigned int ref_count; + unsigned short shadow_depth; + unsigned char external_pager; + unsigned char share_mode; +}; + +typedef struct vm_region_extended_info *vm_region_extended_info_t; +typedef struct vm_region_extended_info vm_region_extended_info_data_t; + +#define VM_REGION_EXTENDED_INFO_COUNT \ + (sizeof(vm_region_extended_info_data_t)/sizeof(int)) + + +#define VM_REGION_TOP_INFO 12 + +struct vm_region_top_info { + unsigned int obj_id; + unsigned int ref_count; + unsigned int private_pages_resident; + unsigned int shared_pages_resident; + unsigned char share_mode; +}; + +typedef struct vm_region_top_info *vm_region_top_info_t; +typedef struct vm_region_top_info vm_region_top_info_data_t; + +#define VM_REGION_TOP_INFO_COUNT \ + (sizeof(vm_region_top_info_data_t)/sizeof(int)) + + + +/* + * vm_region_submap_info will return information on a submap or object. + * The user supplies a nesting level on the call. When a walk of the + * user's map is done and a submap is encountered, the nesting count is + * checked. If the nesting count is greater than 1 the submap is entered and + * the offset relative to the address in the base map is examined. If the + * nesting count is zero, the information on the submap is returned. + * The caller may thus learn about a submap and its contents by judicious + * choice of the base map address and nesting count. The nesting count + * allows penetration of recursively mapped submaps. 
If a submap is + * encountered as a mapped entry of another submap, the caller may bump + * the nesting count and call vm_region_recurse again on the target address + * range. The "is_submap" field tells the caller whether or not a submap + * has been encountered. + * + * Object only fields are filled in through a walking of the object shadow + * chain (where one is present), and a walking of the resident page queue. + * + */ +struct vm_region_submap_info { + vm_prot_t protection; /* present access protection */ + vm_prot_t max_protection; /* max avail through vm_prot */ + vm_inherit_t inheritance;/* behavior of map/obj on fork */ +#ifdef soon + vm_object_offset_t offset; /* offset into object/map */ +#else + vm_offset_t offset; /* offset into object/map */ +#endif + unsigned int user_tag; /* user tag on map entry */ + unsigned int pages_resident; /* only valid for objects */ + unsigned int pages_shared_now_private; /* only for objects */ + unsigned int pages_swapped_out; /* only for objects */ + unsigned int pages_referenced; /* only for objects */ + unsigned int ref_count; /* obj/map mappers, etc */ + unsigned short shadow_depth; /* only for obj */ + unsigned char external_pager; /* only for obj */ + unsigned char share_mode; /* see enumeration */ + boolean_t is_submap; /* submap vs obj */ + vm_behavior_t behavior; /* access behavior hint */ + vm_offset_t object_id; /* obj/map name, not a handle */ + unsigned short user_wired_count; +}; + +typedef struct vm_region_submap_info *vm_region_submap_info_t; +typedef struct vm_region_submap_info vm_region_submap_info_data_t; + +#define VM_REGION_SUBMAP_INFO_COUNT \ + (sizeof(vm_region_submap_info_data_t)/sizeof(int)) + + + +struct vm_region_submap_info_64 { + vm_prot_t protection; /* present access protection */ + vm_prot_t max_protection; /* max avail through vm_prot */ + vm_inherit_t inheritance;/* behavior of map/obj on fork */ + vm_object_offset_t offset; /* offset into object/map */ + unsigned int user_tag; /* user 
tag on map entry */ + unsigned int pages_resident; /* only valid for objects */ + unsigned int pages_shared_now_private; /* only for objects */ + unsigned int pages_swapped_out; /* only for objects */ + unsigned int pages_referenced; /* only for objects */ + unsigned int ref_count; /* obj/map mappers, etc */ + unsigned short shadow_depth; /* only for obj */ + unsigned char external_pager; /* only for obj */ + unsigned char share_mode; /* see enumeration */ + boolean_t is_submap; /* submap vs obj */ + vm_behavior_t behavior; /* access behavior hint */ + vm_offset_t object_id; /* obj/map name, not a handle */ + unsigned short user_wired_count; +}; + +typedef struct vm_region_submap_info_64 *vm_region_submap_info_64_t; +typedef struct vm_region_submap_info_64 vm_region_submap_info_data_64_t; + +#define VM_REGION_SUBMAP_INFO_COUNT_64 \ + (sizeof(vm_region_submap_info_data_64_t)/sizeof(int)) + + +struct vm_read_entry { + vm_address_t address; + vm_size_t size; +}; + +#define VM_MAP_ENTRY_MAX (256) + +typedef struct vm_read_entry vm_read_entry_t[VM_MAP_ENTRY_MAX]; + +#endif /*_MACH_VM_REGION_H_*/ diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h new file mode 100644 index 000000000..b86d0c51f --- /dev/null +++ b/osfmk/mach/vm_statistics.h @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.12.2 1995/01/06 19:52:25 devrcs + * mk6 CR668 - 1.3b26 merge + * added vm stats + * [1994/10/14 03:43:30 dwm] + * + * Revision 1.2.12.1 1994/09/23 02:44:40 ezf + * change marker to not FREE + * [1994/09/22 21:43:49 ezf] + * + * Revision 1.2.4.4 1993/08/03 18:30:01 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 18:58:32 gm] + * + * Revision 1.2.4.3 1993/06/15 20:28:27 brezak + * Make xxx_vm_statistic for now. + * [1993/06/14 14:11:07 brezak] + * + * Revision 1.2.2.2 1993/06/08 19:03:02 brezak + * Remove page_size from vm_statistics. + * + * Revision 1.1.4.2 1993/06/02 23:49:41 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:54 jeffc] + * + * Revision 1.2 1993/04/19 16:40:55 devrcs + * ansi C conformance changes + * [1993/02/02 18:55:38 david] + * + * Revision 1.1 1992/09/30 02:32:30 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 17:03:07 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:37:41 mrt + * Changed to new Mach copyright + * [91/02/01 17:22:49 mrt] + * + * Revision 2.1 89/08/03 16:06:55 rwd + * Created. + * + * Revision 2.4 89/02/25 18:42:35 gm0w + * Changes for cleanup. 
+ * + * Revision 2.3 89/02/07 00:54:39 mwyoung + * Relocated from sys/vm_statistics.h + * + * Revision 2.2 89/01/30 22:08:54 rpd + * Made variable declarations use "extern". + * [89/01/25 15:26:30 rpd] + * + * 30-Sep-86 Avadis Tevanian (avie) at Carnegie-Mellon University + * Changed "reclaim" to "inactive." + * + * 22-Aug-86 Michael Young (mwyoung) at Carnegie-Mellon University + * Made vm_stat structure kernel-only. + * + * 22-May-86 Avadis Tevanian (avie) at Carnegie-Mellon University + * Defined vm_statistics_data_t as a real typedef so that + * MatchMaker can deal with it. + * + * 14-Feb-86 Avadis Tevanian (avie) at Carnegie-Mellon University + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach/vm_statistics.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub + * + * Virtual memory statistics structure. 
+ * + */ + +#ifndef VM_STATISTICS_H_ +#define VM_STATISTICS_H_ + +#include + +struct vm_statistics { + integer_t free_count; /* # of pages free */ + integer_t active_count; /* # of pages active */ + integer_t inactive_count; /* # of pages inactive */ + integer_t wire_count; /* # of pages wired down */ + integer_t zero_fill_count; /* # of zero fill pages */ + integer_t reactivations; /* # of pages reactivated */ + integer_t pageins; /* # of pageins */ + integer_t pageouts; /* # of pageouts */ + integer_t faults; /* # of faults */ + integer_t cow_faults; /* # of copy-on-writes */ + integer_t lookups; /* object cache lookups */ + integer_t hits; /* object cache hits */ +}; + +typedef struct vm_statistics *vm_statistics_t; +typedef struct vm_statistics vm_statistics_data_t; + + +/* included for the vm_map_page_query call */ + +#define VM_PAGE_QUERY_PAGE_PRESENT 0x1 +#define VM_PAGE_QUERY_PAGE_FICTITIOUS 0x2 +#define VM_PAGE_QUERY_PAGE_REF 0x4 +#define VM_PAGE_QUERY_PAGE_DIRTY 0x8 + + +/* + * Each machine dependent implementation is expected to + * keep certain statistics. They may do this anyway they + * so choose, but are expected to return the statistics + * in the following structure. 
+ */ + +struct pmap_statistics { + integer_t resident_count; /* # of pages mapped (total)*/ + integer_t wired_count; /* # of pages wired */ +}; + +typedef struct pmap_statistics *pmap_statistics_t; + +#define VM_FLAGS_FIXED 0x0 +#define VM_FLAGS_ANYWHERE 0x1 +#define VM_FLAGS_ALIAS_MASK 0xFF000000 +#define VM_GET_FLAGS_ALIAS(flags, alias) \ + (alias) = ((flags) & VM_FLAGS_ALIAS_MASK) >> 24 +#define VM_SET_FLAGS_ALIAS(flags, alias) \ + (flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) | \ + (((alias) & ~VM_FLAGS_ALIAS_MASK) << 24)) + +#define VM_MEMORY_MALLOC 1 +#define VM_MEMORY_MALLOC_SMALL 2 +#define VM_MEMORY_MALLOC_LARGE 3 +#define VM_MEMORY_MALLOC_HUGE 4 +#define VM_MEMORY_SBRK 5// uninteresting -- no one should call + +#define VM_MEMORY_ANALYSIS_TOOL 10 + +#define VM_MEMORY_MACH_MSG 20 +#define VM_MEMORY_IOKIT 21 +#define VM_MEMORY_STACK 30 +#define VM_MEMORY_GUARD 31 +#define VM_MEMORY_SHARED_PMAP 32 +/* memory containing a dylib */ +#define VM_MEMORY_DYLIB 33 + +// Placeholders for now -- as we analyze the libraries and find how they +// use memory, we can make these labels more specific. +#define VM_MEMORY_APPKIT 40 +#define VM_MEMORY_FOUNDATION 41 +#define VM_MEMORY_COREGRAPHICS 42 +#define VM_MEMORY_CARBON 43 +#define VM_MEMORY_ATS 50 + + +/* memory allocated by the dynamic loader for itself */ +#define VM_MEMORY_DYLD 60 +/* malloc'd memory created by dyld */ +#define VM_MEMORY_DYLD_MALLOC 61 + +/* Reserve 240-255 for application */ +#define VM_MEMORY_APPLICATION_SPECIFIC_1 240 +#define VM_MEMORY_APPLICATION_SPECIFIC_16 255 + +#define VM_MAKE_TAG(tag) (tag<<24) +#endif /* VM_STATISTICS_H_ */ diff --git a/osfmk/mach/vm_sync.h b/osfmk/mach/vm_sync.h new file mode 100644 index 000000000..e1bbc3fa3 --- /dev/null +++ b/osfmk/mach/vm_sync.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:31 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:46 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.7.1 1994/09/23 02:44:50 ezf + * change marker to not FREE + * [1994/09/22 21:43:54 ezf] + * + * Revision 1.2.3.3 1993/06/22 15:18:34 sp + * Add definition of VM_SYNC_SYNCHRONOUS [david@gr.osf.org] + * [1993/06/21 13:00:18 sp] + * + * Revision 1.2.3.2 1993/06/09 02:44:33 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:58 jeffc] + * + * Revision 1.2 1993/04/19 16:41:03 devrcs + * Made compatible with other mach .h files. + * [1993/03/15 17:34:44 david] + * + * New for vm_sync definitions. + * [1993/03/03 12:39:16 david] + * + * $EndLog$ + */ +/* + * File: mach/vm_sync.h + * + * Virtual memory synchronisation definitions. + * + */ + +#ifndef VM_SYNC_H_ +#define VM_SYNC_H_ + +typedef unsigned vm_sync_t; + +/* + * Synchronization flags, defined as bits within the vm_sync_t type + */ + +#define VM_SYNC_ASYNCHRONOUS ((vm_sync_t) 0x01) +#define VM_SYNC_SYNCHRONOUS ((vm_sync_t) 0x02) +#define VM_SYNC_INVALIDATE ((vm_sync_t) 0x04) +#define VM_SYNC_KILLPAGES ((vm_sync_t) 0x08) +#define VM_SYNC_DEACTIVATE ((vm_sync_t) 0x10) + +#endif /* VM_SYNC_H_ */ diff --git a/osfmk/mach/vm_types.h b/osfmk/mach/vm_types.h new file mode 100644 index 000000000..a47492be7 --- /dev/null +++ b/osfmk/mach/vm_types.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +#ifndef MACH_VM_TYPES_H_ +#define MACH_VM_TYPES_H_ + +#include + +typedef vm_offset_t pointer_t; +typedef vm_offset_t vm_address_t; +typedef unsigned long long vm_object_offset_t; + +#ifdef KERNEL_PRIVATE + +#if !defined(MACH_KERNEL_PRIVATE) +/* + * Use specifically typed null structures for these in + * other parts of the kernel to enable compiler warnings + * about type mismatches, etc... Otherwise, these would + * be void*. + */ +struct upl ; +struct vm_map ; +struct vm_object ; +struct vm_map_copy ; +#else +typedef struct upl *upl_t; +#endif /* !MACH_KERNEL_PRIVATE */ + +typedef struct vm_map *vm_map_t; +typedef struct vm_object *vm_object_t; +typedef struct vm_map_copy *vm_map_copy_t; + +#endif + +#endif /* MACH_VM_TYPES_H_ */ + + diff --git a/osfmk/mach_debug/Makefile b/osfmk/mach_debug/Makefile new file mode 100644 index 000000000..be1dbe1bd --- /dev/null +++ b/osfmk/mach_debug/Makefile @@ -0,0 +1,31 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +MIG_DEFS = mach_debug_types.defs + +DATAFILES = \ + mach_debug.h \ + hash_info.h ipc_info.h vm_info.h zone_info.h \ + page_info.h mach_debug_types.h \ + ${MIG_DEFS} + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = mach_debug + +EXPORT_MI_LIST = ${DATAFILES} + 
+EXPORT_MI_DIR = mach_debug + +COMP_FILES = + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/mach_debug/hash_info.h b/osfmk/mach_debug/hash_info.h new file mode 100644 index 000000000..bb8b5e3a2 --- /dev/null +++ b/osfmk/mach_debug/hash_info.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:45 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:17 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.10.2 1995/01/06 19:52:35 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:43:33 dwm] + * + * Revision 1.2.10.1 1994/09/23 02:45:09 ezf + * change marker to not FREE + * [1994/09/22 21:44:01 ezf] + * + * Revision 1.2.3.2 1993/06/09 02:44:38 gm + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:19:01 jeffc] + * + * Revision 1.2 1993/04/19 16:41:12 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:42 david] + * + * Revision 1.1 1992/09/30 02:32:32 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:03:21 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:37:46 mrt + * Changed to new Mach copyright + * [91/02/01 17:28:22 mrt] + * + * Revision 2.2 91/01/08 15:18:59 rpd + * Created. + * [91/01/02 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_DEBUG_HASH_INFO_H_ +#define _MACH_DEBUG_HASH_INFO_H_ + +/* + * Remember to update the mig type definitions + * in mach_debug_types.defs when adding/removing fields. 
+ */ + +typedef struct hash_info_bucket { + natural_t hib_count; /* number of records in bucket */ +} hash_info_bucket_t; + +typedef hash_info_bucket_t *hash_info_bucket_array_t; + +#endif /* _MACH_DEBUG_HASH_INFO_H_ */ diff --git a/osfmk/mach_debug/ipc_info.h b/osfmk/mach_debug/ipc_info.h new file mode 100644 index 000000000..f3ec4fac8 --- /dev/null +++ b/osfmk/mach_debug/ipc_info.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:45 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:17 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.13.2 1995/01/06 19:52:40 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:43:35 dwm] + * + * Revision 1.2.13.1 1994/09/23 02:45:18 ezf + * change marker to not FREE + * [1994/09/22 21:44:05 ezf] + * + * Revision 1.2.3.3 1993/09/09 16:07:52 jeffc + * CR9745 - Delete message accepted notifications + * [1993/09/03 20:45:48 jeffc] + * + * Revision 1.2.3.2 1993/06/09 02:44:43 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:19:04 jeffc] + * + * Revision 1.2 1993/04/19 16:41:20 devrcs + * ansi C conformance changes + * [1993/02/02 18:56:50 david] + * + * Revision 1.1 1992/09/30 02:32:34 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5.4.2 92/04/08 15:45:00 jeffreyh + * Back out Mainline changes. Revert back to revision 2.5. + * [92/04/07 10:29:40 jeffreyh] + * + * Revision 2.5 91/05/14 17:03:28 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:37:50 mrt + * Changed to new Mach copyright + * [91/02/01 17:28:30 mrt] + * + * Revision 2.3 91/01/08 15:19:05 rpd + * Moved ipc_info_bucket_t to mach_debug/hash_info.h. + * [91/01/02 rpd] + * + * Revision 2.2 90/06/02 15:00:28 rpd + * Created for new IPC. + * [90/03/26 23:45:14 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: mach_debug/ipc_info.h + * Author: Rich Draves + * Date: March, 1990 + * + * Definitions for the IPC debugging interface. + */ + +#ifndef _MACH_DEBUG_IPC_INFO_H_ +#define _MACH_DEBUG_IPC_INFO_H_ + +#include +#include +#include + +/* + * Remember to update the mig type definitions + * in mach_debug_types.defs when adding/removing fields. + */ + + +typedef struct ipc_info_space { + natural_t iis_genno_mask; /* generation number mask */ + natural_t iis_table_size; /* size of table */ + natural_t iis_table_next; /* next possible size of table */ + natural_t iis_tree_size; /* size of tree */ + natural_t iis_tree_small; /* # of small entries in tree */ + natural_t iis_tree_hash; /* # of hashed entries in tree */ +} ipc_info_space_t; + + +typedef struct ipc_info_name { + mach_port_name_t iin_name; /* port name, including gen number */ +/*boolean_t*/integer_t iin_collision; /* collision at this entry? 
*/ + mach_port_type_t iin_type; /* straight port type */ + mach_port_urefs_t iin_urefs; /* user-references */ + vm_offset_t iin_object; /* object pointer */ + natural_t iin_next; /* marequest/next in free list */ + natural_t iin_hash; /* hash index */ +} ipc_info_name_t; + +typedef ipc_info_name_t *ipc_info_name_array_t; + + +typedef struct ipc_info_tree_name { + ipc_info_name_t iitn_name; + mach_port_name_t iitn_lchild; /* name of left child */ + mach_port_name_t iitn_rchild; /* name of right child */ +} ipc_info_tree_name_t; + +typedef ipc_info_tree_name_t *ipc_info_tree_name_array_t; + +#endif /* _MACH_DEBUG_IPC_INFO_H_ */ diff --git a/osfmk/mach_debug/mach_debug.h b/osfmk/mach_debug/mach_debug.h new file mode 100644 index 000000000..8f1a6f095 --- /dev/null +++ b/osfmk/mach_debug/mach_debug.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) Apple Computer 1998 + * ALL Rights Reserved + */ +/* + * This file represents the interfaces that used to come + * from creating the user headers from the mach_debug.defs file. 
+ * Because mach_debug.defs was decomposed, this file now just + * wraps up all the new interface headers generated from + * each of the new .defs resulting from that decomposition. + */ +#ifndef _MACH_DEBUG_MACH_DEBUG_H_ +#define _MACH_DEBUG_MACH_DEBUG_H_ + +#include + +#include +#include +#include + +#endif /* _MACH_DEBUG_MACH_DEBUG_H_ */ + + diff --git a/osfmk/mach_debug/mach_debug_types.defs b/osfmk/mach_debug/mach_debug_types.defs new file mode 100644 index 000000000..ce06a74d0 --- /dev/null +++ b/osfmk/mach_debug/mach_debug_types.defs @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:45 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:17 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.18.1 1996/10/03 17:27:31 emcmanus + * Brought ipc_info_name_t, ipc_info_tree_name_t, and vm_info_object_t + * sizes into sync with the C reality. We should not have to do this + * by hand, but that's MiG for you. 
[CR 2244] + * [1996/09/17 16:35:23 emcmanus] + * + * Revision 1.2.10.2 1995/01/06 19:52:44 devrcs + * mk6 CR668 - 1.3b26 merge + * * Revision 1.2.3.5 1994/05/06 18:56:40 tmt + * Merged with osc1.3b19 + * Merge Alpha changes into osc1.312b source code. + * 64 bits cleanup. + * * End1.3merge + * [1994/11/02 18:32:27 dwm] + * + * Revision 1.2.10.1 1994/09/23 02:45:37 ezf + * change marker to not FREE + * [1994/09/22 21:44:13 ezf] + * + * Revision 1.2.3.3 1993/08/05 19:09:52 jeffc + * CR9508 - delete dead Mach3 code. Remove MACH_IPC_TYPED + * [1993/08/04 17:30:38 jeffc] + * + * Revision 1.2.3.2 1993/06/09 02:44:48 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:19:07 jeffc] + * + * Revision 1.2 1993/04/19 16:41:28 devrcs + * Merge untyped ipc: + * Introducing new MIG syntax for Untyped IPC (via compile option + * MACH_IPC_TYPED) + * [1993/02/17 23:46:03 travos] + * + * Revision 1.1 1992/09/30 02:23:07 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.9.4.2 92/04/08 15:45:11 jeffreyh + * Back out changes from TRUNK. Now back to a Revision 2.9 base. + * [92/04/07 10:30:25 jeffreyh] + * + * Revision 2.9 91/07/31 17:55:42 dbg + * Add symtab_name_t. + * [91/07/30 17:11:38 dbg] + * + * Revision 2.8 91/05/14 17:03:43 mrt + * Correcting copyright + * + * Revision 2.7 91/02/05 17:37:59 mrt + * Changed to new Mach copyright + * [91/02/01 17:28:58 mrt] + * + * Revision 2.6 91/01/08 16:18:08 rpd + * Changed ipc_info_bucket_t to hash_info_bucket_t. + * [91/01/02 rpd] + * + * Revision 2.5 90/10/25 14:46:18 rwd + * Updated vm_info_region_t size. + * [90/10/17 rpd] + * + * Revision 2.4 90/06/19 23:00:23 rpd + * Adjusted zone_info_t definition to account for new collectable field. + * [90/06/05 rpd] + * + * Revision 2.3 90/06/02 15:00:39 rpd + * Added vm_info_region_t, vm_info_object_t. + * [90/05/02 14:47:17 rpd] + * + * Converted to new IPC. + * [90/03/26 22:43:24 rpd] + * + * Revision 2.2 90/05/03 15:48:49 dbg + * Remove callout types. 
Add zone_name, zone_info, page_address + * types. + * [90/04/06 dbg] + * + * Revision 2.1 89/08/03 17:20:25 rwd + * Created. + * + * Revision 2.4 89/02/25 18:43:41 gm0w + * Changes for cleanup. + * + * Revision 2.3 89/01/15 16:32:43 rpd + * Updated includes for the new mach/ directory. + * [89/01/15 15:11:33 rpd] + * + * Revision 2.2 89/01/12 08:00:34 rpd + * Created. + * [89/01/12 04:21:37 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * Mach kernel debugging interface type declarations + */ + +#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_ +#define _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_ + +#include + +type zone_name_t = struct[80] of char; +type zone_name_array_t = array[] of zone_name_t; + +type zone_info_t = struct[9] of integer_t; +type zone_info_array_t = array[] of zone_info_t; + +type hash_info_bucket_t = struct[1] of natural_t; +type hash_info_bucket_array_t = array[] of hash_info_bucket_t; + +type ipc_info_space_t = struct[6] of natural_t; + +type ipc_info_name_t = struct[7] of natural_t; +type ipc_info_name_array_t = array[] of ipc_info_name_t; + +type ipc_info_tree_name_t = struct[9] of natural_t; +type ipc_info_tree_name_array_t = array[] of ipc_info_tree_name_t; + +type vm_info_region_t = struct[10] of natural_t; +type vm_info_region_64_t = struct[11] of natural_t; + +type vm_info_object_t = struct[21] of natural_t; +type vm_info_object_array_t = ^array[] of vm_info_object_t; + +type page_address_array_t = ^array[] of integer_t; + +type symtab_name_t = c_string[*:32]; + +import ; + +#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_ */ diff --git a/osfmk/mach_debug/mach_debug_types.h b/osfmk/mach_debug/mach_debug_types.h new file mode 100644 index 000000000..27e2684cc --- /dev/null +++ b/osfmk/mach_debug/mach_debug_types.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * Mach kernel debugging interface type declarations + */ + +#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_H_ +#define _MACH_DEBUG_MACH_DEBUG_TYPES_H_ + +#include +#include +#include +#include +#include + +typedef char symtab_name_t[32]; + +#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_H_ */ diff --git a/osfmk/mach_debug/page_info.h b/osfmk/mach_debug/page_info.h new file mode 100644 index 000000000..3f05d26a8 --- /dev/null +++ b/osfmk/mach_debug/page_info.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:45 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:17 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 02:45:56 ezf + * change marker to not FREE + * [1994/09/22 21:44:21 ezf] + * + * Revision 1.1.2.3 1993/07/28 18:01:46 jeffc + * CR9523 - Add prototypes to kernel. 
Protect this file + * against multiple inclusion + * [1993/07/28 12:23:48 jeffc] + * + * Revision 1.1.2.2 1993/06/02 23:50:21 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:19:13 jeffc] + * + * Revision 1.1 1992/09/30 02:32:37 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:04:01 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:38:10 mrt + * Changed to new Mach copyright + * [91/02/01 17:29:22 mrt] + * + * Revision 2.2 90/05/03 15:48:58 dbg + * Created. + * [90/04/06 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +#ifndef MACH_DEBUG_PAGE_INFO_H +#define MACH_DEBUG_PAGE_INFO_H + +#include + +typedef vm_offset_t *page_address_array_t; +#endif /* MACH_DEBUG_PAGE_INFO_H */ diff --git a/osfmk/mach_debug/template.mk b/osfmk/mach_debug/template.mk new file mode 100644 index 000000000..7673ffc01 --- /dev/null +++ b/osfmk/mach_debug/template.mk @@ -0,0 +1,59 @@ +# +# @OSF_COPYRIGHT@ +# +# +# HISTORY +# +# Revision 1.1.1.1 1998/09/22 21:05:45 wsanchez +# Import of Mac OS X kernel (~semeria) +# +# Revision 1.1.1.1 1998/03/07 02:26:17 wsanchez +# Import of OSF Mach kernel (~mburg) +# +# Revision 1.1.8.2 1994/09/23 02:46:03 ezf +# change marker to not FREE +# [1994/09/22 21:44:25 ezf] +# +# Revision 1.1.8.1 1994/06/13 19:58:36 dlb +# Merge MK6 and NMK17 +# [1994/06/13 16:22:58 dlb] +# +# Revision 1.1.6.1 1994/03/07 16:41:51 paire +# Added MIGKSARGS and MIGKSENV variables to MIGKSFLAGS definition. +# [94/02/28 paire] +# +# Revision 1.1.2.2 1993/08/04 19:32:37 gm +# CR9605: Add SUBDIRS to mach_kernel build process. +# [1993/08/03 13:30:22 gm] +# +# $EndLog$ + +VPATH = ..:../.. + +MIGFLAGS = -MD ${IDENT} +MIGKSFLAGS = -DKERNEL_SERVER ${MIGKSARGS} ${MIGKSENV} + +MACH_DEBUG_FILES = mach_debug_server.h mach_debug_server.c + +OTHERS = ${MACH_DEBUG_FILES} + +INCFLAGS = -I.. -I../.. +MDINCFLAGS = -I.. -I../.. + +DEPENDENCIES = + +.include <${RULES_MK}> + +.ORDER: ${MACH_DEBUG_FILES} + +${MACH_DEBUG_FILES}: mach_debug/mach_debug.defs + ${_MIG_} ${_MIGFLAGS_} ${MIGKSFLAGS} \ + -header /dev/null \ + -user /dev/null \ + -sheader mach_debug_server.h \ + -server mach_debug_server.c \ + ${mach_debug/mach_debug.defs:P} + +.if exists(depend.mk) +.include "depend.mk" +.endif diff --git a/osfmk/mach_debug/vm_info.h b/osfmk/mach_debug/vm_info.h new file mode 100644 index 000000000..38aa6e6b1 --- /dev/null +++ b/osfmk/mach_debug/vm_info.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: mach_debug/vm_info.h + * Author: Rich Draves + * Date: March, 1990 + * + * Definitions for the VM debugging interface. + */ + +#ifndef _MACH_DEBUG_VM_INFO_H_ +#define _MACH_DEBUG_VM_INFO_H_ + +#include +#include +#include +#include +#include + +/* + * Remember to update the mig type definitions + * in mach_debug_types.defs when adding/removing fields. + */ +typedef struct vm_info_region_64 { + vm_offset_t vir_start; /* start of region */ + vm_offset_t vir_end; /* end of region */ + vm_offset_t vir_object; /* the mapped object */ + vm_object_offset_t vir_offset; /* offset into object */ + boolean_t vir_needs_copy; /* does object need to be copied? */ + vm_prot_t vir_protection; /* protection code */ + vm_prot_t vir_max_protection; /* maximum protection */ + vm_inherit_t vir_inheritance; /* inheritance */ + natural_t vir_wired_count; /* number of times wired */ + natural_t vir_user_wired_count; /* number of times user has wired */ +} vm_info_region_64_t; + +typedef struct vm_info_region { + vm_offset_t vir_start; /* start of region */ + vm_offset_t vir_end; /* end of region */ + vm_offset_t vir_object; /* the mapped object */ + vm_offset_t vir_offset; /* offset into object */ + boolean_t vir_needs_copy; /* does object need to be copied? 
*/ + vm_prot_t vir_protection; /* protection code */ + vm_prot_t vir_max_protection; /* maximum protection */ + vm_inherit_t vir_inheritance; /* inheritance */ + natural_t vir_wired_count; /* number of times wired */ + natural_t vir_user_wired_count; /* number of times user has wired */ +} vm_info_region_t; + + +typedef struct vm_info_object { + vm_offset_t vio_object; /* this object */ + vm_size_t vio_size; /* object size (valid if internal) */ + unsigned int vio_ref_count; /* number of references */ + unsigned int vio_resident_page_count; /* number of resident pages */ + unsigned int vio_absent_count; /* number requested but not filled */ + vm_offset_t vio_copy; /* copy object */ + vm_offset_t vio_shadow; /* shadow object */ + vm_offset_t vio_shadow_offset; /* offset into shadow object */ + vm_offset_t vio_paging_offset; /* offset into memory object */ + memory_object_copy_strategy_t vio_copy_strategy; + /* how to handle data copy */ + vm_offset_t vio_last_alloc; /* offset of last allocation */ + /* many random attributes */ + unsigned int vio_paging_in_progress; + boolean_t vio_pager_created; + boolean_t vio_pager_initialized; + boolean_t vio_pager_ready; + boolean_t vio_can_persist; + boolean_t vio_internal; + boolean_t vio_temporary; + boolean_t vio_alive; + boolean_t vio_lock_in_progress; + boolean_t vio_lock_restart; +} vm_info_object_t; + +typedef vm_info_object_t *vm_info_object_array_t; + +#endif /* _MACH_DEBUG_VM_INFO_H_ */ diff --git a/osfmk/mach_debug/zone_info.h b/osfmk/mach_debug/zone_info.h new file mode 100644 index 000000000..60b56d694 --- /dev/null +++ b/osfmk/mach_debug/zone_info.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:45 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:17 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.12.2 1995/01/06 19:52:51 devrcs + * mk6 CR668 - 1.3b26 merge + * 64bit cleanup + * [1994/10/14 03:43:40 dwm] + * + * Revision 1.2.12.1 1994/09/23 02:46:19 ezf + * change marker to not FREE + * [1994/09/22 21:44:33 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:45:03 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:19:19 jeffc] + * + * Revision 1.2 1993/04/19 16:41:52 devrcs + * ansi C conformance changes + * [1993/02/02 18:57:07 david] + * + * Revision 1.1 1992/09/30 02:32:41 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/05/14 17:04:15 mrt + * Correcting copyright + * + * Revision 2.4 91/02/05 17:38:17 mrt + * Changed to new Mach copyright + * [91/02/01 17:29:40 mrt] + * + * Revision 2.3 90/06/19 23:00:29 rpd + * Added zi_ prefix to zone_info field names. + * Added zi_collectable field to zone_info. + * Added zn_ prefix to zone_name field names. + * [90/06/05 rpd] + * + * Revision 2.2 90/06/02 15:00:54 rpd + * Created. + * [90/03/26 23:53:57 rpd] + * + * Revision 2.2 89/05/06 12:36:08 rpd + * Created. 
+ * [89/05/06 12:35:19 rpd] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef _MACH_DEBUG_ZONE_INFO_H_ +#define _MACH_DEBUG_ZONE_INFO_H_ + +#include +#include + +/* + * Remember to update the mig type definitions + * in mach_debug_types.defs when adding/removing fields. + */ + +#define ZONE_NAME_MAX_LEN 80 + +typedef struct zone_name { + char zn_name[ZONE_NAME_MAX_LEN]; +} zone_name_t; + +typedef zone_name_t *zone_name_array_t; + + +typedef struct zone_info { + integer_t zi_count; /* Number of elements used now */ + vm_size_t zi_cur_size; /* current memory utilization */ + vm_size_t zi_max_size; /* how large can this zone grow */ + vm_size_t zi_elem_size; /* size of an element */ + vm_size_t zi_alloc_size; /* size used for more memory */ + integer_t zi_pageable; /* zone pageable? */ + integer_t zi_sleepable; /* sleep if empty? */ + integer_t zi_exhaustible; /* merely return if empty? 
*/ + integer_t zi_collectable; /* garbage collect elements? */ +} zone_info_t; + +typedef zone_info_t *zone_info_array_t; + +#endif /* _MACH_DEBUG_ZONE_INFO_H_ */ diff --git a/osfmk/machine/Makefile b/osfmk/machine/Makefile new file mode 100644 index 000000000..ae517d9e9 --- /dev/null +++ b/osfmk/machine/Makefile @@ -0,0 +1,47 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + + +DATAFILES = \ + machlimits.h \ + ast.h \ + ast_types.h \ + cpu_number.h \ + db_machdep.h \ + endian.h \ + gdb_defs.h \ + iobus.h \ + io_map_entries.h \ + kgdb_defs.h \ + kgdb_setjmp.h \ + lock.h \ + mach_param.h \ + machine_routines.h \ + machine_rpc.h \ + machparam.h \ + pmap.h \ + setjmp.h \ + spl.h \ + task.h \ + thread.h \ + thread_act.h \ + trap.h \ + vm_tuning.h \ + xpr.h \ + hw_lock_types.h + + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = machine + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/machine/asm.h b/osfmk/machine/asm.h new file mode 100644 index 000000000..b0ace5272 --- /dev/null +++ b/osfmk/machine/asm.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_ASM_H +#define _MACHINE_ASM_H + + +#if defined (__ppc__) +#include "ppc/asm.h" +#elif defined (__i386__) +#include "i386/asm.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_ASM_H */ diff --git a/osfmk/machine/ast.h b/osfmk/machine/ast.h new file mode 100644 index 000000000..5ec6ba6eb --- /dev/null +++ b/osfmk/machine/ast.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_AST_H +#define _MACHINE_AST_H + + +#if defined (__ppc__) +#include "ppc/ast.h" +#elif defined (__i386__) +#include "i386/ast.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_AST_H */ diff --git a/osfmk/machine/ast_types.h b/osfmk/machine/ast_types.h new file mode 100644 index 000000000..b04ed6317 --- /dev/null +++ b/osfmk/machine/ast_types.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_AST_TYPES_H +#define _MACHINE_AST_TYPES_H + + +#if defined (__ppc__) +#include "ppc/ast_types.h" +#elif defined (__i386__) +#include "i386/ast_types.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_AST_TYPES_H */ diff --git a/osfmk/machine/cpu_data.h b/osfmk/machine/cpu_data.h new file mode 100644 index 000000000..b10bac948 --- /dev/null +++ b/osfmk/machine/cpu_data.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_CPU_DATA_H +#define _MACHINE_CPU_DATA_H + + +#if defined (__ppc__) +#include "ppc/cpu_data.h" +#elif defined (__i386__) +#include "i386/cpu_data.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_CPU_DATA_H */ diff --git a/osfmk/machine/cpu_number.h b/osfmk/machine/cpu_number.h new file mode 100644 index 000000000..075c108eb --- /dev/null +++ b/osfmk/machine/cpu_number.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_CPU_NUMBER_H +#define _MACHINE_CPU_NUMBER_H + + +#if defined (__ppc__) +#include "ppc/cpu_number.h" +#elif defined (__i386__) +#include "i386/cpu_number.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_CPU_NUMBER_H */ diff --git a/osfmk/machine/db_machdep.h b/osfmk/machine/db_machdep.h new file mode 100644 index 000000000..bec9b4715 --- /dev/null +++ b/osfmk/machine/db_machdep.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_DB_MACHDEP_H +#define _MACHINE_DB_MACHDEP_H + + +#if defined (__ppc__) +#include "ppc/db_machdep.h" +#elif defined (__i386__) +#include "i386/db_machdep.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_DB_MACHDEP_H */ diff --git a/osfmk/machine/disk.h b/osfmk/machine/disk.h new file mode 100644 index 000000000..03174e58d --- /dev/null +++ b/osfmk/machine/disk.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_DISK_H +#define _MACHINE_DISK_H + + +#if defined (__ppc__) +#include "ppc/POWERMAC/disk.h" +#elif defined (__i386__) +#include "i386/AT386/disk.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_IOBUS_H */ diff --git a/osfmk/machine/endian.h b/osfmk/machine/endian.h new file mode 100644 index 000000000..f0c8b29d3 --- /dev/null +++ b/osfmk/machine/endian.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_ENDIAN_H +#define _MACHINE_ENDIAN_H + + +#if defined (__ppc__) +#include "ppc/endian.h" +#elif defined (__i386__) +#include "i386/endian.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_ENDIAN_H */ diff --git a/osfmk/machine/gdb_defs.h b/osfmk/machine/gdb_defs.h new file mode 100644 index 000000000..821448666 --- /dev/null +++ b/osfmk/machine/gdb_defs.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_GDB_DEFS_H +#define _MACHINE_GDB_DEFS_H + + +#if defined (__ppc__) +#include "ppc/gdb_defs.h" +#elif defined (__i386__) +#include "i386/gdb_defs.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_GDB_DEFS_H */ diff --git a/osfmk/machine/hw_lock_types.h b/osfmk/machine/hw_lock_types.h new file mode 100644 index 000000000..0335f3601 --- /dev/null +++ b/osfmk/machine/hw_lock_types.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_HW_LOCK_TYPES_H_ +#define _MACHINE_HW_LOCK_TYPES_H_ + + +#if defined (__ppc__) +#include "ppc/hw_lock_types.h" +#elif defined (__i386__) +#include "i386/hw_lock_types.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_HW_LOCK_TYPES_H_ */ diff --git a/osfmk/machine/io_map_entries.h b/osfmk/machine/io_map_entries.h new file mode 100644 index 000000000..49a6b83ab --- /dev/null +++ b/osfmk/machine/io_map_entries.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_IO_MAP_ENTRIES_H_ +#define _MACHINE_IO_MAP_ENTRIES_H_ + + +#if defined (__ppc__) +#include "ppc/io_map_entries.h" +#elif defined (__i386__) +#include "i386/io_map_entries.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_IO_MAP_ENTRIES_H_ */ diff --git a/osfmk/machine/iobus.h b/osfmk/machine/iobus.h new file mode 100644 index 000000000..b6e758edd --- /dev/null +++ b/osfmk/machine/iobus.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_IOBUS_H +#define _MACHINE_IOBUS_H + + +#if defined (__ppc__) +#include "ppc/POWERMAC/iobus.h" +#elif defined (__i386__) +#include "i386/AT386/iobus.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_IOBUS_H */ diff --git a/osfmk/machine/kgdb_defs.h b/osfmk/machine/kgdb_defs.h new file mode 100644 index 000000000..c4178dcd1 --- /dev/null +++ b/osfmk/machine/kgdb_defs.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_KGDB_DEFS_H +#define _MACHINE_KGDB_DEFS_H + + +#if defined (__ppc__) +#include "ppc/kgdb_defs.h" +#elif defined (__i386__) +#include "i386/kgdb_defs.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_KGDB_DEFS_H */ diff --git a/osfmk/machine/kgdb_setjmp.h b/osfmk/machine/kgdb_setjmp.h new file mode 100644 index 000000000..256830d27 --- /dev/null +++ b/osfmk/machine/kgdb_setjmp.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_KGDB_SETJMP_H +#define _MACHINE_KGDB_SETJMP_H + + +#if defined (__ppc__) +#include "ppc/kgdb_setjmp.h" +#elif defined (__i386__) +#include "i386/kgdb_setjmp.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_KGDB_SETJMP_H */ diff --git a/osfmk/machine/lock.h b/osfmk/machine/lock.h new file mode 100644 index 000000000..a65444044 --- /dev/null +++ b/osfmk/machine/lock.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_LOCK_H_ +#define _MACHINE_LOCK_H_ + + +#if defined (__ppc__) +#include "ppc/lock.h" +#elif defined (__i386__) +#include "i386/lock.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_LOCK_H_ */ diff --git a/osfmk/machine/mach_param.h b/osfmk/machine/mach_param.h new file mode 100644 index 000000000..f143d6aff --- /dev/null +++ b/osfmk/machine/mach_param.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_MACH_PARAM_H +#define _MACHINE_MACH_PARAM_H + + +#if defined (__ppc__) +#include "ppc/mach_param.h" +#elif defined (__i386__) +#include "i386/mach_param.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_MACH_PARAM_H */ diff --git a/osfmk/machine/machine_routines.h b/osfmk/machine/machine_routines.h new file mode 100644 index 000000000..57481db5a --- /dev/null +++ b/osfmk/machine/machine_routines.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_MACHINE_ROUTINES_H +#define _MACHINE_MACHINE_ROUTINES_H + + +#if defined (__ppc__) +#include "ppc/machine_routines.h" +#elif defined (__i386__) +#include "i386/machine_routines.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_MACHINE_ROUTINES_H */ diff --git a/osfmk/machine/machine_rpc.h b/osfmk/machine/machine_rpc.h new file mode 100644 index 000000000..bb1891453 --- /dev/null +++ b/osfmk/machine/machine_rpc.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_MACHINE_RPC_H +#define _MACHINE_MACHINE_RPC_H + + +#if defined (__ppc__) +#include "ppc/machine_rpc.h" +#elif defined (__i386__) +#include "i386/machine_rpc.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_MACHINE_RPC_H */ diff --git a/osfmk/machine/machlimits.h b/osfmk/machine/machlimits.h new file mode 100644 index 000000000..2add98192 --- /dev/null +++ b/osfmk/machine/machlimits.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_MACHLIMITS_H +#define _MACHINE_MACHLIMITS_H + + +#if defined (__ppc__) +#include "ppc/machlimits.h" +#elif defined (__i386__) +#include "i386/machlimits.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_MACHLIMITS_H */ diff --git a/osfmk/machine/machparam.h b/osfmk/machine/machparam.h new file mode 100644 index 000000000..f29685ffc --- /dev/null +++ b/osfmk/machine/machparam.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_MACHPARAM_H +#define _MACHINE_MACHPARAM_H + + +#if defined (__ppc__) +#include "ppc/machparam.h" +#elif defined (__i386__) +#include "i386/machparam.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_MACHPARAM_H */ diff --git a/osfmk/machine/pmap.h b/osfmk/machine/pmap.h new file mode 100644 index 000000000..198bdd167 --- /dev/null +++ b/osfmk/machine/pmap.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_PMAP_H +#define _MACHINE_PMAP_H + + +#if defined (__ppc__) +#include "ppc/pmap.h" +#elif defined (__i386__) +#include "i386/pmap.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_PMAP_H */ diff --git a/osfmk/machine/sched_param.h b/osfmk/machine/sched_param.h new file mode 100644 index 000000000..c32ea5f61 --- /dev/null +++ b/osfmk/machine/sched_param.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_SCHED_PARAM_H +#define _MACHINE_SCHED_PARAM_H + + +#if defined (__ppc__) +#include "ppc/sched_param.h" +#elif defined (__i386__) +#include "i386/sched_param.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_SCHED_PARAM_H */ diff --git a/osfmk/machine/setjmp.h b/osfmk/machine/setjmp.h new file mode 100644 index 000000000..1f4e5ef2c --- /dev/null +++ b/osfmk/machine/setjmp.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_SETJMP_H +#define _MACHINE_SETJMP_H + + +#if defined (__ppc__) +#include "ppc/setjmp.h" +#elif defined (__i386__) +#include "i386/setjmp.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_SETJMP_H */ diff --git a/osfmk/machine/spl.h b/osfmk/machine/spl.h new file mode 100644 index 000000000..f343dbb3e --- /dev/null +++ b/osfmk/machine/spl.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_SPL_H +#define _MACHINE_SPL_H + + +#if defined (__ppc__) +#include "ppc/spl.h" +#elif defined (__i386__) +#include "i386/spl.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_SPL_H */ diff --git a/osfmk/machine/task.h b/osfmk/machine/task.h new file mode 100644 index 000000000..56f48ae72 --- /dev/null +++ b/osfmk/machine/task.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_TASK_H +#define _MACHINE_TASK_H + + +#if defined (__ppc__) +#include "ppc/task.h" +#elif defined (__i386__) +#include "i386/task.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_TASK_H */ diff --git a/osfmk/machine/thread.h b/osfmk/machine/thread.h new file mode 100644 index 000000000..11ace465a --- /dev/null +++ b/osfmk/machine/thread.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_THREAD_H +#define _MACHINE_THREAD_H + + +#if defined (__ppc__) +#include "ppc/thread.h" +#elif defined (__i386__) +#include "i386/thread.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_THREAD_H */ diff --git a/osfmk/machine/thread_act.h b/osfmk/machine/thread_act.h new file mode 100644 index 000000000..e40989981 --- /dev/null +++ b/osfmk/machine/thread_act.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_THREAD_ACT_H +#define _MACHINE_THREAD_ACT_H + + +#if defined (__ppc__) +#include "ppc/thread_act.h" +#elif defined (__i386__) +#include "i386/thread_act.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_THREAD_ACT_H */ diff --git a/osfmk/machine/timer.h b/osfmk/machine/timer.h new file mode 100644 index 000000000..eafb03fb9 --- /dev/null +++ b/osfmk/machine/timer.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_TIMER_H +#define _MACHINE_TIMER_H + + +#if defined (__ppc__) +#include "ppc/timer.h" +#elif defined (__i386__) +#include "i386/timer.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_TIMER_H */ diff --git a/osfmk/machine/trap.h b/osfmk/machine/trap.h new file mode 100644 index 000000000..ef021722c --- /dev/null +++ b/osfmk/machine/trap.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_TRAP_H +#define _MACHINE_TRAP_H + + +#if defined (__ppc__) +#include "ppc/trap.h" +#elif defined (__i386__) +#include "i386/trap.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_TRAP_H */ diff --git a/osfmk/machine/vm_tuning.h b/osfmk/machine/vm_tuning.h new file mode 100644 index 000000000..d74da3049 --- /dev/null +++ b/osfmk/machine/vm_tuning.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_VM_TUNING_H +#define _MACHINE_VM_TUNING_H + + +#if defined (__ppc__) +#include "ppc/vm_tuning.h" +#elif defined (__i386__) +#include "i386/vm_tuning.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_VM_TUNING_H */ diff --git a/osfmk/machine/xpr.h b/osfmk/machine/xpr.h new file mode 100644 index 000000000..9507298b8 --- /dev/null +++ b/osfmk/machine/xpr.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACHINE_XPR_H +#define _MACHINE_XPR_H + + +#if defined (__ppc__) +#include "ppc/xpr.h" +#elif defined (__i386__) +#include "i386/xpr.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACHINE_XPR_H */ diff --git a/osfmk/ppc/AltiAssist.s b/osfmk/ppc/AltiAssist.s new file mode 100644 index 000000000..40adf070a --- /dev/null +++ b/osfmk/ppc/AltiAssist.s @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + AltiAssist.s + + Do the VMX assists + + Lovingly crafted by Bill Angell using traditional methods and only natural or recycled materials. + No animal products are used other than rendered otter bile and deep fried pork lard. + +*/ + +#include +#include +#include +#include +#include +#include + +; +; +; General stuff what happens here: +; 1) All general context saved, interrupts off, translation off +; 2) Vector and floating point disabled, but there may be live context. +; This code is responsible for saving and restoring what is used. This +; includes exception states, java mode, etc. +; 3) No attempt is made to resolve page faults. PTE misses are handled +; automatically, but actual faults (ala copyin/copyout) are not. If +; a fault does occur, the exception that caused entry to the emulation +; routine is remapped to either an instruction or data miss (depending +; upon the stage detected) and redriven through the exception handler. 
+; The only time that an instruction fault can happen is when a different +; processor removes a mapping between our original fault and when we +; fetch the assisted instruction. For an assisted instruction, data +; faults should not occur (except in the MP case). For a purely +; emulated instruction, faults can occur. +; +; Emulation algorithms cloned from MacOS 9 code. +; +; Assumes that R2 = per_proc_area +; +; + + + .align 5 + .globl EXT(AltivecAssist) + +LEXT(AltivecAssist) + + mfmsr r20 ; Get the current MSR + li r10,emvr0 ; Point to the vector savearea + oris r20,r20,hi16(MASK(MSR_VEC)) ; Turn on vector + mtmsr r20 ; Turn on vector + isync + + li r11,emvr1 ; Another savearea + stvxl v0,r10,r2 ; Save V0 + stvxl v1,r11,r2 ; Save V1 + vspltisw v0,1 ; Set a 1 in V0 + vspltisw v1,8 ; Get half of the shift + vslw v0,v0,v1 ; Shift half way + vslw v0,v0,v1 ; Shift the rest of the way (we now have 0x00010000) + mfvscr v1 ; Get the VSCR + vor v1,v1,v0 ; Turn off Java mode + lvxl v0,r10,r2 ; Restore V0 + mtvscr v1 ; Set Java mode off + lvxl v1,r11,r2 ; Restore V1 + + li r11,T_IN_VAIN ; We are all done + b EXT(EmulExit) ; We are done, no tracing on... + + + + diff --git a/osfmk/ppc/Diagnostics.c b/osfmk/ppc/Diagnostics.c new file mode 100644 index 000000000..6e52de770 --- /dev/null +++ b/osfmk/ppc/Diagnostics.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + * Author: Bill Angell, Apple + * Date: 9/auht-aught + * + * Random diagnostics + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int diagCall(struct savearea *save) { + + union { + unsigned long long tbase; + unsigned int tb[2]; + } ttt, adj; + natural_t tbu, tbu2, tbl; + struct per_proc_info *per_proc; /* Area for my per_proc address */ + int cpu; + + if(!(dgWork.dgFlags & enaDiagSCs)) return 0; /* If not enabled, cause an exception */ + + switch(save->save_r3) { /* Select the routine */ + +/* + * Adjust the timebase for drift recovery testing + */ + case dgAdjTB: /* Adjust the timebase */ + + adj.tb[0] = 0; /* Clear high part */ + adj.tb[1] = save->save_r4; /* Set low order */ + if(adj.tb[1] & 0x80000000) adj.tb[0] = 0xFFFFFFFF; /* Propagate sign bit */ + + do { /* Read current time */ + asm volatile(" mftbu %0" : "=r" (tbu)); + asm volatile(" mftb %0" : "=r" (tbl)); + asm volatile(" mftbu %0" : "=r" (tbu2)); + } while (tbu != tbu2); + + ttt.tb[0] = tbu; /* Set high */ + ttt.tb[1] = tbl; /* Set low */ + + ttt.tbase = ttt.tbase + adj.tbase; /* Increment or decrement the TB */ + + tbu = ttt.tb[0]; /* Save in regular variable */ + tbl = ttt.tb[1]; /* Save in regular variable */ + + mttb(0); 
/* Set low to keep from ticking */ + mttbu(tbu); /* Set adjusted high */ + mttb(tbl); /* Set adjusted low */ + + return -1; /* Return no AST checking... */ + +/* + * Return physical address of a page + */ + case dgLRA: + + save->save_r3 = pmap_extract(current_act()->map->pmap, save->save_r4); /* Get read address */ + + return -1; /* Return no AST checking... */ + +/* + * Copy physical to virtual + */ + case dgpcpy: + +#if 0 + save->save_r3 = copyp2v(save->save_r4, save->save_r5, save->save_r6); /* Copy the physical page */ +#endif + return 1; /* Return and check for ASTs... */ + + +/* + * Soft reset processor + */ + case dgreset: + + cpu = save->save_r4; /* Get the requested CPU number */ + + if(cpu >= NCPUS) { /* Check for bogus cpu */ + save->save_r3 = KERN_FAILURE; /* Set failure */ + return 1; + } + + if(!machine_slot[cpu].running) return KERN_FAILURE; /* It is not running */ + + per_proc = &per_proc_info[cpu]; /* Point to the processor */ + + (void)PE_cpu_start(per_proc->cpu_id, + per_proc->start_paddr, (vm_offset_t)per_proc); + + save->save_r3 = KERN_SUCCESS; /* Set success */ + + return 1; /* Return and check for ASTs... */ + +/* + * various hack tests + */ + case dgtest: + + pmap_remove(kernel_pmap, save->save_r4, save->save_r4 + 4096); + + return 1; /* Return and check for ASTs... */ + + + default: /* Handle invalid ones */ + return 0; /* Return an exception */ + + }; + +} diff --git a/osfmk/ppc/Diagnostics.h b/osfmk/ppc/Diagnostics.h new file mode 100644 index 000000000..c06672f83 --- /dev/null +++ b/osfmk/ppc/Diagnostics.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + * Here are the Diagnostic interface interfaces + * Lovingly crafted by Bill Angell using traditional methods +*/ + +#ifndef _DIAGNOSTICS_H_ +#define _DIAGNOSTICS_H_ + +#ifndef __ppc__ +#error This file is only useful on PowerPC. +#endif + +int diagCall(struct savearea *save); + +#define diagSCnum 0x00006000 + +#define dgAdjTB 0 +#define dgLRA 1 +#define dgpcpy 2 +#define dgreset 3 +#define dgtest 4 + +typedef struct diagWork { /* Diagnostic work area */ + + unsigned int dgLock; /* Lock if needed */ + unsigned int dgFlags; /* Flags */ +#define enaExpTrace 0x00000001 +#define enaExpTraceb 31 +#define enaUsrFCall 0x00000002 +#define enaUsrFCallb 30 +#define enaUsrPhyMp 0x00000004 +#define enaUsrPhyMpb 29 +#define enaDiagSCs 0x00000008 +#define enaDiagSCsb 28 +#define enaDiagDM 0x00000010 +#define enaDiagSDMb 27 +/* Suppress lock checks */ +#define disLkType 0x80000000 +#define disLktypeb 0 +#define disLkThread 0x40000000 +#define disLkThreadb 1 +#define disLkNmSimp 0x20000000 +#define disLkNmSimpb 2 +#define disLkMyLck 0x10000000 +#define disLkMyLckb 3 + + unsigned int dgMisc0; + unsigned int dgMisc1; + unsigned int dgMisc2; + unsigned int dgMisc3; + unsigned int dgMisc4; + unsigned int dgMisc5; + +} diagWork; + +extern diagWork dgWork; + + +#endif /* _DIAGNOSTICS_H_ */ diff --git 
a/osfmk/ppc/Emulate.s b/osfmk/ppc/Emulate.s new file mode 100644 index 000000000..813b689d5 --- /dev/null +++ b/osfmk/ppc/Emulate.s @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Emulate.s + + Emulate instructions and traps. + + Lovingly crafted by Bill Angell using traditional methods and only natural or recycled materials. + No animal products are used other than rendered otter bile and deep fried pork lard. + +*/ + +#include +#include +#include +#include +#include +#include + + +; General stuff what happens here: +; 1) All general context saved, interrupts off, translation off +; 2) Vector and floating point disabled, but there may be live context. +; This code is responsible for saving and restoring what is used. This +; includes exception states, java mode, etc. +; 3) No attempt is made to resolve page faults. PTE misses are handled +; automatically, but actual faults (ala copyin/copyout) are not. 
If +; a fault does occur, the exception that caused entry to the emulation +; routine is remapped to either an instruction or data miss (depending +; upon the stage detected) and redrived through the exception handler. +; The only time that an instruction fault can happen is when a different +; processor removes a mapping between our original fault and when we +; fetch the assisted instruction. For an assisted instruction, data +; faults should not occur (except in the MP case). For a purely +; emulated instruction, faults can occur. +; +; + + + .align 5 + .globl EXT(Emulate) + +LEXT(Emulate) + + b EXT(EmulExit) ; Just return for now... + diff --git a/osfmk/ppc/Firmware.h b/osfmk/ppc/Firmware.h new file mode 100644 index 000000000..213787b5f --- /dev/null +++ b/osfmk/ppc/Firmware.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + * Here be the firmware's public interfaces + * Lovingly crafted by Bill Angell using traditional methods +*/ + +#ifndef _FIRMWARE_H_ +#define _FIRMWARE_H_ + +#ifndef __ppc__ +#error This file is only useful on PowerPC. +#endif + +/* + * This routine is used to write debug output to either the modem or printer port. + * parm 1 is printer (0) or modem (1); parm 2 is ID (printed directly); parm 3 converted to hex + */ + +void dbgDisp(unsigned int port, unsigned int id, unsigned int data); +void dbgLog(unsigned int d0, unsigned int d1, unsigned int d2, unsigned int d3); +void dbgLog2(unsigned int type, unsigned int p1, unsigned int p2); +void dbgDispLL(unsigned int port, unsigned int id, unsigned int data); +void fwSCCinit(unsigned int port); + +extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3); +#if 0 /* (TEST/DEBUG) - eliminate inline */ +extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3) { + + __asm__ volatile("mr r3,%0" : : "r" (item1) : "r3"); + __asm__ volatile("mr r4,%0" : : "r" (item2) : "r4"); + __asm__ volatile("mr r5,%0" : : "r" (item3) : "r5"); + __asm__ volatile("lis r0,hi16(CutTrace)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CutTrace)" : : : "r0"); + __asm__ volatile("sc"); + return; +} +#endif + +extern void DoPreempt(void); +extern __inline__ void DoPreempt(void) { + __asm__ volatile("lis r0,hi16(DoPreemptCall)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(DoPreemptCall)" : : : "r0"); + __asm__ volatile("sc"); + return; +} + +extern void CreateFakeIO(void); +extern __inline__ void CreateFakeIO(void) { + __asm__ volatile("lis r0,hi16(CreateFakeIOCall)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CreateFakeIOCall)" : : : "r0"); + __asm__ volatile("sc"); + return; +} + +extern void CreateFakeDEC(void); +extern __inline__ void CreateFakeDEC(void) { + 
__asm__ volatile("lis r0,hi16(CreateFakeDECCall)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CreateFakeDECCall)" : : : "r0"); + __asm__ volatile("sc"); + return; +} + +extern void CreateShutdownCTX(void); +extern __inline__ void CreateShutdownCTX(void) { + __asm__ volatile("lis r0,hi16(CreateShutdownCTXCall)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CreateShutdownCTXCall)" : : : "r0"); + __asm__ volatile("sc"); + return; +} + +typedef struct Boot_Video bootBumbleC; + +extern void StoreReal(unsigned int val, unsigned int addr); +extern void ReadReal(unsigned int raddr, unsigned int *vaddr); +extern void ClearReal(unsigned int addr, unsigned int lgn); +extern void LoadDBATs(unsigned int *bat); +extern void LoadIBATs(unsigned int *bat); +extern void stFloat(unsigned int *addr); +extern int stVectors(unsigned int *addr); +extern int stSpecrs(unsigned int *addr); +extern unsigned int LLTraceSet(unsigned int tflags); +extern void GratefulDebInit(bootBumbleC *boot_video_info); +extern void GratefulDebDisp(unsigned int coord, unsigned int data); +extern void checkNMI(void); + +typedef struct GDWorkArea { /* Grateful Deb work area one per processor */ + +/* Note that a lot of info is duplicated for each processor */ + + unsigned int GDsave[32]; /* Save area for registers */ + + unsigned int GDfp0[2]; + unsigned int GDfp1[2]; + unsigned int GDfp2[2]; + unsigned int GDfp3[2]; + + unsigned int GDtop; /* Top pixel of CPU's window */ + unsigned int GDleft; /* Left pixel of CPU's window */ + unsigned int GDtopleft; /* Physical address of top left in frame buffer */ + unsigned int GDrowbytes; /* Bytes per row */ + unsigned int GDrowchar; /* Bytes per row of characters plus leading */ + unsigned int GDdepth; /* Bits per pixel */ + unsigned int GDcollgn; /* Column width in bytes */ + unsigned int GDready; /* We are ready to go */ + unsigned int GDfiller[16]; /* Fill it up to a 256 byte boundary */ + + unsigned int GDrowbuf1[128]; /* Buffer to an 8 character row */ + 
unsigned int GDrowbuf2[128]; /* Buffer to an 8 character row */ + +} GDWorkArea; +#define GDfontsize 16 +#define GDdispcols 2 + +#endif /* _FIRMWARE_H_ */ diff --git a/osfmk/ppc/Firmware.s b/osfmk/ppc/Firmware.s new file mode 100644 index 000000000..c7db5a174 --- /dev/null +++ b/osfmk/ppc/Firmware.s @@ -0,0 +1,2335 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + Firmware.s + + Handle things that should be treated as an extension of the hardware + + Lovingly crafted by Bill Angell using traditional methods and only natural or recycled materials. + No animal products are used other than rendered otter bile and deep fried pork lard. 
+ +*/ + +#include +#include +#include +#include +#include +#include +#include + + +/* + * Here we generate the table of supported firmware calls + */ + + + + .data + .align 5 /* Line up on cache line */ + + .globl EXT(FWtable) + +EXT(FWtable): + + .globl CutTrace /* Let everyone know 'bout it */ + .set CutTrace,(.-EXT(FWtable))/4|0x80000000 /* Call number for CutTrace */ + .long callUnimp /* This was already handled in lowmem_vectors */ + +#include + + .set EXT(FirmwareCnt), (.-EXT(FWtable))/4 /* Get the top number */ + + .text + +#define SIMPLESCC 1 +#define NOTQUITEASSIMPLE 1 +/* + * This routine handles the firmware call routine. It must be entered with IR and DR off, + * interruptions disabled, and in supervisor state. + * + * When we enter, we expect R0 to have call number, and LR + * to point to the return. Also, all registers saved in savearea in R13. + * R3 is as passed in by the user. All others must be gotten from the save area + */ + +ENTRY(FirmwareCall, TAG_NO_FRAME_USED) + + rlwinm r1,r0,2,1,29 /* Clear out bit 0 and multiply by 4 */ + lis r12,HIGH_ADDR(EXT(FWtable)) /* Get the high part of the firmware call table */ + cmplwi r1,EXT(FirmwareCnt)*4 /* Is it a valid firmware call number */ + mflr r11 /* Save the return */ + ori r12,r12,LOW_ADDR(EXT(FWtable)) /* Now the low part */ + ble+ goodCall /* Yeah, it is... */ + + li r3,T_SYSTEM_CALL /* Tell the vector handler that we know nothing */ + blr /* Return for errors... */ + +goodCall: mfsprg r10,0 /* Make sure about the per_proc block */ + lwzx r1,r1,r12 /* Pick up the address of the routine */ + lwz r4,saver4(r13) /* Pass in caller's R4 */ + lwz r5,saver5(r13) /* Pass in caller's R5 */ + rlwinm. r1,r1,0,0,29 /* Make sure the flag bits are clear */ + stw r11,PP_TEMPWORK1(r10) /* Save our return point */ + + mtlr r1 /* Put it in the LR */ + beq- callUnimp /* This one was unimplimented... */ + + blrl /* Call the routine... 
*/ + + mfsprg r10,0 /* Make sure about the per_proc again */ + stw r3,saver3(r13) /* Pass back the return code to caller */ + lwz r11,PP_TEMPWORK1(r10) /* Get our return point */ + li r3,T_IN_VAIN /* Tell the vector handler that we took care of it */ + mtlr r11 /* Set the return */ + blr /* Bye, dudes... */ + +callUnimp: lwz r11,PP_TEMPWORK1(r10) /* Restore the return address */ + li r3,T_SYSTEM_CALL /* Tell the vector handler that we know nothing */ + mtlr r11 /* Restore the LR */ + blr /* Return for errors... */ + +/* + * This routine is used to store using a real address. It stores parmeter1 at parameter2. + */ + +ENTRY(StoreReal, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(StoreRealCall) /* Get the top part of the SC number */ + ori r0,r0,LOW_ADDR(StoreRealCall) /* and the bottom part */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + +ENTRY(StoreRealLL, TAG_NO_FRAME_USED) + + stw r3,0(r4) /* Store the word */ + blr /* Leave... */ + +/* + * This routine is used to clear a range of physical pages. + */ + +ENTRY(ClearReal, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(ClearRealCall) /* Get the top part of the SC number */ + ori r0,r0,LOW_ADDR(ClearRealCall) /* and the bottom part */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + +ENTRY(ClearRealLL, TAG_NO_FRAME_USED) + +/* + * We take the first parameter as a physical address. The second is the length in bytes. + * Being crazy, I'll round the address down, and the length up. We could end up clearing + * an extra page at the start and one at the end, but we don't really care. If someone + * is stupid enough to give me unaligned addresses and lengths, I am just arrogant enough + * to take them at their word and to hell with them. 
+ */ + + neg r5,r3 /* Negate the address */ + addi r4,r4,4095 /* Round length up */ + rlwinm r5,r5,0,20,31 /* Save extra length */ + rlwinm r3,r3,0,0,19 /* Round the page on down */ + add r4,r4,r5 /* Add up all extra lengths */ + li r6,32 /* Get a displacement */ + rlwinm r4,r4,0,0,19 /* Round the length back down */ + +clrloop: subi r4,r4,32 /* Back off a cache line */ + dcbz 0,r3 /* Do the even line */ + sub. r4,r4,r6 /* Back off a second time (we only do this to generate a CR */ + dcbz r6,r3 /* Clear the even line */ + addi r3,r3,64 /* Move up to every other line */ + bgt+ clrloop /* Go until we've done it all... */ + + blr /* Leave... */ +/* + * This routine will read in 32 byte of real storage. + */ + +ENTRY(ReadReal, TAG_NO_FRAME_USED) + + mfmsr r0 /* Get the MSR */ + rlwinm r5,r0,0,28,26 /* Clear DR bit */ + rlwinm r5,r5,0,17,15 /* Clear EE bit */ + mtmsr r5 /* Disable EE and DR */ + isync /* Just make sure about it */ + + lwz r5,0(r3) /* Get word 0 */ + lwz r6,4(r3) /* Get word 1 */ + lwz r7,8(r3) /* Get word 2 */ + lwz r8,12(r3) /* Get word 3 */ + lwz r9,16(r3) /* Get word 4 */ + lwz r10,20(r3) /* Get word 5 */ + lwz r11,24(r3) /* Get word 6 */ + lwz r12,28(r3) /* Get word 7 */ + + mtmsr r0 /* Restore original machine state */ + isync /* Insure goodness */ + + stw r5,0(r4) /* Set word 0 */ + stw r6,4(r4) /* Set word 1 */ + stw r7,8(r4) /* Set word 2 */ + stw r8,12(r4) /* Set word 3 */ + stw r9,16(r4) /* Set word 4 */ + stw r10,20(r4) /* Set word 5 */ + stw r11,24(r4) /* Set word 6 */ + stw r12,28(r4) /* Set word 7 */ + + blr + + +/* + * This routine is used to load all 4 DBATs. + */ + +ENTRY(LoadDBATs, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(LoadDBATsCall) /* Top half of LoadDBATsCall firmware call number */ + ori r0,r0,LOW_ADDR(LoadDBATsCall) /* Bottom half */ + sc /* Do it to it */ + + blr /* Bye bye, Birdie... 
*/ + +ENTRY(xLoadDBATsLL, TAG_NO_FRAME_USED) + + lwz r4,0(r3) /* Get DBAT 0 high */ + lwz r5,4(r3) /* Get DBAT 0 low */ + lwz r6,8(r3) /* Get DBAT 1 high */ + lwz r7,12(r3) /* Get DBAT 1 low */ + lwz r8,16(r3) /* Get DBAT 2 high */ + lwz r9,20(r3) /* Get DBAT 2 low */ + lwz r10,24(r3) /* Get DBAT 3 high */ + lwz r11,28(r3) /* Get DBAT 3 low */ + + sync /* Common decency and the state law require you to wash your hands */ + mtdbatu 0,r4 /* Load DBAT 0 high */ + mtdbatl 0,r5 /* Load DBAT 0 low */ + mtdbatu 1,r6 /* Load DBAT 1 high */ + mtdbatl 1,r7 /* Load DBAT 1 low */ + mtdbatu 2,r8 /* Load DBAT 2 high */ + mtdbatl 2,r9 /* Load DBAT 2 low */ + mtdbatu 3,r10 /* Load DBAT 3 high */ + mtdbatl 3,r11 /* Load DBAT 3 low */ + sync /* Make sure it's done */ + isync /* Toss out anything new */ + + blr /* Leave... */ + +/* + * This routine is used to load all 4 IBATs. + */ + +ENTRY(LoadIBATs, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(LoadIBATsCall) /* Top half of CreateFakeIO firmware call number */ + ori r0,r0,LOW_ADDR(LoadIBATsCall) /* Bottom half */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + +ENTRY(xLoadIBATsLL, TAG_NO_FRAME_USED) + + lwz r4,0(r3) /* Get IBAT 0 high */ + lwz r5,4(r3) /* Get IBAT 0 low */ + lwz r6,8(r3) /* Get IBAT 1 high */ + lwz r7,12(r3) /* Get IBAT 1 low */ + lwz r8,16(r3) /* Get IBAT 2 high */ + lwz r9,20(r3) /* Get IBAT 2 low */ + lwz r10,24(r3) /* Get IBAT 3 high */ + lwz r11,28(r3) /* Get IBAT 3 low */ + + sync /* Common decency and the state law require you to wash your hands */ + mtibatu 0,r4 /* Load IBAT 0 high */ + mtibatl 0,r5 /* Load IBAT 0 low */ + mtibatu 1,r6 /* Load IBAT 1 high */ + mtibatl 1,r7 /* Load IBAT 1 low */ + mtibatu 2,r8 /* Load IBAT 2 high */ + mtibatl 2,r9 /* Load IBAT 2 low */ + mtibatu 3,r10 /* Load IBAT 3 high */ + mtibatl 3,r11 /* Load IBAT 3 low */ + sync /* Make sure it's done */ + isync /* Toss out anything new */ + + blr /* Leave... 
*/ + + +/* + * This is the glue to call the CutTrace firmware call + */ + +ENTRY(dbgTrace, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(CutTrace) /* Top half of CreateFakeIO firmware call number */ + ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + +/* + * This is the glue to create a fake I/O interruption + */ + +ENTRY(CreateFakeIO, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(CreateFakeIOCall) /* Top half of CreateFakeIO firmware call number */ + ori r0,r0,LOW_ADDR(CreateFakeIOCall) /* Bottom half */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + +/* + * This is the glue to create a fake Dec interruption + */ + +ENTRY(CreateFakeDEC, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(CreateFakeDECCall) /* Top half of CreateFakeDEC firmware call number */ + ori r0,r0,LOW_ADDR(CreateFakeDECCall) /* Bottom half */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + + +/* + * This is the glue to create a shutdown context + */ + +ENTRY(CreateShutdownCTX, TAG_NO_FRAME_USED) + + lis r0,HIGH_ADDR(CreateShutdownCTXCall) /* Top half of CreateFakeIO firmware call number */ + ori r0,r0,LOW_ADDR(CreateShutdownCTXCall) /* Bottom half */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + +/* + * Used to initialize the SCC for debugging output + */ + + +ENTRY(fwSCCinit, TAG_NO_FRAME_USED) + + mfmsr r8 /* Save the MSR */ + mr. r3,r3 /* See if printer or modem */ + rlwinm r12,r8,0,28,25 /* Turn off translation */ + lis r10,0xF301 /* Set the top part */ + rlwinm r12,r12,0,17,15 /* Turn off interruptions */ +#if 0 + mtmsr r12 /* Smash the MSR */ + isync /* Make it clean */ +#endif + + ori r10,r10,0x2000 /* Assume the printer (this is the normal one) */ + beq+ fwSCCprnt /* It sure are... 
*/ + ori r10,r10,0x0002 /* Move it over to the modem port */ + +fwSCCprnt: dcbf 0,r10 /* Insure it is out */ + sync + eieio + dcbi 0,r10 /* Toss it */ + sync + + + li r7,0x09 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x80 /* Reset channel A */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x04 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x44 /* x16 clock, 1 stop bit */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x03 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0xC0 /* 8 bits per char */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x05 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0xE2 /* DTR mode, 8bit/char */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x02 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x00 /* Vector 0 */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0A /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + 
eieio + + li r7,0x00 /* Clear misc controls */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0B /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x50 /* B/R gen T/R */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0C /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0A /* 9600 baud low */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0D /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x00 /* 9600 baud high */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x03 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0xC1 /* 8 bits/char, Rx enable */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x05 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0xEA /* 8 bits/char, Tx enable */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0E /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out 
there */ + dcbi 0,r10 + eieio + + li r7,0x01 /* BR rate gen enable */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0F /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x00 /* ints off */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x10 /* Reset ext/stat ints */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x10 /* Reset ext/stat ints */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x01 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x10 /* int on Rx, no Tx int enable */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x09 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x0A /* int on Rx, Tx int enable */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Master enable, no vector */ + dcbi 0,r10 + eieio + + li r7,0x09 /* Set the register */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + li r7,0x02 /* No vector */ + stb r7,0(r10) /* Set the register */ + dcbf 0,r10 /* Force it out */ + sync /* Master enable, no vector */ + dcbi 0,r10 + eieio + + lbz r7,0(r10) /* Clear interrupts */ + sync /* Master enable, no vector */ + dcbi 0,r10 + eieio + 
+wSCCrdy: eieio /* Barricade it */ + lbz r7,0(r10) /* Get current status */ + dcbi 0,r10 + sync + andi. r7,r7,0x04 /* Is transmitter empty? */ + beq wSCCrdy /* Nope... */ + + eieio + +#if 0 + mtmsr r8 /* Restore 'rupts and TR */ + isync +#endif + blr /* Leave... */ + +/* + * This routine is used to write debug output to either the modem or printer port. + * parm 1 is printer (0) or modem (1); parm 2 is ID (printed directly); parm 3 converted to hex + */ + +ENTRY(dbgDisp, TAG_NO_FRAME_USED) + + mr r12,r0 /* Keep R0 pristene */ + lis r0,HIGH_ADDR(dbgDispCall) /* Top half of dbgDispCall firmware call number */ + ori r0,r0,LOW_ADDR(dbgDispCall) /* Bottom half */ + + sc /* Go display the stuff */ + + mr r0,r12 /* Restore R0 */ + blr /* Return... */ + +/* Here's the low-level part of dbgDisp */ + +ENTRY(dbgDispLL, TAG_NO_FRAME_USED) + +dbgDispInt: mfmsr r8 /* Save the MSR */ + +#if 0 + lis r10,0xF301 /* (TEST/DEBUG) */ + ori r10,r10,0x2002 /* (TEST/DEBUG) */ + dcbf 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + li r7,0x35 /* (TEST/DEBUG) */ + stb r7,4(r10) /* (TEST/DEBUG) */ + + lis r7,10 /* (TEST/DEBUG) */ +spw6: addi r7,r7,-1 /* (TEST/DEBUG) */ + mr. r7,r7 /* (TEST/DEBUG) */ + bne- spw6 /* (TEST/DEBUG) */ + dcbf 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ +#endif + + rlwinm r12,r8,0,28,25 /* Turn off translation */ + rlwinm r12,r12,0,17,15 /* Turn off interruptions */ + + mflr r11 /* Save the link register */ + +#if 0 + mr r7,r12 /* (TEST/DEBUG) */ + bl dumpr7 /* (TEST/DEBUG) */ +#endif + + mr. 
r3,r3 /* See if printer or modem */ + lis r10,0xF301 /* Set the top part */ + mr r3,r4 /* Copy the ID parameter */ + +#if 0 + mr r9,r12 /* (TEST/DEBUG) */ + + mtmsr r12 /* (TEST/DEBUG) */ + isync /* (TEST/DEBUG) */ + +#if 0 + mtmsr r8 /* (TEST/DEBUG) */ + isync /* (TEST/DEBUG) */ +#endif + + lis r12,0xF301 /* (TEST/DEBUG) */ + ori r12,r12,0x2002 /* (TEST/DEBUG) */ +#if 1 + dcbf 0,r12 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r12 /* (TEST/DEBUG) */ +#endif + +xqrw1: eieio /* (TEST/DEBUG) */ + lbz r7,0(r12) /* (TEST/DEBUG) */ + dcbi 0,r12 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + andi. r7,r7,0x04 /* (TEST/DEBUG) */ + beq xqrw1 /* (TEST/DEBUG) */ + + eieio /* (TEST/DEBUG) */ + li r7,0x36 /* (TEST/DEBUG) */ + stb r7,4(r12) /* (TEST/DEBUG) */ + eieio + dcbf 0,r12 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r12 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + + + lis r7,10 /* (TEST/DEBUG) */ +spw7: addi r7,r7,-1 /* (TEST/DEBUG) */ + mr. r7,r7 /* (TEST/DEBUG) */ + bne- spw7 /* (TEST/DEBUG) */ + dcbf 0,r12 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r12 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + mr r12,r9 /* (TEST/DEBUG) */ +#endif + + mtmsr r12 /* Smash the MSR */ + isync /* Make it clean */ + + +#if SIMPLESCC && !NOTQUITEASSIMPLE + ori r10,r10,0x3010 /* Assume the printer (this is the normal one) */ +#else + ori r10,r10,0x2000 /* Assume the printer (this is the normal one) */ +#endif + beq+ dbgDprintr /* It sure are... 
*/ +#if SIMPLESCC && !NOTQUITEASSIMPLE + ori r10,r10,0x0020 /* Move it over to the modem port */ +#else + ori r10,r10,0x0002 /* Move it over to the modem port */ + +#if !NOTQUITEASSIMPLE + lis r7,0xF300 /* Address of SCC rounded to 128k */ + ori r7,r7,0x0032 /* Make it cache inhibited */ + mtdbatl 3,r7 /* Load DBAT 3 low */ + lis r7,0xF300 /* Address of SCC rounded to 128k */ + ori r7,r7,0x0002 /* Make it supervisor only */ + mtdbatu 3,r7 /* Load DBAT 3 high */ + ori r12,r12,0x0010 /* Turn on DR */ + mtmsr r12 /* Smash the MSR */ + isync /* Make it clean */ + +#endif +#endif + +dbgDprintr: sync +#if 0 + mr r7,r10 /* (TEST/DEBUG) */ + bl dumpr7 /* (TEST/DEBUG) */ +#endif + + dcbi 0,r10 /* Toss it */ + eieio + +#if 0 + lis r12,0xF301 /* (TEST/DEBUG) */ + ori r12,r12,0x2002 /* (TEST/DEBUG) */ + dcbf 0,r12 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r12 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + li r7,0x37 /* (TEST/DEBUG) */ + stb r7,4(r12) /* (TEST/DEBUG) */ + + lis r7,12 /* (TEST/DEBUG) */ +spw8: addi r7,r7,-1 /* (TEST/DEBUG) */ + mr. r7,r7 /* (TEST/DEBUG) */ + bne- spw8 /* (TEST/DEBUG) */ + dcbf 0,r12 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r12 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ +#endif + + +/* Print the ID parameter */ + + lis r12,HIGH_ADDR(fwdisplock) /* Get the display locker outer */ + ori r12,r12,LOW_ADDR(fwdisplock) /* Last part */ + + lwarx r7,0,r12 ; ? + +ddwait0: lwarx r7,0,r12 /* Get the lock */ + mr. r7,r7 /* Is it locked? */ + bne- ddwait0 /* Yup... */ + stwcx. r12,0,r12 /* Try to get it */ + bne- ddwait0 /* Nope, start all over... */ + +#if 0 + dcbf 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + li r7,0x38 /* (TEST/DEBUG) */ + stb r7,6(r10) /* (TEST/DEBUG) */ + + lis r7,10 /* (TEST/DEBUG) */ +spwa: addi r7,r7,-1 /* (TEST/DEBUG) */ + mr. 
r7,r7 /* (TEST/DEBUG) */ + bne- spwa /* (TEST/DEBUG) */ + dcbf 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ +#endif + + rlwinm r3,r3,8,0,31 /* Get the first character */ + bl dbgDchar /* Print it */ + rlwinm r3,r3,8,0,31 /* Get the second character */ + bl dbgDchar /* Print it */ + rlwinm r3,r3,8,0,31 /* Get the third character */ + bl dbgDchar /* Print it */ + rlwinm r3,r3,8,0,31 /* Get the fourth character */ + bl dbgDchar /* Print it */ + + li r3,0x20 /* Get a space for a separator */ + bl dbgDchar /* Print it */ + bl dbg4byte /* Print register 5 in hex */ + + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + mtlr r11 /* Get back the return */ +#if !SIMPLESCC && !NOTQUITEASSIMPLE + li r7,0 /* Get a zero */ + mtdbatu 3,r7 /* Invalidate DBAT 3 upper */ + mtdbatl 3,r7 /* Invalidate DBAT 3 lower */ +#endif + lis r12,HIGH_ADDR(fwdisplock) /* Get the display locker outer */ + li r7,0 /* Get a zero */ + ori r12,r12,LOW_ADDR(fwdisplock) /* Last part */ + dcbi 0,r10 /* ? */ + stw r7,0(r12) /* Release the display lock */ + mtmsr r8 /* Restore the MSR */ + isync /* Wait for it */ + blr /* Leave... */ + + +dbg4byte: mflr r12 /* Save the return */ + + lis r4,HIGH_ADDR(hexTab) /* Point to the top of table */ + li r6,8 /* Set number of hex digits to dump */ + ori r4,r4,LOW_ADDR(hexTab) /* Point to the bottom of table */ + +dbgDnext: rlwinm r5,r5,4,0,31 /* Rotate a nybble */ + subi r6,r6,1 /* Back down the count */ + rlwinm r3,r5,0,28,31 /* Isolate the last nybble */ + lbzx r3,r4,r3 /* Convert to ascii */ + bl dbgDchar /* Print it */ + mr. r6,r6 /* Any more? */ + bne+ dbgDnext /* Convert 'em all... */ + + li r3,0x20 /* Space */ + bl dbgDchar /* Send it */ + mtlr r12 /* Restore LR */ + blr /* Return... */ + +/* Write to whichever serial port. 
Try to leave it clean, but not too hard (this is a hack) */ + +dbgDchar: +#if SIMPLESCC && !NOTQUITEASSIMPLE + stb r3,0(r10) /* ? */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + + lis r7,3 /* Get enough for about 1ms */ + +dbgDchar0: addi r7,r7,-1 /* Count down */ + mr. r7,r7 /* Waited long enough? */ + bgt+ dbgDchar0 /* Nope... */ +#endif +#if NOTQUITEASSIMPLE +#if 0 + li r7,0x01 /* ? */ + stb r7,0(r10) /* ? */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + lbz r7,0(r10) /* ? */ + dcbi 0,r10 /* Force it out */ + sync /* kill it off */ + eieio + + li r7,0x00 /* ? */ + stb r7,0(r10) /* ? */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + lbz r7,0(r10) /* ? */ + dcbi 0,r10 /* Force it out */ + sync /* kill it off */ + eieio +#endif + +qrw1: eieio /* Barricade it */ + lbz r7,0(r10) /* ? */ + dcbi 0,r10 + sync + andi. r7,r7,0x04 /* ? */ + beq qrw1 /* Nope... */ + + eieio + + stb r3,4(r10) /* ? */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + +qrw2: eieio /* Barricade it */ + lbz r7,0(r10) /* ? */ + dcbi 0,r10 + sync + andi. r7,r7,0x04 /* ? */ + beq qrw2 /* Nope... */ + +#if 0 + eieio + li r7,0x10 /* ? */ + stb r7,0(r10) /* ? */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + + lbz r7,0(r10) /* ? */ + dcbi 0,r10 /* Force it out */ + sync /* kill it off */ + eieio +#endif + + lis r7,0x0080 /* ? */ + lis r9,0xF300 /* ? */ + ori r7,r7,0x010F /* ? */ + stw r7,0x28(r9) /* ? */ + dcbf 0,r10 /* Force it out */ + sync /* Make sure it's out there */ + dcbi 0,r10 + eieio + +#endif +#if !SIMPLESCC && !NOTQUITEASSIMPLE + rlwinm r9,r10,0,0,29 /* Get channel a */ + eieio /* Barricade it */ + + li r7,0x03 /* ? */ + stb r7,0(r9) /* ? */ + eieio /* Barricade it */ + + lbz r7,0(r9) /* ? */ + + eieio /* Barricade it */ + lbz r7,0(r9) /* ? 
*/ + +dchrw1: eieio /* Barricade it */ + lbz r7,0(r10) /* ? */ + andi. r7,r7,0x04 /* ? */ + beq dchrw1 /* Nope... */ + + stb r3,4(r10) /* ? */ + sync /* Make sure it's there */ + eieio /* Don't get confused */ + +dchrw2: eieio /* Barricade it */ + lbz r7,0(r10) /* ? */ + andi. r7,r7,0x04 /* ? */ + beq dchrw2 /* Nope... */ + + eieio /* Avoid confusion */ + lbz r7,0(r10) /* ? */ + andi. r7,r7,0x40 /* ? */ + beq+ nounder /* Nope... */ + + eieio /* Avoid confusion */ + li r7,0xC0 /* ? */ + stb r7,0(r10) /* ? */ + +nounder: eieio /* Avoid confusion */ + li r7,0x10 /* ? */ + stb r7,0(r10) /* ? */ + + eieio /* Avoid confusion */ + li r7,0x38 /* ? */ + stb r7,0(r9) /* ? */ + + eieio /* Avoid confusion */ + li r7,0x30 /* ? */ + stb r7,0(r10) /* ? */ + + eieio /* Avoid confusion */ + li r7,0x20 /* ? */ + stb r7,0(r10) /* ? */ + eieio /* Avoid confusion */ + sync + +#endif + blr /* Return */ + + .globl hexTab + +hexTab: STRINGD "0123456789ABCDEF" /* Convert hex numbers to printable hex */ + + +/* + * Dumps all the registers in the savearea in R13 + */ + + +ENTRY(dbgRegsLL, TAG_NO_FRAME_USED) + + li r3,0 /* ? */ + bl dbgRegsCm /* Join on up... */ + +/* + * Note that we bypass the normal return 'cause we don't wanna mess up R3 + */ + mfsprg r11,0 /* Get the per_proc */ + lwz r11,PP_TEMPWORK1(r11) /* Get our return point */ + li r3,T_IN_VAIN /* Tell the vector handler that we took care of it */ + mtlr r11 /* Set the return */ + blr /* Bye, dudes... */ + +ENTRY(dbgRegs, TAG_NO_FRAME_USED) + +dbgRegsCm: mfmsr r8 /* Save the MSR */ + mr. r3,r3 /* ? */ + rlwinm r12,r8,0,28,25 /* Turn off translation */ + lis r10,0xF301 /* Set the top part */ + rlwinm r12,r12,0,17,15 /* Turn off interruptions */ + mtmsr r12 /* Smash the MSR */ + isync /* Make it clean */ +#if SIMPLESCC && !NOTQUITEASSIMPLE + ori r10,r10,0x3010 /* ? */ +#else + ori r10,r10,0x2000 /* ? */ +#endif + mflr r11 /* Save the link register */ + beq+ dbgDprints /* It sure are... 
*/ +#if SIMPLESCC && !NOTQUITEASSIMPLE + ori r10,r10,0x0020 /* ? */ +#else + ori r10,r10,0x0002 /* ? */ + + dcbf 0,r10 /* Insure it is out */ + sync + dcbi 0,r10 /* Toss it */ +#if !NOTQUITEASSIMPLE + lis r7,0xF300 /* ? */ + ori r7,r7,0x0032 /* ? */ + mtdbatl 3,r7 /* ? */ + lis r7,0xF300 /* ? */ + ori r7,r7,0x0002 /* ? */ + mtdbatu 3,r7 /* ? */ + ori r12,r12,0x0010 /* ? */ + mtmsr r12 /* ? */ + isync /* ? */ +#endif +#endif + +dbgDprints: + lis r3,HIGH_ADDR(fwdisplock) /* Get the display locker outer */ + ori r3,r3,LOW_ADDR(fwdisplock) /* Last part */ + + lwarx r5,0,r3 ; ? +ddwait1: lwarx r5,0,r3 /* Get the lock */ + mr. r5,r5 /* Is it locked? */ + bne- ddwait1 /* Yup... */ + stwcx. r3,0,r3 /* Try to get it */ + bne- ddwait1 /* Nope, start all over... */ + + li r3,0x52 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x65 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x67 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x73 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + + lwz r5,saver0(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver1(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver2(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver3(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + lwz r5,saver4(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver5(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver6(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz 
r5,saver7(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + lwz r5,saver8(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver9(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver10(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver11(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + lwz r5,saver12(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver13(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver14(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver15(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + lwz r5,saver16(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver17(r13) /* 
Do register */ + bl dbg4byte /* Print */ + lwz r5,saver18(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver19(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + lwz r5,saver20(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver21(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver22(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver23(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + lwz r5,saver24(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver25(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver26(r13) /* Do register */ + bl dbg4byte /* Print */ + lwz r5,saver27(r13) /* Do register */ + bl dbg4byte /* Print */ + li r3,0x0A /* Linefeed */ + bl dbgDchar /* Send it */ + li r3,0x0D /* Carriage return */ + bl dbgDchar /* Send it */ + + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + bl dbgDchar /* Send it */ + li r3,0x20 /* Print eyecatcher */ + 
+			bl		dbgDchar				/* Send it */
+			lwz		r5,saver28(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,saver29(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,saver30(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,saver31(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+
+/* Segment registers -- each row below is tagged "Segs " or padded with spaces to line up */
+
+			li		r3,0x53					/* Print eyecatcher 'S' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x65					/* Print eyecatcher 'e' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x67					/* Print eyecatcher 'g' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x73					/* Print eyecatcher 's' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print eyecatcher (space) */
+			bl		dbgDchar				/* Send it */
+
+			lwz		r5,savesr0(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr1(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr2(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr3(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			lwz		r5,savesr4(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr5(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr6(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr7(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			lwz		r5,savesr8(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr9(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr10(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr11(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print a space to align under the eyecatcher */
+			bl		dbgDchar				/* Send it */
+			lwz		r5,savesr12(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr13(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr14(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesr15(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+
+/* SRR0/SRR1/DAR/DSISR row, tagged "01dd " */
+			li		r3,0x30					/* Print eyecatcher '0' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x31					/* Print eyecatcher '1' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x64					/* Print eyecatcher 'd' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x64					/* Print eyecatcher 'd' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print eyecatcher (space) */
+			bl		dbgDchar				/* Send it */
+			lwz		r5,savesrr0(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savesrr1(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savedar(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savedsisr(r13)		/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+
+/* LR/CR/CTR row, tagged " lcc " */
+			li		r3,0x20					/* Print eyecatcher (space) */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x6C					/* Print eyecatcher 'l' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x63					/* Print eyecatcher 'c' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x63					/* Print eyecatcher 'c' */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x20					/* Print eyecatcher (space) */
+			bl		dbgDchar				/* Send it */
+			lwz		r5,savelr(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savecr(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			lwz		r5,savectr(r13)			/* Do register */
+			bl		dbg4byte				/* Print */
+			li		r3,0x0A					/* Linefeed */
+			bl		dbgDchar				/* Send it */
+			li		r3,0x0D					/* Carriage return */
+			bl		dbgDchar				/* Send it */
+			mtlr	r11						/* Get back the return */
+			dcbi	0,r10					/* Toss the serial port window from the cache -- NOTE(review): exact intent undocumented; verify */
+#if !SIMPLESCC && !NOTQUITEASSIMPLE
+			li		r7,0					/* Get a zero */
+			mtdbatu	3,r7					/* Invalidate DBAT 3 upper */
+			mtdbatl	3,r7					/* Invalidate DBAT 3 lower */
+#endif
+			lis		r3,HIGH_ADDR(fwdisplock)	/* Get the display locker outer */
+			li		r7,0					/* Get a zero */
+			ori		r3,r3,LOW_ADDR(fwdisplock)	/* Last part */
+			stw		r7,0(r3)				/* Clear display lock */
+			mtmsr	r8						/* Restore the MSR */
+			isync							/* Wait for it */
+			blr								/* Leave... */
+
+
+/*
+ *			Used for debugging to leave stuff in 0x380-0x3FF (128 bytes).
+ *			Mapping is V=R.  Stores and loads are real.
+ */
+
+ENTRY(dbgCkpt, TAG_NO_FRAME_USED)
+
+			mr		r12,r0					/* Keep R0 pristine */
+			lis		r0,HIGH_ADDR(dbgCkptCall)	/* Top half of dbgCkptCall firmware call number */
+			ori		r0,r0,LOW_ADDR(dbgCkptCall)	/* Bottom half */
+
+			sc								/* Go stash the stuff */
+
+			mr		r0,r12					/* Restore R0 */
+			blr								/* Return... */
+
+/*			Here's the low-level part of dbgCkpt: copy 128 bytes from (r3) to physical 0x380 */
+
+ENTRY(dbgCkptLL, TAG_NO_FRAME_USED)
+
+			li		r12,0x380				/* Point to output area */
+			li		r1,32					/* Get line size */
+			dcbz	0,r12					/* Make sure we don't fetch a cache line */
+
+			lwz		r4,0x00(r3)				/* Load up storage to checkpoint */
+
+			dcbt	r1,r3					/* Start in the next line */
+
+			lwz		r5,0x04(r3)				/* Load up storage to checkpoint */
+			lwz		r6,0x08(r3)				/* Load up storage to checkpoint */
+			lwz		r7,0x0C(r3)				/* Load up storage to checkpoint */
+			lwz		r8,0x10(r3)				/* Load up storage to checkpoint */
+			lwz		r9,0x14(r3)				/* Load up storage to checkpoint */
+			lwz		r10,0x18(r3)			/* Load up storage to checkpoint */
+			lwz		r11,0x1C(r3)			/* Load up storage to checkpoint */
+
+			add		r3,r3,r1				/* Bump input */
+
+			stw		r4,0x00(r12)			/* Store it */
+			stw		r5,0x04(r12)			/* Store it */
+			stw		r6,0x08(r12)			/* Store it */
+			stw		r7,0x0C(r12)			/* Store it */
+			stw		r8,0x10(r12)			/* Store it */
+			stw		r9,0x14(r12)			/* Store it */
+			stw		r10,0x18(r12)			/* Store it */
+			stw		r11,0x1C(r12)			/* Store it */
+
+			dcbz	r1,r12					/* Clear the next line */
+			add		r12,r12,r1				/* Point to next output line */
+
+			lwz		r4,0x00(r3)				/* Load up storage to checkpoint */
+			lwz		r5,0x04(r3)				/* Load up storage to checkpoint */
+			lwz		r6,0x08(r3)				/* Load up storage to checkpoint */
+			lwz		r7,0x0C(r3)				/* Load up storage to checkpoint */
+			lwz		r8,0x10(r3)				/* Load up storage to checkpoint */
+			lwz		r9,0x14(r3)				/* Load up storage to checkpoint */
+			lwz		r10,0x18(r3)			/* Load up storage to checkpoint */
+			lwz		r11,0x1C(r3)			/* Load up storage to checkpoint */
+
+			dcbt	r1,r3					/* Touch the next line */
+			add		r3,r3,r1				/* Point to next input line */
+
+			stw		r4,0x00(r12)			/* Store it */
+			stw		r5,0x04(r12)			/* Store it */
+			stw		r6,0x08(r12)			/* Store it */
+			stw		r7,0x0C(r12)			/* Store it */
+			stw		r8,0x10(r12)			/* Store it */
+			stw		r9,0x14(r12)			/* Store it */
+			stw		r10,0x18(r12)			/* Store it */
+			stw		r11,0x1C(r12)			/* Store it */
+
+			dcbz	r1,r12					/* Clear the next line */
+			add		r12,r12,r1				/* Point to next output line */
+
lwz r4,0x00(r3) /* Load up storage to checkpoint */ + lwz r5,0x04(r3) /* Load up storage to checkpoint */ + lwz r6,0x08(r3) /* Load up storage to checkpoint */ + lwz r7,0x0C(r3) /* Load up storage to checkpoint */ + lwz r8,0x10(r3) /* Load up storage to checkpoint */ + lwz r9,0x14(r3) /* Load up storage to checkpoint */ + lwz r10,0x18(r3) /* Load up storage to checkpoint */ + lwz r11,0x1C(r3) /* Load up storage to checkpoint */ + + dcbt r1,r3 /* Touch the next line */ + add r3,r3,r1 /* Point to next input line */ + + stw r4,0x00(r12) /* Store it */ + stw r5,0x04(r12) /* Store it */ + stw r6,0x08(r12) /* Store it */ + stw r7,0x0C(r12) /* Store it */ + stw r8,0x10(r12) /* Store it */ + stw r9,0x14(r12) /* Store it */ + stw r10,0x18(r12) /* Store it */ + stw r11,0x1C(r12) /* Store it */ + + dcbz r1,r12 /* Clear the next line */ + add r12,r12,r1 /* Point to next output line */ + + lwz r4,0x00(r3) /* Load up storage to checkpoint */ + lwz r5,0x04(r3) /* Load up storage to checkpoint */ + lwz r6,0x08(r3) /* Load up storage to checkpoint */ + lwz r7,0x0C(r3) /* Load up storage to checkpoint */ + lwz r8,0x10(r3) /* Load up storage to checkpoint */ + lwz r9,0x14(r3) /* Load up storage to checkpoint */ + lwz r10,0x18(r3) /* Load up storage to checkpoint */ + lwz r11,0x1C(r3) /* Load up storage to checkpoint */ + + stw r4,0x00(r12) /* Store it */ + stw r5,0x04(r12) /* Store it */ + stw r6,0x08(r12) /* Store it */ + stw r7,0x0C(r12) /* Store it */ + stw r8,0x10(r12) /* Store it */ + stw r9,0x14(r12) /* Store it */ + stw r10,0x18(r12) /* Store it */ + stw r11,0x1C(r12) /* Store it */ + + blr + + +/* + * Do Preemption. Forces a T_PREEMPT trap to allow a preemption to occur. 
+ */ + +ENTRY(DoPreemptLL, TAG_NO_FRAME_USED) + + mfsprg r11,0 /* Get the per_proc address */ + lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ + li r3,T_PREEMPT /* Set preemption interrupt value */ + mtlr r11 /* Restore the LR */ + stw r3,saveexception(r13) /* Modify the exception type to preemption */ + blr /* Return to interrupt handler */ + + +/* + * Force 'rupt handler to dispatch with new context + * R3 at the call contains the new savearea. + * R4 at the call contains a return code to pass back in R3. + * Forces a T_CSWITCH + */ + +ENTRY(SwitchContextLL, TAG_NO_FRAME_USED) + + mfsprg r11,0 /* Get the per_proc address */ + lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ + li r3,T_CSWITCH /* Set context switch value */ + mtlr r11 /* Restore the LR */ + stw r3,saveexception(r13) /* Modify the exception type to switch context */ + blr /* Return to interrupt handler */ + + +/* + * Create a fake I/O 'rupt. + * Forces a T_INTERRUPT trap to pretend that an actual I/O interrupt occurred. + */ + +ENTRY(CreateFakeIOLL, TAG_NO_FRAME_USED) + + mfsprg r11,0 /* Get the per_proc address */ + lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ + li r3,T_INTERRUPT /* Set external interrupt value */ + mtlr r11 /* Restore the LR */ + stw r3,saveexception(r13) /* Modify the exception type to external */ + blr /* Return to interrupt handler */ + +/* + * Create a shutdown context + * Forces a T_SHUTDOWN trap. + */ + +ENTRY(CreateShutdownCTXLL, TAG_NO_FRAME_USED) + + mfsprg r11,0 /* Get the per_proc address */ + lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ + li r3,T_SHUTDOWN /* Set external interrupt value */ + mtlr r11 /* Restore the LR */ + stw r3,saveexception(r13) /* Modify the exception type to external */ + blr /* Return to interrupt handler */ + +/* + * Create a fake decrementer 'rupt. + * Forces a T_DECREMENTER trap to pretend that an actual decrementer interrupt occurred. 
+ */ + +ENTRY(CreateFakeDECLL, TAG_NO_FRAME_USED) + + mfsprg r11,0 /* Get the per_proc address */ + lwz r11,PP_TEMPWORK1(r11) /* Restore the return address */ + li r3,T_DECREMENTER /* Set decrementer interrupt value */ + mtlr r11 /* Restore the LR */ + stw r3,saveexception(r13) /* Modify the exception type to external */ + blr /* Return to interrupt handler */ + +/* + * Choke the system. This is just a dummy for now, + * but we'll eventually do something. + */ + +ENTRY(DoChokeLL, TAG_NO_FRAME_USED) + + BREAKPOINT_TRAP /* Dummy for now */ + +/* + * Set the low level trace flags + */ + +ENTRY(LLTraceSet, TAG_NO_FRAME_USED) + + mfsprg r6,2 ; Get feature flags + mfmsr r12 /* Get the MSR */ + mr r4,r3 /* Save the new value */ + andi. r3,r12,0x01C0 /* Clear interrupts and translation */ + mtcrf 0x04,r6 ; Set the features + bt pfNoMSRirb,ltsNoMSR ; Use MSR... + + mtmsr r3 ; Translation and all off + isync ; Toss prefetch + b ltsNoMSRx + +ltsNoMSR: li r0,loadMSR ; Get the MSR setter SC + sc ; Set it + +ltsNoMSRx: + + lis r5,hi16(EXT(trcWork)) ; Get trace area + ori r5,r5,lo16(EXT(trcWork)) ; again + + lwz r3,traceMask(r5) /* Get the old trace flags to pass back */ + stw r4,traceMask(r5) /* Replace with the new ones */ + + mtmsr r12 /* Restore the MSR */ + isync + + blr /* Leave... */ + +#if 1 + +/* +; *************************************************************************** +; +; ----------------- Grateful Deb ---------------- +; +; Debugging: direct draw into main screen menu bar +; +; Takes R4 value, converts it to hex characters and displays it. +; +; Gotta make sure the DCBST is done to force the pixels from the cache. +; +; Position is taken as column, row (0 based) from R3. +; Characters are from hexfont, and are 16x16 pixels. 
+; +; Only works with two processors so far +; +; +; *************************************************************************** +*/ + +#define GDfromright 20 +#define GDfontsize 16 + +ENTRY(GratefulDeb,TAG_NO_FRAME_USED) + mfspr r6,pir /* Get the PIR */ + lis r5,HIGH_ADDR(EXT(GratefulDebWork)) /* Point to our work area */ + rlwinm r6,r6,8,23,23 /* Get part of the offset to our processors area */ + ori r5,r5,LOW_ADDR(EXT(GratefulDebWork)) /* Start building the address */ + rlwimi r6,r6,2,21,21 /* Get the rest of the offset to our processors area */ + add r6,r6,r5 /* Point at our CPU's work area */ + mfmsr r5 /* Get that MSR */ + stmw r0,GDsave(r6) /* Save all registers */ + lwz r10,GDready(r6) /* See if we're all ready to go */ + ori r0,r5,0x2000 /* Turn on the floating point */ + mr r31,r6 /* Get a more sane base register */ + mr. r10,r10 /* Are we all set? */ + mtmsr r0 /* Enable floating point */ + isync + + stfd f0,GDfp0(r31) /* Save FP */ + stfd f1,GDfp1(r31) /* Save FP */ + stfd f2,GDfp2(r31) /* Save FP */ + stfd f3,GDfp3(r31) /* Save FP */ + + beq- GDbailout /* Go and bail... 
*/ + + rlwinm r25,r3,0,16,31 /* Isolate just the row number */ + lwz r28,GDtopleft(r31) /* Get the physical address of our line 0 */ + rlwinm r3,r3,16,16,31 /* Isolate the column number */ + lwz r27,GDrowbytes(r31) /* Get the number of bytes per row */ + lwz r9,GDrowchar(r31) /* Get the number of bytes per row of full leaded charactrers */ + lwz r26,GDdepth(r31) /* Get the bit depth */ + mullw r25,r25,r9 /* get offset to the row to write in bytes */ + lwz r24,GDcollgn(r31) /* Get the size of columns in bytes */ + add r25,r28,r25 /* Physical address of row */ + mullw r3,r3,r24 /* Get byte offset to first output column */ + + li r9,32 /* Get the initial shift calc */ + + lis r20,HIGH_ADDR(hexfont) /* Point to the font */ + + li r18,GDfontsize /* Get the number of rows in the font */ + ori r20,r20,LOW_ADDR(hexfont) /* Point to the low part */ + add r21,r25,r3 /* Physical address of top left output pixel */ + sub r9,r9,r26 /* Get right shift justifier for pixel size */ + li r7,32 /* Number of bits per word */ + +startNybble: + la r6,GDrowbuf1(r31) /* Point to the row buffer */ + li r19,8 /* Get the number of characters in a row */ + +getNybble: rlwinm r10,r4,9,23,26 /* Get the top nybble * 32 */ + rlwinm r4,r4,4,0,31 /* Rotate a nybble */ + add r10,r20,r10 /* Point to the character in the font */ + + rlwinm r16,r26,4,0,27 /* Width of row in actual bits */ + lhz r15,0(r10) /* Get the next row of the font */ + +rendrow: rlwinm r17,r15,16,0,0 /* Get the next font pixel in the row */ + rlwinm r15,r15,1,16,31 /* Move in the next font pixel */ + srawi r17,r17,31 /* Fill with 1s if black and 0s if white (reversed) */ + + slw r14,r14,r26 /* Make room for our pixel in a register */ + srw r17,r17,r9 /* Isolate one pixels worth of black or white */ + sub. 
r7,r7,r26 /* See how may bits are left */ + sub r16,r16,r26 /* Count how many bits are left to store for this row */ + or r14,r14,r17 /* Put in the pixel */ + bne+ notfull /* Finish rendering this word */ + + not r14,r14 /* Invert to black on white */ + stw r14,0(r6) /* Write out the word */ + li r7,32 /* Bit per word count */ + addi r6,r6,4 /* Point to the next word */ + +notfull: mr. r16,r16 /* Have we finished the whole character row? */ + bne+ rendrow /* Finish rendering the row */ + + addic. r19,r19,-1 /* Are we finished with a whole display row yet? */ + bne+ getNybble /* Not yet... */ + + la r6,GDrowbuf1(r31) /* Point to the row buffer */ + rlwinm r19,r26,31,0,29 /* Number of cache lines (depth/2) */ + mr r14,r21 /* Get the frame buffer address */ + +// BREAKPOINT_TRAP + +blitrow: lfd f0,0(r6) /* Load a line */ + lfd f1,8(r6) + lfd f2,16(r6) + lfd f3,24(r6) + + stfd f0,0(r14) /* Blit a line */ + stfd f1,8(r14) + stfd f2,16(r14) + stfd f3,24(r14) + + addi r6,r6,32 /* Next buffered line */ + + dcbst 0,r14 /* Force the line to the screen */ + sync /* Make sure the line is on it's way */ + eieio /* Make sure we beat the invalidate */ + dcbi 0,r14 /* Make sure we leave no paradox */ + + addic. r19,r19,-1 /* Done all lines yet? */ + addi r14,r14,32 /* Point to the next output */ + bne+ blitrow /* Nope, do it some more... */ + + addic. r18,r18,-1 /* Have we done all the rows in character yet? */ + addi r20,r20,2 /* Offset the font to the next row */ + add r21,r21,r27 /* Point to start of next row */ + bne+ startNybble /* Nope, go through the word one more time... */ + +GDbailout: mr r1,r31 /* Move the workarea base */ + + lfd f0,GDfp0(r31) /* Restore FP */ + lfd f1,GDfp1(r31) /* Restore FP */ + lfd f2,GDfp2(r31) /* Restore FP */ + lfd f3,GDfp3(r31) /* Restore FP */ + + mtmsr r5 /* Disable floating point */ + isync + + lmw r3,GDsave+12(r1) /* Restore most registers */ + lwz r0,GDsave(r1) /* Restore R0 */ + lwz r1,GDsave+4(r1) /* Finally, R1 */ + blr /* Leave... 
*/ + + +/* + * void GratefulDebDisp(unsigned int coord, unsigned int data); + */ + + +ENTRY(GratefulDebDisp,TAG_NO_FRAME_USED) + + mfmsr r9 /* Save the current MSR */ + mflr r7 /* Save the return */ + andi. r8,r9,0x7FCF /* Clear interrupt and translation */ + mtmsr r8 /* Turn 'em really off */ + isync /* Make sure about the translation part */ + bl EXT(GratefulDeb) /* Display it */ + mtmsr r9 /* Restore interrupt and translation */ + mtlr r7 /* Restore return */ + isync /* Make sure */ + blr + + +#endif + +/* + * void checkNMI(void); + */ + + +ENTRY(checkNMI,TAG_NO_FRAME_USED) + + mfmsr r9 /* Save it */ + andi. r8,r9,0x7FCF /* Clear it */ + mtmsr r8 /* Disable it */ + isync /* Fence it */ + lis r7,0xF300 /* Find it */ + ori r7,r7,0x0020 /* Find it */ + dcbi 0,r7 /* Toss it */ + sync /* Sync it */ + eieio /* Get it */ + lwz r6,0x000C(r7) /* Check it */ + eieio /* Fence it */ + dcbi 0,r7 /* Toss it */ + rlwinm. r4,r6,0,19,19 /* Check it */ + rlwinm r6,r6,0,20,18 /* Clear it */ + sync /* Sync it */ + eieio /* Fence it */ + beq+ xnonmi /* Branch on it */ + + stw r6,0x0008(r7) /* Reset it */ + sync /* Sync it */ + dcbi 0,r6 /* Toss it */ + eieio /* Fence it */ + + mtmsr r9 /* Restore it */ + isync /* Hold it */ + + BREAKPOINT_TRAP /* Kill it */ + blr /* Return from it */ + +xnonmi: /* Label it */ + mtmsr r9 /* Restore it */ + isync /* Hold it */ + blr /* Return from it */ + + +/* + * Early debug code + */ + +dumpr7: lis r9,HIGH_ADDR(hexTab) /* (TEST/DEBUG) */ + li r5,8 /* (TEST/DEBUG) */ + ori r9,r9,LOW_ADDR(hexTab) /* (TEST/DEBUG) */ + +dumpr7n: rlwinm r7,r7,4,0,31 /* (TEST/DEBUG) */ + mr r6,r7 /* (TEST/DEBUG) */ + andi. r6,r6,15 /* (TEST/DEBUG) */ + lbzx r6,r9,r6 /* (TEST/DEBUG) */ + lis r10,0xF301 /* (TEST/DEBUG) */ + ori r10,r10,0x2000 /* (TEST/DEBUG) */ + +#if 0 +xqrw2: eieio /* (TEST/DEBUG) */ + lbz r7,0(r10) /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + andi. 
r7,r7,0x04 /* (TEST/DEBUG) */ + beq xqrw2 /* (TEST/DEBUG) */ +#endif + + dcbf 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + stb r6,4(r10) /* (TEST/DEBUG) */ + + lis r6,10 /* (TEST/DEBUG) */ +dumpr7d: addi r6,r6,-1 /* (TEST/DEBUG) */ + mr. r6,r6 /* (TEST/DEBUG) */ + bne- dumpr7d /* (TEST/DEBUG) */ + dcbf 0,r10 /* (TEST/DEBUG) */ + sync /* (TEST/DEBUG) */ + dcbi 0,r10 /* (TEST/DEBUG) */ + eieio /* (TEST/DEBUG) */ + + addic. r5,r5,-1 /* (TEST/DEBUG) */ + bne+ dumpr7n /* (TEST/DEBUG) */ + + blr /* (TEST/DEBUG) */ + +; +; Log a special entry in physical memory. +; This assumes that memory size has been significantly lowered using +; the maxmem boot option. The buffer starts just after the end of mem_size. +; +; This is absolutely for special tracing cases. Do not ever leave in... +; + +ENTRY(dbgLog,TAG_NO_FRAME_USED) + + li r11,0 ; Clear callers callers callers return + li r10,0 ; Clear callers callers callers callers return + li r9,0 ; Clear callers callers callers callers callers return + lwz r2,0(r1) ; Get callers callers stack frame + lis r0,0x4000 ; First invalid address + lwz r12,8(r2) ; Get our callers return + lwz r2,0(r2) ; Back chain + + mr. r2,r2 ; End of chain? + cmplw cr1,r2,r0 ; Valid kernel address? + beq- nosavehere ; Yes, end of chain... + bge- cr1,nosavehere ; No... + lwz r11,8(r2) ; Get our callers return + lwz r2,0(r2) ; Back chain + + mr. r2,r2 ; End of chain? + cmplw cr1,r2,r0 ; Valid kernel address? + beq- nosavehere ; Yes, end of chain... + bge- cr1,nosavehere ; No... + lwz r10,8(r2) ; Get our callers return + lwz r2,0(r2) ; Back chain + + mr. r2,r2 ; End of chain? + cmplw cr1,r2,r0 ; Valid kernel address? + beq- nosavehere ; Yes, end of chain... + bge- cr1,nosavehere ; No... + lwz r9,8(r2) ; Get our callers return + +nosavehere: mfmsr r8 ; Get the MSR + lis r2,hi16(EXT(DebugWork)) ; High part of area + lis r7,hi16(EXT(mem_actual)) ; High part of actual + andi. 
r0,r8,0x7FCF ; Interrupts and translation off + ori r2,r2,lo16(EXT(DebugWork)) ; Get the entry + mtmsr r0 ; Turn stuff off + ori r7,r7,lo16(EXT(mem_actual)) ; Get the actual + isync + + lwz r0,4(r2) ; Get the flag + mr. r0,r0 ; Should we log? + lwz r0,0(r7) ; Get the end of memory + lwz r7,0(r2) ; Get the position + bne- waytoofar ; No logging... + mr. r7,r7 ; Is this the first? + bne+ gotspot ; Nope... + + lis r7,hi16(EXT(mem_size)) ; High part of defined memory + ori r7,r7,lo16(EXT(mem_size)) ; Low part of defined memory + lwz r7,0(r7) ; Make it end of defined + +gotspot: cmplw r7,r0 ; Do we fit in memory + addi r0,r7,0x0020 ; Next slot + bge- waytoofar ; No fit... + + stw r0,0(r2) ; Set next time slot + dcbz 0,r7 ; Zap it + + stw r3,0(r7) ; First data + li r3,32 ; Disp to next line + stw r4,4(r7) ; Second data + dcbz r3,r7 ; Zap it + stw r5,8(r7) ; Third data + stw r6,12(r7) ; Fourth data + + stw r12,16(r7) ; Callers callers + stw r11,20(r7) ; Callers callers caller + stw r10,24(r7) ; Callers callers callers caller + stw r9,28(r7) ; Callers callers callers callers caller + +waytoofar: mtmsr r8 ; Back to normal + isync + blr + +; +; Same as the other, but no traceback and 16 byte entry +; Trashes R0, R2, R10, R12 +; + + .align 5 + .globl EXT(dbgLog2) + +LEXT(dbgLog2) + + + mfmsr r10 ; Get the MSR + lis r2,hi16(EXT(DebugWork)) ; High part of area + lis r12,hi16(EXT(mem_actual)) ; High part of actual + andi. r0,r10,0x7FCF ; Interrupts and translation off + ori r2,r2,lo16(EXT(DebugWork)) ; Get the entry + mtmsr r0 ; Turn stuff off + ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual + isync + + lwz r0,4(r2) ; Get the flag + mr. r0,r0 ; Should we log? + lwz r0,0(r12) ; Get the end of memory + lwz r12,0(r2) ; Get the position + bne- waytoofar2 ; No logging... + mr. r12,r12 ; Is this the first? + bne+ gotspot2 ; Nope... 
+ + lis r12,hi16(EXT(mem_size)) ; High part of defined memory + ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory + lwz r12,0(r12) ; Make it end of defined + +gotspot2: cmplw cr1,r12,r0 ; Do we fit in memory + rlwinm. r0,r12,0,27,27 ; Are we on a new line? + bge- cr1,waytoofar2 ; No fit... + addi r0,r12,0x0010 ; Next slot + + bne+ nonewline ; Not on a new line... + dcbz br0,r12 ; Clear it so we do not fetch it + +nonewline: cmplwi r3,68 ; Special place for time stamp? + + stw r0,0(r2) ; Set next time slot + bne+ nospcts ; Nope... + + lwz r0,0x17C(br0) ; Get special saved time stamp + b nospctt ; Skip... + +nospcts: mftb r0 ; Get the current time + +nospctt: stw r3,4(r12) ; First data + stw r4,8(r12) ; Second data + stw r5,12(r12) ; Third data + stw r0,0(r12) ; Time stamp + +waytoofar2: mtmsr r10 ; Back to normal + isync + blr + + +; +; Saves floating point registers +; + + .align 5 + .globl EXT(stFloat) + +LEXT(stFloat) + + mfmsr r0 ; Save the MSR + rlwinm r4,r0,0,MSR_EE_BIT,MSR_EE_BIT ; Turn off interruptions + ori r4,r4,lo16(MASK(MSR_FP)) ; Enable floating point + mtmsr r4 + isync + + stfd f0,0x00(r3) + stfd f1,0x08(r3) + stfd f2,0x10(r3) + stfd f3,0x18(r3) + stfd f4,0x20(r3) + stfd f5,0x28(r3) + stfd f6,0x30(r3) + stfd f7,0x38(r3) + stfd f8,0x40(r3) + stfd f9,0x48(r3) + stfd f10,0x50(r3) + stfd f11,0x58(r3) + stfd f12,0x60(r3) + stfd f13,0x68(r3) + stfd f14,0x70(r3) + stfd f15,0x78(r3) + stfd f16,0x80(r3) + stfd f17,0x88(r3) + stfd f18,0x90(r3) + stfd f19,0x98(r3) + stfd f20,0xA0(r3) + stfd f21,0xA8(r3) + stfd f22,0xB0(r3) + stfd f23,0xB8(r3) + stfd f24,0xC0(r3) + stfd f25,0xC8(r3) + stfd f26,0xD0(r3) + stfd f27,0xD8(r3) + stfd f28,0xE0(r3) + stfd f29,0xE8(r3) + stfd f30,0xF0(r3) + stfd f31,0xF8(r3) + mffs f0 + stfd f0,0x100(r3) + lfd f0,0x00(r3) + mtmsr r0 + isync + blr + + +; +; Saves vector registers. Returns 0 if non-Altivec machine. 
+; + + .align 5 + .globl EXT(stVectors) + +LEXT(stVectors) + + mfpvr r6 ; Get machine type + mr r5,r3 ; Save area address + rlwinm r6,r6,16,17,31 ; Rotate on it + li r3,0 ; Assume failure + cmplwi r6,PROCESSOR_VERSION_7400 ; Do we have Altivec? + bltlr+ ; No... + + mfmsr r0 ; Save the MSR + rlwinm r4,r0,0,MSR_EE_BIT,MSR_EE_BIT ; Turn off interruptions + oris r4,r4,hi16(MASK(MSR_VEC)) ; Enable vectors + mtmsr r4 + isync + + stvxl v0,0,r5 + addi r5,r5,16 + stvxl v1,0,r5 + addi r5,r5,16 + stvxl v2,0,r5 + addi r5,r5,16 + stvxl v3,0,r5 + addi r5,r5,16 + stvxl v4,0,r5 + addi r5,r5,16 + stvxl v5,0,r5 + addi r5,r5,16 + stvxl v6,0,r5 + addi r5,r5,16 + stvxl v7,0,r5 + addi r5,r5,16 + stvxl v8,0,r5 + addi r5,r5,16 + stvxl v9,0,r5 + addi r5,r5,16 + stvxl v10,0,r5 + addi r5,r5,16 + stvxl v11,0,r5 + addi r5,r5,16 + stvxl v12,0,r5 + addi r5,r5,16 + stvxl v13,0,r5 + addi r5,r5,16 + stvxl v14,0,r5 + addi r5,r5,16 + stvxl v15,0,r5 + addi r5,r5,16 + stvxl v16,0,r5 + addi r5,r5,16 + stvxl v17,0,r5 + addi r5,r5,16 + stvxl v18,0,r5 + addi r5,r5,16 + stvxl v19,0,r5 + addi r5,r5,16 + stvxl v20,0,r5 + addi r5,r5,16 + stvxl v21,0,r5 + addi r5,r5,16 + stvxl v22,0,r5 + addi r5,r5,16 + stvxl v23,0,r5 + addi r5,r5,16 + stvxl v24,0,r5 + addi r5,r5,16 + stvxl v25,0,r5 + addi r5,r5,16 + stvxl v26,0,r5 + addi r5,r5,16 + stvxl v27,0,r5 + addi r5,r5,16 + stvxl v28,0,r5 + addi r5,r5,16 + stvxl v29,0,r5 + addi r5,r5,16 + stvxl v30,0,r5 + addi r5,r5,16 + stvxl v31,0,r5 + mfvscr v31 + addi r6,r5,16 + stvxl v31,0,r6 + li r3,1 + lvxl v31,0,r5 + mtmsr r0 + isync + blr + + +; +; Saves yet more registers +; + + .align 5 + .globl EXT(stSpecrs) + +LEXT(stSpecrs) + + mfmsr r0 ; Save the MSR + rlwinm r4,r0,0,MSR_EE_BIT,MSR_EE_BIT ; Turn off interruptions + mtmsr r4 + isync + + mfpvr r12 + stw r12,4(r3) + rlwinm r12,r12,16,16,31 + + mfdbatu r4,0 + mfdbatl r5,0 + mfdbatu r6,1 + mfdbatl r7,1 + mfdbatu r8,2 + mfdbatl r9,2 + mfdbatu r10,3 + mfdbatl r11,3 + stw r4,8(r3) + stw r5,12(r3) + stw r6,16(r3) + stw r7,20(r3) + 
stw r8,24(r3) + stw r9,28(r3) + stw r10,32(r3) + stw r11,36(r3) + + mfibatu r4,0 + mfibatl r5,0 + mfibatu r6,1 + mfibatl r7,1 + mfibatu r8,2 + mfibatl r9,2 + mfibatu r10,3 + mfibatl r11,3 + stw r4,40(r3) + stw r5,44(r3) + stw r6,48(r3) + stw r7,52(r3) + stw r8,56(r3) + stw r9,60(r3) + stw r10,64(r3) + stw r11,68(r3) + + mfsprg r4,0 + mfsprg r5,1 + mfsprg r6,2 + mfsprg r7,3 + stw r4,72(r3) + stw r5,76(r3) + stw r6,80(r3) + stw r7,84(r3) + + mfsdr1 r4 + stw r4,88(r3) + + la r4,92(r3) + li r5,0 + +stSnsr: mfsrin r6,r5 + addis r5,r5,0x1000 + stw r6,0(r4) + mr. r5,r5 + addi r4,r4,4 + bne+ stSnsr + + cmplwi cr1,r12,PROCESSOR_VERSION_604e + cmplwi cr5,r12,PROCESSOR_VERSION_604ev + cror cr1_eq,cr1_eq,cr5_eq ; Set if 604 type + cmplwi r12,PROCESSOR_VERSION_750 + mfspr r4,hid0 + stw r4,(39*4)(r3) + + li r4,0 + li r5,0 + li r6,0 + li r7,0 + beq- cr1,before750 + blt- before750 + + mfspr r4,hid1 + mfspr r5,l2cr + mfspr r6,msscr0 + mfspr r7,msscr1 + +before750: stw r4,(40*4)(r3) + stw r6,(42*4)(r3) + stw r5,(41*4)(r3) + stw r7,(43*4)(r3) + + li r4,0 + beq isis750 + + mfspr r4,pir +isis750: stw r4,0(r3) + + li r4,0 + li r5,0 + li r6,0 + li r7,0 + beq- cr1,b4750 + blt- b4750 + + mfspr r4,thrm1 + mfspr r5,thrm2 + mfspr r6,thrm3 + mfspr r7,ictc + +b4750: stw r4,(44*4)(r3) + stw r5,(45*4)(r3) + stw r6,(46*4)(r3) + stw r7,(47*4)(r3) + + li r4,0 + cmplwi r12,PROCESSOR_VERSION_7400 + bne nnmax + + mfpvr r5 + rlwinm r5,r5,0,16,31 + cmplwi r5,0x1101 + beq gnmax + cmplwi r5,0x1102 + bne nnmax + +gnmax: mfspr r4,1016 + +nnmax: stw r4,(48*4)(r3) + + mtmsr r0 + isync + blr diff --git a/osfmk/ppc/FirmwareC.c b/osfmk/ppc/FirmwareC.c new file mode 100644 index 000000000..a8b7355e0 --- /dev/null +++ b/osfmk/ppc/FirmwareC.c @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This file contains firmware code. + * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include +#include +#include + +extern Boot_Video boot_video_info; +Boot_Video dgVideo; +extern GDWorkArea GratefulDebWork[]; + +typedef struct RuptCtr { /* Counts hardware interrupts */ + struct GDpos { /* Screen position for Grateful Deb display */ + unsigned short col; /* Column (-1 means no display) */ + unsigned short row; /* Row */ + } GDpos; + unsigned int count; /* Count of interrupt */ + unsigned int timed; /* If set, count updates at timed rate */ + unsigned int lasttime; /* Low of timebase when last updated */ +} RuptCtr; + +/* Window layout for Grateful Deb: + * + * 0 9 + * + * 0 Total Decrimenter + * 1 DSI ISI + * 2 System call External + * 3 SIGP Floating point + * 4 Program Alignment + */ + + + + + +RuptCtr RuptCtrs[96] = { + { { 0, 0}, 0, 1 }, /* Total interruptions */ + { {-1, -1}, 0, 0 }, /* Reset */ + { {-1, -1}, 0, 0 }, /* Machine check */ + { { 0, 1}, 0, 1 
}, /* DSIs */ + { { 1, 1}, 0, 1 }, /* ISIs */ + { { 1, 2}, 0, 1 }, /* Externals */ + { { 1, 4}, 0, 0 }, /* Alignment */ + { { 0, 4}, 0, 0 }, /* Program */ + { { 1, 3}, 0, 0 }, /* Floating point */ + { { 1, 0}, 0, 1 }, /* Decrementer */ + { {-1, -1}, 0, 0 }, /* I/O error */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { { 0, 2}, 0, 1 }, /* System call */ + { {-1, -1}, 0, 0 }, /* Trace */ + { {-1, -1}, 0, 0 }, /* Floating point assist */ + { {-1, -1}, 0, 0 }, /* Performance monitor */ + { {-1, -1}, 0, 0 }, /* VMX */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Instruction breakpoint */ + { {-1, -1}, 0, 0 }, /* System management */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Trace */ + { { 0, 3}, 0, 0 }, /* SIGP */ + { {-1, -1}, 0, 0 }, /* Preemption */ + { {-1, -1}, 0, 0 }, /* Context switch */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Special, update frequency controls */ + +/* Start of second processor counts */ + + { { 0, 0}, 0, 1 }, /* Total interruptions */ + { {-1, -1}, 0, 0 }, /* Reset */ + { {-1, -1}, 0, 0 }, /* Machine check */ + { { 0, 1}, 0, 1 }, /* DSIs */ + { { 1, 1}, 0, 1 }, /* ISIs */ + { { 1, 2}, 0, 1 }, /* Externals */ + { { 1, 4}, 0, 0 }, 
/* Alignment */ + { { 0, 4}, 0, 0 }, /* Program */ + { { 1, 3}, 0, 0 }, /* Floating point */ + { { 1, 0}, 0, 1 }, /* Decrementer */ + { {-1, -1}, 0, 0 }, /* I/O error */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { { 0, 2}, 0, 1 }, /* System call */ + { {-1, -1}, 0, 0 }, /* Trace */ + { {-1, -1}, 0, 0 }, /* Floating point assist */ + { {-1, -1}, 0, 0 }, /* Performance monitor */ + { {-1, -1}, 0, 0 }, /* VMX */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Instruction breakpoint */ + { {-1, -1}, 0, 0 }, /* System management */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Trace */ + { { 0, 3}, 0, 0 }, /* SIGP */ + { {-1, -1}, 0, 0 }, /* Preemption */ + { {-1, -1}, 0, 0 }, /* Context switch */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 }, /* Reserved */ + { {-1, -1}, 0, 0 } /* Special, update frequency controls */ + }; + +void GratefulDebInit(bootBumbleC *boot_video_info) { /* Initialize the video debugger */ + + unsigned int fillframe[256]; + unsigned int startpos, startbyte, windowleft, newwidth, i, j, startword, oldwidth, nrmlgn; + unsigned int nwords, *byteleft, lstlgn, pixlgn, bytelgn; + + if(!boot_video_info) { /* Are we disabling it? 
+ GratefulDebWork[0].GDready = 0; /* Disable output */ + return; + } + + nrmlgn = (9 * GDfontsize) * (boot_video_info->v_depth / 8); /* Get the normal column size in bytes */ + lstlgn = (((8 * GDfontsize) + (GDfontsize >> 1)) * boot_video_info->v_depth) / 8; /* Same as normal, but with 1/2 character space */ + nrmlgn = (nrmlgn + 31) & -32; /* Round to a line */ + + bytelgn = (nrmlgn * (GDdispcols - 1)) + lstlgn; /* Length in bytes */ + pixlgn = bytelgn / (boot_video_info->v_depth / 8); /* Number of pixels wide */ + + startbyte = (boot_video_info->v_width * (boot_video_info->v_depth / 8)) - bytelgn; /* Get the starting byte unaligned */ + startpos = boot_video_info->v_width - pixlgn; /* Starting pixel position */ + + startbyte += (unsigned int)boot_video_info->v_baseAddr & 31; /* Add the extra to cache boundary in frame buffer */ + startbyte &= -32; /* Make sure it's on a cache line for speed */ + startbyte += (unsigned int)boot_video_info->v_baseAddr & 31; /* Subtract the extra to cache boundary in frame buffer */ + + windowleft = startbyte - (((GDfontsize / 2) * boot_video_info->v_depth) / 8); /* Back up a half character */ + windowleft &= -4; /* Make sure it is on a word boundary */ + newwidth = windowleft / (boot_video_info->v_depth / 8); /* Get the new pixel width of screen */ + + oldwidth = boot_video_info->v_width; /* Save the old width */ +// boot_video_info->v_width = newwidth; /* Set the new width */ + + nwords = oldwidth - newwidth; /* See how much to fill in pixels */ + nwords = nwords / (32 / boot_video_info->v_depth); /* Get that in bytes */ + + startword = (newwidth + 3) / 4; /* Where does it start? 
*/ + + + byteleft = (unsigned int *)(boot_video_info->v_baseAddr + windowleft); /* Starting place */ + for (i=0; i < nwords; i++) byteleft[i] = 0; /* Set the row to all black */ + + byteleft = (unsigned int *)(boot_video_info->v_baseAddr + windowleft + (boot_video_info->v_rowBytes * 1)); /* Starting place */ + for (i=0; i < nwords; i++) byteleft[i] = 0; /* Set the row to all black */ + + byteleft = (unsigned int *)(boot_video_info->v_baseAddr + windowleft + + (boot_video_info->v_rowBytes * (boot_video_info->v_height - 2))); /* Starting place */ + for (i=0; i < nwords; i++) byteleft[i] = 0; /* Set the row to all black */ + + byteleft = (unsigned int *)(boot_video_info->v_baseAddr + windowleft + + (boot_video_info->v_rowBytes * (boot_video_info->v_height - 1))); /* Starting place */ + for (i=0; i < nwords; i++) byteleft[i] = 0; /* Set the row to all black */ + + for (i=0; i < nwords; i++) fillframe[i] = 0xFFFFFFFF; /* Set the row to all white */ + + if(boot_video_info->v_depth == 8) { /* See if 8 bits a pixel */ + fillframe[0] = 0x0000FFFF; /* Make left border */ + fillframe[nwords - 1] = 0xFFFF0000; /* Make right border */ + } + else if(boot_video_info->v_depth == 16) { /* See if 16 bits a pixel */ + fillframe[0] = 0x00000000; /* Make left border */ + fillframe[nwords - 1] = 0x00000000; /* Make right border */ + } + else { + fillframe[0] = 0x00000000; /* Make left border */ + fillframe[1] = 0x00000000; /* Make left border */ + fillframe[nwords - 1] = 0x00000000; /* Make right border */ + fillframe[nwords - 2] = 0x00000000; /* Make right border */ + } + + byteleft = (unsigned int *)(boot_video_info->v_baseAddr + windowleft + (boot_video_info->v_rowBytes * 2)); /* Place to start filling */ + + for(i=2; i < (boot_video_info->v_height - 2); i++) { /* Fill the rest */ + for(j=0; jv_rowBytes); /* Next row */ + } + + for(i=0; i<2; i++) { /* Initialize both (for now) processor areas */ + + GratefulDebWork[i].GDtop = 2 + (GDfontsize / 2) + (i * 18 * GDfontsize); + 
GratefulDebWork[i].GDleft = 2 + startpos + (GDfontsize / 2); + GratefulDebWork[i].GDtopleft = boot_video_info->v_baseAddr + startbyte + + (GratefulDebWork[i].GDtop * boot_video_info->v_rowBytes); + GratefulDebWork[i].GDrowbytes = boot_video_info->v_rowBytes; + GratefulDebWork[i].GDrowchar = boot_video_info->v_rowBytes * (GDfontsize + (GDfontsize / 4)); + GratefulDebWork[i].GDdepth = boot_video_info->v_depth; + GratefulDebWork[i].GDcollgn = nrmlgn; + +// RuptCtrs[(48*i)+47].timed = gPEClockFrequencyInfo.bus_clock_rate_hz >> 6; /* (Update every 16th of a second (16 fps) */ + RuptCtrs[(48*i)+47].timed = gPEClockFrequencyInfo.bus_clock_rate_hz >> 5; /* (Update every 8th of a second (8 fps) */ +// RuptCtrs[(48*i)+47].timed = gPEClockFrequencyInfo.bus_clock_rate_hz >> 4; /* (Update every 4th of a second (4 fps) */ +// RuptCtrs[(48*i)+47].timed = gPEClockFrequencyInfo.bus_clock_rate_hz >> 3; /* (Update every 2th of a second (2 fps) */ +// RuptCtrs[(48*i)+47].timed = gPEClockFrequencyInfo.bus_clock_rate_hz >> 2; /* (Update every 1 second (1 fps) */ + + sync(); + + GratefulDebWork[i].GDready = 1; /* This one's all ready */ + } + +} + + diff --git a/osfmk/ppc/FirmwareCalls.h b/osfmk/ppc/FirmwareCalls.h new file mode 100644 index 000000000..dd600c625 --- /dev/null +++ b/osfmk/ppc/FirmwareCalls.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +#ifdef ASSEMBLER + +#ifdef _FIRMWARECALLS_H_ +#error Hey! You can only include FirmwareCalls.h in one assembler file, dude. And it should be Firmware.s! +#else /* _FIRMWARECALLS_H_ */ + +/* + * Entries for all firmware calls are in here (except for call 0x80000000 - CutTrace + */ + +#define _FIRMWARECALLS_H_ + +#define fwCallEnt(name, entrypt) \ + .globl name __ASMNL__ \ + .set name,(.-EXT(FWtable))/4|0x80000000 __ASMNL__ \ + .long EXT(entrypt) __ASMNL__ + +/* + * + */ + + fwCallEnt(MPgetProcCountCall, MPgetProcCountLL) /* Call the MPgetProcCount routine */ + fwCallEnt(MPstartCall, MPstartLL) /* Call the MPstart routine */ + fwCallEnt(MPexternalHookCall, MPexternalHookLL) /* Get the address of the external interrupt handler */ + fwCallEnt(MPsignalCall, MPsignalLL) /* Call the MPsignal routine */ + fwCallEnt(MPstopCall, MPstopLL) /* Call the MPstop routine */ + + fwCallEnt(dbgDispCall, dbgDispLL) /* Write stuff to printer or modem port */ + fwCallEnt(dbgCkptCall, dbgCkptLL) /* Save 128 bytes from r3 to 0x380 V=R mapping */ + fwCallEnt(StoreRealCall, StoreRealLL) /* Save one word in real storage */ + fwCallEnt(ClearRealCall, ClearRealLL) /* Clear physical pages */ + fwCallEnt(LoadDBATsCall, xLoadDBATsLL) /* Load all DBATs */ + fwCallEnt(LoadIBATsCall, xLoadIBATsLL) /* Load all IBATs */ + fwCallEnt(DoPreemptCall, DoPreemptLL) /* Preempt if need be */ + fwCallEnt(CreateFakeIOCall, CreateFakeIOLL) /* Make a 
fake I/O interruption */ + fwCallEnt(SwitchContextCall, SwitchContextLL) /* Switch context */ + fwCallEnt(Choke, DoChokeLL) /* Choke (system crash) */ + fwCallEnt(dbgRegsCall, dbgRegsLL) /* Dumps all registers */ + fwCallEnt(CreateFakeDECCall, CreateFakeDECLL) /* Make a fake decrementer interruption */ + fwCallEnt(CreateShutdownCTXCall, CreateShutdownCTXLL) /* create a shutdown context */ +#if PERF_HIST + fwCallEnt(PerfCtlCall, PerfCtlLL) /* Control performance monitor */ +#endif + +#if 0 + fwCallEnt(MPCPUAddressCall, 0) /* Call the MPCPUAddress routine */ + fwCallEnt(MPresumeCall, 0) /* Call the MPresume routine */ + fwCallEnt(MPresetCall, 0) /* Call the MPreset routine */ + fwCallEnt(MPSenseCall, 0) /* Call the MPSense routine */ + fwCallEnt(MPstoreStatusCall, 0) /* Call the MPstoreStatus routine */ + fwCallEnt(MPSetStatusCall, 0) /* Call the MPSetStatus routine */ + fwCallEnt(MPgetSignalCall, 0) /* Call the MPgetSignal routine */ + fwCallEnt(MPsyncTBCall, 0) /* Call the MPsyncTB routine */ + fwCallEnt(MPcheckPendingCall, 0) /* Call the MPcheckPending routine */ +#endif +#endif /* _FIRMWARECALLS_H_ */ + +#else /* ASSEMBLER */ + +/* + * The firmware function headers + */ +extern void CutTrace (unsigned int item1, ...); + +#endif /* ASSEMBLER */ diff --git a/osfmk/ppc/MPinterfaces.s b/osfmk/ppc/MPinterfaces.s new file mode 100644 index 000000000..56bc2b510 --- /dev/null +++ b/osfmk/ppc/MPinterfaces.s @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + MPinterfaces.s + + General interface to the MP hardware handlers anonymous + + Lovingly crafted by Bill Angell using traditional methods and only natural or recycled materials. + No animal products are used other than rendered otter bile. + +*/ + +#include +#include +#include +#include +#include +#include + +/* + * This first section is the glue for the high level C code. + * Anything that needs any kind of system services (e.g., VM) has to be done here. The firmware + * code that implements the SC runs in real mode. + */ + + + +/* #define MPI_DEBUGGING 0 */ +#define MPI_DEBUGGING 0 + +/* + * The routine that implements cpu_number. + */ + +ENTRY(cpu_number, TAG_NO_FRAME_USED) + + mfmsr r9 /* Save the old MSR */ + rlwinm r8,r9,0,17,15 /* Clear interruptions */ + mtmsr r8 /* Interrupts off */ + mfsprg r7,0 /* Get per-proc block */ + lhz r3,PP_CPU_NUMBER(r7) /* Get CPU number */ + mtmsr r9 /* Restore interruptions to entry */ + blr /* Return... 
*/ + + +/* + * The routine glues to the count CPU firmware call + */ + +ENTRY(MPgetProcCount, TAG_NO_FRAME_USED) + + mr r12,r0 /* Keep R0 pristene */ + lis r0,HIGH_ADDR(MPgetProcCountCall) /* Top half of MPgetProcCount firmware call number */ + ori r0,r0,LOW_ADDR(MPgetProcCountCall) /* Bottom half */ + sc /* Go see how many processors we have */ + +#if MPI_DEBUGGING + lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ + ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ + sc /* Cut a backend trace entry */ +#endif + + mr r0,r12 /* Restore R0 */ + + blr /* Return, pass back R3... */ + +/* + * The routine glues to the start CPU firmware call - actually it's really a boot + * The first parameter is the CPU number to start + * The second parameter is the real address of the code used to boot the processor + * The third parameter is the real addess of the CSA for the subject processor + */ + +ENTRY(MPstart, TAG_NO_FRAME_USED) + + mr r12,r0 /* Keep R0 pristene */ + lis r0,HIGH_ADDR(MPstartCall) /* Top half of MPstartCall firmware call number */ + ori r0,r0,LOW_ADDR(MPstartCall) /* Bottom half */ + sc /* Go see how many processors we have */ + +#if MPI_DEBUGGING + lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ + ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ + sc /* Cut a backend trace entry */ +#endif + + mr r0,r12 /* Restore R0 */ + blr /* Return... 
*/ + +/* + * This routine glues to the get external interrupt handler physical address + */ + +ENTRY(MPexternalHook, TAG_NO_FRAME_USED) + + mr r12,r0 /* Keep R0 pristene */ + lis r0,HIGH_ADDR(MPexternalHookCall) /* Top half of MPexternalHookCall firmware call number */ + ori r0,r0,LOW_ADDR(MPexternalHookCall) /* Bottom half */ + sc /* Go see how many processors we have */ + +#if MPI_DEBUGGING + lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ + ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ + sc /* Cut a backend trace entry */ +#endif + + mr r0,r12 /* Restore R0 */ + blr /* Return... */ + + +/* + * This routine glues to the signal processor routine + */ + +ENTRY(MPsignal, TAG_NO_FRAME_USED) + + mr r12,r0 /* Keep R0 pristene */ + lis r0,HIGH_ADDR(MPsignalCall) /* Top half of MPsignalCall firmware call number */ + ori r0,r0,LOW_ADDR(MPsignalCall) /* Bottom half */ + sc /* Go kick the other guy */ + +#if MPI_DEBUGGING + lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ + ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ + sc /* Cut a backend trace entry */ +#endif + + mr r0,r12 /* Restore R0 */ + blr /* Return... */ + + +/* + * This routine glues to the stop processor routine + */ + +ENTRY(MPstop, TAG_NO_FRAME_USED) + + mr r12,r0 /* Keep R0 pristene */ + lis r0,HIGH_ADDR(MPstopCall) /* Top half of MPsignalCall firmware call number */ + ori r0,r0,LOW_ADDR(MPstopCall) /* Bottom half */ + sc /* Stop the other guy cold */ + +#if MPI_DEBUGGING + lis r0,HIGH_ADDR(CutTrace) /* Top half of trace entry maker call */ + ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half of trace entry maker call */ + sc /* Cut a backend trace entry */ +#endif + + mr r0,r12 /* Restore R0 */ + blr /* Return... */ + + +/* ************************************************************************************************************* + * + * This second section is the glue for the low level stuff directly into the MP plugin. 
+ * At this point every register in existence should be saved. Well, they're saved, + * but R13 points to the savearea, and R20 to the trace entry. Please be careful + * with these. You won't like what happens if they're different when you exit. + * + ***************************************************************************************************************/ + + +/* + * See how many physical processors we have + */ + +ENTRY(MPgetProcCountLL, TAG_NO_FRAME_USED) + + lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ + ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ + lwz r10,kCountProcessors*4(r11) /* Get the routine entry point */ + mflr r14 /* Save the return in an unused register */ + mtlr r10 /* Set it */ + blrl /* Call the routine */ + mtlr r14 /* Restore firmware caller address */ + blr /* Leave... */ + +/* + * Start up a processor + */ + +ENTRY(MPstartLL, TAG_NO_FRAME_USED) + + lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ + ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ + lwz r10,kStartProcessor*4(r11) /* Get the routine entry point */ + mflr r14 /* Save the return in an unused register */ + mtlr r10 /* Set it */ + blrl /* Call the routine */ + mtlr r14 /* Restore firmware caller address */ + blr /* Leave... */ + +/* + * Get physical address of SIGP external handler + */ + +ENTRY(MPexternalHookLL, TAG_NO_FRAME_USED) + + lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ + ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ + lwz r10,kExternalHook*4(r11) /* Get the routine entry point */ + mflr r14 /* Save the return in an unused register */ + mtlr r10 /* Set it */ + blrl /* Call the routine */ + mtlr r14 /* Restore firmware caller address */ + blr /* Leave... 
*/ + + + +/* + * Send a signal to another processor + */ + +ENTRY(MPsignalLL, TAG_NO_FRAME_USED) + + lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ + ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ + lwz r10,kSignalProcessor*4(r11) /* Get the routine entry point */ + mflr r14 /* Save the return in an unused register */ + mtlr r10 /* Set it */ + blrl /* Call the routine */ + mtlr r14 /* Restore firmware caller address */ + blr /* Leave... */ + + + +/* + * Stop another processor + */ + +ENTRY(MPstopLL, TAG_NO_FRAME_USED) + + lis r11,HIGH_ADDR(EXT(MPEntries)) /* Get the address of the MP entry block (in the V=R area) */ + ori r11,r11,LOW_ADDR(EXT(MPEntries)) /* Get the bottom of the MP spec area */ + lwz r10,kStopProcessor*4(r11) /* Get the routine entry point */ + mflr r14 /* Save the return in an unused register */ + mtlr r10 /* Set it */ + blrl /* Call the routine */ + mtlr r14 /* Restore firmware caller address */ + blr /* Leave... */ + + +/* + * Third section: Miscellaneous MP related routines + */ + + + +/* + * All non-primary CPUs start here. + * We are dispatched by the SMP driver. Addressing is real (no DR or IR), + * interruptions disabled, etc. R3 points to the CPUStatusArea (CSA) which contains + * most of the state for the processor. This is set up by the primary. Note that we + * do not use everything in the CSA. Caches should be clear and coherent with + * no paradoxies (well, maybe one doxie, a pair would be pushing it). + */ + +ENTRY(start_secondary,TAG_NO_FRAME_USED) + + mr r31,r3 /* Get the pointer to the CSA */ + + lis r21,HIGH_ADDR(SpinTimeOut) /* Get the top part of the spin timeout */ + ori r21,r21,LOW_ADDR(SpinTimeOut) /* Slam in the bottom part */ + +GetValid: lbz r10,CSAregsAreValid(r31) /* Get the CSA validity value */ + + + mr. r10,r10 /* Is the area valid yet? */ + bne GotValid /* Yeah... */ + addic. 
r21,r21,-1 /* Count the try */ + isync /* Make sure we don't prefetch the valid flag */ + bge+ GetValid /* Still more tries left... */ + blr /* Return and cancel startup request... */ + +GotValid: li r21,0 /* Set the valid flag off (the won't be after the RFI) */ + lwz r10,CSAdec(r31) /* Get the decrimenter */ + stb r21,CSAregsAreValid(r31) /* Clear that validity flag */ + + lwz r11,CSAdbat+(0*8)+0(r31) /* Get the first DBAT */ + lwz r12,CSAdbat+(0*8)+4(r31) /* Get the first DBAT */ + lwz r13,CSAdbat+(1*8)+0(r31) /* Get the second DBAT */ + mtdec r10 /* Set the decrimenter */ + lwz r14,CSAdbat+(1*8)+4(r31) /* Get the second DBAT */ + mtdbatu 0,r11 /* Set top part of DBAT 0 */ + lwz r15,CSAdbat+(2*8)+0(r31) /* Get the third DBAT */ + mtdbatl 0,r12 /* Set lower part of DBAT 0 */ + lwz r16,CSAdbat+(2*8)+4(r31) /* Get the third DBAT */ + mtdbatu 1,r13 /* Set top part of DBAT 1 */ + lwz r17,CSAdbat+(3*8)+0(r31) /* Get the fourth DBAT */ + mtdbatl 1,r14 /* Set lower part of DBAT 1 */ + lwz r18,CSAdbat+(3*8)+4(r31) /* Get the fourth DBAT */ + mtdbatu 2,r15 /* Set top part of DBAT 2 */ + lwz r11,CSAibat+(0*8)+0(r31) /* Get the first IBAT */ + mtdbatl 2,r16 /* Set lower part of DBAT 2 */ + lwz r12,CSAibat+(0*8)+4(r31) /* Get the first IBAT */ + mtdbatu 3,r17 /* Set top part of DBAT 3 */ + lwz r13,CSAibat+(1*8)+0(r31) /* Get the second IBAT */ + mtdbatl 3,r18 /* Set lower part of DBAT 3 */ + lwz r14,CSAibat+(1*8)+4(r31) /* Get the second IBAT */ + mtibatu 0,r11 /* Set top part of IBAT 0 */ + lwz r15,CSAibat+(2*8)+0(r31) /* Get the third IBAT */ + mtibatl 0,r12 /* Set lower part of IBAT 0 */ + lwz r16,CSAibat+(2*8)+4(r31) /* Get the third IBAT */ + mtibatu 1,r13 /* Set top part of IBAT 1 */ + lwz r17,CSAibat+(3*8)+0(r31) /* Get the fourth IBAT */ + mtibatl 1,r14 /* Set lower part of IBAT 1 */ + lwz r18,CSAibat+(3*8)+4(r31) /* Get the fourth IBAT */ + mtibatu 2,r15 /* Set top part of IBAT 2 */ + lwz r11,CSAsdr1(r31) /* Get the SDR1 value */ + mtibatl 2,r16 /* Set lower part of 
IBAT 2 */ + lwz r12,CSAsprg(r31) /* Get SPRG0 (the per_proc_info address) */ + mtibatu 3,r17 /* Set top part of IBAT 3 */ + lwz r13,CSAmsr(r31) /* Get the MSR */ + mtibatl 3,r18 /* Set lower part of IBAT 3 */ + lwz r14,CSApc(r31) /* Get the PC */ + sync /* Sync up */ + mtsdr1 r11 /* Set the SDR1 value */ + sync /* Sync up */ + + la r10,CSAsr-4(r31) /* Point to SR 0 - 4 */ + li r9,0 /* Start at SR 0 */ + +LoadSRs: lwz r8,4(r10) /* Get the next SR in line */ + addi r10,r10,4 + mtsrin r8,r9 /* Load up the SR */ + addis r9,r9,0x1000 /* Bump to the next SR */ + mr. r9,r9 /* See if we wrapped back to 0 */ + bne+ LoadSRs /* Not yet... */ + + lwz r0,CSAgpr+(0*4)(r31) /* Get a GPR */ + lwz r9,CSAsprg+(1*4)(r31) /* Get SPRG1 (the initial active savearea) */ + mtsrr1 r13 /* Set the MSR to dispatch */ + lwz r1,CSAgpr+(1*4)(r31) /* Get a GPR */ + mtsprg 0,r12 /* Set the SPRG0 (per_proc_into) value */ + lwz r2,CSAgpr+(2*4)(r31) /* Get a GPR */ + mtsrr0 r14 /* Set the PC to dispatch */ + lwz r3,CSAgpr+(3*4)(r31) /* Get a GPR */ + mtsprg 1,r9 /* Set the SPRG1 (the initial active savearea) value */ + lwz r4,CSAgpr+(4*4)(r31) /* Get a GPR */ + lwz r5,CSAgpr+(5*4)(r31) /* Get a GPR */ + lwz r6,CSAgpr+(6*4)(r31) /* Get a GPR */ + lwz r7,CSAgpr+(7*4)(r31) /* Get a GPR */ + lwz r8,CSAgpr+(8*4)(r31) /* Get a GPR */ + lwz r9,CSAgpr+(9*4)(r31) /* Get a GPR */ + lwz r10,CSAgpr+(10*4)(r31) /* Get a GPR */ + lwz r11,CSAgpr+(11*4)(r31) /* Get a GPR */ + lwz r12,CSAgpr+(12*4)(r31) /* Get a GPR */ + lwz r13,CSAgpr+(13*4)(r31) /* Get a GPR */ + lwz r14,CSAgpr+(14*4)(r31) /* Get a GPR */ + lwz r15,CSAgpr+(15*4)(r31) /* Get a GPR */ + lwz r16,CSAgpr+(16*4)(r31) /* Get a GPR */ + lwz r17,CSAgpr+(17*4)(r31) /* Get a GPR */ + lwz r18,CSAgpr+(18*4)(r31) /* Get a GPR */ + lwz r19,CSAgpr+(19*4)(r31) /* Get a GPR */ + lwz r20,CSAgpr+(20*4)(r31) /* Get a GPR */ + lwz r21,CSAgpr+(21*4)(r31) /* Get a GPR */ + lwz r22,CSAgpr+(22*4)(r31) /* Get a GPR */ + lwz r23,CSAgpr+(23*4)(r31) /* Get a GPR */ + lwz 
r24,CSAgpr+(24*4)(r31) /* Get a GPR */ + lwz r25,CSAgpr+(25*4)(r31) /* Get a GPR */ + lwz r26,CSAgpr+(26*4)(r31) /* Get a GPR */ + lwz r27,CSAgpr+(27*4)(r31) /* Get a GPR */ + lwz r28,CSAgpr+(28*4)(r31) /* Get a GPR */ + lwz r29,CSAgpr+(29*4)(r31) /* Get a GPR */ + lwz r30,CSAgpr+(30*4)(r31) /* Get a GPR */ + lwz r31,CSAgpr+(31*4)(r31) /* Get a GPR */ + + sync /* Make sure we're sunk */ + + rfi /* Get the whole shebang going... */ + + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + + + +/* + * This routine handles requests to firmware from another processor. It is actually the second level + * of a three level signaling protocol. The first level is handled in the physical MP driver. It is the + * basic physical control for the processor, e.g., physical stop, reset, start. The second level (this + * one) handles cross-processor firmware requests, e.g., complete TLB purges. The last are AST requests + * which are handled directly by mach. + * + * If this code handles the request (based upon MPPICParm0BU which is valid until the next SIGP happens - + * actually, don't count on it once you enable) it will RFI back to the + * interrupted code. If not, it will return and let the higher level interrupt handler be called. + * + * We need to worry about registers we use here, check in lowmem_vectors to see what is boten and verboten. + * + * Note that there are no functions implemented yet. + */ + + +ENTRY(MPsignalFW, TAG_NO_FRAME_USED) + + + mfspr r7,pir /* Get the processor address */ + lis r6,HIGH_ADDR(EXT(MPPICPUs)) /* Get high part of CPU control block array */ + rlwinm r7,r7,5,23,26 /* Get index into CPU array */ + ori r6,r6,HIGH_ADDR(EXT(MPPICPUs)) /* Get low part of CPU control block array */ + add r7,r7,r6 /* Point to the control block for this processor */ + lwz r6,MPPICParm0BU(r7) /* Just pick this up for now */ + blr /* Leave... 
*/ + + +/* + * Make space for the maximum supported CPUs in the data section + */ + +#ifdef __ELF__ + .section ".data" +#else + .data +#endif + .align 5 +EXT(CSA): + .set ., .+(CSAsize*NCPUS) +#ifndef __MACHO__ + .type EXT(CSA), @object + .size EXT(CSA), CSAsize*NCPUS +#endif + .globl EXT(CSA) diff --git a/osfmk/ppc/Makefile b/osfmk/ppc/Makefile new file mode 100644 index 000000000..501264040 --- /dev/null +++ b/osfmk/ppc/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + + +DATAFILES = \ + asm.h machlimits.h + +EXPORT_ONLY_FILES = \ + asm.h \ + cpu_number.h \ + lock.h \ + hw_lock_types.h \ + io_map_entries.h \ + proc_reg.h \ + machine_routines.h \ + Diagnostics.h \ + mappings.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = ppc + +EXPORT_MD_LIST = ${EXPORT_ONLY_FILES} + +EXPORT_MD_DIR = ppc + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/ppc/POWERMAC/dbdma.c b/osfmk/ppc/POWERMAC/dbdma.c new file mode 100644 index 000000000..224fe15fc --- /dev/null +++ b/osfmk/ppc/POWERMAC/dbdma.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include + +#include /* For isync */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int dbdma_alloc_index = 0; +dbdma_command_t *dbdma_alloc_commands = NULL; + +void +dbdma_start(dbdma_regmap_t *dmap, dbdma_command_t *commands) +{ + unsigned long addr = kvtophys((vm_offset_t) commands); + + if (addr & 0xf) + panic("dbdma_start command structure not 16-byte aligned"); + + dmap->d_intselect = 0xff; /* Endian magic - clear out interrupts */ + DBDMA_ST4_ENDIAN(&dmap->d_control, + DBDMA_CLEAR_CNTRL( (DBDMA_CNTRL_ACTIVE | + DBDMA_CNTRL_DEAD | + DBDMA_CNTRL_WAKE | + DBDMA_CNTRL_FLUSH | + DBDMA_CNTRL_PAUSE | + DBDMA_CNTRL_RUN ))); + eieio(); + + while (DBDMA_LD4_ENDIAN(&dmap->d_status) & DBDMA_CNTRL_ACTIVE) + eieio(); + + dmap->d_cmdptrhi = 0; eieio();/* 64-bit not yet */ + DBDMA_ST4_ENDIAN(&dmap->d_cmdptrlo, addr); eieio(); + + DBDMA_ST4_ENDIAN(&dmap->d_control, DBDMA_SET_CNTRL(DBDMA_CNTRL_RUN)); + eieio(); + +} + +void +dbdma_stop(dbdma_regmap_t *dmap) +{ + DBDMA_ST4_ENDIAN(&dmap->d_control, DBDMA_CLEAR_CNTRL(DBDMA_CNTRL_RUN) | + DBDMA_SET_CNTRL(DBDMA_CNTRL_FLUSH)); eieio(); + + while (DBDMA_LD4_ENDIAN(&dmap->d_status) & (DBDMA_CNTRL_ACTIVE|DBDMA_CNTRL_FLUSH)) + eieio(); +} + +void +dbdma_flush(dbdma_regmap_t *dmap) +{ + DBDMA_ST4_ENDIAN(&dmap->d_control,DBDMA_SET_CNTRL(DBDMA_CNTRL_FLUSH)); + eieio(); + + while (DBDMA_LD4_ENDIAN(&dmap->d_status) & (DBDMA_CNTRL_FLUSH)) + eieio(); +} + +void 
+dbdma_reset(dbdma_regmap_t *dmap) +{ + DBDMA_ST4_ENDIAN(&dmap->d_control, + DBDMA_CLEAR_CNTRL( (DBDMA_CNTRL_ACTIVE | + DBDMA_CNTRL_DEAD | + DBDMA_CNTRL_WAKE | + DBDMA_CNTRL_FLUSH | + DBDMA_CNTRL_PAUSE | + DBDMA_CNTRL_RUN ))); + eieio(); + + while (DBDMA_LD4_ENDIAN(&dmap->d_status) & DBDMA_CNTRL_RUN) + eieio(); +} + +void +dbdma_continue(dbdma_regmap_t *dmap) +{ + DBDMA_ST4_ENDIAN(&dmap->d_control, DBDMA_SET_CNTRL(DBDMA_CNTRL_RUN|DBDMA_CNTRL_WAKE) | DBDMA_CLEAR_CNTRL(DBDMA_CNTRL_PAUSE|DBDMA_CNTRL_DEAD)); + eieio(); +} + +void +dbdma_pause(dbdma_regmap_t *dmap) +{ + DBDMA_ST4_ENDIAN(&dmap->d_control,DBDMA_SET_CNTRL(DBDMA_CNTRL_PAUSE)); + eieio(); + + while (DBDMA_LD4_ENDIAN(&dmap->d_status) & DBDMA_CNTRL_ACTIVE) + eieio(); +} + +dbdma_command_t * +dbdma_alloc(int count) +{ + dbdma_command_t *dbdmap; + + /* + * For now, we assume that dbdma_alloc() is called only when + * the system is bootstrapping, i.e. before the other CPUs + * are activated... + * If that's not the case, we need to protect the global + * variables here. + */ + assert(cpu_number() == master_cpu); + + if (dbdma_alloc_index == 0) + dbdma_alloc_commands = (dbdma_command_t *) io_map(0, PAGE_SIZE); + if ((dbdma_alloc_index+count) >= PAGE_SIZE / sizeof(dbdma_command_t)) + panic("Too many dbdma command structures!"); + + dbdmap = &dbdma_alloc_commands[dbdma_alloc_index]; + dbdma_alloc_index += count; + return dbdmap; +} diff --git a/osfmk/ppc/POWERMAC/dbdma.h b/osfmk/ppc/POWERMAC/dbdma.h new file mode 100644 index 000000000..7cf4d8358 --- /dev/null +++ b/osfmk/ppc/POWERMAC/dbdma.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include diff --git a/osfmk/ppc/POWERMAC/mp/MPPlugIn.h b/osfmk/ppc/POWERMAC/mp/MPPlugIn.h new file mode 100644 index 000000000..07cf468b6 --- /dev/null +++ b/osfmk/ppc/POWERMAC/mp/MPPlugIn.h @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + MPPlugIn.h + + Herein we find all the global MP plugin stuff + + Lovingly crafted by Bill Angell using traditional methods + +*/ + + +/* + * External hook completion codes + * + * The MP plugin's external interrupt hook returns one of these codes + */ + +#define kMPVainInterrupt 0 /* Interruption in vain -- ignore it */ +#define kMPIOInterruptPending 1 /* This is an I/O interruption -- handle it */ +#define kMPSignalPending 2 /* This is a pending signal -- handle it */ + + +/* *********************************************************************** + * Entry point jump table entry numbers + * *********************************************************************** */ + +#define kCountProcessors 0 +#define kStartProcessor 1 /* ->cpu address, ->start address, ->pass-thru parm */ +#define kResumeProcessor 2 /* ->cpu address */ +#define kStopProcessor 3 /* ->cpu address */ +#define kResetProcessor 4 /* ->cpu address */ +#define kSignalProcessor 5 /* ->cpu address */ +#define kStoreProcessorStatus 6 /* ->cpu address, ->status area address */ +#define kSynchClock 7 /* ->cpu address */ +#define kExternalHook 8 /* no parms */ +#define kProcessorState 9 /* ->cpu address */ +#define kRunSIGPRun 10 /* no parms */ +#define kPhoneyFirmware 11 /* Dummy kernel for alternate processors */ + +#define kMPPlugInMaxCall 11 /* set MPPlugInMaxCall to the highest-numbered call */ + + +/* *********************************************************************** + * MP Plug-In specification + * + * The address of this area is passed to the MP plugin by the initialization code. If the + * version ID and the installed hardware match the MP plugin, it returns its memory + * requirements and a table of offsets to its entry points. 
+ * *********************************************************************** */ + +#define kMPPlugInVersionID 1 + +#define kSIGPUninitializedState 0 +#define kSIGPResetState 1 +#define kSIGPStoppedState 2 +#define kSIGPOperatingState 3 +#define kSIGPErrorState 4 + +#define kSIGPnoErr 0 +#define kSIGPInvalidStateErr -3999 +#define kSIGPInterfaceBusyErr -3998 +#define kSIGPPrivilegeErr -3997 +#define kSIGPNoPlugInErr -3996 +#define kTimeBaseSynchronizationErr -3995 +#define kSIGPTargetAddrErr -3994 +#define kSIGPInvalidStatusErr -3993 + +#define kMPPlugInInstallFailed -4999 +#define kMPPlugInInternalError -4998 + +/* + * *********************************************************************** + * Signal processor request codes + * *********************************************************************** + */ + +#define SIGPast 0 /* Requests an ast on target processor */ +#define SIGPptlb 1 /* Requests a total purge of the TLB */ +#define SIGPkdb 2 /* Requests a KDB entry */ + +/* + * *********************************************************************** + * Temporary debugging error codes (well, at least as temporary as the income tax) + * *********************************************************************** + */ +#define kMPPHairyPalms -10002 +#define kMPPOffline -10003 +#define kMPPBadState -10004 +#define kMPPInvalCPU -10005 +#define kMPPCantLock -10006 +#define kMPPNotReady -10007 +#define kMPPNotStopped -10008 +#define kMPPBadCPU -10009 +#define kMPPOnly1CPU -10010 +#define kMPPBadVers -10011 +#define kMPPNotRunning -10012 +#define kMPPTimeOut -10013 +#define kMPPInitTO1 -10014 +#define kMPPInitTO2 -10015 +#define kMPPInitTO3 -10016 + + +/* + * *********************************************************************** + * Let's define some hardware stuff + * *********************************************************************** + */ + +#define Bandit1 0xF2000000 +#define PCI1AdrReg 0xF2800000 +#define GrandCentral 0xF3000000 +#define EtherNetROM 0xF3019000 +#define 
HammerHead 0xF8000000 +#define ArbConfig 0x0090 +#define TwoCPU 0x02 +#define WhoAmI 0x00B0 +#define PriCPU 0x10 +#define SecCPU 0x08 +#define IntReg 0x00C0 +#define SecInt 0x80 + + +/* + * *********************************************************************** + * Let's define the flags for MPPInterface + * *********************************************************************** + */ + +#define SpinTimeOut 30000000 + +#define MPPICmsgp 0xc0000000 /* Message pending (busy + pass) */ +#define MPPICBusy 0x80000000 /* Processor area busy, i.e., locked */ +#define MPPICPass 0x40000000 /* Busy lock passed to receiving processor */ +#define MPPICOnline 0x20000000 /* Processor is online */ +#define MPPICReady 0x10000000 /* Processor is ready, i.e., started, not reset */ +#define MPPICStop 0x08000000 /* Processor is stopped */ +#define MPPICBset 0x000000FF /* Processor that owns busy, i.e., the ID of */ + /* whomever set busy. When a busy is passed, */ + /* this is the requestor of the function. 
*/ +#define MPPICfunc 0x0000FF00 /* Current function */ +#define MPPICfIdle 0x00 /* No function pending */ +#define MPPICfStrt 0x01 /* Start the processor, physical address in */ + /* MPPIParm0 */ +#define MPPICfResm 0x02 /* Resume a stopped processor */ +#define MPPICfStop 0x03 /* Stop a processor */ +#define MPPICfSigp 0x04 /* Signal a processor */ +#define MPPICfStat 0x05 /* Store the processor machine state - */ + /* physical address of response in MPPIParm0 */ +#define MPPICfTBsy 0x06 /* Synchronize timebase - */ + /* TB image in MPPIParm0 and MPPIParm1 */ +#define MPPICfReset 0x07 /* Reset the processor */ +#define MPPICfTBsy1 0x81 /* TB sync, phase 1 */ +#define MPPICfTBsy2 0x82 /* TB sync, phase 2 */ +#define MPPICSigp 0x80000000 /* Processor has signal pending (keep signal status when stopped) */ +#define MPPICXRun 0x40000000 /* Explicit SIGP run call */ + + + +#ifndef __ASSEMBLER__ + +typedef unsigned char CPUState; +typedef unsigned int CPUNotification; + +struct MPPlugInSpec { /* This is MPSxxxx for assembler */ + unsigned int versionID; /* Version ID, must match */ + unsigned int *areaAddr; /* Virtual address of area to be */ + /* relocated to physical memory */ + unsigned int areaSize; /* Size of area to be relocated */ + unsigned int *offsetTableAddr; /* Virtual address of table of entry offsets */ + unsigned int *baseAddr; /* Common base area - used for debugging */ + unsigned int *dataArea; /* Pointer to the MP workarea - used for debugging */ + unsigned int *CPUArea; /* Pointer to the CPU workarea - used for debugging */ + unsigned int *SIGPhandler; /* Physical address of signal interrupt filter */ +}; + +typedef struct MPPlugInSpec MPPlugInSpec; +typedef MPPlugInSpec *MPPlugInSpecPtr; + +struct MPEntryPts { + unsigned int EntAddr[kMPPlugInMaxCall+1]; /* Real addresses of all plugin entry points */ +}; + +typedef struct MPEntryPts MPEntryPts; + +struct SystemRegister { + unsigned int regno; + unsigned int contents; +}; + +typedef struct 
SystemRegister SystemRegister; + +typedef struct FPRegs { + unsigned int lo; + unsigned int hi; +} FPRegs; + +struct BATregs { + unsigned int upper; + unsigned int lower; +}; + +typedef struct BATregs BATregs; + + +#define kSysRegCount 16 + +struct CPUStatusArea { /* 0000 This is CSAxxxxx for assembler */ + +/* + * Note that this guy always has to be in one-to-one mapped area contiguously + */ + + CPUState state; /* 0000 */ + unsigned char regsAreValid; /* 0001 */ + unsigned char filler[2]; /* 0002 */ + unsigned int gpr[32]; /* 0004 */ + FPRegs fpr[32]; /* 0084 */ + unsigned int cr; /* 0184 */ + unsigned int fpscr; /* 0188 */ + unsigned int xer; /* 018C */ + unsigned int lr; /* 0190 */ + unsigned int ctr; /* 0194 */ + unsigned int tbu; /* 0198 This is rtcu on 601. */ + unsigned int tbl; /* 019C This is rtcl on 601. */ + unsigned int pvr; /* 01A0 */ + BATregs ibat[4]; /* 01A4 */ + BATregs dbat[4]; /* 01E4 */ + unsigned int sdr1; /* 0224 */ + unsigned int sr[16]; /* 0228 */ + unsigned int dar; /* 0268 */ + unsigned int dsisr; /* 026C */ + unsigned int sprg[4]; /* 0270 */ + unsigned int srr0; /* 0280 */ + unsigned int srr1; /* 0284 */ + unsigned int dec; /* 0288 */ + unsigned int dabr; /* 028C */ + unsigned int iabr; /* 0290 */ + unsigned int ear; /* 0294 */ + unsigned int hid[16]; /* 0298 */ + unsigned int mmcr[2]; /* 02D8 */ + unsigned int pmc[4]; /* 02E0 */ + unsigned int pir; /* 02F0 */ + unsigned int sda; /* 02F4 */ + unsigned int sia; /* 02F8 */ + unsigned int mq; /* 02FC */ + + unsigned int msr; /* 0300 */ + unsigned int pc; /* 0304 */ + + SystemRegister sysregs[kSysRegCount]; /* 0308 */ + + unsigned int filler2[6]; /* 0388 Always pad up to 32-byte boundary */ + /* 03A0 */ +}; + +typedef struct CPUStatusArea CPUStatusArea; +typedef CPUStatusArea *CPUStatusAreaPtr; + +extern CPUStatusArea CSA[NCPUS]; + +struct SenseInfo { + CPUNotification notification; + CPUState state; +}; + +typedef struct SenseInfo SenseInfo; +typedef SenseInfo *SenseInfoPtr; + + +struct 
MPPInterface { + + unsigned int MPPICStat; /* Processor status (interlocked update for this one) */ + unsigned int MPPICParm0; /* SIGP parm 0 */ + unsigned int MPPICParm1; /* SIGP parm 1 */ + unsigned int MPPICParm2; /* SIGP parm 2 */ + unsigned int MPPICspare0; /* unused */ + unsigned int MPPICspare1; /* unused */ + unsigned int MPPICParm0BU; /* Parm 0 backed up here at 'rupt time for safe keeping */ + unsigned int MPPICPriv; /* Processor status (interlocked update for this one) */ +}; + +typedef struct MPPInterface MPPInterface; +typedef MPPInterface *MPPInterfacePtr; + +extern MPPInterface MPPICPUs[]; + + +/* *********************************************************************** + * Function prototypes and data areas + * *********************************************************************** */ + +extern unsigned int MPgetProcCount (void); +extern unsigned int MPstart (unsigned int cpu, unsigned int sadr, unsigned int parm); +extern unsigned int MPexternalHook (void); +extern unsigned int MPsignal (unsigned int cpu, unsigned int SIGPparm); +extern unsigned int MPstop (unsigned int cpu); +#if 0 +extern unsigned int MPCPUAddress (void); +extern unsigned int MPresume (unsigned int cpu); +extern unsigned int MPreset (unsigned int cpu); +extern unsigned int MPSense (unsigned int cpu, unsigned int *info); +extern unsigned int MPstoreStatus (unsigned int cpu, unsigned int *statusArea); +extern unsigned int MPSetStatus (unsigned int cpu, unsigned int *statusArea); +extern unsigned int MPgetSignal (void); +extern unsigned int MPsyncTB (void); +extern unsigned int MPcheckPending (void); +#endif +extern int MPinstall (unsigned int physAddr, unsigned int band1, unsigned int hammerh, unsigned int grandc, + unsigned int pci1ar, unsigned int enetr); +extern unsigned int MPprobe (MPPlugInSpecPtr spec, unsigned int hammerh); + +extern void start_secondary (void); +extern void mp_intr (void); + + +extern MPPlugInSpec MPspec; /* An area for the MP interfaces */ +extern MPEntryPts 
MPEntries; /* Real addresses of plugin routines */ + +#endif /* ndef __ASSEMBLER */ diff --git a/osfmk/ppc/POWERMAC/mp/MP_2p.s b/osfmk/ppc/POWERMAC/mp/MP_2p.s new file mode 100644 index 000000000..73c54e0fd --- /dev/null +++ b/osfmk/ppc/POWERMAC/mp/MP_2p.s @@ -0,0 +1,2409 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT_INTERNAL_USE_ONLY@ + */ + +/* + MP_2p.s + + MP low-level signaling, configuration, et all. 
This is for an Apple/Daystar 2p board + + Lovingly crafted by Bill Angell using traditional methods + +*/ + +#include +#include +#include +#include +#include + + + + .set MPPlugInVersion,0 /* Current version code */ + +/* */ +/* Interfaces to hardware */ +/* */ + + .set PCI1ARdisp, 0x00800000 /* Displacement from Bandit to PCI1 address configuration register */ + .set GrandCdisp, 0x01000000 /* Displacement from Bandit to Grand Central */ + .set EventsReg, 0x20 /* Interruption events register (latched) */ + .set LevelsReg, 0x2C /* Interruption levels register (unlatched) */ + .set MaskReg, 0x24 /* Interruption mask register */ + .set ClearReg, 0x28 /* Interruption clear register */ + .set TicksPerMic, 11 /* We'll use 11 ticks per µS - 120MHz is really 10, 180MHz is 11.24 */ + .set EtherNRdisp, 0x01019000 /* Displacement into bandit of EtherNet ROM */ + +#ifdef __ELF__ + .section ".data" +#else + .data +#endif + + .align 5 /* Get us out to the end */ + + .globl MPPIwork +#ifdef __ELF__ + .type MPPIwork,@function +#endif + +MPPIwork: +MPPIstatus: .byte 0 /* Global MP board status */ + .set MPPIinit, 0x80 /* Global initialization complete */ + .set MPPI2Pv2, 0x40 /* Second rev of 2P board (no watchdog and different state machine) */ + .byte 0 /* Reserved */ +MPPIinst: .byte 0 /* Mask of CPUs installed */ +MPPIonline: .byte 0 /* Mask of CPUs online (i.e., initialized) */ +MPPIlogCPU: .long 0 /* Used to configure CPU addresses */ +MPPITBsync: .long 0 /* Used to sync time bases */ + .long 0 +MPPIHammer: .long 0 /* Address of HammerHead */ +MPPIGrandC: .long 0 /* Address of GrandCentral */ +MPPIPCI1Adr: .long 0 /* Address of PCI1's config reg addr */ +MPPIEther: .long 0 /* Address of EtherNet ROM */ + + .align 5 +MPPISncFght: .fill 4,4,0 /* Space for 9 passes of a TB sync fight + 1 guard pass */ + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .fill 4,4,0 + .align 7 /* Point to the start of the CPU status 
*/ + + .globl EXT(MPPICPUs) +#ifdef __ELF__ + .type EXT(MPPICPUs),@function +#endif +EXT(MPPICPUs): /* Start of Processor specific areas */ +/* There are 8 of these indexed by processor number */ + + +MPPICPU0: .fill 8,4,0 /* First processor */ +MPPICPU1: .fill 8,4,0 /* Second processor */ +MPPICPU2: .fill 8,4,0 /* Third processor */ +MPPICPU3: .fill 8,4,0 /* Fourth processor */ + .set MPPIMaxCPU, (.-EXT(MPPICPUs)-32)/32 /* Get the maximum CPU address */ + + + .text + +/******************************************************************************************************** */ +/******************************************************************************************************** */ +/* */ +/* Here starteth ye stuff */ +/* */ +/******************************************************************************************************** */ +/******************************************************************************************************** */ + +/******************************************************************************************************** */ +/* */ +/* Validate that the hardware matches with our code. At this point, we cannot check */ +/* for anything other than the possibility of this working. There's no version code */ +/* or nothin'. So, if we have a second processor and are a 604 or 604e, we'll say */ +/* we're capable. Also we'll check version codes for our code. */ +/* */ +/* When we get here, DDAT and IDAT are both on, 'rupts are disabled. */ +/* */ +/* We're called like this: */ +/* OSStatus MP_probe(MPPlugInSpecPtr spec, UInt32 HammerheadAddr); */ +/* */ +/******************************************************************************************************** */ + +ENTRY(MPprobe, TAG_NO_FRAME_USED) + + +MPPIbase: mfpvr r7 /* Get the processor version */ + rlwinm r7,r7,16,16,31 /* Isolate the processor type */ + + lbz r5,ArbConfig(r4) /* See if there is another processor */ + + andi. r5,r5,TwoCPU /* Are we a real live two processor? 
*/ + beq OneWay /* Nope, we be gone... */ + + cmplwi cr0,r7,4 /* Are we a 604? */ + beq SeemsOK /* Yeah, we're cool... */ + cmplwi cr0,r7,9 /* Are we a 604E? */ + beq SeemsOK /* Yeah, go finish up... */ + +OneWay: li r3,0 /* Say we can't find the proper CPU */ + blr /* Leave... */ + +SeemsOK: mr r10,r3 /* Save the parameter list */ + + lwz r4,MPSversionID(r10) /* Get the version ID */ + cmplwi cr0,r4,kMPPlugInVersionID /* Correct version? */ + beq IsOK /* Yeah, we think we're ok... */ + + li r3,0 /* Set bad version' */ + blr /* Leave... */ + +IsOK: mflr r11 /* Save the LR */ + lis r9,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ + bl SetBase1 /* Jump to the next instruction */ +SetBase1: mflr r12 /* Get the base register */ + ori r9,r9,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ + addi r12,r12,LOW_ADDR(MPPIbase-SetBase1) /* Adjust to the start of all our code */ + + stw r12,MPSbaseAddr(r10) /* Save off the common base for all functions */ + + la r5,LOW_ADDR(MPPIFunctions-MPPIbase)(r12) /* Point to the base of all functions */ + stw r5,MPSareaAddr(r10) /* Pass back the code address */ + + la r5,LOW_ADDR(MPPIFuncOffs-MPPIbase)(r12) /* Point to the function offset table */ + stw r5,MPSoffsetTableAddr(r10) /* Pass back the pointer to the offset table */ + + li r5,LOW_ADDR(MPPISize-MPPIFunctions) /* Get our size without data area */ + stw r5,MPSareaSize(r10) /* Save it */ + + stw r9,MPSdataArea(r10) /* Save it */ + + la r5,LOW_ADDR(EXT(MPPICPUs)-MPPIwork)(r9) /* Point to the CPU area base */ + stw r5,MPSCPUArea(r10) /* Save it */ + + mtlr r11 /* Restore that return address */ + li r3,1 /* Set no error */ + blr /* Leave, we're all done... */ + +/******************************************************************************************************** */ +/******************************************************************************************************** */ +/* */ +/* Here starteth ye code that starteth up ye second prothether. 
*/ +/* Yea, though ye prothether executeth asynchronously, it appears unto men */ +/* in ye shape of a synchronous process. By ye instruction of He who gave it */ +/* form and being, it stopeth to worship and praise its Lord, to joyously */ +/* receive His blessings and teachings, to guide its way along the path to */ +/* righteous execution. */ +/* */ +/******************************************************************************************************** */ +/******************************************************************************************************** */ + + +/******************************************************************************************************** */ +/* */ +/* Initialize the MP hardware. This will bring the other processor online. */ +/* */ +/* First we will tick the board to its 5th state the "TBEN off" state. */ +/* */ +/* Just for giggles, here's the states: */ +/* */ +/* 1) 1st ROM - This state exists after motherboard reset */ +/* 2) Open Firmware - Transitions here when the SecInt line is first asserted */ +/* Open Firmware attempts to execute some code on the secondary */ +/* processor to obtain the PVR register. It's got some problems */ +/* and hangs the secondary disabled. */ +/* 3) Reset (my name) - Entered when the SecInt line is deasserted. A timer starts and */ +/* 468µS later the reset line is pulled. I may have this wrong here, */ +/* it may be that the reset line is held for 468µS. Either way, */ +/* this state is invisible to us. */ +/* 4) 2nd ROM - This state exists when the secondary processor begins executing */ +/* after the reset. */ +/* 5) TBEN off - We transition here when SecInt is asserted in the 2nd ROM state. */ +/* In this state, the TBEN pin is set to disable the timebase from */ +/* running on all processors, thus freezing time. (Performace analysis */ +/* note: here would be the best time to run stats, all tests would */ +/* run in 0 time giving us infinite speed.) 
Also the "primary arbitration" */ +/* mode is set. This mode causes the CPU board to arbitrate both processors */ +/* using a single bus master. This gets us around the L2 cache dumbness. */ +/* We should also note that because of this, there is now no way to */ +/* tell if we are on the secondary processor, the WhoAmI register will */ +/* always indicate the primary processor. We need to have sewn */ +/* name tags into our underwear before now. */ +/* Finally, this state is the only way we can tell if we are executing */ +/* on the older version of the 2-way board. When it is in this state */ +/* "primary arbitration" has not been enabled yet. The WhoAmI register */ +/* will indicate if we are on the secondary processor on not. We should */ +/* check this because we need to do signals differently. */ +/* 6) TBEN on - The next assertion of SecInt brings us to our final destination. For */ +/* those of you who will be deplaning, please remember that timebases */ +/* are running and primary arbitration is enabled. Always remember: */ +/* buckle up for safety and if you're tired pull over for a rest. 
*/ +/* */ +/******************************************************************************************************** */ + +ENTRY(MPinstall, TAG_NO_FRAME_USED) + +/* int MP_install(unsigned int *physAddr, unsigned int band1, unsigned int hammerh, unsigned int grandc, + * unsigned int pci1ar, unsigned int enetr); + */ + + lis r11,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ + mflr r0 /* Save the LR */ + ori r11,r11,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ + + stw r5,MPPIHammer-MPPIwork(r11) /* Save the HammerHead address for later */ + stw r6,MPPIGrandC-MPPIwork(r11) /* Save address of Grand Central */ + stw r7,MPPIPCI1Adr-MPPIwork(r11) /* Save the PCI1 address register address */ + stw r8,MPPIEther-MPPIwork(r11) /* Save Ethernet ROM address */ + + li r4,LOW_ADDR(0xC080) /* Set CPU 0&1 installed, CPU 0 online */ + lis r10,(MPPICOnline+MPPICReady)>>16 /* Set CPU 0 online and ready */ + + mfspr r6,pir /* Get the PIR contents */ + + sth r4,MPPIinst-MPPIwork(r11) /* Set 'em for later */ + rlwinm r6,r6,0,0,27 /* Clear to use processor 0 */ + stw r10,EXT(MPPICPUs)-MPPIwork(r11) /* Preset CPU 0 online and ready */ + + mtspr pir,r6 /* Set our PIR */ + +/* */ +/* Ok, ok, enough of this. Let's really start 'em up. */ +/* */ + + lis r9,HIGH_ADDR(CPUInit) /* Top of init code */ + li r6,1 /* Get the other guy's CPU address */ + ori r9,r9,LOW_ADDR(CPUInit) /* Get physical address of init code */ + + mfmsr r8 /* Get the MSR */ + + stw r6,MPPIlogCPU-MPPIwork(r11) /* Set the logical CPU address to assign */ + + rlwinm r6,r8,0,17,15 /* Turn off interruptions */ + sync /* Make sure the work area is updated */ + mtmsr r6 /* Flip the EE bit off */ + isync /* Chill a bit */ + + stw r9,0(r7) /* Pass the initialization code address to our friend */ + sync /* Fence off the pig */ + + li r6,0 /* Clear this out */ + stb r6,IntReg(r5) /* Kick the other processor */ + eieio /* Pig in the sty */ + +/* At this point we should be in the "TBEN off" state. 
The second processor should be starting */ +/* to come up. */ + +/* Note that we are assuming that the secondary processor will reset the interrupt request. */ +/* If we are on one of the old boards, we will die in about 256µS if it is not reset, 'cause */ +/* of that silly watchchihuahua timer. We can't use the TB or decrementer here to set a */ +/* timeout because when we are in "TBEN off" state these guys don't run. */ + + lis r4,HIGH_ADDR(SpinTimeOut) /* Get about 1 second at 200MHz */ + /* At 120 MHz this is 1.66 seconds, at 400MHz it is .5 */ + /* All these are more than enough time for this handshake */ + ori r4,r4,LOW_ADDR(SpinTimeOut) /* Get the bottom part */ + +WaitReady: lwz r9,0(r7) /* Get this back */ + mr. r9,r9 /* The other processor will set to 0 */ + /* when it is ready for the work area address */ + beq CodeUp /* The code is up on the other side */ + subi r4,r4,1 /* Count the try */ + mr. r4,r4 /* Did we timeout? */ + bne+ WaitReady /* Nope... */ + + li r3,kMPPInitTO1 /* Set that we timed out with initial code bringup */ + mtmsr r8 /* Restore the interrupt state */ + mtlr r0 /* Restore the return address */ + blr /* Return a failure... */ + +CodeUp: isync /* Make sure we don't prefetch past here */ + +/* Timebase is stopped here, no need for the funky "get time base right" loop */ + + mftbu r4 /* Get upper timebase half */ + mftb r9 /* Get bottom */ + stw r4,MPPITBsync-MPPIwork(r11) /* Save the top */ + stw r9,MPPITBsync+4-MPPIwork(r11) /* Save the second half */ + sync /* Be very sure it's there */ + + stw r11,0(r7) /* Set the PCI1 adr reg non-zero - this releases the spin */ + /* loop and allows the timebase to be set. */ + eieio + + lis r9,HIGH_ADDR(SpinTimeOut) /* Get the spin time */ + ori r9,r9,LOW_ADDR(SpinTimeOut) /* Get the bottom part */ + +WaitTBset: lwz r4,0(r7) /* Get this back */ + mr. r4,r4 /* When zero, the other guy's TB is set up */ + beq- TBSetUp /* She's'a all done... */ + subi r9,r9,1 /* Count the try */ + mr. 
r9,r9 /* Did we timeout? */ + bne+ WaitTBset /* Nope... */ + + li r3,kMPPInitTO3 /* Set that we timed out setting clock */ + mtmsr r8 /* Restore the interrupt state */ + isync + mtlr r0 /* Restore the return addess */ + blr /* Return a failure... */ + +TBSetUp: stb r6,IntReg(r5) /* Kick the other processor again */ + /* This will tick us to the next state */ + eieio + +SpinDelay: addi r6,r6,1 /* Bump spin count (we finally are trashing R6) */ + cmplwi cr0,r6,4096 /* Spun enough? */ + ble+ SpinDelay /* Nope... */ + + li r6,SecInt /* Set the interrupt bit */ + stb r6,IntReg(r5) /* Deassert the external signal */ +/* */ +/* Ok, the other processor should be online in a spin waiting for a start signal from */ +/* us. It should be in the reset state with no external interruptions pending. There may */ +/* be a decrimenter pop waiting in the wings though. */ +/* */ + + lwz r7,MPPIGrandC-MPPIwork(r11) /* Point to GrandCentral */ + lwz r4,MaskReg(r7) /* Get the grand central mask register (note that this */ + /* is a little-endian area, but I'm too lazy to access it that way */ + /* so I'll document what it really should be, but, probably, it would */ + /* have been much, much easier just to code up the lwbrx and be done */ + /* with it rather than producing this monograph describing my alternate */ + /* access method that I really don't explain anyway. */ + ori r4,r4,0x0040 /* Flip on bit 30 (hah, figure that one out). This enables the */ + /* Ext10 interrupt which is connected to the MACE ethernet chip's */ + /* chip-select pin. */ + stw r4,MaskReg(r7) /* Stick it on back */ + eieio + + mtlr r0 /* Get back the original LR */ + sync /* Make sure all storage ops are done */ + mtmsr r8 /* Restore the MSR */ + isync + li r3,kSIGPnoErr /* Set that we worked jest fine and dandy */ + blr /* Bye now... 
*/ + + .align 5 +/******************************************************************************************************** */ +/******************************************************************************************************** */ +/* */ +/* This is where the individual SIGP function calls reside. */ +/* Also, it is where we cram the second processor's initialization code wo'w we */ +/* can use physical addressing. */ +/* */ +/******************************************************************************************************** */ +/******************************************************************************************************** */ + +MPPIFunctions: /* Start of all externally called functions and interrupt handling code */ + + +/******************************************************************************************************** */ +/* */ +/* Count the number of processors. This hardwires to 2 (or 1 if no secondary) */ +/* */ +/******************************************************************************************************** */ + +CountProcessors: + lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ + mfmsr r9 /* Get the MSR */ + ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ + + ori r10,r9,0x0010 /* Turn on DDAT */ + + lwz r8,MPPIHammer-MPPIwork(r12) /* Point to the HammerHead controller */ + + mtmsr r10 /* Turn on DDAT */ + isync /* Kill speculation */ + + li r3,2 /* Assume we have them all */ + lbz r5,ArbConfig(r8) /* Check if we've seen a second processor */ + andi. r5,r5,TwoCPU /* Are we a real live two processor? */ + mtmsr r9 /* Put back the DDAT */ + isync + + bnelr+ /* Yeah... */ + li r3,1 /* Nope, set a count of 1 */ + blr /* Leave, we're inadequate... 
*/ + +/******************************************************************************************************** */ +/* */ +/* Start up the selected processor (R3=processor; R4=physical start address; R5=pass-thru parm) */ +/* */ +/******************************************************************************************************** */ + +StartProcessor: + + mr r7,r5 /* Copy pass-thru parameter */ + mfspr r10,pir /* Get our processor number */ + rlwinm r9,r3,5,23,26 /* Get index into CPU array */ + cmplw cr0,r3,r10 /* Trying to start ourselves? */ + lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ + cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ + li r3,kMPPHairyPalms /* Set trying to do it to ourselves */ + beqlr- /* Self abuse... */ + li r3,kSIGPTargetAddrErr /* CPU number is too big */ + bgtlr- cr1 /* Sure are... (Get our address also) */ + ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ + la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ + mflr r11 /* Save the return address */ + add r9,r9,r12 /* Point right at the entry */ + +SPretry: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ + li r3,kSIGPInterfaceBusyErr /* Fill dead space and get busy return code */ + rlwinm. r0,r5,0,0,0 /* Are we marked as busy? */ + lis r6,MPPICOnline>>16 /* Get the online flag */ + bne- ErrorReturn /* Yeah, go leave, don't bother me now... */ + and. r0,r5,r6 /* Are we online */ + li r3,kMPPOffline /* Set offline */ + beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ + li r3,kMPPBadState /* Set bad state */ + oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Turn on the busy bit */ + + stwcx. 
r5,0,r9 /* Try to set busy */ + bne- SPretry + + ori r6,r10,MPPICfStrt<<8 /* Put the Start function in front of the processor ID */ + rlwimi r5,r6,0,16,31 /* Put these behind the status flags */ + stw r4,MPPICParm0(r9) /* Set the starting physical address parameter */ + stw r7,MPPICParm2(r9) /* Set pass-thru parameter */ + + sync /* Make sure it's all out there */ + b KickAndGo /* We're done now... */ + +/******************************************************************************************************** */ +/* */ +/* Reset the selected processor (R3=processor). You can't reset yourself or the primary. */ +/* We're gonna try, try real hard... This is not for the faint-of-heart. */ +/* If there's ever any way to yank a reset line, we'll do it here. */ +/* */ +/******************************************************************************************************** */ + +ResetProcessor: + mfspr r10,pir /* Get our processor number */ + rlwinm r9,r3,5,23,26 /* Get index into CPU array */ + rlwinm r10,r10,0,28,31 /* Clean up the PIR */ + cmplw cr0,r3,r10 /* Trying to start ourselves? */ + cmplwi cr1,r3,MPPIMaxCPU /* See if we are bigger than max */ + li r3,kMPPHairyPalms /* Set trying to do it to ourselves */ + beqlr- /* Self abuse... */ + mr. r9,r9 /* Trying to reset the primary?!? Dude, that's insubordination!!!! */ + lis r12,HIGH_ADDR(MPPIwork) /* Get the top half of the data area */ + li r3,kMPPInvalCPU /* Say that that's a major offense */ + beqlr- /* Bye now... */ + li r3,kSIGPTargetAddrErr /* CPU number is too big */ + bgtlr- cr1 /* Sure are... 
(Get our address also) */ + ori r12,r12,LOW_ADDR(MPPIwork) /* Get the bottom half of the data area */ + + la r9,EXT(MPPICPUs)-MPPIwork(r9) /* Point into the proccessor control area */ + mflr r11 /* Save the return address */ + add r9,r9,r12 /* Point right at the entry */ + + li r4,16 /* Try for 16 times to get the busy lock */ + +RSlockS: mftb r6 /* Time stamp start */ + +RSlock: lwarx r5,0,r9 /* Pick up the status flags (MPPICStat) and reserve it */ + rlwinm. r0,r5,0,2,2 /* Are we online */ + li r3,kMPPOffline /* Set offline */ + cmplwi cr1,r5,0 /* Check for busy */ + beq- ErrorReturn /* Ain't online, ain't ready, buzz off... */ + bge+ cr1,RSnotBusy /* Not busy, make it so... */ + + mftb r7 /* Stamp the time */ + sub r7,r7,r6 /* Get elapsed time */ + rlwinm. r7,r7,16,16,31 /* Divide ticks by microseconds (this is pretty darn "kinda-in-the-ballpark") */ + cmplwi cr0,r7,TicksPerMic /* See if we hit 65536µS yet */ + blt+ RSlock /* Not yet... */ + +RSatmtCnt: subi r4,r4,1 /* Count the retries */ + mr. r4,r4 /* Are we done yet? */ + bgt+ RSlockS /* Start the lock attempt again... */ + + li r3,kMPPCantLock /* Say we can't get the lock */ + b ErrorReturn /* Bye, dude... */ + +RSnotBusy: rlwinm r5,r5,0,0,15 /* Clear out the function and requestor */ + oris r5,r5,(MPPICBusy>>16)&0x0000FFFF /* Set busy */ + or r5,r10,r5 /* Add in our processor */ + ori r5,r5,MPPICfReset<<8 /* Set the reset function */ + stwcx. r5,0,r9 /* Cram it back */ + bne- RSatmtCnt /* We lost the reservation... */ + b KickAndGo /* Try to send it across... */ + + +/******************************************************************************************************** */ +/* */ +/* Here we will try to resume execution of a stopped processor (R3=processor). 
*/
/*																										*/
/********************************************************************************************************/

ResumeProcessor:
		mfspr	r10,pir							/* Get our processor number */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		cmplw	cr0,r3,r10						/* Trying to resume ourselves? */
		cmplwi	cr1,r3,MPPIMaxCPU				/* See if we are bigger than max */
		li		r3,kMPPHairyPalms				/* Set trying to do it to ourselves */
		beqlr-									/* Self abuse... */
		li		r3,kSIGPTargetAddrErr			/* CPU number is too big */
		bgtlr-	cr1								/* Sure are... (Get our address also) */
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		mflr	r11								/* Save the link register */
		add		r9,r9,r12						/* Point right at the entry */

RPretry:	lwarx	r5,0,r9						/* Pick up the status flags (MPPICStat) and reserve it */
		li		r3,kSIGPInterfaceBusyErr		/* Fill dead space and get busy return code */
		rlwinm.	r0,r5,0,0,0						/* Are we marked as busy? */
		lis		r6,MPPICOnline>>16				/* Get the online flag */
		bne-	ErrorReturn						/* Yeah, go leave, don't bother me now... */
		and.	r0,r5,r6						/* Are we online */
		li		r3,kMPPOffline					/* Set offline */
		lis		r6,MPPICReady>>16				/* Get the ready bit */
		beq-	ErrorReturn						/* Ain't online, ain't ready, buzz off... */
		and.	r0,r5,r6						/* Are we ready? */
		li		r3,kMPPNotReady					/* Set not ready */
		lis		r6,MPPICStop>>16				/* Get the stopped bit */
		beq-	ErrorReturn						/* Ain't ready, buzz off... */
		and.	r0,r5,r6						/* Are we stopped? */
		li		r3,kMPPNotStopped				/* Set not stopped */
		oris	r5,r5,(MPPICBusy>>16)&0x0000FFFF	/* Turn on the busy bit */
		beq-	ErrorReturn						/* Nope, not stopped, so how do we resume? */

		stwcx.	r5,0,r9							/* Try to set busy */
		bne-	RPretry

		ori		r6,r10,MPPICfResm<<8			/* Put the resume function in front of the processor ID */
		rlwimi	r5,r6,0,16,31					/* Put these behind the status flags */
		b		KickAndGo						/* We're done now... */



/********************************************************************************************************/
/*																										*/
/*	Here we will try to stop execution of a running processor (R3=processor).							*/
/*																										*/
/********************************************************************************************************/

StopProcessor:
		mfspr	r10,pir							/* Get our processor number */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		cmplw	cr0,r3,r10						/* Are we doing ourselves? */
		cmplwi	cr1,r3,MPPIMaxCPU				/* See if we are bigger than max */
		li		r3,kMPPHairyPalms				/* Set trying to do it to ourselves */
		beqlr-									/* Self abuse... */
		li		r3,kSIGPTargetAddrErr			/* CPU number is too big */
		bgtlr-	cr1								/* Sure are... (Get our address also) */
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		mflr	r11								/* Save the link register */
		add		r9,r9,r12						/* Point right at the entry */

PPretry:	lwarx	r5,0,r9						/* Pick up the status flags (MPPICStat) and reserve it */
		li		r3,kSIGPInterfaceBusyErr		/* Fill dead space and get busy return code */
		rlwinm.	r0,r5,0,0,0						/* Are we marked as busy? */
		lis		r6,MPPICOnline>>16				/* Get the online flag */
		bne-	ErrorReturn						/* Yeah, go leave, don't bother me now... */
		and.	r0,r5,r6						/* Are we online */
		li		r3,kMPPOffline					/* Set offline */
		lis		r6,MPPICReady>>16				/* Get the ready bit */
		beq-	ErrorReturn						/* Ain't online, ain't ready, buzz off... */
		and.	r0,r5,r6						/* Are we ready? */
		li		r3,kMPPNotReady					/* Set not ready */
		lis		r6,MPPICStop>>16				/* Get the stopped bit */
		beq-	ErrorReturn						/* Ain't ready, buzz off... */
		and.	r0,r5,r6						/* Are we stopped? */
		li		r3,kMPPNotRunning				/* Set not running */
		oris	r5,r5,(MPPICBusy>>16)&0x0000FFFF	/* Turn on the busy bit */
		bne-	ErrorReturn						/* Nope, already stopped, so how do we stop? */

		stwcx.	r5,0,r9							/* Try to set busy */
		ori		r10,r10,MPPICfStop<<8			/* Put the stop function in front of the processor ID */
		bne-	PPretry

		rlwimi	r5,r10,0,16,31					/* Put these behind the status flags */
		b		KickAndGo						/* We're done now... */


/********************************************************************************************************/
/*																										*/
/*	Here we will try to signal a running processor (R3=processor).										*/
/*	Note that this should have good performance.  Well, actually, seeing as how slow we really are, it	*/
/*	probably is moot anyhow.																			*/
/*	Another note: this function (and almost all others as well) will return a timeout when the			*/
/*	second processor tries to do itself on the old version of the board.  This happens because			*/
/*	in order to keep the watchchihuahua from popping (just imagine the scene: that little runt-dog just	*/
/*	so excited that its veins and eyes bulge and then explode) signaling to the secondary				*/
/*	is done synchronously and disabled.  If the secondary signals the secondary, it will never enable	*/
/*	so it will never see the 'rupt, so it will never clear it, so it will time out, so there...			*/
/*																										*/
/********************************************************************************************************/

SignalProcessor:
		mfspr	r10,pir							/* Get our processor number */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		cmplwi	cr1,r3,MPPIMaxCPU				/* See if we are bigger than max */
		li		r3,kSIGPTargetAddrErr			/* CPU number is too big */
		bgtlr-	cr1								/* Sure are... (Get our address also) */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		mflr	r11								/* Save the link register */
		add		r9,r9,r12						/* Point right at the entry */

SiPretry:	lwarx	r5,0,r9						/* Pick up the status flags (MPPICStat) and reserve it */
		li		r3,kSIGPInterfaceBusyErr		/* Fill dead space and get busy return code */
		rlwinm.	r0,r5,0,0,0						/* Are we marked as busy? */
		lis		r6,MPPICOnline>>16				/* Get the online flag */
		bne-	ErrorReturn						/* Yeah, go leave, don't bother me now... */
		and.	r0,r5,r6						/* Are we online */
		li		r3,kMPPOffline					/* Set offline */
		lis		r6,MPPICReady>>16				/* Get the ready bit */
		beq-	ErrorReturn						/* Ain't online, ain't ready, buzz off... */
		and.	r0,r5,r6						/* Are we ready? */
		li		r3,kMPPNotReady					/* Set not ready */
		oris	r5,r5,(MPPICBusy>>16)&0x0000FFFF	/* Turn on the busy bit */
		beq-	ErrorReturn						/* Ain't ready, buzz off... */

		stwcx.	r5,0,r9							/* Try to set busy */
		ori		r10,r10,MPPICfSigp<<8			/* Put the SIGP function in front of the processor ID */
		bne-	SiPretry

		stw		r4,MPPICParm0(r9)				/* Pass along the SIGP parameter */

		rlwimi	r5,r10,0,16,31					/* Put these behind the status flags */
		b		KickAndGo						/* We're done now... */


/********************************************************************************************************/
/*																										*/
/*	Here we will store the state of a processor (R3=processor; R4=status area).							*/
/*	Self abuse will store the state as is, is not asynchronous, and grows hair on your palms.			*/
/*																										*/
/********************************************************************************************************/

StoreProcessorStatus:
		mfspr	r10,pir							/* Get our processor number */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		cmplw	cr0,r3,r10						/* Saving our own state???  Abusing oneself??? */
		cmplwi	cr1,r3,MPPIMaxCPU				/* See if we are bigger than max */
		li		r3,kSIGPTargetAddrErr			/* CPU number is too big */
		mflr	r11								/* Save the link register */
		beq		Flagellant						/* Oh baby, oh baby... */
		bgtlr-	cr1								/* Sure are... (Get our address also) */
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		add		r9,r9,r12						/* Point right at the entry */

SSretry:	lwarx	r5,0,r9						/* Pick up the status flags (MPPICStat) and reserve it */
		li		r3,kSIGPInterfaceBusyErr		/* Fill dead space and get busy return code */
		rlwinm.	r0,r5,0,0,0						/* Are we marked as busy? */
		lis		r6,MPPICOnline>>16				/* Get the online flag */
		bne-	ErrorReturn						/* Yeah, go leave, don't bother me now... */
		and.	r0,r5,r6						/* Are we online */
		li		r3,kMPPOffline					/* Set offline */
		beq-	ErrorReturn						/* Ain't online, buzz off... */
		oris	r5,r5,(MPPICBusy>>16)&0x0000FFFF	/* Turn on the busy bit */

		stwcx.	r5,0,r9							/* Try to set busy */
		ori		r10,r10,MPPICfStat<<8			/* Put the store status function in front of the processor ID */
		bne-	SSretry							/* Lost reservation, return busy... */

		li		r0,0							/* Get false */
		stb		r0,CSAregsAreValid(r4)			/* Set that the registers ain't valid */
		stw		r4,MPPICParm0(r9)				/* Set the status area physical address parameter */

		rlwimi	r5,r10,0,16,31					/* Put these behind the status flags */
		b		KickAndGo						/* We're done now... */

/* Spill one's seed upon the soil */

Flagellant:	bl		StoreStatus					/* Go store off all the registers 'n' stuff */
		mtlr	r11								/* Restore the return address */
		li		r3,kSIGPnoErr					/* Return no error */
		blr										/* Leave... */


/********************************************************************************************************/
/*																										*/
/*	Here we will attempt to synchronize clocks (R3=processor).											*/
/*	Self abuse will just return with an all-ok code.
*/
/*																										*/
/********************************************************************************************************/

SynchClock:
		mfspr	r10,pir							/* Get our processor number */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		cmplw	cr0,r3,r10						/* Cleaning our own clock?? */
		cmplwi	cr1,r3,MPPIMaxCPU				/* See if we are bigger than max */
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		li		r3,kSIGPnoErr					/* Assume self-cleaning clock */
		beqlr									/* Oh baby, oh baby... */
		li		r3,kSIGPTargetAddrErr			/* CPU number is too big */
		bgtlr-	cr1								/* Sure are... (Get our address also) */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		mflr	r11								/* Save the link register */
		add		r9,r9,r12						/* Point right at the entry */

SyCretry:	lwarx	r5,0,r9						/* Pick up the status flags (MPPICStat) and reserve it */
		li		r3,kSIGPInterfaceBusyErr		/* Fill dead space and get busy return code */
		rlwinm.	r0,r5,0,0,0						/* Are we marked as busy? */
		lis		r6,MPPICOnline>>16				/* Get the online flag */
		bne-	ErrorReturn						/* Yeah, go leave, don't bother me now... */
		and.	r0,r5,r6						/* Are we online */
		li		r3,kMPPOffline					/* Set offline */
		beq-	ErrorReturn						/* Ain't online, ain't ready, buzz off... */
		oris	r5,r5,(MPPICBusy>>16)&0x0000FFFF	/* Turn on the busy bit */
		li		r0,0							/* Clear this */

		stwcx.	r5,0,r9							/* Try to set busy */
		ori		r10,r10,MPPICfTBsy<<8			/* Put the timebase sync function in front of the processor ID */
		bne-	SyCretry						/* Lost reservation, return busy... */

		stw		r0,MPPITBsync+4-MPPIwork(r12)	/* Make sure the parm area is 0 */
		mr		r0,r11							/* Save the LR */
		bl		SyCbase							/* Get a base register */
SyCbase:	rlwimi	r5,r10,0,16,31				/* Put these behind the status flags */
		mflr	r11								/* Get the base */
		la		r11,(4*4)(r11)					/* DON'T MESS WITH THESE INSTRUCTIONS  Make up the return point */
		b		KickAndGo						/* Go signal the other side */

SyCKrtrn:	mr		r11,r0						/* Restore the return */

/*																										*/
/*	Start sync'ing 'er up																				*/
/*																										*/

		mftb	r4								/* Take a timeout stamp (don't need top half, we have at least 13 hours) */

SyCInP0:	lwz		r5,0(r9)					/* Get the CPU status word */
		rlwinm	r5,r5,24,24,31					/* Isolate the command byte */
		cmplwi	cr0,r5,MPPICfTBsy1				/* Have we reached time base sync phase 1 yet? */
		beq		SyCInP1							/* Yeah, we're in phase 1... */
		mftb	r5								/* Get the bottom half of the timer again */
		sub		r5,r5,r4						/* How long we been messin' around? */
		cmplwi	cr0,r5,1000*TicksPerMic			/* Don't try more'n' a 1000µS */
		blt+	SyCInP0							/* We haven't, so wait some more... */
		li		r3,kMPPTimeOut					/* Signal timeout */
		b		ErrorReturn						/* Bye dude... */

/*																										*/
/*	Here we make sure there is enough time to sync the clocks before the lower part of the TB ticks	*/
/*	up into the high part.  This eliminates the need for any funky									*/
/*	"get-the-top-then-get-the-bottom-then-get-the-top-again-to-see-if-it-changed" stuff.  That would	*/
/*	only make the sync harder to do.																*/
/*																										*/
/*	Also, because we use the lower TB value for the signal, we also need to make sure we do not have	*/
/*	a value of 0, we would be ever-so-sorry if it was.												*/
/*																										*/

SyCInP1:	li		r4,lo16(0xC000)				/* Get the minimum time left on clock before tick ('bout 1 1/4 ms) */
		li		r8,0							/* Get a 0 constant */

SyCdelay:	mftb	r5							/* Get the time left */
		cmplw	cr0,r5,r4						/* See if there is sufficient time before carry into high clock */
		bgt-	SyCdelay						/* Nope, hang until it is... */
		mr.		r5,r5							/* Did we just tick, however? */
		beq-	SyCdelay						/* Yeah, wait until it is at least 1... */

		mftbu	r4								/* Get the upper */
		stw		r4,MPPITBsync-MPPIwork(r12)		/* Make sure the top half is set */
		sync									/* Wait until it is done */

		mftb	r5								/* Get the lower timebase now */
		stw		r5,MPPITBsync+4-MPPIwork(r12)	/* Shove it out for the other processor */

		la		r6,MPPISncFght-MPPIwork(r12)	/* Point to the courtroom area */
		li		r5,0							/* Point to the first line */

SyCclear:	dcbz	r5,r6						/* Clear the court */
		addi	r5,r5,32						/* Point to the next line */
		cmplwi	cr0,r5,10*2*32					/* Enough for 9 iterations, 2 chunks at a time */
		blt+	SyCclear						/* Clear the whole smear... */
		sync									/* Make sure everyone's out */

		mftb	r5								/* Get the lower timebase now */

SyCWait:	lwz		r7,MPPITBsync+4-MPPIwork(r12)	/* Get it back */
		mftb	r6								/* Get the bottom half again */
		mr.		r7,r7							/* Have they set their clock yet? */
		sub		r0,r6,r5						/* See if we're hung up */
		beq-	SyCdonesync						/* Clock is set */
		cmplwi	cr0,r0,1000*TicksPerMic			/* Timeout if we spend more than 1000µS doing this */
		blt+	SyCWait							/* No timeout, wait some more... */
		li		r3,kMPPTimeOut					/* Set timeout */
		b		ErrorReturn						/* Leave... */

/*																										*/
/*	Ok, so now we have set a preliminary TB value on the second processor.  It's close, but only		*/
/*	within hand-grenade range.																			*/
/*																										*/
/*	What we will do now is to let the processors (starting with the other guy) argue about the time	*/
/*	for a while (10 passes-we use the middle 8).  We'll look at the results and try to adjust the		*/
/*	other processor's time such that the timing windows are overlapping evenly.  This should put the	*/
/*	TBs close enough together (0-2 ticks) that the difference is undetectable.							*/
/*																										*/



SyCdonesync:
		li		r4,0							/* Clear this */
		la		r5,MPPISncFght-MPPIwork(r12)	/* Point to the squared circle */

SyCWtArg:
		dcbf	0,r5							/* Make sure of it */
		sync									/* Doubly sure */
		lwz		r6,0(r5)						/* Listen for the defence argument */

		mr.		r6,r6							/* See if they are done */
		beq+	SyCWtArg						/* Nope, still going... */

		mftb	r7								/* They're done, time for rebuttal */
		stw		r7,32(r5)						/* Make rebuttal */

		addi	r4,r4,1							/* Count rounds */

		cmplwi	cr0,r4,10						/* See if we've gone 8 rounds plus an extra one */
		addi	r5,r5,64						/* Point to the next round areas */

		blt+	SyCWtArg						/* Not yet, come out of your corners fighting... */

		mftb	r5								/* Stamp the wait */

SyCWadj:	lwz		r7,MPPITBsync+4-MPPIwork(r12)	/* Get adjustment flag */
		mftb	r6								/* Get timebase again */

		mr.		r7,r7							/* Have they set their timebase with adjusted time yet? */
		sub		r6,r6,r5						/* Get elapsed time */
		bne+	SyCdone							/* They say it, sync done... */
		cmplwi	cr0,r6,1000*TicksPerMic			/* Timeout if we spend more than 1000µS doing this */
		blt+	SyCWadj							/* Still time, wait until adjustment is done... */

		li		r3,kMPPTimeOut					/* Set timeout */
		b		ErrorReturn						/* Pass it back... */

SyCdone:	li		r3,kSIGPnoErr				/* No errors */
		mtlr	r11								/* Restore LR */
		blr										/* Leave... */


/********************************************************************************************************/
/*																										*/
/*	Here we will get the physical address of the interrupt handler.										*/
/*																										*/
/********************************************************************************************************/

GetExtHandlerAddress:
		mflr	r11								/* Save our return */
		bl		GEXbase							/* Make a base address */
GEXbase:	mflr	r3							/* Get address into our base */
		addi	r3,r3,LOW_ADDR(GotSignal-GEXbase)	/* Get the logical address of the 'rupt handler */

		mtlr	r11								/* Restore LR */
		blr


/********************************************************************************************************/
/*																										*/
/*	Here we will get a snapshot of the processor's current signaling state (R3=processor).
*/
/*																										*/
/********************************************************************************************************/

ProcessorState:
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		cmplwi	cr1,r3,MPPIMaxCPU				/* See if we are bigger than max */
		li		r3,kSIGPTargetAddrErr			/* CPU number is too big */
		bgtlr-	cr1								/* Sure are... (Get our address also) */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		add		r9,r9,r12						/* Point right at the entry */
		lwz		r4,MPPICStat(r9)				/* Get the status word */
		li		r3,kSIGPnoErr					/* Set no errors */
		rlwinm.	r4,r4,0,0,0						/* Test for busy status */
		beqlr									/* Return kSIGPnoErr if not busy */
		li		r3,kSIGPInterfaceBusyErr		/* Otherwise, return busy */
		blr										/* Return it */

/********************************************************************************************************/
/*																										*/
/*	Here we will try to handle any pending messages (just as if an interruption occurred).				*/
/*	The purpose of this function is to assure the message passing system runs even						*/
/*	though external interrupts are disabled.  Lacking a separate physical signalling					*/
/*	class, we have to share the external interrupt signal.  Unfortunately, there are					*/
/*	times when disabled loops occur (in spin locks, in the debugger, etc.), and when they				*/
/*	happen, a low level message sent to a processor will not get processed, hence this					*/
/*	function exists to be called from those disabled loops.  Since the calls are often					*/
/*	from disabled code, all that can be done is to process any pending *message*.  Any					*/
/*	pending notification interruption (referred to throughout this code as a SIGP						*/
/*	interruption) must remain pending.																	*/
/*																										*/
/********************************************************************************************************/

RunSIGPRun:
		lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
		mfspr	r3,pir							/* Get our CPU address */
		rlwinm	r9,r3,5,23,26					/* Get index into CPU array */
		ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
		la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
		mflr	r11								/* Save the link register */
		add		r9,r9,r12						/* Point right at our entry */
		lwz		r3,MPPICPriv(r9)				/* Get our privates */
		cmplw	cr1,r11,r11						/* Make sure IdleWait doesn't try to clear 'rupt request */
		oris	r3,r3,MPPICXRun>>16				/* Diddle with them and show we entered here */
		stw		r3,MPPICPriv(r9)				/* Put away our privates */
		b		IdleWait						/* Go pretend there was an interrupt... */

/********************************************************************************************************/
/*																										*/
/*	Error return.  We only need this when we leave with a reservation.  We really SHOULD clear it...	*/
/*																										*/
/********************************************************************************************************/

ErrorReturn:
		mtlr	r11								/* Restore LR */
		blr

/********************************************************************************************************/
/*																										*/
/*	Kick the target processor.  Note that we won't set the passing bit until we are ready to exit.		*/
/*	The reason for this is that we have the silly, old watchchihuahua board to deal with.  Because		*/
/*	we can't just set the interrupt and leave, we gotta wait for it to be seen on the other side.		*/
/*	This means that there could be a timeout and if so, we need to back off the function request else	*/
/*	we'd see busy when they tried to redrive it.  We'll have to deal with a tad of spin on the			*/
/*	secondary side.  Note that this just applies to a primary to secondary function on the old board.	*/
/*																										*/
/********************************************************************************************************/

KickAndGo:
		la		r8,MPPICPU0-MPPIwork(r12)		/* Get the primary work area address */
		mtlr	r11								/* Restore the link register */
		cmplw	cr0,r8,r9						/* Which is target?  primary or secondary? */
		mfmsr	r11								/* Save off the MSR */
		oris	r5,r5,MPPICPass>>16				/* Set the passing bit on */
		stw		r5,MPPICStat(r9)				/* Store the pass and let the other processor go on */

		beq		KickPrimary						/* The target is the primary... */

		ori		r3,r11,0x0010					/* Turn on DDAT bit */
		lbz		r4,MPPIstatus-MPPIwork(r12)		/* Load up the global status byte */
		lwz		r8,MPPIHammer-MPPIwork(r12)		/* Point to the Hammerhead area */

		mtmsr	r3								/* Turn on DDAT */
		isync

		andi.	r4,r4,MPPI2Pv2					/* Are we on the new or old board? */
		li		r3,0							/* Set the bit for an interrupt request */
		beq		KickOld							/* Ok, it's the old board... */

		sync									/* Make sure this is out there */
		stb		r3,IntReg(r8)					/* Set the interruption signal */
		eieio

		mtmsr	r11								/* Set DDAT back to what it was */
		isync
		li		r3,kSIGPnoErr					/* Set no errors */
		blr										/* Leave... */

KickOld:	li		r4,8						/* Set the number of tries */

KickAgain:	mftb	r6							/* Stamp the bottom half of time base */
		stb		r3,IntReg(r8)					/* Stick the interrupt */
		eieio									/* Fence me in */

CheckKick:	lbz		r10,IntReg(r8)				/* Get the interrupt request back again */
		mr.		r10,r10							/* Yes?  Got it? */
		bne		FinalDelay						/* Yeah, do the final delay and then go away... */

		mftb	r7								/* Get the time again */
		sub		r7,r7,r6						/* Get time-so-far */
		cmplwi	cr0,r7,75*TicksPerMic			/* Hold it for 75µS (average disable is supposed to be 100µS or so) */
		blt+	CheckKick						/* Keep waiting the whole time... */

		li		r10,SecInt						/* Set the deassert bit */
		mftb	r6								/* Stamp start of deassert time */
		stb		r10,IntReg(r8)					/* Deassert the interrupt request */
		eieio

DeassertWT:	mftb	r7							/* Stamp out the time */
		sub		r7,r7,r6						/* Get elapsed */
		cmplwi	cr0,r7,16*TicksPerMic			/* Hold off 16µS (minimum is 12µS) */
		blt+	DeassertWT						/* Keep spinning... */

		subi	r4,r4,1							/* See if we have another retry we can do */
		mr.		r4,r4							/* Are we there yet? */
		blt+	KickAgain						/* Retry one more time... */
												/* NOTE(review): blt+ after counting down from 8 never branches */
												/* (r4 is positive until exhausted); bgt+ looks intended — confirm */
												/* before touching; original instruction preserved as-is. */

		rlwinm	r5,r5,0,2,31					/* Clear busy and passing bits */
		rlwinm	r5,r5,0,24,15					/* Clear the function request to idle */

		mtmsr	r11								/* Restore DDAT stuff */
		isync

		stw		r5,MPPICStat(r9)				/* Rescind the request */
		li		r3,kMPPTimeOut					/* Set timeout */
		blr										/* Leave... */

FinalDelay:	mftb	r6							/* Stamp the start of the final delay */

FinalDelayW:
		mftb	r7								/* Stamp out the time */
		sub		r7,r7,r6						/* Get elapsed */
		cmplwi	cr0,r7,16*TicksPerMic			/* Hold off 16µS (minimum is 12µS) */
		blt+	FinalDelayW						/* Keep spinning... */

		mtmsr	r11								/* Restore DDAT stuff */
		isync
		li		r3,kSIGPnoErr					/* Set no errors */
		blr										/* Leave... */

KickPrimary:
		ori		r3,r11,0x0010					/* Turn on the DDAT bit */
		lwz		r8,MPPIEther-MPPIwork(r12)		/* Get the address of the ethernet ROM */

		mtmsr	r3								/* Turn on DDAT */
		isync

		li		r4,4							/* Get flip count */

		sync									/* Make sure the status word is out there */

FlipOff:	lbz		r3,0(r8)					/* Reference ethernet ROM to get chip select twiddled */
		eieio									/* Make sure of this (Hmm, this is chip select, not memory-mapped */
												/* storage.  Do we even need the eieio?) */

		addic.	r4,r4,-1						/* Have we flipped them off enough? */
		bgt+	FlipOff							/* Not yet, they deserve more... */

		mtmsr	r11								/* Restore DDAT stuff */
		isync
		li		r3,kSIGPnoErr					/* Set no errors */
		blr										/* Return...
*/
+
+/******************************************************************************************************** */
+/* */
+/* This is the code for the secondary processor */
+/* */
+/******************************************************************************************************** */
+
+/* Note that none of this code needs locks because there's kind of a synchronization */
+/* shuffle going on. */
+
+/* */
+/* First, we need to do a bit of initialization of the processor: MSR, HID0/caches, */
+/* TLB, BATs, segment registers, and the FPRs, then handshake with the primary. */
+/* */
+
+
+CPUInit:
+			li		r27,0x3040						/* Set floating point and machine checks on, IP to 0xFFF0xxxx */
+			mtmsr	r27								/* Load 'em on in */
+			isync
+
+			lis		r28,-32768						/* Turn on machine checks */
+													/* (-32768 == 0x8000 in the high half) */
+			ori		r28,r28,0xCC84					/* Enable caches, clear them, */
+													/* disable serial execution and turn BHT on */
+			sync
+			mtspr	HID0,r28						/* Start the cache clear */
+			sync
+
+/* */
+/* Clear out the TLB.  The entries are garbage after hard reset. */
+/* */
+
+			li		r0,512							/* Get number of TLB entries (FIX THIS) */
+			li		r3,0							/* Start at 0 */
+			mtctr	r0								/* Set the CTR */
+
+purgeTLB:	tlbie	r3								/* Purge this entry */
+			addi	r3,r3,4096						/* Next page */
+			bdnz	purgeTLB						/* Do 'em all... */
+
+			sync									/* Make sure all TLB purges are done */
+			tlbsync									/* Make sure on other processors also */
+			sync									/* Make sure the TLBSYNC is done */
+
+/* */
+/* Clear out the BATs.  They are garbage after hard reset. */
+/* */
+
+			li		r3,0							/* Clear a register */
+
+			mtspr	DBAT0L,r3						/* Clear BAT */
+			mtspr	DBAT0U,r3						/* Clear BAT */
+			mtspr	DBAT1L,r3						/* Clear BAT */
+			mtspr	DBAT1U,r3						/* Clear BAT */
+			mtspr	DBAT2L,r3						/* Clear BAT */
+			mtspr	DBAT2U,r3						/* Clear BAT */
+			mtspr	DBAT3L,r3						/* Clear BAT */
+			mtspr	DBAT3U,r3						/* Clear BAT */
+
+			mtspr	IBAT0L,r3						/* Clear BAT */
+			mtspr	IBAT0U,r3						/* Clear BAT */
+			mtspr	IBAT1L,r3						/* Clear BAT */
+			mtspr	IBAT1U,r3						/* Clear BAT */
+			mtspr	IBAT2L,r3						/* Clear BAT */
+			mtspr	IBAT2U,r3						/* Clear BAT */
+			mtspr	IBAT3L,r3						/* Clear BAT */
+			mtspr	IBAT3U,r3						/* Clear BAT */
+
+/* */
+/* Map 0xF0000000 to 0xFFFFFFFF for I/O; make it R/W non-cacheable */
+/* Map 0x00000000 to 0x0FFFFFFF for mainstore; make it R/W cacheable */
+/* */
+
+			lis		r6,0xF000						/* Set RPN to last segment */
+			ori		r6,r6,0x1FFF					/* Set up upper BAT for 256M, access both */
+
+			lis		r7,0xF000						/* Set RPN to last segment */
+			ori		r7,r7,0x0032					/* Set up lower BAT for 256M, access both, non-cacheable */
+
+			mtspr	DBAT0L,r7						/* Setup ROM and I/O mapped areas */
+			mtspr	DBAT0U,r6						/* Now do the upper DBAT */
+			sync
+
+			li		r6,0x1FFF						/* Set up upper BAT for 256M, access both */
+			li		r7,0x0012						/* Set up lower BAT for r/w access */
+
+			mtspr	DBAT1L,r7						/* Set up an initial view of mainstore */
+			mtspr	DBAT1U,r6						/* Now do the upper DBAT */
+			sync
+
+/* */
+/* Clean up SDR and segment registers */
+/* */
+
+			li		r3,0							/* Clear a register */
+			mtspr	SDR1,r3							/* Clear SDR1 */
+
+			li		r4,0							/* Clear index for segment registers */
+			lis		r5,0x1000						/* Set the segment indexer */
+
+clearSR:	mtsrin	r3,r4							/* Zero out the SR */
+			add.	r4,r4,r5						/* Point to the next segment */
+			bne-	clearSR							/* Keep going until we wrap back to 0 */
+
+/* Initialize every FPR from the FloatInit pattern so they hold a known value. */
+
+			lis		r5,HIGH_ADDR(EXT(FloatInit))	/* Get top of floating point init value */
+			ori		r5,r5,LOW_ADDR(EXT(FloatInit))	/* Slam bottom */
+			lfd		f0,0(r5)						/* Initialize FP0 */
+			fmr		f1,f0							/* Ours is not */
+			fmr		f2,f0							/* to wonder why, */
+			fmr		f3,f0							/* ours is but to */
+			fmr		f4,f0							/* do or die! */
+			fmr		f5,f0
+			fmr		f6,f0
+			fmr		f7,f0
+			fmr		f8,f0
+			fmr		f9,f0
+			fmr		f10,f0
+			fmr		f11,f0
+			fmr		f12,f0
+			fmr		f13,f0
+			fmr		f14,f0
+			fmr		f15,f0
+			fmr		f16,f0
+			fmr		f17,f0
+			fmr		f18,f0
+			fmr		f19,f0
+			fmr		f20,f0
+			fmr		f21,f0
+			fmr		f22,f0
+			fmr		f23,f0
+			fmr		f24,f0
+			fmr		f25,f0
+			fmr		f26,f0
+			fmr		f27,f0
+			fmr		f28,f0
+			fmr		f29,f0
+			fmr		f30,f0
+			fmr		f31,f0
+
+/* */
+/* Whew, that was like, work, man!  What a cleaning job, I should be neater */
+/* when I reset. */
+/* */
+/* Finally we can get some data DAT turned on and we can reset the interrupt */
+/* (which may have been done before we get here) and get into the bring up */
+/* handshakes. */
+/* */
+/* Note that here we need to use the actual V=R addresses for HammerHead */
+/* and PCI1 adr.  There are no virtual mappings set up on this processor. */
+/* We need to switch once the firmware is initialized.  Also, we don't know */
+/* where our control block is yet. */
+/* */
+
+			lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
+			ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
+
+			mfmsr	r3								/* Get the MSR */
+			ori		r3,r3,0x0010					/* Turn data DAT on */
+			mtmsr	r3								/* DAT is on (well, almost) */
+			isync									/* Now it is for sure */
+
+			lis		r8,HammerHead>>16				/* Point to the HammerHead controller */
+			li		r7,SecInt						/* Get value to reset */
+			stb		r7,IntReg(r8)					/* Reset the interrupt */
+			eieio									/* Fence it off */
+
+/* */
+/* Now we can plant and harvest some bits. */
+/* */
+
+			lwz		r6,MPPIlogCPU-MPPIwork(r12)		/* Get the logical CPU address to assign */
+			mfspr	r7,pir							/* Get the old PIR */
+			rlwimi	r7,r6,0,27,31					/* Insert the logical CPU number into the low 5 bits, */
+													/* preserving the reserved upper part */
+			mtspr	pir,r7							/* Set it */
+
+/* */
+/* This little piece of code here determines if we are on the first or second version */
+/* of the two processor board.  The old one shouldn't ever be shipped (well, maybe by */
+/* DayStar) but there are some around here. */
+/* */
+/* The newer version of the 2P board has a different state machine than the older one. */
+/* When we are in the board state we're in now, primary arbitration is turned on while */
+/* it is not until the next state in the old board.  By checking our bus address */
+/* (WhoAmI) we can tell. */
+/* */
+
+			lbz		r7,WhoAmI(r8)					/* Get the current bus master ID */
+			andi.	r7,r7,PriCPU					/* Do we think we're the primary? */
+			beq		On2Pv1							/* No, that means we're on the old 2P board */
+
+			lbz		r7,MPPIstatus-MPPIwork(r12)		/* Get the status byte */
+			ori		r7,r7,MPPI2Pv2					/* Show we're on the new board */
+			stb		r7,MPPIstatus-MPPIwork(r12)		/* Set the board version */
+
+On2Pv1:		rlwinm	r9,r6,5,23,26					/* Get index into the CPU specific area */
+
+			la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Index to processor */
+			add		r9,r9,r12						/* Get a base for our CPU specific area */
+
+			oris	r6,r6,((MPPICBusy+MPPICOnline+MPPICStop)>>16)&0x0000FFFF	/* Set CPU busy, online, stopped, */
+													/* and busy set by himself */
+			stw		r6,MPPICStat(r9)				/* Save the whole status word */
+
+			li		r4,0x80							/* Get beginnings of a CPU address mask */
+			lhz		r11,MPPIinst-MPPIwork(r12)		/* Get the installed and online status flags */
+			srw		r4,r4,r6						/* Make a mask */
+			rlwimi	r4,r4,8,16,23					/* Double up the mask for both flags */
+			or		r11,r11,r4						/* Set that we are installed and online */
+			sync									/* Make sure the main processor sees the rest of the stuff */
+
+			sth		r11,MPPIinst-MPPIwork(r12)		/* We're almost done, just need to set the TB */
+
+/* Timebase sync handshake: post 0 in the PCI1 address register, spin until the */
+/* primary writes it non-zero, then copy the timebase it published. */
+
+			lis		r5,PCI1AdrReg>>16				/* Point to the PCI1 address register */
+			li		r4,0							/* Clear this out */
+			stw		r4,0(r5)						/* Set PCI register to 0 to show we're ready for TB sync */
+			eieio									/* Fence it off */
+
+Wait4TB:	lwz		r7,0(r5)						/* Get the PCI1 reg to see if time to set time */
+			mr.		r7,r7							/* Is it ready yet? */
+			beq		Wait4TB							/* Nope, wait for it... */
+			isync									/* No peeking... */
+
+			lwz		r3,MPPITBsync-MPPIwork(r12)		/* Get the high word of TB */
+			lwz		r4,MPPITBsync+4-MPPIwork(r12)	/* Get the low word */
+
+/* Note that we need no TB magic here 'cause they ain't running */
+
+			mttbu	r3								/* Set the high part */
+			mttbl	r4								/* Set the low part */
+
+			rlwinm	r6,r6,0,2,31					/* Clear the busy bit and passed */
+			stw		r6,MPPICStat(r9)				/* Store the status word */
+
+			sync									/* Make sure all is right with the world */
+
+			li		r3,0							/* Set the init done signal */
+			stw		r3,0(r5)						/* Feed the dog and let him out */
+			sync									/* Make sure this is pushed on out */
+
+			li		r27,0x3040						/* Make MSR the way we likes it */
+			mtmsr	r27								/* Load 'em on in */
+			isync
+
+/* */
+/* Jump on to the idle wait loop.  We're online and ready, but we're */
+/* still in the reset state.  We need to wait until we see a start signal. */
+/* */
+/* Note that the idle loop expects R9 to be our CPU-specific work area; */
+/* R12 is the base of the code and global work area */
+/* */
+
+			cmplw	cr1,r11,r12						/* Make sure IdleWait knows to clear 'rupt request */
+													/* (R11 != R12 here, so CR1 is "not equal") */
+			b		IdleWait
+
+
+/******************************************************************************************************** */
+/******************************************************************************************************** */
+/* */
+/* Here is the interruption handler. */
+/* */
+/* What we'll do here is to get our registers into a standard state and figure out */
+/* which processor we are on.  The processors have pretty much the same code.  The primary */
+/* will reset the secondary-to-primary interruption bit and the secondary will reset the SecInt */
+/* flags. */
+/* */
+/* The primary to secondary interrupt is an exception interruption controlled by a bit in the */
+/* Hammerhead IntReg.  The only bit in here is SecInt which is active low.  Writing a 0 into the */
+/* bit (bit 0) yanks on the external pin on the secondary.  Note that it is the only external */
+/* connected on the secondary. 
SecInt must be set to 1 to clear the interruption.  On the old */
+/* 2P board, asserting the external interrupt causes a watchdog timer to start which expires unless */
+/* the interrupt request is withdrawn.  On a 180MHz system the time to expire is about 256µS, */
+/* not very long.  So, what we need to do is to time the assertion and if it has not been */
+/* reset, do it ourselves.  Unfortunately we need to keep it deasserted for at least 12µS or the */
+/* watchdog will not stop.  This leads to another problem: even if the secondary processor sees */
+/* the interrupt and deasserts the request itself, we cannot reassert before the 12µS limit, */
+/* else havoc will be wrought.  We just gotta make sure. */
+/* */
+/* So, the secondary to primary interrupt is megafunky.  The mother board is wired with the */
+/* MACE ethernet chip's chip-select pin wired to Grand Central's external interrupt #10 pin. */
+/* This causes a transient interrupt whenever MACE is diddled.  GC latches the interrupt into the */
+/* events register where we can see it and clear it. */
+/* */
+/******************************************************************************************************** */
+/******************************************************************************************************** */
+
+/* Entry from the exception path: locate our CPU-specific control area (R9) and the */
+/* global work area (R12), saving the caller's return address in R11. */
+
+GotSignal:	mfspr	r9,pir							/* Get our processor ID */
+			lis		r12,HIGH_ADDR(MPPIwork)			/* Get the top half of the data area */
+			rlwinm	r9,r9,5,23,26					/* Clean this up */
+			ori		r12,r12,LOW_ADDR(MPPIwork)		/* Get the bottom half of the data area */
+			la		r9,EXT(MPPICPUs)-MPPIwork(r9)	/* Point into the processor control area */
+			mflr	r11								/* Save our return */
+			add		r9,r9,r12						/* Point right at the entry */
+
+/* We'll come in here if we're stopped and found the 'rupt via polling */
+/* or we were kicked off by the PollSIGP call.  We need */
+/* to wipe out the interrupt request no matter how we got here. */
+
+SimRupt:	mfmsr	r4								/* Get the MSR */
+
+			la		r8,MPPICPU0-MPPIwork(r12)		/* Get address of main processor's work area */
+			ori		r5,r4,0x0010					/* Turn on the DDAT bit */
+			cmplw	cr0,r8,r9						/* Are we on the main? */
+			cmplw	cr1,r4,r4						/* Set CR1 to indicate we've cleared any 'rupts */
+			bne		SecondarySig					/* Go if we are not on main processor... */
+
+/* */
+/* Handle the secondary to primary signal */
+/* */
+
+PrimarySig:
+
+			lwz		r8,MPPIGrandC-MPPIwork(r12)		/* Get the address of the Grand Central area base */
+			mtmsr	r5								/* Turn on DDAT */
+			isync									/* Now don't be usin' dem speculative executions */
+			li		r7,EventsReg					/* Get address of the interrupt events register */
+			lwbrx	r6,r7,r8						/* Grab the interruption events */
+
+			lis		r5,0x4000						/* Get the mask for the Ext10 pin */
+			and.	r0,r6,r5						/* See if our bit is on */
+			li		r7,ClearReg						/* Point to the interruption clear register */
+
+			beq+	SkpClr							/* Skip the clear 'cause it's supposed to be soooo slow... */
+
+			stwbrx	r5,r7,r8						/* Reset the interrupt latch */
+			eieio									/* Fence off the last 'rupt */
+
+SkpClr:		mtmsr	r4								/* Set MSR to entry state */
+			isync									/* Make sure we ain't gunked up no future storage references */
+
+			bne+	IdleWait						/* Go join up and decode the function... */
+
+			mtlr	r11								/* Restore return address */
+			andc.	r0,r6,r5						/* Any other bits on? */
+			li		r3,kMPVainInterrupt				/* Assume we got nothing */
+			beqlr									/* We got nothing, tell 'em to eat 'rupt... */
+			li		r3,kMPIOInterruptPending		/* Tell them to process an I/O 'rupt */
+			blr										/* Ignore the interrupt... */
+
+/* */
+/* Handle the primary to secondary signal */
+/* */
+
+SecondarySig:
+			lwz		r3,MPPICStat(r9)				/* Pick up our status word */
+			lis		r8,HammerHead>>16				/* Get the address of the hammerhead (used during INIT on non-main processor) */
+			rlwinm.	r3,r3,0,3,3						/* Check if we are already "in-the-know" (all started up) */
+			beq-	UseAltAddr						/* Nope, use hardcoded Hammerhead address */
+			lwz		r8,MPPIHammer-MPPIwork(r12)		/* Get the kernel's HammerHead area */
+
+UseAltAddr:	mtmsr	r5								/* Turn on DDAT */
+			isync									/* Now don't be usin' dem speculative executions */
+			li		r0,SecInt						/* Get the Secondary interrupt bit */
+			stb		r0,IntReg(r8)					/* Reset the interrupt request */
+			mtmsr	r4								/* Set MSR to entry state */
+			eieio									/* Fence me in */
+			isync									/* Make sure we ain't gunked up no future storage references */
+
+			b		IdleWait						/* Go decode this request... */
+
+/******************************************************************************************************** */
+/******************************************************************************************************** */
+/* */
+/* This is the idle wait. */
+/* */
+/* We're stuck in here so long as we are stopped or reset. */
+/* All functions except for "start" pass back through here.  Start is weird because */
+/* it is an initial thing, i.e., we can't have gotten here via any kind of exception, */
+/* so there is no state to restore.  The "started" code is expected to require no known */
+/* state and will take care of all initialization/fixup required. */
+/* */
+/******************************************************************************************************** */
+/******************************************************************************************************** */
+
+BadRuptState:										/* We don't do anything special yet for a bad state, just eat request */
+KillBusy:	rlwinm	r3, r3, 0, 2, 31				/* Remove the message pending flags. */
+			rlwinm	r3, r3, 0, 24, 16				/* Set the function to idle. */
+													/* NOTE(review): mask (24,16) differs from the (24,15) */
+													/* used elsewhere for the same purpose - confirm intended */
+			stw		r3,MPPICStat(r9)				/* Update/unlock the status word. */
+
+ReenterWait: cmplwi	cr1,r9,0						/* Turn off the 'rupt cleared flag */
+
+IdleWait:	lis		r4,MPPICBusy>>16				/* Get busy status */
+
+SpinIdle:
+			lwz		r3,MPPICStat(r9)				/* Pick up our status word */
+
+			and.	r5,r3,r4						/* Isolate the busy bit */
+			lis		r6,MPPICPass>>16				/* Get the passed busy flag */
+			bne		TooBusy							/* Work, work, work, that's all we do is work... */
+
+			rlwinm.	r5,r3,0,4,4						/* See if we are stopped */
+			lwz		r8,MPPICPriv(r9)				/* Pick up our private flags */
+			bne-	SpinIdle						/* Yeah, keep spinning... */
+
+
+/* */
+/* Restore the state and get outta here.  Now, we shouldn't be in a reset state and not be stopped, */
+/* so we can go ahead and safely return up a level because it exists.  If we are reset, no state exists */
+/* and we should always be stopped. */
+/* */
+
+			rlwinm	r4, r8, 1, 0, 0					/* Get the explicit run bit, shifted left one. */
+			rlwinm.	r5, r8, 0, 0, 0					/* See if there is a SIGP signal pending */
+			and		r4, r8, r4						/* Turn off the SIGP pending bit if this was not an explicit run */
+													/* Also the explicit run bit is cleared */
+			mtlr	r11								/* Restore the return point */
+			li		r3,kMPVainInterrupt				/* Tell the interrupt handler to ignore the interrupt */
+			stw		r4,MPPICPriv(r9)				/* Set that flag back for later */
+			beqlr									/* Time to leave if we ate the 'rupt... */
+
+			li		r3,kMPSignalPending				/* Set that there is a SIGP interruption pending */
+
+			blr										/* Go away, let our caller handle this thing... QED!!!!!!!!! */
+
+/* */
+/*        QQQQQ      EEEEEEEEEE  DDDDDDDDD */
+/*      QQQQQQQQQ    EEEEEEEEEE  DDDDDDDDDDD */
+/*     QQQQ   QQQQ   EEEE        DDD     DDD */
+/*     QQQQ   QQQQ   EEEEEEEEEE  DDD     DDD */
+/*     QQQQ Q QQQQ   EEEEEEEEEE  DDD     DDD */
+/*     QQQQ  QQQQQ   EEEE        DDD     DDD */
+/*      QQQQQQQQQQQ  EEEEEEEEEE  DDDDDDDDDDD */
+/*        QQQQQ QQQ  EEEEEEEEEE  DDDDDDDDD */
+/* */
+/* (I finished here) */
+/* */
+
+
+/* */
+/* This is where we decode the function and do what's right. */
+/* First we need to check if it's really time to do something. */
+/* */
+
+TooBusy:	and.	r5,r3,r6						/* See if the passed flag is on */
+			beq		SpinIdle						/* No, not yet, try the whole smear again... */
+
+			beq+	cr1,KeepRupt					/* Don't clear 'rupt if we already did (or entered via RunSIGRun) */
+
+			lwz		r5,MPPICPriv(r9)				/* Get the private flags */
+			rlwinm.	r5, r5, 0, 1, 1					/* Did we enter via RunSIGPRun? */
+			beq		SimRupt							/* Nope, 's'ok, go clear physical 'rupt... */
+
+/* Dispatch the requested function through IFuncTable, indexed by the request code */
+/* carried in the status word. */
+
+KeepRupt:
+			bl		GetOurBase						/* Get our address */
+GetOurBase:	rlwinm	r4,r3,26,22,29					/* Get the opcode index * 4 */
+			mflr	r12								/* Get the base address */
+			la		r7,LOW_ADDR(IFuncTable-GetOurBase)(r12)	/* Point to the function table */
+
+			cmplwi	cr0,r4,7*4						/* See if they sent us some bogus junk */
+													/* Change 7 if we add more functions */
+			add		r7,r7,r4						/* Point right at the entry */
+			bgt-	KillBusy						/* Bad request code, reset busy and eat it... */
+
+			mtlr	r7								/* Set up the LR */
+
+			blr										/* Go execute the function... */
+
+IFuncTable:
+			b		KillBusy						/* This handles the signal in vain... */
+			b		IStart							/* This handles the start function */
+			b		IResume							/* This handles the resume function */
+			b		IStop							/* This handles the stop function */
+			b		ISIGP							/* This handles the SIGP function */
+			b		IStatus							/* This handles the store status function */
+			b		ITBsync							/* This handles the synchronize timer base function */
+			b		IReset							/* This handles the reset function */
+
+/******************************************************************************************************** */
+/******************************************************************************************************** */
+/* */
+/* Here are the functions handled at interrupt time */
+/* */
+/******************************************************************************************************** */
+/******************************************************************************************************** */
+
+/******************************************************************************************************** */
+/* */
+/* The Start function.  This guy requires that the processor be in the reset and online state. 
*/
+/* */
+/******************************************************************************************************** */
+
+IStart:		lis		r4,MPPICOnline>>16				/* Get bits required to be on */
+			isync									/* Make sure we haven't gone past here */
+			and		r6,r3,r4						/* See if they are on */
+			cmplw	cr1,r6,r4						/* Are they all on? */
+			lwz		r4,MPPICParm0(r9)				/* Get the physical address of the code to go to */
+			bne-	cr1,BadRuptState				/* Some required state bits are off */
+			rlwinm	r3,r3,0,2,31					/* Kill the busy bits */
+			rlwinm	r3,r3,0,24,15					/* Set the function to idle */
+			oris	r3,r3,MPPICReady>>16			/* Set ready state */
+			rlwinm	r3,r3,0,5,3						/* Clear out the stop bit */
+			mtlr	r4								/* Set the LR */
+			stw		r3,MPPICStat(r9)				/* Clear out the status flags */
+			lwz		r3,MPPICParm2(r9)				/* Get pass-thru parameter */
+			blrl									/* Start up the code... */
+/* */
+/* The rules for coming back here via BLR are just opposite the normal way: you can trash R0-R3 and */
+/* R13-R31, all the CRs; don't touch SPRG1 or SPRG3, the MSR, the SRs or BATs 0 and 1. */
+/* Follow these simple rules and you're allowed back; don't follow them and die. */
+/* We only come back here if there is some kind of startup failure so's we can try again later */
+/* */
+
+			lwz		r3,MPPICStat(r9)				/* Get back the status word */
+			cmplw	cr1,r4,r4						/* Show that we have already taken care of the 'rupt */
+			rlwinm	r3,r3,0,4,2						/* Reset the ready bit */
+			b		KillBusy						/* Back into the fold... */
+
+/******************************************************************************************************** */
+/* */
+/* The Resume function.  This guy requires that the processor be online and ready. */
+/* */
+/******************************************************************************************************** */
+
+IResume:	lis		r4,(MPPICOnline+MPPICReady)>>16	/* Get states required to be set */
+			and		r6,r3,r4						/* See if they are on */
+			cmplw	cr0,r6,r4						/* Are they all on? */
+			bne-	BadRuptState					/* Some required state bits are off */
+			rlwinm	r3,r3,0,5,3						/* Clear out the stop bit */
+			b		KillBusy						/* Get going... */
+
+/******************************************************************************************************** */
+/* */
+/* The Stop function.  All we care about here is that the guy is online. */
+/* */
+/******************************************************************************************************** */
+
+IStop:		lis		r4,MPPICOnline>>16				/* All we care about is if we are online or not */
+			and.	r6,r3,r4						/* See if we are online */
+			beq-	BadRuptState					/* Online bit is off, reject the request */
+			oris	r3,r3,MPPICStop>>16				/* Set the stop bit */
+			b		KillBusy						/* Get stopped... */
+
+
+/******************************************************************************************************** */
+/* */
+/* The SIGP function.  All we care about here is that the guy is online and ready. */
+/* */
+/******************************************************************************************************** */
+
+ISIGP:		lis		r4,(MPPICOnline+MPPICReady)>>16	/* Get states required to be set */
+			and		r6,r3,r4						/* See if they are on */
+			lwz		r7,MPPICPriv(r9)				/* Get the private flags */
+			cmplw	cr0,r6,r4						/* Are they all on? */
+			oris	r6,r7,(MPPICSigp>>16)&0x0000FFFF	/* Set the SIGP pending bit */
+			bne-	BadRuptState					/* Some required state bits are off */
+			lwz		r4,MPPICParm0(r9)				/* Get the SIGP parameter */
+			stw		r6,MPPICPriv(r9)				/* Stick the pending bit back */
+			stw		r4,MPPICParm0BU(r9)				/* Back up parm 0 so it is safe once we unlock */
+			b		KillBusy						/* Get stopped... */
+
+/******************************************************************************************************** */
+/* */
+/* The store status function.  This guy requires that the processor be in the stopped state. */
+/* (NOTE(review): the code below only checks the online bit, not stopped - confirm intended.) */
+/* */
+/******************************************************************************************************** */
+
+IStatus:	lis		r4,MPPICOnline>>16				/* All we care about is if we are online or not */
+			and.	r6,r3,r4						/* See if we are online */
+			isync									/* Make sure we haven't gone past here */
+			beq-	BadRuptState					/* Online bit is off, reject the request */
+			lwz		r4,MPPICParm0(r9)				/* Get the status area physical address */
+			rlwinm.	r6,r3,0,3,3						/* Test processor ready */
+
+			beq		INotReady						/* Not ready, don't assume valid exception save area */
+			bl		StoreStatus						/* Go store off all the registers 'n' stuff */
+			b		KillBusy						/* All done... */
+
+/* Not ready: no exception save area exists, so fill R0-R12 slots with the */
+/* 0xDEADF1D0 poison value and store the live R13-R31 directly. */
+
+INotReady:
+			lis		r7,0xDEAD						/* Get 0xDEAD + 1 */
+			ori		r7,r7,0xF1D0					/* Get 0xDEADF1D0 */
+			stw		r7,CSAgpr+(0*4)(r4)				/* Store invalid R0 */
+			stw		r7,CSAgpr+(1*4)(r4)				/* Store invalid R1 */
+			stw		r7,CSAgpr+(2*4)(r4)				/* Store invalid R2 */
+			stw		r7,CSAgpr+(3*4)(r4)				/* Store invalid R3 */
+			stw		r7,CSAgpr+(4*4)(r4)				/* Store invalid R4 */
+			stw		r7,CSAgpr+(5*4)(r4)				/* Store invalid R5 */
+			stw		r7,CSAgpr+(6*4)(r4)				/* Store invalid R6 */
+			stw		r7,CSAgpr+(7*4)(r4)				/* Store invalid R7 */
+			stw		r7,CSAgpr+(8*4)(r4)				/* Store invalid R8 */
+			stw		r7,CSAgpr+(9*4)(r4)				/* Store invalid R9 */
+			stw		r7,CSAgpr+(10*4)(r4)			/* Store invalid R10 */
+			stw		r7,CSAgpr+(11*4)(r4)			/* Store invalid R11 */
+			stw		r7,CSAgpr+(12*4)(r4)			/* Store invalid R12 */
+			stw		r13,CSAgpr+(13*4)(r4)			/* Save general registers */
+			stw		r14,CSAgpr+(14*4)(r4)			/* Save general registers */
+			stw		r15,CSAgpr+(15*4)(r4)			/* Save general registers */
+			stw		r16,CSAgpr+(16*4)(r4)			/* Save general registers */
+			stw		r17,CSAgpr+(17*4)(r4)			/* Save general registers */
+			stw		r18,CSAgpr+(18*4)(r4)			/* Save general registers */
+			stw		r19,CSAgpr+(19*4)(r4)			/* Save general registers */
+			stw		r20,CSAgpr+(20*4)(r4)			/* Save general registers */
+			stw		r21,CSAgpr+(21*4)(r4)			/* Save general registers */
+			stw		r22,CSAgpr+(22*4)(r4)			/* Save general registers */
+			stw		r23,CSAgpr+(23*4)(r4)			/* Save general 
registers */ + stw r24,CSAgpr+(24*4)(r4) /* Save general registers */ + stw r25,CSAgpr+(25*4)(r4) /* Save general registers */ + stw r26,CSAgpr+(26*4)(r4) /* Save general registers */ + stw r27,CSAgpr+(27*4)(r4) /* Save general registers */ + stw r28,CSAgpr+(28*4)(r4) /* Save general registers */ + stw r29,CSAgpr+(29*4)(r4) /* Save general registers */ + stw r30,CSAgpr+(30*4)(r4) /* Save general registers */ + stw r31,CSAgpr+(31*4)(r4) /* Save general registers */ + bl StoreLiveStatus + b KillBusy + +/* */ +/* Save the whole status. Lot's of busy work. */ +/* Anything marked unclean is of the devil and should be shunned. Actually, it depends upon */ +/* knowledge of firmware control areas and is no good for a plug in. But, we've sacrificed the */ +/* white ram and are standing within a circle made of his skin, so we can dance with the devil */ +/* safely. */ +/* */ + +StoreStatus: + mfspr r10,sprg0 /* Get the pointer to the exception save area (unclean) */ + + lwz r5,saver0(r13) /* Get R0 (unclean) */ + lwz r6,saver1(r13) /* Get R1 (unclean) */ + lwz r7,saver2(r13) /* Get R2 (unclean) */ + stw r5,CSAgpr+(0*4)(r4) /* Save R0 */ + stw r6,CSAgpr+(1*4)(r4) /* Save R1 */ + stw r7,CSAgpr+(2*4)(r4) /* Save R2 */ + lwz r5,saver3(r13) /* Get R3 (unclean) */ + lwz r6,saver4(r13) /* Get R4 (unclean) */ + lwz r7,saver5(r13) /* Get R5 (unclean) */ + stw r5,CSAgpr+(3*4)(r4) /* Save R3 */ + stw r6,CSAgpr+(4*4)(r4) /* Save R4 */ + stw r7,CSAgpr+(5*4)(r4) /* Save R5 */ + lwz r5,saver6(r13) /* Get R6 (unclean) */ + lwz r6,saver7(r13) /* Get R7 (unclean) */ + lwz r7,saver8(r13) /* Get R8 (unclean) */ + stw r5,CSAgpr+(6*4)(r4) /* Save R6 */ + stw r6,CSAgpr+(7*4)(r4) /* Save R7 */ + stw r7,CSAgpr+(8*4)(r4) /* Save R8 */ + lwz r5,saver9(r13) /* Get R9 (unclean) */ + lwz r6,saver10(r13) /* Get R10 (unclean) */ + lwz r7,saver11(r13) /* Get R11 (unclean) */ + stw r5,CSAgpr+(9*4)(r4) /* Save R9 */ + stw r6,CSAgpr+(10*4)(r4) /* Save R10 */ + lwz r5,saver12(r13) /* Get R12 (unclean) */ + stw 
r7,CSAgpr+(11*4)(r4) /* Save R11 */ + stw r5,CSAgpr+(12*4)(r4) /* Save R12 */ + + lwz r5,saver13(r13) /* Get R13 (unclean) */ + lwz r6,saver14(r13) /* Get R14 (unclean) */ + lwz r7,saver15(r13) /* Get R15 (unclean) */ + stw r5,CSAgpr+(13*4)(r4) /* Save R13 */ + stw r6,CSAgpr+(14*4)(r4) /* Save R14 */ + stw r7,CSAgpr+(15*4)(r4) /* Save R15 */ + lwz r5,saver16(r13) /* Get R16 (unclean) */ + lwz r6,saver17(r13) /* Get R17 (unclean) */ + lwz r7,saver18(r13) /* Get R18 (unclean) */ + stw r5,CSAgpr+(16*4)(r4) /* Save R16 */ + stw r6,CSAgpr+(17*4)(r4) /* Save R17 */ + stw r7,CSAgpr+(18*4)(r4) /* Save R18 */ + lwz r5,saver19(r13) /* Get R19 (unclean) */ + lwz r6,saver20(r13) /* Get R20 (unclean) */ + lwz r7,saver21(r13) /* Get R21 (unclean) */ + stw r5,CSAgpr+(19*4)(r4) /* Save R19 */ + stw r6,CSAgpr+(20*4)(r4) /* Save R20 */ + stw r7,CSAgpr+(21*4)(r4) /* Save R21 */ + lwz r5,saver22(r13) /* Get R22 (unclean) */ + lwz r6,saver23(r13) /* Get R23 (unclean) */ + lwz r7,saver24(r13) /* Get R24 (unclean) */ + stw r5,CSAgpr+(22*4)(r4) /* Save R22 */ + stw r6,CSAgpr+(23*4)(r4) /* Save R23*/ + stw r7,CSAgpr+(24*4)(r4) /* Save R24 */ + lwz r5,saver25(r13) /* Get R25 (unclean) */ + lwz r6,saver26(r13) /* Get R26 (unclean) */ + lwz r7,saver27(r13) /* Get R27 (unclean) */ + stw r5,CSAgpr+(25*4)(r4) /* Save R25 */ + stw r6,CSAgpr+(26*4)(r4) /* Save R26 */ + stw r7,CSAgpr+(27*4)(r4) /* Save R27 */ + + lwz r5,saver28(r13) /* Get R28 (unclean) */ + lwz r6,saver29(r13) /* Get R29 (unclean) */ + lwz r7,saver30(r13) /* Get R30 (unclean) */ + stw r5,CSAgpr+(28*4)(r4) /* Save R28 */ + lwz r5,saver31(r13) /* Get R31(unclean) */ + stw r6,CSAgpr+(29*4)(r4) /* Save R29 */ + stw r7,CSAgpr+(30*4)(r4) /* Save R30 */ + stw r5,CSAgpr+(31*4)(r4) /* Save R31 */ + +StoreLiveStatus: + mfmsr r5 /* Get the current MSR */ + ori r6,r5,0x2000 /* Turn on floating point instructions */ + mtmsr r6 /* Turn them on */ + isync /* Make sure they're on */ + + stfd f0,CSAfpr+(0*8)(r4) /* Save floating point registers */ 
+ stfd f1,CSAfpr+(1*8)(r4) /* Save floating point registers */ + stfd f2,CSAfpr+(2*8)(r4) /* Save floating point registers */ + stfd f3,CSAfpr+(3*8)(r4) /* Save floating point registers */ + stfd f4,CSAfpr+(4*8)(r4) /* Save floating point registers */ + stfd f5,CSAfpr+(5*8)(r4) /* Save floating point registers */ + stfd f6,CSAfpr+(6*8)(r4) /* Save floating point registers */ + stfd f7,CSAfpr+(7*8)(r4) /* Save floating point registers */ + stfd f8,CSAfpr+(8*8)(r4) /* Save floating point registers */ + stfd f9,CSAfpr+(9*8)(r4) /* Save floating point registers */ + stfd f10,CSAfpr+(10*8)(r4) /* Save floating point registers */ + stfd f11,CSAfpr+(11*8)(r4) /* Save floating point registers */ + stfd f12,CSAfpr+(12*8)(r4) /* Save floating point registers */ + stfd f13,CSAfpr+(13*8)(r4) /* Save floating point registers */ + stfd f14,CSAfpr+(14*8)(r4) /* Save floating point registers */ + stfd f15,CSAfpr+(15*8)(r4) /* Save floating point registers */ + stfd f16,CSAfpr+(16*8)(r4) /* Save floating point registers */ + stfd f17,CSAfpr+(17*8)(r4) /* Save floating point registers */ + stfd f18,CSAfpr+(18*8)(r4) /* Save floating point registers */ + stfd f19,CSAfpr+(19*8)(r4) /* Save floating point registers */ + stfd f20,CSAfpr+(20*8)(r4) /* Save floating point registers */ + stfd f21,CSAfpr+(21*8)(r4) /* Save floating point registers */ + stfd f22,CSAfpr+(22*8)(r4) /* Save floating point registers */ + stfd f23,CSAfpr+(23*8)(r4) /* Save floating point registers */ + stfd f24,CSAfpr+(24*8)(r4) /* Save floating point registers */ + stfd f25,CSAfpr+(25*8)(r4) /* Save floating point registers */ + stfd f26,CSAfpr+(26*8)(r4) /* Save floating point registers */ + stfd f27,CSAfpr+(27*8)(r4) /* Save floating point registers */ + stfd f28,CSAfpr+(28*8)(r4) /* Save floating point registers */ + stfd f29,CSAfpr+(29*8)(r4) /* Save floating point registers */ + stfd f30,CSAfpr+(30*8)(r4) /* Save floating point registers */ + stfd f31,CSAfpr+(31*8)(r4) /* Save floating point registers */ + 
+ mffs f1 /* Get the FPSCR */ + stfd f1,CSAfpscr-4(r4) /* Save the whole thing (we'll overlay the first half with CR later) */ + + lfd f1,CSAfpr+(1*4)(r4) /* Restore F1 */ + + mtmsr r5 /* Put the floating point back to what it was before */ + isync /* Wait for it */ + + lwz r6,savecr(r13) /* Get the old CR (unclean) */ + stw r6,CSAcr(r4) /* Save the CR */ + + mfxer r6 /* Get the XER */ + stw r6,CSAxer(r4) /* Save the XER */ + + lwz r6,savelr(r13) /* Get the old LR (unclean) */ + stw r6,CSAlr(r4) /* Save the LR */ + + mfctr r6 /* Get the CTR */ + stw r6,CSActr(r4) /* Save the CTR */ + +STtbase: mftbu r5 /* Get the upper timebase */ + mftb r6 /* Get the lower */ + mftbu r7 /* Get the top again */ + cmplw cr0,r5,r7 /* Did it tick? */ + bne- STtbase /* Yeah, do it again... */ + + mfdec r7 /* Get the decrimenter (make it at about the same time as the TB) */ + stw r7,CSAdec(r4) /* Save the decrimenter */ + + + stw r5,CSAtbu(r4) /* Stash the top part */ + stw r6,CSAtbl(r4) /* Stash the lower part */ + + lwz r5,savesrr1(r13) /* SRR1 at exception is as close as we get to the MSR (unclean) */ + lwz r6,savesrr0(r13) /* Get SRR0 also */ + stw r5,CSAmsr(r4) /* Save the MSR */ + stw r6,CSApc(r4) /* Save the PC */ + stw r5,CSAsrr1(r4) /* Set SRR1 also */ + stw r6,CSAsrr0(r4) /* Save SRR0 */ + + mfpvr r5 /* Get the PVR */ + stw r5,CSApvr(r4) /* Save the PVR */ + + mfspr r5,pir /* Get the PIR */ + stw r5,CSApir(r4) /* Save the PIR */ + + mfspr r5,ibat0u /* Get the upper IBAT0 */ + mfspr r6,ibat0l /* Get the lower IBAT0 */ + stw r5,CSAibat+(0*8+0)(r4) /* Save the upper IBAT0 */ + stw r6,CSAibat+(0*8+4)(r4) /* Save the upper IBAT0 */ + + mfspr r5,ibat1u /* Get the upper IBAT1 */ + mfspr r6,ibat1l /* Get the lower IBAT1 */ + stw r5,CSAibat+(1*8+0)(r4) /* Save the upper IBAT1 */ + stw r6,CSAibat+(1*8+4)(r4) /* Save the upper IBAT1 */ + + mfspr r5,ibat2u /* Get the upper IBAT2 */ + mfspr r6,ibat2l /* Get the lower IBAT2 */ + stw r5,CSAibat+(2*8+0)(r4) /* Save the upper IBAT2 */ + stw 
r6,CSAibat+(2*8+4)(r4) /* Save the upper IBAT2 */ + + mfspr r5,ibat3u /* Get the upper IBAT3 */ + mfspr r6,ibat3l /* Get the lower IBAT3 */ + stw r5,CSAibat+(3*8+0)(r4) /* Save the upper IBAT3 */ + stw r6,CSAibat+(3*8+4)(r4) /* Save the upper IBAT3 */ + + mfspr r5,dbat0u /* Get the upper DBAT0 */ + mfspr r6,dbat0l /* Get the lower DBAT0 */ + stw r5,CSAdbat+(0*8+0)(r4) /* Save the upper DBAT0 */ + stw r6,CSAdbat+(0*8+4)(r4) /* Save the upper DBAT0 */ + + mfspr r5,dbat1u /* Get the upper DBAT1 */ + mfspr r6,dbat1l /* Get the lower DBAT1 */ + stw r5,CSAdbat+(1*8+0)(r4) /* Save the upper DBAT1 */ + stw r6,CSAdbat+(1*8+4)(r4) /* Save the upper DBAT1 */ + + mfspr r5,dbat2u /* Get the upper DBAT2 */ + mfspr r6,dbat2l /* Get the lower DBAT2 */ + stw r5,CSAdbat+(2*8+0)(r4) /* Save the upper DBAT2 */ + stw r6,CSAdbat+(2*8+4)(r4) /* Save the upper DBAT2 */ + + mfspr r5,dbat3u /* Get the upper DBAT3 */ + mfspr r6,dbat3l /* Get the lower DBAT3 */ + stw r5,CSAdbat+(3*8+0)(r4) /* Save the upper DBAT3 */ + stw r6,CSAdbat+(3*8+4)(r4) /* Save the upper DBAT3 */ + + mfsdr1 r5 /* Get the SDR1 */ + stw r5,CSAsdr1(r4) /* Save the SDR1 */ + + mfsr r5,sr0 /* Get SR 0 */ + mfsr r6,sr1 /* Get SR 1 */ + mfsr r7,sr2 /* Get SR 2 */ + stw r5,CSAsr+(0*4)(r4) /* Save SR 0 */ + stw r6,CSAsr+(1*4)(r4) /* Save SR 1 */ + mfsr r5,sr3 /* Get SR 3 */ + mfsr r6,sr4 /* Get SR 4 */ + stw r7,CSAsr+(2*4)(r4) /* Save SR 2 */ + mfsr r7,sr5 /* Get SR 5 */ + stw r5,CSAsr+(3*4)(r4) /* Save SR 3 */ + stw r6,CSAsr+(4*4)(r4) /* Save SR 4 */ + mfsr r5,sr6 /* Get SR 6 */ + mfsr r6,sr7 /* Get SR 7 */ + stw r7,CSAsr+(5*4)(r4) /* Save SR 5 */ + mfsr r7,sr8 /* Get SR 8 */ + stw r5,CSAsr+(6*4)(r4) /* Save SR 6 */ + stw r6,CSAsr+(7*4)(r4) /* Save SR 7 */ + mfsr r5,sr9 /* Get SR 9 */ + mfsr r6,sr10 /* Get SR 11 */ + stw r7,CSAsr+(8*4)(r4) /* Save SR 8 */ + mfsr r7,sr11 /* Get SR 11 */ + stw r5,CSAsr+(9*4)(r4) /* Save SR 9 */ + stw r6,CSAsr+(10*4)(r4) /* Save SR 10 */ + mfsr r5,sr12 /* Get SR 12 */ + mfsr r6,sr13 /* Get SR 13 
*/ + stw r7,CSAsr+(11*4)(r4) /* Save SR 11 */ + mfsr r7,sr14 /* Get SR 14 */ + stw r5,CSAsr+(12*4)(r4) /* Save SR 12 */ + stw r6,CSAsr+(13*4)(r4) /* Save SR 13 */ + mfsr r5,sr15 /* Get SR 15 */ + stw r7,CSAsr+(14*4)(r4) /* Save SR 14 */ + stw r5,CSAsr+(15*4)(r4) /* Save SR 15 */ + + mfdar r6 /* Get the DAR */ + stw r6,CSAdar(r4) /* Save it */ + + mfdsisr r5 /* Get the DSISR */ + stw r5,CSAdsisr(r4) /* Save it */ + + stw r10,CSAsprg+(1*4)(r4) /* Save SPRG1 */ + mfspr r7,sprg0 /* Get SPRG0 */ + mfspr r6,sprg2 /* Get SPRG2 */ + stw r7,CSAsprg+(0*4)(r4) /* Save SPRG0 */ + mfspr r5,sprg3 /* Get SPRG3 */ + stw r6,CSAsprg+(2*4)(r4) /* Save SPRG2 */ + stw r5,CSAsprg+(3*4)(r4) /* Save SPRG4 */ + + mfspr r6,1013 /* Get the DABR */ + mfspr r7,1010 /* Get the IABR */ + stw r6,CSAdabr(r4) /* Save the DABR */ + stw r7,CSAiabr(r4) /* Save the IABR */ + + mfspr r5,282 /* Get the EAR */ + stw r5,CSAear(r4) /* Save the EAR */ + + lis r7,0xDEAD /* Get 0xDEAD */ + ori r7,r7,0xF1D0 /* Get 0xDEADF1D0 */ + + mfpvr r5 /* Get the processor type */ + rlwinm r5,r5,16,16,31 /* Isolate the processor */ + cmplwi cr1,r5,4 /* Set CR1_EQ if this is a plain 604, something else if it's a 604E */ + + mfspr r6,hid0 /* Get HID0 */ + mr r5,r7 /* Assume 604 */ + beq cr1,NoHID1 /* It is... 
*/ + mfspr r5,hid1 /* Get the HID1 */ + +NoHID1: stw r6,CSAhid+(0*4)(r4) /* Save HID0 */ + stw r5,CSAhid+(1*4)(r4) /* Save HID1 */ + stw r7,CSAhid+(2*4)(r4) /* Save HID2 */ + stw r7,CSAhid+(3*4)(r4) /* Save HID3 */ + stw r7,CSAhid+(4*4)(r4) /* Save HID4 */ + stw r7,CSAhid+(5*4)(r4) /* Save HID5 */ + stw r7,CSAhid+(6*4)(r4) /* Save HID6 */ + stw r7,CSAhid+(7*4)(r4) /* Save HID7 */ + stw r7,CSAhid+(8*4)(r4) /* Save HID8 */ + stw r7,CSAhid+(9*4)(r4) /* Save HID9 */ + stw r7,CSAhid+(10*4)(r4) /* Save HID10 */ + stw r7,CSAhid+(11*4)(r4) /* Save HID11 */ + stw r7,CSAhid+(12*4)(r4) /* Save HID12 */ + stw r7,CSAhid+(13*4)(r4) /* Save HID13 */ + stw r7,CSAhid+(14*4)(r4) /* Save HID14 */ + stw r7,CSAhid+(15*4)(r4) /* Save HID15 */ + + mfspr r6,952 /* Get MMCR0 */ + mr r5,r7 /* Assume 604 */ + beq NoMMCR1 /* It is... */ + mfspr r5,956 /* Get the MMCR1 */ + +NoMMCR1: stw r6,CSAmmcr+(0*4)(r4) /* Save MMCR0 */ + stw r5,CSAmmcr+(1*4)(r4) /* Save MMCR1 */ + + mfspr r6,953 /* Get PMC1 */ + mfspr r5,954 /* Get PMC2 */ + stw r6,CSApmc+(0*4)(r4) /* Save PMC1 */ + stw r5,CSApmc+(1*4)(r4) /* Save PMC2 */ + + mr r6,r7 /* Assume 604 */ + mr r5,r7 /* Assume 604 */ + beq NoPMC3 /* Yeah... */ + mfspr r6,957 /* Get the PMC3 for a 604E */ + mfspr r5,958 /* Get the PMC4 for a 604E */ + +NoPMC3: stw r6,CSApmc+(2*4)(r4) /* Save PMC3 */ + stw r5,CSApmc+(3*4)(r4) /* Save PMC4 */ + + mfspr r6,955 /* Get SIA */ + mfspr r5,959 /* Get SDA */ + stw r6,CSAsia(r4) /* Save the SIA */ + stw r5,CSAsda(r4) /* Save the SDA */ + + stw r7,CSAmq(r4) /* There is no MQ on either the 604 or 604E */ + + + lwz r6,MPPICStat(r9) /* Get the status of this processor */ + lis r10,MPPICReady>>16 /* Get the flag for reset or not */ + li r5,kSIGPResetState /* Assume we're operating */ + and. r0,r6,r10 /* See if the ready bit is set */ + lis r10,MPPICStop>>16 /* Get the flag for stopped or not */ + beq SetStateInf /* Go set that we are reset... */ + and. r0,r6,r10 /* Are we stopped? 
*/ + li r5,kSIGPStoppedState /* Assume we area */ + bne SetStateInf /* We are, go set it... */ + li r5,kSIGPOperatingState /* Not stopped, so we're going */ + +SetStateInf: stb r5,CSAstate(r4) /* Set the state byte */ + + li r0,1 /* Set the truth */ + sync /* Make sure it's stored */ + + stb r0,CSAregsAreValid(r4) /* Set that the status is valid */ + + blr /* We're done here... */ + + +/******************************************************************************************************** */ +/* */ +/* The synchronize time base function. No state requirements for this one. */ +/* */ +/******************************************************************************************************** */ + +ITBsync: /* This handles the synchronize time base function */ + lis r12,HIGH_ADDR(MPPIwork) /* Get the top of work area */ + li r0,MPPICfTBsy1 /* Get the flag for TB sync state 1 */ + li r7,0 /* Get a 0 */ + ori r12,r12,LOW_ADDR(MPPIwork) /* Get low part of work area */ + mttbl r7 /* Clear the bottom of the TB so's there's noupper ticks */ + mttbu r7 /* Clear the top part, just 'cause I wanna */ + + sync /* Make sure all is saved */ + stb r0,MPPICStat+2(r9) /* Tell the main dude to tell us the time */ + isync /* Make sure we don't go nowhere's */ + +/* */ +/* Remember that the sync'ing processor insures that the TB won't tick the high part for at least */ +/* 16k ticks. That should be way longer than we need for the whole process here */ +/* */ + +WaitTBLower: lwz r5,MPPITBsync+4-MPPIwork(r12) /* Get the lower part of the TB */ + mttbl r5 /* Put it in just in case it's set now */ + mr. r5,r5 /* Was it actually? */ + beq+ WaitTBLower /* Nope, go check again... 
*/ + lwz r4,MPPITBsync-MPPIwork(r12) /* Get the high order part */ + mttbu r4 /* Set the top half also */ + + stw r7,MPPITBsync+4-MPPIwork(r12) /* Tell 'em we've got it */ + + sync + + li r4,0 /* Clear this */ + la r5,MPPISncFght-32-MPPIwork(r12) /* Point to the squared circle (our corner) */ + + b TB1stPnch /* Go take the first punch... */ + +TBSargue: + dcbf 0,r5 /* *** Fix cache coherency (data integrity) HW bug *** */ + sync /* *** Fix cache coherency (data integrity) HW bug *** */ + lwz r6,0(r5) /* Listen for the procecution's argument */ + mr. r6,r6 /* See if they are done */ + beq+ TBSargue /* Nope, still going... */ + +TB1stPnch: mftb r7 /* They're done, time for rebuttal */ + stw r7,32(r5) /* Make rebuttle */ + + addi r4,r4,1 /* Count rounds */ + + cmplwi cr0,r4,10 /* See if we've gone 9 more rounds */ + addi r5,r5,64 /* Point to the next round areas */ + + blt+ TBSargue /* Not yet, come out of your corners fighting... */ + +/* */ +/* We'll set the latest-up-to-datest from the other processor now */ +/* */ +TBSetTB: + dcbf 0,r5 /* *** Fix cache coherency (data integrity) HW bug *** */ + sync /* *** Fix cache coherency (data integrity) HW bug *** */ + lwz r6,0(r5) /* Listen for the procecution's argument */ + mttbl r6 /* Set it just in case it's ok */ + mr. r6,r6 /* See if they are done */ + beq+ TBSetTB /* Nope, still going... */ + +/* */ +/* Get average duration for each processor. We skip the first pass on the asumption */ +/* that the caches were not warmed up and it would take longer. In proctice this */ +/* is what was seen. 
*/ +/* */ + + mr r0,r11 /* Move return address to a safe register */ + + li r4,0 /* Clear a counter */ + li r3,0 /* Clear accumulator for duration */ + li r10,0 /* Clear start time accumulator top half */ + li r11,0 /* Clear start time accumulator bottom half */ + li r1,0 /* Clear start time accumulator top half */ + li r2,0 /* Clear start time accumulator bottom half */ + li r10,0 /* Clear accumulator for durations */ + la r5,MPPISncFght+64-MPPIwork(r12) /* Get second round start time address */ + +TBSaccumU: lwz r6,0(r5) /* Get start time */ + lwz r11,32(r5) /* Get the other processor's start time */ + lwz r7,64(r5) /* Get end time */ + lwz r8,96(r5) /* Other proc's end time */ + sub r7,r7,r6 /* Get duration */ + sub r8,r8,r11 /* Get other side's duration */ + addi r4,r4,1 /* Count arguments */ + add r3,r3,r7 /* Accumulate durations */ + add r2,r2,r7 /* Accumulate other side's durations */ + cmplwi cr0,r4,8 /* Have we gotten them all yet? */ + addi r5,r5,64 /* Step to the next argument */ + blt+ TBSaccumU /* We're not done yet... */ + + add r7,r2,r3 /* Sum the two differences */ + addi r7,r7,0x10 /* Round up */ + rlwinm r7,r7,27,5,31 /* Get the average difference divided in half */ + + mftb r8 /* Get the time now */ + add r8,r8,r7 /* Slide the window */ + mttbl r8 /* Set the time */ + + stw r12,MPPITBsync+4-MPPIwork(r12) /* Show that we are done */ + + lwz r3,MPPICStat(r9) /* Get back our status */ + mr r11,r0 /* Restore the return register */ + b KillBusy /* We're all done now, done for it, c'est la vie... */ + + +/******************************************************************************************************** */ +/* */ +/* The reset function. No state requirements for this one. */ +/* This suicides the processor. Our caller is never returned to (good english). The only way out of */ +/* this is a start function subsequently. So, we give a flying f**k about the registers 'n' sutff. 
*/ +/* */ +/******************************************************************************************************** */ + +IReset: lis r28,0x8000 /* Turn on machine checks */ + + ori r28,r28,0xCC84 /* Enable caches, clear them, */ + /* disable serial execution and turn BHT on */ + sync + mtspr HID0,r28 /* Start the cache clear */ + sync + +/* */ +/* Clear out the TLB. They be garbage after hard reset. */ +/* */ + + li r0,512 /* Get number of TLB entries (FIX THIS) */ + li r3,0 /* Start at 0 */ + mtctr r0 /* Set the CTR */ + +IRpurgeTLB: tlbie r3 /* Purge this entry */ + addi r3,r3,4096 /* Next page */ + bdnz IRpurgeTLB /* Do 'em all... */ + + sync /* Make sure all TLB purges are done */ + tlbsync /* Make sure on other processors also */ + sync /* Make sure the TLBSYNC is done */ + +/* */ +/* Clear out the BATs. */ +/* */ + + li r3,0 /* Clear a register */ + + mtspr DBAT0L,r3 /* Clear BAT */ + mtspr DBAT0U,r3 /* Clear BAT */ + mtspr DBAT1L,r3 /* Clear BAT */ + mtspr DBAT1U,r3 /* Clear BAT */ + mtspr DBAT2L,r3 /* Clear BAT */ + mtspr DBAT2U,r3 /* Clear BAT */ + mtspr DBAT3L,r3 /* Clear BAT */ + mtspr DBAT3U,r3 /* Clear BAT */ + + mtspr IBAT0L,r3 /* Clear BAT */ + mtspr IBAT0U,r3 /* Clear BAT */ + mtspr IBAT1L,r3 /* Clear BAT */ + mtspr IBAT1U,r3 /* Clear BAT */ + mtspr IBAT2L,r3 /* Clear BAT */ + mtspr IBAT2U,r3 /* Clear BAT */ + mtspr IBAT3L,r3 /* Clear BAT */ + mtspr IBAT3U,r3 /* Clear BAT */ + +/* */ +/* Map 0xF0000000 to 0xFFFFFFFF for I/O; make it R/W non-cacheable */ +/* Map 0x00000000 to 0x0FFFFFFF for mainstore; make it R/W cachable */ +/* */ + + lis r6,0xF000 /* Set RPN to last segment */ + ori r6,r6,0x1FFF /* Set up upper BAT for 256M, access both */ + + lis r7,0xF000 /* Set RPN to last segment */ + ori r7,r7,0x0032 /* Set up lower BAT for 256M, access both, non-cachable */ + + mtspr DBAT0L,r7 /* Setup ROM and I/O mapped areas */ + mtspr DBAT0U,r6 /* Now do the upper DBAT */ + sync + + li r6,0x1FFF /* Set up upper BAT for 256M, access both */ + li r7,0x0012 
/* Set up lower BAT for r/w access */ + + mtspr DBAT1L,r7 /* Set up an initial view of mainstore */ + mtspr DBAT1U,r6 /* Now do the upper DBAT */ + sync + +/* */ +/* Clean up SDR and segment registers */ +/* */ + + li r3,0 /* Clear a register */ + mtspr SDR1,r3 /* Clear SDR1 */ + + li r4,0 /* Clear index for segment registers */ + lis r5,0x1000 /* Set the segment indexer */ + +IRclearSR: mtsrin r3,r4 /* Zero out the SR */ + add. r4,r4,r5 /* Point to the next segment */ + bne- IRclearSR /* Keep going until we wrap back to 0 */ + + lis r3,(MPPICOnline+MPPICStop)>>16 /* Set the reset/online state flags */ + b KillBusy /* Go wipe out the busy flags... */ + +/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ +/* */ +/* Here lies the Phoney Firmware used to test SIGPs. Take this out later. */ +/* */ +/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ + +mp_PhoneyFirmware: + + li r27,0x3040 /* Set floating point and machine checks on, IP to 0xFFF0xxxx */ + mtmsr r27 /* Load 'em on in */ + isync + + bl PhoneyBase /* Make a base register */ +PhoneyBase: mflr r26 /* Get it */ + addi r26,r26,LOW_ADDR(MPPIbase-PhoneyBase) /* Adjust it back */ + + la r20,LOW_ADDR(rupttab-MPPIbase)(r26) /* Get the address of the interrupt table */ + la r21,LOW_ADDR(rupttabend-MPPIbase)(r26) /* Get the end of the table */ + +relocate: lwz r22,0(r20) /* Get the displacement to routine */ + add r22,r22,r12 /* Relocate to the physical address */ + stw r22,0(r20) /* Stick it back */ + addi r20,r20,4 /* Point to the next one */ + cmplw cr0,r20,r21 /* Still in table? */ + ble+ cr0,relocate /* Yeah... 
*/ + + la r20,LOW_ADDR(rupttab-MPPIbase)(r26) /* Get the interrupt table back again */ + mtsprg 3,r20 /* Activate the phoney Rupt table */ + + lis r24,hi16(HammerHead) /* Get the actual hammerhead address */ + ori r24,r24,0x0032 /* Make R/W non-cachable */ + lwz r23,MPPIHammer-MPPIwork(r12) /* Get the address mapped on the main processor */ + ori r23,r23,0x0003 /* Set both super and user valid for 128KB */ + + mtspr DBAT0L,r24 /* Setup hammerhead's real address */ + mtspr DBAT0U,r23 /* Map hammerhead to the same virtual address as on the main processor */ + sync /* Make sure it is done */ + + la r25,MPPICPU2-MPPIwork(r12) /* Point to a phoney register save area */ + mtsprg 1,r25 /* Phoney up initialized processor state */ + + lis r24,0xFEED /* Get 0xFEED */ + ori r24,r24,0xF1D0 /* Get 0xFEEDF1D0 */ + + stw r24,CSAgpr+(0*4)(r25) /* Store invalid R0 */ + stw r24,CSAgpr+(1*4)(r25) /* Store invalid R1 */ + stw r24,CSAgpr+(2*4)(r25) /* Store invalid R2 */ + stw r24,CSAgpr+(3*4)(r25) /* Store invalid R3 */ + stw r24,CSAgpr+(4*4)(r25) /* Store invalid r4 */ + stw r24,CSAgpr+(5*4)(r25) /* Store invalid R5 */ + stw r24,CSAgpr+(6*4)(r25) /* Store invalid R6 */ + stw r24,CSAgpr+(7*4)(r25) /* Store invalid r7 */ + stw r24,CSAgpr+(8*4)(r25) /* Store invalid R8 */ + stw r24,CSAgpr+(9*4)(r25) /* Store invalid R9 */ + stw r24,CSAgpr+(10*4)(r25) /* Store invalid R10 */ + stw r24,CSAgpr+(11*4)(r25) /* Store invalid R11 */ + stw r24,CSAgpr+(12*4)(r25) /* Store invalid R12 */ + +waititout: lwz r25,0x30(br0) /* Get wait count */ + mfmsr r24 /* Get the MSR */ + addi r25,r25,1 /* Bounce it up */ + ori r24,r24,0x8000 /* Turn on external interruptions */ + stw r25,0x30(br0) /* Save back the count */ + mtmsr r24 /* Set it */ + isync /* Stop until we're here */ + b waititout /* Loop forever... 
*/ + +/* */ +/* Phoney interrupt handlers */ +/* */ + +pexternal: mflr r29 /* Get the LR value */ + lwz r29,0(r29) /* Get the rupt code */ + stw r29,0x0B0(br0) /* Save the code */ + bl GotSignal /* Call the signal handler */ + oris r3,r3,0x8000 /* Turn on high bit so we see a code 0 */ + stw r3,0xA8(br0) /* Save return code in debug area */ + +ignorerupt: mflr r29 /* Get the LR value */ + lwz r29,0(r29) /* Get the rupt code */ + stw r29,0x0B0(br0) /* Save the code */ + rfi /* Bail to from whence we commest... */ + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + +rupttab: .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long pexternal /* Phoney external handler */ + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt + .long ignorerupt +rupttabend: .long ignorerupt + +/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ +/* */ +/* Here lies the end of the Phoney Firmware used to test SIGPs. Take this out later. 
*/ +/* */ +/* (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) (TEST/DEBUG) */ + + +/* */ +/* Table of function offsets */ +/* */ + +MPPIFuncOffs: + + .long CountProcessors-MPPIFunctions /* Offset to routine */ + .long StartProcessor-MPPIFunctions /* Offset to routine */ + .long ResumeProcessor-MPPIFunctions /* Offset to routine */ + .long StopProcessor-MPPIFunctions /* Offset to routine */ + .long ResetProcessor-MPPIFunctions /* Offset to routine */ + .long SignalProcessor-MPPIFunctions /* Offset to routine */ + .long StoreProcessorStatus-MPPIFunctions /* Offset to routine */ + .long SynchClock-MPPIFunctions /* Offset to routine */ + .long GetExtHandlerAddress-MPPIFunctions /* Offset to routine */ + .long GotSignal-MPPIFunctions /* Offset to routine */ + .long ProcessorState-MPPIFunctions /* Offset to routine */ + .long RunSIGPRun-MPPIFunctions /* Offset to routine */ + .long mp_PhoneyFirmware-MPPIFunctions /* (TEST/DEBUG) */ + +MPPISize: + diff --git a/osfmk/ppc/POWERMAC/mp/mp.c b/osfmk/ppc/POWERMAC/mp/mp.c new file mode 100644 index 000000000..b75296d6e --- /dev/null +++ b/osfmk/ppc/POWERMAC/mp/mp.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +#include +#include + +MPPlugInSpec MPspec; /* An area for the MP interfaces */ +MPEntryPts MPEntries; /* Real addresses of plugin routines */ diff --git a/osfmk/ppc/POWERMAC/mp/mp.h b/osfmk/ppc/POWERMAC/mp/mp.h new file mode 100644 index 000000000..9dd6ec62f --- /dev/null +++ b/osfmk/ppc/POWERMAC/mp/mp.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_POWERMAC_MP_MP_H_ +#define _PPC_POWERMAC_MP_MP_H_ + +#include + +#if NCPUS > 1 + +#ifndef ASSEMBLER +#include +extern int real_ncpus; /* real number of cpus */ +extern int wncpu; /* wanted number of cpus */ +decl_simple_lock_data(extern, debugger_lock) /* debugger lock */ + +extern int debugger_cpu; /* current cpu running debugger */ +extern int debugger_debug; +extern int debugger_is_slave[]; +extern int debugger_active[]; +#endif /* ASSEMBLER */ + +#endif /* NCPUS > 1 */ + +#endif /* _PPC_POWERMAC_MP_MP_H_ */ diff --git a/osfmk/ppc/POWERMAC/scc_8530.h b/osfmk/ppc/POWERMAC/scc_8530.h new file mode 100644 index 000000000..868e649df --- /dev/null +++ b/osfmk/ppc/POWERMAC/scc_8530.h @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: scc_8530.h + * Author: Alessandro Forin, Carnegie Mellon University + * Date: 6/91 + * + * Definitions for the Zilog Z8530 SCC serial line chip + */ + +#ifndef _SCC_8530_H_ +#define _SCC_8530_H_ + +/* + * Register map, needs definition of the alignment + * used on the specific machine. + * #define the 'scc_register_t' data type before + * including this header file. For restrictions on + * access modes define the set/get_datum macros. + * We provide defaults ifnot. 
+ */ + + +#define SCC_CHANNEL_A 1 +#define SCC_CHANNEL_B 0 + +#define SCC_MODEM SCC_CHANNEL_A +#define SCC_PRINTER SCC_CHANNEL_B + +#define SCC_DATA_OFFSET 4 + +typedef unsigned char *scc_regmap_t; + +extern void powermac_scc_set_datum(scc_regmap_t regs, unsigned int offset, unsigned char value); +extern unsigned char powermac_scc_get_datum(scc_regmap_t regs, unsigned int offset); + +#define scc_set_datum(regs, d, v) powermac_scc_set_datum(regs, (d), (v)) +#define scc_get_datum(regs, d,v) (v) = powermac_scc_get_datum(regs, (d)); + +#define scc_init_reg(regs,chan) { \ + char tmp; \ + scc_get_datum(regs, ((chan)<<1),tmp); \ + scc_get_datum(regs, ((chan)<<1),tmp); \ + } + +#define scc_read_reg(regs,chan,reg,val) { \ + scc_set_datum(regs, ((chan)<<1),reg); \ + scc_get_datum(regs, ((chan)<<1),val); \ + } + +#define scc_read_reg_zero(regs,chan,val) { \ + scc_get_datum(regs, ((chan)<<1),val); \ + } + +#define scc_write_reg(regs,chan,reg,val) { \ + scc_set_datum(regs, ((chan)<<1),reg); \ + scc_set_datum(regs, ((chan)<<1),val); \ + } + +#define scc_write_reg_zero(regs,chan,val) { \ + scc_set_datum(regs, ((chan)<<1),val); \ + } + +#define scc_read_data(regs,chan,val) { \ + scc_get_datum(regs, ((chan)<<1)+SCC_DATA_OFFSET,val); \ + } + +#define scc_write_data(regs,chan,val) { \ + scc_set_datum(regs, ((chan)<<1)+SCC_DATA_OFFSET,val); \ + } + + +/* + * Addressable registers + */ + +#define SCC_RR0 0 /* status register */ +#define SCC_RR1 1 /* special receive conditions */ +#define SCC_RR2 2 /* (modified) interrupt vector */ +#define SCC_RR3 3 /* interrupts pending (cha A only) */ +#define SCC_RR8 8 /* recv buffer (alias for data) */ +#define SCC_RR10 10 /* sdlc status */ +#define SCC_RR12 12 /* BRG constant, low part */ +#define SCC_RR13 13 /* BRG constant, high part */ +#define SCC_RR15 15 /* interrupts currently enabled */ + +#define SCC_WR0 0 /* reg select, and commands */ +#define SCC_WR1 1 /* interrupt and DMA enables */ +#define SCC_WR2 2 /* interrupt vector */ +#define 
SCC_WR3 3 /* receiver params and enables */ +#define SCC_WR4 4 /* clock/char/parity params */ +#define SCC_WR5 5 /* xmit params and enables */ +#define SCC_WR6 6 /* synchr SYNCH/address */ +#define SCC_WR7 7 /* synchr SYNCH/flag */ +#define SCC_WR8 8 /* xmit buffer (alias for data) */ +#define SCC_WR9 9 /* vectoring and resets */ +#define SCC_WR10 10 /* synchr params */ +#define SCC_WR11 11 /* clocking definitions */ +#define SCC_WR12 12 /* BRG constant, low part */ +#define SCC_WR13 13 /* BRG constant, high part */ +#define SCC_WR14 14 /* BRG enables and commands */ +#define SCC_WR15 15 /* interrupt enables */ + +/* + * Read registers defines + */ + +#define SCC_RR0_BREAK 0x80 /* break detected (rings twice), or */ +#define SCC_RR0_ABORT 0x80 /* abort (synchr) */ +#define SCC_RR0_TX_UNDERRUN 0x40 /* xmit buffer empty/end of message */ +#define SCC_RR0_CTS 0x20 /* clear-to-send pin active (sampled + only on intr and after RESI cmd */ +#define SCC_RR0_SYNCH 0x10 /* SYNCH found/still hunting */ +#define SCC_RR0_DCD 0x08 /* carrier-detect (same as CTS) */ +#define SCC_RR0_TX_EMPTY 0x04 /* xmit buffer empty */ +#define SCC_RR0_ZERO_COUNT 0x02 /* ? */ +#define SCC_RR0_RX_AVAIL 0x01 /* recv fifo not empty */ + +#define SCC_RR1_EOF 0x80 /* end-of-frame, SDLC mode */ +#define SCC_RR1_CRC_ERR 0x40 /* incorrect CRC or.. 
*/ +#define SCC_RR1_FRAME_ERR 0x40 /* ..bad frame */ +#define SCC_RR1_RX_OVERRUN 0x20 /* rcv fifo overflow */ +#define SCC_RR1_PARITY_ERR 0x10 /* incorrect parity in data */ +#define SCC_RR1_RESIDUE0 0x08 +#define SCC_RR1_RESIDUE1 0x04 +#define SCC_RR1_RESIDUE2 0x02 +#define SCC_RR1_ALL_SENT 0x01 + +/* RR2 contains the interrupt vector unmodified (channel A) or + modified as follows (channel B, if vector-include-status) */ + +#define SCC_RR2_STATUS(val) ((val)&0xe) /* 11/7/95 used to be 0xf */ + +#define SCC_RR2_B_XMIT_DONE 0x0 +#define SCC_RR2_B_EXT_STATUS 0x2 +#define SCC_RR2_B_RECV_DONE 0x4 +#define SCC_RR2_B_RECV_SPECIAL 0x6 +#define SCC_RR2_A_XMIT_DONE 0x8 +#define SCC_RR2_A_EXT_STATUS 0xa +#define SCC_RR2_A_RECV_DONE 0xc +#define SCC_RR2_A_RECV_SPECIAL 0xe + +/* Interrupts pending, to be read from channel A only (B raz) */ +#define SCC_RR3_zero 0xc0 +#define SCC_RR3_RX_IP_A 0x20 +#define SCC_RR3_TX_IP_A 0x10 +#define SCC_RR3_EXT_IP_A 0x08 +#define SCC_RR3_RX_IP_B 0x04 +#define SCC_RR3_TX_IP_B 0x02 +#define SCC_RR3_EXT_IP_B 0x01 + +/* RR8 is the receive data buffer, a 3 deep FIFO */ +#define SCC_RECV_BUFFER SCC_RR8 +#define SCC_RECV_FIFO_DEEP 3 + +#define SCC_RR10_1CLKS 0x80 +#define SCC_RR10_2CLKS 0x40 +#define SCC_RR10_zero 0x2d +#define SCC_RR10_LOOP_SND 0x10 +#define SCC_RR10_ON_LOOP 0x02 + +/* RR12/RR13 hold the timing base, upper byte in RR13 */ + +#define scc_get_timing_base(scc,chan,val) { \ + register char tmp; \ + scc_read_reg(scc,chan,SCC_RR12,val);\ + scc_read_reg(scc,chan,SCC_RR13,tmp);\ + (val) = ((val)<<8)|(tmp&0xff);\ + } + +#define SCC_RR15_BREAK_IE 0x80 +#define SCC_RR15_TX_UNDERRUN_IE 0x40 +#define SCC_RR15_CTS_IE 0x20 +#define SCC_RR15_SYNCH_IE 0x10 +#define SCC_RR15_DCD_IE 0x08 +#define SCC_RR15_zero 0x05 +#define SCC_RR15_ZERO_COUNT_IE 0x02 + + +/* + * Write registers defines + */ + +/* WR0 is used for commands too */ +#define SCC_RESET_TXURUN_LATCH 0xc0 +#define SCC_RESET_TX_CRC 0x80 +#define SCC_RESET_RX_CRC 0x40 +#define 
SCC_RESET_HIGHEST_IUS 0x38 /* channel A only */ +#define SCC_RESET_ERROR 0x30 +#define SCC_RESET_TX_IP 0x28 +#define SCC_IE_NEXT_CHAR 0x20 +#define SCC_SEND_SDLC_ABORT 0x18 +#define SCC_RESET_EXT_IP 0x10 + +#define SCC_WR1_DMA_ENABLE 0x80 /* dma control */ +#define SCC_WR1_DMA_MODE 0x40 /* drive ~req for DMA controller */ +#define SCC_WR1_DMA_RECV_DATA 0x20 /* from wire to host memory */ + /* interrupt enable/conditions */ +#define SCC_WR1_RXI_SPECIAL_O 0x18 /* on special only */ +#define SCC_WR1_RXI_ALL_CHAR 0x10 /* on each char, or special */ +#define SCC_WR1_RXI_FIRST_CHAR 0x08 /* on first char, or special */ +#define SCC_WR1_RXI_DISABLE 0x00 /* never on recv */ +#define SCC_WR1_PARITY_IE 0x04 /* on parity errors */ +#define SCC_WR1_TX_IE 0x02 +#define SCC_WR1_EXT_IE 0x01 + +/* WR2 is common and contains the interrupt vector (high nibble) */ + +#define SCC_WR3_RX_8_BITS 0xc0 +#define SCC_WR3_RX_6_BITS 0x80 +#define SCC_WR3_RX_7_BITS 0x40 +#define SCC_WR3_RX_5_BITS 0x00 +#define SCC_WR3_AUTO_ENABLE 0x20 +#define SCC_WR3_HUNT_MODE 0x10 +#define SCC_WR3_RX_CRC_ENABLE 0x08 +#define SCC_WR3_SDLC_SRCH 0x04 +#define SCC_WR3_INHIBIT_SYNCH 0x02 +#define SCC_WR3_RX_ENABLE 0x01 + +/* Should be re-written after reset */ +#define SCC_WR4_CLK_x64 0xc0 /* clock divide factor */ +#define SCC_WR4_CLK_x32 0x80 +#define SCC_WR4_CLK_x16 0x40 +#define SCC_WR4_CLK_x1 0x00 +#define SCC_WR4_EXT_SYNCH_MODE 0x30 /* synch modes */ +#define SCC_WR4_SDLC_MODE 0x20 +#define SCC_WR4_16BIT_SYNCH 0x10 +#define SCC_WR4_8BIT_SYNCH 0x00 +#define SCC_WR4_2_STOP 0x0c /* asynch modes */ +#define SCC_WR4_1_5_STOP 0x08 +#define SCC_WR4_1_STOP 0x04 +#define SCC_WR4_SYNCH_MODE 0x00 +#define SCC_WR4_EVEN_PARITY 0x02 +#define SCC_WR4_PARITY_ENABLE 0x01 + +#define SCC_WR5_DTR 0x80 /* drive DTR pin */ +#define SCC_WR5_TX_8_BITS 0x60 +#define SCC_WR5_TX_6_BITS 0x40 +#define SCC_WR5_TX_7_BITS 0x20 +#define SCC_WR5_TX_5_BITS 0x00 +#define SCC_WR5_SEND_BREAK 0x10 +#define SCC_WR5_TX_ENABLE 0x08 +#define 
SCC_WR5_CRC_16 0x04 /* CRC if non zero, .. */ +#define SCC_WR5_SDLC 0x00 /* ..SDLC otherwise */ +#define SCC_WR5_RTS 0x02 /* drive RTS pin */ +#define SCC_WR5_TX_CRC_ENABLE 0x01 + +/* Registers WR6 and WR7 are for synch modes data, with among other things: */ + +#define SCC_WR6_BISYNCH_12 0x0f +#define SCC_WR6_SDLC_RANGE_MASK 0x0f +#define SCC_WR7_SDLC_FLAG 0x7e + +/* Register WR7' (prime) controls some ESCC features */ +#define SCC_WR7P_RX_FIFO 0x08 /* Enable interrupt on FIFO 1/2 full */ + +/* WR8 is the transmit data buffer (no FIFO) */ +#define SCC_XMT_BUFFER SCC_WR8 + +#define SCC_WR9_HW_RESET 0xc0 /* force hardware reset */ +#define SCC_WR9_RESET_CHA_A 0x80 +#define SCC_WR9_RESET_CHA_B 0x40 +#define SCC_WR9_NON_VECTORED 0x20 /* mbz for Zilog chip */ +#define SCC_WR9_STATUS_HIGH 0x10 +#define SCC_WR9_MASTER_IE 0x08 +#define SCC_WR9_DLC 0x04 /* disable-lower-chain */ +#define SCC_WR9_NV 0x02 /* no vector */ +#define SCC_WR9_VIS 0x01 /* vector-includes-status */ + +#define SCC_WR10_CRC_PRESET 0x80 +#define SCC_WR10_FM0 0x60 +#define SCC_WR10_FM1 0x40 +#define SCC_WR10_NRZI 0x20 +#define SCC_WR10_NRZ 0x00 +#define SCC_WR10_ACTIVE_ON_POLL 0x10 +#define SCC_WR10_MARK_IDLE 0x08 /* flag if zero */ +#define SCC_WR10_ABORT_ON_URUN 0x04 /* flag if zero */ +#define SCC_WR10_LOOP_MODE 0x02 +#define SCC_WR10_6BIT_SYNCH 0x01 +#define SCC_WR10_8BIT_SYNCH 0x00 + +#define SCC_WR11_RTxC_XTAL 0x80 /* RTxC pin is input (ext oscill) */ +#define SCC_WR11_RCLK_DPLL 0x60 /* clock received data on dpll */ +#define SCC_WR11_RCLK_BAUDR 0x40 /* .. on BRG */ +#define SCC_WR11_RCLK_TRc_PIN 0x20 /* .. on TRxC pin */ +#define SCC_WR11_RCLK_RTc_PIN 0x00 /* .. on RTxC pin */ +#define SCC_WR11_XTLK_DPLL 0x18 +#define SCC_WR11_XTLK_BAUDR 0x10 +#define SCC_WR11_XTLK_TRc_PIN 0x08 +#define SCC_WR11_XTLK_RTc_PIN 0x00 +#define SCC_WR11_TRc_OUT 0x04 /* drive TRxC pin as output from..*/ +#define SCC_WR11_TRcOUT_DPLL 0x03 /* .. the dpll */ +#define SCC_WR11_TRcOUT_BAUDR 0x02 /* .. 
the BRG */ +#define SCC_WR11_TRcOUT_XMTCLK 0x01 /* .. the xmit clock */ +#define SCC_WR11_TRcOUT_XTAL 0x00 /* .. the external oscillator */ + +/* WR12/WR13 are for timing base preset */ +#define scc_set_timing_base(scc,chan,val) { \ + scc_write_reg(scc,chan,SCC_RR12,val);\ + scc_write_reg(scc,chan,SCC_RR13,(val)>>8);\ + } + +/* More commands in this register */ +#define SCC_WR14_NRZI_MODE 0xe0 /* synch modulations */ +#define SCC_WR14_FM_MODE 0xc0 +#define SCC_WR14_RTc_SOURCE 0xa0 /* clock is from pin .. */ +#define SCC_WR14_BAUDR_SOURCE 0x80 /* .. or internal BRG */ +#define SCC_WR14_DISABLE_DPLL 0x60 +#define SCC_WR14_RESET_CLKMISS 0x40 +#define SCC_WR14_SEARCH_MODE 0x20 +/* ..and more bitsy */ +#define SCC_WR14_LOCAL_LOOPB 0x10 +#define SCC_WR14_AUTO_ECHO 0x08 +#define SCC_WR14_DTR_REQUEST 0x04 +#define SCC_WR14_BAUDR_SRC 0x02 +#define SCC_WR14_BAUDR_ENABLE 0x01 + +#define SCC_WR15_BREAK_IE 0x80 +#define SCC_WR15_TX_UNDERRUN_IE 0x40 +#define SCC_WR15_CTS_IE 0x20 +#define SCC_WR15_SYNCHUNT_IE 0x10 +#define SCC_WR15_DCD_IE 0x08 +#define SCC_WR15_zero 0x05 +#define SCC_WR15_ZERO_COUNT_IE 0x02 +#define SCC_WR15_ENABLE_ESCC 0x01 /* Enable some ESCC registers */ + +#define NSCC_LINE 2 /* How many lines are support per 8530 */ +/* + * Driver status + */ + +#define SCC_FLAGS_DMA_PAUSED 0x00001 /* DMA has been paused because of XON/XOFF */ +#define SCC_FLAGS_DMA_TX_BUSY 0x00002 /* On going DMA operation.. */ + +struct scc_softreg { + unsigned char wr1; + unsigned char wr4; + unsigned char wr5; + unsigned char wr14; + + unsigned long speed; + unsigned long flags; + unsigned long dma_flags; +}; + + +struct scc_softc { + scc_regmap_t regs; + struct scc_dma_ops *dma_ops; + + /* software copy of some write regs, for reg |= */ + struct scc_softreg softr[NSCC_LINE]; + + int flags; + int modem[NSCC_LINE]; /* Mach modem bits (TM_DTR etc). 
*/ + int dcd_timer[NSCC_LINE]; + int dma_initted; + + char polling_mode; + char probed_once; + + boolean_t full_modem; +}; + +#define DCD_TIMEOUT 4 + +typedef struct scc_softc *scc_softc_t; +extern struct scc_softc scc_softc[]; + +#endif /*_SCC_8530_H_*/ diff --git a/osfmk/ppc/POWERMAC/serial_io.c b/osfmk/ppc/POWERMAC/serial_io.c new file mode 100644 index 000000000..498bd3755 --- /dev/null +++ b/osfmk/ppc/POWERMAC/serial_io.c @@ -0,0 +1,632 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: scc_8530_hdw.c + * Author: Alessandro Forin, Carnegie Mellon University + * Date: 6/91 + * + * Hardware-level operations for the SCC Serial Line Driver + */ + +#define NSCC 1 /* Number of serial chips, two ports per chip. */ +#if NSCC > 0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_KDB +#include +#endif /* MACH_KDB */ + +#define kdebug_state() (1) +#define delay(x) { volatile int _d_; for (_d_ = 0; _d_ < (10000*x); _d_++) ; } + +#define NSCC_LINE 2 /* 2 ttys per chip */ + +#define SCC_DMA_TRANSFERS 0 + +struct scc_tty scc_tty[NSCC_LINE]; + +#define scc_tty_for(chan) (&scc_tty[chan]) +/* #define scc_unit(dev_no) (dev_no) */ + +#define scc_dev_no(chan) ((chan)^0x01) +#define scc_chan(dev_no) ((dev_no)^0x01) + +int serial_initted = 0; +unsigned int scc_parm_done = 0; /* (TEST/DEBUG) */ + +static struct scc_byte { + unsigned char reg; + unsigned char val; +} scc_init_hw[] = { + + 9, 0x80, + 4, 0x44, + 3, 0xC0, + 5, 0xE2, + 2, 0x00, + 10, 0x00, + 11, 0x50, + 12, 0x0A, + 13, 0x00, + 3, 0xC1, + 5, 0xEA, + 14, 0x01, + 15, 0x00, + 0, 0x10, + 0, 0x10, +#if 0 + 1, 0x12, /* int or Rx, Tx int enable */ +#else + 1, 0x10, /* int or Rx, no Tx int enable */ +#endif + 9, 0x0A +}; + +static int scc_init_hw_count = sizeof(scc_init_hw)/sizeof(scc_init_hw[0]); + +enum scc_error {SCC_ERR_NONE, 
SCC_ERR_PARITY, SCC_ERR_BREAK, SCC_ERR_OVERRUN}; + + +/* + * BRG formula is: + * ClockFrequency (115200 for Power Mac) + * BRGconstant = --------------------------- - 2 + * BaudRate + */ + +#define SERIAL_CLOCK_FREQUENCY (115200*2) /* Power Mac value */ +#define convert_baud_rate(rate) ((((SERIAL_CLOCK_FREQUENCY) + (rate)) / (2 * (rate))) - 2) + +#define DEFAULT_SPEED 38400 +#define DEFAULT_FLAGS (TF_LITOUT|TF_ECHO) + +int scc_param(struct scc_tty *tp); + + +struct scc_softc scc_softc[NSCC]; +caddr_t scc_std[NSCC] = { (caddr_t) 0}; + + +#define SCC_RR1_ERRS (SCC_RR1_FRAME_ERR|SCC_RR1_RX_OVERRUN|SCC_RR1_PARITY_ERR) +#define SCC_RR3_ALL (SCC_RR3_RX_IP_A|SCC_RR3_TX_IP_A|SCC_RR3_EXT_IP_A|\ + SCC_RR3_RX_IP_B|SCC_RR3_TX_IP_B|SCC_RR3_EXT_IP_B) + +#define DEBUG_SCC +#undef DEBUG_SCC + +#ifdef DEBUG_SCC +static int total_chars, total_ints, total_overruns, total_errors, num_ints, max_chars; +static int chars_received[8]; +static int __SCC_STATS = 0; +static int max_in_q = 0; +static int max_out_q = 0; +#endif + +DECL_FUNNEL(, scc_funnel) /* funnel to serialize the SCC driver */ +boolean_t scc_funnel_initted = FALSE; +#define SCC_FUNNEL scc_funnel +#define SCC_FUNNEL_INITTED scc_funnel_initted + + +/* + * Adapt/Probe/Attach functions + */ +boolean_t scc_uses_modem_control = FALSE;/* patch this with adb */ +decl_simple_lock_data(,scc_stomp) /* (TEST/DEBUG) */ + +/* This is called VERY early on in the init and therefore has to have + * hardcoded addresses of the serial hardware control registers. 
The + * serial line may be needed for console and debugging output before + * anything else takes place + */ + +void +initialize_serial( caddr_t scc_phys_base ) +{ + int i, chan, bits; + scc_regmap_t regs; + DECL_FUNNEL_VARS + + assert( scc_phys_base ); + + if (!SCC_FUNNEL_INITTED) { + FUNNEL_INIT(&SCC_FUNNEL, master_processor); + SCC_FUNNEL_INITTED = TRUE; + } + FUNNEL_ENTER(&SCC_FUNNEL); + + if (serial_initted) { + FUNNEL_EXIT(&SCC_FUNNEL); + return; + } + + simple_lock_init(&scc_stomp, FALSE); /* (TEST/DEBUG) */ + + scc_softc[0].full_modem = TRUE; + + scc_std[0] = scc_phys_base; + + regs = scc_softc[0].regs = (scc_regmap_t)scc_std[0]; + + for (chan = 0; chan < NSCC_LINE; chan++) { + if (chan == 1) + scc_init_hw[0].val = 0x80; + + for (i = 0; i < scc_init_hw_count; i++) { + scc_write_reg(regs, chan, + scc_init_hw[i].reg, scc_init_hw[i].val); + } + } + + /* Call probe so we are ready very early for remote gdb and for serial + console output if appropriate. */ + if (scc_probe()) { + for (i = 0; i < NSCC_LINE; i++) { + scc_softc[0].softr[i].wr5 = SCC_WR5_DTR | SCC_WR5_RTS; + scc_param(scc_tty_for(i)); + /* Enable SCC interrupts (how many interrupts are to this thing?!?) */ + scc_write_reg(regs, i, 9, SCC_WR9_NV); + + scc_read_reg_zero(regs, 0, bits);/* Clear the status */ + } + scc_parm_done = 1; /* (TEST/DEBUG) */ + } + + serial_initted = TRUE; + + FUNNEL_EXIT(&SCC_FUNNEL); + return; +} + +int +scc_probe(void) +{ + scc_softc_t scc; + register int val, i; + register scc_regmap_t regs; + spl_t s; + DECL_FUNNEL_VARS + + if (!SCC_FUNNEL_INITTED) { + FUNNEL_INIT(&SCC_FUNNEL, master_processor); + SCC_FUNNEL_INITTED = TRUE; + } + FUNNEL_ENTER(&SCC_FUNNEL); + + /* Readjust the I/O address to handling + * new memory mappings. 
+ */ + +// scc_std[0] = POWERMAC_IO(scc_std[0]); + + regs = (scc_regmap_t)scc_std[0]; + + if (regs == (scc_regmap_t) 0) { + FUNNEL_EXIT(&SCC_FUNNEL); + return 0; + } + + scc = &scc_softc[0]; + scc->regs = regs; + + s = splhigh(); + + for (i = 0; i < NSCC_LINE; i++) { + register struct scc_tty *tp; + tp = scc_tty_for(i); + tp->t_addr = (char*)(0x80000000L + (i&1)); + /* Set default values. These will be overridden on + open but are needed if the port will be used + independently of the Mach interfaces, e.g., for + gdb or for a serial console. */ + tp->t_ispeed = DEFAULT_SPEED; + tp->t_ospeed = DEFAULT_SPEED; + tp->t_flags = DEFAULT_FLAGS; + scc->softr[i].speed = -1; + + /* do min buffering */ + tp->t_state |= TS_MIN; + + tp->t_dev = scc_dev_no(i); + } + + splx(s); + + FUNNEL_EXIT(&SCC_FUNNEL); + return 1; +} + +/* + * Get a char from a specific SCC line + * [this is only used for console&screen purposes] + * must be splhigh since it may be called from another routine under spl + */ + +int +scc_getc(int unit, int line, boolean_t wait, boolean_t raw) +{ + register scc_regmap_t regs; + unsigned char c, value; + int rcvalue, from_line; + spl_t s = splhigh(); + DECL_FUNNEL_VARS + + FUNNEL_ENTER(&SCC_FUNNEL); + + simple_lock(&scc_stomp); /* (TEST/DEBUG) */ + regs = scc_softc[0].regs; + + /* + * wait till something available + * + */ +again: + rcvalue = 0; + while (1) { + scc_read_reg_zero(regs, line, value); + + if (value & SCC_RR0_RX_AVAIL) + break; + + if (!wait) { + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); + FUNNEL_EXIT(&SCC_FUNNEL); + return -1; + } + } + + /* + * if nothing found return -1 + */ + + scc_read_reg(regs, line, SCC_RR1, value); + scc_read_data(regs, line, c); + +#if MACH_KDB + if (console_is_serial() && + c == ('_' & 0x1f)) { + /* Drop into the debugger */ + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + Debugger("Serial Line Request"); + simple_lock(&scc_stomp); /* (TEST/DEBUG) */ + scc_write_reg(regs, line, SCC_RR0, 
SCC_RESET_HIGHEST_IUS); + if (wait) { + goto again; + } + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); + FUNNEL_EXIT(&SCC_FUNNEL); + return -1; + } +#endif /* MACH_KDB */ + + /* + * bad chars not ok + */ + if (value&(SCC_RR1_PARITY_ERR | SCC_RR1_RX_OVERRUN | SCC_RR1_FRAME_ERR)) { + scc_write_reg(regs, line, SCC_RR0, SCC_RESET_ERROR); + + if (wait) { + scc_write_reg(regs, line, SCC_RR0, SCC_RESET_HIGHEST_IUS); + goto again; + } + } + + scc_write_reg(regs, line, SCC_RR0, SCC_RESET_HIGHEST_IUS); + + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); + + FUNNEL_EXIT(&SCC_FUNNEL); + return c; +} + +/* + * Put a char on a specific SCC line + * use splhigh since we might be doing a printf in high spl'd code + */ + +int +scc_putc(int unit, int line, int c) +{ + scc_regmap_t regs; + spl_t s = splhigh(); + unsigned char value; + DECL_FUNNEL_VARS + + FUNNEL_ENTER(&SCC_FUNNEL); + simple_lock(&scc_stomp); /* (TEST/DEBUG) */ + + regs = scc_softc[0].regs; + + do { + scc_read_reg(regs, line, SCC_RR0, value); + if (value & SCC_RR0_TX_EMPTY) + break; + delay(1); + } while (1); + + scc_write_data(regs, line, c); +/* wait for it to swallow the char ? 
*/ + + do { + scc_read_reg(regs, line, SCC_RR0, value); + if (value & SCC_RR0_TX_EMPTY) + break; + } while (1); + scc_write_reg(regs, line, SCC_RR0, SCC_RESET_HIGHEST_IUS); + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + + splx(s); + + FUNNEL_EXIT(&SCC_FUNNEL); + return 0; +} + + +void +powermac_scc_set_datum(scc_regmap_t regs, unsigned int offset, unsigned char value) +{ + volatile unsigned char *address = (unsigned char *) regs + offset; + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); + + *address = value; + eieio(); + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); +} + +unsigned char +powermac_scc_get_datum(scc_regmap_t regs, unsigned int offset) +{ + volatile unsigned char *address = (unsigned char *) regs + offset; + unsigned char value; + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); + + value = *address; eieio(); + return value; + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); +} + +int +scc_param(struct scc_tty *tp) +{ + scc_regmap_t regs; + unsigned char value; + unsigned short speed_value; + int bits, chan; + spl_t s; + struct scc_softreg *sr; + scc_softc_t scc; + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); + + s = splhigh(); + simple_lock(&scc_stomp); /* (TEST/DEBUG) */ + + chan = scc_chan(tp->t_dev); + scc = &scc_softc[0]; + regs = scc->regs; + + sr = &scc->softr[chan]; + + /* Do a quick check to see if the hardware needs to change */ + if ((sr->flags & (TF_ODDP|TF_EVENP)) == (tp->t_flags & (TF_ODDP|TF_EVENP)) + && sr->speed == tp->t_ispeed) { + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); /* (TEST/DEBUG) */ + return 0; /* (TEST/DEBUG) */ + } + + if(scc_parm_done) { + + scc_write_reg(regs, chan, 3, SCC_WR3_RX_8_BITS|SCC_WR3_RX_ENABLE); /* (TEST/DEBUG) */ + sr->wr1 = SCC_WR1_RXI_FIRST_CHAR | SCC_WR1_EXT_IE; /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 1, sr->wr1); /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 15, SCC_WR15_ENABLE_ESCC); /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 7, SCC_WR7P_RX_FIFO); /* (TEST/DEBUG) */ + 
scc_write_reg(regs, chan, 0, SCC_IE_NEXT_CHAR); /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 0, SCC_RESET_EXT_IP); /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 0, SCC_RESET_EXT_IP); /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 9, SCC_WR9_MASTER_IE|SCC_WR9_NV); /* (TEST/DEBUG) */ + scc_read_reg_zero(regs, 0, bits); /* (TEST/DEBUG) */ + sr->wr1 = SCC_WR1_RXI_FIRST_CHAR | SCC_WR1_EXT_IE; /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 1, sr->wr1); /* (TEST/DEBUG) */ + scc_write_reg(regs, chan, 0, SCC_IE_NEXT_CHAR); /* (TEST/DEBUG) */ + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); /* (TEST/DEBUG) */ + return 0; /* (TEST/DEBUG) */ + } + + sr->flags = tp->t_flags; + sr->speed = tp->t_ispeed; + + + if (tp->t_ispeed == 0) { + sr->wr5 &= ~SCC_WR5_DTR; + scc_write_reg(regs, chan, 5, sr->wr5); + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); + return 0; + } + + +#if SCC_DMA_TRANSFERS + if (scc->dma_initted & (1<dma_ops->scc_dma_reset_rx(chan); +#endif + + value = SCC_WR4_1_STOP; + + /* + * For 115K the clocking divide changes to 64.. to 230K will + * start at the normal clock divide 16. + * + * However, both speeds will pull from a different clocking + * source + */ + + if (tp->t_ispeed == 115200) + value |= SCC_WR4_CLK_x32; + else + value |= SCC_WR4_CLK_x16 ; + + /* .. and parity */ + if ((tp->t_flags & (TF_ODDP | TF_EVENP)) == TF_EVENP) + value |= (SCC_WR4_EVEN_PARITY | SCC_WR4_PARITY_ENABLE); + else if ((tp->t_flags & (TF_ODDP | TF_EVENP)) == TF_ODDP) + value |= SCC_WR4_PARITY_ENABLE; + + /* set it now, remember it must be first after reset */ + sr->wr4 = value; + + /* Program Parity, and Stop bits */ + scc_write_reg(regs, chan, 4, sr->wr4); + + /* Setup for 8 bits */ + scc_write_reg(regs, chan, 3, SCC_WR3_RX_8_BITS); + + // Set DTR, RTS, and transmitter bits/character. 
+ sr->wr5 = SCC_WR5_TX_8_BITS | SCC_WR5_RTS | SCC_WR5_DTR; + + scc_write_reg(regs, chan, 5, sr->wr5); + + scc_write_reg(regs, chan, 14, 0); /* Disable baud rate */ + + /* Setup baud rate 57.6Kbps, 115K, 230K should all yeild + * a converted baud rate of zero + */ + speed_value = convert_baud_rate(tp->t_ispeed); + + if (speed_value == 0xffff) + speed_value = 0; + + scc_set_timing_base(regs, chan, speed_value); + + if (tp->t_ispeed == 115200 || tp->t_ispeed == 230400) { + /* Special case here.. change the clock source*/ + scc_write_reg(regs, chan, 11, 0); + /* Baud rate generator is disabled.. */ + } else { + scc_write_reg(regs, chan, 11, SCC_WR11_RCLK_BAUDR|SCC_WR11_XTLK_BAUDR); + /* Enable the baud rate generator */ + scc_write_reg(regs, chan, 14, SCC_WR14_BAUDR_ENABLE); + } + + + scc_write_reg(regs, chan, 3, SCC_WR3_RX_8_BITS|SCC_WR3_RX_ENABLE); + + + sr->wr1 = SCC_WR1_RXI_FIRST_CHAR | SCC_WR1_EXT_IE; + scc_write_reg(regs, chan, 1, sr->wr1); + scc_write_reg(regs, chan, 15, SCC_WR15_ENABLE_ESCC); + scc_write_reg(regs, chan, 7, SCC_WR7P_RX_FIFO); + scc_write_reg(regs, chan, 0, SCC_IE_NEXT_CHAR); + + + /* Clear out any pending external or status interrupts */ + scc_write_reg(regs, chan, 0, SCC_RESET_EXT_IP); + scc_write_reg(regs, chan, 0, SCC_RESET_EXT_IP); + //scc_write_reg(regs, chan, 0, SCC_RESET_ERROR); + + /* Enable SCC interrupts (how many interrupts are to this thing?!?) 
*/ + scc_write_reg(regs, chan, 9, SCC_WR9_MASTER_IE|SCC_WR9_NV); + + scc_read_reg_zero(regs, 0, bits);/* Clear the status */ + +#if SCC_DMA_TRANSFERS + if (scc->dma_initted & (1<dma_ops->scc_dma_start_rx(chan); + scc->dma_ops->scc_dma_setup_8530(chan); + } else +#endif + { + sr->wr1 = SCC_WR1_RXI_FIRST_CHAR | SCC_WR1_EXT_IE; + scc_write_reg(regs, chan, 1, sr->wr1); + scc_write_reg(regs, chan, 0, SCC_IE_NEXT_CHAR); + } + + sr->wr5 |= SCC_WR5_TX_ENABLE; + scc_write_reg(regs, chan, 5, sr->wr5); + + simple_unlock(&scc_stomp); /* (TEST/DEBUG) */ + splx(s); + + assert(FUNNEL_IN_USE(&SCC_FUNNEL)); + return 0; + +} + +#endif /* NSCC > 0 */ diff --git a/osfmk/ppc/POWERMAC/serial_io.h b/osfmk/ppc/POWERMAC/serial_io.h new file mode 100644 index 000000000..a1f9979d9 --- /dev/null +++ b/osfmk/ppc/POWERMAC/serial_io.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + + +#include +#include + +/* + * Console is on the Printer Port (chip channel 0) + * Debugger is on the Modem Port (chip channel 1) + */ + +#define CONSOLE_PORT 1 + +struct scc_tty { + char * t_addr; /* device pointer */ + int t_dev; /* device number */ + int t_ispeed; /* input speed */ + int t_ospeed; /* output speed */ + char t_breakc; /* character to deliver when 'break' + condition received */ + int t_flags; /* mode flags */ + int t_state; /* current state */ + int t_line; /* fake line discipline number, + for old drivers - always 0 */ + int t_outofband; /* current out-of-band events */ + int t_outofbandarg; /* arg to first out-of-band event */ + int t_nquoted; /* number of quoted chars in inq */ + int t_hiwater; /* baud-rate limited high water mark */ + int t_lowater; /* baud-rate limited low water mark */ +}; +typedef struct scc_tty *scc_tty_t; + +/* + * function declarations for performing serial i/o + * other functions below are declared in kern/misc_protos.h + * cnputc, cngetc, cnmaygetc + */ + +void initialize_serial(caddr_t scc_phys_base); + +extern int scc_probe(void); + +#if 0 +extern int scc_open( + dev_t dev, + dev_mode_t flag, + io_req_t ior); + +extern void scc_close( + dev_t dev); + +extern int scc_read( + dev_t dev, + io_req_t ior); + +extern io_return_t scc_write( + dev_t dev, + io_req_t ior); + +extern io_return_t scc_get_status( + dev_t dev, + dev_flavor_t flavor, + dev_status_t data, + mach_msg_type_number_t *status_count); + +extern io_return_t scc_set_status( + dev_t dev, + dev_flavor_t flavor, + dev_status_t data, + mach_msg_type_number_t status_count); + +extern boolean_t scc_portdeath( + dev_t dev, + ipc_port_t port); + +#endif /* 0 */ + +extern int scc_putc( + int unit, + int line, + int c); + +extern int scc_getc( + int unit, + int line, + boolean_t wait, + boolean_t raw); + +/* Functions in serial_console.c for switching between 
serial and video + consoles. */ +extern boolean_t console_is_serial(void); +extern int switch_to_serial_console( + void); + +extern int switch_to_video_console( + void); + +extern void switch_to_old_console( + int old_console); + + +/* + * JMM - We are not really going to support this driver in SMP (barely + * support it now - so just pick up the stubbed out versions. + */ +#define DECL_FUNNEL(class,f) +#define DECL_FUNNEL_VARS +#define FUNNEL_INIT(f,p) +#define FUNNEL_ENTER(f) +#define FUNNEL_EXIT(f) +#define FUNNEL_ESCAPE(f) (1) +#define FUNNEL_REENTER(f,count) +#define FUNNEL_IN_USE(f) (TRUE) + +/* + * Flags + */ +#define TF_ODDP 0x00000002 /* get/send odd parity */ +#define TF_EVENP 0x00000004 /* get/send even parity */ +#define TF_ANYP (TF_ODDP|TF_EVENP) + /* get any parity/send none */ +#define TF_LITOUT 0x00000008 /* output all 8 bits + otherwise, characters >= 0x80 + are time delays XXX */ +#define TF_ECHO 0x00000080 /* device wants user to echo input */ +#define TS_MIN 0x00004000 /* buffer input chars, if possible */ diff --git a/osfmk/ppc/POWERMAC/video_console.c b/osfmk/ppc/POWERMAC/video_console.c new file mode 100644 index 000000000..2ed27aeec --- /dev/null +++ b/osfmk/ppc/POWERMAC/video_console.c @@ -0,0 +1,2223 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* MACH PPC - video_console.c + * + * Original based on NetBSD's mac68k/dev/ite.c driver + * + * This driver differs in + * - MACH driver"ized" + * - Uses phys_copy and flush_cache to in several places + * for performance optimizations + * - 7x15 font + * - Black background and white (character) foreground + * - Assumes 6100/7100/8100 class of machine + * + * The original header follows... + * + * + * NetBSD: ite.c,v 1.16 1995/07/17 01:24:34 briggs Exp + * + * Copyright (c) 1988 University of Utah. + * Copyright (c) 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: Utah $Hdr: ite.c 1.28 92/12/20$ + * + * @(#)ite.c 8.2 (Berkeley) 1/12/94 + */ + +/* + * ite.c + * + * The ite module handles the system console; that is, stuff printed + * by the kernel and by user programs while "desktop" and X aren't + * running. Some (very small) parts are based on hp300's 4.4 ite.c, + * hence the above copyright. 
+ * + * -- Brad and Lawrence, June 26th, 1994 + * + */ + +#include + +#include +#include +#include /* spl definitions */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define FAST_JUMP_SCROLL + +#define CHARWIDTH 8 +#define CHARHEIGHT 16 + +#define ATTR_NONE 0 +#define ATTR_BOLD 1 +#define ATTR_UNDER 2 +#define ATTR_REVERSE 4 + +enum vt100state_e { + ESnormal, /* Nothing yet */ + ESesc, /* Got ESC */ + ESsquare, /* Got ESC [ */ + ESgetpars, /* About to get or getting the parameters */ + ESgotpars, /* Finished getting the parameters */ + ESfunckey, /* Function key */ + EShash, /* DEC-specific stuff (screen align, etc.) */ + ESsetG0, /* Specify the G0 character set */ + ESsetG1, /* Specify the G1 character set */ + ESask, + EScharsize, + ESignore /* Ignore this sequence */ +} vt100state = ESnormal; + +struct vc_info vinfo; + +/* Calculated in vccninit(): */ +static int vc_wrap_mode = 1, vc_relative_origin = 0; +static int vc_charset_select = 0, vc_save_charset_s = 0; +static int vc_charset[2] = { 0, 0 }; +static int vc_charset_save[2] = { 0, 0 }; + +/* VT100 state: */ +#define MAXPARS 16 +static int x = 0, y = 0, savex, savey; +static int par[MAXPARS], numpars, hanging_cursor, attr, saveattr; + +/* VT100 tab stops & scroll region */ +static char tab_stops[255]; +static int scrreg_top, scrreg_bottom; + +/* Misc */ +void vc_initialize(void); +void vc_flush_forward_buffer(void); +void vc_store_char(unsigned char); + +void vcattach(void); + + +/* + * For the color support (Michel Pollet) + */ +unsigned char vc_color_index_table[33] = + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }; + +unsigned long vc_color_depth_masks[4] = + { 0x000000FF, 0x00007FFF, 0x00FFFFFF }; + +unsigned long vc_colors[8][3] = { + { 0xFFFFFFFF, 0x00000000, 0x00000000 }, /* black */ + { 0x23232323, 0x7C007C00, 0x00FF0000 }, /* red */ + { 0xb9b9b9b9, 0x03e003e0, 0x0000FF00 }, /* green */ + { 
0x05050505, 0x7FE07FE0, 0x00FFFF00 }, /* yellow */ + { 0xd2d2d2d2, 0x001f001f, 0x000000FF}, /* blue */ +// { 0x80808080, 0x31933193, 0x00666699 }, /* blue */ + { 0x18181818, 0x7C1F7C1F, 0x00FF00FF }, /* magenta */ + { 0xb4b4b4b4, 0x03FF03FF, 0x0000FFFF }, /* cyan */ + { 0x00000000, 0x7FFF7FFF, 0x00FFFFFF } /* white */ +}; + +unsigned long vc_color_mask = 0; +unsigned long vc_color_fore = 0; +unsigned long vc_color_back = 0; +int vc_normal_background = 1; + + +/* + * For the jump scroll and buffering (Michel Pollet) + * 80*22 means on a 80*24 screen, the screen will + * scroll jump almost a full screen + * keeping only what's necessary for you to be able to read ;-) + */ +#define VC_MAX_FORWARD_SIZE (100*36) + +/* + * Delay between console updates in clock hz units, the larger the + * delay the fuller the jump-scroll buffer will be and so the faster the + * (scrolling) output. The smaller the delay, the less jerky the + * display. Heuristics show that at 10 touch-typists (Mike!) complain + */ +#define VC_CONSOLE_UPDATE_TIMEOUT 5 + +static unsigned char vc_forward_buffer[VC_MAX_FORWARD_SIZE]; +static long vc_forward_buffer_size = 0; +static int vc_forward_buffer_enabled = 0; +static int vc_forward_buffer_busy = 0; +decl_simple_lock_data(,vc_forward_lock) + +#ifdef FAST_JUMP_SCROLL +static void (*vc_forward_paintchar) (unsigned char c, int x, int y, int attrs); +static enum { + PFoff, + PFwind, + PFscroll, + PFunwind +} vc_forward_preflight_mode = PFoff; +static struct { + enum vt100state_e vt100state; + + int vc_wrap_mode, vc_relative_origin; + int vc_charset_select, vc_save_charset_s; + int vc_charset[2]; + int vc_charset_save[2]; + + int x, y, savex, savey; + int par[MAXPARS], numpars, hanging_cursor, attr, saveattr; + + char tab_stops[255]; + int scrreg_top, scrreg_bottom; + + unsigned long vc_color_fore; + unsigned long vc_color_back; +} vc_forward_preflight_save; +static int vc_forward_scroll = 0; +#endif FAST_JUMP_SCROLL + +/* + * New Rendering code from Michel 
Pollet + */ + +/* That function will be called for drawing */ +static void (*vc_paintchar) (unsigned char c, int x, int y, int attrs); + +#ifdef RENDERALLOCATE +unsigned char *renderedFont = NULL; /* rendered font buffer */ +#else +#define REN_MAX_DEPTH 32 +/* that's the size for a 32 bits buffer... */ +#define REN_MAX_SIZE (128L*1024) +unsigned char renderedFont[REN_MAX_SIZE]; +#endif + +/* Rendered Font Size */ +unsigned long vc_rendered_font_size = REN_MAX_SIZE; +long vc_rendered_error = 0; + +/* If the one bit table was reversed */ +short vc_one_bit_reversed = 0; + +/* Size of a character in the table (bytes) */ +int vc_rendered_char_size = 0; + +/* +# Attribute codes: +# 00=none 01=bold 04=underscore 05=blink 07=reverse 08=concealed +# Text color codes: +# 30=black 31=red 32=green 33=yellow 34=blue 35=magenta 36=cyan 37=white +# Background color codes: +# 40=black 41=red 42=green 43=yellow 44=blue 45=magenta 46=cyan 47=white +*/ + +#define VC_RESET_BACKGROUND 40 +#define VC_RESET_FOREGROUND 37 + +static void vc_color_set(int color) +{ + if (vinfo.v_depth < 8) + return; + if (color >= 30 && color <= 37) + vc_color_fore = vc_colors[color-30][vc_color_index_table[vinfo.v_depth]]; + if (color >= 40 && color <= 47) { + vc_color_back = vc_colors[color-40][vc_color_index_table[vinfo.v_depth]]; + vc_normal_background = color == 40; + } + +} + +static void vc_render_font(short olddepth, short newdepth) +{ + int charIndex; /* index in ISO font */ + union { + unsigned char *charptr; + unsigned short *shortptr; + unsigned long *longptr; + } current; /* current place in rendered font, multiple types. 
*/ + + unsigned char *theChar; /* current char in iso_font */ + + if (olddepth == newdepth && renderedFont) { + return; /* nothing to do */ + } + + if (olddepth != 1 && renderedFont) { +#ifdef RENDERALLOCATE + (void) kmem_free(kernel_map, (vm_offset_t*)renderedFont, vc_rendered_font_size); +#endif + } + vc_rendered_font_size = REN_MAX_SIZE; + if (newdepth == 1) { +#ifdef RENDERALLOCATE + renderedFont = iso_font; +#endif + vc_rendered_char_size = 16; + if (!vc_one_bit_reversed) { /* reverse the font for the blitter */ + int i; + for (i = 0; i < ((ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size); i++) { + if (iso_font[i]) { + unsigned char mask1 = 0x80; + unsigned char mask2 = 0x01; + unsigned char val = 0; + while (mask1) { + if (iso_font[i] & mask1) + val |= mask2; + mask1 >>= 1; + mask2 <<= 1; + } + renderedFont[i] = ~val; + } else renderedFont[i] = 0xff; + } + vc_one_bit_reversed = 1; + } + return; + } + { + long csize = newdepth / 8; /* bytes per pixel */ + vc_rendered_char_size = csize ? 
CHARHEIGHT * (csize * CHARWIDTH) : + /* for 2 & 4 */ CHARHEIGHT * (CHARWIDTH/(6-newdepth)); + csize = (ISO_CHAR_MAX-ISO_CHAR_MIN+1) * vc_rendered_char_size; +#ifndef RENDERALLOCATE + if (csize > vc_rendered_font_size) { + vc_rendered_error = csize; + return; + } else + vc_rendered_font_size = csize; +#else + vc_rendered_font_size = csize; +#endif + } + +#ifdef RENDERALLOCATE + if (kmem_alloc(kernel_map, + (vm_offset_t *)&renderedFont, + vc_rendered_font_size) != KERN_SUCCESS) { + renderedFont = NULL; + vc_rendered_error = vc_rendered_font_size; + return; + } +#endif + current.charptr = renderedFont; + theChar = iso_font; + for (charIndex = ISO_CHAR_MIN; charIndex <= ISO_CHAR_MAX; charIndex++) { + int line; + for (line = 0; line < CHARHEIGHT; line++) { + unsigned char mask = 1; + do { + switch (newdepth) { + case 2: { + unsigned char value = 0; + if (*theChar & mask) value |= 0xC0; mask <<= 1; + if (*theChar & mask) value |= 0x30; mask <<= 1; + if (*theChar & mask) value |= 0x0C; mask <<= 1; + if (*theChar & mask) value |= 0x03; + value = ~value; + *current.charptr++ = value; + } + break; + case 4: + { + unsigned char value = 0; + if (*theChar & mask) value |= 0xF0; mask <<= 1; + if (*theChar & mask) value |= 0x0F; + value = ~value; + *current.charptr++ = value; + } + break; + case 8: + *current.charptr++ = (*theChar & mask) ? 0xff : 0; + break; + case 16: + *current.shortptr++ = (*theChar & mask) ? 0xFFFF : 0; + break; + + case 32: + *current.longptr++ = (*theChar & mask) ? 
0xFFFFFFFF : 0; + break; + } + mask <<= 1; + } while (mask); /* while the single bit drops to the right */ + theChar++; + } + } +} + +#ifdef FAST_JUMP_SCROLL +static void vc_paint_char(unsigned char ch, int xx, int yy, int attrs) +{ + switch (vc_forward_preflight_mode) { + case PFoff: + vc_forward_paintchar(ch, xx, yy, attrs); + break; + case PFwind: + break; + case PFscroll: + break; + case PFunwind: + if (yy >= scrreg_top && yy < scrreg_bottom) { + yy -= vc_forward_scroll; + if (yy < scrreg_top || yy >= scrreg_bottom) + break; + } + vc_forward_paintchar(ch, xx, yy, attrs); + break; + } +} +#endif /* FAST_JUMP_SCROLL */ + +static void vc_paint_char1(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned char *theChar; + unsigned char *where; + int i; + + theChar = (unsigned char*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned char*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! 
*/ + *where = *theChar++; + + where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned char val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + unsigned char mask1 = 0xC0, mask2 = 0x40; + int bit = 0; + for (bit = 0; bit < 7; bit++) { + if ((save & mask1) == mask2) + val &= ~mask2; + mask1 >>= 1; + mask2 >>= 1; + } + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + *where = val; + + where = (unsigned char*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void vc_paint_char2(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned short *theChar; + unsigned short *where; + int i; + + theChar = (unsigned short*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned short*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * 2)); + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! 
*/ + *where = *theChar++; + + where = (unsigned short*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned short val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + unsigned short mask1 = 0xF000, mask2 = 0x3000; + int bit = 0; + for (bit = 0; bit < 7; bit++) { + if ((save & mask1) == mask2) + val &= ~mask2; + mask1 >>= 2; + mask2 >>= 2; + } + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + *where = val; + + where = (unsigned short*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void vc_paint_char4(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * 4)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attributes ? FLY !!!! 
*/ + *where = *theChar++; + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + unsigned long mask1 = 0xff000000, mask2 = 0x0F000000; + int bit = 0; + for (bit = 0; bit < 7; bit++) { + if ((save & mask1) == mask2) + val &= ~mask2; + mask1 >>= 4; + mask2 >>= 4; + } + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + *where = val; + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +static void vc_paint_char8c(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * CHARWIDTH)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attr? 
FLY !*/ + unsigned long *store = where; + int x; + for (x = 0; x < 2; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 2; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !(save & 0xFF000000)) + val |= 0xff000000; + if ((save & 0xFFFF0000) == 0xFF000000) + val |= 0x00FF0000; + if ((save & 0x00FFFF00) == 0x00FF0000) + val |= 0x0000FF00; + if ((save & 0x0000FFFF) == 0x0000FF00) + val |= 0x000000FF; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save & 0xff; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} +static void vc_paint_char16c(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * CHARWIDTH * 2)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! 
*/ + unsigned long *store = where; + int x; + for (x = 0; x < 4; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little bit slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 4; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (save == 0xFFFF0000) val |= 0xFFFF; + else if (lastpixel && !(save & 0xFFFF0000)) + val |= 0xFFFF0000; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + + *store++ = val; + lastpixel = save & 0x7fff; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} +static void vc_paint_char32c(unsigned char ch, int xx, int yy, int attrs) +{ + unsigned long *theChar; + unsigned long *where; + int i; + + theChar = (unsigned long*)(renderedFont + (ch * vc_rendered_char_size)); + where = (unsigned long*)(vinfo.v_baseaddr + + (yy * CHARHEIGHT * vinfo.v_rowbytes) + + (xx * CHARWIDTH * 4)); + + if (!attrs) for (i = 0; i < CHARHEIGHT; i++) { /* No attrs ? FLY ! 
*/ + unsigned long *store = where; + int x; + for (x = 0; x < 8; x++) { + unsigned long val = *theChar++; + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } else for (i = 0; i < CHARHEIGHT; i++) { /* a little slower */ + unsigned long *store = where, lastpixel = 0; + int x; + for (x = 0 ; x < 8; x++) { + unsigned long val = *theChar++, save = val; + if (attrs & ATTR_BOLD) { /* bold support */ + if (lastpixel && !save) + val = 0xFFFFFFFF; + } + if (attrs & ATTR_REVERSE) val = ~val; + if (attrs & ATTR_UNDER && i == CHARHEIGHT-1) val = ~val; + + val = (vc_color_back & ~val) | (vc_color_fore & val); + *store++ = val; + lastpixel = save; + } + + where = (unsigned long*)(((unsigned char*)where)+vinfo.v_rowbytes); + } + +} + +/* + * That's a plain dumb reverse of the cursor position + * It do a binary reverse, so it will not looks good when we have + * color support. we'll see that later + */ +static void reversecursor(void) +{ + union { + unsigned char *charptr; + unsigned short *shortptr; + unsigned long *longptr; + } where; + int line, col; + + where.longptr = (unsigned long*)(vinfo.v_baseaddr + + (y * CHARHEIGHT * vinfo.v_rowbytes) + + (x /** CHARWIDTH*/ * vinfo.v_depth)); + for (line = 0; line < CHARHEIGHT; line++) { + switch (vinfo.v_depth) { + case 1: + *where.charptr = ~*where.charptr; + break; + case 2: + *where.shortptr = ~*where.shortptr; + break; + case 4: + *where.longptr = ~*where.longptr; + break; +/* that code still exists because since characters on the screen are + * of different colors that reverse function may not work if the + * cursor is on a character that is in a different color that the + * current one. When we have buffering, things will work better. 
MP + */ +#ifdef VC_BINARY_REVERSE + case 8: + where.longptr[0] = ~where.longptr[0]; + where.longptr[1] = ~where.longptr[1]; + break; + case 16: + for (col = 0; col < 4; col++) + where.longptr[col] = ~where.longptr[col]; + break; + case 32: + for (col = 0; col < 8; col++) + where.longptr[col] = ~where.longptr[col]; + break; +#else + case 8: + for (col = 0; col < 8; col++) + where.charptr[col] = where.charptr[col] != (vc_color_fore & vc_color_mask) ? + vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; + break; + case 16: + for (col = 0; col < 8; col++) + where.shortptr[col] = where.shortptr[col] != (vc_color_fore & vc_color_mask) ? + vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; + break; + case 32: + for (col = 0; col < 8; col++) + where.longptr[col] = where.longptr[col] != (vc_color_fore & vc_color_mask) ? + vc_color_fore & vc_color_mask : vc_color_back & vc_color_mask; + break; +#endif + } + where.charptr += vinfo.v_rowbytes; + } +} + + +static void +scrollup(int num) +{ + unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; + + linelongs = (vinfo.v_rowbytes * CHARHEIGHT) >> 2; + rowline = (vinfo.v_rowbytes) >> 2; + rowscanline = (vinfo.v_rowscanbytes) >> 2; + +#ifdef FAST_JUMP_SCROLL + if (vc_forward_preflight_mode == PFwind) { + vc_forward_scroll += num; + return; + } + if (vc_forward_preflight_mode == PFscroll || vc_forward_preflight_mode == PFoff) { +#endif FAST_JUMP_SCROLL + + to = (unsigned long *) vinfo.v_baseaddr + (scrreg_top * linelongs); + from = to + (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + + i = (scrreg_bottom - scrreg_top) - num; + + while (i-- > 0) { + for (line = 0; line < CHARHEIGHT; line++) { + /* + * Only copy what is displayed + */ + video_scroll_up((unsigned int) from, + (unsigned int) (from+(vinfo.v_rowscanbytes/4)), + (unsigned int) to); + + from += rowline; + to += rowline; + } + } + + /* Now set the freed up lines to the background colour */ + + + to = ((unsigned 
long *) vinfo.v_baseaddr + (scrreg_top * linelongs)) + + ((scrreg_bottom - scrreg_top - num) * linelongs); + +#ifdef FAST_JUMP_SCROLL + if (vc_forward_preflight_mode == PFscroll) + return; + } else if (vc_forward_preflight_mode == PFunwind) { + long linestart, linelast; + vc_forward_scroll -= num; + + linestart = scrreg_bottom - num - vc_forward_scroll; + linelast = linestart + num - 1; + + if (linestart >= scrreg_bottom || linelast < scrreg_top) + return; + + if (linelast >= scrreg_bottom) + linelast = scrreg_bottom - 1; + if (linestart < scrreg_top) + linestart = scrreg_top; + + to = ((unsigned long *) vinfo.v_baseaddr) + (linelongs * linestart); + num = linelast - linestart + 1; + } +#endif FAST_JUMP_SCROLL + + for (linelongs = CHARHEIGHT * num; linelongs-- > 0;) { + from = to; + for (i = 0; i < rowscanline; i++) + *to++ = vc_color_back; + + to = from + rowline; + } + +} + +static void +scrolldown(int num) +{ + unsigned long *from, *to, linelongs, i, line, rowline, rowscanline; + + linelongs = (vinfo.v_rowbytes * CHARHEIGHT) >> 2; + rowline = (vinfo.v_rowbytes) >> 2; + rowscanline = (vinfo.v_rowscanbytes) >> 2; + +#ifdef FAST_JUMP_SCROLL + if (vc_forward_preflight_mode == PFwind) { + vc_forward_scroll -= num; + return; + } + if (vc_forward_preflight_mode == PFscroll || vc_forward_preflight_mode == PFoff) { +#endif FAST_JUMP_SCROLL + + to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_bottom) + - (rowline - rowscanline); + from = to - (linelongs * num); /* handle multiple line scroll (Michel Pollet) */ + + i = (scrreg_bottom - scrreg_top) - num; + + while (i-- > 0) { + for (line = 0; line < CHARHEIGHT; line++) { + /* + * Only copy what is displayed + */ + video_scroll_down((unsigned int) from, + (unsigned int) (from-(vinfo.v_rowscanbytes/4)), + (unsigned int) to); + + from -= rowline; + to -= rowline; + } + } + + /* Now set the freed up lines to the background colour */ + + to = (unsigned long *) vinfo.v_baseaddr + (linelongs * scrreg_top); + +#ifdef 
FAST_JUMP_SCROLL + if (vc_forward_preflight_mode == PFscroll) + return; + } else if (vc_forward_preflight_mode == PFunwind) { + long linestart, linelast; + vc_forward_scroll += num; + + linestart = scrreg_top - vc_forward_scroll; + linelast = linestart + num - 1; + + if (linestart >= scrreg_bottom || linelast < scrreg_top) + return; + + if (linelast >= scrreg_bottom) + linelast = scrreg_bottom - 1; + if (linestart < scrreg_top) + linestart = scrreg_top; + + to = ((unsigned long *) vinfo.v_baseaddr) + (linelongs * linestart); + num = linelast - linestart + 1; + } +#endif /* FAST_JUMP_SCROLL */ + + for (line = CHARHEIGHT * num; line > 0; line--) { + from = to; + + for (i = 0; i < rowscanline; i++) + *(to++) = vc_color_back; + + to = from + rowline; + } + +} + + +static void +clear_line(int which) +{ + int start, end, i; + + /* + * This routine runs extremely slowly. I don't think it's + * used all that often, except for To end of line. I'll go + * back and speed this up when I speed up the whole vc + * module. 
--LK + */ + + switch (which) { + case 0: /* To end of line */ + start = x; + end = vinfo.v_columns-1; + break; + case 1: /* To start of line */ + start = 0; + end = x; + break; + case 2: /* Whole line */ + start = 0; + end = vinfo.v_columns-1; + break; + } + + for (i = start; i <= end; i++) { + vc_paintchar(' ', i, y, ATTR_NONE); + } + +} + +static void +clear_screen(int which) +{ + unsigned long *p, *endp, *row; + int linelongs, col; + int rowline, rowlongs; + + rowline = vinfo.v_rowscanbytes / 4; + rowlongs = vinfo.v_rowbytes / 4; + + p = (unsigned long*) vinfo.v_baseaddr; + endp = (unsigned long*) vinfo.v_baseaddr; + + linelongs = vinfo.v_rowbytes * CHARHEIGHT / 4; + + switch (which) { + case 0: /* To end of screen */ + clear_line(0); + if (y < vinfo.v_rows - 1) { + p += (y + 1) * linelongs; + endp += rowlongs * vinfo.v_height; + } + break; + case 1: /* To start of screen */ + clear_line(1); + if (y > 1) { + endp += (y + 1) * linelongs; + } + break; + case 2: /* Whole screen */ + endp += rowlongs * vinfo.v_height; + break; + } + + for (row = p ; row < endp ; row += rowlongs) { + for (col = 0; col < rowline; col++) + *(row+col) = vc_color_back; + } + +} + +static void +reset_tabs(void) +{ + int i; + + for (i = 0; i<= vinfo.v_columns; i++) { + tab_stops[i] = ((i % 8) == 0); + } + +} + +static void +vt100_reset(void) +{ + reset_tabs(); + scrreg_top = 0; + scrreg_bottom = vinfo.v_rows; + attr = ATTR_NONE; + vc_charset[0] = vc_charset[1] = 0; + vc_charset_select = 0; + vc_wrap_mode = 1; + vc_relative_origin = 0; + vc_color_set(VC_RESET_BACKGROUND); + vc_color_set(VC_RESET_FOREGROUND); + +} + +static void +putc_normal(unsigned char ch) +{ + switch (ch) { + case '\a': /* Beep */ + { + extern int asc_ringbell(); //In IOBSDConsole.cpp + int rang; + spl_t s; + + rang = asc_ringbell(); + + if(!rang) { + /* + * No sound hardware, invert the screen twice instead + */ + unsigned long *ptr; + int i, j; + /* XOR the screen twice */ + for (i = 0; i < 2 ; i++) { + /* For each 
row, xor the scanbytes */ + for (ptr = (unsigned long*)vinfo.v_baseaddr; + ptr < (unsigned long*)(vinfo.v_baseaddr + + (vinfo.v_height * vinfo.v_rowbytes)); + ptr += (vinfo.v_rowbytes / + sizeof (unsigned long*))) + for (j = 0; + j < vinfo.v_rowscanbytes / + sizeof (unsigned long*); + j++) + *(ptr+j) =~*(ptr+j); + } + } + } + break; + + case 127: /* Delete */ + case '\b': /* Backspace */ + if (hanging_cursor) { + hanging_cursor = 0; + } else + if (x > 0) { + x--; + } + break; + case '\t': /* Tab */ + while (x < vinfo.v_columns && !tab_stops[++x]); + if (x >= vinfo.v_columns) + x = vinfo.v_columns-1; + break; + case 0x0b: + case 0x0c: + case '\n': /* Line feed */ + if (y >= scrreg_bottom -1 ) { + scrollup(1); + y = scrreg_bottom - 1; + } else { + y++; + } + break; + case '\r': /* Carriage return */ + x = 0; + hanging_cursor = 0; + break; + case 0x0e: /* Select G1 charset (Control-N) */ + vc_charset_select = 1; + break; + case 0x0f: /* Select G0 charset (Control-O) */ + vc_charset_select = 0; + break; + case 0x18 : /* CAN : cancel */ + case 0x1A : /* like cancel */ + /* well, i do nothing here, may be later */ + break; + case '\033': /* Escape */ + vt100state = ESesc; + hanging_cursor = 0; + break; + default: + if (ch >= ' ') { + if (hanging_cursor) { + x = 0; + if (y >= scrreg_bottom -1 ) { + scrollup(1); + y = scrreg_bottom - 1; + } else { + y++; + } + hanging_cursor = 0; + } + vc_paintchar((ch >= 0x60 && ch <= 0x7f) ? 
ch + vc_charset[vc_charset_select] + : ch, x, y, attr); + if (x == vinfo.v_columns - 1) { + hanging_cursor = vc_wrap_mode; + } else { + x++; + } + } + break; + } + +} + +static void +putc_esc(unsigned char ch) +{ + vt100state = ESnormal; + + switch (ch) { + case '[': + vt100state = ESsquare; + break; + case 'c': /* Reset terminal */ + vt100_reset(); + clear_screen(2); + x = y = 0; + break; + case 'D': /* Line feed */ + case 'E': + if (y >= scrreg_bottom -1) { + scrollup(1); + y = scrreg_bottom - 1; + } else { + y++; + } + if (ch == 'E') x = 0; + break; + case 'H': /* Set tab stop */ + tab_stops[x] = 1; + break; + case 'M': /* Cursor up */ + if (y <= scrreg_top) { + scrolldown(1); + y = scrreg_top; + } else { + y--; + } + break; + case '>': + vt100_reset(); + break; + case '7': /* Save cursor */ + savex = x; + savey = y; + saveattr = attr; + vc_save_charset_s = vc_charset_select; + vc_charset_save[0] = vc_charset[0]; + vc_charset_save[1] = vc_charset[1]; + break; + case '8': /* Restore cursor */ + x = savex; + y = savey; + attr = saveattr; + vc_charset_select = vc_save_charset_s; + vc_charset[0] = vc_charset_save[0]; + vc_charset[1] = vc_charset_save[1]; + break; + case 'Z': /* return terminal ID */ + break; + case '#': /* change characters height */ + vt100state = EScharsize; + break; + case '(': + vt100state = ESsetG0; + break; + case ')': /* character set sequence */ + vt100state = ESsetG1; + break; + case '=': + break; + default: + /* Rest not supported */ + break; + } + +} + +static void +putc_askcmd(unsigned char ch) +{ + if (ch >= '0' && ch <= '9') { + par[numpars] = (10*par[numpars]) + (ch-'0'); + return; + } + vt100state = ESnormal; + + switch (par[0]) { + case 6: + vc_relative_origin = ch == 'h'; + break; + case 7: /* wrap around mode h=1, l=0*/ + vc_wrap_mode = ch == 'h'; + break; + default: + break; + } + +} + +static void +putc_charsizecmd(unsigned char ch) +{ + vt100state = ESnormal; + + switch (ch) { + case '3' : + case '4' : + case '5' : + case '6' : 
+ break; + case '8' : /* fill 'E's */ + { + int xx, yy; + for (yy = 0; yy < vinfo.v_rows; yy++) + for (xx = 0; xx < vinfo.v_columns; xx++) + vc_paintchar('E', xx, yy, ATTR_NONE); + } + break; + } + +} + +static void +putc_charsetcmd(int charset, unsigned char ch) +{ + vt100state = ESnormal; + + switch (ch) { + case 'A' : + case 'B' : + default: + vc_charset[charset] = 0; + break; + case '0' : /* Graphic characters */ + case '2' : + vc_charset[charset] = 0x21; + break; + } + +} + +static void +putc_gotpars(unsigned char ch) +{ + int i; + + if (ch < ' ') { + /* special case for vttest for handling cursor + movement in escape sequences */ + putc_normal(ch); + vt100state = ESgotpars; + return; + } + vt100state = ESnormal; + switch (ch) { + case 'A': /* Up */ + y -= par[0] ? par[0] : 1; + if (y < scrreg_top) + y = scrreg_top; + break; + case 'B': /* Down */ + y += par[0] ? par[0] : 1; + if (y >= scrreg_bottom) + y = scrreg_bottom - 1; + break; + case 'C': /* Right */ + x += par[0] ? par[0] : 1; + if (x >= vinfo.v_columns) + x = vinfo.v_columns-1; + break; + case 'D': /* Left */ + x -= par[0] ? par[0] : 1; + if (x < 0) + x = 0; + break; + case 'H': /* Set cursor position */ + case 'f': + x = par[1] ? par[1] - 1 : 0; + y = par[0] ? 
par[0] - 1 : 0; + if (vc_relative_origin) + y += scrreg_top; + hanging_cursor = 0; + break; + case 'X': /* clear p1 characters */ + if (numpars) { + int i; + for (i = x; i < x + par[0]; i++) + vc_paintchar(' ', i, y, ATTR_NONE); + } + break; + case 'J': /* Clear part of screen */ + clear_screen(par[0]); + break; + case 'K': /* Clear part of line */ + clear_line(par[0]); + break; + case 'g': /* tab stops */ + switch (par[0]) { + case 1: + case 2: /* reset tab stops */ + /* reset_tabs(); */ + break; + case 3: /* Clear every tabs */ + { + int i; + + for (i = 0; i <= vinfo.v_columns; i++) + tab_stops[i] = 0; + } + break; + case 0: + tab_stops[x] = 0; + break; + } + break; + case 'm': /* Set attribute */ + for (i = 0; i < numpars; i++) { + switch (par[i]) { + case 0: + attr = ATTR_NONE; + vc_color_set(VC_RESET_BACKGROUND); + vc_color_set(VC_RESET_FOREGROUND); + break; + case 1: + attr |= ATTR_BOLD; + break; + case 4: + attr |= ATTR_UNDER; + break; + case 7: + attr |= ATTR_REVERSE; + break; + case 22: + attr &= ~ATTR_BOLD; + break; + case 24: + attr &= ~ATTR_UNDER; + break; + case 27: + attr &= ~ATTR_REVERSE; + break; + case 5: + case 25: /* blink/no blink */ + break; + default: + vc_color_set(par[i]); + break; + } + } + break; + case 'r': /* Set scroll region */ + x = y = 0; + /* ensure top < bottom, and both within limits */ + if ((numpars > 0) && (par[0] < vinfo.v_rows)) { + scrreg_top = par[0] ? 
par[0] - 1 : 0; + if (scrreg_top < 0) + scrreg_top = 0; + } else { + scrreg_top = 0; + } + if ((numpars > 1) && (par[1] <= vinfo.v_rows) && (par[1] > par[0])) { + scrreg_bottom = par[1]; + if (scrreg_bottom > vinfo.v_rows) + scrreg_bottom = vinfo.v_rows; + } else { + scrreg_bottom = vinfo.v_rows; + } + if (vc_relative_origin) + y = scrreg_top; + break; + } + +} + +static void +putc_getpars(unsigned char ch) +{ + if (ch == '?') { + vt100state = ESask; + return; + } + if (ch == '[') { + vt100state = ESnormal; + /* Not supported */ + return; + } + if (ch == ';' && numpars < MAXPARS - 1) { + numpars++; + } else + if (ch >= '0' && ch <= '9') { + par[numpars] *= 10; + par[numpars] += ch - '0'; + } else { + numpars++; + vt100state = ESgotpars; + putc_gotpars(ch); + } +} + +static void +putc_square(unsigned char ch) +{ + int i; + + for (i = 0; i < MAXPARS; i++) { + par[i] = 0; + } + + numpars = 0; + vt100state = ESgetpars; + + putc_getpars(ch); + +} + +void +vc_putchar(char ch) +{ + if (!ch) { + return; /* ignore null characters */ + } + switch (vt100state) { + default:vt100state = ESnormal; /* FALLTHROUGH */ + case ESnormal: + putc_normal(ch); + break; + case ESesc: + putc_esc(ch); + break; + case ESsquare: + putc_square(ch); + break; + case ESgetpars: + putc_getpars(ch); + break; + case ESgotpars: + putc_gotpars(ch); + break; + case ESask: + putc_askcmd(ch); + break; + case EScharsize: + putc_charsizecmd(ch); + break; + case ESsetG0: + putc_charsetcmd(0, ch); + break; + case ESsetG1: + putc_charsetcmd(1, ch); + break; + } + + if (x >= vinfo.v_columns) { + x = vinfo.v_columns - 1; + } + if (x < 0) { + x = 0; + } + if (y >= vinfo.v_rows) { + y = vinfo.v_rows - 1; + } + if (y < 0) { + y = 0; + } + +} + +/* + * Actually draws the buffer, handle the jump scroll + */ +void vc_flush_forward_buffer(void) +{ + int start = 0; + int todo = 0; + spl_t s; + + assert(vc_forward_buffer_enabled); + + s = splhigh(); + simple_lock(&vc_forward_lock); + + if (vc_forward_buffer_busy) { + /* 
Bail out if we're already in the middle of a flush. */ + simple_unlock(&vc_forward_lock); + splx(s); + return; + } + + vc_forward_buffer_busy = 1; + + while (todo < vc_forward_buffer_size) { + todo = vc_forward_buffer_size; + + /* Drop the lock while we update the screen. */ + simple_unlock(&vc_forward_lock); + splx(s); + + reversecursor(); + + do { + int i; +#ifdef FAST_JUMP_SCROLL + if ((todo - start) < 2) { + vc_putchar(vc_forward_buffer[start++]); + } else { + assert(vc_forward_scroll == 0); + + vc_forward_preflight_save.vt100state = vt100state; + vc_forward_preflight_save.vc_wrap_mode = vc_wrap_mode; + vc_forward_preflight_save.vc_relative_origin = vc_relative_origin; + vc_forward_preflight_save.vc_charset_select = vc_charset_select; + vc_forward_preflight_save.vc_save_charset_s = vc_save_charset_s; + vc_forward_preflight_save.vc_charset[0] = vc_charset[0]; + vc_forward_preflight_save.vc_charset[1] = vc_charset[1]; + vc_forward_preflight_save.vc_charset_save[0] = vc_charset_save[0]; + vc_forward_preflight_save.vc_charset_save[1] = vc_charset_save[1]; + vc_forward_preflight_save.x = x; + vc_forward_preflight_save.y = y; + vc_forward_preflight_save.savex = savex; + vc_forward_preflight_save.savey = savey; + vc_forward_preflight_save.numpars = numpars; + vc_forward_preflight_save.hanging_cursor = hanging_cursor; + vc_forward_preflight_save.attr = attr; + vc_forward_preflight_save.saveattr = saveattr; + vc_forward_preflight_save.scrreg_top = scrreg_top; + vc_forward_preflight_save.scrreg_bottom = scrreg_bottom; + vc_forward_preflight_save.vc_color_fore = vc_color_fore; + vc_forward_preflight_save.vc_color_back = vc_color_back; + bcopy( (const char *) par, + (char *) vc_forward_preflight_save.par, + (vm_size_t) sizeof(par) ); + bcopy( (const char *) tab_stops, + (char *) vc_forward_preflight_save.tab_stops, + (vm_size_t) sizeof(tab_stops) ); + + vc_forward_preflight_mode = PFwind; + + for (i = start; + i < todo && + vc_forward_preflight_save.scrreg_top == 
scrreg_top && + vc_forward_preflight_save.scrreg_bottom == scrreg_bottom; + i++) + vc_putchar(vc_forward_buffer[i]); + + vt100state = vc_forward_preflight_save.vt100state; + vc_wrap_mode = vc_forward_preflight_save.vc_wrap_mode; + vc_relative_origin = vc_forward_preflight_save.vc_relative_origin; + vc_charset_select = vc_forward_preflight_save.vc_charset_select; + vc_save_charset_s = vc_forward_preflight_save.vc_save_charset_s; + vc_charset[0] = vc_forward_preflight_save.vc_charset[0]; + vc_charset[1] = vc_forward_preflight_save.vc_charset[1]; + vc_charset_save[0] = vc_forward_preflight_save.vc_charset_save[0]; + vc_charset_save[1] = vc_forward_preflight_save.vc_charset_save[1]; + x = vc_forward_preflight_save.x; + y = vc_forward_preflight_save.y; + savex = vc_forward_preflight_save.savex; + savey = vc_forward_preflight_save.savey; + numpars = vc_forward_preflight_save.numpars; + hanging_cursor = vc_forward_preflight_save.hanging_cursor; + attr = vc_forward_preflight_save.attr; + saveattr = vc_forward_preflight_save.saveattr; + scrreg_top = vc_forward_preflight_save.scrreg_top; + scrreg_bottom = vc_forward_preflight_save.scrreg_bottom; + vc_color_fore = vc_forward_preflight_save.vc_color_fore; + vc_color_back = vc_forward_preflight_save.vc_color_back; + bcopy( (const char *) vc_forward_preflight_save.par, + (char *) par, + (vm_size_t) sizeof(par) ); + bcopy( (const char *) vc_forward_preflight_save.tab_stops, + (char *) tab_stops, + (vm_size_t) sizeof(tab_stops) ); + + vc_forward_preflight_mode = PFscroll; + + if (vc_forward_scroll > 0) + scrollup(vc_forward_scroll > scrreg_bottom - scrreg_top ? + scrreg_bottom - scrreg_top : vc_forward_scroll); + else if (vc_forward_scroll < 0) + scrolldown(-vc_forward_scroll > scrreg_bottom - scrreg_top ? 
+ scrreg_bottom - scrreg_top : -vc_forward_scroll); + + vc_forward_preflight_mode = PFunwind; + + for (; start < i; start++) + vc_putchar(vc_forward_buffer[start]); + + assert(vc_forward_scroll == 0); + + vc_forward_preflight_mode = PFoff; + } +#else !FAST_JUMP_SCROLL + int plaintext = 1; + int drawlen = start; + int jump = 0; + int param = 0, changebackground = 0; + enum vt100state_e vtState = vt100state; + /* + * In simple words, here we're pre-parsing the text to look for + * + Newlines, for computing jump scroll + * + /\033\[[0-9;]*]m/ to continue on + * any other sequence will stop. We don't want to have cursor + * movement escape sequences while we're trying to pre-scroll + * the screen. + * We have to be extra carefull about the sequences that changes + * the background color to prevent scrolling in those + * particular cases. + * That parsing was added to speed up 'man' and 'color-ls' a + * zillion time (at least). It's worth it, trust me. + * (mail Nick Stephen for a True Performance Graph) + * Michel Pollet + */ + for (i = start; i < todo && plaintext; i++) { + drawlen++; + switch (vtState) { + case ESnormal: + switch (vc_forward_buffer[i]) { + case '\033': + vtState = ESesc; + break; + case '\n': + jump++; + break; + } + break; + case ESesc: + switch (vc_forward_buffer[i]) { + case '[': + vtState = ESgetpars; + param = 0; + changebackground = 0; + break; + default: + plaintext = 0; + break; + } + break; + case ESgetpars: + if ((vc_forward_buffer[i] >= '0' && + vc_forward_buffer[i] <= '9') || + vc_forward_buffer[i] == ';') { + if (vc_forward_buffer[i] >= '0' && + vc_forward_buffer[i] <= '9') + param = (param*10)+(vc_forward_buffer[i]-'0'); + else { + if (param >= 40 && param <= 47) + changebackground = 1; + if (!vc_normal_background && + !param) + changebackground = 1; + param = 0; + } + break; /* continue on */ + } + vtState = ESgotpars; + /* fall */ + case ESgotpars: + switch (vc_forward_buffer[i]) { + case 'm': + vtState = ESnormal; + if (param >= 40 
&& param <= 47) + changebackground = 1; + if (!vc_normal_background && + !param) + changebackground = 1; + if (changebackground) { + plaintext = 0; + jump = 0; + /* REALLY don't jump */ + } + /* Yup ! we've got it */ + break; + default: + plaintext = 0; + break; + } + break; + default: + plaintext = 0; + break; + } + + } + + /* + * Then we look if it would be appropriate to forward jump + * the screen before drawing + */ + if (jump && (scrreg_bottom - scrreg_top) > 2) { + jump -= scrreg_bottom - y - 1; + if (jump > 0 ) { + if (jump >= scrreg_bottom - scrreg_top) + jump = scrreg_bottom - scrreg_top -1; + y -= jump; + scrollup(jump); + } + } + /* + * and we draw what we've found to the parser + */ + for (i = start; i < drawlen; i++) + vc_putchar(vc_forward_buffer[start++]); + /* + * Continue sending characters to the parser until we're sure we're + * back on normal characters. + */ + for (i = start; i < todo && + vt100state != ESnormal ; i++) + vc_putchar(vc_forward_buffer[start++]); +#endif !FAST_JUMP_SCROLL + /* Then loop again if there still things to draw */ + } while (start < todo); + + reversecursor(); + + /* Re-acquire the lock while we check our state. */ + s = splhigh(); + simple_lock(&vc_forward_lock); + } + + vc_forward_buffer_busy = 0; + vc_forward_buffer_size = 0; + + simple_unlock(&vc_forward_lock); + splx(s); +} + +int +vcputc(int l, int u, int c) +{ + /* + * Either we're really buffering stuff or we're not yet because + * the probe hasn't been done. + */ + if (vc_forward_buffer_enabled) + vc_store_char(c); + else + vc_putchar(c); + + return 0; +} + +/* + * Store characters to be drawn 'later', handle overflows + */ + +void +vc_store_char(unsigned char c) +{ + int flush = 0; + spl_t s; + + assert(vc_forward_buffer_enabled); + + s = splhigh(); + simple_lock(&vc_forward_lock); + + /* Spin until the buffer has space for another character. 
*/ + while (vc_forward_buffer_size == VC_MAX_FORWARD_SIZE) { + simple_unlock(&vc_forward_lock); + splx(s); + /* wait */ + s = splhigh(); + simple_lock(&vc_forward_lock); + } + + assert(vc_forward_buffer_size < VC_MAX_FORWARD_SIZE); + + vc_forward_buffer[vc_forward_buffer_size++] = (unsigned char)c; + + if (vc_forward_buffer_size == 1) { + /* If we're adding the first character to the buffer, + * start the timer, otherwise it is already running. + */ + if (debug_mode) { + flush = 1; + } else { + timeout((timeout_fcn_t)vc_flush_forward_buffer, + (void *)0, + VC_CONSOLE_UPDATE_TIMEOUT); + } + } else if (vc_forward_buffer_size == VC_MAX_FORWARD_SIZE || debug_mode) { + /* + * If there is an overflow or this is an immediate character display + * (eg. pre-clock printfs, panics), then we force a draw (take into + * account that a flush might already be in progress). + */ + if (!vc_forward_buffer_busy) { + flush = 1; + untimeout((timeout_fcn_t)vc_flush_forward_buffer, (void *)0); + } + } + + simple_unlock(&vc_forward_lock); + splx(s); + + if (flush) { + /* + * Immediate character display.. kernel printf uses this. Make sure + * get flushed and that panics get fully displayed. 
+ */ + vc_flush_forward_buffer(); + } +} + +void +vc_initialize(void) +{ +#if 0 + GratefulDebInit(); /* (TEST/DEBUG) */ +#endif + +#if DEBUG && SERIAL_CONSOLE_DEFAULT && !defined(MACH_PE) + printf(" Video info: %d; video_board=%08X\n", i, vboard); + printf(" Video name: %s\n", vinfo.v_name); + printf(" height=%d; width=%d, depth=%d; rowbytes=%d; type=%08X\n", + vinfo.v_height, vinfo.v_width, vinfo.v_depth, vinfo.v_rowbytes, vinfo.v_type); + printf(" physical address=%08X\n", vinfo.v_physaddr); +#endif + + vinfo.v_rows = vinfo.v_height / CHARHEIGHT; + vinfo.v_columns = vinfo.v_width / CHARWIDTH; + + if (vinfo.v_depth >= 8) { + vinfo.v_rowscanbytes = (vinfo.v_depth / 8) * vinfo.v_width; + } else { + vinfo.v_rowscanbytes = vinfo.v_width / (8 / vinfo.v_depth); + } + +#if DEBUG && SERIAL_CONSOLE_DEFAULT && !defined(MACH_PE) + printf(" inited=%d\n", vc_initted); +#endif + + + vc_render_font(1, vinfo.v_depth); + vc_color_mask = vc_color_depth_masks[vc_color_index_table[vinfo.v_depth]]; + vt100_reset(); + switch (vinfo.v_depth) { + default: + case 1: + vc_paintchar = vc_paint_char1; + break; + case 2: + vc_paintchar = vc_paint_char2; + break; + case 4: + vc_paintchar = vc_paint_char4; + break; + case 8: + vc_paintchar = vc_paint_char8c; + break; + case 16: + vc_paintchar = vc_paint_char16c; + break; + case 32: + vc_paintchar = vc_paint_char32c; + break; + } + +#ifdef FAST_JUMP_SCROLL + vc_forward_paintchar = vc_paintchar; + vc_paintchar = vc_paint_char; +#endif FAST_JUMP_SCROLL +} + +void +vcattach(void) +{ + if (vinfo.v_depth >= 8) + printf("\033[31mC\033[32mO\033[33mL\033[34mO\033[35mR\033[0m "); + printf("video console at 0x%x (%dx%dx%d)\n", vinfo.v_baseaddr, + vinfo.v_width, vinfo.v_height, vinfo.v_depth); + + /* + * Added for the buffering and jump scrolling + */ + /* Init our lock */ + simple_lock_init(&vc_forward_lock, ETAP_IO_TTY); + + vc_forward_buffer_enabled = 1; + +} + + +struct vc_progress_element { + unsigned int version; + unsigned int flags; + unsigned int 
time; + unsigned char count; + unsigned char res[3]; + int width; + int height; + int dx; + int dy; + int transparent; + unsigned int res2[3]; + unsigned char data[0]; +}; +typedef struct vc_progress_element vc_progress_element; + +static vc_progress_element * vc_progress; +static unsigned char * vc_progress_data; +static boolean_t vc_progress_enable; +static unsigned char * vc_clut; +static unsigned int vc_progress_tick; +static boolean_t vc_graphics_mode; +static boolean_t vc_acquired; +static boolean_t vc_need_clear; + +void vc_blit_rect_8c( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned char * dst; + int line, col; + unsigned char data; + + dst = (unsigned char *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + *(dst + col) = data; + } + dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); + } + +} + +void vc_blit_rect_8m( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned char * dst; + int line, col; + unsigned int data; + + dst = (unsigned char *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + data *= 3; + *(dst + col) = ((19595 * vc_clut[data + 0] + + 38470 * vc_clut[data + 1] + + 7471 * vc_clut[data + 2] ) / 65536); + } + dst = (volatile unsigned char *) (((int)dst) + vinfo.v_rowbytes); + } +} + + + +void vc_blit_rect_16( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned short * dst; + int line, col; + unsigned int data; + + dst = (volatile unsigned short *)(vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 2)); + + for( line = 0; line < height; line++) { + for( col = 0; 
col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + data *= 3; + *(dst + col) = ( (0xf8 & (vc_clut[data + 0])) << 7) + | ( (0xf8 & (vc_clut[data + 1])) << 2) + | ( (0xf8 & (vc_clut[data + 2])) >> 3); + } + dst = (volatile unsigned short *) (((int)dst) + vinfo.v_rowbytes); + } +} + +void vc_blit_rect_32( unsigned int x, unsigned int y, + unsigned int width, unsigned int height, + int transparent, unsigned char * dataPtr ) +{ + volatile unsigned int * dst; + int line, col; + unsigned int data; + + dst = (volatile unsigned int *) (vinfo.v_baseaddr + + (y * vinfo.v_rowbytes) + + (x * 4)); + + for( line = 0; line < height; line++) { + for( col = 0; col < width; col++) { + data = *dataPtr++; + if( data == transparent) + continue; + + data *= 3; + *(dst + col) = (vc_clut[data + 0] << 16) + | (vc_clut[data + 1] << 8) + | (vc_clut[data + 2]); + } + dst = (volatile unsigned int *) (((int)dst) + vinfo.v_rowbytes); + } +} + +void vc_blit_rect( int x, int y, + int width, int height, + int transparent, unsigned char * dataPtr ) +{ + switch( vinfo.v_depth) { + case 8: + vc_blit_rect_8c( x, y, width, height, transparent, dataPtr); + break; + case 16: + vc_blit_rect_16( x, y, width, height, transparent, dataPtr); + break; + case 32: + vc_blit_rect_32( x, y, width, height, transparent, dataPtr); + break; + } +} + +void vc_progress_task( void * arg ) +{ + spl_t s; + int count = (int) arg; + int x, y, width, height; + unsigned char * data; + + s = splhigh(); + simple_lock(&vc_forward_lock); + + if( vc_progress_enable) { + count++; + if( count >= vc_progress->count) + count = 0; + + width = vc_progress->width; + height = vc_progress->height; + x = vc_progress->dx; + y = vc_progress->dy; + data = vc_progress_data; + data += count * width * height; + if( 1 & vc_progress->flags) { + x += (vinfo.v_width / 2); + x += (vinfo.v_height / 2); + } + vc_blit_rect( x, y, width, height, + vc_progress->transparent,data ); + + timeout( vc_progress_task, (void *) 
count, + vc_progress_tick ); + } + simple_unlock(&vc_forward_lock); + splx(s); +} + +void vc_display_icon( vc_progress_element * desc, + unsigned char * data ) +{ + int x, y, width, height; + + if( vc_acquired && vc_graphics_mode && vc_clut) { + + width = desc->width; + height = desc->height; + x = desc->dx; + y = desc->dy; + if( 1 & desc->flags) { + x += (vinfo.v_width / 2); + y += (vinfo.v_height / 2); + } + vc_blit_rect( x, y, width, height, desc->transparent, data ); + } +} + +boolean_t +vc_progress_set( boolean_t enable ) +{ + spl_t s; + + if( !vc_progress) + return( FALSE ); + + s = splhigh(); + simple_lock(&vc_forward_lock); + + if( vc_progress_enable != enable) { + vc_progress_enable = enable; + if( enable) + timeout(vc_progress_task, (void *) 0, + vc_progress_tick ); + else + untimeout( vc_progress_task, (void *) 0 ); + } + + simple_unlock(&vc_forward_lock); + splx(s); + + return( TRUE ); +} + + +boolean_t +vc_progress_initialize( vc_progress_element * desc, + unsigned char * data, + unsigned char * clut ) +{ + if( (!clut) || (!desc) || (!data)) + return( FALSE ); + vc_clut = clut; + + vc_progress = desc; + vc_progress_data = data; + vc_progress_tick = vc_progress->time * hz / 1000; + + return( TRUE ); +} + +// FirmwareC.c needs: +Boot_Video boot_video_info; + +extern int disableConsoleOutput; + +void vc_clear_screen( void ) +{ + reversecursor(); + vt100_reset(); + x = y = 0; + clear_screen(2); + reversecursor(); +}; + +void +initialize_screen(Boot_Video * boot_vinfo, unsigned int op) +{ + if( boot_vinfo) { + bcopy( (const void *) boot_vinfo, + (void *) &boot_video_info, + sizeof( boot_video_info)); + + vinfo.v_name[0] = 0; + vinfo.v_width = boot_vinfo->v_width; + vinfo.v_height = boot_vinfo->v_height; + vinfo.v_depth = boot_vinfo->v_depth; + vinfo.v_rowbytes = boot_vinfo->v_rowBytes; + vinfo.v_physaddr = boot_vinfo->v_baseAddr; + vinfo.v_baseaddr = vinfo.v_physaddr; + vinfo.v_type = 0; + + vc_initialize(); +#if 0 + GratefulDebInit((bootBumbleC 
*)boot_vinfo); /* Re-initialize GratefulDeb */ +#endif + } + + switch( op ) { + + case kPEGraphicsMode: + vc_graphics_mode = TRUE; + disableConsoleOutput = TRUE; + vc_acquired = TRUE; + break; + + case kPETextMode: + vc_graphics_mode = FALSE; + disableConsoleOutput = FALSE; + vc_acquired = TRUE; + vc_clear_screen(); + break; + + case kPETextScreen: + vc_progress_set( FALSE ); + disableConsoleOutput = FALSE; + if( vc_need_clear) { + vc_need_clear = FALSE; + vc_clear_screen(); + } + break; + + case kPEEnableScreen: + if( vc_acquired) { + if( vc_graphics_mode) + vc_progress_set( TRUE ); + else + vc_clear_screen(); + } + break; + + case kPEDisableScreen: + vc_progress_set( FALSE ); + break; + + case kPEAcquireScreen: + vc_need_clear = (FALSE == vc_acquired); + vc_acquired = TRUE; + vc_progress_set( vc_graphics_mode ); + disableConsoleOutput = vc_graphics_mode; + if( vc_need_clear && !vc_graphics_mode) { + vc_need_clear = FALSE; + vc_clear_screen(); + } + break; + + case kPEReleaseScreen: + vc_acquired = FALSE; + vc_progress_set( FALSE ); + disableConsoleOutput = TRUE; +#if 0 + GratefulDebInit(0); /* Stop grateful debugger */ +#endif + break; + } +#if 0 + if( boot_vinfo) GratefulDebInit((bootBumbleC *)boot_vinfo); /* Re initialize GratefulDeb */ +#endif +} diff --git a/osfmk/ppc/POWERMAC/video_console.h b/osfmk/ppc/POWERMAC/video_console.h new file mode 100644 index 000000000..083712b08 --- /dev/null +++ b/osfmk/ppc/POWERMAC/video_console.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:43 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:05 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.9.4 1997/05/09 15:36:59 barbou + * Moved "video" funnel declaration to video_board.h. + * [97/05/09 barbou] + * + * Revision 1.1.9.3 1997/05/08 19:33:07 barbou + * SMP support: + * Funnelized the "video" driver. 
+ * [1997/05/08 18:20:34 barbou] + * + * Revision 1.1.9.2 1997/01/27 15:27:31 stephen + * Export new set/get_status + * VC_GETKEYBOARDLEDS/VC_SETKEYBOARDLEDS + * [1997/01/27 15:27:01 stephen] + * + * Revision 1.1.9.1 1996/12/09 16:52:52 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 10:57:12 stephen] + * + * Revision 1.1.7.4 1996/10/18 08:25:16 stephen + * Added v_rowscanbytes field + * [1996/10/18 08:24:11 stephen] + * + * Revision 1.1.7.3 1996/10/14 18:36:33 stephen + * Added v_rows, v_volumns + * Removed sys/ioctl.h inclusion + * File is now exported from microkernel + * [1996/10/14 18:24:17 stephen] + * + * Revision 1.1.7.2 1996/08/23 09:24:10 stephen + * Added guards around file + * [1996/08/23 09:23:05 stephen] + * + * Revision 1.1.7.1 1996/06/20 12:53:46 stephen + * added VM_TYPE_AV + * [1996/06/20 12:51:04 stephen] + * + * Revision 1.1.4.3 1996/05/28 10:47:39 stephen + * Added HPV video capability + * [1996/05/28 10:45:10 stephen] + * + * Revision 1.1.4.2 1996/05/03 17:26:06 stephen + * Added APPLE_FREE_COPYRIGHT + * [1996/05/03 17:20:05 stephen] + * + * Revision 1.1.4.1 1996/04/11 09:06:47 emcmanus + * Copied from mainline.ppc. 
+ * [1996/04/10 17:01:34 emcmanus] + * + * Revision 1.1.2.2 1996/03/14 12:58:25 stephen + * Various new definitions from Mike + * [1996/03/14 12:21:30 stephen] + * + * Revision 1.1.2.1 1996/02/08 17:37:58 stephen + * created + * [1996/02/08 17:32:46 stephen] + * + * $EndLog$ + */ + +#ifndef _POWERMAC_VIDEO_CONSOLE_H_ +#define _POWERMAC_VIDEO_CONSOLE_H_ + + +struct vc_info { + unsigned long v_height; /* pixels */ + unsigned long v_width; /* pixels */ + unsigned long v_depth; + unsigned long v_rowbytes; + unsigned long v_baseaddr; + unsigned long v_type; + char v_name[32]; + unsigned long v_physaddr; + unsigned long v_rows; /* characters */ + unsigned long v_columns; /* characters */ + unsigned long v_rowscanbytes; /* Actualy number of bytes used for display per row*/ + /* Note for PCI (VCI) systems, part of the row byte line + is used for the hardware cursor which is not to be touched */ + unsigned long v_reserved[5]; +}; + +#endif /* _POWERMAC_VIDEO_CONSOLE_H_ */ diff --git a/osfmk/ppc/POWERMAC/video_console_entries.h b/osfmk/ppc/POWERMAC/video_console_entries.h new file mode 100644 index 000000000..4976ee80d --- /dev/null +++ b/osfmk/ppc/POWERMAC/video_console_entries.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:43 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:05 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.10.1 1996/12/09 16:52:54 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 10:57:17 stephen] + * + * Revision 1.1.8.2 1996/06/14 08:40:48 emcmanus + * Added prototype for vc_putchar(). + * [1996/05/07 09:35:43 emcmanus] + * + * Revision 1.1.8.1 1996/06/07 16:04:24 stephen + * Added video_scroll_up and video_scroll_down prototypes + * [1996/06/07 15:43:59 stephen] + * + * Revision 1.1.4.3 1996/05/03 17:26:10 stephen + * Added APPLE_FREE_COPYRIGHT + * [1996/05/03 17:20:12 stephen] + * + * Revision 1.1.4.2 1996/04/27 15:23:46 emcmanus + * Added vcputc() and vcgetc() prototypes so these functions can be + * used in the console switch. + * [1996/04/27 15:03:38 emcmanus] + * + * Revision 1.1.4.1 1996/04/11 09:06:51 emcmanus + * Copied from mainline.ppc. 
+ * [1996/04/10 17:01:38 emcmanus] + * + * Revision 1.1.2.3 1996/03/14 12:58:27 stephen + * no change + * [1996/03/14 12:56:24 stephen] + * + * Revision 1.1.2.2 1996/01/30 13:29:09 stephen + * Added vcmmap + * [1996/01/30 13:27:11 stephen] + * + * Revision 1.1.2.1 1996/01/12 16:15:06 stephen + * First revision + * [1996/01/12 14:41:47 stephen] + * + * $EndLog$ + */ +#include + +extern void vc_putchar( + char ch); +extern int vcputc( + int l, + int u, + int c); +extern int vcgetc( + int l, + int u, + boolean_t wait, + boolean_t raw); + +extern void video_scroll_up(unsigned long start, + unsigned long end, + unsigned long dest); + +extern void video_scroll_down(unsigned long start, /* HIGH addr */ + unsigned long end, /* LOW addr */ + unsigned long dest); /* HIGH addr */ diff --git a/osfmk/ppc/POWERMAC/video_scroll.s b/osfmk/ppc/POWERMAC/video_scroll.s new file mode 100644 index 000000000..f7d2611a3 --- /dev/null +++ b/osfmk/ppc/POWERMAC/video_scroll.s @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ + +/* Routines to perform high-speed scrolling, assuming that the memory is + * non-cached, and that the amount of memory to be scrolled is a multiple + * of (at least) 16. + */ + +#include +#include + +/* + * void video_scroll_up(unsigned long start, + * unsigned long end, + * unsigned long dest) + */ + +ENTRY(video_scroll_up, TAG_NO_FRAME_USED) + + mfmsr r0 /* Get the MSR */ + mflr r6 /* Get the LR */ + ori r7,r0,1<<(31-MSR_FP_BIT) /* Turn on floating point */ + stwu r1,-(FM_SIZE+16)(r1) /* Get space for a couple of registers on stack */ + rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interrupts */ + stw r6,(FM_SIZE+16+FM_LR_SAVE)(r1) /* Save the return */ + + mtmsr r7 /* Turn on FPU */ + isync /* Wait for it */ + +vsufpuon1: stfd f0,(FM_SIZE+0)(r1) /* Save one register */ + stfd f1,(FM_SIZE+8)(r1) /* and the second */ + + stw r0,(FM_SIZE+FM_LR_SAVE)(r1) /* Save return */ + +/* ok, now we can use the FPU registers to do some fast copying + */ + +.L_vscr_up_loop: + lfd f0, 0(r3) + lfd f1, 8(r3) + + addi r3, r3, 16 + + stfd f0, 0(r5) + + cmpl cr0, r3, r4 + + stfd f1, 8(r5) + + addi r5, r5, 16 + + blt+ cr0, .L_vscr_up_loop + + lfd f0,(FM_SIZE+0)(r1) /* Load back one register */ + lfd f1,(FM_SIZE+8)(r1) /* and the second */ + lwz r1,0(r1) /* Pop the stack */ + + mtmsr r0 /* Turn off FPU again */ + isync /* Wait for it */ + blr /* Go away, don't bother me... */ + + +/* + * void video_scroll_down(unsigned long start, HIGH address to scroll from + * unsigned long end, LOW address + * unsigned long dest) HIGH address + */ + +ENTRY(video_scroll_down, TAG_NO_FRAME_USED) + + /* Save off the link register, we want to call fpu_save. 
+ */ + + + mfmsr r0 /* Get the MSR */ + mflr r6 /* Get the LR */ + ori r7,r0,1<<(31-MSR_FP_BIT) /* Turn on floating point */ + stwu r1,-(FM_SIZE+16)(r1) /* Get space for a couple of registers on stack */ + rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interrupts */ + stw r6,(FM_SIZE+16+FM_LR_SAVE)(r1) /* Save the return */ + + mtmsr r7 /* Turn on FPU */ + isync /* Wait for it */ + +vsdfpuon1: stfd f0,(FM_SIZE+0)(r1) /* Save one register */ + stfd f1,(FM_SIZE+8)(r1) /* and the second */ + + stw r0,(FM_SIZE+FM_LR_SAVE)(r1) /* Save return */ + +/* ok, now we can use the FPU registers to do some fast copying */ + +.L_vscr_down_loop: + lfd f0, -16(r3) + lfd f1, -8(r3) + + subi r3, r3, 16 + + stfd f0, -16(r5) + + cmpl cr0, r3, r4 + + stfd f1, -8(r5) + + subi r5, r5, 16 + + bgt+ cr0, .L_vscr_down_loop + + + lfd f0,(FM_SIZE+0)(r1) /* Load back one register */ + lfd f1,(FM_SIZE+8)(r1) /* and the second */ + lwz r1,0(r1) /* Pop the stack */ + + mtmsr r0 /* Turn off FPU again */ + isync /* Wait for it */ + blr /* Go away, don't bother me... */ + diff --git a/osfmk/ppc/PPCcalls.c b/osfmk/ppc/PPCcalls.c new file mode 100644 index 000000000..1460c10be --- /dev/null +++ b/osfmk/ppc/PPCcalls.c @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * To add a new entry: + * Add an "PPCcall(routine)" to the table in ppc/PPCcalls.h + * + * Add trap definition to mach/ppc/syscall_sw.h and + * recompile user library. + * + */ + +#include diff --git a/osfmk/ppc/PPCcalls.h b/osfmk/ppc/PPCcalls.h new file mode 100644 index 000000000..073a24057 --- /dev/null +++ b/osfmk/ppc/PPCcalls.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * To add a new entry: + * Add an "PPCTRAP(routine)" to the table below + * + * Add trap definition to mach/ppc/syscall_sw.h and + * recompile user library. + * + * Note: + * The maximum number of calls is 0x1000 (4096 for the hexually challanged) + * + */ + +typedef int (*PPCcallEnt)(struct savearea *save); + +#define PPCcall(rout) rout +#define dis (PPCcallEnt)0 + +PPCcallEnt PPCcalls[] = { + + PPCcall(diagCall), /* 0x6000 Call diagnostics routines */ + PPCcall(vmm_get_version), /* 0x6001 Get Virtual Machine Monitor version */ + PPCcall(vmm_get_features), /* 0x6002 Get Virtual Machine Monitor supported features */ + PPCcall(vmm_init_context), /* 0x6003 Initialize a VMM context */ + PPCcall(vmm_dispatch), /* 0x6004 Dispatch a Virtual Machine Monitor call */ + PPCcall(bb_enable_bluebox), /* 0x6005 Enable this thread for use in the blue box virtual machine */ + PPCcall(bb_disable_bluebox), /* 0x6006 Disable this thread for use in the blue box virtual machine */ + PPCcall(bb_settaskenv), /* 0x6007 Set the BlueBox per thread task environment data */ + + PPCcall(dis), /* 0x6008 disabled */ + PPCcall(dis), /* 0x6009 disabled */ + PPCcall(dis), /* 0x600A disabled */ + PPCcall(dis), /* 0x600B disabled */ + PPCcall(dis), /* 0x600C disabled */ + PPCcall(dis), /* 0x600D disabled */ + PPCcall(dis), /* 0x600E disabled */ + PPCcall(dis), /* 0x600F disabled */ +}; + +#undef dis diff --git a/osfmk/ppc/Performance.h b/osfmk/ppc/Performance.h new file mode 100644 index 000000000..b06f1f018 --- /dev/null +++ b/osfmk/ppc/Performance.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Keep special performance related stuff in here + */ + +#define PERF_HIST 0 +#define PMIHIST_SIZE 0x00400000 +#define perfClear 0 +#define perfStart 1 +#define perfStop 2 +#define perfMap 3 + +#ifndef ASSEMBLER + +extern unsigned int PMIhist; +extern unsigned int PMIhistV; +extern unsigned int PerfCtl(unsigned int cmd, unsigned int parm0); + +#endif /* !ASSEMBLER */ diff --git a/osfmk/ppc/Performance.s b/osfmk/ppc/Performance.s new file mode 100644 index 000000000..d3c11e922 --- /dev/null +++ b/osfmk/ppc/Performance.s @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT_INTERNAL_USE_ONLY@ + */ + +/* + Performance.s + + Handle things that should are related to the hardware performance monitor + + Lovingly crafted by Bill Angell using traditional methods and only natural or recycled materials. + No more than 7500 chinchillas were killed in the production of the code. + +*/ + +#include +#include +#include +#include +#include +#include +#include + +#if PERF_HIST +/* + * This routine is used to interface to the performance monitor + */ + +ENTRY(PerfCtl, TAG_NO_FRAME_USED) + + lis r0,PerfCtlCall@h /* Get the top part of the SC number */ + ori r0,r0,PerfCtlCall@l /* and the bottom part */ + sc /* Do it to it */ + blr /* Bye bye, Birdie... */ + + +ENTRY(PerfCtlLL, TAG_NO_FRAME_USED) + + cmplwi r3,maxPerf /* See if we are within range */ + mflr r11 /* Get the return point */ + li r3,0 /* Show failure */ + bgelrl- /* Load up current address and, also, leave if out of range */ +prfBase: mflr r12 /* Get our address */ + rlwinm r10,r3,2,0,31 /* Get displacement into branch table */ + addi r12,r12,prfBrnch-prfBase /* Point to the branch address */ + add r12,r12,r10 /* Point to the branch */ + mtlr r12 /* Get it in the link register */ + blr /* Vector to the specific performance command... 
*/ + +prfBrnch: b prfClear /* Clear the histogram table */ + b prfStart /* Start the performance monitor */ + b prfStop /* Stop the performance monitor */ + b prfMap /* Map the histogram into an address space */ + .equ maxPerf, (.-prfBrnch)/4 /* Set the highest valid address */ + +/* + * Clear the monitor histogram + */ +prfClear: + li r4,PMIhist@l /* We know this to be in page 0, so no need for the high part */ + lis r8,PMIHIST_SIZE@h /* Get high half of the table size */ + lwz r4,0(r4) /* Get the real address of the histgram */ + ori r8,r8,PMIHIST_SIZE@l /* Get the low half of the table size */ + li r6,32 /* Get a displacement */ + li r3,1 /* Set up a good return code */ + mtlr r11 /* Restore the return address */ + +clrloop: subi r8,r8,32 /* Back off a cache line */ + dcbz 0,r4 /* Do the even line */ + sub. r8,r8,r6 /* Back off a second time (we only do this to generate a CR */ + dcbz r6,r4 /* Clear the even line */ + addi r4,r4,64 /* Move up to every other line */ + bgt+ clrloop /* Go until we've done it all... */ + + blr /* Leave... */ + +/* + * Start the monitor histogram + */ + prfStart: + mtlr r11 /* Restore the return address */ + blr /* Return... */ + +/* + * Stop the monitor histogram + */ + prfStop: + mtlr r11 /* Restore the return address */ + blr /* Return... */ + +/* + * Maps the monitor histogram into another address space + */ + prfMap: + mtlr r11 /* Restore the return address */ + blr /* Return... */ + +#endif + diff --git a/osfmk/ppc/PseudoKernel.c b/osfmk/ppc/PseudoKernel.c new file mode 100644 index 000000000..79ad19dbd --- /dev/null +++ b/osfmk/ppc/PseudoKernel.c @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: PseudoKernel.c + + Contains: BlueBox PseudoKernel calls + Written by: Mark Gorlinsky + Bill Angell + + Copyright: 1997 by Apple Computer, Inc., all rights reserved + +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void bbSetRupt(ReturnHandler *rh, thread_act_t ct); +void DumpTheSave(struct savearea *save); /* (TEST/DEBUG) */ + +/* +** Function: NotifyInterruption +** +** Inputs: +** ppcInterrupHandler - interrupt handler to execute +** interruptStatePtr - current interrupt state +** +** Outputs: +** +** Notes: +** +*/ +kern_return_t syscall_notify_interrupt ( void ) { + + UInt32 interruptState; + task_t task; + spl_t s; + thread_act_t act, fact; + thread_t thread; + bbRupt *bbr; + BTTD_t *bttd; + int i; + + task = current_task(); /* Figure out who our task is */ + + task_lock(task); /* Lock our task */ + + fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + act = 0; /* Pretend we didn't find it yet */ + + for(i = 0; i < task->thr_act_count; i++) { /* Scan the whole list */ + if(fact->mact.bbDescAddr) { /* Is this a Blue thread? */ + bttd = (BTTD_t *)(fact->mact.bbDescAddr & -PAGE_SIZE); + if(bttd->InterruptVector) { /* Is this the Blue interrupt thread? */ + act = fact; /* Yeah... */ + break; /* Found it, Bail the loop... 
*/ + } + } + fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + } + + if(!act) { /* Couldn't find a bluebox */ + task_unlock(task); /* Release task lock */ + return KERN_FAILURE; /* No tickie, no shirtee... */ + } + + act_lock_thread(act); /* Make sure this stays 'round */ + task_unlock(task); /* Safe to release now */ + + /* if the calling thread is the BlueBox thread that handles interrupts + * we know that we are in the PsuedoKernel and we can short circuit + * setting up the asynchronous task by setting a pending interrupt. + */ + + if ( (unsigned int)act == (unsigned int)current_act() ) { + bttd->InterruptControlWord = bttd->InterruptControlWord | + ((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); + + act_unlock_thread(act); /* Unlock the activation */ + return KERN_SUCCESS; + } + + if(act->mact.bbPendRupt >= 16) { /* Have we hit the arbitrary maximum? */ + act_unlock_thread(act); /* Unlock the activation */ + return KERN_RESOURCE_SHORTAGE; /* Too many pending right now */ + } + + if(!(bbr = (bbRupt *)kalloc(sizeof(bbRupt)))) { /* Get a return handler control block */ + act_unlock_thread(act); /* Unlock the activation */ + return KERN_RESOURCE_SHORTAGE; /* No storage... */ + } + + (void)hw_atomic_add(&act->mact.bbPendRupt, 1); /* Count this 'rupt */ + bbr->rh.handler = bbSetRupt; /* Set interruption routine */ + + bbr->rh.next = act->handlers; /* Put our interrupt at the start of the list */ + act->handlers = &bbr->rh; + + s = splsched(); /* No talking in class */ + act_set_apc(act); /* Set an APC AST */ + splx(s); /* Ok, you can talk now */ + + act_unlock_thread(act); /* Unlock the activation */ + return KERN_SUCCESS; /* We're done... */ +} + +/* + * This guy is fired off asynchronously to actually do the 'rupt. + * We will find the user state savearea and modify it. 
If we can't, + * we just leave after releasing our work area + */ + +void bbSetRupt(ReturnHandler *rh, thread_act_t act) { + + savearea *sv; + BTTD_t *bttd; + bbRupt *bbr; + UInt32 interruptState; + + bbr = (bbRupt *)rh; /* Make our area convenient */ + + if(!(act->mact.bbDescAddr)) { /* Is BlueBox still enabled? */ + kfree((vm_offset_t)bbr, sizeof(bbRupt)); /* No, release the control block */ + return; + } + + (void)hw_atomic_sub(&act->mact.bbPendRupt, 1); /* Uncount this 'rupt */ + + if(!(sv = (savearea *)find_user_regs(act))) { /* Find the user state registers */ + kfree((vm_offset_t)bbr, sizeof(bbRupt)); /* Couldn't find 'em, release the control block */ + return; + } + + bttd = (BTTD_t *)(act->mact.bbDescAddr & -PAGE_SIZE); + + interruptState = (bttd->InterruptControlWord & kInterruptStateMask) >> kInterruptStateShift; + + switch (interruptState) { + + case kInSystemContext: + sv->save_cr |= bttd->postIntMask; /* post int in CR2 */ + break; + + case kInAlternateContext: + bttd->InterruptControlWord = (bttd->InterruptControlWord & ~kInterruptStateMask) | + (kInPseudoKernel << kInterruptStateShift); + + bttd->exceptionInfo.srr0 = sv->save_srr0; /* Save the current PC */ + sv->save_srr0 = bttd->InterruptVector; /* Set the new PC */ + bttd->exceptionInfo.sprg1 = sv->save_r1; /* Save the original R1 */ + sv->save_r1 = bttd->exceptionInfo.sprg0; /* Set the new R1 */ + bttd->exceptionInfo.srr1 = sv->save_srr1; /* Save the original MSR */ + sv->save_srr1 &= ~(MASK(MSR_BE)|MASK(MSR_SE)); /* Clear SE|BE bits in MSR */ + act->mact.specFlags &= ~bbNoMachSC; /* reactivate Mach SCs */ + /* drop through to post int in backup CR2 in ICW */ + + case kInExceptionHandler: + case kInPseudoKernel: + case kOutsideBlue: + bttd->InterruptControlWord = bttd->InterruptControlWord | + ((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); + break; + + default: + break; + } + + kfree((vm_offset_t)bbr, sizeof(bbRupt)); /* Release the control block */ + return; + +} + +/* + * This 
function is used to enable the firmware assist code for bluebox traps, system calls + * and interrupts. + * + * The assist code can be called from two types of threads. The blue thread, which handles + * traps, system calls and interrupts and preemptive threads that only issue system calls. + * + */ + +kern_return_t enable_bluebox( + host_t host, + void *taskID, /* opaque task ID */ + void *TWI_TableStart, /* Start of TWI table */ + char *Desc_TableStart /* Start of descriptor table */ + ) { + + thread_t th; + vm_offset_t kerndescaddr, physdescaddr, origdescoffset; + kern_return_t ret; + + th = current_thread(); /* Get our thread */ + + if ( host == HOST_NULL ) return KERN_INVALID_HOST; + if ( ! is_suser() ) return KERN_FAILURE; /* We will only do this for the superuser */ + if ( th->top_act->mact.bbDescAddr ) return KERN_FAILURE; /* Bail if already authorized... */ + if ( ! (unsigned int) Desc_TableStart ) return KERN_FAILURE; /* There has to be a descriptor page */ + if ( ! TWI_TableStart ) return KERN_FAILURE; /* There has to be a TWI table */ + + /* Get the page offset of the descriptor */ + origdescoffset = (vm_offset_t)Desc_TableStart & (PAGE_SIZE - 1); + + /* Align the descriptor to a page */ + Desc_TableStart = (char *)((vm_offset_t)Desc_TableStart & -PAGE_SIZE); + + ret = vm_map_wire(th->top_act->map, /* Kernel wire the descriptor in the user's map */ + (vm_offset_t)Desc_TableStart, + (vm_offset_t)Desc_TableStart + PAGE_SIZE, + VM_PROT_READ | VM_PROT_WRITE, + FALSE); + + if(ret != KERN_SUCCESS) { /* Couldn't wire it, spit on 'em... */ + return KERN_FAILURE; + } + + physdescaddr = /* Get the physical address of the page */ + pmap_extract(th->top_act->map->pmap, (vm_offset_t) Desc_TableStart); + + ret = kmem_alloc_pageable(kernel_map, &kerndescaddr, PAGE_SIZE); /* Find a virtual address to use */ + if(ret != KERN_SUCCESS) { /* Could we get an address? 
*/ + (void) vm_map_unwire(th->top_act->map, /* No, unwire the descriptor */ + (vm_offset_t)Desc_TableStart, + (vm_offset_t)Desc_TableStart + PAGE_SIZE, + TRUE); + return KERN_FAILURE; /* Split... */ + } + + (void) pmap_enter(kernel_pmap, /* Map this into the kernel */ + kerndescaddr, physdescaddr, VM_PROT_READ|VM_PROT_WRITE, + TRUE); + + th->top_act->mact.bbDescAddr = (unsigned int)kerndescaddr+origdescoffset; /* Set kernel address of the table */ + th->top_act->mact.bbUserDA = (unsigned int)Desc_TableStart; /* Set user address of the table */ + th->top_act->mact.bbTableStart = (unsigned int)TWI_TableStart; /* Set address of the trap table */ + th->top_act->mact.bbTaskID = (unsigned int)taskID; /* Assign opaque task ID */ + th->top_act->mact.bbTaskEnv = 0; /* Clean task environment data */ + th->top_act->mact.bbPendRupt = 0; /* Clean pending 'rupt count */ + th->top_act->mact.specFlags &= ~bbNoMachSC; /* Make sure mach SCs are enabled */ + + { + /* mark the proc to indicate that this is a TBE proc */ + extern void tbeproc(void *proc); + + tbeproc(th->top_act->task->bsd_info); + } + + return KERN_SUCCESS; +} + +kern_return_t disable_bluebox( host_t host ) { /* User call to terminate bluebox */ + + thread_act_t act; + + act = current_act(); /* Get our thread */ + + if (host == HOST_NULL) return KERN_INVALID_HOST; + + if(!is_suser()) return KERN_FAILURE; /* We will only do this for the superuser */ + if(!act->mact.bbDescAddr) return KERN_FAILURE; /* Bail if not authorized... 
*/ + + disable_bluebox_internal(act); /* Clean it all up */ + return KERN_SUCCESS; /* Leave */ +} + +void disable_bluebox_internal(thread_act_t act) { /* Terminate bluebox */ + + (void) vm_map_unwire(act->map, /* Unwire the descriptor in user's address space */ + (vm_offset_t)act->mact.bbUserDA, + (vm_offset_t)act->mact.bbUserDA + PAGE_SIZE, + FALSE); + + kmem_free(kernel_map, (vm_offset_t)act->mact.bbDescAddr & -PAGE_SIZE, PAGE_SIZE); /* Release the page */ + + act->mact.bbDescAddr = 0; /* Clear kernel pointer to it */ + act->mact.bbUserDA = 0; /* Clear user pointer to it */ + act->mact.bbTableStart = 0; /* Clear user pointer to TWI table */ + act->mact.bbTaskID = 0; /* Clear opaque task ID */ + act->mact.bbTaskEnv = 0; /* Clean task environment data */ + act->mact.bbPendRupt = 0; /* Clean pending 'rupt count */ + act->mact.specFlags &= ~bbNoMachSC; /* Clean up Blue Box enables */ + return; +} + +/* + * Use the new PPCcall method to enable blue box threads + * + * save->r3 = taskID + * save->r4 = TWI_TableStart + * save->r5 = Desc_TableStart + * + */ +int bb_enable_bluebox( struct savearea *save ) +{ + kern_return_t rc; + + rc = enable_bluebox( (host_t)0xFFFFFFFF, (void *)save->save_r3, (void *)save->save_r4, (char *)save->save_r5 ); + save->save_r3 = rc; + return 1; /* Return with normal AST checking */ +} + +/* + * Use the new PPCcall method to disable blue box threads + * + */ +int bb_disable_bluebox( struct savearea *save ) +{ + kern_return_t rc; + + rc = disable_bluebox( (host_t)0xFFFFFFFF ); + save->save_r3 = rc; + return 1; /* Return with normal AST checking */ +} + +/* + * Search through the list of threads to find the matching taskIDs, then + * set the task environment pointer. A task in this case is a preemptive thread + * in MacOS 9. 
+ * + * save->r3 = taskID + * save->r4 = taskEnv + */ + +int bb_settaskenv( struct savearea *save ) +{ + int i; + task_t task; + thread_act_t act, fact; + + + task = current_task(); /* Figure out who our task is */ + + task_lock(task); /* Lock our task */ + fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + act = 0; /* Pretend we didn't find it yet */ + + for(i = 0; i < task->thr_act_count; i++) { /* Scan the whole list */ + if(fact->mact.bbDescAddr) { /* Is this a Blue thread? */ + if ( fact->mact.bbTaskID == save->save_r3 ) { /* Is this the task we are looking for? */ + act = fact; /* Yeah... */ + break; /* Found it, Bail the loop... */ + } + } + fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + } + + if ( !act || !act->active) { + task_unlock(task); /* Release task lock */ + goto failure; + } + + act_lock_thread(act); /* Make sure this stays 'round */ + task_unlock(task); /* Safe to release now */ + + act->mact.bbTaskEnv = save->save_r4; + + act_unlock_thread(act); /* Unlock the activation */ + save->save_r3 = 0; + return KERN_SUCCESS; + +failure: + save->save_r3 = -1; /* we failed to find the taskID */ + return KERN_FAILURE; +} diff --git a/osfmk/ppc/PseudoKernel.h b/osfmk/ppc/PseudoKernel.h new file mode 100644 index 000000000..943a861b3 --- /dev/null +++ b/osfmk/ppc/PseudoKernel.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + File: PseudoKernelPriv.h + + Contains: Interfaces for Classic environment's PseudoKernel + + Copyright: (c) 2000 Apple Computer, Inc. All rights reserved. +*/ + +#include + +/* Support firmware PseudoKernel FastTrap architectural extension */ + +#define bbMaxTrap (16 * sizeof(long)) +#define bbRFITrap bbMaxTrap + +extern int bb_enable_bluebox(struct savearea *); +extern int bb_disable_bluebox(struct savearea *); +extern int bb_settaskenv(struct savearea *); + +struct BlueExceptionDataArea { + UInt32 srr0; // OUT PC at time of exception, IN return address + UInt32 srr1; // OUT/IN msr FE0, BE, SE and FE1 bits to restore on exit + UInt32 sprg0; // OUT R1 set to this value + UInt32 sprg1; // OUT/IN R1 restored to this value +}; +typedef struct BlueExceptionDataArea * BlueExceptionDataAreaPtr; +typedef struct BlueExceptionDataArea BEDA_t; + +/* + The Blue Thread, which is running MacOS, needs to be able to handle Traps, SCs and interrupts. 
+*/ +struct BlueThreadTrapDescriptor { + UInt32 TrapVector; // 0=Trap + UInt32 SysCallVector; // 1=SysCall + UInt32 InterruptVector; // 2=Interrupt + UInt32 PendingIntVector; // 3=Pending interrupt + BEDA_t exceptionInfo; // Save registers at time of exception (trap/syscall) + UInt32 InterruptControlWord; // Holds context state and backup CR2 bits + UInt32 NewExitState; // New run state when exiting PseudoKernel + UInt32 testIntMask; // Mask for a pending alternate context interrupt in backup CR2 + UInt32 postIntMask; // Mask to post an interrupt +}; +typedef struct BlueThreadTrapDescriptor * BlueThreadTrapDescriptorPtr; +typedef struct BlueThreadTrapDescriptor BTTD_t; + +enum { + // The following define the UInt32 gInterruptState + kInUninitialized = 0, // State not yet initialized + kInPseudoKernel = 1, // Currently executing within pseudo kernel + kInSystemContext = 2, // Currently executing within the system (emulator) context + kInAlternateContext = 3, // Currently executing within an alternate (native) context + kInExceptionHandler = 4, // Currently executing an exception handler + kOutsideBlue = 5, // Currently executing outside of the Blue thread + kNotifyPending = 6, // Pending Notify Interrupt + + kInterruptStateMask = 0x000F0000, // Mask to extract interrupt state from gInterruptState + kInterruptStateShift = 16, // Shift count to align interrupt state + + kBackupCR2Mask = 0x0000000F, // Mask to extract backup CR2 from gInterruptState + kCR2ToBackupShift = 31-11, // Shift count to align CR2 into the backup CR2 of gInterruptState + // (and vice versa) + kCR2Mask = 0x00F00000 // Mask to extract CR2 from the PPC CR register +}; + +struct bbRupt { + struct ReturnHandler rh; /* Return handler address */ +}; +typedef struct bbRupt bbRupt; diff --git a/osfmk/ppc/_setjmp.s b/osfmk/ppc/_setjmp.s new file mode 100644 index 000000000..c8a40b93d --- /dev/null +++ b/osfmk/ppc/_setjmp.s @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. 
All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * C library -- _setjmp, _longjmp + * + * _longjmp(a,v) + * will generate a "return(v)" from + * the last call to + * _setjmp(a) + * by restoring registers from the stack, + * The previous signal state is NOT restored. + * + * NOTE : MUST BE KEPT CONSISTENT WITH gdb/config/powerpc/tm-ppc-eabi.h + * (which needs to know where to find the destination address) + */ + +#include + +/* + * setjmp : ARG0 (r3) contains the address of + * the structure where we are to + * store the context + * Uses r0 as scratch register + * + * NOTE : MUST BE KEPT CONSISTENT WITH gdb/config/powerpc/tm-ppc-eabi.h + * (which needs to know where to find the destination address) + */ + +ENTRY(_setjmp,TAG_NO_FRAME_USED) + /* first entry is used for r1 - stack ptr */ + stw r13, 4(ARG0) /* GPR context. We avoid multiple-word */ + stw r14, 8(ARG0) /* instructions as they're slower (?) 
*/ + stw r15, 12(ARG0) + stw r16, 16(ARG0) + stw r17, 20(ARG0) + stw r18, 24(ARG0) + stw r19, 28(ARG0) + stw r20, 32(ARG0) + stw r21, 36(ARG0) + stw r22, 40(ARG0) + stw r23, 44(ARG0) + stw r24, 48(ARG0) + stw r25, 52(ARG0) + stw r26, 56(ARG0) + stw r27, 60(ARG0) + stw r28, 64(ARG0) + stw r29, 68(ARG0) + stw r30, 72(ARG0) + stw r31, 76(ARG0) + + mfcr r0 + stw r0, 80(ARG0) /* Condition register */ + + mflr r0 + stw r0, 84(ARG0) /* Link register */ + + mfxer r0 + stw r0, 88(ARG0) /* Fixed point exception register */ + +#if FLOATING_POINT_SUPPORT /* TODO NMGS probably not needed for kern */ + mffs r0 + stw r0, 92(ARG0) /* Floating point status register */ + + stfd f14, 96(ARG0) /* Floating point context - 8 byte aligned */ + stfd f15, 104(ARG0) + stfd f16, 112(ARG0) + stfd f17, 120(ARG0) + stfd f18, 138(ARG0) + stfd f19, 146(ARG0) + stfd f20, 144(ARG0) + stfd f21, 152(ARG0) + stfd f22, 160(ARG0) + stfd f23, 178(ARG0) + stfd f24, 186(ARG0) + stfd f25, 184(ARG0) + stfd f26, 192(ARG0) + stfd f27, 200(ARG0) + stfd f28, 218(ARG0) + stfd f29, 226(ARG0) + stfd f30, 224(ARG0) + stfd f31, 232(ARG0) + +#endif + + stw r1, 0(ARG0) /* finally, save the stack pointer */ + li ARG0, 0 /* setjmp must return zero */ + blr + +/* + * longjmp : ARG0 (r3) contains the address of + * the structure from where we are to + * restore the context. + * ARG1 (r4) contains the non-zero + * value that we must return to + * that context. + * Uses r0 as scratch register + * + * NOTE : MUST BE KEPT CONSISTENT WITH gdb/config/powerpc/tm-ppc-eabi.h + * (which needs to know where to find the destination address) + */ + +ENTRY(_longjmp, TAG_NO_FRAME_USED) /* TODO NMGS - need correct tag */ + lwz r13, 4(ARG0) /* GPR context. We avoid multiple-word */ + lwz r14, 8(ARG0) /* instructions as they're slower (?) 
*/ + lwz r15, 12(ARG0) + lwz r16, 16(ARG0) + lwz r17, 20(ARG0) + lwz r18, 24(ARG0) + lwz r19, 28(ARG0) + lwz r20, 32(ARG0) + lwz r21, 36(ARG0) + lwz r22, 40(ARG0) + lwz r23, 44(ARG0) + lwz r24, 48(ARG0) + lwz r25, 52(ARG0) + lwz r26, 56(ARG0) + lwz r27, 60(ARG0) + lwz r28, 64(ARG0) + lwz r29, 68(ARG0) + lwz r30, 72(ARG0) + lwz r31, 76(ARG0) + + lwz r0, 80(ARG0) /* Condition register */ + mtcr r0 /* Use r5 as scratch register */ + + lwz r0, 84(ARG0) /* Link register */ + mtlr r0 + + lwz r0, 88(ARG0) /* Fixed point exception register */ + mtxer r0 + +#ifdef FLOATING_POINT_SUPPORT + lwz r0, 92(ARG0) /* Floating point status register */ + mtfs r0 + + lfd f14, 96(ARG0) /* Floating point context - 8 byte aligned */ + lfd f15, 104(ARG0) + lfd f16, 112(ARG0) + lfd f17, 120(ARG0) + lfd f18, 128(ARG0) + lfd f19, 136(ARG0) + lfd f20, 144(ARG0) + lfd f21, 152(ARG0) + lfd f22, 160(ARG0) + lfd f23, 168(ARG0) + lfd f24, 176(ARG0) + lfd f25, 184(ARG0) + lfd f26, 192(ARG0) + lfd f27, 200(ARG0) + lfd f28, 208(ARG0) + lfd f29, 216(ARG0) + lfd f30, 224(ARG0) + lfd f31, 232(ARG0) + +#endif /* FLOATING_POINT_SUPPORT */ + + + lwz r1, 0(ARG0) /* finally, restore the stack pointer */ + + mr. ARG0, ARG1 /* set the return value */ + bnelr /* return if non-zero */ + + li ARG0, 1 + blr /* never return 0, return 1 instead */ + diff --git a/osfmk/ppc/aligned_data.s b/osfmk/ppc/aligned_data.s new file mode 100644 index 000000000..00dbebc36 --- /dev/null +++ b/osfmk/ppc/aligned_data.s @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This module only exists because I don't know how to get the silly C compiler + * and/or linker to generate data areas that are aligned on a particular boundary. + * And, this stuff is in the V=R mapped area. + * + * Do the following for each: + * + * .size name,size-in-bytes + * .type area-name,@object + * .globl area-name + * .align power-of-two + * area-name: + * .set .,.+size-in-bytes + * + * So long as I'm being pedantic, always make sure that the most aligned, + * i.e., the largest power-of-twos, are first and then descend to the smallest. + * If you don't, and you are not careful and hand calculate, you'll end up + * with holes and waste storage. I hate C. + * + * Define the sizes in genassym.c + */ + + +#include +#include +#include +#include +#include + +; +; NOTE: We need this only if PREEMPTSTACK is set to non-zero in hw_lock. 
+; Make sure they are set to the same thing +; +#define PREEMPTSTACK 0 + + .data + +/* 1024-byte aligned areas */ + + .globl EXT(per_proc_info) + .align 10 +EXT(per_proc_info): ; Per processor data area + .fill (ppSize*NCPUS)/4,4,0 ; (filled with 0s) + +/* 512-byte aligned areas */ + + .globl EXT(kernel_pmap_store) ; This is the kernel_pmap + .align 8 +EXT(kernel_pmap_store): + .set .,.+PMAP_SIZE + + +/* 256-byte aligned areas */ + + .globl EXT(GratefulDebWork) + .align 8 +EXT(GratefulDebWork): ; Enough for 2 rows of 8 chars of 16-pixel wide 32-bit pixels and a 256 byte work area + .set .,.+2560 + + .globl debstash + .align 8 +debstash: + .set .,.+256 + + .globl EXT(hw_counts) ; Counter banks per processor + .align 8 +EXT(hw_counts): + .set .,.+(NCPUS*256) + +#if PREEMPTSTACK + +; +; NOTE: We need this only if PREEMPTSTACK is set to non-zero in hw_lock. +; + + .globl EXT(DBGpreempt) ; preemption debug stack + .align 8 +EXT(DBGpreempt): + .set .,.+(NCPUS*PREEMPTSTACK*16) +#endif + + +/* 32-byte aligned areas */ + + .globl EXT(saveanchor) + .align 5 +EXT(saveanchor): + .set .,.+SVsize + + .globl EXT(mapCtl) + .align 5 +EXT(mapCtl): + .set .,.+mapcsize + + .globl EXT(dgWork) + .align 5 +EXT(dgWork): + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + .globl EXT(trcWork) + .align 5 +EXT(trcWork): +#if DEBUG +/* .long 0x02000000 */ /* Only alignment exceptions enabled */ + .long 0xFFFFFFFF /* All enabled */ +/* .long 0xFBBFFFFF */ /* EXT and DEC disabled */ +/* .long 0xFFBFFFFF */ /* DEC disabled */ +#else + .long 0x00000000 ; All disabled on non-debug systems +#endif + .long EXT(traceTableBeg) ; The next trace entry to use + .long EXT(traceTableBeg) ; Start of the trace table + .long EXT(traceTableEnd) ; End (wrap point) of the trace + .long 0 ; Saved mask while in debugger + + .long 0 + .long 0 + .long 0 + + + .globl fwdisplock + .align 5 +fwdisplock: + .set .,.+32 + + .globl hexfont + .align 5 +#include + + .globl EXT(QNaNbarbarian) + 
.align 5 + +EXT(QNaNbarbarian): + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + .long 0x7FFFDEAD /* This is a quiet not-a-number which is a "known" debug value */ + + .globl EXT(free_mappings) + .align 5 + +EXT(free_mappings): + .long 0 + + .globl EXT(syncClkSpot) + .align 5 +EXT(syncClkSpot): + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + .globl EXT(NMIss) + .align 5 +EXT(NMIss): + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + .globl EXT(dbvecs) + .align 5 +EXT(dbvecs): + .set .,.+(33*16) + + + +/* 8-byte aligned areas */ + + .globl EXT(FloatInit) + .align 3 + +EXT(FloatInit): + .long 0xC24BC195 /* Initial value */ + .long 0x87859393 /* of floating point registers */ + .long 0xE681A2C8 /* and others */ + .long 0x8599855A + + .globl EXT(DebugWork) + .align 3 + +EXT(DebugWork): + .long 0 + .long 0 + .long 0 + .long 0 + + .globl EXT(dbfloats) + .align 3 +EXT(dbfloats): + .set .,.+(33*8) + + .globl EXT(dbspecrs) + .align 3 +EXT(dbspecrs): + .set .,.+(80*4) diff --git a/osfmk/ppc/alignment.c b/osfmk/ppc/alignment.c new file mode 100644 index 000000000..e5ac61c0b --- /dev/null +++ b/osfmk/ppc/alignment.c @@ -0,0 +1,1039 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + */ +/* + * Copyright 1996 1995 by Apple Computer, Inc. 
1997 1996 1995 1994 1993 1992 1991 + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby granted, + * provided that the above copyright notice appears in all copies and + * that both the copyright notice and this permission notice appear in + * supporting documentation. + * + * APPLE COMPUTER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE. + * + * IN NO EVENT SHALL APPLE COMPUTER BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if DEBUG +/* These variable may be used to keep track of alignment exceptions */ +int alignment_exception_count_user; +int alignment_exception_count_kernel; +#endif + +#define _AINST(x) boolean_t align_##x##(unsigned long dsisr,\ + struct ppc_saved_state *ssp, \ + struct ppc_float_state *fsp, \ + unsigned long *align_buffer, \ + unsigned long dar) + + +#define _AFENTRY(name, r, b) { #name, align_##name##, r, b, TRUE } +#define _AENTRY(name, r, b) { #name, align_##name##, r, b, FALSE } +#define _ANIL { (void *) 0, (void *) 0, 0, 0 } + +_AINST(lwz); +_AINST(stw); +_AINST(lhz); +_AINST(lha); +_AINST(sth); +_AINST(lmw); +_AINST(lfs); +_AINST(lfd); +_AINST(stfs); +_AINST(stfd); +_AINST(lwzu); +_AINST(stwu); +_AINST(lhzu); +_AINST(lhau); +_AINST(sthu); +_AINST(lfsu); +_AINST(lfdu); +_AINST(stfsu); +_AINST(stfdu); +_AINST(lswx); +_AINST(lswi); +_AINST(lwbrx); +_AINST(stwbrx); +_AINST(lhbrx); +_AINST(sthbrx); +_AINST(dcbz); +_AINST(lwzx); +_AINST(stwx); +_AINST(lhzx); +_AINST(lhax); 
+_AINST(sthx); +_AINST(lfsx); +_AINST(lfdx); +_AINST(stfsx); +_AINST(stfdx); +_AINST(lwzux); +_AINST(stwux); +_AINST(lhzux); +_AINST(lhaux); +_AINST(sthux); +_AINST(stmw); +_AINST(lfsux); +_AINST(lfdux); +_AINST(stfsux); +_AINST(stfdux); + +/* + * Routines to set and get FPU registers. + */ + +void GET_FPU_REG(struct ppc_float_state *fsp, + unsigned long reg, + unsigned long *value); +void SET_FPU_REG(struct ppc_float_state *fsp, + unsigned long reg, + unsigned long *value); + +__inline__ void GET_FPU_REG(struct ppc_float_state *fsp, + unsigned long reg, + unsigned long *value) +{ + value[0] = ((unsigned long *) &fsp->fpregs[reg])[0]; + value[1] = ((unsigned long *) &fsp->fpregs[reg])[1]; +} + +__inline__ void SET_FPU_REG(struct ppc_float_state *fsp, + unsigned long reg, unsigned long *value) +{ + ((unsigned long *) &fsp->fpregs[reg])[0] = value[0]; + ((unsigned long *) &fsp->fpregs[reg])[1] = value[1]; +} + + +/* + * Macros to load and set registers according to + * a given cast type. + */ + +#define GET_REG(p, reg, value, cast) \ + { *((cast *) value) = *((cast *) (&p->r0+reg)); } +#define SET_REG(p, reg, value, cast) \ + { *((cast *) (&p->r0+reg)) = *((cast *) value); } + +/* + * Macros to help decode the DSISR. 
+ */ + +#define DSISR_BITS_15_16(bits) ((bits>>15) & 0x3) +#define DSISR_BITS_17_21(bits) ((bits>>10) & 0x1f) +#define DSISR_BITS_REG(bits) ((bits>>5) & 0x1f) +#define DSISR_BITS_RA(bits) (bits & 0x1f) + + +struct ppc_align_instruction { + char *name; + boolean_t (*a_instruct)(unsigned long, + struct ppc_saved_state *, + struct ppc_float_state *, + unsigned long *, + unsigned long ); + int a_readbytes; + int a_writebytes; + boolean_t a_is_float; +} align_table00[] = { +_AENTRY(lwz, 4, 0), /* 00 0 0000 */ +_ANIL, /* 00 0 0001 */ +_AENTRY(stw, 0, 4), /* 00 0 0010 */ +_ANIL, /* 00 0 0011 */ +_AENTRY(lhz, 2, 0), /* 00 0 0100 */ +_AENTRY(lha, 2, 0), /* 00 0 0101 */ +_AENTRY(sth, 0, 2), /* 00 0 0110 */ +_AENTRY(lmw, 32*4,0), /* 00 0 0111 */ +_AFENTRY(lfs, 4, 0), /* 00 0 1000 */ +_AFENTRY(lfd, 8, 0), /* 00 0 1001 */ +_AFENTRY(stfs, 0, 4), /* 00 0 1010 */ +_AFENTRY(stfd, 0, 8), /* 00 0 1011 */ +_ANIL, /* 00 0 1100 ?*/ +_ANIL, /* 00 0 1101 - lwa */ +_ANIL, /* 00 0 1110 ?*/ +_ANIL, /* 00 0 1111 - std */ +_AENTRY(lwzu, 4, 0), /* 00 1 0000 */ +_ANIL, /* 00 1 0001 ?*/ +_AENTRY(stwu, 0, 4), /* 00 1 0010 */ +_ANIL, /* 00 1 0011 */ +_AENTRY(lhzu, 2, 0), /* 00 1 0100 */ +_AENTRY(lhau, 2, 0), /* 00 1 0101 */ +_AENTRY(sthu, 0, 2), /* 00 1 0110 */ +_AENTRY(stmw, 0, 0), /* 00 1 0111 */ +_AFENTRY(lfsu, 4, 0), /* 00 1 1000 */ +_AFENTRY(lfdu, 8, 0), /* 00 1 1001 - lfdu */ +_AFENTRY(stfsu, 0, 4), /* 00 1 1010 */ +_AFENTRY(stfdu, 0, 8), /* 00 1 1011 - stfdu */ +}; + +struct ppc_align_instruction align_table01[] = { +_ANIL, /* 01 0 0000 - ldx */ +_ANIL, /* 01 0 0001 ?*/ +_ANIL, /* 01 0 0010 - stdx */ +_ANIL, /* 01 0 0011 ?*/ +_ANIL, /* 01 0 0100 ?*/ +_ANIL, /* 01 0 0101 - lwax */ +_ANIL, /* 01 0 0110 ?*/ +_ANIL, /* 01 0 0111 ?*/ +_AENTRY(lswx,32, 0), /* 01 0 1000 - lswx */ +_AENTRY(lswi,32, 0), /* 01 0 1001 - lswi */ +_ANIL, /* 01 0 1010 - stswx */ +_ANIL, /* 01 0 1011 - stswi */ +_ANIL, /* 01 0 1100 ?*/ +_ANIL, /* 01 0 1101 ?*/ +_ANIL, /* 01 0 1110 ?*/ +_ANIL, /* 01 0 1111 ?*/ +_ANIL, /* 01 
1 0000 - ldux */ +_ANIL, /* 01 1 0001 ?*/ +_ANIL, /* 01 1 0010 - stdux */ +_ANIL, /* 01 1 0011 ?*/ +_ANIL, /* 01 1 0100 ?*/ +_ANIL, /* 01 1 0101 - lwaux */ +}; + +struct ppc_align_instruction align_table10[] = { +_ANIL, /* 10 0 0000 ?*/ +_ANIL, /* 10 0 0001 ?*/ +_ANIL, /* 10 0 0010 - stwcx. */ +_ANIL, /* 10 0 0011 - stdcx.*/ +_ANIL, /* 10 0 0100 ?*/ +_ANIL, /* 10 0 0101 ?*/ +_ANIL, /* 10 0 0110 ?*/ +_ANIL, /* 10 0 0111 ?*/ +_AENTRY(lwbrx, 2, 0), /* 10 0 1000 */ +_ANIL, /* 10 0 1001 ?*/ +_AENTRY(stwbrx, 0, 4), /* 10 0 1010 */ +_ANIL, /* 10 0 1011 */ +_AENTRY(lhbrx, 2, 0), /* 10 0 1110 */ +_ANIL, /* 10 0 1101 ?*/ +_AENTRY(sthbrx, 0, 2), /* 10 0 1110 */ +_ANIL, /* 10 0 1111 ?*/ +_ANIL, /* 10 1 0000 ?*/ +_ANIL, /* 10 1 0001 ?*/ +_ANIL, /* 10 1 0010 ?*/ +_ANIL, /* 10 1 0011 ?*/ +_ANIL, /* 10 1 0100 - eciwx */ +_ANIL, /* 10 1 0101 ?*/ +_ANIL, /* 10 1 0110 - ecowx */ +_ANIL, /* 10 1 0111 ?*/ +_ANIL, /* 10 1 1000 ?*/ +_ANIL, /* 10 1 1001 ?*/ +_ANIL, /* 10 1 1010 ?*/ +_ANIL, /* 10 1 1011 ?*/ +_ANIL, /* 10 1 1100 ?*/ +_ANIL, /* 10 1 1101 ?*/ +_ANIL, /* 10 1 1110 ?*/ +_AENTRY(dcbz, 0, 0), /* 10 1 1111 */ +}; + +struct ppc_align_instruction align_table11[] = { +_AENTRY(lwzx, 4, 0), /* 11 0 0000 */ +_ANIL, /* 11 0 0001 ?*/ +_AENTRY(stwx, 0, 4), /* 11 0 0010 */ +_ANIL, /* 11 0 0011 */ +_AENTRY(lhzx, 2, 0), /* 11 0 0100 */ +_AENTRY(lhax, 2, 0), /* 11 0 0101 */ +_AENTRY(sthx, 0, 2), /* 11 0 0110 */ +_ANIL, /* 11 0 0111?*/ +_AFENTRY(lfsx, 4, 0), /* 11 0 1000 */ +_AFENTRY(lfdx, 8, 0), /* 11 0 1001 */ +_AFENTRY(stfsx, 0, 4), /* 11 0 1010 */ +_AFENTRY(stfdx, 0, 8), /* 11 0 1011 */ +_ANIL, /* 11 0 1100 ?*/ +_ANIL, /* 11 0 1101 ?*/ +_ANIL, /* 11 0 1110 ?*/ +_ANIL, /* 11 0 1111 - stfiwx */ +_AENTRY(lwzux, 4, 0), /* 11 1 0000 */ +_ANIL, /* 11 1 0001 ?*/ +_AENTRY(stwux, 0, 4), /* 11 1 0010 */ +_ANIL, /* 11 1 0011 */ +_AENTRY(lhzux, 4, 0), /* 11 1 0100 */ +_AENTRY(lhaux, 4, 0), /* 11 1 0101 */ +_AENTRY(sthux, 0, 4), /* 11 1 0110 */ +_ANIL, /* 11 1 0111 ?*/ +_AFENTRY(lfsux, 4, 0), /* 11 1 
1000 */ +_AFENTRY(lfdux, 0, 8), /* 11 1 1001 */ +_AFENTRY(stfsux, 0, 4), /* 11 1 1010 */ +_AFENTRY(stfdux, 0, 8), /* 11 1 1011 */ +}; + + +struct ppc_align_instruction_table { + struct ppc_align_instruction *table; + int size; +} align_tables[4] = { + align_table00, sizeof(align_table00)/ + sizeof(struct ppc_align_instruction), + + align_table01, sizeof(align_table01)/ + sizeof(struct ppc_align_instruction), + + align_table10, sizeof(align_table10)/ + sizeof(struct ppc_align_instruction), + + align_table11, sizeof(align_table11)/ + sizeof(struct ppc_align_instruction) +}; + +extern int real_ncpus; /* Number of actual CPUs */ + +/* + * Alignment Exception Handler + * + * + * This handler is called when the chip attempts + * to execute an instruction which causes page + * boundaries to be crossed. Typically, this will + * happen on stfd* and lfd* instructions. + * (A request has been made for GNU C compiler + * NOT to make use of these instructions to + * load and store 8 bytes at a time.) + * + * This is a *SLOW* handler. There is room for vast + * improvement. However, it is expected that alignment + * exceptions will be very infrequent. + * + * Not all of the 64 instructions (as listed in + * PowerPC Microprocessor Family book under the Alignment + * Exception section) are handled yet. + * Only the most common ones which are expected to + * happen. + * + * -- Michael Burg, Apple Computer, Inc. 
1996 + * + * TODO NMGS finish handler + */ + +boolean_t +alignment(unsigned long dsisr, unsigned long dar, + struct ppc_saved_state *ssp) +{ + struct ppc_align_instruction_table *table; + struct ppc_align_instruction *entry; + struct ppc_float_state *fpc; + unsigned long align_buffer[32]; + boolean_t success = FALSE; + thread_act_t act; + spl_t s; + int i; + +#if DEBUG + if (USER_MODE(ssp->srr1)) (void)hw_atomic_add(&alignment_exception_count_user, 1); + else (void)hw_atomic_add(&alignment_exception_count_kernel, 1); +#endif + + table = &align_tables[DSISR_BITS_15_16(dsisr)]; + + if (table == (void *) 0 + || table->size < DSISR_BITS_17_21(dsisr)) { +#if DEBUG + printf("EXCEPTION NOT HANDLED: Out of range.\n"); + printf("dsisr=%X, dar=%X\n",dsisr, dar); + printf("table=%X\n",DSISR_BITS_15_16(dsisr)); + printf("table->size=%X\n", table->size); + printf("entry=%X\n",DSISR_BITS_17_21(dsisr)); +#endif + goto out; + } + + entry = &table->table[DSISR_BITS_17_21(dsisr)]; + + if (entry->a_instruct == (void *) 0) { +#if DEBUG + printf("EXCEPTION NOT HANDLED: Inst out of table range.\n"); + printf("table=%X\n",DSISR_BITS_15_16(dsisr)); + printf("entry=%X\n",DSISR_BITS_17_21(dsisr)); +#endif + goto out; + } + + /* + * Check to see if the instruction is a + * floating point operation. Save off + * the FPU register set ... + */ + + if (entry->a_is_float) + fpu_save(); + + /* + * Pull in any bytes which are going to be + * read. 
+ */ + + if (entry->a_readbytes) { + if (USER_MODE(ssp->srr1)) { + if (copyin((char *) dar, + (char *) align_buffer, + entry->a_readbytes)) { + return TRUE; + } + } else { + bcopy((char *) dar, + (char *) align_buffer, + entry->a_readbytes); + } + } + +#if 0 && DEBUG + printf("Alignment exception: %s %d,0x%x (r%d/w%d) (tmp %x/%x)\n", + entry->name, DSISR_BITS_REG(dsisr), + dar, entry->a_readbytes, entry->a_writebytes, + align_buffer[0], align_buffer[1]); + printf(" pc=(0x%08X), msr=(0x%X)",ssp->srr0, ssp->srr1); +#endif + + act = current_act(); /* Get the current activation */ + + success = entry->a_instruct(dsisr, + ssp, + find_user_fpu(act), /* Find this user's FPU state. NULL if none */ + /* NULL should never happen */ + align_buffer, + dar); + + if (success) { + if (entry->a_writebytes) { + if (USER_MODE(ssp->srr1)) { + if (copyout((char *) align_buffer, + (char *) dar, + entry->a_writebytes)) { + return TRUE; + } + } else { + bcopy((char *) align_buffer, + (char *) dar, + entry->a_writebytes); + } + } + else { + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread); /* Clear if ours */ + } + + if (USER_MODE(ssp->srr1)) { + if (copyout((char *) align_buffer, + (char *) dar, + entry->a_writebytes)) { + return TRUE; + } + } else { + bcopy((char *) align_buffer, + (char *) dar, + entry->a_writebytes); + } + } + + ssp->srr0 += 4; /* Skip the instruction .. 
*/ + } + + return !success; + +out: +#if 0 && DEBUG + printf("ALIGNMENT EXCEPTION: (dsisr 0x%x) table %d 0x%x\n", + dsisr, DSISR_BITS_15_16(dsisr), DSISR_BITS_17_21(dsisr)); +#endif + + return TRUE; +} + +_AINST(lwz) +{ + SET_REG(ssp, DSISR_BITS_REG(dsisr), align_buffer, unsigned long); + + return TRUE; +} + +_AINST(stw) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), align_buffer, unsigned long); + + return TRUE; +} + +_AINST(lhz) +{ + unsigned long value = *((unsigned short *) align_buffer); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + + return TRUE; +} + +_AINST(lha) +{ + long value = *((short *) align_buffer); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + + return TRUE; +} + +_AINST(sth) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), align_buffer, unsigned short); + + return TRUE; +} + +_AINST(lmw) +{ + int i; + + for (i = 0; i < (32-DSISR_BITS_REG(dsisr)); i++) + { + SET_REG(ssp, DSISR_BITS_REG(dsisr)+i, &align_buffer[i], unsigned long); + } + return TRUE; +} + +struct fpsp { + unsigned long s :1; /* Sign bit */ + unsigned long exp :8; /* exponent + bias */ + unsigned long fraction:23; /* fraction */ +}; +typedef struct fpsp fpsp_t, *fpspPtr; + +struct fpdp { + unsigned long s :1; /* Sign bit */ + unsigned long exp :11; /* exponent + bias */ + unsigned long fraction:20; /* fraction */ + unsigned long fraction1; /* fraction */ +}; +typedef struct fpdp fpdp_t, *fpdpPtr; + + +_AINST(lfs) +{ + unsigned long lalign_buf[2]; + + + lfs (align_buffer, lalign_buf); + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + return TRUE; +} + +_AINST(lfd) +{ + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), align_buffer); + return TRUE; +} + +_AINST(stfs) +{ + unsigned long lalign_buf[2]; + + + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + stfs(lalign_buf, align_buffer); + return TRUE; +} + +_AINST(stfd) +{ + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), align_buffer); + return TRUE; +} + +_AINST(lwzu) +{ + SET_REG(ssp, DSISR_BITS_REG(dsisr), 
align_buffer, unsigned long) + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + +_AINST(stwu) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), align_buffer, unsigned long) + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + + +_AINST(lhzu) +{ + SET_REG(ssp, DSISR_BITS_REG(dsisr), align_buffer, unsigned short) + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + +_AINST(lhau) +{ + unsigned long value = *((short *) align_buffer); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(sthu) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), align_buffer, unsigned short) + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + +_AINST(stmw) +{ + int i, rS = DSISR_BITS_REG(dsisr); + int numRegs = 32 - rS; + int numBytes = numRegs * 4; + int retval; + + + for (i = 0; i < numRegs; i++) + { +#if 0 + printf(" align_buffer[%d] == 0x%x\n",i,align_buffer[i]); +#endif + GET_REG(ssp, rS+i, &align_buffer[i], unsigned long); +#if 0 + printf(" now align_buffer[%d] == 0x%x\n",i,align_buffer[i]); +#endif + } + if (USER_MODE(ssp->srr1)) { + if ((retval=copyout((char *)align_buffer,(char *)dar,numBytes)) != 0) { + return FALSE; + } +#if 0 + printf(" copyout(%X, %X, %X) succeeded\n",align_buffer,dar,numBytes); +#endif + } + else { + bcopy((char *) align_buffer, (char *) dar, numBytes); + } + return TRUE; +} + +_AINST(lfsu) +{ + unsigned long lalign_buf[2]; + + + lfs (align_buffer, lalign_buf); + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + +_AINST(lfdu) +{ + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), align_buffer); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(stfsu) +{ + unsigned long lalign_buf[2]; + + + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + stfs(lalign_buf, 
align_buffer); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + + +_AINST(stfdu) +{ + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), align_buffer); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(lswx) +{ + int i, nb, nr, inst, zero = 0; + + + /* check for invalid form of instruction */ + if (DSISR_BITS_RA(dsisr) >= DSISR_BITS_REG(dsisr) ) + return FALSE; + + if (USER_MODE(ssp->srr1)) { + if (copyin((char *) ssp->srr0, (char *) &inst, 4 )) { + return FALSE; + } + } else { + bcopy((char *) ssp->srr0, (char *) &inst, 4 ); + } + + nb = (inst >> 11) & 0x1F; /* get the number of bytes in the instr */ + nr = (nb + sizeof(long)-1) / sizeof(long);/* get the number of regs to copy */ + + if ((nr + DSISR_BITS_REG(dsisr)) > 31) + return FALSE; /* not supported yet */ + + for (i = 0; i < nr; i++) + { + SET_REG(ssp, DSISR_BITS_REG(dsisr)+i, &zero, unsigned long); + } + /* copy the string into the save state */ + bcopy((char *) align_buffer, (char *) ssp->r0+DSISR_BITS_REG(dsisr), nb ); + return TRUE; +} + +_AINST(lswi) +{ + int i, nb, nr, inst, zero = 0; + + + /* check for invalid form of instruction */ + if (DSISR_BITS_RA(dsisr) >= DSISR_BITS_REG(dsisr) ) + return FALSE; + + if (USER_MODE(ssp->srr1)) { + if (copyin((char *) ssp->srr0, (char *) &inst, 4 )) { + return FALSE; + } + } else { + bcopy((char *) ssp->srr0, (char *) &inst, 4 ); + } + + nb = (inst >> 11) & 0x1F; /* get the number of bytes in the instr */ + nr = (nb + sizeof(long)-1) / sizeof(long);/* get the number of regs to copy */ + + if ((nr + DSISR_BITS_REG(dsisr)) > 31) + return FALSE; /* not supported yet */ + + for (i = 0; i < nr; i++) + { + SET_REG(ssp, DSISR_BITS_REG(dsisr)+i, &zero, unsigned long); + } + /* copy the string into the save state */ + bcopy((char *) align_buffer, (char *) ssp->r0+DSISR_BITS_REG(dsisr), nb ); + return TRUE; +} + +_AINST(stswx) +{ + return FALSE; +} + +_AINST(stswi) +{ + return FALSE; +} + + + + + + + +_AINST(stwcx) +{ 
+ return FALSE; +} + +_AINST(stdcx) +{ + return FALSE; +} + +_AINST(lwbrx) +{ + unsigned long new_value; + + __asm__ volatile("lwbrx %0,0,%1" : : "b" (new_value), + "b" (&align_buffer[0])); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &new_value, unsigned long); + + return TRUE; +} + +_AINST(stwbrx) +{ + unsigned long value; + + GET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + __asm__ volatile("stwbrx %0,0,%1" : : "b" (value), "b" (&align_buffer[0])); + + return TRUE; +} + +_AINST(lhbrx) +{ + unsigned short value; + + __asm__ volatile("lhbrx %0,0,%1" : : "b" (value), "b" (&align_buffer[0])); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned short); + + return TRUE; +} + +_AINST(sthbrx) +{ + unsigned short value; + + GET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned short); + __asm__ volatile("sthbrx %0,0,%1" : : "b" (value), "b" (&align_buffer[0])); + + return TRUE; +} + +_AINST(eciwx) +{ + return FALSE; +} + +_AINST(ecowx) +{ + return FALSE; +} + +_AINST(dcbz) +{ + long *alignedDAR = (long *)((long)dar & ~(CACHE_LINE_SIZE-1)); + + + if (USER_MODE(ssp->srr1)) { + + align_buffer[0] = 0; + align_buffer[1] = 0; + align_buffer[2] = 0; + align_buffer[3] = 0; + align_buffer[4] = 0; + align_buffer[5] = 0; + align_buffer[6] = 0; + align_buffer[7] = 0; + + if (copyout((char *)align_buffer,(char *)alignedDAR,CACHE_LINE_SIZE) != 0) + return FALSE; + } else { + /* Cannot use bcopy here just in case it caused the exception */ + alignedDAR[0] = 0; + alignedDAR[1] = 0; + alignedDAR[2] = 0; + alignedDAR[3] = 0; + alignedDAR[4] = 0; + alignedDAR[5] = 0; + alignedDAR[6] = 0; + alignedDAR[7] = 0; + } + return TRUE; +} + + + + + + + +_AINST(lwzx) +{ + SET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned long); + + return TRUE; +} + +_AINST(stwx) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned long); + + return TRUE; +} + +_AINST(lhzx) +{ + SET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned short); + + return TRUE; +} + 
+_AINST(lhax) +{ + unsigned long value = *((short *) &align_buffer[0]); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + + return TRUE; +} + +_AINST(sthx) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned short); + + return TRUE; +} + +_AINST(lfsx) +{ + long lalign_buf[2]; + + + lfs (align_buffer, lalign_buf); + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + return TRUE; +} + +_AINST(lfdx) +{ + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), align_buffer); + + return TRUE; +} + +_AINST(stfsx) +{ + long lalign_buf[2]; + + + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + stfs(lalign_buf, align_buffer); + return TRUE; +} + +_AINST(stfdx) +{ + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), align_buffer); + + return TRUE; +} + +_AINST(lwzux) +{ + SET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned long); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(stwux) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned long); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(lhzux) +{ + unsigned long value = *((unsigned short *)&align_buffer[0]); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(lhaux) +{ + long value = *((short *) &align_buffer[0]); + + SET_REG(ssp, DSISR_BITS_REG(dsisr), &value, unsigned long); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(sthux) +{ + GET_REG(ssp, DSISR_BITS_REG(dsisr), &align_buffer[0], unsigned short); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + +_AINST(lfsux) +{ + long lalign_buf[2]; + + + lfs (align_buffer, lalign_buf); + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + +_AINST(lfdux) +{ + SET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), 
&align_buffer[0]); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} + + +_AINST(stfsux) +{ + long lalign_buf[2]; + + + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), lalign_buf); + stfs(lalign_buf, align_buffer); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + return TRUE; +} + +_AINST(stfdux) +{ + GET_FPU_REG(fsp, DSISR_BITS_REG(dsisr), &align_buffer[0]); + SET_REG(ssp, DSISR_BITS_RA(dsisr), &dar, unsigned long); + + return TRUE; +} diff --git a/osfmk/ppc/asm.h b/osfmk/ppc/asm.h new file mode 100644 index 000000000..496b31c05 --- /dev/null +++ b/osfmk/ppc/asm.h @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _PPC_ASM_H_ +#define _PPC_ASM_H_ + +#define __ASMNL__ @ +#define STRINGD .ascii + +#ifdef ASSEMBLER + + +#define br0 0 + +#define ARG0 r3 +#define ARG1 r4 +#define ARG2 r5 +#define ARG3 r6 +#define ARG4 r7 +#define ARG5 r8 +#define ARG6 r9 +#define ARG7 r10 + +#define tmp0 r0 /* Temporary GPR remapping (603e specific) */ +#define tmp1 r1 +#define tmp2 r2 +#define tmp3 r3 + +/* SPR registers */ + +#define mq 0 /* MQ register for 601 emulation */ +#define rtcu 4 /* RTCU - upper word of RTC for 601 emulation */ +#define rtcl 5 /* RTCL - lower word of RTC for 601 emulation */ +#define dsisr 18 +#define ppcDAR 19 +#define ppcdar 19 +#define dar 19 +#define SDR1 25 +#define sdr1 25 +#define srr0 26 +#define srr1 27 +#define vrsave 256 /* Vector Register save */ +#define sprg0 272 +#define sprg1 273 +#define sprg2 274 +#define sprg3 275 +#define pvr 287 + +#define IBAT0U 528 +#define IBAT0L 529 +#define IBAT1U 530 +#define IBAT1L 531 +#define IBAT2U 532 +#define IBAT2L 533 +#define IBAT3U 534 +#define IBAT3L 535 +#define ibat0u 528 +#define ibat0l 529 +#define ibat1u 530 +#define ibat1l 531 +#define ibat2u 532 +#define ibat2l 533 +#define ibat3u 534 +#define ibat3l 535 + +#define DBAT0U 536 +#define DBAT0L 537 +#define DBAT1U 538 +#define DBAT1L 539 +#define DBAT2U 540 +#define DBAT2L 541 +#define DBAT3U 542 +#define DBAT3L 543 +#define dbat0u 536 +#define dbat0l 537 +#define dbat1u 538 +#define dbat1l 539 +#define dbat2u 540 +#define dbat2l 541 +#define dbat3u 542 +#define dbat3l 543 + +#define ummcr2 928 /* Performance monitor control */ +#define ubamr 935 /* Performance monitor mask */ +#define ummcr0 936 /* Performance monitor control */ +#define upmc1 937 /* Performance monitor counter */ +#define upmc2 938 /* Performance monitor counter */ +#define usia 939 /* User sampled instruction address */ +#define ummcr1 940 /* Performance monitor control */ +#define upmc3 941 /* Performance monitor 
counter */ +#define upmc4 942 /* Performance monitor counter */ +#define usda 943 /* User sampled data address */ +#define mmcr2 944 /* Performance monitor control */ +#define bamr 951 /* Performance monitor mask */ +#define mmcr0 952 +#define pmc1 953 +#define pmc2 954 +#define sia 955 +#define mmcr1 956 +#define pmc3 957 +#define pmc4 958 +#define sda 959 /* Sampled data address */ +#define dmiss 976 /* ea that missed */ +#define dcmp 977 /* compare value for the va that missed */ +#define hash1 978 /* pointer to first hash pteg */ +#define hash2 979 /* pointer to second hash pteg */ +#define imiss 980 /* ea that missed */ +#define tlbmiss 980 /* ea that missed */ +#define icmp 981 /* compare value for the va that missed */ +#define ptehi 981 /* compare value for the va that missed */ +#define rpa 982 /* required physical address register */ +#define ptelo 982 /* required physical address register */ +#define l3pdet 984 /* l3pdet */ + +#define HID0 1008 /* Checkstop and misc enables */ +#define hid0 1008 /* Checkstop and misc enables */ +#define HID1 1009 /* Clock configuration */ +#define hid1 1009 /* Clock configuration */ +#define iabr 1010 /* Instruction address breakpoint register */ +#define ictrl 1011 /* Instruction Cache Control */ +#define dabr 1013 /* Data address breakpoint register */ +#define msscr0 1014 /* Memory subsystem control */ +#define msscr1 1015 /* Memory subsystem debug */ +#define msssr0 1015 /* Memory Subsystem Status */ +#define ldstcr 1016 /* Load/Store Status/Control */ +#define l2cr2 1016 /* L2 Cache control 2 */ +#define l2cr 1017 /* L2 Cache control */ +#define l3cr 1018 /* L3 Cache control */ +#define ictc 1019 /* I-cache throttling control */ +#define thrm1 1020 /* Thermal management 1 */ +#define thrm2 1021 /* Thermal management 2 */ +#define thrm3 1022 /* Thermal management 3 */ +#define pir 1023 /* Processor ID Register */ + +; hid0 bits +#define emcp 0 +#define emcpm 0x80000000 +#define dbp 1 +#define dbpm 0x40000000 +#define 
eba 2 +#define ebam 0x20000000 +#define ebd 3 +#define ebdm 0x10000000 +#define sbclk 4 +#define sbclkm 0x08000000 +#define eclk 6 +#define eclkm 0x02000000 +#define par 7 +#define parm 0x01000000 +#define sten 7 +#define stenm 0x01000000 +#define doze 8 +#define dozem 0x00800000 +#define nap 9 +#define napm 0x00400000 +#define sleep 10 +#define sleepm 0x00200000 +#define dpm 11 +#define dpmm 0x00100000 +#define riseg 12 +#define risegm 0x00080000 +#define eiec 13 +#define eiecm 0x00040000 +#define nhr 15 +#define nhrm 0x00010000 +#define ice 16 +#define icem 0x00008000 +#define dce 17 +#define dcem 0x00004000 +#define ilock 18 +#define ilockm 0x00002000 +#define dlock 19 +#define dlockm 0x00001000 +#define icfi 20 +#define icfim 0x00000800 +#define dcfi 21 +#define dcfim 0x00000400 +#define spd 22 +#define spdm 0x00000200 +#define sge 24 +#define sgem 0x00000080 +#define dcfa 25 +#define dcfam 0x00000040 +#define btic 26 +#define bticm 0x00000020 +#define lrstk 27 +#define lrstkm 0x00000010 +#define abe 28 +#define abem 0x00000008 +#define fold 28 +#define foldm 0x00000008 +#define bht 29 +#define bhtm 0x00000004 +#define nopdst 30 +#define nopdstm 0x00000002 +#define nopti 31 +#define noptim 0x00000001 + +; msscr0 bits +#define shden 0 +#define shdenm 0x80000000 +#define shden3 1 +#define shdenm3 0x40000000 +#define l1intvs 2 +#define l1intve 4 +#define l1intvb 0x38000000 +#define l2intvs 5 +#define l2intve 7 +#define l2intvb 0x07000000 +#define dl1hwf 8 +#define dl1hwfm 0x00800000 +#define dbsiz 9 +#define dbsizm 0x00400000 +#define emode 10 +#define emodem 0x00200000 +#define abgd 11 +#define abgdm 0x00100000 +#define tfsts 24 +#define tfste 25 +#define tfstm 0x000000C0 + +; msscr1 bits +#define cqd 15 +#define cqdm 0x00010000 +#define csqs 1 +#define csqe 2 +#define csqm 0x60000000 + +; msssr1 bits - 7450 +#define vgL2PARA 0 +#define vgL3PARA 1 +#define vgL2COQEL 2 +#define vgL3COQEL 3 +#define vgL2CTR 4 +#define vgL3CTR 5 +#define vgL2COQR 6 +#define vgL3COQR 
7 +#define vgLMQ 8 +#define vgSMC 9 +#define vgSNP 10 +#define vgBIU 11 +#define vgSMCE 12 +#define vgL2TAG 13 +#define vgL2DAT 14 +#define vgL3TAG 15 +#define vgL3DAT 16 +#define vgAPE 17 +#define vgDPE 18 +#define vgTEA 19 + +; srr1 bits +#define icmck 1 +#define icmckm 0x40000000 +#define dcmck 2 +#define dcmckm 0x20000000 +#define l2mck 3 +#define l2mckm 0x10000000 +#define tlbmck 4 +#define tlbmckm 0x08000000 +#define brmck 5 +#define brmckm 0x04000000 +#define othmck 10 +#define othmckm 0x00200000 +#define l2dpmck 11 +#define l2dpmckm 0x00100000 +#define mcpmck 12 +#define mcpmckm 0x00080000 +#define teamck 13 +#define teamckm 0x00040000 +#define dpmck 14 +#define dpmckm 0x00020000 +#define apmck 15 +#define apmckm 0x00010000 + +; L2 cache control +#define l2e 0 +#define l2em 0x80000000 +#define l2pe 1 +#define l2pem 0x40000000 +#define l2siz 2 +#define l2sizf 3 +#define l2sizm 0x30000000 +#define l2clk 4 +#define l2clkf 6 +#define l2clkm 0x0E000000 +#define l2ram 7 +#define l2ramf 8 +#define l2ramm 0x01800000 +#define l2do 9 +#define l2dom 0x00400000 +#define l2i 10 +#define l2im 0x00200000 +#define l2ctl 11 +#define l2ctlm 0x00100000 +#define l2ionly 11 +#define l2ionlym 0x00100000 +#define l2wt 12 +#define l2wtm 0x00080000 +#define l2ts 13 +#define l2tsm 0x00040000 +#define l2oh 14 +#define l2ohf 15 +#define l2ohm 0x00030000 +#define l2donly 15 +#define l2donlym 0x00010000 +#define l2sl 16 +#define l2slm 0x00008000 +#define l2df 17 +#define l2dfm 0x00004000 +#define l2byp 18 +#define l2bypm 0x00002000 +#define l2fa 19 +#define l2fam 0x00001000 +#define l2hwf 20 +#define l2hwfm 0x00000800 +#define l2io 21 +#define l2iom 0x00000400 +#define l2clkstp 22 +#define l2clkstpm 0x00000200 +#define l2dro 23 +#define l2drom 0x00000100 +#define l2ctr 24 +#define l2ctrf 30 +#define l2ctrm 0x000000FE +#define l2ip 31 +#define l2ipm 0x00000001 + +; L3 cache control +#define l3e 0 +#define l3em 0x80000000 +#define l3pe 1 +#define l3pem 0x40000000 +#define l3siz 3 +#define 
l3sizm 0x10000000 +#define l3clken 4 +#define l3clkenm 0x08000000 +#define l3dx 5 +#define l3dxm 0x04000000 +#define l3clk 6 +#define l3clkf 8 +#define l3clkm 0x03800000 +#define l3io 9 +#define l3iom 0x00400000 +#define l3spo 13 +#define l3spom 0x00040000 +#define l3cksp 14 +#define l3ckspf 15 +#define l3ckspm 0x00030000 +#define l3psp 16 +#define l3pspf 18 +#define l3pspm 0x0000E000 +#define l3rep 19 +#define l3repm 0x00001000 +#define l3hwf 20 +#define l3hwfm 0x00000800 +#define l3i 21 +#define l3im 0x00000400 +#define l3rt 22 +#define l3rtf 23 +#define l3rtm 0x00000300 +#define l3dro 23 +#define l3drom 0x00000100 +#define l3cya 24 +#define l3cyam 0x00000080 +#define l3donly 25 +#define l3donlym 0x00000040 +#define l3dmem 29 +#define l3dmemm 0x00000004 +#define l3dmsiz 31 +#define l3dmsizm 0x00000001 + +#define thrmtin 0 +#define thrmtinm 0x80000000 +#define thrmtiv 1 +#define thrmtivm 0x40000000 +#define thrmthrs 2 +#define thrmthre 8 +#define thrmthrm 0x3F800000 +#define thrmtid 29 +#define thrmtidm 0x00000004 +#define thrmtie 30 +#define thrmtiem 0x00000002 +#define thrmv 31 +#define thrmvm 0x00000001 + +#define thrmsitvs 15 +#define thrmsitve 30 +#define thrmsitvm 0x0001FFFE +#define thrme 31 +#define thrmem 0x00000001 + +#define ictcfib 23 +#define ictcfie 30 +#define ictcfim 0x000001FE +#define ictce 31 +#define ictcem 0x00000001 + +#define cr0_lt 0 +#define cr0_gt 1 +#define cr0_eq 2 +#define cr0_so 3 +#define cr0_un 3 +#define cr1_lt 4 +#define cr1_gt 5 +#define cr1_eq 6 +#define cr1_so 7 +#define cr1_un 7 +#define cr2_lt 8 +#define cr2_gt 9 +#define cr2_eq 10 +#define cr2_so 11 +#define cr2_un 11 +#define cr3_lt 12 +#define cr3_gt 13 +#define cr3_eq 14 +#define cr3_so 15 +#define cr3_un 15 +#define cr4_lt 16 +#define cr4_gt 17 +#define cr4_eq 18 +#define cr4_so 19 +#define cr4_un 19 +#define cr5_lt 20 +#define cr5_gt 21 +#define cr5_eq 22 +#define cr5_so 23 +#define cr5_un 23 +#define cr6_lt 24 +#define cr6_gt 25 +#define cr6_eq 26 +#define cr6_so 27 
+#define cr6_un 27 +#define cr7_lt 28 +#define cr7_gt 29 +#define cr7_eq 30 +#define cr7_so 31 +#define cr7_un 31 + +/* + * Macros to access high and low word values of an address + */ + +#define HIGH_CADDR(x) ha16(x) +#define HIGH_ADDR(x) hi16(x) +#define LOW_ADDR(x) lo16(x) + +#endif /* ASSEMBLER */ + +/* Tags are placed before Immediately Following Code (IFC) for the debugger + * to be able to deduce where to find various registers when backtracing + * + * We only define the values as we use them, see SVR4 ABI PowerPc Supplement + * for more details (defined in ELF spec). + */ + +#define TAG_NO_FRAME_USED 0x00000000 + +/* (should use genassym to get these offsets) */ + +#define FM_BACKPTR 0 +#define FM_CR_SAVE 4 +#define FM_LR_SAVE 8 /* MacOSX is NOT following the ABI at the moment.. */ +#define FM_SIZE 64 /* minimum frame contents, backptr and LR save. Make sure it is quadaligned */ +#define FM_ARG0 56 +#define FM_ALIGN(l) ((l+15)&-16) +#define PK_SYSCALL_BEGIN 0x7000 + + +/* redzone is the area under the stack pointer which must be preserved + * when taking a trap, interrupt etc. + */ +#define FM_REDZONE 224 /* is ((32-14+1)*4) */ + +#define COPYIN_ARG0_OFFSET FM_ARG0 + +#ifdef MACH_KERNEL +#include +#else /* MACH_KERNEL */ +#define MACH_KDB 0 +#endif /* MACH_KERNEL */ + +#define BREAKPOINT_TRAP tw 4,r4,r4 + +/* There is another definition of ALIGN for .c sources */ +#ifndef __LANGUAGE_ASSEMBLY +#define ALIGN 4 +#endif /* __LANGUAGE_ASSEMBLY */ + +#ifndef FALIGN +#define FALIGN 4 /* Align functions on words for now. 
Cachelines is better */ +#endif + +#define LB(x,n) n +#if __STDC__ +#define LCL(x) L ## x +#define EXT(x) _ ## x +#define LEXT(x) _ ## x ## : +#define LBc(x,n) n ## : +#define LBb(x,n) n ## b +#define LBf(x,n) n ## f +#else /* __STDC__ */ +#define LCL(x) L/**/x +#define EXT(x) _/**/x +#define LEXT(x) _/**/x/**/: +#define LBc(x,n) n/**/: +#define LBb(x,n) n/**/b +#define LBf(x,n) n/**/f +#endif /* __STDC__ */ + +#define String .asciz +#define Value .word +#define Times(a,b) (a*b) +#define Divide(a,b) (a/b) + +#define data16 .byte 0x66 +#define addr16 .byte 0x67 + +#if !GPROF +#define MCOUNT +#endif /* GPROF */ + +#define ELF_FUNC(x) +#define ELF_DATA(x) +#define ELF_SIZE(x,s) + +#define Entry(x,tag) .text@.align FALIGN@ .globl EXT(x)@ LEXT(x) +#define ENTRY(x,tag) Entry(x,tag)@MCOUNT +#define ENTRY2(x,y,tag) .text@ .align FALIGN@ .globl EXT(x)@ .globl EXT(y)@ \ + LEXT(x)@ LEXT(y) @\ + MCOUNT +#if __STDC__ +#define ASENTRY(x) .globl x @ .align FALIGN; x ## @ MCOUNT +#else +#define ASENTRY(x) .globl x @ .align FALIGN; x @ MCOUNT +#endif /* __STDC__ */ +#define DATA(x) .globl EXT(x) @ .align ALIGN @ LEXT(x) + + +#define End(x) ELF_SIZE(x,.-x) +#define END(x) End(EXT(x)) +#define ENDDATA(x) END(x) +#define Enddata(x) End(x) + +/* These defines are here for .c files that wish to reference global symbols + * within __asm__ statements. + */ +#define CC_SYM_PREFIX "_" + +#endif /* _PPC_ASM_H_ */ diff --git a/osfmk/ppc/ast.h b/osfmk/ppc/ast.h new file mode 100644 index 000000000..fe64a1f94 --- /dev/null +++ b/osfmk/ppc/ast.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Machine-dependent AST file for ppc. + */ + +#ifndef _PPC_AST_H_ +#define _PPC_AST_H_ + +/* + * Empty file - use the machine-independent versions. + */ + +#endif /* _PPC_AST_H_ */ diff --git a/osfmk/ppc/ast_types.h b/osfmk/ppc/ast_types.h new file mode 100644 index 000000000..4db875d69 --- /dev/null +++ b/osfmk/ppc/ast_types.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_AST_TYPES_H_ +#define _PPC_AST_TYPES_H_ + +/* + * Data type for remote ast_check() invocation support. Currently + * not implemented. Do this first to avoid include problems. + */ +typedef int ast_check_t; + +#endif /* _PPC_AST_TYPES_H_ */ diff --git a/osfmk/ppc/atomic_switch.h b/osfmk/ppc/atomic_switch.h new file mode 100644 index 000000000..7b2eed2ff --- /dev/null +++ b/osfmk/ppc/atomic_switch.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +typedef unsigned char UInt8; +typedef unsigned short UInt16; +typedef unsigned long UInt32; + + +/* Support firmware CallPseudoKernel architectural extension */ + +struct CallPseudoKernelDescriptor { + UInt32 pc; + UInt32 gpr0; + UInt32 intControlAddr; + UInt32 newState; + UInt32 intStateMask; + UInt32 intCR2Mask; + UInt32 intCR2Shift; + UInt32 sysContextState; +}; +typedef struct CallPseudoKernelDescriptor CallPseudoKernelDescriptor; +typedef CallPseudoKernelDescriptor * CallPseudoKernelDescriptorPtr; +typedef CallPseudoKernelDescriptor CPKD_t; + + + +/* Support firmware ExitPseudoKernel architectural extension */ + +struct ExitPseudoKernelDescriptor { + UInt32 pc; + UInt32 sp; + UInt32 gpr0; + UInt32 gpr3; + UInt32 cr; + UInt32 intControlAddr; + UInt32 newState; + UInt32 intStateMask; + UInt32 intCR2Mask; + UInt32 intCR2Shift; + UInt32 sysContextState; + UInt32 intPendingMask; + UInt32 intPendingPC; + UInt32 msrUpdate; +}; +typedef struct ExitPseudoKernelDescriptor ExitPseudoKernelDescriptor; +typedef ExitPseudoKernelDescriptor * ExitPseudoKernelDescriptorPtr; +typedef ExitPseudoKernelDescriptor EPKD_t; + + +struct EmulatorDescriptor { + UInt8 regMap[16]; // table mapping 68K D0..D7, A0..A7 register to PowerPC registers + UInt32 bootstrapVersionOffset; // offset within emulator data page of the bootstrap version string + UInt32 ecbOffset; // offset within emulator data page of the ECB + UInt32 intModeLevelOffset; // offset within emulator data page of the interrupt mode level + UInt32 entryAddress; // offset within text of the emulator's main entry point + UInt32 kcallTrapTableOffset; // offset within text of the nanokernel(!) 
call trap table + UInt32 postIntMask; // post interrupt mask + UInt32 clearIntMask; // clear interrupt mask + UInt32 testIntMask; // test interrupt mask + UInt32 codeSize; // total size of emulator object code (interpretive + DR) + UInt32 hashTableSize; // size of DR emulator's hash table + UInt32 drCodeStartOffset; // offset within text of the DR emulator's object code + UInt32 drInitOffset; // offset within DR emulator of its initialization entry point + UInt32 drAllocateCache; // offset within DR emulator of its cache allocation entry point + UInt32 dispatchTableOffset; // offset within text of the encoded instruction dispatch table +}; +typedef struct EmulatorDescriptor EmulatorDescriptor; +typedef EmulatorDescriptor *EmulatorDescriptorPtr; + + +enum { + // The following define the UInt32 gInterruptState + kInUninitialized = 0, // State not yet initialized + kInPseudoKernel = 1, // Currently executing within pseudo kernel + kInSystemContext = 2, // Currently executing within the system (emulator) context + kInAlternateContext = 3, // Currently executing within an alternate (native) context + kInExceptionHandler = 4, // Currently executing an exception handler + kOutsideMain = 5, // Currently executing outside of the main thread + kNotifyPending = 6, // Pending Notify Interrupt + + kInterruptStateMask = 0x000F0000, // Mask to extract interrupt state from gInterruptState + kInterruptStateShift = 16, // Shift count to align interrupt state + + kBackupCR2Mask = 0x0000000F, // Mask to extract backup CR2 from gInterruptState + kCR2ToBackupShift = 31-11, // Shift count to align CR2 into the backup CR2 of gInterruptState + // (and vice versa) + kCR2Mask = 0x00F00000 // Mask to extract CR2 from the PPC CR register +}; + + +enum { + kcReturnFromException = 0, + kcRunAlternateContext = 1, + kcResetSystem = 2, + kcVMDispatch = 3, + kcPrioritizeInterrupts = 4, + kcPowerDispatch = 5, + kcRTASDispatch = 6, + kcGetAdapterProcPtrsPPC = 12, + kcGetAdapterProcPtrs = 13, + 
kcCallAdapterProc = 14, + kcSystemCrash = 15 +}; + +#define bbMaxCode 16 + diff --git a/osfmk/ppc/atomic_switch.s b/osfmk/ppc/atomic_switch.s new file mode 100644 index 000000000..4133b1220 --- /dev/null +++ b/osfmk/ppc/atomic_switch.s @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +/* + * Classic atomic switch and fast trap code + * Written by: Mark Gorlinsky + */ + +/* +** +** Blue Box Fast Trap entry +** +** +** The registers at entry are as hw_exceptions left them. Which means +** that the Blue Box data area is pointed to be R26. +** +** We exit here through the fast path exit point in hw_exceptions. That means that +** upon exit, R4 must not change. It is the savearea with the current user context +** to restore. 
+** +** Input registers are: +** r0 = Syscall number +** r4 = Current context savearea (do not modify) +** r13 = THREAD_TOP_ACT pointer +** r26 = base of ACT_MACH_BDA in kernel address space +** -- for Traps -- +** r24 = Index into TWI table (x4) +** +** +*/ + + +ENTRY(atomic_switch_syscall, TAG_NO_FRAME_USED) + +/* + * Here's where we check for special Blue Box fast traps + * If we don't recognize the syscall, we'll go back to regular processing + */ + cmpwi r0,-1 ; Is it NKIsPreemptiveTask + beq- isBBpretask ; It is a fast syscall... + cmpwi r0,-2 ; Is it kcNKIsPreemptiveTaskEnv + bne- nofastSC ; Not a fast syscall... + + ; kcNKIsPreemptiveTaskEnv return task.taskEnv in r0 + + lwz r23, ACT_MACT_BTE(r13) ; Get the taskEnv + stw r23, saver0(r4) ; Return the taskEnv in R0 + +isBBpretask: ; answer the question is this a preemptive task ? + rlwinm r6,r26,0,0,19 ; Start of page is bttd + lwz r1,BTTD_INTERRUPT_VECTOR(r6) ; Get interrupt vector + lwz r6, savecr(r4) ; Get the current CCRs + cmpwi r1,0 ; Is this a preemptive thread ? + rlwinm r6,r6,0,cr0_eq+1,cr0_eq-1 ; Clear CR0 EQ bit + bne notpretask ; Only the cooperative thread has an interrupt vector + oris r6,r6,(0x8000 >> cr0_eq) ; Set CR0[eq] if task is preemptive. +notpretask: + stw r6, savecr(r4) ; Save the new current CCRs + + b EXT(fastexit) ; Take the fast path exit... + +nofastSC: + li r5, BTTD_SYSCALL_VECTOR + b .L_CallPseudoKernel + +ENTRY(atomic_switch_trap, TAG_NO_FRAME_USED) + +/* +** functions 0-15 -> Call PseudoKernel +** 16 -> Exit PseudoKernel +*/ + + cmplwi cr7,r24,BB_RFI_TRAP ; Is this an RFI? + beq cr7,.L_ExitPseudoKernel ; Yes... 
+ + li r5, BTTD_TRAP_VECTOR + +/****************************************************************************** + * void CallPseudoKernel ( int vector, thread_act_t * act, BEDA_t * beda, savearea *sv ) + * + * This op provides a means of invoking the BlueBox PseudoKernel from a + * system (68k) or native (PPC) context while changing BlueBox interruption + * state atomically. As an added bonus, this op leaves all but R1/PC of the user + * state registers intact. R1/PC are saved in a per thread save area, the base of + * which is located in the bbDescAddr member of the thread_act structure. + * + * This op is invoked from the Emulator Trap dispatch table or from a System + * Call when Mach SCs have been disabled. A vectorindex is passed in to indicate + * which vector should be taken. + * + * If this op is invoked from the Emulator Trap dispatch table, the kernel is + * aware of starting address of this table. It used the users PC (SRR0) + * and the start of the Trap dispatch table address to verify the trap exception + * as a atomic_switch trap. If a trap exception is verified as a atomic_switch + * trap we enter here with the following registers loaded. + * + * Input registers are: + * r5 = Vector to take + * r13 = Current thread context data + * r26 = Base address of BlueBox exception data area in kernel address space + * r4 = Current context savearea (do not modify) + * + ******************************************************************************/ + +.L_CallPseudoKernel: + + rlwinm r6,r26,0,0,19 ; Start of page is bttd + lwz r7,ACT_MACT_SPF(r13) ; Get special flags + lwz r1,BTTD_INTERRUPT_VECTOR(r6) ; Get interrupt vector + rlwinm r7,r7,0,bbNoMachSCbit+1,bbNoMachSCbit-1 + ; Reactivate Mach SCs + lwz r8,BTTD_INTCONTROLWORD(r6) ; Get Interrupt Control Word + cmpwi r1,0 ; Is this a preemptive thread ? 
+ stw r7,ACT_MACT_SPF(r13) ; Update special flags + beq .L_CallFromPreemptiveThread ; No int vector means preemptive thread + + rlwinm r1,r8,0,INTSTATEMASK_B,INTSTATEMASK_E + ; Extract current Interrupt state + rlwinm r8,r8,0,INTSTATEMASK_E+1,INTSTATEMASK_B-1 + ; Clear current interrupt state + xoris r2,r1,SYSCONTEXTSTATE ; Setup for System Context check + lwz r1,savecr(r4) ; Load current CR bits + cmpwi r2,0 ; Check if state is System Context? + oris r8,r8,PSEUDOKERNELSTATE ; Update state for entering the PK + bne .L_CallFromAlternateContext ; No, then do not save CR2 bits + + rlwimi r8,r1,32-INTCR2TOBACKUPSHIFT,INTBACKUPCR2MASK_B,INTBACKUPCR2MASK_E + ; Insert live CR2 in ICW BackupCR2 +.L_CallFromAlternateContext: + + stw r8,BTTD_INTCONTROLWORD(r6) ; Update ICW + +.L_CallFromPreemptiveThread: + + lwz r1,savesrr0(r4) ; Get current PC + lwz r2,saver1(r4) ; Get current R1 + lwz r3,savesrr1(r4) ; Get current MSR + stw r1,BEDA_SRR0(r26) ; Save current PC + rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 + ; Clear SE|BE bits in MSR + stw r2,BEDA_SPRG1(r26) ; Save current R1 + stw r3,savesrr1(r4) ; Load new MSR + + lwz r1,BEDA_SPRG0(r26) ; Get replacement R1 + lwzx r2,r5,r6 ; Load vector address + stw r3,BEDA_SRR1(r26) ; Update saved MSR + stw r1,saver1(r4) ; Load up new R1 + stw r2,savesrr0(r4) ; Save vector as PC + + b EXT(fastexit) ; Go back and take the fast path exit... + +/****************************************************************************** + * void ExitPseudoKernel ( thread_act_t * act, BEDA_t * beda, savearea * sv ) + * + * This op provides a means of exiting from the BlueBox PseudoKernel to a + * user context. This op attempts to simulate an RFI for the returning + * Traps (atomic_switch_trap) and SysCalls (atomic_switch_syscall). Only the + * Blue Thread handling interrupts is allowed to atomically change + * interruption state and handle pending interrupts. 
+ * + * If an interrupt is pending and we are returning to the alternate context, + * the exit is aborted and we return to an pending interrupt handler in the + * Blue Box pseudokernel. + * + * It also allows the MSR's FE0, FE1, BE and SE bits to updated for the user + * and completes the PPC register loading. + * + * Input registers are: + * r4 = Current context savearea (do not modify) + * r13 = Pointer to the current active thread's data + * r26 = Base address of BlueBox Data in kernel address space + * + ******************************************************************************/ + +.L_ExitPseudoKernel: + + rlwinm r6,r26,0,0,19 ; Start of page is bttd + lwz r7,ACT_MACT_SPF(r13) ; Get special flags + lwz r2,BTTD_INTERRUPT_VECTOR(r6) ; Get the interrupt vector + lwz r1,BEDA_SPRG1(r26) ; Get saved CTR + oris r7,r7,(0x8000 >> bbNoMachSCbit) ; Disable Mach SCs for Blue Box + + cmpwi r2,0 ; Is this a preemptive thread + stw r1,savectr(r4) ; Update CTR + beq .L_ExitFromPreemptiveThread + + lwz r8,BTTD_INTCONTROLWORD(r6) ; Get ICW + lwz r1,BTTD_NEWEXITSTATE(r6) ; New interrupt state + lwz r2,BTTD_TESTINTMASK(r6) ; Get pending interrupt mask + lis r3,SYSCONTEXTSTATE ; Setup for check in system context + rlwimi r8,r1,0,INTSTATEMASK_B,INTSTATEMASK_E + ; Insert new state + cmplw cr1,r1,r3 ; System context ? + and. r2,r8,r2 ; Any pending interrupt? 
+ lwz r1,savecr(r4) ; Get current CR + + beq cr1,.L_ExitToSystemContext ; We are in system context + beq .L_ExitUpdateRuptControlWord ; We do not have a pending interrupt + + lwz r2,saver1(r4) ; Get current R1 + lwz r1,BEDA_SPRG0(r26) ; Get replacement R1 + stw r2,BEDA_SPRG1(r26) ; Save current R1 + stw r1,saver1(r4) ; Load up new R1 + lwz r3,BTTD_PENDINGINT_VECTOR(r6) ; Get pending interrupt PC + b .L_ExitAbortExit ; Abort and Exit + +.L_ExitToSystemContext: + rlwimi r1,r8,INTCR2TOBACKUPSHIFT,INTCR2MASK_B,INTCR2MASK_E + ; Insert live CR2 into backup CR2 +.L_ExitUpdateRuptControlWord: + stw r8,BTTD_INTCONTROLWORD(r6) ; Update ICW + stw r1,savecr(r4) ; Update CR + +.L_ExitFromPreemptiveThread: + lwz r2,savesrr1(r4) ; Get current MSR + lwz r1,BEDA_SRR1(r26) ; Get new MSR + stw r7,ACT_MACT_SPF(r13) ; Update special flags + rlwimi r2,r1,0,MSR_FE0_BIT,MSR_FE1_BIT + ; Insert FE0,FE1,SE,BE bits + lwz r3,BEDA_SRR0(r26) ; Get new PC + stw r2,savesrr1(r4) ; Update MSR + +.L_ExitAbortExit: + stw r3,savesrr0(r4) ; Update PC + + b EXT(fastexit) ; Go back and take the fast path exit... + diff --git a/osfmk/ppc/bat_init.c b/osfmk/ppc/bat_init.c new file mode 100644 index 000000000..9d297538e --- /dev/null +++ b/osfmk/ppc/bat_init.c @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#ifdef XXX_LOADER +unsigned int kernel_seg_regs[] = { + KERNEL_SEG_REG0_VALUE, + KERNEL_SEG_REG1_VALUE, + SEG_REG_INVALID, /* 2 */ + SEG_REG_INVALID, /* 3 */ + SEG_REG_INVALID, /* 4 */ + KERNEL_SEG_REG5_VALUE, /* 5 - I/O segment */ + SEG_REG_INVALID, /* 6 */ + SEG_REG_INVALID, /* 7 */ + KERNEL_SEG_REG8_VALUE, /* 8-F are possible IO space */ + KERNEL_SEG_REG9_VALUE, + KERNEL_SEG_REG10_VALUE, + KERNEL_SEG_REG11_VALUE, + KERNEL_SEG_REG12_VALUE, + KERNEL_SEG_REG13_VALUE, + KERNEL_SEG_REG14_VALUE, /* 14 - A/V video */ + KERNEL_SEG_REG15_VALUE /* 15 - NuBus etc */ +}; +#else +extern unsigned int kernel_seg_regs[]; +#endif + +// The sophisticated BAT manager + +unsigned int mappedSegments = 0; +unsigned int availableBATs = 0xE; // BAT0 used, 1-3 available + +vm_offset_t +PEResidentAddress( vm_offset_t address, vm_size_t length ) +{ + if( mappedSegments & (1 << (15 & (address >> 28)))) + return( address); + else + return( 0); +} + +vm_offset_t +PEMapSegment( vm_offset_t address, vm_size_t length ) +{ + vm_offset_t retAddress; + bat_t bat; + int batNum; + + retAddress = PEResidentAddress( address, length ); + if( retAddress) + return( retAddress); + + if( length < (256 * 1024)) + return( 0); + if( availableBATs == 0) + return( 0); + + for( batNum = 0; + (0 == (availableBATs & (1 << batNum))); + batNum++); + + bat.upper.word = address & 0xf0000000; + bat.lower.word = bat.upper.word; + + bat.upper.bits.bl = 0x7ff; /* size = 256M */ + 
bat.upper.bits.vs = 1; + bat.upper.bits.vp = 0; /* user disabled */ + + bat.lower.bits.wimg = PTE_WIMG_IO; + bat.lower.bits.pp = 2; /* read/write access */ + + // Update the shadow bats. + shadow_BAT.DBATs[batNum].upper = bat.upper.word; + shadow_BAT.DBATs[batNum].lower = bat.lower.word; + + sync();isync(); + switch( batNum) { // !%$@!! mtdbat needs literal + case 0: + mtdbatu( 0, BAT_INVALID); /* invalidate old mapping */ + mtdbatl( 0, bat.lower.word); + mtdbatu( 0, bat.upper.word); + break; + case 1: + mtdbatu( 1, BAT_INVALID); + mtdbatl( 1, bat.lower.word); + mtdbatu( 1, bat.upper.word); + break; + case 2: + mtdbatu( 2, BAT_INVALID); + mtdbatl( 2, bat.lower.word); + mtdbatu( 2, bat.upper.word); + break; + case 3: + mtdbatu( 3, BAT_INVALID); + mtdbatl( 3, bat.lower.word); + mtdbatu( 3, bat.upper.word); + break; + } + sync();isync(); + + availableBATs &= ~(1 << batNum); + mappedSegments |= (1 << (15 & (address >> 28))); + + return( address); +} + +void initialize_bats(boot_args *args) +{ + int i; + + /* Give ourselves the virtual map that we would like */ + bat_t bat; + + /* Make sure that the BATs map what we expect. Note + * that we assume BAT0 maps kernel text & data. + * + * Except, oops, none of the BATs have ever been set. + * Developer worked only by fluke. + */ + + bat.upper.word = 0; + bat.upper.bits.bepi = 0x0; /* start at logical addr 0M */ + /* + * We should be smarter here about picking an + * amount to map + */ + bat.upper.bits.bl = 0x7ff; /* size = 256M */ + bat.upper.bits.vs = 1; + bat.upper.bits.vp = 0; + + bat.lower.word = 0; + bat.lower.bits.brpn = 0x0; /* start at physical addr 0 */ + bat.lower.bits.wimg = PTE_WIMG_DEFAULT; + bat.lower.bits.pp = 2; /* read/write access */ + + /* Mustn't cause any data traffic here, + * we're modifying our data BAT register! 
+ */ + + sync(); + mtdbatu(0, BAT_INVALID); /* invalidate old mapping */ + isync(); + mtdbatl(0, bat.lower.word); + isync(); + mtdbatu(0, bat.upper.word); /* update with new mapping */ + isync(); + mtibatl(0, bat.lower.word); + isync(); + mtibatu(0, bat.upper.word); /* update with new mapping */ + isync(); + + sync();isync(); + mtdbatu(1,BAT_INVALID); mtdbatl(1,BAT_INVALID); + mtibatu(1,BAT_INVALID); mtibatl(1,BAT_INVALID); + mtdbatu(2,BAT_INVALID); mtdbatl(2,BAT_INVALID); + mtibatu(2,BAT_INVALID); mtibatl(2,BAT_INVALID); + mtdbatu(3,BAT_INVALID); mtdbatl(3,BAT_INVALID); + mtibatu(3,BAT_INVALID); mtibatl(3,BAT_INVALID); + sync();isync(); + + PEMapSegment( 0xf0000000, 0x10000000); + if( args->Video.v_baseAddr) + PEMapSegment( args->Video.v_baseAddr, 0x10000000); + + /* Set up segment registers as VM through space 0 */ + isync(); + for (i=0; i<=15; i++) { + mtsrin(KERNEL_SEG_REG0_VALUE | i, i * 0x10000000); + } + isync(); +} + +/* + * Adjust the size of the region mapped by a BAT + * to to be just large enough to include the specified + * offset, and return the offset of the new end of the region. + * Note that both 'offsets' are really *lengths*, i.e. the + * offset of the end of the mapped region from the beginning. + * Either the instruction or data BATs (or both) can be specified. + * If the new length is greater than the size mappable by a BAT, + * then that value is just returned and no changes are made. 
+ */ +vm_offset_t +adjust_bat_limit( + vm_offset_t new_minimum, + int batn, + boolean_t ibat, + boolean_t dbat +) +{ + vm_offset_t new_limit; + + if (new_minimum <= 256*1024*1024) { + unsigned int bl = 0; + + new_limit = 128*1024; + while (new_limit < new_minimum) { + new_limit *= 2; + bl = (bl << 1) | 1; + } + + { + batu_t batu; + + if (dbat) switch (batn) { + + case 0: + mfdbatu(batu, 0 ); + batu.bits.bl = bl; + + sync(); isync(); + mtdbatu( 0, batu); + sync(); isync(); + + break; + + case 1: + mfdbatu(batu, 1 ); + batu.bits.bl = bl; + + sync(); isync(); + mtdbatu( 1, batu); + sync(); isync(); + + break; + + case 2: + mfdbatu(batu, 2 ); + batu.bits.bl = bl; + + sync(); isync(); + mtdbatu( 2, batu); + sync(); isync(); + + break; + + case 3: + mfdbatu(batu, 3 ); + batu.bits.bl = bl; + + sync(); isync(); + mtdbatu( 3, batu); + sync(); isync(); + + break; + } + + if (ibat) switch (batn) { + + case 0: + mfibatu(batu, 0 ); + batu.bits.bl = bl; + + sync(); isync(); + mtibatu( 0, batu); + sync(); isync(); + + break; + + case 1: + mfibatu(batu, 1 ); + batu.bits.bl = bl; + + sync(); isync(); + mtibatu( 1, batu); + sync(); isync(); + + break; + + case 2: + mfibatu(batu, 2 ); + batu.bits.bl = bl; + + sync(); isync(); + mtibatu( 2, batu); + sync(); isync(); + + break; + + case 3: + mfibatu(batu, 3 ); + batu.bits.bl = bl; + + sync(); isync(); + mtibatu( 3, batu); + sync(); isync(); + + break; + } + } + } + else + new_limit = new_minimum; + + return (new_limit); +} diff --git a/osfmk/ppc/bcopy.s b/osfmk/ppc/bcopy.s new file mode 100644 index 000000000..95ad3ea6b --- /dev/null +++ b/osfmk/ppc/bcopy.s @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +; +; Copy bytes of data around. handles overlapped data. +; +; Change this to use Altivec later on, and maybe floating point. +; +; NOTE: This file compiles and executes on both MacOX 8.x (Codewarrior) +; and MacOX X. The "#if 0"s are treated as comments by CW so the +; stuff between them is included by CW and excluded on MacOX X. +; Same with the "#include"s. +; +#include +#include + +; Use CR5_lt to indicate non-cached +#define noncache 20 +; Use CR5_gt to indicate that we need to turn data translation back on +#define fixxlate 21 +#if 0 +noncache: equ 20 +fixxlate: equ 21 +#endif +#if 0 +br0: equ 0 +#endif + +; +; bcopy_nc(from, to, nbytes) +; +; bcopy_nc operates on non-cached memory so we can not use any kind +; of cache instructions. +; + + + +#if 0 + IF 0 +#endif +ENTRY(bcopy_nc, TAG_NO_FRAME_USED) +#if 0 + ENDIF + export xbcopy_nc[DS] + tc xbcopy_nc[TC],xbcopy_nc[DS] + csect xbcopy_nc[DS] + dc.l .xbcopy_nc + dc.l TOC[tc0] + export .xbcopy_nc + csect xbcopy_nc[PR] +.xbcopy_nc: +#endif + + crset noncache ; Set non-cached + b bcpswap + +; +; void bcopy_phys(from, to, nbytes) +; Turns off data translation before the copy. 
Note, this one will +; not work in user state +; + +#if 0 + IF 0 +#endif +ENTRY(bcopy_phys, TAG_NO_FRAME_USED) +#if 0 + ENDIF + export xbcopy_phys[DS] + tc bcopy_physc[TC],bcopy_phys[DS] + csect bcopy_phys[DS] + dc.l .bcopy_phys + dc.l TOC[tc0] + export .bcopy_phys + csect bcopy_phys[PR] +.bcopy_phys: +#endif + + mfmsr r9 ; Get the MSR + crclr noncache ; Set cached + rlwinm. r8,r9,0,MSR_DR_BIT,MSR_DR_BIT ; Is data translation on? + + cmplw cr1,r4,r3 ; Compare "to" and "from" + cmplwi cr7,r5,0 ; Check if we have a 0 length + mr r6,r3 ; Set source + beqlr- cr1 ; Bail if "to" and "from" are the same + xor r9,r9,r8 ; Turn off translation if it is on (should be) + beqlr- cr7 ; Bail if length is 0 + + mtmsr r9 ; Set DR translation off + isync ; Wait for it + + crnot fixxlate,cr0_eq ; Remember to turn on translation if it was + b copyit ; Go copy it... + +; +; void bcopy(from, to, nbytes) +; + +#if 0 + IF 0 +#endif +ENTRY(bcopy, TAG_NO_FRAME_USED) +#if 0 + ENDIF + export xbcopy[DS] + tc xbcopyc[TC],xbcopy[DS] + csect xbcopy[DS] + dc.l .xbcopy + dc.l TOC[tc0] + export .xbcopy + csect xbcopy[PR] +.xbcopy: +#endif + + crclr noncache ; Set cached + +bcpswap: cmplw cr1,r4,r3 ; Compare "to" and "from" + mr. r5,r5 ; Check if we have a 0 length + mr r6,r3 ; Set source + beqlr- cr1 ; Bail if "to" and "from" are the same + beqlr- ; Bail if length is 0 + crclr fixxlate ; Set translation already ok + b copyit ; Go copy it... + +; +; When we move the memory, forward overlays must be handled. We +; also can not use the cache instructions if we are from bcopy_nc. +; We need to preserve R3 because it needs to be returned for memcpy. +; We can be interrupted and lose control here. +; +; There is no stack, so in order to used floating point, we would +; need to take the FP exception. Any potential gains by using FP +; would be more than eaten up by this. +; +; Later, we should used Altivec for large moves. 
+; + +#if 0 + IF 0 +#endif +ENTRY(memcpy, TAG_NO_FRAME_USED) +#if 0 + ENDIF + export xmemcpy[DS] + tc xmemcpy[TC],xmemcpy[DS] + csect xmemcpy[DS] + dc.l .xmemcpy + dc.l TOC[tc0] + export .xmemcpy + csect xmemcpy[PR] +.xmemcpy: +#endif + cmplw cr1,r3,r4 ; "to" and "from" the same? + mr r6,r4 ; Set the "from" + mr. r5,r5 ; Length zero? + crclr noncache ; Set cached + mr r4,r3 ; Set the "to" + crclr fixxlate ; Set translation already ok + beqlr- cr1 ; "to" and "from" are the same + beqlr- ; Length is 0 + +copyit: sub r12,r4,r6 ; Get potential overlap (negative if backward move) + lis r8,0x7FFF ; Start up a mask + srawi r11,r12,31 ; Propagate the sign bit + dcbt br0,r6 ; Touch in the first source line + cntlzw r7,r5 ; Get the highest power of 2 factor of the length + ori r8,r8,0xFFFF ; Make limit 0x7FFFFFFF + xor r9,r12,r11 ; If sink - source was negative, invert bits + srw r8,r8,r7 ; Get move length limitation + sub r9,r9,r11 ; If sink - source was negative, add 1 and get absolute value + cmplw r12,r5 ; See if we actually forward overlap + cmplwi cr7,r9,32 ; See if at least a line between source and sink + dcbtst br0,r4 ; Touch in the first sink line + cmplwi cr1,r5,32 ; Are we moving more than a line? + cror noncache,noncache,28 ; Set to not DCBZ output line if not enough space + blt- fwdovrlap ; This is a forward overlapping area, handle it... + +; +; R4 = sink +; R5 = length +; R6 = source +; + +; +; Here we figure out how much we have to move to get the sink onto a +; cache boundary. If we can, and there are still more that 32 bytes +; left to move, we can really speed things up by DCBZing the sink line. +; We can not do this if noncache is set because we will take an +; alignment exception. + + neg r0,r4 ; Get the number of bytes to move to align to a line boundary + rlwinm. 
r0,r0,0,27,31 ; Clean it up and test it + and r0,r0,r8 ; limit to the maximum front end move + mtcrf 3,r0 ; Make branch mask for partial moves + sub r5,r5,r0 ; Set the length left to move + beq alline ; Already on a line... + + bf 31,alhalf ; No single byte to do... + lbz r7,0(r6) ; Get the byte + addi r6,r6,1 ; Point to the next + stb r7,0(r4) ; Save the single + addi r4,r4,1 ; Bump sink + +; Sink is halfword aligned here + +alhalf: bf 30,alword ; No halfword to do... + lhz r7,0(r6) ; Get the halfword + addi r6,r6,2 ; Point to the next + sth r7,0(r4) ; Save the halfword + addi r4,r4,2 ; Bump sink + +; Sink is word aligned here + +alword: bf 29,aldouble ; No word to do... + lwz r7,0(r6) ; Get the word + addi r6,r6,4 ; Point to the next + stw r7,0(r4) ; Save the word + addi r4,r4,4 ; Bump sink + +; Sink is double aligned here + +aldouble: bf 28,alquad ; No double to do... + lwz r7,0(r6) ; Get the first word + lwz r8,4(r6) ; Get the second word + addi r6,r6,8 ; Point to the next + stw r7,0(r4) ; Save the first word + stw r8,4(r4) ; Save the second word + addi r4,r4,8 ; Bump sink + +; Sink is quadword aligned here + +alquad: bf 27,alline ; No quad to do... + lwz r7,0(r6) ; Get the first word + lwz r8,4(r6) ; Get the second word + lwz r9,8(r6) ; Get the third word + stw r7,0(r4) ; Save the first word + lwz r11,12(r6) ; Get the fourth word + addi r6,r6,16 ; Point to the next + stw r8,4(r4) ; Save the second word + stw r9,8(r4) ; Save the third word + stw r11,12(r4) ; Save the fourth word + addi r4,r4,16 ; Bump sink + +; Sink is line aligned here + +alline: rlwinm. r0,r5,27,5,31 ; Get the number of full lines to move + mtcrf 3,r5 ; Make branch mask for backend partial moves + rlwinm r11,r5,0,0,26 ; Get number of bytes we are going to move + beq- backend ; No full lines to move + + sub r5,r5,r11 ; Calculate the residual + li r10,96 ; Stride for touch ahead + +nxtline: subic. r0,r0,1 ; Account for the line now + + bt- noncache,skipz ; Skip if we are not cached... 
+ dcbz br0,r4 ; Blow away the whole line because we are replacing it + dcbt r6,r10 ; Touch ahead a bit + +skipz: lwz r7,0(r6) ; Get the first word + lwz r8,4(r6) ; Get the second word + lwz r9,8(r6) ; Get the third word + stw r7,0(r4) ; Save the first word + lwz r11,12(r6) ; Get the fourth word + stw r8,4(r4) ; Save the second word + lwz r7,16(r6) ; Get the fifth word + stw r9,8(r4) ; Save the third word + lwz r8,20(r6) ; Get the sixth word + stw r11,12(r4) ; Save the fourth word + lwz r9,24(r6) ; Get the seventh word + stw r7,16(r4) ; Save the fifth word + lwz r11,28(r6) ; Get the eighth word + addi r6,r6,32 ; Point to the next + stw r8,20(r4) ; Save the sixth word + stw r9,24(r4) ; Save the seventh word + stw r11,28(r4) ; Save the eighth word + addi r4,r4,32 ; Bump sink + bgt+ nxtline ; Do the next line, if any... + + +; Move backend quadword + +backend: bf 27,noquad ; No quad to do... + lwz r7,0(r6) ; Get the first word + lwz r8,4(r6) ; Get the second word + lwz r9,8(r6) ; Get the third word + lwz r11,12(r6) ; Get the fourth word + stw r7,0(r4) ; Save the first word + addi r6,r6,16 ; Point to the next + stw r8,4(r4) ; Save the second word + stw r9,8(r4) ; Save the third word + stw r11,12(r4) ; Save the fourth word + addi r4,r4,16 ; Bump sink + +; Move backend double + +noquad: bf 28,nodouble ; No double to do... + lwz r7,0(r6) ; Get the first word + lwz r8,4(r6) ; Get the second word + addi r6,r6,8 ; Point to the next + stw r7,0(r4) ; Save the first word + stw r8,4(r4) ; Save the second word + addi r4,r4,8 ; Bump sink + +; Move backend word + +nodouble: bf 29,noword ; No word to do... + lwz r7,0(r6) ; Get the word + addi r6,r6,4 ; Point to the next + stw r7,0(r4) ; Save the word + addi r4,r4,4 ; Bump sink + +; Move backend halfword + +noword: bf 30,nohalf ; No halfword to do... 
+ lhz r7,0(r6) ; Get the halfword + addi r6,r6,2 ; Point to the next + sth r7,0(r4) ; Save the halfword + addi r4,r4,2 ; Bump sink + +; Move backend byte + +nohalf: bf 31,bcpydone ; Leave cuz we are all done... + lbz r7,0(r6) ; Get the byte + stb r7,0(r4) ; Save the single + +bcpydone: bflr fixxlate ; Leave now if we do not need to fix translation... + mfmsr r9 ; Get the MSR + ori r9,r9,lo16(MASK(MSR_DR)) ; Turn data translation on + mtmsr r9 ; Just do it + isync ; Hang in there + blr ; Leave cuz we are all done... + +; +; 0123456789ABCDEF0123456789ABCDEF +; 0123456789ABCDEF0123456789ABCDEF +; F +; DE +; 9ABC +; 12345678 +; 123456789ABCDEF0 +; 0 + +; +; Here is where we handle a forward overlapping move. These will be slow +; because we can not kill the cache of the destination until after we have +; loaded/saved the source area. Also, because reading memory backwards is +; slower when the cache line needs to be loaded because the critical +; doubleword is loaded first, i.e., the last, then it goes back to the first, +; and on in order. That means that when we are at the second to last DW we +; have to wait until the whole line is in cache before we can proceed. +; + +fwdovrlap: add r4,r5,r4 ; Point past the last sink byte + add r6,r5,r6 ; Point past the last source byte + and r0,r4,r8 ; Apply movement limit + li r12,-1 ; Make sure we touch in the actual line + mtcrf 3,r0 ; Figure out the best way to move backwards + dcbt r12,r6 ; Touch in the last line of source + rlwinm. r0,r0,0,27,31 ; Calculate the length to adjust to cache boundary + dcbtst r12,r4 ; Touch in the last line of the sink + beq- balline ; Aready on cache line boundary + + sub r5,r5,r0 ; Precaculate move length left after alignment + + bf 31,balhalf ; No single byte to do... + lbz r7,-1(r6) ; Get the byte + subi r6,r6,1 ; Point to the next + stb r7,-1(r4) ; Save the single + subi r4,r4,1 ; Bump sink + +; Sink is halfword aligned here + +balhalf: bf 30,balword ; No halfword to do... 
+ lhz r7,-2(r6) ; Get the halfword + subi r6,r6,2 ; Point to the next + sth r7,-2(r4) ; Save the halfword + subi r4,r4,2 ; Bump sink + +; Sink is word aligned here + +balword: bf 29,baldouble ; No word to do... + lwz r7,-4(r6) ; Get the word + subi r6,r6,4 ; Point to the next + stw r7,-4(r4) ; Save the word + subi r4,r4,4 ; Bump sink + +; Sink is double aligned here + +baldouble: bf 28,balquad ; No double to do... + lwz r7,-8(r6) ; Get the first word + lwz r8,-4(r6) ; Get the second word + subi r6,r6,8 ; Point to the next + stw r7,-8(r4) ; Save the first word + stw r8,-4(r4) ; Save the second word + subi r4,r4,8 ; Bump sink + +; Sink is quadword aligned here + +balquad: bf 27,balline ; No quad to do... + lwz r7,-16(r6) ; Get the first word + lwz r8,-12(r6) ; Get the second word + lwz r9,-8(r6) ; Get the third word + lwz r11,-4(r6) ; Get the fourth word + stw r7,-16(r4) ; Save the first word + subi r6,r6,16 ; Point to the next + stw r8,-12(r4) ; Save the second word + stw r9,-8(r4) ; Save the third word + stw r11,-4(r4) ; Save the fourth word + subi r4,r4,16 ; Bump sink + +; Sink is line aligned here + +balline: rlwinm. r0,r5,27,5,31 ; Get the number of full lines to move + mtcrf 3,r5 ; Make branch mask for backend partial moves + beq- bbackend ; No full lines to move +#if 0 + stwu r1,-8(r1) ; Dummy stack for MacOS + stw r2,4(r1) ; Save RTOC +#endif + + +; Registers in use: R0, R1, R3, R4, R5, R6 +; Registers not in use: R2, R7, R8, R9, R10, R11, R12 - Ok, we can make another free for 8 of them + +bnxtline: subic. 
r0,r0,1 ; Account for the line now + + lwz r7,-32(r6) ; Get the first word + lwz r5,-28(r6) ; Get the second word + lwz r2,-24(r6) ; Get the third word + lwz r12,-20(r6) ; Get the third word + lwz r11,-16(r6) ; Get the fifth word + lwz r10,-12(r6) ; Get the sixth word + lwz r9,-8(r6) ; Get the seventh word + lwz r8,-4(r6) ; Get the eighth word + subi r6,r6,32 ; Point to the next + + stw r7,-32(r4) ; Get the first word + ble- bnotouch ; Last time, skip touch of source... + dcbt br0,r6 ; Touch in next source line + +bnotouch: stw r5,-28(r4) ; Get the second word + stw r2,-24(r4) ; Get the third word + stw r12,-20(r4) ; Get the third word + stw r11,-16(r4) ; Get the fifth word + stw r10,-12(r4) ; Get the sixth word + stw r9,-8(r4) ; Get the seventh word + stw r8,-4(r4) ; Get the eighth word + subi r4,r4,32 ; Bump sink + + bgt+ bnxtline ; Do the next line, if any... +#if 0 + lwz r2,4(r1) ; Restore RTOC + lwz r1,0(r1) ; Pop dummy stack +#endif + +; +; Note: We touched these lines in at the beginning +; + +; Move backend quadword + +bbackend: bf 27,bnoquad ; No quad to do... + lwz r7,-16(r6) ; Get the first word + lwz r8,-12(r6) ; Get the second word + lwz r9,-8(r6) ; Get the third word + lwz r11,-4(r6) ; Get the fourth word + stw r7,-16(r4) ; Save the first word + subi r6,r6,16 ; Point to the next + stw r8,-12(r4) ; Save the second word + stw r9,-8(r4) ; Save the third word + stw r11,-4(r4) ; Save the fourth word + subi r4,r4,16 ; Bump sink + +; Move backend double + +bnoquad: bf 28,bnodouble ; No double to do... + lwz r7,-8(r6) ; Get the first word + lwz r8,-4(r6) ; Get the second word + subi r6,r6,8 ; Point to the next + stw r7,-8(r4) ; Save the first word + stw r8,-4(r4) ; Save the second word + subi r4,r4,8 ; Bump sink + +; Move backend word + +bnodouble: bf 29,bnoword ; No word to do... 
+ lwz r7,-4(r6) ; Get the word + subi r6,r6,4 ; Point to the next + stw r7,-4(r4) ; Save the word + subi r4,r4,4 ; Bump sink + +; Move backend halfword + +bnoword: bf 30,bnohalf ; No halfword to do... + lhz r7,-2(r6) ; Get the halfword + subi r6,r6,2 ; Point to the next + sth r7,-2(r4) ; Save the halfword + subi r4,r4,2 ; Bump sink + +; Move backend byte + +bnohalf: bflr 31 ; Leave cuz we are all done... + lbz r7,-1(r6) ; Get the byte + stb r7,-1(r4) ; Save the single + + blr ; Leave cuz we are all done... diff --git a/osfmk/ppc/bits.s b/osfmk/ppc/bits.s new file mode 100644 index 000000000..a01cf81fe --- /dev/null +++ b/osfmk/ppc/bits.s @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include +#include + +# +# void setbit(int bitno, int *s) +# +# Set indicated bit in bit string. +# Note: being big-endian, bit 0 is 0x80000000. 
+ +ENTRY(setbit,TAG_NO_FRAME_USED) + + rlwinm r8,r3,29,3,31 /* Get byte displacement */ + rlwinm r9,r3,0,29,31 /* Get bit within byte */ + li r6,0x80 /* Start with bit 0 */ + lbzx r5,r4,r8 /* Grab target byte */ + srw r6,r6,r9 /* Get the right bit (fits right into the load cycle) */ + or r5,r5,r6 /* Turn on the right bit */ + stbx r5,r4,r8 /* Save the byte back */ + blr + +# +# void clrbit(int bitno, int *s) +# +# Clear indicated bit in bit string. +# Note: being big-endian, bit 0 is 0x80000000. + +ENTRY(clrbit,TAG_NO_FRAME_USED) + + rlwinm r8,r3,29,3,31 /* Get byte displacement */ + rlwinm r9,r3,0,29,31 /* Get bit within byte */ + li r6,0x80 /* Start with bit 0 */ + lbzx r5,r4,r8 /* Grab target byte */ + srw r6,r6,r9 /* Get the right bit (fits right into the load cycle) */ + andc r5,r5,r6 /* Turn off the right bit */ + stbx r5,r4,r8 /* Save the byte back */ + blr + + +# /* +# * Find first bit set in bit string. +# */ +# int +# ffsbit(int *s) +# +# Returns the bit index of the first bit set (starting from 0) +# Assumes pointer is word-aligned + +ENTRY(ffsbit, TAG_NO_FRAME_USED) + lwz r0, 0(ARG0) + mr ARG1, ARG0 /* Free up ARG0 for result */ + + cmpwi r0, 0 /* Check against zero... */ + cntlzw ARG0, r0 /* Free inst... find the set bit... */ + bnelr+ /* Return if bit in first word */ + +.L_ffsbit_lp: + lwz r0, 4(ARG1) + addi ARG1, ARG1, 4 + cmpwi r0, 0 /* Check against zero... */ + cntlzw r12, r0 + add ARG0, ARG0, r12 /* ARG0 keeps bit count */ + beq+ .L_ffsbit_lp + blr + +/* + * int tstbit(int bitno, int *s) + * + * Test indicated bit in bit string. + * Note: being big-endian, bit 0 is 0x80000000. 
+ */ + +ENTRY2(tstbit, testbit, TAG_NO_FRAME_USED) + + rlwinm r8,r3,29,3,31 /* Get byte displacement */ + rlwinm r9,r3,0,29,31 /* Get bit within byte */ + lbzx r5,r4,r8 /* Grab target byte */ + addi r9,r9,25 /* Get actual shift value */ + rlwnm r3,r5,r9,31,31 /* Pass the bit back */ + blr diff --git a/osfmk/ppc/boot.h b/osfmk/ppc/boot.h new file mode 100644 index 000000000..e37a7d660 --- /dev/null +++ b/osfmk/ppc/boot.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include diff --git a/osfmk/ppc/bsd_asm.s b/osfmk/ppc/bsd_asm.s new file mode 100644 index 000000000..c1af2dafc --- /dev/null +++ b/osfmk/ppc/bsd_asm.s @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +/* + * void cthread_set_self(cproc_t p) + * + * set's thread state "user_value" + * + * This op is invoked as follows: + * li r0, CthreadSetSelfNumber // load the fast-trap number + * sc // invoke fast-trap + * blr + * + * Entry: VM switched ON + * Interrupts OFF + * original r1-3 saved in sprg1-3 + * original srr0 and srr1 saved in per_proc_info structure + * original cr saved in per_proc_info structure + * exception type saved in per_proc_info structure + * r1 = scratch + * r2 = virt addr of per_proc_info + * r3 = exception type (one of EXC_...) + * + */ + .text + .align 5 +ENTRY(CthreadSetSelfNumber, TAG_NO_FRAME_USED) + lwz r1, PP_CPU_DATA(r2) + lwz r1, CPU_ACTIVE_THREAD(r1) + lwz r1, THREAD_TOP_ACT(r1) + lwz r1, ACT_MACT_PCB(r1) + + mfsprg r3, 3 + stw r3, CTHREAD_SELF(r1) + + /* Prepare to rfi to the exception exit routine, which is + * in physical address space */ + addis r3, 0, HIGH_CADDR(EXT(exception_exit)) + addi r3, r3, LOW_ADDR(EXT(exception_exit)) + + lwz r3, 0(r3) + mtsrr0 r3 + li r3, MSR_VM_OFF + mtsrr1 r3 + + lwz r3, PP_SAVE_SRR1(r2) /* load the last register... */ + lwz r2, PP_SAVE_SRR0(r2) /* For trampoline */ + lwz r1, PCB_SR0(r1) /* For trampoline... 
*/ + + rfi + + +/* + * ur_cthread_t ur_cthread_self(void) + * + * return thread state "user_value" + * + * This op is invoked as follows: + * li r0, UrCthreadSelfNumber // load the fast-trap number + * sc // invoke fast-trap + * blr + * + * Entry: VM switched ON + * Interrupts OFF + * original r1-3 saved in sprg1-3 + * original srr0 and srr1 saved in per_proc_info structure + * original cr saved in per_proc_info structure + * exception type saved in per_proc_info structure + * r1 = scratch + * r2 = virt addr of per_proc_info + * r3 = exception type (one of EXC_...) + * + */ + .text + .align 5 +ENTRY(UrCthreadSelfNumber, TAG_NO_FRAME_USED) + lwz r1, PP_CPU_DATA(r2) + lwz r1, CPU_ACTIVE_THREAD(r1) + lwz r1, THREAD_TOP_ACT(r1) + lwz r1, ACT_MACT_PCB(r1) + + lwz r3, CTHREAD_SELF(r1) + mtsprg 3, r3 + + + /* Prepare to rfi to the exception exit routine, which is + * in physical address space */ + addis r3, 0, HIGH_CADDR(EXT(exception_exit)) + addi r3, r3, LOW_ADDR(EXT(exception_exit)) + lwz r3, 0(r3) + mtsrr0 r3 + li r3, MSR_VM_OFF + mtsrr1 r3 + + lwz r3, PP_SAVE_SRR1(r2) /* load the last register... */ + lwz r2, PP_SAVE_SRR0(r2) /* For trampoline */ + lwz r1, PCB_SR0(r1) /* For trampoline... */ + + rfi diff --git a/osfmk/ppc/bsd_ppc.c b/osfmk/ppc/bsd_ppc.c new file mode 100644 index 000000000..8918fed40 --- /dev/null +++ b/osfmk/ppc/bsd_ppc.c @@ -0,0 +1,274 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define ERESTART -1 /* restart syscall */ +#define EJUSTRETURN -2 /* don't modify regs, just return */ + +struct unix_syscallargs { + int flavor; + int r3; + int arg1, arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9; +}; +struct sysent { /* system call table */ + unsigned short sy_narg; /* number of args */ + char sy_parallel; /* can execute in parallel */ + char sy_funnel; /* funnel type */ + unsigned long (*sy_call)(void *, void *, int *); /* implementing function */ +}; + +#define KERNEL_FUNNEL 1 +#define NETWORK_FUNNEL 2 + +extern funnel_t * kernel_flock; +extern funnel_t * network_flock; + +extern struct sysent sysent[]; + +void *get_bsdtask_info( + task_t); + +int set_bsduthreadargs ( + thread_act_t, struct pcb *, + struct unix_syscallargs *); + +void * get_bsduthreadarg( + thread_act_t); + +void +unix_syscall( + struct pcb * pcb, + int, int, int, int, int, int, int ); + +/* + * Function: unix_syscall + * + * Inputs: pcb - pointer to Process Control Block + * arg1 - arguments to mach system calls + * arg2 + * arg3 + * arg4 + * arg5 + * arg6 + * arg7 + * + * Outputs: none + */ +void +unix_syscall( + struct pcb * pcb, + int arg1, + int arg2, + int arg3, + int arg4, + int arg5, + int arg6, + int arg7 + ) +{ + struct ppc_saved_state *regs; + thread_act_t thread; + struct sysent *callp; + int nargs, error; + unsigned 
short code; + void * p, *vt; + int * vtint; + int *rval; + int funnel_type; + + struct unix_syscallargs sarg; + extern int nsysent; + + + regs = &pcb->ss; + code = regs->r0; + + thread = current_act(); + p = get_bsdtask_info(current_task()); + rval = (int *)get_bsduthreadrval(thread); + + /* + ** Get index into sysent table + */ + + + /* + ** Set up call pointer + */ + callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; + + sarg. flavor = (callp == sysent)? 1: 0; + if (sarg.flavor) { + code = regs->r3; + callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; + + } + else + sarg. r3 = regs->r3; + + if (code != 180) { + if (sarg.flavor) + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + arg1, arg2, arg3, arg4, 0); + else + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + sarg.r3, arg1, arg2, arg3, 0); + } + sarg. arg1 = arg1; + sarg. arg2 = arg2; + sarg. arg3 = arg3; + sarg. arg4 = arg4; + sarg. arg5 = arg5; + sarg. arg6 = arg6; + sarg. arg7 = arg7; + + if(callp->sy_funnel == NETWORK_FUNNEL) { + (void) thread_funnel_set(network_flock, TRUE); + } + else { + (void) thread_funnel_set(kernel_flock, TRUE); + } + + set_bsduthreadargs(thread,pcb,&sarg); + + + if (callp->sy_narg > 8) + panic("unix_syscall: max arg count exceeded"); + + rval[0] = 0; + + /* r4 is volatile, if we set it to regs->r4 here the child + * will have parents r4 after execve */ + rval[1] = 0; + + error = 0; /* Start with a good value */ + + /* + ** the PPC runtime calls cerror after every unix system call, so + ** assume no error and adjust the "pc" to skip this call. + ** It will be set back to the cerror call if an error is detected. 
+ */ + regs->srr0 += 4; + vt = get_bsduthreadarg(thread); + counter_always(c_syscalls_unix++); + current_task()->syscalls_unix++; + error = (*(callp->sy_call))(p, (void *)vt, rval); + + if (error == ERESTART) { + regs->srr0 -= 8; + } + else if (error != EJUSTRETURN) { + if (error) + { + regs->r3 = error; + /* set the "pc" to execute cerror routine */ + regs->srr0 -= 4; + } else { /* (not error) */ + regs->r3 = rval[0]; + regs->r4 = rval[1]; + } + } + /* else (error == EJUSTRETURN) { nothing } */ + + (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); + + if (code != 180) { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, rval[0], rval[1], 0, 0); + } + + thread_exception_return(); + /* NOTREACHED */ +} + +unix_syscall_return(error) +{ + struct ppc_saved_state *regs; + thread_act_t thread; + struct sysent *callp; + int nargs; + unsigned short code; + int *rval; + void * p, *vt; + int * vtint; + struct pcb *pcb; + + struct unix_syscallargs sarg; + extern int nsysent; + + thread = current_act(); + p = get_bsdtask_info(current_task()); + rval = (int *)get_bsduthreadrval(thread); + pcb = thread->mact.pcb; + regs = &pcb->ss; + + if (thread_funnel_get() == THR_FUNNEL_NULL) + panic("Unix syscall return without funnel held"); + + /* + ** Get index into sysent table + */ + code = regs->r0; + + if (error == ERESTART) { + regs->srr0 -= 8; + } + else if (error != EJUSTRETURN) { + if (error) + { + regs->r3 = error; + /* set the "pc" to execute cerror routine */ + regs->srr0 -= 4; + } else { /* (not error) */ + regs->r3 = rval[0]; + regs->r4 = rval[1]; + } + } + /* else (error == EJUSTRETURN) { nothing } */ + + (void) thread_funnel_set(current_thread()->funnel_lock, FALSE); + + if (code != 180) { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, rval[0], rval[1], 0, 0); + } + + thread_exception_return(); + /* NOTREACHED */ +} + diff --git a/osfmk/ppc/bzero.s b/osfmk/ppc/bzero.s new file mode 100644 
index 000000000..ac0afddf9 --- /dev/null +++ b/osfmk/ppc/bzero.s @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + /* + * @OSF_FREE_COPYRIGHT@ + */ + +#include +#include /* For CACHE_LINE_SIZE */ + +/* + * void bzero(char *addr, unsigned int length) + * + * bzero implementation for PowerPC + * - assumes cacheable memory (i.e. uses DCBZ) + * - assumes non-pic code + * + * returns start address in r3, as per memset (called by memset) + */ + +ENTRY(bzero, TAG_NO_FRAME_USED) + + cmpwi cr0, r4, 0 /* no bytes to zero? */ + mr r7, r3 + mr r8, r3 /* use r8 as counter to where we are */ + beqlr- + cmpwi cr0, r4, CACHE_LINE_SIZE /* clear less than a block? */ + li r0, 0 /* use r0 as source of zeros */ + blt .L_bzeroEndWord + +/* first, clear bytes up to the next word boundary */ + addis r6, 0, HIGH_CADDR(.L_bzeroBeginWord) + addi r6, r6, LOW_ADDR(.L_bzeroBeginWord) + /* extract byte offset as word offset */ + rlwinm. 
r5, r8, 2, 28, 29 + addi r8, r8, -1 /* adjust for update */ + beq .L_bzeroBeginWord /* no bytes to zero */ + subfic r5, r5, 16 /* compute the number of instructions */ + sub r6, r6, r5 /* back from word clear to execute */ + mtctr r6 + bctr + + stbu r0, 1(r8) + stbu r0, 1(r8) + stbu r0, 1(r8) + +/* clear words up to the next block boundary */ +.L_bzeroBeginWord: + addis r6, 0, HIGH_CADDR(.L_bzeroBlock) + addi r6, r6, LOW_ADDR(.L_bzeroBlock) + addi r8, r8, 1 + rlwinm. r5, r8, 0, 27, 29 /* extract word offset */ + addi r8, r8, -4 /* adjust for update */ + beq .L_bzeroBlock /* no words to zero */ + /* compute the number of instructions */ + subfic r5, r5, CACHE_LINE_SIZE + sub r6, r6, r5 /* back from word clear to execute */ + mtctr r6 + bctr + + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + + /* clear cache blocks */ +.L_bzeroBlock: + addi r8, r8, 4 /* remove update adjust */ + sub r5, r8, r7 /* bytes zeroed */ + sub r4, r4, r5 + srwi. r5, r4, CACHE_LINE_POW2 /* blocks to zero */ + beq .L_bzeroEndWord + mtctr r5 + +.L_bzeroBlock1: + dcbz 0, r8 + addi r8, r8, CACHE_LINE_SIZE + bdnz .L_bzeroBlock1 + + /* clear remaining words */ +.L_bzeroEndWord: + addis r6, 0, HIGH_CADDR(.L_bzeroEndByte) + addi r6, r6, LOW_ADDR(.L_bzeroEndByte) + rlwinm. r5, r4, 0, 27, 29 /* extract word offset */ + addi r8, r8, -4 /* adjust for update */ + beq .L_bzeroEndByte /* no words to zero */ + sub r6, r6, r5 /* back from word clear to execute */ + mtctr r6 + bctr + + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + stwu r0, 4(r8) + + /* clear remaining bytes */ +.L_bzeroEndByte: + addis r6, 0, HIGH_CADDR(.L_bzeroEnd) + addi r6, r6, LOW_ADDR(.L_bzeroEnd) + /* extract byte offset as word offset */ + rlwinm. 
r5, r4, 2, 28, 29 + addi r8, r8, 3 /* adjust for update */ + beqlr + sub r6, r6, r5 /* back from word clear to execute */ + mtctr r6 + bctr + + stbu r0, 1(r8) + stbu r0, 1(r8) + stbu r0, 1(r8) + +.L_bzeroEnd: + blr + +/* + * void *memset(void *from, int c, vm_size_t nbytes) + * + * almost everywhere in the kernel + * this appears to be called with argument c==0. We optimise for those + * cases and call bzero if we can. + * + */ + +ENTRY(memset, TAG_NO_FRAME_USED) + + mr. ARG3, ARG1 + mr ARG1, ARG2 + /* optimised case - do a bzero */ + beq+ EXT(bzero) + + /* If count is zero, return straight away */ + cmpi cr0, ARG1, 0 + beqlr- + + /* Now, ARG0 = addr, ARG1=len, ARG3=value */ + + subi ARG2, ARG0, 1 /* use ARG2 as our counter */ + +0: + subi ARG1, ARG1, 1 + cmpi cr0, ARG1, 0 + stbu ARG3, 1(ARG2) + bne+ 0b + + /* Return original address in ARG0 */ + + blr diff --git a/osfmk/ppc/cache.s b/osfmk/ppc/cache.s new file mode 100644 index 000000000..d21ffeed8 --- /dev/null +++ b/osfmk/ppc/cache.s @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include + +#include +#include +#include +#include +#include +#include + +/* + * extern void sync_cache(vm_offset_t pa, unsigned count); + * + * sync_cache takes a physical address and count to sync, thus + * must not be called for multiple virtual pages. + * + * it writes out the data cache and invalidates the instruction + * cache for the address range in question + */ + +ENTRY(sync_cache, TAG_NO_FRAME_USED) + + /* Switch off data translations */ + mfmsr r6 + rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 + mtmsr r7 + isync + + /* Check to see if the address is aligned. */ + add r8, r3,r4 + andi. r8,r8,(CACHE_LINE_SIZE-1) + beq- .L_sync_check + addi r4,r4,CACHE_LINE_SIZE + li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ + andc r4,r4,r7 + andc r3,r3,r7 + +.L_sync_check: + cmpwi r4, CACHE_LINE_SIZE + ble .L_sync_one_line + + /* Make ctr hold count of how many times we should loop */ + addi r8, r4, (CACHE_LINE_SIZE-1) + srwi r8, r8, CACHE_LINE_POW2 + mtctr r8 + + /* loop to flush the data cache */ +.L_sync_data_loop: + subic r4, r4, CACHE_LINE_SIZE + dcbst r3, r4 + bdnz .L_sync_data_loop + + sync + mtctr r8 + + /* loop to invalidate the instruction cache */ +.L_sync_inval_loop: + icbi r3, r4 + addic r4, r4, CACHE_LINE_SIZE + bdnz .L_sync_inval_loop + +.L_sync_cache_done: + sync /* Finish physical writes */ + mtmsr r6 /* Restore original translations */ + isync /* Ensure data translations are on */ + blr + +.L_sync_one_line: + dcbst 0,r3 + sync + icbi 0,r3 + b .L_sync_cache_done + +/* + * extern void flush_dcache(vm_offset_t addr, unsigned count, boolean phys); + * + * flush_dcache takes a virtual or physical address and count to flush + * and (can be called for multiple virtual pages). 
+ * + * it flushes the data cache + * cache for the address range in question + * + * if 'phys' is non-zero then physical addresses will be used + */ + +ENTRY(flush_dcache, TAG_NO_FRAME_USED) + + /* optionally switch off data translations */ + + cmpwi r5, 0 + mfmsr r6 + beq+ 0f + rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 + mtmsr r7 + isync +0: + + /* Check to see if the address is aligned. */ + add r8, r3,r4 + andi. r8,r8,(CACHE_LINE_SIZE-1) + beq- .L_flush_dcache_check + addi r4,r4,CACHE_LINE_SIZE + li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ + andc r4,r4,r7 + andc r3,r3,r7 + +.L_flush_dcache_check: + cmpwi r4, CACHE_LINE_SIZE + ble .L_flush_dcache_one_line + + /* Make ctr hold count of how many times we should loop */ + addi r8, r4, (CACHE_LINE_SIZE-1) + srwi r8, r8, CACHE_LINE_POW2 + mtctr r8 + +.L_flush_dcache_flush_loop: + subic r4, r4, CACHE_LINE_SIZE + dcbf r3, r4 + bdnz .L_flush_dcache_flush_loop + +.L_flush_dcache_done: + /* Sync restore msr if it was modified */ + cmpwi r5, 0 + sync /* make sure invalidates have completed */ + beq+ 0f + mtmsr r6 /* Restore original translations */ + isync /* Ensure data translations are on */ +0: + blr + +.L_flush_dcache_one_line: + xor r4,r4,r4 + dcbf 0,r3 + b .L_flush_dcache_done + + +/* + * extern void invalidate_dcache(vm_offset_t va, unsigned count, boolean phys); + * + * invalidate_dcache takes a virtual or physical address and count to + * invalidate and (can be called for multiple virtual pages). + * + * it invalidates the data cache for the address range in question + */ + +ENTRY(invalidate_dcache, TAG_NO_FRAME_USED) + + /* optionally switch off data translations */ + + cmpwi r5, 0 + mfmsr r6 + beq+ 0f + rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 + mtmsr r7 + isync +0: + + /* Check to see if the address is aligned. */ + add r8, r3,r4 + andi. 
r8,r8,(CACHE_LINE_SIZE-1) + beq- .L_invalidate_dcache_check + addi r4,r4,CACHE_LINE_SIZE + li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ + andc r4,r4,r7 + andc r3,r3,r7 + +.L_invalidate_dcache_check: + cmpwi r4, CACHE_LINE_SIZE + ble .L_invalidate_dcache_one_line + + /* Make ctr hold count of how many times we should loop */ + addi r8, r4, (CACHE_LINE_SIZE-1) + srwi r8, r8, CACHE_LINE_POW2 + mtctr r8 + +.L_invalidate_dcache_invalidate_loop: + subic r4, r4, CACHE_LINE_SIZE + dcbi r3, r4 + dcbi r3, r4 + bdnz .L_invalidate_dcache_invalidate_loop + +.L_invalidate_dcache_done: + /* Sync restore msr if it was modified */ + cmpwi r5, 0 + sync /* make sure invalidates have completed */ + beq+ 0f + mtmsr r6 /* Restore original translations */ + isync /* Ensure data translations are on */ +0: + blr + +.L_invalidate_dcache_one_line: + xor r4,r4,r4 + dcbi 0,r3 + dcbi 0,r3 + b .L_invalidate_dcache_done + +/* + * extern void invalidate_icache(vm_offset_t addr, unsigned cnt, boolean phys); + * + * invalidate_icache takes a virtual or physical address and + * count to invalidate, (can be called for multiple virtual pages). + * + * it invalidates the instruction cache for the address range in question. + */ + +ENTRY(invalidate_icache, TAG_NO_FRAME_USED) + + /* optionally switch off data translations */ + cmpwi r5, 0 + mfmsr r6 + beq+ 0f + rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 + mtmsr r7 + isync +0: + + /* Check to see if the address is aligned. */ + add r8, r3,r4 + andi. 
r8,r8,(CACHE_LINE_SIZE-1) + beq- .L_invalidate_icache_check + addi r4,r4,CACHE_LINE_SIZE + li r7,(CACHE_LINE_SIZE-1) /* Align buffer & count - avoid overflow problems */ + andc r4,r4,r7 + andc r3,r3,r7 + +.L_invalidate_icache_check: + cmpwi r4, CACHE_LINE_SIZE + ble .L_invalidate_icache_one_line + + /* Make ctr hold count of how many times we should loop */ + addi r8, r4, (CACHE_LINE_SIZE-1) + srwi r8, r8, CACHE_LINE_POW2 + mtctr r8 + +.L_invalidate_icache_invalidate_loop: + subic r4, r4, CACHE_LINE_SIZE + icbi r3, r4 + icbi r3, r4 + bdnz .L_invalidate_icache_invalidate_loop + +.L_invalidate_icache_done: + /* Sync restore msr if it was modified */ + cmpwi r5, 0 + sync /* make sure invalidates have completed */ + beq+ 0f + mtmsr r6 /* Restore original translations */ + isync /* Ensure data translations are on */ +0: + blr + +.L_invalidate_icache_one_line: + xor r4,r4,r4 + icbi 0,r3 + icbi 0,r3 + b .L_invalidate_icache_done diff --git a/osfmk/ppc/clock.h b/osfmk/ppc/clock.h new file mode 100644 index 000000000..e4c912349 --- /dev/null +++ b/osfmk/ppc/clock.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _PPC_CLOCK_H_ +#define _PPC_CLOCK_H_ + +#include + +#define CLK_SPEED 0.0000012766 /* time to complete a clock (3 MHz) */ + +#if HZ == 120 +# define CLK_INTERVAL 6528 /* clocks to hit CLK_TCK ticks per sec */ +#elif HZ == 100 +# define CLK_INTERVAL 7833 /* clocks to hit CLK_TCK ticks per sec */ +#elif HZ == 60 +# define CLK_INTERVAL 13055 /* clocks to hit CLK_TCK ticks per sec */ +#else +#error "unknown clock speed" +#endif + /* 6528 for 119.998 Hz. */ + /* 7833 for 100.004 Hz */ + /* 13055 for 60.002 Hz. */ +#define CLK_INTH (CLK_INTERVAL >> 8) +#define CLK_INTL (CLK_INTERVAL & 0xff) + +#define SECDAY ((unsigned)(24*60*60)) +#define SECYR ((unsigned)(365*SECDAY + SECDAY/4)) + +#endif /* _PPC_CLOCK_H_ */ diff --git a/osfmk/ppc/conf.c b/osfmk/ppc/conf.c new file mode 100644 index 000000000..585ef5b73 --- /dev/null +++ b/osfmk/ppc/conf.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#include +#include +#include + +/* + * Clock device subsystem configuration. The clock_list[] + * table contains the clock structures for all clocks in + * the system. + */ + +extern struct clock_ops sysclk_ops, calend_ops; + +/* + * List of clock devices. + */ +struct clock clock_list[] = { + + /* SYSTEM_CLOCK */ + { &sysclk_ops, 0, 0, 0 }, + + /* CALENDAR_CLOCK */ + { &calend_ops, 0, 0, 0 }, +}; +int clock_count = sizeof(clock_list) / sizeof(clock_list[0]); + + diff --git a/osfmk/ppc/console_feed.c b/osfmk/ppc/console_feed.c new file mode 100644 index 000000000..be442911a --- /dev/null +++ b/osfmk/ppc/console_feed.c @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ + +/* Intercept mach console output and supply it to a user application */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if MACH_KDB +#include +#endif /* MACH_KDB */ + +static struct cirbuf cons_feed_cb; +static int cons_feed_count = 0; +io_req_t cons_feed_queued = 0; + +/* console feed lock should be taken at splhigh */ +decl_simple_lock_data(,cons_feed_lock) + +boolean_t cons_feed_read_done(io_req_t ior); + +io_return_t +console_feed_open( + dev_t dev, + dev_mode_t flag, + io_req_t ior) +{ + spl_t s; + + simple_lock_init(&cons_feed_lock, ETAP_IO_TTY); +#if MACH_KDB + if (console_is_serial()) { + return D_DEVICE_DOWN; + } +#endif /* MACH_KDB */ + cb_alloc(&cons_feed_cb, CONSOLE_FEED_BUFSIZE); + s = splhigh(); + simple_lock(&cons_feed_lock); + cons_feed_count++; + simple_unlock(&cons_feed_lock); + splx(s); + return D_SUCCESS; +} + +void +console_feed_close( + dev_t dev) +{ + spl_t s; + + s = splhigh(); + simple_lock(&cons_feed_lock); + cons_feed_count--; + simple_unlock(&cons_feed_lock); + splx(s); + + 
console_feed_cancel_and_flush(); + cb_free(&cons_feed_cb); + + return; +} + +/* A routine that can be called from a panic or other problem + * situation. It switches off the console feed and dumps any + * remaining buffered information to the original console + * (usually the screen). It doesn't free up the buffer, since + * it tries to be as minimal as possible + */ + +void console_feed_cancel_and_flush(void) +{ + int c; + spl_t s; + +#if NCONSFEED > 0 +#if MACH_KDB + if (console_is_serial()) { + return; + } +#endif /* MACH_KDB */ + + s = splhigh(); + simple_lock(&cons_feed_lock); + if (cons_feed_count == 0) { + simple_unlock(&cons_feed_lock); + splx(s); + return; + } + cons_feed_count = 0; + simple_unlock(&cons_feed_lock); + splx(s); + + do { + c = getc(&cons_feed_cb); + if (c == -1) + break; + cnputc(c); + } while (1); +#endif /* NCONSFEED > 0 */ +} + +io_return_t +console_feed_read( + dev_t dev, + io_req_t ior) +{ + spl_t s; + kern_return_t rc; + int count; + + rc = device_read_alloc(ior, (vm_size_t) ior->io_count); + if (rc != KERN_SUCCESS) + return rc; + + s = splhigh(); + simple_lock(&cons_feed_lock); + + ior->io_residual = ior->io_count; + + count = q_to_b(&cons_feed_cb, (char *) ior->io_data, ior->io_count); + if (count == 0) { + if (ior->io_mode & D_NOWAIT) { + rc = D_WOULD_BLOCK; + } + if (cons_feed_queued == NULL) { + ior->io_done = cons_feed_read_done; + cons_feed_queued = ior; + rc = D_IO_QUEUED; + } else { + /* Can't queue multiple read requests yet */ + rc = D_INVALID_OPERATION; + } + simple_unlock(&cons_feed_lock); + splx(s); + return rc; + } + + simple_unlock(&cons_feed_lock); + splx(s); + + ior->io_residual -= count; + + iodone(ior); + + if (ior->io_op & IO_SYNC) { + iowait(ior); + } + + return D_SUCCESS; +} + +/* Called when data is ready and there's a queued-up read waiting */ +boolean_t cons_feed_read_done(io_req_t ior) +{ + spl_t s; + int count; + + s = splhigh(); + simple_lock(&cons_feed_lock); + + count = q_to_b(&cons_feed_cb, (char *) 
ior->io_data, ior->io_count); + if (count == 0) { + if (cons_feed_queued == NULL) { + ior->io_done = cons_feed_read_done; + cons_feed_queued = ior; + } + simple_unlock(&cons_feed_lock); + splx(s); + return FALSE; + } + + simple_unlock(&cons_feed_lock); + splx(s); + + ior->io_residual -= count; + ds_read_done(ior); + + return TRUE; +} + +/* This routine is called from putc() - it should return TRUE if + * the character should be passed on to a physical console, FALSE + * if the feed has intercepted the character. It may be called from + * under interrupt (even splhigh) + */ + +boolean_t console_feed_putc(char c) +{ + spl_t s; + io_req_t ior; + boolean_t retval; + +#if MACH_KDB + if (db_active) { + return TRUE; + } +#endif /* MACH_KDB */ + + retval=TRUE; /* TRUE : character should be displayed now */ + if (!cons_feed_count) { + return TRUE; + } + s = splhigh(); + simple_lock(&cons_feed_lock); + if (!cons_feed_count) { + simple_unlock(&cons_feed_lock); + splx(s); + return TRUE; + } + /* queue up the data if we can */ + if (!putc(c, &cons_feed_cb)) { + /* able to stock the character */ + retval = FALSE; + } + if (cons_feed_queued != NULL) { + /* Queued up request - service it */ + ior = cons_feed_queued; + cons_feed_queued = NULL; + simple_unlock(&cons_feed_lock); + splx(s); + iodone(ior); + retval=FALSE; + } else { + simple_unlock(&cons_feed_lock); + splx(s); + } + return retval; +} diff --git a/osfmk/ppc/console_feed_entries.h b/osfmk/ppc/console_feed_entries.h new file mode 100644 index 000000000..312734a44 --- /dev/null +++ b/osfmk/ppc/console_feed_entries.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + * + */ + +extern io_return_t console_feed_open( + dev_t dev, + dev_mode_t flag, + io_req_t ior); + +extern void console_feed_close( + dev_t dev); + +extern io_return_t console_feed_read( + dev_t dev, + io_req_t ior); + +extern boolean_t console_feed_putc(char c); +extern void console_feed_cancel_and_flush(void); + +#define CONSOLE_FEED_BUFSIZE 4096 diff --git a/osfmk/ppc/cpu.c b/osfmk/ppc/cpu.c new file mode 100644 index 000000000..360c29c6e --- /dev/null +++ b/osfmk/ppc/cpu.c @@ -0,0 +1,801 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * File: ppc/cpu.c + * + * cpu specific routines + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#include + +/* TODO: BOGUS TO BE REMOVED */ +int real_ncpus = 1; + +int wncpu = NCPUS; +resethandler_t resethandler_target; + +#define MMCR0_SUPPORT_MASK 0xf83f1fff +#define MMCR1_SUPPORT_MASK 0xffc00000 +#define MMCR2_SUPPORT_MASK 0x80000000 + +extern int debugger_pending[NCPUS]; +extern int debugger_is_slave[NCPUS]; +extern int debugger_holdoff[NCPUS]; +extern int debugger_sync; + +struct SIGtimebase { + boolean_t avail; + boolean_t ready; + boolean_t done; + AbsoluteTime abstime; +}; + +extern struct SIGtimebase syncClkSpot; + +void cpu_sync_timebase(void); + +kern_return_t +cpu_control( + int slot_num, + processor_info_t info, + unsigned int count) +{ + cpu_type_t cpu_type; + cpu_subtype_t cpu_subtype; + processor_pm_regs_t perf_regs; + processor_control_cmd_t cmd; + boolean_t oldlevel; + + cpu_type = machine_slot[slot_num].cpu_type; + cpu_subtype = machine_slot[slot_num].cpu_subtype; + cmd = (processor_control_cmd_t) info; + + if (count < PROCESSOR_CONTROL_CMD_COUNT) + return(KERN_FAILURE); + + if ( cpu_type != cmd->cmd_cpu_type || + cpu_subtype != cmd->cmd_cpu_subtype) + return(KERN_FAILURE); + + switch (cmd->cmd_op) + { + case PROCESSOR_PM_CLR_PMC: /* Clear Performance Monitor Counters */ + switch (cpu_subtype) + { + case CPU_SUBTYPE_POWERPC_604: + { + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + mtpmc1(0x0); + mtpmc2(0x0); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + case CPU_SUBTYPE_POWERPC_604e: + case CPU_SUBTYPE_POWERPC_750: + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + { + oldlevel = ml_set_interrupts_enabled(FALSE); /* 
disable interrupts */ + mtpmc1(0x0); + mtpmc2(0x0); + mtpmc3(0x0); + mtpmc4(0x0); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + default: + return(KERN_FAILURE); + } /* cpu_subtype */ + case PROCESSOR_PM_SET_REGS: /* Set Performance Monitor Registors */ + switch (cpu_subtype) + { + case CPU_SUBTYPE_POWERPC_604: + if (count < (PROCESSOR_CONTROL_CMD_COUNT + + PROCESSOR_PM_REGS_COUNT_POWERPC_604)) + return(KERN_FAILURE); + else + { + perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); + mtpmc1(PERFMON_PMC1(perf_regs)); + mtpmc2(PERFMON_PMC2(perf_regs)); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + case CPU_SUBTYPE_POWERPC_604e: + case CPU_SUBTYPE_POWERPC_750: + if (count < (PROCESSOR_CONTROL_CMD_COUNT + + PROCESSOR_PM_REGS_COUNT_POWERPC_750)) + return(KERN_FAILURE); + else + { + perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); + mtpmc1(PERFMON_PMC1(perf_regs)); + mtpmc2(PERFMON_PMC2(perf_regs)); + mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK); + mtpmc3(PERFMON_PMC3(perf_regs)); + mtpmc4(PERFMON_PMC4(perf_regs)); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + if (count < (PROCESSOR_CONTROL_CMD_COUNT + + PROCESSOR_PM_REGS_COUNT_POWERPC_7400)) + return(KERN_FAILURE); + else + { + perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); + mtpmc1(PERFMON_PMC1(perf_regs)); + mtpmc2(PERFMON_PMC2(perf_regs)); + mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK); + 
mtpmc3(PERFMON_PMC3(perf_regs)); + mtpmc4(PERFMON_PMC4(perf_regs)); + mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + default: + return(KERN_FAILURE); + } /* switch cpu_subtype */ + case PROCESSOR_PM_SET_MMCR: + switch (cpu_subtype) + { + case CPU_SUBTYPE_POWERPC_604: + if (count < (PROCESSOR_CONTROL_CMD_COUNT + + PROCESSOR_PM_REGS_COUNT_POWERPC_604)) + return(KERN_FAILURE); + else + { + perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; + mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); + return(KERN_SUCCESS); + } + case CPU_SUBTYPE_POWERPC_604e: + case CPU_SUBTYPE_POWERPC_750: + if (count < (PROCESSOR_CONTROL_CMD_COUNT + + PROCESSOR_PM_REGS_COUNT_POWERPC_750)) + return(KERN_FAILURE); + else + { + perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); + mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + if (count < (PROCESSOR_CONTROL_CMD_COUNT + + PROCESSOR_PM_REGS_COUNT_POWERPC_7400)) + return(KERN_FAILURE); + else + { + perf_regs = (processor_pm_regs_t)cmd->cmd_pm_regs; + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + mtmmcr0(PERFMON_MMCR0(perf_regs) & MMCR0_SUPPORT_MASK); + mtmmcr1(PERFMON_MMCR1(perf_regs) & MMCR1_SUPPORT_MASK); + mtmmcr2(PERFMON_MMCR2(perf_regs) & MMCR2_SUPPORT_MASK); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + return(KERN_SUCCESS); + } + default: + return(KERN_FAILURE); + } /* cpu_subtype */ + default: + return(KERN_FAILURE); + } /* switch cmd_op */ +} + +kern_return_t +cpu_info_count( + processor_flavor_t flavor, + unsigned int *count) +{ + cpu_subtype_t cpu_subtype; + + /* + * For now, we just assume that 
all CPUs are of the same type + */ + cpu_subtype = machine_slot[0].cpu_subtype; + switch (flavor) { + case PROCESSOR_PM_REGS_INFO: + switch (cpu_subtype) { + case CPU_SUBTYPE_POWERPC_604: + *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604; + return(KERN_SUCCESS); + + case CPU_SUBTYPE_POWERPC_604e: + case CPU_SUBTYPE_POWERPC_750: + + *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750; + return(KERN_SUCCESS); + + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + + *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400; + return(KERN_SUCCESS); + + default: + *count = 0; + return(KERN_INVALID_ARGUMENT); + } /* switch cpu_subtype */ + + case PROCESSOR_TEMPERATURE: + *count = PROCESSOR_TEMPERATURE_COUNT; + return (KERN_SUCCESS); + + default: + *count = 0; + return(KERN_INVALID_ARGUMENT); + + } +} + +kern_return_t +cpu_info( + processor_flavor_t flavor, + int slot_num, + processor_info_t info, + unsigned int *count) +{ + cpu_subtype_t cpu_subtype; + processor_pm_regs_t perf_regs; + boolean_t oldlevel; + unsigned int temp[2]; + + cpu_subtype = machine_slot[slot_num].cpu_subtype; + + switch (flavor) { + case PROCESSOR_PM_REGS_INFO: + + perf_regs = (processor_pm_regs_t) info; + + switch (cpu_subtype) { + case CPU_SUBTYPE_POWERPC_604: + + if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_604) + return(KERN_FAILURE); + + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + PERFMON_MMCR0(perf_regs) = mfmmcr0(); + PERFMON_PMC1(perf_regs) = mfpmc1(); + PERFMON_PMC2(perf_regs) = mfpmc2(); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + + *count = PROCESSOR_PM_REGS_COUNT_POWERPC_604; + return(KERN_SUCCESS); + + case CPU_SUBTYPE_POWERPC_604e: + case CPU_SUBTYPE_POWERPC_750: + + if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_750) + return(KERN_FAILURE); + + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + PERFMON_MMCR0(perf_regs) = mfmmcr0(); + PERFMON_PMC1(perf_regs) = mfpmc1(); + PERFMON_PMC2(perf_regs) = mfpmc2(); + 
PERFMON_MMCR1(perf_regs) = mfmmcr1(); + PERFMON_PMC3(perf_regs) = mfpmc3(); + PERFMON_PMC4(perf_regs) = mfpmc4(); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + + *count = PROCESSOR_PM_REGS_COUNT_POWERPC_750; + return(KERN_SUCCESS); + + case CPU_SUBTYPE_POWERPC_7400: + case CPU_SUBTYPE_POWERPC_7450: + + if (*count < PROCESSOR_PM_REGS_COUNT_POWERPC_7400) + return(KERN_FAILURE); + + oldlevel = ml_set_interrupts_enabled(FALSE); /* disable interrupts */ + PERFMON_MMCR0(perf_regs) = mfmmcr0(); + PERFMON_PMC1(perf_regs) = mfpmc1(); + PERFMON_PMC2(perf_regs) = mfpmc2(); + PERFMON_MMCR1(perf_regs) = mfmmcr1(); + PERFMON_PMC3(perf_regs) = mfpmc3(); + PERFMON_PMC4(perf_regs) = mfpmc4(); + PERFMON_MMCR2(perf_regs) = mfmmcr2(); + ml_set_interrupts_enabled(oldlevel); /* enable interrupts */ + + *count = PROCESSOR_PM_REGS_COUNT_POWERPC_7400; + return(KERN_SUCCESS); + + default: + return(KERN_FAILURE); + } /* switch cpu_subtype */ + + case PROCESSOR_TEMPERATURE: /* Get the temperature of a processor */ + + disable_preemption(); /* Don't move me now */ + + if(slot_num == cpu_number()) { /* Is this for the local CPU? 
*/ + *info = ml_read_temp(); /* Get the temperature */ + } + else { /* For another CPU */ + temp[0] = -1; /* Set sync flag */ + eieio(); + sync(); + temp[1] = -1; /* Set invalid temperature */ + (void)cpu_signal(slot_num, SIGPcpureq, CPRQtemp ,(unsigned int)&temp); /* Ask him to take his temperature */ + (void)hw_cpu_sync(temp, LockTimeOut); /* Wait for the other processor to get its temperature */ + *info = temp[1]; /* Pass it back */ + } + + enable_preemption(); /* Ok to move now */ + return(KERN_SUCCESS); + + default: + return(KERN_INVALID_ARGUMENT); + + } /* flavor */ +} + +void +cpu_init( + void) +{ + int cpu; + + cpu = cpu_number(); + + machine_slot[cpu].running = TRUE; + machine_slot[cpu].cpu_type = CPU_TYPE_POWERPC; + machine_slot[cpu].cpu_subtype = (cpu_subtype_t)per_proc_info[cpu].pf.rptdProc; + +} + +void +cpu_machine_init( + void) +{ + struct per_proc_info *proc_info; + int cpu; + + /* TODO: realese mutex lock reset_handler_lock */ + + cpu = cpu_number(); + proc_info = &per_proc_info[cpu]; + PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone)); + if (cpu != master_cpu) + cpu_sync_timebase(); + ml_init_interrupt(); + proc_info->cpu_flags |= BootDone; +} + +kern_return_t +cpu_register( + int *target_cpu +) +{ + int cpu; + + /* + * TODO: + * - Run cpu_register() in exclusion mode + */ + + *target_cpu = -1; + for(cpu=0; cpu < wncpu; cpu++) { + if(!machine_slot[cpu].is_cpu) { + machine_slot[cpu].is_cpu = TRUE; + *target_cpu = cpu; + break; + } + } + if (*target_cpu != -1) { + real_ncpus++; + return KERN_SUCCESS; + } else + return KERN_FAILURE; +} + +kern_return_t +cpu_start( + int cpu) +{ + struct per_proc_info *proc_info; + kern_return_t ret; + + extern void (*exception_handlers[])(void); + extern vm_offset_t intstack; + extern vm_offset_t debstack; + + proc_info = &per_proc_info[cpu]; + + if (cpu == cpu_number()) { + PE_cpu_machine_init(proc_info->cpu_id, !(proc_info->cpu_flags & BootDone)); + ml_init_interrupt(); + 
proc_info->cpu_flags |= BootDone; + + return KERN_SUCCESS; + } else { + extern void _start_cpu(void); + + proc_info->cpu_number = cpu; + proc_info->cpu_flags &= BootDone; + proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state); + proc_info->intstack_top_ss = proc_info->istackptr; +#if MACH_KDP || MACH_KDB + proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state); + proc_info->debstack_top_ss = proc_info->debstackptr; +#endif /* MACH_KDP || MACH_KDB */ + proc_info->get_interrupts_enabled = fake_get_interrupts_enabled; + proc_info->set_interrupts_enabled = fake_set_interrupts_enabled; + proc_info->active_kloaded = (unsigned int)&active_kloaded[cpu]; + proc_info->cpu_data = (unsigned int)&cpu_data[cpu]; + proc_info->active_stacks = (unsigned int)&active_stacks[cpu]; + proc_info->need_ast = (unsigned int)&need_ast[cpu]; + proc_info->FPU_thread = 0; + proc_info->FPU_vmmCtx = 0; + proc_info->VMX_thread = 0; + proc_info->VMX_vmmCtx = 0; + + if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) { + + /* TODO: get mutex lock reset_handler_lock */ + + resethandler_target.type = RESET_HANDLER_START; + resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu); + resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info); + + ml_phys_write((vm_offset_t)&ResetHandler + 0, + resethandler_target.type); + ml_phys_write((vm_offset_t)&ResetHandler + 4, + resethandler_target.call_paddr); + ml_phys_write((vm_offset_t)&ResetHandler + 8, + resethandler_target.arg__paddr); + + } +/* + * Note: we pass the current time to the other processor here. He will load it + * as early as possible so that there is a chance that it is close to accurate. + * After the machine is up a while, we will officially resync the clocks so + * that all processors are the same. This is just to get close. 
+ */ + + ml_get_timebase(&proc_info->ruptStamp); /* Pass our current time to the other guy */ + + __asm__ volatile("sync"); /* Commit to storage */ + __asm__ volatile("isync"); /* Wait a second */ + ret = PE_cpu_start(proc_info->cpu_id, + proc_info->start_paddr, (vm_offset_t)proc_info); + + if (ret != KERN_SUCCESS && + proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) { + + /* TODO: realese mutex lock reset_handler_lock */ + } + return(ret); + } +} + +/* + * Here is where we implement the receiver of the signaling protocol. + * We wait for the signal status area to be passed to us. Then we snarf + * up the status, the sender, and the 3 potential parms. Next we release + * the lock and signal the other guy. + */ + +void +cpu_signal_handler( + void) +{ + + unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype; + unsigned int *parmAddr; + struct per_proc_info *pproc; /* Area for my per_proc address */ + int cpu; + struct SIGtimebase *timebaseAddr; + natural_t tbu, tbu2, tbl; + + cpu = cpu_number(); /* Get the CPU number */ + pproc = &per_proc_info[cpu]; /* Point to our block */ + +/* + * Since we've been signaled, wait just under 1ms for the signal lock to pass + */ + if(!hw_lock_mbits(&pproc->MPsigpStat, MPsigpMsgp, (MPsigpBusy | MPsigpPass), + (MPsigpBusy | MPsigpPass), (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) { + panic("cpu_signal_handler: Lock pass timed out\n"); + } + + holdStat = pproc->MPsigpStat; /* Snarf stat word */ + holdParm0 = pproc->MPsigpParm0; /* Snarf parameter */ + holdParm1 = pproc->MPsigpParm1; /* Snarf parameter */ + holdParm2 = pproc->MPsigpParm2; /* Snarf parameter */ + + __asm__ volatile("isync"); /* Make sure we don't unlock until memory is in */ + + pproc->MPsigpStat = holdStat & ~(MPsigpMsgp | MPsigpFunc); /* Release lock */ + + switch ((holdStat & MPsigpFunc) >> 8) { /* Decode function code */ + + case MPsigpIdle: /* Was function cancelled? */ + return; /* Yup... */ + + case MPsigpSigp: /* Signal Processor message? 
*/ + + switch (holdParm0) { /* Decode SIGP message order */ + + case SIGPast: /* Should we do an AST? */ + pproc->numSIGPast++; /* Count this one */ +#if 0 + kprintf("cpu_signal_handler: AST check on cpu %x\n", cpu_number()); +#endif + ast_check(); /* Yes, do it */ + /* XXX: Should check if AST_URGENT is needed */ + ast_on(AST_URGENT); + return; /* All done... */ + + case SIGPcpureq: /* CPU specific function? */ + + pproc->numSIGPcpureq++; /* Count this one */ + switch (holdParm1) { /* Select specific function */ + + case CPRQtemp: /* Get the temperature */ + parmAddr = (unsigned int *)holdParm2; /* Get the destination address */ + parmAddr[1] = ml_read_temp(); /* Get the core temperature */ + eieio(); /* Force order */ + sync(); /* Force to memory */ + parmAddr[0] = 0; /* Show we're done */ + return; + + case CPRQtimebase: + + timebaseAddr = (struct SIGtimebase *)holdParm2; + + if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL) + pproc->time_base_enable(pproc->cpu_id, FALSE); + + timebaseAddr->abstime.hi = 0; /* Touch to force into cache */ + sync(); + + do { + asm volatile(" mftbu %0" : "=r" (tbu)); + asm volatile(" mftb %0" : "=r" (tbl)); + asm volatile(" mftbu %0" : "=r" (tbu2)); + } while (tbu != tbu2); + + timebaseAddr->abstime.lo = tbl; /* Set low order */ + timebaseAddr->abstime.hi = tbu; /* Set high order */ + sync(); /* Force order */ + + timebaseAddr->avail = TRUE; + + while (*(volatile int *)&(syncClkSpot.ready) == FALSE); + + if(pproc->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL) + pproc->time_base_enable(pproc->cpu_id, TRUE); + + timebaseAddr->done = TRUE; + + return; + + default: + panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1); + return; + } + + + case SIGPdebug: /* Enter the debugger? 
*/ + + pproc->numSIGPdebug++; /* Count this one */ + debugger_is_slave[cpu]++; /* Bump up the count to show we're here */ + hw_atomic_sub(&debugger_sync, 1); /* Show we've received the 'rupt */ + __asm__ volatile("tw 4,r3,r3"); /* Enter the debugger */ + return; /* All done now... */ + + case SIGPwake: /* Wake up CPU */ + pproc->numSIGPwake++; /* Count this one */ + return; /* No need to do anything, the interrupt does it all... */ + + default: + panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0); + return; + + } + + default: + panic("cpu_signal_handler: unknown SIGP function - %08X\n", (holdStat & MPsigpFunc) >> 8); + return; + + } + panic("cpu_signal_handler: we should never get here\n"); +} + +/* + * Here is where we send a message to another processor. So far we only have two: + * SIGPast and SIGPdebug. SIGPast is used to preempt and kick off threads (this is + * currently disabled). SIGPdebug is used to enter the debugger. + * + * We set up the SIGP function to indicate that this is a simple message and set the + * order code (MPsigpParm0) to SIGPast or SIGPdebug). After finding the per_processor + * block for the target, we lock the message block. Then we set the parameter(s). + * Next we change the lock (also called "busy") to "passing" and finally signal + * the other processor. Note that we only wait about 1ms to get the message lock. + * If we time out, we return failure to our caller. It is their responsibility to + * recover. 
+ */ + +kern_return_t +cpu_signal( + int target, + int signal, + unsigned int p1, + unsigned int p2) +{ + + unsigned int holdStat, holdParm0, holdParm1, holdParm2, mtype; + struct per_proc_info *tpproc, *mpproc; /* Area for per_proc addresses */ + int cpu; + +#if DEBUG + if(target > NCPUS) panic("cpu_signal: invalid target CPU - %08X\n", target); +#endif + + cpu = cpu_number(); /* Get our CPU number */ + if(target == cpu) return KERN_FAILURE; /* Don't play with ourselves */ + if(!machine_slot[target].running) return KERN_FAILURE; /* These guys are too young */ + + mpproc = &per_proc_info[cpu]; /* Point to our block */ + tpproc = &per_proc_info[target]; /* Point to the target's block */ + + if(!hw_lock_mbits(&tpproc->MPsigpStat, MPsigpMsgp, 0, MPsigpBusy, + (gPEClockFrequencyInfo.bus_clock_rate_hz >> 7))) { /* Try to lock the message block */ + return KERN_FAILURE; /* Timed out, take your ball and go home... */ + } + + holdStat = MPsigpBusy | MPsigpPass | (MPsigpSigp << 8) | cpu; /* Set up the signal status word */ + tpproc->MPsigpParm0 = signal; /* Set message order */ + tpproc->MPsigpParm1 = p1; /* Set additional parm */ + tpproc->MPsigpParm2 = p2; /* Set additional parm */ + + __asm__ volatile("sync"); /* Make sure it's all there */ + + tpproc->MPsigpStat = holdStat; /* Set status and pass the lock */ + __asm__ volatile("eieio"); /* I'm a paraniod freak */ + + PE_cpu_signal(mpproc->cpu_id, tpproc->cpu_id); /* Kick the other processor */ + + return KERN_SUCCESS; /* All is goodness and rainbows... 
*/ +} + +void +cpu_doshutdown( + void) +{ + processor_doshutdown(current_processor()); +} + +void +cpu_sleep( + void) +{ + struct per_proc_info *proc_info; + unsigned int cpu; + extern void (*exception_handlers[])(void); + extern vm_offset_t intstack; + extern vm_offset_t debstack; + extern void _restart_cpu(void); + + cpu = cpu_number(); +#if 0 + kprintf("******* About to sleep cpu %d\n", cpu); +#endif + + proc_info = &per_proc_info[cpu]; + + if (proc_info->cpu_number == 0) { + proc_info->cpu_flags &= BootDone; + proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state); + proc_info->intstack_top_ss = proc_info->istackptr; +#if MACH_KDP || MACH_KDB + proc_info->debstackptr = (vm_offset_t)&debstack + (KERNEL_STACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state); + proc_info->debstack_top_ss = proc_info->debstackptr; +#endif /* MACH_KDP || MACH_KDB */ + proc_info->get_interrupts_enabled = fake_get_interrupts_enabled; + proc_info->set_interrupts_enabled = fake_set_interrupts_enabled; + proc_info->FPU_thread = 0; + + if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) { + extern void _start_cpu(void); + + resethandler_target.type = RESET_HANDLER_START; + resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu); + resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info); + + ml_phys_write((vm_offset_t)&ResetHandler + 0, + resethandler_target.type); + ml_phys_write((vm_offset_t)&ResetHandler + 4, + resethandler_target.call_paddr); + ml_phys_write((vm_offset_t)&ResetHandler + 8, + resethandler_target.arg__paddr); + + __asm__ volatile("sync"); + __asm__ volatile("isync"); + } + } + + PE_cpu_machine_quiesce(proc_info->cpu_id); +} + +void +cpu_sync_timebase( + void) +{ + natural_t tbu, tbl; + boolean_t intr; + + intr = ml_set_interrupts_enabled(FALSE); /* No interruptions in here */ + + /* Note that syncClkSpot is in a cache aligned area */ + syncClkSpot.avail = FALSE; + syncClkSpot.ready = FALSE; + 
syncClkSpot.done = FALSE; + + while (cpu_signal(master_cpu, SIGPcpureq, CPRQtimebase, (unsigned int)&syncClkSpot) + != KERN_SUCCESS); + + + while (*(volatile int *)&(syncClkSpot.avail) == FALSE); + isync(); + + /* + * We do the following to keep the compiler from generating extra stuff + * in tb set part + */ + tbu = syncClkSpot.abstime.hi; + tbl = syncClkSpot.abstime.lo; + + mttb(0); + mttbu(tbu); + mttb(tbl); + + syncClkSpot.ready = TRUE; + + while (*(volatile int *)&(syncClkSpot.done) == FALSE); + + (void)ml_set_interrupts_enabled(intr); +} diff --git a/osfmk/ppc/cpu_data.h b/osfmk/ppc/cpu_data.h new file mode 100644 index 000000000..ebaf57d90 --- /dev/null +++ b/osfmk/ppc/cpu_data.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef PPC_CPU_DATA +#define PPC_CPU_DATA + +#if defined(__GNUC__) + +#define disable_preemption _disable_preemption +#define enable_preemption _enable_preemption +#define enable_preemption_no_check _enable_preemption_no_check +#define mp_disable_preemption _disable_preemption +#define mp_enable_preemption _enable_preemption +#define mp_enable_preemption_no_check _enable_preemption_no_check + +extern thread_t current_thread(void); +extern int get_preemption_level(void); +extern void disable_preemption(void); +extern void enable_preemption(void); +extern void enable_preemption_no_check(void); +extern void mp_disable_preemption(void); +extern void mp_enable_preemption(void); +extern void mp_enable_preemption_no_check(void); +extern int get_simple_lock_count(void); +#endif /* defined(__GNUC__) */ + +#endif /* PPC_CPU_DATA */ diff --git a/osfmk/ppc/cpu_number.h b/osfmk/ppc/cpu_number.h new file mode 100644 index 000000000..6aea4245d --- /dev/null +++ b/osfmk/ppc/cpu_number.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#ifndef _PPC_CPU_NUMBER_H_ +#define _PPC_CPU_NUMBER_H_ + +extern int cpu_number(void); + +#endif /* _PPC_CPU_NUMBER_H_ */ diff --git a/osfmk/ppc/cswtch.s b/osfmk/ppc/cswtch.s new file mode 100644 index 000000000..d9ef06022 --- /dev/null +++ b/osfmk/ppc/cswtch.s @@ -0,0 +1,2638 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include +#include +#include +#include +#include + +#define FPVECDBG 0 +#define GDDBG 0 + + .text + +/* + * void load_context(thread_t thread) + * + * Load the context for the first kernel thread, and go. + * + * NOTE - if DEBUG is set, the former routine is a piece + * of C capable of printing out debug info before calling the latter, + * otherwise both entry points are identical. + */ + +ENTRY2(load_context, Load_context, TAG_NO_FRAME_USED) + +/* + * Since this is the first thread, we came in on the interrupt + * stack. 
The first thread never returns, so there is no need to + * worry about saving its frame, hence we can reset the istackptr + * back to the saved_state structure at it's top + */ + + +/* + * get new thread pointer and set it into the active_threads pointer + * + */ + + mfsprg r6,0 + lwz r0,PP_INTSTACK_TOP_SS(r6) + lwz r11,PP_CPU_DATA(r6) + stw r0,PP_ISTACKPTR(r6) + stw r3,CPU_ACTIVE_THREAD(r11) + +/* Find the new stack and store it in active_stacks */ + + lwz r12,PP_ACTIVE_STACKS(r6) + lwz r1,THREAD_KERNEL_STACK(r3) + lwz r9,THREAD_TOP_ACT(r3) /* Point to the active activation */ + stw r1,0(r12) + li r0,0 /* Clear a register */ + lwz r8,ACT_MACT_PCB(r9) /* Get the savearea used */ + lwz r10,SAVflags(r8) /* Get the savearea flags */ + rlwinm r7,r8,0,0,19 /* Switch to savearea base */ + lwz r11,SAVprev(r8) /* Get the previous savearea */ + mfmsr r5 /* Since we are passing control, get our MSR values */ + lwz r1,saver1(r8) /* Load new stack pointer */ + rlwinm r10,r10,0,1,31 /* Remove the attached flag */ + stw r0,saver3(r8) /* Make sure we pass in a 0 for the continuation */ + lwz r7,SACvrswap(r7) /* Get the translation from virtual to real */ + stw r0,FM_BACKPTR(r1) /* zero backptr */ + stw r5,savesrr1(r8) /* Pass our MSR to the new guy */ + stw r10,SAVflags(r8) /* Pass back the flags */ + xor r3,r7,r8 /* Get the physical address of the new context save area */ + stw r11,ACT_MACT_PCB(r9) /* Unstack our savearea */ + b EXT(exception_exit) /* Go end it all... */ + +/* struct thread_shuttle *Switch_context(struct thread_shuttle *old, + * void (*cont)(void), + * struct thread_shuttle *new) + * + * Switch from one thread to another. If a continuation is supplied, then + * we do not need to save callee save registers. 
+ * + */ + +/* void Call_continuation( void (*continuation)(void), vm_offset_t stack_ptr) + */ + +ENTRY(Call_continuation, TAG_NO_FRAME_USED) + mtlr r3 + mr r1, r4 /* Load new stack pointer */ + blr /* Jump to the continuation */ + +/* + * Get the old kernel stack, and store into the thread structure. + * See if a continuation is supplied, and skip state save if so. + * NB. Continuations are no longer used, so this test is omitted, + * as should the second argument, but it is in generic code. + * We always save state. This does not hurt even if continuations + * are put back in. + */ + +/* Context switches are double jumps. We pass the following to the + * context switch firmware call: + * + * R3 = switchee's savearea + * R4 = old thread + * R5 = new SRR0 + * R6 = new SRR1 + * + * savesrr0 is set to go to switch_in + * savesrr1 is set to uninterruptible with translation on + */ + + +ENTRY(Switch_context, TAG_NO_FRAME_USED) + + mfsprg r6,0 /* Get the per_proc block */ + lwz r12,PP_ACTIVE_STACKS(r6) +#if DEBUG + lwz r11,PP_ISTACKPTR(r6) ; (DEBUG/TRACE) make sure we are not + mr. r11,r11 ; (DEBUG/TRACE) on the interrupt + bne+ notonintstack ; (DEBUG/TRACE) stack + BREAKPOINT_TRAP +notonintstack: +#endif + stw r4,THREAD_CONTINUATION(r3) + cmpwi cr1,r4,0 /* used waaaay down below */ + lwz r11,0(r12) + stw r11,THREAD_KERNEL_STACK(r3) +/* + * Make the new thread the current thread. 
+ */ + + lwz r11,PP_CPU_DATA(r6) + stw r5, CPU_ACTIVE_THREAD(r11) + + lwz r11,THREAD_KERNEL_STACK(r5) + + lwz r5,THREAD_TOP_ACT(r5) + lwz r10,PP_ACTIVE_STACKS(r6) + lwz r7,CTHREAD_SELF(r5) ; Pick up the user assist word + lwz r8,ACT_MACT_PCB(r5) /* Get the PCB for the new guy */ + + stw r11,0(r10) ; Save the kernel stack address + stw r7,UAW(r6) ; Save the assist word for the "ultra fast path" + + lwz r7,ACT_MACT_SPF(r5) ; Get the special flags + + lwz r10,ACT_KLOADED(r5) + li r0,0 + cmpwi cr0,r10,0 + lwz r10,PP_ACTIVE_KLOADED(r6) + stw r7,spcFlags(r6) ; Set per_proc copy of the special flags + beq cr0,.L_sw_ctx_not_kld + + stw r5,0(r10) + b .L_sw_ctx_cont + +.L_sw_ctx_not_kld: + stw r0,0(r10) /* act_kloaded = 0 */ + +.L_sw_ctx_cont: + lis r10,hi16(EXT(trcWork)) ; Get top of trace mask + rlwinm r7,r8,0,0,19 /* Switch to savearea base */ + ori r10,r10,lo16(EXT(trcWork)) ; Get bottom of mask + lwz r11,SAVprev(r8) /* Get the previous of the switchee's savearea */ + lwz r10,traceMask(r10) ; Get the enabled traces + lis r0,hi16(CutTrace) ; Trace FW call + mr. r10,r10 ; Any tracing going on? + ori r0,r0,lo16(CutTrace) ; Trace FW call + beq+ cswNoTrc ; No trace today, dude... 
+ mr r10,r3 ; Save across trace + lwz r2,THREAD_TOP_ACT(r3) ; Trace old activation + mr r3,r11 ; Trace prev savearea + sc ; Cut trace entry of context switch + mr r3,r10 ; Restore + +cswNoTrc: mfmsr r6 /* Get the MSR because the switched to thread should inherit it */ + lwz r7,SACvrswap(r7) /* Get the translation from virtual to real */ + lis r0,hi16(SwitchContextCall) /* Top part of switch context */ + lis r9,hi16(EXT(switch_in)) /* Get top of switch in routine */ + stw r11,ACT_MACT_PCB(r5) /* Dequeue the savearea we're switching to */ + + rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 /* Turn off the FP */ + ori r9,r9,lo16(EXT(switch_in)) /* Bottom half of switch in */ + lwz r5,savesrr0(r8) /* Set up the new SRR0 */ + rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 /* Turn off the vector */ + mr r4,r3 /* Save our old thread to pass back */ + stw r9,savesrr0(r8) /* Make us jump to the switch in routine */ + li r10,MSR_SUPERVISOR_INT_OFF /* Get the switcher's MSR */ + lwz r9,SAVflags(r8) /* Get the flags */ + stw r10,savesrr1(r8) /* Set up for switch in */ + rlwinm r9,r9,0,15,13 /* Reset the syscall flag */ + ori r0,r0,lo16(SwitchContextCall) /* Bottom part of switch context */ + rlwinm r9,r9,0,1,31 /* Clear the attached flag */ + xor r3,r7,r8 /* Get the physical address of the new context save area */ + stw r9,SAVflags(r8) /* Set the flags */ +/* if blocking on continuation avoid saving state */ + bne cr1,1f + sc /* Switch to the new context */ + +/* We come back here in the new thread context + * R4 was set to hold the old thread pointer, but switch_in will put it into + * R3 where it belongs. + */ + blr /* Jump into the new thread */ + +1: stw r5,savesrr0(r8) /* go to real pc */ + stw r4,saver3(r8) /* must pass back old thread */ + b EXT(exception_exit) /* blocking on continuation, avoid state save */ + + + +/* + * All switched to threads come here first to clean up the old thread. + * We need to do the following contortions because we need to keep + * the LR clean. 
And because we need to manipulate the savearea chain + * with translation on. If we could, this should be done in lowmem_vectors + * before translation is turned on. But we can't, dang it! + * + * R3 = switcher's savearea + * saver4 = old thread in switcher's save + * saver5 = new SRR0 in switcher's save + * saver6 = new SRR1 in switcher's save + + + */ + +ENTRY(switch_in, TAG_NO_FRAME_USED) + + lwz r4,saver4(r3) /* Get the old thread */ + li r8,MSR_VM_OFF /* Set to everything off */ + lwz r9,THREAD_TOP_ACT(r4) /* Get the switched from ACT */ + lwz r5,saver5(r3) /* Get the srr0 value */ + lwz r10,ACT_MACT_PCB(r9) /* Get the top PCB on the old thread */ + lwz r6,saver6(r3) /* Get the srr1 value */ + + stw r3,ACT_MACT_PCB(r9) /* Put the new one on top */ + stw r10,SAVprev(r3) /* Chain on the old one */ + + mr r3,r4 /* Pass back the old thread */ + + mtsrr0 r5 /* Set return point */ + mtsrr1 r6 /* Set return MSR */ + rfi /* Jam... */ + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + + +/* + * void fpu_save(void) + * + * To do the floating point and VMX, we keep three thread pointers: one + * to the current thread, one to the thread that has the floating point context + * loaded into the FPU registers, and one for the VMX owner. + * + * Each of these threads has three PCB pointers. The normal PCB, the FPU pcb, + * and the VMX pcb. There is also a bit for each in the savearea flags. + * When we take an exception, or need to use the FPU/VMX in the kernel, we call + * this routine. It checks to see if there is an owner thread for the facility. + * If so, it saves the facility's state information in the normal PCB. Then, it + * turns on the appropriate flag in the savearea to indicate that the state is + * in that particular savearea. Also, the thread pointer for the owner in + * the per_processor block is cleared. 
Note that we don't have to worry about the + * PCB pointers in the thread because whenever the state is loaded, the associated + * savearea is released and the pointer cleared. This is done so that the facility + * context always migrates to the normal savearea/PCB. This always insures that + * no more than 2 saveareas are used for a thread. + * + * When the context is loaded into the facility, the associated PCB is released if + * its usage flags indicate that it is empty. (Note that return from exception and + * context switch honor these flags and won't release a savearea if there is unrestored + * facility context.) The per_processor is set to point to the facility owner's + * thread and the associated PCB pointer within the thread is cleared because + * the PCB has been released. + * + * Part of loading a context is to release the savearea. If the savearea contains + * other context, the savearea cannot be released. So, what we're left with is + * that there will be no normal context savearea, but one for the as-not-yet + * restored facility savearea. Again, when that context is reloaded, the PCB + * is released, and when it is again stored, it goes into the "normal" savearea. + * + * So, what do we do when there is no general context, and we have some FPU/VMX + * state to save? Heck if I know, but it happens when we switch threads when + * we shortcut system calls. The question is: does the target thread carry the + * FPU/VMX context with it or not? Actually, it don't matter, not one little bit. + * If we are asked to save it, we gotta. It's a really lousy way to do it, but + * short of starting over with FPUs, it's what's what. Therefore, we'll + * allocate an FPU context save and attach it. + * + * Actually, it's not quite that simple: since we aren't in + * in interrupt handler context (that's only in fpu_switch) we can't use + * quickfret to merge FPU into general context. So, if there is an FPU + * savearea, we need to use that. 
So what we do is: if there is FPU context + * use that. If there is a general context, then use that. If neither, + * allocate a savearea and make that the FPU context. + * + * The next thing we have to do is to allow the kernel to use both the + * floating point and Altivec. It is not recommended, but there may be a + * good reason to do so. So, what we need to do is to treat each of the + * three types of context the same, by keeping a LIFO chain of states. + * We do have a problem with that in that there can be multiple levels of + * kernel context. For example, we are using floating point and we take a + * page fault, and somehow start using the FPU, and take another page fault, + * etc. + * + * Anyway, we will hope that we only reasonably use floating point and vectors in + * the kernel. And try to pack the context in as few saveareas as possible. + * + * The way we keep these "levels" of floating point or vector context straight is + * to remember the top of the normal savearea chain when we activate the + * facility when it is first used. Then, when we save that context, this value + * is saved in its level field. + * + * What the level concept gives us is a way to distinguish between multiple + * independent contexts under the same thread activation. Any time we take + * any kind of interruption (trap, system call, I/O interruption), we are, + * in effect, running with a different context even though we are in the + * same thread. The top savearea address is used only as a marker. It does not + * point to any context associated with the float or vector context. For example, + * the top savearea pointer will always be 0 for the user context, because there + * it it always last on the list. + * + * As normal context is unstacked, the first facility context is checked and + * if there is a match, the facility savearea is released. This is because we + * are returning to a level before the facility saved there was used. 
In effect, + * this allows us to unwind the facility context saveareas at different rates. + * + * In conjunction with the current activation, these markers are also used to + * determine the state of the facility enablement. Whenever the facility context is + * "live," i.e., loaded in the hardware registers and belonging to the currently + * running context, the facility is enabled before dispatch. + * + * There is nothing special about using floating point or vector facilities, + * no preliminary saving, enabling, or disabling. You just use them. The only exception + * is during context switching on an SMP system. In this case, the context must + * be saved as there is no guarantee that the thread will resume on the same + * processor. This is not a good thing, not at all. + * + * Whenever we switch out a thread with a dirty context, we always need to save it + * because it can wake up on a different processor. However, once the context has + * been saved, we don't need to save it again until it gets dirty, nor do we need + * to reload it unless someone else's context has been loaded. To handle this + * optimization, we need 3 things. We need to know what processor the saved context + * was last loaded on, whether the loaded context could be dirty, and if we've already + * saved it. + * + * Whenever the facility is enabled, the processor ID is saved in the activation. This + * will show which processor has dirty data. When a context switch occurs, the facility + * contexts are saved, but are still remembered as live. The next time we need to + * context switch, we first check if the state is live, and if not, do no state + * saving. Then we check if the state has already been save and if not, save it. + * The facility is always disabled on a context switch. On a UP, this save function + * does not occur. + * + * Whenever a facility unavailable interruption occurs, the current state is saved + * if it is live and unsaved. 
However, if the live state is the same as the new + * one to be loaded, the processor ID is checked and if it is the current processor + * the state does not need to be loaded or saved. The facility is simply enabled. + * + * Once allocated, facility saveareas are not released until a return is made to a + * previous level. Once a facility has been enabled, there is no way to tell if + * it will ever be used again, but it is likely. Therefore, discarding a savearea + * when its context is made live is extra overhead. So, we don't do it, but we + * do mark the savearea contents as invalid. + * + */ + +/* +; The following is the actual way it is implemented. It doesn't quite match +; the above text. I need to go and fix that. +; +; Context download (operates on owner's data): +; +; 0) enable facility +; 1) if no owner exit to context restore +; 2) if context processor != current processor exit to context restore +; 3) if current activation == owner activation: +; 1) if curr level == active level: +; 1) if top facility savearea exists: +; invalidate savearea by setting level to 1 +; 2) enable facility for user +; 3) exit +; +; 2) else go to 5 +; +; 4) if curr level == active level: +; 1) if top facility savearea exists: +; 1) if top save level == active level exit to context restore +; +; 5) allocate savearea +; 1) if there is a facility save and it is invalid, select it, and break +; 2) scan normal list for free facility area, select if found, and break +; 3) scan other facility for free save: select, if found, and break +; 4) allocate a new save area +; +; 6) save context +; 7) mark facility save with curr level +; 8) if reusing cached savearea (case #1) exit to context restore +; 9) set facility save backchain to facility top savearea +; 10) set facility top to savearea +; 11) exit to context restore +; +; +; Context restore/upload (operates on current activation's data): +; +; 1) set current to activation +; 2) set active level to current level +; 3) set context 
processor to current processor +; 4) if no facility savearea or top save level != curr level +; initialize facility registers to empty value +; 5) else +; 1) load registers from savearea +; 2) invalidate save area by setting level to 1 +; +; 6) enable facility for user +; 7) exit to interrupt return +; +; +; Context save (operates on current activation's data; only used during context switch): +; (context switch always disables the facility) +; +; 1) if no owner exit +; 2) if owner != current activation exit +; 3) if context processor != current processor +; 1) clear owner +; 2) exit +; +; 4) if facility top savearea level exists and == active level exit +; 5) if curr level != active level exit +; 6) allocate savearea +; 1) if there is a facility save and it is invalid, select it, and break +; 2) scan normal list for free facility area, select if found, and break +; 3) scan other facility for free save: select, if found, and break +; 4) allocate a new save area +; 7) save context +; 8) mark facility savearea with curr level +; 9) if reusing cached savearea (case #1) exit +; 10) set facility save backchain to facility top savearea +; 11) set facility top to savearea +; 12) exit +; +; +; Exception exit (hw_exceptions): +; +; 1) disable return facility +; 2) if returning savearea != active level +; 1) if owner != current activation exit +; 2) if context processor != current processor: +; 1) clear owner +; 2) exit +; +; 3) if new level != active level exit +; 4) enable return facility +; 5) exit +; +; 3) if no facility savearea exit +; 4) if top save level == active or top is invalid +; 1) dequeue top facility savearea +; 2) set active level to new top savearea's level +; 3) release savearea +; 4) if owner == current activation clear owner +; 5) exit +; +; +; +; +; if (owner == activation) && (curr level == active level) +; && (activation processor == current processor) ::= context live +*/ + +ENTRY(fpu_save, TAG_NO_FRAME_USED) + + mfmsr r0 ; Get the MSR + rlwinm 
r0,r0,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point forever + rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; But do interrupts only for now + ori r2,r2,MASK(MSR_FP) ; Enable the floating point feature for now also + mtmsr r2 ; Set the MSR + isync + + mfsprg r6,0 ; Get the per_processor block + lwz r12,PP_FPU_THREAD(r6) ; Get the thread that owns the FPU +#if FPVECDBG + mr r7,r0 ; (TEST/DEBUG) + li r4,0 ; (TEST/DEBUG) + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + mr. r3,r12 ; (TEST/DEBUG) + li r2,0x6F00 ; (TEST/DEBUG) + li r5,0 ; (TEST/DEBUG) + beq- noowneryet ; (TEST/DEBUG) + lwz r4,ACT_MACT_FPUlvl(r12) ; (TEST/DEBUG) + lwz r5,ACT_MACT_FPU(r12) ; (TEST/DEBUG) + +noowneryet: oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) + mr r0,r7 ; (TEST/DEBUG) +#endif + mflr r2 ; Save the return address + lwz r10,PP_CPU_DATA(r6) ; Get the CPU data pointer + lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number + + mr. r12,r12 ; Anyone own the FPU? + + lwz r10,CPU_ACTIVE_THREAD(r10) ; Get the pointer to the active thread + + beq- fsret ; Nobody owns the FPU, no save required... + + lwz r10,THREAD_TOP_ACT(r10) ; Now get the activation that is running + lwz r9,ACT_MACT_FPUcpu(r12) ; Get the last CPU to use this context + + cmplw r12,r10 ; Do we own the FPU? + cmplw cr1,r9,r11 ; Was the context for this processor? + bne+ fsret ; Facility belongs to some other activation... + li r3,0 ; Assume we need a fix-me-up + beq- cr1,fsgoodcpu ; Facility last used on this processor... + stw r3,PP_FPU_THREAD(r6) ; Clear owner because it was really on the other processor + b fsret ; Bail now with no save... + +fsgoodcpu: lwz r3,ACT_MACT_FPU(r12) ; Get the current FPU savearea for the thread + lwz r9,ACT_MACT_FPUlvl(r12) ; Get our current level indicator + + cmplwi cr1,r3,0 ; Have we ever saved this facility context? + beq- cr1,fsneedone ; Never saved it, so we need an area... 
+ + lwz r8,SAVlvlfp(r3) ; Get the level this savearea is for + cmplwi r8,1 ; See if it is a spare + cmplw cr1,r9,r8 ; Correct level? + beq+ fsusespare ; We have a spare to use... + beq- cr1,fsret ; The current level is already saved, bail out... + +fsneedone: li r3,0 ; Tell the routine to allocate an area if none found + bl fpsrchsave ; Find a free savearea + + mfsprg r6,0 ; Get back per_processor block + oris r7,r7,hi16(SAVfpuvalid) ; Set the allocated bit + lwz r12,PP_FPU_THREAD(r6) ; Get back our thread + mtlr r2 ; Restore return + lwz r8,ACT_MACT_FPU(r12) ; Get the current top floating point savearea + lwz r9,ACT_MACT_FPUlvl(r12) ; Get our current level indicator again + stw r3,ACT_MACT_FPU(r12) ; Set this as the latest FPU savearea for the thread + stw r8,SAVprefp(r3) ; And then chain this in front + stw r7,SAVflags(r3) ; Set the validity flags + stw r12,SAVact(r3) ; Make sure we point to the right guy + +fsusespare: stw r9,SAVlvlfp(r3) ; And set the level this savearea is for + +; +; Save the current FPU state into the PCB of the thread that owns it. 
+; + + la r11,savefp0(r3) ; Point to the 1st line + dcbz 0,r11 ; Allocate the first savearea line + + la r11,savefp4(r3) /* Point to the 2nd line */ + stfd f0,savefp0(r3) + dcbz 0,r11 /* allocate it */ + stfd f1,savefp1(r3) + stfd f2,savefp2(r3) + la r11,savefp8(r3) /* Point to the 3rd line */ + stfd f3,savefp3(r3) + dcbz 0,r11 /* allocate it */ + stfd f4,savefp4(r3) + stfd f5,savefp5(r3) + stfd f6,savefp6(r3) + la r11,savefp12(r3) /* Point to the 4th line */ + stfd f7,savefp7(r3) + dcbz 0,r11 /* allocate it */ + stfd f8,savefp8(r3) + stfd f9,savefp9(r3) + stfd f10,savefp10(r3) + la r11,savefp16(r3) /* Point to the 5th line */ + stfd f11,savefp11(r3) + dcbz 0,r11 /* allocate it */ + stfd f12,savefp12(r3) + stfd f13,savefp13(r3) + stfd f14,savefp14(r3) + la r11,savefp20(r3) /* Point to the 6th line */ + stfd f15,savefp15(r3) + stfd f16,savefp16(r3) + stfd f17,savefp17(r3) + stfd f18,savefp18(r3) + la r11,savefp24(r3) /* Point to the 7th line */ + stfd f19,savefp19(r3) + dcbz 0,r11 /* allocate it */ + stfd f20,savefp20(r3) + lwz r10,liveFPSCR(r6) ; Get the previously saved FPSCR + stfd f21,savefp21(r3) + stfd f22,savefp22(r3) + li r9,0 ; Just clear this out + la r11,savefp28(r3) /* Point to the 8th line */ + stfd f23,savefp23(r3) + dcbz 0,r11 /* allocate it */ + stfd f24,savefp24(r3) + stfd f25,savefp25(r3) + stfd f26,savefp26(r3) + stfd f27,savefp27(r3) + stfd f28,savefp28(r3) + +; Note that we just save the FPSCR here for ease. It is really already saved +; in the "normal" context area of the savearea. 
+ + stw r9,savefpscrpad(r3) ; Save the FPSCR pad + stw r10,savefpscr(r3) ; Save the FPSCR + + stfd f29,savefp29(r3) + stfd f30,savefp30(r3) + stfd f31,savefp31(r3) + lfd f0,savefp0(r3) ; We need to restore F0 because we used it + ; to get the FPSCR + +#if 0 + la r9,savefp0(r3) ; (TEST/DEBUG) + la r10,savefp31(r3) ; (TEST/DEBUG) + +chkkillmedead: + lha r8,0(r9) ; (TEST/DEBUG) + addi r9,r9,8 ; (TEST/DEBUG) + cmpwi r8,-8 ; (TEST/DEBUG) + cmplw cr1,r9,r10 ; (TEST/DEBUG) + bne+ dontkillmedead ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) + +dontkillmedead: ; (TEST/DEBUG) + ble+ cr1,chkkillmedead ; (TEST/DEBUG) +#endif + +fsret: mtmsr r0 ; Put interrupts on if they were and floating point off + isync + + blr + +/* + * fpu_switch() + * + * Entered to handle the floating-point unavailable exception and + * switch fpu context + * + * This code is run in virtual address mode on with interrupts off. + * + * Upon exit, the code returns to the users context with the floating + * point facility turned on. + * + * ENTRY: VM switched ON + * Interrupts OFF + * State is saved in savearea pointed to by R4. + * All other registers are free. 
+ * + */ + +ENTRY(fpu_switch, TAG_NO_FRAME_USED) +#if DEBUG +#if GDDBG + mr r7,r4 ; Save input parameter + lis r3,hi16(EXT(fpu_trap_count)) ; Get address of FP trap counter + ori r3,r3,lo16(EXT(fpu_trap_count)) ; Get address of FP trap counter + lwz r1,0(r3) + lis r5,hi16(EXT(GratefulDeb)) ; Point to top of display + ori r5,r5,lo16(EXT(GratefulDeb)) ; Put in bottom part + addi r1,r1,1 + mtlr r5 ; Set link register + stw r1,0(r3) + mr r4,r1 + li r3,0 + blrl ; Display count + mr r4,r7 ; Restore the parameter +#else + lis r3,hi16(EXT(fpu_trap_count)) ; Get address of FP trap counter + ori r3,r3,lo16(EXT(fpu_trap_count)) ; Get address of FP trap counter + lwz r1,0(r3) + addi r1,r1,1 + stw r1,0(r3) +#endif +#endif /* DEBUG */ + + mfsprg r6,0 ; Get the per_processor block + mfmsr r19 ; Get the current MSR + + lwz r10,PP_CPU_DATA(r6) ; Get the CPU data pointer + lwz r12,PP_FPU_THREAD(r6) ; Get the thread that owns the FPU + lwz r10,CPU_ACTIVE_THREAD(r10) ; Get the pointer to the active thread + ori r19,r19,lo16(MASK(MSR_FP)) ; Enable the floating point feature + lwz r17,THREAD_TOP_ACT(r10) ; Now get the activation that is running + +; R12 has the "old" activation +; R17 has the "new" activation + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F01 ; (TEST/DEBUG) + mr r3,r12 ; (TEST/DEBUG) + mr r5,r17 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + mr. r12,r12 ; See if there is any live FP status + + lhz r18,PP_CPU_NUMBER(r6) ; Get the current CPU, we will need it later + + mtmsr r19 ; Enable floating point instructions + isync + + beq- fsnosave ; No live context, so nothing to save... 
+ + lwz r19,ACT_MACT_FPUcpu(r12) ; Get the "old" active CPU + lwz r15,ACT_MACT_PCB(r12) ; Get the current level of the "old" one + cmplw r18,r19 ; Check the CPU that the old context is live on + lwz r14,ACT_MACT_FPU(r12) ; Point to the top of the old context stack + bne- fsnosave ; Context is not live if used on a different CPU... + lwz r13,ACT_MACT_FPUlvl(r12) ; Get the "old" active level + +; +; First, check to see if all we are doing is enabling because the +; "new" context is live. +; +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F02 ; (TEST/DEBUG) + mr r1,r15 ; (TEST/DEBUG) + mr r3,r13 ; (TEST/DEBUG) + mr r5,r14 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + cmplw cr1,r12,r17 ; Are the "old" activation and the "new" the same? + cmplwi cr2,r14,0 ; Is there any saved context on the "old" activation? + bne+ cr1,fsmstsave ; The activations are different so "old" context must be saved... + +; +; Here we know that both the "old" and "new" activations are the same. We will +; check the current level and active levels. If they are the same, the context is +; already live, so all we do is turn on the facility and invalidate the top +; savearea. +; +; If the current level, the active level, and the top savearea level are the +; same, then the context was saved as part of a thread context switch and neither +; needs saving or restoration. +; +; In all other cases, the context must be saved unless we are just re-enabling +; floating point. +; + + cmplw r13,r15 ; Are the levels the same? + cmplwi cr2,r14,0 ; Is there any saved context? + bne- fsmstsave ; Levels are different, we need to save... + + beq- cr2,fsenable ; No saved context at all, enable and go... 
+ + lwz r20,SAVlvlfp(r14) ; Get the level of the top savearea + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F03 ; (TEST/DEBUG) + mr r3,r15 ; (TEST/DEBUG) + mr r5,r20 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + cmplw r15,r20 ; Is the top level the same as the current? + li r0,1 ; Get the invalid flag + bne- fsenable ; Not the same, just enable and go... + + stw r0,SAVlvlfp(r14) ; Invalidate that top savearea + + b fsenable ; Then enable and go... + +; +; We need to save the "old" context here. The LIFO queueing scheme works +; out for all cases because if both the "new" and "old" activations are the +; same, there can not be any saved state to load. the "new" level is +; truely new. +; +; When we save the context, we either use a new savearea, or the free +; one that is cached at the head of the list. + +fsmstsave: beq- cr2,fsgetsave ; There is no possible cached save area + + lwz r5,SAVlvlfp(r14) ; Get the level of first facility savearea +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F04 ; (TEST/DEBUG) + mr r3,r15 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + mr r3,r14 ; Assume we are invalid + cmplwi r5,1 ; Is it invalid? + cmplw cr1,r5,r13 ; Is the SA level the active one? + beq+ fsusecache ; Invalid, just use it... + beq- cr1,fsnosave ; The SA level is active, it is already saved... 
+ +fsgetsave: mr r3,r4 ; Use the interrupt save as the context savearea if none cached +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F05 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + bl fpsrchsave ; Find a free savearea + + stw r3,ACT_MACT_FPU(r12) ; Set this as the latest context savearea for the thread + mfsprg r6,0 ; Get back per_processor block + stw r14,SAVprefp(r3) ; And then chain this in front + oris r7,r7,hi16(SAVfpuvalid) ; Set the allocated bit + stw r12,SAVact(r3) ; Make sure we point to the right guy + stw r7,SAVflags(r3) ; Set the allocation flags + +fsusecache: la r11,savefp0(r3) ; Point to the 1st line in area + stw r13,SAVlvlfp(r3) ; Set this context level +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F06 ; (TEST/DEBUG) + mr r5,r13 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + +; +; Now we will actually save the old context +; + + dcbz 0,r11 ; Allocate the output area + + la r11,savefp4(r3) ; Point to the 2nd line + stfd f0,savefp0(r3) + dcbz 0,r11 ; Allocate cache + stfd f1,savefp1(r3) + stfd f2,savefp2(r3) + la r11,savefp8(r3) ; Point to the 3rd line + stfd f3,savefp3(r3) + dcbz 0,r11 ; Allocate cache + stfd f4,savefp4(r3) + stfd f5,savefp5(r3) + stfd f6,savefp6(r3) + la r11,savefp12(r3) ; Point to the 4th line + stfd f7,savefp7(r3) + dcbz 0,r11 ; Allocate cache + stfd f8,savefp8(r3) + stfd f9,savefp9(r3) + stfd f10,savefp10(r3) + la r11,savefp16(r3) ; Point to the 5th line + stfd f11,savefp11(r3) + dcbz 0,r11 ; Allocate cache + stfd f12,savefp12(r3) + stfd f13,savefp13(r3) + stfd f14,savefp14(r3) + la r11,savefp20(r3) ; Point to the 6th line + stfd f15,savefp15(r3) + dcbz 0,r11 ; Allocate cache + stfd f16,savefp16(r3) + stfd f17,savefp17(r3) + stfd f18,savefp18(r3) + la r11,savefp24(r3) ; Point to the 7th line + stfd f19,savefp19(r3) + dcbz 0,r11 ; Allocate cache + stfd f20,savefp20(r3) + + li r14,0 ; Clear 
this for now + lwz r15,liveFPSCR(r6) ; Get the previously saved FPSCR + + stfd f21,savefp21(r3) + stfd f22,savefp22(r3) + la r11,savefp28(r3) ; Point to the 8th line + stfd f23,savefp23(r3) + dcbz 0,r11 ; allocate it + stfd f24,savefp24(r3) + stfd f25,savefp25(r3) + stfd f26,savefp26(r3) + la r11,savefpscrpad(r3) ; Point to the 9th line + stfd f27,savefp27(r3) + dcbz 0,r11 ; allocate it + stfd f28,savefp28(r3) + stfd f29,savefp29(r3) + stfd f30,savefp30(r3) + stfd f31,savefp31(r3) + +; Note that we just save the FPSCR here for ease. It is really already saved +; in the "normal" context area of the savearea. + + stw r14,savefpscrpad(r3) ; Save the FPSCR pad + stw r15,savefpscr(r3) ; Save the FPSCR + +; +; The context is all saved now and the facility is free. +; +; Now check out the "new" and see if we need to load up his context. +; If we do (and this should be the normal case), do it and then invalidate the +; savearea. (This will keep it cached for quick access next time around.) +; +; If we do not (remember, we already took care of the case where we just enable +; the FPU), we need to fill the registers with junk, because this level has +; never used them before and some thieving bastard could hack the old values +; of some thread! Just imagine what would happen if they could! Why, nothing +; would be safe! My God! It is terrifying! +; + + +fsnosave: lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one + lwz r14,ACT_MACT_FPU(r17) ; Point to the top of the "new" context stack + lwz r13,ACT_MACT_FPUlvl(r17) ; Get the "new" active level +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F07 ; (TEST/DEBUG) + mr r1,r15 ; (TEST/DEBUG) + mr r3,r14 ; (TEST/DEBUG) + mr r5,r13 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + cmplwi cr1,r14,0 ; Do we possibly have some context to load? 
+ stw r15,ACT_MACT_FPUlvl(r17) ; Set the "new" active level + stw r18,ACT_MACT_FPUcpu(r17) ; Set the active CPU + la r11,savefp0(r14) ; Point to first line to bring in + stw r17,PP_FPU_THREAD(r6) ; Store current thread address in fpu_thread to claim fpu for thread + + beq+ cr1,MakeSureThatNoTerroristsCanHurtUsByGod ; No "new" context to load... + lwz r0,SAVlvlfp(r14) ; Get the level of first facility savearea + cmplw r0,r15 ; Top level correct to load? + bne- MakeSureThatNoTerroristsCanHurtUsByGod ; No, go initialize... + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F08 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + dcbt 0,r11 ; Touch line in + li r0,1 ; Get the level invalid indication + + la r11,savefp4(r14) ; Point to next line + dcbt 0,r11 ; Touch line in + lfd f0, savefp0(r14) + lfd f1,savefp1(r14) + stw r0,SAVlvlfp(r14) ; Mark the savearea invalid because we are activating again + lfd f2,savefp2(r14) + la r11,savefp8(r14) ; Point to next line + lfd f3,savefp3(r14) + dcbt 0,r11 ; Touch line in + lfd f4,savefp4(r14) + lfd f5,savefp5(r14) + lfd f6,savefp6(r14) + la r11,savefp12(r14) ; Point to next line + lfd f7,savefp7(r14) + dcbt 0,r11 ; Touch line in + lfd f8,savefp8(r14) + lfd f9,savefp9(r14) + lfd f10,savefp10(r14) + la r11,savefp16(r14) ; Point to next line + lfd f11,savefp11(r14) + dcbt 0,r11 ; Touch line in + lfd f12,savefp12(r14) + lfd f13,savefp13(r14) + lfd f14,savefp14(r14) + la r11,savefp20(r14) ; Point to next line + lfd f15,savefp15(r14) + dcbt 0,r11 ; Touch line in + lfd f16,savefp16(r14) + lfd f17,savefp17(r14) + lfd f18,savefp18(r14) + la r11,savefp24(r14) ; Point to next line + lfd f19,savefp19(r14) + dcbt 0,r11 ; Touch line in + lfd f20,savefp20(r14) + lfd f21,savefp21(r14) + la r11,savefp28(r14) ; Point to next line + lfd f22,savefp22(r14) + lfd f23,savefp23(r14) + dcbt 0,r11 ; Touch line in + lfd f24,savefp24(r14) + lfd f25,savefp25(r14) + lfd f26,savefp26(r14) + lfd 
f27,savefp27(r14) + lfd f28,savefp28(r14) + lfd f29,savefp29(r14) + lfd f30,savefp30(r14) + lfd f31,savefp31(r14) + +fsenable: lwz r9,SAVflags(r4) /* Get the flags of the current savearea */ + lwz r8,savesrr1(r4) ; Get the msr of the interrupted guy + rlwinm r5,r4,0,0,19 /* Get the page address of the savearea */ + ori r8,r8,MASK(MSR_FP) ; Enable the floating point feature + lwz r10,ACT_MACT_SPF(r17) ; Get the special flags + lis r7,hi16(SAVattach) /* Get the attached flag */ + lwz r5,SACvrswap(r5) /* Get Virtual to Real translation */ + oris r10,r10,hi16(floatUsed|floatCng) ; Set that we used floating point + mr. r15,r15 ; See if we are doing this for user state + stw r8,savesrr1(r4) ; Set the msr of the interrupted guy + andc r9,r9,r7 /* Clear the attached bit */ + xor r3,r4,r5 /* Get the real address of the savearea */ + bne- fsnuser ; We are not user state... + stw r10,ACT_MACT_SPF(r17) ; Set the activation copy + stw r10,spcFlags(r6) ; Set per_proc copy + +fsnuser: +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F0A ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + stw r9,SAVflags(r4) /* Set the flags of the current savearea */ + + b EXT(exception_exit) /* Exit from the fray... 
*/ + +/* + * Initialize the registers to some bogus value + */ + +MakeSureThatNoTerroristsCanHurtUsByGod: + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x7F09 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + lis r5,hi16(EXT(FloatInit)) /* Get top secret floating point init value address */ + ori r5,r5,lo16(EXT(FloatInit)) /* Slam bottom */ + lfd f0,0(r5) /* Initialize FP0 */ + fmr f1,f0 ; Do them all + fmr f2,f0 + fmr f3,f0 + fmr f4,f0 + fmr f5,f0 + fmr f6,f0 + fmr f7,f0 + fmr f8,f0 + fmr f9,f0 + fmr f10,f0 + fmr f11,f0 + fmr f12,f0 + fmr f13,f0 + fmr f14,f0 + fmr f15,f0 + fmr f16,f0 + fmr f17,f0 + fsub f31,f31,f31 ; Get set to initialize the FPSCR + fmr f18,f0 + fmr f19,f0 + fmr f20,f0 + mtfsf 0xff,f31 ; Clear all FPSCR exception eanbles + fmr f21,f0 + fmr f22,f0 + fmr f23,f0 + fmr f24,f0 + fmr f25,f0 + fmr f26,f0 + fmr f27,f0 + fmr f28,f0 + fmr f29,f0 + fmr f30,f0 + fmr f31,f0 + b fsenable ; Finish setting it all up... + +; +; Finds an unused floating point area in the activation pointed +; to by R12s saved contexts. If none are found (unlikely but possible) +; and R3 is 0, a new area is allocated. If R3 is non-zero, it contains +; a pointer to an floating point savearea that is free. +; +fpsrchsave: + lwz r6,ACT_MACT_PCB(r12) ; Get the first "normal" savearea + +fpsrnorm: mr. r5,r6 ; Is there another? + beq- fpsrvect ; No, search the vector saveareas... + lwz r7,SAVflags(r5) ; Get the flags for this guy + lwz r6,SAVprev(r5) ; Get the previous savearea, just in case + andis. r8,r7,hi16(SAVfpuvalid) ; Have we found an empty FPU save in normal? + beq+ fpsrgot ; We found one... + b fpsrnorm ; Search again... + +fpsrvect: lwz r6,ACT_MACT_VMX(r12) ; Get the first "vector" savearea + +fpsrvectx: mr. r5,r6 ; Is there another? + beq- fpsrget ; No, try to allocate one... + lwz r7,SAVflags(r5) ; Get the flags for this guy + lwz r6,SAVprevec(r5) ; Get the previous savearea, just in case + andis. 
r8,r7,hi16(SAVfpuvalid) ; Have we found an empty FPU save in vector? + bne- fpsrvectx ; Search again... + +fpsrgot: mr r3,r5 ; Get the savearea into the right register + blr ; Return... + +fpsrget: mr. r5,r3 ; Do we allocate or use existing? + beq+ fpsrallo ; Allocate one... + + lwz r7,SAVflags(r3) ; Get the passed in area flags + blr ; Return... +; +; NOTE: save_get will return directly and set R7 to 0... +; +fpsrallo: b EXT(save_get) ; Get a fresh savearea + +/* + * Altivec stuff is here. The techniques used are pretty identical to + * the floating point. Except that we will honor the VRSAVE register + * settings when loading and restoring registers. + * + * There are two indications of saved VRs: the VRSAVE register and the vrvalid + * mask. VRSAVE is set by the vector user and represents the VRs that they + * say that they are using. The vrvalid mask indicates which vector registers + * are saved in the savearea. Whenever context is saved, it is saved according + * to the VRSAVE register. It is loaded based on VRSAVE anded with + * vrvalid (all other registers are splatted with 0s). This is done because we + * don't want to load any registers we don't have a copy of, we want to set them + * to zero instead. + * + */ + +ENTRY(vec_save, TAG_NO_FRAME_USED) + + mfmsr r0 ; Get the MSR + rlwinm r0,r0,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector forever + rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; But do interrupts only for now + oris r2,r2,hi16(MASK(MSR_VEC)) ; Enable the vector facility for now also + mtmsr r2 ; Set the MSR + isync + + mfsprg r6,0 ; Get the per_processor block + lwz r12,PP_VMX_THREAD(r6) ; Get the thread that owns the vector +#if FPVECDBG + mr r7,r0 ; (TEST/DEBUG) + li r4,0 ; (TEST/DEBUG) + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + mr. 
r3,r12 ; (TEST/DEBUG) + li r2,0x5F00 ; (TEST/DEBUG) + li r5,0 ; (TEST/DEBUG) + beq- noowneryeu ; (TEST/DEBUG) + lwz r4,ACT_MACT_VMXlvl(r12) ; (TEST/DEBUG) + lwz r5,ACT_MACT_VMX(r12) ; (TEST/DEBUG) + +noowneryeu: oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) + mr r0,r7 ; (TEST/DEBUG) +#endif + mflr r2 ; Save the return address + lwz r10,PP_CPU_DATA(r6) ; Get the CPU data pointer + lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number + + mr. r12,r12 ; Anyone own the vector? + + lwz r10,CPU_ACTIVE_THREAD(r10) ; Get the pointer to the active thread + + beq- vsret ; Nobody owns the vector, no save required... + + lwz r10,THREAD_TOP_ACT(r10) ; Now get the activation that is running + lwz r9,ACT_MACT_VMXcpu(r12) ; Get the last CPU to use this context + + cmplw r12,r10 ; Do we own the thread? + cmplw cr1,r9,r11 ; Was the context for this processor? + bne+ vsret ; Facility belongs to some other activation... + li r3,0 ; Assume we need a fix-me-up + beq- cr1,vsgoodcpu ; Facility last used on this processor... + stw r3,PP_VMX_THREAD(r6) ; Clear owner because it was really on the other processor + b vsret ; Bail now with no save... + +vsgoodcpu: lwz r3,ACT_MACT_VMX(r12) ; Get the current vector savearea for the thread + lwz r9,ACT_MACT_VMXlvl(r12) ; Get our current level indicator + + cmplwi cr1,r3,0 ; Have we ever saved this facility context? + beq- cr1,vsneedone ; Never saved it, so we need an area... + + lwz r8,SAVlvlvec(r3) ; Get the level this savearea is for + cmplwi r8,1 ; See if this is a spare + cmplw cr1,r9,r8 ; Correct level? + beq+ vsusespare ; It is still live... + beq- cr1,vsret ; The current level is already saved, bail out... 
+ +vsneedone: li r3,0 ; Tell the routine to allocate an area if none found + bl vsrchsave ; Find a free savearea + + mfsprg r6,0 ; Get back per_processor block + oris r7,r7,hi16(SAVvmxvalid) ; Set the allocated bit + lwz r12,PP_VMX_THREAD(r6) ; Get back our thread + mtlr r2 ; Restore return + lwz r8,ACT_MACT_VMX(r12) ; Get the current top vector savearea + lwz r9,ACT_MACT_VMXlvl(r12) ; Get our current level indicator again + stw r3,ACT_MACT_VMX(r12) ; Set this as the latest vector savearea for the thread + stw r8,SAVprevec(r3) ; And then chain this in front + stw r7,SAVflags(r3) ; Set the allocation flags + stw r12,SAVact(r3) ; Make sure we point to the right guy + +vsusespare: stw r9,SAVlvlvec(r3) ; And set the level this savearea is for + mfcr r2 ; Save non-volatile CRs + lwz r10,liveVRS(r6) ; Get the right VRSave register + lis r9,0x5555 ; Mask with odd bits set + rlwinm r11,r10,1,0,31 ; Shift over 1 + ori r9,r9,0x5555 ; Finish mask + or r12,r10,r11 ; After this, even bits show which lines to zap + + andc r11,r12,r9 ; Clear out odd bits + + la r6,savevr0(r3) ; Point to line 0 + rlwinm r4,r11,15,0,15 ; Move line 8-15 flags to high order odd bits + la r9,savevrvalid(r3) ; Point to the saved register mask field + or r4,r11,r4 ; Set the odd bits + ; (bit 0 is line 0, bit 1 is line 8, + ; bit 2 is line 1, bit 3 is line 9, etc. + dcba br0,r9 ; Allocate the cache for it + rlwimi r4,r10,16,16,31 ; Put vrsave 0 - 15 into positions 16 - 31 + la r7,savevr2(r3) ; Point to line 1 + mtcrf 255,r4 ; Load up the CRs + stw r10,savevrvalid(r3) ; Save the validity information + mr r8,r6 ; Start registers off +; +; Save the current vector state +; + + bf 0,snol0 ; No line 0 to do... + dcba br0,r6 ; Allocate cache line 0 + +snol0: + la r6,savevr4(r3) ; Point to line 2 + bf 2,snol1 ; No line 1 to do... + dcba br0,r7 ; Allocate cache line 1 + +snol1: + la r7,savevr6(r3) ; Point to line 3 + bf 4,snol2 ; No line 2 to do... 
+ dcba br0,r6 ; Allocate cache line 2 + +snol2: + li r11,16 ; Get offset for odd registers + bf 16,snovr0 ; Do not save VR0... + stvxl v0,br0,r8 ; Save VR0 + +snovr0: + la r9,savevr2(r3) ; Point to V2/V3 pair + bf 17,snovr1 ; Do not save VR1... + stvxl v1,r11,r8 ; Save VR1 + +snovr1: + la r6,savevr8(r3) ; Point to line 4 + bf 6,snol3 ; No line 3 to do... + dcba br0,r7 ; Allocate cache line 3 + +snol3: + la r8,savevr4(r3) ; Point to V4/V5 pair + bf 18,snovr2 ; Do not save VR2... + stvxl v2,br0,r9 ; Save VR2 + +snovr2: + bf 19,snovr3 ; Do not save VR3... + stvxl v3,r11,r9 ; Save VR3 + +snovr3: +; +; Note: CR4 is now free +; + la r7,savevr10(r3) ; Point to line 5 + bf 8,snol4 ; No line 4 to do... + dcba br0,r6 ; Allocate cache line 4 + +snol4: + la r9,savevr6(r3) ; Point to R6/R7 pair + bf 20,snovr4 ; Do not save VR4... + stvxl v4,br0,r8 ; Save VR4 + +snovr4: + bf 21,snovr5 ; Do not save VR5... + stvxl v5,r11,r8 ; Save VR5 + +snovr5: + mtcrf 0x08,r10 ; Set CRs for registers 16-19 + la r6,savevr12(r3) ; Point to line 6 + bf 10,snol5 ; No line 5 to do... + dcba br0,r7 ; Allocate cache line 5 + +snol5: + la r8,savevr8(r3) ; Point to V8/V9 pair + bf 22,snovr6 ; Do not save VR6... + stvxl v6,br0,r9 ; Save VR6 + +snovr6: + bf 23,snovr7 ; Do not save VR7... + stvxl v7,r11,r9 ; Save VR7 + +snovr7: +; +; Note: CR5 is now free +; + la r7,savevr14(r3) ; Point to line 7 + bf 12,snol6 ; No line 6 to do... + dcba br0,r6 ; Allocate cache line 6 + +snol6: + la r9,savevr10(r3) ; Point to V10/V11 pair + bf 24,snovr8 ; Do not save VR8... + stvxl v8,br0,r8 ; Save VR8 + +snovr8: + bf 25,snovr9 ; Do not save VR9... + stvxl v9,r11,r8 ; Save VR9 + +snovr9: + mtcrf 0x04,r10 ; Set CRs for registers 20-23 + la r6,savevr16(r3) ; Point to line 8 + bf 14,snol7 ; No line 7 to do... + dcba br0,r7 ; Allocate cache line 7 + +snol7: + la r8,savevr12(r3) ; Point to V12/V13 pair + bf 26,snovr10 ; Do not save VR10... + stvxl v10,br0,r9 ; Save VR10 + +snovr10: + bf 27,snovr11 ; Do not save VR11... 
+ stvxl v11,r11,r9 ; Save VR11 + +snovr11: + +; +; Note: CR6 is now free +; + la r7,savevr18(r3) ; Point to line 9 + bf 1,snol8 ; No line 8 to do... + dcba br0,r6 ; Allocate cache line 8 + +snol8: + la r9,savevr14(r3) ; Point to V14/V15 pair + bf 28,snovr12 ; Do not save VR12... + stvxl v12,br0,r8 ; Save VR12 + +snovr12: + bf 29,snovr13 ; Do not save VR13... + stvxl v13,r11,r8 ; Save VR13 + +snovr13: + mtcrf 0x02,r10 ; Set CRs for registers 24-27 + la r6,savevr20(r3) ; Point to line 10 + bf 3,snol9 ; No line 9 to do... + dcba br0,r7 ; Allocate cache line 9 + +snol9: + la r8,savevr16(r3) ; Point to V16/V17 pair + bf 30,snovr14 ; Do not save VR14... + stvxl v14,br0,r9 ; Save VR14 + +snovr14: + bf 31,snovr15 ; Do not save VR15... + stvxl v15,r11,r9 ; Save VR15 + +snovr15: +; +; Note: CR7 is now free +; + la r7,savevr22(r3) ; Point to line 11 + bf 5,snol10 ; No line 10 to do... + dcba br0,r6 ; Allocate cache line 10 + +snol10: + la r9,savevr18(r3) ; Point to V18/V19 pair + bf 16,snovr16 ; Do not save VR16... + stvxl v16,br0,r8 ; Save VR16 + +snovr16: + bf 17,snovr17 ; Do not save VR17... + stvxl v17,r11,r8 ; Save VR17 + +snovr17: + mtcrf 0x01,r10 ; Set CRs for registers 28-31 +; +; Note: All registers have been or are accounted for in CRs +; + la r6,savevr24(r3) ; Point to line 12 + bf 7,snol11 ; No line 11 to do... + dcba br0,r7 ; Allocate cache line 11 + +snol11: + la r8,savevr20(r3) ; Point to V20/V21 pair + bf 18,snovr18 ; Do not save VR18... + stvxl v18,br0,r9 ; Save VR18 + +snovr18: + bf 19,snovr19 ; Do not save VR19... + stvxl v19,r11,r9 ; Save VR19 + +snovr19: + la r7,savevr26(r3) ; Point to line 13 + bf 9,snol12 ; No line 12 to do... + dcba br0,r6 ; Allocate cache line 12 + +snol12: + la r9,savevr22(r3) ; Point to V22/V23 pair + bf 20,snovr20 ; Do not save VR20... + stvxl v20,br0,r8 ; Save VR20 + +snovr20: + bf 21,snovr21 ; Do not save VR21... 
+ stvxl v21,r11,r8 ; Save VR21 + +snovr21: + la r6,savevr28(r3) ; Point to line 14 + bf 11,snol13 ; No line 13 to do... + dcba br0,r7 ; Allocate cache line 13 + +snol13: + la r8,savevr24(r3) ; Point to V24/V25 pair + bf 22,snovr22 ; Do not save VR22... + stvxl v22,br0,r9 ; Save VR22 + +snovr22: + bf 23,snovr23 ; Do not save VR23... + stvxl v23,r11,r9 ; Save VR23 + +snovr23: + la r7,savevr30(r3) ; Point to line 15 + bf 13,snol14 ; No line 14 to do... + dcba br0,r6 ; Allocate cache line 14 + +snol14: + la r9,savevr26(r3) ; Point to V26/V27 pair + bf 24,snovr24 ; Do not save VR24... + stvxl v24,br0,r8 ; Save VR24 + +snovr24: + bf 25,snovr25 ; Do not save VR25... + stvxl v25,r11,r8 ; Save VR25 + +snovr25: + bf 15,snol15 ; No line 15 to do... + dcba br0,r7 ; Allocate cache line 15 + +snol15: +; +; Note: All cache lines allocated now +; + la r8,savevr28(r3) ; Point to V28/V29 pair + bf 26,snovr26 ; Do not save VR26... + stvxl v26,br0,r9 ; Save VR26 + +snovr26: + bf 27,snovr27 ; Do not save VR27... + stvxl v27,r11,r9 ; Save VR27 + +snovr27: + la r7,savevr30(r3) ; Point to V30/V31 pair + bf 28,snovr28 ; Do not save VR28... + stvxl v28,br0,r8 ; Save VR28 + +snovr28: + bf 29,snovr29 ; Do not save VR29... + stvxl v29,r11,r8 ; Save VR29 + +snovr29: + mfvscr v27 ; Get the VSCR + la r8,savevscr(r3) ; Point to the VSCR save area + bf 30,snovr30 ; Do not save VR30... + stvxl v30,br0,r7 ; Save VR30 + +snovr30: + dcba br0,r8 ; Allocate VSCR savearea + bf 31,snovr31 ; Do not save VR31... 
+ stvxl v31,r11,r7 ; Save VR31 + +snovr31: + add r11,r11,r9 ; Point to V27s saved value + stvxl v27,br0,r8 ; Save the VSCR + bt 27,v27ok ; V27 has been saved and is marked as wanted + + lis r11,hi16(EXT(QNaNbarbarian)) ; V27 is not wanted, so get empty value + ori r11,r11,lo16(EXT(QNaNbarbarian)) + +v27ok: mtcrf 255,r2 ; Restore all non-volatile CRs + lvxl v27,br0,r11 ; Restore or load empty value into V27 because we used it + +; +; Save the current vector state into the savearea of the thread that owns it. +; + +vsret: mtmsr r0 ; Put interrupts on if they were and vector off + isync + + blr + +/* + * vec_switch() + * + * Entered to handle the vector unavailable exception and + * switch vector context + * + * This code is run with virtual address mode on and interrupts off. + * + * Upon exit, the code returns to the users context with the vector + * facility turned on. + * + * ENTRY: VM switched ON + * Interrupts OFF + * State is saved in savearea pointed to by R4. + * All other registers are free. 
+ * + */ + +ENTRY(vec_switch, TAG_NO_FRAME_USED) + +#if DEBUG +#if GDDBG + mr r7,r4 ; Save input parameter + lis r3,hi16(EXT(vec_trap_count)) ; Get address of vector trap counter + ori r3,r3,lo16(EXT(vec_trap_count)) ; Get address of vector trap counter + lwz r1,0(r3) + lis r5,hi16(EXT(GratefulDeb)) ; Point to top of display + ori r5,r5,lo16(EXT(GratefulDeb)) ; Put in bottom part + addi r1,r1,1 + mtlr r5 ; Set link register + stw r1,0(r3) + mr r4,r1 + lis r3,1 + blrl ; Display count + mr r4,r7 ; Restore the parameter +#else + lis r3,hi16(EXT(vec_trap_count)) ; Get address of vector trap counter + ori r3,r3,lo16(EXT(vec_trap_count)) ; Get address of vector trap counter + lwz r1,0(r3) + addi r1,r1,1 + stw r1,0(r3) +#endif +#endif /* DEBUG */ + + mfsprg r6,0 /* Get the per_processor block */ + mfmsr r19 /* Get the current MSR */ + + lwz r10,PP_CPU_DATA(r6) /* Get the CPU data pointer */ + lwz r12,PP_VMX_THREAD(r6) /* Get the thread that owns the vector */ + lwz r10,CPU_ACTIVE_THREAD(r10) /* Get the pointer to the active thread */ + oris r19,r19,hi16(MASK(MSR_VEC)) /* Enable the vector feature */ + lwz r17,THREAD_TOP_ACT(r10) /* Now get the activation that is running */ + +; R12 has the "old" activation +; R17 has the "new" activation + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F01 ; (TEST/DEBUG) + mr r3,r12 ; (TEST/DEBUG) + mr r5,r17 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r18,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r18 ; Restore it + mfsprg r6,0 ; Get the per_processor block back +#endif +#endif + mr. 
r12,r12 ; See if there is any live vector status + + lhz r18,PP_CPU_NUMBER(r6) ; Get our CPU number + + mtmsr r19 /* Set vector available */ + isync + + + beq- vsnosave ; No live context, so nothing to save... + + lwz r19,ACT_MACT_VMXcpu(r12) ; Get the "old" active CPU + lwz r15,ACT_MACT_PCB(r12) ; Get the current level of the "old" one + cmplw r18,r19 ; Check the CPU that the old context is live on + lwz r14,ACT_MACT_VMX(r12) ; Point to the top of the old context stack + bne- vsnosave ; Context is not live if used on a different CPU... + lwz r13,ACT_MACT_VMXlvl(r12) ; Get the "old" active level + +; +; First, check to see if all we are doing is enabling because the +; "new" context is live. +; +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F02 ; (TEST/DEBUG) + mr r1,r15 ; (TEST/DEBUG) + mr r3,r13 ; (TEST/DEBUG) + mr r5,r14 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r8,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r8 ; Restore it +#endif +#endif + + cmplw cr1,r12,r17 ; Is the "old" activation and the "new" the same? + cmplwi cr2,r14,0 ; Is there any saved context on the "old" activation? + bne+ cr1,vsmstsave ; The activations are different so "old" context must be saved... + +; +; Here we know that both the "old" and "new" activations are the same. We will +; check the current level and active levels. If they are the same, the context is +; already live, so all we do is turn on the facility and invalidate the top +; savearea. +; +; If the current level, the active level, and the top savearea level are the +; same, then the context was saved as part of a thread context switch and neither +; needs saving or restoration. 
+; +; In all other cases, the context must be saved unless we are just re-enabling +; vector. +; + + cmplw r13,r15 ; Are the levels the same? + cmplwi cr2,r14,0 ; Is there any saved context? + bne- vsmstsave ; Levels are different, we need to save... + + beq- cr2,vrenable ; No saved context at all, enable and go... + + lwz r20,SAVlvlvec(r14) ; Get the level of the top savearea + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F03 ; (TEST/DEBUG) + mr r3,r15 ; (TEST/DEBUG) + mr r5,r20 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r8,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r8 ; Restore it +#endif +#endif + cmplw r15,r20 ; Is the top level the same as the current? + li r0,1 ; Get the invalid flag + bne- vrenable ; Not the same, just enable and go... + + stw r0,SAVlvlvec(r14) ; Invalidate that top savearea + + b vrenable ; Then enable and go... + +; +; We need to save the "old" context here. The LIFO queueing scheme works +; out for all cases because if both the "new" and "old" activations are the +; same, there can not be any saved state to load. the "new" level is +; truely new. +; +; When we save the context, we either use a new savearea, or the free +; one that is cached at the head of the list. 
+ +vsmstsave: beq- cr2,vsgetsave ; There is no possible cached save area + + lwz r5,SAVlvlvec(r14) ; Get the level of first facility savearea +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F04 ; (TEST/DEBUG) + mr r3,r15 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r8,r4 ; Save this + mr r7,r5 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r8 ; Restore it + mr r5,r7 ; Restore it +#endif +#endif + mr r3,r14 ; Assume we are invalid + cmplwi r5,1 ; Is it invalid? + cmplw cr1,r5,r13 ; Is the SA level the active one? + beq+ vsusecache ; Invalid, just use it... + beq- cr1,vsnosave ; The SA level is active, it is already saved... + +vsgetsave: mr r3,r4 ; Use the interrupt save as the context savearea if none cached +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F05 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r8,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r8 ; Restore it + mr r3,r8 ; This too +#endif +#endif + + bl vsrchsave ; Find a free savearea + + stw r3,ACT_MACT_VMX(r12) ; Set this as the latest context savearea for the thread + mfsprg r6,0 ; Get back per_processor block + stw r14,SAVprevec(r3) ; And then chain this in front + oris r7,r7,hi16(SAVvmxvalid) ; Set the allocated bit + stw r12,SAVact(r3) ; Make sure we point to the right guy + stw r7,SAVflags(r3) ; Set the allocation flags + +vsusecache: la r11,savevr0(r3) ; Point to the 1st line in area + stw r13,SAVlvlvec(r3) ; Set this context level +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; 
(TEST/DEBUG) + li r2,0x5F06 ; (TEST/DEBUG) + mr r5,r13 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + mr r10,r3 + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r8,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r8 ; Restore it + mr r3,r10 + mfsprg r6,0 ; Get back per_processor block +#endif +#endif + +vsgotsave: + lwz r10,liveVRS(r6) ; Get the right VRSave register + lis r9,0x5555 ; Mask with odd bits set + rlwinm r11,r10,1,0,31 ; Shift over 1 + ori r9,r9,0x5555 ; Finish mask + or r12,r10,r11 ; After this, even bits show which lines to zap + + stw r13,SAVlvlvec(r3) ; Set the savearea level + andc r13,r12,r9 ; Clear out odd bits + + la r20,savevr0(r3) ; Point to line 0 + rlwinm r24,r13,15,0,15 ; Move line 8-15 flags to high order odd bits + la r23,savevrvalid(r3) ; Point to the saved register mask field + or r24,r13,r24 ; Set the odd bits + ; (bit 0 is line 0, bit 1 is line 8, + ; bit 2 is line 1, bit 3 is line 9, etc. + dcba br0,r23 ; Allocate the cache for it + rlwimi r24,r10,16,16,31 ; Put vrsave 0 - 15 into positions 16 - 31 + la r21,savevr2(r3) ; Point to line 1 + mtcrf 255,r24 ; Load up the CRs + stw r10,savevrvalid(r3) ; Save the validity information + mr r22,r20 ; Start registers off +; +; Save the current vector state +; + + bf 0,nol0 ; No line 0 to do... + dcba br0,r20 ; Allocate cache line 0 + +nol0: + la r20,savevr4(r3) ; Point to line 2 + bf 2,nol1 ; No line 1 to do... + dcba br0,r21 ; Allocate cache line 1 + +nol1: + la r21,savevr6(r3) ; Point to line 3 + bf 4,nol2 ; No line 2 to do... + dcba br0,r20 ; Allocate cache line 2 + +nol2: + li r30,16 ; Get offset for odd registers + bf 16,novr0 ; Do not save VR0... + stvxl v0,br0,r22 ; Save VR0 + +novr0: + la r23,savevr2(r3) ; Point to V2/V3 pair + bf 17,novr1 ; Do not save VR1... 
+ stvxl v1,r30,r22 ; Save VR1 + +novr1: + la r20,savevr8(r3) ; Point to line 4 + bf 6,nol3 ; No line 3 to do... + dcba br0,r21 ; Allocate cache line 3 + +nol3: + la r22,savevr4(r3) ; Point to V4/V5 pair + bf 18,novr2 ; Do not save VR2... + stvxl v2,br0,r23 ; Save VR2 + +novr2: + bf 19,novr3 ; Do not save VR3... + stvxl v3,r30,r23 ; Save VR3 + +novr3: +; +; Note: CR4 is now free +; + la r21,savevr10(r3) ; Point to line 5 + bf 8,nol4 ; No line 4 to do... + dcba br0,r20 ; Allocate cache line 4 + +nol4: + la r23,savevr6(r3) ; Point to R6/R7 pair + bf 20,novr4 ; Do not save VR4... + stvxl v4,br0,r22 ; Save VR4 + +novr4: + bf 21,novr5 ; Do not save VR5... + stvxl v5,r30,r22 ; Save VR5 + +novr5: + mtcrf 0x08,r10 ; Set CRs for registers 16-19 + la r20,savevr12(r3) ; Point to line 6 + bf 10,nol5 ; No line 5 to do... + dcba br0,r21 ; Allocate cache line 5 + +nol5: + la r22,savevr8(r3) ; Point to V8/V9 pair + bf 22,novr6 ; Do not save VR6... + stvxl v6,br0,r23 ; Save VR6 + +novr6: + bf 23,novr7 ; Do not save VR7... + stvxl v7,r30,r23 ; Save VR7 + +novr7: +; +; Note: CR5 is now free +; + la r21,savevr14(r3) ; Point to line 7 + bf 12,nol6 ; No line 6 to do... + dcba br0,r20 ; Allocate cache line 6 + +nol6: + la r23,savevr10(r3) ; Point to V10/V11 pair + bf 24,novr8 ; Do not save VR8... + stvxl v8,br0,r22 ; Save VR8 + +novr8: + bf 25,novr9 ; Do not save VR9... + stvxl v9,r30,r22 ; Save VR9 + +novr9: + mtcrf 0x04,r10 ; Set CRs for registers 20-23 + la r20,savevr16(r3) ; Point to line 8 + bf 14,nol7 ; No line 7 to do... + dcba br0,r21 ; Allocate cache line 7 + +nol7: + la r22,savevr12(r3) ; Point to V12/V13 pair + bf 26,novr10 ; Do not save VR10... + stvxl v10,br0,r23 ; Save VR10 + +novr10: + bf 27,novr11 ; Do not save VR11... + stvxl v11,r30,r23 ; Save VR11 + +novr11: + +; +; Note: CR6 is now free +; + la r21,savevr18(r3) ; Point to line 9 + bf 1,nol8 ; No line 8 to do... 
+ dcba br0,r20 ; Allocate cache line 8 + +nol8: + la r23,savevr14(r3) ; Point to V14/V15 pair + bf 28,novr12 ; Do not save VR12... + stvxl v12,br0,r22 ; Save VR12 + +novr12: + bf 29,novr13 ; Do not save VR13... + stvxl v13,r30,r22 ; Save VR13 + +novr13: + mtcrf 0x02,r10 ; Set CRs for registers 24-27 + la r20,savevr20(r3) ; Point to line 10 + bf 3,nol9 ; No line 9 to do... + dcba br0,r21 ; Allocate cache line 9 + +nol9: + la r22,savevr16(r3) ; Point to V16/V17 pair + bf 30,novr14 ; Do not save VR14... + stvxl v14,br0,r23 ; Save VR14 + +novr14: + bf 31,novr15 ; Do not save VR15... + stvxl v15,r30,r23 ; Save VR15 + +novr15: +; +; Note: CR7 is now free +; + la r21,savevr22(r3) ; Point to line 11 + bf 5,nol10 ; No line 10 to do... + dcba br0,r20 ; Allocate cache line 10 + +nol10: + la r23,savevr18(r3) ; Point to V18/V19 pair + bf 16,novr16 ; Do not save VR16... + stvxl v16,br0,r22 ; Save VR16 + +novr16: + bf 17,novr17 ; Do not save VR17... + stvxl v17,r30,r22 ; Save VR17 + +novr17: + mtcrf 0x01,r10 ; Set CRs for registers 28-31 +; +; Note: All registers have been or are accounted for in CRs +; + la r20,savevr24(r3) ; Point to line 12 + bf 7,nol11 ; No line 11 to do... + dcba br0,r21 ; Allocate cache line 11 + +nol11: + la r22,savevr20(r3) ; Point to V20/V21 pair + bf 18,novr18 ; Do not save VR18... + stvxl v18,br0,r23 ; Save VR18 + +novr18: + bf 19,novr19 ; Do not save VR19... + stvxl v19,r30,r23 ; Save VR19 + +novr19: + la r21,savevr26(r3) ; Point to line 13 + bf 9,nol12 ; No line 12 to do... + dcba br0,r20 ; Allocate cache line 12 + +nol12: + la r23,savevr22(r3) ; Point to V22/V23 pair + bf 20,novr20 ; Do not save VR20... + stvxl v20,br0,r22 ; Save VR20 + +novr20: + bf 21,novr21 ; Do not save VR21... + stvxl v21,r30,r22 ; Save VR21 + +novr21: + la r20,savevr28(r3) ; Point to line 14 + bf 11,nol13 ; No line 13 to do... + dcba br0,r21 ; Allocate cache line 13 + +nol13: + la r22,savevr24(r3) ; Point to V24/V25 pair + bf 22,novr22 ; Do not save VR22... 
+ stvxl v22,br0,r23 ; Save VR22 + +novr22: + bf 23,novr23 ; Do not save VR23... + stvxl v23,r30,r23 ; Save VR23 + +novr23: + la r21,savevr30(r3) ; Point to line 15 + bf 13,nol14 ; No line 14 to do... + dcba br0,r20 ; Allocate cache line 14 + +nol14: + la r23,savevr26(r3) ; Point to V26/V27 pair + bf 24,novr24 ; Do not save VR24... + stvxl v24,br0,r22 ; Save VR24 + +novr24: + bf 25,novr25 ; Do not save VR25... + stvxl v25,r30,r22 ; Save VR25 + +novr25: + bf 15,nol15 ; No line 15 to do... + dcba br0,r21 ; Allocate cache line 15 + +nol15: +; +; Note: All cache lines allocated now +; + la r22,savevr28(r3) ; Point to V28/V29 pair + bf 26,novr26 ; Do not save VR26... + stvxl v26,br0,r23 ; Save VR26 + +novr26: + bf 27,novr27 ; Do not save VR27... + stvxl v27,r30,r23 ; Save VR27 + +novr27: + la r23,savevr30(r3) ; Point to V30/V31 pair + bf 28,novr28 ; Do not save VR28... + stvxl v28,br0,r22 ; Save VR28 + +novr28: + mfvscr v27 ; Get the VSCR + bf 29,novr29 ; Do not save VR29... + stvxl v29,r30,r22 ; Save VR29 + +novr29: + la r22,savevscr(r3) ; Point to the VSCR save area + bf 30,novr30 ; Do not save VR30... + stvxl v30,br0,r23 ; Save VR30 + +novr30: + dcba br0,r22 ; Allocate VSCR savearea + bf 31,novr31 ; Do not save VR31... + stvxl v31,r30,r23 ; Save VR31 + +novr31: + stvxl v27,br0,r22 ; Save the VSCR + + + +/* + * Now check out the current thread and see if we need to load up his context. + * If we do (and this should be the normal case), do it and then release the + * savearea. + * + * If we don't (remember, we already took care of the case where we just enable + * the vector), we need to fill the registers with garbage, because this thread has + * never used them before and some thieving bastard could hack the old values + * of some thread! Just imagine what would happen if they could! Why, nothing + * would be safe! My Gosh! It's terrifying! 
+ */ + +vsnosave: lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one + lwz r14,ACT_MACT_VMX(r17) ; Point to the top of the "new" context stack + lwz r13,ACT_MACT_VMXlvl(r17) ; Get the "new" active level + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F07 ; (TEST/DEBUG) + mr r1,r15 ; (TEST/DEBUG) + mr r3,r14 ; (TEST/DEBUG) + mr r5,r13 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + cmplwi cr1,r14,0 ; Do we possibly have some context to load? + stw r15,ACT_MACT_VMXlvl(r17) ; Set the "new" active level + la r23,savevscr(r14) ; Point to the VSCR + stw r18,ACT_MACT_VMXcpu(r17) ; Set the active CPU + la r20,savevr0(r14) ; Point to first line to bring in + stw r17,PP_VMX_THREAD(r6) ; Store current thread address in vmx_thread to claim vector for thread + beq- cr1,ProtectTheAmericanWay ; Nothing to restore, first time use... + lwz r0,SAVlvlvec(r14) ; Get the level of first facility savearea + cmplw r0,r15 ; Top level correct to load? + bne- ProtectTheAmericanWay ; No, go initialize... 
+ +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F08 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + mr r8,r3 + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r22,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r22 ; Restore it + mr r3,r8 +#endif +#endif + + li r0,1 ; Get the level invalid indication + lwz r22,savevrsave(r4) ; Get the most current VRSAVE + lwz r10,savevrvalid(r14) ; Get the valid VRs in the savearea + lis r9,0x5555 ; Mask with odd bits set + and r10,r10,r22 ; Figure out just what registers need to be loaded + ori r9,r9,0x5555 ; Finish mask + rlwinm r11,r10,1,0,31 ; Shift over 1 + stw r0,SAVlvlvec(r14) ; Mark the savearea invalid because we are activating again + or r12,r10,r11 ; After this, even bits show which lines to touch + dcbt br0,r23 ; Touch in the VSCR + andc r13,r12,r9 ; Clear out odd bits + + la r20,savevr0(r14) ; Point to line 0 + rlwinm r3,r13,15,0,15 ; Move line 8-15 flags to high order odd bits + la r21,savevr2(r3) ; Point to line 1 + or r3,r13,r3 ; Set the odd bits + ; (bit 0 is line 0, bit 1 is line 8, + ; bit 2 is line 1, bit 3 is line 9, etc. + lvxl v31,br0,r23 ; Get the VSCR + rlwimi r3,r10,16,16,31 ; Put vrsave 0 - 15 into positions 16 - 31 + mtvscr v31 ; Slam the VSCR value + mtcrf 255,r3 ; Load up the CRs + mr r22,r20 ; Start registers off +; +; Load the new vector state +; + + bf 0,lnol0 ; No line 0 to do... + dcbt br0,r20 ; Touch cache line 0 + +lnol0: + la r20,savevr4(r14) ; Point to line 2 + bf 2,lnol1 ; No line 1 to do... + dcbt br0,r21 ; Touch cache line 1 + +lnol1: + la r21,savevr6(r14) ; Point to line 3 + bf 4,lnol2 ; No line 2 to do... + dcbt br0,r20 ; Touch cache line 2 + +lnol2: + li r30,16 ; Get offset for odd registers + bf 16,lnovr0 ; Do not restore VR0... 
+ lvxl v0,br0,r22 ; Restore VR0 + +lnovr0: + la r23,savevr2(r14) ; Point to V2/V3 pair + bf 17,lnovr1 ; Do not restore VR1... + lvxl v1,r30,r22 ; Restore VR1 + +lnovr1: + la r20,savevr8(r14) ; Point to line 4 + bf 6,lnol3 ; No line 3 to do... + dcbt br0,r21 ; Touch cache line 3 + +lnol3: + la r22,savevr4(r14) ; Point to V4/V5 pair + bf 18,lnovr2 ; Do not restore VR2... + lvxl v2,br0,r23 ; Restore VR2 + +lnovr2: + bf 19,lnovr3 ; Do not restore VR3... + lvxl v3,r30,r23 ; Restore VR3 + +lnovr3: +; +; Note: CR4 is now free +; + la r21,savevr10(r14) ; Point to line 5 + bf 8,lnol4 ; No line 4 to do... + dcbt br0,r20 ; Touch cache line 4 + +lnol4: + la r23,savevr6(r14) ; Point to R6/R7 pair + bf 20,lnovr4 ; Do not restore VR4... + lvxl v4,br0,r22 ; Restore VR4 + +lnovr4: + bf 21,lnovr5 ; Do not restore VR5... + lvxl v5,r30,r22 ; Restore VR5 + +lnovr5: + mtcrf 0x08,r10 ; Set CRs for registers 16-19 + la r20,savevr12(r14) ; Point to line 6 + bf 10,lnol5 ; No line 5 to do... + dcbt br0,r21 ; Touch cache line 5 + +lnol5: + la r22,savevr8(r14) ; Point to V8/V9 pair + bf 22,lnovr6 ; Do not restore VR6... + lvxl v6,br0,r23 ; Restore VR6 + +lnovr6: + bf 23,lnovr7 ; Do not restore VR7... + lvxl v7,r30,r23 ; Restore VR7 + +lnovr7: +; +; Note: CR5 is now free +; + la r21,savevr14(r14) ; Point to line 7 + bf 12,lnol6 ; No line 6 to do... + dcbt br0,r20 ; Touch cache line 6 + +lnol6: + la r23,savevr10(r14) ; Point to V10/V11 pair + bf 24,lnovr8 ; Do not restore VR8... + lvxl v8,br0,r22 ; Restore VR8 + +lnovr8: + bf 25,lnovr9 ; Do not save VR9... + lvxl v9,r30,r22 ; Restore VR9 + +lnovr9: + mtcrf 0x04,r10 ; Set CRs for registers 20-23 + la r20,savevr16(r14) ; Point to line 8 + bf 14,lnol7 ; No line 7 to do... + dcbt br0,r21 ; Touch cache line 7 + +lnol7: + la r22,savevr12(r14) ; Point to V12/V13 pair + bf 26,lnovr10 ; Do not restore VR10... + lvxl v10,br0,r23 ; Restore VR10 + +lnovr10: + bf 27,lnovr11 ; Do not restore VR11... 
+ lvxl v11,r30,r23 ; Restore VR11 + +lnovr11: + +; +; Note: CR6 is now free +; + la r21,savevr18(r14) ; Point to line 9 + bf 1,lnol8 ; No line 8 to do... + dcbt br0,r20 ; Touch cache line 8 + +lnol8: + la r23,savevr14(r14) ; Point to V14/V15 pair + bf 28,lnovr12 ; Do not restore VR12... + lvxl v12,br0,r22 ; Restore VR12 + +lnovr12: + bf 29,lnovr13 ; Do not restore VR13... + lvxl v13,r30,r22 ; Restore VR13 + +lnovr13: + mtcrf 0x02,r10 ; Set CRs for registers 24-27 + la r20,savevr20(r14) ; Point to line 10 + bf 3,lnol9 ; No line 9 to do... + dcbt br0,r21 ; Touch cache line 9 + +lnol9: + la r22,savevr16(r14) ; Point to V16/V17 pair + bf 30,lnovr14 ; Do not restore VR14... + lvxl v14,br0,r23 ; Restore VR14 + +lnovr14: + bf 31,lnovr15 ; Do not restore VR15... + lvxl v15,r30,r23 ; Restore VR15 + +lnovr15: +; +; Note: CR7 is now free +; + la r21,savevr22(r14) ; Point to line 11 + bf 5,lnol10 ; No line 10 to do... + dcbt br0,r20 ; Touch cache line 10 + +lnol10: + la r23,savevr18(r14) ; Point to V18/V19 pair + bf 16,lnovr16 ; Do not restore VR16... + lvxl v16,br0,r22 ; Restore VR16 + +lnovr16: + bf 17,lnovr17 ; Do not restore VR17... + lvxl v17,r30,r22 ; Restore VR17 + +lnovr17: + mtcrf 0x01,r10 ; Set CRs for registers 28-31 +; +; Note: All registers have been or are accounted for in CRs +; + la r20,savevr24(r14) ; Point to line 12 + bf 7,lnol11 ; No line 11 to do... + dcbt br0,r21 ; Touch cache line 11 + +lnol11: + la r22,savevr20(r14) ; Point to V20/V21 pair + bf 18,lnovr18 ; Do not restore VR18... + lvxl v18,br0,r23 ; Restore VR18 + +lnovr18: + bf 19,lnovr19 ; Do not restore VR19... + lvxl v19,r30,r23 ; Restore VR19 + +lnovr19: + la r21,savevr26(r14) ; Point to line 13 + bf 9,lnol12 ; No line 12 to do... + dcbt br0,r20 ; Touch cache line 12 + +lnol12: + la r23,savevr22(r14) ; Point to V22/V23 pair + bf 20,lnovr20 ; Do not restore VR20... + lvxl v20,br0,r22 ; Restore VR20 + +lnovr20: + bf 21,lnovr21 ; Do not restore VR21... 
+ lvxl v21,r30,r22 ; Restore VR21 + +lnovr21: + la r20,savevr28(r14) ; Point to line 14 + bf 11,lnol13 ; No line 13 to do... + dcbt br0,r21 ; Touch cache line 13 + +lnol13: + la r22,savevr24(r14) ; Point to V24/V25 pair + bf 22,lnovr22 ; Do not restore VR22... + lvxl v22,br0,r23 ; Restore VR22 + +lnovr22: + bf 23,lnovr23 ; Do not restore VR23... + lvxl v23,r30,r23 ; Restore VR23 + +lnovr23: + la r21,savevr30(r14) ; Point to line 15 + bf 13,lnol14 ; No line 14 to do... + dcbt br0,r20 ; Touch cache line 14 + +lnol14: + la r23,savevr26(r14) ; Point to V26/V27 pair + bf 24,lnovr24 ; Do not restore VR24... + lvxl v24,br0,r22 ; Restore VR24 + +lnovr24: + bf 25,lnovr25 ; Do not restore VR25... + lvxl v25,r30,r22 ; Restore VR25 + +lnovr25: + bf 15,lnol15 ; No line 15 to do... + dcbt br0,r21 ; Touch cache line 15 + +lnol15: +; +; Note: All needed cache lines have been touched now +; + la r22,savevr28(r14) ; Point to V28/V29 pair + bf 26,lnovr26 ; Do not restore VR26... + lvxl v26,br0,r23 ; Restore VR26 + +lnovr26: + bf 27,lnovr27 ; Do not restore VR27... + lvxl v27,r30,r23 ; Restore VR27 + +lnovr27: + la r23,savevr30(r14) ; Point to V30/V31 pair + bf 28,lnovr28 ; Do not restore VR28... + lvxl v28,br0,r22 ; Restore VR28 + +lnovr28: + bf 29,lnovr29 ; Do not restore VR29... + lvxl v29,r30,r22 ; Restore VR29 + +lnovr29: + bf 30,lnovr30 ; Do not restore VR30... + lvxl v30,br0,r23 ; Restore VR30 + +lnovr30: +; +; Everything is restored now except for VR31. We need it to get +; the QNaNBarbarian value to put into idle vector registers +; + + lis r5,hi16(EXT(QNaNbarbarian)) ; Get address of empty value + cmpwi r10,-1 ; Handle the quick case of all registers in use + ori r5,r5,lo16(EXT(QNaNbarbarian)) ; Get low address of empty value + beq- mstlvr31 ; Not likely, but all are in use... + mtcrf 255,r10 ; Get mask of valid registers + lvxl v31,br0,r5 ; Initialize VR31 to the empty value + + bt 0,ni0 ; Register is ok already... 
+ vor v0,v31,v31 ; Copy into the next register +ni0: + bt 1,ni1 ; Register is ok already... + vor v1,v31,v31 ; Copy into the next register +ni1: + bt 2,ni2 ; Register is ok already... + vor v2,v31,v31 ; Copy into the next register +ni2: + bt 3,ni3 ; Register is ok already... + vor v3,v31,v31 ; Copy into the next register +ni3: + bt 4,ni4 ; Register is ok already... + vor v4,v31,v31 ; Copy into the next register +ni4: + bt 5,ni5 ; Register is ok already... + vor v5,v31,v31 ; Copy into the next register +ni5: + bt 6,ni6 ; Register is ok already... + vor v6,v31,v31 ; Copy into the next register +ni6: + bt 7,ni7 ; Register is ok already... + vor v7,v31,v31 ; Copy into the next register +ni7: + bt 8,ni8 ; Register is ok already... + vor v8,v31,v31 ; Copy into the next register +ni8: + bt 9,ni9 ; Register is ok already... + vor v9,v31,v31 ; Copy into the next register +ni9: + bt 10,ni10 ; Register is ok already... + vor v10,v31,v31 ; Copy into the next register +ni10: + bt 11,ni11 ; Register is ok already... + vor v11,v31,v31 ; Copy into the next register +ni11: + bt 12,ni12 ; Register is ok already... + vor v12,v31,v31 ; Copy into the next register +ni12: + bt 13,ni13 ; Register is ok already... + vor v13,v31,v31 ; Copy into the next register +ni13: + bt 14,ni14 ; Register is ok already... + vor v14,v31,v31 ; Copy into the next register +ni14: + bt 15,ni15 ; Register is ok already... + vor v15,v31,v31 ; Copy into the next register +ni15: + bt 16,ni16 ; Register is ok already... + vor v16,v31,v31 ; Copy into the next register +ni16: + bt 17,ni17 ; Register is ok already... + vor v17,v31,v31 ; Copy into the next register +ni17: + bt 18,ni18 ; Register is ok already... + vor v18,v31,v31 ; Copy into the next register +ni18: + bt 19,ni19 ; Register is ok already... + vor v19,v31,v31 ; Copy into the next register +ni19: + bt 20,ni20 ; Register is ok already... + vor v20,v31,v31 ; Copy into the next register +ni20: + bt 21,ni21 ; Register is ok already... 
+ vor v21,v31,v31 ; Copy into the next register +ni21: + bt 22,ni22 ; Register is ok already... + vor v22,v31,v31 ; Copy into the next register +ni22: + bt 23,ni23 ; Register is ok already... + vor v23,v31,v31 ; Copy into the next register +ni23: + bt 24,ni24 ; Register is ok already... + vor v24,v31,v31 ; Copy into the next register +ni24: + bt 25,ni25 ; Register is ok already... + vor v25,v31,v31 ; Copy into the next register +ni25: + bt 26,ni26 ; Register is ok already... + vor v26,v31,v31 ; Copy into the next register +ni26: + bt 27,ni27 ; Register is ok already... + vor v27,v31,v31 ; Copy into the next register +ni27: + bt 28,ni28 ; Register is ok already... + vor v28,v31,v31 ; Copy into the next register +ni28: + bt 29,ni29 ; Register is ok already... + vor v29,v31,v31 ; Copy into the next register +ni29: + bt 30,ni30 ; Register is ok already... + vor v30,v31,v31 ; Copy into the next register +ni30: + bf 31,lnovr31 ; R31 is empty, no need to restore... + +mstlvr31: lvxl v31,r30,r23 ; Restore VR31 + +lnovr31: + +vrenable: + lwz r9,SAVflags(r4) /* Get the flags of the current savearea */ + lwz r8,savesrr1(r4) ; Get the msr of the interrupted guy + rlwinm r5,r4,0,0,19 /* Get the page address of the savearea */ + oris r8,r8,hi16(MASK(MSR_VEC)) ; Enable the vector facility + lwz r10,ACT_MACT_SPF(r17) ; Get the special flags + lis r7,hi16(SAVattach) /* Get the attached flag */ + lwz r5,SACvrswap(r5) /* Get Virtual to Real translation */ + oris r10,r10,hi16(vectorUsed|vectorCng) ; Set that we used vectors + mr. r15,r15 ; See if we are doing this for user state + stw r8,savesrr1(r4) ; Set the msr of the interrupted guy + andc r9,r9,r7 /* Clear the attached bit */ + xor r3,r4,r5 /* Get the real address of the savearea */ + stw r9,SAVflags(r4) /* Set the flags of the current savearea */ + bne- vrnuser ; We are not user state... 
+ stw r10,ACT_MACT_SPF(r17) ; Set the activation copy + stw r10,spcFlags(r6) ; Set per_proc copy + +vrnuser: +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F0A ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + mr r8,r3 ; Save this + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r3,r8 ; Restore it +#endif +#endif + b EXT(exception_exit) /* Exit from the fray... */ + +/* + * Initialize the registers to some bogus value + * We make sure that non-Java mode is the default here + */ + +ProtectTheAmericanWay: + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x5F09 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#if GDDBG + lis r3,hi16(EXT(GratefulDeb)) ; Point to top of display + mr r8,r4 ; Save this + ori r3,r3,lo16(EXT(GratefulDeb)) ; Put in bottom part + mr r4,r2 ; Set value + mtlr r3 ; Set link register + li r3,1 ; Display address + blrl ; Display it + mr r4,r8 ; Restore it +#endif +#endif + lis r5,hi16(EXT(QNaNbarbarian)) ; Get address of empty value + vspltish v1,1 ; Turn on the non-Java bit and saturate + ori r5,r5,lo16(EXT(QNaNbarbarian)) ; Get low address of empty value + vspltisw v2,1 ; Turn on the saturate bit + lvxl v0,br0,r5 ; Initialize VR0 + vxor v1,v1,v2 ; Turn off saturate + + vor v2,v0,v0 ; Copy into the next register + mtvscr v1 ; Clear the vector status register + vor v3,v0,v0 ; Copy into the next register + vor v1,v0,v0 ; Copy into the next register + vor v4,v0,v0 ; Copy into the next register + vor v5,v0,v0 ; Copy into the next register + vor v6,v0,v0 ; Copy into the next register + vor v7,v0,v0 ; Copy into the next register + vor v8,v0,v0 ; Copy into the next register + vor v9,v0,v0 ; Copy into the next register + vor v10,v0,v0 ; Copy into the next register + 
vor v11,v0,v0 ; Copy into the next register + vor v12,v0,v0 ; Copy into the next register + vor v13,v0,v0 ; Copy into the next register + vor v14,v0,v0 ; Copy into the next register + vor v15,v0,v0 ; Copy into the next register + vor v16,v0,v0 ; Copy into the next register + vor v17,v0,v0 ; Copy into the next register + vor v18,v0,v0 ; Copy into the next register + vor v19,v0,v0 ; Copy into the next register + vor v20,v0,v0 ; Copy into the next register + vor v21,v0,v0 ; Copy into the next register + vor v22,v0,v0 ; Copy into the next register + vor v23,v0,v0 ; Copy into the next register + vor v24,v0,v0 ; Copy into the next register + vor v25,v0,v0 ; Copy into the next register + vor v26,v0,v0 ; Copy into the next register + vor v27,v0,v0 ; Copy into the next register + vor v28,v0,v0 ; Copy into the next register + vor v29,v0,v0 ; Copy into the next register + vor v30,v0,v0 ; Copy into the next register + vor v31,v0,v0 ; Copy into the next register + b vrenable ; Finish setting it all up... + +; +; Finds a unused vector area in the activation pointed +; to by R12s saved contexts. If none are found (unlikely but possible) +; and R3 is 0, a new area is allocated. If R3 is non-zero, it contains +; a pointer to a vector savearea that is free. +; + +vsrchsave: lwz r6,ACT_MACT_PCB(r12) ; Get the first "normal" savearea + +vsrnorm: mr. r5,r6 ; Is there another? + beq- vsrvect ; No, search the floating point saveareas... + lwz r7,SAVflags(r5) ; Get the flags for this guy + lwz r6,SAVprev(r5) ; Get the previous savearea, just in case + andis. r8,r7,hi16(SAVvmxvalid) ; Have we found an empty vector save in normal? + beq+ vsrgot ; We found one... + b vsrnorm ; Search again... + +vsrvect: lwz r6,ACT_MACT_FPU(r12) ; Get the first "floating point" savearea + +vsrvectx: mr. r5,r6 ; Is there another? + beq- vsrget ; No, try to allocate one... + lwz r7,SAVflags(r5) ; Get the flags for this guy + lwz r6,SAVprefp(r5) ; Get the previous savearea, just in case + andis. 
r8,r7,hi16(SAVvmxvalid) ; Have we found an empty vector save in float? + bne- vsrvectx ; Search again... + +vsrgot: mr r3,r5 ; Get the savearea into the right register + blr ; Return... + +vsrget: mr. r5,r3 ; Do we allocate or use existing? + beq+ vsrallo ; Allocate one... + + lwz r7,SAVflags(r3) ; Get the passed in area flags + blr ; Return... +; +; NOTE: save_get will return directly and set R7 to 0... +; +vsrallo: b EXT(save_get) ; Get a fresh savearea + + +/* + * void lfs(fpsp,fpdp) + * + * load the single precision float to the double + * + * This routine is used by the alignment handler. + * + */ +ENTRY(lfs, TAG_NO_FRAME_USED) + lfs f1, 0(r3) + stfd f1, 0(r4) + blr + +/* + * fpsp stfs(fpdp,fpsp) + * + * store the double precision float to the single + * + * This routine is used by the alignment handler. + * + */ +ENTRY(stfs, TAG_NO_FRAME_USED) + lfd f1, 0(r3) + stfs f1, 0(r4) + blr + diff --git a/osfmk/ppc/db_asm.s b/osfmk/ppc/db_asm.s new file mode 100644 index 000000000..b26a1e1d0 --- /dev/null +++ b/osfmk/ppc/db_asm.s @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * @OSF_COPYRIGHT@
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+/* void
+ * db_phys_copy(src, dst, bytecount)
+ *	vm_offset_t src;
+ *	vm_offset_t dst;
+ *	int bytecount
+ *
+ * This routine will copy bytecount bytes from physical address src to physical
+ * address dst.  Data address translation is switched off around the copy so
+ * the raw physical addresses in r3/r4 are used directly.  A bytecount <= 0
+ * copies nothing.
+ */
+ENTRY(db_phys_copy, TAG_NO_FRAME_USED)
+
+	cmpwi	r5, 0			/* Guard: nothing to do for bytecount <= 0 */
+	blelr-				/* (the byte loop below would otherwise run r5 negative) */
+
+	/* Switch off data translations */
+	mfmsr	r6
+	rlwinm	r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1
+	mtmsr	r7
+	isync				/* Ensure data translations are off */
+
+	subi	r3, r3, 4		/* Pre-bias pointers for the pre-increment word loop */
+	subi	r4, r4, 4
+
+	cmpwi	r5, 3
+	ble-	.L_db_phys_copy_bytes	/* Fewer than 4 bytes: copy byte by byte */
+.L_db_phys_copy_loop:			/* Copy a word at a time while >= 4 bytes remain */
+	lwz	r0, 4(r3)
+	addi	r3, r3, 4
+	subi	r5, r5, 4
+	stw	r0, 4(r4)
+	addi	r4, r4, 4
+	cmpwi	r5, 3
+	bgt+	.L_db_phys_copy_loop
+
+	/* If no leftover bytes, we're done now */
+	cmpwi	r5, 0
+	beq+	.L_db_phys_copy_done
+
+.L_db_phys_copy_bytes:
+	addi	r3, r3, 3		/* Re-bias pointers for the byte-sized pre-increment loop */
+	addi	r4, r4, 3
+.L_db_phys_copy_byte_loop:
+	lbz	r0, 1(r3)
+	addi	r3, r3, 1
+	subi	r5, r5, 1
+	stb	r0, 1(r4)
+	addi	r4, r4, 1
+	cmpwi	r5, 0
+	bne+	.L_db_phys_copy_byte_loop	/* FIX: was .L_db_phys_copy_loop; re-entering the
+						   word loop with byte-biased pointers and <4 bytes
+						   left would corrupt the copy */
+
+.L_db_phys_copy_done:
+	mtmsr	r6		/* Restore original translations */
+	isync			/* Ensure translation state is restored before returning */
+
+	blr
+
+/* void
+ * db_phys_cmp(src_a, src_b, bytecount)
+ *	vm_offset_t src_a;
+ *	vm_offset_t src_b;
+ *	int bytecount
+ *
+ * This routine will compare bytecount bytes from physical address src_a and physical
+ * address src_b.  Returns (in r3) 1 if the two regions are identical and 0 if
+ * they differ.  A bytecount <= 0 compares equal.  Data address translation is
+ * switched off around the compare, as in db_phys_copy.
+ */
+ENTRY(db_phys_cmp, TAG_NO_FRAME_USED)	/* FIX: the entry label was missing, leaving this
+					   code unreachable (the previous routine ends in
+					   blr) and the symbol undefined to callers */
+
+	cmpwi	r5, 0			/* Guard: bytecount <= 0 trivially compares equal */
+	bgt+	.L_db_phys_cmp_start
+	li	r3, 1
+	blr
+
+.L_db_phys_cmp_start:
+	/* Switch off data translations */
+	mfmsr	r6
+	rlwinm	r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1
+	mtmsr	r7
+	isync				/* Ensure data translations are off */
+
+	subi	r3, r3, 4		/* Pre-bias pointers, as in db_phys_copy */
+	subi	r4, r4, 4
+
+	cmpwi	r5, 3
+	ble-	.L_db_phys_cmp_bytes
+.L_db_phys_cmp_loop:			/* Compare a word at a time while >= 4 bytes remain */
+	lwz	r0, 4(r3)
+	lwz	r7, 4(r4)		/* r7 is free again: modified MSR already loaded */
+	addi	r3, r3, 4
+	addi	r4, r4, 4
+	subi	r5, r5, 4
+	cmpw	r0, r7
+	bne	.L_db_phys_cmp_false
+	cmpwi	r5, 3
+	bgt+	.L_db_phys_cmp_loop
+
+	/* If no leftover bytes, we're done now */
+	cmpwi	r5, 0
+	beq+	.L_db_phys_cmp_true
+
+.L_db_phys_cmp_bytes:
+	addi	r3, r3, 3
+	addi	r4, r4, 3
+.L_db_phys_cmp_byte_loop:
+	lbz	r0, 1(r3)
+	lbz	r7, 1(r4)
+	addi	r3, r3, 1
+	addi	r4, r4, 1
+	subi	r5, r5, 1
+	cmpw	r0, r7
+	bne	.L_db_phys_cmp_false
+	cmpwi	r5, 0
+	bne+	.L_db_phys_cmp_byte_loop	/* FIX: was .L_db_phys_cmp_loop; see the matching
+						   byte-loop fix in db_phys_copy above */
+
+.L_db_phys_cmp_true:
+	li	r3, 1
+	b	.L_db_phys_cmp_done
+
+.L_db_phys_cmp_false:
+	li	r3, 0
+
+.L_db_phys_cmp_done:
+	mtmsr	r6		/* Restore original translations */
+	isync			/* Ensure translation state is restored before returning */
+
+	blr
+
diff --git a/osfmk/ppc/db_disasm.c b/osfmk/ppc/db_disasm.c
new file mode 100644
index 000000000..984124ba0
--- /dev/null
+++ b/osfmk/ppc/db_disasm.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License").  You may not use this file except in compliance with the
+ * License.  Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ *
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Instruction disassembler. + */ + +#include +#include + +#include +#include +#include + +#include +#include + +#include "ppc_disasm.h" + +db_addr_t db_disasm_pc, db_disasm_symaddr; +boolean_t db_disasm_print_symaddr; + +/* + * Disassemble instruction at 'loc'. 'altfmt' specifies an + * (optional) alternate format. Return address of start of + * next instruction. + */ +db_addr_t +db_disasm( + db_addr_t loc, + boolean_t altfmt, + task_t task) +{ + int inst; + char *p; + + inst = db_get_task_value(loc, 4, FALSE, task); + db_disasm_pc = loc; + db_disasm_print_symaddr = FALSE; + p = in(inst); + db_printf("%s", p); + if (db_disasm_print_symaddr) { + db_printf(" <"); + db_task_printsym(db_disasm_symaddr, DB_STGY_ANY, task); + db_printf(">"); + } + dis_done(); + return (loc+4); +} + +/* + * Given four bytes of instruction (stored as an int, not an + * array of characters), compute if the instruction reads + * memory. + */ +int +db_inst_load( + unsigned long insw) +{ +#if 1 + db_printf("db_inst_load: coming soon in a debugger near you!\n"); + return 0; +#else + unsigned char insb, bits; + + insb = insw & 0xff; + insw >>= 8; + bits = db_ldstrtab[insb]; + if (!(bits & DBLS_LOAD)) + return (0); + while (1) { + switch (bits & DBLS_MODS) { + case 0: + return (1); + case DBLS_MODRM: + insb = insw & 0xff; + return ((insb & 0xc0) != 0xc0); + case DBLS_SECOND|DBLS_MODRM: + insb = insw & 0xff; + return ((insb & 0xc0) != 0xc0 ? 
2 : 0); + case DBLS_SECOND: + return (2); + case DBLS_ESCAPE: + insb = insw & 0xff; + insw >>= 8; + bits = db_ldstrtab0f[insb]; + break; + case DBLS_SWREG: + return (db_inst_swreg(TRUE, insw, insb)); + default: + panic ("db_inst_load: unknown mod bits"); + } + } +#endif +} + +/* + * Given four bytes of instruction (stored as an int, not an + * array of characters), compute if the instruction writes + * memory. + */ +int +db_inst_store( + unsigned long insw) +{ +#if 1 + db_printf("db_inst_store: coming soon in a debugger near you!\n"); + return 0; +#else + unsigned char insb, bits; + + insb = insw & 0xff; + insw >>= 8; + bits = db_ldstrtab[insb]; + if (!(bits & DBLS_STORE)) + return (0); + while (1) { + switch (bits & DBLS_MODS) { + case 0: + return (1); + case DBLS_MODRM: + insb = insw & 0xff; + return ((insb & 0xc0) != 0xc0); + case DBLS_SECOND|DBLS_MODRM: + insb = insw & 0xff; + return ((insb & 0xc0) != 0xc0 ? 2 : 0); + case DBLS_SECOND: + return (2); + case DBLS_ESCAPE: + insb = insw & 0xff; + insw >>= 8; + bits = db_ldstrtab0f[insb]; + break; + case DBLS_SWREG: + return (db_inst_swreg(FALSE, insw, insb)); + default: + panic ("db_inst_store: unknown mod bits"); + } + } +#endif +} + +/* + * Extra routines for the automatically generated disassembler + */ +char * +hex( + bits n) +{ + char *p; + + if (n < 10) + return dec(n); + p = dis_alloc(11); + sprintf(p, "0x%lx", n); + return p; +} + +char * +dec( + bits n) +{ + char *p = dis_alloc(11); + sprintf(p, "%lu", n); + return p; +} + +char * +brdispl( + bits displ, + bits nbits) +{ + int sign, extended; + + sign = 1 << (nbits - 1); + extended = (displ & sign ? displ - (sign << 1) : displ); + db_disasm_symaddr = db_disasm_pc + (extended << 2); + db_disasm_print_symaddr = TRUE; + return hex(extended << 2); +} + +char * +mbz( + bits n) +{ + return n ? 
"[reserved bits not zero]" : ""; +} + +size_t db_disasm_string_size = 0; +#define DB_DISASM_STRING_MAXSIZE 4096 +char db_disasm_string[DB_DISASM_STRING_MAXSIZE]; + +void *db_disasm_malloc(size_t size); /* forward */ +void * +db_disasm_malloc( + size_t size) +{ + void * new_buf; + + if (db_disasm_string_size + size <= DB_DISASM_STRING_MAXSIZE) { + new_buf = (void *) (db_disasm_string + db_disasm_string_size); + db_disasm_string_size += size; + return new_buf; + } + db_printf("db_disasm_malloc(size=%d) failed: %d left !\n", + size, + DB_DISASM_STRING_MAXSIZE - db_disasm_string_size); + return (void *) 0; +} diff --git a/osfmk/ppc/db_interface.c b/osfmk/ppc/db_interface.c new file mode 100644 index 000000000..943baa616 --- /dev/null +++ b/osfmk/ppc/db_interface.c @@ -0,0 +1,671 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct ppc_saved_state *ppc_last_saved_statep; +struct ppc_saved_state ppc_nested_saved_state; +unsigned ppc_last_kdb_sp; + +extern int debugger_active[NCPUS]; /* Debugger active on CPU */ +extern int debugger_cpu; /* Current cpu running debugger */ + +int db_all_set_up = 0; + + +#if !MACH_KDP +void kdp_register_send_receive(void); +#endif + +/* + * Enter KDB through a keyboard trap. + * We show the registers as of the keyboard interrupt + * instead of those at its call to KDB. + */ +struct int_regs { + /* XXX more registers ? */ + struct ppc_interrupt_state *is; +}; + +extern char * trap_type[]; +extern int TRAP_TYPES; + +/* + * Code used to synchronize kdb among all cpus, one active at a time, switch + * from on to another using kdb_on! 
#cpu or cpu #cpu + */ + +decl_simple_lock_data(, kdb_lock) /* kdb lock */ + +#define db_simple_lock_init(l, e) hw_lock_init(&((l)->interlock)) +#define db_simple_lock_try(l) hw_lock_try(&((l)->interlock)) +#define db_simple_unlock(l) hw_lock_unlock(&((l)->interlock)) + +extern volatile unsigned int cpus_holding_bkpts; /* counter for number of cpus holding + breakpoints (ie: cpus that did not + insert back breakpoints) */ +extern boolean_t db_breakpoints_inserted; + +/* Forward */ + +extern void kdbprinttrap( + int type, + int code, + int *pc, + int sp); +extern int db_user_to_kernel_address( + task_t task, + vm_offset_t addr, + unsigned *kaddr, + int flag); +extern void db_write_bytes_user_space( + vm_offset_t addr, + int size, + char *data, + task_t task); +extern int db_search_null( + task_t task, + unsigned *svaddr, + unsigned evaddr, + unsigned *skaddr, + int flag); +extern int kdb_enter(int); +extern void kdb_leave(void); +extern void lock_kdb(void); +extern void unlock_kdb(void); + +#if DB_MACHINE_COMMANDS +struct db_command ppc_db_commands[] = { + { "lt", db_low_trace, CS_MORE|CS_SET_DOT, 0 }, + { (char *)0, 0, 0, 0 } +}; +#endif /* DB_MACHINE_COMMANDS */ + +#if !MACH_KDP +void kdp_register_send_receive(void) {} +#endif + +extern jmp_buf_t *db_recover; +spl_t saved_ipl[NCPUS]; /* just to know what IPL was before trap */ +struct ppc_saved_state *saved_state[NCPUS]; + +/* + * kdb_trap - field a TRACE or BPT trap + */ +void +kdb_trap( + int type, + struct ppc_saved_state *regs) +{ + boolean_t trap_from_user; + int previous_console_device; + int code=0; + + previous_console_device=switch_to_serial_console(); + + switch (type) { + case T_TRACE: /* single_step */ + case T_PROGRAM: /* breakpoint */ +#if 0 + case T_WATCHPOINT: /* watchpoint */ +#endif + case -1: /* keyboard interrupt */ + break; + + default: + if (db_recover) { + ppc_nested_saved_state = *regs; + db_printf("Caught "); + if (type > TRAP_TYPES) + db_printf("type %d", type); + else + db_printf("%s", 
trap_type[type]); + db_printf(" trap, pc = %x\n", + regs->srr0); + db_error(""); + /*NOTREACHED*/ + } + kdbprinttrap(type, code, (int *)®s->srr0, regs->r1); + } + + saved_state[cpu_number()] = regs; + + ppc_last_saved_statep = regs; + ppc_last_kdb_sp = (unsigned) &type; + + if (!IS_USER_TRAP(regs)) { + bzero((char *)&ddb_regs, sizeof (ddb_regs)); + ddb_regs = *regs; + trap_from_user = FALSE; + + } + else { + ddb_regs = *regs; + trap_from_user = TRUE; + } + + db_task_trap(type, code, trap_from_user); + + *regs = ddb_regs; + + if ((type == T_PROGRAM) && + (db_get_task_value(regs->srr0, + BKPT_SIZE, + FALSE, + db_target_space(current_act(), + trap_from_user)) + == BKPT_INST)) + regs->srr0 += BKPT_SIZE; + +kdb_exit: + saved_state[cpu_number()] = 0; + switch_to_old_console(previous_console_device); + +} + + +/* + * Print trap reason. + */ + +void +kdbprinttrap( + int type, + int code, + int *pc, + int sp) +{ + printf("kernel: "); + if (type > TRAP_TYPES) + db_printf("type %d", type); + else + db_printf("%s", trap_type[type]); + db_printf(" trap, code=%x pc@%x = %x sp=%x\n", + code, pc, *(int *)pc, sp); + db_run_mode = STEP_CONTINUE; +} + +/* + * + */ +vm_offset_t db_vtophys( + pmap_t pmap, + vm_offset_t va) +{ + register mapping *mp; + register vm_offset_t pa; + + pa = (vm_offset_t)LRA(pmap->space,(void *)va); + + if (pa != 0) + return(pa); + + mp = hw_lock_phys_vir(pmap->space, va); + if((unsigned int)mp&1) { + return 0; + } + + if(!mp) { /* If it was not a normal page */ + pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ + return pa; /* Return physical address */ + } + + mp = hw_cpv(mp); /* Convert to virtual address */ + + if(!mp->physent) { + pa = (vm_offset_t)((mp->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); + } else { + pa = (vm_offset_t)((mp->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); + hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); + } + + return(pa); +} + +int 
+db_user_to_kernel_address( + task_t task, + vm_offset_t addr, + unsigned *kaddr, + int flag) +{ + unsigned int sr_val, raddr; + + raddr = (unsigned int)db_vtophys(task->map->pmap, trunc_page(addr)); /* Get the real address */ + + if (!raddr) { + if (flag) { + db_printf("\nno memory is assigned to address %08x\n", addr); + db_error(0); + /* NOTREACHED */ + } + return -1; + } + sr_val = SEG_REG_PROT | task->map->pmap->space + | ((addr >> 8) & 0x00F00000); + + mtsr(SR_COPYIN_NUM, sr_val); + sync(); + *kaddr = (addr & 0x0fffffff) | (SR_COPYIN_NUM << 28); + return(0); +} + +/* + * Read bytes from task address space for debugger. + */ +void +db_read_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task) +{ + int n,max; + unsigned phys_dst; + unsigned phys_src; + pmap_t pmap; + + while (size > 0) { + if (task != NULL) + pmap = task->map->pmap; + else + pmap = kernel_pmap; + + phys_src = (unsigned int)db_vtophys(pmap, trunc_page(addr)); + if (phys_src == 0) { + db_printf("\nno memory is assigned to src address %08x\n", + addr); + db_error(0); + /* NOTREACHED */ + } + phys_src = phys_src| (addr & page_mask); + + phys_dst = (unsigned int)db_vtophys(kernel_pmap, trunc_page(data)); + if (phys_dst == 0) { + db_printf("\nno memory is assigned to dst address %08x\n", + data); + db_error(0); + /* NOTREACHED */ + } + + phys_dst = phys_dst | (((vm_offset_t) data) & page_mask); + + /* don't over-run any page boundaries - check src range */ + max = ppc_round_page(phys_src) - phys_src; + if (max > size) + max = size; + /* Check destination won't run over boundary either */ + n = ppc_round_page(phys_dst) - phys_dst; + if (n < max) + max = n; + size -= max; + addr += max; + phys_copy(phys_src, phys_dst, max); + + /* resync I+D caches */ + sync_cache(phys_dst, max); + + phys_src += max; + phys_dst += max; + } +} + +/* + * Write bytes to task address space for debugger. 
+ */ +void +db_write_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task) +{ + int n,max; + unsigned phys_dst; + unsigned phys_src; + pmap_t pmap; + + while (size > 0) { + + phys_src = (unsigned int)db_vtophys(kernel_pmap, trunc_page(data)); + if (phys_src == 0) { + db_printf("\nno memory is assigned to src address %08x\n", + data); + db_error(0); + /* NOTREACHED */ + } + + phys_src = phys_src | (((vm_offset_t) data) & page_mask); + + /* space stays as kernel space unless in another task */ + if (task == NULL) pmap = kernel_pmap; + else pmap = task->map->pmap; + + phys_dst = (unsigned int)db_vtophys(pmap, trunc_page(addr)); + if (phys_dst == 0) { + db_printf("\nno memory is assigned to dst address %08x\n", + addr); + db_error(0); + /* NOTREACHED */ + } + phys_dst = phys_dst| (addr & page_mask); + + /* don't over-run any page boundaries - check src range */ + max = ppc_round_page(phys_src) - phys_src; + if (max > size) + max = size; + /* Check destination won't run over boundary either */ + n = ppc_round_page(phys_dst) - phys_dst; + if (n < max) + max = n; + size -= max; + addr += max; + phys_copy(phys_src, phys_dst, max); + + /* resync I+D caches */ + sync_cache(phys_dst, max); + + phys_src += max; + phys_dst += max; + } +} + +boolean_t +db_check_access( + vm_offset_t addr, + int size, + task_t task) +{ + register int n; + unsigned int kern_addr; + + if (task == kernel_task || task == TASK_NULL) { + if (kernel_task == TASK_NULL) + return(TRUE); + task = kernel_task; + } else if (task == TASK_NULL) { + if (current_act() == THR_ACT_NULL) + return(FALSE); + task = current_act()->task; + } + while (size > 0) { + if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0) + return(FALSE); + n = ppc_trunc_page(addr+PPC_PGBYTES) - addr; + if (n > size) + n = size; + size -= n; + addr += n; + } + return(TRUE); +} + +boolean_t +db_phys_eq( + task_t task1, + vm_offset_t addr1, + task_t task2, + vm_offset_t addr2) +{ + vm_offset_t physa, physb; + + if ((addr1 
& (PPC_PGBYTES-1)) != (addr2 & (PPC_PGBYTES-1))) /* Is byte displacement the same? */ + return FALSE; + + if (task1 == TASK_NULL) { /* See if there is a task active */ + if (current_act() == THR_ACT_NULL) /* See if there is a current task */ + return FALSE; + task1 = current_act()->task; /* If so, use that one */ + } + + if(!(physa = db_vtophys(task1->map->pmap, trunc_page(addr1)))) return FALSE; /* Get real address of the first */ + if(!(physb = db_vtophys(task2->map->pmap, trunc_page(addr2)))) return FALSE; /* Get real address of the second */ + + return (physa == physb); /* Check if they are equal, then return... */ +} + +#define DB_USER_STACK_ADDR (0xc0000000) +#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(PPC_PGBYTES*3)) + +int +db_search_null( + task_t task, + unsigned *svaddr, + unsigned evaddr, + unsigned *skaddr, + int flag) +{ + register unsigned vaddr; + register unsigned *kaddr; + + kaddr = (unsigned *)*skaddr; + for (vaddr = *svaddr; vaddr > evaddr; ) { + if (vaddr % PPC_PGBYTES == 0) { + vaddr -= sizeof(unsigned); + if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0) + return(-1); + kaddr = (unsigned *)*skaddr; + } else { + vaddr -= sizeof(unsigned); + kaddr--; + } + if ((*kaddr == 0) ^ (flag == 0)) { + *svaddr = vaddr; + *skaddr = (unsigned)kaddr; + return(0); + } + } + return(-1); +} + +void +db_task_name( + task_t task) +{ + register char *p; + register int n; + unsigned int vaddr, kaddr; + + vaddr = DB_USER_STACK_ADDR; + kaddr = 0; + + /* + * skip nulls at the end + */ + if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0) < 0) { + db_printf(DB_NULL_TASK_NAME); + return; + } + /* + * search start of args + */ + if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1) < 0) { + db_printf(DB_NULL_TASK_NAME); + return; + } + + n = DB_TASK_NAME_LEN-1; + p = (char *)kaddr + sizeof(unsigned); + for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0; + vaddr++, p++, n--) { + if (vaddr % PPC_PGBYTES == 0) { + if 
(db_user_to_kernel_address(task, vaddr, &kaddr, 0) <0) + return; + p = (char*)kaddr; + } + db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p); + } + while (n-- >= 0) /* compare with >= 0 for one more space */ + db_printf(" "); +} + +void +db_machdep_init(void) { +#define KDB_READY 0x1 + extern int kdb_flag; + + kdb_flag |= KDB_READY; +} + + +#ifdef __STDC__ +#define KDB_SAVE(type, name) extern type name; type name##_save = name +#define KDB_RESTORE(name) name = name##_save +#else /* __STDC__ */ +#define KDB_SAVE(type, name) extern type name; type name/**/_save = name +#define KDB_RESTORE(name) name = name/**/_save +#endif /* __STDC__ */ + +#define KDB_SAVE_CTXT() \ + KDB_SAVE(int, db_run_mode); \ + KDB_SAVE(boolean_t, db_sstep_print); \ + KDB_SAVE(int, db_loop_count); \ + KDB_SAVE(int, db_call_depth); \ + KDB_SAVE(int, db_inst_count); \ + KDB_SAVE(int, db_last_inst_count); \ + KDB_SAVE(int, db_load_count); \ + KDB_SAVE(int, db_store_count); \ + KDB_SAVE(boolean_t, db_cmd_loop_done); \ + KDB_SAVE(jmp_buf_t *, db_recover); \ + KDB_SAVE(db_addr_t, db_dot); \ + KDB_SAVE(db_addr_t, db_last_addr); \ + KDB_SAVE(db_addr_t, db_prev); \ + KDB_SAVE(db_addr_t, db_next); \ + KDB_SAVE(db_regs_t, ddb_regs); + +#define KDB_RESTORE_CTXT() \ + KDB_RESTORE(db_run_mode); \ + KDB_RESTORE(db_sstep_print); \ + KDB_RESTORE(db_loop_count); \ + KDB_RESTORE(db_call_depth); \ + KDB_RESTORE(db_inst_count); \ + KDB_RESTORE(db_last_inst_count); \ + KDB_RESTORE(db_load_count); \ + KDB_RESTORE(db_store_count); \ + KDB_RESTORE(db_cmd_loop_done); \ + KDB_RESTORE(db_recover); \ + KDB_RESTORE(db_dot); \ + KDB_RESTORE(db_last_addr); \ + KDB_RESTORE(db_prev); \ + KDB_RESTORE(db_next); \ + KDB_RESTORE(ddb_regs); + +/* + * switch to another cpu + */ +void +kdb_on( + int cpu) +{ + KDB_SAVE_CTXT(); + if (cpu < 0 || cpu >= NCPUS || !debugger_active[cpu]) + return; + db_set_breakpoints(); + db_set_watchpoints(); + debugger_cpu = cpu; + unlock_debugger(); + lock_debugger(); + db_clear_breakpoints(); + 
db_clear_watchpoints(); + KDB_RESTORE_CTXT(); + if (debugger_cpu == -1) {/* someone continued */ + debugger_cpu = cpu_number(); + db_continue_cmd(0, 0, 0, ""); + } +} + +/* + * system reboot + */ +void db_reboot( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + boolean_t reboot = TRUE; + char *cp, c; + + cp = modif; + while ((c = *cp++) != 0) { + if (c == 'r') /* reboot */ + reboot = TRUE; + if (c == 'h') /* halt */ + reboot = FALSE; + } + halt_all_cpus(reboot); +} + +/* + * Switch to gdb + */ +void +db_to_gdb( + void) +{ + extern unsigned int switch_debugger; + + switch_debugger=1; +} diff --git a/osfmk/ppc/db_low_trace.c b/osfmk/ppc/db_low_trace.c new file mode 100644 index 000000000..8b80ffba9 --- /dev/null +++ b/osfmk/ppc/db_low_trace.c @@ -0,0 +1,661 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +/* + * Author: Bill Angell, Apple + * Date: 6/97 + * + * exceptions and certain C functions write into a trace table which + * can be examined via the machine 'lt' command under kdb + */ + + +#include /* For strcpy() */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include /* For db_option() */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void db_dumpphys(struct phys_entry *pp); /* Dump from physent */ +void db_dumppca(struct mapping *mp); /* PCA */ +void db_dumpmapping(struct mapping *mp); /* Dump out a mapping */ +void db_dumppmap(pmap_t pmap); /* Dump out a pmap */ +extern kmod_info_t *kmod; /* Find the kmods */ + +db_addr_t db_low_trace_prev = 0; + +/* + * Print out the low level trace table: + * + * Displays the entry and 15 before it in newest to oldest order + * + * lt [entaddr] + + * If entaddr is omitted, it starts with the most current + * If entaddr = 0, it starts with the most current and does the whole table + */ +void db_low_trace(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + int c, i; + unsigned int tempx, cnt; + unsigned int xbuf[8]; + unsigned int xTraceCurr, xTraceStart, xTraceEnd, cxltr, xxltr; + db_addr_t next_addr; + LowTraceRecord xltr; + unsigned char cmark; + + cnt = 16; /* Default to 16 entries */ + + xTraceCurr = trcWork.traceCurr; /* Transfer current pointer */ + xTraceStart = trcWork.traceStart; /* Transfer start of table */ + xTraceEnd = trcWork.traceEnd; /* Transfer end of table */ + + if(addr == -1) cnt = 0x7FFFFFFF; /* Max the count */ + + if(!addr || (addr == -1)) { + addr=xTraceCurr-sizeof(LowTraceRecord); /* Start at the newest */ + if((unsigned int)addr=xTraceEnd) { /* In the table? 
*/ + db_printf("address not in low memory trace table\n"); /* Tell the fool */ + return; /* Leave... */ + } + + if((unsigned int)addr&0x0000003F) { /* Proper alignment? */ + db_printf("address not aligned on trace entry boundary (0x40)\n"); /* Tell 'em */ + return; /* Leave... */ + } + + xxltr=(unsigned int)addr; /* Set the start */ + cxltr=((xTraceCurr==xTraceStart ? xTraceEnd : xTraceCurr)-sizeof(LowTraceRecord)); /* Get address of newest entry */ + + db_low_trace_prev = addr; /* Starting point */ + + for(i=0; i < cnt; i++) { /* Dump the 16 (or all) entries */ + + ReadReal(xxltr, (unsigned int *)&xltr); /* Get the first half */ + ReadReal(xxltr+32, &(((unsigned int *)&xltr)[8])); /* Get the second half */ + + db_printf("\n%s%08X %1X %08X %08X - %04X\n", (xxltr!=cxltr ? " " : "*"), + xxltr, + xltr.LTR_cpu, xltr.LTR_timeHi, xltr.LTR_timeLo, + (xltr.LTR_excpt&0x8000 ? 0xFFFF : xltr.LTR_excpt*64)); /* Print the first line */ + db_printf(" %08X %08X %08X %08X %08X %08X %08X\n", + xltr.LTR_cr, xltr.LTR_srr0, xltr.LTR_srr1, xltr.LTR_dar, xltr.LTR_save, xltr.LTR_lr, xltr.LTR_ctr); + db_printf(" %08X %08X %08X %08X %08X %08X\n", + xltr.LTR_r0, xltr.LTR_r1, xltr.LTR_r2, xltr.LTR_r3, xltr.LTR_r4, xltr.LTR_r5); + + if((cnt != 16) && (xxltr == xTraceCurr)) break; /* If whole table dump, exit when we hit start again... */ + + xxltr-=sizeof(LowTraceRecord); /* Back it on up */ + if(xxltrphysent) { + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ + } + return; /* Tell them we did it */ + + +} + +/* + * Displays all of the in-use pmaps in the system. 
+ * + * dp + */ +void db_display_pmap(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + pmap_t pmap; + + pmap = kernel_pmap; /* Start at the beginning */ + + db_printf("PMAP (real) Next Prev VRMask Space Bmaps Flags Ref spaceNum Resident Wired\n"); +// xxxxxxxx rrrrrrrr xxxxxxxx pppppppp vvvvvvvv ssssssss bbbbbbbb cccccccc vvvvvvvv nnnnnnnn rrrrrrrr wwwwwwwww + while(1) { /* Do them all */ + db_printf("%08X %08X %08X %08X %08X %08X %08X %08X %08X %08X %08X %08X\n", + pmap, (unsigned int)pmap ^ pmap->pmapvr, + pmap->pmap_link.next, pmap->pmap_link.prev, pmap->pmapvr, + pmap->space, pmap->bmaps, pmap->vflags, pmap->ref_count, pmap->spaceNum, + pmap->stats.resident_count, + pmap->stats.wired_count); + + +// xxxxxxxx rrrrrrrr xxxxxxxx pppppppp vvvvvvvv ssssssss bbbbbbbb cccccccc vvvvvvvv nnnnnnnn rrrrrrrr wwwwwwwww + db_printf(" SRs: %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapSegs[0], pmap->pmapSegs[1], pmap->pmapSegs[2], pmap->pmapSegs[3], + pmap->pmapSegs[4], pmap->pmapSegs[5], pmap->pmapSegs[6], pmap->pmapSegs[7]); + db_printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapSegs[8], pmap->pmapSegs[9], pmap->pmapSegs[10], pmap->pmapSegs[11], + pmap->pmapSegs[12], pmap->pmapSegs[13], pmap->pmapSegs[14], pmap->pmapSegs[15]); + + db_printf(" spmaps: %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapPmaps[0], pmap->pmapPmaps[1], pmap->pmapPmaps[2], pmap->pmapPmaps[3], + pmap->pmapPmaps[4], pmap->pmapPmaps[5], pmap->pmapPmaps[6], pmap->pmapPmaps[7]); + db_printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", pmap->pmapPmaps[8], pmap->pmapPmaps[9], pmap->pmapPmaps[10], pmap->pmapPmaps[11], + pmap->pmapPmaps[12], pmap->pmapPmaps[13], pmap->pmapPmaps[14], pmap->pmapPmaps[15]); + + pmap = (pmap_t)pmap->pmap_link.next; /* Skip to the next */ + db_printf("\n"); + if(pmap == kernel_pmap) break; /* We've wrapped, we're done */ + } + return; +} + +/* + * print information about the passed in pmap block + */ + +void db_dumppmap(pmap_t pmap) { 
+ + db_printf("Dump of pmap block: %08X\n", pmap); + db_printf(" pmap_link: %08X %08X\n", pmap->pmap_link.next, pmap->pmap_link.prev); + db_printf(" pmapvr: %08X\n", pmap->pmapvr); + db_printf(" space: %08X\n", pmap->space); + db_printf(" bmaps: %08X\n", pmap->bmaps); + db_printf(" ref_count: %08X\n", pmap->ref_count); + db_printf(" spaceNum: %08X\n", pmap->spaceNum); + db_printf(" resident_count: %08X\n", pmap->stats.resident_count); + db_printf(" wired_count: %08X\n", pmap->stats.wired_count); + db_printf("\n"); + + return; + +} + +/* + * Prints out a mapping control block + * + */ + +void db_dumpmapping(struct mapping *mp) { /* Dump out a mapping */ + + db_printf("Dump of mapping block: %08X\n", mp); /* Header */ + db_printf(" next: %08X\n", mp->next); + db_printf(" hashnext: %08X\n", mp->hashnext); + db_printf(" PTEhash: %08X\n", mp->PTEhash); + db_printf(" PTEent: %08X\n", mp->PTEent); + db_printf(" physent: %08X\n", mp->physent); + db_printf(" PTEv: %08X\n", mp->PTEv); + db_printf(" PTEr: %08X\n", mp->PTEr); + db_printf(" pmap: %08X\n", mp->pmap); + + if(mp->physent) { /* Print physent if it exists */ + db_printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1); + } + else { + db_printf("Associated physical entry: none\n"); + } + + db_dumppca(mp); /* Dump out the PCA information */ + + return; +} + +/* + * Prints out a PTEG control area + * + */ + +void db_dumppca(struct mapping *mp) { /* PCA */ + + PCA *pca; + unsigned int *pteg, sdr; + + pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */ + __asm__ volatile("mfsdr1 %0" : "=r" (sdr)); + db_printf(" SDR1: %08X\n", sdr); + pteg=(unsigned int *)((unsigned int)pca-(((sdr&0x0000FFFF)+1)<<16)); + db_printf(" Dump of PCA: %08X\n", pca); /* Header */ + db_printf(" PCAlock: %08X\n", pca->PCAlock); + db_printf(" PCAallo: %08X\n", pca->flgs.PCAallo); + db_printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], 
pca->PCAhash[3]); + db_printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]); + db_printf("Dump of PTEG: %08X\n", pteg); /* Header */ + db_printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]); + db_printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]); + db_printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]); + db_printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]); + return; +} + +/* + * Dumps starting with a physical entry + */ + +void db_dumpphys(struct phys_entry *pp) { /* Dump from physent */ + + mapping *mp; + PCA *pca; + unsigned int *pteg; + + db_printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1); + mp = hw_cpv(pp->phys_link); + while(mp) { + db_dumpmapping(mp); + db_dumppca(mp); + mp = hw_cpv(mp->next); + } + + return; +} + + +/* + * Print out 256 bytes of virtual storage + * + * + * dv [entaddr] [space] + * address must be on 32-byte boundary. It will be rounded down if not + */ +void db_display_virtual(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + int i, size, lines, rlines; + unsigned int xbuf[8]; + unsigned int xspace; + + mapping *mp, *mpv; + vm_offset_t pa; + + if (db_expression(&xspace)) dvspace = xspace; /* Get the space or set default */ + + addr&=-32; + + size = 4096 - (addr & 0x00000FFF); /* Bytes left on page */ + lines = size / 32; /* Number of lines in first or only part */ + if(lines > 8) lines = 8; + rlines = 8 - lines; + if(rlines < 0) lines = 0; + + db_printf("Dumping %08X (space=%08X); ", addr, dvspace); + mp = hw_lock_phys_vir(dvspace, addr); /* Lock the physical entry for this mapping */ + if(!mp) { /* Did we find one? */ + db_printf("Not mapped\n"); + return; /* Didn't find any, return FALSE... */ + } + if((unsigned int)mp&1) { /* Did we timeout? */ + db_printf("Timeout locking physical entry for virtual address (%08X)\n", addr); /* Yeah, scream about it! 
*/ + return; /* Bad hair day, return FALSE... */ + } + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + if(!mpv->physent) { /* Was there a physical entry? */ + pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ + } + else { + pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + } + db_printf("phys=%08X\n", pa); + for(i=0; iphysent) { /* Was there a physical entry? */ + pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ + } + else { + pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)addr & (PAGE_SIZE-1))); /* Get physical address from physent */ + hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + } + db_printf("phys=%08X\n", pa); + for(i=0; itasks.next; task != (task_t)&pset->tasks.next; task = (task_t)task->pset_tasks.next) { /* Go through the tasks */ + taskact = 0; /* Reset activation count */ + db_printf("\nTask %4d @%08X:\n", tottasks, task); /* Show where we're at */ + for(act = (thread_act_t)task->thr_acts.next; act != (thread_act_t)&task->thr_acts; act = (thread_act_t)act->thr_acts.next) { /* Go through activations */ + db_printf(" Act %4d @%08X - p: %08X fp: %08X fl: %08X fc: %d vp: %08X vl: %08X vp: %d\n", + taskact, act, act->mact.pcb, act->mact.FPU_pcb, act->mact.FPU_lvl, act->mact.FPU_cpu, + act->mact.VMX_pcb, act->mact.VMX_lvl, act->mact.VMX_cpu); + + + save = (savearea *)act->mact.pcb; /* Set the start of the normal chain */ + chainsize = 0; + while(save) { /* Do them all */ + totsaves++; /* Count savearea */ + db_printf(" Norm %08X: %08X %08X - tot = %d\n", save, save->save_srr0, save->save_srr1, totsaves); + save = save->save_prev; /* Next one 
*/ + if(chainsize++ > chainmax) { /* See if we might be in a loop */ + db_printf(" Chain terminated by count (%d) before %08X\n", chainmax, save); + break; + } + } + + save = (savearea *)act->mact.FPU_pcb; /* Set the start of the floating point chain */ + chainsize = 0; + while(save) { /* Do them all */ + if(!(save->save_flags & SAVattach)) totsaves++; /* Count savearea only if not a normal one also */ + db_printf(" FPU %08X: %08X - tot = %d\n", save, save->save_level_fp, totsaves); + save = save->save_prev_float; /* Next one */ + if(chainsize++ > chainmax) { /* See if we might be in a loop */ + db_printf(" Chain terminated by count (%d) before %08X\n", chainmax, save); + break; + } + } + + save = (savearea *)act->mact.VMX_pcb; /* Set the start of the floating point chain */ + chainsize = 0; + while(save) { /* Do them all */ + if(!(save->save_flags & (SAVattach | SAVfpuvalid))) totsaves++; /* Count savearea only if not a normal one also */ + db_printf(" Vec %08X: %08X - tot = %d\n", save, save->save_level_vec, totsaves); + save = save->save_prev_vector; /* Next one */ + if(chainsize++ > chainmax) { /* See if we might be in a loop */ + db_printf(" Chain terminated by count (%d) before %08X\n", chainmax, save); + break; + } + } + taskact++; + } + tottasks++; + } + + db_printf("Total saveareas accounted for: %d\n", totsaves); + return; +} + +/* + * Print out extra registers + * + * + * dx + */ + +extern unsigned int dbfloats[33][2]; +extern unsigned int dbvecs[33][4]; +extern unsigned int dbspecrs[80]; + +void db_display_xregs(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + int i, j, pents; + + stSpecrs(dbspecrs); /* Save special registers */ + db_printf("PIR: %08X\n", dbspecrs[0]); + db_printf("PVR: %08X\n", dbspecrs[1]); + db_printf("SDR1: %08X\n", dbspecrs[22]); + db_printf("HID0: %08X\n", dbspecrs[39]); + db_printf("HID1: %08X\n", dbspecrs[40]); + db_printf("L2CR: %08X\n", dbspecrs[41]); + db_printf("MSSCR0: %08X\n", dbspecrs[42]); + 
db_printf("MSSCR1: %08X\n", dbspecrs[43]); + db_printf("THRM1: %08X\n", dbspecrs[44]); + db_printf("THRM2: %08X\n", dbspecrs[45]); + db_printf("THRM3: %08X\n", dbspecrs[46]); + db_printf("ICTC: %08X\n", dbspecrs[47]); + db_printf("\n"); + + db_printf("DBAT: %08X %08X %08X %08X\n", dbspecrs[2], dbspecrs[3], dbspecrs[4], dbspecrs[5]); + db_printf(" %08X %08X %08X %08X\n", dbspecrs[6], dbspecrs[7], dbspecrs[8], dbspecrs[9]); + db_printf("IBAT: %08X %08X %08X %08X\n", dbspecrs[10], dbspecrs[11], dbspecrs[12], dbspecrs[13]); + db_printf(" %08X %08X %08X %08X\n", dbspecrs[14], dbspecrs[15], dbspecrs[16], dbspecrs[17]); + db_printf("SPRG: %08X %08X %08X %08X\n", dbspecrs[18], dbspecrs[19], dbspecrs[20], dbspecrs[21]); + db_printf("\n"); + for(i = 0; i < 16; i += 8) { /* Print 8 at a time */ + db_printf("SR%02d: %08X %08X %08X %08X %08X %08X %08X %08X\n", i, + dbspecrs[23+i], dbspecrs[24+i], dbspecrs[25+i], dbspecrs[26+i], + dbspecrs[27+i], dbspecrs[28+i], dbspecrs[29+i], dbspecrs[30+i]); + } + + db_printf("\n"); + + stFloat(dbfloats); /* Save floating point registers */ + for(i = 0; i < 32; i += 4) { /* Print 4 at a time */ + db_printf("F%02d: %08X %08X %08X %08X %08X %08X %08X %08X\n", i, + dbfloats[i][0], dbfloats[i][1], dbfloats[i+1][0], dbfloats[i+1][1], + dbfloats[i+2][0], dbfloats[i+2][1], dbfloats[i+3][0], dbfloats[i+3][1]); + } + db_printf("FCR: %08X %08X\n", dbfloats[32][0], dbfloats[32][1]); /* Print FSCR */ + + if(!stVectors(dbvecs)) return; /* Return if not Altivec capable */ + + db_printf("\n"); + + for(i = 0; i < 32; i += 2) { /* Print 2 at a time */ + db_printf("V%02d: %08X %08X %08X %08X %08X %08X %08X %08X\n", i, + dbvecs[i][0], dbvecs[i][1], dbvecs[i][2], dbvecs[i][3], + dbvecs[i+1][0], dbvecs[i+1][1], dbvecs[i+1][2], dbvecs[i+1][3]); + } + db_printf("VCR: %08X %08X %08X %08X\n", dbvecs[32][0], dbvecs[32][1], dbvecs[32][2], dbvecs[32][3]); /* Print VSCR */ + + return; /* Tell them we did it */ + + +} + +/* + * Displays all of the kmods in the system. 
+ * + * dp + */ +void db_display_kmod(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + kmod_info_t *kmd; + unsigned int strt, end; + + kmd = kmod; /* Start at the start */ + + db_printf("info addr start - end name ver\n"); + + while(kmd) { /* Dump 'em all */ + strt = (unsigned int)kmd->address + kmd->hdr_size; /* Get start of kmod text */ + end = (unsigned int)kmd->address + kmd->size; /* Get end of kmod */ + db_printf("%08X %08X %08X - %08X: %s, %s\n", kmd, kmd->address, strt, end, + kmd->name, kmd->version); + kmd = kmd->next; /* Step to it */ + } + + return; +} + +/* + * Displays stuff + * + * gs + */ +unsigned char xxgpo[36] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +void db_gsnoop(db_expr_t addr, int have_addr, db_expr_t count, char * modif) { + + int i, j; + unsigned char *gp, gpn[36]; +#define ngpr 34 + + gp = (unsigned char *)0x8000005C; + + for(i = 0; i < ngpr; i++) gpn[i] = gp[i]; /* Copy 'em */ + + for(i = 0; i < ngpr; i++) { + db_printf("%02X ", gpn[i]); + } + db_printf("\n"); + + for(i = 0; i < ngpr; i++) { + if(gpn[i] != xxgpo[i]) db_printf("^^ "); + else db_printf(" "); + } + db_printf("\n"); + + for(i = 0; i < ngpr; i++) xxgpo[i] = gpn[i]; /* Save 'em */ + + return; +} + + +void Dumbo(void); +void Dumbo(void){ +} diff --git a/osfmk/ppc/db_low_trace.h b/osfmk/ppc/db_low_trace.h new file mode 100644 index 000000000..7b243f6ff --- /dev/null +++ b/osfmk/ppc/db_low_trace.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +#ifndef _DDB_DB_LTR_H_ +#define _DDB_DB_LTR_H_ + +#include +#include + +/* Prototypes for functions exported by this module. + */ + +void db_list_pmap( + db_expr_t addr, + int have_addr, + db_expr_t count, + char *modif +); + +void db_low_trace( + db_expr_t addr, + int have_addr, + db_expr_t count, + char *modif +); + +void db_display_long( + db_expr_t addr, + int have_addr, + db_expr_t count, + char *modif +); + +void db_display_real( + db_expr_t addr, + int have_addr, + db_expr_t count, + char *modif +); + +void db_display_virtual(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_mappings(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_pmap(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_save(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_xregs(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_display_kmod(db_expr_t addr, int have_addr, db_expr_t count, char * modif); +void db_gsnoop(db_expr_t addr, int have_addr, db_expr_t count, char * modif); + +#endif /* !_DDB_DB_LTR_H_ */ diff --git a/osfmk/ppc/db_machdep.h b/osfmk/ppc/db_machdep.h new file mode 100644 index 000000000..44c768e62 --- /dev/null +++ b/osfmk/ppc/db_machdep.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +#ifndef _PPC_DB_MACHDEP_H_ +#define _PPC_DB_MACHDEP_H_ + +/* + * Machine-dependent defines for new kernel debugger. + */ + +#include +#include +#include +#include /* for thread_status */ +#include +#include + +typedef vm_offset_t db_addr_t; /* address - unsigned */ +typedef int db_expr_t; /* expression - signed */ + +typedef struct ppc_saved_state db_regs_t; +db_regs_t ddb_regs; /* register state */ +#define DDB_REGS (&ddb_regs) +extern int db_active; /* ddb is active */ + +#define PC_REGS(regs) ((db_addr_t)(regs)->srr0) + +#define BKPT_INST 0x7c810808 /* breakpoint instruction */ +#define BKPT_SIZE (4) /* size of breakpoint inst */ +#define BKPT_SET(inst) (BKPT_INST) + +#define db_clear_single_step(regs) ((regs)->srr1 &= ~MASK(MSR_SE)) +#define db_set_single_step(regs) ((regs)->srr1 |= MASK(MSR_SE)) + +#define IS_BREAKPOINT_TRAP(type, code) (FALSE) +#define IS_WATCHPOINT_TRAP(type, code) (FALSE) + +#define inst_trap_return(ins) (FALSE) +#define inst_return(ins) (FALSE) +#define inst_call(ins) (FALSE) + +int db_inst_load(unsigned long); +int db_inst_store(unsigned long); + +/* access capability and access macros */ + +#define DB_ACCESS_LEVEL DB_ACCESS_ANY /* any space */ +#define DB_CHECK_ACCESS(addr,size,task) \ + db_check_access(addr,size,task) +#define DB_PHYS_EQ(task1,addr1,task2,addr2) \ + db_phys_eq(task1,addr1,task2,addr2) +#define DB_VALID_KERN_ADDR(addr) \ + ((addr) >= VM_MIN_KERNEL_ADDRESS && \ + (addr) < VM_MAX_KERNEL_ADDRESS) +#define DB_VALID_ADDRESS(addr,user) \ + ((!(user) && DB_VALID_KERN_ADDR(addr)) || \ + ((user) && (addr) < VM_MAX_ADDRESS)) + +/* + * Given pointer to ppc_saved_state, determine if it represents + * a thread executing a) in user space, b) in the kernel, or c) + * in a kernel-loaded task. Return true for cases a) and c). 
+ */ +#define IS_USER_TRAP(regs) \ + (USER_MODE(regs->srr1)) + +extern boolean_t db_check_access( + vm_offset_t addr, + int size, + task_t task); +extern boolean_t db_phys_eq( + task_t task1, + vm_offset_t addr1, + task_t task2, + vm_offset_t addr2); +extern db_addr_t db_disasm( + db_addr_t loc, + boolean_t altfmt, + task_t task); +extern vm_offset_t db_kvtophys( + space_t space, + vm_offset_t va); + +extern void db_read_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task); +extern void db_write_bytes( + vm_offset_t addr, + int size, + char *data, + task_t task); +extern void db_stack_trace_cmd( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); +extern void db_reboot( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif); +extern void db_low_trace( + db_expr_t addr, + int have_addr, + db_expr_t count, + char *modif); +extern void db_to_gdb( + void); + + +/* macros for printing OS server dependent task name */ + +#define DB_TASK_NAME(task) db_task_name(task) +#define DB_TASK_NAME_TITLE "COMMAND " +#define DB_TASK_NAME_LEN 39 +#define DB_NULL_TASK_NAME "? " + +extern void db_task_name( + task_t task); + +/* macro for checking if a thread has used floating-point */ + +#define db_act_fp_used(act) (FALSE) + +extern void kdb_trap( + int type, + struct ppc_saved_state *regs); +extern boolean_t db_trap_from_asm( + struct ppc_saved_state *regs); +extern void kdb_on( + int cpu); +extern void cnpollc( + boolean_t on); + +extern void kdb_kintr(void); + +extern void db_phys_copy( + vm_offset_t, + vm_offset_t, + vm_size_t); + +extern boolean_t db_phys_cmp( + vm_offset_t, + vm_offset_t, + vm_size_t); + +#endif /* _PPC_DB_MACHDEP_H_ */ diff --git a/osfmk/ppc/db_trace.c b/osfmk/ppc/db_trace.c new file mode 100644 index 000000000..d24838fb6 --- /dev/null +++ b/osfmk/ppc/db_trace.c @@ -0,0 +1,831 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +extern jmp_buf_t *db_recover; +extern struct ppc_saved_state *saved_state[]; + +struct ppc_saved_state ddb_null_kregs; + +extern vm_offset_t vm_min_inks_addr; /* set by db_clone_symtabXXX */ + +#define DB_NUMARGS_MAX 5 + + +extern char FixedStackStart[], FixedStackEnd[]; +#define INFIXEDSTACK(va) \ + ((((vm_offset_t)(va)) >= (vm_offset_t)&FixedStackStart) && \ + (((vm_offset_t)(va)) < ((vm_offset_t)&FixedStackEnd))) + +#if 0 + +#define INKERNELSTACK(va, th) \ + (th == THR_ACT_NULL || \ + (((vm_offset_t)(va)) >= th->thread->kernel_stack && \ + (((vm_offset_t)(va)) < th->thread->kernel_stack + \ + KERNEL_STACK_SIZE)) || \ + INFIXEDSTACK(va)) +#else +#define INKERNELSTACK(va, th) 1 + +#endif + +#ifdef __MACHO__ +struct db_ppc_frame { + struct db_ppc_frame *f_frame; + int pad1; + db_addr_t f_retaddr; + int pad3; + int pad4; + int pad5; + db_addr_t f_arg[DB_NUMARGS_MAX]; +}; +#endif + +#define TRAP 1 +#define INTERRUPT 2 
+#define SYSCALL 3 + +db_addr_t db_user_trap_symbol_value = 0; +db_addr_t db_kernel_trap_symbol_value = 0; +db_addr_t db_interrupt_symbol_value = 0; +db_addr_t db_return_to_iret_symbol_value = 0; +db_addr_t db_syscall_symbol_value = 0; +boolean_t db_trace_symbols_found = FALSE; + +extern int db_ppc_reg_value( + struct db_variable * vp, + db_expr_t * val, + int flag, + db_var_aux_param_t ap); +extern void db_find_trace_symbols(void); +extern int db_numargs( + struct db_ppc_frame *fp, + task_t task); +extern boolean_t db_find_arg( + struct db_ppc_frame *frame, + db_addr_t calleepc, + task_t task, + int narg, + db_addr_t *arg); +extern void db_nextframe( + struct db_ppc_frame **lfp, + struct db_ppc_frame **fp, + db_addr_t *ip, + int frame_type, + thread_act_t thr_act, + db_addr_t linkpc); +extern int _setjmp( + jmp_buf_t * jb); + +/* + * Machine register set. + */ +struct db_variable db_regs[] = { + /* XXX "pc" is an alias to "srr0"... */ + { "pc", (int *)&ddb_regs.srr0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "srr0", (int *)&ddb_regs.srr0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "srr1", (int *)&ddb_regs.srr1, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r0", (int *)&ddb_regs.r0, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r1", (int *)&ddb_regs.r1, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r2", (int *)&ddb_regs.r2, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r3", (int *)&ddb_regs.r3, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r4", (int *)&ddb_regs.r4, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r5", (int *)&ddb_regs.r5, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r6", (int *)&ddb_regs.r6, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r7", (int *)&ddb_regs.r7, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r8", (int *)&ddb_regs.r8, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r9", (int *)&ddb_regs.r9, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r10", (int *)&ddb_regs.r10, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r11", (int *)&ddb_regs.r11, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, 
+ { "r12", (int *)&ddb_regs.r12, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r13", (int *)&ddb_regs.r13, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r14", (int *)&ddb_regs.r14, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r15", (int *)&ddb_regs.r15, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r16", (int *)&ddb_regs.r16, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r17", (int *)&ddb_regs.r17, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r18", (int *)&ddb_regs.r18, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r19", (int *)&ddb_regs.r19, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r20", (int *)&ddb_regs.r20, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r21", (int *)&ddb_regs.r21, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r22", (int *)&ddb_regs.r22, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r23", (int *)&ddb_regs.r23, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r24", (int *)&ddb_regs.r24, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r25", (int *)&ddb_regs.r25, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r26", (int *)&ddb_regs.r26, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r27", (int *)&ddb_regs.r27, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r28", (int *)&ddb_regs.r28, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r29", (int *)&ddb_regs.r29, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r30", (int *)&ddb_regs.r30, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "r31", (int *)&ddb_regs.r31, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "cr", (int *)&ddb_regs.cr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "xer", (int *)&ddb_regs.xer, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "lr", (int *)&ddb_regs.lr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "ctr", (int *)&ddb_regs.ctr, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "mq", (int *)&ddb_regs.mq, db_ppc_reg_value, 0, 0, 0, 0, TRUE }, + { "sr_copyin",(int *)&ddb_regs.sr_copyin,db_ppc_reg_value, 0, 0, 0, 0, TRUE }, +}; +struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); + +int +db_ppc_reg_value( + struct db_variable *vp, + db_expr_t 
*valuep, + int flag, + db_var_aux_param_t ap) +{ + int *dp = 0; + db_expr_t null_reg = 0; + register thread_act_t thr_act = ap->thr_act; + int cpu; + + if (db_option(ap->modif, 'u')) { + if (thr_act == THR_ACT_NULL) { + if ((thr_act = current_act()) == THR_ACT_NULL) + db_error("no user registers\n"); + } + if (thr_act == current_act()) { + if (IS_USER_TRAP((&ddb_regs))) + dp = vp->valuep; + else if (INFIXEDSTACK(ddb_regs.r1)) + db_error("cannot get/set user registers in nested interrupt\n"); + } + } else { + if (thr_act == THR_ACT_NULL || thr_act == current_act()) { + dp = vp->valuep; + } else { + if (thr_act->thread && + !(thr_act->thread->state & TH_STACK_HANDOFF) && + thr_act->thread->kernel_stack) { + int cpu; + + for (cpu = 0; cpu < NCPUS; cpu++) { + if (machine_slot[cpu].running == TRUE && + cpu_data[cpu].active_thread == thr_act->thread && saved_state[cpu]) { + dp = (int *) (((int)saved_state[cpu]) + + (((int) vp->valuep) - + (int) &ddb_regs)); + break; + } + } +#if 0 + if (dp == 0 && thr_act && thr_act->thread) + dp = db_lookup_i386_kreg(vp->name, + (int *)(STACK_IKS(thr_act->thread->kernel_stack))); +#endif + if (dp == 0) + dp = &null_reg; + } else if (thr_act->thread && + (thr_act->thread->state&TH_STACK_HANDOFF)){ + /* only PC is valid */ + if (vp->valuep == (int *) &ddb_regs.srr0) { + dp = (int *)(&thr_act->thread->continuation); + } else { + dp = &null_reg; + } + } + } + } + if (dp == 0) { + int cpu; + + if (!db_option(ap->modif, 'u')) { + for (cpu = 0; cpu < NCPUS; cpu++) { + if (machine_slot[cpu].running == TRUE && + cpu_data[cpu].active_thread == thr_act->thread && saved_state[cpu]) { + dp = (int *) (((int)saved_state[cpu]) + + (((int) vp->valuep) - + (int) &ddb_regs)); + break; + } + } + } + if (dp == 0) { + if (!thr_act || thr_act->mact.pcb == 0) + db_error("no pcb\n"); + dp = (int *)((int)(&thr_act->mact.pcb->ss) + + ((int)vp->valuep - (int)&ddb_regs)); + } + } + if (flag == DB_VAR_SET) + *dp = *valuep; + else + *valuep = *dp; + return(0); +} + 
+void +db_find_trace_symbols(void) +{ + db_expr_t value; + boolean_t found_some; + + found_some = FALSE; + if (db_value_of_name(CC_SYM_PREFIX "thandler", &value)) { + db_user_trap_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (db_value_of_name(CC_SYM_PREFIX "thandler", &value)) { + db_kernel_trap_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (db_value_of_name(CC_SYM_PREFIX "ihandler", &value)) { + db_interrupt_symbol_value = (db_addr_t) value; + found_some = TRUE; + } +#if 0 + if (db_value_of_name(CC_SYM_PREFIX "return_to_iret", &value)) { + db_return_to_iret_symbol_value = (db_addr_t) value; + found_some = TRUE; + } +#endif + if (db_value_of_name(CC_SYM_PREFIX "thandler", &value)) { + db_syscall_symbol_value = (db_addr_t) value; + found_some = TRUE; + } + if (found_some) + db_trace_symbols_found = TRUE; +} + +int +db_numargs( + struct db_ppc_frame *fp, + task_t task) +{ + return (DB_NUMARGS_MAX); +} + +boolean_t +db_find_arg( + struct db_ppc_frame *fp, + db_addr_t calleepc, + task_t task, + int narg, + db_addr_t *arg) +{ + db_addr_t argp; + db_addr_t calleep; + db_addr_t offset; + int i; + int inst; + char *name; + +#if XXX_BS + db_find_task_sym_and_offset(calleepc, &name, &offset, task); + calleep = calleepc-offset; + + for (i = 0; calleep < calleepc; i++, calleep++) { + if (!DB_CHECK_ACCESS((int) calleep, 4, task)) { + continue; + } + inst = db_get_task_value(calleep, 4, FALSE, task); + if ((inst & 0xffff0000) == (0x907f0000 + (narg << 21)) || + (inst & 0xffff0000) == (0x90610000 + (narg << 21))) { + argp = (db_addr_t) &(fp->f_arg[narg]); + *arg = argp; + return TRUE; + } + } +#endif + return FALSE; +} + +/* + * Figure out the next frame up in the call stack. + * For trap(), we print the address of the faulting instruction and + * proceed with the calling frame. We return the ip that faulted. 
+ * If the trap was caused by jumping through a bogus pointer, then + * the next line in the backtrace will list some random function as + * being called. It should get the argument list correct, though. + * It might be possible to dig out from the next frame up the name + * of the function that faulted, but that could get hairy. + */ +void +db_nextframe( + struct db_ppc_frame **lfp, /* in/out */ + struct db_ppc_frame **fp, /* in/out */ + db_addr_t *ip, /* out */ + int frame_type, /* in */ + thread_act_t thr_act, + db_addr_t linkpc) /* in */ +{ + extern char * trap_type[]; + extern int TRAP_TYPES; + + struct ppc_saved_state *saved_regs; + + task_t task = (thr_act != THR_ACT_NULL)? thr_act->task: TASK_NULL; + + switch(frame_type) { + case TRAP: +#if 0 + /* + * We know that trap() has 1 argument and we know that + * it is an (strcut i386_saved_state *). + */ + saved_regs = (struct i386_saved_state *) + db_get_task_value((int)&((*fp)->f_arg0),4,FALSE,task); + if (saved_regs->trapno >= 0 && saved_regs->trapno < TRAP_TYPES) { + db_printf(">>>>> %s trap at ", + trap_type[saved_regs->trapno]); + } else { + db_printf(">>>>> trap (number %d) at ", + saved_regs->trapno & 0xffff); + } + db_task_printsym(saved_regs->eip, DB_STGY_PROC, task); + db_printf(" <<<<<\n"); + *fp = (struct i386_frame *)saved_regs->ebp; + *ip = (db_addr_t)saved_regs->eip; +#else + db_printf(">>>>> trap <<<<<\n"); + goto miss_frame; +#endif + break; + case INTERRUPT: + if (*lfp == 0) { + db_printf(">>>>> interrupt <<<<<\n"); + goto miss_frame; + } +#if 0 + db_printf(">>>>> interrupt at "); + ifp = (struct interrupt_frame *)(*lfp); + *fp = ifp->if_frame; + if (ifp->if_iretaddr == db_return_to_iret_symbol_value) + *ip = ((struct i386_interrupt_state *) ifp->if_edx)->eip; + else + *ip = (db_addr_t) ifp->if_eip; + db_task_printsym(*ip, DB_STGY_PROC, task); + db_printf(" <<<<<\n"); +#else + db_printf(">>>>> interrupt <<<<<\n"); + goto miss_frame; +#endif + break; + case SYSCALL: + if (thr_act != THR_ACT_NULL 
&& thr_act->mact.pcb) { + *ip = (db_addr_t) thr_act->mact.pcb->ss.srr0; + *fp = (struct db_ppc_frame *) (thr_act->mact.pcb->ss.r1); + break; + } + /* falling down for unknown case */ + default: + miss_frame: + if ((*fp)->f_frame) + *ip = (db_addr_t) + db_get_task_value((int)&(*fp)->f_frame->f_retaddr, + 4, FALSE, task); + else + *ip = (db_addr_t) + db_get_task_value((int)&(*fp)->f_retaddr, + 4, FALSE, task); + + *lfp = *fp; + *fp = (struct db_ppc_frame *) + db_get_task_value((int)&(*fp)->f_frame, 4, FALSE, task); + break; + } +} + +void +db_stack_trace_cmd( + db_expr_t addr, + boolean_t have_addr, + db_expr_t count, + char *modif) +{ + struct db_ppc_frame *frame, *lastframe; + db_addr_t callpc, linkpc, lastcallpc; + int frame_type; + boolean_t kernel_only = TRUE; + boolean_t trace_thread = FALSE; + boolean_t trace_all_threads = FALSE; + int thcount = 0; + char *filename; + int linenum; + task_t task; + thread_act_t th, top_act; + int user_frame; + int frame_count; + jmp_buf_t *prev; + jmp_buf_t db_jmp_buf; + queue_entry_t act_list; + + if (!db_trace_symbols_found) + db_find_trace_symbols(); + { + register char *cp = modif; + register char c; + + while ((c = *cp++) != 0) { + if (c == 't') + trace_thread = TRUE; + if (c == 'T') { + trace_all_threads = TRUE; + trace_thread = TRUE; + } + if (c == 'u') + kernel_only = FALSE; + } + } + + if (trace_all_threads) { + if (!have_addr && !trace_thread) { + have_addr = TRUE; + trace_thread = TRUE; + act_list = &(current_task()->thr_acts); + addr = (db_expr_t) queue_first(act_list); + } + else if (trace_thread) { + if (have_addr) { + if (!db_check_act_address_valid((thread_act_t)addr)) { + if (db_lookup_task((task_t)addr) == -1) + return; + act_list = &(((task_t)addr)->thr_acts); + addr = (db_expr_t) queue_first(act_list); + } + else { + act_list = &(((thread_act_t)addr)->task->thr_acts); + thcount = db_lookup_task_act(((thread_act_t)addr)->task, + (thread_act_t)addr); + } + } + else { + th = db_default_act; + if (th == 
THR_ACT_NULL) + th = current_act(); + if (th == THR_ACT_NULL) { + db_printf("no active thr_act\n"); + return; + } + have_addr = TRUE; + act_list = &th->task->thr_acts; + addr = (db_expr_t) queue_first(act_list); + } + } + } + + if (count == -1) + count = 65535; + +next_thread: + top_act = THR_ACT_NULL; + + user_frame = 0; + frame_count = count; + + if (!have_addr && !trace_thread) { + frame = (struct db_ppc_frame *)(ddb_regs.r1); + callpc = (db_addr_t)ddb_regs.srr0; + linkpc = (db_addr_t)ddb_regs.lr; + th = current_act(); + task = (th != THR_ACT_NULL)? th->task: TASK_NULL; + } + else if (trace_thread) { + if (have_addr) { + th = (thread_act_t) addr; + if (!db_check_act_address_valid(th)) + return; + } + else { + th = db_default_act; + if (th == THR_ACT_NULL) + th = current_act(); + if (th == THR_ACT_NULL) { + db_printf("no active thread\n"); + return; + } + } + if (trace_all_threads) + db_printf("---------- Thread 0x%x (#%d of %d) ----------\n", + addr, thcount, th->task->thr_act_count); + +next_activation: + + user_frame = 0; + + task = th->task; + if (th == current_act()) { + frame = (struct db_ppc_frame *)(ddb_regs.r1); + callpc = (db_addr_t)ddb_regs.srr0; + linkpc = (db_addr_t)ddb_regs.lr; + } + else { + if (th->mact.pcb == 0) { + db_printf("thread has no pcb\n"); + goto thread_done; + } + if (!th->thread) { + register struct ppc_saved_state *pss = + &th->mact.pcb->ss; + + db_printf("thread has no shuttle\n"); + #if 0 + frame = (struct db_ppc_frame *) (pss->r1); + callpc = (db_addr_t) (pss->srr0); + linkpc = (db_addr_t) (pss->lr); + #else + goto thread_done; + #endif + } + else if ((th->thread->state & TH_STACK_HANDOFF) || + th->thread->kernel_stack == 0) { + register struct ppc_saved_state *pss = + &th->mact.pcb->ss; + + db_printf("Continuation "); + db_task_printsym((db_expr_t)th->thread->continuation, + DB_STGY_PROC, task); + db_printf("\n"); + frame = (struct db_ppc_frame *) (pss->r1); + callpc = (db_addr_t) (pss->srr0); + linkpc = (db_addr_t) (pss->lr); + 
} + else { + int cpu; + + for (cpu = 0; cpu < NCPUS; cpu++) { + if (machine_slot[cpu].running == TRUE && + cpu_data[cpu].active_thread == th->thread && + saved_state[cpu]) { + break; + } + } + if (top_act != THR_ACT_NULL) { + /* + * Trying to get the backtrace of an activation + * which is not the top_most one in the RPC chain: + * use the activation's pcb. + */ + struct ppc_saved_state *pss; + + pss = (struct ppc_saved_state *)th->mact.pcb; + frame = (struct db_ppc_frame *) (pss->r1); + callpc = (db_addr_t) (pss->srr0); + linkpc = (db_addr_t) (pss->lr); + } else { + if (cpu == NCPUS) { + register struct ppc_saved_state *iks; + int r; + + iks = (struct ppc_saved_state *)th->mact.pcb; + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + frame = (struct db_ppc_frame *) (iks->r1); + callpc = (db_addr_t) (iks->lr); + linkpc = 0; + } else { + /* + * The kernel stack has probably been + * paged out (swapped out activation). + */ + db_recover = prev; + if (r == 2) /* 'q' from db_more() */ + db_error(0); + db_printf("\n", + iks); + goto next_act; + } + db_recover = prev; + } else { + db_printf(">>>>> active on cpu %d <<<<<\n", + cpu); + frame = (struct db_ppc_frame *) + (saved_state[cpu]->r1); + callpc = (db_addr_t) saved_state[cpu]->srr0; + linkpc = (db_addr_t) saved_state[cpu]->lr; + } + } + } + } + } else { + frame = (struct db_ppc_frame *)addr; + th = (db_default_act)? db_default_act: current_act(); + task = (th != THR_ACT_NULL)? th->task: TASK_NULL; + if (frame->f_frame) { + callpc = (db_addr_t)db_get_task_value + ((int)&frame->f_frame->f_retaddr, + 4, FALSE, (user_frame) ? 
task : 0); + callpc = callpc-sizeof(callpc); + } else + callpc =0; + linkpc = 0; + } + + if (!INKERNELSTACK((unsigned)frame, th)) { + db_printf(">>>>> user space <<<<<\n"); + if (kernel_only) + goto thread_done; + user_frame++; + } + + lastframe = 0; + lastcallpc = (db_addr_t) 0; + while (frame_count-- && frame != 0) { + int narg = DB_NUMARGS_MAX; + int arg; + char * name; + db_expr_t offset; + db_addr_t call_func = 0; + int r; + db_addr_t off; + + db_symbol_values(NULL, + db_search_task_symbol_and_line( + callpc, DB_STGY_XTRN, &offset, &filename, + &linenum, (user_frame) ? task : 0, &narg), + &name, (db_expr_t *)&call_func); + if ( name == NULL) { + db_find_task_sym_and_offset(callpc, + &name, &off, (user_frame) ? task : 0); + offset = (db_expr_t) off; + } + + if (user_frame == 0) { + if (call_func && + (call_func == db_user_trap_symbol_value || + call_func == db_kernel_trap_symbol_value)) { + frame_type = TRAP; + narg = 1; + } else if (call_func && + call_func == db_interrupt_symbol_value) { + frame_type = INTERRUPT; + goto next_frame; + } else if (call_func && + call_func == db_syscall_symbol_value) { + frame_type = SYSCALL; + goto next_frame; + } else { + frame_type = 0; + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) + == 0) { + if (narg < 0) + narg = db_numargs(frame, + (user_frame) ? task : 0); + db_recover = prev; + } else { + db_recover = prev; + goto next_act; + } + } + } else { + frame_type = 0; + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + if (narg < 0) + narg = db_numargs(frame, + (user_frame) ? task : 0); + db_recover = prev; + } else { + db_recover = prev; + goto next_act; + } + } + + if (name == 0 || offset > db_maxoff) { + db_printf("[%08X]0x%08X(", frame, callpc); + } else { + db_printf("[%08X]%s", frame, name); + if (offset) + db_printf("+%x", offset); + db_printf("("); + }; + + narg = db_numargs(frame, (user_frame) ? 
task : 0); + + for (arg =0; arg < narg; arg++) { + db_addr_t argp; + int value; + boolean_t found; + + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + found = FALSE; + if (lastframe) + found = db_find_arg(frame, lastframe->f_retaddr, + (user_frame) ? task : 0, arg, &argp); + if (found) + value = db_get_task_value(argp, 4, FALSE, + (user_frame) ? task : 0); + } else { + db_recover = prev; + if (r == 2) /* 'q' from db_more() */ + db_error(0); + db_printf("... )"); + db_printf("\n"); + goto next_act; + } + db_recover = prev; + if (found) + db_printf("%08X", value); + else + db_printf("??"); + argp = argp + sizeof(argp); + if (arg < narg-1) + db_printf(","); + } + if (arg != narg) + db_printf("..."); + db_printf(")"); + db_printf("\n"); + + next_frame: + lastcallpc = callpc; + prev = db_recover; + if ((r = _setjmp(db_recover = &db_jmp_buf)) == 0) { + db_nextframe(&lastframe, &frame, &callpc, frame_type, + (user_frame) ? th : THR_ACT_NULL, linkpc); + callpc = callpc-sizeof(callpc); + db_recover = prev; + } else { + db_recover = prev; + frame = 0; + } + linkpc = 0; + + if (frame == 0) { + next_act: + if (th->lower != THR_ACT_NULL) { + if (top_act == THR_ACT_NULL) + top_act = th; + th = th->lower; + db_printf(">>>>> next activation 0x%x ($task%d.%d) <<<<<\n", + th, + db_lookup_task(th->task), + db_lookup_task_act(th->task, th)); + goto next_activation; + } + /* end of chain */ + break; + } + if (!INKERNELSTACK(lastframe, th) || + !INKERNELSTACK((unsigned)frame, th)) + user_frame++; + if (user_frame == 1) { + db_printf(">>>>> user space <<<<<\n"); + if (kernel_only) + break; + } + + if (frame <= lastframe) { + if ((INKERNELSTACK(lastframe, th) && !INKERNELSTACK(frame, th))) continue; + db_printf("Bad frame pointer: 0x%x\n", frame); + break; + } + } + + thread_done: + if (trace_all_threads) { + if (top_act != THR_ACT_NULL) + th = top_act; + th = (thread_act_t) queue_next(&th->thr_acts); + if (! 
queue_end(act_list, (queue_entry_t) th)) { + db_printf("\n"); + addr = (db_expr_t) th; + thcount++; + goto next_thread; + + } + } +} diff --git a/osfmk/ppc/endian.h b/osfmk/ppc/endian.h new file mode 100644 index 000000000..5ff26263e --- /dev/null +++ b/osfmk/ppc/endian.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _MACHINE_ENDIAN_H_ +#define _MACHINE_ENDIAN_H_ + +/* + * Definitions for byte order, + * according to byte significance from low address to high. + */ +#define LITTLE_ENDIAN 1234 /* least-significant byte first (vax) */ +#define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ + +#if _BIG_ENDIAN /* Predefined by compiler */ +#define BYTE_ORDER BIG_ENDIAN /* byte order we use on ppc */ +#define ENDIAN BIG +#else +#error code has not been ported to little endian targets yet +#endif + +/* + * Macros for network/external number representation conversion. 
+ */ +#if BYTE_ORDER == BIG_ENDIAN && !defined(lint) +#define ntohl(x) (x) +#define ntohs(x) (x) +#define htonl(x) (x) +#define htons(x) (x) + +static __inline__ unsigned int byte_reverse_word(unsigned int word); +static __inline__ unsigned int byte_reverse_word(unsigned int word) { + unsigned int result; + __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (&word)); + return result; +} + +/* The above function is commutative, so we can use it for + * translations in both directions (to/from little endianness) + * Note that htolx and ltohx are probably identical, they are + * included for completeness. + */ +#define htoll(x) byte_reverse_word(x) +#define htols(x) (byte_reverse_word(x) >> 16) +#define ltohl(x) htoll(x) +#define ltohs(x) htols(x) + +#define htobl(x) (x) +#define htobs(x) (x) +#define btohl(x) (x) +#define btohs(x) (x) + +#else +unsigned short ntohs(), htons(); +unsigned long ntohl(), htonl(); +#endif + +/* This defines the order of elements in a bitfield, + * it is principally used by the SCSI subsystem in + * the definitions of mapped registers + */ +#define BYTE_MSF 1 + +#endif /* _MACHINE_ENDIAN_H_ */ diff --git a/osfmk/ppc/exception.h b/osfmk/ppc/exception.h new file mode 100644 index 000000000..4db360ba0 --- /dev/null +++ b/osfmk/ppc/exception.h @@ -0,0 +1,642 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* Miscellaneous constants and structures used by the exception + * handlers + */ + +#ifndef _PPC_EXCEPTION_H_ +#define _PPC_EXCEPTION_H_ + +#ifndef ASSEMBLER + +#include +#include +#include + +#include +#include +#include +#include +#include + +/* Per processor CPU features */ +struct procFeatures { + unsigned int Available; +#define pfFloat 0x80000000 +#define pfFloatb 0 +#define pfAltivec 0x40000000 +#define pfAltivecb 1 +#define pfAvJava 0x20000000 +#define pfAvJavab 2 +#define pfSMPcap 0x10000000 +#define pfSMPcapb 3 +#define pfCanSleep 0x08000000 +#define pfCanSleepb 4 +#define pfCanNap 0x04000000 +#define pfCanNapb 5 +#define pfCanDoze 0x02000000 +#define pfCanDozeb 6 +#define pfThermal 0x01000000 +#define pfThermalb 7 +#define pfThermInt 0x00800000 +#define pfThermIntb 8 +#define pfL23lck 0x00001000 +#define pfL23lckb 19 +#define pfWillNap 0x00000800 +#define pfWillNapb 20 +#define pfNoMSRir 0x00000400 +#define pfNoMSRirb 21 +#define pfL1nnc 0x00000200 +#define pfL1nncb 22 +#define pfL1i 0x00000100 +#define pfL1ib 23 +#define pfL1d 0x00000080 +#define pfL1db 24 +#define pfL1fa 0x00000040 +#define pfL1fab 25 +#define pfL2 0x00000020 +#define pfL2b 26 +#define pfL2fa 0x00000010 +#define pfL2fab 27 +#define pfL2i 0x00000008 +#define pfL2ib 28 +#define pfL3 0x00000004 +#define pfL3b 29 +#define pfL3fa 0x00000002 +#define pfL3fab 30 +#define pfValid 0x00000001 +#define pfValidb 31 + unsigned short rptdProc; + unsigned short lineSize; + 
unsigned int l1iSize; + unsigned int l1dSize; + unsigned int l2cr; + unsigned int l2Size; + unsigned int l3cr; + unsigned int l3Size; + unsigned int pfHID0; + unsigned int pfHID1; + unsigned int pfHID2; + unsigned int pfHID3; + unsigned int pfMSSCR0; + unsigned int pfMSSCR1; + unsigned int pfICTRL; + unsigned int pfLDSTCR; +}; + +typedef struct procFeatures procFeatures; + +struct thrmControl { + unsigned int maxTemp; /* Maximum temprature before damage */ + unsigned int throttleTemp; /* Temprature at which to throttle down */ + unsigned int lowTemp; /* Interrupt when temprature drops below */ + unsigned int highTemp; /* Interrupt when temprature exceeds this */ + unsigned int thrm3val; /* Value for thrm3 register */ + unsigned int rsvd[3]; /* Pad to cache line */ +}; + +typedef struct thrmControl thrmControl; + +/* When an exception is taken, this info is accessed via sprg0 */ +/* We should always have this one on a cache line boundary */ +struct per_proc_info { + unsigned short cpu_number; + unsigned short cpu_flags; /* Various low-level flags */ + vm_offset_t istackptr; + vm_offset_t intstack_top_ss; + +#if MACH_KDP || MACH_KDB + vm_offset_t debstackptr; + vm_offset_t debstack_top_ss; +#else + unsigned int ppigas1[2]; /* Take up some space if no KDP or KDB */ +#endif + + unsigned int tempwork1; /* Temp work area - monitor use carefully */ + unsigned int save_exception_type; + unsigned int old_thread; + + /* PPC cache line boundary here - 020 */ + + unsigned int active_kloaded; /* pointer to active_kloaded[CPU_NO] */ + unsigned int cpu_data; /* pointer to cpu_data[CPU_NO] */ + unsigned int need_ast; /* pointer to need_ast[CPU_NO] */ +/* + * Note: the following two pairs of words need to stay in order and each pair must + * be in the same reservation (line) granule + */ + unsigned int FPU_thread; /* Thread owning the FPU on this cpu.*/ + unsigned int FPU_vmmCtx; /* Owing virtual machine context */ + unsigned int VMX_thread; /* Thread owning the VMX on this cpu */ 
+ unsigned int VMX_vmmCtx; /* Owing virtual machine context */ + unsigned int active_stacks; /* pointer to active_stacks[CPU_NO] */ + + /* PPC cache line boundary here - 040 */ + unsigned int quickfret; /* Pointer to savearea for exception exit to free */ + unsigned int Lastpmap; /* Last user pmap loaded */ + unsigned int userspace; /* Last loaded user memory space ID */ + unsigned int userpmap; /* User pmap - real address */ + unsigned int liveVRSave; /* VRSave assiciated with live vector registers */ + unsigned int spcFlags; /* Special thread flags */ + unsigned int liveFPSCR; /* FPSCR which is for the live context */ + unsigned int ppigas05C; /* Reserved area */ + + /* PPC cache line boundary here - 060 */ + boolean_t (*set_interrupts_enabled)(boolean_t); + boolean_t (*get_interrupts_enabled)(void); + IOInterruptHandler interrupt_handler; + void * interrupt_nub; + unsigned int interrupt_source; + void * interrupt_target; + void * interrupt_refCon; + unsigned int savedSave; /* Savearea saved across sleep - must be 0 at boot */ + + /* PPC cache line boundary here - 080 */ + unsigned int MPsigpStat; /* Signal Processor status (interlocked update for this one) */ +#define MPsigpMsgp 0xC0000000 /* Message pending (busy + pass) */ +#define MPsigpBusy 0x80000000 /* Processor area busy, i.e., locked */ +#define MPsigpPass 0x40000000 /* Busy lock passed to receiving processor */ +#define MPsigpSrc 0x000000FF /* Processor that owns busy, i.e., the ID of */ + /* whomever set busy. When a busy is passed, */ + /* this is the requestor of the function. 
*/ +#define MPsigpFunc 0x0000FF00 /* Current function */ +#define MPsigpIdle 0x00 /* No function pending */ +#define MPsigpSigp 0x04 /* Signal a processor */ +#define SIGPast 0 /* Requests an ast on target processor */ +#define SIGPcpureq 1 /* Requests CPU specific function */ +#define SIGPdebug 2 /* Requests a debugger entry */ +#define SIGPwake 3 /* Wake up a sleeping processor */ +#define CPRQtemp 0 /* Get temprature of processor */ +#define CPRQtimebase 1 /* Get timebase of processor */ + unsigned int MPsigpParm0; /* SIGP parm 0 */ + unsigned int MPsigpParm1; /* SIGP parm 1 */ + unsigned int MPsigpParm2; /* SIGP parm 2 */ + cpu_id_t cpu_id; + vm_offset_t start_paddr; + unsigned int ruptStamp[2]; /* Timebase at last interruption */ + + /* PPC cache line boundary here - 0A0 */ + procFeatures pf; /* Processor features */ + + /* PPC cache line boundary here - 0E0 */ + thrmControl thrm; /* Thermal controls */ + + /* PPC cache line boundary here - 100 */ + unsigned int napStamp[2]; /* Time base when we napped */ + unsigned int napTotal[2]; /* Total nap time in ticks */ + unsigned int numSIGPast; /* Number of SIGP asts recieved */ + unsigned int numSIGPcpureq; /* Number of SIGP cpu requests recieved */ + unsigned int numSIGPdebug; /* Number of SIGP debugs recieved */ + unsigned int numSIGPwake; /* Number of SIGP wakes recieved */ + + /* PPC cache line boundary here - 120 */ + unsigned int spcTRc; /* Special trace count */ + unsigned int spcTRp; /* Special trace buffer pointer */ + unsigned int Uassist; /* User Assist Word */ + unsigned int rsrvd12C[5]; /* Reserved slots */ + + /* PPC cache line boundary here - 140 */ + time_base_enable_t time_base_enable; + unsigned int rsrvd140[7]; /* Reserved slots */ + + /* PPC cache line boundary here - 160 */ + unsigned int rsrvd160[8]; /* Reserved slots */ + + /* PPC cache line boundary here - 180 */ + unsigned int rsrvd180[8]; /* Reserved slots */ + + /* PPC cache line boundary here - 1A0 */ + unsigned int rsrvd1A0[8]; /* 
Reserved slots */ + + /* PPC cache line boundary here - 1C0 */ + unsigned int rsrvd1C0[8]; /* Reserved slots */ + + /* PPC cache line boundary here - 1E0 */ + double emfp0; /* Copies of floating point registers */ + double emfp1; /* Used for emulation purposes */ + double emfp2; + double emfp3; + + double emfp4; + double emfp5; + double emfp6; + double emfp7; + + double emfp8; + double emfp9; + double emfp10; + double emfp11; + + double emfp12; + double emfp13; + double emfp14; + double emfp15; + + double emfp16; + double emfp17; + double emfp18; + double emfp19; + + double emfp20; + double emfp21; + double emfp22; + double emfp23; + + double emfp24; + double emfp25; + double emfp26; + double emfp27; + + double emfp28; + double emfp29; + double emfp30; + double emfp31; + +/* - 2E0 */ + unsigned int emfpscr_pad; + unsigned int emfpscr; + unsigned int empadfp[6]; + +/* - 300 */ + unsigned int emvr0[4]; /* Copies of vector registers used both */ + unsigned int emvr1[4]; /* for full vector emulation or */ + unsigned int emvr2[4]; /* as saveareas while assisting denorms */ + unsigned int emvr3[4]; + unsigned int emvr4[4]; + unsigned int emvr5[4]; + unsigned int emvr6[4]; + unsigned int emvr7[4]; + unsigned int emvr8[4]; + unsigned int emvr9[4]; + unsigned int emvr10[4]; + unsigned int emvr11[4]; + unsigned int emvr12[4]; + unsigned int emvr13[4]; + unsigned int emvr14[4]; + unsigned int emvr15[4]; + unsigned int emvr16[4]; + unsigned int emvr17[4]; + unsigned int emvr18[4]; + unsigned int emvr19[4]; + unsigned int emvr20[4]; + unsigned int emvr21[4]; + unsigned int emvr22[4]; + unsigned int emvr23[4]; + unsigned int emvr24[4]; + unsigned int emvr25[4]; + unsigned int emvr26[4]; + unsigned int emvr27[4]; + unsigned int emvr28[4]; + unsigned int emvr29[4]; + unsigned int emvr30[4]; + unsigned int emvr31[4]; + unsigned int emvscr[4]; + unsigned int empadvr[4]; +/* - 520 */ + + unsigned int patcharea[56]; +/* - 600 */ + +}; + + +extern struct per_proc_info 
per_proc_info[NCPUS]; + +typedef struct savearea { + +/* The following area corresponds to ppc_saved_state and ppc_thread_state */ + +/* offset 0x0000 */ + unsigned int save_srr0; + unsigned int save_srr1; + unsigned int save_r0; + unsigned int save_r1; + unsigned int save_r2; + unsigned int save_r3; + unsigned int save_r4; + unsigned int save_r5; + + unsigned int save_r6; + unsigned int save_r7; + unsigned int save_r8; + unsigned int save_r9; + unsigned int save_r10; + unsigned int save_r11; + unsigned int save_r12; + unsigned int save_r13; + + unsigned int save_r14; + unsigned int save_r15; + unsigned int save_r16; + unsigned int save_r17; + unsigned int save_r18; + unsigned int save_r19; + unsigned int save_r20; + unsigned int save_r21; + + unsigned int save_r22; + unsigned int save_r23; + unsigned int save_r24; + unsigned int save_r25; + unsigned int save_r26; + unsigned int save_r27; + unsigned int save_r28; + unsigned int save_r29; + + unsigned int save_r30; + unsigned int save_r31; + unsigned int save_cr; + unsigned int save_xer; + unsigned int save_lr; + unsigned int save_ctr; + unsigned int save_mq; + unsigned int save_vrsave; + + unsigned int save_sr_copyin; + unsigned int save_space; + unsigned int save_xfpscrpad; + unsigned int save_xfpscr; + unsigned int save_pad2[4]; + + +/* The following corresponds to ppc_exception_state */ + +/* offset 0x00C0 */ + unsigned int save_dar; + unsigned int save_dsisr; + unsigned int save_exception; + unsigned int save_pad3[5]; + +/* The following corresponds to ppc_float_state */ + +/* offset 0x00E0 */ + double save_fp0; + double save_fp1; + double save_fp2; + double save_fp3; + + double save_fp4; + double save_fp5; + double save_fp6; + double save_fp7; + + double save_fp8; + double save_fp9; + double save_fp10; + double save_fp11; + + double save_fp12; + double save_fp13; + double save_fp14; + double save_fp15; + + double save_fp16; + double save_fp17; + double save_fp18; + double save_fp19; + + double save_fp20; + 
double save_fp21; + double save_fp22; + double save_fp23; + + double save_fp24; + double save_fp25; + double save_fp26; + double save_fp27; + + double save_fp28; + double save_fp29; + double save_fp30; + double save_fp31; + + unsigned int save_fpscr_pad; + unsigned int save_fpscr; + unsigned int save_pad4[6]; + +/* The following is the save area for the VMX registers */ + +/* offset 0x0200 */ + unsigned int save_vr0[4]; + unsigned int save_vr1[4]; + unsigned int save_vr2[4]; + unsigned int save_vr3[4]; + unsigned int save_vr4[4]; + unsigned int save_vr5[4]; + unsigned int save_vr6[4]; + unsigned int save_vr7[4]; + unsigned int save_vr8[4]; + unsigned int save_vr9[4]; + unsigned int save_vr10[4]; + unsigned int save_vr11[4]; + unsigned int save_vr12[4]; + unsigned int save_vr13[4]; + unsigned int save_vr14[4]; + unsigned int save_vr15[4]; + unsigned int save_vr16[4]; + unsigned int save_vr17[4]; + unsigned int save_vr18[4]; + unsigned int save_vr19[4]; + unsigned int save_vr20[4]; + unsigned int save_vr21[4]; + unsigned int save_vr22[4]; + unsigned int save_vr23[4]; + unsigned int save_vr24[4]; + unsigned int save_vr25[4]; + unsigned int save_vr26[4]; + unsigned int save_vr27[4]; + unsigned int save_vr28[4]; + unsigned int save_vr29[4]; + unsigned int save_vr30[4]; + unsigned int save_vr31[4]; + unsigned int save_vscr[4]; /* Note that this is always valid if VMX has been used */ + unsigned int save_pad5[4]; /* Insures that vrvalid is on a cache line */ + unsigned int save_vrvalid; /* VRs that have been saved */ + unsigned int save_pad6[7]; + +/* The following is the save area for the segment registers */ + +/* offset 0x0440 */ + + unsigned int save_sr0; + unsigned int save_sr1; + unsigned int save_sr2; + unsigned int save_sr3; + unsigned int save_sr4; + unsigned int save_sr5; + unsigned int save_sr6; + unsigned int save_sr7; + + unsigned int save_sr8; + unsigned int save_sr9; + unsigned int save_sr10; + unsigned int save_sr11; + unsigned int save_sr12; + unsigned 
int save_sr13; + unsigned int save_sr14; + unsigned int save_sr15; + +/* The following are the control area for this save area */ + +/* offset 0x0480 */ + + struct savearea *save_prev; /* The address of the previous normal savearea */ + struct savearea *save_prev_float; /* The address of the previous floating point savearea */ + struct savearea *save_prev_vector; /* The address of the previous vector savearea */ + struct savearea *save_qfret; /* The "quick release" chain */ + struct savearea *save_phys; /* The physical address of this savearea */ + struct thread_activation *save_act; /* Pointer to the associated activation */ + unsigned int save_flags; /* Various flags */ +#define save_perm 0x80000000 /* Permanent area, cannot be released */ + unsigned int save_level_fp; /* Level that floating point state belongs to */ + unsigned int save_level_vec; /* Level that vector state belongs to */ + +} savearea; + +typedef struct savectl { /* Savearea control */ + + unsigned int *sac_next; /* Points to next savearea page that has a free slot - real */ + unsigned int sac_vrswap; /* XOR mask to swap V to R or vice versa */ + unsigned int sac_alloc; /* Bitmap of allocated slots */ + unsigned int sac_flags; /* Various flags */ +} savectl; + +struct Saveanchor { + unsigned int savelock; /* Lock word for savearea manipulation */ + int savecount; /* The total number of save areas allocated */ + int saveinuse; /* Number of areas in use */ + int savemin; /* We abend if lower than this */ + int saveneghyst; /* The negative hysteresis value */ + int savetarget; /* The target point for free save areas */ + int saveposhyst; /* The positive hysteresis value */ + unsigned int savefree; /* Anchor for the freelist queue */ + /* Cache line (32-byte) boundary */ + int savextnd; /* Free list extention count */ + int saveneed; /* Number of savearea's needed. 
So far, we assume we need 3 per activation */ + int savemaxcount; + int savespare[5]; /* Spare */ +}; + + +extern char *trap_type[]; + +#endif /* ndef ASSEMBLER */ + +#define sac_empty 0xC0000000 /* Mask with all entries empty */ +#define sac_cnt 2 /* Number of entries per page */ +#define sac_busy 0x80000000 /* This page is busy - used during initial allocation */ +#define sac_perm 0x40000000 /* Page permanently assigned */ + +#define SAVattach 0x80000000 /* Savearea is attached to a thread */ +#define SAVfpuvalid 0x40000000 /* Savearea contains FPU context */ +#define SAVvmxvalid 0x20000000 /* Savearea contains VMX context */ +#define SAVinuse 0xE0000000 /* Save area is inuse */ +#define SAVrststk 0x00010000 /* Indicates that the current stack should be reset to empty */ +#define SAVsyscall 0x00020000 /* Indicates that the savearea is associated with a syscall */ +#define SAVredrive 0x00040000 /* Indicates that the low-level fault handler associated */ + /* with this savearea should be redriven */ + +/* cpu_flags defs */ +#define SIGPactive 0x8000 +#define needSRload 0x4000 +#define turnEEon 0x2000 +#define traceBE 0x1000 /* user mode BE tracing in enabled */ +#define traceBEb 3 /* bit number for traceBE */ +#define BootDone 0x0100 +#define loadMSR 0x7FF4 + +#define T_VECTOR_SIZE 4 /* function pointer size */ +#define InitialSaveMin 4 /* The initial value for the minimum number of saveareas */ +#define InitialNegHysteresis 5 /* The number off from target before we adjust upwards */ +#define InitialPosHysteresis 10 /* The number off from target before we adjust downwards */ +#define InitialSaveTarget 20 /* The number of saveareas for an initial target */ +#define InitialSaveAreas 20 /* The number of saveareas to allocate at boot */ +#define InitialSaveBloks (InitialSaveAreas+sac_cnt-1)/sac_cnt /* The number of savearea blocks to allocate at boot */ + +/* Hardware exceptions */ + +#define T_IN_VAIN (0x00 * T_VECTOR_SIZE) +#define T_RESET (0x01 * T_VECTOR_SIZE) 
+#define T_MACHINE_CHECK (0x02 * T_VECTOR_SIZE) +#define T_DATA_ACCESS (0x03 * T_VECTOR_SIZE) +#define T_INSTRUCTION_ACCESS (0x04 * T_VECTOR_SIZE) +#define T_INTERRUPT (0x05 * T_VECTOR_SIZE) +#define T_ALIGNMENT (0x06 * T_VECTOR_SIZE) +#define T_PROGRAM (0x07 * T_VECTOR_SIZE) +#define T_FP_UNAVAILABLE (0x08 * T_VECTOR_SIZE) +#define T_DECREMENTER (0x09 * T_VECTOR_SIZE) +#define T_IO_ERROR (0x0a * T_VECTOR_SIZE) +#define T_RESERVED (0x0b * T_VECTOR_SIZE) +#define T_SYSTEM_CALL (0x0c * T_VECTOR_SIZE) +#define T_TRACE (0x0d * T_VECTOR_SIZE) +#define T_FP_ASSIST (0x0e * T_VECTOR_SIZE) +#define T_PERF_MON (0x0f * T_VECTOR_SIZE) +#define T_VMX (0x10 * T_VECTOR_SIZE) +#define T_INVALID_EXCP0 (0x11 * T_VECTOR_SIZE) +#define T_INVALID_EXCP1 (0x12 * T_VECTOR_SIZE) +#define T_INVALID_EXCP2 (0x13 * T_VECTOR_SIZE) +#define T_INSTRUCTION_BKPT (0x14 * T_VECTOR_SIZE) +#define T_SYSTEM_MANAGEMENT (0x15 * T_VECTOR_SIZE) +#define T_ALTIVEC_ASSIST (0x16 * T_VECTOR_SIZE) +#define T_THERMAL (0x17 * T_VECTOR_SIZE) +#define T_INVALID_EXCP5 (0x18 * T_VECTOR_SIZE) +#define T_INVALID_EXCP6 (0x19 * T_VECTOR_SIZE) +#define T_INVALID_EXCP7 (0x1A * T_VECTOR_SIZE) +#define T_INVALID_EXCP8 (0x1B * T_VECTOR_SIZE) +#define T_INVALID_EXCP9 (0x1C * T_VECTOR_SIZE) +#define T_INVALID_EXCP10 (0x1D * T_VECTOR_SIZE) +#define T_INVALID_EXCP11 (0x1E * T_VECTOR_SIZE) +#define T_INVALID_EXCP12 (0x1F * T_VECTOR_SIZE) +#define T_INVALID_EXCP13 (0x20 * T_VECTOR_SIZE) + +#define T_RUNMODE_TRACE (0x21 * T_VECTOR_SIZE) /* 601 only */ + +#define T_SIGP (0x22 * T_VECTOR_SIZE) +#define T_PREEMPT (0x23 * T_VECTOR_SIZE) +#define T_CSWITCH (0x24 * T_VECTOR_SIZE) +#define T_SHUTDOWN (0x25 * T_VECTOR_SIZE) + +#define T_AST (0x100 * T_VECTOR_SIZE) +#define T_MAX T_SHUTDOWN /* Maximum exception no */ + +#define EXCEPTION_VECTOR(exception) (exception * 0x100 /T_VECTOR_SIZE ) + +#ifndef ASSEMBLER + +typedef struct resethandler { + unsigned int type; + vm_offset_t call_paddr; + vm_offset_t arg__paddr; +} resethandler_t; + 
+extern resethandler_t ResetHandler; + +#endif + +#define RESET_HANDLER_NULL 0x0 +#define RESET_HANDLER_START 0x1 + +#endif /* _PPC_EXCEPTION_H_ */ diff --git a/osfmk/ppc/fpu_protos.h b/osfmk/ppc/fpu_protos.h new file mode 100644 index 000000000..ac5ba8775 --- /dev/null +++ b/osfmk/ppc/fpu_protos.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _PPC_FPU_PROTOS_H_ +#define _PPC_FPU_PROTOS_H_ + +#include + +extern void fpu_save(void); +extern void fpu_disable(void); + +#endif /* _PPC_FPU_PROTOS_H_ */ diff --git a/osfmk/ppc/genassym.c b/osfmk/ppc/genassym.c new file mode 100644 index 000000000..cdf366c63 --- /dev/null +++ b/osfmk/ppc/genassym.c @@ -0,0 +1,939 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +/* + * genassym.c is used to produce an + * assembly file which, intermingled with unuseful assembly code, + * has all the necessary definitions emitted. This assembly file is + * then postprocessed with sed to extract only these definitions + * and thus the final assyms.s is created. + * + * This convoluted means is necessary since the structure alignment + * and packing may be different between the host machine and the + * target so we are forced into using the cross compiler to generate + * the values, but we cannot run anything on the target machine. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER) + +#define DECLARE(SYM,VAL) \ + __asm("#DEFINITION##define\t" SYM "\t%0" : : "n" ((u_int)(VAL))) + +int main(int argc, char *argv[]) +{ + /* Process Control Block */ + + DECLARE("PCB_FLOAT_STATE", offsetof(struct pcb *, fs)); + + /* Floating point state */ + + DECLARE("PCB_FS_F0", offsetof(struct pcb *, fs.fpregs[0])); + DECLARE("PCB_FS_F1", offsetof(struct pcb *, fs.fpregs[1])); + DECLARE("PCB_FS_F2", offsetof(struct pcb *, fs.fpregs[2])); + DECLARE("PCB_FS_F3", offsetof(struct pcb *, fs.fpregs[3])); + DECLARE("PCB_FS_F4", offsetof(struct pcb *, fs.fpregs[4])); + DECLARE("PCB_FS_F5", offsetof(struct pcb *, fs.fpregs[5])); + DECLARE("PCB_FS_F6", offsetof(struct pcb *, fs.fpregs[6])); + DECLARE("PCB_FS_F7", offsetof(struct pcb *, fs.fpregs[7])); + DECLARE("PCB_FS_F8", offsetof(struct pcb *, fs.fpregs[8])); + DECLARE("PCB_FS_F9", offsetof(struct pcb *, fs.fpregs[9])); + DECLARE("PCB_FS_F10", offsetof(struct pcb *, fs.fpregs[10])); + DECLARE("PCB_FS_F11", offsetof(struct pcb *, fs.fpregs[11])); + DECLARE("PCB_FS_F12", offsetof(struct pcb *, fs.fpregs[12])); + DECLARE("PCB_FS_F13", offsetof(struct pcb *, fs.fpregs[13])); + DECLARE("PCB_FS_F14", offsetof(struct pcb *, fs.fpregs[14])); + DECLARE("PCB_FS_F15", offsetof(struct pcb *, fs.fpregs[15])); + DECLARE("PCB_FS_F16", offsetof(struct pcb *, fs.fpregs[16])); + DECLARE("PCB_FS_F17", offsetof(struct pcb *, fs.fpregs[17])); + DECLARE("PCB_FS_F18", offsetof(struct pcb *, fs.fpregs[18])); + DECLARE("PCB_FS_F19", offsetof(struct pcb *, fs.fpregs[19])); + DECLARE("PCB_FS_F20", offsetof(struct pcb *, fs.fpregs[20])); + DECLARE("PCB_FS_F21", offsetof(struct pcb *, 
fs.fpregs[21])); + DECLARE("PCB_FS_F22", offsetof(struct pcb *, fs.fpregs[22])); + DECLARE("PCB_FS_F23", offsetof(struct pcb *, fs.fpregs[23])); + DECLARE("PCB_FS_F24", offsetof(struct pcb *, fs.fpregs[24])); + DECLARE("PCB_FS_F25", offsetof(struct pcb *, fs.fpregs[25])); + DECLARE("PCB_FS_F26", offsetof(struct pcb *, fs.fpregs[26])); + DECLARE("PCB_FS_F27", offsetof(struct pcb *, fs.fpregs[27])); + DECLARE("PCB_FS_F28", offsetof(struct pcb *, fs.fpregs[28])); + DECLARE("PCB_FS_F29", offsetof(struct pcb *, fs.fpregs[29])); + DECLARE("PCB_FS_F30", offsetof(struct pcb *, fs.fpregs[30])); + DECLARE("PCB_FS_F31", offsetof(struct pcb *, fs.fpregs[31])); + DECLARE("PCB_FS_FPSCR", offsetof(struct pcb *, fs.fpscr_pad)); + + DECLARE("PCB_SAVED_STATE",offsetof(struct pcb *, ss)); + DECLARE("ACT_MACT_KSP", offsetof(struct thread_activation *, mact.ksp)); + DECLARE("ACT_MACT_BEDA", offsetof(struct thread_activation *, mact.bbDescAddr)); + DECLARE("ACT_MACT_BTS", offsetof(struct thread_activation *, mact.bbTableStart)); + DECLARE("ACT_MACT_BTE", offsetof(struct thread_activation *, mact.bbTaskEnv)); + DECLARE("ACT_MACT_SPF", offsetof(struct thread_activation *, mact.specFlags)); + DECLARE("qactTimer", offsetof(struct thread_activation *, mact.qactTimer)); + DECLARE("floatUsed", floatUsed); + DECLARE("vectorUsed", vectorUsed); + DECLARE("bbNoMachSCbit",bbNoMachSCbit); + DECLARE("runningVM", runningVM); + DECLARE("floatCng", floatCng); + DECLARE("vectorCng", vectorCng); + DECLARE("floatCngbit", floatCngbit); + DECLARE("vectorCngbit", vectorCngbit); + + DECLARE("PCB_SIZE", sizeof(struct pcb)); + + /* Save State Structure */ + DECLARE("SS_R0", offsetof(struct ppc_saved_state *, r0)); + DECLARE("SS_R1", offsetof(struct ppc_saved_state *, r1)); + DECLARE("SS_R2", offsetof(struct ppc_saved_state *, r2)); + DECLARE("SS_R3", offsetof(struct ppc_saved_state *, r3)); + DECLARE("SS_R4", offsetof(struct ppc_saved_state *, r4)); + DECLARE("SS_R5", offsetof(struct ppc_saved_state *, r5)); + 
DECLARE("SS_R6", offsetof(struct ppc_saved_state *, r6)); + DECLARE("SS_R7", offsetof(struct ppc_saved_state *, r7)); + DECLARE("SS_R8", offsetof(struct ppc_saved_state *, r8)); + DECLARE("SS_R9", offsetof(struct ppc_saved_state *, r9)); + DECLARE("SS_R10", offsetof(struct ppc_saved_state *, r10)); + DECLARE("SS_R11", offsetof(struct ppc_saved_state *, r11)); + DECLARE("SS_R12", offsetof(struct ppc_saved_state *, r12)); + DECLARE("SS_R13", offsetof(struct ppc_saved_state *, r13)); + DECLARE("SS_R14", offsetof(struct ppc_saved_state *, r14)); + DECLARE("SS_R15", offsetof(struct ppc_saved_state *, r15)); + DECLARE("SS_R16", offsetof(struct ppc_saved_state *, r16)); + DECLARE("SS_R17", offsetof(struct ppc_saved_state *, r17)); + DECLARE("SS_R18", offsetof(struct ppc_saved_state *, r18)); + DECLARE("SS_R19", offsetof(struct ppc_saved_state *, r19)); + DECLARE("SS_R20", offsetof(struct ppc_saved_state *, r20)); + DECLARE("SS_R21", offsetof(struct ppc_saved_state *, r21)); + DECLARE("SS_R22", offsetof(struct ppc_saved_state *, r22)); + DECLARE("SS_R23", offsetof(struct ppc_saved_state *, r23)); + DECLARE("SS_R24", offsetof(struct ppc_saved_state *, r24)); + DECLARE("SS_R25", offsetof(struct ppc_saved_state *, r25)); + DECLARE("SS_R26", offsetof(struct ppc_saved_state *, r26)); + DECLARE("SS_R27", offsetof(struct ppc_saved_state *, r27)); + DECLARE("SS_R28", offsetof(struct ppc_saved_state *, r28)); + DECLARE("SS_R29", offsetof(struct ppc_saved_state *, r29)); + DECLARE("SS_R30", offsetof(struct ppc_saved_state *, r30)); + DECLARE("SS_R31", offsetof(struct ppc_saved_state *, r31)); + DECLARE("SS_CR", offsetof(struct ppc_saved_state *, cr)); + DECLARE("SS_XER", offsetof(struct ppc_saved_state *, xer)); + DECLARE("SS_LR", offsetof(struct ppc_saved_state *, lr)); + DECLARE("SS_CTR", offsetof(struct ppc_saved_state *, ctr)); + DECLARE("SS_SRR0", offsetof(struct ppc_saved_state *, srr0)); + DECLARE("SS_SRR1", offsetof(struct ppc_saved_state *, srr1)); + DECLARE("SS_MQ", 
offsetof(struct ppc_saved_state *, mq)); + DECLARE("SS_SR_COPYIN", offsetof(struct ppc_saved_state *, sr_copyin)); + DECLARE("SS_SIZE", sizeof(struct ppc_saved_state)); + + /* Per Proc info structure */ + DECLARE("PP_CPU_NUMBER", offsetof(struct per_proc_info *, cpu_number)); + DECLARE("PP_CPU_FLAGS", offsetof(struct per_proc_info *, cpu_flags)); + DECLARE("PP_ISTACKPTR", offsetof(struct per_proc_info *, istackptr)); + DECLARE("PP_INTSTACK_TOP_SS", offsetof(struct per_proc_info *, intstack_top_ss)); +#if MACH_KDP || MACH_KDB + DECLARE("PP_DEBSTACKPTR", offsetof(struct per_proc_info *, debstackptr)); + DECLARE("PP_DEBSTACK_TOP_SS", offsetof(struct per_proc_info *, debstack_top_ss)); +#endif + DECLARE("PP_TEMPWORK1", offsetof(struct per_proc_info *, tempwork1)); + DECLARE("PP_USERSPACE", offsetof(struct per_proc_info *, userspace)); + DECLARE("PP_USERPMAP", offsetof(struct per_proc_info *, userpmap)); + DECLARE("PP_LASTPMAP", offsetof(struct per_proc_info *, Lastpmap)); + DECLARE("savedSave", offsetof(struct per_proc_info *, savedSave)); + + DECLARE("PP_SAVE_EXCEPTION_TYPE", offsetof(struct per_proc_info *, save_exception_type)); + DECLARE("PP_CPU_DATA", offsetof(struct per_proc_info *, cpu_data)); + DECLARE("PP_ACTIVE_KLOADED", offsetof(struct per_proc_info *, active_kloaded)); + DECLARE("PP_ACTIVE_STACKS", offsetof(struct per_proc_info *, active_stacks)); + DECLARE("PP_NEED_AST", offsetof(struct per_proc_info *, need_ast)); + DECLARE("PP_FPU_THREAD", offsetof(struct per_proc_info *, FPU_thread)); + DECLARE("FPU_vmmCtx", offsetof(struct per_proc_info *, FPU_vmmCtx)); + DECLARE("PP_VMX_THREAD", offsetof(struct per_proc_info *, VMX_thread)); + DECLARE("VMX_vmmCtx", offsetof(struct per_proc_info *, VMX_vmmCtx)); + DECLARE("PP_QUICKFRET", offsetof(struct per_proc_info *, quickfret)); + DECLARE("UAW", offsetof(struct per_proc_info *, Uassist)); + DECLARE("liveVRS", offsetof(struct per_proc_info *, liveVRSave)); + DECLARE("liveFPSCR", offsetof(struct per_proc_info *, 
liveFPSCR)); + DECLARE("spcFlags", offsetof(struct per_proc_info *, spcFlags)); + DECLARE("spcTRc", offsetof(struct per_proc_info *, spcTRc)); + DECLARE("spcTRp", offsetof(struct per_proc_info *, spcTRp)); + DECLARE("ruptStamp", offsetof(struct per_proc_info *, ruptStamp)); + DECLARE("pfAvailable", offsetof(struct per_proc_info *, pf.Available)); + DECLARE("pfFloat", pfFloat); + DECLARE("pfFloatb", pfFloatb); + DECLARE("pfAltivec", pfAltivec); + DECLARE("pfAltivecb", pfAltivecb); + DECLARE("pfAvJava", pfAvJava); + DECLARE("pfAvJavab", pfAvJavab); + DECLARE("pfSMPcap", pfSMPcap); + DECLARE("pfSMPcapb", pfSMPcapb); + DECLARE("pfCanSleep", pfCanSleep); + DECLARE("pfCanSleepb", pfCanSleepb); + DECLARE("pfCanNap", pfCanNap); + DECLARE("pfCanNapb", pfCanNapb); + DECLARE("pfCanDoze", pfCanDoze); + DECLARE("pfCanDozeb", pfCanDozeb); + DECLARE("pfCanDoze", pfCanDoze); + DECLARE("pfCanDozeb", pfCanDozeb); + DECLARE("pfThermal", pfThermal); + DECLARE("pfThermalb", pfThermalb); + DECLARE("pfThermInt", pfThermInt); + DECLARE("pfThermIntb", pfThermIntb); + DECLARE("pfL23lck", pfL23lck); + DECLARE("pfL23lckb", pfL23lckb); + DECLARE("pfWillNap", pfWillNap); + DECLARE("pfWillNapb", pfWillNapb); + DECLARE("pfNoMSRir", pfNoMSRir); + DECLARE("pfNoMSRirb", pfNoMSRirb); + DECLARE("pfL1nnc", pfL1nnc); + DECLARE("pfL1nncb", pfL1nncb); + DECLARE("pfL1i", pfL1i); + DECLARE("pfL1ib", pfL1ib); + DECLARE("pfL1d", pfL1d); + DECLARE("pfL1db", pfL1db); + DECLARE("pfL1fa", pfL1fa); + DECLARE("pfL1fab", pfL1fab); + DECLARE("pfL2", pfL2); + DECLARE("pfL2b", pfL2b); + DECLARE("pfL2fa", pfL2fa); + DECLARE("pfL2fab", pfL2fab); + DECLARE("pfL2i", pfL2i); + DECLARE("pfL2ib", pfL2ib); + DECLARE("pfL3", pfL3); + DECLARE("pfL3b", pfL3b); + DECLARE("pfL3fa", pfL3fa); + DECLARE("pfL3fab", pfL3fab); + DECLARE("pfValid", pfValid); + DECLARE("pfValidb", pfValidb); + DECLARE("pfrptdProc", offsetof(struct per_proc_info *, pf.rptdProc)); + DECLARE("pflineSize", offsetof(struct per_proc_info *, pf.lineSize)); + 
DECLARE("pfl1iSize", offsetof(struct per_proc_info *, pf.l1iSize)); + DECLARE("pfl1dSize", offsetof(struct per_proc_info *, pf.l1dSize)); + DECLARE("pfl2cr", offsetof(struct per_proc_info *, pf.l2cr)); + DECLARE("pfl2Size", offsetof(struct per_proc_info *, pf.l2Size)); + DECLARE("pfl3cr", offsetof(struct per_proc_info *, pf.l3cr)); + DECLARE("pfl3Size", offsetof(struct per_proc_info *, pf.l3Size)); + DECLARE("pfHID0", offsetof(struct per_proc_info *, pf.pfHID0)); + DECLARE("pfHID1", offsetof(struct per_proc_info *, pf.pfHID1)); + DECLARE("pfHID2", offsetof(struct per_proc_info *, pf.pfHID2)); + DECLARE("pfHID3", offsetof(struct per_proc_info *, pf.pfHID3)); + DECLARE("pfMSSCR0", offsetof(struct per_proc_info *, pf.pfMSSCR0)); + DECLARE("pfMSSCR1", offsetof(struct per_proc_info *, pf.pfMSSCR1)); + DECLARE("pfICTRL", offsetof(struct per_proc_info *, pf.pfICTRL)); + DECLARE("pfLDSTCR", offsetof(struct per_proc_info *, pf.pfLDSTCR)); + DECLARE("pfSize", sizeof(procFeatures)); + + DECLARE("thrmmaxTemp", offsetof(struct per_proc_info *, thrm.maxTemp)); + DECLARE("thrmthrottleTemp", offsetof(struct per_proc_info *, thrm.throttleTemp)); + DECLARE("thrmlowTemp", offsetof(struct per_proc_info *, thrm.lowTemp)); + DECLARE("thrmhighTemp", offsetof(struct per_proc_info *, thrm.highTemp)); + DECLARE("thrm3val", offsetof(struct per_proc_info *, thrm.thrm3val)); + DECLARE("thrmSize", sizeof(thrmControl)); + + DECLARE("napStamp", offsetof(struct per_proc_info *, napStamp)); + DECLARE("napTotal", offsetof(struct per_proc_info *, napTotal)); + + DECLARE("emfp0", offsetof(struct per_proc_info *, emfp0)); + DECLARE("emfp1", offsetof(struct per_proc_info *, emfp1)); + DECLARE("emfp2", offsetof(struct per_proc_info *, emfp2)); + DECLARE("emfp3", offsetof(struct per_proc_info *, emfp3)); + DECLARE("emfp4", offsetof(struct per_proc_info *, emfp4)); + DECLARE("emfp5", offsetof(struct per_proc_info *, emfp5)); + DECLARE("emfp6", offsetof(struct per_proc_info *, emfp6)); + DECLARE("emfp7", 
offsetof(struct per_proc_info *, emfp7)); + DECLARE("emfp8", offsetof(struct per_proc_info *, emfp8)); + DECLARE("emfp9", offsetof(struct per_proc_info *, emfp9)); + DECLARE("emfp10", offsetof(struct per_proc_info *, emfp10)); + DECLARE("emfp11", offsetof(struct per_proc_info *, emfp11)); + DECLARE("emfp12", offsetof(struct per_proc_info *, emfp12)); + DECLARE("emfp13", offsetof(struct per_proc_info *, emfp13)); + DECLARE("emfp14", offsetof(struct per_proc_info *, emfp14)); + DECLARE("emfp15", offsetof(struct per_proc_info *, emfp15)); + DECLARE("emfp16", offsetof(struct per_proc_info *, emfp16)); + DECLARE("emfp17", offsetof(struct per_proc_info *, emfp17)); + DECLARE("emfp18", offsetof(struct per_proc_info *, emfp18)); + DECLARE("emfp19", offsetof(struct per_proc_info *, emfp19)); + DECLARE("emfp20", offsetof(struct per_proc_info *, emfp20)); + DECLARE("emfp21", offsetof(struct per_proc_info *, emfp21)); + DECLARE("emfp22", offsetof(struct per_proc_info *, emfp22)); + DECLARE("emfp23", offsetof(struct per_proc_info *, emfp23)); + DECLARE("emfp24", offsetof(struct per_proc_info *, emfp24)); + DECLARE("emfp25", offsetof(struct per_proc_info *, emfp25)); + DECLARE("emfp26", offsetof(struct per_proc_info *, emfp26)); + DECLARE("emfp27", offsetof(struct per_proc_info *, emfp27)); + DECLARE("emfp28", offsetof(struct per_proc_info *, emfp28)); + DECLARE("emfp29", offsetof(struct per_proc_info *, emfp29)); + DECLARE("emfp30", offsetof(struct per_proc_info *, emfp30)); + DECLARE("emfp31", offsetof(struct per_proc_info *, emfp31)); + DECLARE("emfpscr_pad", offsetof(struct per_proc_info *, emfpscr_pad)); + DECLARE("emfpscr", offsetof(struct per_proc_info *, emfpscr)); + + DECLARE("emvr0", offsetof(struct per_proc_info *, emvr0)); + DECLARE("emvr1", offsetof(struct per_proc_info *, emvr1)); + DECLARE("emvr2", offsetof(struct per_proc_info *, emvr2)); + DECLARE("emvr3", offsetof(struct per_proc_info *, emvr3)); + DECLARE("emvr4", offsetof(struct per_proc_info *, emvr4)); + 
DECLARE("emvr5", offsetof(struct per_proc_info *, emvr5)); + DECLARE("emvr6", offsetof(struct per_proc_info *, emvr6)); + DECLARE("emvr7", offsetof(struct per_proc_info *, emvr7)); + DECLARE("emvr8", offsetof(struct per_proc_info *, emvr8)); + DECLARE("emvr9", offsetof(struct per_proc_info *, emvr9)); + DECLARE("emvr10", offsetof(struct per_proc_info *, emvr10)); + DECLARE("emvr11", offsetof(struct per_proc_info *, emvr11)); + DECLARE("emvr12", offsetof(struct per_proc_info *, emvr12)); + DECLARE("emvr13", offsetof(struct per_proc_info *, emvr13)); + DECLARE("emvr14", offsetof(struct per_proc_info *, emvr14)); + DECLARE("emvr15", offsetof(struct per_proc_info *, emvr15)); + DECLARE("emvr16", offsetof(struct per_proc_info *, emvr16)); + DECLARE("emvr17", offsetof(struct per_proc_info *, emvr17)); + DECLARE("emvr18", offsetof(struct per_proc_info *, emvr18)); + DECLARE("emvr19", offsetof(struct per_proc_info *, emvr19)); + DECLARE("emvr20", offsetof(struct per_proc_info *, emvr20)); + DECLARE("emvr21", offsetof(struct per_proc_info *, emvr21)); + DECLARE("emvr22", offsetof(struct per_proc_info *, emvr22)); + DECLARE("emvr23", offsetof(struct per_proc_info *, emvr23)); + DECLARE("emvr24", offsetof(struct per_proc_info *, emvr24)); + DECLARE("emvr25", offsetof(struct per_proc_info *, emvr25)); + DECLARE("emvr26", offsetof(struct per_proc_info *, emvr26)); + DECLARE("emvr27", offsetof(struct per_proc_info *, emvr27)); + DECLARE("emvr28", offsetof(struct per_proc_info *, emvr28)); + DECLARE("emvr29", offsetof(struct per_proc_info *, emvr29)); + DECLARE("emvr30", offsetof(struct per_proc_info *, emvr30)); + DECLARE("emvr31", offsetof(struct per_proc_info *, emvr31)); + DECLARE("empadvr", offsetof(struct per_proc_info *, empadvr)); + DECLARE("ppSize", sizeof(struct per_proc_info)); + + DECLARE("RESETHANDLER_TYPE", offsetof(struct resethandler *, type)); + DECLARE("RESETHANDLER_CALL", offsetof(struct resethandler *, call_paddr)); + DECLARE("RESETHANDLER_ARG", 
offsetof(struct resethandler *, arg__paddr)); + + /* we want offset from + * bottom of kernel stack, not offset into structure + */ +#define IKSBASE (u_int)STACK_IKS(0) + + /* values from kern/thread.h */ + DECLARE("THREAD_TOP_ACT", + offsetof(struct thread_shuttle *, top_act)); + DECLARE("THREAD_KERNEL_STACK", + offsetof(struct thread_shuttle *, kernel_stack)); + DECLARE("THREAD_CONTINUATION", + offsetof(struct thread_shuttle *, continuation)); + DECLARE("THREAD_RECOVER", + offsetof(struct thread_shuttle *, recover)); +#if MACH_LDEBUG + DECLARE("THREAD_MUTEX_COUNT", + offsetof(struct thread_shuttle *, mutex_count)); +#endif /* MACH_LDEBUG */ + DECLARE("THREAD_PSET", offsetof(struct thread_shuttle *, processor_set)); + DECLARE("THREAD_LINKS", offsetof(struct thread_shuttle *, links)); + DECLARE("THREAD_PSTHRN", offsetof(struct thread_shuttle *, pset_threads.next)); + + /* values from kern/thread_act.h */ + DECLARE("ACT_TASK", offsetof(struct thread_activation *, task)); + DECLARE("ACT_THREAD", offsetof(struct thread_activation *, thread)); + DECLARE("ACT_LOWER", offsetof(struct thread_activation *, lower)); + DECLARE("ACT_MACT_PCB",offsetof(struct thread_activation *, mact.pcb)); + DECLARE("ACT_MACT_FPU",offsetof(struct thread_activation *, mact.FPU_pcb)); + DECLARE("ACT_MACT_FPUlvl",offsetof(struct thread_activation *, mact.FPU_lvl)); + DECLARE("ACT_MACT_FPUcpu",offsetof(struct thread_activation *, mact.FPU_cpu)); + DECLARE("ACT_MACT_VMX",offsetof(struct thread_activation *, mact.VMX_pcb)); + DECLARE("ACT_MACT_VMXlvl",offsetof(struct thread_activation *, mact.VMX_lvl)); + DECLARE("ACT_MACT_VMXcpu",offsetof(struct thread_activation *, mact.VMX_cpu)); + DECLARE("ACT_AST", offsetof(struct thread_activation *, ast)); + DECLARE("ACT_VMMAP", offsetof(struct thread_activation *, map)); + DECLARE("runningVM", runningVM); + DECLARE("runningVMbit", runningVMbit); + DECLARE("ACT_KLOADED", + offsetof(struct thread_activation *, kernel_loaded)); + DECLARE("ACT_KLOADING", + 
offsetof(struct thread_activation *, kernel_loading)); + DECLARE("ACT_MACH_EXC_PORT", + offsetof(struct thread_activation *, + exc_actions[EXC_MACH_SYSCALL].port)); + DECLARE("vmmCEntry", offsetof(struct thread_activation *, mact.vmmCEntry)); + DECLARE("vmmControl", offsetof(struct thread_activation *, mact.vmmControl)); +#ifdef MACH_BSD + DECLARE("CTHREAD_SELF", offsetof(struct thread_activation *, mact.cthread_self)); +#endif + + /* Values from vmachmon.h */ + + DECLARE("kVmmGetVersion", kVmmGetVersion); + DECLARE("kVmmvGetFeatures", kVmmvGetFeatures); + DECLARE("kVmmInitContext", kVmmInitContext); + DECLARE("kVmmTearDownContext", kVmmTearDownContext); + DECLARE("kVmmTearDownAll", kVmmTearDownAll); + DECLARE("kVmmMapPage", kVmmMapPage); + DECLARE("kVmmGetPageMapping", kVmmGetPageMapping); + DECLARE("kVmmUnmapPage", kVmmUnmapPage); + DECLARE("kVmmUnmapAllPages", kVmmUnmapAllPages); + DECLARE("kVmmGetPageDirtyFlag", kVmmGetPageDirtyFlag); + DECLARE("kVmmGetFloatState", kVmmGetFloatState); + DECLARE("kVmmGetVectorState", kVmmGetVectorState); + DECLARE("kVmmSetTimer", kVmmSetTimer); + DECLARE("kVmmExecuteVM", kVmmExecuteVM); + + DECLARE("kVmmReturnNull", kVmmReturnNull); + DECLARE("kVmmBogusContext", kVmmBogusContext); + DECLARE("kVmmReturnDataPageFault", kVmmReturnDataPageFault); + DECLARE("kVmmReturnInstrPageFault", kVmmReturnInstrPageFault); + DECLARE("kVmmReturnAlignmentFault", kVmmReturnAlignmentFault); + DECLARE("kVmmReturnProgramException", kVmmReturnProgramException); + DECLARE("kVmmReturnSystemCall", kVmmReturnSystemCall); + DECLARE("kVmmReturnTraceException", kVmmReturnTraceException); + + DECLARE("vmmFlags", offsetof(struct vmmCntrlEntry *, vmmFlags)); + DECLARE("vmmInUseb", vmmInUseb); + DECLARE("vmmInUse", vmmInUse); + DECLARE("vmmPmap", offsetof(struct vmmCntrlEntry *, vmmPmap)); + DECLARE("vmmContextKern", offsetof(struct vmmCntrlEntry *, vmmContextKern)); + DECLARE("vmmContextUser", offsetof(struct vmmCntrlEntry *, vmmContextUser)); + 
DECLARE("vmmFPU_pcb", offsetof(struct vmmCntrlEntry *, vmmFPU_pcb)); + DECLARE("vmmFPU_cpu", offsetof(struct vmmCntrlEntry *, vmmFPU_cpu)); + DECLARE("vmmVMX_pcb", offsetof(struct vmmCntrlEntry *, vmmVMX_pcb)); + DECLARE("vmmVMX_cpu", offsetof(struct vmmCntrlEntry *, vmmVMX_cpu)); + DECLARE("vmmLastMap", offsetof(struct vmmCntrlEntry *, vmmLastMap)); + DECLARE("vmmCEntrySize", sizeof(struct vmmCntrlEntry)); + DECLARE("kVmmMaxContextsPerThread", kVmmMaxContextsPerThread); + + DECLARE("interface_version", offsetof(struct vmm_state_page_t *, interface_version)); + DECLARE("thread_index", offsetof(struct vmm_state_page_t *, thread_index)); + DECLARE("vmmStat", offsetof(struct vmm_state_page_t *, vmmStat)); + DECLARE("vmmCntrl", offsetof(struct vmm_state_page_t *, vmmCntrl)); + DECLARE("return_code", offsetof(struct vmm_state_page_t *, return_code)); + DECLARE("return_params", offsetof(struct vmm_state_page_t *, return_params)); + DECLARE("vmm_proc_state", offsetof(struct vmm_state_page_t *, vmm_proc_state)); + DECLARE("return_params", offsetof(struct vmm_state_page_t *, return_params)); + DECLARE("vmmppcVRs", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVRs)); + DECLARE("vmmppcVSCR", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcVSCR)); + DECLARE("vmmppcFPRs", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPRs)); + DECLARE("vmmppcFPSCR", offsetof(struct vmm_state_page_t *, vmm_proc_state.ppcFPSCR)); + DECLARE("vmmFloatCngd", vmmFloatCngd); + DECLARE("vmmFloatCngdb", vmmFloatCngdb); + DECLARE("vmmVectCngd", vmmVectCngd); + DECLARE("vmmVectCngdb", vmmVectCngdb); + DECLARE("vmmTimerPop", vmmTimerPop); + DECLARE("vmmTimerPopb", vmmTimerPopb); + DECLARE("vmmMapDone", vmmMapDone); + DECLARE("vmmMapDoneb", vmmMapDoneb); + DECLARE("vmmSpfSave", vmmSpfSave); + DECLARE("vmmSpfSaveb", vmmSpfSaveb); + DECLARE("vmmFloatLoad", vmmFloatLoad); + DECLARE("vmmFloatLoadb", vmmFloatLoadb); + DECLARE("vmmVectLoad", vmmVectLoad); + DECLARE("vmmVectLoadb", 
vmmVectLoadb); + DECLARE("vmmVectVRall", vmmVectVRall); + DECLARE("vmmVectVRallb", vmmVectVRallb); + DECLARE("vmmVectVAss", vmmVectVAss); + DECLARE("vmmVectVAssb", vmmVectVAssb); + + /* values from kern/task.h */ + DECLARE("TASK_MACH_EXC_PORT", + offsetof(struct task *, exc_actions[EXC_MACH_SYSCALL].port)); + DECLARE("TASK_SYSCALLS_MACH", + offsetof(struct task *, syscalls_mach)); + + /* values from vm/vm_map.h */ + DECLARE("VMMAP_PMAP", offsetof(struct vm_map *, pmap)); + + /* values from machine/pmap.h */ + DECLARE("PMAP_SPACE", offsetof(struct pmap *, space)); + DECLARE("PMAP_BMAPS", offsetof(struct pmap *, bmaps)); + DECLARE("PMAP_PMAPVR", offsetof(struct pmap *, pmapvr)); + DECLARE("PMAP_VFLAGS", offsetof(struct pmap *, vflags)); + DECLARE("PMAP_USAGE", offsetof(struct pmap *, pmapUsage)); + DECLARE("PMAP_SEGS", offsetof(struct pmap *, pmapSegs)); + DECLARE("PMAP_SIZE", pmapSize); + + + /* Constants from pmap.h */ + DECLARE("PPC_SID_KERNEL", PPC_SID_KERNEL); + + /* values for accessing mach_trap table */ + DECLARE("MACH_TRAP_OFFSET_POW2", 4); + + DECLARE("MACH_TRAP_ARGC", + offsetof(mach_trap_t *, mach_trap_arg_count)); + DECLARE("MACH_TRAP_FUNCTION", + offsetof(mach_trap_t *, mach_trap_function)); + + DECLARE("HOST_SELF", offsetof(host_t, host_self)); + + DECLARE("PPCcallmax", sizeof(PPCcalls)); + + /* values from cpu_data.h */ + DECLARE("CPU_ACTIVE_THREAD", offsetof(cpu_data_t *, active_thread)); + DECLARE("CPU_PREEMPTION_LEVEL", offsetof(cpu_data_t *, preemption_level)); + DECLARE("CPU_SIMPLE_LOCK_COUNT", + offsetof(cpu_data_t *, simple_lock_count)); + DECLARE("CPU_INTERRUPT_LEVEL",offsetof(cpu_data_t *, interrupt_level)); + + /* Misc values used by assembler */ + DECLARE("AST_ALL", AST_ALL); + DECLARE("AST_URGENT", AST_URGENT); + + /* Simple Lock structure */ + DECLARE("SLOCK_ILK", offsetof(simple_lock_t, interlock)); +#if MACH_LDEBUG + DECLARE("SLOCK_TYPE", offsetof(simple_lock_t, lock_type)); + DECLARE("SLOCK_PC", offsetof(simple_lock_t, debug.lock_pc)); 
+ DECLARE("SLOCK_THREAD", offsetof(simple_lock_t, debug.lock_thread)); + DECLARE("SLOCK_DURATIONH",offsetof(simple_lock_t, debug.duration[0])); + DECLARE("SLOCK_DURATIONL",offsetof(simple_lock_t, debug.duration[1])); + DECLARE("USLOCK_TAG", USLOCK_TAG); +#endif /* MACH_LDEBUG */ + + /* Mutex structure */ + DECLARE("MUTEX_ILK", offsetof(mutex_t *, interlock)); + DECLARE("MUTEX_LOCKED", offsetof(mutex_t *, locked)); + DECLARE("MUTEX_WAITERS",offsetof(mutex_t *, waiters)); +#if MACH_LDEBUG + DECLARE("MUTEX_TYPE", offsetof(mutex_t *, type)); + DECLARE("MUTEX_PC", offsetof(mutex_t *, pc)); + DECLARE("MUTEX_THREAD", offsetof(mutex_t *, thread)); + DECLARE("MUTEX_TAG", MUTEX_TAG); +#endif /* MACH_LDEBUG */ + +#if NCPUS > 1 + /* values from mp/PlugIn.h */ + + DECLARE("MPSversionID", offsetof(struct MPPlugInSpec *, versionID)); + DECLARE("MPSareaAddr", offsetof(struct MPPlugInSpec *, areaAddr)); + DECLARE("MPSareaSize", offsetof(struct MPPlugInSpec *, areaSize)); + DECLARE("MPSoffsetTableAddr", offsetof(struct MPPlugInSpec *, offsetTableAddr)); + DECLARE("MPSbaseAddr", offsetof(struct MPPlugInSpec *, baseAddr)); + DECLARE("MPSdataArea", offsetof(struct MPPlugInSpec *, dataArea)); + DECLARE("MPSCPUArea", offsetof(struct MPPlugInSpec *, CPUArea)); + DECLARE("MPSSIGPhandler", offsetof(struct MPPlugInSpec *, SIGPhandler)); + + DECLARE("CSAstate", offsetof(struct CPUStatusArea *, state)); + DECLARE("CSAregsAreValid", offsetof(struct CPUStatusArea *, + regsAreValid)); + DECLARE("CSAgpr", offsetof(struct CPUStatusArea *, gpr)); + DECLARE("CSAfpr", offsetof(struct CPUStatusArea *, fpr)); + DECLARE("CSAcr", offsetof(struct CPUStatusArea *, cr)); + DECLARE("CSAfpscr", offsetof(struct CPUStatusArea *, fpscr)); + DECLARE("CSAxer", offsetof(struct CPUStatusArea *, xer)); + DECLARE("CSAlr", offsetof(struct CPUStatusArea *, lr)); + DECLARE("CSActr", offsetof(struct CPUStatusArea *, ctr)); + DECLARE("CSAtbu", offsetof(struct CPUStatusArea *, tbu)); + DECLARE("CSAtbl", offsetof(struct 
CPUStatusArea *, tbl)); + DECLARE("CSApvr", offsetof(struct CPUStatusArea *, pvr)); + DECLARE("CSAibat", offsetof(struct CPUStatusArea *, ibat)); + DECLARE("CSAdbat", offsetof(struct CPUStatusArea *, dbat)); + DECLARE("CSAsdr1", offsetof(struct CPUStatusArea *, sdr1)); + DECLARE("CSAsr", offsetof(struct CPUStatusArea *, sr)); + DECLARE("CSAdar", offsetof(struct CPUStatusArea *, dar)); + DECLARE("CSAdsisr", offsetof(struct CPUStatusArea *, dsisr)); + DECLARE("CSAsprg", offsetof(struct CPUStatusArea *, sprg)); + DECLARE("CSAsrr0", offsetof(struct CPUStatusArea *, srr0)); + DECLARE("CSAsrr1", offsetof(struct CPUStatusArea *, srr1)); + DECLARE("CSAdec", offsetof(struct CPUStatusArea *, dec)); + DECLARE("CSAdabr", offsetof(struct CPUStatusArea *, dabr)); + DECLARE("CSAiabr", offsetof(struct CPUStatusArea *, iabr)); + DECLARE("CSAear", offsetof(struct CPUStatusArea *, ear)); + DECLARE("CSAhid", offsetof(struct CPUStatusArea *, hid)); + DECLARE("CSAmmcr", offsetof(struct CPUStatusArea *, mmcr)); + DECLARE("CSApmc", offsetof(struct CPUStatusArea *, pmc)); + DECLARE("CSApir", offsetof(struct CPUStatusArea *, pir)); + DECLARE("CSAsda", offsetof(struct CPUStatusArea *, sda)); + DECLARE("CSAsia", offsetof(struct CPUStatusArea *, sia)); + DECLARE("CSAmq", offsetof(struct CPUStatusArea *, mq)); + DECLARE("CSAmsr", offsetof(struct CPUStatusArea *, msr)); + DECLARE("CSApc", offsetof(struct CPUStatusArea *, pc)); + DECLARE("CSAsysregs", offsetof(struct CPUStatusArea *, sysregs)); + DECLARE("CSAsize", sizeof(struct CPUStatusArea)); + + + DECLARE("MPPICStat", offsetof(struct MPPInterface *, MPPICStat)); + DECLARE("MPPICParm0", offsetof(struct MPPInterface *, MPPICParm0)); + DECLARE("MPPICParm1", offsetof(struct MPPInterface *, MPPICParm1)); + DECLARE("MPPICParm2", offsetof(struct MPPInterface *, MPPICParm2)); + DECLARE("MPPICspare0", offsetof(struct MPPInterface *, MPPICspare0)); + DECLARE("MPPICspare1", offsetof(struct MPPInterface *, MPPICspare1)); + DECLARE("MPPICParm0BU", 
offsetof(struct MPPInterface *, MPPICParm0BU)); + DECLARE("MPPICPriv", offsetof(struct MPPInterface *, MPPICPriv)); + + + +#endif /* NCPUS > 1 */ + + /* values from low_trace.h */ + DECLARE("LTR_cpu", offsetof(struct LowTraceRecord *, LTR_cpu)); + DECLARE("LTR_excpt", offsetof(struct LowTraceRecord *, LTR_excpt)); + DECLARE("LTR_timeHi", offsetof(struct LowTraceRecord *, LTR_timeHi)); + DECLARE("LTR_timeLo", offsetof(struct LowTraceRecord *, LTR_timeLo)); + DECLARE("LTR_cr", offsetof(struct LowTraceRecord *, LTR_cr)); + DECLARE("LTR_srr0", offsetof(struct LowTraceRecord *, LTR_srr0)); + DECLARE("LTR_srr1", offsetof(struct LowTraceRecord *, LTR_srr1)); + DECLARE("LTR_dar", offsetof(struct LowTraceRecord *, LTR_dar)); + DECLARE("LTR_save", offsetof(struct LowTraceRecord *, LTR_save)); + DECLARE("LTR_lr", offsetof(struct LowTraceRecord *, LTR_lr)); + DECLARE("LTR_ctr", offsetof(struct LowTraceRecord *, LTR_ctr)); + DECLARE("LTR_r0", offsetof(struct LowTraceRecord *, LTR_r0)); + DECLARE("LTR_r1", offsetof(struct LowTraceRecord *, LTR_r1)); + DECLARE("LTR_r2", offsetof(struct LowTraceRecord *, LTR_r2)); + DECLARE("LTR_r3", offsetof(struct LowTraceRecord *, LTR_r3)); + DECLARE("LTR_r4", offsetof(struct LowTraceRecord *, LTR_r4)); + DECLARE("LTR_r5", offsetof(struct LowTraceRecord *, LTR_r5)); + DECLARE("LTR_size", sizeof(struct LowTraceRecord)); + +/* Values from pexpert.h */ + DECLARE("PECFIcpurate", offsetof(struct clock_frequency_info_t *, cpu_clock_rate_hz)); + DECLARE("PECFIbusrate", offsetof(struct clock_frequency_info_t *, bus_clock_rate_hz)); + +/* Values from pmap_internals.h and mappings.h */ + DECLARE("mmnext", offsetof(struct mapping *, next)); + DECLARE("mmhashnext", offsetof(struct mapping *, hashnext)); + DECLARE("mmPTEhash", offsetof(struct mapping *, PTEhash)); + DECLARE("mmPTEent", offsetof(struct mapping *, PTEent)); + DECLARE("mmPTEv", offsetof(struct mapping *, PTEv)); + DECLARE("mmPTEr", offsetof(struct mapping *, PTEr)); + DECLARE("mmphysent", 
offsetof(struct mapping *, physent)); + DECLARE("mmpmap", offsetof(struct mapping *, pmap)); + + DECLARE("bmnext", offsetof(struct blokmap *, next)); + DECLARE("bmstart", offsetof(struct blokmap *, start)); + DECLARE("bmend", offsetof(struct blokmap *, end)); + DECLARE("bmPTEr", offsetof(struct blokmap *, PTEr)); + DECLARE("bmspace", offsetof(struct blokmap *, space)); + DECLARE("blkFlags", offsetof(struct blokmap *, blkFlags)); + DECLARE("blkPerm", blkPerm); + DECLARE("blkPermbit", blkPermbit); + + DECLARE("mbvrswap", offsetof(struct mappingblok *, mapblokvrswap)); + DECLARE("mbfree", offsetof(struct mappingblok *, mapblokfree)); + DECLARE("mapcsize", sizeof(struct mappingctl)); + + DECLARE("pephyslink", offsetof(struct phys_entry *, phys_link)); + DECLARE("pepte1", offsetof(struct phys_entry *, pte1)); + + DECLARE("PCAlock", offsetof(struct PCA *, PCAlock)); + DECLARE("PCAallo", offsetof(struct PCA *, flgs.PCAallo)); + DECLARE("PCAfree", offsetof(struct PCA *, flgs.PCAalflgs.PCAfree)); + DECLARE("PCAauto", offsetof(struct PCA *, flgs.PCAalflgs.PCAauto)); + DECLARE("PCAslck", offsetof(struct PCA *, flgs.PCAalflgs.PCAslck)); + DECLARE("PCAsteal", offsetof(struct PCA *, flgs.PCAalflgs.PCAsteal)); + DECLARE("PCAgas", offsetof(struct PCA *, PCAgas)); + DECLARE("PCAhash", offsetof(struct PCA *, PCAhash)); + + DECLARE("SVlock", offsetof(struct Saveanchor *, savelock)); + DECLARE("SVcount", offsetof(struct Saveanchor *, savecount)); + DECLARE("SVinuse", offsetof(struct Saveanchor *, saveinuse)); + DECLARE("SVmin", offsetof(struct Saveanchor *, savemin)); + DECLARE("SVneghyst", offsetof(struct Saveanchor *, saveneghyst)); + DECLARE("SVtarget", offsetof(struct Saveanchor *, savetarget)); + DECLARE("SVposhyst", offsetof(struct Saveanchor *, saveposhyst)); + DECLARE("SVfree", offsetof(struct Saveanchor *, savefree)); + DECLARE("SVsize", sizeof(struct Saveanchor)); + +#if 1 + DECLARE("GDsave", offsetof(struct GDWorkArea *, GDsave)); + DECLARE("GDfp0", offsetof(struct 
GDWorkArea *, GDfp0)); + DECLARE("GDfp1", offsetof(struct GDWorkArea *, GDfp1)); + DECLARE("GDfp2", offsetof(struct GDWorkArea *, GDfp2)); + DECLARE("GDfp3", offsetof(struct GDWorkArea *, GDfp3)); + DECLARE("GDtop", offsetof(struct GDWorkArea *, GDtop)); + DECLARE("GDleft", offsetof(struct GDWorkArea *, GDleft)); + DECLARE("GDtopleft", offsetof(struct GDWorkArea *, GDtopleft)); + DECLARE("GDrowbytes", offsetof(struct GDWorkArea *, GDrowbytes)); + DECLARE("GDrowchar", offsetof(struct GDWorkArea *, GDrowchar)); + DECLARE("GDdepth", offsetof(struct GDWorkArea *, GDdepth)); + DECLARE("GDcollgn", offsetof(struct GDWorkArea *, GDcollgn)); + DECLARE("GDready", offsetof(struct GDWorkArea *, GDready)); + DECLARE("GDrowbuf1", offsetof(struct GDWorkArea *, GDrowbuf1)); + DECLARE("GDrowbuf2", offsetof(struct GDWorkArea *, GDrowbuf2)); +#endif + + DECLARE("dgLock", offsetof(struct diagWork *, dgLock)); + DECLARE("dgFlags", offsetof(struct diagWork *, dgFlags)); + DECLARE("dgMisc0", offsetof(struct diagWork *, dgMisc0)); + DECLARE("enaExpTrace", enaExpTrace); + DECLARE("enaExpTraceb", enaExpTraceb); + DECLARE("enaUsrFCall", enaUsrFCall); + DECLARE("enaUsrFCallb", enaUsrFCallb); + DECLARE("enaUsrPhyMp", enaUsrPhyMp); + DECLARE("enaUsrPhyMpb", enaUsrPhyMpb); + DECLARE("enaDiagSCs", enaDiagSCs); + DECLARE("enaDiagSCsb", enaDiagSCsb); + DECLARE("disLkType", disLkType); + DECLARE("disLktypeb", disLktypeb); + DECLARE("disLkThread", disLkThread); + DECLARE("disLkThreadb", disLkThreadb); + DECLARE("disLkNmSimp", disLkNmSimp); + DECLARE("disLkNmSimpb", disLkNmSimpb); + DECLARE("disLkMyLck", disLkMyLck); + DECLARE("disLkMyLckb", disLkMyLckb); + DECLARE("dgMisc1", offsetof(struct diagWork *, dgMisc1)); + DECLARE("dgMisc2", offsetof(struct diagWork *, dgMisc2)); + DECLARE("dgMisc3", offsetof(struct diagWork *, dgMisc3)); + DECLARE("dgMisc4", offsetof(struct diagWork *, dgMisc4)); + DECLARE("dgMisc5", offsetof(struct diagWork *, dgMisc5)); + + DECLARE("traceMask", offsetof(struct traceWork 
*, traceMask)); + DECLARE("traceCurr", offsetof(struct traceWork *, traceCurr)); + DECLARE("traceStart", offsetof(struct traceWork *, traceStart)); + DECLARE("traceEnd", offsetof(struct traceWork *, traceEnd)); + DECLARE("traceMsnd", offsetof(struct traceWork *, traceMsnd)); + + DECLARE("SACsize", sizeof(struct savectl)); + DECLARE("SACspot", 4096-sizeof(struct savectl)); + DECLARE("SACnext", offsetof(struct savectl *, sac_next)+4096-sizeof(struct savectl)); + DECLARE("SACvrswap", offsetof(struct savectl *, sac_vrswap)+4096-sizeof(struct savectl)); + DECLARE("SACalloc", offsetof(struct savectl *, sac_alloc)+4096-sizeof(struct savectl)); + DECLARE("SACflags", offsetof(struct savectl *, sac_flags)+4096-sizeof(struct savectl)); + + DECLARE("SAVprev", offsetof(struct savearea *, save_prev)); + DECLARE("SAVprefp", offsetof(struct savearea *, save_prev_float)); + DECLARE("SAVprevec", offsetof(struct savearea *, save_prev_vector)); + DECLARE("SAVphys", offsetof(struct savearea *, save_phys)); + DECLARE("SAVqfret", offsetof(struct savearea *, save_qfret)); + DECLARE("SAVact", offsetof(struct savearea *, save_act)); + DECLARE("SAVflags", offsetof(struct savearea *, save_flags)); + DECLARE("SAVlvlfp", offsetof(struct savearea *, save_level_fp)); + DECLARE("SAVlvlvec", offsetof(struct savearea *, save_level_vec)); + DECLARE("SAVsize", sizeof(struct savearea)); + + DECLARE("savesrr0", offsetof(struct savearea *, save_srr0)); + DECLARE("savesrr1", offsetof(struct savearea *, save_srr1)); + DECLARE("savecr", offsetof(struct savearea *, save_cr)); + DECLARE("savexer", offsetof(struct savearea *, save_xer)); + DECLARE("savelr", offsetof(struct savearea *, save_lr)); + DECLARE("savectr", offsetof(struct savearea *, save_ctr)); + DECLARE("savemq", offsetof(struct savearea *, save_mq)); + DECLARE("savecopyin", offsetof(struct savearea *, save_sr_copyin)); + DECLARE("savedar", offsetof(struct savearea *, save_dar)); + DECLARE("savedsisr", offsetof(struct savearea *, save_dsisr)); + 
DECLARE("saveexception", offsetof(struct savearea *, save_exception)); + DECLARE("savexfpscrpad", offsetof(struct savearea *, save_xfpscrpad)); + DECLARE("savexfpscr", offsetof(struct savearea *, save_xfpscr)); + DECLARE("savevrsave", offsetof(struct savearea *, save_vrsave)); + + DECLARE("saver0", offsetof(struct savearea *, save_r0)); + DECLARE("saver1", offsetof(struct savearea *, save_r1)); + DECLARE("saver2", offsetof(struct savearea *, save_r2)); + DECLARE("saver3", offsetof(struct savearea *, save_r3)); + DECLARE("saver4", offsetof(struct savearea *, save_r4)); + DECLARE("saver5", offsetof(struct savearea *, save_r5)); + DECLARE("saver6", offsetof(struct savearea *, save_r6)); + DECLARE("saver7", offsetof(struct savearea *, save_r7)); + DECLARE("saver8", offsetof(struct savearea *, save_r8)); + DECLARE("saver9", offsetof(struct savearea *, save_r9)); + DECLARE("saver10", offsetof(struct savearea *, save_r10)); + DECLARE("saver11", offsetof(struct savearea *, save_r11)); + DECLARE("saver12", offsetof(struct savearea *, save_r12)); + DECLARE("saver13", offsetof(struct savearea *, save_r13)); + DECLARE("saver14", offsetof(struct savearea *, save_r14)); + DECLARE("saver15", offsetof(struct savearea *, save_r15)); + DECLARE("saver16", offsetof(struct savearea *, save_r16)); + DECLARE("saver17", offsetof(struct savearea *, save_r17)); + DECLARE("saver18", offsetof(struct savearea *, save_r18)); + DECLARE("saver19", offsetof(struct savearea *, save_r19)); + DECLARE("saver20", offsetof(struct savearea *, save_r20)); + DECLARE("saver21", offsetof(struct savearea *, save_r21)); + DECLARE("saver22", offsetof(struct savearea *, save_r22)); + DECLARE("saver23", offsetof(struct savearea *, save_r23)); + DECLARE("saver24", offsetof(struct savearea *, save_r24)); + DECLARE("saver25", offsetof(struct savearea *, save_r25)); + DECLARE("saver26", offsetof(struct savearea *, save_r26)); + DECLARE("saver27", offsetof(struct savearea *, save_r27)); + DECLARE("saver28", 
offsetof(struct savearea *, save_r28)); + DECLARE("saver29", offsetof(struct savearea *, save_r29)); + DECLARE("saver30", offsetof(struct savearea *, save_r30)); + DECLARE("saver31", offsetof(struct savearea *, save_r31)); + + DECLARE("savefp0", offsetof(struct savearea *, save_fp0)); + DECLARE("savefp1", offsetof(struct savearea *, save_fp1)); + DECLARE("savefp2", offsetof(struct savearea *, save_fp2)); + DECLARE("savefp3", offsetof(struct savearea *, save_fp3)); + DECLARE("savefp4", offsetof(struct savearea *, save_fp4)); + DECLARE("savefp5", offsetof(struct savearea *, save_fp5)); + DECLARE("savefp6", offsetof(struct savearea *, save_fp6)); + DECLARE("savefp7", offsetof(struct savearea *, save_fp7)); + DECLARE("savefp8", offsetof(struct savearea *, save_fp8)); + DECLARE("savefp9", offsetof(struct savearea *, save_fp9)); + DECLARE("savefp10", offsetof(struct savearea *, save_fp10)); + DECLARE("savefp11", offsetof(struct savearea *, save_fp11)); + DECLARE("savefp12", offsetof(struct savearea *, save_fp12)); + DECLARE("savefp13", offsetof(struct savearea *, save_fp13)); + DECLARE("savefp14", offsetof(struct savearea *, save_fp14)); + DECLARE("savefp15", offsetof(struct savearea *, save_fp15)); + DECLARE("savefp16", offsetof(struct savearea *, save_fp16)); + DECLARE("savefp17", offsetof(struct savearea *, save_fp17)); + DECLARE("savefp18", offsetof(struct savearea *, save_fp18)); + DECLARE("savefp19", offsetof(struct savearea *, save_fp19)); + DECLARE("savefp20", offsetof(struct savearea *, save_fp20)); + DECLARE("savefp21", offsetof(struct savearea *, save_fp21)); + DECLARE("savefp22", offsetof(struct savearea *, save_fp22)); + DECLARE("savefp23", offsetof(struct savearea *, save_fp23)); + DECLARE("savefp24", offsetof(struct savearea *, save_fp24)); + DECLARE("savefp25", offsetof(struct savearea *, save_fp25)); + DECLARE("savefp26", offsetof(struct savearea *, save_fp26)); + DECLARE("savefp27", offsetof(struct savearea *, save_fp27)); + DECLARE("savefp28", 
offsetof(struct savearea *, save_fp28)); + DECLARE("savefp29", offsetof(struct savearea *, save_fp29)); + DECLARE("savefp30", offsetof(struct savearea *, save_fp30)); + DECLARE("savefp31", offsetof(struct savearea *, save_fp31)); + DECLARE("savefpscrpad", offsetof(struct savearea *, save_fpscr_pad)); + DECLARE("savefpscr", offsetof(struct savearea *, save_fpscr)); + + DECLARE("savesr0", offsetof(struct savearea *, save_sr0)); + DECLARE("savesr1", offsetof(struct savearea *, save_sr1)); + DECLARE("savesr2", offsetof(struct savearea *, save_sr2)); + DECLARE("savesr3", offsetof(struct savearea *, save_sr3)); + DECLARE("savesr4", offsetof(struct savearea *, save_sr4)); + DECLARE("savesr5", offsetof(struct savearea *, save_sr5)); + DECLARE("savesr6", offsetof(struct savearea *, save_sr6)); + DECLARE("savesr7", offsetof(struct savearea *, save_sr7)); + DECLARE("savesr8", offsetof(struct savearea *, save_sr8)); + DECLARE("savesr9", offsetof(struct savearea *, save_sr9)); + DECLARE("savesr10", offsetof(struct savearea *, save_sr10)); + DECLARE("savesr11", offsetof(struct savearea *, save_sr11)); + DECLARE("savesr12", offsetof(struct savearea *, save_sr12)); + DECLARE("savesr13", offsetof(struct savearea *, save_sr13)); + DECLARE("savesr14", offsetof(struct savearea *, save_sr14)); + DECLARE("savesr15", offsetof(struct savearea *, save_sr15)); + + DECLARE("savevr0", offsetof(struct savearea *, save_vr0)); + DECLARE("savevr1", offsetof(struct savearea *, save_vr1)); + DECLARE("savevr2", offsetof(struct savearea *, save_vr2)); + DECLARE("savevr3", offsetof(struct savearea *, save_vr3)); + DECLARE("savevr4", offsetof(struct savearea *, save_vr4)); + DECLARE("savevr5", offsetof(struct savearea *, save_vr5)); + DECLARE("savevr6", offsetof(struct savearea *, save_vr6)); + DECLARE("savevr7", offsetof(struct savearea *, save_vr7)); + DECLARE("savevr8", offsetof(struct savearea *, save_vr8)); + DECLARE("savevr9", offsetof(struct savearea *, save_vr9)); + DECLARE("savevr10", 
offsetof(struct savearea *, save_vr10)); + DECLARE("savevr11", offsetof(struct savearea *, save_vr11)); + DECLARE("savevr12", offsetof(struct savearea *, save_vr12)); + DECLARE("savevr13", offsetof(struct savearea *, save_vr13)); + DECLARE("savevr14", offsetof(struct savearea *, save_vr14)); + DECLARE("savevr15", offsetof(struct savearea *, save_vr15)); + DECLARE("savevr16", offsetof(struct savearea *, save_vr16)); + DECLARE("savevr17", offsetof(struct savearea *, save_vr17)); + DECLARE("savevr18", offsetof(struct savearea *, save_vr18)); + DECLARE("savevr19", offsetof(struct savearea *, save_vr19)); + DECLARE("savevr20", offsetof(struct savearea *, save_vr20)); + DECLARE("savevr21", offsetof(struct savearea *, save_vr21)); + DECLARE("savevr22", offsetof(struct savearea *, save_vr22)); + DECLARE("savevr23", offsetof(struct savearea *, save_vr23)); + DECLARE("savevr24", offsetof(struct savearea *, save_vr24)); + DECLARE("savevr25", offsetof(struct savearea *, save_vr25)); + DECLARE("savevr26", offsetof(struct savearea *, save_vr26)); + DECLARE("savevr27", offsetof(struct savearea *, save_vr27)); + DECLARE("savevr28", offsetof(struct savearea *, save_vr28)); + DECLARE("savevr29", offsetof(struct savearea *, save_vr29)); + DECLARE("savevr30", offsetof(struct savearea *, save_vr30)); + DECLARE("savevr31", offsetof(struct savearea *, save_vr31)); + DECLARE("savevscr", offsetof(struct savearea *, save_vscr)); + DECLARE("savevrvalid", offsetof(struct savearea *, save_vrvalid)); + + /* PseudoKernel Exception Descriptor info */ + DECLARE("BEDA_SRR0", offsetof(BEDA_t *, srr0)); + DECLARE("BEDA_SRR1", offsetof(BEDA_t *, srr1)); + DECLARE("BEDA_SPRG0", offsetof(BEDA_t *, sprg0)); + DECLARE("BEDA_SPRG1", offsetof(BEDA_t *, sprg1)); + + /* PseudoKernel Interrupt Control Word */ + DECLARE("BTTD_INTCONTROLWORD", offsetof(BTTD_t *, InterruptControlWord)); + + /* New state when exiting the pseudokernel */ + DECLARE("BTTD_NEWEXITSTATE", offsetof(BTTD_t *, NewExitState)); + + /* 
PseudoKernel Test/Post Interrupt */ + DECLARE("BTTD_TESTINTMASK", offsetof(BTTD_t *, testIntMask)); + DECLARE("BTTD_POSTINTMASK", offsetof(BTTD_t *, postIntMask)); + + /* PseudoKernel Vectors */ + DECLARE("BTTD_TRAP_VECTOR", offsetof(BTTD_t *, TrapVector)); + DECLARE("BTTD_SYSCALL_VECTOR", offsetof(BTTD_t *, SysCallVector)); + DECLARE("BTTD_INTERRUPT_VECTOR", offsetof(BTTD_t *, InterruptVector)); + DECLARE("BTTD_PENDINGINT_VECTOR", offsetof(BTTD_t *, PendingIntVector)); + + /* PseudoKernel Bits, Masks and misc */ + DECLARE("SYSCONTEXTSTATE", kInSystemContext); + DECLARE("PSEUDOKERNELSTATE", kInPseudoKernel); + DECLARE("INTSTATEMASK_B", 12); + DECLARE("INTSTATEMASK_E", 15); + DECLARE("INTCR2MASK_B", 8); + DECLARE("INTCR2MASK_E", 11); + DECLARE("INTBACKUPCR2MASK_B", 28); + DECLARE("INTBACKUPCR2MASK_E", 31); + DECLARE("INTCR2TOBACKUPSHIFT", kCR2ToBackupShift); + DECLARE("BB_MAX_TRAP", bbMaxTrap); + DECLARE("BB_RFI_TRAP", bbRFITrap); + + /* Various hackery */ + DECLARE("procState", offsetof(struct processor *, state)); + + DECLARE("CPU_SUBTYPE_POWERPC_ALL", CPU_SUBTYPE_POWERPC_ALL); + DECLARE("CPU_SUBTYPE_POWERPC_601", CPU_SUBTYPE_POWERPC_601); + DECLARE("CPU_SUBTYPE_POWERPC_602", CPU_SUBTYPE_POWERPC_602); + DECLARE("CPU_SUBTYPE_POWERPC_603", CPU_SUBTYPE_POWERPC_603); + DECLARE("CPU_SUBTYPE_POWERPC_603e", CPU_SUBTYPE_POWERPC_603e); + DECLARE("CPU_SUBTYPE_POWERPC_603ev", CPU_SUBTYPE_POWERPC_603ev); + DECLARE("CPU_SUBTYPE_POWERPC_604", CPU_SUBTYPE_POWERPC_604); + DECLARE("CPU_SUBTYPE_POWERPC_604e", CPU_SUBTYPE_POWERPC_604e); + DECLARE("CPU_SUBTYPE_POWERPC_620", CPU_SUBTYPE_POWERPC_620); + DECLARE("CPU_SUBTYPE_POWERPC_750", CPU_SUBTYPE_POWERPC_750); + DECLARE("CPU_SUBTYPE_POWERPC_7400", CPU_SUBTYPE_POWERPC_7400); + DECLARE("CPU_SUBTYPE_POWERPC_7450", CPU_SUBTYPE_POWERPC_7450); + + + return(0); /* For ANSI C :-) */ + + + +} diff --git a/osfmk/ppc/hardclock_entries.h b/osfmk/ppc/hardclock_entries.h new file mode 100644 index 000000000..ab499201d --- /dev/null +++ 
b/osfmk/ppc/hardclock_entries.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _HARDCLOCK_ENTRIES_H_ +#define _HARDCLOCK_ENTRIES_H_ + +extern void hardclock(struct ppc_saved_state*); + +#endif /* _HARDCLOCK_ENTRIES_H_ */ diff --git a/osfmk/ppc/hexfont.h b/osfmk/ppc/hexfont.h new file mode 100644 index 000000000..91b3caf3e --- /dev/null +++ b/osfmk/ppc/hexfont.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* 0123456789ABCDEF */ + +hexfont: .short 0x0000 /* 0b0000000000000000 */ + .short 0x07C0 /* 0b0000011111000000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x3C78 /* 0b0011110001111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x701C /* 0b0111000000011100 */ + .short 0x701C /* 0b0111000000011100 */ + .short 0x701C /* 0b0111000000011100 */ + .short 0x701C /* 0b0111000000011100 */ + .short 0x701C /* 0b0111000000011100 */ + .short 0x701C /* 0b0111000000011100 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3C78 /* 0b0011110001111000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x07C0 /* 0b0000011111000000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0080 /* 0b0000000010000000 */ + .short 0x0180 /* 0b0000000110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0780 /* 0b0000011110000000 */ + .short 0x0F80 /* 0b0000111110000000 */ + .short 0x1F80 /* 0b0001111110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x3838 /* 
0b0011100000111000 */ + .short 0x7038 /* 0b0111000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x00F0 /* 0b0000000011110000 */ + .short 0x01E0 /* 0b0000000111100000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0F00 /* 0b0000111100000000 */ + .short 0x1C00 /* 0b0001110000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x7FFC /* 0b0111111111111100 */ + .short 0x7FFC /* 0b0111111111111100 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x00F0 /* 0b0000000011110000 */ + .short 0x00F0 /* 0b0000000011110000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0020 /* 0b0000000000100000 */ + .short 0x0060 /* 0b0000000001100000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x01E0 /* 0b0000000111100000 */ + .short 0x03E0 /* 0b0000001111100000 */ + .short 0x07E0 /* 0b0000011111100000 */ + .short 0x0EE0 /* 0b0000111011100000 */ + .short 0x1CE0 /* 0b0001110011100000 */ + .short 0x3FF8 /* 0b0011111111111000 */ + .short 0x7FF8 /* 0b0111111111111000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x3FF8 /* 0b0011111111111000 */ + .short 0x3FF8 /* 0b0011111111111000 */ + .short 0x3800 /* 0b0011100000000000 */ + 
.short 0x3800 /* 0b0011100000000000 */ + .short 0x3FC0 /* 0b0011111111000000 */ + .short 0x3FF0 /* 0b0011111111110000 */ + .short 0x3870 /* 0b0011100001110000 */ + .short 0x3038 /* 0b0011000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3FF0 /* 0b0011111111110000 */ + .short 0x0FC0 /* 0b0000111111000000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0070 /* 0b0000000001110000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x01C0 /* 0b0000000111000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0700 /* 0b0000011100000000 */ + .short 0x0E00 /* 0b0000111000000000 */ + .short 0x1C00 /* 0b0001110000000000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3FF8 /* 0b0011111111111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x003C /* 0b0000000000111000 */ + .short 0x0038 /* 0b0000000000111000 */ + .short 0x0070 /* 0b0000000001110000 */ + .short 0x0070 /* 0b0000000001110000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x01C0 /* 0b0000000111000000 */ + .short 0x01C0 /* 0b0000000111000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0700 /* 0b0000011100000000 */ + .short 0x0700 /* 0b0000011100000000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 
0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x1FF8 /* 0b0001111111111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x383C /* 0b0011100000011100 */ + .short 0x383C /* 0b0011100000011100 */ + .short 0x1FFC /* 0b0001111111111100 */ + .short 0x0FF8 /* 0b0000111111111000 */ + .short 0x0078 /* 0b0000000001111000 */ + .short 0x0070 /* 0b0000000001110000 */ + .short 0x00E0 /* 0b0000000011100000 */ + .short 0x01C0 /* 0b0000000111000000 */ + .short 0x0380 /* 0b0000001110000000 */ + .short 0x0700 /* 0b0000011100000000 */ + .short 0x0E00 /* 0b0000111000000000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x07E0 /* 0b0000011111100000 */ + .short 0x0FF0 /* 0b0000111111110000 */ + .short 0x1C38 /* 0b0001110000111000 */ + .short 0x1C38 /* 0b0001110000111000 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3FF0 /* 0b0011111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + 
.short 0x3838 /* 0b0011100000111000 */ + .short 0x3FF8 /* 0b0011111111111000 */ + .short 0x3FF0 /* 0b0011111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x383C /* 0b0011100000111100 */ + .short 0x3FF8 /* 0b0011111111111000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x7000 /* 0b0111000000000000 */ + .short 0x7000 /* 0b0111000000000000 */ + .short 0x7000 /* 0b0111000000000000 */ + .short 0x7000 /* 0b0111000000000000 */ + .short 0x7000 /* 0b0111000000000000 */ + .short 0x7000 /* 0b0111000000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x1FF0 /* 0b0001111111110000 */ + .short 0x0FE0 /* 0b0000111111100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3FF0 /* 0b0011111111110000 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x381C /* 0b0011100000011100 */ + .short 0x3838 /* 0b0011100000111000 */ + .short 0x3FF0 /* 0b0011111111110000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 
0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x0000 /* 0b0000000000000000 */ + + .short 0x0000 /* 0b0000000000000000 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3FFC /* 0b0011111111111100 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3FE0 /* 0b0011111111100000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x3800 /* 0b0011100000000000 */ + .short 0x0000 /* 0b0000000000000000 */ + diff --git a/osfmk/ppc/hw_counters.h b/osfmk/ppc/hw_counters.h new file mode 100644 index 000000000..8a7a5d321 --- /dev/null +++ b/osfmk/ppc/hw_counters.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * Hardware specific performance counters + */ +#ifndef _HW_COUNTERS_H_ +#define _HW_COUNTERS_H_ + +#ifndef __ppc__ +#error This file is only useful on PowerPC. +#endif + + +typedef struct hw_counters { + + unsigned int hw_InVains; /* In vain */ + unsigned int hw_Resets; /* Reset */ + unsigned int hw_MachineChecks; /* Machine check */ + unsigned int hw_DSIs; /* DSIs */ + unsigned int hw_ISIs; /* ISIs */ + unsigned int hw_Externals; /* Externals */ + unsigned int hw_Alignments; /* Alignment */ + unsigned int hw_Programs; /* Program */ + unsigned int hw_FloatPointUnavailable; /* Floating point */ + unsigned int hw_Decrementers; /* Decrementer */ + unsigned int hw_IOErrors; /* I/O error */ + unsigned int hw_rsvd0; /* Reserved */ + unsigned int hw_SystemCalls; /* System call */ + unsigned int hw_Traces; /* Trace */ + unsigned int hw_FloatingPointAssists; /* Floating point assist */ + unsigned int hw_PerformanceMonitors; /* Performance monitor */ + unsigned int hw_Altivecs; /* VMX */ + unsigned int hw_rsvd1; /* Reserved */ + unsigned int hw_rsvd2; /* Reserved */ + unsigned int hw_rsvd3; /* Reserved */ + unsigned int hw_InstBreakpoints; /* Instruction breakpoint */ + unsigned int hw_SystemManagements; /* System management */ + unsigned int hw_rsvd4; /* Reserved */ + unsigned int hw_AltivecAssists; /* Altivec Assist */ + unsigned int hw_rsvd6; /* Reserved */ + unsigned int hw_rsvd7; /* Reserved */ + unsigned int hw_rsvd8; /* Reserved */ + unsigned int hw_rsvd9; /* Reserved 
*/ + unsigned int hw_rsvd10; /* Reserved */ + unsigned int hw_rsvd11; /* Reserved */ + unsigned int hw_rsvd12; /* Reserved */ + unsigned int hw_rsvd13; /* Reserved */ + unsigned int hw_rsvd14; /* Reserved */ + unsigned int hw_Trace601; /* Trace */ + unsigned int hw_SIGPs; /* SIGP */ + unsigned int hw_Preemptions; /* Preemption */ + unsigned int hw_ContextSwitchs; /* Context switch */ + + unsigned int hw_spare[27]; /* Pad to 256 bytes */ + +} hw_counters; + +extern hw_counters hw_counts(NCPUS); + +#endif /* _HW_COUNTERS_H_ */ diff --git a/osfmk/ppc/hw_exception.s b/osfmk/ppc/hw_exception.s new file mode 100644 index 000000000..af24433e1 --- /dev/null +++ b/osfmk/ppc/hw_exception.s @@ -0,0 +1,1842 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* Low level routines dealing with exception entry and exit. + * There are various types of exception: + * + * Interrupt, trap, system call and debugger entry. Each has it's own + * handler since the state save routine is different for each. The + * code is very similar (a lot of cut and paste). 
+ * + * The code for the FPU disabled handler (lazy fpu) is in cswtch.s + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + + +#define VERIFYSAVE 0 +#define FPVECDBG 0 + +/* + * thandler(type) + * + * ENTRY: VM switched ON + * Interrupts OFF + * R3 contains exception code + * R4 points to the saved context (virtual address) + * Everything is saved in savearea + */ + +/* + * If pcb.ksp == 0 then the kernel stack is already busy, + * we save ppc_saved state below the current stack pointer, + * leaving enough space for the 'red zone' in case the + * trapped thread was in the middle of saving state below + * its stack pointer. + * + * otherwise we save a ppc_saved_state in the pcb, and switch to + * the kernel stack (setting pcb.ksp to 0) + * + * on return, we do the reverse, the last state is popped from the pcb + * and pcb.ksp is set to the top of stack + */ + +#if DEBUG + +/* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when + * another trap is taken. We need at least enough space for a saved state + * structure plus two small backpointer frames, and we add a few + * hundred bytes for the space needed by the C (which may be less but + * may be much more). We're trying to catch kernel stack overflows :-) + */ + +#define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256 + +#endif /* DEBUG */ + + .text + +ENTRY(thandler, TAG_NO_FRAME_USED) /* What tag should this have?! */ + + mfsprg r25,0 /* Get the per_proc */ + + lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer + + + lwz r6,PP_CPU_DATA(r25) /* Get point to cpu specific data */ + cmpwi cr0,r1,0 ; Are we on interrupt stack? + lwz r6,CPU_ACTIVE_THREAD(r6) /* Get the pointer to the currently active thread */ + beq- cr0,EXT(ihandler) ; Yes, not allowed except when debugger + ; is active. We will let the ihandler do this... 
+ lwz r13,THREAD_TOP_ACT(r6) /* Point to the active activation */ + lwz r26,ACT_MACT_BEDA(r13) /* Pick up the pointer to the blue box data area */ + lwz r8,ACT_MACT_PCB(r13) /* Get the last savearea used */ + mr. r26,r26 /* Do we have Blue Box Assist active? */ + lwz r1,ACT_MACT_KSP(r13) /* Get the stack */ + bnel- checkassist /* See if we should assist this */ + stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */ + stw r8,SAVprev(r4) /* Queue the new save area in the front */ + +#if VERIFYSAVE + bl versave ; (TEST/DEBUG) +#endif + + cmpwi cr1,r1, 0 /* zero implies already on kstack */ + stw r13,SAVact(r4) /* Point the savearea at its activation */ + bne cr1,.L_kstackfree /* This test is also used below */ + lwz r1,saver1(r4) /* Get the stack at 'rupt time */ + + +/* On kernel stack, allocate stack frame and check for overflow */ +#if DEBUG +/* + * Test if we will overflow the Kernel Stack. We + * check that there is at least TRAP_SPACE_NEEDED bytes + * free on the kernel stack +*/ + + lwz r7,THREAD_KERNEL_STACK(r6) + addi r7,r7,TRAP_SPACE_NEEDED + cmp cr0,r1,r7 + bng- EXT(ihandler) +#endif /* DEBUG */ + + subi r1,r1,FM_REDZONE /* Back up stack and leave room for a red zone */ + +.L_kstackfree: +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + lwz r7,savesrr1(r4) /* Pick up the entry MSR */ + li r0,0 /* Make this 0 */ + + beq cr1,.L_state_on_kstack /* using above test for pcb/stack */ + + stw r0,ACT_MACT_KSP(r13) /* Show that we have taken the stack */ + +.L_state_on_kstack: + rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? + lwz r6,saver1(r4) /* Grab interrupt time stack */ + beq+ tvecoff ; Vector off, do not save vrsave... + lwz r3,savevrsave(r4) ; Get the VRSAVE register + stw r3,liveVRS(r25) ; Set the live value + +tvecoff: rlwinm. r3,r7,0,MSR_FP_BIT,MSR_FP_BIT ; Was floating point on? 
+ subi r1,r1,FM_SIZE /* Push a header onto the current stack */ + beq+ tfpoff /* Floating point was off... */ + lwz r3,savexfpscr(r4) ; Grab the just saved FPSCR + stw r3,liveFPSCR(r25) ; Make it the live copy + +tfpoff: stw r6,FM_BACKPTR(r1) /* Link backwards */ + +#if DEBUG +/* If debugging, we need two frames, the first being a dummy + * which links back to the trapped routine. The second is + * that which the C routine below will need + */ + lwz r3,savesrr0(r4) /* Get the point of interruption */ + stw r3,FM_LR_SAVE(r1) /* save old instr ptr as LR value */ + stwu r1, -FM_SIZE(r1) /* and make new frame */ +#endif /* DEBUG */ + + +/* call trap handler proper, with + * ARG0 = type (not yet, holds pcb ptr) + * ARG1 = saved_state ptr (already there) + * ARG2 = dsisr (already there) + * ARG3 = dar (already there) + */ + + + lwz r3,saveexception(r4) /* Get the exception code */ + lwz r0,ACT_MACT_SPF(r13) ; Get the special flags + + addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range + rlwinm. r0,r0,0,runningVMbit,runningVMbit ; Are we in VM state? (cr0_eq == 0 if yes) + cmplwi cr2,r5,T_RUNMODE_TRACE-T_DATA_ACCESS ; Are we still in range? (cr_gt if not) + + lwz r5,savedsisr(r4) /* Get the saved DSISR */ + + crnor cr7_eq,cr0_eq,cr2_gt ; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes) + rlwinm. r0,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes) + + cmpi cr2,r3,T_PREEMPT ; Is this a preemption? + + crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes) + + lwz r6,savedar(r4) /* Get the DAR */ + + beq- cr2, .L_call_trap /* Don't turn on interrupts for T_PREEMPT */ + beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM... 
+ +/* syscall exception might warp here if there's nothing left + * to do except generate a trap + */ + +.L_call_trap: +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + + bl EXT(trap) + +/* + * Ok, return from C function + * + * This is also the point where new threads come when they are created. + * The new thread is setup to look like a thread that took an + * interrupt and went immediatly into trap. + * + */ + +thread_return: + + mfmsr r7 /* Get the MSR */ + lwz r4,SAVprev(r3) /* Pick up the previous savearea */ + rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear the interrupt enable mask */ + lwz r11,SAVflags(r3) /* Get the flags of the current savearea */ + mtmsr r7 /* Disable for interrupts */ + + mfsprg r10,0 /* Restore the per_proc info */ + + lwz r8,savesrr1(r3) ; Get the MSR we are going to + lwz r1,PP_CPU_DATA(r10) /* Get the CPU data area */ + rlwinm r11,r11,0,15,13 /* Clear the syscall flag */ + lwz r1,CPU_ACTIVE_THREAD(r1) /* and the active thread */ + rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user? + lwz r8,THREAD_TOP_ACT(r1) /* Now find the current activation */ + stw r11,SAVflags(r3) /* Save back the flags (with reset stack cleared) */ + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + stw r4,ACT_MACT_PCB(r8) /* Point to the previous savearea (or 0 if none) */ + + beq- chkfac ; We are not leaving the kernel yet... + + lwz r5,THREAD_KERNEL_STACK(r1) /* Get the base pointer to the stack */ + addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE /* Reset to empty */ + stw r5,ACT_MACT_KSP(r8) /* Save the empty stack pointer */ + b chkfac /* Go end it all... 
*/ + + + +/* + * shandler(type) + * + * ENTRY: VM switched ON + * Interrupts OFF + * R3 contains exception code + * R4 points to the saved context (virtual address) + * Everything is saved in savearea + */ + +/* + * If pcb.ksp == 0 then the kernel stack is already busy, + * this is an error - jump to the debugger entry + * + * otherwise depending upon the type of + * syscall, look it up in the kernel table + * or pass it to the server. + * + * on return, we do the reverse, the state is popped from the pcb + * and pcb.ksp is set to the top of stack. + */ + +/* + * NOTE: + * mach system calls are negative + * BSD system calls are low positive + * PPC-only system calls are in the range 0x6xxx + * PPC-only "fast" traps are in the range 0x7xxx + */ + +ENTRY(shandler, TAG_NO_FRAME_USED) + + mfsprg r25,0 /* Get the per proc area */ + lwz r0,saver0(r4) /* Get the original syscall number */ + lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer + rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check + lwz r16,PP_CPU_DATA(r25) /* Assume we need this */ + mr. r17,r17 ; Are we on interrupt stack? + lwz r7,savesrr1(r4) ; Get the SRR1 value + beq- EXT(ihandler) ; On interrupt stack, not allowed... + rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? + lwz r16,CPU_ACTIVE_THREAD(r16) /* Get the thread pointer */ + + beq+ svecoff ; Vector off, do not save vrsave... + lwz r7,savevrsave(r4) ; Get the VRSAVE register + stw r7,liveVRS(r25) ; Set the live value + +svecoff: rlwinm. r6,r7,0,MSR_FP_BIT,MSR_FP_BIT ; Was floating point on? + lwz r13,THREAD_TOP_ACT(r16) /* Pick up the active thread */ + beq+ sfpoff ; Skip if floating point is off... 
+ lwz r9,savexfpscr(r4) ; Grab the just saved FPSCR + stw r9,liveFPSCR(r25) ; Make it the live copy + +; Check if SCs are being redirected for the BlueBox or to VMM + +sfpoff: lwz r6,ACT_MACT_SPF(r13) ; Pick up activation special flags + lwz r26,ACT_MACT_BEDA(r13) ; Pick up the pointer to the blue box exception area + rlwinm. r9,r6,0,runningVMbit,runningVMbit ; Are we running in alternate context right now? + cmpwi cr1,r26,0 ; Do we have Blue Box Assist active? + crmove cr2_eq,cr0_eq ; Remember if we are in VMM + rlwinm. r6,r6,0,bbNoMachSCbit,bbNoMachSCbit ; Are mach SCs redirected for this thread + beq+ noassist ; Take branch if SCs are not redirected + beq- cr1,noassist ; No assist for non-bluebox threads + mr r9,r13 ; Setup ACT for atomic_switch api + b EXT(atomic_switch_syscall) ; Go to the assist... + +noassist: + cmplwi r15,0x7000 /* Do we have a fast path trap? */ + lwz r14,ACT_MACT_PCB(r13) /* Now point to the PCB */ + beql+ fastpath /* We think it's a fastpath... */ + + lwz r1,ACT_MACT_KSP(r13) /* Get the kernel stack pointer */ +#if DEBUG + mr. r1,r1 /* Are we already on the kernel stack? */ + li r3,T_SYSTEM_CALL /* Yup, pretend we had an interrupt... */ + beq- EXT(ihandler) /* Bad boy, bad boy... What'cha gonna do when they come for you? */ +#endif /* DEBUG */ + + stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */ + li r0,0 /* Clear this out */ + stw r14,SAVprev(r4) /* Queue the new save area in the front */ + stw r13,SAVact(r4) /* Point the savearea at its activation */ + +#if VERIFYSAVE + bl versave ; (TEST/DEBUG) +#endif + + mr r30,r4 /* Save pointer to the new context savearea */ + lwz r15,saver1(r4) /* Grab interrupt time stack */ + stw r0,ACT_MACT_KSP(r13) /* Mark stack as busy with 0 val */ + stw r15,FM_BACKPTR(r1) /* Link backwards */ + +#if DEBUG + /* If debugging, we need two frames, the first being a dummy + * which links back to the trapped routine. 
The second is + * that which the C routine below will need + */ + lwz r8,savesrr0(r30) /* Get the point of interruption */ + stw r8,FM_LR_SAVE(r1) /* save old instr ptr as LR value */ + stwu r1, -FM_SIZE(r1) /* and make new frame */ +#endif /* DEBUG */ + + mfmsr r11 /* Get the MSR */ + lwz r15,SAVflags(r4) /* Get the savearea flags */ + ori r11,r11,lo16(MASK(MSR_EE)) /* Turn on interruption enabled bit */ + lwz r0,saver0(r30) ; Get R0 back + oris r15,r15,SAVsyscall >> 16 /* Mark that it this is a syscall */ + rlwinm r10,r0,0,0,19 ; Keep only the top part + stwu r1,-(FM_SIZE+ARG_SIZE)(r1) /* Make a stack frame */ + cmplwi r10,0x6000 ; Is it the special ppc-only guy? + stw r15,SAVflags(r30) /* Save syscall marker */ + bne- cr2,exitFromVM ; It is time to exit from alternate context... + + beq- ppcscall ; Call the ppc-only system call handler... + + mtmsr r11 /* Enable interruptions */ + + /* Call a function that can print out our syscall info */ + /* Note that we don't care about any volatiles yet */ + mr r4,r30 + bl EXT(syscall_trace) + + lwz r0,saver0(r30) /* Get the system call selector */ + mr. r0,r0 /* What kind is it? */ + blt- .L_kernel_syscall /* -ve syscall - go to kernel */ + /* +ve syscall - go to server */ + cmpwi cr0,r0,0x7FFA + beq- .L_notify_interrupt_syscall + +#ifdef MACH_BSD + mr r3,r30 /* Get PCB/savearea */ + lwz r4,saver4(r30) /* Restore r4 */ + lwz r5,saver5(r30) /* Restore r5 */ + lwz r6,saver6(r30) /* Restore r6 */ + lwz r7,saver7(r30) /* Restore r7 */ + lwz r8,saver8(r30) /* Restore r8 */ + lwz r9,saver9(r30) /* Restore r9 */ + lwz r10,saver10(r30) /* Restore r10 */ + bl EXT(unix_syscall) /* Check out unix... */ +#endif + +.L_call_server_syscall_exception: + li r3,EXC_SYSCALL /* doexception(EXC_SYSCALL, num, 1) */ + +.L_call_server_exception: + mr r4,r0 /* Set syscall selector */ + li r5,1 + b EXT(doexception) /* Go away, never to return... 
*/ + +/* The above, but with EXC_MACH_SYSCALL */ +.L_call_server_mach_syscall: + li r3,EXC_MACH_SYSCALL + b .L_call_server_exception /* Join the common above... */ + +.L_notify_interrupt_syscall: + lwz r3,saver3(r30) ; Get the new PC address to pass in + bl EXT(syscall_notify_interrupt) + b .L_syscall_return + +; +; Handle PPC-only system call interface +; These are called with interruptions disabled +; and the savearea/pcb as the first parameter. +; It is up to the callee to enable interruptions if +; they should be. We are in a state here where +; both interrupts and preemption is ok, but because we could +; be calling diagnostic code we will not enable. +; +; Also, the callee is responsible for finding any parameters +; in the savearea/pcb. It also must set saver3 with any return +; code before returning. +; +; There are 3 possible return codes: +; 0 the call is disabled or something, we treat this like it was bogus +; + the call finished ok, check for AST +; - the call finished ok, do not check for AST +; +; Note: the last option is intended for special diagnostics calls that +; want the thread to return and execute before checking for preemption. +; + +ppcscall: rlwinm r11,r0,2,18,29 ; Make an index into the table + lis r10,hi16(EXT(PPCcalls)) ; Get PPC-only system call table + cmplwi r11,PPCcallmax ; See if we are too big + ori r10,r10,lo16(EXT(PPCcalls)) ; Merge in low half + bgt- .L_call_server_syscall_exception ; Bogus call... + lwzx r11,r10,r11 ; Get function address + +; +; Note: make sure we do not change the savearea in R30 to +; a different register without checking. Some of the PPCcalls +; depend upon it being there. +; + + mr r3,r30 ; Pass the savearea + mr r4,r13 ; Pass the activation + mr. r11,r11 ; See if there is a function here + mtlr r11 ; Set the function address + beq- .L_call_server_syscall_exception ; Disabled call... + blrl ; Call it + + + .globl EXT(ppcscret) +LEXT(ppcscret) + mr. 
r3,r3 ; See what we should do + mr r31,r16 ; Restore the current thread pointer + bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return.... + blt+ .L_thread_syscall_return ; Return, but no ASTs.... + lwz r0,saver0(r30) ; Restore the system call number + b .L_call_server_syscall_exception ; Go to common exit... + + +/* Once here, we know that the syscall was -ve + * we should still have r1=ksp, + * r16 = pointer to current thread, + * r13 = pointer to top activation, + * r0 = syscall number + * r30 = pointer to saved state (in pcb) + */ +.L_kernel_syscall: + neg r31, r0 /* Make number +ve and put in r31*/ + + /* If out of range, call server with syscall exception */ + addis r29, 0, HIGH_CADDR(EXT(mach_trap_count)) + addi r29, r29, LOW_ADDR(EXT(mach_trap_count)) + lwz r29, 0(r29) + + cmp cr0, r31, r29 + bge- cr0, .L_call_server_syscall_exception + + addis r29, 0, HIGH_CADDR(EXT(mach_trap_table)) + addi r29, r29, LOW_ADDR(EXT(mach_trap_table)) + + /* multiply the trap number to get offset into table */ + slwi r31, r31, MACH_TRAP_OFFSET_POW2 + + /* r31 now holds offset into table of our trap entry, + * add on the table base, and it then holds pointer to entry + */ + add r31, r31, r29 + + /* If the function is kern_invalid, prepare to send an exception. + This is messy, but parallels the x86. We need it for task_by_pid, + at least. */ + lis r29, HIGH_CADDR(EXT(kern_invalid)) + addi r29, r29, LOW_ADDR(EXT(kern_invalid)) + lwz r0, MACH_TRAP_FUNCTION(r31) + cmp cr0, r0, r29 + beq- .L_call_server_syscall_exception + + /* get arg count. If argc > 8 then not all args were in regs, + * so we must perform copyin. + */ + lwz r29, MACH_TRAP_ARGC(r31) + cmpwi cr0, r29, 8 + ble+ .L_syscall_got_args + +/* argc > 8 - perform a copyin */ +/* if the syscall came from kernel space, we can just copy */ + + lwz r0,savesrr1(r30) /* Pick up exception time MSR */ + andi. r0,r0,MASK(MSR_PR) /* Check the priv bit */ + bne+ .L_syscall_arg_copyin /* We're not priviliged... 
*/ + +/* we came from a privilaged task, just do a copy */ +/* get user's stack pointer */ + + lwz r28,saver1(r30) /* Get the stack pointer */ + + subi r29,r29,8 /* Get the number of arguments to copy */ + + addi r28,r28,COPYIN_ARG0_OFFSET-4 /* Point to source - 4 */ + addi r27,r1,FM_ARG0-4 /* Point to sink - 4 */ + +.L_syscall_copy_word_loop: + addic. r29,r29,-1 /* Count down the number of arguments left */ + lwz r0,4(r28) /* Pick up the argument from the stack */ + addi r28,r28,4 /* Point to the next source */ + stw r0,4(r27) /* Store the argument */ + addi r27,r27,4 /* Point to the next sink */ + bne+ .L_syscall_copy_word_loop /* Move all arguments... */ + b .L_syscall_got_args /* Go call it now... */ + + +/* we came from a user task, pay the price of a real copyin */ +/* set recovery point */ + +.L_syscall_arg_copyin: + lwz r8,ACT_VMMAP(r13) ; Get the vm_map for this activation + lis r28,hi16(.L_syscall_copyin_recover) + lwz r8,VMMAP_PMAP(r8) ; Get the pmap + ori r28,r28,lo16(.L_syscall_copyin_recover) + addi r8,r8,PMAP_SEGS ; Point to the pmap SR slots + stw r28,THREAD_RECOVER(r16) /* R16 still holds thread ptr */ + +/* We can manipulate the COPYIN segment register quite easily + * here, but we've also got to make sure we don't go over a + * segment boundary - hence some mess. + * Registers from 12-29 are free for our use. + */ + + + lwz r28,saver1(r30) /* Get the stack pointer */ + subi r29,r29,8 /* Get the number of arguments to copy */ + addi r28,r28,COPYIN_ARG0_OFFSET /* Set source in user land */ + +/* set up SR_COPYIN to allow us to copy, we may need to loop + * around if we change segments. We know that this previously + * pointed to user space, so the sid doesn't need setting. 
+ */ + + rlwinm r7,r28,6,26,29 ; Get index to the segment slot + +.L_syscall_copyin_seg_loop: + + + lwzx r10,r8,r7 ; Get the source SR value + rlwinm r26,r28,0,4,31 ; Clear the segment number from source address + mtsr SR_COPYIN,r10 ; Set the copyin SR + isync + + oris r26,r26,(SR_COPYIN_NUM << (28-16)) ; Insert the copyin segment number into source address + +/* Make r27 point to address-4 of where we will store copied args */ + addi r27,r1,FM_ARG0-4 + +.L_syscall_copyin_word_loop: + + lwz r0,0(r26) /* MAY CAUSE PAGE FAULT! */ + subi r29,r29,1 ; Decrement count + addi r26,r26,4 ; Bump input + stw r0,4(r27) ; Save the copied in word + mr. r29,r29 ; Are they all moved? + addi r27,r27,4 ; Bump output + beq+ .L_syscall_copyin_done ; Escape if we are done... + + rlwinm. r0,r26,0,4,29 ; Did we just step into a new segment? + addi r28,r28,4 ; Bump up user state address also + bne+ .L_syscall_copyin_word_loop ; We are still on the same segment... + + addi r7,r7,4 ; Bump to next slot + b .L_syscall_copyin_seg_loop /* On new segment! 
remap */ + +/* Don't bother restoring SR_COPYIN, we can leave it trashed */ +/* clear thread recovery as we're done touching user data */ + +.L_syscall_copyin_done: + li r0,0 + stw r0,THREAD_RECOVER(r16) /* R16 still holds thread ptr */ + +.L_syscall_got_args: + lwz r8,ACT_TASK(r13) /* Get our task */ + lis r10,hi16(EXT(c_syscalls_mach)) /* Get top half of counter address */ + lwz r7,TASK_SYSCALLS_MACH(r8) ; Get the current count + lwz r3,saver3(r30) /* Restore r3 */ + addi r7,r7,1 ; Bump it + ori r10,r10,lo16(EXT(c_syscalls_mach)) /* Get low half of counter address */ + stw r7,TASK_SYSCALLS_MACH(r8) ; Save it + lwz r4,saver4(r30) /* Restore r4 */ + lwz r9,0(r10) /* Get counter */ + lwz r5,saver5(r30) /* Restore r5 */ + lwz r6,saver6(r30) /* Restore r6 */ + addi r9,r9,1 /* Add 1 */ + lwz r7,saver7(r30) /* Restore r7 */ + lwz r8,saver8(r30) /* Restore r8 */ + stw r9,0(r10) /* Save it back */ + lwz r9,saver9(r30) /* Restore r9 */ + lwz r10,saver10(r30) /* Restore r10 */ + + lwz r0,MACH_TRAP_FUNCTION(r31) + +/* calling this function, all the callee-saved registers are + * still valid except for r30 and r31 which are in the PCB + * r30 holds pointer to saved state (ie. pcb) + * r31 is scrap + */ + mtctr r0 + bctrl /* perform the actual syscall */ + +/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON */ + +/* r3 contains value that we're going to return to the user + */ + +/* + * Ok, return from C function, ARG0 = return value + * + * get the active thread's PCB pointer and thus pointer to user state + * saved state is still in R30 and the active thread is in R16 . 
+ */ + +/* Store return value into saved state structure, since + * we need to pick up the value from here later - the + * syscall may perform a thread_set_syscall_return + * followed by a thread_exception_return, ending up + * at thread_syscall_return below, with SS_R3 having + * been set up already + */ + +/* When we are here, r16 should point to the current thread, + * r30 should point to the current pcb + */ + +/* save off return value, we must load it + * back anyway for thread_exception_return + * TODO NMGS put in register? + */ +.L_syscall_return: + mr r31,r16 /* Move the current thread pointer */ + stw r3,saver3(r30) /* Stash the return code */ + + /* Call a function that records the end of */ + /* the mach system call */ + mr r4,r30 + bl EXT(syscall_trace_end) + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + mr r4,r31 /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + mr r5,r30 /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + +.L_thread_syscall_ret_check_ast: + mfmsr r12 /* Get the current MSR */ + rlwinm r12,r12,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions enable bit */ + mtmsr r12 /* Turn interruptions off */ + + mfsprg r10,0 /* Get the per_processor block */ + +/* Check to see if there's an outstanding AST */ + + lwz r4,PP_NEED_AST(r10) + lwz r4,0(r4) + cmpi cr0,r4, 0 + beq cr0,.L_syscall_no_ast + +/* Yes there is, call ast_taken + * pretending that the user thread took an AST exception here, + * ast_taken will save all state and bring us back here + */ + +#if DEBUG +/* debug assert - make sure that we're not returning to kernel */ + lwz r3,savesrr1(r30) + andi. r3,r3,MASK(MSR_PR) + bne+ 0f /* returning to user level, check */ + + BREAKPOINT_TRAP +0: +#endif /* DEBUG */ + + li r3, 0 + li r4, AST_ALL + li r5, 1 + bl EXT(ast_taken) + + b .L_thread_syscall_ret_check_ast + +/* thread_exception_return returns to here, almost all + * registers intact. 
It expects a full context restore + * of what it hasn't restored itself (ie. what we use). + * + * In particular for us, + * we still have r31 points to the current thread, + * r30 points to the current pcb + */ + +.L_syscall_no_ast: +.L_thread_syscall_return: + + mr r3,r30 ; Get savearea to the correct register for common exit + lwz r8,THREAD_TOP_ACT(r31) /* Now find the current activation */ + + lwz r11,SAVflags(r30) /* Get the flags */ + lwz r5,THREAD_KERNEL_STACK(r31) /* Get the base pointer to the stack */ + rlwinm r11,r11,0,15,13 /* Clear the syscall flag */ + lwz r4,SAVprev(r30) ; Get the previous save area + stw r11,SAVflags(r30) /* Stick back the flags */ + addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE /* Reset to empty */ + stw r4,ACT_MACT_PCB(r8) ; Save previous save area + stw r5,ACT_MACT_KSP(r8) /* Save the empty stack pointer */ + + b chkfac ; Go end it all... + + +.L_syscall_copyin_recover: + + /* This is the catcher for any data faults in the copyin + * of arguments from the user's stack. + * r30 still holds a pointer to the PCB + * + * call syscall_error(EXC_BAD_ACCESS, EXC_PPC_VM_PROT_READ, sp, ssp), + * + * we already had a frame so we can do this + */ + + li r3,EXC_BAD_ACCESS + li r4,EXC_PPC_VM_PROT_READ + lwz r5,saver1(r30) + mr r6,r30 + + bl EXT(syscall_error) + b .L_syscall_return + + +/* + * thread_bootstrap_return() + * + * NOTE: THIS IS GOING AWAY IN A FEW DAYS.... + * + */ + +ENTRY(thread_bootstrap_return, TAG_NO_FRAME_USED) + b .L_thread_exc_ret_check_ast ; Do the same as thread_exception_return... + +/* + * thread_exception_return() + * + * Return to user mode directly from within a system call. 
+ */ + +ENTRY(thread_exception_return, TAG_NO_FRAME_USED) + +.L_thread_exc_ret_check_ast: + + mfmsr r3 /* Get the MSR */ + rlwinm r3,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear EE */ + mtmsr r3 /* Disable interrupts */ + +/* Check to see if there's an outstanding AST */ +/* We don't bother establishing a call frame even though CHECK_AST + can invoke ast_taken(), because it can just borrow our caller's + frame, given that we're not going to return. +*/ + + mfsprg r10,0 /* Get the per_processor block */ + lwz r4,PP_NEED_AST(r10) + lwz r4,0(r4) + cmpi cr0,r4, 0 + beq cr0,.L_exc_ret_no_ast + + /* Yes there is, call ast_taken + * pretending that the user thread took an AST exception here, + * ast_taken will save all state and bring us back here + */ + + + li r3,0 + li r4,AST_ALL + li r5,1 + + bl EXT(ast_taken) + b .L_thread_exc_ret_check_ast /* check for a second AST (rare)*/ + +/* arriving here, interrupts should be disabled */ +/* Get the active thread's PCB pointer to restore regs + */ +.L_exc_ret_no_ast: + + lwz r31,PP_CPU_DATA(r10) + lwz r31,CPU_ACTIVE_THREAD(r31) + lwz r30,THREAD_TOP_ACT(r31) + lwz r30,ACT_MACT_PCB(r30) + mr. r30,r30 ; Is there any context yet? + beq- makeDummyCtx ; No, hack one up... +#if DEBUG +/* + * debug assert - make sure that we're not returning to kernel + * get the active thread's PCB pointer and thus pointer to user state + */ + + lwz r3,savesrr1(r30) + andi. r3,r3,MASK(MSR_PR) + bne+ ret_user2 ; We are ok... + + BREAKPOINT_TRAP +ret_user2: +#endif /* DEBUG */ + +/* If the MSR_SYSCALL_MASK isn't set, then we came from a trap, + * so warp into the return_from_trap (thread_return) routine, + * which takes PCB pointer in R3, not in r30! + */ + lwz r0,SAVflags(r30) + mr r3,r30 /* Copy pcb pointer into r3 in case */ + andis. r0,r0,SAVsyscall>>16 /* Are we returning from a syscall? */ + beq- cr0,thread_return /* Nope, must be a thread return... 
*/ + b .L_thread_syscall_return + +; +; This is where we handle someone trying who did a thread_create followed +; by a thread_resume with no intervening thread_set_state. Just make an +; empty context, initialize it to trash and let em execute at 0... + +makeDummyCtx: + bl EXT(save_get) ; Get a save_area + li r0,0 ; Get a 0 + addi r2,r3,savefp0 ; Point past what we are clearing + mr r4,r3 ; Save the start + +cleardummy: stw r0,0(r4) ; Clear stuff + addi r4,r4,4 ; Next word + cmplw r4,r2 ; Still some more? + blt+ cleardummy ; Yeah... + + lis r2,hi16(MSR_EXPORT_MASK_SET) ; Set the high part of the user MSR + ori r2,r2,lo16(MSR_EXPORT_MASK_SET) ; And the low part + stw r2,savesrr1(r3) ; Set the default user MSR + + b thread_return ; Go let em try to execute, hah! + +/* + * ihandler(type) + * + * ENTRY: VM switched ON + * Interrupts OFF + * R3 contains exception code + * R4 points to the saved context (virtual address) + * Everything is saved in savearea + * + */ + +ENTRY(ihandler, TAG_NO_FRAME_USED) + +/* + * get the value of istackptr, if it's zero then we're already on the + * interrupt stack, otherwise it points to a saved_state structure + * at the top of the interrupt stack. + */ + + lwz r10,savesrr1(r4) /* Get SRR1 */ + mfsprg r25,0 /* Get the per_proc block */ + li r14,0 /* Zero this for now */ + rlwinm. r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? + lwz r16,PP_CPU_DATA(r25) /* Assume we need this */ + crmove cr1_eq,cr0_eq ; Remember vector enablement + lwz r1,PP_ISTACKPTR(r25) /* Get the interrupt stack */ + rlwinm. r10,r10,0,MSR_FP_BIT,MSR_FP_BIT ; Was floating point on? + li r13,0 /* Zero this for now */ + lwz r16,CPU_ACTIVE_THREAD(r16) /* Get the thread pointer */ + + beq+ cr1,ivecoff ; Vector off, do not save vrsave... + lwz r7,savevrsave(r4) ; Get the VRSAVE register + stw r7,liveVRS(r25) ; Set the live value + +ivecoff: li r0,0 /* Get a constant 0 */ + cmplwi cr1,r16,0 /* Are we still booting? */ + beq+ ifpoff ; Skip if floating point is off... 
+ lwz r9,savexfpscr(r4) ; Grab the just saved FPSCR + stw r9,liveFPSCR(r25) ; Make it the live copy + +ifpoff: mr. r1,r1 /* Is it active? */ + beq- cr1,ihboot1 /* We're still coming up... */ + lwz r13,THREAD_TOP_ACT(r16) /* Pick up the active thread */ + lwz r14,ACT_MACT_PCB(r13) /* Now point to the PCB */ + +ihboot1: lwz r9,saver1(r4) /* Pick up the 'rupt time stack */ + stw r14,SAVprev(r4) /* Queue the new save area in the front */ + stw r13,SAVact(r4) /* Point the savearea at its activation */ + beq- cr1,ihboot4 /* We're still coming up... */ + stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */ + +ihboot4: bne .L_istackfree /* Nope... */ + +/* We're already on the interrupt stack, get back the old + * stack pointer and make room for a frame + */ + + subi r1,r9,FM_REDZONE /* Back up beyond the red zone */ + b ihsetback /* Go set up the back chain... */ + +.L_istackfree: + lwz r10,SAVflags(r4) + stw r0,PP_ISTACKPTR(r25) /* Mark the stack in use */ + oris r10,r10,HIGH_ADDR(SAVrststk) /* Indicate we reset stack when we return from this one */ + stw r10,SAVflags(r4) /* Stick it back */ + + /* + * To summarise, when we reach here, the state has been saved and + * the stack is marked as busy. We now generate a small + * stack frame with backpointers to follow the calling + * conventions. We set up the backpointers to the trapped + * routine allowing us to backtrace. + */ + +ihsetback: subi r1,r1,FM_SIZE /* Make a new frame */ + stw r9,FM_BACKPTR(r1) /* point back to previous stackptr */ + +#if VERIFYSAVE + bl versave ; (TEST/DEBUG) +#endif + +#if DEBUG +/* If debugging, we need two frames, the first being a dummy + * which links back to the trapped routine. 
The second is + * that which the C routine below will need + */ + lwz r5,savesrr0(r4) /* Get interrupt address */ + stw r5,FM_LR_SAVE(r1) /* save old instr ptr as LR value */ + stwu r1,-FM_SIZE(r1) /* Make another new frame for C routine */ +#endif /* DEBUG */ + + lwz r5,savedsisr(r4) /* Get the DSISR */ + lwz r6,savedar(r4) /* Get the DAR */ + + bl EXT(interrupt) + + +/* interrupt() returns a pointer to the saved state in r3 + * + * Ok, back from C. Disable interrupts while we restore things + */ + .globl EXT(ihandler_ret) + +LEXT(ihandler_ret) /* Marks our return point from debugger entry */ + + mfmsr r0 /* Get our MSR */ + rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Flip off the interrupt enabled bit */ + mtmsr r0 /* Make sure interrupts are disabled */ + mfsprg r10,0 /* Get the per_proc block */ + + lwz r8,PP_CPU_DATA(r10) /* Get the CPU data area */ + lwz r7,SAVflags(r3) /* Pick up the flags */ + lwz r8,CPU_ACTIVE_THREAD(r8) /* and the active thread */ + lwz r9,SAVprev(r3) /* Get previous save area */ + cmplwi cr1,r8,0 /* Are we still initializing? */ + lwz r12,savesrr1(r3) /* Get the MSR we will load on return */ + beq- cr1,ihboot2 /* Skip if we are still in init... */ + lwz r8,THREAD_TOP_ACT(r8) /* Pick up the active thread */ + +ihboot2: andis. r11,r7,HIGH_ADDR(SAVrststk) /* Is this the first on the stack? */ + beq- cr1,ihboot3 /* Skip if we are still in init... */ + stw r9,ACT_MACT_PCB(r8) /* Point to previous context savearea */ + +ihboot3: mr r4,r3 /* Move the savearea pointer */ + beq .L_no_int_ast2 /* Get going if not the top o' stack... */ + + +/* We're the last frame on the stack. Restore istackptr to empty state. 
+ * + * Check for ASTs if one of the below is true: + * returning to user mode + * returning to a kloaded server + */ + lwz r9,PP_INTSTACK_TOP_SS(r10) /* Get the empty stack value */ + lwz r5,PP_CPU_DATA(r10) /* Get cpu_data ptr */ + andc r7,r7,r11 /* Remove the stack reset bit in case we pass this one */ + stw r9,PP_ISTACKPTR(r10) /* Save that saved state ptr */ + lwz r3,CPU_PREEMPTION_LEVEL(r5) /* Get preemption level */ + stw r7,SAVflags(r4) /* Save the flags */ + cmplwi r3, 0 /* Check for preemption */ + bne .L_no_int_ast /* Don't preempt if level is not zero */ + andi. r6,r12,MASK(MSR_PR) /* privilege mode */ + lwz r11,PP_NEED_AST(r10) /* Get the AST request address */ + lwz r11,0(r11) /* Get the request */ + beq- .L_kernel_int_ast /* In kernel space, AST_URGENT check */ + li r3,T_AST /* Assume the worst */ + mr. r11,r11 /* Are there any pending? */ + beq .L_no_int_ast /* Nope... */ + b .L_call_thandler + +.L_kernel_int_ast: + andi. r11,r11,AST_URGENT /* AST_URGENT */ + li r3,T_PREEMPT /* Assume the worst */ + beq .L_no_int_ast /* Nope... */ + +.L_call_thandler: + +/* + * There is a pending AST. Massage things to make it look like + * we took a trap and jump into the trap handler. To do this + * we essentially pretend to return from the interrupt but + * at the last minute jump into the trap handler with an AST + * trap instead of performing an rfi. + */ + + stw r3,saveexception(r4) /* Set the exception code to T_AST/T_PREEMPT */ + b EXT(thandler) /* hyperspace into AST trap */ + +.L_no_int_ast: + mr r3,r4 ; Get into the right register for common code +.L_no_int_ast2: + rlwinm r7,r7,0,15,13 /* Clear the syscall bit */ + li r4,0 ; Assume for a moment that we are in init + stw r7,SAVflags(r3) /* Set the flags */ + beq- cr1,chkfac ; Jump away if we are in init... + lwz r4,ACT_MACT_PCB(r8) ; Get the new level marker + + +; +; This section is common to all exception exits. 
It throws away vector +; and floating point saveareas as the exception level of a thread is +; exited. +; +; It also enables the facility if its context is live +; Requires: +; R3 = Savearea to be released (virtual) +; R4 = New top of savearea stack (could be 0) +; R8 = pointer to activation +; R10 = per_proc block +; +chkfac: mr. r8,r8 ; Are we still in boot? + beq- chkenax ; Yeah, skip it all... + + lwz r20,ACT_MACT_FPUlvl(r8) ; Get the FPU level + lwz r12,savesrr1(r3) ; Get the current MSR + cmplw r20,r3 ; Are we returning from the active level? + lwz r23,PP_FPU_THREAD(r10) ; Get floating point owner + rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point for now + cmplw cr1,r23,r8 ; Are we the facility owner? + lhz r26,PP_CPU_NUMBER(r10) ; Get the current CPU number + beq- chkfpfree ; Leaving active level, can not possibly enable... + bne- cr1,chkvec ; Not our facility, nothing to do here... + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3301 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + lwz r24,ACT_MACT_FPUcpu(r8) ; Get the CPU this context was enabled on last + cmplw r4,r20 ; Are we going to be in the right level? + cmplw cr1,r24,r26 ; Are we on the right CPU? + li r0,0 ; Get a constant 0 + beq+ cr1,chkfpnlvl ; Right CPU... + + stw r0,PP_FPU_THREAD(r10) ; Show facility unowned so we do not get back here + b chkvec ; Go check out the vector facility... + +chkfpnlvl: bne- chkvec ; Different level, can not enable... + lwz r24,ACT_MACT_FPU(r8) ; Get the floating point save area + ori r12,r12,lo16(MASK(MSR_FP)) ; Enable facility + mr. r24,r24 ; Does the savearea exist? + li r0,1 ; Get set to invalidate + beq- chkvec ; Nothing to invalidate... + lwz r25,SAVlvlfp(r24) ; Get the level of top savearea + cmplw r4,r25 ; Is the top one ours? + bne+ chkvec ; Not ours... 
+ stw r0,SAVlvlfp(r24) ; Invalidate the first one + +#if 0 + mfmsr r0 ; (TEST/DEBUG) + ori r0,r0,0x2000 ; (TEST/DEBUG) + mtmsr r0 ; (TEST/DEBUG) + isync ; (TEST/DEBUG) + + stfd f0,savevr0(r3) ; (TEST/DEBUG) + stfd f1,savevr0+8(r3) ; (TEST/DEBUG) + stfd f2,savevr0+0x10(r3) ; (TEST/DEBUG) + stfd f3,savevr0+0x18(r3) ; (TEST/DEBUG) + stfd f4,savevr0+0x20(r3) ; (TEST/DEBUG) + stfd f5,savevr0+0x28(r3) ; (TEST/DEBUG) + stfd f6,savevr0+0x30(r3) ; (TEST/DEBUG) + stfd f7,savevr0+0x38(r3) ; (TEST/DEBUG) + stfd f8,savevr0+0x40(r3) ; (TEST/DEBUG) + stfd f9,savevr0+0x48(r3) ; (TEST/DEBUG) + stfd f10,savevr0+0x50(r3) ; (TEST/DEBUG) + stfd f11,savevr0+0x58(r3) ; (TEST/DEBUG) + stfd f12,savevr0+0x60(r3) ; (TEST/DEBUG) + stfd f13,savevr0+0x68(r3) ; (TEST/DEBUG) + stfd f14,savevr0+0x70(r3) ; (TEST/DEBUG) + stfd f15,savevr0+0x78(r3) ; (TEST/DEBUG) + stfd f16,savevr0+0x80(r3) ; (TEST/DEBUG) + stfd f17,savevr0+0x88(r3 ; (TEST/DEBUG) + stfd f18,savevr0+0x90(r3) ; (TEST/DEBUG) + stfd f19,savevr0+0x98(r3) ; (TEST/DEBUG) + stfd f20,savevr0+0xA0(r3) ; (TEST/DEBUG) + stfd f21,savevr0+0xA8(r3) ; (TEST/DEBUG) + stfd f22,savevr0+0xB0(r3) ; (TEST/DEBUG) + stfd f23,savevr0+0xB8(r3) ; (TEST/DEBUG) + stfd f24,savevr0+0xC0(r3) ; (TEST/DEBUG) + stfd f25,savevr0+0xC8(r3) ; (TEST/DEBUG) + stfd f26,savevr0+0xD0(r3) ; (TEST/DEBUG) + stfd f27,savevr0+0xD8(r3) ; (TEST/DEBUG) + stfd f28,savevr0+0xE0(r3) ; (TEST/DEBUG) + stfd f29,savevr0+0xE8(r3) ; (TEST/DEBUG) + stfd f30,savevr0+0xF0(r3) ; (TEST/DEBUG) + stfd f31,savevr0+0xF8(r3) ; (TEST/DEBUG) + + li r2,64 ; (TEST/DEBUG) + la r20,savevr0(r3) ; (TEST/DEBUG) + la r21,savefp0(r24) ; (TEST/DEBUG) + +ckmurderdeath2: + lwz r22,0(r20) ; (TEST/DEBUG) + subic. 
r2,r2,1 ; (TEST/DEBUG) + lwz r23,0(r21) ; (TEST/DEBUG) + addi r20,r20,4 ; (TEST/DEBUG) + cmplw cr1,r22,r23 ; (TEST/DEBUG) + addi r21,r21,4 ; (TEST/DEBUG) + bne- cr1,diekilldead2 ; (TEST/DEBUG) + bne+ ckmurderdeath2 ; (TEST/DEBUG) + b dontdiekilldead2 ; (TEST/DEBUG) + +diekilldead2: ; (TEST/DEBUG) + mr r4,r24 ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) + +dontdiekilldead2: + lfd f0,savevr0(r3) ; (TEST/DEBUG) + lfd f1,savevr0+8(r3) ; (TEST/DEBUG) +#endif + + + + b chkvec ; Go check out the vector facility... + +chkfpfree: li r0,0 ; Clear a register + lwz r24,ACT_MACT_FPU(r8) ; Get the floating point save area + + bne- cr1,chkfpnfr ; Not our facility, do not clear... + stw r0,PP_FPU_THREAD(r10) ; Clear floating point owner +chkfpnfr: + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3302 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + mr. r24,r24 ; Do we even have a savearea? + beq+ chkvec ; Nope... + +#if FPVECDBG + rlwinm. r0,r24,0,0,15 ; (TEST/DEBUG) + bne+ notbadxxx1 ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) +notbadxxx1: ; (TEST/DEBUG) + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3303 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + lwz r25,SAVlvlfp(r24) ; Get the level of top savearea + cmplwi r25,1 ; Is the top area invalid? + cmplw cr1,r25,r3 ; Is it for the returned from context? + beq fptoss ; It is invalid... + bne cr1,chkvec ; Not for the returned context... + +fptoss: lwz r25,SAVprefp(r24) ; Get previous savearea +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3304 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + mr r5,r25 ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + mr. r25,r25 ; Is there one? + stw r25,ACT_MACT_FPU(r8) ; Set the new pointer + beq fptoplvl ; Nope, we are at the top... +#if FPVECDBG + rlwinm. 
r0,r25,0,0,15 ; (TEST/DEBUG) + bne+ notbadxxx2 ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) +notbadxxx2: ; (TEST/DEBUG) +#endif + lwz r25,SAVlvlfp(r25) ; Get the new level + +fptoplvl: lwz r19,SAVflags(r24) ; Get the savearea flags +#if FPVECDBG + rlwinm. r0,r19,0,1,1 ; (TEST/DEBUG) + bne+ donotdie3 ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) +donotdie3: ; (TEST/DEBUG) +#endif + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3305 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + rlwinm r22,r24,0,0,19 ; Round down to the base savearea block + rlwinm r19,r19,0,2,0 ; Remove the floating point in use flag + stw r25,ACT_MACT_FPUlvl(r8) ; Set the new top level + andis. r0,r19,hi16(SAVinuse) ; Still in use? + stw r19,SAVflags(r24) ; Set the savearea flags + bne- chkvec ; Yes, go check out vector... +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3306 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif +#if FPVECDBG + rlwinm. r0,r24,0,0,15 ; (TEST/DEBUG) + bne+ notbadxxx3 ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) +notbadxxx3: ; (TEST/DEBUG) +#endif + lwz r23,SACvrswap(r22) ; Get the conversion from virtual to real + lwz r20,PP_QUICKFRET(r10) ; Get the old quick fret head + xor r23,r24,r23 ; Convert to physical + stw r20,SAVqfret(r24) ; Back chain the quick release queue + stw r23,PP_QUICKFRET(r10) ; Anchor it + +; +; Check out vector stuff (and translate savearea to physical for exit) +; +chkvec: + lwz r20,ACT_MACT_VMXlvl(r8) ; Get the vector level + lwz r23,PP_VMX_THREAD(r10) ; Get vector owner + cmplw r20,r3 ; Are we returning from the active level? + rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector for now + cmplw cr1,r23,r8 ; Are we the facility owner? + beq- chkvecfree ; Leaving active level, can not possibly enable... + bne- cr1,setena ; Not our facility, nothing to do here... 
+ +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3401 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + lwz r24,ACT_MACT_VMXcpu(r8) ; Get the CPU this context was enabled on last + cmplw r4,r20 ; Are we going to be in the right level? + cmplw cr1,r24,r26 ; Are we on the right CPU? + li r0,0 ; Get a constant 0 + beq+ cr1,chkvecnlvl ; Right CPU... + + stw r0,PP_VMX_THREAD(r10) ; Show facility unowned so we do not get back here + b setena ; Go actually exit... + +chkvecnlvl: bne- setena ; Different level, can not enable... + lwz r24,ACT_MACT_VMX(r8) ; Get the vector save area + oris r12,r12,hi16(MASK(MSR_VEC)) ; Enable facility + mr. r24,r24 ; Does the savearea exist? + li r0,1 ; Get set to invalidate + beq- setena ; Nothing to invalidate... + lwz r25,SAVlvlvec(r24) ; Get the level of top savearea + cmplw r4,r25 ; Is the top one ours? + bne+ setena ; Not ours... + stw r0,SAVlvlvec(r24) ; Invalidate the first one + b setena ; Actually exit... + +chkvecfree: li r0,0 ; Clear a register + lwz r24,ACT_MACT_VMX(r8) ; Get the vector save area + bne- cr1,chkvecnfr ; Not our facility, do not clear... + stw r0,PP_VMX_THREAD(r10) ; Clear vector owner +chkvecnfr: + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3402 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + + mr. r24,r24 ; Do we even have a savearea? + beq+ setena ; Nope... + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3403 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + lwz r25,SAVlvlvec(r24) ; Get the level + cmplwi r25,1 ; Is the top area invalid? + cmplw cr1,r25,r3 ; Is it for the returned from context? + beq vectoss ; It is invalid... + bne cr1,setena ; Not for the returned context... 
+ +vectoss: lwz r25,SAVprevec(r24) ; Get previous savearea +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3504 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + mr r5,r25 ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + mr. r25,r25 ; Is there one? + stw r25,ACT_MACT_VMX(r8) ; Set the new pointer + beq vectoplvl ; Nope, we are at the top... + lwz r25,SAVlvlvec(r25) ; Get the new level + +vectoplvl: lwz r19,SAVflags(r24) ; Get the savearea flags + +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3405 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + rlwinm r22,r24,0,0,19 ; Round down to the base savearea block + rlwinm r19,r19,0,3,1 ; Remove the vector in use flag + stw r25,ACT_MACT_VMXlvl(r8) ; Set the new top level + andis. r0,r19,hi16(SAVinuse) ; Still in use? + stw r19,SAVflags(r24) ; Set the savearea flags + bne- setena ; Yes, all done... +#if FPVECDBG + lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) + li r2,0x3406 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +#endif + lwz r23,SACvrswap(r22) ; Get the conversion from virtual to real + lwz r20,PP_QUICKFRET(r10) ; Get the old quick fret head + xor r23,r24,r23 ; Convert to physical + stw r20,SAVqfret(r24) ; Back chain the quick release queue + stw r23,PP_QUICKFRET(r10) ; Anchor it + +setena: rlwinm r20,r12,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector + rlwimi. r20,r12,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit ; Set flag if we enabled floats + beq setenaa ; Neither float nor vector turned on.... 
+ + lwz r5,ACT_MACT_SPF(r8) ; Get activation copy + lwz r6,spcFlags(r10) ; Get per_proc copy + or r5,r5,r20 ; Set vector/float changed bits in activation + or r6,r6,r20 ; Set vector/float changed bits in per_proc + stw r5,ACT_MACT_SPF(r8) ; Set activation copy + stw r6,spcFlags(r10) ; Set per_proc copy + +setenaa: stw r12,savesrr1(r3) ; Turn facility on or off + + mfdec r24 ; Get decrementer + lwz r22,qactTimer(r8) ; Get high order quick activation timer + mr. r24,r24 ; See if it has popped already... + lwz r23,qactTimer+4(r8) ; Get low order qact timer + ble- chkenax ; We have popped or are just about to... + +segtb: mftbu r20 ; Get the upper time base + mftb r21 ; Get the low + mftbu r19 ; Get upper again + or. r0,r22,r23 ; Any time set? + cmplw cr1,r20,r19 ; Did they change? + beq+ chkenax ; No time set.... + bne- cr1,segtb ; Timebase ticked, get them again... + + subfc r6,r21,r23 ; Subtract current from qact time + li r0,0 ; Make a 0 + subfe r5,r20,r22 ; Finish subtract + subfze r0,r0 ; Get a 0 if qact was bigger than current, -1 otherwise + andc. r12,r5,r0 ; Set 0 if qact has passed + andc r13,r6,r0 ; Set 0 if qact has passed + bne chkenax ; If high order is non-zero, this is too big for a decrementer + cmplw r13,r24 ; Is this earlier than the decrementer? (logical compare takes care of high bit on) + bge+ chkenax ; No, do not reset decrementer... + + mtdec r13 ; Set our value + +chkenax: lwz r6,SAVflags(r3) ; Pick up the flags of the old savearea + + +#if DEBUG + lwz r20,SAVact(r3) ; (TEST/DEBUG) Make sure our restore + lwz r21,PP_CPU_DATA(r10) ; (TEST/DEBUG) context is associated + lwz r21,CPU_ACTIVE_THREAD(r21) ; (TEST/DEBUG) with the current act. 
+ cmpwi r21,0 ; (TEST/DEBUG) + beq- yeswereok ; (TEST/DEBUG) + lwz r21,THREAD_TOP_ACT(r21) ; (TEST/DEBUG) + cmplw r21,r20 ; (TEST/DEBUG) + beq+ yeswereok ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) + +yeswereok: +#endif + + rlwinm r5,r3,0,0,19 ; Round savearea down to page bndry + rlwinm r6,r6,0,1,31 ; Mark savearea free + lwz r5,SACvrswap(r5) ; Get the conversion from virtual to real + stw r6,SAVflags(r3) ; Set savearea flags + xor r3,r3,r5 ; Flip to physical address + b EXT(exception_exit) ; We are all done now... + + + +/* + * Here's where we handle the fastpath stuff + * We'll do what we can here because registers are already + * loaded and it will be less confusing that moving them around. + * If we need to though, we'll branch off somewhere's else. + * + * Registers when we get here: + * + * r0 = syscall number + * r4 = savearea/pcb + * r13 = activation + * r14 = previous savearea (if any) + * r16 = thread + * r25 = per_proc + */ + +fastpath: cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber? + bnelr- cr3 ; Not a fast path... + +/* + * void cthread_set_self(cproc_t p) + * + * set's thread state "user_value" + * + * This op is invoked as follows: + * li r0, CthreadSetSelfNumber // load the fast-trap number + * sc // invoke fast-trap + * blr + * + */ + +CthreadSetSelfNumber: + + lwz r5,saver3(r4) /* Retrieve the self number */ + stw r5,CTHREAD_SELF(r13) /* Remember it */ + stw r5,UAW(r25) /* Prime the per_proc_info with it */ + + + .globl EXT(fastexit) +EXT(fastexit): + lwz r8,SAVflags(r4) /* Pick up the flags */ + rlwinm r9,r4,0,0,19 /* Round down to the base savearea block */ + rlwinm r8,r8,0,1,31 /* Clear the attached bit */ + lwz r9,SACvrswap(r9) /* Get the conversion from virtual to real */ + stw r8,SAVflags(r4) /* Set the flags */ + xor r3,r4,r9 /* Switch savearea to physical addressing */ + b EXT(exception_exit) /* Go back to the caller... 
*/ + + +/* + * Here's where we check for a hit on the Blue Box Assist + * Most registers are non-volatile, so be careful here. If we don't + * recognize the trap instruction we go back for regular processing. + * Otherwise we transfer to the assist code. + */ + +checkassist: + lwz r23,saveexception(r4) /* Get the exception code */ + cmpi cr0, r23, T_AST /* Check for T_AST trap */ + beqlr- /* Go handle it */ + + lwz r23,savesrr1(r4) /* Get the interrupted MSR */ + lwz r24,ACT_MACT_BTS(r13) /* Get the table start */ + rlwinm. r27,r23,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in userland? + rlwinm r23,r23,0,SRR1_PRG_TRAP_BIT,SRR1_PRG_TRAP_BIT ; See if this was actually a trap instruction + beqlr- /* No assist in kernel mode... */ + mr. r23,r23 ; Check for trap + lwz r27,savesrr0(r4) /* Get trapped address */ + beqlr- ; No assist if not trap instruction... + +checkassistBP: /* Safe place to breakpoint */ + + sub r24,r27,r24 /* See how far into it we are */ + cmplwi r24,BB_MAX_TRAP /* Do we fit in the list? */ + bgtlr- /* Nope, it's a regular trap... */ + b EXT(atomic_switch_trap) /* Go to the assist... */ + +; +; Virtual Machine Monitor +; Here is where we exit from the emulated context +; Note that most registers get trashed here +; R3 and R30 are preserved across the call and hold the activation +; and savearea respectivily. 
+; + +exitFromVM: mr r30,r4 ; Get the savearea + mr r3,r13 ; Get the activation + + b EXT(vmm_exit) ; Do it to it + + .align 5 + .globl EXT(retFromVM) + +LEXT(retFromVM) + mfsprg r10,0 ; Restore the per_proc info + mr r8,r3 ; Get the activation + lwz r4,SAVprev(r30) ; Pick up the previous savearea + mr r3,r30 ; Put savearea in proper register for common code + lwz r11,SAVflags(r30) ; Get the flags of the current savearea + rlwinm r11,r11,0,15,13 ; Clear the syscall flag + lwz r1,ACT_THREAD(r8) ; and the active thread + stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared) + + stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none) + + lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack + addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty + stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer + b chkfac ; Go end it all... + + +#if MACH_KDB +/* + * Here's where we jump into the debugger. This is called from + * either an MP signal from another processor, or a command-power NMI + * on the main processor. + * + * Note that somewhere in our stack should be a return into the interrupt + * handler. If there isn't, we'll crash off the end of the stack, actually, + * it'll just quietly return. hahahahaha. + */ + +ENTRY(kdb_kintr, TAG_NO_FRAME_USED) + + lis r9,HIGH_ADDR(EXT(ihandler_ret)) /* Top part of interrupt return */ + lis r10,HIGH_ADDR(EXT(intercept_ret)) /* Top part of intercept return */ + ori r9,r9,LOW_ADDR(EXT(ihandler_ret)) /* Bottom part of interrupt return */ + ori r10,r10,LOW_ADDR(EXT(intercept_ret)) /* Bottom part of intercept return */ + + lwz r8,0(r1) /* Get our caller's stack frame */ + +srchrets: mr. r8,r8 /* Have we reached the end of our rope? */ + beqlr- /* Yeah, just bail... */ + lwz r7,FM_LR_SAVE(r8) /* The whoever called them */ + cmplw cr0,r9,r7 /* Was it the interrupt handler? */ + beq srchfnd /* Yeah... */ + lwz r8,0(r8) /* Chain back to the previous frame */ + b srchrets /* Ok, check again... 
*/ + +srchfnd: stw r10,FM_LR_SAVE(r8) /* Modify return to come to us instead */ + blr /* Finish up and get back here... */ + +/* + * We come here when we've returned all the way to the interrupt handler. + * That way we can enter the debugger with the registers and stack which + * existed at the point of interruption. + * + * R3 points to the saved state at entry + */ + + ENTRY(intercept_ret, TAG_NO_FRAME_USED) + + lis r6,HIGH_ADDR(EXT(kdb_trap)) /* Get the top part of the KDB enter routine */ + mr r5,r3 /* Move saved state to the correct parameter */ + ori r6,r6,LOW_ADDR(EXT(kdb_trap)) /* Get the last part of the KDB enter routine */ + li r4,0 /* Set a code of 0 */ + mr r13,r3 /* Save the saved state pointer in a non-volatile */ + mtlr r6 /* Set the routine address */ + li r3,-1 /* Show we had an interrupt type */ + + blrl /* Go enter KDB */ + + mr r3,r13 /* Put the saved state where expected */ + b EXT(ihandler_ret) /* Go return from the interruption... */ + +#endif + +#if VERIFYSAVE +; +; Savearea chain verification +; + +versave: + +#if 0 +; +; Make sure that only the top FPU savearea is marked invalid +; + + lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + li r20,0 ; (TEST/DEBUG) + lwz r26,0(r27) ; (TEST/DEBUG) + lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) + mr. r26,r26 ; (TEST/DEBUG) + lwz r28,psthreads(r28) ; (TEST/DEBUG) + bnelr- ; (TEST/DEBUG) + +fcknxtth: mr. r27,r27 ; (TEST/DEBUG) + beqlr- ; (TEST/DEBUG) + + lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) + +fckact: mr. r26,r26 ; (TEST/DEBUG) + bne+ fckact2 ; (TEST/DEBUG) + + lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line + subi r27,r27,1 ; (TEST/DEBUG) + b fcknxtth ; (TEST/DEBUG) + +fckact2: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain + mr. r20,r20 ; (TEST/DEBUG) Are there any? + beq+ fcknact ; (TEST/DEBUG) No... 
+ +fckact3: lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Get next in list + mr. r20,r20 ; (TEST/DEBUG) Check next savearea + beq+ fcknact ; (TEST/DEBUG) No... + + lwz r29,SAVlvlfp(r20) ; (TEST/DEBUG) Get the level + + cmplwi r29,1 ; (TEST/DEBUG) Is it invalid?? + bne+ fckact3 ; (TEST/DEBUG) Nope... + + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + stw r27,0(r27) ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) + +fcknact: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation + b fckact ; (TEST/DEBUG) +#endif + +#if 1 +; +; Make sure there are no circular links in the float chain +; And that FP is marked busy in it. +; And the only the top is marked invalid. +; And that the owning PCB is correct. +; + + lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + li r20,0 ; (TEST/DEBUG) + lwz r26,0(r27) ; (TEST/DEBUG) + lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) + mr. r26,r26 ; (TEST/DEBUG) + lwz r28,psthreads(r28) ; (TEST/DEBUG) + bnelr- ; (TEST/DEBUG) + +fcknxtth: mr. r27,r27 ; (TEST/DEBUG) + beqlr- ; (TEST/DEBUG) + + lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) + +fckact: mr. r26,r26 ; (TEST/DEBUG) + bne+ fckact2 ; (TEST/DEBUG) + + lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line + subi r27,r27,1 ; (TEST/DEBUG) + b fcknxtth ; (TEST/DEBUG) + +fckact2: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain + li r29,1 ; (TEST/DEBUG) + li r22,0 ; (TEST/DEBUG) + +fckact3: mr. r20,r20 ; (TEST/DEBUG) Are there any? + beq+ fckact5 ; (TEST/DEBUG) No... + + addi r22,r22,1 ; (TEST/DEBUG) Count chain depth + + lwz r21,SAVflags(r20) ; (TEST/DEBUG) Get the flags + rlwinm. r21,r21,0,1,1 ; (TEST/DEBUG) FP busy? + bne+ fckact3a ; (TEST/DEBUG) Yeah... 
+ lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + stw r27,0(r27) ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) Die + +fckact3a: cmplwi r22,1 ; (TEST/DEBUG) At first SA? + beq+ fckact3b ; (TEST/DEBUG) Yeah, invalid is ok... + lwz r21,SAVlvlfp(r20) ; (TEST/DEBUG) Get level + cmplwi r21,1 ; (TEST/DEBUG) Is it invalid? + bne+ fckact3b ; (TEST/DEBUG) Nope, it is ok... + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + stw r27,0(r27) ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) Die + +fckact3b: lwz r21,SAVact(r20) ; (TEST/DEBUG) Get the owner + cmplw r21,r26 ; (TEST/DEBUG) Correct activation? + beq+ fckact3c ; (TEST/DEBUG) Yup... + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + stw r27,0(r27) ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) Die + +fckact3c: ; (TEST/DEBUG) + lbz r21,SAVflags+3(r20) ; (TEST/DEBUG) Pick up the test byte + mr. r21,r21 ; (TEST/DEBUG) marked? + beq+ fckact4 ; (TEST/DEBUG) No, good... + + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + stw r27,0(r27) ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) + +fckact4: stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Set the test byte + lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list + b fckact3 ; (TEST/DEBUG) Try it... + +fckact5: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain + li r29,0 ; (TEST/DEBUG) + +fckact6: mr. r20,r20 ; (TEST/DEBUG) Are there any? + beq+ fcknact ; (TEST/DEBUG) No... + + stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Clear the test byte + lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list + b fckact6 ; (TEST/DEBUG) Try it... + +fcknact: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation + b fckact ; (TEST/DEBUG) +#endif + + +#if 0 +; +; Make sure in use count matches found savearea. This is +; not always accurate. There is a variable "fuzz" factor in count. 
+ + lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) + lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + li r20,0 ; (TEST/DEBUG) + lwz r26,0(r27) ; (TEST/DEBUG) + lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) + mr. r26,r26 ; (TEST/DEBUG) + lwz r28,psthreads(r28) ; (TEST/DEBUG) + bnelr- ; (TEST/DEBUG) + +cknxtth: mr. r27,r27 ; (TEST/DEBUG) + beq- cktotal ; (TEST/DEBUG) + + lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) + +ckact: mr. r26,r26 ; (TEST/DEBUG) + bne+ ckact2 ; (TEST/DEBUG) + + lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line + subi r27,r27,1 ; (TEST/DEBUG) + b cknxtth ; (TEST/DEBUG) + +ckact2: lwz r29,ACT_MACT_PCB(r26) ; (TEST/DEBUG) + +cknorm: mr. r29,r29 ; (TEST/DEBUG) + beq- cknormd ; (TEST/DEBUG) + + addi r20,r20,1 ; (TEST/DEBUG) Count normal savearea + + lwz r29,SAVprev(r29) ; (TEST/DEBUG) + b cknorm ; (TEST/DEBUG) + +cknormd: lwz r29,ACT_MACT_FPU(r26) ; (TEST/DEBUG) + +ckfpu: mr. r29,r29 ; (TEST/DEBUG) + beq- ckfpud ; (TEST/DEBUG) + + lwz r21,SAVflags(r29) ; (TEST/DEBUG) + rlwinm. r21,r21,0,0,0 ; (TEST/DEBUG) See if already counted + bne- cknfpu ; (TEST/DEBUG) + + addi r20,r20,1 ; (TEST/DEBUG) Count fpu savearea + +cknfpu: lwz r29,SAVprefp(r29) ; (TEST/DEBUG) + b ckfpu ; (TEST/DEBUG) + +ckfpud: lwz r29,ACT_MACT_VMX(r26) ; (TEST/DEBUG) + +ckvmx: mr. r29,r29 ; (TEST/DEBUG) + beq- ckvmxd ; (TEST/DEBUG) + + lwz r21,SAVflags(r29) ; (TEST/DEBUG) + rlwinm. 
r21,r21,0,0,1 ; (TEST/DEBUG) See if already counted + bne- cknvmx ; (TEST/DEBUG) + + addi r20,r20,1 ; (TEST/DEBUG) Count vector savearea + +cknvmx: lwz r29,SAVprevec(r29) ; (TEST/DEBUG) + b ckvmx ; (TEST/DEBUG) + +ckvmxd: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation + b ckact ; (TEST/DEBUG) + +cktotal: lis r28,hi16(EXT(saveanchor)) ; (TEST/DEBUG) + lis r27,hi16(EXT(real_ncpus)) ; (TEST/DEBUG) + ori r28,r28,lo16(EXT(saveanchor)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(real_ncpus)) ; (TEST/DEBUG) + + lwz r21,SVinuse(r28) ; (TEST/DEBUG) + lwz r27,0(r27) ; (TEST/DEBUG) Get the number of CPUs + sub. r29,r21,r20 ; (TEST/DEBUG) Get number accounted for + blt- badsave ; (TEST/DEBUG) Have too many in use... + sub r26,r29,r27 ; (TEST/DEBUG) Should be 1 unaccounted for for each processor + cmpwi r26,10 ; (TEST/DEBUG) Allow a 10 area slop factor + bltlr+ ; (TEST/DEBUG) + +badsave: lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) + ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) + stw r27,0(r27) ; (TEST/DEBUG) + BREAKPOINT_TRAP ; (TEST/DEBUG) +#endif +#endif diff --git a/osfmk/ppc/hw_lock.s b/osfmk/ppc/hw_lock.s new file mode 100644 index 000000000..a317068d1 --- /dev/null +++ b/osfmk/ppc/hw_lock.s @@ -0,0 +1,1557 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include + +#define STRING ascii + +#define SWT_HI 0+FM_SIZE +#define SWT_LO 4+FM_SIZE +#define MISSED 8+FM_SIZE + +; +; NOTE: make sure that PREEMPTSTACK in aligned_data is +; set the same as it is here. This is the number of +; traceback entries we can handle per processor +; +; A value of 0 disables the stack. +; +#define PREEMPTSTACK 0 +#define CHECKNMI 0 +#define CHECKLOCKS 1 + +#include + +#define PROLOG(space) \ + stwu r1,-(FM_ALIGN(space)+FM_SIZE)(r1) __ASMNL__ \ + mflr r0 __ASMNL__ \ + stw r3,FM_ARG0(r1) __ASMNL__ \ + stw r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1) __ASMNL__ + +#define EPILOG \ + lwz r1,0(r1) __ASMNL__ \ + lwz r0,FM_LR_SAVE(r1) __ASMNL__ \ + mtlr r0 __ASMNL__ + +#if MACH_LDEBUG && CHECKLOCKS +/* + * Routines for general lock debugging. + */ + +/* Gets lock check flags in CR6: CR bits 24-27 */ + +#define CHECK_SETUP(rg) \ + lis rg,hi16(EXT(dgWork)) __ASMNL__ \ + ori rg,rg,lo16(EXT(dgWork)) __ASMNL__ \ + lbz rg,dgFlags(rg) __ASMNL__ \ + mtcrf 2,rg __ASMNL__ + + +/* + * Checks for expected lock types and calls "panic" on + * mismatch. Detects calls to Mutex functions with + * type simplelock and vice versa. 
+ */ +#define CHECK_MUTEX_TYPE() \ + bt 24+disLktypeb,1f __ASMNL__ \ + lwz r10,MUTEX_TYPE(r3) __ASMNL__ \ + cmpwi r10,MUTEX_TAG __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(not_a_mutex) __ASMNL__ \ + ori r3,r3,lo16(not_a_mutex) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: + + .data +not_a_mutex: + STRINGD "not a mutex!\n\000" + .text + +#define CHECK_SIMPLE_LOCK_TYPE() \ + bt 24+disLktypeb,1f __ASMNL__ \ + lwz r10,SLOCK_TYPE(r3) __ASMNL__ \ + cmpwi r10,USLOCK_TAG __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(not_a_slock) __ASMNL__ \ + ori r3,r3,lo16(not_a_slock) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: + + .data +not_a_slock: + STRINGD "not a simple lock!\n\000" + .text + +#define CHECK_NO_SIMPLELOCKS() \ + bt 24+disLkNmSimpb,2f __ASMNL__ \ + mfmsr r11 __ASMNL__ \ + rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \ + mtmsr r10 __ASMNL__ \ + mfsprg r10,0 __ASMNL__ \ + lwz r10,PP_CPU_DATA(r10) __ASMNL__ \ + lwz r10,CPU_SIMPLE_LOCK_COUNT(r10) __ASMNL__ \ + cmpwi r10,0 __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(simple_locks_held) __ASMNL__ \ + ori r3,r3,lo16(simple_locks_held) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: __ASMNL__ \ + mtmsr r11 __ASMNL__ \ +2: + + .data +simple_locks_held: + STRINGD "simple locks held!\n\000" + .text + +/* + * Verifies return to the correct thread in "unlock" situations. 
+ */ + +#define CHECK_THREAD(thread_offset) \ + bt 24+disLkThreadb,2f __ASMNL__ \ + mfmsr r11 __ASMNL__ \ + rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \ + mtmsr r10 __ASMNL__ \ + mfsprg r10,0 __ASMNL__ \ + lwz r10,PP_CPU_DATA(r10) __ASMNL__ \ + lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \ + cmpwi r10,0 __ASMNL__ \ + beq- 1f __ASMNL__ \ + lwz r9,thread_offset(r3) __ASMNL__ \ + cmpw r9,r10 __ASMNL__ \ + beq+ 1f __ASMNL__ \ + lis r3,hi16(wrong_thread) __ASMNL__ \ + ori r3,r3,lo16(wrong_thread) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: __ASMNL__ \ + mtmsr r11 __ASMNL__ \ +2: + .data +wrong_thread: + STRINGD "wrong thread!\n\000" + .text + +#define CHECK_MYLOCK(thread_offset) \ + bt 24+disLkMyLckb,2f __ASMNL__ \ + mfmsr r11 __ASMNL__ \ + rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 __ASMNL__ \ + mtmsr r10 __ASMNL__ \ + mfsprg r10,0 __ASMNL__ \ + lwz r10,PP_CPU_DATA(r10) __ASMNL__ \ + lwz r10,CPU_ACTIVE_THREAD(r10) __ASMNL__ \ + cmpwi r10,0 __ASMNL__ \ + beq- 1f __ASMNL__ \ + lwz r9, thread_offset(r3) __ASMNL__ \ + cmpw r9,r10 __ASMNL__ \ + bne+ 1f __ASMNL__ \ + lis r3, HIGH_ADDR(mylock_attempt) __ASMNL__ \ + ori r3,r3,LOW_ADDR(mylock_attempt) __ASMNL__ \ + bl EXT(panic) __ASMNL__ \ + lwz r3,FM_ARG0(r1) __ASMNL__ \ +1: __ASMNL__ \ + mtmsr r11 __ASMNL__ \ +2: + + .data +mylock_attempt: + STRINGD "mylock attempt!\n\000" + .text + +#else /* MACH_LDEBUG */ + +#define CHECK_SETUP(rg) +#define CHECK_MUTEX_TYPE() +#define CHECK_SIMPLE_LOCK_TYPE() +#define CHECK_THREAD(thread_offset) +#define CHECK_NO_SIMPLELOCKS() +#define CHECK_MYLOCK(thread_offset) + +#endif /* MACH_LDEBUG */ + +/* + * void hw_lock_init(hw_lock_t) + * + * Initialize a hardware lock. These locks should be cache aligned and a multiple + * of cache size. + */ + +ENTRY(hw_lock_init, TAG_NO_FRAME_USED) + + li r0, 0 /* set lock to free == 0 */ + stw r0, 0(r3) /* Initialize the lock */ + blr + +/* + * void hw_lock_unlock(hw_lock_t) + * + * Unconditionally release lock. 
+ * MACH_RT: release preemption level. + */ + + + .align 5 + .globl EXT(hw_lock_unlock) + +LEXT(hw_lock_unlock) + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + lis r5,0xFFFF /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + sync /* Flush writes done under lock */ + li r0, 0 /* set lock to free */ + stw r0, 0(r3) + +#if MACH_RT + b epStart /* Go enable preemption... */ +#else + blr +#endif + + +/* + * Special case for internal use. Uses same lock code, but sets up so + * that there will be no disabling of preemption after locking. Generally + * used for mutex locks when obtaining the interlock although there is + * nothing stopping other uses. + */ + +lockLock: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */ + ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */ + cmplwi cr1,r1,0 /* Set flag to disable disable preemption */ + lwz r4,0(r4) /* Get the timerout value */ + b lockComm /* Join on up... */ + +/* + * void hw_lock_lock(hw_lock_t) + * + * Acquire lock, spinning until it becomes available. + * MACH_RT: also return with preemption disabled. + * Apparently not used except by mach_perf. + * We will just set a default timeout and jump into the NORMAL timeout lock. + */ + + .align 5 + .globl EXT(hw_lock_lock) + +LEXT(hw_lock_lock) + +lockDisa: lis r4,HIGH_ADDR(EXT(LockTimeOut)) /* Get the high part */ + ori r4,r4,LOW_ADDR(EXT(LockTimeOut)) /* And the low part */ + cmplw cr1,r1,r1 /* Set flag to enable disable preemption */ + lwz r4,0(r4) /* Get the timerout value */ + b lockComm /* Join on up... */ + +/* + * unsigned int hw_lock_to(hw_lock_t, unsigned int timeout) + * + * Try to acquire spin-lock. Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * We try fairly hard to get this lock. We disable for interruptions, but + * reenable after a "short" timeout (128 ticks, we may want to change this). 
+ * After checking to see if the large timeout value (passed in) has expired and a + * sufficient number of cycles have gone by (to insure pending 'rupts are taken), + * we return either in abject failure, or disable and go back to the lock sniff routine. + * If the sniffer finds the lock free, it jumps right up and tries to grab it. + * + * One programming note: NEVER DO NOTHING IN HERE NO HOW THAT WILL FORCE US TO CALL + * THIS WITH TRANSLATION OR INTERRUPTIONS EITHER ON OR OFF, GOSH DARN IT! + * + */ + .align 5 + .globl EXT(hw_lock_to) + +LEXT(hw_lock_to) + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + lis r5,0xEEEE /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + +#if CHECKNMI + mflr r12 ; (TEST/DEBUG) + bl EXT(ml_sense_nmi) ; (TEST/DEBUG) + mtlr r12 ; (TEST/DEBUG) +#endif + + cmplw cr1,r1,r1 /* Set flag to enable disable preemption */ + +lockComm: mfmsr r9 /* Get the MSR value */ + mr r5,r3 /* Get the address of the lock */ + rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */ + + mtmsr r7 /* Turn off interruptions */ + mftb r8 /* Get the low part of the time base */ + + lwarx r6,0,r5 ; ? + +lcktry: lwarx r6,0,r5 /* Grab the lock value */ + li r3,1 /* Use part of the delay time */ + mr. r6,r6 /* Is it locked? */ + bne- lcksniff /* Yeah, wait for it to clear... */ + stwcx. r3,0,r5 /* Try to seize that there durn lock */ +#if MACH_RT + bne- lcktry /* Couldn't get it... */ + beq+ cr1,daPreComm /* We got it, go disable preemption if we're supposed to... */ + mtmsr r9 ; Restore interrupt state + blr /* Go on home... */ +#else /* MACH_RT */ + beq+ lckgot /* We got it, yahoo... */ + b lcktry /* Just start up again if the store failed... */ +#endif /* MACH_RT */ + + .align 5 + +lcksniff: lwz r3,0(r5) /* Get that lock in here */ + mr. r3,r3 /* Is it free yet? */ + beq+ lcktry /* Yeah, try for it again... 
*/ + + mftb r10 /* Time stamp us now */ + sub r10,r10,r8 /* Get the elapsed time */ + cmplwi r10,128 /* Have we been spinning for 128 tb ticks? */ + blt+ lcksniff /* Not yet... */ + + mtmsr r9 /* Say, any interrupts pending? */ + +/* The following instructions force the pipeline to be interlocked to that only one + instruction is issued per cycle. The insures that we stay enabled for a long enough + time; if it's too short, pending interruptions will not have a chance to be taken */ + + subi r4,r4,128 /* Back off elapsed time from timeout value */ + or r4,r4,r4 /* Do nothing here but force a single cycle delay */ + mr. r4,r4 /* See if we used the whole timeout */ + li r3,0 /* Assume a timeout return code */ + or r4,r4,r4 /* Do nothing here but force a single cycle delay */ + + ble- lckfail /* We failed */ + mtmsr r7 /* Disable for interruptions */ + mftb r8 /* Get the low part of the time base */ + b lcksniff /* Now that we've opened an enable window, keep trying... */ + +#if !MACH_RT +lckgot: mtmsr r9 /* Enable for interruptions */ + isync /* Make sure we don't use a speculativily loaded value */ + blr +#endif /* !MACH_RT */ + +lckfail: /* We couldn't get the lock */ + li r3,0 /* Set failure return code */ + blr /* Return, head hanging low... */ + + +/* + * unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout) + * + * Try to acquire spin-lock. The second parameter is the bit mask to test and set. + * multiple bits may be set. Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * We try fairly hard to get this lock. We disable for interruptions, but + * reenable after a "short" timeout (128 ticks, we may want to shorten this). + * After checking to see if the large timeout value (passed in) has expired and a + * sufficient number of cycles have gone by (to insure pending 'rupts are taken), + * we return either in abject failure, or disable and go back to the lock sniff routine. 
+ * If the sniffer finds the lock free, it jumps right up and tries to grab it. + * + * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY + * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND + * RESTORE FROM THE STACK. + * + */ + + .align 5 + + nop ; Force loop alignment to cache line + nop + nop + nop + + .globl EXT(hw_lock_bit) + +LEXT(hw_lock_bit) + + mfmsr r9 /* Get the MSR value */ + rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Get MSR that is uninterruptible */ + + mtmsr r7 /* Turn off interruptions */ + + mftb r8 /* Get the low part of the time base */ + + lwarx r0,0,r3 ; ? + +bittry: lwarx r6,0,r3 /* Grab the lock value */ + and. r0,r6,r4 /* See if any of the lock bits are on */ + or r6,r6,r4 /* Turn on the lock bits */ + bne- bitsniff /* Yeah, wait for it to clear... */ + stwcx. r6,0,r3 /* Try to seize that there durn lock */ + beq+ bitgot /* We got it, yahoo... */ + b bittry /* Just start up again if the store failed... */ + + .align 5 + +bitsniff: lwz r6,0(r3) /* Get that lock in here */ + and. r0,r6,r4 /* See if any of the lock bits are on */ + beq+ bittry /* Yeah, try for it again... */ + + mftb r6 /* Time stamp us now */ + sub r6,r6,r8 /* Get the elapsed time */ + cmplwi r6,128 /* Have we been spinning for 128 tb ticks? */ + blt+ bitsniff /* Not yet... */ + + mtmsr r9 /* Say, any interrupts pending? */ + +/* The following instructions force the pipeline to be interlocked to that only one + instruction is issued per cycle. The insures that we stay enabled for a long enough + time. If it's too short, pending interruptions will not have a chance to be taken +*/ + + subi r5,r5,128 /* Back off elapsed time from timeout value */ + or r5,r5,r5 /* Do nothing here but force a single cycle delay */ + mr. 
r5,r5 /* See if we used the whole timeout */ + or r5,r5,r5 /* Do nothing here but force a single cycle delay */ + + ble- bitfail /* We failed */ + mtmsr r7 /* Disable for interruptions */ + mftb r8 /* Get the low part of the time base */ + b bitsniff /* Now that we've opened an enable window, keep trying... */ + + .align 5 + +bitgot: mtmsr r9 /* Enable for interruptions */ + li r3,1 /* Set good return code */ + isync /* Make sure we don't use a speculativily loaded value */ + blr + +bitfail: li r3,0 /* Set failure return code */ + blr /* Return, head hanging low... */ + + +/* + * unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit) + * + * Release bit based spin-lock. The second parameter is the bit mask to clear. + * Multiple bits may be cleared. + * + * NOTE WELL!!!! THE ROUTINE hw_lock_phys_vir KNOWS WHAT REGISTERS THIS GUY + * USES. THIS SAVES A TRANSLATION OFF TO ON TRANSITION AND BACK AND A SAVE AND + * RESTORE FROM THE STACK. + */ + + .align 5 + .globl EXT(hw_unlock_bit) + +LEXT(hw_unlock_bit) + + sync + lwarx r0,0,r3 ; ? + +ubittry: lwarx r0,0,r3 /* Grab the lock value */ + andc r0,r0,r4 /* Clear the lock bits */ + stwcx. r0,0,r3 /* Try to clear that there durn lock */ + bne- ubittry /* Try again, couldn't save it... */ + + blr /* Leave... */ + +/* + * unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value, + * unsigned int newb, unsigned int timeout) + * + * Try to acquire spin-lock. The second parameter is the bit mask to check. + * The third is the value of those bits and the 4th is what to set them to. + * Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * We try fairly hard to get this lock. We disable for interruptions, but + * reenable after a "short" timeout (128 ticks, we may want to shorten this). 
+ * After checking to see if the large timeout value (passed in) has expired and a + * sufficient number of cycles have gone by (to insure pending 'rupts are taken), + * we return either in abject failure, or disable and go back to the lock sniff routine. + * If the sniffer finds the lock free, it jumps right up and tries to grab it. + * + */ + + .align 5 + + nop ; Force loop alignment to cache line + nop + nop + nop + + .globl EXT(hw_lock_mbits) + +LEXT(hw_lock_mbits) + + mfmsr r9 ; Get the MSR value + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Get MSR that is uninterruptible + + mtmsr r8 ; Turn off interruptions + + mftb r10 ; Get the low part of the time base + + lwarx r0,0,r3 ; ? + +mbittry: lwarx r12,0,r3 ; Grab the lock value + and r0,r12,r4 ; Clear extra bits + or r12,r12,r6 ; Turn on the lock bits + cmplw r0,r5 ; Are these the right bits? + bne- mbitsniff ; Nope, wait for it to clear... + stwcx. r12,0,r3 ; Try to seize that there durn lock + beq+ mbitgot ; We got it, yahoo... + b mbittry ; Just start up again if the store failed... + + .align 5 + +mbitsniff: lwz r12,0(r3) ; Get that lock in here + and r0,r12,r4 ; Clear extra bits + or r12,r12,r6 ; Turn on the lock bits + cmplw r0,r5 ; Are these the right bits? + beq+ mbittry ; Yeah, try for it again... + + mftb r11 ; Time stamp us now + sub r11,r11,r10 ; Get the elapsed time + cmplwi r11,128 ; Have we been spinning for 128 tb ticks? + blt+ mbitsniff ; Not yet... + + mtmsr r9 ; Say, any interrupts pending? + +; The following instructions force the pipeline to be interlocked to that only one +; instruction is issued per cycle. The insures that we stay enabled for a long enough +; time. If it is too short, pending interruptions will not have a chance to be taken + + subi r7,r7,128 ; Back off elapsed time from timeout value + or r7,r7,r7 ; Do nothing here but force a single cycle delay + mr. 
r7,r7 ; See if we used the whole timeout + or r7,r7,r7 ; Do nothing here but force a single cycle delay + + ble- mbitfail ; We failed + mtmsr r8 ; Disable for interruptions + mftb r10 ; Get the low part of the time base + b mbitsniff ; Now that we have opened an enable window, keep trying... + + .align 5 + +mbitgot: mtmsr r9 ; Enable for interruptions + li r3,1 ; Set good return code + isync ; Make sure we do not use a speculativily loaded value + blr + +mbitfail: li r3,0 ; Set failure return code + blr ; Return, head hanging low... + + +/* + * unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout) + * + * Spin until word hits 0 or timeout. + * Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * + * The theory is that a processor will bump a counter as it signals + * other processors. Then it will spin untl the counter hits 0 (or + * times out). The other processors, as it receives the signal will + * decrement the counter. + * + * The other processors use interlocked update to decrement, this one + * does not need to interlock. + * + */ + + .align 5 + + .globl EXT(hw_cpu_sync) + +LEXT(hw_cpu_sync) + + mftb r10 ; Get the low part of the time base + mr r9,r3 ; Save the sync word address + li r3,1 ; Assume we work + +csynctry: lwz r11,0(r9) ; Grab the sync value + mr. r11,r11 ; Counter hit 0? + beqlr- ; Yeah, we are sunk... + mftb r12 ; Time stamp us now + + sub r12,r12,r10 ; Get the elapsed time + cmplw r4,r12 ; Have we gone too long? + bge+ csynctry ; Not yet... + + li r3,0 ; Set failure... + blr ; Return, head hanging low... + +/* + * unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout) + * + * Spin until word changes or timeout. + * Return success (1) or failure (0). + * Attempt will fail after timeout ticks of the timebase. + * + * This is used to insure that a processor passes a certain point. + * An example of use is to monitor the last interrupt time in the + * per_proc block. 
This can be used to insure that the other processor
+ * has seen at least one interrupt since a specific time.
+ *
+ */
+
+ .align 5
+
+ .globl EXT(hw_cpu_wcng) ; Fix: export this routine (was a stray duplicate .globl EXT(hw_cpu_sync), leaving hw_cpu_wcng unexported)
+
+LEXT(hw_cpu_wcng)
+
+ mftb r10 ; Get the low part of the time base
+ mr r9,r3 ; Save the sync word address
+ li r3,1 ; Assume we work
+
+wcngtry: lwz r11,0(r9) ; Grab the value
+ cmplw r11,r4 ; Do they still match?
+ bnelr- ; Nope, cool...
+ mftb r12 ; Time stamp us now
+
+ sub r12,r12,r10 ; Get the elapsed time
+ cmplw r5,r12 ; Have we gone too long?
+ bge+ wcngtry ; Not yet...
+
+ li r3,0 ; Set failure...
+ blr ; Return, head hanging low...
+
+
+/*
+ * unsigned int hw_lock_try(hw_lock_t)
+ *
+ * try to acquire spin-lock. Return success (1) or failure (0)
+ * MACH_RT: returns with preemption disabled on success.
+ *
+ */
+ .align 5
+ .globl EXT(hw_lock_try)
+
+LEXT(hw_lock_try)
+
+#if 0
+ lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
+ lis r5,0x9999 /* (TEST/DEBUG) */
+ oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
+ sc /* (TEST/DEBUG) */
+#endif
+ mfmsr r9 /* Save the MSR value */
+ li r4, 1 /* value to be stored... 1==taken */
+ rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruption bit */
+
+#if MACH_LDEBUG
+ lis r5, 0x10 /* roughly 1E6 */
+ mtctr r5
+#endif /* MACH_LDEBUG */
+
+ mtmsr r7 /* Disable interruptions and thus, preemption */
+
+ lwarx r5,0,r3 ; ?
+
+.L_lock_try_loop:
+
+#if MACH_LDEBUG
+ bdnz+ 0f /* Count attempts */
+ mtmsr r9 /* Restore enablement */
+ BREAKPOINT_TRAP /* Get to debugger */
+ mtmsr r7 /* Disable interruptions and thus, preemption */
+0:
+#endif /* MACH_LDEBUG */
+
+ lwarx r5,0,r3 /* Ld from addr of arg and reserve */
+
+ cmpwi r5, 0 /* TEST... */
+ bne- .L_lock_try_failed /* branch if taken. Predict free */
+
+ stwcx.
r4, 0,r3 /* And SET (if still reserved) */ + mfsprg r6,0 /* Get the per_proc block */ + bne- .L_lock_try_loop /* If set failed, loop back */ + + lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ + isync + +#if MACH_RT + lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */ + addi r5,r5,1 /* Bring up the disable count */ + stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */ + +#endif /* MACH_RT */ + + mtmsr r9 /* Allow interruptions now */ + li r3,1 /* Set that the lock was free */ + blr + +.L_lock_try_failed: + mtmsr r9 /* Allow interruptions now */ + li r3,0 /* FAILURE - lock was taken */ + blr + +/* + * unsigned int hw_lock_held(hw_lock_t) + * + * Return 1 if lock is held + * MACH_RT: doesn't change preemption state. + * N.B. Racy, of course. + * + */ + .align 5 + .globl EXT(hw_lock_held) + +LEXT(hw_lock_held) + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + lis r5,0x8888 /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + isync /* Make sure we don't use a speculativily fetched lock */ + lwz r3, 0(r3) /* Return value of lock */ + blr + +/* + * unsigned int hw_compare_and_store(unsigned int old, unsigned int new, unsigned int *area) + * + * Compare old to area if equal, store new, and return true + * else return false and no store + * This is an atomic operation + * + */ + .align 5 + .globl EXT(hw_compare_and_store) + +LEXT(hw_compare_and_store) + + mr r6,r3 /* Save the old value */ + + lwarx r9,0,r5 ; ? + +cstry: lwarx r9,0,r5 /* Grab the area value */ + li r3,1 /* Assume it works */ + cmplw cr0,r9,r6 /* Does it match the old value? */ + bne- csfail /* No, it must have changed... */ + stwcx. r4,0,r5 /* Try to save the new value */ + bne- cstry /* Didn't get it, try again... */ + isync /* Just hold up prefetch */ + blr /* Return... */ + +csfail: li r3,0 /* Set failure */ + blr /* Better luck next time... 
*/ + + +/* + * unsigned int hw_atomic_add(unsigned int *area, int *val) + * + * Atomically add the second parameter to the first. + * Returns the result. + * + */ + .align 5 + .globl EXT(hw_atomic_add) + +LEXT(hw_atomic_add) + + mr r6,r3 /* Save the area */ + + lwarx r3,0,r6 ; ? + +addtry: lwarx r3,0,r6 /* Grab the area value */ + add r3,r3,r4 /* Add the value */ + stwcx. r3,0,r6 /* Try to save the new value */ + bne- addtry /* Didn't get it, try again... */ + blr /* Return... */ + + +/* + * unsigned int hw_atomic_sub(unsigned int *area, int *val) + * + * Atomically subtract the second parameter from the first. + * Returns the result. + * + */ + .align 5 + .globl EXT(hw_atomic_sub) + +LEXT(hw_atomic_sub) + + mr r6,r3 /* Save the area */ + + lwarx r3,0,r6 ; ? + +subtry: lwarx r3,0,r6 /* Grab the area value */ + sub r3,r3,r4 /* Subtract the value */ + stwcx. r3,0,r6 /* Try to save the new value */ + bne- subtry /* Didn't get it, try again... */ + blr /* Return... */ + + +/* + * void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp) + * + * Atomically inserts the element at the head of the list + * anchor is the pointer to the first element + * element is the pointer to the element to insert + * disp is the displacement into the element to the chain pointer + * + */ + .align 5 + .globl EXT(hw_queue_atomic) + +LEXT(hw_queue_atomic) + + mr r7,r4 /* Make end point the same as start */ + mr r8,r5 /* Copy the displacement also */ + b hw_queue_comm /* Join common code... 
*/ + +/* + * void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp) + * + * Atomically inserts the list of elements at the head of the list + * anchor is the pointer to the first element + * first is the pointer to the first element to insert + * last is the pointer to the last element to insert + * disp is the displacement into the element to the chain pointer + * + */ + .align 5 + .globl EXT(hw_queue_atomic_list) + +LEXT(hw_queue_atomic_list) + + mr r7,r5 /* Make end point the same as start */ + mr r8,r6 /* Copy the displacement also */ + +hw_queue_comm: + lwarx r9,0,r3 ; ? + +hw_queue_comm2: + lwarx r9,0,r3 /* Pick up the anchor */ + stwx r9,r8,r7 /* Chain that to the end of the new stuff */ + stwcx. r4,0,r3 /* Try to chain into the front */ + bne- hw_queue_comm2 /* Didn't make it, try again... */ + + blr /* Return... */ + +/* + * unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp) + * + * Atomically removes the first element in a list and returns it. + * anchor is the pointer to the first element + * disp is the displacement into the element to the chain pointer + * Returns element if found, 0 if empty. + * + */ + .align 5 + .globl EXT(hw_dequeue_atomic) + +LEXT(hw_dequeue_atomic) + + mr r5,r3 /* Save the anchor */ + +hw_dequeue_comm: + lwarx r9,0,r3 ; ? + +hw_dequeue_comm2: + lwarx r3,0,r5 /* Pick up the anchor */ + mr. r3,r3 /* Is the list empty? */ + beqlr- /* Leave it list empty... */ + lwzx r9,r4,r3 /* Get the next in line */ + stwcx. r9,0,r5 /* Try to chain into the front */ + beqlr+ ; Got the thing, go away with it... + b hw_dequeue_comm2 ; Did not make it, try again... 
+ +/* + * void mutex_init(mutex_t* l, etap_event_t etap) + */ + +ENTRY(mutex_init,TAG_NO_FRAME_USED) + + PROLOG(0) + li r10, 0 + stw r10, MUTEX_ILK(r3) /* clear interlock */ + stw r10, MUTEX_LOCKED(r3) /* clear locked flag */ + sth r10, MUTEX_WAITERS(r3) /* init waiter count */ + +#if MACH_LDEBUG + stw r10, MUTEX_PC(r3) /* init caller pc */ + stw r10, MUTEX_THREAD(r3) /* and owning thread */ + li r10, MUTEX_TAG + stw r10, MUTEX_TYPE(r3) /* set lock type */ +#endif /* MACH_LDEBUG */ + +#if ETAP_LOCK_TRACE + bl EXT(etap_mutex_init) /* init ETAP data */ +#endif /* ETAP_LOCK_TRACE */ + + EPILOG + blr + +/* + * void _mutex_lock(mutex_t*) + */ + + .align 5 + .globl EXT(_mutex_lock) + +LEXT(_mutex_lock) + +#if CHECKNMI + mflr r12 ; (TEST/DEBUG) + bl EXT(ml_sense_nmi) ; (TEST/DEBUG) + mtlr r12 ; (TEST/DEBUG) +#endif + + PROLOG(12) + +#if ETAP_LOCK_TRACE + li r0, 0 + stw r0,SWT_HI(r1) /* set wait time to 0 (HI) */ + stw r0,SWT_LO(r1) /* set wait time to 0 (LO) */ + stw r0,MISSED(r1) /* clear local miss marker */ +#endif /* ETAP_LOCK_TRACE */ + + CHECK_SETUP(r12) + CHECK_MUTEX_TYPE() + CHECK_NO_SIMPLELOCKS() + +.L_ml_retry: +#if 0 + mfsprg r4,0 /* (TEST/DEBUG) */ + lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */ + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */ + lis r5,0xAAAA /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + + bl lockDisa /* Go get a lock on the mutex's interlock lock */ + mr. r4,r3 /* Did we get it? */ + lwz r3,FM_ARG0(r1) /* Restore the lock address */ + bne+ mlGotInt /* We got it just fine... 
*/ + + lis r3,HIGH_ADDR(mutex_failed1) ; Get the failed mutex message + ori r3,r3,LOW_ADDR(mutex_failed1) ; Get the failed mutex message + bl EXT(panic) ; Call panic + BREAKPOINT_TRAP ; We die here anyway, can not get the lock + + .data +mutex_failed1: + STRINGD "We can't get a mutex interlock lock on mutex_lock\n\000" + .text + +mlGotInt: + +/* Note that there is no reason to do a load and reserve here. We already + hold the interlock lock and no one can touch this field unless they + have that, so, we're free to play */ + + lwz r4,MUTEX_LOCKED(r3) /* Get the mutex's lock field */ + + li r10,1 /* Set the lock value */ + + mr. r4,r4 /* So, can we have it? */ + bne- mlInUse /* Nope, sombody's playing already... */ + + stw r10,MUTEX_LOCKED(r3) /* Take it unto ourselves */ + +#if MACH_LDEBUG + mfmsr r11 + rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 + mtmsr r10 + mfsprg r9,0 /* Get the per_proc block */ + lwz r10,0(r1) /* Get previous save frame */ + lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */ + lwz r10,FM_LR_SAVE(r10) /* Get our caller's address */ + lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */ + stw r10,MUTEX_PC(r3) /* Save our caller */ + mr. r8,r8 /* Is there any thread? */ + stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */ + beq- .L_ml_no_active_thread /* No owning thread... */ + lwz r9,THREAD_MUTEX_COUNT(r8) /* Get the mutex count */ + addi r9,r9,1 /* Bump it up */ + stw r9,THREAD_MUTEX_COUNT(r8) /* Stash it back */ +.L_ml_no_active_thread: + mtmsr r11 +#endif /* MACH_LDEBUG */ + + li r10,0 /* Get the unlock value */ + sync /* Push it all out */ + stw r10,MUTEX_ILK(r3) /* free the interlock */ + +#if ETAP_LOCK_TRACE + mflr r4 + lwz r5,SWT_HI(r1) + lwz r6,SWT_LO(r1) + bl EXT(etap_mutex_hold) /* collect hold timestamp */ +#endif /* ETAP_LOCK_TRACE */ + + EPILOG /* Restore all saved registers */ + +#if MACH_RT + b epStart /* Go enable preemption... */ +#else + blr /* Return... 
*/
+#endif
+
+/*
+ * We come to here when we have a resource conflict. In other words,
+ * the mutex is held.
+ */
+
+mlInUse:
+
+#if ETAP_LOCK_TRACE
+ lwz r7,MISSED(r1)
+ cmpwi r7,0 /* did we already take a wait timestamp ? */
+ bne .L_ml_block /* yup. carry-on */
+ bl EXT(etap_mutex_miss) /* get wait timestamp */
+ stw r3,SWT_HI(r1) /* store timestamp */
+ stw r4,SWT_LO(r1)
+ li r7, 1 /* mark wait timestamp as taken */
+ stw r7,MISSED(r1)
+ lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
+.L_ml_block:
+#endif /* ETAP_LOCK_TRACE */
+
+ CHECK_SETUP(r12)
+ CHECK_MYLOCK(MUTEX_THREAD) /* Assert we don't own the lock already */
+
+
+/* Note that we come in here with the interlock set. The wait routine
+ * will unlock it before waiting.
+ */
+ bl EXT(mutex_lock_wait) /* Wait for our turn at the lock */
+
+ lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */
+ b .L_ml_retry /* and try again... */
+
+
+/*
+ * void _mutex_try(mutex_t*)
+ *
+ */
+
+ .align 5
+ .globl EXT(_mutex_try)
+
+LEXT(_mutex_try)
+
+ PROLOG(8) /* reserve space for SWT_HI and SWT_LO */
+
+#if ETAP_LOCK_TRACE
+ li r5, 0
+ stw r5, SWT_HI(r1) /* set wait time to 0 (HI) -- fix: was STW_HI, a typo that breaks assembly when ETAP_LOCK_TRACE is enabled */
+ stw r5, SWT_LO(r1) /* set wait time to 0 (LO) */
+#endif /* ETAP_LOCK_TRACE */
+
+#if 0
+ lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */
+ lis r5,0xBBBB /* (TEST/DEBUG) */
+ oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */
+ sc /* (TEST/DEBUG) */
+#endif
+ CHECK_SETUP(r12)
+ CHECK_MUTEX_TYPE()
+ CHECK_NO_SIMPLELOCKS()
+
+ lwz r6,MUTEX_LOCKED(r3) /* Quick check */
+ mr. r6,r6 /* to see if someone has this lock already */
+ bne- mtFail /* Someone's got it already... */
+
+ bl lockDisa /* Go get a lock on the mutex's interlock lock */
+ mr. r4,r3 /* Did we get it? */
+ lwz r3,FM_ARG0(r1) /* Restore the lock address */
+ bne+ mtGotInt /* We got it just fine...
*/ + + lis r3,HIGH_ADDR(mutex_failed2) ; Get the failed mutex message + ori r3,r3,LOW_ADDR(mutex_failed2) ; Get the failed mutex message + bl EXT(panic) ; Call panic + BREAKPOINT_TRAP ; We die here anyway, can not get the lock + + .data +mutex_failed2: + STRINGD "We can't get a mutex interlock lock on mutex_try\n\000" + .text + +mtGotInt: + +/* Note that there is no reason to do a load and reserve here. We already + hold the interlock and no one can touch at this field unless they + have that, so, we're free to play */ + + lwz r4,MUTEX_LOCKED(r3) /* Get the mutex's lock field */ + + li r10,1 /* Set the lock value */ + + mr. r4,r4 /* So, can we have it? */ + bne- mtInUse /* Nope, sombody's playing already... */ + + stw r10,MUTEX_LOCKED(r3) /* Take it unto ourselves */ + +#if MACH_LDEBUG + mfmsr r11 + rlwinm r10,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 + mtmsr r10 + mfsprg r9,0 /* Get the per_proc block */ + lwz r10,0(r1) /* Get previous save frame */ + lwz r9,PP_CPU_DATA(r9) /* Point to the cpu data area */ + lwz r10,FM_LR_SAVE(r10) /* Get our caller's address */ + lwz r8, CPU_ACTIVE_THREAD(r9) /* Get the active thread */ + stw r10,MUTEX_PC(r3) /* Save our caller */ + mr. r8,r8 /* Is there any thread? */ + stw r8,MUTEX_THREAD(r3) /* Set the mutex's holding thread */ + beq- .L_mt_no_active_thread /* No owning thread... */ + lwz r9, THREAD_MUTEX_COUNT(r8) /* Get the mutex count */ + addi r9, r9, 1 /* Bump it up */ + stw r9, THREAD_MUTEX_COUNT(r8) /* Stash it back */ +.L_mt_no_active_thread: + mtmsr r11 +#endif /* MACH_LDEBUG */ + + li r10,0 /* Get the unlock value */ + sync /* Push it all out */ + stw r10,MUTEX_ILK(r3) /* free the interlock */ + +#if ETAP_LOCK_TRACE + lwz r4,0(r1) /* Back chain the stack */ + lwz r5,SWT_HI(r1) + lwz r4,FM_LR_SAVE(r4) /* Get our caller's address */ + lwz r6,SWT_LO(r1) + bl EXT(etap_mutex_hold) /* collect hold timestamp */ +#endif /* ETAP_LOCK_TRACE */ + +#if MACH_RT + bl epStart /* Go enable preemption... 
*/ +#endif + li r3, 1 + EPILOG /* Restore all saved registers */ + blr /* Return... */ + +/* + * We come to here when we have a resource conflict. In other words, + * the mutex is held. + */ + +mtInUse: li r10,0 /* Get the unlock value */ + sync /* Push it all out */ + stw r10,MUTEX_ILK(r3) /* free the interlock */ +#if MACH_RT + bl epStart /* Go enable preemption... */ +#endif + +mtFail: li r3,0 /* Set failure code */ + EPILOG /* Restore all saved registers */ + blr /* Return... */ + + +/* + * void mutex_unlock(mutex_t* l) + */ + + .align 5 + .globl EXT(mutex_unlock) + +LEXT(mutex_unlock) + + PROLOG(0) + +#if ETAP_LOCK_TRACE + bl EXT(etap_mutex_unlock) /* collect ETAP data */ + lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */ +#endif /* ETAP_LOCK_TRACE */ + + CHECK_SETUP(r12) + CHECK_MUTEX_TYPE() + CHECK_THREAD(MUTEX_THREAD) + +#if 0 + mfsprg r4,0 /* (TEST/DEBUG) */ + lwz r4,PP_CPU_DATA(r4) /* (TEST/DEBUG) */ + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + lwz r4,CPU_ACTIVE_THREAD(r4) /* (TEST/DEBUG) */ + lis r5,0xCCCC /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + bl lockDisa /* Go get a lock on the mutex's interlock lock */ + mr. r4,r3 /* Did we get it? */ + lwz r3,FM_ARG0(r1) /* Restore the lock address */ + bne+ muGotInt /* We got it just fine... */ + + lis r3,HIGH_ADDR(mutex_failed3) ; Get the failed mutex message + ori r3,r3,LOW_ADDR(mutex_failed3) ; Get the failed mutex message + bl EXT(panic) ; Call panic + BREAKPOINT_TRAP ; We die here anyway, can not get the lock + + .data +mutex_failed3: + STRINGD "We can't get a mutex interlock lock on mutex_unlock\n\000" + .text + + +muGotInt: + lhz r10,MUTEX_WAITERS(r3) /* are there any waiters ? */ + cmpwi r10,0 + beq+ muUnlock /* Nope, we're done... 
*/ + + bl EXT(mutex_unlock_wakeup) /* yes, wake a thread */ + lwz r3,FM_ARG0(r1) /* restore r3 (saved in prolog) */ + li r10,0 /* Get unlock value */ + +muUnlock: +#if MACH_LDEBUG + mfmsr r11 + rlwinm r9,r11,0,MSR_EE_BIT+1,MSR_EE_BIT-1 + mtmsr r9 + mfsprg r9,0 + lwz r9,PP_CPU_DATA(r9) + lwz r9,CPU_ACTIVE_THREAD(r9) + stw r10,MUTEX_THREAD(r3) /* disown thread */ + cmpwi r9,0 + beq- .L_mu_no_active_thread + lwz r8,THREAD_MUTEX_COUNT(r9) + subi r8,r8,1 + stw r8,THREAD_MUTEX_COUNT(r9) +.L_mu_no_active_thread: + mtmsr r11 +#endif /* MACH_LDEBUG */ + + stw r10,MUTEX_LOCKED(r3) /* release the mutex */ + sync /* Make sure it's all there before we release */ + stw r10,MUTEX_ILK(r3) /* unlock the interlock */ + + EPILOG /* Deal with the stack now, enable_preemption doesn't always want one */ +#if MACH_RT + b epStart /* Go enable preemption... */ +#else + blr /* Return... */ +#endif + +/* + * void interlock_unlock(hw_lock_t lock) + */ + + .align 5 + .globl EXT(interlock_unlock) + +LEXT(interlock_unlock) + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + lis r5,0xDDDD /* (TEST/DEBUG) */ + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + li r10,0 + sync + stw r10,0(r3) + +#if MACH_RT + b epStart /* Go enable preemption... */ +#else + blr /* Return... */ +#endif + +#if MACH_RT +/* + * Here is where we enable preemption. We need to be protected + * against ourselves, we can't chance getting interrupted and modifying + * our processor wide preemption count after we'sve loaded it up. So, + * we need to disable all 'rupts. Actually, we could use a compare + * and swap to do this, but, since there are no MP considerations + * (we are dealing with a CPU local field) it is much, much faster + * to disable. 
+ * + * Note that if we are not genned MP, the calls here will be no-opped via + * a #define and since the _mp forms are the same, likewise a #define + * will be used to route to the other forms + */ + +/* This version does not check if we get preempted or not */ + + + .align 4 + .globl EXT(_enable_preemption_no_check) + +LEXT(_enable_preemption_no_check) + cmplw cr1,r1,r1 /* Force zero cr so we know not to check if preempted */ + b epCommn /* Join up with the other enable code... */ + + +/* This version checks if we get preempted or not */ + + .align 5 + .globl EXT(_enable_preemption) + +LEXT(_enable_preemption) + +epStart: cmplwi cr1,r1,0 /* Force non-zero cr so we know to check if preempted */ + +/* + * Common enable preemption code + */ + +epCommn: mfmsr r9 /* Save the old MSR */ + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtmsr r8 /* Interrupts off */ + + mfsprg r3,0 /* Get the per_proc block */ + lwz r6,PP_CPU_DATA(r3) /* Get the pointer to the CPU data from per proc */ + li r8,-1 /* Get a decrimenter */ + lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */ + add. r5,r5,r8 /* Bring down the disable count */ +#if 0 + mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrpting too early + mr. r4,r4 ; (TEST/DEBUG) + beq- epskptrc0 ; (TEST/DEBUG) + lis r0,hi16(CutTrace) ; (TEST/DEBUG) + lis r4,0xBBBB ; (TEST/DEBUG) + oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +epskptrc0: mr. r5,r5 ; (TEST/DEBUG) +#endif +#if MACH_LDEBUG + blt- epTooFar /* Yeah, we did... */ +#endif /* MACH_LDEBUG */ + stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */ + + beq+ epCheckPreempt /* Go check if we need to be preempted... */ + +epNoCheck: mtmsr r9 /* Restore the interrupt level */ + blr /* Leave... 
*/ + +#if MACH_LDEBUG +epTooFar: + lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */ + lis r3,HIGH_ADDR(epTooFarStr) /* First half of panic string */ + ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */ + ori r3,r3,LOW_ADDR(epTooFarStr) /* Second half of panic string */ + mtlr r6 /* Get the address of the panic routine */ + mtmsr r9 /* Restore interruptions */ + blrl /* Panic... */ + + .data +epTooFarStr: + STRINGD "_enable_preemption: preemption_level <= 0!\000" + .text +#endif /* MACH_LDEBUG */ + + .align 5 + +epCheckPreempt: + lwz r7,PP_NEED_AST(r3) /* Get the AST request address */ + li r5,AST_URGENT /* Get the requests we do honor */ + lwz r7,0(r7) /* Get the actual, real live, extra special AST word */ + lis r0,HIGH_ADDR(DoPreemptCall) /* Just in case, get the top of firmware call */ + and. r7,r7,r5 ; Should we preempt? + ori r0,r0,LOW_ADDR(DoPreemptCall) /* Merge in bottom part */ + beq+ epCPno ; No preemption here... + + andi. r3,r9,lo16(MASK(MSR_EE)) ; We cannot preempt if interruptions are off + +epCPno: mtmsr r9 /* Allow interrupts if we can */ + beqlr+ ; We probably will not preempt... + sc /* Do the preemption */ + blr /* Now, go away now... */ + +/* + * Here is where we disable preemption. Since preemption is on a + * per processor basis (a thread runs on one CPU at a time) we don't + * need any cross-processor synchronization. We do, however, need to + * be interrupt safe, so we don't preempt while in the process of + * disabling it. We could use SPLs, but since we always want complete + * disablement, and this is platform specific code, we'll just kick the + * MSR. We'll save a couple of orders of magnitude over using SPLs. + */ + + .align 5 + + nop ; Use these 5 nops to force daPreComm + nop ; to a line boundary. 
+ nop + nop + nop + + .globl EXT(_disable_preemption) + +LEXT(_disable_preemption) + +daPreAll: mfmsr r9 /* Save the old MSR */ + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtmsr r8 /* Interrupts off */ + +daPreComm: mfsprg r6,0 /* Get the per_proc block */ + lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ + lwz r5,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */ + addi r5,r5,1 /* Bring up the disable count */ + stw r5,CPU_PREEMPTION_LEVEL(r6) /* Save it back */ +#if 0 + mfsprg r4,1 ; (TEST/DEBUG) Note the next 3 keep from interrpting too early + mr. r4,r4 ; (TEST/DEBUG) + beq- epskptrc1 ; (TEST/DEBUG) + lis r0,hi16(CutTrace) ; (TEST/DEBUG) + lis r4,0xAAAA ; (TEST/DEBUG) + oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) + sc ; (TEST/DEBUG) +epskptrc1: ; (TEST/DEBUG) +#endif + +; +; Set PREEMPTSTACK above to enable a preemption traceback stack. +; +; NOTE: make sure that PREEMPTSTACK in aligned_data is +; set the same as it is here. This is the number of +; traceback entries we can handle per processor +; +; A value of 0 disables the stack. +; +#if PREEMPTSTACK + cmplwi r5,PREEMPTSTACK ; Maximum depth + lwz r6,CPU_ACTIVE_THREAD(r6) ; Get the pointer to the currently active thread + bgt- nopredeb ; Too many to stack... + mr. r6,r6 ; During boot? + beq- nopredeb ; Yes, do not do backtrace... + lwz r6,THREAD_TOP_ACT(r6) ; Point to the active activation + lwz r6,ACT_MACT_PCB(r6) ; Get the last savearea used + mr. r0,r6 ; Any saved context? + beq- nosaveds ; No... + lwz r0,saver1(r6) ; Get end of savearea chain + +nosaveds: li r11,0 ; Clear callers callers callers return + li r10,0 ; Clear callers callers callers callers return + li r8,0 ; Clear callers callers callers callers callers return + lwz r2,0(r1) ; Get callers callers stack frame + lwz r12,8(r2) ; Get our callers return + lwz r4,0(r2) ; Back chain + + xor r2,r4,r2 ; Form difference + cmplwi r2,8192 ; Within a couple of pages? 
+ mr r2,r4 ; Move register + bge- nosaveher2 ; No, no back chain then... + lwz r11,8(r2) ; Get our callers return + lwz r4,0(r2) ; Back chain + + xor r2,r4,r2 ; Form difference + cmplwi r2,8192 ; Within a couple of pages? + mr r2,r4 ; Move register + bge- nosaveher2 ; No, no back chain then... + lwz r10,8(r2) ; Get our callers return + lwz r4,0(r2) ; Back chain + + xor r2,r4,r2 ; Form difference + cmplwi r2,8192 ; Within a couple of pages? + mr r2,r4 ; Move register + bge- nosaveher2 ; No, no back chain then... + lwz r8,8(r2) ; Get our callers return + +nosaveher2: + addi r5,r5,-1 ; Get index to slot + mfspr r6,pir ; Get our processor + mflr r4 ; Get our return + rlwinm r6,r6,8,0,23 ; Index to processor slot + lis r2,hi16(EXT(DBGpreempt)) ; Stack high order + rlwinm r5,r5,4,0,27 ; Index to stack slot + ori r2,r2,lo16(EXT(DBGpreempt)) ; Stack low order + add r2,r2,r5 ; Point to slot + add r2,r2,r6 ; Move to processor + stw r4,0(r2) ; Save our return + stw r11,4(r2) ; Save callers caller + stw r10,8(r2) ; Save callers callers caller + stw r8,12(r2) ; Save callers callers callers caller +nopredeb: +#endif + mtmsr r9 /* Allow interruptions now */ + + blr /* Return... */ + +/* + * Return the active thread for both inside and outside osfmk consumption + */ + + .align 5 + .globl EXT(current_thread) + +LEXT(current_thread) + + mfmsr r9 /* Save the old MSR */ + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtmsr r8 /* Interrupts off */ + mfsprg r6,0 /* Get the per_proc */ + lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ + lwz r3,CPU_ACTIVE_THREAD(r6) /* Get the active thread */ + mtmsr r9 /* Restore interruptions to entry */ + blr /* Return... 
*/ + + +/* + * Return the current preemption level + */ + + .align 5 + .globl EXT(get_preemption_level) + +LEXT(get_preemption_level) + + mfmsr r9 /* Save the old MSR */ + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtmsr r8 /* Interrupts off */ + mfsprg r6,0 /* Get the per_proc */ + lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ + lwz r3,CPU_PREEMPTION_LEVEL(r6) /* Get the preemption level */ + mtmsr r9 /* Restore interruptions to entry */ + blr /* Return... */ + + +/* + * Return the simple lock count + */ + + .align 5 + .globl EXT(get_simple_lock_count) + +LEXT(get_simple_lock_count) + + mfmsr r9 /* Save the old MSR */ + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtmsr r8 /* Interrupts off */ + mfsprg r6,0 /* Get the per_proc */ + lwz r6,PP_CPU_DATA(r6) /* Get the pointer to the CPU data from per proc */ + lwz r3,CPU_SIMPLE_LOCK_COUNT(r6) /* Get the simple lock count */ + mtmsr r9 /* Restore interruptions to entry */ + blr /* Return... */ + +#endif /* MACH_RT */ diff --git a/osfmk/ppc/hw_lock_types.h b/osfmk/ppc/hw_lock_types.h new file mode 100644 index 000000000..73ac50030 --- /dev/null +++ b/osfmk/ppc/hw_lock_types.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1998 Apple Computer + * All Rights Reserved + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#ifndef _PPC_HW_LOCK_TYPES_H_ +#define _PPC_HW_LOCK_TYPES_H_ + +/* + * The "hardware lock". Low-level locking primitives that + * MUST be exported by machine-dependent code; this abstraction + * must provide atomic, non-blocking mutual exclusion that + * is invulnerable to uniprocessor or SMP races, interrupts, + * traps or any other events. 
+ * + * hw_lock_data_t machine-specific lock data structure + * hw_lock_t pointer to hw_lock_data_t + * + * An implementation must export these data types and must + * also provide routines to manipulate them (see prototypes, + * below). These routines may be external, inlined, optimized, + * or whatever, based on the kernel configuration. In the event + * that the implementation wishes to define its own prototypes, + * macros, or inline functions, it may define LOCK_HW_PROTOS + * to disable the definitions below. + * + * Mach does not expect these locks to support statistics, + * debugging, tracing or any other complexity. In certain + * configurations, Mach will build other locking constructs + * on top of this one. A correctly functioning Mach port need + * only implement these locks to be successful. However, + * greater efficiency may be gained with additional machine- + * dependent optimizations for the locking constructs defined + * later in kern/lock.h. + */ + +struct hslock { + int lock_data; +}; +typedef struct hslock hw_lock_data_t, *hw_lock_t; +#define hw_lock_addr(hwl) (&((hwl).lock_data)) + +#endif /* _PPC_HW_LOCK_TYPES_H_ */ diff --git a/osfmk/ppc/hw_vm.s b/osfmk/ppc/hw_vm.s new file mode 100644 index 000000000..f525675f6 --- /dev/null +++ b/osfmk/ppc/hw_vm.s @@ -0,0 +1,3161 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#define PERFTIMES 0 + + .text + +/* + * + * Random notes and musings... + * + * Access to mappings via the PTEG hash must be done with the list locked. + * Access via the physical entries is controlled by the physent lock. + * Access to mappings is controlled by the PTEG lock once they are queued. + * If they are not on the list, they don't really exist, so + * only one processor at a time can find them, so no access control is needed. + * + * The second half of the PTE is kept in the physical entry. It is done this + * way, because there may be multiple mappings that refer to the same physical + * page (i.e., address aliases or synonymns). We must do it this way, because + * maintenance of the reference and change bits becomes nightmarish if each mapping + * has its own. One side effect of this, and not necessarily a bad one, is that + * all mappings for a single page can have a single WIMG, protection state, and RC bits. + * The only "bad" thing, is the reference bit. With a single copy, we can not get + * a completely accurate working set calculation, i.e., we can't tell which mapping was + * used to reference the page, all we can tell is that the physical page was + * referenced. + * + * The master copys of the reference and change bits are kept in the phys_entry. 
+ * Other than the reference and change bits, changes to the phys_entry are not + * allowed if it has any mappings. The master reference and change bits must be + * changed via atomic update. + * + * Invalidating a PTE merges the RC bits into the phys_entry. + * + * Before checking the reference and/or bits, ALL mappings to the physical page are + * invalidated. + * + * PTEs are never explicitly validated, they are always faulted in. They are also + * not visible outside of the hw_vm modules. Complete seperation of church and state. + * + * Removal of a mapping is invalidates its PTE. + * + * So, how do we deal with mappings to I/O space? We don't have a physent for it. + * Within the mapping is a copy of the second half of the PTE. This is used + * ONLY when there is no physical entry. It is swapped into the PTE whenever + * it is built. There is no need to swap it back out, because RC is not + * maintained for these mappings. + * + * So, I'm starting to get concerned about the number of lwarx/stcwx loops in + * this. Satisfying a mapped address with no stealing requires one lock. If we + * steal an entry, there's two locks and an atomic update. Invalidation of an entry + * takes one lock and, if there is a PTE, another lock and an atomic update. Other + * operations are multiples (per mapping) of the above. Maybe we should look for + * an alternative. So far, I haven't found one, but I haven't looked hard. + */ + + +/* hw_add_map(struct mapping *mp, space_t space, vm_offset_t va) - Adds a mapping + * + * Adds a mapping to the PTEG hash list. + * + * Interrupts must be disabled before calling. + * + * Using the space and the virtual address, we hash into the hash table + * and get a lock on the PTEG hash chain. Then we chain the + * mapping to the front of the list. 
+ * + */ + + .align 5 + .globl EXT(hw_add_map) + +LEXT(hw_add_map) + +#if PERFTIMES && DEBUG + mr r7,r3 + mflr r11 + li r3,20 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r7 + mtlr r11 +#endif + + mfmsr r0 /* Get the MSR */ + eqv r6,r6,r6 /* Fill the bottom with foxes */ + rlwinm r11,r4,6,6,25 /* Position the space for the VSID */ + mfspr r10,sdr1 /* Get hash table base and size */ + rlwimi r11,r5,30,2,5 /* Insert the segment no. to make a VSID */ + mfsprg r12,2 ; Get feature flags + rlwimi r6,r10,16,0,15 /* Make table size -1 out of mask */ + rlwinm r7,r5,26,10,25 /* Isolate the page index */ + or r8,r10,r6 /* Point to the last byte in table */ + rlwinm r9,r5,4,0,3 ; Move nybble 1 up to 0 + xor r7,r7,r11 /* Get primary hash */ + mtcrf 0x04,r12 ; Set the features + andi. r12,r0,0x7FCF /* Disable translation and interruptions */ + rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */ + addi r8,r8,1 /* Point to the PTEG Control Area */ + xor r9,r9,r5 ; Splooch vaddr nybble 0 and 1 together + and r7,r7,r6 /* Wrap the hash */ + rlwimi r11,r5,10,26,31 /* Move API into pte ID */ + rlwinm r9,r9,6,27,29 ; Get splooched bits in place + add r8,r8,r7 /* Point to our PCA entry */ + rlwinm r10,r4,2,27,29 ; Get low 3 bits of the VSID for look-aside hash + + bt pfNoMSRirb,hamNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hamNoMSRx + +hamNoMSR: mr r4,r0 ; Save R0 + mr r2,r3 ; Save + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r0,r4 ; Restore + mr r3,r2 ; Restore +hamNoMSRx: + + la r4,PCAhash(r8) /* Point to the mapping hash area */ + xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID + isync /* Get rid of anything prefetched before we ref storage */ +/* + * We've now got the address of our PCA, the hash chain anchor, our API subhash, + * and word 0 of the PTE (the virtual part). + * + * Now, we just lock the PCA. 
+ */ + + li r12,1 /* Get the locked value */ + dcbt 0,r4 /* We'll need the hash area in a sec, so get it */ + add r4,r4,r9 /* Point to the right mapping hash slot */ + + lwarx r10,0,r8 ; ? + +ptegLckx: lwarx r10,0,r8 /* Get the PTEG lock */ + mr. r10,r10 /* Is it locked? */ + bne- ptegLckwx /* Yeah... */ + stwcx. r12,0,r8 /* Take take it */ + bne- ptegLckx /* Someone else was trying, try again... */ + b ptegSXgx /* All done... */ + + .align 4 + +ptegLckwx: mr. r10,r10 /* Check if it's already held */ + beq+ ptegLckx /* It's clear... */ + lwz r10,0(r8) /* Get lock word again... */ + b ptegLckwx /* Wait... */ + + .align 4 + +ptegSXgx: isync /* Make sure we haven't used anything yet */ + + lwz r7,0(r4) /* Pick up the anchor of hash list */ + stw r3,0(r4) /* Save the new head */ + stw r7,mmhashnext(r3) /* Chain in the old head */ + + stw r4,mmPTEhash(r3) /* Point to the head of the hash list */ + + sync /* Make sure the chain is updated */ + stw r10,0(r8) /* Unlock the hash list */ + mtmsr r0 /* Restore translation and interruptions */ + isync /* Toss anything done with DAT off */ +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,21 + bl EXT(dbgLog2) ; end of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr /* Leave... */ + + +/* mp=hw_lock_phys_vir(space, va) - Finds and locks a physical entry by vaddr. + * + * Returns the mapping with the associated physent locked if found, or a + * zero and no lock if not. It we timed out trying to get a the lock on + * the physical entry, we retun a 1. A physical entry can never be on an + * odd boundary, so we can distinguish between a mapping and a timeout code. + * + * Interrupts must be disabled before calling. + * + * Using the space and the virtual address, we hash into the hash table + * and get a lock on the PTEG hash chain. Then we search the chain for the + * mapping for our virtual address. From there, we extract the pointer to + * the physical entry. + * + * Next comes a bit of monkey business. 
we need to get a lock on the physical + * entry. But, according to our rules, we can't get it after we've gotten the + * PTEG hash lock, we could deadlock if we do. So, we need to release the + * hash lock. The problem is, though, that as soon as we release it, some + * other yahoo may remove our mapping between the time that we release the + * hash lock and obtain the phys entry lock. So, we can't count on the + * mapping once we release the lock. Instead, after we lock the phys entry, + * we search the mapping list (phys_link) for our translation. If we don't find it, + * we unlock the phys entry, bail out, and return a 0 for the mapping address. If we + * did find it, we keep the lock and return the address of the mapping block. + * + * What happens when a mapping is found, but there is no physical entry? + * This is what happens when there is I/O area mapped. It one of these mappings + * is found, the mapping is returned, as is usual for this call, but we don't + * try to lock anything. There could possibly be some problems here if another + * processor releases the mapping while we still alre using it. Hope this + * ain't gonna happen. + * + * Taaa-dahhh! Easy as pie, huh? + * + * So, we have a few hacks hacks for running translate off in here. + * First, when we call the lock routine, we have carnel knowlege of the registers is uses. + * That way, we don't need a stack frame, which we can't have 'cause the stack is in + * virtual storage. But wait, as if that's not enough... We need one more register. So, + * we cram the LR into the CTR and return from there. 
+ * + */ + .align 5 + .globl EXT(hw_lock_phys_vir) + +LEXT(hw_lock_phys_vir) + +#if PERFTIMES && DEBUG + mflr r11 + mr r5,r3 + li r3,22 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r5 + mtlr r11 +#endif + mfmsr r12 /* Get the MSR */ + eqv r6,r6,r6 /* Fill the bottom with foxes */ + mfsprg r9,2 ; Get feature flags + rlwinm r11,r3,6,6,25 /* Position the space for the VSID */ + mfspr r5,sdr1 /* Get hash table base and size */ + rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */ + mtcrf 0x04,r9 ; Set the features + rlwimi r6,r5,16,0,15 /* Make table size -1 out of mask */ + andi. r0,r12,0x7FCF /* Disable translation and interruptions */ + rlwinm r9,r4,4,0,3 ; Move nybble 1 up to 0 + rlwinm r7,r4,26,10,25 /* Isolate the page index */ + or r8,r5,r6 /* Point to the last byte in table */ + xor r7,r7,r11 /* Get primary hash */ + rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */ + addi r8,r8,1 /* Point to the PTEG Control Area */ + xor r9,r9,r4 ; Splooch vaddr nybble 0 and 1 together + and r7,r7,r6 /* Wrap the hash */ + rlwimi r11,r4,10,26,31 /* Move API into pte ID */ + rlwinm r9,r9,6,27,29 ; Get splooched bits in place + add r8,r8,r7 /* Point to our PCA entry */ + rlwinm r10,r3,2,27,29 ; Get low 3 bits of the VSID for look-aside hash + + bt pfNoMSRirb,hlpNoMSR ; No MSR... + + mtmsr r0 ; Translation and all off + isync ; Toss prefetch + b hlpNoMSRx + +hlpNoMSR: mr r3,r0 ; Get the new MSR + li r0,loadMSR ; Get the MSR setter SC + sc ; Set it +hlpNoMSRx: + + la r3,PCAhash(r8) /* Point to the mapping hash area */ + xor r9,r9,r10 ; Finish splooching nybble 0, 1, and the low bits of the VSID + isync /* Make sure translation is off before we ref storage */ + +/* + * We've now got the address of our PCA, the hash chain anchor, our API subhash, + * and word 0 of the PTE (the virtual part). + * + * Now, we just lock the PCA and find our mapping, if it exists. 
+ */ + + dcbt 0,r3 /* We'll need the hash area in a sec, so get it */ + add r3,r3,r9 /* Point to the right mapping hash slot */ + + lwarx r10,0,r8 ; ? + +ptegLcka: lwarx r10,0,r8 /* Get the PTEG lock */ + li r5,1 /* Get the locked value */ + mr. r10,r10 /* Is it locked? */ + bne- ptegLckwa /* Yeah... */ + stwcx. r5,0,r8 /* Take take it */ + bne- ptegLcka /* Someone else was trying, try again... */ + b ptegSXga /* All done... */ + + .align 4 + +ptegLckwa: mr. r10,r10 /* Check if it's already held */ + beq+ ptegLcka /* It's clear... */ + lwz r10,0(r8) /* Get lock word again... */ + b ptegLckwa /* Wait... */ + + .align 4 + +ptegSXga: isync /* Make sure we haven't used anything yet */ + + mflr r0 /* Get the LR */ + lwz r9,0(r3) /* Pick up the first mapping block */ + mtctr r0 /* Stuff it into the CTR */ + +findmapa: + + mr. r3,r9 /* Did we hit the end? */ + bne+ chkmapa /* Nope... */ + + stw r3,0(r8) /* Unlock the PTEG lock + Note: we never saved anything while we + had the lock, so we don't need a sync + before we unlock it */ + +vbail: mtmsr r12 /* Restore translation and interruptions */ + isync /* Make sure translation is cool */ +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,23 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + bctr /* Return in abject failure... */ + + .align 4 + +chkmapa: lwz r10,mmPTEv(r3) /* Pick up our virtual ID */ + lwz r9,mmhashnext(r3) /* Pick up next mapping block */ + cmplw r10,r11 /* Have we found ourself? */ + bne- findmapa /* Nope, still wandering... */ + + lwz r9,mmphysent(r3) /* Get our physical entry pointer */ + li r5,0 /* Clear this out */ + mr. r9,r9 /* Is there, like, a physical entry? */ + stw r5,0(r8) /* Unlock the PTEG lock + Note: we never saved anything while we + had the lock, so we don't need a sync + before we unlock it */ + + beq- vbail /* If there is no physical entry, it's time + to leave... */ + +/* Here we want to call hw_lock_bit. 
We don't want to use the stack, 'cause it's + * in virtual storage, and we're in real. So, we've carefully looked at the code + * in hw_lock_bit (and unlock) and cleverly don't use any of the registers that it uses. + * Be very, very aware of how you change this code. By the way, it uses: + * R0, R6, R7, R8, and R9. R3, R4, and R5 contain parameters + * Unfortunatly, we need to stash R9 still. So... Since we know we will not be interrupted + * ('cause we turned off interruptions and translation is off) we will use SPRG3... + */ + + lwz r10,mmPTEhash(r3) /* Save the head of the hash-alike chain. We need it to find ourselves later */ + lis r5,HIGH_ADDR(EXT(LockTimeOut)) /* Get address of timeout value */ + la r3,pephyslink(r9) /* Point to the lock word */ + ori r5,r5,LOW_ADDR(EXT(LockTimeOut)) /* Get second half of address */ + li r4,PHYS_LOCK /* Get the lock bit value */ + lwz r5,0(r5) /* Pick up the timeout value */ + mtsprg 3,r9 /* Save R9 in SPRG3 */ + + bl EXT(hw_lock_bit) /* Go do the lock */ + + mfsprg r9,3 /* Restore pointer to the phys_entry */ + mr. r3,r3 /* Did we timeout? */ + lwz r4,pephyslink(r9) /* Pick up first mapping block */ + beq- penterr /* Bad deal, we timed out... */ + + rlwinm r4,r4,0,0,26 ; Clear out the flags from first link + +findmapb: mr. r3,r4 /* Did we hit the end? */ + bne+ chkmapb /* Nope... */ + + la r3,pephyslink(r9) /* Point to where the lock is */ + li r4,PHYS_LOCK /* Get the lock bit value */ + bl EXT(hw_unlock_bit) /* Go unlock the physentry */ + + li r3,0 /* Say we failed */ + b vbail /* Return in abject failure... */ + +penterr: li r3,1 /* Set timeout */ + b vbail /* Return in abject failure... */ + + .align 5 + +chkmapb: lwz r6,mmPTEv(r3) /* Pick up our virtual ID */ + lwz r4,mmnext(r3) /* Pick up next mapping block */ + cmplw r6,r11 /* Have we found ourself? */ + lwz r5,mmPTEhash(r3) /* Get the start of our hash chain */ + bne- findmapb /* Nope, still wandering... */ + cmplw r5,r10 /* On the same hash chain? 
*/ + bne- findmapb /* Nope, keep looking... */ + + b vbail /* Return in glorious triumph... */ + + +/* + * hw_rem_map(mapping) - remove a mapping from the system. + * + * Upon entry, R3 contains a pointer to a mapping block and the associated + * physical entry is locked if there is one. + * + * If the mapping entry indicates that there is a PTE entry, we invalidate + * if and merge the reference and change information into the phys_entry. + * + * Next, we remove the mapping from the phys_ent and the PTEG hash list. + * + * Unlock any locks that are left, and exit. + * + * Note that this must be done with both interruptions off and VM off + * + * Note that this code depends upon the VSID being of the format 00SXXXXX + * where S is the segment number. + * + * + */ + + .align 5 + .globl EXT(hw_rem_map) + +LEXT(hw_rem_map) +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,24 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + mfsprg r9,2 ; Get feature flags + mfmsr r0 /* Save the MSR */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r9 ; Set the features + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + + bt pfNoMSRirb,lmvNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b lmvNoMSRx + +lmvNoMSR: + mr r6,r0 + mr r4,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r4 + mr r0,r6 + +lmvNoMSRx: + + + lwz r6,mmPTEhash(r3) /* Get pointer to hash list anchor */ + lwz r5,mmPTEv(r3) /* Get the VSID */ + dcbt 0,r6 /* We'll need that chain in a bit */ + + rlwinm r7,r6,0,0,25 /* Round hash list down to PCA boundary */ + li r12,1 /* Get the locked value */ + subi r6,r6,mmhashnext /* Make the anchor look like an entry */ + + lwarx r10,0,r7 ; ? + +ptegLck1: lwarx r10,0,r7 /* Get the PTEG lock */ + mr. r10,r10 /* Is it locked? */ + bne- ptegLckw1 /* Yeah... */ + stwcx. 
r12,0,r7 /* Try to take it */ + bne- ptegLck1 /* Someone else was trying, try again... */ + b ptegSXg1 /* All done... */ + + .align 4 + +ptegLckw1: mr. r10,r10 /* Check if it's already held */ + beq+ ptegLck1 /* It's clear... */ + lwz r10,0(r7) /* Get lock word again... */ + b ptegLckw1 /* Wait... */ + + .align 4 + +ptegSXg1: isync /* Make sure we haven't used anything yet */ + + lwz r12,mmhashnext(r3) /* Prime with our forward pointer */ + lwz r4,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */ + +srchmaps: mr. r10,r6 /* Save the previous entry */ + bne+ mapok /* No error... */ + + lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */ + ori r0,r0,LOW_ADDR(Choke) + sc /* Firmware Heimlich manuever */ + + .align 4 + +mapok: lwz r6,mmhashnext(r6) /* Look at the next one */ + cmplwi cr5,r4,0 /* Is there a PTE? */ + cmplw r6,r3 /* Have we found ourselves? */ + bne+ srchmaps /* Nope, get your head together... */ + + stw r12,mmhashnext(r10) /* Remove us from the queue */ + rlwinm r9,r5,1,0,3 /* Move in the segment */ + rlwinm r8,r4,6,4,19 /* Line PTEG disp up to a page */ + rlwinm r11,r5,5,4,19 /* Line up the VSID */ + lwz r10,mmphysent(r3) /* Point to the physical entry */ + + beq+ cr5,nopte /* There's no PTE to invalidate... */ + + xor r8,r8,r11 /* Back hash to virt index */ + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ + rlwimi r9,r5,22,4,9 /* Move in the API */ + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ + mfspr r11,pvr /* Find out what kind of machine we are */ + rlwimi r9,r8,0,10,19 /* Create the virtual address */ + rlwinm r11,r11,16,16,31 /* Isolate CPU type */ + + stw r5,0(r4) /* Make the PTE invalid */ + + cmplwi cr1,r11,3 /* Is this a 603? */ + sync /* Make sure the invalid is stored */ + + lwarx r5,0,r12 ; ? + +tlbhang1: lwarx r5,0,r12 /* Get the TLBIE lock */ + rlwinm r11,r4,29,29,31 /* Get the bit position of entry */ + mr. r5,r5 /* Is it locked? 
*/ + lis r6,0x8000 /* Start up a bit mask */ + li r5,1 /* Get our lock word */ + bne- tlbhang1 /* It's locked, go wait... */ + stwcx. r5,0,r12 /* Try to get it */ + bne- tlbhang1 /* We was beat... */ + + srw r6,r6,r11 /* Make a "free slot" mask */ + lwz r5,PCAallo(r7) /* Get the allocation control bits */ + rlwinm r11,r6,24,8,15 /* Make the autogen bit to turn off */ + or r5,r5,r6 /* turn on the free bit */ + rlwimi r11,r11,24,16,23 /* Get lock bit mask to turn it off */ + + andc r5,r5,r11 /* Turn off the lock and autogen bits in allocation flags */ + li r11,0 /* Lock clear value */ + + tlbie r9 /* Invalidate it everywhere */ + + + beq- cr1,its603a /* It's a 603, skip the tlbsync... */ + + eieio /* Make sure that the tlbie happens first */ + tlbsync /* wait for everyone to catch up */ + +its603a: sync /* Make sure of it all */ + stw r11,0(r12) /* Clear the tlbie lock */ + eieio /* Make sure those RC bit are loaded */ + stw r5,PCAallo(r7) /* Show that the slot is free */ + stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */ + +nopte: mr. r10,r10 /* See if there is a physical entry */ + la r9,pephyslink(r10) /* Point to the physical mapping chain */ + beq- nophys /* No physical entry, we're done... */ + beq- cr5,nadamrg /* No PTE to merge... */ + + lwz r6,4(r4) /* Get the latest reference and change bits */ + la r12,pepte1(r10) /* Point right at the master copy */ + rlwinm r6,r6,0,23,24 /* Extract just the RC bits */ + + lwarx r8,0,r12 ; ? + +mrgrc: lwarx r8,0,r12 /* Get the master copy */ + or r8,r8,r6 /* Merge in latest RC */ + stwcx. r8,0,r12 /* Save it back */ + bne- mrgrc /* If it changed, try again... */ + +nadamrg: li r11,0 /* Clear this out */ + lwz r12,mmnext(r3) /* Prime with our next */ + stw r11,0(r7) /* Unlock the hash chain now so we don't + lock out another processor during the + our next little search */ + + +srchpmap: mr. r10,r9 /* Save the previous entry */ + bne+ mapok1 /* No error... */ + + lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! 
*/ + ori r0,r0,LOW_ADDR(Choke) + sc /* Firmware Heimlich maneuver */ + + .align 4 + +mapok1: lwz r9,mmnext(r9) /* Look at the next one */ + rlwinm r8,r9,0,27,31 ; Save the flags (including the lock) + rlwinm r9,r9,0,0,26 ; Clear out the flags from first link + cmplw r9,r3 /* Have we found ourselves? */ + bne+ srchpmap /* Nope, get your head together... */ + + rlwimi r12,r8,0,27,31 ; Insert the lock and flags */ + stw r12,mmnext(r10) /* Remove us from the queue */ + + mtmsr r0 /* Interrupts and translation back on */ + isync +#if PERFTIMES && DEBUG + mflr r11 + li r3,25 + bl EXT(dbgLog2) ; Start of hw_add_map + mtlr r11 +#endif + blr /* Return... */ + + .align 4 + +nophys: li r4,0 /* Make sure this is 0 */ + sync /* Make sure that chain is updated */ + stw r4,0(r7) /* Unlock the hash chain */ + mtmsr r0 /* Interrupts and translation back on */ + isync +#if PERFTIMES && DEBUG + mflr r11 + li r3,25 + bl EXT(dbgLog2) ; Start of hw_add_map + mtlr r11 +#endif + blr /* Return... */ + + +/* + * hw_prot(physent, prot) - Change the protection of a physical page + * + * Upon entry, R3 contains a pointer to a physical entry which is locked. + * R4 contains the PPC protection bits. + * + * The first thing we do is to slam the new protection into the phys entry. + * Then we scan the mappings and process each one. + * + * Acquire the lock on the PTEG hash list for the mapping being processed. + * + * If the current mapping has a PTE entry, we invalidate + * it and merge the reference and change information into the phys_entry. + * + * Next, slam the protection bits into the entry and unlock the hash list. 
+ * + * Note that this must be done with both interruptions off and VM off + * + * + */ + + .align 5 + .globl EXT(hw_prot) + +LEXT(hw_prot) +#if PERFTIMES && DEBUG + mflr r11 + mr r7,r3 +// lwz r5,4(r3) + li r5,0x1111 + li r3,26 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r7 + mtlr r11 +#endif + mfsprg r9,2 ; Get feature flags + mfmsr r0 /* Save the MSR */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + li r5,pepte1 /* Get displacement to the second word of master pte */ + mtcrf 0x04,r9 ; Set the features + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + + bt pfNoMSRirb,hpNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hpNoMSRx + +hpNoMSR: + mr r10,r0 + mr r7,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r0,r10 + mr r3,r7 +hpNoMSRx: + + + + lwz r10,pephyslink(r3) /* Get the first mapping block */ + rlwinm r10,r10,0,0,26 ; Clear out the flags from first link + +/* + * Note that we need to to do the interlocked update here because another processor + * can be updating the reference and change bits even though the physical entry + * is locked. All modifications to the PTE portion of the physical entry must be + * done via interlocked update. + */ + + lwarx r8,r5,r3 ; ? + +protcng: lwarx r8,r5,r3 /* Get the master copy */ + rlwimi r8,r4,0,30,31 /* Move in the protection bits */ + stwcx. r8,r5,r3 /* Save it back */ + bne- protcng /* If it changed, try again... */ + + + +protnext: mr. r10,r10 /* Are there any more mappings? */ + beq- protdone /* Naw... */ + + lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */ + lwz r5,mmPTEv(r10) /* Get the virtual address */ + rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ + + li r12,1 /* Get the locked value */ + + lwarx r11,0,r7 ; ? + +protLck1: lwarx r11,0,r7 /* Get the PTEG lock */ + mr. r11,r11 /* Is it locked? */ + bne- protLckw1 /* Yeah... */ + stwcx. 
r12,0,r7 /* Try to take it */ + bne- protLck1 /* Someone else was trying, try again... */ + b protSXg1 /* All done... */ + + .align 4 + +protLckw1: mr. r11,r11 /* Check if it's already held */ + beq+ protLck1 /* It's clear... */ + lwz r11,0(r7) /* Get lock word again... */ + b protLckw1 /* Wait... */ + + .align 4 + +protSXg1: isync /* Make sure we haven't used anything yet */ + + lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */ + + rlwinm r9,r5,1,0,3 /* Move in the segment */ + lwz r2,mmPTEr(r10) ; Get the mapping copy of the PTE + mr. r6,r6 /* See if there is a PTE here */ + rlwinm r8,r5,31,2,25 /* Line it up */ + rlwimi r2,r4,0,30,31 ; Move protection bits into the mapping copy + + beq+ protul /* There's no PTE to invalidate... */ + + xor r8,r8,r6 /* Back hash to virt index */ + rlwimi r9,r5,22,4,9 /* Move in the API */ + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ + rlwinm r5,r5,0,1,31 /* Clear the valid bit */ + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ + mfspr r11,pvr /* Find out what kind of machine we are */ + rlwimi r9,r8,6,10,19 /* Create the virtual address */ + rlwinm r11,r11,16,16,31 /* Isolate CPU type */ + + stw r5,0(r6) /* Make the PTE invalid */ + cmplwi cr1,r11,3 /* Is this a 603? */ + sync /* Make sure the invalid is stored */ + + lwarx r11,0,r12 ; ? + +tlbhangp: lwarx r11,0,r12 /* Get the TLBIE lock */ + rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ + mr. r11,r11 /* Is it locked? */ + lis r5,0x8000 /* Start up a bit mask */ + li r11,1 /* Get our lock word */ + bne- tlbhangp /* It's locked, go wait... */ + stwcx. r11,0,r12 /* Try to get it */ + bne- tlbhangp /* We was beat... */ + + li r11,0 /* Lock clear value */ + + tlbie r9 /* Invalidate it everywhere */ + + beq- cr1,its603p /* It's a 603, skip the tlbsync... 
*/ + + eieio /* Make sure that the tlbie happens first */ + tlbsync /* wait for everyone to catch up */ + +its603p: stw r11,0(r12) /* Clear the lock */ + srw r5,r5,r8 /* Make a "free slot" mask */ + sync /* Make sure of it all */ + + lwz r6,4(r6) /* Get the latest reference and change bits */ + stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */ + rlwinm r6,r6,0,23,24 /* Extract the RC bits */ + lwz r9,PCAallo(r7) /* Get the allocation control bits */ + rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ + rlwimi r2,r6,0,23,24 ; Put the latest RC bit in mapping copy + or r9,r9,r5 /* Set the slot free */ + rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ + andc r9,r9,r8 /* Clear the auto and lock bits */ + li r5,pepte1 /* Get displacement to the second word of master pte */ + stw r9,PCAallo(r7) /* Store the allocation controls */ + + lwarx r11,r5,r3 ; ? +protmod: lwarx r11,r5,r3 /* Get the master copy */ + or r11,r11,r6 /* Merge in latest RC */ + stwcx. r11,r5,r3 /* Save it back */ + bne- protmod /* If it changed, try again... */ + + sync /* Make sure that chain is updated */ + +protul: li r4,0 /* Get a 0 */ + stw r2,mmPTEr(r10) ; Save the updated mapping PTE + lwz r10,mmnext(r10) /* Get the next */ + stw r4,0(r7) /* Unlock the hash chain */ + b protnext /* Go get the next one */ + + .align 4 + +protdone: mtmsr r0 /* Interrupts and translation back on */ + isync +#if PERFTIMES && DEBUG + mflr r11 + li r3,27 + bl EXT(dbgLog2) ; Start of hw_add_map + mtlr r11 +#endif + blr /* Return... */ + + +/* + * hw_prot_virt(mapping, prot) - Change the protection of single page + * + * Upon entry, R3 contains a pointer (real) to a mapping. + * R4 contains the PPC protection bits. + * + * Acquire the lock on the PTEG hash list for the mapping being processed. + * + * If the current mapping has a PTE entry, we invalidate + * it and merge the reference and change information into the phys_entry. 
+ * + * Next, slam the protection bits into the entry, merge the RC bits, + * and unlock the hash list. + * + * Note that this must be done with both interruptions off and VM off + * + * + */ + + .align 5 + .globl EXT(hw_prot_virt) + +LEXT(hw_prot_virt) +#if PERFTIMES && DEBUG + mflr r11 + mr r7,r3 +// lwz r5,4(r3) + li r5,0x1111 + li r3,40 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r7 + mtlr r11 +#endif + mfsprg r9,2 ; Get feature flags + mfmsr r0 /* Save the MSR */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r9 ; Set the features + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + + bt pfNoMSRirb,hpvNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hpvNoMSRx + +hpvNoMSR: + mr r5,r0 + mr r7,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r7 + mr r0,r5 +hpvNoMSRx: + + + +/* + * Note that we need to to do the interlocked update here because another processor + * can be updating the reference and change bits even though the physical entry + * is locked. All modifications to the PTE portion of the physical entry must be + * done via interlocked update. + */ + + lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */ + lwz r5,mmPTEv(r3) /* Get the virtual address */ + rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ + + li r12,1 /* Get the locked value */ + + lwarx r11,0,r7 ; ? + +protvLck1: lwarx r11,0,r7 /* Get the PTEG lock */ + mr. r11,r11 /* Is it locked? */ + bne- protvLckw1 /* Yeah... */ + stwcx. r12,0,r7 /* Try to take it */ + bne- protvLck1 /* Someone else was trying, try again... */ + b protvSXg1 /* All done... */ + + .align 4 + +protvLckw1: mr. r11,r11 /* Check if it's already held */ + beq+ protvLck1 /* It's clear... */ + lwz r11,0(r7) /* Get lock word again... */ + b protvLckw1 /* Wait... 
*/ + + .align 4 + +protvSXg1: isync /* Make sure we haven't used anything yet */ + + lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */ + lwz r2,mmPTEr(r3) ; Get the mapping copy if the real part + + rlwinm r9,r5,1,0,3 /* Move in the segment */ + cmplwi cr7,r6,0 ; Any PTE to invalidate? + rlwimi r2,r4,0,30,31 ; Move in the new protection bits + rlwinm r8,r5,31,2,25 /* Line it up */ + + beq+ cr7,pvnophys /* There's no PTE to invalidate... */ + + xor r8,r8,r6 /* Back hash to virt index */ + rlwimi r9,r5,22,4,9 /* Move in the API */ + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ + rlwinm r5,r5,0,1,31 /* Clear the valid bit */ + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ + mfspr r11,pvr /* Find out what kind of machine we are */ + rlwimi r9,r8,6,10,19 /* Create the virtual address */ + rlwinm r11,r11,16,16,31 /* Isolate CPU type */ + + stw r5,0(r6) /* Make the PTE invalid */ + cmplwi cr1,r11,3 /* Is this a 603? */ + sync /* Make sure the invalid is stored */ + + lwarx r11,0,r12 ; ? + +tlbhangpv: lwarx r11,0,r12 /* Get the TLBIE lock */ + rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ + mr. r11,r11 /* Is it locked? */ + lis r5,0x8000 /* Start up a bit mask */ + li r11,1 /* Get our lock word */ + bne- tlbhangpv /* It's locked, go wait... */ + stwcx. r11,0,r12 /* Try to get it */ + bne- tlbhangpv /* We was beat... */ + + li r11,0 /* Lock clear value */ + + tlbie r9 /* Invalidate it everywhere */ + + beq- cr1,its603pv /* It's a 603, skip the tlbsync... 
*/ + + eieio /* Make sure that the tlbie happens first */ + tlbsync /* wait for everyone to catch up */ + +its603pv: stw r11,0(r12) /* Clear the lock */ + srw r5,r5,r8 /* Make a "free slot" mask */ + sync /* Make sure of it all */ + + lwz r6,4(r6) /* Get the latest reference and change bits */ + stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */ + rlwinm r6,r6,0,23,24 /* Extract the RC bits */ + lwz r9,PCAallo(r7) /* Get the allocation control bits */ + rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ + lwz r10,mmphysent(r3) ; Get any physical entry + or r9,r9,r5 /* Set the slot free */ + rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ + andc r9,r9,r8 /* Clear the auto and lock bits */ + mr. r10,r10 ; Is there a physical entry? + li r5,pepte1 /* Get displacement to the second word of master pte */ + stw r9,PCAallo(r7) /* Store the allocation controls */ + rlwimi r2,r6,0,23,24 ; Stick in RC bits + beq- pvnophys ; No physical entry... + + + lwarx r11,r5,r10 ; ? + +protvmod: lwarx r11,r5,r10 /* Get the master copy */ + or r11,r11,r6 /* Merge in latest RC */ + stwcx. r11,r5,r10 /* Save it back */ + bne- protvmod /* If it changed, try again... */ + + sync /* Make sure that chain is updated */ + +pvnophys: li r4,0 /* Get a 0 */ + stw r2,mmPTEr(r3) ; Set the real part of the PTE + stw r4,0(r7) /* Unlock the hash chain */ + mtmsr r0 ; Restore interrupts and translation + isync + +#if PERFTIMES && DEBUG + mflr r11 + li r3,41 + bl EXT(dbgLog2) + mtlr r11 +#endif + blr /* Return... */ + + +/* + * hw_attr_virt(mapping, attr) - Change the attributes of single page + * + * Upon entry, R3 contains a pointer (real) to a mapping. + * R4 contains the WIMG bits. + * + * Acquire the lock on the PTEG hash list for the mapping being processed. + * + * If the current mapping has a PTE entry, we invalidate + * it and merge the reference and change information into the phys_entry. 
+ * + * Next, slam the WIMG bits into the entry, merge the RC bits, + * and unlock the hash list. + * + * Note that this must be done with both interruptions off and VM off + * + * + */ + + .align 5 + .globl EXT(hw_attr_virt) + +LEXT(hw_attr_virt) +#if PERFTIMES && DEBUG + mflr r11 + mr r7,r3 +// lwz r5,4(r3) + li r5,0x1111 + li r3,40 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r7 + mtlr r11 +#endif + mfsprg r9,2 ; Get feature flags + mfmsr r0 /* Save the MSR */ + mtcrf 0x04,r9 ; Set the features + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + + bt pfNoMSRirb,havNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b havNoMSRx + +havNoMSR: + mr r5,r0 + mr r7,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r7 + mr r0,r5 +havNoMSRx: + +/* + * Note that we need to to do the interlocked update here because another processor + * can be updating the reference and change bits even though the physical entry + * is locked. All modifications to the PTE portion of the physical entry must be + * done via interlocked update. + */ + + lwz r7,mmPTEhash(r3) /* Get pointer to hash list anchor */ + lwz r5,mmPTEv(r3) /* Get the virtual address */ + rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ + + li r12,1 /* Get the locked value */ + + lwarx r11,0,r7 ; ? + +attrvLck1: lwarx r11,0,r7 /* Get the PTEG lock */ + mr. r11,r11 /* Is it locked? */ + bne- attrvLckw1 /* Yeah... */ + stwcx. r12,0,r7 /* Try to take it */ + bne- attrvLck1 /* Someone else was trying, try again... */ + b attrvSXg1 /* All done... */ + + .align 4 + +attrvLckw1: mr. r11,r11 /* Check if it's already held */ + beq+ attrvLck1 /* It's clear... */ + lwz r11,0(r7) /* Get lock word again... */ + b attrvLckw1 /* Wait... 
*/ + + .align 4 + +attrvSXg1: isync /* Make sure we haven't used anything yet */ + + lwz r6,mmPTEent(r3) /* Get the pointer to the PTE now that the lock's set */ + lwz r2,mmPTEr(r3) ; Get the mapping copy if the real part + + rlwinm r9,r5,1,0,3 /* Move in the segment */ + mr. r6,r6 /* See if there is a PTE here */ + rlwimi r2,r4,0,25,28 ; Move in the new attribute bits + rlwinm r8,r5,31,2,25 /* Line it up and check if empty */ + + beq+ avnophys /* There's no PTE to invalidate... */ + + xor r8,r8,r6 /* Back hash to virt index */ + rlwimi r9,r5,22,4,9 /* Move in the API */ + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ + rlwinm r5,r5,0,1,31 /* Clear the valid bit */ + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ + mfspr r11,pvr /* Find out what kind of machine we are */ + rlwimi r9,r8,6,10,19 /* Create the virtual address */ + rlwinm r11,r11,16,16,31 /* Isolate CPU type */ + stw r5,0(r6) /* Make the PTE invalid */ + cmplwi cr1,r11,3 /* Is this a 603? */ + sync /* Make sure the invalid is stored */ + + lwarx r11,0,r12 ; ? + +tlbhangav: lwarx r11,0,r12 /* Get the TLBIE lock */ + rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ + mr. r11,r11 /* Is it locked? */ + lis r5,0x8000 /* Start up a bit mask */ + li r11,1 /* Get our lock word */ + bne- tlbhangav /* It's locked, go wait... */ + stwcx. r11,0,r12 /* Try to get it */ + bne- tlbhangav /* We was beat... */ + + li r11,0 /* Lock clear value */ + + tlbie r9 /* Invalidate it everywhere */ + + beq- cr1,its603av /* It's a 603, skip the tlbsync... 
*/ + + eieio /* Make sure that the tlbie happens first */ + tlbsync /* wait for everyone to catch up */ + +its603av: stw r11,0(r12) /* Clear the lock */ + srw r5,r5,r8 /* Make a "free slot" mask */ + sync /* Make sure of it all */ + + lwz r6,4(r6) /* Get the latest reference and change bits */ + stw r11,mmPTEent(r3) /* Clear the pointer to the PTE */ + rlwinm r6,r6,0,23,24 /* Extract the RC bits */ + lwz r9,PCAallo(r7) /* Get the allocation control bits */ + rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ + lwz r10,mmphysent(r3) ; Get any physical entry + or r9,r9,r5 /* Set the slot free */ + rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ + andc r9,r9,r8 /* Clear the auto and lock bits */ + mr. r10,r10 ; Is there a physical entry? + li r5,pepte1 /* Get displacement to the second word of master pte */ + stw r9,PCAallo(r7) /* Store the allocation controls */ + rlwimi r2,r6,0,23,24 ; Stick in RC bits + beq- avnophys ; No physical entry... + + lwarx r11,r5,r10 ; ? + +attrvmod: lwarx r11,r5,r10 /* Get the master copy */ + or r11,r11,r6 /* Merge in latest RC */ + stwcx. r11,r5,r10 /* Save it back */ + bne- attrvmod /* If it changed, try again... */ + + sync /* Make sure that chain is updated */ + +avnophys: li r4,0 /* Get a 0 */ + stw r2,mmPTEr(r3) ; Set the real part of the PTE + stw r4,0(r7) /* Unlock the hash chain */ + + rlwinm r2,r2,0,0,19 ; Clear back to page boundary + +attrflsh: cmplwi r4,(4096-32) ; Are we about to do the last line on page? + dcbst r2,r4 ; Flush cache because we changed attributes + addi r4,r4,32 ; Bump up cache + blt+ attrflsh ; Do the whole page... + sync + + li r4,0 +attrimvl: cmplwi r4,(4096-32) ; Are we about to do the last line on page? + dcbi r2,r4 ; Invalidate dcache because we changed attributes + icbi r2,r4 ; Invalidate icache because we changed attributes + icbi r2,r4 ; Invalidate icache because we changed attributes + addi r4,r4,32 ; Bump up cache + blt+ attrimvl ; Do the whole page... 
+ sync + + mtmsr r0 ; Restore interrupts and translation + isync + +#if PERFTIMES && DEBUG + mflr r11 + li r3,41 + bl EXT(dbgLog2) + mtlr r11 +#endif + blr /* Return... */ + + +/* + * hw_pte_comm(physent) - Do something to the PTE pointing to a physical page + * + * Upon entry, R3 contains a pointer to a physical entry which is locked. + * Note that this must be done with both interruptions off and VM off + * + * First, we set up CRs 5 and 7 to indicate which of the 7 calls this is. + * + * Now we scan the mappings to invalidate any with an active PTE. + * + * Acquire the lock on the PTEG hash list for the mapping being processed. + * + * If the current mapping has a PTE entry, we invalidate + * it and merge the reference and change information into the phys_entry. + * + * Next, unlock the hash list and go on to the next mapping. + * + * + * + */ + + .align 5 + .globl EXT(hw_inv_all) + +LEXT(hw_inv_all) + + li r9,0x800 /* Indicate invalidate all */ + li r2,0 ; No inadvertant modifications please + b hw_pte_comm /* Join in the fun... */ + + + .align 5 + .globl EXT(hw_tst_mod) + +LEXT(hw_tst_mod) + + lwz r8,pepte1(r3) ; Get the saved PTE image + li r9,0x400 /* Indicate test modify */ + li r2,0 ; No inadvertant modifications please + rlwinm. r8,r8,25,31,31 ; Make change bit into return code + beq+ hw_pte_comm ; Assume we do not know if it is set... + mr r3,r8 ; Set the return code + blr ; Return quickly... + + .align 5 + .globl EXT(hw_tst_ref) + +LEXT(hw_tst_ref) + lwz r8,pepte1(r3) ; Get the saved PTE image + li r9,0x200 /* Indicate test reference bit */ + li r2,0 ; No inadvertant modifications please + rlwinm. r8,r8,24,31,31 ; Make reference bit into return code + beq+ hw_pte_comm ; Assume we do not know if it is set... + mr r3,r8 ; Set the return code + blr ; Return quickly... 
+ +/* + * Note that the following are all in one CR for ease of use later + */ + .align 4 + .globl EXT(hw_set_mod) + +LEXT(hw_set_mod) + + li r9,0x008 /* Indicate set modify bit */ + li r2,0x4 ; Set set C, clear none + b hw_pte_comm /* Join in the fun... */ + + + .align 4 + .globl EXT(hw_clr_mod) + +LEXT(hw_clr_mod) + + li r9,0x004 /* Indicate clear modify bit */ + li r2,0x1 ; Set set none, clear C + b hw_pte_comm /* Join in the fun... */ + + + .align 4 + .globl EXT(hw_set_ref) + +LEXT(hw_set_ref) + + li r9,0x002 /* Indicate set reference */ + li r2,0x8 ; Set set R, clear none + b hw_pte_comm /* Join in the fun... */ + + .align 5 + .globl EXT(hw_clr_ref) + +LEXT(hw_clr_ref) + + li r9,0x001 /* Indicate clear reference bit */ + li r2,0x2 ; Set set none, clear R + b hw_pte_comm /* Join in the fun... */ + + +/* + * This is the common stuff. + */ + + .align 5 + +hw_pte_comm: /* Common routine for pte tests and manips */ + +#if PERFTIMES && DEBUG + mflr r11 + mr r7,r3 + lwz r4,4(r3) + mr r5,r9 + li r3,28 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r7 + mtlr r11 +#endif + mfsprg r8,2 ; Get feature flags + lwz r10,pephyslink(r3) /* Get the first mapping block */ + mfmsr r0 /* Save the MSR */ + rlwinm. r10,r10,0,0,26 ; Clear out the flags from first link and see if we are mapped + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r8 ; Set the features + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + beq- comnmap ; No mapping + dcbt br0,r10 ; Touch the first mapping in before the isync + +comnmap: + + bt pfNoMSRirb,hpcNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hpcNoMSRx + +hpcNoMSR: + mr r5,r0 + mr r7,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r7 + mr r0,r5 +hpcNoMSRx: + + mtcrf 0x05,r9 /* Set the call type flags into cr5 and 7 */ + + beq- commdone ; Nothing us mapped to this page... 
+ b commnext ; Jump to first pass (jump here so we can align loop) + + .align 5 + +commnext: lwz r11,mmnext(r10) ; Get the pointer to the next mapping (if any) + lwz r7,mmPTEhash(r10) /* Get pointer to hash list anchor */ + lwz r5,mmPTEv(r10) /* Get the virtual address */ + mr. r11,r11 ; More mappings to go? + rlwinm r7,r7,0,0,25 /* Round hash list down to PCA boundary */ + beq- commnxtch ; No more mappings... + dcbt br0,r11 ; Touch the next mapping + +commnxtch: li r12,1 /* Get the locked value */ + + lwarx r11,0,r7 ; ? + +commLck1: lwarx r11,0,r7 /* Get the PTEG lock */ + mr. r11,r11 /* Is it locked? */ + bne- commLckw1 /* Yeah... */ + stwcx. r12,0,r7 /* Try to take it */ + bne- commLck1 /* Someone else was trying, try again... */ + b commSXg1 /* All done... */ + + .align 4 + +commLckw1: mr. r11,r11 /* Check if it's already held */ + beq+ commLck1 /* It's clear... */ + lwz r11,0(r7) /* Get lock word again... */ + b commLckw1 /* Wait... */ + + .align 4 + +commSXg1: isync /* Make sure we haven't used anything yet */ + + lwz r6,mmPTEent(r10) /* Get the pointer to the PTE now that the lock's set */ + + rlwinm r9,r5,1,0,3 /* Move in the segment */ + mr. r6,r6 /* See if there is a PTE entry here */ + rlwinm r8,r5,31,2,25 /* Line it up and check if empty */ + + beq+ commul /* There's no PTE to invalidate... */ + + xor r8,r8,r6 /* Back hash to virt index */ + rlwimi r9,r5,22,4,9 /* Move in the API */ + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ + rlwinm r5,r5,0,1,31 /* Clear the valid bit */ + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ + rlwimi r9,r8,6,10,19 /* Create the virtual address */ + + stw r5,0(r6) /* Make the PTE invalid */ + mfspr r4,pvr /* Find out what kind of machine we are */ + sync /* Make sure the invalid is stored */ + + lwarx r11,0,r12 ; ? + +tlbhangco: lwarx r11,0,r12 /* Get the TLBIE lock */ + rlwinm r8,r6,29,29,31 /* Get the bit position of entry */ + mr. r11,r11 /* Is it locked? 
*/ + lis r5,0x8000 /* Start up a bit mask */ + li r11,1 /* Get our lock word */ + bne- tlbhangco /* It's locked, go wait... */ + stwcx. r11,0,r12 /* Try to get it */ + bne- tlbhangco /* We was beat... */ + + rlwinm r4,r4,16,16,31 /* Isolate CPU type */ + li r11,0 /* Lock clear value */ + cmplwi r4,3 /* Is this a 603? */ + + tlbie r9 /* Invalidate it everywhere */ + + beq- its603co /* It's a 603, skip the tlbsync... */ + + eieio /* Make sure that the tlbie happens first */ + tlbsync /* wait for everyone to catch up */ + +its603co: stw r11,0(r12) /* Clear the lock */ + srw r5,r5,r8 /* Make a "free slot" mask */ + sync /* Make sure of it all */ + + lwz r6,4(r6) /* Get the latest reference and change bits */ + lwz r9,PCAallo(r7) /* Get the allocation control bits */ + stw r11,mmPTEent(r10) /* Clear the pointer to the PTE */ + rlwinm r8,r5,24,8,15 /* Make the autogen bit to turn off */ + or r9,r9,r5 /* Set the slot free */ + rlwimi r8,r8,24,16,23 /* Get lock bit mask to turn it off */ + rlwinm r4,r6,0,23,24 /* Extract the RC bits */ + andc r9,r9,r8 /* Clear the auto and lock bits */ + li r5,pepte1 /* Get displacement to the second word of master pte */ + stw r9,PCAallo(r7) /* Store the allocation controls */ + + lwarx r11,r5,r3 ; ? +commmod: lwarx r11,r5,r3 /* Get the master copy */ + or r11,r11,r4 /* Merge in latest RC */ + stwcx. r11,r5,r3 /* Save it back */ + bne- commmod /* If it changed, try again... */ + + sync /* Make sure that chain is updated */ + b commulnl ; Skip loading the old real part... + +commul: lwz r6,mmPTEr(r10) ; Get the real part + +commulnl: rlwinm r12,r2,5,23,24 ; Get the "set" bits + rlwinm r11,r2,7,23,24 ; Get the "clear" bits + + or r6,r6,r12 ; Set the bits to come on + andc r6,r6,r11 ; Clear those to come off + + stw r6,mmPTEr(r10) ; Set the new RC + + lwz r10,mmnext(r10) /* Get the next */ + li r4,0 /* Make sure this is 0 */ + mr. r10,r10 ; Is there another mapping? 
+ stw r4,0(r7) /* Unlock the hash chain */ + bne+ commnext ; Go get the next if there is one... + +/* + * Now that all PTEs have been invalidated and the master RC bits are updated, + * we go ahead and figure out what the original call was and do that. Note that + * another processor could be messing around and may have entered one of the + * PTEs we just removed into the hash table. Too bad... You takes yer chances. + * If there's a problem with that, it's because some higher level was trying to + * do something with a mapping that it shouldn't. So, the problem's really + * there, nyaaa, nyaaa, nyaaa... nyaaa, nyaaa... nyaaa! So there! + */ + +commdone: li r5,pepte1 /* Get displacement to the second word of master pte */ + blt cr5,commfini /* We're finished, it was invalidate all... */ + bgt cr5,commtst /* It was a test modified... */ + beq cr5,commtst /* It was a test reference... */ + +/* + * Note that we need to to do the interlocked update here because another processor + * can be updating the reference and change bits even though the physical entry + * is locked. All modifications to the PTE portion of the physical entry must be + * done via interlocked update. + */ + + rlwinm r12,r2,5,23,24 ; Get the "set" bits + rlwinm r11,r2,7,23,24 ; Get the "clear" bits + + lwarx r8,r5,r3 ; ? + +commcng: lwarx r8,r5,r3 /* Get the master copy */ + or r8,r8,r12 ; Set the bits to come on + andc r8,r8,r11 ; Clear those to come off + stwcx. r8,r5,r3 /* Save it back */ + bne- commcng /* If it changed, try again... */ + + mtmsr r0 /* Interrupts and translation back on */ + isync +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,29 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr /* Return... */ + + .align 4 + +commtst: lwz r8,pepte1(r3) /* Get the PTE */ + bne- cr5,commtcb ; This is for the change bit... 
+ mtmsr r0 ; Interrupts and translation back on + rlwinm r3,r8,24,31,31 ; Copy reference bit to bit 31 + isync ; Toss prefetching +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,29 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr ; Return... + + .align 4 + +commtcb: rlwinm r3,r8,25,31,31 ; Copy change bit to bit 31 + +commfini: mtmsr r0 ; Interrupts and translation back on + isync ; Toss prefetching + +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,29 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr ; Return... + +/* + * unsigned int hw_test_rc(mapping *mp, boolean_t reset); + * + * Test the RC bits for a specific mapping. If reset is non-zero, clear them. + * We return the RC value in the mapping if there is no PTE or if C is set. + * (Note: R is always set with C.) Otherwise we invalidate the PTE and + * collect the RC bits from there, also merging them into the global copy. + * + * For now, we release the PTE slot and leave it invalid. In the future, we + * may consider re-validating and not releasing the slot. It would be faster, + * but our current implementation says that we will have not PTEs valid + * without the reference bit set. + * + * We will special case C==1 && not reset to just return the RC. + * + * Probable state is worst performance state: C bit is off and there is a PTE. + */ + +#define htrReset 31 + + .align 5 + .globl EXT(hw_test_rc) + +LEXT(hw_test_rc) + + mfsprg r9,2 ; Get feature flags + mfmsr r0 ; Save the MSR + mr. r4,r4 ; See if we have a reset to do later + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruption mask + crnot htrReset,cr0_eq ; Remember reset + mtcrf 0x04,r9 ; Set the features + rlwinm r12,r12,0,28,25 ; Clear IR and DR + + bt pfNoMSRirb,htrNoMSR ; No MSR... 
+ + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b htrNoMSRx + +htrNoMSR: + mr r2,r0 + mr r7,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r7 + mr r0,r2 +htrNoMSRx: + + lwz r2,mmPTEr(r3) ; Get the real part + lwz r7,mmPTEhash(r3) ; Get pointer to hash list anchor + rlwinm. r12,r2,0,24,24 ; Is the change bit on? + lwz r5,mmPTEv(r3) ; Get the virtual address + crnor cr0_eq,cr0_eq,htrReset ; Set if C=1 && not reset + rlwinm r7,r7,0,0,25 ; Round hash list down to PCA boundary + bt cr0_eq,htrcset ; Special case changed but no reset case... + + li r12,1 ; Get the locked value + +htrLck1: lwarx r11,0,r7 ; Get the PTEG lock + mr. r11,r11 ; Is it locked? + bne- htrLckw1 ; Yeah... + stwcx. r12,0,r7 ; Try to take it + bne- htrLck1 ; Someone else was trying, try again... + b htrSXg1 ; All done... + + .align 4 + +htrLckw1: mr. r11,r11 ; Check if it is already held + beq+ htrLck1 ; It is clear... + lwz r11,0(r7) ; Get lock word again... + b htrLckw1 ; Wait... + + .align 4 + +htrSXg1: isync ; Make sure we have not used anything yet + + lwz r6,mmPTEent(r3) ; Get the pointer to the PTE now that the lock is set + lwz r2,mmPTEr(r3) ; Get the mapping copy of the real part + + rlwinm r9,r5,1,0,3 ; Move in the segment + mr. r6,r6 ; Any PTE to invalidate? + rlwinm r8,r5,31,2,25 ; Line it up + + beq+ htrnopte ; There is no PTE to invalidate... + + xor r8,r8,r6 ; Back hash to virt index + rlwimi r9,r5,22,4,9 ; Move in the API + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock + rlwinm r5,r5,0,1,31 ; Clear the valid bit + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part + mfspr r11,pvr ; Find out what kind of machine we are + rlwimi r9,r8,6,10,19 ; Create the virtual address + rlwinm r11,r11,16,16,31 ; Isolate CPU type + + stw r5,0(r6) ; Make the PTE invalid + cmplwi cr1,r11,3 ; Is this a 603? 
+ sync ; Make sure the invalid is stored + +htrtlbhang: lwarx r11,0,r12 ; Get the TLBIE lock + rlwinm r8,r6,29,29,31 ; Get the bit position of entry + mr. r11,r11 ; Is it locked? + lis r5,0x8000 ; Start up a bit mask + li r11,1 ; Get our lock word + bne- htrtlbhang ; It is locked, go wait... + stwcx. r11,0,r12 ; Try to get it + bne- htrtlbhang ; We was beat... + + li r11,0 ; Lock clear value + + tlbie r9 ;Invalidate it everywhere + + beq- cr1,htr603 ; It is a 603, skip the tlbsync... + + eieio ; Make sure that the tlbie happens first + tlbsync ; wait for everyone to catch up + +htr603: stw r11,0(r12) ; Clear the lock + srw r5,r5,r8 ; Make a "free slot" mask + sync ; Make sure of it all + + lwz r6,4(r6) ; Get the latest reference and change bits + stw r11,mmPTEent(r3) ; Clear the pointer to the PTE + rlwinm r6,r6,0,23,24 ; Extract the RC bits + lwz r9,PCAallo(r7) ; Get the allocation control bits + rlwinm r8,r5,24,8,15 ; Make the autogen bit to turn off + lwz r10,mmphysent(r3) ; Get any physical entry + or r9,r9,r5 ; Set the slot free + rlwimi r8,r8,24,16,23 ; Get lock bit mask to turn it off + andc r9,r9,r8 ; Clear the auto and lock bits + mr. r10,r10 ; Is there a physical entry? + li r5,pepte1 ; Get displacement to the second word of master pte + stw r9,PCAallo(r7) ; Store the allocation controls + rlwimi r2,r6,0,23,24 ; Stick in RC bits + beq- htrnopte ; No physical entry... + +htrmrc: lwarx r11,r5,r10 ; Get the master copy + or r11,r11,r6 ; Merge in latest RC + stwcx. r11,r5,r10 ; Save it back + bne- htrmrc ; If it changed, try again... + + sync ; Make sure that chain update is stored + +htrnopte: rlwinm r3,r2,25,30,31 ; Position RC and mask off + bf htrReset,htrnorst ; No reset to do... + rlwinm r2,r2,0,25,22 ; Clear the RC if requested + +htrnorst: li r4,0 ; Get a 0 + stw r2,mmPTEr(r3) ; Set the real part of the PTE + stw r4,0(r7) ; Unlock the hash chain + + mtmsr r0 ; Restore interrupts and translation + isync + blr ; Return... 
+ + .align 4 + +htrcset: rlwinm r3,r2,25,30,31 ; Position RC and mask off + mtmsr r0 ; Restore interrupts and translation + isync + blr ; Return... + + +/* + * hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) - Sets the default physical page attributes + * + * Note that this must be done with both interruptions off and VM off + * Move the passed in attributes into the pte image in the phys entry + * + * + */ + + .align 5 + .globl EXT(hw_phys_attr) + +LEXT(hw_phys_attr) + +#if PERFTIMES && DEBUG + mflr r11 + mr r8,r3 + mr r7,r5 + mr r5,r4 +// lwz r4,4(r3) + li r4,0x1111 + li r3,30 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r8 + mr r4,r5 + mr r5,r7 + mtlr r11 +#endif + mfsprg r9,2 ; Get feature flags + mfmsr r0 /* Save the MSR */ + andi. r5,r5,0x0078 /* Clean up the WIMG */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r9 ; Set the features + rlwimi r5,r4,0,30,31 /* Move the protection into the wimg register */ + la r6,pepte1(r3) /* Point to the default pte */ + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + + bt pfNoMSRirb,hpaNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hpaNoMSRx + +hpaNoMSR: + mr r10,r0 + mr r4,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r4 + mr r0,r10 +hpaNoMSRx: + +atmattr: lwarx r10,0,r6 /* Get the pte */ + rlwimi r10,r5,0,25,31 /* Move in the new attributes */ + stwcx. r10,0,r6 /* Try it on for size */ + bne- atmattr /* Someone else was trying, try again... */ + + mtmsr r0 /* Interrupts and translation back on */ + isync +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r10 + li r3,31 + bl EXT(dbgLog2) ; Start of hw_add_map + mtlr r11 +#endif + blr /* All done... */ + + + +/* + * handlePF - handle a page fault interruption + * + * If the fault can be handled, this routine will RFI directly, + * otherwise it will return with all registers as in entry. 
+ * + * Upon entry, state and all registers have been saved in savearea. + * This is pointed to by R13. + * IR and DR are off, interrupts are masked, + * Floating point be disabled. + * R3 is the interrupt code. + * + * If we bail, we must restore cr5, and all registers except 6 and + * 3. + * + */ + + .align 5 + .globl EXT(handlePF) + +LEXT(handlePF) + +/* + * This first part does a quick check to see if we can handle the fault. + * We can't handle any kind of protection exceptions here, so we pass + * them up to the next level. + * + * The mapping lists are kept in MRS (most recently stolen) + * order on queues anchored within from the + * PTEG to which the virtual address hashes. This is further segregated by + * the low-order 3 bits of the VSID XORed with the segment number and XORed + * with bits 4-7 of the vaddr in an attempt to keep the searches + * short. + * + * MRS is handled by moving the entry to the head of its list when stolen in the + * assumption that it will be revalidated soon. Entries are created on the head + * of the list because they will be used again almost immediately. + * + * We need R13 set to the savearea, R3 set to the interrupt code, and R2 + * set to the per_proc. + * + * NOTE: In order for a page-fault redrive to work, the translation miss + * bit must be set in the DSISR (or SRR1 for IFETCH). That must occur + * before we come here. + */ + + cmplwi r3,T_INSTRUCTION_ACCESS /* See if this is for the instruction */ + lwz r8,savesrr1(r13) ; Get the MSR to determine mode + beq- gotIfetch ; We have an IFETCH here... + + lwz r7,savedsisr(r13) /* Get the DSISR */ + lwz r6,savedar(r13) /* Get the fault address */ + b ckIfProt ; Go check if this is a protection fault... + +gotIfetch: mr r7,r8 ; IFETCH info is in SRR1 + lwz r6,savesrr0(r13) /* Get the instruction address */ + +ckIfProt: rlwinm. r7,r7,0,1,1 ; Is this a protection exception? + beqlr- ; Yes... 
(probably not though) + +/* + * We will need to restore registers if we bail after this point. + * Note that at this point several SRs have been changed to the kernel versions. + * Therefore, for these we must build these values. + */ + +#if PERFTIMES && DEBUG + mflr r11 + mr r5,r6 + mr r4,r3 + li r3,32 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 + mfsprg r2,0 +#endif + lwz r3,PP_USERPMAP(r2) ; Get the user pmap (not needed if kernel access, but optimize for user??) + rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Supervisor state access? + rlwinm r5,r6,6,26,29 ; Get index to the segment slot + eqv r1,r1,r1 ; Fill the bottom with foxes + bne+ notsuper ; Go do the user mode interrupt stuff... + + cmplwi cr1,r5,SR_COPYIN_NUM*4 ; See if this is the copyin/copyout segment + rlwinm r3,r6,24,8,11 ; Make the kernel VSID + bne+ cr1,havevsid ; We are done if we do not want the copyin/out guy... + + mfsr r3,SR_COPYIN ; Get the copy vsid + b havevsid ; Join up... + + .align 5 + +notsuper: addi r5,r5,PMAP_SEGS ; Get offset to table + lwzx r3,r3,r5 ; Get the VSID + +havevsid: mfspr r5,sdr1 /* Get hash table base and size */ + cror cr1_eq,cr0_eq,cr0_eq ; Remember if kernel fault for later + rlwinm r9,r6,2,2,5 ; Move nybble 1 up to 0 (keep aligned with VSID) + rlwimi r1,r5,16,0,15 /* Make table size -1 out of mask */ + rlwinm r3,r3,6,2,25 /* Position the space for the VSID */ + rlwinm r7,r6,26,10,25 /* Isolate the page index */ + xor r9,r9,r3 ; Splooch vaddr nybble 0 (from VSID) and 1 together + or r8,r5,r1 /* Point to the last byte in table */ + xor r7,r7,r3 /* Get primary hash */ + rlwinm r3,r3,1,1,24 /* Position VSID for pte ID */ + addi r8,r8,1 /* Point to the PTEG Control Area */ + rlwinm r9,r9,8,27,29 ; Get splooched bits in place + and r7,r7,r1 /* Wrap the hash */ + rlwimi r3,r6,10,26,31 /* Move API into pte ID */ + add r8,r8,r7 /* Point to our PCA entry */ + rlwinm r12,r3,27,27,29 ; Get low 3 bits of the VSID for look-aside hash + la r11,PCAhash(r8) /* Point to 
the mapping hash area */ + xor r9,r9,r12 ; Finish splooching nybble 0, 1, and the low bits of the VSID + + +/* + * We have about as much as we need to start searching the autogen (aka block maps) + * and mappings. From here on, any kind of failure will bail, and + * contention will either bail or restart from here. + * + * + */ + + li r12,1 /* Get the locked value */ + dcbt 0,r11 /* We'll need the hash area in a sec, so get it */ + add r11,r11,r9 /* Point to the right mapping hash slot */ + + lwarx r10,0,r8 ; ? +ptegLck: lwarx r10,0,r8 /* Get the PTEG lock */ + mr. r10,r10 /* Is it locked? */ + bne- ptegLckw /* Yeah... */ + stwcx. r12,0,r8 /* Take take it */ + bne- ptegLck /* Someone else was trying, try again... */ + b ptegSXg /* All done... */ + + .align 4 + +ptegLckw: mr. r10,r10 /* Check if it's already held */ + beq+ ptegLck /* It's clear... */ + lwz r10,0(r8) /* Get lock word again... */ + b ptegLckw /* Wait... */ + + .align 5 + + nop ; Force ISYNC to last instruction in IFETCH + nop + nop + +ptegSXg: isync /* Make sure we haven't used anything yet */ + + lwz r9,0(r11) /* Pick up first mapping block */ + mr r5,r11 /* Get the address of the anchor */ + mr r7,r9 /* Save the first in line */ + b findmap ; Take space and force loop to cache line + +findmap: mr. r12,r9 /* Are there more? */ + beq- tryAuto /* Nope, nothing in mapping list for us... */ + + lwz r10,mmPTEv(r12) /* Get unique PTE identification */ + lwz r9,mmhashnext(r12) /* Get the chain, just in case */ + cmplw r10,r3 /* Did we hit our PTE? */ + lwz r0,mmPTEent(r12) /* Get the pointer to the hash table entry */ + mr r5,r12 /* Save the current as previous */ + bne- findmap ; Nothing here, try the next... + +; Cache line boundary here + + cmplwi cr1,r0,0 /* Is there actually a PTE entry in the hash? */ + lwz r2,mmphysent(r12) /* Get the physical entry */ + bne- cr1,MustBeOK /* There's an entry in the hash table, so, this must + have been taken care of already... 
*/ + lis r4,0x8000 ; Tell PTE inserter that this was not an auto + cmplwi cr2,r2,0 /* Is there a physical entry? */ + li r0,0x0100 /* Force on the reference bit whenever we make a PTE valid */ + bne+ cr2,gotphys /* Skip down if we have a physical entry */ + li r0,0x0180 /* When there is no physical entry, force on + both R and C bits to keep hardware from + updating the PTE to set them. We don't + keep track of RC for I/O areas, so this is ok */ + +gotphys: lwz r2,mmPTEr(r12) ; Get the second part of the PTE + b insert /* Go insert into the PTEG... */ + +MustBeOK: li r10,0 /* Get lock clear value */ + li r3,T_IN_VAIN /* Say that we handled it */ + stw r10,PCAlock(r8) /* Clear the PTEG lock */ + sync +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,33 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr /* Blow back and handle exception */ + + + +/* + * We couldn't find it in the mapping list. As a last try, we will + * see if we can autogen it from the block mapped list. + * + * A block mapped area is defined as a contiguous virtual area that is mapped to + * a contiguous physical area. The olde-tyme IBM VM/XA Interpretive Execution + * architecture referred to this as a V=F, or Virtual = Fixed area. + * + * We consider a V=F area to be a single entity, adjacent areas can not be merged + * or overlapped. The protection and memory attributes are the same and reference + * and change indications are not kept. The areas are not considered part of the + * physical RAM of the machine and do not have any associated physical table + * entries. Their primary use is intended for mapped I/O areas (e.g., framebuffers) + * although certain areas of RAM, such as the kernel V=R memory, can be mapped. + * + * We also have a problem in the case of copyin/out: that access is done + * within the kernel for a user address. Unfortunately, the user isn't + * necessarily the current guy. That means that we don't have access to the + * right autogen list. 
We can't support this kind of access. So, we need to do + * a quick check here and cause a fault if an attempt to copyin or out to + * any autogenned area. + * + * The lists must be kept short. + * + * NOTE: kernel_pmap_store must be in V=R storage!!!!!!!!!!!!!! + */ + + .align 5 + +tryAuto: rlwinm. r11,r3,0,5,24 ; Check if this is a kernel VSID + lis r10,HIGH_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the top part of kernel block map anchor + crandc cr0_eq,cr1_eq,cr0_eq ; Set if kernel access and non-zero VSID (copyin or copyout) + mfsprg r11,0 ; Get the per_proc area + beq- cr0,realFault ; Can not autogen for copyin/copyout... + ori r10,r10,LOW_ADDR(EXT(kernel_pmap_store)+PMAP_BMAPS) ; Get the bottom part + beq- cr1,bmInKernel ; We are in kernel... (cr1 set way back at entry) + + lwz r10,PP_USERPMAP(r11) ; Get the user pmap + la r10,PMAP_BMAPS(r10) ; Point to the chain anchor + b bmInKernel ; Jump over alignment gap... + nop + nop + nop + nop + nop + nop +bmInKernel: +#ifndef CHIP_ERRATA_MAX_V1 + lwarx r9,0,r10 +#endif /* CHIP_ERRATA_MAX_V1 */ + +bmapLck: lwarx r9,0,r10 ; Get the block map anchor and lock + rlwinm. r5,r9,0,31,31 ; Is it locked? + ori r5,r5,1 ; Set the lock + bne- bmapLckw ; Yeah... + stwcx. r5,0,r10 ; Lock the bmap list + bne- bmapLck ; Someone else was trying, try again... + b bmapSXg ; All done... + + .align 4 + +bmapLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held + beq+ bmapLck ; Not no more... + lwz r9,0(r10) ; Get lock word again... + b bmapLckw ; Check it out... + + .align 5 + + nop ; Force ISYNC to last instruction in IFETCH + nop + nop + +bmapSXg: rlwinm. r4,r9,0,0,26 ; Clear out flags and lock + isync ; Make sure we have not used anything yet + bne+ findAuto ; We have something, let us go... + +bmapNone: stw r9,0(r10) ; Unlock it, we have nothing here + ; No sync here because we have not changed anything + +/* + * When we come here, we know that we can't handle this. 
Restore whatever + * state that we trashed and go back to continue handling the interrupt. + */ + +realFault: li r10,0 /* Get lock clear value */ + lwz r3,saveexception(r13) /* Figure out the exception code again */ + stw r10,PCAlock(r8) /* Clear the PTEG lock */ +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,33 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr /* Blow back and handle exception */ + + .align 5 + +findAuto: mr. r4,r4 ; Is there more? + beq- bmapNone ; No more... + lwz r5,bmstart(r4) ; Get the bottom of range + lwz r11,bmend(r4) ; Get the top of range + cmplw cr0,r6,r5 ; Are we before the entry? + cmplw cr1,r6,r11 ; Are we after the entry? + cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range + bne+ cr1,faGot ; Found it... + + lwz r4,bmnext(r4) ; Get the next one + b findAuto ; Check it out... + +faGot: rlwinm r6,r6,0,0,19 ; Round to page + lwz r2,bmPTEr(r4) ; Get the real part of the PTE + sub r5,r6,r5 ; Get offset into area + stw r9,0(r10) ; Unlock it, we are done with it (no sync needed) + add r2,r2,r5 ; Adjust the real address + + lis r4,0x8080 /* Indicate that this was autogened */ + li r0,0x0180 /* Autogenned areas always set RC bits. + This keeps the hardware from having + to do two storage writes */ + +/* + * Here where we insert the PTE into the hash. The PTE image is in R3, R2. + * The PTEG allocation controls are a bit map of the state of the PTEG. The + * PCAlock bits are a temporary lock for the specified PTE. PCAfree indicates that + * the PTE slot is empty. PCAauto means that it comes from an autogen area. These + * guys do not keep track of reference and change and are actually "wired". + * They're easy to maintain. PCAsteal + * is a sliding position mask used to "randomize" PTE slot stealing. All 4 of these + * fields fit in a single word and are loaded and stored under control of the + * PTEG control area lock (PCAlock). 
+ * + * Note that PCAauto does not contribute to the steal calculations at all. Originally + * it did, autogens were second in priority. This can result in a pathalogical + * case where an instruction can not make forward progress, or one PTE slot + * thrashes. + * + * Physically, the fields are arranged: + * 0: PCAfree + * 1: PCAauto + * 2: PCAlock + * 3: PCAsteal + */ + +insert: lwz r10,PCAallo(r8) /* Get the PTEG controls */ + eqv r6,r6,r6 /* Get all ones */ + mr r11,r10 /* Make a copy */ + rlwimi r6,r10,8,16,23 /* Insert sliding steal position */ + rlwimi r11,r11,24,24,31 /* Duplicate the locked field */ + addi r6,r6,-256 /* Form mask */ + rlwimi r11,r11,16,0,15 /* This gives us a quadrupled lock mask */ + rlwinm r5,r10,31,24,0 /* Slide over the mask for next time */ + mr r9,r10 /* Make a copy to test */ + not r11,r11 /* Invert the quadrupled lock */ + or r2,r2,r0 /* Force on R, and maybe C bit */ + and r9,r9,r11 /* Remove the locked guys */ + rlwimi r5,r5,8,24,24 /* Wrap bottom bit to top in mask */ + rlwimi r9,r11,0,16,31 /* Put two copies of the unlocked entries at the end */ + rlwinm r6,r6,0,16,7 ; Remove the autogens from the priority calculations + rlwimi r10,r5,0,24,31 /* Move steal map back in */ + and r9,r9,r6 /* Set the starting point for stealing */ + +/* So, now we have in R9: + byte 0 = ~locked & free + byte 1 = 0 + byte 2 = ~locked & (PCAsteal - 1) + byte 3 = ~locked + + Each bit position represents (modulo 8) a PTE. If it is 1, it is available for + allocation at its priority level, left to right. + + Additionally, the PCA steal field in R10 has been rotated right one bit. +*/ + + + rlwinm r21,r10,8,0,7 ; Isolate just the old autogen bits + cntlzw r6,r9 /* Allocate a slot */ + mr r14,r12 /* Save our mapping for later */ + cmplwi r6,32 ; Was there anything available? 
+ rlwinm r7,r6,29,30,31 /* Get the priority slot we got this from */ + rlwinm r6,r6,0,29,31 ; Isolate bit position + srw r11,r4,r6 /* Position the PTEG control bits */ + slw r21,r21,r6 ; Move corresponding old autogen flag to bit 0 + mr r22,r11 ; Get another copy of the selected slot + + beq- realFault /* Arghh, no slots! Take the long way 'round... */ + + /* Remember, we've already set up the mask pattern + depending upon how we got here: + if got here from simple mapping, R4=0x80000000, + if we got here from autogen it is 0x80800000. */ + + rlwinm r6,r6,3,26,28 /* Start calculating actual PTE address */ + rlwimi r22,r22,24,8,15 ; Duplicate selected slot in second byte + rlwinm. r11,r11,0,8,15 /* Isolate just the auto bit (remember about it too) */ + andc r10,r10,r22 /* Turn off the free and auto bits */ + add r6,r8,r6 /* Get position into PTEG control area */ + cmplwi cr1,r7,1 /* Set the condition based upon the old PTE type */ + sub r6,r6,r1 /* Switch it to the hash table */ + or r10,r10,r11 /* Turn auto on if it is (PTEG control all set up now) */ + subi r6,r6,1 /* Point right */ + stw r10,PCAallo(r8) /* Allocate our slot */ + dcbt br0,r6 ; Touch in the PTE + bne wasauto /* This was autogenned... */ + + stw r6,mmPTEent(r14) /* Link the mapping to the PTE slot */ + +/* + * So, now we're here and what exactly do we have? We've got: + * 1) a full PTE entry, both top and bottom words in R3 and R2 + * 2) an allocated slot in the PTEG. + * 3) R8 still points to the PTEG Control Area (PCA) + * 4) R6 points to the PTE entry. + * 5) R1 contains length of the hash table-1. We use this to back-translate + * a PTE to a virtual address so we can invalidate TLBs. + * 6) R11 has a copy of the PCA controls we set. + * 7a) R7 indicates what the PTE slot was before we got to it. 0 shows + * that it was empty and 2 or 3, that it was + * a we've stolen a live one. CR1 is set to LT for empty and GT + * otherwise. 
+ * 7b) Bit 0 of R21 is 1 if the stolen PTE was autogenned + * 8) So far as our selected PTE, it should be valid if it was stolen + * and invalid if not. We could put some kind of assert here to + * check, but I think that I'd rather leave it in as a mysterious, + * non-reproducable bug. + * 9) The new PTE's mapping has been moved to the front of its PTEG hash list + * so that it's kept in some semblance of a MRU list. + * 10) R14 points to the mapping we're adding. + * + * So, what do we have to do yet? + * 1) If we stole a slot, we need to invalidate the PTE completely. + * 2) If we stole one AND it was not an autogen, + * copy the entire old PTE (including R and C bits) to its mapping. + * 3) Set the new PTE in the PTEG and make sure it is valid. + * 4) Unlock the PTEG control area. + * 5) Go back to the interrupt handler, changing the interrupt + * code to "in vain" which will restore the registers and bail out. + * + */ +wasauto: oris r3,r3,0x8000 /* Turn on the valid bit */ + blt+ cr1,slamit /* It was empty, go slam it on in... */ + + lwz r10,0(r6) /* Grab the top part of the PTE */ + rlwinm r12,r6,6,4,19 /* Match up the hash to a page boundary */ + rlwinm r5,r10,5,4,19 /* Extract the VSID to a page boundary */ + rlwinm r10,r10,0,1,31 /* Make it invalid */ + xor r12,r5,r12 /* Calculate vaddr */ + stw r10,0(r6) /* Invalidate the PTE */ + rlwinm r5,r10,7,27,29 ; Move nybble 0 up to subhash position + rlwimi r12,r10,1,0,3 /* Move in the segment portion */ + lis r9,HIGH_ADDR(EXT(tlb_system_lock)) /* Get the TLBIE lock */ + xor r5,r5,r10 ; Splooch nybble 0 and 1 + rlwimi r12,r10,22,4,9 /* Move in the API */ + ori r9,r9,LOW_ADDR(EXT(tlb_system_lock)) /* Grab up the bottom part */ + rlwinm r4,r10,27,27,29 ; Get low 3 bits of the VSID for look-aside hash + + sync /* Make sure the invalid is stored */ + + xor r4,r4,r5 ; Finish splooching nybble 0, 1, and the low bits of the VSID + + lwarx r5,0,r9 ; ? 
+ +tlbhang: lwarx r5,0,r9 /* Get the TLBIE lock */ + + rlwinm r4,r4,0,27,29 ; Clean up splooched hash value + + mr. r5,r5 /* Is it locked? */ + add r4,r4,r8 /* Point to the offset into the PCA area */ + li r5,1 /* Get our lock word */ + bne- tlbhang /* It's locked, go wait... */ + + la r4,PCAhash(r4) /* Point to the start of the hash chain for the PTE we're replacing */ + + stwcx. r5,0,r9 /* Try to get it */ + bne- tlbhang /* We was beat... */ + + mfspr r7,pvr /* Find out what kind of machine we are */ + li r5,0 /* Lock clear value */ + rlwinm r7,r7,16,16,31 /* Isolate CPU type */ + + tlbie r12 /* Invalidate it everywhere */ + + cmplwi r7,3 /* Is this a 603? */ + stw r5,0(r9) /* Clear the lock */ + + beq- its603 /* It's a 603, skip the tlbsync... */ + + eieio /* Make sure that the tlbie happens first */ + tlbsync /* wait for everyone to catch up */ + +its603: rlwinm. r21,r21,0,0,0 ; See if we just stole an autogenned entry + sync /* Make sure of it all */ + + bne slamit ; The old was an autogen, time to slam the new in... + + lwz r9,4(r6) /* Get the real portion of old PTE */ + lwz r7,0(r4) /* Get the first element. We can't get to here + if we aren't working with a mapping... */ + mr r0,r7 ; Save pointer to first element + +findold: mr r1,r11 ; Save the previous guy + mr. r11,r7 /* Copy and test the chain */ + beq- bebad /* Assume it's not zero... */ + + lwz r5,mmPTEv(r11) /* See if this is the old active one */ + cmplw cr2,r11,r14 /* Check if this is actually the new one */ + cmplw r5,r10 /* Is this us? (Note: valid bit kept off in mappings) */ + lwz r7,mmhashnext(r11) /* Get the next one in line */ + beq- cr2,findold /* Don't count the new one... */ + cmplw cr2,r11,r0 ; Check if we are first on the list + bne+ findold /* Not it (and assume the worst)... */ + + lwz r12,mmphysent(r11) /* Get the pointer to the physical entry */ + beq- cr2,nomove ; We are first, no need to requeue... 
+ + stw r11,0(r4) ; Chain us to the head + stw r0,mmhashnext(r11) ; Chain the old head to us + stw r7,mmhashnext(r1) ; Unlink us + +nomove: li r5,0 /* Clear this on out */ + + mr. r12,r12 /* Is there a physical entry? */ + stw r5,mmPTEent(r11) ; Clear the PTE entry pointer + li r5,pepte1 /* Point to the PTE last half */ + stw r9,mmPTEr(r11) ; Squirrel away the whole thing (RC bits are in here) + + beq- mrgmrcx ; No physical entry for this one... + + rlwinm r11,r9,0,23,24 /* Keep only the RC bits */ + + lwarx r9,r5,r12 ; ? + +mrgmrcx: lwarx r9,r5,r12 /* Get the master copy */ + or r9,r9,r11 /* Merge in latest RC */ + stwcx. r9,r5,r12 /* Save it back */ + bne- mrgmrcx /* If it changed, try again... */ + +/* + * Here's where we finish up. We save the real part of the PTE, eieio it, to make sure it's + * out there before the top half (with the valid bit set). + */ + +slamit: stw r2,4(r6) /* Stash the real part */ + li r4,0 /* Get a lock clear value */ + eieio /* Erect a barricade */ + stw r3,0(r6) /* Stash the virtual part and set valid on */ + + stw r4,PCAlock(r8) /* Clear the PCA lock */ + + li r3,T_IN_VAIN /* Say that we handled it */ + sync /* Go no further until the stores complete */ +#if PERFTIMES && DEBUG + mflr r11 + mr r4,r3 + li r3,33 + bl EXT(dbgLog2) ; Start of hw_add_map + mr r3,r4 + mtlr r11 +#endif + blr /* Back to the fold... */ + +bebad: lis r0,HIGH_ADDR(Choke) /* We have a kernel choke!!! */ + ori r0,r0,LOW_ADDR(Choke) + sc /* Firmware Heimlich maneuver */ + +/* + * This walks the hash table or DBATs to locate the physical address of a virtual one. + * The space is provided. If it is the kernel space, the DBATs are searched first. Failing + * that, the hash table is accessed. Zero is returned for failure, so it must be special cased. + * This is usually used for debugging, so we try not to rely + * on anything that we don't have to. 
+ */ + +ENTRY(LRA, TAG_NO_FRAME_USED) + + mfsprg r8,2 ; Get feature flags + mfmsr r10 /* Save the current MSR */ + mtcrf 0x04,r8 ; Set the features + xoris r5,r3,HIGH_ADDR(PPC_SID_KERNEL) /* Clear the top half if equal */ + andi. r9,r10,0x7FCF /* Turn off interrupts and translation */ + eqv r12,r12,r12 /* Fill the bottom with foxes */ + + bt pfNoMSRirb,lraNoMSR ; No MSR... + + mtmsr r9 ; Translation and all off + isync ; Toss prefetch + b lraNoMSRx + +lraNoMSR: + mr r7,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r9 ; Get new MSR + sc ; Set it + mr r3,r7 +lraNoMSRx: + + cmplwi r5,LOW_ADDR(PPC_SID_KERNEL) /* See if this is kernel space */ + rlwinm r11,r3,6,6,25 /* Position the space for the VSID */ + isync /* Purge pipe */ + bne- notkernsp /* This is not for the kernel... */ + + mfspr r5,dbat0u /* Get the virtual address and length */ + eqv r8,r8,r8 /* Get all foxes */ + rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ + rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ + beq- ckbat1 /* not valid, skip this one... */ + sub r7,r4,r7 /* Subtract out the base */ + rlwimi r8,r5,15,0,14 /* Get area length - 1 */ + mfspr r6,dbat0l /* Get the real part */ + cmplw r7,r8 /* Check if it is in the range */ + bng+ fndbat /* Yup, she's a good un... */ + +ckbat1: mfspr r5,dbat1u /* Get the virtual address and length */ + eqv r8,r8,r8 /* Get all foxes */ + rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ + rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ + beq- ckbat2 /* not valid, skip this one... */ + sub r7,r4,r7 /* Subtract out the base */ + rlwimi r8,r5,15,0,14 /* Get area length - 1 */ + mfspr r6,dbat1l /* Get the real part */ + cmplw r7,r8 /* Check if it is in the range */ + bng+ fndbat /* Yup, she's a good un... */ + +ckbat2: mfspr r5,dbat2u /* Get the virtual address and length */ + eqv r8,r8,r8 /* Get all foxes */ + rlwinm. 
r0,r5,0,30,30 /* Check if valid for supervisor state */ + rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ + beq- ckbat3 /* not valid, skip this one... */ + sub r7,r4,r7 /* Subtract out the base */ + rlwimi r8,r5,15,0,14 /* Get area length - 1 */ + mfspr r6,dbat2l /* Get the real part */ + cmplw r7,r8 /* Check if it is in the range */ + bng- fndbat /* Yup, she's a good un... */ + +ckbat3: mfspr r5,dbat3u /* Get the virtual address and length */ + eqv r8,r8,r8 /* Get all foxes */ + rlwinm. r0,r5,0,30,30 /* Check if valid for supervisor state */ + rlwinm r7,r5,0,0,14 /* Clean up the base virtual address */ + beq- notkernsp /* not valid, skip this one... */ + sub r7,r4,r7 /* Subtract out the base */ + rlwimi r8,r5,15,0,14 /* Get area length - 1 */ + mfspr r6,dbat3l /* Get the real part */ + cmplw r7,r8 /* Check if it is in the range */ + bgt+ notkernsp /* No good... */ + +fndbat: rlwinm r6,r6,0,0,14 /* Clean up the real address */ + mtmsr r10 /* Restore state */ + add r3,r7,r6 /* Relocate the offset to real */ + isync /* Purge pipe */ + blr /* Bye, bye... */ +notkernsp: mfspr r5,sdr1 /* Get hash table base and size */ + rlwimi r11,r4,30,2,5 /* Insert the segment no. to make a VSID */ + rlwimi r12,r5,16,0,15 /* Make table size -1 out of mask */ + rlwinm r7,r4,26,10,25 /* Isolate the page index */ + andc r5,r5,r12 /* Clean up the hash table */ + xor r7,r7,r11 /* Get primary hash */ + rlwinm r11,r11,1,1,24 /* Position VSID for pte ID */ + and r7,r7,r12 /* Wrap the hash */ + rlwimi r11,r4,10,26,31 /* Move API into pte ID */ + add r5,r7,r5 /* Point to the PTEG */ + oris r11,r11,0x8000 /* Slam on valid bit so's we don't match an invalid one */ + + li r9,8 /* Get the number of PTEs to check */ + lwz r6,0(r5) /* Preload the virtual half */ + +fndpte: subi r9,r9,1 /* Count the pte */ + lwz r3,4(r5) /* Get the real half */ + cmplw cr1,r6,r11 /* Is this what we want? */ + lwz r6,8(r5) /* Start to get the next virtual half */ + mr. r9,r9 /* Any more to try? 
*/ + addi r5,r5,8 /* Bump to next slot */ + beq cr1,gotxlate /* We found what we were looking for... */ + bne+ fndpte /* Go try the next PTE... */ + + mtmsr r10 /* Restore state */ + li r3,0 /* Show failure */ + isync /* Purge pipe */ + blr /* Leave... */ + +gotxlate: mtmsr r10 /* Restore state */ + rlwimi r3,r4,0,20,31 /* Cram in the page displacement */ + isync /* Purge pipe */ + blr /* Return... */ + + + +/* + * struct blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr) + * + * This is used to add a block mapping entry to the MRU list whose top + * node is anchored at bmaps. This is a real address and is also used as + * the lock. + * + * Overlapping areas are not allowed. If we find one, we return it's address and + * expect the upper layers to panic. We only check this for a debug build... + * + */ + + .align 5 + .globl EXT(hw_add_blk) + +LEXT(hw_add_blk) + + mfsprg r9,2 ; Get feature flags + lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation + mfmsr r0 /* Save the MSR */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r9 ; Set the features + xor r3,r3,r6 ; Get real address of bmap anchor + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + la r3,PMAP_BMAPS(r3) ; Point to bmap header + + bt pfNoMSRirb,habNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b habNoMSRx + +habNoMSR: + mr r9,r0 + mr r8,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r8 + mr r0,r9 +habNoMSRx: + +abLck: lwarx r9,0,r3 ; Get the block map anchor and lock + rlwinm. r8,r9,0,31,31 ; Is it locked? + ori r8,r9,1 ; Set the lock + bne- abLckw ; Yeah... + stwcx. r8,0,r3 ; Lock the bmap list + bne- abLck ; Someone else was trying, try again... + b abSXg ; All done... + + .align 4 + +abLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held + beq+ abLck ; Not no more... + lwz r9,0(r3) ; Get lock word again... + b abLckw ; Check it out... 
+ + .align 5 + + nop ; Force ISYNC to last instruction in IFETCH + nop + +abSXg: rlwinm r11,r9,0,0,26 ; Clear out flags and lock + isync ; Make sure we have not used anything yet + +; +; +; + + lwz r7,bmstart(r4) ; Get start + lwz r8,bmend(r4) ; Get end + mr r2,r11 ; Get chain + +abChk: mr. r10,r2 ; End of chain? + beq abChkD ; Yes, chain is ok... + lwz r5,bmstart(r10) ; Get start of current area + lwz r6,bmend(r10) ; Get end of current area + + cmplw cr0,r8,r5 ; Is the end of the new before the old? + cmplw cr1,r8,r6 ; Is the end of the new after the old? + cmplw cr6,r6,r7 ; Is the end of the old before the new? + cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in old + cmplw cr7,r6,r8 ; Is the end of the old after the new? + lwz r2,bmnext(r10) ; Get pointer to the next + cror cr6_eq,cr6_lt,cr7_gt ; Set cr2_eq if old not in new + crand cr1_eq,cr1_eq,cr6_eq ; Set cr1_eq if no overlap + beq+ cr1,abChk ; Ok check the next... + + stw r9,0(r3) ; Unlock + mtmsr r0 ; Restore xlation and rupts + mr r3,r10 ; Pass back the overlap + isync ; + blr ; Return... + +abChkD: stw r11,bmnext(r4) ; Chain this on in + rlwimi r4,r9,0,27,31 ; Copy in locks and flags + sync ; Make sure that is done + + stw r4,0(r3) ; Unlock and chain the new first one + mtmsr r0 ; Restore xlation and rupts + li r3,0 ; Pass back a no failure return code + isync + blr ; Return... + + +/* + * struct blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) + * + * This is used to remove a block mapping entry from the list that + * is anchored at bmaps. bmaps is a virtual address and is also used as + * the lock. + * + * Note that this function clears a single block that contains + * any address within the range sva to eva (inclusive). To entirely + * clear any range, hw_rem_blk must be called repeatedly until it + * returns a 0. + * + * The block is removed from the list and all hash table entries + * corresponding to the mapped block are invalidated and the TLB + * entries are purged. 
If the block is large, this could take + * quite a while. We need to hash every possible address in the + * range and lock down the PCA. + * + * If we attempt to remove a permanent entry, we will not do it. + * The block address will be ored with 1 and returned. + * + * + */ + + .align 5 + .globl EXT(hw_rem_blk) + +LEXT(hw_rem_blk) + + mfsprg r9,2 ; Get feature flags + lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation + mfmsr r0 /* Save the MSR */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r9 ; Set the features + xor r3,r3,r6 ; Get real address of bmap anchor + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + la r3,PMAP_BMAPS(r3) ; Point to the bmap chain head + + bt pfNoMSRirb,hrbNoMSR ; No MSR... + + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hrbNoMSRx + +hrbNoMSR: + mr r9,r0 + mr r8,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r8 + mr r0,r9 +hrbNoMSRx: + +rbLck: lwarx r9,0,r3 ; Get the block map anchor and lock + rlwinm. r8,r9,0,31,31 ; Is it locked? + ori r8,r9,1 ; Set the lock + bne- rbLckw ; Yeah... + stwcx. r8,0,r3 ; Lock the bmap list + bne- rbLck ; Someone else was trying, try again... + b rbSXg ; All done... + + .align 4 + +rbLckw: rlwinm. r11,r9,0,31,31 ; Check if it is still held + beq+ rbLck ; Not no more... + lwz r9,0(r3) ; Get lock word again... + b rbLckw ; Check it out... + + .align 5 + + nop ; Force ISYNC to last instruction in IFETCH + nop + +rbSXg: rlwinm. r2,r9,0,0,26 ; Clear out flags and lock + mr r10,r3 ; Keep anchor as previous pointer + isync ; Make sure we have not used anything yet + + beq- rbMT ; There is nothing in the list + +rbChk: mr r12,r10 ; Save the previous + mr. r10,r2 ; End of chain? + beq rbMT ; Yes, nothing to do... + lwz r11,bmstart(r10) ; Get start of current area + lwz r6,bmend(r10) ; Get end of current area + + cmplw cr0,r5,r11 ; Is the end of range before the start of the area? 
+ cmplw cr1,r4,r6 ; Is the start of range after the end of the area? + cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range + lwz r2,bmnext(r10) ; Get the next one + beq+ cr1,rbChk ; Not this one, check the next... + + lwz r8,blkFlags(r10) ; Get the flags + + cmplw cr1,r12,r3 ; Did we delete the first one? + rlwinm. r8,r8,0,blkPermbit,blkPermbit ; is this a permanent block? + bne cr1,rbnFirst ; Nope... + rlwimi r9,r2,0,0,26 ; Change the lock value + ori r2,r9,1 ; Turn on the lock bit + +rbnFirst: bne- rbPerm ; This is permanent, do not remove... + lwz r8,bmspace(r10) ; Get the VSID + stw r2,bmnext(r12) ; Unchain us + + eqv r4,r4,r4 ; Fill the bottom with foxes + mfspr r12,sdr1 ; Get hash table base and size + rlwinm r8,r8,6,0,25 ; Align VSID to PTEG + rlwimi r4,r12,16,0,15 ; Make table size - 1 out of mask + andc r12,r12,r4 ; Clean up address of hash table + rlwinm r5,r11,26,6,25 ; Rotate virtual start address into PTEG units + add r12,r12,r4 ; Point to PCA - 1 + rlwinm r6,r6,26,6,25 ; Rotate virtual end address into PTEG units + addi r12,r12,1 ; Point to PCA base + sub r6,r6,r5 ; Get the total number of PTEGs to clear + cmplw r6,r4 ; See if this wraps all the way around + blt rbHash ; Nope, length is right + subi r6,r4,32+31 ; Back down to correct length + +rbHash: xor r2,r8,r5 ; Hash into table + and r2,r2,r4 ; Wrap into the table + add r2,r2,r12 ; Point right at the PCA + +rbLcka: lwarx r7,0,r2 ; Get the PTEG lock + mr. r7,r7 ; Is it locked? + bne- rbLckwa ; Yeah... + li r7,1 ; Get the locked value + stwcx. r7,0,r2 ; Take it + bne- rbLcka ; Someone else was trying, try again... + b rbSXga ; All done... + +rbLckwa: mr. r7,r7 ; Check if it is already held + beq+ rbLcka ; It is clear... + lwz r7,0(r2) ; Get lock word again... + b rbLckwa ; Wait... + +rbSXga: isync ; Make sure nothing used yet + lwz r7,PCAallo(r2) ; Get the allocation word + rlwinm. 
r11,r7,8,0,7 ; Isolate the autogenerated PTEs + or r7,r7,r11 ; Release the autogen slots + beq+ rbAintNone ; There are not any here + mtcrf 0xC0,r11 ; Set the branch masks for autogens + sub r11,r2,r4 ; Move back to the hash table + 1 + rlwinm r7,r7,0,16,7 ; Clear the autogen field + subi r11,r11,1 ; Point to the PTEG + stw r7,PCAallo(r2) ; Update the flags + li r7,0 ; Get an invalid PTE value + + bf 0,rbSlot1 ; No autogen here + stw r7,0x00(r11) ; Invalidate PTE +rbSlot1: bf 1,rbSlot2 ; No autogen here + stw r7,0x08(r11) ; Invalidate PTE +rbSlot2: bf 2,rbSlot3 ; No autogen here + stw r7,0x10(r11) ; Invalidate PTE +rbSlot3: bf 3,rbSlot4 ; No autogen here + stw r7,0x18(r11) ; Invalidate PTE +rbSlot4: bf 4,rbSlot5 ; No autogen here + stw r7,0x20(r11) ; Invalidate PTE +rbSlot5: bf 5,rbSlot6 ; No autogen here + stw r7,0x28(r11) ; Invalidate PTE +rbSlot6: bf 6,rbSlot7 ; No autogen here + stw r7,0x30(r11) ; Invalidate PTE +rbSlot7: bf 7,rbSlotx ; No autogen here + stw r7,0x38(r11) ; Invalidate PTE +rbSlotx: + +rbAintNone: li r7,0 ; Clear this out + sync ; To make SMP happy + addic. r6,r6,-64 ; Decrement the count + stw r7,PCAlock(r2) ; Release the PTEG lock + addi r5,r5,64 ; Move up by adjusted page number + bge+ rbHash ; Not done... + + sync ; Make sure the memory is quiet + +; +; Here we take the easy way out and just purge the entire TLB. This is +; certainly faster and definitely easier than blasting just the correct ones +; in the range, we only need one lock and one TLBSYNC. We would hope +; that most blocks are more than 64 pages (256K) and on every machine +; up to Book E, 64 TLBIEs will invalidate the entire table. +; + + li r5,64 ; Get number of TLB entries to purge + lis r12,HIGH_ADDR(EXT(tlb_system_lock)) ; Get the TLBIE lock + li r6,0 ; Start at 0 + ori r12,r12,LOW_ADDR(EXT(tlb_system_lock)) ; Grab up the bottom part + +rbTlbL: lwarx r2,0,r12 ; Get the TLBIE lock + mr. r2,r2 ; Is it locked? 
+ li r2,1 ; Get our lock value + bne- rbTlbL ; It is locked, go wait... + stwcx. r2,0,r12 ; Try to get it + bne- rbTlbL ; We was beat... + +rbTlbN: addic. r5,r5,-1 ; See if we did them all + tlbie r6 ; Invalidate it everywhere + addi r6,r6,0x1000 ; Up to the next page + bgt+ rbTlbN ; Make sure we have done it all... + + mfspr r5,pvr ; Find out what kind of machine we are + li r2,0 ; Lock clear value + + rlwinm r5,r5,16,16,31 ; Isolate CPU type + cmplwi r5,3 ; Is this a 603? + sync ; Make sure all is quiet + beq- rbits603a ; It is a 603, skip the tlbsync... + + eieio ; Make sure that the tlbie happens first + tlbsync ; wait for everyone to catch up + +rbits603a: sync ; Wait for quiet again + stw r2,0(r12) ; Unlock invalidates + + sync ; Make sure that is done + + stw r9,0(r3) ; Unlock and chain the new first one + mtmsr r0 ; Restore xlation and rupts + mr r3,r10 ; Pass back the removed block + isync + blr ; Return... + +rbMT: stw r9,0(r3) ; Unlock + mtmsr r0 ; Restore xlation and rupts + li r3,0 ; Say we did not find one + isync + blr ; Return... + +rbPerm: stw r9,0(r3) ; Unlock + mtmsr r0 ; Restore xlation and rupts + ori r3,r10,1 ; Say we did not remove it + isync + blr ; Return... + + +/* + * vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va) + * + * This is used to translate a virtual address within a block mapping entry + * to a physical address. If not found, 0 is returned. + * + */ + + .align 5 + .globl EXT(hw_cvp_blk) + +LEXT(hw_cvp_blk) + + mfsprg r9,2 ; Get feature flags + lwz r6,PMAP_PMAPVR(r3) ; Get the v to r translation + mfmsr r0 /* Save the MSR */ + rlwinm r12,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear interruptions */ + mtcrf 0x04,r9 ; Set the features + xor r3,r3,r6 ; Get real address of bmap anchor + rlwinm r12,r12,0,28,25 /* Clear IR and DR */ + la r3,PMAP_BMAPS(r3) ; Point to chain header + + bt pfNoMSRirb,hcbNoMSR ; No MSR... 
+ + mtmsr r12 ; Translation and all off + isync ; Toss prefetch + b hcbNoMSRx + +hcbNoMSR: + mr r9,r0 + mr r8,r3 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r12 ; Get new MSR + sc ; Set it + mr r3,r8 + mr r0,r9 +hcbNoMSRx: + +cbLck: lwarx r9,0,r3 ; Get the block map anchor and lock + rlwinm. r8,r9,0,31,31 ; Is it locked? + ori r8,r9,1 ; Set the lock + bne- cbLckw ; Yeah... + stwcx. r8,0,r3 ; Lock the bmap list + bne- cbLck ; Someone else was trying, try again... + b cbSXg ; All done... + + .align 4 + +cbLckw: rlwinm. r5,r9,0,31,31 ; Check if it is still held + beq+ cbLck ; Not no more... + lwz r9,0(r3) ; Get lock word again... + b cbLckw ; Check it out... + + .align 5 + + nop ; Force ISYNC to last instruction in IFETCH + nop + nop + nop + nop + +cbSXg: rlwinm. r11,r9,0,0,26 ; Clear out flags and lock + li r2,0 ; Assume we do not find anything + isync ; Make sure we have not used anything yet + +cbChk: mr. r11,r11 ; Is there more? + beq- cbDone ; No more... + lwz r5,bmstart(r11) ; Get the bottom of range + lwz r12,bmend(r11) ; Get the top of range + cmplw cr0,r4,r5 ; Are we before the entry? + cmplw cr1,r4,r12 ; Are we after the entry? + cror cr1_eq,cr0_lt,cr1_gt ; Set cr1_eq if new not in range + beq- cr1,cbNo ; We are not in the range... + + lwz r2,bmPTEr(r11) ; Get the real part of the PTE + sub r5,r4,r5 ; Get offset into area + rlwinm r2,r2,0,0,19 ; Clean out everything but the page + add r2,r2,r5 ; Adjust the real address + +cbDone: stw r9,0(r3) ; Unlock it, we are done with it (no sync needed) + mtmsr r0 ; Restore translation and interrupts... + isync ; Make sure it is on + mr r3,r2 ; Set return physical address + blr ; Leave... + + .align 5 + +cbNo: lwz r11,bmnext(r11) ; Link next + b cbChk ; Check it out... + + +/* + * hw_set_user_space(pmap) + * hw_set_user_space_dis(pmap) + * + * Indicate whether memory space needs to be switched. 
+ * We really need to turn off interrupts here, because we need to be non-preemptable + * + * hw_set_user_space_dis is used when interruptions are already disabled. Mind the + * register usage here. The VMM switch code in vmachmon.s that calls this + * knows what registers are in use. Check that if these change. + */ + + + + .align 5 + .globl EXT(hw_set_user_space) + +LEXT(hw_set_user_space) + + mfmsr r10 /* Get the current MSR */ + rlwinm r9,r10,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off 'rupts */ + mtmsr r9 /* Disable 'em */ + lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation + lwz r4,PMAP_SPACE(r3) ; Get the space + mfsprg r6,0 /* Get the per_proc_info address */ + xor r3,r3,r7 ; Get real address of bmap anchor + stw r4,PP_USERSPACE(r6) /* Show our new address space */ + stw r3,PP_USERPMAP(r6) ; Show our real pmap address + mtmsr r10 /* Restore interruptions */ + blr /* Return... */ + + .align 5 + .globl EXT(hw_set_user_space_dis) + +LEXT(hw_set_user_space_dis) + + lwz r7,PMAP_PMAPVR(r3) ; Get the v to r translation + lwz r4,PMAP_SPACE(r3) ; Get the space + mfsprg r6,0 ; Get the per_proc_info address + xor r3,r3,r7 ; Get real address of bmap anchor + stw r4,PP_USERSPACE(r6) ; Show our new address space + stw r3,PP_USERPMAP(r6) ; Show our real pmap address + blr ; Return... + + +/* struct mapping *hw_cpv(struct mapping *mp) - Converts a physical mapping CB address to virtual + * + */ + + .align 5 + .globl EXT(hw_cpv) + +LEXT(hw_cpv) + + rlwinm. r4,r3,0,0,19 ; Round back to the mapping block allocation control block + mfmsr r10 ; Get the current MSR + beq- hcpvret ; Skip if we are passed a 0... + andi. 
r9,r10,0x7FEF ; Turn off interrupts and data translation + mtmsr r9 ; Disable DR and EE + isync + + lwz r4,mbvrswap(r4) ; Get the conversion value + mtmsr r10 ; Interrupts and DR back on + isync + xor r3,r3,r4 ; Convert to physical + +hcpvret: rlwinm r3,r3,0,0,26 ; Clean out any flags + blr + + +/* struct mapping *hw_cvp(struct mapping *mp) - Converts a virtual mapping CB address to physical + * + * Translation must be on for this + * + */ + + .align 5 + .globl EXT(hw_cvp) + +LEXT(hw_cvp) + + rlwinm r4,r3,0,0,19 ; Round back to the mapping block allocation control block + rlwinm r3,r3,0,0,26 ; Clean out any flags + lwz r4,mbvrswap(r4) ; Get the conversion value + xor r3,r3,r4 ; Convert to virtual + blr + + +/* int mapalc(struct mappingblok *mb) - Finds, allocates, and checks a free mapping entry in a block + * + * Lock must already be held on mapping block list + * returns 0 if all slots filled. + * returns n if a slot is found and it is not the last + * returns -n if a slot is found and it is the last + * when n and -n are returned, the corresponding bit is cleared + * + */ + + .align 5 + .globl EXT(mapalc) + +LEXT(mapalc) + + lwz r4,mbfree(r3) ; Get the first mask + lis r0,0x8000 ; Get the mask to clear the first free bit + lwz r5,mbfree+4(r3) ; Get the second mask + mr r12,r3 ; Save the return + cntlzw r8,r4 ; Get first free field + lwz r6,mbfree+8(r3) ; Get the third mask + srw. r9,r0,r8 ; Get bit corresponding to first free one + lwz r7,mbfree+12(r3) ; Get the fourth mask + cntlzw r10,r5 ; Get first free field in second word + andc r4,r4,r9 ; Turn it off + bne malcfnd0 ; Found one... + + srw. r9,r0,r10 ; Get bit corresponding to first free one in second word + cntlzw r11,r6 ; Get first free field in third word + andc r5,r5,r9 ; Turn it off + bne malcfnd1 ; Found one... + + srw. r9,r0,r11 ; Get bit corresponding to first free one in third word + cntlzw r10,r7 ; Get first free field in fourth word + andc r6,r6,r9 ; Turn it off + bne malcfnd2 ; Found one... 
+ + srw. r9,r0,r10 ; Get bit corresponding to first free one in second word + li r3,0 ; Assume abject failure + andc r7,r7,r9 ; Turn it off + beqlr ; There are none any left... + + addi r3,r10,96 ; Set the correct bit number + stw r7,mbfree+12(r12) ; Actually allocate the slot + +mapafin: or r4,r4,r5 ; Merge the first two allocation maps + or r6,r6,r7 ; Then the last two + or. r4,r4,r6 ; Merge both halves + bnelr+ ; Return if some left for next time... + + neg r3,r3 ; Indicate we just allocated the last one + blr ; Leave... + +malcfnd0: stw r4,mbfree(r12) ; Actually allocate the slot + mr r3,r8 ; Set the correct bit number + b mapafin ; Exit now... + +malcfnd1: stw r5,mbfree+4(r12) ; Actually allocate the slot + addi r3,r10,32 ; Set the correct bit number + b mapafin ; Exit now... + +malcfnd2: stw r6,mbfree+8(r12) ; Actually allocate the slot + addi r3,r11,64 ; Set the correct bit number + b mapafin ; Exit now... + + +/* + * Log out all memory usage + */ + + .align 5 + .globl EXT(logmem) + +LEXT(logmem) + + mfmsr r2 ; Get the MSR + lis r10,hi16(EXT(DebugWork)) ; High part of area + lis r12,hi16(EXT(mem_actual)) ; High part of actual + andi. r0,r10,0x7FCF ; Interrupts and translation off + ori r10,r10,lo16(EXT(DebugWork)) ; Get the entry + mtmsr r0 ; Turn stuff off + ori r12,r12,lo16(EXT(mem_actual)) ; Get the actual + li r0,1 ; Get a one + + isync + + stw r0,4(r10) ; Force logging off + lwz r0,0(r12) ; Get the end of memory + + lis r12,hi16(EXT(mem_size)) ; High part of defined memory + ori r12,r12,lo16(EXT(mem_size)) ; Low part of defined memory + lwz r12,0(r12) ; Make it end of defined + + cmplw r0,r12 ; Is there room for the data? + ble- logmemexit ; No, do not even try... 
+ + stw r12,0(r12) ; Set defined memory size + stw r0,4(r12) ; Set the actual amount of memory + + lis r3,hi16(EXT(hash_table_base)) ; Hash table address + lis r4,hi16(EXT(hash_table_size)) ; Hash table size + lis r5,hi16(EXT(pmap_mem_regions)) ; Memory regions + lis r6,hi16(EXT(mapCtl)) ; Mappings + ori r3,r3,lo16(EXT(hash_table_base)) + ori r4,r4,lo16(EXT(hash_table_size)) + ori r5,r5,lo16(EXT(pmap_mem_regions)) + ori r6,r6,lo16(EXT(mapCtl)) + lwz r3,0(r3) + lwz r4,0(r4) + lwz r5,4(r5) ; Get the pointer to the phys_ent table + lwz r6,0(r6) ; Get the pointer to the current mapping block + stw r3,8(r12) ; Save the hash table address + stw r4,12(r12) ; Save the hash table size + stw r5,16(r12) ; Save the physent pointer + stw r6,20(r12) ; Save the mappings + + addi r11,r12,0x1000 ; Point to area to move hash table and PCA + + add r4,r4,r4 ; Double size for both + +copyhash: lwz r7,0(r3) ; Copy both of them + lwz r8,4(r3) + lwz r9,8(r3) + lwz r10,12(r3) + subic. r4,r4,0x10 + addi r3,r3,0x10 + stw r7,0(r11) + stw r8,4(r11) + stw r9,8(r11) + stw r10,12(r11) + addi r11,r11,0x10 + bgt+ copyhash + + rlwinm r4,r12,20,12,31 ; Get number of phys_ents + +copyphys: lwz r7,0(r5) ; Copy physents + lwz r8,4(r5) + subic. r4,r4,1 + addi r5,r5,8 + stw r7,0(r11) + stw r8,4(r11) + addi r11,r11,8 + bgt+ copyphys + + addi r11,r11,4095 ; Round up to next page + rlwinm r11,r11,0,0,19 + + lwz r4,4(r6) ; Get the size of the mapping area + +copymaps: lwz r7,0(r6) ; Copy the mappings + lwz r8,4(r6) + lwz r9,8(r6) + lwz r10,12(r6) + subic. 
r4,r4,0x10 + addi r6,r6,0x10 + stw r7,0(r11) + stw r8,4(r11) + stw r9,8(r11) + stw r10,12(r11) + addi r11,r11,0x10 + bgt+ copymaps + + sub r11,r11,r12 ; Get the total length we saved + stw r11,24(r12) ; Save the size + +logmemexit: mtmsr r2 ; Back to normal + li r3,0 + isync + blr + + diff --git a/osfmk/ppc/interrupt.c b/osfmk/ppc/interrupt.c new file mode 100644 index 000000000..5e72b712a --- /dev/null +++ b/osfmk/ppc/interrupt.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if NCPUS > 1 +#include +#endif /* NCPUS > 1 */ +#include + +struct ppc_saved_state * interrupt( + int type, + struct ppc_saved_state *ssp, + unsigned int dsisr, + unsigned int dar) +{ + int current_cpu, tmpr, targtemp; + unsigned int throttle; + AbsoluteTime now; + thread_act_t act; + + disable_preemption(); + + current_cpu = cpu_number(); + + switch (type) { + + case T_THERMAL: /* Fix the air conditioning, I'm dripping with sweat, or freezing, whatever... */ + +/* + * Note that this code is just a hackification until we have a real thermal plan. + */ + + tmpr = ml_read_temp(); /* Find out just how hot it is */ + targtemp = (dar >> 23) & 0x7F; /* Get the temprature we were looking for */ + if(dar & 4) { /* Did the temprature drop down? */ +#if 1 + kprintf("THERMAL below (cpu %d) target = %d; actual = %d; thrm = %08X\n", current_cpu, targtemp, tmpr, dar); +#endif +#if 0 + throttle = ml_throttle(0); /* Set throttle off */ +#if 1 + kprintf("THERMAL (cpu %d) throttle set off; last = %d\n", current_cpu, throttle); +#endif +#endif + ml_thrm_set(0, per_proc_info[current_cpu].thrm.throttleTemp); /* Set no low temp and max allowable as max */ + +#if 1 + kprintf("THERMAL (cpu %d) temp set to: off min, %d max\n", current_cpu, per_proc_info[current_cpu].thrm.throttleTemp); +#endif + } + else { +#if 1 + kprintf("THERMAL above (cpu %d) target = %d; actual = %d; thrm = %08X\n", current_cpu, targtemp, tmpr, dar); +#endif +#if 0 + throttle = ml_throttle(32); /* Set throttle on about 1/8th */ +#if 1 + kprintf("THERMAL (cpu %d) throttle set to 32; last = %d\n", current_cpu, throttle); +#endif +#endif + ml_thrm_set(per_proc_info[current_cpu].thrm.throttleTemp - 4, 0); /* Set low temp to max - 4 and max off */ +#if 1 + kprintf("THERMAL (cpu %d) temp set to: %d min, off max\n", current_cpu, 
per_proc_info[current_cpu].thrm.throttleTemp - 4); +#endif + + } + break; + + case T_DECREMENTER: + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE, + isync_mfdec(), ((savearea *)ssp)->save_srr0, 0, 0, 0); + +#if 0 + if (pcsample_enable) { + if (find_user_regs(current_act())) + add_pcsamples (user_pc(current_act())); + } +#endif + + act = current_act(); /* Find ourselves */ + if(act->mact.qactTimer.hi | act->mact.qactTimer.lo) { /* Is the timer set? */ + clock_get_uptime(&now); /* Find out what time it is */ + if (CMP_ABSOLUTETIME(&act->mact.qactTimer, &now) <= 0) { /* It is set, has it popped? */ + act->mact.qactTimer.hi = 0; /* Clear single shot timer */ + act->mact.qactTimer.lo = 0; /* and the other half */ + if((unsigned int)act->mact.vmmControl & 0xFFFFFFFE) { /* Are there any virtual machines? */ + vmm_timer_pop(act); /* Yes, check out them out... */ + } + } + } + + rtclock_intr(0, ssp, 0); + break; + + case T_INTERRUPT: + /* Call the platform interrupt routine */ + counter_always(c_incoming_interrupts++); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, + current_cpu, ((savearea *)ssp)->save_srr0, 0, 0, 0); + + per_proc_info[current_cpu].interrupt_handler( + per_proc_info[current_cpu].interrupt_target, + per_proc_info[current_cpu].interrupt_refCon, + per_proc_info[current_cpu].interrupt_nub, + per_proc_info[current_cpu].interrupt_source); + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END, + 0, 0, 0, 0, 0); + + break; + + case T_SIGP: + /* Did the other processor signal us? 
*/ + cpu_signal_handler(); + break; + + case T_SHUTDOWN: + cpu_doshutdown(); + panic("returning from cpu_doshutdown()\n"); + break; + + + default: + #if MACH_KDP || MACH_KDB + (void)Call_Debugger(type, ssp); + #else + panic("Invalid interrupt type %x\n", type); + #endif + break; + } + + enable_preemption(); + return ssp; +} diff --git a/osfmk/ppc/io_map.c b/osfmk/ppc/io_map.c new file mode 100644 index 000000000..5ce5ab5e8 --- /dev/null +++ b/osfmk/ppc/io_map.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern vm_offset_t virtual_avail; + +/* + * Allocate and map memory for devices that may need to be mapped + * outside the usual physical memory. If phys_addr is NULL then + * steal the appropriate number of physical pages from the vm + * system and map them. 
+ */ +vm_offset_t +io_map(phys_addr, size) + vm_offset_t phys_addr; + vm_size_t size; +{ + vm_offset_t start; + int i; + unsigned int j; + vm_page_t m; + + +#if DEBUG + assert (kernel_map != VM_MAP_NULL); /* VM must be initialised */ +#endif + + if (phys_addr != 0) { + /* make sure we map full contents of all the pages concerned */ + size = round_page(size + (phys_addr & PAGE_MASK)); + + /* Steal some free virtual addresses */ + + (void) kmem_alloc_pageable(kernel_map, &start, size); + + pmap_map_block(kernel_pmap, start, phys_addr, size, + VM_PROT_READ|VM_PROT_WRITE, PTE_WIMG_IO, 0); /* Set up a block mapped area */ + + return (start + (phys_addr & PAGE_MASK)); + + } else { + + /* Steal some free virtual addresses */ + (void) kmem_alloc_pageable(kernel_map, &start, size); + + mapping_prealloc(size); /* Make sure there are enough free mappings */ + /* Steal some physical pages and map them one by one */ + for (i = 0; i < size ; i += PAGE_SIZE) { + m = VM_PAGE_NULL; + while ((m = vm_page_grab()) == VM_PAGE_NULL) + VM_PAGE_WAIT(); + vm_page_gobble(m); + (void) pmap_map_bd(start + i, + m->phys_addr, + m->phys_addr + PAGE_SIZE, + VM_PROT_READ|VM_PROT_WRITE); + } + + mapping_relpre(); /* Allow mapping release */ + return start; + } +} diff --git a/osfmk/ppc/io_map_entries.h b/osfmk/ppc/io_map_entries.h new file mode 100644 index 000000000..e7a329dc3 --- /dev/null +++ b/osfmk/ppc/io_map_entries.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _PPC_IO_MAP_ENTRIES_H_ +#define _PPC_IO_MAP_ENTRIES_H_ + +extern vm_offset_t io_map( + vm_offset_t phys_addr, + vm_size_t size); + +#endif /* _PPC_IO_MAP_ENTRIES_H_ */ diff --git a/osfmk/ppc/iso_font.h b/osfmk/ppc/iso_font.h new file mode 100644 index 000000000..25ac9734f --- /dev/null +++ b/osfmk/ppc/iso_font.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * ISO Latin-1 Font + * + * Copyright (c) 2000 + * Ka-Ping Yee + * + * This font may be freely used for any purpose. 
+ */ + +/* + * adjusted 'A' 'V' to improve their dense appearance (ie. lightened) + * adjusted 'i' 'l' to improve their flow within a word (ie. widened) + * adjusted 'E' 'F' '#' + */ + +unsigned char iso_font[256*16] = { +/* 0 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 1 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 2 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 3 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 4 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 5 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 6 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 7 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 8 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 9 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 10 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 11 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 12 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 13 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 14 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 15 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 16 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 17 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 18 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 19 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 20 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 21 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 22 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 23 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 24 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 25 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 26 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 27 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 28 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 29 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 30 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 31 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 32 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 33 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 34 */ 0x00,0x00,0x6c,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 35 */ 0x00,0x00,0x00,0x36,0x36,0x7f,0x36,0x36,0x7f,0x36,0x36,0x00,0x00,0x00,0x00,0x00, +/* 36 */ 0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x3e,0x68,0x68,0x6b,0x3e,0x08,0x08,0x00,0x00, +/* 37 */ 0x00,0x00,0x00,0x33,0x13,0x18,0x08,0x0c,0x04,0x06,0x32,0x33,0x00,0x00,0x00,0x00, +/* 38 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x6c,0x3e,0x33,0x33,0x7b,0xce,0x00,0x00,0x00,0x00, +/* 39 */ 0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 40 */ 0x00,0x00,0x30,0x18,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x18,0x30,0x00,0x00,0x00, +/* 41 
*/ 0x00,0x00,0x0c,0x18,0x18,0x30,0x30,0x30,0x30,0x30,0x18,0x18,0x0c,0x00,0x00,0x00, +/* 42 */ 0x00,0x00,0x00,0x00,0x00,0x36,0x1c,0x7f,0x1c,0x36,0x00,0x00,0x00,0x00,0x00,0x00, +/* 43 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00, +/* 44 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, +/* 45 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 46 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 47 */ 0x00,0x00,0x60,0x20,0x30,0x10,0x18,0x08,0x0c,0x04,0x06,0x02,0x03,0x00,0x00,0x00, +/* 48 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x6b,0x6b,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 49 */ 0x00,0x00,0x18,0x1e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 50 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x30,0x18,0x0c,0x06,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 51 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x3c,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 52 */ 0x00,0x00,0x30,0x38,0x3c,0x36,0x33,0x7f,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00, +/* 53 */ 0x00,0x00,0x7f,0x03,0x03,0x3f,0x60,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 54 */ 0x00,0x00,0x3c,0x06,0x03,0x03,0x3f,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 55 */ 0x00,0x00,0x7f,0x60,0x30,0x30,0x18,0x18,0x18,0x0c,0x0c,0x0c,0x00,0x00,0x00,0x00, +/* 56 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x3e,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 57 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7e,0x60,0x60,0x60,0x30,0x1e,0x00,0x00,0x00,0x00, +/* 58 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 59 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, +/* 60 */ 0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00, +/* 61 */ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 62 */ 0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00, +/* 
63 */ 0x00,0x00,0x3e,0x63,0x60,0x30,0x30,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, +/* 64 */ 0x00,0x00,0x3c,0x66,0x73,0x7b,0x6b,0x6b,0x7b,0x33,0x06,0x3c,0x00,0x00,0x00,0x00, +/* 65 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 66 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x3f,0x63,0x63,0x63,0x63,0x3f,0x00,0x00,0x00,0x00, +/* 67 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x00,0x00,0x00,0x00, +/* 68 */ 0x00,0x00,0x1f,0x33,0x63,0x63,0x63,0x63,0x63,0x63,0x33,0x1f,0x00,0x00,0x00,0x00, +/* 69 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 70 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, +/* 71 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x73,0x63,0x63,0x66,0x7c,0x00,0x00,0x00,0x00, +/* 72 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 73 */ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 74 */ 0x00,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00,0x00,0x00,0x00, +/* 75 */ 0x00,0x00,0x63,0x33,0x1b,0x0f,0x07,0x07,0x0f,0x1b,0x33,0x63,0x00,0x00,0x00,0x00, +/* 76 */ 0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 77 */ 0x00,0x00,0x63,0x63,0x77,0x7f,0x7f,0x6b,0x6b,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 78 */ 0x00,0x00,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, +/* 79 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 80 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, +/* 81 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x6f,0x7b,0x3e,0x30,0x60,0x00,0x00, +/* 82 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x1b,0x33,0x63,0x63,0x00,0x00,0x00,0x00, +/* 83 */ 0x00,0x00,0x3e,0x63,0x03,0x03,0x0e,0x38,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 84 */ 0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, 
+/* 85 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 86 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, +/* 87 */ 0x00,0x00,0x63,0x63,0x6b,0x6b,0x6b,0x6b,0x7f,0x36,0x36,0x36,0x00,0x00,0x00,0x00, +/* 88 */ 0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x36,0x36,0x63,0x63,0x00,0x00,0x00,0x00, +/* 89 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 90 */ 0x00,0x00,0x7f,0x30,0x30,0x18,0x18,0x0c,0x0c,0x06,0x06,0x7f,0x00,0x00,0x00,0x00, +/* 91 */ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00, +/* 92 */ 0x00,0x00,0x03,0x02,0x06,0x04,0x0c,0x08,0x18,0x10,0x30,0x20,0x60,0x00,0x00,0x00, +/* 93 */ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00, +/* 94 */ 0x00,0x08,0x1c,0x36,0x63,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 95 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00, +/* 96 */ 0x00,0x00,0x0c,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 97 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 98 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x00,0x00,0x00,0x00, +/* 99 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 100 */ 0x00,0x00,0x60,0x60,0x60,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 101 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 102 */ 0x00,0x00,0x3c,0x66,0x06,0x1f,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00, +/* 103 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0x63,0x3e,0x00, +/* 104 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 105 */ 0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 106 */ 
0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00, +/* 107 */ 0x00,0x00,0x03,0x03,0x03,0x63,0x33,0x1b,0x0f,0x1f,0x33,0x63,0x00,0x00,0x00,0x00, +/* 108 */ 0x00,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 109 */ 0x00,0x00,0x00,0x00,0x00,0x35,0x6b,0x6b,0x6b,0x6b,0x6b,0x6b,0x00,0x00,0x00,0x00, +/* 110 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 111 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 112 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x03,0x03,0x03,0x00, +/* 113 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0xe0,0x60,0x00, +/* 114 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, +/* 115 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x0e,0x38,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 116 */ 0x00,0x00,0x00,0x0c,0x0c,0x3e,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 117 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 118 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, +/* 119 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x6b,0x6b,0x6b,0x3e,0x36,0x36,0x00,0x00,0x00,0x00, +/* 120 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x36,0x1c,0x1c,0x1c,0x36,0x63,0x00,0x00,0x00,0x00, +/* 121 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, +/* 122 */ 0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x30,0x18,0x0c,0x06,0x7f,0x00,0x00,0x00,0x00, +/* 123 */ 0x00,0x00,0x70,0x18,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00, +/* 124 */ 0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00, +/* 125 */ 0x00,0x00,0x0e,0x18,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00, +/* 126 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 127 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 128 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 129 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 130 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 131 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 132 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 133 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 134 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 135 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 136 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 137 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 138 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 139 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 140 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 141 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 142 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 143 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 144 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 145 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 146 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 147 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 148 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 149 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 150 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 151 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 152 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 153 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 154 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 155 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 156 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 157 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 158 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 159 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 160 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 161 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00, +/* 162 */ 0x00,0x00,0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x0b,0x6b,0x3e,0x08,0x08,0x00,0x00, +/* 163 */ 0x00,0x00,0x1c,0x36,0x06,0x06,0x1f,0x06,0x06,0x07,0x6f,0x3b,0x00,0x00,0x00,0x00, +/* 164 */ 0x00,0x00,0x00,0x00,0x66,0x3c,0x66,0x66,0x66,0x3c,0x66,0x00,0x00,0x00,0x00,0x00, +/* 165 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x7e,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, +/* 166 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 167 */ 0x00,0x3c,0x66,0x0c,0x1e,0x33,0x63,0x66,0x3c,0x18,0x33,0x1e,0x00,0x00,0x00,0x00, +/* 168 */ 0x00,0x00,0x36,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 169 */ 
0x00,0x00,0x3c,0x42,0x99,0xa5,0x85,0xa5,0x99,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, +/* 170 */ 0x00,0x1e,0x30,0x3e,0x33,0x3b,0x36,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 171 */ 0x00,0x00,0x00,0x00,0x00,0x6c,0x36,0x1b,0x1b,0x36,0x6c,0x00,0x00,0x00,0x00,0x00, +/* 172 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x60,0x60,0x00,0x00,0x00,0x00,0x00,0x00, +/* 173 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 174 */ 0x00,0x00,0x3c,0x42,0x9d,0xa5,0x9d,0xa5,0xa5,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, +/* 175 */ 0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 176 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 177 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x7e,0x00,0x00,0x00,0x00,0x00, +/* 178 */ 0x00,0x1e,0x33,0x18,0x0c,0x06,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 179 */ 0x00,0x1e,0x33,0x18,0x30,0x33,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 180 */ 0x00,0x30,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 181 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x76,0x6e,0x06,0x06,0x03,0x00, +/* 182 */ 0x00,0x00,0x7e,0x2f,0x2f,0x2f,0x2e,0x28,0x28,0x28,0x28,0x28,0x00,0x00,0x00,0x00, +/* 183 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 184 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x30,0x1e,0x00, +/* 185 */ 0x00,0x0c,0x0e,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 186 */ 0x00,0x1e,0x33,0x33,0x33,0x33,0x1e,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, +/* 187 */ 0x00,0x00,0x00,0x00,0x00,0x1b,0x36,0x6c,0x6c,0x36,0x1b,0x00,0x00,0x00,0x00,0x00, +/* 188 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, +/* 189 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x1c,0x36,0x18,0x0c,0x3e,0x00,0x00, +/* 190 */ 
0x00,0x1c,0x36,0x18,0x36,0x1c,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, +/* 191 */ 0x00,0x00,0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x06,0x06,0x03,0x63,0x3e,0x00,0x00, +/* 192 */ 0x0c,0x18,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 193 */ 0x18,0x0c,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 194 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 195 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 196 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 197 */ 0x1c,0x36,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 198 */ 0x00,0x00,0xfe,0x33,0x33,0x33,0xff,0x33,0x33,0x33,0x33,0xf3,0x00,0x00,0x00,0x00, +/* 199 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x18,0x30,0x1e,0x00, +/* 200 */ 0x0c,0x18,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 201 */ 0x18,0x0c,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 202 */ 0x08,0x14,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 203 */ 0x36,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, +/* 204 */ 0x0c,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 205 */ 0x30,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 206 */ 0x18,0x24,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 207 */ 0x66,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, +/* 208 */ 0x00,0x00,0x1e,0x36,0x66,0x66,0x6f,0x66,0x66,0x66,0x36,0x1e,0x00,0x00,0x00,0x00, +/* 209 */ 0x6e,0x3b,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, +/* 210 */ 0x06,0x0c,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 211 */ 
0x30,0x18,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 212 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 213 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 214 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 215 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0x18,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00, +/* 216 */ 0x00,0x20,0x3e,0x73,0x73,0x6b,0x6b,0x6b,0x6b,0x67,0x67,0x3e,0x02,0x00,0x00,0x00, +/* 217 */ 0x0c,0x18,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 218 */ 0x18,0x0c,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 219 */ 0x08,0x14,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 220 */ 0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 221 */ 0x30,0x18,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, +/* 222 */ 0x00,0x00,0x0f,0x06,0x3e,0x66,0x66,0x66,0x66,0x3e,0x06,0x0f,0x00,0x00,0x00,0x00, +/* 223 */ 0x00,0x00,0x1e,0x33,0x33,0x1b,0x33,0x63,0x63,0x63,0x63,0x3b,0x00,0x00,0x00,0x00, +/* 224 */ 0x00,0x0c,0x18,0x30,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 225 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 226 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 227 */ 0x00,0x00,0x6e,0x3b,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 228 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 229 */ 0x00,0x1c,0x36,0x1c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 230 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0xdb,0xd8,0xfe,0x1b,0xdb,0x76,0x00,0x00,0x00,0x00, +/* 231 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x18,0x30,0x1e,0x00, +/* 232 */ 
0x00,0x0c,0x18,0x30,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 233 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 234 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 235 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 236 */ 0x00,0x06,0x0c,0x18,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 237 */ 0x00,0x18,0x0c,0x06,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 238 */ 0x00,0x08,0x1c,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 239 */ 0x00,0x00,0x36,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, +/* 240 */ 0x00,0x00,0x2c,0x18,0x34,0x60,0x7c,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00, +/* 241 */ 0x00,0x00,0x6e,0x3b,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, +/* 242 */ 0x00,0x06,0x0c,0x18,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 243 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 244 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 245 */ 0x00,0x00,0x6e,0x3b,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 246 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, +/* 247 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, +/* 248 */ 0x00,0x00,0x00,0x00,0x20,0x3e,0x73,0x6b,0x6b,0x6b,0x67,0x3e,0x02,0x00,0x00,0x00, +/* 249 */ 0x00,0x06,0x0c,0x18,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 250 */ 0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 251 */ 0x00,0x08,0x1c,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 252 */ 0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, +/* 253 */ 
0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, +/* 254 */ 0x00,0x00,0x0f,0x06,0x06,0x3e,0x66,0x66,0x66,0x66,0x66,0x3e,0x06,0x06,0x0f,0x00, +/* 255 */ 0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00 +}; + +#define ISO_CHAR_MIN 0x00 +#define ISO_CHAR_MAX 0xFF +#define ISO_CHAR_HEIGHT 16 diff --git a/osfmk/ppc/lock.h b/osfmk/ppc/lock.h new file mode 100644 index 000000000..ec40b5f43 --- /dev/null +++ b/osfmk/ppc/lock.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Copyright (C) 1998 Apple Computer + * All Rights Reserved + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +#ifndef _PPC_HW_LOCK_H_ +#define _PPC_HW_LOCK_H_ + +#include +#include + +#define NEED_ATOMIC 1 + +#define mutex_try _mutex_try +#define mutex_lock(m) \ +MACRO_BEGIN \ + assert(assert_wait_possible()); \ + _mutex_lock((m)); \ +MACRO_END + +extern unsigned int LockTimeOut; /* Number of hardware ticks of a lock timeout */ + +#endif /* _PPC_HW_LOCK_H_ */ diff --git a/osfmk/ppc/low_trace.h b/osfmk/ppc/low_trace.h new file mode 100644 index 000000000..3f7515a2a --- /dev/null +++ b/osfmk/ppc/low_trace.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * + * These are the structures and constants used for the low-level trace + */ + + + + + + +#ifndef _LOW_TRACE_H_ +#define _LOW_TRACE_H_ + +typedef struct LowTraceRecord { + + unsigned short LTR_cpu; /* 0000 - CPU address */ + unsigned short LTR_excpt; /* 0002 - Exception code */ + unsigned int LTR_timeHi; /* 0004 - High order time */ + unsigned int LTR_timeLo; /* 0008 - Low order time */ + unsigned int LTR_cr; /* 000C - CR */ + unsigned int LTR_srr0; /* 0010 - SRR0 */ + unsigned int LTR_srr1; /* 0014 - SRR1 */ + unsigned int LTR_dar; /* 0018 - DAR */ + unsigned int LTR_save; /* 001C - savearea */ + + unsigned int LTR_lr; /* 0020 - LR */ + unsigned int LTR_ctr; /* 0024 - CTR */ + unsigned int LTR_r0; /* 0028 - R0 */ + unsigned int LTR_r1; /* 002C - R1 */ + unsigned int LTR_r2; /* 0030 - R2 */ + unsigned int LTR_r3; /* 0034 - R3 */ + unsigned int LTR_r4; /* 0038 - R4 */ + unsigned int LTR_r5; /* 003C - R5 */ + +} LowTraceRecord; + +typedef struct traceWork { + + unsigned int traceMask; /* Types to be traced */ + unsigned int traceCurr; /* Address of next slot */ + unsigned int traceStart; /* Start of trace table */ + unsigned int traceEnd; /* End of trace table */ + unsigned int traceMsnd; /* Saved trace mask */ + unsigned int traceGas[3]; +} traceWork; + +extern traceWork trcWork; + + +#endif /* ifndef _LOW_TRACE_H_ */ diff --git a/osfmk/ppc/lowmem_vectors.s b/osfmk/ppc/lowmem_vectors.s new file mode 100644 index 000000000..acf0d31d7 --- 
/dev/null +++ b/osfmk/ppc/lowmem_vectors.s @@ -0,0 +1,2459 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Low-memory exception vector code for PowerPC MACH + * + * These are the only routines that are ever run with + * VM instruction translation switched off. + * + * The PowerPC is quite strange in that rather than having a set + * of exception vectors, the exception handlers are installed + * in well-known addresses in low memory. This code must be loaded + * at ZERO in physical memory. The simplest way of doing this is + * to load the kernel at zero, and specify this as the first file + * on the linker command line. + * + * When this code is loaded into place, it is loaded at virtual + * address KERNELBASE, which is mapped to zero (physical). + * + * This code handles all powerpc exceptions and is always entered + * in supervisor mode with translation off. It saves the minimum + * processor state before switching back on translation and + * jumping to the approprate routine. 
+ * + * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions) + * + * We use some of this space to decide which stack to use, and where to + * save the context etc, before jumping to a generic handler. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define TRCSAVE 0 +#define CHECKSAVE 0 +#define PERFTIMES 0 +#define ESPDEBUG 0 + +#if TRCSAVE +#error The TRCSAVE option is broken.... Fix it +#endif + +#define featL1ena 24 +#define featSMP 25 +#define featAltivec 26 +#define wasNapping 27 +#define featFP 28 + +#define VECTOR_SEGMENT .section __VECTORS, __interrupts + + VECTOR_SEGMENT + + + .globl EXT(ExceptionVectorsStart) + +EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */ +baseR: /* Used so we have more readable code */ + +/* + * System reset - call debugger + */ + . = 0xf0 + .globl EXT(ResetHandler) +EXT(ResetHandler): + .long 0x0 + .long 0x0 + .long 0x0 + + . = 0x100 +.L_handler100: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type + mfcr r11 + cmpi cr0,r13,RESET_HANDLER_START + bne resetexc + + li r11,RESET_HANDLER_NULL + stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type + + lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0) + lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0) + mtlr r4 + blr + +resetexc: + mtcr r11 + mfsprg r13,1 /* Get the exception save area */ + li r11,T_RESET /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * Machine check + */ + + . = 0x200 +.L_handler200: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_MACHINE_CHECK /* Set 'rupt code */ + b .L_exception_entry /* Join common... 
*/ + +/* + * Data access - page fault, invalid memory rights for operation + */ + + . = 0x300 +.L_handler300: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_DATA_ACCESS /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * Instruction access - as for data access + */ + + . = 0x400 +.L_handler400: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_INSTRUCTION_ACCESS /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * External interrupt + */ + + . = 0x500 +.L_handler500: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_INTERRUPT /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * Alignment - many reasons + */ + + . = 0x600 +.L_handler600: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_ALIGNMENT /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * Program - floating point exception, illegal inst, priv inst, user trap + */ + + . = 0x700 +.L_handler700: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_PROGRAM /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * Floating point disabled + */ + + . = 0x800 +.L_handler800: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_FP_UNAVAILABLE /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + + +/* + * Decrementer - DEC register has passed zero. + */ + + . = 0x900 +.L_handler900: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_DECREMENTER /* Set 'rupt code */ + b .L_exception_entry /* Join common... 
*/ + +/* + * I/O controller interface error - MACH does not use this + */ + + . = 0xA00 +.L_handlerA00: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_IO_ERROR /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * Reserved + */ + + . = 0xB00 +.L_handlerB00: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_RESERVED /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * System call - generated by the sc instruction + */ + + . = 0xC00 +.L_handlerC00: + mtsprg 3,r11 ; Save R11 + mtsprg 2,r13 ; Save R13 + mfcr r11 ; Save the CR + +; Note: this first compare takes care of almost all of the non-fast paths +; BSD system calls are negative and, platform-specific and mach system +; calls are all less than 0x7000. +; +; Note that 0x7FF2 and 0x7FF3 are user state only and do not need to set sprg2. + + cmpwi r0,0x7FF2 ; Ultra fast path cthread info call? + blt+ notufp ; Not ultra fast... + mfsprg r13,0 ; Get the per_proc_area + cmplwi cr1,r0,0x7FF4 ; Ultra fast path fp/vec facility state? + bgt+ cr1,notufp ; Not ultra fast... + beq+ cr1,scloadmsr ; It is the load msr guy... + lwz r13,spcFlags(r13) ; Get the facility status + rlwinm. r13,r13,0,runningVMbit,runningVMbit ; Are we running a VM right now? + bne- notufp ; Yes, no fast trap allowed... + + mfsprg r11,3 ; Restore R11 + mfsprg r3,0 ; Get the per_proc_area + mfsprg r13,2 ; Restore R13 + beq- cr1,isvecfp ; This is the facility stat call + lwz r3,UAW(r3) ; Get the assist word + rfi ; All done, scream back... (no need to restore CR or R11, they are volatile) +; +isvecfp: lwz r3,spcFlags(r3) ; Get the facility status + rfi ; Bail back... +; + .align 5 +notufp: mtcrf 0xC0,r11 ; Restore the used CRs + li r11,T_SYSTEM_CALL ; Set interrupt code + mfsprg r13,1 ; Get the exception save area + b .L_exception_entry ; Join common... 
+ +scloadmsr: mfsrr1 r13 ; Get the old SRR + rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; From problem state? + mfsprg r13,0 ; Restore per_proc + bne- notufp ; Someone is trying to cheat... + + mtcrf 0xC0,r11 ; Restore CR + lwz r11,pfAvailable(r13) ; Pick up the feature flags + mtsrr1 r3 ; Set new MSR + mfsprg r13,2 ; Restore R13 + mtsprg 2,r11 ; Set the feature flags into sprg2 + mfsprg r11,3 ; Restore R11 + rfi ; Blast back + + +/* + * Trace - generated by single stepping + * performance monitor BE branch enable tracing/logging + * is also done here now. while this is permanently in the + * system the impact is completely unnoticable as this code is + * only executed when (a) a single step or branch exception is + * hit, (b) in the single step debugger case there is so much + * overhead already the few extra instructions for testing for BE + * are not even noticable, (c) the BE logging code is *only* run + * when it is enabled by the tool which will not happen during + * normal system usage + * + * Note that this trace is available only to user state so we do not + * need to set sprg2 before returning. + */ + + . = 0xD00 +.L_handlerD00: + mtsprg 2,r13 ; Save R13 + mtsprg 3,r11 ; Save R11 + mfsrr1 r13 ; Get the old MSR + mfcr r11 ; Get the CR + rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state? + beq- notspectr ; Yes, not special trace... + mfsprg r13,0 ; Get the per_proc area + lhz r13,PP_CPU_FLAGS(r13) ; Get the flags + rlwinm. r13,r13,0,traceBEb+16,traceBEb+16 ; Special trace enabled? + bne+ specbrtr ; Yeah... + +notspectr: mtcr r11 ; Restore CR + mfsprg r13,1 ; Get the savearea + li r11,T_TRACE ; Set interrupt code + b .L_exception_entry ; Join common... 
+ +; +; We are doing the special branch trace +; + +specbrtr: mfsprg r13,0 ; Get the per_proc area + stw r1,emfp0(r13) ; Save in a scratch area + stw r2,emfp0+4(r13) ; Save in a scratch area + stw r3,emfp0+8(r13) ; Save in a scratch area + + lwz r1,spcTRc(r13) ; Pick up the count + lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer + subi r1,r1,1 ; Count down + lwz r3,spcTRp(r13) ; Pick up buffer position + mr. r1,r1 ; Is it time to count? + ori r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer + cmplwi cr1,r3,4092 ; Set cr1_eq if we should take exception + ble+ spclogpc ; We are logging this one... + cmplwi cr1,r2,0 ; Set cr1_eq false so we do not take an interrupt + b spcskip ; Fly away... + +spclogpc: mfsrr0 r1 ; Get the pc + stwx r1,r2,r3 ; Save it in the buffer + addi r3,r3,4 ; Point to the next slot + li r1,2 ; Number of branches to skip + rlwinm r3,r3,0,20,31 ; Wrap the slot at one page + stw r3,spcTRp(r13) ; Save the new slot + +spcskip: stw r1,spcTRc(r13) ; Save the new count + + lwz r1,emfp0(r13) ; Restore work register + lwz r2,emfp0+4(r13) ; Restore work register + lwz r3,emfp0+8(r13) ; Restore work register + beq cr1,notspectr ; Buffer filled, make a rupt... + + mtcr r11 ; Restore the CR + mfsprg r13,2 ; Restore R13 + mfsprg r11,3 ; Restore R11 + rfi ; Bail back... + +/* + * Floating point assist + */ + + . = 0xe00 +.L_handlerE00: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_FP_ASSIST /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + + +/* + * Performance monitor interruption + */ + + . = 0xF00 +PMIhandler: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_PERF_MON /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + + +/* + * VMX exception + */ + + . 
= 0xF20 +VMXhandler: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_VMX /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + + + +/* + * Instruction translation miss - we inline this code. + * Upon entry (done for us by the machine): + * srr0 : addr of instruction that missed + * srr1 : bits 0-3 = saved CR0 + * 4 = lru way bit + * 16-31 = saved msr + * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) + * imiss: ea that missed + * icmp : the compare value for the va that missed + * hash1: pointer to first hash pteg + * hash2: pointer to 2nd hash pteg + * + * Register usage: + * tmp0: saved counter + * tmp1: junk + * tmp2: pointer to pteg + * tmp3: current compare value + * + * This code is taken from the 603e User's Manual with + * some bugfixes and minor improvements to save bytes and cycles + * + * NOTE: Do not touch sprg2 in here + */ + + . = 0x1000 +.L_handler1000: + mfspr tmp2, hash1 + mfctr tmp0 /* use tmp0 to save ctr */ + mfspr tmp3, icmp + +.L_imiss_find_pte_in_pteg: + li tmp1, 8 /* count */ + subi tmp2, tmp2, 8 /* offset for lwzu */ + mtctr tmp1 /* count... */ + +.L_imiss_pteg_loop: + lwz tmp1, 8(tmp2) /* check pte0 for match... */ + addi tmp2, tmp2, 8 + cmpw cr0, tmp1, tmp3 +#if 0 + bdnzf+ cr0, .L_imiss_pteg_loop +#else + bc 0,2, .L_imiss_pteg_loop +#endif + beq+ cr0, .L_imiss_found_pte + + /* Not found in PTEG, we must scan 2nd then give up */ + + andi. tmp1, tmp3, MASK(PTE0_HASH_ID) + bne- .L_imiss_do_no_hash_exception /* give up */ + + mfspr tmp2, hash2 + ori tmp3, tmp3, MASK(PTE0_HASH_ID) + b .L_imiss_find_pte_in_pteg + +.L_imiss_found_pte: + + lwz tmp1, 4(tmp2) /* get pte1_t */ + andi. tmp3, tmp1, MASK(PTE1_WIMG_GUARD) /* Fault? */ + bne- .L_imiss_do_prot_exception /* Guarded - illegal */ + + /* Ok, we've found what we need to, restore and rfi! 
*/ + + mtctr tmp0 /* restore ctr */ + mfsrr1 tmp3 + mfspr tmp0, imiss + mtcrf 0x80, tmp3 /* Restore CR0 */ + mtspr rpa, tmp1 /* set the pte */ + ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */ + tlbli tmp0 + sth tmp1, 6(tmp2) + rfi + +.L_imiss_do_prot_exception: + /* set up srr1 to indicate protection exception... */ + mfsrr1 tmp3 + andi. tmp2, tmp3, 0xffff + addis tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16 + b .L_imiss_do_exception + +.L_imiss_do_no_hash_exception: + /* clean up registers for protection exception... */ + mfsrr1 tmp3 + andi. tmp2, tmp3, 0xffff + addis tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16 + + /* And the entry into the usual instruction fault handler ... */ +.L_imiss_do_exception: + + mtctr tmp0 /* Restore ctr */ + mtsrr1 tmp2 /* Set up srr1 */ + mfmsr tmp0 + xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ + mtcrf 0x80, tmp3 /* Restore CR0 */ + mtmsr tmp0 /* reset MSR[TGPR] */ + b .L_handler400 /* Instr Access */ + +/* + * Data load translation miss + * + * Upon entry (done for us by the machine): + * srr0 : addr of instruction that missed + * srr1 : bits 0-3 = saved CR0 + * 4 = lru way bit + * 5 = 1 if store + * 16-31 = saved msr + * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) + * dmiss: ea that missed + * dcmp : the compare value for the va that missed + * hash1: pointer to first hash pteg + * hash2: pointer to 2nd hash pteg + * + * Register usage: + * tmp0: saved counter + * tmp1: junk + * tmp2: pointer to pteg + * tmp3: current compare value + * + * This code is taken from the 603e User's Manual with + * some bugfixes and minor improvements to save bytes and cycles + * + * NOTE: Do not touch sprg2 in here + */ + + . = 0x1100 +.L_handler1100: + mfspr tmp2, hash1 + mfctr tmp0 /* use tmp0 to save ctr */ + mfspr tmp3, dcmp + +.L_dlmiss_find_pte_in_pteg: + li tmp1, 8 /* count */ + subi tmp2, tmp2, 8 /* offset for lwzu */ + mtctr tmp1 /* count... */ + +.L_dlmiss_pteg_loop: + lwz tmp1, 8(tmp2) /* check pte0 for match... 
*/ + addi tmp2, tmp2, 8 + cmpw cr0, tmp1, tmp3 +#if 0 /* How to write this correctly? */ + bdnzf+ cr0, .L_dlmiss_pteg_loop +#else + bc 0,2, .L_dlmiss_pteg_loop +#endif + beq+ cr0, .L_dmiss_found_pte + + /* Not found in PTEG, we must scan 2nd then give up */ + + andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */ + bne- .L_dmiss_do_no_hash_exception /* give up */ + + mfspr tmp2, hash2 + ori tmp3, tmp3, MASK(PTE0_HASH_ID) + b .L_dlmiss_find_pte_in_pteg + +.L_dmiss_found_pte: + + lwz tmp1, 4(tmp2) /* get pte1_t */ + + /* Ok, we've found what we need to, restore and rfi! */ + + mtctr tmp0 /* restore ctr */ + mfsrr1 tmp3 + mfspr tmp0, dmiss + mtcrf 0x80, tmp3 /* Restore CR0 */ + mtspr rpa, tmp1 /* set the pte */ + ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */ + tlbld tmp0 /* load up tlb */ + sth tmp1, 6(tmp2) /* sth is faster? */ + rfi + + /* This code is shared with data store translation miss */ + +.L_dmiss_do_no_hash_exception: + /* clean up registers for protection exception... */ + mfsrr1 tmp3 + /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */ + rlwinm tmp1, tmp3, 9, 6, 6 + addis tmp1, tmp1, MASK(DSISR_HASH) >> 16 + + /* And the entry into the usual data fault handler ... */ + + mtctr tmp0 /* Restore ctr */ + andi. 
tmp2, tmp3, 0xffff /* Clean up srr1 */ + mtsrr1 tmp2 /* Set srr1 */ + mtdsisr tmp1 + mfspr tmp2, dmiss + mtdar tmp2 + mfmsr tmp0 + xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ + mtcrf 0x80, tmp3 /* Restore CR0 */ + sync /* Needed on some */ + mtmsr tmp0 /* reset MSR[TGPR] */ + b .L_handler300 /* Data Access */ + +/* + * Data store translation miss (similar to data load) + * + * Upon entry (done for us by the machine): + * srr0 : addr of instruction that missed + * srr1 : bits 0-3 = saved CR0 + * 4 = lru way bit + * 5 = 1 if store + * 16-31 = saved msr + * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) + * dmiss: ea that missed + * dcmp : the compare value for the va that missed + * hash1: pointer to first hash pteg + * hash2: pointer to 2nd hash pteg + * + * Register usage: + * tmp0: saved counter + * tmp1: junk + * tmp2: pointer to pteg + * tmp3: current compare value + * + * This code is taken from the 603e User's Manual with + * some bugfixes and minor improvements to save bytes and cycles + * + * NOTE: Do not touch sprg2 in here + */ + + . = 0x1200 +.L_handler1200: + mfspr tmp2, hash1 + mfctr tmp0 /* use tmp0 to save ctr */ + mfspr tmp3, dcmp + +.L_dsmiss_find_pte_in_pteg: + li tmp1, 8 /* count */ + subi tmp2, tmp2, 8 /* offset for lwzu */ + mtctr tmp1 /* count... */ + +.L_dsmiss_pteg_loop: + lwz tmp1, 8(tmp2) /* check pte0 for match... */ + addi tmp2, tmp2, 8 + + cmpw cr0, tmp1, tmp3 +#if 0 /* I don't know how to write this properly */ + bdnzf+ cr0, .L_dsmiss_pteg_loop +#else + bc 0,2, .L_dsmiss_pteg_loop +#endif + beq+ cr0, .L_dsmiss_found_pte + + /* Not found in PTEG, we must scan 2nd then give up */ + + andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */ + bne- .L_dmiss_do_no_hash_exception /* give up */ + + mfspr tmp2, hash2 + ori tmp3, tmp3, MASK(PTE0_HASH_ID) + b .L_dsmiss_find_pte_in_pteg + +.L_dsmiss_found_pte: + + lwz tmp1, 4(tmp2) /* get pte1_t */ + andi. tmp3, tmp1, MASK(PTE1_CHANGED) /* unchanged, check? 
*/ + beq- .L_dsmiss_check_prot /* yes, check prot */ + +.L_dsmiss_resolved: + /* Ok, we've found what we need to, restore and rfi! */ + + mtctr tmp0 /* restore ctr */ + mfsrr1 tmp3 + mfspr tmp0, dmiss + mtcrf 0x80, tmp3 /* Restore CR0 */ + mtspr rpa, tmp1 /* set the pte */ + tlbld tmp0 /* load up tlb */ + rfi + +.L_dsmiss_check_prot: + /* PTE is unchanged, we must check that we can write */ + rlwinm. tmp3, tmp1, 30, 0, 1 /* check PP[1] */ + bge- .L_dsmiss_check_prot_user_kern + andi. tmp3, tmp1, 1 /* check PP[0] */ + beq+ .L_dsmiss_check_prot_ok + +.L_dmiss_do_prot_exception: + /* clean up registers for protection exception... */ + mfsrr1 tmp3 + /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */ + rlwinm tmp1, tmp3, 9, 6, 6 + addis tmp1, tmp1, MASK(DSISR_PROT) >> 16 + + /* And the entry into the usual data fault handler ... */ + + mtctr tmp0 /* Restore ctr */ + andi. tmp2, tmp3, 0xffff /* Clean up srr1 */ + mtsrr1 tmp2 /* Set srr1 */ + mtdsisr tmp1 + mfspr tmp2, dmiss + mtdar tmp2 + mfmsr tmp0 + xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ + mtcrf 0x80, tmp3 /* Restore CR0 */ + sync /* Needed on some */ + mtmsr tmp0 /* reset MSR[TGPR] */ + b .L_handler300 /* Data Access */ + +/* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */ +.L_dsmiss_check_prot_user_kern: + mfsrr1 tmp3 + andi. tmp3, tmp3, MASK(MSR_PR) + beq+ .L_dsmiss_check_prot_kern + mfspr tmp3, dmiss /* check user privs */ + mfsrin tmp3, tmp3 /* get excepting SR */ + andis. tmp3, tmp3, 0x2000 /* Test SR ku bit */ + beq+ .L_dsmiss_check_prot_ok + b .L_dmiss_do_prot_exception + +.L_dsmiss_check_prot_kern: + mfspr tmp3, dmiss /* check kern privs */ + mfsrin tmp3, tmp3 + andis. 
tmp3, tmp3, 0x4000 /* Test SR Ks bit */ + bne- .L_dmiss_do_prot_exception + +.L_dsmiss_check_prot_ok: + /* Ok, mark as referenced and changed before resolving the fault */ + ori tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED)) + sth tmp1, 6(tmp2) + b .L_dsmiss_resolved + +/* + * Instruction address breakpoint + */ + + . = 0x1300 +.L_handler1300: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_INSTRUCTION_BKPT /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * System management interrupt + */ + + . = 0x1400 +.L_handler1400: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +; +; Altivec Java Mode Assist interrupt +; + + . = 0x1600 +.L_handler1600: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_ALTIVEC_ASSIST /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +; +; Thermal interruption +; + + . = 0x1700 +.L_handler1700: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_THERMAL /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * There is now a large gap of reserved traps + */ + +/* + * Run mode/ trace exception - single stepping on 601 processors + */ + + . = 0x2000 +.L_handler2000: + mtsprg 2,r13 /* Save R13 */ + mtsprg 3,r11 /* Save R11 */ + mfsprg r13,1 /* Get the exception save area */ + li r11,T_RUNMODE_TRACE /* Set 'rupt code */ + b .L_exception_entry /* Join common... */ + +/* + * .L_exception_entry(type) + * + * This is the common exception handling routine called by any + * type of system exception. + * + * ENTRY: via a system exception handler, thus interrupts off, VM off. 
+ * r3 has been saved in sprg3 and now contains a number + * representing the exception's origins + * + */ + + .data + .align ALIGN + .globl EXT(exception_entry) +EXT(exception_entry): + .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */ + + VECTOR_SEGMENT + .align 5 + +.L_exception_entry: + +/* + * + * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ + * instruction to clear and allcoate a line in the cache. This way we won't take any cache + * misses, so these stores won't take all that long. Except the first line that is because + * we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are + * off also. + * + * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions + * are ignored. + */ + + stw r1,saver1(r13) ; Save register 1 + stw r0,saver0(r13) ; Save register 0 + mfspr r1,hid0 ; Get HID0 + mfcr r0 ; Save the CR + mtcrf 255,r1 ; Get set to test for cache and sleep + bf sleep,notsleep ; Skip if we are not trying to sleep + + mtcrf 255,r0 ; Restore the CR + lwz r0,saver0(r13) ; Restore R0 + lwz r1,saver1(r13) ; Restore R1 + mfsprg r13,0 ; Get the per_proc + lwz r11,pfAvailable(r13) ; Get back the feature flags + mfsprg r13,2 ; Restore R13 + mtsprg 2,r11 ; Set sprg2 to the features + mfsprg r11,3 ; Restore R11 + rfi ; Jump back into sleep code... + .long 0 ; Leave these here please... + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + .align 5 + +notsleep: stw r2,saver2(r13) ; Save this one + crmove featL1ena,dce ; Copy the cache enable bit + rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits + mtspr hid0,r2 ; Clear the nap/doze bits + cmplw r2,r1 ; See if we were napping + li r1,32 ; Point to the next line in case we need it + crnot wasNapping,cr0_eq ; Remember if we were napping + mfsprg r2,0 ; Get the per_proc area + bf- featL1ena,skipz1 ; L1 cache is disabled... 
+ dcbz r1,r13 ; Reserve our line in cache + +; +; Remember, we are setting up CR6 with feature flags +; +skipz1: lwz r1,pfAvailable(r2) ; Get the CPU features flags + stw r3,saver3(r13) ; Save this one + mtcrf 0xE0,r1 ; Put the features flags (that we care about) in the CR + stw r4,saver4(r13) ; Save this one + stw r6,saver6(r13) ; Save this one + crmove featSMP,pfSMPcapb ; See if we have a PIR + stw r8,saver8(r13) ; Save this one + crmove featAltivec,pfAltivecb ; Set the Altivec flag + mfsrr0 r6 /* Get the interruption SRR0 */ + stw r8,saver8(r13) /* Save this one */ + crmove featFP,pfFloatb ; Remember that we have floating point + stw r7,saver7(r13) /* Save this one */ + lhz r8,PP_CPU_FLAGS(r2) ; Get the flags + mfsrr1 r7 /* Get the interrupt SRR1 */ + rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on + stw r6,savesrr0(r13) /* Save the SRR0 */ + rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit + stw r5,saver5(r13) /* Save this one */ + and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on + mfsprg r6,2 ; Get interrupt time R13 + mtsprg 2,r1 ; Set the feature flags + andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set + mfsprg r8,3 /* Get 'rupt time R11 */ + stw r7,savesrr1(r13) /* Save SRR1 */ + stw r6,saver13(r13) /* Save 'rupt R1 */ + stw r8,saver11(r13) /* Save 'rupt time R11 */ + +getTB: mftbu r6 ; Get the upper timebase + mftb r7 ; Get the lower timebase + mftbu r8 ; Get the upper one again + cmplw r6,r8 ; Did the top tick? + bne- getTB ; Yeah, need to get it again... + + stw r8,ruptStamp(r2) ; Save the top of time stamp + la r6,saver14(r13) ; Point to the next cache line + stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp + bf- featL1ena,skipz2 ; L1 cache is disabled... 
+ dcbz 0,r6 /* Allocate in cache */ +skipz2: + stw r9,saver9(r13) /* Save this one */ + + la r9,saver30(r13) /* Point to the trailing end */ + stw r10,saver10(r13) /* Save this one */ + mflr r4 /* Get the LR */ + mfxer r10 ; Get the XER + + bf+ wasNapping,notNapping ; Skip if not waking up from nap... + + lwz r6,napStamp+4(r2) ; Pick up low order nap stamp + lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return + lwz r5,napStamp(r2) ; and high order + subfc r7,r6,r7 ; Subtract low stamp from now + lwz r6,napTotal+4(r2) ; Pick up low total + subfe r5,r5,r8 ; Subtract high stamp and borrow from now + lwz r8,napTotal(r2) ; Pick up the high total + addc r6,r6,r7 ; Add low to total + ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return + adde r8,r8,r5 ; Add high and carry to total + stw r6,napTotal+4(r2) ; Save the low total + stw r8,napTotal(r2) ; Save the high total + stw r3,savesrr0(r13) ; Modify to return to nap/doze exit + +notNapping: stw r12,saver12(r13) /* Save this one */ + + bf- featL1ena,skipz3 ; L1 cache is disabled... + dcbz 0,r9 /* Allocate the last in the area */ +skipz3: + stw r14,saver14(r13) /* Save this one */ + stw r15,saver15(r13) /* Save this one */ + la r14,saver22(r13) /* Point to the next block to save into */ + stw r0,savecr(r13) ; Save rupt CR + mfctr r6 /* Get the CTR */ + stw r16,saver16(r13) /* Save this one */ + stw r4,savelr(r13) /* Save 'rupt LR */ + + bf- featL1ena,skipz4 ; L1 cache is disabled... 
+ dcbz 0,r14 /* Allocate next save area line */ +skipz4: + stw r17,saver17(r13) /* Save this one */ + stw r18,saver18(r13) /* Save this one */ + stw r6,savectr(r13) /* Save 'rupt CTR */ + stw r19,saver19(r13) /* Save this one */ + lis r12,HIGH_ADDR(KERNEL_SEG_REG0_VALUE) /* Get the high half of the kernel SR0 value */ + mfdar r6 /* Get the 'rupt DAR */ + stw r20,saver20(r13) /* Save this one */ +#if 0 + mfsr r14,sr0 ; (TEST/DEBUG) + stw r14,savesr0(r13) ; (TEST/DEBUG) + mfsr r14,sr1 ; (TEST/DEBUG) + stw r14,savesr1(r13) ; (TEST/DEBUG) + mfsr r14,sr2 ; (TEST/DEBUG) + stw r14,savesr2(r13) ; (TEST/DEBUG) + mfsr r14,sr3 ; (TEST/DEBUG) + stw r14,savesr3(r13) ; (TEST/DEBUG) + mfsr r14,sr4 ; (TEST/DEBUG) + stw r14,savesr4(r13) ; (TEST/DEBUG) + mfsr r14,sr5 ; (TEST/DEBUG) + stw r14,savesr5(r13) ; (TEST/DEBUG) + mfsr r14,sr6 ; (TEST/DEBUG) + stw r14,savesr6(r13) ; (TEST/DEBUG) + mfsr r14,sr7 ; (TEST/DEBUG) + stw r14,savesr7(r13) ; (TEST/DEBUG) + mfsr r14,sr8 ; (TEST/DEBUG) + stw r14,savesr8(r13) ; (TEST/DEBUG) + mfsr r14,sr9 ; (TEST/DEBUG) + stw r14,savesr9(r13) ; (TEST/DEBUG) + mfsr r14,sr10 ; (TEST/DEBUG) + stw r14,savesr10(r13) ; (TEST/DEBUG) + mfsr r14,sr11 ; (TEST/DEBUG) + stw r14,savesr11(r13) ; (TEST/DEBUG) + mfsr r14,sr12 ; (TEST/DEBUG) + stw r14,savesr12(r13) ; (TEST/DEBUG) + mfsr r14,sr13 ; (TEST/DEBUG) + stw r14,savesr13(r13) ; (TEST/DEBUG) + mfsr r14,sr15 ; (TEST/DEBUG) + stw r14,savesr15(r13) ; (TEST/DEBUG) +#endif + + mtsr sr0,r12 /* Set the kernel SR0 */ + stw r21,saver21(r13) /* Save this one */ + addis r12,r12,0x0010 ; Point to the second segment of kernel + stw r10,savexer(r13) ; Save the rupt XER + mtsr sr1,r12 /* Set the kernel SR1 */ + stw r30,saver30(r13) /* Save this one */ + addis r12,r12,0x0010 ; Point to the third segment of kernel + stw r31,saver31(r13) /* Save this one */ + mtsr sr2,r12 /* Set the kernel SR2 */ + stw r22,saver22(r13) /* Save this one */ + addis r12,r12,0x0010 ; Point to the third segment of kernel + la r10,savedar(r13) /* Point 
to exception info block */ + stw r23,saver23(r13) /* Save this one */ + mtsr sr3,r12 /* Set the kernel SR3 */ + stw r24,saver24(r13) /* Save this one */ + stw r25,saver25(r13) /* Save this one */ + mfdsisr r7 /* Get the 'rupt DSISR */ + stw r26,saver26(r13) /* Save this one */ + + bf- featL1ena,skipz5 ; L1 cache is disabled... + dcbz 0,r10 /* Allocate exception info line */ +skipz5: + + stw r27,saver27(r13) /* Save this one */ + li r10,emfp0 ; Point to floating point save + stw r28,saver28(r13) /* Save this one */ + stw r29,saver29(r13) /* Save this one */ + mfsr r14,sr14 ; Get the copyin/out segment register + stw r6,savedar(r13) /* Save the 'rupt DAR */ + bf- featL1ena,skipz5a ; Do not do this if no L1... + dcbz r10,r2 ; Clear and allocate an L1 slot + +skipz5a: stw r7,savedsisr(r13) /* Save the 'rupt code DSISR */ + stw r11,saveexception(r13) /* Save the exception code */ + stw r14,savesr14(r13) ; Save copyin/copyout + + lis r8,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + li r19,0 ; Assume no Altivec + ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + + bf featAltivec,noavec ; No Altivec on this CPU... + li r9,0 ; Get set to clear VRSAVE + mfspr r19,vrsave ; Get the VRSAVE register + mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level +; +; We need to save the FPSCR as if it is normal context. +; This is because pending exceptions will cause an exception even if +; FP is disabled. We need to clear the FPSCR when we first start running in the +; kernel. +; +noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags + + bf- featFP,nofpexe ; No possible floating point exceptions... 
+ + mfmsr r9 ; Get the MSR value + ori r7,r9,lo16(MASK(MSR_FP)) ; Enable floating point + mtmsr r7 ; Do it + isync + stfd f0,emfp0(r2) ; Save FPR0 + stfd f1,emfp1(r2) ; Save FPR1 + mffs f0 ; Get the FPSCR + fsub f1,f1,f1 ; Make a 0 + stfd f0,savexfpscrpad(r13) ; Save the FPSCR + mtfsf 0xFF,f1 ; Clear it + lfd f0,emfp0(r2) ; Restore FPR0 + lfd f1,emfp1(r2) ; Restore FPR1 + mtmsr r9 ; Turn off FP + isync +nofpexe: + +/* + * Everything is saved at this point, except for FPRs, and VMX registers + * + * Time for a new save area. Allocate the trace table entry now also + * Note that we haven't touched R0-R5 yet. Except for R0 & R1, that's in the save + */ + + +lllck: lwarx r9,0,r8 /* Grab the lock value */ + li r7,1 /* Use part of the delay time */ + mr. r9,r9 /* Is it locked? */ + bne- lllcks /* Yeah, wait for it to clear... */ + stwcx. r7,0,r8 /* Try to seize that there durn lock */ + beq+ lllckd /* Got it... */ + b lllck /* Collision, try again... */ + +lllcks: lwz r9,SVlock(r8) /* Get that lock in here */ + mr. r9,r9 /* Is it free yet? */ + beq+ lllck /* Yeah, try for it again... */ + b lllcks /* Sniff away... 
*/ + +lllckd: isync /* Purge any speculative executions here */ + lis r23,hi16(EXT(trcWork)) ; Get the work area address + rlwinm r7,r11,30,0,31 /* Save 'rupt code shifted right 2 */ + ori r23,r23,lo16(EXT(trcWork)) ; Get the rest +#if 1 + lwz r14,traceMask(r23) /* Get the trace mask */ +#else + li r14,-1 /* (TEST/DEBUG) */ +#endif + addi r7,r7,10 /* Adjust for CR5_EQ position */ + lwz r15,SVfree(r8) /* Get the head of the save area list */ + lwz r25,SVinuse(r8) /* Get the in use count */ + rlwnm r7,r14,r7,22,22 /* Set CR5_EQ bit position to 0 if tracing allowed */ + lwz r20,traceCurr(r23) /* Pick up the current trace entry */ + mtcrf 0x04,r7 /* Set CR5 to show trace or not */ + + lwz r14,SACalloc(r15) /* Pick up the allocation bits */ + addi r25,r25,1 /* Bump up the in use count for the new savearea */ + lwz r21,traceEnd(r23) /* Grab up the end of it all */ + mr. r14,r14 /* Can we use the first one? */ + blt use1st /* Yeah... */ + + andis. r14,r14,0x8000 /* Show we used the second and remember if it was the last */ + addi r10,r15,0x0800 /* Point to the first one */ + b gotsave /* We have the area now... */ + +use1st: andis. r14,r14,0x4000 /* Mark first gone and remember if empty */ + mr r10,r15 /* Set the save area */ + +gotsave: stw r14,SACalloc(r15) /* Put back the allocation bits */ + bne nodqsave /* There's still an empty slot, don't dequeue... */ + + lwz r16,SACnext(r15) /* Get the next in line */ + stw r16,SVfree(r8) /* Dequeue our now empty save area block */ + +nodqsave: addi r22,r20,LTR_size /* Point to the next trace entry */ + stw r25,SVinuse(r8) /* Set the in use count */ + li r17,0 /* Clear this for the lock */ + cmplw r22,r21 /* Do we need to wrap the trace table? */ + stw r17,SAVprev(r10) /* Clear back pointer for the newly allocated guy */ + mtsprg 1,r10 /* Get set for the next 'rupt */ + bne+ gotTrcEnt /* We got a trace entry... 
*/ + + lwz r22,traceStart(r23) /* Wrap back to the top */ + +gotTrcEnt: bne- cr5,skipTrace1 /* Don't want to trace this kind... */ + + stw r22,traceCurr(r23) /* Set the next entry for the next guy */ + +#if ESPDEBUG + dcbst br0,r23 ; (TEST/DEBUG) + sync ; (TEST/DEBUG) +#endif + + bf- featL1ena,skipz6 ; L1 cache is disabled... + dcbz 0,r20 /* Allocate cache for the entry */ +skipz6: + +skipTrace1: sync /* Make sure all stores are done */ + stw r17,SVlock(r8) /* Unlock both save and trace areas */ + + +/* + * At this point, we can take another exception and lose nothing. + * + * We still have the current savearea pointed to by R13, the next by R10 and + * sprg1. R20 contains the pointer to a trace entry and CR5_eq says + * to do the trace or not. + * + * Note that R13 was chosen as the save area pointer because the SIGP, + * firmware, and DSI/ISI handlers aren't supposed to touch anything + * over R12. But, actually, the DSI/ISI stuff does. + * + * + * Let's cut that trace entry now. + */ + + lwz r0,saver0(r13) ; Get back interrupt time R0 + bne- cr5,skipTrace2 /* Don't want to trace this kind... */ + + mfsprg r2,0 ; Get the per_proc + li r14,32 /* Second line of entry */ + + lwz r16,ruptStamp(r2) ; Get top of time base + lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp + + bf- featL1ena,skipz7 ; L1 cache is disabled... + dcbz r14,r20 /* Zap the second half */ + +skipz7: stw r16,LTR_timeHi(r20) /* Set the upper part of TB */ + bf featSMP,nopir4 ; Is there a processor ID register on this guy? + mfspr r19,pir /* Get the processor address */ + b gotpir4 /* Got it... 
*/ +nopir4: li r19,0 /* Assume processor 0 for those underprivileged folks */ +gotpir4: + lwz r1,saver1(r13) ; Get back interrupt time R1 + stw r17,LTR_timeLo(r20) /* Set the lower part of TB */ + rlwinm r19,r19,0,27,31 /* Cut the junk */ + lwz r2,saver2(r13) ; Get back interrupt time R2 + stw r0,LTR_r0(r20) /* Save off register 0 */ + lwz r3,saver3(r13) ; Restore this one + sth r19,LTR_cpu(r20) /* Stash the cpu address */ + stw r1,LTR_r1(r20) /* Save off register 1 */ + lwz r4,saver4(r13) ; Restore this one + stw r2,LTR_r2(r20) /* Save off register 2 */ + lwz r5,saver5(r13) ; Restore this one + stw r3,LTR_r3(r20) /* Save off register 3 */ + lwz r16,savecr(r13) /* We don't remember the CR anymore, get it */ + stw r4,LTR_r4(r20) /* Save off register 4 */ + mfsrr0 r17 /* Get this back, it's still good */ + stw r5,LTR_r5(r20) /* Save off register 5 */ + mfsrr1 r18 /* This is still good in here also */ + + stw r16,LTR_cr(r20) /* Save the CR (or dec) */ + stw r17,LTR_srr0(r20) /* Save the SSR0 */ + stw r18,LTR_srr1(r20) /* Save the SRR1 */ + mfdar r17 /* Get this back */ + + mflr r16 /* Get the LR */ + stw r17,LTR_dar(r20) /* Save the DAR */ + mfctr r17 /* Get the CTR */ + stw r16,LTR_lr(r20) /* Save the LR */ +#if 0 + lis r17,HIGH_ADDR(EXT(saveanchor)) ; (TEST/DEBUG) + ori r17,r17,LOW_ADDR(EXT(saveanchor)) ; (TEST/DEBUG) + lwz r16,SVcount(r17) ; (TEST/DEBUG) + lwz r17,SVinuse(r17) ; (TEST/DEBUG) + rlwimi r17,r16,16,0,15 ; (TEST/DEBUG) +#endif + stw r17,LTR_ctr(r20) /* Save off the CTR */ + stw r13,LTR_save(r20) /* Save the savearea */ + sth r11,LTR_excpt(r20) /* Save the exception type */ +#if ESPDEBUG + addi r17,r20,32 ; (TEST/DEBUG) + dcbst br0,r20 ; (TEST/DEBUG) + dcbst br0,r17 ; (TEST/DEBUG) + sync ; (TEST/DEBUG) +#endif + +/* + * We're done with the trace, except for maybe modifying the exception + * code later on. So, that means that we need to save R20 and CR5, but + * R0 to R5 are clear now. + * + * So, let's finish setting up the kernel registers now. 
+ */ + +skipTrace2: + +#if PERFTIMES && DEBUG + li r3,68 ; Indicate interrupt + mr r4,r11 ; Get code to log + mr r5,r13 ; Get savearea to log + mr r8,r0 ; Save R0 + bl EXT(dbgLog2) ; Cut log entry + mr r0,r8 ; Restore R0 +#endif + + mfsprg r2,0 /* Get the per processor block */ + +#if CHECKSAVE + + lis r4,0x7FFF /* (TEST/DEBUG) */ + mfdec r12 /* (TEST/DEBUG) */ + or r4,r4,r12 /* (TEST/DEBUG) */ + mtdec r4 /* (TEST/DEBUG) */ + li r4,0x20 /* (TEST/DEBUG) */ + + lwarx r8,0,r4 ; ? + +mpwait2: lwarx r8,0,r4 /* (TEST/DEBUG) */ + mr. r8,r8 /* (TEST/DEBUG) */ + bne- mpwait2 /* (TEST/DEBUG) */ + stwcx. r4,0,r4 /* (TEST/DEBUG) */ + bne- mpwait2 /* (TEST/DEBUG) */ + + isync /* (TEST/DEBUG) */ + lwz r4,0xD80(br0) /* (TEST/DEBUG) */ + mr. r4,r4 /* (TEST/DEBUG) */ + li r4,1 /* (TEST/DEBUG) */ + bne- doncheksv /* (TEST/DEBUG) */ + + lis r8,HIGH_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */ + ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */ + + stw r4,0xD80(br0) /* (TEST/DEBUG) */ + + lwarx r4,0,r8 ; ? + +mpwait2x: lwarx r4,0,r8 /* (TEST/DEBUG) */ + mr. r4,r4 /* (TEST/DEBUG) */ + bne- mpwait2x /* (TEST/DEBUG) */ + stwcx. r8,0,r8 /* (TEST/DEBUG) */ + bne- mpwait2x /* (TEST/DEBUG) */ + + isync /* (TEST/DEBUG) */ + +#if 0 + rlwinm r4,r13,0,0,19 /* (TEST/DEBUG) */ + lwz r21,SACflags(r4) /* (TEST/DEBUG) */ + rlwinm r22,r21,24,24,31 /* (TEST/DEBUG) */ + cmplwi r22,0x00EE /* (TEST/DEBUG) */ + lwz r22,SACvrswap(r4) /* (TEST/DEBUG) */ + bne- currbad /* (TEST/DEBUG) */ + andis. r21,r21,hi16(sac_perm) /* (TEST/DEBUG) */ + bne- currnotbad /* (TEST/DEBUG) */ + mr. 
r22,r22 /* (TEST/DEBUG) */ + bne+ currnotbad /* (TEST/DEBUG) */ + +currbad: lis r23,hi16(EXT(debugbackpocket)) /* (TEST/DEBUG) */ + ori r23,r23,lo16(EXT(debugbackpocket)) /* (TEST/DEBUG) */ + stw r23,SVfree(r8) /* (TEST/DEBUG) */ + + mfsprg r25,1 /* (TEST/DEBUG) */ + mtsprg 1,r23 /* (TEST/DEBUG) */ + lwz r26,SACalloc(r23) /* (TEST/DEBUG) */ + rlwinm r26,r26,0,1,31 /* (TEST/DEBUG) */ + stw r26,SACalloc(r23) /* (TEST/DEBUG) */ + + sync /* (TEST/DEBUG) */ + li r28,0 /* (TEST/DEBUG) */ + stw r28,0x20(br0) /* (TEST/DEBUG) */ + stw r28,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +currnotbad: +#endif + + lwz r28,SVcount(r8) /* (TEST/DEBUG) */ + lwz r21,SVinuse(r8) /* (TEST/DEBUG) */ + lwz r23,SVmin(r8) /* (TEST/DEBUG) */ + sub r22,r28,r21 /* (TEST/DEBUG) */ + cmpw r22,r23 /* (TEST/DEBUG) */ + bge+ cksave0 /* (TEST/DEBUG) */ + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +cksave0: lwz r28,SVfree(r8) /* (TEST/DEBUG) */ + li r24,0 /* (TEST/DEBUG) */ + li r29,1 /* (TEST/SAVE) */ + +cksave0a: mr. r28,r28 /* (TEST/DEBUG) */ + beq- cksave3 /* (TEST/DEBUG) */ + + rlwinm. r21,r28,0,4,19 /* (TEST/DEBUG) */ + bne+ cksave1 /* (TEST/DEBUG) */ + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +cksave1: rlwinm. 
r21,r28,0,21,3 /* (TEST/DEBUG) */ + beq+ cksave2 /* (TEST/DEBUG) */ + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +cksave2: lwz r25,SACalloc(r28) /* (TEST/DEBUG) */ + lbz r26,SACflags+2(r28) /* (TEST/DEBUG) */ + lbz r21,SACflags+3(r28) /* (TEST/DEBUG) */ + cmplwi r26,0x00EE /* (TEST/DEBUG) */ + stb r29,SACflags+3(r28) /* (TEST/DEBUG) */ + beq+ cksave2z + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +cksave2z: mr. r21,r21 /* (TEST/DEBUG) */ + beq+ cksave2a /* (TEST/DEBUG) */ + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +cksave2a: rlwinm r26,r25,1,31,31 /* (TEST/DEBUG) */ + rlwinm r27,r25,2,31,31 /* (TEST/DEBUG) */ + add r24,r24,r26 /* (TEST/DEBUG) */ + add r24,r24,r27 /* (TEST/DEBUG) */ + lwz r28,SACnext(r28) /* (TEST/DEBUG) */ + b cksave0a /* (TEST/DEBUG) */ + +cksave3: cmplw r24,r22 /* (TEST/DEBUG) */ + beq+ cksave4 /* (TEST/DEBUG) */ + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +cksave4: lwz r28,SVfree(r8) /* (TEST/DEBUG) */ + li r24,0 /* (TEST/DEBUG) */ + +cksave5: mr. r28,r28 /* (TEST/DEBUG) */ + beq- cksave6 /* (TEST/DEBUG) */ + stb r24,SACflags+3(r28) /* (TEST/DEBUG) */ + lwz r28,SACnext(r28) /* (TEST/DEBUG) */ + b cksave5 /* (TEST/DEBUG) */ + +cksave6: + + li r4,0 /* (TEST/DEBUG) */ + stw r4,0xD80(br0) /* (TEST/DEBUG) */ + stw r4,0(r8) /* (TEST/DEBUG) */ + +doncheksv: + li r4,0 /* (TEST/DEBUG) */ + stw r4,0x20(br0) /* (TEST/DEBUG) */ + mtdec r12 /* (TEST/DEBUG) */ +#endif + + lis r4,HIGH_ADDR(EXT(MPspec)) /* Get the MP control block */ + dcbt 0,r2 /* We'll need the per_proc in a sec */ + cmplwi cr0,r11,T_INTERRUPT /* Do we have an external interrupt? 
*/ + ori r4,r4,LOW_ADDR(EXT(MPspec)) /* Get the bottom half of the MP control block */ + bne+ notracex /* Not an external... */ + +/* + * Here we check to see if there was a interprocessor signal + */ + + lwz r4,MPSSIGPhandler(r4) /* Get the address of the SIGP interrupt filter */ + lhz r3,PP_CPU_FLAGS(r2) /* Get the CPU flags */ + cmplwi cr1,r4,0 /* Check if signal filter is initialized yet */ + andi. r3,r3,LOW_ADDR(SIGPactive) /* See if this processor has started up */ + mtlr r4 /* Load up filter address */ + beq- cr1,notracex /* We don't have a filter yet... */ + beq- notracex /* This processor hasn't started filtering yet... */ + + blrl /* Filter the interrupt */ + + mfsprg r2,0 /* Make sure we have the per processor block */ + cmplwi cr0,r3,kMPIOInterruptPending /* See what the filter says */ + li r11,T_INTERRUPT /* Assume we have a regular external 'rupt */ + beq+ modRupt /* Yeah, we figured it would be... */ + li r11,T_SIGP /* Assume we had a signal processor interrupt */ + bgt+ modRupt /* Yeah, at this point we would assume so... */ + li r11,T_IN_VAIN /* Nothing there actually, so eat it */ + +modRupt: stw r11,PP_SAVE_EXCEPTION_TYPE(r2) /* Set that it was either in vain or a SIGP */ + stw r11,saveexception(r13) /* Save the exception code here also */ + bne- cr5,notracex /* Jump if no tracing... 
*/ + sth r11,LTR_excpt(r20) /* Save the exception type */ + +notracex: + +#if 0 + bf featSMP,nopir6 /* (TEST/DEBUG) */ + mfspr r7,pir /* (TEST/DEBUG) */ + b gotpir6 /* (TEST/DEBUG) */ +nopir6: li r7,0 /* (TEST/DEBUG) */ +gotpir6: /* (TEST/DEBUG) */ + lis r6,HIGH_ADDR(EXT(RuptCtrs)) /* (TEST/DEBUG) */ + rlwinm r7,r7,8,23,23 /* (TEST/DEBUG) */ + lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + rlwimi r7,r7,1,22,22 /* (TEST/DEBUG) */ + ori r6,r6,LOW_ADDR(EXT(RuptCtrs)) /* (TEST/DEBUG) */ + rlwinm r1,r11,2,0,29 /* (TEST/DEBUG) */ + add r6,r6,r7 /* (TEST/DEBUG) */ + ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + lwz r21,(47*16)+8(r6) /* (TEST/DEBUG) */ + lwz r22,(47*16)+12(r6) /* (TEST/DEBUG) */ + add r1,r1,r6 /* (TEST/DEBUG) */ + mftb r24 /* (TEST/DEBUG) */ + sub r22,r24,r22 /* (TEST/DEBUG) */ + lwz r4,4(r6) /* (TEST/DEBUG) */ + cmplw cr2,r22,r21 /* (TEST/DEBUG) */ + lwz r7,4(r1) /* (TEST/DEBUG) */ + lwz r21,8(r6) /* (TEST/DEBUG) */ + blt+ cr2,nottime /* (TEST/DEBUG) */ + stw r24,(47*16)+12(r6) /* (TEST/DEBUG) */ + +nottime: addi r4,r4,1 /* (TEST/DEBUG) */ + lwz r22,8(r1) /* (TEST/DEBUG) */ + addi r7,r7,1 /* (TEST/DEBUG) */ + stw r4,4(r6) /* (TEST/DEBUG) */ + lwz r3,0(r6) /* (TEST/DEBUG) */ + mr. r21,r21 /* (TEST/DEBUG) */ + stw r7,4(r1) /* (TEST/DEBUG) */ + mtlr r12 /* (TEST/DEBUG) */ + lwz r1,0(r1) /* (TEST/DEBUG) */ + beq- nottimed1 /* (TEST/DEBUG) */ + blt+ cr2,isnttime1 /* (TEST/DEBUG) */ + +nottimed1: mr. r3,r3 /* (TEST/DEBUG) */ + bgelrl+ /* (TEST/DEBUG) */ + +isnttime1: mr. r22,r22 /* (TEST/DEBUG) */ + beq- nottimed2 /* (TEST/DEBUG) */ + blt+ cr2,isnttime2 /* (TEST/DEBUG) */ + +nottimed2: mr. r3,r1 /* (TEST/DEBUG) */ + mtlr r12 /* (TEST/DEBUG) */ + mr r4,r7 /* (TEST/DEBUG) */ + bgelrl+ /* (TEST/DEBUG) */ + mr r3,r11 /* (TEST/DEBUG) */ + +isnttime2: cmplwi r11,T_DATA_ACCESS /* (TEST/DEBUG) */ + lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + bne+ nodsidisp /* (TEST/DEBUG) */ + mr. 
r22,r22 /* (TEST/DEBUG) */ + beq- nottimed3 /* (TEST/DEBUG) */ + blt+ cr2,nodsidisp /* (TEST/DEBUG) */ + +nottimed3: li r3,5 /* (TEST/DEBUG) */ + ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ + mtlr r12 /* (TEST/DEBUG) */ + blrl /* (TEST/DEBUG) */ + + lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + lis r3,9 /* (TEST/DEBUG) */ + ori r3,r3,5 /* (TEST/DEBUG) */ + mtlr r12 /* (TEST/DEBUG) */ + lwz r4,savedar(r13) /* (TEST/DEBUG) */ + blrl /* (TEST/DEBUG) */ + +nodsidisp: cmplwi r11,T_INSTRUCTION_ACCESS /* (TEST/DEBUG) */ + lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + bne+ noisidisp /* (TEST/DEBUG) */ + mr. r22,r22 /* (TEST/DEBUG) */ + beq- nottimed4 /* (TEST/DEBUG) */ + blt+ cr2,noisidisp /* (TEST/DEBUG) */ + +nottimed4: li r3,6 /* (TEST/DEBUG) */ + ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ + mtlr r12 /* (TEST/DEBUG) */ + blrl /* (TEST/DEBUG) */ + +noisidisp: mr r3,r11 /* (TEST/DEBUG) */ +#endif + +#if 0 + cmplwi r11,T_PROGRAM /* (TEST/DEBUG) */ + lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + bne+ nopgmdisp /* (TEST/DEBUG) */ + li r3,7 /* (TEST/DEBUG) */ + ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ + lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ + mtlr r12 /* (TEST/DEBUG) */ + blrl /* (TEST/DEBUG) */ + +nopgmdisp: mr r3,r11 /* (TEST/DEBUG) */ +#endif + + li r21,0 ; Assume no processor register for now + lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters + bf featSMP,nopirhere ; Jump if this processor does not have a PIR... 
+ mfspr r21,pir ; Get the PIR + +nopirhere: ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters + lwz r7,savesrr1(r13) ; Get the entering MSR + rlwinm r21,r21,8,20,23 ; Get index to processor counts + mtcrf 0x80,r0 /* Set our CR0 to the high nybble of the request code */ + rlwinm r6,r0,1,0,31 /* Move sign bit to the end */ + cmplwi cr1,r11,T_SYSTEM_CALL /* Did we get a system call? */ + crandc cr0_lt,cr0_lt,cr0_gt /* See if we have R0 equal to 0b10xx...x */ + add r12,r12,r21 ; Point to the processor count area + cmplwi cr3,r11,T_IN_VAIN /* Was this all in vain? All for nothing? */ + lwzx r22,r12,r11 ; Get the old value + cmplwi cr2,r6,1 /* See if original R0 had the CutTrace request code in it */ + addi r22,r22,1 ; Count this one + cmplwi cr4,r11,T_SIGP /* Indicate if we had a SIGP 'rupt */ + stwx r22,r12,r11 ; Store it back + + beq- cr3,EatRupt /* Interrupt was all for nothing... */ + cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check? + bne+ cr1,noCutT /* Not a system call... */ + bnl+ cr0,noCutT /* R0 not 0b10xxx...x, can't be any kind of magical system call... */ + rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state? + lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags + beq+ FCisok ; From supervisor state... + + ori r1,r1,lo16(EXT(dgWork)) ; Again + lwz r1,dgFlags(r1) ; Get the flags + rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid? + beq- noCutT ; No... + +FCisok: beq- cr2,isCutTrace /* This is a CutTrace system call */ + +/* + * Here's where we call the firmware. If it returns T_IN_VAIN, that means + * that it has handled the interruption. Remember: thou shalt not trash R13 + * or R20 while you are away. Anything else is ok. 
+ */ + + lis r1,hi16(EXT(FirmwareCall)) /* Top half of firmware call handler */ + ori r1,r1,lo16(EXT(FirmwareCall)) /* Bottom half of it */ + lwz r3,saver3(r13) /* Restore the first parameter, the rest are ok already */ + mtlr r1 /* Get it in the link register */ + blrl /* Call the handler */ + + cmplwi r3,T_IN_VAIN /* Was it handled? */ + mfsprg r2,0 /* Restore the per_processor area */ + beq+ EatRupt /* Interrupt was handled... */ + mr r11,r3 /* Put the 'rupt code in the right register */ + b noSIGP /* Go to the normal system call handler */ + +isCutTrace: + li r7,-32768 /* Get a 0x8000 for the exception code */ + bne- cr5,EatRupt /* Tracing is disabled... */ + sth r7,LTR_excpt(r20) /* Modify the exception type to a CutTrace */ + b EatRupt /* Time to go home... */ + +/* We are here 'cause we didn't have a CutTrace system call */ + +noCutT: beq- cr3,MachineCheck ; Whoa... Machine check... + bne+ cr4,noSIGP /* Skip away if we didn't get a SIGP... */ + + lis r6,HIGH_ADDR(EXT(MPsignalFW)) /* Top half of SIGP handler */ + ori r6,r6,LOW_ADDR(EXT(MPsignalFW)) /* Bottom half of it */ + mtlr r6 /* Get it in the link register */ + + blrl /* Call the handler - we'll only come back if this is an AST, */ + /* 'cause FW can't handle that */ + mfsprg r2,0 /* Restore the per_processor area */ +; +; The following interrupts are the only ones that can be redriven +; by the higher level code or emulation routines. +; + +Redrive: cmplwi cr0,r3,T_IN_VAIN /* Did the signal handler eat the signal? */ + mr r11,r3 /* Move it to the right place */ + beq+ cr0,EatRupt /* Bail now if the signal handler processed the signal... */ + + +/* + * Here's where we check for the other fast-path exceptions: translation exceptions, + * emulated instructions, etc. 
+ */ + +noSIGP: cmplwi cr3,r11,T_ALTIVEC_ASSIST ; Check for an Altivec denorm assist + cmplwi cr1,r11,T_PROGRAM /* See if we got a program exception */ + cmplwi cr2,r11,T_INSTRUCTION_ACCESS /* Check on an ISI */ + bne+ cr3,noAltivecAssist ; It is not an assist... + b EXT(AltivecAssist) ; It is an assist... + +noAltivecAssist: + bne+ cr1,noEmulate ; No emulation here... + b EXT(Emulate) ; Go try to emulate... + +noEmulate: cmplwi cr3,r11,T_CSWITCH /* Are we context switching */ + cmplwi r11,T_DATA_ACCESS /* Check on a DSI */ + beq- cr2,DSIorISI /* It's a PTE fault... */ + beq- cr3,conswtch /* It's a context switch... */ + bne+ PassUp /* It's not a PTE fault... */ + +/* + * This call will either handle the fault, in which case it will not + * return, or return to pass the fault up the line. + */ + +DSIorISI: + lis r7,HIGH_ADDR(EXT(handlePF)) /* Top half of DSI handler */ + ori r7,r7,LOW_ADDR(EXT(handlePF)) /* Bottom half of it */ + mtlr r7 /* Get it in the link register */ + mr r3,r11 /* Move the 'rupt code */ + + blrl /* See if we can handle this fault */ + + lwz r0,savesrr1(r13) ; Get the MSR in use at exception time + mfsprg r2, 0 /* Get back per_proc */ + cmplwi cr1,r3,T_IN_VAIN ; Was it handled? + andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on + mr r11,r3 /* Make sure we can find this later */ + beq+ cr1,EatRupt ; Yeah, just blast back to the user... + andc r0,r0,r4 ; Remove the recover bit + beq+ PassUp ; Not on, normal case... + lwz r4,savesrr0(r13) ; Get the failing instruction address + lwz r5,savecr(r13) ; Get the condition register + stw r0,savesrr1(r13) ; Save the result MSR + addi r4,r4,4 ; Skip failing instruction + rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed + stw r4,savesrr0(r13) ; Save instruction address + stw r4,savecr(r13) ; And the resume CR + b EatRupt ; Resume emulated code + +/* + * Here is where we handle the context switch firmware call. 
The old + * context has been saved, and the new savearea in in saver3. We'll just + * muck around with the savearea pointers, and then join the exit routine + */ +conswtch: lwz r28,SAVflags(r13) /* The the flags of the current */ + mr r29,r13 /* Save the save */ + rlwinm r30,r13,0,0,19 /* Get the start of the savearea block */ + lwz r5,saver3(r13) /* Switch to the new savearea */ + oris r28,r28,HIGH_ADDR(SAVattach) /* Turn on the attached flag */ + lwz r30,SACvrswap(r30) /* get real to virtual translation */ + mr r13,r5 /* Switch saveareas */ + xor r27,r29,r30 /* Flip to virtual */ + stw r28,SAVflags(r29) /* Stash it back */ + stw r27,saver3(r5) /* Push the new savearea to the switch to routine */ + b EatRupt /* Start 'er up... */ + +; +; Handle machine check here. +; +; ? +; +MachineCheck: + lwz r27,savesrr1(r13) ; ? + rlwinm. r11,r27,0,dcmck,dcmck ; ? + beq+ notDCache ; ? + + mfspr r11,msscr0 ; ? + dssall ; ? + sync + + lwz r27,savesrr1(r13) ; ? + +hiccup: cmplw r27,r27 ; ? + bne- hiccup ; ? + isync ; ? + + oris r11,r11,hi16(dl1hwfm) ; ? + mtspr msscr0,r11 ; ? + +rstbsy: mfspr r11,msscr0 ; ? + + rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ? + bne rstbsy ; ? + + sync ; ? + + li r11,T_IN_VAIN ; ? + b EatRupt ; ? + + +notDCache: +; +; Check if the failure was in +; ml_probe_read. If so, this is expected, so modify the PC to +; ml_proble_read_mck and then eat the exception. +; + lwz r30,savesrr0(r13) ; Get the failing PC + lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part + lis r27,hi16(EXT(ml_probe_read)) ; High order part + ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part + ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part + cmplw r30,r28 ; Check highest possible + cmplw cr1,r30,r27 ; Check lowest + bge- PassUp ; Outside of range + blt- cr1,PassUp ; Outside of range +; +; We need to fix up the BATs here because the probe +; routine messed them all up... As long as we are at it, +; fix up to return directly to caller of probe. 
+; + + lwz r30,saver5(r13) ; Get proper DBAT values + lwz r28,saver6(r13) + lwz r27,saver7(r13) + lwz r11,saver8(r13) + lwz r18,saver9(r13) + + sync + mtdbatu 0,r30 ; Restore DBAT 0 high + mtdbatl 0,r28 ; Restore DBAT 0 low + mtdbatu 1,r27 ; Restore DBAT 1 high + mtdbatu 2,r11 ; Restore DBAT 2 high + mtdbatu 3,r18 ; Restore DBAT 3 high + sync + + lwz r28,savelr(r13) ; Get return point + lwz r27,saver0(r13) ; Get the saved MSR + li r30,0 ; Get a failure RC + stw r28,savesrr0(r13) ; Set the return point + stw r27,savesrr1(r13) ; Set the continued MSR + stw r30,saver3(r13) ; Set return code + li r11,T_IN_VAIN ; Set new interrupt code + b EatRupt ; Yum, yum, eat it all up... + +/* + * Here's where we come back from some instruction emulator. If we come back with + * T_IN_VAIN, the emulation is done and we should just reload state and directly + * go back to the interrupted code. Otherwise, we'll check to see if + * we need to redrive with a different interrupt, i.e., DSI. + */ + + .align 5 + .globl EXT(EmulExit) + +LEXT(EmulExit) + + cmplwi r11,T_IN_VAIN /* Was it emulated? */ + lis r1,hi16(SAVredrive) ; Get redrive request + mfsprg r2,0 ; Restore the per_proc area + beq+ EatRupt /* Yeah, just blast back to the user... */ + lwz r4,SAVflags(r13) ; Pick up the flags + + and. r0,r4,r1 ; Check if redrive requested + andc r4,r4,r1 ; Clear redrive + + beq+ PassUp ; No redrive, just keep on going... + + lwz r3,saveexception(r13) ; Restore exception code + stw r4,SAVflags(r13) ; Set the flags + b Redrive ; Redrive the exception... + +/* Jump into main handler code switching on VM at the same time */ + +/* We assume kernel data is mapped contiguously in physical + * memory, otherwise we'd need to switch on (at least) virtual data. + * SRs are already set up. 
+ */ +PassUp: lis r2,hi16(EXT(exception_handlers)) ; Get exception vector address + ori r2,r2,lo16(EXT(exception_handlers)) ; And low half + lwzx r6,r2,r11 /* Get the actual exception handler address */ + +PassUpDeb: lwz r8,SAVflags(r13) /* Get the flags */ + mtsrr0 r6 /* Set up the handler address */ + oris r8,r8,HIGH_ADDR(SAVattach) /* Since we're passing it up, attach it */ + rlwinm r5,r13,0,0,19 /* Back off to the start of savearea block */ + + mfmsr r3 /* Get our MSR */ + stw r8,SAVflags(r13) /* Pass up the flags */ + rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 /* Clear all but the trace bits */ + li r2,MSR_SUPERVISOR_INT_OFF /* Get our normal MSR value */ + lwz r5,SACvrswap(r5) /* Get real to virtual conversion */ + or r2,r2,r3 /* Keep the trace bits if they're on */ + mr r3,r11 /* Pass the exception code in the paramter reg */ + mtsrr1 r2 /* Set up our normal MSR value */ + xor r4,r13,r5 /* Pass up the virtual address of context savearea */ + + rfi /* Launch the exception handler */ + + .long 0 /* Leave these here gol durn it! */ + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + +/* + * This routine is the only place where we return from an interruption. + * Anyplace else is wrong. Even if I write the code, it's still wrong. + * Feel free to come by and slap me if I do do it--even though I may + * have had a good reason to do it. + * + * All we need to remember here is that R13 must point to the savearea + * that has the context we need to load up. Translation and interruptions + * must be disabled. + * + * This code always loads the context in the savearea pointed to + * by R13. In the process, it throws away the savearea. If there + * is any tomfoolery with savearea stacks, it must be taken care of + * before we get here. + * + * Speaking of tomfoolery, this is where we synthesize interruptions + * if any need to be. 
+ */ + + .align 5 + +EatRupt: mr r31,r13 /* Move the savearea pointer to the far end of the register set */ + +EatRupt2: mfsprg r2,0 /* Get the per_proc block */ + dcbt 0,r31 ; Get this because we need it very soon + +#if TRCSAVE + lwz r30,saver0(r31) ; (TEST/DEBUG) Get users R0 + lwz r20,saveexception(r31) ; (TEST/DEBUG) Returning from trace? + xor r30,r20,r30 ; (TEST/DEBUG) Make code + rlwinm r30,r30,1,0,31 ; (TEST/DEBUG) Make an easy test + cmplwi cr5,r30,0x61 ; (TEST/DEBUG) See if this is a trace +#endif + +/* + * First we see if we are able to free the new savearea. + * If it is not attached to anything, put it on the free list. + * This is real dangerous, we haven't restored context yet... + * So, the free savearea chain lock must stay until the bitter end! + */ + +/* + * It's dangerous here. We haven't restored anything from the current savearea yet. + * And, we mark it the active one. So, if we get an exception in here, it is + * unrecoverable. Unless we mess up, we can't get any kind of exception. So, + * it is important to assay this code as only the purest of gold. + * + * But first, see if there is a savearea hanging off of quickfret. If so, + * we release that one first and then come back for the other. We should rarely + * see one, they appear when FPU or VMX context is discarded by either returning + * to a higher exception level, or explicitly. + * + * A word about QUICKFRET: Multiple saveareas may be queued for release. It is + * the responsibility of the queuer to insure that the savearea is not multiply + * queued and that the appropriate inuse bits are reset. + */ + + + + mfsprg r27,2 ; Get the processor features + lwz r1,savesrr1(r31) ; Get destination MSR + mtcrf 0x60,r27 ; Set CRs with thermal facilities + mr r18,r31 ; Save the savearea pointer + rlwinm. r0,r1,0,MSR_EE_BIT,MSR_EE_BIT ; Are interruptions going to be enabled? 
+ lwz r19,PP_QUICKFRET(r2) ; Get the quick release savearea + crandc 31,pfThermalb,pfThermIntb ; See if we have both thermometer and not interrupt facility + li r0,0 ; Get a zero + crandc 31,31,cr0_eq ; Factor in enablement + la r21,savesr0(r18) ; Point to the first thing we restore + bf 31,tempisok ; No thermal checking needed... + +; +; We get to here if 1) there is a thermal facility, and 2) the hardware +; will or cannot interrupt, and 3) the interrupt will be enabled after this point. +; + + mfspr r16,thrm3 ; Get thermal 3 + mfspr r14,thrm1 ; Get thermal 2 + rlwinm. r16,r16,0,thrme,thrme ; Is the themometer enabled? + mfspr r15,thrm2 ; Get thermal 2 + beq- tempisok ; No thermometer... + rlwinm r16,r14,2,28,31 ; Cluster THRM1s TIE, V, TIN, and TIV at bottom 4 bits + srawi r0,r15,31 ; Make a mask of 1s if temprature over + rlwinm r30,r15,2,28,31 ; Cluster THRM2s TIE, V, TIN, and TIV at bottom 4 bits +; +; Note that the following compare check that V, TIN, and TIV are set and that TIE is cleared. +; This insures that we only emulate when the hardware is not set to interrupt. +; + cmplwi cr0,r16,7 ; Is there a valid pending interruption for THRM1? + cmplwi cr1,r30,7 ; Is there a valid pending interruption for THRM2? + and r15,r15,r0 ; Keep high temp if that interrupted, zero if not + cror cr0_eq,cr0_eq,cr1_eq ; Merge both + andc r14,r14,r0 ; Keep low if high did not interrupt, zero if it did + bne+ tempisok ; Nope, temprature is in range + + li r3,T_THERMAL ; Time to emulate a thermal interruption + or r14,r14,r15 ; Get contents of interrupting register + mr r13,r31 ; Make sure savearea is pointed to correctly + stw r3,saveexception(r31) ; Restore exception code + stw r14,savedar(r31) ; Set the contents of the interrupting register into the dar + b Redrive ; Go process this new interruption... 
+ + +tempisok: lis r30,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + stw r0,PP_QUICKFRET(r2) /* Clear quickfret pointer */ + ori r30,r30,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + dcbt 0,r21 /* Touch in the first thing */ + +#if TRCSAVE + beq- cr5,trkill0 ; (TEST/DEBUG) Do not trace this type + lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask + mr. r14,r14 ; (TEST/DEBUG) Is it stopped? + beq- trkill0 ; (TEST/DEBUG) yes... + bl cte ; (TEST/DEBUG) Trace this + stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea + stw r19,LTR_r2(r20) ; (TEST/DEBUG) Quickfret savearea +trkill0: +#endif + +rtlck: lwarx r22,0,r30 /* Grab the lock value */ + li r23,1 /* Use part of the delay time */ + mr. r22,r22 /* Is it locked? */ + bne- rtlcks /* Yeah, wait for it to clear... */ + stwcx. r23,0,r30 /* Try to seize that there durn lock */ + beq+ fretagain ; Got it... + b rtlck /* Collision, try again... */ + +rtlcks: lwz r22,SVlock(r30) /* Get that lock in here */ + mr. r22,r22 /* Is it free yet? */ + beq+ rtlck /* Yeah, try for it again... */ + b rtlcks /* Sniff away... */ + +; +; Lock gotten, toss the saveareas +; +fretagain: +#if TRCSAVE + beq- cr5,trkill1 ; (TEST/DEBUG) Do not trace this type + lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask + mr. r14,r14 ; (TEST/DEBUG) Is it stopped? + beq- trkill1 ; (TEST/DEBUG) yes... + li r0,1 ; (TEST/DEBUG) ID number + bl cte ; (TEST/DEBUG) Trace this + stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea + stw r19,LTR_r2(r20) ; (TEST/DEBUG) Quickfret savearea +trkill1: +#endif + + mr. r18,r18 ; Are we actually done here? + beq- donefret ; Yeah... + mr. r31,r19 ; Is there a quickfret to do? + beq+ noqfrt ; Nope... 
+ lwz r19,SAVqfret(r19) ; Yes, get the next in line +#if TRCSAVE + beq- cr5,trkill2 ; (TEST/DEBUG) Do not trace this type + lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask + mr. r14,r14 ; (TEST/DEBUG) Is it stopped? + beq- trkill2 ; (TEST/DEBUG) yes... + li r0,2 ; (TEST/DEBUG) ID number + bl cte ; (TEST/DEBUG) Trace this + stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea + stw r19,LTR_r2(r20) ; (TEST/DEBUG) next quickfret savearea + stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss +trkill2: +#endif + b doqfrt ; Go do it... + +noqfrt: mr r31,r18 ; Set the area to release + li r18,0 ; Show we have done it +#if TRCSAVE + beq- cr5,trkill3 ; (TEST/DEBUG) Do not trace this type + lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask + mr. r14,r14 ; (TEST/DEBUG) Is it stopped? + beq- trkill3 ; (TEST/DEBUG) yes... + li r0,3 ; (TEST/DEBUG) ID number + bl cte ; (TEST/DEBUG) Trace this + stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea + stw r19,LTR_r2(r20) ; (TEST/DEBUG) next quickfret savearea + stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss +trkill3: +#endif + +doqfrt: li r0,0 ; Get a constant 0 + lis r26,0x8000 /* Build a bit mask and assume first savearea */ + stw r0,SAVqfret(r31) ; Make sure back chain is unlinked + lwz r28,SAVflags(r31) ; Get the flags for the old active one +#if TRCSAVE + beq- cr5,trkill4 ; (TEST/DEBUG) Do not trace this type + lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask + mr. r14,r14 ; (TEST/DEBUG) Is it stopped? + beq- trkill4 ; (TEST/DEBUG) yes... 
+ li r0,4 ; (TEST/DEBUG) ID number + bl cte ; (TEST/DEBUG) Trace this + stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea + stw r19,LTR_r2(r20) ; (TEST/DEBUG) next quickfret savearea + stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss + stw r28,LTR_r4(r20) ; (TEST/DEBUG) Save current flags +trkill4: +#endif + rlwinm r25,r31,21,31,31 /* Get position of savearea in block */ + andis. r28,r28,HIGH_ADDR(SAVinuse) /* See if we need to free it */ + srw r26,r26,r25 /* Get bit position to deallocate */ + rlwinm r29,r31,0,0,19 /* Round savearea pointer to even page address */ + + bne- fretagain /* Still in use, we can't free this one... */ + + lwz r23,SACalloc(r29) /* Get the allocation for this block */ + lwz r24,SVinuse(r30) /* Get the in use count */ + mr r28,r23 ; (TEST/DEBUG) save for trace + or r23,r23,r26 /* Turn on our bit */ + subi r24,r24,1 /* Show that this one is free */ + cmplw r23,r26 /* Is our's the only one free? */ + stw r23,SACalloc(r29) /* Save it out */ + bne+ rstrest /* Nope, then the block is already on the free list */ + + lwz r22,SVfree(r30) /* Get the old head of the free list */ + stw r29,SVfree(r30) /* Point the head at us now */ + stw r22,SACnext(r29) ; Point us to the old last + +rstrest: stw r24,SVinuse(r30) /* Set the in use count */ +#if TRCSAVE + beq- cr5,trkill5 ; (TEST/DEBUG) Do not trace this type + lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask + mr. r14,r14 ; (TEST/DEBUG) Is it stopped? + beq- trkill5 ; (TEST/DEBUG) yes... 
+ li r0,5 ; (TEST/DEBUG) ID number + bl cte ; (TEST/DEBUG) Trace this + stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea + stw r19,LTR_r2(r20) ; (TEST/DEBUG) Next quickfret savearea + stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss + stw r28,LTR_srr1(r20) ; (TEST/DEBUG) Save the original allocation + stw r23,LTR_dar(r20) ; (TEST/DEBUG) Save the new allocation + stw r24,LTR_save(r20) ; (TEST/DEBUG) Save the new in use count + stw r22,LTR_lr(r20) ; (TEST/DEBUG) Save the old top of free list + stw r29,LTR_ctr(r20) ; (TEST/DEBUG) Save the new top of free list +trkill5: +#endif + b fretagain ; Go finish up the rest... + +; +; Build the SR values depending upon destination. If we are going to the kernel, +; the SRs are almost all the way set up. SR14 (or the currently used copyin/out register) +; must be set to whatever it was at the last exception because it varies. All the rest +; have been set up already. +; +; If we are going into user space, we need to check a bit more. SR0, SR1, SR2, and +; SR14 (current implementation) must be restored always. The others must be set if +; they are different that what was loaded last time (i.e., tasks have switched). +; We check the last loaded address space ID and if the same, we skip the loads. +; This is a performance gain because SR manipulations are slow. +; + + .align 5 + +donefret: lwz r26,savesrr1(r31) ; Get destination state flags + lwz r7,PP_USERPMAP(r2) ; Pick up the user pmap we may launch + cmplw cr3,r14,r14 ; Set that we do not need to stop streams + rlwinm. r17,r26,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are going to user or system + li r14,PMAP_SEGS ; Point to segments + bne+ gotouser ; We are going into user state... + + lwz r14,savesr14(r31) ; Get the copyin/out register at interrupt time + mtsr sr14,r14 ; Set SR14 + b segsdone ; We are all set up now... 
+ + .align 5 + +gotouser: dcbt r14,r7 ; Touch the segment register contents + lwz r16,PP_LASTPMAP(r7) ; Pick up the last loaded pmap + addi r14,r14,32 ; Second half of pmap segments + lwz r13,PMAP_VFLAGS(r7) ; Get the flags + lwz r15,PMAP_SPACE(r7) ; Get the primary space + dcbt r14,r7 ; Touch second page + mtcrf 0x0F,r13 ; Set CRs to correspond to the subordinate spaces + oris r15,r15,hi16(SEG_REG_PROT) ; Set segment 0 SR value + lhz r9,PP_CPU_FLAGS(r2) ; Get the processor flags + + addis r13,r15,0x0000 ; Get SR0 value + bf 16,nlsr0 ; No alternate here... + lwz r13,PMAP_SEGS+(0*4)(r7) ; Get SR0 value + +nlsr0: mtsr sr0,r13 ; Load up the SR + rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on + + addis r13,r15,0x0010 ; Get SR1 value + bf 17,nlsr1 ; No alternate here... + lwz r13,PMAP_SEGS+(1*4)(r7) ; Get SR1 value + +nlsr1: mtsr sr1,r13 ; Load up the SR + or r26,r26,r9 ; Flip on the BE bit for special trace if needed + + cmplw cr3,r7,r16 ; Are we running the same segs as last time? + + addis r13,r15,0x0020 ; Get SR2 value + bf 18,nlsr2 ; No alternate here... + lwz r13,PMAP_SEGS+(2*4)(r7) ; Get SR2 value + +nlsr2: mtsr sr2,r13 ; Load up the SR + + addis r13,r15,0x0030 ; Get SR3 value + bf 19,nlsr3 ; No alternate here... + lwz r13,PMAP_SEGS+(3*4)(r7) ; Get SR3 value + +nlsr3: mtsr sr3,r13 ; Load up the SR + + addis r13,r15,0x00E0 ; Get SR14 value + bf 30,nlsr14 ; No alternate here... + lwz r13,PMAP_SEGS+(14*4)(r7) ; Get SR14 value + +nlsr14: mtsr sr14,r13 ; Load up the SR + + beq+ segsdone ; All done if same pmap as last time... + + addis r13,r15,0x0040 ; Get SR4 value + bf 20,nlsr4 ; No alternate here... + lwz r13,PMAP_SEGS+(4*4)(r7) ; Get SR4 value + +nlsr4: mtsr sr4,r13 ; Load up the SR + + addis r13,r15,0x0050 ; Get SR5 value + bf 21,nlsr5 ; No alternate here... 
+ lwz r13,PMAP_SEGS+(5*4)(r7) ; Get SR5 value + +nlsr5: mtsr sr5,r13 ; Load up the SR + + addis r13,r15,0x0060 ; Get SR6 value + bf 22,nlsr6 ; No alternate here... + lwz r13,PMAP_SEGS+(6*4)(r7) ; Get SR6 value + +nlsr6: mtsr sr6,r13 ; Load up the SR + + addis r13,r15,0x0070 ; Get SR7 value + bf 23,nlsr7 ; No alternate here... + lwz r13,PMAP_SEGS+(7*4)(r7) ; Get SR7 value + +nlsr7: mtsr sr7,r13 ; Load up the SR + + addis r13,r15,0x0080 ; Get SR8 value + bf 24,nlsr8 ; No alternate here... + lwz r13,PMAP_SEGS+(8*4)(r7) ; Get SR8 value + +nlsr8: mtsr sr8,r13 ; Load up the SR + + addis r13,r15,0x0090 ; Get SR9 value + bf 25,nlsr9 ; No alternate here... + lwz r13,PMAP_SEGS+(9*4)(r7) ; Get SR9 value + +nlsr9: mtsr sr9,r13 ; Load up the SR + + addis r13,r15,0x00A0 ; Get SR10 value + bf 26,nlsr10 ; No alternate here... + lwz r13,PMAP_SEGS+(10*4)(r7) ; Get SR10 value + +nlsr10: mtsr sr10,r13 ; Load up the SR + + addis r13,r15,0x00B0 ; Get SR11 value + bf 27,nlsr11 ; No alternate here... + lwz r13,PMAP_SEGS+(11*4)(r7) ; Get SR11 value + +nlsr11: mtsr sr11,r13 ; Load up the SR + + addis r13,r15,0x00C0 ; Get SR12 value + bf 28,nlsr12 ; No alternate here... + lwz r13,PMAP_SEGS+(12*4)(r7) ; Get SR12 value + +nlsr12: mtsr sr12,r13 ; Load up the SR + + addis r13,r15,0x00D0 ; Get SR13 value + bf 29,nlsr13 ; No alternate here... + lwz r13,PMAP_SEGS+(13*4)(r7) ; Get SR13 value + +nlsr13: mtsr sr13,r13 ; Load up the SR + + addis r13,r15,0x00F0 ; Get SR15 value + bf 31,nlsr15 ; No alternate here... 
+ lwz r13,PMAP_SEGS+(15*4)(r7) ; Get SR15 value + +nlsr15: mtsr sr15,r13 ; Load up the SR + +segsdone: li r1,emfp0 ; Point to the fp savearea + lwz r25,savesrr0(r31) ; Get the SRR0 to use + la r28,saver6(r31) /* Point to the next line to use */ + dcbt r1,r2 ; Start moving in a work area + lwz r0,saver0(r31) /* Restore */ + dcbt 0,r28 /* Touch it in */ + mr r29,r2 ; Save the per_proc + lwz r1,saver1(r31) /* Restore */ + lwz r2,saver2(r31) /* Restore */ + la r28,saver14(r31) /* Point to the next line to get */ + lwz r3,saver3(r31) /* Restore */ + mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) + lwz r4,saver4(r31) /* Restore */ + mtsrr0 r25 /* Restore the SRR0 now */ + lwz r5,saver5(r31) /* Restore */ + mtsrr1 r26 /* Restore the SRR1 now */ + lwz r6,saver6(r31) /* Restore */ + + dcbt 0,r28 /* Touch that next line on in */ + la r28,savexfpscrpad(r31) ; Point to the saved fpscr + + lwz r7,saver7(r31) /* Restore */ + dcbt 0,r28 ; Touch saved fpscr + lwz r8,saver8(r31) /* Restore */ + lwz r9,saver9(r31) /* Restore */ + lwz r10,saver10(r31) /* Restore */ + lwz r11,saver11(r31) /* Restore */ + lwz r12,saver12(r31) /* Restore */ + lwz r13,saver13(r31) /* Restore */ + la r28,saver22(r31) /* Point to the next line to do */ + lwz r14,saver14(r31) /* Restore */ + lwz r15,saver15(r31) /* Restore */ + +; +; Note that floating point will be enabled from here on until the RFI +; + + bf- pfFloatb,nofphere ; Skip if no floating point... 
+ mfmsr r27 ; Save the MSR + ori r27,r27,lo16(MASK(MSR_FP)) ; Enable floating point + mtmsr r27 ; Really enable + isync + stfd f0,emfp0(r29) ; Save FP0 + lfd f0,savexfpscrpad(r31) ; Get the fpscr + mtfsf 0xFF,f0 ; Restore fpscr + lfd f0,emfp0(r29) ; Restore the used register + +nofphere: dcbt 0,r28 /* Touch in another line of context */ + + lwz r16,saver16(r31) /* Restore */ + lwz r17,saver17(r31) /* Restore */ + lwz r18,saver18(r31) /* Restore */ + lwz r19,saver19(r31) /* Restore */ + lwz r20,saver20(r31) /* Restore */ + lwz r21,saver21(r31) /* Restore */ + la r28,saver30(r31) /* Point to the final line */ + lwz r22,saver22(r31) /* Restore */ + + dcbt 0,r28 /* Suck it in */ + + lwz r23,saver23(r31) /* Restore */ + lwz r24,saver24(r31) /* Restore */ + lwz r25,saver25(r31) /* Restore */ + lwz r26,saver26(r31) /* Restore */ + lwz r27,saver27(r31) /* Restore */ + + lwz r28,savecr(r31) /* Get CR to restore */ + bf pfAltivecb,noavec4 ; No vector on this machine + lwz r29,savevrsave(r31) ; Get the vrsave + beq+ cr3,noavec3 ; SRs have not changed, no need to stop the streams... + dssall ; Kill all data streams + ; The streams should be suspended + ; already, and we do a bunch of + ; dependent loads and a sync later + ; so we should be cool. 
+ +noavec3: mtspr vrsave,r29 ; Set the vrsave + +noavec4: lwz r29,savexer(r31) /* Get XER to restore */ + mtcr r28 /* Restore the CR */ + lwz r28,savelr(r31) /* Get LR to restore */ + mtxer r29 /* Restore the XER */ + lwz r29,savectr(r31) /* Get the CTR to restore */ + mtlr r28 /* Restore the LR */ + lwz r28,saver30(r31) /* Restore */ + mtctr r29 /* Restore the CTR */ + lwz r29,saver31(r31) /* Restore */ + mtsprg 2,r28 /* Save R30 */ + lwz r28,saver28(r31) /* Restore */ + mtsprg 3,r29 /* Save R31 */ + lwz r29,saver29(r31) /* Restore */ + +#if PERFTIMES && DEBUG + stmw r1,0x280(br0) ; Save all registers + mfcr r20 ; Save the CR + mflr r21 ; Save the LR + mfsrr0 r9 ; Save SRR0 + mfsrr1 r11 ; Save SRR1 + mr r8,r0 ; Save R0 + li r3,69 ; Indicate interrupt + mr r4,r11 ; Set MSR to log + mr r5,r31 ; Get savearea to log + bl EXT(dbgLog2) ; Cut log entry + mr r0,r8 ; Restore R0 + mtsrr0 r9 ; Restore SRR0 + mtsrr1 r11 ; Restore SRR1 + mtlr r21 ; Restore the LR + mtcr r20 ; Restore the CR + lmw r1,0x280(br0) ; Restore all the rest +#endif + + li r31,0 /* Get set to clear lock */ + sync /* Make sure it's all out there */ + stw r31,SVlock(r30) /* Unlock it */ + mfsprg r30,2 /* Restore R30 */ + mfsprg r31,0 ; Get per_proc + lwz r31,pfAvailable(r31) ; Get the feature flags + mtsprg 2,r31 ; Set the feature flags + mfsprg r31,3 /* Restore R31 */ + + rfi /* Click heels three times and think very hard that there's no place like home */ + + .long 0 /* For old 601 bug */ + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + .long 0 + + + + +/* + * exception_exit(savearea *) + * + * + * ENTRY : IR and/or DR and/or interruptions can be on + * R3 points to the physical address of a savearea + */ + + .align 5 + .globl EXT(exception_exit) + +LEXT(exception_exit) + + mfsprg r29,2 ; Get feature flags + mfmsr r30 /* Get the current MSR */ + mtcrf 0x04,r29 ; Set the features + mr r31,r3 /* Get the savearea in the right register */ + andi. 
r30,r30,0x7FCF /* Turn off externals, IR, and DR */ + lis r1,hi16(SAVredrive) ; Get redrive request + + bt pfNoMSRirb,eeNoMSR ; No MSR... + + mtmsr r30 ; Translation and all off + isync ; Toss prefetch + b eeNoMSRx + +eeNoMSR: li r0,loadMSR ; Get the MSR setter SC + mr r3,r30 ; Get new MSR + sc ; Set it + +eeNoMSRx: + mfsprg r2,0 ; Get the per_proc block + lwz r4,SAVflags(r31) ; Pick up the flags + mr r13,r31 ; Put savearea here also + + and. r0,r4,r1 ; Check if redrive requested + andc r4,r4,r1 ; Clear redrive + + dcbt br0,r2 ; We will need this in just a sec + + beq+ EatRupt ; No redrive, just exit... + + lwz r3,saveexception(r13) ; Restore exception code + stw r4,SAVflags(r13) ; Set the flags + b Redrive ; Redrive the exception... + +; +; Make trace entry for lowmem_vectors internal debug +; +#if TRCSAVE +cte: + lwz r20,LOW_ADDR(EXT(traceCurr)-EXT(ExceptionVectorsStart))(br0) ; Pick up the current trace entry + lwz r16,LOW_ADDR(EXT(traceEnd)-EXT(ExceptionVectorsStart))(br0) ; Grab up the end of it all + addi r17,r20,LTR_size ; Point to the next trace entry + cmplw r17,r16 ; Do we need to wrap the trace table? + li r15,32 ; Second line of entry + bne+ ctenwrap ; We got a trace entry... + lwz r17,LOW_ADDR(EXT(traceStart)-EXT(ExceptionVectorsStart))(br0) ; Wrap back to the top + +ctenwrap: stw r17,LOW_ADDR(EXT(traceCurr)-EXT(ExceptionVectorsStart))(br0) ; Set the next entry for the next guy + + bf- featL1ena,skipz8 ; L1 cache is disabled... + dcbz 0,r20 ; Allocate cache for the entry + dcbz r15,r20 ; Zap the second half +skipz8: + +ctegetTB: mftbu r16 ; Get the upper timebase + mftb r17 ; Get the lower timebase + mftbu r15 ; Get the upper one again + cmplw r16,r15 ; Did the top tick? + bne- ctegetTB ; Yeah, need to get it again... 
+ + li r15,0x111 ; Get the special trace ID code + stw r0,LTR_r0(r20) ; Save R0 (usually used as an ID number + stw r16,LTR_timeHi(r20) ; Set the upper part of TB + mflr r16 ; Get the return point + stw r17,LTR_timeLo(r20) ; Set the lower part of TB + sth r15,LTR_excpt(r20) ; Save the exception type + stw r16,LTR_srr0(r20) ; Save the return point + blr ; Leave... +#endif + +/* + * Start of the trace table + */ + + .align 12 /* Align to 4k boundary */ + + .globl EXT(traceTableBeg) +EXT(traceTableBeg): /* Start of trace table */ +/* .fill 2048,4,0 Make an 8k trace table for now */ + .fill 13760,4,0 /* Make an .trace table for now */ +/* .fill 240000,4,0 Make an .trace table for now */ + .globl EXT(traceTableEnd) +EXT(traceTableEnd): /* End of trace table */ + + .globl EXT(ExceptionVectorsEnd) +EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */ +#ifndef HACKALERTHACKALERT +/* + * This .long needs to be here because the linker gets confused and tries to + * include the final label in a section in the next section if there is nothing + * after it + */ + .long 0 /* (HACK/HACK/HACK) */ +#endif + + .data + .align ALIGN + .globl EXT(exception_end) +EXT(exception_end): + .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */ + + diff --git a/osfmk/ppc/mach_param.h b/osfmk/ppc/mach_param.h new file mode 100644 index 000000000..49ac015de --- /dev/null +++ b/osfmk/ppc/mach_param.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Machine-dependent parameters for ppc. + */ + +#define HZ (100) /* clock tick each 10 ms. */ diff --git a/osfmk/ppc/machine_cpu.h b/osfmk/ppc/machine_cpu.h new file mode 100644 index 000000000..98dd13a77 --- /dev/null +++ b/osfmk/ppc/machine_cpu.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PPC_MACHINE_CPU_H_ +#define _PPC_MACHINE_CPU_H_ + +#include +#include +#include +#include + +void cpu_machine_init( + void); + +kern_return_t cpu_register( + int *); + +kern_return_t cpu_start( + int); + +void cpu_doshutdown( + void); + +void cpu_sleep( + void); + +void cpu_signal_handler( + void); + +#endif /* _PPC_MACHINE_CPU_H_ */ diff --git a/osfmk/ppc/machine_routines.c b/osfmk/ppc/machine_routines.c new file mode 100644 index 000000000..8333d637c --- /dev/null +++ b/osfmk/ppc/machine_routines.c @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +boolean_t get_interrupts_enabled(void); +extern boolean_t set_interrupts_enabled(boolean_t); + +/* Map memory map IO space */ +vm_offset_t +ml_io_map( + vm_offset_t phys_addr, + vm_size_t size) +{ + return(io_map(phys_addr,size)); +} + +/* static memory allocation */ +vm_offset_t +ml_static_malloc( + vm_size_t size) +{ + extern vm_offset_t static_memory_end; + extern boolean_t pmap_initialized; + vm_offset_t vaddr; + + if (pmap_initialized) + return((vm_offset_t)NULL); + else { + vaddr = static_memory_end; + static_memory_end = round_page(vaddr+size); + return(vaddr); + } +} + +vm_offset_t +ml_static_ptovirt( + vm_offset_t paddr) +{ + extern vm_offset_t static_memory_end; + vm_offset_t vaddr; + + /* Static memory is map V=R */ + vaddr = paddr; + if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) ) + return(vaddr); + else + return((vm_offset_t)NULL); +} + +void +ml_static_mfree( + vm_offset_t vaddr, + vm_size_t size) +{ + vm_offset_t paddr_cur, vaddr_cur; + + for (vaddr_cur = round_page(vaddr); + vaddr_cur < trunc_page(vaddr+size); + vaddr_cur += PAGE_SIZE) { + paddr_cur = pmap_extract(kernel_pmap, vaddr_cur); + if (paddr_cur != (vm_offset_t)NULL) { + vm_page_wire_count--; + pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE); + vm_page_create(paddr_cur,paddr_cur+PAGE_SIZE); + } + } +} + +/* virtual to physical on wired pages */ +vm_offset_t ml_vtophys( 
+ vm_offset_t vaddr) +{ + return(pmap_extract(kernel_pmap, vaddr)); +} + +/* Initialize Interrupt Handler */ +void ml_install_interrupt_handler( + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + int current_cpu; + boolean_t current_state; + + current_cpu = cpu_number(); + current_state = ml_get_interrupts_enabled(); + + per_proc_info[current_cpu].interrupt_nub = nub; + per_proc_info[current_cpu].interrupt_source = source; + per_proc_info[current_cpu].interrupt_target = target; + per_proc_info[current_cpu].interrupt_handler = handler; + per_proc_info[current_cpu].interrupt_refCon = refCon; + + per_proc_info[current_cpu].get_interrupts_enabled + = get_interrupts_enabled; + per_proc_info[current_cpu].set_interrupts_enabled + = set_interrupts_enabled; + (void) ml_set_interrupts_enabled(current_state); +} + +/* Initialize Interrupts */ +void ml_init_interrupt(void) +{ + int current_cpu; + boolean_t current_state; + + current_state = ml_get_interrupts_enabled(); + + current_cpu = cpu_number(); + per_proc_info[current_cpu].get_interrupts_enabled + = get_interrupts_enabled; + per_proc_info[current_cpu].set_interrupts_enabled + = set_interrupts_enabled; + (void) ml_set_interrupts_enabled(current_state); +} + +boolean_t fake_get_interrupts_enabled(void) +{ + /* + * The scheduler is not active on this cpu. There is no need to disable + * preemption. The current thread wont be dispatched on anhother cpu. + */ + return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0); +} + +boolean_t fake_set_interrupts_enabled(boolean_t enable) +{ + boolean_t interrupt_state_prev; + + /* + * The scheduler is not active on this cpu. There is no need to disable + * preemption. The current thread wont be dispatched on anhother cpu. 
+ */ + interrupt_state_prev = + (per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0; + if (interrupt_state_prev != enable) + per_proc_info[cpu_number()].cpu_flags ^= turnEEon; + return(interrupt_state_prev); +} + +/* Get Interrupts Enabled */ +boolean_t ml_get_interrupts_enabled(void) +{ + return(per_proc_info[cpu_number()].get_interrupts_enabled()); +} + +boolean_t get_interrupts_enabled(void) +{ + return((mfmsr() & MASK(MSR_EE)) != 0); +} + +/* Set Interrupts Enabled */ +boolean_t ml_set_interrupts_enabled(boolean_t enable) +{ + return(per_proc_info[cpu_number()].set_interrupts_enabled(enable)); +} + +/* Check if running at interrupt context */ +boolean_t ml_at_interrupt_context(void) +{ + /* + * If running at interrupt context, the current thread won't be + * dispatched on another cpu. There is no need to turn off preemption. + */ + return (per_proc_info[cpu_number()].istackptr == 0); +} + +/* Generate a fake interrupt */ +void ml_cause_interrupt(void) +{ + CreateFakeIO(); +} + +void machine_clock_assist(void) +{ + if (per_proc_info[cpu_number()].get_interrupts_enabled == fake_get_interrupts_enabled) + CreateFakeDEC(); +} + +void machine_idle(void) +{ + if (per_proc_info[cpu_number()].get_interrupts_enabled != fake_get_interrupts_enabled) { + int cur_decr; + + machine_idle_ppc(); + + /* + * protect against a lost decrementer trap + * if the current decrementer value is negative + * by more than 10 ticks, re-arm it since it's + * unlikely to fire at this point... 
a hardware + * interrupt got us out of machine_idle and may + * also be contributing to this state + */ + cur_decr = isync_mfdec(); + + if (cur_decr < -10) { + mtdec(1); + } + } +} + +void +machine_signal_idle( + processor_t processor) +{ + (void)cpu_signal(processor->slot_num, SIGPwake, 0, 0); +} + +kern_return_t +ml_processor_register( + ml_processor_info_t *processor_info, + processor_t *processor, + ipi_handler_t *ipi_handler) +{ + kern_return_t ret; + int target_cpu; + + if (processor_info->boot_cpu == FALSE) { + if (cpu_register(&target_cpu) != KERN_SUCCESS) + return KERN_FAILURE; + } else { + /* boot_cpu is always 0 */ + target_cpu= 0; + } + + per_proc_info[target_cpu].cpu_id = processor_info->cpu_id; + per_proc_info[target_cpu].start_paddr = processor_info->start_paddr; + + if(per_proc_info[target_cpu].pf.Available & pfCanNap) + if(processor_info->supports_nap) + per_proc_info[target_cpu].pf.Available |= pfWillNap; + + if(processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL) + per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable; + else + per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL; + + if(target_cpu == cpu_number()) + __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */ + + *processor = cpu_to_processor(target_cpu); + *ipi_handler = cpu_signal_handler; + + return KERN_SUCCESS; +} + +boolean_t +ml_enable_nap(int target_cpu, boolean_t nap_enabled) +{ + boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap); + + if(per_proc_info[target_cpu].pf.Available & pfCanNap) { /* Can the processor nap? */ + if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap; /* Is nap supported on this machine? 
*/ + else per_proc_info[target_cpu].pf.Available &= ~pfWillNap; /* Clear if not */ + } + + if(target_cpu == cpu_number()) + __asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available)); /* Set live value */ + + return (prev_value); +} + +void +ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info) +{ + if (cpu_info == 0) return; + + cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0; + cpu_info->cache_line_size = per_proc_info[0].pf.lineSize; + cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize; + cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize; + + if (per_proc_info[0].pf.Available & pfL2) { + cpu_info->l2_settings = per_proc_info[0].pf.l2cr; + cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size; + } else { + cpu_info->l2_settings = 0; + cpu_info->l2_cache_size = 0xFFFFFFFF; + } + if (per_proc_info[0].pf.Available & pfL3) { + cpu_info->l3_settings = per_proc_info[0].pf.l3cr; + cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size; + } else { + cpu_info->l3_settings = 0; + cpu_info->l3_cache_size = 0xFFFFFFFF; + } +} + +void +init_ast_check(processor_t processor) +{} + +void +cause_ast_check(processor_t processor) +{ + if ((processor != current_processor()) + && (per_proc_info[processor->slot_num].get_interrupts_enabled + != fake_get_interrupts_enabled)) + cpu_signal(processor->slot_num, SIGPast, NULL, NULL); +} + +thread_t +switch_to_shutdown_context( + thread_t thread, + void (*doshutdown)(processor_t), + processor_t processor) +{ + disable_preemption(); + CreateShutdownCTX(); + enable_preemption(); + return((thread_t)(per_proc_info[cpu_number()].old_thread)); +} + +int +set_be_bit() +{ + + int mycpu; + boolean_t current_state; + + current_state = ml_set_interrupts_enabled(FALSE); /* Can't allow interruptions when mucking with per_proc flags */ + mycpu = cpu_number(); + per_proc_info[mycpu].cpu_flags |= traceBE; + (void) ml_set_interrupts_enabled(current_state); + return(1); +} + +int +clr_be_bit() +{ + int mycpu; + 
boolean_t current_state; + + current_state = ml_set_interrupts_enabled(FALSE); /* Can't allow interruptions when mucking with per_proc flags */ + mycpu = cpu_number(); + per_proc_info[mycpu].cpu_flags &= ~traceBE; + (void) ml_set_interrupts_enabled(current_state); + return(1); +} + +int +be_tracing() +{ + int mycpu = cpu_number(); + return(per_proc_info[mycpu].cpu_flags & traceBE); +} diff --git a/osfmk/ppc/machine_routines.h b/osfmk/ppc/machine_routines.h new file mode 100644 index 000000000..6dc2d05dd --- /dev/null +++ b/osfmk/ppc/machine_routines.h @@ -0,0 +1,188 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_MACHINE_ROUTINES_H_ +#define _PPC_MACHINE_ROUTINES_H_ + +#include +#include +#include +#include + + +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) +/* IO memory map services */ + +/* Map memory map IO space */ +vm_offset_t ml_io_map( + vm_offset_t phys_addr, + vm_size_t size); + +/* boot memory allocation */ +vm_offset_t ml_static_malloc( + vm_size_t size); +#endif + +vm_offset_t +ml_static_ptovirt( + vm_offset_t); + +void ml_static_mfree( + vm_offset_t, + vm_size_t); + +/* virtual to physical on wired pages */ +vm_offset_t ml_vtophys( + vm_offset_t vaddr); + +/* Init Interrupts */ +void ml_install_interrupt_handler( + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon); + +#ifdef MACH_KERNEL_PRIVATE +void ml_init_interrupt(void); + +boolean_t fake_get_interrupts_enabled(void); + +boolean_t fake_set_interrupts_enabled( + boolean_t enable); +#endif + +/* Get Interrupts Enabled */ +boolean_t ml_get_interrupts_enabled(void); + +/* Set Interrupts Enabled */ +boolean_t ml_set_interrupts_enabled(boolean_t enable); + +/* Check if running at interrupt context */ +boolean_t ml_at_interrupt_context(void); + +/* Generate a fake interrupt */ +void ml_cause_interrupt(void); + +#ifdef MACH_KERNEL_PRIVATE +/* check pending timers */ +void machine_clock_assist(void); + +void machine_idle(void); + +void machine_signal_idle( + processor_t processor); +#endif + +/* PCI config cycle probing */ +boolean_t ml_probe_read( + vm_offset_t paddr, + unsigned int *val); + +/* Read physical address byte */ +unsigned int ml_phys_read_byte( + vm_offset_t paddr); + +/* Read physical address */ +unsigned int ml_phys_read( + vm_offset_t paddr); + +/* Write physical address byte */ +void ml_phys_write_byte( + vm_offset_t paddr, unsigned int data); + +/* Write physical address */ +void ml_phys_write( + vm_offset_t paddr, unsigned int data); + +/* Type for the 
IPI Hander */ +typedef void (*ipi_handler_t)(void); + +/* Type for the Time Base Enable function */ +typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable); + +/* Struct for ml_processor_register */ +struct ml_processor_info_t { + cpu_id_t cpu_id; + boolean_t boot_cpu; + vm_offset_t start_paddr; + boolean_t supports_nap; + unsigned long l2cr_value; + time_base_enable_t time_base_enable; +}; + +typedef struct ml_processor_info_t ml_processor_info_t; + +/* Register a processor */ +kern_return_t ml_processor_register( + ml_processor_info_t *processor_info, + processor_t *processor, + ipi_handler_t *ipi_handler); + +/* enables (or disables) the processor nap mode the function returns the previous value*/ +boolean_t ml_enable_nap( + int target_cpu, + boolean_t nap_enabled); + +/* Put the processor to sleep */ +void ml_ppc_sleep(void); + +/* Struct for ml_ppc_get_cpu_info */ +struct ml_ppc_cpu_info_t { + unsigned long vector_unit; + unsigned long cache_line_size; + unsigned long l1_icache_size; + unsigned long l1_dcache_size; + unsigned long l2_settings; + unsigned long l2_cache_size; + unsigned long l3_settings; + unsigned long l3_cache_size; +}; + +typedef struct ml_ppc_cpu_info_t ml_ppc_cpu_info_t; + +/* Get processor info */ +void ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info); + +#ifdef MACH_KERNEL_PRIVATE +void cacheInit(void); + +void cacheDisable(void); + +void ml_thrm_init(void); +unsigned int ml_read_temp(void); + +void ml_thrm_set( + unsigned int low, + unsigned int high); + +unsigned int ml_throttle( + unsigned int); +#endif + +void ml_get_timebase(unsigned long long *timstamp); +void ml_sense__nmi(void); + +#endif /* _PPC_MACHINE_ROUTINES_H_ */ diff --git a/osfmk/ppc/machine_routines_asm.s b/osfmk/ppc/machine_routines_asm.s new file mode 100644 index 000000000..48e918762 --- /dev/null +++ b/osfmk/ppc/machine_routines_asm.s @@ -0,0 +1,1123 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include + +/* PCI config cycle probing + * + * boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val) + * + * Read the memory location at physical address paddr. + * This is a part of a device probe, so there is a good chance we will + * have a machine check here. So we have to be able to handle that. + * We assume that machine checks are enabled both in MSR and HIDs + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_probe_read) + +LEXT(ml_probe_read) + + mfsprg r9,2 ; Get feature flags + mfmsr r0 ; Save the current MSR + neg r10,r3 ; Number of bytes to end of page + rlwinm r2,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions + rlwinm. r10,r10,0,20,31 ; Clear excess junk and test for page bndry + mr r12,r3 ; Save the load address + mtcrf 0x04,r9 ; Set the features + cmplwi cr1,r10,4 ; At least 4 bytes left in page? + rlwinm r2,r2,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Clear translation + beq- mprdoit ; We are right on the boundary... + li r3,0 + bltlr- cr1 ; No, just return failure... 
+ +mprdoit: + + bt pfNoMSRirb,mprNoMSR ; No MSR... + + mtmsr r2 ; Translation and all off + isync ; Toss prefetch + b mprNoMSRx + +mprNoMSR: + mr r5,r0 + li r0,loadMSR ; Get the MSR setter SC + mr r3,r2 ; Get new MSR + sc ; Set it + mr r0,r5 + li r3,0 +mprNoMSRx: + +; +; We need to insure that there is no more than 1 BAT register that +; can get a hit. There could be repercussions beyond the ken +; of mortal man. It is best not to tempt fate. +; + li r10,0 ; Clear a register + mfdbatu r5,0 ; Save DBAT 0 high + mfdbatl r6,0 ; Save DBAT 0 low + mfdbatu r7,1 ; Save DBAT 1 high + mfdbatu r8,2 ; Save DBAT 2 high + mfdbatu r9,3 ; Save DBAT 3 high + + sync ; Make sure all is well + + mtdbatu 1,r10 ; Invalidate DBAT 1 + mtdbatu 2,r10 ; Invalidate DBAT 2 + mtdbatu 3,r10 ; Invalidate DBAT 3 + + rlwinm r10,r12,0,0,14 ; Round down to a 128k boundary + ori r11,r10,0x32 ; Set uncached, coherent, R/W + ori r10,r10,2 ; Make the upper half (128k, valid supervisor) + mtdbatl 0,r11 ; Set lower BAT first + mtdbatu 0,r10 ; Now the upper + sync ; Just make sure + + ori r11,r2,lo16(MASK(MSR_DR)) ; Turn on data translation + mtmsr r11 ; Do it for real + isync ; Make sure of it + + eieio ; Make sure of all previous accesses + sync ; Make sure it is all caught up + + lwz r11,0(r12) ; Get it and maybe machine check here + + eieio ; Make sure of ordering again + sync ; Get caught up yet again + isync ; Do not go further till we are here + + mtdbatu 0,r5 ; Restore DBAT 0 high + mtdbatl 0,r6 ; Restore DBAT 0 low + mtdbatu 1,r7 ; Restore DBAT 1 high + mtdbatu 2,r8 ; Restore DBAT 2 high + mtdbatu 3,r9 ; Restore DBAT 3 high + sync + + li r3,1 ; We made it + + mtmsr r0 ; Restore translation and exceptions + isync ; Toss speculations + + stw r11,0(r4) ; Save the loaded value + blr ; Return... + +; Force a line boundry here. 
This means we will be able to check addresses better + .align 5 + .globl EXT(ml_probe_read_mck) +LEXT(ml_probe_read_mck) + +/* Read physical address + * + * unsigned int ml_phys_read_byte(vm_offset_t paddr) + * + * Read the byte at physical address paddr. Memory should not be cache inhibited. + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_phys_read_byte) + +LEXT(ml_phys_read_byte) + + mfmsr r10 ; Save the current MSR + rlwinm r4,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions + rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation + + mtmsr r4 ; Translation and all off + isync ; Toss prefetch + + lbz r3,0(r3) ; Get the byte + sync + + mtmsr r10 ; Restore translation and rupts + isync + blr + +/* Read physical address + * + * unsigned int ml_phys_read(vm_offset_t paddr) + * + * Read the word at physical address paddr. Memory should not be cache inhibited. + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_phys_read) + +LEXT(ml_phys_read) + + mfmsr r0 ; Save the current MSR + rlwinm r4,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions + rlwinm r4,r4,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation + + mtmsr r4 ; Translation and all off + isync ; Toss prefetch + + lwz r3,0(r3) ; Get the word + sync + + mtmsr r0 ; Restore translation and rupts + isync + blr + +/* Write physical address byte + * + * void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) + * + * Write the byte at physical address paddr. Memory should not be cache inhibited. 
+ */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_phys_write_byte) + +LEXT(ml_phys_write_byte) + + mfmsr r0 ; Save the current MSR + rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions + rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation + + mtmsr r5 ; Translation and all off + isync ; Toss prefetch + + stb r4,0(r3) ; Set the byte + sync + + mtmsr r0 ; Restore translation and rupts + isync + blr + +/* Write physical address + * + * void ml_phys_write(vm_offset_t paddr, unsigned int data) + * + * Write the word at physical address paddr. Memory should not be cache inhibited. + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_phys_write) + +LEXT(ml_phys_write) + + mfmsr r0 ; Save the current MSR + rlwinm r5,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear interruptions + rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear translation + + mtmsr r5 ; Translation and all off + isync ; Toss prefetch + + stw r4,0(r3) ; Set the word + sync + + mtmsr r0 ; Restore translation and rupts + isync + blr + + +/* set interrupts enabled or disabled + * + * boolean_t set_interrupts_enabled(boolean_t enable) + * + * Set EE bit to "enable" and return old value as boolean + */ + +; Force a line boundry here + .align 5 + .globl EXT(set_interrupts_enabled) + +LEXT(set_interrupts_enabled) + + mfmsr r5 ; Get the current MSR + mr r4,r3 ; Save the old value + rlwinm r3,r5,17,31,31 ; Set return value + rlwimi r5,r4,15,16,16 ; Insert new EE bit + andi. r7,r5,lo16(MASK(MSR_EE)) ; Interruptions + bne CheckPreemption +NoPreemption: + mtmsr r5 ; Slam enablement + blr + +CheckPreemption: + mfsprg r7,0 + lwz r8,PP_NEED_AST(r7) + lwz r7,PP_CPU_DATA(r7) + li r6,AST_URGENT + lwz r8,0(r8) + lwz r7,CPU_PREEMPTION_LEVEL(r7) + lis r0,HIGH_ADDR(DoPreemptCall) + and. r8,r8,r6 + ori r0,r0,LOW_ADDR(DoPreemptCall) + beq+ NoPreemption + cmpi cr0, r7, 0 + bne+ NoPreemption + sc + mtmsr r5 + blr + + +/* Set machine into idle power-saving mode. 
+ * + * void machine_idle_ppc(void) + * + * We will use the PPC NAP or DOZE for this. + * This call always returns. Must be called with spllo (i.e., interruptions + * enabled). + * + */ + + +; Force a line boundry here + .align 5 + .globl EXT(machine_idle_ppc) + +LEXT(machine_idle_ppc) + + mfmsr r3 ; Get the current MSR + rlwinm r5,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + mtmsr r5 ; Hold up interruptions for now + mfsprg r12,0 ; Get the per_proc_info + mfspr r6,hid0 ; Get the current power-saving mode + mfsprg r11,2 ; Get CPU specific features + rlwinm r6,r6,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though) + mtcrf 0xC7,r11 ; Get the facility flags + + lis r4,hi16(napm) ; Assume we can nap + bt pfWillNapb,yesnap ; Yeah, nap is ok... + + lis r4,hi16(dozem) ; Assume we can doze + bt pfCanDozeb,yesnap ; We can sleep or doze one this machine... + + ori r3,r3,lo16(MASK(MSR_EE)) ; Flip on EE + mtmsr r3 ; Turn interruptions back on + blr ; Leave... + +yesnap: mftbu r9 ; Get the upper timebase + mftb r7 ; Get the lower timebase + mftbu r8 ; Get the upper one again + cmplw r9,r8 ; Did the top tick? + bne- yesnap ; Yeah, need to get it again... + stw r8,napStamp(r12) ; Set high order time stamp + stw r7,napStamp+4(r12) ; Set low order nap stamp + + bf pfL1nncb,minoflushl1 ; The L1 is coherent in nap/doze... +; +; 7450 does not keep L1 cache coherent across nap/sleep it must alwasy flush. +; It does not have a L1 flush assist, so we do not test for it here. +; +; Note that the time stamp take above is not completely accurate for 7450 +; because we are about to flush the L1 cache and that takes a bit of time. +; + cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache + bf- cr0_eq,minoflushl1 ; No level 1 to flush... + rlwinm. r0,r4,0,ice,dce ; Were either of the level 1s on? + beq- minoflushl1 ; No, no need to flush... 
+ +miswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size + rlwinm r2,r0,0,1,30 ; Double it + add r0,r0,r2 ; Get 3 times cache size + rlwinm r2,r5,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Turn off data translation + rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines + lis r3,0xFFF0 ; Dead recon ROM address for now + mtctr r0 ; Number of lines to flush + mtmsr r2 ; Do it + isync + +miswfldl1a: lwz r2,0(r3) ; Flush anything else + addi r3,r3,32 ; Next line + bdnz miswfldl1a ; Flush the lot... + +miinvdl1: sync ; Make sure all flushes have been committed + mtmsr r5 ; Put back data translation + isync + + mfspr r8,hid0 ; Get the HID0 bits + li r7,lo16(icem|dcem) ; Get the cache enable bits + andc r8,r8,r7 ; Clear cache enables + mtspr hid0,r8 ; and turn off L1 cache + sync ; Make sure all is done + + ori r8,r8,lo16(icfim|dcfim) ; Set the HID0 bits for invalidate + sync + isync + + mtspr hid0,r8 ; Start the invalidate + sync + +minoflushl1: + +; +; We have to open up interruptions here because book 4 says that we should +; turn on only the POW bit and that we should have interrupts enabled +; The interrupt handler will detect that nap or doze is set if an interrupt +; is taken and set everything up to return directly to machine_idle_ret. +; So, make sure everything we need there is already set up... +; + ori r7,r5,lo16(MASK(MSR_EE)) ; Flip on EE + or r6,r6,r4 ; Set nap or doze + oris r5,r7,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR + mtspr hid0,r6 ; Set up the HID for nap/doze + isync ; Make sure it is set + mtmsr r7 ; Enable for interrupts + rlwinm. r11,r11,0,pfAltivecb,pfAltivecb ; Do we have altivec? + beq- minovec ; No... + dssall ; Stop the streams before we nap/doze + +minovec: sync ; Make sure queues are clear + mtmsr r5 ; Nap or doze + isync ; Make sure this takes before we proceed + b minovec ; loop if POW does not take +; +; Note that the interrupt handler will turn off the nap/doze bits in the hid. 
+; Also remember that the interrupt handler will force return to here whenever +; the nap/doze bits are set. +; + .globl EXT(machine_idle_ret) +LEXT(machine_idle_ret) + mtmsr r7 ; Make sure the MSR is what we want + isync ; In case we turn on translation + + blr ; Return... + +/* Put machine to sleep. + * This call never returns. We always exit sleep via a soft reset. + * All external interruptions must be drained at this point and disabled. + * + * void ml_ppc_sleep(void) + * + * We will use the PPC SLEEP for this. + * + * There is one bit of hackery in here: we need to enable for + * interruptions when we go to sleep and there may be a pending + * decrimenter rupt. So we make the decrimenter 0x7FFFFFFF and enable for + * interruptions. The decrimenter rupt vector recognizes this and returns + * directly back here. + * + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_ppc_sleep) + +LEXT(ml_ppc_sleep) + +#if 0 + mfmsr r5 ; Hack to spin instead of sleep + rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation + rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + mtmsr r5 ; No talking + isync + +; No interrupts allowed after we get the savearea + + mfsprg r6,0 ; Get the per_proc + mfsprg r7,1 ; Get the pending savearea + stw r7,savedSave(r6) ; Save the savearea for when we wake up + +deadsleep: addi r3,r3,1 ; Make analyzer happy + addi r3,r3,1 + addi r3,r3,1 + b deadsleep ; Die the death of 1000 joys... 
+#endif + + mfsprg r12,0 ; Get the per_proc_info + mfspr r4,hid0 ; Get the current power-saving mode + eqv r10,r10,r10 ; Get all foxes + mfsprg r11,2 ; Get CPU specific features + mfmsr r5 ; Get the current MSR + rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF + rlwinm r4,r4,0,sleep+1,doze-1 ; Clear all possible power-saving modes (not DPM though) + mtdec r10 ; Load decrimenter with 0x7FFFFFFF + isync ; and make sure, + mfdec r9 ; really sure, it gets there + + mtcrf 0x07,r11 ; Get the cache flags, etc + + oris r4,r4,hi16(sleepm) ; Set sleep + rlwinm r5,r5,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation +; +; Note that we need translation off before we set the HID to sleep. Otherwise +; we will ignore any PTE misses that occur and cause an infinite loop. +; + bt pfNoMSRirb,mpsNoMSR ; No MSR... + + mtmsr r5 ; Translation off + isync ; Toss prefetch + b mpsNoMSRx + +mpsNoMSR: + li r0,loadMSR ; Get the MSR setter SC + mr r3,r5 ; Get new MSR + sc ; Set it +mpsNoMSRx: + + ori r3,r5,lo16(MASK(MSR_EE)) ; Flip on EE + sync + mtspr hid0,r4 ; Set up the HID to sleep + + mtmsr r3 ; Enable for interrupts to drain decrimenter + + add r6,r4,r5 ; Just waste time + add r6,r6,r4 ; A bit more + add r6,r6,r5 ; A bit more + + mtmsr r5 ; Interruptions back off + isync ; Toss prefetch + + mfsprg r7,1 ; Get the pending savearea + stw r7,savedSave(r12) ; Save the savearea for when we wake up + +; +; We are here with translation off, interrupts off, all possible +; interruptions drained off, and a decrimenter that will not pop. +; + + bl EXT(cacheInit) ; Clear out the caches. This will leave them on + bl EXT(cacheDisable) ; Turn off all caches + + mfmsr r5 ; Get the current MSR + oris r5,r5,hi16(MASK(MSR_POW)) ; Turn on power management in next MSR + ; Leave EE off because power goes off shortly + +slSleepNow: sync ; Sync it all up + mtmsr r5 ; Do sleep with interruptions enabled + isync ; Take a pill + b slSleepNow ; Go back to sleep if we wake up... 
+ + + +/* Initialize all caches including the TLBs + * + * void cacheInit(void) + * + * This is used to force the caches to an initial clean state. First, we + * check if the cache is on, if so, we need to flush the contents to memory. + * Then we invalidate the L1. Next, we configure and invalidate the L2 etc. + * Finally we turn on all of the caches + * + * Note that if translation is not disabled when this is called, the TLB will not + * be completely clear after return. + * + */ + +; Force a line boundry here + .align 5 + .globl EXT(cacheInit) + +LEXT(cacheInit) + + mfsprg r12,0 ; Get the per_proc_info + mfspr r9,hid0 ; Get the current power-saving mode + + mfsprg r11,2 ; Get CPU specific features + mfmsr r7 ; Get the current MSR + rlwinm r4,r9,0,dpm+1,doze-1 ; Clear all possible power-saving modes (also disable DPM) + rlwimi r11,r11,pfL23lckb+1,31,31 ; Move pfL23lck to another position (to keep from using non-volatile CRs) + rlwinm r5,r7,0,MSR_DR_BIT+1,MSR_IR_BIT-1 ; Turn off translation + rlwinm r5,r5,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + mtcrf 0x87,r11 ; Get the feature flags + mtspr hid0,r4 ; Set up the HID + + bt pfNoMSRirb,ciNoMSR ; No MSR... + + mtmsr r5 ; Translation and all off + isync ; Toss prefetch + b ciNoMSRx + +ciNoMSR: + li r0,loadMSR ; Get the MSR setter SC + mr r3,r5 ; Get new MSR + sc ; Set it +ciNoMSRx: + + bf pfAltivecb,cinoDSS ; No Altivec here... + + dssall ; Stop streams + sync + +cinoDSS: lis r5,hi16(EXT(tlb_system_lock)) ; Get the TLBIE lock + li r0,128 ; Get number of TLB entries + ori r5,r5,lo16(EXT(tlb_system_lock)) ; Grab up the bottom part + + li r6,0 ; Start at 0 + lwarx r2,0,r5 ; ? + +citlbhang: lwarx r2,0,r5 ; Get the TLBIE lock + mr. r2,r2 ; Is it locked? + bne- citlbhang ; It is locked, go wait... + stwcx. r0,0,r5 ; Try to get it + bne- citlbhang ; We was beat... + + mtctr r0 ; Set the CTR + +cipurgeTLB: tlbie r6 ; Purge this entry + addi r6,r6,4096 ; Next page + bdnz cipurgeTLB ; Do them all... 
+ + mtcrf 0x80,r11 ; Set SMP capability + sync ; Make sure all TLB purges are done + eieio ; Order, order in the court + + bf pfSMPcapb,cinoSMP ; SMP incapable... + + tlbsync ; Sync all TLBs + sync + +cinoSMP: stw r2,0(r5) ; Unlock TLBIE lock + + cror cr0_eq,pfL1ib,pfL1db ; Check for either I- or D-cache + bf- cr0_eq,cinoL1 ; No level 1 to flush... + rlwinm. r0,r9,0,ice,dce ; Were either of the level 1s on? + beq- cinoL1 ; No, no need to flush... + + bf pfL1fab,ciswdl1 ; If no hw flush assist, go do by software... + + mfspr r8,msscr0 ; Get the memory system control register + oris r8,r8,hi16(dl1hwfm) ; Turn on the hardware flush request + + mtspr msscr0,r8 ; Start the flush operation + +ciwdl1f: mfspr r8,msscr0 ; Get the control register again + + rlwinm. r8,r8,0,dl1hwf,dl1hwf ; Has the flush request been reset yet? + bne ciwdl1f ; No, flush is still in progress... + b ciinvdl1 ; Go invalidate l1... + +; +; We need to either make this very complicated or to use ROM for +; the flush. The problem is that if during the following sequence a +; snoop occurs that invalidates one of the lines in the cache, the +; PLRU sequence will be altered making it possible to miss lines +; during the flush. So, we either need to dedicate an area of RAM +; to each processor, lock use of a RAM area, or use ROM. ROM is +; by far the easiest. Note that this is not an issue for machines +; that have harware flush assists. +; + +ciswdl1: lwz r0,pfl1dSize(r12) ; Get the level 1 cache size + rlwinm r2,r0,0,1,30 ; Double it + add r0,r0,r2 ; Get 3 times cache size + rlwinm r0,r0,26,6,31 ; Get 3/2 number of cache lines + lis r3,0xFFF0 ; Dead recon ROM address for now + mtctr r0 ; Number of lines to flush + +ciswfldl1a: lwz r2,0(r3) ; Flush anything else + addi r3,r3,32 ; Next line + bdnz ciswfldl1a ; Flush the lot... 
+ +ciinvdl1: sync ; Make sure all flushes have been committed + + mfspr r8,hid0 ; Get the HID0 bits + rlwinm r8,r8,0,dce+1,ice-1 ; Clear cache enables + mtspr hid0,r8 ; and turn off L1 cache + sync ; Make sure all is done + + ori r8,r8,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate + sync + isync + + mtspr hid0,r8 ; Start the invalidate and turn on cache + rlwinm r8,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits + mtspr hid0,r8 ; Turn off the invalidate (needed for some older machines) + sync + +cinoL1: +; +; Flush and disable the level 2 +; + bf pfL2b,cinol2 ; No level 2 cache to flush + + mfspr r8,l2cr ; Get the L2CR + lwz r3,pfl2cr(r12) ; Get the L2CR value + lis r0,hi16(l2sizm|l2clkm|l2ramm|l2ohm) ; Get confiuration bits + xor r2,r8,r3 ; Get changing bits? + ori r0,r0,lo16(l2slm|l2dfm|l2bypm) ; More config bits + and. r0,r0,r2 ; Did any change? + bne- ciinvdl2 ; Yes, just invalidate and get PLL synced... + + bf pfL2fab,ciswfl2 ; Flush not in hardware... + + mr r10,r3 ; Take a copy now + + bf 31,cinol2lck ; Skip if pfL23lck not set... + + oris r10,r10,hi16(l2ionlym|l2donlym) ; Set both instruction- and data-only + sync + mtspr l2cr,r10 ; Lock out the cache + sync + isync + +cinol2lck: ori r10,r10,lo16(l2hwfm) ; Request flush + sync ; Make sure everything is done + + mtspr l2cr,r10 ; Request flush + +cihwfl2: mfspr r10,l2cr ; Get back the L2CR + rlwinm. r10,r10,0,l2hwf,l2hwf ; Is the flush over? + bne+ cihwfl2 ; Nope, keep going... + b ciinvdl2 ; Flush done, go invalidate L2... + +ciswfl2: + lwz r0,pfl2Size(r12) ; Get the L2 size + oris r2,r3,hi16(l2dom) ; Set L2 to data only mode + mtspr l2cr,r2 ; Go into data only mode + sync ; Clean it up + + rlwinm r0,r0,27,5,31 ; Get the number of lines + lis r10,0xFFF0 ; Dead recon ROM for now + mtctr r0 ; Set the number of lines + +ciswfldl2a: lwz r0,0(r10) ; Load something to flush something + addi r10,r10,32 ; Next line + bdnz ciswfldl2a ; Do the lot... 
+ +ciinvdl2: rlwinm r3,r3,0,l2e+1,31 ; Clear the enable bit + b cinla ; Branch to next line... + + .align 5 +cinlc: mtspr l2cr,r3 ; Disable L2 + sync + isync + b ciinvl2 ; It is off, go invalidate it... + +cinla: b cinlb ; Branch to next... + +cinlb: sync ; Finish memory stuff + isync ; Stop speculation + b cinlc ; Jump back up and turn off cache... + +ciinvl2: sync + isync + oris r2,r3,hi16(l2im) ; Get the invalidate flag set + + mtspr l2cr,r2 ; Start the invalidate + sync + isync +ciinvdl2a: mfspr r2,l2cr ; Get the L2CR + bf pfL2ib,ciinvdl2b ; Flush not in hardware... + rlwinm. r2,r2,0,l2i,l2i ; Is the invalidate still going? + bne+ ciinvdl2a ; Assume so, this will take a looong time... + sync + b cinol2 ; No level 2 cache to flush +ciinvdl2b: + rlwinm. r2,r2,0,l2ip,l2ip ; Is the invalidate still going? + bne+ ciinvdl2a ; Assume so, this will take a looong time... + sync + mtspr l2cr,r3 ; Turn off the invalidate request + +cinol2: + +; +; Flush and enable the level 3 +; + bf pfL3b,cinol3 ; No level 3 cache to flush + + mfspr r8,l3cr ; Get the L3CR + lwz r3,pfl3cr(r12) ; Get the L3CR value + lis r0,hi16(l3pem|l3sizm|l3dxm|l3clkm|l3spom|l3ckspm) ; Get configuration bits + xor r2,r8,r3 ; Get changing bits? + ori r0,r0,lo16(l3pspm|l3repm|l3rtm|l3cyam|l3dmemm|l3dmsizm) ; More config bits + and. r0,r0,r2 ; Did any change? + bne- ciinvdl3 ; Yes, just invalidate and get PLL synced... + + sync ; 7450 book says do this even though not needed + mr r10,r3 ; Take a copy now + + bf 31,cinol3lck ; Skip if pfL23lck not set... + + oris r10,r10,hi16(l3iom) ; Set instruction-only + ori r10,r10,lo16(l3donlym) ; Set data-only + sync + mtspr l3cr,r10 ; Lock out the cache + sync + isync + +cinol3lck: ori r10,r10,lo16(l3hwfm) ; Request flush + sync ; Make sure everything is done + + mtspr l3cr,r10 ; Request flush + +cihwfl3: mfspr r10,l3cr ; Get back the L3CR + rlwinm. r10,r10,0,l3hwf,l3hwf ; Is the flush over? + bne+ cihwfl3 ; Nope, keep going... 
+ +ciinvdl3: rlwinm r3,r3,0,l3e+1,31 ; Clear the enable bit + sync ; Make sure of life, liberty, and justice + mtspr l3cr,r3 ; Disable L3 + sync + + ori r3,r3,lo16(l3im) ; Get the invalidate flag set + + mtspr l3cr,r3 ; Start the invalidate + +ciinvdl3b: mfspr r3,l3cr ; Get the L3CR + rlwinm. r3,r3,0,l3i,l3i ; Is the invalidate still going? + bne+ ciinvdl3b ; Assume so... + sync + + mfspr r3,l3pdet ; ? + rlwimi r3,r3,28,0,23 ; ? + oris r3,r3,0xF000 ; ? + ori r3,r3,0x0080 ; ? + mtspr l3pdet,r3 ; ? + isync + + mfspr r3,l3cr ; Get the L3CR + rlwinm r3,r3,0,l3clken+1,l3clken-1 ; Clear the clock enable bit + mtspr l3cr,r3 ; Disable the clock + + li r2,128 ; ? +ciinvdl3c: addi r2,r2,-1 ; ? + cmplwi r2,0 ; ? + bne+ ciinvdl3c + + mfspr r10,msssr0 ; ? + rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ? + mtspr msssr0,r10 ; ? + sync + + oris r3,r3,hi16(l3em|l3clkenm) ; Turn on enable bit + mtspr l3cr,r3 ; Enable it + sync +cinol3: + bf pfL2b,cinol2a ; No level 2 cache to enable + + lwz r3,pfl2cr(r12) ; Get the L2CR value + oris r3,r3,hi16(l2em) ; Turn on enable bit + mtspr l2cr,r3 ; Enable it + sync + +; +; Invalidate and turn on L1s +; + +cinol2a: rlwinm r8,r9,0,dce+1,ice-1 ; Clear the I- and D- cache enables + mtspr hid0,r8 ; Turn off dem caches + sync + + ori r8,r9,lo16(icem|dcem|icfim|dcfim) ; Set the HID0 bits for enable, and invalidate + rlwinm r9,r8,0,dcfi+1,icfi-1 ; Turn off the invalidate bits + sync + isync + + mtspr hid0,r8 ; Start the invalidate and turn on L1 cache + mtspr hid0,r9 ; Turn off the invalidate (needed for some older machines) + sync + mtmsr r7 ; Restore MSR to entry + isync + blr ; Return... + + +/* Disables all caches + * + * void cacheDisable(void) + * + * Turns off all caches on the processor. They are not flushed. + * + */ + +; Force a line boundry here + .align 5 + .globl EXT(cacheDisable) + +LEXT(cacheDisable) + + mfsprg r11,2 ; Get CPU specific features + mtcrf 0x83,r11 ; Set feature flags + + bf pfAltivecb,cdNoAlt ; No vectors... 
+ + dssall ; Stop streams + +cdNoAlt: sync + + mfspr r5,hid0 ; Get the hid + rlwinm r5,r5,0,dce+1,ice-1 ; Clear the I- and D- cache enables + mtspr hid0,r5 ; Turn off dem caches + sync + + bf pfL2b,cdNoL2 ; Skip if no L2... + + mfspr r5,l2cr ; Get the L2 + rlwinm r5,r5,0,l2e+1,31 ; Turn off enable bit + + b cinlaa ; Branch to next line... + + .align 5 +cinlcc: mtspr l2cr,r5 ; Disable L2 + sync + isync + b cdNoL2 ; It is off, we are done... + +cinlaa: b cinlbb ; Branch to next... + +cinlbb: sync ; Finish memory stuff + isync ; Stop speculation + b cinlcc ; Jump back up and turn off cache... + +cdNoL2: + + bf pfL3b,cdNoL3 ; Skip down if no L3... + + mfspr r5,l3cr ; Get the L3 + rlwinm r5,r5,0,l3e+1,31 ; Turn off enable bit + rlwinm r5,r5,0,l3clken+1,l3clken-1 ; Turn off cache enable bit + mtspr l3cr,r5 ; Disable the caches + sync + +cdNoL3: + blr ; Leave... + + +/* Initialize processor thermal monitoring + * void ml_thrm_init(void) + * + * Build initial TAU registers and start them all going. + * We ca not do this at initial start up because we need to have the processor frequency first. + * And just why is this in assembler when it does not have to be?? Cause I am just too + * lazy to open up a "C" file, thats why. 
+ */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_thrm_init) + +LEXT(ml_thrm_init) + + mfsprg r12,0 ; Get the per_proc blok + lis r11,hi16(EXT(gPEClockFrequencyInfo)) ; Get top of processor information + mfsprg r10,2 ; Get CPU specific features + ori r11,r11,lo16(EXT(gPEClockFrequencyInfo)) ; Get bottom of processor information + mtcrf 0x40,r10 ; Get the installed features + + li r3,lo16(thrmtidm|thrmvm) ; Set for lower-than thermal event at 0 degrees + bflr pfThermalb ; No thermal monitoring on this cpu + mtspr thrm1,r3 ; Do it + + lwz r3,thrmthrottleTemp(r12) ; Get our throttle temprature + rlwinm r3,r3,31-thrmthre,thrmthrs,thrmthre ; Position it + ori r3,r3,lo16(thrmvm) ; Set for higher-than event + mtspr thrm2,r3 ; Set it + + lis r4,hi16(1000000) ; Top of million +; +; Note: some CPU manuals say this is processor clocks, some say bus rate. The latter +; makes more sense because otherwise we can not get over about 400MHz. +#if 0 + lwz r3,PECFIcpurate(r11) ; Get the processor speed +#else + lwz r3,PECFIbusrate(r11) ; Get the bus speed +#endif + ori r4,r4,lo16(1000000) ; Bottom of million + lis r7,hi16(thrmsitvm>>1) ; Get top of highest possible value + divwu r3,r3,r4 ; Get number of cycles per microseconds + ori r7,r7,lo16(thrmsitvm>>1) ; Get the bottom of the highest possible value + addi r3,r3,1 ; Insure we have enough + mulli r3,r3,20 ; Get 20 microseconds worth of cycles + cmplw r3,r7 ; Check against max + ble+ smallenuf ; It is ok... + mr r3,r7 ; Saturate + +smallenuf: rlwinm r3,r3,31-thrmsitve,thrmsitvs,thrmsitve ; Position + ori r3,r3,lo16(thrmem) ; Enable with at least 20micro sec sample + stw r3,thrm3val(r12) ; Save this in case we need it later + mtspr thrm3,r3 ; Do it + blr + + +/* Set thermal monitor bounds + * void ml_thrm_set(unsigned int low, unsigned int high) + * + * Set TAU to interrupt below low and above high. A value of + * zero disables interruptions in that direction. 
+ */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_thrm_set) + +LEXT(ml_thrm_set) + + mfmsr r0 ; Get the MSR + rlwinm r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Clear EE bit + mtmsr r6 + + mfsprg r12,0 ; Get the per_proc blok + + rlwinm. r6,r3,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled + mfsprg r9,2 ; Get CPU specific features + stw r3,thrmlowTemp(r12) ; Set the low temprature + mtcrf 0x40,r9 ; See if we can thermal this machine + rlwinm r9,r9,(((31-thrmtie)+(pfThermIntb+1))&31),thrmtie,thrmtie ; Set interrupt enable if this machine can handle it + bf pfThermalb,tsetcant ; No can do... + beq tsetlowo ; We are setting the low off... + ori r6,r6,lo16(thrmtidm|thrmvm) ; Set the lower-than and valid bit + or r6,r6,r9 ; Set interruption request if supported + +tsetlowo: mtspr thrm1,r6 ; Cram the register + + rlwinm. r6,r4,31-thrmthre,thrmthrs,thrmthre ; Position it and see if enabled + stw r4,thrmhighTemp(r12) ; Set the high temprature + beq tsethigho ; We are setting the high off... + ori r6,r6,lo16(thrmvm) ; Set valid bit + or r6,r6,r9 ; Set interruption request if supported + +tsethigho: mtspr thrm2,r6 ; Cram the register + +tsetcant: mtmsr r0 ; Reenable interruptions + blr ; Leave... + +/* Read processor temprature + * unsigned int ml_read_temp(void) + * + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_read_temp) + +LEXT(ml_read_temp) + + mfmsr r9 ; Save the MSR + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + li r5,15 ; Starting point for ranging (start at 15 so we do not overflow) + mfsprg r7,2 ; Get CPU specific features + mtmsr r8 ; Do not allow interruptions + mtcrf 0x40,r7 ; See if we can thermal this machine + bf pfThermalb,thrmcant ; No can do... 
+ + mfspr r11,thrm1 ; Save thrm1 + +thrmrange: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it + ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparision for less than + + mtspr thrm1,r4 ; Set the test value + +thrmreada: mfspr r3,thrm1 ; Get the thermal register back + rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet? + beq+ thrmreada ; Nope... + + rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold? + bne thrmsearch ; No, we went over... + + addi r5,r5,16 ; Start by trying every 16 degrees + cmplwi r5,127 ; Have we hit the max? + blt- thrmrange ; Got some more to do... + +thrmsearch: rlwinm r4,r5,31-thrmthre,thrmthrs,thrmthre ; Position it + ori r4,r4,lo16(thrmtidm|thrmvm) ; Flip on the valid bit and make comparision for less than + + mtspr thrm1,r4 ; Set the test value + +thrmread: mfspr r3,thrm1 ; Get the thermal register back + rlwinm. r0,r3,0,thrmtiv,thrmtiv ; Has it settled yet? + beq+ thrmread ; Nope... + + rlwinm. r0,r3,0,thrmtin,thrmtin ; Are we still under the threshold? + beq thrmdone ; No, we hit it... + addic. r5,r5,-1 ; Go down a degree + bge+ thrmsearch ; Try again (until we are below freezing)... + +thrmdone: addi r3,r5,1 ; Return the temprature (bump it up to make it correct) + mtspr thrm1,r11 ; Restore the thermal register + mtmsr r9 ; Re-enable interruptions + blr ; Leave... + +thrmcant: eqv r3,r3,r3 ; Return bogus temprature because we can not read it + mtmsr r9 ; Re-enable interruptions + blr ; Leave... + +/* Throttle processor speed up or down + * unsigned int ml_throttle(unsigned int step) + * + * Returns old speed and sets new. Both step and return are values from 0 to + * 255 that define number of throttle steps, 0 being off and "ictcfim" is max * 2. 
+ * + */ + +; Force a line boundry here + .align 5 + .globl EXT(ml_throttle) + +LEXT(ml_throttle) + + mfmsr r9 ; Save the MSR + rlwinm r8,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + cmplwi r3,lo16(ictcfim>>1) ; See if we are going too far + mtmsr r8 ; Do not allow interruptions + ble+ throtok ; Throttle value is ok... + li r3,lo16(ictcfim>>1) ; Set max + +throtok: rlwinm. r4,r3,1,ictcfib,ictcfie ; Set the throttle + beq throtoff ; Skip if we are turning it off... + ori r4,r4,lo16(thrmvm) ; Turn on the valid bit + +throtoff: mfspr r3,ictc ; Get the old throttle + mtspr ictc,r4 ; Set the new + rlwinm r3,r3,31,1,31 ; Shift throttle value over + mtmsr r9 ; Restore interruptions + blr ; Return... + +/* +** ml_get_timebase() +** +** Entry - R3 contains pointer to 64 bit structure. +** +** Exit - 64 bit structure filled in. +** +*/ +; Force a line boundry here + .align 5 + .globl EXT(ml_get_timebase) + +LEXT(ml_get_timebase) + +loop: + mftbu r4 + mftb r5 + mftbu r6 + cmpw r6, r4 + bne- loop + + stw r4, 0(r3) + stw r5, 4(r3) + + blr + +/* +** ml_sense_nmi() +** +*/ +; Force a line boundry here + .align 5 + .globl EXT(ml_sense_nmi) + +LEXT(ml_sense_nmi) + + blr ; Leave... + diff --git a/osfmk/ppc/machine_rpc.h b/osfmk/ppc/machine_rpc.h new file mode 100644 index 000000000..dfda35fc5 --- /dev/null +++ b/osfmk/ppc/machine_rpc.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _MACHINE_RPC_H_ +#define _MACHINE_RPC_H_ + +#if ETAP_EVENT_MONITOR +#define ETAP_EXCEPTION_PROBE(_f, _th, _ex, _sysnum) \ + if (_ex == EXC_SYSCALL) { \ + ETAP_PROBE_DATA(ETAP_P_SYSCALL_UNIX, \ + _f, \ + _th, \ + _sysnum, \ + sizeof(int)); \ + } +#else /* ETAP_EVENT_MONITOR */ +#define ETAP_EXCEPTION_PROBE(_f, _th, _ex, _sysnum) +#endif /* ETAP_EVENT_MONITOR */ + +#endif /* _MACHINE_RPC_H_ */ + + diff --git a/osfmk/ppc/machlimits.h b/osfmk/ppc/machlimits.h new file mode 100644 index 000000000..453c62f15 --- /dev/null +++ b/osfmk/ppc/machlimits.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:41 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:02 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.2.1 1996/12/09 16:55:05 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * New file based on hp_pa + * [1996/12/09 11:09:22 stephen] + * + * $EndLog$ + */ +/* + * Copyright (c) 1988 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * @(#)machlimits.h 7.1 (Berkeley) 2/15/89 + */ +#ifndef _MACH_MACHLIMITS_H_ +#define _MACH_MACHLIMITS_H_ + +#define CHAR_BIT 8 /* number of bits in a char */ + +#define SCHAR_MAX 127 /* max value for a signed char */ +#define SCHAR_MIN (-128) /* min value for a signed char */ + +#define UCHAR_MAX 255U /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ + +#define USHRT_MAX 65535U /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ + +#define UINT_MAX 0xFFFFFFFFU /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ + +#define ULONG_MAX UINT_MAX /* max value for an unsigned long */ +#define LONG_MAX INT_MAX /* max value for a long */ +#define LONG_MIN INT_MIN /* min value for a long */ + +/* Must be at least two, for internationalization (NLS/KJI) */ +#define MB_LEN_MAX 4 /* multibyte characters */ + +#endif /* _MACH_MACHLIMITS_H_ */ diff --git a/osfmk/ppc/machparam.h b/osfmk/ppc/machparam.h new file mode 100644 index 000000000..7df7e9afc --- /dev/null +++ b/osfmk/ppc/machparam.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Copyright (c) 1990, 1991 The University of Utah and + * the Center for Software Science at the University of Utah (CSS). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the Center + * for Software Science at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSS requests users of this software to return to css-dist@cs.utah.edu any + * improvements that they make and grant CSS redistribution rights. + * + * Utah $Hdr: machparam.h 1.7 92/05/22$ + */ + +#ifndef _PPC_MACHPARAM_H_ +#define _PPC_MACHPARAM_H_ + +/* + * Machine dependent constants for ppc. + * Added as needed (for device drivers). + */ +#define NBPG 4096 /* bytes/page */ +#define PGSHIFT 12 /* LOG2(NBPG) */ + +#define DEV_BSHIFT 10 /* log2(DEV_BSIZE) */ + +/* + * Disk devices do all IO in 1024-byte blocks. 
+ */ +#define DEV_BSIZE 1024 + +#define btop(x) ((x)>>PGSHIFT) +#define ptob(x) ((x)<<PGSHIFT) + +/* Clicks to disk blocks */ +#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT)) + +/* clicks to bytes */ +#define ctob(x) ((x)<<PGSHIFT) + +/* bytes to clicks */ +#define btoc(x) ((x)>>PGSHIFT) + +#endif /* _PPC_MACHPARAM_H_ */ diff --git a/osfmk/ppc/mappings.c b/osfmk/ppc/mappings.c new file mode 100644 index 000000000..192569f7d --- /dev/null +++ b/osfmk/ppc/mappings.c @@ -0,0 +1,1899 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This file is used to maintain the virtual to real mappings for a PowerPC machine. + * The code herein is primarily used to bridge between the pmap layer and the hardware layer. + * Currently, some of the function of this module is contained within pmap.c. We may want to move + * all of this into it (or most anyway) for the sake of performance. We shall see as we write it. + * + * We also depend upon the structure of the phys_entry control block. We do put some processor + * specific stuff in there. 
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include /* (TEST/DEBUG) */ + +#define PERFTIMES 0 + +#if PERFTIMES && DEBUG +#define debugLog2(a, b, c) dbgLog2(a, b, c) +#else +#define debugLog2(a, b, c) +#endif + +vm_map_t mapping_map = VM_MAP_NULL; + +unsigned int incrVSID = 0; /* VSID increment value */ +unsigned int mappingdeb0 = 0; +unsigned int mappingdeb1 = 0; +extern unsigned int hash_table_size; +extern vm_offset_t mem_size; +/* + * ppc_prot translates from the mach representation of protections to the PPC version. + * Calculation of it like this saves a memory reference - and maybe a couple of microseconds. + * It eliminates the used of this table. + * unsigned char ppc_prot[8] = { 0, 3, 2, 2, 3, 3, 2, 2 }; + */ + +#define ppc_prot(p) ((0xAFAC >> (p << 1)) & 3) + +/* + * About PPC VSID generation: + * + * This function is called to generate an address space ID. This space ID must be unique within + * the system. For the PowerPC, it is used to build the VSID. We build a VSID in the following + * way: space ID << 4 | segment. Since a VSID is 24 bits, and out of that, we reserve the last + * 4, so, we can have 2^20 (2M) unique IDs. Each pmap has a unique space ID, so we should be able + * to have 2M pmaps at a time, which we couldn't, we'd run out of memory way before then. The + * problem is that only a certain number of pmaps are kept in a free list and if that is full, + * they are release. This causes us to lose track of what space IDs are free to be reused. + * We can do 4 things: 1) not worry about it, 2) keep all free pmaps, 3) rebuild all mappings + * when the space ID wraps, or 4) scan the list of pmaps and find a free one. + * + * Yet another consideration is the hardware use of the VSID. 
It is used as part of the hash + * calculation for virtual address lookup. An improperly chosen value could potentially cause + * too many hashes to hit the same bucket, causing PTEG overflows. The actual hash function + * is (page index XOR vsid) mod number of ptegs. For a 32MB machine, using the suggested + * hash table size, there are 2^12 (8192) PTEGs. Remember, though, that the bottom 4 bits + * are reserved for the segment number, which means that we really have 2^(12-4) 512 space IDs + * before we start hashing to the same buckets with the same vaddrs. Also, within a space ID, + * every 8192 pages (32MB) within a segment will hash to the same bucket. That's 8 collisions + * per segment. So, a scan of every page for 256MB would fill 32 PTEGs completely, but + * with no overflow. I don't think that this is a problem. + * + * There may be a problem with the space ID, though. A new space ID is generate (mainly) + * whenever there is a fork. There shouldn't really be any problem because (for a 32MB + * machine) we can have 512 pmaps and still not have hash collisions for the same address. + * The potential problem, though, is if we get long-term pmaps that have space IDs that are + * the same modulo 512. We can reduce this problem by having the segment number be bits + * 0-3 of the space ID rather than 20-23. Doing this means that, in effect, corresponding + * vaddrs in different segments hash to the same PTEG. While this is somewhat of a problem, + * I don't think that it is as signifigant as the other, so, I'll make the space ID + * with segment first. + * + * The final, and biggest problem is the wrap, which will happen every 2^20 space IDs. + * While this is a problem that should only happen in periods counted in weeks, it can and + * will happen. This is assuming a monotonically increasing space ID. If we were to search + * for an inactive space ID, there could not be a wrap until there was 2^20 concurrent space IDs. + * That's pretty unlikely to happen. 
There couldn't be enough storage to support a million tasks. + * + * So, what we do is to keep all active pmaps in a chain (anchored from kernel_pmap and + * locked by free_pmap_lock) that is sorted in VSID sequence order. + * + * Whenever we need a VSID, we walk the list looking for the next in the sequence from + * the last that was freed. The we allocate that. + * + * NOTE: We must be called with interruptions off and free_pmap_lock held. + * + */ + +/* + * mapping_init(); + * Do anything that needs to be done before the mapping system can be used. + * Hash table must be initialized before we call this. + * + * Calculate the SID increment. Currently we use size^(1/2) + size^(1/4) + 1; + */ + +void mapping_init(void) { + + unsigned int tmp; + + __asm__ volatile("cntlzw %0, %1" : "=r" (tmp) : "r" (hash_table_size)); /* Get number of leading 0s */ + + incrVSID = 1 << ((32 - tmp + 1) >> 1); /* Get ceiling of sqrt of table size */ + incrVSID |= 1 << ((32 - tmp + 1) >> 2); /* Get ceiling of quadroot of table size */ + incrVSID |= 1; /* Set bit and add 1 */ + return; + +} + + +/* + * mapping_remove(pmap_t pmap, vm_offset_t va); + * Given a pmap and virtual address, this routine finds the mapping and removes it from + * both its PTEG hash list and the physical entry list. The mapping block will be added to + * the free list. If the free list threshold is reached, garbage collection will happen. + * We also kick back a return code to say whether or not we had one to remove. + * + * We have a strict ordering here: the mapping must be removed from the PTEG hash list before + * it can be removed from the physical entry list. This allows us to get by with only the PTEG + * hash lock at page fault time. The physical entry lock must be held while we remove the mapping + * from both lists. The PTEG lock is one of the lowest level locks. No PTE fault, interruptions, + * losing control, getting other locks, etc., are allowed when you hold it. You do, and you die. 
+ * It's just that simple! + * + * When the phys_entry lock is held, the mappings chained to that one are guaranteed to stay around. + * However, a mapping's order on the PTEG hash chain is not. The interrupt handler uses the PTEG + * lock to control the hash cahin and may move the position of the mapping for MRU calculations. + * + * Note that mappings do not need to point to a physical entry. When they don't, it indicates + * the mapping is outside of physical memory and usually refers to a memory mapped device of + * some sort. Naturally, we can't lock what we don't have, so the phys entry lock and unlock + * routines return normally, but don't do anything. + */ + +boolean_t mapping_remove(pmap_t pmap, vm_offset_t va) { /* Remove a single mapping for this VADDR + Returns TRUE if a mapping was found to remove */ + + mapping *mp, *mpv; + register blokmap *blm; + spl_t s; + unsigned int *useadd, *useaddr; + int i; + + debugLog2(1, va, pmap->space); /* start mapping_remove */ + + s=splhigh(); /* Don't bother me */ + + mp = hw_lock_phys_vir(pmap->space, va); /* Lock the physical entry for this mapping */ + + if(!mp) { /* Did we find one? 
*/ + if(mp = (mapping *)hw_rem_blk(pmap, va, va)) { /* No normal pages, try to remove an odd-sized one */ + splx(s); /* Allow 'rupts now */ + + if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */ + blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFE)); /* Get virtual address */ + panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n", + pmap, va, blm); + } +#if 0 + blm = (blokmap *)hw_cpv(mp); /* (TEST/DEBUG) */ + kprintf("mapping_remove: removed block map - bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */ + blm, blm->start, blm->end, blm->PTEr); +#endif + mapping_free(hw_cpv(mp)); /* Release it */ + debugLog2(2, 1, 0); /* End mapping_remove */ + return TRUE; /* Tell them we did it */ + } + splx(s); /* Restore the interrupt level */ + debugLog2(2, 0, 0); /* end mapping_remove */ + return FALSE; /* Didn't find any, return FALSE... */ + } + if((unsigned int)mp&1) { /* Did we timeout? */ + panic("mapping_remove: timeout locking physical entry\n"); /* Yeah, scream about it! */ + splx(s); /* Restore the interrupt level */ + return FALSE; /* Bad hair day, return FALSE... */ + } + + mpv = hw_cpv(mp); /* Get virtual address of mapping */ +#if DEBUG + if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n"); +#else + (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */ +#endif + useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */ + useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */ + (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 
0x00010000 : 1); /* Increment the even or odd slot */ + +#if 0 + for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */ + if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */ + panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", + i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap); + } + } +#endif + + hw_rem_map(mp); /* Remove the corresponding mapping */ + + if(mpv->physent)hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ + + splx(s); /* Was there something you needed? */ + + mapping_free(mpv); /* Add mapping to the free list */ + debugLog2(2, 1, 0); /* end mapping_remove */ + return TRUE; /* Tell them we did it */ +} + +/* + * mapping_purge(struct phys_entry *pp) - release all mappings for this physent to the free list + * + * This guy releases any mappings that exist for a physical page. + * We get the lock on the phys_entry, and hold it through out this whole routine. + * That way, no one can change the queue out from underneath us. We keep fetching + * the physents mapping anchor until it is null, then we're done. + * + * For each mapping, we call the remove routine to remove it from the PTEG hash list and + * decriment the pmap's residency count. Then we release the mapping back to the free list. 
+ * + */ + +void mapping_purge(struct phys_entry *pp) { /* Remove all mappings for this physent */ + + mapping *mp, *mpv; + spl_t s; + unsigned int *useadd, *useaddr, uindx; + int i; + + s=splhigh(); /* Don't bother me */ + debugLog2(3, pp->pte1, 0); /* start mapping_purge */ + + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */ + panic("\nmapping_purge: Timeout attempting to lock physical entry at %08X: %08X %08X\n", + pp, pp->phys_link, pp->pte1); /* Complain about timeout */ + } + + while(mp = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)) { /* Keep going so long as there's another */ + + mpv = hw_cpv(mp); /* Get the virtual address */ +#if DEBUG + if(hw_atomic_sub(&mpv->pmap->stats.resident_count, 1) < 0) panic("pmap resident count went negative\n"); +#else + (void)hw_atomic_sub(&mpv->pmap->stats.resident_count, 1); /* Decrement the resident page count */ +#endif + + uindx = ((mpv->PTEv >> 24) & 0x78) | ((mpv->PTEv >> 3) & 7); /* Join segment number and top 2 bits of the API */ + useadd = (unsigned int *)&mpv->pmap->pmapUsage[uindx]; /* Point to slot to bump */ + useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */ + (void)hw_atomic_sub(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */ + +#if 0 + for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */ + if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */ + panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", + i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap); + } + } +#endif + + + hw_rem_map(mp); /* Remove the mapping */ + mapping_free(mpv); /* Add mapping to the free list */ + } + + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + + debugLog2(4, pp->pte1, 0); /* end mapping_purge */ + splx(s); /* Was there something you needed? 
*/ + return; /* Tell them we did it */ +} + + +/* + * mapping_make(pmap, pp, va, spa, prot, attr, locked) - map a virtual address to a real one + * + * This routine takes the given parameters, builds a mapping block, and queues it into the + * correct lists. + * + * The pp parameter can be null. This allows us to make a mapping that is not + * associated with any physical page. We may need this for certain I/O areas. + * + * If the phys_entry address is null, we neither lock or chain into it. + * If locked is 1, we already hold the lock on the phys_entry and won't get nor release it. + */ + +mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked) { /* Make an address mapping */ + + register mapping *mp, *mpv; + unsigned int *useadd, *useaddr; + spl_t s; + int i; + + debugLog2(5, va, pa); /* start mapping_purge */ + mpv = mapping_alloc(); /* Get a spare mapping block */ + + mpv->pmap = pmap; /* Initialize the pmap pointer */ + mpv->physent = pp; /* Initialize the pointer to the physical entry */ + mpv->PTEr = ((unsigned int)pa & ~(PAGE_SIZE - 1)) | attr<<3 | ppc_prot(prot); /* Build the real portion of the PTE */ + mpv->PTEv = (((unsigned int)va >> 1) & 0x78000000) | (pmap->space << 7) | (((unsigned int)va >> 22) & 0x0000003F); /* Build the VSID */ + + s=splhigh(); /* Don't bother from now on */ + + mp = hw_cvp(mpv); /* Get the physical address of this */ + + if(pp && !locked) { /* Is there a physical entry? Or do we already hold the lock? 
*/ + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */ + panic("\nmapping_make: Timeout attempting to lock physical entry at %08X: %08X %08X\n", + pp, pp->phys_link, pp->pte1); /* Complain about timeout */ + } + } + + if(pp) { /* See of there is a physcial entry */ + mpv->next = (mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS); /* Move the old anchor to the new mappings forward */ + pp->phys_link = (mapping *)((unsigned int)mp | (unsigned int)pp->phys_link & PHYS_FLAGS); /* Point the anchor at us. Now we're on the list (keep the flags) */ + } + + hw_add_map(mp, pmap->space, va); /* Stick it on the PTEG hash list */ + + (void)hw_atomic_add(&mpv->pmap->stats.resident_count, 1); /* Increment the resident page count */ + useadd = (unsigned int *)&pmap->pmapUsage[(va >> pmapUsageShft) & pmapUsageMask]; /* Point to slot to bump */ + useaddr = (unsigned int *)((unsigned int)useadd & -4); /* Round down to word */ + (void)hw_atomic_add(useaddr, (useaddr == useadd) ? 0x00010000 : 1); /* Increment the even or odd slot */ +#if 0 + for(i = 0; i < (pmapUsageMask + 1); i++) { /* (TEST/DEBUG) */ + if((mpv->pmap->pmapUsage[i]) > 8192) { /* (TEST/DEBUG) */ + panic("mapping_remove: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", + i * pmapUsageSize, mpv->pmap->pmapUsage[i], mpv->pmap); + } + } +#endif + + if(pp && !locked)hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* If we have one and we didn't hold on entry, unlock the physical entry */ + + splx(s); /* Ok for interruptions now */ + debugLog2(6, pmap->space, prot); /* end mapping_purge */ + return mpv; /* Leave... */ +} + + +/* + * Enters optimal translations for odd-sized V=F blocks. + * + * Builds a block map for each power-of-two hunk o' address + * that exists. This is specific to the processor type. + * PPC uses BAT register size stuff. Future PPC might have + * something else. 
+ * + * The supplied va is expected to be maxoptimal vs the supplied boundary. We're too + * stupid to know otherwise so we only look at the va anyhow, so there... + * + */ + +void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr) { /* Maps optimal autogenned blocks */ + + register blokmap *blm, *oblm; + unsigned int pg; + unsigned int maxsize, boundary, leading, trailing, cbsize, minsize, tomin; + int i, maxshft, nummax, minshft; + +#if 1 + kprintf("mapping_block_map_opt: pmap=%08X; va=%08X; pa=%08X; ; bnd=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ + pmap, va, pa, bnd, size, prot, attr); +#endif + + minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */ + maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */ + + minshft = 31 - cntlzw(minsize); /* Shift to position minimum size */ + maxshft = 31 - cntlzw(blokValid); /* Shift to position maximum size */ + + leading = ((va + bnd - 1) & -bnd) - va; /* Get size of leading area */ + trailing = size - leading; /* Get size of trailing area */ + tomin = ((va + minsize - 1) & -minsize) - va; /* Get size needed to round up to the minimum block size */ + +#if 1 + kprintf("mapping_block_map_opt: bnd=%08X; leading=%08X; trailing=%08X; tomin=%08X\n", bnd, leading, trailing, tomin); /* (TEST/DEBUG) */ +#endif + + if(tomin)pmap_map_block(pmap, va, pa, tomin, prot, attr, 0); /* Map up to minimum block size */ + + va = va + tomin; /* Adjust virtual start */ + pa = pa + tomin; /* Adjust physical start */ + leading = leading - tomin; /* Adjust leading size */ + +/* + * Some of this code is very classic PPC. We need to fix this up. 
+ */ + + leading = leading >> minshft; /* Position for bit testing */ + cbsize = minsize; /* Set the minimum size */ + + for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, small to large */ + + if(leading & 1) { + pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */ + pa = pa + cbsize; /* Bump up physical address */ + va = va + cbsize; /* Bump up virtual address */ + } + + leading = leading >> 1; /* Shift up to next size */ + cbsize = cbsize << 1; /* Here too */ + + } + + nummax = trailing >> maxshft; /* Get number of max size blocks left */ + for(i=0; i < nummax - 1; i++) { /* Account for all max size block left but 1 */ + pmap_map_block(pmap, va, pa, maxsize, prot, attr, 0); /* Map up to next boundary */ + + pa = pa + maxsize; /* Bump up physical address */ + va = va + maxsize; /* Bump up virtual address */ + trailing -= maxsize; /* Back off what we just did */ + } + + cbsize = maxsize; /* Start at maximum size */ + + for(i = 0; i < (maxshft - minshft + 1); i ++) { /* Cycle through all block sizes, high to low */ + + if(trailing & cbsize) { + trailing &= ~cbsize; /* Remove the block we are allocating */ + pmap_map_block(pmap, va, pa, cbsize, prot, attr, 0); /* Map up to next boundary */ + pa = pa + cbsize; /* Bump up physical address */ + va = va + cbsize; /* Bump up virtual address */ + } + cbsize = cbsize >> 1; /* Next size down */ + } + + if(trailing) pmap_map_block(pmap, va, pa, trailing, prot, attr, 0); /* Map up to end */ + + return; /* Return */ +} + + +/* + * Enters translations for odd-sized V=F blocks. + * + * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request + * will be split into normal-sized page mappings. + * + * The higher level VM map should be locked to insure that we don't have a + * double diddle here. + * + * We panic if we get a block that overlaps with another. 
We do not merge adjacent
+ * blocks because removing any address within a block removes the entire block and it
+ * would really mess things up if we trashed too much.
+ *
+ * Once a block is mapped, it is immutable, that is, protection, catch mode, etc. can
+ * not be changed. The block must be unmapped and then remapped with the new stuff.
+ * We also do not keep track of reference or change flags.
+ *
+ * Blocks are kept in MRU order anchored from the pmap. The chain is traversed only
+ * with interruptions and translation disabled and under the control of the lock located
+ * in the first block map. MRU is used because it is expected that the same entry
+ * will be accessed repeatedly while PTEs are being generated to cover those addresses.
+ *
+ */
+
+void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr, unsigned int flags) { /* Map an autogenned block */
+
+ register blokmap *blm, *oblm;
+ unsigned int pg;
+
+#if 0
+ kprintf("pmap_map_block: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */
+ pmap, va, pa, size, prot, attr);
+#endif
+
+ if(size < ODDBLKMIN) { /* Is this below the minimum size? */
+ for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */
+ mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */
+#if 0
+ kprintf("pmap_map_block: mm: va=%08X; pa=%08X\n", /* (TEST/DEBUG) */
+ va + pg, pa + pg);
+#endif
+ }
+ return; /* All done */
+ }
+
+ blm = (blokmap *)mapping_alloc(); /* Get a block mapping */
+
+ blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */
+ blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */
+ blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */
+ blm->space = pmap->space; /* Set the space (only needed for remove) */
+ blm->blkFlags = flags; /* Set the block's flags */
+
+#if 0
+ kprintf("pmap_map_block: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */
+ blm, blm->start, blm->end, blm->PTEr);
+#endif
+
+ blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */
+
+#if 0
+ kprintf("pmap_map_block: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
+ blm, pmap->bmaps);
+#endif
+
+ if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */
+ panic("pmap_map_block: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */
+ }
+
+#if 0
+ kprintf("pmap_map_block: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */
+ blm, pmap->bmaps);
+#endif
+
+ return; /* Return */
+}
+
+
+/*
+ * Optimally enters translations for odd-sized V=F blocks.
+ *
+ * Checks to insure that the request is at least ODDBLKMIN in size. If smaller, the request
+ * will be split into normal-sized page mappings.
+ *
+ * This one is different than pmap_map_block in that it will allocate its own virtual
+ * target address. Rather than allocating a single block,
+ * it will also allocate multiple blocks that are power-of-two aligned/sized.
This allows + * hardware-level mapping that takes advantage of BAT maps or large page sizes. + * + * Most considerations for pmap_map_block apply. + * + * + */ + +kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va, + vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an optimal autogenned block */ + + register blokmap *blm, *oblm; + unsigned int pg; + kern_return_t err; + unsigned int bnd; + +#if 1 + kprintf("pmap_map_block_opt: map=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ + map, pa, size, prot, attr); +#endif + + if(size < ODDBLKMIN) { /* Is this below the minimum size? */ + err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */ + if(err) { +#if DEBUG + kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we died */ +#endif + return(err); /* Pass back the error */ + } +#if 1 + kprintf("pmap_map_block_opt: small; vaddr = %08X\n", *va); /* (TEST/DEBUG) */ +#endif + + for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */ + mapping_make(map->pmap, 0, *va + pg, pa + pg, prot, attr, 0); /* Map this page on in */ + } + return(KERN_SUCCESS); /* All done */ + } + + err = vm_map_block(map, va, &bnd, pa, size, prot); /* Go get an optimal allocation */ + + if(err == KERN_INVALID_ADDRESS) { /* Can we try a brute force block mapping? */ + err = vm_allocate(map, va, size, VM_FLAGS_ANYWHERE); /* Make us some memories */ + if(err) { +#if DEBUG + kprintf("pmap_map_block_opt: non-optimal vm_allocate() returned %d\n", err); /* Say we died */ +#endif + return(err); /* Pass back the error */ + } +#if 1 + kprintf("pmap_map_block_opt: non-optimal - vaddr = %08X\n", *va); /* (TEST/DEBUG) */ +#endif + pmap_map_block(map->pmap, *va, pa, size, prot, attr, 0); /* Set up a block mapped area */ + return KERN_SUCCESS; /* All done now */ + } + + if(err != KERN_SUCCESS) { /* We couldn't get any address range to map this... 
*/ +#if DEBUG + kprintf("pmap_map_block_opt: vm_allocate() returned %d\n", err); /* Say we couldn' do it */ +#endif + return(err); + } + +#if 1 + kprintf("pmap_map_block_opt: optimal - vaddr=%08X; bnd=%08X\n", *va, bnd); /* (TEST/DEBUG) */ +#endif + mapping_block_map_opt(map->pmap, *va, pa, bnd, size, prot, attr); /* Go build the maps */ + return(KERN_SUCCESS); /* All done */ +} + + +#if 0 + +/* + * Enters translations for odd-sized V=F blocks and merges adjacent or overlapping + * areas. + * + * Once blocks are merged, they act like one block, i.e., if you remove it, + * it all goes... + * + * This can only be used during boot. Ain't no way we can handle SMP + * or preemption easily, so we restrict it. We don't check either. We + * assume only skilled professional programmers will attempt using this + * function. We assume no responsibility, either real or imagined, for + * injury or death resulting from unauthorized use of this function. + * + * No user servicable parts inside. Notice to be removed by end-user only, + * under penalty of applicable federal and state laws. + * + * See descriptions of pmap_map_block. Ignore the part where we say we panic for + * overlapping areas. Note that we do panic if we can't merge. + * + */ + +void pmap_map_block_merge(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { /* Map an autogenned block */ + + register blokmap *blm, *oblm; + unsigned int pg; + spl_t s; + +#if 1 + kprintf("pmap_map_block_merge: pmap=%08X; va=%08X; pa=%08X; size=%08X; prot=%08X; attr=%08X\n", /* (TEST/DEBUG) */ + pmap, va, pa, size, prot, attr); +#endif + + s=splhigh(); /* Don't bother from now on */ + if(size < ODDBLKMIN) { /* Is this below the minimum size? 
*/ + for(pg = 0; pg < size; pg += PAGE_SIZE) { /* Add all pages in this block */ + mapping_make(pmap, 0, va + pg, pa + pg, prot, attr, 0); /* Map this page on in */ + } + return; /* All done */ + } + + blm = (blokmap *)mapping_alloc(); /* Get a block mapping */ + + blm->start = (unsigned int)va & -PAGE_SIZE; /* Get virtual block start */ + blm->end = (blm->start + size - 1) | (PAGE_SIZE - 1); /* Get virtual block end */ + blm->PTEr = ((unsigned int)pa & -PAGE_SIZE) | attr<<3 | ppc_prot(prot); /* Build the real portion of the base PTE */ + +#if 1 + kprintf("pmap_map_block_merge: bm=%08X; start=%08X; end=%08X; PTEr=%08X\n", /* (TEST/DEBUG) */ + blm, blm->start, blm->end, blm->PTEr); +#endif + + blm = (blokmap *)hw_cvp((mapping *)blm); /* Get the physical address of this */ + +#if 1 + kprintf("pmap_map_block_merge: bm (real)=%08X; pmap->bmaps=%08X\n", /* (TEST/DEBUG) */ + blm, pmap->bmaps); +#endif + + if(oblm = hw_add_blk(pmap, blm)) { /* Add to list and make sure we don't overlap anything */ + panic("pmap_map_block_merge: block map overlap - blm = %08X\n", oblm); /* Squeak loudly and carry a big stick */ + } + +#if 1 + kprintf("pmap_map_block_merge: pmap->bmaps=%08X\n", /* (TEST/DEBUG) */ + blm, pmap->bmaps); +#endif + splx(s); /* Ok for interruptions now */ + + return; /* Return */ +} +#endif + +/* + * void mapping_protect_phys(phys_entry *pp, vm_prot_t prot) - change the protection of a physical page + * + * This routine takes a physical entry and runs through all mappings attached to it and changes + * the protection. If there are PTEs associated with the mappings, they will be invalidated before + * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations + * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g., + * higher to lower, lower to higher. + * + * Phys_entry is unlocked. 
+ */ + +void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked) { /* Change protection of all mappings to page */ + + spl_t spl; + + debugLog2(9, pp->pte1, prot); /* end remap */ + spl=splhigh(); /* No interruptions during this */ + if(!locked) { /* Do we need to lock the physent? */ + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */ + panic("\nmapping_protect: Timeout attempting to lock physical entry at %08X: %08X %08X\n", + pp, pp->phys_link, pp->pte1); /* Complain about timeout */ + } + } + + hw_prot(pp, ppc_prot(prot)); /* Go set the protection on this physical page */ + + if(!locked) hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + splx(spl); /* Restore interrupt state */ + debugLog2(10, pp->pte1, 0); /* end remap */ + + return; /* Leave... */ +} + +/* + * void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) - change the protection of a virtual page + * + * This routine takes a pmap and virtual address and changes + * the protection. If there are PTEs associated with the mappings, they will be invalidated before + * the protection is changed. We don't try to save the PTE. We won't worry about the LRU calculations + * either (I don't think, maybe I'll change my mind later). There is no limitation on changes, e.g., + * higher to lower, lower to higher. + * + */ + +void mapping_protect(pmap_t pmap, vm_offset_t vaddr, vm_prot_t prot) { /* Change protection of a virtual page */ + + mapping *mp, *mpv; + spl_t s; + + debugLog2(9, vaddr, pmap); /* start mapping_protect */ + s = splhigh(); /* Don't bother me */ + + mp = hw_lock_phys_vir(pmap->space, vaddr); /* Lock the physical entry for this mapping */ + + if(!mp) { /* Did we find one? */ + splx(s); /* Restore the interrupt level */ + debugLog2(10, 0, 0); /* end mapping_pmap */ + return; /* Didn't find any... */ + } + if((unsigned int)mp & 1) { /* Did we timeout? 
*/ + panic("mapping_protect: timeout locking physical entry\n"); /* Yeah, scream about it! */ + splx(s); /* Restore the interrupt level */ + return; /* Bad hair day... */ + } + + hw_prot_virt(mp, ppc_prot(prot)); /* Go set the protection on this virtual mapping */ + + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + if(mpv->physent) { /* If there is a physical page, */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + } + splx(s); /* Restore interrupt state */ + debugLog2(10, mpv->PTEr, 0); /* end remap */ + + return; /* Leave... */ +} + +/* + * mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) Sets the default physical page attributes + * + * This routine takes a physical entry and sets the physical attributes. There can be no mappings + * associated with this page when we do it. + */ + +void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg) { /* Sets the default physical page attributes */ + + debugLog2(11, pp->pte1, prot); /* end remap */ + + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry */ + panic("\nmapping_phys_attr: Timeout attempting to lock physical entry at %08X: %08X %08X\n", + pp, pp->phys_link, pp->pte1); /* Complain about timeout */ + } + + hw_phys_attr(pp, ppc_prot(prot), wimg); /* Go set the default WIMG and protection */ + + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + debugLog2(12, pp->pte1, wimg); /* end remap */ + + return; /* Leave... */ +} + +/* + * void mapping_invall(phys_entry *pp) - invalidates all ptes associated with a page + * + * This routine takes a physical entry and runs through all mappings attached to it and invalidates + * any PTEs it finds. + * + * Interruptions must be disabled and the physical entry locked at entry. 
+ */ + +void mapping_invall(struct phys_entry *pp) { /* Clear all PTEs pointing to a physical page */ + + hw_inv_all(pp); /* Go set the change bit of a physical page */ + + return; /* Leave... */ +} + + +/* + * void mapping_clr_mod(phys_entry *pp) - clears the change bit of a physical page + * + * This routine takes a physical entry and runs through all mappings attached to it and turns + * off the change bit. If there are PTEs associated with the mappings, they will be invalidated before + * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations + * either (I don't think, maybe I'll change my mind later). + * + * Interruptions must be disabled and the physical entry locked at entry. + */ + +void mapping_clr_mod(struct phys_entry *pp) { /* Clears the change bit of a physical page */ + + hw_clr_mod(pp); /* Go clear the change bit of a physical page */ + return; /* Leave... */ +} + + +/* + * void mapping_set_mod(phys_entry *pp) - set the change bit of a physical page + * + * This routine takes a physical entry and runs through all mappings attached to it and turns + * on the change bit. If there are PTEs associated with the mappings, they will be invalidated before + * the change bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations + * either (I don't think, maybe I'll change my mind later). + * + * Interruptions must be disabled and the physical entry locked at entry. + */ + +void mapping_set_mod(struct phys_entry *pp) { /* Sets the change bit of a physical page */ + + hw_set_mod(pp); /* Go set the change bit of a physical page */ + return; /* Leave... */ +} + + +/* + * void mapping_clr_ref(struct phys_entry *pp) - clears the reference bit of a physical page + * + * This routine takes a physical entry and runs through all mappings attached to it and turns + * off the reference bit. 
If there are PTEs associated with the mappings, they will be invalidated before + * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations + * either (I don't think, maybe I'll change my mind later). + * + * Interruptions must be disabled at entry. + */ + +void mapping_clr_ref(struct phys_entry *pp) { /* Clears the reference bit of a physical page */ + + mapping *mp; + + debugLog2(13, pp->pte1, 0); /* end remap */ + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Lock the physical entry for this mapping */ + panic("Lock timeout getting lock on physical entry\n"); /* Just die... */ + } + hw_clr_ref(pp); /* Go clear the reference bit of a physical page */ + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock physical entry */ + debugLog2(14, pp->pte1, 0); /* end remap */ + return; /* Leave... */ +} + + +/* + * void mapping_set_ref(phys_entry *pp) - set the reference bit of a physical page + * + * This routine takes a physical entry and runs through all mappings attached to it and turns + * on the reference bit. If there are PTEs associated with the mappings, they will be invalidated before + * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations + * either (I don't think, maybe I'll change my mind later). + * + * Interruptions must be disabled and the physical entry locked at entry. + */ + +void mapping_set_ref(struct phys_entry *pp) { /* Sets the reference bit of a physical page */ + + hw_set_ref(pp); /* Go set the reference bit of a physical page */ + return; /* Leave... */ +} + + +/* + * void mapping_tst_mod(phys_entry *pp) - test the change bit of a physical page + * + * This routine takes a physical entry and runs through all mappings attached to it and tests + * the changed bit. If there are PTEs associated with the mappings, they will be invalidated before + * the changed bit is tested. We don't try to save the PTE. 
We won't worry about the LRU calculations
+ * either (I don't think, maybe I'll change my mind later).
+ *
+ * Interruptions must be disabled and the physical entry locked at entry.
+ */
+
+boolean_t mapping_tst_mod(struct phys_entry *pp) { /* Tests the change bit of a physical page */
+
+ return(hw_tst_mod(pp)); /* Go test the change bit of a physical page */
+}
+
+
+/*
+ * void mapping_tst_ref(phys_entry *pp) - tests the reference bit of a physical page
+ *
+ * This routine takes a physical entry and runs through all mappings attached to it and tests
+ * the reference bit. If there are PTEs associated with the mappings, they will be invalidated before
+ * the reference bit is changed. We don't try to save the PTE. We won't worry about the LRU calculations
+ * either (I don't think, maybe I'll change my mind later).
+ *
+ * Interruptions must be disabled and the physical entry locked at entry.
+ */
+
+boolean_t mapping_tst_ref(struct phys_entry *pp) { /* Tests the reference bit of a physical page */
+
+ return(hw_tst_ref(pp)); /* Go test the reference bit of a physical page */
+}
+
+
+/*
+ * void mapping_phys_init(physent, wimg) - fills in the default processor dependent areas of the phys ent
+ *
+ * Currently, this sets the default word 1 of the PTE. The only bits set are the WIMG bits
+ */
+
+void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg) { /* Initializes hw specific storage attributes */
+
+ pp->pte1 = (pa & -PAGE_SIZE) | ((wimg << 3) & 0x00000078); /* Set the WIMG and phys addr in the default PTE1 */
+
+ return; /* Leave... */
+}
+
+
+/*
+ * mapping_adjust(void) - Releases free mapping blocks and/or allocates new ones
+ *
+ * This routine frees any mapping blocks queued to mapCtl.mapcrel. It also checks
+ * the number of free mappings remaining, and if below a threshold, replenishes them.
+ * The list will be replenished from mapCtl.mapcrel if there are enough. Otherwise,
+ * a new one is allocated.
+ * + * This routine allocates and/or memory and must be called from a safe place. + * Currently, vm_pageout_scan is the safest place. We insure that the + */ + +thread_call_t mapping_adjust_call; +static thread_call_data_t mapping_adjust_call_data; + +void mapping_adjust(void) { /* Adjust free mappings */ + + kern_return_t retr; + mappingblok *mb, *mbn; + spl_t s; + int allocsize, i; + extern int vm_page_free_count; + + if(mapCtl.mapcmin <= MAPPERBLOK) { + mapCtl.mapcmin = (mem_size / PAGE_SIZE) / 16; + +#if DEBUG + kprintf("mapping_adjust: minimum entries rqrd = %08X\n", mapCtl.mapcmin); + kprintf("mapping_adjust: free = %08X; in use = %08X; release = %08X\n", + mapCtl.mapcfree, mapCtl.mapcinuse, mapCtl.mapcreln); +#endif + } + + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_adjust - timeout getting control lock (1)\n"); /* Tell all and die */ + } + + if (mapping_adjust_call == NULL) { + thread_call_setup(&mapping_adjust_call_data, mapping_adjust, NULL); + mapping_adjust_call = &mapping_adjust_call_data; + } + + while(1) { /* Keep going until we've got enough */ + + allocsize = mapCtl.mapcmin - mapCtl.mapcfree; /* Figure out how much we need */ + if(allocsize < 1) break; /* Leave if we have all we need */ + + if((unsigned int)(mbn = mapCtl.mapcrel)) { /* Can we rescue a free one? 
*/ + mapCtl.mapcrel = mbn->nextblok; /* Dequeue it */ + mapCtl.mapcreln--; /* Back off the count */ + allocsize = MAPPERBLOK; /* Show we allocated one block */ + } + else { /* No free ones, try to get it */ + + allocsize = (allocsize + MAPPERBLOK - 1) / MAPPERBLOK; /* Get the number of pages we need */ + if(allocsize > (mapCtl.mapcfree / 2)) allocsize = (mapCtl.mapcfree / 2); /* Don't try for anything that we can't comfortably map */ + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + + for(; allocsize > 0; allocsize >>= 1) { /* Try allocating in descending halves */ + retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE * allocsize); /* Find a virtual address to use */ + if((retr != KERN_SUCCESS) && (allocsize == 1)) { /* Did we find any memory at all? */ + panic("Whoops... Not a bit of wired memory left for anyone\n"); + } + if(retr == KERN_SUCCESS) break; /* We got some memory, bail out... */ + } + + allocsize = allocsize * MAPPERBLOK; /* Convert pages to number of maps allocated */ + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_adjust - timeout getting control lock (2)\n"); /* Tell all and die */ + } + } + for(; allocsize > 0; allocsize -= MAPPERBLOK) { /* Release one block at a time */ + mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */ + mbn = (mappingblok *)((unsigned int)mbn + PAGE_SIZE); /* Point to the next slot */ + } + if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) + mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1)); + } + + if(mapCtl.mapcholdoff) { /* Should we hold off this release? 
*/ + mapCtl.mapcrecurse = 0; /* We are done now */ + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + return; /* Return... */ + } + + mbn = mapCtl.mapcrel; /* Get first pending release block */ + mapCtl.mapcrel = 0; /* Dequeue them */ + mapCtl.mapcreln = 0; /* Set count to 0 */ + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + + while((unsigned int)mbn) { /* Toss 'em all */ + mb = mbn->nextblok; /* Get the next */ + kmem_free(mapping_map, (vm_offset_t) mbn, PAGE_SIZE); /* Release this mapping block */ + mbn = mb; /* Chain to the next */ + } + + __asm__ volatile("sync"); /* Make sure all is well */ + mapCtl.mapcrecurse = 0; /* We are done now */ + return; +} + +/* + * mapping_free(mapping *mp) - release a mapping to the free list + * + * This routine takes a mapping and adds it to the free list. + * If this mapping make the block non-empty, we queue it to the free block list. + * NOTE: we might want to queue it to the end to keep quelch the pathalogical + * case when we get a mapping and free it repeatedly causing the block to chain and unchain. 
+ * If this release fills a block and we are above the threshold, we release the block + */ + +void mapping_free(struct mapping *mp) { /* Release a mapping */ + + mappingblok *mb, *mbn; + spl_t s; + unsigned int full, mindx; + + mindx = ((unsigned int)mp & (PAGE_SIZE - 1)) >> 5; /* Get index to mapping */ + mb = (mappingblok *)((unsigned int)mp & -PAGE_SIZE); /* Point to the mapping block */ + + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_free - timeout getting control lock\n"); /* Tell all and die */ + } + + full = !(mb->mapblokfree[0] | mb->mapblokfree[1] | mb->mapblokfree[2] | mb->mapblokfree[3]); /* See if full now */ + mb->mapblokfree[mindx >> 5] |= (0x80000000 >> (mindx & 31)); /* Flip on the free bit */ + + if(full) { /* If it was full before this: */ + mb->nextblok = mapCtl.mapcnext; /* Move head of list to us */ + mapCtl.mapcnext = mb; /* Chain us to the head of the list */ + } + + mapCtl.mapcfree++; /* Bump free count */ + mapCtl.mapcinuse--; /* Decriment in use count */ + + mapCtl.mapcfreec++; /* Count total calls */ + + if(mapCtl.mapcfree > mapCtl.mapcmin) { /* Should we consider releasing this? */ + if(((mb->mapblokfree[0] | 0x80000000) & mb->mapblokfree[1] & mb->mapblokfree[2] & mb->mapblokfree[3]) + == 0xFFFFFFFF) { /* See if empty now */ + + if(mapCtl.mapcnext == mb) { /* Are we first on the list? */ + mapCtl.mapcnext = mb->nextblok; /* Unchain us */ + if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* If last, remove last */ + } + else { /* We're not first */ + for(mbn = mapCtl.mapcnext; mbn != 0; mbn = mbn->nextblok) { /* Search for our block */ + if(mbn->nextblok == mb) break; /* Is the next one our's? 
*/ + } + if(!mbn) panic("mapping_free: attempt to release mapping block (%08X) not on list\n", mp); + mbn->nextblok = mb->nextblok; /* Dequeue us */ + if(mapCtl.mapclast == mb) mapCtl.mapclast = mbn; /* If last, make our predecessor last */ + } + + if(mb->mapblokflags & mbPerm) { /* Is this permanently assigned? */ + mb->nextblok = mapCtl.mapcnext; /* Move chain head to us */ + mapCtl.mapcnext = mb; /* Chain us to the head */ + if(!((unsigned int)mb->nextblok)) mapCtl.mapclast = mb; /* If last, make us so */ + } + else { + mapCtl.mapcfree -= MAPPERBLOK; /* Remove the block from the free count */ + mapCtl.mapcreln++; /* Count on release list */ + mb->nextblok = mapCtl.mapcrel; /* Move pointer */ + mapCtl.mapcrel = mb; /* Chain us in front */ + } + } + } + + if(mapCtl.mapcreln > MAPFRTHRSH) { /* Do we have way too many releasable mappings? */ + if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ + thread_call_enter(mapping_adjust_call); /* Go toss some */ + } + } + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + + return; /* Bye, dude... */ +} + + +/* + * mapping_alloc(void) - obtain a mapping from the free list + * + * This routine takes a mapping off of the free list and returns it's address. + * + * We do this by finding a free entry in the first block and allocating it. + * If this allocation empties the block, we remove it from the free list. + * If this allocation drops the total number of free entries below a threshold, + * we allocate a new block. 
+ * + */ + +mapping *mapping_alloc(void) { /* Obtain a mapping */ + + register mapping *mp; + mappingblok *mb, *mbn; + spl_t s; + int mindx; + kern_return_t retr; + + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ + } + + if(!(mb = mapCtl.mapcnext)) { /* Get the first block entry */ + panic("mapping_alloc - free mappings exhausted\n"); /* Whine and moan */ + } + + if(!(mindx = mapalc(mb))) { /* Allocate a slot */ + panic("mapping_alloc - empty mapping block detected at %08X\n", mb); /* Not allowed to find none */ + } + + if(mindx < 0) { /* Did we just take the last one */ + mindx = -mindx; /* Make positive */ + mapCtl.mapcnext = mb->nextblok; /* Remove us from the list */ + if(!((unsigned int)mapCtl.mapcnext)) mapCtl.mapclast = 0; /* Removed the last one */ + } + + mapCtl.mapcfree--; /* Decrement free count */ + mapCtl.mapcinuse++; /* Bump in use count */ + + mapCtl.mapcallocc++; /* Count total calls */ + +/* + * Note: in the following code, we will attempt to rescue blocks only one at a time. + * Eventually, after a few more mapping_alloc calls, we will catch up. If there are none + * rescueable, we will kick the misc scan who will allocate some for us. We only do this + * if we haven't already done it. + * For early boot, we are set up to only rescue one block at a time. This is because we prime + * the release list with as much as we need until threads start. 
+ */ + if(mapCtl.mapcfree < mapCtl.mapcmin) { /* See if we need to replenish */ + if(mbn = mapCtl.mapcrel) { /* Try to rescue a block from impending doom */ + mapCtl.mapcrel = mbn->nextblok; /* Pop the queue */ + mapCtl.mapcreln--; /* Back off the count */ + mapping_free_init((vm_offset_t)mbn, 0, 1); /* Initialize a non-permanent block */ + } + else { /* We need to replenish */ + if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) { + if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ + thread_call_enter(mapping_adjust_call); /* Go allocate some more */ + } + } + } + } + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + + mp = &((mapping *)mb)[mindx]; /* Point to the allocated mapping */ + __asm__ volatile("dcbz 0,%0" : : "r" (mp)); /* Clean it up */ + return mp; /* Send it back... */ +} + + +void +consider_mapping_adjust() +{ + spl_t s; + + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_alloc - timeout getting control lock\n"); /* Tell all and die */ + } + + if (mapCtl.mapcfree < (mapCtl.mapcmin / 4)) { + if(hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ + thread_call_enter(mapping_adjust_call); /* Go allocate some more */ + } + } + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + +} + + + +/* + * void mapping_free_init(mb, perm) - Adds a block of storage to the free mapping list + * + * The mapping block is a page size area on a page boundary. It contains 1 header and 127 + * mappings. This call adds and initializes a block for use. + * + * The header contains a chain link, bit maps, a virtual to real translation mask, and + * some statistics. Bit maps map each slot on the page (bit 0 is not used because it + * corresponds to the header). 
The translation mask is the XOR of the virtual and real + * addresses (needless to say, the block must be wired). + * + * We handle these mappings the same way as saveareas: the block is only on the chain so + * long as there are free entries in it. + * + * Empty blocks are garbage collected when there are at least mapCtl.mapcmin pages worth of free + * mappings. Blocks marked PERM won't ever be released. + * + * If perm is negative, the mapping is initialized, but immediately queued to the mapCtl.mapcrel + * list. We do this only at start up time. This is done because we only allocate blocks + * in the pageout scan and it doesn't start up until after we run out of the initial mappings. + * Therefore, we need to preallocate a bunch, but we don't want them to be permanent. If we put + * them on the release queue, the allocate routine will rescue them. Then when the + * pageout scan starts, all extra ones will be released. + * + */ + + +void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked) { + /* Set's start and end of a block of mappings + perm indicates if the block can be released + or goes straight to the release queue . 
+ locked indicates if the lock is held already */ + + mappingblok *mb; + spl_t s; + int i; + unsigned int raddr; + + mb = (mappingblok *)mbl; /* Start of area */ + + + if(perm >= 0) { /* See if we need to initialize the block */ + if(perm) { + raddr = (unsigned int)mbl; /* Perm means V=R */ + mb->mapblokflags = mbPerm; /* Set perm */ + } + else { + raddr = kvtophys(mbl); /* Get real address */ + mb->mapblokflags = 0; /* Set not perm */ + } + + mb->mapblokvrswap = raddr ^ (unsigned int)mbl; /* Form translation mask */ + + mb->mapblokfree[0] = 0x7FFFFFFF; /* Set first 32 (minus 1) free */ + mb->mapblokfree[1] = 0xFFFFFFFF; /* Set next 32 free */ + mb->mapblokfree[2] = 0xFFFFFFFF; /* Set next 32 free */ + mb->mapblokfree[3] = 0xFFFFFFFF; /* Set next 32 free */ + } + + s = splhigh(); /* Don't bother from now on */ + if(!locked) { /* Do we need the lock? */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_free_init - timeout getting control lock\n"); /* Tell all and die */ + } + } + + if(perm < 0) { /* Direct to release queue? */ + mb->nextblok = mapCtl.mapcrel; /* Move forward pointer */ + mapCtl.mapcrel = mb; /* Queue us on in */ + mapCtl.mapcreln++; /* Count the free block */ + } + else { /* Add to the free list */ + + mb->nextblok = 0; /* We always add to the end */ + mapCtl.mapcfree += MAPPERBLOK; /* Bump count */ + + if(!((unsigned int)mapCtl.mapcnext)) { /* First entry on list? */ + mapCtl.mapcnext = mapCtl.mapclast = mb; /* Chain to us */ + } + else { /* We are not the first */ + mapCtl.mapclast->nextblok = mb; /* Point the last to us */ + mapCtl.mapclast = mb; /* We are now last */ + } + } + + if(!locked) { /* Do we need to unlock? */ + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + } + splx(s); /* Restore 'rupts */ + return; /* All done, leave... 
*/ +} + + +/* + * void mapping_prealloc(unsigned int) - Preallocates mapppings for large request + * + * No locks can be held, because we allocate memory here. + * This routine needs a corresponding mapping_relpre call to remove the + * hold off flag so that the adjust routine will free the extra mapping + * blocks on the release list. I don't like this, but I don't know + * how else to do this for now... + * + */ + +void mapping_prealloc(unsigned int size) { /* Preallocates mapppings for large request */ + + int nmapb, i; + kern_return_t retr; + mappingblok *mbn; + spl_t s; + + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_prealloc - timeout getting control lock\n"); /* Tell all and die */ + } + + nmapb = (size >> 12) + mapCtl.mapcmin; /* Get number of entries needed for this and the minimum */ + + mapCtl.mapcholdoff++; /* Bump the hold off count */ + + if((nmapb = (nmapb - mapCtl.mapcfree)) <= 0) { /* Do we already have enough? */ + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + return; + } + if (!hw_compare_and_store(0, 1, &mapCtl.mapcrecurse)) { /* Make sure we aren't recursing */ + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + return; + } + nmapb = (nmapb + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks to get */ + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ + + for(i = 0; i < nmapb; i++) { /* Allocate 'em all */ + retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */ + if(retr != KERN_SUCCESS) { /* Did we get some memory? */ + panic("Whoops... 
Not a bit of wired memory left for anyone\n"); + } + mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize on to the release queue */ + } + if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) + mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1)); + + mapCtl.mapcrecurse = 0; /* We are done now */ +} + +/* + * void mapping_relpre(void) - Releases preallocation release hold off + * + * This routine removes the + * hold off flag so that the adjust routine will free the extra mapping + * blocks on the release list. I don't like this, but I don't know + * how else to do this for now... + * + */ + +void mapping_relpre(void) { /* Releases release hold off */ + + spl_t s; + + s = splhigh(); /* Don't bother from now on */ + if(!hw_lock_to((hw_lock_t)&mapCtl.mapclock, LockTimeOut)) { /* Lock the control header */ + panic("mapping_relpre - timeout getting control lock\n"); /* Tell all and die */ + } + if(--mapCtl.mapcholdoff < 0) { /* Back down the hold off count */ + panic("mapping_relpre: hold-off count went negative\n"); + } + + hw_lock_unlock((hw_lock_t)&mapCtl.mapclock); /* Unlock our stuff */ + splx(s); /* Restore 'rupts */ +} + +/* + * void mapping_free_prime(void) - Primes the mapping block release list + * + * See mapping_free_init. + * No locks can be held, because we allocate memory here. + * One processor running only. 
+ * + */ + +void mapping_free_prime(void) { /* Primes the mapping block release list */ + + int nmapb, i; + kern_return_t retr; + mappingblok *mbn; + vm_offset_t mapping_min; + + retr = kmem_suballoc(kernel_map, &mapping_min, mem_size / 16, + FALSE, TRUE, &mapping_map); + + if (retr != KERN_SUCCESS) + panic("mapping_free_prime: kmem_suballoc failed"); + + + nmapb = (mapCtl.mapcfree + mapCtl.mapcinuse + MAPPERBLOK - 1) / MAPPERBLOK; /* Get permanent allocation */ + nmapb = nmapb * 4; /* Get 4 times our initial allocation */ + +#if DEBUG + kprintf("mapping_free_prime: free = %08X; in use = %08X; priming = %08X\n", + mapCtl.mapcfree, mapCtl.mapcinuse, nmapb); +#endif + + for(i = 0; i < nmapb; i++) { /* Allocate 'em all */ + retr = kmem_alloc_wired(mapping_map, (vm_offset_t *)&mbn, PAGE_SIZE); /* Find a virtual address to use */ + if(retr != KERN_SUCCESS) { /* Did we get some memory? */ + panic("Whoops... Not a bit of wired memory left for anyone\n"); + } + mapping_free_init((vm_offset_t)mbn, -1, 0); /* Initialize onto release queue */ + } + if ((mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1))) > mapCtl.mapcmaxalloc) + mapCtl.mapcmaxalloc = mapCtl.mapcinuse + mapCtl.mapcfree + (mapCtl.mapcreln * (MAPPERBLOK + 1)); +} + + + +mapping_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, + vm_size_t *alloc_size, int *collectable, int *exhaustable) +{ + *count = mapCtl.mapcinuse; + *cur_size = ((PAGE_SIZE / (MAPPERBLOK + 1)) * (mapCtl.mapcinuse + mapCtl.mapcfree)) + (PAGE_SIZE * mapCtl.mapcreln); + *max_size = (PAGE_SIZE / (MAPPERBLOK + 1)) * mapCtl.mapcmaxalloc; + *elem_size = (PAGE_SIZE / (MAPPERBLOK + 1)); + *alloc_size = PAGE_SIZE; + + *collectable = 1; + *exhaustable = 0; +} + + +/* + * vm_offset_t mapping_p2v(pmap_t pmap, phys_entry *pp) - Finds first virtual mapping of a physical page in a space + * + * Gets a lock on the physical entry. 
Then it searches the list of attached mappings for one with + * the same space. If it finds it, it returns the virtual address. + * + * Note that this will fail if the pmap has nested pmaps in it. Fact is, I'll check + * for it and fail it myself... + */ + +vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp) { /* Finds first virtual mapping of a physical page in a space */ + + spl_t s; + register mapping *mp, *mpv; + vm_offset_t va; + + if(pmap->vflags & pmapAltSeg) return 0; /* If there are nested pmaps, fail immediately */ + + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ + splx(s); /* Restore 'rupts */ + panic("mapping_p2v: timeout getting lock on physent\n"); /* Arrrgghhhh! */ + return(0); /* Should die before here */ + } + + va = 0; /* Assume failure */ + + for(mpv = hw_cpv(pp->phys_link); mpv; mpv = hw_cpv(mpv->next)) { /* Scan 'em all */ + + if(!(((mpv->PTEv >> 7) & 0x000FFFFF) == pmap->space)) continue; /* Skip all the rest if this is not the right space... */ + + va = ((((unsigned int)mpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */ + va = va | ((mpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */ + va = va | ((mpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */ + break; /* We're done now, pass virtual address back */ + } + + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + splx(s); /* Restore 'rupts */ + return(va); /* Return the result or 0... 
*/ +} + +/* + * kvtophys(addr) + * + * Convert a kernel virtual address to a physical address + */ +vm_offset_t kvtophys(vm_offset_t va) { + + register mapping *mp, *mpv; + register blokmap *bmp; + register vm_offset_t pa; + spl_t s; + + s=splhigh(); /* Don't bother from now on */ + mp = hw_lock_phys_vir(PPC_SID_KERNEL, va); /* Find mapping and lock the physical entry for this mapping */ + + if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ + splx(s); /* Restore 'rupts */ + panic("kvtophys: timeout obtaining lock on physical entry (vaddr=%08X)\n", va); /* Scream bloody murder! */ + return 0; + } + + if(!mp) { /* If it was not a normal page */ + pa = hw_cvp_blk(kernel_pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ + splx(s); /* Restore 'rupts */ + return pa; /* Return physical address */ + } + + mpv = hw_cpv(mp); /* Convert to virtual addressing */ + + if(!mpv->physent) { /* Was there a physical entry? */ + pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */ + } + else { + pa = (vm_offset_t)((mpv->physent->pte1 & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Get physical address from physent */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + } + + splx(s); /* Restore 'rupts */ + return pa; /* Return the physical address... */ +} + +/* + * phystokv(addr) + * + * Convert a physical address to a kernel virtual address if + * there is a mapping, otherwise return NULL + */ + +vm_offset_t phystokv(vm_offset_t pa) { + + struct phys_entry *pp; + vm_offset_t va; + + pp = pmap_find_physentry(pa); /* Find the physical entry */ + if (PHYS_NULL == pp) { + return (vm_offset_t)NULL; /* If none, return null */ + } + if(!(va=mapping_p2v(kernel_pmap, pp))) { + return 0; /* Can't find it, return 0... */ + } + return (va | (pa & (PAGE_SIZE-1))); /* Build and return VADDR... 
*/ + +} + +/* + * void ignore_zero_fault(boolean_t) - Sets up to ignore or honor any fault on + * page 0 access for the current thread. + * + * If parameter is TRUE, faults are ignored + * If parameter is FALSE, faults are honored + * + */ + +void ignore_zero_fault(boolean_t type) { /* Sets up to ignore or honor any fault on page 0 access for the current thread */ + + if(type) current_act()->mact.specFlags |= ignoreZeroFault; /* Ignore faults on page 0 */ + else current_act()->mact.specFlags &= ~ignoreZeroFault; /* Honor faults on page 0 */ + + return; /* Return the result or 0... */ +} + + +/* + * Allocates a range of virtual addresses in a map as optimally as + * possible for block mapping. The start address is aligned such + * that a minimum number of power-of-two sized/aligned blocks is + * required to cover the entire range. + * + * We also use a mask of valid block sizes to determine optimality. + * + * Note that the passed in pa is not actually mapped to the selected va, + * rather, it is used to figure the optimal boundary. The actual + * V to R mapping is done externally. + * + * This function will return KERN_INVALID_ADDRESS if an optimal address + * can not be found. It is not necessarily a fatal error, the caller may still be + * still be able to do a non-optimal assignment. 
+ */ + +kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, + vm_size_t size, vm_prot_t prot) { + + vm_map_entry_t entry, next, tmp_entry, new_entry; + vm_offset_t start, end, algnpa, endadr, strtadr, curradr; + vm_offset_t boundary; + + unsigned int maxsize, minsize, leading, trailing; + + assert(page_aligned(pa)); + assert(page_aligned(size)); + + if (map == VM_MAP_NULL) return(KERN_INVALID_ARGUMENT); /* Dude, like we need a target map */ + + minsize = blokValid ^ (blokValid & (blokValid - 1)); /* Set minimum subblock size */ + maxsize = 0x80000000 >> cntlzw(blokValid); /* Set maximum subblock size */ + + boundary = 0x80000000 >> cntlzw(size); /* Get optimal boundary */ + if(boundary > maxsize) boundary = maxsize; /* Pin this at maximum supported hardware size */ + + vm_map_lock(map); /* No touchee no mapee */ + + for(; boundary > minsize; boundary >>= 1) { /* Try all optimizations until we find one */ + if(!(boundary & blokValid)) continue; /* Skip unavailable block sizes */ + algnpa = (pa + boundary - 1) & -boundary; /* Round physical up */ + leading = algnpa - pa; /* Get leading size */ + + curradr = 0; /* Start low */ + + while(1) { /* Try all possible values for this opt level */ + + curradr = curradr + boundary; /* Get the next optimal address */ + strtadr = curradr - leading; /* Calculate start of optimal range */ + endadr = strtadr + size; /* And now the end */ + + if((curradr < boundary) || /* Did address wrap here? */ + (strtadr > curradr) || /* How about this way? */ + (endadr < strtadr)) break; /* We wrapped, try next lower optimization... */ + + if(strtadr < map->min_offset) continue; /* Jump to the next higher slot... */ + if(endadr > map->max_offset) break; /* No room right now... */ + + if(vm_map_lookup_entry(map, strtadr, &entry)) continue; /* Find slot, continue if allocated... */ + + next = entry->vme_next; /* Get the next entry */ + if((next == vm_map_to_entry(map)) || /* Are we the last entry? 
*/ + (next->vme_start >= endadr)) { /* or do we end before the next entry? */ + + new_entry = vm_map_entry_insert(map, entry, strtadr, endadr, /* Yes, carve out our entry */ + VM_OBJECT_NULL, + 0, /* Offset into object of 0 */ + FALSE, /* No copy needed */ + FALSE, /* Not shared */ + FALSE, /* Not in transition */ + prot, /* Set the protection to requested */ + prot, /* We can't change protection */ + VM_BEHAVIOR_DEFAULT, /* Use default behavior, but makes no never mind, + 'cause we don't page in this area */ + VM_INHERIT_DEFAULT, /* Default inheritance */ + 0); /* Nothing is wired */ + + vm_map_unlock(map); /* Let the world see it all */ + *va = strtadr; /* Tell everyone */ + *bnd = boundary; /* Say what boundary we are aligned to */ + return(KERN_SUCCESS); /* Leave, all is right with the world... */ + } + } + } + + vm_map_unlock(map); /* Couldn't find a slot */ + return(KERN_INVALID_ADDRESS); +} + +/* + * Copies data from a physical page to a virtual page. This is used to + * move data from the kernel to user state. + * + * Note that it is invalid to have a source that spans a page boundry. + * This can block. + * We don't check protection either. + * And we don't handle a block mapped sink address either. + * + */ + +kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size) { + + vm_map_t map; + kern_return_t ret; + unsigned int spaceid; + int left, csize; + vm_offset_t pa; + register mapping *mpv, *mp; + spl_t s; + + if((size == 0) || ((source ^ (source + size - 1)) & -PAGE_SIZE)) return KERN_FAILURE; /* We don't allow a source page crosser */ + map = current_act()->map; /* Get the current map */ + + while(size) { + s=splhigh(); /* Don't bother me */ + + spaceid = map->pmap->pmapSegs[(unsigned int)sink >> 28]; /* Get space ID. Don't bother to clean top bits */ + + mp = hw_lock_phys_vir(spaceid, sink); /* Lock the physical entry for the sink */ + if(!mp) { /* Was it there? 
*/ + splx(s); /* Restore the interrupt level */ + ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* Didn't find it, try to fault it in... */ + if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */ + + return KERN_FAILURE; /* Didn't find any, return no good... */ + } + if((unsigned int)mp&1) { /* Did we timeout? */ + panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", sink); /* Yeah, scream about it! */ + splx(s); /* Restore the interrupt level */ + return KERN_FAILURE; /* Bad hair day, return FALSE... */ + } + + mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + + if(mpv->PTEr & 1) { /* Are we write protected? yes, could indicate COW */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */ + splx(s); /* Restore the interrupt level */ + ret = vm_fault(map, trunc_page(sink), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* check for a COW area */ + if (ret == KERN_SUCCESS) continue; /* We got it in, try again to find it... */ + return KERN_FAILURE; /* Didn't find any, return no good... */ + } + left = PAGE_SIZE - (sink & PAGE_MASK); /* Get amount left on sink page */ + + csize = size < left ? 
size : left; /* Set amount to copy this pass */ + + pa = (vm_offset_t)((mpv->physent->pte1 & ~PAGE_MASK) | ((unsigned int)sink & PAGE_MASK)); /* Get physical address of sink */ + + bcopy_phys((char *)source, (char *)pa, csize); /* Do a physical copy */ + + hw_set_mod(mpv->physent); /* Go set the change of the sink */ + + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the sink */ + splx(s); /* Open up for interrupts */ + + sink += csize; /* Move up to start of next page */ + source += csize; /* Move up source */ + size -= csize; /* Set amount for next pass */ + } + return KERN_SUCCESS; +} + + +#if DEBUG +/* + * Dumps out the mapping stuff associated with a virtual address + */ +void dumpaddr(space_t space, vm_offset_t va) { + + mapping *mp, *mpv; + vm_offset_t pa; + spl_t s; + + s=splhigh(); /* Don't bother me */ + + mp = hw_lock_phys_vir(space, va); /* Lock the physical entry for this mapping */ + if(!mp) { /* Did we find one? */ + splx(s); /* Restore the interrupt level */ + printf("dumpaddr: virtual address (%08X) not mapped\n", va); + return; /* Didn't find any, return FALSE... */ + } + if((unsigned int)mp&1) { /* Did we timeout? */ + panic("dumpaddr: timeout locking physical entry for virtual address (%08X)\n", va); /* Yeah, scream about it! */ + splx(s); /* Restore the interrupt level */ + return; /* Bad hair day, return FALSE... */ + } + printf("dumpaddr: space=%08X; vaddr=%08X\n", space, va); /* Say what address were dumping */ + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + dumpmapping(mpv); + if(mpv->physent) { + dumppca(mpv); + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock physical entry associated with mapping */ + } + splx(s); /* Was there something you needed? 
*/ + return; /* Tell them we did it */ +} + + + +/* + * Prints out a mapping control block + * + */ + +void dumpmapping(struct mapping *mp) { /* Dump out a mapping */ + + printf("Dump of mapping block: %08X\n", mp); /* Header */ + printf(" next: %08X\n", mp->next); + printf(" hashnext: %08X\n", mp->hashnext); + printf(" PTEhash: %08X\n", mp->PTEhash); + printf(" PTEent: %08X\n", mp->PTEent); + printf(" physent: %08X\n", mp->physent); + printf(" PTEv: %08X\n", mp->PTEv); + printf(" PTEr: %08X\n", mp->PTEr); + printf(" pmap: %08X\n", mp->pmap); + + if(mp->physent) { /* Print physent if it exists */ + printf("Associated physical entry: %08X %08X\n", mp->physent->phys_link, mp->physent->pte1); + } + else { + printf("Associated physical entry: none\n"); + } + + dumppca(mp); /* Dump out the PCA information */ + + return; +} + +/* + * Prints out a PTEG control area + * + */ + +void dumppca(struct mapping *mp) { /* PCA */ + + PCA *pca; + unsigned int *pteg; + + pca = (PCA *)((unsigned int)mp->PTEhash&-64); /* Back up to the start of the PCA */ + pteg=(unsigned int *)((unsigned int)pca-(((hash_table_base&0x0000FFFF)+1)<<16)); + printf(" Dump of PCA: %08X\n", pca); /* Header */ + printf(" PCAlock: %08X\n", pca->PCAlock); + printf(" PCAallo: %08X\n", pca->flgs.PCAallo); + printf(" PCAhash: %08X %08X %08X %08X\n", pca->PCAhash[0], pca->PCAhash[1], pca->PCAhash[2], pca->PCAhash[3]); + printf(" %08X %08X %08X %08X\n", pca->PCAhash[4], pca->PCAhash[5], pca->PCAhash[6], pca->PCAhash[7]); + printf("Dump of PTEG: %08X\n", pteg); /* Header */ + printf(" %08X %08X %08X %08X\n", pteg[0], pteg[1], pteg[2], pteg[3]); + printf(" %08X %08X %08X %08X\n", pteg[4], pteg[5], pteg[6], pteg[7]); + printf(" %08X %08X %08X %08X\n", pteg[8], pteg[9], pteg[10], pteg[11]); + printf(" %08X %08X %08X %08X\n", pteg[12], pteg[13], pteg[14], pteg[15]); + return; +} + +/* + * Dumps starting with a physical entry + */ + +void dumpphys(struct phys_entry *pp) { /* Dump from physent */ + + mapping *mp; + PCA 
*pca; + unsigned int *pteg; + + printf("Dump from physical entry %08X: %08X %08X\n", pp, pp->phys_link, pp->pte1); + mp = hw_cpv(pp->phys_link); + while(mp) { + dumpmapping(mp); + dumppca(mp); + mp = hw_cpv(mp->next); + } + + return; +} + +#endif + + +kern_return_t bmapvideo(vm_offset_t *info); +kern_return_t bmapvideo(vm_offset_t *info) { + + extern struct vc_info vinfo; + + (void)copyout((char *)&vinfo, (char *)info, sizeof(struct vc_info)); /* Copy out the video info */ + return KERN_SUCCESS; +} + +kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr); +kern_return_t bmapmap(vm_offset_t va, vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr) { + + pmap_map_block(current_act()->task->map->pmap, va, pa, size, prot, attr, 0); /* Map it in */ + return KERN_SUCCESS; +} + +kern_return_t bmapmapr(vm_offset_t va); +kern_return_t bmapmapr(vm_offset_t va) { + + mapping_remove(current_act()->task->map->pmap, va); /* Remove map */ + return KERN_SUCCESS; +} diff --git a/osfmk/ppc/mappings.h b/osfmk/ppc/mappings.h new file mode 100644 index 000000000..ebe99b036 --- /dev/null +++ b/osfmk/ppc/mappings.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * Header files for the hardware virtual memory mapping stuff + */ +#ifndef _PPC_MAPPINGS_H_ +#define _PPC_MAPPINGS_H_ + +typedef struct PCA { /* PTEG Control Area */ + unsigned int PCAlock; /* PCA lock */ + union flgs { + unsigned int PCAallo; /* Allocation controls */ + struct PCAalflgs { /* Keep these in order!!! */ + unsigned char PCAfree; /* Indicates the slot is free */ + unsigned char PCAauto; /* Indicates that the PTE was autogenned */ + unsigned char PCAslck; /* Indicates that the slot is locked */ + unsigned char PCAsteal; /* Steal scan start position */ + } PCAalflgs; + } flgs; + unsigned int PCAgas[6]; /* Filler to 32 byte boundary */ + unsigned int PCAhash[8]; /* PTEG hash chains */ +} PCA; + +#define MAPFLAGS 0x0000001F +#define BMAP 0x00000001 + +typedef struct mapping { + struct mapping *next; /* MUST BE FIRST - chain off physent */ + struct mapping *hashnext; /* Next mapping in same hash group */ + unsigned int *PTEhash; /* Pointer to the head of the mapping hash list */ + unsigned int *PTEent; /* Pointer to PTE if exists */ + struct phys_entry *physent; /* Quick pointer back to the physical entry */ + unsigned int PTEv; /* Virtual half of HW PTE */ + unsigned int PTEr; /* Real half of HW PTE. This is used ONLY if + there is no physical entry associated + with this mapping, ie.e, physent==0 */ + struct pmap *pmap; /* Quick pointer back to the containing pmap */ +} mapping; + +/* + * This control block maps odd size blocks of memory. The mapping must + * be V=F (Virtual = Fixed), i.e., virtually and physically contiguous + * multiples of hardware size pages. + * + * This control block overlays the mapping CB and is allocated from the + * same pool. + * + * It is expected that only a small number of these exist for each address + * space and will typically be for I/O areas. 
It is further assumed that + * there is a minimum size (ODDBLKMIN) for these blocks. If smaller, the + * block will be split into N normal page mappings. + * + * Binary tree for fast lookups. + */ + + +typedef struct blokmap { + struct blokmap *next; /* Next block in list */ + unsigned int start; /* Start of block */ + unsigned int end; /* End of block */ + unsigned int PTEr; /* Real half of HW PTE at base address */ + unsigned int space; /* Cached VSID */ + unsigned int blkFlags; /* Flags for this block */ +#define blkPerm 0x80000000 +#define blkPermbit 0 + unsigned int gas3; /* Reserved */ + unsigned int gas4; /* Reserved */ +} blokmap; + +#define ODDBLKMIN (8 * PAGE_SIZE) + +#define MAPPING_NULL ((struct mapping *) 0) + +typedef struct mappingctl { + unsigned int mapclock; /* Mapping allocation lock */ + unsigned int mapcrecurse; /* Mapping allocation recursion control */ + struct mappingblok *mapcnext; /* First mapping block with free entries */ + struct mappingblok *mapclast; /* Last mapping block with free entries */ + struct mappingblok *mapcrel; /* List of deferred block releases */ + unsigned int mapcfree; /* Total free entries on list */ + unsigned int mapcinuse; /* Total entries in use */ + unsigned int mapcreln; /* Total blocks on pending release list */ + int mapcholdoff; /* Hold off clearing release list */ + unsigned int mapcfreec; /* Total calls to mapping free */ + unsigned int mapcallocc; /* Total calls to mapping alloc */ + unsigned int mapcmin; /* Minimum free mappings to keep */ + unsigned int mapcmaxalloc; /* Maximum number of mappings allocated at one time */ + unsigned int mapcgas[3]; /* Pad to 64 bytes */ +} mappingctl; + +#define MAPPERBLOK 127 +#define MAPALTHRSH (4*MAPPERBLOK) +#define MAPFRTHRSH (2 * ((MAPALTHRSH + MAPPERBLOK - 1) / MAPPERBLOK)) +typedef struct mappingblok { + unsigned int mapblokfree[4]; /* Bit map of free mapping entrys */ + unsigned int mapblokvrswap; /* Virtual address XORed with physical address */ + unsigned int 
mapblokflags; /* Various flags */ +#define mbPerm 0x80000000 /* Block is permanent */ + struct mappingblok *nextblok; /* Pointer to the next mapping block */ +} mappingblok; + +extern mappingctl mapCtl; /* Mapping allocation control */ + +extern void mapping_phys_init(struct phys_entry *pp, unsigned int pa, unsigned int wimg); /* Initializes hw specific storage attributes */ +extern boolean_t mapping_remove(pmap_t pmap, vm_offset_t va); /* Remove a single mapping for this VADDR */ +extern void mapping_free_init(vm_offset_t mbl, int perm, boolean_t locked); /* Sets start and end of a block of mappings */ +extern void mapping_adjust(void); /* Adjust free mapping count */ +extern void mapping_free_prime(void); /* Primes the mapping block release list */ +extern void mapping_prealloc(unsigned int); /* Preallocate mappings for large use */ +extern void mapping_relpre(void); /* Releases preallocate request */ +extern void mapping_init(void); /* Do initial stuff */ +extern mapping *mapping_alloc(void); /* Obtain a mapping */ +extern void mapping_free(struct mapping *mp); /* Release a mapping */ +extern boolean_t mapping_tst_ref(struct phys_entry *pp); /* Tests the reference bit of a physical page */ +extern boolean_t mapping_tst_mod(struct phys_entry *pp); /* Tests the change bit of a physical page */ +extern void mapping_set_ref(struct phys_entry *pp); /* Sets the reference bit of a physical page */ +extern void mapping_clr_ref(struct phys_entry *pp); /* Clears the reference bit of a physical page */ +extern void mapping_set_mod(struct phys_entry *pp); /* Sets the change bit of a physical page */ +extern void mapping_clr_mod(struct phys_entry *pp); /* Clears the change bit of a physical page */ +extern void mapping_invall(struct phys_entry *pp); /* Clear all PTEs pointing to a physical page */ +extern void mapping_protect_phys(struct phys_entry *pp, vm_prot_t prot, boolean_t locked); /* Change protection of all mappings to page */ +extern void mapping_protect(pmap_t 
pmap, vm_offset_t vaddr, vm_prot_t prot); /* Change protection of a single mapping to page */ +extern mapping *mapping_make(pmap_t pmap, struct phys_entry *pp, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, int attr, boolean_t locked); /* Make an address mapping */ +extern void mapping_purge(struct phys_entry *pp); /* Remove all mappings for this physent */ +extern vm_offset_t mapping_p2v(pmap_t pmap, struct phys_entry *pp); /* Finds first virtual mapping of a physical page in a space */ +extern void mapping_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg); /* Sets the default physical page attributes */ +extern void mapping_block_map_opt(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_offset_t bnd, vm_size_t size, vm_prot_t prot, int attr); /* Map a block optimally */ +extern int mapalc(struct mappingblok *mb); /* Finds and allcates a mapping entry */ +extern void ignore_zero_fault(boolean_t type); /* Sets up to ignore or honor any fault on page 0 access for the current thread */ + + +extern mapping *hw_lock_phys_vir(space_t space, vm_offset_t va); /* Finds and locks a physical entry by vaddr */ +extern mapping *hw_cpv(struct mapping *mapping); /* Converts a physical mapping control block address to virtual */ +extern mapping *hw_cvp(struct mapping *mapping); /* Converts a virtual mapping control block address to physical */ +extern void hw_rem_map(struct mapping *mapping); /* Remove a mapping from the system */ +extern void hw_add_map(struct mapping *mp, space_t space, vm_offset_t va); /* Add a mapping to the PTEG hash list */ +extern blokmap *hw_rem_blk(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); /* Remove a block that falls within a range */ +extern vm_offset_t hw_cvp_blk(pmap_t pmap, vm_offset_t va); /* Convert mapped block virtual to physical */ +extern blokmap *hw_add_blk(pmap_t pmap, struct blokmap *bmr); /* Add a block to the pmap */ +extern void hw_prot(struct phys_entry *pp, vm_prot_t prot); /* Change the protection of a physical 
page */ +extern void hw_prot_virt(struct mapping *mp, vm_prot_t prot); /* Change the protection of a virtual page */ +extern void hw_attr_virt(struct mapping *mp, unsigned int wimg); /* Change the attributes of a virtual page */ +extern void hw_phys_attr(struct phys_entry *pp, vm_prot_t prot, unsigned int wimg); /* Sets the default physical page attributes */ +extern unsigned int hw_test_rc(struct mapping *mp, boolean_t reset); /* Test and optionally reset the RC bit of specific mapping */ + +extern boolean_t hw_tst_mod(struct phys_entry *pp); /* Tests change bit */ +extern void hw_set_mod(struct phys_entry *pp); /* Set change bit */ +extern void hw_clr_mod(struct phys_entry *pp); /* Clear change bit */ + +extern boolean_t hw_tst_ref(struct phys_entry *pp); /* Tests reference bit */ +extern void hw_set_ref(struct phys_entry *pp); /* Set reference bit */ +extern void hw_clr_ref(struct phys_entry *pp); /* Clear reference bit */ + +extern void hw_inv_all(struct phys_entry *pp); /* Invalidate all PTEs associated with page */ +extern void hw_set_user_space(pmap_t pmap); /* Indicate we need a space switch */ +extern void hw_set_user_space_dis(pmap_t pmap); /* Indicate we need a space switch (already disabled) */ +kern_return_t copyp2v(vm_offset_t source, vm_offset_t sink, unsigned int size); /* Copy a physical page to a virtual address */ +extern void *LRA(space_t space, void *vaddr); /* Translate virtual to real using only HW tables */ +extern void dumpaddr(space_t space, vm_offset_t va); +extern void dumpmapping(struct mapping *mp); /* Print contents of a mapping */ +extern void dumppca(struct mapping *mp); /* Print contents of a PCA */ +extern void dumpphys(struct phys_entry *pp); /* Prints stuff starting at phys */ + +extern unsigned int mappingdeb0; /* (TEST/DEBUG) */ +extern unsigned int incrVSID; /* VSID increment value */ + +#endif /* _PPC_MAPPINGS_H_ */ + diff --git a/osfmk/ppc/mem.c b/osfmk/ppc/mem.c new file mode 100644 index 000000000..5d2a9325f --- /dev/null 
+++ b/osfmk/ppc/mem.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* A marvelous selection of support routines for virtual memory */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include /* For pmap_pteg_overflow */ + +/* These refer to physical addresses and are set and referenced elsewhere */ + +unsigned int hash_table_base; +unsigned int hash_table_size; + +unsigned int hash_function_mask; + +struct shadowBAT shadow_BAT; + +/* gather statistics about hash table usage */ + +#if DEBUG +#define MEM_STATS 1 +#else +#define MEM_STATS 0 +#endif /* DEBUG */ + +#if MEM_STATS +/* hash table usage information */ +struct hash_table_stats { + int find_pte_in_pteg_calls; + int find_pte_in_pteg_not_found; + int find_pte_in_pteg_location[8]; + struct find_or_alloc_calls { + int found_primary; + int found_secondary; + int alloc_primary; + int alloc_secondary; + int overflow; + int not_found; + } find_or_alloc_calls[2]; + +} hash_table_stats[NCPUS]; + +#define INC_STAT(LOC) \ + 
hash_table_stats[cpu_number()].find_pte_in_pteg_location[LOC]++ + +#else /* MEM_STATS */ +#define INC_STAT(LOC) +#endif /* MEM_STATS */ + +/* Set up the machine registers for the given hash table. + * The table has already been zeroed. + */ +void hash_table_init(unsigned int base, unsigned int size) +{ + sync(); /* SYNC: it's not just the law, it's a good idea... */ + mtsdr1(hash_table_base | ((size-1)>>16)); /* Slam the SDR1 with the hash table address */ + sync(); /* SYNC: it's not just the law, it's a good idea... */ + isync(); +} + diff --git a/osfmk/ppc/mem.h b/osfmk/ppc/mem.h new file mode 100644 index 000000000..ef9169ac8 --- /dev/null +++ b/osfmk/ppc/mem.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_MEM_H_ +#define _PPC_MEM_H_ + +#include +#include + +#include +#include +#include +#include + +extern vm_offset_t hash_table_base; +extern unsigned int hash_table_size; + +void hash_table_init(vm_offset_t base, vm_offset_t size); + +#define MAX_BAT 4 + +typedef struct ppcBAT { + unsigned int upper; /* Upper half of BAT */ + unsigned int lower; /* Lower half of BAT */ +} ppcBAT; + +struct shadowBAT { + ppcBAT IBATs[MAX_BAT]; /* Instruction BATs */ + ppcBAT DBATs[MAX_BAT]; /* Data BAT */ +}; + +extern struct shadowBAT shadow_BAT; + +#endif /* _PPC_MEM_H_ */ diff --git a/osfmk/ppc/misc.c b/osfmk/ppc/misc.c new file mode 100644 index 000000000..8caaf5d6c --- /dev/null +++ b/osfmk/ppc/misc.c @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * copyin/out_multiple - the assembler copyin/out functions jump to C for + * help when the copyin lies over a segment boundary. The C breaks + * down the copy into two sub-copies and re-calls the assembler with + * these sub-copies. Very rare occurrence. Warning: These functions are + * called whilst active_thread->thread_recover is still set. + */ + +extern boolean_t copyin_multiple(const char *src, + char *dst, + vm_size_t count); + +boolean_t copyin_multiple(const char *src, + char *dst, + vm_size_t count) +{ + const char *midpoint; + vm_size_t first_count; + boolean_t first_result; + + /* Assert that we've been called because of a segment boundary, + * this function is more expensive than the assembler, and should + * only be called in this difficult case. + */ + assert(((vm_offset_t)src & 0xF0000000) != + ((vm_offset_t)(src + count -1) & 0xF0000000)); + + /* TODO NMGS define sensible constants for segments, and apply + * to C and assembler (assembler is much harder) + */ + midpoint = (const char*) ((vm_offset_t)(src + count) & 0xF0000000); + first_count = (midpoint - src); + + first_result = copyin(src, dst, first_count); + + /* If there was an error, stop now and return error */ + if (first_result != 0) + return first_result; + + /* otherwise finish the job and return result */ + return copyin(midpoint, dst + first_count, count-first_count); +} + +extern int copyout_multiple(const char *src, char *dst, vm_size_t count); + +int copyout_multiple(const char *src, char *dst, vm_size_t count) +{ + char *midpoint; + vm_size_t first_count; + boolean_t first_result; + + /* Assert that we've been called because of a segment boundary, + * this function is more expensive than the assembler, and should + * only be called in this difficult case.
For copyout, the + * segment boundary is on the dst + */ + assert(((vm_offset_t)dst & 0xF0000000) != + ((vm_offset_t)(dst + count - 1) & 0xF0000000)); + + /* TODO NMGS define sensible constants for segments, and apply + * to C and assembler (assembler is much harder) + */ + midpoint = (char *) ((vm_offset_t)(dst + count) & 0xF0000000); + first_count = (midpoint - dst); + + first_result = copyout(src, dst, first_count); + + /* If there was an error, stop now and return error */ + if (first_result != 0) + return first_result; + + /* otherwise finish the job and return result */ + + return copyout(src + first_count, midpoint, count-first_count); +} + +int bcmp( + const char *a, + const char *b, + vm_size_t len) +{ + if (len == 0) + return 0; + + do + if (*a++ != *b++) + break; + while (--len); + + return len; +} + +#if DEBUG +void regDump(struct ppc_saved_state *state) +{ + int i; + + for (i=0; i<32; i++) { + if ((i % 8) == 0) + kprintf("\n%4d :",i); + kprintf(" %08x",*(&state->r0+i)); + } + + kprintf("\n"); + kprintf("cr = 0x%08x\t\t",state->cr); + kprintf("xer = 0x%08x\n",state->xer); + kprintf("lr = 0x%08x\t\t",state->lr); + kprintf("ctr = 0x%08x\n",state->ctr); + kprintf("srr0(iar) = 0x%08x\t\t",state->srr0); + kprintf("srr1(msr) = 0x%08B\n",state->srr1, + "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18" + "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT"); + kprintf("mq = 0x%08x\t\t",state->mq); + kprintf("sr_copyin = 0x%08x\n",state->sr_copyin); + kprintf("\n"); + + /* Be nice - for user tasks, generate some stack trace */ + if (state->srr1 & MASK(MSR_PR)) { + char *addr = (char*)state->r1; + unsigned int buf[2]; + for (i = 0; i < 8; i++) { + if (addr == (char*)NULL) + break; + if (!copyin(addr,(char*)buf, 2 * sizeof(int))) { + printf("0x%08x : %08x\n",buf[0],buf[1]); + addr = (char*)buf[0]; + } else { + break; + } + } + } +} +#endif /* DEBUG */ + +#ifdef 0 +/* + * invalidate_cache_for_io + * + * Takes cache of those requests which may require to flush the + * data cache 
first before invalidation. + */ + + +void +invalidate_cache_for_io(vm_offset_t area, unsigned count, boolean_t phys) +{ + vm_offset_t aligned_start, aligned_end, end; + + /* For unaligned reads we need to flush any + * unaligned cache lines. We invalidate the + * rest as this is faster + */ + + aligned_start = area & ~(CACHE_LINE_SIZE-1); + if (aligned_start != area) + flush_dcache(aligned_start, CACHE_LINE_SIZE, phys); + + end = area + count; + aligned_end = (end & ~(CACHE_LINE_SIZE-1)); + if (aligned_end != end) + flush_dcache(aligned_end, CACHE_LINE_SIZE, phys); + + invalidate_dcache(area, count, phys); +} + +extern void tracecopyin(unsigned int src, unsigned int dest, unsigned int lgn, unsigned int from); +void tracecopyin(unsigned int src, unsigned int dest, unsigned int lgn, unsigned int from) { + + spl_t spl; + + spl = splhigh(); + printf("Copy in called from %08X: src=%08X; dest=%08X; lgn=%08X\n", from, src, dest, lgn); + splx(spl); + return; +} + +extern void tracecopyout(unsigned int src, unsigned int dest, unsigned int lgn, unsigned int from); +void tracecopyout(unsigned int src, unsigned int dest, unsigned int lgn, unsigned int from) { + + spl_t spl; + + spl = splhigh(); + printf("Copy out called from %08X: src=%08X; dest=%08X; lgn=%08X\n", from, src, dest, lgn); + splx(spl); + return; +} + +extern void tracecopystr(unsigned int src, unsigned int dest, unsigned int max, + unsigned int lgn, unsigned int from); +void tracecopystr(unsigned int src, unsigned int dest, unsigned int max, + unsigned int lgn, unsigned int from) { + + spl_t spl; + + spl = splhigh(); + printf("Copy in string called from %08X: src=%08X; dest=%08X; max=%08X; lgnadr=%08X\n", + from, src, dest, max, lgn); + splx(spl); + return; +} + +unsigned int ExceptionTrace = 0; +extern void ExceptionTracePrint(struct savearea *sv, int type); +void ExceptionTracePrint(struct savearea *sv, int type) { + + spl_t spl; + + spl = splhigh(); + + if(type) { + printf(" Trap from %08X, type=%08X, R0=%08X, 
R1=%08X, R3=%08X, LR=%08X, AST=%08X\n", + sv->save_srr0, sv->save_exception, sv->save_r0, sv->save_r1, sv->save_r3, + sv->save_lr, need_ast[0]); + } + else { + printf("Syscall from %08X, type=%08X, R0=%08X, R1=%08X, R3=%08X, LR=%08X, AST=%08X\n", + sv->save_srr0, sv->save_exception, sv->save_r0, sv->save_r1, sv->save_r3, + sv->save_lr, need_ast[0]); + } + splx(spl); + return; +} +#endif diff --git a/osfmk/ppc/misc_asm.s b/osfmk/ppc/misc_asm.s new file mode 100644 index 000000000..fecfd70b6 --- /dev/null +++ b/osfmk/ppc/misc_asm.s @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * vm_offset_t getrpc(void) - Return address of the function + * that called the current function + */ + +/* By using this function, we force the caller to save its LR in a known + * location, which we can pick up and return. See PowerPC ELF specs. 
+ */ +ENTRY(getrpc, TAG_NO_FRAME_USED) + lwz ARG0, FM_BACKPTR(r1) /* Load our backchain ptr */ + lwz ARG0, FM_LR_SAVE(ARG0) /* Load previously saved LR */ + blr /* And return */ + + +/* Mask and unmask interrupts at the processor level */ +ENTRY(interrupt_disable, TAG_NO_FRAME_USED) + mfmsr r0 + rlwinm r0, r0, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 + mtmsr r0 + blr + +ENTRY(interrupt_enable, TAG_NO_FRAME_USED) + + mfmsr r0 + ori r0, r0, MASK(MSR_EE) + mtmsr r0 + blr + +#if MACH_KDB +/* + * Kernel debugger versions of the spl*() functions. This allows breakpoints + * in the spl*() functions. + */ + +/* Mask and unmask interrupts at the processor level */ +ENTRY(db_interrupt_disable, TAG_NO_FRAME_USED) + mfmsr r0 + rlwinm r0, r0, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 + mtmsr r0 + blr + +ENTRY(db_interrupt_enable, TAG_NO_FRAME_USED) + mfmsr r0 + ori r0, r0, MASK(MSR_EE) + mtmsr r0 + blr +#endif /* MACH_KDB */ + +/* + * General entry for all debuggers. This gets us onto the debug stack and + * then back off at exit. We need to pass back R3 to caller. + */ + +ENTRY(Call_Debugger, TAG_NO_FRAME_USED) + + mfmsr r7 ; Get the current MSR + mflr r0 ; Save the return + rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions + mtmsr r7 ; Do it + mfsprg r8,0 ; Get the per_proc block + stw r0,FM_LR_SAVE(r1) ; Save return on current stack + + lwz r9,PP_DEBSTACKPTR(r8) ; Get the debug stack + cmpwi r9,0 ; Are we already on it? + bne cdNewDeb ; No... 
+ + mr r9,r1 ; We are already on the stack, so use the current value + subi r9,r9,FM_REDZONE+FM_SIZE ; Carve some extra space here + +cdNewDeb: li r0,0 ; Clear this out + stw r1,FM_ARG0(r9) ; Save the old stack pointer as if it were the first arg + + stw r0,PP_DEBSTACKPTR(r8) ; Mark debug stack as busy + + subi r1,r9,FM_SIZE ; Carve a new frame + stw r0,FM_BACKPTR(r1) ; Chain back + + bl EXT(Call_DebuggerC) ; Call the "C" phase of this + + mfmsr r0 ; Get the MSR just in case it was enabled + addi r1,r1,FM_SIZE ; Pop off first stack frame + rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Turn off interruptions enable bit + mtmsr r0 + + mfsprg r8,0 ; Get the per_proc block address + + lwz r9,PP_DEBSTACK_TOP_SS(r8) ; Get the top of the stack + cmplw r1,r9 ; Have we hit the bottom of the debug stack? + lwz r1,FM_ARG0(r1) ; Get previous stack frame + lwz r0,FM_LR_SAVE(r1) ; Get return address + mtlr r0 ; Set the return point + bnelr ; Return if still on debug stack + + stw r9,PP_DEBSTACKPTR(r8) ; Mark debug stack as free + blr + + +/* The following routines are for C-support. 
They are usually + * inlined into the C using the specifications in proc_reg.h, + * but if optimisation is switched off, the inlining doesn't work + */ + +ENTRY(get_got, TAG_NO_FRAME_USED) + mr ARG0, r2 + blr + +ENTRY(mflr, TAG_NO_FRAME_USED) + mflr ARG0 + blr + +ENTRY(mfpvr, TAG_NO_FRAME_USED) + mfpvr ARG0 + blr + +ENTRY(mtmsr, TAG_NO_FRAME_USED) + mtmsr ARG0 + isync + blr + +ENTRY(mfmsr, TAG_NO_FRAME_USED) + mfmsr ARG0 + blr + +ENTRY(mtsrin, TAG_NO_FRAME_USED) + isync + mtsrin ARG0, ARG1 + isync + blr + +ENTRY(mfsrin, TAG_NO_FRAME_USED) + mfsrin ARG0, ARG0 + blr + +ENTRY(mtsdr1, TAG_NO_FRAME_USED) + mtsdr1 ARG0 + blr + +ENTRY(mtdar, TAG_NO_FRAME_USED) + mtdar ARG0 + blr + +ENTRY(mfdar, TAG_NO_FRAME_USED) + mfdar ARG0 + blr + +ENTRY(mtdec, TAG_NO_FRAME_USED) + mtdec ARG0 + blr + +/* Decrementer frequency and realtime|timebase processor registers + * are different between ppc601 and ppc603/4, we define them all. + */ + +ENTRY(isync_mfdec, TAG_NO_FRAME_USED) + isync + mfdec ARG0 + blr + + +ENTRY(mftb, TAG_NO_FRAME_USED) + mftb ARG0 + blr + +ENTRY(mftbu, TAG_NO_FRAME_USED) + mftbu ARG0 + blr + +ENTRY(mfrtcl, TAG_NO_FRAME_USED) + mfspr ARG0, 5 + blr + +ENTRY(mfrtcu, TAG_NO_FRAME_USED) + mfspr ARG0, 4 + blr + +ENTRY(tlbie, TAG_NO_FRAME_USED) + tlbie ARG0 + blr + + +/* + * Performance Monitor Register Support + */ + +ENTRY(mfmmcr0, TAG_NO_FRAME_USED) + mfspr r3,mmcr0 + blr + +ENTRY(mtmmcr0, TAG_NO_FRAME_USED) + mtspr mmcr0,r3 + blr + +ENTRY(mfmmcr1, TAG_NO_FRAME_USED) + mfspr r3,mmcr1 + blr + +ENTRY(mtmmcr1, TAG_NO_FRAME_USED) + mtspr mmcr1,r3 + blr + +ENTRY(mfmmcr2, TAG_NO_FRAME_USED) + mfspr r3,mmcr2 + blr + +ENTRY(mtmmcr2, TAG_NO_FRAME_USED) + mtspr mmcr2,r3 + blr + +ENTRY(mfpmc1, TAG_NO_FRAME_USED) + mfspr r3,pmc1 + blr + +ENTRY(mtpmc1, TAG_NO_FRAME_USED) + mtspr pmc1,r3 + blr + +ENTRY(mfpmc2, TAG_NO_FRAME_USED) + mfspr r3,pmc2 + blr + +ENTRY(mtpmc2, TAG_NO_FRAME_USED) + mtspr pmc2,r3 + blr + +ENTRY(mfpmc3, TAG_NO_FRAME_USED) + mfspr r3,pmc3 + blr + +ENTRY(mtpmc3, 
TAG_NO_FRAME_USED) + mtspr pmc3,r3 + blr + +ENTRY(mfpmc4, TAG_NO_FRAME_USED) + mfspr r3,pmc4 + blr + +ENTRY(mtpmc4, TAG_NO_FRAME_USED) + mtspr pmc4,r3 + blr + +ENTRY(mfsia, TAG_NO_FRAME_USED) + mfspr r3,sia + blr + +ENTRY(mfsda, TAG_NO_FRAME_USED) + mfspr r3,sda + blr + diff --git a/osfmk/ppc/misc_protos.h b/osfmk/ppc/misc_protos.h new file mode 100644 index 000000000..30b1612e8 --- /dev/null +++ b/osfmk/ppc/misc_protos.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_MISC_PROTOS_H_ +#define _PPC_MISC_PROTOS_H_ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +extern int strcmp(const char *s1, const char *s2); +extern int strncmp(const char *s1, const char *s2, unsigned long n); +extern char *strcat(char *dest, const char *src); +extern char *strcpy(char *dest, const char *src); + +extern void vprintf(const char *fmt, va_list args); +extern void printf(const char *fmt, ...); + +extern void bcopy_nc(char *from, char *to, int size); /* uncached-safe */ +extern void bcopy_phys(char *from, char *to, int size); /* Physical to physical copy (ints must be disabled) */ + +extern void ppc_init(boot_args *args); +extern struct ppc_saved_state *enterDebugger(unsigned int trap, + struct ppc_saved_state *state, + unsigned int dsisr); + +extern void ppc_vm_init(unsigned int mem_size, boot_args *args); +extern void regDump(struct ppc_saved_state *state); + +extern void autoconf(void); +extern void machine_init(void); +extern void machine_conf(void); +extern void probeio(void); +extern int cons_find(boolean_t); +extern void machine_startup(boot_args *args); + +extern void interrupt_init(void); +extern void interrupt_enable(void); +extern void interrupt_disable(void); +extern void disable_bluebox_internal(thread_act_t act); +#if MACH_KDB +extern void db_interrupt_enable(void); +extern void db_interrupt_disable(void); +#endif /* MACH_KDB */ +extern void amic_init(void); + +extern void phys_zero(vm_offset_t, vm_size_t); +extern void phys_copy(vm_offset_t, vm_offset_t, vm_size_t); + +extern void Load_context(thread_t th); + +extern struct thread_shuttle *Switch_context(struct thread_shuttle *old, + void (*cont)(void), + struct thread_shuttle *new); + +extern int nsec_to_processor_clock_ticks(int nsec); + +extern void tick_delay(int ticks); + +#ifdef DEBUG +#define DPRINTF(x) { 
printf("%s : ",__FUNCTION__);printf x; } +#endif /* DEBUG */ + +#if MACH_ASSERT +extern void dump_pcb(pcb_t pcb); +extern void dump_thread(thread_t th); +#endif + +#if NCPUS > 1 +extern void mp_probe_cpus(void); +#if MACH_KDB +extern void remote_kdb(void); +extern void clear_kdb_intr(void); +extern void kdb_console(void); +#endif /* MACH_KDB */ +#endif /* NCPUS > 1 */ + +#endif /* _PPC_MISC_PROTOS_H_ */ diff --git a/osfmk/ppc/model_dep.c b/osfmk/ppc/model_dep.c new file mode 100644 index 000000000..970e9b156 --- /dev/null +++ b/osfmk/ppc/model_dep.c @@ -0,0 +1,645 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * (c) Copyright 1988 HEWLETT-PACKARD COMPANY + * + * To anyone who acknowledges that this file is provided "AS IS" + * without any express or implied warranty: + * permission to use, copy, modify, and distribute this file + * for any purpose is hereby granted without fee, provided that + * the above copyright notice and this notice appears in all + * copies, and that the name of Hewlett-Packard Company not be + * used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. + * Hewlett-Packard Company makes no representations about the + * suitability of this software for any purpose. + */ +/* + * Copyright (c) 1990,1991,1992,1994 The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS" + * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES + * WHATSOEVER RESULTING FROM ITS USE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. 
+ * + * Utah $Hdr: model_dep.c 1.34 94/12/14$ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include /* for btop */ + +#if MACH_KDB +#include +#include +#include +#include + +extern struct db_command ppc_db_commands[]; +#endif /* MACH_KDB */ + +char kernel_args_buf[256] = "/mach_kernel"; +char boot_args_buf[256] = "/mach_servers/bootstrap"; +char env_buf[256]; + +#define TRAP_DEBUGGER __asm__ volatile("tw 4,r3,r3"); +#define TRAP_DEBUGGER_INST 0x7c831808 +#define TRAP_DIRECT __asm__ volatile("tw 4,r4,r4"); +#define TRAP_DIRECT_INST 0x7c842008 +#define TRAP_INST_SIZE 4 +#define BREAK_TO_KDP0 0x7fe00008 +#define BREAK_TO_KDP1 0x7c800008 +#define BREAK_TO_KDB0 0x7c810808 + +/* + * Code used to synchronize debuggers among all cpus, one active at a time, switch + * from on to another using kdb_on! 
#cpu or cpu #cpu + */ + +decl_simple_lock_data(, debugger_lock) /* debugger lock */ + +int debugger_cpu = -1; /* current cpu running debugger */ +int debugger_debug = 0; /* Debug debugger */ +int debugger_is_slave[NCPUS]; /* Show that we were entered via sigp */ +int debugger_active[NCPUS]; /* Debugger active on CPU */ +int debugger_pending[NCPUS]; /* Debugger entry pending on CPU (this is a HACK) */ +int debugger_holdoff[NCPUS]; /* Holdoff debugger entry on this CPU (this is a HACK) */ +int db_run_mode; /* Debugger run mode */ +unsigned int debugger_sync = 0; /* Cross processor debugger entry sync */ +extern unsigned int NMIss; /* NMI debounce switch */ + +unsigned int lastTrace; /* Value of low-level exception trace controls */ + +volatile unsigned int cpus_holding_bkpts; /* counter for number of cpus holding + breakpoints (ie: cpus that did not + insert back breakpoints) */ +void unlock_debugger(void); +void lock_debugger(void); + +#if !MACH_KDB +boolean_t db_breakpoints_inserted = TRUE; +jmp_buf_t *db_recover = 0; +#endif + +#if MACH_KDB +#include +int kdb_flag=0; +extern boolean_t db_breakpoints_inserted; +extern jmp_buf_t *db_recover; +#define KDB_READY 0x1 +#endif + +#if MACH_KDP +extern int kdp_flag; +#define KDP_READY 0x1 +#endif + +boolean_t db_im_stepping = 0xFFFFFFFF; /* Remember if we were stepping */ + +extern const char version[]; + +#if !MACH_KDB +void kdb_trap(int type, struct ppc_saved_state *regs); +void kdb_trap(int type, struct ppc_saved_state *regs) { + return; +} +#endif + +#if !MACH_KDP +void kdp_trap(int type, struct ppc_saved_state *regs); +void kdp_trap(int type, struct ppc_saved_state *regs) { + return; +} +#endif + +void +machine_startup(boot_args *args) +{ + int boot_arg; + + if (PE_parse_boot_arg("cpus", &wncpu)) { + if (!((wncpu > 0) && (wncpu < NCPUS))) + wncpu = NCPUS; + } else + wncpu = NCPUS; + + if( PE_get_hotkey( kPEControlKey )) + halt_in_debugger = halt_in_debugger ? 
0 : 1; + + if (PE_parse_boot_arg("debug", &boot_arg)) { + if (boot_arg & DB_HALT) halt_in_debugger=1; + if (boot_arg & DB_PRT) disableDebugOuput=FALSE; + if (boot_arg & DB_SLOG) systemLogDiags=TRUE; + } + + hw_lock_init(&debugger_lock); /* initialized debugger lock */ + +#if MACH_KDB + /* + * Initialize KDB + */ +#if DB_MACHINE_COMMANDS + db_machine_commands_install(ppc_db_commands); +#endif /* DB_MACHINE_COMMANDS */ + ddb_init(); + + if (boot_arg & DB_KDB) + current_debugger = KDB_CUR_DB; + + /* + * Cause a breakpoint trap to the debugger before proceeding + * any further if the proper option bit was specified in + * the boot flags. + */ + if (halt_in_debugger && (current_debugger == KDB_CUR_DB)) { + Debugger("inline call to debugger(machine_startup)"); + halt_in_debugger = 0; + active_debugger =1; + } +#endif /* MACH_KDB */ + if (PE_parse_boot_arg("preempt", &boot_arg)) { + extern int default_preemption_rate; + + default_preemption_rate = boot_arg; + } + if (PE_parse_boot_arg("kpreempt", &boot_arg)) { + extern int kernel_preemption_mode; + extern boolean_t zone_gc_allowed; + + kernel_preemption_mode = boot_arg; + zone_gc_allowed = FALSE; /* XXX: TO BE REMOVED */ + } + + machine_conf(); + + ml_thrm_init(); /* Start thermal monitoring on this processor */ + + /* + * Start the system. 
+ */ + setup_main(); + + /* Should never return */ +} + +char * +machine_boot_info( + char *buf, + vm_size_t size) +{ + return(PE_boot_args()); +} + +void +machine_conf(void) +{ + machine_info.max_cpus = NCPUS; + machine_info.avail_cpus = 1; + machine_info.memory_size = mem_size; +} + +void +machine_init(void) +{ + clock_config(); +} + +void slave_machine_init(void) +{ + (void) ml_set_interrupts_enabled(FALSE); /* Make sure we are disabled */ + clock_init(); /* Init the clock */ + cpu_machine_init(); /* Initialize the processor */ +} + +void +halt_all_cpus(boolean_t reboot) +{ + if(reboot) + { + printf("MACH Reboot\n"); + PEHaltRestart(kPERestartCPU); + } + else + { + printf("CPU halted\n"); + PEHaltRestart(kPEHaltCPU); + } + while(1); +} + +void +halt_cpu(void) +{ + halt_all_cpus(FALSE); +} + +#if MACH_ASSERT +/* + * Machine-dependent routine to fill in an array with up to callstack_max + * levels of return pc information. + */ +void machine_callstack( + natural_t *buf, + vm_size_t callstack_max) +{ +} +#endif /* MACH_ASSERT */ + + +void +print_backtrace(struct ppc_saved_state *ssp) +{ + unsigned int *stackptr, *raddr, *rstack, trans; + int i, frames_cnt, skip_top_frames, frames_max; + unsigned int store[8]; /* Buffer for real storage reads */ + vm_offset_t backtrace_entries[32]; + + printf("backtrace: "); + frames_cnt =0; + + /* Get our stackpointer for backtrace */ + if (ssp==NULL) { + __asm__ volatile("mr %0, r1" : "=r" (stackptr)); + skip_top_frames = 1; + } else { + stackptr = (unsigned int *)(ssp->r1); + skip_top_frames = 0; + backtrace_entries[frames_cnt] = ssp->srr0; + frames_cnt++; + printf("0x%08x ", ssp->srr0); + } + + frames_max = 32-frames_cnt; + for (i = 0; i < frames_max; i++) { + + if(!stackptr) break; /* No more to get... 
*/ + + /* Avoid causing page fault */ + if (!(raddr = LRA(PPC_SID_KERNEL, (void *)((unsigned int)stackptr+FM_LR_SAVE)))) + break; + ReadReal((unsigned int)raddr, &store[0]); + if (skip_top_frames) + skip_top_frames--; + else { + backtrace_entries[frames_cnt] = store[0]; + frames_cnt++; + printf("0x%08x ",store[0]); + } + if (!(raddr = LRA(PPC_SID_KERNEL, (void *)stackptr))) + break; + ReadReal((unsigned int)raddr, &store[0]); + stackptr=(unsigned int *)store[0]; + } + printf("\n"); + + if (frames_cnt) + kmod_dump((vm_offset_t *)&backtrace_entries[0], frames_cnt); +} + +void +Debugger(const char *message) { + + int i; + unsigned int store[8]; + spl_t spl; + + spl = splhigh(); /* No interruptions from here on */ + +/* + * backtrace for Debugger() call from panic() if no current debugger + * backtrace and return for double panic() call + */ + if ((panicstr != (char *)0) && + (((nestedpanic != 0) && (current_debugger == 1)) || (active_debugger == 0))) { + print_backtrace(NULL); + if (nestedpanic != 0) { + splx(spl); + return; /* Yeah, don't enter again... */ + } + } + + if (debug_mode && debugger_active[cpu_number()]) { /* Are we already on debugger on this processor? */ + splx(spl); + return; /* Yeah, don't do it again... 
*/ + } + + if ((current_debugger != NO_CUR_DB)) { /* If there is a debugger configured, enter it */ + printf("Debugger(%s)\n", message); + TRAP_DEBUGGER; + splx(spl); + return; /* Done debugging for a while */ + } + + printf("\nNo debugger configured - dumping debug information\n"); + printf("\nversion string : %s\n",version); + mfdbatu(store[0],0); + mfdbatl(store[1],0); + mfdbatu(store[2],1); + mfdbatl(store[3],1); + mfdbatu(store[4],2); + mfdbatl(store[5],2); + mfdbatu(store[6],3); + mfdbatl(store[7],3); + printf("DBAT0: %08X %08X\n", store[0], store[1]); + printf("DBAT1: %08X %08X\n", store[2], store[3]); + printf("DBAT2: %08X %08X\n", store[4], store[5]); + printf("DBAT3: %08X %08X\n", store[6], store[7]); + printf("MSR=%08X\n",mfmsr()); + print_backtrace(NULL); + splx(spl); + return; +} + +/* + * When we get here, interruptions are disabled and we are on the debugger stack + * Never, ever, ever, ever enable interruptions from here on + */ + +int Call_DebuggerC( + int type, + struct ppc_saved_state *saved_state) +{ + int directcall, wait; + vm_offset_t instr_ptr; + unsigned int instr; + int my_cpu, tcpu; + + my_cpu = cpu_number(); /* Get our CPU */ + +#if MACH_KDB + if((debugger_cpu == my_cpu) && /* Do we already own debugger? */ + debugger_active[my_cpu] && /* and are we really active? */ + db_recover && /* and have we set up recovery? */ + (current_debugger == KDB_CUR_DB)) { /* and are we in KDB (only it handles recovery) */ + kdb_trap(type, saved_state); /* Then reenter it... */ + } +#endif + + hw_atomic_add(&debug_mode, 1); /* Indicate we are in debugger */ + debugger_active[my_cpu]++; /* Show active on our CPU */ + lock_debugger(); /* Insure that only one CPU is in debugger */ + + if(db_im_stepping == my_cpu) { /* Are we just back from a step? 
*/ + enable_preemption_no_check(); /* Enable preemption now */ + db_im_stepping = 0xFFFFFFFF; /* Nobody stepping right now */ + } + + if (debugger_debug) { +#if 0 + kprintf("Call_DebuggerC(%d): %08X %08X, debact = %d\n", my_cpu, type, saved_state, debug_mode); /* (TEST/DEBUG) */ +#endif + printf("Call_Debugger: enter - cpu %d, is_slave %d, debugger_cpu %d, pc %08X\n", + my_cpu, debugger_is_slave[my_cpu], debugger_cpu, saved_state->srr0); + } + + if (instr_ptr = (vm_offset_t)LRA(PPC_SID_KERNEL, (void *)(saved_state->srr0))) { + instr = ml_phys_read(instr_ptr); /* Get the trap that caused entry */ + } + else instr = 0; + +#if 0 + if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_ptr = %08X, instr = %08X\n", my_cpu, instr_ptr, instr); /* (TEST/DEBUG) */ +#endif + + if (db_breakpoints_inserted) cpus_holding_bkpts++; /* Bump up the holding count */ + if (debugger_cpu == -1 && !debugger_is_slave[my_cpu]) { +#if 0 + if (debugger_debug) kprintf("Call_DebuggerC(%d): lasttrace = %08X\n", my_cpu, lastTrace); /* (TEST/DEBUG) */ +#endif + debugger_cpu = my_cpu; /* Show that we are debugger */ + lastTrace = LLTraceSet(0); /* Disable low-level tracing */ + + for(tcpu = 0; tcpu < NCPUS; tcpu++) { /* Stop all the other guys */ + if(tcpu == my_cpu) continue; /* Don't diddle ourselves */ + hw_atomic_add(&debugger_sync, 1); /* Count signal sent */ + (void)cpu_signal(tcpu, SIGPdebug, 0 ,0); /* Tell 'em to enter debugger */ + } + (void)hw_cpu_sync(&debugger_sync, LockTimeOut); /* Wait for the other processors to enter debug */ + debugger_sync = 0; /* We're done with it */ + } + else if (debugger_cpu != my_cpu) goto debugger_exit; /* We are not debugger, don't continue... */ + + + if (instr == TRAP_DIRECT_INST) { + disableDebugOuput = FALSE; + print_backtrace(saved_state); + } + + switch_debugger = 0; /* Make sure switch request is off */ + directcall = 1; /* Assume direct call */ + + if (saved_state->srr1 & MASK(SRR1_PRG_TRAP)) { /* Trap instruction? 
*/ + + directcall = 0; /* We had a trap not a direct call */ + + switch (instr) { /* Select trap type */ + +#if MACH_KDP + case BREAK_TO_KDP0: /* Breakpoint into KDP? */ + case BREAK_TO_KDP1: /* Breakpoint into KDP? */ + current_debugger = KDP_CUR_DB; /* Yes, set KDP */ + kdp_trap(type, saved_state); /* Enter it */ + break; +#endif + +#if MACH_KDB + case BREAK_TO_KDB0: /* Breakpoint to KDB (the "good" debugger)? */ + current_debugger = KDB_CUR_DB; /* Yes, set it */ + kdb_trap(type, saved_state); /* Enter it */ + break; +#endif + + case TRAP_DEBUGGER_INST: /* Should we enter the current debugger? */ + case TRAP_DIRECT_INST: /* Should we enter the current debugger? */ + if (current_debugger == KDP_CUR_DB) /* Is current KDP? */ + kdp_trap(type, saved_state); /* Yes, enter it */ + else if (current_debugger == KDB_CUR_DB) /* Is this KDB? */ + kdb_trap(type, saved_state); /* Yes, go ahead and enter */ + else goto debugger_error; /* No debugger active */ + break; + + default: /* Unknown/bogus trap type */ + goto debugger_error; + } + } + + while(1) { /* We are here to handle debugger switches */ + + if(!directcall) { /* Was this a direct call? */ + if(!switch_debugger) break; /* No, then leave if no switch requested... */ + +/* + * Note: we can only switch to a debugger we have. Ignore bogus switch requests. 
+ */ +#if 0 + if (debugger_debug) kprintf("Call_DebuggerC(%d): switching debuggers\n", my_cpu); /* (TEST/DEBUG) */ +#endif +#if MACH_KDB + if(current_debugger == KDP_CUR_DB) current_debugger = KDB_CUR_DB; /* Switch to KDB */ +#if MACH_KDP + else +#endif +#endif +#if MACH_KDP + if(current_debugger == KDB_CUR_DB) current_debugger = KDP_CUR_DB; /* Switch to KDP */ +#endif + } + + switch_debugger = 0; /* Clear request */ + directcall = 0; /* Clear first-time direct call indication */ + + switch (current_debugger) { /* Enter correct debugger */ + + case KDP_CUR_DB: /* Enter KDP */ + kdp_trap(type, saved_state); + break; + + case KDB_CUR_DB: /* Enter KDB */ + kdb_trap(type, saved_state); + break; + + default: /* No debugger installed */ + goto debugger_error; + break; + } + } + +debugger_exit: +#if 0 + if (debugger_debug) kprintf("Call_DebuggerC(%d): exit - inst = %08X, cpu=%d(%d), run=%d\n", my_cpu, + instr, my_cpu, debugger_cpu, db_run_mode); /* (TEST/DEBUG) */ +#endif + if ((instr == TRAP_DEBUGGER_INST) || /* Did we trap to enter debugger? */ + (instr == TRAP_DIRECT_INST)) saved_state->srr0 += TRAP_INST_SIZE; /* Yes, point past trap */ + + if(debugger_cpu == my_cpu) LLTraceSet(lastTrace); /* Enable tracing on the way out if we are debugger */ + + wait = FALSE; /* Assume we are not going to wait */ + if (db_run_mode == STEP_CONTINUE) { /* Are we going to run? */ + wait = TRUE; /* Yeah, remember to wait for breakpoints to clear */ + debugger_cpu = -1; /* Release other processor's debuggers */ + debugger_pending[0] = 0; /* Release request (this is a HACK) */ + debugger_pending[1] = 0; /* Release request (this is a HACK) */ + NMIss = 0; /* Let NMI bounce */ + } + + if(db_run_mode == STEP_ONCE) { /* Are we about to step? 
*/ + disable_preemption(); /* Disable preemption for the step */ + db_im_stepping = my_cpu; /* Remember that I am about to step */ + } + + if (db_breakpoints_inserted) cpus_holding_bkpts--; /* If any breakpoints, back off count */ + if (debugger_is_slave[my_cpu]) debugger_is_slave[my_cpu]--; /* If we were a slove, uncount us */ + if (debugger_debug) + printf("Call_Debugger: exit - cpu %d, debugger_cpu %d, run_mode %d holds %d\n", + my_cpu, debugger_cpu, db_run_mode, + cpus_holding_bkpts); + + unlock_debugger(); /* Release the lock */ + debugger_active[my_cpu]--; /* Say we aren't active anymore */ + + if (wait) while(cpus_holding_bkpts); /* Wait for breakpoints to clear */ + + hw_atomic_sub(&debug_mode, 1); /* Set out of debug now */ + + return(1); /* Exit debugger normally */ + +debugger_error: + if(db_run_mode != STEP_ONCE) enable_preemption_no_check(); /* Enable preemption, but don't preempt here */ + hw_atomic_sub(&debug_mode, 1); /* Set out of debug now */ + return(0); /* Return in shame... */ + +} + +void lock_debugger(void) { + int my_cpu; + register int i; + + my_cpu = cpu_number(); /* Get our CPU number */ + + while(1) { /* Check until we get it */ + + if (debugger_cpu != -1 && debugger_cpu != my_cpu) continue; /* Someone, not us, is debugger... */ + if (hw_lock_try(&debugger_lock)) { /* Get the debug lock */ + if (debugger_cpu == -1 || debugger_cpu == my_cpu) break; /* Is it us? */ + hw_lock_unlock(&debugger_lock); /* Not us, release lock */ + } + } +} + +void unlock_debugger(void) { + + hw_lock_unlock(&debugger_lock); + +} + + diff --git a/osfmk/ppc/movc.s b/osfmk/ppc/movc.s new file mode 100644 index 000000000..4d88e9cee --- /dev/null +++ b/osfmk/ppc/movc.s @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#include +#include +#include +#include +#include +#include + +/* + * void pmap_zero_page(vm_offset_t pa) + * + * zero a page of physical memory. + */ + +#if DEBUG + /* C debug stub in pmap.c calls this */ +ENTRY(pmap_zero_page_assembler, TAG_NO_FRAME_USED) +#else +ENTRY(pmap_zero_page, TAG_NO_FRAME_USED) +#endif /* DEBUG */ + + mfmsr r6 /* Get the MSR */ + rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* Turn off DR */ + rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions + li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */ + mtmsr r7 /* Set MSR to DR off */ + isync /* Ensure data translations are off */ + + +.L_phys_zero_loop: + subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */ + dcbz r4, r3 /* Clear the whole thing to 0s */ + subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */ + dcbz r5, r3 /* Clear the next to zeros */ + bgt+ .L_phys_zero_loop /* Keep going until we do the page... */ + + sync /* Make sure they're all done */ + li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */ + +.L_inst_inval_loop: + subic. 
r5,r4,CACHE_LINE_SIZE /* Point to the next one */ + icbi r4, r3 /* Clear the whole thing to 0s */ + subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */ + icbi r5, r3 /* Clear the next to zeros */ + bgt+ .L_inst_inval_loop /* Keep going until we do the page... */ + + sync /* Make sure they're all done */ + + mtmsr r6 /* Restore original translations */ + isync /* Ensure data translations are on */ + + blr + +/* void + * phys_copy(src, dst, bytecount) + * vm_offset_t src; + * vm_offset_t dst; + * int bytecount + * + * This routine will copy bytecount bytes from physical address src to physical + * address dst. + */ + +ENTRY(phys_copy, TAG_NO_FRAME_USED) + + /* Switch off data translations */ + mfmsr r6 + rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 + rlwinm r7, r7, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 + mtmsr r7 + isync /* Ensure data translations are off */ + + subi r3, r3, 4 + subi r4, r4, 4 + + cmpwi r5, 3 + ble- .L_phys_copy_bytes +.L_phys_copy_loop: + lwz r0, 4(r3) + addi r3, r3, 4 + subi r5, r5, 4 + stw r0, 4(r4) + addi r4, r4, 4 + cmpwi r5, 3 + bgt+ .L_phys_copy_loop + + /* If no leftover bytes, we're done now */ + cmpwi r5, 0 + beq+ .L_phys_copy_done + +.L_phys_copy_bytes: + addi r3, r3, 3 + addi r4, r4, 3 +.L_phys_copy_byte_loop: + lbz r0, 1(r3) + addi r3, r3, 1 + subi r5, r5, 1 + stb r0, 1(r4) + addi r4, r4, 1 + cmpwi r5, 0 + bne+ .L_phys_copy_byte_loop + +.L_phys_copy_done: + mtmsr r6 /* Restore original translations */ + isync /* Ensure data translations are off */ + + blr + +/* void + * pmap_copy_page(src, dst) + * vm_offset_t src; + * vm_offset_t dst; + * + * This routine will copy the physical page src to physical page dst + * + * This routine assumes that the src and dst are page aligned and that the + * destination is cached. + * + * We also must assume that noone will be executing within the destination + * page. 
We also assume that this will be used for paging + * + */ + +#if DEBUG + /* if debug, we have a little piece of C around this + * in pmap.c that gives some trace ability + */ +ENTRY(pmap_copy_page_assembler, TAG_NO_FRAME_USED) +#else +ENTRY(pmap_copy_page, TAG_NO_FRAME_USED) +#endif /* DEBUG */ + +#if 0 + mfpvr r9 ; Get the PVR + rlwinm r9,r9,16,16,31 ; Isolate the PPC processor + cmplwi r9,PROCESSOR_VERSION_Max ; Do we have Altivec? + beq+ wegotaltivec ; Yeah... +#endif + + mfmsr r9 ; Get the MSR + stwu r1,-(FM_SIZE+32)(r1) ; Make a frame for us + rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions + ori r7,r7,lo16(MASK(MSR_FP)) ; Turn on the FPU + mtmsr r7 ; Disable rupts and enable FPU + isync + + stfd f0,FM_SIZE+0(r1) ; Save an FP register + rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the DDAT bit + stfd f1,FM_SIZE+8(r1) ; Save an FP register + addi r6,r3,PPC_PGBYTES ; Point to the start of the next page + stfd f2,FM_SIZE+16(r1) ; Save an FP register + mr r8,r4 ; Save the destination + stfd f3,FM_SIZE+24(r1) ; Save an FP register + + mtmsr r7 ; Set the new MSR + isync ; Ensure data translations are off + + dcbt br0, r3 /* Start in first input line */ + li r5, CACHE_LINE_SIZE /* Get the line size */ + +.L_pmap_copy_page_loop: + dcbz 0, r4 /* Allocate a line for the output */ + lfd f0, 0(r3) /* Get first 8 */ + lfd f1, 8(r3) /* Get second 8 */ + lfd f2, 16(r3) /* Get third 8 */ + stfd f0, 0(r4) /* Put first 8 */ + dcbt r5, r3 /* Start next line coming in */ + lfd f3, 24(r3) /* Get fourth 8 */ + stfd f1, 8(r4) /* Put second 8 */ + addi r3,r3,CACHE_LINE_SIZE /* Point to the next line in */ + stfd f2, 16(r4) /* Put third 8 */ + cmplw cr0,r3,r6 /* See if we're finished yet */ + stfd f3, 24(r4) /* Put fourth 8 */ + dcbst br0,r4 /* Force it out */ + addi r4,r4,CACHE_LINE_SIZE /* Point to the next line out */ + blt+ .L_pmap_copy_page_loop /* Copy the whole page */ + + sync /* Make sure they're all done */ + li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to 
the end of the page */ + +invalinst: + subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */ + icbi r4, r8 /* Trash the i-cache */ + subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */ + icbi r5, r8 /* Trash the i-cache */ + bgt+ invalinst /* Keep going until we do the page... */ + + rlwimi r7,r9,0,MSR_DR_BIT,MSR_DR_BIT ; Set DDAT if on + sync ; Make sure all invalidates done + + mtmsr r7 ; Set DDAT correctly + isync + + lfd f0,FM_SIZE+0(r1) ; Restore an FP register + lfd f1,FM_SIZE+8(r1) ; Restore an FP register + lfd f2,FM_SIZE+16(r1) ; Restore an FP register + lfd f3,FM_SIZE+24(r1) ; Restore an FP register + + lwz r1,0(r1) ; Pop up the stack + + mtmsr r9 ; Turn off FPU now and maybe rupts back on + isync + blr + +#if 0 +; +; This is not very optimal. We just do it here for a test of +; Altivec in the kernel. +; +wegotaltivec: + mfmsr r9 ; Get the MSR + lis r8,hi16(0xC0000000) ; Make sure we keep the first 2 vector registers + rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions + lis r6,lo16(2*256+128) ; Specify 128 blocks of 2 vectors each + rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the DDAT bit + ori r6,r6,32 ; Set a 32-byte stride + mtsprg 256,r8 ; Set VRSave + mtmsr r7 ; Disable rupts and turn xlate off + isync + + addi r11,r3,4096 ; Point to the next page + li r10,16 ; Get vector size + +avmovepg: lvxl v0,br0,r3 ; Get first half of line + dcba br0,r4 ; Allocate output + lvxl v1,r10,r3 ; Get second half of line + stvxl v0,br0,r4 ; Save first half of line + addi r3,r3,32 ; Point to the next line + icbi br0,r4 ; Make the icache go away also + stvxl v1,r10,r4 ; Save second half of line + cmplw r3,r11 ; Have we reached the next page? + dcbst br0,r4 ; Make sure the line is on its way out + addi r4,r4,32 ; Point to the next line + blt+ avmovepg ; Move the next line... 
+ + li r8,0 ; Clear this + sync ; Make sure all the memory stuff is done + mtsprg 256,r8 ; Show we are not using VRs any more + mtmsr r9 ; Translation and interruptions back on + isync + blr +#endif + + + + +/* + * int + * copyin(src, dst, count) + * vm_offset_t src; + * vm_offset_t dst; + * int count; + * + */ + +ENTRY2(copyin, copyinmsg, TAG_NO_FRAME_USED) + +/* Preamble allowing us to call a sub-function */ + mflr r0 + stw r0,FM_LR_SAVE(r1) + stwu r1,-(FM_SIZE+16)(r1) + + mfmsr r0 /* Get the MSR */ + rlwinm r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */ + mtmsr r6 /* Disable 'rupts */ + + mfsprg r6,0 /* Get the per_proc */ + lwz r6,PP_CPU_DATA(r6) + cmpli cr0,r5,0 + lwz r10,CPU_ACTIVE_THREAD(r6) + mtmsr r0 /* Set 'rupts back */ + ble- cr0,.L_copyinout_trivial + +/* we know we have a valid copyin to do now */ +/* Set up thread_recover in case we hit an illegal address */ + + lwz r8,THREAD_TOP_ACT(r10) + lis r11,hi16(.L_copyinout_error) + lwz r8,ACT_VMMAP(r8) + ori r11,r11,lo16(.L_copyinout_error) + add r9,r3,r5 /* Get the end of the source */ + lwz r8,VMMAP_PMAP(r8) ; Get the pmap + rlwinm r12,r3,6,26,29 ; Get index to the segment slot + subi r9,r9,1 /* Make sure we don't go too far */ + add r8,r8,r12 ; Start indexing to the segment value + stw r11,THREAD_RECOVER(r10) + xor r9,r9,r3 /* Smoosh 'em together */ + lwz r8,PMAP_SEGS(r8) ; Get the source SR value + rlwinm. r9,r9,0,1,3 /* Top nybble equal? */ + mtsr SR_COPYIN,r8 ; Set the SR + isync +#if 0 + lis r0,HIGH_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */ + ori r0,r0,LOW_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + +/* For optimization, we check if the copyin lies on a segment + * boundary. If it doesn't, we can use a simple copy. If it + * does, we split it into two separate copies in some C code. + */ + + bne- .L_call_copyin_multiple /* Nope, we went past the segment boundary... 
*/ + + rlwinm r3,r3,0,4,31 + oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */ + + bl EXT(bcopy) + +/* Now that copyin is done, we don't need a recovery point */ + mfmsr r7 /* Get the MSR */ + rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */ + mtmsr r6 /* Disable 'rupts */ + + mfsprg r6,0 /* Get the per_proc */ + + lwz r6,PP_CPU_DATA(r6) + addi r1,r1,FM_SIZE+16 + lwz r10,CPU_ACTIVE_THREAD(r6) + mtmsr r7 ; Restore interrupts + li r3,0 + lwz r0,FM_LR_SAVE(r1) + stw r3,THREAD_RECOVER(r10) /* Clear recovery */ + mtlr r0 + blr + +/* we get here via the exception handler if an illegal + * user memory reference was made. + */ +.L_copyinout_error: + +/* Now that copyin is done, we don't need a recovery point */ + + mfmsr r7 /* Get the MSR */ + rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */ + mtmsr r6 /* Disable 'rupts */ + + mfsprg r6,0 /* Get the per_proc */ + + lwz r6,PP_CPU_DATA(r6) + addi r1,r1,FM_SIZE+16 + lwz r10,CPU_ACTIVE_THREAD(r6) + mtmsr r7 ; Restore interrupts + li r4,0 + lwz r0,FM_LR_SAVE(r1) + stw r4,THREAD_RECOVER(r10) /* Clear recovery */ + mtlr r0 + li r3,EFAULT ; Indicate error (EFAULT) + blr + +.L_copyinout_trivial: + /* The copyin/out was for either 0 bytes or a negative + * number of bytes, return an appropriate value (0 == SUCCESS). + * cr0 still contains result of comparison of len with 0. + */ + li r3, 0 + beq+ cr0, .L_copyinout_negative + li r3, 1 +.L_copyinout_negative: + + /* unwind the stack */ + addi r1, r1, FM_SIZE+16 + lwz r0, FM_LR_SAVE(r1) + mtlr r0 + + blr + +.L_call_copyin_multiple: + + /* unwind the stack */ + addi r1, r1, FM_SIZE+16 + lwz r0, FM_LR_SAVE(r1) + mtlr r0 + + b EXT(copyin_multiple) /* not a call - a jump! 
*/ + +/* + * int + * copyout(src, dst, count) + * vm_offset_t src; + * vm_offset_t dst; + * int count; + * + */ + +ENTRY2(copyout, copyoutmsg, TAG_NO_FRAME_USED) + +/* Preamble allowing us to call a sub-function */ + + mflr r0 + stw r0,FM_LR_SAVE(r1) + stwu r1,-(FM_SIZE+16)(r1) + +#if 0 + stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ + stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ + stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ + mr r6,r0 /* (TEST/DEBUG) */ + + bl EXT(tracecopyout) /* (TEST/DEBUG) */ + + lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ + lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ + lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ +#endif + + mfmsr r7 /* Get the MSR */ + rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */ + mtmsr r6 /* Disable 'rupts */ + + mfsprg r6,0 /* Get the per_proc */ + + lwz r6,PP_CPU_DATA(r6) + cmpli cr0,r5,0 + lwz r10,CPU_ACTIVE_THREAD(r6) + mtmsr r7 /* Restore 'rupts */ + ble- cr0,.L_copyinout_trivial +/* we know we have a valid copyout to do now */ +/* Set up thread_recover in case we hit an illegal address */ + + + lwz r8,THREAD_TOP_ACT(r10) + lis r11,HIGH_ADDR(.L_copyinout_error) + lwz r8,ACT_VMMAP(r8) + rlwinm r12,r4,6,26,29 ; Get index to the segment slot + ori r11,r11,LOW_ADDR(.L_copyinout_error) + add r9,r4,r5 /* Get the end of the destination */ + lwz r8,VMMAP_PMAP(r8) + subi r9,r9,1 /* Make sure we don't go too far */ + add r8,r8,r12 ; Start indexing to the segment value + stw r11,THREAD_RECOVER(r10) + xor r9,r9,r4 /* Smoosh 'em together */ + lwz r8,PMAP_SEGS(r8) ; Get the source SR value + rlwinm. r9,r9,0,1,3 /* Top nybble equal? */ + mtsr SR_COPYIN,r8 + isync + + +/* For optimisation, we check if the copyout lies on a segment + * boundary. If it doesn't, we can use a simple copy. If it + * does, we split it into two separate copies in some C code. + */ + + bne- .L_call_copyout_multiple /* Nope, we went past the segment boundary... 
*/ + + rlwinm r4,r4,0,4,31 + oris r4,r4,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */ + + bl EXT(bcopy) + +/* Now that copyout is done, we don't need a recovery point */ + mfmsr r7 /* Get the MSR */ + rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */ + mtmsr r6 /* Disable 'rupts */ + + mfsprg r6,0 /* Get the per_proc */ + + lwz r6,PP_CPU_DATA(r6) + addi r1,r1,FM_SIZE+16 + lwz r10,CPU_ACTIVE_THREAD(r6) + mtmsr r7 ; Restore interrupts + li r3,0 + lwz r0,FM_LR_SAVE(r1) + stw r3,THREAD_RECOVER(r10) /* Clear recovery */ + mtlr r0 + blr + +.L_call_copyout_multiple: + /* unwind the stack */ + addi r1, r1, FM_SIZE+16 + lwz r0, FM_LR_SAVE(r1) + mtlr r0 + + b EXT(copyout_multiple) /* not a call - a jump! */ + +/* + * boolean_t + * copyinstr(src, dst, count, maxcount) + * vm_offset_t src; + * vm_offset_t dst; + * vm_size_t maxcount; + * vm_size_t* count; + * + * Set *count to the number of bytes copied + * + * If dst == NULL, don't copy, just count bytes. + * Only currently called from klcopyinstr. 
+ */ + +ENTRY(copyinstr, TAG_NO_FRAME_USED) + +/* Preamble allowing us to call a sub-function */ + mflr r0 + stw r0,FM_LR_SAVE(r1) + stwu r1,-(FM_SIZE+16)(r1) + +#if 0 + stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ + stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ + stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ + stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) */ + mr r7,r0 /* (TEST/DEBUG) */ + + bl EXT(tracecopystr) /* (TEST/DEBUG) */ + + lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */ + lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */ + lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */ + stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) */ +#endif + + mfmsr r0 /* Get the MSR */ + rlwinm r7,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */ + mtmsr r7 /* Disable 'rupts */ + + mfsprg r7,0 /* Get the per_proc */ + lwz r7,PP_CPU_DATA(r7) + cmpli cr0,r5,0 + lwz r10,CPU_ACTIVE_THREAD(r7) + mtmsr r0 /* Restore 'rupts */ + ble- cr0,.L_copyinout_trivial + +/* we know we have a valid copyin to do now */ +/* Set up thread_recover in case we hit an illegal address */ + + li r0,0 + lwz r8,THREAD_TOP_ACT(r10) + stw r0,0(r6) /* Clear result length */ + lis r11,HIGH_ADDR(.L_copyinout_error) + lwz r8,ACT_VMMAP(r8) ; Get the map for this activation + rlwinm r12,r3,6,26,29 ; Get index to the segment slot + lwz r8,VMMAP_PMAP(r8) + ori r11,r11,LOW_ADDR(.L_copyinout_error) + add r8,r8,r12 ; Start indexing to the segment value + stw r11,THREAD_RECOVER(r10) + rlwinm r3,r3,0,4,31 + lwz r7,PMAP_SEGS(r8) ; Get the source SR value + oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */ + +/* Copy byte by byte for now - TODO NMGS speed this up with + * some clever (but fairly standard) logic for word copies. + * We don't use a copyinstr_multiple since copyinstr is called + * with INT_MAX in the linux server. Eugh. + */ + + li r9,0 /* Clear byte counter */ + +/* If the destination is NULL, don't do writes, + * just count bytes. We set CR7 outside the loop to save time + */ + cmpwi cr7,r4,0 /* Is the destination null? 
*/ + +nxtseg: mtsr SR_COPYIN,r7 /* Set the source SR */ + isync + +.L_copyinstr_loop: + lbz r0,0(r3) /* Get the source */ + addic. r5,r5,-1 /* Have we gone far enough? */ + addi r3,r3,1 /* Bump source pointer */ + + cmpwi cr1,r0,0 /* Did we hit a null? */ + + beq cr7,.L_copyinstr_no_store /* If we are just counting, skip the store... */ + + stb r0,0(r4) /* Move to sink */ + addi r4,r4,1 /* Advance sink pointer */ + +.L_copyinstr_no_store: + + addi r9,r9,1 /* Count the character */ + beq- cr1,.L_copyinstr_done /* We're done if we did a null... */ + beq- cr0,L_copyinstr_toobig /* Also if we maxed the count... */ + +/* Check to see if the copyin pointer has moved out of the + * copyin segment, if it has we must remap. + */ + + rlwinm. r0,r3,0,4,31 /* Did we wrap around to 0? */ + bne+ cr0,.L_copyinstr_loop /* Nope... */ + + lwz r7,PMAP_SEGS+4(r8) ; Get the next source SR value + addi r8,r8,4 ; Point to the next segment + oris r3,r0,(SR_COPYIN_NUM << (28-16)) /* Reset the segment number */ + b nxtseg /* Keep going... */ + +L_copyinstr_toobig: + li r3,ENAMETOOLONG + b L_copyinstr_return +.L_copyinstr_done: + li r3,0 /* Normal return */ +L_copyinstr_return: + li r4,0 /* to clear thread_recover */ + stw r9,0(r6) /* Set how many bytes we did */ + stw r4,THREAD_RECOVER(r10) /* Clear recovery exit */ + + addi r1, r1, FM_SIZE+16 + lwz r0, FM_LR_SAVE(r1) + mtlr r0 + blr diff --git a/osfmk/ppc/mp.h b/osfmk/ppc/mp.h new file mode 100644 index 000000000..2c7da0d11 --- /dev/null +++ b/osfmk/ppc/mp.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_MP_H_ +#define _PPC_MP_H_ + +#include +#include + +#if NCPUS > 1 +extern void interrupt_stack_alloc(void); + +extern unsigned int wncpu; +#endif /* NCPUS > 1 */ + +#endif /* _PPC_MP_H_ */ diff --git a/osfmk/ppc/net_filter.c b/osfmk/ppc/net_filter.c new file mode 100644 index 000000000..3f9225a28 --- /dev/null +++ b/osfmk/ppc/net_filter.c @@ -0,0 +1,750 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + + +#if NET_FILTER_COMPILER + + +#define USE_EXTRA_REGS 0 + +#define REG_ZERO 0 /* Register we keep equal to 0. 
*/ +#define REG_DATAADDR 3 /* Address of packet data, and filter return. */ +#define REG_DATALEN 4 /* Length of packet data in two-byte units. */ +#define REG_HDRADDR 5 /* Address of header data. */ +#define REG_RET 3 /* Where to put return value. */ + +/* Originally we dealt in virtual register numbers which were essentially + indexes into this array, and only converted to machine register numbers + when emitting instructions. But that meant a lot of conversions, so + instead we deal with machine register numbers all along, even though this + means wasting slots in the regs[] array. */ +const unsigned char scratchregs[] = { + 6, 7, 8, 9, 10, 11, 12, +#if USE_EXTRA_REGS /* Callee-saves regs available if we save them. */ +#define INITIAL_NSCRATCHREGS 8 /* Number of registers above. */ + #error not yet written +#endif +}; +#define NSCRATCHREGS (sizeof scratchregs / sizeof scratchregs[0]) +#define NREGS 32 +#define NO_REG 1 /* This is the stack pointer! Flag value. */ + +#define MAX_LI 0x7fff /* Max unsigned value in an LI. */ + +#define BCLR(cond) ((19 << 26) | (cond) | (16 << 1)) +#define BLR() BCLR(COND_ALWAYS) +#define BC(cond, off) ((16 << 26) | (cond) | ((off) << 2)) +#define COND(BO, BI) (((BO) << (16 + 5)) | ((BI) << 16)) +#define COND_ALWAYS COND(COND_IF_ALWAYS, 0) +#define COND_EQ COND(COND_IF_TRUE, COND_BIT(0, BIT_EQ)) +#define COND_NE COND(COND_IF_FALSE, COND_BIT(0, BIT_EQ)) +#define COND_LE COND(COND_IF_FALSE, COND_BIT(0, BIT_GT)) +#define COND_GE COND(COND_IF_FALSE, COND_BIT(0, BIT_LT)) +#define COND_BIT(crf, bit) \ + ((crf) * 4 + (bit)) +#define BIT_EQ 2 +#define BIT_GT 1 +#define BIT_LT 0 +#define COND_IF_FALSE 0x04 +#define COND_IF_TRUE 0x0c +#define COND_IF_ALWAYS 0x14 + +/* For arithmetical instructions, a is the dest and b is the source; + for logical instructions, a is the source and b is the dest. Ho hum. 
*/ +#define IMMED(opcode, a, b, imm) \ + (((opcode) << 26) | ((a) << 21) | ((b) << 16) | \ + ((imm) & 0xffff)) +#define ADDI(dst, src, imm) \ + IMMED(14, dst, src, imm) +#define ADDIC(dst, src, imm) \ + IMMED(12, dst, src, imm) +#define SUBFIC(dst, src, imm) \ + IMMED(8, dst, src, imm) +#define LI(dst, imm) ADDI(dst, 0, (imm)) +#define ANDI(dst, src, imm) \ + IMMED(28, src, dst, imm) +#define ORI(dst, src, imm) \ + IMMED(24, src, dst, imm) +#define XORI(dst, src, imm) \ + IMMED(26, src, dst, imm) + +#define CMPL(lhs, rhs) ((31 << 26) | ((lhs) << 16) | ((rhs) << 11) | (32 << 1)) +#define CMPLI(lhs, imm) ((10 << 26) | ((lhs) << 16) | ((imm) & 0xffff)) + +#define INTEGER_OP(opcode, a, b, c) \ + ((31 << 26) | ((a) << 21) | ((b) << 16) | \ + ((c) << 11) | ((opcode) << 1)) +#define ARITH_OP(opcode, dst, lhs, rhs) \ + INTEGER_OP(opcode, dst, lhs, rhs) +#define ADD(dst, lhs, rhs) \ + ARITH_OP(OP_ADD, dst, lhs, rhs) +#define ADDE(dst, lhs, rhs) \ + ARITH_OP(OP_ADDE, dst, lhs, rhs) +#define SUBF(dst, lhs, rhs) \ + ARITH_OP(OP_SUBF, dst, lhs, rhs) +#define SUBFC(dst, lhs, rhs) \ + ARITH_OP(OP_SUBFC, dst, lhs, rhs) +#define SUBFE(dst, lhs, rhs) \ + ARITH_OP(OP_SUBFE, dst, lhs, rhs) +#define LOGIC_OP(opcode, dst, lhs, rhs) \ + INTEGER_OP(opcode, lhs, dst, rhs) +#define OR(dst, lhs, rhs) \ + LOGIC_OP(OP_OR, dst, lhs, rhs) +#define XOR(dst, lhs, rhs) \ + LOGIC_OP(OP_XOR, dst, lhs, rhs) +#define OP_ADD 266 +#define OP_ADDE 138 +#define OP_AND 28 +#define OP_OR 444 +#define OP_SRW 536 +#define OP_SUBF 40 +#define OP_SUBFC 8 +#define OP_SUBFE 136 +#define OP_XOR 316 +#define MR(dst, src) OR(dst, src, src) + +#define LHZ(dst, base, offset) \ + ((40 << 26) | ((dst) << 21) | ((base) << 16) | \ + ((offset) & 0xffff)) +#define LHZX(dst, base, index) \ + INTEGER_OP(279, dst, base, index) +#define MFCR(dst) INTEGER_OP(19, dst, 0, 0) + +#define RLWINM(dst, src, shiftimm, mbegin, mend) \ + ((21 << 26) | ((src) << 21) | ((dst) << 16) | \ + ((shiftimm) << 11) | ((mbegin) << 6) | ((mend) << 1)) 
+#define RLWNM(dst, src, shiftreg, mbegin, mend) \ + ((23 << 26) | ((src) << 21) | ((dst) << 16) | \ + ((shiftreg) << 11) | ((mbegin) << 6) | ((mend) << 1)) + +/* Every NETF_arg generates at most four instructions (4 for PUSHIND). + Every NETF_op generates at most 3 instructions (3 for EQ and NEQ). */ +#define MAX_INSTR_PER_ARG 4 +#define MAX_INSTR_PER_OP 3 +#define MAX_INSTR_PER_ITEM (MAX_INSTR_PER_ARG + MAX_INSTR_PER_OP) +int junk_filter[MAX_INSTR_PER_ITEM]; + +enum {NF_LITERAL, NF_HEADER, NF_DATA}; +struct common { /* Keeps track of values we might want to avoid reloading. */ + char type; /* NF_LITERAL: immediate; NF_HEADER: header word; + NF_DATA: data word. */ + char nuses; /* Number of remaining uses for this value. */ + unsigned char reg; + /* Register this value is currently in, or NO_REG if none. */ + unsigned short value; + /* Immediate value or header or data offset. */ +}; +struct reg { /* Keeps track of the current contents of registers. */ + unsigned char commoni; + /* Index in common[] of the contained value. */ +#define NOT_COMMON_VALUE NET_MAX_FILTER /* When not a common[] value. */ + unsigned char stacktimes; + /* Number of times register appears in stack. */ +}; +struct local { /* Gather local arrays so we could kalloc() if needed. */ + struct common common[NET_MAX_FILTER]; /* Potentially common values. */ + struct reg regs[NREGS]; /* Register statuses. */ + unsigned char commonpos[NET_MAX_FILTER]; /* Index in common[] for the + value loaded in each filter + command. */ + unsigned char stackregs[NET_FILTER_STACK_DEPTH]; + /* Registers making up the + stack. */ +#if USE_EXTRA_REGS + unsigned char maxreg; +#endif +}; + +int allocate_register(struct local *s, int commoni); +int compile_preamble(int *instructions, struct local *s); + +/* Compile a packet filter into POWERPC machine code. 
We do everything in + the 7 caller-saves registers listed in scratchregs[], except when + USE_EXTRA_REGS is defined, in which case we may also allocate callee- + saves registers if needed. (Not yet implemented on PPC.) + + Rather than maintaining an explicit stack in memory, we allocate registers + dynamically to correspond to stack elements -- we can do this because we + know the state of the stack at every point in the filter program. We also + attempt to keep around in registers values (immediates, or header or data + words) that are used later on, to avoid having to load them again. + Since there are only 7 registers being used, we might be forced to reload + a value that we could have kept if we had more. We might even be unable + to contain the stack in the registers, in which case we return failure and + cause the filter to be interpreted by net_do_filter(). But for all current + filters I looked at, 7 registers is enough even to avoid reloads. When + USE_EXTRA_REGS is defined there are about 28 available registers, which is + plenty. + + We depend heavily on NET_MAX_FILTER and NET_FILTER_STACK_DEPTH being + small. We keep indexes to arrays sized by them in char-sized fields, + originally because we tried allocating these arrays on the stack. + Even then we overflowed the small (4K) kernel stack, so we were forced + to allocate the arrays dynamically, which is the reason for the existence + of `struct local'. + + We also depend on the filter being logically correct, for instance not + being longer than NET_MAX_FILTER or underflowing its stack. This is + supposed to have been checked by parse_net_filter() before the filter + is compiled. + + We are supposed to return 1 (TRUE) if the filter accepts the packet + and 0 (FALSE) otherwise. In fact, we may return any non-zero value + for true, which is sufficient for our caller and convenient for us. + + There are lots and lots of optimisations that we could do but don't. 
+ This is supposedly a *micro*-kernel, after all. Here are some things + that could be added without too much headache: + - Using the condition register. We go to a lot of trouble to generate + integer truth values for EQ etc, but most of the time those values + are just ANDed or ORed together or used as arguments to COR etc. So + we could compute the comparison values directly into CR bits and + operate on them using the CR logical instructions without (most of + the time) ever having to generate integer equivalents. + - More registers. We could note the last uses of r3, r4, and + r5, and convert them to general registers after those uses. But if + register shortage turns out to be a problem it is probably best just + to define USE_EXTRA_REGS and have done with it. + - Minimising range checks. Every time we refer to a word in the data + part, we generate code to ensure that it is within bounds. But often + the truth of these tests is implied by earlier tests. Instead, at the + start of the filter and after every COR or CNAND we could insert + a single check when that is necessary. (After CAND and CNOR we don't + need to check since if they terminate it will be to return FALSE + anyway so all we'd do would be to return it prematurely.) + - Remembering immediate values. Instead of generating code as soon as we + see a PUSHLIT, we could remember that value and only generate code when + it is used. This would enable us to generate certain shorter + instructions (like addi) that incorporate the immediate value instead + of ever putting it in a register. + */ + +filter_fct_t +net_filter_alloc(filter_t *filter, unsigned int size, unsigned int *lenp) +{ + struct local *s; + int len, oldi, i, j, t, ncommon, sp; + int type, value, arg, op, reg, reg1, dst, commoni; + int returnfalseoffset; + int *instructions, *instp, *returnfalse; +#if USE_EXTRA_REGS + int oldmaxreg; +#endif + boolean_t compiling; + +#define SCHAR_MAX 127 /* machine/machlimits->h, anyone? 
*/ + assert(NET_MAX_FILTER <= SCHAR_MAX); + assert(NET_FILTER_STACK_DEPTH <= SCHAR_MAX); + assert(NREGS <= SCHAR_MAX); + + assert(size < NET_MAX_FILTER); + + s = (struct local *) kalloc(sizeof *s); + +#if USE_EXTRA_REGS + s->maxreg = INITIAL_NSCRATCHREGS; +#endif + len = 0; + compiling = FALSE; + returnfalse = junk_filter; + + /* This loop runs at least twice, once with compiling==FALSE to determine + the length of the instructions we will compile, and once with + compiling==TRUE to compile them. The code generated on the two passes + must be the same. In the USE_EXTRA_REGS case, the loop can be re-run + an extra time while !compiling, if we decide to use the callee-saves + registers. This is because we may be able to generate better code with + the help of these registers than before. */ + while (1) { + + /* Identify values that we can potentially preserve in a register to + avoid having to reload them. All immediate values and references to + known offsets in the header or data are candidates. The results of + this loop are the same on every run, so with a bit of work we + could run it just once; but this is not a time-critical + application. */ + ncommon = 0; + for (i = 0; i < size; i++) { + oldi = i; + arg = NETF_ARG(filter[i]); + if (arg == NETF_PUSHLIT) { + type = NF_LITERAL; + value = filter[++i]; + } else if (arg >= NETF_PUSHSTK) { + continue; + } else if (arg >= NETF_PUSHHDR) { + type = NF_HEADER; + value = arg - NETF_PUSHHDR; + } else if (arg >= NETF_PUSHWORD) { + type = NF_DATA; + value = arg - NETF_PUSHWORD; + } else { + continue; + } + for (j = 0; j < ncommon; j++) { + if (s->common[j].type == type && s->common[j].value == value) { + s->common[j].nuses++; + break; + } + } + if (j == ncommon) { + s->common[j].type = type; + s->common[j].value = value; + s->common[j].nuses = 1; + ncommon++; + } + s->commonpos[oldi] = j; + } + +#if USE_EXTRA_REGS + oldmaxreg = s->maxreg; +#endif + + /* Initially, no registers hold common values or are on the stack. 
*/ + for (i = 0; i < ncommon; i++) + s->common[i].reg = NO_REG; + for (i = 0; i < NSCRATCHREGS; i++) { + s->regs[scratchregs[i]].commoni = NOT_COMMON_VALUE; + s->regs[scratchregs[i]].stacktimes = 0; + } + + /* Now read through the filter and generate code. */ + sp = -1; /* sp points to top element */ + for (i = 0; i < size; i++) { + if (!compiling) + instp = junk_filter; + + assert(sp >= -1); + assert(sp < NET_FILTER_STACK_DEPTH - 1); + commoni = s->commonpos[i]; + arg = NETF_ARG(filter[i]); + op = NETF_OP(filter[i]); + + /* Generate code to get the required value into a register and + set `reg' to the number of this register. */ + switch (arg) { + case NETF_PUSHLIT: + value = filter[++i]; + reg = s->common[commoni].reg; + if (reg == NO_REG) { + if ((reg = allocate_register(s, commoni)) == NO_REG) + goto fail; + assert(value >= 0); /* Comes from unsigned short. */ + *instp++ = ORI(reg, REG_ZERO, value); + } + s->common[commoni].nuses--; + break; + case NETF_NOPUSH: + reg = s->stackregs[sp--]; + s->regs[reg].stacktimes--; + break; + case NETF_PUSHZERO: + reg = REG_ZERO; + break; + case NETF_PUSHIND: + case NETF_PUSHHDRIND: + reg1 = s->stackregs[sp--]; + s->regs[reg1].stacktimes--; + if (arg == NETF_PUSHIND) + *instp++ = CMPL(reg1, REG_DATALEN); + else + *instp++ = CMPLI(reg1, + NET_HDW_HDR_MAX/sizeof (unsigned short)); + *instp = BC(COND_GE, returnfalse - instp); + instp++; + if ((reg = allocate_register(s, -1)) == NO_REG) + goto fail; + *instp++ = ADD(reg, reg1, reg1); + *instp++ = LHZX(reg, (arg == NETF_PUSHIND) ? 
+ REG_DATAADDR : REG_HDRADDR, reg); + break; + default: + if (arg >= NETF_PUSHSTK) + reg = s->stackregs[sp - (arg - NETF_PUSHSTK)]; + else if (arg >= NETF_PUSHWORD) { + assert(2 * (NETF_PUSHHDR - NETF_PUSHWORD) <= MAX_LI); + assert(NETF_PUSHSTK - NETF_PUSHHDR <= MAX_LI); + reg = s->common[commoni].reg; + if (reg == NO_REG) { + if ((reg = allocate_register(s, commoni)) == NO_REG) + goto fail; + if (arg < NETF_PUSHHDR) { + value = arg - NETF_PUSHWORD; + *instp++ = CMPLI(REG_DATALEN, value); + *instp = BC(COND_LE, returnfalse - instp); + instp++; + reg1 = REG_DATAADDR; + } else { + value = arg - NETF_PUSHHDR; + reg1 = REG_HDRADDR; + } + *instp++ = LHZ(reg, reg1, 2 * value); + } + s->common[commoni].nuses--; + } + } + + /* Now generate code to do `op' on `reg1' (lhs) and `reg' (rhs). */ + if (op != NETF_NOP) { + reg1 = s->stackregs[sp--]; + s->regs[reg1].stacktimes--; + } + switch (op) { + case NETF_OP(NETF_CAND): + case NETF_OP(NETF_COR): + case NETF_OP(NETF_CNAND): + case NETF_OP(NETF_CNOR): + dst = -1; + case NETF_OP(NETF_NOP): + break; + default: + /* Allocate a register to put the result in. */ + if ((dst = allocate_register(s, -1)) == NO_REG) + goto fail; + } + switch (op) { + case NETF_OP(NETF_NOP): + dst = reg; + break; + case NETF_OP(NETF_EQ): + case NETF_OP(NETF_NEQ): + /* We arrange for the truth value to end up in the carry + flag and then put it in the destination register by + adding-with-carry zero to itself. To set the carry, we + first make a value `x' that is zero if the values are + equal; this is either their XOR, or, if we know the + rhs is 0, the lhs. Then to set the carry only when + x==0 we do `subfic dst,x,0' (subtract x from 0, setting + carry as not-borrow, so set only if x==0); to set it when + x!=0 we do `addic dst,x,-1' (add -1 to x setting carry, + so set unless x==0). We're only interested in the carry + from these operations, not dst. 
+ We don't test if reg1==REG_ZERO since in practice you + write NETF_PUSHLIT|NETF_EQ; the other order is eccentric + so you get an extra instruction, tough. */ + if (reg == REG_ZERO) + t = reg1; + else { + *instp++ = XOR(dst, reg1, reg); + t = dst; + } + *instp++ = (op == NETF_OP(NETF_EQ)) ? + SUBFIC(dst, t, 0) : ADDIC(dst, t, -1); + *instp++ = ADDE(dst, REG_ZERO, REG_ZERO); + break; + case NETF_OP(NETF_LT): + /* LT and GT take advantage of the fact that all numbers are + 16-bit quantities, so the sign bit after a subtraction + is a reliable indication of the relative magnitudes of + the operands. */ + *instp++ = SUBF(dst, reg, reg1); /* dst = reg1 - reg */ + *instp++ = RLWINM(dst, dst, 1, 31, 31); /* sign bit */ + break; + case NETF_OP(NETF_GT): + *instp++ = SUBF(dst, reg1, reg); /* dst = reg - reg1 */ + *instp++ = RLWINM(dst, dst, 1, 31, 31); /* sign bit */ + break; + case NETF_OP(NETF_LE): + /* LE and GE use the carry (= not-borrow) flag. When doing + a - b, there is a borrow if b > a, so carry if b <= a. */ + *instp++ = SUBFC(dst, reg1, reg); /* dst = reg - reg1 */ + *instp++ = ADDE(dst, REG_ZERO, REG_ZERO);/* ca if reg1 <= reg */ + break; + case NETF_OP(NETF_GE): + *instp++ = SUBFC(dst, reg, reg1); /* dst = reg1 - reg */ + *instp++ = ADDE(dst, REG_ZERO, REG_ZERO);/* ca if reg <= reg1 */ + break; + case NETF_OP(NETF_AND): + j = OP_AND; + goto logical; + case NETF_OP(NETF_OR): + j = OP_OR; + goto logical; + case NETF_OP(NETF_XOR): + j = OP_XOR; + goto logical; + case NETF_OP(NETF_RSH): + j = OP_SRW; +logical: + *instp++ = LOGIC_OP(j, dst, reg1, reg); + break; + case NETF_OP(NETF_ADD): + j = OP_ADD; + goto arithmetical; + case NETF_OP(NETF_SUB): + j = OP_SUBF; /* First reg subtracted from second. 
*/ +arithmetical: + *instp++ = ARITH_OP(j, dst, reg, reg1); + *instp++ = ANDI(dst, dst, 0xffff); + break; + case NETF_OP(NETF_LSH): + *instp++ = RLWNM(dst, reg1, reg, 16, 31); + break; + case NETF_OP(NETF_COR): + case NETF_OP(NETF_CNAND): + *instp++ = CMPL(reg1, reg); + *instp++ = BCLR((op == NETF_OP(NETF_COR)) ? COND_EQ : COND_NE); + break; + case NETF_OP(NETF_CAND): + case NETF_OP(NETF_CNOR): + *instp++ = CMPL(reg1, reg); + *instp = BC((op == NETF_OP(NETF_CAND)) ? COND_NE : COND_EQ, + returnfalse - instp); + instp++; + break; + default: + printf("op == 0x%x\n", op); + panic("net_filter_alloc: bad op"); + /* Should have been caught by parse_net_filter(). */ + } + /* If the op generated a result, push it on the stack. */ + if (dst >= 0) { + s->stackregs[++sp] = dst; + s->regs[dst].stacktimes++; + } + if (!compiling) { + assert(instp - junk_filter <= MAX_INSTR_PER_ITEM); + len += instp - junk_filter; + } + } + if (compiling) { + /* If the stack contains any values, we are supposed to return 0 or + 1 according as the top-of-stack is zero or not. Since the only + place we are called requires just zero-false/nonzero-true, we + simply copy the value into r3. If the stack is empty, we + leave the pointer value r3 intact to return TRUE. */ + if (sp >= 0) + *instp++ = MR(REG_RET, s->stackregs[sp]); + *instp++ = BLR(); + /* Branch here to return false. We could avoid adding these + instructions if they are not used, but practically every + filter does use them (for failure values when trying to + access values beyond the header or data length) so it's + not worth the effort. */ + assert(instp == returnfalse); + *instp++ = LI(REG_RET, 0); + *instp++ = BLR(); + break; + } else { + len += 1 + (sp >= 0); + /* For the reach-the-end return instruction(s). */ +#if USE_EXTRA_REGS + if (s->maxreg > oldmaxreg) { + len = 0; + continue; + } +#endif + len += compile_preamble(NULL, s); + returnfalseoffset = len; + len += 2; /* For the return-false instructions. 
*/ + } + if ((instructions = (int *) kalloc(len * sizeof (int))) == NULL) + return NULL; + returnfalse = instructions + returnfalseoffset; + instp = instructions; + instp += compile_preamble(instp, s); + compiling = TRUE; + } + + assert(instp - instructions == len); + *lenp = len * sizeof (int); + { + kern_return_t kr; + vm_machine_attribute_val_t val = MATTR_VAL_CACHE_SYNC; + + kr = pmap_attribute(kernel_pmap, (vm_offset_t) instructions, + len * sizeof (int), MATTR_CACHE, &val); + if (kr != KERN_SUCCESS) { + printf("net_filter_alloc: pmap_attribute -> 0x%x\n", kr); + return NULL; + } + } + kfree((vm_offset_t) s, sizeof *s); + return (filter_fct_t) instructions; +fail: + assert(!compiling); + kfree((vm_offset_t) s, sizeof *s); + printf("net_filter_alloc: failed to compile (filter too complex)\n"); + printf("-- will work, but more slowly; consider enabling USE_EXTRA_REGS\n"); + return NULL; +} + + +/* Allocate a register. Registers that are already being used to make up + the virtual stack are ineligible. Among the others, we choose the one + whose value has the least number of subsequent uses (ideally, and + usually, 0) of the common value it already holds. If commoni is >= + 0, it is the index in common[] of the value we are going to put in + the allocated register, so we can update the various data structures + appropriately. */ +int +allocate_register(struct local *s, int commoni) +{ + int i, reg, bestreg, nuses, bestregnuses, maxreg; + + bestreg = NO_REG; +#if USE_EXTRA_REGS + maxreg = s->maxreg; +#else + maxreg = NSCRATCHREGS; +#endif + while (1) { + bestregnuses = NOT_COMMON_VALUE; + for (i = 0; i < maxreg; i++) { + reg = scratchregs[i]; + if (s->regs[reg].stacktimes == 0) { + nuses = (s->regs[reg].commoni == NOT_COMMON_VALUE) ? 
+ 0 : s->common[s->regs[reg].commoni].nuses; + if (nuses < bestregnuses) { + bestreg = reg; + bestregnuses = nuses; + } + } + } + if (bestreg != NO_REG) + break; +#if USE_EXTRA_REGS + if (maxreg == NSCRATCHREGS) + return NO_REG; + s->maxreg = ++maxreg; +#else + return NO_REG; +#endif + } + if (bestregnuses > 0) + printf("net_filter_alloc: forced to reallocate r%d\n", bestreg); + /* With USE_EXTRA_REGS, we could push up the number of registers + here to have one extra available for common values, but it's usually + not worth the overhead of the extra save-and-restore in the preamble. + Anyway, this never happens with typical filters. */ + if (s->regs[bestreg].commoni != NOT_COMMON_VALUE) + s->common[s->regs[bestreg].commoni].reg = NO_REG; + if (commoni >= 0) { + s->regs[bestreg].commoni = commoni; + s->common[commoni].reg = bestreg; + } else + s->regs[bestreg].commoni = NOT_COMMON_VALUE; + return bestreg; +} + + +#define FIXED_PREAMBLE_INSTRUCTIONS 1 + +int +compile_preamble(int *instructions, struct local *s) +{ + int *instp; + int len = FIXED_PREAMBLE_INSTRUCTIONS; +#if USE_EXTRA_REGS +#error this hp code must be ported to the ppc + int extra_regs, i, j, t, disp; + + extra_regs = s->maxreg - INITIAL_NSCRATCHREGS; + if (extra_regs > 0) { + len = extra_regs * 2 + 4; + /* stw rp | (n-1) * stw | bl | stw | ldw rp | (n-1) * ldw | bv | ldw */ + } else + return 0; +#endif + if (instructions == NULL) + return len; + instp = instructions; + + *instp++ = LI(REG_ZERO, 0); + assert(instp - instructions == FIXED_PREAMBLE_INSTRUCTIONS); + +#if USE_EXTRA_REGS +#error this hp code must be ported to the ppc + /* Generate a wrapper function to save the callee-saves registers + before invoking the filter code we have generated. It would be + marginally better to have the filter branch directly to the + postamble code on return, but the difference is trivial and it + is easier to have it always branch to (rp). */ +#define FRAME_SIZE 128 /* This is plenty without being excessive. 
 */
+ *instp++ = STW_NEG(REG_RTN, 20, REG_SP); /* stw rp,-20(sp) */
+ i = INITIAL_NSCRATCHREGS;
+ t = STWM(scratchregs[i], FRAME_SIZE, REG_SP); /* stwm r3,128(sp) */
+ j = FRAME_SIZE;
+ while (++i < s->maxreg) {
+ *instp++ = t;
+ j -= sizeof (int);
+ t = STW_NEG(scratchregs[i], j, REG_SP); /* stw r4,-124(sp) &c */
+ }
+ disp = extra_regs + 2; /* n * ldw | bv | ldw rp */
+ *instp++ = BL(disp, REG_RTN); /* bl filter,rp */
+ *instp++ = t; /* stw in delay slot */
+ *instp++ = LDW_NEG(FRAME_SIZE + 20, REG_SP, REG_RTN);
+ /* ldw -148(sp),rp */
+ while (--i > INITIAL_NSCRATCHREGS) {
+ *instp++ = LDW_NEG(j, REG_SP, scratchregs[i]); /* ldw -124(sp),r4 &c */
+ j += sizeof (int);
+ }
+ *instp++ = BV(0, REG_RTN); /* bv (rp) */
+ *instp++ = LDWM_NEG(FRAME_SIZE, REG_SP, scratchregs[i]);
+ /* ldwm -128(sp),r3
+ in delay slot */
+#endif
+
+ assert(instp - instructions == len);
+ return len;
+}
+
+void
+net_filter_free(filter_fct_t fp, unsigned int len)
+{
+ kfree((vm_offset_t) fp, len);
+}
+
+#else /* NET_FILTER_COMPILER */
+
+/*
+ * Compilation of a source network filter into ppc instructions
+ * - a small version that doesn't do anything, but doesn't take
+ * up any space either. Note that if using a single mklinux server
+ * with ethertalk enabled (standard situation), the filter passes
+ * everything through so no need to compile one. If running multi
+ * servers then there is more of a need. Ethertalk (in linux server)
+ * should really have a packet filter, but at time of writing
+ * it does not. 
+ */ +filter_fct_t +net_filter_alloc( + filter_t *fpstart, + unsigned int fplen, + unsigned int *len) +{ + *len = 0; + return ((filter_fct_t)0); +} + +void +net_filter_free( + filter_fct_t fp, + unsigned int len) +{ + assert(fp == (filter_fct_t)0 && len == 0); +} +#endif /* NET_FILTER_COMPILER */ diff --git a/osfmk/ppc/new_screen.h b/osfmk/ppc/new_screen.h new file mode 100644 index 000000000..0bec494ff --- /dev/null +++ b/osfmk/ppc/new_screen.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _NEW_SCREEN_H_ +#define _NEW_SCREEN_H_ + +#include + +/* AV and HPV cards */ +#define AV_BUFFER_START 0xE0000000 +#define AV_BUFFER_END 0xE0500000 +#define HPV_BUFFER_START 0xFE000000 +#define HPV_BUFFER_END 0xFF000000 + +extern void clear_RGB16(int color); +extern void adj_position(unsigned char C); +extern void put_cursor(int color); +extern void screen_put_char(unsigned char C); +extern void initialize_screen(void *); +#endif /* _NEW_SCREEN_H_ */ diff --git a/osfmk/ppc/notify_interrupt.c b/osfmk/ppc/notify_interrupt.c new file mode 100644 index 000000000..4e1c4ef9e --- /dev/null +++ b/osfmk/ppc/notify_interrupt.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include + +int debugNotify = 0; + +/* +** Function: NotifyInterruption +** +** Inputs: port - mach_port for main thread +** ppcInterrupHandler - interrupt handler to execute +** interruptStatePtr - current interrupt state +** emulatorDescriptor - where in emulator to notify +** originalPC - where the emulator was executing +** originalR2 - new R2 +** +** Outputs: +** +** Notes: +** +*/ + +unsigned long +syscall_notify_interrupt(mach_port_t, UInt32, UInt32 *, EmulatorDescriptor *, + void ** , void **, void *); + +unsigned long +syscall_notify_interrupt( mach_port_t port_thread, + UInt32 ppcInterruptHandler, + UInt32 * interruptStatePtr, + EmulatorDescriptor * emulatorDescriptor, + void ** originalPC, + void ** originalR2, + void *othread ) +{ + kern_return_t result; + struct ppc_saved_state *mainPCB; + thread_t thread, nthread; + thread_act_t act; + UInt32 interruptState, currentState, postIntMask; + extern thread_act_t port_name_to_act(mach_port_t); + boolean_t isSelf, runningInKernel; + static unsigned long sequence =0; + +#define COPYIN_INTSTATE() { \ + (void) copyin((char *) interruptStatePtr, (char *)&interruptState, sizeof(interruptState)); \ + if (emulatorDescriptor) \ + (void) copyin((char *) &emulatorDescriptor->postIntMask, (char *)&postIntMask, sizeof(postIntMask)); } +#define COPYOUT_INTSTATE() (void) copyout((char *) &interruptState, (char *)interruptStatePtr, sizeof(interruptState)) + + + act = port_name_to_act(port_thread); + + + if (act == THR_ACT_NULL) + return port_thread; + + runningInKernel = (act->mact.ksp == 0); + isSelf = (current_act() == act); + + if (!isSelf) { + /* First.. suspend the thread */ + result = thread_suspend(act); + + if (result) { + act_deallocate(act); + return port_thread; + } + + /* Now try to find and wait for any pending activitations + * to complete.. 
(the following is an expansion of + * thread_set_state()) + */ + + thread = act_lock_thread(act); + if (!act->active) { + act_unlock_thread(act); + act_deallocate(act); + return port_thread; + } + + thread_hold(act); + + while (1) { + if (!thread || act != thread->top_act) + break; + + act_unlock_thread(act); + (void) thread_stop_wait(thread); + nthread = act_lock_thread(act); + if (nthread == thread) + break; + thread_unstop(thread); + thread = nthread; + } + + } + + COPYIN_INTSTATE() + if (isSelf) + currentState = kOutsideMain; + else + currentState = (interruptState & kInterruptStateMask) >> kInterruptStateShift; + + if (debugNotify > 5) { + printf("\nNotifyInterruption: %X, %X, %X, %X, %X, %X\n", + port_thread, ppcInterruptHandler, interruptStatePtr, + emulatorDescriptor, originalPC, originalR2 ); + } + mainPCB = USER_REGS(act); + + switch (currentState) + { + case kNotifyPending: + case kInUninitialized: + if (debugNotify > 2) + printf("NotifyInterrupt: kInUninitialized\n"); + break; + + case kInPseudoKernel: + case kOutsideMain: + if (debugNotify > 2) + printf("NotifyInterrupt: kInPseudoKernel/kOutsideMain\n"); + interruptState = interruptState + | ((postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); + COPYOUT_INTSTATE(); + break; + + case kInSystemContext: + if (debugNotify > 2) + printf("kInSystemContext: old CR %x, postIntMask %x, new CR %x\n", + mainPCB->cr, postIntMask, mainPCB->cr | postIntMask); + mainPCB->cr |= postIntMask; + break; + + case kInAlternateContext: + if (debugNotify > 2) + printf("kInAlternateContext: IN InterruptState %x, postIntMask %x\n", + interruptState, postIntMask); + interruptState = interruptState | ((postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); + interruptState = (interruptState & ~kInterruptStateMask); + + if (runningInKernel) + interruptState |= (kNotifyPending << kInterruptStateShift); + else + interruptState |= (kInPseudoKernel << kInterruptStateShift); + + (void) copyout((char *)&mainPCB->srr0, (char 
*)originalPC, sizeof(originalPC)); + (void) copyout((char *)&mainPCB->r2, (char *)originalR2, sizeof(originalR2)); + COPYOUT_INTSTATE(); + if (debugNotify > 2) + printf("kInAlternateContext: Out interruptState %x, Old PC %x, New %x, R2 %x\n", + interruptState, mainPCB->srr0, ppcInterruptHandler, mainPCB->r2); + + mainPCB->srr0 = ppcInterruptHandler; + break; + + case kInExceptionHandler: + if (debugNotify > 2) + printf("NotifyInterrupt: kInExceptionHandler\n"); + interruptState = interruptState | ((postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask); + COPYOUT_INTSTATE(); + break; + + default: + if (debugNotify) + printf("NotifyInterrupt: default "); + printf("Interruption while running in unknown state\n"); + printf("interruptState = 0x%X\n",currentState); + break; + } + + if (!isSelf) { + if (thread && act == thread->top_act) + thread_unstop(thread); + thread_release(act); + act_unlock_thread(act); + thread_resume(act); + } + + act_deallocate(act); + + return port_thread; +} diff --git a/osfmk/ppc/pcb.c b/osfmk/ppc/pcb.c new file mode 100644 index 000000000..9aa01672a --- /dev/null +++ b/osfmk/ppc/pcb.c @@ -0,0 +1,951 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
 Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+ * @OSF_COPYRIGHT@
+ */
+/*
+ * Copyright (c) 1990,1991,1992 The University of Utah and
+ * the Center for Software Science (CSS). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the Center
+ * for Software Science at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSS requests users of this software to return to css-dist@cs.utah.edu any
+ * improvements that they make and grant CSS redistribution rights.
+ *
+ * Utah $Hdr: pcb.c 1.23 92/06/27$
+ */
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+extern int real_ncpus; /* Number of actual CPUs */
+extern struct Saveanchor saveanchor; /* Aligned savearea anchor */
+
+/*
+ * These constants are dumb. They should not be in asm.h! 
+ */ + +#define KF_SIZE (FM_SIZE+ARG_SIZE+FM_REDZONE) + +#if DEBUG +int fpu_trap_count = 0; +int fpu_switch_count = 0; +int vec_trap_count = 0; +int vec_switch_count = 0; +#endif + +extern struct thread_shuttle *Switch_context( + struct thread_shuttle *old, + void (*cont)(void), + struct thread_shuttle *new); + + +#if MACH_LDEBUG || MACH_KDB +void log_thread_action (char *, long, long, long); +#endif + + +/* + * consider_machine_collect: try to collect machine-dependent pages + */ +void +consider_machine_collect() +{ + /* + * none currently available + */ + return; +} + +void +consider_machine_adjust() +{ + consider_mapping_adjust(); +} + + +/* + * stack_attach: Attach a kernel stack to a thread. + */ +void +machine_kernel_stack_init( + struct thread_shuttle *thread, + void (*start_pos)(thread_t)) +{ + vm_offset_t stack; + unsigned int *kss; + struct savearea *sv; + + assert(thread->top_act->mact.pcb); + assert(thread->kernel_stack); + stack = thread->kernel_stack; + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("machine_kernel_stack_init(thr=%x,stk=%x,start_pos=%x)\n", thread,stack,start_pos); +#endif /* MACH_ASSERT */ + + kss = (unsigned int *)STACK_IKS(stack); + sv=(savearea *)(thread->top_act->mact.pcb); /* This for the sake of C */ + + sv->save_lr = (unsigned int) start_pos; /* Set up the execution address */ + sv->save_srr0 = (unsigned int) start_pos; /* Here too */ + sv->save_srr1 = MSR_SUPERVISOR_INT_OFF; /* Set the normal running MSR */ + sv->save_r1 = (vm_offset_t) ((int)kss - KF_SIZE); /* Point to the top frame on the stack */ + sv->save_xfpscrpad = 0; /* Start with a clear fpscr */ + sv->save_xfpscr = 0; /* Start with a clear fpscr */ + + *((int *)sv->save_r1) = 0; /* Zero the frame backpointer */ + thread->top_act->mact.ksp = 0; /* Show that the kernel stack is in use already */ + +} + +/* + * switch_context: Switch from one thread to another, needed for + * switching of space + * + */ +struct thread_shuttle* +switch_context( + struct 
thread_shuttle *old, + void (*continuation)(void), + struct thread_shuttle *new) +{ + register thread_act_t old_act = old->top_act, new_act = new->top_act; + register struct thread_shuttle* retval; + pmap_t new_pmap; +#if MACH_LDEBUG || MACH_KDB + log_thread_action("switch", + (long)old, + (long)new, + (long)__builtin_return_address(0)); +#endif + per_proc_info[cpu_number()].old_thread = old; + assert(old_act->kernel_loaded || + active_stacks[cpu_number()] == old_act->thread->kernel_stack); + + if(get_preemption_level() != 1) { /* Make sure we are not at wrong preemption level */ + panic("switch_context: Invalid preemption level (%d); old = %08X, cont = %08X, new = %08X\n", + get_preemption_level(), old, continuation, new); + } + check_simple_locks(); + + /* Our context might wake up on another processor, so we must + * not keep hot state in our FPU, it must go back to the pcb + * so that it can be found by the other if needed + */ + if(real_ncpus > 1) { /* This is potentially slow, so only do when actually SMP */ + fpu_save(); /* Save floating point if used */ + vec_save(); /* Save vector if used */ + } + +#if DEBUG + if (watchacts & WA_PCB) { + printf("switch_context(0x%08x, 0x%x, 0x%08x)\n", + old,continuation,new); + } +#endif /* DEBUG */ + + /* + * We do not have to worry about the PMAP module, so switch. + * + * We must not use top_act->map since this may not be the actual + * task map, but the map being used for a klcopyin/out. + */ + + if(new_act->mact.specFlags & runningVM) { /* Is the new guy running a VM? */ + pmap_switch(new_act->mact.vmmCEntry->vmmPmap); /* Switch to the VM's pmap */ + } + else { /* otherwise, we use the task's pmap */ + new_pmap = new_act->task->map->pmap; + if ((old_act->task->map->pmap != new_pmap) || (old_act->mact.specFlags & runningVM)) { + pmap_switch(new_pmap); /* Switch if there is a change */ + } + } + + /* Sanity check - is the stack pointer inside the stack that + * we're about to switch to? 
Is the execution address within + * the kernel's VM space?? + */ +#if 0 + printf("************* stack=%08X; R1=%08X; LR=%08X; old=%08X; cont=%08X; new=%08X\n", + new->kernel_stack, new_act->mact.pcb->ss.r1, + new_act->mact.pcb->ss.lr, old, continuation, new); /* (TEST/DEBUG) */ + assert((new->kernel_stack < new_act->mact.pcb->ss.r1) && + ((unsigned int)STACK_IKS(new->kernel_stack) > + new_act->mact.pcb->ss.r1)); + assert(new_act->mact.pcb->ss.lr < VM_MAX_KERNEL_ADDRESS); +#endif + + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, + (int)old, (int)new, old->sched_pri, new->sched_pri, 0); + + + retval = Switch_context(old, continuation, new); + assert(retval != (struct thread_shuttle*)NULL); + + /* We've returned from having switched context, so we should be + * back in the original context. + */ + + return retval; +} + +/* + * Alter the thread's state so that a following thread_exception_return + * will make the thread return 'retval' from a syscall. + */ +void +thread_set_syscall_return( + struct thread_shuttle *thread, + kern_return_t retval) +{ + struct ppc_saved_state *ssp = &thread->top_act->mact.pcb->ss; + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("thread_set_syscall_return(thr=%x,retval=%d)\n", thread,retval); +#endif /* MACH_ASSERT */ + + ssp->r3 = retval; +} + +/* + * Initialize the machine-dependent state for a new thread. 
+ */ +kern_return_t +thread_machine_create( + struct thread_shuttle *thread, + thread_act_t thr_act, + void (*start_pos)(thread_t)) +{ + + savearea *sv; /* Pointer to newly allocated savearea */ + unsigned int *CIsTooLimited, i; + + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("thread_machine_create(thr=%x,thr_act=%x,st=%x)\n", thread, thr_act, start_pos); +#endif /* MACH_ASSERT */ + + hw_atomic_add(&saveanchor.saveneed, 4); /* Account for the number of saveareas we think we "need" + for this activation */ + assert(thr_act->mact.pcb == (pcb_t)0); /* Make sure there was no previous savearea */ + + sv = save_alloc(); /* Go get us a savearea */ + + bzero((char *) sv, sizeof(struct pcb)); /* Clear out the whole shebang */ + + sv->save_act = thr_act; /* Set who owns it */ + sv->save_vrsave = 0; + thr_act->mact.pcb = (pcb_t)sv; /* Point to the save area */ + + thread->kernel_stack = (int)stack_alloc(thread,start_pos); /* Allocate our kernel stack */ + assert(thread->kernel_stack); /* Make sure we got it */ + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("pcb_init(%x) pcb=%x\n", thr_act, sv); +#endif /* MACH_ASSERT */ + /* + * User threads will pull their context from the pcb when first + * returning to user mode, so fill in all the necessary values. + * Kernel threads are initialized from the save state structure + * at the base of the kernel stack (see stack_attach()). 
+ */ + + sv->save_srr1 = MSR_EXPORT_MASK_SET; /* Set the default user MSR */ + + CIsTooLimited = (unsigned int *)(&sv->save_sr0); /* Make a pointer 'cause C can't cast on the left */ + for(i=0; i<16; i++) { /* Initialize all SRs */ + CIsTooLimited[i] = SEG_REG_PROT | (i << 20) | thr_act->task->map->pmap->space; /* Set the SR value */ + } + sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | thr_act->task->map->pmap->space; /* Default the copyin */ + + return(KERN_SUCCESS); +} + +/* + * Machine-dependent cleanup prior to destroying a thread + */ +void +thread_machine_destroy( thread_t thread ) +{ + spl_t s; + + if (thread->kernel_stack) { + s = splsched(); + stack_free(thread); + splx(s); + } +} + +/* + * flush out any lazily evaluated HW state in the + * owning thread's context, before termination. + */ +void +thread_machine_flush( thread_act_t cur_act ) +{ +} + +/* + * Number of times we needed to swap an activation back in before + * switching to it. + */ +int switch_act_swapins = 0; + +/* + * machine_switch_act + * + * Machine-dependent details of activation switching. Called with + * RPC locks held and preemption disabled. + */ +void +machine_switch_act( + thread_t thread, + thread_act_t old, + thread_act_t new, + int cpu) +{ + pmap_t new_pmap; + + /* Our context might wake up on another processor, so we must + * not keep hot state in our FPU, it must go back to the pcb + * so that it can be found by the other if needed + */ + if(real_ncpus > 1) { /* This is potentially slow, so only do when actually SMP */ + fpu_save(); /* Save floating point if used */ + vec_save(); /* Save vector if used */ + } + + active_stacks[cpu] = thread->kernel_stack; + + ast_context(new, cpu); + + /* Activations might have different pmaps + * (process->kernel->server, for example). + * Change space if needed + */ + + if(new->mact.specFlags & runningVM) { /* Is the new guy running a VM? 
*/ + pmap_switch(new->mact.vmmCEntry->vmmPmap); /* Switch to the VM's pmap */ + } + else { /* otherwise, we use the task's pmap */ + new_pmap = new->task->map->pmap; + if ((old->task->map->pmap != new_pmap) || (old->mact.specFlags & runningVM)) { + pmap_switch(new_pmap); + } + } + +} + +void +pcb_user_to_kernel(thread_act_t act) +{ + + return; /* Not needed, I hope... */ +} + + +/* + * act_machine_sv_free + * release saveareas associated with an act. if flag is true, release + * user level savearea(s) too, else don't + * + * this code cannot block so we call the proper save area free routine + */ +void +act_machine_sv_free(thread_act_t act) +{ + register pcb_t pcb,userpcb,npcb; + register savearea *svp; + register int i; + +/* + * This next bit insures that any live facility context for this thread is discarded on every processor + * that may have it. We go through all per-processor blocks and zero the facility owner if + * it is the thread being destroyed. This needs to be done via a compare-and-swap because + * some other processor could change the owner while we are clearing it. It turns out that + * this is the only place where we need the interlock, normal use of the owner field is cpu-local + * and doesn't need the interlock. Because we are called during termintation, and a thread + * terminates itself, the context on other processors has been saved (because we save it as + * part of the context switch), even if it is still considered live. Since the dead thread is + * not running elsewhere, and the context is saved, any other processor looking at the owner + * field will not attempt to save context again, meaning that it doesn't matter if the owner + * changes out from under it. + */ + + /* + * free VMX and FPU saveareas. do not free user save areas. + * user VMX and FPU saveareas, if any, i'm told are last in + * the chain so we just stop if we find them + * we identify user VMX and FPU saveareas when we find a pcb + * with a save level of 0. 
we identify user regular save + * areas when we find one with MSR_PR set + */ + + pcb = act->mact.VMX_pcb; /* Get the top vector savearea */ + while(pcb) { /* Any VMX saved state? */ + svp = (savearea *)pcb; /* save lots of casting later */ + if (svp->save_level_vec == 0) break; /* done when hit user if any */ + pcb = (pcb_t)svp->save_prev_vector; /* Get one underneath our's */ + svp->save_flags &= ~SAVvmxvalid; /* Clear the VMX flag */ + if(!(svp->save_flags & SAVinuse)) { /* Anyone left with this one? */ + + save_ret(svp); /* release it */ + } + } + act->mact.VMX_pcb = pcb; + if (act->mact.VMX_lvl != 0) { + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread); /* Clear if ours */ + } + } + + pcb = act->mact.FPU_pcb; /* Get the top floating point savearea */ + while(pcb) { /* Any floating point saved state? */ + svp = (savearea *)pcb; + if (svp->save_level_fp == 0) break; /* done when hit user if any */ + pcb = (pcb_t)svp->save_prev_float; /* Get one underneath our's */ + svp->save_flags &= ~SAVfpuvalid; /* Clear the floating point flag */ + if(!(svp->save_flags & SAVinuse)) { /* Anyone left with this one? 
*/ + save_ret(svp); /* Nope, release it */ + } + } + act->mact.FPU_pcb = pcb; + if (act->mact.FPU_lvl != 0) { + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread); /* Clear if ours */ + } + } + + /* + * free all regular saveareas except a user savearea, if any + */ + + pcb = act->mact.pcb; + userpcb = (pcb_t)0; + while(pcb) { + svp = (savearea *)pcb; + if ((svp->save_srr1 & MASK(MSR_PR))) { + assert(userpcb == (pcb_t)0); + userpcb = pcb; + svp = (savearea *)userpcb; + npcb = (pcb_t)svp->save_prev; + svp->save_prev = (struct savearea *)0; + } else { + svp->save_flags &= ~SAVattach; /* Clear the attached flag */ + npcb = (pcb_t)svp->save_prev; + if(!(svp->save_flags & SAVinuse)) /* Anyone left with this one? */ + save_ret(svp); + } + pcb = npcb; + } + act->mact.pcb = userpcb; + +} + + +/* + * act_virtual_machine_destroy: + * Shutdown any virtual machines associated with a thread + */ +void +act_virtual_machine_destroy(thread_act_t act) +{ + if(act->mact.bbDescAddr) { /* Check if the Blue box assist is active */ + disable_bluebox_internal(act); /* Kill off bluebox */ + } + + if(act->mact.vmmControl) { /* Check if VMM is active */ + vmm_tear_down_all(act); /* Kill off all VMM contexts */ + } +} + +/* + * act_machine_destroy: Shutdown any state associated with a thread pcb. + */ +void +act_machine_destroy(thread_act_t act) +{ + register pcb_t pcb, opcb; + int i; + +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("act_machine_destroy(0x%x)\n", act); +#endif /* MACH_ASSERT */ + + act_virtual_machine_destroy(act); + +/* + * This next bit insures that any live facility context for this thread is discarded on every processor + * that may have it. We go through all per-processor blocks and zero the facility owner if + * it is the thread being destroyed. This needs to be done via a compare-and-swap because + * some other processor could change the owner while we are clearing it. 
It turns out that + * this is the only place where we need the interlock, normal use of the owner field is cpu-local + * and doesn't need the interlock. Because we are called during termintation, and a thread + * terminates itself, the context on other processors has been saved (because we save it as + * part of the context switch), even if it is still considered live. Since the dead thread is + * not running elsewhere, and the context is saved, any other processor looking at the owner + * field will not attempt to save context again, meaning that it doesn't matter if the owner + * changes out from under it. + */ + + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].FPU_thread); /* Clear if ours */ + (void)hw_compare_and_store((unsigned int)act, 0, &per_proc_info[i].VMX_thread); /* Clear if ours */ + } + + pcb = act->mact.VMX_pcb; /* Get the top vector savearea */ + while(pcb) { /* Any VMX saved state? */ + opcb = pcb; /* Save current savearea address */ + pcb = (pcb_t)(((savearea *)pcb)->save_prev_vector); /* Get one underneath our's */ + ((savearea *)opcb)->save_flags &= ~SAVvmxvalid; /* Clear the VMX flag */ + + if(!(((savearea *)opcb)->save_flags & SAVinuse)) { /* Anyone left with this one? */ + save_release((savearea *)opcb); /* Nope, release it */ + } + } + act->mact.VMX_pcb = (pcb_t)0; /* Clear pointer */ + + pcb = act->mact.FPU_pcb; /* Get the top floating point savearea */ + while(pcb) { /* Any floating point saved state? */ + opcb = pcb; /* Save current savearea address */ + pcb = (pcb_t)(((savearea *)pcb)->save_prev_float); /* Get one underneath our's */ + ((savearea *)opcb)->save_flags &= ~SAVfpuvalid; /* Clear the floating point flag */ + + if(!(((savearea *)opcb)->save_flags & SAVinuse)) { /* Anyone left with this one? 
*/ + save_release((savearea *)opcb); /* Nope, release it */ + } + } + act->mact.FPU_pcb = (pcb_t)0; /* Clear pointer */ + + pcb = act->mact.pcb; /* Get the top normal savearea */ + act->mact.pcb = (pcb_t)0; /* Clear pointer */ + + while(pcb) { /* Any normal saved state left? */ + opcb = pcb; /* Keep track of what we're working on */ + pcb = (pcb_t)(((savearea *)pcb)->save_prev); /* Get one underneath our's */ + + ((savearea *)opcb)->save_flags = 0; /* Clear all flags since we release this in any case */ + save_release((savearea *)opcb); /* Release this one */ + } + + hw_atomic_sub(&saveanchor.saveneed, 4); /* Unaccount for the number of saveareas we think we "need" + for this activation */ +} + +kern_return_t +act_machine_create(task_t task, thread_act_t thr_act) +{ + /* + * Clear & Init the pcb (sets up user-mode s regs) + * We don't use this anymore. + */ + + register pcb_t pcb; + register int i; + unsigned int *CIsTooLimited; + pmap_t pmap; + + return KERN_SUCCESS; +} + +void act_machine_init() +{ +#if MACH_ASSERT + if (watchacts & WA_PCB) + printf("act_machine_init()\n"); +#endif /* MACH_ASSERT */ + + /* Good to verify these once */ + assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX ); + + assert( THREAD_STATE_MAX >= PPC_THREAD_STATE_COUNT ); + assert( THREAD_STATE_MAX >= PPC_EXCEPTION_STATE_COUNT ); + assert( THREAD_STATE_MAX >= PPC_FLOAT_STATE_COUNT ); + assert( THREAD_STATE_MAX >= sizeof(struct ppc_saved_state)/sizeof(int)); + + /* + * If we start using kernel activations, + * would normally create kernel_thread_pool here, + * populating it from the act_zone + */ +} + +void +act_machine_return(int code) +{ + thread_act_t thr_act = current_act(); + +#if MACH_ASSERT + if (watchacts & WA_EXIT) + printf("act_machine_return(0x%x) cur_act=%x(%d) thr=%x(%d)\n", + code, thr_act, thr_act->ref_count, + thr_act->thread, thr_act->thread->ref_count); +#endif /* MACH_ASSERT */ + + + /* + * This code is called with nothing locked. 
+ * It also returns with nothing locked, if it returns. + * + * This routine terminates the current thread activation. + * If this is the only activation associated with its + * thread shuttle, then the entire thread (shuttle plus + * activation) is terminated. + */ + assert( code == KERN_TERMINATED ); + assert( thr_act ); + + act_lock_thread(thr_act); + +#ifdef CALLOUT_RPC_MODEL + /* + * JMM - This needs to get cleaned up to work under the much simpler + * return (instead of callout model). + */ + if (thr_act->thread->top_act != thr_act) { + /* + * this is not the top activation; + * if possible, we should clone the shuttle so that + * both the root RPC-chain and the soon-to-be-orphaned + * RPC-chain have shuttles + * + * JMM - Cloning is a horrible idea! Instead we should alert + * the pieces upstream to return the shuttle. We will use + * alerts for this. + */ + act_unlock_thread(thr_act); + panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED"); + } + + if (thr_act->lower != THR_ACT_NULL) { + thread_t cur_thread = current_thread(); + thread_act_t cur_act; + struct ipc_port *iplock; + + /* terminate the entire thread (shuttle plus activation) */ + /* terminate only this activation, send an appropriate */ + /* return code back to the activation that invoked us. 
*/ + iplock = thr_act->pool_port; /* remember for unlock call */ + thr_act->lower->alerts |= SERVER_TERMINATED; + install_special_handler(thr_act->lower); + + /* Return to previous act with error code */ + + act_locked_act_reference(thr_act); /* keep it around */ + act_switch_swapcheck(cur_thread, (ipc_port_t)0); + + (void) switch_act(THR_ACT_NULL); + /* assert(thr_act->ref_count == 0); */ /* XXX */ + cur_act = cur_thread->top_act; + MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED; + machine_kernel_stack_init(cur_thread, mach_rpc_return_error); + /* + * The following unlocks must be done separately since fields + * used by `act_unlock_thread()' have been cleared, meaning + * that it would not release all of the appropriate locks. + */ + rpc_unlock(cur_thread); + if (iplock) ip_unlock(iplock); /* must be done separately */ + act_unlock(thr_act); + act_deallocate(thr_act); /* free it */ + Load_context(cur_thread); + /*NOTREACHED*/ + + panic("act_machine_return: TALKING ZOMBIE! (2)"); + } + +#endif /* CALLOUT_RPC_MODEL */ + + /* This is the only activation attached to the shuttle... */ + + assert(thr_act->thread->top_act == thr_act); + act_unlock_thread(thr_act); + thread_terminate_self(); + + /*NOTREACHED*/ + panic("act_machine_return: TALKING ZOMBIE! (1)"); +} + +void +thread_machine_set_current(struct thread_shuttle *thread) +{ + register int my_cpu = cpu_number(); + + cpu_data[my_cpu].active_thread = thread; + + active_kloaded[my_cpu] = thread->top_act->kernel_loaded ? 
thread->top_act : THR_ACT_NULL; +} + +void +thread_machine_init(void) +{ +#ifdef MACHINE_STACK +#if KERNEL_STACK_SIZE > PPC_PGBYTES + panic("KERNEL_STACK_SIZE can't be greater than PPC_PGBYTES\n"); +#endif +#endif +} + +#if MACH_ASSERT +void +dump_pcb(pcb_t pcb) +{ + printf("pcb @ %8.8x:\n", pcb); +#if DEBUG + regDump(&pcb->ss); +#endif /* DEBUG */ +} + +void +dump_thread(thread_t th) +{ + printf(" thread @ 0x%x:\n", th); +} + +int + dump_act(thread_act_t thr_act) +{ + if (!thr_act) + return(0); + + printf("thr_act(0x%x)(%d): thread=%x(%d) task=%x(%d)\n", + thr_act, thr_act->ref_count, + thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0, + thr_act->task, thr_act->task ? thr_act->task->ref_count : 0); + + printf("\talerts=%x mask=%x susp=%x active=%x hi=%x lo=%x\n", + thr_act->alerts, thr_act->alert_mask, + thr_act->suspend_count, thr_act->active, + thr_act->higher, thr_act->lower); + + return((int)thr_act); +} + +#endif + +unsigned int +get_useraddr() +{ + + thread_act_t thr_act = current_act(); + + return(thr_act->mact.pcb->ss.srr0); +} + +/* + * detach and return a kernel stack from a thread + */ + +vm_offset_t +stack_detach(thread_t thread) +{ + vm_offset_t stack; + + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_DETACH), + thread, thread->priority, + thread->sched_pri, 0, + 0); + + stack = thread->kernel_stack; + thread->kernel_stack = 0; + return(stack); +} + +/* + * attach a kernel stack to a thread and initialize it + * + * attaches a stack to a thread. if there is no save + * area we allocate one. the top save area is then + * loaded with the pc (continuation address), the initial + * stack pointer, and a std kernel MSR. 
if the top + * save area is the user save area bad things will + * happen + * + */ + +void +stack_attach(struct thread_shuttle *thread, + vm_offset_t stack, + void (*start_pos)(thread_t)) +{ + thread_act_t thr_act; + unsigned int *kss; + struct savearea *sv; + + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH), + thread, thread->priority, + thread->sched_pri, start_pos, + 0); + + assert(stack); + kss = (unsigned int *)STACK_IKS(stack); + thread->kernel_stack = stack; + + /* during initialization we sometimes do not have an + activation. in that case do not do anything */ + if ((thr_act = thread->top_act) != 0) { + sv = save_get(); /* cannot block */ + // bzero((char *) sv, sizeof(struct pcb)); + sv->save_act = thr_act; + sv->save_prev = (struct savearea *)thr_act->mact.pcb; + thr_act->mact.pcb = (pcb_t)sv; + + sv->save_srr0 = (unsigned int) start_pos; + /* sv->save_r3 = ARG ? */ + sv->save_r1 = (vm_offset_t)((int)kss - KF_SIZE); + sv->save_srr1 = MSR_SUPERVISOR_INT_OFF; + sv->save_xfpscrpad = 0; /* Start with a clear fpscr */ + sv->save_xfpscr = 0; /* Start with a clear fpscr */ + *((int *)sv->save_r1) = 0; + thr_act->mact.ksp = 0; + } + + return; +} + +/* + * move a stack from old to new thread + */ + +void +stack_handoff(thread_t old, + thread_t new) +{ + + vm_offset_t stack; + pmap_t new_pmap; + + assert(new->top_act); + assert(old->top_act); + + stack = stack_detach(old); + new->kernel_stack = stack; + +#if NCPUS > 1 + if (real_ncpus > 1) { + fpu_save(); + vec_save(); + } +#endif + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE, + (int)old, (int)new, old->sched_pri, new->sched_pri, 0); + + + if(new->top_act->mact.specFlags & runningVM) { /* Is the new guy running a VM? 
*/ + pmap_switch(new->top_act->mact.vmmCEntry->vmmPmap); /* Switch to the VM's pmap */ + } + else { /* otherwise, we use the task's pmap */ + new_pmap = new->top_act->task->map->pmap; + if ((old->top_act->task->map->pmap != new_pmap) || (old->top_act->mact.specFlags & runningVM)) { + pmap_switch(new_pmap); + } + } + + thread_machine_set_current(new); + active_stacks[cpu_number()] = new->kernel_stack; + per_proc_info[cpu_number()].Uassist = new->top_act->mact.cthread_self; + return; +} + +/* + * clean and initialize the current kernel stack and go to + * the given continuation routine + */ + +void +call_continuation(void (*continuation)(void) ) +{ + + unsigned int *kss; + vm_offset_t tsp; + + assert(current_thread()->kernel_stack); + kss = (unsigned int *)STACK_IKS(current_thread()->kernel_stack); + assert(continuation); + + tsp = (vm_offset_t)((int)kss - KF_SIZE); + assert(tsp); + *((int *)tsp) = 0; + + Call_continuation(continuation, tsp); + + return; +} + +void +thread_swapin_mach_alloc(thread_t thread) +{ + struct savearea *sv; + + assert(thread->top_act->mact.pcb == 0); + + sv = save_alloc(); + assert(sv); + // bzero((char *) sv, sizeof(struct pcb)); + sv->save_act = thread->top_act; + thread->top_act->mact.pcb = (pcb_t)sv; + +} diff --git a/osfmk/ppc/pmap.c b/osfmk/ppc/pmap.c new file mode 100644 index 000000000..e7b6e01f8 --- /dev/null +++ b/osfmk/ppc/pmap.c @@ -0,0 +1,2251 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1990,1991,1992 The University of Utah and + * the Center for Software Science (CSS). + * Copyright (c) 1991,1987 Carnegie Mellon University. + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation, + * and that all advertising materials mentioning features or use of + * this software display the following acknowledgement: ``This product + * includes software developed by the Center for Software Science at + * the University of Utah.'' + * + * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF + * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY + * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF + * THIS SOFTWARE. + * + * CSS requests users of this software to return to css-dist@cs.utah.edu any + * improvements that they make and grant CSS redistribution rights. 
+ * + * Carnegie Mellon requests users of this software to return to + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + * + * Utah $Hdr: pmap.c 1.28 92/06/23$ + * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90 + */ + +/* + * Manages physical address maps for powerpc. + * + * In addition to hardware address maps, this + * module is called upon to provide software-use-only + * maps which may or may not be stored in the same + * form as hardware maps. These pseudo-maps are + * used to store intermediate results from copy + * operations to and from address spaces. + * + * Since the information managed by this module is + * also stored by the logical address mapping module, + * this module may throw away valid virtual-to-physical + * mappings at almost any time. However, invalidations + * of virtual-to-physical mappings must be done as + * requested. + * + * In order to cope with hardware architectures which + * make virtual-to-physical map invalidates expensive, + * this module may delay invalidate or reduced protection + * operations until such time as they are actually + * necessary. This module is given full information to + * when physical maps must be made correct. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include + +#if DB_MACHINE_COMMANDS +/* optionally enable traces of pmap operations in post-mortem trace table */ +/* #define PMAP_LOWTRACE 1 */ +#define PMAP_LOWTRACE 0 +#else /* DB_MACHINE_COMMANDS */ +/* Can not trace even if we wanted to */ +#define PMAP_LOWTRACE 0 +#endif /* DB_MACHINE_COMMANDS */ + +#define PERFTIMES 0 + +#if PERFTIMES && DEBUG +#define debugLog2(a, b, c) dbgLog2(a, b, c) +#else +#define debugLog2(a, b, c) +#endif + +extern unsigned int avail_remaining; +extern unsigned int mappingdeb0; +extern struct Saveanchor saveanchor; /* Aliged savearea anchor */ +extern int real_ncpus; /* Number of actual CPUs */ +unsigned int debugbackpocket; /* (TEST/DEBUG) */ + +vm_offset_t avail_next; +vm_offset_t first_free_virt; +int current_free_region; /* Used in pmap_next_page */ + +/* forward */ +void pmap_activate(pmap_t pmap, thread_t th, int which_cpu); +void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu); +void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount); + +#if MACH_VM_DEBUG +int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space); +#endif + +#if DEBUG +#define PDB_USER 0x01 /* exported functions */ +#define PDB_MAPPING 0x02 /* low-level mapping routines */ +#define PDB_ENTER 0x04 /* pmap_enter specifics */ +#define PDB_COPY 0x08 /* copy page debugging */ +#define PDB_ZERO 0x10 /* zero page debugging */ +#define PDB_WIRED 0x20 /* things concerning wired entries */ +#define PDB_PTEG 0x40 /* PTEG overflows */ +#define PDB_LOCK 0x100 /* locks */ +#define PDB_IO 0x200 /* Improper use of WIMG_IO checks - PCI machines */ + +int pmdebug=0; +#endif + +/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! 
*/ + +extern struct pmap kernel_pmap_store; +pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */ +pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */ +struct zone *pmap_zone; /* zone of pmap structures */ +boolean_t pmap_initialized = FALSE; + +/* + * Physical-to-virtual translations are handled by inverted page table + * structures, phys_tables. Multiple mappings of a single page are handled + * by linking the affected mapping structures. We initialise one region + * for phys_tables of the physical memory we know about, but more may be + * added as it is discovered (eg. by drivers). + */ +struct phys_entry *phys_table; /* For debugging */ + +lock_t pmap_system_lock; + +decl_simple_lock_data(,tlb_system_lock) + +/* + * free pmap list. caches the first free_pmap_max pmaps that are freed up + */ +int free_pmap_max = 32; +int free_pmap_count; +pmap_t free_pmap_list; +decl_simple_lock_data(,free_pmap_lock) + +/* + * Function to get index into phys_table for a given physical address + */ + +struct phys_entry *pmap_find_physentry(vm_offset_t pa) +{ + int i; + struct phys_entry *entry; + + for (i = pmap_mem_regions_count-1; i >= 0; i--) { + if (pa < pmap_mem_regions[i].start) + continue; + if (pa >= pmap_mem_regions[i].end) + return PHYS_NULL; + + entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT]; + __asm__ volatile("dcbt 0,%0" : : "r" (entry)); /* We will use this in a little bit */ + return entry; + } + kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa); + return PHYS_NULL; +} + +/* + * kern_return_t + * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa, + * boolean_t available, unsigned int attr) + * Allocate some extra physentries for the physical addresses given, + * specifying some default attribute that on the powerpc specifies + * the default cachability for any mappings using these addresses + * If the memory is marked as available, it 
is added to the general + * VM pool, otherwise it is not (it is reserved for card IO etc). + */ +kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa, + boolean_t available, unsigned int attr) +{ + int i,j; + spl_t s; + + /* Only map whole pages */ + + panic("Forget it! You can't map no more memory, you greedy puke!\n"); + + spa = trunc_page(spa); + epa = round_page(epa); + + /* First check that the region doesn't already exist */ + + assert (epa >= spa); + for (i = 0; i < pmap_mem_regions_count; i++) { + /* If we're below the next region, then no conflict */ + if (epa < pmap_mem_regions[i].start) + break; + if (spa < pmap_mem_regions[i].end) { +#if DEBUG + kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr); +#endif /* DEBUG */ + return KERN_NO_SPACE; + } + } + +#if DEBUG + kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count); /* (TEST/DEBUG) */ +#endif + + /* Check that we've got enough space for another region */ + if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX) + return KERN_RESOURCE_SHORTAGE; + + /* Once here, i points to the mem_region above ours in physical mem */ + + /* allocate a new phys_table for this new region */ +#if DEBUG + kprintf("pmap_add_physical_memory; kalloc\n"); /* (TEST/DEBUG) */ +#endif + + phys_table = (struct phys_entry *) + kalloc(sizeof(struct phys_entry) * atop(epa-spa)); +#if DEBUG + kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table); /* (TEST/DEBUG) */ +#endif + + /* Initialise the new phys_table entries */ + for (j = 0; j < atop(epa-spa); j++) { + + phys_table[j].phys_link = MAPPING_NULL; + + mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr); /* Initialize the hardware specific portions */ + + } + s = splhigh(); + + /* Move all the phys_table entries up some to make room in + * the ordered list. 
+ */ + for (j = pmap_mem_regions_count; j > i ; j--) + pmap_mem_regions[j] = pmap_mem_regions[j-1]; + + /* Insert a new entry with some memory to back it */ + + pmap_mem_regions[i].start = spa; + pmap_mem_regions[i].end = epa; + pmap_mem_regions[i].phys_table = phys_table; + + pmap_mem_regions_count++; + splx(s); + +#if DEBUG + for(i=0; i spa); + debugLog2(40, va, spa); /* Log pmap_map call */ + + pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm); /* Set up a permanent block mapped area */ + + debugLog2(41, epa, prot); /* Log pmap_map call */ + + return(va); +} + +/* + * pmap_map_bd(va, spa, epa, prot) + * Back-door routine for mapping kernel VM at initialisation. + * Used for mapping memory outside the known physical memory + * space, with caching disabled. Designed for use by device probes. + * + * A virtual address range starting at "va" is mapped to the physical + * address range "spa" to "epa" with machine independent protection + * "prot". + * + * "va", "spa", and "epa" are byte addresses and must be on machine + * independent page boundaries. + * + * WARNING: The current version of memcpy() can use the dcbz instruction + * on the destination addresses. This will cause an alignment exception + * and consequent overhead if the destination is caching-disabled. So + * avoid memcpy()ing into the memory mapped by this function. + * + * also, many other pmap_ routines will misbehave if you try and change + * protections or remove these mappings, they are designed to be permanent. + * + * These areas will be added to the autogen list, if possible. Existing translations + * are overridden and their mapping stuctures are released. This takes place in + * the autogen_map function. + * + * Locking: + * this routine is called only during system initialization when only + * one processor is active, so no need to take locks... 
+ */ +vm_offset_t +pmap_map_bd( + vm_offset_t va, + vm_offset_t spa, + vm_offset_t epa, + vm_prot_t prot) +{ + register struct mapping *mp; + register struct phys_entry *pp; + + + if (spa == epa) + return(va); + + assert(epa > spa); + + debugLog2(42, va, epa); /* Log pmap_map_bd call */ + + pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm); /* Set up autogen area */ + + debugLog2(43, epa, prot); /* Log pmap_map_bd exit */ + + return(va); +} + +/* + * Bootstrap the system enough to run with virtual memory. + * Map the kernel's code and data, and allocate the system page table. + * Called with mapping done by BATs. Page_size must already be set. + * + * Parameters: + * mem_size: Total memory present + * first_avail: First virtual address available + * first_phys_avail: First physical address available + */ +void +pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize) +{ + register struct mapping *mp; + vm_offset_t addr; + vm_size_t size; + int i, num, j, rsize, mapsize, vmpagesz, vmmapsz; + unsigned int mask; + vm_offset_t first_used_addr; + PCA *pcaptr; + savectl *savec, *savec2; + vm_offset_t save, save2; + + *first_avail = round_page(*first_avail); + +#if DEBUG + kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n", + *first_avail, *first_phys_avail, avail_remaining); +#endif + + assert(PAGE_SIZE == PPC_PGBYTES); + + /* + * Initialize kernel pmap + */ + kernel_pmap = &kernel_pmap_store; + cursor_pmap = &kernel_pmap_store; + + lock_init(&pmap_system_lock, + FALSE, /* NOT a sleep lock */ + ETAP_VM_PMAP_SYS, + ETAP_VM_PMAP_SYS_I); + + simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL); + + kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */ + kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */ + kernel_pmap->ref_count = 1; + kernel_pmap->space = PPC_SID_KERNEL; + kernel_pmap->pmapvr = 0; /* Virtual 
= Real */ + kernel_pmap->bmaps = 0; /* No block pages just yet */ + for(i=0; i < 128; i++) { /* Clear usage slots */ + kernel_pmap->pmapUsage[i] = 0; + } + for(i=0; i < 16; i++) { /* Initialize for laughs */ + kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL; + } + + /* + * Allocate: (from first_avail up) + * Aligned to its own size: + * hash table (for mem size 2**x, allocate 2**(x-10) entries) + * mapping table (same size and immediatly following hash table) + */ + /* hash_table_size must be a power of 2, recommended sizes are + * taken from PPC601 User Manual, table 6-19. We take the next + * highest size if mem_size is not a power of two. + * TODO NMGS make this configurable at boot time. + */ + + num = sizeof(pte_t) * (mem_size >> 10); + + for (hash_table_size = 64 * 1024; /* minimum size = 64Kbytes */ + hash_table_size < num; + hash_table_size *= 2) + continue; + + /* Scale to within any physical memory layout constraints */ + do { + num = atop(mem_size); /* num now holds mem_size in pages */ + + /* size of all structures that we're going to allocate */ + + size = (vm_size_t) ( + (InitialSaveBloks * PAGE_SIZE) + /* Allow space for the initial context saveareas */ + (8 * PAGE_SIZE) + /* For backpocket saveareas */ + hash_table_size + /* For hash table */ + hash_table_size + /* For PTEG allocation table */ + (num * sizeof(struct phys_entry)) /* For the physical entries */ + ); + + mapsize = size = round_page(size); /* Get size of area to map that we just calculated */ + mapsize = mapsize + kmapsize; /* Account for the kernel text size */ + + vmpagesz = round_page(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */ + vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */ + + mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */ + + mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */ + mapsize = ((mapsize / 
PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */ + mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */ + +#if DEBUG + kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz); + kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz); + kprintf("pmap_bootstrap: size before mappings = %08X\n", size); + kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize); + kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize); +#endif + + size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */ + + /* hash table must be aligned to its size */ + + addr = (*first_avail + + (hash_table_size-1)) & ~(hash_table_size-1); + + if (addr + size > pmap_mem_regions[0].end) { + hash_table_size /= 2; + } else { + break; + } + /* If we have had to shrink hash table to too small, panic */ + if (hash_table_size == 32 * 1024) + panic("cannot lay out pmap memory map correctly"); + } while (1); + +#if DEBUG + kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n", + hash_table_size, size, addr); +#endif + if (round_page(*first_phys_avail) < trunc_page(addr)) { + /* We are stepping over at least one page here, so + * add this region to the free regions so that it can + * be allocated by pmap_steal + */ + free_regions[free_regions_count].start = round_page(*first_phys_avail); + free_regions[free_regions_count].end = trunc_page(addr); + + avail_remaining += (free_regions[free_regions_count].end - + free_regions[free_regions_count].start) / + PPC_PGBYTES; +#if DEBUG + kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n", + free_regions[free_regions_count].start,free_regions[free_regions_count].end, + avail_remaining); +#endif /* DEBUG */ + free_regions_count++; + } + + /* Zero everything - this also invalidates the hash table entries */ + bzero((char *)addr, size); + + /* Set up some pointers to our new structures */ + + /* from here, 
addr points to the next free address */ + + first_used_addr = addr; /* remember where we started */ + + /* Set up hash table address and dma buffer address, keeping + * alignment. These mappings are all 1-1, so dma_r == dma_v + * + * If hash_table_size == dma_buffer_alignment, then put hash_table + * first, since dma_buffer_size may be smaller than alignment, but + * hash table alignment==hash_table_size. + */ + hash_table_base = addr; + + addr += hash_table_size; + addr += hash_table_size; /* Add another for the PTEG Control Area */ + assert((hash_table_base & (hash_table_size-1)) == 0); + + pcaptr = (PCA *)(hash_table_base+hash_table_size); /* Point to the PCA table */ + + for(i=0; i < (hash_table_size/64) ; i++) { /* For all of PTEG control areas: */ + pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF; /* Mark all slots free */ + pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01; /* Initialize steal position */ + } + +/* + * Allocate our initial context save areas. As soon as we do this, + * we can take an interrupt. We do the saveareas here, 'cause they're guaranteed + * to be at least page aligned. 
+ */ + save2 = addr; /* Remember first page */ + save = addr; /* Point to the whole block of blocks */ + savec2 = (savectl *)(addr + PAGE_SIZE - sizeof(savectl)); /* Point to the first's control area */ + + for(i=0; i < InitialSaveBloks; i++) { /* Initialize the saveareas */ + + savec = (savectl *)(save + PAGE_SIZE - sizeof(savectl)); /* Get the control area for this one */ + + savec->sac_alloc = sac_empty; /* Mark both free */ + savec->sac_vrswap = 0; /* V=R, so the translation factor is 0 */ + savec->sac_flags = sac_perm; /* Mark it permanent */ + + savec->sac_flags |= 0x0000EE00; /* (TEST/DEBUG) */ + + save += PAGE_SIZE; /* Jump up to the next one now */ + + savec->sac_next = (unsigned int *)save; /* Link these two */ + + } + + savec->sac_next = (unsigned int *)0; /* Clear the forward pointer for the last */ + savec2->sac_alloc &= 0x7FFFFFFF; /* Mark the first one in use */ + + saveanchor.savefree = (unsigned int)save2; /* Point to the first one */ + saveanchor.savecount = InitialSaveBloks * sac_cnt; /* The total number of save areas allocated */ + saveanchor.saveinuse = 1; /* Number of areas in use */ + saveanchor.savemin = InitialSaveMin; /* We abend if lower than this */ + saveanchor.saveneghyst = InitialNegHysteresis; /* The minimum number to keep free (must be a multiple of sac_cnt) */ + saveanchor.savetarget = InitialSaveTarget; /* The target point for free save areas (must be a multiple of sac_cnt) */ + saveanchor.saveposhyst = InitialPosHysteresis; /* The high water mark for free save areas (must be a multiple of sac_cnt) */ + __asm__ volatile ("mtsprg 1, %0" : : "r" (save2)); /* Tell the exception handler about it */ + + addr += InitialSaveBloks * PAGE_SIZE; /* Move up the next free address */ + + save2 = addr; + save = addr; + savec2 = (savectl *)(addr + PAGE_SIZE - sizeof(savectl)); + + for(i=0; i < 8; i++) { /* Allocate backpocket saveareas */ + + savec = (savectl *)(save + PAGE_SIZE - sizeof(savectl)); + + savec->sac_alloc = sac_empty; + 
savec->sac_vrswap = 0; + savec->sac_flags = sac_perm; + savec->sac_flags |= 0x0000EE00; + + save += PAGE_SIZE; + + savec->sac_next = (unsigned int *)save; + + } + + savec->sac_next = (unsigned int *)0; + savec2->sac_alloc &= 0x7FFFFFFF; + debugbackpocket = save2; + addr += 8 * PAGE_SIZE; + + /* phys_table is static to help debugging, + * this variable is no longer actually used + * outside of this scope + */ + + phys_table = (struct phys_entry *) addr; + +#if DEBUG + kprintf("hash_table_base =%08X\n", hash_table_base); + kprintf("phys_table =%08X\n", phys_table); + kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count); +#endif + + for (i = 0; i < pmap_mem_regions_count; i++) { + + pmap_mem_regions[i].phys_table = phys_table; + rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE; + +#if DEBUG + kprintf("Initializing physical table for region %d\n", i); + kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n", + phys_table, rsize, pmap_mem_regions[i].start, + (unsigned int)pmap_mem_regions[i].end); +#endif + + for (j = 0; j < rsize; j++) { + phys_table[j].phys_link = MAPPING_NULL; + mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE), + PTE_WIMG_DEFAULT); /* Initializes hw specific storage attributes */ + } + phys_table = phys_table + + atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start); + } + + /* restore phys_table for debug */ + phys_table = (struct phys_entry *) addr; + + addr += sizeof(struct phys_entry) * num; + + simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB); + + /* Initialise the registers necessary for supporting the hashtable */ +#if DEBUG + kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size); +#endif + + hash_table_init(hash_table_base, hash_table_size); + +/* + * Remaining space is for mapping entries. 
Tell the initializer routine that + * the mapping system can't release this block because it's permanently assigned + */ + + mapping_init(); /* Initialize the mapping tables */ + + for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */ + mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */ + } + mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */ + +#if DEBUG + + kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n", + first_used_addr, round_page(first_used_addr+size), + first_used_addr); +#endif /* DEBUG */ + + /* Map V=R the page tables */ + pmap_map(first_used_addr, first_used_addr, + round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE); + +#if DEBUG + + for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) { /* Step through all these mappings */ + if(i != (j = kvtophys(i))) { /* Verify that the mapping was made V=R */ + kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j); + } + } +#endif + + *first_avail = round_page(first_used_addr + size); + first_free_virt = round_page(first_used_addr + size); + + /* All the rest of memory is free - add it to the free + * regions so that it can be allocated by pmap_steal + */ + free_regions[free_regions_count].start = *first_avail; + free_regions[free_regions_count].end = pmap_mem_regions[0].end; + + avail_remaining += (free_regions[free_regions_count].end - + free_regions[free_regions_count].start) / + PPC_PGBYTES; + +#if DEBUG + kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n", + free_regions[free_regions_count].start,free_regions[free_regions_count].end, + avail_remaining); +#endif /* DEBUG */ + + free_regions_count++; + + current_free_region = 0; + + avail_next = free_regions[current_free_region].start; + +#if DEBUG + kprintf("Number of free regions=%d\n",free_regions_count); /* (TEST/DEBUG) */ + kprintf("Current free 
region=%d\n",current_free_region); /* (TEST/DEBUG) */ + for(i=0;i= free_regions_count) { + /* We're into the pmap_mem_regions, handle this + * separately to free_regions + */ + + int current_pmap_mem_region = current_free_region - + free_regions_count + 1; + if (current_pmap_mem_region > pmap_mem_regions_count) + return FALSE; + *addrp = avail_next; + avail_next += PAGE_SIZE; + avail_remaining--; + if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) { + current_free_region++; + current_pmap_mem_region++; + avail_next = pmap_mem_regions[current_pmap_mem_region].start; +#if DEBUG + kprintf("pmap_next_page : next region start=0x%08x\n",avail_next); +#endif /* DEBUG */ + } + return TRUE; + } + + /* We're in the free_regions, allocate next page and increment + * counters + */ + *addrp = avail_next; + + avail_next += PAGE_SIZE; + avail_remaining--; + + if (avail_next >= free_regions[current_free_region].end) { + current_free_region++; + if (current_free_region < free_regions_count) + avail_next = free_regions[current_free_region].start; + else + avail_next = pmap_mem_regions[current_free_region - + free_regions_count + 1].start; +#if DEBUG + kprintf("pmap_next_page : next region start=0x%08x\n",avail_next); +#endif + } + return TRUE; +} + +void pmap_virtual_space( + vm_offset_t *startp, + vm_offset_t *endp) +{ + *startp = round_page(first_free_virt); + *endp = VM_MAX_KERNEL_ADDRESS; +} + +/* + * pmap_create + * + * Create and return a physical map. + * + * If the size specified for the map is zero, the map is an actual physical + * map, and may be referenced by the hardware. + * + * A pmap is either in the free list or in the in-use list. The only use + * of the in-use list (aside from debugging) is to handle the VSID wrap situation. + * Whenever a new pmap is allocated (i.e., not recovered from the free list). The + * in-use list is matched until a hole in the VSID sequence is found. (Note + * that the in-use pmaps are queued in VSID sequence order.) 
This is all done + * while free_pmap_lock is held. + * + * If the size specified is non-zero, the map will be used in software + * only, and is bounded by that size. + */ +pmap_t +pmap_create(vm_size_t size) +{ + pmap_t pmap, ckpmap, fore, aft; + int s, i; + space_t sid; + unsigned int currSID; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00001, size, 0); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' '); +#endif + + /* + * A software use-only map doesn't even need a pmap structure. + */ + if (size) + return(PMAP_NULL); + + /* + * If there is a pmap in the pmap free list, reuse it. + * Note that we use free_pmap_list for all chaining of pmaps, both to + * the free list and the in use chain (anchored from kernel_pmap). + */ + s = splhigh(); + simple_lock(&free_pmap_lock); + + if(free_pmap_list) { /* Any free? */ + pmap = free_pmap_list; /* Yes, allocate it */ + free_pmap_list = (pmap_t)pmap->bmaps; /* Dequeue this one (we chain free ones through bmaps) */ + free_pmap_count--; + } + else { + simple_unlock(&free_pmap_lock); /* Unlock just in case */ + splx(s); + + pmap = (pmap_t) zalloc(pmap_zone); /* Get one */ + if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */ + + bzero((char *)pmap, pmapSize); /* Clean up the pmap */ + + s = splhigh(); + simple_lock(&free_pmap_lock); /* Lock it back up */ + + ckpmap = cursor_pmap; /* Get starting point for free ID search */ + currSID = ckpmap->spaceNum; /* Get the actual space ID number */ + + while(1) { /* Keep trying until something happens */ + + currSID = (currSID + 1) & SID_MAX; /* Get the next in the sequence */ + ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */ + + if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */ + + if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */ + panic("pmap_create: Maximum number (2^20) active address spaces reached\n"); /* 
Die pig dog */ + } + } + + pmap->space = (currSID * incrVSID) & SID_MAX; /* Calculate the actual VSID */ + pmap->spaceNum = currSID; /* Set the space ID number */ + +/* + * Now we link into the chain just before the out of sequence guy. + */ + + fore = (pmap_t)ckpmap->pmap_link.prev; /* Get the current's previous */ + pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */ + fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */ + pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */ + ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */ + + simple_lock_init(&pmap->lock, ETAP_VM_PMAP); + pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap); /* Get physical pointer to the pmap and make mask */ + } + pmap->ref_count = 1; + pmap->stats.resident_count = 0; + pmap->stats.wired_count = 0; + pmap->bmaps = 0; /* Clear block map pointer to 0 */ + pmap->vflags = 0; /* Mark all alternates invalid for now */ + for(i=0; i < 128; i++) { /* Clean out usage slots */ + pmap->pmapUsage[i] = 0; + } + for(i=0; i < 16; i++) { /* Initialize for laughs */ + pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space; + } + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("-> %x, space id = %d\n", pmap, pmap->space); +#endif + + simple_unlock(&free_pmap_lock); + splx(s); + return(pmap); +} + +/* + * pmap_destroy + * + * Gives up a reference to the specified pmap. When the reference count + * reaches zero the pmap structure is added to the pmap free list. + * + * Should only be called if the map contains no valid mappings. 
+ */ +void +pmap_destroy(pmap_t pmap) +{ + int ref_count; + spl_t s; + pmap_t fore, aft; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00003, (unsigned int)pmap, 0); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_destroy(pmap=%x)\n", pmap); +#endif + + if (pmap == PMAP_NULL) + return; + + ref_count=hw_atomic_sub(&pmap->ref_count, 1); /* Back off the count */ + if(ref_count>0) return; /* Still more users, leave now... */ + + if(ref_count < 0) /* Did we go too far? */ + panic("pmap_destroy(): ref_count < 0"); + +#ifdef notdef + if(pmap->stats.resident_count != 0) + panic("PMAP_DESTROY: pmap not empty"); +#else + if(pmap->stats.resident_count != 0) { + pmap_remove(pmap, 0, 0xFFFFF000); + } +#endif + + /* + * Add the pmap to the pmap free list. + */ + + s = splhigh(); + /* + * Add the pmap to the pmap free list. + */ + simple_lock(&free_pmap_lock); + + if (free_pmap_count <= free_pmap_max) { /* Do we have enough spares? */ + + pmap->bmaps = (struct blokmap *)free_pmap_list; /* Queue in front */ + free_pmap_list = pmap; + free_pmap_count++; + simple_unlock(&free_pmap_lock); + + } else { + if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */ + fore = (pmap_t)pmap->pmap_link.prev; + aft = (pmap_t)pmap->pmap_link.next; + fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */ + aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */ + simple_unlock(&free_pmap_lock); + zfree(pmap_zone, (vm_offset_t) pmap); + } + splx(s); +} + +/* + * pmap_reference(pmap) + * gains a reference to the specified pmap. 
+ */ +void +pmap_reference(pmap_t pmap) +{ + spl_t s; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00004, (unsigned int)pmap, 0); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_reference(pmap=%x)\n", pmap); +#endif + + if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */ +} + +/* + * pmap_remove(pmap, s, e) + * unmaps all virtual addresses v in the virtual address + * range determined by [s, e) and pmap. + * s and e must be on machine independent page boundaries and + * s must be less than or equal to e. + * + * Note that pmap_remove does not remove any mappings in nested pmaps. We just + * skip those segments. + */ +void +pmap_remove( + pmap_t pmap, + vm_offset_t sva, + vm_offset_t eva) +{ + spl_t spl; + struct mapping *mp, *blm; + vm_offset_t lpage; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12)); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n", + pmap, sva, eva); +#endif + + if (pmap == PMAP_NULL) + return; + + /* It is just possible that eva might have wrapped around to zero, + * and sometimes we get asked to liberate something of size zero + * even though it's dumb (eg. after zero length read_overwrites) + */ + assert(eva >= sva); + + /* If these are not page aligned the loop might not terminate */ + assert((sva == trunc_page(sva)) && (eva == trunc_page(eva))); + + /* We liberate addresses from high to low, since the stack grows + * down. This means that we won't need to test addresses below + * the limit of stack growth + */ + + debugLog2(44, sva, eva); /* Log pmap_map call */ + + sva = trunc_page(sva); /* Make it clean */ + lpage = trunc_page(eva) - PAGE_SIZE; /* Point to the last page contained in the range */ + +/* + * Here we will remove all of the block mappings that overlap this range. + * hw_rem_blk removes one mapping in the range and returns. 
If it returns + * 0, there are no blocks in the range. + */ + + while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) { /* Keep going until no more */ + if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */ + blm = (blokmap *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFE)); /* Get virtual address */ + panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n", + pmap, sva, blm); + } + mapping_free(hw_cpv(mp)); /* Release it */ + } + + while (pmap->stats.resident_count && (eva > sva)) { + + eva -= PAGE_SIZE; /* Back up a page */ + +#if 1 + if((0x00008000 >> (sva >> 28)) & pmap->vflags) + panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */ +#endif + if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ + eva = eva & (-pmapUsageSize); /* Back up into the previous slot */ + continue; /* Check the next... */ + } + mapping_remove(pmap, eva); /* Remove the mapping for this address */ + } + + debugLog2(45, 0, 0); /* Log pmap_map call */ +} + +/* + * Routine: + * pmap_page_protect + * + * Function: + * Lower the permission for all mappings to a given page. 
+ */ +void +pmap_page_protect( + vm_offset_t pa, + vm_prot_t prot) +{ + register struct phys_entry *pp; + boolean_t remove; + + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot); +#endif + + debugLog2(46, pa, prot); /* Log pmap_page_protect call */ + + switch (prot) { + case VM_PROT_READ: + case VM_PROT_READ|VM_PROT_EXECUTE: + remove = FALSE; + break; + case VM_PROT_ALL: + return; + default: + remove = TRUE; + break; + } + + pp = pmap_find_physentry(pa); /* Get the physent for this page */ + if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */ + + if (remove) { /* If the protection was set to none, we'll remove all mappings */ + mapping_purge(pp); /* Get rid of them all */ + + debugLog2(47, 0, 0); /* Log pmap_map call */ + return; /* Leave... */ + } + + /* When we get here, it means that we are to change the protection for a + * physical page. + */ + + mapping_protect_phys(pp, prot, 0); /* Change protection of all mappings to page. */ + + debugLog2(47, 1, 0); /* Log pmap_map call */ +} + +/* + * pmap_protect(pmap, s, e, prot) + * changes the protection on all virtual addresses v in the + * virtual address range determined by [s, e] and pmap to prot. + * s and e must be on machine independent page boundaries and + * s must be less than or equal to e. + * + * Note that any requests to change the protection of a nested pmap are + * ignored. Those changes MUST be done by calling this with the correct pmap. 
+ */ +void pmap_protect( + pmap_t pmap, + vm_offset_t sva, + vm_offset_t eva, + vm_prot_t prot) +{ + spl_t spl; + register struct phys_entry *pp; + register struct mapping *mp, *mpv; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot); + + assert(sva < eva); +#endif + + if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */ + + debugLog2(48, sva, eva); /* Log pmap_map call */ + + if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */ + pmap_remove(pmap, sva, eva); /* Yeah, dump 'em */ + + debugLog2(49, prot, 0); /* Log pmap_map call */ + + return; /* Leave... */ + } + + sva = trunc_page(sva); /* Start up a page boundary */ + + while(sva < eva) { /* Step through */ + + if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ + sva = (sva + pmapUsageSize) &(-pmapUsageSize); /* Jump up into the next slot if nothing here */ + if(!sva) break; /* We tried to wrap, kill loop... */ + continue; /* Check the next... */ + } + +#if 1 + if((0x00008000 >> (sva >> 28)) & pmap->vflags) + panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */ +#endif + + mapping_protect(pmap, sva, prot); /* Change the protection on the page */ + sva += PAGE_SIZE; /* On to the next page */ + } + + debugLog2(49, prot, 1); /* Log pmap_map call */ + return; /* Leave... */ +} + +/* + * pmap_enter + * + * Create a translation for the virtual address (virt) to the physical + * address (phys) in the pmap with the protection requested. If the + * translation is wired then we can not allow a full page fault, i.e., + * the mapping control block is not eligible to be stolen in a low memory + * condition. 
+ * + * NB: This is the only routine which MAY NOT lazy-evaluate + * or lose information. That is, this routine must actually + * insert this page into the given map NOW. + */ +void +pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, + boolean_t wired) +{ + spl_t spl; + struct mapping *mp; + struct phys_entry *pp; + int memattr; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */ + dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */ +#endif + + if (pmap == PMAP_NULL) return; /* If they gave us no pmap, just leave... */ + + debugLog2(50, va, pa); /* Log pmap_map call */ + + pp = pmap_find_physentry(pa); /* Get the physent for this physical page */ + + if((0x00008000 >> (va >> 28)) & pmap->vflags) + panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va); /* (TEST/DEBUG) panic */ + + spl=splhigh(); /* Have to disallow interrupts between the + time we possibly clear a mapping and the time + we get it remapped again. An I/O SLIH could + try to drive an IOR using the page before + we get it mapped (Dude! This was a tough + bug!!!!) */ + + mapping_remove(pmap, va); /* Remove any other mapping at this address */ + + memattr = PTE_WIMG_IO; /* Assume I/O mapping for a moment */ + if(pp) memattr = ((pp->pte1&0x00000078) >> 3); /* Set the attribute to the physical default */ + + mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0); /* Make the address mapping */ + + splx(spl); /* I'm not busy no more - come what may */ + + debugLog2(51, prot, 0); /* Log pmap_map call */ + +#if DEBUG + if (pmdebug & (PDB_USER|PDB_ENTER)) + kprintf("leaving pmap_enter\n"); +#endif + +} + +/* + * pmap_extract(pmap, va) + * returns the physical address corrsponding to the + * virtual address specified by pmap and va if the + * virtual address is mapped and 0 if it is not. 
+ */ +vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) { + + spl_t spl; + register struct mapping *mp, *mpv; + register vm_offset_t pa; + unsigned int seg; + pmap_t actpmap; + + +#if PMAP_LOWTRACE + dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */ +#endif +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va); +#endif + + seg = va >> 28; /* Isolate segment */ + if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ + else actpmap = pmap; /* Otherwise use the one passed in */ + + pa = (vm_offset_t) 0; /* Clear this to 0 */ + + debugLog2(52, actpmap->space, va); /* Log pmap_map call */ + + spl = splhigh(); /* We can't allow any loss of control here */ + + if(mp=hw_lock_phys_vir(actpmap->space, va)) { /* Find the mapping for this vaddr and lock physent */ + if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ + panic("pmap_extract: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ + splx(spl); /* Interruptions are cool now */ + return 0; + } + + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Build the physical address */ + if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + splx(spl); /* Interruptions are cool now */ + + debugLog2(53, pa, 0); /* Log pmap_map call */ + + return pa; /* Return the physical address... */ + } + + pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */ + /* Note no nested pmaps here */ + splx(spl); /* Restore 'rupts */ + debugLog2(53, pa, 0); /* Log pmap_map call */ + return pa; /* Return physical address or 0 */ +} + +/* + * pmap_attributes: + * + * Set/Get special memory attributes; Set is not implemented. + * + * Note: 'VAL_GET_INFO' is used to return info about a page. 
+ * If less than 1 page is specified, return the physical page + * mapping and a count of the number of mappings to that page. + * If more than one page is specified, return the number + * of resident pages and the number of shared (more than + * one mapping) pages in the range; + * + */ +kern_return_t +pmap_attribute(pmap, address, size, attribute, value) + pmap_t pmap; + vm_offset_t address; + vm_size_t size; + vm_machine_attribute_t attribute; + vm_machine_attribute_val_t* value; +{ + spl_t s; + vm_offset_t sva, eva; + vm_offset_t pa; + kern_return_t ret; + register struct mapping *mp, *mpv; + register struct phys_entry *pp; + int total, seg; + pmap_t actpmap; + + if (attribute != MATTR_CACHE) + return KERN_INVALID_ARGUMENT; + + /* We can't get the caching attribute for more than one page + * at a time + */ + if ((*value == MATTR_VAL_GET) && + (trunc_page(address) != trunc_page(address+size-1))) + return KERN_INVALID_ARGUMENT; + + if (pmap == PMAP_NULL) + return KERN_SUCCESS; + + sva = trunc_page(address); + eva = round_page(address + size); + ret = KERN_SUCCESS; + + debugLog2(54, address, attribute); /* Log pmap_map call */ + + switch (*value) { + case MATTR_VAL_CACHE_SYNC: /* sync I+D caches */ + case MATTR_VAL_CACHE_FLUSH: /* flush from all caches */ + case MATTR_VAL_DCACHE_FLUSH: /* flush from data cache(s) */ + case MATTR_VAL_ICACHE_FLUSH: /* flush from instr cache(s) */ + sva = trunc_page(sva); + s = splhigh(); + + while (sva < eva) { + seg = sva >> 28; /* Isolate segment */ + if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ + else actpmap = pmap; /* Otherwise use the one passed in */ + +/* + * Note: the following should work ok with nested pmaps because there are not overlayed mappings + */ + if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */ + sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here 
*/ + if(!sva) break; /* We tried to wrap, kill loop... */ + continue; /* Check the next... */ + } + + if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */ + sva += PAGE_SIZE; /* Point to the next page */ + continue; /* Skip if the page is not mapped... */ + } + + if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ + panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ + continue; + } + + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + if((unsigned int)mpv->physent) { /* Is there a physical entry? */ + pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE; /* Yes, get the physical address from there */ + } + else { + pa = (vm_offset_t)(mpv->PTEr & PAGE_SIZE); /* Otherwise from the mapping */ + } + + switch (*value) { /* What type was that again? */ + case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */ + sync_cache(pa, PAGE_SIZE); /* Sync up dem caches */ + break; /* Done with this one here... */ + + case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */ + flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */ + invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */ + break; /* Done with this one here... */ + + case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */ + flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */ + break; /* Done with this one here... */ + + case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */ + invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */ + break; /* Done with this one here... 
*/ + } + if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry if it exists*/ + + sva += PAGE_SIZE; /* Point to the next page */ + } + splx(s); + break; + + case MATTR_VAL_GET_INFO: /* Get info */ + total = 0; + s = splhigh(); /* Lock 'em out */ + + if (size <= PAGE_SIZE) { /* Do they want just one page */ + seg = sva >> 28; /* Isolate segment */ + if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ + else actpmap = pmap; /* Otherwise use the one passed in */ + if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */ + *value = 0; /* Return nothing if no mapping */ + } + else { + if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ + panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ + } + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + if(pp = mpv->physent) { /* Check for a physical entry */ + total = 0; /* Clear the count */ + for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mp->next)) total++; /* Count the mapping */ + *value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total); /* Pass back the physical address and the count of mappings */ + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Clear the physical entry lock */ + } + else { /* This is the case for an I/O mapped area */ + *value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1); /* Pass back the physical address and the count of mappings */ + } + } + } + else { + total = 0; + while (sva < eva) { + seg = sva >> 28; /* Isolate segment */ + if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */ + else actpmap = pmap; /* Otherwise use the one passed in */ + + if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & 
pmapUsageMask])) { /* See if this chunk has anything in it */ + sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */ + if(!sva) break; /* We tried to wrap, kill loop... */ + continue; /* Check the next... */ + } + if(mp = hw_lock_phys_vir(actpmap->space, sva)) { /* Find the mapping for this vaddr and lock physent */ + if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ + panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ + continue; + } + mpv = hw_cpv(mp); /* Get virtual address of mapping */ + total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next); /* Count the "resident" and shared pages */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Clear the physical entry lock */ + } + sva += PAGE_SIZE; + } + *value = total; + } + splx(s); + break; + + case MATTR_VAL_GET: /* return current value */ + case MATTR_VAL_OFF: /* turn attribute off */ + case MATTR_VAL_ON: /* turn attribute on */ + default: + ret = KERN_INVALID_ARGUMENT; + break; + } + + debugLog2(55, 0, 0); /* Log pmap_map call */ + + return ret; +} + +/* + * pmap_collect + * + * Garbage collects the physical map system for pages that are no longer used. + * It isn't implemented or needed or wanted. + */ +void +pmap_collect(pmap_t pmap) +{ + return; +} + +/* + * Routine: pmap_activate + * Function: + * Binds the given physical map to the given + * processor, and returns a hardware map description. + * It isn't implemented or needed or wanted. + */ +void +pmap_activate( + pmap_t pmap, + thread_t th, + int which_cpu) +{ + return; +} +/* + * pmap_deactivate: + * It isn't implemented or needed or wanted. 
+ */ +void +pmap_deactivate( + pmap_t pmap, + thread_t th, + int which_cpu) +{ + return; +} + +#if DEBUG + +/* + * pmap_zero_page + * pmap_copy page + * + * are implemented in movc.s, these + * are just wrappers to help debugging + */ + +extern void pmap_zero_page_assembler(vm_offset_t p); +extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst); + +/* + * pmap_zero_page(pa) + * + * pmap_zero_page zeros the specified (machine independent) page pa. + */ +void +pmap_zero_page( + vm_offset_t p) +{ + register struct mapping *mp; + register struct phys_entry *pp; + + if (pmdebug & (PDB_USER|PDB_ZERO)) + kprintf("pmap_zero_page(pa=%x)\n", p); + + /* + * XXX can these happen? + */ + if (pmap_find_physentry(p) == PHYS_NULL) + panic("zero_page: physaddr out of range"); + + pmap_zero_page_assembler(p); +} + +/* + * pmap_copy_page(src, dst) + * + * pmap_copy_page copies the specified (machine independent) + * page from physical address src to physical address dst. + * + * We need to invalidate the cache for address dst before + * we do the copy. Apparently there won't be any mappings + * to the dst address normally. + */ +void +pmap_copy_page( + vm_offset_t src, + vm_offset_t dst) +{ + register struct phys_entry *pp; + + if (pmdebug & (PDB_USER|PDB_COPY)) + kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst); + if (pmdebug & PDB_COPY) + kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n", + src, dst, PAGE_SIZE); + + pmap_copy_page_assembler(src, dst); +} +#endif /* DEBUG */ + +/* + * pmap_pageable(pmap, s, e, pageable) + * Make the specified pages (by pmap, offset) + * pageable (or not) as requested. + * + * A page which is not pageable may not take + * a fault; therefore, its page table entry + * must remain valid for the duration. + * + * This routine is merely advisory; pmap_enter() + * will specify that these pages are to be wired + * down (or not) as appropriate. + * + * (called from vm/vm_fault.c). 
+ */ +void +pmap_pageable( + pmap_t pmap, + vm_offset_t start, + vm_offset_t end, + boolean_t pageable) +{ + + return; /* This is not used... */ + +} +/* + * Routine: pmap_change_wiring + * NOTE USED ANYMORE. + */ +void +pmap_change_wiring( + register pmap_t pmap, + vm_offset_t va, + boolean_t wired) +{ + return; /* This is not used... */ +} + +/* + * pmap_modify_pages(pmap, s, e) + * sets the modified bit on all virtual addresses v in the + * virtual address range determined by [s, e] and pmap, + * s and e must be on machine independent page boundaries and + * s must be less than or equal to e. + */ +void +pmap_modify_pages( + pmap_t pmap, + vm_offset_t sva, + vm_offset_t eva) +{ + spl_t spl; + mapping *mp; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva); +#endif + + if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */ + + debugLog2(56, sva, eva); /* Log pmap_map call */ + + spl=splhigh(); /* Don't bother me */ + + for ( ; sva < eva; sva += PAGE_SIZE) { /* Cycle through the whole range */ + mp = hw_lock_phys_vir(pmap->space, sva); /* Lock the physical entry for this mapping */ + if(mp) { /* Did we find one? */ + if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */ + panic("pmap_modify_pages: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */ + continue; + } + mp = hw_cpv(mp); /* Convert to virtual addressing */ + if(!mp->physent) continue; /* No physical entry means an I/O page, we can't set attributes */ + mapping_set_mod(mp->physent); /* Set the modfied bit for this page */ + hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + } + } + splx(spl); /* Restore the interrupt level */ + + debugLog2(57, 0, 0); /* Log pmap_map call */ + return; /* Leave... 
*/ +} + +/* + * pmap_clear_modify(phys) + * clears the hardware modified ("dirty") bit for one + * machine independant page starting at the given + * physical address. phys must be aligned on a machine + * independant page boundary. + */ +void +pmap_clear_modify(vm_offset_t pa) +{ + register struct phys_entry *pp; + spl_t spl; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00011, (unsigned int)pa, 0); /* (TEST/DEBUG) */ +#endif +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_clear_modify(pa=%x)\n", pa); +#endif + + pp = pmap_find_physentry(pa); /* Find the physent for this page */ + if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */ + + debugLog2(58, pa, 0); /* Log pmap_map call */ + + spl=splhigh(); /* Don't bother me */ + + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ + panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */ + splx(spl); /* Restore 'rupts */ + return; /* Should die before here */ + } + + mapping_clr_mod(pp); /* Clear all change bits for physical page */ + + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + splx(spl); /* Restore the interrupt level */ + + debugLog2(59, 0, 0); /* Log pmap_map call */ +} + +/* + * pmap_is_modified(phys) + * returns TRUE if the given physical page has been modified + * since the last call to pmap_clear_modify(). + */ +boolean_t +pmap_is_modified(register vm_offset_t pa) +{ + register struct phys_entry *pp; + spl_t spl; + boolean_t ret; + + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00012, (unsigned int)pa, 0); /* (TEST/DEBUG) */ +#endif +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_is_modified(pa=%x)\n", pa); +#endif + + pp = pmap_find_physentry(pa); /* Find the physent for this page */ + if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... 
*/ + + debugLog2(60, pa, 0); /* Log pmap_map call */ + + spl=splhigh(); /* Don't bother me */ + + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ + panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */ + splx(spl); /* Restore 'rupts */ + return 0; /* Should die before here */ + } + + ret = mapping_tst_mod(pp); /* Check for modified */ + + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + splx(spl); /* Restore the interrupt level */ + + debugLog2(61, ret, 0); /* Log pmap_map call */ + + return ret; +} + +/* + * pmap_clear_reference(phys) + * clears the hardware referenced bit in the given machine + * independant physical page. + * + */ +void +pmap_clear_reference(vm_offset_t pa) +{ + register struct phys_entry *pp; + spl_t spl; + + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00013, (unsigned int)pa, 0); /* (TEST/DEBUG) */ +#endif +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_clear_reference(pa=%x)\n", pa); +#endif + + pp = pmap_find_physentry(pa); /* Find the physent for this page */ + if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */ + + debugLog2(62, pa, 0); /* Log pmap_map call */ + + spl=splhigh(); /* Don't bother me */ + mapping_clr_ref(pp); /* Clear all reference bits for physical page */ + splx(spl); /* Restore the interrupt level */ + + debugLog2(63, 0, 0); /* Log pmap_map call */ + +} + +/* + * pmap_is_referenced(phys) + * returns TRUE if the given physical page has been referenced + * since the last call to pmap_clear_reference(). 
+ */ +boolean_t +pmap_is_referenced(vm_offset_t pa) +{ + register struct phys_entry *pp; + spl_t spl; + boolean_t ret; + + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00014, (unsigned int)pa, 0); /* (TEST/DEBUG) */ +#endif +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_is_referenced(pa=%x)\n", pa); +#endif + + pp = pmap_find_physentry(pa); /* Find the physent for this page */ + if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */ + + debugLog2(64, pa, 0); /* Log pmap_map call */ + + spl=splhigh(); /* Don't bother me */ + + if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */ + panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */ + splx(spl); /* Restore 'rupts */ + return 0; /* Should die before here */ + } + + ret = mapping_tst_ref(pp); /* Check for referenced */ + + hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */ + splx(spl); /* Restore the interrupt level */ + + debugLog2(65, ret, 0); /* Log pmap_map call */ + + return ret; +} + +#if MACH_VM_DEBUG +int +pmap_list_resident_pages( + register pmap_t pmap, + register vm_offset_t *listp, + register int space) +{ + return 0; +} +#endif /* MACH_VM_DEBUG */ + +/* + * Locking: + * spl: VM + */ +void +pmap_copy_part_page( + vm_offset_t src, + vm_offset_t src_offset, + vm_offset_t dst, + vm_offset_t dst_offset, + vm_size_t len) +{ + register struct phys_entry *pp_src, *pp_dst; + spl_t s; + + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset); /* (TEST/DEBUG) */ + dbgTrace(0xF1D04019, (unsigned int)len, 0); /* (TEST/DEBUG) */ +#endif + s = splhigh(); + + assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE); + assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE); + + /* + * Since the source and destination are physical addresses, + * turn off data translation to perform a bcopy() in bcopy_phys(). 
 */ + phys_copy((vm_offset_t) src+src_offset, + (vm_offset_t) dst+dst_offset, len); + + splx(s); +} + +void +pmap_zero_part_page( + vm_offset_t p, + vm_offset_t offset, + vm_size_t len) +{ + panic("pmap_zero_part_page"); +} + +boolean_t pmap_verify_free(vm_offset_t pa) { + + struct phys_entry *pp; + +#if PMAP_LOWTRACE + dbgTrace(0xF1D00007, (unsigned int)pa, 0); /* (TEST/DEBUG) */ +#endif + +#if DEBUG + if (pmdebug & PDB_USER) + kprintf("pmap_verify_free(pa=%x)\n", pa); +#endif + + if (!pmap_initialized) return(TRUE); + + pp = pmap_find_physentry(pa); /* Look up the physical entry */ + if (pp == PHYS_NULL) return FALSE; /* If there isn't one, show no mapping... */ + return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL); /* Otherwise, TRUE only when the mapping chain is empty, i.e. the page is free... */ +} + + +/* Determine if we need to switch space and set up for it if so */ + +void pmap_switch(pmap_t map) +{ + unsigned int i; + +#if DEBUG + if (watchacts & WA_PCB) { + kprintf("Switching to map at 0x%08x, space=%d\n", + map,map->space); + } +#endif /* DEBUG */ + + +/* when changing to kernel space, don't bother + * doing anything, the kernel is mapped from here already. + */ + if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */ + return; /* If so, we don't do anything... */ + } + + hw_set_user_space(map); /* Indicate if we need to load the SRs or not */ + return; /* Bye, bye, butterfly... */ +} + +/* + * kern_return_t pmap_nest(grand, subord, vaddr, size) + * + * grand = the pmap that we will nest subord into + * subord = the pmap that goes into the grand + * vaddr = start of range in pmap to be inserted + * size = size of range in pmap to be inserted + * + * Inserts a pmap into another. This is used to implement shared segments. + * On the current PPC processors, this is limited to segment (256MB) aligned + * segment sized ranges. 
+ */ + +kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) { + + unsigned int oflags, seg, grandr; + int i; + + if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */ + if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */ + + while(1) { /* Test and set the subordinate flag */ + oflags = subord->vflags & ~pmapAltSeg; /* Get old unset value */ + if(subord->vflags & pmapAltSeg) { /* Are trying to nest one already nested? */ + panic("pmap_nest: Attempt to nest an already nested pmap\n"); + } + if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break; /* Done if we got it set */ + } + + simple_lock(&grand->lock); /* Lock the superior pmap */ + + if(grand->vflags & pmapSubord) { /* Are we only one level deep? */ + simple_unlock(&grand->lock); /* Unlock the superior pmap */ + panic("pmap_nest: Attempt to nest into subordinate pmap\n"); + return KERN_FAILURE; /* Shame on you */ + } + + seg = vaddr >> 28; /* Isolate the segment number */ + if((0x00008000 >> seg) & grand->vflags) { /* See if it is already in use */ + simple_unlock(&grand->lock); /* Unlock the superior pmap */ + panic("pmap_nest: Attempt to nest into already nested segment\n"); + return KERN_FAILURE; /* Shame on you */ + } + + grand->pmapPmaps[seg] = subord; /* Set the pointer to the subordinate */ + grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space; /* Set the vsid to the subordinate's vsid */ + grand->vflags |= (0x00008000 >> seg); /* Set in-use bit */ + + grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */ + + simple_unlock(&grand->lock); /* Unlock the grand pmap */ + + +/* + * Note that the following will force the segment registers to be reloaded following + * the next interrupt on all processors if they are using the pmap we just changed. + * + * This probably isn't needed, but it just feels better to do it. 
The reason it isn't + * needed is that there is no mapped memory in the grand pmap's segment before we + * nest and we will take a fault if it is accessed. + */ + + + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + (void)hw_compare_and_store((unsigned int)grand, 0, &per_proc_info[i].userpmap); /* Clear if ours */ + } + + return KERN_SUCCESS; /* Bye, bye, butterfly... */ +} + + +/* + * kern_return_t pmap_unnest(grand, vaddr, size) + * + * grand = the pmap that we will nest subord into + * vaddr = start of range in pmap to be inserted + * size = size of range in pmap to be inserted + * + * Removes a pmap from another. This is used to implement shared segments. + * On the current PPC processors, this is limited to segment (256MB) aligned + * segment sized ranges. + */ + +kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) { + + unsigned int oflags, seg, grandr, tstamp; + int i, tcpu, mycpu; + + if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */ + if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */ + + simple_lock(&grand->lock); /* Lock the superior pmap */ + disable_preemption(); /* It's all for me! 
*/ + + seg = vaddr >> 28; /* Isolate the segment number */ + if(!((0x00008000 >> seg) & grand->vflags)) { /* See if it is already in use */ + enable_preemption(); /* Ok, your turn */ + simple_unlock(&grand->lock); /* Unlock the superior pmap */ + panic("pmap_unnest: Attempt to unnest an unnested segment\n"); + return KERN_FAILURE; /* Shame on you */ + } + + grand->pmapPmaps[seg] = (pmap_t)0; /* Clear the pointer to the subordinate */ + grand->pmapSegs[seg] = grand->space; /* Set the pointer to the subordinate's vsid */ + grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space; /* Set the vsid to the grand's vsid */ + grand->vflags &= ~(0x00008000 >> seg); /* Clear in-use bit */ + + grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */ + + simple_unlock(&grand->lock); /* Unlock the superior pmap */ + +/* + * Note that the following will force the segment registers to be reloaded + * on all processors (if they are using the pmap we just changed) before returning. + * + * This is needed. The reason is that until the segment register is + * reloaded, another thread in the same task on a different processor will + * be able to access memory that it isn't allowed to anymore. That can happen + * because access to the subordinate pmap is being removed, but the pmap is still + * valid. + * + * Note that we only kick the other processor if we see that it was using the pmap while we + * were changing it. + */ + + + mycpu = cpu_number(); /* Who am I? Am I just a dream? 
*/ + for(i=0; i < real_ncpus; i++) { /* Cycle through processors */ + if(hw_compare_and_store((unsigned int)grand, 0, &per_proc_info[i].userpmap)) { /* Clear if ours and kick the other guy if he was using it */ + if(i == mycpu) continue; /* Don't diddle ourselves */ + tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */ + if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) { /* Make sure we see the pmap change + panic("pmap_unnest: Signal processor (%d) failed\n", i); + } + if(!hw_cpu_wcng(tstamp, &per_proc_info[i].ruptStamp[1], LockTimeOut) { /* Wait for the other processors to enter debug */ + panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i); + } + } + } + + enable_preemption(); /* Others can run now */ + return KERN_SUCCESS; /* Bye, bye, butterfly... */ +} + + +void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { + + int cnt, i, j, k; + vm_offset_t xx; + + if(!pmap) return; + + sva = trunc_page(sva); + eva = trunc_page(eva); + + for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */ + if((pmap->pmapUsage[i]) > 8192) { /* See if this is a sane number */ + panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n", + i * pmapUsageSize, pmap->pmapUsage[i], pmap); + } + } + j = 0; + while(1) { /* Try multiple times */ + cnt = 0; + for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */ + cnt = cnt + pmap->pmapUsage[i]; /* Sum all slots */ + } + if(cnt == pmap->stats.resident_count) break; /* We're ok if we match... 
*/ + + j++; + for(i = 0; i < 100000; i++) { + k = j + i; + } + if(j >= 10) { + panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n", + cnt, pmap->stats.resident_count, pmap); + } + } + + for(xx = sva; xx < eva; xx += PAGE_SIZE) { /* See if any slots not clear */ + if(pmap_extract(pmap, xx)) { + panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n", + sva, eva, xx, pmap); + } + } +} + + + + + + diff --git a/osfmk/ppc/pmap.h b/osfmk/ppc/pmap.h new file mode 100644 index 000000000..d2063239b --- /dev/null +++ b/osfmk/ppc/pmap.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Copyright (c) 1990 The University of Utah and + * the Center for Software Science at the University of Utah (CSS). + * All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the Center + * for Software Science at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSS requests users of this software to return to css-dist@cs.utah.edu any + * improvements that they make and grant CSS redistribution rights. + * + * Utah $Hdr: pmap.h 1.13 91/09/25$ + * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 9/90 + */ + +#ifndef _PPC_PMAP_H_ +#define _PPC_PMAP_H_ + +#include +#include +#include +#include +#include +#include + +struct pmap { + queue_head_t pmap_link; /* MUST BE FIRST */ + unsigned int pmapvr; /* Virtual to real conversion mask */ + space_t space; /* space for this pmap */ +#define BMAPLOCK 0x00000001 + struct blokmap *bmaps; /* Physical pointer to odd-size page maps */ + int ref_count; /* reference count */ + unsigned int vflags; /* Alternate map validity flags */ +#define pmapBatVal 0xFF000000 +#define pmapBatDVal 0xF0000000 +#define pmapBatIVal 0x0F000000 +#define pmapFlags 0x00FF0000 +#define pmapSubord 0x00800000 +#define pmapAltSeg 0x0000FFFF + unsigned int spaceNum; /* Space number */ +/* PPC line boundary here - 020 */ + unsigned int pmapSegs[16]; /* Contents of segment register if different than base space */ +/* PPC line boundary here - 060 */ + struct pmap *pmapPmaps[16]; /* Pointer to next lower level of pmaps */ +/* PPC line boundary here - 0A0 */ 
+/* Note: this must start on a word boundary */ + unsigned short pmapUsage[128]; /* Count of pages mapped into 32mb (8192 page) slots */ +#define pmapUsageShft 25 +#define pmapUsageMask 0x0000007F +#define pmapUsageSize (32*1024*1024) + +/* PPC line boundary here - 1A0 */ + struct pmap_statistics stats; /* statistics */ + decl_simple_lock_data(,lock) /* lock on map */ + +/* Need to pad out to a power of 2 - right now it is 512 bytes */ +#define pmapSize 512 +}; + +#define PMAP_NULL ((pmap_t) 0) + +extern pmap_t kernel_pmap; /* The kernel's map */ +extern pmap_t cursor_pmap; /* The pmap to start allocations with */ + +#define PMAP_SWITCH_USER(th, map, my_cpu) th->map = map; + +#define PMAP_ACTIVATE(pmap, th, cpu) +#define PMAP_DEACTIVATE(pmap, th, cpu) +#define PMAP_CONTEXT(pmap,th) + +#define pmap_kernel_va(VA) \ + (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS)) + +#define PPC_SID_KERNEL 0 /* Must change KERNEL_SEG_REG0_VALUE if !0 */ +#define SID_MAX ((1<<20) - 1) /* Space ID=20 bits, segment_id=SID + 4 bits */ + +#define pmap_kernel() (kernel_pmap) +#define pmap_resident_count(pmap) ((pmap)->stats.resident_count) +#define pmap_remove_attributes(pmap,start,end) +#define pmap_copy(dpmap,spmap,da,len,sa) +#define pmap_update() + +#define pmap_phys_address(x) ((x) << PPC_PGSHIFT) +#define pmap_phys_to_frame(x) ((x) >> PPC_PGSHIFT) + +/* + * prototypes. 
+ */ +extern void ppc_protection_init(void); +extern vm_offset_t phystokv(vm_offset_t pa); /* Get kernel virtual address from physical */ +extern vm_offset_t kvtophys(vm_offset_t va); /* Get physical address from kernel virtual */ +extern vm_offset_t pmap_map(vm_offset_t va, + vm_offset_t spa, + vm_offset_t epa, + vm_prot_t prot); +extern kern_return_t pmap_add_physical_memory(vm_offset_t spa, + vm_offset_t epa, + boolean_t available, + unsigned int attr); +extern vm_offset_t pmap_map_bd(vm_offset_t va, + vm_offset_t spa, + vm_offset_t epa, + vm_prot_t prot); +extern void pmap_bootstrap(unsigned int mem_size, + vm_offset_t *first_avail, + vm_offset_t *first_phys_avail, unsigned int kmapsize); +extern void pmap_block_map(vm_offset_t pa, + vm_size_t size, + vm_prot_t prot, + int entry, + int dtlb); +extern void pmap_switch(pmap_t); + +extern vm_offset_t pmap_extract(pmap_t pmap, + vm_offset_t va); + +extern void pmap_remove_all(vm_offset_t pa); + +extern boolean_t pmap_verify_free(vm_offset_t pa); +extern void sync_cache(vm_offset_t pa, unsigned length); +extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys); +extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys); +extern void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys); +extern void invalidate_cache_for_io(vm_offset_t va, unsigned length, boolean_t phys); +extern void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size, + vm_prot_t prot, int attr, unsigned int flags); /* Map a block */ +extern kern_return_t pmap_map_block_opt(vm_map_t map, vm_offset_t *va, + vm_offset_t pa, vm_size_t size, vm_prot_t prot, int attr); /* Map a block allocating an optimal virtual address */ +extern kern_return_t vm_map_block(vm_map_t map, vm_offset_t *va, vm_offset_t *bnd, vm_offset_t pa, + vm_size_t size, vm_prot_t prot); + +extern kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size); + +extern void 
pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva); + +#endif /* _PPC_PMAP_H_ */ + diff --git a/osfmk/ppc/pmap_internals.h b/osfmk/ppc/pmap_internals.h new file mode 100644 index 000000000..88f6cf295 --- /dev/null +++ b/osfmk/ppc/pmap_internals.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* Things that don't need to be exported from pmap. 
Putting + * them here and not in pmap.h avoids major recompiles when + * modifying something either here or in proc_reg.h + */ + +#ifndef _PMAP_INTERNALS_H_ +#define _PMAP_INTERNALS_H_ + +/* + * Definition of the flags in the low 5 bits of the phys_link field of the phys_entry + */ + +#define PHYS_LOCK 0x00000001 +#define PHYS_FLAGS 0x0000001F + +#ifndef ASSEMBLER + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Page table entries are stored in groups (PTEGS) in a hash table */ + +#if __PPC__ +#if _BIG_ENDIAN == 0 +error - bitfield structures are not checked for bit ordering in words +#endif /* _BIG_ENDIAN */ +#endif /* __PPC__ */ + +/* + * Don't change these structures unless you change the assembly code + */ + +struct phys_entry { + struct mapping *phys_link; /* MUST BE FIRST - chain of mappings and flags in the low 5 bits, see above */ + unsigned int pte1; /* referenced/changed/wimg - info update atomically */ +}; + + +#define PHYS_NULL ((struct phys_entry *)0) + +/* Memory may be non-contiguous. This data structure contains info + * for mapping this non-contiguous space into the contiguous + * physical->virtual mapping tables. An array of this type is + * provided to the pmap system at bootstrap by ppc_vm_init. + * + * NB : regions must be in order in this structure. 
+ */ + +typedef struct mem_region { + vm_offset_t start; /* Address of base of region */ + struct phys_entry *phys_table; /* base of region's table */ + unsigned int end; /* End address+1 */ +} mem_region_t; + +/* PMAP_MEM_REGION_MAX has a PowerMac dependancy - at least the value of + * kMaxRAMBanks in ppc/POWERMAC/nkinfo.h + */ +#define PMAP_MEM_REGION_MAX 26 + +extern mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX]; +extern int pmap_mem_regions_count; + +/* keep track of free regions of physical memory so that we can offer + * them up via pmap_next_page later on + */ + +#define FREE_REGION_MAX 8 +extern mem_region_t free_regions[FREE_REGION_MAX]; +extern int free_regions_count; + +/* Prototypes */ + +struct phys_entry *pmap_find_physentry(vm_offset_t pa); + + +#if DEBUG +extern int pmdebug; +#define PDB_LOCK 0x100 +#define LOCKPRINTF(args) if (pmdebug & PDB_LOCK) printf args; else +#else /* DEBUG */ +#define LOCKPRINTF(args) +#endif /* DEBUG */ + +extern vm_offset_t hash_table_base; +extern unsigned int hash_table_size; + +#endif +#endif /* _PMAP_INTERNALS_H_ */ diff --git a/osfmk/ppc/ppc_disasm.i b/osfmk/ppc/ppc_disasm.i new file mode 100644 index 000000000..a3976c9ea --- /dev/null +++ b/osfmk/ppc/ppc_disasm.i @@ -0,0 +1,220 @@ +# @OSF_COPYRIGHT@ +# + +# ppc.i - PowerPC instructions +# , +# By Eamonn McManus , 1995. 
+ +# simplified mnemonics +# ori 0,0,0 +in 01100000000000000000000000000000 nop +# addi[s] rD,0,value +in 00111sddddd00000iiiiiiiiiiiiiiii li{|s}[$s] \ + $reg($d),{$simm16($i)|$shifted16($i)}[$s] +# or rA,rS,rS +in 011111dddddaaaaabbbbb0110111100r {or{|.}[$r] $reg($a),$reg($b),$reg($d)|\ + mr{|.}[$r] $reg($a),$reg($d)}[$b == $d] +in 011111dddddaaaaabbbbb0100111100r xor{|.}[$r] $reg($a),$reg($b),$reg($d) + +# mtcrf 0xFF,rS +in 011111ddddd011111111000100100000 mtcr $reg($d) + +in 00001Dcccccaaaaaiiiiiiiiiiiiiiii t{d|w}[$D]$tcond($c)i $reg($a),$simm16($i) +in 000111dddddaaaaaiiiiiiiiiiiiiiii mulli $reg($d),$reg($a),$simm16($i) +in 001000dddddaaaaaiiiiiiiiiiiiiiii subfic $reg($d),$reg($a),$simm16($i) +in 00101Uddd0laaaaaiiiiiiiiiiiiiiii cmp{l|}[$U]i \ + $crcom($d){|1,}[$l]$reg($a),$simm16($i) +in 00110rdddddaaaaaiiiiiiiiiiiiiiii addic{|.}[$r] $reg($d),$reg0($a),$simm16($i) +in 00111sdddddaaaaaiiiiiiiiiiiiiiii addi{|s}[$s] $reg($d),$reg0($a),\ + {$simm16($i)|$shifted16($i)}[$s] +in 010000cccccccccciiiiiiiiiiiiiial $br($c,$a,$l,,1)\ + {$brdispl($i,14)|$brabs($i)}[$a] +in 01000100000000000000000000000010 sc +in 010010iiiiiiiiiiiiiiiiiiiiiiiial b{|l}[$l]{|a}[$a] \ + {$brdispl($i,24)|$brabs($i)}[$a] +in 010011ddd00sss000000000000000000 mcrf $crf($d),$crf($s) +in 010011cccccccccc000000000010000l $br($c,0,$l,lr,0) +in 010011dddddaaaaabbbbb0oooo000010 cr$crop($o) $crb($d),$crb($a),$crb($b) +in 01001100000000000000000001100100 rfi +in 01001100000000000000000100101100 isync +in 010011cccccccccc000001000010000l $br($c,0,$l,ctr,0) +in 010111dddddaaaaabbbbbffffftttttr rlwnm{|.}[$r] \ + $reg($a),$reg($d),$reg($b),$dec($f),$dec($t) +in 0101xxdddddaaaaasssssffffftttttr rl{wimi|winm|?|?}[$x]{|.}[$r] \ + $reg($a),$reg($d),$dec($s),$dec($f),$dec($t) +in 011110dddddaaaaasssssffffff0xxSr rld{icl|icr|ic|imi}[$x]{|.}[$r] \ + $reg($a),$reg($d),$dec($[sssssS]),$dec($f) +in 011110dddddaaaaabbbbbffffff100xr rldc{l|r}[$x]{|.}[$r] \ + $reg($a),$reg($d),$reg($b),$dec($f) +in 
011111ddd0laaaaabbbbb0000u000000 cmp{|l}[$u] \ + $crcom($d){|1,}[$l]$reg($a),$reg($b) +in 011111cccccaaaaabbbbb000w0001000 t{w|d}[$w]$tcond($c) $reg($a),$reg($b) +in 011111dddddaaaaabbbbbo000C01000r subf{c|}[$C]{|o}[$o]{|.}[$r] \ + $reg($d),$reg($a),$reg($b) +in 011111dddddaaaaabbbbb000u0010w1r mulh{d|w}[$w]{u|}[$u]{|.}[$r] \ + $reg($d),$reg($a),$reg($b) +in 011111dddddaaaaabbbbbott0001010r add{c|e||?}[$t]{|o}[$o]{|.}[$r] \ + $reg($d),$reg($a),$reg($b) +in 011111ddddd0000000000000m0100110 mf{cr|msr}[$m] $reg($d) +in 011111dddddaaaaabbbbb000w0101000 l{w|d}[$w]arx $reg($d),$reg0($a),$reg($b) +in 011111dddddaaaaabbbbb0000u101010 ld{|u}[$u]x $reg($d),$reg0($a),$reg($b) +in 011111dddddaaaaabbbbb0ooou101110 $ldst($o){|u}[$u]x \ + $reg($d),$reg($a),$reg($b) +in 011111dddddaaaaabbbbb0000011A00r {slw|and}[$A]{|.}[$r] \ + $reg($a),$reg($d),$reg($b) +in 011111dddddaaaaa000000000w11010r cntlz{w|d}[$w]{|.}[$r] $reg($a),$reg($d) +in 011111dddddaaaaabbbbb0000011011r sld{|.}[$r] $reg($a),$reg($d),$reg($b) +in 01111100000aaaaabbbbb00001101100 dcbst $reg($a),$reg($b) +in 011111dddddaaaaabbbbb0000111100r andc{|.}[$r] $reg($a),$reg($d),$reg($b) +in 01111100000aaaaabbbbb00010101100 dcbf $reg($a),$reg($b) +in 011111dddddaaaaa00000o001101000r neg{|o}[$o]{|.}[$r] $reg($d),$reg($a) +in 011111dddddaaaaabbbbb0001111100r nor{|.}[$r] $reg($a),$reg($d),$reg($b) +in 011111dddddaaaaabbbbbo01z001000r subf{|z}[$z]e{|o}[$o]{|.}[$r] \ + $reg($d),$reg($a) +in 011111ddddd0ffffffff000100100m00 mt{crf $hex($f),|msr}[$m] $reg($d) +in 011111sssssaaaaabbbbb0010u101010 std{|u}[$u]x $reg($s),$reg0($a),$reg($b) +in 011111sssssaaaaabbbbb001w0101101 st{w|d}[$w]cx. 
$reg($s),$reg0($a),$reg($b) +in 011111dddddaaaaa00000o011001010r addze{|o}[$o]{|.}[$r] $reg($d),$reg($a) +in 011111sssss0rrrr0000000110100100 mtsr $dec($r),$reg($s) +in 011111dddddaaaaa00000o0111010x0r {subf|add}[$x]me{|o}[$o]{|.}[$r] \ + $reg($d),$reg($a) +in 011111dddddaaaaabbbbbo0111010w1r mull{w|d}[$w]{|o}[$o]{|.}[$r] \ + $reg($d),$reg($a),$reg($b) +in 011111sssss00000bbbbb00111100100 mtsrin $reg($s),$reg($b) +in 01111100000aaaaabbbbb00111101100 dcbtst $reg0($a),$reg($b) +in 01111100000aaaaabbbbb01000101100 dcbt $reg0($a),$reg($b) +in 011111sssssaaaaabbbbb0100011100r eqv{|.}[$r] $reg($a),$reg($s),$reg($b) +in 0111110000000000bbbbb01001100100 tlbie $reg($b) +in 011111dddddaaaaabbbbb01i01101100 ec{i|o}[$i]wx $reg($d),$reg0($a),$reg($b) +in 011111dddddrrrrrrrrrr01t10100110 m{f|t}[$t]spr $reg($d),$spr($r) +in 011111dddddaaaaabbbbb0101u101010 lwa{|u}[$u]x $reg($d),$reg($a),$reg($b) +in 01111100000000000000001011100100 tlbia +in 011111dddddtttttttttt01011100110 mftb $reg($d),$dec($t) +in 011111sssssaaaaabbbbb0110011100r orc{|.}[$r] $reg($a),$reg($s),$reg($b) +in 0111110000000000bbbbb01101100100 slbie $reg($b) +in 011111dddddaaaaabbbbbo111u010w1r div{d|w}[$w]{u|}[$u]{|o}[$o]{|.}[$r] \ + $reg($d),$reg($a),$reg($b) +in 01111100000aaaaabbbbb01110101100 dcbi $reg0($a),$reg($b) +in 011111sssssaaaaabbbbb0111011100r nand{|.}[$r] $reg($a),$reg($s),$reg($b) +in 01111100000000000000001111100100 slbia +in 011111ddd00000000000010000000000 mcrxr $crf($d) +in 011111dddddaaaaabbbbb10000101010 lswx $reg($d),$reg0($a),$reg($b) +in 011111dddddaaaaabbbbb1w000101100 l{w|h}[$w]brx $reg($d),$reg0($a),$reg($b) +in 011111dddddaaaaabbbbb100su101110 lf{s|d}[$s]{|u}[$u]x \ + $fr($d),$reg0($a),$reg($b) +in 011111sssssaaaaabbbbb1x000110w0r sr{|a}[$x]{w|d}[$w]{|.}[$r] \ + $reg($a),$reg($s),$reg($b) +in 011111sssssaaaaabbbbb1000011011r srd{|.}[$r] $reg($a),$reg($s),$reg($b) +in 01111100000000000000010001101100 tlbsync +in 011111ddddd0rrrr0000010010101100 mfsr $reg($d),$dec($r) +in 
011111dddddaaaaannnnn10010101010 lswi $reg($d),$reg0($a),$dec($n) +in 01111100000000000000010010101100 sync +in 011111ddddd00000bbbbb10100100110 mfsrin $reg($d),$reg($b) +in 011111sssssaaaaabbbbb10100101010 stswx $reg($s),$reg0($a),$reg($b) +in 011111sssssaaaaabbbbb1w100101100 st{w|h}[$w]brx $reg($s),$reg0($a),$reg($b) +in 011111sssssaaaaabbbbb101du101110 stf{s|d}[$d]{|u}[$u]x \ + $fr($s),{$reg0($a)|$reg($a)}[$u],$reg($b) +in 011111sssssaaaaannnnn10110101010 stswi $reg($s),$reg0($a),$dec($n) +in 011111dddddaaaaasssss1100111000r srawi{|.}[$r] $reg($a),$reg($s),$dec($s) +in 01111100000000000000011010101100 eieio +in 011111sssssaaaaa00000111xx11010r exts{h|b|w|?}[$x]{|.}[$r] $reg($a),$reg($s) +in 01111100000aaaaabbbbb11110101100 icbi $reg0($a),$reg($b) +in 011111sssssaaaaabbbbb11110101110 stfiwx $fr($s),$reg0($a),$reg($b) +in 01111100000aaaaabbbbb11111101100 dcbz $reg0($a),$reg($b) +in 011Axsaaaaadddddiiiiiiiiiiiiiiii {{|x}[$x]or|{and|?}[$x]}[$A]i{|s}[$s]\ + {|.}[$A] $reg($d),$reg($a),\ + {$hex($i)|$shifted16($i)}[$s] +# Grouping andi with xori and ori may not be such a brilliant idea, since it +# gets invoked as a catch-all for the 011111 instructions below. But that +# just means that we get a different sort of undefined instruction. 
+in 10111sdddddaaaaaiiiiiiiiiiiiiiii {l|st}[$s]mw \ + $reg($d),$simm16($i)($reg0($a)) +in 10oooudddddaaaaaiiiiiiiiiiiiiiii $ldst($o){|u}[$u] \ + $reg($d),$simm16($i)($reg0($a)) +in 110sDudddddaaaaaiiiiiiiiiiiiiiii {l|st}[$s]f{s|d}[$D]{|u}[$u] \ + $fr($d),$simm16($i)($reg0($a)) +in 111010dddddaaaaaiiiiiiiiiiiiiixy l{d{|u}[$y]|{|?}[$y]w}[$x] \ + $reg($d),$simm16($i)($reg0($a)) +in 111s11dddddaaaaabbbbb0000010010r fdiv{s|}[$s]{|.}[$r] \ + $fr($d),$fr($a),$fr($b) +in 111s11dddddaaaaabbbbb000001010xr f{sub|add}[$x]{s|}[$s]{|.}[$r] \ + $fr($d),$fr($a),$fr($b) +in 111s11ddddd00000bbbbb0000010110r fsqrt{s|}[$s]{|.}[$r] $fr($d),$fr($b) +in 111011ddddd00000bbbbb0000011000r fress{|.}[$r] $fr($d),$fr($b) +in 111s11dddddaaaaa00000ccccc11001r fmul{s|}[$s]{|.}[$r] \ + $fr($d),$fr($a),$fr($c) +in 111s11dddddaaaaabbbbbccccc111nxr f{|n}[$n]m{sub|add}[$x]{s|}[$s]{|.}[$r] \ + $fr($d),$fr($a),$fr($c),$fr($b) +in 111110sssssaaaaaiiiiiiiiiiiiii0u std{|u}[$u] \ + $reg($s),$simm16($i)({$reg0($a)|$reg($a)}[$u]) +in 111111ccc00aaaaabbbbb0000o000000 fcmp{u|o}[$o] $crf($c),$fr($a),$fr($b) +in 111111ddddd00000bbbbb0000001100r frsp{|.}[$r] $fr($d),$fr($b) +in 111111ddddd00000bbbbb000000111zr fctiw{|z}[$z]{|.}[$r] $fr($d),$fr($b) +in 111111dddddaaaaabbbbbccccc10111r fsel{|.}[$r] \ + $fr($d),$fr($a),$fr($c),$fr($b) +in 111111ddddd00000bbbbb0000011010r frsqrte{|,.}[$r] $fr($d),$fr($b) +in 111111ddddd0000000000000xx00110r mtfsb{?|1|0|?}[$x]{|.}[$r] $fcond($d) +in 111111ddddd00000bbbbb0000101000r fneg{|.}[$r] $fr($d),$fr($b) +in 111111ddd00sss000000000010000000 mcrfs $crf($d),$crf($s) +in 111111ddddd00000bbbbb0001001000r fmr{|.}[$r] $fr($d),$fr($b) +in 111111ddd0000000iiii00010000110r mtfsfi{|.}[$r] $crf($d),$simm16($i) +in 111111ddddd00000bbbbb0010001000r fnabs{|.}[$r] $fr($d),$fr($b) +in 111111ddddd00000bbbbb0100001000r fabs{|.}[$r] $fr($d),$fr($b) +in 111111ddddd00000000001001000111r mffs{|.}[$r] $fr($d) +in 1111110ffffffff0bbbbb1011000111r mtfsf{|.}[$r] $hex($f),$fr($b) +in 
111111ddddd00000bbbbb110010111zr fctid{|z}[$z]{|.}[$r] $fr($d),$fr($b) +in 111111ddddd00000bbbbb1101001110r fcfid{|.}[$r] $fr($d),$fr($b) + +in xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ? + + +ldst ooo {lwz|lbz|stw|stb|lhz|lha|sth|?}[$o] +br utdzyrrrcc(%a,%l,s,%C) b{d{nz|z}[$z]|{|?}[$z]}[$d]{c|}[$u]\ + {|l}[$l]{|a}[$a]$s \ + {$crcom($r)$cond($[cct]){|,}[$C]|}[$u] +cond ccc {ge|lt|le|gt|ne|eq|ns|so}[$c] +fcond ccc $hex($c) +crb rrrcc $cr($r):$cond($[cc1]) +crop oooo {?|nor|?|?|andc|?|xor|nand|and|eqv|?|?|?|orc|or|?}[$o] +tcond ccccc {?|lgt|llt|?|eq|lge|lle|?|?|?|?|?|ge|?|?|?|lt|?|?|?|le|?|?|?|ne|?|?|?|?|?|?|a}[$c] + +spr 0000000000 mq +spr 0000100000 xer +spr 0010l00000 rtc{u|l}[$l] +spr s011000000 dec{u|s}[$s] +spr 0100000000 lr +spr 0100100000 ctr +spr 1001000000 dsisr +spr 1001100000 dar +spr 1100100000 sdr1 +spr 1101n00000 srr$dec($n) +spr 100nn01000 sprg$dec($n) +spr 1101001000 ear +spr 1101101000 pvr +spr 10nnl10000 ibat$dec($n){u|l}[$l] +spr 1000n11111 hid$dec($n) +spr 1001011111 iabr +spr 1010111111 dabr +spr 1111111111 pir +spr xxxxxxxxxx ? + +reg0 00000 0 +reg0 nnnnn $reg($n) + +reg (%n) r$dec($n) +fr (%n) fr$dec($n) +cr (%n) cr$dec($n) +crf (%n) crf$dec($n) +crcom 000 +crcom nnn $cr($n), + +simm16 snnnnnnnnnnnnnnn {$hex($n)|-$hex((1 << 15) - $n)}[$s] + +shifted16 (%n) $hex($n << 16) + +brabs (%n) $hex($n << 2) + +hex (%n) : +dec (%n) : +mbz (%n) : +brdispl (%d,%n) : diff --git a/osfmk/ppc/ppc_init.c b/osfmk/ppc/ppc_init.c new file mode 100644 index 000000000..b527cba05 --- /dev/null +++ b/osfmk/ppc/ppc_init.c @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern const char version[]; +extern const char version_variant[]; + +extern unsigned int intstack_top_ss; /* declared in start.s */ +#if MACH_KDP || MACH_KDB +extern unsigned int debstackptr; /* declared in start.s */ +extern unsigned int debstack_top_ss; /* declared in start.s */ +#endif /* MACH_KDP || MACH_KDB */ + +unsigned int kernel_seg_regs[] = { + KERNEL_SEG_REG0_VALUE, /* 0 */ + KERNEL_SEG_REG0_VALUE + 1, /* 1 */ + KERNEL_SEG_REG0_VALUE + 2, /* 2 */ + SEG_REG_INVALID, /* 3 */ + SEG_REG_INVALID, /* 4 */ + KERNEL_SEG_REG5_VALUE, /* 5 - I/O segment */ + SEG_REG_INVALID, /* 6 */ + SEG_REG_INVALID, /* 7 */ + KERNEL_SEG_REG8_VALUE, /* 8-F are possible IO space */ + KERNEL_SEG_REG9_VALUE, + KERNEL_SEG_REG10_VALUE, + KERNEL_SEG_REG11_VALUE, + KERNEL_SEG_REG12_VALUE, + KERNEL_SEG_REG13_VALUE, + KERNEL_SEG_REG14_VALUE, /* 14 - A/V video */ + KERNEL_SEG_REG15_VALUE /* 15 - NuBus etc */ +}; + +extern void thandler(void); /* trap handler */ +extern void ihandler(void); /* interrupt handler */ +extern void shandler(void); /* syscall handler */ +extern void fpu_switch(void); /* fp handler */ +extern void vec_switch(void); /* vector handler */ +extern void 
atomic_switch_trap(void); /* fast path atomic thread switch */ + +void (*exception_handlers[])(void) = { + thandler, /* 0x000 INVALID EXCEPTION (T_IN_VAIN) */ + thandler, /* 0x100 System reset (T_RESET) */ + thandler, /* 0x200 Machine check (T_MACHINE_CHECK) */ + thandler, /* 0x300 Data access (T_DATA_ACCESS) */ + thandler, /* 0x400 Instruction access (T_INSTRUCTION_ACCESS) */ + ihandler, /* 0x500 External interrupt (T_INTERRUPT) */ + thandler, /* 0x600 Alignment (T_ALIGNMENT) */ + thandler, /* 0x700 fp exc, ill/priv instr, trap (T_PROGRAM) */ + fpu_switch, /* 0x800 Floating point disabled (T_FP_UNAVAILABLE) */ + ihandler, /* 0x900 Decrementer (T_DECREMENTER) */ + thandler, /* 0xA00 I/O controller interface (T_IO_ERROR) */ + thandler, /* 0xB00 INVALID EXCEPTION (T_RESERVED) */ + shandler, /* 0xC00 System call exception (T_SYSTEM_CALL) */ + thandler, /* 0xD00 Trace (T_TRACE) */ + thandler, /* 0xE00 FP assist (T_FP_ASSIST) */ + thandler, /* 0xF00 Performance monitor (T_PERF_MON) */ + vec_switch, /* 0xF20 VMX (T_VMX) */ + thandler, /* 0x1000 INVALID EXCEPTION (T_INVALID_EXCP0) */ + thandler, /* 0x1100 INVALID EXCEPTION (T_INVALID_EXCP1) */ + thandler, /* 0x1200 INVALID EXCEPTION (T_INVALID_EXCP2) */ + thandler, /* 0x1300 instruction breakpoint (T_INSTRUCTION_BKPT) */ + ihandler, /* 0x1400 system management (T_SYSTEM_MANAGEMENT) */ + thandler, /* 0x1600 Altivec Assist (T_ALTIVEC_ASSIST) */ + ihandler, /* 0x1700 Thermal interruption (T_THERMAL) */ + thandler, /* 0x1800 INVALID EXCEPTION (T_INVALID_EXCP5) */ + thandler, /* 0x1900 INVALID EXCEPTION (T_INVALID_EXCP6) */ + thandler, /* 0x1A00 INVALID EXCEPTION (T_INVALID_EXCP7) */ + thandler, /* 0x1B00 INVALID EXCEPTION (T_INVALID_EXCP8) */ + thandler, /* 0x1C00 INVALID EXCEPTION (T_INVALID_EXCP9) */ + thandler, /* 0x1D00 INVALID EXCEPTION (T_INVALID_EXCP10) */ + thandler, /* 0x1E00 INVALID EXCEPTION (T_INVALID_EXCP11) */ + thandler, /* 0x1F00 INVALID EXCEPTION (T_INVALID_EXCP12) */ + thandler, /* 0x1F00 INVALID EXCEPTION 
(T_INVALID_EXCP13) */ + thandler, /* 0x2000 Run Mode/Trace (T_RUNMODE_TRACE) */ + + ihandler, /* Software Signal processor (T_SIGP) */ + thandler, /* Software Preemption (T_PREEMPT) */ + ihandler, /* Software INVALID EXCEPTION (T_CSWITCH) */ + ihandler /* Software Shutdown Context (T_SHUTDOWN) */ +}; + +int pc_trace_buf[1024] = {0}; +int pc_trace_cnt = 1024; + +void ppc_init(boot_args *args) +{ + int i; + unsigned long *src,*dst; + char *str; + unsigned long addr, videoAddr; + unsigned int maxmem; + bat_t bat; + extern vm_offset_t static_memory_end; + + /* + * Setup per_proc info for first cpu. + */ + + per_proc_info[0].cpu_number = 0; + per_proc_info[0].cpu_flags = 0; + per_proc_info[0].istackptr = 0; /* we're on the interrupt stack */ + per_proc_info[0].intstack_top_ss = intstack_top_ss; +#if MACH_KDP || MACH_KDB + per_proc_info[0].debstackptr = debstackptr; + per_proc_info[0].debstack_top_ss = debstack_top_ss; +#endif /* MACH_KDP || MACH_KDB */ + per_proc_info[0].get_interrupts_enabled = + fake_get_interrupts_enabled; + per_proc_info[0].set_interrupts_enabled = + fake_set_interrupts_enabled; + per_proc_info[0].active_kloaded = (unsigned int) + &active_kloaded[0]; + per_proc_info[0].cpu_data = (unsigned int) + &cpu_data[0]; + per_proc_info[0].active_stacks = (unsigned int) + &active_stacks[0]; + per_proc_info[0].need_ast = (unsigned int) + &need_ast[0]; + per_proc_info[0].FPU_thread = 0; + per_proc_info[0].FPU_vmmCtx = 0; + per_proc_info[0].VMX_thread = 0; + per_proc_info[0].VMX_vmmCtx = 0; + + machine_slot[0].is_cpu = TRUE; + + cpu_init(); + + /* + * Setup some processor related structures to satisfy funnels. + * Must be done before using unparallelized device drivers. 
+ */ + processor_ptr[0] = &processor_array[0]; + master_cpu = 0; + master_processor = cpu_to_processor(master_cpu); + + /* Set up segment registers as VM through space 0 */ + for (i=0; i<=15; i++) { + isync(); + mtsrin((KERNEL_SEG_REG0_VALUE | (i << 20)), i * 0x10000000); + sync(); + } + + static_memory_end = round_page(args->topOfKernelData);; + /* Get platform expert set up */ + PE_init_platform(FALSE, args); + + + /* This is how the BATs get configured */ + /* IBAT[0] maps Segment 0 1:1 */ + /* DBAT[0] maps Segment 0 1:1 */ + /* DBAT[2] maps the I/O Segment 1:1 */ + /* DBAT[3] maps the Video Segment 1:1 */ + + /* If v_baseAddr is non zero, use DBAT3 to map the video segment */ + videoAddr = args->Video.v_baseAddr & 0xF0000000; + if (videoAddr) { + /* start off specifying 1-1 mapping of video seg */ + bat.upper.word = videoAddr; + bat.lower.word = videoAddr; + + bat.upper.bits.bl = 0x7ff; /* size = 256M */ + bat.upper.bits.vs = 1; + bat.upper.bits.vp = 0; + + bat.lower.bits.wimg = PTE_WIMG_IO; + bat.lower.bits.pp = 2; /* read/write access */ + + sync();isync(); + mtdbatu(3, BAT_INVALID); /* invalidate old mapping */ + mtdbatl(3, bat.lower.word); + mtdbatu(3, bat.upper.word); + sync();isync(); + } + + /* Use DBAT2 to map the io segment */ + addr = get_io_base_addr() & 0xF0000000; + if (addr != videoAddr) { + /* start off specifying 1-1 mapping of io seg */ + bat.upper.word = addr; + bat.lower.word = addr; + + bat.upper.bits.bl = 0x7ff; /* size = 256M */ + bat.upper.bits.vs = 1; + bat.upper.bits.vp = 0; + + bat.lower.bits.wimg = PTE_WIMG_IO; + bat.lower.bits.pp = 2; /* read/write access */ + + sync();isync(); + mtdbatu(2, BAT_INVALID); /* invalidate old mapping */ + mtdbatl(2, bat.lower.word); + mtdbatu(2, bat.upper.word); + sync();isync(); + } + + if (!PE_parse_boot_arg("diag", &dgWork.dgFlags)) dgWork.dgFlags=0; /* Set diagnostic flags */ + if(dgWork.dgFlags & enaExpTrace) trcWork.traceMask = 0xFFFFFFFF; /* If tracing requested, enable it */ + +#if 0 + 
GratefulDebInit((bootBumbleC *)&(args->Video)); /* Initialize the GratefulDeb debugger */ +#endif + + printf_init(); /* Init this in case we need debugger */ + panic_init(); /* Init this in case we need debugger */ + + /* setup debugging output if one has been chosen */ + PE_init_kprintf(FALSE); + kprintf("kprintf initialized\n"); + + /* create the console for verbose or pretty mode */ + PE_create_console(); + + /* setup console output */ + PE_init_printf(FALSE); + + kprintf("version_variant = %s\n", version_variant); + kprintf("version = %s\n", version); + +#if DEBUG + printf("\n\n\nThis program was compiled using gcc %d.%d for powerpc\n", + __GNUC__,__GNUC_MINOR__); + + /* Processor version information */ + { + unsigned int pvr; + __asm__ ("mfpvr %0" : "=r" (pvr)); + printf("processor version register : 0x%08x\n",pvr); + } + for (i = 0; i < kMaxDRAMBanks; i++) { + if (args->PhysicalDRAM[i].size) + printf("DRAM at 0x%08x size 0x%08x\n", + args->PhysicalDRAM[i].base, + args->PhysicalDRAM[i].size); + } +#endif /* DEBUG */ + + /* + * VM initialization, after this we're using page tables... + */ + if (!PE_parse_boot_arg("maxmem", &maxmem)) + maxmem=0; + else + maxmem = maxmem * (1024 * 1024); + + ppc_vm_init(maxmem, args); + + PE_init_platform(TRUE, args); + + machine_startup(args); +} + +ppc_init_cpu( + struct per_proc_info *proc_info) +{ + int i; + unsigned int gph; + savectl *sctl; /* Savearea controls */ + + if(proc_info->savedSave) { /* Do we have a savearea set up already? 
*/ + mtsprg(1, proc_info->savedSave); /* Set saved address of savearea */ + } + else { + gph = (unsigned int)save_get_phys(); /* Get a savearea (physical addressing) */ + mtsprg(1, gph); /* Set physical address of savearea */ + } + + cpu_init(); + + proc_info->Lastpmap = 0; /* Clear last used space */ + + /* Set up segment registers as VM through space 0 */ + for (i=0; i<=15; i++) { + isync(); + mtsrin((KERNEL_SEG_REG0_VALUE | (i << 20)), i * 0x10000000); + sync(); + } + + ppc_vm_cpu_init(proc_info); + + ml_thrm_init(); /* Start thermal monitoring on this processor */ + + slave_main(); +} diff --git a/osfmk/ppc/ppc_vm_init.c b/osfmk/ppc/ppc_vm_init.c new file mode 100644 index 000000000..29bee412e --- /dev/null +++ b/osfmk/ppc/ppc_vm_init.c @@ -0,0 +1,343 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __MACHO__ +#include +#endif + +extern unsigned int intstack[]; /* declared in start.s */ +extern unsigned int intstack_top_ss; /* declared in start.s */ + +vm_offset_t mem_size; /* Size of actual physical memory present + minus any performance buffer and possibly limited + by mem_limit in bytes */ +vm_offset_t mem_actual; /* The "One True" physical memory size + actually, it's the highest physical address + 1 */ + + +mem_region_t pmap_mem_regions[PMAP_MEM_REGION_MAX]; +int pmap_mem_regions_count = 0; /* No non-contiguous memory regions */ + +mem_region_t free_regions[FREE_REGION_MAX]; +int free_regions_count; + +#ifndef __MACHO__ +extern unsigned long etext; +#endif + +unsigned int avail_remaining = 0; +vm_offset_t first_avail; +vm_offset_t static_memory_end; +extern vm_offset_t avail_next; + +#ifdef __MACHO__ +extern struct mach_header _mh_execute_header; +vm_offset_t sectTEXTB; +int sectSizeTEXT; +vm_offset_t sectDATAB; +int sectSizeDATA; +vm_offset_t sectOBJCB; +int sectSizeOBJC; +vm_offset_t sectLINKB; +int sectSizeLINK; +vm_offset_t sectKLDB; +int sectSizeKLD; + +vm_offset_t end, etext, edata; +#endif + +extern unsigned long exception_entry; +extern unsigned long exception_end; + + +void ppc_vm_init(unsigned int mem_limit, boot_args *args) +{ + unsigned int htabmask; + unsigned int i, j, batsize, kmapsize; + vm_offset_t addr; + int boot_task_end_offset; + const char *cpus; + mapping *mp; + vm_offset_t first_phys_avail; + vm_offset_t sizeadj, oldstart; + +#ifdef __MACHO__ + /* Now retrieve addresses for end, edata, and etext + * from MACH-O headers. 
+ */ + sectTEXTB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__TEXT", §SizeTEXT); + sectDATAB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__DATA", §SizeDATA); + sectOBJCB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__OBJC", §SizeOBJC); + sectLINKB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__LINKEDIT", §SizeLINK); + sectKLDB = (vm_offset_t)getsegdatafromheader( + &_mh_execute_header, "__KLD", §SizeKLD); + + etext = (vm_offset_t) sectTEXTB + sectSizeTEXT; + edata = (vm_offset_t) sectDATAB + sectSizeDATA; + end = round_page(getlastaddr()); /* Force end to next page */ +#if DEBUG + kprintf("sectTEXT: %x, size: %x\n", sectTEXTB, sectSizeTEXT); + kprintf("sectDATA: %x, size: %x\n", sectDATAB, sectSizeDATA); + kprintf("sectOBJC: %x, size: %x\n", sectOBJCB, sectSizeOBJC); + kprintf("sectLINK: %x, size: %x\n", sectLINKB, sectSizeLINK); + kprintf("sectKLD: %x, size: %x\n", sectKLDB, sectSizeKLD); + kprintf("end: %x\n", end); +#endif +#endif /* __MACHO__ */ + +/* Stitch valid memory regions together - they may be contiguous + * even though they're not already glued together + */ + mem_actual = mem_actual = args->PhysicalDRAM[0].base + args->PhysicalDRAM[0].size; /* Initialize to the first region size */ + addr = 0; /* temp use as pointer to previous memory region... */ + for (i = 1; i < kMaxDRAMBanks; i++) { + + if (args->PhysicalDRAM[i].size == 0) continue; /* If region is empty, skip it */ + + if((args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size) > mem_actual) { /* New high? */ + mem_actual = args->PhysicalDRAM[i].base + args->PhysicalDRAM[i].size; /* Take the high bid */ + } + + if (args->PhysicalDRAM[i].base == /* Does the end of the last hit the start of the next? 
*/ + args->PhysicalDRAM[addr].base + + args->PhysicalDRAM[addr].size) { + kprintf("region 0x%08x size 0x%08x joining region 0x%08x size 0x%08x\n", + args->PhysicalDRAM[addr].base, args->PhysicalDRAM[addr].size, + args->PhysicalDRAM[i].base, args->PhysicalDRAM[i].size); + + args->PhysicalDRAM[addr].size += args->PhysicalDRAM[i].size; /* Join them */ + args->PhysicalDRAM[i].size = 0; + continue; + } + /* This is now last non-zero region to compare against */ + addr = i; + } + + /* Go through the list of memory regions passed in via the args + * and copy valid entries into the pmap_mem_regions table, adding + * further calculated entries. + */ + + pmap_mem_regions_count = 0; + mem_size = 0; /* Will use to total memory found so far */ + + for (i = 0; i < kMaxDRAMBanks; i++) { + if (args->PhysicalDRAM[i].size == 0) + continue; + + /* The following should only happen if memory size has + been artificially reduced with -m */ + if (mem_limit > 0 && + mem_size + args->PhysicalDRAM[i].size > mem_limit) + args->PhysicalDRAM[i].size = mem_limit - mem_size; + + /* We've found a region, tally memory */ + + pmap_mem_regions[pmap_mem_regions_count].start = + args->PhysicalDRAM[i].base; + pmap_mem_regions[pmap_mem_regions_count].end = + args->PhysicalDRAM[i].base + + args->PhysicalDRAM[i].size; + + /* Regions must be provided in ascending order */ + assert ((pmap_mem_regions_count == 0) || + pmap_mem_regions[pmap_mem_regions_count].start > + pmap_mem_regions[pmap_mem_regions_count-1].start); + + if (pmap_mem_regions_count > 0) { + /* we add on any pages not in the first memory + * region to the avail_remaining count. The first + * memory region is used for mapping everything for + * bootup and is taken care of specially. 
+ */ + avail_remaining += + args->PhysicalDRAM[i].size / PPC_PGBYTES; + } + + /* Keep track of how much memory we've found */ + + mem_size += args->PhysicalDRAM[i].size; + + /* increment number of regions found */ + pmap_mem_regions_count++; + } + + kprintf("mem_size: %d M\n",mem_size / (1024 * 1024)); + + /* + * Initialize the pmap system, using space above `first_avail' + * for the necessary data structures. + * NOTE : assume that we'll have enough space mapped in already + */ + + first_phys_avail = static_memory_end; + first_avail = adjust_bat_limit(first_phys_avail, 0, FALSE, FALSE); + + kmapsize = (round_page(exception_end) - trunc_page(exception_entry)) + /* Get size we will map later */ + (round_page(sectTEXTB+sectSizeTEXT) - trunc_page(sectTEXTB)) + + (round_page(sectDATAB+sectSizeDATA) - trunc_page(sectDATAB)) + + (round_page(sectOBJCB+sectSizeOBJC) - trunc_page(sectOBJCB)) + + (round_page(sectLINKB+sectSizeLINK) - trunc_page(sectLINKB)) + + (round_page(sectKLDB+sectSizeKLD) - trunc_page(sectKLDB)) + + (round_page(static_memory_end) - trunc_page(end)); + + pmap_bootstrap(mem_size,&first_avail,&first_phys_avail, kmapsize); + +#ifdef __MACHO__ +#if DEBUG + kprintf("Mapping memory:\n"); + kprintf(" exception vector: %08X, %08X - %08X\n", trunc_page(exception_entry), + trunc_page(exception_entry), round_page(exception_end)); + kprintf(" sectTEXTB: %08X, %08X - %08X\n", trunc_page(sectTEXTB), + trunc_page(sectTEXTB), round_page(sectTEXTB+sectSizeTEXT)); + kprintf(" sectDATAB: %08X, %08X - %08X\n", trunc_page(sectDATAB), + trunc_page(sectDATAB), round_page(sectDATAB+sectSizeDATA)); + kprintf(" sectOBJCB: %08X, %08X - %08X\n", trunc_page(sectOBJCB), + trunc_page(sectOBJCB), round_page(sectOBJCB+sectSizeOBJC)); + kprintf(" sectLINKB: %08X, %08X - %08X\n", trunc_page(sectLINKB), + trunc_page(sectLINKB), round_page(sectLINKB+sectSizeLINK)); + kprintf(" sectKLDB: %08X, %08X - %08X\n", trunc_page(sectKLDB), + trunc_page(sectKLDB), round_page(sectKLDB+sectSizeKLD)); 
+ kprintf(" end: %08X, %08X - %08X\n", trunc_page(end), + trunc_page(end), static_memory_end); +#endif /* DEBUG */ + pmap_map(trunc_page(exception_entry), trunc_page(exception_entry), + round_page(exception_end), VM_PROT_READ|VM_PROT_EXECUTE); + pmap_map(trunc_page(sectTEXTB), trunc_page(sectTEXTB), + round_page(sectTEXTB+sectSizeTEXT), VM_PROT_READ|VM_PROT_EXECUTE); + pmap_map(trunc_page(sectDATAB), trunc_page(sectDATAB), + round_page(sectDATAB+sectSizeDATA), VM_PROT_READ|VM_PROT_WRITE); + pmap_map(trunc_page(sectOBJCB), trunc_page(sectOBJCB), + round_page(sectOBJCB+sectSizeOBJC), VM_PROT_READ|VM_PROT_WRITE); + + + /* The KLD and LINKEDIT segments are unloaded in toto after boot completes, + * but via ml_static_mfree(), through IODTFreeLoaderInfo(). Hence, we have + * to map both segments page-by-page. + */ + for (addr = trunc_page(sectKLDB); + addr < round_page(sectKLDB+sectSizeKLD); + addr += PAGE_SIZE) { + + pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE); + } + + for (addr = trunc_page(sectLINKB); + addr < round_page(sectLINKB+sectSizeLINK); + addr += PAGE_SIZE) { + + pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE); + } + +/* + * We need to map the remainder page-by-page because some of this will + * be released later, but not all. 
Ergo, no block mapping here + */ + for(addr = trunc_page(end); addr < round_page(static_memory_end); addr += PAGE_SIZE) { + pmap_enter(kernel_pmap, addr, addr, VM_PROT_READ|VM_PROT_WRITE, TRUE); + } +#endif /* __MACHO__ */ + +#if DEBUG + for (i=0 ; i < free_regions_count; i++) { + kprintf("Free region start 0x%08x end 0x%08x\n", + free_regions[i].start,free_regions[i].end); + } +#endif + + /* Initialize shadow IBATs */ + shadow_BAT.IBATs[0].upper=BAT_INVALID; + shadow_BAT.IBATs[0].lower=BAT_INVALID; + shadow_BAT.IBATs[1].upper=BAT_INVALID; + shadow_BAT.IBATs[1].lower=BAT_INVALID; + shadow_BAT.IBATs[2].upper=BAT_INVALID; + shadow_BAT.IBATs[2].lower=BAT_INVALID; + shadow_BAT.IBATs[3].upper=BAT_INVALID; + shadow_BAT.IBATs[3].lower=BAT_INVALID; + + LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); /* Load up real IBATs from shadows */ + + /* Initialize shadow DBATs */ + shadow_BAT.DBATs[0].upper=BAT_INVALID; + shadow_BAT.DBATs[0].lower=BAT_INVALID; + shadow_BAT.DBATs[1].upper=BAT_INVALID; + shadow_BAT.DBATs[1].lower=BAT_INVALID; + mfdbatu(shadow_BAT.DBATs[2].upper,2); + mfdbatl(shadow_BAT.DBATs[2].lower,2); + mfdbatu(shadow_BAT.DBATs[3].upper,3); + mfdbatl(shadow_BAT.DBATs[3].lower,3); + + LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]); /* Load up real DBATs from shadows */ + + sync();isync(); +#if DEBUG + for(i=0; i<4; i++) kprintf("DBAT%1d: %08X %08X\n", + i, shadow_BAT.DBATs[i].upper, shadow_BAT.DBATs[i].lower); + for(i=0; i<4; i++) kprintf("IBAT%1d: %08X %08X\n", + i, shadow_BAT.IBATs[i].upper, shadow_BAT.IBATs[i].lower); +#endif +} + +void ppc_vm_cpu_init( + struct per_proc_info *proc_info) +{ + hash_table_init(hash_table_base, hash_table_size); + + LoadIBATs((unsigned int *)&shadow_BAT.IBATs[0]); + LoadDBATs((unsigned int *)&shadow_BAT.DBATs[0]); + + sync();isync(); +} diff --git a/osfmk/ppc/proc_reg.h b/osfmk/ppc/proc_reg.h new file mode 100644 index 000000000..68fb49dba --- /dev/null +++ b/osfmk/ppc/proc_reg.h @@ -0,0 +1,744 @@ +/* + * Copyright (c) 2000 Apple 
Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_PROC_REG_H_ +#define _PPC_PROC_REG_H_ + +#include + +/* Define some useful masks that convert from bit numbers */ + +#if __PPC__ +#if _BIG_ENDIAN +#ifndef ENDIAN_MASK +#define ENDIAN_MASK(val,size) (1 << ((size-1) - val)) +#endif +#else +#error code not ported to little endian targets yet +#endif /* _BIG_ENDIAN */ +#endif /* __PPC__ */ + +#define MASK32(PART) ENDIAN_MASK(PART ## _BIT, 32) +#define MASK16(PART) ENDIAN_MASK(PART ## _BIT, 16) +#define MASK8(PART) ENDIAN_MASK(PART ## _BIT, 8) + +#undef MASK +#define MASK(PART) MASK32(PART) + +#define BITS_PER_WORD 32 +#define BITS_PER_WORD_POW2 5 + +/* Defines for decoding the MSR bits */ + +#define MSR_SF_BIT 0 +#define MSR_RES1_BIT 1 +#define MSR_RES2_BIT 2 +#define MSR_RES3_BIT 3 +#define MSR_RES4_BIT 4 +#define MSR_RES5_BIT 5 +#define MSR_VEC_BIT 6 +#define MSR_RES7_BIT 7 +#define MSR_RES8_BIT 8 +#define MSR_RES9_BIT 9 +#define MSR_RES10_BIT 10 +#define MSR_RES11_BIT 11 +#define MSR_KEY_BIT 12 /* Key bit on 603e (not on 603) */ +#define MSR_POW_BIT 13 +#define MSR_TGPR_BIT 
14 /* Temporary GPR mappings on 603/603e */ +#define MSR_ILE_BIT 15 +#define MSR_EE_BIT 16 +#define MSR_PR_BIT 17 +#define MSR_FP_BIT 18 +#define MSR_ME_BIT 19 +#define MSR_FE0_BIT 20 +#define MSR_SE_BIT 21 +#define MSR_BE_BIT 22 +#define MSR_FE1_BIT 23 +#define MSR_RES24_BIT 24 /* AL bit in power architectures */ +#define MSR_IP_BIT 25 +#define MSR_IR_BIT 26 +#define MSR_DR_BIT 27 +#define MSR_RES28_BIT 28 +#define MSR_PM_BIT 29 +#define MSR_RI_BIT 30 +#define MSR_LE_BIT 31 + +/* MSR for kernel mode, interrupts disabled, running in virtual mode */ +#define MSR_SUPERVISOR_INT_OFF (MASK(MSR_ME) | MASK(MSR_IR) | MASK(MSR_DR)) + +/* MSR for above but with interrupts enabled */ +#define MSR_SUPERVISOR_INT_ON (MSR_SUPERVISOR_INT_OFF | MASK(MSR_EE)) + +/* MSR for physical mode code */ +#define MSR_VM_OFF (MASK(MSR_ME)) + +/* MSR for physical instruction, virtual data */ +#define MSR_PHYS_INST_VIRT_DATA (MASK(MSR_ME) | MASK(MSR_IR)) + +/* MSR mask for user-exported bits - identify bits that must be set/reset */ + +/* SET - external exceptions, machine check, vm on, user-level privs */ +#define MSR_EXPORT_MASK_SET (MASK(MSR_EE)| MASK(MSR_ME)| \ + MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_PR)) + +/* only the following bits may be changed by a task */ +#define MSR_IMPORT_BITS (MASK(MSR_FE0)|MASK(MSR_SE)|MASK(MSR_BE)| \ + MASK(MSR_FE1)| MASK(MSR_PM) | MASK(MSR_LE)) + +#define MSR_PREPARE_FOR_IMPORT(origmsr, newmsr) \ + ((origmsr & ~MSR_IMPORT_BITS) | (newmsr & MSR_IMPORT_BITS)) + +#define MSR_VEC_ON (MASK(MSR_VEC)) + +#define USER_MODE(msr) (msr & MASK(MSR_PR) ? 
TRUE : FALSE) + +/* seg reg values must be simple expressions so that assembler can cope */ +#define SEG_REG_INVALID 0x0000 +#define KERNEL_SEG_REG0_VALUE 0x20000000 /* T=0,Ks=0,Ku=1 PPC_SID_KERNEL=0*/ + +/* the following segment register values are only used prior to the probe, + * they map the various device areas 1-1 on 601 machines + */ +#define KERNEL_SEG_REG5_VALUE 0xa7F00005 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=5 */ +#define KERNEL_SEG_REG8_VALUE 0xa7F00008 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=8 */ +#define KERNEL_SEG_REG9_VALUE 0xa7F00009 /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=9 */ +#define KERNEL_SEG_REG10_VALUE 0xa7F0000a /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=a */ +#define KERNEL_SEG_REG11_VALUE 0xa7F0000b /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=b */ +#define KERNEL_SEG_REG12_VALUE 0xa7F0000c /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=c */ +#define KERNEL_SEG_REG13_VALUE 0xa7F0000d /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=d */ +#define KERNEL_SEG_REG14_VALUE 0xa7F0000e /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=e */ +#define KERNEL_SEG_REG15_VALUE 0xa7F0000f /* T=1,Ks=0,Ku=1,BUID=0x7F,SR=f */ + +/* For SEG_REG_PROT we have T=0, Ks=0, Ku=1 */ +#define SEG_REG_PROT 0x20000000 /* seg regs should have these bits set */ + +/* SR_COPYIN is used for copyin/copyout+remapping and must be + * saved and restored in the thread context. + */ +/* SR_UNUSED_BY_KERN is unused by the kernel, and thus contains + * the space ID of the currently interrupted user task immediately + * after an exception and before interrupts are reenabled. It's used + * purely for an assert. + */ + +/* SR_KERNEL used for asserts... 
*/ + +#define SR_COPYIN sr14 +#define SR_UNUSED_BY_KERN sr13 +#define SR_KERNEL sr0 + +#define SR_UNUSED_BY_KERN_NUM 13 +#define SR_COPYIN_NAME sr14 +#define SR_COPYIN_NUM 14 + + +/* DSISR bits on data access exceptions */ + +#define DSISR_IO_BIT 0 /* NOT USED on 601 */ +#define DSISR_HASH_BIT 1 +#define DSISR_PROT_BIT 4 +#define DSISR_IO_SPC_BIT 5 +#define DSISR_WRITE_BIT 6 +#define DSISR_WATCH_BIT 9 +#define DSISR_EIO_BIT 11 + +/* SRR1 bits on data/instruction translation exceptions */ + +#define SRR1_TRANS_HASH_BIT 1 +#define SRR1_TRANS_IO_BIT 3 +#define SRR1_TRANS_PROT_BIT 4 +#define SRR1_TRANS_NO_PTE_BIT 10 + +/* SRR1 bits on program exceptions */ + +#define SRR1_PRG_FE_BIT 11 +#define SRR1_PRG_ILL_INS_BIT 12 +#define SRR1_PRG_PRV_INS_BIT 13 +#define SRR1_PRG_TRAP_BIT 14 + +/* BAT information */ + +/* Constants used when setting mask values */ + +#define BAT_INVALID 0 + +/* + * Virtual to physical mapping macros/structures. + * IMPORTANT NOTE: there is one mapping per HW page, not per MACH page. + */ + +#define CACHE_LINE_SIZE 32 +#define CACHE_LINE_POW2 5 +#define cache_align(x) (((x) + CACHE_LINE_SIZE-1) & ~(CACHE_LINE_SIZE - 1)) + +#define PTE1_WIMG_GUARD_BIT 28 /* Needed for assembler */ +#define PTE1_REFERENCED_BIT 23 /* ditto */ +#define PTE1_CHANGED_BIT 24 +#define PTE0_HASH_ID_BIT 25 + +#define PPC_HASHSIZE 2048 /* size of hash table */ +#define PPC_HASHSIZE_LOG2 11 +#define PPC_MIN_MPP 2 /* min # of mappings per phys page */ + +/* macros to help decide processor type */ +#define PROCESSOR_VERSION_601 1 +#define PROCESSOR_VERSION_603 3 +#define PROCESSOR_VERSION_604 4 +#define PROCESSOR_VERSION_603e 6 +#define PROCESSOR_VERSION_750 8 +#define PROCESSOR_VERSION_604e 9 +#define PROCESSOR_VERSION_604ev 10 /* ? */ +#define PROCESSOR_VERSION_7400 12 /* ? */ +#define PROCESSOR_VERSION_7410 0x800C /* ? */ +#define PROCESSOR_VERSION_7450 0x8000 /* ? 
*/ + +#ifndef ASSEMBLER +#ifdef __GNUC__ + +#if _BIG_ENDIAN == 0 +#error - bitfield structures are not checked for bit ordering in words +#endif /* _BIG_ENDIAN */ + +/* Structures and types for machine registers */ + +typedef union { + unsigned int word; + struct { + unsigned int htaborg : 16; + unsigned int reserved : 7; + unsigned int htabmask : 9; + } bits; +} sdr1_t; + +/* Block mapping registers. These values are model dependent. + * Eventually, we will need to up these to 64 bit values. + */ + +#define blokValid 0x1FFE0000 +#define batMin 0x00020000 +#define batMax 0x10000000 +#define batICnt 4 +#define batDCnt 4 + +/* BAT register structures. + * Not used for standard mappings, but may be used + * for mapping devices. Note that the 601 has a + * different BAT layout than the other PowerPC processors + */ + +typedef union { + unsigned int word; + struct { + unsigned int blpi : 15; + unsigned int reserved : 10; + unsigned int wim : 3; + unsigned int ks : 1; + unsigned int ku : 1; + unsigned int pp : 2; + } bits; +} bat601u_t; + +typedef union { + unsigned int word; + struct { + unsigned int pbn : 15; + unsigned int reserved : 10; + unsigned int valid : 1; + unsigned int bsm : 6; + } bits; +} bat601l_t; + +typedef struct bat601_t { + bat601u_t upper; + bat601l_t lower; +} bat601_t; + +typedef union { + unsigned int word; + struct { + unsigned int bepi : 15; + unsigned int reserved : 4; + unsigned int bl : 11; + unsigned int vs : 1; + unsigned int vp : 1; + } bits; +} batu_t; + +typedef union { + unsigned int word; + struct { + unsigned int brpn : 15; + unsigned int reserved : 10; + unsigned int wimg : 4; + unsigned int reserved2 : 1; + unsigned int pp : 2; + } bits; +} batl_t; + +typedef struct bat_t { + batu_t upper; + batl_t lower; +} bat_t; + +/* PTE entries + * Used extensively for standard mappings + */ + +typedef union { + unsigned int word; + struct { + unsigned int valid : 1; + unsigned int segment_id : 24; + unsigned int hash_id : 1; + unsigned int 
page_index : 6; /* Abbreviated */ + } bits; + struct { + unsigned int valid : 1; + unsigned int not_used : 5; + unsigned int segment_id : 19; /* Least Sig 19 bits */ + unsigned int hash_id : 1; + unsigned int page_index : 6; + } hash_bits; +} pte0_t; + +typedef union { + unsigned int word; + struct { + unsigned int phys_page : 20; + unsigned int reserved3 : 3; + unsigned int referenced : 1; + unsigned int changed : 1; + unsigned int wimg : 4; + unsigned int reserved1 : 1; + unsigned int protection : 2; + } bits; +} pte1_t; + +typedef struct pte_t { + pte0_t pte0; + pte1_t pte1; +} pte_t; + +#define PTE_NULL ((pte_t*) NULL) /* No pte found/associated with this */ +#define PTE_EMPTY 0x7fffffbf /* Value in the pte0.word of a free pte */ + +#define PTE_WIMG_CB_CACHED 0 /* cached, writeback */ +#define PTE_WIMG_CB_CACHED_GUARDED 1 /* cached, writeback, guarded */ +#define PTE_WIMG_CB_CACHED_COHERENT 2 /* cached, writeback, coherent (default) */ +#define PTE_WIMG_CB_CACHED_COHERENT_GUARDED 3 /* cached, writeback, coherent, guarded */ +#define PTE_WIMG_UNCACHED 4 /* uncached */ +#define PTE_WIMG_UNCACHED_GUARDED 5 /* uncached, guarded */ +#define PTE_WIMG_UNCACHED_COHERENT 6 /* uncached, coherent */ +#define PTE_WIMG_UNCACHED_COHERENT_GUARDED 7 /* uncached, coherent, guarded */ +#define PTE_WIMG_WT_CACHED 8 /* cached, writethru */ +#define PTE_WIMG_WT_CACHED_GUARDED 9 /* cached, writethru, guarded */ +#define PTE_WIMG_WT_CACHED_COHERENT 10 /* cached, writethru, coherent */ +#define PTE_WIMG_WT_CACHED_COHERENT_GUARDED 11 /* cached, writethru, coherent, guarded */ + +#define PTE_WIMG_DEFAULT PTE_WIMG_CB_CACHED_COHERENT +#define PTE_WIMG_IO PTE_WIMG_UNCACHED_COHERENT_GUARDED + +/* + * A virtual address is decoded into various parts when looking for its PTE + */ + +typedef struct va_full_t { + unsigned int seg_num : 4; + unsigned int page_index : 16; + unsigned int byte_ofs : 12; +} va_full_t; + +typedef struct va_abbrev_t { /* use bits.abbrev for abbreviated page index */ + 
unsigned int seg_num : 4; + unsigned int page_index : 6; + unsigned int junk : 10; + unsigned int byte_ofs : 12; +} va_abbrev_t; + +typedef union { + unsigned int word; + va_full_t full; + va_abbrev_t abbrev; +} virtual_addr_t; + +/* A physical address can be split up into page and offset */ + +typedef struct pa_t { + unsigned int page_no : 20; + unsigned int offset : 12; +} pa_t; + +typedef union { + unsigned int word; + pa_t bits; +} physical_addr_t; + +/* + * C-helper inline functions for accessing machine registers follow. + */ + + +#ifdef __ELF__ +#define __CASMNL__ ";" +#else +#define __CASMNL__ "@" +#endif + +/* Return the current GOT pointer */ + +extern unsigned int get_got(void); + +extern __inline__ unsigned int get_got(void) +{ + unsigned int result; +#ifndef __ELF__ + __asm__ volatile("mr %0, r2" : "=r" (result)); +#else + __asm__ volatile("mr %0, 2" : "=r" (result)); +#endif + return result; +} + +/* + * Various memory/IO synchronisation instructions + */ + + /* Use eieio as a memory barrier to order stores. + * Useful for device control and PTE maintenance. + */ + +#define eieio() \ + __asm__ volatile("eieio") + + /* Use sync to ensure previous stores have completed. + This is required when manipulating locks and/or + maintaining PTEs or other shared structures on SMP + machines. + */ + +#define sync() \ + __asm__ volatile("sync") + + /* Use isync to synchronize context; that is, to ensure + no prefetching of instructions happens before the + instruction. + */ + +#define isync() \ + __asm__ volatile("isync") + + +/* + * This guy will make sure all tlbs on all processors finish their tlbies + */ +#define tlbsync() \ + __asm__ volatile("tlbsync") + + + /* Invalidate TLB entry. Caution, requires context synchronization. 
+ */ +extern void tlbie(unsigned int val); + +extern __inline__ void tlbie(unsigned int val) +{ + __asm__ volatile("tlbie %0" : : "r" (val)); + return; +} + + + +/* + * Access to various system registers + */ + +extern unsigned int mflr(void); + +extern __inline__ unsigned int mflr(void) +{ + unsigned int result; + __asm__ volatile("mflr %0" : "=r" (result)); + return result; +} + +extern unsigned int mfpvr(void); + +extern __inline__ unsigned int mfpvr(void) +{ + unsigned int result; + __asm__ ("mfpvr %0" : "=r" (result)); + return result; +} + +/* mtmsr might need syncs etc around it, don't provide simple + * inline macro + */ + +extern unsigned int mfmsr(void); + +extern __inline__ unsigned int mfmsr(void) +{ + unsigned int result; + __asm__ volatile("mfmsr %0" : "=r" (result)); + return result; +} + +/* mtsr and mfsr must be macros since SR must be hardcoded */ + +#if __ELF__ +#define mtsr(SR, REG) \ + __asm__ volatile("sync" __CASMNL__ "mtsr %0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG)); +#define mfsr(REG, SR) \ + __asm__ volatile("mfsr %0, %1" : "=r" (REG) : "i" (SR)); +#else +#define mtsr(SR, REG) \ + __asm__ volatile("sync" __CASMNL__ "mtsr sr%0, %1 " __CASMNL__ "isync" : : "i" (SR), "r" (REG)); + +#define mfsr(REG, SR) \ + __asm__ volatile("mfsr %0, sr%1" : "=r" (REG) : "i" (SR)); +#endif + + +extern void mtsrin(unsigned int val, unsigned int reg); + +extern __inline__ void mtsrin(unsigned int val, unsigned int reg) +{ + __asm__ volatile("sync" __CASMNL__ "mtsrin %0, %1" __CASMNL__ " isync" : : "r" (val), "r" (reg)); + return; +} + +extern unsigned int mfsrin(unsigned int reg); + +extern __inline__ unsigned int mfsrin(unsigned int reg) +{ + unsigned int result; + __asm__ volatile("mfsrin %0, %1" : "=r" (result) : "r" (reg)); + return result; +} + +extern void mtsdr1(unsigned int val); + +extern __inline__ void mtsdr1(unsigned int val) +{ + __asm__ volatile("mtsdr1 %0" : : "r" (val)); + return; +} + +extern void mtdar(unsigned int val); + +extern 
__inline__ void mtdar(unsigned int val) +{ + __asm__ volatile("mtdar %0" : : "r" (val)); + return; +} + +extern unsigned int mfdar(void); + +extern __inline__ unsigned int mfdar(void) +{ + unsigned int result; + __asm__ volatile("mfdar %0" : "=r" (result)); + return result; +} + +extern void mtdec(unsigned int val); + +extern __inline__ void mtdec(unsigned int val) +{ + __asm__ volatile("mtdec %0" : : "r" (val)); + return; +} + +extern int isync_mfdec(void); + +extern __inline__ int isync_mfdec(void) +{ + int result; + __asm__ volatile("isync" __CASMNL__ "mfdec %0" : "=r" (result)); + return result; +} + +/* Read and write the value from the real-time clock + * or time base registers. Note that you have to + * use the right ones depending upon being on + * 601 or 603/604. Care about carries between + * the words and using the right registers must be + * done by the calling function. + */ + +extern void mttb(unsigned int val); + +extern __inline__ void mttb(unsigned int val) +{ + __asm__ volatile("mtspr tbl, %0" : : "r" (val)); + return; +} + +extern unsigned int mftb(void); + +extern __inline__ unsigned int mftb(void) +{ + unsigned int result; + __asm__ volatile("mftb %0" : "=r" (result)); + return result; +} + +extern void mttbu(unsigned int val); + +extern __inline__ void mttbu(unsigned int val) +{ + __asm__ volatile("mtspr tbu, %0" : : "r" (val)); + return; +} + +extern unsigned int mftbu(void); + +extern __inline__ unsigned int mftbu(void) +{ + unsigned int result; + __asm__ volatile("mftbu %0" : "=r" (result)); + return result; +} + +extern void mtrtcl(unsigned int val); + +extern __inline__ void mtrtcl(unsigned int val) +{ + __asm__ volatile("mtspr 21,%0" : : "r" (val)); + return; +} + +extern unsigned int mfrtcl(void); + +extern __inline__ unsigned int mfrtcl(void) +{ + unsigned int result; + __asm__ volatile("mfspr %0,5" : "=r" (result)); + return result; +} + +extern void mtrtcu(unsigned int val); + +extern __inline__ void mtrtcu(unsigned int val) +{ + 
__asm__ volatile("mtspr 20,%0" : : "r" (val)); + return; +} + +extern unsigned int mfrtcu(void); + +extern __inline__ unsigned int mfrtcu(void) +{ + unsigned int result; + __asm__ volatile("mfspr %0,4" : "=r" (result)); + return result; +} + +extern void mtl2cr(unsigned int val); + +extern __inline__ void mtl2cr(unsigned int val) +{ + __asm__ volatile("mtspr l2cr, %0" : : "r" (val)); + return; +} + +extern unsigned int mfl2cr(void); + +extern __inline__ unsigned int mfl2cr(void) +{ + unsigned int result; + __asm__ volatile("mfspr %0, l2cr" : "=r" (result)); + return result; +} + +extern unsigned int cntlzw(unsigned int num); + +extern __inline__ unsigned int cntlzw(unsigned int num) +{ + unsigned int result; + __asm__ volatile("cntlzw %0, %1" : "=r" (result) : "r" (num)); + return result; +} + + +/* functions for doing byte reversed loads and stores */ + +extern unsigned int lwbrx(unsigned int addr); + +extern __inline__ unsigned int lwbrx(unsigned int addr) +{ + unsigned int result; + __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (addr)); + return result; +} + +extern void stwbrx(unsigned int data, unsigned int addr); + +extern __inline__ void stwbrx(unsigned int data, unsigned int addr) +{ + __asm__ volatile("stwbrx %0, 0, %1" : : "r" (data), "r" (addr)); +} + +/* Performance Monitor Register access routines */ +extern unsigned long mfmmcr0(void); +extern void mtmmcr0(unsigned long); +extern unsigned long mfmmcr1(void); +extern void mtmmcr1(unsigned long); +extern unsigned long mfmmcr2(void); +extern void mtmmcr2(unsigned long); +extern unsigned long mfpmc1(void); +extern void mtpmc1(unsigned long); +extern unsigned long mfpmc2(void); +extern void mtpmc2(unsigned long); +extern unsigned long mfpmc3(void); +extern void mtpmc3(unsigned long); +extern unsigned long mfpmc4(void); +extern void mtpmc4(unsigned long); +extern unsigned long mfsia(void); +extern unsigned long mfsda(void); + +/* macros since the argument n is a hard-coded constant */ + +#define 
mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg)) +#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg)) + +#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg)) +#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg)) + +#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg)) +#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg)) + +#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg)) +#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg)) + +#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg)) +#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg)) + +#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val)) +#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg)) + +#endif /* __GNUC__ */ +#endif /* !ASSEMBLER */ + +#endif /* _PPC_PROC_REG_H_ */ diff --git a/osfmk/ppc/rtclock.c b/osfmk/ppc/rtclock.c new file mode 100644 index 000000000..9ccae5ae7 --- /dev/null +++ b/osfmk/ppc/rtclock.c @@ -0,0 +1,1082 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * File: rtclock.c + * Purpose: Routines for handling the machine dependent + * real-time clock. + */ + +#include + +#include +#include +#include +#include + +#include /* HZ */ +#include + +#include + +/*XXX power management hacks XXX*/ +#include +#include + +extern void *registerSleepWakeInterest( + void *callback, + void *target, + void *refCon); +/*XXX power management hacks XXX*/ + +#include + +int sysclk_config(void); + +int sysclk_init(void); + +kern_return_t sysclk_gettime( + mach_timespec_t *cur_time); + +kern_return_t sysclk_getattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); + +void sysclk_setalarm( + mach_timespec_t *deadline); + +struct clock_ops sysclk_ops = { + sysclk_config, sysclk_init, + sysclk_gettime, 0, + sysclk_getattr, 0, + sysclk_setalarm, +}; + +int calend_config(void); + +int calend_init(void); + +kern_return_t calend_gettime( + mach_timespec_t *cur_time); + +kern_return_t calend_settime( + mach_timespec_t *cur_time); + +kern_return_t calend_getattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); + +struct clock_ops calend_ops = { + calend_config, calend_init, + calend_gettime, calend_settime, + calend_getattr, 0, + 0, +}; + +/* local data declarations */ + +static struct rtclock { + mach_timespec_t calend_offset; + boolean_t calend_is_set; + + mach_timebase_info_data_t timebase_const; + + struct rtclock_timer { + AbsoluteTime deadline; + boolean_t is_set; + } timer[NCPUS]; + + clock_timer_func_t timer_expire; + + timer_call_data_t alarm[NCPUS]; + + /* debugging */ + AbsoluteTime last_abstime[NCPUS]; + int last_decr[NCPUS]; + + decl_simple_lock_data(,lock) /* real-time clock device lock */ +} rtclock; + +static boolean_t rtclock_initialized; + +static 
AbsoluteTime rtclock_tick_deadline[NCPUS]; +static AbsoluteTime rtclock_tick_interval; + +static void timespec_to_absolutetime( + mach_timespec_t timespec, + AbsoluteTime *result); + +static int deadline_to_decrementer( + AbsoluteTime deadline, + AbsoluteTime now); + +static void rtclock_alarm_timer( + timer_call_param_t p0, + timer_call_param_t p1); + +/* global data declarations */ + +#define RTC_TICKPERIOD (NSEC_PER_SEC / HZ) + +#define DECREMENTER_MAX 0x7FFFFFFFUL +#define DECREMENTER_MIN 0xAUL + +natural_t rtclock_decrementer_min; + +/* + * Macros to lock/unlock real-time clock device. + */ +#define LOCK_RTC(s) \ +MACRO_BEGIN \ + (s) = splclock(); \ + simple_lock(&rtclock.lock); \ +MACRO_END + +#define UNLOCK_RTC(s) \ +MACRO_BEGIN \ + simple_unlock(&rtclock.lock); \ + splx(s); \ +MACRO_END + +static void +timebase_callback( + struct timebase_freq_t *freq) +{ + natural_t numer, denom; + int n; + spl_t s; + + denom = freq->timebase_num; + n = 9; + while (!(denom % 10)) { + if (n < 1) + break; + denom /= 10; + n--; + } + + numer = freq->timebase_den; + while (n-- > 0) { + numer *= 10; + } + + LOCK_RTC(s); + rtclock.timebase_const.numer = numer; + rtclock.timebase_const.denom = denom; + UNLOCK_RTC(s); +} + +/* + * Configure the real-time clock device. + */ +int +sysclk_config(void) +{ + int i; + + if (cpu_number() != master_cpu) + return(1); + + for (i = 0; i < NCPUS; i++) + timer_call_setup(&rtclock.alarm[i], rtclock_alarm_timer, NULL); + + simple_lock_init(&rtclock.lock, ETAP_MISC_RT_CLOCK); + + PE_register_timebase_callback(timebase_callback); + + return (1); +} + +/* + * Initialize the system clock device. 
+ */ +int +sysclk_init(void) +{ + AbsoluteTime abstime; + int decr, mycpu = cpu_number(); + + if (mycpu != master_cpu) { + if (rtclock_initialized == FALSE) { + panic("sysclk_init on cpu %d, rtc not initialized\n", mycpu); + } + /* Set decrementer and hence our next tick due */ + clock_get_uptime(&abstime); + rtclock_tick_deadline[mycpu] = abstime; + ADD_ABSOLUTETIME(&rtclock_tick_deadline[mycpu], + &rtclock_tick_interval); + decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); + mtdec(decr); + rtclock.last_decr[mycpu] = decr; + + return(1); + } + + /* + * Initialize non-zero clock structure values. + */ + clock_interval_to_absolutetime_interval(RTC_TICKPERIOD, 1, + &rtclock_tick_interval); + /* Set decrementer and our next tick due */ + clock_get_uptime(&abstime); + rtclock_tick_deadline[mycpu] = abstime; + ADD_ABSOLUTETIME(&rtclock_tick_deadline[mycpu], &rtclock_tick_interval); + decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); + mtdec(decr); + rtclock.last_decr[mycpu] = decr; + + rtclock_initialized = TRUE; + + return (1); +} + +/* + * Perform a full 64 bit by 32 bit unsigned multiply, + * yielding a 96 bit product. The most significant + * portion of the product is returned as a 64 bit + * quantity, with the lower portion as a 32 bit word. 
+ */ +static void +umul_64by32( + AbsoluteTime now64, + natural_t mult32, + AbsoluteTime *result64, + natural_t *result32) +{ + natural_t mid, mid2; + + asm volatile(" mullw %0,%1,%2" : + "=r" (*result32) : + "r" (now64.lo), "r" (mult32)); + + asm volatile(" mullw %0,%1,%2" : + "=r" (mid2) : + "r" (now64.hi), "r" (mult32)); + asm volatile(" mulhwu %0,%1,%2" : + "=r" (mid) : + "r" (now64.lo), "r" (mult32)); + + asm volatile(" mulhwu %0,%1,%2" : + "=r" (result64->hi) : + "r" (now64.hi), "r" (mult32)); + + asm volatile(" addc %0,%2,%3; + addze %1,%4" : + "=r" (result64->lo), "=r" (result64->hi) : + "r" (mid), "r" (mid2), "1" (result64->hi)); +} + +/* + * Perform a partial 64 bit by 32 bit unsigned multiply, + * yielding a 64 bit product. Only the least significant + * 64 bits of the product are calculated and returned. + */ +static void +umul_64by32to64( + AbsoluteTime now64, + natural_t mult32, + AbsoluteTime *result64) +{ + natural_t mid, mid2; + + asm volatile(" mullw %0,%1,%2" : + "=r" (result64->lo) : + "r" (now64.lo), "r" (mult32)); + + asm volatile(" mullw %0,%1,%2" : + "=r" (mid2) : + "r" (now64.hi), "r" (mult32)); + asm volatile(" mulhwu %0,%1,%2" : + "=r" (mid) : + "r" (now64.lo), "r" (mult32)); + + asm volatile(" add %0,%1,%2" : + "=r" (result64->hi) : + "r" (mid), "r" (mid2)); +} + +/* + * Perform an unsigned division of a 96 bit value + * by a 32 bit value, yielding a 96 bit quotient. + * The most significant portion of the product is + * returned as a 64 bit quantity, with the lower + * portion as a 32 bit word. 
+ */ +static __inline__ +void +udiv_96by32( + AbsoluteTime now64, + natural_t now32, + natural_t div32, + AbsoluteTime *result64, + natural_t *result32) +{ + AbsoluteTime t64; + + if (now64.hi > 0 || now64.lo >= div32) { + AbsoluteTime_to_scalar(result64) = + AbsoluteTime_to_scalar(&now64) / div32; + + umul_64by32to64(*result64, div32, &t64); + + AbsoluteTime_to_scalar(&t64) = + AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64); + + *result32 = (((unsigned long long)t64.lo << 32) | now32) / div32; + } + else { + AbsoluteTime_to_scalar(result64) = + (((unsigned long long)now64.lo << 32) | now32) / div32; + + *result32 = result64->lo; + result64->lo = result64->hi; + result64->hi = 0; + } +} + +/* + * Perform an unsigned division of a 96 bit value + * by a 32 bit value, yielding a 64 bit quotient. + * Any higher order bits of the quotient are simply + * discarded. + */ +static __inline__ +void +udiv_96by32to64( + AbsoluteTime now64, + natural_t now32, + natural_t div32, + AbsoluteTime *result64) +{ + AbsoluteTime t64; + + if (now64.hi > 0 || now64.lo >= div32) { + AbsoluteTime_to_scalar(result64) = + AbsoluteTime_to_scalar(&now64) / div32; + + umul_64by32to64(*result64, div32, &t64); + + AbsoluteTime_to_scalar(&t64) = + AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64); + + result64->hi = result64->lo; + result64->lo = (((unsigned long long)t64.lo << 32) | now32) / div32; + } + else { + AbsoluteTime_to_scalar(result64) = + (((unsigned long long)now64.lo << 32) | now32) / div32; + } +} + +/* + * Perform an unsigned division of a 96 bit value + * by a 32 bit value, yielding a 32 bit quotient, + * and a 32 bit remainder. Any higher order bits + * of the quotient are simply discarded. 
+ */ +static __inline__ +void +udiv_96by32to32and32( + AbsoluteTime now64, + natural_t now32, + natural_t div32, + natural_t *result32, + natural_t *remain32) +{ + AbsoluteTime t64, u64; + + if (now64.hi > 0 || now64.lo >= div32) { + AbsoluteTime_to_scalar(&t64) = + AbsoluteTime_to_scalar(&now64) / div32; + + umul_64by32to64(t64, div32, &t64); + + AbsoluteTime_to_scalar(&t64) = + AbsoluteTime_to_scalar(&now64) - AbsoluteTime_to_scalar(&t64); + + AbsoluteTime_to_scalar(&t64) = + ((unsigned long long)t64.lo << 32) | now32; + + AbsoluteTime_to_scalar(&u64) = + AbsoluteTime_to_scalar(&t64) / div32; + + *result32 = u64.lo; + + umul_64by32to64(u64, div32, &u64); + + *remain32 = AbsoluteTime_to_scalar(&t64) - + AbsoluteTime_to_scalar(&u64); + } + else { + AbsoluteTime_to_scalar(&t64) = + ((unsigned long long)now64.lo << 32) | now32; + + AbsoluteTime_to_scalar(&u64) = + AbsoluteTime_to_scalar(&t64) / div32; + + *result32 = u64.lo; + + umul_64by32to64(u64, div32, &u64); + + *remain32 = AbsoluteTime_to_scalar(&t64) - + AbsoluteTime_to_scalar(&u64); + } +} + +/* + * Get the clock device time. This routine is responsible + * for converting the device's machine dependent time value + * into a canonical mach_timespec_t value. 
+ * + * SMP configurations - *this currently assumes that the processor + * clocks will be synchronised* + */ +kern_return_t +sysclk_gettime_internal( + mach_timespec_t *time) /* OUT */ +{ + AbsoluteTime now; + AbsoluteTime t64; + natural_t t32; + natural_t numer, denom; + + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + + clock_get_uptime(&now); + + umul_64by32(now, numer, &t64, &t32); + + udiv_96by32(t64, t32, denom, &t64, &t32); + + udiv_96by32to32and32(t64, t32, NSEC_PER_SEC, + &time->tv_sec, &time->tv_nsec); + + return (KERN_SUCCESS); +} + +kern_return_t +sysclk_gettime( + mach_timespec_t *time) /* OUT */ +{ + AbsoluteTime now; + AbsoluteTime t64; + natural_t t32; + natural_t numer, denom; + spl_t s; + + LOCK_RTC(s); + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + UNLOCK_RTC(s); + + clock_get_uptime(&now); + + umul_64by32(now, numer, &t64, &t32); + + udiv_96by32(t64, t32, denom, &t64, &t32); + + udiv_96by32to32and32(t64, t32, NSEC_PER_SEC, + &time->tv_sec, &time->tv_nsec); + + return (KERN_SUCCESS); +} + +/* + * Get clock device attributes. + */ +kern_return_t +sysclk_getattr( + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + spl_t s; + + if (*count != 1) + return (KERN_FAILURE); + switch (flavor) { + + case CLOCK_GET_TIME_RES: /* >0 res */ + case CLOCK_ALARM_CURRES: /* =0 no alarm */ + case CLOCK_ALARM_MINRES: + case CLOCK_ALARM_MAXRES: + LOCK_RTC(s); + *(clock_res_t *) attr = RTC_TICKPERIOD; + UNLOCK_RTC(s); + break; + + default: + return (KERN_INVALID_VALUE); + } + return (KERN_SUCCESS); +} + +/* + * Set deadline for the next alarm on the clock device. This call + * always resets the time to deliver an alarm for the clock. 
+ */ +void +sysclk_setalarm( + mach_timespec_t *deadline) +{ + AbsoluteTime abstime; + + timespec_to_absolutetime(*deadline, &abstime); + timer_call_enter(&rtclock.alarm[cpu_number()], abstime); +} + +/* + * Configure the calendar clock. + */ +int +calend_config(void) +{ + return (1); +} + +/* + * Initialize the calendar clock. + */ +int +calend_init(void) +{ + if (cpu_number() != master_cpu) + return(1); + + return (1); +} + +/* + * Get the current clock time. + */ +kern_return_t +calend_gettime( + mach_timespec_t *curr_time) /* OUT */ +{ + spl_t s; + + LOCK_RTC(s); + if (!rtclock.calend_is_set) { + UNLOCK_RTC(s); + return (KERN_FAILURE); + } + + (void) sysclk_gettime_internal(curr_time); + ADD_MACH_TIMESPEC(curr_time, &rtclock.calend_offset); + UNLOCK_RTC(s); + + return (KERN_SUCCESS); +} + +/* + * Set the current clock time. + */ +kern_return_t +calend_settime( + mach_timespec_t *new_time) +{ + mach_timespec_t curr_time; + spl_t s; + + LOCK_RTC(s); + (void) sysclk_gettime_internal(&curr_time); + rtclock.calend_offset = *new_time; + SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); + rtclock.calend_is_set = TRUE; + UNLOCK_RTC(s); + + PESetGMTTimeOfDay(new_time->tv_sec); + + return (KERN_SUCCESS); +} + +/* + * Get clock device attributes. 
+ */ +kern_return_t +calend_getattr( + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + spl_t s; + + if (*count != 1) + return (KERN_FAILURE); + switch (flavor) { + + case CLOCK_GET_TIME_RES: /* >0 res */ + LOCK_RTC(s); + *(clock_res_t *) attr = RTC_TICKPERIOD; + UNLOCK_RTC(s); + break; + + case CLOCK_ALARM_CURRES: /* =0 no alarm */ + case CLOCK_ALARM_MINRES: + case CLOCK_ALARM_MAXRES: + *(clock_res_t *) attr = 0; + break; + + default: + return (KERN_INVALID_VALUE); + } + return (KERN_SUCCESS); +} + +void +clock_adjust_calendar( + clock_res_t nsec) +{ + spl_t s; + + LOCK_RTC(s); + if (rtclock.calend_is_set) + ADD_MACH_TIMESPEC_NSEC(&rtclock.calend_offset, nsec); + UNLOCK_RTC(s); +} + +static void +calend_setup_internal( + long seconds) +{ + mach_timespec_t curr_time; + + (void) sysclk_gettime_internal(&curr_time); + if (curr_time.tv_nsec < 500*USEC_PER_SEC) + rtclock.calend_offset.tv_sec = seconds; + else + rtclock.calend_offset.tv_sec = seconds + 1; + rtclock.calend_offset.tv_nsec = 0; + SUB_MACH_TIMESPEC(&rtclock.calend_offset, &curr_time); + rtclock.calend_is_set = TRUE; +} + +static thread_call_t calend_wakeup_call; +static thread_call_data_t calend_wakeup_call_data; + +static void +calend_wakeup_resynch( + thread_call_param_t p0, + thread_call_param_t p1) +{ + long seconds = PEGetGMTTimeOfDay(); + spl_t s; + + LOCK_RTC(s); + calend_setup_internal(seconds); + UNLOCK_RTC(s); +} + +static IOReturn +calend_sleep_wake_notif( + void *target, + void *refCon, + UInt32 messageType, + void *provider, + void *messageArg, + vm_size_t argSize) +{ + if (messageType != kIOMessageSystemHasPoweredOn) + return (kIOReturnUnsupported); + + if (calend_wakeup_call != NULL) + thread_call_enter(calend_wakeup_call); + + return (kIOReturnSuccess); +} + +void +clock_initialize_calendar(void) +{ + long seconds; + spl_t s; + + thread_call_setup(&calend_wakeup_call_data, calend_wakeup_resynch, NULL); + calend_wakeup_call = 
&calend_wakeup_call_data; + + registerSleepWakeInterest(calend_sleep_wake_notif, NULL, NULL); + + seconds = PEGetGMTTimeOfDay(); + + LOCK_RTC(s); + if (!rtclock.calend_is_set) + calend_setup_internal(seconds); + UNLOCK_RTC(s); +} + +mach_timespec_t +clock_get_calendar_offset(void) +{ + mach_timespec_t result = MACH_TIMESPEC_ZERO; + spl_t s; + + LOCK_RTC(s); + if (rtclock.calend_is_set) + result = rtclock.calend_offset; + UNLOCK_RTC(s); + + return (result); +} + +void +clock_timebase_info( + mach_timebase_info_t info) +{ + spl_t s; + + LOCK_RTC(s); + *info = rtclock.timebase_const; + UNLOCK_RTC(s); +} + +void +clock_set_timer_deadline( + AbsoluteTime deadline) +{ + AbsoluteTime abstime; + int decr, mycpu; + struct rtclock_timer *mytimer; + spl_t s; + + s = splclock(); + mycpu = cpu_number(); + mytimer = &rtclock.timer[mycpu]; + clock_get_uptime(&abstime); + rtclock.last_abstime[mycpu] = abstime; + mytimer->deadline = deadline; + mytimer->is_set = TRUE; + if ( CMP_ABSOLUTETIME(&mytimer->deadline, + &rtclock_tick_deadline[mycpu]) < 0) { + decr = deadline_to_decrementer(mytimer->deadline, abstime); + if ( rtclock_decrementer_min != 0 && + rtclock_decrementer_min < (natural_t)decr ) + decr = rtclock_decrementer_min; + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) + | DBG_FUNC_NONE, decr, 2, 0, 0, 0); + + mtdec(decr); + rtclock.last_decr[mycpu] = decr; + } + splx(s); +} + +void +clock_set_timer_func( + clock_timer_func_t func) +{ + spl_t s; + + LOCK_RTC(s); + if (rtclock.timer_expire == NULL) + rtclock.timer_expire = func; + UNLOCK_RTC(s); +} + +/* + * Reset the clock device. This causes the realtime clock + * device to reload its mode and count value (frequency). + */ +void +rtclock_reset(void) +{ + return; +} + +/* + * Real-time clock device interrupt. 
+ */ +void +rtclock_intr( + int device, + struct ppc_saved_state *ssp, + spl_t old_spl) +{ + AbsoluteTime abstime; + int decr[3], mycpu = cpu_number(); + struct rtclock_timer *mytimer = &rtclock.timer[mycpu]; + + /* + * We may receive interrupts too early, we must reject them. + */ + if (rtclock_initialized == FALSE) { + mtdec(DECREMENTER_MAX); /* Max the decrementer if not init */ + return; + } + + decr[1] = decr[2] = DECREMENTER_MAX; + + clock_get_uptime(&abstime); + rtclock.last_abstime[mycpu] = abstime; + if (CMP_ABSOLUTETIME(&rtclock_tick_deadline[mycpu], &abstime) <= 0) { + clock_deadline_for_periodic_event(rtclock_tick_interval, abstime, + &rtclock_tick_deadline[mycpu]); + hertz_tick(USER_MODE(ssp->srr1), ssp->srr0); + } + + clock_get_uptime(&abstime); + rtclock.last_abstime[mycpu] = abstime; + if (mytimer->is_set && + CMP_ABSOLUTETIME(&mytimer->deadline, &abstime) <= 0) { + mytimer->is_set = FALSE; + (*rtclock.timer_expire)(abstime); + } + + clock_get_uptime(&abstime); + rtclock.last_abstime[mycpu] = abstime; + decr[1] = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime); + + if (mytimer->is_set) + decr[2] = deadline_to_decrementer(mytimer->deadline, abstime); + + if (decr[1] > decr[2]) + decr[1] = decr[2]; + + if ( rtclock_decrementer_min != 0 && + rtclock_decrementer_min < (natural_t)decr[1] ) + decr[1] = rtclock_decrementer_min; + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) + | DBG_FUNC_NONE, decr[1], 3, 0, 0, 0); + + mtdec(decr[1]); + rtclock.last_decr[mycpu] = decr[1]; +} + +static void +rtclock_alarm_timer( + timer_call_param_t p0, + timer_call_param_t p1) +{ + mach_timespec_t timestamp; + + (void) sysclk_gettime(×tamp); + + clock_alarm_intr(SYSTEM_CLOCK, ×tamp); +} + +void +clock_get_uptime( + AbsoluteTime *result) +{ + natural_t hi, lo, hic; + + do { + asm volatile(" mftbu %0" : "=r" (hi)); + asm volatile(" mftb %0" : "=r" (lo)); + asm volatile(" mftbu %0" : "=r" (hic)); + } while (hic != hi); + + result->lo = lo; + 
result->hi = hi; +} + +static int +deadline_to_decrementer( + AbsoluteTime deadline, + AbsoluteTime now) +{ + uint64_t delt; + + if (CMP_ABSOLUTETIME(&deadline, &now) <= 0) + return DECREMENTER_MIN; + else { + delt = AbsoluteTime_to_scalar(&deadline) - + AbsoluteTime_to_scalar(&now); + return (delt >= (DECREMENTER_MAX + 1))? DECREMENTER_MAX: + ((delt >= (DECREMENTER_MIN + 1))? (delt - 1): DECREMENTER_MIN); + } +} + +static void +timespec_to_absolutetime( + mach_timespec_t timespec, + AbsoluteTime *result) +{ + AbsoluteTime t64; + natural_t t32; + natural_t numer, denom; + spl_t s; + + LOCK_RTC(s); + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + UNLOCK_RTC(s); + + asm volatile(" mullw %0,%1,%2" : + "=r" (t64.lo) : + "r" (timespec.tv_sec), "r" (NSEC_PER_SEC)); + + asm volatile(" mulhwu %0,%1,%2" : + "=r" (t64.hi) : + "r" (timespec.tv_sec), "r" (NSEC_PER_SEC)); + + AbsoluteTime_to_scalar(&t64) += timespec.tv_nsec; + + umul_64by32(t64, denom, &t64, &t32); + + udiv_96by32(t64, t32, numer, &t64, &t32); + + result->hi = t64.lo; + result->lo = t32; +} + +void +clock_interval_to_deadline( + natural_t interval, + natural_t scale_factor, + AbsoluteTime *result) +{ + AbsoluteTime abstime; + + clock_get_uptime(result); + + clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); + + ADD_ABSOLUTETIME(result, &abstime); +} + +void +clock_interval_to_absolutetime_interval( + natural_t interval, + natural_t scale_factor, + AbsoluteTime *result) +{ + AbsoluteTime t64; + natural_t t32; + natural_t numer, denom; + spl_t s; + + LOCK_RTC(s); + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + UNLOCK_RTC(s); + + asm volatile(" mullw %0,%1,%2" : + "=r" (t64.lo) : + "r" (interval), "r" (scale_factor)); + asm volatile(" mulhwu %0,%1,%2" : + "=r" (t64.hi) : + "r" (interval), "r" (scale_factor)); + + umul_64by32(t64, denom, &t64, &t32); + + udiv_96by32(t64, t32, numer, &t64, &t32); + + result->hi = t64.lo; + 
result->lo = t32; +} + +void +clock_absolutetime_interval_to_deadline( + AbsoluteTime abstime, + AbsoluteTime *result) +{ + clock_get_uptime(result); + + ADD_ABSOLUTETIME(result, &abstime); +} + +void +absolutetime_to_nanoseconds( + AbsoluteTime abstime, + UInt64 *result) +{ + AbsoluteTime t64; + natural_t t32; + natural_t numer, denom; + spl_t s; + + LOCK_RTC(s); + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + UNLOCK_RTC(s); + + umul_64by32(abstime, numer, &t64, &t32); + + udiv_96by32to64(t64, t32, denom, (void *)result); +} + +void +nanoseconds_to_absolutetime( + UInt64 nanoseconds, + AbsoluteTime *result) +{ + AbsoluteTime t64; + natural_t t32; + natural_t numer, denom; + spl_t s; + + LOCK_RTC(s); + numer = rtclock.timebase_const.numer; + denom = rtclock.timebase_const.denom; + UNLOCK_RTC(s); + + AbsoluteTime_to_scalar(&t64) = nanoseconds; + + umul_64by32(t64, denom, &t64, &t32); + + udiv_96by32to64(t64, t32, numer, result); +} + +/* + * Spin-loop delay primitives. + */ +void +delay_for_interval( + natural_t interval, + natural_t scale_factor) +{ + AbsoluteTime now, end; + + clock_interval_to_deadline(interval, scale_factor, &end); + + do { + clock_get_uptime(&now); + } while (CMP_ABSOLUTETIME(&now, &end) < 0); +} + +void +clock_delay_until( + AbsoluteTime deadline) +{ + AbsoluteTime now; + + do { + clock_get_uptime(&now); + } while (CMP_ABSOLUTETIME(&now, &deadline) < 0); +} + +void +delay( + int usec) +{ + delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC); +} diff --git a/osfmk/ppc/savearea.c b/osfmk/ppc/savearea.c new file mode 100644 index 000000000..e96dd0246 --- /dev/null +++ b/osfmk/ppc/savearea.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * This file is used to maintain the exception save areas + * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +extern struct Saveanchor saveanchor; /* Aligned savearea anchor */ +unsigned int debsave0 = 0; /* Debug flag */ +unsigned int backchain = 0; /* Debug flag */ + +/* + * These routines keep track of exception save areas and keep the count within specific limits. If there are + * too few, more are allocated, too many, and they are released. This savearea is where the PCBs are + * stored. They never span a page boundary and are referenced by both virtual and real addresses. + * Within the interrupt vectors, the real address is used because at that level, no exceptions + * can be tolerated. Save areas can be dynamic or permanent. Permanent saveareas are allocated + * at boot time and must be in place before any type of exception occurs. These are never released, + * and the number is based upon some arbitrary (yet to be determined) amount times the number of + * processors.
This represents the minimum number required to process a total system failure without + * destroying valuable and ever-so-handy system debugging information. + * + * + */ + +/* + * This routine allocates a save area. It checks if enough are available. + * If not, it allocates upward to the target free count. + * Then, it allocates one and returns it. + */ + + + +struct savearea *save_alloc(void) { /* Reserve a save area */ + + kern_return_t retr; + savectl *sctl; /* Previous and current save pages */ + vm_offset_t vaddr, paddr; + struct savearea *newbaby; + + if(saveanchor.savecount <= (saveanchor.saveneed - saveanchor.saveneghyst)) { /* Start allocating if we drop too far */ + while(saveanchor.savecount < saveanchor.saveneed) { /* Keep adding until the adjustment is done */ + + + retr = kmem_alloc_wired(kernel_map, &vaddr, PAGE_SIZE); /* Find a virtual address to use */ + + if(retr != KERN_SUCCESS) { /* Did we get some memory? */ + panic("Whoops... Not a bit of wired memory left for saveareas\n"); + } + + paddr = pmap_extract(kernel_pmap, vaddr); /* Get the physical */ + + bzero((void *)vaddr, PAGE_SIZE); /* Clear it all to zeros */ + sctl = (savectl *)(vaddr+PAGE_SIZE-sizeof(savectl)); /* Point to the control area of the new page */ + sctl->sac_alloc = sac_empty; /* Mark all entries free */ + sctl->sac_vrswap = (unsigned int)vaddr ^ (unsigned int)paddr; /* Form mask to convert V to R and vice versa */ + + sctl->sac_flags |= 0x0000EE00; /* (TEST/DEBUG) */ + + if(!save_queue(paddr)) { /* Add the new ones to the free savearea list */ + panic("Arrgghhhh, time out trying to lock the savearea anchor during upward adjustment\n"); + } + } + } + if (saveanchor.savecount > saveanchor.savemaxcount) + saveanchor.savemaxcount = saveanchor.savecount; + + newbaby = save_get(); /* Get a savearea and return it */ + if(!((unsigned int)newbaby & 0xFFFFF000)) { /* Whoa... None left??? No, way, no can do... */ + panic("No saveareas?!?!?! No way! Can't happen! Nuh-uh... 
I'm dead, done for, kaput...\n"); + } + + return newbaby; /* Bye-bye baby... */ + +} + + +/* + * This routine releases a save area to the free queue. If after that, we have more than our maximum target, + * we start releasing what we can until we hit the normal target. + */ + + + +void save_release(struct savearea *save) { /* Release a save area */ + + savectl *csave; /* The just released savearea block */ + + save_ret(save); /* Return a savearea to the free list */ + + if(saveanchor.savecount > (saveanchor.saveneed + saveanchor.saveposhyst)) { /* Start releasing if we have to many */ + csave = (savectl *)42; /* Start with some nonzero garbage */ + while((unsigned int)csave && (saveanchor.savecount > saveanchor.saveneed)) { /* Keep removing until the adjustment is done */ + + csave = save_dequeue(); /* Find and dequeue one that is all empty */ + + if((unsigned int)csave & 1) { /* Did we timeout trying to get the lock? */ + panic("Arrgghhhh, time out trying to lock the savearea anchor during downward adjustment\n"); + return; + } + + if((unsigned int)csave) kmem_free(kernel_map, (vm_offset_t) csave, PAGE_SIZE); /* Release the page if we found one */ + } + } + return; + +} + + +save_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, + vm_size_t *alloc_size, int *collectable, int *exhaustable) +{ + *count = saveanchor.saveinuse; + *cur_size = saveanchor.savecount * (PAGE_SIZE / 2); + *max_size = saveanchor.savemaxcount * (PAGE_SIZE / 2); + *elem_size = PAGE_SIZE / 2; + *alloc_size = PAGE_SIZE; + *collectable = 1; + *exhaustable = 0; +} + + + +/* + * This routine prints the free savearea block chain for debugging. 
+ */ + + + +void save_free_dump(void) { /* Dump the free chain */ + + unsigned int *dsv, omsr; + savectl *dsc; + + dsv = save_deb(&omsr); /* Get the virtual of the first and disable interrupts */ + + while(dsv) { /* Do 'em all */ + dsc=(savectl *)((unsigned int)dsv+4096-sizeof(savectl)); /* Point to the control area */ +// printf("%08X %08X: nxt=%08X; alloc=%08X; flags=%08X\n", dsv, /* Print it all out */ +// ((unsigned int)dsv)^(dsc->sac_vrswap), dsc->sac_next, dsc->sac_alloc, dsc->sac_flags); + dsv=(unsigned int *)(((unsigned int) dsc->sac_next)^(dsc->sac_vrswap)); /* On to the next, virtually */ + + } + __asm__ volatile ("mtmsr %0" : : "r" (omsr)); /* Restore the interruption mask */ + return; +} + +/* + * This routine prints the free savearea block chain for debugging. + */ + + + +void DumpTheSave(struct savearea *save) { /* Dump the free chain */ + + unsigned int *r; + + printf("savearea at %08X\n", save); + printf(" srrs: %08X %08X\n", save->save_srr0, save->save_srr1); + printf(" cr, xer, lr: %08X %08X %08X\n", save->save_cr, save->save_xer, save->save_lr); + printf("ctr, dar, dsisr: %08X %08X %08X\n", save->save_ctr, save->save_dar, save->save_dsisr); + printf(" space, copyin: %08X %08X\n", save->save_space, save->save_sr_copyin); + r=&save->save_r0; + printf(" regs: %08X %08X %08X %08X %08X %08X %08X %08X\n", r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]); + printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]); + printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", r[16], r[17], r[18], r[19], r[20], r[21], r[22], r[23]); + printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", r[24], r[25], r[29], r[27], r[28], r[29], r[30], r[31]); + r=(unsigned int *)&save->save_fp0; + printf(" floats: %08X%08X %08X%08X %08X%08X %08X%08X\n", r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]); + printf(" %08X%08X %08X%08X %08X%08X %08X%08X\n", r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]); + printf(" %08X%08X 
%08X%08X %08X%08X %08X%08X\n", r[16], r[17], r[18], r[19], r[20], r[21], r[22], r[23]); + printf(" %08X%08X %08X%08X %08X%08X %08X%08X\n", r[24], r[25], r[29], r[27], r[28], r[29], r[30], r[31]); + printf(" %08X%08X %08X%08X %08X%08X %08X%08X\n", r[32], r[33], r[34], r[35], r[36], r[37], r[38], r[39]); + printf(" %08X%08X %08X%08X %08X%08X %08X%08X\n", r[40], r[41], r[42], r[43], r[44], r[45], r[46], r[47]); + printf(" %08X%08X %08X%08X %08X%08X %08X%08X\n", r[48], r[49], r[50], r[51], r[52], r[53], r[54], r[55]); + printf(" %08X%08X %08X%08X %08X%08X %08X%08X\n", r[56], r[57], r[58], r[59], r[60], r[61], r[62], r[63]); + r=&save->save_sr0; + printf(" srs: %08X %08X %08X %08X %08X %08X %08X %08X\n", r[0], r[1], r[2], r[3], r[4], r[5], r[6], r[7]); + printf(" %08X %08X %08X %08X %08X %08X %08X %08X\n", r[8], r[9], r[10], r[11], r[12], r[13], r[14], r[15]); + printf("prev, phys, act: %08X %08X %08X\n", save->save_prev, save->save_phys, save->save_act); + printf(" flags: %08X\n", save->save_flags); + return; +} + + + + +/* + * Dumps out savearea and stack backchains + */ + +void DumpBackChain(struct savearea *save) { /* Prints out back chains */ + + unsigned int *r; + savearea *sv; + + if(!backchain) return; + printf("Proceeding back from savearea at %08X:\n", save); + sv=save; + while(sv) { + printf(" curr=%08X; prev=%08X; stack=%08X\n", sv, sv->save_prev, sv->save_r1); + sv=sv->save_prev; + } + return; +} + + + + diff --git a/osfmk/ppc/savearea.h b/osfmk/ppc/savearea.h new file mode 100644 index 000000000..e98ada3c0 --- /dev/null +++ b/osfmk/ppc/savearea.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PPC_SAVEAREA_H_ +#define _PPC_SAVEAREA_H_ + +#include +#include + +void save_release(struct savearea *save); /* Release a save area */ +struct savectl *save_dequeue(void); /* Find and dequeue one that is all empty */ +unsigned int save_queue(vm_offset_t); /* Add a new savearea block to the free list */ +struct savearea *save_get(void); /* Obtains a savearea from the free list (returns virtual address) */ +struct savearea *save_get_phys(void); /* Obtains a savearea from the free list (returns physical address) */ +struct savearea *save_alloc(void); /* Obtains a savearea and allocates blocks if needed */ +struct savearea *save_cpv(struct savearea *); /* Converts a physical savearea address to virtual */ +unsigned int *save_deb(unsigned int *msr); /* Finds virtual of first free block and disables interrupts */ +void save_ret(struct savearea *); /* Returns a savearea to the free list */ +void save_free_dump(void); /* Dump the free chain */ +void DumpTheSave(struct savearea *); /* Prints out a savearea */ +void DumpBackChain(struct savearea *save); /* Dumps a backchain */ + +#endif /* _PPC_SAVEAREA_H_ */ + + diff --git a/osfmk/ppc/savearea_asm.s b/osfmk/ppc/savearea_asm.s new file mode 100644 index 000000000..425e41cca --- /dev/null +++ b/osfmk/ppc/savearea_asm.s @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + .text + +/* + * This routine will add a savearea block to the free list. + * Note really well: we can take NO exceptions of any kind, + * including a PTE miss once the savearea lock is held. That's + * a guaranteed deadlock. That means we must disable for interrutions + * and turn all translation off. + * + * Note that the savearea list should NEVER be empty + */ + +ENTRY(save_queue,TAG_NO_FRAME_USED) + + + mfmsr r12 /* Get the MSR */ + lis r10,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + andi. r11,r12,0x7FCF /* Turn off all translation and 'rupts */ + ori r10,r10,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + mtmsr r11 /* Make the MSR current */ + + isync + +#if 0 + rlwinm. r3,r3,0,0,19 /* (TEST/DEBUG) */ + bne+ notraceit /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ +notraceit: /* (TEST/DEBUG) */ +#else + rlwinm r3,r3,0,0,19 /* Make sure it's clean and tidy */ +#endif + + lwarx r9,0,r10 ; ? 
+ +sqlck: lwarx r9,0,r10 /* Grab the lock value */ + li r8,1 /* Use part of the delay time */ + mr. r9,r9 /* Is it locked? */ + bne- sqlcks /* Yeah, wait for it to clear... */ + stwcx. r8,0,r10 /* Try to seize that there durn lock */ + beq+ sqlckd /* Got it... */ + b sqlck /* Collision, try again... */ + +sqlcks: lwz r9,SVlock(r10) /* Get that lock in here */ + mr. r9,r9 /* Is it free yet? */ + beq+ sqlck /* Yeah, try for it again... */ + b sqlcks /* Sniff away... */ + +sqlckd: isync /* Make sure translation is off */ + lwz r7,SVfree(r10) /* Get the free save area list anchor */ + lwz r6,SVcount(r10) /* Get the total count of saveareas */ + stw r3,SVfree(r10) /* Queue in the new one */ + addi r6,r6,sac_cnt /* Count the ones we are linking in */ + stw r7,SACnext(r3) /* Queue the old first one off of us */ + li r8,0 /* Get a free lock value */ + stw r6,SVcount(r10) /* Save the new count */ + + sync /* Make sure everything is done */ + stw r8,SVlock(r10) /* Unlock the savearea chain */ + + mtmsr r12 /* Restore interrupts and translation */ + isync /* Dump any speculations */ + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + li r2,0x2201 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + + blr /* Leave... */ + + +/* + * This routine will find and remove an empty savearea block from the free list. + * Note really well: we can take NO exceptions of any kind, + * including a PTE miss once the savearea lock is held. That's + * a guaranteed deadlock. That means we must disable for interrutions + * and turn all translation off. + * + * We pass back the virtual address of the one we just released + * or a zero if none to free. + * + * Note that the savearea list should NEVER be empty + */ + +ENTRY(save_dequeue,TAG_NO_FRAME_USED) + + + mfmsr r12 /* Get the MSR */ + lis r10,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + andi. 
r11,r12,0x7FCF /* Turn off all translation and 'rupts */ + ori r10,r10,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + mtmsr r11 /* Make the MSR current */ + isync /* Make sure translation is off */ + + lwarx r9,0,r10 ; ? + +sdqlck: lwarx r9,0,r10 /* Grab the lock value */ + li r8,1 /* Use part of the delay time */ + mr. r9,r9 /* Is it locked? */ + bne- sdqlcks /* Yeah, wait for it to clear... */ + stwcx. r8,0,r10 /* Try to seize that there durn lock */ + beq+ sdqlckd /* Got it... */ + b sdqlck /* Collision, try again... */ + +sdqlcks: lwz r9,SVlock(r10) /* Get that lock in here */ + mr. r9,r9 /* Is it free yet? */ + beq+ sdqlck /* Yeah, try for it again... */ + b sdqlcks /* Sniff away... */ + + +sdqlckd: lwz r3,SVfree(r10) /* Get the free save area list anchor */ + la r5,SVfree(r10) /* Remember that the we're just starting out */ + lwz r6,SVcount(r10) /* Get the total count of saveareas for later */ + lis r8,sac_empty>>16 /* Get the empty block indication */ + +sdqchk: lwz r4,SACalloc(r3) /* Get the allocation flags */ + lwz r9,SACflags(r3) /* Get the flags */ + lwz r7,SACnext(r3) /* Point on to the next one */ + andis. r9,r9,hi16(sac_perm) /* Is this permanently allocated? */ + cmplw cr1,r4,r8 /* Does this look empty? */ + bne- sdqperm /* It's permanent, can't release... */ + beq- cr1,sdqfnd /* Yeah, empty... */ + +sdqperm: la r5,SACnext(r3) /* Remember the last guy */ + mr. r3,r7 /* Any more left? */ + bne+ sdqchk /* Yeah... */ + b sdqunlk /* Nope, just go unlock and leave... 
*/ + +sdqfnd: subi r6,r6,sac_cnt /* Back off the number of saveareas in here */ + stw r7,0(r5) /* Dequeue our guy */ + lwz r9,SACvrswap(r3) /* Get addressing conversion */ + stw r6,SVcount(r10) /* Back off the count for this block */ + xor r3,r3,r9 /* Flip to virtual addressing */ + +sdqunlk: li r8,0 /* Get a free lock value */ + sync /* Make sure everything is done */ + stw r8,SVlock(r10) /* Unlock the savearea chain */ + + mtmsr r12 /* Restore interrupts and translation */ + isync /* Dump any speculations */ + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + li r2,0x2202 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + + blr /* Leave... */ + + + +/* + * This routine will obtain a savearea from the free list. + * Note really well: we can take NO exceptions of any kind, + * including a PTE miss once the savearea lock is held. That's + * a guaranteed deadlock. That means we must disable for interrutions + * and turn all translation off. + * + * We pass back the virtual address of the one we just obtained + * or a zero if none to allocate. + * + * Note that the savearea list should NEVER be empty + * NOTE!!! NEVER USE R0, R2, or R12 IN HERE THAT WAY WE DON'T NEED A + * STACK FRAME IN FPU_SAVE, FPU_SWITCH, VEC_SAVE, OR VEC_SWITCH. + */ + +ENTRY(save_get_phys,TAG_NO_FRAME_USED) + + cmplw cr1,r1,r1 ; Set CR1_eq to indicate we want physical address + b csaveget ; Join the common... + +ENTRY(save_get,TAG_NO_FRAME_USED) + + cmplwi cr1,r1,0 ; Set CR1_ne to indicate we want virtual address + +csaveget: mfmsr r11 /* Get the MSR */ + lis r10,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + andi. r8,r11,0x7FCF /* Turn off all translation and 'rupts */ + ori r10,r10,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + mtmsr r8 /* Make the MSR current */ + isync ; Make sure it is done + + lwarx r9,0,r10 ; ? 
+ +sglck: lwarx r9,0,r10 /* Grab the lock value */ + li r7,1 /* Use part of the delay time */ + mr. r9,r9 /* Is it locked? */ + bne- sglcks /* Yeah, wait for it to clear... */ + stwcx. r7,0,r10 /* Try to seize that there durn lock */ + beq+ sglckd /* Got it... */ + b sglck /* Collision, try again... */ + +sglcks: lwz r9,SVlock(r10) /* Get that lock in here */ + mr. r9,r9 /* Is it free yet? */ + beq+ sglck /* Yeah, try for it again... */ + b sglcks /* Sniff away... */ + +sglckd: isync /* Make sure translation is off */ + lwz r8,SVfree(r10) /* Get the head of the save area list */ + lwz r9,SVinuse(r10) /* Get the inuse field */ + + lwz r7,SACalloc(r8) /* Pick up the allocation bits */ + lwz r5,SACvrswap(r8) /* Get real to virtual translation */ + mr. r7,r7 /* Can we use the first one? */ + blt use1st /* Yeah... */ + + andis. r7,r7,0x8000 /* Show we used the second and remember if it was the last */ + addi r3,r8,0x0800 /* Point to the first one */ + b gotsave /* We have the area now... */ + +use1st: andis. r7,r7,0x4000 /* Mark first gone and remember if empty */ + mr r3,r8 /* Set the save area */ + +gotsave: stw r7,SACalloc(r8) /* Put back the allocation bits */ + bne nodqsave /* There's still an empty slot, don't dequeue... 
*/ + + lwz r4,SACnext(r8) /* Get the next in line */ + stw r4,SVfree(r10) /* Dequeue our now empty save area block */ + +nodqsave: lis r6,HIGH_ADDR(SAVattach) /* Show that it is attached for now */ + li r4,0 /* Clear this for the lock */ + stw r6,SAVflags(r3) /* Set the flags to attached */ + addi r9,r9,1 /* Bump up the inuse count */ + stw r4,SAVprev(r3) /* Make sure that backchain is clear */ + stw r9,SVinuse(r10) /* Set the inuse field */ + sync /* Make sure all stores are done */ + stw r4,SVlock(r10) /* Unlock both save and trace areas */ + mtmsr r11 /* Restore translation and exceptions */ + isync /* Make sure about it */ + +#if 0 + mr r11,r0 /* (TEST/DEBUG) */ + mr r7,r2 /* (TEST/DEBUG) */ + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + li r2,0x2203 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ + mr r0,r11 /* (TEST/DEBUG) */ + mr r2,r7 /* (TEST/DEBUG) */ +#endif + + li r7,0 ; NOTE WELL: we set R7 to zero for vector and float saving code in cswtch.s + beqlr- cr1 ; Return now if we want the physical address + xor r3,r3,r5 /* Get the virtual address */ + blr /* Leave... */ + + +/* + * This routine will return a savearea to the free list. + * Note really well: we can take NO exceptions of any kind, + * including a PTE miss once the savearea lock is held. That's + * a guaranteed deadlock. That means we must disable for interrutions + * and turn all translation off. + * + * We take a virtual address. + * + */ + +ENTRY(save_ret,TAG_NO_FRAME_USED) + +#if 0 + cmplwi r3,0x1000 ; (TEST/DEBUG) + bgt+ notpage0 ; (TEST/DEBUG) + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +notpage0: rlwinm r6,r3,0,0,19 /* (TEST/DEBUG) */ + rlwinm r7,r3,21,31,31 /* (TEST/DEBUG) */ + lis r8,0x8000 /* (TEST/DEBUG) */ + lwz r6,SACalloc(r6) /* (TEST/DEBUG) */ + srw r8,r8,r7 /* (TEST/DEBUG) */ + and. 
r8,r8,r6 /* (TEST/DEBUG) */ + beq+ nodoublefret /* (TEST/DEBUG) */ + BREAKPOINT_TRAP /* (TEST/DEBUG) */ + +nodoublefret: /* (TEST/DEBUG) */ +#endif + + lwz r7,SAVflags(r3) /* Get the flags */ + rlwinm r6,r3,0,0,19 /* Round back down to the savearea page block */ + andis. r7,r7,HIGH_ADDR(SAVinuse) /* Still in use? */ + mfmsr r12 /* Get the MSR */ + bnelr- /* Still in use, just leave... */ + lwz r5,SACvrswap(r6) /* Get the conversion to real */ + lis r10,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + andi. r11,r12,0x7FCF /* Turn off all translation and 'rupts */ + ori r10,r10,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + mtmsr r11 /* Make the MSR current */ + isync /* Make sure translation is off */ + + mfsprg r11,1 /* Get the active save area */ + xor r3,r3,r5 /* Get the real address of the savearea */ + cmplw r11,r3 /* Are we trying to toss the active one? */ + xor r6,r6,r5 /* Make the savearea block real also */ + beq- srbigtimepanic /* This is a no-no... */ + + rlwinm r7,r3,21,31,31 /* Get position of savearea in block */ + lis r8,0x8000 /* Build a bit mask and assume first savearea */ + srw r8,r8,r7 /* Get bit position of do deallocate */ + + lwarx r11,0,r10 ; ? + +srlck: lwarx r11,0,r10 /* Grab the lock value */ + li r7,1 /* Use part of the delay time */ + mr. r11,r11 /* Is it locked? */ + bne- srlcks /* Yeah, wait for it to clear... */ + stwcx. r7,0,r10 /* Try to seize that there durn lock */ + beq+ srlckd /* Got it... */ + b srlck /* Collision, try again... */ + +srlcks: lwz r11,SVlock(r10) /* Get that lock in here */ + mr. r11,r11 /* Is it free yet? */ + beq+ srlck /* Yeah, try for it again... */ + b srlcks /* Sniff away... */ + +srlckd: isync /* Toss preexecutions */ + lwz r11,SACalloc(r6) /* Get the allocation for this block */ + lwz r7,SVinuse(r10) /* Get the in use count */ + or r11,r11,r8 /* Turn on our bit */ + subi r7,r7,1 /* We released one, adjust count */ + cmplw r11,r8 /* Is our's the only one free? 
*/ + stw r7,SVinuse(r10) /* Save out count */ + stw r11,SACalloc(r6) /* Save it out */ + bne+ srtrest /* Nope, then the block is already on the free list */ + + lwz r11,SVfree(r10) /* Get the old head of the free list */ + stw r6,SVfree(r10) /* Point the head at us now */ + stw r11,SACnext(r6) /* Point us at the old last */ + +srtrest: li r8,0 /* Get set to clear the savearea lock */ + sync /* Make sure it's all out there */ + stw r8,SVlock(r10) /* Unlock it */ + mtmsr r12 /* Restore interruptions and translation */ + isync + +#if 0 + lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ + li r2,0x2204 ; (TEST/DEBUG) + oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ + sc /* (TEST/DEBUG) */ +#endif + + blr /* Go away... */ + +srbigtimepanic: + lis r6,HIGH_ADDR(EXT(panic)) /* First half of panic call */ + lis r3,HIGH_ADDR(EXT(srfreeactive)) /* First half of panic string */ + ori r6,r6,LOW_ADDR(EXT(panic)) /* Second half of panic call */ + ori r3,r3,LOW_ADDR(EXT(srfreeactive)) /* Second half of panic string */ + mtlr r6 /* Get the address of the panic routine */ + mtmsr r12 /* Restore interruptions and translation */ + isync + blrl /* Panic... */ + + .data +EXT(srfreeactive): + STRINGD "save_ret: Attempting to release the active savearea!!!!\000" + .text + + +/* + * struct savearea *save_cpv(struct savearea *); Converts a physical savearea address to virtual + */ + + .align 5 + .globl EXT(save_cpv) + +LEXT(save_cpv) + + mfmsr r10 ; Get the current MSR + rlwinm r4,r3,0,0,19 ; Round back to the start of the physical savearea block + andi. r9,r10,0x7FEF ; Turn off interrupts and data translation + mtmsr r9 ; Disable DR and EE + isync + + lwz r4,SACvrswap(r4) ; Get the conversion to virtual + mtmsr r10 ; Interrupts and DR back on + isync + xor r3,r3,r4 ; Convert to physical + blr + + +/* + * This routine will return the virtual address of the first free savearea + * block and disable for interruptions. 
+ * Note really well: this is only for debugging, don't expect it to always work! + * + * We take a virtual address in R4 to save the original MSR, and + * return the virtual address. + * + */ + +ENTRY(save_deb,TAG_NO_FRAME_USED) + + mfmsr r12 /* Get the MSR */ + lis r10,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ + stw r12,0(r3) /* Save it */ + andi. r11,r12,0x7FCF /* Turn off all translation and 'rupts */ + ori r10,r10,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ + mtmsr r11 /* Make the MSR current */ + isync /* Make sure translation is off */ + lwz r3,SVfree(r10) /* Get the physical first in list */ + andi. r11,r12,0x7FFF /* Clear only interruption */ + lwz r5,SACvrswap(r3) /* Get the conversion to virtual */ + mtmsr r11 /* Restore DAT but not INT */ + xor r3,r3,r5 /* Make it virtual */ + isync + blr + + + + + diff --git a/osfmk/ppc/sched_param.h b/osfmk/ppc/sched_param.h new file mode 100644 index 000000000..b6430899b --- /dev/null +++ b/osfmk/ppc/sched_param.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + */ + +/* + * Scheduler parameters. + */ + +#ifndef _PPC_SCHED_PARAM_H_ +#define _PPC_SCHED_PARAM_H_ + +#include +#include + +#endif _PPC_SCHED_PARAM_H_ diff --git a/osfmk/ppc/screen.h b/osfmk/ppc/screen.h new file mode 100644 index 000000000..dd81dadb3 --- /dev/null +++ b/osfmk/ppc/screen.h @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: screen.h + * Author: Alessandro Forin, Carnegie Mellon University + * Date: 9/90 + * + * Definitions for the Generic Screen Driver. + */ + +#ifndef _SCREEN_H_ +#define _SCREEN_H_ + +/* + * Most of these structures are defined so that the + * resulting structure mapped to user space appears + * to be compatible with the one used by the DEC X + * servers (pm_info..). 
Keep it that way and the + * X servers will keep on running. + */ + +/* + * Generic structures and defines + */ + +/* colors */ +typedef struct { + unsigned short red; + unsigned short green; + unsigned short blue; +} color_map_t; + +typedef struct { + short unused; + unsigned short index; + color_map_t value; +} color_map_entry_t; + +typedef struct { + unsigned int Bg_rgb[3]; + unsigned int Fg_rgb[3]; +} cursor_color_t; + +/* generic input event */ +typedef struct { + short x; /* x position */ + short y; /* y position */ + unsigned int time; /* 1 millisecond units */ + + unsigned char type; /* button up/down/raw or motion */ +# define EVT_BUTTON_UP 0 +# define EVT_BUTTON_DOWN 1 +# define EVT_BUTTON_RAW 2 +# define EVT_PTR_MOTION 3 + + unsigned char key; /* the key (button only) */ +# define KEY_LEFT_BUTTON 1 +# define KEY_MIDDLE_BUTTON 2 +# define KEY_RIGHT_BUTTON 3 +# define KEY_TBL_LEFT_BUTTON 0 +# define KEY_TBL_FRONT_BUTTON 1 +# define KEY_TBL_RIGHT_BUTTON 2 +# define KEY_TBL_BACK_BUTTON 3 + + unsigned char index; /* which instance of device */ + + unsigned char device; /* which device */ +# define DEV_NULL 0 +# define DEV_MOUSE 1 +# define DEV_KEYBD 2 +# define DEV_TABLET 3 +# define DEV_AUX 4 +# define DEV_CONSOLE 5 +# define DEV_KNOB 8 +# define DEV_JOYSTICK 9 + +} screen_event_t; + +/* timed coordinate info */ +typedef struct { + unsigned int time; + short x, y; +} screen_timed_point_t; + +/* queue of input events, and ring of mouse motions track */ +typedef struct { + screen_event_t *events; + unsigned int q_size; + unsigned int q_head; + unsigned int q_tail; + unsigned long timestamp; + screen_timed_point_t *track; + unsigned int t_size; + unsigned int t_next; +} screen_evque_t; + +/* mouse/cursor position */ +typedef struct { + short x; + short y; +} screen_point_t; + +/* mouse motion bounding boxes */ +typedef struct { + short bottom; + short right; + short left; + short top; +} screen_rect_t; + +/* + * Here it is, each field is marked as + * + * Kset 
: kernel sets it unconditionally + * Kuse : kernel uses it, safely + * Kdep : kernel might depend on it + */ +typedef struct { + screen_evque_t evque; /* Kset, Kuse */ + short mouse_buttons; /* Kset */ + screen_point_t xx3 /*tablet*/; + short xx4 /*tswitches*/; + screen_point_t cursor; /* Kset */ + short row; /* Kdep */ + short col; /* Kdep */ + short max_row; /* Kdep */ + short max_col; /* Kdep */ + short max_x; /* Kset */ + short max_y; /* Kset */ + short max_cur_x; /* Kdep */ + short max_cur_y; /* Kdep */ + int version; /* Kset */ + union { + struct { + unsigned char * bitmap; /* Kset */ + short * x16 /*scanmap*/; + short * x17 /*cursorbits*/; + short * x18 /*pmaddr*/; + unsigned char * planemask; /* Kset */ + } pm; + struct { + int x15 /* flags */; + int * gram /* Kset */; + int * rb_addr /* Kset */; + int rb_phys /* Kset */; + int rb_size /* Kset */; + } gx; + } dev_dep_1; + screen_point_t mouse_loc; /* Kdep */ + screen_rect_t mouse_box; /* Kdep */ + short mouse_threshold;/* Kuse */ + short mouse_scale; /* Kuse */ + short min_cur_x; /* Kdep */ + short min_cur_y; /* Kdep */ + union { + struct { + int x26 /*dev_type*/; + char * x27 /*framebuffer*/; + char * x28 /*volatile struct bt459 *bt459*/; + int x29 /*slot*/; + char cursor_sprite[1024];/* Kset */ + unsigned char Bg_color[3]; /* Kset */ + unsigned char Fg_color[3]; /* Kset */ + int tablet_scale_x; /* Kuse */ + int tablet_scale_y; /* Kuse */ + } pm; + struct { + char * gxo /* Kset */; + char stamp_width /* Kset */; + char stamp_height /* Kset */; + char nplanes /* Kset */; + char x27_4 /* n10_present */; + char x28_1 /* dplanes */; + char zplanes /* Kset */; + char zzplanes /* Kset */; + unsigned char cursor_sprite[1024] /* Kuse */; + char x285_0 /* padding for next, which was int */; + unsigned char Fg_color[4] /* Kuse */; + unsigned char Bg_color[4] /* Kuse */; + unsigned short cmap_index /* Kuse */; + unsigned short cmap_count /* Kuse */; + unsigned int colormap[256] /* Kuse */; + int * stic_dma_rb /* Kset 
*/; + int * stic_reg /* Kset */; + int ptpt_phys /* Kdep */; + int ptpt_size /* Kdep */; + int * ptpt_pgin /* Kset */; + } gx; + } dev_dep_2; + short frame_scanline_width; /* in pixels, Kset */ + short frame_height; /* in scanlines, Kset */ + /* + * Event queues are allocated right after that + */ +#define MAX_EVENTS 64 +#define MAX_TRACK 100 + screen_event_t event_queue[MAX_EVENTS]; /* Kset */ + screen_timed_point_t point_track[MAX_TRACK]; /* Kset */ + /* + * Some like it hot + */ + unsigned int event_id; + int interrupt_info; +} user_info_t; + + +/* + * Screen get_status codes and arguments + */ +#include + + /* Get size (and offset) of mapped info */ +#define SCREEN_GET_OFFSETS _IOR('q', 6, unsigned **) + + /* Get screen status flags */ +#define SCREEN_STATUS_FLAGS _IOR('q', 22, int *) +# define MONO_SCREEN 0x01 +# define COLOR_SCREEN 0x02 +# define SCREEN_BEING_UPDATED 0x04 + +/* + * Screen set_status codes and arguments + */ + + /* start/stop screen saver, control fading interval */ +#define SCREEN_FADE _IOW('q', 114, int) /* fade screen */ +# define NO_FADE -1 + + /* Turn video on/off manually */ +#define SCREEN_ON _IO('q', 10) +#define SCREEN_OFF _IO('q', 11) + + /* Fixup pointers inside mapped info structure */ +#define SCREEN_ADJ_MAPPED_INFO _IOR('q', 1, user_info_t *) + + /* Initialize anything that needs to, hw-wise */ +#define SCREEN_INIT _IO('q', 4) + + /* Position cursor to a specific spot */ +#define SCREEN_SET_CURSOR _IOW('q', 2, screen_point_t) + + /* Load Bg/Fg colors for cursor */ +#define SCREEN_SET_CURSOR_COLOR _IOW('q', 3, cursor_color_t) + + /* Load cursor sprite, small cursor form */ +typedef unsigned short cursor_sprite_t[32]; + +#define SCREEN_LOAD_CURSOR _IOW('q', 7, cursor_sprite_t) + + /* Load cursor sprite, large 64x64 cursor form */ +typedef char cursor_sprite_long_t[1024]; + +#define SCREEN_LOAD_CURSOR_LONG _IOW('q', 13, cursor_sprite_long_t) + + /* Modify a given entry in the color map (VDAC) */ +#define SCREEN_SET_CMAP_ENTRY 
_IOW('q', 12, color_map_entry_t) + + /* Return some other information about hardware (optional) */ +typedef struct { + int frame_width; + int frame_height; + int frame_visible_width; + int frame_visible_height; +} screen_hw_info_t; +#define SCREEN_HARDWARE_INFO _IOR('q', 23, screen_hw_info_t) + + /* Screen-dependent, unspecified (and despised) */ +#define SCREEN_HARDWARE_DEP _IO('q', 24) + +#endif /* _SCREEN_H_ */ diff --git a/osfmk/ppc/screen_switch.h b/osfmk/ppc/screen_switch.h new file mode 100644 index 000000000..cbf92e7cf --- /dev/null +++ b/osfmk/ppc/screen_switch.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: screen_switch.h + * Author: Alessandro Forin, Carnegie Mellon University + * Date: 10/90 + * + * Definitions of things that must be tailored to + * specific hardware boards for the Generic Screen Driver. + */ + +#ifndef SCREEN_SWITCH_H +#define SCREEN_SWITCH_H 1 + +#include + +/* + * List of probe routines, scanned at cold-boot time + * to see which, if any, graphic display is available. + * This is done before autoconf, so that printing on + * the console works early on. The alloc routine is + * called only on the first device that answers. + * Ditto for the setup routine, called later on. 
+ */ +struct screen_probe_vector { + int (*probe)(void); + unsigned int (*alloc)(void); + int (*setup)(int, user_info_t); +}; + +/* + * Low-level operations on the graphic device, used + * by the otherwise device-independent interface code + */ + +/* Forward declaration of screen_softc_t */ +typedef struct screen_softc *screen_softc_t; + +struct screen_switch { + int (*graphic_open)(void); /* when X11 opens */ + int (*graphic_close)(screen_softc_t); /* .. or closes */ + int (*set_status)(screen_softc_t, + dev_flavor_t, + dev_status_t, + natural_t); /* dev-specific ops */ + int (*get_status)(screen_softc_t, + dev_flavor_t, + dev_status_t, + natural_t*); /* dev-specific ops */ + int (*char_paint)(screen_softc_t, + int, + int, + int); /* blitc */ + int (*pos_cursor)(void*, + int, + int); /* cursor positioning*/ + int (*insert_line)(screen_softc_t, + short); /* ..and scroll down */ + int (*remove_line)(screen_softc_t, + short); /* ..and scroll up */ + int (*clear_bitmap)(screen_softc_t); /* blank screen */ + int (*video_on)(void*, + user_info_t*); /* screen saver */ + int (*video_off)(void*, + user_info_t*); + int (*intr_enable)(void*, + boolean_t); + int (*map_page)(screen_softc_t, + vm_offset_t, + int); /* user-space mapping*/ +}; + +/* + * Each graphic device needs page-aligned memory + * to be mapped in user space later (for events + * and such). Size and content of this memory + * is unfortunately device-dependent, even if + * it did not need to (puns). + */ +extern char *screen_data; + +extern struct screen_probe_vector screen_probe_vector[]; + +extern int screen_noop(void), screen_find(void); + +#endif /* SCREEN_SWITCH_H */ diff --git a/osfmk/ppc/serial_console.c b/osfmk/ppc/serial_console.c new file mode 100644 index 000000000..7c31eaa3c --- /dev/null +++ b/osfmk/ppc/serial_console.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ + +#include +#include +#include + +#include +#include /* spl definitions */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * A machine MUST have a console. In our case + * things are a little complicated by the graphic + * display: people expect it to be their "console", + * but we'd like to be able to live without it. + * This is not to be confused with the "rconsole" thing: + * that just duplicates the console I/O to + * another place (for debugging/logging purposes). 
+ */ + +const int console_unit = 0; +const int console_chan_default = CONSOLE_PORT; +#define console_chan (console_chan_default) /* ^ cpu_number()) */ + +#define OPS(putc, getc, nosplputc, nosplgetc) putc, getc + +const struct console_ops { + int (*putc)(int, int, int); + int (*getc)(int, int, boolean_t, boolean_t); +} cons_ops[] = { +#define SCC_CONS_OPS 0 + {OPS(scc_putc, scc_getc, no_spl_scputc, no_spl_scgetc)}, +#define VC_CONS_OPS 1 + {OPS(vcputc, vcgetc, no_spl_vcputc, no_spl_vcgetc)}, +}; +#define NCONSOPS (sizeof cons_ops / sizeof cons_ops[0]) + +#if SERIAL_CONSOLE_DEFAULT +#define CONS_OPS SCC_CONS_OPS +#define CONS_NAME "com" +#else +#define CONS_OPS VC_CONS_OPS +#define CONS_NAME "vc" +#endif + +#define MP_SAFE_CONSOLE 1 /* Set this to 1 to allow more than 1 processor to print at once */ +#if MP_SAFE_CONSOLE + +struct ppcbfr { /* Controls multiple processor output */ + unsigned int pos; /* Current position in buffer */ + unsigned int noprompt; /* Set if we skip the prompt */ + unsigned int echo; /* Control character echoing */ + char buffer[256]; /* Fairly big buffer */ +}; +typedef struct ppcbfr ppcbfr; +ppcbfr cbfr[NCPUS]; /* Get one of these for each processor */ +volatile unsigned int cbfpend; /* A buffer is pending output */ +volatile unsigned int sconowner=-1; /* Mark who's actually writing */ + +#endif + + +unsigned int cons_ops_index = CONS_OPS; +unsigned int killprint = 0; +unsigned int debcnputc = 0; +extern unsigned int mappingdeb0; +extern int debugger_holdoff[NCPUS]; + +static void _cnputc(char c) +{ + cons_ops[cons_ops_index].putc(console_unit, console_chan, c); +} + +void cnputcusr(char c) { /* Echo input character directly */ + + unsigned int cpu; + + if (cpu_data[master_cpu].active_thread) cpu = cpu_number(); /* If we're started up, use the current CPU */ + else cpu = master_cpu; /* Otherwise use the master_cpu */ + + hw_atomic_add(&debugger_holdoff[cpu], 1); /* Don't allow debugger entry just now (this is a HACK) */ + + _cnputc( c); /* 
Echo the character */ + if(c=='\n') _cnputc( '\r'); /* Add a return if we had a new line */ + + hw_atomic_sub(&debugger_holdoff[cpu], 1); /* Don't allow debugger entry just now (this is a HACK) */ + return; +} + +void +cnputc(char c) +{ + + unsigned int oldpend, i, cpu, ourbit, sccpu; + spl_t s; + +#if MP_SAFE_CONSOLE + +/* + * Handle multiple CPU console output. + * Note: this thing has gotten god-awful complicated. We need a better way. + */ + + + if(killprint) { + return; /* If printing is disabled, bail... */ + } + + if (cpu_data[master_cpu].active_thread) cpu = cpu_number(); /* If we're started up, use the current CPU */ + else cpu = master_cpu; /* Otherwise use the master_cpu */ + + hw_atomic_add(&debugger_holdoff[cpu], 1); /* Don't allow debugger entry just now (this is a HACK) */ + + ourbit = 1 << cpu; /* Make a mask for just us */ + if(debugger_cpu != -1) { /* Are we in the debugger with empty buffers? */ + + while(sconowner != cpu) { /* Anyone but us? */ + hw_compare_and_store(-1, cpu, (unsigned int *)&sconowner); /* Try to mark it for us if idle */ + } + + _cnputc( c); /* Yeah, just write it */ + if(c=='\n') /* Did we just write a new line? */ + _cnputc( '\r'); /* Yeah, just add a return */ + + sconowner=-1; /* Mark it idle */ + hw_atomic_sub(&debugger_holdoff[cpu], 1); /* Don't allow debugger entry just now (this is a HACK) */ + + return; /* Leave... */ + } + + s=splhigh(); /* Don't bother me */ + + while(ourbit&cbfpend); /* We aren't "double buffered," so we'll just wait until the buffers are written */ + isync(); /* Just in case we had to wait */ + + if(c) { /* If the character is not null */ + cbfr[cpu].buffer[cbfr[cpu].pos]=c; /* Fill in the buffer for our CPU */ + cbfr[cpu].pos++; /* Up the count */ + if(cbfr[cpu].pos > 253) { /* Is the buffer full? 
*/ + cbfr[cpu].buffer[254]='\n'; /* Yeah, set the second to last as a LF */ + cbfr[cpu].buffer[255]='\r'; /* And the last to a CR */ + cbfr[cpu].pos=256; /* Push the buffer to the end */ + c='\r'; /* Set character to a CR */ + } + } + + if(c == '\n') { /* Are we finishing a line? */ + cbfr[cpu].buffer[cbfr[cpu].pos]='\r'; /* And the last to a CR */ + cbfr[cpu].pos++; /* Up the count */ + c='\r'; /* Set character to a CR */ + } + +#if 1 + if(cbfr[cpu].echo == 1) { /* Did we hit an escape last time? */ + if(c == 'K') { /* Is it a partial clear? */ + cbfr[cpu].echo = 2; /* Yes, enter echo mode */ + } + else cbfr[cpu].echo = 0; /* Otherwise reset escape */ + } + else if(cbfr[cpu].echo == 0) { /* Not in escape sequence, see if we should enter */ + cbfr[cpu].echo = 1; /* Set that we are in escape sequence */ + } +#endif + + if((c == 0x00) || (c == '\r') || (cbfr[cpu].echo == 2)) { /* Try to push out all buffers if we see CR or null */ + + while(1) { /* Loop until we see who's doing this */ + oldpend=cbfpend; /* Get the currentest pending buffer flags */ + if(hw_compare_and_store(oldpend, oldpend|ourbit, (unsigned int *)&cbfpend)) /* Swap ours on if no change */ + break; /* Bail the loop if it worked */ + } + + if(!hw_compare_and_store(-1, cpu, (unsigned int *)&sconowner)) { /* See if someone else has this, and take it if not */ + debugger_holdoff[cpu] = 0; /* Allow debugger entry (this is a HACK) */ + splx(s); /* Let's take some 'rupts now */ + return; /* We leave here, 'cause another processor is already writing the buffers */ + } + + while(1) { /* Loop to dump out all of the finished buffers */ + oldpend=cbfpend; /* Get the most current finished buffers */ + for(sccpu=0; sccpu= NCONSOPS && !squawked) { + squawked = TRUE; + printf("switch_to_old_console: unknown ops %d\n", ops); + } else + cons_ops_index = ops; +} + + +int +vcgetc(int l, int u, boolean_t wait, boolean_t raw) +{ + char c; + + if( 0 == (*PE_poll_input)( 0, &c)) + return( c); + else + return( 0); +} diff 
--git a/osfmk/ppc/serial_defs.h b/osfmk/ppc/serial_defs.h new file mode 100644 index 000000000..508165fab --- /dev/null +++ b/osfmk/ppc/serial_defs.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * @APPLE_FREE_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: serial_defs.h + * Author: Alessandro Forin, Carnegie Mellon University + * Date: 7/91 + * + * Generic console driver for serial-line based consoles. + */ + +#ifndef _PPC_SERIAL_DEFS_ +#define _PPC_SERIAL_DEFS_ + +#include +/* + * Common defs + */ + + +#define CONS_ERR_PARITY 0x1000 +#define CONS_ERR_BREAK 0x2000 +#define CONS_ERR_OVERRUN 0x4000 + + +#endif /* _PPC_SERIAL_DEFS_ */ diff --git a/osfmk/ppc/setjmp.h b/osfmk/ppc/setjmp.h new file mode 100644 index 000000000..a1a0b34c4 --- /dev/null +++ b/osfmk/ppc/setjmp.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PPC_SETJMP_H_ +#define _PPC_SETJMP_H_ + +/* + * We save the following registers (marked as non-volatile in the ELF spec) + * + * r1 - stack pointer + * r13 - small data area pointer + * r14-r30 - local variables + * r31 - local variable/environment pointer + * + * cr - condition register + * lr - link register (to know where to jump back to) + * xer - fixed point exception register + * + * fpscr - floating point status and control + * f14-f31 - local variables + * + * which comes to 57 words. We round up to 64 for good measure. + */ + +typedef struct jmp_buf { + int jmp_buf[64]; +} jmp_buf_t; + +#endif /* _PPC_SETJMP_H_ */ diff --git a/osfmk/ppc/start.s b/osfmk/ppc/start.s new file mode 100644 index 000000000..c2e58c929 --- /dev/null +++ b/osfmk/ppc/start.s @@ -0,0 +1,832 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define ptFilter 0 +#define ptVersion 4 +#define ptRevision 6 +#define ptFeatures 8 +#define ptInitRout 12 +#define ptRptdProc 16 +#define ptTempMax 20 +#define ptTempThr 24 +#define ptLineSize 28 +#define ptl1iSize 32 +#define ptl1dSize 36 +#define ptSize 40 + +#define bootCPU 10 +#define firstInit 9 +#define firstBoot 8 + +/* + * Interrupt and bootup stack for initial processor + */ + + .file "start.s" + + .data + + /* Align on page boundry */ + .align PPC_PGSHIFT + /* Red zone for interrupt stack, one page (will be unmapped)*/ + .set ., .+PPC_PGBYTES + /* intstack itself */ + + .globl EXT(FixedStackStart) +EXT(FixedStackStart): + + .globl EXT(intstack) +EXT(intstack): + .set ., .+INTSTACK_SIZE*NCPUS + + /* Debugger stack - used by the debugger if present */ + /* NOTE!!! Keep the debugger stack right after the interrupt stack */ +#if MACH_KDP || MACH_KDB + .globl EXT(debstack) +EXT(debstack): + .set ., .+KERNEL_STACK_SIZE*NCPUS + + .globl EXT(FixedStackEnd) +EXT(FixedStackEnd): + + .align ALIGN + .globl EXT(intstack_top_ss) +EXT(intstack_top_ss): + .long EXT(intstack)+INTSTACK_SIZE-SS_SIZE /* intstack_top_ss points to the top of interrupt stack */ + + .align ALIGN + .globl EXT(debstack_top_ss) +EXT(debstack_top_ss): + + .long EXT(debstack)+KERNEL_STACK_SIZE-SS_SIZE /* debstack_top_ss points to the top of debug stack */ + + .globl EXT(debstackptr) +EXT(debstackptr): + .long EXT(debstack)+KERNEL_STACK_SIZE-SS_SIZE + +#endif /* MACH_KDP || MACH_KDB */ + +/* + * All CPUs start here. 
+ * + * This code is called from SecondaryLoader + * + * Various arguments are passed via a table: + * ARG0 = pointer to other startup parameters + */ + .text + +ENTRY(_start_cpu,TAG_NO_FRAME_USED) + crclr bootCPU ; Set non-boot processor + crclr firstInit ; Set not first time init + mr r30,r3 ; Set current per_proc + +; +; Note that we are just trying to get close. The real TB sync will take +; place later. The value we are loading is set in two places. For the +; main processor, it will be the TB at the last interrupt before we went +; to sleep. For the others, it will be the time just before the main +; processor woke us up. +; + + lwz r15,ruptStamp(r3) ; Get the timebase from the other processor + li r17,0 ; Clear this out + lwz r16,ruptStamp+4(r3) ; Get the timebase from the other processor + mtspr tbl,r17 ; Clear bottom so we do not tick + mtspr tbu,r15 ; Set top + mtspr tbl,r16 ; Then bottom again + + b allstart + +ENTRY(_start,TAG_NO_FRAME_USED) + + lis r30,hi16(EXT(per_proc_info)) ; Set current per_proc + ori r30,r30,lo16(EXT(per_proc_info)) ; Set current per_proc + crset bootCPU ; Set boot processor + + lwz r17,pfAvailable(r30) ; Get the available bits + rlwinm. r0,r17,0,pfValidb,pfValidb ; Have we initialized the feature flags yet? + crmove firstInit,cr0_eq ; Set if we are doing first time init + bne allstart ; Yeah, we must be waking up from sleep... 
+ +; +; Here is where we do any one time general low-level initialization + + lis r20,HIGH_ADDR(fwdisplock) ; Get address of the firmware display lock + li r19,0 ; Zorch a register + ori r20,r20,LOW_ADDR(fwdisplock) ; Get address of the firmware display lock + stw r19,0(r20) ; Make sure the lock is free + +allstart: mr r31,r3 ; Save away arguments + lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc + ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc + + mtsprg 0,r30 ; Set the per_proc + + mfspr r6,hid0 ; Get the HID0 + li r7,MSR_VM_OFF ; Get real mode MSR + rlwinm r6,r6,0,sleep+1,doze-1 ; Remove any vestiges of sleep + mtspr hid0,r6 ; Set the insominac HID0 + mtmsr r7 ; Set the real mode SRR + isync + +; Map in the first 256Mb in both instruction and data BATs + + li r7,((0x7FF<<2)|2) ; Set up for V=R 256MB in supervisor space + li r8,((2<<3)|2) ; Physical address = 0, coherent, R/W + li r9,0 ; Clear out a register + + mtsprg 1,r9 ; Clear the extra SPRGs + mtsprg 2,r9 + mtsprg 3,r9 + + sync + isync + mtdbatu 0,r7 ; Map bottom 256MB + mtdbatl 0,r8 ; Map bottom 256MB + mtdbatu 1,r9 ; Invalidate maps + mtdbatl 1,r9 ; Invalidate maps + mtdbatu 2,r9 ; Invalidate maps + mtdbatl 2,r9 ; Invalidate maps + mtdbatu 3,r9 ; Invalidate maps + mtdbatl 3,r9 ; Invalidate maps + sync + isync + mtibatu 0,r7 ; Map bottom 256MB + mtibatl 0,r8 ; Map bottom 256MB + mtibatu 1,r9 ; Invalidate maps + mtibatl 1,r9 ; Invalidate maps + mtibatu 2,r9 ; Invalidate maps + mtibatl 2,r9 ; Invalidate maps + mtibatu 3,r9 ; Invalidate maps + mtibatl 3,r9 ; Invalidate maps + sync + isync + + lis r26,hi16(processor_types) ; Point to processor table + ori r26,r26,lo16(processor_types) ; Other half + mfpvr r10 ; Get the PVR + +nextPVR: lwz r28,ptFilter(r26) ; Get the filter + lwz r27,ptVersion(r26) ; Get the version and revision codes + and r28,r10,r28 ; Throw away dont care bits + cmplw r27,r28 ; Is this the right set? + beq donePVR ; We have the right one... 
+ addi r26,r26,ptSize ; Point to the next type + b nextPVR ; Check it out... + +donePVR: lwz r20,ptInitRout(r26) ; Grab the special init routine + mtlr r20 ; Setup to call the init + + bf firstInit,notFirst ; Not first boot, go... + +; +; The following code just does a general initialization of the features just +; after the initial first-time boot. This is not done after waking up or on +; any "secondary" processor. +; +; We are just setting defaults. The specific initialization code will modify these +; if necessary. +; + + lwz r17,ptFeatures(r26) ; Pick up the features + + lwz r13,ptRptdProc(r26) ; Get the reported processor + sth r13,pfrptdProc(r30) ; Set the reported processor + + lwz r13,ptTempMax(r26) ; Get maximum operating temperature + stw r13,thrmmaxTemp(r30) ; Set the maximum + lwz r13,ptTempThr(r26) ; Get temprature to throttle down when exceeded + stw r13,thrmthrottleTemp(r30) ; Set the temperature that we throttle + + lwz r13,ptLineSize(r26) ; Get the cache line size + sth r13,pflineSize(r30) ; Save it + lwz r13,ptl1iSize(r26) ; Get icache size + stw r13,pfl1iSize(r30) ; Save it + lwz r13,ptl1dSize(r26) ; Get dcache size + stw r13,pfl1dSize(r30) ; Save it + b doOurInit ; Go do processor specific initialization... + +notFirst: lwz r17,pfAvailable(r30) ; Get our features + rlwinm. r0,r17,0,pfValidb,pfValidb ; Have we set up this CPU yet? + bne doOurInit ; Yeah, must be wakeup... + + lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc + ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc + + la r7,pfAvailable(r30) ; Point to features of our processor + la r8,pfAvailable(r23) ; Point to features of boot processor + li r9,(pfSize+thrmSize)/4 ; Get size of a features area + +cpyFeat: subi r9,r9,1 ; Count word + lwz r0,0(r8) ; Get boot cpu features + stw r0,0(r7) ; Copy to ours + mr. r9,r9 ; Finished? + addi r7,r7,4 ; Next out + addi r8,r8,4 ; Next in + bgt cpyFeat ; Copy all boot cpu features to us... 
+ + lwz r17,pfAvailable(r30) ; Get our newly initialized features + +doOurInit: + mr. r20,r20 ; See if initialization routine + crand firstBoot,bootCPU,firstInit ; Indicate if we are on the initial first processor startup + bnelrl ; Do the initialization + + ori r17,r17,lo16(pfValid) ; Set the valid bit + stw r17,pfAvailable(r30) ; Set the available features + mtsprg 2,r17 ; Remember the feature flags + + rlwinm. r0,r17,0,pfFloatb,pfFloatb ; See if there is floating point + beq- noFloat ; Nope, this is a really stupid machine... + + li r0,MSR_VM_OFF|MASK(MSR_FP) ; Enable for floating point + mtmsr r0 /* Set the standard MSR values */ + isync + + lis r5,HIGH_ADDR(EXT(FloatInit)) /* Get top of floating point init value */ + ori r5,r5,LOW_ADDR(EXT(FloatInit)) /* Slam bottom */ + lfd f0,0(r5) /* Initialize FP0 */ + fmr f1,f0 /* Ours in not */ + fmr f2,f0 /* to wonder why, */ + fmr f3,f0 /* ours is but to */ + fmr f4,f0 /* do or die! */ + fmr f5,f0 + fmr f6,f0 + fmr f7,f0 + fmr f8,f0 + fmr f9,f0 + fmr f10,f0 + fmr f11,f0 + fmr f12,f0 + fmr f13,f0 + fmr f14,f0 + fmr f15,f0 + fmr f16,f0 + fmr f17,f0 + fmr f18,f0 + fmr f19,f0 + fmr f20,f0 + fmr f21,f0 + fmr f22,f0 + fmr f23,f0 + fmr f24,f0 + fmr f25,f0 + fmr f26,f0 + fmr f27,f0 + fmr f28,f0 + fmr f29,f0 + fmr f30,f0 + fmr f31,f0 + + li r0, MSR_VM_OFF ; Turn off floating point + mtmsr r0 + isync + +noFloat: rlwinm. r0,r17,0,pfAltivecb,pfAltivecb ; See if there is Altivec + beq- noVector ; Nope... 
+ + li r0,0 ; Clear out a register + + lis r7,hi16(MSR_VEC_ON) ; Get real mode MSR + Altivec + ori r7,r7,lo16(MSR_VM_OFF) ; Get real mode MSR + Altivec + mtmsr r7 ; Set the real mode SRR */ + isync ; Make sure it has happened + + lis r5,hi16(EXT(QNaNbarbarian)) ; Altivec initializer + ori r5,r5,lo16(EXT(QNaNbarbarian)) ; Altivec initializer + + mtspr vrsave,r0 ; Set that no VRs are used yet */ + + vspltisw v1,0 ; Clear a register + lvx v0,br0,r5 ; Initialize VR0 + mtvscr v1 ; Clear the vector status register + vor v2,v0,v0 ; Copy into the next register + vor v1,v0,v0 ; Copy into the next register + vor v3,v0,v0 ; Copy into the next register + vor v4,v0,v0 ; Copy into the next register + vor v5,v0,v0 ; Copy into the next register + vor v6,v0,v0 ; Copy into the next register + vor v7,v0,v0 ; Copy into the next register + vor v8,v0,v0 ; Copy into the next register + vor v9,v0,v0 ; Copy into the next register + vor v10,v0,v0 ; Copy into the next register + vor v11,v0,v0 ; Copy into the next register + vor v12,v0,v0 ; Copy into the next register + vor v13,v0,v0 ; Copy into the next register + vor v14,v0,v0 ; Copy into the next register + vor v15,v0,v0 ; Copy into the next register + vor v16,v0,v0 ; Copy into the next register + vor v17,v0,v0 ; Copy into the next register + vor v18,v0,v0 ; Copy into the next register + vor v19,v0,v0 ; Copy into the next register + vor v20,v0,v0 ; Copy into the next register + vor v21,v0,v0 ; Copy into the next register + vor v22,v0,v0 ; Copy into the next register + vor v23,v0,v0 ; Copy into the next register + vor v24,v0,v0 ; Copy into the next register + vor v25,v0,v0 ; Copy into the next register + vor v26,v0,v0 ; Copy into the next register + vor v27,v0,v0 ; Copy into the next register + vor v28,v0,v0 ; Copy into the next register + vor v29,v0,v0 ; Copy into the next register + vor v30,v0,v0 ; Copy into the next register + vor v31,v0,v0 ; Copy into the next register + + li r0, MSR_VM_OFF ; Turn off vectors + mtmsr r0 + isync + 
+noVector: rlwinm. r0,r17,0,pfSMPcapb,pfSMPcapb ; See if we can do SMP + beq- noSMP ; Nope... + + lhz r13,PP_CPU_NUMBER(r30) ; Get the CPU number + mtspr pir,r13 ; Set the PIR + +noSMP: rlwinm. r0,r17,0,pfThermalb,pfThermalb ; See if there is an TAU + beq- noThermometer ; Nope... + + li r13,0 ; Disable thermals for now + mtspr thrm3,r13 ; Do it + li r13,lo16(thrmtidm|thrmvm) ; Set for lower-than thermal event at 0 degrees + mtspr thrm1,r13 ; Do it + lis r13,hi16(thrmthrm) ; Set 127 degrees + ori r13,r13,lo16(thrmvm) ; Set for higher-than event + mtspr thrm2,r13 ; Set it + +noThermometer: + + bl EXT(cacheInit) ; Initializes all caches (including the TLB) + + li r0,MSR_SUPERVISOR_INT_OFF ; Make sure we do not have FP enabled + mtmsr r0 ; Set the standard MSR values + isync + + bf bootCPU,callcpu ; Not the boot processor... + + lis r29,HIGH_ADDR(EXT(intstack_top_ss)) ; move onto interrupt stack + ori r29,r29,LOW_ADDR(EXT(intstack_top_ss)) + lwz r29,0(r29) + + li r28,0 + stw r28,FM_BACKPTR(r29) ; store a null frame backpointer + + mr r1,r29 + mr r3,r31 ; Restore any arguments we may have trashed + + bl EXT(ppc_init) ; Jump into boot init code + BREAKPOINT_TRAP + +callcpu: + lwz r29,PP_INTSTACK_TOP_SS(r31) ; move onto interrupt stack + + li r28,0 + stw r28,FM_BACKPTR(r29) ; store a null frame backpointer + + + mr r1,r29 ; move onto new stack + mr r3,r31 ; Restore any arguments we may have trashed + + bl EXT(ppc_init_cpu) ; Jump into cpu init code + BREAKPOINT_TRAP ; Should never return + +; +; Specific processor initialization routines +; + +; 750CX + +init750CX: bflr firstBoot ; No init for wakeup.... + mfspr r13,hid1 ; Get HID1 + li r14,lo16(0xFD5F) ; Get valid + rlwinm r13,r13,4,28,31 ; Isolate + slw r14,r14,r13 ; Position + rlwimi r17,r14,15-pfCanNapb,pfCanNapb,pfCanNapb ; Set it + b init750com ; Join common... + +; 750 + +init750: bflr firstBoot ; No init for wakeup.... + +init750com: mfspr r13,l2cr ; Get the L2CR + rlwinm. r0,r13,0,l2e,l2e ; Any L2? 
+ bne+ i750hl2 ; Yes... + rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No L2, turn off feature + +i750hl2: lis r14,hi16(256*1024) ; Base L2 size + addis r15,r13,0x3000 ; Hah... Figure this one out... + rlwinm r15,r15,4,30,31 ; Isolate + rlwinm. r8,r13,0,l2siz,l2sizf ; Was size valid? + slw r14,r14,r15 ; Set 256KB, 512KB, or 1MB + beq- init750none ; Not a valid setting... + + stw r13,pfl2cr(r30) ; Shadow the L2CR + stw r14,pfl2Size(r30) ; Set the L2 size + blr ; Return.... + +init750none: + rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No level 2 cache + blr ; Return... + + +init7400: bf firstBoot,i7400nb ; Do different if not initial boot... + mfspr r13,l2cr ; Get the L2CR + rlwinm. r0,r13,0,l2e,l2e ; Any L2? + bne+ i7400hl2 ; Yes... + rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No L2, turn off feature + +i7400hl2: lis r14,hi16(256*1024) ; Base L2 size + addis r15,r13,0x3000 ; Hah... Figure this one out... + rlwinm r15,r15,4,30,31 + slw r14,r14,r15 ; Set 256KB, 512KB, 1MB, or 2MB + + stw r13,pfl2cr(r30) ; Shadow the L2CR + stw r14,pfl2Size(r30) ; Set the L2 size + + mfspr r11,hid0 ; Get the current HID0 + oris r11,r11,hi16(emcpm|eiecm) ; ? + mtspr hid0,r11 ; ? + isync + stw r11,pfHID0(r30) ; Save the HID0 value + + mfspr r11,msscr0 ; Get the msscr0 register + stw r11,pfMSSCR0(r30) ; Save the MSSCR0 value + mfspr r11,msscr1 ; Get the msscr1 register + stw r11,pfMSSCR1(r30) ; Save the MSSCR1 value + blr ; Return... + +i7400nb: lwz r11,pfHID0(r30) ; Get HID0 + sync + mtspr hid0,r11 ; Set the HID + lwz r11,pfMSSCR0(r30) ; Get MSSCR0 + isync + sync + mtspr msscr0,r11 ; Set the MSSCR0 + lwz r11,pfMSSCR1(r30) ; Get msscr1 + isync + sync + mtspr msscr1,r11 ; Set the msscr1 + isync + sync + blr + +; 7410 +; Note that this is the same as 7400 except we initialize the l2cr2 register + +init7410: li r13,0 ; Clear + mtspr 1016,r13 ; Turn off direct cache + b init7400 ; Join up with common.... + +; 7450 + +init7450: bf firstBoot,i7450nb ; Do different if not initial boot... 
+ oris r17,r17,hi16(pfAvJava) ; Show that we do Java mode in non-open source version + + mfspr r13,l2cr ; Get the L2CR + rlwinm. r0,r13,0,l2e,l2e ; Any L2? + bne+ i7450hl2 ; Yes... + rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No L2, turn off feature + +i7450hl2: lis r14,hi16(256*1024) ; Base L2 size + rlwinm r15,r13,22,12,13 ; Convert to 256k, 512k, or 768k + add r14,r14,r15 ; Add in minimum + + stw r13,pfl2cr(r30) ; Shadow the L2CR + stw r14,pfl2Size(r30) ; Set the L2 size + +; Take care of level 3 cache + + mfspr r13,l3cr ; Get the L3CR + rlwinm. r0,r13,0,l3e,l3e ; Any L3? + bne+ i7450hl3 ; Yes... + rlwinm r17,r17,0,pfL3b+1,pfL3b-1 ; No L3, turn off feature + +i7450hl3: cmplwi cr0,r13,0 ; No L3 if L3CR is zero + beq- init7450none ; Go turn off the features... + lis r14,hi16(1024*1024) ; Base L3 size + rlwinm r15,r13,4,31,31 ; Get size multiplier + slw r14,r14,r15 ; Set 1 or 2MB + + stw r13,pfl3cr(r30) ; Shadow the L3CR + stw r14,pfl3Size(r30) ; Set the L3 size + b init7450fin ; Return.... + +init7450none: + rlwinm r17,r17,0,pfL3fab+1,pfL3b-1 ; No 3rd level cache or assist + +init7450fin: mfspr r11,hid0 ; Get the current HID0 + stw r11,pfHID0(r30) ; Save the HID0 value + mfspr r11,hid1 ; Get the current HID1 + stw r11,pfHID1(r30) ; Save the HID1 value + mfspr r11,msscr0 ; Get the msscr0 register + stw r11,pfMSSCR0(r30) ; Save the MSSCR0 value + mfspr r11,msscr1 ; Get the msscr1 register + stw r11,pfMSSCR1(r30) ; Save the MSSCR1 value + mfspr r11,ictrl ; Get the ictrl register + stw r11,pfICTRL(r30) ; Save the ICTRL value + mfspr r11,ldstcr ; Get the ldstcr register + stw r11,pfLDSTCR(r30) ; Save the LDSTCR value + blr ; Return.... 
+ + +i7450nb: lwz r11,pfHID0(r30) ; Get HID0 + sync + mtspr hid0,r11 ; Set the HID + isync + lwz r11,pfHID1(r30) ; Get HID1 + sync + mtspr hid1,r11 ; Set the HID + isync + lwz r11,pfMSSCR0(r30) ; Get MSSCR0 + sync + mtspr msscr0,r11 ; Set the MSSCR0 + isync + sync + lwz r11,pfICTRL(r30) ; Get ICTRL + sync + mtspr ictrl,r11 ; Set the ICTRL + isync + sync + lwz r11,pfLDSTCR(r30) ; Get LDSTCR + sync + mtspr ldstcr,r11 ; Set the LDSTCR + isync + sync + blr + + +; +; Processor to feature table + +; .align 2 - Always on word boundary +; .long ptFilter - Mask of significant bits in the Version/Revision code +; - NOTE: Always order from most restrictive to least restrictive matching +; .short ptVersion - Version code from PVR. Always start with 0 which is default +; .short ptRevision - Revision code from PVR. A zero value denotes the generic attributes if not specific +; .long ptFeatures - Available features +; .long ptInitRout - Initialization routine. Can modify any of the other attributes. +; .long ptRptdProc - Processor type reported +; .long ptTempMax - Maximum operating temperature +; .long ptTempThr - Temperature threshold. 
We throttle if above +; .long ptLineSize - Level 1 cache line size +; .long ptl1iSize - Level 1 instruction cache size +; .long ptl1dSize - Level 1 data cache size + + .align 2 +processor_types: + + +; 601 (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_601 + .short 0 + .long pfFloat | pfSMPcap | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_ALL + .long 0 + .long 0 + .long 32 + .long 32*1024 + .long 32*1024 + +; 603 (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_603 + .short 0 + .long pfFloat | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_603 + .long 0 + .long 0 + .long 32 + .long 32*1024 + .long 32*1024 + +; 603e (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_603e + .short 0 + .long pfFloat | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_603e + .long 0 + .long 0 + .long 32 + .long 32*1024 + .long 32*1024 + +; 604 (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_604 + .short 0 + .long pfFloat | pfSMPcap | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_604 + .long 0 + .long 0 + .long 32 + .long 32*1024 + .long 32*1024 + +; 604e (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_604e + .short 0 + .long pfFloat | pfSMPcap | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_604e + .long 0 + .long 0 + .long 32 + .long 32*1024 + .long 32*1024 + +; 604ev (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_604ev + .short 0 + .long pfFloat | pfSMPcap | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_604e + .long 0 + .long 0 + .long 32 + .long 32*1024 + .long 32*1024 + +; 750 (ver 2.2) + + .align 2 + .long 0xFFFFFFFF ; Exact match + .short PROCESSOR_VERSION_750 + .short 0x4202 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL2 + .long init750 + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + 
.long 32*1024 + .long 32*1024 + +; 750CX (ver 2.x) + + .align 2 + .long 0xFFFF0F00 ; 2.x vers + .short PROCESSOR_VERSION_750 + .short 0x0200 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL2 + .long init750CX + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; 750 (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_750 + .short 0 + .long pfFloat | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pfL1i | pfL1d | pfL2 + .long init750 + .long CPU_SUBTYPE_POWERPC_750 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; 7400 (generic) + + .align 2 + .long 0xFFFF0000 ; All revisions + .short PROCESSOR_VERSION_7400 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfThermal | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa + .long init7400 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; 7410 (ver 1.1) + + .align 2 + .long 0xFFFFFFFF ; Exact match + .short PROCESSOR_VERSION_7400 + .short 0x1101 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa + .long init7410 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; 7410 (generic) + + .align 2 + .long 0xFFFF0000 ; All other revisions + .short PROCESSOR_VERSION_7410 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfCanNap | pfCanDoze | pfL1i | pfL1d | pfL1fa | pfL2 | pfL2fa + .long init7410 + .long CPU_SUBTYPE_POWERPC_7400 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; 7450 (ver 1.xx) + + .align 2 + .long 0xFFFFFF00 ; Just revisions 1.xx + .short PROCESSOR_VERSION_7450 + .short 0x0100 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfL23lck | pfL1nnc | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa + .long init7450 + .long 
CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; 7450 (>=2) + + .align 2 + .long 0xFFFF0000 ; All other revisions + .short PROCESSOR_VERSION_7450 + .short 0 + .long pfFloat | pfAltivec | pfSMPcap | pfCanSleep | pfNoMSRir | pfL23lck | pfL1i | pfL1d | pfL2 | pfL2fa | pfL2i | pfL3 | pfL3fa + .long init7450 + .long CPU_SUBTYPE_POWERPC_7450 + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 + +; Default dumb loser machine + + .align 2 + .long 0x00000000 ; Matches everything + .short 0 + .short 0 + .long pfFloat | pfL1i | pfL1d + .long 0 + .long CPU_SUBTYPE_POWERPC_ALL + .long 105 + .long 90 + .long 32 + .long 32*1024 + .long 32*1024 diff --git a/osfmk/ppc/status.c b/osfmk/ppc/status.c new file mode 100644 index 000000000..aed0b63e6 --- /dev/null +++ b/osfmk/ppc/status.c @@ -0,0 +1,988 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +extern unsigned int killprint; +extern double FloatInit; +extern unsigned long QNaNbarbarian[4]; +extern void thread_bootstrap_return(void); + + +struct ppc_saved_state * get_user_regs(thread_act_t); + +#define USRSTACK 0xc0000000 + +kern_return_t +thread_userstack( + thread_t, + int, + thread_state_t, + unsigned int, + vm_offset_t * +); + +kern_return_t +thread_entrypoint( + thread_t, + int, + thread_state_t, + unsigned int, + vm_offset_t * +); + +unsigned int get_msr_exportmask(void); +unsigned int get_msr_nbits(void); +unsigned int get_msr_rbits(void); +void thread_set_child(thread_act_t child, int pid); + +/* + * Maps state flavor to number of words in the state: + */ +unsigned int state_count[] = { + /* FLAVOR_LIST */ 0, + PPC_THREAD_STATE_COUNT, + PPC_FLOAT_STATE_COUNT, + PPC_EXCEPTION_STATE_COUNT, +}; + +/* + * thread_getstatus: + * + * Get the status of the specified thread. + */ + +kern_return_t +act_machine_get_state( + thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) +{ + + register struct savearea *sv; /* Pointer to the context savearea */ + int i, j; + unsigned int vrvalidwrk; + + register struct ppc_thread_state *ts; + register struct ppc_exception_state *es; + register struct ppc_float_state *fs; + register struct ppc_vector_state *vs; + +#if MACH_ASSERT + if (watchacts & WA_STATE) + printf("act_%x act_machine_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n", + current_act(), thr_act, flavor, tstate, + count, (count ? 
*count : 0)); +#endif /* MACH_ASSERT */ + + + switch (flavor) { + + case THREAD_STATE_FLAVOR_LIST: + + if (*count < 3) { + return (KERN_INVALID_ARGUMENT); + } + + tstate[0] = PPC_THREAD_STATE; + tstate[1] = PPC_FLOAT_STATE; + tstate[2] = PPC_EXCEPTION_STATE; + *count = 3; + + return KERN_SUCCESS; + + case PPC_THREAD_STATE: + + if (*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */ + return KERN_INVALID_ARGUMENT; + } + + ts = (struct ppc_thread_state *) tstate; + + sv = (savearea *)(thr_act->mact.pcb); /* Start with the normal savearea */ + while(sv) { /* Find the user context */ + if(sv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? */ + break; /* Outta here */ + } + sv = sv->save_prev; /* Back chain */ + } + + if(sv) { /* Is there a save area yet? */ + ts->r0 = sv->save_r0; + ts->r1 = sv->save_r1; + ts->r2 = sv->save_r2; + ts->r3 = sv->save_r3; + ts->r4 = sv->save_r4; + ts->r5 = sv->save_r5; + ts->r6 = sv->save_r6; + ts->r7 = sv->save_r7; + ts->r8 = sv->save_r8; + ts->r9 = sv->save_r9; + ts->r10 = sv->save_r10; + ts->r11 = sv->save_r11; + ts->r12 = sv->save_r12; + ts->r13 = sv->save_r13; + ts->r14 = sv->save_r14; + ts->r15 = sv->save_r15; + ts->r16 = sv->save_r16; + ts->r17 = sv->save_r17; + ts->r18 = sv->save_r18; + ts->r19 = sv->save_r19; + ts->r20 = sv->save_r20; + ts->r21 = sv->save_r21; + ts->r22 = sv->save_r22; + ts->r23 = sv->save_r23; + ts->r24 = sv->save_r24; + ts->r25 = sv->save_r25; + ts->r26 = sv->save_r26; + ts->r27 = sv->save_r27; + ts->r28 = sv->save_r28; + ts->r29 = sv->save_r29; + ts->r30 = sv->save_r30; + ts->r31 = sv->save_r31; + ts->cr = sv->save_cr; + ts->xer = sv->save_xer; + ts->lr = sv->save_lr; + ts->ctr = sv->save_ctr; + ts->srr0 = sv->save_srr0; + ts->srr1 = sv->save_srr1; + ts->mq = sv->save_mq; /* MQ register (601 only) */ + ts->vrsave = sv->save_vrsave; /* VRSAVE register (Altivec only) */ + } + else { /* No user state yet. Save seemingly random values. 
*/ + + for(i=0; i < 32; i+=2) { /* Fill up with defaults */ + ((unsigned int *)&ts->r0)[i] = ((unsigned int *)&FloatInit)[0]; + ((unsigned int *)&ts->r0)[i+1] = ((unsigned int *)&FloatInit)[1]; + } + ts->cr = 0; + ts->xer = 0; + ts->lr = ((unsigned int *)&FloatInit)[0]; + ts->ctr = ((unsigned int *)&FloatInit)[1]; + ts->srr0 = ((unsigned int *)&FloatInit)[0]; + ts->srr1 = MSR_EXPORT_MASK_SET; + ts->mq = 0; + ts->vrsave = 0; /* VRSAVE register (Altivec only) */ + } + + *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */ + return KERN_SUCCESS; + + case PPC_EXCEPTION_STATE: + + if (*count < PPC_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + es = (struct ppc_exception_state *) tstate; + + sv = (savearea *)(thr_act->mact.pcb); /* Start with the normal savearea */ + while(sv) { /* Find the user context */ + if(sv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? */ + break; /* Outta here */ + } + sv = sv->save_prev; /* Back chain */ + } + + if(sv) { /* See if valid state yet */ + es->dar = sv->save_dar; + es->dsisr = sv->save_dsisr; + es->exception = sv->save_exception; + } + else { /* Nope, not yet */ + es->dar = 0; + es->dsisr = 0; + es->exception = ((unsigned int *)&FloatInit)[0]; + } + + *count = PPC_EXCEPTION_STATE_COUNT; + return KERN_SUCCESS; + + case PPC_FLOAT_STATE: + + if (*count < PPC_FLOAT_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + fpu_save(); /* Just in case it's live, save it */ + + fs = (struct ppc_float_state *) tstate; /* Point to destination */ + + sv = (savearea *)(thr_act->mact.FPU_pcb); /* Start with the top FPU savearea */ + while(sv) { /* Find the user context */ + if(!sv->save_level_fp) { /* Are we looking at the user context? 
*/ + break; /* Outta here */ + } + sv = sv->save_prev_float; /* Back chain */ + } + + if(sv) { /* See if we have any */ + bcopy((char *)&sv->save_fp0, (char *)fs, 33*8); /* 32 registers plus status and pad */ + } + else { /* No floating point yet */ + + for(i=0; i < 32; i++) { /* Initialize floating points */ + fs->fpregs[i] = FloatInit; /* Initial value */ + } + fs->fpscr_pad = 0; /* Initial value */ + fs->fpscr = 0; /* Initial value */ + } + + *count = PPC_FLOAT_STATE_COUNT; + + return KERN_SUCCESS; + + case PPC_VECTOR_STATE: + + if (*count < PPC_VECTOR_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + vec_save(); /* Just in case it's live, save it */ + + vs = (struct ppc_vector_state *) tstate; /* Point to destination */ + + sv = (savearea *)(thr_act->mact.VMX_pcb); /* Start with the top FPU savearea */ + while(sv) { /* Find the user context */ + if(!sv->save_level_vec) { /* Are we looking at the user context? */ + break; /* Outta here */ + } + sv = sv->save_prev_vector; /* Back chain */ + } + + if(sv) { /* See if we have any */ + + vrvalidwrk = sv->save_vrvalid; /* Get the valid flags */ + vs->save_vrvalid = sv->save_vrvalid; /* Set the valid flags */ + for(j=0; j < 4; j++) vs->save_vscr[j] = sv->save_vscr[j]; /* Set value for vscr */ + + for(i=0; i < 32; i++) { /* Copy the saved registers and invalidate the others */ + for(j=0; j < 4; j++) { + if(vrvalidwrk & 0x80000000) (vs->save_vr)[i][j] = + ((unsigned int *)&(sv->save_vr0))[(i * 4) + j]; /* We have this register saved */ + else vs->save_vr[i][j] = QNaNbarbarian[j]; /* Set invalid value */ + } + vrvalidwrk = vrvalidwrk << 1; /* Shift over to the next */ + } + } + else { /* No vector yet */ + + for(i=0; i < 32; i++) { /* Initialize vector registers */ + for(j=0; j < 4; j++) vs->save_vr[i][j] = QNaNbarbarian[j]; /* Initial value */ + } + for(j=0; j < 4; j++) vs->save_vscr[j] = 0; /* Initial value */ + vs->save_vrvalid = 0; /* Clear the valid flags */ + } + + for (i=0; i < 4; i++) vs->save_pad5[i] = 0; /* 
Clear cruft */ + for (i=0; i < 7; i++) vs->save_pad6[i] = 0; /* Clear cruft */ + + *count = PPC_VECTOR_STATE_COUNT; + return KERN_SUCCESS; + + default: + return KERN_INVALID_ARGUMENT; + } +} + + +/* + * thread_setstatus: + * + * Set the status of the specified thread. + */ +kern_return_t +act_machine_set_state( + thread_act_t thr_act, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count) +{ + + savearea *sv, *osv, *usv, *ssv; + unsigned int spc, i, *srs, isnew, clgn; + register struct ppc_thread_state *ts; + register struct ppc_exception_state *es; + register struct ppc_float_state *fs; + register struct ppc_vector_state *vs; + spl_t spl; + + int kernel_act = thr_act->kernel_loading || thr_act->kernel_loaded; + +#if MACH_ASSERT + if (watchacts & WA_STATE) + printf("act_%x act_machine_set_state(thr_act=%x,flav=%x,st=%x,cnt=%x)\n", + current_act(), thr_act, flavor, tstate, count); +#endif /* MACH_ASSERT */ + +// dbgTrace((unsigned int)thr_act, (unsigned int)sv, flavor); /* (TEST/DEBUG) */ + + clgn = count; /* Get the count */ + + switch (flavor) { /* Validate the count before we do anything else */ + case PPC_THREAD_STATE: + + if (clgn < PPC_THREAD_STATE_COUNT) { /* Is it too short? */ + return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ + } + + if(clgn > PPC_THREAD_STATE_COUNT) clgn = PPC_THREAD_STATE_COUNT; /* If too long, pin it at max */ + break; + + case PPC_EXCEPTION_STATE: + + if (clgn < PPC_EXCEPTION_STATE_COUNT) { /* Is it too short? */ + return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ + } + + if(clgn > PPC_EXCEPTION_STATE_COUNT) clgn = PPC_EXCEPTION_STATE_COUNT; /* If too long, pin it at max */ + break; + + case PPC_FLOAT_STATE: + + if (clgn < PPC_FLOAT_STATE_COUNT) { /* Is it too short? */ + return KERN_INVALID_ARGUMENT; /* Yeah, just leave... 
*/ + } + + if(clgn > PPC_FLOAT_STATE_COUNT) clgn = PPC_FLOAT_STATE_COUNT; /* If too long, pin it at max */ + break; + + + case PPC_VECTOR_STATE: + + if (clgn < PPC_VECTOR_STATE_COUNT) { /* Is it too short? */ + return KERN_INVALID_ARGUMENT; /* Yeah, just leave... */ + } + + if(clgn > PPC_VECTOR_STATE_COUNT) clgn = PPC_VECTOR_STATE_COUNT; /* If too long, pin it at max */ + break; + + default: + return KERN_INVALID_ARGUMENT; + } + + isnew = 0; /* Remember when we make a new one */ + + switch (flavor) { + + case PPC_THREAD_STATE: + case PPC_EXCEPTION_STATE: + + ts = (struct ppc_thread_state *)tstate; + + sv = (savearea *)thr_act->mact.pcb; /* Get the top savearea on the stack */ + osv = 0; /* Set no user savearea yet */ + + while(sv) { /* Find the user context */ + if(sv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? */ + break; /* Outta here */ + } + osv = sv; /* Save the last one */ + sv = sv->save_prev; /* Get the previous context */ + } + + if(!sv) { /* We didn't find a user context so allocate and initialize one */ + isnew = 1; /* Remember we made a new one */ + sv = save_alloc(); /* Get one */ + sv->save_act = thr_act; /* Point to the activation */ + sv->save_flags |= SAVattach; /* Say that it is in use */ + sv->save_srr1 = MSR_EXPORT_MASK_SET & ~MASK(MSR_PR); /* Assume kernel state */ + sv->save_xfpscrpad = 0; /* Start with a clear fpscr */ + sv->save_xfpscr = 0; /* Start with a clear fpscr */ + + spc = (unsigned int)thr_act->map->pmap->space; /* Get the space we're in */ + + srs = (unsigned int *)&sv->save_sr0; /* Point to the SRs */ + for(i=0; i < 16; i++) { /* Fill in the SRs for the new context */ + srs[i] = SEG_REG_PROT | (i<<20) | spc; /* Set the SR */ + } + + sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | spc; /* Make sure the copyin is set */ + + if(osv) { /* Did we already have one? 
*/ + osv->save_prev = sv; /* Chain us on the end */ + } + else { /* We are the first */ + thr_act->mact.pcb = (pcb_t)sv; /* Put it there */ + } + sv->save_prev = 0; /* Properly terminate the chain */ + + } + + if(flavor == PPC_THREAD_STATE) { /* Are we updating plain state? */ + + sv->save_r0 = ts->r0; + sv->save_r1 = ts->r1; + sv->save_r2 = ts->r2; + sv->save_r3 = ts->r3; + sv->save_r4 = ts->r4; + sv->save_r5 = ts->r5; + sv->save_r6 = ts->r6; + sv->save_r7 = ts->r7; + sv->save_r8 = ts->r8; + sv->save_r9 = ts->r9; + sv->save_r10 = ts->r10; + sv->save_r11 = ts->r11; + sv->save_r12 = ts->r12; + sv->save_r13 = ts->r13; + sv->save_r14 = ts->r14; + sv->save_r15 = ts->r15; + sv->save_r16 = ts->r16; + sv->save_r17 = ts->r17; + sv->save_r18 = ts->r18; + sv->save_r19 = ts->r19; + sv->save_r20 = ts->r20; + sv->save_r21 = ts->r21; + sv->save_r22 = ts->r22; + sv->save_r23 = ts->r23; + sv->save_r24 = ts->r24; + sv->save_r25 = ts->r25; + sv->save_r26 = ts->r26; + sv->save_r27 = ts->r27; + sv->save_r28 = ts->r28; + sv->save_r29 = ts->r29; + sv->save_r30 = ts->r30; + sv->save_r31 = ts->r31; + + sv->save_cr = ts->cr; + sv->save_xer = ts->xer; + sv->save_lr = ts->lr; + sv->save_ctr = ts->ctr; + sv->save_srr0 = ts->srr0; + sv->save_mq = ts->mq; + sv->save_vrsave = ts->vrsave; /* VRSAVE register (Altivec only) */ + + sv->save_srr1 = MSR_PREPARE_FOR_IMPORT(sv->save_srr1, ts->srr1); /* Set the bits we can change */ + + if(!kernel_act) sv->save_srr1 |= MSR_EXPORT_MASK_SET; /* If not a kernel guy, force the magic bits on */ + + sv->save_srr1 &= ~(MASK(MSR_FP) | MASK(MSR_FP)); /* Make sure we don't enable the floating point unit */ + + if(isnew) { /* Is it a new one? 
*/ + sv->save_dar = 0; /* Yes, these need initialization also */ + sv->save_dsisr = 0; + sv->save_exception = 0; + } + + return KERN_SUCCESS; + } + else { /* This must be exception state */ + if(isnew) /* If new, we need to initialize the normal registers */ + for(i=0; i < 32; i+=2) { /* Fill up with defaults */ + ((unsigned int *)&sv->save_r0)[i] = ((unsigned int *)&FloatInit)[0]; + ((unsigned int *)&sv->save_r0)[i+1] = ((unsigned int *)&FloatInit)[1]; + } + sv->save_cr = 0; + sv->save_xer = 0; + sv->save_lr = ((unsigned int *)&FloatInit)[0]; + sv->save_ctr = ((unsigned int *)&FloatInit)[1]; + sv->save_srr0 = ((unsigned int *)&FloatInit)[0]; + sv->save_srr1 = MSR_EXPORT_MASK_SET; + sv->save_mq = 0; + sv->save_vrsave = 0; /* VRSAVE register (Altivec only) */ + } + + es = (struct ppc_exception_state *) tstate; + + sv->save_dar = es->dar; + sv->save_dsisr = es->dsisr; + sv->save_exception = es->exception; + + return KERN_SUCCESS; + + case PPC_FLOAT_STATE: + + spl = splhigh(); /* Don't bother me while I'm zapping the owner stuff */ + + if (per_proc_info[cpu_number()].FPU_thread == (unsigned int)thr_act) /* If we own the FPU, and */ + if(!thr_act->mact.FPU_lvl) per_proc_info[cpu_number()].FPU_thread = 0; /* it's user level, say we don't own it any more */ + + splx(spl); /* Restore the interrupt level */ + + sv = (savearea *)thr_act->mact.FPU_pcb; /* Get the top savearea on the stack */ + osv = 0; /* Set no user savearea yet */ + + while(sv) { /* Find the user context */ + if(!(sv->save_level_fp)) { /* Are we looking at the user context? */ + break; /* Outta here */ + } + osv = sv; /* Save the last one */ + sv = sv->save_prev_float; /* Get the previous context */ + } + + if(!sv) { /* We didn't find a user context so allocate and initialize one */ + + sv = (savearea *)thr_act->mact.pcb; /* Point to the top savearea on the normal stack */ + + while(sv) { /* Have we hit the end? */ + if(!(sv->save_flags & SAVfpuvalid)) break; /* Is floating point in use here? 
*/ + sv = sv->save_prev; /* Back chain */ + } + + if(!sv) { /* If there wasn't one on the normal chain, check vector */ + sv = (savearea *)thr_act->mact.VMX_pcb; /* Point to the top savearea on the vector stack */ + while(sv) { /* Have we hit the end? */ + if(!(sv->save_flags & SAVfpuvalid)) break; /* Is floating point in use here? */ + sv = sv->save_prev_vector; /* Back chain */ + } + } + + if(!sv) { /* Do we have one yet? */ + sv = save_alloc(); /* If we still don't have one, get a new one */ + sv->save_act = thr_act; /* Point to the activation */ + + spc=(unsigned int)thr_act->map->pmap->space; /* Get the space we're in */ + + srs=(unsigned int *)&sv->save_sr0; /* Point to the SRs */ + for(i=0; i < 16; i++) { /* Fill in the SRs for the new context */ + srs[i] = SEG_REG_PROT | (i<<20) | spc; /* Set the SR */ + } + + sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | spc; /* Make sure the copyin is set */ + } + + if(osv) { /* Did we already have one? */ + osv->save_prev_float = sv; /* Chain us on the end */ + } + else { /* We are the first */ + thr_act->mact.FPU_pcb = (pcb_t)sv; /* Put it there */ + } + sv->save_prev_float = 0; /* Properly terminate the chain */ + sv->save_level_fp = 0; /* Make sure we are for the user level */ + sv->save_flags |= SAVfpuvalid; /* Say that it is in use by floating point */ + } + + fs = (struct ppc_float_state *) tstate; /* Point to source */ + + + bcopy((char *)fs, (char *)&sv->save_fp0, clgn*4); /* 32 registers plus status and pad */ + + usv = find_user_regs(thr_act); /* Find the user registers */ + if(!usv) usv = get_user_regs(thr_act); /* Didn't find any, allocate and initialize one */ + + usv->save_xfpscrpad = sv->save_fpscr_pad; /* Copy the pad value to normal */ + usv->save_xfpscr = sv->save_fpscr; /* Copy the fpscr value to normal */ + + + return KERN_SUCCESS; + + + case PPC_VECTOR_STATE: + + spl = splhigh(); /* Don't bother me while I'm zapping the owner stuff */ + + if (per_proc_info[cpu_number()].VMX_thread == 
(unsigned int)thr_act) /* If we own the vector, and */ + if(!thr_act->mact.VMX_lvl) per_proc_info[cpu_number()].VMX_thread = 0; /* it's user level, say we don't own it any more */ + + splx(spl); /* Restore the interrupt level */ + + sv = (savearea *)thr_act->mact.VMX_pcb; /* Get the top savearea on the stack */ + osv = 0; /* Set no user savearea yet */ + + while(sv) { /* Find the user context */ + if(!(sv->save_level_vec)) { /* Are we looking at the user context? */ + break; /* Outta here */ + } + osv = sv; /* Save the last one */ + sv = sv->save_prev_vector; /* Get the previous context */ + } + + if(!sv) { /* We didn't find a user context so allocate and initialize one */ + + sv = (savearea *)thr_act->mact.pcb; /* Point to the top savearea on the normal stack */ + + while(sv) { /* Have we hit the end? */ + if(!(sv->save_flags & SAVvmxvalid)) break; /* Is vector in use here? */ + sv = sv->save_prev; /* Back chain */ + } + + if(!sv) { /* If there wasn't one on the normal chain, check vector */ + sv = (savearea *)thr_act->mact.FPU_pcb; /* Point to the top savearea on the FPU stack */ + while(sv) { /* Have we hit the end? */ + if(!(sv->save_flags & SAVvmxvalid)) break; /* Is vector in use here? */ + sv = sv->save_prev_float; /* Get the previous context */ + } + } + + if(!sv) { /* Do we have one yet? */ + sv = save_alloc(); /* If we still don't have one, get a new one */ + sv->save_act = thr_act; /* Point to the activation */ + + spc=(unsigned int)thr_act->map->pmap->space; /* Get the space we're in */ + + srs=(unsigned int *)&sv->save_sr0; /* Point to the SRs */ + for(i=0; i < 16; i++) { /* Fill in the SRs for the new context */ + srs[i] = SEG_REG_PROT | (i<<20) | spc; /* Set the SR */ + } + + sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | spc; /* Make sure the copyin is set */ + } + + if(osv) { /* Did we already have one? 
*/ + osv->save_prev_vector = sv; /* Chain us on the end */ + } + else { /* We are the first */ + thr_act->mact.VMX_pcb = (pcb_t)sv; /* Put it there */ + } + sv->save_prev_vector = 0; /* Properly terminate the chain */ + sv->save_level_vec = 0; /* Make sure we are for the user level */ + sv->save_flags |= SAVvmxvalid; /* Say that it is in use by vector */ + } + + + vs = (struct ppc_vector_state *) tstate; /* Point to source */ + + bcopy((char *)vs, (char *)&sv->save_vr0, clgn*4); /* 32 registers plus status and validity and pad */ + + return KERN_SUCCESS; + + + default: + return KERN_INVALID_ARGUMENT; + } +} + +/* + * Duplicates the context of one thread into a new one. + * The new thread is assumed to be new and have no user state contexts. + * We also assume that the old thread can't be running anywhere. + * + * We're only going to be duplicating user context here. That means that we will have to + * eliminate any floating point or vector kernel contexts and carry across the user state ones. + * We will optimize and cram all states into one savearea. Actually that will be the easiest thing + * to do. + */ + +void act_thread_dup(thread_act_t old, thread_act_t new) { + + savearea *sv, *osv, *fsv; + unsigned int spc, i, *srs; + + fpu_save(); /* Make certain floating point state is all saved */ + vec_save(); /* Make certain the vector state is all saved */ + + osv = (savearea *)new->mact.pcb; /* Get the top savearea on the stack */ + sv = 0; /* Set no new user savearea yet */ + + while(osv) { /* Find the user context */ + if(osv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? 
*/ + sv=osv; /* Say which to use */ + break; /* Outta here */ + } + osv=osv->save_prev; /* Get the previous context */ + } + + if(!sv) { /* We didn't find a user context so allocate and initialize one */ + osv = (savearea *)new->mact.pcb; /* Point to the top savearea on the stack */ + sv = save_alloc(); /* Get one */ + sv->save_flags |= SAVattach; /* Say that it is in use */ + sv->save_act = new; /* Point to the activation */ + + spc=(unsigned int)new->map->pmap->space; /* Get the space we're in */ + + srs=(unsigned int *)&sv->save_sr0; /* Point to the SRs */ + for(i=0; i < 16; i++) { /* Fill in the SRs for the new context */ + srs[i] = SEG_REG_PROT | (i<<20) | spc; /* Set the SR */ + } + + sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | spc; /* Make sure the copyin is set */ + + if(osv) { /* Did we already have one? */ + sv->save_prev = osv->save_prev; /* Move the back chain of the top savearea */ + osv->save_prev = sv; /* Chain us just after it */ + } + else { /* We are the first */ + new->mact.pcb = (pcb_t)sv; /* Make it the active one */ + } + + } + + osv = (savearea *)(old->mact.pcb); /* Start with the normal savearea */ + while(osv) { /* Find the user context */ + if(osv->save_srr1 & MASK(MSR_PR)) { /* Are we looking at the user context? 
*/ + break; /* Outta here */ + } + osv = osv->save_prev; /* Back chain */ + } + + bcopy((char *)&osv->save_srr0, (char *)&sv->save_srr0, sizeof(struct ppc_thread_state)); /* Copy in normal state stuff */ + + sv->save_xfpscrpad = osv->save_xfpscrpad; /* Copy the pad value to old */ + sv->save_xfpscr = osv->save_xfpscr; /* Copy the fpscr value to old */ + + new->mact.FPU_pcb = (pcb_t)0 ; /* Initialize floating point savearea */ + new->mact.FPU_lvl = (pcb_t)0 ; /* Initialize floating point level */ + new->mact.FPU_cpu = 0 ; /* Initialize last used cpu (FP not live, so this doesn't really matter) */ + new->mact.VMX_pcb = (pcb_t)0 ; /* Initialize vector savearea */ + new->mact.VMX_lvl = (pcb_t)0 ; /* Initialize vector level */ + new->mact.VMX_cpu = 0 ; /* Initialize last used cpu (vector not live, so this doesn't reall matter) */ + + sv->save_prev_float = (savearea *)0; /* Clear the back chain */ + sv->save_prev_vector = (savearea *)0; /* Clear the back chain */ + + sv->save_srr1 &= ~(MASK(MSR_FP) | MASK(MSR_VEC)); /* Make certain that floating point and vector are turned off */ + + fsv = (savearea *)old->mact.FPU_pcb; /* Get the start of the floating point chain */ + while(fsv) { /* Look until the end or we find it */ + if(!(fsv->save_level_fp)) { /* Is the the user state stuff? (the level is 0 if so) */ + sv->save_flags |= SAVfpuvalid; /* Show we have it */ + bcopy((char *)&osv->save_fp0, (char *)&sv->save_fp0, sizeof(struct ppc_float_state)); /* Copy in floating point state stuff */ + new->mact.FPU_pcb = (pcb_t)sv; /* Make it the active one */ + break; /* Done, everything else is all set up... */ + } + fsv = fsv->save_prev_float; /* Try the previous one */ + } + + fsv = (savearea *)old->mact.VMX_pcb; /* Get the start of the vector chain */ + while(fsv) { /* Look until the end or we find it */ + if(!(fsv->save_level_vec)) { /* Is the the user state stuff? 
(the level is 0 if so) */
			sv->save_flags |= SAVvmxvalid;		/* Show we have it */
			/* NOTE(review): copies from osv (the general user savearea), not from
			   the fsv entry just found on the VMX chain -- confirm the user vector
			   state really lives in osv after vec_save(). */
			bcopy((char *)&osv->save_vr0, (char *)&sv->save_vr0, sizeof(struct ppc_vector_state));	/* Copy in Altivec state stuff */
			new->mact.VMX_pcb = (pcb_t)sv;		/* Make it the active one */
			break;					/* Done, everything else is all set up... */
		}
		fsv = fsv->save_prev_vector;			/* Try the previous one */
	}

	return;							/* Bye bye... */
}

/*
 *	Initializes a fresh set of user state values.  If there is no user state context,
 *	one is created.  Floats and VMX are not created.  We set initial values for everything.
 *
 *	Every register is reset: GPRs, LR, CTR, and SRR0 are filled from the FloatInit
 *	pattern (a recognizable "uninitialized" value), SRR1 gets the exported user MSR
 *	bits, and the segment registers are rebuilt for the activation's address space.
 *
 *	Returns a pointer to the (possibly newly allocated) user savearea.
 */

struct ppc_saved_state * get_user_regs(thread_act_t act) {

 	savearea		*sv, *osv;
	unsigned int	spc, i, *srs;

	sv = (savearea *)act->mact.pcb;			/* Get the top savearea on the stack */
	osv = 0;					/* Set no user savearea yet */

	while (sv) {					/* Find the user context */
		if (sv->save_srr1 & MASK(MSR_PR)) {	/* Are we looking at the user context? */
			break;				/* Outta here */
		}
		osv = sv;				/* Save the last one */
		sv = sv->save_prev;			/* Get the previous context */
	}

	if (!sv) {		/* We didn't find a user context so allocate and initialize one */
		sv = save_alloc();			/* Get one */
		sv->save_flags |= SAVattach;		/* Say that it is in use */
		sv->save_act = act;			/* Point to the activation */

		if (osv) {				/* Did we already have one? */
			osv->save_prev = sv;		/* Chain us on the end */
		}
		else {					/* We are the first */
			act->mact.pcb = (pcb_t)sv;	/* Put it there */
		}
		sv->save_prev = 0;			/* Properly terminate the chain */
	}

	for (i=0; i < 32; i+=2) {			/* Fill up with defaults (two words of FloatInit per pair of GPRs) */
		((unsigned int *)&sv->save_r0)[i] = ((unsigned int *)&FloatInit)[0];
		((unsigned int *)&sv->save_r0)[i+1] = ((unsigned int *)&FloatInit)[1];
	}
	sv->save_cr	= 0;
	sv->save_xer	= 0;
	sv->save_lr	= ((unsigned int *)&FloatInit)[0];
	sv->save_ctr	= ((unsigned int *)&FloatInit)[1];
	sv->save_srr0	= ((unsigned int *)&FloatInit)[0];
	sv->save_srr1 = MSR_EXPORT_MASK_SET;		/* User-mode MSR bits */
	sv->save_mq	= 0;
	sv->save_vrsave = 0;				/* VRSAVE register (Altivec only) */
	sv->save_xfpscrpad = 0;				/* Start with a clear fpscr */
	sv->save_xfpscr = 0;				/* Start with a clear fpscr */

	spc = (unsigned int)act->map->pmap->space;	/* Get the space we're in */

	srs = (unsigned int *)&sv->save_sr0;		/* Point to the SRs */
	for (i=0; i < 16; i++) {			/* Fill in the SRs for the new context */
		srs[i] = SEG_REG_PROT | (i<<20) | spc;	/* Set the SR */
	}

	sv->save_sr_copyin = SEG_REG_PROT | (SR_COPYIN_NUM<<20) | spc;	/* Make sure the copyin is set */

	return (struct ppc_saved_state *)sv;		/* Bye bye... */
}

/*
 *	Find the user state context by walking the normal savearea chain looking
 *	for the entry with MSR_PR (problem state) set in its saved SRR1.
 *	If there is no user state context, we just return a 0.
 */

struct ppc_saved_state * find_user_regs(thread_act_t act) {

	savearea		*sv;

	sv = (savearea *)act->mact.pcb;			/* Get the top savearea on the stack */

	while (sv) {					/* Find the user context */
		if (sv->save_srr1 & MASK(MSR_PR)) {	/* Are we looking at the user context? */
			break;				/* Outta here */
		}
		sv = sv->save_prev;			/* Get the previous context */
	}

	return (struct ppc_saved_state *)sv;		/* Bye bye... */
}

/*
 *	Find the user state floating point context.  If there is no user state context,
 *	we just return a 0.
+ */ + +struct ppc_float_state * find_user_fpu(thread_act_t act) { + + savearea *fsv; + + fsv = (savearea *)act->mact.FPU_pcb; /* Get the start of the floating point chain */ + while(fsv) { /* Look until the end or we find it */ + if(!(fsv->save_level_fp)) break; /* Is the the user state stuff? (the level is 0 if so) */ + fsv = fsv->save_prev_float; /* Try the previous one */ + } + + return (struct ppc_float_state *)&(fsv->save_fp0); /* Bye bye... */ +} + +/* + * thread_userstack: + * + * Return the user stack pointer from the machine + * dependent thread state info. + */ +kern_return_t +thread_userstack( + thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count, + vm_offset_t *user_stack +) +{ + struct ppc_thread_state *state; + + /* + * Set a default. + */ + if (*user_stack == 0) + *user_stack = USRSTACK; + + switch (flavor) { + case PPC_THREAD_STATE: + if (count < PPC_THREAD_STATE_COUNT) + return (KERN_INVALID_ARGUMENT); + + state = (struct ppc_thread_state *) tstate; + + /* + * If a valid user stack is specified, use it. + */ + *user_stack = state->r1 ? state->r1: USRSTACK; + break; + default : + return (KERN_INVALID_ARGUMENT); + } + + return (KERN_SUCCESS); +} + +kern_return_t +thread_entrypoint( + thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count, + vm_offset_t *entry_point +) +{ + struct ppc_thread_state *state; + + /* + * Set a default. + */ + if (*entry_point == 0) + *entry_point = VM_MIN_ADDRESS; + + switch (flavor) { + + case PPC_THREAD_STATE: + if (count < PPC_THREAD_STATE_COUNT) + return (KERN_INVALID_ARGUMENT); + + state = (struct ppc_thread_state *) tstate; + + /* + * If a valid entry point is specified, use it. + */ + *entry_point = state->srr0 ? 
state->srr0: VM_MIN_ADDRESS; + break; + default: + return (KERN_INVALID_ARGUMENT); + } + + return (KERN_SUCCESS); +} + +unsigned int get_msr_exportmask(void) +{ + return (MSR_EXPORT_MASK_SET); +} + +unsigned int get_msr_nbits(void) +{ + return (MASK(MSR_POW)|MASK(MSR_ILE)|MASK(MSR_IP)|MASK(MSR_LE)); +} +unsigned int get_msr_rbits(void) +{ + return (MASK(MSR_PR)|MASK(MSR_ME)|MASK(MSR_IR)|MASK(MSR_DR)|MASK(MSR_EE)); +} + +void thread_set_child(thread_act_t child, int pid) +{ + struct ppc_saved_state *child_state; + + child_state = find_user_regs(child); + + child_state->r3 = pid; + child_state->r4 = 1; +} diff --git a/osfmk/ppc/stubs.c b/osfmk/ppc/stubs.c new file mode 100644 index 000000000..709f8b6e3 --- /dev/null +++ b/osfmk/ppc/stubs.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ + +/* TODO NMGS REMOVE ALL OF THESE AND THEN THIS FILE !!! 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int +procitab(u_int spllvl, + void (*handler)(int), + int unit) +{ + printf("NMGS TODO NOT YET"); + return 0; +} diff --git a/osfmk/ppc/task.h b/osfmk/ppc/task.h new file mode 100644 index 000000000..3d5259fac --- /dev/null +++ b/osfmk/ppc/task.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ + +/* + * No machine dependant task fields + */ + +#define MACHINE_TASK + diff --git a/osfmk/ppc/testjump.c b/osfmk/ppc/testjump.c new file mode 100644 index 000000000..5debfb6dc --- /dev/null +++ b/osfmk/ppc/testjump.c @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_FREE_COPYRIGHT@ + */ + +#define VERBOSE 0 + +#include + +int recursed(jmp_buf_t *bufp, int retval, int depth) +{ + int mumbojumbo[16]; + int i; + +#if VERBOSE + for (i=0;i + +/* + * Return address of the function that called current function, given + * address of the first parameter of current function. 
We can't + * do it this way, since parameter was copied from a register + * into a local variable. Call an assembly sub-function to + * return this. + */ + +extern vm_offset_t getrpc(void); +#define GET_RETURN_PC(addr) getrpc() + +#define STACK_IKS(stack) \ + ((vm_offset_t)(((vm_offset_t)stack)+KERNEL_STACK_SIZE)-FM_SIZE) + +#define syscall_emulation_sync(task) /* do nothing */ + +/* + * Defining this indicates that MD code will supply an exception() + * routine, conformant with kern/exception.c (dependency alert!) + * but which does wonderfully fast, machine-dependent magic. + */ + +#define MACHINE_FAST_EXCEPTION 1 + +#endif /* _PPC_THREAD_H_ */ + + diff --git a/osfmk/ppc/thread_act.h b/osfmk/ppc/thread_act.h new file mode 100644 index 000000000..30bf02c8e --- /dev/null +++ b/osfmk/ppc/thread_act.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _PPC_THREAD_ACT_H_ +#define _PPC_THREAD_ACT_H_ + +#include +#include +#include +#include +#include +#include + + +/* + * Kernel state structure + * + * This holds the kernel state that is saved and restored across context + * switches. This is kept at the top of the kernel stack. + * + * XXX Some state is saved only because it is not saved on entry to the + * kernel from user mode. This needs to be straightened out. + */ + +/* + * PPC process control block + * + * In the continuation model, the PCB holds the user context. It is used + * on entry to the kernel from user mode, either by system call or trap, + * to store the necessary user registers and other state. + * + * Note that this structure overlays a savearea. Make sure that these + * guys are updated in concert with that. + */ +struct pcb +{ + struct ppc_saved_state ss; + struct ppc_exception_state es; + struct ppc_float_state fs; + unsigned int gas1[6]; /* Force alignment with savearea */ + struct ppc_vector_state vec; + +}; + +typedef struct pcb *pcb_t; + +/* + * Maps state flavor to number of words in the state: + */ +extern unsigned int state_count[]; + +#define USER_REGS(ThrAct) (&(ThrAct)->mact.pcb->ss) + +#define user_pc(ThrAct) ((ThrAct)->mact.pcb->ss.srr0) + +#define act_machine_state_ptr(ThrAct) (thread_state_t)USER_REGS(ThrAct) + +typedef struct MachineThrAct { + /* + * pointer to process control block control blocks. Potentially + * one for each active facility context. They may point to the + * same saveareas. 
+ */ + pcb_t pcb; /* The "normal" savearea */ + pcb_t FPU_pcb; /* The floating point savearea */ + pcb_t FPU_lvl; /* The floating point context level */ + unsigned int FPU_cpu; /* The last processor to enable floating point */ + pcb_t VMX_pcb; /* The VMX savearea */ + pcb_t VMX_lvl; /* The VMX context level */ + unsigned int VMX_cpu; /* The last processor to enable vector */ + struct vmmCntrlEntry *vmmCEntry; /* Pointer current emulation context or 0 */ + struct vmmCntrlTable *vmmControl; /* Pointer to virtual machine monitor control table */ + AbsoluteTime qactTimer; /* Time thread needs to interrupt. This is a single-shot timer. Zero is unset */ + unsigned int ksp; /* points to TOP OF STACK or zero */ + unsigned int bbDescAddr; /* Points to Blue Box Trap descriptor area in kernel (page aligned) */ + unsigned int bbUserDA; /* Points to Blue Box Trap descriptor area in user (page aligned) */ + unsigned int bbTableStart; /* Points to Blue Box Trap dispatch area in user */ + unsigned int bbPendRupt; /* Number of pending Blue Box interruptions */ + unsigned int bbTaskID; /* Opaque task ID for Blue Box threads */ + unsigned int bbTaskEnv; /* Opaque task data reference for Blue Box threads */ + unsigned int specFlags; /* Special flags */ + +/* special flags bits */ + +#define ignoreZeroFaultbit 0 +#define floatUsedbit 1 +#define vectorUsedbit 2 +#define bbNoMachSCbit 3 +#define runningVMbit 4 +#define floatCngbit 5 +#define vectorCngbit 6 +#define timerPopbit 7 + +#define ignoreZeroFault (1<<(31-ignoreZeroFaultbit)) +#define floatUsed (1<<(31-floatUsedbit)) +#define vectorUsed (1<<(31-vectorUsedbit)) +#define bbNoMachSC (1<<(31-bbNoMachSCbit)) +#define runningVM (1<<(31-runningVMbit)) +#define floatCng (1<<(31-floatCngbit)) +#define vectorCng (1<<(31-vectorCngbit)) +#define timerPop (1<<(31-timerPopbit)) + +#ifdef MACH_BSD + unsigned long cthread_self; /* for use of cthread package */ +#endif + +} MachineThrAct, *MachineThrAct_t; + +extern struct ppc_saved_state * 
find_user_regs(thread_act_t act); +extern struct ppc_float_state * find_user_fpu(thread_act_t act); + +#endif /* _PPC_THREAD_ACT_H_ */ diff --git a/osfmk/ppc/trap.c b/osfmk/ppc/trap.c new file mode 100644 index 000000000..3b0fbddb8 --- /dev/null +++ b/osfmk/ppc/trap.c @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* For kernel_map */ +#include +#include +#include +#include /* for SR_xxx definitions */ +#include +#include +#include + +#include + +#if MACH_KDB +#include +#include +#include +#include + +boolean_t let_ddb_vm_fault = FALSE; +boolean_t debug_all_traps_with_kdb = FALSE; +extern struct db_watchpoint *db_watchpoint_list; +extern boolean_t db_watchpoints_inserted; +extern boolean_t db_breakpoints_inserted; + + + +#endif /* MACH_KDB */ + +extern int debugger_active[NCPUS]; +extern vm_address_t bsd_init_task; +extern char init_task_failure_data[]; + +/* + * XXX don't pass VM_PROT_EXECUTE to vm_fault(), execute permission is implied + * in either R or RW (note: the pmap module knows this). This is done for the + * benefit of programs that execute out of their data space (ala lisp). + * If we didn't do this in that scenerio, the ITLB miss code would call us + * and we would call vm_fault() with RX permission. However, the space was + * probably vm_allocate()ed with just RW and vm_fault would fail. The "right" + * solution to me is to have the un*x server always allocate data with RWX for + * compatibility with existing binaries. 
+ */ + +#define PROT_EXEC (VM_PROT_READ) +#define PROT_RO (VM_PROT_READ) +#define PROT_RW (VM_PROT_READ|VM_PROT_WRITE) + +/* A useful macro to update the ppc_exception_state in the PCB + * before calling doexception + */ +#define UPDATE_PPC_EXCEPTION_STATE { \ + thread_act_t thr_act = current_act(); \ + struct ppc_exception_state *es = &thr_act->mact.pcb->es; \ + es->dar = dar; \ + es->dsisr = dsisr; \ + es->exception = trapno / T_VECTOR_SIZE; /* back to powerpc */ \ +} + +static void unresolved_kernel_trap(int trapno, + struct ppc_saved_state *ssp, + unsigned int dsisr, + unsigned int dar, + char *message); + +struct ppc_saved_state *trap(int trapno, + struct ppc_saved_state *ssp, + unsigned int dsisr, + unsigned int dar) +{ + int exception=0; + int code; + int subcode; + vm_map_t map; + unsigned int sp; + unsigned int space,space2; + unsigned int offset; + thread_act_t thr_act = current_act(); + boolean_t intr; +#ifdef MACH_BSD + time_value_t tv; +#endif /* MACH_BSD */ + +/* + * Remember that we are disabled for interruptions when we come in here. Because + * of latency concerns, we need to enable interruptions in the interrupted process + * was enabled itself as soon as we can. + */ + + intr = (ssp->srr1 & MASK(MSR_EE)) != 0; /* Remember if we were enabled */ + + /* Handle kernel traps first */ + + if (!USER_MODE(ssp->srr1)) { + /* + * Trap came from kernel + */ + switch (trapno) { + + case T_PREEMPT: /* Handle a preempt trap */ + ast_taken(FALSE, AST_URGENT, FALSE); + break; + + case T_RESET: /* Reset interruption */ +#if 0 + kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n", + ssp->srr0, ssp->srr1); +#else + panic("Unexpected Reset exception; srr0 = %08X, srr1 = %08X\n", + ssp->srr0, ssp->srr1); +#endif + break; /* We just ignore these */ + + /* + * These trap types should never be seen by trap() + * in kernel mode, anyway. 
+ * Some are interrupts that should be seen by + * interrupt() others just don't happen because they + * are handled elsewhere. Some could happen but are + * considered to be fatal in kernel mode. + */ + case T_DECREMENTER: + case T_IN_VAIN: /* Shouldn't ever see this, lowmem_vectors eats it */ + case T_MACHINE_CHECK: + case T_SYSTEM_MANAGEMENT: + case T_ALTIVEC_ASSIST: + case T_INTERRUPT: + case T_FP_UNAVAILABLE: + case T_IO_ERROR: + case T_RESERVED: + default: + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + break; + + case T_TRACE: + case T_RUNMODE_TRACE: + case T_INSTRUCTION_BKPT: + if (!Call_Debugger(trapno, ssp)) + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + break; + + case T_PROGRAM: + if (ssp->srr1 & MASK(SRR1_PRG_TRAP)) { + if (!Call_Debugger(trapno, ssp)) + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + } else { + unresolved_kernel_trap(trapno, ssp, + dsisr, dar, NULL); + } + break; + + case T_ALIGNMENT: + if (alignment(dsisr, dar, ssp)) { + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + } + break; + + case T_DATA_ACCESS: + +#if MACH_KDB + mp_disable_preemption(); + if (debug_mode + && debugger_active[cpu_number()] + && !let_ddb_vm_fault) { + /* + * Force kdb to handle this one. + */ + kdb_trap(trapno, ssp); + } + mp_enable_preemption(); +#endif /* MACH_KDB */ + + if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */ + + /* simple case : not SR_COPYIN segment, from kernel */ + if ((dar >> 28) != SR_COPYIN_NUM) { + map = kernel_map; + + offset = dar; + + +/* + * Note: Some ROM device drivers will access page 0 when they start. The IOKit will + * set a flag to tell us to ignore any access fault on page 0. After the driver is + * opened, it will clear the flag. 
+ */ + if((0 == (dar & -PAGE_SIZE)) && /* Check for access of page 0 and */ + ((thr_act->mact.specFlags) & ignoreZeroFault)) { + /* special case of ignoring page zero faults */ + ssp->srr0 += 4; /* Point to next instruction */ + break; + } + + code = vm_fault(map, trunc_page(offset), + dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO, + FALSE, THREAD_UNINT); + + if (code != KERN_SUCCESS) { + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + } else { + ((savearea *)ssp)->save_flags |= SAVredrive; /* Tell low-level to re-try fault */ + ((savearea *)ssp)->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + } + break; + } + + /* If we get here, the fault was due to a copyin/out */ + + map = thr_act->map; + + /* Mask out SR_COPYIN and mask in original segment */ + + offset = (dar & 0x0fffffff) | + ((mfsrin(dar)<<8) & 0xF0000000); + + code = vm_fault(map, trunc_page(offset), + dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO, + FALSE, THREAD_ABORTSAFE); + + /* If we failed, there should be a recovery + * spot to rfi to. + */ + if (code != KERN_SUCCESS) { + + if (thr_act->thread->recover) { + + act_lock_thread(thr_act); + ssp->srr0 = thr_act->thread->recover; + thr_act->thread->recover = + (vm_offset_t)NULL; + act_unlock_thread(thr_act); + } else { + unresolved_kernel_trap(trapno, ssp, dsisr, dar, "copyin/out has no recovery point"); + } + } + else { + ((savearea *)ssp)->save_flags |= SAVredrive; /* Tell low-level to re-try fault */ + ((savearea *)ssp)->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + } + + break; + + case T_INSTRUCTION_ACCESS: + +#if MACH_KDB + if (debug_mode + && debugger_active[cpu_number()] + && !let_ddb_vm_fault) { + /* + * Force kdb to handle this one. 
+ */ + kdb_trap(trapno, ssp); + } +#endif /* MACH_KDB */ + + /* Same as for data access, except fault type + * is PROT_EXEC and addr comes from srr0 + */ + + if(intr) ml_set_interrupts_enabled(TRUE); /* Enable if we were */ + + map = kernel_map; + + code = vm_fault(map, trunc_page(ssp->srr0), + PROT_EXEC, FALSE, THREAD_UNINT); + + if (code != KERN_SUCCESS) { + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + } else { + ((savearea *)ssp)->save_flags |= SAVredrive; /* Tell low-level to re-try fault */ + ssp->srr1 |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + } + break; + + /* Usually shandler handles all the system calls, but the + * atomic thread switcher may throwup (via thandler) and + * have to pass it up to the exception handler. + */ + + case T_SYSTEM_CALL: + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + break; + + case T_AST: + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + break; + } + } else { + + ml_set_interrupts_enabled(TRUE); /* Processing for user state traps is always enabled */ + +#ifdef MACH_BSD + { + void get_procrustime(time_value_t *); + + get_procrustime(&tv); + } +#endif /* MACH_BSD */ + + + /* + * Trap came from user task + */ + + switch (trapno) { + + case T_PREEMPT: + unresolved_kernel_trap(trapno, ssp, dsisr, dar, NULL); + break; + + /* + * These trap types should never be seen by trap() + * Some are interrupts that should be seen by + * interrupt() others just don't happen because they + * are handled elsewhere. 
+ */ + case T_DECREMENTER: + case T_IN_VAIN: /* Shouldn't ever see this, lowmem_vectors eats it */ + case T_MACHINE_CHECK: + case T_INTERRUPT: + case T_FP_UNAVAILABLE: + case T_SYSTEM_MANAGEMENT: + case T_RESERVED: + case T_IO_ERROR: + + default: + + ml_set_interrupts_enabled(FALSE); /* Turn off interruptions */ + + panic("Unexpected user state trap(cpu %d): %08X DSISR=0x%08X DAR=0x%08X PC=0x%08X, MSR=0x%08X\n", + cpu_number(), trapno, dsisr, dar, ssp->srr0, ssp->srr1); + break; + + case T_RESET: +#if 0 + kprintf("*** Reset exception ignored; srr0 = %08X, srr1 = %08X\n", + ssp->srr0, ssp->srr1); +#else + panic("Unexpected Reset exception: srr0 = %08X, srr1 = %08X\n", + ssp->srr0, ssp->srr1); +#endif + break; /* We just ignore these */ + + case T_ALIGNMENT: + if (alignment(dsisr, dar, ssp)) { + code = EXC_PPC_UNALIGNED; + exception = EXC_BAD_ACCESS; + subcode = dar; + } + break; + + case T_TRACE: /* Real PPC chips */ + if (be_tracing()) { + add_pcbuffer(); + return ssp; + } + /* fall through */ + + case T_INSTRUCTION_BKPT: /* 603 PPC chips */ + case T_RUNMODE_TRACE: /* 601 PPC chips */ + exception = EXC_BREAKPOINT; + code = EXC_PPC_TRACE; + subcode = ssp->srr0; + break; + + case T_PROGRAM: + if (ssp->srr1 & MASK(SRR1_PRG_FE)) { + fpu_save(); + UPDATE_PPC_EXCEPTION_STATE; + exception = EXC_ARITHMETIC; + code = EXC_ARITHMETIC; + + mp_disable_preemption(); + subcode = current_act()->mact.FPU_pcb->fs.fpscr; + mp_enable_preemption(); + } + else if (ssp->srr1 & MASK(SRR1_PRG_ILL_INS)) { + + UPDATE_PPC_EXCEPTION_STATE + exception = EXC_BAD_INSTRUCTION; + code = EXC_PPC_UNIPL_INST; + subcode = ssp->srr0; + } else if (ssp->srr1 & MASK(SRR1_PRG_PRV_INS)) { + + UPDATE_PPC_EXCEPTION_STATE; + exception = EXC_BAD_INSTRUCTION; + code = EXC_PPC_PRIVINST; + subcode = ssp->srr0; + } else if (ssp->srr1 & MASK(SRR1_PRG_TRAP)) { + unsigned int inst; + + if (copyin((char *) ssp->srr0, (char *) &inst, 4 )) + panic("copyin failed\n"); + UPDATE_PPC_EXCEPTION_STATE; + if (inst == 0x7FE00008) 
{ + exception = EXC_BREAKPOINT; + code = EXC_PPC_BREAKPOINT; + } else { + exception = EXC_SOFTWARE; + code = EXC_PPC_TRAP; + } + subcode = ssp->srr0; + } + break; + + case T_ALTIVEC_ASSIST: + UPDATE_PPC_EXCEPTION_STATE; + exception = EXC_ARITHMETIC; + code = EXC_PPC_ALTIVECASSIST; + subcode = ssp->srr0; + break; + + case T_DATA_ACCESS: + map = thr_act->map; + + code = vm_fault(map, trunc_page(dar), + dsisr & MASK(DSISR_WRITE) ? PROT_RW : PROT_RO, + FALSE, THREAD_ABORTSAFE); + + if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) { + UPDATE_PPC_EXCEPTION_STATE; + exception = EXC_BAD_ACCESS; + subcode = dar; + } else { + ((savearea *)ssp)->save_flags |= SAVredrive; /* Tell low-level to re-try fault */ + ((savearea *)ssp)->save_dsisr |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + } + break; + + case T_INSTRUCTION_ACCESS: + /* Same as for data access, except fault type + * is PROT_EXEC and addr comes from srr0 + */ + map = thr_act->map; + + code = vm_fault(map, trunc_page(ssp->srr0), + PROT_EXEC, FALSE, THREAD_ABORTSAFE); + + if ((code != KERN_SUCCESS) && (code != KERN_ABORTED)) { + UPDATE_PPC_EXCEPTION_STATE; + exception = EXC_BAD_ACCESS; + subcode = ssp->srr0; + } else { + ((savearea *)ssp)->save_flags |= SAVredrive; /* Tell low-level to re-try fault */ + ssp->srr1 |= MASK(DSISR_HASH); /* Make sure this is marked as a miss */ + } + break; + + case T_AST: + ml_set_interrupts_enabled(FALSE); + ast_taken(FALSE, AST_ALL, intr); + break; + + } +#ifdef MACH_BSD + { + void bsd_uprofil(time_value_t *, unsigned int); + + bsd_uprofil(&tv, ssp->srr0); + } +#endif /* MACH_BSD */ + } + + if (exception) { + /* if this is the init task, save the exception information */ + /* this probably is a fatal exception */ + if(bsd_init_task == current_task()) { + char *buf; + int i; + + buf = init_task_failure_data; + + + buf += sprintf(buf, "Exception Code = 0x%x, Subcode = 0x%x\n", code, subcode); + buf += sprintf(buf, "DSISR = 0x%08x, DAR = 0x%08x\n" + , dsisr, dar); + 
+ for (i=0; i<32; i++) { + if ((i % 8) == 0) { + buf += sprintf(buf, "\n%4d :",i); + } + buf += sprintf(buf, " %08x",*(&ssp->r0+i)); + } + + buf += sprintf(buf, "\n\n"); + buf += sprintf(buf, "cr = 0x%08x\t\t",ssp->cr); + buf += sprintf(buf, "xer = 0x%08x\n",ssp->xer); + buf += sprintf(buf, "lr = 0x%08x\t\t",ssp->lr); + buf += sprintf(buf, "ctr = 0x%08x\n",ssp->ctr); + buf += sprintf(buf, "srr0(iar) = 0x%08x\t\t",ssp->srr0); + buf += sprintf(buf, "srr1(msr) = 0x%08B\n",ssp->srr1, + "\x10\x11""EE\x12PR\x13""FP\x14ME\x15""FE0\x16SE\x18" + "FE1\x19""AL\x1a""EP\x1bIT\x1c""DT"); + buf += sprintf(buf, "\n\n"); + + /* generate some stack trace */ + buf += sprintf(buf, "Application level back trace:\n"); + if (ssp->srr1 & MASK(MSR_PR)) { + char *addr = (char*)ssp->r1; + unsigned int stack_buf[3]; + for (i = 0; i < 8; i++) { + if (addr == (char*)NULL) + break; + if (!copyin(addr,(char*)stack_buf, + 3 * sizeof(int))) { + buf += sprintf(buf, "0x%08x : 0x%08x\n" + ,addr,stack_buf[2]); + addr = (char*)stack_buf[0]; + } else { + break; + } + } + } + buf[0] = '\0'; + } + doexception(exception, code, subcode); + } + /* AST delivery + * Check to see if we need an AST, if so take care of it here + */ + ml_set_interrupts_enabled(FALSE); + if (USER_MODE(ssp->srr1)) + while (ast_needed(cpu_number())) { + ast_taken(FALSE, AST_ALL, intr); + ml_set_interrupts_enabled(FALSE); + } + + return ssp; +} + +/* This routine is called from assembly before each and every system call. + * It must preserve r3. 
+ */
+
+extern int syscall_trace(int, struct ppc_saved_state *);
+
+
+extern int pmdebug;
+
+/*
+ * syscall_trace: emits a kdebug DBG_FUNC_START record for Mach traps
+ * (r0 high bit set, i.e. negative trap number) when tracing is enabled.
+ * Returns its first argument unchanged -- preserves r3 for the
+ * assembly caller.
+ */
+int syscall_trace(int retval, struct ppc_saved_state *ssp)
+{
+	int i, argc;
+
+	int kdarg[3];
+	/* Always prepare to trace mach system calls */
+	if (kdebug_enable && (ssp->r0 & 0x80000000)) {
+		/* Mach trap */
+		kdarg[0]=0;
+		kdarg[1]=0;
+		kdarg[2]=0;
+		/* r0 holds the negated Mach trap number; look up how many
+		 * arguments it takes (at most 3 fit in a trace record) */
+		argc = mach_trap_table[-(ssp->r0)].mach_trap_arg_count;
+		if (argc > 3)
+			argc = 3;
+		for (i=0; i < argc; i++)
+			kdarg[i] = (int)*(&ssp->r3 + i);	/* arguments arrive in r3.. */
+		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (-(ssp->r0))) | DBG_FUNC_START,
+			kdarg[0], kdarg[1], kdarg[2], 0, 0);
+	}
+
+	return retval;
+}
+
+/* This routine is called from assembly after each mach system call
+ * It must preserve r3.
+ */
+
+extern int syscall_trace_end(int, struct ppc_saved_state *);
+
+/*
+ * syscall_trace_end: emits the matching DBG_FUNC_END record carrying
+ * the trap's return value.  Returns retval unchanged (preserves r3).
+ */
+int syscall_trace_end(int retval, struct ppc_saved_state *ssp)
+{
+	if (kdebug_enable && (ssp->r0 & 0x80000000)) {
+		/* Mach trap */
+		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(ssp->r0))) | DBG_FUNC_END,
+			retval, 0, 0, 0, 0);
+	}
+	return retval;
+}
+
+/*
+ * called from syscall if there is an error
+ *
+ * Raises a user-level Mach exception for a failed system call.
+ * Panics if called before threads exist or from kernel mode,
+ * since exception delivery requires a user context.  Always
+ * returns 0.
+ */
+
+int syscall_error(
+	int exception,
+	int code,
+	int subcode,
+	struct ppc_saved_state *ssp)
+{
+	register thread_t thread;
+
+	thread = current_thread();
+
+	if (thread == 0)
+		panic("syscall error in boot phase");
+
+	if (!USER_MODE(ssp->srr1))
+		panic("system call called from kernel");
+
+	doexception(exception, code, subcode);
+
+	return 0;
+}
+
+/* Pass up a server syscall/exception */
+void
+doexception(
+	    int exc,
+	    int code,
+	    int sub)
+{
+	exception_data_type_t codes[EXCEPTION_CODE_MAX];
+
+	codes[0] = code;	/* machine-independent code */
+	codes[1] = sub;		/* machine-dependent subcode (e.g. faulting address) */
+	exception(exc, codes, 2);	/* deliver via the Mach exception path */
+}
+
+/* Human-readable names for the PPC exception vectors, indexed by
+ * trapno / T_VECTOR_SIZE (see unresolved_kernel_trap). */
+char *trap_type[] = {
+	"0x000 Interrupt in vain",
+	"0x100 System reset",
+	"0x200 Machine check",
+	"0x300 Data access",
+	"0x400 Instruction access",
+	"0x500 External interrupt",
+	"0x600 Alignment",
+	"0x700 Program",
+	"0x800 Floating point",
+	"0x900 Decrementer",
+	"0xA00 I/O 
controller interface",
+	"0xB00 INVALID EXCEPTION",
+	"0xC00 System call exception",
+	"0xD00 Trace",
+	"0xE00 FP assist",
+	"0xF20 VMX",
+	"0xF00 INVALID EXCEPTION",
+	"0x1000 Instruction PTE miss",
+	"0x1100 Data load PTE miss",
+	"0x1200 Data store PTE miss",
+	"0x1300 Instruction Breakpoint",
+	"0x1400 System Management",
+	"0x1500 INVALID EXCEPTION",
+	"0x1600 Altivec Assist",
+	"0x1700 INVALID EXCEPTION",
+	"0x1800 INVALID EXCEPTION",
+	"0x1900 INVALID EXCEPTION",
+	"0x1A00 INVALID EXCEPTION",
+	"0x1B00 INVALID EXCEPTION",
+	"0x1C00 INVALID EXCEPTION",
+	"0x1D00 INVALID EXCEPTION",
+	"0x1E00 INVALID EXCEPTION",
+	"0x1F00 INVALID EXCEPTION",
+	"0x2000 Run Mode/Trace"
+};
+int TRAP_TYPES = sizeof (trap_type) / sizeof (trap_type[0]);
+
+/*
+ * unresolved_kernel_trap: last-resort handler for kernel-mode exceptions
+ * nobody claimed.  Prints a description and a backtrace, gives the
+ * debugger a chance, then panics with `message' (or the trap's name
+ * when no message is supplied).  Does not return.
+ */
+void unresolved_kernel_trap(int trapno,
+			    struct ppc_saved_state *ssp,
+			    unsigned int dsisr,
+			    unsigned int dar,
+			    char *message)
+{
+	char *trap_name;
+	extern void print_backtrace(struct ppc_saved_state *);
+	extern unsigned int debug_mode, disableDebugOuput;
+
+	ml_set_interrupts_enabled(FALSE);	/* Turn off interruptions */
+
+	disableDebugOuput = FALSE;	/* Re-enable debug output for the panic */
+	debug_mode++;
+	if ((unsigned)trapno <= T_MAX)
+		trap_name = trap_type[trapno / T_VECTOR_SIZE];	/* vectors are T_VECTOR_SIZE apart */
+	else
+		trap_name = "???? unrecognized exception";
+	if (message == NULL)
+		message = trap_name;
+
+	printf("\n\nUnresolved kernel trap(cpu %d): %s DSISR=0x%08x DAR=0x%08x PC=0x%08x, MSR=0x%08X\n"
+	       "generating stack backtrace prior to panic:\n\n",
+	       cpu_number(), trap_name, dsisr, dar, ssp->srr0, ssp->srr1);
+
+	print_backtrace(ssp);
+
+	(void *)Call_Debugger(trapno, ssp);
+	panic(message);
+}
+
+/*
+ * thread_syscall_return: deposit a Mach syscall return value in the
+ * user's r3, emit the kdebug end record for traced Mach traps, and
+ * resume user mode via thread_exception_return().  Does not return.
+ */
+void
+thread_syscall_return(
+	kern_return_t ret)
+{
+	register thread_act_t thr_act = current_act();
+	register struct ppc_saved_state *regs = USER_REGS(thr_act);
+
+	if (kdebug_enable && (regs->r0 & 0x80000000)) {
+		/* Mach trap */
+		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(-(regs->r0))) | DBG_FUNC_END,
+			ret, 0, 0, 0, 0);
+	}
+	regs->r3 = ret;		/* PPC convention: result returns in r3 */
+
+	thread_exception_return();
+	/*NOTREACHED*/
+}
+
+
+#if MACH_KDB
+/*
+ * thread_kdb_return: re-enter the kernel debugger for the exception
+ * recorded in the current activation's PCB, then resume user mode.
+ * Does not return.
+ */
+void
+thread_kdb_return(void)
+{
+	register thread_act_t thr_act = current_act();
+	register thread_t cur_thr = current_thread();
+	register struct ppc_saved_state *regs = USER_REGS(thr_act);
+
+	Call_Debugger(thr_act->mact.pcb->es.exception, regs);
+#if MACH_LDEBUG
+	assert(cur_thr->mutex_count == 0);
+#endif /* MACH_LDEBUG */
+	check_simple_locks();
+	thread_exception_return();
+	/*NOTREACHED*/
+}
+#endif /* MACH_KDB */
diff --git a/osfmk/ppc/trap.h b/osfmk/ppc/trap.h
new file mode 100644
index 000000000..0cecf5f0c
--- /dev/null
+++ b/osfmk/ppc/trap.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ + +#ifndef _PPC_TRAP_H_ +#define _PPC_TRAP_H_ + +/* maximum number of arguments to a syscall trap */ +#define NARGS 12 +/* Size to reserve in frame for arguments - first 8 are in registers */ +#define ARG_SIZE FM_ALIGN((NARGS-8)*4) + + +/* + * Hardware exception vectors for powerpc are in exception.h + */ + +#ifndef ASSEMBLER + +#include +#include +#include + +extern void doexception(int exc, int code, int sub); + +extern void thread_exception_return(void); + +extern boolean_t alignment(unsigned long dsisr, + unsigned long dar, + struct ppc_saved_state *ssp); + +extern struct ppc_saved_state* trap(int trapno, + struct ppc_saved_state *ss, + unsigned int dsisr, + unsigned int dar); + +extern struct ppc_saved_state* interrupt(int intno, + struct ppc_saved_state *ss, + unsigned int dsisr, + unsigned int dar); + +extern int syscall_error(int exception, + int code, + int subcode, + struct ppc_saved_state *ss); + + +extern int procitab(unsigned, void (*)(int), int); + +#endif /* ASSEMBLER */ + +#endif /* _PPC_TRAP_H_ */ diff --git a/osfmk/ppc/vm_tuning.h b/osfmk/ppc/vm_tuning.h new file mode 100644 index 000000000..a0254bc7c --- /dev/null +++ b/osfmk/ppc/vm_tuning.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _MACHINE_VM_TUNING_H_ +#define _MACHINE_VM_TUNING_H_ + +#endif /* _MACHINE_VM_TUNING_H_ */ diff --git a/osfmk/ppc/vmachmon.c b/osfmk/ppc/vmachmon.c new file mode 100644 index 000000000..383e62ac8 --- /dev/null +++ b/osfmk/ppc/vmachmon.c @@ -0,0 +1,1032 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*----------------------------------------------------------------------- +** vmachmon.c +** +** C routines that we are adding to the MacOS X kernel. +** +** Weird Apple PSL stuff goes here... 
+**
+** Until then, Copyright 2000, Connectix
+-----------------------------------------------------------------------*/
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+extern struct Saveanchor saveanchor;	/* Aligned savearea anchor */
+extern double FloatInit;
+extern unsigned long QNaNbarbarian[4];
+
+/*************************************************************************************
+	Virtual Machine Monitor Internal Routines
+**************************************************************************************/
+
+/*-----------------------------------------------------------------------
+** vmm_get_entry
+**
+** This function verifies and returns a vmm context entry
+**
+** Inputs:
+**		act - pointer to current thread activation
+**		index - index into vmm control table (this is a "one based" value)
+**
+** Outputs:
+**		address of a vmmCntrlEntry or 0 if not found
+-----------------------------------------------------------------------*/
+
+vmmCntrlEntry *vmm_get_entry(
+	thread_act_t act,
+	vmm_thread_index_t index)
+{
+	vmmCntrlTable *CTable;
+	vmmCntrlEntry *CEntry;
+
+	if (act->mact.vmmControl == 0) return NULL;	/* No control table means no vmm */
+	/* index is one-based; NOTE(review): this range check relies on
+	 * vmm_thread_index_t being unsigned so index == 0 wraps -- confirm. */
+	if ((index - 1) >= kVmmMaxContextsPerThread) return NULL;	/* Index not in range */
+
+	CTable = act->mact.vmmControl;		/* Make the address a bit more convenient */
+	CEntry = &CTable->vmmc[index - 1];	/* Point to the entry */
+
+	if (!(CEntry->vmmFlags & vmmInUse)) return NULL;	/* See if the slot is actually in use */
+
+	return CEntry;
+}
+
+
+
+/*************************************************************************************
+	Virtual Machine Monitor Exported Functionality
+
+	The following routines are used to implement a quick-switch mechanism for
+	virtual machines that need to execute within their own processor environment
+	(including register and MMU state).
+**************************************************************************************/
+
+/*-----------------------------------------------------------------------
+** vmm_get_version
+**
+** This function returns the current version of the virtual machine
+** interface. It is divided into two portions. The top 16 bits
+** represent the major version number, and the bottom 16 bits
+** represent the minor version number. Clients using the Vmm
+** functionality should make sure they are using a version new
+** enough for them.
+**
+** Inputs:
+**		none
+**
+** Outputs:
+**		32-bit number representing major/minor version of
+**		the Vmm module
+-----------------------------------------------------------------------*/
+
+int vmm_get_version(struct savearea *save)
+{
+	save->save_r3 = kVmmCurrentVersion;	/* Return the version */
+	return 1;	/* non-zero: result already placed in the savearea */
+}
+
+
+/*-----------------------------------------------------------------------
+** Vmm_get_features
+**
+** This function returns a set of flags that represents the functionality
+** supported by the current version of the Vmm interface. Clients should
+** use this to determine whether they can run on this system.
+**
+** Inputs:
+**		none
+**
+** Outputs:
+**		32-bit number representing functionality supported by this
+**		version of the Vmm module
+-----------------------------------------------------------------------*/
+
+int vmm_get_features(struct savearea *save)
+{
+	save->save_r3 = kVmmCurrentFeatures;	/* Return the features */
+	return 1;	/* non-zero: result already placed in the savearea */
+}
+
+
+/*-----------------------------------------------------------------------
+** vmm_init_context
+**
+** This function initializes an emulation context. It allocates
+** a new pmap (address space) and fills in the initial processor
+** state within the specified structure. The structure, mapped
+** into the client's logical address space, must be page-aligned.
+** +** Inputs: +** act - pointer to current thread activation +** version - requested version of the Vmm interface (allowing +** future versions of the interface to change, but still +** support older clients) +** vmm_user_state - pointer to a logical page within the +** client's address space +** +** Outputs: +** kernel return code indicating success or failure +-----------------------------------------------------------------------*/ + +int vmm_init_context(struct savearea *save) +{ + + thread_act_t act; + vmm_version_t version; + vmm_state_page_t * vmm_user_state; + vmmCntrlTable *CTable; + vm_offset_t conkern; + vmm_state_page_t * vks; + vm_offset_t conphys; + kern_return_t ret; + pmap_t new_pmap; + int cvi, i; + task_t task; + thread_act_t fact, gact; + + vmm_user_state = (vmm_state_page_t *)save->save_r4; /* Get the user address of the comm area */ + if ((unsigned int)vmm_user_state & (PAGE_SIZE - 1)) { /* Make sure the comm area is page aligned */ + save->save_r3 = KERN_FAILURE; /* Return failure */ + return 1; + } + + /* If the client is requesting a newer major version than */ + /* we currently support, we'll have to fail. In the future, */ + /* we can add new major versions and support the older ones. */ + version = save->save_r3; /* Pick up passed in version */ + if ((version >> 16) > (kVmmCurrentVersion >> 16)) { + } + + act = current_act(); /* Pick up our activation */ + + ml_set_interrupts_enabled(TRUE); /* This can take a bit of time so pass interruptions */ + + task = current_task(); /* Figure out who we are */ + + task_lock(task); /* Lock our task */ + + fact = (thread_act_t)task->thr_acts.next; /* Get the first activation on task */ + gact = 0; /* Pretend we didn't find it yet */ + + for(i = 0; i < task->thr_act_count; i++) { /* All of the activations */ + if(fact->mact.vmmControl) { /* Is this a virtual machine monitor? */ + gact = fact; /* Yeah... */ + break; /* Bail the loop... 
*/ + } + fact = (thread_act_t)fact->thr_acts.next; /* Go to the next one */ + } + + +/* + * We only allow one thread per task to be a virtual machine monitor right now. This solves + * a number of potential problems that I can't put my finger on right now. + * + * Utlimately, I think we want to move the controls and make all this task based instead of + * thread based. That would allow an emulator architecture to spawn a kernel thread for each + * VM (if they want) rather than hand dispatch contexts. + */ + + if(gact && (gact != act)) { /* Check if another thread is a vmm or trying to be */ + task_unlock(task); /* Release task lock */ + ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ + save->save_r3 = KERN_FAILURE; /* We must play alone... */ + return 1; + } + + if(!gact) act->mact.vmmControl = (vmmCntrlTable *)1; /* Temporarily mark that we are the vmm thread */ + + task_unlock(task); /* Safe to release now (because we've marked ourselves) */ + + CTable = act->mact.vmmControl; /* Get the control table address */ + if ((unsigned int)CTable == 1) { /* If we are marked, try to allocate a new table, otherwise we have one */ + if(!(CTable = (vmmCntrlTable *)kalloc(sizeof(vmmCntrlTable)))) { /* Get a fresh emulation control table */ + act->mact.vmmControl = 0; /* Unmark us as vmm 'cause we failed */ + ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ + save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No storage... */ + return 1; + } + + bzero((void *)CTable, sizeof(vmmCntrlTable)); /* Clean it up */ + act->mact.vmmControl = CTable; /* Initialize the table anchor */ + } + + for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) { /* Search to find a free slot */ + if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) break; /* Bail if we find an unused slot */ + } + + if(cvi >= kVmmMaxContextsPerThread) { /* Did we find one? */ + ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ + save->save_r3 = KERN_RESOURCE_SHORTAGE; /* No empty slots... 
*/ + return 1; + } + + ret = vm_map_wire( /* Wire the virtual machine monitor's context area */ + act->map, + (vm_offset_t)vmm_user_state, + (vm_offset_t)vmm_user_state + PAGE_SIZE, + VM_PROT_READ | VM_PROT_WRITE, + FALSE); + + if (ret != KERN_SUCCESS) /* The wire failed, return the code */ + goto return_in_shame; + + /* Map the vmm state into the kernel's address space. */ + conphys = pmap_extract(act->map->pmap, (vm_offset_t)vmm_user_state); + + /* Find a virtual address to use. */ + ret = kmem_alloc_pageable(kernel_map, &conkern, PAGE_SIZE); + if (ret != KERN_SUCCESS) { /* Did we find an address? */ + (void) vm_map_unwire(act->map, /* No, unwire the context area */ + (vm_offset_t)vmm_user_state, + (vm_offset_t)vmm_user_state + PAGE_SIZE, + TRUE); + goto return_in_shame; + } + + /* Map it into the kernel's address space. */ + pmap_enter(kernel_pmap, conkern, conphys, VM_PROT_READ | VM_PROT_WRITE, TRUE); + + /* Clear the vmm state structure. */ + vks = (vmm_state_page_t *)conkern; + bzero((char *)vks, PAGE_SIZE); + + /* Allocate a new pmap for the new vmm context. */ + new_pmap = pmap_create(0); + if (new_pmap == PMAP_NULL) { + (void) vm_map_unwire(act->map, /* Couldn't get a pmap, unwire the user page */ + (vm_offset_t)vmm_user_state, + (vm_offset_t)vmm_user_state + PAGE_SIZE, + TRUE); + + kmem_free(kernel_map, conkern, PAGE_SIZE); /* Release the kernel address */ + goto return_in_shame; + } + + /* We're home free now. Simply fill in the necessary info and return. 
*/ + + vks->interface_version = version; /* Set our version code */ + vks->thread_index = cvi + 1; /* Tell the user the index for this virtual machine */ + + CTable->vmmc[cvi].vmmFlags = vmmInUse; /* Mark the slot in use and make sure the rest are clear */ + CTable->vmmc[cvi].vmmPmap = new_pmap; /* Remember the pmap for this guy */ + CTable->vmmc[cvi].vmmContextKern = vks; /* Remember the kernel address of comm area */ + CTable->vmmc[cvi].vmmContextUser = vmm_user_state; /* Remember user address of comm area */ + CTable->vmmc[cvi].vmmFPU_pcb = 0; /* Clear saved floating point context */ + CTable->vmmc[cvi].vmmFPU_cpu = -1; /* Invalidate CPU saved fp context is valid on */ + CTable->vmmc[cvi].vmmVMX_pcb = 0; /* Clear saved vector context */ + CTable->vmmc[cvi].vmmVMX_cpu = -1; /* Invalidate CPU saved vector context is valid on */ + + hw_atomic_add(&saveanchor.saveneed, 2); /* Account for the number of extra saveareas we think we might "need" */ + + ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ + save->save_r3 = KERN_SUCCESS; /* Hip, hip, horay... */ + return 1; + +return_in_shame: + if(!gact) kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable)); /* Toss the table if we just allocated it */ + act->mact.vmmControl = 0; /* Unmark us as vmm 'cause we failed */ + ml_set_interrupts_enabled(FALSE); /* Set back interruptions */ + save->save_r3 = ret; /* Pass back return code... */ + return 1; + +} + + +/*----------------------------------------------------------------------- +** vmm_tear_down_context +** +** This function uninitializes an emulation context. It deallocates +** internal resources associated with the context block. 
+**
+** Inputs:
+**		act - pointer to current thread activation structure
+**		index - index returned by vmm_init_context
+**
+** Outputs:
+**		kernel return code indicating success or failure
+-----------------------------------------------------------------------*/
+
+kern_return_t vmm_tear_down_context(
+	thread_act_t act,
+	vmm_thread_index_t index)
+{
+	vmmCntrlEntry *CEntry;
+	vmmCntrlTable *CTable;
+	int cvi;
+	register savearea *sv;
+
+	CEntry = vmm_get_entry(act, index);		/* Convert index to entry */
+	if (CEntry == NULL) return KERN_FAILURE;	/* Either this isn't vmm thread or the index is bogus */
+
+	ml_set_interrupts_enabled(TRUE);		/* This can take a bit of time so pass interruptions */
+
+	hw_atomic_sub(&saveanchor.saveneed, 2);		/* We don't need these extra saveareas anymore */
+
+	if(CEntry->vmmFPU_pcb) {			/* Is there any floating point context? */
+		sv = (savearea *)CEntry->vmmFPU_pcb;	/* Make usable */
+		sv->save_flags &= ~SAVfpuvalid;		/* Clear in use bit */
+		if(!(sv->save_flags & SAVinuse)) {	/* Anyone left with this one? */
+			save_release(sv);		/* Nope, release it */
+		}
+	}
+
+	if(CEntry->vmmVMX_pcb) {			/* Is there any vector context? */
+		sv = (savearea *)CEntry->vmmVMX_pcb;	/* Make usable */
+		sv->save_flags &= ~SAVvmxvalid;		/* Clear in use bit */
+		if(!(sv->save_flags & SAVinuse)) {	/* Anyone left with this one? */
+			save_release(sv);		/* Nope, release it */
+		}
+	}
+
+	mapping_remove(CEntry->vmmPmap, 0xFFFFF000);	/* Remove final page explicitly because we might have mapped it */
+	pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000);	/* Remove all entries from this map */
+	pmap_destroy(CEntry->vmmPmap);			/* Toss the pmap for this context */
+	CEntry->vmmPmap = NULL;				/* Clean it up */
+
+	(void) vm_map_unwire(				/* Unwire the user comm page */
+		act->map,
+		(vm_offset_t)CEntry->vmmContextUser,
+		(vm_offset_t)CEntry->vmmContextUser + PAGE_SIZE,
+		FALSE);
+
+	kmem_free(kernel_map, (vm_offset_t)CEntry->vmmContextKern, PAGE_SIZE);	/* Remove kernel's view of the comm page */
+
+	CEntry->vmmFlags = 0;				/* Clear out all of the flags for this entry including in use */
+	CEntry->vmmPmap = 0;				/* Clear pmap pointer */
+	CEntry->vmmContextKern = 0;			/* Clear the kernel address of comm area */
+	CEntry->vmmContextUser = 0;			/* Clear the user address of comm area */
+	CEntry->vmmFPU_pcb = 0;				/* Clear saved floating point context */
+	CEntry->vmmFPU_cpu = -1;			/* Invalidate CPU saved fp context is valid on */
+	CEntry->vmmVMX_pcb = 0;				/* Clear saved vector context */
+	CEntry->vmmVMX_cpu = -1;			/* Invalidate CPU saved vector context is valid on */
+
+	CTable = act->mact.vmmControl;			/* Get the control table address */
+	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Search to find a free slot */
+		if(CTable->vmmc[cvi].vmmFlags & vmmInUse) {	/* Return if there are still some in use */
+			ml_set_interrupts_enabled(FALSE);	/* No more interruptions */
+			return KERN_SUCCESS;		/* Leave... 
*/
+	}
+	}
+
+	kfree((vm_offset_t)CTable, sizeof(vmmCntrlTable));	/* Toss the table because we tossed the last context */
+	act->mact.vmmControl = 0;			/* Unmark us as vmm */
+
+	ml_set_interrupts_enabled(FALSE);		/* No more interruptions */
+
+	return KERN_SUCCESS;
+}
+
+/*-----------------------------------------------------------------------
+** vmm_tear_down_all
+**
+** This function uninitializes all emulation contexts. If there are
+** any vmm contexts, it calls vmm_tear_down_context for each one.
+**
+** Note: this can also be called from normal thread termination. Because of
+** that, we will context switch out of an alternate if we are currently in it.
+** It will be terminated with no valid return code set because we don't expect
+** the activation to ever run again.
+**
+** Inputs:
+** activation to tear down
+**
+** Outputs:
+** All vmm contexts released and VMM shut down
+-----------------------------------------------------------------------*/
+void vmm_tear_down_all(thread_act_t act) {
+
+	vmmCntrlTable *CTable;
+	int cvi;
+	kern_return_t ret;
+	savearea *save;
+	spl_t s;
+
+	if(act->mact.specFlags & runningVM) {	/* Are we actually in a context right now? */
+	save = (savearea *)find_user_regs(act);	/* Find the user state context */
+	if(!save) {	/* Did we find it? */
+	panic("vmm_tear_down_all: runningVM marked but no user state context\n");
+	return;
+	}
+
+	s = splhigh();	/* Make sure interrupts are off */
+	vmm_force_exit(act, save);	/* Force an exit from VM state */
+	splx(s);	/* Restore interrupts */
+	}
+
+	if(CTable = act->mact.vmmControl) {	/* Do we have a vmm control block? */
+
+	for(cvi = 1; cvi <= kVmmMaxContextsPerThread; cvi++) {	/* Look at all slots */
+	if(CTable->vmmc[cvi - 1].vmmFlags & vmmInUse) {	/* Is this one in use */
+	ret = vmm_tear_down_context(act, cvi);	/* Take down the found context */
+	if(ret != KERN_SUCCESS) {	/* Did it go away?
*/ + panic("vmm_tear_down_all: vmm_tear_down_context failed; ret=%08X, act = %08X, cvi = %d\n", + ret, act, cvi); + } + } + } + if(act->mact.vmmControl) { /* Did we find one? */ + panic("vmm_tear_down_all: control table did not get deallocated\n"); /* Table did not go away */ + } + } + + return; +} + +/*----------------------------------------------------------------------- +** vmm_map_page +** +** This function maps a page from within the client's logical +** address space into the alternate address space of the +** Virtual Machine Monitor context. +** +** The page need not be locked or resident. If not resident, it will be faulted +** in by this code, which may take some time. Also, if the page is not locked, +** it, and this mapping may disappear at any time, even before it gets used. Note also +** that reference and change information is NOT preserved when a page is unmapped, either +** explicitly or implicitly (e.g., a pageout, being unmapped in the non-alternate address +** space). This means that if RC is needed, the page MUST be wired. +** +** Note that if there is already a mapping at the address, it is removed and all +** information (including RC) is lost BEFORE an attempt is made to map it. Also, +** if the map call fails, the old address is still unmapped.. +** +** Inputs: +** act - pointer to current thread activation +** index - index of vmm state for this page +** va - virtual address within the client's address +** space (must be page aligned) +** ava - virtual address within the alternate address +** space (must be page aligned) +** prot - protection flags +** +** Note that attempted mapping of areas in nested pmaps (shared libraries) or block mapped +** areas are not allowed and will fail. Same with directly mapped I/O areas. 
+** +** Input conditions: +** Interrupts disabled (from fast trap) +** +** Outputs: +** kernel return code indicating success or failure +** if success, va resident and alternate mapping made +-----------------------------------------------------------------------*/ + +kern_return_t vmm_map_page( + thread_act_t act, + vmm_thread_index_t index, + vm_offset_t cva, + vm_offset_t ava, + vm_prot_t prot) +{ + kern_return_t ret; + vmmCntrlEntry *CEntry; + vm_offset_t phys_addr; + register mapping *mpv, *mp, *nmpv, *nmp; + struct phys_entry *pp; + pmap_t mpmap; + vm_map_t map; + + CEntry = vmm_get_entry(act, index); /* Get and validate the index */ + if (CEntry == NULL)return KERN_FAILURE; /* No good, failure... */ + +/* + * Find out if we have already mapped the address and toss it out if so. + */ + mp = hw_lock_phys_vir(CEntry->vmmPmap->space, ava); /* See if there is already a mapping */ + if((unsigned int)mp & 1) { /* Did we timeout? */ + panic("vmm_map_page: timeout locking physical entry for alternate virtual address (%08X)\n", ava); /* Yeah, scream about it! */ + return KERN_FAILURE; /* Bad hair day, return FALSE... */ + } + if(mp) { /* If it was there, toss it */ + mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + (void)mapping_remove(CEntry->vmmPmap, ava); /* Throw away the mapping. we're about to replace it */ + } + map = current_act()->map; /* Get the current map */ + + while(1) { /* Keep trying until we get it or until we fail */ + if(hw_cvp_blk(map->pmap, cva)) return KERN_FAILURE; /* Make sure that there is no block map at this address */ + + mp = hw_lock_phys_vir(map->pmap->space, cva); /* Lock the physical entry for emulator's page */ + if((unsigned int)mp&1) { /* Did we timeout? */ + panic("vmm_map_page: timeout locking physical entry for emulator virtual address (%08X)\n", cva); /* Yeah, scream about it! 
*/ + return KERN_FAILURE; /* Bad hair day, return FALSE... */ + } + + if(mp) { /* We found it... */ + mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + if(!(mpv->PTEr & 1)) break; /* If we are not write protected, we are ok... */ + } + + ml_set_interrupts_enabled(TRUE); /* Enable interruptions */ + ret = vm_fault(map, trunc_page(cva), VM_PROT_READ | VM_PROT_WRITE, FALSE); /* Didn't find it, try to fault it in read/write... */ + ml_set_interrupts_enabled(FALSE); /* Disable interruptions */ + if (ret != KERN_SUCCESS) return KERN_FAILURE; /* There isn't a page there, return... */ + } + + + if(!mpv->physent) { /* Is this an I/O area, e.g., framebuffer? */ + return KERN_FAILURE; /* Yes, we won't map it... */ + } + +/* + * Now we make a mapping using all of the attributes of the source page except for protection. + * Also specify that the physical entry is locked. + */ + nmpv = mapping_make(CEntry->vmmPmap, mpv->physent, (ava & -PAGE_SIZE), + (mpv->physent->pte1 & -PAGE_SIZE), prot, ((mpv->physent->pte1 >> 3) & 0xF), 1); + + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry now, we're done with it */ + + CEntry->vmmLastMap = ava & -PAGE_SIZE; /* Remember the last mapping we made */ + CEntry->vmmFlags |= vmmMapDone; /* Set that we did a map operation */ + + return KERN_SUCCESS; +} + + +/*----------------------------------------------------------------------- +** vmm_get_page_mapping +** +** This function determines whether the specified VMM +** virtual address is mapped. +** +** Inputs: +** act - pointer to current thread activation +** index - index of vmm state for this page +** va - virtual address within the alternate's address +** space +** +** Outputs: +** Non-alternate's virtual address (page aligned) or -1 if not mapped or any failure +** +** Note: +** If there are aliases to the page in the non-alternate address space, +** this call could return the wrong one. Moral of the story: no aliases. 
+-----------------------------------------------------------------------*/ + +vm_offset_t vmm_get_page_mapping( + thread_act_t act, + vmm_thread_index_t index, + vm_offset_t va) +{ + vmmCntrlEntry *CEntry; + vm_offset_t ova; + register mapping *mpv, *mp, *nmpv, *nmp; + pmap_t pmap; + + CEntry = vmm_get_entry(act, index); /* Get and validate the index */ + if (CEntry == NULL)return -1; /* No good, failure... */ + + mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va); /* Look up the mapping */ + if((unsigned int)mp & 1) { /* Did we timeout? */ + panic("vmm_get_page_mapping: timeout locking physical entry for alternate virtual address (%08X)\n", va); /* Yeah, scream about it! */ + return -1; /* Bad hair day, return FALSE... */ + } + if(!mp) return -1; /* Not mapped, return -1 */ + + mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + pmap = current_act()->map->pmap; /* Get the current pmap */ + ova = -1; /* Assume failure for now */ + + for(nmpv = hw_cpv(mpv->physent->phys_link); nmpv; nmpv = hw_cpv(nmpv->next)) { /* Scan 'em all */ + + if(nmpv->pmap != pmap) continue; /* Skip all the rest if this is not the right pmap... */ + + ova = ((((unsigned int)nmpv->PTEhash & -64) << 6) ^ (pmap->space << 12)) & 0x003FF000; /* Backward hash to the wrapped VADDR */ + ova = ova | ((nmpv->PTEv << 1) & 0xF0000000); /* Move in the segment number */ + ova = ova | ((nmpv->PTEv << 22) & 0x0FC00000); /* Add in the API for the top of the address */ + break; /* We're done now, pass virtual address back */ + } + + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + + if(ova == -1) panic("vmm_get_page_mapping: could not back-map alternate va (%08X)\n", va); /* We are bad wrong if we can't find it */ + + return ova; +} + +/*----------------------------------------------------------------------- +** vmm_unmap_page +** +** This function unmaps a page from the alternate's logical +** address space. 
+** +** Inputs: +** act - pointer to current thread activation +** index - index of vmm state for this page +** va - virtual address within the vmm's address +** space +** +** Outputs: +** kernel return code indicating success or failure +-----------------------------------------------------------------------*/ + +kern_return_t vmm_unmap_page( + thread_act_t act, + vmm_thread_index_t index, + vm_offset_t va) +{ + vmmCntrlEntry *CEntry; + boolean_t ret; + kern_return_t kern_result = KERN_SUCCESS; + + CEntry = vmm_get_entry(act, index); /* Get and validate the index */ + if (CEntry == NULL)return -1; /* No good, failure... */ + + ret = mapping_remove(CEntry->vmmPmap, va); /* Toss the mapping */ + + return (ret ? KERN_SUCCESS : KERN_FAILURE); /* Return... */ +} + +/*----------------------------------------------------------------------- +** vmm_unmap_all_pages +** +** This function unmaps all pages from the alternates's logical +** address space. +** +** Inputs: +** act - pointer to current thread activation +** index - index of context state +** +** Outputs: +** none +** +** Note: +** All pages are unmapped, but the address space (i.e., pmap) is still alive +-----------------------------------------------------------------------*/ + +void vmm_unmap_all_pages( + thread_act_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return; /* Either this isn't vmm thread or the index is bogus */ + +/* + * Note: the pmap code won't deal with the last page in the address space, so handle it explicitly + */ + mapping_remove(CEntry->vmmPmap, 0xFFFFF000); /* Remove final page explicitly because we might have mapped it */ + pmap_remove(CEntry->vmmPmap, 0, 0xFFFFF000); /* Remove all entries from this map */ + return; +} + + +/*----------------------------------------------------------------------- +** vmm_get_page_dirty_flag +** +** This function returns the changed flag of the page 
+** and optionally clears clears the flag. +** +** Inputs: +** act - pointer to current thread activation +** index - index of vmm state for this page +** va - virtual address within the vmm's address +** space +** reset - Clears dirty if true, untouched if not +** +** Outputs: +** the dirty bit +** clears the dirty bit in the pte if requested +** +** Note: +** The RC bits are merged into the global physical entry +-----------------------------------------------------------------------*/ + +boolean_t vmm_get_page_dirty_flag( + thread_act_t act, + vmm_thread_index_t index, + vm_offset_t va, + unsigned int reset) +{ + vmmCntrlEntry *CEntry; + register mapping *mpv, *mp; + unsigned int RC; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return 1; /* Either this isn't vmm thread or the index is bogus */ + + mp = hw_lock_phys_vir(CEntry->vmmPmap->space, va); /* Look up the mapping */ + if((unsigned int)mp & 1) { /* Did we timeout? */ + panic("vmm_get_page_dirty_flag: timeout locking physical entry for alternate virtual address (%08X)\n", va); /* Yeah, scream about it! */ + return 1; /* Bad hair day, return dirty... */ + } + if(!mp) return 1; /* Not mapped, return dirty... */ + + RC = hw_test_rc(mp, reset); /* Fetch the RC bits and clear if requested */ + + mpv = hw_cpv(mp); /* Convert mapping block to virtual */ + hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* We're done, unlock the physical entry */ + + return (RC & 1); /* Return the change bit */ +} + +/*----------------------------------------------------------------------- +** vmm_get_float_state +** +** This function causes the current floating point state to +** be saved into the shared context area. It also clears the +** vmmFloatCngd changed flag. 
+** +** Inputs: +** act - pointer to current thread activation structure +** index - index returned by vmm_init_context +** +** Outputs: +** context saved +-----------------------------------------------------------------------*/ + +kern_return_t vmm_get_float_state( + thread_act_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + vmmCntrlTable *CTable; + int i; + register struct savearea *sv; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ + + act->mact.specFlags &= ~floatCng; /* Clear the special flag */ + CEntry->vmmContextKern->vmmStat &= ~vmmFloatCngd; /* Clear the change indication */ + + if(sv = (struct savearea *)CEntry->vmmFPU_pcb) { /* Is there context yet? */ + bcopy((char *)&sv->save_fp0, (char *)&(CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[0].d), sizeof(vmm_processor_state_t)); /* 32 registers plus status and pad */ + return KERN_SUCCESS; + } + + CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[0] = 0; /* Clear FPSCR */ + CEntry->vmmContextKern->vmm_proc_state.ppcFPSCR.i[1] = 0; /* Clear FPSCR */ + + for(i = 0; i < 32; i++) { /* Initialize floating points */ + CEntry->vmmContextKern->vmm_proc_state.ppcFPRs[i].d = FloatInit; /* Initial value */ + } + + return KERN_SUCCESS; +} + +/*----------------------------------------------------------------------- +** vmm_get_vector_state +** +** This function causes the current vector state to +** be saved into the shared context area. It also clears the +** vmmVectorCngd changed flag. 
+** +** Inputs: +** act - pointer to current thread activation structure +** index - index returned by vmm_init_context +** +** Outputs: +** context saved +-----------------------------------------------------------------------*/ + +kern_return_t vmm_get_vector_state( + thread_act_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + vmmCntrlTable *CTable; + int i, j; + unsigned int vrvalidwrk; + register struct savearea *sv; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ + + act->mact.specFlags &= ~vectorCng; /* Clear the special flag */ + CEntry->vmmContextKern->vmmStat &= ~vmmVectCngd; /* Clear the change indication */ + + if(sv = (savearea *)CEntry->vmmVMX_pcb) { /* Is there context yet? */ + + vrvalidwrk = sv->save_vrvalid; /* Get the valid flags */ + + for(j=0; j < 4; j++) { /* Set value for vscr */ + CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = sv->save_vscr[j]; + } + + for(i = 0; i < 32; i++) { /* Copy the saved registers and invalidate the others */ + if(vrvalidwrk & 0x80000000) { /* Do we have a valid value here? 
*/
+	for(j = 0; j < 4; j++) {	/* If so, copy it over */
+	CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = ((unsigned int *)&(sv->save_vr0))[(i * 4) + j];
+	}
+	}
+	else {
+	for(j = 0; j < 4; j++) {	/* Otherwise set to empty value */
+	CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];
+	}
+	}
+
+	vrvalidwrk = vrvalidwrk << 1;	/* Shift over to the next */
+
+	}
+
+	return KERN_SUCCESS;
+	}
+
+	for(j = 0; j < 4; j++) {	/* Initialize vscr to java mode */
+	CEntry->vmmContextKern->vmm_proc_state.ppcVSCR.i[j] = 0;	/* Initial value */
+	}
+
+	for(i = 0; i < 32; i++) {	/* Initialize vector registers */
+	for(j=0; j < 4; j++) {	/* Do words */
+	CEntry->vmmContextKern->vmm_proc_state.ppcVRs[i].i[j] = QNaNbarbarian[j];	/* Initial value */
+	}
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*-----------------------------------------------------------------------
+** vmm_set_timer
+**
+** This function causes a timer (in AbsoluteTime) for a specific time
+** to be set. It also clears the vmmTimerPop flag if the timer is actually
+** set, it is cleared otherwise.
+**
+** A timer is cleared by setting the time to 0. This will clear
+** the vmmTimerPop bit. Simply setting the timer to earlier than the
+** current time clears the internal timer request, but leaves the
+** vmmTimerPop flag set.
+** +** +** Inputs: +** act - pointer to current thread activation structure +** index - index returned by vmm_init_context +** timerhi - high order word of AbsoluteTime to pop +** timerlo - low order word of AbsoluteTime to pop +** +** Outputs: +** timer set, vmmTimerPop cleared +-----------------------------------------------------------------------*/ + +kern_return_t vmm_set_timer( + thread_act_t act, + vmm_thread_index_t index, + unsigned int timerhi, + unsigned int timerlo) +{ + vmmCntrlEntry *CEntry; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ + + CEntry->vmmTimer.hi = timerhi; /* Set the high order part */ + CEntry->vmmTimer.lo = timerlo; /* Set the low order part */ + + vmm_timer_pop(act); /* Go adjust all of the timer stuff */ + return KERN_SUCCESS; /* Leave now... */ +} + + +/*----------------------------------------------------------------------- +** vmm_get_timer +** +** This function causes the timer for a specified VM to be +** returned in return_params[0] and return_params[1]. +** +** +** Inputs: +** act - pointer to current thread activation structure +** index - index returned by vmm_init_context +** +** Outputs: +** Timer value set in return_params[0] and return_params[1]. +** Set to 0 if timer is not set. 
+-----------------------------------------------------------------------*/ + +kern_return_t vmm_get_timer( + thread_act_t act, + vmm_thread_index_t index) +{ + vmmCntrlEntry *CEntry; + vmmCntrlTable *CTable; + + CEntry = vmm_get_entry(act, index); /* Convert index to entry */ + if (CEntry == NULL) return KERN_FAILURE; /* Either this isn't vmm thread or the index is bogus */ + + CEntry->vmmContextKern->return_params[0] = CEntry->vmmTimer.hi; /* Return the last timer value */ + CEntry->vmmContextKern->return_params[1] = CEntry->vmmTimer.lo; /* Return the last timer value */ + + return KERN_SUCCESS; +} + + + +/*----------------------------------------------------------------------- +** vmm_timer_pop +** +** This function causes all timers in the array of VMs to be updated. +** All appropriate flags are set or reset. If a VM is currently +** running and its timer expired, it is intercepted. +** +** The qactTimer value is set to the lowest unexpired timer. It is +** zeroed if all timers are expired or have been reset. +** +** Inputs: +** act - pointer to current thread activation structure +** +** Outputs: +** timers set, vmmTimerPop cleared or set +-----------------------------------------------------------------------*/ + +void vmm_timer_pop( + thread_act_t act) +{ + vmmCntrlEntry *CEntry; + vmmCntrlTable *CTable; + int cvi, any; + AbsoluteTime now, soonest; + savearea *sv; + + if(!((unsigned int)act->mact.vmmControl & 0xFFFFFFFE)) { /* Are there any virtual machines? */ + panic("vmm_timer_pop: No virtual machines defined; act = %08X\n", act); + } + + soonest.hi = 0xFFFFFFFF; /* Max time */ + soonest.lo = 0xFFFFFFFF; /* Max time */ + + clock_get_uptime((AbsoluteTime *)&now); /* What time is it? 
*/
+
+	CTable = act->mact.vmmControl;	/* Make this easier */
+	any = 0;	/* Haven't found a running unexpired timer yet */
+
+	for(cvi = 0; cvi < kVmmMaxContextsPerThread; cvi++) {	/* Cycle through all and check time now */
+
+	if(!(CTable->vmmc[cvi].vmmFlags & vmmInUse)) continue;	/* Do not check if the entry is empty */
+
+	/* BUGFIX: the original tested vmmTimer.hi twice (hi | hi), so a timer with a
+	   zero high word but nonzero low word was wrongly treated as "reset" and
+	   would never pop. Test hi | lo, matching the qactTimer check below. */
+	if(!(CTable->vmmc[cvi].vmmTimer.hi | CTable->vmmc[cvi].vmmTimer.lo)) {	/* Is the timer reset? */
+	CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop;	/* Clear timer popped */
+	CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop;	/* Clear timer popped */
+	continue;	/* Check next */
+	}
+
+	if (CMP_ABSOLUTETIME(&CTable->vmmc[cvi].vmmTimer, &now) <= 0) {
+	CTable->vmmc[cvi].vmmFlags |= vmmTimerPop;	/* Set timer popped here */
+	CTable->vmmc[cvi].vmmContextKern->vmmStat |= vmmTimerPop;	/* Set timer popped here */
+	if((unsigned int)&CTable->vmmc[cvi] == (unsigned int)act->mact.vmmCEntry) {	/* Is this the running VM? */
+	sv = (savearea *)find_user_regs(act);	/* Get the user state registers */
+	if(!sv) {	/* Did we find something?
*/ + panic("vmm_timer_pop: no user context; act = %08X\n", act); + } + sv->save_exception = T_IN_VAIN; /* Indicate that this is a null exception */ + vmm_force_exit(act, sv); /* Intercept a running VM */ + } + continue; /* Check the rest */ + } + else { /* It hasn't popped yet */ + CTable->vmmc[cvi].vmmFlags &= ~vmmTimerPop; /* Set timer not popped here */ + CTable->vmmc[cvi].vmmContextKern->vmmStat &= ~vmmTimerPop; /* Set timer not popped here */ + } + + any = 1; /* Show we found an active unexpired timer */ + + if (CMP_ABSOLUTETIME(&CTable->vmmc[cvi].vmmTimer, &soonest) < 0) { + soonest.hi = CTable->vmmc[cvi].vmmTimer.hi; /* Set high order lowest timer */ + soonest.lo = CTable->vmmc[cvi].vmmTimer.lo; /* Set low order lowest timer */ + } + } + + if(any) { + if (!(act->mact.qactTimer.hi | act->mact.qactTimer.lo) || + (CMP_ABSOLUTETIME(&soonest, &act->mact.qactTimer) <= 0)) { + act->mact.qactTimer.hi = soonest.hi; /* Set high order lowest timer */ + act->mact.qactTimer.lo = soonest.lo; /* Set low order lowest timer */ + } + } + + return; +} diff --git a/osfmk/ppc/vmachmon.h b/osfmk/ppc/vmachmon.h new file mode 100644 index 000000000..4c5ed7f6c --- /dev/null +++ b/osfmk/ppc/vmachmon.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*----------------------------------------------------------------------- +** vmachmon.h +** +** C routines that we are adding to the MacOS X kernel. +** +** Wierd Apple PSL stuff goes here... +** +** Until then, Copyright 2000, Connectix +** +-----------------------------------------------------------------------*/ + +#include + +#ifndef _VEMULATION_H_ +#define _VEMULATION_H_ + +/************************************************************************************* + External Emulation Types +**************************************************************************************/ + +typedef union vmm_vector_register_t { + unsigned long i[4]; + unsigned short s[8]; + unsigned char b[16]; +} vmm_vector_register_t; + +typedef union vmm_fp_register_t { + double d; + unsigned long i[2]; + unsigned short s[4]; + unsigned char b[8]; +} vmm_fp_register_t; + +typedef struct vmm_processor_state_t { +/* + * NOTE: The general context needs to correspond to the order of the savearea for quick swaps + */ + unsigned long ppcPC; + unsigned long ppcMSR; + + unsigned long ppcGPRs[32]; + + unsigned long ppcCR; + unsigned long ppcXER; + unsigned long ppcLR; + unsigned long ppcCTR; + unsigned long ppcMQ; /* Obsolete */ + unsigned long ppcVRSave; + unsigned long ppcReserved1[40]; /* Future processor state can go here */ + +/* We must be 16-byte aligned here */ + + vmm_vector_register_t ppcVRs[32]; + vmm_vector_register_t ppcVSCR; + +/* We must be 8-byte aligned here */ + + vmm_fp_register_t 
ppcFPRs[32]; + vmm_fp_register_t ppcFPSCR; + unsigned long ppcReserved2[2]; /* Pad out to multiple of 16 bytes */ +} vmm_processor_state_t; + +typedef unsigned long vmm_return_code_t; + +typedef unsigned long vmm_thread_index_t; +enum { + kVmmCurrentVersion = 0x00010000 +}; + +typedef unsigned long vmm_features_t; +enum { + kVmmFeature_LittleEndian = 0x00000001 +}; + +typedef unsigned long vmm_version_t; + +typedef struct vmm_state_page_t { + /* This structure must remain below 4Kb (one page) in size */ + vmm_version_t interface_version; + vmm_thread_index_t thread_index; + unsigned int vmmStat; /* Note: this field is identical to vmmFlags in vmmCntrlEntry */ + unsigned int vmmCntrl; +#define vmmFloatLoad 0x80000000 +#define vmmFloatLoadb 0 +#define vmmVectLoad 0x40000000 +#define vmmVectLoadb 1 +#define vmmVectVRall 0x20000000 +#define vmmVectVRallb 2 +#define vmmVectVAss 0x10000000 +#define vmmVectVAssb 3 + vmm_return_code_t return_code; + unsigned long return_params[4]; + unsigned long gas[7]; /* For alignment */ + + /* The next portion of the structure must remain 32-byte aligned */ + vmm_processor_state_t vmm_proc_state; + +} vmm_state_page_t; + +enum { + /* Function Indices (passed in r3) */ + kVmmGetVersion = 0, + kVmmvGetFeatures, + kVmmInitContext, + kVmmTearDownContext, + kVmmTearDownAll, + kVmmMapPage, + kVmmGetPageMapping, + kVmmUnmapPage, + kVmmUnmapAllPages, + kVmmGetPageDirtyFlag, + kVmmGetFloatState, + kVmmGetVectorState, + kVmmSetTimer, + kVmmGetTimer, + kVmmExecuteVM +}; + +#define kVmmReturnNull 0 +#define kVmmBogusContext 1 +#define kVmmReturnDataPageFault 3 +#define kVmmReturnInstrPageFault 4 +#define kVmmReturnAlignmentFault 6 +#define kVmmReturnProgramException 7 +#define kVmmReturnSystemCall 12 +#define kVmmReturnTraceException 13 +#define kVmmAltivecAssist 22 + + +/************************************************************************************* + Internal Emulation Types 
+**************************************************************************************/ + +#define kVmmMaxContextsPerThread 32 + +enum { + kVmmCurrentFeatures = kVmmFeature_LittleEndian +}; + +typedef struct vmmCntrlEntry { /* Virtual Machine Monitor control table entry */ + unsigned int vmmFlags; /* Assorted control flags */ +#define vmmInUse 0x80000000 +#define vmmInUseb 0 +#define vmmFloatCngd 0x40000000 +#define vmmFloatCngdb 1 +#define vmmVectCngd 0x20000000 +#define vmmVectCngdb 2 +#define vmmTimerPop 0x10000000 +#define vmmTimerPopb 3 +#define vmmMapDone 0x08000000 +#define vmmMapDoneb 4 +#define vmmSpfSave 0x000000FF +#define vmmSpfSaveb 24 + pmap_t vmmPmap; /* pmap for alternate context's view of task memory */ + vmm_state_page_t *vmmContextKern; /* Kernel address of context communications area */ + vmm_state_page_t *vmmContextUser; /* User address of context communications area */ + pcb_t vmmFPU_pcb; /* Saved floating point context */ + unsigned int vmmFPU_cpu; /* CPU saved fp context is valid on */ + pcb_t vmmVMX_pcb; /* Saved vector context */ + unsigned int vmmVMX_cpu; /* CPU saved vector context is valid on */ + AbsoluteTime vmmTimer; /* Last set timer value. Zero means unset */ + vm_offset_t vmmLastMap; /* Last vaddr mapping into virtual machine */ +} vmmCntrlEntry; + +typedef struct vmmCntrlTable { /* Virtual Machine Monitor Control table */ + vmmCntrlEntry vmmc[kVmmMaxContextsPerThread]; /* One entry for each possible Virtual Machine Monitor context */ +} vmmCntrlTable; + +/* function decls for kernel level routines... 
*/ +extern vmmCntrlEntry *vmm_get_entry(thread_act_t act, vmm_thread_index_t index); +extern kern_return_t vmm_tear_down_context(thread_act_t act, vmm_thread_index_t index); +extern kern_return_t vmm_get_float_state(thread_act_t act, vmm_thread_index_t index); +extern kern_return_t vmm_get_vector_state(thread_act_t act, vmm_thread_index_t index); +extern kern_return_t vmm_set_timer(thread_act_t act, vmm_thread_index_t index, unsigned int timerhi, unsigned int timerlo); +extern kern_return_t vmm_get_timer(thread_act_t act, vmm_thread_index_t index); +extern void vmm_tear_down_all(thread_act_t act); +extern kern_return_t vmm_map_page(thread_act_t act, vmm_thread_index_t hindex, vm_offset_t cva, + vm_offset_t ava, vm_prot_t prot); +extern vm_offset_t vmm_get_page_mapping(thread_act_t act, vmm_thread_index_t index, + vm_offset_t va); +extern kern_return_t vmm_unmap_page(thread_act_t act, vmm_thread_index_t index, vm_offset_t va); +extern void vmm_unmap_all_pages(thread_act_t act, vmm_thread_index_t index); +extern boolean_t vmm_get_page_dirty_flag(thread_act_t act, vmm_thread_index_t index, + vm_offset_t va, unsigned int reset); +extern int vmm_get_features(struct savearea *); +extern int vmm_get_version(struct savearea *); +extern int vmm_init_context(struct savearea *); +extern int vmm_dispatch(struct savearea *); +extern int vmm_exit(thread_act_t act, struct savearea *); +extern void vmm_force_exit(thread_act_t act, struct savearea *); +void vmm_timer_pop(thread_act_t act); + +#endif + diff --git a/osfmk/ppc/vmachmon_asm.s b/osfmk/ppc/vmachmon_asm.s new file mode 100644 index 000000000..a1039dceb --- /dev/null +++ b/osfmk/ppc/vmachmon_asm.s @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +/* + * This file contains implementations for the Virtual Machine Monitor + * facility. + */ + + +/* + * int vmm_dispatch(savearea, act); + + * vmm_dispatch is a PPC only system call. It is used with a selector (first + * parameter) to determine what function to enter. This is treated as an extension + * of hw_exceptions. + * + * Inputs: + * R4 = current activation + * R16 = current thread + * R30 = current savearea + */ + + .align 5 /* Line up on cache line */ + .globl EXT(vmm_dispatch_table) + +LEXT(vmm_dispatch_table) + + /* Don't change the order of these routines in the table. It's */ + /* OK to add new routines, but they must be added at the bottom. 
*/ + + .long EXT(vmm_get_version_sel) ; Get the version of the VMM interface + .long EXT(vmm_get_features_sel) ; Get the features of the VMM interface + .long EXT(vmm_init_context_sel) ; Initializes a new VMM context + .long EXT(vmm_tear_down_context) ; Tears down a previously-allocated VMM context + .long EXT(vmm_tear_down_all) ; Tears down all VMMs + .long EXT(vmm_map_page) ; Maps a page from the main address space into the VM space + .long EXT(vmm_get_page_mapping) ; Returns client va associated with VM va + .long EXT(vmm_unmap_page) ; Unmaps a page from the VM space + .long EXT(vmm_unmap_all_pages) ; Unmaps all pages from the VM space + .long EXT(vmm_get_page_dirty_flag) ; Gets the change bit for a page and optionally clears it + .long EXT(vmm_get_float_state) ; Gets current floating point state + .long EXT(vmm_get_vector_state) ; Gets current vector state + .long EXT(vmm_set_timer) ; Sets a timer value + .long EXT(vmm_get_timer) ; Gets a timer value + .long EXT(switchIntoVM) ; Switches to the VM context + + .set vmm_count,(.-EXT(vmm_dispatch_table))/4 ; Get the top number + + + .align 5 + .globl EXT(vmm_dispatch) + +LEXT(vmm_dispatch) + + lwz r11,saver3(r30) ; Get the selector + mr r3,r4 ; All of our functions want the activation as the first parm + lis r10,hi16(EXT(vmm_dispatch_table)) ; Get top half of table + cmplwi r11,kVmmExecuteVM ; Should we switch to the VM now? + cmplwi cr1,r11,vmm_count ; See if we have a valid selector + ori r10,r10,lo16(EXT(vmm_dispatch_table)) ; Get low half of table + lwz r4,saver4(r30) ; Get 1st parameter after selector + beq+ EXT(switchIntoVM) ; Yes, go switch to it.... 
+			rlwinm	r11,r11,2,0,29		; Index into table
+			bgt-	cr1,vmmBogus		; It is a bogus entry
+			lwzx	r10,r10,r11		; Get address of routine
+			lwz	r5,saver5(r30)		; Get 2nd parameter after selector
+			lwz	r6,saver6(r30)		; Get 3rd parameter after selector
+			mtlr	r10			; Set the routine address
+			lwz	r7,saver7(r30)		; Get 4th parameter after selector
+;
+;			NOTE: currently the most parameters for any call is 4. We will support at most 8 because we
+;			do not want to get into any stack based parms. However, here is where we need to add
+;			code for the 5th - 8th parms if we need them.
+;
+
+			blrl				; Call function
+
+			stw	r3,saver3(r30)		; Pass back the return code
+			li	r3,1			; Set normal return with check for AST
+			b	EXT(ppcscret)		; Go back to handler...
+
+vmmBogus:	eqv	r3,r3,r3		; Bogus selector, treat like a bogus system call
+			b	EXT(ppcscret)		; Go back to handler...
+
+
+			.align	5
+			.globl	EXT(vmm_get_version_sel)
+
+LEXT(vmm_get_version_sel)		; Selector based version of get version
+
+			lis	r3,hi16(EXT(vmm_get_version))
+			ori	r3,r3,lo16(EXT(vmm_get_version))
+			b	selcomm
+
+
+			.align	5
+			.globl	EXT(vmm_get_features_sel)
+
+LEXT(vmm_get_features_sel)		; Selector based version of get features
+
+; Load the address of the C routine vmm_get_features (NOT this selector stub).
+; Loading EXT(vmm_get_features_sel) here, as before, made selcomm's blrl branch
+; right back into this stub, recursing forever instead of calling the handler.
+			lis	r3,hi16(EXT(vmm_get_features))
+			ori	r3,r3,lo16(EXT(vmm_get_features))
+			b	selcomm
+
+
+			.align	5
+			.globl	EXT(vmm_init_context_sel)
+
+LEXT(vmm_init_context_sel)		; Selector based version of init context
+
+; Same fix as above: load the C routine vmm_init_context, not the stub itself,
+; then fall through to selcomm immediately below (no branch needed).
+			lis	r3,hi16(EXT(vmm_init_context))
+			ori	r3,r3,lo16(EXT(vmm_init_context))
+
+selcomm:	mtlr	r3			; Set the real routine address
+			mr	r3,r30			; Pass in the savearea
+			blrl				; Call the function
+			b	EXT(ppcscret)		; Go back to handler...
+
+/*
+ *			Here is where we transition to the virtual machine.
+ *
+ *			We will swap the register context in the savearea with that which is saved in our shared
+ *			context area.  We will validity check a bit and clear any nasty bits in the MSR and force 
+ *			the mandatory ones on.
+ * + * Then we will setup the new address space to run with, and anything else that is normally part + * of a context switch. + * + * Still need to figure out final floats and vectors. For now, we will go brute + * force and when we go into the VM, we will force save any normal floats and + * vectors. Then we will hide them and swap the VM copy (if any) into the normal + * chain. When we exit VM we will do the opposite. This is not as fast as I would + * like it to be. + * + * + */ + + .align 5 + .globl EXT(switchIntoVM) + +LEXT(switchIntoVM) + lwz r5,vmmControl(r3) ; Pick up the control table address + subi r4,r4,1 ; Switch to zero offset + rlwinm. r2,r5,0,0,30 ; Is there a context there? (Note: we will ignore bit 31 so that we + ; do not try this while we are transitioning off to on + cmplwi cr1,r4,kVmmMaxContextsPerThread ; Is the index valid? + beq- vmmBogus ; Not started, treat like a bogus system call + mulli r2,r4,vmmCEntrySize ; Get displacement from index + bgt- cr1,swvmmBogus ; Index is bogus... + add r2,r2,r5 ; Point to the entry + + lwz r4,vmmFlags(r2) ; Get the flags for the selected entry + lwz r5,vmmContextKern(r2) ; Get the context area address + rlwinm. r26,r4,0,vmmInUseb,vmmInUseb ; See if the slot is in use + bne+ swvmChkIntcpt ; We are so cool. Go do check for immediate intercepts... + +swvmmBogus: li r2,kVmmBogusContext ; Set bogus index return + li r3,1 ; Set normal return with check for AST + stw r2,saver3(r30) ; Pass back the return code + b EXT(ppcscret) ; Go back to handler... + +; +; Here we check for any immediate intercepts. So far, the only +; one of these is a timer pop. We will not dispatch if the timer has +; already popped. They need to either reset the timer (i.e. set timer +; to 0) or to set a future time. +; + +swvmChkIntcpt: + rlwinm. r26,r4,0,vmmTimerPopb,vmmTimerPopb ; Did the timer pop? + beq+ swvmDoSwitch ; No... 
+ + li r2,kVmmReturnNull ; Set null return + li r3,1 ; Set normal return with check for AST + stw r2,saver3(r30) ; Pass back the return code + stw r2,return_code(r5) ; Save the exit code + b EXT(ppcscret) ; Go back to handler... + +; +; Here is where we actually swap into the VM (alternate) context. +; We will bulk do a wholesale swap of the registers in the context area (the VMs) +; with the ones in the savearea (our main code). During the copy, we will fix up the +; MSR, forcing on a few bits and turning off a few others. Then we will deal with the +; PMAP and other per_proc stuff. Finally, we will exit back through the main exception +; handler to deal with unstacking saveareas and ASTs, etc. +; + +swvmDoSwitch: + +; +; First, we save the volatile registers we care about. Remember, all register +; handling here is pretty funky anyway, so we just pick the ones that are ok. +; + mr r26,r3 ; Save the activation pointer + mr r28,r5 ; Save the context pointer + mr r27,r2 ; Save the context entry + + bl vmmxcng ; Exchange the vector and floating point contexts + mr r5,r28 ; Restore this register + + lwz r11,ACT_MACT_SPF(r26) ; Get the special flags + lwz r3,vmmPmap(r27) ; Get the pointer to the PMAP + oris r15,r11,hi16(runningVM) ; ; Show that we are swapped to the VM right now + bl EXT(hw_set_user_space_dis) ; Swap the address spaces + lwz r17,vmmFlags(r27) ; Get the status flags + mfsprg r10,0 ; Get the per_proc + rlwinm. r0,r17,0,vmmMapDoneb,vmmMapDoneb ; Did we just do a map function? + stw r27,vmmCEntry(r26) ; Remember what context we are running + andc r17,r17,r0 ; Turn off map flag + beq+ swvmNoMap ; No mapping done... + +; +; This little bit of hoopala here (triggered by vmmMapDone) is +; a performance enhancement. This will change the returning savearea +; to look like we had a DSI rather than a system call. Then, setting +; the redrive bit, the exception handler will redrive the exception as +; a DSI, entering the last mapped address into the hash table. 
This keeps +; double faults from happening. Note that there is only a gain if the VM +; takes a fault, then the emulator resolves it only, and then begins +; the VM execution again. It seems like this should be the normal case. +; + + lwz r3,SAVflags(r30) ; Pick up the savearea flags + lwz r2,vmmLastMap(r27) ; Get the last mapped address + li r20,T_DATA_ACCESS ; Change to DSI fault + oris r3,r3,hi16(SAVredrive) ; Set exception redrive + stw r2,savedar(r30) ; Set the DAR to the last thing we mapped + stw r3,SAVflags(r30) ; Turn on the redrive request + lis r2,hi16(MASK(DSISR_HASH)) ; Set PTE/DBAT miss + stw r20,saveexception(r30) ; Say we need to emulate a DSI + stw r2,savedsisr(r30) ; Pretend we have a PTE miss + +swvmNoMap: rlwimi r15,r17,32-(floatCngbit-vmmFloatCngdb),floatCngbit,vectorCngbit ; Shift and insert changed bits + rlwimi r17,r11,8,24,31 ; Save the old spf flags + stw r15,spcFlags(r10) ; Set per_proc copy of the special flags + stw r15,ACT_MACT_SPF(r26) ; Get the special flags + + stw r17,vmmFlags(r27) ; Set the status flags + + bl swapCtxt ; First, swap the general register state + + lwz r17,vmmContextKern(r27) ; Get the comm area + + lwz r15,vmmCntrl(r17) ; Get the control flags + + rlwinm. r0,r15,0,vmmFloatLoadb,vmmFloatLoadb ; Are there new floating point values? + li r14,vmmppcFPRs ; Get displacement to the new values + andc r15,r15,r0 ; Clear the bit + beq+ swvmNoNewFloats ; Nope, good... + + lwz r3,ACT_MACT_FPU(r26) ; Get the FPU savearea + dcbt r14,r18 ; Touch in first line of new stuff + mr. r3,r3 ; Is there one? + bne+ swvmGotFloat ; Yes... 
+ + bl EXT(save_get) ; Get a savearea + + li r11,0 ; Get a 0 + lis r7,hi16(SAVfpuvalid) ; Set the allocated bit + stw r3,ACT_MACT_FPU(r26) ; Set the floating point savearea + stw r7,SAVflags(r3) ; Set the validity flags + stw r11,SAVlvlfp(r3) ; Set the context level + +swvmGotFloat: + dcbt r14,r17 ; Touch in first line of new stuff + la r4,savefp0(r3) ; Point to the destination + mr r21,r3 ; Save the save area + la r3,vmmppcFPRs(r17) ; Point to the source + li r5,33*8 ; Get the size (32 FP + FPSCR at 8 bytes each) + + bl EXT(bcopy) ; Copy the new values + + lwz r11,ACT_MACT_SPF(r26) ; Get the special flags + stw r15,vmmCntrl(r17) ; Save the control flags sans vmmFloatLoad + rlwinm r11,r11,0,floatCngbit+1,floatCngbit-1 ; Clear the changed bit here + lwz r14,vmmStat(r17) ; Get the status flags + mfsprg r10,0 ; Get the per_proc + stw r11,ACT_MACT_SPF(r26) ; Get the special flags + rlwinm r14,r14,0,vmmFloatCngdb+1,vmmFloatCngdb-1 ; Clear the changed flag + stw r11,spcFlags(r10) ; Set per_proc copy of the special flags + stw r14,vmmStat(r17) ; Set the status flags sans vmmFloatCngd + lwz r11,savefpscrpad(r21) ; Get the new fpscr pad + lwz r14,savefpscr(r21) ; Get the new fpscr + stw r11,savexfpscrpad(r30) ; Save the new fpscr pad + stw r14,savexfpscr(r30) ; Save the new fpscr + +swvmNoNewFloats: + rlwinm. r0,r15,0,vmmVectLoadb,vmmVectLoadb ; Are there new vector values? + li r14,vmmppcVRs ; Get displacement to the new values + andc r15,r15,r0 ; Clear the bit + beq+ swvmNoNewVects ; Nope, good... + + lwz r3,ACT_MACT_VMX(r26) ; Get the vector savearea + dcbt r14,r27 ; Touch in first line of new stuff + mr. r3,r3 ; Is there one? + bne+ swvmGotVect ; Yes... 
+ + bl EXT(save_get) ; Get a savearea + + li r21,0 ; Get a 0 + lis r7,hi16(SAVvmxvalid) ; Set the allocated bit + stw r3,ACT_MACT_VMX(r26) ; Set the vector savearea indication + stw r7,SAVflags(r3) ; Set the validity flags + stw r21,SAVlvlvec(r3) ; Set the context level + +swvmGotVect: + dcbt r14,r17 ; Touch in first line of new stuff + mr r21,r3 ; Save the pointer to the savearea + la r4,savevr0(r3) ; Point to the destination + la r3,vmmppcVRs(r17) ; Point to the source + li r5,33*16 ; Get the size (32 vectors + VSCR at 16 bytes each) + + bl EXT(bcopy) ; Copy the new values + + lwz r11,ACT_MACT_SPF(r26) ; Get the special flags + stw r15,vmmCntrl(r17) ; Save the control flags sans vmmVectLoad + rlwinm r11,r11,0,vectorCngbit+1,vectorCngbit-1 ; Clear the changed bit here + lwz r14,vmmStat(r17) ; Get the status flags + mfsprg r10,0 ; Get the per_proc + stw r11,ACT_MACT_SPF(r26) ; Get the special flags + rlwinm r14,r14,0,vmmVectCngdb+1,vmmVectCngdb-1 ; Clear the changed flag + eqv r15,r15,r15 ; Get all foxes + stw r11,spcFlags(r10) ; Set per_proc copy of the special flags + stw r14,vmmStat(r17) ; Set the status flags sans vmmVectCngd + stw r15,savevrvalid(r21) ; Set the valid bits to all foxes + +swvmNoNewVects: + li r3,1 ; Show normal exit with check for AST + mr r9,r26 ; Move the activation pointer + b EXT(ppcscret) ; Go back to handler... + + +; +; Here is where we exchange the emulator floating and vector contexts +; for the virtual machines. Remember, this is not so efficient and needs +; a rewrite. Also remember the funky register conventions (i.e., +; we need to know what our callers need saved and what our callees trash. +; +; Note: we expect R26 to contain the activation and R27 to contain the context +; entry pointer. 
+; + +vmmxcng: mflr r21 ; Save the return point + bl EXT(fpu_save) ; Save any floating point context + bl EXT(vec_save) ; Save any vector point context + + lis r10,hi16(EXT(per_proc_info)) ; Get top of first per_proc + li r8,PP_FPU_THREAD ; Index to FPU owner + ori r10,r10,lo16(EXT(per_proc_info)) ; Get bottom of first per_proc + lis r6,hi16(EXT(real_ncpus)) ; Get number of CPUs + li r7,0 ; Get set to clear + ori r6,r6,lo16(EXT(real_ncpus)) ; Get number of CPUs + li r9,PP_VMX_THREAD ; Index to vector owner + lwz r6,0(r6) ; Get the actual CPU count + +vmmrt1: lwarx r3,r8,r10 ; Get FPU owner + cmplw r3,r26 ; Do we own it? + bne vmmrt2 ; Nope... + stwcx. r7,r8,r10 ; Clear it + bne- vmmrt1 ; Someone else diddled, try again.... + +vmmrt2: lwarx r3,r9,r10 ; Get vector owner + cmplw r3,r26 ; Do we own it? + bne vmmxnvec ; Nope... + stwcx. r7,r9,r10 ; Clear it + bne- vmmrt2 ; Someone else diddled, try again.... + +vmmxnvec: addic. r6,r6,-1 ; Done with all CPUs? + addi r10,r10,ppSize ; On to the next + bgt vmmrt1 ; Do all processors... + +; +; At this point, the FP and Vector states for the current activation +; are saved and not live on any processor. Also, they should be the +; only contexts on the activation. Note that because we are currently +; taking the cowardly way out and insuring that no contexts are live, +; we do not need to worry about the CPU fields. +; + + lwz r8,ACT_MACT_FPU(r26) ; Get the FPU savearea + lwz r9,ACT_MACT_VMX(r26) ; Get the vector savearea + lwz r10,vmmFPU_pcb(r27) ; Get the FPU savearea + lwz r11,vmmVMX_pcb(r27) ; Get the vector savearea + li r7,0 ; Clear this + mtlr r21 ; Restore the return + stw r10,ACT_MACT_FPU(r26) ; Set the FPU savearea + stw r11,ACT_MACT_VMX(r26) ; Set the vector savearea + stw r8,vmmFPU_pcb(r27) ; Set the FPU savearea + stw r9,vmmVMX_pcb(r27) ; Set the vector savearea + stw r7,ACT_MACT_FPUlvl(r26) ; Make sure the level is clear + stw r7,ACT_MACT_VMXlvl(r26) ; Make sure the level is clear + + mr. 
r8,r8 ; Do we have any old floating point context? + lwz r7,savexfpscrpad(r30) ; Get first part of latest fpscr + lwz r9,savexfpscr(r30) ; Get second part of the latest fpscr + beq- xcngnold ; Nope... + stw r7,savefpscrpad(r8) ; Set first part of fpscr + stw r9,savefpscr(r8) ; Set fpscr + +xcngnold: mr. r10,r10 ; Any new context? + li r7,0 ; Assume no FP + li r9,0 ; Assume no FP + beq- xcngnnew ; Nope... + lwz r7,savefpscrpad(r10) ; Get first part of latest fpscr + lwz r9,savefpscr(r10) ; Get second part of the latest fpscr + +xcngnnew: stw r7,savexfpscrpad(r30) ; Set the fpsc + stw r9,savexfpscr(r30) ; Set the fpscr + blr ; Return... + +; +; Here is where we exit from vmm mode. We do this on any kind of exception. +; Interruptions (decrementer, external, etc.) are another story though. +; These we just pass through. We also switch back explicity when requested. +; This will happen in response to a timer pop and some kinds of ASTs. +; +; Inputs: +; R3 = activation +; R4 = savearea +; + + .align 5 + .globl EXT(vmm_exit) + +LEXT(vmm_exit) + + lwz r2,vmmCEntry(r3) ; Get the context that is active + lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy + lwz r11,ACT_MACT_SPF(r3) ; Get the special flags + lwz r19,vmmFlags(r2) ; Get the status flags + mr r16,r3 ; R16 is safe to use for the activation address + + rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits + li r0,0 ; Get a zero + rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf + lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation + rlwinm r11,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag + stw r0,vmmCEntry(r16) ; Clear pointer to active context + stw r19,vmmFlags(r2) ; Set the status flags + mfsprg r10,0 ; Get the per_proc block + stw r11,ACT_MACT_SPF(r16) ; Get the special flags + stw r11,spcFlags(r10) ; Set per_proc copy of the special flags + + mr r26,r16 ; Save the activation pointer + mr r27,r2 ; Save 
the context entry + + bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator + + bl vmmxcng ; Exchange the vector and floating point contexts + + mr r2,r27 ; Restore + lwz r5,vmmContextKern(r2) ; Get the context area address + mr r3,r16 ; Restore activation address + stw r19,vmmStat(r5) ; Save the changed and popped flags + bl swapCtxt ; Exchange the VM context for the emulator one + stw r8,saver3(r30) ; Set the return code as the return value also + b EXT(retFromVM) ; Go back to handler... + + +; +; Here is where we force exit from vmm mode. We do this when as +; part of termination and is used to insure that we are not executing +; in an alternate context. Because this is called from C we need to save +; all non-volatile registers. +; +; Inputs: +; R3 = activation +; R4 = user savearea +; Interruptions disabled +; + + .align 5 + .globl EXT(vmm_force_exit) + +LEXT(vmm_force_exit) + + stwu r1,-(FM_ALIGN(20*4)+FM_SIZE)(r1) ; Get enough space for the registers + mflr r0 ; Save the return + stmw r13,FM_ARG0(r1) ; Save all non-volatile registers + stw r0,(FM_ALIGN(20*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return + + lwz r2,vmmCEntry(r3) ; Get the context that is active + lwz r11,ACT_MACT_SPF(r3) ; Get the special flags + lwz r19,vmmFlags(r2) ; Get the status flags + lwz r12,ACT_VMMAP(r3) ; Get the VM_MAP for this guy + + rlwimi r19,r11,floatCngbit-vmmFloatCngdb,vmmFloatCngdb,vmmVectCngdb ; Shift and insert changed bits + mr r26,r3 ; Save the activation pointer + rlwimi r11,r19,vmmSpfSaveb,floatCngbit,vectorCngbit ; Restore the saved part of the spf + li r0,0 ; Get a zero + rlwinm r9,r11,0,runningVMbit+1,runningVMbit-1 ; Clear the "in VM" flag + cmplw r9,r11 ; Check if we were in a vm + lwz r3,VMMAP_PMAP(r12) ; Get the pmap for the activation + beq- vfeNotRun ; We were not in a vm.... 
+ stw r0,vmmCEntry(r26) ; Clear pointer to active context + mfsprg r10,0 ; Get the per_proc block + stw r9,ACT_MACT_SPF(r26) ; Get the special flags + stw r9,spcFlags(r10) ; Set per_proc copy of the special flags + + mr r27,r2 ; Save the context entry + mr r30,r4 ; Save the savearea + + bl EXT(hw_set_user_space_dis) ; Swap the address spaces back to the emulator + + bl vmmxcng ; Exchange the vector and floating point contexts + + lwz r5,vmmContextKern(r27) ; Get the context area address + stw r19,vmmStat(r5) ; Save the changed and popped flags + bl swapCtxt ; Exchange the VM context for the emulator one + + li r8,kVmmReturnNull ; Set a null return when we force an intercept + stw r8,saver3(r30) ; Set the return code as the return value also + + +vfeNotRun: lmw r13,FM_ARG0(r1) ; Restore all non-volatile registers + lwz r1,0(r1) ; Pop the stack + lwz r0,FM_LR_SAVE(r1) ; Get the return address + mtlr r0 ; Set return + blr + +; +; Note: we will not do any DCBTs to the savearea. It was just stored to a few cycles ago and should +; still be in the cache. Note also that the context area registers map identically to the savearea. +; +; NOTE: we do not save any of the non-volatile registers through this swap code +; NOTE NOTE: R16 is important to save!!!! +; NOTE: I am too dumb to figure out a faster way to swap 5 lines of memory. So I go for +; the simple way + + .align 5 + +swapCtxt: addi r6,r5,vmm_proc_state ; Point to the state + li r25,32 ; Get a cache size increment + addi r4,r30,savesrr0 ; Point to the start of the savearea + dcbt 0,r6 ; Touch in the first line of the context area + + lwz r14,saveexception(r30) ; Get the exception code + lwz r7,savesrr0(r4) ; First line of savearea + lwz r8,savesrr1(r4) + lwz r9,saver0(r4) + cmplwi cr1,r14,T_SYSTEM_CALL ; Are we switching because of a system call? 
+ lwz r10,saver1(r4) + lwz r11,saver2(r4) + lwz r12,saver3(r4) + lwz r13,saver4(r4) + lwz r14,saver5(r4) + + dcbt r25,r6 ; Touch second line of context area + addi r25,r25,32 ; Bump + + lwz r15,savesrr0(r6) ; First line of context + lis r22,hi16(MSR_IMPORT_BITS) ; Get the MSR bits that are controllable by user + lwz r23,savesrr1(r6) + ori r22,r25,lo16(MSR_IMPORT_BITS) ; Get the rest of the MSR bits that are controllable by user + lwz r17,saver0(r6) + lwz r18,saver1(r6) + and r23,r23,r22 ; Keep only the controllable bits + lwz r19,saver2(r6) + oris r23,r23,hi16(MSR_EXPORT_MASK_SET) ; Force on the required bits + lwz r20,saver3(r6) + ori r23,r23,lo16(MSR_EXPORT_MASK_SET) ; Force on the other required bits + lwz r21,saver4(r6) + lwz r22,saver5(r6) + + dcbt r25,r6 ; Touch third line of context area + addi r25,r25,32 ; Bump (r25 is 64 now) + + stw r7,savesrr0(r6) ; Save emulator context into the context area + stw r8,savesrr1(r6) + stw r9,saver0(r6) + stw r10,saver1(r6) + stw r11,saver2(r6) + stw r12,saver3(r6) + stw r13,saver4(r6) + stw r14,saver5(r6) + +; +; Save the first 3 parameters if we are an SC (we will take care of the last later) +; + bne+ cr1,swapnotsc ; Skip next if not an SC exception... + stw r12,return_params+0(r5) ; Save the first return + stw r13,return_params+4(r5) ; Save the second return + stw r14,return_params+8(r5) ; Save the third return + +swapnotsc: stw r15,savesrr0(r4) ; Save vm context into the savearea + stw r23,savesrr1(r4) + stw r17,saver0(r4) + stw r18,saver1(r4) + stw r19,saver2(r4) + stw r20,saver3(r4) + stw r21,saver4(r4) + stw r22,saver5(r4) + +; +; The first hunk is swapped, do the rest in a loop +; + li r23,4 ; Four more hunks to swap + + +swaploop: addi r4,r4,32 ; Bump savearea pointer + addi r6,r6,32 ; Bump context area pointer + addic. 
r23,r23,-1 ; Count down + dcbt r25,r6 ; Touch 4th, 5th, and 6th and 7th which are extra + + lwz r7,0(r4) ; Read savearea + lwz r8,4(r4) + lwz r9,8(r4) + lwz r10,12(r4) + lwz r11,16(r4) + lwz r12,20(r4) + lwz r13,24(r4) + lwz r14,28(r4) + + lwz r15,0(r6) ; Read vm context + lwz r24,4(r6) + lwz r17,8(r6) + lwz r18,12(r6) + lwz r19,16(r6) + lwz r20,20(r6) + lwz r21,24(r6) + lwz r22,28(r6) + + stw r7,0(r6) ; Write context + stw r8,4(r6) + stw r9,8(r6) + stw r10,12(r6) + stw r11,16(r6) + stw r12,20(r6) + stw r13,24(r6) + stw r14,28(r6) + + stw r15,0(r4) ; Write vm context + stw r24,4(r4) + stw r17,8(r4) + stw r18,12(r4) + stw r19,16(r4) + stw r20,20(r4) + stw r21,24(r4) + stw r22,28(r4) + + bgt+ swaploop ; Do it all... + +; +; Cobble up the exception return code and save any specific return values +; + + lwz r7,saveexception(r30) ; Pick up the exception code + rlwinm r8,r7,30,24,31 ; Convert exception to return code + cmplwi r7,T_DATA_ACCESS ; Was this a DSI? + stw r8,return_code(r5) ; Save the exit code + cmplwi cr1,r7,T_INSTRUCTION_ACCESS ; Exiting because of an ISI? + beq+ swapDSI ; Yeah... + cmplwi r7,T_ALIGNMENT ; Alignment exception? + beq+ cr1,swapISI ; We had an ISI... + cmplwi cr1,r7,T_SYSTEM_CALL ; Exiting because of an system call? + beq+ swapDSI ; An alignment exception looks like a DSI... + beq+ cr1,swapSC ; We had a system call... + + blr ; Return... + +; +; Set exit returns for a DSI or alignment exception +; + +swapDSI: lwz r10,savedar(r30) ; Get the DAR + lwz r7,savedsisr(r30) ; and the DSISR + stw r10,return_params+0(r5) ; Save DAR as first return parm + stw r7,return_params+4(r5) ; Save DSISR as second return parm + blr ; Return... 
+ +; +; Set exit returns for a ISI +; + +swapISI: lwz r7,savesrr1+vmm_proc_state(r5) ; Get the SRR1 value + lwz r10,savesrr0+vmm_proc_state(r5) ; Get the PC as failing address + rlwinm r7,r7,0,1,4 ; Save the bits that match the DSISR + stw r10,return_params+0(r5) ; Save PC as first return parm + stw r7,return_params+4(r5) ; Save the pseudo-DSISR as second return parm + blr ; Return... + +; +; Set exit returns for a system call (note: we did the first 3 earlier) +; Do we really need to pass parameters back here???? +; + +swapSC: lwz r10,saver6+vmm_proc_state(r5) ; Get the fourth paramter + stw r10,return_params+12(r5) ; Save it + blr ; Return... + diff --git a/osfmk/ppc/xpr.h b/osfmk/ppc/xpr.h new file mode 100644 index 000000000..133cb3a1b --- /dev/null +++ b/osfmk/ppc/xpr.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +/* + * Machine dependent module for the XPR tracing facility. 
+ */ + +#define XPR_TIMESTAMP (0) diff --git a/osfmk/profiling/Makefile b/osfmk/profiling/Makefile new file mode 100644 index 000000000..878005ec7 --- /dev/null +++ b/osfmk/profiling/Makefile @@ -0,0 +1,51 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + machine + +INSTINC_SUBDIRS_PPC = \ + ppc + +INSTINC_SUBDIRS_I386 = \ + i386 + +EXPINC_SUBDIRS = \ + machine + +EXPINC_SUBDIRS_PPC = \ + ppc + +EXPINC_SUBDIRS_I386 = \ + i386 + +MIG_DEFS = \ + +MIG_HDRS = \ + +DATAFILES = \ + profile-internal.h profile-mk.h profile-kgmon.c \ + ${MIG_DEFS} + +MIGINCLUDES = \ + +INSTALL_MI_LIST = ${DATAFILES} ${_MIG_HDRS_} ${MIGINCLUDES} + +INSTALL_MI_DIR = profile + +EXPORT_MI_LIST = ${DATAFILES} ${_MIG_HDRS_} ${MIGINCLUDES} + +EXPORT_MI_DIR = profile + +.ORDER: ${_MIG_HDRS_} ${MIGINCLUDES} + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/profiling/i386/Makefile b/osfmk/profiling/i386/Makefile new file mode 100644 index 000000000..14a36756a --- /dev/null +++ b/osfmk/profiling/i386/Makefile @@ -0,0 +1,24 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + profile-md.h profile-md.c profile-asm.s + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = profile/i386 + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = profile/i386 + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/profiling/i386/profile-asm.s b/osfmk/profiling/i386/profile-asm.s new file mode 100644 index 000000000..d4272c053 --- /dev/null +++ b/osfmk/profiling/i386/profile-asm.s @@ -0,0 +1,1449 @@ +/* + * 
Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.1 1997/09/22 17:41:24 barbou + * MP+RT: protect cpu_number() usage against preemption. + * [97/09/16 barbou] + * + * Revision 1.1.5.1 1995/01/06 19:53:37 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:20 dwm] + * + * Revision 1.1.2.2 1994/05/16 19:19:17 meissner + * Add support for converting 64-bit integers to a decimal string. + * Use the correct address (selfpc) when creating the prof header for gprof. + * [1994/04/28 21:44:59 meissner] + * + * Revision 1.1.2.1 1994/04/08 17:51:42 meissner + * Make most stats 64 bits, except for things like memory allocation. + * [1994/04/02 14:58:21 meissner] + * + * Do not provide old mcount support under MK or server. + * Fixup stats size so it is the same as in profile-md.h. 
+ * [1994/03/29 21:00:03 meissner] + * + * Use faster sequence for overflow addition. + * Keep {dummy,prof,gprof,old}_mcount counts in double precision. + * Add kernel NCPUS > 1 support. + * [1994/03/17 20:13:23 meissner] + * + * Add gprof/prof overflow support + * [1994/03/17 14:56:44 meissner] + * + * Add size of histogram counters & unused fields to profile_profil struct + * [1994/02/17 21:41:44 meissner] + * + * Add too_low/too_high to profile_stats. + * [1994/02/16 22:38:11 meissner] + * + * Bump # allocation contexts to 32 from 16. + * Store unique ptr address in gprof function header structure for _profile_reset. + * Add new fields from profile-{internal,md}.h. + * Align loop looking for an unlocked acontext. + * Count # times a locked context block was found. + * Expand copyright. + * [1994/02/07 12:40:56 meissner] + * + * Keep track of the number of times the kernel overflows the HISTCOUNTER counter. + * [1994/02/03 20:13:23 meissner] + * + * Add stats for {user,kernel,idle} mode in the kernel. + * [1994/02/03 15:17:22 meissner] + * + * No change. + * [1994/02/03 00:58:49 meissner] + * + * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. + * [1994/02/01 12:03:56 meissner] + * + * Move _mcount_ptr to be closer to other data declarations. + * Add text_len to profile_profil structure for mk. + * Split records_cnt into prof_cnt/gprof_cnt. + * Always update prof_cnt/gprof_cnt even if not DO_STATS. + * Add current/max cpu indicator to stats for kernel. + * [1994/01/28 23:33:20 meissner] + * + * Don't do 4+Lgotoff(lab), use separate labels. + * Change GPROF_HASH_SHIFT to 9 (from 8). + * [1994/01/26 22:00:59 meissner] + * + * Fixup NO_RECURSIVE_ALLOC to do byte loads, not word loads. + * [1994/01/26 20:30:57 meissner] + * + * Move callback pointers into separate allocation context. + * Add size fields for other structures to profile-vars. + * Allocate string table as one large allocation. + * Rewrite old mcount code once again. 
+ * Use multiply to make hash value, not divide. + * Hash table is now a power of two. + * [1994/01/26 20:23:32 meissner] + * + * Cut hash table size back to 16189. + * Add size fields to all structures. + * Add major/minor version number to _profile_md. + * Move allocation context block pointers to _profile_vars. + * Move _gprof_dummy after _profile_md. + * New function header code now falls into hash an element + * to avoid having the hash code duplicated or use a macro. + * Fix bug in _gprof_mcount with ELF shared libraries. + * [1994/01/25 01:45:59 meissner] + * + * Move init functions to C code; rearrange profil varaibles. + * [1994/01/22 01:11:14 meissner] + * + * No change. + * [1994/01/20 20:56:43 meissner] + * + * Fixup copyright. + * [1994/01/18 23:07:39 meissner] + * + * Make flags byte-sized. + * Add have_bb flag. + * Add init_format flag. + * Always put word size multipler first in .space. + * [1994/01/18 21:57:14 meissner] + * + * Fix elfpic problems in last change. + * [1994/01/16 14:04:26 meissner] + * + * Rewrite gprof caching to be faster & not need a lock. + * Record prof information for gprof too. + * Bump reserved stats to 64. + * Bump up hash table size 30799. + * Conditionally use lock prefix. + * Change most #ifdef's to #if. + * DEBUG_PROFILE turns on stack frames now. + * Conditionally add externs to gprof to determine where time is spent. + * Prof_mcount uses xchgl to update function pointer. + * [1994/01/15 18:40:33 meissner] + * + * Fix a comment. + * Separate statistics from debugging (though debugging turns it on). + * Remove debug code that traces each gprof request. + * [1994/01/15 00:59:02 meissner] + * + * Move max hash bucket calculation into _gprof_write & put info in stats structure. + * [1994/01/04 16:15:14 meissner] + * + * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to. + * [1994/01/04 15:37:44 meissner] + * + * Add more allocation memory pools (gprof function hdrs in particular). 
+ * For prof, gprof arc, and gprof function hdrs, allocate 16 pages at a time. + * Add major/minor version numbers to _profile_{vars,stats}. + * Add # profil buckets field to _profil_stats. + * [19 + * + * $EndLog$ + */ + +/* + * Common 386 profiling module that is shared between the kernel, mach + * servers, and the user space library. Each environment includes + * this file. + */ + + .file "profile-asm.s" + +#include + +#include + +/* + * By default, debugging turns on statistics and stack frames. + */ + +#if DEBUG_PROFILE +#ifndef DO_STATS +#define DO_STATS 1 +#endif + +#ifndef STACK_FRAMES +#define STACK_FRAMES 1 +#endif +#endif + +#ifndef OLD_MCOUNT +#define OLD_MCOUNT 0 /* do not compile old code for mcount */ +#endif + +#ifndef DO_STATS +#define DO_STATS 1 /* compile in statistics code */ +#endif + +#ifndef DO_LOCK +#define DO_LOCK 0 /* use lock; in front of increments */ +#endif + +#ifndef LOCK_STATS +#define LOCK_STATS DO_LOCK /* update stats with lock set */ +#endif + +#ifndef STACK_FRAMES +#define STACK_FRAMES 0 /* create stack frames for debugger */ +#endif + +#ifndef NO_RECURSIVE_ALLOC +#define NO_RECURSIVE_ALLOC 0 /* check for recursive allocs */ + /* (not thread safe!) */ +#endif + +#ifndef MARK_GPROF +#define MARK_GPROF 0 /* add externs for gprof profiling */ +#endif + +#ifndef OVERFLOW +#define OVERFLOW 1 /* add overflow checking support */ +#endif + +/* + * Turn on the use of the lock prefix if desired. 
+ */ + +#ifndef LOCK +#if DO_LOCK +#define LOCK lock; +#else +#define LOCK +#endif +#endif + +#ifndef SLOCK +#if LOCK_STATS +#define SLOCK LOCK +#else +#define SLOCK +#endif +#endif + +/* + * Double or single precision incrementing + */ + +#if OVERFLOW +#define DINC(mem) LOCK addl $1,mem; LOCK adcl $0,4+mem +#define DINC2(mem,mem2) LOCK addl $1,mem; LOCK adcl $0,mem2 +#define SDINC(mem) SLOCK addl $1,mem; SLOCK adcl $0,4+mem +#define SDADD(val,mem) SLOCK addl val,mem; SLOCK adcl $0,4+mem +#define SDADDNEG(val,mem) SLOCK subl val,mem; SLOCK adcl $0,4+mem +#define SDSUB(val,mem) SLOCK subl val,mem; SLOCK sbbl $0,4+mem + +#else +#define DINC(mem) LOCK incl mem +#define DINC2(mem,mem2) LOCK incl mem +#define SDINC(mem) SLOCK incl mem +#define SDADD(val,mem) SLOCK addl val,mem +#define SDADDNEG(val,mem) SLOCK subl val,mem +#define SDSUB(val,mem) SLOCK subl val,mem +#endif + +/* + * Stack frame support so that debugger traceback works. + */ + +#if STACK_FRAMES +#define ENTER pushl %ebp; movl %esp,%ebp +#define LEAVE0 popl %ebp +#define Estack 4 +#else +#define ENTER +#define LEAVE0 +#define Estack 0 +#endif + +/* + * Gprof profiling. + */ + +#if MARK_GPROF +#define MARK(name) .globl EXT(name); ELF_FUNC(EXT(name)); ELF_SIZE(EXT(name),0); LEXT(name) +#else +#define MARK(name) +#endif + +/* + * Profiling allocation context block. Each time memory is needed, the + * allocator loops until it finds an unlocked context block, and allocates + * from that block. If no context blocks are available, a new memory + * pool is allocated, and added to the end of the chain. + */ + +LCL(A_next) = 0 /* next context block link (must be 0) */ +LCL(A_plist) = LCL(A_next)+4 /* head of page list for context block */ +LCL(A_lock) = LCL(A_plist)+4 /* lock word */ +LCL(A_size) = LCL(A_lock)+4 /* size of context block */ + +#define A_next LCL(A_next) +#define A_plist LCL(A_plist) +#define A_lock LCL(A_lock) +#define A_size LCL(A_size) + +/* + * Allocation contexts used. 
+ */ + +LCL(C_prof) = 0 /* prof records */ +LCL(C_gprof) = 1 /* gprof arc records */ +LCL(C_gfunc) = 2 /* gprof function headers */ +LCL(C_misc) = 3 /* misc. allocations */ +LCL(C_profil) = 4 /* memory for profil */ +LCL(C_dci) = 5 /* memory for dci */ +LCL(C_bb) = 6 /* memory for basic blocks */ +LCL(C_callback) = 7 /* memory for callbacks */ +LCL(C_max) = 32 /* # allocation contexts */ + +#define C_prof LCL(C_prof) +#define C_gprof LCL(C_gprof) +#define C_gfunc LCL(C_gfunc) +#define C_max LCL(C_max) + +/* + * Linked list of memory allocations. + */ + +LCL(M_first) = 0 /* pointer to first byte available */ +LCL(M_ptr) = LCL(M_first)+4 /* pointer to next available byte */ +LCL(M_next) = LCL(M_ptr)+4 /* next page allocated */ +LCL(M_nfree) = LCL(M_next)+4 /* # bytes available */ +LCL(M_nalloc) = LCL(M_nfree)+4 /* # bytes allocated */ +LCL(M_num) = LCL(M_nalloc)+4 /* # allocations done on this page */ +LCL(M_size) = LCL(M_num)+4 /* size of page header */ + +#define M_first LCL(M_first) +#define M_ptr LCL(M_ptr) +#define M_next LCL(M_next) +#define M_nfree LCL(M_nfree) +#define M_nalloc LCL(M_nalloc) +#define M_num LCL(M_num) +#define M_size LCL(M_size) + +/* + * Prof data type. + */ + +LCL(P_addr) = 0 /* function address */ +LCL(P_count) = LCL(P_addr)+4 /* # times function called */ +LCL(P_overflow) = LCL(P_count)+4 /* # times count overflowed */ +LCL(P_size) = LCL(P_overflow)+4 /* size of prof data type */ + +#define P_addr LCL(P_addr) +#define P_count LCL(P_count) +#define P_overflow LCL(P_overflow) +#define P_size LCL(P_size) + +/* + * Gprof data type. 
+ */ + +LCL(G_next) = 0 /* next hash link (must be 0) */ +LCL(G_frompc) = LCL(G_next)+4 /* caller's caller */ +LCL(G_selfpc) = LCL(G_frompc)+4 /* caller's address */ +LCL(G_count) = LCL(G_selfpc)+4 /* # times arc traversed */ +LCL(G_overflow) = LCL(G_count)+4 /* # times count overflowed */ +LCL(G_size) = LCL(G_overflow)+4 /* size of gprof data type */ + +#define G_next LCL(G_next) +#define G_frompc LCL(G_frompc) +#define G_selfpc LCL(G_selfpc) +#define G_count LCL(G_count) +#define G_overflow LCL(G_overflow) +#define G_size LCL(G_size) + +/* + * Gprof header. + * + * At least one header is allocated for each unique function that is profiled. + * In order to save time calculating the hash value, the last H_maxcache + * distinct arcs are cached within this structure. Also, to avoid loading + * the GOT when searching the hash table, we copy the hash pointer to this + * structure, so that we only load the GOT when we need to allocate an arc. + */ + +LCL(H_maxcache) = 3 /* # of cache table entries */ +LCL(H_csize) = 4*LCL(H_maxcache) /* size of each cache array */ + +LCL(H_hash_ptr) = 0 /* hash table to use */ +LCL(H_unique_ptr) = LCL(H_hash_ptr)+4 /* function unique pointer */ +LCL(H_prof) = LCL(H_unique_ptr)+4 /* prof statistics */ +LCL(H_cache_ptr) = LCL(H_prof)+P_size /* cache table of element pointers */ +LCL(H_size) = LCL(H_cache_ptr)+LCL(H_csize) /* size of gprof header type */ + +#define H_maxcache LCL(H_maxcache) +#define H_csize LCL(H_csize) +#define H_hash_ptr LCL(H_hash_ptr) +#define H_unique_ptr LCL(H_unique_ptr) +#define H_prof LCL(H_prof) +#define H_cache_ptr LCL(H_cache_ptr) +#define H_size LCL(H_size) + +/* + * Number of digits needed to write a 64 bit number including trailing null. + * (rounded up to be divisable by 4). + */ + +#define N_digit 24 + + + .data + +/* + * Default gprof hash table size, which must be a power of two. + * The shift specifies how many low order bits to eliminate when + * calculating the hash value. 
+ */ + +#ifndef GPROF_HASH_SIZE +#define GPROF_HASH_SIZE 16384 +#endif + +#ifndef GPROF_HASH_SHIFT +#define GPROF_HASH_SHIFT 9 +#endif + +#define GPROF_HASH_MASK (GPROF_HASH_SIZE-1) + +DATA(_profile_hash_size) + .long GPROF_HASH_SIZE +ENDDATA(_profile_hash_size) + + + +/* + * Pointer that the compiler uses to call to the appropriate mcount function. + */ + +DATA(_mcount_ptr) + .long EXT(_dummy_mcount) +ENDDATA(_mcount_ptr) + +/* + * Global profile variables. The structure that accesses this in C is declared + * in profile-internal.h. All items in .data that follow this will be used as + * one giant record, and each unique machine, thread, kgmon output or what have + * you will create a separate instance. Typically there is only one instance + * which will be the memory laid out below. + */ + +LCL(var_major_version) = 0 /* major version number */ +LCL(var_minor_version) = LCL(var_major_version)+4 /* minor version number */ +LCL(vars_size) = LCL(var_minor_version)+4 /* size of _profile_vars structure */ +LCL(plist_size) = LCL(vars_size)+4 /* size of page_list structure */ +LCL(acontext_size) = LCL(plist_size)+4 /* size of allocation contexts */ +LCL(callback_size) = LCL(acontext_size)+4 /* size of callback structure */ +LCL(type) = LCL(callback_size)+4 /* profile type (gprof, prof) */ +LCL(error_msg) = LCL(type)+4 /* error message for perror */ +LCL(filename) = LCL(error_msg)+4 /* filename to write to */ +LCL(str_ptr) = LCL(filename)+4 /* string table pointer */ +LCL(stream) = LCL(str_ptr)+4 /* stdio stream to write to */ +LCL(diag_stream) = LCL(stream)+4 /* stdio stream to write diagnostics to */ +LCL(fwrite_func) = LCL(diag_stream)+4 /* function like fwrite to output bytes */ +LCL(page_size) = LCL(fwrite_func)+4 /* page size in bytes */ +LCL(str_bytes) = LCL(page_size)+4 /* # bytes in string table */ +LCL(str_total) = LCL(str_bytes)+4 /* # total bytes allocated for string table */ +LCL(clock_ticks) = LCL(str_total)+4 /* # clock ticks per second */ + + /* profil 
variables */ +LCL(profil_start) = LCL(clock_ticks)+4 /* start of profil variables */ +LCL(lowpc) = LCL(clock_ticks)+4 /* lowest address */ +LCL(highpc) = LCL(lowpc)+4 /* highest address */ +LCL(text_len) = LCL(highpc)+4 /* highpc-lowpc */ +LCL(profil_len) = LCL(text_len)+4 /* size of profil buffer */ +LCL(counter_size) = LCL(profil_len)+4 /* size of indivual counter */ +LCL(scale) = LCL(counter_size)+4 /* scale factor */ +LCL(profil_unused) = LCL(scale)+4 /* unused fields */ +LCL(profil_end) = LCL(profil_unused)+4*8 /* end of profil_info structure */ +LCL(profil_buf) = LCL(profil_end) /* buffer for profil */ + + /* Output selection func ptrs */ +LCL(output_init) = LCL(profil_buf)+4 /* Initialization */ +LCL(output) = LCL(output_init)+4 /* Write out profiling info */ +LCL(output_ptr) = LCL(output)+4 /* Output specific data ptr */ + + /* Memory allocation support */ +LCL(acontext) = LCL(output_ptr)+4 /* pointers to allocation context blocks */ + +LCL(bogus_func) = LCL(acontext)+4*C_max /* function to use if gprof arc is bad */ +LCL(vars_unused) = LCL(bogus_func)+4 /* future growth */ + + /* flags */ +LCL(init) = LCL(vars_unused)+4*63 /* whether initializations were done */ +LCL(active) = LCL(init)+1 /* whether profiling is active */ +LCL(do_profile) = LCL(active)+1 /* whether to do profiling */ +LCL(use_dci) = LCL(do_profile)+1 /* whether to use DCI */ +LCL(use_profil) = LCL(use_dci)+1 /* whether to use profil */ +LCL(recursive_alloc) = LCL(use_profil)+1 /* alloc called recursively */ +LCL(output_uarea) = LCL(recursive_alloc)+1 /* output uarea */ +LCL(output_stats) = LCL(output_uarea)+1 /* output stats info */ +LCL(output_clock) = LCL(output_stats)+1 /* output the clock ticks */ +LCL(multiple_sections) = LCL(output_clock)+1 /* multiple sections are ok */ +LCL(have_bb) = LCL(multiple_sections)+1 /* whether we have basic block data */ +LCL(init_format) = LCL(have_bb)+1 /* The output format has been chosen */ +LCL(debug) = LCL(init_format)+1 /* Whether or not we are 
debugging */ +LCL(check_funcs) = LCL(debug)+1 /* Whether to check functions for validity */ +LCL(flag_unused) = LCL(check_funcs)+1 /* unused flags */ +LCL(end_of_vars) = LCL(flag_unused)+62 /* size of machine independent vars */ + +/* + * Data that contains profile statistics that can be dumped out + * into the {,g}mon.out file. This is defined in profile-md.h. + */ + +LCL(stats_start) = LCL(end_of_vars) /* start of stats substructure */ +LCL(stats_major_version)= LCL(stats_start) /* major version number */ +LCL(stats_minor_version)= LCL(stats_major_version)+4 /* minor version number */ +LCL(stats_size) = LCL(stats_minor_version)+4 /* size of _profile_stats structure */ +LCL(profil_buckets) = LCL(stats_size)+4 /* # profil buckets */ +LCL(my_cpu) = LCL(profil_buckets)+4 /* identify which cpu/thread this is */ +LCL(max_cpu) = LCL(my_cpu)+4 /* identify which cpu/thread this is */ +LCL(prof_records) = LCL(max_cpu)+4 /* # of profiled functions */ +LCL(gprof_records) = LCL(prof_records)+4 /* # of gprof arcs created */ +LCL(hash_buckets) = LCL(gprof_records)+4 /* max gprof hash buckets on a chain */ +LCL(bogus_count) = LCL(hash_buckets)+4 /* # bogus functions found in gprof */ + +LCL(cnt) = LCL(bogus_count)+4 /* # of _{prof,gprof}_mcount calls */ +LCL(dummy) = LCL(cnt)+8 /* # of _dummy_mcount calls */ +LCL(old_mcount) = LCL(dummy)+8 /* # of old mcount calls */ +LCL(hash_search) = LCL(old_mcount)+8 /* # gprof hash buckets searched */ +LCL(hash_num) = LCL(hash_search)+8 /* # times hash table searched */ +LCL(user_ticks) = LCL(hash_num)+8 /* # ticks within user space */ +LCL(kernel_ticks) = LCL(user_ticks)+8 /* # ticks within kernel space */ +LCL(idle_ticks) = LCL(kernel_ticks)+8 /* # ticks cpu was idle */ +LCL(overflow_ticks) = LCL(idle_ticks)+8 /* # ticks where histcounter overflowed */ +LCL(acontext_locked) = LCL(overflow_ticks)+8 /* # times an acontext was locked */ +LCL(too_low) = LCL(acontext_locked)+8 /* # times histogram tick too low */ +LCL(too_high) = 
LCL(too_low)+8 /* # times histogram tick too high */
+LCL(prof_overflow) = LCL(too_high)+8 /* # times the prof count field overflowed */
+LCL(gprof_overflow) = LCL(prof_overflow)+8 /* # times the gprof count field overflowed */
+LCL(num_alloc) = LCL(gprof_overflow)+8 /* # allocations in each context */
+LCL(bytes_alloc) = LCL(num_alloc)+4*C_max /* bytes allocated in each context */
+LCL(num_context) = LCL(bytes_alloc)+4*C_max /* # allocation context blocks */
+LCL(wasted) = LCL(num_context)+4*C_max /* # bytes wasted */
+LCL(overhead) = LCL(wasted)+4*C_max /* # bytes of overhead */
+LCL(buckets) = LCL(overhead)+4*C_max /* # hash indexes that have n buckets */
+LCL(cache_hits1) = LCL(buckets)+4*10 /* # gprof cache hits in bucket #1 */
+LCL(cache_hits2) = LCL(cache_hits1)+8 /* # gprof cache hits in bucket #2 */
+LCL(cache_hits3) = LCL(cache_hits2)+8 /* # gprof cache hits in bucket #3 */
+LCL(stats_unused) = LCL(cache_hits3)+8 /* reserved for future use */
+LCL(stats_end) = LCL(stats_unused)+8*64 /* end of stats structure */
+
+/*
+ * Machine dependent variables that no C file should access (except for
+ * profile-md.c). 
+ */
+
+LCL(md_start) = LCL(stats_end) /* start of md structure */
+LCL(md_major_version) = LCL(md_start) /* major version number */
+LCL(md_minor_version) = LCL(md_major_version)+4 /* minor version number */
+LCL(md_size) = LCL(md_minor_version)+4 /* size of _profile_md structure */
+LCL(hash_ptr) = LCL(md_size)+4 /* gprof hash pointer */
+LCL(hash_size) = LCL(hash_ptr)+4 /* gprof hash size */
+LCL(num_cache) = LCL(hash_size)+4 /* # of cache entries */
+LCL(save_mcount_ptr) = LCL(num_cache)+4 /* save for mcount_ptr when suspending profiling */
+LCL(mcount_ptr_ptr) = LCL(save_mcount_ptr)+4 /* pointer to _mcount_ptr */
+LCL(dummy_ptr) = LCL(mcount_ptr_ptr)+4 /* pointer to gprof_dummy */
+LCL(alloc_pages) = LCL(dummy_ptr)+4 /* allocate more memory */
+LCL(num_buffer) = LCL(alloc_pages)+4 /* buffer to convert 64 bit ints in */
+LCL(md_unused) = LCL(num_buffer)+N_digit /* unused fields */
+LCL(md_end) = LCL(md_unused)+4*58 /* end of md structure */
+LCL(total_size) = LCL(md_end) /* size of entire structure */
+
+/*
+ * Size of the entire _profile_vars structure.
+ */
+
+DATA(_profile_size)
+ .long LCL(total_size)
+ENDDATA(_profile_size)
+
+/*
+ * Size of the statistics substructure.
+ */
+
+DATA(_profile_stats_size)
+ .long LCL(stats_end)-LCL(stats_start)
+ENDDATA(_profile_stats_size)
+
+/*
+ * Size of the profil info substructure.
+ */
+
+DATA(_profile_profil_size)
+ .long LCL(profil_end)-LCL(profil_start)
+ENDDATA(_profile_profil_size)
+
+/*
+ * Size of the machine dependent substructure.
+ */
+
+DATA(_profile_md_size)
+ .long LCL(md_end)-LCL(md_start)
+ENDDATA(_profile_md_size)
+
+/*
+ * Whether statistics are supported. 
+ */ + +DATA(_profile_do_stats) + .long DO_STATS +ENDDATA(_profile_do_stats) + + .text + +/* + * Map LCL(xxx) -> into simpler names + */ + +#define V_acontext LCL(acontext) +#define V_acontext_locked LCL(acontext_locked) +#define V_alloc_pages LCL(alloc_pages) +#define V_bogus_func LCL(bogus_func) +#define V_bytes_alloc LCL(bytes_alloc) +#define V_cache_hits1 LCL(cache_hits1) +#define V_cache_hits2 LCL(cache_hits2) +#define V_cache_hits3 LCL(cache_hits3) +#define V_cnt LCL(cnt) +#define V_cnt_overflow LCL(cnt_overflow) +#define V_check_funcs LCL(check_funcs) +#define V_dummy LCL(dummy) +#define V_dummy_overflow LCL(dummy_overflow) +#define V_dummy_ptr LCL(dummy_ptr) +#define V_gprof_records LCL(gprof_records) +#define V_hash_num LCL(hash_num) +#define V_hash_ptr LCL(hash_ptr) +#define V_hash_search LCL(hash_search) +#define V_mcount_ptr_ptr LCL(mcount_ptr_ptr) +#define V_num_alloc LCL(num_alloc) +#define V_num_buffer LCL(num_buffer) +#define V_num_context LCL(num_context) +#define V_old_mcount LCL(old_mcount) +#define V_old_mcount_overflow LCL(old_mcount_overflow) +#define V_overhead LCL(overhead) +#define V_page_size LCL(page_size) +#define V_prof_records LCL(prof_records) +#define V_recursive_alloc LCL(recursive_alloc) +#define V_wasted LCL(wasted) + +/* + * Loadup %ebx with the address of _profile_vars. On a multiprocessor, this + * will loads up the appropriate machine's _profile_vars structure. + * For ELF shared libraries, rely on the fact that we won't need a GOT, + * except to load this pointer. + */ + +#if defined (MACH_KERNEL) && NCPUS > 1 +#define ASSEMBLER +#if AT386 +#include +#endif + +#if SQT +#include +#endif + +#ifndef CPU_NUMBER +#error "Cannot determine how to get CPU number" +#endif + +#define Vload CPU_NUMBER(%ebx); movl EXT(_profile_vars_cpus)(,%ebx,4),%ebx + +#else /* not kernel or not multiprocessor */ +#define Vload Gload; Egaddr(%ebx,_profile_vars) +#endif + + +/* + * Allocate some memory for profiling. 
This memory is guaranteed to + * be zero. + * %eax contains the memory size requested and will contain ptr on exit. + * %ebx contains the address of the appropriate profile_vars structure. + * %ecx is the number of the memory pool to allocate from (trashed on exit). + * %edx is trashed. + * %esi is preserved. + * %edi is preserved. + * %ebp is preserved. + */ + +Entry(_profile_alloc_asm) + ENTER + pushl %esi + pushl %edi + + movl %ecx,%edi /* move context number to saved reg */ + +#if NO_RECURSIVE_ALLOC + movb $-1,%cl + xchgb %cl,V_recursive_alloc(%ebx) + cmpb $0,%cl + je LCL(no_recurse) + + int $3 + + .align ALIGN +LCL(no_recurse): +#endif + + leal V_acontext(%ebx,%edi,4),%ecx + + /* Loop looking for a free allocation context. */ + /* %eax = size, %ebx = vars addr, %ecx = ptr to allocation context to try */ + /* %edi = context number */ + + .align ALIGN +LCL(alloc_loop): + movl %ecx,%esi /* save ptr in case no more contexts */ + movl A_next(%ecx),%ecx /* next context block */ + cmpl $0,%ecx + je LCL(alloc_context) /* need to allocate a new context block */ + + movl $-1,%edx + xchgl %edx,A_lock(%ecx) /* %edx == 0 if context available */ + +#if DO_STATS + SDADDNEG(%edx,V_acontext_locked(%ebx)) /* increment counter if lock was held */ +#endif + + cmpl $0,%edx + jne LCL(alloc_loop) /* go back if this context block is not available */ + + /* Allocation context found (%ecx), now allocate. */ + movl A_plist(%ecx),%edx /* pointer to current block */ + cmpl $0,%edx /* first allocation? 
*/ + je LCL(alloc_new) + + cmpl %eax,M_nfree(%edx) /* see if we have enough space */ + jl LCL(alloc_new) /* jump if not enough space */ + + /* Allocate from local block (and common exit) */ + /* %eax = bytes to allocate, %ebx = GOT, %ecx = context, %edx = memory block */ + /* %edi = context number */ + + .align ALIGN +LCL(alloc_ret): + +#if DO_STATS + SLOCK incl V_num_alloc(%ebx,%edi,4) /* update global counters */ + SLOCK addl %eax,V_bytes_alloc(%ebx,%edi,4) + SLOCK subl %eax,V_wasted(%ebx,%edi,4) +#endif + + movl M_ptr(%edx),%esi /* pointer return value */ + subl %eax,M_nfree(%edx) /* decrement bytes remaining */ + addl %eax,M_nalloc(%edx) /* increment bytes allocated */ + incl M_num(%edx) /* increment # allocations */ + addl %eax,M_ptr(%edx) /* advance pointer */ + movl $0,A_lock(%ecx) /* unlock context block */ + movl %esi,%eax /* return pointer */ + +#if NO_RECURSIVE_ALLOC + movb $0,V_recursive_alloc(%ebx) +#endif + + popl %edi + popl %esi + LEAVE0 + ret /* return to the caller */ + + /* Allocate space in whole number of pages */ + /* %eax = bytes to allocate, %ebx = vars address, %ecx = context */ + /* %edi = context number */ + + .align ALIGN +LCL(alloc_new): + pushl %eax /* save regs */ + pushl %ecx + movl V_page_size(%ebx),%edx + addl $(M_size-1),%eax /* add in overhead size & subtract 1 */ + decl %edx /* page_size - 1 */ + addl %edx,%eax /* round up to whole number of pages */ + notl %edx + andl %edx,%eax + leal -M_size(%eax),%esi /* save allocation size */ + pushl %eax /* argument to _profile_alloc_pages */ + call *V_alloc_pages(%ebx) /* allocate some memory */ + addl $4,%esp /* pop off argument */ + +#if DO_STATS + SLOCK addl %esi,V_wasted(%ebx,%edi,4) /* udpate global counters */ + SLOCK addl $M_size,V_overhead(%ebx,%edi,4) +#endif + + popl %ecx /* context block */ + movl %eax,%edx /* memory block pointer */ + movl %esi,M_nfree(%edx) /* # free bytes */ + addl $M_size,%eax /* bump past overhead */ + movl A_plist(%ecx),%esi /* previous memory block or 0 
*/ + movl %eax,M_first(%edx) /* first space available */ + movl %eax,M_ptr(%edx) /* current address available */ + movl %esi,M_next(%edx) /* next memory block allocated */ + movl %edx,A_plist(%ecx) /* update current page list */ + popl %eax /* user size request */ + jmp LCL(alloc_ret) /* goto common return code */ + + /* Allocate a context header in addition to memory block header + data */ + /* %eax = bytes to allocate, %ebx = GOT, %esi = ptr to store context ptr */ + /* %edi = context number */ + + .align ALIGN +LCL(alloc_context): + pushl %eax /* save regs */ + pushl %esi + movl V_page_size(%ebx),%edx + addl $(A_size+M_size-1),%eax /* add in overhead size & subtract 1 */ + decl %edx /* page_size - 1 */ + addl %edx,%eax /* round up to whole number of pages */ + notl %edx + andl %edx,%eax + leal -A_size-M_size(%eax),%esi /* save allocation size */ + pushl %eax /* argument to _profile_alloc_pages */ + call *V_alloc_pages(%ebx) /* allocate some memory */ + addl $4,%esp /* pop off argument */ + +#if DO_STATS + SLOCK incl V_num_context(%ebx,%edi,4) /* bump # context blocks */ + SLOCK addl %esi,V_wasted(%ebx,%edi,4) /* update global counters */ + SLOCK addl $(A_size+M_size),V_overhead(%ebx,%edi,4) +#endif + + movl %eax,%ecx /* context pointer */ + leal A_size(%eax),%edx /* memory block pointer */ + movl %esi,M_nfree(%edx) /* # free bytes */ + addl $(A_size+M_size),%eax /* bump past overhead */ + movl %eax,M_first(%edx) /* first space available */ + movl %eax,M_ptr(%edx) /* current address available */ + movl $0,M_next(%edx) /* next memory block allocated */ + movl %edx,A_plist(%ecx) /* head of memory block list */ + movl $1,A_lock(%ecx) /* set lock */ + popl %esi /* ptr to store context block link */ + movl %ecx,%eax /* context pointer temp */ + xchgl %eax,A_next(%esi) /* link into chain */ + movl %eax,A_next(%ecx) /* add links in case of threading */ + popl %eax /* user size request */ + jmp LCL(alloc_ret) /* goto common return code */ + +END(_profile_alloc_asm) + +/* 
+ * C callable version of the profile memory allocator.
+ * extern void *_profile_alloc(struct profile_vars *, size_t, acontext_type_t);
+*/
+
+Entry(_profile_alloc)
+ ENTER
+ pushl %ebx
+ movl 12+Estack(%esp),%eax /* memory size */
+ movl 8+Estack(%esp),%ebx /* profile_vars address */
+ addl $3,%eax /* round up to word boundary */
+ movl 16+Estack(%esp),%ecx /* which memory pool to allocate from */
+ andl $0xfffffffc,%eax
+ call EXT(_profile_alloc_asm)
+ popl %ebx
+ LEAVE0
+ ret
+END(_profile_alloc)
+
+
+/*
+ * Dummy mcount routine that just returns.
+ *
+ * +-------------------------------+
+ * | |
+ * | |
+ * | caller's caller stack, |
+ * | saved registers, params. |
+ * | |
+ * | |
+ * +-------------------------------+
+ * | caller's caller return addr. |
+ * +-------------------------------+
+ * esp --> | caller's return address |
+ * +-------------------------------+
+ *
+ * edx --> function unique label
+ */
+
+Entry(_dummy_mcount)
+ ENTER
+
+#if DO_STATS
+ pushl %ebx
+ MP_DISABLE_PREEMPTION(%ebx)
+ Vload
+ SDINC(V_dummy(%ebx))
+ MP_ENABLE_PREEMPTION(%ebx)
+ popl %ebx
+#endif
+
+ LEAVE0
+ ret
+END(_dummy_mcount)
+
+
+/*
+ * Entry point for System V based profiling, count how many times each function
+ * is called. The function label is passed in %edx, and the top two words on
+ * the stack are the caller's address, and the caller's return address.
+ *
+ * +-------------------------------+
+ * | |
+ * | |
+ * | caller's caller stack, |
+ * | saved registers, params. |
+ * | |
+ * | |
+ * +-------------------------------+
+ * | caller's caller return addr. |
+ * +-------------------------------+
+ * esp --> | caller's return address |
+ * +-------------------------------+
+ *
+ * edx --> function unique label
+ *
+ * We don't worry about the possibility of two threads calling
+ * the same function for the first time simultaneously. 
If that + * happens, two records will be created, and one of the records + * address will be stored in in the function unique label (which + * is aligned by the compiler, so we don't have to watch out for + * crossing page/cache boundaries). + */ + +Entry(_prof_mcount) + ENTER + +#if DO_STATS + pushl %ebx + MP_DISABLE_PREEMPTION(%ebx) + Vload + SDINC(V_cnt(%ebx)) +#endif + + movl (%edx),%eax /* initialized? */ + cmpl $0,%eax + je LCL(pnew) + + DINC2(P_count(%eax),P_overflow(%eax)) /* bump function count (double precision) */ + +#if DO_STATS + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx +#endif + + LEAVE0 + ret + + .align ALIGN +LCL(pnew): + +#if !DO_STATS + pushl %ebx + MP_DISABLE_PREEMPTION(%ebx) + Vload +#endif + + SLOCK incl V_prof_records(%ebx) + pushl %edx + movl $P_size,%eax /* allocation size */ + movl $C_prof,%ecx /* allocation pool */ + call EXT(_profile_alloc_asm) /* allocate a new record */ + popl %edx + + movl Estack+4(%esp),%ecx /* caller's address */ + movl %ecx,P_addr(%eax) + movl $1,P_count(%eax) /* call count */ + xchgl %eax,(%edx) /* update function header */ + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx + LEAVE0 + ret + +END(_prof_mcount) + + +/* + * Entry point for BSD based graph profiling, count how many times each unique + * call graph (caller + callee) is called. The function label is passed in + * %edx, and the top two words on the stack are the caller's address, and the + * caller's return address. + * + * +-------------------------------+ + * | | + * | | + * | caller's caller stack, | + * | saved registers, params. | + * | | + * | | + * +-------------------------------+ + * | caller's caller return addr. | + * +-------------------------------+ + * esp --> | caller's return address | + * +-------------------------------+ + * + * edx --> function unqiue label + * + * We don't worry about the possibility about two threads calling the same + * function simulataneously. 
If that happens, two records will be created, and + * one of the records address will be stored in in the function unique label + * (which is aligned by the compiler). + * + * By design, the gprof header is not locked. Each of the cache pointers is + * always a valid pointer (possibily to a null record), and if another thread + * comes in and modifies the pointer, it does so automatically with a simple store. + * Since all arcs are in the hash table, the caches are just to avoid doing + * a multiplication in the common case, and if they don't match, the arcs will + * still be found. + */ + +Entry(_gprof_mcount) + + ENTER + movl Estack+4(%esp),%ecx /* caller's caller address */ + +#if DO_STATS + pushl %ebx + MP_DISABLE_PREEMPTION(%ebx) + Vload + SDINC(V_cnt(%ebx)) /* bump profile call counter (double int) */ +#endif + + movl (%edx),%eax /* Gprof header allocated? */ + cmpl $0,%eax + je LCL(gnew) /* skip if first call */ + + DINC2(H_prof+P_count(%eax),H_prof+P_overflow(%eax)) /* bump function count */ + + /* See if this call arc is the same as the last time */ +MARK(_gprof_mcount_cache1) + movl H_cache_ptr(%eax),%edx /* last arc searched */ + cmpl %ecx,G_frompc(%edx) /* skip if not equal */ + jne LCL(gcache2) + + /* Same as last time, increment and return */ + + DINC2(G_count(%edx),G_overflow(%edx)) /* bump arc count */ + +#if DO_STATS + SDINC(V_cache_hits1(%ebx)) /* update counter */ + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx +#endif + + LEAVE0 + ret + + /* Search second cache entry */ + /* %eax = gprof func header, %ebx = vars address if DO_STATS, %ecx = caller's caller */ + /* %edx = first arc searched */ + /* %ebx if DO_STATS pushed on stack */ + + .align ALIGN +MARK(_gprof_mcount_cache2) +LCL(gcache2): + pushl %esi /* get a saved register */ + movl H_cache_ptr+4(%eax),%esi /* 2nd arc to be searched */ + cmpl %ecx,G_frompc(%esi) /* skip if not equal */ + jne LCL(gcache3) + + /* Element found, increment, reset last arc searched and return */ + + 
DINC2(G_count(%esi),G_overflow(%esi)) /* bump arc count */ + + movl %esi,H_cache_ptr+0(%eax) /* swap 1st and 2nd cached arcs */ + popl %esi + movl %edx,H_cache_ptr+4(%eax) + +#if DO_STATS + SDINC(V_cache_hits2(%ebx)) /* update counter */ + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx +#endif + + LEAVE0 + ret + + /* Search third cache entry */ + /* %eax = gprof func header, %ebx = vars address if DO_STATS, %ecx = caller's caller */ + /* %edx = first arc searched, %esi = second arc searched */ + /* %esi, %ebx if DO_STATS pushed on stack */ + + .align ALIGN +MARK(_gprof_mcount_cache3) +LCL(gcache3): + pushl %edi + movl H_cache_ptr+8(%eax),%edi /* 3rd arc to be searched */ + cmpl %ecx,G_frompc(%edi) /* skip if not equal */ + jne LCL(gnocache) + + /* Element found, increment, reset last arc searched and return */ + + DINC2(G_count(%edi),G_overflow(%edi)) /* bump arc count */ + + movl %edi,H_cache_ptr+0(%eax) /* make this 1st cached arc */ + movl %esi,H_cache_ptr+8(%eax) + movl %edx,H_cache_ptr+4(%eax) + popl %edi + popl %esi + +#if DO_STATS + SDINC(V_cache_hits3(%ebx)) /* update counter */ + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx +#endif + + LEAVE0 + ret + + /* No function context, allocate a new context */ + /* %ebx is the variables address if DO_STATS */ + /* %ecx is the caller's caller's address */ + /* %edx is the unique function pointer */ + /* %ebx if DO_STATS pushed on stack */ + + .align ALIGN +MARK(_gprof_mcount_new) +LCL(gnew): + pushl %esi + pushl %edi + +#if !DO_STATS + pushl %ebx /* Address of vars needed for alloc */ + MP_DISABLE_PREEMPTION(%ebx) + Vload /* stats already loaded address */ +#endif + + SLOCK incl V_prof_records(%ebx) + movl %edx,%esi /* save unique function ptr */ + movl %ecx,%edi /* and caller's caller address */ + movl $H_size,%eax /* memory block size */ + movl $C_gfunc,%ecx /* gprof function header memory pool */ + call EXT(_profile_alloc_asm) + + movl V_hash_ptr(%ebx),%ecx /* copy hash_ptr to func header */ + movl V_dummy_ptr(%ebx),%edx /* 
dummy cache entry */ + movl %ecx,H_hash_ptr(%eax) + movl %edx,H_cache_ptr+0(%eax) /* store dummy cache ptrs */ + movl %edx,H_cache_ptr+4(%eax) + movl %edx,H_cache_ptr+8(%eax) + movl %esi,H_unique_ptr(%eax) /* remember function unique ptr */ + movl Estack+12(%esp),%ecx /* caller's address */ + movl $1,H_prof+P_count(%eax) /* function called once so far */ + movl %ecx,H_prof+P_addr(%eax) /* set up prof information */ + movl %eax,(%esi) /* update context block address */ + movl %edi,%ecx /* caller's caller address */ + movl %edx,%esi /* 2nd cached arc */ + +#if !DO_STATS + popl %ebx +#endif + + /* Fall through to add element to the hash table. This may involve */ + /* searching a few hash table elements that don't need to be searched */ + /* since we have a new element, but it allows the hash table function */ + /* to be specified in only one place */ + + /* Didn't find entry in cache, search the global hash table */ + /* %eax = gprof func header, %ebx = vars address if DO_STATS */ + /* %ecx = caller's caller */ + /* %edx, %esi = cached arcs that were searched */ + /* %edi, %esi, %ebx if DO_STATS pushed on stack */ + + .align ALIGN +MARK(_gprof_mcount_hash) +LCL(gnocache): + + pushl %esi /* save 2nd arc searched */ + pushl %edx /* save 1st arc searched */ + movl %eax,%esi /* save gprof func header */ + +#if DO_STATS + SDINC(V_hash_num(%ebx)) + movl Estack+20(%esp),%edi /* caller's address */ +#else + movl Estack+16(%esp),%edi /* caller's address */ +#endif + movl %ecx,%eax /* caller's caller address */ + imull %edi,%eax /* multiply to get hash */ + movl H_hash_ptr(%esi),%edx /* hash pointer */ + shrl $GPROF_HASH_SHIFT,%eax /* eliminate low order bits */ + andl $GPROF_HASH_MASK,%eax /* mask to get hash value */ + leal 0(%edx,%eax,4),%eax /* pointer to hash bucket */ + movl %eax,%edx /* save hash bucket address */ + + /* %eax = old arc, %ebx = vars address if DO_STATS, %ecx = caller's caller */ + /* %edx = hash bucket address, %esi = gfunc ptr, %edi = caller's addr */ + 
/* 2 old arcs, %edi, %esi, %ebx if DO_STATS pushed on stack */ + + .align ALIGN +LCL(ghash): + movl G_next(%eax),%eax /* get next hash element */ + cmpl $0,%eax /* end of line? */ + je LCL(ghashnew) /* skip if allocate new hash */ + +#if DO_STATS + SDINC(V_hash_search(%ebx)) +#endif + + cmpl G_selfpc(%eax),%edi /* loop back if not one we want */ + jne LCL(ghash) + + cmpl G_frompc(%eax),%ecx /* loop back if not one we want */ + jne LCL(ghash) + + /* Found an entry, increment count, set up for caching, and return */ + /* %eax = arc, %ebx = vars address if DO_STATS, %esi = func header */ + /* 2 old arcs, %edi, %esi, %ebx if DO_STATS pushed on stack */ + + DINC2(G_count(%eax),G_overflow(%eax)) /* bump arc count */ + + popl %ecx /* previous 1st arc searched */ + movl %eax,H_cache_ptr+0(%esi) /* this element is now 1st arc */ + popl %edi /* previous 2nd arc searched */ + movl %ecx,H_cache_ptr+4(%esi) /* new 2nd arc to be searched */ + movl %edi,H_cache_ptr+8(%esi) /* new 3rd arc to be searched */ + popl %edi + popl %esi + +#if DO_STATS + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx +#endif + + LEAVE0 + ret /* return to user */ + + /* Allocate new arc */ + /* %eax = old arc, %ebx = vars address if DO_STATS, %ecx = caller's caller */ + /* %edx = hash bucket address, %esi = gfunc ptr, %edi = caller's addr */ + /* 2 old arcs, %edi, %esi, %ebx if DO_STATS pushed on stack */ + + .align ALIGN +MARK(_gprof_mcount_hashnew) +LCL(ghashnew): + +#if !DO_STATS + pushl %ebx /* load address of vars if we haven't */ + MP_DISABLE_PREEMPTION(%ebx) + Vload /* already done so */ +#endif + + SLOCK incl V_gprof_records(%ebx) + pushl %edx + movl %ecx,%edi /* save caller's caller */ + movl $G_size,%eax /* arc size */ + movl $C_gprof,%ecx /* gprof memory pool */ + call EXT(_profile_alloc_asm) + popl %edx + + movl $1,G_count(%eax) /* set call count */ + movl Estack+20(%esp),%ecx /* caller's address */ + movl %edi,G_frompc(%eax) /* caller's caller */ + movl %ecx,G_selfpc(%eax) + +#if !DO_STATS + popl 
%ebx /* release %ebx if no stats */ +#endif + + movl (%edx),%ecx /* first hash bucket */ + movl %ecx,G_next(%eax) /* update link */ + movl %eax,%ecx /* copy for xchgl */ + xchgl %ecx,(%edx) /* add to hash linked list */ + movl %ecx,G_next(%eax) /* update in case list changed */ + + popl %ecx /* previous 1st arc searched */ + popl %edi /* previous 2nd arc searched */ + movl %eax,H_cache_ptr+0(%esi) /* this element is now 1st arc */ + movl %ecx,H_cache_ptr+4(%esi) /* new 2nd arc to be searched */ + movl %edi,H_cache_ptr+8(%esi) /* new 3rd arc to be searched */ + + popl %edi + popl %esi + +#if DO_STATS + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx +#endif + + LEAVE0 + ret /* return to user */ + +END(_gprof_mcount) + + +/* + * This function assumes that neither the caller or it's caller + * has not omitted the frame pointer in order to get the caller's + * caller. The stack looks like the following at the time of the call: + * + * +-------------------------------+ + * | | + * | | + * | caller's caller stack, | + * | saved registers, params. | + * | | + * | | + * +-------------------------------+ + * | caller's caller return addr. | + * +-------------------------------+ + * fp --> | previous frame pointer | + * +-------------------------------+ + * | | + * | caller's stack, saved regs, | + * | params. | + * | | + * +-------------------------------+ + * sp --> | caller's return address | + * +-------------------------------+ + * + * Recent versions of the compiler put the address of the pointer + * sized word in %edx. Previous versions did not, but this code + * does not support them. + */ + +/* + * Note that OSF/rose blew defining _mcount, since it prepends leading + * underscores, and _mcount didn't have a second leading underscore. However, + * some of the kernel/server functions 'know' that mcount has a leading + * underscore, so we satisfy both camps. 
+ */ + +#if OLD_MCOUNT + .globl mcount + .globl _mcount + ELF_FUNC(mcount) + ELF_FUNC(_mcount) + .align FALIGN +_mcount: +mcount: + + pushl %ebx + MP_DISABLE_PREEMPTION(%ebx) + Vload + +#if DO_STATS + SDINC(V_old_mcount(%ebx)) +#endif + + /* In calling the functions, we will actually leave 1 extra word on the */ + /* top of the stack, but generated code will not notice, since the function */ + /* uses a frame pointer */ + + movl V_mcount_ptr_ptr(%ebx),%ecx /* address of mcount_ptr */ + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx + movl 4(%ebp),%eax /* caller's caller return address */ + xchgl %eax,(%esp) /* push & get return address */ + pushl %eax /* push return address */ + jmp *(%ecx) /* go to profile the function */ + +End(mcount) +End(_mcount) +#endif + + +#if !defined(KERNEL) && !defined(MACH_KERNEL) + +/* + * Convert a 64-bit integer to a string. + * Arg #1 is a pointer to a string (at least 24 bytes) or NULL + * Arg #2 is the low part of the 64-bit integer. + * Arg #3 is the high part of the 64-bit integer. + */ + +Entry(_profile_cnt_to_decimal) + ENTER + pushl %ebx + pushl %esi + pushl %edi + movl Estack+16(%esp),%ebx /* pointer or null */ + movl Estack+20(%esp),%edi /* low part of number */ + movl $10,%ecx /* divisor */ + cmpl $0,%ebx /* skip if pointer ok */ + jne LCL(cvt_nonnull) + + MP_DISABLE_PREEMPTION(%ebx) + Vload /* get _profile_vars address */ + leal V_num_buffer(%ebx),%ebx /* temp buffer to use */ + + .align ALIGN +LCL(cvt_nonnull): + addl $(N_digit-1),%ebx /* point string at end */ + movb $0,0(%ebx) /* null terminate string */ + +#if OVERFLOW + movl Estack+24(%esp),%esi /* high part of number */ + cmpl $0,%esi /* any thing left in high part? 
*/ + je LCL(cvt_low) + + .align ALIGN +LCL(cvt_high): + movl %esi,%eax /* calculate high/10 & high%10 */ + xorl %edx,%edx + divl %ecx + movl %eax,%esi + + movl %edi,%eax /* calculate (low + (high%10)*2^32) / 10 */ + divl %ecx + movl %eax,%edi + + decl %ebx /* decrement string pointer */ + addl $48,%edx /* convert from 0..9 -> '0'..'9' */ + movb %dl,0(%ebx) /* store digit in string */ + cmpl $0,%esi /* any thing left in high part? */ + jne LCL(cvt_high) + +#endif /* OVERFLOW */ + + .align ALIGN +LCL(cvt_low): + movl %edi,%eax /* get low part into %eax */ + + .align ALIGN +LCL(cvt_low2): + xorl %edx,%edx /* 0 */ + divl %ecx /* calculate next digit */ + decl %ebx /* decrement string pointer */ + addl $48,%edx /* convert from 0..9 -> '0'..'9' */ + movb %dl,0(%ebx) /* store digit in string */ + cmpl $0,%eax /* any more digits to convert? */ + jne LCL(cvt_low2) + + movl %ebx,%eax /* return value */ + popl %edi + popl %esi + MP_ENABLE_PREEMPTION(%ebx) + popl %ebx + LEAVE0 + ret + +END(_profile_cnt_to_decimal) + +#endif diff --git a/osfmk/profiling/i386/profile-md.c b/osfmk/profiling/i386/profile-md.c new file mode 100644 index 000000000..80016e279 --- /dev/null +++ b/osfmk/profiling/i386/profile-md.c @@ -0,0 +1,1192 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/01/06 19:53:45 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:24 dwm] + * + * Revision 1.1.2.2 1994/05/16 19:19:22 meissner + * Protect against hash_ptr being null in _profile_update_stats. + * [1994/05/16 17:23:53 meissner] + * + * Remove _profile_cnt_to_hex, _profile_strbuffer. + * _profile_print_stats now takes const pointers. + * Use the new 64-bit arithmetic support instead of converting to double. + * Add _profile_merge_stats to merge statistics. + * [1994/04/28 21:45:04 meissner] + * + * If MACH_ASSERT is on in server or kernel, turn on profiling printfs. + * Print out fractional digits for average # of hash searches in stats. + * Update overflow_ticks for # times the lprofil counter overflows into high word. + * Don't make sizes of C/asm structures a const array, since it has pointers in it. + * Add support for converting 64 bit ints to a string. + * Use PROF_CNT_TO_DECIMAL where possible instead of PROF_CNT_TO_LDOUBLE. + * [1994/04/20 15:47:02 meissner] + * + * Revision 1.1.2.1 1994/04/08 17:51:51 meissner + * no change + * [1994/04/08 02:11:40 meissner] + * + * Make most stats 64 bits, except for things like memory allocation. 
+ * [1994/04/02 14:58:28 meissner] + * + * Add some printfs under #idef DEBUG_PROFILE. + * [1994/03/29 21:00:11 meissner] + * + * Further changes for gprof/prof overflow support. + * Add overflow support for {gprof,prof,old,dummy}_mcount counters. + * [1994/03/17 20:13:31 meissner] + * + * Add gprof/prof overflow support + * [1994/03/17 14:56:51 meissner] + * + * Use memset instead of bzero. + * [1994/02/28 23:56:10 meissner] + * + * Add size of histogram counters & unused fields to profile_profil struct + * [1994/02/17 21:41:50 meissner] + * + * Allocate slop space for server in addition to microkernel. + * Add 3rd argument to _profile_print_stats for profil info. + * Print # histogram ticks too low/too high for server/mk. + * [1994/02/16 22:38:18 meissner] + * + * Calculate percentages for # of hash buckets. + * [1994/02/11 16:52:04 meissner] + * + * Print stats as an unsigned number. + * [1994/02/07 18:47:05 meissner] + * + * For kernel and server, include not . + * Always do assert on comparing asm vs. C structure sizes. + * Add _profile_reset to reset profiling information. + * Add _profile_update_stats to update the statistics. + * Move _gprof_write code that updates hash stats to _profile_update_stats. + * Don't allocate space for basic block support just yet. + * Add support for range checking the gprof arc {from,self}pc addresses. + * _profile_debug now calls _profile_update_stats. + * Print how many times the acontext was locked. + * If DEBUG_PROFILE is defined, set pv->debug to 1. + * Expand copyright. + * [1994/02/07 12:41:03 meissner] + * + * Keep track of the number of times the kernel overflows the HISTCOUNTER counter. + * [1994/02/03 20:13:28 meissner] + * + * Add stats for {user,kernel,idle} mode in the kernel. + * [1994/02/03 15:17:31 meissner] + * + * Print unused stats in hex as well as decimal. + * [1994/02/03 14:52:20 meissner] + * + * _profile_print_stats no longer takes profile_{vars,md} pointer arguments. 
+ * If stream is NULL, _profile_print_stats will use stdout. + * Separate _profile_update_stats from _gprof_write. + * [1994/02/03 00:58:55 meissner] + * + * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. + * [1994/02/01 12:04:01 meissner] + * + * Add allocation flag to _profile_md_init. + * Fix core dumps in _profile_print_stats if no profile_vars ptr passed. + * Print numbers in 12 columns, not 8. + * Print my_cpu/max_cpu if max_cpu != 0. + * Make allocations print like other stats. + * Use ACONTEXT_FIRST to start loop on, not ACONTEXT_PROF. + * [1994/01/28 23:33:26 meissner] + * + * Move callback pointers into separate allocation context. + * Add size fields for other structures to profile-vars. + * [1994/01/26 20:23:37 meissner] + * + * Allocate initial memory at startup. + * Print structure sizes and version number when printing stats. + * Initialize size fields and version numbers. + * Allocation context pointers moved to _profile_vars. + * [1994/01/25 01:46:04 meissner] + * + * Move init code here from assembly language. + * [1994/01/22 01:13:21 meissner] + * + * Include instead of "profile-md.h". + * [1994/01/20 20:56:49 meissner] + * + * Fixup copyright. + * [1994/01/18 23:08:02 meissner] + * + * Rename profile.h -> profile-md.h. + * [1994/01/18 19:44:57 meissner] + * + * Write out stats unused fields. + * Make _prof_write write out the prof stats gprof collects. + * [1994/01/15 18:40:37 meissner] + * + * Remove debug code called from profile-asm.s. + * Always print out the # of profil buckets. + * [1994/01/15 00:59:06 meissner] + * + * Fix typo. + * [1994/01/04 16:34:46 meissner] + * + * Move max hash bucket calculation into _gprof_write & put info in stats structure. + * [1994/01/04 16:15:17 meissner] + * + * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to. 
+ * [1994/01/04 15:37:46 meissner] + * + * Correctly handle case where more than one allocation context was + * allocated due to multiple threads. + * Cast stats to long for output. + * Print number of profil buckets field in _profile_stats. + * Add support for GFUNC allocation context. + * [1994/01/04 14:26:00 meissner] + * + * CR 10198 - Initial version. + * [1994/01/01 22:44:10 meissne + * + * $EndLog$ + */ + +#include +#include +#include + +#if defined(MACH_KERNEL) || defined(_KERNEL) + +#include +#if MACH_ASSERT && !defined(DEBUG_PROFILE) +#define DEBUG_PROFILE 1 +#endif + +extern int printf(const char *, ...); +extern void panic(const char *); +#else +#include +#define panic(str) exit(1) +#endif + +#ifndef PROFILE_NUM_FUNCS +#define PROFILE_NUM_FUNCS 2000 +#endif + +#ifndef PROFILE_NUM_ARCS +#define PROFILE_NUM_ARCS 8000 +#endif + +/* + * Information passed on from profile-asm.s + */ + +extern int _profile_do_stats; +extern size_t _profile_size; +extern size_t _profile_stats_size; +extern size_t _profile_md_size; +extern size_t _profile_profil_size; +extern size_t _profile_hash_size; + +/* + * All profiling variables, and a dummy gprof record. + */ + +struct profile_vars _profile_vars = { 0 }; +struct hasharc _gprof_dummy = { 0 }; + +/* + * Forward references. + */ + +static void *_profile_md_acontext(struct profile_vars *pv, + void *ptr, + size_t len, + acontext_type_t type); + +static void _profile_reset_alloc(struct profile_vars *, + acontext_type_t); + +extern void _bogus_function(void); + +/* + * Function to set up the initial allocation for a context block. 
+ */ + +static void * +_profile_md_acontext(struct profile_vars *pv, + void *ptr, + size_t len, + acontext_type_t type) +{ + struct memory { + struct alloc_context context; + struct page_list plist; + int data[1]; + }; + + struct memory *mptr = (struct memory *)ptr; + struct alloc_context *context = &mptr->context; + struct page_list *plist = &mptr->plist; + +#ifdef DEBUG_PROFILE + _profile_printf("_profile_md_acontext: pv= 0x%lx, ptr= 0x%lx, len= %6ld, type= %d\n", + (long)pv, + (long)ptr, + (long)len, + (int)type); +#endif + + /* Fill in context block header */ + context->next = pv->acontext[type]; + context->plist = plist; + context->lock = 0; + + /* Fill in first page list information */ + plist->ptr = plist->first = (void *)&mptr->data[0]; + plist->next = (struct page_list *)0; + plist->bytes_free = len - ((char *)plist->ptr - (char *)ptr); + plist->bytes_allocated = 0; + plist->num_allocations = 0; + + /* Update statistics */ + pv->stats.num_context[type]++; + pv->stats.wasted[type] += plist->bytes_free; + pv->stats.overhead[type] += len - plist->bytes_free; + + /* And setup context block */ + pv->acontext[type] = context; + + return (void *)((char *)ptr+len); +} + + +/* + * Machine dependent function to initialize things. 
+ */ + +void +_profile_md_init(struct profile_vars *pv, + profile_type_t type, + profile_alloc_mem_t alloc_mem) +{ + size_t page_size = pv->page_size; + size_t arc_size; + size_t func_size; + size_t misc_size; + size_t hash_size; + size_t extra_arc_size; + size_t extra_func_size; + size_t callback_size = page_size; + void *ptr; + acontext_type_t ac; + int i; + static struct { + size_t c_size; /* size C thinks structure is */ + size_t *asm_size_ptr; /* pointer to size asm thinks struct is */ + const char *name; /* structure name */ + } sizes[] = { + { sizeof(struct profile_profil), &_profile_profil_size, "profile_profil" }, + { sizeof(struct profile_stats), &_profile_stats_size, "profile_stats" }, + { sizeof(struct profile_md), &_profile_md_size, "profile_md" }, + { sizeof(struct profile_vars), &_profile_size, "profile_vars" }}; + +#ifdef DEBUG_PROFILE + _profile_printf("_profile_md_init: pv = 0x%lx, type = %d, alloc = %d\n", + (long) pv, + (int)type, + (int)alloc_mem); +#endif + + for (i = 0; i < sizeof (sizes) / sizeof(sizes[0]); i++) { + if (sizes[i].c_size != *sizes[i].asm_size_ptr) { + _profile_printf("C thinks struct %s is %ld bytes, asm thinks it is %ld bytes\n", + sizes[i].name, + (long)sizes[i].c_size, + (long)*sizes[i].asm_size_ptr); + + panic(sizes[i].name); + } + } + + /* Figure out which function will handle compiler generated profiling */ + if (type == PROFILE_GPROF) { + pv->md.save_mcount_ptr = _gprof_mcount; + + } else if (type == PROFILE_PROF) { + pv->md.save_mcount_ptr = _prof_mcount; + + } else { + pv->md.save_mcount_ptr = _dummy_mcount; + } + + pv->vars_size = sizeof(struct profile_vars); + pv->plist_size = sizeof(struct page_list); + pv->acontext_size = sizeof(struct alloc_context); + pv->callback_size = sizeof(struct callback); + pv->major_version = PROFILE_MAJOR_VERSION; + pv->minor_version = PROFILE_MINOR_VERSION; + pv->type = type; + pv->do_profile = 1; + pv->use_dci = 1; + pv->use_profil = 1; + pv->output_uarea = 1; + pv->output_stats = 
(prof_flag_t) _profile_do_stats; + pv->output_clock = 1; + pv->multiple_sections = 1; + pv->init_format = 0; + pv->bogus_func = _bogus_function; + +#ifdef DEBUG_PROFILE + pv->debug = 1; +#endif + + if (!pv->error_msg) { + pv->error_msg = "error in profiling"; + } + + if (!pv->page_size) { + pv->page_size = 4096; + } + + pv->stats.stats_size = sizeof(struct profile_stats); + pv->stats.major_version = PROFILE_MAJOR_VERSION; + pv->stats.minor_version = PROFILE_MINOR_VERSION; + + pv->md.md_size = sizeof(struct profile_md); + pv->md.major_version = PROFILE_MAJOR_VERSION; + pv->md.minor_version = PROFILE_MINOR_VERSION; + pv->md.hash_size = _profile_hash_size; + pv->md.num_cache = MAX_CACHE; + pv->md.mcount_ptr_ptr = &_mcount_ptr; + pv->md.dummy_ptr = &_gprof_dummy; + pv->md.alloc_pages = _profile_alloc_pages; + + /* zero out all allocation context blocks */ + for (ac = ACONTEXT_FIRST; ac < ACONTEXT_MAX; ac++) { + pv->acontext[ac] = (struct alloc_context *)0; + } + + /* Don't allocate memory if not desired */ + if (!alloc_mem) { + return; + } + + /* Allocate some space for the initial allocations */ + switch (type) { + default: + misc_size = page_size; + ptr = _profile_alloc_pages(misc_size + callback_size); + ptr = _profile_md_acontext(pv, ptr, misc_size, ACONTEXT_MISC); + ptr = _profile_md_acontext(pv, ptr, callback_size, ACONTEXT_CALLBACK); + break; + + case PROFILE_GPROF: + +#if defined(MACH_KERNEL) || defined(_KERNEL) + /* + * For the MK & server allocate some slop space now for the + * secondary context blocks in case allocations are done at + * interrupt level when another allocation is being done. This + * is done before the main allocation blocks and will be pushed + * so that it will only be used when the main allocation block + * is locked. 
+ */ + extra_arc_size = 4*page_size; + extra_func_size = 2*page_size; +#else + extra_arc_size = extra_func_size = 0; +#endif + + /* Set up allocation areas */ + arc_size = ROUNDUP(PROFILE_NUM_ARCS * sizeof(struct hasharc), page_size); + func_size = ROUNDUP(PROFILE_NUM_FUNCS * sizeof(struct gfuncs), page_size); + hash_size = _profile_hash_size * sizeof (struct hasharc *); + misc_size = ROUNDUP(hash_size + page_size, page_size); + + ptr = _profile_alloc_pages(arc_size + + func_size + + misc_size + + callback_size + + extra_arc_size + + extra_func_size); + +#if defined(MACH_KERNEL) || defined(_KERNEL) + ptr = _profile_md_acontext(pv, ptr, extra_arc_size, ACONTEXT_GPROF); + ptr = _profile_md_acontext(pv, ptr, extra_func_size, ACONTEXT_GFUNC); +#endif + ptr = _profile_md_acontext(pv, ptr, arc_size, ACONTEXT_GPROF); + ptr = _profile_md_acontext(pv, ptr, func_size, ACONTEXT_GFUNC); + ptr = _profile_md_acontext(pv, ptr, misc_size, ACONTEXT_MISC); + ptr = _profile_md_acontext(pv, ptr, callback_size, ACONTEXT_CALLBACK); + + /* Allocate hash table */ + pv->md.hash_ptr = (struct hasharc **) _profile_alloc(pv, hash_size, ACONTEXT_MISC); + break; + + case PROFILE_PROF: + /* Set up allocation areas */ + func_size = ROUNDUP(PROFILE_NUM_FUNCS * sizeof(struct prof_ext), page_size); + misc_size = page_size; + + ptr = _profile_alloc_pages(func_size + + misc_size + + callback_size); + + ptr = _profile_md_acontext(pv, ptr, func_size, ACONTEXT_PROF); + ptr = _profile_md_acontext(pv, ptr, misc_size, ACONTEXT_MISC); + ptr = _profile_md_acontext(pv, ptr, callback_size, ACONTEXT_CALLBACK); + break; + } +} + + +/* + * Machine dependent functions to start and stop profiling. + */ + +int +_profile_md_start(void) +{ + _mcount_ptr = _profile_vars.md.save_mcount_ptr; + return 0; +} + +int +_profile_md_stop(void) +{ + _mcount_ptr = _dummy_mcount; + return 0; +} + + +/* + * Free up all memory in a memory context block. 
+ */ + +static void +_profile_reset_alloc(struct profile_vars *pv, acontext_type_t ac) +{ + struct alloc_context *aptr; + struct page_list *plist; + + for (aptr = pv->acontext[ac]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; + plist != (struct page_list *)0; + plist = plist->next) { + + plist->ptr = plist->first; + plist->bytes_free += plist->bytes_allocated; + plist->bytes_allocated = 0; + plist->num_allocations = 0; + memset(plist->first, '\0', plist->bytes_allocated); + } + } +} + + +/* + * Reset profiling. Since the only user of this function is the kernel + * and the server, we don't have to worry about other stuff than gprof. + */ + +void +_profile_reset(struct profile_vars *pv) +{ + struct alloc_context *aptr; + struct page_list *plist; + struct gfuncs *gfunc; + + if (pv->active) { + _profile_md_stop(); + } + + /* Reset all function unique pointers back to 0 */ + for (aptr = pv->acontext[ACONTEXT_GFUNC]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; + plist != (struct page_list *)0; + plist = plist->next) { + + for (gfunc = (struct gfuncs *)plist->first; + gfunc < (struct gfuncs *)plist->ptr; + gfunc++) { + + *(gfunc->unique_ptr) = (struct hasharc *)0; + } + } + } + + /* Release memory */ + _profile_reset_alloc(pv, ACONTEXT_GPROF); + _profile_reset_alloc(pv, ACONTEXT_GFUNC); + _profile_reset_alloc(pv, ACONTEXT_PROF); + + memset((void *)pv->profil_buf, '\0', pv->profil_info.profil_len); + memset((void *)pv->md.hash_ptr, '\0', pv->md.hash_size * sizeof(struct hasharc *)); + memset((void *)&pv->stats, '\0', sizeof(pv->stats)); + + pv->stats.stats_size = sizeof(struct profile_stats); + pv->stats.major_version = PROFILE_MAJOR_VERSION; + pv->stats.minor_version = PROFILE_MINOR_VERSION; + + if (pv->active) { + _profile_md_start(); + } +} + + +/* + * Machine dependent function to write out gprof records. 
+ */ + +size_t +_gprof_write(struct profile_vars *pv, struct callback *callback_ptr) +{ + struct alloc_context *aptr; + struct page_list *plist; + size_t bytes = 0; + struct hasharc *hptr; + int i; + + for (aptr = pv->acontext[ACONTEXT_GPROF]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; plist != (struct page_list *)0; plist = plist->next) { + hptr = (struct hasharc *)plist->first; + for (i = 0; i < plist->num_allocations; (i++, hptr++)) { + + struct gprof_arc arc = hptr->arc; + int nrecs = 1 + (hptr->overflow * 2); + int j; + + if (pv->check_funcs) { + if (arc.frompc < pv->profil_info.lowpc || + arc.frompc > pv->profil_info.highpc) { + + arc.frompc = (prof_uptrint_t)pv->bogus_func; + } + + if (arc.selfpc < pv->profil_info.lowpc || + arc.selfpc > pv->profil_info.highpc) { + + arc.selfpc = (prof_uptrint_t)pv->bogus_func; + } + } + + /* For each overflow, emit 2 extra records with the count + set to 0x80000000 */ + for (j = 0; j < nrecs; j++) { + bytes += sizeof (arc); + if ((*pv->fwrite_func)((void *)&arc, + sizeof(arc), + 1, + pv->stream) != 1) { + + _profile_error(pv); + } + + arc.count = 0x80000000; + } + } + } + } + + return bytes; +} + + +/* + * Machine dependent function to write out prof records. 
+ */ + +size_t +_prof_write(struct profile_vars *pv, struct callback *callback_ptr) +{ + struct alloc_context *aptr; + struct page_list *plist; + size_t bytes = 0; + struct prof_ext prof_st; + struct prof_int *pptr; + struct gfuncs *gptr; + int nrecs; + int i, j; + + /* Write out information prof_mcount collects */ + for (aptr = pv->acontext[ACONTEXT_PROF]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; plist != (struct page_list *)0; plist = plist->next) { + pptr = (struct prof_int *)plist->first; + + for (i = 0; i < plist->num_allocations; (i++, pptr++)) { + + /* Write out 2 records for each overflow, each with a + count of 0x80000000 + the normal record */ + prof_st = pptr->prof; + nrecs = 1 + (pptr->overflow * 2); + + for (j = 0; j < nrecs; j++) { + bytes += sizeof (struct prof_ext); + if ((*pv->fwrite_func)((void *)&prof_st, + sizeof(prof_st), + 1, + pv->stream) != 1) { + + _profile_error(pv); + } + + prof_st.cncall = 0x80000000; + } + } + } + } + + /* Now write out the prof information that gprof collects */ + for (aptr = pv->acontext[ACONTEXT_GFUNC]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; plist != (struct page_list *)0; plist = plist->next) { + gptr = (struct gfuncs *)plist->first; + + for (i = 0; i < plist->num_allocations; (i++, gptr++)) { + + /* Write out 2 records for each overflow, each with a + count of 0x80000000 + the normal record */ + prof_st = gptr->prof.prof; + nrecs = 1 + (gptr->prof.overflow * 2); + + for (j = 0; j < nrecs; j++) { + bytes += sizeof (struct prof_ext); + if ((*pv->fwrite_func)((void *)&prof_st, + sizeof(prof_st), + 1, + pv->stream) != 1) { + + _profile_error(pv); + } + + prof_st.cncall = 0x80000000; + } + } + } + } + + return bytes; +} + + +/* + * Update any statistics. For the 386, calculate the hash table loading factor. + * Also figure out how many overflows occured. 
+ */ + +void +_profile_update_stats(struct profile_vars *pv) +{ + struct alloc_context *aptr; + struct page_list *plist; + struct hasharc *hptr; + struct prof_int *pptr; + struct gfuncs *fptr; + LHISTCOUNTER *lptr; + int i; + + for(i = 0; i < MAX_BUCKETS+1; i++) { + pv->stats.buckets[i] = 0; + } + + pv->stats.hash_buckets = 0; + + if (pv->md.hash_ptr) { + for (i = 0; i < pv->md.hash_size; i++) { + long nbuckets = 0; + struct hasharc *hptr; + + for (hptr = pv->md.hash_ptr[i]; hptr; hptr = hptr->next) { + nbuckets++; + } + + pv->stats.buckets[ (nbuckets < MAX_BUCKETS) ? nbuckets : MAX_BUCKETS ]++; + if (pv->stats.hash_buckets < nbuckets) { + pv->stats.hash_buckets = nbuckets; + } + } + } + + /* Count how many times functions are out of bounds */ + if (pv->check_funcs) { + pv->stats.bogus_count = 0; + + for (aptr = pv->acontext[ACONTEXT_GPROF]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; + plist != (struct page_list *)0; + plist = plist->next) { + + hptr = (struct hasharc *)plist->first; + for (i = 0; i < plist->num_allocations; (i++, hptr++)) { + + if (hptr->arc.frompc < pv->profil_info.lowpc || + hptr->arc.frompc > pv->profil_info.highpc) { + pv->stats.bogus_count++; + } + + if (hptr->arc.selfpc < pv->profil_info.lowpc || + hptr->arc.selfpc > pv->profil_info.highpc) { + pv->stats.bogus_count++; + } + } + } + } + } + + /* Figure out how many overflows occurred */ + PROF_ULONG_TO_CNT(pv->stats.prof_overflow, 0); + PROF_ULONG_TO_CNT(pv->stats.gprof_overflow, 0); + + for (aptr = pv->acontext[ACONTEXT_GPROF]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; + plist != (struct page_list *)0; + plist = plist->next) { + + hptr = (struct hasharc *)plist->first; + for (i = 0; i < plist->num_allocations; (i++, hptr++)) { + PROF_CNT_ADD(pv->stats.gprof_overflow, hptr->overflow); + } + } + } + + for (aptr = pv->acontext[ACONTEXT_PROF]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) 
{ + + for (plist = aptr->plist; + plist != (struct page_list *)0; + plist = plist->next) { + + pptr = (struct prof_int *)plist->first; + for (i = 0; i < plist->num_allocations; (i++, pptr++)) { + PROF_CNT_ADD(pv->stats.prof_overflow, pptr->overflow); + } + } + } + + for (aptr = pv->acontext[ACONTEXT_GFUNC]; + aptr != (struct alloc_context *)0; + aptr = aptr->next) { + + for (plist = aptr->plist; + plist != (struct page_list *)0; + plist = plist->next) { + + fptr = (struct gfuncs *)plist->first; + for (i = 0; i < plist->num_allocations; (i++, fptr++)) { + PROF_CNT_ADD(pv->stats.prof_overflow, fptr->prof.overflow); + } + } + } + + /* Now go through & count how many times the LHISTCOUNTER overflowed into a 2nd word */ + lptr = (LHISTCOUNTER *)pv->profil_buf; + + if (pv->use_profil && + pv->profil_info.counter_size == sizeof(LHISTCOUNTER) && + lptr != (LHISTCOUNTER *)0) { + + PROF_ULONG_TO_CNT(pv->stats.overflow_ticks, 0); + for (i = 0; i < pv->stats.profil_buckets; i++) { + PROF_CNT_ADD(pv->stats.overflow_ticks, lptr[i].high); + } + } +} + +#if !defined(_KERNEL) && !defined(MACH_KERNEL) + +/* + * Routine callable from the debugger that prints the statistics. + */ + +int _profile_debug(void) +{ + _profile_update_stats(&_profile_vars); + _profile_print_stats(stderr, &_profile_vars.stats, &_profile_vars.profil_info); + return 0; +} + +/* + * Print the statistics structure in a meaningful way. 
+ */ + +void _profile_print_stats(FILE *stream, + const struct profile_stats *stats, + const struct profile_profil *pinfo) +{ + int i; + prof_cnt_t total_hits; + acontext_type_t ac; + int width_cname = 0; + int width_alloc = 0; + int width_wasted = 0; + int width_overhead = 0; + int width_context = 0; + static const char *cname[ACONTEXT_MAX] = ACONTEXT_NAMES; + char buf[20]; + + if (!stats) { + return; + } + + if (!stream) { + stream = stdout; + } + + sprintf(buf, "%ld.%ld", (long)stats->major_version, (long)stats->minor_version); + fprintf(stream, "%12s profiling version number\n", buf); + fprintf(stream, "%12lu size of profile_vars\n", (long unsigned)sizeof(struct profile_vars)); + fprintf(stream, "%12lu size of profile_stats\n", (long unsigned)sizeof(struct profile_stats)); + fprintf(stream, "%12lu size of profile_md\n", (long unsigned)sizeof(struct profile_md)); + fprintf(stream, "%12s calls to _{,g}prof_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats->cnt)); + fprintf(stream, "%12s calls to old mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats->old_mcount)); + fprintf(stream, "%12s calls to _dummy_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats->dummy)); + fprintf(stream, "%12lu functions profiled\n", (long unsigned)stats->prof_records); + fprintf(stream, "%12lu gprof arcs\n", (long unsigned)stats->gprof_records); + + if (pinfo) { + fprintf(stream, "%12lu profil buckets\n", (long unsigned)stats->profil_buckets); + fprintf(stream, "%12lu profil lowpc [0x%lx]\n", + (long unsigned)pinfo->lowpc, + (long unsigned)pinfo->lowpc); + + fprintf(stream, "%12lu profil highpc [0x%lx]\n", + (long unsigned)pinfo->highpc, + (long unsigned)pinfo->highpc); + + fprintf(stream, "%12lu profil highpc-lowpc\n", (long unsigned)(pinfo->highpc - pinfo->lowpc)); + fprintf(stream, "%12lu profil buffer length\n", (long unsigned)pinfo->profil_len); + fprintf(stream, "%12lu profil sizeof counters\n", (long unsigned)pinfo->counter_size); + fprintf(stream, "%12lu profil scale (%g)\n", + (long 
unsigned)pinfo->scale, + ((double)pinfo->scale) / ((double) 0x10000)); + + + for (i = 0; i < sizeof (pinfo->profil_unused) / sizeof (pinfo->profil_unused[0]); i++) { + if (pinfo->profil_unused[i]) { + fprintf(stream, "%12lu profil unused[%2d] {0x%.8lx}\n", + (long unsigned)pinfo->profil_unused[i], + i, + (long unsigned)pinfo->profil_unused[i]); + } + } + } + + if (stats->max_cpu) { + fprintf(stream, "%12lu current cpu/thread\n", (long unsigned)stats->my_cpu); + fprintf(stream, "%12lu max cpu/thread+1\n", (long unsigned)stats->max_cpu); + } + + if (stats->bogus_count != 0) { + fprintf(stream, + "%12lu gprof functions found outside of range\n", + (long unsigned)stats->bogus_count); + } + + if (PROF_CNT_NE_0(stats->too_low)) { + fprintf(stream, + "%12s histogram ticks were too low\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->too_low)); + } + + if (PROF_CNT_NE_0(stats->too_high)) { + fprintf(stream, + "%12s histogram ticks were too high\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->too_high)); + } + + if (PROF_CNT_NE_0(stats->acontext_locked)) { + fprintf(stream, + "%12s times an allocation context was locked\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->acontext_locked)); + } + + if (PROF_CNT_NE_0(stats->kernel_ticks) + || PROF_CNT_NE_0(stats->user_ticks) + || PROF_CNT_NE_0(stats->idle_ticks)) { + + prof_cnt_t total_ticks; + long double total_ticks_dbl; + + total_ticks = stats->kernel_ticks; + PROF_CNT_LADD(total_ticks, stats->user_ticks); + PROF_CNT_LADD(total_ticks, stats->idle_ticks); + total_ticks_dbl = PROF_CNT_TO_LDOUBLE(total_ticks); + + fprintf(stream, + "%12s total ticks\n", + PROF_CNT_TO_DECIMAL((char *)0, total_ticks)); + + fprintf(stream, + "%12s ticks within the kernel (%5.2Lf%%)\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->kernel_ticks), + 100.0L * (PROF_CNT_TO_LDOUBLE(stats->kernel_ticks) / total_ticks_dbl)); + + fprintf(stream, + "%12s ticks within user space (%5.2Lf%%)\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->user_ticks), + 100.0L * 
(PROF_CNT_TO_LDOUBLE(stats->user_ticks) / total_ticks_dbl)); + + fprintf(stream, + "%12s ticks idle (%5.2Lf%%)\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->idle_ticks), + 100.0L * (PROF_CNT_TO_LDOUBLE(stats->idle_ticks) / total_ticks_dbl)); + } + + if (PROF_CNT_NE_0(stats->overflow_ticks)) { + fprintf(stream, "%12s times a HISTCOUNTER counter would have overflowed\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->overflow_ticks)); + } + + if (PROF_CNT_NE_0(stats->hash_num)) { + long double total_buckets = 0.0L; + + for (i = 0; i <= MAX_BUCKETS; i++) { + total_buckets += (long double)stats->buckets[i]; + } + + fprintf(stream, "%12lu max bucket(s) on hash chain.\n", (long unsigned)stats->hash_buckets); + for (i = 0; i < MAX_BUCKETS; i++) { + if (stats->buckets[i] != 0) { + fprintf(stream, "%12lu bucket(s) had %d entries (%5.2Lf%%)\n", + (long unsigned)stats->buckets[i], i, + 100.0L * ((long double)stats->buckets[i] / total_buckets)); + } + } + + if (stats->buckets[MAX_BUCKETS] != 0) { + fprintf(stream, "%12lu bucket(s) had more than %d entries (%5.2Lf%%)\n", + (long unsigned)stats->buckets[MAX_BUCKETS], MAX_BUCKETS, + 100.0L * ((long double)stats->buckets[MAX_BUCKETS] / total_buckets)); + } + } + + PROF_ULONG_TO_CNT(total_hits, 0); + for (i = 0; i < MAX_CACHE; i++) { + PROF_CNT_LADD(total_hits, stats->cache_hits[i]); + } + + if (PROF_CNT_NE_0(total_hits)) { + long double total = PROF_CNT_TO_LDOUBLE(stats->cnt); + long double total_hits_dbl = PROF_CNT_TO_LDOUBLE(total_hits); + + fprintf(stream, + "%12s cache hits (%.2Lf%%)\n", + PROF_CNT_TO_DECIMAL((char *)0, total_hits), + 100.0L * (total_hits_dbl / total)); + + for (i = 0; i < MAX_CACHE; i++) { + if (PROF_CNT_NE_0(stats->cache_hits[i])) { + fprintf(stream, + "%12s times cache#%d matched (%5.2Lf%% of cache hits, %5.2Lf%% total)\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->cache_hits[i]), + i+1, + 100.0L * (PROF_CNT_TO_LDOUBLE(stats->cache_hits[i]) / total_hits_dbl), + 100.0L * (PROF_CNT_TO_LDOUBLE(stats->cache_hits[i]) / 
total)); + } + } + + if (PROF_CNT_NE_0(stats->hash_num)) { + fprintf(stream, "%12s times hash table searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats->hash_num)); + fprintf(stream, "%12s hash buckets searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats->hash_search)); + fprintf(stream, "%12.4Lf average buckets searched\n", + PROF_CNT_TO_LDOUBLE(stats->hash_search) / PROF_CNT_TO_LDOUBLE(stats->hash_num)); + } + } + + for (i = 0; i < sizeof (stats->stats_unused) / sizeof (stats->stats_unused[0]); i++) { + if (PROF_CNT_NE_0(stats->stats_unused[i])) { + fprintf(stream, "%12s unused[%2d] {0x%.8lx 0x%.8lx}\n", + PROF_CNT_TO_DECIMAL((char *)0, stats->stats_unused[i]), + i, + (unsigned long)stats->stats_unused[i].high, + (unsigned long)stats->stats_unused[i].low); + } + } + + /* Get the width for the allocation contexts */ + for (ac = ACONTEXT_FIRST; ac < ACONTEXT_MAX; ac++) { + int len; + + if (stats->num_context[ac] == 0) { + continue; + } + + len = strlen (cname[ac]); + if (len > width_cname) + width_cname = len; + + len = sprintf (buf, "%lu", (long unsigned)stats->num_alloc[ac]); + if (len > width_alloc) + width_alloc = len; + + len = sprintf (buf, "%lu", (long unsigned)stats->wasted[ac]); + if (len > width_wasted) + width_wasted = len; + + len = sprintf (buf, "%lu", (long unsigned)stats->overhead[ac]); + if (len > width_overhead) + width_overhead = len; + + len = sprintf (buf, "%lu", (long unsigned)stats->num_context[ac]); + if (len > width_context) + width_context = len; + } + + /* Print info about allocation contexts */ + for (ac = ACONTEXT_FIRST; ac < ACONTEXT_MAX; ac++) { + if (stats->num_context[ac] == 0) { + continue; + } + + fprintf (stream, + "%12lu bytes in %-*s %*lu alloc, %*lu unused, %*lu over, %*lu context\n", + (long unsigned)stats->bytes_alloc[ac], + width_cname, cname[ac], + width_alloc, (long unsigned)stats->num_alloc[ac], + width_wasted, (long unsigned)stats->wasted[ac], + width_overhead, (long unsigned)stats->overhead[ac], + width_context, (long 
unsigned)stats->num_context[ac]); + } +} + + +/* + * Merge a new statistics field into an old one. + */ + +void _profile_merge_stats(struct profile_stats *old_stats, const struct profile_stats *new_stats) +{ + int i; + + /* If nothing passed, just return */ + if (!old_stats || !new_stats) + return; + + /* If the old_stats has not been initialized, just copy in the new stats */ + if (old_stats->major_version == 0) { + *old_stats = *new_stats; + + /* Otherwise, update stats, field by field */ + } else { + if (old_stats->prof_records < new_stats->prof_records) + old_stats->prof_records = new_stats->prof_records; + + if (old_stats->gprof_records < new_stats->gprof_records) + old_stats->gprof_records = new_stats->gprof_records; + + if (old_stats->hash_buckets < new_stats->hash_buckets) + old_stats->hash_buckets = new_stats->hash_buckets; + + if (old_stats->bogus_count < new_stats->bogus_count) + old_stats->bogus_count = new_stats->bogus_count; + + PROF_CNT_LADD(old_stats->cnt, new_stats->cnt); + PROF_CNT_LADD(old_stats->dummy, new_stats->dummy); + PROF_CNT_LADD(old_stats->old_mcount, new_stats->old_mcount); + PROF_CNT_LADD(old_stats->hash_search, new_stats->hash_search); + PROF_CNT_LADD(old_stats->hash_num, new_stats->hash_num); + PROF_CNT_LADD(old_stats->user_ticks, new_stats->user_ticks); + PROF_CNT_LADD(old_stats->kernel_ticks, new_stats->kernel_ticks); + PROF_CNT_LADD(old_stats->idle_ticks, new_stats->idle_ticks); + PROF_CNT_LADD(old_stats->overflow_ticks, new_stats->overflow_ticks); + PROF_CNT_LADD(old_stats->acontext_locked, new_stats->acontext_locked); + PROF_CNT_LADD(old_stats->too_low, new_stats->too_low); + PROF_CNT_LADD(old_stats->too_high, new_stats->too_high); + PROF_CNT_LADD(old_stats->prof_overflow, new_stats->prof_overflow); + PROF_CNT_LADD(old_stats->gprof_overflow, new_stats->gprof_overflow); + + for (i = 0; i < (int)ACONTEXT_MAX; i++) { + if (old_stats->num_alloc[i] < new_stats->num_alloc[i]) + old_stats->num_alloc[i] = new_stats->num_alloc[i]; + + if 
(old_stats->bytes_alloc[i] < new_stats->bytes_alloc[i]) + old_stats->bytes_alloc[i] = new_stats->bytes_alloc[i]; + + if (old_stats->num_context[i] < new_stats->num_context[i]) + old_stats->num_context[i] = new_stats->num_context[i]; + + if (old_stats->wasted[i] < new_stats->wasted[i]) + old_stats->wasted[i] = new_stats->wasted[i]; + + if (old_stats->overhead[i] < new_stats->overhead[i]) + old_stats->overhead[i] = new_stats->overhead[i]; + + } + + for (i = 0; i < MAX_BUCKETS+1; i++) { + if (old_stats->buckets[i] < new_stats->buckets[i]) + old_stats->buckets[i] = new_stats->buckets[i]; + } + + for (i = 0; i < MAX_CACHE; i++) { + PROF_CNT_LADD(old_stats->cache_hits[i], new_stats->cache_hits[i]); + } + + for (i = 0; i < sizeof(old_stats->stats_unused) / sizeof(old_stats->stats_unused[0]); i++) { + PROF_CNT_LADD(old_stats->stats_unused[i], new_stats->stats_unused[i]); + } + } +} + +#endif + + +/* + * Invalid function address used when checking of function addresses is + * desired for gprof arcs, and we discover an address out of bounds. + * There should be no callers of this function. + */ + +void +_bogus_function(void) +{ +} diff --git a/osfmk/profiling/i386/profile-md.h b/osfmk/profiling/i386/profile-md.h new file mode 100644 index 000000000..7fa90283e --- /dev/null +++ b/osfmk/profiling/i386/profile-md.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.2 1996/07/31 09:57:36 paire + * Added some more constraints to __asm__ functions for compilation + * under gcc2.7.1 for PROF_CNT_[L]{ADD|SUB} macros + * [96/06/14 paire] + * + * Revision 1.1.5.1 1995/01/06 19:53:52 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:27 dwm] + * + * Revision 1.1.2.2 1994/05/16 19:19:26 meissner + * Add {,L}PROF_CNT_{SUB,LSUB,OVERFLOW} macros for gprof command. + * [1994/05/10 10:36:06 meissner] + * + * Correct 64-bit integer asms to specify result values as inputs, and use =g instead of =m. + * Cast the integer argument to PROF_CNT_ADD to unsigned long, so a short register is widened. + * Add more support for writing the gprof command. + * PROF_CNT_{EQ,NE} should not use ^=, it just uses ^. + * Round PROF_CNT_DIGITS up to 24 bytes so it is word aligned. + * _profile_cnt_to_decimal now takes the low/high values as separate arguments. + * Delete _profile_cnt_to_hex. + * [1994/04/28 21:45:07 meissner] + * + * Add more 64 bit arithmetic macros to support writing gprof. + * [1994/04/20 15:47:05 meissner] + * + * Revision 1.1.2.1 1994/04/08 17:51:56 meissner + * Correct spelling on LPROF_CNT_TO_LDOUBLE macro. 
+ * [1994/04/08 16:18:06 meissner] + * + * Make LHISTCOUNTER be 64 bits. + * Define LPROF_CNT_INC to increment LHISTCOUNTER. + * [1994/04/08 12:40:32 meissner] + * + * Make most stats 64 bits, except for things like memory allocation. + * [1994/04/02 14:58:34 meissner] + * + * Add overflow support for {gprof,prof,old,dummy}_mcount counters. + * [1994/03/17 20:13:37 meissner] + * + * Add gprof/prof overflow support + * [1994/03/17 14:56:56 meissner] + * + * Define LHISTCOUNTER. + * [1994/02/28 12:05:16 meissner] + * + * Set HISTFRACTION to 4, so new lprofil call takes the same space. + * [1994/02/24 16:15:34 meissner] + * + * Add too_low/too_high to profile_stats. + * [1994/02/16 22:38:23 meissner] + * + * Make prof_cnt_t unsigned long. + * [1994/02/11 16:52:09 meissner] + * + * Remember function unique ptr in gfuncs structure to reset profiling. + * Add support for range checking gprof arc {from,self}pc addresses. + * Add counter for # times acontext was locked. + * Expand copyright. + * [1994/02/07 12:41:08 meissner] + * + * Keep track of the number of times the kernel overflows the HISTCOUNTER counter. + * [1994/02/03 20:13:31 meissner] + * + * Add stats for {user,kernel,idle} mode in the kernel. + * [1994/02/03 15:17:36 meissner] + * + * No change. + * [1994/02/03 00:58:59 meissner] + * + * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. + * [1994/02/01 12:04:04 meissner] + * + * Split # records to # gprof and # prof records. + * Add my_cpu/max_cpu fields. + * [1994/01/28 23:33:30 meissner] + * + * Eliminate hash_{size,mask} from gfuncs structure. + * [1994/01/26 20:23:41 meissner] + * + * Add structure size fields to _profile_{vars,stats,md}. + * Add major/minor version number to _profile_md. + * Move allocation context block pointer to main structure. + * Delete shift count for allocation contexts. 
+ * [1994/01/25 01:46:08 meissner] + * + * Add HASHFRACTION + * [1994/01/22 01:14:02 meissner] + * + * Split profile-md.h into profile-internal.h and profile-md. + * [1994/01/20 20:57:18 meissner] + * + * Fixup copyright. + * [1994/01/18 23:08:14 meissner] + * + * Make flags byte-sized. + * Add have_bb flag. + * Add init_format flag. + * [1994/01/18 21:57:18 meissner] + * + * CR 10198 - Initial version. + * [1994/01/18 19:44:59 meissner] + * + * $EndLog$ + */ + +#ifndef _PROFILE_MD_H +#define _PROFILE_MD_H + +/* + * Define the interfaces between the assembly language profiling support + * that is common between the kernel, mach servers, and user space library. + */ + +/* + * Integer types used. + */ + +typedef long prof_ptrint_t; /* hold either pointer or signed int */ +typedef unsigned long prof_uptrint_t; /* hold either pointer or unsigned int */ +typedef long prof_lock_t; /* lock word type */ +typedef unsigned char prof_flag_t; /* type for boolean flags */ + +/* + * Double precision counter. 
+ */ + +typedef struct prof_cnt_t { + prof_uptrint_t low; /* low 32 bits of counter */ + prof_uptrint_t high; /* high 32 bits of counter */ +} prof_cnt_t; + +#if defined(__GNUC__) && !defined(lint) +#define PROF_CNT_INC(cnt) \ + __asm__("addl $1,%0; adcl $0,%1" \ + : "=g" ((cnt).low), "=g" ((cnt).high) \ + : "0" ((cnt).low), "1" ((cnt).high)) + +#define PROF_CNT_ADD(cnt,val) \ + __asm__("addl %2,%0; adcl $0,%1" \ + : "=g,r" ((cnt).low), "=g,r" ((cnt).high) \ + : "r,g" ((unsigned long)(val)), \ + "0,0" ((cnt).low), "1,1" ((cnt).high)) + +#define PROF_CNT_LADD(cnt,val) \ + __asm__("addl %2,%0; adcl %3,%1" \ + : "=g,r" ((cnt).low), "=g,r" ((cnt).high) \ + : "r,g" ((val).low), "r,g" ((val).high), \ + "0,0" ((cnt).low), "1,1" ((cnt).high)) + +#define PROF_CNT_SUB(cnt,val) \ + __asm__("subl %2,%0; sbbl $0,%1" \ + : "=g,r" ((cnt).low), "=g,r" ((cnt).high) \ + : "r,g" ((unsigned long)(val)), \ + "0,0" ((cnt).low), "1,1" ((cnt).high)) + +#define PROF_CNT_LSUB(cnt,val) \ + __asm__("subl %2,%0; sbbl %3,%1" \ + : "=g,r" ((cnt).low), "=g,r" ((cnt).high) \ + : "r,g" ((val).low), "r,g" ((val).high), \ + "0,0" ((cnt).low), "1,1" ((cnt).high)) + +#else +#define PROF_CNT_INC(cnt) ((++((cnt).low) == 0) ? ++((cnt).high) : 0) +#define PROF_CNT_ADD(cnt,val) (((((cnt).low + (val)) < (val)) ? ((cnt).high++) : 0), ((cnt).low += (val))) +#define PROF_CNT_LADD(cnt,val) (PROF_CNT_ADD(cnt,(val).low), (cnt).high += (val).high) +#define PROF_CNT_SUB(cnt,val) (((((cnt).low - (val)) > (cnt).low) ? ((cnt).high--) : 0), ((cnt).low -= (val))) +#define PROF_CNT_LSUB(cnt,val) (PROF_CNT_SUB(cnt,(val).low), (cnt).high -= (val).high) +#endif + +#define PROF_ULONG_TO_CNT(cnt,val) (((cnt).high = 0), ((cnt).low = val)) +#define PROF_CNT_OVERFLOW(cnt,high,low) (((high) = (cnt).high), ((low) = (cnt).low)) +#define PROF_CNT_TO_ULONG(cnt) (((cnt).high == 0) ? 
(cnt).low : 0xffffffffu) +#define PROF_CNT_TO_LDOUBLE(cnt) ((((long double)(cnt).high) * 4294967296.0L) + (long double)(cnt).low) +#define PROF_CNT_TO_DECIMAL(buf,cnt) _profile_cnt_to_decimal(buf, (cnt).low, (cnt).high) +#define PROF_CNT_EQ_0(cnt) (((cnt).high | (cnt).low) == 0) +#define PROF_CNT_NE_0(cnt) (((cnt).high | (cnt).low) != 0) +#define PROF_CNT_EQ(cnt1,cnt2) ((((cnt1).high ^ (cnt2).high) | ((cnt1).low ^ (cnt2).low)) == 0) +#define PROF_CNT_NE(cnt1,cnt2) ((((cnt1).high ^ (cnt2).high) | ((cnt1).low ^ (cnt2).low)) != 0) +#define PROF_CNT_GT(cnt1,cnt2) (((cnt1).high > (cnt2).high) || ((cnt1).low > (cnt2).low)) +#define PROF_CNT_LT(cnt1,cnt2) (((cnt1).high < (cnt2).high) || ((cnt1).low < (cnt2).low)) + +/* max # digits + null to hold prof_cnt_t values (round up to multiple of 4) */ +#define PROF_CNT_DIGITS 24 + +/* + * Types of the profil counter. + */ + +typedef unsigned short HISTCOUNTER; /* profil */ +typedef prof_cnt_t LHISTCOUNTER; /* lprofil */ + +#define LPROF_ULONG_TO_CNT(cnt,val) PROF_ULONG_TO_CNT(cnt,val) +#define LPROF_CNT_INC(lp) PROF_CNT_INC(lp) +#define LPROF_CNT_ADD(lp,val) PROF_CNT_ADD(lp,val) +#define LPROF_CNT_LADD(lp,val) PROF_CNT_LADD(lp,val) +#define LPROF_CNT_SUB(lp,val) PROF_CNT_SUB(lp,val) +#define LPROF_CNT_LSUB(lp,val) PROF_CNT_LSUB(lp,val) +#define LPROF_CNT_OVERFLOW(lp,high,low) PROF_CNT_OVERFLOW(lp,high,low) +#define LPROF_CNT_TO_ULONG(lp) PROF_CNT_TO_ULONG(lp) +#define LPROF_CNT_TO_LDOUBLE(lp) PROF_CNT_TO_LDOUBLE(lp) +#define LPROF_CNT_TO_DECIMAL(buf,cnt) PROF_CNT_TO_DECIMAL(buf,cnt) +#define LPROF_CNT_EQ_0(cnt) PROF_CNT_EQ_0(cnt) +#define LPROF_CNT_NE_0(cnt) PROF_CNT_NE_0(cnt) +#define LPROF_CNT_EQ(cnt1,cnt2) PROF_CNT_EQ(cnt1,cnt2) +#define LPROF_CNT_NE(cnt1,cnt2) PROF_CNT_NE(cnt1,cnt2) +#define LPROF_CNT_GT(cnt1,cnt2) PROF_CNT_GT(cnt1,cnt2) +#define LPROF_CNT_LT(cnt1,cnt2) PROF_CNT_LT(cnt1,cnt2) +#define LPROF_CNT_DIGITS PROF_CNT_DIGITS + +/* + * fraction of text space to allocate for histogram counters + */ + +#define 
HISTFRACTION 4 + +/* + * Fraction of text space to allocate for from hash buckets. + */ + +#define HASHFRACTION HISTFRACTION + +/* + * Prof call count, external format. + */ + +struct prof_ext { + prof_uptrint_t cvalue; /* caller address */ + prof_uptrint_t cncall; /* # of calls */ +}; + +/* + * Prof call count, internal format. + */ + +struct prof_int { + struct prof_ext prof; /* external prof struct */ + prof_uptrint_t overflow; /* # times prof counter overflowed */ +}; + +/* + * Gprof arc, external format. + */ + +struct gprof_arc { + prof_uptrint_t frompc; /* caller's caller */ + prof_uptrint_t selfpc; /* caller's address */ + prof_uptrint_t count; /* # times arc traversed */ +}; + +/* + * Gprof arc, internal format. + */ + +struct hasharc { + struct hasharc *next; /* next gprof record */ + struct gprof_arc arc; /* gprof record */ + prof_uptrint_t overflow; /* # times counter overflowed */ +}; + +/* + * Linked list of all function profile blocks. + */ + +#define MAX_CACHE 3 /* # cache table entries */ + +struct gfuncs { + struct hasharc **hash_ptr; /* gprof hash table */ + struct hasharc **unique_ptr; /* function unique pointer */ + struct prof_int prof; /* -p stats for elf */ + struct hasharc *cache_ptr[MAX_CACHE]; /* cache element pointers */ +}; + +/* + * Profile information which might be written out in ELF {,g}mon.out files. 
+ */ + +#define MAX_BUCKETS 9 /* max bucket chain to print out */ + +struct profile_stats { /* Debugging counters */ + prof_uptrint_t major_version; /* major version number */ + prof_uptrint_t minor_version; /* minor version number */ + prof_uptrint_t stats_size; /* size of profile_vars structure */ + prof_uptrint_t profil_buckets; /* # profil buckets */ + prof_uptrint_t my_cpu; /* identify current cpu/thread */ + prof_uptrint_t max_cpu; /* identify max cpu/thread */ + prof_uptrint_t prof_records; /* # of functions profiled */ + prof_uptrint_t gprof_records; /* # of gprof arcs */ + prof_uptrint_t hash_buckets; /* # gprof hash buckets */ + prof_uptrint_t bogus_count; /* # of bogus functions found in gprof */ + + prof_cnt_t cnt; /* # of calls to _{,g}prof_mcount */ + prof_cnt_t dummy; /* # of calls to _dummy_mcount */ + prof_cnt_t old_mcount; /* # of calls to old mcount */ + prof_cnt_t hash_search; /* # hash buckets searched */ + prof_cnt_t hash_num; /* # times hash table searched */ + prof_cnt_t user_ticks; /* # ticks in user space */ + prof_cnt_t kernel_ticks; /* # ticks in kernel space */ + prof_cnt_t idle_ticks; /* # ticks in idle mode */ + prof_cnt_t overflow_ticks; /* # ticks where HISTCOUNTER overflowed */ + prof_cnt_t acontext_locked; /* # times an acontext was locked */ + prof_cnt_t too_low; /* # times a histogram tick was too low */ + prof_cnt_t too_high; /* # times a histogram tick was too high */ + prof_cnt_t prof_overflow; /* # times a prof count field overflowed */ + prof_cnt_t gprof_overflow; /* # times a gprof count field overflowed */ + + /* allocation statistics */ + prof_uptrint_t num_alloc [(int)ACONTEXT_MAX]; /* # allocations */ + prof_uptrint_t bytes_alloc[(int)ACONTEXT_MAX]; /* bytes allocated */ + prof_uptrint_t num_context[(int)ACONTEXT_MAX]; /* # contexts */ + prof_uptrint_t wasted [(int)ACONTEXT_MAX]; /* wasted bytes */ + prof_uptrint_t overhead [(int)ACONTEXT_MAX]; /* overhead bytes */ + + prof_uptrint_t buckets[MAX_BUCKETS+1]; /* # hash 
indexes that have n buckets */ + prof_cnt_t cache_hits[MAX_CACHE]; /* # times nth cache entry matched */ + + prof_cnt_t stats_unused[64]; /* reserved for future use */ +}; + +#define PROFILE_MAJOR_VERSION 1 +#define PROFILE_MINOR_VERSION 1 + +/* + * Machine dependent fields. + */ + +struct profile_md { + int major_version; /* major version number */ + int minor_version; /* minor version number */ + size_t md_size; /* size of profile_md structure */ + struct hasharc **hash_ptr; /* gprof hash table */ + size_t hash_size; /* size of hash table */ + prof_uptrint_t num_cache; /* # of cache entries */ + void (*save_mcount_ptr)(void); /* save for _mcount_ptr */ + void (**mcount_ptr_ptr)(void); /* pointer to _mcount_ptr */ + struct hasharc *dummy_ptr; /* pointer to dummy gprof record */ + void *(*alloc_pages)(size_t); /* pointer to _profile_alloc_pages */ + char num_buffer[PROF_CNT_DIGITS]; /* convert 64 bit ints to string */ + long md_unused[58]; /* add unused fields */ +}; + +/* + * Record information about each function call. Specify + * caller, caller's caller, and a unique label for use by + * the profiling routines. + */ +extern void _prof_mcount(void); +extern void _gprof_mcount(void); +extern void _dummy_mcount(void); +extern void (*_mcount_ptr)(void); + +/* + * Function in profile-md.c to convert prof_cnt_t to string format (decimal & hex). 
+ */ +extern char *_profile_cnt_to_decimal(char *, prof_uptrint_t, prof_uptrint_t); + +#endif /* _PROFILE_MD_H */ diff --git a/osfmk/profiling/machine/Makefile b/osfmk/profiling/machine/Makefile new file mode 100644 index 000000000..e881a130b --- /dev/null +++ b/osfmk/profiling/machine/Makefile @@ -0,0 +1,25 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + + +DATAFILES = \ + profile-md.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = profile/machine + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = profile/machine + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/profiling/machine/profile-md.h b/osfmk/profiling/machine/profile-md.h new file mode 100644 index 000000000..c66d76ffc --- /dev/null +++ b/osfmk/profiling/machine/profile-md.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _MACH_MACHINE_PROFILE_MD_H +#define _MACH_MACHINE_PROFILE_MD_H_ + + +#if defined (__ppc__) +#include "profiling/ppc/profile-md.h" +#elif defined (__i386__) +#include "profiling/i386/profile-md.h" +#else +#error architecture not supported +#endif + + +#endif /* _MACH_MACHINE_PROFILE_MD_H_ */ diff --git a/osfmk/profiling/ppc/Makefile b/osfmk/profiling/ppc/Makefile new file mode 100644 index 000000000..ebea6420f --- /dev/null +++ b/osfmk/profiling/ppc/Makefile @@ -0,0 +1,25 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + + +DATAFILES = \ + profile-md.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = profile/ppc + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = profile/ppc + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/profiling/ppc/profile-md.h b/osfmk/profiling/ppc/profile-md.h new file mode 100644 index 000000000..d6b4fa4fd --- /dev/null +++ b/osfmk/profiling/ppc/profile-md.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.8.1 1996/12/09 16:57:22 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * [1996/12/09 11:13:16 stephen] + * + * Revision 1.1.6.1 1996/04/11 11:20:35 emcmanus + * Copied from mainline.ppc. + * [1996/04/11 08:26:36 emcmanus] + * + * hppa merge + * [1995/03/15 09:47:27 bruel] + * + * Revision 1.1.4.1 1995/11/23 17:37:28 stephen + * first powerpc checkin to mainline.ppc + * [1995/11/23 16:46:29 stephen] + * + * Revision 1.1.2.1 1995/08/25 06:50:17 stephen + * Initial checkin of files for PowerPC port + * [1995/08/23 15:05:31 stephen] + * + * Revision 1.1.2.1 1995/02/14 14:25:16 bruel + * First Revision. + * [95/01/27 bruel] + * + * $EndLog$ + */ + +#ifndef _PROFILE_MD_H +#define _PROFILE_MD_H + +/* + * Define the interfaces between the assembly language profiling support + * that is common between the kernel, mach servers, and user space library. + */ + +/* + * Integer types used. + */ + +typedef long prof_ptrint_t; /* hold either pointer or signed int */ +typedef unsigned long prof_uptrint_t; /* hold either pointer or unsigned int */ +typedef long prof_lock_t; /* lock word type */ +typedef unsigned char prof_flag_t; /* type for boolean flags */ + +/* + * Double precision counter. 
+ */ + +typedef struct prof_cnt_t { + prof_uptrint_t low; /* low 32 bits of counter */ + prof_uptrint_t high; /* high 32 bits of counter */ +} prof_cnt_t; + +#define PROF_CNT_INC(cnt) ((++((cnt).low) == 0) ? ++((cnt).high) : 0) +#define PROF_CNT_ADD(cnt,val) (((((cnt).low + (val)) < (val)) ? ((cnt).high++) : 0), ((cnt).low += (val))) +#define PROF_CNT_LADD(cnt,val) (PROF_CNT_ADD(cnt,(val).low), (cnt).high += (val).high) +#define PROF_CNT_SUB(cnt,val) (((((cnt).low - (val)) > (cnt).low) ? ((cnt).high--) : 0), ((cnt).low -= (val))) +#define PROF_CNT_LSUB(cnt,val) (PROF_CNT_SUB(cnt,(val).low), (cnt).high -= (val).high) + +#define LPROF_ULONG_TO_CNT(cnt,val) PROF_ULONG_TO_CNT(cnt,val) +#define LPROF_CNT_INC(lp) PROF_CNT_INC(lp) +#define LPROF_CNT_ADD(lp,val) PROF_CNT_ADD(lp,val) +#define LPROF_CNT_LADD(lp,val) PROF_CNT_LADD(lp,val) +#define LPROF_CNT_SUB(lp,val) PROF_CNT_SUB(lp,val) +#define LPROF_CNT_LSUB(lp,val) PROF_CNT_LSUB(lp,val) +#define LPROF_CNT_OVERFLOW(lp,high,low) PROF_CNT_OVERFLOW(lp,high,low) +#define LPROF_CNT_TO_ULONG(lp) PROF_CNT_TO_ULONG(lp) +#define LPROF_CNT_TO_LDOUBLE(lp) PROF_CNT_TO_LDOUBLE(lp) +#define LPROF_CNT_TO_DECIMAL(buf,cnt) PROF_CNT_TO_DECIMAL(buf,cnt) +#define LPROF_CNT_EQ_0(cnt) PROF_CNT_EQ_0(cnt) +#define LPROF_CNT_NE_0(cnt) PROF_CNT_NE_0(cnt) +#define LPROF_CNT_EQ(cnt1,cnt2) PROF_CNT_EQ(cnt1,cnt2) +#define LPROF_CNT_NE(cnt1,cnt2) PROF_CNT_NE(cnt1,cnt2) +#define LPROF_CNT_GT(cnt1,cnt2) PROF_CNT_GT(cnt1,cnt2) +#define LPROF_CNT_LT(cnt1,cnt2) PROF_CNT_LT(cnt1,cnt2) +#define LPROF_CNT_DIGITS PROF_CNT_DIGITS + + +/* + * Types of the profil counter. 
+ */ + +typedef unsigned short HISTCOUNTER; /* profil */ +typedef prof_cnt_t LHISTCOUNTER; /* lprofil */ + +struct profile_stats { /* Debugging counters */ + prof_uptrint_t major_version; /* major version number */ + prof_uptrint_t minor_version; /* minor version number */ +}; + +struct profile_md { + int major_version; /* major version number */ + int minor_version; /* minor version number */ +}; + +#define PROFILE_MAJOR_VERSION 1 +#define PROFILE_MINOR_VERSION 1 + +#endif /* _PROFILE_MD_H */ + + + + + + diff --git a/osfmk/profiling/profile-internal.h b/osfmk/profiling/profile-internal.h new file mode 100644 index 000000000..242d290e8 --- /dev/null +++ b/osfmk/profiling/profile-internal.h @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Define the internal interfaces between the profiling support that is + * common between the kernel, mach servers, and user space library. 
+ */ + +#ifndef _PROFILE_INTERNAL_H +#define _PROFILE_INTERNAL_H + +/* + * Allow us not to require stdio.h in kernel/server space, but + * use it in user space. + */ + +#if !defined(MACH_KERNEL) && !defined(_KERNEL) +#include +#endif + +/* + * Scaling factor for the profil system call. + */ + +#define SCALE_1_TO_1 0x10000L + + +/* + * Forward reference to structures used. + */ + +struct profile_vars; +struct profile_stats; +struct profile_md; +struct profile_dci; +struct profile_profil; +struct callback; +struct gprof_arc; +struct prof_ext; + +/* + * Profiling type + */ + +typedef enum profile_type { + PROFILE_NONE, + PROFILE_GPROF, + PROFILE_PROF +} profile_type_t; + +/* + * Whether to allocate memory in _profile_md_init. + */ + +typedef enum profile_alloc_mem { + PROFILE_ALLOC_MEM_NO, + PROFILE_ALLOC_MEM_YES +} profile_alloc_mem_t; + +/* + * Allocation context block types. + */ + +typedef enum acontext_type { + ACONTEXT_PROF, /* 0: prof records */ + ACONTEXT_GPROF, /* 1: gprof arcs */ + ACONTEXT_GFUNC, /* 2: gprof function headers */ + ACONTEXT_MISC, /* 3: misc. 
allocations */ + ACONTEXT_PROFIL, /* 4: profil based allocations */ + ACONTEXT_DCI, /* 5: dci based allocations */ + ACONTEXT_BASIC_BLOCK, /* 6: basic block allocations */ + ACONTEXT_CALLBACK, /* 7: callback structures */ + ACONTEXT_MAX = 32 /* # allocation contexts */ +} acontext_type_t; + +#define ACONTEXT_FIRST ACONTEXT_PROF + +#define ACONTEXT_NAMES { \ + "prof", \ + "gprof", \ + "gfunc", \ + "misc", \ + "profil", \ + "dci", \ + "bb", \ + "callback", \ + "#8", \ + "#9", \ + "#10", \ + "#11", \ + "#12", \ + "#13", \ + "#14", \ + "#15", \ + "#16", \ + "#17", \ + "#18", \ + "#19", \ + "#20", \ + "#21", \ + "#22", \ + "#23", \ + "#24", \ + "#25", \ + "#26", \ + "#27", \ + "#28", \ + "#29", \ + "#30", \ + "#31", \ + } + +/* + * Kgmon control codes + */ + +typedef enum kgmon_control { + KGMON_UNUSED, /* insure no 0 is ever used */ + KGMON_GET_STATUS, /* return whether or not profiling is active */ + KGMON_GET_PROFILE_VARS, /* return the _profile_vars structure */ + KGMON_GET_PROFILE_STATS, /* return the _profile_stats structure */ + KGMON_GET_DEBUG, /* return whether or not debugging is on */ + + KGMON_SET_PROFILE_ON = 50, /* turn on profiling */ + KGMON_SET_PROFILE_OFF, /* turn off profiling */ + KGMON_SET_PROFILE_RESET, /* reset profiling tables */ + KGMON_SET_DEBUG_ON, /* turn on debugging */ + KGMON_SET_DEBUG_OFF /* turn off debugging */ +} kgmon_control_t; + +#define KGMON_GET_MIN KGMON_GET_STATUS +#define KGMON_GET_MAX KGMON_GET_DEBUG +#define KGMON_SET_MIN KGMON_SET_PROFILE_ON +#define KGMON_SET_MAX KGMON_SET_DEBUG_OFF + +#define ENCODE_KGMON(num, control, cpu_thread) \ + ((num) = ((cpu_thread) << 8) | (control)) + +#define DECODE_KGMON(num, control, cpu_thread) \ +do { \ + control = (num) & 0xff; \ + cpu_thread = (num) >> 8; \ +} while (0) + +#define LEGAL_KGMON(num) (((unsigned long)(num)) <= 0xffff) + +/* + * Pull in all of the machine dependent types now after defining the enums. + */ + +#include + +/* + * general rounding functions. 
+ */ + +#define ROUNDDOWN(x,y) (((x)/(y))*(y)) +#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y)) + +/* + * Linked list of pages allocated for a particular allocation context block. + */ + +struct page_list { + void *first; /* pointer to first byte available */ + void *ptr; /* pointer to next available byte */ + struct page_list *next; /* next page allocated */ + size_t bytes_free; /* # bytes available */ + size_t bytes_allocated; /* # bytes allocates so far */ + size_t num_allocations; /* # of allocations */ +}; + +/* + * Allocation context block. + */ + +struct alloc_context { + struct alloc_context *next; /* next allocation context block */ + struct page_list *plist; /* head of page list */ + prof_lock_t lock; /* lock field available to asm */ +}; + + +/* + * Callback structure that records information for one record in the + * profiling output. + */ + +#define STR_MAX 32 + +struct callback { + void *sec_ptr; /* callback user data */ + /* callback function */ + size_t (*callback)(struct profile_vars *, struct callback *); + long sec_val1; /* section specific value */ + long sec_val2; /* section specific value */ + size_t sec_recsize; /* record size */ + size_t sec_length; /* total length */ + char sec_name[STR_MAX]; /* section name */ +}; + +/* + * Basic profil information (except for the profil buffer). + */ + +struct profile_profil { + prof_uptrint_t lowpc; /* lowest address */ + prof_uptrint_t highpc; /* highest address */ + size_t text_len; /* highpc-lowpc */ + size_t profil_len; /* length of the profil buffer */ + size_t counter_size; /* size of indivual counters (HISTCOUNTER) */ + unsigned long scale; /* scaling factor (65536 / scale) */ + unsigned long profil_unused[8]; /* currently unused */ +}; + +/* + * Profiling internal variables. This structure is intended to be machine independent. 
+ */ + +struct profile_vars { + int major_version; /* major version number */ + int minor_version; /* minor version number */ + size_t vars_size; /* size of profile_vars structure */ + size_t plist_size; /* size of page_list structure */ + size_t acontext_size; /* size of allocation context struct */ + size_t callback_size; /* size of callback structure */ + profile_type_t type; /* profile type */ + const char *error_msg; /* error message for perror */ + const char *filename; /* filename to write to */ + char *str_ptr; /* string table */ + +#if !defined(MACH_KERNEL) && !defined(_KERNEL) + FILE *stream; /* stdio stream to write to */ + FILE *diag_stream; /* stdio stream to write diagnostics to */ + /* function to write out some bytes */ + size_t (*fwrite_func)(const void *, size_t, size_t, FILE *); +#else + void *stream; /* pointer passed to fwrite_func */ + void *diag_stream; /* stdio stream to write diagnostics to */ + /* function to write out some bytes */ + size_t (*fwrite_func)(const void *, size_t, size_t, void *); +#endif + + size_t page_size; /* machine pagesize */ + size_t str_bytes; /* # bytes in string table */ + size_t str_total; /* # bytes allocated total for string table */ + long clock_ticks; /* # clock ticks per second */ + + /* profil related variables */ + struct profile_profil profil_info; /* profil information */ + HISTCOUNTER *profil_buf; /* profil buffer */ + + /* Profiling output selection */ + void (*output_init)(struct profile_vars *); /* output init function */ + void (*output)(struct profile_vars *); /* output function */ + void *output_ptr; /* output specific info */ + + /* allocation contexts */ + struct alloc_context *acontext[(int)ACONTEXT_MAX]; + + void (*bogus_func)(void); /* Function to use if address out of bounds */ + prof_uptrint_t vars_unused[63]; /* future growth */ + + /* Various flags */ + prof_flag_t init; /* != 0 if initialized */ + prof_flag_t active; /* != 0 if profiling is active */ + prof_flag_t do_profile; /* != 0 if 
profiling is being done */ + prof_flag_t use_dci; /* != 0 if using DCI */ + + prof_flag_t use_profil; /* != 0 if using profil */ + prof_flag_t recursive_alloc; /* != 0 if alloc taking place */ + prof_flag_t output_uarea; /* != 0 if output the uarea */ + prof_flag_t output_stats; /* != 0 if output the stats */ + + prof_flag_t output_clock; /* != 0 if output the clock ticks */ + prof_flag_t multiple_sections; /* != 0 if output allows multiple sections */ + prof_flag_t have_bb; /* != 0 if we have basic block data */ + prof_flag_t init_format; /* != 0 if output format has been chosen */ + + prof_flag_t debug; /* != 0 if debugging */ + prof_flag_t check_funcs; /* != 0 if check gprof arcs for being in range */ + prof_flag_t flag_unused[62]; /* space for more flags */ + + struct profile_stats stats; /* profiling statistics */ + struct profile_md md; /* machine dependent info */ +}; + +/* + * Profiling static data. + */ + +extern struct profile_vars _profile_vars; + +/* + * Functions called by the machine dependent routines, and provided by + * specific routines to the kernel, server, and user space library. + */ + +#if (__GNUC__ < 2) || (__GNUC__ == 2 && __GNUC_MINOR__ < 5) || defined(lint) +#define __attribute__(arg) +#endif + +#if defined(_KERNEL) || defined(MACH_KERNEL) +#define _profile_printf printf +#else +extern int _profile_printf(const char *, ...) __attribute__((format(printf,1,2))); +#endif + +extern void *_profile_alloc_pages (size_t); +extern void _profile_free_pages (void *, size_t); +extern void _profile_error(struct profile_vars *); + +/* + * Functions provided by the machine dependent files. 
+ */ + +extern void _profile_md_init(struct profile_vars *, profile_type_t, profile_alloc_mem_t); +extern int _profile_md_start(void); +extern int _profile_md_stop(void); +extern void *_profile_alloc(struct profile_vars *, size_t, acontext_type_t); +extern size_t _gprof_write(struct profile_vars *, struct callback *); +extern size_t _prof_write(struct profile_vars *, struct callback *); +extern void _profile_update_stats(struct profile_vars *); +extern void _profile_reset(struct profile_vars *); + +#if !defined(_KERNEL) && !defined(MACH_KERNEL) +extern void _profile_print_stats(FILE *, const struct profile_stats *, const struct profile_profil *); +extern void _profile_merge_stats(struct profile_stats *, const struct profile_stats *); +#else + +/* + * Functions defined in profile-kgmon.c + */ + +extern long _profile_kgmon(int, + size_t, + long, + int, + void **, + void (*)(kgmon_control_t)); +#ifdef _KERNEL +extern void kgmon_server_control(kgmon_control_t); + +#endif /* _KERNEL */ +#endif /* _KERNEL or MACH_KERNEL */ + +#endif /* _PROFILE_INTERNAL_H */ diff --git a/osfmk/profiling/profile-kgmon.c b/osfmk/profiling/profile-kgmon.c new file mode 100644 index 000000000..e2eb6272a --- /dev/null +++ b/osfmk/profiling/profile-kgmon.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.5.1 1995/01/06 19:54:04 devrcs + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:34 dwm] + * + * Revision 1.1.2.1 1994/04/08 17:52:05 meissner + * Add callback function to _profile_kgmon. + * [1994/02/16 22:38:31 meissner] + * + * _profile_kgmon now returns pointer to area, doesn't do move itself. + * [1994/02/11 16:52:17 meissner] + * + * Move all printfs into if (pv->debug) { ... } blocks. + * Add debug printfs protected by if (pv->debug) for all error conditions. + * Add code to reset profiling information. + * Add code to get/set debug flag. + * Expand copyright. + * [1994/02/07 12:41:14 meissner] + * + * Add support to copy arbitrary regions. + * Delete several of the KGMON_GET commands, now that arb. regions are supported. + * Explicitly call _profile_update_stats before dumping vars or stats. + * [1994/02/03 00:59:05 meissner] + * + * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. + * [1994/02/01 12:04:09 meissner] + * + * CR 10198 - Initial version. 
+ * [1994/01/28 23:33:37 meissner] + * + * $EndLog$ + */ + +#include + +#ifdef MACH_KERNEL +#include +#endif + +#ifndef PROFILE_VARS +#define PROFILE_VARS(cpu) (&_profile_vars) +#endif + +extern int printf(const char *, ...); + + +/* + * Kgmon interface. This returns the count of bytes moved if everything was ok, + * or -1 if there were errors. + */ + +long +_profile_kgmon(int write, + size_t count, + long indx, + int max_cpus, + void **p_ptr, + void (*control_func)(kgmon_control_t)) +{ + kgmon_control_t kgmon; + int cpu; + int error = 0; + int i; + struct profile_vars *pv; + static struct callback dummy_callback; + + *p_ptr = (void *)0; + + /* + * If the number passed is not within bounds, just copy the data directly. + */ + + if (!LEGAL_KGMON(indx)) { + *p_ptr = (void *)indx; + if (!write) { + if (PROFILE_VARS(0)->debug) { + printf("_profile_kgmon: copy %5ld bytes, from 0x%lx\n", + (long)count, + (long)indx); + } + + } else { + if (PROFILE_VARS(0)->debug) { + printf("_profile_kgmon: copy %5ld bytes, to 0x%lx\n", + (long)count, + (long)indx); + } + } + + return count; + } + + /* + * Decode the record number into the component pieces. 
+ */ + + DECODE_KGMON(indx, kgmon, cpu); + + if (PROFILE_VARS(0)->debug) { + printf("_profile_kgmon: start: kgmon control = %2d, cpu = %d, count = %ld\n", + kgmon, cpu, (long)count); + } + + /* Validate the CPU number */ + if (cpu < 0 || cpu >= max_cpus) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON, bad cpu %d\n", cpu); + } + + return -1; + + } else { + pv = PROFILE_VARS(cpu); + + if (!write) { + switch (kgmon) { + default: + if (PROFILE_VARS(0)->debug) { + printf("Unknown KGMON read command\n"); + } + + error = -1; + break; + + case KGMON_GET_STATUS: /* return whether or not profiling is active */ + if (cpu != 0) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON_GET_STATUS: cpu = %d\n", cpu); + } + + error = -1; + break; + } + + if (count != sizeof(pv->active)) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON_GET_STATUS: count = %ld, should be %ld\n", + (long)count, + (long)sizeof(pv->active)); + } + + error = -1; + break; + } + + *p_ptr = (void *)&pv->active; + break; + + case KGMON_GET_DEBUG: /* return whether or not debugging is active */ + if (cpu != 0) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON_GET_DEBUG: cpu = %d\n", cpu); + } + + error = -1; + break; + } + + if (count != sizeof(pv->debug)) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON_GET_DEBUG: count = %ld, should be %ld\n", + (long)count, + (long)sizeof(pv->active)); + } + + error = -1; + break; + } + + *p_ptr = (void *)&pv->debug; + break; + + case KGMON_GET_PROFILE_VARS: /* return the _profile_vars structure */ + if (count != sizeof(struct profile_vars)) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON_GET_PROFILE_VARS: count = %ld, should be %ld\n", + (long)count, + (long)sizeof(struct profile_vars)); + } + + error = -1; + break; + } + + _profile_update_stats(pv); + *p_ptr = (void *)pv; + break; + + case KGMON_GET_PROFILE_STATS: /* return the _profile_stats structure */ + if (count != sizeof(struct profile_stats)) { + if (PROFILE_VARS(0)->debug) { + printf("KGMON_GET_PROFILE_STATS: 
count = %ld, should be = %ld\n",
						       (long)count,
						       (long)sizeof(struct profile_stats));
					}

					error = -1;
					break;
				}

				_profile_update_stats(pv);
				*p_ptr = (void *)&pv->stats;
				break;
			}

		} else {
			switch (kgmon) {
			default:
				if (PROFILE_VARS(0)->debug) {
					printf("Unknown KGMON write command\n");
				}

				error = -1;
				break;

			case KGMON_SET_PROFILE_ON:	/* turn on profiling */
				if (cpu != 0) {
					/* all SET commands are global: only accepted on cpu 0,
					 * then applied to every cpu below */
					if (PROFILE_VARS(0)->debug) {
						printf("KGMON_SET_PROFILE_ON, cpu = %d\n", cpu);
					}

					error = -1;
					break;
				}

				if (!PROFILE_VARS(0)->active) {
					for (i = 0; i < max_cpus; i++) {
						PROFILE_VARS(i)->active = 1;
					}

					/* notify the server/callback before starting the md layer */
					if (control_func) {
						(*control_func)(kgmon);
					}

					_profile_md_start();
				}

				count = 0;
				break;

			case KGMON_SET_PROFILE_OFF:	/* turn off profiling */
				if (cpu != 0) {
					if (PROFILE_VARS(0)->debug) {
						printf("KGMON_SET_PROFILE_OFF, cpu = %d\n", cpu);
					}

					error = -1;
					break;
				}

				if (PROFILE_VARS(0)->active) {
					for (i = 0; i < max_cpus; i++) {
						PROFILE_VARS(i)->active = 0;
					}

					/* stop md layer first, then notify (mirror of SET_PROFILE_ON) */
					_profile_md_stop();

					if (control_func) {
						(*control_func)(kgmon);
					}
				}

				count = 0;
				break;

			case KGMON_SET_PROFILE_RESET:	/* reset profiling */
				if (cpu != 0) {
					if (PROFILE_VARS(0)->debug) {
						printf("KGMON_SET_PROFILE_RESET, cpu = %d\n", cpu);
					}

					error = -1;
					break;
				}

				for (i = 0; i < max_cpus; i++) {
					_profile_reset(PROFILE_VARS(i));
				}

				if (control_func) {
					(*control_func)(kgmon);
				}

				count = 0;
				break;

			case KGMON_SET_DEBUG_ON:	/* turn on debugging */
				if (cpu != 0) {
					if (PROFILE_VARS(0)->debug) {
						printf("KGMON_SET_DEBUG_ON, cpu = %d\n", cpu);
					}

					error = -1;
					break;
				}

				if (!PROFILE_VARS(0)->debug) {
					for (i = 0; i < max_cpus; i++) {
						PROFILE_VARS(i)->debug = 1;
					}

					if (control_func) {
						(*control_func)(kgmon);
					}
				}

				count = 0;
				break;

			case KGMON_SET_DEBUG_OFF:	/* turn off debugging */
				if (cpu != 0) {
					if (PROFILE_VARS(0)->debug) {
printf("KGMON_SET_DEBUG_OFF, cpu = %d\n", cpu); + } + + error = -1; + break; + } + + if (PROFILE_VARS(0)->debug) { + for (i = 0; i < max_cpus; i++) { + PROFILE_VARS(i)->debug = 0; + } + + if (control_func) { + (*control_func)(kgmon); + } + } + + count = 0; + break; + } + } + } + + if (error) { + if (PROFILE_VARS(0)->debug) { + printf("_profile_kgmon: done: kgmon control = %2d, cpu = %d, error = %d\n", + kgmon, cpu, error); + } + + return -1; + } + + if (PROFILE_VARS(0)->debug) { + printf("_profile_kgmon: done: kgmon control = %2d, cpu = %d, count = %ld\n", + kgmon, cpu, (long)count); + } + + return count; +} diff --git a/osfmk/profiling/profile-mk.c b/osfmk/profiling/profile-mk.c new file mode 100644 index 000000000..84174f599 --- /dev/null +++ b/osfmk/profiling/profile-mk.c @@ -0,0 +1,239 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Microkernel interface to common profiling. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +extern char etext[], pstart[]; + +#if NCPUS > 1 +struct profile_vars *_profile_vars_cpus[NCPUS] = { &_profile_vars }; +struct profile_vars _profile_vars_aux[NCPUS-1]; +#endif + +void * +_profile_alloc_pages (size_t size) +{ + vm_offset_t addr; + + /* + * For the MK, we can't support allocating pages at runtime, because we + * might be at interrupt level, so abort if we didn't size the table + * properly. + */ + + if (PROFILE_VARS(0)->active) { + panic("Call to _profile_alloc_pages while profiling is running."); + } + + if (kmem_alloc(kernel_map, &addr, size)) { + panic("Could not allocate memory for profiling"); + } + + memset((void *)addr, '\0', size); + if (PROFILE_VARS(0)->debug) { + printf("Allocated %d bytes for profiling, address 0x%x\n", (int)size, (int)addr); + } + + return((caddr_t)addr); +} + +void +_profile_free_pages(void *addr, size_t size) +{ + if (PROFILE_VARS(0)->debug) { + printf("Freed %d bytes for profiling, address 0x%x\n", (int)size, (int)addr); + } + + kmem_free(kernel_map, (vm_offset_t)addr, size); + return; +} + +void _profile_error(struct profile_vars *pv) +{ + panic("Fatal error in profiling"); +} + +void +kmstartup(void) +{ + prof_uptrint_t textsize; + prof_uptrint_t monsize; + prof_uptrint_t lowpc; + prof_uptrint_t highpc; + int i; + struct profile_vars *pv; + + /* + * round lowpc and highpc to multiples of the density we're using + * so the rest of the scaling (here and in gprof) stays in ints. 
+ */ + + lowpc = ROUNDDOWN((prof_uptrint_t)&pstart[0], HISTFRACTION*sizeof(LHISTCOUNTER)); + highpc = ROUNDUP((prof_uptrint_t)&etext[0], HISTFRACTION*sizeof(LHISTCOUNTER)); + textsize = highpc - lowpc; + monsize = (textsize / HISTFRACTION) * sizeof(LHISTCOUNTER); + + for (i = 0; i < NCPUS; i++) { + pv = PROFILE_VARS(i); + +#if NCPUS > 1 + if (!pv) { + _profile_vars_cpus[i] = pv = &_profile_vars_aux[i-i]; + } +#endif + +#ifdef DEBUG_PROFILE + pv->debug = 1; +#endif + pv->page_size = PAGE_SIZE; + _profile_md_init(pv, PROFILE_GPROF, PROFILE_ALLOC_MEM_YES); + + /* Profil related variables */ + pv->profil_buf = _profile_alloc (pv, monsize, ACONTEXT_PROFIL); + pv->profil_info.highpc = highpc; + pv->profil_info.lowpc = lowpc; + pv->profil_info.text_len = textsize; + pv->profil_info.profil_len = monsize; + pv->profil_info.counter_size = sizeof(LHISTCOUNTER); + pv->profil_info.scale = 0x10000 / HISTFRACTION; + pv->stats.profil_buckets = monsize / sizeof(LHISTCOUNTER); + + /* Other gprof variables */ + pv->stats.my_cpu = i; + pv->stats.max_cpu = NCPUS; + pv->init = 1; + pv->active = 1; + pv->use_dci = 0; + pv->use_profil = 1; + pv->check_funcs = 1; /* for now */ + + if (pv->debug) { + printf("Profiling kernel, s_textsize=%ld, monsize=%ld [0x%lx..0x%lx], cpu = %d\n", + (long)textsize, + (long)monsize, + (long)lowpc, + (long)highpc, + i); + } + } + + _profile_md_start(); +} + +/* driver component */ + +int +gprofprobe(caddr_t port, void *ctlr) +{ + return(1); +} + +void +gprofattach(void) +{ + kmstartup(); + return; +} + +/* struct bus_device *gprofinfo[NGPROF]; */ +struct bus_device *gprofinfo[1]; + +struct bus_driver gprof_driver = { + gprofprobe, 0, gprofattach, 0, 0, "gprof", gprofinfo, "gprofc", 0, 0}; + + +io_return_t +gprofopen(dev_t dev, + int flags, + io_req_t ior) +{ + ior->io_error = D_SUCCESS; + return(0); +} + +void +gprofclose(dev_t dev) +{ + return; +} + +void +gprofstrategy(io_req_t ior) +{ + void *sys_ptr = (void *)0; + + long count = 
_profile_kgmon(!(ior->io_op & IO_READ), + ior->io_count, + ior->io_recnum, + NCPUS, + &sys_ptr, + (void (*)(kgmon_control_t))0); + + if (count < 0) { + ior->io_error = D_INVALID_RECNUM; + + } else { + if (count > 0 && sys_ptr != (void *)0) { + if (ior->io_op & IO_READ) { + memcpy((void *)ior->io_data, sys_ptr, count); + } else { + memcpy(sys_ptr, (void *)ior->io_data, count); + } + } + + ior->io_error = D_SUCCESS; + ior->io_residual = ior->io_count - count; + } + + iodone(ior); +} + +io_return_t +gprofread(dev_t dev, + io_req_t ior) +{ + return(block_io(gprofstrategy, minphys, ior)); +} + +io_return_t +gprofwrite(dev_t dev, + io_req_t ior) +{ + return (block_io(gprofstrategy, minphys, ior)); +} diff --git a/osfmk/profiling/profile-mk.h b/osfmk/profiling/profile-mk.h new file mode 100644 index 000000000..89bf6681c --- /dev/null +++ b/osfmk/profiling/profile-mk.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Microkernel interface to common profiling. 
+ */ + +#include +#include +#include +#include + +/* + * JMM - We don't use these, just the BSD interfaces. + */ +#if 0 +extern void kmstartup(void); +extern int gprofprobe(caddr_t, void *); +extern void gprofattach(void); +extern int gprofopen(dev_t, int, io_req_t); +extern void gprofclose(dev_t); +extern void gprofstrategy(io_req_t); +extern int gprofread(dev_t, io_req_t); +extern int gprofwrite(dev_t, io_req_t); +#endif + +/* + * Macros to access the nth cpu's profile variable structures. + */ + +#if NCPUS <= 1 +#define PROFILE_VARS(cpu) (&_profile_vars) + +#else +extern struct profile_vars *_profile_vars_cpus[NCPUS]; +#define PROFILE_VARS(cpu) (_profile_vars_cpus[(cpu)]) +#endif + + diff --git a/osfmk/sys/ioctl.h b/osfmk/sys/ioctl.h new file mode 100644 index 000000000..b305d064d --- /dev/null +++ b/osfmk/sys/ioctl.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.10.1 1996/11/29 16:59:52 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Moved contents to mach/mach_ioctl.h. + * [96/09/18 barbou] + * + * Revision 1.2.6.1 1994/09/23 03:12:49 ezf + * change marker to not FREE + * [1994/09/22 21:58:45 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:55:17 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:30:51 jeffc] + * + * Revision 1.2 1993/04/19 17:16:43 devrcs + * Fixes for ANSI C + * [1993/02/26 14:02:24 sp] + * + * Revision 1.1 1992/09/30 02:36:52 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.5 91/10/09 16:18:46 af + * Revision 2.4.1.1 91/09/01 15:53:00 af + * Upgraded to BSD 4.4. + * [91/09/01 af] + * + * Revision 2.4.1.1 91/09/01 15:53:00 af + * Upgraded to BSD 4.4. + * [91/09/01 af] + * + * Revision 2.4 91/05/14 17:40:04 mrt + * Correcting copyright + * + * Revision 2.3 91/03/16 15:01:35 rpd + * Fixed the definitions for ANSI C. + * [91/02/20 rpd] + * + * Revision 2.2 91/02/14 15:04:02 mrt + * Changed to new Mach copyright + * + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ +/* + * Format definitions for 'ioctl' commands in device definitions. + * + * From BSD4.4. + */ + +#ifndef _SYS_IOCTL_H_ +#define _SYS_IOCTL_H_ + +#include + +#endif /* _SYS_IOCTL_H_ */ diff --git a/osfmk/sys/scsi.h b/osfmk/sys/scsi.h new file mode 100644 index 000000000..319b0e2e2 --- /dev/null +++ b/osfmk/sys/scsi.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1996/04/17 17:48:51 davidp + * Created for use with SVR4 drivers. + * [1996/04/11 13:18:06 davidp] + * + * Revision 1.1.1.2 1996/03/04 17:50:08 calvert + * Created for use with SVR4 drivers. + * + * $EndLog$ + */ diff --git a/osfmk/sys/sdi.h b/osfmk/sys/sdi.h new file mode 100644 index 000000000..13d487ea6 --- /dev/null +++ b/osfmk/sys/sdi.h @@ -0,0 +1,499 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.7.1 1996/09/17 16:34:56 bruel + * removed size_t, already defined in types.h. 
+ * [96/09/17 bruel] + * + * Revision 1.1.4.1 1996/04/17 17:48:54 davidp + * Created for use with SVR4 drivers. + * [1996/04/11 13:19:26 davidp] + * + * Revision 1.1.1.2 1996/03/04 17:53:46 calvert + * Created for use with SVR4 drivers. + * + * $EndLog$ + */ +/* In vi use ":set ts=4" to edit/view this file + */ +#ifndef _SYS_SDI_H_ +#define _SYS_SDI_H_ 1 + +#include "scsi/scsi_endian.h" + +typedef u_long paddr_t; +typedef u_int rm_key_t; + +typedef long hba_clock_t; + +typedef u_long major_t; +typedef u_long minor_t; + +typedef u_long hba_buf_t; /* just to satisfy declaration */ +typedef u_long hba_uio_t; /* just to satisfy declaration */ + + +struct ver_no { + uchar_t sv_release; + uchar_t sv_machine; + short sv_modes; +}; + +struct hba_idata_v4 { + int version_num; + char *name; + uchar_t ha_id; + ulong_t ioaddr1; + int dmachan1; + int iov; + int cntlr; + int active; + ulong_t idata_memaddr; + uchar_t idata_ctlorder; + uchar_t idata_nbus; + ushort_t idata_ntargets; + ushort_t idata_nluns; + rm_key_t idata_rmkey; + void *idata_intrcookie; + int idata_cpubind; +}; + +struct hba_idata { + int version_num; + char *name; + uchar_t ha_id; + ulong_t ioaddr1; + int dmachan1; + int iov; + int cntlr; + int active; +}; + +#define HBA_SVR4_2 1 +#define HBA_SVR4_2_2 2 +#define HBA_SVR4_2MP 3 + +#define HBA_VMASK 0xffff + +#define HBA_IDATA_EXT 0x10000 +#define HBA_EXT_INFO 0x20000 +#define HBA_AUTOCONF 0x40000 + +#define VID_LEN 8 +#define PID_LEN 16 +#define REV_LEN 4 + +#define INQ_LEN VID_LEN+PID_LEN+1 +#define INQ_EXLEN INQ_LEN+REV_LEN + +struct ident { + BITFIELD_2( unsigned char, + id_type : 5, + id_pqual : 3); + BITFIELD_2(unsigned char, + id_qualif : 7, + id_rmb : 1); + uchar_t id_ver; + BITFIELD_2(unsigned char, + id_form : 4, + id_res1 : 4); + uchar_t id_len; + uchar_t id_vu [3]; + char id_vendor [VID_LEN]; + char id_prod [PID_LEN]; + char id_revnum [REV_LEN]; +}; + +#define SCSI_INQ_CON 0x0 +#define SCSI_INQ_TC 0x1 +#define SCSI_INQ_TNC 0x3 + +struct scsi_adr { + 
int scsi_ctl; + int scsi_target; + int scsi_lun; + int scsi_bus; +}; + +struct scsi_ad { + ulong_t sa_major; + ulong_t sa_minor; + uchar_t sa_lun; + BITFIELD_2(unsigned char, + sa_bus : 3, + sa_exta : 5); + short sa_ct; +}; + +/* sa_ct */ +#define SDI_SA_CT(c,t) (((c) << 3) | ((t) & 0x07)) +#define SDI_HAN(sa) (((sa)->sa_ct >> 3) & 0x07) +#define SDI_TCN(sa) ((sa)->sa_ct & 0x07) + +#define SDI_ETCN(sa) ((sa)->sa_exta) +#define SDI_EHAN(sa) (((sa)->sa_ct >> 3) & 0x1f) + +struct sdi_edt { + struct sdi_edt *hash_p; + short hba_no; + uchar_t scsi_id; + uchar_t lun; + struct owner *curdrv; + struct owner *owner_list; + ulong_t res1; + int pdtype; + uchar_t iotype; + char inquiry [INQ_EXLEN]; + struct scsi_adr scsi_adr; + ulong_t memaddr; + uchar_t ctlorder; + struct ident edt_ident; +}; + +/* iotype */ +#define F_DMA 0x001 +#define F_DMA_24 F_DMA +#define F_PIO 0x002 +#define F_SCGTH 0x004 +#define F_RMB 0x008 +#define F_DMA_32 0x010 +#define F_HDWREA 0x020 +#define F_RESID 0x040 + +struct mod_operations { + int (*modm_install)(void); + int (*modm_remove)(void); + int (*modm_info)(void); + int (*modm_bind)(void); +}; + +struct modlink { + struct mod_operations *ml_ops; + void *ml_type_data; +}; + +struct mod_type_data { + char *mtd_info; + void *mtd_pdata; +}; + +struct modwrapper { + int mw_rev; + int (*mw_load)(void); + int (*mw_unload)(void); + void (*mw_halt)(void); + void *mw_conf_data; + struct modlink *mw_modlink; +}; + +struct hbadata { + struct xsb *sb; +}; + +typedef struct physreq { + paddr_t phys_align; + paddr_t phys_boundary; + uchar_t phys_dmasize; + uchar_t phys_max_scgth; + uchar_t phys_flags; + void *phys_brkup_poolp; +} physreq_t; + + +typedef struct bcb { + uchar_t bcb_addrtypes; + uchar_t bcb_flags; + size_t bcb_max_xfer; + size_t bcb_granularity; + physreq_t *bcb_physreqp; +} bcb_t; + +struct hbagetinfo { + char *name; + char iotype; + bcb_t *bcbp; +}; + +struct hba_info { + int *hba_flag; + ulong_t max_xfer; + long (*hba_freeblk)(struct hbadata 
*hdp, int cntlr); + struct hbadata *(*hba_getblk)(int flag, int cntlr); + long (*hba_icmd)(struct hbadata *hdp, int flag); + void (*hba_getinfo)(struct scsi_ad *sap, + struct hbagetinfo *hgip); + long (*hba_send)(struct hbadata *hdp, int flag); + int (*hba_xlat)(struct hbadata *hdp, int bflag, void *procp, + int flag); + int (*hba_open)(void); + int (*hba_close)(void); + int (*hba_ioctl)(void); +}; + +/* hba_flag */ +#define HBA_MP 0x01 +#define HBA_HOT 0x02 +#define HBA_TIMEOUT 0x04 + +#define SC_EXHAN(minor) (((minor) >> 5) & 0x1f) +#define SC_EXTCN(minor) ((((minor) >> 2) & 0x07) | ((minor >> 7) & 0x18)) +#define SC_EXLUN(minor) (((minor) & 0x03) | ((minor>>10) & 0x1C)) +#define SC_BUS(minor) (((minor) >> 15) & 0x07) + +#define SC_MKMINOR(h,t,l,b) ( \ + (((h) & 0x1f) << 5) | \ + (((t) & 0x07) << 2) | (((t) & 0x18) << 7) | \ + ((l) & 0x03) | (((l) & 0x1c) << 10) | \ + (((b) & 0x07) << 15) \ + ) + +#define SDI_NAMESZ 49 + +#define SM_POOLSIZE 28 +#define LG_POOLSIZE (sizeof (struct xsb)) + +#define SCB_TYPE 1 +#define ISCB_TYPE 2 +#define SFB_TYPE 3 + +#define SCB_WRITE 0x00 +#define SCB_READ 0x01 +#define SCB_LINK 0x02 +#define SCB_HAAD 0x04 +#define SCB_PARTBLK 0x08 + +#define SDI_NOALLOC 0x00000000 +#define SDI_ASW 0x00000001 +#define SDI_LINKF0 0x00000002 +#define SDI_LINKF1 0x00000003 +#define SDI_QFLUSH 0xE0000004 +#define SDI_ABORT 0xF0000005 +#define SDI_RESET 0xF0000006 +#define SDI_CRESET 0xD0000007 +#define SDI_V2PERR 0xA0000008 +#define SDI_TIME 0xD0000009 +#define SDI_NOTEQ 0x8000000A +#define SDI_HAERR 0xE000000B +#define SDI_MEMERR 0xA000000C +#define SDI_SBUSER 0xA000000D +#define SDI_CKSTAT 0xD000000E +#define SDI_SCBERR 0x8000000F +#define SDI_OOS 0xA0000010 +#define SDI_NOSELE 0x90000011 +#define SDI_MISMAT 0x90000012 +#define SDI_PROGRES 0x00000013 +#define SDI_UNUSED 0x00000014 +#define SDI_ONEIC 0x80000017 +#define SDI_SFBERR 0x80000019 +#define SDI_TCERR 0x9000001A + +#define SDI_ERROR 0x80000000 +#define SDI_RETRY 0x40000000 +#define 
SDI_MESS 0x20000000 +#define SDI_SUSPEND 0x10000000 + +#define SFB_NOPF 0x00 +#define SFB_RESETM 0x01 +#define SFB_ABORTM 0x02 +#define SFB_FLUSHR 0x03 +#define SFB_RESUME 0x04 +#define SFB_SUSPEND 0x05 +#define SFB_ADD_DEV 0x06 +#define SFB_RM_DEV 0x07 +#define SFB_PAUSE 0x08 +#define SFB_CONTINUE 0x09 + +#define SDI_386_AT 0x06 +#define SDI_386_MCA 0x07 +#define SDI_386_EISA 0x08 + +#define SDI_RET_OK 0 +#define SDI_RET_ERR -1 +#define SDI_RET_RETRY 1 + +#define SDI_SEND 0x0081 +#define SDI_TRESET 0x0082 +#define SDI_BRESET 0x0084 +#define HA_VER 0x0083 +#define SDI_RESERVE 0x0085 +#define SDI_RELEASE 0x0086 +#define SDI_RESTAT 0x0087 +#define HA_GETPARMS 0x008a +#define IHA_GETPARMS 0x008b +#define HA_SETPARMS 0x008c +#define IHA_SETPARMS 0x008d +#define HA_GETPPARMS 0x008e + +struct sense { + uchar_t sd_pad0; + BITFIELD_2(unsigned char, + sd_errc : 7, + sd_valid : 1); + uchar_t sd_res1; + BITFIELD_5(unsigned char, + sd_key : 4, + sd_res2 : 1, + sd_ili : 1, + sd_eom : 1, + sd_fm : 1); + uint_t sd_ba; + uchar_t sd_len; + uchar_t sd_res3 [4]; + uchar_t sd_sencode; + uchar_t sd_qualifier; + uchar_t sd_fru; + BITFIELD_5(unsigned char, + sd_bitpt : 3, + sd_bpv : 1, + sd_res4 : 2, + sd_cd : 1, + sd_res5 : 1); + uchar_t sd_field [2]; + uchar_t sd_res6; + uchar_t sd_buffer; + uchar_t sd_res7 [2]; +}; + + +struct sb_extra { + struct sense sb_sense; +}; + +#define sc_priv sc_extra + +struct sb; + +struct scb { + ulong_t sc_comp_code; + void *sc_extra; + void (*sc_int)(struct sb *sbp); + caddr_t sc_cmdpt; + caddr_t sc_datapt; + long sc_wd; + time_t sc_time; + struct scsi_ad sc_dev; + ushort_t sc_mode; + uchar_t sc_status; + char sc_fill; + struct sb *sc_link; + long sc_cmdsz; + long sc_datasz; + long sc_resid; + hba_clock_t sc_start; +}; + +struct sfb { + ulong_t sf_comp_code; + char *sf_priv; + void (*sf_int)(struct sb *sbp); + struct scsi_ad sf_dev; + ulong_t sf_func; + int sf_wd; +}; + +struct sb { + ulong_t sb_type; + union { + struct scb b_scb; + struct sfb b_sfb; + } 
sb_b; +}; + +#define SCB sb_b.b_scb +#define SFB sb_b.b_sfb + +struct xsb { + struct sb sb; + struct hbadata *hbadata_p; + struct owner *owner_p; + struct sb_extra extra; +}; + +#define S_GOOD 0X00 +#define S_CKCON 0X02 +#define S_METGD 0X04 +#define S_BUSY 0X08 +#define S_INGD 0X10 +#define S_INMET 0X12 +#define S_RESER 0X18 +#define S_CTERM 0x22 +#define S_QFULL 0x28 + +#define SLEEP 0 +#define NOSLEEP 1 + +#define KM_SLEEP SLEEP +#define KM_NOSLEEP NOSLEEP +#define KM_DMA 2 +#define KM_REQ_DMA 4 +#define KM_PHYSCONTIG 8 + +struct mod_drvintr { + ushort_t di_magic; + ushort_t di_version; + char *di_modname; + int *di_devflagp; + void (*di_handler)(int vect); + void *di_hook; +}; + +#define MOD_INTR_MAGIC 0xEB13 +#define MOD_INTR_VER 1 + +struct o_mod_drvintr { + struct intr_info *drv_intrinfo; + void (*ihndler)(int vect); +}; + +#define MOD_INTRVER_MASK 0xff000000 +#define MOD_INTRVER_42 0x01000000 + +#define INTRVER(infop) ((unsigned int)((infop)->ivect_no & MOD_INTRVER_MASK)) +#define INTRNO(infop) ((infop)->ivect_no & ~MOD_INTRVER_MASK) + +struct intr_info0 { + int ivect_no; + int int_pri; + int itype; +}; + +struct intr_info { + int ivect_no; + int int_pri; + int itype; + int int_cpu; + int int_mp; +}; + +#endif /* _SYS_SDI_H_ */ diff --git a/osfmk/sys/sdi_edt.h b/osfmk/sys/sdi_edt.h new file mode 100644 index 000000000..ac6576c05 --- /dev/null +++ b/osfmk/sys/sdi_edt.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.4.1 1996/04/17 17:48:58 davidp + * Created for use with SVR4 drivers. + * [1996/04/11 13:20:36 davidp] + * + * Revision 1.1.1.2 1996/03/04 17:54:47 calvert + * Created for use with SVR4 drivers. + * + * $EndLog$ + */ diff --git a/osfmk/sys/syslog.h b/osfmk/sys/syslog.h new file mode 100644 index 000000000..aed42097f --- /dev/null +++ b/osfmk/sys/syslog.h @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 03:13:08 ezf + * change marker to not FREE + * [1994/09/22 21:58:52 ezf] + * + * Revision 1.2.2.3 1993/08/03 18:30:38 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 19:02:56 gm] + * + * Revision 1.2.2.2 1993/06/09 02:55:27 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:30:57 jeffc] + * + * Revision 1.2 1993/04/19 17:16:58 devrcs + * Fixes for ANSI C + * [1993/02/26 14:02:39 sp] + * + * Revision 1.1 1992/09/30 02:36:56 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:40:18 mrt + * Correcting copyright + * + * Revision 2.3 91/05/13 06:07:15 af + * Removed CMU conditionals. + * [91/05/12 16:31:12 af] + * + * Revision 2.2 91/02/05 17:56:53 mrt + * Changed to new Mach copyright + * [91/02/01 17:49:22 mrt] + * + * Revision 2.1 89/08/03 16:10:10 rwd + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ + +/* + * Copyright (c) 1982, 1986, 1988 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms are permitted + * provided that the above copyright notice and this paragraph are + * duplicated in all such forms and that any documentation, + * advertising materials, and other materials related to such + * distribution and use acknowledge that the software was developed + * by the University of California, Berkeley. The name of the + * University may not be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * @(#)syslog.h 7.10 (Berkeley) 6/27/88 + */ + +/* + * Facility codes + */ + +#ifndef _SYS_SYSLOG_H_ +#define _SYS_SYSLOG_H_ + +#define LOG_KERN (0<<3) /* kernel messages */ +#define LOG_USER (1<<3) /* random user-level messages */ +#define LOG_MAIL (2<<3) /* mail system */ +#define LOG_DAEMON (3<<3) /* system daemons */ +#define LOG_AUTH (4<<3) /* security/authorization messages */ +#define LOG_SYSLOG (5<<3) /* messages generated internally by syslogd */ +#define LOG_LPR (6<<3) /* line printer subsystem */ +#define LOG_NEWS (7<<3) /* network news subsystem */ +#define LOG_UUCP (8<<3) /* UUCP subsystem */ + /* other codes through 15 reserved for system use */ +#define LOG_LOCAL0 (16<<3) /* reserved for local use */ +#define LOG_LOCAL1 (17<<3) /* reserved for local use */ +#define LOG_LOCAL2 (18<<3) /* reserved for local use */ +#define LOG_LOCAL3 (19<<3) /* reserved for local use */ +#define LOG_LOCAL4 (20<<3) /* reserved for local use */ +#define LOG_LOCAL5 (21<<3) /* reserved for local use */ +#define LOG_LOCAL6 (22<<3) /* reserved for local use */ +#define LOG_LOCAL7 (23<<3) /* reserved for local use */ + +#define LOG_NFACILITIES 24 /* maximum number of facilities */ +#define LOG_FACMASK 0x03f8 /* mask to extract facility part */ + +#define LOG_FAC(p) (((p) & LOG_FACMASK) >> 3) /* facility of pri */ + +/* + * Priorities (these are ordered) + */ + +#define LOG_EMERG 0 /* system is unusable */ +#define LOG_ALERT 1 /* action must be taken immediately */ +#define LOG_CRIT 2 /* critical conditions */ +#define LOG_ERR 3 /* error conditions */ +#define LOG_WARNING 4 /* warning conditions */ +#define LOG_NOTICE 5 /* normal but signification condition */ +#define LOG_INFO 6 /* informational */ +#define LOG_DEBUG 7 /* debug-level messages */ + +#define LOG_PRIMASK 0x0007 /* mask to extract priority part (internal) */ +#define LOG_PRI(p) ((p) & LOG_PRIMASK) /* extract priority */ + +#define LOG_MAKEPRI(fac, pri) (((fac) << 3) | (pri)) + +#ifdef MACH_KERNEL +#define 
LOG_PRINTF -1 /* pseudo-priority to indicate use of printf */ +#endif + +/* + * arguments to setlogmask. + */ +#define LOG_MASK(pri) (1 << (pri)) /* mask for one priority */ +#define LOG_UPTO(pri) ((1 << ((pri)+1)) - 1) /* all priorities through pri */ + +/* + * Option flags for openlog. + * + * LOG_ODELAY no longer does anything; LOG_NDELAY is the + * inverse of what it used to be. + */ +#define LOG_PID 0x01 /* log the pid with each message */ +#define LOG_CONS 0x02 /* log on the console if errors in sending */ +#define LOG_ODELAY 0x04 /* delay open until first syslog() (default) */ +#define LOG_NDELAY 0x08 /* don't delay open */ +#define LOG_NOWAIT 0x10 /* if forking to log on console, don't wait() */ + +#if defined(__STDC__) +extern void openlog(const char *, int); +extern void syslog(int, const char *, ...); +extern void closelog(void); +extern void setlogmask(int); +#endif /* defined(__STDC__) */ +#endif /* _SYS_SYSLOG_H_ */ diff --git a/osfmk/sys/time.h b/osfmk/sys/time.h new file mode 100644 index 000000000..f2fa500d9 --- /dev/null +++ b/osfmk/sys/time.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 03:13:17 ezf + * change marker to not FREE + * [1994/09/22 21:58:56 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:55:33 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:31:02 jeffc] + * + * Revision 1.2 1993/04/19 17:17:07 devrcs + * Fixes for ANSI C + * [1993/02/26 14:02:46 sp] + * + * Revision 1.1 1992/09/30 02:36:58 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 17:40:25 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:56:58 mrt + * Changed to new Mach copyright + * [91/02/01 17:49:29 mrt] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ +/* + * Time-keeper for kernel IO devices. + * + * May or may not have any relation to wall-clock time. + */ + +#ifndef _SYS_TIME_H_ +#define _SYS_TIME_H_ +#include + +extern time_value_t time; + +/* + * Definitions to keep old code happy. + */ +#define timeval_t time_value_t +#define timeval time_value +#define tv_sec seconds +#define tv_usec microseconds + +#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) +#define timercmp(tvp, uvp, cmp) \ + ((tvp)->tv_sec cmp (uvp)->tv_sec || \ + (tvp)->tv_sec == (uvp)->tv_sec && (tvp)->tv_usec cmp (uvp)->tv_usec) +#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 +#endif /* _SYS_TIME_H_ */ diff --git a/osfmk/sys/tm.h b/osfmk/sys/tm.h new file mode 100644 index 000000000..db34b6f84 --- /dev/null +++ b/osfmk/sys/tm.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.6.1 1994/09/23 03:13:27 ezf + * change marker to not FREE + * [1994/09/22 21:59:00 ezf] + * + * Revision 1.2.2.2 1993/06/09 02:55:37 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:31:05 jeffc] + * + * Revision 1.2 1993/04/19 17:17:19 devrcs + * Fixes for ANSI C + * [1993/02/26 14:02:53 sp] + * + * Revision 1.1 1992/09/30 02:37:00 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.3 91/05/14 17:40:33 mrt + * Correcting copyright + * + * Revision 2.2 91/02/05 17:57:03 mrt + * Changed to new Mach copyright + * [91/02/01 17:49:35 mrt] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ +/* + * Time, broken out. + */ +#ifndef _SYS_TM_H_ +#define _SYS_TM_H_ +struct tm { + int tm_sec; + int tm_min; + int tm_hour; + int tm_mday; + int tm_mon; + int tm_year; +}; +#endif /* _SYS_TM_H_ */ diff --git a/osfmk/sys/types.h b/osfmk/sys/types.h new file mode 100644 index 000000000..e1e72d576 --- /dev/null +++ b/osfmk/sys/types.h @@ -0,0 +1,165 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:48 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.10.3 1996/01/09 19:23:12 devrcs + * Change time_t typedef from "unsigned int" to "int" to + * match the server and what it has historically been. + * Added more shorthand definitions for unsigned typedefs. + * Made conditional on ASSEMBLER not being defined. + * [1995/12/01 20:39:08 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:10:35 jfraser] + * + * Revision 1.1.10.2 1995/01/06 19:57:26 devrcs + * mk6 CR668 - 1.3b26 merge + * add shorthand defs for unsigned typedefs + * OSF alpha pal merge + * paranoid bit masking, 64bit cleanup, add NBBY + * [1994/10/14 03:43:58 dwm] + * + * Revision 1.1.10.1 1994/09/23 03:13:36 ezf + * change marker to not FREE + * [1994/09/22 21:59:04 ezf] + * + * Revision 1.1.3.2 1993/06/03 00:18:19 jeffc + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:31:08 jeffc] + * + * Revision 1.1 1992/09/30 02:37:03 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.6 91/05/14 17:40:39 mrt + * Correcting copyright + * + * Revision 2.5 91/02/05 17:57:07 mrt + * Changed to new Mach copyright + * [91/02/01 17:49:41 mrt] + * + * Revision 2.4 90/08/27 22:13:03 dbg + * Created. + * [90/07/16 dbg] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ +#ifndef _SYS_TYPES_H_ +#define _SYS_TYPES_H_ + +#ifndef ASSEMBLER + +/* + * Common type definitions that lots of old files seem to want. + */ + +typedef unsigned char u_char; /* unsigned char */ +typedef unsigned short u_short; /* unsigned short */ +typedef unsigned int u_int; /* unsigned int */ +typedef unsigned long u_long; /* unsigned long */ + +typedef struct _quad_ { + unsigned int val[2]; /* 2 32-bit values make... 
*/ +} quad; /* an 8-byte item */ + +typedef char * caddr_t; /* address of a (signed) char */ + +typedef int time_t; /* a signed 32 */ +typedef unsigned int daddr_t; /* an unsigned 32 */ +typedef unsigned int off_t; /* another unsigned 32 */ + +typedef unsigned short dev_t; /* another unsigned short */ +#define NODEV ((dev_t)-1) /* and a null value for it */ + +#define major(i) (((i) >> 8) & 0xFF) +#define minor(i) ((i) & 0xFF) +#define makedev(i,j) ((((i) & 0xFF) << 8) | ((j) & 0xFF)) + +#define NBBY 8 + +#ifndef NULL +#define NULL ((void *) 0) /* the null pointer */ +#endif + +/* + * Shorthand type definitions for unsigned storage classes + */ +typedef unsigned char uchar_t; +typedef unsigned short ushort_t; +typedef unsigned int uint_t; +typedef unsigned long ulong_t; +typedef volatile unsigned char vuchar_t; +typedef volatile unsigned short vushort_t; +typedef volatile unsigned int vuint_t; +typedef volatile unsigned long vulong_t; + +/* + * Shorthand type definitions for unsigned storage classes + */ +typedef uchar_t uchar; +typedef ushort_t ushort; +typedef uint_t uint; +typedef ulong_t ulong; + +#endif /* !ASSEMBLER */ + +#endif /* _SYS_TYPES_H_ */ diff --git a/osfmk/sys/varargs.h b/osfmk/sys/varargs.h new file mode 100644 index 000000000..19c3b94b9 --- /dev/null +++ b/osfmk/sys/varargs.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.2.28.1 1996/11/29 16:59:53 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Added powerpc special case + * [1996/11/29 16:34:18 stephen] + * + * Revision 1.2.15.2 1996/01/09 19:23:16 devrcs + * Added alpha varargs.h + * [1995/12/01 20:39:10 jfraser] + * + * Merged '64-bit safe' changes from DEC alpha port. + * [1995/11/21 18:10:39 jfraser] + * + * Revision 1.2.15.1 1994/09/23 03:13:46 ezf + * change marker to not FREE + * [1994/09/22 21:59:07 ezf] + * + * Revision 1.2.4.3 1993/08/03 18:30:40 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 19:03:10 gm] + * + * Revision 1.2.4.2 1993/06/09 02:55:42 gm + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:31:11 jeffc] + * + * Revision 1.2 1993/04/19 17:17:26 devrcs + * correct endif tags for ansi + * [1993/02/25 17:56:02 david] + * + * Revision 1.1 1992/09/30 02:37:05 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.10 91/12/10 16:32:53 jsb + * Fixes from Intel + * [91/12/10 15:52:01 jsb] + * + * Revision 2.9 91/09/12 16:54:22 debo + * Added mac2. + * [91/09/11 17:22:52 debo] + * + * Revision 2.8 91/07/09 23:23:50 danner + * Added luna88k support. 
+ * [91/06/24 danner] + * + * Revision 2.7 91/06/18 20:53:02 jsb + * Moved i860 varargs code here from i860/i860_varargs.h, thanks to + * new copyright from Intel. + * [91/06/18 19:15:02 jsb] + * + * Revision 2.6 91/05/14 17:40:46 mrt + * Correcting copyright + * + * Revision 2.5 91/02/05 17:57:12 mrt + * Changed to new Mach copyright + * [91/02/01 17:49:51 mrt] + * + * Revision 2.4 90/11/25 17:48:50 jsb + * Added i860 support. + * [90/11/25 16:54:09 jsb] + * + * Revision 2.3 90/05/03 15:51:29 dbg + * Added i386. + * [90/02/08 dbg] + * + * Revision 2.2 89/11/29 14:16:44 af + * RCS-ed, added mips case. Mips also needs it in Mach standalone + * programs. + * [89/10/28 10:39:14 af] + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. 
+ */ +/* + */ + +#ifndef _SYS_VARARGS_H_ +#define _SYS_VARARGS_H_ + +#if defined(vax) || defined(sun3) || defined(mips) || defined(i386) || defined(mac2) +#define va_dcl int va_alist; +typedef char * va_list; + +#define va_start(pvar) (pvar) = (va_list)&va_alist +#define va_end(pvar) +#ifdef mips +# define va_arg(pvar, type) ((type *)(pvar = \ + (va_list) (sizeof(type) > 4 ? ((int)pvar + 2*8 - 1) & -8 \ + : ((int)pvar + 2*4 - 1) & -4)))[-1] +#else /* mips */ +#define va_arg(pvar,type) ( \ + (pvar) += ((sizeof(type)+3) & ~0x3), \ + *((type *)((pvar) - ((sizeof(type)+3) & ~0x3))) ) +#endif /* mips */ +#endif /* vax */ + +/* + * Try to make varargs work for the Multimax so that _doprnt can be + * declared as + * _doprnt(file, fmt, list) + * FILE *file; + * char *fmt; + * va_list *list; + * and use + * + * n = va_arg(*list, type) + * + * without needing to drag in extra declarations + * + * and printf becomes + * + * printf(fmt, va_alist) + * char *fmt; + * va_dcl + * { + * va_list listp; + * va_start(listp); + * _doprnt((FILE *)0, fmt, &listp); + * va_end(listp); + * } + */ + +#if defined(multimax) && defined(MACH_KERNEL) + +/* + * the vararglist pointer is an elaborate structure (ecch) + */ +typedef struct va_list { + char *va_item; /* current item */ + int *va_ptr1, /* arglist pointers for 1, 2, n */ + *va_ptr2, + *va_ptrn; + int va_ct; /* current argument number */ +} va_list; + +#define va_alist va_arg1, va_arg2, va_argn +#define va_dcl int va_arg1, va_arg2, va_argn; + +#define va_start(pvar) ( \ + (pvar).va_ptr1 = &va_arg1, \ + (pvar).va_ptr2 = &va_arg2, \ + (pvar).va_ptrn = &va_argn, \ + (pvar).va_ct = 0 ) + +#define va_end(pvar) + +#define va_arg(pvar, type) ( \ + (pvar).va_ct++, \ + (pvar).va_item = (char *) \ + ( ((pvar).va_ct == 1) \ + ? (pvar).va_ptr1 \ + : ((pvar).va_ct == 2) \ + ? (pvar).va_ptr2 \ + : (pvar).va_ptrn++ ) , \ + *((type *)((pvar).va_item)) ) + +/* what a mess! 
*/ +#endif /* defined(multimax) && defined(MACH_KERNEL) */ + +#if i860 +#include /* PGI vs. Greenhills */ +#endif + +#ifdef luna88k +#include /* How nice */ +#endif + +#if defined (__PPC__) && defined (_CALL_SYSV) +#include /* care of gcc compiler - TEMPORARY 2.7.1 TODO NMGS*/ +#endif + +#if defined(__alpha) +# include +#endif /* defined(__alpha) */ + +#endif /* _SYS_VARARGS_H_ */ diff --git a/osfmk/sys/version.h b/osfmk/sys/version.h new file mode 100644 index 000000000..fa9a638ba --- /dev/null +++ b/osfmk/sys/version.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:59 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.6.1 1994/09/23 03:13:55 ezf + * change marker to not FREE + * [1994/09/22 21:59:11 ezf] + * + * Revision 1.1.2.2 1993/06/03 00:18:34 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:31:15 jeffc] + * + * Revision 1.1 1992/09/30 02:37:07 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.4 91/05/14 17:40:52 mrt + * Correcting copyright + * + * Revision 2.3 91/02/05 17:57:18 mrt + * Changed to new Mach copyright + * [91/02/01 17:49:58 mrt] + * + * Revision 2.2 90/01/19 14:35:31 rwd + * Set version to 3.0 and set include version to 0 + * [89/12/10 rwd] + * + * Revision 2.1 89/08/03 16:10:14 rwd + * Created. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + */ + +/* + * Each kernel has a major and minor version number. Changes in + * the major number in general indicate a change in exported features. + * Changes in minor number usually correspond to internal-only + * changes that the user need not be aware of (in general). These + * values are stored at boot time in the machine_info strucuture and + * can be obtained by user programs with the host_info kernel call. 
+ * This mechanism is intended to be the formal way for Mach programs + * to provide for backward compatibility in future releases. + * + * [ This needs to be reconciled somehow with the major/minor version + * number stuffed into the version string - mja, 5/8/87 ] + * + * Following is an informal history of the numbers: + * + * 25-March-87 Avadis Tevanian, Jr. + * Created version numbering scheme. Started with major 1, + * minor 0. + */ + +#define KERNEL_MAJOR_VERSION 3 +#define KERNEL_MINOR_VERSION 0 + +/* + * Version number of the kernel include files. + * + * This number must be changed whenever an incompatible change is made to one + * or more of our include files which are used by application programs that + * delve into kernel memory. The number should normally be simply incremented + * but may actually be changed in any manner so long as it differs from the + * numbers previously assigned to any other versions with which the current + * version is incompatible. It is used at boot time to determine which + * versions of the system programs to install. + * + * Note that the symbol _INCLUDE_VERSION must be set to this in the symbol + * table. On the VAX for example, this is done in locore.s. + */ + +/* + * Current allocation strategy: bump either branch by 2, until non-MACH is + * excised from the CSD environment. 
+ */ +#define INCLUDE_VERSION 0 diff --git a/osfmk/vm/Makefile b/osfmk/vm/Makefile new file mode 100644 index 000000000..2339c55c6 --- /dev/null +++ b/osfmk/vm/Makefile @@ -0,0 +1,28 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = + +EXPORT_ONLY_FILES = \ + vm_map.h \ + vm_kern.h \ + pmap.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = vm + +EXPORT_MI_LIST = ${DATAFILES} ${EXPORT_ONLY_FILES} + +EXPORT_MI_DIR = vm + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/osfmk/vm/bsd_vm.c b/osfmk/vm/bsd_vm.c new file mode 100644 index 000000000..6c4a68dc7 --- /dev/null +++ b/osfmk/vm/bsd_vm.c @@ -0,0 +1,791 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* BSD VM COMPONENT INTERFACES */ +int +get_map_nentries( + vm_map_t); + +vm_offset_t +get_map_start( + vm_map_t); + +vm_offset_t +get_map_end( + vm_map_t); + +/* + * + */ +int +get_map_nentries( + vm_map_t map) +{ + return(map->hdr.nentries); +} + +/* + * + */ +vm_offset_t +get_map_start( + vm_map_t map) +{ + return(vm_map_first_entry(map)->vme_start); +} + +/* + * + */ +vm_offset_t +get_map_end( + vm_map_t map) +{ + return(vm_map_last_entry(map)->vme_end); +} + +/* + * BSD VNODE PAGER + */ + +/* until component support available */ +int vnode_pager_workaround; + +typedef int vnode_port_t; + +typedef struct vnode_pager { + ipc_port_t pager; /* pager */ + ipc_port_t pager_handle; /* pager handle */ + ipc_port_t vm_obj_handle; /* memory object's control handle */ + vnode_port_t vnode_handle; /* vnode handle */ +} *vnode_pager_t; + +typedef struct vnode_port_entry { + queue_chain_t links; /* queue links */ + ipc_port_t name; /* port name */ + vnode_pager_t pager_rec; /* pager record */ +} *vnode_port_entry_t; + + +#define VNODE_PORT_HASH_COUNT 127 +#define vnode_port_hash(name_port) \ + (((int)(name_port) & 0xffffff) % VNODE_PORT_HASH_COUNT) + +queue_head_t vnode_port_hashtable[VNODE_PORT_HASH_COUNT]; +zone_t vnode_port_hash_zone; +decl_simple_lock_data(,vnode_port_hash_lock) + + +ipc_port_t +trigger_name_to_port( + mach_port_t); + +void +vnode_pager_bootstrap( + void); + +void +vnode_pager_alloc_map( + void); + +ipc_port_t +vnode_pager_setup( + vnode_port_t, + ipc_port_t); + +ipc_port_t +vnode_pager_lookup( + vnode_port_t, + ipc_port_t); + +kern_return_t +vnode_pager_init( + ipc_port_t, + ipc_port_t, + vm_size_t); + +kern_return_t +vnode_pager_data_request( + ipc_port_t, + ipc_port_t, + vm_object_offset_t, + vm_size_t, + vm_prot_t); + +kern_return_t +vnode_pager_data_return( + 
ipc_port_t, + ipc_port_t, + vm_object_offset_t, + pointer_t, + vm_size_t, + boolean_t, + boolean_t); + +void +vnode_pager_no_senders( + ipc_port_t, + mach_port_mscount_t); + +kern_return_t +vnode_pager_terminate( + ipc_port_t, + ipc_port_t); + +kern_return_t +vnode_pager_cluster_read( + vnode_pager_t, + vm_object_offset_t, + vm_size_t); + +void +vnode_pager_cluster_write( + vnode_pager_t, + vm_object_offset_t, + vm_size_t); + +kern_return_t +memory_object_change_attributes( + vm_object_t, + memory_object_flavor_t, + memory_object_info_t, + mach_msg_type_number_t, + ipc_port_t, + mach_msg_type_name_t); + +int +vnode_pagein( + vnode_port_t, + upl_t, + vm_offset_t, + vm_object_offset_t, + int, + int, + int *); +int +vnode_pageout( + vnode_port_t, + upl_t, + vm_offset_t, + vm_object_offset_t, + int, + int, + int *); + +vnode_pager_t +vnode_object_create( + vnode_port_t vp); + +void +vnode_port_hash_init(void); + +void +vnode_port_hash_insert( + ipc_port_t, + vnode_pager_t); + +vnode_pager_t +vnode_port_hash_lookup( + ipc_port_t); + +void +vnode_port_hash_delete( + ipc_port_t); + +void +vnode_pager_release_from_cache( + int *cnt); + +zone_t vnode_pager_zone; + + +#define VNODE_PAGER_NULL ((vnode_pager_t) 0) + +/* TODO: Should be set dynamically by vnode_pager_init() */ +#define CLUSTER_SHIFT 1 + +/* TODO: Should be set dynamically by vnode_pager_bootstrap() */ +#define MAX_VNODE 10000 + + +#if DEBUG +int pagerdebug=0; + +#define PAGER_ALL 0xffffffff +#define PAGER_INIT 0x00000001 +#define PAGER_PAGEIN 0x00000002 + +#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}} +#else +#define PAGER_DEBUG(LEVEL, A) +#endif + +/* + * Routine: macx_triggers + * Function: + * Syscall interface to set the call backs for low and + * high water marks. 
+ */ +int +macx_triggers( + int hi_water, + int low_water, + int flags, + mach_port_t trigger_name) +{ + kern_return_t kr; + ipc_port_t default_pager_port = MACH_PORT_NULL; + ipc_port_t trigger_port; + + kr = host_default_memory_manager(host_priv_self(), + &default_pager_port, 0); + if(kr != KERN_SUCCESS) { + return EINVAL; + } + trigger_port = trigger_name_to_port(trigger_name); + if(trigger_port == NULL) { + return EINVAL; + } + /* trigger_port is locked and active */ + ip_unlock(trigger_port); + default_pager_triggers(default_pager_port, + hi_water, low_water, flags, trigger_port); + ipc_port_make_send(trigger_port); + + /* + * Set thread scheduling priority and policy for the current thread + * it is assumed for the time being that the thread setting the alert + * is the same one which will be servicing it. + */ + { + struct policy_timeshare_base fifo_base; + struct policy_timeshare_limit fifo_limit; + policy_base_t base; + processor_set_t pset; + policy_limit_t limit; + + pset = (current_thread())->processor_set; + base = (policy_base_t) &fifo_base; + limit = (policy_limit_t) &fifo_limit; + fifo_limit.max_priority = fifo_base.base_priority = MAXPRI_STANDARD; + thread_set_policy((current_thread())->top_act, pset, POLICY_FIFO, base, POLICY_TIMESHARE_BASE_COUNT, limit, POLICY_TIMESHARE_LIMIT_COUNT); + } + + current_thread()->vm_privilege = TRUE; +} + +/* + * + */ +ipc_port_t +trigger_name_to_port( + mach_port_t trigger_name) +{ + ipc_port_t trigger_port; + ipc_space_t space; + + if (trigger_name == 0) + return (NULL); + + space = current_space(); + if(ipc_port_translate_receive(space, (mach_port_name_t)trigger_name, + &trigger_port) != KERN_SUCCESS) + return (NULL); + return trigger_port; +} + +/* + * + */ +void +vnode_pager_bootstrap(void) +{ + register vm_size_t size; + + size = (vm_size_t) sizeof(struct vnode_pager); + vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size, + PAGE_SIZE, "vnode pager structures"); + vnode_port_hash_init(); + + return; +} + 
+/* + * + */ +ipc_port_t +vnode_pager_setup( + vnode_port_t vp, + ipc_port_t pager) +{ + vnode_pager_t vnode_object; + kern_return_t kr; + ipc_port_t previous; + + if (pager && + (vnode_object = vnode_port_hash_lookup(pager))) { + if (vnode_object->vnode_handle == vp) + return(pager); + } + + vnode_object = vnode_object_create(vp); + if (vnode_object == VNODE_PAGER_NULL) + panic("vnode_pager_setup: vnode_object_create() failed"); + + vnode_object->pager = ipc_port_alloc_kernel(); + assert (vnode_object->pager != IP_NULL); + pager_mux_hash_insert(vnode_object->pager, + (rpc_subsystem_t)&vnode_pager_workaround); + + vnode_object->pager_handle = ipc_port_make_send(vnode_object->pager); + + vnode_port_hash_insert(vnode_object->pager_handle, vnode_object); + + ipc_port_make_sonce(vnode_object->pager); + ip_lock(vnode_object->pager); /* unlocked in nsrequest below */ + ipc_port_nsrequest(vnode_object->pager, 1, vnode_object->pager, &previous); + + PAGER_DEBUG(PAGER_INIT, ("vnode_pager_setup: vp %x pager %x vnode_pager %x\n", vp, vnode_object->pager_handle, vnode_object)); + + ubc_setpager( vp, vnode_object->pager_handle); + return(vnode_object->pager_handle); +} + +/* + * + */ +ipc_port_t +vnode_pager_lookup( + vnode_port_t vp, + ipc_port_t pager) +{ + vnode_pager_t vnode_object; + kern_return_t kr; + + if (pager && + (vnode_object = vnode_port_hash_lookup(pager))) { + if (vnode_object->vnode_handle == vp) + return(vnode_object->vm_obj_handle); + else + return NULL; + } + else + return NULL; +} + +/* + * + */ +kern_return_t +vnode_pager_init(ipc_port_t pager, + ipc_port_t pager_request, + vm_size_t pg_size) +{ + vnode_pager_t vnode_object; + kern_return_t kr; + memory_object_attr_info_data_t attributes; + vm_object_t vm_object; + + + PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %x, %x, %x\n", pager, pager_request, pg_size)); + + vnode_object = vnode_port_hash_lookup(pager); + if (vnode_object == VNODE_PAGER_NULL) + panic("vnode_pager_init: lookup failed"); + + 
vnode_object->vm_obj_handle = pager_request; + + vm_object = vm_object_lookup(pager_request); + + if (vm_object == VM_OBJECT_NULL) + panic("vnode_pager_init: vm_object_lookup() failed"); + + attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY; + /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/ + attributes.cluster_size = (1 << (PAGE_SHIFT)); + attributes.may_cache_object = TRUE; + attributes.temporary = TRUE; + + kr = memory_object_change_attributes( + vm_object, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT, + MACH_PORT_NULL, 0); + if (kr != KERN_SUCCESS) + panic("vnode_pager_init: memory_object_change_attributes() failed"); + + return(KERN_SUCCESS); +} + +/* + * + */ +kern_return_t +vnode_pager_data_return( + ipc_port_t mem_obj, + ipc_port_t control_port, + vm_object_offset_t offset, + pointer_t addr, + vm_size_t data_cnt, + boolean_t dirty, + boolean_t kernel_copy) +{ + register vnode_pager_t vnode_object; + + vnode_object = vnode_port_hash_lookup(mem_obj); + if (vnode_object == VNODE_PAGER_NULL) + panic("vnode_pager_data_return: lookup failed"); + + vnode_pager_cluster_write(vnode_object, offset, data_cnt); + + return KERN_SUCCESS; +} + +/* + * + */ +kern_return_t +vnode_pager_data_request( + ipc_port_t mem_obj, + ipc_port_t mem_obj_control, + vm_object_offset_t offset, + vm_size_t length, + vm_prot_t protection_required) +{ + register vnode_pager_t vnode_object; + + PAGER_DEBUG(PAGER_ALL, ("vnode_pager_data_request: %x, %x, %x, %x, %x\n", mem_obj, mem_obj_control, offset, length, protection_required)); + + vnode_object = vnode_port_hash_lookup(mem_obj); + + PAGER_DEBUG(PAGER_PAGEIN, ("vnode_pager_data_request: %x, %x, %x, %x, %x, vnode_object %x\n", mem_obj, mem_obj_control, offset, length, protection_required, vnode_object)); + + if (vnode_object == VNODE_PAGER_NULL) + panic("vnode_pager_data_request: lookup failed"); + + vnode_pager_cluster_read(vnode_object, offset, length); + + 
return KERN_SUCCESS; +} + +/* + * + */ +void +vnode_pager_no_senders( + ipc_port_t mem_obj, + mach_port_mscount_t mscount) +{ + register vnode_pager_t vnode_object; + + PAGER_DEBUG(PAGER_ALL, ("vnode_pager_nosenders: %x, %x\n", mem_obj, mscount)); + + vnode_object = vnode_port_hash_lookup(mem_obj); + if (vnode_object == VNODE_PAGER_NULL) + panic("vnode_pager_no_senders: lookup failed"); + + assert(vnode_object->pager_handle == mem_obj); + + pager_mux_hash_delete((ipc_port_t) vnode_object->pager_handle); + ipc_port_dealloc_kernel(vnode_object->pager); + vnode_port_hash_delete(vnode_object->pager_handle); + if (vnode_object->vnode_handle != (vnode_port_t) NULL) { + vnode_pager_vrele(vnode_object->vnode_handle); + } + zfree(vnode_pager_zone, (vm_offset_t) vnode_object); + + return; +} + +/* + * + */ +kern_return_t +vnode_pager_terminate( + ipc_port_t mem_obj, + ipc_port_t mem_obj_control) +{ + register vnode_pager_t vnode_object; + + PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %x, %x\n", mem_obj, mem_obj_control)); + + vnode_object = vnode_port_hash_lookup(mem_obj); + if (vnode_object == VNODE_PAGER_NULL) + panic("vnode_pager_terminate: lookup failed"); + + assert(vnode_object->pager_handle == mem_obj); + + /* release extra send right created by the fact that the caller */ + /* of vnode_pager_setup does not establish a mapping between a */ + /* cache object and the mem_obj (AMO). 
When a subsequent vm_map */ + /* is done, vm_map will bump the send right count */ + ipc_port_release_send(mem_obj); + + /* release a send right because terminate is called directly and */ + /* not through IPC, the right won't disappear quietly */ + ipc_port_release_send(mem_obj); + + ipc_port_dealloc_kernel(mem_obj_control); + + return(KERN_SUCCESS); +} + +/* + * + */ +kern_return_t +vnode_pager_synchronize( + ipc_port_t pager, + ipc_port_t pager_request, + vm_object_offset_t offset, + vm_offset_t length, + vm_sync_t sync_flags) +{ + memory_object_synchronize_completed(vm_object_lookup(pager_request), offset, length); + + return (KERN_SUCCESS); +} + +/* + * + */ +void +vnode_pager_cluster_write( + vnode_pager_t vnode_object, + vm_object_offset_t offset, + vm_size_t cnt) +{ + int error = 0; + int local_error = 0; + int kret; + int size; + + if (cnt & PAGE_MASK) { + panic("vs_cluster_write: cnt not a multiple of PAGE_SIZE"); + } + size = (cnt < (PAGE_SIZE*32)) ? cnt : (PAGE_SIZE*32); /* effective min */ + + while (cnt) { + + kret = vnode_pageout(vnode_object->vnode_handle, (upl_t )NULL, (vm_offset_t)NULL, offset, size, 0, &local_error); + + if (local_error != 0) { + error = local_error; + local_error = 0; + } + cnt -= size; + offset += size; + } +#if 0 + if (error != 0) + return(KERN_FAILURE); + + return(KERN_SUCCESS); +#endif /* 0 */ +} + + +/* + * + */ +kern_return_t +vnode_pager_cluster_read( + vnode_pager_t vnode_object, + vm_object_offset_t offset, + vm_size_t cnt) +{ + int error = 0; + int local_error = 0; + int kret; + int size; + + if(cnt & PAGE_MASK) { + panic("vs_cluster_read: cnt not a multiple of PAGE_SIZE"); + } + + size = PAGE_SIZE; + + while (cnt) { + + kret = vnode_pagein(vnode_object->vnode_handle, (upl_t)NULL, (vm_offset_t)NULL, offset, size, 0, &local_error); + + if (local_error != 0) { + error = local_error; + local_error = 0; + } + cnt -= size; + offset += size; + } + if (error != 0) + return(KERN_FAILURE); + + return(KERN_SUCCESS); + +} + + +/* 
+ * + */ +void +vnode_pager_release_from_cache( + int *cnt) +{ + memory_object_free_from_cache( + &realhost, (int)&vnode_pager_workaround, cnt); +} + +/* + * + */ +vnode_pager_t +vnode_object_create( + vnode_port_t vp) +{ + register vnode_pager_t vnode_object; + + vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone); + if (vnode_object == VNODE_PAGER_NULL) + return(VNODE_PAGER_NULL); + vnode_object->pager_handle = IP_NULL; + vnode_object->vm_obj_handle = IP_NULL; + vnode_object->vnode_handle = vp; + + return(vnode_object); +} + +/* + * + */ +void +vnode_port_hash_init(void) +{ + register vm_size_t size; + register int i; + + + size = (vm_size_t) sizeof(struct vnode_port_entry); + + vnode_port_hash_zone = zinit(size, + (vm_size_t) MAX_VNODE * size, + PAGE_SIZE, "vnode_pager port hash"); + + for (i = 0; i < VNODE_PORT_HASH_COUNT; i++) + queue_init(&vnode_port_hashtable[i]); + + simple_lock_init(&vnode_port_hash_lock,ETAP_NO_TRACE); +} + +/* + * + */ +void +vnode_port_hash_insert( + ipc_port_t name_port, + vnode_pager_t rec) +{ + register vnode_port_entry_t new_entry; + + new_entry = (vnode_port_entry_t) zalloc(vnode_port_hash_zone); + /* + * TODO: Delete the following check once MAX_VNODE is removed + */ + if (!new_entry) + panic("vnode_port_hash_insert: no space"); + new_entry->name = name_port; + new_entry->pager_rec = rec; + + simple_lock(&vnode_port_hash_lock); + queue_enter(&vnode_port_hashtable[vnode_port_hash(name_port)], + new_entry, vnode_port_entry_t, links); + simple_unlock(&vnode_port_hash_lock); +} + +/* + * + */ +vnode_pager_t +vnode_port_hash_lookup( + ipc_port_t name_port) +{ + register queue_t bucket; + register vnode_port_entry_t entry; + vnode_pager_t rec; + + bucket = (queue_t) &vnode_port_hashtable[vnode_port_hash(name_port)]; + + simple_lock(&vnode_port_hash_lock); + entry = (vnode_port_entry_t) queue_first(bucket); + while (!queue_end(bucket,&entry->links)) { + if (entry->name == name_port) { + rec = entry->pager_rec; + 
simple_unlock(&vnode_port_hash_lock); + return(rec); + } + entry = (vnode_port_entry_t)queue_next(&entry->links); + } + simple_unlock(&vnode_port_hash_lock); + return(VNODE_PAGER_NULL); +} + +/* + * + */ +void +vnode_port_hash_delete( + ipc_port_t name_port) +{ + register queue_t bucket; + register vnode_port_entry_t entry; + + bucket = (queue_t) &vnode_port_hashtable[vnode_port_hash(name_port)]; + + simple_lock(&vnode_port_hash_lock); + entry = (vnode_port_entry_t) queue_first(bucket); + while (!queue_end(bucket,&entry->links)) { + if (entry->name == name_port) { + queue_remove(bucket, entry, vnode_port_entry_t,links); + simple_unlock(&vnode_port_hash_lock); + zfree(vnode_port_hash_zone, (vm_offset_t) entry); + return; + } + entry = (vnode_port_entry_t)queue_next(&entry->links); + } + simple_unlock(&vnode_port_hash_lock); +} diff --git a/osfmk/vm/cpm.h b/osfmk/vm/cpm.h new file mode 100644 index 000000000..b84f9ffbd --- /dev/null +++ b/osfmk/vm/cpm.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + * + */ + +#ifndef _VM_CPM_H_ +#define _VM_CPM_H_ + +/* + * File: vm/cpm.h + * Author: Alan Langerman + * Date: April 1995 and January 1996 + * + * Contiguous physical memory allocator. + */ + +#include +#include + +/* + * Return a linked list of physically contiguous + * wired pages. Caller is responsible for disposal + * via cpm_release. + * + * These pages are all in "gobbled" state when . + */ +extern kern_return_t + cpm_allocate(vm_size_t size, vm_page_t *list, boolean_t wire); + +/* + * CPM-specific event counters. + */ +#define VM_CPM_COUNTERS (MACH_KDB && MACH_COUNTERS && VM_CPM) +#if VM_CPM_COUNTERS +#define cpm_counter(foo) foo +#else /* VM_CPM_COUNTERS */ +#define cpm_counter(foo) +#endif /* VM_CPM_COUNTERS */ + +#endif /* _VM_CPM_H_ */ diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c new file mode 100644 index 000000000..6c51bdcb9 --- /dev/null +++ b/osfmk/vm/memory_object.c @@ -0,0 +1,1736 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/memory_object.c + * Author: Michael Wayne Young + * + * External memory management interface control functions. 
+ */ + +#ifdef MACH_BSD +/* THIS code should be removed when the component merge is completed */ +extern int vnode_pager_workaround; +#endif + +#include + +/* + * Interface dependencies: + */ + +#include /* For pointer_t */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +/* + * Implementation dependencies: + */ +#include /* For memcpy() */ + +#include +#include +#include +#include /* For pmap_clear_modify */ +#include +#include /* For current_thread() */ +#include +#include /* For kernel_map, vm_move */ +#include /* For vm_map_pageable */ +#include +#include + +#include + +#if MACH_PAGEMAP +#include +#endif /* MACH_PAGEMAP */ + + +ipc_port_t memory_manager_default = IP_NULL; +vm_size_t memory_manager_default_cluster = 0; +decl_mutex_data(,memory_manager_default_lock) + +/* + * Forward ref to file-local function: + */ +boolean_t +memory_object_update(vm_object_t, vm_object_offset_t, + vm_size_t, memory_object_return_t, int, vm_prot_t); + + +/* + * Routine: memory_object_should_return_page + * + * Description: + * Determine whether the given page should be returned, + * based on the page's state and on the given return policy. + * + * We should return the page if one of the following is true: + * + * 1. Page is dirty and should_return is not RETURN_NONE. + * 2. Page is precious and should_return is RETURN_ALL. + * 3. Should_return is RETURN_ANYTHING. + * + * As a side effect, m->dirty will be made consistent + * with pmap_is_modified(m), if should_return is not + * MEMORY_OBJECT_RETURN_NONE. 
+ */ + +#define memory_object_should_return_page(m, should_return) \ + (should_return != MEMORY_OBJECT_RETURN_NONE && \ + (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \ + ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \ + (should_return) == MEMORY_OBJECT_RETURN_ANYTHING)) + +typedef int memory_object_lock_result_t; + +#define MEMORY_OBJECT_LOCK_RESULT_DONE 0 +#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1 +#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2 +#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3 + +memory_object_lock_result_t memory_object_lock_page( + vm_page_t m, + memory_object_return_t should_return, + boolean_t should_flush, + vm_prot_t prot); + +/* + * Routine: memory_object_lock_page + * + * Description: + * Perform the appropriate lock operations on the + * given page. See the description of + * "memory_object_lock_request" for the meanings + * of the arguments. + * + * Returns an indication that the operation + * completed, blocked, or that the page must + * be cleaned. + */ +memory_object_lock_result_t +memory_object_lock_page( + vm_page_t m, + memory_object_return_t should_return, + boolean_t should_flush, + vm_prot_t prot) +{ + XPR(XPR_MEMORY_OBJECT, + "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n", + (integer_t)m, should_return, should_flush, prot, 0); + + /* + * If we cannot change access to the page, + * either because a mapping is in progress + * (busy page) or because a mapping has been + * wired, then give up. + */ + + if (m->busy || m->cleaning) + return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK); + + /* + * Don't worry about pages for which the kernel + * does not have any data. + */ + + if (m->absent || m->error || m->restart) + return(MEMORY_OBJECT_LOCK_RESULT_DONE); + + assert(!m->fictitious); + + if (m->wire_count != 0) { + /* + * If no change would take place + * anyway, return successfully. 
+ * + * No change means: + * Not flushing AND + * No change to page lock [2 checks] AND + * Should not return page + * + * XXX This doesn't handle sending a copy of a wired + * XXX page to the pager, but that will require some + * XXX significant surgery. + */ + if (!should_flush && + (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) && + ! memory_object_should_return_page(m, should_return)) { + + /* + * Restart page unlock requests, + * even though no change took place. + * [Memory managers may be expecting + * to see new requests.] + */ + m->unlock_request = VM_PROT_NONE; + PAGE_WAKEUP(m); + + return(MEMORY_OBJECT_LOCK_RESULT_DONE); + } + + return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK); + } + + /* + * If the page is to be flushed, allow + * that to be done as part of the protection. + */ + + if (should_flush) + prot = VM_PROT_ALL; + + /* + * Set the page lock. + * + * If we are decreasing permission, do it now; + * let the fault handler take care of increases + * (pmap_page_protect may not increase protection). + */ + + if (prot != VM_PROT_NO_CHANGE) { +#if 0 + /* code associated with the vestigial + * memory_object_data_unlock + */ + if ((m->page_lock ^ prot) & prot) { + pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot); + } + m->page_lock = prot; + m->lock_supplied = TRUE; + if (prot != VM_PROT_NONE) + m->unusual = TRUE; + else + m->unusual = FALSE; + + /* + * Restart any past unlock requests, even if no + * change resulted. If the manager explicitly + * requested no protection change, then it is assumed + * to be remembering past requests. + */ + + m->unlock_request = VM_PROT_NONE; +#endif /* 0 */ + PAGE_WAKEUP(m); + } + + /* + * Handle page returning. + */ + + if (memory_object_should_return_page(m, should_return)) { + + /* + * If we weren't planning + * to flush the page anyway, + * we may need to remove the + * page from the pageout + * system and from physical + * maps now. 
+ */ + + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + vm_page_unlock_queues(); + + if (!should_flush) + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + + if (m->dirty) + return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); + else + return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN); + } + + /* + * Handle flushing + */ + + if (should_flush) { + VM_PAGE_FREE(m); + } else { + extern boolean_t vm_page_deactivate_hint; + + /* + * XXX Make clean but not flush a paging hint, + * and deactivate the pages. This is a hack + * because it overloads flush/clean with + * implementation-dependent meaning. This only + * happens to pages that are already clean. + */ + + if (vm_page_deactivate_hint && + (should_return != MEMORY_OBJECT_RETURN_NONE)) { + vm_page_lock_queues(); + vm_page_deactivate(m); + vm_page_unlock_queues(); + } + } + + return(MEMORY_OBJECT_LOCK_RESULT_DONE); +} +#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \ +MACRO_BEGIN \ + \ + register int i; \ + register vm_page_t hp; \ + \ + vm_object_unlock(object); \ + \ + if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == \ + ((rpc_subsystem_t) &vnode_pager_workaround)) { \ + (void) vnode_pager_data_return(object->pager, \ + object->pager_request, \ + po, \ + POINTER_T(0), \ + data_cnt, \ + (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \ + !should_flush); \ + } else { \ + (void) memory_object_data_return(object->pager, \ + object->pager_request, \ + po, \ + POINTER_T(0), \ + data_cnt, \ + (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \ + !should_flush); \ + } \ + \ + vm_object_lock(object); \ + \ +MACRO_END + +#ifdef MACH_BSD +#define PAGEOUT_PAGES(object, new_object, new_offset, action, po) \ +MACRO_BEGIN \ + \ + vm_map_copy_t copy; \ + register int i; \ + register vm_page_t hp; \ + \ + vm_object_unlock(object); \ + \ + (void) vm_map_copyin_object(new_object, 0, new_offset, ©); \ + \ + if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == \ + ((rpc_subsystem_t) 
&vnode_pager_workaround)) { \ + (void) vnode_pager_data_return(object->pager, \ + object->pager_request, \ + po, \ + POINTER_T(copy), \ + new_offset, \ + (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \ + !should_flush); \ + } else { \ + (void) memory_object_data_return(object->pager, \ + object->pager_request, \ + po, \ + POINTER_T(copy), \ + new_offset, \ + (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \ + !should_flush); \ + } \ + \ + vm_object_lock(object); \ + \ + for (i = 0; i < atop(new_offset); i++) { \ + hp = holding_pages[i]; \ + if (hp != VM_PAGE_NULL) { \ + vm_object_paging_end(object); \ + VM_PAGE_FREE(hp); \ + } \ + } \ + \ + new_object = VM_OBJECT_NULL; \ +MACRO_END +#else +#define PAGEOUT_PAGES(object, new_object, new_offset, action, po) \ +MACRO_BEGIN \ + \ + vm_map_copy_t copy; \ + register int i; \ + register vm_page_t hp; \ + \ + vm_object_unlock(object); \ + \ + (void) vm_map_copyin_object(new_object, 0, new_offset, ©); \ + \ + (void) memory_object_data_return( \ + object->pager, \ + object->pager_request, \ + po, \ + POINTER_T(copy), \ + new_offset, \ + (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \ + !should_flush); \ + \ + vm_object_lock(object); \ + \ + for (i = 0; i < atop(new_offset); i++) { \ + hp = holding_pages[i]; \ + if (hp != VM_PAGE_NULL) { \ + vm_object_paging_end(object); \ + VM_PAGE_FREE(hp); \ + } \ + } \ + \ + new_object = VM_OBJECT_NULL; \ +MACRO_END +#endif + +/* + * Routine: memory_object_lock_request [user interface] + * + * Description: + * Control use of the data associated with the given + * memory object. 
For each page in the given range, + * perform the following operations, in order: + * 1) restrict access to the page (disallow + * forms specified by "prot"); + * 2) return data to the manager (if "should_return" + * is RETURN_DIRTY and the page is dirty, or + * "should_return" is RETURN_ALL and the page + * is either dirty or precious); and, + * 3) flush the cached copy (if "should_flush" + * is asserted). + * The set of pages is defined by a starting offset + * ("offset") and size ("size"). Only pages with the + * same page alignment as the starting offset are + * considered. + * + * A single acknowledgement is sent (to the "reply_to" + * port) when these actions are complete. If successful, + * the naked send right for reply_to is consumed. + */ + +kern_return_t +memory_object_lock_request( + register vm_object_t object, + register vm_object_offset_t offset, + register vm_object_size_t size, + memory_object_return_t should_return, + int flags, + vm_prot_t prot, + ipc_port_t reply_to, + mach_msg_type_name_t reply_to_type) +{ + vm_object_offset_t original_offset = offset; + boolean_t should_flush=flags & MEMORY_OBJECT_DATA_FLUSH; + + XPR(XPR_MEMORY_OBJECT, + "m_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n", + (integer_t)object, offset, size, + (((should_return&1)<<1)|should_flush), prot); + + /* + * Check for bogus arguments. + */ + if (object == VM_OBJECT_NULL) + return (KERN_INVALID_ARGUMENT); + + if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) { + vm_object_deallocate(object); + return (KERN_INVALID_ARGUMENT); + } + + size = round_page(size); + + /* + * Lock the object, and acquire a paging reference to + * prevent the memory_object and control ports from + * being destroyed. 
+ */ + + vm_object_lock(object); + vm_object_paging_begin(object); + offset -= object->paging_offset; + + (void)memory_object_update(object, + offset, size, should_return, flags, prot); + + if (IP_VALID(reply_to)) { + vm_object_unlock(object); + + /* consumes our naked send-once/send right for reply_to */ + (void) memory_object_lock_completed(reply_to, reply_to_type, + object->pager_request, original_offset, size); + + vm_object_lock(object); + } + + vm_object_paging_end(object); + vm_object_unlock(object); + vm_object_deallocate(object); + + return (KERN_SUCCESS); +} + +/* + * Routine: memory_object_sync + * + * Kernel internal function to synch out pages in a given + * range within an object to its memory manager. Much the + * same as memory_object_lock_request but page protection + * is not changed. + * + * If the should_flush and should_return flags are true pages + * are flushed, that is dirty & precious pages are written to + * the memory manager and then discarded. If should_return + * is false, only precious pages are returned to the memory + * manager. + * + * If should flush is false and should_return true, the memory + * manager's copy of the pages is updated. If should_return + * is also false, only the precious pages are updated. This + * last option is of limited utility. + * + * Returns: + * FALSE if no pages were returned to the pager + * TRUE otherwise. + */ + +boolean_t +memory_object_sync( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + boolean_t should_flush, + boolean_t should_return) +{ + boolean_t rv; + + XPR(XPR_MEMORY_OBJECT, + "m_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n", + (integer_t)object, offset, size, should_flush, should_return); + + /* + * Lock the object, and acquire a paging reference to + * prevent the memory_object and control ports from + * being destroyed. 
+ */ + vm_object_lock(object); + vm_object_paging_begin(object); + + rv = memory_object_update(object, offset, size, + (should_return) ? + MEMORY_OBJECT_RETURN_ALL : + MEMORY_OBJECT_RETURN_NONE, + (should_flush) ? + MEMORY_OBJECT_DATA_FLUSH : 0, + VM_PROT_NO_CHANGE); + + + vm_object_paging_end(object); + vm_object_unlock(object); + return rv; +} + +/* + * Routine: memory_object_update + * Description: + * Work function for m_o_lock_request(), m_o_sync(). + * + * Called with object locked and paging ref taken. + */ +kern_return_t +memory_object_update( + register vm_object_t object, + register vm_object_offset_t offset, + register vm_size_t size, + memory_object_return_t should_return, + int flags, + vm_prot_t prot) +{ + register vm_page_t m; + vm_page_t holding_page; + vm_size_t original_size = size; + vm_object_offset_t paging_offset = 0; + vm_object_t copy_object; + vm_size_t data_cnt = 0; + vm_object_offset_t last_offset = offset; + memory_object_lock_result_t page_lock_result; + memory_object_lock_result_t pageout_action; + boolean_t data_returned = FALSE; + boolean_t update_cow; + boolean_t should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; +#ifndef NOT_LIST_REQ + boolean_t pending_pageout = FALSE; +#endif + + /* + * To avoid blocking while scanning for pages, save + * dirty pages to be cleaned all at once. + * + * XXXO A similar strategy could be used to limit the + * number of times that a scan must be restarted for + * other reasons. Those pages that would require blocking + * could be temporarily collected in another list, or + * their offsets could be recorded in a small array. + */ + + /* + * XXX NOTE: May want to consider converting this to a page list + * XXX vm_map_copy interface. Need to understand object + * XXX coalescing implications before doing so. 
+ */ + + update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH) + && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) && + !(flags & MEMORY_OBJECT_DATA_PURGE))) + || (flags & MEMORY_OBJECT_COPY_SYNC); + + + if((((copy_object = object->copy) != NULL) && update_cow) || + (flags & MEMORY_OBJECT_DATA_SYNC)) { + vm_size_t i; + vm_size_t copy_size; + vm_object_offset_t copy_offset; + vm_prot_t prot; + vm_page_t page; + vm_page_t top_page; + kern_return_t error = 0; + + if(copy_object != NULL) { + /* translate offset with respect to shadow's offset */ + copy_offset = (offset >= copy_object->shadow_offset)? + offset - copy_object->shadow_offset : + (vm_object_offset_t) 0; + if(copy_offset > copy_object->size) + copy_offset = copy_object->size; + + /* clip size with respect to shadow offset */ + copy_size = (offset >= copy_object->shadow_offset) ? + size : size - (copy_object->shadow_offset - offset); + + if(copy_size <= 0) { + copy_size = 0; + } else { + copy_size = ((copy_offset + copy_size) + <= copy_object->size) ? 
+ copy_size : copy_object->size - copy_offset; + } + /* check for a copy_offset which is beyond the end of */ + /* the copy_object */ + if(copy_size < 0) + copy_size = 0; + + copy_size+=offset; + + vm_object_unlock(object); + vm_object_lock(copy_object); + } else { + copy_object = object; + + copy_size = offset + size; + copy_offset = offset; + } + + vm_object_paging_begin(copy_object); + for (i=copy_offset; iobject, top_page); + PAGE_WAKEUP_DONE(page); + vm_page_lock_queues(); + if (!page->active && !page->inactive) + vm_page_activate(page); + vm_page_unlock_queues(); + vm_object_lock(copy_object); + vm_object_paging_begin(copy_object); + } else { + PAGE_WAKEUP_DONE(page); + vm_page_lock_queues(); + if (!page->active && !page->inactive) + vm_page_activate(page); + vm_page_unlock_queues(); + } + break; + case VM_FAULT_RETRY: + prot = VM_PROT_WRITE|VM_PROT_READ; + vm_object_lock(copy_object); + vm_object_paging_begin(copy_object); + goto RETRY_COW_OF_LOCK_REQUEST; + case VM_FAULT_INTERRUPTED: + prot = VM_PROT_WRITE|VM_PROT_READ; + vm_object_lock(copy_object); + vm_object_paging_begin(copy_object); + goto RETRY_COW_OF_LOCK_REQUEST; + case VM_FAULT_MEMORY_SHORTAGE: + VM_PAGE_WAIT(); + prot = VM_PROT_WRITE|VM_PROT_READ; + vm_object_lock(copy_object); + vm_object_paging_begin(copy_object); + goto RETRY_COW_OF_LOCK_REQUEST; + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + prot = VM_PROT_WRITE|VM_PROT_READ; + vm_object_lock(copy_object); + vm_object_paging_begin(copy_object); + goto RETRY_COW_OF_LOCK_REQUEST; + case VM_FAULT_MEMORY_ERROR: + vm_object_lock(object); + goto BYPASS_COW_COPYIN; + } + + } + vm_object_paging_end(copy_object); + if(copy_object != object) { + vm_object_unlock(copy_object); + vm_object_lock(object); + } + } + if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) { + return KERN_SUCCESS; + } + if(((copy_object = object->copy) != NULL) && + (flags & MEMORY_OBJECT_DATA_PURGE)) { + copy_object->shadow_severed = TRUE; + 
copy_object->shadowed = FALSE; + copy_object->shadow = NULL; + /* delete the ref the COW was holding on the target object */ + vm_object_deallocate(object); + } +BYPASS_COW_COPYIN: + + for (; + size != 0; + size -= PAGE_SIZE, offset += PAGE_SIZE_64) + { + /* + * Limit the number of pages to be cleaned at once. + */ + if (pending_pageout && + data_cnt >= PAGE_SIZE * DATA_WRITE_MAX) + { + LIST_REQ_PAGEOUT_PAGES(object, data_cnt, + pageout_action, paging_offset); + data_cnt = 0; + pending_pageout = FALSE; + } + + while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { + page_lock_result = memory_object_lock_page(m, should_return, + should_flush, prot); + + XPR(XPR_MEMORY_OBJECT, + "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n", + (integer_t)object, offset, page_lock_result, 0, 0); + + switch (page_lock_result) + { + case MEMORY_OBJECT_LOCK_RESULT_DONE: + /* + * End of a cluster of dirty pages. + */ + if(pending_pageout) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, + paging_offset); + data_cnt = 0; + pending_pageout = FALSE; + continue; + } + break; + + case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK: + /* + * Since it is necessary to block, + * clean any dirty pages now. + */ + if(pending_pageout) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, + paging_offset); + pending_pageout = FALSE; + data_cnt = 0; + continue; + } + + PAGE_ASSERT_WAIT(m, THREAD_UNINT); + vm_object_unlock(object); + thread_block((void (*)(void))0); + vm_object_lock(object); + continue; + + case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN: + case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN: + /* + * The clean and return cases are similar. + * + */ + + /* + * if this would form a discontiguous block, + * clean the old pages and start anew. + * + */ + + /* + * Mark the page busy since we unlock the + * object below. 
+ */ + m->busy = TRUE; + if (pending_pageout && + (last_offset != offset || + pageout_action != page_lock_result)) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, + paging_offset); + pending_pageout = FALSE; + data_cnt = 0; + } + m->busy = FALSE; + holding_page = VM_PAGE_NULL; + if(m->cleaning) { + PAGE_ASSERT_WAIT(m, THREAD_UNINT); + vm_object_unlock(object); + thread_block((void (*)(void))0); + continue; + } + if(!pending_pageout) { + pending_pageout = TRUE; + pageout_action = page_lock_result; + paging_offset = offset; + } + if (should_flush) { + vm_page_lock_queues(); + m->list_req_pending = TRUE; + m->cleaning = TRUE; + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + vm_page_unlock_queues(); + } else { + /* + * Clean but do not flush + */ + vm_page_lock_queues(); + m->list_req_pending = TRUE; + m->cleaning = TRUE; + vm_page_unlock_queues(); + + } + vm_object_unlock(object); + + + data_cnt += PAGE_SIZE; + last_offset = offset + PAGE_SIZE_64; + data_returned = TRUE; + + vm_object_lock(object); + break; + } + break; + } + } + + /* + * We have completed the scan for applicable pages. + * Clean any pages that have been saved. + */ +#ifdef NOT_LIST_REQ + if (new_object != VM_OBJECT_NULL) { + PAGEOUT_PAGES(object, new_object, new_offset, pageout_action, + paging_offset); + } +#else + if (pending_pageout) { + LIST_REQ_PAGEOUT_PAGES(object, + data_cnt, pageout_action, paging_offset); + } +#endif + return (data_returned); +} + +/* + * Routine: memory_object_synchronize_completed [user interface] + * + * Tell kernel that previously synchronized data + * (memory_object_synchronize) has been queue or placed on the + * backing storage. + * + * Note: there may be multiple synchronize requests for a given + * memory object outstanding but they will not overlap. 
+ */ + +kern_return_t +memory_object_synchronize_completed( + vm_object_t object, + vm_object_offset_t offset, + vm_offset_t length) +{ + msync_req_t msr; + + XPR(XPR_MEMORY_OBJECT, + "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n", + (integer_t)object, offset, length, 0, 0); + + /* + * Look for bogus arguments + */ + + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } + + vm_object_lock(object); + +/* + * search for sync request structure + */ + queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) { + if (msr->offset == offset && msr->length == length) { + queue_remove(&object->msr_q, msr, msync_req_t, msr_q); + break; + } + }/* queue_iterate */ + + if (queue_end(&object->msr_q, (queue_entry_t)msr)) { + vm_object_unlock(object); + vm_object_deallocate(object); + return KERN_INVALID_ARGUMENT; + } + + msr_lock(msr); + vm_object_unlock(object); + msr->flag = VM_MSYNC_DONE; + msr_unlock(msr); + thread_wakeup((event_t) msr); + vm_object_deallocate(object); + + return KERN_SUCCESS; +}/* memory_object_synchronize_completed */ + +kern_return_t +memory_object_set_attributes_common( + vm_object_t object, + boolean_t may_cache, + memory_object_copy_strategy_t copy_strategy, + boolean_t temporary, + vm_size_t cluster_size, + boolean_t silent_overwrite, + boolean_t advisory_pageout) +{ + boolean_t object_became_ready; + + XPR(XPR_MEMORY_OBJECT, + "m_o_set_attr_com, object 0x%X flg %x strat %d\n", + (integer_t)object, (may_cache&1)|((temporary&1)<1), copy_strategy, 0, 0); + + if (object == VM_OBJECT_NULL) + return(KERN_INVALID_ARGUMENT); + + /* + * Verify the attributes of importance + */ + + switch(copy_strategy) { + case MEMORY_OBJECT_COPY_NONE: + case MEMORY_OBJECT_COPY_DELAY: + break; + default: + vm_object_deallocate(object); + return(KERN_INVALID_ARGUMENT); + } + +#if !ADVISORY_PAGEOUT + if (silent_overwrite || advisory_pageout) { + vm_object_deallocate(object); + return(KERN_INVALID_ARGUMENT); + } +#endif /* !ADVISORY_PAGEOUT */ + if 
(may_cache) + may_cache = TRUE; + if (temporary) + temporary = TRUE; + if (cluster_size != 0) { + int pages_per_cluster; + pages_per_cluster = atop(cluster_size); + /* + * Cluster size must be integral multiple of page size, + * and be a power of 2 number of pages. + */ + if ((cluster_size & (PAGE_SIZE-1)) || + ((pages_per_cluster-1) & pages_per_cluster)) { + vm_object_deallocate(object); + return KERN_INVALID_ARGUMENT; + } + } + + vm_object_lock(object); + + /* + * Copy the attributes + */ + assert(!object->internal); + object_became_ready = !object->pager_ready; + object->copy_strategy = copy_strategy; + object->can_persist = may_cache; + object->temporary = temporary; + object->silent_overwrite = silent_overwrite; + object->advisory_pageout = advisory_pageout; + if (cluster_size == 0) + cluster_size = PAGE_SIZE; + object->cluster_size = cluster_size; + + assert(cluster_size >= PAGE_SIZE && + cluster_size % PAGE_SIZE == 0); + + /* + * Wake up anyone waiting for the ready attribute + * to become asserted. + */ + + if (object_became_ready) { + object->pager_ready = TRUE; + vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); + } + + vm_object_unlock(object); + + vm_object_deallocate(object); + + return(KERN_SUCCESS); +} + +/* + * Set the memory object attribute as provided. + * + * XXX This routine cannot be completed until the vm_msync, clean + * in place, and cluster work is completed. See ifdef notyet + * below and note that memory_object_set_attributes_common() + * may have to be expanded. 
+ */ +kern_return_t +memory_object_change_attributes( + vm_object_t object, + memory_object_flavor_t flavor, + memory_object_info_t attributes, + mach_msg_type_number_t count, + ipc_port_t reply_to, + mach_msg_type_name_t reply_to_type) +{ + kern_return_t result = KERN_SUCCESS; + boolean_t temporary; + boolean_t may_cache; + boolean_t invalidate; + vm_size_t cluster_size; + memory_object_copy_strategy_t copy_strategy; + boolean_t silent_overwrite; + boolean_t advisory_pageout; + + if (object == VM_OBJECT_NULL) + return(KERN_INVALID_ARGUMENT); + + vm_object_lock(object); + temporary = object->temporary; + may_cache = object->can_persist; + copy_strategy = object->copy_strategy; + silent_overwrite = object->silent_overwrite; + advisory_pageout = object->advisory_pageout; +#if notyet + invalidate = object->invalidate; +#endif + cluster_size = object->cluster_size; + vm_object_unlock(object); + + switch (flavor) { + case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: + { + old_memory_object_behave_info_t behave; + + if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + behave = (old_memory_object_behave_info_t) attributes; + + temporary = behave->temporary; + invalidate = behave->invalidate; + copy_strategy = behave->copy_strategy; + + break; + } + + case MEMORY_OBJECT_BEHAVIOR_INFO: + { + memory_object_behave_info_t behave; + + if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + behave = (memory_object_behave_info_t) attributes; + + temporary = behave->temporary; + invalidate = behave->invalidate; + copy_strategy = behave->copy_strategy; + silent_overwrite = behave->silent_overwrite; + advisory_pageout = behave->advisory_pageout; + break; + } + + case MEMORY_OBJECT_PERFORMANCE_INFO: + { + memory_object_perf_info_t perf; + + if (count != MEMORY_OBJECT_PERF_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + perf = (memory_object_perf_info_t) attributes; + + may_cache = 
perf->may_cache; + cluster_size = round_page(perf->cluster_size); + + break; + } + + case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: + { + old_memory_object_attr_info_t attr; + + if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + attr = (old_memory_object_attr_info_t) attributes; + + may_cache = attr->may_cache; + copy_strategy = attr->copy_strategy; + cluster_size = page_size; + + break; + } + + case MEMORY_OBJECT_ATTRIBUTE_INFO: + { + memory_object_attr_info_t attr; + + if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } + + attr = (memory_object_attr_info_t) attributes; + + copy_strategy = attr->copy_strategy; + may_cache = attr->may_cache_object; + cluster_size = attr->cluster_size; + temporary = attr->temporary; + + break; + } + + default: + result = KERN_INVALID_ARGUMENT; + break; + } + + if (result != KERN_SUCCESS) { + vm_object_deallocate(object); + return(result); + } + + if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) { + copy_strategy = MEMORY_OBJECT_COPY_DELAY; + temporary = TRUE; + } else { + temporary = FALSE; + } + + /* + * Do the work and throw away our object reference. It + * is important that the object reference be deallocated + * BEFORE sending the reply. The whole point of the reply + * is that it shows up after the terminate message that + * may be generated by setting the object uncacheable. + * + * XXX may_cache may become a tri-valued variable to handle + * XXX uncache if not in use. + */ + result = memory_object_set_attributes_common(object, + may_cache, + copy_strategy, + temporary, + cluster_size, + silent_overwrite, + advisory_pageout); + + if (IP_VALID(reply_to)) { + /* consumes our naked send-once/send right for reply_to */ + (void) memory_object_change_completed(reply_to, reply_to_type, + object->alive ? 
+ object->pager_request : PAGER_REQUEST_NULL, + flavor); + } + + return(result); +} + +kern_return_t +memory_object_get_attributes( + vm_object_t object, + memory_object_flavor_t flavor, + memory_object_info_t attributes, /* pointer to OUT array */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + kern_return_t ret = KERN_SUCCESS; + + if (object == VM_OBJECT_NULL) + return(KERN_INVALID_ARGUMENT); + + vm_object_lock(object); + + switch (flavor) { + case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: + { + old_memory_object_behave_info_t behave; + + if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } + + behave = (old_memory_object_behave_info_t) attributes; + behave->copy_strategy = object->copy_strategy; + behave->temporary = object->temporary; +#if notyet /* remove when vm_msync complies and clean in place fini */ + behave->invalidate = object->invalidate; +#else + behave->invalidate = FALSE; +#endif + + *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT; + break; + } + + case MEMORY_OBJECT_BEHAVIOR_INFO: + { + memory_object_behave_info_t behave; + + if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } + + behave = (memory_object_behave_info_t) attributes; + behave->copy_strategy = object->copy_strategy; + behave->temporary = object->temporary; +#if notyet /* remove when vm_msync complies and clean in place fini */ + behave->invalidate = object->invalidate; +#else + behave->invalidate = FALSE; +#endif + behave->advisory_pageout = object->advisory_pageout; + behave->silent_overwrite = object->silent_overwrite; + *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT; + break; + } + + case MEMORY_OBJECT_PERFORMANCE_INFO: + { + memory_object_perf_info_t perf; + + if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } + + perf = (memory_object_perf_info_t) attributes; + perf->cluster_size = object->cluster_size; + perf->may_cache = object->can_persist; + + *count = 
MEMORY_OBJECT_PERF_INFO_COUNT; + break; + } + + case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: + { + old_memory_object_attr_info_t attr; + + if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } + + attr = (old_memory_object_attr_info_t) attributes; + attr->may_cache = object->can_persist; + attr->copy_strategy = object->copy_strategy; + + *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT; + break; + } + + case MEMORY_OBJECT_ATTRIBUTE_INFO: + { + memory_object_attr_info_t attr; + + if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } + + attr = (memory_object_attr_info_t) attributes; + attr->copy_strategy = object->copy_strategy; + attr->cluster_size = object->cluster_size; + attr->may_cache_object = object->can_persist; + attr->temporary = object->temporary; + + *count = MEMORY_OBJECT_ATTR_INFO_COUNT; + break; + } + + default: + ret = KERN_INVALID_ARGUMENT; + break; + } + + vm_object_unlock(object); + + vm_object_deallocate(object); + + return(ret); +} + +int vm_stat_discard_cleared_reply = 0; +int vm_stat_discard_cleared_unset = 0; +int vm_stat_discard_cleared_too_late = 0; + + +/* + * vm_set_default_memory_manager(): + * [Obsolete] + */ +kern_return_t +vm_set_default_memory_manager( + host_t host, + ipc_port_t *default_manager) +{ + return(host_default_memory_manager(host_priv_self(), default_manager, 4*PAGE_SIZE)); +} + +/* + * Routine: host_default_memory_manager + * Purpose: + * set/get the default memory manager port and default cluster + * size. + * + * If successful, consumes the supplied naked send right. 
+ */ +kern_return_t +host_default_memory_manager( + host_priv_t host_priv, + ipc_port_t *default_manager, + vm_size_t cluster_size) +{ + ipc_port_t current_manager; + ipc_port_t new_manager; + ipc_port_t returned_manager; + + if (host_priv == HOST_PRIV_NULL) + return(KERN_INVALID_HOST); + + assert(host_priv == &realhost); + + new_manager = *default_manager; + mutex_lock(&memory_manager_default_lock); + current_manager = memory_manager_default; + + if (new_manager == IP_NULL) { + /* + * Retrieve the current value. + */ + + returned_manager = ipc_port_copy_send(current_manager); + } else { + /* + * Retrieve the current value, + * and replace it with the supplied value. + * We consume the supplied naked send right. + */ + + returned_manager = current_manager; + memory_manager_default = new_manager; + if (cluster_size % PAGE_SIZE != 0) { +#if 0 + mutex_unlock(&memory_manager_default_lock); + return KERN_INVALID_ARGUMENT; +#else + cluster_size = round_page(cluster_size); +#endif + } + memory_manager_default_cluster = cluster_size; + + /* + * In case anyone's been waiting for a memory + * manager to be established, wake them up. + */ + + thread_wakeup((event_t) &memory_manager_default); + } + + mutex_unlock(&memory_manager_default_lock); + + *default_manager = returned_manager; + return(KERN_SUCCESS); +} + +/* + * Routine: memory_manager_default_reference + * Purpose: + * Returns a naked send right for the default + * memory manager. The returned right is always + * valid (not IP_NULL or IP_DEAD). 
 */

ipc_port_t
memory_manager_default_reference(
	vm_size_t *cluster_size)
{
	ipc_port_t current_manager;

	mutex_lock(&memory_manager_default_lock);

	/*
	 * Comma operator: take a fresh send right on the current
	 * default-manager port, then test its validity.  If no default
	 * manager has been registered yet, sleep on the variable's
	 * address; thread_sleep_mutex() drops the lock, so it must be
	 * re-taken before retrying.
	 */
	while (current_manager = ipc_port_copy_send(memory_manager_default),
	       !IP_VALID(current_manager)) {
		thread_sleep_mutex((event_t) &memory_manager_default,
			&memory_manager_default_lock, THREAD_UNINT);
		mutex_lock(&memory_manager_default_lock);
	}
	/* Hand back the cluster size registered along with the manager. */
	*cluster_size = memory_manager_default_cluster;

	mutex_unlock(&memory_manager_default_lock);

	return current_manager;
}

/*
 *	Routine:	memory_manager_default_port
 *	Purpose:
 *		Returns true if the receiver for the port
 *		is the default memory manager.
 *
 *		This is a hack to let ds_read_done
 *		know when it should keep memory wired.
 */

boolean_t
memory_manager_default_port(
	ipc_port_t port)
{
	ipc_port_t current;
	boolean_t result;

	mutex_lock(&memory_manager_default_lock);
	current = memory_manager_default;
	if (IP_VALID(current)) {
		/*
		 *	There is no point in bothering to lock
		 *	both ports, which would be painful to do.
		 *	If the receive rights are moving around,
		 *	we might be inaccurate.
		 */

		result = port->ip_receiver == current->ip_receiver;
	} else
		result = FALSE;
	mutex_unlock(&memory_manager_default_lock);

	return result;
}

/*
 *	Routine:	memory_manager_default_check
 *
 *	Purpose:
 *		Check whether a default memory manager has been set
 *		up yet, or not. Returns KERN_SUCCESS if dmm exists,
 *		and KERN_FAILURE if dmm does not exist.
 *
 *		If there is no default memory manager, log an error,
 *		but only the first time.
 *
 */
kern_return_t
memory_manager_default_check(void)
{
	ipc_port_t current;

	mutex_lock(&memory_manager_default_lock);
	current = memory_manager_default;
	if (!IP_VALID(current)) {
		static boolean_t logged;	/* initialized to 0 */
		boolean_t complain = !logged;
		/*
		 * Snapshot-and-set under the lock so that only the very
		 * first caller ever prints the warning.
		 */
		logged = TRUE;
		mutex_unlock(&memory_manager_default_lock);
		if (complain)
			printf("Warning: No default memory manager\n");
		return(KERN_FAILURE);
	} else {
		mutex_unlock(&memory_manager_default_lock);
		return(KERN_SUCCESS);
	}
}

/*
 * One-time setup: no default manager registered yet; initialize the
 * lock guarding memory_manager_default and its cluster size.
 */
void
memory_manager_default_init(void)
{
	memory_manager_default = IP_NULL;
	mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
}


/*
 * Walk [offset, offset+size) in 'object' and then down its shadow
 * chain, moving every eligible resident page to the head of the
 * inactive queue and clearing its reference bit.  With kill_page,
 * pages of internal objects are additionally marked clean and
 * non-precious so they can be reclaimed without pageout; kill_page
 * applies to the top object only (cleared before descending).
 */
void
memory_object_deactivate_pages(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t kill_page)
{
	vm_object_t orig_object;
	int pages_moved = 0;
	int pages_found = 0;

	/*
	 * entered with object lock held, acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	orig_object = object;

	for (;;) {
		register vm_page_t m;
		vm_object_offset_t toffset;
		vm_object_size_t tsize;

		vm_object_paging_begin(object);
		vm_page_lock_queues();

		for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

			if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

				pages_found++;

				/* Leave wired, private, gobbled, and busy pages alone. */
				if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

					m->reference = FALSE;
					pmap_clear_reference(m->phys_addr);

					if ((kill_page) && (object->internal)) {
						m->precious = FALSE;
						m->dirty = FALSE;
						pmap_clear_modify(m->phys_addr);
						/*
						 * NOTE(review): the loop iterates with
						 * toffset, but the existence map is
						 * cleared at offset (the range start)
						 * for every page -- looks like this
						 * should be toffset; confirm against
						 * vm_external usage.
						 */
						vm_external_state_clr(object->existence_map, offset);
					}
					VM_PAGE_QUEUES_REMOVE(m);

					/* Head of the inactive queue: first in line for reclaim. */
					queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq);

					m->inactive = TRUE;
					if (!m->fictitious)
						vm_page_inactive_count++;

					pages_moved++;
				}
			}
		}
		vm_page_unlock_queues();
		vm_object_paging_end(object);

		if (object->shadow) {
			vm_object_t tmp_object;

			/* Only the top object's pages may be killed. */
			kill_page = 0;

			offset += object->shadow_offset;

			/*
			 * Lock hand-off down the shadow chain: take the
			 * shadow's lock before dropping the current one;
			 * the caller-held lock on orig_object is kept.
			 */
			tmp_object = object->shadow;
			vm_object_lock(tmp_object);

			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}

/* Allow manipulation of individual page state.
This is actually part of */
/* the UPL regimen but takes place on the object rather than on a UPL */

/*
 * Get/set the state bits of the resident page at 'offset' in 'object'.
 * 'ops' selects UPL_POP_SET / UPL_POP_CLR plus the bits to change, or
 * UPL_POP_DUMP to free the page outright.  On return *flags (if
 * non-NULL) holds the page state as it was BEFORE the requested ops,
 * and *phys_entry (if non-NULL) the page's physical address.
 * Returns KERN_FAILURE if no page is resident at 'offset'.
 */
kern_return_t
memory_object_page_op(
	vm_object_t		object,
	vm_object_offset_t	offset,
	int			ops,
	vm_offset_t		*phys_entry,
	int			*flags)
{
	vm_page_t		dst_page;

	vm_object_lock(object);

	while(TRUE) {
		/* No resident page at this offset: nothing to operate on. */
		if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}

		/* Sync up on getting the busy bit */
		if((dst_page->busy || dst_page->cleaning) &&
		   (((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
			/* someone else is playing with the page, we will */
			/* have to wait */
			PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
			vm_object_unlock(object);
			thread_block((void(*)(void))0);
			/* The page may have changed while we slept: re-lookup. */
			vm_object_lock(object);
			continue;
		}

		/*
		 * DUMP frees the page immediately; flags/phys_entry are
		 * deliberately not reported for a dumped page.
		 */
		if (ops & UPL_POP_DUMP) {
			vm_page_lock_queues();
			vm_page_free(dst_page);
			vm_page_unlock_queues();
			break;
		}

		if (flags) {
			*flags = 0;

			/* Get the condition of flags before requested ops */
			/* are undertaken */

			if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
			if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
			if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
			if(dst_page->absent) *flags |= UPL_POP_ABSENT;
			if(dst_page->busy) *flags |= UPL_POP_BUSY;
		}
		if (phys_entry)
			*phys_entry = dst_page->phys_addr;

		/* The caller should have made a call either contingent with */
		/* or prior to this call to set UPL_POP_BUSY */
		if(ops & UPL_POP_SET) {
			/* The protection granted with this assert will */
			/* not be complete. If the caller violates the */
			/* convention and attempts to change page state */
			/* without first setting busy we may not see it */
			/* because the page may already be busy. However */
			/* if such violations occur we will assert sooner */
			/* or later. */
			assert(dst_page->busy || (ops & UPL_POP_BUSY));
			if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
			if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
			if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
			if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
			if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
		}

		if(ops & UPL_POP_CLR) {
			assert(dst_page->busy);
			if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
			if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
			if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
			if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
			if (ops & UPL_POP_BUSY) {
				/* Clearing busy must wake any waiters. */
				dst_page->busy = FALSE;
				PAGE_WAKEUP(dst_page);
			}
		}
		break;
	}

	vm_object_unlock(object);
	return KERN_SUCCESS;

}

diff --git a/osfmk/vm/memory_object.h b/osfmk/vm/memory_object.h
new file mode 100644
index 000000000..d5e897ef0
--- /dev/null
+++ b/osfmk/vm/memory_object.h
@@ -0,0 +1,101 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
+ */ +/* + */ + +#ifndef _VM_MEMORY_OBJECT_H_ +#define _VM_MEMORY_OBJECT_H_ + +#include + +#include +#include + +extern kern_return_t memory_object_set_attributes_common( + vm_object_t object, + boolean_t may_cache, + memory_object_copy_strategy_t copy_strategy, + boolean_t temporary, + vm_size_t cluster_size, + boolean_t silent_overwrite, + boolean_t advisory_pageout); + +extern boolean_t memory_object_sync ( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + boolean_t should_flush, + boolean_t should_return); + +extern ipc_port_t memory_manager_default_reference( + vm_size_t *cluster_size); + +extern boolean_t memory_manager_default_port(ipc_port_t port); + +extern kern_return_t memory_manager_default_check(void); + +extern void memory_manager_default_init(void); + + +extern kern_return_t memory_object_free_from_cache( + host_t host, + int pager_id, + int *count); + +extern kern_return_t memory_object_remove_cached_object( + ipc_port_t port); + +extern void memory_object_deactivate_pages( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + boolean_t kill_page); + +#endif /* _VM_MEMORY_OBJECT_H_ */ diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h new file mode 100644 index 000000000..4dda94445 --- /dev/null +++ b/osfmk/vm/pmap.h @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/pmap.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Machine address mapping definitions -- machine-independent + * section. [For machine-dependent section, see "machine/pmap.h".] 
+ */ + +#ifndef _VM_PMAP_H_ +#define _VM_PMAP_H_ + +#include +#include +#include +#include +#include +#include + +/* + * The following is a description of the interface to the + * machine-dependent "physical map" data structure. The module + * must provide a "pmap_t" data type that represents the + * set of valid virtual-to-physical addresses for one user + * address space. [The kernel address space is represented + * by a distinguished "pmap_t".] The routines described manage + * this type, install and update virtual-to-physical mappings, + * and perform operations on physical addresses common to + * many address spaces. + */ + +#ifndef MACH_KERNEL_PRIVATE + +typedef void *pmap_t; + +#else /* MACH_KERNEL_PRIVATE */ + +typedef struct pmap *pmap_t; + +#include + +/* + * Routines used for initialization. + * There is traditionally also a pmap_bootstrap, + * used very early by machine-dependent code, + * but it is not part of the interface. + */ + +extern vm_offset_t pmap_steal_memory(vm_size_t size); + /* During VM initialization, + * steal a chunk of memory. + */ +extern unsigned int pmap_free_pages(void); /* During VM initialization, + * report remaining unused + * physical pages. + */ +extern void pmap_startup( + vm_offset_t *startp, + vm_offset_t *endp); + /* During VM initialization, + * use remaining physical pages + * to allocate page frames. + */ +extern void pmap_init(void); /* Initialization, + * after kernel runs + * in virtual memory. + */ + +#ifndef MACHINE_PAGES +/* + * If machine/pmap.h defines MACHINE_PAGES, it must implement + * the above functions. The pmap module has complete control. + * Otherwise, it must implement + * pmap_free_pages + * pmap_virtual_space + * pmap_next_page + * pmap_init + * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup + * using pmap_free_pages, pmap_next_page, pmap_virtual_space, + * and pmap_enter. 
pmap_free_pages may over-estimate the number + * of unused physical pages, and pmap_next_page may return FALSE + * to indicate that there are no more unused pages to return. + * However, for best performance pmap_free_pages should be accurate. + */ + +extern boolean_t pmap_next_page(vm_offset_t *paddr); + /* During VM initialization, + * return the next unused + * physical page. + */ +extern void pmap_virtual_space( + vm_offset_t *virtual_start, + vm_offset_t *virtual_end); + /* During VM initialization, + * report virtual space + * available for the kernel. + */ +#endif /* MACHINE_PAGES */ + +/* + * Routines to manage the physical map data structure. + */ +extern pmap_t pmap_create(vm_size_t size); /* Create a pmap_t. */ +extern pmap_t (pmap_kernel)(void); /* Return the kernel's pmap */ +extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ +extern void pmap_destroy(pmap_t pmap); /* Release a reference. */ +extern void pmap_switch(pmap_t); + + +extern void pmap_enter( /* Enter a mapping */ + pmap_t pmap, + vm_offset_t v, + vm_offset_t pa, + vm_prot_t prot, + boolean_t wired); + + +/* + * Routines that operate on physical addresses. + */ +extern void pmap_page_protect( /* Restrict access to page. */ + vm_offset_t phys, + vm_prot_t prot); + +extern void (pmap_zero_page)( + vm_offset_t phys); + +extern void (pmap_zero_part_page)( + vm_offset_t p, + vm_offset_t offset, + vm_size_t len); + +extern void (pmap_copy_page)( + vm_offset_t src, + vm_offset_t dest); + +extern void (pmap_copy_part_page)( + vm_offset_t src, + vm_offset_t src_offset, + vm_offset_t dst, + vm_offset_t dst_offset, + vm_size_t len); + +extern void (pmap_copy_part_lpage)( + vm_offset_t src, + vm_offset_t dst, + vm_offset_t dst_offset, + vm_size_t len); + +extern void (pmap_copy_part_rpage)( + vm_offset_t src, + vm_offset_t src_offset, + vm_offset_t dst, + vm_size_t len); + +/* + * debug/assertions. pmap_verify_free returns true iff + * the given physical page is mapped into no pmap. 
+ */ +extern boolean_t pmap_verify_free(vm_offset_t paddr); + +/* + * Statistics routines + */ +extern int (pmap_resident_count)(pmap_t pmap); + +/* + * Sundry required (internal) routines + */ +extern void pmap_collect(pmap_t pmap);/* Perform garbage + * collection, if any */ + + +extern vm_offset_t (pmap_phys_address)( /* Transform address returned + * by device driver mapping + * function to physical address + * known to this module. */ + int frame); + +extern int (pmap_phys_to_frame)( /* Inverse of pmap_phys_addess, + * for use by device driver + * mapping function in + * machine-independent + * pseudo-devices. */ + vm_offset_t phys); + +/* + * Optional routines + */ +extern void (pmap_copy)( /* Copy range of mappings, + * if desired. */ + pmap_t dest, + pmap_t source, + vm_offset_t dest_va, + vm_size_t size, + vm_offset_t source_va); + +extern kern_return_t (pmap_attribute)( /* Get/Set special memory + * attributes */ + pmap_t pmap, + vm_offset_t va, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value); + +/* + * Routines defined as macros. 
+ */ +#ifndef PMAP_ACTIVATE_USER +#define PMAP_ACTIVATE_USER(act, cpu) { \ + pmap_t pmap; \ + \ + pmap = (act)->map->pmap; \ + if (pmap != pmap_kernel()) \ + PMAP_ACTIVATE(pmap, (act), (cpu)); \ +} +#endif /* PMAP_ACTIVATE_USER */ + +#ifndef PMAP_DEACTIVATE_USER +#define PMAP_DEACTIVATE_USER(act, cpu) { \ + pmap_t pmap; \ + \ + pmap = (act)->map->pmap; \ + if ((pmap) != pmap_kernel()) \ + PMAP_DEACTIVATE(pmap, (act), (cpu)); \ +} +#endif /* PMAP_DEACTIVATE_USER */ + +#ifndef PMAP_ACTIVATE_KERNEL +#define PMAP_ACTIVATE_KERNEL(cpu) \ + PMAP_ACTIVATE(pmap_kernel(), THR_ACT_NULL, cpu) +#endif /* PMAP_ACTIVATE_KERNEL */ + +#ifndef PMAP_DEACTIVATE_KERNEL +#define PMAP_DEACTIVATE_KERNEL(cpu) \ + PMAP_DEACTIVATE(pmap_kernel(), THR_ACT_NULL, cpu) +#endif /* PMAP_DEACTIVATE_KERNEL */ + +#ifndef PMAP_ENTER +/* + * Macro to be used in place of pmap_enter() + */ +#define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \ + MACRO_BEGIN \ + pmap_enter( \ + (pmap), \ + (virtual_address), \ + (page)->phys_addr, \ + (protection) & ~(page)->page_lock, \ + (wired) \ + ); \ + MACRO_END +#endif /* !PMAP_ENTER */ + +#endif /* MACH_KERNEL_PRIVATE */ + +/* + * JMM - This portion is exported to other kernel components right now, + * but will be pulled back in the future when the needed functionality + * is provided in a cleaner manner. + */ + +#define PMAP_NULL ((pmap_t) 0) + +extern pmap_t kernel_pmap; /* The kernel's map */ +#define pmap_kernel() (kernel_pmap) + +/* + * Routines to manage reference/modify bits based on + * physical addresses, simulating them if not provided + * by the hardware. 
+ */ + /* Clear reference bit */ +extern void pmap_clear_reference(vm_offset_t paddr); + /* Return reference bit */ +extern boolean_t (pmap_is_referenced)(vm_offset_t paddr); + /* Set modify bit */ +extern void pmap_set_modify(vm_offset_t paddr); + /* Clear modify bit */ +extern void pmap_clear_modify(vm_offset_t paddr); + /* Return modify bit */ +extern boolean_t pmap_is_modified(vm_offset_t paddr); + +/* + * Routines that operate on ranges of virtual addresses. + */ +extern void pmap_remove( /* Remove mappings. */ + pmap_t map, + vm_offset_t s, + vm_offset_t e); + +extern void pmap_protect( /* Change protections. */ + pmap_t map, + vm_offset_t s, + vm_offset_t e, + vm_prot_t prot); + +extern void (pmap_pageable)( + pmap_t pmap, + vm_offset_t start, + vm_offset_t end, + boolean_t pageable); + +extern void pmap_modify_pages( /* Set modify bit for pages */ + pmap_t map, + vm_offset_t s, + vm_offset_t e); + +extern vm_offset_t pmap_extract(pmap_t pmap, + vm_offset_t va); + +extern void pmap_change_wiring( /* Specify pageability */ + pmap_t pmap, + vm_offset_t va, + boolean_t wired); +#endif /* _VM_PMAP_H_ */ diff --git a/osfmk/vm/vm_debug.c b/osfmk/vm/vm_debug.c new file mode 100644 index 000000000..bd78d02c4 --- /dev/null +++ b/osfmk/vm/vm_debug.c @@ -0,0 +1,648 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/vm_debug.c. + * Author: Rich Draves + * Date: March, 1990 + * + * Exported kernel calls. See mach_debug/mach_debug.defs. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#if MACH_VM_DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif + +/* + * Routine: mach_vm_region_info [kernel call] + * Purpose: + * Retrieve information about a VM region, + * including info about the object chain. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Retrieve region/object info. + * KERN_INVALID_TASK The map is null. + * KERN_NO_SPACE There is no entry at/after the address. + * KERN_RESOURCE_SHORTAGE Can't allocate memory. + */ + +kern_return_t +mach_vm_region_info( + vm_map_t map, + vm_offset_t address, + vm_info_region_t *regionp, + vm_info_object_array_t *objectsp, + mach_msg_type_number_t *objectsCntp) +{ +#if !MACH_VM_DEBUG + return KERN_FAILURE; +#else + vm_map_copy_t copy; + vm_offset_t addr; /* memory for OOL data */ + vm_size_t size; /* size of the memory */ + unsigned int room; /* room for this many objects */ + unsigned int used; /* actually this many objects */ + vm_info_region_t region; + kern_return_t kr; + + if (map == VM_MAP_NULL) + return KERN_INVALID_TASK; + + size = 0; /* no memory allocated yet */ + + for (;;) { + vm_map_t cmap; /* current map in traversal */ + vm_map_t nmap; /* next map to look at */ + vm_map_entry_t entry; + vm_object_t object, cobject, nobject; + + /* nothing is locked */ + + vm_map_lock_read(map); + for (cmap = map;; cmap = nmap) { + /* cmap is read-locked */ + + if (!vm_map_lookup_entry(cmap, address, &entry)) { + entry = entry->vme_next; + if (entry == vm_map_to_entry(cmap)) { + vm_map_unlock_read(cmap); + if (size != 0) + kmem_free(ipc_kernel_map, + addr, size); + return KERN_NO_SPACE; + } + } + + if (entry->is_sub_map) + nmap = entry->object.sub_map; + else + break; + + /* move down to the lower map */ + + vm_map_lock_read(nmap); + vm_map_unlock_read(cmap); + } + + /* cmap is read-locked; we have a real entry */ + + object 
= entry->object.vm_object; + region.vir_start = entry->vme_start; + region.vir_end = entry->vme_end; + region.vir_object = (vm_offset_t) object; + region.vir_offset = entry->offset; + region.vir_needs_copy = entry->needs_copy; + region.vir_protection = entry->protection; + region.vir_max_protection = entry->max_protection; + region.vir_inheritance = entry->inheritance; + region.vir_wired_count = entry->wired_count; + region.vir_user_wired_count = entry->user_wired_count; + + used = 0; + room = size / sizeof(vm_info_object_t); + + if (object == VM_OBJECT_NULL) { + vm_map_unlock_read(cmap); + /* no memory needed */ + break; + } + + vm_object_lock(object); + vm_map_unlock_read(cmap); + + for (cobject = object;; cobject = nobject) { + /* cobject is locked */ + + if (used < room) { + vm_info_object_t *vio = + &((vm_info_object_t *) addr)[used]; + + vio->vio_object = + (vm_offset_t) cobject; + vio->vio_size = + cobject->size; + vio->vio_ref_count = + cobject->ref_count; + vio->vio_resident_page_count = + cobject->resident_page_count; + vio->vio_absent_count = + cobject->absent_count; + vio->vio_copy = + (vm_offset_t) cobject->copy; + vio->vio_shadow = + (vm_offset_t) cobject->shadow; + vio->vio_shadow_offset = + cobject->shadow_offset; + vio->vio_paging_offset = + cobject->paging_offset; + vio->vio_copy_strategy = + cobject->copy_strategy; + vio->vio_last_alloc = + cobject->last_alloc; + vio->vio_paging_in_progress = + cobject->paging_in_progress; + vio->vio_pager_created = + cobject->pager_created; + vio->vio_pager_initialized = + cobject->pager_initialized; + vio->vio_pager_ready = + cobject->pager_ready; + vio->vio_can_persist = + cobject->can_persist; + vio->vio_internal = + cobject->internal; + vio->vio_temporary = + cobject->temporary; + vio->vio_alive = + cobject->alive; + vio->vio_lock_in_progress = + cobject->lock_in_progress; + vio->vio_lock_restart = + cobject->lock_restart; + } + + used++; + nobject = cobject->shadow; + if (nobject == VM_OBJECT_NULL) { + 
vm_object_unlock(cobject); + break; + } + + vm_object_lock(nobject); + vm_object_unlock(cobject); + } + + /* nothing locked */ + + if (used <= room) + break; + + /* must allocate more memory */ + + if (size != 0) + kmem_free(ipc_kernel_map, addr, size); + size = round_page(2 * used * sizeof(vm_info_object_t)); + + kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); + if (kr != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + + kr = vm_map_wire(ipc_kernel_map, addr, addr + size, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + assert(kr == KERN_SUCCESS); + } + + /* free excess memory; make remaining memory pageable */ + + if (used == 0) { + copy = VM_MAP_COPY_NULL; + + if (size != 0) + kmem_free(ipc_kernel_map, addr, size); + } else { + vm_size_t size_used = + round_page(used * sizeof(vm_info_object_t)); + + kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, addr, size_used, + TRUE, ©); + assert(kr == KERN_SUCCESS); + + if (size != size_used) + kmem_free(ipc_kernel_map, + addr + size_used, size - size_used); + } + + *regionp = region; + *objectsp = (vm_info_object_array_t) copy; + *objectsCntp = used; + return KERN_SUCCESS; +#endif /* MACH_VM_DEBUG */ +} +/* + * Temporary call for 64 bit data path interface transiotion + */ + +kern_return_t +mach_vm_region_info_64( + vm_map_t map, + vm_offset_t address, + vm_info_region_64_t *regionp, + vm_info_object_array_t *objectsp, + mach_msg_type_number_t *objectsCntp) +{ +#if !MACH_VM_DEBUG + return KERN_FAILURE; +#else + vm_map_copy_t copy; + vm_offset_t addr; /* memory for OOL data */ + vm_size_t size; /* size of the memory */ + unsigned int room; /* room for this many objects */ + unsigned int used; /* actually this many objects */ + vm_info_region_64_t region; + kern_return_t kr; + + if (map == VM_MAP_NULL) + return KERN_INVALID_TASK; + + size = 0; /* no memory allocated yet */ + + for (;;) { + vm_map_t cmap; /* current map in traversal */ + 
vm_map_t nmap; /* next map to look at */ + vm_map_entry_t entry; + vm_object_t object, cobject, nobject; + + /* nothing is locked */ + + vm_map_lock_read(map); + for (cmap = map;; cmap = nmap) { + /* cmap is read-locked */ + + if (!vm_map_lookup_entry(cmap, address, &entry)) { + entry = entry->vme_next; + if (entry == vm_map_to_entry(cmap)) { + vm_map_unlock_read(cmap); + if (size != 0) + kmem_free(ipc_kernel_map, + addr, size); + return KERN_NO_SPACE; + } + } + + if (entry->is_sub_map) + nmap = entry->object.sub_map; + else + break; + + /* move down to the lower map */ + + vm_map_lock_read(nmap); + vm_map_unlock_read(cmap); + } + + /* cmap is read-locked; we have a real entry */ + + object = entry->object.vm_object; + region.vir_start = entry->vme_start; + region.vir_end = entry->vme_end; + region.vir_object = (vm_offset_t) object; + region.vir_offset = entry->offset; + region.vir_needs_copy = entry->needs_copy; + region.vir_protection = entry->protection; + region.vir_max_protection = entry->max_protection; + region.vir_inheritance = entry->inheritance; + region.vir_wired_count = entry->wired_count; + region.vir_user_wired_count = entry->user_wired_count; + + used = 0; + room = size / sizeof(vm_info_object_t); + + if (object == VM_OBJECT_NULL) { + vm_map_unlock_read(cmap); + /* no memory needed */ + break; + } + + vm_object_lock(object); + vm_map_unlock_read(cmap); + + for (cobject = object;; cobject = nobject) { + /* cobject is locked */ + + if (used < room) { + vm_info_object_t *vio = + &((vm_info_object_t *) addr)[used]; + + vio->vio_object = + (vm_offset_t) cobject; + vio->vio_size = + cobject->size; + vio->vio_ref_count = + cobject->ref_count; + vio->vio_resident_page_count = + cobject->resident_page_count; + vio->vio_absent_count = + cobject->absent_count; + vio->vio_copy = + (vm_offset_t) cobject->copy; + vio->vio_shadow = + (vm_offset_t) cobject->shadow; + vio->vio_shadow_offset = + cobject->shadow_offset; + vio->vio_paging_offset = + 
cobject->paging_offset; + vio->vio_copy_strategy = + cobject->copy_strategy; + vio->vio_last_alloc = + cobject->last_alloc; + vio->vio_paging_in_progress = + cobject->paging_in_progress; + vio->vio_pager_created = + cobject->pager_created; + vio->vio_pager_initialized = + cobject->pager_initialized; + vio->vio_pager_ready = + cobject->pager_ready; + vio->vio_can_persist = + cobject->can_persist; + vio->vio_internal = + cobject->internal; + vio->vio_temporary = + cobject->temporary; + vio->vio_alive = + cobject->alive; + vio->vio_lock_in_progress = + cobject->lock_in_progress; + vio->vio_lock_restart = + cobject->lock_restart; + } + + used++; + nobject = cobject->shadow; + if (nobject == VM_OBJECT_NULL) { + vm_object_unlock(cobject); + break; + } + + vm_object_lock(nobject); + vm_object_unlock(cobject); + } + + /* nothing locked */ + + if (used <= room) + break; + + /* must allocate more memory */ + + if (size != 0) + kmem_free(ipc_kernel_map, addr, size); + size = round_page(2 * used * sizeof(vm_info_object_t)); + + kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE); + if (kr != KERN_SUCCESS) + return KERN_RESOURCE_SHORTAGE; + + kr = vm_map_wire(ipc_kernel_map, addr, addr + size, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + assert(kr == KERN_SUCCESS); + } + + /* free excess memory; make remaining memory pageable */ + + if (used == 0) { + copy = VM_MAP_COPY_NULL; + + if (size != 0) + kmem_free(ipc_kernel_map, addr, size); + } else { + vm_size_t size_used = + round_page(used * sizeof(vm_info_object_t)); + + kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE); + assert(kr == KERN_SUCCESS); + + kr = vm_map_copyin(ipc_kernel_map, addr, size_used, + TRUE, ©); + assert(kr == KERN_SUCCESS); + + if (size != size_used) + kmem_free(ipc_kernel_map, + addr + size_used, size - size_used); + } + + *regionp = region; + *objectsp = (vm_info_object_array_t) copy; + *objectsCntp = used; + return KERN_SUCCESS; +#endif /* MACH_VM_DEBUG */ +} +/* + * Return an array of virtual 
pages that are mapped to a task.
 */
kern_return_t
vm_mapped_pages_info(
	vm_map_t		map,
	page_address_array_t	*pages,
	mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t		pmap;
	vm_size_t	size, size_used;
	unsigned int	actual, space;
	page_address_array_t list;
	vm_offset_t	addr;

	if (map == VM_MAP_NULL)
	    return (KERN_INVALID_ARGUMENT);

	/* Size the buffer from the pmap's current resident count. */
	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page(size);

	for (;;) {
	    /* Pageable buffer in the IPC kernel map for the OOL reply. */
	    (void) vm_allocate(ipc_kernel_map, &addr, size, TRUE);
	    (void) vm_map_unwire(ipc_kernel_map, addr, addr + size, FALSE);

	    list = (page_address_array_t) addr;
	    space = size / sizeof(vm_offset_t);

	    actual = pmap_list_resident_pages(pmap,
					list,
					space);
	    if (actual <= space)
		break;

	    /*
	     * Free memory if not enough
	     */
	    (void) kmem_free(ipc_kernel_map, addr, size);

	    /*
	     * Try again, doubling the size
	     * NOTE(review): despite the comment, the new size is just
	     * the reported 'actual' -- a pmap that grows between passes
	     * forces another retry; presumably acceptable for a debug
	     * call, but confirm.
	     */
	    size = round_page(actual * sizeof(vm_offset_t));
	}
	if (actual == 0) {
	    *pages = 0;
	    *pages_count = 0;
	    (void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
	    *pages_count = actual;
	    size_used = round_page(actual * sizeof(vm_offset_t));
	    /*
	     * NOTE(review): the whole [addr, addr+size) range is wired
	     * here, but only size_used bytes are copied out and the
	     * tail is freed below while still wired -- verify this is
	     * the intended sequence.
	     */
	    (void) vm_map_wire(ipc_kernel_map,
			addr, addr + size,
			VM_PROT_READ|VM_PROT_WRITE, FALSE);
	    (void) vm_map_copyin(
			ipc_kernel_map,
			addr,
			size_used,
			TRUE,
			(vm_map_copy_t *)pages);
	    if (size_used != size) {
		(void) kmem_free(ipc_kernel_map,
				addr + size_used,
				size - size_used);
	    }
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
host_virtual_physical_table_info(
	host_t				host,
	hash_info_bucket_array_t	*infop,
	mach_msg_type_number_t		*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		/* vm_page_info reports how many buckets it WOULD need. */
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		/* Drop the previous too-small kernel buffer, if any. */
		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page(actual * sizeof *info);

		/* Trim the unused tail before handing the rest out-of-line. */
		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, addr, used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

diff --git a/osfmk/vm/vm_debug.h b/osfmk/vm/vm_debug.h
new file mode 100644
index 000000000..fee86f23a
--- /dev/null
+++ b/osfmk/vm/vm_debug.h
@@ -0,0 +1,44 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef VM_VM_DEBUG_H +#define VM_VM_DEBUG_H + +#include +#include +#include +#include +#include + +extern unsigned int vm_page_info( + hash_info_bucket_t *info, + unsigned int count); + +extern int pmap_list_resident_pages( + pmap_t pmap, + vm_offset_t *listp, + int space); + +#endif /* VM_VM_DEBUG_H */ diff --git a/osfmk/vm/vm_external.c b/osfmk/vm/vm_external.c new file mode 100644 index 000000000..f5eca5ba5 --- /dev/null +++ b/osfmk/vm/vm_external.c @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. 
Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * This module maintains information about the presence of + * pages not in memory. Since an external memory object + * must maintain a complete knowledge of its contents, this + * information takes the form of hints. + */ +#include /* for memcpy()/memset() */ + +#include +#include +#include +#include +#include + +/* + * The implementation uses bit arrays to record whether + * a page has been written to external storage. For + * convenience, these bit arrays come in various sizes. 
+ * For example, a map N bytes long can record: + * + * 16 bytes = 128 pages = (@ 4KB/page) 512KB + * 1024 bytes = 8192 pages = (@ 4KB/page) 32MB + * 4096 bytes = 32768 pages = (@ 4KB/page) 128MB + * + * For a 32-bit machine with 4KB pages, the largest size + * would be 128KB = 32 pages. Machines with a larger page + * size are more efficient. + * + * This subsystem must be very careful about memory allocation, + * since vm_external_create() is almost always called with + * vm_privilege set. The largest map to be allocated must be less + * than or equal to a single page, and the kalloc subsystem must + * never allocate more than a single page in response to a kalloc() + * request. Also, vm_external_destroy() must not take any blocking + * locks, since it is called with a vm_object lock held. This + * implies that kfree() MUST be implemented in terms of zfree() + * NOT kmem_free() for all request sizes that this subsystem uses. + * + * For efficiency, this subsystem knows that the kalloc() subsystem + * is implemented in terms of power-of-2 allocation, and that the + * minimum allocation unit is KALLOC_MINSIZE + * + * XXXO + * Should consider using existence_map to hold bits directly + * when existence_size <= 4 bytes (i.e., 32 pages). 
+ */ + +#define SMALL_SIZE KALLOC_MINSIZE +#define LARGE_SIZE PAGE_SIZE + +static vm_size_t power_of_2(vm_size_t size); + +static vm_size_t +power_of_2(vm_size_t size) +{ + vm_size_t power; + + power = 2 * SMALL_SIZE; + while (power < size) { + power <<= 1; + } + return(power); +} + +vm_external_map_t +vm_external_create( + vm_offset_t size) +{ + vm_size_t bytes; + vm_external_map_t result = VM_EXTERNAL_NULL; + + bytes = stob(size); + if (bytes <= SMALL_SIZE) { + if ((result = (vm_external_map_t)kalloc(SMALL_SIZE)) != NULL) { + memset(result, 0, SMALL_SIZE); + } + } else if (bytes <= LARGE_SIZE) { + bytes = power_of_2(bytes); + + if ((result = (vm_external_map_t)kalloc(bytes)) != NULL) { + memset(result, 0, bytes); + } + } + return(result); +} + +void +vm_external_destroy( + vm_external_map_t map, + vm_size_t size) +{ + vm_size_t bytes; + + if (map == VM_EXTERNAL_NULL) + return; + + bytes = stob(size); + if (bytes <= SMALL_SIZE) { + bytes = SMALL_SIZE; + } else { + bytes = power_of_2(bytes); + } + kfree((vm_offset_t)map, bytes); +} + +/* + * Return the number of bytes needed for a vm_external_map given the + * size of the object to be mapped, i.e. the size of the map that was + * created by vm_external_create. 
+ */ +vm_size_t +vm_external_map_size( + vm_offset_t size) +{ + vm_size_t bytes; + + bytes = stob(size); + if (bytes != 0) + if (bytes <= SMALL_SIZE) { + bytes = SMALL_SIZE; + } else { + bytes = power_of_2(bytes); + } + return bytes; +} + +void +vm_external_copy( + vm_external_map_t old_map, + vm_size_t old_size, + vm_external_map_t new_map) +{ + /* + * Cannot copy non-existent maps + */ + if ((old_map == VM_EXTERNAL_NULL) || (new_map == VM_EXTERNAL_NULL)) + return; + + /* + * Copy old map to new + */ + memcpy(new_map, old_map, stob(old_size)); +} + +boolean_t +vm_external_within( + vm_size_t new_size, + vm_size_t old_size) +{ + vm_size_t new_bytes; + vm_size_t old_bytes; + + assert(new_size >= old_size); + + /* + * "old_bytes" is calculated to be the actual amount of space + * allocated for a map of size "old_size". + */ + old_bytes = stob(old_size); + if (old_bytes <= SMALL_SIZE) old_bytes = SMALL_SIZE; + else if (old_bytes <= LARGE_SIZE) old_bytes = power_of_2(old_bytes); + + /* + * "new_bytes" is the map size required to map the "new_size" object. 
+ * Since the rounding algorithms are the same, we needn't actually + * round up new_bytes to get the correct answer + */ + new_bytes = stob(new_size); + + return(new_bytes <= old_bytes); +} + +vm_external_state_t +_vm_external_state_get( + vm_external_map_t map, + vm_offset_t offset) +{ + unsigned + int bit, byte; + + assert (map != VM_EXTERNAL_NULL); + + bit = atop(offset); + byte = bit >> 3; + if (map[byte] & (1 << (bit & 07))) { + return VM_EXTERNAL_STATE_EXISTS; + } else { + return VM_EXTERNAL_STATE_ABSENT; + } +} + +void +vm_external_state_set( + vm_external_map_t map, + vm_offset_t offset) +{ + unsigned + int bit, byte; + + if (map == VM_EXTERNAL_NULL) + return; + + bit = atop(offset); + byte = bit >> 3; + map[byte] |= (1 << (bit & 07)); +} + +void +vm_external_state_clr( + vm_external_map_t map, + vm_offset_t offset) +{ + unsigned + int bit, byte; + + if (map == VM_EXTERNAL_NULL) + return; + + bit = atop(offset); + byte = bit >> 3; + map[byte] &= ~(1 << (bit & 07)); +} + +void +vm_external_module_initialize(void) +{ +} diff --git a/osfmk/vm/vm_external.h b/osfmk/vm/vm_external.h new file mode 100644 index 000000000..3b189e69e --- /dev/null +++ b/osfmk/vm/vm_external.h @@ -0,0 +1,139 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +#ifndef VM_VM_EXTERNAL_H_ +#define VM_VM_EXTERNAL_H_ + +#include +#include + +/* + * External page management hint technology + * + * The data structure exported by this module maintains + * a (potentially incomplete) map of the pages written + * to external storage for a range of virtual memory. 
+ */ + +typedef char *vm_external_map_t; +#define VM_EXTERNAL_NULL ((char *) 0) + +/* + * The states that may be recorded for a page of external storage. + */ + +typedef int vm_external_state_t; +#define VM_EXTERNAL_STATE_EXISTS 1 +#define VM_EXTERNAL_STATE_UNKNOWN 2 +#define VM_EXTERNAL_STATE_ABSENT 3 + +/* + * Useful macros + */ +#define stob(s) ((atop((s)) + 07) >> 3) + +/* + * Routines exported by this module. + */ + /* Initialize the module */ +extern void vm_external_module_initialize(void); + + +extern vm_external_map_t vm_external_create( + /* Create a vm_external_map_t */ + vm_offset_t size); + +extern void vm_external_destroy( + /* Destroy one */ + vm_external_map_t map, + vm_size_t size); + +extern vm_size_t vm_external_map_size( + /* Return size of map in bytes */ + vm_offset_t size); + +extern void vm_external_copy( + /* Copy one into another */ + vm_external_map_t old_map, + vm_size_t old_size, + vm_external_map_t new_map); + +extern void vm_external_state_set( + /* Set state of a page to + * VM_EXTERNAL_STATE_EXISTS */ + vm_external_map_t map, + vm_offset_t offset); + +extern void vm_external_state_clr( + /* clear page state + */ + vm_external_map_t map, + vm_offset_t offset); + +#define vm_external_state_get(map, offset) \ + (((map) != VM_EXTERNAL_NULL) ? \ + _vm_external_state_get((map), (offset)) : \ + VM_EXTERNAL_STATE_UNKNOWN) + /* Retrieve the state for a + * given page, if known. */ + +extern vm_external_state_t _vm_external_state_get( + /* HIDDEN routine */ + vm_external_map_t map, + vm_offset_t offset); + +boolean_t vm_external_within( + /* Check if new object size + * fits in current map */ + vm_size_t new_size, + vm_size_t old_size); +#endif /* VM_VM_EXTERNAL_H_ */ diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c new file mode 100644 index 000000000..77ec5f18c --- /dev/null +++ b/osfmk/vm/vm_fault.c @@ -0,0 +1,3552 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: vm_fault.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Page fault handling module. + */ +#ifdef MACH_BSD +/* remove after component interface available */ +extern int vnode_pager_workaround; +#endif + +#include +#include +#include + +#include +#include +#include /* for error codes */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + /* For memory_object_data_{request,unlock} */ +#include +#include +#include +#include + +#include + +#define VM_FAULT_CLASSIFY 0 +#define VM_FAULT_STATIC_CONFIG 1 + +#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */ + +int vm_object_absent_max = 50; + +int vm_fault_debug = 0; +boolean_t vm_page_deactivate_behind = TRUE; + +vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC; + +#if !VM_FAULT_STATIC_CONFIG +boolean_t vm_fault_dirty_handling = FALSE; +boolean_t vm_fault_interruptible = FALSE; +boolean_t software_reference_bits = TRUE; +#endif + +#if MACH_KDB +extern struct db_watchpoint *db_watchpoint_list; +#endif /* MACH_KDB */ + +/* Forward declarations of internal routines. */ +extern kern_return_t vm_fault_wire_fast( + vm_map_t map, + vm_offset_t va, + vm_map_entry_t entry, + pmap_t pmap); + +extern void vm_fault_continue(void); + +extern void vm_fault_copy_cleanup( + vm_page_t page, + vm_page_t top_page); + +extern void vm_fault_copy_dst_cleanup( + vm_page_t page); + +#if VM_FAULT_CLASSIFY +extern void vm_fault_classify(vm_object_t object, + vm_object_offset_t offset, + vm_prot_t fault_type); + +extern void vm_fault_classify_init(void); +#endif + +/* + * Routine: vm_fault_init + * Purpose: + * Initialize our private data structures. + */ +void +vm_fault_init(void) +{ +} + +/* + * Routine: vm_fault_cleanup + * Purpose: + * Clean up the result of vm_fault_page. + * Results: + * The paging reference for "object" is released. + * "object" is unlocked. 
+ * If "top_page" is not null, "top_page" is + * freed and the paging reference for the object + * containing it is released. + * + * In/out conditions: + * "object" must be locked. + */ +void +vm_fault_cleanup( + register vm_object_t object, + register vm_page_t top_page) +{ + vm_object_paging_end(object); + vm_object_unlock(object); + + if (top_page != VM_PAGE_NULL) { + object = top_page->object; + vm_object_lock(object); + VM_PAGE_FREE(top_page); + vm_object_paging_end(object); + vm_object_unlock(object); + } +} + +#if MACH_CLUSTER_STATS +#define MAXCLUSTERPAGES 16 +struct { + unsigned long pages_in_cluster; + unsigned long pages_at_higher_offsets; + unsigned long pages_at_lower_offsets; +} cluster_stats_in[MAXCLUSTERPAGES]; +#define CLUSTER_STAT(clause) clause +#define CLUSTER_STAT_HIGHER(x) \ + ((cluster_stats_in[(x)].pages_at_higher_offsets)++) +#define CLUSTER_STAT_LOWER(x) \ + ((cluster_stats_in[(x)].pages_at_lower_offsets)++) +#define CLUSTER_STAT_CLUSTER(x) \ + ((cluster_stats_in[(x)].pages_in_cluster)++) +#else /* MACH_CLUSTER_STATS */ +#define CLUSTER_STAT(clause) +#endif /* MACH_CLUSTER_STATS */ + +/* XXX - temporary */ +boolean_t vm_allow_clustered_pagein = FALSE; +int vm_pagein_cluster_used = 0; + +/* + * Prepage default sizes given VM_BEHAVIOR_DEFAULT reference behavior + */ +int vm_default_ahead = 1; /* Number of pages to prepage ahead */ +int vm_default_behind = 0; /* Number of pages to prepage behind */ + +#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) + +/* + * Routine: vm_fault_page + * Purpose: + * Find the resident page for the virtual memory + * specified by the given virtual memory object + * and offset. + * Additional arguments: + * The required permissions for the page is given + * in "fault_type". Desired permissions are included + * in "protection". 
The minimum and maximum valid offsets + * within the object for the relevant map entry are + * passed in "lo_offset" and "hi_offset" respectively and + * the expected page reference pattern is passed in "behavior". + * These three parameters are used to determine pagein cluster + * limits. + * + * If the desired page is known to be resident (for + * example, because it was previously wired down), asserting + * the "unwiring" parameter will speed the search. + * + * If the operation can be interrupted (by thread_abort + * or thread_terminate), then the "interruptible" + * parameter should be asserted. + * + * Results: + * The page containing the proper data is returned + * in "result_page". + * + * In/out conditions: + * The source object must be locked and referenced, + * and must donate one paging reference. The reference + * is not affected. The paging reference and lock are + * consumed. + * + * If the call succeeds, the object in which "result_page" + * resides is left locked and holding a paging reference. + * If this is not the original object, a busy page in the + * original object is returned in "top_page", to prevent other + * callers from pursuing this same data, along with a paging + * reference for the original object. The "top_page" should + * be destroyed when this guarantee is no longer required. + * The "result_page" is also left busy. It is not removed + * from the pageout queues. + */ + +vm_fault_return_t +vm_fault_page( + /* Arguments: */ + vm_object_t first_object, /* Object to begin search */ + vm_object_offset_t first_offset, /* Offset into object */ + vm_prot_t fault_type, /* What access is requested */ + boolean_t must_be_resident,/* Must page be resident? */ + int interruptible, /* how may fault be interrupted? 
*/ + vm_object_offset_t lo_offset, /* Map entry start */ + vm_object_offset_t hi_offset, /* Map entry end */ + vm_behavior_t behavior, /* Page reference behavior */ + /* Modifies in place: */ + vm_prot_t *protection, /* Protection for mapping */ + /* Returns: */ + vm_page_t *result_page, /* Page found, if successful */ + vm_page_t *top_page, /* Page in top object, if + * not result_page. */ + int *type_of_fault, /* if non-null, fill in with type of fault + * COW, zero-fill, etc... returned in trace point */ + /* More arguments: */ + kern_return_t *error_code, /* code if page is in error */ + boolean_t no_zero_fill, /* don't zero fill absent pages */ + boolean_t data_supply) /* treat as data_supply if + * it is a write fault and a full + * page is provided */ +{ + register + vm_page_t m; + register + vm_object_t object; + register + vm_object_offset_t offset; + vm_page_t first_m; + vm_object_t next_object; + vm_object_t copy_object; + boolean_t look_for_page; + vm_prot_t access_required = fault_type; + vm_prot_t wants_copy_flag; + vm_size_t cluster_size, length; + vm_object_offset_t cluster_offset; + vm_object_offset_t cluster_start, cluster_end, paging_offset; + vm_object_offset_t align_offset; + CLUSTER_STAT(int pages_at_higher_offsets;) + CLUSTER_STAT(int pages_at_lower_offsets;) + kern_return_t wait_result; + thread_t cur_thread; + boolean_t interruptible_state; + +#ifdef MACH_BSD + kern_return_t vnode_pager_data_request(ipc_port_t, + ipc_port_t, vm_object_offset_t, vm_size_t, vm_prot_t); +#endif + +#if MACH_PAGEMAP +/* + * MACH page map - an optional optimization where a bit map is maintained + * by the VM subsystem for internal objects to indicate which pages of + * the object currently reside on backing store. This existence map + * duplicates information maintained by the vnode pager. It is + * created at the time of the first pageout against the object, i.e. + * at the same time pager for the object is created. 
The optimization + * is designed to eliminate pager interaction overhead, if it is + * 'known' that the page does not exist on backing store. + * + * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is + * either marked as paged out in the existence map for the object or no + * existence map exists for the object. LOOK_FOR() is one of the + * criteria in the decision to invoke the pager. It is also used as one + * of the criteria to terminate the scan for adjacent pages in a clustered + * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for + * permanent objects. Note also that if the pager for an internal object + * has not been created, the pager is not invoked regardless of the value + * of LOOK_FOR() and that clustered pagein scans are only done on an object + * for which a pager has been created. + * + * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset + * is marked as paged out in the existence map for the object. + * PAGED_OUT() is used to determine if a page has already been pushed + * into a copy object in order to avoid a redundant page out operation. + */ +#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \ + != VM_EXTERNAL_STATE_ABSENT) +#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \ + == VM_EXTERNAL_STATE_EXISTS) +#else /* MACH_PAGEMAP */ +/* + * If the MACH page map optimization is not enabled, + * LOOK_FOR() always evaluates to TRUE. The pager will always be + * invoked to resolve missing pages in an object, assuming the pager + * has been created for the object. In a clustered page operation, the + * absence of a page on backing store cannot be used to terminate + * a scan for adjacent pages since that information is available only in + * the pager. Hence pages that may not be paged out are potentially + * included in a clustered request.
The vnode pager is coded to deal + * with any combination of absent/present pages in a clustered + * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager + * will always be invoked to push a dirty page into a copy object assuming + * a pager has been created. If the page has already been pushed, the + * pager will ignore the new request. + */ +#define LOOK_FOR(o, f) TRUE +#define PAGED_OUT(o, f) FALSE +#endif /* MACH_PAGEMAP */ + +/* + * Recovery actions + */ +#define PREPARE_RELEASE_PAGE(m) \ + MACRO_BEGIN \ + vm_page_lock_queues(); \ + MACRO_END + +#define DO_RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PAGE_WAKEUP_DONE(m); \ + if (!m->active && !m->inactive) \ + vm_page_activate(m); \ + vm_page_unlock_queues(); \ + MACRO_END + +#define RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PREPARE_RELEASE_PAGE(m); \ + DO_RELEASE_PAGE(m); \ + MACRO_END + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ +#endif + + + +#if !VM_FAULT_STATIC_CONFIG + if (vm_fault_dirty_handling +#if MACH_KDB + /* + * If there are watchpoints set, then + * we don't want to give away write permission + * on a read fault. Make the task write fault, + * so that the watchpoint code notices the access. + */ + || db_watchpoint_list +#endif /* MACH_KDB */ + ) { + /* + * If we aren't asking for write permission, + * then don't give it away. We're using write + * faults to set the dirty bit. + */ + if (!(fault_type & VM_PROT_WRITE)) + *protection &= ~VM_PROT_WRITE; + } + + if (!vm_fault_interruptible) + interruptible = THREAD_UNINT; +#else /* STATIC_CONFIG */ +#if MACH_KDB + /* + * If there are watchpoints set, then + * we don't want to give away write permission + * on a read fault. Make the task write fault, + * so that the watchpoint code notices the access. + */ + if (db_watchpoint_list) { + /* + * If we aren't asking for write permission, + * then don't give it away. We're using write + * faults to set the dirty bit.
+ */ + if (!(fault_type & VM_PROT_WRITE)) + *protection &= ~VM_PROT_WRITE; + } + +#endif /* MACH_KDB */ +#endif /* STATIC_CONFIG */ + + cur_thread = current_thread(); + + interruptible_state = cur_thread->interruptible; + if (interruptible == THREAD_UNINT) + cur_thread->interruptible = FALSE; + + /* + * INVARIANTS (through entire routine): + * + * 1) At all times, we must either have the object + * lock or a busy page in some object to prevent + * some other thread from trying to bring in + * the same page. + * + * Note that we cannot hold any locks during the + * pager access or when waiting for memory, so + * we use a busy page then. + * + * Note also that we aren't as concerned about more than + * one thread attempting to memory_object_data_unlock + * the same page at once, so we don't hold the page + * as busy then, but do record the highest unlock + * value so far. [Unlock requests may also be delivered + * out of order.] + * + * 2) To prevent another thread from racing us down the + * shadow chain and entering a new page in the top + * object before we do, we must keep a busy page in + * the top object while following the shadow chain. + * + * 3) We must increment paging_in_progress on any object + * for which we have a busy page + * + * 4) We leave busy pages on the pageout queues. + * If the pageout daemon comes across a busy page, + * it will remove the page from the pageout queues. + */ + + /* + * Search for the page at object/offset. 
+ */ + + object = first_object; + offset = first_offset; + first_m = VM_PAGE_NULL; + access_required = fault_type; + + XPR(XPR_VM_FAULT, + "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n", + (integer_t)object, offset, fault_type, *protection, 0); + + /* + * See whether this page is resident + */ + + while (TRUE) { +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + if (!object->alive) { + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_ERROR); + } + m = vm_page_lookup(object, offset); +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ +#endif + if (m != VM_PAGE_NULL) { + /* + * If the page was pre-paged as part of a + * cluster, record the fact. + */ + if (m->clustered) { + vm_pagein_cluster_used++; + m->clustered = FALSE; + } + + /* + * If the page is being brought in, + * wait for it and then retry. + * + * A possible optimization: if the page + * is known to be resident, we can ignore + * pages that are absent (regardless of + * whether they're busy). + */ + + if (m->busy) { +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + PAGE_ASSERT_WAIT(m, interruptible); + vm_object_unlock(object); + XPR(XPR_VM_FAULT, + "vm_f_page: block busy obj 0x%X, offset 0x%X, page 0x%X\n", + (integer_t)object, offset, + (integer_t)m, 0, 0); + counter(c_vm_fault_page_block_busy_kernel++); + wait_result = thread_block((void (*)(void))0); + + vm_object_lock(object); + if (wait_result != THREAD_AWAKENED) { + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + if (wait_result == THREAD_RESTART) + { + return(VM_FAULT_RETRY); + } + else + { + return(VM_FAULT_INTERRUPTED); + } + } + continue; + } + + /* + * If the page is in error, give up now. 
+ */ + + if (m->error) { +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ +#endif + if (error_code) + *error_code = m->page_error; + VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_ERROR); + } + + /* + * If the pager wants us to restart + * at the top of the chain, + * typically because it has moved the + * page to another pager, then do so. + */ + + if (m->restart) { +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_RETRY); + } + + /* + * If the page isn't busy, but is absent, + * then it was deemed "unavailable". + */ + + if (m->absent) { + /* + * Remove the non-existent page (unless it's + * in the top object) and move on down to the + * next object (if there is one). + */ +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ +#endif + + next_object = object->shadow; + if (next_object == VM_OBJECT_NULL) { + vm_page_t real_m; + + assert(!must_be_resident); + + if (object->shadow_severed) { + vm_fault_cleanup( + object, first_m); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_MEMORY_ERROR; + } + + /* + * Absent page at bottom of shadow + * chain; zero fill the page we left + * busy in the first object, and flush + * the absent page. But first we + * need to allocate a real page. 
+ */ + if (VM_PAGE_THROTTLED() || + (real_m = vm_page_grab()) == VM_PAGE_NULL) { + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_SHORTAGE); + } + + XPR(XPR_VM_FAULT, + "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", + (integer_t)object, offset, + (integer_t)m, + (integer_t)first_object, 0); + if (object != first_object) { + VM_PAGE_FREE(m); + vm_object_paging_end(object); + vm_object_unlock(object); + object = first_object; + offset = first_offset; + m = first_m; + first_m = VM_PAGE_NULL; + vm_object_lock(object); + } + + VM_PAGE_FREE(m); + assert(real_m->busy); + vm_page_insert(real_m, object, offset); + m = real_m; + + /* + * Drop the lock while zero filling + * page. Then break because this + * is the page we wanted. Checking + * the page lock is a waste of time; + * this page was either absent or + * newly allocated -- in both cases + * it can't be page locked by a pager. + */ + if (!no_zero_fill) { + vm_object_unlock(object); + vm_page_zero_fill(m); + if (type_of_fault) + *type_of_fault = DBG_ZERO_FILL_FAULT; + VM_STAT(zero_fill_count++); + vm_object_lock(object); + } + pmap_clear_modify(m->phys_addr); + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + queue_enter(&vm_page_queue_inactive, + m, vm_page_t, pageq); + m->inactive = TRUE; + vm_page_inactive_count++; + vm_page_unlock_queues(); + break; + } else { + if (must_be_resident) { + vm_object_paging_end(object); + } else if (object != first_object) { + vm_object_paging_end(object); + VM_PAGE_FREE(m); + } else { + first_m = m; + m->absent = FALSE; + m->unusual = FALSE; + vm_object_absent_release(object); + m->busy = TRUE; + + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + vm_page_unlock_queues(); + } + XPR(XPR_VM_FAULT, + "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n", + (integer_t)object, offset, + (integer_t)next_object, + offset+object->shadow_offset,0); + offset += object->shadow_offset; + 
hi_offset += object->shadow_offset; + lo_offset += object->shadow_offset; + access_required = VM_PROT_READ; + vm_object_lock(next_object); + vm_object_unlock(object); + object = next_object; + vm_object_paging_begin(object); + continue; + } + } + + if ((m->cleaning) + && ((object != first_object) || + (object->copy != VM_OBJECT_NULL)) + && (fault_type & VM_PROT_WRITE)) { + /* + * This is a copy-on-write fault that will + * cause us to revoke access to this page, but + * this page is in the process of being cleaned + * in a clustered pageout. We must wait until + * the cleaning operation completes before + * revoking access to the original page, + * otherwise we might attempt to remove a + * wired mapping. + */ +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */ +#endif + XPR(XPR_VM_FAULT, + "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n", + (integer_t)object, offset, + (integer_t)m, 0, 0); + /* take an extra ref so that object won't die */ + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); + assert(object->ref_count > 0); + m = vm_page_lookup(object, offset); + if (m != VM_PAGE_NULL && m->cleaning) { + PAGE_ASSERT_WAIT(m, interruptible); + vm_object_unlock(object); + wait_result = thread_block((void (*)(void)) 0); + vm_object_deallocate(object); + goto backoff; + } else { + vm_object_unlock(object); + vm_object_deallocate(object); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_RETRY; + } + } + + /* + * If the desired access to this page has + * been locked out, request that it be unlocked. 
+ */ + + if (access_required & m->page_lock) { + if ((access_required & m->unlock_request) != access_required) { + vm_prot_t new_unlock_request; + kern_return_t rc; + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000A, (unsigned int) m, (unsigned int) object->pager_ready); /* (TEST/DEBUG) */ +#endif + if (!object->pager_ready) { + XPR(XPR_VM_FAULT, + "vm_f_page: ready wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n", + access_required, + (integer_t)object, offset, + (integer_t)m, 0); + /* take an extra ref */ + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_fault_cleanup(object, + first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); + assert(object->ref_count > 0); + if (!object->pager_ready) { + vm_object_assert_wait( + object, + VM_OBJECT_EVENT_PAGER_READY, + interruptible); + vm_object_unlock(object); + wait_result = thread_block((void (*)(void))0); + vm_object_deallocate(object); + goto backoff; + } else { + vm_object_unlock(object); + vm_object_deallocate(object); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_RETRY; + } + } + + new_unlock_request = m->unlock_request = + (access_required | m->unlock_request); + vm_object_unlock(object); + XPR(XPR_VM_FAULT, + "vm_f_page: unlock obj 0x%X, offset 0x%X, page 0x%X, unl_req %d\n", + (integer_t)object, offset, + (integer_t)m, new_unlock_request, 0); + if ((rc = memory_object_data_unlock( + object->pager, + object->pager_request, + offset + object->paging_offset, + PAGE_SIZE, + new_unlock_request)) + != KERN_SUCCESS) { + if (vm_fault_debug) + printf("vm_fault: memory_object_data_unlock failed\n"); + vm_object_lock(object); + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return((rc == MACH_SEND_INTERRUPTED) ? 
+ VM_FAULT_INTERRUPTED : + VM_FAULT_MEMORY_ERROR); + } + vm_object_lock(object); + continue; + } + + XPR(XPR_VM_FAULT, + "vm_f_page: access wait acc_req %d, obj 0x%X, offset 0x%X, page 0x%X\n", + access_required, (integer_t)object, + offset, (integer_t)m, 0); + /* take an extra ref so object won't die */ + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); + assert(object->ref_count > 0); + m = vm_page_lookup(object, offset); + if (m != VM_PAGE_NULL && + (access_required & m->page_lock) && + !((access_required & m->unlock_request) != access_required)) { + PAGE_ASSERT_WAIT(m, interruptible); + vm_object_unlock(object); + wait_result = thread_block((void (*)(void)) 0); + vm_object_deallocate(object); + goto backoff; + } else { + vm_object_unlock(object); + vm_object_deallocate(object); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_RETRY; + } + } + /* + * We mark the page busy and leave it on + * the pageout queues. If the pageout + * deamon comes across it, then it will + * remove the page. 
+ */ + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + +#if !VM_FAULT_STATIC_CONFIG + if (!software_reference_bits) { + vm_page_lock_queues(); + if (m->inactive) + vm_stat.reactivations++; + + VM_PAGE_QUEUES_REMOVE(m); + vm_page_unlock_queues(); + } +#endif + XPR(XPR_VM_FAULT, + "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n", + (integer_t)object, offset, (integer_t)m, 0, 0); + assert(!m->busy); + m->busy = TRUE; + assert(!m->absent); + break; + } + + look_for_page = + (object->pager_created) && + LOOK_FOR(object, offset) && + (!data_supply); + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ +#endif + if ((look_for_page || (object == first_object)) + && !must_be_resident) { + /* + * Allocate a new page for this object/offset + * pair. + */ + + m = vm_page_grab_fictitious(); +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ +#endif + if (m == VM_PAGE_NULL) { + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_FICTITIOUS_SHORTAGE); + } + vm_page_insert(m, object, offset); + } + + if (look_for_page && !must_be_resident) { + kern_return_t rc; + + /* + * If the memory manager is not ready, we + * cannot make requests. 
+ */ + if (!object->pager_ready) { +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + VM_PAGE_FREE(m); + XPR(XPR_VM_FAULT, + "vm_f_page: ready wait obj 0x%X, offset 0x%X\n", + (integer_t)object, offset, 0, 0, 0); + /* take an extra ref so object won't die */ + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); + assert(object->ref_count > 0); + if (!object->pager_ready) { + vm_object_assert_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + interruptible); + vm_object_unlock(object); + wait_result = thread_block((void (*)(void))0); + vm_object_deallocate(object); + goto backoff; + } else { + vm_object_unlock(object); + vm_object_deallocate(object); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_RETRY; + } + } + + if (object->internal) { + /* + * Requests to the default pager + * must reserve a real page in advance, + * because the pager's data-provided + * won't block for pages. IMPORTANT: + * this acts as a throttling mechanism + * for data_requests to the default + * pager. + */ + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF000F, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + if (m->fictitious && !vm_page_convert(m)) { + VM_PAGE_FREE(m); + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_SHORTAGE); + } + } else if (object->absent_count > + vm_object_absent_max) { + /* + * If there are too many outstanding page + * requests pending on this object, we + * wait for them to be resolved now. 
+ */ + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + VM_PAGE_FREE(m); + /* take an extra ref so object won't die */ + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(object); + assert(object->ref_count > 0); + if (object->absent_count > vm_object_absent_max) { + vm_object_absent_assert_wait(object, + interruptible); + vm_object_unlock(object); + wait_result = thread_block((void (*)(void))0); + vm_object_deallocate(object); + goto backoff; + } else { + vm_object_unlock(object); + vm_object_deallocate(object); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_RETRY; + } + } + + /* + * Indicate that the page is waiting for data + * from the memory manager. + */ + + m->list_req_pending = TRUE; + m->absent = TRUE; + m->unusual = TRUE; + object->absent_count++; + + cluster_start = offset; + length = PAGE_SIZE; + cluster_size = object->cluster_size; + + /* + * Skip clustered pagein if it is globally disabled + * or random page reference behavior is expected + * for the address range containing the faulting + * address or the object paging block size is + * equal to the page size. + */ + if (!vm_allow_clustered_pagein || + behavior == VM_BEHAVIOR_RANDOM || + cluster_size == PAGE_SIZE) { + cluster_start = trunc_page_64(cluster_start); + goto no_clustering; + } + + assert(offset >= lo_offset); + assert(offset < hi_offset); + assert(ALIGNED(object->paging_offset)); + assert(cluster_size >= PAGE_SIZE); + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0011, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + /* + * Decide whether to scan ahead or behind for + * additional pages contiguous to the faulted + * page in the same paging block. 
The decision + * is based on system wide globals and the + * expected page reference behavior of the + * address range contained the faulting address. + * First calculate some constants. + */ + paging_offset = offset + object->paging_offset; + cluster_offset = paging_offset & (cluster_size - 1); + align_offset = paging_offset&(PAGE_SIZE_64-1); + if (align_offset != 0) { + cluster_offset = trunc_page_64(cluster_offset); + } + +#define SPANS_CLUSTER(x) ((((x) - align_offset) & (vm_object_offset_t)(cluster_size - 1)) == 0) + + /* + * Backward scan only if reverse sequential + * behavior has been specified + */ + CLUSTER_STAT(pages_at_lower_offsets = 0;) + if (((vm_default_behind != 0 && + behavior == VM_BEHAVIOR_DEFAULT) || + behavior == VM_BEHAVIOR_RSEQNTL) && offset) { + vm_object_offset_t cluster_bot; + + /* + * Calculate lower search boundary. + * Exclude pages that span a cluster boundary. + * Clip to start of map entry. + * For default page reference behavior, scan + * default pages behind. + */ + cluster_bot = (offset > cluster_offset) ? 
+ offset - cluster_offset : offset; + if (align_offset != 0) { + if ((cluster_bot < offset) && + SPANS_CLUSTER(cluster_bot)) { + cluster_bot += PAGE_SIZE_64; + } + } + if (behavior == VM_BEHAVIOR_DEFAULT) { + vm_object_offset_t + bot = (vm_object_offset_t) + (vm_default_behind * PAGE_SIZE); + + if (cluster_bot < (offset - bot)) + cluster_bot = offset - bot; + } + if (lo_offset > cluster_bot) + cluster_bot = lo_offset; + + for ( cluster_start = offset - PAGE_SIZE_64; + (cluster_start >= cluster_bot) && + (cluster_start != + (align_offset - PAGE_SIZE_64)); + cluster_start -= PAGE_SIZE_64) { + assert(cluster_size > PAGE_SIZE_64); +retry_cluster_backw: + if (!LOOK_FOR(object, cluster_start) || + vm_page_lookup(object, cluster_start) + != VM_PAGE_NULL) { + break; + } + if (object->internal) { + /* + * need to acquire a real page in + * advance because this acts as + * a throttling mechanism for + * data_requests to the default + * pager. If this fails, give up + * trying to find any more pages + * in the cluster and send off the + * request for what we already have. 
+ */ + if ((m = vm_page_grab()) + == VM_PAGE_NULL) { + cluster_start += PAGE_SIZE_64; + cluster_end = offset + PAGE_SIZE_64; + goto give_up; + } + } else if ((m = vm_page_grab_fictitious()) + == VM_PAGE_NULL) { + vm_object_unlock(object); + vm_page_more_fictitious(); + vm_object_lock(object); + goto retry_cluster_backw; + } + m->absent = TRUE; + m->unusual = TRUE; + m->clustered = TRUE; + m->list_req_pending = TRUE; + + vm_page_insert(m, object, cluster_start); + CLUSTER_STAT(pages_at_lower_offsets++;) + object->absent_count++; + } + cluster_start += PAGE_SIZE_64; + assert(cluster_start >= cluster_bot); + } + assert(cluster_start <= offset); + + /* + * Forward scan if default or sequential behavior + * specified + */ + CLUSTER_STAT(pages_at_higher_offsets = 0;) + if ((behavior == VM_BEHAVIOR_DEFAULT && + vm_default_ahead != 0) || + behavior == VM_BEHAVIOR_SEQUENTIAL) { + vm_object_offset_t cluster_top; + + /* + * Calculate upper search boundary. + * Exclude pages that span a cluster boundary. + * Clip to end of map entry. + * For default page reference behavior, scan + * default pages ahead. 
+ */ + cluster_top = (offset + cluster_size) - + cluster_offset; + if (align_offset != 0) { + if ((cluster_top > (offset + PAGE_SIZE_64)) && + SPANS_CLUSTER(cluster_top)) { + cluster_top -= PAGE_SIZE_64; + } + } + if (behavior == VM_BEHAVIOR_DEFAULT) { + vm_object_offset_t top = (vm_object_offset_t) + ((vm_default_ahead*PAGE_SIZE)+PAGE_SIZE); + + if (cluster_top > (offset + top)) + cluster_top = offset + top; + } + if (cluster_top > hi_offset) + cluster_top = hi_offset; + + for (cluster_end = offset + PAGE_SIZE_64; + cluster_end < cluster_top; + cluster_end += PAGE_SIZE_64) { + assert(cluster_size > PAGE_SIZE); +retry_cluster_forw: + if (!LOOK_FOR(object, cluster_end) || + vm_page_lookup(object, cluster_end) + != VM_PAGE_NULL) { + break; + } + if (object->internal) { + /* + * need to acquire a real page in + * advance because this acts as + * a throttling mechanism for + * data_requests to the default + * pager. If this fails, give up + * trying to find any more pages + * in the cluster and send off the + * request for what we already have. 
+ */ + if ((m = vm_page_grab()) + == VM_PAGE_NULL) { + break; + } + } else if ((m = vm_page_grab_fictitious()) + == VM_PAGE_NULL) { + vm_object_unlock(object); + vm_page_more_fictitious(); + vm_object_lock(object); + goto retry_cluster_forw; + } + m->absent = TRUE; + m->unusual = TRUE; + m->clustered = TRUE; + m->list_req_pending = TRUE; + + vm_page_insert(m, object, cluster_end); + CLUSTER_STAT(pages_at_higher_offsets++;) + object->absent_count++; + } + assert(cluster_end <= cluster_top); + } + else { + cluster_end = offset + PAGE_SIZE_64; + } +give_up: + assert(cluster_end >= offset + PAGE_SIZE_64); + length = cluster_end - cluster_start; + +#if MACH_CLUSTER_STATS + CLUSTER_STAT_HIGHER(pages_at_higher_offsets); + CLUSTER_STAT_LOWER(pages_at_lower_offsets); + CLUSTER_STAT_CLUSTER(length/PAGE_SIZE); +#endif /* MACH_CLUSTER_STATS */ + +no_clustering: +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ +#endif + /* + * We have a busy page, so we can + * release the object lock. + */ + vm_object_unlock(object); + + /* + * Call the memory manager to retrieve the data. + */ + + if (type_of_fault) + *type_of_fault = DBG_PAGEIN_FAULT; + VM_STAT(pageins++); + current_task()->pageins++; + + /* + * If this object uses a copy_call strategy, + * and we are interested in a copy of this object + * (having gotten here only by following a + * shadow chain), then tell the memory manager + * via a flag added to the desired_access + * parameter, so that it can detect a race + * between our walking down the shadow chain + * and its pushing pages up into a copy of + * the object that it manages. 
+ */ + + if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && + object != first_object) { + wants_copy_flag = VM_PROT_WANTS_COPY; + } else { + wants_copy_flag = VM_PROT_NONE; + } + + XPR(XPR_VM_FAULT, + "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n", + (integer_t)object, offset, (integer_t)m, + access_required | wants_copy_flag, 0); + +#ifdef MACH_BSD + if (((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == + ((rpc_subsystem_t) &vnode_pager_workaround)) { + rc = vnode_pager_data_request(object->pager, + object->pager_request, + cluster_start + object->paging_offset, + length, + access_required | wants_copy_flag); + } else { + rc = memory_object_data_request(object->pager, + object->pager_request, + cluster_start + object->paging_offset, + length, + access_required | wants_copy_flag); + } +#else + rc = memory_object_data_request(object->pager, + object->pager_request, + cluster_start + object->paging_offset, + length, + access_required | wants_copy_flag); + +#endif + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ +#endif + if (rc != KERN_SUCCESS) { + if (rc != MACH_SEND_INTERRUPTED + && vm_fault_debug) + printf("%s(0x%x, 0x%x, 0x%x, 0x%x, 0x%x) failed, rc=%d, object=0x%x\n", + "memory_object_data_request", + object->pager, + object->pager_request, + cluster_start + object->paging_offset, + length, access_required, + rc, object); + /* + * Don't want to leave a busy page around, + * but the data request may have blocked, + * so check if it's still there and busy. + */ + vm_object_lock(object); + for (; length; + length -= PAGE_SIZE, + cluster_start += PAGE_SIZE_64) { + vm_page_t p; + if ((p = vm_page_lookup(object, + cluster_start)) + && p->absent && p->busy + && p != first_m) { + VM_PAGE_FREE(m); + } + } + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return((rc == MACH_SEND_INTERRUPTED) ? 
+ VM_FAULT_INTERRUPTED : + VM_FAULT_MEMORY_ERROR); + } + + /* + * Retry with same object/offset, since new data may + * be in a different page (i.e., m is meaningless at + * this point). + */ + vm_object_lock(object); + if ((interruptible != THREAD_UNINT) && + (current_thread()->state & TH_ABORT)) { + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_INTERRUPTED); + } + continue; + } + + /* + * The only case in which we get here is if + * object has no pager (or unwiring). If the pager doesn't + * have the page this is handled in the m->absent case above + * (and if you change things here you should look above). + */ +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ +#endif + if (object == first_object) + first_m = m; + else + assert(m == VM_PAGE_NULL); + + XPR(XPR_VM_FAULT, + "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n", + (integer_t)object, offset, (integer_t)m, + (integer_t)object->shadow, 0); + /* + * Move on to the next object. Lock the next + * object before unlocking the current one. + */ + next_object = object->shadow; + if (next_object == VM_OBJECT_NULL) { + assert(!must_be_resident); + /* + * If there's no object left, fill the page + * in the top object with zeros. But first we + * need to allocate a real page. 
+ */ + + if (object != first_object) { + vm_object_paging_end(object); + vm_object_unlock(object); + + object = first_object; + offset = first_offset; + vm_object_lock(object); + } + + m = first_m; + assert(m->object == object); + first_m = VM_PAGE_NULL; + + if (object->shadow_severed) { + VM_PAGE_FREE(m); + vm_fault_cleanup(object, VM_PAGE_NULL); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_MEMORY_ERROR; + } + + if (VM_PAGE_THROTTLED() || + (m->fictitious && !vm_page_convert(m))) { + VM_PAGE_FREE(m); + vm_fault_cleanup(object, VM_PAGE_NULL); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_SHORTAGE); + } + + if (!no_zero_fill) { + vm_object_unlock(object); + vm_page_zero_fill(m); + if (type_of_fault) + *type_of_fault = DBG_ZERO_FILL_FAULT; + VM_STAT(zero_fill_count++); + vm_object_lock(object); + } + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + queue_enter(&vm_page_queue_inactive, + m, vm_page_t, pageq); + m->inactive = TRUE; + vm_page_inactive_count++; + vm_page_unlock_queues(); + pmap_clear_modify(m->phys_addr); + break; + } + else { + if ((object != first_object) || must_be_resident) + vm_object_paging_end(object); + offset += object->shadow_offset; + hi_offset += object->shadow_offset; + lo_offset += object->shadow_offset; + access_required = VM_PROT_READ; + vm_object_lock(next_object); + vm_object_unlock(object); + object = next_object; + vm_object_paging_begin(object); + } + } + + /* + * PAGE HAS BEEN FOUND. + * + * This page (m) is: + * busy, so that we can play with it; + * not absent, so that nobody else will fill it; + * possibly eligible for pageout; + * + * The top-level page (first_m) is: + * VM_PAGE_NULL if the page was found in the + * top-level object; + * busy, not absent, and ineligible for pageout. + * + * The current object (object) is locked. A paging + * reference is held for the current and top-level + * objects. 
+ */ + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ +#endif +#if EXTRA_ASSERTIONS + assert(m->busy && !m->absent); + assert((first_m == VM_PAGE_NULL) || + (first_m->busy && !first_m->absent && + !first_m->active && !first_m->inactive)); +#endif /* EXTRA_ASSERTIONS */ + + XPR(XPR_VM_FAULT, + "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n", + (integer_t)object, offset, (integer_t)m, + (integer_t)first_object, (integer_t)first_m); + /* + * If the page is being written, but isn't + * already owned by the top-level object, + * we have to copy it into a new page owned + * by the top-level object. + */ + + if (object != first_object) { + /* + * We only really need to copy if we + * want to write it. + */ + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ +#endif + if (fault_type & VM_PROT_WRITE) { + vm_page_t copy_m; + + assert(!must_be_resident); + + /* + * If we try to collapse first_object at this + * point, we may deadlock when we try to get + * the lock on an intermediate object (since we + * have the bottom object locked). We can't + * unlock the bottom object, because the page + * we found may move (by collapse) if we do. + * + * Instead, we first copy the page. Then, when + * we have no more use for the bottom object, + * we unlock it and try to collapse. + * + * Note that we copy the page even if we didn't + * need to... that's the breaks. 
+ */ + + /* + * Allocate a page for the copy + */ + copy_m = vm_page_grab(); + if (copy_m == VM_PAGE_NULL) { + RELEASE_PAGE(m); + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_SHORTAGE); + } + + + XPR(XPR_VM_FAULT, + "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n", + (integer_t)object, offset, + (integer_t)m, (integer_t)copy_m, 0); + vm_page_copy(m, copy_m); + + /* + * If another map is truly sharing this + * page with us, we have to flush all + * uses of the original page, since we + * can't distinguish those which want the + * original from those which need the + * new copy. + * + * XXXO If we know that only one map has + * access to this page, then we could + * avoid the pmap_page_protect() call. + */ + + vm_page_lock_queues(); + assert(!m->cleaning); + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + vm_page_deactivate(m); + copy_m->dirty = TRUE; + /* + * Setting reference here prevents this fault from + * being counted as a (per-thread) reactivate as well + * as a copy-on-write. + */ + first_m->reference = TRUE; + vm_page_unlock_queues(); + + /* + * We no longer need the old page or object. + */ + + PAGE_WAKEUP_DONE(m); + vm_object_paging_end(object); + vm_object_unlock(object); + + if (type_of_fault) + *type_of_fault = DBG_COW_FAULT; + VM_STAT(cow_faults++); + current_task()->cow_faults++; + object = first_object; + offset = first_offset; + + vm_object_lock(object); + VM_PAGE_FREE(first_m); + first_m = VM_PAGE_NULL; + assert(copy_m->busy); + vm_page_insert(copy_m, object, offset); + m = copy_m; + + /* + * Now that we've gotten the copy out of the + * way, let's try to collapse the top object. + * But we have to play ugly games with + * paging_in_progress to do that... 
+ */ + + vm_object_paging_end(object); + vm_object_collapse(object); + vm_object_paging_begin(object); + + } + else { + *protection &= (~VM_PROT_WRITE); + } + } + + /* + * Now check whether the page needs to be pushed into the + * copy object. The use of asymmetric copy on write for + * shared temporary objects means that we may do two copies to + * satisfy the fault; one above to get the page from a + * shadowed object, and one here to push it into the copy. + */ + + while (first_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY && + (copy_object = first_object->copy) != VM_OBJECT_NULL) { + vm_object_offset_t copy_offset; + vm_page_t copy_m; + +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ +#endif + /* + * If the page is being written, but hasn't been + * copied to the copy-object, we have to copy it there. + */ + + if ((fault_type & VM_PROT_WRITE) == 0) { + *protection &= ~VM_PROT_WRITE; + break; + } + + /* + * If the page was guaranteed to be resident, + * we must have already performed the copy. + */ + + if (must_be_resident) + break; + + /* + * Try to get the lock on the copy_object. + */ + if (!vm_object_lock_try(copy_object)) { + vm_object_unlock(object); + + mutex_pause(); /* wait a bit */ + + vm_object_lock(object); + continue; + } + + /* + * Make another reference to the copy-object, + * to keep it from disappearing during the + * copy. + */ + assert(copy_object->ref_count > 0); + copy_object->ref_count++; + VM_OBJ_RES_INCR(copy_object); + + /* + * Does the page exist in the copy? + */ + copy_offset = first_offset - copy_object->shadow_offset; + if (copy_object->size <= copy_offset) + /* + * Copy object doesn't cover this page -- do nothing. + */ + ; + else if ((copy_m = + vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { + /* Page currently exists in the copy object */ + if (copy_m->busy) { + /* + * If the page is being brought + * in, wait for it and then retry. 
+ */ + RELEASE_PAGE(m); + /* take an extra ref so object won't die */ + assert(copy_object->ref_count > 0); + copy_object->ref_count++; + vm_object_res_reference(copy_object); + vm_object_unlock(copy_object); + vm_fault_cleanup(object, first_m); + counter(c_vm_fault_page_block_backoff_kernel++); + vm_object_lock(copy_object); + assert(copy_object->ref_count > 0); + VM_OBJ_RES_DECR(copy_object); + copy_object->ref_count--; + assert(copy_object->ref_count > 0); + copy_m = vm_page_lookup(copy_object, copy_offset); + if (copy_m != VM_PAGE_NULL && copy_m->busy) { + PAGE_ASSERT_WAIT(copy_m, interruptible); + vm_object_unlock(copy_object); + wait_result = thread_block((void (*)(void))0); + vm_object_deallocate(copy_object); + goto backoff; + } else { + vm_object_unlock(copy_object); + vm_object_deallocate(copy_object); + cur_thread->interruptible = interruptible_state; + return VM_FAULT_RETRY; + } + } + } + else if (!PAGED_OUT(copy_object, copy_offset)) { + /* + * If PAGED_OUT is TRUE, then the page used to exist + * in the copy-object, and has already been paged out. + * We don't need to repeat this. If PAGED_OUT is + * FALSE, then either we don't know (!pager_created, + * for example) or it hasn't been paged out. + * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT) + * We must copy the page to the copy object. + */ + + /* + * Allocate a page for the copy + */ + copy_m = vm_page_alloc(copy_object, copy_offset); + if (copy_m == VM_PAGE_NULL) { + RELEASE_PAGE(m); + VM_OBJ_RES_DECR(copy_object); + copy_object->ref_count--; + assert(copy_object->ref_count > 0); + vm_object_unlock(copy_object); + vm_fault_cleanup(object, first_m); + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_MEMORY_SHORTAGE); + } + + /* + * Must copy page into copy-object. + */ + + vm_page_copy(m, copy_m); + + /* + * If the old page was in use by any users + * of the copy-object, it must be removed + * from all pmaps. (We can't know which + * pmaps use it.) 
+ */ + + vm_page_lock_queues(); + assert(!m->cleaning); + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + copy_m->dirty = TRUE; + vm_page_unlock_queues(); + + /* + * If there's a pager, then immediately + * page out this page, using the "initialize" + * option. Else, we use the copy. + */ + + if +#if MACH_PAGEMAP + ((!copy_object->pager_created) || + vm_external_state_get( + copy_object->existence_map, copy_offset) + == VM_EXTERNAL_STATE_ABSENT) +#else + (!copy_object->pager_created) +#endif + { + vm_page_lock_queues(); + vm_page_activate(copy_m); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(copy_m); + } + else { + assert(copy_m->busy == TRUE); + + /* + * The page is already ready for pageout: + * not on pageout queues and busy. + * Unlock everything except the + * copy_object itself. + */ + + vm_object_unlock(object); + + /* + * Write the page to the copy-object, + * flushing it from the kernel. + */ + + vm_pageout_initialize_page(copy_m); + + /* + * Since the pageout may have + * temporarily dropped the + * copy_object's lock, we + * check whether we'll have + * to deallocate the hard way. + */ + + if ((copy_object->shadow != object) || + (copy_object->ref_count == 1)) { + vm_object_unlock(copy_object); + vm_object_deallocate(copy_object); + vm_object_lock(object); + continue; + } + + /* + * Pick back up the old object's + * lock. [It is safe to do so, + * since it must be deeper in the + * object tree.] + */ + + vm_object_lock(object); + } + + /* + * Because we're pushing a page upward + * in the object tree, we must restart + * any faults that are waiting here. + * [Note that this is an expansion of + * PAGE_WAKEUP that uses the THREAD_RESTART + * wait result]. Can't turn off the page's + * busy bit because we're not done with it. 
+ */ + + if (m->wanted) { + m->wanted = FALSE; + thread_wakeup_with_result((event_t) m, + THREAD_RESTART); + } + } + + /* + * The reference count on copy_object must be + * at least 2: one for our extra reference, + * and at least one from the outside world + * (we checked that when we last locked + * copy_object). + */ + copy_object->ref_count--; + assert(copy_object->ref_count > 0); + VM_OBJ_RES_DECR(copy_object); + vm_object_unlock(copy_object); + + break; + } + + *result_page = m; + *top_page = first_m; + + XPR(XPR_VM_FAULT, + "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n", + (integer_t)object, offset, (integer_t)m, (integer_t)first_m, 0); + /* + * If the page can be written, assume that it will be. + * [Earlier, we restrict the permission to allow write + * access only if the fault so required, so we don't + * mark read-only data as dirty.] + */ + +#if !VM_FAULT_STATIC_CONFIG + if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE)) + m->dirty = TRUE; +#endif +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_page_deactivate_behind); /* (TEST/DEBUG) */ +#endif + if (vm_page_deactivate_behind) { + if (offset && /* don't underflow */ + (object->last_alloc == (offset - PAGE_SIZE_64))) { + m = vm_page_lookup(object, object->last_alloc); + if ((m != VM_PAGE_NULL) && !m->busy) { + vm_page_lock_queues(); + vm_page_deactivate(m); + vm_page_unlock_queues(); + } +#if TRACEFAULTPAGE + dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ +#endif + } + object->last_alloc = offset; + } +#if TRACEFAULTPAGE + dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ +#endif + cur_thread->interruptible = interruptible_state; + return(VM_FAULT_SUCCESS); + +#if 0 + block_and_backoff: + vm_fault_cleanup(object, first_m); + + counter(c_vm_fault_page_block_backoff_kernel++); + thread_block((void (*)(void))0); +#endif + + backoff: + cur_thread->interruptible = interruptible_state; 
+ if (wait_result == THREAD_INTERRUPTED) + return VM_FAULT_INTERRUPTED; + return VM_FAULT_RETRY; + +#undef RELEASE_PAGE +} + +/* + * Routine: vm_fault + * Purpose: + * Handle page faults, including pseudo-faults + * used to change the wiring status of pages. + * Returns: + * Explicit continuations have been removed. + * Implementation: + * vm_fault and vm_fault_page save mucho state + * in the moral equivalent of a closure. The state + * structure is allocated when first entering vm_fault + * and deallocated when leaving vm_fault. + */ + +kern_return_t +vm_fault( + vm_map_t map, + vm_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, + int interruptible) +{ + vm_map_version_t version; /* Map version for verificiation */ + boolean_t wired; /* Should mapping be wired down? */ + vm_object_t object; /* Top-level object */ + vm_object_offset_t offset; /* Top-level offset */ + vm_prot_t prot; /* Protection for mapping */ + vm_behavior_t behavior; /* Expected paging behavior */ + vm_object_offset_t lo_offset, hi_offset; + vm_object_t old_copy_object; /* Saved copy object */ + vm_page_t result_page; /* Result of vm_fault_page */ + vm_page_t top_page; /* Placeholder page */ + kern_return_t kr; + + register + vm_page_t m; /* Fast access to result_page */ + kern_return_t error_code; /* page error reasons */ + register + vm_object_t cur_object; + register + vm_object_offset_t cur_offset; + vm_page_t cur_m; + vm_object_t new_object; + int type_of_fault; + vm_map_t pmap_map = map; + vm_map_t original_map = map; + pmap_t pmap = NULL; + boolean_t funnel_set = FALSE; + funnel_t *curflock; + thread_t cur_thread; + boolean_t interruptible_state; + + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_START, + vaddr, + 0, + 0, + 0, + 0); + + cur_thread = current_thread(); + + interruptible_state = cur_thread->interruptible; + if (interruptible == THREAD_UNINT) + cur_thread->interruptible = FALSE; + + /* + * assume we will hit a page in the cache + * 
otherwise, explicitly override with + * the real fault type once we determine it + */ + type_of_fault = DBG_CACHE_HIT_FAULT; + + VM_STAT(faults++); + current_task()->faults++; + + /* + * drop funnel if it is already held. Then restore while returning + */ + if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) { + funnel_set = TRUE; + curflock = cur_thread->funnel_lock; + thread_funnel_set( curflock , FALSE); + } + + RetryFault: ; + + /* + * Find the backing store object and offset into + * it to begin the search. + */ + map = original_map; + vm_map_lock_read(map); + kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version, + &object, &offset, + &prot, &wired, + &behavior, &lo_offset, &hi_offset, &pmap_map); + + pmap = pmap_map->pmap; + + if (kr != KERN_SUCCESS) { + vm_map_unlock_read(map); + goto done; + } + + /* + * If the page is wired, we must fault for the current protection + * value, to avoid further faults. + */ + + if (wired) + fault_type = prot | VM_PROT_WRITE; + +#if VM_FAULT_CLASSIFY + /* + * Temporary data gathering code + */ + vm_fault_classify(object, offset, fault_type); +#endif + /* + * Fast fault code. The basic idea is to do as much as + * possible while holding the map lock and object locks. + * Busy pages are not used until the object lock has to + * be dropped to do something (copy, zero fill, pmap enter). + * Similarly, paging references aren't acquired until that + * point, and object references aren't used. + * + * If we can figure out what to do + * (zero fill, copy on write, pmap enter) while holding + * the locks, then it gets done. Otherwise, we give up, + * and use the original fault path (which doesn't hold + * the map lock, and relies on busy pages). + * The give up cases include: + * - Have to talk to pager. + * - Page is busy, absent or in error. + * - Pager has locked out desired access. + * - Fault needs to be restarted. + * - Have to push page into copy object. 
+ * + * The code is an infinite loop that moves one level down + * the shadow chain each time. cur_object and cur_offset + * refer to the current object being examined. object and offset + * are the original object from the map. The loop is at the + * top level if and only if object and cur_object are the same. + * + * Invariants: Map lock is held throughout. Lock is held on + * original object and cur_object (if different) when + * continuing or exiting loop. + * + */ + + + /* + * If this page is to be inserted in a copy delay object + * for writing, and if the object has a copy, then the + * copy delay strategy is implemented in the slow fault page. + */ + if (object->copy_strategy != MEMORY_OBJECT_COPY_DELAY || + object->copy == VM_OBJECT_NULL || + (fault_type & VM_PROT_WRITE) == 0) { + cur_object = object; + cur_offset = offset; + + while (TRUE) { + m = vm_page_lookup(cur_object, cur_offset); + if (m != VM_PAGE_NULL) { + if (m->busy) + break; + + if (m->unusual && (m->error || m->restart || + m->absent || (fault_type & m->page_lock))) { + + /* + * Unusual case. Give up. + */ + break; + } + + /* + * Two cases of map in faults: + * - At top level w/o copy object. + * - Read fault anywhere. + * --> must disallow write. + */ + + if (object == cur_object && + object->copy == VM_OBJECT_NULL) + goto FastMapInFault; + + if ((fault_type & VM_PROT_WRITE) == 0) { + + prot &= ~VM_PROT_WRITE; + + /* + * Set up to map the page ... + * mark the page busy, drop + * locks and take a paging reference + * on the object with the page. + */ + + if (object != cur_object) { + vm_object_unlock(object); + object = cur_object; + } +FastMapInFault: + m->busy = TRUE; + + vm_object_paging_begin(object); + vm_object_unlock(object); + +FastPmapEnter: + /* + * Check a couple of global reasons to + * be conservative about write access. + * Then do the pmap_enter. 
+ */ +#if !VM_FAULT_STATIC_CONFIG + if (vm_fault_dirty_handling +#if MACH_KDB + || db_watchpoint_list +#endif + && (fault_type & VM_PROT_WRITE) == 0) + prot &= ~VM_PROT_WRITE; +#else /* STATIC_CONFIG */ +#if MACH_KDB + if (db_watchpoint_list + && (fault_type & VM_PROT_WRITE) == 0) + prot &= ~VM_PROT_WRITE; +#endif /* MACH_KDB */ +#endif /* STATIC_CONFIG */ + PMAP_ENTER(pmap, vaddr, m, prot, wired); + pmap_attribute(pmap, + vaddr, + PAGE_SIZE, + MATTR_CACHE, + &mv_cache_sync); + + if (m->clustered) { + vm_pagein_cluster_used++; + m->clustered = FALSE; + + } + /* + * Grab the object lock to manipulate + * the page queues. Change wiring + * case is obvious. In soft ref bits + * case activate page only if it fell + * off paging queues, otherwise just + * activate it if it's inactive. + * + * NOTE: original vm_fault code will + * move active page to back of active + * queue. This code doesn't. + */ + vm_object_lock(object); + vm_page_lock_queues(); + + m->reference = TRUE; + + if (change_wiring) { + if (wired) + vm_page_wire(m); + else + vm_page_unwire(m); + } +#if VM_FAULT_STATIC_CONFIG + else { + if (!m->active && !m->inactive) + vm_page_activate(m); + } +#else + else if (software_reference_bits) { + if (!m->active && !m->inactive) + vm_page_activate(m); + } + else if (!m->active) { + vm_page_activate(m); + } +#endif + vm_page_unlock_queues(); + + /* + * That's it, clean up and return. + */ + PAGE_WAKEUP_DONE(m); + vm_object_paging_end(object); + vm_object_unlock(object); + vm_map_unlock_read(map); + if(pmap_map != map) + vm_map_unlock(pmap_map); + + if (funnel_set) { + thread_funnel_set( curflock, TRUE); + funnel_set = FALSE; + } + cur_thread->interruptible = interruptible_state; + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END, + vaddr, + type_of_fault, + KERN_SUCCESS, + 0, + 0); + return KERN_SUCCESS; + } + + /* + * Copy on write fault. 
If objects match, then + * object->copy must not be NULL (else control + * would be in previous code block), and we + * have a potential push into the copy object + * with which we won't cope here. + */ + + if (cur_object == object) + break; + + /* + * This is now a shadow based copy on write + * fault -- it requires a copy up the shadow + * chain. + * + * Allocate a page in the original top level + * object. Give up if allocate fails. Also + * need to remember current page, as it's the + * source of the copy. + */ + cur_m = m; + m = vm_page_grab(); + if (m == VM_PAGE_NULL) { + break; + } + + /* + * Now do the copy. Mark the source busy + * and take out paging references on both + * objects. + * + * NOTE: This code holds the map lock across + * the page copy. + */ + + cur_m->busy = TRUE; + vm_page_copy(cur_m, m); + vm_page_insert(m, object, offset); + + vm_object_paging_begin(cur_object); + vm_object_paging_begin(object); + + type_of_fault = DBG_COW_FAULT; + VM_STAT(cow_faults++); + current_task()->cow_faults++; + + /* + * Now cope with the source page and object + * If the top object has a ref count of 1 + * then no other map can access it, and hence + * it's not necessary to do the pmap_page_protect. + */ + + + vm_page_lock_queues(); + vm_page_deactivate(cur_m); + m->dirty = TRUE; + pmap_page_protect(cur_m->phys_addr, + VM_PROT_NONE); + vm_page_unlock_queues(); + + PAGE_WAKEUP_DONE(cur_m); + vm_object_paging_end(cur_object); + vm_object_unlock(cur_object); + + /* + * Slight hack to call vm_object collapse + * and then reuse common map in code. + * note that the object lock was taken above. + */ + + vm_object_paging_end(object); + vm_object_collapse(object); + vm_object_paging_begin(object); + vm_object_unlock(object); + + goto FastPmapEnter; + } + else { + + /* + * No page at cur_object, cur_offset + */ + + if (cur_object->pager_created) { + + /* + * Have to talk to the pager. Give up. 
+ */ + + break; + } + + + if (cur_object->shadow == VM_OBJECT_NULL) { + + if (cur_object->shadow_severed) { + vm_object_paging_end(object); + vm_object_unlock(object); + vm_map_unlock_read(map); + if(pmap_map != map) + vm_map_unlock(pmap_map); + + if (funnel_set) { + thread_funnel_set( curflock, TRUE); + funnel_set = FALSE; + } + cur_thread->interruptible = interruptible_state; + + return VM_FAULT_MEMORY_ERROR; + } + + /* + * Zero fill fault. Page gets + * filled in top object. Insert + * page, then drop any lower lock. + * Give up if no page. + */ + if ((vm_page_free_target - + ((vm_page_free_target-vm_page_free_min)>>2)) + > vm_page_free_count) { + break; + } + m = vm_page_alloc(object, offset); + if (m == VM_PAGE_NULL) { + break; + } + + if (cur_object != object) + vm_object_unlock(cur_object); + + vm_object_paging_begin(object); + vm_object_unlock(object); + + /* + * Now zero fill page and map it. + * the page is probably going to + * be written soon, so don't bother + * to clear the modified bit + * + * NOTE: This code holds the map + * lock across the zero fill. + */ + + if (!map->no_zero_fill) { + vm_page_zero_fill(m); + type_of_fault = DBG_ZERO_FILL_FAULT; + VM_STAT(zero_fill_count++); + } + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + queue_enter(&vm_page_queue_inactive, + m, vm_page_t, pageq); + m->inactive = TRUE; + vm_page_inactive_count++; + vm_page_unlock_queues(); + goto FastPmapEnter; + } + + /* + * On to the next level + */ + + cur_offset += cur_object->shadow_offset; + new_object = cur_object->shadow; + vm_object_lock(new_object); + if (cur_object != object) + vm_object_unlock(cur_object); + cur_object = new_object; + + continue; + } + } + + /* + * Cleanup from fast fault failure. Drop any object + * lock other than original and drop map lock. 
+ */ + + if (object != cur_object) + vm_object_unlock(cur_object); + } + vm_map_unlock_read(map); + if(pmap_map != map) + vm_map_unlock(pmap_map); + + /* + * Make a reference to this object to + * prevent its disposal while we are messing with + * it. Once we have the reference, the map is free + * to be diddled. Since objects reference their + * shadows (and copies), they will stay around as well. + */ + + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_object_paging_begin(object); + + XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0); + kr = vm_fault_page(object, offset, fault_type, + (change_wiring && !wired), + interruptible, + lo_offset, hi_offset, behavior, + &prot, &result_page, &top_page, + &type_of_fault, + &error_code, map->no_zero_fill, FALSE); + + /* + * If we didn't succeed, lose the object reference immediately. + */ + + if (kr != VM_FAULT_SUCCESS) + vm_object_deallocate(object); + + /* + * See why we failed, and take corrective action. + */ + + switch (kr) { + case VM_FAULT_SUCCESS: + break; + case VM_FAULT_MEMORY_SHORTAGE: + if (vm_page_wait((change_wiring) ? + THREAD_UNINT : + THREAD_ABORTSAFE)) + goto RetryFault; + /* fall thru */ + case VM_FAULT_INTERRUPTED: + kr = KERN_ABORTED; + goto done; + case VM_FAULT_RETRY: + goto RetryFault; + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + goto RetryFault; + case VM_FAULT_MEMORY_ERROR: + if (error_code) + kr = error_code; + else + kr = KERN_MEMORY_ERROR; + goto done; + } + + m = result_page; + + assert((change_wiring && !wired) ? + (top_page == VM_PAGE_NULL) : + ((top_page == VM_PAGE_NULL) == (m->object == object))); + + /* + * How to clean up the result of vm_fault_page. This + * happens whether the mapping is entered or not. 
+ */ + +#define UNLOCK_AND_DEALLOCATE \ + MACRO_BEGIN \ + vm_fault_cleanup(m->object, top_page); \ + vm_object_deallocate(object); \ + MACRO_END + + /* + * What to do with the resulting page from vm_fault_page + * if it doesn't get entered into the physical map: + */ + +#define RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PAGE_WAKEUP_DONE(m); \ + vm_page_lock_queues(); \ + if (!m->active && !m->inactive) \ + vm_page_activate(m); \ + vm_page_unlock_queues(); \ + MACRO_END + + /* + * We must verify that the maps have not changed + * since our last lookup. + */ + + old_copy_object = m->object->copy; + + vm_object_unlock(m->object); + if ((map != original_map) || !vm_map_verify(map, &version)) { + vm_object_t retry_object; + vm_object_offset_t retry_offset; + vm_prot_t retry_prot; + + /* + * To avoid trying to write_lock the map while another + * thread has it read_locked (in vm_map_pageable), we + * do not try for write permission. If the page is + * still writable, we will get write permission. If it + * is not, or has been marked needs_copy, we enter the + * mapping without write permission, and will merely + * take another fault. + */ + map = original_map; + vm_map_lock_read(map); + kr = vm_map_lookup_locked(&map, vaddr, + fault_type & ~VM_PROT_WRITE, &version, + &retry_object, &retry_offset, &retry_prot, + &wired, &behavior, &lo_offset, &hi_offset, + &pmap_map); + pmap = pmap_map->pmap; + + if (kr != KERN_SUCCESS) { + vm_map_unlock_read(map); + vm_object_lock(m->object); + RELEASE_PAGE(m); + UNLOCK_AND_DEALLOCATE; + goto done; + } + + vm_object_unlock(retry_object); + vm_object_lock(m->object); + + if ((retry_object != object) || + (retry_offset != offset)) { + vm_map_unlock_read(map); + if(pmap_map != map) + vm_map_unlock(pmap_map); + RELEASE_PAGE(m); + UNLOCK_AND_DEALLOCATE; + goto RetryFault; + } + + /* + * Check whether the protection has changed or the object + * has been copied while we left the map unlocked. 
+ */ + prot &= retry_prot; + vm_object_unlock(m->object); + } + vm_object_lock(m->object); + + /* + * If the copy object changed while the top-level object + * was unlocked, then we must take away write permission. + */ + + if (m->object->copy != old_copy_object) + prot &= ~VM_PROT_WRITE; + + /* + * If we want to wire down this page, but no longer have + * adequate permissions, we must start all over. + */ + + if (wired && (fault_type != (prot|VM_PROT_WRITE))) { + vm_map_verify_done(map, &version); + if(pmap_map != map) + vm_map_unlock(pmap_map); + RELEASE_PAGE(m); + UNLOCK_AND_DEALLOCATE; + goto RetryFault; + } + + /* + * It's critically important that a wired-down page be faulted + * only once in each map for which it is wired. + */ + vm_object_unlock(m->object); + + /* + * Put this page into the physical map. + * We had to do the unlock above because pmap_enter + * may cause other faults. The page may be on + * the pageout queues. If the pageout daemon comes + * across the page, it will remove it from the queues. + */ + PMAP_ENTER(pmap, vaddr, m, prot, wired); + + /* Sync I & D caches for new mapping*/ + pmap_attribute(pmap, + vaddr, + PAGE_SIZE, + MATTR_CACHE, + &mv_cache_sync); + + /* + * If the page is not wired down and isn't already + * on a pageout queue, then put it where the + * pageout daemon can find it. 
+ */ + vm_object_lock(m->object); + vm_page_lock_queues(); + if (change_wiring) { + if (wired) + vm_page_wire(m); + else + vm_page_unwire(m); + } +#if VM_FAULT_STATIC_CONFIG + else { + if (!m->active && !m->inactive) + vm_page_activate(m); + m->reference = TRUE; + } +#else + else if (software_reference_bits) { + if (!m->active && !m->inactive) + vm_page_activate(m); + m->reference = TRUE; + } else { + vm_page_activate(m); + } +#endif + vm_page_unlock_queues(); + + /* + * Unlock everything, and return + */ + + vm_map_verify_done(map, &version); + if(pmap_map != map) + vm_map_unlock(pmap_map); + PAGE_WAKEUP_DONE(m); + kr = KERN_SUCCESS; + UNLOCK_AND_DEALLOCATE; + +#undef UNLOCK_AND_DEALLOCATE +#undef RELEASE_PAGE + + done: + if (funnel_set) { + thread_funnel_set( curflock, TRUE); + funnel_set = FALSE; + } + cur_thread->interruptible = interruptible_state; + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, 0)) | DBG_FUNC_END, + vaddr, + type_of_fault, + kr, + 0, + 0); + return(kr); +} + +/* + * vm_fault_wire: + * + * Wire down a range of virtual addresses in a map. + */ +kern_return_t +vm_fault_wire( + vm_map_t map, + vm_map_entry_t entry, + pmap_t pmap) +{ + + register vm_offset_t va; + register vm_offset_t end_addr = entry->vme_end; + register kern_return_t rc; + + assert(entry->in_transition); + + /* + * Inform the physical mapping system that the + * range of addresses may not fault, so that + * page tables and such can be locked down as well. + */ + + pmap_pageable(pmap, entry->vme_start, end_addr, FALSE); + + /* + * We simulate a fault to get the page and enter it + * in the physical map. + */ + + for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { + if ((rc = vm_fault_wire_fast( + map, va, entry, pmap)) != KERN_SUCCESS) { + rc = vm_fault(map, va, VM_PROT_NONE, TRUE, + (pmap == kernel_pmap) ? 
THREAD_UNINT : THREAD_ABORTSAFE); + } + + if (rc != KERN_SUCCESS) { + struct vm_map_entry tmp_entry = *entry; + + /* unwire wired pages */ + tmp_entry.vme_end = va; + vm_fault_unwire(map, &tmp_entry, FALSE, pmap); + + return rc; + } + } + return KERN_SUCCESS; +} + +/* + * vm_fault_unwire: + * + * Unwire a range of virtual addresses in a map. + */ +void +vm_fault_unwire( + vm_map_t map, + vm_map_entry_t entry, + boolean_t deallocate, + pmap_t pmap) +{ + register vm_offset_t va; + register vm_offset_t end_addr = entry->vme_end; + vm_object_t object; + + object = (entry->is_sub_map) + ? VM_OBJECT_NULL : entry->object.vm_object; + + /* + * Since the pages are wired down, we must be able to + * get their mappings from the physical map system. + */ + + for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { + pmap_change_wiring(pmap, va, FALSE); + + if (object == VM_OBJECT_NULL) { + (void) vm_fault(map, va, VM_PROT_NONE, TRUE, THREAD_UNINT); + } else { + vm_prot_t prot; + vm_page_t result_page; + vm_page_t top_page; + vm_object_t result_object; + vm_fault_return_t result; + + do { + prot = VM_PROT_NONE; + + vm_object_lock(object); + vm_object_paging_begin(object); + XPR(XPR_VM_FAULT, + "vm_fault_unwire -> vm_fault_page\n", + 0,0,0,0,0); + result = vm_fault_page(object, + entry->offset + + (va - entry->vme_start), + VM_PROT_NONE, TRUE, + THREAD_UNINT, + entry->offset, + entry->offset + + (entry->vme_end + - entry->vme_start), + entry->behavior, + &prot, + &result_page, + &top_page, + (int *)0, + 0, map->no_zero_fill, + FALSE); + } while (result == VM_FAULT_RETRY); + + if (result != VM_FAULT_SUCCESS) + panic("vm_fault_unwire: failure"); + + result_object = result_page->object; + if (deallocate) { + assert(!result_page->fictitious); + pmap_page_protect(result_page->phys_addr, + VM_PROT_NONE); + VM_PAGE_FREE(result_page); + } else { + vm_page_lock_queues(); + vm_page_unwire(result_page); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(result_page); + } + + 
vm_fault_cleanup(result_object, top_page); + } + } + + /* + * Inform the physical mapping system that the range + * of addresses may fault, so that page tables and + * such may be unwired themselves. + */ + + pmap_pageable(pmap, entry->vme_start, end_addr, TRUE); + +} + +/* + * vm_fault_wire_fast: + * + * Handle common case of a wire down page fault at the given address. + * If successful, the page is inserted into the associated physical map. + * The map entry is passed in to avoid the overhead of a map lookup. + * + * NOTE: the given address should be truncated to the + * proper page address. + * + * KERN_SUCCESS is returned if the page fault is handled; otherwise, + * a standard error specifying why the fault is fatal is returned. + * + * The map in question must be referenced, and remains so. + * Caller has a read lock on the map. + * + * This is a stripped version of vm_fault() for wiring pages. Anything + * other than the common case will return KERN_FAILURE, and the caller + * is expected to call vm_fault(). + */ +kern_return_t +vm_fault_wire_fast( + vm_map_t map, + vm_offset_t va, + vm_map_entry_t entry, + pmap_t pmap) +{ + vm_object_t object; + vm_object_offset_t offset; + register vm_page_t m; + vm_prot_t prot; + thread_act_t thr_act; + + VM_STAT(faults++); + + if((thr_act=current_act()) && (thr_act->task != TASK_NULL)) + thr_act->task->faults++; + +/* + * Recovery actions + */ + +#undef RELEASE_PAGE +#define RELEASE_PAGE(m) { \ + PAGE_WAKEUP_DONE(m); \ + vm_page_lock_queues(); \ + vm_page_unwire(m); \ + vm_page_unlock_queues(); \ +} + + +#undef UNLOCK_THINGS +#define UNLOCK_THINGS { \ + object->paging_in_progress--; \ + vm_object_unlock(object); \ +} + +#undef UNLOCK_AND_DEALLOCATE +#define UNLOCK_AND_DEALLOCATE { \ + UNLOCK_THINGS; \ + vm_object_deallocate(object); \ +} +/* + * Give up and have caller do things the hard way. 
+ */ + +#define GIVE_UP { \ + UNLOCK_AND_DEALLOCATE; \ + return(KERN_FAILURE); \ +} + + + /* + * If this entry is not directly to a vm_object, bail out. + */ + if (entry->is_sub_map) + return(KERN_FAILURE); + + /* + * Find the backing store object and offset into it. + */ + + object = entry->object.vm_object; + offset = (va - entry->vme_start) + entry->offset; + prot = entry->protection; + + /* + * Make a reference to this object to prevent its + * disposal while we are messing with it. + */ + + vm_object_lock(object); + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + object->paging_in_progress++; + + /* + * INVARIANTS (through entire routine): + * + * 1) At all times, we must either have the object + * lock or a busy page in some object to prevent + * some other thread from trying to bring in + * the same page. + * + * 2) Once we have a busy page, we must remove it from + * the pageout queues, so that the pageout daemon + * will not grab it away. + * + */ + + /* + * Look for page in top-level object. If it's not there or + * there's something going on, give up. + */ + m = vm_page_lookup(object, offset); + if ((m == VM_PAGE_NULL) || (m->busy) || + (m->unusual && ( m->error || m->restart || m->absent || + prot & m->page_lock))) { + + GIVE_UP; + } + + /* + * Wire the page down now. All bail outs beyond this + * point must unwire the page. + */ + + vm_page_lock_queues(); + vm_page_wire(m); + vm_page_unlock_queues(); + + /* + * Mark page busy for other threads. + */ + assert(!m->busy); + m->busy = TRUE; + assert(!m->absent); + + /* + * Give up if the page is being written and there's a copy object + */ + if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) { + RELEASE_PAGE(m); + GIVE_UP; + } + + /* + * Put this page into the physical map. + * We have to unlock the object because pmap_enter + * may cause other faults. 
+ */ + vm_object_unlock(object); + + PMAP_ENTER(pmap, va, m, prot, TRUE); + /* Sync I & D caches for new mapping */ + pmap_attribute(pmap, + va, + PAGE_SIZE, + MATTR_CACHE, + &mv_cache_sync); + + /* + * Must relock object so that paging_in_progress can be cleared. + */ + vm_object_lock(object); + + /* + * Unlock everything, and return + */ + + PAGE_WAKEUP_DONE(m); + UNLOCK_AND_DEALLOCATE; + + return(KERN_SUCCESS); + +} + +/* + * Routine: vm_fault_copy_cleanup + * Purpose: + * Release a page used by vm_fault_copy. + */ + +void +vm_fault_copy_cleanup( + vm_page_t page, + vm_page_t top_page) +{ + vm_object_t object = page->object; + + vm_object_lock(object); + PAGE_WAKEUP_DONE(page); + vm_page_lock_queues(); + if (!page->active && !page->inactive) + vm_page_activate(page); + vm_page_unlock_queues(); + vm_fault_cleanup(object, top_page); +} + +void +vm_fault_copy_dst_cleanup( + vm_page_t page) +{ + vm_object_t object; + + if (page != VM_PAGE_NULL) { + object = page->object; + vm_object_lock(object); + vm_page_lock_queues(); + vm_page_unwire(page); + vm_page_unlock_queues(); + vm_object_paging_end(object); + vm_object_unlock(object); + } +} + +/* + * Routine: vm_fault_copy + * + * Purpose: + * Copy pages from one virtual memory object to another -- + * neither the source nor destination pages need be resident. + * + * Before actually copying a page, the version associated with + * the destination address map wil be verified. + * + * In/out conditions: + * The caller must hold a reference, but not a lock, to + * each of the source and destination objects and to the + * destination map. + * + * Results: + * Returns KERN_SUCCESS if no errors were encountered in + * reading or writing the data. Returns KERN_INTERRUPTED if + * the operation was interrupted (only possible if the + * "interruptible" argument is asserted). Other return values + * indicate a permanent error in copying the data. 
+ * + * The actual amount of data copied will be returned in the + * "copy_size" argument. In the event that the destination map + * verification failed, this amount may be less than the amount + * requested. + */ +kern_return_t +vm_fault_copy( + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_size_t *src_size, /* INOUT */ + vm_object_t dst_object, + vm_object_offset_t dst_offset, + vm_map_t dst_map, + vm_map_version_t *dst_version, + int interruptible) +{ + vm_page_t result_page; + + vm_page_t src_page; + vm_page_t src_top_page; + vm_prot_t src_prot; + + vm_page_t dst_page; + vm_page_t dst_top_page; + vm_prot_t dst_prot; + + vm_size_t amount_left; + vm_object_t old_copy_object; + kern_return_t error = 0; + + vm_size_t part_size; + + /* + * In order not to confuse the clustered pageins, align + * the different offsets on a page boundary. + */ + vm_object_offset_t src_lo_offset = trunc_page_64(src_offset); + vm_object_offset_t dst_lo_offset = trunc_page_64(dst_offset); + vm_object_offset_t src_hi_offset = round_page_64(src_offset + *src_size); + vm_object_offset_t dst_hi_offset = round_page_64(dst_offset + *src_size); + +#define RETURN(x) \ + MACRO_BEGIN \ + *src_size -= amount_left; \ + MACRO_RETURN(x); \ + MACRO_END + + amount_left = *src_size; + do { /* while (amount_left > 0) */ + /* + * There may be a deadlock if both source and destination + * pages are the same. To avoid this deadlock, the copy must + * start by getting the destination page in order to apply + * COW semantics if any. 
+ */ + + RetryDestinationFault: ; + + dst_prot = VM_PROT_WRITE|VM_PROT_READ; + + vm_object_lock(dst_object); + vm_object_paging_begin(dst_object); + + XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0); + switch (vm_fault_page(dst_object, + trunc_page_64(dst_offset), + VM_PROT_WRITE|VM_PROT_READ, + FALSE, + interruptible, + dst_lo_offset, + dst_hi_offset, + VM_BEHAVIOR_SEQUENTIAL, + &dst_prot, + &dst_page, + &dst_top_page, + (int *)0, + &error, + dst_map->no_zero_fill, + FALSE)) { + case VM_FAULT_SUCCESS: + break; + case VM_FAULT_RETRY: + goto RetryDestinationFault; + case VM_FAULT_MEMORY_SHORTAGE: + if (vm_page_wait(interruptible)) + goto RetryDestinationFault; + /* fall thru */ + case VM_FAULT_INTERRUPTED: + RETURN(MACH_SEND_INTERRUPTED); + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + goto RetryDestinationFault; + case VM_FAULT_MEMORY_ERROR: + if (error) + return (error); + else + return(KERN_MEMORY_ERROR); + } + assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); + + old_copy_object = dst_page->object->copy; + + /* + * There exists the possiblity that the source and + * destination page are the same. But we can't + * easily determine that now. If they are the + * same, the call to vm_fault_page() for the + * destination page will deadlock. To prevent this we + * wire the page so we can drop busy without having + * the page daemon steal the page. We clean up the + * top page but keep the paging reference on the object + * holding the dest page so it doesn't go away. + */ + + vm_page_lock_queues(); + vm_page_wire(dst_page); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(dst_page); + vm_object_unlock(dst_page->object); + + if (dst_top_page != VM_PAGE_NULL) { + vm_object_lock(dst_object); + VM_PAGE_FREE(dst_top_page); + vm_object_paging_end(dst_object); + vm_object_unlock(dst_object); + } + + RetrySourceFault: ; + + if (src_object == VM_OBJECT_NULL) { + /* + * No source object. We will just + * zero-fill the page in dst_object. 
+ */ + src_page = VM_PAGE_NULL; + } else { + vm_object_lock(src_object); + src_page = vm_page_lookup(src_object, + trunc_page_64(src_offset)); + if (src_page == dst_page) + src_prot = dst_prot; + else { + src_prot = VM_PROT_READ; + vm_object_paging_begin(src_object); + + XPR(XPR_VM_FAULT, + "vm_fault_copy(2) -> vm_fault_page\n", + 0,0,0,0,0); + switch (vm_fault_page(src_object, + trunc_page_64(src_offset), + VM_PROT_READ, + FALSE, + interruptible, + src_lo_offset, + src_hi_offset, + VM_BEHAVIOR_SEQUENTIAL, + &src_prot, + &result_page, + &src_top_page, + (int *)0, + &error, + FALSE, + FALSE)) { + + case VM_FAULT_SUCCESS: + break; + case VM_FAULT_RETRY: + goto RetrySourceFault; + case VM_FAULT_MEMORY_SHORTAGE: + if (vm_page_wait(interruptible)) + goto RetrySourceFault; + /* fall thru */ + case VM_FAULT_INTERRUPTED: + vm_fault_copy_dst_cleanup(dst_page); + RETURN(MACH_SEND_INTERRUPTED); + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + goto RetrySourceFault; + case VM_FAULT_MEMORY_ERROR: + vm_fault_copy_dst_cleanup(dst_page); + if (error) + return (error); + else + return(KERN_MEMORY_ERROR); + } + + src_page = result_page; + + assert((src_top_page == VM_PAGE_NULL) == + (src_page->object == src_object)); + } + assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE); + vm_object_unlock(src_page->object); + } + + if (!vm_map_verify(dst_map, dst_version)) { + if (src_page != VM_PAGE_NULL && src_page != dst_page) + vm_fault_copy_cleanup(src_page, src_top_page); + vm_fault_copy_dst_cleanup(dst_page); + break; + } + + vm_object_lock(dst_page->object); + + if (dst_page->object->copy != old_copy_object) { + vm_object_unlock(dst_page->object); + vm_map_verify_done(dst_map, dst_version); + if (src_page != VM_PAGE_NULL && src_page != dst_page) + vm_fault_copy_cleanup(src_page, src_top_page); + vm_fault_copy_dst_cleanup(dst_page); + break; + } + vm_object_unlock(dst_page->object); + + /* + * Copy the page, and note that it is dirty + * immediately. 
+ */ + + if (!page_aligned(src_offset) || + !page_aligned(dst_offset) || + !page_aligned(amount_left)) { + + vm_object_offset_t src_po, + dst_po; + + src_po = src_offset - trunc_page_64(src_offset); + dst_po = dst_offset - trunc_page_64(dst_offset); + + if (dst_po > src_po) { + part_size = PAGE_SIZE - dst_po; + } else { + part_size = PAGE_SIZE - src_po; + } + if (part_size > (amount_left)){ + part_size = amount_left; + } + + if (src_page == VM_PAGE_NULL) { + vm_page_part_zero_fill(dst_page, + dst_po, part_size); + } else { + vm_page_part_copy(src_page, src_po, + dst_page, dst_po, part_size); + if(!dst_page->dirty){ + vm_object_lock(dst_object); + dst_page->dirty = TRUE; + vm_object_unlock(dst_page->object); + } + + } + } else { + part_size = PAGE_SIZE; + + if (src_page == VM_PAGE_NULL) + vm_page_zero_fill(dst_page); + else{ + vm_page_copy(src_page, dst_page); + if(!dst_page->dirty){ + vm_object_lock(dst_object); + dst_page->dirty = TRUE; + vm_object_unlock(dst_page->object); + } + } + + } + + /* + * Unlock everything, and return + */ + + vm_map_verify_done(dst_map, dst_version); + + if (src_page != VM_PAGE_NULL && src_page != dst_page) + vm_fault_copy_cleanup(src_page, src_top_page); + vm_fault_copy_dst_cleanup(dst_page); + + amount_left -= part_size; + src_offset += part_size; + dst_offset += part_size; + } while (amount_left > 0); + + RETURN(KERN_SUCCESS); +#undef RETURN + + /*NOTREACHED*/ +} + +#ifdef notdef + +/* + * Routine: vm_fault_page_overwrite + * + * Description: + * A form of vm_fault_page that assumes that the + * resulting page will be overwritten in its entirety, + * making it unnecessary to obtain the correct *contents* + * of the page. + * + * Implementation: + * XXX Untested. Also unused. Eventually, this technology + * could be used in vm_fault_copy() to advantage. 
 */
vm_fault_return_t
vm_fault_page_overwrite(
	register
	vm_object_t	dst_object,	/* locked on entry; lock is dropped/retaken around waits */
	vm_object_offset_t	dst_offset,
	vm_page_t	*result_page)	/* OUT */
{
	register
	vm_page_t	dst_page;
	kern_return_t	wait_result;

	/* Interruptibility is hard-wired off; the XXX marks this as provisional. */
#define	interruptible	THREAD_UNINT	/* XXX */

	while (TRUE) {
		/*
		 *	Look for a page at this offset
		 */

		while ((dst_page = vm_page_lookup(dst_object, dst_offset))
				 == VM_PAGE_NULL) {
			/*
			 *	No page, no problem... just allocate one.
			 */

			dst_page = vm_page_alloc(dst_object, dst_offset);
			if (dst_page == VM_PAGE_NULL) {
				/* Out of free pages: drop the object lock and wait. */
				vm_object_unlock(dst_object);
				VM_PAGE_WAIT();
				vm_object_lock(dst_object);
				continue;
			}

			/*
			 *	Pretend that the memory manager
			 *	write-protected the page.
			 *
			 *	Note that we will be asking for write
			 *	permission without asking for the data
			 *	first.
			 */

			dst_page->overwriting = TRUE;
			dst_page->page_lock = VM_PROT_WRITE;
			dst_page->absent = TRUE;
			dst_page->unusual = TRUE;
			dst_object->absent_count++;

			break;

			/*
			 *	When we bail out, we might have to throw
			 *	away the page created here.
			 */

#define	DISCARD_PAGE						\
	MACRO_BEGIN						\
	vm_object_lock(dst_object);				\
	dst_page = vm_page_lookup(dst_object, dst_offset);	\
	if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting) 	\
		VM_PAGE_FREE(dst_page);				\
	vm_object_unlock(dst_object);				\
	MACRO_END
		}

		/*
		 *	If the page is write-protected...
		 */

		if (dst_page->page_lock & VM_PROT_WRITE) {
			/*
			 *	... and an unlock request hasn't been sent
			 */

			if ( ! (dst_page->unlock_request & VM_PROT_WRITE)) {
				vm_prot_t	u;
				kern_return_t	rc;

				/*
				 *	... then send one now.
				 */

				if (!dst_object->pager_ready) {
					/* Pager not ready: wait for it, rechecking after wakeup. */
					vm_object_assert_wait(dst_object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
					vm_object_unlock(dst_object);
					wait_result = thread_block((void (*)(void))0);
					if (wait_result != THREAD_AWAKENED) {
						DISCARD_PAGE;
						return(VM_FAULT_INTERRUPTED);
					}
					continue;
				}

				u = dst_page->unlock_request |= VM_PROT_WRITE;
				vm_object_unlock(dst_object);

				/* Ask the external pager to grant write access. */
				if ((rc = memory_object_data_unlock(
						dst_object->pager,
						dst_object->pager_request,
						dst_offset + dst_object->paging_offset,
						PAGE_SIZE,
						u)) != KERN_SUCCESS) {
					if (vm_fault_debug)
						printf("vm_object_overwrite: memory_object_data_unlock failed\n");
					DISCARD_PAGE;
					return((rc == MACH_SEND_INTERRUPTED) ?
						VM_FAULT_INTERRUPTED :
						VM_FAULT_MEMORY_ERROR);
				}
				vm_object_lock(dst_object);
				continue;
			}

			/* ... fall through to wait below */
		} else {
			/*
			 *	If the page isn't being used for other
			 *	purposes, then we're done.
			 */
			if ( ! (dst_page->busy || dst_page->absent ||
				dst_page->error || dst_page->restart) )
				break;
		}

		/* Page is busy/absent/in error: wait for it to settle, then retry. */
		PAGE_ASSERT_WAIT(dst_page, interruptible);
		vm_object_unlock(dst_object);
		wait_result = thread_block((void (*)(void))0);
		if (wait_result != THREAD_AWAKENED) {
			DISCARD_PAGE;
			return(VM_FAULT_INTERRUPTED);
		}
	}

	*result_page = dst_page;
	return(VM_FAULT_SUCCESS);

#undef	interruptible
#undef	DISCARD_PAGE
}

#endif	/* notdef */

#if	VM_FAULT_CLASSIFY
/*
 *	Temporary statistics gathering support.
+ */ + +/* + * Statistics arrays: + */ +#define VM_FAULT_TYPES_MAX 5 +#define VM_FAULT_LEVEL_MAX 8 + +int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX]; + +#define VM_FAULT_TYPE_ZERO_FILL 0 +#define VM_FAULT_TYPE_MAP_IN 1 +#define VM_FAULT_TYPE_PAGER 2 +#define VM_FAULT_TYPE_COPY 3 +#define VM_FAULT_TYPE_OTHER 4 + + +void +vm_fault_classify(vm_object_t object, + vm_object_offset_t offset, + vm_prot_t fault_type) +{ + int type, level = 0; + vm_page_t m; + + while (TRUE) { + m = vm_page_lookup(object, offset); + if (m != VM_PAGE_NULL) { + if (m->busy || m->error || m->restart || m->absent || + fault_type & m->page_lock) { + type = VM_FAULT_TYPE_OTHER; + break; + } + if (((fault_type & VM_PROT_WRITE) == 0) || + ((level == 0) && object->copy == VM_OBJECT_NULL)) { + type = VM_FAULT_TYPE_MAP_IN; + break; + } + type = VM_FAULT_TYPE_COPY; + break; + } + else { + if (object->pager_created) { + type = VM_FAULT_TYPE_PAGER; + break; + } + if (object->shadow == VM_OBJECT_NULL) { + type = VM_FAULT_TYPE_ZERO_FILL; + break; + } + + offset += object->shadow_offset; + object = object->shadow; + level++; + continue; + } + } + + if (level > VM_FAULT_LEVEL_MAX) + level = VM_FAULT_LEVEL_MAX; + + vm_fault_stats[type][level] += 1; + + return; +} + +/* cleanup routine to call from debugger */ + +void +vm_fault_classify_init(void) +{ + int type, level; + + for (type = 0; type < VM_FAULT_TYPES_MAX; type++) { + for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) { + vm_fault_stats[type][level] = 0; + } + } + + return; +} +#endif /* VM_FAULT_CLASSIFY */ diff --git a/osfmk/vm/vm_fault.h b/osfmk/vm/vm_fault.h new file mode 100644 index 000000000..edab4633b --- /dev/null +++ b/osfmk/vm/vm_fault.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/vm_fault.h + * + * Page fault handling module declarations. 
 */

#ifndef	_VM_VM_FAULT_H_
#define	_VM_VM_FAULT_H_

/*
 * NOTE(review): the angle-bracketed header names on the #include lines
 * below were stripped during extraction (they read as bare "#include").
 * Restore them from the original osfmk/vm/vm_fault.h -- TODO confirm.
 */
#include
#include
#include
#include
#include
#include
#include
#include

/* Result codes for vm_fault_page(). */
typedef	kern_return_t	vm_fault_return_t;
#define VM_FAULT_SUCCESS		0
#define VM_FAULT_RETRY			1
#define VM_FAULT_INTERRUPTED		2
#define VM_FAULT_MEMORY_SHORTAGE 	3
#define VM_FAULT_FICTITIOUS_SHORTAGE 	4
#define VM_FAULT_MEMORY_ERROR		5

extern void vm_fault_init(void);

/*
 *	Page fault handling based on vm_object only.
 */

extern vm_fault_return_t vm_fault_page(
		/* Arguments: */
		vm_object_t	first_object,	/* Object to begin search */
		vm_object_offset_t first_offset,/* Offset into object */
		vm_prot_t	fault_type,	/* What access is requested */
		boolean_t	must_be_resident,/* Must page be resident? */
		int		interruptible,	/* how may fault be interrupted */
		vm_object_offset_t lo_offset,	/* Map entry start */
		vm_object_offset_t hi_offset,	/* Map entry end */
		vm_behavior_t	behavior,	/* Expected paging behavior */
		/* Modifies in place: */
		vm_prot_t	*protection,	/* Protection for mapping */
		/* Returns: */
		vm_page_t	*result_page,	/* Page found, if successful */
		vm_page_t	*top_page,	/* Page in top object, if
						 * not result_page. */
		int		*type_of_fault,	/* if non-zero, return COW, zero-filled, etc...
						 * used by kernel trace point in vm_fault */
		/* More arguments: */
		kern_return_t	*error_code,	/* code if page is in error */
		boolean_t	no_zero_fill,	/* don't fill absent pages */
		boolean_t	data_supply);	/* treat as data_supply */

extern void vm_fault_cleanup(
		vm_object_t	object,
		vm_page_t	top_page);
/*
 *	Page fault handling based on vm_map (or entries therein)
 */

extern kern_return_t vm_fault(
		vm_map_t	map,
		vm_offset_t	vaddr,
		vm_prot_t	fault_type,
		boolean_t	change_wiring,
		int		interruptible);

extern kern_return_t vm_fault_wire(
		vm_map_t	map,
		vm_map_entry_t	entry,
		pmap_t		pmap);

extern void vm_fault_unwire(
		vm_map_t	map,
		vm_map_entry_t	entry,
		boolean_t	deallocate,
		pmap_t		pmap);

extern kern_return_t	vm_fault_copy(
		vm_object_t		src_object,
		vm_object_offset_t	src_offset,
		vm_size_t		*src_size,	/* INOUT */
		vm_object_t		dst_object,
		vm_object_offset_t	dst_offset,
		vm_map_t		dst_map,
		vm_map_version_t	*dst_version,
		int			interruptible);

#endif	/* _VM_VM_FAULT_H_ */
diff --git a/osfmk/vm/vm_init.c b/osfmk/vm/vm_init.c
new file mode 100644
index 000000000..fc085b3c8
--- /dev/null
+++ b/osfmk/vm/vm_init.c
@@ -0,0 +1,118 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.
Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_init.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Initialize the Virtual Memory subsystem.
 */

/*
 * NOTE(review): #include targets below were stripped during extraction;
 * restore from the original osfmk/vm/vm_init.c -- TODO confirm.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Bounds for the zone allocator's backing map (bytes). */
#define ZONE_MAP_MIN	(12 * 1024 * 1024)
#define ZONE_MAP_MAX	(128 * 1024 * 1024)

/*
 *	vm_mem_bootstrap initializes the virtual memory system.
 *	This is done only by the first cpu up.
 */

void
vm_mem_bootstrap(void)
{
	vm_offset_t	start, end;
	vm_size_t	zsize;

	/*
	 *	Initializes resident memory structures.
	 *	From here on, all physical memory is accounted for,
	 *	and we use only virtual addresses.
	 */

	vm_page_bootstrap(&start, &end);

	/*
	 *	Initialize other VM packages
	 *	(order matters: zones before objects, map before kmem).
	 */

	zone_bootstrap();
	vm_object_bootstrap();
	vm_map_init();
	kmem_init(start, end);
	pmap_init();

	zsize = mem_size >> 2;			/* Get target zone size as 1/4 of physical memory */
	if(zsize < ZONE_MAP_MIN) zsize = ZONE_MAP_MIN;	/* Clamp to min */
	if(zsize > ZONE_MAP_MAX) zsize = ZONE_MAP_MAX;	/* Clamp to max */
	zone_init(zsize);			/* Allocate address space for zones */

	kalloc_init();
	vm_fault_init();
	vm_page_module_init();
	memory_manager_default_init();
}

/* Second-phase VM initialization: finish vm_object setup. */
void
vm_mem_init(void)
{
	vm_object_init();
}
diff --git a/osfmk/vm/vm_init.h b/osfmk/vm/vm_init.h
new file mode 100644
index 000000000..98733d845
--- /dev/null
+++ b/osfmk/vm/vm_init.h
@@ -0,0 +1,33 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef VM_INIT_H +#define VM_INIT_H + +extern void vm_mem_bootstrap(void); +extern void vm_mem_init(void); +extern void vm_map_steal_memory(void); + +#endif /* VM_INIT_H */ diff --git a/osfmk/vm/vm_kern.c b/osfmk/vm/vm_kern.c new file mode 100644 index 000000000..7d957c220 --- /dev/null +++ b/osfmk/vm/vm_kern.c @@ -0,0 +1,975 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/vm_kern.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Kernel memory management. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +/* + * Variables exported by this module. + */ + +vm_map_t kernel_map; +vm_map_t kernel_pageable_map; + +/* + * Forward declarations for internal functions. + */ +extern kern_return_t kmem_alloc_pages( + register vm_object_t object, + register vm_object_offset_t offset, + register vm_offset_t start, + register vm_offset_t end, + vm_prot_t protection); + +extern void kmem_remap_pages( + register vm_object_t object, + register vm_object_offset_t offset, + register vm_offset_t start, + register vm_offset_t end, + vm_prot_t protection); + +kern_return_t +kmem_alloc_contig( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + int flags) +{ + vm_object_t object; + vm_page_t m, pages; + kern_return_t kr; + vm_offset_t addr, i; + vm_object_offset_t offset; + vm_map_entry_t entry; + + if (map == VM_MAP_NULL || (flags && (flags ^ KMA_KOBJECT))) + return KERN_INVALID_ARGUMENT; + + if (size == 0) { + *addrp = 0; + return KERN_INVALID_ARGUMENT; + } + + size = round_page(size); + if ((flags & KMA_KOBJECT) == 0) { + object = vm_object_allocate(size); + kr = vm_map_find_space(map, &addr, size, mask, &entry); + } + else { + object = kernel_object; + kr = vm_map_find_space(map, &addr, size, 
mask, &entry); + } + + if ((flags & KMA_KOBJECT) == 0) { + entry->object.vm_object = object; + entry->offset = offset = 0; + } else { + offset = addr - VM_MIN_KERNEL_ADDRESS; + + if (entry->object.vm_object == VM_OBJECT_NULL) { + vm_object_reference(object); + entry->object.vm_object = object; + entry->offset = offset; + } + } + + if (kr != KERN_SUCCESS) { + if ((flags & KMA_KOBJECT) == 0) + vm_object_deallocate(object); + return kr; + } + + vm_map_unlock(map); + + kr = cpm_allocate(size, &pages, FALSE); + + if (kr != KERN_SUCCESS) { + vm_map_remove(map, addr, addr + size, 0); + *addrp = 0; + return kr; + } + + vm_object_lock(object); + for (i = 0; i < size; i += PAGE_SIZE) { + m = pages; + pages = NEXT_PAGE(m); + m->busy = FALSE; + vm_page_insert(m, object, offset + i); + } + vm_object_unlock(object); + + if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE)) + != KERN_SUCCESS) { + if (object == kernel_object) { + vm_object_lock(object); + vm_object_page_remove(object, offset, offset + size); + vm_object_unlock(object); + } + vm_map_remove(map, addr, addr + size, 0); + return kr; + } + if (object == kernel_object) + vm_map_simplify(map, addr); + + *addrp = addr; + return KERN_SUCCESS; +} + +/* + * Master entry point for allocating kernel memory. + * NOTE: this routine is _never_ interrupt safe. 
 *
 *	map		: map to allocate into
 *	addrp		: pointer to start address of new memory
 *	size		: size of memory requested
 *	flags		: options
 *			  KMA_HERE		*addrp is base address, else "anywhere"
 *			  KMA_NOPAGEWAIT	don't wait for pages if unavailable
 *			  KMA_KOBJECT		use kernel_object
 */

kern_return_t
kernel_memory_allocate(
	register vm_map_t	map,
	register vm_offset_t	*addrp,
	register vm_size_t	size,
	register vm_offset_t	mask,
	int			flags)
{
	vm_object_t 		object = VM_OBJECT_NULL;
	vm_map_entry_t 		entry;
	vm_object_offset_t 	offset;
	vm_offset_t 		addr;
	vm_offset_t		i;
	kern_return_t 		kr;

	size = round_page(size);
	if ((flags & KMA_KOBJECT) == 0) {
		/*
		 *	Allocate a new object.  We must do this before locking
		 *	the map, or risk deadlock with the default pager:
		 *		device_read_alloc uses kmem_alloc,
		 *		which tries to allocate an object,
		 *		which uses kmem_alloc_wired to get memory,
		 *		which blocks for pages.
		 *		then the default pager needs to read a block
		 *		to process a memory_object_data_write,
		 *		and device_read_alloc calls kmem_alloc
		 *		and deadlocks on the map lock.
		 */
		object = vm_object_allocate(size);
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	else {
		object = kernel_object;
		kr = vm_map_find_space(map, &addr, size, mask, &entry);
	}
	if (kr != KERN_SUCCESS) {
		/* No address space: drop the object we created (if any). */
		if ((flags & KMA_KOBJECT) == 0)
			vm_object_deallocate(object);
		return kr;
	}

	if ((flags & KMA_KOBJECT) == 0) {
		entry->object.vm_object = object;
		entry->offset = offset = 0;
	} else {
		/* kernel_object is addressed by kernel virtual offset. */
		offset = addr - VM_MIN_KERNEL_ADDRESS;

		if (entry->object.vm_object == VM_OBJECT_NULL) {
			vm_object_reference(object);
			entry->object.vm_object = object;
			entry->offset = offset;
		}
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	vm_object_lock(object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t	mem;

		while ((mem = vm_page_alloc(object,
				offset + (vm_object_offset_t)i))
			    == VM_PAGE_NULL) {
			if (flags & KMA_NOPAGEWAIT) {
				/* Caller refuses to wait: back everything out. */
				if (object == kernel_object)
					vm_object_page_remove(object, offset,
						offset + (vm_object_offset_t)i);
				vm_object_unlock(object);
				vm_map_remove(map, addr, addr + size, 0);
				return KERN_RESOURCE_SHORTAGE;
			}
			vm_object_unlock(object);
			VM_PAGE_WAIT();
			vm_object_lock(object);
		}
		mem->busy = FALSE;
	}
	vm_object_unlock(object);

	if ((kr = vm_map_wire(map, addr, addr + size, VM_PROT_DEFAULT, FALSE))
		!= KERN_SUCCESS) {
		if (object == kernel_object) {
			vm_object_lock(object);
			vm_object_page_remove(object, offset, offset + size);
			vm_object_unlock(object);
		}
		vm_map_remove(map, addr, addr + size, 0);
		return (kr);
	}
	if (object == kernel_object)
		vm_map_simplify(map, addr);

	/*
	 *	Return the memory, not zeroed.
	 */
#if	(NCPUS > 1)  &&  i860
	bzero( addr, size );
#endif	/* #if (NCPUS > 1)  &&  i860 */
	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	return kernel_memory_allocate(map, addrp, size, 0, 0);
}

/*
 *	kmem_realloc:
 *
 *	Reallocate wired-down memory in the kernel's address map
 *	or a submap.  Newly allocated pages are not zeroed.
 *	This can only be used on regions allocated with kmem_alloc.
 *
 *	If successful, the pages in the old region are mapped twice.
 *	The old region is unchanged.  Use kmem_free to get rid of it.
+ */ +kern_return_t +kmem_realloc( + vm_map_t map, + vm_offset_t oldaddr, + vm_size_t oldsize, + vm_offset_t *newaddrp, + vm_size_t newsize) +{ + vm_offset_t oldmin, oldmax; + vm_offset_t newaddr; + vm_object_t object; + vm_map_entry_t oldentry, newentry; + kern_return_t kr; + + oldmin = trunc_page(oldaddr); + oldmax = round_page(oldaddr + oldsize); + oldsize = oldmax - oldmin; + newsize = round_page(newsize); + + /* + * Find space for the new region. + */ + + kr = vm_map_find_space(map, &newaddr, newsize, (vm_offset_t) 0, + &newentry); + if (kr != KERN_SUCCESS) { + return kr; + } + + /* + * Find the VM object backing the old region. + */ + + if (!vm_map_lookup_entry(map, oldmin, &oldentry)) + panic("kmem_realloc"); + object = oldentry->object.vm_object; + + /* + * Increase the size of the object and + * fill in the new region. + */ + + vm_object_reference(object); + vm_object_lock(object); + if (object->size != oldsize) + panic("kmem_realloc"); + object->size = newsize; + vm_object_unlock(object); + + newentry->object.vm_object = object; + newentry->offset = 0; + assert (newentry->wired_count == 0); + newentry->wired_count = 1; + + /* + * Since we have not given out this address yet, + * it is safe to unlock the map. We are trusting + * that nobody will play with either region. + */ + + vm_map_unlock(map); + + /* + * Remap the pages in the old region and + * allocate more pages for the new region. + */ + + kmem_remap_pages(object, 0, + newaddr, newaddr + oldsize, + VM_PROT_DEFAULT); + kmem_alloc_pages(object, oldsize, + newaddr + oldsize, newaddr + newsize, + VM_PROT_DEFAULT); + + *newaddrp = newaddr; + return KERN_SUCCESS; +} + +/* + * kmem_alloc_wired: + * + * Allocate wired-down memory in the kernel's address map + * or a submap. The memory is not zero-filled. + * + * The memory is allocated in the kernel_object. + * It may not be copied with vm_map_copy, and + * it may not be reallocated with kmem_realloc. 
+ */ + +kern_return_t +kmem_alloc_wired( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size) +{ + return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT); +} + +/* + * kmem_alloc_aligned: + * + * Like kmem_alloc_wired, except that the memory is aligned. + * The size should be a power-of-2. + */ + +kern_return_t +kmem_alloc_aligned( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size) +{ + if ((size & (size - 1)) != 0) + panic("kmem_alloc_aligned: size not aligned"); + return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT); +} + +/* + * kmem_alloc_pageable: + * + * Allocate pageable memory in the kernel's address map. + */ + +kern_return_t +kmem_alloc_pageable( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size) +{ + vm_offset_t addr; + kern_return_t kr; + +#ifndef normal + addr = (vm_map_min(map)) + 0x1000; +#else + addr = vm_map_min(map); +#endif + kr = vm_map_enter(map, &addr, round_page(size), + (vm_offset_t) 0, TRUE, + VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) + return kr; + + *addrp = addr; + return KERN_SUCCESS; +} + +/* + * kmem_free: + * + * Release a region of kernel virtual memory allocated + * with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable, + * and return the physical pages associated with that region. + */ + +void +kmem_free( + vm_map_t map, + vm_offset_t addr, + vm_size_t size) +{ + kern_return_t kr; + + kr = vm_map_remove(map, trunc_page(addr), + round_page(addr + size), VM_MAP_REMOVE_KUNWIRE); + if (kr != KERN_SUCCESS) + panic("kmem_free"); +} + +/* + * Allocate new wired pages in an object. + * The object is assumed to be mapped into the kernel map or + * a submap. + */ + +kern_return_t +kmem_alloc_pages( + register vm_object_t object, + register vm_object_offset_t offset, + register vm_offset_t start, + register vm_offset_t end, + vm_prot_t protection) +{ + /* + * Mark the pmap region as not pageable. 
+ */ + pmap_pageable(kernel_pmap, start, end, FALSE); + + while (start < end) { + register vm_page_t mem; + + vm_object_lock(object); + + /* + * Allocate a page + */ + while ((mem = vm_page_alloc(object, offset)) + == VM_PAGE_NULL) { + vm_object_unlock(object); + VM_PAGE_WAIT(); + vm_object_lock(object); + } + + /* + * Wire it down + */ + vm_page_lock_queues(); + vm_page_wire(mem); + vm_page_unlock_queues(); + vm_object_unlock(object); + + /* + * Enter it in the kernel pmap + */ + PMAP_ENTER(kernel_pmap, start, mem, + protection, TRUE); + + vm_object_lock(object); + PAGE_WAKEUP_DONE(mem); + vm_object_unlock(object); + + start += PAGE_SIZE; + offset += PAGE_SIZE_64; + } + return KERN_SUCCESS; +} + +/* + * Remap wired pages in an object into a new region. + * The object is assumed to be mapped into the kernel map or + * a submap. + */ +void +kmem_remap_pages( + register vm_object_t object, + register vm_object_offset_t offset, + register vm_offset_t start, + register vm_offset_t end, + vm_prot_t protection) +{ + /* + * Mark the pmap region as not pageable. + */ + pmap_pageable(kernel_pmap, start, end, FALSE); + + while (start < end) { + register vm_page_t mem; + + vm_object_lock(object); + + /* + * Find a page + */ + if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL) + panic("kmem_remap_pages"); + + /* + * Wire it down (again) + */ + vm_page_lock_queues(); + vm_page_wire(mem); + vm_page_unlock_queues(); + vm_object_unlock(object); + + /* + * Enter it in the kernel pmap. The page isn't busy, + * but this shouldn't be a problem because it is wired. + */ + PMAP_ENTER(kernel_pmap, start, mem, + protection, TRUE); + + start += PAGE_SIZE; + offset += PAGE_SIZE; + } +} + +/* + * kmem_suballoc: + * + * Allocates a map to manage a subrange + * of the kernel virtual address space. 
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	addr		Address of start of range (IN/OUT)
 *	size		Size of range to find
 *	pageable	Can region be paged
 *	anywhere	Can region be located anywhere in map
 *	new_map		Pointer to new submap
 */
kern_return_t
kmem_suballoc(
	vm_map_t	parent,
	vm_offset_t	*addr,
	vm_size_t	size,
	boolean_t	pageable,
	boolean_t	anywhere,
	vm_map_t	*new_map)
{
	vm_map_t	map;
	kern_return_t	kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	if (anywhere == TRUE)
		*addr = (vm_offset_t)vm_map_min(parent);
	kr = vm_map_enter(parent, addr, size,
			  (vm_offset_t) 0, anywhere,
			  vm_submap_object, (vm_object_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	pmap_reference(vm_map_pmap(parent));
	map = vm_map_create(vm_map_pmap(parent), *addr, *addr + size, pageable);
	if (map == VM_MAP_NULL)
		panic("kmem_suballoc: vm_map_create failed");	/* "can't happen" */

	kr = vm_map_submap(parent, *addr, *addr + size, map, *addr, FALSE);
	if (kr != KERN_SUCCESS) {
		/*
		 * See comment preceding vm_map_submap().
		 */
		vm_map_remove(parent, *addr, *addr + size, VM_MAP_NO_FLAGS);
		vm_map_deallocate(map);	/* also removes ref to pmap */
		vm_object_deallocate(vm_submap_object);
		return (kr);
	}

	*new_map = map;
	return (KERN_SUCCESS);
}

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void
kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	kernel_map = vm_map_create(pmap_kernel(),
				   VM_MIN_KERNEL_ADDRESS, end,
				   FALSE);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */

	if (start != VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
		(void) vm_map_enter(kernel_map,
			    &addr, start - VM_MIN_KERNEL_ADDRESS,
			    (vm_offset_t) 0, TRUE,
			    VM_OBJECT_NULL,
			    (vm_object_offset_t) 0, FALSE,
			    VM_PROT_DEFAULT, VM_PROT_ALL,
			    VM_INHERIT_DEFAULT);
	}

	/*
	 * Account for kernel memory (text, data, bss, vm shenanigans).
	 * This may include inaccessible "holes" as determined by what
	 * the machine-dependent init code includes in mem_size.
	 */
	vm_page_wire_count = (atop(mem_size) - (vm_page_free_count
						+ vm_page_active_count
						+ vm_page_inactive_count));
}

/*
 *	kmem_io_map_copyout:
 *
 *	Establish temporary mapping in designated map for the memory
 *	passed in.  Memory format must be a page_list vm_map_copy.
 */

kern_return_t
kmem_io_map_copyout(
	vm_map_t 	map,
	vm_offset_t	*addr,  	/* actual addr of data */
	vm_size_t	*alloc_size,	/* size allocated */
	vm_map_copy_t	copy,
	vm_size_t	min_size,	/* Do at least this much */
	vm_prot_t	prot)		/* Protection of mapping */
{
	vm_offset_t	myaddr, offset;
	vm_size_t	mysize, copy_size;
	kern_return_t	ret;
	register
	vm_page_t	*page_list;
	vm_map_copy_t	new_copy;
	register
	int		i;

	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
	assert(min_size != 0);

	/*
	 *	Figure out the size in vm pages.
	 */
	min_size += (vm_size_t)(copy->offset - trunc_page_64(copy->offset));
	min_size = round_page(min_size);
	mysize = (vm_size_t)(round_page_64(
			copy->offset + (vm_object_offset_t)copy->size) -
			trunc_page_64(copy->offset));

	/*
	 *	If total size is larger than one page list and
	 *	we don't have to do more than one page list, then
	 *	only do one page list.
	 *
	 * XXX	Could be much smarter about this ... like trimming length
	 * XXX	if we need more than one page list but not all of them.
+ */ + + copy_size = ptoa(copy->cpy_npages); + if (mysize > copy_size && copy_size > min_size) + mysize = copy_size; + + /* + * Allocate some address space in the map (must be kernel + * space). + */ + myaddr = vm_map_min(map); + ret = vm_map_enter(map, &myaddr, mysize, + (vm_offset_t) 0, TRUE, + VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE, + prot, prot, VM_INHERIT_DEFAULT); + + if (ret != KERN_SUCCESS) + return(ret); + + /* + * Tell the pmap module that this will be wired, and + * enter the mappings. + */ + pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE); + + *addr = myaddr + (vm_offset_t) + (copy->offset - trunc_page_64(copy->offset)); + *alloc_size = mysize; + + offset = myaddr; + page_list = ©->cpy_page_list[0]; + while (TRUE) { + for ( i = 0; i < copy->cpy_npages; i++, offset+=PAGE_SIZE_64) { + PMAP_ENTER(vm_map_pmap(map), + (vm_offset_t)offset, *page_list, + prot, TRUE); + page_list++; + } + + if (offset == (myaddr + mysize)) + break; + + /* + * Onward to the next page_list. The extend_cont + * leaves the current page list's pages alone; + * they'll be cleaned up at discard. Reset this + * copy's continuation to discard the next one. + */ + vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret); + + if (ret != KERN_SUCCESS) { + kmem_io_map_deallocate(map, myaddr, mysize); + return(ret); + } + copy->cpy_cont = vm_map_copy_discard_cont; + copy->cpy_cont_args = (vm_map_copyin_args_t) new_copy; + assert(new_copy != VM_MAP_COPY_NULL); + assert(new_copy->type == VM_MAP_COPY_PAGE_LIST); + copy = new_copy; + page_list = ©->cpy_page_list[0]; + } + + return(ret); +} + +/* + * kmem_io_map_deallocate: + * + * Get rid of the mapping established by kmem_io_map_copyout. + * Assumes that addr and size have been rounded to page boundaries. 
+ */ + +void +kmem_io_map_deallocate( + vm_map_t map, + vm_offset_t addr, + vm_size_t size) +{ + + register vm_offset_t va, end; + + end = round_page(addr + size); + for (va = trunc_page(addr); va < end; va += PAGE_SIZE) + pmap_change_wiring(vm_map_pmap(map), va, FALSE); + + /* + * Remove the mappings. The pmap_remove is needed. + */ + + pmap_remove(vm_map_pmap(map), addr, addr + size); + vm_map_remove(map, addr, addr + size, VM_MAP_REMOVE_KUNWIRE); +} + + +/* + * kmem_io_object_trunc: + * + * Truncate an object vm_map_copy_t. + * Called by the scatter/gather list network code to remove pages from + * the tail end of a packet. Also unwires the objects pages. + */ + +kern_return_t +kmem_io_object_trunc(copy, new_size) + vm_map_copy_t copy; /* IN/OUT copy object */ + register vm_size_t new_size; /* IN new object size */ +{ + register vm_size_t offset, old_size; + + assert(copy->type == VM_MAP_COPY_OBJECT); + + old_size = (vm_size_t)round_page_64(copy->size); + copy->size = new_size; + new_size = round_page(new_size); + + vm_object_lock(copy->cpy_object); + vm_object_page_remove(copy->cpy_object, + (vm_object_offset_t)new_size, (vm_object_offset_t)old_size); + for (offset = 0; offset < new_size; offset += PAGE_SIZE) { + register vm_page_t mem; + + if ((mem = vm_page_lookup(copy->cpy_object, + (vm_object_offset_t)offset)) == VM_PAGE_NULL) + panic("kmem_io_object_trunc: unable to find object page"); + + /* + * Make sure these pages are marked dirty + */ + mem->dirty = TRUE; + vm_page_lock_queues(); + vm_page_unwire(mem); + vm_page_unlock_queues(); + } + copy->cpy_object->size = new_size; /* adjust size of object */ + vm_object_unlock(copy->cpy_object); + return(KERN_SUCCESS); +} + +/* + * kmem_io_object_deallocate: + * + * Free an vm_map_copy_t. + * Called by the scatter/gather list network code to free a packet. 
+ */ + +void +kmem_io_object_deallocate( + vm_map_copy_t copy) /* IN/OUT copy object */ +{ + kern_return_t ret; + + /* + * Clear out all the object pages (this will leave an empty object). + */ + ret = kmem_io_object_trunc(copy, 0); + if (ret != KERN_SUCCESS) + panic("kmem_io_object_deallocate: unable to truncate object"); + /* + * ...and discard the copy object. + */ + vm_map_copy_discard(copy); +} + +/* + * Routine: copyinmap + * Purpose: + * Like copyin, except that fromaddr is an address + * in the specified VM map. This implementation + * is incomplete; it handles the current user map + * and the kernel map/submaps. + */ +boolean_t +copyinmap( + vm_map_t map, + vm_offset_t fromaddr, + vm_offset_t toaddr, + vm_size_t length) +{ + if (vm_map_pmap(map) == pmap_kernel()) { + /* assume a correct copy */ + memcpy((void *)toaddr, (void *)fromaddr, length); + return FALSE; + } + + if (current_map() == map) + return copyin((char *)fromaddr, (char *)toaddr, length); + + return TRUE; +} + +/* + * Routine: copyoutmap + * Purpose: + * Like copyout, except that toaddr is an address + * in the specified VM map. This implementation + * is incomplete; it handles the current user map + * and the kernel map/submaps. + */ +boolean_t +copyoutmap( + vm_map_t map, + vm_offset_t fromaddr, + vm_offset_t toaddr, + vm_size_t length) +{ + if (vm_map_pmap(map) == pmap_kernel()) { + /* assume a correct copy */ + memcpy((void *)toaddr, (void *)fromaddr, length); + return FALSE; + } + + if (current_map() == map) + return copyout((char *)fromaddr, (char *)toaddr, length); + + return TRUE; +} diff --git a/osfmk/vm/vm_kern.h b/osfmk/vm/vm_kern.h new file mode 100644 index 000000000..ec3d2cf2d --- /dev/null +++ b/osfmk/vm/vm_kern.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: vm/vm_kern.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Kernel memory management definitions. + */ + +#ifndef _VM_VM_KERN_H_ +#define _VM_VM_KERN_H_ + +#include +#include +#include +#include + +extern void kmem_init( + vm_offset_t start, + vm_offset_t end); + +extern kern_return_t kernel_memory_allocate( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + int flags); + +/* flags for kernel_memory_allocate */ +#define KMA_HERE 0x01 +#define KMA_NOPAGEWAIT 0x02 +#define KMA_KOBJECT 0x04 + +extern kern_return_t kmem_alloc_contig( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + int flags); + +extern kern_return_t kmem_alloc( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); + +extern kern_return_t kmem_alloc_pageable( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); + +extern kern_return_t kmem_alloc_wired( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); + +extern kern_return_t kmem_alloc_aligned( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); + +extern kern_return_t kmem_realloc( + vm_map_t map, + vm_offset_t oldaddr, + vm_size_t oldsize, + vm_offset_t *newaddrp, + vm_size_t newsize); + +extern void kmem_free( + vm_map_t map, + vm_offset_t addr, + vm_size_t size); + +extern kern_return_t kmem_suballoc( + vm_map_t parent, + vm_offset_t *addr, + vm_size_t size, + boolean_t pageable, + boolean_t anywhere, + vm_map_t *new_map); + +extern kern_return_t kmem_io_map_copyout( + vm_map_t map, + vm_offset_t *addr, + vm_size_t *alloc_size, + vm_map_copy_t copy, + vm_size_t min_size, + vm_prot_t prot); + +extern void kmem_io_map_deallocate( + vm_map_t map, + vm_offset_t addr, + vm_size_t size); + +extern void kmem_io_object_deallocate( + vm_map_copy_t copy); + +extern kern_return_t kmem_io_object_trunc( + vm_map_copy_t copy, + vm_size_t new_size); + +extern boolean_t copyinmap( + vm_map_t map, + vm_offset_t fromaddr, + vm_offset_t 
toaddr, + vm_size_t length); + +extern boolean_t copyoutmap( + vm_map_t map, + vm_offset_t fromaddr, + vm_offset_t toaddr, + vm_size_t length); + +extern vm_map_t kernel_map; +extern vm_map_t kernel_pageable_map; +extern vm_map_t ipc_kernel_map; + +#endif /* _VM_VM_KERN_H_ */ diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c new file mode 100644 index 000000000..bfb9a05d9 --- /dev/null +++ b/osfmk/vm/vm_map.c @@ -0,0 +1,10299 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/vm_map.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Virtual memory mapping module. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Internal prototypes + */ +extern boolean_t vm_map_range_check( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_map_entry_t *entry); + +extern vm_map_entry_t _vm_map_entry_create( + struct vm_map_header *map_header); + +extern void _vm_map_entry_dispose( + struct vm_map_header *map_header, + vm_map_entry_t entry); + +extern void vm_map_pmap_enter( + vm_map_t map, + vm_offset_t addr, + vm_offset_t end_addr, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection); + +extern void _vm_map_clip_end( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_offset_t end); + +extern void vm_map_entry_delete( + vm_map_t map, + vm_map_entry_t entry); + +extern kern_return_t vm_map_delete( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + int flags); + +extern void vm_map_copy_steal_pages( + vm_map_copy_t copy); + +extern kern_return_t vm_map_copy_overwrite_unaligned( + vm_map_t dst_map, + vm_map_entry_t entry, + vm_map_copy_t copy, + vm_offset_t start); + +extern kern_return_t vm_map_copy_overwrite_aligned( + vm_map_t dst_map, + vm_map_entry_t tmp_entry, 
+ vm_map_copy_t copy, + vm_offset_t start, + pmap_t pmap); + +extern kern_return_t vm_map_copyin_kernel_buffer( + vm_map_t src_map, + vm_offset_t src_addr, + vm_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result); /* OUT */ + +extern kern_return_t vm_map_copyout_kernel_buffer( + vm_map_t map, + vm_offset_t *addr, /* IN/OUT */ + vm_map_copy_t copy, + boolean_t overwrite); + +extern kern_return_t vm_map_copyin_page_list_cont( + vm_map_copyin_args_t cont_args, + vm_map_copy_t *copy_result); /* OUT */ + +extern void vm_map_fork_share( + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map); + +extern boolean_t vm_map_fork_copy( + vm_map_t old_map, + vm_map_entry_t *old_entry_p, + vm_map_t new_map); + +extern kern_return_t vm_remap_range_allocate( + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t size, + vm_offset_t mask, + boolean_t anywhere, + vm_map_entry_t *map_entry); /* OUT */ + +extern void _vm_map_clip_start( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_offset_t start); + +void vm_region_top_walk( + vm_map_entry_t entry, + vm_region_top_info_t top); + +void vm_region_walk( + vm_map_entry_t entry, + vm_region_extended_info_t extended, + vm_object_offset_t offset, + vm_offset_t range, + vm_map_t map, + vm_offset_t va); + +/* + * Macros to copy a vm_map_entry. We must be careful to correctly + * manage the wired page count. vm_map_entry_copy() creates a new + * map entry to the same memory - the wired count in the new entry + * must be set to zero. vm_map_entry_copy_full() creates a new + * entry that is identical to the old entry. This preserves the + * wire count; it's used for map splitting and zone changing in + * vm_map_copyout. 
+ */ +#define vm_map_entry_copy(NEW,OLD) \ +MACRO_BEGIN \ + *(NEW) = *(OLD); \ + (NEW)->is_shared = FALSE; \ + (NEW)->needs_wakeup = FALSE; \ + (NEW)->in_transition = FALSE; \ + (NEW)->wired_count = 0; \ + (NEW)->user_wired_count = 0; \ +MACRO_END + +#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD)) + +/* + * Virtual memory maps provide for the mapping, protection, + * and sharing of virtual memory objects. In addition, + * this module provides for an efficient virtual copy of + * memory from one map to another. + * + * Synchronization is required prior to most operations. + * + * Maps consist of an ordered doubly-linked list of simple + * entries; a single hint is used to speed up lookups. + * + * Sharing maps have been deleted from this version of Mach. + * All shared objects are now mapped directly into the respective + * maps. This requires a change in the copy on write strategy; + * the asymmetric (delayed) strategy is used for shared temporary + * objects instead of the symmetric (shadow) strategy. All maps + * are now "top level" maps (either task map, kernel map or submap + * of the kernel map). + * + * Since portions of maps are specified by start/end addreses, + * which may not align with existing map entries, all + * routines merely "clip" entries to these start/end values. + * [That is, an entry is split into two, bordering at a + * start or end value.] Note that these clippings may not + * always be necessary (as the two resulting entries are then + * not changed); however, the clipping is done for convenience. + * No attempt is currently made to "glue back together" two + * abutting entries. + * + * The symmetric (shadow) copy strategy implements virtual copy + * by copying VM object references from one map to + * another, and then marking both regions as copy-on-write. 
+ * It is important to note that only one writeable reference + * to a VM object region exists in any map when this strategy + * is used -- this means that shadow object creation can be + * delayed until a write operation occurs. The symmetric (delayed) + * strategy allows multiple maps to have writeable references to + * the same region of a vm object, and hence cannot delay creating + * its copy objects. See vm_object_copy_quickly() in vm_object.c. + * Copying of permanent objects is completely different; see + * vm_object_copy_strategically() in vm_object.c. + */ + +zone_t vm_map_zone; /* zone for vm_map structures */ +zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ +zone_t vm_map_kentry_zone; /* zone for kernel entry structures */ +zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ + + +/* + * Placeholder object for submap operations. This object is dropped + * into the range by a call to vm_map_find, and removed when + * vm_map_submap creates the submap. + */ + +vm_object_t vm_submap_object; + +/* + * vm_map_init: + * + * Initialize the vm_map module. Must be called before + * any other vm_map routines. + * + * Map and entry structures are allocated from zones -- we must + * initialize those zones. + * + * There are three zones of interest: + * + * vm_map_zone: used to allocate maps. + * vm_map_entry_zone: used to allocate map entries. + * vm_map_kentry_zone: used to allocate map entries for the kernel. + * + * The kernel allocates map entries from a special zone that is initially + * "crammed" with memory. It would be difficult (perhaps impossible) for + * the kernel to allocate more memory to a entry zone when it became + * empty since the very act of allocating memory implies the creation + * of a new entry. 
+ */ + +vm_offset_t map_data; +vm_size_t map_data_size; +vm_offset_t kentry_data; +vm_size_t kentry_data_size; +int kentry_count = 2048; /* to init kentry_data_size */ + +/* + * Threshold for aggressive (eager) page map entering for vm copyout + * operations. Any copyout larger will NOT be aggressively entered. + */ +vm_size_t vm_map_aggressive_enter_max; /* set by bootstrap */ + +void +vm_map_init( + void) +{ + vm_map_zone = zinit((vm_size_t) sizeof(struct vm_map), 40*1024, + PAGE_SIZE, "maps"); + + vm_map_entry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry), + 1024*1024, PAGE_SIZE*5, + "non-kernel map entries"); + + vm_map_kentry_zone = zinit((vm_size_t) sizeof(struct vm_map_entry), + kentry_data_size, kentry_data_size, + "kernel map entries"); + + vm_map_copy_zone = zinit((vm_size_t) sizeof(struct vm_map_copy), + 16*1024, PAGE_SIZE, "map copies"); + + /* + * Cram the map and kentry zones with initial data. + * Set kentry_zone non-collectible to aid zone_gc(). + */ + zone_change(vm_map_zone, Z_COLLECT, FALSE); + zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE); + zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE); + zcram(vm_map_zone, map_data, map_data_size); + zcram(vm_map_kentry_zone, kentry_data, kentry_data_size); +} + +void +vm_map_steal_memory( + void) +{ + map_data_size = round_page(10 * sizeof(struct vm_map)); + map_data = pmap_steal_memory(map_data_size); + +#if 0 + /* + * Limiting worst case: vm_map_kentry_zone needs to map each "available" + * physical page (i.e. that beyond the kernel image and page tables) + * individually; we guess at most one entry per eight pages in the + * real world. This works out to roughly .1 of 1% of physical memory, + * or roughly 1900 entries (64K) for a 64M machine with 4K pages. 
+ */ +#endif + kentry_count = pmap_free_pages() / 8; + + + kentry_data_size = + round_page(kentry_count * sizeof(struct vm_map_entry)); + kentry_data = pmap_steal_memory(kentry_data_size); +} + +/* + * vm_map_create: + * + * Creates and returns a new empty VM map with + * the given physical map structure, and having + * the given lower and upper address bounds. + */ +vm_map_t +vm_map_create( + pmap_t pmap, + vm_offset_t min, + vm_offset_t max, + boolean_t pageable) +{ + register vm_map_t result; + + result = (vm_map_t) zalloc(vm_map_zone); + if (result == VM_MAP_NULL) + panic("vm_map_create"); + + vm_map_first_entry(result) = vm_map_to_entry(result); + vm_map_last_entry(result) = vm_map_to_entry(result); + result->hdr.nentries = 0; + result->hdr.entries_pageable = pageable; + + result->size = 0; + result->ref_count = 1; +#if TASK_SWAPPER + result->res_count = 1; + result->sw_state = MAP_SW_IN; +#endif /* TASK_SWAPPER */ + result->pmap = pmap; + result->min_offset = min; + result->max_offset = max; + result->wiring_required = FALSE; + result->no_zero_fill = FALSE; + result->wait_for_space = FALSE; + result->first_free = vm_map_to_entry(result); + result->hint = vm_map_to_entry(result); + vm_map_lock_init(result); + mutex_init(&result->s_lock, ETAP_VM_RESULT); + + return(result); +} + +/* + * vm_map_entry_create: [ internal use only ] + * + * Allocates a VM map entry for insertion in the + * given map (or map copy). No fields are filled. 
+ */ +#define vm_map_entry_create(map) \ + _vm_map_entry_create(&(map)->hdr) + +#define vm_map_copy_entry_create(copy) \ + _vm_map_entry_create(&(copy)->cpy_hdr) + +vm_map_entry_t +_vm_map_entry_create( + register struct vm_map_header *map_header) +{ + register zone_t zone; + register vm_map_entry_t entry; + + if (map_header->entries_pageable) + zone = vm_map_entry_zone; + else + zone = vm_map_kentry_zone; + + entry = (vm_map_entry_t) zalloc(zone); + if (entry == VM_MAP_ENTRY_NULL) + panic("vm_map_entry_create"); + + return(entry); +} + +/* + * vm_map_entry_dispose: [ internal use only ] + * + * Inverse of vm_map_entry_create. + */ +#define vm_map_entry_dispose(map, entry) \ +MACRO_BEGIN \ + if((entry) == (map)->first_free) \ + (map)->first_free = vm_map_to_entry(map); \ + if((entry) == (map)->hint) \ + (map)->hint = vm_map_to_entry(map); \ + _vm_map_entry_dispose(&(map)->hdr, (entry)); \ +MACRO_END + +#define vm_map_copy_entry_dispose(map, entry) \ + _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry)) + +void +_vm_map_entry_dispose( + register struct vm_map_header *map_header, + register vm_map_entry_t entry) +{ + register zone_t zone; + + if (map_header->entries_pageable) + zone = vm_map_entry_zone; + else + zone = vm_map_kentry_zone; + + zfree(zone, (vm_offset_t) entry); +} + +boolean_t first_free_is_valid(vm_map_t map); /* forward */ +boolean_t first_free_check = FALSE; +boolean_t +first_free_is_valid( + vm_map_t map) +{ + vm_map_entry_t entry, next; + + if (!first_free_check) + return TRUE; + + entry = vm_map_to_entry(map); + next = entry->vme_next; + while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) || + (trunc_page(next->vme_start) == trunc_page(entry->vme_start) && + next != vm_map_to_entry(map))) { + entry = next; + next = entry->vme_next; + if (entry == vm_map_to_entry(map)) + break; + } + if (map->first_free != entry) { + printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n", + map, map->first_free, entry); + return FALSE; + } + return 
TRUE; +} + +/* + * UPDATE_FIRST_FREE: + * + * Updates the map->first_free pointer to the + * entry immediately before the first hole in the map. + * The map should be locked. + */ +#define UPDATE_FIRST_FREE(map, new_first_free) \ +MACRO_BEGIN \ + vm_map_t UFF_map; \ + vm_map_entry_t UFF_first_free; \ + vm_map_entry_t UFF_next_entry; \ + UFF_map = (map); \ + UFF_first_free = (new_first_free); \ + UFF_next_entry = UFF_first_free->vme_next; \ + while (trunc_page(UFF_next_entry->vme_start) == \ + trunc_page(UFF_first_free->vme_end) || \ + (trunc_page(UFF_next_entry->vme_start) == \ + trunc_page(UFF_first_free->vme_start) && \ + UFF_next_entry != vm_map_to_entry(UFF_map))) { \ + UFF_first_free = UFF_next_entry; \ + UFF_next_entry = UFF_first_free->vme_next; \ + if (UFF_first_free == vm_map_to_entry(UFF_map)) \ + break; \ + } \ + UFF_map->first_free = UFF_first_free; \ + assert(first_free_is_valid(UFF_map)); \ +MACRO_END + +/* + * vm_map_entry_{un,}link: + * + * Insert/remove entries from maps (or map copies). 
+ */ +#define vm_map_entry_link(map, after_where, entry) \ +MACRO_BEGIN \ + vm_map_t VMEL_map; \ + vm_map_entry_t VMEL_entry; \ + VMEL_map = (map); \ + VMEL_entry = (entry); \ + _vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); \ + UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free); \ +MACRO_END + + +#define vm_map_copy_entry_link(copy, after_where, entry) \ + _vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry)) + +#define _vm_map_entry_link(hdr, after_where, entry) \ + MACRO_BEGIN \ + (hdr)->nentries++; \ + (entry)->vme_prev = (after_where); \ + (entry)->vme_next = (after_where)->vme_next; \ + (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \ + MACRO_END + +#define vm_map_entry_unlink(map, entry) \ +MACRO_BEGIN \ + vm_map_t VMEU_map; \ + vm_map_entry_t VMEU_entry; \ + vm_map_entry_t VMEU_first_free; \ + VMEU_map = (map); \ + VMEU_entry = (entry); \ + if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) \ + VMEU_first_free = VMEU_entry->vme_prev; \ + else \ + VMEU_first_free = VMEU_map->first_free; \ + _vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry); \ + UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free); \ +MACRO_END + +#define vm_map_copy_entry_unlink(copy, entry) \ + _vm_map_entry_unlink(&(copy)->cpy_hdr, (entry)) + +#define _vm_map_entry_unlink(hdr, entry) \ + MACRO_BEGIN \ + (hdr)->nentries--; \ + (entry)->vme_next->vme_prev = (entry)->vme_prev; \ + (entry)->vme_prev->vme_next = (entry)->vme_next; \ + MACRO_END + +/* + * kernel_vm_map_reference: + * + * kernel internal export version for iokit and bsd components + * in lieu of component interface semantics. 
+ * + */ +void +kernel_vm_map_reference( + register vm_map_t map) +{ + if (map == VM_MAP_NULL) + return; + + mutex_lock(&map->s_lock); +#if TASK_SWAPPER + assert(map->res_count > 0); + assert(map->ref_count >= map->res_count); + map->res_count++; +#endif + map->ref_count++; + mutex_unlock(&map->s_lock); +} + +#if MACH_ASSERT && TASK_SWAPPER +/* + * vm_map_reference: + * + * Adds valid reference and residence counts to the given map. + * The map must be in memory (i.e. non-zero residence count). + * + */ +void +vm_map_reference( + register vm_map_t map) +{ + if (map == VM_MAP_NULL) + return; + + mutex_lock(&map->s_lock); + assert(map->res_count > 0); + assert(map->ref_count >= map->res_count); + map->ref_count++; + map->res_count++; + mutex_unlock(&map->s_lock); +} + +/* + * vm_map_res_reference: + * + * Adds another valid residence count to the given map. + * + * Map is locked so this function can be called from + * vm_map_swapin. + * + */ +void vm_map_res_reference(register vm_map_t map) +{ + /* assert map is locked */ + assert(map->res_count >= 0); + assert(map->ref_count >= map->res_count); + if (map->res_count == 0) { + mutex_unlock(&map->s_lock); + vm_map_lock(map); + vm_map_swapin(map); + mutex_lock(&map->s_lock); + ++map->res_count; + vm_map_unlock(map); + } else + ++map->res_count; +} + +/* + * vm_map_reference_swap: + * + * Adds valid reference and residence counts to the given map. + * + * The map may not be in memory (i.e. zero residence count). + * + */ +void vm_map_reference_swap(register vm_map_t map) +{ + assert(map != VM_MAP_NULL); + mutex_lock(&map->s_lock); + assert(map->res_count >= 0); + assert(map->ref_count >= map->res_count); + map->ref_count++; + vm_map_res_reference(map); + mutex_unlock(&map->s_lock); +} + +/* + * vm_map_res_deallocate: + * + * Decrement residence count on a map; possibly causing swapout. + * + * The map must be in memory (i.e. non-zero residence count). 
+ * + * The map is locked, so this function is callable from vm_map_deallocate. + * + */ +void vm_map_res_deallocate(register vm_map_t map) +{ + assert(map->res_count > 0); + if (--map->res_count == 0) { + mutex_unlock(&map->s_lock); + vm_map_lock(map); + vm_map_swapout(map); + vm_map_unlock(map); + mutex_lock(&map->s_lock); + } + assert(map->ref_count >= map->res_count); +} +#endif /* MACH_ASSERT && TASK_SWAPPER */ + +/* + * vm_map_deallocate: + * + * Removes a reference from the specified map, + * destroying it if no references remain. + * The map should not be locked. + */ +void +vm_map_deallocate( + register vm_map_t map) +{ + unsigned int ref; + + if (map == VM_MAP_NULL) + return; + + mutex_lock(&map->s_lock); + ref = --map->ref_count; + if (ref > 0) { + vm_map_res_deallocate(map); + mutex_unlock(&map->s_lock); + return; + } + assert(map->ref_count == 0); + mutex_unlock(&map->s_lock); + +#if TASK_SWAPPER + /* + * The map residence count isn't decremented here because + * the vm_map_delete below will traverse the entire map, + * deleting entries, and the residence counts on objects + * and sharing maps will go away then. + */ +#endif + + vm_map_destroy(map); +} + +/* + * vm_map_destroy: + * + * Actually destroy a map. + */ +void +vm_map_destroy( + register vm_map_t map) +{ + vm_map_lock(map); + (void) vm_map_delete(map, map->min_offset, + map->max_offset, VM_MAP_NO_FLAGS); + vm_map_unlock(map); + + pmap_destroy(map->pmap); + + zfree(vm_map_zone, (vm_offset_t) map); +} + +#if TASK_SWAPPER +/* + * vm_map_swapin/vm_map_swapout + * + * Swap a map in and out, either referencing or releasing its resources. + * These functions are internal use only; however, they must be exported + * because they may be called from macros, which are exported. + * + * In the case of swapout, there could be races on the residence count, + * so if the residence count is up, we return, assuming that a + * vm_map_deallocate() call in the near future will bring us back. 
+ * + * Locking: + * -- We use the map write lock for synchronization among races. + * -- The map write lock, and not the simple s_lock, protects the + * swap state of the map. + * -- If a map entry is a share map, then we hold both locks, in + * hierarchical order. + * + * Synchronization Notes: + * 1) If a vm_map_swapin() call happens while swapout in progress, it + * will block on the map lock and proceed when swapout is through. + * 2) A vm_map_reference() call at this time is illegal, and will + * cause a panic. vm_map_reference() is only allowed on resident + * maps, since it refuses to block. + * 3) A vm_map_swapin() call during a swapin will block, and + * proceeed when the first swapin is done, turning into a nop. + * This is the reason the res_count is not incremented until + * after the swapin is complete. + * 4) There is a timing hole after the checks of the res_count, before + * the map lock is taken, during which a swapin may get the lock + * before a swapout about to happen. If this happens, the swapin + * will detect the state and increment the reference count, causing + * the swapout to be a nop, thereby delaying it until a later + * vm_map_deallocate. If the swapout gets the lock first, then + * the swapin will simply block until the swapout is done, and + * then proceed. + * + * Because vm_map_swapin() is potentially an expensive operation, it + * should be used with caution. + * + * Invariants: + * 1) A map with a residence count of zero is either swapped, or + * being swapped. + * 2) A map with a non-zero residence count is either resident, + * or being swapped in. + */ + +int vm_map_swap_enable = 1; + +void vm_map_swapin (vm_map_t map) +{ + register vm_map_entry_t entry; + + if (!vm_map_swap_enable) /* debug */ + return; + + /* + * Map is locked + * First deal with various races. + */ + if (map->sw_state == MAP_SW_IN) + /* + * we raced with swapout and won. Returning will incr. + * the res_count, turning the swapout into a nop. 
+ */ + return; + + /* + * The residence count must be zero. If we raced with another + * swapin, the state would have been IN; if we raced with a + * swapout (after another competing swapin), we must have lost + * the race to get here (see above comment), in which case + * res_count is still 0. + */ + assert(map->res_count == 0); + + /* + * There are no intermediate states of a map going out or + * coming in, since the map is locked during the transition. + */ + assert(map->sw_state == MAP_SW_OUT); + + /* + * We now operate upon each map entry. If the entry is a sub- + * or share-map, we call vm_map_res_reference upon it. + * If the entry is an object, we call vm_object_res_reference + * (this may iterate through the shadow chain). + * Note that we hold the map locked the entire time, + * even if we get back here via a recursive call in + * vm_map_res_reference. + */ + entry = vm_map_first_entry(map); + + while (entry != vm_map_to_entry(map)) { + if (entry->object.vm_object != VM_OBJECT_NULL) { + if (entry->is_sub_map) { + vm_map_t lmap = entry->object.sub_map; + mutex_lock(&lmap->s_lock); + vm_map_res_reference(lmap); + mutex_unlock(&lmap->s_lock); + } else { + vm_object_t object = entry->object.vm_object; + vm_object_lock(object); + /* + * This call may iterate through the + * shadow chain. + */ + vm_object_res_reference(object); + vm_object_unlock(object); + } + } + entry = entry->vme_next; + } + assert(map->sw_state == MAP_SW_OUT); + map->sw_state = MAP_SW_IN; +} + +void vm_map_swapout(vm_map_t map) +{ + register vm_map_entry_t entry; + + /* + * Map is locked + * First deal with various races. + * If we raced with a swapin and lost, the residence count + * will have been incremented to 1, and we simply return. 
+ */ + mutex_lock(&map->s_lock); + if (map->res_count != 0) { + mutex_unlock(&map->s_lock); + return; + } + mutex_unlock(&map->s_lock); + + /* + * There are no intermediate states of a map going out or + * coming in, since the map is locked during the transition. + */ + assert(map->sw_state == MAP_SW_IN); + + if (!vm_map_swap_enable) + return; + + /* + * We now operate upon each map entry. If the entry is a sub- + * or share-map, we call vm_map_res_deallocate upon it. + * If the entry is an object, we call vm_object_res_deallocate + * (this may iterate through the shadow chain). + * Note that we hold the map locked the entire time, + * even if we get back here via a recursive call in + * vm_map_res_deallocate. + */ + entry = vm_map_first_entry(map); + + while (entry != vm_map_to_entry(map)) { + if (entry->object.vm_object != VM_OBJECT_NULL) { + if (entry->is_sub_map) { + vm_map_t lmap = entry->object.sub_map; + mutex_lock(&lmap->s_lock); + vm_map_res_deallocate(lmap); + mutex_unlock(&lmap->s_lock); + } else { + vm_object_t object = entry->object.vm_object; + vm_object_lock(object); + /* + * This call may take a long time, + * since it could actively push + * out pages (if we implement it + * that way). + */ + vm_object_res_deallocate(object); + vm_object_unlock(object); + } + } + entry = entry->vme_next; + } + assert(map->sw_state == MAP_SW_IN); + map->sw_state = MAP_SW_OUT; +} + +#endif /* TASK_SWAPPER */ + + +/* + * SAVE_HINT: + * + * Saves the specified entry as the hint for + * future lookups. Performs necessary interlocks. + */ +#define SAVE_HINT(map,value) \ + mutex_lock(&(map)->s_lock); \ + (map)->hint = (value); \ + mutex_unlock(&(map)->s_lock); + +/* + * vm_map_lookup_entry: [ internal use only ] + * + * Finds the map entry containing (or + * immediately preceding) the specified address + * in the given map; the entry is returned + * in the "entry" parameter. The boolean + * result indicates whether the address is + * actually contained in the map. 
+ */ +boolean_t +vm_map_lookup_entry( + register vm_map_t map, + register vm_offset_t address, + vm_map_entry_t *entry) /* OUT */ +{ + register vm_map_entry_t cur; + register vm_map_entry_t last; + + /* + * Start looking either from the head of the + * list, or from the hint. + */ + + mutex_lock(&map->s_lock); + cur = map->hint; + mutex_unlock(&map->s_lock); + + if (cur == vm_map_to_entry(map)) + cur = cur->vme_next; + + if (address >= cur->vme_start) { + /* + * Go from hint to end of list. + * + * But first, make a quick check to see if + * we are already looking at the entry we + * want (which is usually the case). + * Note also that we don't need to save the hint + * here... it is the same hint (unless we are + * at the header, in which case the hint didn't + * buy us anything anyway). + */ + last = vm_map_to_entry(map); + if ((cur != last) && (cur->vme_end > address)) { + *entry = cur; + return(TRUE); + } + } + else { + /* + * Go from start to hint, *inclusively* + */ + last = cur->vme_next; + cur = vm_map_first_entry(map); + } + + /* + * Search linearly + */ + + while (cur != last) { + if (cur->vme_end > address) { + if (address >= cur->vme_start) { + /* + * Save this lookup for future + * hints, and return + */ + + *entry = cur; + SAVE_HINT(map, cur); + return(TRUE); + } + break; + } + cur = cur->vme_next; + } + *entry = cur->vme_prev; + SAVE_HINT(map, *entry); + return(FALSE); +} + +/* + * Routine: vm_map_find_space + * Purpose: + * Allocate a range in the specified virtual address map, + * returning the entry allocated for that range. + * Used by kmem_alloc, etc. + * + * The map must be NOT be locked. It will be returned locked + * on KERN_SUCCESS, unlocked on failure. + * + * If an entry is allocated, the object/offset fields + * are initialized to zero. 
+ */ +kern_return_t +vm_map_find_space( + register vm_map_t map, + vm_offset_t *address, /* OUT */ + vm_size_t size, + vm_offset_t mask, + vm_map_entry_t *o_entry) /* OUT */ +{ + register vm_map_entry_t entry, new_entry; + register vm_offset_t start; + register vm_offset_t end; + + new_entry = vm_map_entry_create(map); + + /* + * Look for the first possible address; if there's already + * something at this address, we have to start after it. + */ + + vm_map_lock(map); + + assert(first_free_is_valid(map)); + if ((entry = map->first_free) == vm_map_to_entry(map)) + start = map->min_offset; + else + start = entry->vme_end; + + /* + * In any case, the "entry" always precedes + * the proposed new region throughout the loop: + */ + + while (TRUE) { + register vm_map_entry_t next; + + /* + * Find the end of the proposed new region. + * Be sure we didn't go beyond the end, or + * wrap around the address. + */ + + end = ((start + mask) & ~mask); + if (end < start) { + vm_map_entry_dispose(map, new_entry); + vm_map_unlock(map); + return(KERN_NO_SPACE); + } + start = end; + end += size; + + if ((end > map->max_offset) || (end < start)) { + vm_map_entry_dispose(map, new_entry); + vm_map_unlock(map); + return(KERN_NO_SPACE); + } + + /* + * If there are no more entries, we must win. + */ + + next = entry->vme_next; + if (next == vm_map_to_entry(map)) + break; + + /* + * If there is another entry, it must be + * after the end of the potential new region. + */ + + if (next->vme_start >= end) + break; + + /* + * Didn't fit -- move to the next entry. + */ + + entry = next; + start = entry->vme_end; + } + + /* + * At this point, + * "start" and "end" should define the endpoints of the + * available new range, and + * "entry" should refer to the region before the new + * range, and + * + * the map should be locked. 
+ */ + + *address = start; + + new_entry->vme_start = start; + new_entry->vme_end = end; + assert(page_aligned(new_entry->vme_start)); + assert(page_aligned(new_entry->vme_end)); + + new_entry->is_shared = FALSE; + new_entry->is_sub_map = FALSE; + new_entry->use_pmap = FALSE; + new_entry->object.vm_object = VM_OBJECT_NULL; + new_entry->offset = (vm_object_offset_t) 0; + + new_entry->needs_copy = FALSE; + + new_entry->inheritance = VM_INHERIT_DEFAULT; + new_entry->protection = VM_PROT_DEFAULT; + new_entry->max_protection = VM_PROT_ALL; + new_entry->behavior = VM_BEHAVIOR_DEFAULT; + new_entry->wired_count = 0; + new_entry->user_wired_count = 0; + + new_entry->in_transition = FALSE; + new_entry->needs_wakeup = FALSE; + + /* + * Insert the new entry into the list + */ + + vm_map_entry_link(map, entry, new_entry); + + map->size += size; + + /* + * Update the lookup hint + */ + SAVE_HINT(map, new_entry); + + *o_entry = new_entry; + return(KERN_SUCCESS); +} + +int vm_map_pmap_enter_print = FALSE; +int vm_map_pmap_enter_enable = FALSE; + +/* + * Routine: vm_map_pmap_enter + * + * Description: + * Force pages from the specified object to be entered into + * the pmap at the specified address if they are present. + * As soon as a page is not found in the object, the scan ends. + * + * Returns: + * Nothing. + * + * In/out conditions: + * The source map should not be locked on entry. 
+ */ +void +vm_map_pmap_enter( + vm_map_t map, + register vm_offset_t addr, + register vm_offset_t end_addr, + register vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection) +{ + while (addr < end_addr) { + register vm_page_t m; + + vm_object_lock(object); + vm_object_paging_begin(object); + + m = vm_page_lookup(object, offset); + if (m == VM_PAGE_NULL || m->busy || + (m->unusual && ( m->error || m->restart || m->absent || + protection & m->page_lock))) { + + vm_object_paging_end(object); + vm_object_unlock(object); + return; + } + + assert(!m->fictitious); /* XXX is this possible ??? */ + + if (vm_map_pmap_enter_print) { + printf("vm_map_pmap_enter:"); + printf("map: %x, addr: %x, object: %x, offset: %x\n", + map, addr, object, offset); + } + + m->busy = TRUE; + vm_object_unlock(object); + + PMAP_ENTER(map->pmap, addr, m, + protection, FALSE); + + vm_object_lock(object); + PAGE_WAKEUP_DONE(m); + vm_page_lock_queues(); + if (!m->active && !m->inactive) + vm_page_activate(m); + vm_page_unlock_queues(); + vm_object_paging_end(object); + vm_object_unlock(object); + + offset += PAGE_SIZE_64; + addr += PAGE_SIZE; + } +} + +/* + * Routine: vm_map_enter + * + * Description: + * Allocate a range in the specified virtual address map. + * The resulting range will refer to memory defined by + * the given memory object and offset into that object. + * + * Arguments are as defined in the vm_map call. 
+ */ +kern_return_t +vm_map_enter( + register vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t size, + vm_offset_t mask, + int flags, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_map_entry_t entry; + register vm_offset_t start; + register vm_offset_t end; + kern_return_t result = KERN_SUCCESS; + + boolean_t anywhere = VM_FLAGS_ANYWHERE & flags; + char alias; + + VM_GET_FLAGS_ALIAS(flags, alias); + +#define RETURN(value) { result = value; goto BailOut; } + + assert(page_aligned(*address)); + assert(page_aligned(size)); + StartAgain: ; + + start = *address; + + if (anywhere) { + vm_map_lock(map); + + /* + * Calculate the first possible address. + */ + + if (start < map->min_offset) + start = map->min_offset; + if (start > map->max_offset) + RETURN(KERN_NO_SPACE); + + /* + * Look for the first possible address; + * if there's already something at this + * address, we have to start after it. + */ + + assert(first_free_is_valid(map)); + if (start == map->min_offset) { + if ((entry = map->first_free) != vm_map_to_entry(map)) + start = entry->vme_end; + } else { + vm_map_entry_t tmp_entry; + if (vm_map_lookup_entry(map, start, &tmp_entry)) + start = tmp_entry->vme_end; + entry = tmp_entry; + } + + /* + * In any case, the "entry" always precedes + * the proposed new region throughout the + * loop: + */ + + while (TRUE) { + register vm_map_entry_t next; + + /* + * Find the end of the proposed new region. + * Be sure we didn't go beyond the end, or + * wrap around the address. 
+ */ + + end = ((start + mask) & ~mask); + if (end < start) + RETURN(KERN_NO_SPACE); + start = end; + end += size; + + if ((end > map->max_offset) || (end < start)) { + if (map->wait_for_space) { + if (size <= (map->max_offset - + map->min_offset)) { + assert_wait((event_t)map, + THREAD_ABORTSAFE); + vm_map_unlock(map); + thread_block((void (*)(void))0); + goto StartAgain; + } + } + RETURN(KERN_NO_SPACE); + } + + /* + * If there are no more entries, we must win. + */ + + next = entry->vme_next; + if (next == vm_map_to_entry(map)) + break; + + /* + * If there is another entry, it must be + * after the end of the potential new region. + */ + + if (next->vme_start >= end) + break; + + /* + * Didn't fit -- move to the next entry. + */ + + entry = next; + start = entry->vme_end; + } + *address = start; + } else { + vm_map_entry_t temp_entry; + + /* + * Verify that: + * the address doesn't itself violate + * the mask requirement. + */ + + vm_map_lock(map); + if ((start & mask) != 0) + RETURN(KERN_NO_SPACE); + + /* + * ... the address is within bounds + */ + + end = start + size; + + if ((start < map->min_offset) || + (end > map->max_offset) || + (start >= end)) { + RETURN(KERN_INVALID_ADDRESS); + } + + /* + * ... the starting address isn't allocated + */ + + if (vm_map_lookup_entry(map, start, &temp_entry)) + RETURN(KERN_NO_SPACE); + + entry = temp_entry; + + /* + * ... the next region doesn't overlap the + * end point. + */ + + if ((entry->vme_next != vm_map_to_entry(map)) && + (entry->vme_next->vme_start < end)) + RETURN(KERN_NO_SPACE); + } + + /* + * At this point, + * "start" and "end" should define the endpoints of the + * available new range, and + * "entry" should refer to the region before the new + * range, and + * + * the map should be locked. + */ + + /* + * See whether we can avoid creating a new entry (and object) by + * extending one of our neighbors. [So far, we only attempt to + * extend from below.] 
+ */ + + if ((object == VM_OBJECT_NULL) && + (entry != vm_map_to_entry(map)) && + (entry->vme_end == start) && + (!entry->is_shared) && + (!entry->is_sub_map) && + (entry->alias == alias) && + (entry->inheritance == inheritance) && + (entry->protection == cur_protection) && + (entry->max_protection == max_protection) && + (entry->behavior == VM_BEHAVIOR_DEFAULT) && + (entry->in_transition == 0) && + (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ + if (vm_object_coalesce(entry->object.vm_object, + VM_OBJECT_NULL, + entry->offset, + (vm_object_offset_t) 0, + (vm_size_t)(entry->vme_end - entry->vme_start), + (vm_size_t)(end - entry->vme_end))) { + + /* + * Coalesced the two objects - can extend + * the previous map entry to include the + * new range. + */ + map->size += (end - entry->vme_end); + entry->vme_end = end; + UPDATE_FIRST_FREE(map, map->first_free); + RETURN(KERN_SUCCESS); + } + } + + /* + * Create a new entry + */ + + { /**/ + register vm_map_entry_t new_entry; + + new_entry = vm_map_entry_insert(map, entry, start, end, object, + offset, needs_copy, FALSE, FALSE, + cur_protection, max_protection, + VM_BEHAVIOR_DEFAULT, inheritance, 0); + new_entry->alias = alias; + vm_map_unlock(map); + + /* Wire down the new entry if the user + * requested all new map entries be wired. + */ + if (map->wiring_required) { + result = vm_map_wire(map, start, end, + new_entry->protection, TRUE); + return(result); + } + + if ((object != VM_OBJECT_NULL) && + (vm_map_pmap_enter_enable) && + (!anywhere) && + (!needs_copy) && + (size < (128*1024))) { + vm_map_pmap_enter(map, start, end, + object, offset, cur_protection); + } + + return(result); + } /**/ + + BailOut: ; + vm_map_unlock(map); + return(result); + +#undef RETURN +} + +/* + * vm_map_clip_start: [ internal use only ] + * + * Asserts that the given entry begins at or after + * the specified address; if necessary, + * it splits the entry into two. 
+ */ +#ifndef i386 +#define vm_map_clip_start(map, entry, startaddr) \ +MACRO_BEGIN \ + vm_map_t VMCS_map; \ + vm_map_entry_t VMCS_entry; \ + vm_offset_t VMCS_startaddr; \ + VMCS_map = (map); \ + VMCS_entry = (entry); \ + VMCS_startaddr = (startaddr); \ + if (VMCS_startaddr > VMCS_entry->vme_start) { \ + if(entry->use_pmap) { \ + vm_offset_t pmap_base_addr; \ + vm_offset_t pmap_end_addr; \ + \ + pmap_base_addr = 0xF0000000 & entry->vme_start; \ + pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \ + pmap_unnest(map->pmap, pmap_base_addr, \ + (pmap_end_addr - pmap_base_addr) + 1); \ + entry->use_pmap = FALSE; \ + } \ + _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\ + } \ + UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \ +MACRO_END +#else +#define vm_map_clip_start(map, entry, startaddr) \ +MACRO_BEGIN \ + vm_map_t VMCS_map; \ + vm_map_entry_t VMCS_entry; \ + vm_offset_t VMCS_startaddr; \ + VMCS_map = (map); \ + VMCS_entry = (entry); \ + VMCS_startaddr = (startaddr); \ + if (VMCS_startaddr > VMCS_entry->vme_start) { \ + _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\ + } \ + UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \ +MACRO_END +#endif + +#define vm_map_copy_clip_start(copy, entry, startaddr) \ + MACRO_BEGIN \ + if ((startaddr) > (entry)->vme_start) \ + _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ + MACRO_END + +/* + * This routine is called only when it is known that + * the entry must be split. + */ +void +_vm_map_clip_start( + register struct vm_map_header *map_header, + register vm_map_entry_t entry, + register vm_offset_t start) +{ + register vm_map_entry_t new_entry; + + /* + * Split off the front portion -- + * note that we must insert the new + * entry BEFORE this one, so that + * this entry has the specified starting + * address. 
+ */ + + new_entry = _vm_map_entry_create(map_header); + vm_map_entry_copy_full(new_entry, entry); + + new_entry->vme_end = start; + entry->offset += (start - entry->vme_start); + entry->vme_start = start; + + _vm_map_entry_link(map_header, entry->vme_prev, new_entry); + + if (entry->is_sub_map) + vm_map_reference(new_entry->object.sub_map); + else + vm_object_reference(new_entry->object.vm_object); +} + + +/* + * vm_map_clip_end: [ internal use only ] + * + * Asserts that the given entry ends at or before + * the specified address; if necessary, + * it splits the entry into two. + */ +#ifndef i386 +#define vm_map_clip_end(map, entry, endaddr) \ +MACRO_BEGIN \ + vm_map_t VMCE_map; \ + vm_map_entry_t VMCE_entry; \ + vm_offset_t VMCE_endaddr; \ + VMCE_map = (map); \ + VMCE_entry = (entry); \ + VMCE_endaddr = (endaddr); \ + if (VMCE_endaddr < VMCE_entry->vme_end) { \ + if(entry->use_pmap) { \ + vm_offset_t pmap_base_addr; \ + vm_offset_t pmap_end_addr; \ + \ + pmap_base_addr = 0xF0000000 & entry->vme_start; \ + pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \ + pmap_unnest(map->pmap, pmap_base_addr, \ + (pmap_end_addr - pmap_base_addr) + 1); \ + entry->use_pmap = FALSE; \ + } \ + _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \ + } \ + UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \ +MACRO_END +#else +#define vm_map_clip_end(map, entry, endaddr) \ +MACRO_BEGIN \ + vm_map_t VMCE_map; \ + vm_map_entry_t VMCE_entry; \ + vm_offset_t VMCE_endaddr; \ + VMCE_map = (map); \ + VMCE_entry = (entry); \ + VMCE_endaddr = (endaddr); \ + if (VMCE_endaddr < VMCE_entry->vme_end) { \ + _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \ + } \ + UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \ +MACRO_END +#endif + +#define vm_map_copy_clip_end(copy, entry, endaddr) \ + MACRO_BEGIN \ + if ((endaddr) < (entry)->vme_end) \ + _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ + MACRO_END + +/* + * This routine is called only when it is known that + * the 
entry must be split. + */ +void +_vm_map_clip_end( + register struct vm_map_header *map_header, + register vm_map_entry_t entry, + register vm_offset_t end) +{ + register vm_map_entry_t new_entry; + + /* + * Create a new entry and insert it + * AFTER the specified entry + */ + + new_entry = _vm_map_entry_create(map_header); + vm_map_entry_copy_full(new_entry, entry); + + new_entry->vme_start = entry->vme_end = end; + new_entry->offset += (end - entry->vme_start); + + _vm_map_entry_link(map_header, entry, new_entry); + + if (entry->is_sub_map) + vm_map_reference(new_entry->object.sub_map); + else + vm_object_reference(new_entry->object.vm_object); +} + + +/* + * VM_MAP_RANGE_CHECK: [ internal use only ] + * + * Asserts that the starting and ending region + * addresses fall within the valid range of the map. + */ +#define VM_MAP_RANGE_CHECK(map, start, end) \ + { \ + if (start < vm_map_min(map)) \ + start = vm_map_min(map); \ + if (end > vm_map_max(map)) \ + end = vm_map_max(map); \ + if (start > end) \ + start = end; \ + } + +/* + * vm_map_range_check: [ internal use only ] + * + * Check that the region defined by the specified start and + * end addresses is wholly contained within a single map + * entry or set of adjacent map entries of the specified map, + * i.e. the specified region contains no unmapped space. + * If any or all of the region is unmapped, FALSE is returned. + * Otherwise, TRUE is returned and if the output argument 'entry' + * is not NULL it points to the map entry containing the start + * of the region. + * + * The map is locked for reading on entry and is left locked. 
+ */ +boolean_t +vm_map_range_check( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + vm_map_entry_t *entry) +{ + vm_map_entry_t cur; + register vm_offset_t prev; + + /* + * Basic sanity checks first + */ + if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) + return (FALSE); + + /* + * Check first if the region starts within a valid + * mapping for the map. + */ + if (!vm_map_lookup_entry(map, start, &cur)) + return (FALSE); + + /* + * Optimize for the case that the region is contained + * in a single map entry. + */ + if (entry != (vm_map_entry_t *) NULL) + *entry = cur; + if (end <= cur->vme_end) + return (TRUE); + + /* + * If the region is not wholly contained within a + * single entry, walk the entries looking for holes. + */ + prev = cur->vme_end; + cur = cur->vme_next; + while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) { + if (end <= cur->vme_end) + return (TRUE); + prev = cur->vme_end; + cur = cur->vme_next; + } + return (FALSE); +} + +/* + * vm_map_submap: [ kernel use only ] + * + * Mark the given range as handled by a subordinate map. + * + * This range must have been created with vm_map_find using + * the vm_submap_object, and no other operations may have been + * performed on this range prior to calling vm_map_submap. + * + * Only a limited number of operations can be performed + * within this range after calling vm_map_submap: + * vm_fault + * [Don't try vm_map_copyin!] + * + * To remove a submapping, one must first remove the + * range from the superior map, and then destroy the + * submap (if desired). [Better yet, don't try it.] 
+ */ +kern_return_t +vm_map_submap( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + vm_map_t submap, + vm_offset_t offset, + boolean_t use_pmap) +{ + vm_map_entry_t entry; + register kern_return_t result = KERN_INVALID_ARGUMENT; + register vm_object_t object; + + vm_map_lock(map); + + VM_MAP_RANGE_CHECK(map, start, end); + + if (vm_map_lookup_entry(map, start, &entry)) { + vm_map_clip_start(map, entry, start); + } + else + entry = entry->vme_next; + + if(entry == vm_map_to_entry(map)) { + vm_map_unlock(map); + return KERN_INVALID_ARGUMENT; + } + + vm_map_clip_end(map, entry, end); + + if ((entry->vme_start == start) && (entry->vme_end == end) && + (!entry->is_sub_map) && + ((object = entry->object.vm_object) == vm_submap_object) && + (object->resident_page_count == 0) && + (object->copy == VM_OBJECT_NULL) && + (object->shadow == VM_OBJECT_NULL) && + (!object->pager_created)) { + entry->offset = (vm_object_offset_t)offset; + entry->object.vm_object = VM_OBJECT_NULL; + vm_object_deallocate(object); + entry->is_sub_map = TRUE; + vm_map_reference(entry->object.sub_map = submap); +#ifndef i386 + if ((use_pmap) && (offset == 0)) { + /* nest if platform code will allow */ + result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, + start, end - start); + if(result) + panic("pmap_nest failed!"); + entry->use_pmap = TRUE; + } +#endif +#ifdef i386 + pmap_remove(map->pmap, start, end); +#endif + result = KERN_SUCCESS; + } + vm_map_unlock(map); + + return(result); +} + +/* + * vm_map_protect: + * + * Sets the protection of the specified address + * region in the target map. If "set_max" is + * specified, the maximum protection is to be set; + * otherwise, only the current protection is affected. 
+ */ +kern_return_t +vm_map_protect( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + register vm_prot_t new_prot, + register boolean_t set_max) +{ + register vm_map_entry_t current; + register vm_offset_t prev; + vm_map_entry_t entry; + vm_prot_t new_max; + boolean_t clip; + + XPR(XPR_VM_MAP, + "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d", + (integer_t)map, start, end, new_prot, set_max); + + vm_map_lock(map); + + /* + * Lookup the entry. If it doesn't start in a valid + * entry, return an error. Remember if we need to + * clip the entry. We don't do it here because we don't + * want to make any changes until we've scanned the + * entire range below for address and protection + * violations. + */ + if (!(clip = vm_map_lookup_entry(map, start, &entry))) { + vm_map_unlock(map); + return(KERN_INVALID_ADDRESS); + } + + /* + * Make a first pass to check for protection and address + * violations. + */ + + current = entry; + prev = current->vme_start; + while ((current != vm_map_to_entry(map)) && + (current->vme_start < end)) { + + /* + * If there is a hole, return an error. + */ + if (current->vme_start != prev) { + vm_map_unlock(map); + return(KERN_INVALID_ADDRESS); + } + + new_max = current->max_protection; + if(new_prot & VM_PROT_COPY) { + new_max |= VM_PROT_WRITE; + if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) { + vm_map_unlock(map); + return(KERN_PROTECTION_FAILURE); + } + } else { + if ((new_prot & new_max) != new_prot) { + vm_map_unlock(map); + return(KERN_PROTECTION_FAILURE); + } + } + + prev = current->vme_end; + current = current->vme_next; + } + if (end > prev) { + vm_map_unlock(map); + return(KERN_INVALID_ADDRESS); + } + + /* + * Go back and fix up protections. + * Clip to start here if the range starts within + * the entry. 
+ */ + + current = entry; + if (clip) { + vm_map_clip_start(map, entry, start); + } + while ((current != vm_map_to_entry(map)) && + (current->vme_start < end)) { + + vm_prot_t old_prot; + + vm_map_clip_end(map, current, end); + + old_prot = current->protection; + + if(new_prot & VM_PROT_COPY) { + /* caller is asking specifically to copy the */ + /* mapped data, this implies that max protection */ + /* will include write. Caller must be prepared */ + /* for loss of shared memory communication in the */ + /* target area after taking this step */ + current->needs_copy = TRUE; + current->max_protection |= VM_PROT_WRITE; + } + + if (set_max) + current->protection = + (current->max_protection = + new_prot & ~VM_PROT_COPY) & + old_prot; + else + current->protection = new_prot & ~VM_PROT_COPY; + + /* + * Update physical map if necessary. + * If the request is to turn off write protection, + * we won't do it for real (in pmap). This is because + * it would cause copy-on-write to fail. We've already + * set the new protection in the map, so if a + * write-protect fault occurred, it will be fixed up + * properly, COW or not. 
+ */ + /* the 256M hack for existing hardware limitations */ + if (current->protection != old_prot) { + if(current->is_sub_map && current->use_pmap) { + vm_offset_t pmap_base_addr; + vm_offset_t pmap_end_addr; + vm_map_entry_t local_entry; + + pmap_base_addr = 0xF0000000 & current->vme_start; + pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; +#ifndef i386 + if(!vm_map_lookup_entry(map, + pmap_base_addr, &local_entry)) + panic("vm_map_protect: nested pmap area is missing"); + while ((local_entry != vm_map_to_entry(map)) && + (local_entry->vme_start < pmap_end_addr)) { + local_entry->use_pmap = FALSE; + local_entry = local_entry->vme_next; + } + pmap_unnest(map->pmap, pmap_base_addr, + (pmap_end_addr - pmap_base_addr) + 1); +#endif + } + if (!(current->protection & VM_PROT_WRITE)) { + /* Look one level in we support nested pmaps */ + /* from mapped submaps which are direct entries */ + /* in our map */ + if(current->is_sub_map && current->use_pmap) { + pmap_protect(current->object.sub_map->pmap, + current->vme_start, + current->vme_end, + current->protection); + } else { + pmap_protect(map->pmap, current->vme_start, + current->vme_end, + current->protection); + } + } + } + current = current->vme_next; + } + + vm_map_unlock(map); + return(KERN_SUCCESS); +} + +/* + * vm_map_inherit: + * + * Sets the inheritance of the specified address + * range in the target map. Inheritance + * affects how the map will be shared with + * child maps at the time of vm_map_fork. 
+ */ +kern_return_t +vm_map_inherit( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + register vm_inherit_t new_inheritance) +{ + register vm_map_entry_t entry; + vm_map_entry_t temp_entry; + + vm_map_lock(map); + + VM_MAP_RANGE_CHECK(map, start, end); + + if (vm_map_lookup_entry(map, start, &temp_entry)) { + entry = temp_entry; + vm_map_clip_start(map, entry, start); + } + else { + temp_entry = temp_entry->vme_next; + entry = temp_entry; + } + + /* first check entire range for submaps which can't support the */ + /* given inheritance. */ + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + if(entry->is_sub_map) { + if(new_inheritance == VM_INHERIT_COPY) + return(KERN_INVALID_ARGUMENT); + } + + entry = entry->vme_next; + } + + entry = temp_entry; + + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + vm_map_clip_end(map, entry, end); + + entry->inheritance = new_inheritance; + + entry = entry->vme_next; + } + + vm_map_unlock(map); + return(KERN_SUCCESS); +} + +/* + * vm_map_wire: + * + * Sets the pageability of the specified address range in the + * target map as wired. Regions specified as not pageable require + * locked-down physical memory and physical page maps. The + * access_type variable indicates types of accesses that must not + * generate page faults. This is checked against protection of + * memory being locked-down. + * + * The map must not be locked, but a reference must remain to the + * map throughout the call. 
+ */ +kern_return_t +vm_map_wire_nested( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + register vm_prot_t access_type, + boolean_t user_wire, + pmap_t map_pmap) +{ + register vm_map_entry_t entry; + struct vm_map_entry *first_entry, tmp_entry; + vm_map_t pmap_map; + register vm_offset_t s,e; + kern_return_t rc; + boolean_t need_wakeup; + boolean_t main_map = FALSE; + unsigned int last_timestamp; + vm_size_t size; + + vm_map_lock(map); + if(map_pmap == NULL) + main_map = TRUE; + last_timestamp = map->timestamp; + + VM_MAP_RANGE_CHECK(map, start, end); + assert(page_aligned(start)); + assert(page_aligned(end)); + + if (vm_map_lookup_entry(map, start, &first_entry)) { + entry = first_entry; + /* vm_map_clip_start will be done later. */ + } else { + /* Start address is not in map */ + vm_map_unlock(map); + return(KERN_INVALID_ADDRESS); + } + + s=start; + need_wakeup = FALSE; + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + /* + * If another thread is wiring/unwiring this entry then + * block after informing other thread to wake us up. + */ + if (entry->in_transition) { + /* + * We have not clipped the entry. Make sure that + * the start address is in range so that the lookup + * below will succeed. + */ + s = entry->vme_start < start? start: entry->vme_start; + + entry->needs_wakeup = TRUE; + + /* + * wake up anybody waiting on entries that we have + * already wired. + */ + if (need_wakeup) { + vm_map_entry_wakeup(map); + need_wakeup = FALSE; + } + /* + * User wiring is interruptible + */ + vm_map_entry_wait(map, + (user_wire) ? THREAD_ABORTSAFE : + THREAD_UNINT); + if (user_wire && current_thread()->wait_result == + THREAD_INTERRUPTED) { + /* + * undo the wirings we have done so far + * We do not clear the needs_wakeup flag, + * because we cannot tell if we were the + * only one waiting. 
+ */ + vm_map_unwire(map, start, s, user_wire); + return(KERN_FAILURE); + } + + vm_map_lock(map); + /* + * Cannot avoid a lookup here. Reset timestamp. + */ + last_timestamp = map->timestamp; + + /* + * The entry could have been clipped, look it up again. + * Worst that can happen is that it may not exist anymore. + */ + if (!vm_map_lookup_entry(map, s, &first_entry)) { + if (!user_wire) + panic("vm_map_wire: re-lookup failed"); + + /* + * User: undo everything up to the previous + * entry. Let vm_map_unwire worry about + * checking the validity of the range. + */ + vm_map_unlock(map); + vm_map_unwire(map, start, s, user_wire); + return(KERN_FAILURE); + } + entry = first_entry; + continue; + } + + if(entry->is_sub_map) { + vm_offset_t sub_start; + vm_offset_t sub_end; + vm_offset_t local_end; + pmap_t pmap; + + vm_map_clip_start(map, entry, start); + vm_map_clip_end(map, entry, end); + + sub_start += entry->offset; + sub_end = entry->vme_end - entry->vme_start; + sub_end += entry->offset; + + local_end = entry->vme_end; + if(map_pmap == NULL) { + if(entry->use_pmap) { + pmap = entry->object.sub_map->pmap; + } else { + pmap = map->pmap; + } + if (entry->wired_count) { + if (entry->wired_count + >= MAX_WIRE_COUNT) + panic("vm_map_wire: too many wirings"); + + if (user_wire && + entry->user_wired_count + >= MAX_WIRE_COUNT) { + vm_map_unlock(map); + vm_map_unwire(map, start, + entry->vme_start, user_wire); + return(KERN_FAILURE); + } + if (!user_wire || + (entry->user_wired_count++ == 0)) + entry->wired_count++; + entry = entry->vme_next; + continue; + + } else { + vm_object_t object; + vm_object_offset_t offset_hi; + vm_object_offset_t offset_lo; + vm_object_offset_t offset; + vm_prot_t prot; + boolean_t wired; + vm_behavior_t behavior; + vm_offset_t local_start; + vm_map_entry_t local_entry; + vm_map_version_t version; + vm_map_t lookup_map; + + /* call vm_map_lookup_locked to */ + /* cause any needs copy to be */ + /* evaluated */ + local_start = entry->vme_start; + 
lookup_map = map; + vm_map_lock_write_to_read(map); + if(vm_map_lookup_locked( + &lookup_map, local_start, + VM_PROT_WRITE, + &version, &object, + &offset, &prot, &wired, + &behavior, &offset_lo, + &offset_hi, &pmap_map)) { + + vm_map_unlock(lookup_map); + vm_map_unwire(map, start, + entry->vme_start, user_wire); + return(KERN_FAILURE); + } + if(pmap_map != lookup_map) + vm_map_unlock(pmap_map); + if(lookup_map != map) { + vm_map_unlock(lookup_map); + vm_map_lock(map); + } else { + vm_map_unlock(map); + vm_map_lock(map); + } + last_timestamp = + version.main_timestamp; + vm_object_unlock(object); + if (vm_map_lookup_entry(map, + local_start, &local_entry)) { + vm_map_unlock(map); + vm_map_unwire(map, start, + entry->vme_start, user_wire); + return(KERN_FAILURE); + } + /* did we have a change of type? */ + if (!local_entry->is_sub_map) + continue; + entry = local_entry; + if (user_wire) + entry->user_wired_count++; + entry->wired_count++; + + entry->in_transition = TRUE; + + vm_map_unlock(map); + rc = vm_map_wire_nested( + entry->object.sub_map, + sub_start, sub_end, + access_type, + user_wire, pmap); + vm_map_lock(map); + last_timestamp = map->timestamp; + } + } else { + vm_map_unlock(map); + rc = vm_map_wire_nested(entry->object.sub_map, + sub_start, sub_end, + access_type, + user_wire, pmap); + vm_map_lock(map); + last_timestamp = map->timestamp; + } + s = entry->vme_start; + e = entry->vme_end; + if (last_timestamp+1 != map->timestamp) { + /* + * Find the entry again. It could have been clipped + * after we unlocked the map. 
+ */ + if (!vm_map_lookup_entry(map, local_end, + &first_entry)) + panic("vm_map_wire: re-lookup failed"); + + entry = first_entry; + } + + last_timestamp = map->timestamp; + while ((entry != vm_map_to_entry(map)) && + (entry->vme_start < e)) { + assert(entry->in_transition); + entry->in_transition = FALSE; + if (entry->needs_wakeup) { + entry->needs_wakeup = FALSE; + need_wakeup = TRUE; + } + if (rc != KERN_SUCCESS) {/* from vm_*_wire */ + if(main_map) { + if (user_wire) + entry->user_wired_count--; + entry->wired_count--; + } + } + entry = entry->vme_next; + } + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + vm_map_unlock(map); + if (need_wakeup) + vm_map_entry_wakeup(map); + /* + * undo everything upto the previous entry. + */ + (void)vm_map_unwire(map, start, s, user_wire); + return rc; + } + continue; + } + + /* + * If this entry is already wired then increment + * the appropriate wire reference count. + */ + if (entry->wired_count && main_map) { + /* sanity check: wired_count is a short */ + if (entry->wired_count >= MAX_WIRE_COUNT) + panic("vm_map_wire: too many wirings"); + + if (user_wire && + entry->user_wired_count >= MAX_WIRE_COUNT) { + vm_map_unlock(map); + vm_map_unwire(map, start, + entry->vme_start, user_wire); + return(KERN_FAILURE); + } + /* + * entry is already wired down, get our reference + * after clipping to our range. + */ + vm_map_clip_start(map, entry, start); + vm_map_clip_end(map, entry, end); + if (!user_wire || (entry->user_wired_count++ == 0)) + entry->wired_count++; + + entry = entry->vme_next; + continue; + } + + /* + * Unwired entry or wire request transmitted via submap + */ + + + /* + * Perform actions of vm_map_lookup that need the write + * lock on the map: create a shadow object for a + * copy-on-write region, or an object for a zero-fill + * region. + */ + size = entry->vme_end - entry->vme_start; + /* + * If wiring a copy-on-write page, we need to copy it now + * even if we're only (currently) requesting read access. 
+ * This is aggressive, but once it's wired we can't move it. + */ + if (entry->needs_copy) { + vm_object_shadow(&entry->object.vm_object, + &entry->offset, size); + entry->needs_copy = FALSE; + } else if (entry->object.vm_object == VM_OBJECT_NULL) { + entry->object.vm_object = vm_object_allocate(size); + entry->offset = (vm_object_offset_t)0; + } + + vm_map_clip_start(map, entry, start); + vm_map_clip_end(map, entry, end); + + s = entry->vme_start; + e = entry->vme_end; + + /* + * Check for holes and protection mismatch. + * Holes: Next entry should be contiguous unless this + * is the end of the region. + * Protection: Access requested must be allowed, unless + * wiring is by protection class + */ + if ((((entry->vme_end < end) && + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start > entry->vme_end))) || + ((entry->protection & access_type) != access_type))) { + /* + * Found a hole or protection problem. + * Unwire the region we wired so far. + */ + if (start != entry->vme_start) { + vm_map_unlock(map); + vm_map_unwire(map, start, s, user_wire); + } else { + vm_map_unlock(map); + } + return((entry->protection&access_type) != access_type? + KERN_PROTECTION_FAILURE: KERN_INVALID_ADDRESS); + } + + assert(entry->wired_count == 0 && entry->user_wired_count == 0); + + if(main_map) { + if (user_wire) + entry->user_wired_count++; + entry->wired_count++; + } + + entry->in_transition = TRUE; + + /* + * This entry might get split once we unlock the map. + * In vm_fault_wire(), we need the current range as + * defined by this entry. In order for this to work + * along with a simultaneous clip operation, we make a + * temporary copy of this entry and use that for the + * wiring. Note that the underlying objects do not + * change during a clip. + */ + tmp_entry = *entry; + + /* + * The in_transition state guarentees that the entry + * (or entries for this range, if split occured) will be + * there when the map lock is acquired for the second time. 
+ */ + vm_map_unlock(map); + if(map_pmap) + rc = vm_fault_wire(map, &tmp_entry, map_pmap); + else + rc = vm_fault_wire(map, &tmp_entry, map->pmap); + vm_map_lock(map); + + if (last_timestamp+1 != map->timestamp) { + /* + * Find the entry again. It could have been clipped + * after we unlocked the map. + */ + if (!vm_map_lookup_entry(map, tmp_entry.vme_start, + &first_entry)) + panic("vm_map_wire: re-lookup failed"); + + entry = first_entry; + } + + last_timestamp = map->timestamp; + + while ((entry != vm_map_to_entry(map)) && + (entry->vme_start < tmp_entry.vme_end)) { + assert(entry->in_transition); + entry->in_transition = FALSE; + if (entry->needs_wakeup) { + entry->needs_wakeup = FALSE; + need_wakeup = TRUE; + } + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if(main_map) { + if (user_wire) + entry->user_wired_count--; + entry->wired_count--; + } + } + entry = entry->vme_next; + } + + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + vm_map_unlock(map); + if (need_wakeup) + vm_map_entry_wakeup(map); + /* + * undo everything upto the previous entry. + */ + (void)vm_map_unwire(map, start, s, user_wire); + return rc; + } + } /* end while loop through map entries */ + vm_map_unlock(map); + + /* + * wake up anybody waiting on entries we wired. 
+ */ + if (need_wakeup) + vm_map_entry_wakeup(map); + + return(KERN_SUCCESS); + +} + +kern_return_t +vm_map_wire( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + register vm_prot_t access_type, + boolean_t user_wire) +{ + + kern_return_t kret; + +#ifdef ppc + /* + * the calls to mapping_prealloc and mapping_relpre + * (along with the VM_MAP_RANGE_CHECK to insure a + * resonable range was passed in) are + * currently necessary because + * we haven't enabled kernel pre-emption + * and/or the pmap_enter cannot purge and re-use + * existing mappings + */ + VM_MAP_RANGE_CHECK(map, start, end); + mapping_prealloc(end - start); +#endif + kret = vm_map_wire_nested(map, start, end, access_type, + user_wire, (pmap_t)NULL); +#ifdef ppc + mapping_relpre(); +#endif + return kret; +} + +/* + * vm_map_unwire: + * + * Sets the pageability of the specified address range in the target + * as pageable. Regions specified must have been wired previously. + * + * The map must not be locked, but a reference must remain to the map + * throughout the call. + * + * Kernel will panic on failures. User unwire ignores holes and + * unwired and intransition entries to avoid losing memory by leaving + * it unwired. + */ +kern_return_t +vm_map_unwire_nested( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + boolean_t user_wire, + pmap_t map_pmap) +{ + register vm_map_entry_t entry; + struct vm_map_entry *first_entry, tmp_entry; + boolean_t need_wakeup; + boolean_t main_map = FALSE; + unsigned int last_timestamp; + + vm_map_lock(map); + if(map_pmap == NULL) + main_map = TRUE; + last_timestamp = map->timestamp; + + VM_MAP_RANGE_CHECK(map, start, end); + assert(page_aligned(start)); + assert(page_aligned(end)); + + if (vm_map_lookup_entry(map, start, &first_entry)) { + entry = first_entry; + /* vm_map_clip_start will be done later. */ + } + else { + /* Start address is not in map. 
*/ + vm_map_unlock(map); + return(KERN_INVALID_ADDRESS); + } + + need_wakeup = FALSE; + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + if (entry->in_transition) { + /* + * 1) + * Another thread is wiring down this entry. Note + * that if it is not for the other thread we would + * be unwiring an unwired entry. This is not + * permitted. If we wait, we will be unwiring memory + * we did not wire. + * + * 2) + * Another thread is unwiring this entry. We did not + * have a reference to it, because if we did, this + * entry will not be getting unwired now. + */ + if (!user_wire) + panic("vm_map_unwire: in_transition entry"); + + entry = entry->vme_next; + continue; + } + + if(entry->is_sub_map) { + vm_offset_t sub_start; + vm_offset_t sub_end; + vm_offset_t local_end; + pmap_t pmap; + + + vm_map_clip_start(map, entry, start); + vm_map_clip_end(map, entry, end); + + sub_start = entry->offset; + sub_end = entry->vme_end - entry->vme_start; + sub_end += entry->offset; + local_end = entry->vme_end; + if(map_pmap == NULL) { + if(entry->use_pmap) { + pmap = entry->object.sub_map->pmap; + } else { + pmap = map->pmap; + } + if (entry->wired_count == 0 || + (user_wire && entry->user_wired_count == 0)) { + if (!user_wire) + panic("vm_map_unwire: entry is unwired"); + entry = entry->vme_next; + continue; + } + + /* + * Check for holes + * Holes: Next entry should be contiguous unless + * this is the end of the region. + */ + if (((entry->vme_end < end) && + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start + > entry->vme_end)))) { + if (!user_wire) + panic("vm_map_unwire: non-contiguous region"); +/* + entry = entry->vme_next; + continue; +*/ + } + + if (!user_wire || (--entry->user_wired_count == 0)) + entry->wired_count--; + + if (entry->wired_count != 0) { + entry = entry->vme_next; + continue; + } + + entry->in_transition = TRUE; + tmp_entry = *entry;/* see comment in vm_map_wire() */ + + /* + * We can unlock the map now. 
The in_transition state + * guarantees existance of the entry. + */ + vm_map_unlock(map); + vm_map_unwire_nested(entry->object.sub_map, + sub_start, sub_end, user_wire, pmap); + vm_map_lock(map); + + if (last_timestamp+1 != map->timestamp) { + /* + * Find the entry again. It could have been + * clipped or deleted after we unlocked the map. + */ + if (!vm_map_lookup_entry(map, + tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) + panic("vm_map_unwire: re-lookup failed"); + entry = first_entry->vme_next; + } else + entry = first_entry; + } + last_timestamp = map->timestamp; + + /* + * clear transition bit for all constituent entries + * that were in the original entry (saved in + * tmp_entry). Also check for waiters. + */ + while ((entry != vm_map_to_entry(map)) && + (entry->vme_start < tmp_entry.vme_end)) { + assert(entry->in_transition); + entry->in_transition = FALSE; + if (entry->needs_wakeup) { + entry->needs_wakeup = FALSE; + need_wakeup = TRUE; + } + entry = entry->vme_next; + } + continue; + } else { + vm_map_unlock(map); + vm_map_unwire_nested(entry->object.sub_map, + sub_start, sub_end, user_wire, pmap); + vm_map_lock(map); + + if (last_timestamp+1 != map->timestamp) { + /* + * Find the entry again. It could have been + * clipped or deleted after we unlocked the map. 
+ */ + if (!vm_map_lookup_entry(map, + tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) + panic("vm_map_unwire: re-lookup failed"); + entry = first_entry->vme_next; + } else + entry = first_entry; + } + last_timestamp = map->timestamp; + } + } + + + if (main_map && (entry->wired_count == 0 || + (user_wire && entry->user_wired_count == 0))) { + if (!user_wire) + panic("vm_map_unwire: entry is unwired"); + + entry = entry->vme_next; + continue; + } + + assert(entry->wired_count > 0 && + (!user_wire || entry->user_wired_count > 0)); + + vm_map_clip_start(map, entry, start); + vm_map_clip_end(map, entry, end); + + /* + * Check for holes + * Holes: Next entry should be contiguous unless + * this is the end of the region. + */ + if (((entry->vme_end < end) && + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start > entry->vme_end)))) { + + if (!user_wire) + panic("vm_map_unwire: non-contiguous region"); + entry = entry->vme_next; + continue; + } + + if(main_map) { + if (!user_wire || (--entry->user_wired_count == 0)) + entry->wired_count--; + + if (entry->wired_count != 0) { + entry = entry->vme_next; + continue; + } + } + + entry->in_transition = TRUE; + tmp_entry = *entry; /* see comment in vm_map_wire() */ + + /* + * We can unlock the map now. The in_transition state + * guarantees existance of the entry. + */ + vm_map_unlock(map); + if(map_pmap) { + vm_fault_unwire(map, &tmp_entry, FALSE, map_pmap); + } else { + vm_fault_unwire(map, &tmp_entry, FALSE, map->pmap); + } + vm_map_lock(map); + + if (last_timestamp+1 != map->timestamp) { + /* + * Find the entry again. It could have been clipped + * or deleted after we unlocked the map. 
+ */ + if (!vm_map_lookup_entry(map, tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) + panic("vm_map_unwire: re-lookup failed"); + entry = first_entry->vme_next; + } else + entry = first_entry; + } + last_timestamp = map->timestamp; + + /* + * clear transition bit for all constituent entries that + * were in the original entry (saved in tmp_entry). Also + * check for waiters. + */ + while ((entry != vm_map_to_entry(map)) && + (entry->vme_start < tmp_entry.vme_end)) { + assert(entry->in_transition); + entry->in_transition = FALSE; + if (entry->needs_wakeup) { + entry->needs_wakeup = FALSE; + need_wakeup = TRUE; + } + entry = entry->vme_next; + } + } + vm_map_unlock(map); + /* + * wake up anybody waiting on entries that we have unwired. + */ + if (need_wakeup) + vm_map_entry_wakeup(map); + return(KERN_SUCCESS); + +} + +kern_return_t +vm_map_unwire( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + boolean_t user_wire) +{ + return vm_map_unwire_nested(map, start, end, user_wire, (pmap_t)NULL); +} + + +/* + * vm_map_entry_delete: [ internal use only ] + * + * Deallocate the given entry from the target map. + */ +void +vm_map_entry_delete( + register vm_map_t map, + register vm_map_entry_t entry) +{ + register vm_offset_t s, e; + register vm_object_t object; + register vm_map_t submap; + extern vm_object_t kernel_object; + + s = entry->vme_start; + e = entry->vme_end; + assert(page_aligned(s)); + assert(page_aligned(e)); + assert(entry->wired_count == 0); + assert(entry->user_wired_count == 0); + + if (entry->is_sub_map) { + object = NULL; + submap = entry->object.sub_map; + } else { + submap = NULL; + object = entry->object.vm_object; + } + + vm_map_entry_unlink(map, entry); + map->size -= e - s; + + vm_map_entry_dispose(map, entry); + + vm_map_unlock(map); + /* + * Deallocate the object only after removing all + * pmap entries pointing to its pages. 
+ */ + if (submap) + vm_map_deallocate(submap); + else + vm_object_deallocate(object); + +} + +void +vm_map_submap_pmap_clean( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_map_t sub_map, + vm_offset_t offset) +{ + vm_offset_t submap_start; + vm_offset_t submap_end; + vm_offset_t addr; + vm_size_t remove_size; + vm_map_entry_t entry; + + submap_end = offset + (end - start); + submap_start = offset; + if(vm_map_lookup_entry(sub_map, offset, &entry)) { + + remove_size = (entry->vme_end - entry->vme_start); + if(offset > entry->vme_start) + remove_size -= offset - entry->vme_start; + + + if(submap_end < entry->vme_end) { + remove_size -= + entry->vme_end - submap_end; + } + if(entry->is_sub_map) { + vm_map_submap_pmap_clean( + sub_map, + start, + start + remove_size, + entry->object.sub_map, + entry->offset); + } else { + pmap_remove(map->pmap, start, start + remove_size); + } + } + + entry = entry->vme_next; + + while((entry != vm_map_to_entry(sub_map)) + && (entry->vme_start < submap_end)) { + remove_size = (entry->vme_end - entry->vme_start); + if(submap_end < entry->vme_end) { + remove_size -= entry->vme_end - submap_end; + } + if(entry->is_sub_map) { + vm_map_submap_pmap_clean( + sub_map, + (start + entry->vme_start) - offset, + ((start + entry->vme_start) - offset) + remove_size, + entry->object.sub_map, + entry->offset); + } else { + pmap_remove(map->pmap, + (start + entry->vme_start) - offset, + ((start + entry->vme_start) - offset) + remove_size); + } + entry = entry->vme_next; + } + return; +} + +/* + * vm_map_delete: [ internal use only ] + * + * Deallocates the given address range from the target map. + * Removes all user wirings. Unwires one kernel wiring if + * VM_MAP_REMOVE_KUNWIRE is set. Waits for kernel wirings to go + * away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set. Sleeps + * interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set. + * + * This routine is called with map locked and leaves map locked. 
 */
kern_return_t
vm_map_delete(
	register vm_map_t	map,
	vm_offset_t		start,
	register vm_offset_t	end,
	int			flags)
{
	vm_map_entry_t		entry, next;
	struct vm_map_entry	*first_entry, tmp_entry;
	register vm_offset_t	s, e;
	register vm_object_t	object;
	boolean_t		need_wakeup;
	unsigned int		last_timestamp = ~0; /* unlikely value */
	int			interruptible;
	extern vm_map_t		kernel_map;

	interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
			THREAD_ABORTSAFE : THREAD_UNINT;

	/*
	 * All our DMA I/O operations in IOKit are currently done by
	 * wiring through the map entries of the task requesting the I/O.
	 * Because of this, we must always wait for kernel wirings
	 * to go away on the entries before deleting them.
	 *
	 * Any caller who wants to actually remove a kernel wiring
	 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
	 * properly remove one wiring instead of blasting through
	 * them all.
	 */
	flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;

	/*
	 * Find the start of the region, and clip it
	 */
	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each
		 * time through the loop.
		 */
		SAVE_HINT(map, entry->vme_prev);
	} else {
		/*
		 * Lookup failed: first_entry is the entry preceding
		 * "start", so its successor is the first entry in range.
		 */
		entry = first_entry->vme_next;
	}

	need_wakeup = FALSE;
	/*
	 * Step through all entries in this region
	 */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {

		vm_map_clip_end(map, entry, end);
		if (entry->in_transition) {
			/*
			 * Another thread is wiring/unwiring this entry.
			 * Let the other thread know we are waiting.
			 */
			s = entry->vme_start;
			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already unwired/deleted.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}

			/* note: this drops the map lock */
			vm_map_entry_wait(map, interruptible);

			if (interruptible &&
			    current_thread()->wait_result == THREAD_INTERRUPTED)
				/*
				 * We do not clear the needs_wakeup flag,
				 * since we cannot tell if we were the only one.
				 */
				return KERN_ABORTED;

			vm_map_lock(map);
			/*
			 * Cannot avoid a lookup here. reset timestamp.
			 */
			last_timestamp = map->timestamp;

			/*
			 * The entry could have been clipped or it
			 * may not exist anymore.  Look it up again.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				assert((map != kernel_map) &&
				       (!entry->is_sub_map));
				/*
				 * User: use the next entry
				 */
				entry = first_entry->vme_next;
			} else {
				entry = first_entry;
				SAVE_HINT(map, entry->vme_prev);
			}
			continue;
		} /* end in_transition */

		if (entry->wired_count) {
			/*
			 * Remove a kernel wiring if requested or if
			 * there are user wirings.
			 */
			if ((flags & VM_MAP_REMOVE_KUNWIRE) ||
			   (entry->user_wired_count > 0))
				entry->wired_count--;

			/* remove all user wire references */
			entry->user_wired_count = 0;

			if (entry->wired_count != 0) {
				assert((map != kernel_map) &&
				       (!entry->is_sub_map));
				/*
				 * Cannot continue.  Typical case is when
				 * a user thread has physical io pending on
				 * on this page.  Either wait for the
				 * kernel wiring to go away or return an
				 * error.
				 */
				if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {

					s = entry->vme_start;
					entry->needs_wakeup = TRUE;
					/* drops the map lock while waiting */
					vm_map_entry_wait(map, interruptible);

					if (interruptible &&
					    current_thread()->wait_result ==
					    THREAD_INTERRUPTED)
						/*
						 * We do not clear the
						 * needs_wakeup flag, since we
						 * cannot tell if we were the
						 * only one.
						 */
						return KERN_ABORTED;

					vm_map_lock(map);
					/*
					 * Cannot avoid a lookup here. reset
					 * timestamp.
					 */
					last_timestamp = map->timestamp;

					/*
					 * The entry could have been clipped or
					 * it may not exist anymore.  Look it
					 * up again.
					 */
					if (!vm_map_lookup_entry(map, s,
								&first_entry)) {
						assert((map != kernel_map) &&
						       (!entry->is_sub_map));
						/*
						 * User: use the next entry
						 */
						entry = first_entry->vme_next;
					} else {
						entry = first_entry;
						SAVE_HINT(map, entry->vme_prev);
					}
					continue;
				}
				else {
					return KERN_FAILURE;
				}
			}

			entry->in_transition = TRUE;
			/*
			 * copy current entry.  see comment in vm_map_wire()
			 */
			tmp_entry = *entry;
			s = entry->vme_start;
			e = entry->vme_end;

			/*
			 * We can unlock the map now. The in_transition
			 * state guarentees existance of the entry.
			 */
			vm_map_unlock(map);
			vm_fault_unwire(map, &tmp_entry,
				tmp_entry.object.vm_object == kernel_object,
				map->pmap);
			vm_map_lock(map);

			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Find the entry again.  It could have
				 * been clipped after we unlocked the map.
				 */
				if (!vm_map_lookup_entry(map, s, &first_entry)){
					assert((map != kernel_map) &&
					       (!entry->is_sub_map));
					first_entry = first_entry->vme_next;
				} else {
					SAVE_HINT(map, entry->vme_prev);
				}
			} else {
				SAVE_HINT(map, entry->vme_prev);
				first_entry = entry;
			}

			last_timestamp = map->timestamp;

			/* clear in_transition on every entry we unwired */
			entry = first_entry;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < tmp_entry.vme_end)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				entry = entry->vme_next;
			}
			/*
			 * We have unwired the entry(s).  Go back and
			 * delete them.
			 */
			entry = first_entry;
			continue;
		}

		/* entry is unwired */
		assert(entry->wired_count == 0);
		assert(entry->user_wired_count == 0);

		/*
		 * Remove any pmap translations before freeing the entry
		 * (kernel_object-backed entries are skipped).
		 */
		if ((!entry->is_sub_map &&
		    entry->object.vm_object != kernel_object) ||
		    entry->is_sub_map) {
			if(entry->is_sub_map) {
				if(entry->use_pmap) {
#ifndef i386
					pmap_unnest(map->pmap, entry->vme_start,
						entry->vme_end - entry->vme_start);
#endif
				} else {
					vm_map_submap_pmap_clean(
						map, entry->vme_start, entry->vme_end,
						entry->object.sub_map,
						entry->offset);
				}
			} else {
				pmap_remove(map->pmap,
					entry->vme_start, entry->vme_end);
			}
		}

		next = entry->vme_next;
		s = next->vme_start;
		last_timestamp = map->timestamp;
		vm_map_entry_delete(map, entry);
		/* vm_map_entry_delete unlocks the map */
		vm_map_lock(map);
		entry = next;

		if(entry == vm_map_to_entry(map)) {
			break;
		}
		if (last_timestamp+1 != map->timestamp) {
			/*
			 * we are responsible for deleting everything
			 * from the give space, if someone has interfered
			 * we pick up where we left off, back fills should
			 * be all right for anyone except map_delete and
			 * we have to assume that the task has been fully
			 * disabled before we get here
			 */
			if (!vm_map_lookup_entry(map, s, &entry)){
				entry = entry->vme_next;
			} else {
				SAVE_HINT(map, entry->vme_prev);
			}
			/*
			 * others can not only allocate behind us, we can
			 * also see coalesce while we don't have the map lock
			 */
			if(entry == vm_map_to_entry(map)) {
				break;
			}
			vm_map_clip_start(map, entry, s);
		}
		last_timestamp = map->timestamp;
	}

	if (map->wait_for_space)
		thread_wakeup((event_t) map);
	/*
	 * wake up anybody waiting on entries that we have already deleted.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return KERN_SUCCESS;
}

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
+ */ +kern_return_t +vm_map_remove( + register vm_map_t map, + register vm_offset_t start, + register vm_offset_t end, + register boolean_t flags) +{ + register kern_return_t result; + + vm_map_lock(map); + VM_MAP_RANGE_CHECK(map, start, end); + result = vm_map_delete(map, start, end, flags); + vm_map_unlock(map); + + return(result); +} + + +/* + * vm_map_copy_steal_pages: + * + * Steal all the pages from a vm_map_copy page_list by copying ones + * that have not already been stolen. + */ +void +vm_map_copy_steal_pages( + vm_map_copy_t copy) +{ + register vm_page_t m, new_m; + register int i; + vm_object_t object; + + assert(copy->type == VM_MAP_COPY_PAGE_LIST); + for (i = 0; i < copy->cpy_npages; i++) { + + /* + * If the page is not tabled, then it's already stolen. + */ + m = copy->cpy_page_list[i]; + if (!m->tabled) + continue; + + /* + * Page was not stolen, get a new + * one and do the copy now. + */ + while ((new_m = vm_page_grab()) == VM_PAGE_NULL) { + VM_PAGE_WAIT(); + } + + vm_page_gobble(new_m); /* mark as consumed internally */ + vm_page_copy(m, new_m); + + object = m->object; + vm_object_lock(object); + vm_page_lock_queues(); + if (!m->active && !m->inactive) + vm_page_activate(m); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(m); + vm_object_paging_end(object); + vm_object_unlock(object); + + copy->cpy_page_list[i] = new_m; + } + copy->cpy_page_loose = TRUE; +} + +/* + * vm_map_copy_page_discard: + * + * Get rid of the pages in a page_list copy. If the pages are + * stolen, they are freed. If the pages are not stolen, they + * are unbusied, and associated state is cleaned up. + */ +void +vm_map_copy_page_discard( + vm_map_copy_t copy) +{ + assert(copy->type == VM_MAP_COPY_PAGE_LIST); + while (copy->cpy_npages > 0) { + vm_page_t m; + + if ((m = copy->cpy_page_list[--(copy->cpy_npages)]) != + VM_PAGE_NULL) { + + /* + * If it's not in the table, then it's + * a stolen page that goes back + * to the free list. 
Else it belongs + * to some object, and we hold a + * paging reference on that object. + */ + if (!m->tabled) { + VM_PAGE_FREE(m); + } + else { + vm_object_t object; + + object = m->object; + + vm_object_lock(object); + vm_page_lock_queues(); + if (!m->active && !m->inactive) + vm_page_activate(m); + vm_page_unlock_queues(); + + if ((!m->busy)) { + kern_return_t kr; + kr = vm_page_unpin(m); + assert(kr == KERN_SUCCESS); + } else { + PAGE_WAKEUP_DONE(m); + } + vm_object_paging_end(object); + vm_object_unlock(object); + } + } + } +} + +/* + * Routine: vm_map_copy_discard + * + * Description: + * Dispose of a map copy object (returned by + * vm_map_copyin). + */ +void +vm_map_copy_discard( + vm_map_copy_t copy) +{ + TR_DECL("vm_map_copy_discard"); + +/* tr3("enter: copy 0x%x type %d", copy, copy->type);*/ +free_next_copy: + if (copy == VM_MAP_COPY_NULL) + return; + + switch (copy->type) { + case VM_MAP_COPY_ENTRY_LIST: + while (vm_map_copy_first_entry(copy) != + vm_map_copy_to_entry(copy)) { + vm_map_entry_t entry = vm_map_copy_first_entry(copy); + + vm_map_copy_entry_unlink(copy, entry); + vm_object_deallocate(entry->object.vm_object); + vm_map_copy_entry_dispose(copy, entry); + } + break; + case VM_MAP_COPY_OBJECT: + vm_object_deallocate(copy->cpy_object); + break; + case VM_MAP_COPY_PAGE_LIST: + + /* + * To clean this up, we have to unbusy all the pages + * and release the paging references in their objects. + */ + if (copy->cpy_npages > 0) + vm_map_copy_page_discard(copy); + + /* + * If there's a continuation, abort it. The + * abort routine releases any storage. + */ + if (vm_map_copy_has_cont(copy)) { + + assert(vm_map_copy_cont_is_valid(copy)); + /* + * Special case: recognize + * vm_map_copy_discard_cont and optimize + * here to avoid tail recursion. 
+ */ + if (copy->cpy_cont == vm_map_copy_discard_cont) { + register vm_map_copy_t new_copy; + + new_copy = (vm_map_copy_t) copy->cpy_cont_args; + zfree(vm_map_copy_zone, (vm_offset_t) copy); + copy = new_copy; + goto free_next_copy; + } else { + vm_map_copy_abort_cont(copy); + } + } + + break; + + case VM_MAP_COPY_KERNEL_BUFFER: + + /* + * The vm_map_copy_t and possibly the data buffer were + * allocated by a single call to kalloc(), i.e. the + * vm_map_copy_t was not allocated out of the zone. + */ + kfree((vm_offset_t) copy, copy->cpy_kalloc_size); + return; + } + zfree(vm_map_copy_zone, (vm_offset_t) copy); +} + +/* + * Routine: vm_map_copy_copy + * + * Description: + * Move the information in a map copy object to + * a new map copy object, leaving the old one + * empty. + * + * This is used by kernel routines that need + * to look at out-of-line data (in copyin form) + * before deciding whether to return SUCCESS. + * If the routine returns FAILURE, the original + * copy object will be deallocated; therefore, + * these routines must make a copy of the copy + * object and leave the original empty so that + * deallocation will not fail. + */ +vm_map_copy_t +vm_map_copy_copy( + vm_map_copy_t copy) +{ + vm_map_copy_t new_copy; + + if (copy == VM_MAP_COPY_NULL) + return VM_MAP_COPY_NULL; + + /* + * Allocate a new copy object, and copy the information + * from the old one into it. + */ + + new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); + *new_copy = *copy; + + if (copy->type == VM_MAP_COPY_ENTRY_LIST) { + /* + * The links in the entry chain must be + * changed to point to the new copy object. + */ + vm_map_copy_first_entry(copy)->vme_prev + = vm_map_copy_to_entry(new_copy); + vm_map_copy_last_entry(copy)->vme_next + = vm_map_copy_to_entry(new_copy); + } + + /* + * Change the old copy object into one that contains + * nothing to be deallocated. + */ + copy->type = VM_MAP_COPY_OBJECT; + copy->cpy_object = VM_OBJECT_NULL; + + /* + * Return the new object. 
 */
	return new_copy;
}

/*
 *	Routine:	vm_map_copy_discard_cont
 *
 *	Description:
 *		A version of vm_map_copy_discard that can be called
 *		as a continuation from a vm_map_copy page list.
 */
kern_return_t
vm_map_copy_discard_cont(
	vm_map_copyin_args_t	cont_args,
	vm_map_copy_t		*copy_result)	/* OUT */
{
	vm_map_copy_discard((vm_map_copy_t) cont_args);
	if (copy_result != (vm_map_copy_t *)0)
		*copy_result = VM_MAP_COPY_NULL;
	return(KERN_SUCCESS);
}

/*
 *	vm_map_overwrite_submap_recurse:
 *
 *	Verify that [dst_addr, dst_addr + dst_size) in dst_map is
 *	entirely writeable and contiguous, recursing into any submaps
 *	encountered.  Takes and releases the dst_map lock internally;
 *	restarts from scratch (start_pass_1) whenever it has to sleep
 *	on an in_transition entry.
 */
kern_return_t
vm_map_overwrite_submap_recurse(
	vm_map_t	dst_map,
	vm_offset_t	dst_addr,
	vm_size_t	dst_size)
{
	vm_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	result;
	boolean_t	encountered_sub_map = FALSE;



	/*
	 * Verify that the destination is all writeable
	 * initially.  We have to trunc the destination
	 * address and round the copy size or we'll end up
	 * splitting entries in strange ways.
	 */

	dst_end = round_page(dst_addr + dst_size);

start_pass_1:
	vm_map_lock(dst_map);
	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
		vm_map_unlock(dst_map);
		return(KERN_INVALID_ADDRESS);
	}

	vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));

	for (entry = tmp_entry;;) {
		vm_map_entry_t	next;

		next = entry->vme_next;
		while(entry->is_sub_map) {
			vm_offset_t	sub_start;
			vm_offset_t	sub_end;
			vm_offset_t	local_end;

			if (entry->in_transition) {
				/*
				 * Say that we are waiting, and wait for entry.
				 * (vm_map_entry_wait drops the map lock.)
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				goto start_pass_1;
			}

			encountered_sub_map = TRUE;
			/* translate the window into submap coordinates */
			sub_start = entry->offset;

			if(entry->vme_end < dst_end)
				sub_end = entry->vme_end;
			else
				sub_end = dst_end;
			sub_end -= entry->vme_start;
			sub_end += entry->offset;
			local_end = entry->vme_end;
			vm_map_unlock(dst_map);

			result = vm_map_overwrite_submap_recurse(
				entry->object.sub_map,
				sub_start,
				sub_end - sub_start);

			/* dst_map is unlocked on these two returns */
			if(result != KERN_SUCCESS)
				return result;
			if (dst_end <= entry->vme_end)
				return KERN_SUCCESS;
			vm_map_lock(dst_map);
			if(!vm_map_lookup_entry(dst_map, local_end,
						&tmp_entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			entry = tmp_entry;
			next = entry->vme_next;
		}

		if ( ! (entry->protection & VM_PROT_WRITE)) {
			vm_map_unlock(dst_map);
			return(KERN_PROTECTION_FAILURE);
		}

		/*
		 * If the entry is in transition, we must wait
		 * for it to exit that state.  Anything could happen
		 * when we unlock the map, so start over.
		 */
		if (entry->in_transition) {

			/*
			 * Say that we are waiting, and wait for entry.
			 */
			entry->needs_wakeup = TRUE;
			vm_map_entry_wait(dst_map, THREAD_UNINT);

			goto start_pass_1;
		}

/*
 *		our range is contained completely within this map entry
 */
		if (dst_end <= entry->vme_end) {
			vm_map_unlock(dst_map);
			return KERN_SUCCESS;
		}
/*
 *		check that range specified is contiguous region
 */
		if ((next == vm_map_to_entry(dst_map)) ||
		    (next->vme_start != entry->vme_end)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	Check for permanent objects in the destination.
		 *	Only fatal when a submap was traversed, since the
		 *	caller cannot then do a page-by-page copy.
		 */
		if ((entry->object.vm_object != VM_OBJECT_NULL) &&
		   ((!entry->object.vm_object->internal) ||
		   (entry->object.vm_object->true_share))) {
			if(encountered_sub_map) {
				vm_map_unlock(dst_map);
				return(KERN_FAILURE);
			}
		}


		entry = next;
	}/* for */
	/* NOTE(review): unreachable -- the for(;;) above only exits via return */
	vm_map_unlock(dst_map);
	return(KERN_SUCCESS);
}

/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory , it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
+ * + * It may be expensive to forward all requests that might + * overwrite permanent memory (vm_write, vm_copy) to + * uninterruptible kernel threads. This routine may be + * called by interruptible threads; however, success is + * not guaranteed -- if the request cannot be performed + * atomically and interruptibly, an error indication is + * returned. + */ + +kern_return_t +vm_map_copy_overwrite_nested( + vm_map_t dst_map, + vm_offset_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible, + pmap_t pmap) +{ + vm_offset_t dst_end; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + kern_return_t kr; + boolean_t aligned = TRUE; + boolean_t contains_permanent_objects = FALSE; + boolean_t encountered_sub_map = FALSE; + vm_offset_t base_addr; + vm_size_t copy_size; + vm_size_t total_size; + + + /* + * Check for null copy object. + */ + + if (copy == VM_MAP_COPY_NULL) + return(KERN_SUCCESS); + + /* + * Check for special kernel buffer allocated + * by new_ipc_kmsg_copyin. + */ + + if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { + return(vm_map_copyout_kernel_buffer(dst_map, &dst_addr, + copy, TRUE)); + } + + /* + * Only works for entry lists at the moment. Will + * support page lists later. + */ + + assert(copy->type == VM_MAP_COPY_ENTRY_LIST); + + if (copy->size == 0) { + vm_map_copy_discard(copy); + return(KERN_SUCCESS); + } + + /* + * Verify that the destination is all writeable + * initially. We have to trunc the destination + * address and round the copy size or we'll end up + * splitting entries in strange ways. 
+ */ + + if (!page_aligned(copy->size) || + !page_aligned (copy->offset) || + !page_aligned (dst_addr)) + { + aligned = FALSE; + dst_end = round_page(dst_addr + copy->size); + } else { + dst_end = dst_addr + copy->size; + } + +start_pass_1: + vm_map_lock(dst_map); + if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr)); + for (entry = tmp_entry;;) { + vm_map_entry_t next = entry->vme_next; + + while(entry->is_sub_map) { + vm_offset_t sub_start; + vm_offset_t sub_end; + vm_offset_t local_end; + + if (entry->in_transition) { + + /* + * Say that we are waiting, and wait for entry. + */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); + + goto start_pass_1; + } + + local_end = entry->vme_end; + if (!(entry->needs_copy)) { + /* if needs_copy we are a COW submap */ + /* in such a case we just replace so */ + /* there is no need for the follow- */ + /* ing check. */ + encountered_sub_map = TRUE; + sub_start = entry->offset; + + if(entry->vme_end < dst_end) + sub_end = entry->vme_end; + else + sub_end = dst_end; + sub_end -= entry->vme_start; + sub_end += entry->offset; + vm_map_unlock(dst_map); + + kr = vm_map_overwrite_submap_recurse( + entry->object.sub_map, + sub_start, + sub_end - sub_start); + if(kr != KERN_SUCCESS) + return kr; + vm_map_lock(dst_map); + } + + if (dst_end <= entry->vme_end) + goto start_overwrite; + if(!vm_map_lookup_entry(dst_map, local_end, + &entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + next = entry->vme_next; + } + + if ( ! (entry->protection & VM_PROT_WRITE)) { + vm_map_unlock(dst_map); + return(KERN_PROTECTION_FAILURE); + } + + /* + * If the entry is in transition, we must wait + * for it to exit that state. Anything could happen + * when we unlock the map, so start over. + */ + if (entry->in_transition) { + + /* + * Say that we are waiting, and wait for entry. 
+ */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); + + goto start_pass_1; + } + +/* + * our range is contained completely within this map entry + */ + if (dst_end <= entry->vme_end) + break; +/* + * check that range specified is contiguous region + */ + if ((next == vm_map_to_entry(dst_map)) || + (next->vme_start != entry->vme_end)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + + + /* + * Check for permanent objects in the destination. + */ + if ((entry->object.vm_object != VM_OBJECT_NULL) && + ((!entry->object.vm_object->internal) || + (entry->object.vm_object->true_share))) { + contains_permanent_objects = TRUE; + } + + entry = next; + }/* for */ + +start_overwrite: + /* + * If there are permanent objects in the destination, then + * the copy cannot be interrupted. + */ + + if (interruptible && contains_permanent_objects) { + vm_map_unlock(dst_map); + return(KERN_FAILURE); /* XXX */ + } + + /* + * + * Make a second pass, overwriting the data + * At the beginning of each loop iteration, + * the next entry to be overwritten is "tmp_entry" + * (initially, the value returned from the lookup above), + * and the starting address expected in that entry + * is "start". 
+ */ + + total_size = copy->size; + if(encountered_sub_map) { + copy_size = 0; + /* re-calculate tmp_entry since we've had the map */ + /* unlocked */ + if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + } else { + copy_size = copy->size; + } + + base_addr = dst_addr; + while(TRUE) { + /* deconstruct the copy object and do in parts */ + /* only in sub_map, interruptable case */ + vm_map_entry_t copy_entry; + vm_map_entry_t previous_prev; + vm_map_entry_t next_copy; + int nentries; + int remaining_entries; + int new_offset; + + for (entry = tmp_entry; copy_size == 0;) { + vm_map_entry_t next; + + next = entry->vme_next; + + /* tmp_entry and base address are moved along */ + /* each time we encounter a sub-map. Otherwise */ + /* entry can outpase tmp_entry, and the copy_size */ + /* may reflect the distance between them */ + /* if the current entry is found to be in transition */ + /* we will start over at the beginning or the last */ + /* encounter of a submap as dictated by base_addr */ + /* we will zero copy_size accordingly. */ + if (entry->in_transition) { + /* + * Say that we are waiting, and wait for entry. 
+ */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); + + vm_map_lock(dst_map); + if(!vm_map_lookup_entry(dst_map, base_addr, + &tmp_entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + copy_size = 0; + entry = tmp_entry; + continue; + } + if(entry->is_sub_map) { + vm_offset_t sub_start; + vm_offset_t sub_end; + vm_offset_t local_end; + + if (entry->needs_copy) { + /* if this is a COW submap */ + /* just back the range with a */ + /* anonymous entry */ + if(entry->vme_end < dst_end) + sub_end = entry->vme_end; + else + sub_end = dst_end; + if(entry->vme_start < base_addr) + sub_start = base_addr; + else + sub_start = entry->vme_start; + vm_map_clip_end( + dst_map, entry, sub_end); + vm_map_clip_start( + dst_map, entry, sub_start); + entry->is_sub_map = FALSE; + vm_map_deallocate( + entry->object.sub_map); + entry->object.sub_map = NULL; + entry->is_shared = FALSE; + entry->needs_copy = FALSE; + entry->offset = 0; + entry->protection = VM_PROT_ALL; + entry->max_protection = VM_PROT_ALL; + entry->wired_count = 0; + entry->user_wired_count = 0; + if(entry->inheritance + == VM_INHERIT_SHARE) + entry->inheritance = VM_INHERIT_COPY; + continue; + } + /* first take care of any non-sub_map */ + /* entries to send */ + if(base_addr < entry->vme_start) { + /* stuff to send */ + copy_size = + entry->vme_start - base_addr; + break; + } + sub_start = entry->offset; + + if(entry->vme_end < dst_end) + sub_end = entry->vme_end; + else + sub_end = dst_end; + sub_end -= entry->vme_start; + sub_end += entry->offset; + local_end = entry->vme_end; + vm_map_unlock(dst_map); + copy_size = sub_end - sub_start; + + /* adjust the copy object */ + if (total_size > copy_size) { + vm_size_t local_size = 0; + vm_size_t entry_size; + + nentries = 1; + new_offset = copy->offset; + copy_entry = vm_map_copy_first_entry(copy); + while(copy_entry != + vm_map_copy_to_entry(copy)){ + entry_size = copy_entry->vme_end - + copy_entry->vme_start; + 
if((local_size < copy_size) && + ((local_size + entry_size) + >= copy_size)) { + vm_map_copy_clip_end(copy, + copy_entry, + copy_entry->vme_start + + (copy_size - local_size)); + entry_size = copy_entry->vme_end - + copy_entry->vme_start; + local_size += entry_size; + new_offset += entry_size; + } + if(local_size >= copy_size) { + next_copy = copy_entry->vme_next; + copy_entry->vme_next = + vm_map_copy_to_entry(copy); + previous_prev = + copy->cpy_hdr.links.prev; + copy->cpy_hdr.links.prev = copy_entry; + copy->size = copy_size; + remaining_entries = + copy->cpy_hdr.nentries; + remaining_entries -= nentries; + copy->cpy_hdr.nentries = nentries; + break; + } else { + local_size += entry_size; + new_offset += entry_size; + nentries++; + } + copy_entry = copy_entry->vme_next; + } + } + + if((entry->use_pmap) && (pmap == NULL)) { + kr = vm_map_copy_overwrite_nested( + entry->object.sub_map, + sub_start, + copy, + interruptible, + entry->object.sub_map->pmap); + } else if (pmap != NULL) { + kr = vm_map_copy_overwrite_nested( + entry->object.sub_map, + sub_start, + copy, + interruptible, pmap); + } else { + kr = vm_map_copy_overwrite_nested( + entry->object.sub_map, + sub_start, + copy, + interruptible, + dst_map->pmap); + } + if(kr != KERN_SUCCESS) { + if(next_copy != NULL) { + copy->cpy_hdr.nentries += + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev + = previous_prev; + copy->size = total_size; + } + return kr; + } + if (dst_end <= local_end) { + return(KERN_SUCCESS); + } + /* otherwise copy no longer exists, it was */ + /* destroyed after successful copy_overwrite */ + copy = (vm_map_copy_t) + zalloc(vm_map_copy_zone); + vm_map_copy_first_entry(copy) = + vm_map_copy_last_entry(copy) = + vm_map_copy_to_entry(copy); + copy->type = VM_MAP_COPY_ENTRY_LIST; + copy->offset = new_offset; + + total_size -= copy_size; + copy_size = 0; + /* put back remainder of copy in container */ + if(next_copy != NULL) { + 
copy->cpy_hdr.nentries = remaining_entries; + copy->cpy_hdr.links.next = next_copy; + copy->cpy_hdr.links.prev = previous_prev; + copy->size = total_size; + next_copy->vme_prev = + vm_map_copy_to_entry(copy); + next_copy = NULL; + } + base_addr = local_end; + vm_map_lock(dst_map); + if(!vm_map_lookup_entry(dst_map, + local_end, &tmp_entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + entry = tmp_entry; + continue; + } + if (dst_end <= entry->vme_end) { + copy_size = dst_end - base_addr; + break; + } + + if ((next == vm_map_to_entry(dst_map)) || + (next->vme_start != entry->vme_end)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + + entry = next; + }/* for */ + + next_copy = NULL; + nentries = 1; + + /* adjust the copy object */ + if (total_size > copy_size) { + vm_size_t local_size = 0; + vm_size_t entry_size; + + new_offset = copy->offset; + copy_entry = vm_map_copy_first_entry(copy); + while(copy_entry != vm_map_copy_to_entry(copy)) { + entry_size = copy_entry->vme_end - + copy_entry->vme_start; + if((local_size < copy_size) && + ((local_size + entry_size) + >= copy_size)) { + vm_map_copy_clip_end(copy, copy_entry, + copy_entry->vme_start + + (copy_size - local_size)); + entry_size = copy_entry->vme_end - + copy_entry->vme_start; + local_size += entry_size; + new_offset += entry_size; + } + if(local_size >= copy_size) { + next_copy = copy_entry->vme_next; + copy_entry->vme_next = + vm_map_copy_to_entry(copy); + previous_prev = + copy->cpy_hdr.links.prev; + copy->cpy_hdr.links.prev = copy_entry; + copy->size = copy_size; + remaining_entries = + copy->cpy_hdr.nentries; + remaining_entries -= nentries; + copy->cpy_hdr.nentries = nentries; + break; + } else { + local_size += entry_size; + new_offset += entry_size; + nentries++; + } + copy_entry = copy_entry->vme_next; + } + } + + if (aligned) { + pmap_t local_pmap; + + if(pmap) + local_pmap = pmap; + else + local_pmap = dst_map->pmap; + + if ((kr = 
vm_map_copy_overwrite_aligned( + dst_map, tmp_entry, copy, + base_addr, local_pmap)) != KERN_SUCCESS) { + if(next_copy != NULL) { + copy->cpy_hdr.nentries += + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev = + previous_prev; + copy->size += copy_size; + } + return kr; + } + vm_map_unlock(dst_map); + } else { + /* + * Performance gain: + * + * if the copy and dst address are misaligned but the same + * offset within the page we can copy_not_aligned the + * misaligned parts and copy aligned the rest. If they are + * aligned but len is unaligned we simply need to copy + * the end bit unaligned. We'll need to split the misaligned + * bits of the region in this case ! + */ + /* ALWAYS UNLOCKS THE dst_map MAP */ + if ((kr = vm_map_copy_overwrite_unaligned( dst_map, + tmp_entry, copy, base_addr)) != KERN_SUCCESS) { + if(next_copy != NULL) { + copy->cpy_hdr.nentries += + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev = + previous_prev; + copy->size += copy_size; + } + return kr; + } + } + total_size -= copy_size; + if(total_size == 0) + break; + base_addr += copy_size; + copy_size = 0; + copy->offset = new_offset; + if(next_copy != NULL) { + copy->cpy_hdr.nentries = remaining_entries; + copy->cpy_hdr.links.next = next_copy; + copy->cpy_hdr.links.prev = previous_prev; + next_copy->vme_prev = vm_map_copy_to_entry(copy); + copy->size = total_size; + } + vm_map_lock(dst_map); + while(TRUE) { + if (!vm_map_lookup_entry(dst_map, + base_addr, &tmp_entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + if (tmp_entry->in_transition) { + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); + } else { + break; + } + } + vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr)); + + entry = tmp_entry; + } /* while */ + + /* + * Throw away the vm_map_copy object + */ + vm_map_copy_discard(copy); + + return(KERN_SUCCESS); +}/* 
vm_map_copy_overwrite */ + +kern_return_t +vm_map_copy_overwrite( + vm_map_t dst_map, + vm_offset_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible) +{ + return vm_map_copy_overwrite_nested( + dst_map, dst_addr, copy, interruptible, (pmap_t) NULL); +} + + +/* + * Routine: vm_map_copy_overwrite_unaligned + * + * Decription: + * Physically copy unaligned data + * + * Implementation: + * Unaligned parts of pages have to be physically copied. We use + * a modified form of vm_fault_copy (which understands none-aligned + * page offsets and sizes) to do the copy. We attempt to copy as + * much memory in one go as possibly, however vm_fault_copy copies + * within 1 memory object so we have to find the smaller of "amount left" + * "source object data size" and "target object data size". With + * unaligned data we don't need to split regions, therefore the source + * (copy) object should be one map entry, the target range may be split + * over multiple map entries however. In any event we are pessimistic + * about these assumptions. + * + * Assumptions: + * dst_map is locked on entry and is return locked on success, + * unlocked on error. + */ + +kern_return_t +vm_map_copy_overwrite_unaligned( + vm_map_t dst_map, + vm_map_entry_t entry, + vm_map_copy_t copy, + vm_offset_t start) +{ + vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy); + vm_map_version_t version; + vm_object_t dst_object; + vm_object_offset_t dst_offset; + vm_object_offset_t src_offset; + vm_object_offset_t entry_offset; + vm_offset_t entry_end; + vm_size_t src_size, + dst_size, + copy_size, + amount_left; + kern_return_t kr = KERN_SUCCESS; + + vm_map_lock_write_to_read(dst_map); + + src_offset = copy->offset - trunc_page_64(copy->offset); + amount_left = copy->size; +/* + * unaligned so we never clipped this entry, we need the offset into + * the vm_object not just the data. 
+ */
+ while (amount_left > 0) {
+
+ if (entry == vm_map_to_entry(dst_map)) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS;
+ }
+
+ /* "start" must be within the current map entry */
+ assert ((start>=entry->vme_start) && (start<entry->vme_end));
+
+ dst_offset = start - entry->vme_start;
+
+ dst_size = entry->vme_end - start;
+
+ src_size = copy_entry->vme_end -
+ (copy_entry->vme_start + src_offset);
+
+ if (dst_size < src_size) {
+/*
+ * we can only copy dst_size bytes before
+ * we have to get the next destination entry
+ */
+ copy_size = dst_size;
+ } else {
+/*
+ * we can only copy src_size bytes before
+ * we have to get the next source copy entry
+ */
+ copy_size = src_size;
+ }
+
+ if (copy_size > amount_left) {
+ copy_size = amount_left;
+ }
+/*
+ * Entry needs copy, create a shadow shadow object for
+ * Copy on write region.
+ */
+ if (entry->needs_copy &&
+ ((entry->protection & VM_PROT_WRITE) != 0))
+ {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ vm_object_shadow(&entry->object.vm_object,
+ &entry->offset,
+ (vm_size_t)(entry->vme_end
+ - entry->vme_start));
+ entry->needs_copy = FALSE;
+ vm_map_lock_write_to_read(dst_map);
+ }
+ dst_object = entry->object.vm_object;
+/*
+ * unlike with the virtual (aligned) copy we're going
+ * to fault on it therefore we need a target object.
+ */
+ if (dst_object == VM_OBJECT_NULL) {
+ if (vm_map_lock_read_to_write(dst_map)) {
+ vm_map_lock_read(dst_map);
+ goto RetryLookup;
+ }
+ dst_object = vm_object_allocate((vm_size_t)
+ entry->vme_end - entry->vme_start);
+ entry->object.vm_object = dst_object;
+ entry->offset = 0;
+ vm_map_lock_write_to_read(dst_map);
+ }
+/*
+ * Take an object reference and unlock map. The "entry" may
+ * disappear or change when the map is unlocked. 
+ */
+ vm_object_reference(dst_object);
+ version.main_timestamp = dst_map->timestamp;
+ entry_offset = entry->offset;
+ entry_end = entry->vme_end;
+ vm_map_unlock_read(dst_map);
+/*
+ * Copy as much as possible in one pass
+ */
+ kr = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset + src_offset,
+ &copy_size,
+ dst_object,
+ entry_offset + dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ start += copy_size;
+ src_offset += copy_size;
+ amount_left -= copy_size;
+/*
+ * Release the object reference
+ */
+ vm_object_deallocate(dst_object);
+/*
+ * If a hard error occurred, return it now
+ */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end
+ || amount_left == 0)
+ {
+/*
+ * all done with this copy entry, dispose.
+ */
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+
+ if ((copy_entry = vm_map_copy_first_entry(copy))
+ == vm_map_copy_to_entry(copy) && amount_left) {
+/*
+ * not finished copying but run out of source
+ */
+ return KERN_INVALID_ADDRESS;
+ }
+ src_offset = 0;
+ }
+
+ if (amount_left == 0)
+ return KERN_SUCCESS;
+
+ vm_map_lock_read(dst_map);
+ if (version.main_timestamp == dst_map->timestamp) {
+ if (start == entry_end) {
+/*
+ * destination region is split. Use the version
+ * information to avoid a lookup in the normal
+ * case.
+ */
+ entry = entry->vme_next;
+/*
+ * should be contiguous. Fail if we encounter
+ * a hole in the destination.
+ */
+ if (start != entry->vme_start) {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ } else {
+/*
+ * Map version check failed.
+ * we must lookup the entry because somebody
+ * might have changed the map behind our backs.
+ */
+RetryLookup:
+ if (!vm_map_lookup_entry(dst_map, start, &entry))
+ {
+ vm_map_unlock_read(dst_map);
+ return KERN_INVALID_ADDRESS ;
+ }
+ }
+ }/* while */
+
+ /* NOTREACHED ?? 
*/ + vm_map_unlock_read(dst_map); + + return KERN_SUCCESS; +}/* vm_map_copy_overwrite_unaligned */ + +/* + * Routine: vm_map_copy_overwrite_aligned + * + * Description: + * Does all the vm_trickery possible for whole pages. + * + * Implementation: + * + * If there are no permanent objects in the destination, + * and the source and destination map entry zones match, + * and the destination map entry is not shared, + * then the map entries can be deleted and replaced + * with those from the copy. The following code is the + * basic idea of what to do, but there are lots of annoying + * little details about getting protection and inheritance + * right. Should add protection, inheritance, and sharing checks + * to the above pass and make sure that no wiring is involved. + */ + +kern_return_t +vm_map_copy_overwrite_aligned( + vm_map_t dst_map, + vm_map_entry_t tmp_entry, + vm_map_copy_t copy, + vm_offset_t start, + pmap_t pmap) +{ + vm_object_t object; + vm_map_entry_t copy_entry; + vm_size_t copy_size; + vm_size_t size; + vm_map_entry_t entry; + + while ((copy_entry = vm_map_copy_first_entry(copy)) + != vm_map_copy_to_entry(copy)) + { + copy_size = (copy_entry->vme_end - copy_entry->vme_start); + + entry = tmp_entry; + if (entry == vm_map_to_entry(dst_map)) { + vm_map_unlock(dst_map); + return KERN_INVALID_ADDRESS; + } + size = (entry->vme_end - entry->vme_start); + /* + * Make sure that no holes popped up in the + * address map, and that the protection is + * still valid, in case the map was unlocked + * earlier. + */ + + if ((entry->vme_start != start) || ((entry->is_sub_map) + && !entry->needs_copy)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + assert(entry != vm_map_to_entry(dst_map)); + + /* + * Check protection again + */ + + if ( ! 
(entry->protection & VM_PROT_WRITE)) { + vm_map_unlock(dst_map); + return(KERN_PROTECTION_FAILURE); + } + + /* + * Adjust to source size first + */ + + if (copy_size < size) { + vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size); + size = copy_size; + } + + /* + * Adjust to destination size + */ + + if (size < copy_size) { + vm_map_copy_clip_end(copy, copy_entry, + copy_entry->vme_start + size); + copy_size = size; + } + + assert((entry->vme_end - entry->vme_start) == size); + assert((tmp_entry->vme_end - tmp_entry->vme_start) == size); + assert((copy_entry->vme_end - copy_entry->vme_start) == size); + + /* + * If the destination contains temporary unshared memory, + * we can perform the copy by throwing it away and + * installing the source data. + */ + + object = entry->object.vm_object; + if ((!entry->is_shared && + ((object == VM_OBJECT_NULL) || + (object->internal && !object->true_share))) || + entry->needs_copy) { + vm_object_t old_object = entry->object.vm_object; + vm_object_offset_t old_offset = entry->offset; + vm_object_offset_t offset; + + /* + * Ensure that the source and destination aren't + * identical + */ + if (old_object == copy_entry->object.vm_object && + old_offset == copy_entry->offset) { + vm_map_copy_entry_unlink(copy, copy_entry); + vm_map_copy_entry_dispose(copy, copy_entry); + + if (old_object != VM_OBJECT_NULL) + vm_object_deallocate(old_object); + + start = tmp_entry->vme_end; + tmp_entry = tmp_entry->vme_next; + continue; + } + + if (old_object != VM_OBJECT_NULL) { + if(entry->is_sub_map) { + if(entry->use_pmap) { +#ifndef i386 + pmap_unnest(dst_map->pmap, + entry->vme_start, + entry->vme_end - entry->vme_start); +#endif + } else { + vm_map_submap_pmap_clean( + dst_map, entry->vme_start, + entry->vme_end, + entry->object.sub_map, + entry->offset); + } + vm_map_deallocate( + entry->object.sub_map); + } else { + vm_object_pmap_protect( + old_object, + old_offset, + size, + pmap, + tmp_entry->vme_start, + VM_PROT_NONE); + + 
vm_object_deallocate(old_object); + } + } + + entry->is_sub_map = FALSE; + entry->object = copy_entry->object; + object = entry->object.vm_object; + entry->needs_copy = copy_entry->needs_copy; + entry->wired_count = 0; + entry->user_wired_count = 0; + offset = entry->offset = copy_entry->offset; + + vm_map_copy_entry_unlink(copy, copy_entry); + vm_map_copy_entry_dispose(copy, copy_entry); +#if BAD_OPTIMIZATION + /* + * if we turn this optimization back on + * we need to revisit our use of pmap mappings + * large copies will cause us to run out and panic + * this optimization only saved on average 2 us per page if ALL + * the pages in the source were currently mapped + * and ALL the pages in the dest were touched, if there were fewer + * than 2/3 of the pages touched, this optimization actually cost more cycles + */ + + /* + * Try to aggressively enter physical mappings + * (but avoid uninstantiated objects) + */ + if (object != VM_OBJECT_NULL) { + vm_offset_t va = entry->vme_start; + + while (va < entry->vme_end) { + register vm_page_t m; + vm_prot_t prot; + + /* + * Look for the page in the top object + */ + prot = entry->protection; + vm_object_lock(object); + vm_object_paging_begin(object); + + if ((m = vm_page_lookup(object,offset)) != + VM_PAGE_NULL && !m->busy && + !m->fictitious && + (!m->unusual || (!m->error && + !m->restart && !m->absent && + (prot & m->page_lock) == 0))) { + + m->busy = TRUE; + vm_object_unlock(object); + + /* + * Honor COW obligations + */ + if (entry->needs_copy) + prot &= ~VM_PROT_WRITE; + + PMAP_ENTER(pmap, va, m, + prot, FALSE); + + vm_object_lock(object); + vm_page_lock_queues(); + if (!m->active && !m->inactive) + vm_page_activate(m); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(m); + } + vm_object_paging_end(object); + vm_object_unlock(object); + + offset += PAGE_SIZE_64; + va += PAGE_SIZE; + } /* end while (va < entry->vme_end) */ + } /* end if (object) */ +#endif + /* + * Set up for the next iteration. 
The map
+ * has not been unlocked, so the next
+ * address should be at the end of this
+ * entry, and the next map entry should be
+ * the one following it.
+ */
+
+ start = tmp_entry->vme_end;
+ tmp_entry = tmp_entry->vme_next;
+ } else {
+ vm_map_version_t version;
+ vm_object_t dst_object = entry->object.vm_object;
+ vm_object_offset_t dst_offset = entry->offset;
+ kern_return_t r;
+
+ /*
+ * Take an object reference, and record
+ * the map version information so that the
+ * map can be safely unlocked.
+ */
+
+ vm_object_reference(dst_object);
+
+ version.main_timestamp = dst_map->timestamp;
+
+ vm_map_unlock(dst_map);
+
+ /*
+ * Copy as much as possible in one pass
+ */
+
+ copy_size = size;
+ r = vm_fault_copy(
+ copy_entry->object.vm_object,
+ copy_entry->offset,
+ &copy_size,
+ dst_object,
+ dst_offset,
+ dst_map,
+ &version,
+ THREAD_UNINT );
+
+ /*
+ * Release the object reference
+ */
+
+ vm_object_deallocate(dst_object);
+
+ /*
+ * If a hard error occurred, return it now
+ */
+
+ if (r != KERN_SUCCESS)
+ return(r);
+
+ if (copy_size != 0) {
+ /*
+ * Dispose of the copied region
+ */
+
+ vm_map_copy_clip_end(copy, copy_entry,
+ copy_entry->vme_start + copy_size);
+ vm_map_copy_entry_unlink(copy, copy_entry);
+ vm_object_deallocate(copy_entry->object.vm_object);
+ vm_map_copy_entry_dispose(copy, copy_entry);
+ }
+
+ /*
+ * Pick up in the destination map where we left off.
+ *
+ * Use the version information to avoid a lookup
+ * in the normal case. 
+ */ + + start += copy_size; + vm_map_lock(dst_map); + if ((version.main_timestamp + 1) == dst_map->timestamp) { + /* We can safely use saved tmp_entry value */ + + vm_map_clip_end(dst_map, tmp_entry, start); + tmp_entry = tmp_entry->vme_next; + } else { + /* Must do lookup of tmp_entry */ + + if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { + vm_map_unlock(dst_map); + return(KERN_INVALID_ADDRESS); + } + vm_map_clip_start(dst_map, tmp_entry, start); + } + } + }/* while */ + + return(KERN_SUCCESS); +}/* vm_map_copy_overwrite_aligned */ + +/* + * Routine: vm_map_copyin_kernel_buffer + * + * Description: + * Copy in data to a kernel buffer from space in the + * source map. The original space may be otpionally + * deallocated. + * + * If successful, returns a new copy object. + */ +kern_return_t +vm_map_copyin_kernel_buffer( + vm_map_t src_map, + vm_offset_t src_addr, + vm_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result) +{ + boolean_t flags; + vm_map_copy_t copy; + vm_size_t kalloc_size = sizeof(struct vm_map_copy) + len; + + copy = (vm_map_copy_t) kalloc(kalloc_size); + if (copy == VM_MAP_COPY_NULL) { + return KERN_RESOURCE_SHORTAGE; + } + copy->type = VM_MAP_COPY_KERNEL_BUFFER; + copy->size = len; + copy->offset = 0; + copy->cpy_kdata = (vm_offset_t) (copy + 1); + copy->cpy_kalloc_size = kalloc_size; + + if (src_map == kernel_map) { + bcopy((char *)src_addr, (char *)copy->cpy_kdata, len); + flags = VM_MAP_REMOVE_KUNWIRE | VM_MAP_REMOVE_WAIT_FOR_KWIRE | + VM_MAP_REMOVE_INTERRUPTIBLE; + } else { + kern_return_t kr; + kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len); + if (kr != KERN_SUCCESS) { + kfree((vm_offset_t)copy, kalloc_size); + return kr; + } + flags = VM_MAP_REMOVE_WAIT_FOR_KWIRE | + VM_MAP_REMOVE_INTERRUPTIBLE; + } + if (src_destroy) { + (void) vm_map_remove(src_map, trunc_page(src_addr), + round_page(src_addr + len), + flags); + } + *copy_result = copy; + return KERN_SUCCESS; +} + +/* + * Routine: vm_map_copyout_kernel_buffer 
+ *
+ *	Description:
+ *		Copy out data from a kernel buffer into space in the
+ *		destination map.  The space may be optionally dynamically
+ *		allocated.
+ *
+ *		If successful, consumes the copy object.
+ *		Otherwise, the caller is responsible for it.
+ */
+kern_return_t
+vm_map_copyout_kernel_buffer(
+	vm_map_t	map,
+	vm_offset_t	*addr,	/* IN/OUT */
+	vm_map_copy_t	copy,
+	boolean_t	overwrite)
+{
+	kern_return_t kr = KERN_SUCCESS;
+	thread_act_t thr_act = current_act();
+
+	if (!overwrite) {
+
+		/*
+		 * Allocate space in the target map for the data
+		 */
+		*addr = 0;
+		kr = vm_map_enter(map,
+				  addr,
+				  round_page(copy->size),
+				  (vm_offset_t) 0,
+				  TRUE,
+				  VM_OBJECT_NULL,
+				  (vm_object_offset_t) 0,
+				  FALSE,
+				  VM_PROT_DEFAULT,
+				  VM_PROT_ALL,
+				  VM_INHERIT_DEFAULT);
+		if (kr != KERN_SUCCESS)
+			return(kr);
+	}
+
+	/*
+	 * Copyout the data from the kernel buffer to the target map.
+	 */
+	if (thr_act->map == map) {
+
+		/*
+		 * If the target map is the current map, just do
+		 * the copy.
+		 */
+		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
+				copy->size)) {
+			kr = KERN_INVALID_ADDRESS;
+		}
+	}
+	else {
+		vm_map_t oldmap;
+
+		/*
+		 * If the target map is another map, assume the
+		 * target's address space identity for the duration
+		 * of the copy.
+		 */
+		vm_map_reference(map);
+		oldmap = vm_map_switch(map);
+
+		if (copyout((char *)copy->cpy_kdata, (char *)*addr,
+				copy->size)) {
+			kr = KERN_INVALID_ADDRESS;
+		}
+
+		(void) vm_map_switch(oldmap);
+		vm_map_deallocate(map);
+	}
+
+	kfree((vm_offset_t)copy, copy->cpy_kalloc_size);
+
+	return(kr);
+}
+
+/*
+ *	Macro:		vm_map_copy_insert
+ *
+ *	Description:
+ *		Link a copy chain ("copy") into a map at the
+ *		specified location (after "where").
+ *	Side effects:
+ *		The copy chain is destroyed.
+ *	Warning:
+ *		The arguments are evaluated multiple times.
+ */ +#define vm_map_copy_insert(map, where, copy) \ +MACRO_BEGIN \ + vm_map_t VMCI_map; \ + vm_map_entry_t VMCI_where; \ + vm_map_copy_t VMCI_copy; \ + VMCI_map = (map); \ + VMCI_where = (where); \ + VMCI_copy = (copy); \ + ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\ + ->vme_next = (VMCI_where->vme_next); \ + ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \ + ->vme_prev = VMCI_where; \ + VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \ + UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \ + zfree(vm_map_copy_zone, (vm_offset_t) VMCI_copy); \ +MACRO_END + +/* + * Routine: vm_map_copyout + * + * Description: + * Copy out a copy chain ("copy") into newly-allocated + * space in the destination map. + * + * If successful, consumes the copy object. + * Otherwise, the caller is responsible for it. + */ +kern_return_t +vm_map_copyout( + register vm_map_t dst_map, + vm_offset_t *dst_addr, /* OUT */ + register vm_map_copy_t copy) +{ + vm_size_t size; + vm_size_t adjustment; + vm_offset_t start; + vm_object_offset_t vm_copy_start; + vm_map_entry_t last; + register + vm_map_entry_t entry; + + /* + * Check for null copy object. + */ + + if (copy == VM_MAP_COPY_NULL) { + *dst_addr = 0; + return(KERN_SUCCESS); + } + + /* + * Check for special copy object, created + * by vm_map_copyin_object. 
+ */ + + if (copy->type == VM_MAP_COPY_OBJECT) { + vm_object_t object = copy->cpy_object; + kern_return_t kr; + vm_object_offset_t offset; + + offset = trunc_page_64(copy->offset); + size = round_page(copy->size + + (vm_size_t)(copy->offset - offset)); + *dst_addr = 0; + kr = vm_map_enter(dst_map, dst_addr, size, + (vm_offset_t) 0, TRUE, + object, offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) + return(kr); + /* Account for non-pagealigned copy object */ + *dst_addr += (vm_offset_t)(copy->offset - offset); + zfree(vm_map_copy_zone, (vm_offset_t) copy); + return(KERN_SUCCESS); + } + + /* + * Check for special kernel buffer allocated + * by new_ipc_kmsg_copyin. + */ + + if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { + return(vm_map_copyout_kernel_buffer(dst_map, dst_addr, + copy, FALSE)); + } + + if (copy->type == VM_MAP_COPY_PAGE_LIST) + return(vm_map_copyout_page_list(dst_map, dst_addr, copy)); + + /* + * Find space for the data + */ + + vm_copy_start = trunc_page_64(copy->offset); + size = round_page((vm_size_t)copy->offset + copy->size) + - vm_copy_start; + + StartAgain: ; + + vm_map_lock(dst_map); + assert(first_free_is_valid(dst_map)); + start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ? 
+ vm_map_min(dst_map) : last->vme_end; + + while (TRUE) { + vm_map_entry_t next = last->vme_next; + vm_offset_t end = start + size; + + if ((end > dst_map->max_offset) || (end < start)) { + if (dst_map->wait_for_space) { + if (size <= (dst_map->max_offset - dst_map->min_offset)) { + assert_wait((event_t) dst_map, + THREAD_INTERRUPTIBLE); + vm_map_unlock(dst_map); + thread_block((void (*)(void))0); + goto StartAgain; + } + } + vm_map_unlock(dst_map); + return(KERN_NO_SPACE); + } + + if ((next == vm_map_to_entry(dst_map)) || + (next->vme_start >= end)) + break; + + last = next; + start = last->vme_end; + } + + /* + * Since we're going to just drop the map + * entries from the copy into the destination + * map, they must come from the same pool. + */ + + if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) { + /* + * Mismatches occur when dealing with the default + * pager. + */ + zone_t old_zone; + vm_map_entry_t next, new; + + /* + * Find the zone that the copies were allocated from + */ + old_zone = (copy->cpy_hdr.entries_pageable) + ? vm_map_entry_zone + : vm_map_kentry_zone; + entry = vm_map_copy_first_entry(copy); + + /* + * Reinitialize the copy so that vm_map_copy_entry_link + * will work. + */ + copy->cpy_hdr.nentries = 0; + copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable; + vm_map_copy_first_entry(copy) = + vm_map_copy_last_entry(copy) = + vm_map_copy_to_entry(copy); + + /* + * Copy each entry. + */ + while (entry != vm_map_copy_to_entry(copy)) { + new = vm_map_copy_entry_create(copy); + vm_map_entry_copy_full(new, entry); + new->use_pmap = FALSE; /* clr address space specifics */ + vm_map_copy_entry_link(copy, + vm_map_copy_last_entry(copy), + new); + next = entry->vme_next; + zfree(old_zone, (vm_offset_t) entry); + entry = next; + } + } + + /* + * Adjust the addresses in the copy chain, and + * reset the region attributes. 
+ */ + + adjustment = start - vm_copy_start; + for (entry = vm_map_copy_first_entry(copy); + entry != vm_map_copy_to_entry(copy); + entry = entry->vme_next) { + entry->vme_start += adjustment; + entry->vme_end += adjustment; + + entry->inheritance = VM_INHERIT_DEFAULT; + entry->protection = VM_PROT_DEFAULT; + entry->max_protection = VM_PROT_ALL; + entry->behavior = VM_BEHAVIOR_DEFAULT; + + /* + * If the entry is now wired, + * map the pages into the destination map. + */ + if (entry->wired_count != 0) { + register vm_offset_t va; + vm_object_offset_t offset; + register vm_object_t object; + + object = entry->object.vm_object; + offset = entry->offset; + va = entry->vme_start; + + pmap_pageable(dst_map->pmap, + entry->vme_start, + entry->vme_end, + TRUE); + + while (va < entry->vme_end) { + register vm_page_t m; + + /* + * Look up the page in the object. + * Assert that the page will be found in the + * top object: + * either + * the object was newly created by + * vm_object_copy_slowly, and has + * copies of all of the pages from + * the source object + * or + * the object was moved from the old + * map entry; because the old map + * entry was wired, all of the pages + * were in the top-level object. 
+ * (XXX not true if we wire pages for + * reading) + */ + vm_object_lock(object); + vm_object_paging_begin(object); + + m = vm_page_lookup(object, offset); + if (m == VM_PAGE_NULL || m->wire_count == 0 || + m->absent) + panic("vm_map_copyout: wiring 0x%x", m); + + m->busy = TRUE; + vm_object_unlock(object); + + PMAP_ENTER(dst_map->pmap, va, m, + entry->protection, TRUE); + + vm_object_lock(object); + PAGE_WAKEUP_DONE(m); + /* the page is wired, so we don't have to activate */ + vm_object_paging_end(object); + vm_object_unlock(object); + + offset += PAGE_SIZE_64; + va += PAGE_SIZE; + } + } + else if (size <= vm_map_aggressive_enter_max) { + + register vm_offset_t va; + vm_object_offset_t offset; + register vm_object_t object; + vm_prot_t prot; + + object = entry->object.vm_object; + if (object != VM_OBJECT_NULL) { + + offset = entry->offset; + va = entry->vme_start; + while (va < entry->vme_end) { + register vm_page_t m; + + /* + * Look up the page in the object. + * Assert that the page will be found + * in the top object if at all... 
+ */ + vm_object_lock(object); + vm_object_paging_begin(object); + + if (((m = vm_page_lookup(object, + offset)) + != VM_PAGE_NULL) && + !m->busy && !m->fictitious && + !m->absent && !m->error) { + m->busy = TRUE; + vm_object_unlock(object); + + /* honor cow obligations */ + prot = entry->protection; + if (entry->needs_copy) + prot &= ~VM_PROT_WRITE; + + PMAP_ENTER(dst_map->pmap, va, + m, prot, FALSE); + + vm_object_lock(object); + vm_page_lock_queues(); + if (!m->active && !m->inactive) + vm_page_activate(m); + vm_page_unlock_queues(); + PAGE_WAKEUP_DONE(m); + } + vm_object_paging_end(object); + vm_object_unlock(object); + + offset += PAGE_SIZE_64; + va += PAGE_SIZE; + } + } + } + } + + /* + * Correct the page alignment for the result + */ + + *dst_addr = start + (copy->offset - vm_copy_start); + + /* + * Update the hints and the map size + */ + + SAVE_HINT(dst_map, vm_map_copy_last_entry(copy)); + + dst_map->size += size; + + /* + * Link in the copy + */ + + vm_map_copy_insert(dst_map, last, copy); + + vm_map_unlock(dst_map); + + /* + * XXX If wiring_required, call vm_map_pageable + */ + + return(KERN_SUCCESS); +} + +boolean_t vm_map_aggressive_enter; /* not used yet */ + +/* + * + * vm_map_copyout_page_list: + * + * Version of vm_map_copyout() for page list vm map copies. + * + */ +kern_return_t +vm_map_copyout_page_list( + register vm_map_t dst_map, + vm_offset_t *dst_addr, /* OUT */ + register vm_map_copy_t copy) +{ + vm_size_t size; + vm_offset_t start; + vm_offset_t end; + vm_object_offset_t offset; + vm_map_entry_t last; + register + vm_object_t object; + vm_page_t *page_list, m; + vm_map_entry_t entry; + vm_object_offset_t old_last_offset; + boolean_t cont_invoked, needs_wakeup; + kern_return_t result = KERN_SUCCESS; + vm_map_copy_t orig_copy; + vm_object_offset_t dst_offset; + boolean_t must_wire; + boolean_t aggressive_enter; + + /* + * Check for null copy object. 
+ */
+
+	if (copy == VM_MAP_COPY_NULL) {
+		*dst_addr = 0;
+		return(KERN_SUCCESS);
+	}
+
+	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
+
+	/*
+	 *	Make sure the pages are stolen, because we are
+	 *	going to put them in a new object.  Assume that
+	 *	all pages are identical to first in this regard.
+	 */
+
+	page_list = &copy->cpy_page_list[0];
+	if (!copy->cpy_page_loose)
+		vm_map_copy_steal_pages(copy);
+
+	/*
+	 *	Find space for the data
+	 */
+
+	size = round_page_64(copy->offset + (vm_object_offset_t)copy->size) -
+		trunc_page_64(copy->offset);
+StartAgain:
+	vm_map_lock(dst_map);
+	must_wire = dst_map->wiring_required;
+
+	assert(first_free_is_valid(dst_map));
+	last = dst_map->first_free;
+	if (last == vm_map_to_entry(dst_map)) {
+		start = vm_map_min(dst_map);
+	} else {
+		start = last->vme_end;
+	}
+
+	while (TRUE) {
+		vm_map_entry_t next = last->vme_next;
+		end = start + size;
+
+		if ((end > dst_map->max_offset) || (end < start)) {
+			if (dst_map->wait_for_space) {
+				if (size <= (dst_map->max_offset -
+					     dst_map->min_offset)) {
+					assert_wait((event_t) dst_map,
+						    THREAD_INTERRUPTIBLE);
+					vm_map_unlock(dst_map);
+					thread_block((void (*)(void))0);
+					goto StartAgain;
+				}
+			}
+			vm_map_unlock(dst_map);
+			return(KERN_NO_SPACE);
+		}
+
+		if ((next == vm_map_to_entry(dst_map)) ||
+		    (next->vme_start >= end)) {
+			break;
+		}
+
+		last = next;
+		start = last->vme_end;
+	}
+
+	/*
+	 * See whether we can avoid creating a new entry (and object) by
+	 * extending one of our neighbors.  [So far, we only attempt to
+	 * extend from below.]
+	 *
+	 * The code path below here is a bit twisted.  If any of the
+	 * extension checks fails, we branch to create_object.  If
+	 * it all works, we fall out the bottom and goto insert_pages.
+ */ + if (last == vm_map_to_entry(dst_map) || + last->vme_end != start || + last->is_shared != FALSE || + last->is_sub_map != FALSE || + last->inheritance != VM_INHERIT_DEFAULT || + last->protection != VM_PROT_DEFAULT || + last->max_protection != VM_PROT_ALL || + last->behavior != VM_BEHAVIOR_DEFAULT || + last->in_transition || + (must_wire ? (last->wired_count != 1 || + last->user_wired_count != 0) : + (last->wired_count != 0))) { + goto create_object; + } + + /* + * If this entry needs an object, make one. + */ + if (last->object.vm_object == VM_OBJECT_NULL) { + object = vm_object_allocate( + (vm_size_t)(last->vme_end - last->vme_start + size)); + last->object.vm_object = object; + last->offset = 0; + } + else { + vm_object_offset_t prev_offset = last->offset; + vm_size_t prev_size = start - last->vme_start; + vm_size_t new_size; + + /* + * This is basically vm_object_coalesce. + */ + + object = last->object.vm_object; + vm_object_lock(object); + + /* + * Try to collapse the object first + */ + vm_object_collapse(object); + + + /* + * Can't coalesce if pages not mapped to + * last may be in use anyway: + * . more than one reference + * . paged out + * . shadows another object + * . has a copy elsewhere + * . paging references (pages might be in page-list) + */ + + if ((object->ref_count > 1) || + object->pager_created || + (object->shadow != VM_OBJECT_NULL) || + (object->copy != VM_OBJECT_NULL) || + (object->paging_in_progress != 0)) { + vm_object_unlock(object); + goto create_object; + } + + /* + * Extend the object if necessary. Don't have to call + * vm_object_page_remove because the pages aren't mapped, + * and vm_page_replace will free up any old ones it encounters. + */ + new_size = prev_offset + prev_size + size; + if (new_size > object->size) { +#if MACH_PAGEMAP + /* + * We cannot extend an object that has existence info, + * since the existence info might then fail to cover + * the entire object. 
+ * + * This assertion must be true because the object + * has no pager, and we only create existence info + * for objects with pagers. + */ + assert(object->existence_map == VM_EXTERNAL_NULL); +#endif /* MACH_PAGEMAP */ + object->size = new_size; + } + vm_object_unlock(object); + } + + /* + * Coalesced the two objects - can extend + * the previous map entry to include the + * new range. + */ + dst_map->size += size; + last->vme_end = end; + UPDATE_FIRST_FREE(dst_map, dst_map->first_free); + + SAVE_HINT(dst_map, last); + + goto insert_pages; + +create_object: + + /* + * Create object + */ + object = vm_object_allocate(size); + + /* + * Create entry + */ + last = vm_map_entry_insert(dst_map, last, start, start + size, + object, 0, FALSE, FALSE, TRUE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_BEHAVIOR_DEFAULT, + VM_INHERIT_DEFAULT, (must_wire ? 1 : 0)); + + /* + * Transfer pages into new object. + * Scan page list in vm_map_copy. + */ +insert_pages: + dst_offset = copy->offset & PAGE_MASK_64; + cont_invoked = FALSE; + orig_copy = copy; + last->in_transition = TRUE; + old_last_offset = last->offset + + (start - last->vme_start); + + aggressive_enter = (size <= vm_map_aggressive_enter_max); + + for (offset = 0; offset < size; offset += PAGE_SIZE_64) { + m = *page_list; + assert(m && !m->tabled); + + /* + * Must clear busy bit in page before inserting it. + * Ok to skip wakeup logic because nobody else + * can possibly know about this page. Also set + * dirty bit on the assumption that the page is + * not a page of zeros. 
+ */
+
+		m->busy = FALSE;
+		m->dirty = TRUE;
+		vm_object_lock(object);
+		vm_page_lock_queues();
+		vm_page_replace(m, object, old_last_offset + offset);
+		if (must_wire) {
+			vm_page_wire(m);
+		} else if (aggressive_enter) {
+			vm_page_activate(m);
+		}
+		vm_page_unlock_queues();
+		vm_object_unlock(object);
+
+		if (aggressive_enter || must_wire) {
+			PMAP_ENTER(dst_map->pmap,
+				last->vme_start + m->offset - last->offset,
+				m, last->protection, must_wire);
+		}
+
+		*page_list++ = VM_PAGE_NULL;
+		assert(copy != VM_MAP_COPY_NULL);
+		assert(copy->type == VM_MAP_COPY_PAGE_LIST);
+		if (--(copy->cpy_npages) == 0 &&
+		    vm_map_copy_has_cont(copy)) {
+			vm_map_copy_t	new_copy;
+
+			/*
+			 *	Ok to unlock map because entry is
+			 *	marked in_transition.
+			 */
+			cont_invoked = TRUE;
+			vm_map_unlock(dst_map);
+			vm_map_copy_invoke_cont(copy, &new_copy, &result);
+
+			if (result == KERN_SUCCESS) {
+
+				/*
+				 *	If we got back a copy with real pages,
+				 *	steal them now.  Either all of the
+				 *	pages in the list are tabled or none
+				 *	of them are; mixtures are not possible.
+				 *
+				 *	Save original copy for consume on
+				 *	success logic at end of routine.
+				 */
+				if (copy != orig_copy)
+					vm_map_copy_discard(copy);
+
+				if ((copy = new_copy) != VM_MAP_COPY_NULL) {
+					page_list = &copy->cpy_page_list[0];
+					if (!copy->cpy_page_loose)
+						vm_map_copy_steal_pages(copy);
+				}
+			}
+			else {
+				/*
+				 *	Continuation failed.
+				 */
+				vm_map_lock(dst_map);
+				goto error;
+			}
+
+			vm_map_lock(dst_map);
+		}
+	}
+
+	*dst_addr = start + dst_offset;
+
+	/*
+	 *	Clear the in transition bits.  This is easy if we
+	 *	didn't have a continuation.
+	 */
+error:
+	needs_wakeup = FALSE;
+	if (!cont_invoked) {
+		/*
+		 *	We didn't unlock the map, so nobody could
+		 *	be waiting.
+ */ + last->in_transition = FALSE; + assert(!last->needs_wakeup); + } + else { + if (!vm_map_lookup_entry(dst_map, start, &entry)) + panic("vm_map_copyout_page_list: missing entry"); + + /* + * Clear transition bit for all constituent entries that + * were in the original entry. Also check for waiters. + */ + while ((entry != vm_map_to_entry(dst_map)) && + (entry->vme_start < end)) { + assert(entry->in_transition); + entry->in_transition = FALSE; + if (entry->needs_wakeup) { + entry->needs_wakeup = FALSE; + needs_wakeup = TRUE; + } + entry = entry->vme_next; + } + } + + if (result != KERN_SUCCESS) + (void) vm_map_delete(dst_map, start, end, VM_MAP_NO_FLAGS); + + vm_map_unlock(dst_map); + + if (needs_wakeup) + vm_map_entry_wakeup(dst_map); + + /* + * Consume on success logic. + */ + if (copy != VM_MAP_COPY_NULL && copy != orig_copy) { + zfree(vm_map_copy_zone, (vm_offset_t) copy); + } + if (result == KERN_SUCCESS) { + assert(orig_copy != VM_MAP_COPY_NULL); + assert(orig_copy->type == VM_MAP_COPY_PAGE_LIST); + zfree(vm_map_copy_zone, (vm_offset_t) orig_copy); + } + + return(result); +} + +/* + * Routine: vm_map_copyin + * + * Description: + * Copy the specified region (src_addr, len) from the + * source address space (src_map), possibly removing + * the region from the source address space (src_destroy). + * + * Returns: + * A vm_map_copy_t object (copy_result), suitable for + * insertion into another address space (using vm_map_copyout), + * copying over another address space region (using + * vm_map_copy_overwrite). If the copy is unused, it + * should be destroyed (using vm_map_copy_discard). + * + * In/out conditions: + * The source map should not be locked on entry. 
+ */ + +typedef struct submap_map { + vm_map_t parent_map; + vm_offset_t base_start; + vm_offset_t base_end; + struct submap_map *next; +} submap_map_t; + +kern_return_t +vm_map_copyin_common( + vm_map_t src_map, + vm_offset_t src_addr, + vm_size_t len, + boolean_t src_destroy, + boolean_t src_volatile, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t use_maxprot) +{ + extern int msg_ool_size_small; + + vm_map_entry_t tmp_entry; /* Result of last map lookup -- + * in multi-level lookup, this + * entry contains the actual + * vm_object/offset. + */ + register + vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ + + vm_offset_t src_start; /* Start of current entry -- + * where copy is taking place now + */ + vm_offset_t src_end; /* End of entire region to be + * copied */ + vm_offset_t base_start; /* submap fields to save offsets */ + /* in original map */ + vm_offset_t base_end; + vm_map_t base_map=src_map; + vm_map_entry_t base_entry; + boolean_t map_share=FALSE; + submap_map_t *parent_maps = NULL; + + register + vm_map_copy_t copy; /* Resulting copy */ + vm_offset_t copy_addr; + + /* + * Check for copies of zero bytes. + */ + + if (len == 0) { + *copy_result = VM_MAP_COPY_NULL; + return(KERN_SUCCESS); + } + + /* + * If the copy is sufficiently small, use a kernel buffer instead + * of making a virtual copy. The theory being that the cost of + * setting up VM (and taking C-O-W faults) dominates the copy costs + * for small regions. 
+ */ + if ((len < msg_ool_size_small) && !use_maxprot) + return vm_map_copyin_kernel_buffer(src_map, src_addr, len, + src_destroy, copy_result); + + /* + * Compute start and end of region + */ + + src_start = trunc_page(src_addr); + src_end = round_page(src_addr + len); + + XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0); + + /* + * Check that the end address doesn't overflow + */ + + if (src_end <= src_start) + if ((src_end < src_start) || (src_start != 0)) + return(KERN_INVALID_ADDRESS); + + /* + * Allocate a header element for the list. + * + * Use the start and end in the header to + * remember the endpoints prior to rounding. + */ + + copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); + vm_map_copy_first_entry(copy) = + vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy); + copy->type = VM_MAP_COPY_ENTRY_LIST; + copy->cpy_hdr.nentries = 0; + copy->cpy_hdr.entries_pageable = TRUE; + + copy->offset = src_addr; + copy->size = len; + + new_entry = vm_map_copy_entry_create(copy); + +#define RETURN(x) \ + MACRO_BEGIN \ + vm_map_unlock(src_map); \ + if (new_entry != VM_MAP_ENTRY_NULL) \ + vm_map_copy_entry_dispose(copy,new_entry); \ + vm_map_copy_discard(copy); \ + { \ + submap_map_t *ptr; \ + \ + for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \ + parent_maps=parent_maps->next; \ + kfree((vm_offset_t)ptr, sizeof(submap_map_t)); \ + } \ + } \ + MACRO_RETURN(x); \ + MACRO_END + + /* + * Find the beginning of the region. + */ + + vm_map_lock(src_map); + + if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) + RETURN(KERN_INVALID_ADDRESS); + if(!tmp_entry->is_sub_map) { + vm_map_clip_start(src_map, tmp_entry, src_start); + } + /* set for later submap fix-up */ + copy_addr = src_start; + + /* + * Go through entries until we get to the end. 
+ */ + + while (TRUE) { + register + vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ + vm_size_t src_size; /* Size of source + * map entry (in both + * maps) + */ + + register + vm_object_t src_object; /* Object to copy */ + vm_object_offset_t src_offset; + + boolean_t src_needs_copy; /* Should source map + * be made read-only + * for copy-on-write? + */ + + boolean_t new_entry_needs_copy; /* Will new entry be COW? */ + + boolean_t was_wired; /* Was source wired? */ + vm_map_version_t version; /* Version before locks + * dropped to make copy + */ + kern_return_t result; /* Return value from + * copy_strategically. + */ + while(tmp_entry->is_sub_map) { + vm_size_t submap_len; + submap_map_t *ptr; + + ptr = (submap_map_t *)kalloc(sizeof(submap_map_t)); + ptr->next = parent_maps; + parent_maps = ptr; + ptr->parent_map = src_map; + ptr->base_start = src_start; + ptr->base_end = src_end; + submap_len = tmp_entry->vme_end - src_start; + if(submap_len > (src_end-src_start)) + submap_len = src_end-src_start; + ptr->base_start += submap_len; + + src_start -= tmp_entry->vme_start; + src_start += tmp_entry->offset; + src_end = src_start + submap_len; + src_map = tmp_entry->object.sub_map; + vm_map_lock(src_map); + vm_map_unlock(ptr->parent_map); + if (!vm_map_lookup_entry( + src_map, src_start, &tmp_entry)) + RETURN(KERN_INVALID_ADDRESS); + map_share = TRUE; + if(!tmp_entry->is_sub_map) + vm_map_clip_start(src_map, tmp_entry, src_start); + src_entry = tmp_entry; + } + /* + * Create a new address map entry to hold the result. + * Fill in the fields from the appropriate source entries. + * We must unlock the source map to do this if we need + * to allocate a map entry. 
+ */ + if (new_entry == VM_MAP_ENTRY_NULL) { + version.main_timestamp = src_map->timestamp; + vm_map_unlock(src_map); + + new_entry = vm_map_copy_entry_create(copy); + + vm_map_lock(src_map); + if ((version.main_timestamp + 1) != src_map->timestamp) { + if (!vm_map_lookup_entry(src_map, src_start, + &tmp_entry)) { + RETURN(KERN_INVALID_ADDRESS); + } + vm_map_clip_start(src_map, tmp_entry, src_start); + continue; /* restart w/ new tmp_entry */ + } + } + + /* + * Verify that the region can be read. + */ + if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE && + !use_maxprot) || + (src_entry->max_protection & VM_PROT_READ) == 0) + RETURN(KERN_PROTECTION_FAILURE); + + /* + * Clip against the endpoints of the entire region. + */ + + vm_map_clip_end(src_map, src_entry, src_end); + + src_size = src_entry->vme_end - src_start; + src_object = src_entry->object.vm_object; + src_offset = src_entry->offset; + was_wired = (src_entry->wired_count != 0); + + vm_map_entry_copy(new_entry, src_entry); + new_entry->use_pmap = FALSE; /* clr address space specifics */ + + /* + * Attempt non-blocking copy-on-write optimizations. + */ + + if (src_destroy && + (src_object == VM_OBJECT_NULL || + (src_object->internal && !src_object->true_share + && !map_share))) { + /* + * If we are destroying the source, and the object + * is internal, we can move the object reference + * from the source to the copy. The copy is + * copy-on-write only if the source is. + * We make another reference to the object, because + * destroying the source entry will deallocate it. + */ + vm_object_reference(src_object); + + /* + * Copy is always unwired. vm_map_copy_entry + * set its wired count to zero. 
+ */ + + goto CopySuccessful; + } + + +RestartCopy: + XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n", + src_object, new_entry, new_entry->object.vm_object, + was_wired, 0); + if (!was_wired && + vm_object_copy_quickly( + &new_entry->object.vm_object, + src_offset, + src_size, + &src_needs_copy, + &new_entry_needs_copy)) { + + new_entry->needs_copy = new_entry_needs_copy; + + /* + * Handle copy-on-write obligations + */ + + if (src_needs_copy && !tmp_entry->needs_copy) { + if (tmp_entry->is_shared || + tmp_entry->object.vm_object->true_share || + map_share) { + /* dec ref gained in copy_quickly */ + vm_object_lock(src_object); + src_object->ref_count--; + vm_object_res_deallocate(src_object); + vm_object_unlock(src_object); + new_entry->object.vm_object = + vm_object_copy_delayed( + src_object, + src_offset, + src_size); + } else { + vm_object_pmap_protect( + src_object, + src_offset, + src_size, + (src_entry->is_shared ? + PMAP_NULL + : src_map->pmap), + src_entry->vme_start, + src_entry->protection & + ~VM_PROT_WRITE); + + tmp_entry->needs_copy = TRUE; + } + } + + /* + * The map has never been unlocked, so it's safe + * to move to the next entry rather than doing + * another lookup. + */ + + goto CopySuccessful; + } + + new_entry->needs_copy = FALSE; + + /* + * Take an object reference, so that we may + * release the map lock(s). + */ + + assert(src_object != VM_OBJECT_NULL); + vm_object_reference(src_object); + + /* + * Record the timestamp for later verification. + * Unlock the map. 
+ */
+
+		version.main_timestamp = src_map->timestamp;
+		vm_map_unlock(src_map);
+
+		/*
+		 *	Perform the copy
+		 */
+
+		if (was_wired) {
+			vm_object_lock(src_object);
+			result = vm_object_copy_slowly(
+					src_object,
+					src_offset,
+					src_size,
+					THREAD_UNINT,
+					&new_entry->object.vm_object);
+			new_entry->offset = 0;
+			new_entry->needs_copy = FALSE;
+		} else {
+			result = vm_object_copy_strategically(src_object,
+				src_offset,
+				src_size,
+				&new_entry->object.vm_object,
+				&new_entry->offset,
+				&new_entry_needs_copy);
+
+			new_entry->needs_copy = new_entry_needs_copy;
+
+		}
+
+		if (result != KERN_SUCCESS &&
+		    result != KERN_MEMORY_RESTART_COPY) {
+			vm_map_lock(src_map);
+			RETURN(result);
+		}
+
+		/*
+		 *	Throw away the extra reference
+		 */
+
+		vm_object_deallocate(src_object);
+
+		/*
+		 *	Verify that the map has not substantially
+		 *	changed while the copy was being made.
+		 */
+
+		vm_map_lock(src_map);	/* Increments timestamp once! */
+
+		if ((version.main_timestamp + 1) == src_map->timestamp)
+			goto VerificationSuccessful;
+
+		/*
+		 *	Simple version comparison failed.
+		 *
+		 *	Retry the lookup and verify that the
+		 *	same object/offset are still present.
+		 *
+		 *	[Note: a memory manager that colludes with
+		 *	the calling task can detect that we have
+		 *	cheated.  While the map was unlocked, the
+		 *	mapping could have been changed and restored.]
+		 */
+
+		if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
+			RETURN(KERN_INVALID_ADDRESS);
+		}
+
+		src_entry = tmp_entry;
+		vm_map_clip_start(src_map, src_entry, src_start);
+
+		if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
+		     !use_maxprot) ||
+		    (src_entry->max_protection & VM_PROT_READ) == 0)
+			goto VerificationFailed;
+
+		if (src_entry->vme_end < new_entry->vme_end)
+			src_size = (new_entry->vme_end = src_entry->vme_end) - src_start;
+
+		if ((src_entry->object.vm_object != src_object) ||
+		    (src_entry->offset != src_offset) ) {
+
+			/*
+			 *	Verification failed.
+			 *
+			 *	Start over with this top-level entry.
+ */ + + VerificationFailed: ; + + vm_object_deallocate(new_entry->object.vm_object); + tmp_entry = src_entry; + continue; + } + + /* + * Verification succeeded. + */ + + VerificationSuccessful: ; + + if (result == KERN_MEMORY_RESTART_COPY) + goto RestartCopy; + + /* + * Copy succeeded. + */ + + CopySuccessful: ; + + /* + * Link in the new copy entry. + */ + + vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), + new_entry); + + /* + * Determine whether the entire region + * has been copied. + */ + src_start = new_entry->vme_end; + new_entry = VM_MAP_ENTRY_NULL; + while ((src_start >= src_end) && (src_end != 0)) { + if (src_map != base_map) { + submap_map_t *ptr; + + ptr = parent_maps; + assert(ptr != NULL); + parent_maps = parent_maps->next; + vm_map_lock(ptr->parent_map); + vm_map_unlock(src_map); + src_map = ptr->parent_map; + src_start = ptr->base_start; + src_end = ptr->base_end; + if ((src_end > src_start) && + !vm_map_lookup_entry( + src_map, src_start, &tmp_entry)) + RETURN(KERN_INVALID_ADDRESS); + kfree((vm_offset_t)ptr, sizeof(submap_map_t)); + if(parent_maps == NULL) + map_share = FALSE; + src_entry = tmp_entry->vme_prev; + } else + break; + } + if ((src_start >= src_end) && (src_end != 0)) + break; + + /* + * Verify that there are no gaps in the region + */ + + tmp_entry = src_entry->vme_next; + if ((tmp_entry->vme_start != src_start) || + (tmp_entry == vm_map_to_entry(src_map))) + RETURN(KERN_INVALID_ADDRESS); + } + + /* + * If the source should be destroyed, do it now, since the + * copy was successful. + */ + if (src_destroy) { + (void) vm_map_delete(src_map, + trunc_page(src_addr), + src_end, + (src_map == kernel_map) ? + VM_MAP_REMOVE_KUNWIRE : + VM_MAP_NO_FLAGS); + } + + vm_map_unlock(src_map); + + /* Fix-up start and end points in copy. 
This is necessary */ + /* when the various entries in the copy object were picked */ + /* up from different sub-maps */ + + tmp_entry = vm_map_copy_first_entry(copy); + while (tmp_entry != vm_map_copy_to_entry(copy)) { + tmp_entry->vme_end = copy_addr + + (tmp_entry->vme_end - tmp_entry->vme_start); + tmp_entry->vme_start = copy_addr; + copy_addr += tmp_entry->vme_end - tmp_entry->vme_start; + tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next; + } + + *copy_result = copy; + return(KERN_SUCCESS); + +#undef RETURN +} + +/* + * vm_map_copyin_object: + * + * Create a copy object from an object. + * Our caller donates an object reference. + */ + +kern_return_t +vm_map_copyin_object( + vm_object_t object, + vm_object_offset_t offset, /* offset of region in object */ + vm_object_size_t size, /* size of region in object */ + vm_map_copy_t *copy_result) /* OUT */ +{ + vm_map_copy_t copy; /* Resulting copy */ + + /* + * We drop the object into a special copy object + * that contains the object directly. + */ + + copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); + copy->type = VM_MAP_COPY_OBJECT; + copy->cpy_object = object; + copy->cpy_index = 0; + copy->offset = offset; + copy->size = size; + + *copy_result = copy; + return(KERN_SUCCESS); +} + +/* + * vm_map_copyin_page_list_cont: + * + * Continuation routine for vm_map_copyin_page_list. + * + * If vm_map_copyin_page_list can't fit the entire vm range + * into a single page list object, it creates a continuation. + * When the target of the operation has used the pages in the + * initial page list, it invokes the continuation, which calls + * this routine. If an error happens, the continuation is aborted + * (abort arg to this routine is TRUE). To avoid deadlocks, the + * pages are discarded from the initial page list before invoking + * the continuation. + * + * NOTE: This is not the same sort of continuation used by + * the scheduler. 
+ */
+
+kern_return_t
+vm_map_copyin_page_list_cont(
+	vm_map_copyin_args_t	cont_args,
+	vm_map_copy_t		*copy_result)	/* OUT */
+{
+	kern_return_t	result = KERN_SUCCESS;
+	register boolean_t	abort, src_destroy, src_destroy_only;
+
+	/*
+	 * Check for cases that only require memory destruction.
+	 *
+	 * abort:            caller supplied no copy_result, so the copy is
+	 *                   being torn down rather than continued.
+	 * src_destroy:      a deferred source-destroy range is pending.
+	 * src_destroy_only: no source data remains to be copied in.
+	 */
+	abort = (copy_result == (vm_map_copy_t *) 0);
+	src_destroy = (cont_args->destroy_len != (vm_size_t) 0);
+	src_destroy_only = (cont_args->src_len == (vm_size_t) 0);
+
+	if (abort || src_destroy_only) {
+		if (src_destroy)
+			result = vm_map_remove(cont_args->map,
+			    cont_args->destroy_addr,
+			    cont_args->destroy_addr + cont_args->destroy_len,
+			    VM_MAP_NO_FLAGS);
+		if (!abort)
+			*copy_result = VM_MAP_COPY_NULL;
+	}
+	else {
+		/*
+		 * More data remains: resume the page-list copyin where the
+		 * previous installment left off.
+		 */
+		result = vm_map_copyin_page_list(cont_args->map,
+			cont_args->src_addr, cont_args->src_len,
+			cont_args->options, copy_result, TRUE);
+
+		if (src_destroy &&
+		    (cont_args->options & VM_MAP_COPYIN_OPT_STEAL_PAGES) &&
+		    vm_map_copy_has_cont(*copy_result)) {
+			vm_map_copyin_args_t	new_args;
+			/*
+			 * Transfer old destroy info.
+			 * (The new continuation inherits the still-pending
+			 * destroy range so it is not lost.)
+			 */
+			new_args = (vm_map_copyin_args_t)
+					(*copy_result)->cpy_cont_args;
+			new_args->destroy_addr = cont_args->destroy_addr;
+			new_args->destroy_len = cont_args->destroy_len;
+		}
+	}
+
+	/*
+	 * This continuation owns one reference on the map and its argument
+	 * block; both are consumed here exactly once, whatever the outcome.
+	 */
+	vm_map_deallocate(cont_args->map);
+	kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t));
+
+	return(result);
+}
+
+/*
+ *	vm_map_copyin_page_list:
+ *
+ *	This is a variant of vm_map_copyin that copies in a list of pages.
+ *	If steal_pages is TRUE, the pages are only in the returned list.
+ *	If steal_pages is FALSE, the pages are busy and still in their
+ *	objects. A continuation may be returned if not all the pages fit:
+ *	the recipient of this copy_result must be prepared to deal with it.
+ */ + +kern_return_t +vm_map_copyin_page_list( + vm_map_t src_map, + vm_offset_t src_addr, + vm_size_t len, + int options, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t is_cont) +{ + vm_map_entry_t src_entry; + vm_page_t m; + vm_offset_t src_start; + vm_offset_t src_end; + vm_size_t src_size; + register vm_object_t src_object; + register vm_object_offset_t src_offset; + vm_object_offset_t src_last_offset; + register vm_map_copy_t copy; /* Resulting copy */ + kern_return_t result = KERN_SUCCESS; + boolean_t need_map_lookup; + vm_map_copyin_args_t cont_args; + kern_return_t error_code; + vm_prot_t prot; + boolean_t wired; + boolean_t no_zero_fill; + + submap_map_t *parent_maps = NULL; + vm_map_t base_map = src_map; + + prot = (options & VM_MAP_COPYIN_OPT_VM_PROT); + no_zero_fill = (options & VM_MAP_COPYIN_OPT_NO_ZERO_FILL); + + /* + * If steal_pages is FALSE, this leaves busy pages in + * the object. A continuation must be used if src_destroy + * is true in this case (!steal_pages && src_destroy). + * + * XXX Still have a more general problem of what happens + * XXX if the same page occurs twice in a list. Deadlock + * XXX can happen if vm_fault_page was called. A + * XXX possible solution is to use a continuation if vm_fault_page + * XXX is called and we cross a map entry boundary. + */ + + /* + * Check for copies of zero bytes. + */ + + if (len == 0) { + *copy_result = VM_MAP_COPY_NULL; + return(KERN_SUCCESS); + } + + /* + * Compute start and end of region + */ + + src_start = trunc_page(src_addr); + src_end = round_page(src_addr + len); + + /* + * If the region is not page aligned, override the no_zero_fill + * argument. 
+ */ + + if (options & VM_MAP_COPYIN_OPT_NO_ZERO_FILL) { + if (!page_aligned(src_addr) || !page_aligned(src_addr +len)) + options &= ~VM_MAP_COPYIN_OPT_NO_ZERO_FILL; + } + + /* + * Check that the end address doesn't overflow + */ + + if (src_end <= src_start && (src_end < src_start || src_start != 0)) { + return KERN_INVALID_ADDRESS; + } + + /* + * Allocate a header element for the page list. + * + * Record original offset and size, as caller may not + * be page-aligned. + */ + + copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); + copy->type = VM_MAP_COPY_PAGE_LIST; + copy->cpy_npages = 0; + copy->cpy_page_loose = FALSE; + copy->offset = src_addr; + copy->size = len; + copy->cpy_cont = VM_MAP_COPY_CONT_NULL; + copy->cpy_cont_args = VM_MAP_COPYIN_ARGS_NULL; + + /* + * Find the beginning of the region. + */ + +do_map_lookup: + + vm_map_lock(src_map); + + if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) { + result = KERN_INVALID_ADDRESS; + goto error; + } + need_map_lookup = FALSE; + + /* + * Go through entries until we get to the end. 
+ */ + + while (TRUE) { + if ((src_entry->protection & prot) != prot) { + result = KERN_PROTECTION_FAILURE; + goto error; + } + + /* translate down through submaps to find the target entry */ + while(src_entry->is_sub_map) { + vm_size_t submap_len; + submap_map_t *ptr; + + ptr = (submap_map_t *)kalloc(sizeof(submap_map_t)); + ptr->next = parent_maps; + parent_maps = ptr; + ptr->parent_map = src_map; + ptr->base_start = src_start; + ptr->base_end = src_end; + submap_len = src_entry->vme_end - src_entry->vme_start; + if(submap_len > (src_end-src_start)) + submap_len = src_end-src_start; + ptr->base_start += submap_len; + + src_start -= src_entry->vme_start; + src_start += src_entry->offset; + src_end = src_start + submap_len; + src_map = src_entry->object.sub_map; + vm_map_lock(src_map); + vm_map_unlock(ptr->parent_map); + if (!vm_map_lookup_entry( + src_map, src_start, &src_entry)) { + result = KERN_INVALID_ADDRESS; + goto error; + } + vm_map_clip_start(src_map, src_entry, src_start); + } + + wired = (src_entry->wired_count != 0); + + if (src_end > src_entry->vme_end) + src_size = src_entry->vme_end - src_start; + else + src_size = src_end - src_start; + + src_object = src_entry->object.vm_object; + + /* + * If src_object is NULL, allocate it now; + * we're going to fault on it shortly. + */ + if (src_object == VM_OBJECT_NULL) { + src_object = vm_object_allocate((vm_size_t) + src_entry->vme_end - + src_entry->vme_start); + src_entry->object.vm_object = src_object; + } + else if (src_entry->needs_copy && (prot & VM_PROT_WRITE)) { + vm_object_shadow( + &src_entry->object.vm_object, + &src_entry->offset, + (vm_size_t) (src_entry->vme_end - + src_entry->vme_start)); + + src_entry->needs_copy = FALSE; + + /* reset src_object */ + src_object = src_entry->object.vm_object; + } + + /* + * calculate src_offset now, since vm_object_shadow + * may have changed src_entry->offset. 
+ */ + src_offset = src_entry->offset + (src_start - src_entry->vme_start); + + /* + * Iterate over pages. Fault in ones that aren't present. + */ + src_last_offset = src_offset + src_size; + for (; (src_offset < src_last_offset); + src_offset += PAGE_SIZE_64, src_start += PAGE_SIZE) { + + if (copy->cpy_npages == VM_MAP_COPY_PAGE_LIST_MAX) { + vm_offset_t src_delta; +make_continuation: + /* + * At this point we have the max number of + * pages busy for this thread that we're + * willing to allow. Stop here and record + * arguments for the remainder. Note: + * this means that this routine isn't atomic, + * but that's the breaks. Note that only + * the first vm_map_copy_t that comes back + * from this routine has the right offset + * and size; those from continuations are + * page rounded, and short by the amount + * already done. + * + * Reset src_end so the src_destroy + * code at the bottom doesn't do + * something stupid. + */ + + src_delta = src_end - src_start; + while (src_map != base_map) { + submap_map_t *ptr; + + if(!need_map_lookup) { + vm_map_unlock(src_map); + } + ptr = parent_maps; + assert(ptr != NULL); + parent_maps = parent_maps->next; + src_map = ptr->parent_map; + src_start = ptr->base_start - src_delta; + src_delta = ptr->base_end - src_start; + kfree((vm_offset_t)ptr, sizeof(submap_map_t)); + + need_map_lookup = TRUE; + } + src_end = src_start; + + + cont_args = (vm_map_copyin_args_t) + kalloc(sizeof(vm_map_copyin_args_data_t)); + cont_args->map = src_map; + vm_map_reference(src_map); + cont_args->src_addr = src_start; + cont_args->src_len = len - (src_start - src_addr); + if (options & VM_MAP_COPYIN_OPT_SRC_DESTROY) { + cont_args->destroy_addr = cont_args->src_addr; + cont_args->destroy_len = cont_args->src_len; + } else { + cont_args->destroy_addr = (vm_offset_t) 0; + cont_args->destroy_len = (vm_offset_t) 0; + } + cont_args->options = options; + + copy->cpy_cont_args = cont_args; + copy->cpy_cont = vm_map_copyin_page_list_cont; + + break; + } 
+ + /* + * Try to find the page of data. Have to + * fault it in if there's no page, or something + * going on with the page, or the object has + * a copy object. + */ + vm_object_lock(src_object); + vm_object_paging_begin(src_object); + if (((m = vm_page_lookup(src_object, src_offset)) != + VM_PAGE_NULL) && + !m->busy && !m->fictitious && !m->unusual && + ((prot & VM_PROT_WRITE) == 0 || + (m->object->copy == VM_OBJECT_NULL))) { + + if (!m->absent && + !(options & VM_MAP_COPYIN_OPT_STEAL_PAGES)) { + + /* + * The page is present and will not be + * replaced, prep it. Thus allowing + * mutiple access on this page + */ + kern_return_t kr; + + kr = vm_page_prep(m); + assert(kr == KERN_SUCCESS); + kr = vm_page_pin(m); + assert(kr == KERN_SUCCESS); + } else { + /* + * This is the page. Mark it busy + * and keep the paging reference on + * the object whilst we do our thing. + */ + + m->busy = TRUE; + } + } else { + vm_prot_t result_prot; + vm_page_t top_page; + kern_return_t kr; + boolean_t data_supply; + + /* + * Have to fault the page in; must + * unlock the map to do so. While + * the map is unlocked, anything + * can happen, we must lookup the + * map entry before continuing. + */ + vm_map_unlock(src_map); + need_map_lookup = TRUE; + data_supply = src_object->silent_overwrite && + (prot & VM_PROT_WRITE) && + src_start >= src_addr && + src_start + PAGE_SIZE <= + src_addr + len; + +retry: + result_prot = prot; + + XPR(XPR_VM_FAULT, + "vm_map_copyin_page_list -> vm_fault_page\n", + 0,0,0,0,0); + kr = vm_fault_page(src_object, src_offset, + prot, FALSE, THREAD_UNINT, + src_entry->offset, + src_entry->offset + + (src_entry->vme_end - + src_entry->vme_start), + VM_BEHAVIOR_SEQUENTIAL, + &result_prot, &m, &top_page, + (int *)0, + &error_code, + options & VM_MAP_COPYIN_OPT_NO_ZERO_FILL, + data_supply); + /* + * Cope with what happened. + */ + switch (kr) { + case VM_FAULT_SUCCESS: + + /* + * If we lost write access, + * try again. 
+ */ + if ((prot & VM_PROT_WRITE) && + !(result_prot & VM_PROT_WRITE)) { + vm_object_lock(src_object); + vm_object_paging_begin(src_object); + goto retry; + } + break; + case VM_FAULT_MEMORY_SHORTAGE: + VM_PAGE_WAIT(); + /* fall thru */ + case VM_FAULT_INTERRUPTED: /* ??? */ + case VM_FAULT_RETRY: + vm_object_lock(src_object); + vm_object_paging_begin(src_object); + goto retry; + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + vm_object_lock(src_object); + vm_object_paging_begin(src_object); + goto retry; + case VM_FAULT_MEMORY_ERROR: + /* + * Something broke. If this + * is a continuation, return + * a partial result if possible, + * else fail the whole thing. + * In the continuation case, the + * next continuation call will + * get this error if it persists. + */ + vm_map_lock(src_map); + if (is_cont && + copy->cpy_npages != 0) + goto make_continuation; + + result = error_code ? error_code : KERN_MEMORY_ERROR; + goto error; + } + + if (top_page != VM_PAGE_NULL) { + vm_object_lock(src_object); + VM_PAGE_FREE(top_page); + vm_object_paging_end(src_object); + vm_object_unlock(src_object); + } + + } + + /* + * The page is busy, its object is locked, and + * we have a paging reference on it. Either + * the map is locked, or need_map_lookup is + * TRUE. + */ + + /* + * Put the page in the page list. + */ + copy->cpy_page_list[copy->cpy_npages++] = m; + vm_object_unlock(m->object); + + /* + * Pmap enter support. Only used for + * device I/O for colocated server. + * + * WARNING: This code assumes that this + * option is only used for well behaved + * memory. If the mapping has changed, + * the following code will make mistakes. + * + * XXXO probably ought to do pmap_extract first, + * XXXO to avoid needless pmap_enter, but this + * XXXO can't detect protection mismatch?? + */ + + if (options & VM_MAP_COPYIN_OPT_PMAP_ENTER) { + /* + * XXX Only used on kernel map. 
+ * XXX Must not remove VM_PROT_WRITE on + * XXX an I/O only requiring VM_PROT_READ + * XXX as another I/O may be active on same page + * XXX assume that if mapping exists, it must + * XXX have the equivalent of at least VM_PROT_READ, + * XXX but don't assume it has VM_PROT_WRITE as the + * XXX pmap might not all the rights of the object + */ + assert(vm_map_pmap(src_map) == kernel_pmap); + + if ((prot & VM_PROT_WRITE) || + (pmap_extract(vm_map_pmap(src_map), + src_start) != m->phys_addr)) + + PMAP_ENTER(vm_map_pmap(src_map), src_start, + m, prot, wired); + } + if(need_map_lookup) { + need_map_lookup = FALSE; + vm_map_lock(src_map); + if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) { + result = KERN_INVALID_ADDRESS; + goto error; + } + } + } + + /* + * Verify that there are no gaps in the region + */ + src_start = src_entry->vme_end; + if (src_start < src_end) { + src_entry = src_entry->vme_next; + if (need_map_lookup) { + need_map_lookup = FALSE; + vm_map_lock(src_map); + if(!vm_map_lookup_entry(src_map, + src_start, &src_entry)) { + result = KERN_INVALID_ADDRESS; + goto error; + } + } else if (src_entry->vme_start != src_start) { + result = KERN_INVALID_ADDRESS; + goto error; + } + } + + /* + * DETERMINE whether the entire region + * has been copied. 
+ */ + + while ((src_start >= src_end) && (src_end != 0)) { + if (src_map != base_map) { + submap_map_t *ptr; + + ptr = parent_maps; + assert(ptr != NULL); + parent_maps = parent_maps->next; + src_start = ptr->base_start; + src_end = ptr->base_end; + if(need_map_lookup) { + need_map_lookup = FALSE; + } + else { + vm_map_unlock(src_map); + } + src_map = ptr->parent_map; + vm_map_lock(src_map); + if((src_start < src_end) && + (!vm_map_lookup_entry(ptr->parent_map, + src_start, &src_entry))) { + result = KERN_INVALID_ADDRESS; + kfree((vm_offset_t)ptr, sizeof(submap_map_t)); + goto error; + } + kfree((vm_offset_t)ptr, sizeof(submap_map_t)); + } else + break; + } + if ((src_start >= src_end) && (src_end != 0)) { + if (need_map_lookup) + vm_map_lock(src_map); + break; + } + + } + + /* + * If steal_pages is true, make sure all + * pages in the copy are not in any object + * We try to remove them from the original + * object, but we may have to copy them. + * + * At this point every page in the list is busy + * and holds a paging reference to its object. + * When we're done stealing, every page is busy, + * and in no object (m->tabled == FALSE). + */ + src_start = trunc_page(src_addr); + if (options & VM_MAP_COPYIN_OPT_STEAL_PAGES) { + register int i; + vm_offset_t page_vaddr; + vm_offset_t unwire_end; + vm_offset_t map_entry_end; + boolean_t share_map = FALSE; + + unwire_end = src_start; + map_entry_end = src_start; + for (i = 0; i < copy->cpy_npages; i++) { + + + /* + * Remove the page from its object if it + * can be stolen. It can be stolen if: + * + * (1) The source is being destroyed, + * the object is internal (hence + * temporary), and not shared. + * (2) The page is not precious. + * + * The not shared check consists of two + * parts: (a) there are no objects that + * shadow this object. (b) it is not the + * object in any shared map entries (i.e., + * use_shared_copy is not set). 
+ * + * The first check (a) means that we can't + * steal pages from objects that are not + * at the top of their shadow chains. This + * should not be a frequent occurrence. + * + * Stealing wired pages requires telling the + * pmap module to let go of them. + * + * NOTE: stealing clean pages from objects + * whose mappings survive requires a call to + * the pmap module. Maybe later. + */ + m = copy->cpy_page_list[i]; + src_object = m->object; + vm_object_lock(src_object); + + page_vaddr = src_start + (i * PAGE_SIZE); + if(page_vaddr > map_entry_end) { + if (!vm_map_lookup_entry(src_map, page_vaddr, &src_entry)) + share_map = TRUE; + else if (src_entry->is_sub_map) { + map_entry_end = src_entry->vme_end; + share_map = TRUE; + } else { + map_entry_end = src_entry->vme_end; + share_map = FALSE; + } + } + + + if ((options & VM_MAP_COPYIN_OPT_SRC_DESTROY) && + src_object->internal && + !src_object->true_share && + (!src_object->shadowed) && + (src_object->copy_strategy == + MEMORY_OBJECT_COPY_SYMMETRIC) && + !m->precious && + !share_map) { + + if (m->wire_count > 0) { + + assert(m->wire_count == 1); + /* + * In order to steal a wired + * page, we have to unwire it + * first. We do this inline + * here because we have the page. + * + * Step 1: Unwire the map entry. + * Also tell the pmap module + * that this piece of the + * pmap is pageable. + */ + vm_object_unlock(src_object); + if (page_vaddr >= unwire_end) { + if (!vm_map_lookup_entry(src_map, + page_vaddr, &src_entry)) + panic("vm_map_copyin_page_list: missing wired map entry"); + + vm_map_clip_start(src_map, src_entry, + page_vaddr); + vm_map_clip_end(src_map, src_entry, + src_start + src_size); + +/* revisit why this assert fails CDY + assert(src_entry->wired_count > 0); +*/ + src_entry->wired_count = 0; + src_entry->user_wired_count = 0; + unwire_end = src_entry->vme_end; + pmap_pageable(vm_map_pmap(src_map), + page_vaddr, unwire_end, TRUE); + } + + /* + * Step 2: Unwire the page. 
+ * pmap_remove handles this for us. + */ + vm_object_lock(src_object); + } + + /* + * Don't need to remove the mapping; + * vm_map_delete will handle it. + * + * Steal the page. Setting the wire count + * to zero is vm_page_unwire without + * activating the page. + */ + vm_page_lock_queues(); + vm_page_remove(m); + if (m->wire_count > 0) { + m->wire_count = 0; + vm_page_wire_count--; + } else { + VM_PAGE_QUEUES_REMOVE(m); + } + vm_page_unlock_queues(); + } else { + /* + * Have to copy this page. Have to + * unlock the map while copying, + * hence no further page stealing. + * Hence just copy all the pages. + * Unlock the map while copying; + * This means no further page stealing. + */ + vm_object_unlock(src_object); + vm_map_unlock(src_map); + vm_map_copy_steal_pages(copy); + vm_map_lock(src_map); + break; + } + + vm_object_paging_end(src_object); + vm_object_unlock(src_object); + } + + copy->cpy_page_loose = TRUE; + + /* + * If the source should be destroyed, do it now, since the + * copy was successful. + */ + + if (options & VM_MAP_COPYIN_OPT_SRC_DESTROY) { + (void) vm_map_delete(src_map, src_start, + src_end, VM_MAP_NO_FLAGS); + } + } else { + /* + * Not stealing pages leaves busy or prepped pages in the map. + * This will cause source destruction to hang. Use + * a continuation to prevent this. 
+ */ + if ((options & VM_MAP_COPYIN_OPT_SRC_DESTROY) && + !vm_map_copy_has_cont(copy)) { + cont_args = (vm_map_copyin_args_t) + kalloc(sizeof(vm_map_copyin_args_data_t)); + vm_map_reference(src_map); + cont_args->map = src_map; + cont_args->src_addr = (vm_offset_t) 0; + cont_args->src_len = (vm_size_t) 0; + cont_args->destroy_addr = src_start; + cont_args->destroy_len = src_end - src_start; + cont_args->options = options; + + copy->cpy_cont_args = cont_args; + copy->cpy_cont = vm_map_copyin_page_list_cont; + } + } + + vm_map_unlock(src_map); + + *copy_result = copy; + return(result); + +error: + { + submap_map_t *ptr; + + vm_map_unlock(src_map); + vm_map_copy_discard(copy); + + for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { + parent_maps=parent_maps->next; + kfree((vm_offset_t)ptr, sizeof(submap_map_t)); + } + return(result); + } +} + +void +vm_map_fork_share( + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map) +{ + vm_object_t object; + vm_map_entry_t new_entry; + kern_return_t result; + + /* + * New sharing code. New map entry + * references original object. Internal + * objects use asynchronous copy algorithm for + * future copies. First make sure we have + * the right object. If we need a shadow, + * or someone else already has one, then + * make a new shadow and share it. 
+ */ + + object = old_entry->object.vm_object; + if (old_entry->is_sub_map) { + assert(old_entry->wired_count == 0); +#ifndef i386 + if(old_entry->use_pmap) { + result = pmap_nest(new_map->pmap, + (old_entry->object.sub_map)->pmap, + old_entry->vme_start, + old_entry->vme_end - old_entry->vme_start); + if(result) + panic("vm_map_fork_share: pmap_nest failed!"); + } +#endif + } else if (object == VM_OBJECT_NULL) { + object = vm_object_allocate((vm_size_t)(old_entry->vme_end - + old_entry->vme_start)); + old_entry->offset = 0; + old_entry->object.vm_object = object; + assert(!old_entry->needs_copy); + } else if (object->copy_strategy != + MEMORY_OBJECT_COPY_SYMMETRIC) { + + /* + * We are already using an asymmetric + * copy, and therefore we already have + * the right object. + */ + + assert(! old_entry->needs_copy); + } + else if (old_entry->needs_copy || /* case 1 */ + object->shadowed || /* case 2 */ + (!object->true_share && /* case 3 */ + !old_entry->is_shared && + (object->size > + (vm_size_t)(old_entry->vme_end - + old_entry->vme_start)))) { + + /* + * We need to create a shadow. + * There are three cases here. + * In the first case, we need to + * complete a deferred symmetrical + * copy that we participated in. + * In the second and third cases, + * we need to create the shadow so + * that changes that we make to the + * object do not interfere with + * any symmetrical copies which + * have occured (case 2) or which + * might occur (case 3). + * + * The first case is when we had + * deferred shadow object creation + * via the entry->needs_copy mechanism. + * This mechanism only works when + * only one entry points to the source + * object, and we are about to create + * a second entry pointing to the + * same object. The problem is that + * there is no way of mapping from + * an object to the entries pointing + * to it. 
(Deferred shadow creation + * works with one entry because occurs + * at fault time, and we walk from the + * entry to the object when handling + * the fault.) + * + * The second case is when the object + * to be shared has already been copied + * with a symmetric copy, but we point + * directly to the object without + * needs_copy set in our entry. (This + * can happen because different ranges + * of an object can be pointed to by + * different entries. In particular, + * a single entry pointing to an object + * can be split by a call to vm_inherit, + * which, combined with task_create, can + * result in the different entries + * having different needs_copy values.) + * The shadowed flag in the object allows + * us to detect this case. The problem + * with this case is that if this object + * has or will have shadows, then we + * must not perform an asymmetric copy + * of this object, since such a copy + * allows the object to be changed, which + * will break the previous symmetrical + * copies (which rely upon the object + * not changing). In a sense, the shadowed + * flag says "don't change this object". + * We fix this by creating a shadow + * object for this object, and sharing + * that. This works because we are free + * to change the shadow object (and thus + * to use an asymmetric copy strategy); + * this is also semantically correct, + * since this object is temporary, and + * therefore a copy of the object is + * as good as the object itself. (This + * is not true for permanent objects, + * since the pager needs to see changes, + * which won't happen if the changes + * are made to a copy.) + * + * The third case is when the object + * to be shared has parts sticking + * outside of the entry we're working + * with, and thus may in the future + * be subject to a symmetrical copy. + * (This is a preemptive version of + * case 2.) 
+ */ + + assert(!(object->shadowed && old_entry->is_shared)); + vm_object_shadow(&old_entry->object.vm_object, + &old_entry->offset, + (vm_size_t) (old_entry->vme_end - + old_entry->vme_start)); + + /* + * If we're making a shadow for other than + * copy on write reasons, then we have + * to remove write permission. + */ + +/* CDY FIX this! page_protect! */ + if (!old_entry->needs_copy && + (old_entry->protection & VM_PROT_WRITE)) { + if(old_entry->is_sub_map && old_entry->use_pmap) { + pmap_protect(old_entry->object.sub_map->pmap, + old_entry->vme_start, + old_entry->vme_end, + old_entry->protection & ~VM_PROT_WRITE); + } else { + pmap_protect(vm_map_pmap(old_map), + old_entry->vme_start, + old_entry->vme_end, + old_entry->protection & ~VM_PROT_WRITE); + } + } + + old_entry->needs_copy = FALSE; + object = old_entry->object.vm_object; + } + + /* + * If object was using a symmetric copy strategy, + * change its copy strategy to the default + * asymmetric copy strategy, which is copy_delay + * in the non-norma case and copy_call in the + * norma case. Bump the reference count for the + * new entry. + */ + + if(old_entry->is_sub_map) { + vm_map_lock(old_entry->object.sub_map); + vm_map_reference(old_entry->object.sub_map); + vm_map_unlock(old_entry->object.sub_map); + } else { + vm_object_lock(object); + object->ref_count++; + vm_object_res_reference(object); + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } + vm_object_unlock(object); + } + + /* + * Clone the entry, using object ref from above. + * Mark both entries as shared. + */ + + new_entry = vm_map_entry_create(new_map); + vm_map_entry_copy(new_entry, old_entry); + old_entry->is_shared = TRUE; + new_entry->is_shared = TRUE; + + /* + * Insert the entry into the new map -- we + * know we're inserting at the end of the new + * map. 
+ */
+
+	vm_map_entry_link(new_map, vm_map_last_entry(new_map), new_entry);
+
+	/*
+	 *	Update the physical map
+	 */
+
+	if (old_entry->is_sub_map) {
+		/* Bill Angell pmap support goes here */
+	} else {
+		pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
+			old_entry->vme_end - old_entry->vme_start,
+			old_entry->vme_start);
+	}
+}
+
+/*
+ *	vm_map_fork_copy:
+ *
+ *	Copy the region at *old_entry_p from old_map into new_map by a
+ *	symmetric (maxprot) copyin, then advance *old_entry_p past the
+ *	copied region.  Returns TRUE on success, FALSE if the region
+ *	could not be copied (the caller then simply skips it).
+ *
+ *	old_map must be locked on entry and is locked on return, but is
+ *	unlocked while the copy is being made.
+ */
+boolean_t
+vm_map_fork_copy(
+	vm_map_t	old_map,
+	vm_map_entry_t	*old_entry_p,
+	vm_map_t	new_map)
+{
+	vm_map_entry_t old_entry = *old_entry_p;
+	vm_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
+	vm_offset_t start = old_entry->vme_start;
+	vm_map_copy_t copy;
+	vm_map_entry_t last = vm_map_last_entry(new_map);
+
+	vm_map_unlock(old_map);
+	/*
+	 *	Use maxprot version of copyin because we
+	 *	care about whether this memory can ever
+	 *	be accessed, not just whether it's accessible
+	 *	right now.
+	 */
+	if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
+	    != KERN_SUCCESS) {
+		/*
+		 *	The map might have changed while it
+		 *	was unlocked, check it again.  Skip
+		 *	any blank space or permanently
+		 *	unreadable region.
+		 *
+		 *	NOTE: the AND must be parenthesized; == binds
+		 *	tighter than &, so the unparenthesized form
+		 *	always evaluated to FALSE and unreadable
+		 *	regions were never skipped.
+		 */
+		vm_map_lock(old_map);
+		if (!vm_map_lookup_entry(old_map, start, &last) ||
+		    (last->max_protection & VM_PROT_READ) ==
+		    VM_PROT_NONE) {
+			last = last->vme_next;
+		}
+		*old_entry_p = last;
+
+		/*
+		 *	XXX	For some error returns, want to
+		 *	XXX	skip to the next element.  Note
+		 *	that INVALID_ADDRESS and
+		 *	PROTECTION_FAILURE are handled above.
+		 */
+
+		return FALSE;
+	}
+
+	/*
+	 *	Insert the copy into the new map
+	 */
+
+	vm_map_copy_insert(new_map, last, copy);
+
+	/*
+	 *	Pick up the traversal at the end of
+	 *	the copied region.
+	 */
+
+	vm_map_lock(old_map);
+	start += entry_size;
+	if (! vm_map_lookup_entry(old_map, start, &last)) {
+		last = last->vme_next;
+	} else {
+		vm_map_clip_start(old_map, last, start);
+	}
+	*old_entry_p = last;
+
+	return TRUE;
+}
+
+/*
+ *	vm_map_fork:
+ *
+ *	Create and return a new map based on the old
+ *	map, according to the inheritance values on the
+ *	regions in that map.
+ *
+ *	The source map must not be locked.
+ */
+vm_map_t
+vm_map_fork(
+	vm_map_t	old_map)
+{
+	pmap_t		new_pmap = pmap_create((vm_size_t) 0);
+	vm_map_t	new_map;
+	vm_map_entry_t	old_entry;
+	vm_size_t	new_size = 0, entry_size;
+	vm_map_entry_t	new_entry;
+	boolean_t	src_needs_copy;
+	boolean_t	new_entry_needs_copy;
+
+	vm_map_reference_swap(old_map);
+	vm_map_lock(old_map);
+
+	new_map = vm_map_create(new_pmap,
+			old_map->min_offset,
+			old_map->max_offset,
+			old_map->hdr.entries_pageable);
+
+	for (
+	    old_entry = vm_map_first_entry(old_map);
+	    old_entry != vm_map_to_entry(old_map);
+	    ) {
+
+		entry_size = old_entry->vme_end - old_entry->vme_start;
+
+		switch (old_entry->inheritance) {
+		case VM_INHERIT_NONE:
+			break;
+
+		case VM_INHERIT_SHARE:
+			vm_map_fork_share(old_map, old_entry, new_map);
+			new_size += entry_size;
+			break;
+
+		case VM_INHERIT_COPY:
+
+			/*
+			 *	Inline the copy_quickly case;
+			 *	upon failure, fall back on call
+			 *	to vm_map_fork_copy.
+			 */
+
+			if(old_entry->is_sub_map)
+				break;
+			if (old_entry->wired_count != 0) {
+				goto slow_vm_map_fork_copy;
+			}
+
+			new_entry = vm_map_entry_create(new_map);
+			vm_map_entry_copy(new_entry, old_entry);
+			/* clear address space specifics */
+			new_entry->use_pmap = FALSE;
+
+			if (!
vm_object_copy_quickly( + &new_entry->object.vm_object, + old_entry->offset, + (old_entry->vme_end - + old_entry->vme_start), + &src_needs_copy, + &new_entry_needs_copy)) { + vm_map_entry_dispose(new_map, new_entry); + goto slow_vm_map_fork_copy; + } + + /* + * Handle copy-on-write obligations + */ + + if (src_needs_copy && !old_entry->needs_copy) { + vm_object_pmap_protect( + old_entry->object.vm_object, + old_entry->offset, + (old_entry->vme_end - + old_entry->vme_start), + ((old_entry->is_shared + || old_entry->is_sub_map) + ? PMAP_NULL : + old_map->pmap), + old_entry->vme_start, + old_entry->protection & ~VM_PROT_WRITE); + + old_entry->needs_copy = TRUE; + } + new_entry->needs_copy = new_entry_needs_copy; + + /* + * Insert the entry at the end + * of the map. + */ + + vm_map_entry_link(new_map, vm_map_last_entry(new_map), + new_entry); + new_size += entry_size; + break; + + slow_vm_map_fork_copy: + if (vm_map_fork_copy(old_map, &old_entry, new_map)) { + new_size += entry_size; + } + continue; + } + old_entry = old_entry->vme_next; + } + + new_map->size = new_size; + vm_map_unlock(old_map); + vm_map_deallocate(old_map); + + return(new_map); +} + + +/* + * vm_map_lookup_locked: + * + * Finds the VM object, offset, and + * protection for a given virtual address in the + * specified map, assuming a page fault of the + * type specified. + * + * Returns the (object, offset, protection) for + * this address, whether it is wired down, and whether + * this map has the only reference to the data in question. + * In order to later verify this lookup, a "version" + * is returned. + * + * The map MUST be locked by the caller and WILL be + * locked on exit. In order to guarantee the + * existence of the returned object, it is returned + * locked. + * + * If a lookup is requested with "write protection" + * specified, the map may be changed to perform virtual + * copying operations, although the data referenced will + * remain the same. 
 */
kern_return_t
vm_map_lookup_locked(
	vm_map_t		*var_map,	/* IN/OUT */
	register vm_offset_t	vaddr,
	register vm_prot_t	fault_type,
	vm_map_version_t	*out_version,	/* OUT */
	vm_object_t		*object,	/* OUT */
	vm_object_offset_t	*offset,	/* OUT */
	vm_prot_t		*out_prot,	/* OUT */
	boolean_t		*wired,		/* OUT */
	int			*behavior,	/* OUT */
	vm_object_offset_t	*lo_offset,	/* OUT */
	vm_object_offset_t	*hi_offset,	/* OUT */
	vm_map_t		*pmap_map)
{
	vm_map_entry_t			entry;
	register vm_map_t		map = *var_map;
	vm_map_t			old_map = *var_map;
	vm_map_t			cow_sub_map_parent = VM_MAP_NULL;
	vm_offset_t			cow_parent_vaddr;
	vm_offset_t			old_start;
	vm_offset_t			old_end;
	register vm_prot_t		prot;

	*pmap_map = map;
	RetryLookup: ;

	/*
	 * If the map has an interesting hint, try it before calling
	 * full blown lookup routine.
	 */

	/* Hint is protected by the map's spin lock, not the map lock. */
	mutex_lock(&map->s_lock);
	entry = map->hint;
	mutex_unlock(&map->s_lock);

	if ((entry == vm_map_to_entry(map)) ||
	    (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
		vm_map_entry_t	tmp_entry;

		/*
		 * Entry was either not a valid hint, or the vaddr
		 * was not contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
			/*
			 * Unwind any locks we may still hold on maps other
			 * than the one we are returning in *pmap_map.
			 */
			if((cow_sub_map_parent) && (cow_sub_map_parent != map))
				vm_map_unlock(cow_sub_map_parent);
			if((*pmap_map != map)
			   && (*pmap_map != cow_sub_map_parent))
				vm_map_unlock(*pmap_map);
			return KERN_INVALID_ADDRESS;
		}

		entry = tmp_entry;
	}
	/* Remember the top-level entry's extent before we descend. */
	if(map == old_map) {
		old_start = entry->vme_start;
		old_end = entry->vme_end;
	}

	/*
	 * Handle submaps.  Drop lock on upper map, submap is
	 * returned locked.
	 */

submap_recurse:
	if (entry->is_sub_map) {
		vm_offset_t		local_vaddr;
		vm_offset_t		end_delta;
		vm_offset_t		start_delta;
		/* NOTE(review): object_start_delta is declared but never
		 * used anywhere in this function. */
		vm_offset_t		object_start_delta;
		vm_map_entry_t		submap_entry;
		boolean_t		mapped_needs_copy=FALSE;

		local_vaddr = vaddr;

		if ((!entry->needs_copy) && (entry->use_pmap)) {
			/* if pmap_map equals map we unlock below */
			if ((*pmap_map != map) &&
					(*pmap_map != cow_sub_map_parent))
				vm_map_unlock(*pmap_map);
			*pmap_map = entry->object.sub_map;
		}

		if(entry->needs_copy) {
			if (!mapped_needs_copy) {
				/* Need the map write-locked for the COW
				 * setup; on upgrade failure retry from the
				 * top with the lock reacquired. */
				if (vm_map_lock_read_to_write(map)) {
					vm_map_lock_read(map);
					if(*pmap_map == entry->object.sub_map)
						*pmap_map = map;
					goto RetryLookup;
				}
				vm_map_lock_read(entry->object.sub_map);
				cow_sub_map_parent = map;
				/* reset base to map before cow object */
				/* this is the map which will accept */
				/* the new cow object */
				old_start = entry->vme_start;
				old_end = entry->vme_end;
				cow_parent_vaddr = vaddr;
				mapped_needs_copy = TRUE;
			} else {
				vm_map_lock_read(entry->object.sub_map);
				if((cow_sub_map_parent != map) &&
							(*pmap_map != map))
					vm_map_unlock(map);
			}
		} else {
			vm_map_lock_read(entry->object.sub_map);
			/* leave map locked if it is a target */
			/* cow sub_map above otherwise, just  */
			/* follow the maps down to the object */
			/* here we unlock knowing we are not  */
			/* revisiting the map.  */
			if((*pmap_map != map) && (map != cow_sub_map_parent))
				vm_map_unlock_read(map);
		}

		*var_map = map = entry->object.sub_map;

		/* calculate the offset in the submap for vaddr */
		local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;

RetrySubMap:
		if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
			if((cow_sub_map_parent) && (cow_sub_map_parent != map)){
				vm_map_unlock(cow_sub_map_parent);
			}
			if((*pmap_map != map)
			   && (*pmap_map != cow_sub_map_parent)) {
				vm_map_unlock(*pmap_map);
			}
			*pmap_map = map;
			return KERN_INVALID_ADDRESS;
		}
		/* find the attenuated shadow of the underlying object */
		/* on our target map */

		/* in english the submap object may extend beyond the     */
		/* region mapped by the entry or, may only fill a portion */
		/* of it.  For our purposes, we only care if the object   */
		/* doesn't fill.  In this case the area which will        */
		/* ultimately be clipped in the top map will only need    */
		/* to be as big as the portion of the underlying entry    */
		/* which is mapped */
		start_delta = submap_entry->vme_start > entry->offset ?
			submap_entry->vme_start - entry->offset : 0;

		end_delta =
			(entry->offset + start_delta + (old_end - old_start)) <=
			submap_entry->vme_end ?
			0 : (entry->offset +
				(old_end - old_start))
				- submap_entry->vme_end;

		/* Shrink the remembered range to the part actually
		 * backed by the submap entry. */
		old_start += start_delta;
		old_end -= end_delta;

		if(submap_entry->is_sub_map) {
			entry = submap_entry;
			vaddr = local_vaddr;
			goto submap_recurse;
		}

		if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {

			vm_object_t	copy_object;
			vm_offset_t	local_start;
			vm_offset_t	local_end;

			if (vm_map_lock_read_to_write(map)) {
				vm_map_lock_read(map);
				/* Undo the delta adjustments before
				 * retrying the submap lookup. */
				old_start -= start_delta;
				old_end += end_delta;
				goto RetrySubMap;
			}
			if (submap_entry->object.vm_object == VM_OBJECT_NULL) {
				submap_entry->object.vm_object =
					vm_object_allocate(
						(vm_size_t)
						(submap_entry->vme_end
						- submap_entry->vme_start));
					submap_entry->offset = 0;
			}
			local_start =  local_vaddr -
					(cow_parent_vaddr - old_start);
			local_end = local_vaddr +
					(old_end - cow_parent_vaddr);
			vm_map_clip_start(map, submap_entry, local_start);
			vm_map_clip_end(map, submap_entry, local_end);

			/* This is the COW case, lets connect */
			/* an entry in our space to the underlying */
			/* object in the submap, bypassing the  */
			/* submap. */

			/* set up shadow object */
			copy_object = submap_entry->object.vm_object;
			submap_entry->object.vm_object->shadowed = TRUE;
			submap_entry->needs_copy = TRUE;
			/* Write-protect existing mappings so the first
			 * write after this point faults and copies. */
			vm_object_pmap_protect(submap_entry->object.vm_object,
				submap_entry->offset,
				submap_entry->vme_end -
					submap_entry->vme_start,
				submap_entry->is_shared ?
						PMAP_NULL : map->pmap,
				submap_entry->vme_start,
				submap_entry->protection &
						~VM_PROT_WRITE);


			/* This works diffently than the   */
			/* normal submap case. We go back  */
			/* to the parent of the cow map and*/
			/* clip out the target portion of  */
			/* the sub_map, substituting the   */
			/* new copy object,                */

			vm_map_unlock(map);
			local_start = old_start;
			local_end = old_end;
			map = cow_sub_map_parent;
			*var_map = cow_sub_map_parent;
			vaddr = cow_parent_vaddr;
			cow_sub_map_parent = NULL;

			if(!vm_map_lookup_entry(map,
					vaddr, &entry)) {
				vm_object_deallocate(
					copy_object);
				vm_map_lock_write_to_read(map);
				return KERN_INVALID_ADDRESS;
			}

			/* clip out the portion of space */
			/* mapped by the sub map which   */
			/* corresponds to the underlying */
			/* object */
			vm_map_clip_start(map, entry, local_start);
			vm_map_clip_end(map, entry, local_end);


			/* substitute copy object for */
			/* shared map entry           */
			vm_map_deallocate(entry->object.sub_map);
			entry->is_sub_map = FALSE;
			vm_object_reference(copy_object);
			entry->object.vm_object = copy_object;
			entry->offset = submap_entry->offset;

			entry->protection |= VM_PROT_WRITE;
			entry->max_protection |= VM_PROT_WRITE;
			entry->needs_copy = TRUE;
			if(entry->inheritance == VM_INHERIT_SHARE)
				entry->inheritance = VM_INHERIT_COPY;
			if (map != old_map)
				entry->is_shared = TRUE;

			vm_map_lock_write_to_read(map);
		} else {
			if((cow_sub_map_parent)
				&& (cow_sub_map_parent != *pmap_map)
					&& (cow_sub_map_parent != map)) {
				vm_map_unlock(cow_sub_map_parent);
			}
			entry = submap_entry;
			vaddr = local_vaddr;
		}
	}

	/*
	 *	Check whether this task is allowed to have
	 *	this page.
	 */

	prot = entry->protection;
	if ((fault_type & (prot)) != fault_type) {
		if (*pmap_map != map) {
			vm_map_unlock(*pmap_map);
		}
		*pmap_map = map;
		return KERN_PROTECTION_FAILURE;
	}

	/*
	 *	If this page is not pageable, we have to get
	 *	it for all possible accesses.
	 */

	/* Assignment inside the condition is intentional: record
	 * wiredness and test it in one step. */
	if (*wired = (entry->wired_count != 0))
		prot = fault_type = entry->protection;

	/*
	 *	If the entry was copy-on-write, we either ...
	 */

	if (entry->needs_copy) {
		/*
		 *	If we want to write the page, we may as well
		 *	handle that now since we've got the map locked.
		 *
		 *	If we don't need to write the page, we just
		 *	demote the permissions allowed.
		 */

		if (fault_type & VM_PROT_WRITE || *wired) {
			/*
			 *	Make a new object, and place it in the
			 *	object chain.  Note that no new references
			 *	have appeared -- one just moved from the
			 *	map to the new object.
			 */

			if (vm_map_lock_read_to_write(map)) {
				vm_map_lock_read(map);
				goto RetryLookup;
			}
			vm_object_shadow(&entry->object.vm_object,
					 &entry->offset,
					 (vm_size_t) (entry->vme_end -
						      entry->vme_start));

			entry->object.vm_object->shadowed = TRUE;
			entry->needs_copy = FALSE;
			vm_map_lock_write_to_read(map);
		}
		else {
			/*
			 *	We're attempting to read a copy-on-write
			 *	page -- don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}

	/*
	 *	Create an object if necessary.
	 */
	if (entry->object.vm_object == VM_OBJECT_NULL) {

		if (vm_map_lock_read_to_write(map)) {
			vm_map_lock_read(map);
			goto RetryLookup;
		}

		entry->object.vm_object = vm_object_allocate(
			(vm_size_t)(entry->vme_end - entry->vme_start));
		entry->offset = 0;
		vm_map_lock_write_to_read(map);
	}

	/*
	 *	Return the object/offset from this entry.  If the entry
	 *	was copy-on-write or empty, it has been fixed up.  Also
	 *	return the protection.
	 */

	*offset = (vaddr - entry->vme_start) + entry->offset;
	*object = entry->object.vm_object;
	*out_prot = prot;
	*behavior = entry->behavior;
	*lo_offset = entry->offset;
	*hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;

	/*
	 *	Lock the object to prevent it from disappearing
	 */

	vm_object_lock(*object);

	/*
	 *	Save the version number
	 */

	out_version->main_timestamp = map->timestamp;

	return KERN_SUCCESS;
}


/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.
If successful, the map + * will not change until vm_map_verify_done() is called. + */ +boolean_t +vm_map_verify( + register vm_map_t map, + register vm_map_version_t *version) /* REF */ +{ + boolean_t result; + + vm_map_lock_read(map); + result = (map->timestamp == version->main_timestamp); + + if (!result) + vm_map_unlock_read(map); + + return(result); +} + +/* + * vm_map_verify_done: + * + * Releases locks acquired by a vm_map_verify. + * + * This is now a macro in vm/vm_map.h. It does a + * vm_map_unlock_read on the map. + */ + + +/* + * vm_region: + * + * User call to obtain information about a region in + * a task's address map. Currently, only one flavor is + * supported. + * + * XXX The reserved and behavior fields cannot be filled + * in until the vm merge from the IK is completed, and + * vm_reserve is implemented. + * + * XXX Dependency: syscall_vm_region() also supports only one flavor. + */ + +kern_return_t +vm_region( + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + ipc_port_t *object_name) /* OUT */ +{ + vm_map_entry_t tmp_entry; + register + vm_map_entry_t entry; + register + vm_offset_t start; + vm_region_basic_info_t basic; + vm_region_extended_info_t extended; + vm_region_top_info_t top; + + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + switch (flavor) { + + case VM_REGION_BASIC_INFO: + { + if (*count < VM_REGION_BASIC_INFO_COUNT) + return(KERN_INVALID_ARGUMENT); + + basic = (vm_region_basic_info_t) info; + *count = VM_REGION_BASIC_INFO_COUNT; + + vm_map_lock_read(map); + + start = *address; + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { + vm_map_unlock_read(map); + return(KERN_INVALID_ADDRESS); + } + } else { + entry = tmp_entry; + } + + start = entry->vme_start; + + basic->offset = entry->offset; + 
basic->protection = entry->protection; + basic->inheritance = entry->inheritance; + basic->max_protection = entry->max_protection; + basic->behavior = entry->behavior; + basic->user_wired_count = entry->user_wired_count; + basic->reserved = entry->is_sub_map; + *address = start; + *size = (entry->vme_end - start); + + if (object_name) *object_name = IP_NULL; + if (entry->is_sub_map) { + basic->shared = FALSE; + } else { + basic->shared = entry->is_shared; + } + + vm_map_unlock_read(map); + return(KERN_SUCCESS); + } + case VM_REGION_EXTENDED_INFO: + { + + if (*count < VM_REGION_EXTENDED_INFO_COUNT) + return(KERN_INVALID_ARGUMENT); + + extended = (vm_region_extended_info_t) info; + *count = VM_REGION_EXTENDED_INFO_COUNT; + + vm_map_lock_read(map); + + start = *address; + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { + vm_map_unlock_read(map); + return(KERN_INVALID_ADDRESS); + } + } else { + entry = tmp_entry; + } + start = entry->vme_start; + + extended->protection = entry->protection; + extended->user_tag = entry->alias; + extended->pages_resident = 0; + extended->pages_swapped_out = 0; + extended->pages_shared_now_private = 0; + extended->pages_referenced = 0; + extended->external_pager = 0; + extended->shadow_depth = 0; + + vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start); + + if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED) + extended->share_mode = SM_PRIVATE; + + if (object_name) + *object_name = IP_NULL; + *address = start; + *size = (entry->vme_end - start); + + vm_map_unlock_read(map); + return(KERN_SUCCESS); + } + case VM_REGION_TOP_INFO: + { + + if (*count < VM_REGION_TOP_INFO_COUNT) + return(KERN_INVALID_ARGUMENT); + + top = (vm_region_top_info_t) info; + *count = VM_REGION_TOP_INFO_COUNT; + + vm_map_lock_read(map); + + start = *address; + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = 
tmp_entry->vme_next) == vm_map_to_entry(map)) { + vm_map_unlock_read(map); + return(KERN_INVALID_ADDRESS); + } + } else { + entry = tmp_entry; + + } + start = entry->vme_start; + + top->private_pages_resident = 0; + top->shared_pages_resident = 0; + + vm_region_top_walk(entry, top); + + if (object_name) + *object_name = IP_NULL; + *address = start; + *size = (entry->vme_end - start); + + vm_map_unlock_read(map); + return(KERN_SUCCESS); + } + default: + return(KERN_INVALID_ARGUMENT); + } +} + +/* + * vm_region_recurse: A form of vm_region which follows the + * submaps in a target map + * + */ + +kern_return_t +vm_region_recurse( + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + natural_t *nesting_depth, /* IN/OUT */ + vm_region_recurse_info_t info, /* IN/OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ +{ + vm_map_entry_t tmp_entry; + register + vm_map_entry_t entry; + register + vm_offset_t start; + + unsigned int recurse_count; + vm_map_t submap; + vm_map_t base_map; + vm_map_entry_t base_entry; + vm_offset_t base_next; + vm_offset_t base_addr; + vm_offset_t baddr_start_delta; + vm_region_submap_info_t submap_info; + vm_region_extended_info_data_t extended; + + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + submap_info = (vm_region_submap_info_t) info; + *count = VM_REGION_SUBMAP_INFO_COUNT; + + if (*count < VM_REGION_SUBMAP_INFO_COUNT) + return(KERN_INVALID_ARGUMENT); + + start = *address; + base_map = map; + recurse_count = *nesting_depth; + +LOOKUP_NEXT_BASE_ENTRY: + vm_map_lock_read(map); + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { + vm_map_unlock_read(map); + return(KERN_INVALID_ADDRESS); + } + } else { + entry = tmp_entry; + } + *size = entry->vme_end - entry->vme_start; + start = entry->vme_start; + base_addr = start; + baddr_start_delta = *address - start; + base_next = entry->vme_end; + base_entry = entry; + + 
	/* Descend through nested submaps, at most recurse_count deep,
	 * translating start into each submap's address space. */
	while(entry->is_sub_map && recurse_count) {
		recurse_count--;
		vm_map_lock_read(entry->object.sub_map);


		if(entry == base_entry) {
			start = entry->offset;
			start += *address - entry->vme_start;
		}

		submap = entry->object.sub_map;
		vm_map_unlock_read(map);
		map = submap;

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next)
						== vm_map_to_entry(map)) {
				/* Hole at end of submap: restart the scan
				 * from the next base-map entry. */
				vm_map_unlock_read(map);
				map = base_map;
				start = base_next;
				recurse_count = 0;
				*nesting_depth = 0;
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
		} else {
			entry = tmp_entry;

		}
		if(start <= entry->vme_start) {
			vm_offset_t	old_start = start;
			if(baddr_start_delta) {
				base_addr += (baddr_start_delta);
				*size -= baddr_start_delta;
				baddr_start_delta = 0;
			}
			/* NOTE(review): assignment inside the comparison is
			 * intentional — base_addr is advanced past the
			 * submap hole before the bounds check. */
			if(base_next <=
				(base_addr += (entry->vme_start - start))) {
				vm_map_unlock_read(map);
				map = base_map;
				start = base_next;
				recurse_count = 0;
				*nesting_depth = 0;
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
			*size -= entry->vme_start - start;
			if (*size > (entry->vme_end - entry->vme_start)) {
				*size = entry->vme_end - entry->vme_start;
			}
			start = 0;
		} else {
			if(baddr_start_delta) {
				if((start - entry->vme_start)
						< baddr_start_delta) {
					base_addr += start - entry->vme_start;
					*size -= start - entry->vme_start;
				} else {
					base_addr += baddr_start_delta;
					*size += baddr_start_delta;
				}
				baddr_start_delta = 0;
			}
			base_addr += entry->vme_start;
			if(base_addr >= base_next) {
				vm_map_unlock_read(map);
				map = base_map;
				start = base_next;
				recurse_count = 0;
				*nesting_depth = 0;
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
			if (*size > (entry->vme_end - start))
				*size = entry->vme_end - start;

			start = entry->vme_start - start;
		}

		start += entry->offset;

	}
	/* Report how many levels were actually traversed. */
	*nesting_depth -= recurse_count;
	if(entry != base_entry) {
		start = entry->vme_start + (start - entry->offset);
	}


	submap_info->user_tag = entry->alias;
	submap_info->offset = entry->offset;
	submap_info->protection = entry->protection;
	submap_info->inheritance = entry->inheritance;
	submap_info->max_protection = entry->max_protection;
	submap_info->behavior = entry->behavior;
	submap_info->user_wired_count = entry->user_wired_count;
	submap_info->is_submap = entry->is_sub_map;
	submap_info->object_id = (vm_offset_t)entry->object.vm_object;
	*address = base_addr;


	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_referenced = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	if(!entry->is_sub_map) {
		vm_region_walk(entry, &extended, entry->offset,
				entry->vme_end - start, map, start);
		submap_info->share_mode = extended.share_mode;
		/* Map + pager as sole referencers => effectively private. */
		if (extended.external_pager && extended.ref_count == 2
					&& extended.share_mode == SM_SHARED)
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = extended.ref_count;
	} else {
		if(entry->use_pmap)
			submap_info->share_mode = SM_TRUESHARED;
		else
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = entry->object.sub_map->ref_count;
	}

	submap_info->pages_resident = extended.pages_resident;
	submap_info->pages_swapped_out = extended.pages_swapped_out;
	submap_info->pages_shared_now_private =
				extended.pages_shared_now_private;
	submap_info->pages_referenced = extended.pages_referenced;
	submap_info->external_pager = extended.external_pager;
	submap_info->shadow_depth = extended.shadow_depth;

	vm_map_unlock_read(map);
	return(KERN_SUCCESS);
}

/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region_recurse function migrates to
 *	64 bits
 * 	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */

kern_return_t
vm_region_recurse_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	natural_t	 	*nesting_depth,	/* IN/OUT */
	vm_region_recurse_info_t info,	/*
 IN/OUT */
	mach_msg_type_number_t	*count)	/* IN/OUT */
{
	/* NOTE(review): this function is a near-verbatim clone of
	 * vm_region_recurse() differing only in the 64-bit submap_info
	 * type — flagged TEMPORARY above; keep the two in sync. */
	vm_map_entry_t		tmp_entry;
	register
	vm_map_entry_t		entry;
	register
	vm_offset_t		start;

	unsigned int		recurse_count;
	vm_map_t		submap;
	vm_map_t		base_map;
	vm_map_entry_t		base_entry;
	vm_offset_t		base_next;
	vm_offset_t		base_addr;
	vm_offset_t		baddr_start_delta;
	vm_region_submap_info_64_t	submap_info;
	vm_region_extended_info_data_t	extended;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	submap_info = (vm_region_submap_info_64_t) info;
	*count = VM_REGION_SUBMAP_INFO_COUNT;

	if (*count < VM_REGION_SUBMAP_INFO_COUNT)
		return(KERN_INVALID_ARGUMENT);

	start = *address;
	base_map = map;
	recurse_count = *nesting_depth;

LOOKUP_NEXT_BASE_ENTRY:
	vm_map_lock_read(map);
	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			return(KERN_INVALID_ADDRESS);
		}
	} else {
		entry = tmp_entry;
	}
	/* Snapshot the base-map view before descending submaps. */
	*size = entry->vme_end - entry->vme_start;
	start = entry->vme_start;
	base_addr = start;
	baddr_start_delta = *address - start;
	base_next = entry->vme_end;
	base_entry = entry;

	while(entry->is_sub_map && recurse_count) {
		recurse_count--;
		vm_map_lock_read(entry->object.sub_map);


		if(entry == base_entry) {
			start = entry->offset;
			start += *address - entry->vme_start;
		}

		submap = entry->object.sub_map;
		vm_map_unlock_read(map);
		map = submap;

		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next)
						== vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				map = base_map;
				start = base_next;
				recurse_count = 0;
				*nesting_depth = 0;
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
		} else {
			entry = tmp_entry;

		}
		if(start <= entry->vme_start) {
			vm_offset_t	old_start = start;
			if(baddr_start_delta) {
				base_addr += (baddr_start_delta);
				*size -= baddr_start_delta;
				baddr_start_delta = 0;
			}
			/* Assignment inside the comparison is intentional. */
			if(base_next <=
				(base_addr += (entry->vme_start - start))) {
				vm_map_unlock_read(map);
				map = base_map;
				start = base_next;
				recurse_count = 0;
				*nesting_depth = 0;
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
			*size -= entry->vme_start - start;
			if (*size > (entry->vme_end - entry->vme_start)) {
				*size = entry->vme_end - entry->vme_start;
			}
			start = 0;
		} else {
			if(baddr_start_delta) {
				if((start - entry->vme_start)
						< baddr_start_delta) {
					base_addr += start - entry->vme_start;
					*size -= start - entry->vme_start;
				} else {
					base_addr += baddr_start_delta;
					*size += baddr_start_delta;
				}
				baddr_start_delta = 0;
			}
			base_addr += entry->vme_start;
			if(base_addr >= base_next) {
				vm_map_unlock_read(map);
				map = base_map;
				start = base_next;
				recurse_count = 0;
				*nesting_depth = 0;
				goto LOOKUP_NEXT_BASE_ENTRY;
			}
			if (*size > (entry->vme_end - start))
				*size = entry->vme_end - start;

			start = entry->vme_start - start;
		}

		start += entry->offset;

	}
	*nesting_depth -= recurse_count;
	if(entry != base_entry) {
		start = entry->vme_start + (start - entry->offset);
	}


	submap_info->user_tag = entry->alias;
	submap_info->offset = entry->offset;
	submap_info->protection = entry->protection;
	submap_info->inheritance = entry->inheritance;
	submap_info->max_protection = entry->max_protection;
	submap_info->behavior = entry->behavior;
	submap_info->user_wired_count = entry->user_wired_count;
	submap_info->is_submap = entry->is_sub_map;
	submap_info->object_id = (vm_offset_t)entry->object.vm_object;
	*address = base_addr;


	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_referenced = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	if(!entry->is_sub_map) {
		vm_region_walk(entry, &extended, entry->offset,
				entry->vme_end - start, map, start);
		submap_info->share_mode = extended.share_mode;
		if (extended.external_pager && extended.ref_count == 2
					&& extended.share_mode == SM_SHARED)
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = extended.ref_count;
	} else {
		if(entry->use_pmap)
			submap_info->share_mode = SM_TRUESHARED;
		else
			submap_info->share_mode = SM_PRIVATE;
		submap_info->ref_count = entry->object.sub_map->ref_count;
	}

	submap_info->pages_resident = extended.pages_resident;
	submap_info->pages_swapped_out = extended.pages_swapped_out;
	submap_info->pages_shared_now_private =
				extended.pages_shared_now_private;
	submap_info->pages_referenced = extended.pages_referenced;
	submap_info->external_pager = extended.external_pager;
	submap_info->shadow_depth = extended.shadow_depth;

	vm_map_unlock_read(map);
	return(KERN_SUCCESS);
}


/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region function migrates to
 *	64 bits
 */


kern_return_t
vm_region_64(
	vm_map_t		 map,
	vm_offset_t		*address,		/* IN/OUT */
	vm_size_t		*size,			/* OUT */
	vm_region_flavor_t	 flavor,		/* IN */
	vm_region_info_t	 info,			/* OUT */
	mach_msg_type_number_t	*count,			/* IN/OUT */
	ipc_port_t		*object_name)		/* OUT */
{
	/* NOTE(review): near-verbatim clone of vm_region() using the
	 * 64-bit basic-info type; keep in sync (marked TEMPORARY). */
	vm_map_entry_t		tmp_entry;
	register
	vm_map_entry_t		entry;
	register
	vm_offset_t		start;
	vm_region_basic_info_64_t	basic;
	vm_region_extended_info_t	extended;
	vm_region_top_info_t	top;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case VM_REGION_BASIC_INFO:
	{
		if (*count < VM_REGION_BASIC_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		basic = (vm_region_basic_info_64_t) info;
		*count = VM_REGION_BASIC_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
			   	return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}

		start = entry->vme_start;

		basic->offset = entry->offset;
		basic->protection =
 entry->protection;
		basic->inheritance = entry->inheritance;
		basic->max_protection = entry->max_protection;
		basic->behavior = entry->behavior;
		basic->user_wired_count = entry->user_wired_count;
		basic->reserved = entry->is_sub_map;
		*address = start;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			basic->shared = FALSE;
		} else {
			basic->shared = entry->is_shared;
		}

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	case VM_REGION_EXTENDED_INFO:
	{

		if (*count < VM_REGION_EXTENDED_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		extended = (vm_region_extended_info_t) info;
		*count = VM_REGION_EXTENDED_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
			   	return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		extended->protection = entry->protection;
		extended->user_tag = entry->alias;
		extended->pages_resident = 0;
		extended->pages_swapped_out = 0;
		extended->pages_shared_now_private = 0;
		extended->pages_referenced = 0;
		extended->external_pager = 0;
		extended->shadow_depth = 0;

		vm_region_walk(entry, extended, entry->offset, entry->vme_end - start, map, start);

		/* Map + pager as sole referencers => effectively private. */
		if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
			extended->share_mode = SM_PRIVATE;

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	case VM_REGION_TOP_INFO:
	{

		if (*count < VM_REGION_TOP_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		top = (vm_region_top_info_t) info;
		*count = VM_REGION_TOP_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
			   	return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;

		}
		start = entry->vme_start;

		top->private_pages_resident = 0;
		top->shared_pages_resident = 0;

		vm_region_top_walk(entry, top);

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}
}

/*
 *	vm_region_top_walk:
 *
 *	Fill in a vm_region_top_info record for the given map entry by
 *	walking the entry's object shadow chain, classifying pages as
 *	private or shared and deriving a share mode.
 */
void
vm_region_top_walk(
        vm_map_entry_t		   entry,
	vm_region_top_info_t       top)
{
        register struct vm_object *obj, *tmp_obj;

	if (entry->object.vm_object == 0) {
		top->share_mode = SM_EMPTY;
		top->ref_count = 0;
		top->obj_id = 0;
		return;
	}
	/* NOTE(review): the submap case casts a vm_map_t to
	 * vm_map_entry_t and recurses — presumably relying on the
	 * sub_map's header being laid out like an entry; TODO confirm. */
	if (entry->is_sub_map)
		vm_region_top_walk((vm_map_entry_t)entry->object.sub_map, top);
	else {
		obj = entry->object.vm_object;

		vm_object_lock(obj);

		if (obj->shadow) {
			if (obj->ref_count == 1)
				top->private_pages_resident = obj->resident_page_count;
			else
				top->shared_pages_resident = obj->resident_page_count;
			top->ref_count = obj->ref_count;
			top->share_mode = SM_COW;

			/* Hand-over-hand locking down the shadow chain:
			 * lock the shadow before dropping its shadower.
			 * Assignment in the loop condition is intentional. */
			while (tmp_obj = obj->shadow) {
				vm_object_lock(tmp_obj);
				vm_object_unlock(obj);
				obj = tmp_obj;

				top->shared_pages_resident += obj->resident_page_count;
				top->ref_count += obj->ref_count - 1;
			}
		} else {
			if (entry->needs_copy) {
				top->share_mode = SM_COW;
				top->shared_pages_resident = obj->resident_page_count;
			} else {
				if (obj->ref_count == 1 ||
				   (obj->ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
					top->share_mode = SM_PRIVATE;
					top->private_pages_resident = obj->resident_page_count;
				} else {
					top->share_mode = SM_SHARED;
					top->shared_pages_resident = obj->resident_page_count;
				}
			}
			top->ref_count = obj->ref_count;
		}
		top->obj_id = (int)obj;

		vm_object_unlock(obj);
	}
}

/*
 *	vm_region_walk:
 *
 *	Fill in extended region statistics (resident/swapped/referenced
 *	page counts, shadow depth, share mode) for the given entry over
 *	[offset, offset + range).
 */
void
vm_region_walk(
	vm_map_entry_t		   entry,
	vm_region_extended_info_t  extended,
	vm_object_offset_t	   offset,
	vm_offset_t		   range,
	vm_map_t		   map,
	vm_offset_t		   va)
{
        register struct vm_object *obj, *tmp_obj;
	register vm_offset_t       last_offset;
	register int               i;
	/* K&R-style forward declaration of the per-page helper. */
	void vm_region_look_for_page();

	if (entry->object.vm_object == 0) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;
		return;
	}
	/* NOTE(review): as in vm_region_top_walk, a vm_map_t is cast to
	 * vm_map_entry_t for the recursive submap case — TODO confirm
	 * this layout assumption. */
	if (entry->is_sub_map)
		vm_region_walk((vm_map_entry_t)entry->object.sub_map, extended, offset + entry->offset,
			       range, map, va);
	else {
		obj = entry->object.vm_object;

		vm_object_lock(obj);

		/* Accumulate per-page statistics across the range. */
		for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
			vm_region_look_for_page(obj, extended, offset, obj->ref_count, 0, map, va);

		if (extended->shadow_depth || entry->needs_copy)
			extended->share_mode = SM_COW;
		else {
			if (obj->ref_count == 1)
				extended->share_mode = SM_PRIVATE;
			else {
				if (obj->true_share)
					extended->share_mode = SM_TRUESHARED;
				else
					extended->share_mode = SM_SHARED;
			}
		}
		extended->ref_count = obj->ref_count - extended->shadow_depth;

		/* Fold shadow-chain reference counts into the total,
		 * hand-over-hand locking down the chain. */
		for (i = 0; i < extended->shadow_depth; i++) {
			if ((tmp_obj = obj->shadow) == 0)
				break;
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);
			extended->ref_count += tmp_obj->ref_count;
			obj = tmp_obj;
		}
		vm_object_unlock(obj);

		if (extended->share_mode == SM_SHARED) {
			register vm_map_entry_t	     cur;
			register vm_map_entry_t	     last;
			int      my_refs;

			/* Count how many of the object's references come
			 * from this map; all of them => aliased private. */
			obj = entry->object.vm_object;
			last = vm_map_to_entry(map);
			my_refs = 0;

			for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
				my_refs += vm_region_count_obj_refs(cur, obj);

			if (my_refs == obj->ref_count)
				extended->share_mode = SM_PRIVATE_ALIASED;
			else if (my_refs > 1)
				extended->share_mode = SM_SHARED_ALIASED;
		}
	}
}



/*
 *	vm_region_look_for_page:
 *
 *	Classify a single page at (object, offset): resident, swapped
 *	out (per the existence map), or absent, recursing down the
 *	shadow chain and tracking the maximum shadow depth seen.
 */
void
vm_region_look_for_page(
	vm_object_t		   object,
	vm_region_extended_info_t  extended,
	vm_object_offset_t	   offset,
	int                        max_refcnt,
        int                        depth,
	vm_map_t		   map,
	vm_offset_t		   va)
{
        register vm_page_t        p;
        register vm_object_t      shadow;

	shadow = object->shadow;

	if ( !(object->pager_trusted) && !(object->internal))
		extended->external_pager = 1;

	if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
	        if (shadow && (max_refcnt == 1))
		        extended->pages_shared_now_private++;

		/* A valid translation in the pmap counts as referenced. */
		if (pmap_extract(vm_map_pmap(map), va))
		        extended->pages_referenced++;
		extended->pages_resident++;

		return;
	}
	if (object->existence_map) {
	        if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {
		        if (shadow && (max_refcnt == 1))
			        extended->pages_shared_now_private++;
			extended->pages_swapped_out++;

			return;
		}
	}
	if (shadow) {
	        vm_object_lock(shadow);

		if (++depth > extended->shadow_depth)
		        extended->shadow_depth = depth;

		if (shadow->ref_count > max_refcnt)
		        max_refcnt = shadow->ref_count;

		vm_region_look_for_page(shadow, extended, offset + object->shadow_offset,
					max_refcnt, depth, map, va);
		vm_object_unlock(shadow);

		return;
	}
}


/*
 *	vm_region_count_obj_refs:
 *
 *	Return how many times the given object appears in the entry's
 *	object shadow chain (recursing into submaps).
 *	NOTE(review): pre-C99 implicit-int return type.
 */
vm_region_count_obj_refs(
        vm_map_entry_t    entry,
	vm_object_t       object)
{
        register int ref_count;
	register vm_object_t chk_obj;
	register vm_object_t tmp_obj;

	if (entry->object.vm_object == 0)
		return(0);

        if (entry->is_sub_map)
		/* NOTE(review): same vm_map_t -> vm_map_entry_t cast as
		 * the walkers above — TODO confirm. */
		ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
	else {
		ref_count = 0;

		chk_obj = entry->object.vm_object;
		vm_object_lock(chk_obj);

		while (chk_obj) {
			if (chk_obj == object)
				ref_count++;
			/* Assignment in the condition is intentional:
			 * lock the shadow (if any) before unlocking. */
			if (tmp_obj = chk_obj->shadow)
				vm_object_lock(tmp_obj);
			vm_object_unlock(chk_obj);

			chk_obj = tmp_obj;
		}
	}
	return(ref_count);
}


/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
+ */ +void +vm_map_simplify( + vm_map_t map, + vm_offset_t start) +{ + vm_map_entry_t this_entry; + vm_map_entry_t prev_entry; + vm_map_entry_t next_entry; + + vm_map_lock(map); + if ( + (vm_map_lookup_entry(map, start, &this_entry)) && + ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) && + + (prev_entry->vme_end == this_entry->vme_start) && + + (prev_entry->is_shared == FALSE) && + (prev_entry->is_sub_map == FALSE) && + + (this_entry->is_shared == FALSE) && + (this_entry->is_sub_map == FALSE) && + + (prev_entry->inheritance == this_entry->inheritance) && + (prev_entry->protection == this_entry->protection) && + (prev_entry->max_protection == this_entry->max_protection) && + (prev_entry->behavior == this_entry->behavior) && + (prev_entry->wired_count == this_entry->wired_count) && + (prev_entry->user_wired_count == this_entry->user_wired_count)&& + (prev_entry->in_transition == FALSE) && + (this_entry->in_transition == FALSE) && + + (prev_entry->needs_copy == this_entry->needs_copy) && + + (prev_entry->object.vm_object == this_entry->object.vm_object)&& + ((prev_entry->offset + + (prev_entry->vme_end - prev_entry->vme_start)) + == this_entry->offset) + ) { + SAVE_HINT(map, prev_entry); + vm_map_entry_unlink(map, this_entry); + prev_entry->vme_end = this_entry->vme_end; + UPDATE_FIRST_FREE(map, map->first_free); + vm_object_deallocate(this_entry->object.vm_object); + vm_map_entry_dispose(map, this_entry); + counter(c_vm_map_simplified_lower++); + } + if ( + (vm_map_lookup_entry(map, start, &this_entry)) && + ((next_entry = this_entry->vme_next) != vm_map_to_entry(map)) && + + (next_entry->vme_start == this_entry->vme_end) && + + (next_entry->is_shared == FALSE) && + (next_entry->is_sub_map == FALSE) && + + (next_entry->is_shared == FALSE) && + (next_entry->is_sub_map == FALSE) && + + (next_entry->inheritance == this_entry->inheritance) && + (next_entry->protection == this_entry->protection) && + (next_entry->max_protection == 
this_entry->max_protection) && + (next_entry->behavior == this_entry->behavior) && + (next_entry->wired_count == this_entry->wired_count) && + (next_entry->user_wired_count == this_entry->user_wired_count)&& + (this_entry->in_transition == FALSE) && + (next_entry->in_transition == FALSE) && + + (next_entry->needs_copy == this_entry->needs_copy) && + + (next_entry->object.vm_object == this_entry->object.vm_object)&& + ((this_entry->offset + + (this_entry->vme_end - this_entry->vme_start)) + == next_entry->offset) + ) { + vm_map_entry_unlink(map, next_entry); + this_entry->vme_end = next_entry->vme_end; + UPDATE_FIRST_FREE(map, map->first_free); + vm_object_deallocate(next_entry->object.vm_object); + vm_map_entry_dispose(map, next_entry); + counter(c_vm_map_simplified_upper++); + } + counter(c_vm_map_simplify_called++); + vm_map_unlock(map); +} + + +/* + * Routine: vm_map_machine_attribute + * Purpose: + * Provide machine-specific attributes to mappings, + * such as cachability etc. for machines that provide + * them. NUMA architectures and machines with big/strange + * caches will use this. + * Note: + * Responsibilities for locking and checking are handled here, + * everything else in the pmap module. If any non-volatile + * information must be kept, the pmap module should handle + * it itself. [This assumes that attributes do not + * need to be inherited, which seems ok to me] + */ +kern_return_t +vm_map_machine_attribute( + vm_map_t map, + vm_offset_t address, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ +{ + kern_return_t ret; + + if (address < vm_map_min(map) || + (address + size) > vm_map_max(map)) + return KERN_INVALID_ADDRESS; + + vm_map_lock(map); + + ret = pmap_attribute(map->pmap, address, size, attribute, value); + + vm_map_unlock(map); + + return ret; +} + +/* + * vm_map_behavior_set: + * + * Sets the paging reference behavior of the specified address + * range in the target map. 
Paging reference behavior affects + * how pagein operations resulting from faults on the map will be + * clustered. + */ +kern_return_t +vm_map_behavior_set( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_behavior_t new_behavior) +{ + register vm_map_entry_t entry; + vm_map_entry_t temp_entry; + + XPR(XPR_VM_MAP, + "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d", + (integer_t)map, start, end, new_behavior, 0); + + switch (new_behavior) { + case VM_BEHAVIOR_DEFAULT: + case VM_BEHAVIOR_RANDOM: + case VM_BEHAVIOR_SEQUENTIAL: + case VM_BEHAVIOR_RSEQNTL: + break; + default: + return(KERN_INVALID_ARGUMENT); + } + + vm_map_lock(map); + + /* + * The entire address range must be valid for the map. + * Note that vm_map_range_check() does a + * vm_map_lookup_entry() internally and returns the + * entry containing the start of the address range if + * the entire range is valid. + */ + if (vm_map_range_check(map, start, end, &temp_entry)) { + entry = temp_entry; + vm_map_clip_start(map, entry, start); + } + else { + vm_map_unlock(map); + return(KERN_INVALID_ADDRESS); + } + + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + vm_map_clip_end(map, entry, end); + + entry->behavior = new_behavior; + + entry = entry->vme_next; + } + + vm_map_unlock(map); + return(KERN_SUCCESS); +} + + +int +vm_map_copy_cont_is_valid( + vm_map_copy_t copy) +{ + vm_map_copy_cont_t cont; + + assert(copy->type == VM_MAP_COPY_PAGE_LIST); + cont = copy->cpy_cont; + if ( + cont != vm_map_copy_discard_cont && + cont != vm_map_copyin_page_list_cont ) { + printf("vm_map_copy_cont_is_valid: bogus cont 0x%x\n", cont); + assert((integer_t) cont == 0xdeadbeef); + } + return 1; +} + +#include +#if MACH_KDB +#include +#include + +#define printf db_printf + +/* + * Forward declarations for internal functions. 
+ */ +extern void vm_map_links_print( + struct vm_map_links *links); + +extern void vm_map_header_print( + struct vm_map_header *header); + +extern void vm_map_entry_print( + vm_map_entry_t entry); + +extern void vm_follow_entry( + vm_map_entry_t entry); + +extern void vm_follow_map( + vm_map_t map); + +/* + * vm_map_links_print: [ debug ] + */ +void +vm_map_links_print( + struct vm_map_links *links) +{ + iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n", + links->prev, + links->next, + links->start, + links->end); +} + +/* + * vm_map_header_print: [ debug ] + */ +void +vm_map_header_print( + struct vm_map_header *header) +{ + vm_map_links_print(&header->links); + iprintf("nentries=0x%x, %sentries_pageable\n", + header->nentries, + (header->entries_pageable ? "" : "!")); +} + +/* + * vm_follow_entry: [ debug ] + */ +void +vm_follow_entry( + vm_map_entry_t entry) +{ + extern int db_indent; + int shadows; + + iprintf("map entry 0x%x:\n", entry); + + db_indent += 2; + + shadows = vm_follow_object(entry->object.vm_object); + iprintf("Total objects : %d\n",shadows); + + db_indent -= 2; +} + +/* + * vm_map_entry_print: [ debug ] + */ +void +vm_map_entry_print( + register vm_map_entry_t entry) +{ + extern int db_indent; + static char *inheritance_name[4] = { "share", "copy", "none", "?"}; + static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" }; + + iprintf("map entry 0x%x:\n", entry); + + db_indent += 2; + + vm_map_links_print(&entry->links); + + iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n", + entry->vme_start, + entry->vme_end, + entry->protection, + entry->max_protection, + inheritance_name[(entry->inheritance & 0x3)]); + + iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n", + behavior_name[(entry->behavior & 0x3)], + entry->wired_count, + entry->user_wired_count); + iprintf("%sin_transition, %sneeds_wakeup\n", + (entry->in_transition ? "" : "!"), + (entry->needs_wakeup ? 
"" : "!")); + + if (entry->is_sub_map) { + iprintf("submap=0x%x, offset=0x%x\n", + entry->object.sub_map, + entry->offset); + } else { + iprintf("object=0x%x, offset=0x%x, ", + entry->object.vm_object, + entry->offset); + printf("%sis_shared, %sneeds_copy\n", + (entry->is_shared ? "" : "!"), + (entry->needs_copy ? "" : "!")); + } + + db_indent -= 2; +} + +/* + * vm_follow_map: [ debug ] + */ +void +vm_follow_map( + vm_map_t map) +{ + register vm_map_entry_t entry; + extern int db_indent; + + iprintf("task map 0x%x:\n", map); + + db_indent += 2; + + for (entry = vm_map_first_entry(map); + entry && entry != vm_map_to_entry(map); + entry = entry->vme_next) { + vm_follow_entry(entry); + } + + db_indent -= 2; +} + +/* + * vm_map_print: [ debug ] + */ +void +vm_map_print( + register vm_map_t map) +{ + register vm_map_entry_t entry; + extern int db_indent; + char *swstate; + + iprintf("task map 0x%x:\n", map); + + db_indent += 2; + + vm_map_header_print(&map->hdr); + + iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n", + map->pmap, + map->size, + map->ref_count, + map->hint, + map->first_free); + + iprintf("%swait_for_space, %swiring_required, timestamp=%d\n", + (map->wait_for_space ? "" : "!"), + (map->wiring_required ? "" : "!"), + map->timestamp); + +#if TASK_SWAPPER + switch (map->sw_state) { + case MAP_SW_IN: + swstate = "SW_IN"; + break; + case MAP_SW_OUT: + swstate = "SW_OUT"; + break; + default: + swstate = "????"; + break; + } + iprintf("res=%d, sw_state=%s\n", map->res_count, swstate); +#endif /* TASK_SWAPPER */ + + for (entry = vm_map_first_entry(map); + entry && entry != vm_map_to_entry(map); + entry = entry->vme_next) { + vm_map_entry_print(entry); + } + + db_indent -= 2; +} + +/* + * Routine: vm_map_copy_print + * Purpose: + * Pretty-print a copy object for ddb. 
+ */ + +void +vm_map_copy_print( + vm_map_copy_t copy) +{ + extern int db_indent; + int i, npages; + vm_map_entry_t entry; + + printf("copy object 0x%x\n", copy); + + db_indent += 2; + + iprintf("type=%d", copy->type); + switch (copy->type) { + case VM_MAP_COPY_ENTRY_LIST: + printf("[entry_list]"); + break; + + case VM_MAP_COPY_OBJECT: + printf("[object]"); + break; + + case VM_MAP_COPY_PAGE_LIST: + printf("[page_list]"); + break; + + case VM_MAP_COPY_KERNEL_BUFFER: + printf("[kernel_buffer]"); + break; + + default: + printf("[bad type]"); + break; + } + printf(", offset=0x%x", copy->offset); + printf(", size=0x%x\n", copy->size); + + switch (copy->type) { + case VM_MAP_COPY_ENTRY_LIST: + vm_map_header_print(©->cpy_hdr); + for (entry = vm_map_copy_first_entry(copy); + entry && entry != vm_map_copy_to_entry(copy); + entry = entry->vme_next) { + vm_map_entry_print(entry); + } + break; + + case VM_MAP_COPY_OBJECT: + iprintf("object=0x%x\n", copy->cpy_object); + break; + + case VM_MAP_COPY_KERNEL_BUFFER: + iprintf("kernel buffer=0x%x", copy->cpy_kdata); + printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size); + break; + + case VM_MAP_COPY_PAGE_LIST: + iprintf("npages=%d", copy->cpy_npages); + printf(", cont=%x", copy->cpy_cont); + printf(", cont_args=%x\n", copy->cpy_cont_args); + if (copy->cpy_npages < 0) { + npages = 0; + } else if (copy->cpy_npages > VM_MAP_COPY_PAGE_LIST_MAX) { + npages = VM_MAP_COPY_PAGE_LIST_MAX; + } else { + npages = copy->cpy_npages; + } + iprintf("copy->cpy_page_list[0..%d] = {", npages); + for (i = 0; i < npages - 1; i++) { + printf("0x%x, ", copy->cpy_page_list[i]); + } + if (npages > 0) { + printf("0x%x", copy->cpy_page_list[npages - 1]); + } + printf("}\n"); + break; + } + + db_indent -=2; +} + +/* + * db_vm_map_total_size(map) [ debug ] + * + * return the total virtual size (in bytes) of the map + */ +vm_size_t +db_vm_map_total_size( + vm_map_t map) +{ + vm_map_entry_t entry; + vm_size_t total; + + total = 0; + for (entry = 
vm_map_first_entry(map); + entry != vm_map_to_entry(map); + entry = entry->vme_next) { + total += entry->vme_end - entry->vme_start; + } + + return total; +} + +#endif /* MACH_KDB */ + +/* + * Routine: vm_map_entry_insert + * + * Descritpion: This routine inserts a new vm_entry in a locked map. + */ +vm_map_entry_t +vm_map_entry_insert( + vm_map_t map, + vm_map_entry_t insp_entry, + vm_offset_t start, + vm_offset_t end, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + boolean_t is_shared, + boolean_t in_transition, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_behavior_t behavior, + vm_inherit_t inheritance, + unsigned wired_count) +{ + vm_map_entry_t new_entry; + + assert(insp_entry != (vm_map_entry_t)0); + + new_entry = vm_map_entry_create(map); + + new_entry->vme_start = start; + new_entry->vme_end = end; + assert(page_aligned(new_entry->vme_start)); + assert(page_aligned(new_entry->vme_end)); + + new_entry->object.vm_object = object; + new_entry->offset = offset; + new_entry->is_shared = is_shared; + new_entry->is_sub_map = FALSE; + new_entry->needs_copy = needs_copy; + new_entry->in_transition = in_transition; + new_entry->needs_wakeup = FALSE; + new_entry->inheritance = inheritance; + new_entry->protection = cur_protection; + new_entry->max_protection = max_protection; + new_entry->behavior = behavior; + new_entry->wired_count = wired_count; + new_entry->user_wired_count = 0; + new_entry->use_pmap = FALSE; + + /* + * Insert the new entry into the list. + */ + + vm_map_entry_link(map, insp_entry, new_entry); + map->size += end - start; + + /* + * Update the free space hint and the lookup hint. + */ + + SAVE_HINT(map, new_entry); + return new_entry; +} + +/* + * Routine: vm_remap_extract + * + * Descritpion: This routine returns a vm_entry list from a map. 
+ */ +kern_return_t +vm_remap_extract( + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + /* What, no behavior? */ + vm_inherit_t inheritance, + boolean_t pageable) +{ + kern_return_t result; + vm_size_t mapped_size; + vm_size_t tmp_size; + vm_map_entry_t src_entry; /* result of last map lookup */ + vm_map_entry_t new_entry; + vm_object_offset_t offset; + vm_offset_t map_address; + vm_offset_t src_start; /* start of entry to map */ + vm_offset_t src_end; /* end of region to be mapped */ + vm_object_t object; + vm_map_version_t version; + boolean_t src_needs_copy; + boolean_t new_entry_needs_copy; + + assert(map != VM_MAP_NULL); + assert(size != 0 && size == round_page(size)); + assert(inheritance == VM_INHERIT_NONE || + inheritance == VM_INHERIT_COPY || + inheritance == VM_INHERIT_SHARE); + + /* + * Compute start and end of region. + */ + src_start = trunc_page(addr); + src_end = round_page(src_start + size); + + /* + * Initialize map_header. + */ + map_header->links.next = (struct vm_map_entry *)&map_header->links; + map_header->links.prev = (struct vm_map_entry *)&map_header->links; + map_header->nentries = 0; + map_header->entries_pageable = pageable; + + *cur_protection = VM_PROT_ALL; + *max_protection = VM_PROT_ALL; + + map_address = 0; + mapped_size = 0; + result = KERN_SUCCESS; + + /* + * The specified source virtual space might correspond to + * multiple map entries, need to loop on them. + */ + vm_map_lock(map); + while (mapped_size != size) { + vm_size_t entry_size; + + /* + * Find the beginning of the region. + */ + if (! 
vm_map_lookup_entry(map, src_start, &src_entry)) { + result = KERN_INVALID_ADDRESS; + break; + } + + if (src_start < src_entry->vme_start || + (mapped_size && src_start != src_entry->vme_start)) { + result = KERN_INVALID_ADDRESS; + break; + } + + if(src_entry->is_sub_map) { + result = KERN_INVALID_ADDRESS; + break; + } + + tmp_size = size - mapped_size; + if (src_end > src_entry->vme_end) + tmp_size -= (src_end - src_entry->vme_end); + + entry_size = (vm_size_t)(src_entry->vme_end - + src_entry->vme_start); + + if(src_entry->is_sub_map) { + vm_map_reference(src_entry->object.sub_map); + } else { + object = src_entry->object.vm_object; + + if (object == VM_OBJECT_NULL) { + object = vm_object_allocate(entry_size); + src_entry->offset = 0; + src_entry->object.vm_object = object; + } else if (object->copy_strategy != + MEMORY_OBJECT_COPY_SYMMETRIC) { + /* + * We are already using an asymmetric + * copy, and therefore we already have + * the right object. + */ + assert(!src_entry->needs_copy); + } else if (src_entry->needs_copy || object->shadowed || + (object->internal && !object->true_share && + !src_entry->is_shared && + object->size > entry_size)) { + + vm_object_shadow(&src_entry->object.vm_object, + &src_entry->offset, + entry_size); + + if (!src_entry->needs_copy && + (src_entry->protection & VM_PROT_WRITE)) { + pmap_protect(vm_map_pmap(map), + src_entry->vme_start, + src_entry->vme_end, + src_entry->protection & + ~VM_PROT_WRITE); + } + + object = src_entry->object.vm_object; + src_entry->needs_copy = FALSE; + } + + + vm_object_lock(object); + object->ref_count++; /* object ref. 
for new entry */ + VM_OBJ_RES_INCR(object); + if (object->copy_strategy == + MEMORY_OBJECT_COPY_SYMMETRIC) { + object->copy_strategy = + MEMORY_OBJECT_COPY_DELAY; + } + vm_object_unlock(object); + } + + offset = src_entry->offset + (src_start - src_entry->vme_start); + + new_entry = _vm_map_entry_create(map_header); + vm_map_entry_copy(new_entry, src_entry); + new_entry->use_pmap = FALSE; /* clr address space specifics */ + + new_entry->vme_start = map_address; + new_entry->vme_end = map_address + tmp_size; + new_entry->inheritance = inheritance; + new_entry->offset = offset; + + /* + * The new region has to be copied now if required. + */ + RestartCopy: + if (!copy) { + src_entry->is_shared = TRUE; + new_entry->is_shared = TRUE; + if (!(new_entry->is_sub_map)) + new_entry->needs_copy = FALSE; + + } else if (src_entry->is_sub_map) { + /* make this a COW sub_map if not already */ + new_entry->needs_copy = TRUE; + } else if (src_entry->wired_count == 0 && + vm_object_copy_quickly(&new_entry->object.vm_object, + new_entry->offset, + (new_entry->vme_end - + new_entry->vme_start), + &src_needs_copy, + &new_entry_needs_copy)) { + + new_entry->needs_copy = new_entry_needs_copy; + new_entry->is_shared = FALSE; + + /* + * Handle copy_on_write semantics. + */ + if (src_needs_copy && !src_entry->needs_copy) { + vm_object_pmap_protect(object, + offset, + entry_size, + (src_entry->is_shared ? + PMAP_NULL : map->pmap), + src_entry->vme_start, + src_entry->protection & + ~VM_PROT_WRITE); + + src_entry->needs_copy = TRUE; + } + /* + * Throw away the old object reference of the new entry. + */ + vm_object_deallocate(object); + + } else { + new_entry->is_shared = FALSE; + + /* + * The map can be safely unlocked since we + * already hold a reference on the object. + * + * Record the timestamp of the map for later + * verification, and unlock the map. + */ + version.main_timestamp = map->timestamp; + vm_map_unlock(map); + + /* + * Perform the copy. 
+ */ + if (src_entry->wired_count > 0) { + vm_object_lock(object); + result = vm_object_copy_slowly( + object, + offset, + entry_size, + THREAD_UNINT, + &new_entry->object.vm_object); + + new_entry->offset = 0; + new_entry->needs_copy = FALSE; + } else { + result = vm_object_copy_strategically( + object, + offset, + entry_size, + &new_entry->object.vm_object, + &new_entry->offset, + &new_entry_needs_copy); + + new_entry->needs_copy = new_entry_needs_copy; + } + + /* + * Throw away the old object reference of the new entry. + */ + vm_object_deallocate(object); + + if (result != KERN_SUCCESS && + result != KERN_MEMORY_RESTART_COPY) { + _vm_map_entry_dispose(map_header, new_entry); + break; + } + + /* + * Verify that the map has not substantially + * changed while the copy was being made. + */ + + vm_map_lock(map); /* Increments timestamp once! */ + if (version.main_timestamp + 1 != map->timestamp) { + /* + * Simple version comparison failed. + * + * Retry the lookup and verify that the + * same object/offset are still present. + */ + vm_object_deallocate(new_entry-> + object.vm_object); + _vm_map_entry_dispose(map_header, new_entry); + if (result == KERN_MEMORY_RESTART_COPY) + result = KERN_SUCCESS; + continue; + } + + if (result == KERN_MEMORY_RESTART_COPY) { + vm_object_reference(object); + goto RestartCopy; + } + } + + _vm_map_entry_link(map_header, + map_header->links.prev, new_entry); + + *cur_protection &= src_entry->protection; + *max_protection &= src_entry->max_protection; + + map_address += tmp_size; + mapped_size += tmp_size; + src_start += tmp_size; + + } /* end while */ + + vm_map_unlock(map); + if (result != KERN_SUCCESS) { + /* + * Free all allocated elements. 
+ */ + for (src_entry = map_header->links.next; + src_entry != (struct vm_map_entry *)&map_header->links; + src_entry = new_entry) { + new_entry = src_entry->vme_next; + _vm_map_entry_unlink(map_header, src_entry); + vm_object_deallocate(src_entry->object.vm_object); + _vm_map_entry_dispose(map_header, src_entry); + } + } + return result; +} + +/* + * Routine: vm_remap + * + * Map portion of a task's address space. + * Mapped region must not overlap more than + * one vm memory object. Protections and + * inheritance attributes remain the same + * as in the original task and are out parameters. + * Source and Target task can be identical + * Other attributes are identical as for vm_map() + */ +kern_return_t +vm_remap( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + boolean_t anywhere, + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) +{ + kern_return_t result; + vm_map_entry_t entry; + vm_map_entry_t insp_entry; + vm_map_entry_t new_entry; + struct vm_map_header map_header; + + if (target_map == VM_MAP_NULL) + return KERN_INVALID_ARGUMENT; + + switch (inheritance) { + case VM_INHERIT_NONE: + case VM_INHERIT_COPY: + case VM_INHERIT_SHARE: + if (size != 0 && src_map != VM_MAP_NULL) + break; + /*FALL THRU*/ + default: + return KERN_INVALID_ARGUMENT; + } + + size = round_page(size); + + result = vm_remap_extract(src_map, memory_address, + size, copy, &map_header, + cur_protection, + max_protection, + inheritance, + target_map->hdr. 
+ entries_pageable); + vm_map_deallocate(src_map); + + if (result != KERN_SUCCESS) { + return result; + } + + /* + * Allocate/check a range of free virtual address + * space for the target + */ + *address = trunc_page(*address); + vm_map_lock(target_map); + result = vm_remap_range_allocate(target_map, address, size, + mask, anywhere, &insp_entry); + + for (entry = map_header.links.next; + entry != (struct vm_map_entry *)&map_header.links; + entry = new_entry) { + new_entry = entry->vme_next; + _vm_map_entry_unlink(&map_header, entry); + if (result == KERN_SUCCESS) { + entry->vme_start += *address; + entry->vme_end += *address; + vm_map_entry_link(target_map, insp_entry, entry); + insp_entry = entry; + } else { + if (!entry->is_sub_map) { + vm_object_deallocate(entry->object.vm_object); + } else { + vm_map_deallocate(entry->object.sub_map); + } + _vm_map_entry_dispose(&map_header, entry); + } + } + + if (result == KERN_SUCCESS) { + target_map->size += size; + SAVE_HINT(target_map, insp_entry); + } + vm_map_unlock(target_map); + + if (result == KERN_SUCCESS && target_map->wiring_required) + result = vm_map_wire(target_map, *address, + *address + size, *cur_protection, TRUE); + return result; +} + +/* + * Routine: vm_remap_range_allocate + * + * Description: + * Allocate a range in the specified virtual address map. + * returns the address and the map entry just before the allocated + * range + * + * Map must be locked. + */ + +kern_return_t +vm_remap_range_allocate( + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t size, + vm_offset_t mask, + boolean_t anywhere, + vm_map_entry_t *map_entry) /* OUT */ +{ + register vm_map_entry_t entry; + register vm_offset_t start; + register vm_offset_t end; + kern_return_t result = KERN_SUCCESS; + + StartAgain: ; + + start = *address; + + if (anywhere) + { + /* + * Calculate the first possible address. 
+ */ + + if (start < map->min_offset) + start = map->min_offset; + if (start > map->max_offset) + return(KERN_NO_SPACE); + + /* + * Look for the first possible address; + * if there's already something at this + * address, we have to start after it. + */ + + assert(first_free_is_valid(map)); + if (start == map->min_offset) { + if ((entry = map->first_free) != vm_map_to_entry(map)) + start = entry->vme_end; + } else { + vm_map_entry_t tmp_entry; + if (vm_map_lookup_entry(map, start, &tmp_entry)) + start = tmp_entry->vme_end; + entry = tmp_entry; + } + + /* + * In any case, the "entry" always precedes + * the proposed new region throughout the + * loop: + */ + + while (TRUE) { + register vm_map_entry_t next; + + /* + * Find the end of the proposed new region. + * Be sure we didn't go beyond the end, or + * wrap around the address. + */ + + end = ((start + mask) & ~mask); + if (end < start) + return(KERN_NO_SPACE); + start = end; + end += size; + + if ((end > map->max_offset) || (end < start)) { + if (map->wait_for_space) { + if (size <= (map->max_offset - + map->min_offset)) { + assert_wait((event_t) map, THREAD_INTERRUPTIBLE); + vm_map_unlock(map); + thread_block((void (*)(void))0); + vm_map_lock(map); + goto StartAgain; + } + } + + return(KERN_NO_SPACE); + } + + /* + * If there are no more entries, we must win. + */ + + next = entry->vme_next; + if (next == vm_map_to_entry(map)) + break; + + /* + * If there is another entry, it must be + * after the end of the potential new region. + */ + + if (next->vme_start >= end) + break; + + /* + * Didn't fit -- move to the next entry. + */ + + entry = next; + start = entry->vme_end; + } + *address = start; + } else { + vm_map_entry_t temp_entry; + + /* + * Verify that: + * the address doesn't itself violate + * the mask requirement. + */ + + if ((start & mask) != 0) + return(KERN_NO_SPACE); + + + /* + * ... 
the address is within bounds + */ + + end = start + size; + + if ((start < map->min_offset) || + (end > map->max_offset) || + (start >= end)) { + return(KERN_INVALID_ADDRESS); + } + + /* + * ... the starting address isn't allocated + */ + + if (vm_map_lookup_entry(map, start, &temp_entry)) + return(KERN_NO_SPACE); + + entry = temp_entry; + + /* + * ... the next region doesn't overlap the + * end point. + */ + + if ((entry->vme_next != vm_map_to_entry(map)) && + (entry->vme_next->vme_start < end)) + return(KERN_NO_SPACE); + } + *map_entry = entry; + return(KERN_SUCCESS); +} + +/* + * vm_map_switch: + * + * Set the address map for the current thr_act to the specified map + */ + +vm_map_t +vm_map_switch( + vm_map_t map) +{ + int mycpu; + thread_act_t thr_act = current_act(); + vm_map_t oldmap = thr_act->map; + + mp_disable_preemption(); + mycpu = cpu_number(); + + /* + * Deactivate the current map and activate the requested map + */ + PMAP_SWITCH_USER(thr_act, map, mycpu); + + mp_enable_preemption(); + return(oldmap); +} + + +/* + * Routine: vm_map_write_user + * + * Description: + * Copy out data from a kernel space into space in the + * destination map. The space must already exist in the + * destination map. + * NOTE: This routine should only be called by threads + * which can block on a page fault. i.e. kernel mode user + * threads. 
+ * + */ +kern_return_t +vm_map_write_user( + vm_map_t map, + vm_offset_t src_addr, + vm_offset_t dst_addr, + vm_size_t size) +{ + thread_act_t thr_act = current_act(); + kern_return_t kr = KERN_SUCCESS; + + if(thr_act->map == map) { + if (copyout((char *)src_addr, (char *)dst_addr, size)) { + kr = KERN_INVALID_ADDRESS; + } + } else { + vm_map_t oldmap; + + /* take on the identity of the target map while doing */ + /* the transfer */ + + vm_map_reference(map); + oldmap = vm_map_switch(map); + if (copyout((char *)src_addr, (char *)dst_addr, size)) { + kr = KERN_INVALID_ADDRESS; + } + vm_map_switch(oldmap); + vm_map_deallocate(map); + } + return kr; +} + +/* + * Routine: vm_map_read_user + * + * Description: + * Copy in data from a user space source map into the + * kernel map. The space must already exist in the + * kernel map. + * NOTE: This routine should only be called by threads + * which can block on a page fault. i.e. kernel mode user + * threads. + * + */ +kern_return_t +vm_map_read_user( + vm_map_t map, + vm_offset_t src_addr, + vm_offset_t dst_addr, + vm_size_t size) +{ + thread_act_t thr_act = current_act(); + kern_return_t kr = KERN_SUCCESS; + + if(thr_act->map == map) { + if (copyin((char *)src_addr, (char *)dst_addr, size)) { + kr = KERN_INVALID_ADDRESS; + } + } else { + vm_map_t oldmap; + + /* take on the identity of the target map while doing */ + /* the transfer */ + + vm_map_reference(map); + oldmap = vm_map_switch(map); + if (copyin((char *)src_addr, (char *)dst_addr, size)) { + kr = KERN_INVALID_ADDRESS; + } + vm_map_switch(oldmap); + vm_map_deallocate(map); + } + return kr; +} + +/* Takes existing source and destination sub-maps and clones the contents of */ +/* the source map */ + +kern_return_t +vm_region_clone( + ipc_port_t src_region, + ipc_port_t dst_region) +{ + vm_named_entry_t src_object; + vm_named_entry_t dst_object; + vm_map_t src_map; + vm_map_t dst_map; + vm_offset_t addr; + vm_offset_t max_off; + vm_map_entry_t entry; + 
vm_map_entry_t new_entry; + vm_map_entry_t insert_point; + + src_object = (vm_named_entry_t)src_region->ip_kobject; + dst_object = (vm_named_entry_t)dst_region->ip_kobject; + if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) { + return KERN_INVALID_ARGUMENT; + } + src_map = (vm_map_t)src_object->backing.map; + dst_map = (vm_map_t)dst_object->backing.map; + /* destination map is assumed to be unavailable to any other */ + /* activity. i.e. it is new */ + vm_map_lock(src_map); + if((src_map->min_offset != dst_map->min_offset) + || (src_map->max_offset != dst_map->max_offset)) { + vm_map_unlock(src_map); + return KERN_INVALID_ARGUMENT; + } + addr = src_map->min_offset; + vm_map_lookup_entry(dst_map, addr, &entry); + if(entry == vm_map_to_entry(dst_map)) { + entry = entry->vme_next; + } + if(entry == vm_map_to_entry(dst_map)) { + max_off = src_map->max_offset; + } else { + max_off = entry->vme_start; + } + vm_map_lookup_entry(src_map, addr, &entry); + if(entry == vm_map_to_entry(src_map)) { + entry = entry->vme_next; + } + vm_map_lookup_entry(dst_map, addr, &insert_point); + while((entry != vm_map_to_entry(src_map)) && + (entry->vme_end <= max_off)) { + addr = entry->vme_start; + new_entry = vm_map_entry_create(dst_map); + vm_map_entry_copy(new_entry, entry); + vm_map_entry_link(dst_map, insert_point, new_entry); + insert_point = new_entry; + if (entry->object.vm_object != VM_OBJECT_NULL) { + if (new_entry->is_sub_map) { + vm_map_reference(new_entry->object.sub_map); + } else { + vm_object_reference( + new_entry->object.vm_object); + } + } + dst_map->size += new_entry->vme_end - new_entry->vme_start; + entry = entry->vme_next; + } + vm_map_unlock(src_map); + return KERN_SUCCESS; +} + +/* + * Export routines to other components for the things we access locally through + * macros. 
+ */ +#undef current_map +vm_map_t +current_map(void) +{ + return (current_map_fast()); +} + +/* + * vm_map_check_protection: + * + * Assert that the target map allows the specified + * privilege on the entire address region given. + * The entire region must be allocated. + */ +boolean_t vm_map_check_protection(map, start, end, protection) + register vm_map_t map; + register vm_offset_t start; + register vm_offset_t end; + register vm_prot_t protection; +{ + register vm_map_entry_t entry; + vm_map_entry_t tmp_entry; + + vm_map_lock(map); + + if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) + { + vm_map_unlock(map); + return (FALSE); + } + + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + vm_map_unlock(map); + return(FALSE); + } + + entry = tmp_entry; + + while (start < end) { + if (entry == vm_map_to_entry(map)) { + vm_map_unlock(map); + return(FALSE); + } + + /* + * No holes allowed! + */ + + if (start < entry->vme_start) { + vm_map_unlock(map); + return(FALSE); + } + + /* + * Check protection associated with entry. + */ + + if ((entry->protection & protection) != protection) { + vm_map_unlock(map); + return(FALSE); + } + + /* go to next entry */ + + start = entry->vme_end; + entry = entry->vme_next; + } + vm_map_unlock(map); + return(TRUE); +} diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h new file mode 100644 index 000000000..d9eb8c6c9 --- /dev/null +++ b/osfmk/vm/vm_map.h @@ -0,0 +1,1013 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: vm/vm_map.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Virtual memory map module definitions. 
+ * + * Contributors: + * avie, dlb, mwyoung + */ + +#ifndef _VM_VM_MAP_H_ +#define _VM_VM_MAP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct vm_map_entry *vm_map_entry_t; + +extern void kernel_vm_map_reference(vm_map_t map); + +#ifndef MACH_KERNEL_PRIVATE + +struct vm_map_entry {}; + +extern void vm_map_reference(vm_map_t map); +extern vm_map_t current_map(void); + +#else /* MACH_KERNEL_PRIVATE */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#define shared_region_mapping_lock_init(object) \ + mutex_init(&(object)->Lock, ETAP_VM_OBJ) +#define shared_region_mapping_lock(object) mutex_lock(&(object)->Lock) +#define shared_region_mapping_unlock(object) mutex_unlock(&(object)->Lock) +#include + +#define current_map_fast() (current_act_fast()->map) +#define current_map() (current_map_fast()) + +/* + * Types defined: + * + * vm_map_t the high-level address map data structure. + * vm_map_entry_t an entry in an address map. + * vm_map_version_t a timestamp of a map, for use with vm_map_lookup + * vm_map_copy_t represents memory copied from an address map, + * used for inter-map copy operations + */ + +/* + * Type: vm_map_object_t [internal use only] + * + * Description: + * The target of an address mapping, either a virtual + * memory object or a sub map (of the kernel map). + */ +typedef union vm_map_object { + struct vm_object *vm_object; /* object object */ + struct vm_map *sub_map; /* belongs to another map */ +} vm_map_object_t; + +#define named_entry_lock_init(object) mutex_init(&(object)->Lock, ETAP_VM_OBJ) +#define named_entry_lock(object) mutex_lock(&(object)->Lock) +#define named_entry_unlock(object) mutex_unlock(&(object)->Lock) + +/* + * Type: vm_named_entry_t [internal use only] + * + * Description: + * Description of a mapping to a memory cache object. 
 *
 *	Implementation:
 *		While the handle to this object is used as a means to map
 *		and pass around the right to map regions backed by pagers
 *		of all sorts, the named_entry itself is only manipulated
 *		by the kernel.  Named entries hold information on the
 *		right to map a region of a cached object.  Namely,
 *		the target cache object, the beginning and ending of the
 *		region to be mapped, and the permissions, (read, write)
 *		with which it can be mapped.
 *
 */

struct vm_named_entry {
	decl_mutex_data(,	Lock)		/* Synchronization */
	vm_object_t	object;			/* object I point to */
	vm_object_offset_t offset;		/* offset into object */
	union {
		ipc_port_t pager;		/* amo pager port */
		vm_map_t map;			/* map backing submap */
	} backing;				/* interpreted per is_sub_map */
	unsigned int	size;			/* size of region */
	unsigned int	protection;		/* access permissions */
	int		ref_count;		/* Number of references */
	unsigned int
	/* boolean_t */	internal:1,	/* is an internal object */
	/* boolean_t */	is_sub_map:1;	/* is "object" a submap? */
};

typedef struct vm_named_entry *vm_named_entry_t;


/*
 *	Type:		vm_map_entry_t [internal use only]
 *
 *	Description:
 *		A single mapping within an address map.
 *
 *	Implementation:
 *		Address map entries consist of start and end addresses,
 *		a VM object (or sub map) and offset into that object,
 *		and user-exported inheritance and protection information.
 *		Control information for virtual copy operations is also
 *		stored in the address map entry.
 */
struct vm_map_links {
	struct vm_map_entry	*prev;		/* previous entry */
	struct vm_map_entry	*next;		/* next entry */
	vm_offset_t		start;		/* start address */
	vm_offset_t		end;		/* end address */
};

struct vm_map_entry {
	struct vm_map_links	links;		/* links to other entries */
#define vme_prev		links.prev
#define vme_next		links.next
#define vme_start		links.start
#define vme_end			links.end
	union vm_map_object	object;		/* object I point to */
	vm_object_offset_t	offset;		/* offset into object */
	unsigned int
	/* boolean_t */	is_shared:1,	/* region is shared */
	/* boolean_t */	is_sub_map:1,	/* Is "object" a submap? */
	/* boolean_t */	in_transition:1, /* Entry being changed */
	/* boolean_t */	needs_wakeup:1,	/* Waiters on in_transition */
	/* vm_behavior_t */ behavior:2,	/* user paging behavior hint */
			/* behavior is not defined for submap type */
	/* boolean_t */	needs_copy:1,	/* object need to be copied? */
			/* Only in task maps: */
	/* vm_prot_t */	protection:3,	/* protection code */
	/* vm_prot_t */	max_protection:3, /* maximum protection */
	/* vm_inherit_t */ inheritance:2, /* inheritance */
	/* nested pmap */ use_pmap:1,	/* nested pmaps */
	/* user alias */ alias:8;
	unsigned short		wired_count;	/* can be paged if = 0 */
	unsigned short		user_wired_count; /* for vm_wire */
};

/*
 * wired_counts are unsigned short.  This value is used to safeguard
 * against any mishaps due to runaway user programs.
 */
#define MAX_WIRE_COUNT		65535



/*
 *	Type:		struct vm_map_header
 *
 *	Description:
 *		Header for a vm_map and a vm_map_copy.
 */
struct vm_map_header {
	struct vm_map_links	links;		/* first, last, min, max */
	int			nentries;	/* Number of entries */
	boolean_t		entries_pageable;
						/* are map entries pageable? */
};

/*
 *	Type:		vm_map_t [exported; contents invisible]
 *
 *	Description:
 *		An address map -- a directory relating valid
 *		regions of a task's address space to the corresponding
 *		virtual memory objects.
 *
 *	Implementation:
 *		Maps are doubly-linked lists of map entries, sorted
 *		by address.  One hint is used to start
 *		searches again from the last successful search,
 *		insertion, or removal.  Another hint is used to
 *		quickly find free space.
 */
struct vm_map {
	lock_t			lock;		/* uni- and smp-lock */
	struct vm_map_header	hdr;		/* Map entry header */
#define min_offset		hdr.links.start	/* start of range */
#define max_offset		hdr.links.end	/* end of range */
	pmap_t			pmap;		/* Physical map */
	vm_size_t		size;		/* virtual size */
	int			ref_count;	/* Reference count */
#if	TASK_SWAPPER
	int			res_count;	/* Residence count (swap) */
	int			sw_state;	/* Swap state */
#endif	/* TASK_SWAPPER */
	decl_mutex_data(,	s_lock)		/* Lock ref, res, hint fields */
	vm_map_entry_t		hint;		/* hint for quick lookups */
	vm_map_entry_t		first_free;	/* First free space hint */
	boolean_t		wait_for_space;	/* Should callers wait
						   for space? */
	boolean_t		wiring_required; /* All memory wired? */
	boolean_t		no_zero_fill;	/* No zero fill absent pages */
	unsigned int		timestamp;	/* Version number */
} ;

#define vm_map_to_entry(map)	((struct vm_map_entry *) &(map)->hdr.links)
#define vm_map_first_entry(map)	((map)->hdr.links.next)
#define vm_map_last_entry(map)	((map)->hdr.links.prev)

#if	TASK_SWAPPER
/*
 *	VM map swap states.  There are no transition states.
 */
#define MAP_SW_IN	1	/* map is swapped in; residence count > 0 */
#define MAP_SW_OUT	2	/* map is out (res_count == 0) */
#endif	/* TASK_SWAPPER */

/*
 *	Type:		vm_map_version_t [exported; contents invisible]
 *
 *	Description:
 *		Map versions may be used to quickly validate a previous
 *		lookup operation.
+ * + * Usage note: + * Because they are bulky objects, map versions are usually + * passed by reference. + * + * Implementation: + * Just a timestamp for the main map. + */ +typedef struct vm_map_version { + unsigned int main_timestamp; +} vm_map_version_t; + +/* + * Type: vm_map_copy_t [exported; contents invisible] + * + * Description: + * A map copy object represents a region of virtual memory + * that has been copied from an address map but is still + * in transit. + * + * A map copy object may only be used by a single thread + * at a time. + * + * Implementation: + * There are three formats for map copy objects. + * The first is very similar to the main + * address map in structure, and as a result, some + * of the internal maintenance functions/macros can + * be used with either address maps or map copy objects. + * + * The map copy object contains a header links + * entry onto which the other entries that represent + * the region are chained. + * + * The second format is a single vm object. This is used + * primarily in the pageout path. The third format is a + * list of vm pages. An optional continuation provides + * a hook to be called to obtain more of the memory, + * or perform other operations. The continuation takes 3 + * arguments, a saved arg buffer, a pointer to a new vm_map_copy + * (returned) and an abort flag (abort if TRUE). + */ + +#define VM_MAP_COPY_PAGE_LIST_MAX 20 +#define VM_MAP_COPY_PAGE_LIST_MAX_SIZE (VM_MAP_COPY_PAGE_LIST_MAX * PAGE_SIZE) + + +/* + * Options for vm_map_copyin_page_list. + */ + +#define VM_MAP_COPYIN_OPT_VM_PROT 0x7 +#define VM_MAP_COPYIN_OPT_SRC_DESTROY 0x8 +#define VM_MAP_COPYIN_OPT_STEAL_PAGES 0x10 +#define VM_MAP_COPYIN_OPT_PMAP_ENTER 0x20 +#define VM_MAP_COPYIN_OPT_NO_ZERO_FILL 0x40 + +/* + * Continuation structures for vm_map_copyin_page_list. 
+ */ +typedef struct { + vm_map_t map; + vm_offset_t src_addr; + vm_size_t src_len; + vm_offset_t destroy_addr; + vm_size_t destroy_len; + int options; +} vm_map_copyin_args_data_t, *vm_map_copyin_args_t; + +#define VM_MAP_COPYIN_ARGS_NULL ((vm_map_copyin_args_t) 0) + + +/* vm_map_copy_cont_t is a type definition/prototype + * for the cont function pointer in vm_map_copy structure. + */ +typedef kern_return_t (*vm_map_copy_cont_t)( + vm_map_copyin_args_t, + vm_map_copy_t *); + +#define VM_MAP_COPY_CONT_NULL ((vm_map_copy_cont_t) 0) + +struct vm_map_copy { + int type; +#define VM_MAP_COPY_ENTRY_LIST 1 +#define VM_MAP_COPY_OBJECT 2 +#define VM_MAP_COPY_PAGE_LIST 3 +#define VM_MAP_COPY_KERNEL_BUFFER 4 + vm_object_offset_t offset; + vm_size_t size; + union { + struct vm_map_header hdr; /* ENTRY_LIST */ + struct { /* OBJECT */ + vm_object_t object; + vm_size_t index; /* record progress as pages + * are moved from object to + * page list; must be zero + * when first invoking + * vm_map_object_to_page_list + */ + } c_o; + struct { /* PAGE_LIST */ + int npages; + boolean_t page_loose; + vm_map_copy_cont_t cont; + vm_map_copyin_args_t cont_args; + vm_page_t page_list[VM_MAP_COPY_PAGE_LIST_MAX]; + } c_p; + struct { /* KERNEL_BUFFER */ + vm_offset_t kdata; + vm_size_t kalloc_size; /* size of this copy_t */ + } c_k; + } c_u; +}; + + +#define cpy_hdr c_u.hdr + +#define cpy_object c_u.c_o.object +#define cpy_index c_u.c_o.index + +#define cpy_page_list c_u.c_p.page_list +#define cpy_npages c_u.c_p.npages +#define cpy_page_loose c_u.c_p.page_loose +#define cpy_cont c_u.c_p.cont +#define cpy_cont_args c_u.c_p.cont_args + +#define cpy_kdata c_u.c_k.kdata +#define cpy_kalloc_size c_u.c_k.kalloc_size + + +/* + * Useful macros for entry list copy objects + */ + +#define vm_map_copy_to_entry(copy) \ + ((struct vm_map_entry *) &(copy)->cpy_hdr.links) +#define vm_map_copy_first_entry(copy) \ + ((copy)->cpy_hdr.links.next) +#define vm_map_copy_last_entry(copy) \ + 
((copy)->cpy_hdr.links.prev) + +/* + * Continuation macros for page list copy objects + */ + +#define vm_map_copy_invoke_cont(old_copy, new_copy, result) \ +MACRO_BEGIN \ + assert(vm_map_copy_cont_is_valid(old_copy)); \ + vm_map_copy_page_discard(old_copy); \ + *result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \ + new_copy); \ + (old_copy)->cpy_cont = VM_MAP_COPY_CONT_NULL; \ +MACRO_END + +#define vm_map_copy_invoke_extend_cont(old_copy, new_copy, result) \ +MACRO_BEGIN \ + assert(vm_map_copy_cont_is_valid(old_copy)); \ + *result = (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \ + new_copy); \ + (old_copy)->cpy_cont = VM_MAP_COPY_CONT_NULL; \ +MACRO_END + +#define vm_map_copy_abort_cont(old_copy) \ +MACRO_BEGIN \ + assert(vm_map_copy_cont_is_valid(old_copy)); \ + vm_map_copy_page_discard(old_copy); \ + (*((old_copy)->cpy_cont))((old_copy)->cpy_cont_args, \ + (vm_map_copy_t *) 0); \ + (old_copy)->cpy_cont = VM_MAP_COPY_CONT_NULL; \ + (old_copy)->cpy_cont_args = VM_MAP_COPYIN_ARGS_NULL; \ +MACRO_END + +#define vm_map_copy_has_cont(copy) \ + (((copy)->cpy_cont) != VM_MAP_COPY_CONT_NULL) + +/* + * Macro to determine number of pages in a page-list copy chain. + */ + +#define vm_map_copy_page_count(copy) \ + (round_page(((vm_offset_t)(copy)->offset - trunc_page((vm_offset_t)(copy)->offset)) + (copy)->size) / PAGE_SIZE) + +/* + * Macros: vm_map_lock, etc. [internal use only] + * Description: + * Perform locking on the data portion of a map. + * When multiple maps are to be locked, order by map address. 
+ * (See vm_map.c::vm_remap()) + */ + +#define vm_map_lock_init(map) \ +MACRO_BEGIN \ + lock_init(&(map)->lock, TRUE, ETAP_VM_MAP, ETAP_VM_MAP_I); \ + (map)->timestamp = 0; \ +MACRO_END +#define vm_map_lock(map) \ +MACRO_BEGIN \ + lock_write(&(map)->lock); \ + (map)->timestamp++; \ +MACRO_END + +#define vm_map_unlock(map) lock_write_done(&(map)->lock) +#define vm_map_lock_read(map) lock_read(&(map)->lock) +#define vm_map_unlock_read(map) lock_read_done(&(map)->lock) +#define vm_map_lock_write_to_read(map) \ + lock_write_to_read(&(map)->lock) +#define vm_map_lock_read_to_write(map) \ + (lock_read_to_write(&(map)->lock) || (((map)->timestamp++), 0)) + +extern zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ + +/* + * Exported procedures that operate on vm_map_t. + */ + +/* Initialize the module */ +extern void vm_map_init(void); + +/* Allocate a range in the specified virtual address map and + * return the entry allocated for that range. */ +extern kern_return_t vm_map_find_space( + vm_map_t map, + vm_offset_t *address, /* OUT */ + vm_size_t size, + vm_offset_t mask, + vm_map_entry_t *o_entry); /* OUT */ + +/* Lookup map entry containing or the specified address in the given map */ +extern boolean_t vm_map_lookup_entry( + vm_map_t map, + vm_offset_t address, + vm_map_entry_t *entry); /* OUT */ + +/* A version of vm_map_copy_discard that can be called + * as a continuation from a vm_map_copy page list. */ +extern kern_return_t vm_map_copy_discard_cont( + vm_map_copyin_args_t cont_args, + vm_map_copy_t *copy_result);/* OUT */ + +/* Find the VM object, offset, and protection for a given virtual address + * in the specified map, assuming a page fault of the type specified. 
*/ +extern kern_return_t vm_map_lookup_locked( + vm_map_t *var_map, /* IN/OUT */ + vm_offset_t vaddr, + vm_prot_t fault_type, + vm_map_version_t *out_version, /* OUT */ + vm_object_t *object, /* OUT */ + vm_object_offset_t *offset, /* OUT */ + vm_prot_t *out_prot, /* OUT */ + boolean_t *wired, /* OUT */ + int *behavior, /* OUT */ + vm_object_offset_t *lo_offset, /* OUT */ + vm_object_offset_t *hi_offset, /* OUT */ + vm_map_t *pmap_map); /* OUT */ + +/* Verifies that the map has not changed since the given version. */ +extern boolean_t vm_map_verify( + vm_map_t map, + vm_map_version_t *version); /* REF */ + +/* Split a vm_map_entry into 2 entries */ +extern void _vm_map_clip_start( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_offset_t start); + +extern vm_map_entry_t vm_map_entry_insert( + vm_map_t map, + vm_map_entry_t insp_entry, + vm_offset_t start, + vm_offset_t end, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + boolean_t is_shared, + boolean_t in_transition, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_behavior_t behavior, + vm_inherit_t inheritance, + unsigned wired_count); + +extern kern_return_t vm_remap_extract( + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance, + boolean_t pageable); + +extern kern_return_t vm_remap_range_allocate( + vm_map_t map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + boolean_t anywhere, + vm_map_entry_t *map_entry); + +extern kern_return_t vm_remap_extract( + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance, + boolean_t pageable); + +extern kern_return_t vm_remap_range_allocate( + vm_map_t map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + boolean_t 
anywhere, + vm_map_entry_t *map_entry); + +/* + * Functions implemented as macros + */ +#define vm_map_min(map) ((map)->min_offset) + /* Lowest valid address in + * a map */ + +#define vm_map_max(map) ((map)->max_offset) + /* Highest valid address */ + +#define vm_map_pmap(map) ((map)->pmap) + /* Physical map associated + * with this address map */ + +#define vm_map_verify_done(map, version) vm_map_unlock_read(map) + /* Operation that required + * a verified lookup is + * now complete */ + +/* + * Macros/functions for map residence counts and swapin/out of vm maps + */ +#if TASK_SWAPPER + +#if MACH_ASSERT +/* Gain a reference to an existing map */ +extern void vm_map_reference( + vm_map_t map); +/* Lose a residence count */ +extern void vm_map_res_deallocate( + vm_map_t map); +/* Gain a residence count on a map */ +extern void vm_map_res_reference( + vm_map_t map); +/* Gain reference & residence counts to possibly swapped-out map */ +extern void vm_map_reference_swap( + vm_map_t map); + +#else /* MACH_ASSERT */ + +#define vm_map_reference(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + if (Map) { \ + mutex_lock(&Map->s_lock); \ + Map->res_count++; \ + Map->ref_count++; \ + mutex_unlock(&Map->s_lock); \ + } \ +MACRO_END + +#define vm_map_res_reference(map) \ +MACRO_BEGIN \ + vm_map_t Lmap = (map); \ + if (Lmap->res_count == 0) { \ + mutex_unlock(&Lmap->s_lock); \ + vm_map_lock(Lmap); \ + vm_map_swapin(Lmap); \ + mutex_lock(&Lmap->s_lock); \ + ++Lmap->res_count; \ + vm_map_unlock(Lmap); \ + } else \ + ++Lmap->res_count; \ +MACRO_END + +#define vm_map_res_deallocate(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + if (--Map->res_count == 0) { \ + mutex_unlock(&Map->s_lock); \ + vm_map_lock(Map); \ + vm_map_swapout(Map); \ + vm_map_unlock(Map); \ + mutex_lock(&Map->s_lock); \ + } \ +MACRO_END + +#define vm_map_reference_swap(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + mutex_lock(&Map->s_lock); \ + ++Map->ref_count; \ + vm_map_res_reference(Map); \ + 
mutex_unlock(&Map->s_lock); \ +MACRO_END +#endif /* MACH_ASSERT */ + +extern void vm_map_swapin( + vm_map_t map); + +extern void vm_map_swapout( + vm_map_t map); + +#else /* TASK_SWAPPER */ + +#define vm_map_reference(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + if (Map) { \ + mutex_lock(&Map->s_lock); \ + Map->ref_count++; \ + mutex_unlock(&Map->s_lock); \ + } \ +MACRO_END + +#define vm_map_reference_swap(map) vm_map_reference(map) +#define vm_map_res_reference(map) +#define vm_map_res_deallocate(map) + +#endif /* TASK_SWAPPER */ + +/* + * Submap object. Must be used to create memory to be put + * in a submap by vm_map_submap. + */ +extern vm_object_t vm_submap_object; + +/* + * Wait and wakeup macros for in_transition map entries. + */ +#define vm_map_entry_wait(map, interruptible) \ + MACRO_BEGIN \ + assert_wait((event_t)&(map)->hdr, interruptible); \ + vm_map_unlock(map); \ + thread_block((void (*)(void))0); \ + MACRO_END + +#define vm_map_entry_wakeup(map) thread_wakeup((event_t)(&(map)->hdr)) + + + +#define vm_map_ref_fast(map) \ + MACRO_BEGIN \ + mutex_lock(&map->s_lock); \ + map->ref_count++; \ + vm_map_res_reference(map); \ + mutex_unlock(&map->s_lock); \ + MACRO_END + +#define vm_map_dealloc_fast(map) \ + MACRO_BEGIN \ + register int c; \ + \ + mutex_lock(&map->s_lock); \ + c = --map->ref_count; \ + if (c > 0) \ + vm_map_res_deallocate(map); \ + mutex_unlock(&map->s_lock); \ + if (c == 0) \ + vm_map_destroy(map); \ + MACRO_END + + +/* simplify map entries */ +extern void vm_map_simplify( + vm_map_t map, + vm_offset_t start); + +/* Steal all the pages from a vm_map_copy page_list */ +extern void vm_map_copy_steal_pages( + vm_map_copy_t copy); + +/* Discard a copy without using it */ +extern void vm_map_copy_discard( + vm_map_copy_t copy); + +/* Move the information in a map copy object to a new map copy object */ +extern vm_map_copy_t vm_map_copy_copy( + vm_map_copy_t copy); + +/* Overwrite existing memory with a copy */ +extern kern_return_t 
vm_map_copy_overwrite( + vm_map_t dst_map, + vm_offset_t dst_addr, + vm_map_copy_t copy, + int interruptible); + +/* Version of vm_map_copyout() for page list vm map copies. */ +extern kern_return_t vm_map_copyout_page_list( + vm_map_t dst_map, + vm_offset_t *dst_addr, /* OUT */ + vm_map_copy_t copy); + +/* Get rid of the pages in a page_list copy. */ +extern void vm_map_copy_page_discard( + vm_map_copy_t copy); + +/* Create a copy object from an object. */ +extern kern_return_t vm_map_copyin_object( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_map_copy_t *copy_result); /* OUT */ + + +/* Make a copy of a region */ +/* Make a copy of a region using a page list copy */ +extern kern_return_t vm_map_copyin_page_list( + vm_map_t src_map, + vm_offset_t src_addr, + vm_size_t len, + int options, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t is_cont); + +extern vm_map_t vm_map_switch( + vm_map_t map); + +extern int vm_map_copy_cont_is_valid( + vm_map_copy_t copy); + + + +#endif /* !MACH_KERNEL_PRIVATE */ + +/* Get rid of a map */ +extern void vm_map_destroy( + vm_map_t map); +/* Lose a reference */ +extern void vm_map_deallocate( + vm_map_t map); + +/* Create an empty map */ +extern vm_map_t vm_map_create( + pmap_t pmap, + vm_offset_t min, + vm_offset_t max, + boolean_t pageable); + + +/* Enter a mapping */ +extern kern_return_t vm_map_enter( + vm_map_t map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); + +extern kern_return_t vm_map_write_user( + vm_map_t map, + vm_offset_t src_addr, + vm_offset_t dst_addr, + vm_size_t size); + +extern kern_return_t vm_map_read_user( + vm_map_t map, + vm_offset_t src_addr, + vm_offset_t dst_addr, + vm_size_t size); + +/* Create a new task map using an existing task map as a template. 
*/ +extern vm_map_t vm_map_fork( + vm_map_t old_map); + +/* Change protection */ +extern kern_return_t vm_map_protect( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_prot_t new_prot, + boolean_t set_max); + +/* Change inheritance */ +extern kern_return_t vm_map_inherit( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_inherit_t new_inheritance); + +/* wire down a region */ +extern kern_return_t vm_map_wire( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_prot_t access_type, + boolean_t user_wire); + +/* unwire a region */ +extern kern_return_t vm_map_unwire( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + boolean_t user_wire); + +/* Deallocate a region */ +extern kern_return_t vm_map_remove( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + boolean_t flags); + +/* Place a copy into a map */ +extern kern_return_t vm_map_copyout( + vm_map_t dst_map, + vm_offset_t *dst_addr, /* OUT */ + vm_map_copy_t copy); + + +/* Add or remove machine-dependent attributes from map regions */ +extern kern_return_t vm_map_machine_attribute( + vm_map_t map, + vm_offset_t address, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value); /* IN/OUT */ +/* Set paging behavior */ +extern kern_return_t vm_map_behavior_set( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_behavior_t new_behavior); + +extern kern_return_t vm_map_copyin_common( + vm_map_t src_map, + vm_offset_t src_addr, + vm_size_t len, + boolean_t src_destroy, + boolean_t src_volatile, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t use_maxprot); + +extern kern_return_t vm_map_submap( + vm_map_t map, + vm_offset_t start, + vm_offset_t end, + vm_map_t submap, + vm_offset_t offset, + boolean_t use_pmap); + +/* + * Macros to invoke vm_map_copyin_common. vm_map_copyin is the + * usual form; it handles a copyin based on the current protection + * (current protection == VM_PROT_NONE) is a failure. 
+ * vm_map_copyin_maxprot handles a copyin based on maximum possible + * access. The difference is that a region with no current access + * BUT possible maximum access is rejected by vm_map_copyin(), but + * returned by vm_map_copyin_maxprot. + */ +#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \ + vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ + FALSE, copy_result, FALSE) + +#define vm_map_copyin_maxprot(src_map, \ + src_addr, len, src_destroy, copy_result) \ + vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ + FALSE, copy_result, TRUE) + +#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) + +/* + * Flags for vm_map_remove() and vm_map_delete() + */ +#define VM_MAP_NO_FLAGS 0x0 +#define VM_MAP_REMOVE_KUNWIRE 0x1 +#define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 +#define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 + + +#ifdef MACH_KERNEL_PRIVATE + +/* address space shared region descriptor */ + +struct shared_region_mapping { + decl_mutex_data(, Lock) /* Synchronization */ + int ref_count; + ipc_port_t text_region; + vm_size_t text_size; + ipc_port_t data_region; + vm_size_t data_size; + vm_offset_t region_mappings; + vm_offset_t client_base; + vm_offset_t alternate_base; + vm_offset_t alternate_next; + int flags; + int depth; + struct shared_region_object_chain *object_chain; + struct shared_region_mapping *self; + struct shared_region_mapping *next; +}; + +typedef struct shared_region_mapping *shared_region_mapping_t; + +struct shared_region_object_chain { + shared_region_mapping_t object_chain_region; + int depth; + struct shared_region_object_chain *next; +}; + +typedef struct shared_region_object_chain *shared_region_object_chain_t; + +#endif /* MACH_KERNEL_PRIVATE */ + + +#endif /* _VM_VM_MAP_H_ */ + diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c new file mode 100644 index 000000000..13443d22c --- /dev/null +++ b/osfmk/vm/vm_object.c @@ -0,0 +1,4513 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: vm/vm_object.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Virtual memory object module. + */ + +#ifdef MACH_BSD +/* remove as part of compoenent support merge */ +extern int vnode_pager_workaround; +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +/* + * Virtual memory objects maintain the actual data + * associated with allocated virtual memory. A given + * page of memory exists within exactly one object. + * + * An object is only deallocated when all "references" + * are given up. Only one "reference" to a given + * region of an object should be writeable. + * + * Associated with each object is a list of all resident + * memory pages belonging to that object; this list is + * maintained by the "vm_page" module, but locked by the object's + * lock. + * + * Each object also records the memory object port + * that is used by the kernel to request and write + * back data (the memory object port, field "pager"), + * and the ports provided to the memory manager, the server that + * manages that data, to return data and control its + * use (the memory object control port, field "pager_request") + * and for naming (the memory object name port, field "pager_name"). + * + * Virtual memory objects are allocated to provide + * zero-filled memory (vm_allocate) or map a user-defined + * memory object into a virtual address space (vm_map). + * + * Virtual memory objects that refer to a user-defined + * memory object are called "permanent", because all changes + * made in virtual memory are reflected back to the + * memory manager, which may then store it permanently. 
+ * Other virtual memory objects are called "temporary", + * meaning that changes need be written back only when + * necessary to reclaim pages, and that storage associated + * with the object can be discarded once it is no longer + * mapped. + * + * A permanent memory object may be mapped into more + * than one virtual address space. Moreover, two threads + * may attempt to make the first mapping of a memory + * object concurrently. Only one thread is allowed to + * complete this mapping; all others wait for the + * "pager_initialized" field is asserted, indicating + * that the first thread has initialized all of the + * necessary fields in the virtual memory object structure. + * + * The kernel relies on a *default memory manager* to + * provide backing storage for the zero-filled virtual + * memory objects. The memory object ports associated + * with these temporary virtual memory objects are only + * generated and passed to the default memory manager + * when it becomes necessary. Virtual memory objects + * that depend on the default memory manager are called + * "internal". The "pager_created" field is provided to + * indicate whether these ports have ever been allocated. + * + * The kernel may also create virtual memory objects to + * hold changed pages after a copy-on-write operation. + * In this case, the virtual memory object (and its + * backing storage -- its memory object) only contain + * those pages that have been changed. The "shadow" + * field refers to the virtual memory object that contains + * the remainder of the contents. The "shadow_offset" + * field indicates where in the "shadow" these contents begin. + * The "copy" field refers to a virtual memory object + * to which changed pages must be copied before changing + * this object, in order to implement another form + * of copy-on-write optimization. + * + * The virtual memory object structure also records + * the attributes associated with its memory object. 
 *	The "pager_ready", "can_persist" and "copy_strategy"
 *	fields represent those attributes.  The "cached_list"
 *	field is used in the implementation of the persistence
 *	attribute.
 *
 * ZZZ Continue this comment.
 */

/* Forward declarations for internal functions. */
/* Initialize a pre-allocated object from the template (no zone alloc). */
extern void		_vm_object_allocate(
				vm_object_size_t	size,
				vm_object_t		object);

/* Free all resources of an object holding exactly one reference. */
extern kern_return_t	vm_object_terminate(
				vm_object_t		object);

extern void		vm_object_remove(
				vm_object_t		object);

/* Shrink the cached-object queue back under vm_object_cached_max. */
extern vm_object_t	vm_object_cache_trim(
				boolean_t called_from_vm_object_deallocate);

extern void		vm_object_deactivate_pages(
				vm_object_t		object);

extern void		vm_object_abort_activity(
				vm_object_t		object);

extern kern_return_t	vm_object_copy_call(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*_result_object);

extern void		vm_object_do_collapse(
				vm_object_t		object,
				vm_object_t		backing_object);

extern void		vm_object_do_bypass(
				vm_object_t		object,
				vm_object_t		backing_object);

/* Terminate the pager and release its port rights. */
extern void		memory_object_release(
				ipc_port_t		pager,
				pager_request_t		pager_request);

zone_t		vm_object_zone;		/* vm backing store zone */

/*
 *	All wired-down kernel memory belongs to a single virtual
 *	memory object (kernel_object) to avoid wasting data structures.
 */
struct vm_object	kernel_object_store;
vm_object_t		kernel_object = &kernel_object_store;

/*
 *	The submap object is used as a placeholder for vm_map_submap
 *	operations.  The object is declared in vm_map.c because it
 *	is exported by the vm_map module.  The storage is declared
 *	here because it must be initialized here.
 */
struct vm_object	vm_submap_object_store;

/*
 *	Virtual memory objects are initialized from
 *	a template (see vm_object_allocate).
 *
 *	When adding a new field to the virtual memory
 *	object structure, be sure to add initialization
 *	(see vm_object_init).
 */
struct vm_object	vm_object_template;

/*
 *	Virtual memory objects that are not referenced by
 *	any address maps, but that are allowed to persist
 *	(an attribute specified by the associated memory manager),
 *	are kept in a queue (vm_object_cached_list).
 *
 *	When an object from this queue is referenced again,
 *	for example to make another address space mapping,
 *	it must be removed from the queue.  That is, the
 *	queue contains *only* objects with zero references.
 *
 *	The kernel may choose to terminate objects from this
 *	queue in order to reclaim storage.  The current policy
 *	is to permit a fixed maximum number of unreferenced
 *	objects (vm_object_cached_max).
 *
 *	A lock (a mutex; accessed by routines
 *	vm_object_cache_{lock,lock_try,unlock}) governs the
 *	object cache.  It must be held when objects are
 *	added to or removed from the cache (in vm_object_terminate).
 *	The routines that acquire a reference to a virtual
 *	memory object based on one of the memory object ports
 *	must also lock the cache.
 *
 *	Ideally, the object cache should be more isolated
 *	from the reference mechanism, so that the lock need
 *	not be held to make simple references.
+ */ +queue_head_t vm_object_cached_list; +int vm_object_cached_count; +int vm_object_cached_high; /* highest # of cached objects */ +int vm_object_cached_max = 500; /* may be patched*/ + +decl_mutex_data(,vm_object_cached_lock_data) + +#define vm_object_cache_lock() \ + mutex_lock(&vm_object_cached_lock_data) +#define vm_object_cache_lock_try() \ + mutex_try(&vm_object_cached_lock_data) +#define vm_object_cache_unlock() \ + mutex_unlock(&vm_object_cached_lock_data) + +#define VM_OBJECT_HASH_COUNT 1024 +queue_head_t vm_object_hashtable[VM_OBJECT_HASH_COUNT]; +struct zone *vm_object_hash_zone; + +struct vm_object_hash_entry { + queue_chain_t hash_link; /* hash chain link */ + ipc_port_t pager; /* pager we represent */ + vm_object_t object; /* corresponding object */ + boolean_t waiting; /* someone waiting for + * termination */ +}; + +typedef struct vm_object_hash_entry *vm_object_hash_entry_t; +#define VM_OBJECT_HASH_ENTRY_NULL ((vm_object_hash_entry_t) 0) + +#define VM_OBJECT_HASH_SHIFT 8 +#define vm_object_hash(pager) \ + ((((unsigned)pager) >> VM_OBJECT_HASH_SHIFT) % VM_OBJECT_HASH_COUNT) + +/* + * vm_object_hash_lookup looks up a pager in the hashtable + * and returns the corresponding entry, with optional removal. + */ + +vm_object_hash_entry_t +vm_object_hash_lookup( + ipc_port_t pager, + boolean_t remove_entry) +{ + register queue_t bucket; + register vm_object_hash_entry_t entry; + + bucket = &vm_object_hashtable[vm_object_hash(pager)]; + + entry = (vm_object_hash_entry_t)queue_first(bucket); + while (!queue_end(bucket, (queue_entry_t)entry)) { + if (entry->pager == pager && !remove_entry) + return(entry); + else if (entry->pager == pager) { + queue_remove(bucket, entry, + vm_object_hash_entry_t, hash_link); + return(entry); + } + + entry = (vm_object_hash_entry_t)queue_next(&entry->hash_link); + } + + return(VM_OBJECT_HASH_ENTRY_NULL); +} + +/* + * vm_object_hash_enter enters the specified + * pager / cache object association in the hashtable. 
+ */ + +void +vm_object_hash_insert( + vm_object_hash_entry_t entry) +{ + register queue_t bucket; + + bucket = &vm_object_hashtable[vm_object_hash(entry->pager)]; + + queue_enter(bucket, entry, vm_object_hash_entry_t, hash_link); +} + +vm_object_hash_entry_t +vm_object_hash_entry_alloc( + ipc_port_t pager) +{ + vm_object_hash_entry_t entry; + + entry = (vm_object_hash_entry_t)zalloc(vm_object_hash_zone); + entry->pager = pager; + entry->object = VM_OBJECT_NULL; + entry->waiting = FALSE; + + return(entry); +} + +void +vm_object_hash_entry_free( + vm_object_hash_entry_t entry) +{ + zfree(vm_object_hash_zone, (vm_offset_t)entry); +} + +/* + * vm_object_allocate: + * + * Returns a new object with the given size. + */ + +void +_vm_object_allocate( + vm_object_size_t size, + vm_object_t object) +{ + XPR(XPR_VM_OBJECT, + "vm_object_allocate, object 0x%X size 0x%X\n", + (integer_t)object, size, 0,0,0); + + *object = vm_object_template; + queue_init(&object->memq); + queue_init(&object->msr_q); +#ifdef UBC_DEBUG + queue_init(&object->uplq); +#endif /* UBC_DEBUG */ + vm_object_lock_init(object); + object->size = size; +} + +vm_object_t +vm_object_allocate( + vm_object_size_t size) +{ + register vm_object_t object; + register ipc_port_t port; + + object = (vm_object_t) zalloc(vm_object_zone); + +// dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */ + + _vm_object_allocate(size, object); + + return object; +} + +/* + * vm_object_bootstrap: + * + * Initialize the VM objects module. 
+ */ +void +vm_object_bootstrap(void) +{ + register i; + + vm_object_zone = zinit((vm_size_t) sizeof(struct vm_object), + round_page(512*1024), + round_page(12*1024), + "vm objects"); + + queue_init(&vm_object_cached_list); + mutex_init(&vm_object_cached_lock_data, ETAP_VM_OBJ_CACHE); + + vm_object_hash_zone = + zinit((vm_size_t) sizeof (struct vm_object_hash_entry), + round_page(512*1024), + round_page(12*1024), + "vm object hash entries"); + + for (i = 0; i < VM_OBJECT_HASH_COUNT; i++) + queue_init(&vm_object_hashtable[i]); + + /* + * Fill in a template object, for quick initialization + */ + + /* memq; Lock; init after allocation */ + vm_object_template.size = 0; + vm_object_template.frozen_size = 0; + vm_object_template.ref_count = 1; +#if TASK_SWAPPER + vm_object_template.res_count = 1; +#endif /* TASK_SWAPPER */ + vm_object_template.resident_page_count = 0; + vm_object_template.copy = VM_OBJECT_NULL; + vm_object_template.shadow = VM_OBJECT_NULL; + vm_object_template.shadow_offset = (vm_object_offset_t) 0; + vm_object_template.true_share = FALSE; + + vm_object_template.pager = IP_NULL; + vm_object_template.paging_offset = 0; + vm_object_template.pager_request = PAGER_REQUEST_NULL; + /* msr_q; init after allocation */ + + vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC; + vm_object_template.absent_count = 0; + vm_object_template.paging_in_progress = 0; + + /* Begin bitfields */ + vm_object_template.all_wanted = 0; /* all bits FALSE */ + vm_object_template.pager_created = FALSE; + vm_object_template.pager_initialized = FALSE; + vm_object_template.pager_ready = FALSE; + vm_object_template.pager_trusted = FALSE; + vm_object_template.can_persist = FALSE; + vm_object_template.internal = TRUE; + vm_object_template.temporary = TRUE; + vm_object_template.private = FALSE; + vm_object_template.pageout = FALSE; + vm_object_template.alive = TRUE; + vm_object_template.lock_in_progress = FALSE; + vm_object_template.lock_restart = FALSE; + 
vm_object_template.silent_overwrite = FALSE; + vm_object_template.advisory_pageout = FALSE; + vm_object_template.shadowed = FALSE; + vm_object_template.terminating = FALSE; + vm_object_template.shadow_severed = FALSE; + vm_object_template.phys_contiguous = FALSE; + /* End bitfields */ + + /* cached_list; init after allocation */ + vm_object_template.last_alloc = (vm_object_offset_t) 0; + vm_object_template.cluster_size = 0; +#if MACH_PAGEMAP + vm_object_template.existence_map = VM_EXTERNAL_NULL; +#endif /* MACH_PAGEMAP */ +#if MACH_ASSERT + vm_object_template.paging_object = VM_OBJECT_NULL; +#endif /* MACH_ASSERT */ + + /* + * Initialize the "kernel object" + */ + + kernel_object = &kernel_object_store; + +/* + * Note that in the following size specifications, we need to add 1 because + * VM_MAX_KERNEL_ADDRESS is a maximum address, not a size. + */ + _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1, + kernel_object); + + /* + * Initialize the "submap object". Make it as large as the + * kernel object so that no limit is imposed on submap sizes. + */ + + vm_submap_object = &vm_submap_object_store; + _vm_object_allocate((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) + 1, + vm_submap_object); + /* + * Create an "extra" reference to this object so that we never + * try to deallocate it; zfree doesn't like to be called with + * non-zone memory. + */ + vm_object_reference(vm_submap_object); + +#if MACH_PAGEMAP + vm_external_module_initialize(); +#endif /* MACH_PAGEMAP */ +} + +void +vm_object_init(void) +{ + /* + * Finish initializing the kernel object. + */ +} + +#if TASK_SWAPPER +/* + * vm_object_res_deallocate + * + * (recursively) decrement residence counts on vm objects and their shadows. + * Called from vm_object_deallocate and when swapping out an object. + * + * The object is locked, and remains locked throughout the function, + * even as we iterate down the shadow chain. 
Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

void
vm_object_res_deallocate(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked so it can be called directly
	 * from vm_object_deallocate.  Original object is never
	 * unlocked.
	 */
	assert(object->res_count > 0);
	while (--object->res_count == 0) {
		assert(object->ref_count >= object->res_count);
		vm_object_deactivate_pages(object);
		/* iterate on shadow, if present */
		if (object->shadow != VM_OBJECT_NULL) {
			vm_object_t tmp_object = object->shadow;
			/*
			 * Hand-over-hand locking: take the shadow's lock
			 * before dropping this object's, so the chain
			 * cannot change underneath us.
			 */
			vm_object_lock(tmp_object);
			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
			assert(object->res_count > 0);
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}

/*
 * vm_object_res_reference
 *
 * Internal function to increment residence count on a vm object
 * and its shadows.  It is called only from vm_object_reference, and
 * when swapping in a vm object, via vm_map_swap.
 *
 * The object is locked, and remains locked throughout the function,
 * even as we iterate down the shadow chain.  Locks on intermediate objects
 * will be dropped, but not the original object.
 *
 * NOTE: this function used to use recursion, rather than iteration.
 */

void
vm_object_res_reference(
	vm_object_t	object)
{
	vm_object_t orig_object = object;
	/*
	 * Object is locked, so this can be called directly
	 * from vm_object_reference.  This lock is never released.
	 */
	/*
	 * Only the 0 -> 1 transition propagates down the shadow
	 * chain; hand-over-hand locking as in res_deallocate.
	 */
	while ((++object->res_count == 1)  &&
		(object->shadow != VM_OBJECT_NULL)) {
		vm_object_t tmp_object = object->shadow;

		assert(object->ref_count >= object->res_count);
		vm_object_lock(tmp_object);
		if (object != orig_object)
			vm_object_unlock(object);
		object = tmp_object;
	}
	if (object != orig_object)
		vm_object_unlock(object);
	assert(orig_object->ref_count >= orig_object->res_count);
}
#endif	/* TASK_SWAPPER */

#if	MACH_ASSERT
/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(
	register vm_object_t	object)
{
	if (object == VM_OBJECT_NULL)
		return;

	vm_object_lock(object);
	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_res_reference(object);
	vm_object_unlock(object);
}
#endif	/* MACH_ASSERT */

/* remove the typedef below when emergency work-around is taken out */
/*
 * NOTE(review): this mirrors the layout of the BSD-side vnode pager
 * structure; it must stay in sync with that definition until the
 * work-around is removed.
 */
typedef struct vnode_pager {
	ipc_port_t	pager;			/* pager */
	ipc_port_t	pager_handle;		/* pager handle */
	ipc_port_t	vm_obj_handle;		/* memory object's control handle */
	void		*vnode_handle;		/* vnode handle */
} *vnode_pager_t;

#define MIGHT_NOT_CACHE_SHADOWS		1
#if	MIGHT_NOT_CACHE_SHADOWS
int cache_shadows = TRUE;
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(
	register vm_object_t	object)
{
	boolean_t retry_cache_trim = FALSE;
	vm_object_t shadow;

//	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
//	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */


	while (object != VM_OBJECT_NULL) {

		/*
		 *	The cache holds a reference (uncounted) to
		 *	the object; we must lock it before removing
		 *	the object.
		 *	(Lock order: cache lock, then object lock.)
		 */

		vm_object_cache_lock();
		vm_object_lock(object);
		assert(object->alive);

		/*
		 *	Lose the reference. If other references
		 *	remain, then we are done, unless we need
		 *	to retry a cache trim.
		 *	If it is the last reference, then keep it
		 *	until any pending initialization is completed.
		 */

		assert(object->ref_count > 0);
		if ((object->ref_count > 1) || (object->terminating)) {
			/* if the object is terminating, it cannot go into */
			/* the cache and we obviously should not call */
			/* terminate again.  */
			object->ref_count--;
			{
				/* The following is an emergency work-around for */
				/* no-mappings left notification to UBC.  This fix */
				/* violates numerous layering boundaries, is not */
				/* provable with respect to races for new mappings */
				/* from the UBC layer and is just plain ugly.  The */
				/* proper fix requires a guarantee of state */
				/* between the vnode and the memory object and a */
				/* sequenced delivery of empty status.  This can */
				/* be provided by the object_named interface and */
				/* the effort to convert over should be undertaken */
				/* at the earliest possible moment. */
				if(object->ref_count == 1) {
					vnode_pager_t	vnode_pager;
					if(object->pager) {
						vnode_pager = (vnode_pager_t)
							vnode_port_hash_lookup(
								object->pager);
						if(vnode_pager) {
							extern void ubc_unmap(void *);
							ubc_unmap(vnode_pager->vnode_handle);
						}
					}
				}
			}
			vm_object_res_deallocate(object);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}

		/*
		 *	We have to wait for initialization
		 *	before destroying or caching the object.
		 */

		if (object->pager_created && ! object->pager_initialized) {
			assert(! object->can_persist);
			vm_object_assert_wait(object,
					      VM_OBJECT_EVENT_INITIALIZED,
					      THREAD_UNINT);
			vm_object_unlock(object);
			vm_object_cache_unlock();
			thread_block((void (*)(void))0);
			continue;
		}

		/*
		 *	If this object can persist, then enter it in
		 *	the cache. Otherwise, terminate it.
		 *
		 * 	NOTE:  Only permanent objects are cached, and
		 *	permanent objects cannot have shadows.  This
		 *	affects the residence counting logic in a minor
		 *	way (can do it in-line, mostly).
		 */

		if (object->can_persist) {
			/*
			 *	Now it is safe to decrement reference count,
			 *	and to return if reference count is > 0.
			 */
			if (--object->ref_count > 0) {
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
				vm_object_cache_unlock();
				if (retry_cache_trim &&
				    ((object = vm_object_cache_trim(TRUE)) !=
				     VM_OBJECT_NULL)) {
					continue;
				}
				return;
			}

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	Remove shadow now if we don't
			 *	want to cache shadows.
			 */
			if (! cache_shadows) {
				shadow = object->shadow;
				object->shadow = VM_OBJECT_NULL;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Enter the object onto the queue of
			 *	cached objects, and deactivate
			 *	all of its pages.
			 */
			assert(object->shadow == VM_OBJECT_NULL);
			VM_OBJ_RES_DECR(object);
			XPR(XPR_VM_OBJECT,
		"vm_o_deallocate: adding %x to cache, queue = (%x, %x)\n",
				(integer_t)object,
				(integer_t)vm_object_cached_list.next,
				(integer_t)vm_object_cached_list.prev,0,0);

			vm_object_cached_count++;
			if (vm_object_cached_count > vm_object_cached_high)
				vm_object_cached_high = vm_object_cached_count;
			queue_enter(&vm_object_cached_list, object,
				vm_object_t, cached_list);
			vm_object_cache_unlock();
			vm_object_deactivate_pages(object);
			vm_object_unlock(object);

#if	MIGHT_NOT_CACHE_SHADOWS
			/*
			 *	If we have a shadow that we need
			 *	to deallocate, do so now, remembering
			 *	to trim the cache later.
			 */
			if (! cache_shadows && shadow != VM_OBJECT_NULL) {
				object = shadow;
				retry_cache_trim = TRUE;
				continue;
			}
#endif	/* MIGHT_NOT_CACHE_SHADOWS */

			/*
			 *	Trim the cache. If the cache trim
			 *	returns with a shadow for us to deallocate,
			 *	then remember to retry the cache trim
			 *	when we are done deallocating the shadow.
			 *	Otherwise, we are done.
			 */

			object = vm_object_cache_trim(TRUE);
			if (object == VM_OBJECT_NULL) {
				return;
			}
			retry_cache_trim = TRUE;

		} else {
			/*
			 *	This object is not cachable; terminate it.
			 */
			XPR(XPR_VM_OBJECT,
	"vm_o_deallocate: !cacheable 0x%X res %d paging_ops %d thread 0x%lX ref %d\n",
				(integer_t)object, object->resident_page_count,
				object->paging_in_progress,
				(natural_t)current_thread(),object->ref_count);

			VM_OBJ_RES_DECR(object);	/* XXX ? */
			/*
			 *	Terminate this object. If it had a shadow,
			 *	then deallocate it; otherwise, if we need
			 *	to retry a cache trim, do so now; otherwise,
			 *	we are done. "pageout" objects have a shadow,
			 *	but maintain a "paging reference" rather
			 *	than a normal reference.
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
			if(vm_object_terminate(object) != KERN_SUCCESS) {
				return;
			}
			if (shadow != VM_OBJECT_NULL) {
				object = shadow;
				continue;
			}
			if (retry_cache_trim &&
			    ((object = vm_object_cache_trim(TRUE)) !=
			     VM_OBJECT_NULL)) {
				continue;
			}
			return;
		}
	}
	assert(! retry_cache_trim);
}

/*
 *	Check to see whether we really need to trim
 *	down the cache. If so, remove an object from
 *	the cache, terminate it, and repeat.
 *
 *	Called with, and returns with, cache lock unlocked.
 */
vm_object_t
vm_object_cache_trim(
	boolean_t called_from_vm_object_deallocate)
{
	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

	for (;;) {

		/*
		 *	If we no longer need to trim the cache,
		 *	then we are done.
		 */

		vm_object_cache_lock();
		if (vm_object_cached_count <= vm_object_cached_max) {
			vm_object_cache_unlock();
			return VM_OBJECT_NULL;
		}

		/*
		 *	We must trim down the cache, so remove
		 *	the first object in the cache.
		 */
		XPR(XPR_VM_OBJECT,
		"vm_object_cache_trim: removing from front of cache (%x, %x)\n",
			(integer_t)vm_object_cached_list.next,
			(integer_t)vm_object_cached_list.prev, 0, 0, 0);

		object = (vm_object_t) queue_first(&vm_object_cached_list);
		vm_object_lock(object);
		queue_remove(&vm_object_cached_list, object, vm_object_t,
			     cached_list);
		vm_object_cached_count--;

		/*
		 *	Since this object is in the cache, we know
		 *	that it is initialized and has no references.
		 *	Take a reference to avoid recursive deallocations.
		 */

		assert(object->pager_initialized);
		assert(object->ref_count == 0);
		object->ref_count++;

		/*
		 *	Terminate the object.
		 *	If the object had a shadow, we let vm_object_deallocate
		 *	deallocate it. "pageout" objects have a shadow, but
		 *	maintain a "paging reference" rather than a normal
		 *	reference.
		 *	(We are careful here to limit recursion.)
		 */
		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
		if(vm_object_terminate(object) != KERN_SUCCESS)
			continue;
		if (shadow != VM_OBJECT_NULL) {
			if (called_from_vm_object_deallocate) {
				return shadow;
			} else {
				vm_object_deallocate(shadow);
			}
		}
	}
}

boolean_t	vm_object_terminate_remove_all = FALSE;

/*
 *	Routine:	vm_object_terminate
 *	Purpose:
 *		Free all resources associated with a vm_object.
 *	In/out conditions:
 *		Upon entry, the object and the cache must be locked,
 *		and the object must have exactly one reference.
 *
 *		The shadow object reference is left alone.
 *
 *		The object must be unlocked if its found that pages
 *		must be flushed to a backing object.  If someone
 *		manages to map the object while it is being flushed
 *		the object is returned unlocked and unchanged.
Otherwise, + * upon exit, the cache will be unlocked, and the + * object will cease to exist. + */ +kern_return_t +vm_object_terminate( + register vm_object_t object) +{ + register vm_page_t p; + vm_object_t shadow_object; + + XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n", + (integer_t)object, object->ref_count, 0, 0, 0); + + /* + * Make sure the object isn't already being terminated + */ + + assert(object->alive); + if(object->terminating) { + vm_object_cache_unlock(); + object->ref_count -= 1; + vm_object_unlock(object); + return KERN_FAILURE; + } + object->terminating = TRUE; + + vm_object_cache_unlock(); + if (!object->pageout && (!object->temporary || object->can_persist) + && (object->pager != NULL || object->shadow_severed)) { + while (!queue_empty(&object->memq)) { + /* + * Clear pager_trusted bit so that the pages get yanked + * out of the object instead of cleaned in place. This + * prevents a deadlock in XMM and makes more sense anyway. + */ + object->pager_trusted = FALSE; + + p = (vm_page_t) queue_first(&object->memq); + + VM_PAGE_CHECK(p); + + if (p->busy || p->cleaning) { + if(p->cleaning || p->absent) { + vm_object_paging_wait(object, THREAD_UNINT); + continue; + } else { + panic("vm_object_terminate.3 0x%x 0x%x", object, p); + } + } + + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(p); + vm_page_unlock_queues(); + + if (p->absent || p->private) { + + /* + * For private pages, VM_PAGE_FREE just + * leaves the page structure around for + * its owner to clean up. For absent + * pages, the structure is returned to + * the appropriate pool. 
+ */ + + goto free_page; + } + + if (p->fictitious) + panic("vm_object_terminate.4 0x%x 0x%x", object, p); + + if (!p->dirty) + p->dirty = pmap_is_modified(p->phys_addr); + + if (p->dirty || p->precious) { + p->busy = TRUE; + vm_object_paging_begin(object); + /* protect the object from re-use/caching while it */ + /* is unlocked */ + vm_object_unlock(object); + vm_pageout_cluster(p); /* flush page */ + vm_object_lock(object); + vm_object_paging_wait(object, THREAD_UNINT); + XPR(XPR_VM_OBJECT, + "vm_object_terminate restart, object 0x%X ref %d\n", + (integer_t)object, object->ref_count, 0, 0, 0); + } else { + free_page: + VM_PAGE_FREE(p); + } + } + } + if(object->ref_count != 1) { + object->ref_count -= 1; + vm_object_res_deallocate(object); + object->terminating = FALSE; + /* kick off anyone waiting on terminating */ + vm_object_paging_begin(object); + vm_object_paging_end(object); + vm_object_unlock(object); + return KERN_FAILURE; + } + + object->alive = FALSE; + + /* + * Make sure no one can look us up now. + */ + + vm_object_cache_lock(); + + if(object->pager != IP_NULL) { + vm_object_hash_entry_t entry; + + entry = vm_object_hash_lookup(object->pager, FALSE); + if (entry != VM_OBJECT_HASH_ENTRY_NULL) + entry->object = VM_OBJECT_NULL; + } + + vm_object_cache_unlock(); + + /* + * Detach the object from its shadow if we are the shadow's + * copy. + */ + if (((shadow_object = object->shadow) != VM_OBJECT_NULL) && + !(object->pageout)) { + vm_object_lock(shadow_object); + assert((shadow_object->copy == object) || + (shadow_object->copy == VM_OBJECT_NULL)); + shadow_object->copy = VM_OBJECT_NULL; + vm_object_unlock(shadow_object); + } + + /* + * The pageout daemon might be playing with our pages. + * Now that the object is dead, it won't touch any more + * pages, but some pages might already be on their way out. + * Hence, we wait until the active paging activities have ceased. 
+ */ + vm_object_paging_wait(object, THREAD_UNINT); + object->ref_count--; +#if TASK_SWAPPER + assert(object->res_count == 0); +#endif /* TASK_SWAPPER */ + +Restart: + assert (object->ref_count == 0); + + /* + * Clean or free the pages, as appropriate. + * It is possible for us to find busy/absent pages, + * if some faults on this object were aborted. + */ + if (object->pageout) { + assert(shadow_object != VM_OBJECT_NULL); + assert(shadow_object == object->shadow); + + vm_pageout_object_terminate(object); + + } else if (object->temporary && ! object->can_persist || + object->pager == IP_NULL) { + while (!queue_empty(&object->memq)) { + p = (vm_page_t) queue_first(&object->memq); + + VM_PAGE_CHECK(p); + VM_PAGE_FREE(p); + } + } else if (!queue_empty(&object->memq)) { + panic("vm_object_terminate: queue just emptied isn't"); + } + + assert(object->paging_in_progress == 0); + assert(object->ref_count == 0); + + vm_object_remove(object); + + /* + * Throw away port rights... note that they may + * already have been thrown away (by vm_object_destroy + * or memory_object_destroy). + * + * Instead of destroying the control port, + * we send all rights off to the memory manager, + * using memory_object_terminate. + */ + + vm_object_unlock(object); + if (object->pager != IP_NULL) { + /* consumes our rights for pager, pager_request */ + memory_object_release(object->pager, object->pager_request); + } + /* kick off anyone waiting on terminating */ + vm_object_lock(object); + vm_object_paging_begin(object); + vm_object_paging_end(object); + vm_object_unlock(object); + +#if MACH_PAGEMAP + vm_external_destroy(object->existence_map, object->size); +#endif /* MACH_PAGEMAP */ + + /* + * Free the space for the object. + */ + + zfree(vm_object_zone, (vm_offset_t) object); + return KERN_SUCCESS; +} + +/* + * Routine: vm_object_pager_wakeup + * Purpose: Wake up anyone waiting for termination of a pager. 
+ */ + +void +vm_object_pager_wakeup( + ipc_port_t pager) +{ + vm_object_hash_entry_t entry; + boolean_t waiting = FALSE; + + /* + * If anyone was waiting for the memory_object_terminate + * to be queued, wake them up now. + */ + vm_object_cache_lock(); + entry = vm_object_hash_lookup(pager, TRUE); + if (entry != VM_OBJECT_HASH_ENTRY_NULL) + waiting = entry->waiting; + vm_object_cache_unlock(); + if (entry != VM_OBJECT_HASH_ENTRY_NULL) { + if (waiting) + thread_wakeup((event_t) pager); + vm_object_hash_entry_free(entry); + } +} + +/* + * memory_object_release_name: + * Enforces name semantic on memory_object reference count decrement + * This routine should not be called unless the caller holds a name + * reference gained through the memory_object_named_create or the + * memory_object_rename call. + * If the TERMINATE_IDLE flag is set, the call will return if the + * reference count is not 1. i.e. idle with the only remaining reference + * being the name. + * If the decision is made to proceed the name field flag is set to + * false and the reference count is decremented. If the RESPECT_CACHE + * flag is set and the reference count has gone to zero, the + * memory_object is checked to see if it is cacheable otherwise when + * the reference count is zero, it is simply terminated. + */ + +kern_return_t +memory_object_release_name( + vm_object_t object, + int flags) +{ + vm_object_t shadow; + boolean_t original_object = TRUE; + + while (object != VM_OBJECT_NULL) { + + /* + * The cache holds a reference (uncounted) to + * the object. We must locke it before removing + * the object. + * + */ + + vm_object_cache_lock(); + vm_object_lock(object); + assert(object->alive); + if(original_object) + assert(object->named); + assert(object->ref_count > 0); + + /* + * We have to wait for initialization before + * destroying or caching the object. 
+ */ + + if (object->pager_created && !object->pager_initialized) { + assert(!object->can_persist); + vm_object_assert_wait(object, + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); + vm_object_unlock(object); + vm_object_cache_unlock(); + thread_block((void (*)(void)) 0); + continue; + } + + if (((object->ref_count > 1) + && (flags & MEMORY_OBJECT_TERMINATE_IDLE)) + || (object->terminating)) { + vm_object_unlock(object); + vm_object_cache_unlock(); + return KERN_FAILURE; + } else { + if (flags & MEMORY_OBJECT_RELEASE_NO_OP) { + vm_object_unlock(object); + vm_object_cache_unlock(); + return KERN_SUCCESS; + } + } + + if ((flags & MEMORY_OBJECT_RESPECT_CACHE) && + (object->ref_count == 1)) { + if(original_object) + object->named = FALSE; + vm_object_unlock(object); + vm_object_cache_unlock(); + /* let vm_object_deallocate push this thing into */ + /* the cache, if that it is where it is bound */ + vm_object_deallocate(object); + return KERN_SUCCESS; + } + VM_OBJ_RES_DECR(object); + shadow = object->pageout?VM_OBJECT_NULL:object->shadow; + if(object->ref_count == 1) { + if(vm_object_terminate(object) != KERN_SUCCESS) { + if(original_object) { + return KERN_FAILURE; + } else { + return KERN_SUCCESS; + } + } + if (shadow != VM_OBJECT_NULL) { + original_object = FALSE; + object = shadow; + continue; + } + return KERN_SUCCESS; + } else { + object->ref_count--; + if(original_object) + object->named = FALSE; + vm_object_unlock(object); + vm_object_cache_unlock(); + return KERN_SUCCESS; + } + } +} + +/* + * Routine: memory_object_release + * Purpose: Terminate the pager and release port rights, + * just like memory_object_terminate, except + * that we wake up anyone blocked in vm_object_enter + * waiting for termination message to be queued + * before calling memory_object_init. 
 */
void
memory_object_release(
	ipc_port_t	pager,
	pager_request_t	pager_request)
{
#ifdef	MACH_BSD
	kern_return_t		vnode_pager_terminate(ipc_port_t, ipc_port_t);
#endif

	/*
	 *	Keep a reference to pager port;
	 *	the terminate might otherwise release all references.
	 */
	ipc_port_copy_send(pager);

	/*
	 *	Terminate the pager.
	 */

#ifdef	MACH_BSD
	/*
	 * Route vnode-backed pagers to the BSD-side terminate; all
	 * others go through the normal memory_object_terminate path.
	 */
	if(((rpc_subsystem_t)pager_mux_hash_lookup(pager)) ==
		((rpc_subsystem_t) &vnode_pager_workaround)) {
		(void) vnode_pager_terminate(pager, pager_request);
	} else {
		(void) memory_object_terminate(pager, pager_request);
	}
#else
	(void) memory_object_terminate(pager, pager_request);
#endif

	/*
	 *	Wakeup anyone waiting for this terminate
	 */
	vm_object_pager_wakeup(pager);

	/*
	 *	Release reference to pager port.
	 */
	ipc_port_release_send(pager);
}

/*
 *	Routine:	vm_object_abort_activity [internal use only]
 *	Purpose:
 *		Abort paging requests pending on this object.
 *	In/out conditions:
 *		The object is locked on entry and exit.
 */
void
vm_object_abort_activity(
	vm_object_t	object)
{
	register
	vm_page_t	p;
	vm_page_t	next;

	XPR(XPR_VM_OBJECT, "vm_object_abort_activity, object 0x%X\n",
		(integer_t)object, 0, 0, 0, 0);

	/*
	 *	Abort all activity that would be waiting
	 *	for a result on this memory object.
	 *
	 *	We could also choose to destroy all pages
	 *	that we have in memory for this object, but
	 *	we don't.
	 */

	p = (vm_page_t) queue_first(&object->memq);
	while (!queue_end(&object->memq, (queue_entry_t) p)) {
		/* save the link before p may be freed below */
		next = (vm_page_t) queue_next(&p->listq);

		/*
		 *	If it's being paged in, destroy it.
		 *	If an unlock has been requested, start it again.
		 */

		if (p->busy && p->absent) {
			VM_PAGE_FREE(p);
		}
		 else {
		 	if (p->unlock_request != VM_PROT_NONE)
			 	p->unlock_request = VM_PROT_NONE;
			PAGE_WAKEUP(p);
		}

		p = next;
	}

	/*
	 *	Wake up threads waiting for the memory object to
	 *	become ready.
	 */

	object->pager_ready = TRUE;
	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
}

/*
 *	Routine:	memory_object_destroy [user interface]
 *	Purpose:
 *		Shut down a memory object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
memory_object_destroy(
	register vm_object_t	object,
	kern_return_t		reason)
{
	ipc_port_t	old_object;	/* the pager port being ripped out */
	pager_request_t	old_pager_request;

#ifdef	lint
	reason++;
#endif	/* lint */

	if (object == VM_OBJECT_NULL)
		return(KERN_SUCCESS);

	/*
	 *	Remove the port associations immediately.
	 *
	 *	This will prevent the memory manager from further
	 *	meddling.  [If it wanted to flush data or make
	 *	other changes, it should have done so before performing
	 *	the destroy call.]
	 */

	vm_object_cache_lock();
	vm_object_lock(object);
	vm_object_remove(object);
	object->can_persist = FALSE;
	object->named = FALSE;
	vm_object_cache_unlock();

	/*
	 *	Rip out the ports from the vm_object now... this
	 *	will prevent new memory_object calls from succeeding.
	 */

	old_object = object->pager;
	old_pager_request = object->pager_request;

	object->pager = IP_NULL;
	object->pager_request = PAGER_REQUEST_NULL;

	/*
	 *	Wait for existing paging activity (that might
	 *	have the old ports) to subside.
	 */

	vm_object_paging_wait(object, THREAD_UNINT);
	vm_object_unlock(object);

	/*
	 *	Shut down the ports now.
	 *
	 *	[Paging operations may be proceeding concurrently --
	 *	they'll get the null values established above.]
	 */

	if (old_object != IP_NULL) {
		/* consumes our rights for object, control */
		memory_object_release(old_object, old_pager_request);
	}

	/*
	 *	Lose the reference that was donated for this routine
	 */

	vm_object_deallocate(object);

	return(KERN_SUCCESS);
}

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.
(Keep its pages + * in memory even though it is no longer referenced.) + * + * The object must be locked. + */ +void +vm_object_deactivate_pages( + register vm_object_t object) +{ + register vm_page_t p; + + queue_iterate(&object->memq, p, vm_page_t, listq) { + vm_page_lock_queues(); + if (!p->busy) + vm_page_deactivate(p); + vm_page_unlock_queues(); + } +} + + +/* + * Routine: vm_object_pmap_protect + * + * Purpose: + * Reduces the permission for all physical + * pages in the specified object range. + * + * If removing write permission only, it is + * sufficient to protect only the pages in + * the top-level object; only those pages may + * have write permission. + * + * If removing all access, we must follow the + * shadow chain from the top-level object to + * remove access to all pages in shadowed objects. + * + * The object must *not* be locked. The object must + * be temporary/internal. + * + * If pmap is not NULL, this routine assumes that + * the only mappings for the pages are in that + * pmap. 
+ */ + +void +vm_object_pmap_protect( + register vm_object_t object, + register vm_object_offset_t offset, + vm_size_t size, + pmap_t pmap, + vm_offset_t pmap_start, + vm_prot_t prot) +{ + if (object == VM_OBJECT_NULL) + return; + + vm_object_lock(object); + + assert(object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); + + while (TRUE) { + if (object->resident_page_count > atop(size) / 2 && + pmap != PMAP_NULL) { + vm_object_unlock(object); + pmap_protect(pmap, pmap_start, pmap_start + size, prot); + return; + } + + { + register vm_page_t p; + register vm_object_offset_t end; + + end = offset + size; + + if (pmap != PMAP_NULL) { + queue_iterate(&object->memq, p, vm_page_t, listq) { + if (!p->fictitious && + (offset <= p->offset) && (p->offset < end)) { + + vm_offset_t start = pmap_start + + (vm_offset_t)(p->offset - offset); + + pmap_protect(pmap, start, start + PAGE_SIZE, prot); + } + } + } else { + queue_iterate(&object->memq, p, vm_page_t, listq) { + if (!p->fictitious && + (offset <= p->offset) && (p->offset < end)) { + + pmap_page_protect(p->phys_addr, + prot & ~p->page_lock); + } + } + } + } + + if (prot == VM_PROT_NONE) { + /* + * Must follow shadow chain to remove access + * to pages in shadowed objects. + */ + register vm_object_t next_object; + + next_object = object->shadow; + if (next_object != VM_OBJECT_NULL) { + offset += object->shadow_offset; + vm_object_lock(next_object); + vm_object_unlock(object); + object = next_object; + } + else { + /* + * End of chain - we are done. + */ + break; + } + } + else { + /* + * Pages in shadowed objects may never have + * write permission - we may stop here. + */ + break; + } + } + + vm_object_unlock(object); +} + +/* + * Routine: vm_object_copy_slowly + * + * Description: + * Copy the specified range of the source + * virtual memory object without using + * protection-based optimizations (such + * as copy-on-write). The pages in the + * region are actually copied. 
+ * + * In/out conditions: + * The caller must hold a reference and a lock + * for the source virtual memory object. The source + * object will be returned *unlocked*. + * + * Results: + * If the copy is completed successfully, KERN_SUCCESS is + * returned. If the caller asserted the interruptible + * argument, and an interruption occurred while waiting + * for a user-generated event, MACH_SEND_INTERRUPTED is + * returned. Other values may be returned to indicate + * hard errors during the copy operation. + * + * A new virtual memory object is returned in a + * parameter (_result_object). The contents of this + * new object, starting at a zero offset, are a copy + * of the source memory region. In the event of + * an error, this parameter will contain the value + * VM_OBJECT_NULL. + */ +kern_return_t +vm_object_copy_slowly( + register vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + boolean_t interruptible, + vm_object_t *_result_object) /* OUT */ +{ + vm_object_t new_object; + vm_object_offset_t new_offset; + + vm_object_offset_t src_lo_offset = src_offset; + vm_object_offset_t src_hi_offset = src_offset + size; + + XPR(XPR_VM_OBJECT, "v_o_c_slowly obj 0x%x off 0x%x size 0x%x\n", + src_object, src_offset, size, 0, 0); + + if (size == 0) { + vm_object_unlock(src_object); + *_result_object = VM_OBJECT_NULL; + return(KERN_INVALID_ARGUMENT); + } + + /* + * Prevent destruction of the source object while we copy. + */ + + assert(src_object->ref_count > 0); + src_object->ref_count++; + VM_OBJ_RES_INCR(src_object); + vm_object_unlock(src_object); + + /* + * Create a new object to hold the copied pages. + * A few notes: + * We fill the new object starting at offset 0, + * regardless of the input offset. + * We don't bother to lock the new object within + * this routine, since we have the only reference. + */ + + new_object = vm_object_allocate(size); + new_offset = 0; + + assert(size == trunc_page_64(size)); /* Will the loop terminate? 
*/ + + for ( ; + size != 0 ; + src_offset += PAGE_SIZE_64, + new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64 + ) { + vm_page_t new_page; + vm_fault_return_t result; + + while ((new_page = vm_page_alloc(new_object, new_offset)) + == VM_PAGE_NULL) { + if (!vm_page_wait(interruptible)) { + vm_object_deallocate(new_object); + *_result_object = VM_OBJECT_NULL; + return(MACH_SEND_INTERRUPTED); + } + } + + do { + vm_prot_t prot = VM_PROT_READ; + vm_page_t _result_page; + vm_page_t top_page; + register + vm_page_t result_page; + kern_return_t error_code; + + vm_object_lock(src_object); + vm_object_paging_begin(src_object); + + XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0); + result = vm_fault_page(src_object, src_offset, + VM_PROT_READ, FALSE, interruptible, + src_lo_offset, src_hi_offset, + VM_BEHAVIOR_SEQUENTIAL, + &prot, &_result_page, &top_page, + (int *)0, + &error_code, FALSE, FALSE); + + switch(result) { + case VM_FAULT_SUCCESS: + result_page = _result_page; + + /* + * We don't need to hold the object + * lock -- the busy page will be enough. + * [We don't care about picking up any + * new modifications.] + * + * Copy the page to the new object. + * + * POLICY DECISION: + * If result_page is clean, + * we could steal it instead + * of copying. + */ + + vm_object_unlock(result_page->object); + vm_page_copy(result_page, new_page); + + /* + * Let go of both pages (make them + * not busy, perform wakeup, activate). + */ + + new_page->busy = FALSE; + new_page->dirty = TRUE; + vm_object_lock(result_page->object); + PAGE_WAKEUP_DONE(result_page); + + vm_page_lock_queues(); + if (!result_page->active && + !result_page->inactive) + vm_page_activate(result_page); + vm_page_activate(new_page); + vm_page_unlock_queues(); + + /* + * Release paging references and + * top-level placeholder page, if any. 
+ */ + + vm_fault_cleanup(result_page->object, + top_page); + + break; + + case VM_FAULT_RETRY: + break; + + case VM_FAULT_FICTITIOUS_SHORTAGE: + vm_page_more_fictitious(); + break; + + case VM_FAULT_MEMORY_SHORTAGE: + if (vm_page_wait(interruptible)) + break; + /* fall thru */ + + case VM_FAULT_INTERRUPTED: + vm_page_free(new_page); + vm_object_deallocate(new_object); + vm_object_deallocate(src_object); + *_result_object = VM_OBJECT_NULL; + return(MACH_SEND_INTERRUPTED); + + case VM_FAULT_MEMORY_ERROR: + /* + * A policy choice: + * (a) ignore pages that we can't + * copy + * (b) return the null object if + * any page fails [chosen] + */ + + vm_page_lock_queues(); + vm_page_free(new_page); + vm_page_unlock_queues(); + vm_object_deallocate(new_object); + vm_object_deallocate(src_object); + *_result_object = VM_OBJECT_NULL; + return(error_code ? error_code: + KERN_MEMORY_ERROR); + } + } while (result != VM_FAULT_SUCCESS); + } + + /* + * Lose the extra reference, and return our object. + */ + + vm_object_deallocate(src_object); + *_result_object = new_object; + return(KERN_SUCCESS); +} + +/* + * Routine: vm_object_copy_quickly + * + * Purpose: + * Copy the specified range of the source virtual + * memory object, if it can be done without waiting + * for user-generated events. + * + * Results: + * If the copy is successful, the copy is returned in + * the arguments; otherwise, the arguments are not + * affected. + * + * In/out conditions: + * The object should be unlocked on entry and exit. 
+ */ + +/*ARGSUSED*/ +boolean_t +vm_object_copy_quickly( + vm_object_t *_object, /* INOUT */ + vm_object_offset_t offset, /* IN */ + vm_object_size_t size, /* IN */ + boolean_t *_src_needs_copy, /* OUT */ + boolean_t *_dst_needs_copy) /* OUT */ +{ + vm_object_t object = *_object; + memory_object_copy_strategy_t copy_strategy; + + XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n", + *_object, offset, size, 0, 0); + if (object == VM_OBJECT_NULL) { + *_src_needs_copy = FALSE; + *_dst_needs_copy = FALSE; + return(TRUE); + } + + vm_object_lock(object); + + copy_strategy = object->copy_strategy; + + switch (copy_strategy) { + case MEMORY_OBJECT_COPY_SYMMETRIC: + + /* + * Symmetric copy strategy. + * Make another reference to the object. + * Leave object/offset unchanged. + */ + + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + object->shadowed = TRUE; + vm_object_unlock(object); + + /* + * Both source and destination must make + * shadows, and the source must be made + * read-only if not already. + */ + + *_src_needs_copy = TRUE; + *_dst_needs_copy = TRUE; + + break; + + case MEMORY_OBJECT_COPY_DELAY: + vm_object_unlock(object); + return(FALSE); + + default: + vm_object_unlock(object); + return(FALSE); + } + return(TRUE); +} + +int copy_call_count = 0; +int copy_call_sleep_count = 0; +int copy_call_restart_count = 0; + +/* + * Routine: vm_object_copy_call [internal] + * + * Description: + * Copy the source object (src_object), using the + * user-managed copy algorithm. + * + * In/out conditions: + * The source object must be locked on entry. It + * will be *unlocked* on exit. + * + * Results: + * If the copy is successful, KERN_SUCCESS is returned. + * A new object that represents the copied virtual + * memory is returned in a parameter (*_result_object). + * If the return value indicates an error, this parameter + * is not valid. 
 */
kern_return_t
vm_object_copy_call(
	vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size,
	vm_object_t	*_result_object)	/* OUT */
{
	kern_return_t	kr;
	vm_object_t	copy;
	boolean_t	check_ready = FALSE;

	/*
	 *	If a copy is already in progress, wait and retry.
	 *
	 *	XXX
	 *	Consider making this call interruptable, as Mike
	 *	intended it to be.
	 *
	 *	XXXO
	 *	Need a counter or version or something to allow
	 *	us to use the copy that the currently requesting
	 *	thread is obtaining -- is it worth adding to the
	 *	vm object structure? Depends how common this case it.
	 */
	copy_call_count++;
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_wait(src_object, VM_OBJECT_EVENT_COPY_CALL,
			       THREAD_UNINT);
		vm_object_lock(src_object);
		copy_call_restart_count++;
	}

	/*
	 *	Indicate (for the benefit of memory_object_create_copy)
	 *	that we want a copy for src_object. (Note that we cannot
	 *	do a real assert_wait before calling memory_object_copy,
	 *	so we simply set the flag.)
	 */

	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
	vm_object_unlock(src_object);

	/*
	 *	Ask the memory manager to give us a memory object
	 *	which represents a copy of the src object.
	 *	The memory manager may give us a memory object
	 *	which we already have, or it may give us a
	 *	new memory object. This memory object will arrive
	 *	via memory_object_create_copy.
	 *
	 *	NOTE(review): the call is stubbed out below, so this
	 *	routine currently always returns KERN_FAILURE here.
	 */

	kr = KERN_FAILURE;	/* XXX need to change memory_object.defs */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/*
	 *	Wait for the copy to arrive.
	 */
	vm_object_lock(src_object);
	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
		vm_object_wait(src_object, VM_OBJECT_EVENT_COPY_CALL,
			       THREAD_UNINT);
		vm_object_lock(src_object);
		copy_call_sleep_count++;
	}
Retry:
	assert(src_object->copy != VM_OBJECT_NULL);
	copy = src_object->copy;
	if (!vm_object_lock_try(copy)) {
		/* Out-of-order lock attempt failed; back off and retry. */
		vm_object_unlock(src_object);
		mutex_pause();	/* wait a bit */
		vm_object_lock(src_object);
		goto Retry;
	}
	if (copy->size < src_offset+size)
		copy->size = src_offset+size;

	if (!copy->pager_ready)
		check_ready = TRUE;

	/*
	 *	Return the copy.
	 */
	*_result_object = copy;
	vm_object_unlock(copy);
	vm_object_unlock(src_object);

	/* Wait for the copy to be ready. */
	if (check_ready == TRUE) {
		vm_object_lock(copy);
		while (!copy->pager_ready) {
			vm_object_wait(copy, VM_OBJECT_EVENT_PAGER_READY,
				FALSE);
			vm_object_lock(copy);
		}
		vm_object_unlock(copy);
	}

	return KERN_SUCCESS;
}

/* Statistics for vm_object_copy_delayed lock contention. */
int copy_delayed_lock_collisions = 0;
int copy_delayed_max_collisions = 0;
int copy_delayed_lock_contention = 0;
int copy_delayed_protect_iterate = 0;
int copy_delayed_protect_lookup = 0;
int copy_delayed_protect_lookup_wait = 0;

/*
 *	Routine:	vm_object_copy_delayed [internal]
 *
 *	Description:
 *		Copy the specified virtual memory object, using
 *		the asymmetric copy-on-write algorithm.
 *
 *	In/out conditions:
 *		The object must be unlocked on entry.
 *
 *		This routine will not block waiting for user-generated
 *		events.  It is not interruptible.
 */
vm_object_t
vm_object_copy_delayed(
	vm_object_t	src_object,
	vm_object_offset_t	src_offset,
	vm_object_size_t	size)
{
	vm_object_t	new_copy = VM_OBJECT_NULL;
	vm_object_t	old_copy;
	vm_page_t	p;
	vm_object_size_t	copy_size;

	int collisions = 0;
	/*
	 *	The user-level memory manager wants to see all of the changes
	 *	to this object, but it has promised not to make any changes on
	 *	its own.
 *
 *	Perform an asymmetric copy-on-write, as follows:
 *		Create a new object, called a "copy object" to hold
 *		pages modified by the new mapping (i.e., the copy,
 *		not the original mapping).
 *		Record the original object as the backing object for
 *		the copy object.  If the original mapping does not
 *		change a page, it may be used read-only by the copy.
 *		Record the copy object in the original object.
 *		When the original mapping causes a page to be modified,
 *		it must be copied to a new page that is "pushed" to
 *		the copy object.
 *		Mark the new mapping (the copy object) copy-on-write.
 *		This makes the copy object itself read-only, allowing
 *		it to be reused if the original mapping makes no
 *		changes, and simplifying the synchronization required
 *		in the "push" operation described above.
 *
 *	The copy-on-write is said to be asymmetric because the original
 *	object is *not* marked copy-on-write.  A copied page is pushed
 *	to the copy object, regardless which party attempted to modify
 *	the page.
 *
 *	Repeated asymmetric copy operations may be done.  If the
 *	original object has not been changed since the last copy, its
 *	copy object can be reused.  Otherwise, a new copy object can be
 *	inserted between the original object and its previous copy
 *	object.  Since any copy object is read-only, this cannot
 *	affect the contents of the previous copy object.
 *
 *	Note that a copy object is higher in the object tree than the
 *	original object; therefore, use of the copy object recorded in
 *	the original object must be done carefully, to avoid deadlock.
 */

 Retry:
	vm_object_lock(src_object);

	/*
	 *	See whether we can reuse the result of a previous
	 *	copy operation.
	 */

	old_copy = src_object->copy;
	if (old_copy != VM_OBJECT_NULL) {
		/*
		 *	Try to get the locks (out of order): the copy
		 *	object is above src_object in the tree, so a
		 *	blocking lock here could deadlock; lock_try and
		 *	back off instead.
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			mutex_pause();

			/* Heisenberg Rules */
			copy_delayed_lock_collisions++;
			if (collisions++ == 0)
				copy_delayed_lock_contention++;

			if (collisions > copy_delayed_max_collisions)
				copy_delayed_max_collisions = collisions;

			goto Retry;
		}

		/*
		 *	Determine whether the old copy object has
		 *	been modified.
		 */

		if (old_copy->resident_page_count == 0 &&
		    !old_copy->pager_created) {
			/*
			 *	It has not been modified.
			 *
			 *	Return another reference to
			 *	the existing copy-object.
			 */
			assert(old_copy->ref_count > 0);
			old_copy->ref_count++;

			if (old_copy->size < src_offset+size)
				old_copy->size = src_offset+size;

#if	TASK_SWAPPER
			/*
			 * We have to reproduce some of the code from
			 * vm_object_res_reference because we've taken
			 * the locks out of order here, and deadlock
			 * would result if we simply called that function.
			 */
			if (++old_copy->res_count == 1) {
				assert(old_copy->shadow == src_object);
				vm_object_res_reference(src_object);
			}
#endif	/* TASK_SWAPPER */

			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);

			/* Discard any copy object allocated speculatively. */
			if (new_copy != VM_OBJECT_NULL) {
				vm_object_unlock(new_copy);
				vm_object_deallocate(new_copy);
			}

			return(old_copy);
		}
		if (new_copy == VM_OBJECT_NULL) {
			/*
			 *	A fresh copy object is needed but none is
			 *	in hand: drop both locks, allocate, retry.
			 */
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			new_copy = vm_object_allocate(src_offset + size);
			vm_object_lock(new_copy);
			goto Retry;
		}

		/*
		 *	Adjust the size argument so that the newly-created
		 *	copy object will be large enough to back either the
		 *	old copy object or the new mapping.
		 */
		if (old_copy->size > src_offset+size)
			size = old_copy->size - src_offset;

		/*
		 *	The copy-object is always made large enough to
		 *	completely shadow the original object, since
		 *	it may have several users who want to shadow
		 *	the original object at different points.
		 */

		assert((old_copy->shadow == src_object) &&
		    (old_copy->shadow_offset == (vm_object_offset_t) 0));

		/*
		 *	Make the old copy-object shadow the new one.
		 *	It will receive no more pages from the original
		 *	object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		assert(src_object->ref_count > 0);
		old_copy->shadow = new_copy;
		assert(new_copy->ref_count > 0);
		new_copy->ref_count++;		/* for old_copy->shadow ref. */

#if TASK_SWAPPER
		if (old_copy->res_count) {
			VM_OBJ_RES_INCR(new_copy);
			VM_OBJ_RES_DECR(src_object);
		}
#endif

		vm_object_unlock(old_copy);	/* done with old_copy */
	} else if (new_copy == VM_OBJECT_NULL) {
		/* No previous copy and nothing allocated yet: get one. */
		vm_object_unlock(src_object);
		new_copy = vm_object_allocate(src_offset + size);
		vm_object_lock(new_copy);
		goto Retry;
	}

	/*
	 *	Readjust the copy-object size if necessary.
	 */
	copy_size = new_copy->size;
	if (copy_size < src_offset+size) {
		copy_size = src_offset+size;
		new_copy->size = copy_size;
	}

	/*
	 *	Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = 0;
	new_copy->shadowed = TRUE;	/* caller must set needs_copy */
	assert(src_object->ref_count > 0);
	src_object->ref_count++;
	VM_OBJ_RES_INCR(src_object);
	src_object->copy = new_copy;
	vm_object_unlock(new_copy);

	/*
	 *	Mark all (current) pages of the existing object copy-on-write.
	 *	This object may have a shadow chain below it, but
	 *	those pages will already be marked copy-on-write.
+ */ + + vm_object_paging_wait(src_object, THREAD_UNINT); + copy_delayed_protect_iterate++; + queue_iterate(&src_object->memq, p, vm_page_t, listq) { + if (!p->fictitious) + pmap_page_protect(p->phys_addr, + (VM_PROT_ALL & ~VM_PROT_WRITE & + ~p->page_lock)); + } + vm_object_unlock(src_object); + XPR(XPR_VM_OBJECT, + "vm_object_copy_delayed: used copy object %X for source %X\n", + (integer_t)new_copy, (integer_t)src_object, 0, 0, 0); + + return(new_copy); +} + +/* + * Routine: vm_object_copy_strategically + * + * Purpose: + * Perform a copy according to the source object's + * declared strategy. This operation may block, + * and may be interrupted. + */ +kern_return_t +vm_object_copy_strategically( + register vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + vm_object_t *dst_object, /* OUT */ + vm_object_offset_t *dst_offset, /* OUT */ + boolean_t *dst_needs_copy) /* OUT */ +{ + boolean_t result; + boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */ + memory_object_copy_strategy_t copy_strategy; + + assert(src_object != VM_OBJECT_NULL); + + vm_object_lock(src_object); + + /* + * The copy strategy is only valid if the memory manager + * is "ready". Internal objects are always ready. + */ + + while (!src_object->internal && !src_object->pager_ready) { + + vm_object_wait( src_object, + VM_OBJECT_EVENT_PAGER_READY, + interruptible); + if (interruptible && + (current_thread()->wait_result != THREAD_AWAKENED)) { + *dst_object = VM_OBJECT_NULL; + *dst_offset = 0; + *dst_needs_copy = FALSE; + return(MACH_SEND_INTERRUPTED); + } + vm_object_lock(src_object); + } + + copy_strategy = src_object->copy_strategy; + + /* + * Use the appropriate copy strategy. 
+ */ + + switch (copy_strategy) { + case MEMORY_OBJECT_COPY_NONE: + result = vm_object_copy_slowly(src_object, src_offset, size, + interruptible, dst_object); + if (result == KERN_SUCCESS) { + *dst_offset = 0; + *dst_needs_copy = FALSE; + } + break; + + case MEMORY_OBJECT_COPY_CALL: + result = vm_object_copy_call(src_object, src_offset, size, + dst_object); + if (result == KERN_SUCCESS) { + *dst_offset = src_offset; + *dst_needs_copy = TRUE; + } + break; + + case MEMORY_OBJECT_COPY_DELAY: + vm_object_unlock(src_object); + *dst_object = vm_object_copy_delayed(src_object, + src_offset, size); + *dst_offset = src_offset; + *dst_needs_copy = TRUE; + result = KERN_SUCCESS; + break; + + case MEMORY_OBJECT_COPY_SYMMETRIC: + XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n",(natural_t)src_object, src_offset, size, 0, 0); + vm_object_unlock(src_object); + result = KERN_MEMORY_RESTART_COPY; + break; + + default: + panic("copy_strategically: bad strategy"); + result = KERN_INVALID_ARGUMENT; + } + return(result); +} + +/* + * vm_object_shadow: + * + * Create a new object which is backed by the + * specified existing object range. The source + * object reference is deallocated. + * + * The new object and offset into that object + * are returned in the source parameters. + */ +boolean_t vm_object_shadow_check = FALSE; + +boolean_t +vm_object_shadow( + vm_object_t *object, /* IN/OUT */ + vm_object_offset_t *offset, /* IN/OUT */ + vm_object_size_t length) +{ + register vm_object_t source; + register vm_object_t result; + + source = *object; + assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC); + + /* + * Determine if we really need a shadow. 
+ */ + + if (vm_object_shadow_check && source->ref_count == 1 && + (source->shadow == VM_OBJECT_NULL || + source->shadow->copy == VM_OBJECT_NULL)) + { + source->shadowed = FALSE; + return FALSE; + } + + /* + * Allocate a new object with the given length + */ + + if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) + panic("vm_object_shadow: no object for shadowing"); + + /* + * The new object shadows the source object, adding + * a reference to it. Our caller changes his reference + * to point to the new object, removing a reference to + * the source object. Net result: no change of reference + * count. + */ + result->shadow = source; + + /* + * Store the offset into the source object, + * and fix up the offset into the new object. + */ + + result->shadow_offset = *offset; + + /* + * Return the new things + */ + + *offset = 0; + *object = result; + return TRUE; +} + +/* + * The relationship between vm_object structures and + * the memory_object ports requires careful synchronization. + * + * All associations are created by vm_object_enter. All three + * port fields are filled in, as follows: + * pager: the memory_object port itself, supplied by + * the user requesting a mapping (or the kernel, + * when initializing internal objects); the + * kernel simulates holding send rights by keeping + * a port reference; + * pager_request: + * the memory object control port, + * created by the kernel; the kernel holds + * receive (and ownership) rights to this + * port, but no other references. + * All of the ports are referenced by their global names. + * + * When initialization is complete, the "initialized" field + * is asserted. Other mappings using a particular memory object, + * and any references to the vm_object gained through the + * port association must wait for this initialization to occur. 
+ * + * In order to allow the memory manager to set attributes before + * requests (notably virtual copy operations, but also data or + * unlock requests) are made, a "ready" attribute is made available. + * Only the memory manager may affect the value of this attribute. + * Its value does not affect critical kernel functions, such as + * internal object initialization or destruction. [Furthermore, + * memory objects created by the kernel are assumed to be ready + * immediately; the default memory manager need not explicitly + * set the "ready" attribute.] + * + * [Both the "initialized" and "ready" attribute wait conditions + * use the "pager" field as the wait event.] + * + * The port associations can be broken down by any of the + * following routines: + * vm_object_terminate: + * No references to the vm_object remain, and + * the object cannot (or will not) be cached. + * This is the normal case, and is done even + * though one of the other cases has already been + * done. + * vm_object_destroy: + * The memory_object port has been destroyed, + * meaning that the kernel cannot flush dirty + * pages or request new data or unlock existing + * data. + * memory_object_destroy: + * The memory manager has requested that the + * kernel relinquish rights to the memory object + * port. [The memory manager may not want to + * destroy the port, but may wish to refuse or + * tear down existing memory mappings.] + * Each routine that breaks an association must break all of + * them at once. At some later time, that routine must clear + * the vm_object port fields and release the port rights. + * [Furthermore, each routine must cope with the simultaneous + * or previous operations of the others.] + * + * In addition to the lock on the object, the vm_object_cache_lock + * governs the port associations. References gained through the + * port association require use of the cache lock. 
 *
 *	Because the port fields may be cleared spontaneously, they
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the port values found in the
 *	vm_object must be valid.  To prevent these port rights from being
 *	released, and to prevent the port associations from changing
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the port values include the
 *	termination routines and vm_object_collapse.]
 */

#define IKOT_PAGER_LOOKUP_TYPE IKOT_PAGING_REQUEST

/*
 *	Routine:	vm_object_lookup
 *	Purpose:
 *		Convert a paging-request (control) port into the
 *		vm_object it names, taking a reference and pulling
 *		the object out of the cache if it was resting there.
 *		Returns VM_OBJECT_NULL if the port is invalid, dead,
 *		or of the wrong kernel-object type.
 */
vm_object_t
vm_object_lookup(
	ipc_port_t	port)
{
	vm_object_t	object;

start_over:
	object = VM_OBJECT_NULL;

	if (IP_VALID(port)) {
		vm_object_cache_lock();
		ip_lock(port);
		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_PAGER_LOOKUP_TYPE)) {
			object = (vm_object_t) port->ip_kobject;
			if (!vm_object_lock_try(object)) {
				/*
				 *	failed to acquire object lock. Drop the
				 *	other two locks and wait for it, then go
				 *	back and start over in case the port
				 *	associations changed in the interim.
				 */
				ip_unlock(port);
				vm_object_cache_unlock();
				vm_object_lock(object);
				vm_object_unlock(object);
				goto start_over;
			}

			assert(object->alive);

			/*
			 *	A zero reference count means the object
			 *	was resting in the cache; take it out.
			 */
			if((object->ref_count == 0) && (!object->terminating)){
				queue_remove(&vm_object_cached_list, object,
					vm_object_t, cached_list);
				vm_object_cached_count--;
				XPR(XPR_VM_OBJECT_CACHE,
				    "vm_object_lookup: removing %X, head (%X, %X)\n",
					(integer_t)object,
					(integer_t)vm_object_cached_list.next,
					(integer_t)vm_object_cached_list.prev, 0,0);
			}

			object->ref_count++;
			vm_object_res_reference(object);
			vm_object_unlock(object);
		}
		ip_unlock(port);
		vm_object_cache_unlock();
	}

	return object;
}



/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		Tear down the vm_object associated with a (dead)
 *		memory_object port: break the port associations,
 *		restart pending paging activity, and drop the
 *		resulting object reference.
 */
void
vm_object_destroy(
	ipc_port_t	pager)
{
	vm_object_t	object;
	vm_object_hash_entry_t	entry;
	pager_request_t	old_pager_request;

	/*
	 *	Perform essentially the same operations as in vm_object_lookup,
	 *	except that this time we look up based on the memory_object
	 *	port, not the control port.
	 */
	vm_object_cache_lock();
	entry = vm_object_hash_lookup(pager, FALSE);
	if (entry == VM_OBJECT_HASH_ENTRY_NULL ||
			entry->object == VM_OBJECT_NULL) {
		vm_object_cache_unlock();
		return;
	}

	object = entry->object;
	entry->object = VM_OBJECT_NULL;

	vm_object_lock(object);
	if (object->ref_count == 0) {
		XPR(XPR_VM_OBJECT_CACHE,
		   "vm_object_destroy: removing %x from cache, head (%x, %x)\n",
			(integer_t)object,
			(integer_t)vm_object_cached_list.next,
			(integer_t)vm_object_cached_list.prev, 0,0);

		queue_remove(&vm_object_cached_list, object,
				vm_object_t, cached_list);
		vm_object_cached_count--;
	}
	object->ref_count++;
	vm_object_res_reference(object);

	object->can_persist = FALSE;

	assert(object->pager == pager);

	/*
	 *	Remove the port associations.
	 *
	 *	Note that the memory_object itself is dead, so
	 *	we don't bother with it.
	 */

	object->pager = IP_NULL;
	vm_object_remove(object);

	old_pager_request = object->pager_request;

	object->pager_request = PAGER_REQUEST_NULL;

	vm_object_unlock(object);
	vm_object_cache_unlock();

	vm_object_pager_wakeup(pager);

	/*
	 *	Clean up the port references.  Note that there's no
	 *	point in trying the memory_object_terminate call
	 *	because the memory_object itself is dead.
	 */

	ipc_port_release_send(pager);

	if ((ipc_port_t)old_pager_request != IP_NULL)
		ipc_port_dealloc_kernel((ipc_port_t)old_pager_request);

	/*
	 *	Restart pending page requests
	 */
	vm_object_lock(object);

	vm_object_abort_activity(object);

	vm_object_unlock(object);

	/*
	 *	Lose the object reference.
	 */

	vm_object_deallocate(object);
}

/*
 *	Routine:	vm_object_enter
 *	Purpose:
 *		Find a VM object corresponding to the given
 *		pager; if no such object exists, create one,
 *		and initialize the pager.
 */
vm_object_t
vm_object_enter(
	ipc_port_t		pager,
	vm_object_size_t	size,
	boolean_t		internal,
	boolean_t		init,
	boolean_t		check_named)
{
	register vm_object_t	object;
	vm_object_t		new_object;
	boolean_t		must_init;
	ipc_port_t		pager_request;
	vm_object_hash_entry_t	entry, new_entry;
#ifdef MACH_BSD
kern_return_t vnode_pager_init( ipc_port_t, ipc_port_t, vm_object_size_t);
#endif

	if (!IP_VALID(pager))
		return(vm_object_allocate(size));

	new_object = VM_OBJECT_NULL;
	new_entry = VM_OBJECT_HASH_ENTRY_NULL;
	must_init = init;

	/*
	 *	Look for an object associated with this port.
	 */

restart:
	vm_object_cache_lock();
	for (;;) {
		entry = vm_object_hash_lookup(pager, FALSE);

		/*
		 *	If a previous object is being terminated,
		 *	we must wait for the termination message
		 *	to be queued.
		 *
		 *	We set kobject to a non-null value to let the
		 *	terminator know that someone is waiting.
		 *	Among the possibilities is that the port
		 *	could die while we're waiting.
Must restart + * instead of continuing the loop. + */ + + if (entry != VM_OBJECT_HASH_ENTRY_NULL) { + if (entry->object != VM_OBJECT_NULL) { + if(check_named) { + if(entry->object->named) { + vm_object_cache_unlock(); + return(entry->object); + } + } + break; + } + + entry->waiting = TRUE; + assert_wait((event_t) pager, THREAD_UNINT); + vm_object_cache_unlock(); + thread_block((void (*)(void))0); + goto restart; + } + + /* + * We must unlock to create a new object; + * if we do so, we must try the lookup again. + */ + + if (new_object == VM_OBJECT_NULL) { + vm_object_cache_unlock(); + assert(new_entry == VM_OBJECT_HASH_ENTRY_NULL); + new_entry = vm_object_hash_entry_alloc(pager); + new_object = vm_object_allocate(size); + vm_object_cache_lock(); + } else { + /* + * Lookup failed twice, and we have something + * to insert; set the object. + */ + + if (entry == VM_OBJECT_HASH_ENTRY_NULL) { + vm_object_hash_insert(new_entry); + entry = new_entry; + new_entry = VM_OBJECT_HASH_ENTRY_NULL; + } + + entry->object = new_object; + new_object = VM_OBJECT_NULL; + must_init = TRUE; + } + } + + object = entry->object; + assert(object != VM_OBJECT_NULL); + + if (!must_init) { + vm_object_lock(object); + assert(object->pager_created); + assert(!internal || object->internal); + if (check_named) + object->named = TRUE; + if (object->ref_count == 0) { + XPR(XPR_VM_OBJECT_CACHE, + "vm_object_enter: removing %x from cache, head (%x, %x)\n", + (integer_t)object, + (integer_t)vm_object_cached_list.next, + (integer_t)vm_object_cached_list.prev, 0,0); + queue_remove(&vm_object_cached_list, object, + vm_object_t, cached_list); + vm_object_cached_count--; + } + object->ref_count++; + vm_object_res_reference(object); + vm_object_unlock(object); + + VM_STAT(hits++); + } + assert(object->ref_count > 0); + + VM_STAT(lookups++); + + vm_object_cache_unlock(); + + XPR(XPR_VM_OBJECT, + "vm_o_enter: pager 0x%x obj 0x%x must_init %d\n", + (integer_t)pager, (integer_t)object, must_init, 0, 0); + + /* + 
* If we raced to create a vm_object but lost, let's + * throw away ours. + */ + + if (new_object != VM_OBJECT_NULL) + vm_object_deallocate(new_object); + + if (new_entry != VM_OBJECT_HASH_ENTRY_NULL) + vm_object_hash_entry_free(new_entry); + + if (must_init) { + + /* + * Allocate request port. + */ + + pager_request = ipc_port_alloc_kernel(); + assert (pager_request != IP_NULL); + ipc_kobject_set(pager_request, (ipc_kobject_t) object, + IKOT_PAGING_REQUEST); + + vm_object_lock(object); + + /* + * Copy the naked send right we were given. + */ + + pager = ipc_port_copy_send(pager); + if (!IP_VALID(pager)) + panic("vm_object_enter: port died"); /* XXX */ + + object->pager_created = TRUE; + object->pager = pager; + object->internal = internal; + object->pager_trusted = internal; + if (!internal) { + /* copy strategy invalid until set by memory manager */ + object->copy_strategy = MEMORY_OBJECT_COPY_INVALID; + } + object->pager_request = pager_request; + object->pager_ready = FALSE; + + if (check_named) + object->named = TRUE; + vm_object_unlock(object); + + /* + * Let the pager know we're using it. + */ + +#ifdef MACH_BSD + if(((rpc_subsystem_t)pager_mux_hash_lookup(pager)) == + ((rpc_subsystem_t) &vnode_pager_workaround)) { + (void) vnode_pager_init(pager, + object->pager_request, + PAGE_SIZE); + } else { + (void) memory_object_init(pager, + object->pager_request, + PAGE_SIZE); + } +#else + (void) memory_object_init(pager, + object->pager_request, + PAGE_SIZE); +#endif + + vm_object_lock(object); + if (internal) { + object->pager_ready = TRUE; + vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); + } + + object->pager_initialized = TRUE; + vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED); + } else { + vm_object_lock(object); + } + + /* + * [At this point, the object must be locked] + */ + + /* + * Wait for the work above to be done by the first + * thread to map this object. 
+ */ + + while (!object->pager_initialized) { + vm_object_wait( object, + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + + XPR(XPR_VM_OBJECT, + "vm_object_enter: vm_object %x, memory_object %x, internal %d\n", + (integer_t)object, (integer_t)object->pager, internal, 0,0); + return(object); +} + +/* + * Routine: vm_object_pager_create + * Purpose: + * Create a memory object for an internal object. + * In/out conditions: + * The object is locked on entry and exit; + * it may be unlocked within this call. + * Limitations: + * Only one thread may be performing a + * vm_object_pager_create on an object at + * a time. Presumably, only the pageout + * daemon will be using this routine. + */ + +void +vm_object_pager_create( + register vm_object_t object) +{ + ipc_port_t pager; + vm_object_hash_entry_t entry; +#if MACH_PAGEMAP + vm_object_size_t size; + vm_external_map_t map; +#endif /* MACH_PAGEMAP */ + + XPR(XPR_VM_OBJECT, "vm_object_pager_create, object 0x%X\n", + (integer_t)object, 0,0,0,0); + + if (memory_manager_default_check() != KERN_SUCCESS) + return; + + /* + * Prevent collapse or termination by holding a paging reference + */ + + vm_object_paging_begin(object); + if (object->pager_created) { + /* + * Someone else got to it first... + * wait for them to finish initializing the ports + */ + while (!object->pager_initialized) { + vm_object_wait( object, + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_paging_end(object); + return; + } + + /* + * Indicate that a memory object has been assigned + * before dropping the lock, to prevent a race. 
+ */ + + object->pager_created = TRUE; + object->paging_offset = 0; + +#if MACH_PAGEMAP + size = object->size; +#endif /* MACH_PAGEMAP */ + vm_object_unlock(object); + +#if MACH_PAGEMAP + map = vm_external_create(size); + vm_object_lock(object); + assert(object->size == size); + object->existence_map = map; + vm_object_unlock(object); +#endif /* MACH_PAGEMAP */ + + /* + * Create the pager ports, and associate them with this object. + * + * We make the port association here so that vm_object_enter() + * can look up the object to complete initializing it. No + * user will ever map this object. + */ + { + ipc_port_t DMM; + vm_size_t cluster_size; + + /* acquire a naked send right for the DMM */ + DMM = memory_manager_default_reference(&cluster_size); + assert(cluster_size >= PAGE_SIZE); + + object->cluster_size = cluster_size; /* XXX ??? */ + assert(object->temporary); + + /* consumes the naked send right for DMM */ + (void) memory_object_create(DMM, &pager, object->size); + assert(IP_VALID(pager)); + } + + entry = vm_object_hash_entry_alloc(pager); + + vm_object_cache_lock(); + vm_object_hash_insert(entry); + + entry->object = object; + vm_object_cache_unlock(); + + /* + * A naked send right was returned by + * memory_object_create(), and it is + * copied by vm_object_enter(). + */ + + if (vm_object_enter(pager, object->size, TRUE, TRUE, FALSE) != object) + panic("vm_object_pager_create: mismatch"); + + /* + * Drop the naked send right. + */ + ipc_port_release_send(pager); + + vm_object_lock(object); + + /* + * Release the paging reference + */ + vm_object_paging_end(object); +} + +/* + * Routine: vm_object_remove + * Purpose: + * Eliminate the pager/object association + * for this pager. + * Conditions: + * The object cache must be locked. 
+ */ +void +vm_object_remove( + vm_object_t object) +{ + ipc_port_t port; + + if ((port = object->pager) != IP_NULL) { + vm_object_hash_entry_t entry; + + entry = vm_object_hash_lookup(port, FALSE); + if (entry != VM_OBJECT_HASH_ENTRY_NULL) + entry->object = VM_OBJECT_NULL; + } + + if ((port = object->pager_request) != IP_NULL) { + if (ip_kotype(port) == IKOT_PAGING_REQUEST) + ipc_kobject_set(port, IKO_NULL, IKOT_NONE); + else if (ip_kotype(port) != IKOT_NONE) + panic("vm_object_remove: bad request port"); + } +} + +/* + * Global variables for vm_object_collapse(): + * + * Counts for normal collapses and bypasses. + * Debugging variables, to watch or disable collapse. + */ +long object_collapses = 0; +long object_bypasses = 0; + +boolean_t vm_object_collapse_allowed = TRUE; +boolean_t vm_object_bypass_allowed = TRUE; + +int vm_external_discarded; +int vm_external_collapsed; +/* + * vm_object_do_collapse: + * + * Collapse an object with the object backing it. + * Pages in the backing object are moved into the + * parent, and the backing object is deallocated. + * + * Both objects and the cache are locked; the page + * queues are unlocked. + * + */ +void +vm_object_do_collapse( + vm_object_t object, + vm_object_t backing_object) +{ + vm_page_t p, pp; + vm_object_offset_t new_offset, backing_offset; + vm_object_size_t size; + + backing_offset = object->shadow_offset; + size = object->size; + + + /* + * Move all in-memory pages from backing_object + * to the parent. Pages that have been paged out + * will be overwritten by any of the parent's + * pages that shadow them. + */ + + while (!queue_empty(&backing_object->memq)) { + + p = (vm_page_t) queue_first(&backing_object->memq); + + new_offset = (p->offset - backing_offset); + + assert(!p->busy || p->absent); + + /* + * If the parent has a page here, or if + * this page falls outside the parent, + * dispose of it. + * + * Otherwise, move it as planned. 
+ */ + + if (p->offset < backing_offset || new_offset >= size) { + VM_PAGE_FREE(p); + } else { + pp = vm_page_lookup(object, new_offset); + if (pp == VM_PAGE_NULL) { + + /* + * Parent now has no page. + * Move the backing object's page up. + */ + + vm_page_rename(p, object, new_offset); +#if MACH_PAGEMAP + } else if (pp->absent) { + + /* + * Parent has an absent page... + * it's not being paged in, so + * it must really be missing from + * the parent. + * + * Throw out the absent page... + * any faults looking for that + * page will restart with the new + * one. + */ + + VM_PAGE_FREE(pp); + vm_page_rename(p, object, new_offset); +#endif /* MACH_PAGEMAP */ + } else { + assert(! pp->absent); + + /* + * Parent object has a real page. + * Throw away the backing object's + * page. + */ + VM_PAGE_FREE(p); + } + } + } + + assert(object->pager == IP_NULL || backing_object->pager == IP_NULL); + + if (backing_object->pager != IP_NULL) { + vm_object_hash_entry_t entry; + + /* + * Move the pager from backing_object to object. + * + * XXX We're only using part of the paging space + * for keeps now... we ought to discard the + * unused portion. 
+ */
+
+	object->pager = backing_object->pager;
+	entry = vm_object_hash_lookup(object->pager, FALSE);
+	assert(entry != VM_OBJECT_HASH_ENTRY_NULL);
+	entry->object = object;
+	object->pager_created = backing_object->pager_created;
+	object->pager_request = backing_object->pager_request;
+	object->pager_ready = backing_object->pager_ready;
+	object->pager_initialized = backing_object->pager_initialized;
+	object->cluster_size = backing_object->cluster_size;
+	object->paging_offset =
+		backing_object->paging_offset + backing_offset;
+	if (object->pager_request != IP_NULL) {
+		ipc_kobject_set(object->pager_request,
+				(ipc_kobject_t) object,
+				IKOT_PAGING_REQUEST);
+	}
+	}
+
+	vm_object_cache_unlock();
+
+	object->paging_offset = backing_object->paging_offset + backing_offset;
+
+#if	MACH_PAGEMAP
+	/*
+	 *	If the shadow offset is 0, then use the existence map from
+	 *	the backing object if there is one. If the shadow offset is
+	 *	not zero, toss it.
+	 *
+	 *	XXX - If the shadow offset is not 0 then a bit copy is needed
+	 *	if the map is to be salvaged.  For now, we just toss the
+	 *	old map, giving the collapsed object no map. This means that
+	 *	the pager is invoked for zero fill pages.  If analysis shows
+	 *	that this happens frequently and is a performance hit, then
+	 *	this code should be fixed to salvage the map.
+	 */
+	assert(object->existence_map == VM_EXTERNAL_NULL);
+	if (backing_offset || (size != backing_object->size)) {
+		vm_external_discarded++;
+		vm_external_destroy(backing_object->existence_map,
+			backing_object->size);
+	}
+	else {
+		vm_external_collapsed++;
+		object->existence_map = backing_object->existence_map;
+	}
+	backing_object->existence_map = VM_EXTERNAL_NULL;
+#endif	/* MACH_PAGEMAP */
+
+	/*
+	 *	Object now shadows whatever backing_object did.
+	 *	Note that the reference to backing_object->shadow
+	 *	moves from within backing_object to within object.
+ */ + + object->shadow = backing_object->shadow; + object->shadow_offset += backing_object->shadow_offset; + assert((object->shadow == VM_OBJECT_NULL) || + (object->shadow->copy == VM_OBJECT_NULL)); + + /* + * Discard backing_object. + * + * Since the backing object has no pages, no + * pager left, and no object references within it, + * all that is necessary is to dispose of it. + */ + + assert((backing_object->ref_count == 1) && + (backing_object->resident_page_count == 0) && + (backing_object->paging_in_progress == 0)); + + assert(backing_object->alive); + backing_object->alive = FALSE; + vm_object_unlock(backing_object); + + XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n", + (integer_t)backing_object, 0,0,0,0); + + zfree(vm_object_zone, (vm_offset_t) backing_object); + + object_collapses++; +} + +void +vm_object_do_bypass( + vm_object_t object, + vm_object_t backing_object) +{ + /* + * Make the parent shadow the next object + * in the chain. + */ + +#if TASK_SWAPPER + /* + * Do object reference in-line to + * conditionally increment shadow's + * residence count. If object is not + * resident, leave residence count + * on shadow alone. + */ + if (backing_object->shadow != VM_OBJECT_NULL) { + vm_object_lock(backing_object->shadow); + backing_object->shadow->ref_count++; + if (object->res_count != 0) + vm_object_res_reference(backing_object->shadow); + vm_object_unlock(backing_object->shadow); + } +#else /* TASK_SWAPPER */ + vm_object_reference(backing_object->shadow); +#endif /* TASK_SWAPPER */ + + object->shadow = backing_object->shadow; + object->shadow_offset += backing_object->shadow_offset; + + /* + * Backing object might have had a copy pointer + * to us. If it did, clear it. + */ + if (backing_object->copy == object) { + backing_object->copy = VM_OBJECT_NULL; + } + + /* + * Drop the reference count on backing_object. 
+#if TASK_SWAPPER + * Since its ref_count was at least 2, it + * will not vanish; so we don't need to call + * vm_object_deallocate. + * [FBDP: that doesn't seem to be true any more] + * + * The res_count on the backing object is + * conditionally decremented. It's possible + * (via vm_pageout_scan) to get here with + * a "swapped" object, which has a 0 res_count, + * in which case, the backing object res_count + * is already down by one. +#else + * Don't call vm_object_deallocate unless + * ref_count drops to zero. + * + * The ref_count can drop to zero here if the + * backing object could be bypassed but not + * collapsed, such as when the backing object + * is temporary and cachable. +#endif + */ + if (backing_object->ref_count > 1) { + backing_object->ref_count--; +#if TASK_SWAPPER + if (object->res_count != 0) + vm_object_res_deallocate(backing_object); + assert(backing_object->ref_count > 0); +#endif /* TASK_SWAPPER */ + vm_object_unlock(backing_object); + } else { + + /* + * Drop locks so that we can deallocate + * the backing object. + */ + +#if TASK_SWAPPER + if (object->res_count == 0) { + /* XXX get a reference for the deallocate below */ + vm_object_res_reference(backing_object); + } +#endif /* TASK_SWAPPER */ + vm_object_unlock(object); + vm_object_unlock(backing_object); + vm_object_deallocate(backing_object); + + /* + * Relock object. We don't have to reverify + * its state since vm_object_collapse will + * do that for us as it starts at the + * top of its loop. + */ + + vm_object_lock(object); + } + + object_bypasses++; +} + +/* + * vm_object_collapse: + * + * Perform an object collapse or an object bypass if appropriate. + * The real work of collapsing and bypassing is performed in + * the routines vm_object_do_collapse and vm_object_do_bypass. + * + * Requires that the object be locked and the page queues be unlocked. 
+ * + */ +void +vm_object_collapse( + register vm_object_t object) +{ + register vm_object_t backing_object; + register vm_object_offset_t backing_offset; + register vm_object_size_t size; + register vm_object_offset_t new_offset; + register vm_page_t p; + + if (! vm_object_collapse_allowed && ! vm_object_bypass_allowed) { + return; + } + + XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n", + (integer_t)object, 0,0,0,0); + + while (TRUE) { + /* + * Verify that the conditions are right for either + * collapse or bypass: + * + * The object exists and no pages in it are currently + * being paged out, and + */ + if (object == VM_OBJECT_NULL || + object->paging_in_progress != 0 || + object->absent_count != 0) + return; + + /* + * There is a backing object, and + */ + + if ((backing_object = object->shadow) == VM_OBJECT_NULL) + return; + + vm_object_lock(backing_object); + + /* + * ... + * The backing object is not read_only, + * and no pages in the backing object are + * currently being paged out. + * The backing object is internal. + * + */ + + if (!backing_object->internal || + backing_object->paging_in_progress != 0) { + vm_object_unlock(backing_object); + return; + } + + /* + * The backing object can't be a copy-object: + * the shadow_offset for the copy-object must stay + * as 0. Furthermore (for the 'we have all the + * pages' case), if we bypass backing_object and + * just shadow the next object in the chain, old + * pages from that object would then have to be copied + * BOTH into the (former) backing_object and into the + * parent object. + */ + if (backing_object->shadow != VM_OBJECT_NULL && + backing_object->shadow->copy != VM_OBJECT_NULL) { + vm_object_unlock(backing_object); + return; + } + + /* + * We can now try to either collapse the backing + * object (if the parent is the only reference to + * it) or (perhaps) remove the parent's reference + * to it. 
+ */ + + /* + * If there is exactly one reference to the backing + * object, we may be able to collapse it into the parent. + * + * XXXO (norma vm): + * + * The backing object must not have a pager + * created for it, since collapsing an object + * into a backing_object dumps new pages into + * the backing_object that its pager doesn't + * know about, and we've already declared pages. + * This page dumping is deadly if other kernels + * are shadowing this object; this is the + * distributed equivalent of the ref_count == 1 + * condition. + * + * With some work, we could downgrade this + * restriction to the backing object must not + * be cachable, since when a temporary object + * is uncachable we are allowed to do anything + * to it. We would have to do something like + * call declare_pages again, and we would have + * to be prepared for the memory manager + * disabling temporary termination, which right + * now is a difficult race to deal with, since + * the memory manager currently assumes that + * termination is the only possible failure + * for disabling temporary termination. + */ + + if (backing_object->ref_count == 1 && + ! object->pager_created && + vm_object_collapse_allowed) { + + XPR(XPR_VM_OBJECT, + "vm_object_collapse: %x to %x, pager %x, pager_request %x\n", + (integer_t)backing_object, (integer_t)object, + (integer_t)backing_object->pager, + (integer_t)backing_object->pager_request, 0); + + /* + * We need the cache lock for collapsing, + * but we must not deadlock. + */ + + if (! vm_object_cache_lock_try()) { + vm_object_unlock(backing_object); + return; + } + + /* + * Collapse the object with its backing + * object, and try again with the object's + * new backing object. + */ + + vm_object_do_collapse(object, backing_object); + continue; + } + + + /* + * Collapsing the backing object was not possible + * or permitted, so let's try bypassing it. + */ + + if (! 
vm_object_bypass_allowed) {
+			vm_object_unlock(backing_object);
+			return;
+		}
+
+		/*
+		 *	If the backing object has a pager but no pagemap,
+		 *	then we cannot bypass it, because we don't know
+		 *	what pages it has.
+		 */
+		if (backing_object->pager_created
+#if	MACH_PAGEMAP
+			&& (backing_object->existence_map == VM_EXTERNAL_NULL)
+#endif	/* MACH_PAGEMAP */
+			) {
+			vm_object_unlock(backing_object);
+			return;
+		}
+
+		backing_offset = object->shadow_offset;
+		size = object->size;
+
+		/*
+		 *	If all of the pages in the backing object are
+		 *	shadowed by the parent object, the parent
+		 *	object no longer has to shadow the backing
+		 *	object; it can shadow the next one in the
+		 *	chain.
+		 *
+		 *	If the backing object has existence info,
+		 *	we must examine its existence info
+		 *	as well.
+		 *
+		 *	XXX
+		 *	Should have a check for a 'small' number
+		 *	of pages here.
+		 */
+
+		/*
+		 *	First, check pages resident in the backing object.
+		 */
+
+		queue_iterate(&backing_object->memq, p, vm_page_t, listq) {
+
+			/*
+			 *	If the parent has a page here, or if
+			 *	this page falls outside the parent,
+			 *	keep going.
+			 *
+			 *	Otherwise, the backing_object must be
+			 *	left in the chain.
+			 */
+
+			new_offset = (p->offset - backing_offset);
+			if (p->offset < backing_offset || new_offset >= size) {
+
+				/*
+				 *	Page falls outside of parent.
+				 *	Keep going.
+				 */
+
+				continue;
+			}
+
+			if ((vm_page_lookup(object, new_offset) == VM_PAGE_NULL)
+#if	MACH_PAGEMAP
+			    &&
+			    (vm_external_state_get(object->existence_map,
+				new_offset)
+			     != VM_EXTERNAL_STATE_EXISTS)
+#endif	/* MACH_PAGEMAP */
+			    ) {
+
+				/*
+				 *	Page still needed.
+				 *	Can't go any further.
+				 */
+
+				vm_object_unlock(backing_object);
+				return;
+			}
+		}
+
+#if	MACH_PAGEMAP
+		/*
+		 *	Next, if backing object has been paged out,
+		 *	we must check its existence info for pages
+		 *	that the parent doesn't have.
+ */ + + if (backing_object->pager_created) { + assert(backing_object->existence_map + != VM_EXTERNAL_NULL); + for (new_offset = 0; new_offset < object->size; + new_offset += PAGE_SIZE_64) { + vm_object_offset_t + offset = new_offset + backing_offset; + + /* + * If this page doesn't exist in + * the backing object's existence + * info, then continue. + */ + + if (vm_external_state_get( + backing_object->existence_map, + offset) == VM_EXTERNAL_STATE_ABSENT) { + continue; + } + + /* + * If this page is neither resident + * in the parent nor paged out to + * the parent's pager, then we cannot + * bypass the backing object. + */ + + if ((vm_page_lookup(object, new_offset) == + VM_PAGE_NULL) && + ((object->existence_map == VM_EXTERNAL_NULL) + || (vm_external_state_get( + object->existence_map, new_offset) + == VM_EXTERNAL_STATE_ABSENT))) { + vm_object_unlock(backing_object); + return; + } + } + } +#else /* MACH_PAGEMAP */ + assert(! backing_object->pager_created); +#endif /* MACH_PAGEMAP */ + + /* + * All interesting pages in the backing object + * already live in the parent or its pager. + * Thus we can bypass the backing object. + */ + + vm_object_do_bypass(object, backing_object); + + /* + * Try again with this object's new backing object. + */ + + continue; + } +} + +/* + * Routine: vm_object_page_remove: [internal] + * Purpose: + * Removes all physical pages in the specified + * object range from the object's list of pages. + * + * In/out conditions: + * The object must be locked. + * The object must not have paging_in_progress, usually + * guaranteed by not having a pager. + */ +unsigned int vm_object_page_remove_lookup = 0; +unsigned int vm_object_page_remove_iterate = 0; + +void +vm_object_page_remove( + register vm_object_t object, + register vm_object_offset_t start, + register vm_object_offset_t end) +{ + register vm_page_t p, next; + + /* + * One and two page removals are most popular. + * The factor of 16 here is somewhat arbitrary. 
+ * It balances vm_object_lookup vs iteration. + */ + + if (atop(end - start) < (unsigned)object->resident_page_count/16) { + vm_object_page_remove_lookup++; + + for (; start < end; start += PAGE_SIZE_64) { + p = vm_page_lookup(object, start); + if (p != VM_PAGE_NULL) { + assert(!p->cleaning && !p->pageout); + if (!p->fictitious) + pmap_page_protect(p->phys_addr, + VM_PROT_NONE); + VM_PAGE_FREE(p); + } + } + } else { + vm_object_page_remove_iterate++; + + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + next = (vm_page_t) queue_next(&p->listq); + if ((start <= p->offset) && (p->offset < end)) { + assert(!p->cleaning && !p->pageout); + if (!p->fictitious) + pmap_page_protect(p->phys_addr, + VM_PROT_NONE); + VM_PAGE_FREE(p); + } + p = next; + } + } +} + +/* + * Routine: vm_object_coalesce + * Function: Coalesces two objects backing up adjoining + * regions of memory into a single object. + * + * returns TRUE if objects were combined. + * + * NOTE: Only works at the moment if the second object is NULL - + * if it's not, which object do we lock first? + * + * Parameters: + * prev_object First object to coalesce + * prev_offset Offset into prev_object + * next_object Second object into coalesce + * next_offset Offset into next_object + * + * prev_size Size of reference to prev_object + * next_size Size of reference to next_object + * + * Conditions: + * The object(s) must *not* be locked. The map must be locked + * to preserve the reference to the object(s). 
+ */ +int vm_object_coalesce_count = 0; + +boolean_t +vm_object_coalesce( + register vm_object_t prev_object, + vm_object_t next_object, + vm_object_offset_t prev_offset, + vm_object_offset_t next_offset, + vm_object_size_t prev_size, + vm_object_size_t next_size) +{ + vm_object_size_t newsize; + +#ifdef lint + next_offset++; +#endif /* lint */ + + if (next_object != VM_OBJECT_NULL) { + return(FALSE); + } + + if (prev_object == VM_OBJECT_NULL) { + return(TRUE); + } + + XPR(XPR_VM_OBJECT, + "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n", + (integer_t)prev_object, prev_offset, prev_size, next_size, 0); + + vm_object_lock(prev_object); + + /* + * Try to collapse the object first + */ + vm_object_collapse(prev_object); + + /* + * Can't coalesce if pages not mapped to + * prev_entry may be in use any way: + * . more than one reference + * . paged out + * . shadows another object + * . has a copy elsewhere + * . paging references (pages might be in page-list) + */ + + if ((prev_object->ref_count > 1) || + prev_object->pager_created || + (prev_object->shadow != VM_OBJECT_NULL) || + (prev_object->copy != VM_OBJECT_NULL) || + (prev_object->true_share != FALSE) || + (prev_object->paging_in_progress != 0)) { + vm_object_unlock(prev_object); + return(FALSE); + } + + vm_object_coalesce_count++; + + /* + * Remove any pages that may still be in the object from + * a previous deallocation. + */ + vm_object_page_remove(prev_object, + prev_offset + prev_size, + prev_offset + prev_size + next_size); + + /* + * Extend the object if necessary. + */ + newsize = prev_offset + prev_size + next_size; + if (newsize > prev_object->size) { +#if MACH_PAGEMAP + /* + * We cannot extend an object that has existence info, + * since the existence info might then fail to cover + * the entire object. + * + * This assertion must be true because the object + * has no pager, and we only create existence info + * for objects with pagers. 
+ */ + assert(prev_object->existence_map == VM_EXTERNAL_NULL); +#endif /* MACH_PAGEMAP */ + prev_object->size = newsize; + } + + vm_object_unlock(prev_object); + return(TRUE); +} + +/* + * Attach a set of physical pages to an object, so that they can + * be mapped by mapping the object. Typically used to map IO memory. + * + * The mapping function and its private data are used to obtain the + * physical addresses for each page to be mapped. + */ +void +vm_object_page_map( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_object_offset_t (*map_fn)(void *map_fn_data, + vm_object_offset_t offset), + void *map_fn_data) /* private to map_fn */ +{ + int num_pages; + int i; + vm_page_t m; + vm_page_t old_page; + vm_object_offset_t addr; + + num_pages = atop(size); + + for (i = 0; i < num_pages; i++, offset += PAGE_SIZE_64) { + + addr = (*map_fn)(map_fn_data, offset); + + while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) + vm_page_more_fictitious(); + + vm_object_lock(object); + if ((old_page = vm_page_lookup(object, offset)) + != VM_PAGE_NULL) + { + vm_page_lock_queues(); + vm_page_free(old_page); + vm_page_unlock_queues(); + } + + vm_page_init(m, addr); + m->private = TRUE; /* don`t free page */ + m->wire_count = 1; + vm_page_insert(m, object, offset); + + PAGE_WAKEUP_DONE(m); + vm_object_unlock(object); + } +} + +#include + +#if MACH_KDB +#include +#include + +#define printf kdbprintf + +extern boolean_t vm_object_cached( + vm_object_t object); + +extern void print_bitstring( + char byte); + +boolean_t vm_object_print_pages = FALSE; + +void +print_bitstring( + char byte) +{ + printf("%c%c%c%c%c%c%c%c", + ((byte & (1 << 0)) ? '1' : '0'), + ((byte & (1 << 1)) ? '1' : '0'), + ((byte & (1 << 2)) ? '1' : '0'), + ((byte & (1 << 3)) ? '1' : '0'), + ((byte & (1 << 4)) ? '1' : '0'), + ((byte & (1 << 5)) ? '1' : '0'), + ((byte & (1 << 6)) ? '1' : '0'), + ((byte & (1 << 7)) ? 
'1' : '0')); +} + +boolean_t +vm_object_cached( + register vm_object_t object) +{ + register vm_object_t o; + + queue_iterate(&vm_object_cached_list, o, vm_object_t, cached_list) { + if (object == o) { + return TRUE; + } + } + return FALSE; +} + +#if MACH_PAGEMAP +/* + * vm_external_print: [ debug ] + */ +void +vm_external_print( + vm_external_map_t map, + vm_size_t size) +{ + if (map == VM_EXTERNAL_NULL) { + printf("0 "); + } else { + vm_size_t existence_size = stob(size); + printf("{ size=%d, map=[", existence_size); + if (existence_size > 0) { + print_bitstring(map[0]); + } + if (existence_size > 1) { + print_bitstring(map[1]); + } + if (existence_size > 2) { + printf("..."); + print_bitstring(map[existence_size-1]); + } + printf("] }\n"); + } + return; +} +#endif /* MACH_PAGEMAP */ + +int +vm_follow_object( + vm_object_t object) +{ + extern db_indent; + + int count = 1; + + if (object == VM_OBJECT_NULL) + return 0; + + iprintf("object 0x%x", object); + printf(", shadow=0x%x", object->shadow); + printf(", copy=0x%x", object->copy); + printf(", pager=0x%x", object->pager); + printf(", ref=%d\n", object->ref_count); + + db_indent += 2; + if (object->shadow) + count += vm_follow_object(object->shadow); + + db_indent -= 2; + return count; +} + +/* + * vm_object_print: [ debug ] + */ +void +vm_object_print( + vm_object_t object, + boolean_t have_addr, + int arg_count, + char *modif) +{ + register vm_page_t p; + extern db_indent; + char *s; + + register int count; + + if (object == VM_OBJECT_NULL) + return; + + iprintf("object 0x%x\n", object); + + db_indent += 2; + + iprintf("size=0x%x", object->size); + printf(", cluster=0x%x", object->cluster_size); + printf(", frozen=0x%x", object->frozen_size); + printf(", ref_count=%d\n", object->ref_count); + iprintf(""); +#if TASK_SWAPPER + printf("res_count=%d, ", object->res_count); +#endif /* TASK_SWAPPER */ + printf("resident_page_count=%d\n", object->resident_page_count); + + iprintf("shadow=0x%x", object->shadow); + if 
(object->shadow) { + register int i = 0; + vm_object_t shadow = object; + while(shadow = shadow->shadow) + i++; + printf(" (depth %d)", i); + } + printf(", copy=0x%x", object->copy); + printf(", shadow_offset=0x%x", object->shadow_offset); + printf(", last_alloc=0x%x\n", object->last_alloc); + + iprintf("pager=0x%x", object->pager); + printf(", paging_offset=0x%x", object->paging_offset); + printf(", pager_request=0x%x\n", object->pager_request); + + iprintf("copy_strategy=%d[", object->copy_strategy); + switch (object->copy_strategy) { + case MEMORY_OBJECT_COPY_NONE: + printf("copy_none"); + break; + + case MEMORY_OBJECT_COPY_CALL: + printf("copy_call"); + break; + + case MEMORY_OBJECT_COPY_DELAY: + printf("copy_delay"); + break; + + case MEMORY_OBJECT_COPY_SYMMETRIC: + printf("copy_symmetric"); + break; + + case MEMORY_OBJECT_COPY_INVALID: + printf("copy_invalid"); + break; + + default: + printf("?"); + } + printf("]"); + printf(", absent_count=%d\n", object->absent_count); + + iprintf("all_wanted=0x%x<", object->all_wanted); + s = ""; + if (vm_object_wanted(object, VM_OBJECT_EVENT_INITIALIZED)) { + printf("%sinit", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGER_READY)) { + printf("%sready", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_PAGING_IN_PROGRESS)) { + printf("%spaging", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_ABSENT_COUNT)) { + printf("%sabsent", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_LOCK_IN_PROGRESS)) { + printf("%slock", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_UNCACHING)) { + printf("%suncaching", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_COPY_CALL)) { + printf("%scopy_call", s); + s = ","; + } + if (vm_object_wanted(object, VM_OBJECT_EVENT_CACHING)) { + printf("%scaching", s); + s = ","; + } + printf(">"); + printf(", paging_in_progress=%d\n", object->paging_in_progress); + + iprintf("%screated, %sinit, 
%sready, %spersist, %strusted, %spageout, %s, %s\n", + (object->pager_created ? "" : "!"), + (object->pager_initialized ? "" : "!"), + (object->pager_ready ? "" : "!"), + (object->can_persist ? "" : "!"), + (object->pager_trusted ? "" : "!"), + (object->pageout ? "" : "!"), + (object->internal ? "internal" : "external"), + (object->temporary ? "temporary" : "permanent")); + iprintf("%salive, %slock_in_progress, %slock_restart, %sshadowed, %scached, %sprivate\n", + (object->alive ? "" : "!"), + (object->lock_in_progress ? "" : "!"), + (object->lock_restart ? "" : "!"), + (object->shadowed ? "" : "!"), + (vm_object_cached(object) ? "" : "!"), + (object->private ? "" : "!")); + iprintf("%sadvisory_pageout, %ssilent_overwrite\n", + (object->advisory_pageout ? "" : "!"), + (object->silent_overwrite ? "" : "!")); + +#if MACH_PAGEMAP + iprintf("existence_map="); + vm_external_print(object->existence_map, object->size); +#endif /* MACH_PAGEMAP */ +#if MACH_ASSERT + iprintf("paging_object=0x%x\n", object->paging_object); +#endif /* MACH_ASSERT */ + + if (vm_object_print_pages) { + count = 0; + p = (vm_page_t) queue_first(&object->memq); + while (!queue_end(&object->memq, (queue_entry_t) p)) { + if (count == 0) { + iprintf("memory:="); + } else if (count == 2) { + printf("\n"); + iprintf(" ..."); + count = 0; + } else { + printf(","); + } + count++; + + printf("(off=0x%X,page=0x%X)", p->offset, (integer_t) p); + p = (vm_page_t) queue_next(&p->listq); + } + if (count != 0) { + printf("\n"); + } + } + db_indent -= 2; +} + + +/* + * vm_object_find [ debug ] + * + * Find all tasks which reference the given vm_object. 
+ */ + +boolean_t vm_object_find(vm_object_t object); +boolean_t vm_object_print_verbose = FALSE; + +boolean_t +vm_object_find( + vm_object_t object) +{ + task_t task; + vm_map_t map; + vm_map_entry_t entry; + processor_set_t pset = &default_pset; + boolean_t found = FALSE; + + queue_iterate(&pset->tasks, task, task_t, pset_tasks) { + map = task->map; + for (entry = vm_map_first_entry(map); + entry && entry != vm_map_to_entry(map); + entry = entry->vme_next) { + + vm_object_t obj; + + /* + * For the time being skip submaps, + * only the kernel can have submaps, + * and unless we are interested in + * kernel objects, we can simply skip + * submaps. See sb/dejan/nmk18b7/src/mach_kernel/vm + * for a full solution. + */ + if (entry->is_sub_map) + continue; + if (entry) + obj = entry->object.vm_object; + else + continue; + + while (obj != VM_OBJECT_NULL) { + if (obj == object) { + if (!found) { + printf("TASK\t\tMAP\t\tENTRY\n"); + found = TRUE; + } + printf("0x%x\t0x%x\t0x%x\n", + task, map, entry); + } + obj = obj->shadow; + } + } + } + + return(found); +} + +#endif /* MACH_KDB */ + +/* + * memory_object_free_from_cache: + * + * Walk the vm_object cache list, removing and freeing vm_objects + * which are backed by the pager identified by the caller, (pager_id). + * Remove up to "count" objects, if there are that may available + * in the cache. + * Walk the list at most once, return the number of vm_objects + * actually freed. 
 *
 */

kern_return_t
memory_object_free_from_cache(
	host_t		host,
	int		pager_id,
	int		*count)
{

	int	object_released = 0;
	int	i;	/* NOTE(review): unused -- candidate for removal */

	register vm_object_t object = VM_OBJECT_NULL;
	vm_object_t shadow;

/*
	if(host == HOST_NULL)
		return(KERN_INVALID_ARGUMENT);
*/

 try_again:
	vm_object_cache_lock();

	/*
	 * Restart the walk from the top after each object freed:
	 * vm_object_terminate() drops the cache lock (see the
	 * "will do the vm_object_cache_unlock" note in
	 * memory_object_remove_cached_object below), so the
	 * iteration state cannot be trusted across a termination.
	 */
	queue_iterate(&vm_object_cached_list, object,
			vm_object_t, cached_list) {
		if (pager_id == (int) pager_mux_hash_lookup(
				(ipc_port_t)object->pager)) {
			vm_object_lock(object);
			queue_remove(&vm_object_cached_list, object,
					vm_object_t, cached_list);
			vm_object_cached_count--;

			/*
			 * Since this object is in the cache, we know
			 * that it is initialized and has no references.
			 * Take a reference to avoid recursive
			 * deallocations.
			 */

			assert(object->pager_initialized);
			assert(object->ref_count == 0);
			object->ref_count++;

			/*
			 * Terminate the object.
			 * If the object had a shadow, we let
			 * vm_object_deallocate deallocate it.
			 * "pageout" objects have a shadow, but
			 * maintain a "paging reference" rather
			 * than a normal reference.
			 * (We are careful here to limit recursion.)
			 */
			shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
			if ((vm_object_terminate(object) == KERN_SUCCESS)
					&& (shadow != VM_OBJECT_NULL)) {
				vm_object_deallocate(shadow);
			}

			/*
			 * NOTE(review): unlike the normal exit below, this
			 * early return does not write the number actually
			 * freed back into *count -- confirm callers do not
			 * rely on *count being updated on this path.
			 */
			if(object_released++ == *count)
				return KERN_SUCCESS;
			goto try_again;
		}
	}
	vm_object_cache_unlock();
	*count  = object_released;
	return KERN_SUCCESS;
}

/*
 *	memory_object_remove_cached_object:
 *
 *	Check for the existence of a memory object represented by the
 *	supplied port.  If one exists and it is not in use, remove the
 *	memory object from the vm_object cache.
 *	If the memory object is in use, turn off the "can_persist"
 *	property so that it will not go in the cache when the last user
 *	gives it up.
 *
 */

kern_return_t
memory_object_remove_cached_object(
	ipc_port_t port)
{
	vm_object_t object;
	vm_object_t shadow;

repeat_lock_acquire:
	object = VM_OBJECT_NULL;

	if (IP_VALID(port)) {
		/* Lock order: cache lock, then port lock, then object lock. */
		vm_object_cache_lock();
		ip_lock(port);
		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_PAGER_LOOKUP_TYPE)) {
			object = (vm_object_t) port->ip_kobject;
			if (!vm_object_lock_try(object)) {
				/*
				 * failed to acquire object lock. Drop the
				 * other two locks and wait for it, then go
				 * back and start over in case the port
				 * associations changed in the interim.
				 */
				ip_unlock(port);
				vm_object_cache_unlock();
				vm_object_lock(object);
				vm_object_unlock(object);
				goto repeat_lock_acquire;
			}

			/* Object already on its way out: nothing to do. */
			if(object->terminating) {
				ip_unlock(port);
				vm_object_unlock(object);
				vm_object_cache_unlock();
				return KERN_RIGHT_EXISTS;
			}

			assert(object->alive);
			ip_unlock(port);

			if (object->ref_count == 0) {
				/* Unreferenced: pull it out of the cache. */
				queue_remove(&vm_object_cached_list, object,
						vm_object_t, cached_list);
				vm_object_cached_count--;
				object->ref_count++;
				/*
				 * Terminate the object.
				 * If the object had a shadow, we let
				 * vm_object_deallocate deallocate it.
				 * "pageout" objects have a shadow, but
				 * maintain a "paging reference" rather
				 * than a normal reference.
				 * (We are careful here to limit
				 * recursion.)
				 */
				shadow = object->pageout?
					VM_OBJECT_NULL:object->shadow;
				/* will do the vm_object_cache_unlock */
				if((vm_object_terminate(object)
					== KERN_SUCCESS)
					&& (shadow != VM_OBJECT_NULL)) {
					/* will lock and unlock cache_lock */
					vm_object_deallocate(shadow);
				}
			}
			else {
				/*
				 * We cannot free object but we can
				 * make sure it doesn't go into the
				 * cache when it is no longer in
				 * use.
				 */
				object->can_persist = FALSE;

				vm_object_unlock(object);
				vm_object_cache_unlock();
				return KERN_RIGHT_EXISTS;
			}


		}
		else {
			/* Port is dead or not a pager-lookup port. */
			ip_unlock(port);
			vm_object_cache_unlock();
		}
	} else {
		return KERN_INVALID_ARGUMENT;
	}


	return KERN_SUCCESS;
}

/*
 *	memory_object_create_named:
 *
 *	Enter a new named vm_object for the pager represented by "port",
 *	wait for its pager to become ready, and return it in *object_ptr.
 *	Panics if a named right for this pager already exists.
 */
kern_return_t
memory_object_create_named(
	ipc_port_t	port,
	vm_object_size_t	size,
	vm_object_t		*object_ptr)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;

	*object_ptr = (vm_object_t)NULL;
	if (IP_VALID(port)) {

		vm_object_cache_lock();
		entry = vm_object_hash_lookup(port, FALSE);
		if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
				(entry->object != VM_OBJECT_NULL)) {
			if (entry->object->named == TRUE)
				panic("memory_object_create_named: caller already holds the right");
		}

		vm_object_cache_unlock();
		if ((object = vm_object_enter(port, size, FALSE, FALSE, TRUE))
			== VM_OBJECT_NULL)
			return(KERN_INVALID_OBJECT);

		/* wait for object (if any) to be ready */
		if (object != VM_OBJECT_NULL) {
			vm_object_lock(object);
			object->named = TRUE;
			/* vm_object_wait drops the lock; re-take it each pass. */
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					FALSE);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		}
		*object_ptr = object;
		return (KERN_SUCCESS);
	} else {
		return (KERN_INVALID_ARGUMENT);
	}
}

/*
 *	memory_object_recover_named:
 *
 *	Re-acquire a named reference on the vm_object already associated
 *	with "pager".  If the object is terminating and the caller asked
 *	to wait, block and retry the lookup from scratch.  On success the
 *	object is removed from the cache (if unreferenced), marked named,
 *	gains a reference, and is returned in *object_ptr.
 */
kern_return_t
memory_object_recover_named(
	ipc_port_t	pager,
	boolean_t	wait_on_terminating,
	vm_object_t	*object_ptr)
{
	vm_object_t		object;
	vm_object_hash_entry_t	entry;

	*object_ptr = (vm_object_t)NULL;
lookup_entry:
	if (IP_VALID(pager)) {

		vm_object_cache_lock();
		entry = vm_object_hash_lookup(pager, FALSE);
		if ((entry != VM_OBJECT_HASH_ENTRY_NULL) &&
				(entry->object != VM_OBJECT_NULL)) {
			if (entry->object->named == TRUE)
				panic("memory_object_recover_named: caller already holds the right");
			object = entry->object;
			vm_object_lock(object);
			vm_object_cache_unlock();
			if (object->terminating && wait_on_terminating) {
				/* Wait for termination, then retry lookup. */
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
					THREAD_UNINT);
				vm_object_unlock(object);
				goto lookup_entry;
			}
		} else {
			vm_object_cache_unlock();
			return KERN_FAILURE;
		}

		if((object->ref_count == 0) && (!object->terminating)){
			queue_remove(&vm_object_cached_list, object,
					vm_object_t, cached_list);
			vm_object_cached_count--;
			XPR(XPR_VM_OBJECT_CACHE,
			    "memory_object_recover_named: removing %X, head (%X, %X)\n",
			    (integer_t)object,
			    (integer_t)vm_object_cached_list.next,
			    (integer_t)vm_object_cached_list.prev, 0,0);
		}

		object->named = TRUE;
		object->ref_count++;
		vm_object_res_reference(object);
		/* vm_object_wait drops the lock; re-take it each pass. */
		while (!object->pager_ready) {
			vm_object_wait(object,
				VM_OBJECT_EVENT_PAGER_READY,
				FALSE);
			vm_object_lock(object);
		}
		vm_object_unlock(object);
		*object_ptr = object;
		return (KERN_SUCCESS);
	} else {
		return (KERN_INVALID_ARGUMENT);
	}
}
#ifdef MACH_BSD
/*
 * Scale the vm_object_cache
 * This is required to make sure that the vm_object_cache is big
 * enough to effectively cache the mapped file.
 * This is really important with UBC as all the regular file vnodes
 * have memory object associated with them. Having this cache too
 * small results in rapid reclaim of vnodes and hurts performance a LOT!
 *
 * This is also needed as number of vnodes can be dynamically scaled.
 */
kern_return_t
adjust_vm_object_cache(vm_size_t oval, vm_size_t nval)
{
	/* "oval" (the old value) is unused; only the new max matters. */
	vm_object_cached_max = nval;
	vm_object_cache_trim(FALSE);
	return (KERN_SUCCESS);
}
#endif /* MACH_BSD */

diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h
new file mode 100644
index 000000000..81a303fb4
--- /dev/null
+++ b/osfmk/vm/vm_object.h
@@ -0,0 +1,549 @@
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm_object.h + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * Virtual memory object module definitions. 
 */

#ifndef	_VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

/*
 * NOTE(review): the targets of all #include directives below appear to
 * have been stripped in extraction (angle-bracketed names lost) -- restore
 * the original header names from the upstream file before building.
 */
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if	MACH_PAGEMAP
#include
#endif	/* MACH_PAGEMAP */

typedef struct ipc_port *	pager_request_t;
#define	PAGER_REQUEST_NULL	((pager_request_t) 0)

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *
 *	We use "struct ipc_port *" instead of "ipc_port_t"
 *	to avoid include file circularities.
 */

typedef unsigned long long	vm_object_size_t;


struct vm_object {
	queue_head_t		memq;		/* Resident memory */
	decl_mutex_data(,	Lock)		/* Synchronization */

	vm_object_size_t	size;		/* Object size (only valid
						 * if internal) */
	vm_object_size_t	frozen_size;	/* How much has been marked
						 * copy-on-write (only
						 * valid if copy_symmetric) */
	int			ref_count;	/* Number of references */
#if	TASK_SWAPPER
	int			res_count;	/* Residency references (swap)*/
#endif	/* TASK_SWAPPER */
	unsigned int		resident_page_count;
						/* number of resident pages */

	struct vm_object	*copy;		/* Object that should receive
						 * a copy of my changed pages,
						 * for copy_delay, or just the
						 * temporary object that
						 * shadows this object, for
						 * copy_call. */
	struct vm_object	*shadow;	/* My shadow */
	vm_object_offset_t	shadow_offset;	/* Offset into shadow */

	struct ipc_port		*pager;		/* Where to get data */
	vm_object_offset_t	paging_offset;	/* Offset into memory object */
	pager_request_t		pager_request;	/* Where data comes back */

	memory_object_copy_strategy_t
				copy_strategy;	/* How to handle data copy */

	unsigned int		absent_count;	/* The number of pages that
						 * have been requested but
						 * not filled; i.e. pages for
						 * which the "absent"
						 * attribute is asserted. */

	unsigned int		paging_in_progress;
						/* The memory object ports are
						 * being used (e.g., for pagein
						 * or pageout) -- don't change
						 * any of these fields (i.e.,
						 * don't collapse, destroy or
						 * terminate) */
	unsigned int
	/* boolean_t array */	all_wanted:11,	/* Bit array of "want to be
						 * awakened" notations.  See
						 * VM_OBJECT_EVENT_* below */
	/* boolean_t */	pager_created:1,	/* Has pager been created? */
	/* boolean_t */	pager_initialized:1,	/* Are fields ready to use? */
	/* boolean_t */	pager_ready:1,		/* Will pager take requests? */

	/* boolean_t */		pager_trusted:1,/* The pager for this object
						 * is trusted; true for all
						 * internal objects (backed
						 * by the default pager) */
	/* boolean_t */		can_persist:1,	/* The kernel may keep the data
						 * for this object (and rights
						 * to the memory object) after
						 * all address map references
						 * are deallocated? */
	/* boolean_t */		internal:1,	/* Created by the kernel (and
						 * therefore, managed by the
						 * default memory manager) */
	/* boolean_t */		temporary:1,	/* Permanent objects may be
						 * changed externally by the
						 * memory manager, and changes
						 * made in memory must be
						 * reflected back to it.
						 * Temporary objects lack
						 * both characteristics. */
	/* boolean_t */		private:1,	/* magic device_pager object,
						 * holds private pages only */
	/* boolean_t */		pageout:1,	/* pageout object. contains
						 * private pages that refer to
						 * a real memory object. */
	/* boolean_t */		alive:1,	/* Not yet terminated */

	/* boolean_t */		lock_in_progress:1,
						/* Is a multi-page lock
						 * request in progress? */
	/* boolean_t */		lock_restart:1,
						/* Should lock request in
						 * progress restart search? */
	/* boolean_t */		shadowed:1,	/* Shadow may exist */
	/* boolean_t */		silent_overwrite:1,
						/* Allow full page overwrite
						 * without data_request if
						 * page is absent */
	/* boolean_t */		advisory_pageout:1,
						/* Instead of sending page
						 * via OOL, just notify the
						 * pager that the kernel
						 * wants to discard it; page
						 * remains in object */
	/* boolean_t */		true_share:1,
						/* This object is mapped
						 * in more than one place
						 * and hence cannot be
						 * coalesced */
	/* boolean_t */		terminating:1,
						/* Allows vm_object_lookup
						 * and vm_object_deallocate
						 * to special case their
						 * behavior when called as a
						 * result of page cleaning
						 * during object termination */
	/* boolean_t */		named:1,	/* Enforces an internal naming
						 * convention by calling the
						 * right routines for
						 * allocation/destruction;
						 * UBC references against the
						 * vm_object are checked. */
	/* boolean_t */		shadow_severed:1,
						/* When a permanent object
						 * backing a COW goes away
						 * unexpectedly.  Lets
						 * vm_fault return an error
						 * rather than a zero filled
						 * page. */
	/* boolean_t */		phys_contiguous:1;
						/* Memory is wired and
						 * guaranteed physically
						 * contiguous.  Not device
						 * memory; obeys normal VM
						 * rules w.r.t pmap access
						 * bits. */



	queue_chain_t		cached_list;	/* Attachment point for the
						 * list of objects cached as a
						 * result of their can_persist
						 * value */

	queue_head_t		msr_q;		/* memory object synchronise
						   request queue */

	vm_object_offset_t	last_alloc;	/* last allocation offset */
	vm_size_t		cluster_size;	/* size of paging cluster */
#if	MACH_PAGEMAP
	vm_external_map_t	existence_map;	/* bitmap of pages written to
						 * backing storage */
#endif	/* MACH_PAGEMAP */
#if	MACH_ASSERT
	struct vm_object	*paging_object;	/* object in which pages to be
						 * swapped out are temporarily
						 * put (debug builds only) */
#endif
#ifdef	UBC_DEBUG
	queue_head_t		uplq;		/* List of outstanding upls */
#endif /* UBC_DEBUG */
};

extern
vm_object_t	kernel_object;		/* the single kernel object */

/*
 * NOTE(review): defined without "extern" in a header -- every including
 * translation unit gets a tentative definition; confirm this is intended.
 */
int		vm_object_absent_max;	/* maximum number of absent pages
					   at a time for each object */

/* States for an msync_req "flag" field. */
# define	VM_MSYNC_INITIALIZED			0
# define	VM_MSYNC_SYNCHRONIZING			1
# define	VM_MSYNC_DONE				2

struct msync_req {
	queue_chain_t	msr_q;		/* object request queue */
	queue_chain_t	req_q;		/* vm_msync request queue */
	unsigned int	flag;		/* one of VM_MSYNC_* above */
	vm_object_offset_t	offset;
	vm_object_size_t	length;
	vm_object_t	object;		/* back pointer */
	decl_mutex_data(,	msync_req_lock)	/* Lock for this structure */
};

typedef struct msync_req	*msync_req_t;
#define MSYNC_REQ_NULL		((msync_req_t) 0)

/*
 *	Macros to allocate and free msync_reqs
 *	NOTE(review): the bare "msr->flag" below should be "(msr)->flag"
 *	to be safe against non-identifier macro arguments.
 */
#define msync_req_alloc(msr) \
	MACRO_BEGIN \
        (msr) = (msync_req_t)kalloc(sizeof(struct msync_req)); \
        mutex_init(&(msr)->msync_req_lock, ETAP_VM_MSYNC); \
        msr->flag = VM_MSYNC_INITIALIZED; \
	MACRO_END

#define msync_req_free(msr) \
	(kfree((vm_offset_t)(msr), sizeof(struct msync_req)))

#define msr_lock(msr)   mutex_lock(&(msr)->msync_req_lock)
#define msr_unlock(msr)   mutex_unlock(&(msr)->msync_req_lock)

/*
 *	Declare procedures that operate on VM objects.
 */

extern void		vm_object_bootstrap(void);

extern void		vm_object_init(void);

extern vm_object_t	vm_object_allocate(
				vm_object_size_t	size);

#if	MACH_ASSERT
extern void		vm_object_reference(
				vm_object_t	object);
#else	/* MACH_ASSERT */
/* Non-assert builds: take a reference inline under the object lock. */
#define	vm_object_reference(object)	\
MACRO_BEGIN					\
	vm_object_t Object = (object);		\
	if (Object) {				\
		vm_object_lock(Object);		\
		Object->ref_count++;		\
		vm_object_res_reference(Object); \
		vm_object_unlock(Object);	\
	}					\
MACRO_END
#endif	/* MACH_ASSERT */

extern void		vm_object_deallocate(
				vm_object_t	object);

extern void		vm_object_pmap_protect(
				vm_object_t		object,
				vm_object_offset_t	offset,
				vm_size_t		size,
				pmap_t			pmap,
				vm_offset_t		pmap_start,
				vm_prot_t		prot);

extern void		vm_object_page_remove(
				vm_object_t		object,
				vm_object_offset_t	start,
				vm_object_offset_t	end);

extern boolean_t	vm_object_coalesce(
				vm_object_t		prev_object,
				vm_object_t		next_object,
				vm_object_offset_t	prev_offset,
				vm_object_offset_t	next_offset,
				vm_object_size_t	prev_size,
				vm_object_size_t	next_size);

extern boolean_t	vm_object_shadow(
				vm_object_t		*object,
				vm_object_offset_t	*offset,
				vm_object_size_t	length);

extern void		vm_object_collapse(
				vm_object_t	object);

extern vm_object_t	vm_object_lookup(
				ipc_port_t	port);

extern ipc_port_t	vm_object_name(
				vm_object_t	object);

extern boolean_t	vm_object_copy_quickly(
				vm_object_t		*_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				boolean_t		*_src_needs_copy,
				boolean_t		*_dst_needs_copy);

extern kern_return_t	vm_object_copy_strategically(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				vm_object_t		*dst_object,
				vm_object_offset_t	*dst_offset,
				boolean_t		*dst_needs_copy);

extern kern_return_t	vm_object_copy_slowly(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size,
				int			interruptible,
				vm_object_t		*_result_object);

extern void	vm_object_pager_create(
			vm_object_t	object);

extern void	vm_object_destroy(
			ipc_port_t	pager);

extern void	vm_object_pager_wakeup(
			ipc_port_t	pager);

extern void	vm_object_page_map(
			vm_object_t	object,
			vm_object_offset_t	offset,
			vm_object_size_t	size,
			vm_object_offset_t	(*map_fn)
				(void *, vm_object_offset_t),
			void		*map_fn_data);

#if	TASK_SWAPPER

extern void	vm_object_res_reference(
			vm_object_t object);
extern void	vm_object_res_deallocate(
			vm_object_t object);
#define	VM_OBJ_RES_INCR(object)	(object)->res_count++
#define	VM_OBJ_RES_DECR(object)	(object)->res_count--

#else	/* TASK_SWAPPER */

/* Residency accounting compiles away without the task swapper. */
#define	VM_OBJ_RES_INCR(object)
#define	VM_OBJ_RES_DECR(object)
#define vm_object_res_reference(object)
#define vm_object_res_deallocate(object)

#endif	/* TASK_SWAPPER */

extern vm_object_t	vm_object_enter(
				ipc_port_t	pager,
				vm_object_size_t	size,
				boolean_t	internal,
				boolean_t	init,
				boolean_t	check_named);


extern vm_object_t	vm_object_copy_delayed(
				vm_object_t		src_object,
				vm_object_offset_t	src_offset,
				vm_object_size_t	size);


/*
 *	Event waiting handling
 *
 *	Events are encoded as small integers added to the object's
 *	address to form a unique wait event; all_wanted records which
 *	events have waiters.
 */

#define	VM_OBJECT_EVENT_INITIALIZED		0
#define	VM_OBJECT_EVENT_PAGER_READY		1
#define	VM_OBJECT_EVENT_PAGING_IN_PROGRESS	2
#define	VM_OBJECT_EVENT_ABSENT_COUNT		3
#define	VM_OBJECT_EVENT_LOCK_IN_PROGRESS	4
#define	VM_OBJECT_EVENT_UNCACHING		5
#define	VM_OBJECT_EVENT_COPY_CALL		6
#define	VM_OBJECT_EVENT_CACHING			7

/* Caller must hold the object lock. */
#define	vm_object_assert_wait(object, event, interruptible)	\
	MACRO_BEGIN						\
	(object)->all_wanted |= 1 << (event);			\
	assert_wait((event_t)((vm_offset_t)(object)+(event)),(interruptible)); \
	MACRO_END

/* Drops the object lock before blocking; caller must re-lock after. */
#define	vm_object_wait(object, event, interruptible)	\
	MACRO_BEGIN					\
	vm_object_assert_wait((object),(event),(interruptible)); \
	vm_object_unlock(object);			\
	thread_block((void (*)(void)) 0);		\
	MACRO_END

#define	vm_object_wakeup(object, event)				\
	MACRO_BEGIN						\
	if ((object)->all_wanted & (1 << (event)))		\
		thread_wakeup((event_t)((vm_offset_t)(object) + (event))); \
	(object)->all_wanted &= ~(1 << (event));		\
	MACRO_END

#define	vm_object_set_wanted(object, event)	\
	MACRO_BEGIN				\
	((object)->all_wanted |= (1 << (event)));	\
	MACRO_END

#define	vm_object_wanted(object, event)	\
	((object)->all_wanted & (1 << (event)))

/*
 *	Routines implemented as macros
 */

#define		vm_object_paging_begin(object) 		\
	MACRO_BEGIN					\
	(object)->paging_in_progress++;			\
	MACRO_END

#define		vm_object_paging_end(object) 		\
	MACRO_BEGIN					\
	assert((object)->paging_in_progress != 0);	\
	if (--(object)->paging_in_progress == 0) {	\
		vm_object_wakeup(object,		\
			VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}						\
	MACRO_END

#define		vm_object_paging_wait(object, interruptible)	\
	MACRO_BEGIN						\
	while ((object)->paging_in_progress != 0) {		\
		vm_object_wait(	(object),			\
				VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
				(interruptible));		\
		vm_object_lock(object);				\
								\
	       /*XXX if ((interruptible) &&	*/		\
	       /*XXX	(current_thread()->wait_result != THREAD_AWAKENED))*/ \
	       /*XXX		break; */			\
	}							\
	MACRO_END

#define vm_object_absent_assert_wait(object, interruptible)	\
	MACRO_BEGIN						\
	vm_object_assert_wait(	(object),			\
				VM_OBJECT_EVENT_ABSENT_COUNT,	\
				(interruptible));		\
	MACRO_END


#define vm_object_absent_release(object)		\
	MACRO_BEGIN					\
	(object)->absent_count--;			\
	vm_object_wakeup((object),			\
			 VM_OBJECT_EVENT_ABSENT_COUNT);	\
	MACRO_END

/*
 *	Object locking macros -- thin wrappers over the embedded mutex.
 */

#define vm_object_lock_init(object)	mutex_init(&(object)->Lock, ETAP_VM_OBJ)
#define vm_object_lock(object)		mutex_lock(&(object)->Lock)
#define vm_object_unlock(object)	mutex_unlock(&(object)->Lock)
#define vm_object_lock_try(object)	mutex_try(&(object)->Lock)

#endif	/* _VM_VM_OBJECT_H_ */
diff --git a/osfmk/vm/vm_page.h b/osfmk/vm/vm_page.h
new file mode 100644
index 000000000..fd2594146
--- /dev/null
+++ b/osfmk/vm/vm_page.h
@@ -0,0 +1,445 @@
/*
 * Copyright (c) 2000 Apple
Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
 */
/*
 */
/*
 *	File:	vm/vm_page.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

/*
 * NOTE(review): the targets of all #include directives below appear to
 * have been stripped in extraction (angle-bracketed names lost) -- restore
 * the original header names from the upstream file before building.
 */
#include
#include
#include
#include
#include
#include

#include

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).  [Some fields require that both locks be held to
 *	change that field; holding either lock is sufficient to read.]
 */

struct vm_page {
	queue_chain_t	pageq;		/* queue info for FIFO
					 * queue or free list (P) */
	queue_chain_t	listq;		/* all pages in same object (O) */
	struct vm_page	*next;		/* VP bucket link (O) */

	vm_object_t	object;		/* which object am I in (O&P) */
	vm_object_offset_t offset;	/* offset into that object (O,P) */

	unsigned int	wire_count:16,	/* how many wired down maps use me? (O&P) */
			page_error:8,	/* error from I/O operations */
	/* boolean_t */	inactive:1,	/* page is in inactive list (P) */
			active:1,	/* page is in active list (P) */
			laundry:1,	/* page is being cleaned now (P)*/
			free:1,		/* page is on free list (P) */
			reference:1,	/* page has been used (P) */
			limbo:1,	/* page prepped then stolen (P) */
			pageout:1,	/* page wired & busy for pageout (P) */
			gobbled:1;      /* page used internally (P) */
			/* we've used up all 32 bits */

	unsigned int
	/* boolean_t */	busy:1,		/* page is in transit (O) */
			wanted:1,	/* someone is waiting for page (O) */
			tabled:1,	/* page is in VP table (O) */
			fictitious:1,	/* Physical page doesn't exist (O) */
			private:1,	/* Page should not be returned to
					 *  the free list (O) */
			absent:1,	/* Data has been requested, but is
					 *  not yet available (O) */
			error:1,	/* Data manager was unable to provide
					 *  data due to error (O) */
			dirty:1,	/* Page must be cleaned (O) */
			cleaning:1,	/* Page clean has begun (O) */
			precious:1,	/* Page is precious; data must be
					 *  returned even if clean (O) */
			clustered:1,	/* page is not the faulted page (O) */
			overwriting:1,  /* Request to unlock has been made
					 * without having data. (O)
					 * [See vm_fault_page_overwrite] */
			restart:1,	/* Page was pushed higher in shadow
					   chain by copy_call-related pagers;
					   start again at top of chain */
			lock_supplied:1,/* protection supplied by pager (O) */
	/* vm_prot_t */	page_lock:3,	/* Uses prohibited by pager (O) */
	/* vm_prot_t */	unlock_request:3,/* Outstanding unlock request (O) */
			unusual:1,	/* Page is absent, error, restart or
					   page locked */
			discard_request:1,/* a memory_object_discard_request()
					 * has been sent */
			list_req_pending:1, /* pagein/pageout alt mechanism */
					    /* allows creation of list	*/
					    /* requests on pages that are */
					    /* actively being paged.	*/
			:0;		/* force alignment to word boundary */

	vm_offset_t	phys_addr;	/* Physical address of page, passed
					 *  to pmap_enter (read-only) */
	union {
		struct {
			unsigned int prep:16,	/* page prep count */
				     pin:16;	/* page pin count */
		} pp_counts;
		unsigned int pp_both;		/* used to test for both zero */
	} prep_pin_u;
};

typedef struct vm_page	*vm_page_t;

/* Shorthand accessors for the prep/pin union above. */
#define prep_count	prep_pin_u.pp_counts.prep
#define pin_count	prep_pin_u.pp_counts.pin
#define prep_pin_count	prep_pin_u.pp_both

#define VM_PAGE_NULL		((vm_page_t) 0)
#define NEXT_PAGE(m)	((vm_page_t) (m)->pageq.next)

/*
 * XXX	The unusual bit should not be necessary.  Most of the bit
 * XXX	fields above really want to be masks.
 */

/*
 *	For debugging, this macro can be defined to perform
 *	some useful check on a page structure.  (Currently a no-op.)
 */

#define VM_PAGE_CHECK(mem)

/*
 *	Each pageable resident page falls into one of three lists:
 *
 *	free
 *		Available for allocation now.
 *	inactive
 *		Not referenced in any map, but still has an
 *		object/offset-page mapping, and may be dirty.
 *		This is the list of pages that should be
 *		paged out next.
 *	active
 *		A list of pages which have been placed in
 *		at least one physical map.  This list is
 *		ordered, in LRU-like fashion.
 */

extern
vm_page_t	vm_page_queue_free;	/* memory free queue */
extern
vm_page_t	vm_page_queue_fictitious;	/* fictitious free queue */
extern
queue_head_t	vm_page_queue_active;	/* active memory queue */
extern
queue_head_t	vm_page_queue_inactive;	/* inactive memory queue */

extern
vm_offset_t	first_phys_addr;	/* physical address for first_page */
extern
vm_offset_t	last_phys_addr;		/* physical address for last_page */

extern
int	vm_page_free_count;	/* How many pages are free? */
extern
int	vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
int	vm_page_active_count;	/* How many pages are active? */
extern
int	vm_page_inactive_count;	/* How many pages are inactive? */
extern
int	vm_page_wire_count;	/* How many pages are wired? */
extern
int	vm_page_free_target;	/* How many do we want free? */
extern
int	vm_page_free_min;	/* When to wakeup pageout */
extern
int	vm_page_inactive_target;/* How many do we want inactive? */
extern
int	vm_page_free_reserved;	/* How many pages reserved to do pageout */
extern
int	vm_page_laundry_count;	/* How many pages being laundered? */

/*
 * NOTE(review): the two decl_mutex_data() uses below lack the "extern"
 * qualifier that decl_simple_lock_data() receives -- confirm whether
 * this intentionally defines the locks in each including unit.
 */
decl_mutex_data(,vm_page_queue_lock)
				/* lock on active and inactive page queues */
decl_mutex_data(,vm_page_queue_free_lock)
				/* lock on free page queue */
decl_simple_lock_data(extern,vm_page_preppin_lock)	/* lock for prep/pin */

extern unsigned int	vm_page_free_wanted;
				/* how many threads are waiting for memory */

extern vm_offset_t	vm_page_fictitious_addr;
				/* (fake) phys_addr of fictitious pages */

/*
 * Prototypes for functions exported by this module.
 */
extern void		vm_page_bootstrap(
					vm_offset_t	*startp,
					vm_offset_t	*endp);

extern void		vm_page_module_init(void);

extern void		vm_page_create(
					vm_offset_t	start,
					vm_offset_t	end);

extern vm_page_t	vm_page_lookup(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern vm_page_t	vm_page_grab_fictitious(void);

extern void		vm_page_release_fictitious(
					vm_page_t page);

extern boolean_t	vm_page_convert(
					vm_page_t	page);

extern void		vm_page_more_fictitious(void);

extern int		vm_pool_low(void);

extern vm_page_t	vm_page_grab(void);

extern void		vm_page_release(
					vm_page_t	page);

extern void		vm_page_release_limbo(
					vm_page_t	page);

extern void		vm_page_limbo_exchange(
					vm_page_t	limbo_m,
					vm_page_t	new_m);

extern boolean_t	vm_page_wait(
					int		interruptible );

extern vm_page_t	vm_page_alloc(
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_init(
					vm_page_t	page,
					vm_offset_t	phys_addr);

extern void		vm_page_free(
					vm_page_t	page);

extern void		vm_page_activate(
					vm_page_t	page);

extern void		vm_page_deactivate(
					vm_page_t	page);

extern void		vm_page_rename(
					vm_page_t		page,
					vm_object_t		new_object,
					vm_object_offset_t	new_offset);

extern void		vm_page_insert(
					vm_page_t		page,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_replace(
					vm_page_t		mem,
					vm_object_t		object,
					vm_object_offset_t	offset);

extern void		vm_page_remove(
					vm_page_t	page);

extern void		vm_page_zero_fill(
					vm_page_t	page);

extern void		vm_page_part_zero_fill(
					vm_page_t	m,
					vm_offset_t	m_pa,
					vm_size_t	len);

extern void		vm_page_copy(
					vm_page_t	src_page,
					vm_page_t	dest_page);

extern void		vm_page_part_copy(
					vm_page_t	src_m,
					vm_offset_t	src_pa,
					vm_page_t	dst_m,
					vm_offset_t	dst_pa,
					vm_size_t	len);

extern void		vm_page_wire(
					vm_page_t	page);

extern void		vm_page_unwire(
					vm_page_t	page);

extern void		vm_set_page_size(void);

extern void		vm_page_gobble(
				        vm_page_t      page);

extern kern_return_t	vm_page_prep(
					vm_page_t	m);

extern kern_return_t	vm_page_pin(
					vm_page_t	m);

extern kern_return_t	vm_page_unprep(
					vm_page_t	m);

extern kern_return_t	vm_page_unpin(
					vm_page_t	m);

extern void		cleanup_limbo_queue(void);

/*
 *	Functions implemented as macros. m->wanted and m->busy are
 *	protected by the object lock.
+ */ + +#define PAGE_ASSERT_WAIT(m, interruptible) \ + MACRO_BEGIN \ + (m)->wanted = TRUE; \ + assert_wait((event_t) (m), (interruptible)); \ + MACRO_END + +#define PAGE_WAKEUP_DONE(m) \ + MACRO_BEGIN \ + (m)->busy = FALSE; \ + if ((m)->wanted) { \ + (m)->wanted = FALSE; \ + thread_wakeup((event_t) (m)); \ + } \ + MACRO_END + +#define PAGE_WAKEUP(m) \ + MACRO_BEGIN \ + if ((m)->wanted) { \ + (m)->wanted = FALSE; \ + thread_wakeup((event_t) (m)); \ + } \ + MACRO_END + +#define VM_PAGE_FREE(p) \ + MACRO_BEGIN \ + vm_page_lock_queues(); \ + vm_page_free(p); \ + vm_page_unlock_queues(); \ + MACRO_END + +#define VM_PAGE_GRAB_FICTITIOUS(M) \ + MACRO_BEGIN \ + while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL) \ + vm_page_more_fictitious(); \ + MACRO_END + +#define VM_PAGE_THROTTLED() \ + (vm_page_free_count < (vm_page_free_target - \ + ((vm_page_free_target-vm_page_free_min)>>2))) + +#define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) + +#define vm_page_lock_queues() mutex_lock(&vm_page_queue_lock) +#define vm_page_unlock_queues() mutex_unlock(&vm_page_queue_lock) +#define vm_page_pin_lock() simple_lock(&vm_page_preppin_lock) +#define vm_page_pin_unlock() simple_unlock(&vm_page_preppin_lock) + +#define VM_PAGE_QUEUES_REMOVE(mem) \ + MACRO_BEGIN \ + if (mem->active) { \ + assert(!mem->inactive); \ + queue_remove(&vm_page_queue_active, \ + mem, vm_page_t, pageq); \ + mem->active = FALSE; \ + if (!mem->fictitious) \ + vm_page_active_count--; \ + } \ + \ + if (mem->inactive) { \ + assert(!mem->active); \ + queue_remove(&vm_page_queue_inactive, \ + mem, vm_page_t, pageq); \ + mem->inactive = FALSE; \ + if (!mem->fictitious) \ + vm_page_inactive_count--; \ + } \ + MACRO_END + +#endif /* _VM_VM_PAGE_H_ */ diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c new file mode 100644 index 000000000..cc5e40edb --- /dev/null +++ b/osfmk/vm/vm_pageout.c @@ -0,0 +1,3537 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: vm/vm_pageout.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * Date: 1985 + * + * The proverbial page-out daemon. + */ +#ifdef MACH_BSD +/* remove after component merge */ +extern int vnode_pager_workaround; +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern ipc_port_t memory_manager_default; + +#ifndef VM_PAGE_LAUNDRY_MAX +#define VM_PAGE_LAUNDRY_MAX 10 /* outstanding DMM page cleans */ +#endif /* VM_PAGEOUT_LAUNDRY_MAX */ + +#ifndef VM_PAGEOUT_BURST_MAX +#define VM_PAGEOUT_BURST_MAX 32 /* simultaneous EMM page cleans */ +#endif /* VM_PAGEOUT_BURST_MAX */ + +#ifndef VM_PAGEOUT_DISCARD_MAX +#define VM_PAGEOUT_DISCARD_MAX 68 /* simultaneous EMM page cleans */ +#endif /* VM_PAGEOUT_DISCARD_MAX */ + +#ifndef VM_PAGEOUT_BURST_WAIT +#define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */ +#endif /* VM_PAGEOUT_BURST_WAIT */ + +#ifndef VM_PAGEOUT_EMPTY_WAIT +#define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */ +#endif /* VM_PAGEOUT_EMPTY_WAIT */ + +/* + * To obtain a reasonable LRU approximation, the inactive queue + * needs to be large enough to give pages on it a chance to be + * referenced a second time. This macro defines the fraction + * of active+inactive pages that should be inactive. + * The pageout daemon uses it to update vm_page_inactive_target. + * + * If vm_page_free_count falls below vm_page_free_target and + * vm_page_inactive_count is below vm_page_inactive_target, + * then the pageout daemon starts running. + */ + +#ifndef VM_PAGE_INACTIVE_TARGET +#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3) +#endif /* VM_PAGE_INACTIVE_TARGET */ + +/* + * Once the pageout daemon starts running, it keeps going + * until vm_page_free_count meets or exceeds vm_page_free_target. 
+ */ + +#ifndef VM_PAGE_FREE_TARGET +#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80) +#endif /* VM_PAGE_FREE_TARGET */ + +/* + * The pageout daemon always starts running once vm_page_free_count + * falls below vm_page_free_min. + */ + +#ifndef VM_PAGE_FREE_MIN +#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100) +#endif /* VM_PAGE_FREE_MIN */ + +/* + * When vm_page_free_count falls below vm_page_free_reserved, + * only vm-privileged threads can allocate pages. vm-privilege + * allows the pageout daemon and default pager (and any other + * associated threads needed for default pageout) to continue + * operation by dipping into the reserved pool of pages. + */ + +#ifndef VM_PAGE_FREE_RESERVED +#define VM_PAGE_FREE_RESERVED \ + ((8 * VM_PAGE_LAUNDRY_MAX) + NCPUS) +#endif /* VM_PAGE_FREE_RESERVED */ + + +/* + * Forward declarations for internal routines. + */ +extern void vm_pageout_continue(void); +extern void vm_pageout_scan(void); +extern void vm_pageout_throttle(vm_page_t m); +extern vm_page_t vm_pageout_cluster_page( + vm_object_t object, + vm_object_offset_t offset, + boolean_t precious_clean); + +unsigned int vm_pageout_reserved_internal = 0; +unsigned int vm_pageout_reserved_really = 0; + +unsigned int vm_page_laundry_max = 0; /* # of clusters outstanding */ +unsigned int vm_page_laundry_min = 0; +unsigned int vm_pageout_burst_max = 0; +unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */ +unsigned int vm_pageout_empty_wait = 0; /* milliseconds */ +unsigned int vm_pageout_burst_min = 0; +unsigned int vm_pageout_pause_count = 0; +unsigned int vm_pageout_pause_max = 0; +unsigned int vm_free_page_pause = 100; /* milliseconds */ + +/* + * These variables record the pageout daemon's actions: + * how many pages it looks at and what happens to those pages. + * No locking needed because only one thread modifies the variables. 
+ */ + +unsigned int vm_pageout_active = 0; /* debugging */ +unsigned int vm_pageout_inactive = 0; /* debugging */ +unsigned int vm_pageout_inactive_throttled = 0; /* debugging */ +unsigned int vm_pageout_inactive_forced = 0; /* debugging */ +unsigned int vm_pageout_inactive_nolock = 0; /* debugging */ +unsigned int vm_pageout_inactive_avoid = 0; /* debugging */ +unsigned int vm_pageout_inactive_busy = 0; /* debugging */ +unsigned int vm_pageout_inactive_absent = 0; /* debugging */ +unsigned int vm_pageout_inactive_used = 0; /* debugging */ +unsigned int vm_pageout_inactive_clean = 0; /* debugging */ +unsigned int vm_pageout_inactive_dirty = 0; /* debugging */ +unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */ +unsigned int vm_pageout_inactive_pinned = 0; /* debugging */ +unsigned int vm_pageout_inactive_limbo = 0; /* debugging */ +unsigned int vm_pageout_setup_limbo = 0; /* debugging */ +unsigned int vm_pageout_setup_unprepped = 0; /* debugging */ +unsigned int vm_stat_discard = 0; /* debugging */ +unsigned int vm_stat_discard_sent = 0; /* debugging */ +unsigned int vm_stat_discard_failure = 0; /* debugging */ +unsigned int vm_stat_discard_throttle = 0; /* debugging */ +unsigned int vm_pageout_scan_active_emm_throttle = 0; /* debugging */ +unsigned int vm_pageout_scan_active_emm_throttle_success = 0; /* debugging */ +unsigned int vm_pageout_scan_active_emm_throttle_failure = 0; /* debugging */ +unsigned int vm_pageout_scan_inactive_emm_throttle = 0; /* debugging */ +unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0; /* debugging */ +unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0; /* debugging */ + + +unsigned int vm_pageout_out_of_line = 0; +unsigned int vm_pageout_in_place = 0; +/* + * Routine: vm_pageout_object_allocate + * Purpose: + * Allocate an object for use as out-of-line memory in a + * data_return/data_initialize message. + * The page must be in an unlocked object. 
+ * + * If the page belongs to a trusted pager, cleaning in place + * will be used, which utilizes a special "pageout object" + * containing private alias pages for the real page frames. + * Untrusted pagers use normal out-of-line memory. + */ +vm_object_t +vm_pageout_object_allocate( + vm_page_t m, + vm_size_t size, + vm_object_offset_t offset) +{ + vm_object_t object = m->object; + vm_object_t new_object; + + assert(object->pager_ready); + + if (object->pager_trusted || object->internal) + vm_pageout_throttle(m); + + new_object = vm_object_allocate(size); + + if (object->pager_trusted) { + assert (offset < object->size); + + vm_object_lock(new_object); + new_object->pageout = TRUE; + new_object->shadow = object; + new_object->can_persist = FALSE; + new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + new_object->shadow_offset = offset; + vm_object_unlock(new_object); + + /* + * Take a paging reference on the object. This will be dropped + * in vm_pageout_object_terminate() + */ + vm_object_lock(object); + vm_object_paging_begin(object); + vm_object_unlock(object); + + vm_pageout_in_place++; + } else + vm_pageout_out_of_line++; + return(new_object); +} + +#if MACH_CLUSTER_STATS +unsigned long vm_pageout_cluster_dirtied = 0; +unsigned long vm_pageout_cluster_cleaned = 0; +unsigned long vm_pageout_cluster_collisions = 0; +unsigned long vm_pageout_cluster_clusters = 0; +unsigned long vm_pageout_cluster_conversions = 0; +unsigned long vm_pageout_target_collisions = 0; +unsigned long vm_pageout_target_page_dirtied = 0; +unsigned long vm_pageout_target_page_freed = 0; +unsigned long vm_pageout_target_page_pinned = 0; +unsigned long vm_pageout_target_page_limbo = 0; +#define CLUSTER_STAT(clause) clause +#else /* MACH_CLUSTER_STATS */ +#define CLUSTER_STAT(clause) +#endif /* MACH_CLUSTER_STATS */ + +/* + * Routine: vm_pageout_object_terminate + * Purpose: + * Destroy the pageout_object allocated by + * vm_pageout_object_allocate(), and perform all of the + * required 
cleanup actions. + * + * In/Out conditions: + * The object must be locked, and will be returned locked. + */ +void +vm_pageout_object_terminate( + vm_object_t object) +{ + vm_object_t shadow_object; + + /* + * Deal with the deallocation (last reference) of a pageout object + * (used for cleaning-in-place) by dropping the paging references/ + * freeing pages in the original object. + */ + + assert(object->pageout); + shadow_object = object->shadow; + vm_object_lock(shadow_object); + + while (!queue_empty(&object->memq)) { + vm_page_t p, m; + vm_object_offset_t offset; + + p = (vm_page_t) queue_first(&object->memq); + + assert(p->private); + assert(p->pageout); + p->pageout = FALSE; + assert(!p->cleaning); + + offset = p->offset; + VM_PAGE_FREE(p); + p = VM_PAGE_NULL; + + m = vm_page_lookup(shadow_object, + offset + object->shadow_offset); + + if(m == VM_PAGE_NULL) + continue; + assert(m->cleaning); + + /* + * Account for the paging reference taken when + * m->cleaning was set on this page. + */ + vm_object_paging_end(shadow_object); + assert((m->dirty) || (m->precious) || + (m->busy && m->cleaning)); + + /* + * Handle the trusted pager throttle. + */ + vm_page_lock_queues(); + if (m->laundry) { + vm_page_laundry_count--; + m->laundry = FALSE; + if (vm_page_laundry_count < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) &vm_page_laundry_count); + } + } + + /* + * Handle the "target" page(s). These pages are to be freed if + * successfully cleaned. Target pages are always busy, and are + * wired exactly once. The initial target pages are not mapped, + * (so cannot be referenced or modified) but converted target + * pages may have been modified between the selection as an + * adjacent page and conversion to a target. 
+ */ + if (m->pageout) { + assert(m->busy); + assert(m->wire_count == 1); + m->cleaning = FALSE; + m->pageout = FALSE; +#if MACH_CLUSTER_STATS + if (m->wanted) vm_pageout_target_collisions++; +#endif + /* + * Revoke all access to the page. Since the object is + * locked, and the page is busy, this prevents the page + * from being dirtied after the pmap_is_modified() call + * returns. + */ + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + + /* + * Since the page is left "dirty" but "not modifed", we + * can detect whether the page was redirtied during + * pageout by checking the modify state. + */ + m->dirty = pmap_is_modified(m->phys_addr); + + if (m->dirty) { + CLUSTER_STAT(vm_pageout_target_page_dirtied++;) + vm_page_unwire(m);/* reactivates */ + VM_STAT(reactivations++); + PAGE_WAKEUP_DONE(m); + } else if (m->prep_pin_count != 0) { + vm_page_pin_lock(); + if (m->pin_count != 0) { + /* page is pinned; reactivate */ + CLUSTER_STAT( + vm_pageout_target_page_pinned++;) + vm_page_unwire(m);/* reactivates */ + VM_STAT(reactivations++); + PAGE_WAKEUP_DONE(m); + } else { + /* + * page is prepped but not pinned; send + * it into limbo. Note that + * vm_page_free (which will be called + * after releasing the pin lock) knows + * how to handle a page with limbo set. + */ + m->limbo = TRUE; + CLUSTER_STAT( + vm_pageout_target_page_limbo++;) + } + vm_page_pin_unlock(); + if (m->limbo) + vm_page_free(m); + } else { + CLUSTER_STAT(vm_pageout_target_page_freed++;) + vm_page_free(m);/* clears busy, etc. */ + } + vm_page_unlock_queues(); + continue; + } + /* + * Handle the "adjacent" pages. These pages were cleaned in + * place, and should be left alone. + * If prep_pin_count is nonzero, then someone is using the + * page, so make it active. 
+ */ + if (!m->active && !m->inactive) { + if (m->reference || m->prep_pin_count != 0) + vm_page_activate(m); + else + vm_page_deactivate(m); + } + if((m->busy) && (m->cleaning)) { + + /* the request_page_list case, (COPY_OUT_FROM FALSE) */ + m->busy = FALSE; + + /* We do not re-set m->dirty ! */ + /* The page was busy so no extraneous activity */ + /* could have occured. COPY_INTO is a read into the */ + /* new pages. CLEAN_IN_PLACE does actually write */ + /* out the pages but handling outside of this code */ + /* will take care of resetting dirty. We clear the */ + /* modify however for the Programmed I/O case. */ + pmap_clear_modify(m->phys_addr); + if(m->absent) { + m->absent = FALSE; + if(shadow_object->absent_count == 1) + vm_object_absent_release(shadow_object); + else + shadow_object->absent_count--; + } + m->overwriting = FALSE; + } else if (m->overwriting) { + /* alternate request page list, write to page_list */ + /* case. Occurs when the original page was wired */ + /* at the time of the list request */ + assert(m->wire_count != 0); + vm_page_unwire(m);/* reactivates */ + m->overwriting = FALSE; + } else { + /* + * Set the dirty state according to whether or not the page was + * modified during the pageout. Note that we purposefully do + * NOT call pmap_clear_modify since the page is still mapped. + * If the page were to be dirtied between the 2 calls, this + * this fact would be lost. This code is only necessary to + * maintain statistics, since the pmap module is always + * consulted if m->dirty is false. + */ +#if MACH_CLUSTER_STATS + m->dirty = pmap_is_modified(m->phys_addr); + + if (m->dirty) vm_pageout_cluster_dirtied++; + else vm_pageout_cluster_cleaned++; + if (m->wanted) vm_pageout_cluster_collisions++; +#else + m->dirty = 0; +#endif + } + m->cleaning = FALSE; + + + /* + * Wakeup any thread waiting for the page to be un-cleaning. 
+ */ + PAGE_WAKEUP(m); + vm_page_unlock_queues(); + } + /* + * Account for the paging reference taken in vm_paging_object_allocate. + */ + vm_object_paging_end(shadow_object); + vm_object_unlock(shadow_object); + + assert(object->ref_count == 0); + assert(object->paging_in_progress == 0); + assert(object->resident_page_count == 0); + return; +} + +/* + * Routine: vm_pageout_setup + * Purpose: + * Set up a page for pageout (clean & flush). + * + * Move the page to a new object, as part of which it will be + * sent to its memory manager in a memory_object_data_write or + * memory_object_initialize message. + * + * The "new_object" and "new_offset" arguments + * indicate where the page should be moved. + * + * In/Out conditions: + * The page in question must not be on any pageout queues, + * and must be busy. The object to which it belongs + * must be unlocked, and the caller must hold a paging + * reference to it. The new_object must not be locked. + * + * This routine returns a pointer to a place-holder page, + * inserted at the same offset, to block out-of-order + * requests for the page. The place-holder page must + * be freed after the data_write or initialize message + * has been sent. + * + * The original page is put on a paging queue and marked + * not busy on exit. 
+ */ +vm_page_t +vm_pageout_setup( + register vm_page_t m, + register vm_object_t new_object, + vm_object_offset_t new_offset) +{ + register vm_object_t old_object = m->object; + vm_object_offset_t paging_offset; + vm_object_offset_t offset; + register vm_page_t holding_page; + register vm_page_t new_m; + register vm_page_t new_page; + boolean_t need_to_wire = FALSE; + + + XPR(XPR_VM_PAGEOUT, + "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n", + (integer_t)m->object, (integer_t)m->offset, + (integer_t)m, (integer_t)new_object, + (integer_t)new_offset); + assert(m && m->busy && !m->absent && !m->fictitious && !m->error && + !m->restart); + + assert(m->dirty || m->precious); + + /* + * Create a place-holder page where the old one was, to prevent + * attempted pageins of this page while we're unlocked. + * If the pageout daemon put this page in limbo and we're not + * going to clean in place, get another fictitious page to + * exchange for it now. + */ + VM_PAGE_GRAB_FICTITIOUS(holding_page); + + if (m->limbo) + VM_PAGE_GRAB_FICTITIOUS(new_page); + + vm_object_lock(old_object); + + offset = m->offset; + paging_offset = offset + old_object->paging_offset; + + if (old_object->pager_trusted) { + /* + * This pager is trusted, so we can clean this page + * in place. Leave it in the old object, and mark it + * cleaning & pageout. + */ + new_m = holding_page; + holding_page = VM_PAGE_NULL; + + /* + * If the pageout daemon put this page in limbo, exchange the + * identities of the limbo page and the new fictitious page, + * and continue with the new page, unless the prep count has + * gone to zero in the meantime (which means no one is + * interested in the page any more). In that case, just clear + * the limbo bit and free the extra fictitious page. 
+ */ + if (m->limbo) { + if (m->prep_pin_count == 0) { + /* page doesn't have to be in limbo any more */ + m->limbo = FALSE; + vm_page_lock_queues(); + vm_page_free(new_page); + vm_page_unlock_queues(); + vm_pageout_setup_unprepped++; + } else { + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + vm_page_remove(m); + vm_page_limbo_exchange(m, new_page); + vm_pageout_setup_limbo++; + vm_page_release_limbo(m); + m = new_page; + vm_page_insert(m, old_object, offset); + vm_page_unlock_queues(); + } + } + + /* + * Set up new page to be private shadow of real page. + */ + new_m->phys_addr = m->phys_addr; + new_m->fictitious = FALSE; + new_m->private = TRUE; + new_m->pageout = TRUE; + + /* + * Mark real page as cleaning (indicating that we hold a + * paging reference to be released via m_o_d_r_c) and + * pageout (indicating that the page should be freed + * when the pageout completes). + */ + pmap_clear_modify(m->phys_addr); + vm_page_lock_queues(); + vm_page_wire(new_m); + m->cleaning = TRUE; + m->pageout = TRUE; + + vm_page_wire(m); + assert(m->wire_count == 1); + vm_page_unlock_queues(); + + m->dirty = TRUE; + m->precious = FALSE; + m->page_lock = VM_PROT_NONE; + m->unusual = FALSE; + m->unlock_request = VM_PROT_NONE; + } else { + /* + * Cannot clean in place, so rip the old page out of the + * object, and stick the holding page in. Set new_m to the + * page in the new object. + */ + vm_page_lock_queues(); + VM_PAGE_QUEUES_REMOVE(m); + vm_page_remove(m); + + /* + * If the pageout daemon put this page in limbo, exchange the + * identities of the limbo page and the new fictitious page, + * and continue with the new page, unless the prep count has + * gone to zero in the meantime (which means no one is + * interested in the page any more). In that case, just clear + * the limbo bit and free the extra fictitious page. 
+ */ + if (m->limbo) { + if (m->prep_pin_count == 0) { + /* page doesn't have to be in limbo any more */ + m->limbo = FALSE; + vm_page_free(new_page); + vm_pageout_setup_unprepped++; + } else { + vm_page_limbo_exchange(m, new_page); + vm_pageout_setup_limbo++; + vm_page_release_limbo(m); + m = new_page; + } + } + + vm_page_insert(holding_page, old_object, offset); + vm_page_unlock_queues(); + + m->dirty = TRUE; + m->precious = FALSE; + new_m = m; + new_m->page_lock = VM_PROT_NONE; + new_m->unlock_request = VM_PROT_NONE; + + if (old_object->internal) + need_to_wire = TRUE; + } + /* + * Record that this page has been written out + */ +#if MACH_PAGEMAP + vm_external_state_set(old_object->existence_map, offset); +#endif /* MACH_PAGEMAP */ + + vm_object_unlock(old_object); + + vm_object_lock(new_object); + + /* + * Put the page into the new object. If it is a not wired + * (if it's the real page) it will be activated. + */ + + vm_page_lock_queues(); + vm_page_insert(new_m, new_object, new_offset); + if (need_to_wire) + vm_page_wire(new_m); + else + vm_page_activate(new_m); + PAGE_WAKEUP_DONE(new_m); + vm_page_unlock_queues(); + + vm_object_unlock(new_object); + + /* + * Return the placeholder page to simplify cleanup. + */ + return (holding_page); +} + +/* + * Routine: vm_pageclean_setup + * + * Purpose: setup a page to be cleaned (made non-dirty), but not + * necessarily flushed from the VM page cache. + * This is accomplished by cleaning in place. + * + * The page must not be busy, and the object and page + * queues must be locked. 
+ * + */ +void +vm_pageclean_setup( + vm_page_t m, + vm_page_t new_m, + vm_object_t new_object, + vm_object_offset_t new_offset) +{ + vm_object_t old_object = m->object; + assert(!m->busy); + assert(!m->cleaning); + + XPR(XPR_VM_PAGEOUT, + "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n", + (integer_t)old_object, m->offset, (integer_t)m, + (integer_t)new_m, new_offset); + + pmap_clear_modify(m->phys_addr); + vm_object_paging_begin(old_object); + + /* + * Record that this page has been written out + */ +#if MACH_PAGEMAP + vm_external_state_set(old_object->existence_map, m->offset); +#endif /*MACH_PAGEMAP*/ + + /* + * Mark original page as cleaning in place. + */ + m->cleaning = TRUE; + m->dirty = TRUE; + m->precious = FALSE; + + /* + * Convert the fictitious page to a private shadow of + * the real page. + */ + assert(new_m->fictitious); + new_m->fictitious = FALSE; + new_m->private = TRUE; + new_m->pageout = TRUE; + new_m->phys_addr = m->phys_addr; + vm_page_wire(new_m); + + vm_page_insert(new_m, new_object, new_offset); + assert(!new_m->wanted); + new_m->busy = FALSE; +} + +void +vm_pageclean_copy( + vm_page_t m, + vm_page_t new_m, + vm_object_t new_object, + vm_object_offset_t new_offset) +{ + XPR(XPR_VM_PAGEOUT, + "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n", + m, new_m, new_object, new_offset, 0); + + assert((!m->busy) && (!m->cleaning)); + + assert(!new_m->private && !new_m->fictitious); + + pmap_clear_modify(m->phys_addr); + + m->busy = TRUE; + vm_object_paging_begin(m->object); + vm_page_unlock_queues(); + vm_object_unlock(m->object); + + /* + * Copy the original page to the new page. + */ + vm_page_copy(m, new_m); + + /* + * Mark the old page as clean. A request to pmap_is_modified + * will get the right answer. 
+ */ + vm_object_lock(m->object); + m->dirty = FALSE; + + vm_object_paging_end(m->object); + + vm_page_lock_queues(); + if (!m->active && !m->inactive) + vm_page_activate(m); + PAGE_WAKEUP_DONE(m); + + vm_page_insert(new_m, new_object, new_offset); + vm_page_activate(new_m); + new_m->busy = FALSE; /* No other thread can be waiting */ +} + + +/* + * Routine: vm_pageout_initialize_page + * Purpose: + * Causes the specified page to be initialized in + * the appropriate memory object. This routine is used to push + * pages into a copy-object when they are modified in the + * permanent object. + * + * The page is moved to a temporary object and paged out. + * + * In/out conditions: + * The page in question must not be on any pageout queues. + * The object to which it belongs must be locked. + * The page must be busy, but not hold a paging reference. + * + * Implementation: + * Move this page to a completely new object. + */ +void +vm_pageout_initialize_page( + vm_page_t m) +{ + vm_map_copy_t copy; + vm_object_t new_object; + vm_object_t object; + vm_object_offset_t paging_offset; + vm_page_t holding_page; + + + XPR(XPR_VM_PAGEOUT, + "vm_pageout_initialize_page, page 0x%X\n", + (integer_t)m, 0, 0, 0, 0); + assert(m->busy); + + /* + * Verify that we really want to clean this page + */ + assert(!m->absent); + assert(!m->error); + assert(m->dirty); + + /* + * Create a paging reference to let us play with the object. 
+ */ + object = m->object; + paging_offset = m->offset + object->paging_offset; + vm_object_paging_begin(object); + vm_object_unlock(object); + if (m->absent || m->error || m->restart || + (!m->dirty && !m->precious)) { + VM_PAGE_FREE(m); + panic("reservation without pageout?"); /* alan */ + return; + } + + /* set the page for future call to vm_fault_list_request */ + holding_page = NULL; + vm_object_lock(m->object); + vm_page_lock_queues(); + pmap_clear_modify(m->phys_addr); + m->dirty = TRUE; + m->busy = TRUE; + m->list_req_pending = TRUE; + m->cleaning = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + vm_page_unlock_queues(); + vm_object_unlock(m->object); + vm_pageout_throttle(m); + copy = NULL; + + VM_STAT(pageouts++); + /* VM_STAT(pages_pagedout++); */ + + /* + * Write the data to its pager. + * Note that the data is passed by naming the new object, + * not a virtual address; the pager interface has been + * manipulated to use the "internal memory" data type. + * [The object reference from its allocation is donated + * to the eventual recipient.] + */ + memory_object_data_initialize(object->pager, + object->pager_request, + paging_offset, + POINTER_T(copy), + PAGE_SIZE); + + vm_object_lock(object); +} + +#if MACH_CLUSTER_STATS +#define MAXCLUSTERPAGES 16 +struct { + unsigned long pages_in_cluster; + unsigned long pages_at_higher_offsets; + unsigned long pages_at_lower_offsets; +} cluster_stats[MAXCLUSTERPAGES]; +#endif /* MACH_CLUSTER_STATS */ + +boolean_t allow_clustered_pageouts = FALSE; + +/* + * vm_pageout_cluster: + * + * Given a page, page it out, and attempt to clean adjacent pages + * in the same operation. + * + * The page must be busy, and the object unlocked w/ paging reference + * to prevent deallocation or collapse. The page must not be on any + * pageout queue. 
+ */ +void +vm_pageout_cluster( + vm_page_t m) +{ + vm_object_t object = m->object; + vm_object_offset_t offset = m->offset; /* from vm_object start */ + vm_object_offset_t paging_offset = m->offset + object->paging_offset; + vm_object_t new_object; + vm_object_offset_t new_offset; + vm_size_t cluster_size; + vm_object_offset_t cluster_offset; /* from memory_object start */ + vm_object_offset_t cluster_lower_bound; /* from vm_object_start */ + vm_object_offset_t cluster_upper_bound; /* from vm_object_start */ + vm_object_offset_t cluster_start, cluster_end;/* from vm_object start */ + vm_object_offset_t offset_within_cluster; + vm_size_t length_of_data; + vm_page_t friend, holding_page; + vm_map_copy_t copy; + kern_return_t rc; + boolean_t precious_clean = TRUE; + int pages_in_cluster; + + CLUSTER_STAT(int pages_at_higher_offsets = 0;) + CLUSTER_STAT(int pages_at_lower_offsets = 0;) + + XPR(XPR_VM_PAGEOUT, + "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n", + (integer_t)object, offset, (integer_t)m, 0, 0); + + CLUSTER_STAT(vm_pageout_cluster_clusters++;) + /* + * Only a certain kind of page is appreciated here. + */ + assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0)); + assert(!m->cleaning && !m->pageout && !m->inactive && !m->active); + + vm_object_lock(object); + cluster_size = object->cluster_size; + + assert(cluster_size >= PAGE_SIZE); + if (cluster_size < PAGE_SIZE) cluster_size = PAGE_SIZE; + assert(object->pager_created && object->pager_initialized); + assert(object->internal || object->pager_ready); + + if (m->precious && !m->dirty) + precious_clean = TRUE; + + if (!object->pager_trusted || !allow_clustered_pageouts) + cluster_size = PAGE_SIZE; + vm_object_unlock(object); + + cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1); + /* bytes from beginning of cluster */ + /* + * Due to unaligned mappings, we have to be careful + * of negative offsets into the VM object. 
Clip the cluster + * boundary to the VM object, not the memory object. + */ + if (offset > cluster_offset) { + cluster_lower_bound = offset - cluster_offset; + /* from vm_object */ + } else { + cluster_lower_bound = 0; + } + cluster_upper_bound = (offset - cluster_offset) + + (vm_object_offset_t)cluster_size; + + /* set the page for future call to vm_fault_list_request */ + holding_page = NULL; + vm_object_lock(m->object); + vm_page_lock_queues(); + m->busy = TRUE; + m->list_req_pending = TRUE; + m->cleaning = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + vm_page_unlock_queues(); + vm_object_unlock(m->object); + vm_pageout_throttle(m); + + /* + * Search backward for adjacent eligible pages to clean in + * this operation. + */ + + cluster_start = offset; + if (offset) { /* avoid wrap-around at zero */ + for (cluster_start = offset - PAGE_SIZE_64; + cluster_start >= cluster_lower_bound; + cluster_start -= PAGE_SIZE_64) { + assert(cluster_size > PAGE_SIZE); + + vm_object_lock(object); + vm_page_lock_queues(); + + if ((friend = vm_pageout_cluster_page(object, cluster_start, + precious_clean)) == VM_PAGE_NULL) { + vm_page_unlock_queues(); + vm_object_unlock(object); + break; + } + new_offset = (cluster_start + object->paging_offset) + & (cluster_size - 1); + + assert(new_offset < cluster_offset); + m->list_req_pending = TRUE; + m->cleaning = TRUE; +/* do nothing except advance the write request, all we really need to */ +/* do is push the target page and let the code at the other end decide */ +/* what is really the right size */ + if (vm_page_free_count <= vm_page_free_reserved) { + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + } + + vm_page_unlock_queues(); + vm_object_unlock(object); + if(m->dirty || m->object->internal) { + CLUSTER_STAT(pages_at_lower_offsets++;) + } + + } + cluster_start += PAGE_SIZE_64; + } + assert(cluster_start >= cluster_lower_bound); + assert(cluster_start <= offset); + /* + * Search forward for adjacent eligible pages to clean 
in + * this operation. + */ + for (cluster_end = offset + PAGE_SIZE_64; + cluster_end < cluster_upper_bound; + cluster_end += PAGE_SIZE_64) { + assert(cluster_size > PAGE_SIZE); + + vm_object_lock(object); + vm_page_lock_queues(); + + if ((friend = vm_pageout_cluster_page(object, cluster_end, + precious_clean)) == VM_PAGE_NULL) { + vm_page_unlock_queues(); + vm_object_unlock(object); + break; + } + new_offset = (cluster_end + object->paging_offset) + & (cluster_size - 1); + + assert(new_offset < cluster_size); + m->list_req_pending = TRUE; + m->cleaning = TRUE; +/* do nothing except advance the write request, all we really need to */ +/* do is push the target page and let the code at the other end decide */ +/* what is really the right size */ + if (vm_page_free_count <= vm_page_free_reserved) { + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + } + + vm_page_unlock_queues(); + vm_object_unlock(object); + + if(m->dirty || m->object->internal) { + CLUSTER_STAT(pages_at_higher_offsets++;) + } + } + assert(cluster_end <= cluster_upper_bound); + assert(cluster_end >= offset + PAGE_SIZE); + + /* + * (offset - cluster_offset) is beginning of cluster_object + * relative to vm_object start. + */ + offset_within_cluster = cluster_start - (offset - cluster_offset); + length_of_data = cluster_end - cluster_start; + + assert(offset_within_cluster < cluster_size); + assert((offset_within_cluster + length_of_data) <= cluster_size); + + rc = KERN_SUCCESS; + assert(rc == KERN_SUCCESS); + + pages_in_cluster = length_of_data/PAGE_SIZE; + if(m->dirty || m->object->internal) { + VM_STAT(pageouts++); + } + /* VM_STAT(pages_pagedout += pages_in_cluster); */ + +#if MACH_CLUSTER_STATS + (cluster_stats[pages_at_lower_offsets].pages_at_lower_offsets)++; + (cluster_stats[pages_at_higher_offsets].pages_at_higher_offsets)++; + (cluster_stats[pages_in_cluster].pages_in_cluster)++; +#endif /* MACH_CLUSTER_STATS */ + + /* + * Send the data to the pager. 
+ */ + paging_offset = cluster_start + object->paging_offset; +#ifdef MACH_BSD + if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == + ((rpc_subsystem_t) &vnode_pager_workaround)) { + rc = vnode_pager_data_return(object->pager, + object->pager_request, + paging_offset, + POINTER_T(copy), + length_of_data, + !precious_clean, + FALSE); + } else { + rc = memory_object_data_return(object->pager, + object->pager_request, + paging_offset, + POINTER_T(copy), + length_of_data, + !precious_clean, + FALSE); + } +#else + rc = memory_object_data_return(object->pager, + object->pager_request, + paging_offset, + POINTER_T(copy), + length_of_data, + !precious_clean, + FALSE); +#endif + vm_object_lock(object); + vm_object_paging_end(object); + + if (holding_page) { + assert(!object->pager_trusted); + VM_PAGE_FREE(holding_page); + vm_object_paging_end(object); + } + + vm_object_unlock(object); +} + +/* + * vm_pageout_return_write_pages + * Recover pages from an aborted write attempt + * + */ + +vm_pageout_return_write_pages( + ipc_port_t control_port, + vm_object_offset_t object_offset, + vm_map_copy_t copy) +{ + vm_object_t object; + int offset; + int size; + int shadow_offset; + int copy_offset; + int j; + vm_page_t m; + + + object = copy->cpy_object; + copy_offset = copy->offset; + size = copy->size; + + if((copy->type != VM_MAP_COPY_OBJECT) || (object->shadow == 0)) { + object = (vm_object_t)control_port->ip_kobject; + shadow_offset = (object_offset - object->paging_offset) + - copy->offset; + } else { + /* get the offset from the copy object */ + shadow_offset = object->shadow_offset; + /* find the backing object */ + object = object->shadow; + } + vm_object_lock(object); + + for(offset = 0, j=0; offset < size; offset+=page_size, j++) { + m = vm_page_lookup(object, + offset + shadow_offset + copy_offset); + if((m == VM_PAGE_NULL) || m->fictitious) { + + vm_page_t p; + int i; + vm_object_t copy_object; + + /* m might be fictitious if the original page */ + /* was 
found to be in limbo at the time of */ + /* vm_pageout_setup */ + + if((m != VM_PAGE_NULL) && m->fictitious) { + m->cleaning = FALSE; + vm_page_remove(m); + /* if object is not pager trusted then */ + /* this fictitious page will be removed */ + /* as the holding page in vm_pageout_cluster */ + if (object->pager_trusted) + vm_page_free(m); + if(vm_page_laundry_count) + vm_page_laundry_count--; + if (vm_page_laundry_count + < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) + &vm_page_laundry_count); + } + } + else if ((object->pager_trusted) && + (copy->type == VM_MAP_COPY_OBJECT)) { + vm_object_paging_end(object); + } + + copy_object = copy->cpy_object; + + if(copy->type == VM_MAP_COPY_OBJECT) { + p = (vm_page_t) queue_first(©_object->memq); + + for(i = 0; + i < copy_object->resident_page_count; + i++) { + if(p->offset == (offset + copy_offset)) + break; + p = (vm_page_t) queue_next(&p->listq); + } + + vm_page_remove(p); + } else { + p = copy->cpy_page_list[j]; + copy->cpy_page_list[j] = 0; + p->gobbled = FALSE; + } + + vm_page_insert(p, object, + offset + shadow_offset + copy_offset); + p->busy = TRUE; + p->dirty = TRUE; + p->laundry = FALSE; + if (p->pageout) { + p->pageout = FALSE; /*dont throw away target*/ + vm_page_unwire(p);/* reactivates */ + } + } else if(m->pageout) { + m->pageout = FALSE; /* dont throw away target pages */ + vm_page_unwire(m);/* reactivates */ + } + } + + vm_object_unlock(object); + vm_map_copy_discard(copy); + vm_object_lock(object); + + for(offset = 0; offset < size; offset+=page_size) { + m = vm_page_lookup(object, + offset + shadow_offset + copy_offset); + m->dirty = TRUE; /* we'll send the pages home later */ + m->busy = FALSE; /* allow system access again */ + } + + vm_object_unlock(object); +} + +/* + * Trusted pager throttle. + * Object must be unlocked, page queues must be unlocked. 
+ */ +void +vm_pageout_throttle( + register vm_page_t m) +{ + vm_page_lock_queues(); + assert(!m->laundry); + m->laundry = TRUE; + while (vm_page_laundry_count >= vm_page_laundry_max) { + /* + * Set the threshold for when vm_page_free() + * should wake us up. + */ + vm_page_laundry_min = vm_page_laundry_max/2; + assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT); + vm_page_unlock_queues(); + + /* + * Pause to let the default pager catch up. + */ + thread_block((void (*)(void)) 0); + vm_page_lock_queues(); + } + vm_page_laundry_count++; + vm_page_unlock_queues(); +} + +/* + * The global variable vm_pageout_clean_active_pages controls whether + * active pages are considered valid to be cleaned in place during a + * clustered pageout. Performance measurements are necessary to determine + * the best policy. + */ +int vm_pageout_clean_active_pages = 1; +/* + * vm_pageout_cluster_page: [Internal] + * + * return a vm_page_t to the page at (object,offset) if it is appropriate + * to clean in place. Pages that are non-existent, busy, absent, already + * cleaning, or not dirty are not eligible to be cleaned as an adjacent + * page in a cluster. + * + * The object must be locked on entry, and remains locked throughout + * this call. 
+ */ + +vm_page_t +vm_pageout_cluster_page( + vm_object_t object, + vm_object_offset_t offset, + boolean_t precious_clean) +{ + vm_page_t m; + + XPR(XPR_VM_PAGEOUT, + "vm_pageout_cluster_page, object 0x%X offset 0x%X\n", + (integer_t)object, offset, 0, 0, 0); + + if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL) + return(VM_PAGE_NULL); + + if (m->busy || m->absent || m->cleaning || + m->prep_pin_count != 0 || + (m->wire_count != 0) || m->error) + return(VM_PAGE_NULL); + + if (vm_pageout_clean_active_pages) { + if (!m->active && !m->inactive) return(VM_PAGE_NULL); + } else { + if (!m->inactive) return(VM_PAGE_NULL); + } + + assert(!m->private); + assert(!m->fictitious); + + if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr); + + if (precious_clean) { + if (!m->precious || !m->dirty) + return(VM_PAGE_NULL); + } else { + if (!m->dirty) + return(VM_PAGE_NULL); + } + return(m); +} + +/* + * vm_pageout_scan does the dirty work for the pageout daemon. + * It returns with vm_page_queue_free_lock held and + * vm_page_free_wanted == 0. + */ +extern void vm_pageout_scan_continue(void); /* forward; */ + +void +vm_pageout_scan(void) +{ + unsigned int burst_count; + boolean_t now = FALSE; + unsigned int laundry_pages; + boolean_t need_more_inactive_pages; + unsigned int loop_detect; + + XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0); + +/*???*/ /* + * We want to gradually dribble pages from the active queue + * to the inactive queue. If we let the inactive queue get + * very small, and then suddenly dump many pages into it, + * those pages won't get a sufficient chance to be referenced + * before we start taking them from the inactive queue. + * + * We must limit the rate at which we send pages to the pagers. + * data_write messages consume memory, for message buffers and + * for map-copy objects. If we get too far ahead of the pagers, + * we can potentially run out of memory. 
+ * + * We can use the laundry count to limit directly the number + * of pages outstanding to the default pager. A similar + * strategy for external pagers doesn't work, because + * external pagers don't have to deallocate the pages sent them, + * and because we might have to send pages to external pagers + * even if they aren't processing writes. So we also + * use a burst count to limit writes to external pagers. + * + * When memory is very tight, we can't rely on external pagers to + * clean pages. They probably aren't running, because they + * aren't vm-privileged. If we kept sending dirty pages to them, + * we could exhaust the free list. However, we can't just ignore + * pages belonging to external objects, because there might be no + * pages belonging to internal objects. Hence, we get the page + * into an internal object and then immediately double-page it, + * sending it to the default pager. + * + * consider_zone_gc should be last, because the other operations + * might return memory to zones. + */ + + Restart: + + mutex_lock(&vm_page_queue_free_lock); + now = (vm_page_free_count < vm_page_free_min); + mutex_unlock(&vm_page_queue_free_lock); +#if THREAD_SWAPPER + swapout_threads(now); +#endif /* THREAD_SWAPPER */ + + stack_collect(); + consider_task_collect(); + consider_thread_collect(); + cleanup_limbo_queue(); + consider_zone_gc(); + consider_machine_collect(); + + loop_detect = vm_page_active_count + vm_page_inactive_count; +#if 0 + if (vm_page_free_count <= vm_page_free_reserved) { + need_more_inactive_pages = TRUE; + } else { + need_more_inactive_pages = FALSE; + } +#else + need_more_inactive_pages = FALSE; +#endif + + for (burst_count = 0;;) { + register vm_page_t m; + register vm_object_t object; + unsigned int free_count; + + /* + * Recalculate vm_page_inactivate_target. 
+ */ + + vm_page_lock_queues(); + vm_page_inactive_target = + VM_PAGE_INACTIVE_TARGET(vm_page_active_count + + vm_page_inactive_count); + + /* + * Move pages from active to inactive. + */ + + while ((vm_page_inactive_count < vm_page_inactive_target || + need_more_inactive_pages) && + !queue_empty(&vm_page_queue_active)) { + register vm_object_t object; + + vm_pageout_active++; + m = (vm_page_t) queue_first(&vm_page_queue_active); + + /* + * If we're getting really low on memory, + * try selecting a page that will go + * directly to the default_pager. + * If there are no such pages, we have to + * page out a page backed by an EMM, + * so that the default_pager can recover + * it eventually. + */ + if (need_more_inactive_pages && + (IP_VALID(memory_manager_default))) { + vm_pageout_scan_active_emm_throttle++; + do { + assert(m->active && !m->inactive); + object = m->object; + + if (vm_object_lock_try(object)) { +#if 0 + if (object->pager_trusted || + object->internal) { + /* found one ! */ + vm_pageout_scan_active_emm_throttle_success++; + goto object_locked_active; + } +#else + vm_pageout_scan_active_emm_throttle_success++; + goto object_locked_active; +#endif + vm_object_unlock(object); + } + m = (vm_page_t) queue_next(&m->pageq); + } while (!queue_end(&vm_page_queue_active, + (queue_entry_t) m)); + if (queue_end(&vm_page_queue_active, + (queue_entry_t) m)) { + vm_pageout_scan_active_emm_throttle_failure++; + m = (vm_page_t) + queue_first(&vm_page_queue_active); + } + } + + assert(m->active && !m->inactive); + + object = m->object; + if (!vm_object_lock_try(object)) { + /* + * Move page to end and continue. + */ + + queue_remove(&vm_page_queue_active, m, + vm_page_t, pageq); + queue_enter(&vm_page_queue_active, m, + vm_page_t, pageq); + vm_page_unlock_queues(); + mutex_pause(); + vm_page_lock_queues(); + continue; + } + + object_locked_active: + /* + * If the page is busy, then we pull it + * off the active queue and leave it alone. 
+ */ + + if (m->busy) { + vm_object_unlock(object); + queue_remove(&vm_page_queue_active, m, + vm_page_t, pageq); + m->active = FALSE; + if (!m->fictitious) + vm_page_active_count--; + continue; + } + + /* + * Deactivate the page while holding the object + * locked, so we know the page is still not busy. + * This should prevent races between pmap_enter + * and pmap_clear_reference. The page might be + * absent or fictitious, but vm_page_deactivate + * can handle that. + */ + + vm_page_deactivate(m); + vm_object_unlock(object); + } + + /* + * We are done if we have met our target *and* + * nobody is still waiting for a page. + */ + + mutex_lock(&vm_page_queue_free_lock); + free_count = vm_page_free_count; + if ((free_count >= vm_page_free_target) && + (vm_page_free_wanted == 0)) { + vm_page_unlock_queues(); + break; + } + mutex_unlock(&vm_page_queue_free_lock); + + /* + * Sometimes we have to pause: + * 1) No inactive pages - nothing to do. + * 2) Flow control - wait for untrusted pagers to catch up. + */ + + if (queue_empty(&vm_page_queue_inactive) || + ((--loop_detect) == 0) || + (burst_count >= vm_pageout_burst_max)) { + unsigned int pages, msecs; + int wait_result; + + consider_machine_adjust(); + /* + * vm_pageout_burst_wait is msecs/page. + * If there is nothing for us to do, we wait + * at least vm_pageout_empty_wait msecs. + */ + pages = burst_count; + + if (loop_detect == 0) { + printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n"); + msecs = vm_free_page_pause; + } + else { + msecs = burst_count * vm_pageout_burst_wait; + } + + if (queue_empty(&vm_page_queue_inactive) && + (msecs < vm_pageout_empty_wait)) + msecs = vm_pageout_empty_wait; + vm_page_unlock_queues(); + assert_wait_timeout(msecs, THREAD_INTERRUPTIBLE); + counter(c_vm_pageout_scan_block++); + + /* + * Unfortunately, we don't have call_continuation + * so we can't rely on tail-recursion. 
+ */ + wait_result = thread_block((void (*)(void)) 0); + if (wait_result != THREAD_TIMED_OUT) + thread_cancel_timer(); + vm_pageout_scan_continue(); + goto Restart; + /*NOTREACHED*/ + } + + vm_pageout_inactive++; + m = (vm_page_t) queue_first(&vm_page_queue_inactive); + + if ((vm_page_free_count <= vm_page_free_reserved) && + (IP_VALID(memory_manager_default))) { + /* + * We're really low on memory. Try to select a page that + * would go directly to the default_pager. + * If there are no such pages, we have to page out a + * page backed by an EMM, so that the default_pager + * can recover it eventually. + */ + vm_pageout_scan_inactive_emm_throttle++; + do { + assert(!m->active && m->inactive); + object = m->object; + + if (vm_object_lock_try(object)) { +#if 0 + if (object->pager_trusted || + object->internal) { + /* found one ! */ + vm_pageout_scan_inactive_emm_throttle_success++; + goto object_locked_inactive; + } +#else + vm_pageout_scan_inactive_emm_throttle_success++; + goto object_locked_inactive; +#endif /* 0 */ + vm_object_unlock(object); + } + m = (vm_page_t) queue_next(&m->pageq); + } while (!queue_end(&vm_page_queue_inactive, + (queue_entry_t) m)); + if (queue_end(&vm_page_queue_inactive, + (queue_entry_t) m)) { + vm_pageout_scan_inactive_emm_throttle_failure++; + /* + * We should check the "active" queue + * for good candidates to page out. + */ + need_more_inactive_pages = TRUE; + + m = (vm_page_t) + queue_first(&vm_page_queue_inactive); + } + } + + assert(!m->active && m->inactive); + object = m->object; + + /* + * Try to lock object; since we've got the + * page queues lock, we can only try for this one. + */ + + if (!vm_object_lock_try(object)) { + /* + * Move page to end and continue. 
+ */ + queue_remove(&vm_page_queue_inactive, m, + vm_page_t, pageq); + queue_enter(&vm_page_queue_inactive, m, + vm_page_t, pageq); + vm_page_unlock_queues(); + mutex_pause(); + vm_pageout_inactive_nolock++; + continue; + } + + object_locked_inactive: + /* + * Paging out pages of objects which pager is being + * created by another thread must be avoided, because + * this thread may claim for memory, thus leading to a + * possible dead lock between it and the pageout thread + * which will wait for pager creation, if such pages are + * finally chosen. The remaining assumption is that there + * will finally be enough available pages in the inactive + * pool to page out in order to satisfy all memory claimed + * by the thread which concurrently creates the pager. + */ + + if (!object->pager_initialized && object->pager_created) { + /* + * Move page to end and continue, hoping that + * there will be enough other inactive pages to + * page out so that the thread which currently + * initializes the pager will succeed. + */ + queue_remove(&vm_page_queue_inactive, m, + vm_page_t, pageq); + queue_enter(&vm_page_queue_inactive, m, + vm_page_t, pageq); + vm_page_unlock_queues(); + vm_object_unlock(object); + vm_pageout_inactive_avoid++; + continue; + } + + /* + * Remove the page from the inactive list. + */ + + queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq); + m->inactive = FALSE; + if (!m->fictitious) + vm_page_inactive_count--; + + if (m->busy || !object->alive) { + /* + * Somebody is already playing with this page. + * Leave it off the pageout queues. + */ + + vm_page_unlock_queues(); + vm_object_unlock(object); + vm_pageout_inactive_busy++; + continue; + } + + /* + * If it's absent or in error, we can reclaim the page. 
+ */ + + if (m->absent || m->error) { + vm_pageout_inactive_absent++; + reclaim_page: + vm_page_free(m); + vm_page_unlock_queues(); + vm_object_unlock(object); + continue; + } + + assert(!m->private); + assert(!m->fictitious); + + /* + * If already cleaning this page in place, convert from + * "adjacent" to "target". We can leave the page mapped, + * and vm_pageout_object_terminate will determine whether + * to free or reactivate. + */ + + if (m->cleaning) { +#if MACH_CLUSTER_STATS + vm_pageout_cluster_conversions++; +#endif + if (m->prep_pin_count == 0) { + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + } + vm_object_unlock(object); + vm_page_unlock_queues(); + continue; + } + + /* + * If it's being used, reactivate. + * (Fictitious pages are either busy or absent.) + */ + + if (m->reference || pmap_is_referenced(m->phys_addr)) { + vm_pageout_inactive_used++; + reactivate_page: +#if ADVISORY_PAGEOUT + if (m->discard_request) { + m->discard_request = FALSE; + } +#endif /* ADVISORY_PAGEOUT */ + vm_object_unlock(object); + vm_page_activate(m); + VM_STAT(reactivations++); + vm_page_unlock_queues(); + continue; + } + + if (m->prep_pin_count != 0) { + boolean_t pinned = FALSE; + + vm_page_pin_lock(); + if (m->pin_count != 0) { + /* skip and reactivate pinned page */ + pinned = TRUE; + vm_pageout_inactive_pinned++; + } else { + /* page is prepped; send it into limbo */ + m->limbo = TRUE; + vm_pageout_inactive_limbo++; + } + vm_page_pin_unlock(); + if (pinned) + goto reactivate_page; + } + +#if ADVISORY_PAGEOUT + if (object->advisory_pageout) { + boolean_t do_throttle; + ipc_port_t port; + vm_object_offset_t discard_offset; + + if (m->discard_request) { + vm_stat_discard_failure++; + goto mandatory_pageout; + } + + assert(object->pager_initialized); + m->discard_request = TRUE; + port = object->pager; + + /* system-wide throttle */ + do_throttle = (vm_page_free_count <= + vm_page_free_reserved); + if (!do_throttle) { + /* throttle on this pager */ + /* XXX lock 
ordering ? */ + ip_lock(port); + do_throttle= imq_full(&port->ip_messages); + ip_unlock(port); + } + if (do_throttle) { + vm_stat_discard_throttle++; +#if 0 + /* ignore this page and skip to next */ + vm_page_unlock_queues(); + vm_object_unlock(object); + continue; +#else + /* force mandatory pageout */ + goto mandatory_pageout; +#endif + } + + /* proceed with discard_request */ + vm_page_activate(m); + vm_stat_discard++; + VM_STAT(reactivations++); + discard_offset = m->offset + object->paging_offset; + vm_stat_discard_sent++; + vm_page_unlock_queues(); + vm_object_unlock(object); +/* + memory_object_discard_request(object->pager, + object->pager_request, + discard_offset, + PAGE_SIZE); +*/ + continue; + } + mandatory_pageout: +#endif /* ADVISORY_PAGEOUT */ + + XPR(XPR_VM_PAGEOUT, + "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n", + (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0); + + /* + * Eliminate all mappings. + */ + + m->busy = TRUE; + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + if (!m->dirty) + m->dirty = pmap_is_modified(m->phys_addr); + + /* + * If it's clean and not precious, we can free the page. + */ + + if (!m->dirty && !m->precious) { + vm_pageout_inactive_clean++; + goto reclaim_page; + } + vm_page_unlock_queues(); + + /* + * If there is no memory object for the page, create + * one and hand it to the default pager. + */ + + if (!object->pager_initialized) + vm_object_collapse(object); + if (!object->pager_initialized) + vm_object_pager_create(object); + if (!object->pager_initialized) { + /* + * Still no pager for the object. + * Reactivate the page. + * + * Should only happen if there is no + * default pager. + */ + vm_page_lock_queues(); + vm_page_activate(m); + vm_page_unlock_queues(); + + /* + * And we are done with it. 
+ */ + PAGE_WAKEUP_DONE(m); + vm_object_unlock(object); + + /* + * break here to get back to the preemption + * point in the outer loop so that we don't + * spin forever if there is no default pager. + */ + vm_pageout_dirty_no_pager++; + /* + * Well there's no pager, but we can still reclaim + * free pages out of the inactive list. Go back + * to top of loop and look for suitable pages. + */ + continue; + } + + if (object->pager_initialized && object->pager == IP_NULL) { + /* + * This pager has been destroyed by either + * memory_object_destroy or vm_object_destroy, and + * so there is nowhere for the page to go. + * Just free the page. + */ + VM_PAGE_FREE(m); + vm_object_unlock(object); + continue; + } + + vm_pageout_inactive_dirty++; +/* + if (!object->internal) + burst_count++; +*/ + vm_object_paging_begin(object); + vm_object_unlock(object); + vm_pageout_cluster(m); /* flush it */ + } + consider_machine_adjust(); +} + +counter(unsigned int c_vm_pageout_scan_continue = 0;) + +void +vm_pageout_scan_continue(void) +{ + /* + * We just paused to let the pagers catch up. + * If vm_page_laundry_count is still high, + * then we aren't waiting long enough. + * If we have paused some vm_pageout_pause_max times without + * adjusting vm_pageout_burst_wait, it might be too big, + * so we decrease it. 
+ */ + + vm_page_lock_queues(); + counter(++c_vm_pageout_scan_continue); + if (vm_page_laundry_count > vm_pageout_burst_min) { + vm_pageout_burst_wait++; + vm_pageout_pause_count = 0; + } else if (++vm_pageout_pause_count > vm_pageout_pause_max) { + vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4; + if (vm_pageout_burst_wait < 1) + vm_pageout_burst_wait = 1; + vm_pageout_pause_count = 0; + } + vm_page_unlock_queues(); +} + +void vm_page_free_reserve(int pages); +int vm_page_free_count_init; + +void +vm_page_free_reserve( + int pages) +{ + int free_after_reserve; + + vm_page_free_reserved += pages; + + free_after_reserve = vm_page_free_count_init - vm_page_free_reserved; + + vm_page_free_min = vm_page_free_reserved + + VM_PAGE_FREE_MIN(free_after_reserve); + + vm_page_free_target = vm_page_free_reserved + + VM_PAGE_FREE_TARGET(free_after_reserve); + + if (vm_page_free_target < vm_page_free_min + 5) + vm_page_free_target = vm_page_free_min + 5; +} + +/* + * vm_pageout is the high level pageout daemon. + */ + + +void +vm_pageout(void) +{ + thread_t self = current_thread(); + + /* + * Set thread privileges. + */ + self->vm_privilege = TRUE; + stack_privilege(self); + thread_swappable(current_act(), FALSE); + + /* + * Initialize some paging parameters. 
+ */ + + if (vm_page_laundry_max == 0) + vm_page_laundry_max = VM_PAGE_LAUNDRY_MAX; + + if (vm_pageout_burst_max == 0) + vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX; + + if (vm_pageout_burst_wait == 0) + vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT; + + if (vm_pageout_empty_wait == 0) + vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT; + + vm_page_free_count_init = vm_page_free_count; + /* + * even if we've already called vm_page_free_reserve + * call it again here to insure that the targets are + * accurately calculated (it uses vm_page_free_count_init) + * calling it with an arg of 0 will not change the reserve + * but will re-calculate free_min and free_target + */ + if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED) + vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved); + else + vm_page_free_reserve(0); + + /* + * vm_pageout_scan will set vm_page_inactive_target. + * + * The pageout daemon is never done, so loop forever. + * We should call vm_pageout_scan at least once each + * time we are woken, even if vm_page_free_wanted is + * zero, to check vm_page_free_target and + * vm_page_inactive_target. + */ + for (;;) { + vm_pageout_scan(); + /* we hold vm_page_queue_free_lock now */ + assert(vm_page_free_wanted == 0); + assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT); + mutex_unlock(&vm_page_queue_free_lock); + counter(c_vm_pageout_block++); + thread_block((void (*)(void)) 0); + } + /*NOTREACHED*/ +} + + +void +upl_dealloc( + upl_t upl) +{ + upl->ref_count -= 1; + if(upl->ref_count == 0) { + upl_destroy(upl); + } +} + + +/* + * Routine: vm_fault_list_request + * Purpose: + * Cause the population of a portion of a vm_object. + * Depending on the nature of the request, the pages + * returned may be contain valid data or be uninitialized. + * A page list structure, listing the physical pages + * will be returned upon request. + * This function is called by the file system or any other + * supplier of backing store to a pager. 
+ * IMPORTANT NOTE: The caller must still respect the relationship + * between the vm_object and its backing memory object. The + * caller MUST NOT substitute changes in the backing file + * without first doing a memory_object_lock_request on the + * target range unless it is know that the pages are not + * shared with another entity at the pager level. + * Copy_in_to: + * if a page list structure is present + * return the mapped physical pages, where a + * page is not present, return a non-initialized + * one. If the no_sync bit is turned on, don't + * call the pager unlock to synchronize with other + * possible copies of the page. Leave pages busy + * in the original object, if a page list structure + * was specified. When a commit of the page list + * pages is done, the dirty bit will be set for each one. + * Copy_out_from: + * If a page list structure is present, return + * all mapped pages. Where a page does not exist + * map a zero filled one. Leave pages busy in + * the original object. If a page list structure + * is not specified, this call is a no-op. + * + * Note: access of default pager objects has a rather interesting + * twist. The caller of this routine, presumably the file system + * page cache handling code, will never actually make a request + * against a default pager backed object. Only the default + * pager will make requests on backing store related vm_objects + * In this way the default pager can maintain the relationship + * between backing store files (abstract memory objects) and + * the vm_objects (cache objects), they support. 
+ * + */ +kern_return_t +vm_fault_list_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + upl_t *upl_ptr, + upl_page_info_t **user_page_list_ptr, + int page_list_count, + int cntrl_flags) +{ + vm_page_t dst_page; + vm_object_offset_t dst_offset = offset; + upl_page_info_t *user_page_list; + vm_size_t xfer_size = size; + boolean_t do_m_lock = FALSE; + boolean_t dirty; + upl_t upl = NULL; + int entry; + boolean_t encountered_lrp = FALSE; + + vm_page_t alias_page = NULL; + + if(cntrl_flags & UPL_SET_INTERNAL) + page_list_count = MAX_UPL_TRANSFER; + if(((user_page_list_ptr || (cntrl_flags & UPL_SET_INTERNAL)) && + !(object->private)) && (page_list_count < (size/page_size))) + return KERN_INVALID_ARGUMENT; + + if((!object->internal) && (object->paging_offset != 0)) + panic("vm_fault_list_request: vnode object with non-zero paging offset\n"); + + if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) { + return KERN_SUCCESS; + } + if(upl_ptr) { + if((cntrl_flags & UPL_SET_INTERNAL) && !(object->private)) { + upl = upl_create(TRUE); + user_page_list = (upl_page_info_t *) + (((vm_offset_t)upl) + sizeof(struct upl)); + if(user_page_list_ptr) + *user_page_list_ptr = user_page_list; + upl->flags |= UPL_INTERNAL; + } else { + upl = upl_create(FALSE); + if(user_page_list_ptr) + user_page_list = *user_page_list_ptr; + else + user_page_list = NULL; + if(object->private) { + upl->size = size; + upl->offset = offset; + *upl_ptr = upl; + if(user_page_list) { + user_page_list[0].phys_addr = offset; + user_page_list[0].device = TRUE; + } + upl->flags = UPL_DEVICE_MEMORY; + return KERN_SUCCESS; + } + + + } + upl->map_object = vm_object_allocate(size); + vm_object_lock(upl->map_object); + upl->map_object->shadow = object; + upl->size = size; + upl->offset = offset + object->paging_offset; + upl->map_object->pageout = TRUE; + upl->map_object->can_persist = FALSE; + upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; + upl->map_object->shadow_offset = 
offset; + vm_object_unlock(upl->map_object); + *upl_ptr = upl; + } + VM_PAGE_GRAB_FICTITIOUS(alias_page); + vm_object_lock(object); +#ifdef UBC_DEBUG + if(upl_ptr) + queue_enter(&object->uplq, upl, upl_t, uplq); +#endif /* UBC_DEBUG */ + vm_object_paging_begin(object); + entry = 0; + if(cntrl_flags & UPL_COPYOUT_FROM) { + upl->flags |= UPL_PAGE_SYNC_DONE; + while (xfer_size) { + if(alias_page == NULL) { + vm_object_unlock(object); + VM_PAGE_GRAB_FICTITIOUS(alias_page); + vm_object_lock(object); + } + if(((dst_page = vm_page_lookup(object, + dst_offset)) == VM_PAGE_NULL) || + dst_page->fictitious || + dst_page->absent || + dst_page->error || + (dst_page->wire_count != 0 && + !dst_page->pageout) || + ((!(dst_page->dirty || dst_page->precious || + pmap_is_modified(dst_page->phys_addr))) + && (cntrl_flags & UPL_RET_ONLY_DIRTY))) { + if(user_page_list) + user_page_list[entry].phys_addr = 0; + } else { + + if(dst_page->busy && + (!(dst_page->list_req_pending && + dst_page->pageout))) { + if(cntrl_flags & UPL_NOBLOCK) { + if(user_page_list) + user_page_list[entry] + .phys_addr = 0; + entry++; + dst_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + continue; + } + /*someone else is playing with the */ + /* page. We will have to wait. */ + PAGE_ASSERT_WAIT( + dst_page, THREAD_UNINT); + vm_object_unlock(object); + thread_block((void(*)(void))0); + vm_object_lock(object); + continue; + } + /* Someone else already cleaning the page? */ + if((dst_page->cleaning || dst_page->absent || + dst_page->prep_pin_count != 0 || + dst_page->wire_count != 0) && + !dst_page->list_req_pending) { + if(user_page_list) + user_page_list[entry].phys_addr = 0; + entry++; + dst_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + continue; + } + /* eliminate all mappings from the */ + /* original object and its prodigy */ + + vm_page_lock_queues(); + pmap_page_protect(dst_page->phys_addr, + VM_PROT_NONE); + + /* pageout statistics gathering. 
count */ + /* all the pages we will page out that */ + /* were not counted in the initial */ + /* vm_pageout_scan work */ + if(dst_page->list_req_pending) + encountered_lrp = TRUE; + if((dst_page->dirty || + (dst_page->object->internal && + dst_page->precious)) && + (dst_page->list_req_pending + == FALSE)) { + if(encountered_lrp) { + CLUSTER_STAT + (pages_at_higher_offsets++;) + } else { + CLUSTER_STAT + (pages_at_lower_offsets++;) + } + } + + /* Turn off busy indication on pending */ + /* pageout. Note: we can only get here */ + /* in the request pending case. */ + dst_page->list_req_pending = FALSE; + dst_page->busy = FALSE; + dst_page->cleaning = FALSE; + + dirty = pmap_is_modified(dst_page->phys_addr); + dirty = dirty ? TRUE : dst_page->dirty; + + /* use pageclean setup, it is more convenient */ + /* even for the pageout cases here */ + vm_pageclean_setup(dst_page, alias_page, + upl->map_object, size - xfer_size); + + if(!dirty) { + dst_page->dirty = FALSE; + dst_page->precious = TRUE; + } + + if(dst_page->pageout) + dst_page->busy = TRUE; + + alias_page->absent = FALSE; + alias_page = NULL; + if(!(cntrl_flags & UPL_CLEAN_IN_PLACE)) { + /* deny access to the target page */ + /* while it is being worked on */ + if((!dst_page->pageout) && + (dst_page->wire_count == 0)) { + dst_page->busy = TRUE; + dst_page->pageout = TRUE; + vm_page_wire(dst_page); + } + } + if(user_page_list) { + user_page_list[entry].phys_addr + = dst_page->phys_addr; + user_page_list[entry].dirty = + dst_page->dirty; + user_page_list[entry].pageout = + dst_page->pageout; + user_page_list[entry].absent = + dst_page->absent; + user_page_list[entry].precious = + dst_page->precious; + } + + vm_page_unlock_queues(); + } + entry++; + dst_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + } + } else { + while (xfer_size) { + if(alias_page == NULL) { + vm_object_unlock(object); + VM_PAGE_GRAB_FICTITIOUS(alias_page); + vm_object_lock(object); + } + dst_page = vm_page_lookup(object, dst_offset); + 
if(dst_page != VM_PAGE_NULL) { + if((dst_page->cleaning) && + !(dst_page->list_req_pending)) { + /*someone else is writing to the */ + /* page. We will have to wait. */ + PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT); + vm_object_unlock(object); + thread_block((void(*)(void))0); + vm_object_lock(object); + continue; + } + if ((dst_page->fictitious && + dst_page->list_req_pending)) { + /* dump the fictitious page */ + dst_page->list_req_pending = FALSE; + dst_page->clustered = FALSE; + vm_page_lock_queues(); + vm_page_free(dst_page); + vm_page_unlock_queues(); + } else if ((dst_page->absent && + dst_page->list_req_pending)) { + /* the default_pager case */ + dst_page->list_req_pending = FALSE; + dst_page->busy = FALSE; + dst_page->clustered = FALSE; + } + } + if((dst_page = vm_page_lookup( + object, dst_offset)) == VM_PAGE_NULL) { + /* need to allocate a page */ + dst_page = vm_page_alloc(object, dst_offset); + if (dst_page == VM_PAGE_NULL) { + vm_object_unlock(object); + VM_PAGE_WAIT(); + vm_object_lock(object); + continue; + } + dst_page->busy = FALSE; +#if 0 + if(cntrl_flags & UPL_NO_SYNC) { + dst_page->page_lock = 0; + dst_page->unlock_request = 0; + } +#endif + dst_page->absent = TRUE; + object->absent_count++; + } +#if 1 + if(cntrl_flags & UPL_NO_SYNC) { + dst_page->page_lock = 0; + dst_page->unlock_request = 0; + } +#endif /* 1 */ + dst_page->overwriting = TRUE; + if(dst_page->fictitious) { + panic("need corner case for fictitious page"); + } + if(dst_page->page_lock) { + do_m_lock = TRUE; + } + if(upl_ptr) { + + /* eliminate all mappings from the */ + /* original object and its prodigy */ + + if(dst_page->busy) { + /*someone else is playing with the */ + /* page. We will have to wait. 
*/ + PAGE_ASSERT_WAIT( + dst_page, THREAD_UNINT); + vm_object_unlock(object); + thread_block((void(*)(void))0); + vm_object_lock(object); + continue; + } + + vm_page_lock_queues(); + pmap_page_protect(dst_page->phys_addr, + VM_PROT_NONE); + dirty = pmap_is_modified(dst_page->phys_addr); + dirty = dirty ? TRUE : dst_page->dirty; + + vm_pageclean_setup(dst_page, alias_page, + upl->map_object, size - xfer_size); + + if(cntrl_flags & UPL_CLEAN_IN_PLACE) { + /* clean in place for read implies */ + /* that a write will be done on all */ + /* the pages that are dirty before */ + /* a upl commit is done. The caller */ + /* is obligated to preserve the */ + /* contents of all pages marked */ + /* dirty. */ + upl->flags |= UPL_CLEAR_DIRTY; + } + + if(!dirty) { + dst_page->dirty = FALSE; + dst_page->precious = TRUE; + } + + if (dst_page->wire_count == 0) { + /* deny access to the target page while */ + /* it is being worked on */ + dst_page->busy = TRUE; + } else { + vm_page_wire(dst_page); + } + /* expect the page to be used */ + dst_page->reference = TRUE; + dst_page->precious = + (cntrl_flags & UPL_PRECIOUS) + ? TRUE : FALSE; + alias_page->absent = FALSE; + alias_page = NULL; + if(user_page_list) { + user_page_list[entry].phys_addr + = dst_page->phys_addr; + user_page_list[entry].dirty = + dst_page->dirty; + user_page_list[entry].pageout = + dst_page->pageout; + user_page_list[entry].absent = + dst_page->absent; + user_page_list[entry].precious = + dst_page->precious; + } + vm_page_unlock_queues(); + } + entry++; + dst_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + } + } + if(alias_page != NULL) { + vm_page_lock_queues(); + vm_page_free(alias_page); + vm_page_unlock_queues(); + } + if(do_m_lock) { + vm_prot_t access_required; + /* call back all associated pages from other users of the pager */ + /* all future updates will be on data which is based on the */ + /* changes we are going to make here. 
Note: it is assumed that */ + /* we already hold copies of the data so we will not be seeing */ + /* an avalanche of incoming data from the pager */ + access_required = (cntrl_flags & UPL_COPYOUT_FROM) + ? VM_PROT_READ : VM_PROT_WRITE; + while (TRUE) { + kern_return_t rc; + thread_t thread; + + if(!object->pager_ready) { + thread = current_thread(); + vm_object_assert_wait(object, + VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT); + vm_object_unlock(object); + thread_block((void (*)(void))0); + if (thread->wait_result != THREAD_AWAKENED) { + return(KERN_FAILURE); + } + vm_object_lock(object); + continue; + } + + vm_object_unlock(object); + + if (rc = memory_object_data_unlock( + object->pager, + object->pager_request, + dst_offset + object->paging_offset, + size, + access_required)) { + if (rc == MACH_SEND_INTERRUPTED) + continue; + else + return KERN_FAILURE; + } + break; + + } + /* lets wait on the last page requested */ + /* NOTE: we will have to update lock completed routine to signal */ + if(dst_page != VM_PAGE_NULL && + (access_required & dst_page->page_lock) != access_required) { + PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT); + thread_block((void (*)(void))0); + vm_object_lock(object); + } + } + vm_object_unlock(object); + return KERN_SUCCESS; +} + + +kern_return_t +upl_system_list_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + vm_size_t super_cluster, + upl_t *upl, + upl_page_info_t **user_page_list_ptr, + int page_list_count, + int cntrl_flags) +{ + if(object->paging_offset > offset) + return KERN_FAILURE; + offset = offset - object->paging_offset; + +/* turns off super cluster exercised by the default_pager */ +/* +super_cluster = size; +*/ + if ((super_cluster > size) && + (vm_page_free_count > vm_page_free_reserved)) { + + vm_object_offset_t base_offset; + vm_size_t super_size; + + base_offset = (offset & + ~((vm_object_offset_t) super_cluster - 1)); + super_size = (offset+size) > (base_offset + super_cluster) ? 
+ super_cluster<<1 : super_cluster; + super_size = ((base_offset + super_size) > object->size) ? + (object->size - base_offset) : super_size; + if(offset > (base_offset + super_size)) + panic("upl_system_list_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset, base_offset, super_size, super_cluster, size, object->paging_offset); + /* apparently there is a case where the vm requests a */ + /* page to be written out who's offset is beyond the */ + /* object size */ + if((offset + size) > (base_offset + super_size)) + super_size = (offset + size) - base_offset; + + offset = base_offset; + size = super_size; + } + vm_fault_list_request(object, offset, size, upl, user_page_list_ptr, + page_list_count, cntrl_flags); +} + + +kern_return_t +uc_upl_map( + vm_map_t map, + upl_t upl, + vm_offset_t *dst_addr) +{ + vm_size_t size; + vm_object_offset_t offset; + vm_offset_t addr; + vm_page_t m; + kern_return_t kr; + + /* check to see if already mapped */ + if(UPL_PAGE_LIST_MAPPED & upl->flags) + return KERN_FAILURE; + + offset = 0; /* Always map the entire object */ + size = upl->size; + + vm_object_lock(upl->map_object); + upl->map_object->ref_count++; + vm_object_res_reference(upl->map_object); + vm_object_unlock(upl->map_object); + + *dst_addr = 0; + + + /* NEED A UPL_MAP ALIAS */ + kr = vm_map_enter(map, dst_addr, size, (vm_offset_t) 0, TRUE, + upl->map_object, offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + + if (kr != KERN_SUCCESS) + return(kr); + + for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) { + m = vm_page_lookup(upl->map_object, offset); + if(m) { + PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, TRUE); + } + offset+=PAGE_SIZE_64; + } + + upl->flags |= UPL_PAGE_LIST_MAPPED; + upl->kaddr = *dst_addr; + return KERN_SUCCESS; +} + + +kern_return_t +uc_upl_un_map( + vm_map_t map, + upl_t upl) +{ + vm_size_t size; + + if(upl->flags & UPL_PAGE_LIST_MAPPED) { + size = upl->size; + vm_deallocate(map, upl->kaddr, 
size); + upl->flags &= ~UPL_PAGE_LIST_MAPPED; + upl->kaddr = (vm_offset_t) 0; + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } +} + +kern_return_t +uc_upl_commit_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int flags, + upl_page_info_t *page_list) +{ + vm_size_t xfer_size = size; + vm_object_t shadow_object = upl->map_object->shadow; + vm_object_t object = upl->map_object; + vm_object_offset_t target_offset; + vm_object_offset_t page_offset; + int entry; + + if(upl->flags & UPL_DEVICE_MEMORY) { + xfer_size = 0; + } else if ((offset + size) > upl->size) { + return KERN_FAILURE; + } + + vm_object_lock(shadow_object); + + entry = offset/PAGE_SIZE; + target_offset = (vm_object_offset_t)offset; + while(xfer_size) { + vm_page_t t,m; + upl_page_info_t *p; + + if((t = vm_page_lookup(object, target_offset)) != NULL) { + + t->pageout = FALSE; + page_offset = t->offset; + VM_PAGE_FREE(t); + t = VM_PAGE_NULL; + m = vm_page_lookup(shadow_object, + page_offset + object->shadow_offset); + if(m != VM_PAGE_NULL) { + vm_object_paging_end(shadow_object); + vm_page_lock_queues(); + if ((upl->flags & UPL_CLEAR_DIRTY) || + (flags & UPL_COMMIT_CLEAR_DIRTY)) { + pmap_clear_modify(m->phys_addr); + m->dirty = FALSE; + } + if(page_list) { + p = &(page_list[entry]); + if(p->phys_addr && p->pageout && !m->pageout) { + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + } else if (page_list[entry].phys_addr && + !p->pageout && m->pageout) { + m->pageout = FALSE; + m->absent = FALSE; + m->overwriting = FALSE; + vm_page_unwire(m); + PAGE_WAKEUP_DONE(m); + } + page_list[entry].phys_addr = 0; + } + if(m->laundry) { + vm_page_laundry_count--; + m->laundry = FALSE; + if (vm_page_laundry_count < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) + &vm_page_laundry_count); + } + } + if(m->pageout) { + m->cleaning = FALSE; + m->pageout = FALSE; +#if MACH_CLUSTER_STATS + if (m->wanted) vm_pageout_target_collisions++; +#endif + 
pmap_page_protect(m->phys_addr, VM_PROT_NONE); + m->dirty = pmap_is_modified(m->phys_addr); + if(m->dirty) { + CLUSTER_STAT( + vm_pageout_target_page_dirtied++;) + vm_page_unwire(m);/* reactivates */ + VM_STAT(reactivations++); + PAGE_WAKEUP_DONE(m); + } else if (m->prep_pin_count != 0) { + vm_page_pin_lock(); + if (m->pin_count != 0) { + /* page is pinned; reactivate */ + CLUSTER_STAT( + vm_pageout_target_page_pinned++;) + vm_page_unwire(m);/* reactivates */ + VM_STAT(reactivations++); + PAGE_WAKEUP_DONE(m); + } else { + /* + * page is prepped but not pinned; + * send it into limbo. Note that + * vm_page_free (which will be + * called after releasing the pin + * lock) knows how to handle a page + * with limbo set. + */ + m->limbo = TRUE; + CLUSTER_STAT( + vm_pageout_target_page_limbo++;) + } + vm_page_pin_unlock(); + if (m->limbo) + vm_page_free(m); + } else { + CLUSTER_STAT( + vm_pageout_target_page_freed++;) + vm_page_free(m);/* clears busy, etc. */ + } + vm_page_unlock_queues(); + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; + continue; + } + if (flags & UPL_COMMIT_INACTIVATE) { + vm_page_deactivate(m); + m->reference = FALSE; + pmap_clear_reference(m->phys_addr); + } else if (!m->active && !m->inactive) { + if (m->reference || m->prep_pin_count != 0) + vm_page_activate(m); + else + vm_page_deactivate(m); + } +#if MACH_CLUSTER_STATS + m->dirty = pmap_is_modified(m->phys_addr); + + if (m->dirty) vm_pageout_cluster_dirtied++; + else vm_pageout_cluster_cleaned++; + if (m->wanted) vm_pageout_cluster_collisions++; +#else + m->dirty = 0; +#endif + + if((m->busy) && (m->cleaning)) { + /* the request_page_list case */ + if(m->absent) { + m->absent = FALSE; + if(shadow_object->absent_count == 1) + vm_object_absent_release(shadow_object); + else + shadow_object->absent_count--; + } + m->overwriting = FALSE; + m->busy = FALSE; + m->dirty = FALSE; + } + else if (m->overwriting) { + /* alternate request page list, write to + /* page_list case. 
Occurs when the original + /* page was wired at the time of the list + /* request */ + assert(m->wire_count != 0); + vm_page_unwire(m);/* reactivates */ + m->overwriting = FALSE; + } + m->cleaning = FALSE; + /* It is a part of the semantic of COPYOUT_FROM */ + /* UPLs that a commit implies cache sync */ + /* between the vm page and the backing store */ + /* this can be used to strip the precious bit */ + /* as well as clean */ + if (upl->flags & UPL_PAGE_SYNC_DONE) + m->precious = FALSE; + + if (flags & UPL_COMMIT_SET_DIRTY) { + m->dirty = TRUE; + } + /* + * Wakeup any thread waiting for the page to be un-cleaning. + */ + PAGE_WAKEUP(m); + vm_page_unlock_queues(); + + } + } + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; + } + + vm_object_unlock(shadow_object); + if(flags & UPL_COMMIT_FREE_ON_EMPTY) { + if((upl->flags & UPL_DEVICE_MEMORY) + || (queue_empty(&upl->map_object->memq))) { + upl_dealloc(upl); + } + } + return KERN_SUCCESS; +} + +uc_upl_abort_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int error) +{ + vm_size_t xfer_size = size; + vm_object_t shadow_object = upl->map_object->shadow; + vm_object_t object = upl->map_object; + vm_object_offset_t target_offset; + vm_object_offset_t page_offset; + int entry; + + if(upl->flags & UPL_DEVICE_MEMORY) { + xfer_size = 0; + } else if ((offset + size) > upl->size) { + return KERN_FAILURE; + } + + + vm_object_lock(shadow_object); + + entry = offset/PAGE_SIZE; + target_offset = (vm_object_offset_t)offset; + while(xfer_size) { + vm_page_t t,m; + upl_page_info_t *p; + + if((t = vm_page_lookup(object, target_offset)) != NULL) { + + t->pageout = FALSE; + page_offset = t->offset; + VM_PAGE_FREE(t); + t = VM_PAGE_NULL; + m = vm_page_lookup(shadow_object, + page_offset + object->shadow_offset); + if(m != VM_PAGE_NULL) { + vm_object_paging_end(m->object); + vm_page_lock_queues(); + if(m->absent) { + /* COPYOUT = FALSE case */ + /* check for error conditions which must */ + /* be passed 
back to the pages customer */ + if(error & UPL_ABORT_RESTART) { + m->restart = TRUE; + m->absent = FALSE; + vm_object_absent_release(m->object); + m->page_error = KERN_MEMORY_ERROR; + m->error = TRUE; + } else if(error & UPL_ABORT_UNAVAILABLE) { + m->restart = FALSE; + m->unusual = TRUE; + m->clustered = FALSE; + } else if(error & UPL_ABORT_ERROR) { + m->restart = FALSE; + m->absent = FALSE; + vm_object_absent_release(m->object); + m->page_error = KERN_MEMORY_ERROR; + m->error = TRUE; + } else if(error & UPL_ABORT_DUMP_PAGES) { + m->clustered = TRUE; + } else { + m->clustered = TRUE; + } + + + m->cleaning = FALSE; + m->overwriting = FALSE; + PAGE_WAKEUP_DONE(m); + if(m->clustered) { + vm_page_free(m); + } else { + vm_page_activate(m); + } + + vm_page_unlock_queues(); + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; + continue; + } + /* + * Handle the trusted pager throttle. + */ + if (m->laundry) { + vm_page_laundry_count--; + m->laundry = FALSE; + if (vm_page_laundry_count + < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) + &vm_page_laundry_count); + } + } + if(m->pageout) { + assert(m->busy); + assert(m->wire_count == 1); + m->pageout = FALSE; + vm_page_unwire(m); + } + m->cleaning = FALSE; + m->busy = FALSE; + m->overwriting = FALSE; +#if MACH_PAGEMAP + vm_external_state_clr( + m->object->existence_map, m->offset); +#endif /* MACH_PAGEMAP */ + if(error & UPL_ABORT_DUMP_PAGES) { + vm_page_free(m); + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + } else { + PAGE_WAKEUP(m); + } + vm_page_unlock_queues(); + } + } + target_offset += PAGE_SIZE_64; + xfer_size -= PAGE_SIZE; + entry++; + } + vm_object_unlock(shadow_object); + if(error & UPL_ABORT_FREE_ON_EMPTY) { + if((upl->flags & UPL_DEVICE_MEMORY) + || (queue_empty(&upl->map_object->memq))) { + upl_dealloc(upl); + } + } + return KERN_SUCCESS; +} + +kern_return_t +uc_upl_abort( + upl_t upl, + int error) +{ + vm_object_t object = NULL; + vm_object_t shadow_object = 
NULL; + vm_object_offset_t offset; + vm_object_offset_t shadow_offset; + vm_object_offset_t target_offset; + int i; + vm_page_t t,m; + + if(upl->flags & UPL_DEVICE_MEMORY) { + upl_dealloc(upl); + return KERN_SUCCESS; + } + object = upl->map_object; + + if(object == NULL) { + panic("upl_abort: upl object is not backed by an object"); + return KERN_INVALID_ARGUMENT; + } + + shadow_object = upl->map_object->shadow; + shadow_offset = upl->map_object->shadow_offset; + offset = 0; + vm_object_lock(shadow_object); + for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) { + if((t = vm_page_lookup(object,offset)) != NULL) { + target_offset = t->offset + shadow_offset; + if((m = vm_page_lookup(shadow_object, target_offset)) != NULL) { + vm_object_paging_end(m->object); + vm_page_lock_queues(); + if(m->absent) { + /* COPYOUT = FALSE case */ + /* check for error conditions which must */ + /* be passed back to the pages customer */ + if(error & UPL_ABORT_RESTART) { + m->restart = TRUE; + m->absent = FALSE; + vm_object_absent_release(m->object); + m->page_error = KERN_MEMORY_ERROR; + m->error = TRUE; + } else if(error & UPL_ABORT_UNAVAILABLE) { + m->restart = FALSE; + m->unusual = TRUE; + m->clustered = FALSE; + } else if(error & UPL_ABORT_ERROR) { + m->restart = FALSE; + m->absent = FALSE; + vm_object_absent_release(m->object); + m->page_error = KERN_MEMORY_ERROR; + m->error = TRUE; + } else if(error & UPL_ABORT_DUMP_PAGES) { + m->clustered = TRUE; + } else { + m->clustered = TRUE; + } + + m->cleaning = FALSE; + m->overwriting = FALSE; + PAGE_WAKEUP_DONE(m); + if(m->clustered) { + vm_page_free(m); + } else { + vm_page_activate(m); + } + vm_page_unlock_queues(); + continue; + } + /* + * Handle the trusted pager throttle. 
+ */ + if (m->laundry) { + vm_page_laundry_count--; + m->laundry = FALSE; + if (vm_page_laundry_count + < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) + &vm_page_laundry_count); + } + } + if(m->pageout) { + assert(m->busy); + assert(m->wire_count == 1); + m->pageout = FALSE; + vm_page_unwire(m); + } + m->cleaning = FALSE; + m->busy = FALSE; + m->overwriting = FALSE; +#if MACH_PAGEMAP + vm_external_state_clr( + m->object->existence_map, m->offset); +#endif /* MACH_PAGEMAP */ + if(error & UPL_ABORT_DUMP_PAGES) { + vm_page_free(m); + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + } else { + PAGE_WAKEUP(m); + } + vm_page_unlock_queues(); + } + } + } + vm_object_unlock(shadow_object); + /* Remove all the pages from the map object so */ + /* vm_pageout_object_terminate will work properly. */ + while (!queue_empty(&upl->map_object->memq)) { + vm_page_t p; + + p = (vm_page_t) queue_first(&upl->map_object->memq); + + assert(p->private); + assert(p->pageout); + p->pageout = FALSE; + assert(!p->cleaning); + + VM_PAGE_FREE(p); + } + upl_dealloc(upl); + return KERN_SUCCESS; +} + +/* an option on commit should be wire */ +kern_return_t +uc_upl_commit( + upl_t upl, + upl_page_info_t *page_list) +{ + if (upl->flags & UPL_DEVICE_MEMORY) + page_list = NULL; + if ((upl->flags & UPL_CLEAR_DIRTY) || + (upl->flags & UPL_PAGE_SYNC_DONE)) { + vm_object_t shadow_object = upl->map_object->shadow; + vm_object_t object = upl->map_object; + vm_object_offset_t target_offset; + vm_size_t xfer_end; + + vm_page_t t,m; + + vm_object_lock(shadow_object); + + target_offset = object->shadow_offset; + xfer_end = upl->size + object->shadow_offset; + + while(target_offset < xfer_end) { + if ((t = vm_page_lookup(object, + target_offset - object->shadow_offset)) + != NULL) { + m = vm_page_lookup( + shadow_object, target_offset); + if(m != VM_PAGE_NULL) { + if (upl->flags & UPL_CLEAR_DIRTY) { + pmap_clear_modify(m->phys_addr); + m->dirty = FALSE; + } + /* It is a part of the 
semantic of */ + /* COPYOUT_FROM UPLs that a commit */ + /* implies cache sync between the */ + /* vm page and the backing store */ + /* this can be used to strip the */ + /* precious bit as well as clean */ + if (upl->flags & UPL_PAGE_SYNC_DONE) + m->precious = FALSE; + } + } + target_offset += PAGE_SIZE_64; + } + vm_object_unlock(shadow_object); + } + if (page_list) { + vm_object_t shadow_object = upl->map_object->shadow; + vm_object_t object = upl->map_object; + vm_object_offset_t target_offset; + vm_size_t xfer_end; + int entry; + + vm_page_t t, m; + upl_page_info_t *p; + + vm_object_lock(shadow_object); + + entry = 0; + target_offset = object->shadow_offset; + xfer_end = upl->size + object->shadow_offset; + + while(target_offset < xfer_end) { + + if ((t = vm_page_lookup(object, + target_offset - object->shadow_offset)) + == NULL) { + target_offset += PAGE_SIZE_64; + entry++; + continue; + } + + m = vm_page_lookup(shadow_object, target_offset); + if(m != VM_PAGE_NULL) { + p = &(page_list[entry]); + if(page_list[entry].phys_addr && + p->pageout && !m->pageout) { + vm_page_lock_queues(); + m->busy = TRUE; + m->pageout = TRUE; + vm_page_wire(m); + vm_page_unlock_queues(); + } else if (page_list[entry].phys_addr && + !p->pageout && m->pageout) { + vm_page_lock_queues(); + m->pageout = FALSE; + m->absent = FALSE; + m->overwriting = FALSE; + vm_page_unwire(m); + PAGE_WAKEUP_DONE(m); + vm_page_unlock_queues(); + } + page_list[entry].phys_addr = 0; + } + target_offset += PAGE_SIZE_64; + entry++; + } + + vm_object_unlock(shadow_object); + } + upl_dealloc(upl); + return KERN_SUCCESS; +} + +upl_t +upl_create( + boolean_t internal) +{ + upl_t upl; + + if(internal) { + upl = (upl_t)kalloc(sizeof(struct upl) + + (sizeof(struct upl_page_info)*MAX_UPL_TRANSFER)); + } else { + upl = (upl_t)kalloc(sizeof(struct upl)); + } + upl->flags = 0; + upl->src_object = NULL; + upl->kaddr = (vm_offset_t)0; + upl->size = 0; + upl->map_object = NULL; + upl->ref_count = 1; + 
upl_lock_init(upl); +#ifdef UBC_DEBUG + upl->ubc_alias1 = 0; + upl->ubc_alias2 = 0; +#endif /* UBC_DEBUG */ + return(upl); +} + +void +upl_destroy( + upl_t upl) +{ + +#ifdef UBC_DEBUG + { + upl_t upl_ele; + vm_object_lock(upl->map_object->shadow); + queue_iterate(&upl->map_object->shadow->uplq, + upl_ele, upl_t, uplq) { + if(upl_ele == upl) { + queue_remove(&upl->map_object->shadow->uplq, + upl_ele, upl_t, uplq); + break; + } + } + vm_object_unlock(upl->map_object->shadow); + } +#endif /* UBC_DEBUG */ + if(!(upl->flags & UPL_DEVICE_MEMORY)) + vm_object_deallocate(upl->map_object); + if(upl->flags & UPL_INTERNAL) { + kfree((vm_offset_t)upl, + sizeof(struct upl) + + (sizeof(struct upl_page_info) * MAX_UPL_TRANSFER)); + } else { + kfree((vm_offset_t)upl, sizeof(struct upl)); + } +} + +vm_size_t +upl_get_internal_pagelist_offset() +{ + return sizeof(struct upl); +} + +void +upl_set_dirty( + upl_t upl) +{ + upl->flags |= UPL_CLEAR_DIRTY; +} + +void +upl_clear_dirty( + upl_t upl) +{ + upl->flags &= ~UPL_CLEAR_DIRTY; +} + + +#ifdef MACH_BSD +boolean_t upl_page_present(upl_page_info_t *upl, int index); +boolean_t upl_dirty_page(upl_page_info_t *upl, int index); +boolean_t upl_valid_page(upl_page_info_t *upl, int index); +vm_offset_t upl_phys_page(upl_page_info_t *upl, int index); + +boolean_t upl_page_present(upl_page_info_t *upl, int index) +{ + return(UPL_PAGE_PRESENT(upl, index)); +} +boolean_t upl_dirty_page(upl_page_info_t *upl, int index) +{ + return(UPL_DIRTY_PAGE(upl, index)); +} +boolean_t upl_valid_page(upl_page_info_t *upl, int index) +{ + return(UPL_VALID_PAGE(upl, index)); +} +vm_offset_t upl_phys_page(upl_page_info_t *upl, int index) +{ + return((vm_offset_t)UPL_PHYS_PAGE(upl, index)); +} + +void vm_countdirtypages(void) +{ + vm_page_t m; + int dpages; + int pgopages; + int precpages; + + + dpages=0; + pgopages=0; + precpages=0; + + vm_page_lock_queues(); + m = (vm_page_t) queue_first(&vm_page_queue_inactive); + do { + if (m ==(vm_page_t )0) break; + + 
if(m->dirty) dpages++; + if(m->pageout) pgopages++; + if(m->precious) precpages++; + + m = (vm_page_t) queue_next(&m->pageq); + if (m ==(vm_page_t )0) break; + + } while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m)); + vm_page_unlock_queues(); + + printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages); + + dpages=0; + pgopages=0; + precpages=0; + + vm_page_lock_queues(); + m = (vm_page_t) queue_first(&vm_page_queue_active); + + do { + if(m == (vm_page_t )0) break; + if(m->dirty) dpages++; + if(m->pageout) pgopages++; + if(m->precious) precpages++; + + m = (vm_page_t) queue_next(&m->pageq); + if(m == (vm_page_t )0) break; + + } while (!queue_end(&vm_page_queue_active,(queue_entry_t) m)); + vm_page_unlock_queues(); + + printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages); + +} +#endif /* MACH_BSD */ + +#ifdef UBC_DEBUG +kern_return_t upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2) +{ + upl->ubc_alias1 = alias1; + upl->ubc_alias2 = alias2; + return KERN_SUCCESS; +} +int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2) +{ + if(al) + *al = upl->ubc_alias1; + if(al2) + *al2 = upl->ubc_alias2; + return KERN_SUCCESS; +} +#endif /* UBC_DEBUG */ + + + +#if MACH_KDB +#include +#include +#include + +#define printf kdbprintf +extern int db_indent; +void db_pageout(void); + +void +db_vm(void) +{ + extern int vm_page_gobble_count; + extern int vm_page_limbo_count, vm_page_limbo_real_count; + extern int vm_page_pin_count; + + iprintf("VM Statistics:\n"); + db_indent += 2; + iprintf("pages:\n"); + db_indent += 2; + iprintf("activ %5d inact %5d free %5d", + vm_page_active_count, vm_page_inactive_count, + vm_page_free_count); + printf(" wire %5d gobbl %5d\n", + vm_page_wire_count, vm_page_gobble_count); + iprintf("laund %5d limbo %5d lim_r %5d pin %5d\n", + vm_page_laundry_count, vm_page_limbo_count, + vm_page_limbo_real_count, vm_page_pin_count); + db_indent -= 2; + iprintf("target:\n"); + db_indent += 2; + iprintf("min 
%5d inact %5d free %5d", + vm_page_free_min, vm_page_inactive_target, + vm_page_free_target); + printf(" resrv %5d\n", vm_page_free_reserved); + db_indent -= 2; + + iprintf("burst:\n"); + db_indent += 2; + iprintf("max %5d min %5d wait %5d empty %5d\n", + vm_pageout_burst_max, vm_pageout_burst_min, + vm_pageout_burst_wait, vm_pageout_empty_wait); + db_indent -= 2; + iprintf("pause:\n"); + db_indent += 2; + iprintf("count %5d max %5d\n", + vm_pageout_pause_count, vm_pageout_pause_max); +#if MACH_COUNTERS + iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue); +#endif /* MACH_COUNTERS */ + db_indent -= 2; + db_pageout(); + db_indent -= 2; +} + +void +db_pageout(void) +{ + extern int c_limbo_page_free; + extern int c_limbo_convert; +#if MACH_COUNTERS + extern int c_laundry_pages_freed; +#endif /* MACH_COUNTERS */ + + iprintf("Pageout Statistics:\n"); + db_indent += 2; + iprintf("active %5d inactv %5d\n", + vm_pageout_active, vm_pageout_inactive); + iprintf("nolock %5d avoid %5d busy %5d absent %5d\n", + vm_pageout_inactive_nolock, vm_pageout_inactive_avoid, + vm_pageout_inactive_busy, vm_pageout_inactive_absent); + iprintf("used %5d clean %5d dirty %5d\n", + vm_pageout_inactive_used, vm_pageout_inactive_clean, + vm_pageout_inactive_dirty); + iprintf("pinned %5d limbo %5d setup_limbo %5d setup_unprep %5d\n", + vm_pageout_inactive_pinned, vm_pageout_inactive_limbo, + vm_pageout_setup_limbo, vm_pageout_setup_unprepped); + iprintf("limbo_page_free %5d limbo_convert %5d\n", + c_limbo_page_free, c_limbo_convert); +#if MACH_COUNTERS + iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed); +#endif /* MACH_COUNTERS */ +#if MACH_CLUSTER_STATS + iprintf("Cluster Statistics:\n"); + db_indent += 2; + iprintf("dirtied %5d cleaned %5d collisions %5d\n", + vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned, + vm_pageout_cluster_collisions); + iprintf("clusters %5d conversions %5d\n", + vm_pageout_cluster_clusters, vm_pageout_cluster_conversions); + db_indent -= 
2; + iprintf("Target Statistics:\n"); + db_indent += 2; + iprintf("collisions %5d page_dirtied %5d page_freed %5d\n", + vm_pageout_target_collisions, vm_pageout_target_page_dirtied, + vm_pageout_target_page_freed); + iprintf("page_pinned %5d page_limbo %5d\n", + vm_pageout_target_page_pinned, vm_pageout_target_page_limbo); + db_indent -= 2; +#endif /* MACH_CLUSTER_STATS */ + db_indent -= 2; +} + +#if MACH_CLUSTER_STATS +unsigned long vm_pageout_cluster_dirtied = 0; +unsigned long vm_pageout_cluster_cleaned = 0; +unsigned long vm_pageout_cluster_collisions = 0; +unsigned long vm_pageout_cluster_clusters = 0; +unsigned long vm_pageout_cluster_conversions = 0; +unsigned long vm_pageout_target_collisions = 0; +unsigned long vm_pageout_target_page_dirtied = 0; +unsigned long vm_pageout_target_page_freed = 0; +unsigned long vm_pageout_target_page_pinned = 0; +unsigned long vm_pageout_target_page_limbo = 0; +#define CLUSTER_STAT(clause) clause +#else /* MACH_CLUSTER_STATS */ +#define CLUSTER_STAT(clause) +#endif /* MACH_CLUSTER_STATS */ + +#endif /* MACH_KDB */ diff --git a/osfmk/vm/vm_pageout.h b/osfmk/vm/vm_pageout.h new file mode 100644 index 000000000..8e67e9553 --- /dev/null +++ b/osfmk/vm/vm_pageout.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/vm_pageout.h + * Author: Avadis Tevanian, Jr. + * Date: 1986 + * + * Declarations for the pageout daemon interface. + */ + +#ifndef _VM_VM_PAGEOUT_H_ +#define _VM_VM_PAGEOUT_H_ + +#include +#include +#include +#include + +/* + * The following ifdef only exists because XMM must (currently) + * be given a page at a time. This should be removed + * in the future. 
+ */ +#define DATA_WRITE_MAX 16 +#define POINTER_T(copy) (pointer_t)(copy) + +/* + * Exported routines. + */ +extern void vm_pageout(void); + +extern vm_object_t vm_pageout_object_allocate( + vm_page_t m, + vm_size_t size, + vm_object_offset_t offset); + +extern void vm_pageout_object_terminate( + vm_object_t object); + +extern vm_page_t vm_pageout_setup( + vm_page_t m, + vm_object_t new_object, + vm_object_offset_t new_offset); + +extern void vm_pageout_cluster( + vm_page_t m); + +extern void vm_pageout_initialize_page( + vm_page_t m); + +extern void vm_pageclean_setup( + vm_page_t m, + vm_page_t new_m, + vm_object_t new_object, + vm_object_offset_t new_offset); + +extern void vm_pageclean_copy( + vm_page_t m, + vm_page_t new_m, + vm_object_t new_object, + vm_object_offset_t new_offset); + +/* UPL exported routines and structures */ + +#define UPL_COMPOSITE_PAGE_LIST_MAX 16 + + +#define upl_lock_init(object) mutex_init(&(object)->Lock, ETAP_VM_OBJ) +#define upl_lock(object) mutex_lock(&(object)->Lock) +#define upl_unlock(object) mutex_unlock(&(object)->Lock) + + +/* universal page list structure */ + +struct upl { + decl_mutex_data(, Lock) /* Synchronization */ + int ref_count; + int flags; + vm_object_t src_object; /* object derived from */ + vm_object_offset_t offset; + vm_size_t size; /* size in bytes of the address space */ + vm_offset_t kaddr; /* secondary mapping in kernel */ + vm_object_t map_object; +#ifdef UBC_DEBUG + unsigned int ubc_alias1; + unsigned int ubc_alias2; + queue_chain_t uplq; /* List of outstanding upls on an obj */ +#endif /* UBC_DEBUG */ + +}; + + + +/* upl struct flags */ +#define UPL_PAGE_LIST_MAPPED 0x1 +#define UPL_KERNEL_MAPPED 0x2 +#define UPL_CLEAR_DIRTY 0x4 +#define UPL_COMPOSITE_LIST 0x8 +#define UPL_INTERNAL 0x10 +#define UPL_PAGE_SYNC_DONE 0x20 +#define UPL_DEVICE_MEMORY 0x40 + + + + + + + +#endif /* _VM_VM_PAGEOUT_H_ */ diff --git a/osfmk/vm/vm_print.h b/osfmk/vm/vm_print.h new file mode 100644 index 000000000..dbb76c48a --- 
/dev/null +++ b/osfmk/vm/vm_print.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef VM_PRINT_H +#define VM_PRINT_H + +#include + +extern void vm_map_print( + vm_map_t map); + +extern void vm_map_copy_print( + vm_map_copy_t copy); + +#include + +extern int vm_follow_object( + vm_object_t object); + +extern void vm_object_print( + vm_object_t object, + boolean_t have_addr, + int arg_count, + char *modif); + +#include + +extern void vm_page_print( + vm_page_t p); + +#include +#if MACH_PAGEMAP +#include +extern void vm_external_print( + vm_external_map_t map, + vm_size_t size); +#endif /* MACH_PAGEMAP */ + +extern void db_vm(void); + +extern vm_size_t db_vm_map_total_size( + vm_map_t map); + +#endif /* VM_PRINT_H */ diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c new file mode 100644 index 000000000..51bddcff3 --- /dev/null +++ b/osfmk/vm/vm_resident.c @@ -0,0 +1,2374 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ +/* + * File: vm/vm_page.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * Resident memory management module. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* kernel_memory_allocate() */ +#include +#include +#include + +/* + * Associated with page of user-allocatable memory is a + * page structure. + */ + +/* + * These variables record the values returned by vm_page_bootstrap, + * for debugging purposes. The implementation of pmap_steal_memory + * and pmap_startup here also uses them internally. + */ + +vm_offset_t virtual_space_start; +vm_offset_t virtual_space_end; +int vm_page_pages; + +/* + * The vm_page_lookup() routine, which provides for fast + * (virtual memory object, offset) to page lookup, employs + * the following hash table. The vm_page_{insert,remove} + * routines install and remove associations in the table. + * [This table is often called the virtual-to-physical, + * or VP, table.] + */ +typedef struct { + vm_page_t pages; +#if MACH_PAGE_HASH_STATS + int cur_count; /* current count */ + int hi_count; /* high water mark */ +#endif /* MACH_PAGE_HASH_STATS */ +} vm_page_bucket_t; + +vm_page_bucket_t *vm_page_buckets; /* Array of buckets */ +unsigned int vm_page_bucket_count = 0; /* How big is array? */ +unsigned int vm_page_hash_mask; /* Mask for hash function */ +unsigned int vm_page_hash_shift; /* Shift for hash function */ +decl_simple_lock_data(,vm_page_bucket_lock) + +#if MACH_PAGE_HASH_STATS +/* This routine is only for debug. It is intended to be called by + * hand by a developer using a kernel debugger. This routine prints + * out vm_page_hash table statistics to the kernel debug console. 
+ */ +void +hash_debug(void) +{ + int i; + int numbuckets = 0; + int highsum = 0; + int maxdepth = 0; + + for (i = 0; i < vm_page_bucket_count; i++) { + if (vm_page_buckets[i].hi_count) { + numbuckets++; + highsum += vm_page_buckets[i].hi_count; + if (vm_page_buckets[i].hi_count > maxdepth) + maxdepth = vm_page_buckets[i].hi_count; + } + } + printf("Total number of buckets: %d\n", vm_page_bucket_count); + printf("Number used buckets: %d = %d%%\n", + numbuckets, 100*numbuckets/vm_page_bucket_count); + printf("Number unused buckets: %d = %d%%\n", + vm_page_bucket_count - numbuckets, + 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count); + printf("Sum of bucket max depth: %d\n", highsum); + printf("Average bucket depth: %d.%2d\n", + highsum/vm_page_bucket_count, + highsum%vm_page_bucket_count); + printf("Maximum bucket depth: %d\n", maxdepth); +} +#endif /* MACH_PAGE_HASH_STATS */ + +/* + * The virtual page size is currently implemented as a runtime + * variable, but is constant once initialized using vm_set_page_size. + * This initialization must be done in the machine-dependent + * bootstrap sequence, before calling other machine-independent + * initializations. + * + * All references to the virtual page size outside this + * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT + * constants. + */ +#ifndef PAGE_SIZE_FIXED +vm_size_t page_size = 4096; +vm_size_t page_mask = 4095; +int page_shift = 12; +#endif /* PAGE_SIZE_FIXED */ + +/* + * Resident page structures are initialized from + * a template (see vm_page_alloc). + * + * When adding a new field to the virtual memory + * object structure, be sure to add initialization + * (see vm_page_bootstrap). + */ +struct vm_page vm_page_template; + +/* + * Resident pages that represent real memory + * are allocated from a free list. 
+ */ +vm_page_t vm_page_queue_free; +vm_page_t vm_page_queue_fictitious; +decl_mutex_data(,vm_page_queue_free_lock) +unsigned int vm_page_free_wanted; +int vm_page_free_count; +int vm_page_fictitious_count; + +unsigned int vm_page_free_count_minimum; /* debugging */ + +/* + * Occasionally, the virtual memory system uses + * resident page structures that do not refer to + * real pages, for example to leave a page with + * important state information in the VP table. + * + * These page structures are allocated the way + * most other kernel structures are. + */ +zone_t vm_page_zone; +decl_mutex_data(,vm_page_alloc_lock) + +/* + * Fictitious pages don't have a physical address, + * but we must initialize phys_addr to something. + * For debugging, this should be a strange value + * that the pmap module can recognize in assertions. + */ +vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1; + +/* + * Resident page structures are also chained on + * queues that are used by the page replacement + * system (pageout daemon). These queues are + * defined here, but are shared by the pageout + * module. + */ +queue_head_t vm_page_queue_active; +queue_head_t vm_page_queue_inactive; +decl_mutex_data(,vm_page_queue_lock) +int vm_page_active_count; +int vm_page_inactive_count; +int vm_page_wire_count; +int vm_page_gobble_count = 0; +int vm_page_wire_count_warning = 0; +int vm_page_gobble_count_warning = 0; + +/* the following fields are protected by the vm_page_queue_lock */ +queue_head_t vm_page_queue_limbo; +int vm_page_limbo_count = 0; /* total pages in limbo */ +int vm_page_limbo_real_count = 0; /* real pages in limbo */ +int vm_page_pin_count = 0; /* number of pinned pages */ + +decl_simple_lock_data(,vm_page_preppin_lock) + +/* + * Several page replacement parameters are also + * shared with this module, so that page allocation + * (done here in vm_page_alloc) can trigger the + * pageout daemon. 
+ */ +int vm_page_free_target = 0; +int vm_page_free_min = 0; +int vm_page_inactive_target = 0; +int vm_page_free_reserved = 0; +int vm_page_laundry_count = 0; + +/* + * The VM system has a couple of heuristics for deciding + * that pages are "uninteresting" and should be placed + * on the inactive queue as likely candidates for replacement. + * These variables let the heuristics be controlled at run-time + * to make experimentation easier. + */ + +boolean_t vm_page_deactivate_hint = TRUE; + +/* + * vm_set_page_size: + * + * Sets the page size, perhaps based upon the memory + * size. Must be called before any use of page-size + * dependent functions. + * + * Sets page_shift and page_mask from page_size. + */ +void +vm_set_page_size(void) +{ +#ifndef PAGE_SIZE_FIXED + page_mask = page_size - 1; + + if ((page_mask & page_size) != 0) + panic("vm_set_page_size: page size not a power of two"); + + for (page_shift = 0; ; page_shift++) + if ((1 << page_shift) == page_size) + break; +#endif /* PAGE_SIZE_FIXED */ +} + +/* + * vm_page_bootstrap: + * + * Initializes the resident memory module. + * + * Allocates memory for the page cells, and + * for the object/offset-to-page hash table headers. + * Each page cell is initialized and placed on the free list. + * Returns the range of available kernel virtual memory. + */ + +void +vm_page_bootstrap( + vm_offset_t *startp, + vm_offset_t *endp) +{ + register vm_page_t m; + int i; + unsigned int log1; + unsigned int log2; + unsigned int size; + + /* + * Initialize the vm_page template. 
+ */ + + m = &vm_page_template; + m->object = VM_OBJECT_NULL; /* reset later */ + m->offset = 0; /* reset later */ + m->wire_count = 0; + + m->inactive = FALSE; + m->active = FALSE; + m->laundry = FALSE; + m->free = FALSE; + m->reference = FALSE; + m->pageout = FALSE; + m->list_req_pending = FALSE; + + m->busy = TRUE; + m->wanted = FALSE; + m->tabled = FALSE; + m->fictitious = FALSE; + m->private = FALSE; + m->absent = FALSE; + m->error = FALSE; + m->dirty = FALSE; + m->cleaning = FALSE; + m->precious = FALSE; + m->clustered = FALSE; + m->lock_supplied = FALSE; + m->unusual = FALSE; + m->restart = FALSE; + m->limbo = FALSE; + + m->phys_addr = 0; /* reset later */ + + m->page_lock = VM_PROT_NONE; + m->unlock_request = VM_PROT_NONE; + m->page_error = KERN_SUCCESS; + + /* + * Initialize the page queues. + */ + + mutex_init(&vm_page_queue_free_lock, ETAP_VM_PAGEQ_FREE); + mutex_init(&vm_page_queue_lock, ETAP_VM_PAGEQ); + simple_lock_init(&vm_page_preppin_lock, ETAP_VM_PREPPIN); + + vm_page_queue_free = VM_PAGE_NULL; + vm_page_queue_fictitious = VM_PAGE_NULL; + queue_init(&vm_page_queue_active); + queue_init(&vm_page_queue_inactive); + queue_init(&vm_page_queue_limbo); + + vm_page_free_wanted = 0; + + /* + * Steal memory for the map and zone subsystems. + */ + + vm_map_steal_memory(); + zone_steal_memory(); + + /* + * Allocate (and initialize) the virtual-to-physical + * table hash buckets. + * + * The number of buckets should be a power of two to + * get a good hash function. The following computation + * chooses the first power of two that is greater + * than the number of physical pages in the system. 
+ */ + + simple_lock_init(&vm_page_bucket_lock, ETAP_VM_BUCKET); + + if (vm_page_bucket_count == 0) { + unsigned int npages = pmap_free_pages(); + + vm_page_bucket_count = 1; + while (vm_page_bucket_count < npages) + vm_page_bucket_count <<= 1; + } + + vm_page_hash_mask = vm_page_bucket_count - 1; + + /* + * Calculate object shift value for hashing algorithm: + * O = log2(sizeof(struct vm_object)) + * B = log2(vm_page_bucket_count) + * hash shifts the object left by + * B/2 - O + */ + size = vm_page_bucket_count; + for (log1 = 0; size > 1; log1++) + size /= 2; + size = sizeof(struct vm_object); + for (log2 = 0; size > 1; log2++) + size /= 2; + vm_page_hash_shift = log1/2 - log2 + 1; + + if (vm_page_hash_mask & vm_page_bucket_count) + printf("vm_page_bootstrap: WARNING -- strange page hash\n"); + + vm_page_buckets = (vm_page_bucket_t *) + pmap_steal_memory(vm_page_bucket_count * + sizeof(vm_page_bucket_t)); + + for (i = 0; i < vm_page_bucket_count; i++) { + register vm_page_bucket_t *bucket = &vm_page_buckets[i]; + + bucket->pages = VM_PAGE_NULL; +#if MACH_PAGE_HASH_STATS + bucket->cur_count = 0; + bucket->hi_count = 0; +#endif /* MACH_PAGE_HASH_STATS */ + } + + /* + * Machine-dependent code allocates the resident page table. + * It uses vm_page_init to initialize the page frames. + * The code also returns to us the virtual space available + * to the kernel. We don't trust the pmap module + * to get the alignment right. + */ + + pmap_startup(&virtual_space_start, &virtual_space_end); + virtual_space_start = round_page(virtual_space_start); + virtual_space_end = trunc_page(virtual_space_end); + + *startp = virtual_space_start; + *endp = virtual_space_end; + + /* + * Compute the initial "wire" count. + * Up until now, the pages which have been set aside are not under + * the VM system's control, so although they aren't explicitly + * wired, they nonetheless can't be moved. At this moment, + * all VM managed pages are "free", courtesy of pmap_startup. 
+ */ + vm_page_wire_count = atop(mem_size) - vm_page_free_count; /* initial value */ + + printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count); + vm_page_free_count_minimum = vm_page_free_count; +} + +#ifndef MACHINE_PAGES +/* + * We implement pmap_steal_memory and pmap_startup with the help + * of two simpler functions, pmap_virtual_space and pmap_next_page. + */ + +vm_offset_t +pmap_steal_memory( + vm_size_t size) +{ + vm_offset_t addr, vaddr, paddr; + + /* + * We round the size to a round multiple. + */ + + size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1); + + /* + * If this is the first call to pmap_steal_memory, + * we have to initialize ourself. + */ + + if (virtual_space_start == virtual_space_end) { + pmap_virtual_space(&virtual_space_start, &virtual_space_end); + + /* + * The initial values must be aligned properly, and + * we don't trust the pmap module to do it right. + */ + + virtual_space_start = round_page(virtual_space_start); + virtual_space_end = trunc_page(virtual_space_end); + } + + /* + * Allocate virtual memory for this request. + */ + + addr = virtual_space_start; + virtual_space_start += size; + + kprintf("pmap_steal_memory: %08X - %08X; size=%08X\n", addr, virtual_space_start, size); /* (TEST/DEBUG) */ + + /* + * Allocate and map physical pages to back new virtual pages. + */ + + for (vaddr = round_page(addr); + vaddr < addr + size; + vaddr += PAGE_SIZE) { + if (!pmap_next_page(&paddr)) + panic("pmap_steal_memory"); + + /* + * XXX Logically, these mappings should be wired, + * but some pmap modules barf if they are. 
+ */ + + pmap_enter(kernel_pmap, vaddr, paddr, + VM_PROT_READ|VM_PROT_WRITE, FALSE); + /* + * Account for newly stolen memory + */ + vm_page_wire_count++; + + } + + return addr; +} + +void +pmap_startup( + vm_offset_t *startp, + vm_offset_t *endp) +{ + unsigned int i, npages, pages_initialized; + vm_page_t pages; + vm_offset_t paddr; + + /* + * We calculate how many page frames we will have + * and then allocate the page structures in one chunk. + */ + + npages = ((PAGE_SIZE * pmap_free_pages() + + (round_page(virtual_space_start) - virtual_space_start)) / + (PAGE_SIZE + sizeof *pages)); + + pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages); + + /* + * Initialize the page frames. + */ + + for (i = 0, pages_initialized = 0; i < npages; i++) { + if (!pmap_next_page(&paddr)) + break; + + vm_page_init(&pages[i], paddr); + vm_page_pages++; + pages_initialized++; + } + + /* + * Release pages in reverse order so that physical pages + * initially get allocated in ascending addresses. This keeps + * the devices (which must address physical memory) happy if + * they require several consecutive pages. + */ + + for (i = pages_initialized; i > 0; i--) { + vm_page_release(&pages[i - 1]); + } + + /* + * We have to re-align virtual_space_start, + * because pmap_steal_memory has been using it. + */ + + virtual_space_start = round_page(virtual_space_start); + + *startp = virtual_space_start; + *endp = virtual_space_end; +} +#endif /* MACHINE_PAGES */ + +/* + * Routine: vm_page_module_init + * Purpose: + * Second initialization pass, to be done after + * the basic VM system is ready. 
+ */ +void +vm_page_module_init(void) +{ + vm_page_zone = zinit((vm_size_t) sizeof(struct vm_page), + 0, PAGE_SIZE, "vm pages"); + +#if ZONE_DEBUG + zone_debug_disable(vm_page_zone); +#endif /* ZONE_DEBUG */ + + zone_change(vm_page_zone, Z_EXPAND, FALSE); + zone_change(vm_page_zone, Z_EXHAUST, TRUE); + zone_change(vm_page_zone, Z_FOREIGN, TRUE); + + /* + * Adjust zone statistics to account for the real pages allocated + * in vm_page_create(). [Q: is this really what we want?] + */ + vm_page_zone->count += vm_page_pages; + vm_page_zone->cur_size += vm_page_pages * vm_page_zone->elem_size; + + mutex_init(&vm_page_alloc_lock, ETAP_VM_PAGE_ALLOC); +} + +/* + * Routine: vm_page_create + * Purpose: + * After the VM system is up, machine-dependent code + * may stumble across more physical memory. For example, + * memory that it was reserving for a frame buffer. + * vm_page_create turns this memory into available pages. + */ + +void +vm_page_create( + vm_offset_t start, + vm_offset_t end) +{ + vm_offset_t paddr; + vm_page_t m; + + for (paddr = round_page(start); + paddr < trunc_page(end); + paddr += PAGE_SIZE) { + while ((m = (vm_page_t) vm_page_grab_fictitious()) + == VM_PAGE_NULL) + vm_page_more_fictitious(); + + vm_page_init(m, paddr); + vm_page_pages++; + vm_page_release(m); + } +} + +/* + * vm_page_hash: + * + * Distributes the object/offset key pair among hash buckets. + * + * NOTE: To get a good hash function, the bucket count should + * be a power of two. 
+ */ +#define vm_page_hash(object, offset) (\ + ( ((natural_t)(vm_offset_t)object<tabled) + panic("vm_page_insert"); + + assert(!object->internal || offset < object->size); + + /* only insert "pageout" pages into "pageout" objects, + * and normal pages into normal objects */ + assert(object->pageout == mem->pageout); + + /* + * Record the object/offset pair in this page + */ + + mem->object = object; + mem->offset = offset; + + /* + * Insert it into the object_object/offset hash table + */ + + bucket = &vm_page_buckets[vm_page_hash(object, offset)]; + simple_lock(&vm_page_bucket_lock); + mem->next = bucket->pages; + bucket->pages = mem; +#if MACH_PAGE_HASH_STATS + if (++bucket->cur_count > bucket->hi_count) + bucket->hi_count = bucket->cur_count; +#endif /* MACH_PAGE_HASH_STATS */ + simple_unlock(&vm_page_bucket_lock); + + /* + * Now link into the object's list of backed pages. + */ + + queue_enter(&object->memq, mem, vm_page_t, listq); + mem->tabled = TRUE; + + /* + * Show that the object has one more resident page. + */ + + object->resident_page_count++; +} + +/* + * vm_page_replace: + * + * Exactly like vm_page_insert, except that we first + * remove any existing page at the given offset in object. + * + * The object and page queues must be locked. + */ + +void +vm_page_replace( + register vm_page_t mem, + register vm_object_t object, + register vm_object_offset_t offset) +{ + register vm_page_bucket_t *bucket; + + VM_PAGE_CHECK(mem); + + if (mem->tabled) + panic("vm_page_replace"); + + /* + * Record the object/offset pair in this page + */ + + mem->object = object; + mem->offset = offset; + + /* + * Insert it into the object_object/offset hash table, + * replacing any page that might have been there. 
+ */ + + bucket = &vm_page_buckets[vm_page_hash(object, offset)]; + simple_lock(&vm_page_bucket_lock); + if (bucket->pages) { + vm_page_t *mp = &bucket->pages; + register vm_page_t m = *mp; + do { + if (m->object == object && m->offset == offset) { + /* + * Remove page from bucket and from object, + * and return it to the free list. + */ + *mp = m->next; + queue_remove(&object->memq, m, vm_page_t, + listq); + m->tabled = FALSE; + object->resident_page_count--; + + /* + * Return page to the free list. + * Note the page is not tabled now, so this + * won't self-deadlock on the bucket lock. + */ + + vm_page_free(m); + break; + } + mp = &m->next; + } while (m = *mp); + mem->next = bucket->pages; + } else { + mem->next = VM_PAGE_NULL; + } + bucket->pages = mem; + simple_unlock(&vm_page_bucket_lock); + + /* + * Now link into the object's list of backed pages. + */ + + queue_enter(&object->memq, mem, vm_page_t, listq); + mem->tabled = TRUE; + + /* + * And show that the object has one more resident + * page. + */ + + object->resident_page_count++; +} + +/* + * vm_page_remove: [ internal use only ] + * + * Removes the given mem entry from the object/offset-page + * table and the object page list. + * + * The object and page must be locked. 
+ */ + +void +vm_page_remove( + register vm_page_t mem) +{ + register vm_page_bucket_t *bucket; + register vm_page_t this; + + XPR(XPR_VM_PAGE, + "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n", + (integer_t)mem->object, (integer_t)mem->offset, + (integer_t)mem, 0,0); + + assert(mem->tabled); + assert(!mem->cleaning); + VM_PAGE_CHECK(mem); + + /* + * Remove from the object_object/offset hash table + */ + + bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)]; + simple_lock(&vm_page_bucket_lock); + if ((this = bucket->pages) == mem) { + /* optimize for common case */ + + bucket->pages = mem->next; + } else { + register vm_page_t *prev; + + for (prev = &this->next; + (this = *prev) != mem; + prev = &this->next) + continue; + *prev = this->next; + } +#if MACH_PAGE_HASH_STATS + bucket->cur_count--; +#endif /* MACH_PAGE_HASH_STATS */ + simple_unlock(&vm_page_bucket_lock); + + /* + * Now remove from the object's list of backed pages. + */ + + queue_remove(&mem->object->memq, mem, vm_page_t, listq); + + /* + * And show that the object has one fewer resident + * page. + */ + + mem->object->resident_page_count--; + + mem->tabled = FALSE; + mem->object = VM_OBJECT_NULL; + mem->offset = 0; +} + +/* + * vm_page_lookup: + * + * Returns the page associated with the object/offset + * pair specified; if none is found, VM_PAGE_NULL is returned. + * + * The object must be locked. No side effects. 
+ */ + +vm_page_t +vm_page_lookup( + register vm_object_t object, + register vm_object_offset_t offset) +{ + register vm_page_t mem; + register vm_page_bucket_t *bucket; + + /* + * Search the hash table for this object/offset pair + */ + + bucket = &vm_page_buckets[vm_page_hash(object, offset)]; + + simple_lock(&vm_page_bucket_lock); + for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) { + VM_PAGE_CHECK(mem); + if ((mem->object == object) && (mem->offset == offset)) + break; + } + simple_unlock(&vm_page_bucket_lock); + return(mem); +} + +/* + * vm_page_rename: + * + * Move the given memory entry from its + * current object to the specified target object/offset. + * + * The object must be locked. + */ +void +vm_page_rename( + register vm_page_t mem, + register vm_object_t new_object, + vm_object_offset_t new_offset) +{ + assert(mem->object != new_object); + /* + * Changes to mem->object require the page lock because + * the pageout daemon uses that lock to get the object. + */ + + XPR(XPR_VM_PAGE, + "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n", + (integer_t)new_object, (integer_t)new_offset, + (integer_t)mem, 0,0); + + vm_page_lock_queues(); + vm_page_remove(mem); + vm_page_insert(mem, new_object, new_offset); + vm_page_unlock_queues(); +} + +/* + * vm_page_init: + * + * Initialize the fields in a new page. + * This takes a structure with random values and initializes it + * so that it can be given to vm_page_release or vm_page_insert. + */ +void +vm_page_init( + vm_page_t mem, + vm_offset_t phys_addr) +{ + *mem = vm_page_template; + mem->phys_addr = phys_addr; +} + +/* + * vm_page_grab_fictitious: + * + * Remove a fictitious page from the free list. + * Returns VM_PAGE_NULL if there are no free pages. 
+ */ +int c_vm_page_grab_fictitious = 0; +int c_vm_page_release_fictitious = 0; +int c_vm_page_more_fictitious = 0; + +vm_page_t +vm_page_grab_fictitious(void) +{ + register vm_page_t m; + + m = (vm_page_t)zget(vm_page_zone); + if (m) { + m->free = FALSE; +#if MACH_ASSERT || ZONE_DEBUG + vm_page_init(m, vm_page_fictitious_addr); + m->fictitious = TRUE; +#endif /* MACH_ASSERT || ZONE_DEBUG */ + } + + c_vm_page_grab_fictitious++; + return m; +} + +/* + * vm_page_release_fictitious: + * + * Release a fictitious page to the free list. + */ + +void +vm_page_release_fictitious( + register vm_page_t m) +{ + assert(!m->free); + assert(m->busy); + assert(m->fictitious); + assert(m->phys_addr == vm_page_fictitious_addr); + + c_vm_page_release_fictitious++; + + if (m->free) + panic("vm_page_release_fictitious"); + m->free = TRUE; + zfree(vm_page_zone, (vm_offset_t)m); +} + +/* + * vm_page_more_fictitious: + * + * Add more fictitious pages to the free list. + * Allowed to block. This routine is way intimate + * with the zones code, for several reasons: + * 1. we need to carve some page structures out of physical + * memory before zones work, so they _cannot_ come from + * the zone_map. + * 2. the zone needs to be collectable in order to prevent + * growth without bound. These structures are used by + * the device pager (by the hundreds and thousands), as + * private pages for pageout, and as blocking pages for + * pagein. Temporary bursts in demand should not result in + * permanent allocation of a resource. + * 3. To smooth allocation humps, we allocate single pages + * with kernel_memory_allocate(), and cram them into the + * zone. This also allows us to initialize the vm_page_t's + * on the way into the zone, so that zget() always returns + * an initialized structure. The zone free element pointer + * and the free page pointer are both the first item in the + * vm_page_t. + * 4. By having the pages in the zone pre-initialized, we need + * not keep 2 levels of lists. 
The garbage collector simply + * scans our list, and reduces physical memory usage as it + * sees fit. + */ + +void vm_page_more_fictitious(void) +{ + extern vm_map_t zone_map; + register vm_page_t m; + vm_offset_t addr; + kern_return_t retval; + int i; + + c_vm_page_more_fictitious++; + + /* this may free up some fictitious pages */ + cleanup_limbo_queue(); + + /* + * Allocate a single page from the zone_map. Do not wait if no physical + * pages are immediately available, and do not zero the space. We need + * our own blocking lock here to prevent having multiple, + * simultaneous requests from piling up on the zone_map lock. Exactly + * one (of our) threads should be potentially waiting on the map lock. + * If winner is not vm-privileged, then the page allocation will fail, + * and it will temporarily block here in the vm_page_wait(). + */ + mutex_lock(&vm_page_alloc_lock); + /* + * If another thread allocated space, just bail out now. + */ + if (zone_free_count(vm_page_zone) > 5) { + /* + * The number "5" is a small number that is larger than the + * number of fictitious pages that any single caller will + * attempt to allocate. Otherwise, a thread will attempt to + * acquire a fictitious page (vm_page_grab_fictitious), fail, + * release all of the resources and locks already acquired, + * and then call this routine. This routine finds the pages + * that the caller released, so fails to allocate new space. + * The process repeats infinitely. The largest known number + * of fictitious pages required in this manner is 2. 5 is + * simply a somewhat larger number. + */ + mutex_unlock(&vm_page_alloc_lock); + return; + } + + if ((retval = kernel_memory_allocate(zone_map, + &addr, PAGE_SIZE, VM_PROT_ALL, + KMA_KOBJECT|KMA_NOPAGEWAIT)) != KERN_SUCCESS) { + /* + * No page was available. Tell the pageout daemon, drop the + * lock to give another thread a chance at it, and + * wait for the pageout daemon to make progress. 
+ */ + mutex_unlock(&vm_page_alloc_lock); + vm_page_wait(THREAD_UNINT); + return; + } + /* + * Initialize as many vm_page_t's as will fit on this page. This + * depends on the zone code disturbing ONLY the first item of + * each zone element. + */ + m = (vm_page_t)addr; + for (i = PAGE_SIZE/sizeof(struct vm_page); i > 0; i--) { + vm_page_init(m, vm_page_fictitious_addr); + m->fictitious = TRUE; + m++; + } + zcram(vm_page_zone, addr, PAGE_SIZE); + mutex_unlock(&vm_page_alloc_lock); +} + +/* + * vm_page_convert: + * + * Attempt to convert a fictitious page into a real page. + */ + +boolean_t +vm_page_convert( + register vm_page_t m) +{ + register vm_page_t real_m; + + assert(m->busy); + assert(m->fictitious); + assert(!m->dirty); + + real_m = vm_page_grab(); + if (real_m == VM_PAGE_NULL) + return FALSE; + + m->phys_addr = real_m->phys_addr; + m->fictitious = FALSE; + + vm_page_lock_queues(); + if (m->active) + vm_page_active_count++; + else if (m->inactive) + vm_page_inactive_count++; + vm_page_unlock_queues(); + + real_m->phys_addr = vm_page_fictitious_addr; + real_m->fictitious = TRUE; + + vm_page_release_fictitious(real_m); + return TRUE; +} + +/* + * vm_pool_low(): + * + * Return true if it is not likely that a non-vm_privileged thread + * can get memory without blocking. Advisory only, since the + * situation may change under us. + */ +int +vm_pool_low(void) +{ + /* No locking, at worst we will fib. */ + return( vm_page_free_count < vm_page_free_reserved ); +} + +/* + * vm_page_grab: + * + * Remove a page from the free list. + * Returns VM_PAGE_NULL if the free list is too small. + */ + +unsigned long vm_page_grab_count = 0; /* measure demand */ + +vm_page_t +vm_page_grab(void) +{ + register vm_page_t mem; + + mutex_lock(&vm_page_queue_free_lock); + vm_page_grab_count++; + + /* + * Optionally produce warnings if the wire or gobble + * counts exceed some threshold. 
+ */ + if (vm_page_wire_count_warning > 0 + && vm_page_wire_count >= vm_page_wire_count_warning) { + printf("mk: vm_page_grab(): high wired page count of %d\n", + vm_page_wire_count); + assert(vm_page_wire_count < vm_page_wire_count_warning); + } + if (vm_page_gobble_count_warning > 0 + && vm_page_gobble_count >= vm_page_gobble_count_warning) { + printf("mk: vm_page_grab(): high gobbled page count of %d\n", + vm_page_gobble_count); + assert(vm_page_gobble_count < vm_page_gobble_count_warning); + } + + /* + * Only let privileged threads (involved in pageout) + * dip into the reserved pool. + */ + + if ((vm_page_free_count < vm_page_free_reserved) && + !current_thread()->vm_privilege) { + mutex_unlock(&vm_page_queue_free_lock); + mem = VM_PAGE_NULL; + goto wakeup_pageout; + } + + while (vm_page_queue_free == VM_PAGE_NULL) { + printf("vm_page_grab: no free pages, trouble expected...\n"); + mutex_unlock(&vm_page_queue_free_lock); + VM_PAGE_WAIT(); + mutex_lock(&vm_page_queue_free_lock); + } + + if (--vm_page_free_count < vm_page_free_count_minimum) + vm_page_free_count_minimum = vm_page_free_count; + mem = vm_page_queue_free; + vm_page_queue_free = (vm_page_t) mem->pageq.next; + mem->free = FALSE; + mutex_unlock(&vm_page_queue_free_lock); + + /* + * Decide if we should poke the pageout daemon. + * We do this if the free count is less than the low + * water mark, or if the free count is less than the high + * water mark (but above the low water mark) and the inactive + * count is less than its target. + * + * We don't have the counts locked ... if they change a little, + * it doesn't really matter. 
+ */ + +wakeup_pageout: + if ((vm_page_free_count < vm_page_free_min) || + ((vm_page_free_count < vm_page_free_target) && + (vm_page_inactive_count < vm_page_inactive_target))) + thread_wakeup((event_t) &vm_page_free_wanted); + +// dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */ + + return mem; +} + +/* + * vm_page_release: + * + * Return a page to the free list. + */ + +void +vm_page_release( + register vm_page_t mem) +{ + assert(!mem->private && !mem->fictitious); + +// dbgLog(mem->phys_addr, vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */ + + mutex_lock(&vm_page_queue_free_lock); + if (mem->free) + panic("vm_page_release"); + mem->free = TRUE; + mem->pageq.next = (queue_entry_t) vm_page_queue_free; + vm_page_queue_free = mem; + vm_page_free_count++; + + /* + * Check if we should wake up someone waiting for page. + * But don't bother waking them unless they can allocate. + * + * We wakeup only one thread, to prevent starvation. + * Because the scheduling system handles wait queues FIFO, + * if we wakeup all waiting threads, one greedy thread + * can starve multiple niceguy threads. When the threads + * all wakeup, the greedy threads runs first, grabs the page, + * and waits for another page. It will be the first to run + * when the next page is freed. + * + * However, there is a slight danger here. + * The thread we wake might not use the free page. + * Then the other threads could wait indefinitely + * while the page goes unused. To forestall this, + * the pageout daemon will keep making free pages + * as long as vm_page_free_wanted is non-zero. + */ + + if ((vm_page_free_wanted > 0) && + (vm_page_free_count >= vm_page_free_reserved)) { + vm_page_free_wanted--; + thread_wakeup_one((event_t) &vm_page_free_count); + } + + mutex_unlock(&vm_page_queue_free_lock); +} + +/* + * Release a page to the limbo list. + * Put real pages at the head of the queue, fictitious at the tail. + * Page queues must be locked. 
+ */ +void +vm_page_release_limbo( + register vm_page_t m) +{ + assert(m->limbo); + vm_page_limbo_count++; + if (m->fictitious) { + queue_enter(&vm_page_queue_limbo, m, vm_page_t, pageq); + } else { + vm_page_limbo_real_count++; + queue_enter_first(&vm_page_queue_limbo, m, vm_page_t, pageq); + } +} + +/* + * Exchange a real page in limbo (limbo_m) with a fictitious page (new_m). + * The end result is that limbo_m is fictitious and still in limbo, and new_m + * is the real page. The prep and pin counts remain with the page in limbo + * although they will be briefly cleared by vm_page_init. This is OK since + * there will be no interrupt-level interactions (the page is in limbo) and + * vm_page_unprep must lock the page queues before changing the prep count. + * + * Page queues must be locked, and limbo_m must have been removed from its + * object. + */ +void +vm_page_limbo_exchange( + register vm_page_t limbo_m, + register vm_page_t new_m) +{ + assert(limbo_m->limbo && !limbo_m->fictitious); + assert(!limbo_m->tabled); + assert(new_m->fictitious); + + *new_m = *limbo_m; + vm_page_init(limbo_m, vm_page_fictitious_addr); + + limbo_m->fictitious = TRUE; + limbo_m->limbo = TRUE; + new_m->limbo = FALSE; + + limbo_m->prep_pin_count = new_m->prep_pin_count; + new_m->prep_pin_count = 0; +} + +/* + * vm_page_wait: + * + * Wait for a page to become available. + * If there are plenty of free pages, then we don't sleep. + * + * Returns: + * TRUE: There may be another page, try again + * FALSE: We were interrupted out of our wait, don't try again + */ + +boolean_t +vm_page_wait( + int interruptible ) +{ + /* + * We can't use vm_page_free_reserved to make this + * determination. Consider: some thread might + * need to allocate two pages. The first allocation + * succeeds, the second fails. After the first page is freed, + * a call to vm_page_wait must really block. 
+ */ + kern_return_t wait_result; + + mutex_lock(&vm_page_queue_free_lock); + if (vm_page_free_count < vm_page_free_target) { + if (vm_page_free_wanted++ == 0) + thread_wakeup((event_t)&vm_page_free_wanted); + assert_wait((event_t)&vm_page_free_count, interruptible); + mutex_unlock(&vm_page_queue_free_lock); + counter(c_vm_page_wait_block++); + wait_result = thread_block((void (*)(void))0); + return(wait_result == THREAD_AWAKENED); + } else { + mutex_unlock(&vm_page_queue_free_lock); + return TRUE; + } +} + +/* + * vm_page_alloc: + * + * Allocate and return a memory cell associated + * with this VM object/offset pair. + * + * Object must be locked. + */ + +vm_page_t +vm_page_alloc( + vm_object_t object, + vm_object_offset_t offset) +{ + register vm_page_t mem; + + mem = vm_page_grab(); + if (mem == VM_PAGE_NULL) + return VM_PAGE_NULL; + + vm_page_insert(mem, object, offset); + + return(mem); +} + +int c_limbo_page_free = 0; /* debugging */ +int c_limbo_convert = 0; /* debugging */ +counter(unsigned int c_laundry_pages_freed = 0;) + +int vm_pagein_cluster_unused = 0; +boolean_t vm_page_free_verify = FALSE; +/* + * vm_page_free: + * + * Returns the given page to the free list, + * disassociating it with any VM object. + * + * Object and page queues must be locked prior to entry. 
+ */ +void +vm_page_free( + register vm_page_t mem) +{ + vm_object_t object = mem->object; + + assert(!mem->free); + assert(!mem->cleaning); + assert(!mem->pageout); + assert(!vm_page_free_verify || pmap_verify_free(mem->phys_addr)); + + if (mem->tabled) + vm_page_remove(mem); /* clears tabled, object, offset */ + VM_PAGE_QUEUES_REMOVE(mem); /* clears active or inactive */ + + if (mem->clustered) { + mem->clustered = FALSE; + vm_pagein_cluster_unused++; + } + + if (mem->wire_count) { + if (!mem->private && !mem->fictitious) + vm_page_wire_count--; + mem->wire_count = 0; + assert(!mem->gobbled); + } else if (mem->gobbled) { + if (!mem->private && !mem->fictitious) + vm_page_wire_count--; + vm_page_gobble_count--; + } + mem->gobbled = FALSE; + + if (mem->laundry) { + extern int vm_page_laundry_min; + vm_page_laundry_count--; + mem->laundry = FALSE; /* laundry is now clear */ + counter(++c_laundry_pages_freed); + if (vm_page_laundry_count < vm_page_laundry_min) { + vm_page_laundry_min = 0; + thread_wakeup((event_t) &vm_page_laundry_count); + } + } + + mem->discard_request = FALSE; + + PAGE_WAKEUP(mem); /* clears wanted */ + + if (mem->absent) + vm_object_absent_release(object); + + if (mem->limbo) { + /* + * The pageout daemon put this page into limbo and then freed + * it. The page has already been removed from the object and + * queues, so any attempt to look it up will fail. Put it + * on the limbo queue; the pageout daemon will convert it to a + * fictitious page and/or free the real one later. + */ + /* assert that it came from pageout daemon (how?) 
*/ + assert(!mem->fictitious && !mem->absent); + c_limbo_page_free++; + vm_page_release_limbo(mem); + return; + } + assert(mem->prep_pin_count == 0); + + /* Some of these may be unnecessary */ + mem->page_lock = 0; + mem->unlock_request = 0; + mem->busy = TRUE; + mem->absent = FALSE; + mem->error = FALSE; + mem->dirty = FALSE; + mem->precious = FALSE; + mem->reference = FALSE; + + mem->page_error = KERN_SUCCESS; + + if (mem->private) { + mem->private = FALSE; + mem->fictitious = TRUE; + mem->phys_addr = vm_page_fictitious_addr; + } + if (mem->fictitious) { + vm_page_release_fictitious(mem); + } else { + vm_page_init(mem, mem->phys_addr); + vm_page_release(mem); + } +} + +/* + * vm_page_wire: + * + * Mark this page as wired down by yet + * another map, removing it from paging queues + * as necessary. + * + * The page's object and the page queues must be locked. + */ +void +vm_page_wire( + register vm_page_t mem) +{ + +// dbgLog(current_act(), mem->offset, mem->object, 1); /* (TEST/DEBUG) */ + + VM_PAGE_CHECK(mem); + + if (mem->wire_count == 0) { + VM_PAGE_QUEUES_REMOVE(mem); + if (!mem->private && !mem->fictitious && !mem->gobbled) + vm_page_wire_count++; + if (mem->gobbled) + vm_page_gobble_count--; + mem->gobbled = FALSE; + } + assert(!mem->gobbled); + mem->wire_count++; +} + +/* + * vm_page_gobble: + * + * Mark this page as consumed by the vm/ipc/xmm subsystems. + * + * Called only for freshly vm_page_grab()ed pages - w/ nothing locked. + */ +void +vm_page_gobble( + register vm_page_t mem) +{ + vm_page_lock_queues(); + VM_PAGE_CHECK(mem); + + assert(!mem->gobbled); + assert(mem->wire_count == 0); + + if (!mem->gobbled && mem->wire_count == 0) { + if (!mem->private && !mem->fictitious) + vm_page_wire_count++; + } + vm_page_gobble_count++; + mem->gobbled = TRUE; + vm_page_unlock_queues(); +} + +/* + * vm_page_unwire: + * + * Release one wiring of this page, potentially + * enabling it to be paged again. + * + * The page's object and the page queues must be locked. 
+ */ +void +vm_page_unwire( + register vm_page_t mem) +{ + +// dbgLog(current_act(), mem->offset, mem->object, 0); /* (TEST/DEBUG) */ + + VM_PAGE_CHECK(mem); + assert(mem->wire_count > 0); + + if (--mem->wire_count == 0) { + assert(!mem->private && !mem->fictitious); + vm_page_wire_count--; + queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq); + vm_page_active_count++; + mem->active = TRUE; + mem->reference = TRUE; + } +} + +/* + * vm_page_deactivate: + * + * Returns the given page to the inactive list, + * indicating that no physical maps have access + * to this page. [Used by the physical mapping system.] + * + * The page queues must be locked. + */ +void +vm_page_deactivate( + register vm_page_t m) +{ + VM_PAGE_CHECK(m); + +// dbgLog(m->phys_addr, vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */ + + /* + * This page is no longer very interesting. If it was + * interesting (active or inactive/referenced), then we + * clear the reference bit and (re)enter it in the + * inactive queue. Note wired pages should not have + * their reference bit cleared. + */ + if (m->gobbled) { /* can this happen? */ + assert(m->wire_count == 0); + if (!m->private && !m->fictitious) + vm_page_wire_count--; + vm_page_gobble_count--; + m->gobbled = FALSE; + } + if (m->private || (m->wire_count != 0)) + return; + if (m->active || (m->inactive && m->reference)) { + if (!m->fictitious && !m->absent) + pmap_clear_reference(m->phys_addr); + m->reference = FALSE; + VM_PAGE_QUEUES_REMOVE(m); + } + if (m->wire_count == 0 && !m->inactive) { + queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq); + m->inactive = TRUE; + if (!m->fictitious) + vm_page_inactive_count++; + } +} + +/* + * vm_page_activate: + * + * Put the specified page on the active list (if appropriate). + * + * The page queues must be locked. 
+ */ + +void +vm_page_activate( + register vm_page_t m) +{ + VM_PAGE_CHECK(m); + + if (m->gobbled) { + assert(m->wire_count == 0); + if (!m->private && !m->fictitious) + vm_page_wire_count--; + vm_page_gobble_count--; + m->gobbled = FALSE; + } + if (m->private) + return; + + if (m->inactive) { + queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq); + if (!m->fictitious) + vm_page_inactive_count--; + m->inactive = FALSE; + } + if (m->wire_count == 0) { + if (m->active) + panic("vm_page_activate: already active"); + + queue_enter(&vm_page_queue_active, m, vm_page_t, pageq); + m->active = TRUE; + m->reference = TRUE; + if (!m->fictitious) + vm_page_active_count++; + } +} + +/* + * vm_page_part_zero_fill: + * + * Zero-fill a part of the page. + */ +void +vm_page_part_zero_fill( + vm_page_t m, + vm_offset_t m_pa, + vm_size_t len) +{ + vm_page_t tmp; + + VM_PAGE_CHECK(m); +#ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED + pmap_zero_part_page(m->phys_addr, m_pa, len); +#else + while (1) { + tmp = vm_page_grab(); + if (tmp == VM_PAGE_NULL) { + vm_page_wait(THREAD_UNINT); + continue; + } + break; + } + vm_page_zero_fill(tmp); + if(m_pa != 0) { + vm_page_part_copy(m, 0, tmp, 0, m_pa); + } + if((m_pa + len) < PAGE_SIZE) { + vm_page_part_copy(m, m_pa + len, tmp, + m_pa + len, PAGE_SIZE - (m_pa + len)); + } + vm_page_copy(tmp,m); + vm_page_lock_queues(); + vm_page_free(tmp); + vm_page_unlock_queues(); +#endif + +} + +/* + * vm_page_zero_fill: + * + * Zero-fill the specified page. 
+ */ +void +vm_page_zero_fill( + vm_page_t m) +{ + XPR(XPR_VM_PAGE, + "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n", + (integer_t)m->object, (integer_t)m->offset, (integer_t)m, 0,0); + + VM_PAGE_CHECK(m); + + pmap_zero_page(m->phys_addr); +} + +/* + * vm_page_part_copy: + * + * copy part of one page to another + */ + +void +vm_page_part_copy( + vm_page_t src_m, + vm_offset_t src_pa, + vm_page_t dst_m, + vm_offset_t dst_pa, + vm_size_t len) +{ + VM_PAGE_CHECK(src_m); + VM_PAGE_CHECK(dst_m); + + pmap_copy_part_page(src_m->phys_addr, src_pa, + dst_m->phys_addr, dst_pa, len); +} + +/* + * vm_page_copy: + * + * Copy one page to another + */ + +void +vm_page_copy( + vm_page_t src_m, + vm_page_t dest_m) +{ + XPR(XPR_VM_PAGE, + "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n", + (integer_t)src_m->object, src_m->offset, + (integer_t)dest_m->object, dest_m->offset, + 0); + + VM_PAGE_CHECK(src_m); + VM_PAGE_CHECK(dest_m); + + pmap_copy_page(src_m->phys_addr, dest_m->phys_addr); +} + +/* + * Limbo pages are placed on the limbo queue to await their prep count + * going to zero. A page is put into limbo by the pageout daemon. If the + * page is real, then the pageout daemon did not need to page out the page, + * it just freed it. When the prep_pin_count is zero the page can be freed. + * Real pages with a non-zero prep count are converted to fictitious pages + * so that the memory can be reclaimed; the fictitious page will remain on + * the limbo queue until its prep count reaches zero. + * + * cleanup_limbo_queue is called by vm_page_more_fictitious and the pageout + * daemon since it can free both real and fictitious pages. + * It returns the number of fictitious pages freed. + */ +void +cleanup_limbo_queue(void) +{ + register vm_page_t free_m, m; + vm_offset_t phys_addr; + + vm_page_lock_queues(); + assert(vm_page_limbo_count >= vm_page_limbo_real_count); + + /* + * first free up all pages with prep/pin counts of zero. 
This + * may free both real and fictitious pages, which may be needed + * later to convert real ones. + */ + m = (vm_page_t)queue_first(&vm_page_queue_limbo); + while (!queue_end(&vm_page_queue_limbo, (queue_entry_t)m)) { + if (m->prep_pin_count == 0) { + free_m = m; + m = (vm_page_t)queue_next(&m->pageq); + queue_remove(&vm_page_queue_limbo, free_m, vm_page_t, + pageq); + vm_page_limbo_count--; + if (!free_m->fictitious) + vm_page_limbo_real_count--; + free_m->limbo = FALSE; + vm_page_free(free_m); + assert(vm_page_limbo_count >= 0); + assert(vm_page_limbo_real_count >= 0); + } else { + m = (vm_page_t)queue_next(&m->pageq); + } + } + + /* + * now convert any remaining real pages to fictitious and free the + * real ones. + */ + while (vm_page_limbo_real_count > 0) { + queue_remove_first(&vm_page_queue_limbo, m, vm_page_t, pageq); + assert(!m->fictitious); + assert(m->limbo); + + /* + * Try to get a fictitious page. If impossible, + * requeue the real one and give up. + */ + free_m = vm_page_grab_fictitious(); + if (free_m == VM_PAGE_NULL) { + queue_enter_first(&vm_page_queue_limbo, m, vm_page_t, + pageq); + break; + } + c_limbo_convert++; + vm_page_limbo_exchange(m, free_m); + assert(m->limbo && m->fictitious); + assert(!free_m->limbo && !free_m->fictitious); + queue_enter(&vm_page_queue_limbo, m, vm_page_t, pageq); + vm_page_free(free_m); + vm_page_limbo_real_count--; + } + + vm_page_unlock_queues(); +} + +/* + * Increment prep_count on a page. + * Must be called in thread context. Page must not disappear: object + * must be locked. 
+ */ +kern_return_t +vm_page_prep( + register vm_page_t m) +{ + kern_return_t retval = KERN_SUCCESS; + + assert(m != VM_PAGE_NULL); + vm_page_lock_queues(); + if (!m->busy && !m->error && !m->fictitious && !m->absent) { + if (m->prep_pin_count != 0) { + vm_page_pin_lock(); + m->prep_count++; + vm_page_pin_unlock(); + } else { + m->prep_count++; + } + assert(m->prep_count != 0); /* check for wraparound */ + } else { + retval = KERN_FAILURE; + } + vm_page_unlock_queues(); + return retval; +} + + +/* + * Pin a page (increment pin count). + * Must have been previously prepped. + * + * MUST BE CALLED AT SPLVM. + * + * May be called from thread or interrupt context. + * If page is in "limbo" it cannot be pinned. + */ +kern_return_t +vm_page_pin( + register vm_page_t m) +{ + kern_return_t retval = KERN_SUCCESS; + + assert(m != VM_PAGE_NULL); + vm_page_pin_lock(); + if (m->limbo || m->prep_count == 0) { + retval = KERN_FAILURE; + } else { + assert(!m->fictitious); + if (m->pin_count == 0) + vm_page_pin_count++; + m->pin_count++; + } + vm_page_pin_unlock(); + return retval; +} + + +/* + * Unprep a page (decrement prep count). + * Must have been previously prepped. + * Called to decrement prep count after an attempt to pin failed. + * Must be called from thread context. + */ +kern_return_t +vm_page_unprep( + register vm_page_t m) +{ + kern_return_t retval = KERN_SUCCESS; + + assert(m != VM_PAGE_NULL); + vm_page_lock_queues(); + vm_page_pin_lock(); + assert(m->prep_count != 0); + if (m->prep_count == 0) + retval = KERN_FAILURE; /* shouldn't happen */ + else + m->prep_count--; + vm_page_pin_unlock(); + vm_page_unlock_queues(); + return retval; +} + + +/* + * Unpin a page: decrement pin AND prep counts. + * Must have been previously prepped AND pinned. + * + * MUST BE CALLED AT SPLVM. + * + * May be called from thread or interrupt context. 
+ */ +kern_return_t +vm_page_unpin( + register vm_page_t m) +{ + kern_return_t retval = KERN_SUCCESS; + + assert(m != VM_PAGE_NULL); + vm_page_pin_lock(); + assert(m->prep_count != 0 && m->pin_count != 0); + assert(m->prep_count >= m->pin_count); + assert(!m->limbo && !m->fictitious); + if (m->prep_count != 0 && m->pin_count != 0) { + m->prep_count--; + m->pin_count--; + if (m->pin_count == 0) + vm_page_pin_count--; + } else { + retval = KERN_FAILURE; /* shouldn't happen */ + } + vm_page_pin_unlock(); + return retval; +} + +/* + * Currently, this is a primitive allocator that grabs + * free pages from the system, sorts them by physical + * address, then searches for a region large enough to + * satisfy the user's request. + * + * Additional levels of effort: + * + steal clean active/inactive pages + * + force pageouts of dirty pages + * + maintain a map of available physical + * memory + */ + +#define SET_NEXT_PAGE(m,n) ((m)->pageq.next = (struct queue_entry *) (n)) + +#if MACH_ASSERT +int vm_page_verify_contiguous( + vm_page_t pages, + unsigned int npages); +#endif /* MACH_ASSERT */ + +cpm_counter(unsigned int vpfls_pages_handled = 0;) +cpm_counter(unsigned int vpfls_head_insertions = 0;) +cpm_counter(unsigned int vpfls_tail_insertions = 0;) +cpm_counter(unsigned int vpfls_general_insertions = 0;) +cpm_counter(unsigned int vpfc_failed = 0;) +cpm_counter(unsigned int vpfc_satisfied = 0;) + +/* + * Sort free list by ascending physical address, + * using a not-particularly-bright sort algorithm. + * Caller holds vm_page_queue_free_lock. + */ +static void +vm_page_free_list_sort(void) +{ + vm_page_t sort_list; + vm_page_t sort_list_end; + vm_page_t m, m1, *prev, next_m; + vm_offset_t addr; +#if MACH_ASSERT + unsigned int npages; + int old_free_count; +#endif /* MACH_ASSERT */ + +#if MACH_ASSERT + /* + * Verify pages in the free list.. 
+ */ + npages = 0; + for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) + ++npages; + if (npages != vm_page_free_count) + panic("vm_sort_free_list: prelim: npages %d free_count %d", + npages, vm_page_free_count); + old_free_count = vm_page_free_count; +#endif /* MACH_ASSERT */ + + sort_list = sort_list_end = vm_page_queue_free; + m = NEXT_PAGE(vm_page_queue_free); + SET_NEXT_PAGE(vm_page_queue_free, VM_PAGE_NULL); + cpm_counter(vpfls_pages_handled = 0); + while (m != VM_PAGE_NULL) { + cpm_counter(++vpfls_pages_handled); + next_m = NEXT_PAGE(m); + if (m->phys_addr < sort_list->phys_addr) { + cpm_counter(++vpfls_head_insertions); + SET_NEXT_PAGE(m, sort_list); + sort_list = m; + } else if (m->phys_addr > sort_list_end->phys_addr) { + cpm_counter(++vpfls_tail_insertions); + SET_NEXT_PAGE(sort_list_end, m); + SET_NEXT_PAGE(m, VM_PAGE_NULL); + sort_list_end = m; + } else { + cpm_counter(++vpfls_general_insertions); + /* general sorted list insertion */ + prev = &sort_list; + for (m1=sort_list; m1!=VM_PAGE_NULL; m1=NEXT_PAGE(m1)) { + if (m1->phys_addr > m->phys_addr) { + if (*prev != m1) + panic("vm_sort_free_list: ugh"); + SET_NEXT_PAGE(m, *prev); + *prev = m; + break; + } + prev = (vm_page_t *) &m1->pageq.next; + } + } + m = next_m; + } + +#if MACH_ASSERT + /* + * Verify that pages are sorted into ascending order. 
+ */ + for (m = sort_list, npages = 0; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { + if (m != sort_list && + m->phys_addr <= addr) { + printf("m 0x%x addr 0x%x\n", m, addr); + panic("vm_sort_free_list"); + } + addr = m->phys_addr; + ++npages; + } + if (old_free_count != vm_page_free_count) + panic("vm_sort_free_list: old_free %d free_count %d", + old_free_count, vm_page_free_count); + if (npages != vm_page_free_count) + panic("vm_sort_free_list: npages %d free_count %d", + npages, vm_page_free_count); +#endif /* MACH_ASSERT */ + + vm_page_queue_free = sort_list; +} + + +#if MACH_ASSERT +/* + * Check that the list of pages is ordered by + * ascending physical address and has no holes. + */ +int +vm_page_verify_contiguous( + vm_page_t pages, + unsigned int npages) +{ + register vm_page_t m; + unsigned int page_count; + vm_offset_t prev_addr; + + prev_addr = pages->phys_addr; + page_count = 1; + for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { + if (m->phys_addr != prev_addr + page_size) { + printf("m 0x%x prev_addr 0x%x, current addr 0x%x\n", + m, prev_addr, m->phys_addr); + printf("pages 0x%x page_count %d\n", pages, page_count); + panic("vm_page_verify_contiguous: not contiguous!"); + } + prev_addr = m->phys_addr; + ++page_count; + } + if (page_count != npages) { + printf("pages 0x%x actual count 0x%x but requested 0x%x\n", + pages, page_count, npages); + panic("vm_page_verify_contiguous: count error"); + } + return 1; +} +#endif /* MACH_ASSERT */ + + +/* + * Find a region large enough to contain at least npages + * of contiguous physical memory. + * + * Requirements: + * - Called while holding vm_page_queue_free_lock. + * - Doesn't respect vm_page_free_reserved; caller + * must not ask for more pages than are legal to grab. + * + * Returns a pointer to a list of gobbled pages or VM_PAGE_NULL. 
+ * + */ +static vm_page_t +vm_page_find_contiguous( + int npages) +{ + vm_page_t m, *contig_prev, *prev_ptr; + vm_offset_t prev_addr; + unsigned int contig_npages; + vm_page_t list; + + if (npages < 1) + return VM_PAGE_NULL; + + prev_addr = vm_page_queue_free->phys_addr - (page_size + 1); + prev_ptr = &vm_page_queue_free; + for (m = vm_page_queue_free; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { + + if (m->phys_addr != prev_addr + page_size) { + /* + * Whoops! Pages aren't contiguous. Start over. + */ + contig_npages = 0; + contig_prev = prev_ptr; + } + + if (++contig_npages == npages) { + /* + * Chop these pages out of the free list. + * Mark them all as gobbled. + */ + list = *contig_prev; + *contig_prev = NEXT_PAGE(m); + SET_NEXT_PAGE(m, VM_PAGE_NULL); + for (m = list; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { + assert(m->free); + assert(!m->wanted); + m->free = FALSE; + m->gobbled = TRUE; + } + vm_page_free_count -= npages; + if (vm_page_free_count < vm_page_free_count_minimum) + vm_page_free_count_minimum = vm_page_free_count; + vm_page_wire_count += npages; + vm_page_gobble_count += npages; + cpm_counter(++vpfc_satisfied); + assert(vm_page_verify_contiguous(list, contig_npages)); + return list; + } + + assert(contig_npages < npages); + prev_ptr = (vm_page_t *) &m->pageq.next; + prev_addr = m->phys_addr; + } + cpm_counter(++vpfc_failed); + return VM_PAGE_NULL; +} + +/* + * Allocate a list of contiguous, wired pages. + */ +kern_return_t +cpm_allocate( + vm_size_t size, + vm_page_t *list, + boolean_t wire) +{ + register vm_page_t m; + vm_page_t *first_contig; + vm_page_t free_list, pages; + unsigned int npages, n1pages; + int vm_pages_available; + + if (size % page_size != 0) + return KERN_INVALID_ARGUMENT; + + vm_page_lock_queues(); + mutex_lock(&vm_page_queue_free_lock); + + /* + * Should also take active and inactive pages + * into account... One day... 
+ */ + vm_pages_available = vm_page_free_count - vm_page_free_reserved; + + if (size > vm_pages_available * page_size) { + mutex_unlock(&vm_page_queue_free_lock); + return KERN_RESOURCE_SHORTAGE; + } + + vm_page_free_list_sort(); + + npages = size / page_size; + + /* + * Obtain a pointer to a subset of the free + * list large enough to satisfy the request; + * the region will be physically contiguous. + */ + pages = vm_page_find_contiguous(npages); + if (pages == VM_PAGE_NULL) { + mutex_unlock(&vm_page_queue_free_lock); + vm_page_unlock_queues(); + return KERN_NO_SPACE; + } + + mutex_unlock(&vm_page_queue_free_lock); + + /* + * Walk the returned list, wiring the pages. + */ + if (wire == TRUE) + for (m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { + /* + * Essentially inlined vm_page_wire. + */ + assert(!m->active); + assert(!m->inactive); + assert(!m->private); + assert(!m->fictitious); + assert(m->wire_count == 0); + assert(m->gobbled); + m->gobbled = FALSE; + m->wire_count++; + --vm_page_gobble_count; + } + vm_page_unlock_queues(); + + /* + * The CPM pages should now be available and + * ordered by ascending physical address. + */ + assert(vm_page_verify_contiguous(pages, npages)); + + *list = pages; + return KERN_SUCCESS; +} + + +#include +#if MACH_VM_DEBUG + +#include +#include + +/* + * Routine: vm_page_info + * Purpose: + * Return information about the global VP table. + * Fills the buffer with as much information as possible + * and returns the desired size of the buffer. + * Conditions: + * Nothing locked. The caller should provide + * possibly-pageable memory. 
+ */ + +unsigned int +vm_page_info( + hash_info_bucket_t *info, + unsigned int count) +{ + int i; + + if (vm_page_bucket_count < count) + count = vm_page_bucket_count; + + for (i = 0; i < count; i++) { + vm_page_bucket_t *bucket = &vm_page_buckets[i]; + unsigned int bucket_count = 0; + vm_page_t m; + + simple_lock(&vm_page_bucket_lock); + for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next) + bucket_count++; + simple_unlock(&vm_page_bucket_lock); + + /* don't touch pageable memory while holding locks */ + info[i].hib_count = bucket_count; + } + + return vm_page_bucket_count; +} +#endif /* MACH_VM_DEBUG */ + +#include +#if MACH_KDB + +#include +#include +#define printf kdbprintf + +/* + * Routine: vm_page_print [exported] + */ +void +vm_page_print( + vm_page_t p) +{ + extern db_indent; + + iprintf("page 0x%x\n", p); + + db_indent += 2; + + iprintf("object=0x%x", p->object); + printf(", offset=0x%x", p->offset); + printf(", wire_count=%d", p->wire_count); + printf(", prep_count=%d", p->prep_count); + printf(", pin_count=%d\n", p->pin_count); + + iprintf("%sinactive, %sactive, %sgobbled, %slaundry, %sfree, %sref, %sdiscard\n", + (p->inactive ? "" : "!"), + (p->active ? "" : "!"), + (p->gobbled ? "" : "!"), + (p->laundry ? "" : "!"), + (p->free ? "" : "!"), + (p->reference ? "" : "!"), + (p->discard_request ? "" : "!")); + iprintf("%sbusy, %swanted, %stabled, %sfictitious, %sprivate, %sprecious\n", + (p->busy ? "" : "!"), + (p->wanted ? "" : "!"), + (p->tabled ? "" : "!"), + (p->fictitious ? "" : "!"), + (p->private ? "" : "!"), + (p->precious ? "" : "!")); + iprintf("%sabsent, %serror, %sdirty, %scleaning, %spageout, %sclustered\n", + (p->absent ? "" : "!"), + (p->error ? "" : "!"), + (p->dirty ? "" : "!"), + (p->cleaning ? "" : "!"), + (p->pageout ? "" : "!"), + (p->clustered ? "" : "!")); + iprintf("%slock_supplied, %soverwriting, %srestart, %sunusual, %slimbo\n", + (p->lock_supplied ? "" : "!"), + (p->overwriting ? "" : "!"), + (p->restart ? 
"" : "!"), + (p->unusual ? "" : "!"), + (p->limbo ? "" : "!")); + + iprintf("phys_addr=0x%x", p->phys_addr); + printf(", page_error=0x%x", p->page_error); + printf(", page_lock=0x%x", p->page_lock); + printf(", unlock_request=%d\n", p->unlock_request); + + db_indent -= 2; +} +#endif /* MACH_KDB */ diff --git a/osfmk/vm/vm_shared_memory_server.c b/osfmk/vm/vm_shared_memory_server.c new file mode 100644 index 000000000..626fb5fb9 --- /dev/null +++ b/osfmk/vm/vm_shared_memory_server.c @@ -0,0 +1,742 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * + * File: vm/vm_shared_memory_server.c + * Author: Chris Youngworth + * + * Support routines for an in-kernel shared memory allocator + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +vm_offset_t shared_file_text_region; +vm_offset_t shared_file_data_region; + +ipc_port_t shared_text_region_handle; +ipc_port_t shared_data_region_handle; +vm_offset_t shared_file_mapping_array = 0; +shared_region_mapping_t system_shared_region; + +ipc_port_t sfma_handle = NULL; +zone_t lsf_zone; + +int shared_file_available_hash_ele; + +kern_return_t +shared_file_create_system_region( + shared_region_mapping_t *shared_region) +{ + ipc_port_t text_handle; + ipc_port_t data_handle; + long text_size; + long data_size; + vm_offset_t mapping_array; + kern_return_t kret; + + text_size = 0x10000000; + data_size = 0x10000000; + + kret = shared_file_init(&text_handle, + text_size, &data_handle, data_size, &mapping_array); + if(kret) + return kret; + kret = shared_region_mapping_create(text_handle, + text_size, data_handle, data_size, mapping_array, + GLOBAL_SHARED_TEXT_SEGMENT, shared_region, + 0x9000000, 0x9000000); + if(kret) + return kret; + (*shared_region)->flags = 0; + return KERN_SUCCESS; +} + +shared_file_boot_time_init( +) +{ + long shared_text_region_size; + long shared_data_region_size; + + shared_text_region_size = 0x10000000; + shared_data_region_size = 0x10000000; + shared_file_init(&shared_text_region_handle, + shared_text_region_size, &shared_data_region_handle, + shared_data_region_size, &shared_file_mapping_array); + shared_region_mapping_create(shared_text_region_handle, + shared_text_region_size, shared_data_region_handle, + shared_data_region_size, shared_file_mapping_array, + GLOBAL_SHARED_TEXT_SEGMENT, &system_shared_region, + 0x9000000, 0x9000000); + system_shared_region->flags = SHARED_REGION_SYSTEM; + vm_set_shared_region(current_task(), system_shared_region); + +} + + 
+/* called at boot time, allocates two regions, each 256 megs in size */ +/* these regions are later mapped into task spaces, allowing them to */ +/* share the contents of the regions. shared_file_init is part of */ +/* a shared_memory_server which not only allocates the backing maps */ +/* but also coordinates requests for space. */ + + +kern_return_t +shared_file_init( + ipc_port_t *shared_text_region_handle, + vm_size_t text_region_size, + ipc_port_t *shared_data_region_handle, + vm_size_t data_region_size, + vm_offset_t *mapping_array) +{ + vm_offset_t aligned_address; + shared_file_info_t *sf_head; + vm_offset_t table_mapping_address; + int data_table_size; + int hash_size; + int i; + kern_return_t kret; + + vm_object_t buf_object; + vm_map_entry_t entry; + vm_size_t alloced; + vm_offset_t b; + vm_page_t p; + + /* create text and data maps/regions */ + if(kret = vm_region_object_create(kernel_map, + text_region_size, + shared_text_region_handle)) { + + return kret; + } + if(kret = vm_region_object_create(kernel_map, + data_region_size, + shared_data_region_handle)) { + ipc_port_release_send(*shared_text_region_handle); + return kret; + } + + data_table_size = data_region_size >> 9; + hash_size = data_region_size >> 14; + table_mapping_address = data_region_size - data_table_size; + + if(shared_file_mapping_array == 0) { + buf_object = vm_object_allocate(data_table_size); + + if(vm_map_find_space(kernel_map, &shared_file_mapping_array, + data_table_size, 0, &entry) != KERN_SUCCESS) { + panic("shared_file_init: no space"); + } + *mapping_array = shared_file_mapping_array; + vm_map_unlock(kernel_map); + entry->object.vm_object = buf_object; + entry->offset = 0; + + for (b = *mapping_array, alloced = 0; + alloced < (hash_size + + round_page(sizeof(struct sf_mapping))); + alloced += PAGE_SIZE, b += PAGE_SIZE) { + vm_object_lock(buf_object); + p = vm_page_alloc(buf_object, alloced); + if (p == VM_PAGE_NULL) { + panic("shared_file_init: no space"); + } + p->busy = 
FALSE; + vm_object_unlock(buf_object); + pmap_enter(kernel_pmap, b, p->phys_addr, + VM_PROT_READ | VM_PROT_WRITE, TRUE); + } + + + /* initialize loaded file array */ + sf_head = (shared_file_info_t *)*mapping_array; + sf_head->hash = (queue_head_t *) + (((int)*mapping_array) + + sizeof(struct shared_file_info)); + sf_head->hash_size = hash_size/sizeof(queue_head_t); + mutex_init(&(sf_head->lock), (ETAP_VM_MAP)); + sf_head->hash_init = FALSE; + + + mach_make_memory_entry(kernel_map, &data_table_size, + *mapping_array, VM_PROT_READ, &sfma_handle, + NULL); + + if (vm_map_wire(kernel_map, *mapping_array, + *mapping_array + + (hash_size + round_page(sizeof(struct sf_mapping))), + VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) { + panic("shared_file_init: No memory for data table"); + } + + lsf_zone = zinit(sizeof(struct load_file_ele), + data_table_size - + (hash_size + round_page(sizeof(struct sf_mapping))), + 0, "load_file_server"); + + zone_change(lsf_zone, Z_EXHAUST, TRUE); + zone_change(lsf_zone, Z_COLLECT, FALSE); + zone_change(lsf_zone, Z_EXPAND, FALSE); + zone_change(lsf_zone, Z_FOREIGN, TRUE); + } else { + *mapping_array = shared_file_mapping_array; + } + + vm_map(((vm_named_entry_t) + (*shared_data_region_handle)->ip_kobject)->backing.map, + &table_mapping_address, + data_table_size, 0, SHARED_LIB_ALIAS, + sfma_handle, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + +} + +/* A call made from user space, copyin_shared_file requires the user to */ +/* provide the address and size of a mapped file, the full path name of */ +/* that file and a list of offsets to be mapped into shared memory. */ +/* By requiring that the file be pre-mapped, copyin_shared_file can */ +/* guarantee that the file is neither deleted nor changed after the user */ +/* begins the call. 
*/ + +kern_return_t +copyin_shared_file( + vm_offset_t mapped_file, + vm_size_t mapped_file_size, + vm_offset_t *base_address, + int map_cnt, + sf_mapping_t *mappings, + vm_object_t file_object, + shared_region_task_mappings_t sm_info, + int *flags) +{ + vm_map_entry_t entry; + shared_file_info_t *shared_file_header; + load_struct_t *file_entry; + loaded_mapping_t *file_mapping; + boolean_t alternate; + int i; + kern_return_t ret; + + /* wire hash entry pool only as needed, since we are the only */ + /* users, we take a few liberties with the population of our */ + /* zone. */ + static int allocable_hash_pages; + static vm_offset_t hash_cram_address; + + + shared_file_header = (shared_file_info_t *)sm_info->region_mappings; + + mutex_lock(&shared_file_header->lock); + + /* If this is the first call to this routine, take the opportunity */ + /* to initialize the hash table which will be used to look-up */ + /* mappings based on the file object */ + + if(shared_file_header->hash_init == FALSE) { + vm_size_t hash_table_size; + vm_size_t hash_table_offset; + + hash_table_size = (shared_file_header->hash_size) + * sizeof(struct queue_entry); + hash_table_offset = hash_table_size + + round_page(sizeof(struct sf_mapping)); + for (i = 0; i < shared_file_header->hash_size; i++) + queue_init(&shared_file_header->hash[i]); + + allocable_hash_pages = + ((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE; + hash_cram_address = + sm_info->region_mappings + hash_table_offset; + shared_file_available_hash_ele = 0; + + shared_file_header->hash_init = TRUE; + } + + if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) { + int cram_size; + + cram_size = allocable_hash_pages > 3 ? 
+ 3 : allocable_hash_pages; + allocable_hash_pages -= cram_size; + cram_size = cram_size * PAGE_SIZE; + if (vm_map_wire(kernel_map, hash_cram_address, + hash_cram_address+cram_size, + VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) { + panic("shared_file_init: No memory for data table"); + } + zcram(lsf_zone, hash_cram_address, cram_size); + shared_file_available_hash_ele + += cram_size/sizeof(struct load_file_ele); + hash_cram_address += cram_size; + } + + + /* Find the entry in the map associated with the current mapping */ + /* of the file object */ + + if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) { + vm_object_t mapped_object; + if(entry->is_sub_map) { + mutex_unlock(&shared_file_header->lock); + return KERN_INVALID_ADDRESS; + } + mapped_object = entry->object.vm_object; + while(mapped_object->shadow != NULL) { + mapped_object = mapped_object->shadow; + } + /* check to see that the file object passed is indeed the */ + /* same as the mapped object passed */ + if(file_object != mapped_object) { + if(sm_info->flags & SHARED_REGION_SYSTEM) { + mutex_unlock(&shared_file_header->lock); + return KERN_PROTECTION_FAILURE; + } else { + file_object = mapped_object; + } + } + } else { + mutex_unlock(&shared_file_header->lock); + return KERN_INVALID_ADDRESS; + } + + alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE; + + if (file_entry = lsf_hash_lookup(shared_file_header->hash, + (void *) file_object, shared_file_header->hash_size, + alternate, sm_info)) { + /* File is loaded, check the load manifest for exact match */ + /* we simplify by requiring that the elements be the same */ + /* size and in the same order rather than checking for */ + /* semantic equivalence. */ + + /* If the file is being loaded in the alternate */ + /* area, one load to alternate is allowed per mapped */ + /* object the base address is passed back to the */ + /* caller and the mappings field is filled in. 
If the */ + /* caller does not pass the precise mappings_cnt */ + /* and the Alternate is already loaded, an error */ + /* is returned. */ + i = 0; + file_mapping = file_entry->mappings; + while(file_mapping != NULL) { + if(i>=map_cnt) { + mutex_unlock(&shared_file_header->lock); + return KERN_INVALID_ARGUMENT; + } + if(((mappings[i].mapping_offset) + & SHARED_DATA_REGION_MASK) != + file_mapping->mapping_offset || + mappings[i].size != + file_mapping->size || + mappings[i].file_offset != + file_mapping->file_offset || + mappings[i].protection != + file_mapping->protection) { + break; + } + file_mapping = file_mapping->next; + i++; + } + if(i!=map_cnt) { + mutex_unlock(&shared_file_header->lock); + return KERN_INVALID_ARGUMENT; + } + *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK) + + file_entry->base_address; + *flags = SF_PREV_LOADED; + mutex_unlock(&shared_file_header->lock); + return KERN_SUCCESS; + } else { + /* File is not loaded, lets attempt to load it */ + ret = lsf_load(mapped_file, mapped_file_size, base_address, + mappings, map_cnt, + (void *)file_object, + *flags, sm_info); + *flags = 0; + if(ret == KERN_NO_SPACE) { + shared_region_mapping_t regions; + regions = (shared_region_mapping_t)sm_info->self; + regions->flags |= SHARED_REGION_FULL; + if(regions == system_shared_region) { + shared_file_boot_time_init(); + /* current task must stay wit its current */ + /* regions */ + vm_set_shared_region(current_task(), regions); + } + } + mutex_unlock(&shared_file_header->lock); + return ret; + } +} + +/* A hash lookup function for the list of loaded files in */ +/* shared_memory_server space. 
*/ + +load_struct_t * +lsf_hash_lookup( + queue_head_t *hash_table, + void *file_object, + int size, + boolean_t alternate, + shared_region_task_mappings_t sm_info) +{ + register queue_t bucket; + load_struct_t *entry; + shared_region_mapping_t target_region; + int depth; + + bucket = &(hash_table[load_file_hash((int)file_object, size)]); + for (entry = (load_struct_t *)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (load_struct_t *)queue_next(&entry->links)) { + if (entry->file_object == (int)file_object) { + target_region = (shared_region_mapping_t)sm_info->self; + depth = target_region->depth; + while(target_region) { + if((!(sm_info->self)) || + ((target_region == entry->regions_instance) && + (target_region->depth >= entry->depth))) { + if(alternate) { + if (entry->base_address >= + sm_info->alternate_base) + return entry; + } else { + if (entry->base_address < + sm_info->alternate_base) + return entry; + } + } + if(target_region->object_chain) { + target_region = (shared_region_mapping_t) + target_region->object_chain->object_chain_region; + depth = target_region->object_chain->depth; + } else { + target_region = NULL; + } + } + } + } + + return (load_struct_t *)0; +} + +load_struct_t * +lsf_remove_regions_mappings( + shared_region_mapping_t region, + shared_region_task_mappings_t sm_info) +{ + int i; + register queue_t bucket; + shared_file_info_t *shared_file_header; + load_struct_t *entry; + load_struct_t *next_entry; + load_struct_t *prev_entry; + + shared_file_header = (shared_file_info_t *)sm_info->region_mappings; + + mutex_lock(&shared_file_header->lock); + if(shared_file_header->hash_init == FALSE) { + mutex_unlock(&shared_file_header->lock); + return NULL; + } + for(i = 0; ihash_size; i++) { + bucket = &shared_file_header->hash[i]; + for (entry = (load_struct_t *)queue_first(bucket); + !queue_end(bucket, &entry->links);) { + next_entry = (load_struct_t *)queue_next(&entry->links); + if(region == entry->regions_instance) { + 
lsf_unload((void *)entry->file_object, + entry->base_address, sm_info); + } + entry = next_entry; + } + } + mutex_unlock(&shared_file_header->lock); +} + +/* Removes a map_list, (list of loaded extents) for a file from */ +/* the loaded file hash table. */ + +load_struct_t * +lsf_hash_delete( + void *file_object, + vm_offset_t base_offset, + shared_region_task_mappings_t sm_info) +{ + register queue_t bucket; + shared_file_info_t *shared_file_header; + load_struct_t *entry; + load_struct_t *prev_entry; + + shared_file_header = (shared_file_info_t *)sm_info->region_mappings; + + bucket = &shared_file_header->hash + [load_file_hash((int)file_object, shared_file_header->hash_size)]; + + for (entry = (load_struct_t *)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (load_struct_t *)queue_next(&entry->links)) { + if((!(sm_info->self)) || ((shared_region_mapping_t) + sm_info->self == entry->regions_instance)) { + if ((entry->file_object == (int) file_object) && + (entry->base_address == base_offset)) { + queue_remove(bucket, entry, + load_struct_ptr_t, links); + return entry; + } + } + } + + return (load_struct_t *)0; +} + +/* Inserts a new map_list, (list of loaded file extents), into the */ +/* server loaded file hash table. */ + +void +lsf_hash_insert( + load_struct_t *entry, + shared_region_task_mappings_t sm_info) +{ + shared_file_info_t *shared_file_header; + + shared_file_header = (shared_file_info_t *)sm_info->region_mappings; + queue_enter(&shared_file_header->hash + [load_file_hash(entry->file_object, + shared_file_header->hash_size)], + entry, load_struct_ptr_t, links); +} + +/* Looks up the file type requested. If already loaded and the */ +/* file extents are an exact match, returns Success. If not */ +/* loaded attempts to load the file extents at the given offsets */ +/* if any extent fails to load or if the file was already loaded */ +/* in a different configuration, lsf_load fails. 
*/ + +kern_return_t +lsf_load( + vm_offset_t mapped_file, + vm_size_t mapped_file_size, + vm_offset_t *base_address, + sf_mapping_t *mappings, + int map_cnt, + void *file_object, + int flags, + shared_region_task_mappings_t sm_info) +{ + + load_struct_t *entry; + vm_map_copy_t copy_object; + loaded_mapping_t *file_mapping; + loaded_mapping_t **tptr; + int i; + ipc_port_t local_map; + vm_offset_t original_alt_load_next; + vm_offset_t alternate_load_next; + + entry = (load_struct_t *)zalloc(lsf_zone); + shared_file_available_hash_ele--; + entry->file_object = (int)file_object; + entry->mapping_cnt = map_cnt; + entry->mappings = NULL; + entry->links.prev = (queue_entry_t) 0; + entry->links.next = (queue_entry_t) 0; + entry->regions_instance = (shared_region_mapping_t)sm_info->self; + entry->depth=((shared_region_mapping_t)sm_info->self)->depth; + + lsf_hash_insert(entry, sm_info); + tptr = &(entry->mappings); + + + alternate_load_next = sm_info->alternate_next; + original_alt_load_next = alternate_load_next; + if (flags & ALTERNATE_LOAD_SITE) { + int max_loadfile_offset; + + *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) + + sm_info->alternate_next; + max_loadfile_offset = 0; + for(i = 0; i + max_loadfile_offset) { + max_loadfile_offset = + (mappings[i].mapping_offset + & SHARED_TEXT_REGION_MASK) + + mappings[i].size; + } + } + if((alternate_load_next + round_page(max_loadfile_offset)) >= + (sm_info->data_size - (sm_info->data_size>>9))) { + + return KERN_NO_SPACE; + } + alternate_load_next += round_page(max_loadfile_offset); + + } else { + if (((*base_address) & SHARED_TEXT_REGION_MASK) > + sm_info->alternate_base) { + entry->base_address = + (*base_address) & SHARED_TEXT_REGION_MASK; + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_INVALID_ARGUMENT; + } + } + + entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK; + + /* copyin mapped file data */ + for(i = 0; idata_region; + region_mask = SHARED_DATA_REGION_MASK; + 
if((mappings[i].mapping_offset + & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) { + lsf_unload(file_object, + entry->base_address, sm_info); + return KERN_INVALID_ARGUMENT; + } + } else { + region_mask = SHARED_TEXT_REGION_MASK; + local_map = (ipc_port_t)sm_info->text_region; + if(mappings[i].mapping_offset + & GLOBAL_SHARED_SEGMENT_MASK) { + lsf_unload(file_object, + entry->base_address, sm_info); + return KERN_INVALID_ARGUMENT; + } + } + if(!(mappings[i].protection & VM_PROT_ZF) + && ((mapped_file + mappings[i].file_offset + + mappings[i].size) > + (mapped_file + mapped_file_size))) { + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_INVALID_ARGUMENT; + } + target_address = ((mappings[i].mapping_offset) & region_mask) + + entry->base_address; + if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject) + ->backing.map, &target_address, + mappings[i].size, FALSE)) { + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_FAILURE; + } + target_address = ((mappings[i].mapping_offset) & region_mask) + + entry->base_address; + if(!(mappings[i].protection & VM_PROT_ZF)) { + if(vm_map_copyin(current_map(), + mapped_file + mappings[i].file_offset, + round_page(mappings[i].size), FALSE, ©_object)) { + vm_deallocate(((vm_named_entry_t)local_map->ip_kobject) + ->backing.map, target_address, mappings[i].size); + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_FAILURE; + } + if(vm_map_copy_overwrite(((vm_named_entry_t) + local_map->ip_kobject)->backing.map, target_address, + copy_object, FALSE)) { + vm_deallocate(((vm_named_entry_t)local_map->ip_kobject) + ->backing.map, target_address, mappings[i].size); + lsf_unload(file_object, entry->base_address, sm_info); + return KERN_FAILURE; + } + } + vm_map_protect(((vm_named_entry_t)local_map->ip_kobject) + ->backing.map, target_address, + round_page(target_address + mappings[i].size), + (mappings[i].protection & + (VM_PROT_READ | VM_PROT_EXECUTE)), + TRUE); + 
vm_map_protect(((vm_named_entry_t)local_map->ip_kobject) + ->backing.map, target_address, + round_page(target_address + mappings[i].size), + (mappings[i].protection & + (VM_PROT_READ | VM_PROT_EXECUTE)), + FALSE); + file_mapping = (loaded_mapping_t *)zalloc(lsf_zone); + if(file_mapping == 0) + panic("lsf_load: OUT OF MAPPINGS!"); + shared_file_available_hash_ele--; + file_mapping->mapping_offset = (mappings[i].mapping_offset) + & region_mask; + file_mapping->size = mappings[i].size; + file_mapping->file_offset = mappings[i].file_offset; + file_mapping->protection = mappings[i].protection; + file_mapping->next = NULL; + *tptr = file_mapping; + tptr = &(file_mapping->next); + } + shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next); + return KERN_SUCCESS; + +} + + +/* finds the file_object extent list in the shared memory hash table */ +/* If one is found the associated extents in shared memory are deallocated */ +/* and the extent list is freed */ + +void +lsf_unload( + void *file_object, + vm_offset_t base_offset, + shared_region_task_mappings_t sm_info) +{ + load_struct_t *entry; + ipc_port_t local_map; + loaded_mapping_t *map_ele; + loaded_mapping_t *back_ptr; + + entry = lsf_hash_delete(file_object, base_offset, sm_info); + if(entry) { + map_ele = entry->mappings; + while(map_ele != NULL) { + if(map_ele->protection & VM_PROT_COW) { + local_map = (ipc_port_t)sm_info->data_region; + } else { + local_map = (ipc_port_t)sm_info->text_region; + } + vm_deallocate(((vm_named_entry_t)local_map->ip_kobject) + ->backing.map, entry->base_address + + map_ele->mapping_offset, + map_ele->size); + back_ptr = map_ele; + map_ele = map_ele->next; + zfree(lsf_zone, (vm_offset_t)back_ptr); + shared_file_available_hash_ele++; + } + zfree(lsf_zone, (vm_offset_t)entry); + shared_file_available_hash_ele++; + } +} diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c new file mode 100644 index 000000000..371898e02 --- /dev/null +++ b/osfmk/vm/vm_user.c @@ -0,0 +1,2711 @@ 
+/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ +/* + * File: vm/vm_user.c + * Author: Avadis Tevanian, Jr., Michael Wayne Young + * + * User-exported virtual memory functions. + */ +#ifdef MACH_BSD +/* remove after component interface available */ +extern int vnode_pager_workaround; +#endif + +#include +#include +#include +#include /* to get vm_address_t */ +#include +#include /* to get pointer_t */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + + +vm_size_t upl_offset_to_pagelist = 0; + +#if VM_CPM +#include +#endif /* VM_CPM */ + +ipc_port_t dynamic_pager_control_port=NULL; + +/* + * vm_allocate allocates "zero fill" memory in the specfied + * map. + */ +kern_return_t +vm_allocate( + register vm_map_t map, + register vm_offset_t *addr, + register vm_size_t size, + int flags) +{ + kern_return_t result; + boolean_t anywhere = VM_FLAGS_ANYWHERE & flags; + + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + if (size == 0) { + *addr = 0; + return(KERN_SUCCESS); + } + + if (anywhere) + *addr = vm_map_min(map); + else + *addr = trunc_page(*addr); + size = round_page(size); + if (size == 0) { + return(KERN_INVALID_ARGUMENT); + } + + result = vm_map_enter( + map, + addr, + size, + (vm_offset_t)0, + flags, + VM_OBJECT_NULL, + (vm_object_offset_t)0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); + + return(result); +} + +/* + * vm_deallocate deallocates the specified range of addresses in the + * specified address map. 
+ */ +kern_return_t +vm_deallocate( + register vm_map_t map, + vm_offset_t start, + vm_size_t size) +{ + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + if (size == (vm_offset_t) 0) + return(KERN_SUCCESS); + + return(vm_map_remove(map, trunc_page(start), + round_page(start+size), VM_MAP_NO_FLAGS)); +} + +/* + * vm_inherit sets the inheritance of the specified range in the + * specified map. + */ +kern_return_t +vm_inherit( + register vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_inherit_t new_inheritance) +{ + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + if (new_inheritance > VM_INHERIT_LAST_VALID) + return(KERN_INVALID_ARGUMENT); + + return(vm_map_inherit(map, + trunc_page(start), + round_page(start+size), + new_inheritance)); +} + +/* + * vm_protect sets the protection of the specified range in the + * specified map. + */ + +kern_return_t +vm_protect( + register vm_map_t map, + vm_offset_t start, + vm_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection) +{ + if ((map == VM_MAP_NULL) || + (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) + return(KERN_INVALID_ARGUMENT); + + return(vm_map_protect(map, + trunc_page(start), + round_page(start+size), + new_protection, + set_maximum)); +} + +/* + * Handle machine-specific attributes for a mapping, such + * as cachability, migrability, etc. 
+ */ +kern_return_t +vm_machine_attribute( + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ +{ + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + return vm_map_machine_attribute(map, address, size, attribute, value); +} + +kern_return_t +vm_read( + vm_map_t map, + vm_address_t address, + vm_size_t size, + pointer_t *data, + mach_msg_type_number_t *data_size) +{ + kern_return_t error; + vm_map_copy_t ipc_address; + + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + if ((error = vm_map_copyin(map, + address, + size, + FALSE, /* src_destroy */ + &ipc_address)) == KERN_SUCCESS) { + *data = (pointer_t) ipc_address; + *data_size = size; + } + return(error); +} + +kern_return_t +vm_read_list( + vm_map_t map, + vm_read_entry_t data_list, + mach_msg_type_number_t count) +{ + mach_msg_type_number_t i; + kern_return_t error; + vm_map_copy_t ipc_address; + + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + for(i=0; imap, + &(data_list[i].address), + (vm_map_copy_t) ipc_address); + if(error != KERN_SUCCESS) { + data_list[i].address = (vm_address_t)0; + data_list[i].size = (vm_size_t)0; + break; + } + } + } + return(error); +} + +/* + * This routine reads from the specified map and overwrites part of the current + * activation's map. In making an assumption that the current thread is local, + * it is no longer cluster-safe without a fully supportive local proxy thread/ + * task (but we don't support cluster's anymore so this is moot). 
+ */ + +#define VM_OVERWRITE_SMALL 512 + +kern_return_t +vm_read_overwrite( + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_address_t data, + vm_size_t *data_size) +{ + struct { + long align; + char buf[VM_OVERWRITE_SMALL]; + } inbuf; + vm_map_t oldmap; + kern_return_t error = KERN_SUCCESS; + vm_map_copy_t copy; + + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + if (size <= VM_OVERWRITE_SMALL) { + if(vm_map_read_user(map, (vm_offset_t)address, + (vm_offset_t)&inbuf, size)) { + error = KERN_INVALID_ADDRESS; + } else { + if(vm_map_write_user(current_map(), + (vm_offset_t)&inbuf, (vm_offset_t)data, size)) + error = KERN_INVALID_ADDRESS; + } + } + else { + if ((error = vm_map_copyin(map, + address, + size, + FALSE, /* src_destroy */ + ©)) == KERN_SUCCESS) { + if ((error = vm_map_copy_overwrite( + current_act()->map, + data, + copy, + FALSE)) == KERN_SUCCESS) { + } + else { + vm_map_copy_discard(copy); + } + } + } + *data_size = size; + return(error); +} + + + + +/*ARGSUSED*/ +kern_return_t +vm_write( + vm_map_t map, + vm_address_t address, + vm_offset_t data, + mach_msg_type_number_t size) +{ + if (map == VM_MAP_NULL) + return KERN_INVALID_ARGUMENT; + + return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data, + FALSE /* interruptible XXX */); +} + +kern_return_t +vm_copy( + vm_map_t map, + vm_address_t source_address, + vm_size_t size, + vm_address_t dest_address) +{ + vm_map_copy_t copy; + kern_return_t kr; + + if (map == VM_MAP_NULL) + return KERN_INVALID_ARGUMENT; + + kr = vm_map_copyin(map, source_address, size, + FALSE, ©); + if (kr != KERN_SUCCESS) + return kr; + + kr = vm_map_copy_overwrite(map, dest_address, copy, + FALSE /* interruptible XXX */); + if (kr != KERN_SUCCESS) { + vm_map_copy_discard(copy); + return kr; + } + + return KERN_SUCCESS; +} + +/* + * Routine: vm_map + */ +kern_return_t +vm_map_64( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t initial_size, + vm_offset_t mask, + int flags, + ipc_port_t 
port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + register + vm_object_t object; + vm_prot_t prot; + vm_object_size_t size = (vm_object_size_t)initial_size; + kern_return_t result; + + /* + * Check arguments for validity + */ + if ((target_map == VM_MAP_NULL) || + (cur_protection & ~VM_PROT_ALL) || + (max_protection & ~VM_PROT_ALL) || + (inheritance > VM_INHERIT_LAST_VALID) || + size == 0) + return(KERN_INVALID_ARGUMENT); + + /* + * Find the vm object (if any) corresponding to this port. + */ + if (!IP_VALID(port)) { + object = VM_OBJECT_NULL; + offset = 0; + copy = FALSE; + } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) { + vm_named_entry_t named_entry; + + named_entry = (vm_named_entry_t)port->ip_kobject; + /* a few checks to make sure user is obeying rules */ + if(size == 0) { + if(offset >= named_entry->size) + return(KERN_INVALID_RIGHT); + size = named_entry->size - offset; + } + if((named_entry->protection & max_protection) != max_protection) + return(KERN_INVALID_RIGHT); + if((named_entry->protection & cur_protection) != cur_protection) + return(KERN_INVALID_RIGHT); + if(named_entry->size < (offset + size)) + return(KERN_INVALID_ARGUMENT); + + /* the callers parameter offset is defined to be the */ + /* offset from beginning of named entry offset in object */ + offset = offset + named_entry->offset; + + named_entry_lock(named_entry); + if(named_entry->is_sub_map) { + vm_map_entry_t map_entry; + + named_entry_unlock(named_entry); + *address = trunc_page(*address); + size = round_page(size); + vm_object_reference(vm_submap_object); + if ((result = vm_map_enter(target_map, + address, size, mask, flags, + vm_submap_object, 0, + FALSE, + cur_protection, max_protection, inheritance + )) != KERN_SUCCESS) { + vm_object_deallocate(vm_submap_object); + } else { + char alias; + + VM_GET_FLAGS_ALIAS(flags, alias); + if ((alias == VM_MEMORY_SHARED_PMAP) && + !copy) { + 
vm_map_submap(target_map, *address, + (*address) + size, + named_entry->backing.map, + (vm_offset_t)offset, TRUE); + } else { + vm_map_submap(target_map, *address, + (*address) + size, + named_entry->backing.map, + (vm_offset_t)offset, FALSE); + } + if(copy) { + if(vm_map_lookup_entry( + target_map, *address, &map_entry)) { + map_entry->needs_copy = TRUE; + } + } + } + return(result); + + } else if(named_entry->object) { + /* This is the case where we are going to map */ + /* an already mapped object. If the object is */ + /* not ready it is internal. An external */ + /* object cannot be mapped until it is ready */ + /* we can therefore avoid the ready check */ + /* in this case. */ + named_entry_unlock(named_entry); + vm_object_reference(named_entry->object); + object = named_entry->object; + } else { + object = vm_object_enter(named_entry->backing.pager, + named_entry->size, + named_entry->internal, + FALSE, + FALSE); + if (object == VM_OBJECT_NULL) { + named_entry_unlock(named_entry); + return(KERN_INVALID_OBJECT); + } + named_entry->object = object; + named_entry_unlock(named_entry); + /* create an extra reference for the named entry */ + vm_object_reference(named_entry->object); + /* wait for object (if any) to be ready */ + if (object != VM_OBJECT_NULL) { + vm_object_lock(object); + while (!object->pager_ready) { + vm_object_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + } + } + } else { + if ((object = vm_object_enter(port, size, FALSE, FALSE, FALSE)) + == VM_OBJECT_NULL) + return(KERN_INVALID_OBJECT); + + /* wait for object (if any) to be ready */ + if (object != VM_OBJECT_NULL) { + vm_object_lock(object); + while (!object->pager_ready) { + vm_object_wait(object, + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); + vm_object_lock(object); + } + vm_object_unlock(object); + } + } + + *address = trunc_page(*address); + size = round_page(size); + + /* + * Perform the copy if requested + */ + 
+ if (copy) { + vm_object_t new_object; + vm_object_offset_t new_offset; + + result = vm_object_copy_strategically(object, offset, size, + &new_object, &new_offset, + &copy); + + + if (result == KERN_MEMORY_RESTART_COPY) { + boolean_t success; + boolean_t src_needs_copy; + + /* + * XXX + * We currently ignore src_needs_copy. + * This really is the issue of how to make + * MEMORY_OBJECT_COPY_SYMMETRIC safe for + * non-kernel users to use. Solution forthcoming. + * In the meantime, since we don't allow non-kernel + * memory managers to specify symmetric copy, + * we won't run into problems here. + */ + new_object = object; + new_offset = offset; + success = vm_object_copy_quickly(&new_object, + new_offset, size, + &src_needs_copy, + &copy); + assert(success); + result = KERN_SUCCESS; + } + /* + * Throw away the reference to the + * original object, as it won't be mapped. + */ + + vm_object_deallocate(object); + + if (result != KERN_SUCCESS) + return (result); + + object = new_object; + offset = new_offset; + } + + if ((result = vm_map_enter(target_map, + address, size, mask, flags, + object, offset, + copy, + cur_protection, max_protection, inheritance + )) != KERN_SUCCESS) + vm_object_deallocate(object); + return(result); +} + +/* temporary, until world build */ +vm_map( + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) +{ + vm_map_64(target_map, address, size, mask, flags, + port, (vm_object_offset_t)offset, copy, + cur_protection, max_protection, inheritance); +} + + +/* + * NOTE: this routine (and this file) will no longer require mach_host_server.h + * when vm_wire is changed to use ledgers. + */ +#include <mach/mach_host_server.h> +/* + * Specify that the range of the virtual address space + * of the target task must not cause page faults for + * the indicated accesses. 
+ * + * [ To unwire the pages, specify VM_PROT_NONE. ] + */ +kern_return_t +vm_wire( + host_priv_t host_priv, + register vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_prot_t access) +{ + kern_return_t rc; + + if (host_priv == HOST_PRIV_NULL) + return KERN_INVALID_HOST; + + assert(host_priv == &realhost); + + if (map == VM_MAP_NULL) + return KERN_INVALID_TASK; + + if (access & ~VM_PROT_ALL) + return KERN_INVALID_ARGUMENT; + + if (access != VM_PROT_NONE) { + rc = vm_map_wire(map, trunc_page(start), + round_page(start+size), access, TRUE); + } else { + rc = vm_map_unwire(map, trunc_page(start), + round_page(start+size), TRUE); + } + return rc; +} + +/* + * vm_msync + * + * Synchronises the memory range specified with its backing store + * image by either flushing or cleaning the contents to the appropriate + * memory manager engaging in a memory object synchronize dialog with + * the manager. The client doesn't return until the manager issues + * m_o_s_completed message. MIG Magically converts user task parameter + * to the task's address map. + * + * interpretation of sync_flags + * VM_SYNC_INVALIDATE - discard pages, only return precious + * pages to manager. + * + * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) + * - discard pages, write dirty or precious + * pages back to memory manager. + * + * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS + * - write dirty or precious pages back to + * the memory manager. + * + * NOTE + * The memory object attributes have not yet been implemented, this + * function will have to deal with the invalidate attribute + * + * RETURNS + * KERN_INVALID_TASK Bad task parameter + * KERN_INVALID_ARGUMENT both sync and async were specified. + * KERN_SUCCESS The usual. 
+ */ + +kern_return_t +vm_msync( + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_sync_t sync_flags) +{ + msync_req_t msr; + msync_req_t new_msr; + queue_chain_t req_q; /* queue of requests for this msync */ + vm_map_entry_t entry; + vm_size_t amount_left; + vm_object_offset_t offset; + boolean_t do_sync_req; + boolean_t modifiable; + + + if ((sync_flags & VM_SYNC_ASYNCHRONOUS) && + (sync_flags & VM_SYNC_SYNCHRONOUS)) + return(KERN_INVALID_ARGUMENT); + + /* + * align address and size on page boundaries + */ + size = round_page(address + size) - trunc_page(address); + address = trunc_page(address); + + if (map == VM_MAP_NULL) + return(KERN_INVALID_TASK); + + if (size == 0) + return(KERN_SUCCESS); + + queue_init(&req_q); + amount_left = size; + + while (amount_left > 0) { + vm_size_t flush_size; + vm_object_t object; + + vm_map_lock(map); + if (!vm_map_lookup_entry(map, address, &entry)) { + vm_size_t skip; + + /* + * hole in the address map. + */ + + /* + * Check for empty map. + */ + if (entry == vm_map_to_entry(map) && + entry->vme_next == entry) { + vm_map_unlock(map); + break; + } + /* + * Check that we don't wrap and that + * we have at least one real map entry. + */ + if ((map->hdr.nentries == 0) || + (entry->vme_next->vme_start < address)) { + vm_map_unlock(map); + break; + } + /* + * Move up to the next entry if needed + */ + skip = (entry->vme_next->vme_start - address); + if (skip >= amount_left) + amount_left = 0; + else + amount_left -= skip; + address = entry->vme_next->vme_start; + vm_map_unlock(map); + continue; + } + + offset = address - entry->vme_start; + + /* + * do we have more to flush than is contained in this + * entry ? 
+ */ + if (amount_left + entry->vme_start + offset > entry->vme_end) { + flush_size = entry->vme_end - + (entry->vme_start + offset); + } else { + flush_size = amount_left; + } + amount_left -= flush_size; + address += flush_size; + + if (entry->is_sub_map == TRUE) { + vm_map_t local_map; + vm_offset_t local_offset; + + local_map = entry->object.sub_map; + local_offset = entry->offset; + vm_map_unlock(map); + vm_msync( + local_map, + local_offset, + flush_size, + sync_flags); + continue; + } + object = entry->object.vm_object; + + /* + * We can't sync this object if the object has not been + * created yet + */ + if (object == VM_OBJECT_NULL) { + vm_map_unlock(map); + continue; + } + offset += entry->offset; + modifiable = (entry->protection & VM_PROT_WRITE) + != VM_PROT_NONE; + + vm_object_lock(object); + + if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) { + boolean_t kill_pages = 0; + + if (sync_flags & VM_SYNC_KILLPAGES) { + if (object->ref_count == 1 && !entry->needs_copy && !object->shadow) + kill_pages = 1; + else + kill_pages = -1; + } + if (kill_pages != -1) + memory_object_deactivate_pages(object, offset, + (vm_object_size_t)flush_size, kill_pages); + vm_object_unlock(object); + vm_map_unlock(map); + continue; + } + /* + * We can't sync this object if there isn't a pager. + * Don't bother to sync internal objects, since there can't + * be any "permanent" storage for these objects anyway. 
+ */ + if ((object->pager == IP_NULL) || (object->internal) || + (object->private)) { + vm_object_unlock(object); + vm_map_unlock(map); + continue; + } + /* + * keep reference on the object until syncing is done + */ + assert(object->ref_count > 0); + object->ref_count++; + vm_object_res_reference(object); + vm_object_unlock(object); + + vm_map_unlock(map); + + do_sync_req = memory_object_sync(object, + offset, + flush_size, + sync_flags & VM_SYNC_INVALIDATE, + (modifiable && + (sync_flags & VM_SYNC_SYNCHRONOUS || + sync_flags & VM_SYNC_ASYNCHRONOUS))); + + /* + * only send a m_o_s if we returned pages or if the entry + * is writable (ie dirty pages may have already been sent back) + */ + if (!do_sync_req && !modifiable) { + vm_object_deallocate(object); + continue; + } + msync_req_alloc(new_msr); + + vm_object_lock(object); + offset += object->paging_offset; + + new_msr->offset = offset; + new_msr->length = flush_size; + new_msr->object = object; + new_msr->flag = VM_MSYNC_SYNCHRONIZING; +re_iterate: + queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) { + /* + * need to check for overlapping entry, if found, wait + * on overlapping msr to be done, then reiterate + */ + msr_lock(msr); + if (msr->flag == VM_MSYNC_SYNCHRONIZING && + ((offset >= msr->offset && + offset < (msr->offset + msr->length)) || + (msr->offset >= offset && + msr->offset < (offset + flush_size)))) + { + assert_wait((event_t) msr,THREAD_INTERRUPTIBLE); + msr_unlock(msr); + vm_object_unlock(object); + thread_block((void (*)(void))0); + vm_object_lock(object); + goto re_iterate; + } + msr_unlock(msr); + }/* queue_iterate */ + + queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q); + vm_object_unlock(object); + + queue_enter(&req_q, new_msr, msync_req_t, req_q); + +#ifdef MACH_BSD + if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == + ((rpc_subsystem_t) &vnode_pager_workaround)) { + (void) vnode_pager_synchronize( + object->pager, + object->pager_request, + offset, + flush_size, 
+ sync_flags); + } else { + (void) memory_object_synchronize( + object->pager, + object->pager_request, + offset, + flush_size, + sync_flags); + } +#else + (void) memory_object_synchronize( + object->pager, + object->pager_request, + offset, + flush_size, + sync_flags); +#endif + }/* while */ + + /* + * wait for memory_object_sychronize_completed messages from pager(s) + */ + + while (!queue_empty(&req_q)) { + msr = (msync_req_t)queue_first(&req_q); + msr_lock(msr); + while(msr->flag != VM_MSYNC_DONE) { + assert_wait((event_t) msr, THREAD_INTERRUPTIBLE); + msr_unlock(msr); + thread_block((void (*)(void))0); + msr_lock(msr); + }/* while */ + queue_remove(&req_q, msr, msync_req_t, req_q); + msr_unlock(msr); + vm_object_deallocate(msr->object); + msync_req_free(msr); + }/* queue_iterate */ + + return(KERN_SUCCESS); +}/* vm_msync */ + + +/* + * task_wire + * + * Set or clear the map's wiring_required flag. This flag, if set, + * will cause all future virtual memory allocation to allocate + * user wired memory. Unwiring pages wired down as a result of + * this routine is done with the vm_wire interface. + */ +kern_return_t +task_wire( + vm_map_t map, + boolean_t must_wire) +{ + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + if (must_wire) + map->wiring_required = TRUE; + else + map->wiring_required = FALSE; + + return(KERN_SUCCESS); +} + +/* + * vm_behavior_set sets the paging behavior attribute for the + * specified range in the specified map. This routine will fail + * with KERN_INVALID_ADDRESS if any address in [start,start+size) + * is not a valid allocated or reserved memory region. 
+ */ +kern_return_t +vm_behavior_set( + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_behavior_t new_behavior) +{ + if (map == VM_MAP_NULL) + return(KERN_INVALID_ARGUMENT); + + return(vm_map_behavior_set(map, trunc_page(start), + round_page(start+size), new_behavior)); +} + +#if VM_CPM +/* + * Control whether the kernel will permit use of + * vm_allocate_cpm at all. + */ +unsigned int vm_allocate_cpm_enabled = 1; + +/* + * Ordinarily, the right to allocate CPM is restricted + * to privileged applications (those that can gain access + * to the host port). Set this variable to zero if you + * want to let any application allocate CPM. + */ +unsigned int vm_allocate_cpm_privileged = 0; + +/* + * Allocate memory in the specified map, with the caveat that + * the memory is physically contiguous. This call may fail + * if the system can't find sufficient contiguous memory. + * This call may cause or lead to heart-stopping amounts of + * paging activity. + * + * Memory obtained from this call should be freed in the + * normal way, viz., via vm_deallocate. 
+ */ +kern_return_t +vm_allocate_cpm( + host_priv_t host_priv, + register vm_map_t map, + register vm_offset_t *addr, + register vm_size_t size, + int flags) +{ + vm_object_t cpm_obj; + pmap_t pmap; + vm_page_t m, pages; + kern_return_t kr; + vm_offset_t va, start, end, offset; +#if MACH_ASSERT + extern vm_offset_t avail_start, avail_end; + vm_offset_t prev_addr; +#endif /* MACH_ASSERT */ + + boolean_t anywhere = VM_FLAGS_ANYWHERE & flags; + + if (!vm_allocate_cpm_enabled) + return KERN_FAILURE; + + if (vm_allocate_cpm_privileged && host_priv == HOST_PRIV_NULL) + return KERN_INVALID_HOST; + + if (map == VM_MAP_NULL) + return KERN_INVALID_ARGUMENT; + + assert(host_priv == &realhost); + + if (size == 0) { + *addr = 0; + return KERN_SUCCESS; + } + + if (anywhere) + *addr = vm_map_min(map); + else + *addr = trunc_page(*addr); + size = round_page(size); + + if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS) + return kr; + + cpm_obj = vm_object_allocate(size); + assert(cpm_obj != VM_OBJECT_NULL); + assert(cpm_obj->internal); + assert(cpm_obj->size == size); + assert(cpm_obj->can_persist == FALSE); + assert(cpm_obj->pager_created == FALSE); + assert(cpm_obj->pageout == FALSE); + assert(cpm_obj->shadow == VM_OBJECT_NULL); + + /* + * Insert pages into object. + */ + + vm_object_lock(cpm_obj); + for (offset = 0; offset < size; offset += PAGE_SIZE) { + m = pages; + pages = NEXT_PAGE(m); + + assert(!m->gobbled); + assert(!m->wanted); + assert(!m->pageout); + assert(!m->tabled); + assert(m->busy); + assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end); + + m->busy = FALSE; + vm_page_insert(m, cpm_obj, offset); + } + assert(cpm_obj->resident_page_count == size / PAGE_SIZE); + vm_object_unlock(cpm_obj); + + /* + * Hang onto a reference on the object in case a + * multi-threaded application for some reason decides + * to deallocate the portion of the address space into + * which we will insert this object. 
+ * + * Unfortunately, we must insert the object now before + * we can talk to the pmap module about which addresses + * must be wired down. Hence, the race with a multi- + * threaded app. + */ + vm_object_reference(cpm_obj); + + /* + * Insert object into map. + */ + + kr = vm_map_enter( + map, + addr, + size, + (vm_offset_t)0, + flags, + cpm_obj, + (vm_object_offset_t)0, + FALSE, + VM_PROT_ALL, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); + + if (kr != KERN_SUCCESS) { + /* + * A CPM object doesn't have can_persist set, + * so all we have to do is deallocate it to + * free up these pages. + */ + assert(cpm_obj->pager_created == FALSE); + assert(cpm_obj->can_persist == FALSE); + assert(cpm_obj->pageout == FALSE); + assert(cpm_obj->shadow == VM_OBJECT_NULL); + vm_object_deallocate(cpm_obj); /* kill acquired ref */ + vm_object_deallocate(cpm_obj); /* kill creation ref */ + } + + /* + * Inform the physical mapping system that the + * range of addresses may not fault, so that + * page tables and such can be locked down as well. + */ + start = *addr; + end = start + size; + pmap = vm_map_pmap(map); + pmap_pageable(pmap, start, end, FALSE); + + /* + * Enter each page into the pmap, to avoid faults. + * Note that this loop could be coded more efficiently, + * if the need arose, rather than looking up each page + * again. + */ + for (offset = 0, va = start; offset < size; + va += PAGE_SIZE, offset += PAGE_SIZE) { + vm_object_lock(cpm_obj); + m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); + vm_object_unlock(cpm_obj); + assert(m != VM_PAGE_NULL); + PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE); + } + +#if MACH_ASSERT + /* + * Verify ordering in address space. 
+ */ + for (offset = 0; offset < size; offset += PAGE_SIZE) { + vm_object_lock(cpm_obj); + m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); + vm_object_unlock(cpm_obj); + if (m == VM_PAGE_NULL) + panic("vm_allocate_cpm: obj 0x%x off 0x%x no page", + cpm_obj, offset); + assert(m->tabled); + assert(!m->busy); + assert(!m->wanted); + assert(!m->fictitious); + assert(!m->private); + assert(!m->absent); + assert(!m->error); + assert(!m->cleaning); + assert(!m->precious); + assert(!m->clustered); + if (offset != 0) { + if (m->phys_addr != prev_addr + PAGE_SIZE) { + printf("start 0x%x end 0x%x va 0x%x\n", + start, end, va); + printf("obj 0x%x off 0x%x\n", cpm_obj, offset); + printf("m 0x%x prev_address 0x%x\n", m, + prev_addr); + panic("vm_allocate_cpm: pages not contig!"); + } + } + prev_addr = m->phys_addr; + } +#endif /* MACH_ASSERT */ + + vm_object_deallocate(cpm_obj); /* kill extra ref */ + + return kr; +} + + +#else /* VM_CPM */ + +/* + * Interface is defined in all cases, but unless the kernel + * is built explicitly for this option, the interface does + * nothing. 
+ */ + +kern_return_t +vm_allocate_cpm( + host_priv_t host_priv, + register vm_map_t map, + register vm_offset_t *addr, + register vm_size_t size, + int flags) +{ + return KERN_FAILURE; +} + +/* + */ +kern_return_t +mach_memory_object_memory_entry_64( + host_t host, + boolean_t internal, + vm_object_offset_t size, + vm_prot_t permission, + ipc_port_t pager, + ipc_port_t *entry_handle) +{ + vm_named_entry_t user_object; + ipc_port_t user_handle; + ipc_port_t previous; + kern_return_t kr; + + if (host == HOST_NULL) + return(KERN_INVALID_HOST); + + user_object = (vm_named_entry_t) + kalloc(sizeof (struct vm_named_entry)); + if(user_object == NULL) + return KERN_FAILURE; + named_entry_lock_init(user_object); + user_handle = ipc_port_alloc_kernel(); + ip_lock(user_handle); + + /* make a sonce right */ + user_handle->ip_sorights++; + ip_reference(user_handle); + + user_handle->ip_destination = IP_NULL; + user_handle->ip_receiver_name = MACH_PORT_NULL; + user_handle->ip_receiver = ipc_space_kernel; + + /* make a send right */ + user_handle->ip_mscount++; + user_handle->ip_srights++; + ip_reference(user_handle); + + ipc_port_nsrequest(user_handle, 1, user_handle, &previous); + /* nsrequest unlocks user_handle */ + + user_object->object = NULL; + user_object->size = size; + user_object->offset = 0; + user_object->backing.pager = pager; + user_object->protection = permission; + user_object->internal = internal; + user_object->is_sub_map = FALSE; + user_object->ref_count = 1; + + ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, + IKOT_NAMED_ENTRY); + *entry_handle = user_handle; + return KERN_SUCCESS; +} + +kern_return_t +mach_memory_object_memory_entry( + host_t host, + boolean_t internal, + vm_size_t size, + vm_prot_t permission, + ipc_port_t pager, + ipc_port_t *entry_handle) +{ + return mach_memory_object_memory_entry_64( host, internal, + (vm_object_offset_t)size, permission, pager, entry_handle); +} + + + +/* + */ + +kern_return_t +mach_make_memory_entry_64( + 
vm_map_t target_map, + vm_object_size_t *size, + vm_object_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) +{ + vm_map_version_t version; + vm_named_entry_t user_object; + ipc_port_t user_handle; + ipc_port_t previous; + kern_return_t kr; + vm_map_t pmap_map; + + /* needed for call to vm_map_lookup_locked */ + boolean_t wired; + vm_object_offset_t obj_off; + vm_prot_t prot; + vm_object_offset_t lo_offset, hi_offset; + vm_behavior_t behavior; + vm_object_t object; + + /* needed for direct map entry manipulation */ + vm_map_entry_t map_entry; + vm_map_t local_map; + vm_object_size_t mappable_size; + + + user_object = (vm_named_entry_t) + kalloc(sizeof (struct vm_named_entry)); + if(user_object == NULL) + return KERN_FAILURE; + named_entry_lock_init(user_object); + user_handle = ipc_port_alloc_kernel(); + ip_lock(user_handle); + + /* make a sonce right */ + user_handle->ip_sorights++; + ip_reference(user_handle); + + user_handle->ip_destination = IP_NULL; + user_handle->ip_receiver_name = MACH_PORT_NULL; + user_handle->ip_receiver = ipc_space_kernel; + + /* make a send right */ + user_handle->ip_mscount++; + user_handle->ip_srights++; + ip_reference(user_handle); + + ipc_port_nsrequest(user_handle, 1, user_handle, &previous); + /* nsrequest unlocks user_handle */ + + user_object->backing.pager = NULL; + user_object->ref_count = 1; + + if(parent_entry == NULL) { + /* Create a named object based on address range within the task map */ + /* Go find the object at given address */ + + permission &= VM_PROT_ALL; + vm_map_lock_read(target_map); + + /* get the object associated with the target address */ + /* note we check the permission of the range against */ + /* that requested by the caller */ + + kr = vm_map_lookup_locked(&target_map, offset, + permission, &version, + &object, &obj_off, &prot, &wired, &behavior, + &lo_offset, &hi_offset, &pmap_map); + if (kr != KERN_SUCCESS) { + vm_map_unlock_read(target_map); + goto 
make_mem_done; + } + if ((prot & permission) != permission) { + kr = KERN_INVALID_RIGHT; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + goto make_mem_done; + } + + /* We have an object, now check to see if this object */ + /* is suitable. If not, create a shadow and share that */ + + local_map = target_map; +redo_lookup: + while(TRUE) { + if(!vm_map_lookup_entry(local_map, offset, &map_entry)) { + kr = KERN_INVALID_ARGUMENT; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + goto make_mem_done; + } + if(!(map_entry->is_sub_map)) { + if(map_entry->object.vm_object != object) { + kr = KERN_INVALID_ARGUMENT; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + goto make_mem_done; + } + break; + } else { + local_map = map_entry->object.sub_map; + vm_map_lock_read(local_map); + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + target_map = local_map; + } + } + if(((map_entry->max_protection) & permission) != permission) { + kr = KERN_INVALID_RIGHT; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + goto make_mem_done; + } + if(object->internal) { + /* vm_map_lookup_locked will create a shadow if */ + /* needs_copy is set but does not check for the */ + /* other two conditions shown. It is important to */ + /* set up an object which will not be pulled from */ + /* under us. 
*/ + + if (map_entry->needs_copy || object->shadowed || + (object->size > + ((vm_object_size_t)map_entry->vme_end - + map_entry->vme_start))) { + if (vm_map_lock_read_to_write(target_map)) { + vm_map_lock_read(target_map); + goto redo_lookup; + } + + + /* create a shadow object */ + + vm_object_shadow(&map_entry->object.vm_object, + &map_entry->offset, + (map_entry->vme_end + - map_entry->vme_start)); + map_entry->needs_copy = FALSE; + vm_object_unlock(object); + object = map_entry->object.vm_object; + vm_object_lock(object); + object->size = map_entry->vme_end + - map_entry->vme_start; + obj_off = (offset - map_entry->vme_start) + + map_entry->offset; + lo_offset = map_entry->offset; + hi_offset = (map_entry->vme_end - + map_entry->vme_start) + + map_entry->offset; + + vm_map_lock_write_to_read(target_map); + + } + } + + /* note: in the future we can (if necessary) allow for */ + /* memory object lists, this will better support */ + /* fragmentation, but is it necessary? The user should */ + /* be encouraged to create address space oriented */ + /* shared objects from CLEAN memory regions which have */ + /* a known and defined history. i.e. no inheritence */ + /* share, make this call before making the region the */ + /* target of ipc's, etc. The code above, protecting */ + /* against delayed copy, etc. is mostly defensive. */ + + + + object->true_share = TRUE; + user_object->object = object; + user_object->internal = object->internal; + user_object->is_sub_map = FALSE; + user_object->offset = obj_off; + user_object->protection = permission; + + /* the size of mapped entry that overlaps with our region */ + /* which is targeted for share. 
*/ + /* (entry_end - entry_start) - */ + /* offset of our beg addr within entry */ + /* it corresponds to this: */ + + mappable_size = hi_offset - obj_off; + if(*size > mappable_size) + *size = mappable_size; + + user_object->size = *size; + + /* user_object pager and internal fields are not used */ + /* when the object field is filled in. */ + + object->ref_count++; /* we now point to this object, hold on */ + vm_object_res_reference(object); + vm_object_unlock(object); + ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, + IKOT_NAMED_ENTRY); + *size = user_object->size; + *object_handle = user_handle; + vm_map_unlock_read(target_map); + if(pmap_map != target_map) + vm_map_unlock_read(pmap_map); + return KERN_SUCCESS; + } else { + + vm_named_entry_t parent_object; + + /* The new object will be base on an existing named object */ + if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } + parent_object = (vm_named_entry_t)parent_entry->ip_kobject; + if(permission & parent_object->protection != permission) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } + if((offset + *size) > parent_object->size) { + kr = KERN_INVALID_ARGUMENT; + goto make_mem_done; + } + + user_object->object = parent_object->object; + user_object->size = *size; + user_object->offset = parent_object->offset + offset; + user_object->protection = permission; + if(parent_object->is_sub_map) { + user_object->backing.map = parent_object->backing.map; + vm_map_lock(user_object->backing.map); + user_object->backing.map->ref_count++; + vm_map_unlock(user_object->backing.map); + } + else { + user_object->backing.pager = parent_object->backing.pager; + } + user_object->internal = parent_object->internal; + user_object->is_sub_map = parent_object->is_sub_map; + + if(parent_object->object != NULL) { + /* we now point to this object, hold on */ + vm_object_reference(parent_object->object); + vm_object_lock(parent_object->object); + 
parent_object->object->true_share = TRUE; + vm_object_unlock(parent_object->object); + } + ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, + IKOT_NAMED_ENTRY); + *object_handle = user_handle; + return KERN_SUCCESS; + } + + + +make_mem_done: + ipc_port_dealloc_kernel(user_handle); + kfree((vm_offset_t)user_object, sizeof (struct vm_named_entry)); + return kr; +} + +kern_return_t +mach_make_memory_entry( + vm_map_t target_map, + vm_size_t *size, + vm_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) +{ + vm_object_offset_t size_64; + kern_return_t kr; + + size_64 = (vm_object_offset_t)*size; + kr = mach_make_memory_entry_64(target_map, &size_64, + (vm_object_offset_t)offset, permission, object_handle, + parent_entry); + *size = (vm_size_t)size_64; + return kr; +} + +/* + */ + +kern_return_t +vm_region_object_create( + vm_map_t target_map, + vm_size_t size, + ipc_port_t *object_handle) +{ + vm_named_entry_t user_object; + ipc_port_t user_handle; + kern_return_t kr; + + pmap_t new_pmap = pmap_create((vm_size_t) 0); + ipc_port_t previous; + vm_map_t new_map; + + if(new_pmap == PMAP_NULL) + return KERN_FAILURE; + user_object = (vm_named_entry_t) + kalloc(sizeof (struct vm_named_entry)); + if(user_object == NULL) { + pmap_destroy(new_pmap); + return KERN_FAILURE; + } + named_entry_lock_init(user_object); + user_handle = ipc_port_alloc_kernel(); + + + ip_lock(user_handle); + + /* make a sonce right */ + user_handle->ip_sorights++; + ip_reference(user_handle); + + user_handle->ip_destination = IP_NULL; + user_handle->ip_receiver_name = MACH_PORT_NULL; + user_handle->ip_receiver = ipc_space_kernel; + + /* make a send right */ + user_handle->ip_mscount++; + user_handle->ip_srights++; + ip_reference(user_handle); + + ipc_port_nsrequest(user_handle, 1, user_handle, &previous); + /* nsrequest unlocks user_handle */ + + /* Create a named object based on a submap of specified size */ + + new_map = vm_map_create(new_pmap, 0, 
size, TRUE); + user_object->backing.map = new_map; + + + user_object->object = VM_OBJECT_NULL; + user_object->internal = TRUE; + user_object->is_sub_map = TRUE; + user_object->offset = 0; + user_object->protection = VM_PROT_ALL; + user_object->size = size; + user_object->ref_count = 1; + + ipc_kobject_set(user_handle, (ipc_kobject_t) user_object, + IKOT_NAMED_ENTRY); + *object_handle = user_handle; + return KERN_SUCCESS; + +} + +/* For a given range, check all map entries. If the entry coresponds to */ +/* the old vm_region/map provided on the call, replace it with the */ +/* corresponding range in the new vm_region/map */ +kern_return_t vm_map_region_replace( + vm_map_t target_map, + ipc_port_t old_region, + ipc_port_t new_region, + vm_offset_t start, + vm_offset_t end) +{ + vm_named_entry_t old_object; + vm_named_entry_t new_object; + vm_map_t old_submap; + vm_map_t new_submap; + vm_offset_t addr; + vm_map_entry_t entry; + int nested_pmap = 0; + + + vm_map_lock(target_map); + old_object = (vm_named_entry_t)old_region->ip_kobject; + new_object = (vm_named_entry_t)new_region->ip_kobject; + if((!old_object->is_sub_map) || (!new_object->is_sub_map)) { + vm_map_unlock(target_map); + return KERN_INVALID_ARGUMENT; + } + old_submap = (vm_map_t)old_object->backing.map; + new_submap = (vm_map_t)new_object->backing.map; + vm_map_lock(old_submap); + if((old_submap->min_offset != new_submap->min_offset) || + (old_submap->max_offset != new_submap->max_offset)) { + vm_map_unlock(old_submap); + vm_map_unlock(target_map); + return KERN_INVALID_ARGUMENT; + } + if(!vm_map_lookup_entry(target_map, start, &entry)) { + /* if the src is not contained, the entry preceeds */ + /* our range */ + addr = entry->vme_start; + if(entry == vm_map_to_entry(target_map)) { + vm_map_unlock(old_submap); + vm_map_unlock(target_map); + return KERN_SUCCESS; + } + vm_map_lookup_entry(target_map, addr, &entry); + } + addr = entry->vme_start; + vm_map_reference(old_submap); + while((entry != 
vm_map_to_entry(target_map)) && + (entry->vme_start < end)) { + if((entry->is_sub_map) && + (entry->object.sub_map == old_submap)) { + entry->object.sub_map = new_submap; + if(entry->use_pmap) { + if((start & 0xfffffff) || + ((end - start) != 0x10000000)) { + vm_map_unlock(old_submap); + vm_map_unlock(target_map); + return KERN_INVALID_ARGUMENT; + } + nested_pmap = 1; + } + vm_map_reference(new_submap); + vm_map_deallocate(old_submap); + } + entry = entry->vme_next; + addr = entry->vme_start; + } + if(nested_pmap) { +#ifndef i386 + pmap_unnest(target_map->pmap, start, end - start); + pmap_nest(target_map->pmap, new_submap->pmap, + start, end - start); +#endif i386 + } else { + pmap_remove(target_map->pmap, start, end); + } + vm_map_unlock(old_submap); + vm_map_unlock(target_map); + return KERN_SUCCESS; +} + + +void +mach_destroy_memory_entry( + ipc_port_t port) +{ + vm_named_entry_t named_entry; +#if MACH_ASSERT + assert(ip_kotype(port) == IKOT_NAMED_ENTRY); +#endif /* MACH_ASSERT */ + named_entry = (vm_named_entry_t)port->ip_kobject; + mutex_lock(&(named_entry)->Lock); + named_entry->ref_count-=1; + if(named_entry->ref_count == 0) { + if(named_entry->object) { + /* release the memory object we've been pointing to */ + vm_object_deallocate(named_entry->object); + } + if(named_entry->is_sub_map) { + vm_map_deallocate(named_entry->backing.map); + } + kfree((vm_offset_t)port->ip_kobject, + sizeof (struct vm_named_entry)); + } else + mutex_unlock(&(named_entry)->Lock); +} + + +kern_return_t +vm_map_page_query( + vm_map_t target_map, + vm_offset_t offset, + int *disposition, + int *ref_count) +{ + vm_map_entry_t map_entry; + vm_object_t object; + vm_page_t m; + +restart_page_query: + *disposition = 0; + *ref_count = 0; + vm_map_lock(target_map); + if(!vm_map_lookup_entry(target_map, offset, &map_entry)) { + vm_map_unlock(target_map); + return KERN_FAILURE; + } + offset -= map_entry->vme_start; /* adjust to offset within entry */ + offset += map_entry->offset; /* adjust 
to target object offset */ + if(map_entry->object.vm_object != VM_OBJECT_NULL) { + if(!map_entry->is_sub_map) { + object = map_entry->object.vm_object; + } else { + vm_map_unlock(target_map); + target_map = map_entry->object.sub_map; + goto restart_page_query; + } + } else { + vm_map_unlock(target_map); + return KERN_FAILURE; + } + vm_object_lock(object); + vm_map_unlock(target_map); + while(TRUE) { + m = vm_page_lookup(object, offset); + if (m != VM_PAGE_NULL) { + *disposition |= VM_PAGE_QUERY_PAGE_PRESENT; + break; + } else { + if(object->shadow) { + offset += object->shadow_offset; + vm_object_unlock(object); + object = object->shadow; + vm_object_lock(object); + continue; + } + vm_object_unlock(object); + return KERN_FAILURE; + } + } + + /* The ref_count is not strictly accurate, it measures the number */ + /* of entities holding a ref on the object, they may not be mapping */ + /* the object or may not be mapping the section holding the */ + /* target page but its still a ball park number and though an over- */ + /* count, it picks up the copy-on-write cases */ + + /* We could also get a picture of page sharing from pmap_attributes */ + /* but this would under count as only faulted-in mappings would */ + /* show up. 
*/ + + *ref_count = object->ref_count; + + if (m->fictitious) { + *disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS; + vm_object_unlock(object); + return KERN_SUCCESS; + } + + if (m->dirty) + *disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + else if(pmap_is_modified(m->phys_addr)) + *disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + + if (m->reference) + *disposition |= VM_PAGE_QUERY_PAGE_REF; + else if(pmap_is_referenced(m->phys_addr)) + *disposition |= VM_PAGE_QUERY_PAGE_REF; + + vm_object_unlock(object); + return KERN_SUCCESS; + +} + +kern_return_t +set_dp_control_port( + host_priv_t host_priv, + ipc_port_t control_port) +{ + if (host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_HOST); + dynamic_pager_control_port = control_port; + return KERN_SUCCESS; +} + +kern_return_t +get_dp_control_port( + host_priv_t host_priv, + ipc_port_t *control_port) +{ + if (host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_HOST); + *control_port = dynamic_pager_control_port; + return KERN_SUCCESS; + +} + +void +mach_destroy_upl( + ipc_port_t port) +{ + upl_t upl; +#if MACH_ASSERT + assert(ip_kotype(port) == IKOT_NAMED_ENTRY); +#endif /* MACH_ASSERT */ + upl = (upl_t)port->ip_kobject; + mutex_lock(&(upl)->Lock); + upl->ref_count-=1; + if(upl->ref_count == 0) { + mutex_unlock(&(upl)->Lock); + uc_upl_abort(upl, UPL_ABORT_ERROR); + } else + mutex_unlock(&(upl)->Lock); +} + +/* Retrieve a upl for an object underlying an address range in a map */ + +kern_return_t +vm_map_get_upl( + vm_map_t map, + vm_offset_t offset, + vm_size_t *upl_size, + upl_t *upl, + upl_page_info_t **page_list, + int *count, + int *flags, + int force_data_sync) +{ + vm_map_entry_t entry; + int caller_flags; + + caller_flags = *flags; + if(upl == NULL) + return KERN_INVALID_ARGUMENT; +REDISCOVER_ENTRY: + vm_map_lock(map); + if (vm_map_lookup_entry(map, offset, &entry)) { + if((entry->vme_end - offset) < *upl_size) { + *upl_size = entry->vme_end - offset; + } + /* + * Create an object if necessary. 
+ */ + if (entry->object.vm_object == VM_OBJECT_NULL) { + entry->object.vm_object = vm_object_allocate( + (vm_size_t)(entry->vme_end - entry->vme_start)); + entry->offset = 0; + } + if (!(caller_flags & UPL_COPYOUT_FROM)) { + if (entry->needs_copy + || entry->object.vm_object->copy) { + vm_map_t local_map; + vm_object_t object; + vm_object_offset_t offset_hi; + vm_object_offset_t offset_lo; + vm_object_offset_t new_offset; + vm_prot_t prot; + boolean_t wired; + vm_behavior_t behavior; + vm_map_version_t version; + vm_map_t pmap_map; + + local_map = map; + vm_map_lock_write_to_read(map); + if(vm_map_lookup_locked(&local_map, + offset, VM_PROT_WRITE, + &version, &object, + &new_offset, &prot, &wired, + &behavior, &offset_lo, + &offset_hi, &pmap_map)) { + vm_map_unlock(local_map); + return KERN_FAILURE; + } + if (pmap_map != map) { + vm_map_unlock(pmap_map); + } + vm_object_unlock(object); + vm_map_unlock(local_map); + + goto REDISCOVER_ENTRY; + } + } + if (entry->is_sub_map) { + vm_map_unlock(map); + return (vm_map_get_upl(entry->object.sub_map, + entry->offset + (offset - entry->vme_start), + upl_size, upl, page_list, count, + flags, force_data_sync)); + } + + if (!(caller_flags & UPL_COPYOUT_FROM)) { + if (entry->object.vm_object->shadow) { + int flags; + vm_map_unlock(map); + + vm_object_reference(entry->object.vm_object); + if(entry->object.vm_object->copy == NULL) { + flags = MEMORY_OBJECT_DATA_SYNC; + } else { + flags = MEMORY_OBJECT_COPY_SYNC; + } + + memory_object_lock_request( + entry->object.vm_object, + (offset - entry->vme_start) + + entry->offset, + (vm_object_size_t)*upl_size, FALSE, + flags, + VM_PROT_NO_CHANGE, NULL, 0); + vm_map_lock(map); + } + } + + if (force_data_sync) { + vm_map_unlock(map); + vm_object_reference(entry->object.vm_object); + + memory_object_lock_request( + entry->object.vm_object, + (offset - entry->vme_start) + + entry->offset, + (vm_object_size_t)*upl_size, FALSE, + MEMORY_OBJECT_DATA_SYNC, + VM_PROT_NO_CHANGE, + NULL, 0); + 
vm_map_lock(map); + } + + if(!(entry->object.vm_object->private)) { + if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE)) + *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE); + if(entry->object.vm_object->phys_contiguous) { + *flags = UPL_PHYS_CONTIG; + } else { + *flags = 0; + } + } else { + *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; + } + vm_map_unlock(map); + return(vm_fault_list_request(entry->object.vm_object, + ((offset - entry->vme_start) + entry->offset), + *upl_size, + upl, + page_list, + *count, + caller_flags)); + } + + vm_map_unlock(map); + return(KERN_FAILURE); + +} + + +kern_return_t +vm_object_upl_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + ipc_port_t *upl, + upl_page_info_t *page_list, + mach_msg_type_number_t *count, + int cntrl_flags) +{ + upl_t upl_object; + ipc_port_t upl_port; + ipc_port_t previous; + upl_page_info_t *pl; + kern_return_t kr; + + pl = page_list; + kr = vm_fault_list_request(object, offset, size, &upl_object, + &pl, *count, cntrl_flags); + + + if(kr != KERN_SUCCESS) { + *upl = MACH_PORT_NULL; + return KERN_FAILURE; + } + + upl_port = ipc_port_alloc_kernel(); + + + ip_lock(upl_port); + + /* make a sonce right */ + upl_port->ip_sorights++; + ip_reference(upl_port); + + upl_port->ip_destination = IP_NULL; + upl_port->ip_receiver_name = MACH_PORT_NULL; + upl_port->ip_receiver = ipc_space_kernel; + + /* make a send right */ + upl_port->ip_mscount++; + upl_port->ip_srights++; + ip_reference(upl_port); + + ipc_port_nsrequest(upl_port, 1, upl_port, &previous); + /* nsrequest unlocks user_handle */ + + /* Create a named object based on a submap of specified size */ + + + ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL); + *upl = upl_port; + return KERN_SUCCESS; +} + +kern_return_t +vm_pager_upl_request( + vm_object_t object, + vm_object_offset_t offset, + vm_size_t size, + vm_size_t super_size, + ipc_port_t *upl, + upl_page_info_t *page_list, + mach_msg_type_number_t *count, + int cntrl_flags) +{ + upl_t 
upl_object; + ipc_port_t upl_port; + ipc_port_t previous; + upl_page_info_t *pl; + kern_return_t kr; + + pl = page_list; + kr = upl_system_list_request(object, offset, size, super_size, + &upl_object, &pl, *count, cntrl_flags); + + if(kr != KERN_SUCCESS) { + *upl = MACH_PORT_NULL; + return KERN_FAILURE; + } + + + upl_port = ipc_port_alloc_kernel(); + + + ip_lock(upl_port); + + /* make a sonce right */ + upl_port->ip_sorights++; + ip_reference(upl_port); + + upl_port->ip_destination = IP_NULL; + upl_port->ip_receiver_name = MACH_PORT_NULL; + upl_port->ip_receiver = ipc_space_kernel; + + /* make a send right */ + upl_port->ip_mscount++; + upl_port->ip_srights++; + ip_reference(upl_port); + + ipc_port_nsrequest(upl_port, 1, upl_port, &previous); + /* nsrequest unlocks user_handle */ + + /* Create a named object based on a submap of specified size */ + + + ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL); + *upl = upl_port; + return KERN_SUCCESS; +} + +kern_return_t +vm_upl_map( + vm_map_t map, + ipc_port_t upl_port, + vm_offset_t *dst_addr) +{ + upl_t upl; + kern_return_t kr; + + if (!IP_VALID(upl_port)) { + return KERN_INVALID_ARGUMENT; + } else if (ip_kotype(upl_port) == IKOT_UPL) { + upl_lock(upl); + upl = (upl_t)upl_port->ip_kobject; + kr = uc_upl_map(map, upl, dst_addr); + upl_unlock(upl); + return kr; + } else { + return KERN_FAILURE; + } +} + + +kern_return_t +vm_upl_unmap( + vm_map_t map, + ipc_port_t upl_port) +{ + upl_t upl; + kern_return_t kr; + + if (!IP_VALID(upl_port)) { + return KERN_INVALID_ARGUMENT; + } else if (ip_kotype(upl_port) == IKOT_UPL) { + upl_lock(upl); + upl = (upl_t)upl_port->ip_kobject; + kr = uc_upl_un_map(map, upl); + upl_unlock(upl); + return kr; + } else { + return KERN_FAILURE; + } +} + +kern_return_t +vm_upl_commit( + upl_t upl, + upl_page_list_ptr_t page_list, + mach_msg_type_number_t count) +{ + kern_return_t kr; + upl_lock(upl); + if(count) { + kr = uc_upl_commit(upl, (upl_page_info_t *)page_list); + } else { + kr 
= uc_upl_commit(upl, (upl_page_info_t *) NULL); + } + upl_unlock(upl); + return kr; +} + +kern_return_t +vm_upl_commit_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + upl_page_list_ptr_t page_list, + int flags, + mach_msg_type_number_t count) +{ + kern_return_t kr; + upl_lock(upl); + if(count) { + kr = uc_upl_commit_range(upl, offset, size, flags, + (upl_page_info_t *)page_list); + } else { + kr = uc_upl_commit_range(upl, offset, size, flags, + (upl_page_info_t *) NULL); + } + upl_unlock(upl); + return kr; +} + +kern_return_t +vm_upl_abort_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int abort_flags) +{ + kern_return_t kr; + upl_lock(upl); + kr = uc_upl_abort_range(upl, offset, size, abort_flags); + upl_unlock(upl); + return kr; +} + +kern_return_t +vm_upl_abort( + upl_t upl, + int abort_type) +{ + kern_return_t kr; + upl_lock(upl); + kr = uc_upl_abort(upl, abort_type); + upl_unlock(upl); + return kr; +} + +/* ******* Temporary Internal calls to UPL for BSD ***** */ +kern_return_t +kernel_upl_map( + vm_map_t map, + upl_t upl, + vm_offset_t *dst_addr) +{ + kern_return_t kr; + + upl_lock(upl); + kr = uc_upl_map(map, upl, dst_addr); + if(kr == KERN_SUCCESS) { + upl->ref_count += 1; + } + upl_unlock(upl); + return kr; +} + + +kern_return_t +kernel_upl_unmap( + vm_map_t map, + upl_t upl) +{ + kern_return_t kr; + + upl_lock(upl); + kr = uc_upl_un_map(map, upl); + if(kr == KERN_SUCCESS) { + if(upl->ref_count == 1) { + upl_dealloc(upl); + } else { + upl->ref_count -= 1; + upl_unlock(upl); + } + } else { + upl_unlock(upl); + } + return kr; +} + +kern_return_t +kernel_upl_commit( + upl_t upl, + upl_page_list_ptr_t page_list, + mach_msg_type_number_t count) +{ + kern_return_t kr; + upl_lock(upl); + upl->ref_count += 1; + if(count) { + kr = uc_upl_commit(upl, (upl_page_info_t *)page_list); + } else { + kr = uc_upl_commit(upl, (upl_page_info_t *) NULL); + } + if(upl->ref_count == 1) { + upl_dealloc(upl); + } else { + upl->ref_count -= 1; + 
upl_unlock(upl); + } + return kr; +} + +kern_return_t +kernel_upl_commit_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int flags, + upl_page_list_ptr_t page_list, + mach_msg_type_number_t count) +{ + kern_return_t kr; + upl_lock(upl); + upl->ref_count += 1; + if(count) { + kr = uc_upl_commit_range(upl, offset, size, flags, + (upl_page_info_t *)page_list); + } else { + kr = uc_upl_commit_range(upl, offset, size, flags, + (upl_page_info_t *) NULL); + } + if(upl->ref_count == 1) { + upl_dealloc(upl); + } else { + upl->ref_count -= 1; + upl_unlock(upl); + } + return kr; +} + +kern_return_t +kernel_upl_abort_range( + upl_t upl, + vm_offset_t offset, + vm_size_t size, + int abort_flags) +{ + kern_return_t kr; + upl_lock(upl); + upl->ref_count += 1; + kr = uc_upl_abort_range(upl, offset, size, abort_flags); + if(upl->ref_count == 1) { + upl_dealloc(upl); + } else { + upl->ref_count -= 1; + upl_unlock(upl); + } + return kr; +} + +kern_return_t +kernel_upl_abort( + upl_t upl, + int abort_type) +{ + kern_return_t kr; + upl_lock(upl); + upl->ref_count += 1; + kr = uc_upl_abort(upl, abort_type); + if(upl->ref_count == 1) { + upl_dealloc(upl); + } else { + upl->ref_count -= 1; + upl_unlock(upl); + } + return kr; +} + + + +/* code snippet from vm_map */ +kern_return_t +vm_object_create_nomap(ipc_port_t port, vm_object_size_t size) +{ + vm_object_t object_ptr; + return memory_object_create_named(port, size, &object_ptr); +} + + +/* + * Temporary interface to overcome old style ipc artifacts, and allow + * ubc to call this routine directly. Will disappear with new RPC + * component architecture. + * NOTE: call to memory_object_destroy removes the vm_object's association + * with its abstract memory object and hence the named flag is set to false. 
+ */ +kern_return_t +memory_object_destroy_named( + vm_object_t object, + kern_return_t reason) +{ + vm_object_lock(object); + if(object->named == FALSE) { + panic("memory_object_destroy_named called by party which doesn't hold right"); + } + object->ref_count++; + vm_object_res_reference(object); + vm_object_unlock(object); + return (memory_object_destroy(object, reason)); +} + +/* + * Temporary interface to overcome old style ipc artifacts, and allow + * ubc to call this routine directly. Will disappear with new RPC + * component architecture. + * Note: No change is made in the named flag. + */ +kern_return_t +memory_object_lock_request_named( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + memory_object_return_t should_return, + boolean_t should_flush, + vm_prot_t prot, + ipc_port_t reply_to) +{ + vm_object_lock(object); + if(object->named == FALSE) { + panic("memory_object_lock_request_named called by party which doesn't hold right"); + } + object->ref_count++; + vm_object_res_reference(object); + vm_object_unlock(object); + return (memory_object_lock_request(object, + offset, size, should_return, should_flush, prot, + reply_to, 0)); +} + +kern_return_t +memory_object_change_attributes_named( + vm_object_t object, + memory_object_flavor_t flavor, + memory_object_info_t attributes, + mach_msg_type_number_t count, + ipc_port_t reply_to, + mach_msg_type_name_t reply_to_type) +{ + vm_object_lock(object); + if(object->named == FALSE) { + panic("memory_object_lock_request_named called by party which doesn't hold right"); + } + object->ref_count++; + vm_object_res_reference(object); + vm_object_unlock(object); + return (memory_object_change_attributes(object, + flavor, attributes, count, reply_to, reply_to_type)); +} + +kern_return_t +vm_get_shared_region( + task_t task, + shared_region_mapping_t *shared_region) +{ + *shared_region = (shared_region_mapping_t) task->system_shared_region; + return KERN_SUCCESS; +} + +kern_return_t 
+vm_set_shared_region( + task_t task, + shared_region_mapping_t shared_region) +{ + task->system_shared_region = (vm_offset_t) shared_region; + return KERN_SUCCESS; +} + +kern_return_t +shared_region_mapping_info( + shared_region_mapping_t shared_region, + ipc_port_t *text_region, + vm_size_t *text_size, + ipc_port_t *data_region, + vm_size_t *data_size, + vm_offset_t *region_mappings, + vm_offset_t *client_base, + vm_offset_t *alt_base, + vm_offset_t *alt_next, + int *flags, + shared_region_mapping_t *next) +{ + shared_region_mapping_lock(shared_region); + + *text_region = shared_region->text_region; + *text_size = shared_region->text_size; + *data_region = shared_region->data_region; + *data_size = shared_region->data_size; + *region_mappings = shared_region->region_mappings; + *client_base = shared_region->client_base; + *alt_base = shared_region->alternate_base; + *alt_next = shared_region->alternate_next; + *flags = shared_region->flags; + *next = shared_region->next; + + shared_region_mapping_unlock(shared_region); +} + +kern_return_t +shared_region_object_chain_attach( + shared_region_mapping_t target_region, + shared_region_mapping_t object_chain_region) +{ + shared_region_object_chain_t object_ele; + + if(target_region->object_chain) + return KERN_FAILURE; + object_ele = (shared_region_object_chain_t) + kalloc(sizeof (struct shared_region_object_chain)); + shared_region_mapping_lock(object_chain_region); + target_region->object_chain = object_ele; + object_ele->object_chain_region = object_chain_region; + object_ele->next = object_chain_region->object_chain; + object_ele->depth = object_chain_region->depth; + object_chain_region->depth++; + target_region->alternate_next = object_chain_region->alternate_next; + shared_region_mapping_unlock(object_chain_region); + return KERN_SUCCESS; +} + +kern_return_t +shared_region_mapping_create( + ipc_port_t text_region, + vm_size_t text_size, + ipc_port_t data_region, + vm_size_t data_size, + vm_offset_t 
region_mappings, + vm_offset_t client_base, + shared_region_mapping_t *shared_region, + vm_offset_t alt_base, + vm_offset_t alt_next) +{ + *shared_region = (shared_region_mapping_t) + kalloc(sizeof (struct shared_region_mapping)); + if(*shared_region == NULL) + return KERN_FAILURE; + shared_region_mapping_lock_init((*shared_region)); + (*shared_region)->text_region = text_region; + (*shared_region)->text_size = text_size; + (*shared_region)->data_region = data_region; + (*shared_region)->data_size = data_size; + (*shared_region)->region_mappings = region_mappings; + (*shared_region)->client_base = client_base; + (*shared_region)->ref_count = 1; + (*shared_region)->next = NULL; + (*shared_region)->object_chain = NULL; + (*shared_region)->self = *shared_region; + (*shared_region)->flags = 0; + (*shared_region)->depth = 0; + (*shared_region)->alternate_base = alt_base; + (*shared_region)->alternate_next = alt_next; + return KERN_SUCCESS; +} + +kern_return_t +shared_region_mapping_set_alt_next( + shared_region_mapping_t shared_region, + vm_offset_t alt_next) +{ + shared_region->alternate_next = alt_next; + return KERN_SUCCESS; +} + +kern_return_t +shared_region_mapping_ref( + shared_region_mapping_t shared_region) +{ + if(shared_region == NULL) + return KERN_SUCCESS; + shared_region_mapping_lock(shared_region); + shared_region->ref_count++; + shared_region_mapping_unlock(shared_region); + return KERN_SUCCESS; +} + +kern_return_t +shared_region_mapping_dealloc( + shared_region_mapping_t shared_region) +{ + struct shared_region_task_mappings sm_info; + shared_region_mapping_t next; + + if(shared_region == NULL) + return KERN_SUCCESS; + shared_region_mapping_lock(shared_region); + + if((--shared_region->ref_count) == 0) { + + sm_info.text_region = shared_region->text_region; + sm_info.text_size = shared_region->text_size; + sm_info.data_region = shared_region->data_region; + sm_info.data_size = shared_region->data_size; + sm_info.region_mappings = 
shared_region->region_mappings; + sm_info.client_base = shared_region->client_base; + sm_info.alternate_base = shared_region->alternate_base; + sm_info.alternate_next = shared_region->alternate_next; + sm_info.flags = shared_region->flags; + sm_info.self = shared_region; + + lsf_remove_regions_mappings(shared_region, &sm_info); + pmap_remove(((vm_named_entry_t) + (shared_region->text_region->ip_kobject)) + ->backing.map->pmap, + sm_info.client_base, + sm_info.client_base + sm_info.text_size); + ipc_port_release_send(shared_region->text_region); + ipc_port_release_send(shared_region->data_region); + if(shared_region->object_chain) { + shared_region_mapping_dealloc( + shared_region->object_chain->object_chain_region); + kfree((vm_offset_t)shared_region->object_chain, + sizeof (struct shared_region_object_chain)); + } + kfree((vm_offset_t)shared_region, + sizeof (struct shared_region_mapping)); + return KERN_SUCCESS; + } + shared_region_mapping_unlock(shared_region); + return KERN_SUCCESS; +} + +vm_offset_t +vm_map_get_phys_page( + vm_map_t map, + vm_offset_t offset) +{ + vm_map_entry_t entry; + int ops; + int flags; + vm_offset_t phys_addr = 0; + vm_object_t object; + + vm_map_lock(map); + while (vm_map_lookup_entry(map, offset, &entry)) { + + if (entry->object.vm_object == VM_OBJECT_NULL) { + vm_map_unlock(map); + return (vm_offset_t) 0; + } + if (entry->is_sub_map) { + vm_map_t old_map; + vm_map_lock(entry->object.sub_map); + old_map = map; + map = entry->object.sub_map; + offset = entry->offset + (offset - entry->vme_start); + vm_map_unlock(old_map); + continue; + } + offset = entry->offset + (offset - entry->vme_start); + object = entry->object.vm_object; + vm_object_lock(object); + while (TRUE) { + vm_page_t dst_page = vm_page_lookup(object,offset); + if(dst_page == VM_PAGE_NULL) { + if(object->shadow) { + vm_object_t old_object; + vm_object_lock(object->shadow); + old_object = object; + offset = offset + object->shadow_offset; + object = object->shadow; + 
vm_object_unlock(old_object); + } else { + vm_object_unlock(object); + break; + } + } else { + phys_addr = dst_page->phys_addr; + vm_object_unlock(object); + break; + } + } + break; + + } + + vm_map_unlock(map); + return phys_addr; +} +#endif /* VM_CPM */ diff --git a/pexpert/Makefile b/pexpert/Makefile new file mode 100644 index 000000000..7497f281e --- /dev/null +++ b/pexpert/Makefile @@ -0,0 +1,40 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = pexpert + + +INSTINC_SUBDIRS_PPC = pexpert + + +INSTINC_SUBDIRS_I386 = pexpert + + +EXPINC_SUBDIRS = pexpert + + +EXPINC_SUBDIRS_PPC = pexpert + + +EXPINC_SUBDIRS_I386 = pexpert + + +SETUP_SUBDIRS = \ + conf + +COMP_SUBDIRS = \ + conf + +INST_SUBDIRS = \ + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/conf/MASTER b/pexpert/conf/MASTER new file mode 100644 index 000000000..84438831d --- /dev/null +++ b/pexpert/conf/MASTER @@ -0,0 +1,88 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement +# specifies the terms and conditions for use and redistribution. +# +####################################################################### +# +# Master machine independent configuration file. +# +# Specific configuration files are created based on this and +# the machine specific master file using the doconf script. +# +# Any changes to the master configuration files will affect all +# other configuration files based upon it. +# +####################################################################### +# +# To build a configuration, execute "doconf ." +# Configurations are specified in the "Configurations:" section +# of the MASTER and MASTER.* files as follows: +# +# = [ ... 
] +# +# Lines in the MASTER and MASTER.* files are selected based on +# the attribute selector list, found in a comment at the end of +# the line. This is a list of attributes separated by commas. +# The "!" operator selects the line if none of the attributes are +# specified. +# +# For example: +# +# selects a line if "foo" or "bar" are specified. +# selects a line if neither "foo" nor "bar" is +# specified. +# +# Lines with no attributes specified are selected for all +# configurations. +# +####################################################################### +# +# STANDARD CONFIGURATION OPTIONS (select any combination) +# +# debug = extra kernel level debugging support +# mach = Mach support +# +# EXPERIMENTAL CONFIGURATION OPTIONS (select any combination, carefully) +# +# nbc = no buffer cache support +# simple = non-rollover clock support +# timing = precision timing support +# host = host resource control support +# fixpri = fixed priority threads +# +# MULTI-PROCESSOR CONFIGURATION (select at most one) +# +# multi16 = enable 16 multi-processors +# multi32 = enable 32 multi-processors +# multi48 = enable 48 multi-processors +# +# SYSTEM SIZE CONFIGURATION (select exactly one) +# +# xlarge = extra large scale system configuration +# large = large scale system configuration +# medium = medium scale system configuration +# small = small scale system configuration +# xsmall = extra small scale system configuration +# bsmall = special extra small scale system configuration for +# (e.g. for boot floppies) +# +####################################################################### +# +# Standard Mach Research Configurations: +# -------- ---- -------- --------------- +# +# These are the default configurations that can be used by most sites. +# They are used internally by the Mach project. 
+# +# MACH = [mach multi16 medium debug] +# +####################################################################### +# +ident PEXPERT + +options MACH_PE # Objective-C support # +options MACH_KERNEL +options DEBUG + diff --git a/pexpert/conf/MASTER.i386 b/pexpert/conf/MASTER.i386 new file mode 100644 index 000000000..f276a2461 --- /dev/null +++ b/pexpert/conf/MASTER.i386 @@ -0,0 +1,15 @@ +# +###################################################################### +# +# Standard NeXT Research Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [i386 mach mach_pe] +# PROFILE = [i386 mach mach_pe profile] +# DEBUG = [i386 mach mach_pe debug] +# +###################################################################### + +machine "i386" # +cpu "i386" # + diff --git a/pexpert/conf/MASTER.ppc b/pexpert/conf/MASTER.ppc new file mode 100644 index 000000000..81050216c --- /dev/null +++ b/pexpert/conf/MASTER.ppc @@ -0,0 +1,17 @@ +# +###################################################################### +# +# Standard NeXT Research Configurations: +# -------- ---- -------- --------------- +# +# RELEASE = [ppc mach mach_pe] +# RELEASE_TRACE = [RELEASE kdebug] +# PROFILE = [ppc mach mach_pe profile] +# DEBUG = [ppc mach mach_pe debug] +# DEBUG_TRACE = [DEBUG kdebug] +# +###################################################################### + +machine "ppc" # +cpu "ppc" # + diff --git a/pexpert/conf/Makefile b/pexpert/conf/Makefile new file mode 100644 index 000000000..aa0f1eb82 --- /dev/null +++ b/pexpert/conf/Makefile @@ -0,0 +1,63 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + tools + +COMP_SUBDIRS = + +INST_SUBDIRS = + +ifndef PEXPERT_KERNEL_CONFIG +export PEXPERT_KERNEL_CONFIG = $(KERNEL_CONFIG) +endif + 
+COMPOBJROOT=$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT) + +$(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf: + make build_setup + +$(COMPOBJROOT)/$(PEXPERT_KERNEL_CONFIG)/Makefile : $(SOURCE)/MASTER \ + $(SOURCE)/MASTER.$(arch_config) \ + $(SOURCE)/Makefile.template \ + $(SOURCE)/Makefile.$(arch_config) \ + $(SOURCE)/files \ + $(SOURCE)/files.$(arch_config) \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf + @echo "Running doconf for $(PEXPERT_KERNEL_CONFIG)"; + (doconf_target=$(addsuffix /conf, $(TARGET)); \ + echo $${doconf_target};\ + $(MKDIR) $${doconf_target}; \ + cd $${doconf_target}; \ + rm -f $(notdir $?); \ + cp $? $${doconf_target}; \ + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf -c -cpu $(arch_config) -d $(TARGET)/$(PEXPERT_KERNEL_CONFIG) $(PEXPERT_KERNEL_CONFIG); \ + ); + +.ORDER: $(COMPOBJROOT)/$(PEXPERT_KERNEL_CONFIG)/Makefile + +do_setup_conf: $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/doconf \ + $(COMPOBJROOT)/$(PEXPERT_KERNEL_CONFIG)/Makefile + +do_all: do_setup_conf + @echo "[ $(SOURCE) ] Starting do_all $(COMPONENT) $(PEXPERT_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; \ + (cd $(COMPOBJROOT)/$(PEXPERT_KERNEL_CONFIG); \ + next_source=$(subst conf/,,$(SOURCE)); \ + ${MAKE} MAKEFILES=$(TARGET)/$(PEXPERT_KERNEL_CONFIG)/Makefile \ + SOURCE=$${next_source} \ + TARGET=$(TARGET) \ + build_all \ + ); \ + echo "[ $(SOURCE) ] Returning do_all $(COMPONENT) $(PEXPERT_KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + +do_build_all: do_all + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/pexpert/conf/Makefile.i386 b/pexpert/conf/Makefile.i386 new file mode 100644 index 000000000..2f6232c14 --- /dev/null +++ b/pexpert/conf/Makefile.i386 @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for i386 +###################################################################### + 
+###################################################################### +#END Machine dependent Makefile fragment for i386 +###################################################################### + diff --git a/pexpert/conf/Makefile.ppc b/pexpert/conf/Makefile.ppc new file mode 100644 index 000000000..7786ccbd6 --- /dev/null +++ b/pexpert/conf/Makefile.ppc @@ -0,0 +1,8 @@ +###################################################################### +#BEGIN Machine dependent Makefile fragment for ppc +###################################################################### + +###################################################################### +#END Machine dependent Makefile fragment for ppc +###################################################################### + diff --git a/pexpert/conf/Makefile.template b/pexpert/conf/Makefile.template new file mode 100644 index 000000000..54eb92200 --- /dev/null +++ b/pexpert/conf/Makefile.template @@ -0,0 +1,111 @@ +# +# Mach Operating System +# Copyright (c) 1986 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. +# + +# +# Export IDENT for sub-makefiles +# +export IDENT + +# +# XXX: INCFLAGS +# +INCFLAGS_MAKEFILE= $(INCFLAGS_POSIX) + +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + +include $(MakeInc_cmd) +include $(MakeInc_def) + +# +# XXX: CFLAGS +# +# -D_KERNEL_BUILD -DKERNEL_BUILD -DARCH_PRIVATE -DBSD_BUILD -DMACH_KERNEL +# +CFLAGS+= -DPEXPERT_KERNEL_PRIVATE -DKERNEL -DDRIVER_PRIVATE -DNCPUS=1 \ + -Wall -Wno-four-char-constants -fno-common \ + -DRelease3CompatibilityBuild + +SFLAGS+= -DKERNEL + +# +# Directories for mig generated files +# +COMP_SUBDIRS = + +# +# Make sure we don't remove this by accident if interrupted at the wrong +# time. 
+# +.PRECIOUS: Makefile + +VERSION_FILES= \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.major \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.minor \ + $(SOURCE_DIR)/$(COMPONENT)/conf/version.variant + +COPYRIGHT_FILES = \ + $(SOURCE_DIR)/$(COMPONENT)/conf/copyright + +# +# Theses macros are filled in by the config program depending on the +# current configuration. The MACHDEP macro is replaced by the +# contents of the machine dependent makefile template and the others +# are replaced by the corresponding symbol definitions for the +# configuration. +# + +%OBJS + +%CFILES + +%MFILES + +%SFILES + +%BFILES + +%ORDERED +%MACHDEP + +# +# OBJSDEPS is the set of files (defined in the machine dependent +# template if necessary) which all objects depend on (such as an +# in-line assembler expansion filter) +# +${OBJS}: ${OBJSDEPS} + + +%LOAD + +LDOBJS = $(OBJS) + +$(COMPONENT).o: $(LDOBJS) + @echo "creating $(COMPONENT).o" + $(RM) $(RMFLAGS) vers.c + $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/newvers \ + `$(CAT) ${VERSION_FILES}` ${COPYRIGHT_FILES} + ${KCC} $(CFLAGS) $(INCLUDES) -c vers.c + @echo [ updating $(COMPONENT).o ${PEXPERT_KERNEL_CONFIG} ] + $(LD) $(LDFLAGS_COMPONENT) -o $(COMPONENT).o ${LDOBJS} vers.o + +do_depend: do_all + ${MD} -u Makedep -f -d `ls *.d`; + +do_all: $(COMPONENT).o + +do_build_all: do_depend + +%RULES + +-include Makedep + +include $(MakeInc_rule) +include $(MakeInc_dir) + diff --git a/pexpert/conf/copyright b/pexpert/conf/copyright new file mode 100644 index 000000000..8930fb873 --- /dev/null +++ b/pexpert/conf/copyright @@ -0,0 +1,6 @@ +/* + * Mach Operating System + * Copyright (c) 1989 Carnegie-Mellon University + * All rights reserved. The CMU software License Agreement specifies + * the terms and conditions for use and redistribution. 
+ */ diff --git a/pexpert/conf/files b/pexpert/conf/files new file mode 100644 index 000000000..8568b9940 --- /dev/null +++ b/pexpert/conf/files @@ -0,0 +1,14 @@ +# +OPTIONS/mach_kdb optional mach_kdb + + +# +# pexpert generic +# + +pexpert/gen/device_tree.c standard +pexpert/gen/bootargs.c standard +pexpert/gen/pe_gen.c standard + +# + diff --git a/pexpert/conf/files.i386 b/pexpert/conf/files.i386 new file mode 100644 index 000000000..466e22eb4 --- /dev/null +++ b/pexpert/conf/files.i386 @@ -0,0 +1,15 @@ +OPTIONS/gprof optional gprof + +pexpert/i386/pe_bootargs.c standard +pexpert/i386/pe_init.c standard +pexpert/i386/pe_identify_machine.c standard +pexpert/i386/pe_interrupt.c standard +pexpert/i386/pe_kprintf.c standard +pexpert/i386/fakePPCDeviceTree.c standard + +# Graphics and text console support. +pexpert/i386/text_console.c standard +pexpert/i386/kdasm.s standard + +# Polled-mode keyboard driver. +pexpert/i386/kd.c standard diff --git a/pexpert/conf/files.ppc b/pexpert/conf/files.ppc new file mode 100644 index 000000000..ab76f421f --- /dev/null +++ b/pexpert/conf/files.ppc @@ -0,0 +1,7 @@ + +pexpert/ppc/pe_init.c standard +pexpert/ppc/pe_bootargs.c standard +pexpert/ppc/pe_identify_machine.c standard +pexpert/ppc/pe_kprintf.c standard +pexpert/ppc/pe_clock_speed.c standard +pexpert/ppc/pe_clock_speed_asm.s standard diff --git a/pexpert/conf/tools/Makefile b/pexpert/conf/tools/Makefile new file mode 100644 index 000000000..9df86ce8c --- /dev/null +++ b/pexpert/conf/tools/Makefile @@ -0,0 +1,36 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +SETUP_SUBDIRS = \ + doconf \ + newvers + +COMP_SUBDIRS = \ + doconf \ + newvers + +INST_SUBDIRS = \ + + +setup_build_all: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) 
$(TARGET)" + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/conf/tools/doconf/Makefile b/pexpert/conf/tools/doconf/Makefile new file mode 100644 index 000000000..2bf0b7a10 --- /dev/null +++ b/pexpert/conf/tools/doconf/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)doconf + +# +# How to install it +# +IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/conf/tools/doconf/doconf.csh b/pexpert/conf/tools/doconf/doconf.csh new file mode 100755 index 000000000..43388c11c --- 
/dev/null +++ b/pexpert/conf/tools/doconf/doconf.csh @@ -0,0 +1,313 @@ +#!/bin/csh -f +set path = ($path .) +###################################################################### +# HISTORY +# 1-Dec-87 Michael Young (mwyoung) at Carnegie-Mellon University +# Added "-verbose" switch, so this script produces no output +# in the normal case. +# +# 10-Oct-87 Mike Accetta (mja) at Carnegie-Mellon University +# Flushed cmu_*.h and spin_locks.h +# [ V5.1(XF18) ] +# +# 6-Apr-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# Use MASTER.local and MASTER..local for generation of +# configuration files in addition to MASTER and MASTER.. +# +# 25-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Removed use of obsolete wb_*.h files when building the feature +# list; modified to save the previous configuration file and +# display the differences between it and the new file. +# [ V5.1(F8) ] +# +# 25-Mar-87 Avadis Tevanian (avie) at Carnegie-Mellon University +# If there is no /etc/machine just print out a message telling +# user to use the -cpu option. I thought this script was supposed +# to work even without a /etc/machine, but it doesn't... and this +# is the easiest way out. +# +# 13-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Added "romp_fpa.h" file to extra features for the RT. +# [ V5.1(F7) ] +# +# 11-Mar-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to maintain the appropriate configuration features file +# in the "machine" directory whenever the corresponding +# configuration is generated. This replaces the old mechanism of +# storing this directly in the file since it was +# machine dependent and also precluded building programs for more +# than one configuration from the same set of sources. +# [ V5.1(F6) ] +# +# 21-Feb-87 Mike Accetta (mja) at Carnegie-Mellon University +# Fixed to require wired-in cpu type names for only those +# machines where the kernel name differs from that provided by +# /etc/machine (i.e. 
IBMRT => ca and SUN => sun3); updated to +# permit configuration descriptions in both machine indepedent +# and dependent master configuration files so that attributes can +# be grouped accordingly. +# [ V5.1(F3) ] +# +# 17-Jan-87 Mike Accetta (mja) at Carnegie-Mellon University +# Updated to work from any directory at the same level as +# "conf"; generate configuration from both MASTER and +# MASTER. files; added -cpu switch. +# [ V5.1(F1) ] +# +# 18-Aug-86 Mike Accetta (mja) at Carnegie-Mellon University +# Added -make switch and changed meaning of -config; upgraded to +# allow multiple attributes per configuration and to define +# configurations in terms of these attributes within MASTER. +# +# 14-Apr-83 Mike Accetta (mja) at Carnegie-Mellon University +# Added -config switch to only run /etc/config without +# "make depend" and "make". +# +###################################################################### + +set prog=$0 +set prog=$prog:t +set nonomatch +set OBJDIR=../BUILD +if ("`/usr/bin/uname`" == "Rhapsody" ) then +set CONFIG_DIR=/usr/local/bin +else +set CONFIG_DIR=/usr/bin +endif + +unset domake +unset doconfig +unset beverbose +unset MACHINE +unset profile + +while ($#argv >= 1) + if ("$argv[1]" =~ -*) then + switch ("$argv[1]") + case "-c": + case "-config": + set doconfig + breaksw + case "-m": + case "-make": + set domake + breaksw + case "-cpu": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set MACHINE="$argv[2]" + shift + breaksw + case "-d": + if ($#argv < 2) then + echo "${prog}: missing argument to ${argv[1]}" + exit 1 + endif + set OBJDIR="$argv[2]" + shift + breaksw + case "-verbose": + set beverbose + breaksw + case "-p": + case "-profile": + set profile + breaksw + default: + echo "${prog}: ${argv[1]}: unknown switch" + exit 1 + breaksw + endsw + shift + else + break + endif +end + +if ($#argv == 0) set argv=(GENERIC) + +if (! 
$?MACHINE) then + if (-d /NextApps) then + set MACHINE=`hostinfo | awk '/MC680x0/ { printf("m68k") } /MC880x0/ { printf("m88k") }'` + endif +endif + +if (! $?MACHINE) then + if (-f /etc/machine) then + set MACHINE="`/etc/machine`" + else + echo "${prog}: no /etc/machine, specify machine type with -cpu" + echo "${prog}: e.g. ${prog} -cpu VAX CONFIGURATION" + exit 1 + endif +endif + +set FEATURES_EXTRA= + +switch ("$MACHINE") + case IBMRT: + set cpu=ca + set ID=RT + set FEATURES_EXTRA="romp_dualcall.h romp_fpa.h" + breaksw + case SUN: + set cpu=sun3 + set ID=SUN3 + breaksw + default: + set cpu=`echo $MACHINE | tr A-Z a-z` + set ID=`echo $MACHINE | tr a-z A-Z` + breaksw +endsw +set FEATURES=../h/features.h +set FEATURES_H=(cs_*.h mach_*.h net_*.h\ + cputypes.h cpus.h vice.h\ + $FEATURES_EXTRA) +set MASTER_DIR=../conf +set MASTER = ${MASTER_DIR}/MASTER +set MASTER_CPU=${MASTER}.${cpu} + +set MASTER_LOCAL = ${MASTER}.local +set MASTER_CPU_LOCAL = ${MASTER_CPU}.local +if (! -f $MASTER_LOCAL) set MASTER_LOCAL = "" +if (! -f $MASTER_CPU_LOCAL) set MASTER_CPU_LOCAL = "" + +if (! 
-d $OBJDIR) then + echo "[ creating $OBJDIR ]" + mkdir -p $OBJDIR +endif + +foreach SYS ($argv) + set SYSID=${SYS}_${ID} + set SYSCONF=$OBJDIR/config.$SYSID + set BLDDIR=$OBJDIR + if ($?beverbose) then + echo "[ generating $SYSID from $MASTER_DIR/MASTER{,.$cpu}{,.local} ]" + endif + echo +$SYS \ + | \ + cat $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL - \ + $MASTER $MASTER_LOCAL $MASTER_CPU $MASTER_CPU_LOCAL \ + | \ + sed -n \ + -e "/^+/{" \ + -e "s;[-+];#&;gp" \ + -e 't loop' \ + -e ': loop' \ + -e 'n' \ + -e '/^#/b loop' \ + -e '/^$/b loop' \ + -e 's;^\([^#]*\).*#[ ]*<\(.*\)>[ ]*$;\2#\1;' \ + -e 't not' \ + -e 's;\([^#]*\).*;#\1;' \ + -e 't not' \ + -e ': not' \ + -e 's;[ ]*$;;' \ + -e 's;^\!\(.*\);\1#\!;' \ + -e 'p' \ + -e 't loop' \ + -e 'b loop' \ + -e '}' \ + -e "/^[^#]/d" \ + -e 's; ; ;g' \ + -e "s;^# *\([^ ]*\)[ ]*=[ ]*\[\(.*\)\].*;\1#\2;p" \ + | \ + awk '-F#' '\ +part == 0 && $1 != "" {\ + m[$1]=m[$1] " " $2;\ + next;\ +}\ +part == 0 && $1 == "" {\ + for (i=NF;i>1;i--){\ + s=substr($i,2);\ + c[++na]=substr($i,1,1);\ + a[na]=s;\ + }\ + while (na > 0){\ + s=a[na];\ + d=c[na--];\ + if (m[s] == "") {\ + f[s]=d;\ + } else {\ + nx=split(m[s],x," ");\ + for (j=nx;j>0;j--) {\ + z=x[j];\ + a[++na]=z;\ + c[na]=d;\ + }\ + }\ + }\ + part=1;\ + next;\ +}\ +part != 0 {\ + if ($1 != "") {\ + n=split($1,x,",");\ + ok=0;\ + for (i=1;i<=n;i++) {\ + if (f[x[i]] == "+") {\ + ok=1;\ + }\ + }\ + if (NF > 2 && ok == 0 || NF <= 2 && ok != 0) {\ + print $2; \ + }\ + } else { \ + print $2; \ + }\ +}\ +' >$SYSCONF.new + if (-z $SYSCONF.new) then + echo "${prog}: ${$SYSID}: no such configuration in $MASTER_DIR/MASTER{,.$cpu}" + rm -f $SYSCONF.new + endif + if (! -d $BLDDIR) then + echo "[ creating $BLDDIR ]" + mkdir -p $BLDDIR + endif +# +# These paths are used by config. +# +# "builddir" is the name of the directory where kernel binaries +# are put. It is a single path element, never absolute, and is +# always relative to "objectdir". 
"builddir" is used by config +# solely to determine where to put files created by "config" (e.g. +# the created Makefile and *.h's.) +# +# "objectdir" is the name of the directory which will hold "builddir". +# It is a path; if relative, it is relative to the current directory +# where config is run. It's sole use is to be prepended to "builddir" +# to indicate where config-created files are to be placed (see above). +# +# "sourcedir" is the location of the sources used to build the kernel. +# It is a path; if relative, it is relative to the directory specified +# by the concatenation of "objectdir" and "builddir" (i.e. where the +# kernel binaries are put). +# + echo 'builddir "."' >> $SYSCONF.new + set OBJRELDIR=`relpath $OBJROOT $OBJDIR` + echo 'objectdir "'$OBJROOT'/'$OBJRELDIR'"' >> $SYSCONF.new + set SRCDIR=`dirname $SOURCE` + echo 'sourcedir "'$SRCROOT'"' >> $SYSCONF.new + if (-f $SYSCONF) then + diff $SYSCONF $SYSCONF.new + rm -f $SYSCONF.old + mv $SYSCONF $SYSCONF.old + endif + rm -f $SYSCONF + mv $SYSCONF.new $SYSCONF + if ($?doconfig) then + echo "[ configuring $SYSID ]" + if ($?profile) then + $CONFIG_DIR/config -c $MASTER_DIR -p $SYSCONF + else + $CONFIG_DIR/config -c $MASTER_DIR $SYSCONF + endif + endif + if ($?domake) then + echo "[ making $SYSID ]" + (cd $BLDDIR; make) + endif +end diff --git a/pexpert/conf/tools/newvers/Makefile b/pexpert/conf/tools/newvers/Makefile new file mode 100644 index 000000000..73603c753 --- /dev/null +++ b/pexpert/conf/tools/newvers/Makefile @@ -0,0 +1,49 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +COMP_SUBDIRS = \ + +INST_SUBDIRS = \ + + +# +# Who and where +# +BINDIR= +DSTDIR= $(strip $(OBJROOT)/$(KERNEL_CONFIG)_$(ARCH_CONFIG)/$(COMPONENT)/) +PROGRAM= $(DSTDIR)newvers + +# +# How to install it +# 
+IFLAGS= -c -m 555 + +$(PROGRAM): $(DSTDIR)% : $(SOURCE)%.csh + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)"; + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS + sed -e "s/#PROGRAM.*/#`vers_string $(notdir $(PROGRAM))`/" \ + < $< >$(notdir $(PROGRAM)).VERS; + install $(IFLAGS) $(notdir $(PROGRAM)).VERS $(PROGRAM); + -$(RM) $(RMFLAGS) $(notdir $(PROGRAM)).VERS; + +do_build_setup: $(PROGRAM) + +do_build_all: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +setup_build_install: + @echo "[ $(SOURCE) ] make setup_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +do_build_install: + @echo "[ $(SOURCE) ] make do_build_all $(KERNEL_CONFIG) $(ARCH_CONFIG) $(TARGET)" + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/conf/tools/newvers/newvers.csh b/pexpert/conf/tools/newvers/newvers.csh new file mode 100644 index 000000000..293d416e4 --- /dev/null +++ b/pexpert/conf/tools/newvers/newvers.csh @@ -0,0 +1,33 @@ +#!/bin/sh - +# +# Mach Operating System +# Copyright (c) 1990 Carnegie-Mellon University +# Copyright (c) 1989 Carnegie-Mellon University +# All rights reserved. The CMU software License Agreement specifies +# the terms and conditions for use and redistribution. 
+# + +# +# newvers.sh copyright major minor variant +# + +major="$1"; minor="$2"; variant="$3" +v="${major}.${minor}" d=`pwd` h="rcbuilder" t=`date` w=`whoami` +if [ -z "$d" -o -z "$h" -o -z "$t" ]; then + exit 1 +fi +CONFIG=`expr "$d" : '.*/\([^/]*\)$'` +d=`expr "$d" : '.*/\([^/]*/[^/]*/[^/]*\)$'` +( + /bin/echo "int ${COMPONENT}_version_major = ${major};" ; + /bin/echo "int ${COMPONENT}_version_minor = ${minor};" ; + /bin/echo "char ${COMPONENT}_version_variant[] = \"${variant}\";" ; + /bin/echo "char ${COMPONENT}_version[] = \"Platform Expert Component Version ${v}:\\n${t}; $w($h):$d\\n\";" ; + /bin/echo "char ${COMPONENT}_osrelease[] = \"${major}.${minor}\";" ; + /bin/echo "char ${COMPONENT}_ostype[] = \"Platform Expert\";" ; +) > vers.c +if [ -s vers.suffix -o ! -f vers.suffix ]; then + rm -f vers.suffix + echo ".${variant}.${CONFIG}" > vers.suffix +fi +exit 0 diff --git a/pexpert/conf/version.major b/pexpert/conf/version.major new file mode 100644 index 000000000..d00491fd7 --- /dev/null +++ b/pexpert/conf/version.major @@ -0,0 +1 @@ +1 diff --git a/pexpert/conf/version.minor b/pexpert/conf/version.minor new file mode 100644 index 000000000..573541ac9 --- /dev/null +++ b/pexpert/conf/version.minor @@ -0,0 +1 @@ +0 diff --git a/pexpert/conf/version.variant b/pexpert/conf/version.variant new file mode 100644 index 000000000..e69de29bb diff --git a/pexpert/gen/bootargs.c b/pexpert/gen/bootargs.c new file mode 100644 index 000000000..193261b2e --- /dev/null +++ b/pexpert/gen/bootargs.c @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
 * (continuation of the Apple Public Source License header for
 * pexpert/gen/bootargs.c; the first half of this comment lies in the
 * preceding patch span)
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * bootargs.c -- parsing of the kernel boot-argument string.
 *
 * An argument list looks like:  "-v debug=0x144 rootdev=sd0a"
 * i.e. space/tab separated words, each either a bare "-flag" boolean
 * or a "name=value" pair.
 */

/*
 * NOTE(review): the include target was lost in extraction (bare
 * "#include"); in xnu this is <pexpert/pexpert.h> -- verify against
 * the original tree.
 */
#include <pexpert/pexpert.h>

extern boolean_t isargsep( char c);
extern int argstrcpy(char *from, char *to);
extern int getval(char *s, int *val);

/* getval() result tags: value parsed as a number vs. left as a string */
#define	NUM	0
#define	STR	1

/*
 * PE_parse_boot_arg -- look up arg_string in the boot-args returned by
 * PE_boot_args().
 *
 * For a boolean flag ("-foo") matching arg_string, stores TRUE through
 * arg_ptr (as unsigned int).  For "name=value", stores the parsed number
 * (NUM) through arg_ptr as unsigned int, or copies the value text (STR)
 * into arg_ptr as a char buffer -- the caller must supply a buffer large
 * enough, since no bound is passed.
 *
 * Returns TRUE iff a match was found and a value stored.
 */
boolean_t
PE_parse_boot_arg(
	char	*arg_string,
	void	*arg_ptr)
{
	char *args;
	char *cp, c;
	int i;
	int val;
	boolean_t arg_boolean;
	boolean_t arg_found;

	args = PE_boot_args();
	arg_found = FALSE;

	while(isargsep(*args)) args++;

	while (*args)
	{
		/* a leading '-' marks a boolean flag argument */
		if (*args == '-')
			arg_boolean = TRUE;
		else
			arg_boolean = FALSE;

		/* find end of the argument name (separator or '=') */
		cp = args;
		while (!isargsep (*cp) && *cp != '=')
			cp++;
		/* non-boolean args must have an '=value' part */
		if (*cp != '=' && !arg_boolean)
			goto gotit;

		c = *cp;

		/* exact-length name match required */
		i = cp-args;
		if (strncmp(args, arg_string, i) ||
		    (i!=strlen(arg_string)))
			goto gotit;
		if (arg_boolean) {
			*(unsigned int *)arg_ptr = TRUE;
			arg_found = TRUE;
			break;
		} else {
			while (isargsep (*cp))
				cp++;
			if (*cp == '=' && c != '=') {
				args = cp+1;
				goto gotit;
			}

			switch (getval(cp, &val))
			{
				case NUM:
					*(unsigned int *)arg_ptr = val;
					arg_found = TRUE;
					break;
				case STR:
					/* ++cp skips the '='; unbounded copy (see above) */
					argstrcpy(++cp, (char *)arg_ptr);
					arg_found = TRUE;
					break;
			}
			goto gotit;
		}
gotit:
		/* Skip over current arg */
		while(!isargsep(*args)) args++;

		/* Skip leading white space (catch end of args) */
		while(*args && isargsep(*args)) args++;
	}

	return(arg_found);
}

/*
 * isargsep -- TRUE for the characters that delimit boot arguments:
 * space, tab, or the terminating NUL.
 */
boolean_t isargsep(
	char c)
{
	if (c == ' ' || c == '\0' || c == '\t')
		return(TRUE);
	else
		return(FALSE);
}

/*
 * argstrcpy -- copy chars from 'from' into 'to' up to (not including)
 * the next argument separator; NUL-terminates 'to' and returns the
 * number of characters copied.  No bound on 'to' (caller's contract).
 */
int
argstrcpy(
	char *from, 
	char *to)
{
	int i = 0;

	while (!isargsep(*from)) {
		i++;
		*to++ = *from++;
	}
	*to = 0;
	return(i);
}

/*
 * getval -- parse the "=value" part of an argument at 's'.
 *
 * Accepts decimal (default), 0x... hex, 0b... binary, and 0... octal,
 * with an optional leading '-'.  On success stores the number through
 * 'val' and returns NUM.  If the text is not a well-formed number,
 * returns STR (caller then treats the value as a string).  A bare name
 * with no '=' yields *val = 1, NUM.
 */
int
getval(
	char *s,
	int *val)
{
	register unsigned radix, intval;
	register unsigned char c;
	int sign = 1;

	if (*s == '=') {
		s++;
		if (*s == '-')
			sign = -1, s++;
		/* first digit also selects the radix prefix handling below */
		intval = *s++-'0';
		radix = 10;
		if (intval == 0)
			switch(*s) {

			case 'x':
				radix = 16;
				s++;
				break;

			case 'b':
				radix = 2;
				s++;
				break;

			case '0': case '1': case '2': case '3':
			case '4': case '5': case '6': case '7':
				intval = *s-'0';
				s++;
				radix = 8;
				break;

			default:
				if (!isargsep(*s))
					return (STR);
			}
		for(;;) {
			if (((c = *s++) >= '0') && (c <= '9'))
				c -= '0';
			else if ((c >= 'a') && (c <= 'f'))
				c -= 'a' - 10;
			else if ((c >= 'A') && (c <= 'F'))
				c -= 'A' - 10;
			else if (isargsep(c))
				break;
			else
				return (STR);
			/* digit out of range for the radix => not a number */
			if (c >= radix)
				return (STR);
			intval *= radix;
			intval += c;
		}
		*val = intval * sign;
		return (NUM);
	}
	*val = 1;
	return (NUM);
}

/* ==== end of pexpert/gen/bootargs.c ====
 * patch metadata: diff --git a/pexpert/gen/device_tree.c (new file,
 * +468 lines).  The Apple Public Source License header of
 * pexpert/gen/device_tree.c begins here and is completed in the next
 * patch span.
 */
/*
 * (continuation of the Apple Public Source License header for
 * pexpert/gen/device_tree.c)
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */

/*
 * device_tree.c -- read-only accessors over the flattened device tree
 * blob handed to the kernel by the booter.  The tree is a packed,
 * depth-first byte stream of DeviceTreeNode headers, each followed by
 * its properties and then its children; there are no parent/sibling
 * pointers, so "next sibling" is computed by skipping a whole subtree.
 */

/*
 * NOTE(review): all six include targets were lost in extraction (bare
 * "#include" lines).  Reconstructed from the xnu tree layout -- verify
 * against the original source before relying on them.
 */
#include <pexpert/protos.h>
#include <pexpert/boot.h>
#include <pexpert/device_tree.h>
#include <mach/mach_types.h>
#include <mach/machine/vm_types.h>
#ifdef i386
#include <i386/fakePPCStructs.h>
#endif

#ifndef NULL
#define       NULL    ((void *) 0)
#endif

/* properties are padded to 4-byte boundaries in the flattened stream */
#define round_long(x)	(((x) + 3) & -4)
#define next_prop(x)	((DeviceTreeNodeProperty *) (((int)x) + sizeof(DeviceTreeNodeProperty) + round_long(x->length)))

/* Entry: an opaque DTEntry is really a pointer into the flattened blob */
typedef DeviceTreeNode *RealDTEntry;

/* one stacked scope level for DTEnterEntry/DTExitEntry */
typedef struct DTSavedScope {
	struct DTSavedScope * nextScope;
	RealDTEntry scope;
	RealDTEntry entry;
	unsigned long index;		
} *DTSavedScopePtr;

/* Entry Iterator: walks the children of currentScope */
typedef struct OpaqueDTEntryIterator {
	RealDTEntry outerScope;
	RealDTEntry currentScope;
	RealDTEntry currentEntry;
	DTSavedScopePtr savedScope;
	unsigned long currentIndex;		
} *RealDTEntryIterator;

/* Property Iterator: walks the properties of a single entry */
typedef struct OpaqueDTPropertyIterator {
	RealDTEntry entry;
	DeviceTreeNodeProperty *currentProperty;
	unsigned long currentIndex;
} *RealDTPropertyIterator;

static int DTInitialized;	/* non-zero once DTInit() saw a non-NULL base */
static RealDTEntry DTRootNode;

void DTInit(void *base);

/*
 * Support Routines
 */

/*
 * skipProperties -- step over an entry's header and all of its
 * properties; the result is the entry's first child (or the next
 * sibling, for a childless entry).  NULL for a NULL/property-less entry.
 */
static RealDTEntry
skipProperties(RealDTEntry entry)
{
	DeviceTreeNodeProperty *prop;
	int k;

	if (entry == NULL || entry->nProperties == 0) {
		return NULL;
	} else {
		prop = (DeviceTreeNodeProperty *) (entry + 1);
		for (k = 0; k < entry->nProperties; k++) {
			prop = next_prop(prop);
		}
	}
	return ((RealDTEntry) prop);
}

/*
 * skipTree -- step over an entire subtree (entry, its properties and,
 * recursively, all descendants); the result is the next sibling.
 */
static RealDTEntry
skipTree(RealDTEntry root)
{
	RealDTEntry entry;
	int k;

	entry = skipProperties(root);
	if (entry == NULL) {
		return NULL;
	}
	for (k = 0; k < root->nChildren; k++) {
		entry = skipTree(entry);
	}
	return entry;
}

static RealDTEntry
GetFirstChild(RealDTEntry parent)
{
	return skipProperties(parent);
}

static RealDTEntry
GetNextChild(RealDTEntry sibling)
{
	return skipTree(sibling);
}

/*
 * GetNextComponent -- copy one path component (up to the next
 * kDTPathNameSeparator or NUL) from cp into bp; returns a pointer past
 * the consumed component.  bp is NUL-terminated; no bound is checked
 * (callers pass a DTEntryNameBuf).
 */
static const char *
GetNextComponent(const char *cp, char *bp)
{
	while (*cp != 0) {
		if (*cp == kDTPathNameSeparator) {
			cp++;
			break;
		}
		*bp++ = *cp++;
	}
	*bp = 0;
	return cp;
}

/*
 * FindChild -- linear search of cur's children for one whose "name"
 * property equals buf; NULL if absent.
 */
static RealDTEntry
FindChild(RealDTEntry cur, char *buf)
{
	RealDTEntry	child;
	unsigned long	index;
	char *		str;
	int		dummy;

	if (cur->nChildren == 0) {
		return NULL;
	}
	index = 1;
	child = GetFirstChild(cur);
	while (1) {
		if (DTGetProperty(child, "name", (void **)&str, &dummy) != kSuccess) {
			break;
		}
		if (strcmp(str, buf) == 0) {
			return child;
		}
		if (index >= cur->nChildren) {
			break;
		}
		child = GetNextChild(child);
		index++;
	}
	return NULL;
}


/*
 * External Routines
 */

/* DTInit -- record the base of the flattened tree; NULL leaves the API
 * disabled (every call then returns kError). */
void
DTInit(void *base)
{
	DTRootNode = (RealDTEntry) base;
	DTInitialized = (DTRootNode != 0);
}

int
DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2)
{
	/* equality of pointers */
	return (ref1 == ref2);
}

static char *startingP;		// needed for find_entry
int find_entry(const char *propName, const char *propValue, DTEntry *entryH);

/*
 * DTFindEntry -- depth-first search of the whole tree for the first
 * entry holding property propName with (string) value propValue.
 */
int DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH)
{
	if (!DTInitialized) {
		return kError;
	}

	startingP = (char *)DTRootNode;
	return(find_entry(propName, propValue, entryH));
}

/*
 * find_entry -- recursive worker for DTFindEntry.  Consumes the
 * flattened stream through the file-scope cursor startingP (NOT
 * reentrant / not thread-safe).
 */
int find_entry(const char *propName, const char *propValue, DTEntry *entryH)
{
	DeviceTreeNode *nodeP = (DeviceTreeNode *) startingP;
	int k;

	if (nodeP->nProperties == 0) return(kError);	// End of the list of nodes
	startingP = (char *) (nodeP + 1);

	// Search current entry
	for (k = 0; k < nodeP->nProperties; ++k) {
		DeviceTreeNodeProperty *propP = (DeviceTreeNodeProperty *) startingP;

		/* advance cursor past this property (length rounded to 4) */
		startingP += sizeof (*propP) + ((propP->length + 3) & -4);

		if (strcmp (propP->name, propName) == 0) {
			if (strcmp( (char *)(propP + 1), propValue) == 0)
			{
				*entryH = (DTEntry)nodeP;
				return(kSuccess);
			}
		}
	}

	// Search child nodes
	for (k = 0; k < nodeP->nChildren; ++k)
	{
		if (find_entry(propName, propValue, entryH) == kSuccess)
			return(kSuccess);
	}
	return(kError);
}

/*
 * DTLookupEntry -- resolve a separator-delimited path (relative to
 * searchPoint, or to the root when searchPoint is NULL) to an entry.
 */
int
DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry)
{
	DTEntryNameBuf	buf;
	RealDTEntry	cur;
	const char *	cp;

	if (!DTInitialized) {
		return kError;
	}
	if (searchPoint == NULL) {
		cur = DTRootNode;
	} else {
		cur = searchPoint;
	}
	cp = pathName;
	if (*cp == kDTPathNameSeparator) {
		cp++;
		if (*cp == 0) {
			*foundEntry = cur;
			return kSuccess;
		}
	}
	do {
		cp = GetNextComponent(cp, buf);

		/* Check for done */
		if (*buf == 0) {
			if (*cp == 0) {
				*foundEntry = cur;
				return kSuccess;
			}
			break;
		}

		cur = FindChild(cur, buf);

	} while (cur != NULL);

	return kError;
}

/*
 * DTCreateEntryIterator -- kalloc a child iterator rooted at startEntry
 * (or the tree root when NULL).  Caller disposes with
 * DTDisposeEntryIterator.  NOTE(review): kalloc result is not checked.
 */
int
DTCreateEntryIterator(const DTEntry startEntry, DTEntryIterator *iterator)
{
	RealDTEntryIterator iter;

	if (!DTInitialized) {
		return kError;
	}

	iter = (RealDTEntryIterator) kalloc(sizeof(struct OpaqueDTEntryIterator));
	if (startEntry != NULL) {
		iter->outerScope = (RealDTEntry) startEntry;
		iter->currentScope = (RealDTEntry) startEntry;
	} else {
		iter->outerScope = DTRootNode;
		iter->currentScope = DTRootNode;
	}
	iter->currentEntry = NULL;
	iter->savedScope = NULL;
	iter->currentIndex = 0;

	*iterator = iter;
	return kSuccess;
}

/* DTDisposeEntryIterator -- free any stacked scopes, then the iterator */
int
DTDisposeEntryIterator(DTEntryIterator iterator)
{
	RealDTEntryIterator iter = iterator;
	DTSavedScopePtr scope;

	while ((scope = iter->savedScope) != NULL) {
		iter->savedScope = scope->nextScope;
		kfree((vm_offset_t) scope, sizeof(struct DTSavedScope));
	}
	kfree((vm_offset_t) iterator, sizeof(struct OpaqueDTEntryIterator));
	return kSuccess;
}

/*
 * DTEnterEntry -- push the current scope and descend into childEntry,
 * so subsequent DTIterateEntries walk childEntry's children.
 */
int
DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry)
{
	RealDTEntryIterator iter = iterator;
	DTSavedScopePtr newScope;

	if (childEntry == NULL) {
		return kError;
	}
	newScope = (DTSavedScopePtr) kalloc(sizeof(struct DTSavedScope));
	newScope->nextScope = iter->savedScope;
	newScope->scope = iter->currentScope;
	newScope->entry = iter->currentEntry;
	newScope->index = iter->currentIndex;		

	iter->currentScope = childEntry;
	iter->currentEntry = NULL;
	iter->savedScope = newScope;
	iter->currentIndex = 0;

	return kSuccess;
}

/*
 * DTExitEntry -- pop back to the scope saved by the matching
 * DTEnterEntry; kError when the scope stack is empty.
 */
int
DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition)
{
	RealDTEntryIterator iter = iterator;
	DTSavedScopePtr newScope;

	newScope = iter->savedScope;
	if (newScope == NULL) {
		return kError;
	}
	iter->savedScope = newScope->nextScope;
	iter->currentScope = newScope->scope;
	iter->currentEntry = newScope->entry;
	iter->currentIndex = newScope->index;
	*currentPosition = iter->currentEntry;

	kfree((vm_offset_t) newScope, sizeof(struct DTSavedScope));

	return kSuccess;
}

/*
 * DTIterateEntries -- yield the next child of currentScope, or
 * kIterationDone (with *nextEntry = NULL) when exhausted.
 */
int
DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry)
{
	RealDTEntryIterator iter = iterator;

	if (iter->currentIndex >= iter->currentScope->nChildren) {
		*nextEntry = NULL;
		return kIterationDone;
	} else {
		iter->currentIndex++;
		if (iter->currentIndex == 1) {
			iter->currentEntry = GetFirstChild(iter->currentScope);
		} else {
			iter->currentEntry = GetNextChild(iter->currentEntry);
		}
		*nextEntry = iter->currentEntry;
		return kSuccess;
	}
}

/* DTRestartEntryIteration -- rewind iteration within the current scope */
int
DTRestartEntryIteration(DTEntryIterator iterator)
{
	RealDTEntryIterator iter = iterator;
#if 0
	// This commented out code allows a second argument (outer)
	// which (if true) causes restarting at the outer scope
	// rather than the current scope.
	DTSavedScopePtr scope;

	if (outer) {
		while ((scope = iter->savedScope) != NULL) {
			iter->savedScope = scope->nextScope;
			kfree((vm_offset_t) scope, sizeof(struct DTSavedScope));
		}
		iter->currentScope = iter->outerScope;
	}
#endif
	iter->currentEntry = NULL;
	iter->currentIndex = 0;
	return kSuccess;
}

/*
 * DTGetProperty -- linear search of entry's properties for
 * propertyName; returns a pointer INTO the flattened blob (not a copy)
 * plus its byte length.
 */
int
DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, int *propertySize)
{
	DeviceTreeNodeProperty *prop;
	int k;

	if (entry == NULL || entry->nProperties == 0) {
		return kError;
	} else {
		prop = (DeviceTreeNodeProperty *) (entry + 1);
		for (k = 0; k < entry->nProperties; k++) {
			if (strcmp(prop->name, propertyName) == 0) {
				*propertyValue = (void *) (((int)prop)
						+ sizeof(DeviceTreeNodeProperty));
				*propertySize = prop->length;
				return kSuccess;
			}
			prop = next_prop(prop);
		}
	}
	return kError;
}

/* DTCreatePropertyIterator -- kalloc a property iterator over entry.
 * NOTE(review): kalloc result is not checked. */
int
DTCreatePropertyIterator(const DTEntry entry, DTPropertyIterator *iterator)
{
	RealDTPropertyIterator iter;

	iter = (RealDTPropertyIterator) kalloc(sizeof(struct OpaqueDTPropertyIterator));
	iter->entry = entry;
	iter->currentProperty = NULL;
	iter->currentIndex = 0;

	*iterator = iter;
	return kSuccess;
}

int
DTDisposePropertyIterator(DTPropertyIterator iterator)
{
	kfree((vm_offset_t)iterator, sizeof(struct OpaqueDTPropertyIterator));
	return kSuccess;
}

/*
 * DTIterateProperties -- yield the name of the next property of the
 * iterator's entry, or kIterationDone when exhausted.
 */
int
DTIterateProperties(DTPropertyIterator iterator, char **foundProperty)
{
	RealDTPropertyIterator iter = iterator;

	if (iter->currentIndex >= iter->entry->nProperties) {
		*foundProperty = NULL;
		return kIterationDone;
	} else {
		iter->currentIndex++;
		if (iter->currentIndex == 1) {
			iter->currentProperty = (DeviceTreeNodeProperty *) (iter->entry + 1);
		} else {
			iter->currentProperty = next_prop(iter->currentProperty);
		}
		*foundProperty = iter->currentProperty->name;
		return kSuccess;
	}
}

/* DTRestartPropertyIteration -- rewind property iteration */
int
DTRestartPropertyIteration(DTPropertyIterator iterator)
{
	RealDTPropertyIterator iter = iterator;

	iter->currentProperty = NULL;
	iter->currentIndex = 0;
	return kSuccess;
}

/* ==== end of pexpert/gen/device_tree.c ====
 * patch metadata: diff --git a/pexpert/gen/pe_gen.c (new file, +57
 * lines), Apple Public Source License header as above.
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */

/*
 * pe_gen.c -- small machine-independent Platform Expert helpers:
 * debugger-entry gating and console-putc selection.
 *
 * NOTE(review): both include targets were lost in extraction;
 * reconstructed -- verify against the original tree.
 */
#include <pexpert/pexpert.h>
#include <pexpert/protos.h>

static int DEBUGFlag;	/* value of the "debug" boot-arg (0 if absent) */

void pe_init_debug(void)
{
  if (!PE_parse_boot_arg("debug", &DEBUGFlag))
    DEBUGFlag = 0;
}

/* PE_enter_debugger -- drop into the kernel debugger only when the
 * DB_NMI bit is set in the "debug" boot-arg. */
void PE_enter_debugger(char *cause)
{
  if (DEBUGFlag & DB_NMI)
    Debugger(cause);
}

/* extern references */
extern void cnputc(char c);
extern void vcattach(void);

/* Globals */
void (*PE_putc)(char c) = 0;

/* PE_init_printf -- before VM is up, route console output through
 * cnputc; afterwards attach the video console instead. */
void PE_init_printf(boolean_t vm_initialized)
{
  if (!vm_initialized) {
    PE_putc = cnputc;
  } else {
    vcattach();
  }
}

/* ==== end of pexpert/gen/pe_gen.c ====
 * patch metadata: diff --git a/pexpert/i386/fakePPCDeviceTree.c (new
 * file, +149 lines); its Apple Public Source License header begins
 * here and is completed in the next patch span.
 */
/*
 * (continuation of the Apple Public Source License header for
 * pexpert/i386/fakePPCDeviceTree.c)
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * fakePPCDeviceTree.c -- build, at runtime, a flattened PowerPC-style
 * device tree blob from the compact dt_init template in
 * fakePPCDeviceTree.h, so the i386 port can feed the same device-tree
 * code paths as PPC.
 *
 * NOTE(review): the first include target was lost in extraction;
 * reconstructed as <pexpert/protos.h> -- verify against the original
 * tree.
 */
#include <pexpert/protos.h>

#include "fakePPCStructs.h"

boot_args fakePPCBootArgs = {
  0,              // Revision
  kBootArgsVersion,    // Version
  "",            // CommandLine
  0,            // PhysicalDRAM
  0,            // machine_type
  0,            // deviceTreeP
  0,            // deviceTreeLength
  0,            // topOfKernelData
};

/*
 * createdt -- expand a dt_init template array into a freshly kalloc'd
 * flattened device tree.
 *
 * Template encoding (see fakePPCStructs.h): an element with
 * nodeInit.zero == 0 is a node header (nProps/nChildren pair; nProps
 * == 0 terminates the template); anything else is a property whose
 * record is a 32-byte name, a 4-byte length, then the value padded to
 * 4 bytes (4-byte values are stored inline in the template).
 *
 * Returns the blob and stores its size through retSize.
 * NOTE(review): the kalloc() result is used unchecked.
 */
void * createdt(dt_init *template, long *retSize)
{
    dt_init *		next;
    int			size, allocSize;
    vm_address_t	out, saveout;
    void *		source;

    // calc size of expanded data
    for( next = template, allocSize = 0;
	 next;
	 next++) {

	if( next->nodeInit.zero == 0) {
	    if( next->nodeInit.nProps == 0)
		break;
	    allocSize += 2 * sizeof( long);
	} else
	    /* 32-byte name + 4-byte length + value rounded up to 4 */
	    allocSize += (32 + 4 + 3 + next->propInit.length) & (-4);
    }
    saveout = out = kalloc( allocSize);

    // copy out
    for( next = template;
	 next;
	 next++) {

	if( next->nodeInit.zero == 0) {

	    if( next->nodeInit.nProps == 0)
		break;
	    source = &next->nodeInit.nProps;
	    size = 2 * sizeof( long);

	} else {

	    bcopy( next->propInit.name, out, 32);
	    out += 32;
	    size = next->propInit.length;
	    *(long *)out = size;
	    out += sizeof( long);
	    if( size == 4)
		/* small values are stored inline in the template */
		source = &next->propInit.value;
	    else {
		source = next->propInit.value;
		size = (size + 3) & (-4);
	    }
	}
	bcopy( source, out, size);
	out += size;
    }

    /* sanity check: bytes emitted must equal the size precomputed above */
    if( allocSize != (out - saveout))
	printf("WARNING: DT corrupt (%x)\n", (out - saveout) - allocSize);

    *retSize = allocSize;
    return( (void *)saveout);
}

unsigned char *nptr;	/* walk cursor shared with printdt() recursion */

#define kPropNameLength	32

typedef struct property_t {
    char		name[kPropNameLength];	// NUL terminated property name
    unsigned long	length;		// Length (bytes) of following prop value
    unsigned long	*value;		// Variable length value of property
} property_t;

typedef struct node_t {
    unsigned long	nProperties;	// Number of props[] elements (0 => end)
    unsigned long	nChildren;	// Number of children[] elements
    property_t		*props;		// array size == nProperties
    struct node_t	*children;	// array size == nChildren
} node_t;


int indent = 0;		/* printdt() nesting level, for indentation */

/*
 * printdt -- debug dump of the flattened tree at nptr: prints each
 * property name, length and hex value, then recurses into children.
 * Advances the global cursor nptr as it goes.
 *
 * NOTE(review): extraction corrupted this function's child loop
 * ("for (i=0; inChildren; i++)"); reconstructed below as
 * "i<nodeptr->nChildren" to match the struct walked here -- confirm
 * against the original tree.  Whitespace inside string literals (the
 * indentation printf) may also have been collapsed by extraction.
 */
void printdt()
{
    node_t *nodeptr = (node_t *)nptr;
    long num_props      = nodeptr->nProperties;
    long len;
    int i, j;
    unsigned char *sptr;

    nptr = (unsigned char *)&nodeptr->props;
    for (i=0; i < num_props; i++)
    {
        for (j = 0; j < indent; j++)
            printf("    ");
        printf("'");
        printf("%s", nptr);
        nptr+=32;
        len = *((long*)nptr);
        nptr += 4;
        printf("'\t\t(%ld) '", len);
        sptr = nptr;
        for (j = 0; j < len; j++)
            printf("%2.2x", *nptr++);
        printf("'\t('%s')\n", sptr);
        if (len % 4)
            nptr += (4 - (len % 4));
    }
    for (i=0; i<nodeptr->nChildren; i++)
    {
        indent++;
        printdt();
        indent--;
    }
}

/* ==== end of pexpert/i386/fakePPCDeviceTree.c ====
 * patch metadata: diff --git a/pexpert/i386/fakePPCDeviceTree.h (new
 * file, +60 lines); its Apple Public Source License header begins
 * here and is completed in the next patch span.
 */
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +unsigned long busRange[] = { 0, 0 }; +unsigned long picAddress[] = { 0x00008000, 0x00000000, 0x00000000, + 0xC8000000, 0x00080000}; + +dt_init fakePPCDeviceTree[] = { + NODE( 7, 2 ), + PROP( "name", "device-tree"), + PROP( "model", "Power Macintosh"), + PROP( "compatible", "AAPL,9900\0MacRISC"), + INTPROP( "AAPL,cpu-id", 0x39006086), + INTPROP( "clock-frequency", 0x02FAF080), + INTPROP( "#address-cells", 1), + INTPROP( "#size-cells", 1), + + NODE( 1,0 ), + PROP( "name", "ps2controller"), + + NODE( 3,0 ), + PROP( "name", "display"), + PROP( "model", "silly"), + INTPROP( "AAPL,boot-display", 1), +#if 0 + NODE( 6,1 ), + PROP( "name", "i386generic"), + PROP( "device_type", "pci"), + INTPROP( "#address-cells", 3), + INTPROP( "#size-cells", 2), + PROP( "bus-range", busRange), + NULLPROP( "ranges" ), + + NODE( 4, 0), + PROP( "name", "i386pic"), + PROP( "device_type", "pic"), + PROP( "reg", picAddress), + PROP( "assigned-addresses", picAddress), +#endif + NODE( 0, 0), +}; diff --git a/pexpert/i386/fakePPCStructs.h b/pexpert/i386/fakePPCStructs.h new file mode 100644 index 000000000..622b8441e --- /dev/null +++ b/pexpert/i386/fakePPCStructs.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +typedef struct { + char * name; + unsigned long length; + void * value; +} prop_init; + +typedef struct { + long zero; + long nProps; + long nChildren; +} node_init; + +typedef union { + prop_init propInit; + node_init nodeInit; +} dt_init; + +extern boot_args fakePPCBootArgs; +extern unsigned char *nptr; + +void printdt(void); +void * createdt(dt_init *template, long *retSize); + + +#define NODE(props,children) \ + {{(char *)0, props, (void *)children }} + +#define INTPROP(name,value) \ + {{name, 4, (void *)value }} + +#define PROP(name,value) \ + {{name, sizeof( value), value }} + +#define NULLPROP(name) \ + {{name, 0, (void *)0 }} diff --git a/pexpert/i386/kd.c b/pexpert/i386/kd.c new file mode 100644 index 000000000..e0f0fd17a --- /dev/null +++ b/pexpert/i386/kd.c @@ -0,0 +1,1026 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). 
You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + */ + +/* + * Olivetti Mach Console driver v0.0 + * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989 + * All rights reserved. + * + */ +/* + * Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., + * Cupertino, California. + * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Olivetti + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. + * + * OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * + * + * Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. + * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Intel + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. + * + * INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* $ Header: $ */ + +#include +#include "kd.h" + +#include + +#define at386_io_lock_state() +#define at386_io_lock(op) (TRUE) +#define at386_io_unlock() + + +typedef unsigned short i386_ioport_t; + +/* read a byte */ +extern unsigned char inb( + i386_ioport_t port); +/* write a longword */ +extern void outb( + i386_ioport_t port, + unsigned char datum); + +extern __inline__ unsigned char inb( + i386_ioport_t port) +{ + unsigned char datum; + __asm__ volatile("inb %1, %0" : "=a" (datum) : "d" (port)); + return(datum); +} + +extern __inline__ void outb( + i386_ioport_t port, + unsigned char datum) +{ + __asm__ volatile("outb %0, %1" : : "a" (datum), "d" (port)); +} + +/* Forward */ + +extern void kd_sendcmd(unsigned char ch); +extern void kdreboot(void); +extern int kd_dogetc(int wait); +extern void kd_handle_ack(void); +extern void kd_resend(void); +extern int do_modifier( + int state, + Scancode c, + int up); +extern int kdcheckmagic( + Scancode sc, + int * regs); +extern int kdstate2idx( + int state, + int extended); +extern void kdinit(void); +extern void kd_belloff(void); +extern void kd_bellon(void); +extern void kd_senddata(unsigned char c); +extern unsigned char kd_getdata(void); +extern unsigned char kd_cmdreg_read(void); +extern void set_kd_state( + int newstate); +extern unsigned char state2leds( + int state); +extern void kd_setleds1( + unsigned char val); +extern void kd_setleds2(void); +extern void cnsetleds( + unsigned char val); +extern int kd_kbd_magic( + int scancode); + +extern int cngetc(void); +extern int cnmaygetc(void); +extern void kdreboot(void); +extern int kd_dogetc(int wait); + +/* reboot on CTL-ALT-DEL ? */ +extern int rebootflag; +/* enter kernel debugger on CTR-ALT-d ? */ +int kbdkdbflag = 1; +/* allow keyboard mouse ? */ +int kbdmouseflag = 0; + +/* + * kd_state shows the state of the modifier keys (ctrl, caps lock, + * etc.) 
It should normally be changed by calling set_kd_state(), so + * that the keyboard status LEDs are updated correctly. + */ +int kd_state = KS_NORMAL; +int kb_mode = KB_ASCII; /* event/ascii */ + +int kd_kbd_mouse = 0; +int kd_kbd_magic_scale = 6; +int kd_kbd_magic_button = 0; + +/* + * Some keyboard commands work by sending a command, waiting for an + * ack (handled by kdintr), then sending data, which generates a + * second ack. If we are in the middle of such a sequence, kd_ack + * shows what the ack is for. + * + * When a byte is sent to the keyboard, it is kept around in last_sent + * in case it needs to be resent. + * + * The rest of the variables here hold the data required to complete + * the sequence. + * + * XXX - the System V driver keeps a command queue, I guess in case we + * want to start a command while another is in progress. Is this + * something we should worry about? + */ +enum why_ack {NOT_WAITING, SET_LEDS, DATA_ACK}; +enum why_ack kd_ack = NOT_WAITING; + +unsigned char last_sent = 0; + +unsigned char kd_nextled = 0; + +/* + * We don't provide any mutex protection for this flag because we know + * that this module will have been initialized by the time multiple + * threads are running. + */ +int kd_initialized = FALSE; /* driver initialized? */ +int kd_extended = FALSE; + +/* + * This array maps scancodes to Ascii characters (or character + * sequences). + * Each row corresponds to one key. There are NUMOUTPUT bytes per key + * state. The states are ordered: Normal, SHIFT, CTRL, ALT, + * SHIFT/ALT. 
+ */ +unsigned char key_map[NUMKEYS][WIDTH_KMAP] = { +{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC}, +{K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC, K_ESC,NC,NC}, +{K_ONE,NC,NC, K_BANG,NC,NC, K_ONE,NC,NC, 0x1b,0x4e,0x31, 0x1b,0x4e,0x21}, +{K_TWO,NC,NC, K_ATSN,NC,NC, K_NUL,NC,NC, 0x1b,0x4e,0x32, 0x1b,0x4e,0x40}, +{K_THREE,NC,NC, K_POUND,NC,NC, K_THREE,NC,NC, 0x1b,0x4e,0x33, 0x1b,0x4e,0x23}, +{K_FOUR,NC,NC, K_DOLLAR,NC,NC, K_FOUR,NC,NC, 0x1b,0x4e,0x34, 0x1b,0x4e,0x24}, +{K_FIVE,NC,NC, K_PERC,NC,NC, K_FIVE,NC,NC, 0x1b,0x4e,0x35, 0x1b,0x4e,0x25}, +{K_SIX,NC,NC, K_CARET,NC,NC, K_RS,NC,NC, 0x1b,0x4e,0x36, 0x1b,0x4e,0x5e}, +{K_SEVEN,NC,NC, K_AMPER,NC,NC, K_SEVEN,NC,NC, 0x1b,0x4e,0x37, 0x1b,0x4e,0x26}, +{K_EIGHT,NC,NC, K_ASTER,NC,NC, K_EIGHT,NC,NC, 0x1b,0x4e,0x38, 0x1b,0x4e,0x2a}, +{K_NINE,NC,NC, K_LPAREN,NC,NC, K_NINE,NC,NC, 0x1b,0x4e,0x39,0x1b,0x4e,0x28}, +{K_ZERO,NC,NC, K_RPAREN,NC,NC, K_ZERO,NC,NC, 0x1b,0x4e,0x30,0x1b,0x4e,0x29}, +{K_MINUS,NC,NC, K_UNDSC,NC,NC, K_US,NC,NC, 0x1b,0x4e,0x2d, 0x1b,0x4e,0x5f}, +{K_EQL,NC,NC, K_PLUS,NC,NC, K_EQL,NC,NC, 0x1b,0x4e,0x3d, 0x1b,0x4e,0x2b}, +{K_BS,NC,NC, K_BS,NC,NC, K_BS,NC,NC, K_BS,NC,NC, K_BS,NC,NC}, +{K_HT,NC,NC, K_GS,NC,NC, K_HT,NC,NC, K_HT,NC,NC, K_GS,NC,NC}, +{K_q,NC,NC, K_Q,NC,NC, K_DC1,NC,NC, 0x1b,0x4e,0x71, 0x1b,0x4e,0x51}, +{K_w,NC,NC, K_W,NC,NC, K_ETB,NC,NC, 0x1b,0x4e,0x77, 0x1b,0x4e,0x57}, +{K_e,NC,NC, K_E,NC,NC, K_ENQ,NC,NC, 0x1b,0x4e,0x65, 0x1b,0x4e,0x45}, +{K_r,NC,NC, K_R,NC,NC, K_DC2,NC,NC, 0x1b,0x4e,0x72, 0x1b,0x4e,0x52}, +{K_t,NC,NC, K_T,NC,NC, K_DC4,NC,NC, 0x1b,0x4e,0x74, 0x1b,0x4e,0x54}, +{K_y,NC,NC, K_Y,NC,NC, K_EM,NC,NC, 0x1b,0x4e,0x79, 0x1b,0x4e,0x59}, +{K_u,NC,NC, K_U,NC,NC, K_NAK,NC,NC, 0x1b,0x4e,0x75, 0x1b,0x4e,0x55}, +{K_i,NC,NC, K_I,NC,NC, K_HT,NC,NC, 0x1b,0x4e,0x69, 0x1b,0x4e,0x49}, +{K_o,NC,NC, K_O,NC,NC, K_SI,NC,NC, 0x1b,0x4e,0x6f, 0x1b,0x4e,0x4f}, +{K_p,NC,NC, K_P,NC,NC, K_DLE,NC,NC, 0x1b,0x4e,0x70, 0x1b,0x4e,0x50}, +{K_LBRKT,NC,NC, K_LBRACE,NC,NC, K_ESC,NC,NC, 0x1b,0x4e,0x5b, 
0x1b,0x4e,0x7b}, +{K_RBRKT,NC,NC, K_RBRACE,NC,NC, K_GS,NC,NC, 0x1b,0x4e,0x5d, 0x1b,0x4e,0x7d}, +{K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC}, +{K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, + K_SCAN,K_CTLSC,NC}, +{K_a,NC,NC, K_A,NC,NC, K_SOH,NC,NC, 0x1b,0x4e,0x61, 0x1b,0x4e,0x41}, +{K_s,NC,NC, K_S,NC,NC, K_DC3,NC,NC, 0x1b,0x4e,0x73, 0x1b,0x4e,0x53}, +{K_d,NC,NC, K_D,NC,NC, K_EOT,NC,NC, 0x1b,0x4e,0x65, 0x1b,0x4e,0x45}, +{K_f,NC,NC, K_F,NC,NC, K_ACK,NC,NC, 0x1b,0x4e,0x66, 0x1b,0x4e,0x46}, +{K_g,NC,NC, K_G,NC,NC, K_BEL,NC,NC, 0x1b,0x4e,0x67, 0x1b,0x4e,0x47}, +{K_h,NC,NC, K_H,NC,NC, K_BS,NC,NC, 0x1b,0x4e,0x68, 0x1b,0x4e,0x48}, +{K_j,NC,NC, K_J,NC,NC, K_LF,NC,NC, 0x1b,0x4e,0x6a, 0x1b,0x4e,0x4a}, +{K_k,NC,NC, K_K,NC,NC, K_VT,NC,NC, 0x1b,0x4e,0x6b, 0x1b,0x4e,0x4b}, +{K_l,NC,NC, K_L,NC,NC, K_FF,NC,NC, 0x1b,0x4e,0x6c, 0x1b,0x4e,0x4c}, +{K_SEMI,NC,NC, K_COLON,NC,NC, K_SEMI,NC,NC, 0x1b,0x4e,0x3b, 0x1b,0x4e,0x3a}, +{K_SQUOTE,NC,NC,K_DQUOTE,NC,NC,K_SQUOTE,NC,NC,0x1b,0x4e,0x27,0x1b,0x4e,0x22}, +{K_GRAV,NC,NC, K_TILDE,NC,NC, K_RS,NC,NC, 0x1b,0x4e,0x60, 0x1b,0x4e,0x7e}, +{K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, + K_SCAN,K_LSHSC,NC}, +{K_BSLSH,NC,NC, K_PIPE,NC,NC, K_FS,NC,NC, 0x1b,0x4e,0x5c, 0x1b,0x4e,0x7c}, +{K_z,NC,NC, K_Z,NC,NC, K_SUB,NC,NC, 0x1b,0x4e,0x7a, 0x1b,0x4e,0x5a}, +{K_x,NC,NC, K_X,NC,NC, K_CAN,NC,NC, 0x1b,0x4e,0x78, 0x1b,0x4e,0x58}, +{K_c,NC,NC, K_C,NC,NC, K_ETX,NC,NC, 0x1b,0x4e,0x63, 0x1b,0x4e,0x43}, +{K_v,NC,NC, K_V,NC,NC, K_SYN,NC,NC, 0x1b,0x4e,0x76, 0x1b,0x4e,0x56}, +{K_b,NC,NC, K_B,NC,NC, K_STX,NC,NC, 0x1b,0x4e,0x62, 0x1b,0x4e,0x42}, +{K_n,NC,NC, K_N,NC,NC, K_SO,NC,NC, 0x1b,0x4e,0x6e, 0x1b,0x4e,0x4e}, +{K_m,NC,NC, K_M,NC,NC, K_CR,NC,NC, 0x1b,0x4e,0x6d, 0x1b,0x4e,0x4d}, +{K_COMMA,NC,NC, K_LTHN,NC,NC, K_COMMA,NC,NC, 0x1b,0x4e,0x2c, 0x1b,0x4e,0x3c}, +{K_PERIOD,NC,NC, K_GTHN,NC,NC, K_PERIOD,NC,NC,0x1b,0x4e,0x2e,0x1b,0x4e,0x3e}, +{K_SLASH,NC,NC, K_QUES,NC,NC, K_SLASH,NC,NC, 
0x1b,0x4e,0x2f, 0x1b,0x4e,0x3f}, +{K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, + K_SCAN,K_RSHSC,NC}, +{K_ASTER,NC,NC, K_ASTER,NC,NC, K_ASTER,NC,NC, 0x1b,0x4e,0x2a,0x1b,0x4e,0x2a}, +{K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, + K_SCAN,K_ALTSC,NC}, +{K_SPACE,NC,NC, K_SPACE,NC,NC, K_NUL,NC,NC, K_SPACE,NC,NC, K_SPACE,NC,NC}, +{K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, + K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC}, +{K_F1, K_F1S, K_F1, K_F1, K_F1S}, +{K_F2, K_F2S, K_F2, K_F2, K_F2S}, +{K_F3, K_F3S, K_F3, K_F3, K_F3S}, +{K_F4, K_F4S, K_F4, K_F4, K_F4S}, +{K_F5, K_F5S, K_F5, K_F5, K_F5S}, +{K_F6, K_F6S, K_F6, K_F6, K_F6S}, +{K_F7, K_F7S, K_F7, K_F7, K_F7S}, +{K_F8, K_F8S, K_F8, K_F8, K_F8S}, +{K_F9, K_F9S, K_F9, K_F9, K_F9S}, +{K_F10, K_F10S, K_F10, K_F10, K_F10S}, +{K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, + K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC}, +{K_SCRL, K_NUL,NC,NC, K_SCRL, K_SCRL, K_NUL,NC,NC}, +{K_HOME, K_SEVEN,NC,NC, K_HOME, K_HOME, 0x1b,0x4e,0x37}, +{K_UA, K_EIGHT,NC,NC, K_UA, K_UA, 0x1b,0x4e,0x38}, +{K_PUP, K_NINE,NC,NC, K_PUP, K_PUP, 0x1b,0x4e,0x39}, +{0x1b,0x5b,0x53, K_MINUS,NC,NC, 0x1b,0x5b,0x53,0x1b,0x5b,0x53,0x1b,0x4e,0x2d}, +{K_LA, K_FOUR,NC,NC, K_LA, K_LA, 0x1b,0x4e,0x34}, +{0x1b,0x5b,0x47,K_FIVE,NC,NC,0x1b,0x5b,0x47, 0x1b,0x5b,0x47, 0x1b,0x4e,0x35}, +{K_RA, K_SIX,NC,NC, K_RA, K_RA, 0x1b,0x4e,0x36}, +{0x1b,0x5b,0x54,K_PLUS,NC,NC, 0x1b,0x5b,0x54, 0x1b,0x5b,0x54, 0x1b,0x4e,0x2b}, +{K_END, K_ONE,NC,NC, K_END, K_END, 0x1b,0x4e,0x31}, +{K_DA, K_TWO,NC,NC, K_DA, K_DA, 0x1b,0x4e,0x32}, +{K_PDN, K_THREE,NC,NC, K_PDN, K_PDN, 0x1b,0x4e,0x33}, +{K_INS, K_ZERO,NC,NC, K_INS, K_INS, 0x1b,0x4e,0x30}, +{K_DEL,NC,NC, K_PERIOD,NC,NC, K_DEL,NC,NC, K_DEL,NC,NC, 0x1b,0x4e,0x2e}, +{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC}, +{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC}, +{NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC,NC}, +{K_F11, K_F11S, K_F11, K_F11, K_F11S}, +{K_F12, K_F12S, 
K_F12, K_F12, K_F12S} +}; + +extern void cnputc(unsigned char ch); + +/* + * Switch for poll vs. interrupt. + */ +int kd_pollc = 0; + +int (*cgetc)( + int wait) = kd_dogetc; + /* get a char. from console */ +void (*cputc)( + char ch) = cnputc; + /* put a char. to console */ + +/* + * cngetc: + * + * Get one character using polling, rather than interrupts. Used + * by the kernel debugger. Note that Caps Lock is ignored. + * Normally this routine is called with interrupts already + * disabled, but there is code in place so that it will be more + * likely to work even if interrupts are turned on. + */ + +int +cngetc(void) +{ + int ret; + + ret = (*cgetc)(TRUE); + + return ret; +} + +int +cnmaygetc(void) +{ + int ret; + + ret = (*cgetc)(FALSE); + + return ret; +} + +int +kd_dogetc( + int wait) +{ + unsigned char c; + unsigned char scancode; + unsigned int char_idx; + int up; + + kdinit(); + kd_extended = FALSE; + + for ( ; ; ) { + while (!(inb(K_STATUS) & K_OBUF_FUL)) + if (!wait) + return (-1); + up = FALSE; + /* + * We'd come here for mouse events in debugger, if + * the mouse were on. + */ + if ((inb(K_STATUS) & 0x20) == 0x20) { + printf("M%xP", inb(K_RDWR)); + continue; + } + scancode = inb(K_RDWR); + /* + * Handle extend modifier and + * ack/resend, otherwise we may never receive + * a key. + */ + if (scancode == K_EXTEND) { + kd_extended = TRUE; + continue; + } else if (scancode == K_RESEND) { +/* printf("kd_getc: resend"); */ + kd_resend(); + continue; + } else if (scancode == K_ACKSC) { +/* printf("kd_getc: handle_ack"); */ + kd_handle_ack(); + continue; + } + if (scancode & K_UP) { + up = TRUE; + scancode &= ~K_UP; + } + if (kd_kbd_mouse) + kd_kbd_magic(scancode); + if (scancode < NUMKEYS) { + /* Lookup in map, then process. 
*/ + char_idx = kdstate2idx(kd_state, kd_extended); + c = key_map[scancode][char_idx]; + if (c == K_SCAN) { + c = key_map[scancode][++char_idx]; + kd_state = do_modifier(kd_state, c, up); +#ifdef notdef + cnsetleds(state2leds(kd_state)); +#endif + } else if (!up) { + /* regular key-down */ + if (c == K_CR) + c = K_LF; +#ifdef notdef + splx(o_pri); +#endif + return(c & 0177); + } + } + } +} + + +int old_kb_mode; + +#if MACH_KDB +#define poll_spl() db_splhigh() /* prevent race w/ kdintr() */ +#define poll_splx(s) db_splx(s) +#else /* MACH_KDB */ +#define poll_spl() SPLKD() +#define poll_splx(s) splx(s) +#endif /* MACH_KDB */ + + +void +cnpollc( + int on) +{ + int old_spl; /* spl we're called at... */ + + if (cpu_number()) { + return; + } + if (on) { + old_spl = poll_spl(); + + old_kb_mode = kb_mode; + kb_mode = KB_ASCII; + poll_splx(old_spl); + + kd_pollc++; + } else { + --kd_pollc; + + old_spl = poll_spl(); + kb_mode = old_kb_mode; + poll_splx(old_spl); + + + } +} + +/* + * kd_handle_ack: + * + * For pending commands, complete the command. For data bytes, + * drop the ack on the floor. + */ + +void +kd_handle_ack(void) +{ + switch (kd_ack) { + case SET_LEDS: + kd_setleds2(); + kd_ack = DATA_ACK; + break; + case DATA_ACK: + kd_ack = NOT_WAITING; + break; + case NOT_WAITING: + printf("unexpected ACK from keyboard\n"); + break; + default: + panic("bogus kd_ack\n"); + break; + } +} + +/* + * kd_resend: + * + * Resend a missed keyboard command or data byte. + */ + +void +kd_resend(void) +{ + if (kd_ack == NOT_WAITING) + printf("unexpected RESEND from keyboard\n"); + else + kd_senddata(last_sent); +} + + +/* + * do_modifier: + * + * Change keyboard state according to which modifier key and + * whether it went down or up. + * + * input: the current state, the key, and the key's direction. + * The key can be any key, not just a modifier key. 
+ * + * output: the new state + */ + +int +do_modifier( + int state, + Scancode c, + int up) +{ + switch (c) { + case (K_ALTSC): + if (up) + state &= ~KS_ALTED; + else + state |= KS_ALTED; + kd_extended = FALSE; + break; +#ifndef ORC + case (K_CLCKSC): +#endif /* ORC */ + case (K_CTLSC): + if (up) + state &= ~KS_CTLED; + else + state |= KS_CTLED; + kd_extended = FALSE; + break; +#ifdef ORC + case (K_CLCKSC): + if (!up) + state ^= KS_CLKED; + break; +#endif /* ORC */ + case (K_NLCKSC): + if (!up) + state ^= KS_NLKED; + break; + case (K_LSHSC): + case (K_RSHSC): + if (up) + state &= ~KS_SHIFTED; + else + state |= KS_SHIFTED; + kd_extended = FALSE; + break; + } + + return(state); +} + + +/* + * kdcheckmagic: + * + * Check for magic keystrokes for invoking the debugger or + * rebooting or ... + * + * input: an unprocessed scancode + * + * output: TRUE if a magic key combination was recognized and + * processed. FALSE otherwise. + * + * side effects: + * various actions possible, depending on which keys are + * pressed. If the debugger is called, steps are taken + * to ensure that the system doesn't think the magic keys + * are still held down. + */ + +int +kdcheckmagic( + Scancode scancode, + int *regs) +{ + static int magic_state = KS_NORMAL; /* like kd_state */ + int up = FALSE; + extern int rebootflag; + + if (scancode == 0x46 && kbdmouseflag) /* scroll lock */ + { + kd_kbd_mouse = !kd_kbd_mouse; + kd_kbd_magic_button = 0; + return(TRUE); + } + if (scancode & K_UP) { + up = TRUE; + scancode &= ~K_UP; + } + magic_state = do_modifier(magic_state, scancode, up); + + if ((magic_state&(KS_CTLED|KS_ALTED)) == (KS_CTLED|KS_ALTED)) { + switch (scancode) { +#if MACH_KDB + case K_dSC: /* ctl-alt-d */ + if (!kbdkdbflag) + return(FALSE); + + kdb_kintr(); /* invoke debugger */ + + /* Returned from debugger, so reset kbd state. 
*/ + (void)SPLKD(); + magic_state = KS_NORMAL; + if (kb_mode == KB_ASCII) + kd_state = KS_NORMAL; + /* setting leds kills kbd */ + + return(TRUE); + break; +#endif /* MACH_KDB */ + case K_DELSC: /* ctl-alt-del */ + /* if rebootflag is on, reboot the system */ + if (rebootflag) + kdreboot(); + break; + } + } + return(FALSE); +} + + +/* + * kdstate2idx: + * + * Return the value for the 2nd index into key_map that + * corresponds to the given state. + */ + +int +kdstate2idx( + int state, /* bit vector, not a state index */ + int extended) +{ + int state_idx = NORM_STATE; + + if ((!extended) && state != KS_NORMAL) { + if ((state&(KS_SHIFTED|KS_ALTED)) == (KS_SHIFTED|KS_ALTED)) + state_idx = SHIFT_ALT; + else if (state&KS_SHIFTED) + state_idx = SHIFT_STATE; + else if (state&KS_ALTED) + state_idx = ALT_STATE; + else if (state&KS_CTLED) + state_idx = CTRL_STATE; + } + + return (CHARIDX(state_idx)); +} + +/* + * kdinit: + * + * This code initializes the structures and sets up the port registers + * for the console driver. + * + * Each bitmap-based graphics card is likely to require a unique + * way to determine the card's presence. The driver runs through + * each "special" card that it knows about and uses the first one + * that it finds. If it doesn't find any, it assumes that an + * EGA-like card is installed. + * + * input : None. 
Interrupts are assumed to be disabled + * output : Driver is initialized + * + */ + +void +kdinit(void) +{ + unsigned char k_comm; /* keyboard command byte */ + unsigned char kd_stat; + + if (kd_initialized) + return; + kd_initialized = TRUE; + + /* get rid of any garbage in output buffer */ + if (inb(K_STATUS) & K_OBUF_FUL) + (void)inb(K_RDWR); + + cnsetleds(kd_state = KS_NORMAL); + + kd_sendcmd(KC_CMD_READ); /* ask for the ctlr command byte */ + k_comm = kd_getdata(); + k_comm &= ~K_CB_DISBLE; /* clear keyboard disable bit */ + k_comm |= K_CB_ENBLIRQ; /* enable interrupt */ + kd_sendcmd(KC_CMD_WRITE); /* write new ctlr command byte */ + kd_senddata(k_comm); + +/* set_kd_state(KS_NORMAL); does only HALF of set-leds sequence - + leaves kbd dead */ + + /* get rid of any garbage in output buffer */ + (void)inb(K_RDWR); +} + +/* + * kd_belloff: + * + * This routine shuts the bell off, by sending the appropriate code + * to the speaker port. + * + * input : None + * output : bell is turned off + * + */ + +void +kd_belloff(void) +{ + unsigned char status; + + status = (inb(K_PORTB) & ~(K_SPKRDATA | K_ENABLETMR2)); + outb(K_PORTB, status); +} + + +/* + * kd_bellon: + * + * This routine turns the bell on. + * + * input : None + * output : bell is turned on + * + */ + +void +kd_bellon(void) +{ + unsigned char status; + + /* program timer 2 */ + outb(K_TMRCTL, K_SELTMR2 | K_RDLDTWORD | K_TSQRWAVE | K_TBINARY); + outb(K_TMR2, 1500 & 0xff); /* LSB */ + outb(K_TMR2, (int)1500 >> 8); /* MSB */ + + /* start speaker - why must we turn on K_SPKRDATA? */ + status = (inb(K_PORTB)| K_ENABLETMR2 | K_SPKRDATA); + outb(K_PORTB, status); + return; +} + +/* + * kd_senddata: + * + * This function sends a byte to the keyboard RDWR port, but + * first waits until the input/output data buffer is clear before + * sending the data. Note that this byte can be either data or a + * keyboard command. 
+ * + */ + +void +kd_senddata( + unsigned char ch) +{ + while (inb(K_STATUS) & K_IBUF_FUL); + outb(K_RDWR, ch); + last_sent = ch; +} + +/* + * kd_sendcmd: + * + * This function sends a command byte to the keyboard command + * port, but first waits until the input/output data buffer is + * clear before sending the data. + * + */ + +void +kd_sendcmd( + unsigned char ch) +{ + while (inb(K_STATUS) & K_IBUF_FUL); + outb(K_CMD, ch); +} + + +/* + * kd_getdata: + * + * This function returns a data byte from the keyboard RDWR port, + * after waiting until the port is flagged as having something to + * read. + */ + +unsigned char +kd_getdata(void) +{ + while ((inb(K_STATUS) & K_OBUF_FUL) == 0); + return(inb(K_RDWR)); +} + +unsigned char +kd_cmdreg_read(void) +{ + int ch=KC_CMD_READ; + + while (inb(K_STATUS) & (K_IBUF_FUL | K_OBUF_FUL)); + outb(K_CMD, ch); + + while ((inb(K_STATUS) & K_OBUF_FUL) == 0); + return(inb(K_RDWR)); +} + +void +kd_cmdreg_write( + unsigned char val) +{ + int ch=KC_CMD_WRITE; + + while (inb(K_STATUS) & K_IBUF_FUL); + outb(K_CMD, ch); + + while (inb(K_STATUS) & K_IBUF_FUL); + outb(K_RDWR, val); +} + +int kd_mouse_write_no_ack = 0; + +int +kd_mouse_write( + unsigned char val) +{ + int ch=0xd4; /* output byte to aux device (i.e. mouse) */ + int ret = 0; + + while (inb(K_STATUS) & K_IBUF_FUL); + outb(K_CMD, ch); + + while (inb(K_STATUS) & K_IBUF_FUL); + outb(K_RDWR, val); + + if (kd_mouse_write_no_ack) goto done; + + while ((inb(K_STATUS) & K_OBUF_FUL) == 0); + if ((inb(K_STATUS) & 0x20) == 0x20) { + switch (ret = inb(K_RDWR)) { + case 0xfa: + break; + case 0xfe: + case 0xfc: + default: + printf("kd_mouse_write: saw %x for %x\n", + ret, val); + } + } else { /* abort */ + printf("kd_mouse_write: sync error ??? on %x\n", val); + } + +done: + return ret; +} + +void +kd_mouse_read( + int no, + char *buf) +{ + + while (no-- > 0) { + while ((inb(K_STATUS) & K_OBUF_FUL) == 0); + /* + * We may have seen a mouse event. 
+ */ + if ((inb(K_STATUS) & 0x20) == 0x20) { + *buf++ = (unsigned char)inb(K_RDWR); + } else { /* abort */ + int junk = inb(K_RDWR); + printf("kd_mouse_read: sync error, received: 0x%x\n", + junk); + break; + } + } +} + +void +kd_mouse_drain(void) +{ + int i; + while(inb(K_STATUS) & K_IBUF_FUL); + while((i = inb(K_STATUS)) & K_OBUF_FUL) + printf("kbd: S = %x D = %x\n", i, inb(K_RDWR)); +} + +/* + * set_kd_state: + * + * Set kd_state and update the keyboard status LEDs. + */ + +void +set_kd_state( + int newstate) +{ + kd_state = newstate; + kd_setleds1(state2leds(newstate)); +} + +/* + * state2leds: + * + * Return a byte containing LED settings for the keyboard, given + * a state vector. + */ + +unsigned char +state2leds( + int state) +{ + unsigned char result = 0; + + if (state & KS_NLKED) + result |= K_LED_NUMLK; + if (state & KS_CLKED) + result |= K_LED_CAPSLK; + return(result); +} + +/* + * kd_setleds[12]: + * + * Set the keyboard LEDs according to the given byte. + */ + +void +kd_setleds1( + unsigned char val) +{ + if (kd_ack != NOT_WAITING) { + printf("kd_setleds1: unexpected state (%d)\n", kd_ack); + return; + } + + kd_ack = SET_LEDS; + kd_nextled = val; + kd_senddata(K_CMD_LEDS); +} + +void +kd_setleds2(void) +{ + kd_senddata(kd_nextled); +} + + +/* + * cnsetleds: + * + * like kd_setleds[12], but not interrupt-based. + * Currently disabled because cngetc ignores caps lock and num + * lock anyway. + */ + +void +cnsetleds( + unsigned char val) +{ + kd_senddata(K_CMD_LEDS); + (void)kd_getdata(); /* XXX - assume is ACK */ + kd_senddata(val); + (void)kd_getdata(); /* XXX - assume is ACK */ +} + +void +kdreboot(void) +{ + kd_sendcmd(0xFE); /* XXX - magic # */ + /* + * DRAT. We're still here. Let's try a "CPU shutdown", which consists + * of clearing the IDTR and causing an exception. 
It's in locore.s + */ + cpu_shutdown(); + /*NOTREACHED*/ +} + +int +kd_kbd_magic( + int scancode) +{ +int new_button = 0; + + if (kd_kbd_mouse == 2) + printf("sc = %x\n", scancode); + + switch (scancode) { +/* f1 f2 f3 */ + case 0x3d: + new_button++; + case 0x3c: + new_button++; + case 0x3b: + new_button++; + if (kd_kbd_magic_button && (new_button != kd_kbd_magic_button)) { + /* down w/o up */ + } + /* normal */ + if (kd_kbd_magic_button == new_button) { + kd_kbd_magic_button = 0; + } else { + kd_kbd_magic_button = new_button; + } + break; + default: + return 0; + } + return 1; +} diff --git a/pexpert/i386/kd.h b/pexpert/i386/kd.h new file mode 100644 index 000000000..af525d912 --- /dev/null +++ b/pexpert/i386/kd.h @@ -0,0 +1,841 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * HISTORY + * + * Revision 1.1.1.1.12.1 1998/10/06 20:27:13 ehewitt + * Updated for framebuffer support. Added pexpert calls. 
+ * + * Revision 1.1.1.1 1998/09/22 21:05:39 wsanchez + * Import of Mac OS X kernel (~semeria) + * + * Revision 1.1.1.1 1998/03/07 02:25:38 wsanchez + * Import of OSF Mach kernel (~mburg) + * + * Revision 1.1.22.1 1996/11/29 16:56:42 stephen + * nmklinux_1.0b3_shared into pmk1.1 + * Include mach/mach_ioctl.h instead of sys/ioctl.h and use the + * "MACH"-prefixed ioctl definitions. + * Include mach/time_value.h instead of sys/time.h and use the + * new time types. + * Replaced u_char, u_short and u_int types with real ones. + * [96/09/18 barbou] + * + * Revision 1.1.18.1 1996/09/17 16:33:38 bruel + * use standalone includes only + * [1996/09/17 15:27:38 bruel] + * + * Revision 1.1.6.5 1996/07/31 20:49:24 paire + * Merged with changes from 1.1.6.4 + * [1996/07/31 20:38:47 paire] + * + * Merged with changes from 1.1.6.4 + * [1996/07/31 14:58:21 paire] + * + * Revision 1.1.6.4 1996/07/31 13:23:35 paire + * Merged with nmk20b7_shared (1.1.11.1) + * [96/06/07 paire] + * + * Revision 1.1.11.1 1996/03/04 16:11:12 bernadat + * Changed mouse_rawbuf struct to keep same kd_event struct size. + * [96/02/22 bernadat] + * + * Revision 1.1.6.2 1996/02/19 13:35:33 bernadat + * Adapted to be exported to users (added some #ifdef MACH_KERNEL). + * Modified kd_event structure to allow raw (unprocessed) mouse events. + * [96/01/29 barbou] + * + * Revision 1.1.6.1 1994/09/23 01:42:33 ezf + * change marker to not FREE + * [1994/09/22 21:18:52 ezf] + * + * Revision 1.1.2.4 1993/08/09 19:38:22 dswartz + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:50:44 dswartz] + * + * Revision 1.1.2.3 1993/08/03 18:29:12 gm + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 16:04:31 gm] + * + * Revision 1.1.2.2 1993/06/02 23:20:30 jeffc + * Added to OSF/1 R1.3 from NMK15.0. 
+ * [1993/06/02 21:02:41 jeffc] + * + * Revision 1.1 1992/09/30 02:27:08 robert + * Initial revision + * + * $EndLog$ + */ +/* CMU_HIST */ +/* + * Revision 2.7.9.1 92/02/18 18:55:07 jeffreyh + * Added two defines to detect if a PC Keyboard is present + * [91/06/25 bernadat] + * + * Revision 2.7 91/05/14 16:26:55 mrt + * Correcting copyright + * + * Revision 2.6 91/03/16 14:46:53 rpd + * Fixed ioctl definitions for ANSI C. + * [91/02/20 rpd] + * + * Revision 2.5 91/02/05 17:19:03 mrt + * Changed to new Mach copyright + * [91/02/01 17:45:28 mrt] + * + * Revision 2.4 90/11/26 14:50:27 rvb + * jsb bet me to XMK34, sigh ... + * [90/11/26 rvb] + * Synched 2.5 & 3.0 at I386q (r1.5.1.5) & XMK35 (r2.4) + * [90/11/15 rvb] + * + * Revision 1.5.1.4 90/06/07 08:05:44 rvb + * Move CURRENT_COLUMN here. + * [90/06/06 rvb] + * + * Revision 2.3 90/08/09 16:32:09 rpd + * Added kdb/X support from rvb. + * [90/08/09 rpd] + * + * Revision 1.5.1.3 90/05/14 13:21:26 rvb + * Support for entering kdb from X; + * [90/04/30 rvb] + * + * Revision 2.2 90/05/03 15:44:47 dbg + * First checkin. + * + * Revision 1.5.1.2 90/02/28 15:50:17 rvb + * Fix numerous typo's in Olivetti disclaimer. + * [90/02/28 rvb] + * + * Revision 1.5.1.1 90/01/08 13:30:59 rvb + * Add Olivetti copyright. + * [90/01/08 rvb] + * + * Revision 1.5 89/07/17 10:41:10 rvb + * Olivetti Changes to X79 upto 5/9/89: + * [89/07/11 rvb] + * + * Revision 1.6 89/07/07 16:24:24 kupfer + * X79 merge, 2nd attempt + * + * Revision 1.4.1.1 89/04/27 12:20:58 kupfer + * Merge X79 with our latest and greatest. + * + * Revision 1.1.1.1 89/04/27 11:53:41 kupfer + * X79 from CMU. + * + * Revision 1.4 89/03/09 20:06:33 rpd + * More cleanup. + * + * Revision 1.3 89/02/26 12:37:13 gm0w + * Changes for cleanup. + * + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * File: kd.h + * Description: definitions for AT keyboard/display driver + * Authors: Eugene Kuerner, Adrienne Jardetzky, Mike Kupfer + * + * $ Header: $ + * + * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989. + * All rights reserved. + * + * Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., + * Cupertino, California. + * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Olivetti + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. 
+ * + * OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This file contains defines and structures that implement hardware + * keyboard mapping into ansi defined output codes. Note that this + * is structured so that "re-mapping" of actual keys is allowed at + * anytime during execution of the console driver. And each scan code + * is potentially expanded into NUMKEYS characters. Which is programmable + * at runtime or whenever. + * + * 02 Nov 1988 orc!eugene + * + */ + +#ifndef _MACHINE_KD_H_ +#define _MACHINE_KD_H_ + +#include +#include + + +/* + * Where memory for various graphics adapters starts. + */ +#define EGA_START 0x0b8000 +#define CGA_START 0x0b8000 +#define MONO_START 0x0b0000 + +/* + * Common I/O ports. + */ +#define K_TMR0 0x40 /* timer 0, 1, or 2 value (r/w) */ +#define K_TMR1 0x41 +#define K_TMR2 0x42 +#define K_TMRCTL 0x43 /* timer control (write-only) */ +#define K_RDWR 0x60 /* keyboard data & cmds (read/write) */ +#define K_PORTB 0x61 /* r/w. speaker & status lines */ +#define K_STATUS 0x64 /* keybd status (read-only) */ +#define K_CMD 0x64 /* keybd ctlr command (write-only) */ + +/* + * I/O ports for various graphics adapters. + */ +#define EGA_IDX_REG 0x3d4 +#define EGA_IO_REG 0x3d5 +#define CGA_IDX_REG 0x3d4 +#define CGA_IO_REG 0x3d5 +#define MONO_IDX_REG 0x3b4 +#define MONO_IO_REG 0x3b5 + +/* + * Commands sent to graphics adapter. + */ +#define C_LOW 0x0f /* return low byte of cursor addr */ +#define C_HIGH 0x0e /* high byte */ + +/* + * Bit definitions for K_STATUS port. 
+ */ +#define K_OBUF_FUL 0x01 /* output (from keybd) buffer full */ +#define K_IBUF_FUL 0x02 /* input (to keybd) buffer full */ +#define K_SYSFLAG 0x04 /* "System Flag" */ +#define K_CMD_DATA 0x08 /* 1 = input buf has cmd, 0 = data */ +#define K_KBD_INHBT 0x10 /* 0 if keyboard inhibited */ +#define K_XMT_TIMEOUT 0x20 /* Transmit time out */ +#define K_RCV_TIMEOUT 0x40 /* Receive time out */ + +/* + * Keyboard controller commands (sent to K_CMD port). + */ +#define KC_CMD_READ 0x20 /* read controller command byte */ +#define KC_CMD_WRITE 0x60 /* write controller command byte */ +#define KC_CMD_TEST 0xab /* test interface */ +#define KC_CMD_DUMP 0xac /* diagnostic dump */ +#define KC_CMD_DISBLE 0xad /* disable keyboard */ +#define KC_CMD_ENBLE 0xae /* enable keyboard */ +#define KC_CMD_RDKBD 0xc4 /* read keyboard ID */ +#define KC_CMD_ECHO 0xee /* used for diagnostic testing */ + +/* + * Keyboard commands (send to K_RDWR). + */ +#define K_CMD_LEDS 0xed /* set status LEDs (caps lock, etc.) */ + +/* + * Bit definitions for controller command byte (sent following + * K_CMD_WRITE command). + */ +#define K_CB_ENBLIRQ 0x01 /* enable data-ready intrpt */ +#define K_CB_SETSYSF 0x04 /* Set System Flag */ +#define K_CB_INHBOVR 0x08 /* Inhibit Override */ +#define K_CB_DISBLE 0x10 /* disable keyboard */ + +/* + * Bit definitions for "Indicator Status Byte" (sent after a + * K_CMD_LEDS command). If the bit is on, the LED is on. Undefined + * bit positions must be 0. + */ +#define K_LED_SCRLLK 0x1 /* scroll lock */ +#define K_LED_NUMLK 0x2 /* num lock */ +#define K_LED_CAPSLK 0x4 /* caps lock */ + +/* + * Bit definitions for "Miscellaneous port B" (K_PORTB). 
+ */ +/* read/write */ +#define K_ENABLETMR2 0x01 /* enable output from timer 2 */ +#define K_SPKRDATA 0x02 /* direct input to speaker */ +#define K_ENABLEPRTB 0x04 /* "enable" port B */ +#define K_EIOPRTB 0x08 /* enable NMI on parity error */ +/* read-only */ +#define K_REFRESHB 0x10 /* refresh flag from INLTCONT PAL */ +#define K_OUT2B 0x20 /* timer 2 output */ +#define K_ICKB 0x40 /* I/O channel check (parity error) */ + +/* + * Bit definitions for timer control port (K_TMRCTL). + */ +/* select timer 0, 1, or 2. Don't mess with 0 or 1. */ +#define K_SELTMRMASK 0xc0 +#define K_SELTMR0 0x00 +#define K_SELTMR1 0x40 +#define K_SELTMR2 0x80 + +/* read/load control */ +#define K_RDLDTMRMASK 0x30 +#define K_HOLDTMR 0x00 /* freeze timer until read */ +#define K_RDLDTLSB 0x10 /* read/load LSB */ +#define K_RDLDTMSB 0x20 /* read/load MSB */ +#define K_RDLDTWORD 0x30 /* read/load LSB then MSB */ + +/* mode control */ +#define K_TMDCTLMASK 0x0e +#define K_TCOUNTINTR 0x00 /* "Term Count Intr" */ +#define K_TONESHOT 0x02 /* "Progr One-Shot" */ +#define K_TRATEGEN 0x04 /* "Rate Gen (/n)" */ +#define K_TSQRWAVE 0x06 /* "Sqr Wave Gen" */ +#define K_TSOFTSTRB 0x08 /* "Softw Trig Strob" */ +#define K_THARDSTRB 0x0a /* "Hardw Trig Strob" */ + +/* count mode */ +#define K_TCNTMDMASK 0x01 +#define K_TBINARY 0x00 /* 16-bit binary counter */ +#define K_TBCD 0x01 /* 4-decade BCD counter */ + + + +/* + * Fun definitions for displayed characters and characters read from + * the keyboard. + */ + +/* + * Attributes for character sent to display. + */ +#define KA_NORMAL 0x07 +#define KA_REVERSE 0x70 + +/* + * For an EGA-like display, each character takes two bytes, one for the + * actual character, followed by one for its attributes. + * Be very careful if you change ONE_SPACE, as these constants are also used + * to define the device-independent display implemented by kd.c. + * (See kdsoft.h for more details on the device-independent display.) 
+ */ +#define ONE_SPACE 2 /* bytes in 1 char, EGA-like display */ +#define ONE_LINE (kd_cols * ONE_SPACE) /* 160 number of bytes in line */ +#define ONE_PAGE (kd_lines * ONE_LINE) /* 4000 number of bytes in page */ +#define BOTTOM_LINE ((kd_lines - 1) * ONE_LINE) /* 3840 1st byte in last line of display */ + +#define BEG_OF_LINE(pos) ((pos) - (pos)%ONE_LINE) +#define CURRENT_COLUMN(pos) (((pos) % ONE_LINE) / ONE_SPACE) + +#define NUMKEYS 89 +#define NUMSTATES 5 /* NORMAL_STATE, ... */ +#define NUMOUTPUT 3 /* max size of byte seq from key */ +#define WIDTH_KMAP (NUMSTATES * NUMOUTPUT) + +/* + * Keyboard states. Used for KDGKBENT, KDSKBENT ioctl's. If you + * change these values, you should also rearrange the entries in + * key_map. + */ +/* "state indices" (for computing key_map index) */ +#define NORM_STATE 0 +#define SHIFT_STATE 1 +#define CTRL_STATE 2 +#define ALT_STATE 3 +#define SHIFT_ALT 4 +/* macro to convert from state index to actual key_map index */ +#define CHARIDX(sidx) ((sidx) * NUMOUTPUT) + /* where sidx is in [NORM_STATE ... SHIFT_ALT] */ + +/* "state bits" for kd_state vector */ +#define KS_NORMAL 0x00 +#define KS_SLKED 0x01 +#define KS_NLKED 0x02 +#define KS_CLKED 0x04 +#define KS_ALTED 0x08 +#define KS_SHIFTED 0x10 +#define KS_CTLED 0x20 + + +/* + * Scancode values, not to be confused with Ascii values. 
+ */ +typedef unsigned char Scancode; + +/* special codes */ +#define K_UP 0x80 /* OR'd in if key below is released */ +#define K_EXTEND 0xe0 /* marker for "extended" sequence */ +#define K_ACKSC 0xfa /* ack for keyboard command */ +#define K_RESEND 0xfe /* request to resend keybd cmd */ + +/* modifier keys */ +#define K_CTLSC 0x1d /* control down */ +#define K_LSHSC 0x2a /* left shift down */ +#define K_RSHSC 0x36 /* right shift down */ +#define K_ALTSC 0x38 /* alt key down */ +#define K_CLCKSC 0x3a /* caps lock */ +#define K_NLCKSC 0x45 /* num lock down */ + +/* "special keys" */ +#define K_BSSC 0x0e /* backspace */ +#define K_TABSC 0x0f /* tab */ +#define K_RETSC 0x1c /* return */ +#define K_SPSC 0x39 /* space */ +#define K_ESCSC 0x01 /* ESC */ + +/* alphabetic keys */ +#define K_qSC 0x10 +#define K_wSC 0x11 +#define K_eSC 0x12 +#define K_rSC 0x13 +#define K_tSC 0x14 +#define K_ySC 0x15 +#define K_uSC 0x16 +#define K_iSC 0x17 +#define K_oSC 0x18 +#define K_pSC 0x19 + +#define K_aSC 0x1e +#define K_sSC 0x1f +#define K_dSC 0x20 +#define K_fSC 0x21 +#define K_gSC 0x22 +#define K_hSC 0x23 +#define K_jSC 0x24 +#define K_kSC 0x25 +#define K_lSC 0x26 + +#define K_zSC 0x2c +#define K_xSC 0x2d +#define K_cSC 0x2e +#define K_vSC 0x2f +#define K_bSC 0x30 +#define K_nSC 0x31 +#define K_mSC 0x32 + +/* numbers and punctuation */ +#define K_ONESC 0x02 /* 1 */ +#define K_TWOSC 0x03 /* 2 */ +#define K_THREESC 0x04 /* 3 */ +#define K_FOURSC 0x05 /* 4 */ +#define K_FIVESC 0x06 /* 5 */ +#define K_SIXSC 0x07 /* 6 */ +#define K_SEVENSC 0x08 /* 7 */ +#define K_EIGHTSC 0x09 /* 8 */ +#define K_NINESC 0x0a /* 9 */ +#define K_ZEROSC 0x0b /* 0 */ + +#define K_MINUSSC 0x0c /* - */ +#define K_EQLSC 0x0d /* = */ +#define K_LBRKTSC 0x1a /* [ */ +#define K_RBRKTSC 0x1b /* ] */ +#define K_SEMISC 0x27 /* ; */ +#define K_SQUOTESC 0x28 /* ' */ +#define K_GRAVSC 0x29 /* ` */ +#define K_BSLSHSC 0x2b /* \ */ +#define K_COMMASC 0x33 /* , */ +#define K_PERIODSC 0x34 /* . 
*/ +#define K_SLASHSC 0x35 /* / */ + +/* keypad keys */ +#define K_HOMESC 0x47 /* scancode for home */ +#define K_DELSC 0x53 /* scancode for del */ + +/* + * Ascii values and flag characters for key map. + * A function key is represented by the 3-byte char sequence that it + * corresponds to. + * Other mappable non-Ascii keys (e.g., "ctrl") are represented by a + * two-byte sequence: K_SCAN, followed by the key's scan code. + */ +#define K_DONE 0xff /* must be same as NC */ +#define NC 0xff /* No character defined */ + +#define K_SCAN 0xfe /* followed by scan code */ + +/* ascii char set */ +#define K_NUL 0x00 /* Null character */ +#define K_SOH 0x01 +#define K_STX 0x02 +#define K_ETX 0x03 +#define K_EOT 0x04 +#define K_ENQ 0x05 +#define K_ACK 0x06 +#define K_BEL 0x07 /* bell character */ +#define K_BS 0x08 /* back space */ +#define K_HT 0x09 +#define K_LF 0x0a /* line feed */ +#define K_VT 0x0b +#define K_FF 0x0c +#define K_CR 0x0d /* carriage return */ +#define K_SO 0x0e +#define K_SI 0x0f +#define K_DLE 0x10 +#define K_DC1 0x11 +#define K_DC2 0x12 +#define K_DC3 0x13 +#define K_DC4 0x14 +#define K_NAK 0x15 +#define K_SYN 0x16 +#define K_ETB 0x17 +#define K_CAN 0x18 +#define K_EM 0x19 +#define K_SUB 0x1a +#define K_ESC 0x1b /* escape character */ +#define K_FS 0x1c +#define K_GS 0x1d +#define K_RS 0x1e +#define K_US 0x1f +#define K_SPACE 0x20 /* space character */ +#define K_BANG 0x21 /* ! */ +#define K_DQUOTE 0x22 /* " */ +#define K_POUND 0x23 /* # */ +#define K_DOLLAR 0x24 /* $ */ +#define K_PERC 0x25 /* % */ +#define K_AMPER 0x26 /* & */ +#define K_SQUOTE 0x27 /* ' */ +#define K_LPAREN 0x28 /* ( */ +#define K_RPAREN 0x29 /* ) */ +#define K_ASTER 0x2a /* * */ +#define K_PLUS 0x2b /* + */ +#define K_COMMA 0x2c /* , */ +#define K_MINUS 0x2d /* - */ +#define K_PERIOD 0x2e /* . 
*/ +#define K_SLASH 0x2f /* / */ +#define K_ZERO 0x30 /* 0 */ +#define K_ONE 0x31 /* 1 */ +#define K_TWO 0x32 /* 2 */ +#define K_THREE 0x33 /* 3 */ +#define K_FOUR 0x34 /* 4 */ +#define K_FIVE 0x35 /* 5 */ +#define K_SIX 0x36 /* 6 */ +#define K_SEVEN 0x37 /* 7 */ +#define K_EIGHT 0x38 /* 8 */ +#define K_NINE 0x39 /* 9 */ +#define K_COLON 0x3a /* : */ +#define K_SEMI 0x3b /* ; */ +#define K_LTHN 0x3c /* < */ +#define K_EQL 0x3d /* = */ +#define K_GTHN 0x3e /* > */ +#define K_QUES 0x3f /* ? */ +#define K_ATSN 0x40 /* @ */ +#define K_A 0x41 /* A */ +#define K_B 0x42 /* B */ +#define K_C 0x43 /* C */ +#define K_D 0x44 /* D */ +#define K_E 0x45 /* E */ +#define K_F 0x46 /* F */ +#define K_G 0x47 /* G */ +#define K_H 0x48 /* H */ +#define K_I 0x49 /* I */ +#define K_J 0x4a /* J */ +#define K_K 0x4b /* K */ +#define K_L 0x4c /* L */ +#define K_M 0x4d /* M */ +#define K_N 0x4e /* N */ +#define K_O 0x4f /* O */ +#define K_P 0x50 /* P */ +#define K_Q 0x51 /* Q */ +#define K_R 0x52 /* R */ +#define K_S 0x53 /* S */ +#define K_T 0x54 /* T */ +#define K_U 0x55 /* U */ +#define K_V 0x56 /* V */ +#define K_W 0x57 /* W */ +#define K_X 0x58 /* X */ +#define K_Y 0x59 /* Y */ +#define K_Z 0x5a /* Z */ +#define K_LBRKT 0x5b /* [ */ +#define K_BSLSH 0x5c /* \ */ +#define K_RBRKT 0x5d /* ] */ +#define K_CARET 0x5e /* ^ */ +#define K_UNDSC 0x5f /* _ */ +#define K_GRAV 0x60 /* ` */ +#define K_a 0x61 /* a */ +#define K_b 0x62 /* b */ +#define K_c 0x63 /* c */ +#define K_d 0x64 /* d */ +#define K_e 0x65 /* e */ +#define K_f 0x66 /* f */ +#define K_g 0x67 /* g */ +#define K_h 0x68 /* h */ +#define K_i 0x69 /* i */ +#define K_j 0x6a /* j */ +#define K_k 0x6b /* k */ +#define K_l 0x6c /* l */ +#define K_m 0x6d /* m */ +#define K_n 0x6e /* n */ +#define K_o 0x6f /* o */ +#define K_p 0x70 /* p */ +#define K_q 0x71 /* q */ +#define K_r 0x72 /* r */ +#define K_s 0x73 /* s */ +#define K_t 0x74 /* t */ +#define K_u 0x75 /* u */ +#define K_v 0x76 /* v */ +#define K_w 0x77 /* w */ +#define K_x 0x78 /* 
x */ +#define K_y 0x79 /* y */ +#define K_z 0x7a /* z */ +#define K_LBRACE 0x7b /* { */ +#define K_PIPE 0x7c /* | */ +#define K_RBRACE 0x7d /* } */ +#define K_TILDE 0x7e /* ~ */ +#define K_DEL 0x7f /* delete */ + +/* Ascii sequences to be generated by the named key */ +#define K_F1 0x1b,0x4f,0x50 +#define K_F1S 0x1b,0x4f,0x70 +#define K_F2 0x1b,0x4f,0x51 +#define K_F2S 0x1b,0x4f,0x71 +#define K_F3 0x1b,0x4f,0x52 +#define K_F3S 0x1b,0x4f,0x72 +#define K_F4 0x1b,0x4f,0x53 +#define K_F4S 0x1b,0x4f,0x73 +#define K_F5 0x1b,0x4f,0x54 +#define K_F5S 0x1b,0x4f,0x74 +#define K_F6 0x1b,0x4f,0x55 +#define K_F6S 0x1b,0x4f,0x75 +#define K_F7 0x1b,0x4f,0x56 +#define K_F7S 0x1b,0x4f,0x76 +#define K_F8 0x1b,0x4f,0x57 +#define K_F8S 0x1b,0x4f,0x77 +#define K_F9 0x1b,0x4f,0x58 +#define K_F9S 0x1b,0x4f,0x78 +#define K_F10 0x1b,0x4f,0x59 +#define K_F10S 0x1b,0x4f,0x79 +#define K_F11 0x1b,0x4f,0x5a +#define K_F11S 0x1b,0x4f,0x7a +#define K_F12 0x1b,0x4f,0x41 +#define K_F12S 0x1b,0x4f,0x61 + +#define K_SCRL 0x1b,0x5b,0x4d +#define K_HOME 0x1b,0x5b,0x48 +#define K_UA 0x1b,0x5b,0x41 +#define K_PUP 0x1b,0x5b,0x56 +#define K_LA 0x1b,0x5b,0x44 +#define K_RA 0x1b,0x5b,0x43 +#define K_END 0x1b,0x5b,0x59 +#define K_DA 0x1b,0x5b,0x42 +#define K_PDN 0x1b,0x5b,0x55 +#define K_INS 0x1b,0x5b,0x40 + + +/* + * This array maps scancodes to Ascii characters (or character + * sequences). + * The first index is the scancode. The first NUMOUTPUT characters + * (accessed using the second index) correspond to the key's char + * sequence for the Normal state. The next NUMOUTPUT characters + * are for the Shift state, then Ctrl, then Alt, then Shift/Alt. + */ +extern unsigned char key_map[NUMKEYS][WIDTH_KMAP]; + + + +/* + * These routines are declared here so that all the modules making + * up the kd driver agree on how to do locking. + */ + +#define SPLKD spltty + + +/* + * Ioctl's on /dev/console. + */ + +/* + * KDGKBENT, KDSKBENT - Get and set keyboard table entry. Useful for + * remapping keys. 
+ * + * KDGSTATE - Get the keyboard state variable, which flags the + * modifier keys (shift, ctrl, etc.) that are down. See + * KS_NORMAL et al above. Used for debugging. + * + * KDSETBELL - Turns the bell on or off. + */ + +#define KDGKBENT _MACH_IOWR('k', 1, struct kbentry) /* get keybd entry */ + +#define KDSKBENT _MACH_IOW('k', 2, struct kbentry) /* set keybd entry */ + +#define KDGSTATE _MACH_IOR('k', 3, int) /* get keybd state */ + +#define KDSETBELL _MACH_IOW('k', 4, int) /* turn bell on or off */ +# define KD_BELLON 1 +# define KD_BELLOFF 0 + +/* + * This struct is used for getting and setting key definitions. The + * values for kb_index are obtainable from the man page for + * keyboard(7) (though they should really be defined here!). + */ +struct kbentry { + unsigned char kb_state; /* which state to use */ + unsigned char kb_index; /* which keycode */ + unsigned char kb_value[NUMOUTPUT]; /* value to get/set */ +}; + + +/* + * Ioctl's on /dev/kbd. + */ + +/* + * KDSKBDMODE - When the console is in "ascii" mode, keyboard events are + * converted to Ascii characters that are readable from /dev/console. + * When the console is in "event" mode, keyboard events are + * timestamped and queued up on /dev/kbd as kd_events. When the last + * close is done on /dev/kbd, the console automatically reverts to ascii + * mode. + * When /dev/mouse is opened, mouse events are timestamped and queued + * on /dev/mouse, again as kd_events. + * + * KDGKBDTYPE - Returns the type of keyboard installed. Currently + * there is only one type, KB_VANILLAKB, which is your standard PC-AT + * keyboard. 
+ */ + +extern int kb_mode; + +#define KDSKBDMODE _MACH_IOW('K', 1, int) /* set keyboard mode */ +#define KB_EVENT 1 +#define KB_ASCII 2 + +#define KDGKBDTYPE _MACH_IOR('K', 2, int) /* get keyboard type */ +#define KB_VANILLAKB 0 + +struct X_kdb { + unsigned int *ptr; + unsigned int size; +}; + +#define K_X_KDB_ENTER _MACH_IOW('K', 16, struct X_kdb) +#define K_X_KDB_EXIT _MACH_IOW('K', 17, struct X_kdb) + +#define K_X_IN 0x01000000 +#define K_X_OUT 0x02000000 +#define K_X_BYTE 0x00010000 +#define K_X_WORD 0x00020000 +#define K_X_LONG 0x00040000 +#define K_X_TYPE 0x03070000 +#define K_X_PORT 0x0000ffff + + +typedef unsigned short kev_type; /* kd event type */ + +/* (used for event records) */ +struct mouse_motion { + short mm_deltaX; /* units? */ + short mm_deltaY; +}; + +#define MOUSEBUFSIZE 5 /* num bytes def'd by protocol */ + +struct mouse_rawbuf { + unsigned char mr_bufsize; + unsigned char mr_buf[1]; +}; + +typedef struct { + kev_type type; /* see below */ + struct time_value time; /* timestamp */ + union { /* value associated with event */ + boolean_t up; /* MOUSE_LEFT .. MOUSE_RIGHT */ + Scancode sc; /* KEYBD_EVENT */ + struct mouse_motion mmotion; /* MOUSE_MOTION */ + struct mouse_rawbuf mrawbuf; /* MOUSE_RAW */ + } value; +} kd_event; + +#define m_deltaX mmotion.mm_deltaX +#define m_deltaY mmotion.mm_deltaY +#define r_buf mrawbuf.mr_buf +#define r_bufsize mrawbuf.mr_bufsize + +/* + * kd_event ID's. 
+ */ +#define MOUSE_LEFT 1 /* mouse left button up/down */ +#define MOUSE_MIDDLE 2 +#define MOUSE_RIGHT 3 +#define MOUSE_MOTION 4 /* mouse motion */ +#define KEYBD_EVENT 5 /* key up/down */ + +/* CMOS Info */ + +#define CMOS_ADDR 0x70 /* port for CMOS ram address */ +#define CMOS_DATA 0x71 /* port for CMOS ram data */ + + +/* Addresses, related masks, and potential results */ + +#define CMOS_EB 0x14 /* read Equipment Byte */ +#define CM_SCRMSK 0x30 /* mask for EB query to get screen */ +#define CM_EGA_VGA 0x00 /* "not CGA or MONO" */ +#define CM_CGA_40 0x10 +#define CM_CGA_80 0x20 +#define CM_MONO_80 0x30 + + +#endif /* _MACHINE_KD_H_ */ diff --git a/pexpert/i386/kdasm.s b/pexpert/i386/kdasm.s new file mode 100644 index 000000000..2b9fa2bcd --- /dev/null +++ b/pexpert/i386/kdasm.s @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* CMU_ENDHIST */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ + +/* + * Some inline code to speed up major block copies to and from the + * screen buffer. + * + * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989. + * All rights reserved. + * + * orc!eugene 28 Oct 1988 + * + */ +/* + * Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., + * Cupertino, California. + * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Olivetti + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. 
+ * + * OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* $ Header: $ */ + + + +/* + * Function: kd_slmwd() + * + * This function "slams" a word (char/attr) into the screen memory using + * a block fill operation on the 386. + * + */ + +#define start 0x08(%ebp) +#define count 0x0c(%ebp) +#define value 0x10(%ebp) + + .text + .align 2 + .globl _kd_slmwd + +_kd_slmwd: + pushl %ebp + movl %esp, %ebp + pushl %edi + + movl start, %edi + movl count, %ecx + movw value, %ax + cld + rep + stosw + + popl %edi + leave + ret +#undef start +#undef count +#undef value + +/* + * "slam up" + */ + +#define from 0x08(%ebp) +#define to 0x0c(%ebp) +#define count 0x10(%ebp) + .align 2 + .globl _kd_slmscu + +_kd_slmscu: + pushl %ebp + movl %esp, %ebp + pushl %esi + pushl %edi + + movl from, %esi + movl to, %edi + movl count, %ecx + cmpl %edi, %esi + cld + rep + movsw + + popl %edi + popl %esi + leave + ret + +/* + * "slam down" + */ + .align 2 + .globl _kd_slmscd + +_kd_slmscd: + pushl %ebp + movl %esp, %ebp + pushl %esi + pushl %edi + + movl from, %esi + movl to, %edi + movl count, %ecx + cmpl %edi, %esi + std + rep + movsw + cld + + popl %edi + popl %esi + leave + ret +#undef from +#undef to +#undef count diff --git a/pexpert/i386/pe_bootargs.c b/pexpert/i386/pe_bootargs.c new file mode 100644 index 000000000..fa80f5dbf --- /dev/null +++ b/pexpert/i386/pe_bootargs.c @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +char * +PE_boot_args( + void) +{ + return((char *)((KERNBOOTSTRUCT*)PE_state.bootArgs)->bootString); +} diff --git a/pexpert/i386/pe_identify_machine.c b/pexpert/i386/pe_identify_machine.c new file mode 100644 index 000000000..8edc96285 --- /dev/null +++ b/pexpert/i386/pe_identify_machine.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +/* Local declarations */ +void pe_identify_machine(boot_args *args); + +/* pe_identify_machine: + * + * Sets up platform parameters. + * Returns: nothing + */ +void pe_identify_machine(boot_args *args) +{ + // Clear the gPEClockFrequencyInfo struct + bzero((void *)&gPEClockFrequencyInfo, sizeof(clock_frequency_info_t)); + + // Start with default values. + gPEClockFrequencyInfo.bus_clock_rate_hz = 100000000; + gPEClockFrequencyInfo.cpu_clock_rate_hz = 300000000; + gPEClockFrequencyInfo.dec_clock_rate_hz = 25000000; + + // Get real number from some where. + + // Set the num / den pairs form the hz values. + gPEClockFrequencyInfo.bus_clock_rate_num = gPEClockFrequencyInfo.bus_clock_rate_hz; + gPEClockFrequencyInfo.bus_clock_rate_den = 1; + + gPEClockFrequencyInfo.bus_to_cpu_rate_num = + (2 * gPEClockFrequencyInfo.cpu_clock_rate_hz) / gPEClockFrequencyInfo.bus_clock_rate_hz; + gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2; + + gPEClockFrequencyInfo.bus_to_dec_rate_num = 1; + gPEClockFrequencyInfo.bus_to_dec_rate_den = + gPEClockFrequencyInfo.bus_clock_rate_hz / gPEClockFrequencyInfo.dec_clock_rate_hz; +} diff --git a/pexpert/i386/pe_init.c b/pexpert/i386/pe_init.c new file mode 100644 index 000000000..a483e72f6 --- /dev/null +++ b/pexpert/i386/pe_init.c @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * file: pe_init.c + * i386 platform expert initialization. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fakePPCStructs.h" +#include "fakePPCDeviceTree.h" + +/* extern references */ +extern void pe_identify_machine(void * args); +extern void initialize_screen(void *, unsigned int); + +/* Local references */ +static vm_offset_t mapframebuffer(caddr_t,int); +static vm_offset_t PE_fb_vaddr = 0; +static int PE_fb_mode = TEXT_MODE; + +/* private globals */ +PE_state_t PE_state; + +/* Clock Frequency Info */ +clock_frequency_info_t gPEClockFrequencyInfo; + +int PE_initialize_console( PE_Video * info, int op ) +{ + static int last_console = -1; + Boot_Video bootInfo; + Boot_Video * bInfo; + + /* + * Refuse changes from outside pexpert. + * The video mode setup by the booter cannot be changed. 
+ */ + if ( info && (info == &PE_state.video) ) + { + bootInfo.v_baseAddr = PE_fb_vaddr; + bootInfo.v_rowBytes = info->v_rowBytes; + bootInfo.v_width = info->v_width; + bootInfo.v_height = info->v_height; + bootInfo.v_depth = info->v_depth; + bootInfo.v_display = PE_fb_mode; + bInfo = &bootInfo; + } + else + bInfo = 0; + + switch ( op ) { + + case kPEDisableScreen: + initialize_screen((void *) bInfo, op); +#ifdef FIXME + last_console = switch_to_serial_console(); +#endif + kprintf("kPEDisableScreen %d\n", last_console); + break; + + case kPEEnableScreen: + initialize_screen((void *) bInfo, op); + kprintf("kPEEnableScreen %d\n", last_console); +#ifdef FIXME + if( last_console != -1) + switch_to_old_console( last_console); +#endif + break; + + default: + initialize_screen((void *) bInfo, op); + break; + } + + return 0; +} + +void PE_init_iokit(void) +{ + long * dt; + void * desc; + unsigned char * data; + unsigned char * clut; + + dt = (long *) createdt( + fakePPCDeviceTree, + &((boot_args*)PE_state.fakePPCBootArgs)->deviceTreeLength); + + /* Setup powermac_info and powermac_machine_info structures */ + + ((boot_args*)PE_state.fakePPCBootArgs)->deviceTreeP = (unsigned long *) dt; + ((boot_args*)PE_state.fakePPCBootArgs)->topOfKernelData = (unsigned int) kalloc(0x2000); + + /* + * Setup the OpenFirmware Device Tree routines + * so the console can be found and the right I/O space + * can be used.. + */ + DTInit(dt); + + /* + * Initialize the spinning wheel (progress indicator). 
+ */ + clut = appleClut8; + desc = &default_progress; + data = default_progress_data; + + vc_progress_initialize( desc, data, clut ); + + PE_initialize_console( (PE_Video *) 0, kPEAcquireScreen ); + + (void) StartIOKit( (void*)dt, (void*)PE_state.fakePPCBootArgs, 0, 0); +} + +void PE_init_platform(boolean_t vm_initialized, void * args) +{ + if (PE_state.initialized == FALSE) + { + extern unsigned int halt_in_debugger, disableDebugOuput; + unsigned int debug_arg; + + PE_state.initialized = TRUE; + PE_state.bootArgs = args; + PE_state.video.v_baseAddr = ((KERNBOOTSTRUCT *)args)->video.v_baseAddr; + PE_state.video.v_rowBytes = ((KERNBOOTSTRUCT *)args)->video.v_rowBytes; + PE_state.video.v_height = ((KERNBOOTSTRUCT *)args)->video.v_height; + PE_state.video.v_width = ((KERNBOOTSTRUCT *)args)->video.v_width; + PE_state.video.v_depth = ((KERNBOOTSTRUCT *)args)->video.v_depth; + PE_state.video.v_display = ((KERNBOOTSTRUCT *)args)->video.v_display; + PE_fb_mode = ((KERNBOOTSTRUCT *)args)->graphicsMode; + PE_state.fakePPCBootArgs = (boot_args *)&fakePPCBootArgs; + ((boot_args *)PE_state.fakePPCBootArgs)->machineType = 386; + + if (PE_fb_mode == TEXT_MODE) + { + /* Force a text display if the booter did not setup a + * VESA frame buffer. + */ + PE_state.video.v_display = 0; + } + + /* + * If DB_HALT flag is set, then cause a breakpoint to the debugger + * immediately after the kernel debugger has been initialized. + * + * If DB_PRT flag is set, then enable debugger printf. + */ + disableDebugOuput = TRUE; /* FIXME: override osfmk/i386/AT386/model_dep.c */ + + if (PE_parse_boot_arg("debug", &debug_arg)) { + if (debug_arg & DB_HALT) halt_in_debugger = 1; + if (debug_arg & DB_PRT) disableDebugOuput = FALSE; + } + } + + if (!vm_initialized) + { + /* Hack! FIXME.. 
*/ + outb(0x21, 0xff); /* Maskout all interrupts Pic1 */ + outb(0xa1, 0xff); /* Maskout all interrupts Pic2 */ + + pe_identify_machine(args); + } + else + { + pe_init_debug(); + + PE_create_console(); + } +} + +void PE_create_console( void ) +{ + if ( (PE_fb_vaddr == 0) && (PE_state.video.v_baseAddr != 0) ) + { + PE_fb_vaddr = mapframebuffer((caddr_t) PE_state.video.v_baseAddr, + (PE_fb_mode == TEXT_MODE) ? + /* text mode */ PE_state.video.v_rowBytes : + /* grfx mode */ PE_state.video.v_rowBytes * + PE_state.video.v_height); + } + + if (PE_state.video.v_display) + PE_initialize_console( &PE_state.video, kPEGraphicsMode ); + else + PE_initialize_console( &PE_state.video, kPETextMode ); +} + +int PE_current_console( PE_Video * info ) +{ + *info = PE_state.video; + + if ( PE_fb_mode == TEXT_MODE ) + { + /* + * FIXME: Prevent the IOBootFrameBuffer from starting up + * when we are in Text mode. + */ + info->v_baseAddr = 0; + } + + return (0); +} + +void PE_display_icon( unsigned int flags, + const char * name ) +{ +} + +extern boolean_t PE_get_hotkey( unsigned char key ) +{ + return (FALSE); +} + +static timebase_callback_func gTimebaseCallback; + +void PE_register_timebase_callback(timebase_callback_func callback) +{ + gTimebaseCallback = callback; + + PE_call_timebase_callback(); +} + +void PE_call_timebase_callback(void) +{ + struct timebase_freq_t timebase_freq; + unsigned long num, den, cnt; + + num = gPEClockFrequencyInfo.bus_clock_rate_num * gPEClockFrequencyInfo.bus_to_dec_rate_num; + den = gPEClockFrequencyInfo.bus_clock_rate_den * gPEClockFrequencyInfo.bus_to_dec_rate_den; + + cnt = 2; + while (cnt <= den) { + if ((num % cnt) || (den % cnt)) { + cnt++; + continue; + } + + num /= cnt; + den /= cnt; + } + + timebase_freq.timebase_num = num; + timebase_freq.timebase_den = den; + + if (gTimebaseCallback) gTimebaseCallback(&timebase_freq); +} + +/* + * map the framebuffer into kernel vm and return the (virtual) + * address. 
+ */ +static vm_offset_t +mapframebuffer( caddr_t physaddr, /* start of framebuffer */ + int length) /* num bytes to map */ +{ + vm_offset_t vmaddr; + + if (physaddr != (caddr_t)trunc_page(physaddr)) + panic("Framebuffer not on page boundary"); + + vmaddr = io_map((vm_offset_t)physaddr, length); + if (vmaddr == 0) + panic("can't alloc VM for framebuffer"); + + return vmaddr; +} diff --git a/pexpert/i386/pe_interrupt.c b/pexpert/i386/pe_interrupt.c new file mode 100644 index 000000000..533bf485c --- /dev/null +++ b/pexpert/i386/pe_interrupt.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include + +struct i386_interrupt_handler { + IOInterruptHandler handler; + void *nub; + void *target; + void *refCon; +}; + +typedef struct i386_interrupt_handler i386_interrupt_handler_t; + +i386_interrupt_handler_t PE_interrupt_handler; + +void PE_platform_interrupt_initialize(void) +{ +} + +void +PE_incoming_interrupt(int interrupt, struct i386_saved_state *ssp) +{ + boolean_t save_int; + + i386_interrupt_handler_t *vector; + + vector = &PE_interrupt_handler; + + save_int = ml_set_interrupts_enabled(FALSE); + vector->handler(vector->target, vector->refCon, vector->nub, interrupt); + ml_set_interrupts_enabled(save_int); +} + +void PE_install_interrupt_handler(void *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon) +{ + i386_interrupt_handler_t *vector; + + vector = &PE_interrupt_handler; + + /*vector->source = source; IGNORED */ + vector->handler = handler; + vector->nub = nub; + vector->target = target; + vector->refCon = refCon; +} diff --git a/pexpert/i386/pe_kprintf.c b/pexpert/i386/pe_kprintf.c new file mode 100644 index 000000000..e7601e780 --- /dev/null +++ b/pexpert/i386/pe_kprintf.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * file: pe_kprintf.c + * i386 platform expert debugging output initialization. + */ +#include +#include +#include + +/* extern references */ +extern void cnputc(char c); + +/* Globals */ +void (*PE_kputc)(char c) = 0; + +unsigned int disableSerialOuput = TRUE; + +void PE_init_kprintf(boolean_t vm_initialized) +{ + unsigned int boot_arg; + + if (PE_state.initialized == FALSE) + panic("Platform Expert not initialized"); + + if (!vm_initialized) + { + if (PE_parse_boot_arg("debug", &boot_arg)) + if (boot_arg & DB_KPRT) disableSerialOuput = FALSE; + + /* FIXME - route output to serial port. */ + PE_kputc = cnputc; + } +} + +void kprintf(const char *fmt, ...) +{ + va_list listp; + + if (!disableSerialOuput) { + va_start(listp, fmt); + _doprnt(fmt, &listp, PE_kputc, 16); + va_end(listp); + } +} diff --git a/pexpert/i386/pe_misc.s b/pexpert/i386/pe_misc.s new file mode 100644 index 000000000..0759794a4 --- /dev/null +++ b/pexpert/i386/pe_misc.s @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +/* +** PE_get_timebase() +** +** Entry - %esp contains pointer to 64 bit structure. +** +** Exit - 64 bit structure filled in. +** +*/ +ENTRY(PE_get_timebase) + + movl S_ARG0, %ecx + + rdtsc + + movl %edx, 0(%ecx) + movl %eax, 4(%ecx) + + ret + diff --git a/pexpert/i386/pe_spl.c b/pexpert/i386/pe_spl.c new file mode 100644 index 000000000..3e247bc56 --- /dev/null +++ b/pexpert/i386/pe_spl.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + + +typedef unsigned long spl_t; + +spl_t PE_set_spl(spl_t x); + +spl_t splhi() { return PE_set_spl(8); } +spl_t splhigh() { return PE_set_spl(8); } +spl_t splclock() { return PE_set_spl(8); } +spl_t splvm() { return PE_set_spl(8); } +spl_t splsched() { return PE_set_spl(8); } +spl_t splimp() { return PE_set_spl(6); } +void splx(spl_t x) { (void) PE_set_spl(x); } +spl_t splnet() { return PE_set_spl(6); } +void spllo() { (void) PE_set_spl(0); } +spl_t spl1() { return PE_set_spl(1); } +spl_t spl2() { return PE_set_spl(2); } +spl_t spl3() { return PE_set_spl(3); } +spl_t spl4() { return PE_set_spl(4); } +spl_t spl5() { return PE_set_spl(5); } +spl_t spl6() { return PE_set_spl(6); } +spl_t splbio() { return PE_set_spl(5); } +spl_t spltty() { return PE_set_spl(6); } + +spl_t sploff() { return PE_set_spl(8); } +void splon(spl_t x) { (void) PE_set_spl(x); } + +spl_t PE_set_spl(spl_t lvl) +{ + spl_t old_level; + int mycpu; + + + __asm__ volatile("cli"); + + mycpu = cpu_number(); + old_level = cpu_data[mycpu].spl_level; + cpu_data[mycpu].spl_level = lvl ; + + if (!lvl) __asm__ volatile("sti"); + + return old_level; +} + +void PE_set_spl_no_interrupt(spl_t lvl) +{ + int mycpu; + + __asm__ volatile("cli"); + + mycpu = cpu_number(); + cpu_data[mycpu].spl_level = lvl ; + + return; +} + diff --git a/pexpert/i386/text_console.c b/pexpert/i386/text_console.c new file mode 100644 index 000000000..e35d50a8d --- /dev/null +++ b/pexpert/i386/text_console.c @@ -0,0 +1,363 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * text_console.c + * + * VGA text console support. + */ + +#include +#include +#include +#include "video_console.h" + +/* + * Macros and typedefs. + */ +typedef short csrpos_t; /* cursor position, ONE_SPACE bytes per char */ + +#define ONE_SPACE 2 /* bytes per character */ +#define ONE_LINE (vga_cols * ONE_SPACE) /* number of bytes in line */ +#define ONE_PAGE (vga_rows * ONE_LINE) /* number of bytes in page */ +#define SPACE_CHAR 0x20 + +#define VGA_FB_START 0x0b8000 +#define VGA_FB_SIZE 0x8000 +#define VGA_IDX_REG 0x3d4 +#define VGA_IO_REG 0x3d5 + +/* + * Commands sent to graphics adapter. + */ +#define VGA_C_LOW 0x0f /* return low byte of cursor addr */ +#define VGA_C_HIGH 0x0e /* high byte */ + +/* + * Attributes for character sent to display. + */ +#define VGA_ATTR_NORMAL 0x07 +#define VGA_ATTR_REVERSE 0x70 + +/* + * Convert from XY coordinate to a location in display memory. + */ +#define XY_TO_CSRPOS(x, y) (((y) * vga_cols + (x)) * ONE_SPACE) + +/* + * Globals. + */ +static short vga_idx_reg = 0; /* location of VGA index register */ +static short vga_io_reg = 0; /* location of VGA data register */ +static short vga_cols = 80; /* number of columns */ +static short vga_rows = 25; /* number of rows */ +static char vga_attr = 0; /* current character attribute */ +static char vga_attr_rev = 0; /* current reverse attribute */ +static char * vram_start = 0; /* VM start of VGA frame buffer */ + +/* + * Functions in kdasm.s. 
+ */ +extern void kd_slmwd(u_char * pos, int count, u_short val); +extern void kd_slmscu(u_char * from, u_char * to, int count); +extern void kd_slmscd(u_char * from, u_char * to, int count); + +/* + * move_up + * + * Block move up for VGA. + */ +static void +move_up( csrpos_t from, + csrpos_t to, + int count) +{ + kd_slmscu( vram_start + from, vram_start + to, count ); +} + +/* + * move_down + * + * Block move down for VGA. + */ +static void +move_down( csrpos_t from, + csrpos_t to, + int count ) +{ + kd_slmscd( vram_start + from, vram_start + to, count ); +} + +/* + * clear_block + * + * Fast clear for VGA. + */ +static void +clear_block( csrpos_t start, + int size, + char attr) +{ + kd_slmwd( vram_start + start, size, + ((unsigned short) attr << 8) + SPACE_CHAR); +} + +/* + * set_cursor_position + * + * This function sets the hardware cursor position + * on the screen. + */ +static void +set_cursor_position( csrpos_t newpos ) +{ + short curpos; /* position, not scaled for attribute byte */ + + curpos = newpos / ONE_SPACE; + + outb(vga_idx_reg, VGA_C_HIGH); + outb(vga_io_reg, (u_char)(curpos >> 8)); + + outb(vga_idx_reg, VGA_C_LOW); + outb(vga_io_reg, (u_char)(curpos & 0xff)); +} + +/* + * display_char + * + * Display attributed character for VGA (mode 3). + */ +static void +display_char( csrpos_t pos, /* where to put it */ + char ch, /* the character */ + char attr ) /* its attribute */ +{ + *(vram_start + pos) = ch; + *(vram_start + pos + 1) = attr; +} + +/* + * vga_init + * + * Initialize the VGA text console. + */ +static void +vga_init(int cols, int rows, unsigned char * addr) +{ + vram_start = addr; + vga_idx_reg = VGA_IDX_REG; + vga_io_reg = VGA_IO_REG; + vga_rows = rows; + vga_cols = cols; + vga_attr = VGA_ATTR_NORMAL; + vga_attr_rev = VGA_ATTR_REVERSE; + + set_cursor_position(0); +} + +/* + * tc_scrollup + * + * Scroll the screen up 'n' character lines. 
+ */ +void +tc_scrollup( int lines ) +{ + csrpos_t to; + csrpos_t from; + int size; + + /* scroll up */ + to = 0; + from = ONE_LINE * lines; + size = ( ONE_PAGE - ( ONE_LINE * lines ) ) / ONE_SPACE; + move_up(from, to, size); + + /* clear bottom line */ + to = ( ( vga_rows - lines) * ONE_LINE ); + size = ( ONE_LINE * lines ) / ONE_SPACE; + clear_block(to, size, vga_attr); +} + +/* + * tc_scrolldown + * + * Scrolls the screen down 'n' character lines. + */ +void +tc_scrolldown( int lines ) +{ + csrpos_t to; + csrpos_t from; + int size; + + /* move down */ + to = ONE_PAGE - ONE_SPACE; + from = ONE_PAGE - ( ONE_LINE * lines ) - ONE_SPACE; + size = ( ONE_PAGE - ( ONE_LINE * lines ) ) / ONE_SPACE; + move_down(from, to, size); + + /* clear top line */ + to = 0; + size = ( ONE_LINE * lines ) / ONE_SPACE; + clear_block(to, size, vga_attr); +} + +/* Default colors for 16-color palette */ +enum { + kVGAColorBlack = 0, + kVGAColorBlue, + kVGAColorGreen, + kVGAColorCyan, + kVGAColorRed, + kVGAColorMagenta, + kVGAColorBrown, + kVGAColorWhite, + kVGAColorGray, + kVGAColorLightBlue, + kVGAColorLightGreen, + kVGAColorLightCyan, + kVGAColorLightRed, + kVGAColorLightMagenta, + kVGAColorLightBrown, + kVGAColorBrightWhite +}; + +/* + * tc_update_color + * + * Update the foreground / background color. + */ +void +tc_update_color( int color, int fore ) +{ + unsigned char mask_on, mask_off; + + switch ( color ) + { + case 1: mask_on = kVGAColorRed; break; + case 3: mask_on = kVGAColorLightBrown; break; + case 4: mask_on = kVGAColorBlue; break; + case 6: mask_on = kVGAColorCyan; break; + default: mask_on = color; break; + } + + if ( fore ) + { + mask_off = 0x0f; + } + else + { + mask_off = 0xf0; + mask_on <<= 4; + } + + vga_attr = (vga_attr & ~mask_off) | mask_on; + + vga_attr_rev = ( ((vga_attr << 4) & 0xf0) | + ((vga_attr >> 4) & 0x0f) ); +} + +/* + * tc_show_cursor + * + * Show the hardware cursor. 
+ */ +void +tc_show_cursor( int x, int y ) +{ + set_cursor_position( XY_TO_CSRPOS(x, y) ); +} + +/* + * tc_hide_cursor + * + * Hide the hardware cursor. + */ +void +tc_hide_cursor( int x, int y ) +{ + return; +} + +/* + * tc_clear_screen + * + * Clear the entire screen, or a portion of the screen + * relative to the current cursor position. + */ +void +tc_clear_screen(int x, int y, int operation) +{ + csrpos_t start; + int count; + + switch ( operation ) + { + case 0: /* To end of screen */ + start = XY_TO_CSRPOS(x, y); + count = ONE_PAGE - start; + break; + case 1: /* To start of screen */ + start = 0; + count = XY_TO_CSRPOS(x, y) + ONE_SPACE; + break; + default: + case 2: /* Whole screen */ + start = 0; + count = ONE_PAGE; + break; + } + clear_block(start, count, vga_attr); +} + +/* + * tc_putchar + * + * Display a character on screen with the given coordinates, + * and attributes. + */ +void +tc_putchar( unsigned char ch, int x, int y, int attrs ) +{ + char my_attr = vga_attr; + + if ( attrs & 4 ) my_attr = vga_attr_rev; + + display_char( XY_TO_CSRPOS(x, y), ch, vga_attr ); +} + +/* + * tc_initialize + * + * Must be called before any other exported functions. + */ +void +tc_initialize(struct vc_info * vinfo_p) +{ + vinfo_p->v_rows = vinfo_p->v_height; + vinfo_p->v_columns = vinfo_p->v_width; + + vga_init( vinfo_p->v_columns, + vinfo_p->v_rows, + (unsigned char *) vinfo_p->v_baseaddr); +} diff --git a/pexpert/i386/video_console.h b/pexpert/i386/video_console.h new file mode 100644 index 000000000..ccf059eff --- /dev/null +++ b/pexpert/i386/video_console.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __PEXPERT_VIDEO_CONSOLE_H +#define __PEXPERT_VIDEO_CONSOLE_H + +/* + * Video console properties. + */ +struct vc_info { + unsigned long v_height; /* pixels */ + unsigned long v_width; /* pixels */ + unsigned long v_depth; + unsigned long v_rowbytes; + unsigned long v_baseaddr; + unsigned long v_type; + char v_name[32]; + unsigned long v_physaddr; + unsigned long v_rows; /* characters */ + unsigned long v_columns; /* characters */ + unsigned long v_rowscanbytes; /* Actualy number of bytes used for display per row */ + unsigned long v_reserved[5]; +}; + +/* + * From text_console.c + */ +extern void tc_putchar(unsigned char ch, int x, int y, int attrs); +extern void tc_scrolldown(int lines); +extern void tc_scrollup(int lines); +extern void tc_clear_screen(int x, int y, int operation); +extern void tc_show_cursor(int x, int y); +extern void tc_hide_cursor(int x, int y); +extern void tc_initialize(struct vc_info * vinfo_p); +extern void tc_update_color(int color, int fore); + +#endif /* !__PEXPERT_VIDEO_CONSOLE_H */ diff --git a/pexpert/pexpert/Makefile b/pexpert/pexpert/Makefile new file mode 100644 index 000000000..553ecfce5 --- /dev/null +++ b/pexpert/pexpert/Makefile @@ -0,0 +1,42 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export 
MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +INSTINC_SUBDIRS = \ + machine + +INSTINC_SUBDIRS_PPC = \ + ppc + +INSTINC_SUBDIRS_I386 = \ + i386 + +EXPINC_SUBDIRS = ${INSTINC_SUBDIRS} + +EXPINC_SUBDIRS_PPC = ${INSTINC_SUBDIRS_PPC} + +EXPINC_SUBDIRS_I386 = ${INSTINC_SUBDIRS_I386} + +DATAFILES = \ + boot.h \ + protos.h \ + pexpert.h + +INSTALL_MI_LIST = ${DATAFILES} + +INSTALL_MI_DIR = pexpert + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = pexpert + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/pexpert/boot.h b/pexpert/pexpert/boot.h new file mode 100644 index 000000000..d703fb7c6 --- /dev/null +++ b/pexpert/pexpert/boot.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_BOOT_H_ +#define _PEXPERT_BOOT_H_ + +#include + +#endif /* _PEXPERT_BOOT_H_ */ diff --git a/pexpert/pexpert/device_tree.h b/pexpert/pexpert/device_tree.h new file mode 100644 index 000000000..8a124e529 --- /dev/null +++ b/pexpert/pexpert/device_tree.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#ifndef _PEXPERT_DEVICE_TREE_H_
+#define _PEXPERT_DEVICE_TREE_H_
+
+
+/*
+-------------------------------------------------------------------------------
+ Foundation Types
+-------------------------------------------------------------------------------
+*/
+enum {
+    kDTPathNameSeparator = '/' /* 0x2F */
+};
+
+
+/* Property Name Definitions (Property Names are C-Strings)*/
+enum {
+    kDTMaxPropertyNameLength=31 /* Max length of Property Name (terminator not included) */
+};
+
+typedef char DTPropertyNameBuf[32];
+
+
+/* Entry Name Definitions (Entry Names are C-Strings)*/
+enum {
+    kDTMaxEntryNameLength = 31 /* Max length of a C-String Entry Name (terminator not included) */
+};
+
+/* length of DTEntryNameBuf = kDTMaxEntryNameLength +1*/
+typedef char DTEntryNameBuf[32];
+
+
+/* Entry*/
+typedef struct OpaqueDTEntry* DTEntry;
+
+/* Entry Iterator*/
+typedef struct OpaqueDTEntryIterator* DTEntryIterator;
+
+/* Property Iterator*/
+typedef struct OpaqueDTPropertyIterator* DTPropertyIterator;
+
+
+/* status values*/
+enum {
+    kError = -1,
+    kIterationDone = 0,
+    kSuccess = 1
+};
+
+/*
+
+Structures for a Flattened Device Tree
+ */
+
+#define kPropNameLength 32
+
+typedef struct DeviceTreeNodeProperty {
+    char name[kPropNameLength]; // NUL terminated property name
+    unsigned long length; // Length (bytes) of following prop value
+// unsigned long value[1]; // Variable length value of property
+ // Padded to a multiple of a longword?
+} DeviceTreeNodeProperty; + +typedef struct OpaqueDTEntry { + unsigned long nProperties; // Number of props[] elements (0 => end) + unsigned long nChildren; // Number of children[] elements +// DeviceTreeNodeProperty props[];// array size == nProperties +// DeviceTreeNode children[]; // array size == nChildren +} DeviceTreeNode; + + +#ifndef __MWERKS__ +/* +------------------------------------------------------------------------------- + Device Tree Calls +------------------------------------------------------------------------------- +*/ + +/* Used to initalize the device tree functions. */ +/* base is the base address of the flatened device tree */ +void DTInit(void *base); + +/* +------------------------------------------------------------------------------- + Entry Handling +------------------------------------------------------------------------------- +*/ +/* Compare two Entry's for equality. */ +extern int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); + +/* +------------------------------------------------------------------------------- + LookUp Entry by Name +------------------------------------------------------------------------------- +*/ +/* + DTFindEntry: + Find the device tree entry that contains propName=propValue. + It currently searches the entire + tree. This function should eventually go in DeviceTree.c. + Returns: kSuccess = entry was found. Entry is in entryH. + kError = entry was not found +*/ +extern int DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH); + +/* + Lookup Entry + Locates an entry given a specified subroot (searchPoint) and path name. If the + searchPoint pointer is NULL, the path name is assumed to be an absolute path + name rooted to the root of the device tree. 
+*/ +extern int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry); + +/* +------------------------------------------------------------------------------- + Entry Iteration +------------------------------------------------------------------------------- +*/ +/* + An Entry Iterator maintains three variables that are of interest to clients. + First is an "OutermostScope" which defines the outer boundry of the iteration. + This is defined by the starting entry and includes that entry plus all of it's + embedded entries. Second is a "currentScope" which is the entry the iterator is + currently in. And third is a "currentPosition" which is the last entry returned + during an iteration. + + Create Entry Iterator + Create the iterator structure. The outermostScope and currentScope of the iterator + are set to "startEntry". If "startEntry" = NULL, the outermostScope and + currentScope are set to the root entry. The currentPosition for the iterator is + set to "nil". +*/ +extern int DTCreateEntryIterator(const DTEntry startEntry, DTEntryIterator *iterator); + +/* Dispose Entry Iterator*/ +extern int DTDisposeEntryIterator(DTEntryIterator iterator); + +/* + Enter Child Entry + Move an Entry Iterator into the scope of a specified child entry. The + currentScope of the iterator is set to the entry specified in "childEntry". If + "childEntry" is nil, the currentScope is set to the entry specified by the + currentPosition of the iterator. +*/ +extern int DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); + +/* + Exit to Parent Entry + Move an Entry Iterator out of the current entry back into the scope of it's parent + entry. The currentPosition of the iterator is reset to the current entry (the + previous currentScope), so the next iteration call will continue where it left off. + This position is returned in parameter "currentPosition". 
+*/ +extern int DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); + +/* + Iterate Entries + Iterate and return entries contained within the entry defined by the current + scope of the iterator. Entries are returned one at a time. When + int == kIterationDone, all entries have been exhausted, and the + value of nextEntry will be Nil. +*/ +extern int DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); + +/* + Restart Entry Iteration + Restart an iteration within the current scope. The iterator is reset such that + iteration of the contents of the currentScope entry can be restarted. The + outermostScope and currentScope of the iterator are unchanged. The currentPosition + for the iterator is set to "nil". +*/ +extern int DTRestartEntryIteration(DTEntryIterator iterator); + +/* +------------------------------------------------------------------------------- + Get Property Values +------------------------------------------------------------------------------- +*/ +/* + Get the value of the specified property for the specified entry. + + Get Property +*/ +extern int DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, int *propertySize); + +/* +------------------------------------------------------------------------------- + Iterating Properties +------------------------------------------------------------------------------- +*/ +/* + Create Property Iterator + Create the property iterator structure. The target entry is defined by entry. +*/ + +extern int DTCreatePropertyIterator(const DTEntry entry, + DTPropertyIterator *iterator); + +/* Dispose Property Iterator*/ +extern int DTDisposePropertyIterator(DTPropertyIterator iterator); + +/* + Iterate Properites + Iterate and return properties for given entry. + When int == kIterationDone, all properties have been exhausted. 
+*/ + +extern int DTIterateProperties(DTPropertyIterator iterator, + char **foundProperty); + +/* + Restart Property Iteration + Used to re-iterate over a list of properties. The Property Iterator is + reset to the beginning of the list of properties for an entry. +*/ + +extern int DTRestartPropertyIteration(DTPropertyIterator iterator); + + +#endif /* __MWERKS__ */ + +#endif /* _PEXPERT_DEVICE_TREE_H_ */ diff --git a/pexpert/pexpert/i386/Makefile b/pexpert/pexpert/i386/Makefile new file mode 100644 index 000000000..2b6faf73e --- /dev/null +++ b/pexpert/pexpert/i386/Makefile @@ -0,0 +1,27 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + boot.h \ + fb_entries.h \ + protos.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = pexpert/i386 + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = pexpert/i386 + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/pexpert/i386/boot.h b/pexpert/pexpert/i386/boot.h new file mode 100644 index 000000000..80d6adde1 --- /dev/null +++ b/pexpert/pexpert/i386/boot.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_I386_BOOT_H +#define _PEXPERT_I386_BOOT_H + +/* + * What the booter leaves behind for the kernel. + */ + +/* The config table has room for 13 drivers if their config files + * are the maximum size allowed. + */ +#define CONFIG_SIZE (13 * 4096) + +/* Maximum number of boot drivers supported, assuming their + * config files fit in the bootstruct. + */ +#define NDRIVERS 64 + +typedef struct { + char *address; // address where driver was loaded + int size; // entry point for driver +} driver_config_t; + +typedef struct { + unsigned short major_vers; // == 0 if not present + unsigned short minor_vers; + unsigned long cs32_base; + unsigned long cs16_base; + unsigned long ds_base; + unsigned long cs_length; + unsigned long ds_length; + unsigned long entry_offset; + union { + struct { + unsigned long mode_16 :1; + unsigned long mode_32 :1; + unsigned long idle_slows_cpu :1; + unsigned long reserved :29; + } f; + unsigned long data; + } flags; + unsigned long connected; +} APM_config_t; + +typedef struct _EISA_slot_info_t { + union { + struct { + unsigned char duplicateID :4; + unsigned char slotType :1; + unsigned char prodIDPresent :1; + unsigned char dupIDPresent :1; + } s; + unsigned char d; + } u_ID; + unsigned char configMajor; + unsigned char configMinor; + unsigned short checksum; + unsigned char numFunctions; + union { + struct { + unsigned char fnTypesPresent :1; + unsigned char memoryPresent :1; + unsigned char irqPresent :1; + unsigned char dmaPresent 
:1; + unsigned char portRangePresent:1; + unsigned char portInitPresent :1; + unsigned char freeFormPresent :1; + unsigned char reserved:1; + } s; + unsigned char d; + } u_resources; + unsigned char id[8]; +} EISA_slot_info_t; + +typedef struct _EISA_func_info_t { + unsigned char slot; + unsigned char function; + unsigned char reserved[2]; + unsigned char data[320]; +} EISA_func_info_t; + +#define NUM_EISA_SLOTS 64 + +typedef struct _PCI_bus_info_t { + union { + struct { + unsigned char configMethod1 :1; + unsigned char configMethod2 :1; + unsigned char :2; + unsigned char specialCycle1 :1; + unsigned char specialCycle2 :1; + } s; + unsigned char d; + } u_bus; + unsigned char maxBusNum; + unsigned char majorVersion; + unsigned char minorVersion; + unsigned char BIOSPresent; +} PCI_bus_info_t; + +/* + * Video information.. + */ + +struct boot_video { + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_display; /* Display Code (if Applicable */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth */ +}; + +typedef struct boot_video boot_video; + +#define BOOT_STRING_LEN 160 + +typedef struct { + short version; + char bootString[BOOT_STRING_LEN];// string we booted with + int magicCookie; // KERNBOOTMAGIC if struct valid + int numIDEs; // how many IDE drives + int rootdev; // booters guess as to rootdev + int convmem; // conventional memory + int extmem; // extended memory + char boot_file[128]; // name of the kernel we booted + int first_addr0; // first address for kern convmem + int diskInfo[4]; // bios info for bios dev 80-83 + int graphicsMode; // did we boot in graphics mode? 
+ int kernDev; // device kernel was fetched from + int numBootDrivers; // number of drivers loaded by booter + char *configEnd; // pointer to end of config files + int kaddr; // kernel load address + int ksize; // size of kernel + void *rld_entry; // entry point for standalone rld + + driver_config_t driverConfig[NDRIVERS]; + APM_config_t apm_config; + + char _reserved[7500]; + + boot_video video; + + PCI_bus_info_t pciInfo; + + int eisaConfigFunctions; + EISA_slot_info_t eisaSlotInfo[NUM_EISA_SLOTS];// EISA slot information + + char config[CONFIG_SIZE]; // the config file contents +} KERNBOOTSTRUCT; + +#define GRAPHICS_MODE 1 +#define TEXT_MODE 0 + +#define KERNSTRUCT_ADDR ((KERNBOOTSTRUCT *)0x11000) +#define KERNBOOTMAGIC 0xa7a7a7a7 + +#ifndef EISA_CONFIG_ADDR +#define EISA_CONFIG_ADDR 0x20000 +#define EISA_CONFIG_LEN 0x10000 +#endif + +#ifndef KERNEL +extern KERNBOOTSTRUCT *kernBootStruct; +#endif + +#define BOOT_LINE_LENGTH 256 + +/* + * Video information.. + */ + +struct Boot_Video { + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_display; /* Display Code (if Applicable */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth */ +}; + +typedef struct Boot_Video Boot_Video; + +/* DRAM Bank definitions - describes physical memory layout. + */ +#define kMaxDRAMBanks 26 /* maximum number of DRAM banks */ + +struct DRAMBank +{ + unsigned long base; /* physical base of DRAM bank */ + unsigned long size; /* size of bank */ +}; +typedef struct DRAMBank DRAMBank; + + +/* Boot argument structure - passed into Mach kernel at boot time. 
+ */ +#define kBootArgsVersion 1 +#define kBootArgsRevision 1 + +typedef struct boot_args { + unsigned short Revision; /* Revision of boot_args structure */ + unsigned short Version; /* Version of boot_args structure */ + char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ + DRAMBank PhysicalDRAM[kMaxDRAMBanks]; /* base and range pairs for the 26 DRAM banks */ + Boot_Video Video; /* Video Information */ + unsigned long machineType; /* Machine Type (gestalt) */ + void *deviceTreeP; /* Base of flattened device tree */ + unsigned long deviceTreeLength;/* Length of flattened tree */ + unsigned long topOfKernelData;/* Highest address used in kernel data area */ +} boot_args; + +extern boot_args passed_args; + +#endif /* _PEXPERT_I386_BOOT_H */ + diff --git a/pexpert/pexpert/i386/fb_entries.h b/pexpert/pexpert/i386/fb_entries.h new file mode 100644 index 000000000..b4dc94132 --- /dev/null +++ b/pexpert/pexpert/i386/fb_entries.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _I386_FD_ENTRIES_H_ +#define _I386_FD_ENTRIES_H_ + +#include + +/* test for presence of linear framebuffer */ +extern boolean_t fb_present(void); +/* initialize framebuffer */ +extern void fb_init(void); +/* prepare for reboot */ +extern void fb_reset(void); + +#endif /* _I386_FD_ENTRIES_H_ */ diff --git a/pexpert/pexpert/i386/kd_entries.h b/pexpert/pexpert/i386/kd_entries.h new file mode 100644 index 000000000..f3b328a77 --- /dev/null +++ b/pexpert/pexpert/i386/kd_entries.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_I386_KD_ENTRIES_H_ +#define _PEXPERT_I386_KD_ENTRIES_H_ +/* + * @OSF_COPYRIGHT@ + */ +typedef int io_return_t; +typedef unsigned int dev_mode_t; +typedef unsigned int dev_flavor_t; +typedef int *dev_status_t; + +extern io_return_t kdopen( + dev_t dev, + dev_mode_t flag, + io_req_t ior); +extern void kdclose( + dev_t dev); +extern io_return_t kdread( + dev_t dev, + io_req_t ior); +extern io_return_t kdwrite( + dev_t dev, + io_req_t ior); +extern vm_offset_t kdmmap( + dev_t dev, + vm_offset_t off, + vm_prot_t prot); +extern boolean_t kdportdeath( + dev_t dev, + ipc_port_t port); +extern io_return_t kdgetstat( + dev_t dev, + dev_flavor_t flavor, + dev_status_t data, + natural_t *count); +extern io_return_t kdsetstat( + dev_t dev, + dev_flavor_t flavor, + dev_status_t data, + natural_t count); +extern void kd_cmdreg_write( + u_char val); +extern int kd_mouse_write( + u_char val); +extern void kd_mouse_read( + int no, + char * bufp); +extern void kd_mouse_drain(void); +extern void kdreboot(void); +extern void bmpput( + csrpos_t pos, + char ch, + char chattr); +extern void bmpmvup( + csrpos_t from, + csrpos_t to, + int count); +extern void bmpmvdown( + csrpos_t from, + csrpos_t to, + int count); +extern void bmpclear( + csrpos_t to, + int count, + char chattr); +extern void bmpsetsetcursor( + csrpos_t pos); +extern void kd_slmscu( + u_char * from, + u_char * to, + int count); +extern void kd_slmscd( + u_char * from, + u_char * to, + int count); +extern void kd_slmwd( + u_char * pos, + int count, + u_short val); +extern void kd_sendcmd( + u_char c); + +#endif /* _PEXPERT_POWERMAC_PDM_H_ */ diff --git a/pexpert/pexpert/i386/kdsoft.h b/pexpert/pexpert/i386/kdsoft.h new file mode 100644 index 000000000..2cdfd0aa9 --- /dev/null +++ b/pexpert/pexpert/i386/kdsoft.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_I386_KDSOFT_H_ +#define _PEXPERT_I386_KDSOFT_H_ + +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
+ */ +/* + */ + +/* + * File: kdsoft.h + * Description: Software structures for keyboard/display driver, shared with + * drivers for specific graphics cards. + * + * $ Header: $ + * + * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989. + * All rights reserved. + * + * Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., + * Cupertino, California. + * + * All Rights Reserved + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that the above copyright notice appears in all + * copies and that both the copyright notice and this permission notice + * appear in supporting documentation, and that the name of Olivetti + * not be used in advertising or publicity pertaining to distribution + * of the software without specific, written prior permission. + * + * OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, + * IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM + * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, + * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * This driver handles two types of graphics cards. The first type + * (e.g., EGA, CGA), treats the screen as a page of characters and + * has a hardware cursor. The second type (e.g., the Blit) treats the + * screen as a bitmap. A hardware cursor may be present, but it is + * ignored in favor of a software cursor. + * + * + * Most of the driver uses the following abstraction for the display: + * + * The cursor position is simply an index into a (logical) linear char + * array that wraps around at the end of each line. Each character + * takes up ONE_SPACE bytes. Values in [0..ONE_PAGE) are positions in + * the displayed page. 
Values < 0 and >= ONE_PAGE are off the page + * and require some scrolling to put the cursor back on the page. + * + * The kd_dxxx routines handle the conversion from this abstraction to + * what the hardware requires. + * + * (*kd_dput)(pos, ch, chattr) + * csrpos_t pos; + * char ch, chattr; + * Displays a character at "pos", where "ch" = the character to + * be displayed and "chattr" is its attribute byte. + * + * (*kd_dmvup)(from, to, count) + * csrpos_t from, to; + * int count; + * Does a (relatively) fast block transfer of characters upward. + * "count" is the number of character positions (not bytes) to move. + * "from" is the character position to start moving from (at the start + * of the block to be moved). "to" is the character position to start + * moving to. + * + * (*kd_dmvdown)(from, to, count) + * csrpos_t from, to; + * int count; + * "count" is the number of character positions (not bytes) to move. + * "from" is the character position to start moving from (at the end + * of the block to be moved). "to" is the character position to + * start moving to. + * + * (*kd_dclear)(to, count, chattr) + * csrpos_t, to; + * int count; + * char chattr; + * Erases "count" character positions, starting with "to". + * + * (*kd_dsetcursor)(pos) + * Sets kd_curpos and moves the displayed cursor to track it. "pos" + * should be in the range [0..ONE_PAGE). + * + * (*kd_dreset)() + * In some cases, the boot program expects the display to be in a + * particular state, and doing a soft reset (i.e., + * software-controlled reboot) doesn't put it into that state. For + * these cases, the machine-specific driver should provide a "reset" + * procedure, which will be called just before the kd code causes the + * system to reboot. + */ + +//ERICHACK#include + +/* + * Globals used for both character-based controllers and bitmap-based + * controllers. 
+ */ + +typedef short csrpos_t; /* cursor position, ONE_SPACE bytes per char */ +extern u_char *vid_start; /* VM start of video RAM or frame buffer */ +extern csrpos_t kd_curpos; /* should be set only by kd_setpos */ +extern short kd_lines; /* num lines in tty display */ +extern short kd_cols; +extern char kd_attr; /* current character attribute */ + + +/* + * Globals used only for bitmap-based controllers. + * XXX - probably needs reworking for color. + */ + +/* + * The following font layout is assumed: + * + * The top scan line of all the characters comes first. Then the + * second scan line, then the third, etc. + * + * ------ ... ---------|-----N--------|-------------- ... ----------- + * ------ ... ---------|-----N--------|-------------- ... ----------- + * . + * . + * . + * ------ ... ---------|-----N--------|-------------- ... ----------- + * + * In the picture, each line is a scan line from the font. Each scan + * line is stored in memory immediately after the previous one. The + * bits between the vertical lines are the bits for a single character + * (e.g., the letter "N"). + * There are "char_height" scan lines. Each character is "char_width" + * bits wide. We make the simplifying assumption that characters are + * on byte boundaries. (We also assume that a byte is 8 bits.) + */ + +extern u_char *font_start; /* starting addr of font */ + +extern short fb_width; /* bits in frame buffer scan line */ +extern short fb_height; /* scan lines in frame buffer*/ +extern short char_width; /* bit width of 1 char */ +extern short char_height; /* bit height of 1 char */ +extern short chars_in_font; +extern short cursor_height; /* bit height of cursor */ + /* char_height + cursor_height = line_height */ + +extern u_char char_black; /* 8 black (off) bits */ +extern u_char char_white; /* 8 white (on) bits */ + + +/* + * The tty emulation does not usually require the entire frame buffer. 
+ * (xstart, ystart) is the bit address for the upper left corner of the + * tty "screen". + */ + +extern short xstart, ystart; + + +/* + * Accelerators for bitmap displays. + */ + +extern short char_byte_width; /* char_width/8 */ +extern short fb_byte_width; /* fb_width/8 */ +extern short font_byte_width; /* num bytes in 1 scan line of font */ + +extern void bmpput( + csrpos_t pos, + char ch, + char chattr); +extern void bmpmvup( + csrpos_t from, + csrpos_t to, + int count); +extern void bmpmvdown( + csrpos_t from, + csrpos_t to, + int count); +extern void bmpclear( + csrpos_t to, + int count, + char chattr); +extern void bmpsetcursor( + csrpos_t pos); + +extern void (*kd_dput)( /* put attributed char */ + csrpos_t pos, + char ch, + char chattr); +extern void (*kd_dmvup)( /* block move up */ + csrpos_t from, + csrpos_t to, + int count); +extern void (*kd_dmvdown)( /* block move down */ + csrpos_t from, + csrpos_t to, + int count); +extern void (*kd_dclear)( /* block clear */ + csrpos_t to, + int count, + char chattr); +extern void (*kd_dsetcursor)( + /* set cursor position on displayed page */ + csrpos_t pos); +extern void (*kd_dreset)(void); /* prepare for reboot */ + + +#include + +extern void kdintr( + int vec, + int regs); + +#endif /* _PEXPERT_I386_KDSOFT_H_ */ diff --git a/pexpert/pexpert/i386/protos.h b/pexpert/pexpert/i386/protos.h new file mode 100644 index 000000000..cdb4d4583 --- /dev/null +++ b/pexpert/pexpert/i386/protos.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_I386_PROTOS_H +#define _PEXPERT_I386_PROTOS_H + +//------------------------------------------------------------------------ +// x86 IN/OUT I/O inline functions. +// +// IN : inb, inw, inl +// IN(port) +// +// OUT: outb, outw, outl +// OUT(port, data) + +typedef unsigned short i386_ioport_t; + +#define __IN(s, u) \ +static __inline__ unsigned u \ +in##s(i386_ioport_t port) \ +{ \ + unsigned u data; \ + asm volatile ( \ + "in" #s " %1,%0" \ + : "=a" (data) \ + : "d" (port)); \ + return (data); \ +} + +#define __OUT(s, u) \ +static __inline__ void \ +out##s(i386_ioport_t port, unsigned u data) \ +{ \ + asm volatile ( \ + "out" #s " %1,%0" \ + : \ + : "d" (port), "a" (data)); \ +} + +__IN(b, char) +__IN(w, short) +__IN(l, long) + +__OUT(b, char) +__OUT(w, short) +__OUT(l, long) + +//------------------------------------------------------------------------ +// from bsd/dev/ppc/busses.h which clashes with mach/device/device_types.h +typedef int io_req_t; + + +// typedef struct ipc_port *ipc_port_t; + +extern void cninit(void); + +#endif /* _PEXPERT_I386_PROTOS_H */ diff --git a/pexpert/pexpert/machine/Makefile b/pexpert/pexpert/machine/Makefile new file mode 100644 index 000000000..ad76e611e --- /dev/null +++ b/pexpert/pexpert/machine/Makefile @@ -0,0 +1,26 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export 
MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + boot.h \ + protos.h + +INSTALL_MI_LIST = + +INSTALL_MI_DIR = + +EXPORT_MI_LIST = ${DATAFILES} + +EXPORT_MI_DIR = pexpert/machine + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/pexpert/machine/boot.h b/pexpert/pexpert/machine/boot.h new file mode 100644 index 000000000..e6e841f65 --- /dev/null +++ b/pexpert/pexpert/machine/boot.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_MACHINE_BOOT_H +#define _PEXPERT_MACHINE_BOOT_H + + +#if defined (__ppc__) +#include "pexpert/ppc/boot.h" +#elif defined (__i386__) +#include "pexpert/i386/boot.h" +#else +#error architecture not supported +#endif + + +#endif /* _PEXPERT_MACHINE_BOOT_H */ + diff --git a/pexpert/pexpert/machine/protos.h b/pexpert/pexpert/machine/protos.h new file mode 100644 index 000000000..520daef69 --- /dev/null +++ b/pexpert/pexpert/machine/protos.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_MACHINE_PROTOS_H +#define _PEXPERT_MACHINE_PROTOS_H + + +#if defined (__ppc__) +#include "pexpert/ppc/protos.h" +#elif defined (__i386__) +#include "pexpert/i386/protos.h" +#else +#error architecture not supported +#endif + + +#endif /* _PEXPERT_MACHINE_PROTOS_H */ + diff --git a/pexpert/pexpert/pe_images.h b/pexpert/pexpert/pe_images.h new file mode 100644 index 000000000..d19ed3f27 --- /dev/null +++ b/pexpert/pexpert/pe_images.h @@ -0,0 +1,330 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +static unsigned char default_progress_data[16 * 16 * 3] = { + +#if 1 +// grey +#define TRANSPARENT 0xfa + 0xfa,0xfa,0xfa,0xfa,0x81,0xf8,0x2c,0x33,0x57,0x51,0xf9,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0x81,0xf8,0x33,0x3a,0x58,0x59,0x7d,0x58,0x7c,0x58,0x7b,0xfb,0xfa,0xfa, + 0xfa,0x81,0x2b,0x33,0x3a,0x34,0x5f,0x5e,0x59,0x7d,0x58,0x76,0x51,0x7b,0x81,0xfa, + 0xfa,0xf8,0x0f,0x39,0x33,0x3a,0x58,0x59,0x7d,0x58,0x76,0x51,0x75,0x75,0xfc,0xfa, + 0x81,0x0e,0x15,0x0f,0x15,0x34,0x3a,0x59,0x58,0x7c,0x52,0x75,0x51,0x6f,0x51,0xfb, + 0xf8,0x14,0x39,0x14,0x33,0x39,0x34,0x5d,0x58,0x52,0x75,0x6f,0x6f,0x4a,0x6f,0xfc, + 0x31,0x39,0x14,0x39,0x14,0x14,0xf9,0xfb,0xfa,0xfa,0x4b,0x4b,0x74,0x99,0x74,0xa6, + 0x32,0x3e,0x5d,0x3e,0x39,0x39,0x81,0xfa,0xfa,0xf9,0x9f,0x74,0x99,0x74,0x99,0xa5, + 0x38,0x3f,0x3e,0x63,0x3e,0x62,0xfa,0xfa,0x56,0xf8,0x9f,0x99,0x9e,0x99,0x98,0xa6, + 0xf8,0x68,0x63,0x68,0x68,0x69,0x80,0xf9,0x56,0xa5,0xc2,0xc2,0xc3,0x9e,0x9f,0xfc, + 0xf9,0x63,0x68,0x8d,0x68,0x86,0x61,0x7f,0x7f,0x9d,0xa4,0x9e,0xc2,0xc3,0xc2,0xfd, + 0xfb,0x62,0x8c,0x68,0x86,0x86,0x85,0x7e,0x78,0x7f,0x9d,0xa4,0xc3,0xc8,0xc9,0xfb, + 0xfa,0x81,0x68,0x86,0x62,0x61,0x5b,0x7e,0x5b,0x9d,0xa3,0x9e,0xa4,0xa4,0xfd,0xfb, + 0xfa,0xfa,0x81,0x86,0x85,0x61,0x7f,0x7e,0x78,0x7f,0x79,0xa3,0x9e,0xd0,0x81,0xfa, + 0xfa,0xfa,0xfa,0xfb,0x86,0x7f,0x5b,0x7e,0x5a,0x79,0x7f,0xa4,0xfd,0xfb,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfc,0xfc,0xab,0x80,0xa4,0xab,0xd0,0xfc,0xfa,0xfa,0xfa,0xfa, + + 
0xfa,0xfa,0xfa,0xfa,0x81,0x2b,0x07,0x2c,0x2c,0x2c,0x56,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0x81,0x2b,0x08,0x09,0x09,0x2d,0x2e,0x52,0x58,0x7b,0xfa,0x81,0xfa,0xfa, + 0xfa,0x81,0x07,0x33,0x33,0x09,0x09,0x2e,0x2d,0x52,0x7c,0x7d,0xa1,0xfb,0x81,0xfa, + 0xfa,0xf6,0x3a,0x3a,0x33,0x0f,0x2c,0x09,0x2e,0x58,0x7c,0xa1,0xe5,0xa0,0xfc,0xfa, + 0xfa,0x39,0x64,0x3a,0x3a,0x33,0x09,0x09,0x52,0x7c,0x7d,0xa1,0xa1,0xa0,0xa0,0xfc, + 0x2b,0x64,0x6a,0x64,0x64,0x3a,0x33,0x2c,0x2d,0x7d,0xe5,0xa0,0x9a,0x9a,0x99,0xac, + 0x32,0xdb,0x6a,0x6a,0x6a,0x40,0x5d,0xfb,0xfa,0xfa,0xa0,0x9a,0x9f,0x99,0x75,0xa6, + 0x56,0x69,0x69,0x6a,0x64,0x6a,0x81,0xfa,0xfa,0xf9,0x9f,0x74,0x74,0x74,0x74,0x9f, + 0x38,0x63,0x63,0x63,0x69,0x62,0xfa,0xfa,0x56,0xf8,0x74,0x6e,0x6d,0x6e,0x6d,0xfb, + 0xf8,0x3e,0x62,0x3e,0x37,0x37,0x5b,0xf9,0x56,0x80,0x73,0x73,0x6d,0x4f,0x6d,0xa5, + 0xf9,0x3e,0x3d,0x3d,0x37,0x36,0x5a,0x86,0xaa,0xc8,0x9d,0x97,0x97,0x6d,0x73,0xfd, + 0x81,0x5b,0x37,0x36,0x36,0x36,0x5b,0x85,0xce,0xce,0xa4,0x9e,0x9d,0x73,0x9e,0xfc, + 0xfa,0xfa,0x37,0x30,0x36,0x5b,0x5b,0xa9,0xaa,0xce,0xa4,0xa4,0x9d,0x9e,0xac,0xfa, + 0xfa,0xfa,0xfa,0x36,0x54,0x5b,0x7f,0xa9,0xaa,0xce,0xce,0xa4,0xa4,0xfc,0xfc,0xfa, + 0xfa,0xfa,0xfa,0xfb,0x5c,0x85,0x85,0xaa,0xaa,0xce,0xce,0xc9,0xf3,0xfb,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfc,0xac,0xab,0xab,0xcf,0xf2,0xfe,0xfc,0xfa,0xfa,0xfa,0xfa, + + 0xfa,0xfa,0xfa,0xfa,0x81,0x56,0x2c,0x2c,0x08,0x2c,0xf9,0x81,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0x81,0x56,0x33,0x5e,0x34,0x2d,0x2e,0x27,0x27,0x50,0x81,0x81,0xfa,0xfa, + 0xfa,0x81,0xf7,0x65,0x65,0x5e,0x34,0x2e,0x2d,0x27,0x27,0x51,0x52,0x81,0xfb,0xfa, + 0xfa,0x56,0x64,0x65,0x65,0x5e,0x58,0x34,0x2d,0x2c,0x4b,0x51,0x76,0x76,0xfc,0xfa, + 0xfa,0x39,0x64,0x65,0x65,0x65,0x5e,0x34,0x27,0x27,0x51,0x76,0x9a,0xa0,0xa0,0xfb, + 0x56,0x3f,0x64,0x63,0x64,0x65,0x5f,0x57,0x2d,0x51,0x76,0x9a,0xa0,0xa0,0xc4,0xac, + 0xf7,0x39,0x39,0x40,0x3f,0x64,0x5d,0xfb,0xfa,0x57,0xa0,0xa0,0xa0,0xc4,0xc4,0xfd, + 0xf7,0x1a,0x38,0x38,0x38,0x38,0x81,0xfa,0xfa,0xf9,0xa0,0xc4,0xc4,0xc4,0xc4,0xca, + 
0x31,0x13,0x14,0x13,0x13,0x14,0xfa,0xfa,0x56,0x56,0x9f,0x9e,0x9f,0x9f,0x9f,0xca, + 0xf8,0x13,0x0d,0x37,0x37,0x3d,0x80,0xf9,0x56,0x7a,0x73,0x97,0x98,0x98,0x9e,0xfc, + 0x81,0x37,0x3d,0x37,0x3d,0x62,0x8c,0xb0,0xa3,0x79,0x72,0x73,0x73,0x97,0x98,0xfd, + 0x81,0x37,0x37,0x61,0x62,0x86,0xb0,0xaa,0xa3,0x79,0x72,0x72,0x73,0x73,0x9e,0xfc, + 0xfa,0x81,0x61,0x62,0x85,0x8c,0xb0,0xaa,0xaa,0x7f,0x79,0x72,0x72,0x73,0xac,0xfa, + 0xfa,0xfa,0x81,0x86,0x86,0xb0,0xf0,0xaa,0xa9,0xa3,0x79,0x78,0x73,0xac,0xfb,0xfa, + 0xfa,0xfa,0xfa,0xfc,0x87,0xb1,0xb0,0xaa,0xa9,0xa3,0x7f,0xa4,0xac,0x81,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfc,0xfc,0xd0,0xab,0xf1,0xac,0xfd,0xfc,0xfa,0xfa,0xfa,0xfa, +#endif + +#if 0 +// blue +#define TRANSPARENT 0x80 + 0x80,0x80,0x80,0x80,0x81,0xf8,0x2c,0x33,0x57,0x51,0xf9,0x80,0x80,0x80,0x80,0x80, + 0x80,0x80,0x80,0xf8,0x33,0x3a,0x58,0x59,0x7d,0x58,0x7c,0x58,0xfa,0xa5,0x80,0x80, + 0x80,0x81,0xf7,0x33,0x3a,0x34,0x5f,0x5e,0x59,0x7d,0x58,0x76,0x51,0x81,0xfb,0x80, + 0x80,0xf8,0x0f,0x39,0x33,0x3a,0x58,0x59,0x7d,0x58,0x76,0x51,0x75,0x6f,0xfb,0x80, + 0x81,0x0e,0x14,0x0f,0x15,0x34,0x3a,0x59,0x58,0x7c,0x52,0x75,0x51,0x6f,0x75,0xfb, + 0xf8,0x14,0x39,0x14,0x33,0x39,0x34,0x5d,0x58,0x52,0x75,0x6f,0x6f,0x4a,0x74,0xfc, + 0x31,0x39,0x14,0x39,0x14,0x14,0xf9,0xfb,0xfa,0xfa,0x4b,0x4b,0x74,0x99,0x74,0xa6, + 0x32,0x3e,0x5d,0x3e,0x39,0x39,0x81,0xfa,0xfa,0xf9,0x99,0x74,0x99,0x74,0x99,0xa5, + 0x38,0x3f,0x3e,0x63,0x3e,0x62,0xfa,0xfa,0x56,0xf8,0x9f,0x9f,0x98,0x99,0x98,0xa6, + 0xf8,0x68,0x63,0x68,0x69,0x68,0xfa,0xf9,0x56,0xa5,0xc2,0xc2,0xc3,0x9e,0x9f,0xfc, + 0xf9,0x63,0x68,0x8c,0x68,0x8c,0x61,0x7f,0x7f,0x9d,0xa4,0xc8,0x9e,0xc3,0xc2,0xd0, + 0xab,0x62,0x8c,0x68,0x86,0x85,0x7f,0x7e,0x78,0x7f,0x9d,0xa4,0xc3,0xc8,0xc9,0xfb, + 0x80,0x81,0x68,0x86,0x62,0x86,0x5b,0x5a,0x7f,0x79,0xa3,0x9e,0xa4,0xc8,0xac,0xab, + 0x80,0x80,0x81,0x86,0x85,0x61,0x7f,0x7e,0x78,0x7f,0x9d,0xa3,0x9e,0xd0,0xfb,0x80, + 0x80,0x80,0x80,0xfb,0x62,0x7f,0x5b,0x7e,0x5a,0x79,0x7f,0xa4,0xfd,0xfb,0x80,0x80, + 
0x80,0x80,0x80,0x80,0xab,0xac,0xab,0x80,0xa4,0xab,0xac,0xab,0x80,0x80,0x80,0x80, + + 0x80,0x80,0x80,0x80,0x81,0x2b,0x07,0x2c,0x2c,0x2c,0x56,0xfa,0x80,0x80,0x80,0x80, + 0x80,0x80,0x80,0x2b,0x08,0x09,0x09,0x2d,0x2e,0x52,0x7c,0x7b,0xfa,0x81,0x80,0x80, + 0x80,0x81,0x07,0x33,0x33,0x09,0x09,0x2d,0x2d,0x52,0x7c,0x7d,0xa1,0xa5,0xfb,0x80, + 0x80,0x2b,0x3a,0x3a,0x33,0x0f,0x2d,0x09,0x2e,0x58,0x7c,0xa1,0xa1,0xa0,0xfb,0x80, + 0xfa,0x38,0x64,0x3a,0x3a,0x33,0x09,0x09,0x52,0x7c,0x7d,0xa1,0xa1,0xa0,0xa0,0xfc, + 0x2b,0x64,0x6a,0x64,0x64,0x3a,0x33,0x2c,0x2d,0x7d,0xe5,0xa0,0xa0,0x9a,0x9f,0xac, + 0x32,0xdb,0x6a,0x6a,0x6a,0x40,0x5d,0xfb,0xfa,0xfa,0xa0,0x9a,0x99,0x99,0x75,0xa6, + 0x56,0x69,0x69,0x6a,0x64,0x6a,0x81,0xfa,0xfa,0xf9,0x9f,0x74,0x74,0x74,0x74,0x9f, + 0x38,0x63,0x63,0x63,0x69,0x62,0xfa,0xfa,0x56,0xf8,0x74,0x6e,0x6d,0x6e,0x6d,0x9f, + 0xf8,0x3e,0x62,0x3e,0x37,0x37,0x5b,0xf9,0x56,0x80,0x73,0x73,0x6d,0x4f,0x6d,0xfb, + 0xf9,0x3e,0x3d,0x3d,0x37,0x36,0x5a,0x86,0xaa,0xc8,0x9d,0x97,0x97,0x6d,0x73,0xfd, + 0x81,0x5b,0x37,0x36,0x36,0x36,0x5b,0x85,0xce,0xce,0xa4,0x9e,0x9d,0x73,0x9e,0xab, + 0x80,0xfa,0x37,0x30,0x36,0x5b,0x5b,0xa9,0xaa,0xce,0xa4,0xa4,0x9d,0x9e,0xac,0x80, + 0x80,0x80,0xfa,0x36,0x54,0x5b,0x7f,0xa9,0xaa,0xce,0xce,0xa4,0xa4,0xfc,0xab,0x80, + 0x80,0x80,0x80,0x87,0x5c,0x85,0x85,0xaa,0xaa,0xce,0xce,0xcf,0xfd,0xab,0x80,0x80, + 0x80,0x80,0x80,0x80,0xab,0xac,0xab,0xab,0xcf,0xf2,0xfd,0xfc,0x80,0x80,0x80,0x80, + + 0x80,0x80,0x80,0x80,0x81,0x56,0x2c,0x2c,0x2c,0x2c,0xf9,0xa4,0x80,0x80,0x80,0x80, + 0x80,0x80,0x80,0x56,0x57,0x5e,0x34,0x2d,0x2e,0x27,0x27,0x50,0xfa,0xfb,0x80,0x80, + 0x80,0x81,0xf7,0x65,0x65,0x5e,0x34,0x2e,0x2d,0x2d,0x26,0x52,0x51,0x81,0xfb,0x80, + 0x80,0x56,0x64,0x65,0x65,0x5e,0x58,0x34,0x2d,0x27,0x4b,0x51,0x76,0x9a,0xfb,0x80, + 0x80,0x39,0x64,0x65,0x65,0x65,0x5e,0x34,0x27,0x27,0x51,0x76,0x76,0xa0,0xa0,0xab, + 0x56,0x3f,0x64,0x63,0x64,0x65,0x5f,0x33,0x2d,0x51,0x76,0x99,0xa0,0xc4,0xc4,0xfc, + 0xf7,0x39,0x39,0x40,0x3f,0x64,0x5d,0xfb,0xfa,0x57,0xa0,0xa1,0xc4,0xa0,0xc4,0xd0, + 
0xf7,0x1a,0x38,0x38,0x38,0x38,0x81,0x81,0xfa,0xf9,0xa0,0xc3,0xc4,0xc4,0xc4,0xca, + 0x31,0x13,0x14,0x13,0x13,0x14,0xfa,0xfa,0x56,0x56,0x9f,0x9f,0x9f,0x9f,0x9f,0xd0, + 0xf8,0x13,0x0d,0x37,0x37,0x3d,0x80,0xf9,0x56,0x7a,0x73,0x97,0x98,0x98,0x98,0xfc, + 0x81,0x37,0x3d,0x37,0x3d,0x62,0x8c,0xaa,0xa3,0x79,0x72,0x73,0x73,0x97,0x9e,0xac, + 0x80,0x37,0x37,0x61,0x62,0x86,0xb0,0xaa,0xa9,0x79,0x72,0x72,0x73,0x73,0x9e,0xfc, + 0x80,0xfb,0x61,0x62,0x85,0x8c,0xb0,0xaa,0xaa,0x7f,0x79,0x72,0x72,0x73,0xfd,0x80, + 0x80,0x80,0x81,0x86,0x86,0xb0,0xf0,0xaa,0xa9,0xa3,0x79,0x78,0x73,0xab,0xfb,0x80, + 0x80,0x80,0x80,0xab,0x87,0xb1,0xb0,0xaa,0xa9,0xa3,0x7f,0xa4,0xac,0xab,0x80,0x80, + 0x80,0x80,0x80,0x80,0xab,0xfc,0xd0,0xab,0xf1,0xac,0xd0,0xfc,0x80,0x80,0x80,0x80, +#endif + +#if 0 +// panel + 0xf9,0xf9,0x56,0xf9,0xfa,0x2c,0x32,0x2d,0x57,0x51,0x56,0x56,0x56,0xf9,0xf9,0x56, + 0x56,0xfa,0x56,0x56,0x2c,0x5e,0x58,0x5f,0x58,0x7d,0x52,0x57,0x56,0x56,0xf9,0xf9, + 0xf9,0x56,0xf9,0x33,0x3a,0x34,0x5f,0x59,0x83,0x52,0x7c,0x76,0x76,0x7b,0x5d,0x56, + 0xf9,0x56,0x0f,0x39,0x33,0x3a,0x34,0x59,0x58,0x58,0x52,0x76,0x4b,0x75,0x7b,0x56, + 0xf9,0x0e,0x15,0x0f,0x39,0x34,0x5e,0x59,0x83,0x7c,0x7c,0x4b,0x75,0x6f,0x7b,0x56, + 0x56,0x39,0x0e,0x14,0x0e,0x39,0x33,0x57,0x57,0x7c,0x4b,0x6f,0x4a,0x75,0x6e,0xa6, + 0x32,0x38,0x3f,0x39,0x39,0x0e,0x5d,0x81,0x81,0x57,0x75,0x6f,0x75,0x6e,0x75,0xa6, + 0x31,0x3e,0x38,0x3e,0x38,0x38,0x7b,0x81,0x56,0x56,0x75,0x74,0x6e,0x74,0x74,0xa5, + 0x38,0x3f,0x63,0x63,0x69,0x62,0x81,0x7b,0x56,0x56,0xa5,0x99,0x9f,0x99,0x9f,0xa6, + 0x31,0x69,0x62,0x68,0x62,0x8d,0x5c,0x5c,0x56,0xa5,0xc2,0xc2,0x98,0xc3,0x98,0xac, + 0x5c,0x62,0x69,0x68,0x8c,0x62,0x86,0x7f,0x7f,0x79,0xa4,0xc2,0xc9,0x9e,0xc9,0xac, + 0x56,0x62,0x62,0x8c,0x62,0x85,0x5b,0x7e,0x78,0x7f,0x9d,0xc8,0x9e,0xc8,0xc9,0x81, + 0xf9,0x81,0x8c,0x62,0x8c,0x85,0x85,0x7e,0x7f,0x79,0xa3,0x9d,0xa4,0xc8,0xac,0x56, + 0xf9,0xf9,0x80,0x86,0x5b,0x85,0x5a,0x7e,0x54,0x7f,0x79,0xa3,0x9d,0xd0,0x56,0xf9, + 
0xf9,0xf9,0x56,0x81,0x86,0x5b,0x85,0x7e,0x7e,0x78,0xa3,0xa4,0xac,0x56,0xfa,0xf9, + 0xf9,0x56,0xf9,0xf9,0x56,0xab,0xa5,0x80,0x80,0xab,0xac,0x81,0x56,0xf9,0x56,0xf9, + + 0xf9,0xf9,0x56,0xf9,0xfa,0xf9,0x2c,0x2c,0x2d,0x2c,0x56,0x56,0x56,0xf9,0xf9,0x56, + 0x56,0xfa,0x56,0x56,0x08,0x09,0x09,0x2d,0x2d,0x58,0x52,0x7b,0x56,0x56,0xf9,0xf9, + 0xf9,0x56,0x07,0x33,0x33,0x09,0x09,0x2d,0x2e,0x52,0x7d,0x7d,0xa7,0x7b,0x5d,0x56, + 0xf9,0x56,0x39,0x3a,0x33,0x0f,0x08,0x09,0x2d,0x58,0x76,0xa7,0xa1,0xa0,0x81,0x56, + 0xf9,0x39,0x64,0x3a,0x3a,0x33,0x0f,0x09,0x58,0x7c,0xa7,0xa1,0xa1,0xa0,0xa6,0x7b, + 0x56,0x6a,0x64,0x64,0x3a,0x3a,0x0f,0x2c,0x2d,0x7d,0xa1,0xa0,0x9a,0xa0,0x99,0xac, + 0x32,0x64,0x6a,0x6a,0x6a,0x40,0x5d,0x81,0x81,0x7b,0xa1,0x9a,0xa0,0x75,0x9f,0xa6, + 0x32,0x69,0x63,0x6a,0x64,0x6a,0x7b,0x81,0x56,0x56,0x75,0x74,0x6e,0x74,0x6e,0x9f, + 0x5c,0x63,0x69,0x63,0x69,0x62,0x81,0x7b,0x56,0x56,0x74,0x6d,0x74,0x6e,0x74,0x9f, + 0x31,0x62,0x3e,0x3e,0x37,0x3d,0x55,0x5c,0x50,0xa5,0x73,0x73,0x6d,0x73,0x49,0xa5, + 0x5c,0x37,0x3e,0x37,0x3d,0x30,0x5b,0x85,0xd4,0xa4,0xa4,0x97,0x97,0x6d,0x73,0xac, + 0x56,0x37,0x36,0x37,0x30,0x36,0x5a,0x85,0xaa,0xce,0x9d,0x9e,0x73,0x73,0x74,0x81, + 0xf9,0x81,0x37,0x30,0x36,0x5b,0x85,0xa9,0xd4,0xce,0xce,0x9e,0xa4,0x9e,0xac,0x56, + 0xf9,0xf9,0x5c,0x36,0x30,0x61,0x7f,0xa9,0xa9,0xce,0xc8,0xa4,0x9e,0xd0,0x56,0xf9, + 0xf9,0xf9,0x56,0x81,0x86,0x5b,0x85,0xaa,0xd4,0xaa,0xcf,0xcf,0xd6,0x56,0xfa,0xf9, + 0xf9,0x56,0xf9,0xf9,0x7b,0xac,0xab,0xab,0xab,0xd0,0xac,0x81,0x56,0xf9,0x56,0xf9, + + 0xf9,0xf9,0x56,0xf9,0xfa,0x2c,0x2c,0x2c,0x2c,0x2c,0x56,0x56,0x56,0xf9,0xf9,0x56, + 0x56,0xfa,0x56,0x56,0x33,0x5e,0x34,0x2d,0x27,0x2d,0x26,0x50,0x56,0x56,0xf9,0xf9, + 0xf9,0x56,0x32,0x65,0x65,0x58,0x5e,0x2e,0x2d,0x27,0x2d,0x51,0x76,0x7b,0x5d,0x56, + 0xf9,0x56,0x5e,0x65,0x5f,0x5e,0x34,0x34,0x27,0x27,0x27,0x76,0x75,0x76,0x81,0x56, + 0x56,0x39,0x6a,0x64,0x6b,0x65,0x5f,0x34,0x2d,0x27,0x51,0x75,0xa0,0xa0,0xa6,0x56, + 0xf9,0x64,0x39,0x64,0x64,0x65,0x5e,0x33,0x26,0x51,0x75,0x9a,0x9a,0xc4,0xa0,0xac, + 
0x32,0x39,0x3f,0x3f,0x64,0x40,0x5d,0x81,0x81,0x57,0xa0,0xa0,0xca,0xa0,0xca,0xa6, + 0xf7,0x38,0x14,0x38,0x38,0x38,0x7b,0x81,0x56,0x56,0xa0,0xc4,0xc3,0xc4,0x9f,0xca, + 0x32,0x13,0x14,0x13,0x13,0x13,0x81,0x7b,0x56,0x56,0x9f,0x9f,0x9f,0x9f,0xc3,0xca, + 0xf7,0x13,0x0d,0x13,0x13,0x3d,0x5c,0x5c,0x50,0x80,0x6d,0x97,0x98,0x9e,0x98,0xac, + 0x81,0x13,0x3d,0x37,0x61,0x61,0x8c,0xaa,0xaa,0x72,0x73,0x73,0x97,0x73,0x9e,0xac, + 0x56,0x37,0x37,0x61,0x61,0x86,0xaa,0xaa,0xa3,0x79,0x72,0x72,0x6c,0x73,0x9e,0x81, + 0xf9,0x5d,0x62,0x61,0x8c,0x8c,0xb1,0xaa,0xaa,0x7f,0x79,0x4e,0x73,0x73,0xac,0x56, + 0xf9,0xf9,0x80,0x86,0x86,0xb0,0xaa,0xaa,0xa3,0xa3,0x78,0x78,0x72,0xac,0x56,0xf9, + 0xf9,0xf9,0x56,0x81,0xb1,0xaa,0xb0,0xaa,0xaa,0x7f,0xa3,0xa4,0xac,0x56,0xfa,0xf9, + 0xf9,0x56,0xf9,0xf9,0x56,0x87,0xac,0xab,0xab,0xac,0xa5,0x81,0x56,0xf9,0x56,0xf9 + + 0xf9,0xf9,0x56,0xf9,0xfa,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0x56,0x56,0xf9,0xf9,0x56, + 0x56,0xfa,0x56,0x56,0xf9,0xf9,0xf9,0xf9,0xf9,0xf9,0x56,0x56,0xf9,0xf9,0xf9,0xf9, 0xf9,0x56,0xf9,0xf9,0x56,0xf9,0xf9,0x56,0xf9,0xf9,0x56,0x56,0xf9,0xf9,0xf9,0x56, 0xf9,0x56,0xf9,0x56,0xf9,0xf9,0x56,0x56,0xf9,0x56,0x56,0xf9,0xf9,0xf9,0x56,0x56, + 0xf9,0x56,0x56,0xf9,0x56,0x56,0xf9,0xf9,0xfa,0xf9,0xf9,0x56,0x56,0x56,0x56,0xf9, + 0x56,0x56,0xf9,0xf9,0xf9,0xf9,0xf9,0xfa,0x56,0x56,0x56,0x56,0xf9,0xf9,0x56,0xfa, + 0xf9,0xf9,0xf9,0xf9,0x56,0xf9,0x56,0x56,0x56,0x56,0x56,0x56,0xf9,0xf9,0xf9,0x56, + 0x56,0x56,0xf9,0xf9,0x56,0x56,0x56,0xf9,0xf9,0xfa,0xf9,0xf9,0xf9,0xf9,0x56,0xf9, + 0x56,0x56,0xf9,0x56,0x56,0x56,0xf9,0xf9,0xf9,0xf9,0x56,0x56,0xfa,0xf9,0x56,0xf9, + 0xf9,0xf9,0x56,0xf9,0xfa,0xf9,0xf9,0xf9,0xf9,0x56,0x56,0x56,0xf9,0xf9,0x56,0xf9, + 0x56,0x56,0x56,0xf9,0xf9,0x56,0x56,0xf9,0x56,0xf9,0xf9,0xf9,0xf9,0xf9,0x56,0x56, + 0x56,0xf9,0x56,0xfa,0x56,0xf9,0xfa,0xfa,0xf9,0xf9,0x56,0xf9,0xf9,0xf9,0xf9,0xf9, + 0xf9,0xf9,0x56,0xf9,0x56,0xf9,0x56,0x56,0x56,0x56,0xf9,0x56,0x56,0xf9,0xf9,0x56, + 0xf9,0xf9,0xf9,0xf9,0x56,0xf9,0xf9,0x56,0x56,0xfa,0xf9,0xf9,0x56,0xf9,0x56,0xf9, + 
0xf9,0xf9,0x56,0xf9,0xf9,0xf9,0x56,0x56,0xf9,0xf9,0x56,0x56,0xf9,0xf9,0xfa,0xf9, + 0xf9,0x56,0xf9,0xf9,0x56,0xf9,0x56,0x56,0xf9,0xf9,0x56,0x56,0x56,0xf9,0x56,0xf9 +#endif + +}; + +#warning shared video_console.c +struct vc_progress_element { + unsigned int version; + unsigned int flags; + unsigned int time; + unsigned char count; + unsigned char res[3]; + int width; + int height; + int dx; + int dy; + int transparent; + unsigned int res2[3]; +}; +typedef struct vc_progress_element vc_progress_element; + +static vc_progress_element default_progress = + { 0, 0, 111, 3, {0, 0, 0}, + 16, 16, 15, 15, TRANSPARENT, {0, 0, 0} }; +/* (320-8), (256+4) */ + + + +static unsigned char default_noroot_data[ 32 * 48 ] = { + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 
0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xff,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xff,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xff,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa, + 0xff,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xff,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xf7,0xff, + 0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0x00,0xff,0xff,0xff,0xfa,0xfa,0xfa, + 
0xfa,0xfa,0xff,0xff,0xff,0xff,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xf7,0xf7,0xff,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xff,0xf7,0xfc,0xf7,0xf7,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xf7,0xfc,0xff,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xff,0xff,0xf7,0xff,0xf7,0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xf7,0xfc,0x00,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0x00,0xf7,0xf7,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xf7,0xfc,0xff,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xff,0x00,0xfc,0x00,0xf7,0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xf7,0xfc,0x00,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0x00,0xf7,0xf7,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xf7,0xfc,0xff,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xff,0x00,0xfc,0x00,0xf7,0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xf7,0xfc,0x00,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0x00,0xf7,0xf7,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xf7,0xf7,0xff,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xff,0x00,0xf7,0x00,0xf7,0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xf7,0xf7,0xf7,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xf7,0xff,0xf7,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff,0xf7,0xfc,0xf7,0xff,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xff,0xff,0xff,0xf7,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xf7,0xf7,0xff,0xfa,0xfa,0xfa, + 
0xfa,0xfa,0xff,0xf7,0xf7,0xf7,0xf7,0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff,0xff,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0x00,0xff,0xff,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xff, + 0xff,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xff,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xff,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0x00,0xf7,0xf7,0xff, + 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 
0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, + 0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa,0xfa, +}; + +static vc_progress_element default_roroot = + { 0, 1, 0, 0, {0, 0, 0}, + 32, 48, -16, -26, 0x80, { 0, 0, 0 }}; diff --git a/pexpert/pexpert/pexpert.h b/pexpert/pexpert/pexpert.h new file mode 100644 index 000000000..6078e3c2c --- /dev/null +++ b/pexpert/pexpert/pexpert.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_PEXPERT_H_ +#define _PEXPERT_PEXPERT_H_ + +#include + +#include +#include + +__BEGIN_DECLS +#include +#include +#include + +#ifdef PEXPERT_KERNEL_PRIVATE +#include +#endif +#include + +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(IOKIT_KERNEL_PRIVATE) +typedef void *cpu_id_t; +#else +typedef void *cpu_id_t; +#endif + + +void PE_enter_debugger( + char *cause); + +void PE_init_platform( + boolean_t vm_initialized, + void *args); + +void PE_init_kprintf( + boolean_t vm_initialized); + +extern void (*PE_kputc)(char c); + +void PE_init_printf( + boolean_t vm_initialized); + +extern void (*PE_putc)(char c); + +void PE_init_iokit( + void); + +struct clock_frequency_info_t { + unsigned long bus_clock_rate_hz; + unsigned long cpu_clock_rate_hz; + unsigned long dec_clock_rate_hz; + unsigned long bus_clock_rate_num; + unsigned long bus_clock_rate_den; + unsigned long bus_to_cpu_rate_num; + unsigned long bus_to_cpu_rate_den; + unsigned long bus_to_dec_rate_num; + unsigned long bus_to_dec_rate_den; +}; + +typedef struct clock_frequency_info_t clock_frequency_info_t; + +extern clock_frequency_info_t gPEClockFrequencyInfo; + +struct timebase_freq_t { + unsigned long timebase_num; + unsigned long timebase_den; +}; + +typedef void (*timebase_callback_func)(struct timebase_freq_t *timebase_freq); + +void PE_register_timebase_callback(timebase_callback_func callback); + +void PE_call_timebase_callback(void); + +void PE_install_interrupt_handler( + void *nub, int source, + void *target, IOInterruptHandler handler, void *refCon); + +void kprintf( + const char *fmt, ...); + +void init_display_putc(unsigned char *baseaddr, int rowbytes, int height); +void display_putc(char c); + +boolean_t PE_init_ethernet_debugger( void ); + +enum { + kPEReadTOD, + kPEWriteTOD +}; +extern int (*PE_read_write_time_of_day)( + unsigned int options, + long * secs); + +enum { + kPEWaitForInput = 0x00000001, + kPERawInput = 0x00000002 +}; +extern int 
(*PE_poll_input)( + unsigned int options, + char * c); + +extern int (*PE_write_IIC)( + unsigned char addr, + unsigned char reg, + unsigned char data); + +/* Private Stuff - eventually put in pexpertprivate.h */ +enum { + kDebugTypeNone = 0, + kDebugTypeDisplay = 1, + kDebugTypeSerial = 2 +}; + +struct PE_Video { + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth */ + unsigned long v_display; /* Text or Graphics */ + char v_pixelFormat[64]; + long v_resv[ 4 ]; +}; + +typedef struct PE_Video PE_Video; + +extern int PE_current_console( + PE_Video *info); + +extern void PE_create_console( + void); + +extern int PE_initialize_console( + PE_Video *newInfo, + int op); + +#define kPEGraphicsMode 1 +#define kPETextMode 2 +#define kPETextScreen 3 +#define kPEAcquireScreen 4 +#define kPEReleaseScreen 5 +#define kPEEnableScreen 6 +#define kPEDisableScreen 7 + +extern void PE_display_icon( unsigned int flags, + const char * name ); + +typedef struct PE_state { + boolean_t initialized; + PE_Video video; + void *deviceTreeHead; + void *bootArgs; +#if __i386__ + void *fakePPCBootArgs; +#endif +} PE_state_t; + +extern PE_state_t PE_state; + +extern char * PE_boot_args( + void); + +extern boolean_t PE_parse_boot_arg( + char *arg_string, + void *arg_ptr); + +enum { + kPEOptionKey = 0x3a, + kPECommandKey = 0x37, + kPEControlKey = 0x36, + kPEShiftKey = 0x38 +}; + +extern boolean_t PE_get_hotkey( + unsigned char key); + +extern kern_return_t PE_cpu_start( + cpu_id_t target, + vm_offset_t start_paddr, + vm_offset_t arg_paddr); + +extern void PE_cpu_halt( + cpu_id_t target); + +extern void PE_cpu_signal( + cpu_id_t source, + cpu_id_t target); + +extern void PE_cpu_machine_init( + cpu_id_t target, + boolean_t boot); + +extern void PE_cpu_machine_quiesce( + cpu_id_t target); + +extern void 
pe_init_debug(void); + +__END_DECLS + +#endif /* _PEXPERT_PEXPERT_H_ */ diff --git a/pexpert/pexpert/ppc/Makefile b/pexpert/pexpert/ppc/Makefile new file mode 100644 index 000000000..331d8819f --- /dev/null +++ b/pexpert/pexpert/ppc/Makefile @@ -0,0 +1,28 @@ +export MakeInc_cmd=${SRCROOT}/makedefs/MakeInc.cmd +export MakeInc_def=${SRCROOT}/makedefs/MakeInc.def +export MakeInc_rule=${SRCROOT}/makedefs/MakeInc.rule +export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir + + +include $(MakeInc_cmd) +include $(MakeInc_def) + +DATAFILES = \ + boot.h \ + dbdma.h \ + interrupts.h \ + powermac.h + +INSTALL_MD_LIST = ${DATAFILES} + +INSTALL_MD_DIR = pexpert/ppc + +EXPORT_MD_LIST = ${DATAFILES} + +EXPORT_MD_DIR = pexpert/ppc + + +include $(MakeInc_rule) +include $(MakeInc_dir) + + diff --git a/pexpert/pexpert/ppc/boot.h b/pexpert/pexpert/ppc/boot.h new file mode 100644 index 000000000..0c74f8eb6 --- /dev/null +++ b/pexpert/pexpert/ppc/boot.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PEXPERT_PPC_BOOT_H_ +#define _PEXPERT_PPC_BOOT_H_ + +#define BOOT_LINE_LENGTH 256 + +/* + * Video information.. + */ + +struct Boot_Video { + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_display; /* Display Code (if Applicable */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth */ +}; + +typedef struct Boot_Video Boot_Video; + +/* DRAM Bank definitions - describes physical memory layout. + */ +#define kMaxDRAMBanks 26 /* maximum number of DRAM banks */ + +struct DRAMBank +{ + unsigned long base; /* physical base of DRAM bank */ + unsigned long size; /* size of bank */ +}; +typedef struct DRAMBank DRAMBank; + + +/* Boot argument structure - passed into Mach kernel at boot time. + */ +#define kBootArgsVersion 1 +#define kBootArgsRevision 1 + +typedef struct boot_args { + unsigned short Revision; /* Revision of boot_args structure */ + unsigned short Version; /* Version of boot_args structure */ + char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ + DRAMBank PhysicalDRAM[kMaxDRAMBanks]; /* base and range pairs for the 26 DRAM banks */ + Boot_Video Video; /* Video Information */ + unsigned long machineType; /* Machine Type (gestalt) */ + void *deviceTreeP; /* Base of flattened device tree */ + unsigned long deviceTreeLength;/* Length of flattened tree */ + unsigned long topOfKernelData;/* Highest address used in kernel data area */ +} boot_args; + +extern boot_args passed_args; + +#endif /* _PEXPERT_PPC_BOOT_H_ */ diff --git a/pexpert/pexpert/ppc/dbdma.h b/pexpert/pexpert/ppc/dbdma.h new file mode 100644 index 000000000..eac44d6cd --- /dev/null +++ b/pexpert/pexpert/ppc/dbdma.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ + +#ifndef _PEXPERT_PPC_DBDMA_H_ +#define _PEXPERT_PPC_DBDMA_H_ + +#ifndef ASSEMBLER + +#define DBDMA_CMD_OUT_MORE 0 +#define DBDMA_CMD_OUT_LAST 1 +#define DBDMA_CMD_IN_MORE 2 +#define DBDMA_CMD_IN_LAST 3 +#define DBDMA_CMD_STORE_QUAD 4 +#define DBDMA_CMD_LOAD_QUAD 5 +#define DBDMA_CMD_NOP 6 +#define DBDMA_CMD_STOP 7 + +/* Keys */ + +#define DBDMA_KEY_STREAM0 0 +#define DBDMA_KEY_STREAM1 1 +#define DBDMA_KEY_STREAM2 2 +#define DBDMA_KEY_STREAM3 3 + +/* value 4 is reserved */ +#define DBDMA_KEY_REGS 5 +#define DBDMA_KEY_SYSTEM 6 +#define DBDMA_KEY_DEVICE 7 + +#define DBDMA_INT_NEVER 0 +#define DBDMA_INT_IF_TRUE 1 +#define DBDMA_INT_IF_FALSE 2 +#define DBDMA_INT_ALWAYS 3 + +#define DBDMA_BRANCH_NEVER 0 +#define DBDMA_BRANCH_IF_TRUE 1 +#define DBDMA_BRANCH_IF_FALSE 2 +#define DBDMA_BRANCH_ALWAYS 3 + +#define DBDMA_WAIT_NEVER 0 +#define DBDMA_WAIT_IF_TRUE 1 +#define DBDMA_WAIT_IF_FALSE 2 +#define DBDMA_WAIT_ALWAYS 3 + +/* Control register values (in little endian) */ + +#define DBDMA_STATUS_MASK 0x000000ff /* Status Mask */ +#define DBDMA_CNTRL_BRANCH 0x00000100 + /* 0x200 
reserved */ +#define DBDMA_CNTRL_ACTIVE 0x00000400 +#define DBDMA_CNTRL_DEAD 0x00000800 +#define DBDMA_CNTRL_WAKE 0x00001000 +#define DBDMA_CNTRL_FLUSH 0x00002000 +#define DBDMA_CNTRL_PAUSE 0x00004000 +#define DBDMA_CNTRL_RUN 0x00008000 + +#define DBDMA_SET_CNTRL(x) ( ((x) | (x) << 16) ) +#define DBDMA_CLEAR_CNTRL(x) ( (x) << 16) + +#define POWERMAC_IO(a) (a) +#define DBDMA_REGMAP(channel) \ + (dbdma_regmap_t *)((v_u_char *) POWERMAC_IO(PCI_DMA_BASE_PHYS) \ + + (channel << 8)) + + +/* powermac_dbdma_channels hold the physical channel numbers for + * each dbdma device + */ + + +/* This struct is layout in little endian format */ + +struct dbdma_command { + unsigned long d_cmd_count; + unsigned long d_address; + unsigned long d_cmddep; + unsigned long d_status_resid; +}; + +typedef struct dbdma_command dbdma_command_t; + +#define DBDMA_BUILD(d, cmd, key, count, address, interrupt, wait, branch) {\ + DBDMA_ST4_ENDIAN(&d->d_address, address); \ + (d)->d_status_resid = 0; \ + (d)->d_cmddep = 0; \ + DBDMA_ST4_ENDIAN(&d->d_cmd_count, \ + ((cmd) << 28) | ((key) << 24) |\ + ((interrupt) << 20) |\ + ((branch) << 18) | ((wait) << 16) | \ + (count)); \ + } + +static __inline__ void +dbdma_st4_endian(volatile unsigned long *a, unsigned long x) +{ + __asm__ volatile + ("stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); + + return; +} + +static __inline__ unsigned long +dbdma_ld4_endian(volatile unsigned long *a) +{ + unsigned long swap; + + __asm__ volatile + ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); + + return swap; +} + +#define DBDMA_LD4_ENDIAN(a) dbdma_ld4_endian(a) +#define DBDMA_ST4_ENDIAN(a, x) dbdma_st4_endian(a, x) + +/* + * DBDMA Channel layout + * + * NOTE - This structure is in little-endian format. 
+ */ + +struct dbdma_regmap { + unsigned long d_control; /* Control Register */ + unsigned long d_status; /* DBDMA Status Register */ + unsigned long d_cmdptrhi; /* MSB of command pointer (not used yet) */ + unsigned long d_cmdptrlo; /* LSB of command pointer */ + unsigned long d_intselect; /* Interrupt Select */ + unsigned long d_branch; /* Branch selection */ + unsigned long d_wait; /* Wait selection */ + unsigned long d_transmode; /* Transfer modes */ + unsigned long d_dataptrhi; /* MSB of Data Pointer */ + unsigned long d_dataptrlo; /* LSB of Data Pointer */ + unsigned long d_reserved; /* Reserved for the moment */ + unsigned long d_branchptrhi; /* MSB of Branch Pointer */ + unsigned long d_branchptrlo; /* LSB of Branch Pointer */ + /* The remaining fields are undefinied and unimplemented */ +}; + +typedef volatile struct dbdma_regmap dbdma_regmap_t; + +/* DBDMA routines */ + +void dbdma_start(dbdma_regmap_t *channel, dbdma_command_t *commands); +void dbdma_stop(dbdma_regmap_t *channel); +void dbdma_flush(dbdma_regmap_t *channel); +void dbdma_reset(dbdma_regmap_t *channel); +void dbdma_continue(dbdma_regmap_t *channel); +void dbdma_pause(dbdma_regmap_t *channel); + +dbdma_command_t *dbdma_alloc(int); /* Allocate command structures */ + +#endif /* ASSEMBLER */ + +#endif /* _PEXPERT_PPC_DBDMA_H_ */ diff --git a/pexpert/pexpert/ppc/interrupts.h b/pexpert/pexpert/ppc/interrupts.h new file mode 100644 index 000000000..14a0ea7d5 --- /dev/null +++ b/pexpert/pexpert/ppc/interrupts.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _POWERMAC_INTERRUPTS_H_ +#define _POWERMAC_INTERRUPTS_H_ + +#include /* for struct ppc_saved_state */ + +extern void (PE_incoming_interrupt)(int type, struct ppc_saved_state *ssp, + unsigned int dsisr, unsigned int dar); + +#endif /* POWERMAC_INTERRUPTS_H_ */ diff --git a/pexpert/pexpert/ppc/powermac.h b/pexpert/pexpert/ppc/powermac.h new file mode 100644 index 000000000..aed753792 --- /dev/null +++ b/pexpert/pexpert/ppc/powermac.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_PPC_POWERMAC_H_ +#define _PEXPERT_PPC_POWERMAC_H_ + +#ifndef ASSEMBLER + +#include + +#include +#include +#include + + +/* prototypes */ + +vm_offset_t PE_find_scc( void ); + +/* Some useful typedefs for accessing control registers */ + +typedef volatile unsigned char v_u_char; +typedef volatile unsigned short v_u_short; +typedef volatile unsigned int v_u_int; +typedef volatile unsigned long v_u_long; + +/* And some useful defines for reading 'volatile' structures, + * don't forget to be be careful about sync()s and eieio()s + */ +#define reg8(reg) (*(v_u_char *)reg) +#define reg16(reg) (*(v_u_short *)reg) +#define reg32(reg) (*(v_u_int *)reg) + +/* Non-cached version of bcopy */ +extern void bcopy_nc(char *from, char *to, int size); + +#endif /* ASSEMBLER */ + +#endif /* _PEXPERT_PPC_POWERMAC_H_ */ diff --git a/pexpert/pexpert/ppc/protos.h b/pexpert/pexpert/ppc/protos.h new file mode 100644 index 000000000..2bed21693 --- /dev/null +++ b/pexpert/pexpert/ppc/protos.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_PPC_PROTOS_H_ +#define _PEXPERT_PPC_PROTOS_H_ + +#define mtibatu(n, reg) __asm__ volatile("mtibatu " # n ", %0" : : "r" (reg)) +#define mtibatl(n, reg) __asm__ volatile("mtibatl " # n ", %0" : : "r" (reg)) + +#define mtdbatu(n, reg) __asm__ volatile("mtdbatu " # n ", %0" : : "r" (reg)) +#define mtdbatl(n, reg) __asm__ volatile("mtdbatl " # n ", %0" : : "r" (reg)) + +#define mfibatu(reg, n) __asm__ volatile("mfibatu %0, " # n : "=r" (reg)) +#define mfibatl(reg, n) __asm__ volatile("mfibatl %0, " # n : "=r" (reg)) + +#define mfdbatu(reg, n) __asm__ volatile("mfdbatu %0, " # n : "=r" (reg)) +#define mfdbatl(reg, n) __asm__ volatile("mfdbatl %0, " # n : "=r" (reg)) + +#define mtsprg(n, reg) __asm__ volatile("mtsprg " # n ", %0" : : "r" (reg)) +#define mfsprg(reg, n) __asm__ volatile("mfsprg %0, " # n : "=r" (reg)) + +#define mtspr(spr, val) __asm__ volatile("mtspr " # spr ", %0" : : "r" (val)) +#define mfspr(reg, spr) __asm__ volatile("mfspr %0, " # spr : "=r" (reg)) + +/* + * Various memory/IO synchronisation instructions + */ + + /* Use eieio as a memory barrier to order stores. + * Useful for device control and PTE maintenance. + */ + +#define eieio() \ + __asm__ volatile("eieio") + + /* Use sync to ensure previous stores have completed. + This is required when manipulating locks and/or + maintaining PTEs or other shared structures on SMP + machines. + */ + +#define sync() \ + __asm__ volatile("sync") + + /* Use isync to sychronize context; that is, the ensure + no prefetching of instructions happen before the + instruction. 
+ */ + +#define isync() \ + __asm__ volatile("isync") + + +//------------------------------------------------------------------------ +// from ppc/endian.h +static __inline__ unsigned int byte_reverse_word(unsigned int word); +static __inline__ unsigned int byte_reverse_word(unsigned int word) { + unsigned int result; + __asm__ volatile("lwbrx %0, 0, %1" : "=r" (result) : "r" (&word)); + return result; +} + +//------------------------------------------------------------------------ +// from ppc/serial_io.h +extern void initialize_serial(void * scc_phys_base); + + +//------------------------------------------------------------------------ +// from ppc/POWERMAC/device_tree.h +extern void ofw_init(void *); + +//------------------------------------------------------------------------ +// from osfmk/ppc/POWERMAC/video_console.c + +extern void initialize_screen(void *, unsigned int); + +extern boolean_t vc_progress_initialize( void * desc, + unsigned char * data, + unsigned char * clut ); + +extern void vc_display_icon( void * desc, + unsigned char * data ); + +// from osfmk/ppc/serial_console.c +extern int switch_to_serial_console(void); +extern void switch_to_old_console(int old_console); + +//------------------------------------------------------------------------ +// from ppc/spl.h + /* Note also : if any new SPL's are introduced, please add to debugging list*/ +#define SPLOFF 0 /* all interrupts disabled TODO NMGS */ +#define SPLPOWER 1 /* power failure (unused) */ +#define SPLHIGH 2 /* TODO NMGS any non-zero, non-INTPRI value */ +#define SPLSCHED SPLHIGH +#define SPLCLOCK SPLSCHED /* hard clock */ +#define SPLVM 4 /* pmap manipulations */ +#define SPLBIO 8 /* block I/O */ +#define SPLIMP 8 /* network & malloc */ +#define SPLTTY 16 /* TTY */ +#define SPLNET 24 /* soft net */ +#define SPLSCLK 27 /* soft clock */ +#define SPLLO 32 /* no interrupts masked */ + +/* internal - masked in to spl level if ok to lower priority (splx, splon) + * the mask bit is never seen 
externally + */ +#define SPL_LOWER_MASK 0x8000 + +#define SPL_CMP_GT(a, b) ((unsigned)(a) > (unsigned)(b)) +#define SPL_CMP_LT(a, b) ((unsigned)(a) < (unsigned)(b)) +#define SPL_CMP_GE(a, b) ((unsigned)(a) >= (unsigned)(b)) +#define SPL_CMP_LE(a, b) ((unsigned)(a) <= (unsigned)(b)) + +typedef unsigned spl_t; + +//------------------------------------------------------------------------ +// from bsd/dev/ppc/busses.h which clashes with mach/device/device_types.h +typedef int io_req_t; + + +//typedef struct ipc_port *ipc_port_t; + +extern void cninit(void); + +/* + * Temporarily stolen from Firmware.h + */ + +void dbgDisp(unsigned int port, unsigned int id, unsigned int data); +void dbgDispLL(unsigned int port, unsigned int id, unsigned int data); +void fwSCCinit(unsigned int port); + +extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3); +#if 1 /* (TEST/DEBUG) - eliminate inline */ +extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3) { + + __asm__ volatile("mr r3,%0" : : "r" (item1) : "r3"); + __asm__ volatile("mr r4,%0" : : "r" (item2) : "r4"); + __asm__ volatile("mr r5,%0" : : "r" (item3) : "r5"); +#ifdef __ELF__ + __asm__ volatile("lis r0,CutTrace@h" : : : "r0"); + __asm__ volatile("ori r0,r0,CutTrace@l" : : : "r0"); +#else + __asm__ volatile("lis r0,hi16(CutTrace)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CutTrace)" : : : "r0"); +#endif + __asm__ volatile("sc"); + return; +} +#endif + +extern void DoPreempt(void); +extern __inline__ void DoPreempt(void) { +#ifdef __ELF__ + __asm__ volatile("lis r0,DoPreemptCall@h" : : : "r0"); + __asm__ volatile("ori r0,r0,DoPreemptCall@l" : : : "r0"); +#else + __asm__ volatile("lis r0,hi16(DoPreemptCall)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(DoPreemptCall)" : : : "r0"); +#endif + __asm__ volatile("sc"); + return; +} + +extern void CreateFakeIO(void); +extern __inline__ void CreateFakeIO(void) { +#ifdef __ELF__ + __asm__ volatile("lis 
r0,CreateFakeIOCall@h" : : : "r0"); + __asm__ volatile("ori r0,r0,CreateFakeIOCall@l" : : : "r0"); +#else + __asm__ volatile("lis r0,hi16(CreateFakeIOCall)" : : : "r0"); + __asm__ volatile("ori r0,r0,lo16(CreateFakeIOCall)" : : : "r0"); +#endif + __asm__ volatile("sc"); + return; +} + +extern void StoreReal(unsigned int val, unsigned int addr); +extern void ReadReal(unsigned int raddr, unsigned int *vaddr); +extern void ClearReal(unsigned int addr, unsigned int lgn); +extern void LoadDBATs(unsigned int *bat); +extern void LoadIBATs(unsigned int *bat); +extern unsigned int LLTraceSet(unsigned int tflags); +extern void GratefulDebInit(void); +extern void GratefulDebDisp(unsigned int coord, unsigned int data); +extern void checkNMI(void); + +/* + * Temporarily stolen from ppc/cpu_number.h + */ +int cpu_number(void); + +#endif /* _PEXPERT_PPC_PROTOS_H_ */ diff --git a/pexpert/pexpert/protos.h b/pexpert/pexpert/protos.h new file mode 100644 index 000000000..79848c30a --- /dev/null +++ b/pexpert/pexpert/protos.h @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#ifndef _PEXPERT_PROTOS_H_ +#define _PEXPERT_PROTOS_H_ + +#ifdef PEXPERT_KERNEL_PRIVATE + + +#include +#include +#include +#include +#include +#include + +#include + +//------------------------------------------------------------------------ +// from ppc/misc_protos.h +extern void printf(const char *fmt, ...); + +extern int strcmp(const char *s1, const char *s2); +extern int strncmp(const char *s1, const char *s2, unsigned long n); +extern int strlen( register const char *string); +extern char *strcat(char *dest, const char *src); +extern char *strcpy(char *dest, const char *src); +extern char *strncpy(char *dest, const char *src, unsigned long n); +extern void interrupt_enable(void); +extern void interrupt_disable(void); +#if __ppc__ +extern void bcopy_nc(char *from, char *to, int size); /* uncached-safe */ +#else +#define bcopy_nc bcopy +#endif + +//------------------------------------------------------------------------ +//from kern/misc_protos.h +extern void panic(const char *string, ...); + +/* Zero an arbitrarily aligned array */ +extern void bzero( + char *from, + vm_size_t nbytes); + +extern void +_doprnt( + register const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); + +#include + +//------------------------------------------------------------------------ +// ?? 
+//typedef int kern_return_t; +void Debugger(const char *message); + +#include +#include + +//------------------------------------------------------------------------ +// from kgdb/kgdb_defs.h +#define kgdb_printf printf + +#include +#include +#include + +//------------------------------------------------------------------------ + +// from iokit/IOStartIOKit.cpp +extern int StartIOKit( void * p1, void * p2, void * p3, void * p4); + +// from iokit/Families/IOFramebuffer.cpp +extern unsigned char appleClut8[ 256 * 3 ]; + + +#endif /* PEXPERT_KERNEL_PRIVATE */ + +#endif /* _PEXPERT_PROTOS_H_ */ diff --git a/pexpert/ppc/pe_bootargs.c b/pexpert/ppc/pe_bootargs.c new file mode 100644 index 000000000..cf67d7205 --- /dev/null +++ b/pexpert/ppc/pe_bootargs.c @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include + +char * +PE_boot_args( + void) +{ + return((char *)((boot_args*)PE_state.bootArgs)->CommandLine); +} diff --git a/pexpert/ppc/pe_clock_speed.c b/pexpert/ppc/pe_clock_speed.c new file mode 100644 index 000000000..61f6c8dff --- /dev/null +++ b/pexpert/ppc/pe_clock_speed.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * pe_clock_speed.c - Determine the best guess for the processor and bus + * speed by using the values returned by run_clock_test. + * + * (c) Apple Computer, Inc. 1998-2000 + * + * Written by: Josh de Cesare + * + */ + +#include + +#include + +// prototypes +extern void pe_run_clock_test(void *tmp); +void pe_do_clock_test(unsigned int via_addr, + int num_speeds, unsigned long *speed_list); + +// Threshold for bus speed matches. +#define kMaxFreqDiff (30000) + +// This is the structure for the data that gets passed to pe_run_clock_test.
+struct clock_test_data { + unsigned int via_addr; + unsigned int via_ticks; + unsigned int dec_ticks; +}; + +// global variables to simplify some stuff. +static long bus_freq_num, bus_freq_den, cpu_pll; + +// PE_Determine_Clock_Speeds is called by the via driver in IOKit +// It uses the numbers generated by pe_do_clock_test and reports +// the cleaned up values to the rest of the OS. +void PE_Determine_Clock_Speeds(unsigned int via_addr, int num_speeds, + unsigned long *speed_list) +{ + boolean_t oldLevel; + unsigned long tmp_bus_speed, tmp_cpu_speed; + unsigned long long tmp; + + oldLevel = ml_set_interrupts_enabled(FALSE); + pe_do_clock_test(via_addr, num_speeds, speed_list); + ml_set_interrupts_enabled(oldLevel); + + tmp_bus_speed = bus_freq_num / bus_freq_den; + tmp = ((unsigned long long)bus_freq_num * cpu_pll) / (bus_freq_den * 2); + tmp_cpu_speed = (unsigned long)tmp; + + // Report the bus clock rate as is. + gPEClockFrequencyInfo.bus_clock_rate_num = bus_freq_num; + gPEClockFrequencyInfo.bus_clock_rate_den = bus_freq_den; + + // pll multipliers are in halves so set the denominator to 2. + gPEClockFrequencyInfo.bus_to_cpu_rate_num = cpu_pll; + gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2; + + // The decrementer rate is one fourth the bus rate. + gPEClockFrequencyInfo.bus_to_dec_rate_num = 1; + gPEClockFrequencyInfo.bus_to_dec_rate_den = 4; + + // Set the truncated numbers in gPEClockFrequencyInfo. + gPEClockFrequencyInfo.bus_clock_rate_hz = tmp_bus_speed; + gPEClockFrequencyInfo.cpu_clock_rate_hz = tmp_cpu_speed; + gPEClockFrequencyInfo.dec_clock_rate_hz = tmp_bus_speed / 4; + + PE_call_timebase_callback(); +} + +// pe_do_clock_test uses the number from pe_run_clock_test to +// find a best fit guess for the bus speed.
+void pe_do_clock_test(unsigned int via_addr, + int num_speeds, unsigned long *speed_list) +{ + struct clock_test_data clock_test_data; + long cnt, diff, raw_cpu_freq, raw_bus_freq, tmp_bus_freq, + last_bus_freq, tries = 10; + + // Save the via addr so the asm part can use it. + clock_test_data.via_addr = via_addr; + + // Keep looping until it matches the last try. + bus_freq_num = 0; + do { + last_bus_freq = bus_freq_num; + + // Tell the asm part to do the real work. + pe_run_clock_test((void *)&clock_test_data); + + // First find the pll mode. Allow any integer times two. + cpu_pll = 10000000 / clock_test_data.dec_ticks; + cpu_pll = (cpu_pll / 2) + (cpu_pll & 1); + + // Using 64 bit math figure out the raw bus speed. + // 0xBF401675E5DULL is 1 / 1.27655us times 2 ^ 24. + raw_bus_freq = ((0xBF401675E5DULL * clock_test_data.dec_ticks) / + clock_test_data.via_ticks) >> 22; + + // use the pll mode and the raw bus speed to find the raw cpu speed. + raw_cpu_freq = raw_bus_freq * cpu_pll / 2; + + // Look to see if the bus speed is close to one of the + // speeds in the table.
+ for (cnt = 0; cnt < num_speeds; cnt++) { + bus_freq_num = speed_list[cnt * 2]; + bus_freq_den = speed_list[cnt * 2 + 1]; + diff = bus_freq_num - raw_bus_freq * bus_freq_den; + if (diff < 0) diff = -diff; + + if (diff < kMaxFreqDiff * bus_freq_den) break; + } + if (cnt != num_speeds) continue; + + // Look to see if the bus speed is close to n * 0.5 MHz + tmp_bus_freq = ((raw_bus_freq + 250000) / 500000) * 500000; + + diff = tmp_bus_freq - raw_bus_freq; + if (diff < 0) diff = -diff; + + if (diff < kMaxFreqDiff) { + bus_freq_num = tmp_bus_freq; + bus_freq_den = 1; + continue; + } + + // Look to see if the bus speed is close to n * 50/3 MHz + tmp_bus_freq = ((raw_bus_freq * 3 + 25000000) / 50000000) * 50000000; + + diff = tmp_bus_freq - raw_bus_freq * 3; + if (diff < 0) diff = -diff; + + if (diff < kMaxFreqDiff * 3) { + bus_freq_num = tmp_bus_freq; + bus_freq_den = 3; + continue; + } + + // Since all else failed return the raw bus speed + bus_freq_num = raw_bus_freq; + bus_freq_den = 1; + } while ((bus_freq_num != last_bus_freq) && tries--); +} diff --git a/pexpert/ppc/pe_clock_speed_asm.s b/pexpert/ppc/pe_clock_speed_asm.s new file mode 100644 index 000000000..dfcd81a64 --- /dev/null +++ b/pexpert/ppc/pe_clock_speed_asm.s @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. 
+ * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * clock_speed_asm.s - Uses the Via timer, decrementer, and counter + * to determine the clock and bus rates. + * + * (c) Apple Computer, Inc. 1998-9 + * + * Writen by: Josh de Cesare + * + */ + +#include + +// constants for the via +#define CountLow 0x800 +#define CountHigh 0xa00 +#define LatchLow 0xc00 +#define LatchHigh 0xe00 + + +// void pe_run_clock_test(clock_test_data *data) +// +// data points to the base address of the via and two longs +// for storing the via and dec results. +// +// The basic idea is this... +// Use the counter register to execute a loop that will take +// 10,000,000 processor clocks. Time it using both the via counter +// and the time base. Return the number of ticks for both so the +// raw values for processor and bus speed can be calculated. 
+ENTRY(pe_run_clock_test, TAG_NO_FRAME_USED) + + li r4, 1 ; flag for cache load + li r5, 1 ; Only once through this time + lwz r9, 0(r3) ; r9 is the via addr + +L_again: + mtctr r5 ; set the count + li r5, 0xff ; Start the counter at 0xffff + stb r5, CountLow(r9) ; clear the via counter + eieio + stb r5, CountHigh(r9) + eieio + mftb r10 ; save starting value of the time base + isync + +L_loop: + addi r5, r5, 1 ; 8 adds for 8 cycles + addi r5, r5, 2 ; the bdnz should be 0 cycles + addi r5, r5, 3 + addi r5, r5, 4 + addi r5, r5, 5 + addi r5, r5, 6 + addi r5, r5, 7 + addi r5, r5, 8 + bdnz L_loop + + sync + mftb r5 ; save the raw time base value + lbz r6, CountHigh(r9) ; get the via counter values + eieio + lbz r7, CountLow(r9) + eieio + lbz r8, CountHigh(r9) + eieio + + cmpi cr0, r4, 1 ; see if the was the cache run + bne L_finish_up ; nope, we are done. + + li r4, 0 ; set flag for the real test + li r5, 0x12d0 ; set the initial count to 1.25e+6 + oris r5, r5, 0x13 + b L_again + +L_finish_up: + cmpi cr0, r7, 0 ; if L1 is zero then H1 is good. + beq L_use_H1 ; else H2 will be good. + + mr r6, r8 ; use H2 instead. + +L_use_H1: + rlwimi r7, r6, 8, 16, 23 + not r6, r7 ; neg - 1 is not + andi. r6, r6, 0xffff + stw r6, 4(r3) ; save via ticks + + sub r5, r5, r10 ; r5 is the number of time base ticks + stw r5, 8(r3) ; save time base ticks + + blr diff --git a/pexpert/ppc/pe_identify_machine.c b/pexpert/ppc/pe_identify_machine.c new file mode 100644 index 000000000..092d1926f --- /dev/null +++ b/pexpert/ppc/pe_identify_machine.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. 
Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +/* External declarations */ + +unsigned int LockTimeOut = 12500000; + +/* pe_identify_machine: + * + * Sets up platform parameters. + * Returns: nothing + */ +void pe_identify_machine(void) +{ + DTEntry cpu, root; + unsigned long *value; + int size; + + // Clear the gPEClockFrequencyInfo struct + bzero((void *)&gPEClockFrequencyInfo, sizeof(clock_frequency_info_t)); + + // Start with default values. + gPEClockFrequencyInfo.bus_clock_rate_hz = 100000000; + gPEClockFrequencyInfo.cpu_clock_rate_hz = 300000000; + gPEClockFrequencyInfo.dec_clock_rate_hz = 25000000; + + // Try to get the values from the device tree. + if (DTFindEntry("device_type", "cpu", &cpu) == kSuccess) { + if (DTGetProperty(cpu, "bus-frequency", + (void **)&value, &size) == kSuccess) + gPEClockFrequencyInfo.bus_clock_rate_hz = *value; + else { + if (DTLookupEntry(0, "/", &root) == kSuccess) + if (DTGetProperty(root, "clock-frequency", + (void **)&value, &size) == kSuccess) + gPEClockFrequencyInfo.bus_clock_rate_hz = *value; + } + + if (DTGetProperty(cpu, "clock-frequency", + (void **)&value, &size) == kSuccess) + gPEClockFrequencyInfo.cpu_clock_rate_hz = *value; + + if (DTGetProperty(cpu, "timebase-frequency", + (void **)&value, &size) == kSuccess) + gPEClockFrequencyInfo.dec_clock_rate_hz = *value; + } + + // Set the num / den pairs form the hz values. 
+ gPEClockFrequencyInfo.bus_clock_rate_num = gPEClockFrequencyInfo.bus_clock_rate_hz; + gPEClockFrequencyInfo.bus_clock_rate_den = 1; + + gPEClockFrequencyInfo.bus_to_cpu_rate_num = + (2 * gPEClockFrequencyInfo.cpu_clock_rate_hz) / gPEClockFrequencyInfo.bus_clock_rate_hz; + gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2; + + gPEClockFrequencyInfo.bus_to_dec_rate_num = 1; + gPEClockFrequencyInfo.bus_to_dec_rate_den = + gPEClockFrequencyInfo.bus_clock_rate_hz / gPEClockFrequencyInfo.dec_clock_rate_hz; +} + +/* get_io_base_addr(): + * + * Get the base address of the io controller. + */ +vm_offset_t get_io_base_addr(void) +{ + DTEntry entryP; + vm_offset_t *address; + int size; + + if ((DTFindEntry("device_type", "dbdma", &entryP) == kSuccess) + || (DTFindEntry("device_type", "mac-io", &entryP) == kSuccess)) + { + if (DTGetProperty(entryP, "AAPL,address", (void **)&address, &size) == kSuccess) + return *address; + + if (DTGetProperty(entryP, "assigned-addresses", (void **)&address, &size) == kSuccess) + // address calculation not correct + return *(address+2); + } + + panic("Can't find this machine's io base address\n"); + return 0; +} + +boolean_t PE_init_ethernet_debugger(void) +{ + boolean_t result; +#if 0 + DTEntry entryP; + vm_offset_t *address; + unsigned char *netAddr; + int size; + vm_offset_t io; + + if ((io = get_io_base_addr()) + && (DTFindEntry("name", "mace", &entryP) == kSuccess) + && (DTGetProperty(entryP, "local-mac-address", (void **)&netAddr, &size) == kSuccess) + && (DTGetProperty(entryP, "reg", (void **)&address, &size) == kSuccess) + && (size == (2 * 3 * sizeof(vm_offset_t)) )) + { + extern boolean_t kdp_mace_init(void *baseAddresses[3], + unsigned char *netAddr); + void *maceAddrs[3]; + + // address calculation not correct + maceAddrs[0] = (void *) io_map(io + address[0], address[1]); + maceAddrs[1] = (void *) io_map(io + address[2], 0x1000); + maceAddrs[2] = (void *) (((vm_offset_t)maceAddrs[1]) + + address[4] - address[2]); + result = 
kdp_mace_init( maceAddrs, netAddr ); + + } else +#endif + result = FALSE; + + return result; +} + +vm_offset_t PE_find_scc(void) +{ + vm_offset_t io; + DTEntry entryP; + + if ((io = get_io_base_addr()) + && (DTFindEntry("name", "escc", &entryP) == kSuccess)) + io += 0x12000; /* Offset to legacy SCC Registers */ + else + io = 0; + + return io; +} diff --git a/pexpert/ppc/pe_init.c b/pexpert/ppc/pe_init.c new file mode 100644 index 000000000..a32134289 --- /dev/null +++ b/pexpert/ppc/pe_init.c @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * file: pe_init.c + * PPC platform expert initialization. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* extern references */ +void pe_identify_machine(void); + +/* private globals */ +PE_state_t PE_state; + +/* Clock Frequency Info */ +clock_frequency_info_t gPEClockFrequencyInfo; + +static int PE_stub_read_write_time_of_day(unsigned int options, long * secs) +{ + // believe it or not, BSD crashes if invalid time returned. FIXME.
+ if( options == kPEReadTOD) + *secs = 0xb2383c72; + + return 0; +} + +static int PE_stub_poll_input(unsigned int options, char * c) +{ + *c = 0xff; + + return 1; +} + +static int PE_stub_write_IIC(unsigned char addr, unsigned char reg, + unsigned char data) +{ + return 1; +} + +int (*PE_read_write_time_of_day)(unsigned int options, long * secs) + = PE_stub_read_write_time_of_day; +int (*PE_poll_input)(unsigned int options, char * c) + = PE_stub_poll_input; + +int (*PE_write_IIC)(unsigned char addr, unsigned char reg, + unsigned char data) + = PE_stub_write_IIC; + + +int PE_initialize_console( PE_Video * info, int op ) +{ + static int last_console = -1; + Boot_Video bootInfo; + Boot_Video * bInfo; + + if( info) { + bootInfo.v_baseAddr = info->v_baseAddr; + bootInfo.v_rowBytes = info->v_rowBytes; + bootInfo.v_width = info->v_width; + bootInfo.v_height = info->v_height; + bootInfo.v_depth = info->v_depth; + bootInfo.v_display = 0; + bInfo = &bootInfo; + } else + bInfo = 0; + + switch( op ) { + + case kPEDisableScreen: + initialize_screen((void *) bInfo, op); + last_console = switch_to_serial_console(); + kprintf("kPEDisableScreen %d\n",last_console); + break; + + case kPEEnableScreen: + initialize_screen((void *) bInfo, op); + kprintf("kPEEnableScreen %d\n",last_console); + if( last_console != -1) + switch_to_old_console( last_console); + break; + + default: + initialize_screen((void *) bInfo, op); + break; + } + + return 0; +} + +static boolean_t find_image( const char * name, + void ** desc, + unsigned char ** data, + unsigned char ** clut ) +{ + boolean_t ok; +#if 0 + DTEntry entry; + int size; + + // This is a little flawed now the device tree data + // is freed. 
+ if( (kSuccess == DTLookupEntry(0, "/AAPL,images", &entry)) + && (kSuccess == DTLookupEntry(entry, name, &entry)) ) { + + ok = ( (kSuccess == DTGetProperty(entry, "desc", + desc, &size)) + && (kSuccess == DTGetProperty(entry, "data", + (void **)data, &size))); + + if( clut && (kSuccess != DTGetProperty(entry, "clut", + (void **)clut, &size))) + *clut = appleClut8; + } else +#endif + ok = FALSE; + + return( ok ); +} + +void PE_init_iokit(void) +{ + kern_return_t ret; + void * desc; + unsigned char * data; + unsigned char * clut; + + PE_init_kprintf(TRUE); + PE_init_printf(TRUE); + + // init this now to get mace debugger for iokit startup + PE_init_ethernet_debugger(); + + if( !find_image( "progress", &desc, &data, &clut)) { + clut = appleClut8; + desc = &default_progress; + data = default_progress_data; + } + vc_progress_initialize( desc, data, clut ); + + PE_initialize_console( (PE_Video *) 0, kPEAcquireScreen ); + + ret = StartIOKit( PE_state.deviceTreeHead, PE_state.bootArgs, + (void *)0, (void *)0); +} + +void PE_init_platform(boolean_t vm_initialized, void *_args) +{ + boot_args *args = (boot_args *)_args; + + if (PE_state.initialized == FALSE) + { + PE_state.initialized = TRUE; + PE_state.bootArgs = _args; + PE_state.deviceTreeHead = args->deviceTreeP; + PE_state.video.v_baseAddr = args->Video.v_baseAddr; + PE_state.video.v_rowBytes = args->Video.v_rowBytes; + PE_state.video.v_width = args->Video.v_width; + PE_state.video.v_height = args->Video.v_height; + PE_state.video.v_depth = args->Video.v_depth; + PE_state.video.v_display = args->Video.v_display; + strcpy( PE_state.video.v_pixelFormat, "PPPPPPPP"); + } + + if (!vm_initialized) + { + /* + * Setup the OpenFirmware Device Tree routines + * so the console can be found and the right I/O space + * can be used.. 
+ */ + DTInit(PE_state.deviceTreeHead); + + /* Setup gPEClockFrequencyInfo */ + pe_identify_machine(); + } + else + { + pe_init_debug(); + } +} + +void PE_create_console( void ) +{ + if (PE_state.video.v_display) + PE_initialize_console( &PE_state.video, kPEGraphicsMode ); + else + PE_initialize_console( &PE_state.video, kPETextMode ); +} + +int PE_current_console( PE_Video * info ) +{ + *info = PE_state.video; + return( 0); +} + +void PE_display_icon( unsigned int flags, + const char * name ) +{ + void * desc; + unsigned char * data; + + if( !find_image( name, &desc, &data, 0)) { + desc = &default_roroot; + data = default_noroot_data; + } + vc_display_icon( desc, data ); +} + +extern boolean_t PE_get_hotkey( + unsigned char key) +{ + unsigned char * adbKeymap; + int size; + DTEntry entry; + + if( (kSuccess != DTLookupEntry( 0, "/", &entry)) + || (kSuccess != DTGetProperty( entry, "AAPL,adb-keymap", + (void **)&adbKeymap, &size)) + || (size != 16)) + + return( FALSE); + + if( key > 127) + return( FALSE); + + return( adbKeymap[ key / 8 ] & (0x80 >> (key & 7))); +} + +static timebase_callback_func gTimebaseCallback; + +void PE_register_timebase_callback(timebase_callback_func callback) +{ + gTimebaseCallback = callback; + + PE_call_timebase_callback(); +} + +void PE_call_timebase_callback(void) +{ + struct timebase_freq_t timebase_freq; + unsigned long num, den, cnt; + + num = gPEClockFrequencyInfo.bus_clock_rate_num * gPEClockFrequencyInfo.bus_to_dec_rate_num; + den = gPEClockFrequencyInfo.bus_clock_rate_den * gPEClockFrequencyInfo.bus_to_dec_rate_den; + + cnt = 2; + while (cnt <= den) { + if ((num % cnt) || (den % cnt)) { + cnt++; + continue; + } + + num /= cnt; + den /= cnt; + } + + timebase_freq.timebase_num = num; + timebase_freq.timebase_den = den; + + if (gTimebaseCallback) gTimebaseCallback(&timebase_freq); +} diff --git a/pexpert/ppc/pe_kprintf.c b/pexpert/ppc/pe_kprintf.c new file mode 100644 index 000000000..6ecfacbe8 --- /dev/null +++ 
b/pexpert/ppc/pe_kprintf.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * file: pe_kprintf.c + * PPC platform expert debugging output initialization. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +/* extern references */ +extern void init_display_putc(unsigned char*, int, int); +extern void display_putc(char c); +extern int scc_putc(int unit, int line, int c); +extern void cnputc(char c); + +/* Internal routines -- eventually put this in serial driver */ +void serial_putc(char c); + +/* Globals */ +void (*PE_kputc)(char c) = 0; + +unsigned int disableSerialOuput = TRUE; + + +static struct slock kprintf_lock; + +void PE_init_kprintf(boolean_t vm_initialized) +{ + static vm_offset_t scc; + unsigned int boot_arg; + + if (PE_state.initialized == FALSE) + panic("Platform Expert not initialized"); + + if (!vm_initialized) + { + if (PE_parse_boot_arg("debug", &boot_arg)) + if(boot_arg & DB_KPRT) disableSerialOuput = FALSE; + + if( (scc = PE_find_scc())) + { + initialize_serial( (void *) scc ); + PE_kputc = serial_putc; + + simple_lock_init(&kprintf_lock, 0); + } else + PE_kputc = cnputc; + + } else if( scc){ + initialize_serial( (void *) io_map( scc, 0x1000) ); + } + +#if 0 + /* + * FUTURE: eventually let the boot command determine where + * the debug output will be, serial, video, etc. + */ + switch (PE_state.debug_video.v_display) { + case kDebugTypeSerial: + PE_kputc = serial_putc; + break; + + case kDebugTypeDisplay: + init_display_putc( (unsigned char*)PE_state.debug_video.v_baseAddr, + PE_state.debug_video.v_rowBytes, + PE_state.debug_video.v_height); + PE_kputc = display_putc; + break; + + default: + PE_state.debug_video.v_baseAddr = 0; + } +#endif +} + +void serial_putc(char c) +{ + (void) scc_putc(0, 1, c); + if (c == '\n') (void) scc_putc(0, 1, '\r'); + +#if 0 + (void) scc_putc(0, (int)PE_state.debug_video.v_baseAddr, c); + if (c == '\n') (void) scc_putc(0, (int)PE_state.debug_video.v_baseAddr, '\r'); +#endif +} + +void kprintf(const char *fmt, ...) 
+{ + va_list listp; + boolean_t state; + + state = ml_set_interrupts_enabled(FALSE); + simple_lock(&kprintf_lock); + + if (!disableSerialOuput) { + va_start(listp, fmt); + _doprnt(fmt, &listp, PE_kputc, 16); + va_end(listp); + } + + simple_unlock(&kprintf_lock); + ml_set_interrupts_enabled(state); +} + diff --git a/pexpert/ppc/pe_misc.s b/pexpert/ppc/pe_misc.s new file mode 100644 index 000000000..8cda8649f --- /dev/null +++ b/pexpert/ppc/pe_misc.s @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * The contents of this file constitute Original Code as defined in and + * are subject to the Apple Public Source License Version 1.1 (the + * "License"). You may not use this file except in compliance with the + * License. Please obtain a copy of the License at + * http://www.apple.com/publicsource and read it before using this file. + * + * This Original Code and all software distributed under the License are + * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the + * License for the specific language governing rights and limitations + * under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +#include + +/* +** PE_get_timebase() +** +** Entry - R3 contains pointer to 64 bit structure. +** +** Exit - 64 bit structure filled in. +** +*/ +ENTRY(PE_get_timebase, TAG_NO_FRAME_USED) +loop: + mftbu r4 + mftb r5 + mftbu r6 + cmpw r6, r4 + bne loop + + stw r4, 0(r3) + stw r5, 4(r3) + + blr + + + -- 2.45.2